Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpi_dbg.c | 4
-rw-r--r-- drivers/acpi/acpi_lpit.c | 3
-rw-r--r-- drivers/acpi/acpi_lpss.c | 14
-rw-r--r-- drivers/acpi/acpi_tad.c | 2
-rw-r--r-- drivers/acpi/acpi_watchdog.c | 7
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 1
-rw-r--r-- drivers/acpi/acpica/acpredef.h | 4
-rw-r--r-- drivers/acpi/acpica/dbhistry.c | 1
-rw-r--r-- drivers/acpi/acpica/dsfield.c | 22
-rw-r--r-- drivers/acpi/acpica/exfield.c | 2
-rw-r--r-- drivers/acpi/apei/bert.c | 6
-rw-r--r-- drivers/acpi/apei/einj.c | 5
-rw-r--r-- drivers/acpi/apei/erst.c | 4
-rw-r--r-- drivers/acpi/apei/ghes.c | 73
-rw-r--r-- drivers/acpi/apei/hest.c | 5
-rw-r--r-- drivers/acpi/arm64/gtdt.c | 4
-rw-r--r-- drivers/acpi/arm64/iort.c | 126
-rw-r--r-- drivers/acpi/button.c | 1
-rw-r--r-- drivers/acpi/cppc_acpi.c | 5
-rw-r--r-- drivers/acpi/device_pm.c | 31
-rw-r--r-- drivers/acpi/dptf/dptf_power.c | 147
-rw-r--r-- drivers/acpi/ec.c | 47
-rw-r--r-- drivers/acpi/evged.c | 22
-rw-r--r-- drivers/acpi/internal.h | 1
-rw-r--r-- drivers/acpi/pci_link.c | 2
-rw-r--r-- drivers/acpi/pci_mcfg.c | 8
-rw-r--r-- drivers/acpi/pci_root.c | 11
-rw-r--r-- drivers/acpi/pmic/intel_pmic_chtdc_ti.c | 1
-rw-r--r-- drivers/acpi/power.c | 2
-rw-r--r-- drivers/acpi/processor_idle.c | 9
-rw-r--r-- drivers/acpi/sbs.c | 3
-rw-r--r-- drivers/acpi/scan.c | 15
-rw-r--r-- drivers/acpi/sleep.c | 51
-rw-r--r-- drivers/acpi/sysfs.c | 4
-rw-r--r-- drivers/acpi/utils.c | 25
-rw-r--r-- drivers/acpi/video_detect.c | 10
-rw-r--r-- drivers/amba/bus.c | 14
-rw-r--r-- drivers/android/binderfs.c | 4
-rw-r--r-- drivers/ata/libata-core.c | 2
-rw-r--r-- drivers/ata/libata-scsi.c | 30
-rw-r--r-- drivers/atm/Kconfig | 4
-rw-r--r-- drivers/base/base.h | 1
-rw-r--r-- drivers/base/core.c | 330
-rw-r--r-- drivers/base/dd.c | 33
-rw-r--r-- drivers/base/firmware_loader/fallback.c | 15
-rw-r--r-- drivers/base/firmware_loader/fallback.h | 8
-rw-r--r-- drivers/base/firmware_loader/fallback_platform.c | 2
-rw-r--r-- drivers/base/firmware_loader/fallback_table.c | 2
-rw-r--r-- drivers/base/firmware_loader/firmware.h | 3
-rw-r--r-- drivers/base/firmware_loader/main.c | 14
-rw-r--r-- drivers/base/memory.c | 44
-rw-r--r-- drivers/base/node.c | 8
-rw-r--r-- drivers/base/platform-msi.c | 2
-rw-r--r-- drivers/base/platform.c | 52
-rw-r--r-- drivers/base/power/main.c | 348
-rw-r--r-- drivers/base/power/runtime.c | 6
-rw-r--r-- drivers/base/power/sysfs.c | 4
-rw-r--r-- drivers/base/property.c | 13
-rw-r--r-- drivers/base/regmap/regmap-debugfs.c | 6
-rw-r--r-- drivers/base/regmap/regmap-i2c.c | 61
-rw-r--r-- drivers/base/regmap/regmap-irq.c | 84
-rw-r--r-- drivers/base/regmap/regmap.c | 23
-rw-r--r-- drivers/base/soc.c | 2
-rw-r--r-- drivers/base/swnode.c | 71
-rw-r--r-- drivers/block/Kconfig | 2
-rw-r--r-- drivers/block/Makefile | 1
-rw-r--r-- drivers/block/aoe/aoeblk.c | 1
-rw-r--r-- drivers/block/drbd/drbd_bitmap.c | 4
-rw-r--r-- drivers/block/drbd/drbd_int.h | 28
-rw-r--r-- drivers/block/drbd/drbd_main.c | 2
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 13
-rw-r--r-- drivers/block/drbd/drbd_req.c | 27
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 6
-rw-r--r-- drivers/block/floppy.c | 466
-rw-r--r-- drivers/block/loop.c | 391
-rw-r--r-- drivers/block/null_blk_main.c | 35
-rw-r--r-- drivers/block/null_blk_zoned.c | 41
-rw-r--r-- drivers/block/paride/pcd.c | 2
-rw-r--r-- drivers/block/ps3disk.c | 1
-rw-r--r-- drivers/block/rnbd/Kconfig | 28
-rw-r--r-- drivers/block/rnbd/Makefile | 15
-rw-r--r-- drivers/block/rnbd/README | 92
-rw-r--r-- drivers/block/rnbd/rnbd-clt-sysfs.c | 639
-rw-r--r-- drivers/block/rnbd/rnbd-clt.c | 1729
-rw-r--r-- drivers/block/rnbd/rnbd-clt.h | 156
-rw-r--r-- drivers/block/rnbd/rnbd-common.c | 23
-rw-r--r-- drivers/block/rnbd/rnbd-log.h | 41
-rw-r--r-- drivers/block/rnbd/rnbd-proto.h | 303
-rw-r--r-- drivers/block/rnbd/rnbd-srv-dev.c | 134
-rw-r--r-- drivers/block/rnbd/rnbd-srv-dev.h | 92
-rw-r--r-- drivers/block/rnbd/rnbd-srv-sysfs.c | 215
-rw-r--r-- drivers/block/rnbd/rnbd-srv.c | 844
-rw-r--r-- drivers/block/rnbd/rnbd-srv.h | 78
-rw-r--r-- drivers/block/rsxx/dev.c | 19
-rw-r--r-- drivers/block/swim.c | 6
-rw-r--r-- drivers/block/zram/zcomp.c | 51
-rw-r--r-- drivers/block/zram/zcomp.h | 5
-rw-r--r-- drivers/block/zram/zram_drv.c | 24
-rw-r--r-- drivers/bluetooth/btbcm.c | 142
-rw-r--r-- drivers/bluetooth/btbcm.h | 10
-rw-r--r-- drivers/bluetooth/btmrvl_sdio.c | 18
-rw-r--r-- drivers/bluetooth/btmtksdio.c | 4
-rw-r--r-- drivers/bluetooth/btmtkuart.c | 17
-rw-r--r-- drivers/bluetooth/btqca.c | 32
-rw-r--r-- drivers/bluetooth/btqca.h | 3
-rw-r--r-- drivers/bluetooth/btrtl.c | 10
-rw-r--r-- drivers/bluetooth/btusb.c | 205
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 35
-rw-r--r-- drivers/bluetooth/hci_h5.c | 2
-rw-r--r-- drivers/bluetooth/hci_qca.c | 178
-rw-r--r-- drivers/bluetooth/hci_serdev.c | 4
-rw-r--r-- drivers/bus/Kconfig | 41
-rw-r--r-- drivers/bus/Makefile | 4
-rw-r--r-- drivers/bus/arm-integrator-lm.c | 128
-rw-r--r-- drivers/bus/bt1-apb.c | 421
-rw-r--r-- drivers/bus/bt1-axi.c | 314
-rw-r--r-- drivers/bus/mhi/core/boot.c | 75
-rw-r--r-- drivers/bus/mhi/core/init.c | 10
-rw-r--r-- drivers/bus/mhi/core/internal.h | 9
-rw-r--r-- drivers/bus/mhi/core/main.c | 197
-rw-r--r-- drivers/bus/mhi/core/pm.c | 229
-rw-r--r-- drivers/bus/ti-sysc.c | 25
-rw-r--r-- drivers/bus/vexpress-config.c | 354
-rw-r--r-- drivers/cdrom/cdrom.c | 87
-rw-r--r-- drivers/cdrom/gdrom.c | 2
-rw-r--r-- drivers/char/Kconfig | 2
-rw-r--r-- drivers/char/agp/intel-gtt.c | 21
-rw-r--r-- drivers/char/hw_random/Kconfig | 13
-rw-r--r-- drivers/char/hw_random/Makefile | 1
-rw-r--r-- drivers/char/hw_random/cctrng.c | 735
-rw-r--r-- drivers/char/hw_random/cctrng.h | 72
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 5
-rw-r--r-- drivers/char/hw_random/optee-rng.c | 2
-rw-r--r-- drivers/char/hw_random/xgene-rng.c | 4
-rw-r--r-- drivers/char/ipmi/Kconfig | 2
-rw-r--r-- drivers/char/ipmi/bt-bmc.c | 21
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 9
-rw-r--r-- drivers/char/ipmi/ipmi_si_hotmod.c | 2
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 2
-rw-r--r-- drivers/char/ipmi/ipmi_si_platform.c | 2
-rw-r--r-- drivers/char/ipmi/ipmi_ssif.c | 24
-rw-r--r-- drivers/char/mem.c | 101
-rw-r--r-- drivers/char/nvram.c | 4
-rw-r--r-- drivers/char/pcmcia/cm4000_cs.c | 14
-rw-r--r-- drivers/char/random.c | 10
-rw-r--r-- drivers/char/tlclk.c | 17
-rw-r--r-- drivers/char/tpm/eventlog/tpm2.c | 12
-rw-r--r-- drivers/char/tpm/tpm_ftpm_tee.c | 2
-rw-r--r-- drivers/char/virtio_console.c | 2
-rw-r--r-- drivers/clk/Makefile | 4
-rw-r--r-- drivers/clk/clk-qoriq.c | 30
-rw-r--r-- drivers/clk/clk.c | 9
-rw-r--r-- drivers/clk/mediatek/Kconfig | 7
-rw-r--r-- drivers/clk/mediatek/Makefile | 1
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-mm.c | 9
-rw-r--r-- drivers/clk/mediatek/clk-mt2712-mm.c | 9
-rw-r--r-- drivers/clk/mediatek/clk-mt6779-mm.c | 9
-rw-r--r-- drivers/clk/mediatek/clk-mt6797-mm.c | 9
-rw-r--r-- drivers/clk/mediatek/clk-mt8173-mm.c | 146
-rw-r--r-- drivers/clk/mediatek/clk-mt8173.c | 104
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-mm.c | 9
-rw-r--r-- drivers/clk/qcom/Kconfig | 1
-rw-r--r-- drivers/clk/qcom/gcc-sm8150.c | 3
-rw-r--r-- drivers/clk/rockchip/clk-rk3228.c | 17
-rw-r--r-- drivers/clk/tegra/clk-tegra124.c | 2
-rw-r--r-- drivers/clk/ti/clk-33xx.c | 2
-rw-r--r-- drivers/clk/ti/clk-816x.c | 1
-rw-r--r-- drivers/clk/ti/clkctrl.c | 99
-rw-r--r-- drivers/clk/versatile/Kconfig | 21
-rw-r--r-- drivers/clk/versatile/clk-impd1.c | 122
-rw-r--r-- drivers/clk/versatile/clk-vexpress-osc.c | 20
-rw-r--r-- drivers/clk/zynqmp/clk-gate-zynqmp.c | 9
-rw-r--r-- drivers/clk/zynqmp/clk-mux-zynqmp.c | 6
-rw-r--r-- drivers/clk/zynqmp/clkc.c | 17
-rw-r--r-- drivers/clk/zynqmp/divider.c | 12
-rw-r--r-- drivers/clk/zynqmp/pll.c | 29
-rw-r--r-- drivers/clocksource/Kconfig | 8
-rw-r--r-- drivers/clocksource/Makefile | 1
-rw-r--r-- drivers/clocksource/arc_timer.c | 4
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 4
-rw-r--r-- drivers/clocksource/dw_apb_timer.c | 5
-rw-r--r-- drivers/clocksource/dw_apb_timer_of.c | 8
-rw-r--r-- drivers/clocksource/mips-gic-timer.c | 50
-rw-r--r-- drivers/clocksource/timer-atmel-st.c | 3
-rw-r--r-- drivers/clocksource/timer-davinci.c | 24
-rw-r--r-- drivers/clocksource/timer-imx-tpm.c | 8
-rw-r--r-- drivers/clocksource/timer-ti-32k.c | 48
-rw-r--r-- drivers/clocksource/timer-ti-dm-systimer.c | 727
-rw-r--r-- drivers/clocksource/timer-ti-dm.c | 4
-rw-r--r-- drivers/clocksource/timer-versatile.c | 3
-rw-r--r-- drivers/connector/cn_proc.c | 21
-rw-r--r-- drivers/cpufreq/Kconfig | 3
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 7
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq.c | 11
-rw-r--r-- drivers/cpufreq/imx-cpufreq-dt.c | 84
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 3
-rw-r--r-- drivers/cpufreq/loongson2_cpufreq.c | 22
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-nvmem.c | 2
-rw-r--r-- drivers/cpufreq/qoriq-cpufreq.c | 76
-rw-r--r-- drivers/cpufreq/tegra20-cpufreq.c | 217
-rw-r--r-- drivers/cpuidle/Kconfig.arm | 13
-rw-r--r-- drivers/cpuidle/Makefile | 1
-rw-r--r-- drivers/cpuidle/cpuidle-psci.c | 8
-rw-r--r-- drivers/cpuidle/cpuidle-pseries.c | 39
-rw-r--r-- drivers/cpuidle/cpuidle-qcom-spm.c (renamed from drivers/soc/qcom/spm.c) | 138
-rw-r--r-- drivers/cpuidle/cpuidle-tegra.c | 1
-rw-r--r-- drivers/cpuidle/sysfs.c | 73
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 6
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-core.c | 4
-rw-r--r-- drivers/crypto/atmel-sha.c | 1
-rw-r--r-- drivers/crypto/axis/artpec6_crypto.c | 10
-rw-r--r-- drivers/crypto/bcm/cipher.c | 27
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 2
-rw-r--r-- drivers/crypto/ccp/Kconfig | 3
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-sha.c | 9
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 58
-rw-r--r-- drivers/crypto/ccree/cc_cipher.c | 9
-rw-r--r-- drivers/crypto/ccree/cc_debugfs.c | 4
-rw-r--r-- drivers/crypto/chelsio/chcr_algo.c | 103
-rw-r--r-- drivers/crypto/chelsio/chcr_core.c | 23
-rw-r--r-- drivers/crypto/chelsio/chcr_core.h | 10
-rw-r--r-- drivers/crypto/chelsio/chcr_crypto.h | 1
-rw-r--r-- drivers/crypto/chelsio/chcr_ipsec.c | 6
-rw-r--r-- drivers/crypto/chelsio/chcr_ktls.c | 107
-rw-r--r-- drivers/crypto/chelsio/chcr_ktls.h | 9
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_cm.c | 195
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_cm.h | 1
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_io.c | 2
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_main.c | 14
-rw-r--r-- drivers/crypto/hisilicon/Kconfig | 4
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre.h | 18
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 99
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 355
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 2433
-rw-r--r-- drivers/crypto/hisilicon/qm.h | 120
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec.h | 5
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 20
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 379
-rw-r--r-- drivers/crypto/hisilicon/zip/zip.h | 8
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_crypto.c | 20
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 362
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptpf_main.c | 4
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c | 12
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 95
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 6
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_main.c | 12
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c | 10
-rw-r--r-- drivers/crypto/mediatek/mtk-sha.c | 7
-rw-r--r-- drivers/crypto/n2_core.c | 7
-rw-r--r-- drivers/crypto/nx/Makefile | 2
-rw-r--r-- drivers/crypto/nx/nx-common-powernv.c (renamed from drivers/crypto/nx/nx-842-powernv.c) | 204
-rw-r--r-- drivers/crypto/omap-sham.c | 21
-rw-r--r-- drivers/crypto/s5p-sss.c | 39
-rw-r--r-- drivers/crypto/stm32/stm32-crc32.c | 230
-rw-r--r-- drivers/crypto/stm32/stm32-hash.c | 38
-rw-r--r-- drivers/crypto/xilinx/zynqmp-aes-gcm.c | 22
-rw-r--r-- drivers/dax/dax-private.h | 1
-rw-r--r-- drivers/dax/device.c | 1
-rw-r--r-- drivers/dax/kmem.c | 42
-rw-r--r-- drivers/dca/dca-sysfs.c | 4
-rw-r--r-- drivers/devfreq/Kconfig | 8
-rw-r--r-- drivers/devfreq/Makefile | 1
-rw-r--r-- drivers/devfreq/devfreq.c | 19
-rw-r--r-- drivers/devfreq/imx-bus.c | 179
-rw-r--r-- drivers/devfreq/tegra30-devfreq.c | 7
-rw-r--r-- drivers/dma-buf/Makefile | 3
-rw-r--r-- drivers/dma-buf/dma-buf.c | 2
-rw-r--r-- drivers/dma-buf/dma-fence-chain.c | 10
-rw-r--r-- drivers/dma-buf/dma-fence.c | 2
-rw-r--r-- drivers/dma-buf/selftests.h | 1
-rw-r--r-- drivers/dma-buf/st-dma-fence-chain.c | 715
-rw-r--r-- drivers/dma/dmatest.c | 9
-rw-r--r-- drivers/dma/idxd/device.c | 7
-rw-r--r-- drivers/dma/idxd/irq.c | 26
-rw-r--r-- drivers/dma/owl-dma.c | 8
-rw-r--r-- drivers/dma/tegra210-adma.c | 2
-rw-r--r-- drivers/dma/ti/k3-udma.c | 6
-rw-r--r-- drivers/dma/xilinx/zynqmp_dma.c | 3
-rw-r--r-- drivers/edac/amd64_edac.c | 2
-rw-r--r-- drivers/edac/amd8131_edac.c | 8
-rw-r--r-- drivers/edac/armada_xp_edac.c | 14
-rw-r--r-- drivers/edac/i10nm_base.c | 29
-rw-r--r-- drivers/edac/skx_base.c | 33
-rw-r--r-- drivers/edac/skx_common.c | 17
-rw-r--r-- drivers/edac/skx_common.h | 13
-rw-r--r-- drivers/edac/thunderx_edac.c | 8
-rw-r--r-- drivers/edac/xgene_edac.c | 3
-rw-r--r-- drivers/extcon/extcon-adc-jack.c | 3
-rw-r--r-- drivers/extcon/extcon-arizona.c | 17
-rw-r--r-- drivers/extcon/extcon-max14577.c | 10
-rw-r--r-- drivers/extcon/extcon.c | 2
-rw-r--r-- drivers/firewire/core-cdev.c | 4
-rw-r--r-- drivers/firmware/Kconfig | 9
-rw-r--r-- drivers/firmware/Makefile | 3
-rw-r--r-- drivers/firmware/arm_scmi/Makefile | 4
-rw-r--r-- drivers/firmware/arm_scmi/base.c | 7
-rw-r--r-- drivers/firmware/arm_scmi/common.h | 11
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 133
-rw-r--r-- drivers/firmware/arm_scmi/mailbox.c | 17
-rw-r--r-- drivers/firmware/arm_scmi/perf.c | 5
-rw-r--r-- drivers/firmware/arm_scmi/power.c | 6
-rw-r--r-- drivers/firmware/arm_scmi/sensors.c | 4
-rw-r--r-- drivers/firmware/arm_scmi/shmem.c | 15
-rw-r--r-- drivers/firmware/arm_scmi/smc.c | 153
-rw-r--r-- drivers/firmware/arm_sdei.c | 49
-rw-r--r-- drivers/firmware/dmi-id.c | 6
-rw-r--r-- drivers/firmware/dmi_scan.c | 30
-rw-r--r-- drivers/firmware/efi/Kconfig | 15
-rw-r--r-- drivers/firmware/efi/arm-init.c | 4
-rw-r--r-- drivers/firmware/efi/cper.c | 62
-rw-r--r-- drivers/firmware/efi/earlycon.c | 14
-rw-r--r-- drivers/firmware/efi/efi.c | 49
-rw-r--r-- drivers/firmware/efi/efivars.c | 4
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 50
-rw-r--r-- drivers/firmware/efi/libstub/alignedmem.c | 57
-rw-r--r-- drivers/firmware/efi/libstub/arm32-stub.c | 53
-rw-r--r-- drivers/firmware/efi/libstub/arm64-stub.c | 106
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 381
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub.c (renamed from drivers/firmware/efi/libstub/arm-stub.c) | 99
-rw-r--r-- drivers/firmware/efi/libstub/efistub.h | 204
-rw-r--r-- drivers/firmware/efi/libstub/fdt.c | 24
-rw-r--r-- drivers/firmware/efi/libstub/file.c | 48
-rw-r--r-- drivers/firmware/efi/libstub/gop.c | 583
-rw-r--r-- drivers/firmware/efi/libstub/mem.c | 193
-rw-r--r-- drivers/firmware/efi/libstub/pci.c | 10
-rw-r--r-- drivers/firmware/efi/libstub/randomalloc.c | 6
-rw-r--r-- drivers/firmware/efi/libstub/relocate.c | 174
-rw-r--r-- drivers/firmware/efi/libstub/secureboot.c | 4
-rw-r--r-- drivers/firmware/efi/libstub/tpm.c | 7
-rw-r--r-- drivers/firmware/efi/libstub/vsprintf.c | 564
-rw-r--r-- drivers/firmware/efi/libstub/x86-stub.c | 179
-rw-r--r-- drivers/firmware/efi/test/efi_test.c | 12
-rw-r--r-- drivers/firmware/efi/tpm.c | 5
-rw-r--r-- drivers/firmware/imx/imx-scu.c | 64
-rw-r--r-- drivers/firmware/psci/psci.c | 21
-rw-r--r-- drivers/firmware/qcom_scm-legacy.c | 2
-rw-r--r-- drivers/firmware/qcom_scm.c | 11
-rw-r--r-- drivers/firmware/raspberrypi.c | 73
-rw-r--r-- drivers/firmware/smccc/Kconfig | 16
-rw-r--r-- drivers/firmware/smccc/Makefile | 3
-rw-r--r-- drivers/firmware/smccc/smccc.c | 31
-rw-r--r-- drivers/firmware/stratix10-rsu.c | 10
-rw-r--r-- drivers/firmware/stratix10-svc.c | 62
-rw-r--r-- drivers/firmware/tegra/bpmp-tegra186.c | 4
-rw-r--r-- drivers/firmware/tegra/bpmp.c | 9
-rw-r--r-- drivers/firmware/trusted_foundations.c | 21
-rw-r--r-- drivers/firmware/xilinx/zynqmp-debug.c | 5
-rw-r--r-- drivers/firmware/xilinx/zynqmp.c | 607
-rw-r--r-- drivers/fpga/Kconfig | 2
-rw-r--r-- drivers/fpga/Makefile | 1
-rw-r--r-- drivers/fpga/dfl-afu-dma-region.c | 8
-rw-r--r-- drivers/fpga/dfl-afu-main.c | 35
-rw-r--r-- drivers/fpga/dfl-fme-main.c | 23
-rw-r--r-- drivers/fpga/dfl-fme-perf.c | 1020
-rw-r--r-- drivers/fpga/dfl-fme-pr.c | 4
-rw-r--r-- drivers/fpga/dfl-fme.h | 2
-rw-r--r-- drivers/fpga/dfl.c | 15
-rw-r--r-- drivers/fpga/dfl.h | 39
-rw-r--r-- drivers/fpga/ice40-spi.c | 10
-rw-r--r-- drivers/fpga/machxo2-spi.c | 12
-rw-r--r-- drivers/fpga/stratix10-soc.c | 28
-rw-r--r-- drivers/fpga/zynqmp-fpga.c | 14
-rw-r--r-- drivers/gnss/serial.h | 2
-rw-r--r-- drivers/gnss/sirf.c | 8
-rw-r--r-- drivers/gpio/Kconfig | 24
-rw-r--r-- drivers/gpio/Makefile | 2
-rw-r--r-- drivers/gpio/TODO | 4
-rw-r--r-- drivers/gpio/gpio-aggregator.c | 568
-rw-r--r-- drivers/gpio/gpio-bcm-kona.c | 2
-rw-r--r-- drivers/gpio/gpio-dwapb.c | 248
-rw-r--r-- drivers/gpio/gpio-exar.c | 7
-rw-r--r-- drivers/gpio/gpio-f7188x.c | 33
-rw-r--r-- drivers/gpio/gpio-ftgpio010.c | 2
-rw-r--r-- drivers/gpio/gpio-ich.c | 2
-rw-r--r-- drivers/gpio/gpio-max730x.c | 12
-rw-r--r-- drivers/gpio/gpio-mb86s7x.c | 28
-rw-r--r-- drivers/gpio/gpio-merrifield.c | 10
-rw-r--r-- drivers/gpio/gpio-mlxbf2.c | 11
-rw-r--r-- drivers/gpio/gpio-mm-lantiq.c | 2
-rw-r--r-- drivers/gpio/gpio-mockup.c | 53
-rw-r--r-- drivers/gpio/gpio-mvebu.c | 15
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 96
-rw-r--r-- drivers/gpio/gpio-pch.c | 73
-rw-r--r-- drivers/gpio/gpio-pl061.c | 9
-rw-r--r-- drivers/gpio/gpio-pxa.c | 4
-rw-r--r-- drivers/gpio/gpio-rcar.c | 4
-rw-r--r-- drivers/gpio/gpio-regmap.c | 349
-rw-r--r-- drivers/gpio/gpio-tegra186.c | 1
-rw-r--r-- drivers/gpio/gpio-xgene-sb.c | 14
-rw-r--r-- drivers/gpio/gpiolib-acpi.c | 6
-rw-r--r-- drivers/gpio/gpiolib-devprop.c | 5
-rw-r--r-- drivers/gpio/gpiolib-of.c | 31
-rw-r--r-- drivers/gpio/gpiolib.c | 191
-rw-r--r-- drivers/gpio/gpiolib.h | 27
-rw-r--r-- drivers/gpu/drm/Kconfig | 4
-rw-r--r-- drivers/gpu/drm/Makefile | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 82
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 67
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 160
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 151
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 415
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_df.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 68
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 61
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 39
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 185
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h (renamed from drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h) | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 55
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 60
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 47
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 91
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 623
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h | 49
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 447
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 56
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 169
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 62
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 398
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 107
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 103
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 120
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 142
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 146
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atom.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cikd.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2905
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 69
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 78
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 209
-rwxr-xr-x [-rw-r--r--] drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 4
-rwxr-xr-x [-rw-r--r--] drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 97
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 46
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 302
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nvd.h | 49
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 145
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 419
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 172
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 262
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 47
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 129
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15d.h | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 59
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 112
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 80
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 471
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vid.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 55
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 40
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 47
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 25
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 62
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/Kconfig | 8
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 562
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 27
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 73
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 42
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 88
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 62
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 644
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 49
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 107
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_sink.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 100
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_helper.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 225
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_abm.h | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 65
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c | 299
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h | 125
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 319
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h (renamed from drivers/gpu/drm/amd/display/dc/basics/log_helpers.c) | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 76
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 110
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 195
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 83
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 288
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c | 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 122
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c | 89
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h | 67
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/transform.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h) | 23
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/include/hdcp_types.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/include/logger_interface.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 101
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 30
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 482
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_shared.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 103
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/stats/stats.c | 448
-rw-r--r-- drivers/gpu/drm/amd/display/modules/vmid/vmid.c | 7
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 14
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h | 4
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h | 27
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h | 30
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h | 30
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h | 33
-rw-r--r-- drivers/gpu/drm/amd/include/atomfirmware.h | 114
-rw-r--r-- drivers/gpu/drm/amd/include/cgs_common.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 336
-rw-r--r-- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 49
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 184
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 61
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 135
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 40
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 17
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 157
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 71
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 141
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 32
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 149
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c | 7
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c | 8
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 181
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c | 7
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 11
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h | 15
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h | 40
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 5
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 7
-rw-r--r-- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 107
-rw-r--r-- drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 8
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_internal.h | 5
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 139
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 16
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 18
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 35
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 31
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 42
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 48
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 69
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 56
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 15
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 56
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 68
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 106
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 18
-rw-r--r-- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 4
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_drv.c | 7
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 16
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_drv.c | 7
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c | 3
-rw-r--r-- drivers/gpu/drm/armada/armada_drv.c | 4
-rw-r--r-- drivers/gpu/drm/aspeed/aspeed_gfx.h | 3
-rw-r--r-- drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 31
-rw-r--r-- drivers/gpu/drm/aspeed/aspeed_gfx_out.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 3
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 25
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 12
-rw-r--r-- drivers/gpu/drm/bochs/bochs.h | 1
-rw-r--r-- drivers/gpu/drm/bochs/bochs_drv.c | 6
-rw-r--r-- drivers/gpu/drm/bochs/bochs_kms.c | 16
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 26
-rw-r--r-- drivers/gpu/drm/bridge/Makefile | 2
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 26
-rw-r--r-- drivers/gpu/drm/bridge/chrontel-ch7033.c | 620
-rw-r--r-- drivers/gpu/drm/bridge/nwl-dsi.c | 1213
-rw-r--r-- drivers/gpu/drm/bridge/nwl-dsi.h | 144
-rw-r--r-- drivers/gpu/drm/bridge/panel.c | 7
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8640.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/sii9234.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 86
-rw-r--r-- drivers/gpu/drm/bridge/tc358768.c | 4
-rw-r--r-- drivers/gpu/drm/cirrus/Kconfig | 19
-rw-r--r-- drivers/gpu/drm/cirrus/Makefile | 2
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 8
-rw-r--r-- drivers/gpu/drm/drm_auth.c | 69
-rw-r--r-- drivers/gpu/drm/drm_blend.c | 16
-rw-r--r-- drivers/gpu/drm/drm_bufs.c | 2
-rw-r--r-- drivers/gpu/drm/drm_client.c | 8
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 6
-rw-r--r-- drivers/gpu/drm/drm_crtc_internal.h | 4
-rw-r--r-- drivers/gpu/drm/drm_debugfs.c | 45
-rw-r--r-- drivers/gpu/drm/drm_dma.c | 2
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 271
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 404
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 230
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 114
-rw-r--r-- drivers/gpu/drm/drm_fb_cma_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 35
-rw-r--r-- drivers/gpu/drm/drm_file.c | 9
-rw-r--r-- drivers/gpu/drm/drm_framebuffer.c | 8
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 23
-rw-r--r-- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 224
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c | 124
-rw-r--r-- drivers/gpu/drm/drm_internal.h | 7
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c | 6
-rw-r--r-- drivers/gpu/drm/drm_managed.c | 275
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c | 33
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 133
-rw-r--r-- drivers/gpu/drm/drm_mode_config.c | 110
-rw-r--r-- drivers/gpu/drm/drm_mode_object.c | 10
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 26
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 4
-rw-r--r-- drivers/gpu/drm/drm_plane.c | 9
-rw-r--r-- drivers/gpu/drm/drm_scatter.c | 11
-rw-r--r-- drivers/gpu/drm/drm_vblank.c | 98
-rw-r--r-- drivers/gpu/drm/drm_vm.c | 4
-rw-r--r-- drivers/gpu/drm/drm_vram_helper_common.c | 94
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 20
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_dump.c | 4
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 4
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_perfmon.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dpi.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 34
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 28
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 182
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.h | 16
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_mic.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_scaler.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 34
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 6
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 14
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_crt.c | 14
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_dp.c | 47
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_lvds.c | 100
-rw-r--r-- drivers/gpu/drm/gma500/framebuffer.c | 16
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 11
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_intel_display.c | 31
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_output.h | 1
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_tmd_vid.c | 6
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_tpo_vid.c | 6
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_hdmi.c | 19
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_lvds.c | 5
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_drv.h | 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_lvds.c | 18
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 99
-rw-r--r-- drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c | 13
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 9
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 8
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 9
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 1
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 43
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i2c/sil164_drv.c | 7
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 23
-rw-r--r-- drivers/gpu/drm/i915/Kconfig.profile | 12
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 32
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 188
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 25
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c | 144
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 194
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.h | 27
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 121
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 40
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 808
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.h | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 920
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 130
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 558
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 50
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 2014
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h | 19
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 84
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 164
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 97
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c | 96
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_global_state.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 29
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 299
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 38
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.c | 178
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.h | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 105
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 149
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 26
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 89
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 848
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_fence.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 34
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 26
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_blt.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 18
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 83
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 566
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 52
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c | 171
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 128
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 26
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/debugfs_engines.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/debugfs_gt.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/debugfs_gt.h | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/debugfs_gt_pm.c | 74
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 58
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_sseu.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_types.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h | 51
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 211
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 20
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pool.h | 34
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 89
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 59
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c (renamed from drivers/gpu/drm/i915/i915_gem_fence_reg.c) | 170
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h (renamed from drivers/gpu/drm/i915/i915_gem_fence_reg.h) | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 69
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c (renamed from drivers/gpu/drm/i915/gt/intel_engine_pool.c) | 114
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h (renamed from drivers/gpu/drm/i915/gt/intel_engine_pool_types.h) | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c | 102
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h | 27
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_irq.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 20
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 20
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 1195
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rc6.c | 47
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_renderstate.c | 22
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 49
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 449
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.h | 61
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps_types.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.c | 33
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline.h | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 49
-rw-r--r-- drivers/gpu/drm/i915/gt/mock_engine.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_context.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 719
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rc6.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_ring_submission.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rps.c | 1331
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rps.h | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/shmem_utils.c | 173
-rw-r--r-- drivers/gpu/drm/i915/gt/shmem_utils.h | 23
-rw-r--r-- drivers/gpu/drm/i915/gt/st_shmem_utils.c | 63
-rw-r--r-- drivers/gpu/drm/i915/gt/sysfs_engines.c | 94
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 46
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c | 42
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 97
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c | 124
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 53
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c | 36
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 35
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 56
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 45
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 49
-rw-r--r-- drivers/gpu/drm/i915/gvt/execlist.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/hypercall.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 257
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_active.c | 137
-rw-r--r-- drivers/gpu/drm/i915/i915_active.h | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_config.c | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 356
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 156
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 47
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 20
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 33
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 45
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 235
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 41
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 596
-rw-r--r-- drivers/gpu/drm/i915/i915_perf_types.h | 46
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 41
-rw-r--r-- drivers/gpu/drm/i915/i915_priolist_types.h | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 193
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 156
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 30
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.c | 39
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_selftest.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence_work.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence_work.h | 23
-rw-r--r-- drivers/gpu/drm/i915/i915_switcheroo.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 121
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 76
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_dram.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 366
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_sideband.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 60
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.h | 22
-rw-r--r-- drivers/gpu/drm/i915/intel_wopcm.c | 47
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_bdw.c | 90
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_bdw.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_bxt.c | 88
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_bxt.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_chv.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_chv.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cnl.c | 101
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cnl.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_glk.c | 88
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_glk.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_hsw.c | 118
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_hsw.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_icl.c | 98
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_icl.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c | 88
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_tgl.c | 121
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_tgl.h | 16
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_active.c | 12
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 26
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 33
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf.c | 104
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf_selftests.h | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c | 623
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_selftest.c | 29
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_vma.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c | 3
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_memory_region.c | 5
-rw-r--r-- drivers/gpu/drm/i915/selftests/librapl.c | 24
-rw-r--r-- drivers/gpu/drm/i915/selftests/librapl.h | 13
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 38
-rw-r--r-- drivers/gpu/drm/imx/dw_hdmi-imx.c | 8
-rw-r--r-- drivers/gpu/drm/imx/imx-drm-core.c | 10
-rw-r--r-- drivers/gpu/drm/imx/imx-drm.h | 1
-rw-r--r-- drivers/gpu/drm/imx/imx-ldb.c | 8
-rw-r--r-- drivers/gpu/drm/imx/imx-tve.c | 8
-rw-r--r-- drivers/gpu/drm/imx/parallel-display.c | 8
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm.c | 49
-rw-r--r-- drivers/gpu/drm/lima/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/lima/Makefile | 4
-rw-r--r-- drivers/gpu/drm/lima/lima_bcast.c | 25
-rw-r--r-- drivers/gpu/drm/lima/lima_bcast.h | 2
-rw-r--r-- drivers/gpu/drm/lima/lima_ctx.c | 3
-rw-r--r-- drivers/gpu/drm/lima/lima_ctx.h | 5
-rw-r--r-- drivers/gpu/drm/lima/lima_devfreq.c | 257
-rw-r--r-- drivers/gpu/drm/lima/lima_devfreq.h | 44
-rw-r--r-- drivers/gpu/drm/lima/lima_device.c | 228
-rw-r--r-- drivers/gpu/drm/lima/lima_device.h | 17
-rw-r--r-- drivers/gpu/drm/lima/lima_dlbu.c | 17
-rw-r--r-- drivers/gpu/drm/lima/lima_dlbu.h | 2
-rw-r--r-- drivers/gpu/drm/lima/lima_drv.c | 141
-rw-r--r-- drivers/gpu/drm/lima/lima_drv.h | 1
-rw-r--r-- drivers/gpu/drm/lima/lima_dump.h | 77
-rw-r--r-- drivers/gpu/drm/lima/lima_gp.c | 21
-rw-r--r-- drivers/gpu/drm/lima/lima_gp.h | 2
-rw-r--r-- drivers/gpu/drm/lima/lima_l2_cache.c | 38
-rw-r--r-- drivers/gpu/drm/lima/lima_l2_cache.h | 2
-rw-r--r-- drivers/gpu/drm/lima/lima_mmu.c | 49
-rw-r--r-- drivers/gpu/drm/lima/lima_mmu.h | 2
-rw-r--r-- drivers/gpu/drm/lima/lima_pmu.c | 77
-rw-r--r-- drivers/gpu/drm/lima/lima_pmu.h | 2
-rw-r--r-- drivers/gpu/drm/lima/lima_pp.c | 31
-rw-r--r-- drivers/gpu/drm/lima/lima_pp.h | 4
-rw-r--r-- drivers/gpu/drm/lima/lima_sched.c | 193
-rw-r--r-- drivers/gpu/drm/lima/lima_sched.h | 11
-rw-r--r-- drivers/gpu/drm/lima/lima_trace.c | 7
-rw-r--r-- drivers/gpu/drm/lima/lima_trace.h | 50
-rw-r--r-- drivers/gpu/drm/lima/lima_vm.h | 3
-rw-r--r-- drivers/gpu/drm/mcde/mcde_display.c | 10
-rw-r--r-- drivers/gpu/drm/mcde/mcde_drm.h | 2
-rw-r--r-- drivers/gpu/drm/mcde/mcde_drv.c | 52
-rw-r--r-- drivers/gpu/drm/mcde/mcde_dsi.c | 9
-rw-r--r-- drivers/gpu/drm/mediatek/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_color.c | 5
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 5
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 5
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dpi.c | 57
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 19
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 259
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_ddp.h | 7
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 58
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.h | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_gem.c | 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dsi.c | 22
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi.c | 22
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_mipi_tx.c | 54
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_mipi_tx.h | 4
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c | 28
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.c | 38
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.h | 6
-rw-r--r-- drivers/gpu/drm/meson/meson_dw_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_plane.c | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_vclk.c | 16
-rw-r--r-- drivers/gpu/drm/meson/meson_vclk.h | 3
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_cursor.c | 4
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c | 4
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.h | 15
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_i2c.c | 10
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_main.c | 119
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 127
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 18
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 14
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c23
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h2
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c19
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core827d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core907d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core917d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c137
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/pior507d.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor507d.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor907d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sorc37d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c81
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c48
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c212
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c450
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c153
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.h5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c75
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c (renamed from drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c33
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c43
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c29
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h2
-rw-r--r--drivers/gpu/drm/panel/Kconfig29
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c367
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c31
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c4
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c691
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c46
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c247
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c2
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c303
-rw-r--r--drivers/gpu/drm/pl111/Makefile1
-rw-r--r--drivers/gpu/drm/pl111/pl111_debugfs.c8
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h2
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c13
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c148
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.c138
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.h29
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c28
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c32
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c25
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h20
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c14
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c15
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c8
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.c3
-rw-r--r--drivers/gpu/drm/radeon/Makefile35
-rw-r--r--drivers/gpu/drm/radeon/atom.c3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c6
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c18
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c14
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c14
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c9
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c13
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c6
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c8
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c8
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c43
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c137
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h17
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c83
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c14
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c6
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c6
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h4
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c16
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c13
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c7
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c13
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c13
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c12
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c10
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c13
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c8
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h2
-rw-r--r--drivers/gpu/drm/stm/drv.c10
-rw-r--r--drivers/gpu/drm/stm/ltdc.c102
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c17
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c17
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c111
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h10
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c40
-rw-r--r--drivers/gpu/drm/tegra/dc.c11
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c11
-rw-r--r--drivers/gpu/drm/tegra/drm.h4
-rw-r--r--drivers/gpu/drm/tegra/dsi.c21
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c21
-rw-r--r--drivers/gpu/drm/tegra/output.c6
-rw-r--r--drivers/gpu/drm/tegra/rgb.c8
-rw-r--r--drivers/gpu/drm/tegra/sor.c20
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c16
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c11
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h6
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c25
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h4
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c12
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c21
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.h1
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c20
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c8
-rw-r--r--drivers/gpu/drm/tiny/Kconfig19
-rw-r--r--drivers/gpu/drm/tiny/Makefile1
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c (renamed from drivers/gpu/drm/cirrus/cirrus.c)82
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c242
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c16
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c16
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c16
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c16
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c16
-rw-r--r--drivers/gpu/drm/tiny/repaper.c28
-rw-r--r--drivers/gpu/drm/tiny/st7586.c16
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c56
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c4
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c45
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_main.c10
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c31
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c20
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c53
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h9
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c17
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c16
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c10
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c32
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c29
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c12
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c8
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c15
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c13
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c21
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h5
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c11
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c4
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c8
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c8
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c8
-rw-r--r--drivers/gpu/host1x/dev.c59
-rw-r--r--drivers/greybus/Kconfig6
-rw-r--r--drivers/greybus/arpc.h2
-rw-r--r--drivers/hid/Kconfig11
-rw-r--r--drivers/hid/hid-apple.c30
-rw-r--r--drivers/hid/hid-asus.c122
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-mcp2221.c169
-rw-r--r--drivers/hid/hid-multitouch.c60
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-sony.c17
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c8
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c2
-rw-r--r--drivers/hv/channel.c58
-rw-r--r--drivers/hv/channel_mgmt.c439
-rw-r--r--drivers/hv/connection.c58
-rw-r--r--drivers/hv/hv.c16
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/hv_snapshot.c2
-rw-r--r--drivers/hv/hv_trace.h25
-rw-r--r--drivers/hv/hyperv_vmbus.h81
-rw-r--r--drivers/hv/vmbus_drv.c314
-rw-r--r--drivers/hwmon/Kconfig59
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/adt7411.c3
-rw-r--r--drivers/hwmon/amd_energy.c408
-rw-r--r--drivers/hwmon/applesmc.c12
-rw-r--r--drivers/hwmon/bt1-pvt.c1146
-rw-r--r--drivers/hwmon/bt1-pvt.h244
-rw-r--r--drivers/hwmon/da9052-hwmon.c4
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c26
-rw-r--r--drivers/hwmon/drivetemp.c2
-rw-r--r--drivers/hwmon/gsc-hwmon.c390
-rw-r--r--drivers/hwmon/hwmon.c136
-rw-r--r--drivers/hwmon/ina2xx.c183
-rw-r--r--drivers/hwmon/lm70.c47
-rw-r--r--drivers/hwmon/lm75.c8
-rw-r--r--drivers/hwmon/lm75.h31
-rw-r--r--drivers/hwmon/lm90.c45
-rw-r--r--drivers/hwmon/nct6775.c10
-rw-r--r--drivers/hwmon/nct7802.c6
-rw-r--r--drivers/hwmon/nct7904.c150
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/max16601.c314
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c8
-rw-r--r--drivers/hwtracing/coresight/Kconfig2
-rw-r--r--drivers/hwtracing/coresight/Makefile3
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c15
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c16
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.c232
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h8
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c91
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h21
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c204
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c16
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c2
-rw-r--r--drivers/hwtracing/coresight/coresight.c82
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c2
-rw-r--r--drivers/i2c/busses/i2c-altera.c10
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c20
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i2c/i2c-core-base.c24
-rw-r--r--drivers/i2c/i2c-core-of.c2
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c1
-rw-r--r--drivers/i3c/master.c16
-rw-r--r--drivers/ide/ide-cd.c17
-rw-r--r--drivers/ide/ide-io.c7
-rw-r--r--drivers/iio/accel/Kconfig10
-rw-r--r--drivers/iio/accel/bma180.c208
-rw-r--r--drivers/iio/accel/dmard06.c3
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c18
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c11
-rw-r--r--drivers/iio/accel/mxc4005.c4
-rw-r--r--drivers/iio/accel/sca3000.c2
-rw-r--r--drivers/iio/accel/st_accel.h2
-rw-r--r--drivers/iio/accel/st_accel_buffer.c3
-rw-r--r--drivers/iio/accel/st_accel_core.c83
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/adc/Kconfig55
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7476.c59
-rw-r--r--drivers/iio/adc/ad7780.c27
-rw-r--r--drivers/iio/adc/ad7791.c64
-rw-r--r--drivers/iio/adc/ad7793.c144
-rw-r--r--drivers/iio/adc/ad9467.c422
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c8
-rw-r--r--drivers/iio/adc/adi-axi-adc.c482
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c233
-rw-r--r--drivers/iio/adc/at91_adc.c5
-rw-r--r--drivers/iio/adc/exynos_adc.c17
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c4
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c6
-rw-r--r--drivers/iio/adc/max1241.c227
-rw-r--r--drivers/iio/adc/max1363.c32
-rw-r--r--drivers/iio/adc/mcp3422.c5
-rw-r--r--drivers/iio/adc/mp2629_adc.c208
-rw-r--r--drivers/iio/adc/stm32-adc-core.c34
-rw-r--r--drivers/iio/adc/stm32-adc.c8
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c21
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c4
-rw-r--r--drivers/iio/adc/ti-ads124s08.c7
-rw-r--r--drivers/iio/adc/ti-ads8344.c8
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c2
-rw-r--r--drivers/iio/adc/xilinx-xadc.h2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c1
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c41
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c31
-rw-r--r--drivers/iio/buffer/industrialio-triggered-buffer.c11
-rw-r--r--drivers/iio/buffer/kfifo_buf.c22
-rw-r--r--drivers/iio/chemical/Kconfig11
-rw-r--r--drivers/iio/chemical/Makefile1
-rw-r--r--drivers/iio/chemical/atlas-ezo-sensor.c177
-rw-r--r--drivers/iio/chemical/atlas-sensor.c50
-rw-r--r--drivers/iio/chemical/bme680_core.c36
-rw-r--r--drivers/iio/chemical/ccs811.c112
-rw-r--r--drivers/iio/chemical/pms7003.c17
-rw-r--r--drivers/iio/chemical/sps30.c9
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c18
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.h3
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c13
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c4
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c6
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c13
-rw-r--r--drivers/iio/dac/Kconfig6
-rw-r--r--drivers/iio/dac/ad5360.c17
-rw-r--r--drivers/iio/dac/ad5380.c8
-rw-r--r--drivers/iio/dac/ad5421.c21
-rw-r--r--drivers/iio/dac/ad5446.c18
-rw-r--r--drivers/iio/dac/ad5449.c12
-rw-r--r--drivers/iio/dac/ad5592r-base.c30
-rw-r--r--drivers/iio/dac/ad5592r-base.h1
-rw-r--r--drivers/iio/dac/ad5592r.c4
-rw-r--r--drivers/iio/dac/ad5593r.c2
-rw-r--r--drivers/iio/dac/ad5624r_spi.c8
-rw-r--r--drivers/iio/dac/ad5686.c10
-rw-r--r--drivers/iio/dac/ad5686.h2
-rw-r--r--drivers/iio/dac/ad5755.c22
-rw-r--r--drivers/iio/dac/ad5761.c16
-rw-r--r--drivers/iio/dac/ad5764.c12
-rw-r--r--drivers/iio/dac/ltc2632.c67
-rw-r--r--drivers/iio/dac/ti-dac7612.c4
-rw-r--r--drivers/iio/dac/vf610_dac.c12
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c31
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/adis16130.c4
-rw-r--r--drivers/iio/gyro/adis16136.c10
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c6
-rw-r--r--drivers/iio/gyro/bmg160_spi.c5
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c18
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c4
-rw-r--r--drivers/iio/gyro/st_gyro_buffer.c3
-rw-r--r--drivers/iio/gyro/st_gyro_core.c9
-rw-r--r--drivers/iio/health/afe4403.c14
-rw-r--r--drivers/iio/health/max30100.c7
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c12
-rw-r--r--drivers/iio/humidity/hts221_buffer.c6
-rw-r--r--drivers/iio/humidity/hts221_i2c.c6
-rw-r--r--drivers/iio/humidity/hts221_spi.c6
-rw-r--r--drivers/iio/imu/Kconfig13
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c27
-rw-r--r--drivers/iio/imu/adis16400.c21
-rw-r--r--drivers/iio/imu/adis16460.c27
-rw-r--r--drivers/iio/imu/adis16475.c1338
-rw-r--r--drivers/iio/imu/adis16480.c16
-rw-r--r--drivers/iio/imu/adis_buffer.c58
-rw-r--r--drivers/iio/imu/adis_trigger.c72
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c4
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c23
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c21
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c103
-rw-r--r--drivers/iio/industrialio-buffer.c93
-rw-r--r--drivers/iio/industrialio-core.c126
-rw-r--r--drivers/iio/industrialio-trigger.c53
-rw-r--r--drivers/iio/inkern.c27
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/light/bh1780.c6
-rw-r--r--drivers/iio/light/cm32181.c271
-rw-r--r--drivers/iio/light/cm3232.c3
-rw-r--r--drivers/iio/light/gp2ap002.c19
-rw-r--r--drivers/iio/light/gp2ap020a00f.c6
-rw-r--r--drivers/iio/light/hid-sensor-als.c18
-rw-r--r--drivers/iio/light/hid-sensor-prox.c18
-rw-r--r--drivers/iio/light/isl29125.c28
-rw-r--r--drivers/iio/light/ltr501.c41
-rw-r--r--drivers/iio/light/opt3001.c3
-rw-r--r--drivers/iio/light/si1133.c18
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c7
-rw-r--r--drivers/iio/light/st_uvis25_spi.c7
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/light/tsl2772.c6
-rw-r--r--drivers/iio/light/vcnl4000.c746
-rw-r--r--drivers/iio/light/vl6180.c3
-rw-r--r--drivers/iio/light/zopt2201.c4
-rw-r--r--drivers/iio/magnetometer/ak8974.c201
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c4
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c18
-rw-r--r--drivers/iio/magnetometer/mmc35240.c4
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c5
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c3
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c18
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c18
-rw-r--r--drivers/iio/pressure/bmp280-core.c100
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c18
-rw-r--r--drivers/iio/pressure/hp206c.c8
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c4
-rw-r--r--drivers/iio/pressure/ms5611_spi.c4
-rw-r--r--drivers/iio/pressure/st_pressure_core.c7
-rw-r--r--drivers/iio/pressure/zpa2326.c9
-rw-r--r--drivers/iio/proximity/Kconfig24
-rw-r--r--drivers/iio/proximity/Makefile2
-rw-r--r--drivers/iio/proximity/ping.c7
-rw-r--r--drivers/iio/proximity/sx9310.c1069
-rw-r--r--drivers/iio/proximity/vcnl3020.c258
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c12
-rw-r--r--drivers/iio/temperature/ltc2983.c4
-rw-r--r--drivers/iio/temperature/max31856.c5
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile9
-rw-r--r--drivers/infiniband/core/addr.c4
-rw-r--r--drivers/infiniband/core/cache.c7
-rw-r--r--drivers/infiniband/core/cm.c306
-rw-r--r--drivers/infiniband/core/cma.c114
-rw-r--r--drivers/infiniband/core/cma_configfs.c13
-rw-r--r--drivers/infiniband/core/cma_priv.h1
-rw-r--r--drivers/infiniband/core/cma_trace.h20
-rw-r--r--drivers/infiniband/core/core_priv.h3
-rw-r--r--drivers/infiniband/core/cq.c173
-rw-r--r--drivers/infiniband/core/device.c22
-rw-r--r--drivers/infiniband/core/fmr_pool.c494
-rw-r--r--drivers/infiniband/core/lag.c138
-rw-r--r--drivers/infiniband/core/mad.c255
-rw-r--r--drivers/infiniband/core/multicast.c12
-rw-r--r--drivers/infiniband/core/nldev.c3
-rw-r--r--drivers/infiniband/core/rdma_core.c48
-rw-r--r--drivers/infiniband/core/rdma_core.h7
-rw-r--r--drivers/infiniband/core/rw.c2
-rw-r--r--drivers/infiniband/core/sa_query.c51
-rw-r--r--drivers/infiniband/core/sysfs.c10
-rw-r--r--drivers/infiniband/core/ucma.c65
-rw-r--r--drivers/infiniband/core/ud_header.c2
-rw-r--r--drivers/infiniband/core/user_mad.c22
-rw-r--r--drivers/infiniband/core/uverbs.h25
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c76
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c24
-rw-r--r--drivers/infiniband/core/uverbs_main.c52
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c95
-rw-r--r--drivers/infiniband/core/uverbs_std_types_async_fd.c30
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c17
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c12
-rw-r--r--drivers/infiniband/core/uverbs_std_types_qp.c401
-rw-r--r--drivers/infiniband/core/uverbs_std_types_srq.c234
-rw-r--r--drivers/infiniband/core/uverbs_std_types_wq.c194
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c3
-rw-r--r--drivers/infiniband/core/verbs.c159
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c76
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h18
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c357
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h42
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c88
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h91
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h53
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h106
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c1
-rw-r--r--drivers/infiniband/hw/efa/efa.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h63
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h3
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c18
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h11
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c52
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c19
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile4
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c12
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h3
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c303
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h5
-rw-r--r--drivers/infiniband/hw/hfi1/common.h13
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c231
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h38
-rw-r--r--drivers/infiniband/hw/hfi1/init.c13
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h171
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c309
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_rx.c95
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c828
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c36
-rw-r--r--drivers/infiniband/hw/hfi1/msix.h7
-rw-r--r--drivers/infiniband/hw/hfi1/netdev.h118
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c481
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c18
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c4
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c42
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ctxts.h11
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c7
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c14
-rw-r--r--drivers/infiniband/hw/hfi1/vnic.h5
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c325
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c148
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c351
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h246
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c114
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c360
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c1675
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c71
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c1644
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c509
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c378
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c8
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h1
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c11
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c93
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c14
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile29
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c35
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c131
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h6
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c4
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c11
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c27
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c156
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c38
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c5
-rw-r--r--drivers/infiniband/hw/mlx5/main.c216
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h75
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c13
-rw-r--r--drivers/infiniband/hw/mlx5/qos.c13
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c3779
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h46
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/qp.c)338
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c10
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c113
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c1504
-rw-r--r--drivers/infiniband/hw/mlx5/wr.h76
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c262
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c105
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h23
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/main.c1
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h1
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c1
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c11
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c155
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h15
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c24
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c9
-rw-r--r--drivers/infiniband/sw/siw/siw.h4
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c42
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c1
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c1
-rw-r--r--drivers/infiniband/ulp/Makefile1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c47
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c3
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h79
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c19
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c188
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c126
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c5
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c12
-rw-r--r--drivers/infiniband/ulp/rtrs/Kconfig27
-rw-r--r--drivers/infiniband/ulp/rtrs/Makefile15
-rw-r--r--drivers/infiniband/ulp/rtrs/README213
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c200
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c483
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c2992
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h252
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-log.h28
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h399
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c38
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c321
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2178
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h148
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c612
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h196
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c265
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h27
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c67
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h5
-rw-r--r--drivers/input/evdev.c19
-rw-r--r--drivers/input/joystick/xpad.c12
-rw-r--r--drivers/input/keyboard/applespi.c2
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c14
-rw-r--r--drivers/input/keyboard/dlink-dir685-touchkeys.c2
-rw-r--r--drivers/input/misc/axp20x-pek.c72
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/rmi4/rmi_driver.c5
-rw-r--r--drivers/input/serio/i8042-ppcio.h57
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/serio/i8042.h2
-rw-r--r--drivers/input/touchscreen/elants_i2c.c11
-rw-r--r--drivers/input/touchscreen/mms114.c12
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c1
-rw-r--r--drivers/interconnect/Kconfig3
-rw-r--r--drivers/interconnect/Makefile1
-rw-r--r--drivers/interconnect/core.c143
-rw-r--r--drivers/interconnect/imx/Kconfig17
-rw-r--r--drivers/interconnect/imx/Makefile9
-rw-r--r--drivers/interconnect/imx/imx.c284
-rw-r--r--drivers/interconnect/imx/imx.h61
-rw-r--r--drivers/interconnect/imx/imx8mm.c105
-rw-r--r--drivers/interconnect/imx/imx8mn.c94
-rw-r--r--drivers/interconnect/imx/imx8mq.c103
-rw-r--r--drivers/interconnect/internal.h2
-rw-r--r--drivers/iommu/amd_iommu.c3
-rw-r--r--drivers/iommu/amd_iommu_init.c9
-rw-r--r--drivers/iommu/dma-iommu.c5
-rw-r--r--drivers/iommu/iommu.c19
-rw-r--r--drivers/ipack/carriers/tpci200.c1
-rw-r--r--drivers/irqchip/Kconfig27
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c170
-rw-r--r--drivers/irqchip/irq-gic-v3.c3
-rw-r--r--drivers/irqchip/irq-gic.c1
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c214
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c255
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c243
-rw-r--r--drivers/irqchip/irq-sifive-plic.c23
-rw-r--r--drivers/leds/Kconfig29
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/leds-ariel.c133
-rw-r--r--drivers/leds/leds-aw2013.c436
-rw-r--r--drivers/leds/leds-lm355x.c1
-rw-r--r--drivers/leds/leds-lp3952.c2
-rw-r--r--drivers/leds/leds-lt3593.c1
-rw-r--r--drivers/leds/leds-netxbig.c148
-rw-r--r--drivers/leds/leds-pca963x.c2
-rw-r--r--drivers/leds/leds-pwm.c16
-rw-r--r--drivers/leds/leds-sgm3140.c320
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds-tlc591xx.c5
-rw-r--r--drivers/leds/trigger/ledtrig-timer.c4
-rw-r--r--drivers/lightnvm/pblk-cache.c8
-rw-r--r--drivers/lightnvm/pblk-init.c5
-rw-r--r--drivers/lightnvm/pblk-read.c11
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/ams/ams-input.c37
-rw-r--r--drivers/macintosh/ams/ams.h4
-rw-r--r--drivers/macintosh/mac_hid.c3
-rw-r--r--drivers/macintosh/via-pmu.c2
-rw-r--r--drivers/macintosh/windfarm_pm112.c21
-rw-r--r--drivers/md/Kconfig20
-rw-r--r--drivers/md/Makefile3
-rw-r--r--drivers/md/bcache/Kconfig9
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/bset.c6
-rw-r--r--drivers/md/bcache/btree.c16
-rw-r--r--drivers/md/bcache/extents.c12
-rw-r--r--drivers/md/bcache/io.c8
-rw-r--r--drivers/md/bcache/journal.c34
-rw-r--r--drivers/md/bcache/request.c25
-rw-r--r--drivers/md/bcache/super.c232
-rw-r--r--drivers/md/bcache/sysfs.c8
-rw-r--r--drivers/md/bcache/writeback.c6
-rw-r--r--drivers/md/dm-bufio.c113
-rw-r--r--drivers/md/dm-crypt.c80
-rw-r--r--drivers/md/dm-ebs-target.c471
-rw-r--r--drivers/md/dm-historical-service-time.c561
-rw-r--r--drivers/md/dm-integrity.c8
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-mpath.c123
-rw-r--r--drivers/md/dm-path-selector.h2
-rw-r--r--drivers/md/dm-queue-length.c2
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-service-time.c2
-rw-r--r--drivers/md/dm-stats.c2
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-table.c17
-rw-r--r--drivers/md/dm-writecache.c42
-rw-r--r--drivers/md/dm-zoned-metadata.c1046
-rw-r--r--drivers/md/dm-zoned-reclaim.c210
-rw-r--r--drivers/md/dm-zoned-target.c463
-rw-r--r--drivers/md/dm-zoned.h113
-rw-r--r--drivers/md/dm.c35
-rw-r--r--drivers/md/md-bitmap.c12
-rw-r--r--drivers/md/md-linear.h2
-rw-r--r--drivers/md/md.c71
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h4
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c6
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid1.h2
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c22
-rw-r--r--drivers/media/Kconfig242
-rw-r--r--drivers/media/Makefile2
-rw-r--r--drivers/media/cec/Kconfig25
-rw-r--r--drivers/media/cec/Makefile16
-rw-r--r--drivers/media/cec/core/Makefile16
-rw-r--r--drivers/media/cec/core/cec-adap.c (renamed from drivers/media/cec/cec-adap.c)8
-rw-r--r--drivers/media/cec/core/cec-api.c (renamed from drivers/media/cec/cec-api.c)0
-rw-r--r--drivers/media/cec/core/cec-core.c (renamed from drivers/media/cec/cec-core.c)0
-rw-r--r--drivers/media/cec/core/cec-notifier.c (renamed from drivers/media/cec/cec-notifier.c)2
-rw-r--r--drivers/media/cec/core/cec-pin-error-inj.c (renamed from drivers/media/cec/cec-pin-error-inj.c)0
-rw-r--r--drivers/media/cec/core/cec-pin-priv.h (renamed from drivers/media/cec/cec-pin-priv.h)0
-rw-r--r--drivers/media/cec/core/cec-pin.c (renamed from drivers/media/cec/cec-pin.c)0
-rw-r--r--drivers/media/cec/core/cec-priv.h (renamed from drivers/media/cec/cec-priv.h)0
-rw-r--r--drivers/media/cec/platform/Kconfig120
-rw-r--r--drivers/media/cec/platform/Makefile14
-rw-r--r--drivers/media/cec/platform/cec-gpio/Makefile (renamed from drivers/media/platform/cec-gpio/Makefile)0
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c (renamed from drivers/media/platform/cec-gpio/cec-gpio.c)18
-rw-r--r--drivers/media/cec/platform/cros-ec/Makefile2
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c (renamed from drivers/media/platform/cros-ec-cec/cros-ec-cec.c)0
-rw-r--r--drivers/media/cec/platform/meson/Makefile3
-rw-r--r--drivers/media/cec/platform/meson/ao-cec-g12a.c (renamed from drivers/media/platform/meson/ao-cec-g12a.c)0
-rw-r--r--drivers/media/cec/platform/meson/ao-cec.c (renamed from drivers/media/platform/meson/ao-cec.c)0
-rw-r--r--drivers/media/cec/platform/s5p/Makefile (renamed from drivers/media/platform/s5p-cec/Makefile)2
-rw-r--r--drivers/media/cec/platform/s5p/exynos_hdmi_cec.h (renamed from drivers/media/platform/s5p-cec/exynos_hdmi_cec.h)0
-rw-r--r--drivers/media/cec/platform/s5p/exynos_hdmi_cecctrl.c (renamed from drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c)0
-rw-r--r--drivers/media/cec/platform/s5p/regs-cec.h (renamed from drivers/media/platform/s5p-cec/regs-cec.h)0
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.c (renamed from drivers/media/platform/s5p-cec/s5p_cec.c)0
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.h (renamed from drivers/media/platform/s5p-cec/s5p_cec.h)0
-rw-r--r--drivers/media/cec/platform/seco/Makefile2
-rw-r--r--drivers/media/cec/platform/seco/seco-cec.c (renamed from drivers/media/platform/seco-cec/seco-cec.c)2
-rw-r--r--drivers/media/cec/platform/seco/seco-cec.h (renamed from drivers/media/platform/seco-cec/seco-cec.h)0
-rw-r--r--drivers/media/cec/platform/sti/Makefile2
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c (renamed from drivers/media/platform/sti/cec/stih-cec.c)0
-rw-r--r--drivers/media/cec/platform/stm32/Makefile2
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c (renamed from drivers/media/platform/stm32/stm32-cec.c)0
-rw-r--r--drivers/media/cec/platform/tegra/Makefile2
-rw-r--r--drivers/media/cec/platform/tegra/tegra_cec.c (renamed from drivers/media/platform/tegra-cec/tegra_cec.c)0
-rw-r--r--drivers/media/cec/platform/tegra/tegra_cec.h (renamed from drivers/media/platform/tegra-cec/tegra_cec.h)0
-rw-r--r--drivers/media/cec/usb/Kconfig8
-rw-r--r--drivers/media/cec/usb/Makefile6
-rw-r--r--drivers/media/cec/usb/pulse8/Kconfig (renamed from drivers/media/usb/pulse8-cec/Kconfig)3
-rw-r--r--drivers/media/cec/usb/pulse8/Makefile (renamed from drivers/media/usb/pulse8-cec/Makefile)0
-rw-r--r--drivers/media/cec/usb/pulse8/pulse8-cec.c (renamed from drivers/media/usb/pulse8-cec/pulse8-cec.c)6
-rw-r--r--drivers/media/cec/usb/rainshadow/Kconfig (renamed from drivers/media/usb/rainshadow-cec/Kconfig)3
-rw-r--r--drivers/media/cec/usb/rainshadow/Makefile (renamed from drivers/media/usb/rainshadow-cec/Makefile)0
-rw-r--r--drivers/media/cec/usb/rainshadow/rainshadow-cec.c (renamed from drivers/media/usb/rainshadow-cec/rainshadow-cec.c)0
-rw-r--r--drivers/media/common/Kconfig2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c3
-rw-r--r--drivers/media/dvb-core/Kconfig27
-rw-r--r--drivers/media/dvb-core/dvbdev.c5
-rw-r--r--drivers/media/dvb-frontends/Kconfig16
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_top.c2
-rw-r--r--drivers/media/dvb-frontends/dib3000.h2
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c2
-rw-r--r--drivers/media/dvb-frontends/eds1547.h2
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c14
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c4
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c2
-rw-r--r--drivers/media/dvb-frontends/z0194a.h2
-rw-r--r--drivers/media/firewire/Kconfig5
-rw-r--r--drivers/media/i2c/Kconfig457
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c40
-rw-r--r--drivers/media/i2c/et8ek8/Kconfig4
-rw-r--r--drivers/media/i2c/imx214.c4
-rw-r--r--drivers/media/i2c/imx219.c110
-rw-r--r--drivers/media/i2c/m5mols/Kconfig5
-rw-r--r--drivers/media/i2c/max2175.c6
-rw-r--r--drivers/media/i2c/ov13858.c13
-rw-r--r--drivers/media/i2c/ov2740.c1016
-rw-r--r--drivers/media/i2c/ov5640.c4
-rw-r--r--drivers/media/i2c/ov5670.c14
-rw-r--r--drivers/media/i2c/ov8856.c191
-rw-r--r--drivers/media/i2c/s5k5baf.c2
-rw-r--r--drivers/media/i2c/smiapp/Kconfig5
-rw-r--r--drivers/media/mc/Kconfig19
-rw-r--r--drivers/media/mc/mc-entity.c2
-rw-r--r--drivers/media/mmc/Kconfig1
-rw-r--r--drivers/media/mmc/siano/Kconfig2
-rw-r--r--drivers/media/mmc/siano/smssdio.c10
-rw-r--r--drivers/media/pci/Kconfig14
-rw-r--r--drivers/media/pci/bt8xx/Kconfig2
-rw-r--r--drivers/media/pci/cobalt/Kconfig4
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c22
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c51
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c31
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c99
-rw-r--r--drivers/media/pci/cx88/cx88-core.c3
-rw-r--r--drivers/media/pci/cx88/cx88-input.c2
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/ddbridge/Kconfig1
-rw-r--r--drivers/media/pci/ddbridge/Makefile2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c4
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-dummy-fe.c153
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-dummy-fe.h16
-rw-r--r--drivers/media/pci/intel/ipu3/Kconfig4
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c26
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c19
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c17
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c4
-rw-r--r--drivers/media/pci/mantis/mantis_dvb.c2
-rw-r--r--drivers/media/pci/meye/Kconfig2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c2
-rw-r--r--drivers/media/pci/sta2x11/Kconfig6
-rw-r--r--drivers/media/platform/Kconfig191
-rw-r--r--drivers/media/platform/Makefile19
-rw-r--r--drivers/media/platform/am437x/Kconfig4
-rw-r--r--drivers/media/platform/atmel/Kconfig4
-rw-r--r--drivers/media/platform/cadence/Kconfig8
-rw-r--r--drivers/media/platform/coda/coda-bit.c9
-rw-r--r--drivers/media/platform/coda/coda-common.c199
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c577
-rw-r--r--drivers/media/platform/coda/coda.h12
-rw-r--r--drivers/media/platform/cros-ec-cec/Makefile2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c1
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig5
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c2
-rw-r--r--drivers/media/platform/meson/Makefile3
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.c8
-rw-r--r--drivers/media/platform/pxa_camera.c4
-rw-r--r--drivers/media/platform/qcom/venus/core.c21
-rw-r--r--drivers/media/platform/qcom/venus/core.h7
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c18
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c10
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h3
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.h10
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c200
-rw-r--r--drivers/media/platform/qcom/venus/venc.c10
-rw-r--r--drivers/media/platform/rcar-fcp.c5
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig8
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c40
-rw-r--r--drivers/media/platform/seco-cec/Makefile2
-rw-r--r--drivers/media/platform/sh_veu.c1203
-rw-r--r--drivers/media/platform/sti/cec/Makefile2
-rw-r--r--drivers/media/platform/stm32/Makefile1
-rw-r--r--drivers/media/platform/sunxi/Kconfig2
-rw-r--r--drivers/media/platform/sunxi/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/Kconfig6
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/Kconfig4
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c6
-rw-r--r--drivers/media/platform/tegra-cec/Makefile2
-rw-r--r--drivers/media/platform/ti-vpe/cal.c439
-rw-r--r--drivers/media/platform/ti-vpe/cal_regs.h21
-rw-r--r--drivers/media/platform/video-mux.c87
-rw-r--r--drivers/media/platform/xilinx/Kconfig4
-rw-r--r--drivers/media/radio/Kconfig12
-rw-r--r--drivers/media/radio/si470x/Kconfig2
-rw-r--r--drivers/media/radio/wl128x/Kconfig5
-rw-r--r--drivers/media/rc/bpf-lirc.c4
-rw-r--r--drivers/media/rc/gpio-ir-tx.c51
-rw-r--r--drivers/media/rc/iguanair.c36
-rw-r--r--drivers/media/rc/ir-rx51.c2
-rw-r--r--drivers/media/rc/rc-core-priv.h22
-rw-r--r--drivers/media/spi/Kconfig8
-rw-r--r--drivers/media/test-drivers/Kconfig26
-rw-r--r--drivers/media/test-drivers/Makefile9
-rw-r--r--drivers/media/test-drivers/vicodec/Kconfig (renamed from drivers/media/platform/vicodec/Kconfig)2
-rw-r--r--drivers/media/test-drivers/vicodec/Makefile (renamed from drivers/media/platform/vicodec/Makefile)0
-rw-r--r--drivers/media/test-drivers/vicodec/codec-fwht.c (renamed from drivers/media/platform/vicodec/codec-fwht.c)0
-rw-r--r--drivers/media/test-drivers/vicodec/codec-fwht.h (renamed from drivers/media/platform/vicodec/codec-fwht.h)0
-rw-r--r--drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c (renamed from drivers/media/platform/vicodec/codec-v4l2-fwht.c)0
-rw-r--r--drivers/media/test-drivers/vicodec/codec-v4l2-fwht.h (renamed from drivers/media/platform/vicodec/codec-v4l2-fwht.h)0
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c (renamed from drivers/media/platform/vicodec/vicodec-core.c)15
-rw-r--r--drivers/media/test-drivers/vim2m.c (renamed from drivers/media/platform/vim2m.c)8
-rw-r--r--drivers/media/test-drivers/vimc/Kconfig (renamed from drivers/media/platform/vimc/Kconfig)4
-rw-r--r--drivers/media/test-drivers/vimc/Makefile (renamed from drivers/media/platform/vimc/Makefile)0
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c (renamed from drivers/media/platform/vimc/vimc-capture.c)37
-rw-r--r--drivers/media/test-drivers/vimc/vimc-common.c (renamed from drivers/media/platform/vimc/vimc-common.c)83
-rw-r--r--drivers/media/test-drivers/vimc/vimc-common.h (renamed from drivers/media/platform/vimc/vimc-common.h)88
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c (renamed from drivers/media/platform/vimc/vimc-core.c)90
-rw-r--r--drivers/media/test-drivers/vimc/vimc-debayer.c (renamed from drivers/media/platform/vimc/vimc-debayer.c)88
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c (renamed from drivers/media/platform/vimc/vimc-scaler.c)27
-rw-r--r--drivers/media/test-drivers/vimc/vimc-sensor.c (renamed from drivers/media/platform/vimc/vimc-sensor.c)23
-rw-r--r--drivers/media/test-drivers/vimc/vimc-streamer.c (renamed from drivers/media/platform/vimc/vimc-streamer.c)0
-rw-r--r--drivers/media/test-drivers/vimc/vimc-streamer.h (renamed from drivers/media/platform/vimc/vimc-streamer.h)7
-rw-r--r--drivers/media/test-drivers/vivid/Kconfig (renamed from drivers/media/platform/vivid/Kconfig)2
-rw-r--r--drivers/media/test-drivers/vivid/Makefile (renamed from drivers/media/platform/vivid/Makefile)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c (renamed from drivers/media/platform/vivid/vivid-cec.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.h (renamed from drivers/media/platform/vivid/vivid-cec.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c (renamed from drivers/media/platform/vivid/vivid-core.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h (renamed from drivers/media/platform/vivid/vivid-core.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c (renamed from drivers/media/platform/vivid/vivid-ctrls.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.h (renamed from drivers/media/platform/vivid/vivid-ctrls.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c (renamed from drivers/media/platform/vivid/vivid-kthread-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.h (renamed from drivers/media/platform/vivid/vivid-kthread-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-out.c (renamed from drivers/media/platform/vivid/vivid-kthread-out.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-out.h (renamed from drivers/media/platform/vivid/vivid-kthread-out.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-touch.c (renamed from drivers/media/platform/vivid/vivid-kthread-touch.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-touch.h (renamed from drivers/media/platform/vivid/vivid-kthread-touch.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-cap.c (renamed from drivers/media/platform/vivid/vivid-meta-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-cap.h (renamed from drivers/media/platform/vivid/vivid-meta-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-out.c (renamed from drivers/media/platform/vivid/vivid-meta-out.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-out.h (renamed from drivers/media/platform/vivid/vivid-meta-out.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.c (renamed from drivers/media/platform/vivid/vivid-osd.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.h (renamed from drivers/media/platform/vivid/vivid-osd.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-common.c (renamed from drivers/media/platform/vivid/vivid-radio-common.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-common.h (renamed from drivers/media/platform/vivid/vivid-radio-common.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c (renamed from drivers/media/platform/vivid/vivid-radio-rx.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.h (renamed from drivers/media/platform/vivid/vivid-radio-rx.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.c (renamed from drivers/media/platform/vivid/vivid-radio-tx.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.h (renamed from drivers/media/platform/vivid/vivid-radio-tx.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-rds-gen.c (renamed from drivers/media/platform/vivid/vivid-rds-gen.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-rds-gen.h (renamed from drivers/media/platform/vivid/vivid-rds-gen.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c (renamed from drivers/media/platform/vivid/vivid-sdr-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.h (renamed from drivers/media/platform/vivid/vivid-sdr-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.c (renamed from drivers/media/platform/vivid/vivid-touch-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.h (renamed from drivers/media/platform/vivid/vivid-touch-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c (renamed from drivers/media/platform/vivid/vivid-vbi-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.h (renamed from drivers/media/platform/vivid/vivid-vbi-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-gen.c (renamed from drivers/media/platform/vivid/vivid-vbi-gen.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-gen.h (renamed from drivers/media/platform/vivid/vivid-vbi-gen.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.c (renamed from drivers/media/platform/vivid/vivid-vbi-out.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.h (renamed from drivers/media/platform/vivid/vivid-vbi-out.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c (renamed from drivers/media/platform/vivid/vivid-vid-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.h (renamed from drivers/media/platform/vivid/vivid-vid-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c (renamed from drivers/media/platform/vivid/vivid-vid-common.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.h (renamed from drivers/media/platform/vivid/vivid-vid-common.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c (renamed from drivers/media/platform/vivid/vivid-vid-out.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.h (renamed from drivers/media/platform/vivid/vivid-vid-out.h)0
-rw-r--r--drivers/media/tuners/Kconfig6
-rw-r--r--drivers/media/tuners/si2157.c419
-rw-r--r--drivers/media/tuners/si2157_priv.h2
-rw-r--r--drivers/media/usb/Kconfig12
-rw-r--r--drivers/media/usb/Makefile2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c35
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c85
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig10
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c25
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.h2
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig3
-rw-r--r--drivers/media/usb/dvb-usb/a800.c8
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005-remote.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c6
-rw-r--r--drivers/media/usb/dvb-usb/af9005.h2
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c6
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c6
-rw-r--r--drivers/media/usb/dvb-usb/dib0700.h31
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c6
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mb.c4
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc-common.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb.h2
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c31
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.h2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-common.h3
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c3
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c10
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h10
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c31
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c2
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.h2
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c6
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c2
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c9
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.h2
-rw-r--r--drivers/media/usb/dvb-usb/umt-010.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.h2
-rw-r--r--drivers/media/usb/gspca/Kconfig2
-rw-r--r--drivers/media/usb/gspca/mr97310a.c1
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c4
-rw-r--r--drivers/media/usb/pwc/pwc-ctrl.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c54
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c9
-rw-r--r--drivers/media/usb/zr364xx/Kconfig2
-rw-r--r--drivers/media/v4l2-core/Kconfig27
-rw-r--r--drivers/media/v4l2-core/Makefile3
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c92
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c25
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c106
-rw-r--r--drivers/media/v4l2-core/v4l2-h264.c270
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c72
-rw-r--r--drivers/media/v4l2-core/v4l2-jpeg.c632
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c95
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c91
-rw-r--r--drivers/memory/Kconfig11
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/bt1-l2-ctl.c322
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c8
-rw-r--r--drivers/message/fusion/mptbase.c8
-rw-r--r--drivers/mfd/Kconfig61
-rw-r--r--drivers/mfd/Makefile7
-rw-r--r--drivers/mfd/gateworks-gsc.c277
-rw-r--r--drivers/mfd/htc-i2cpld.c6
-rw-r--r--drivers/mfd/intel-lpss-pci.c2
-rw-r--r--drivers/mfd/intel_pmc_bxt.c468
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c34
-rw-r--r--drivers/mfd/intel_soc_pmic_mrfld.c10
-rw-r--r--drivers/mfd/max77620.c1
-rw-r--r--drivers/mfd/mp2629.c79
-rw-r--r--drivers/mfd/mt6358-irq.c235
-rw-r--r--drivers/mfd/mt6360-core.c424
-rw-r--r--drivers/mfd/mt6397-core.c101
-rw-r--r--drivers/mfd/mt6397-irq.c35
-rw-r--r--drivers/mfd/sm501.c24
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c1
-rw-r--r--drivers/mfd/stm32-timers.c32
-rw-r--r--drivers/mfd/stmfx.c22
-rw-r--r--drivers/mfd/stpmic1.c2
-rw-r--r--drivers/mfd/tqmx86.c2
-rw-r--r--drivers/mfd/vexpress-sysreg.c99
-rw-r--r--drivers/mfd/wcd934x.c1
-rw-r--r--drivers/mfd/wm8994-core.c8
-rw-r--r--drivers/misc/Kconfig9
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cardreader/rts5249.c29
-rw-r--r--drivers/misc/cardreader/rts5260.c26
-rw-r--r--drivers/misc/cardreader/rts5261.c47
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c46
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h1
-rw-r--r--drivers/misc/cxl/Kconfig8
-rw-r--r--drivers/misc/fastrpc.c13
-rw-r--r--drivers/misc/genwqe/card_utils.c42
-rw-r--r--drivers/misc/habanalabs/Makefile3
-rw-r--r--drivers/misc/habanalabs/command_buffer.c28
-rw-r--r--drivers/misc/habanalabs/command_submission.c385
-rw-r--r--drivers/misc/habanalabs/context.c8
-rw-r--r--drivers/misc/habanalabs/debugfs.c116
-rw-r--r--drivers/misc/habanalabs/device.c53
-rw-r--r--drivers/misc/habanalabs/firmware_if.c297
-rw-r--r--drivers/misc/habanalabs/gaudi/Makefile5
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c6748
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h261
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c884
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c121
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c9090
-rw-r--r--drivers/misc/habanalabs/goya/goya.c345
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h12
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c2
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c100
-rw-r--r--drivers/misc/habanalabs/habanalabs.h187
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c14
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c21
-rw-r--r--drivers/misc/habanalabs/hw_queue.c118
-rw-r--r--drivers/misc/habanalabs/hwmon.c75
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h43
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h174
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h348
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h800
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h4974
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h299
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h800
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h72
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h502
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h1062
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h56
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h82
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h2578
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h800
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi.h59
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h310
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h694
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h367
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h36
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h458
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_packets.h212
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h27
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h3
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h56
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_reg_map.h43
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h58
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h2
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h16
-rw-r--r--drivers/misc/habanalabs/memory.c37
-rw-r--r--drivers/misc/habanalabs/pci.c63
-rw-r--r--drivers/misc/habanalabs/sysfs.c17
-rw-r--r--drivers/misc/lkdtm/bugs.c2
-rw-r--r--drivers/misc/mei/client.c2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/pci-txe.c2
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c2
-rw-r--r--drivers/misc/mic/scif/scif_rma.c26
-rw-r--r--drivers/misc/ocxl/context.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c10
-rw-r--r--drivers/misc/sgi-xp/xpnet.c8
-rw-r--r--drivers/misc/vexpress-syscfg.c280
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c2
-rw-r--r--drivers/misc/xilinx_sdfec.c61
-rw-r--r--drivers/mmc/core/block.c5
-rw-r--r--drivers/mmc/core/bus.c14
-rw-r--r--drivers/mmc/core/core.c6
-rw-r--r--drivers/mmc/core/debugfs.c6
-rw-r--r--drivers/mmc/core/mmc.c6
-rw-r--r--drivers/mmc/core/queue.c16
-rw-r--r--drivers/mmc/core/quirks.h2
-rw-r--r--drivers/mmc/core/regulator.c17
-rw-r--r--drivers/mmc/core/sd.c30
-rw-r--r--drivers/mmc/core/sdio.c136
-rw-r--r--drivers/mmc/host/Kconfig29
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/alcor.c6
-rw-r--r--drivers/mmc/host/android-goldfish.c10
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/au1xmmc.c20
-rw-r--r--drivers/mmc/host/bcm2835.c3
-rw-r--r--drivers/mmc/host/cavium.c3
-rw-r--r--drivers/mmc/host/cb710-mmc.c8
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c9
-rw-r--r--drivers/mmc/host/jz4740_mmc.c13
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c5
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-clkc.c158
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c914
-rw-r--r--drivers/mmc/host/meson-mx-sdhc.h141
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c5
-rw-r--r--drivers/mmc/host/mmc_hsq.c29
-rw-r--r--drivers/mmc/host/mmc_hsq.h1
-rw-r--r--drivers/mmc/host/mmc_spi.c20
-rw-r--r--drivers/mmc/host/mmci.c30
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c15
-rw-r--r--drivers/mmc/host/mtk-sd.c21
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c3
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mmc/host/owl-mmc.c8
-rw-r--r--drivers/mmc/host/renesas_sdhi.h5
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c146
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c7
-rw-r--r--drivers/mmc/host/sdhci-acpi.c10
-rw-r--r--drivers/mmc/host/sdhci-cadence.c10
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c45
-rw-r--r--drivers/mmc/host/sdhci-esdhc-mcf.c521
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-msm.c162
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c632
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c12
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c74
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c34
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c9
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c129
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c8
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci-sprd.c28
-rw-r--r--drivers/mmc/host/sdhci-tegra.c57
-rw-r--r--drivers/mmc/host/sdhci.c293
-rw-r--r--drivers/mmc/host/sdhci.h38
-rw-r--r--drivers/mmc/host/sdricoh_cs.c105
-rw-r--r--drivers/mmc/host/sunxi-mmc.c10
-rw-r--r--drivers/mmc/host/tifm_sd.c9
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c10
-rw-r--r--drivers/mmc/host/uniphier-sd.c12
-rw-r--r--drivers/mmc/host/usdhi6rol0.c9
-rw-r--r--drivers/mmc/host/via-sdmmc.c7
-rw-r--r--drivers/mmc/host/wbsd.c26
-rw-r--r--drivers/mtd/Kconfig10
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/mtdcore.c5
-rw-r--r--drivers/mtd/mtdpstore.c578
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c3
-rw-r--r--drivers/mtd/nand/spi/core.c4
-rw-r--r--drivers/mtd/ubi/debug.c12
-rw-r--r--drivers/mtd/ubi/io.c4
-rw-r--r--drivers/net/Kconfig12
-rw-r--r--drivers/net/appletalk/Kconfig8
-rw-r--r--drivers/net/arcnet/Kconfig6
-rw-r--r--drivers/net/bareudp.c18
-rw-r--r--drivers/net/bonding/bond_alb.c46
-rw-r--r--drivers/net/bonding/bond_main.c300
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c4
-rw-r--r--drivers/net/bonding/bonding_priv.h2
-rw-r--r--drivers/net/caif/Kconfig2
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c5
-rw-r--r--drivers/net/can/sun4i_can.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c82
-rw-r--r--drivers/net/dsa/b53/b53_priv.h8
-rw-r--r--drivers/net/dsa/b53/b53_srab.c4
-rw-r--r--drivers/net/dsa/dsa_loop.c1
-rw-r--r--drivers/net/dsa/mt7530.c33
-rw-r--r--drivers/net/dsa/mt7530.h7
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c16
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c55
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h7
-rw-r--r--drivers/net/dsa/ocelot/felix.c116
-rw-r--r--drivers/net/dsa/ocelot/felix.h12
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c214
-rw-r--r--drivers/net/dsa/sja1105/Kconfig9
-rw-r--r--drivers/net/dsa/sja1105/Makefile4
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h103
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c58
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c208
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c144
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c215
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c1235
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h13
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c16
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c274
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h118
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c127
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.h36
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c782
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.h74
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-platform.c10
-rw-r--r--drivers/net/ethernet/3com/3c509.c1
-rw-r--r--drivers/net/ethernet/3com/3c515.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c345
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c1
-rw-r--r--drivers/net/ethernet/agere/et131x.c4
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h19
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c124
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h80
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_common_defs.h2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c26
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h7
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c85
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c55
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h17
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h2
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/7990.h2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c2
-rw-r--r--drivers/net/ethernet/apple/bmac.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h40
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c79
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c72
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c335
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h27
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c27
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c72
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c30
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c348
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h44
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c83
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h101
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c60
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c35
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c841
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h127
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c234
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h102
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h391
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c131
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h606
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c320
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c6
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c43
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c26
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c116
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c270
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h216
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c790
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h23
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c96
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h12
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c166
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c96
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c204
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c10
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/dnet.c3
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c3
-rw-r--r--drivers/net/ethernet/freescale/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig10
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c150
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c16
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c520
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h86
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c28
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h59
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c177
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h97
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c34
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h86
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h159
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c50
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c1103
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c127
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c13
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c166
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h53
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c88
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c195
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1710
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c79
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h87
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c388
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h32
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h87
-rw-r--r--drivers/net/ethernet/huawei/hinic/Makefile2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c538
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c205
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c98
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c78
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h26
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c53
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.h26
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c1210
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h154
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c33
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h12
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c158
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c207
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h159
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c15
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c1294
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.h109
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c17
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c22
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c117
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c52
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c166
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h40
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c381
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h3
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h72
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h76
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c663
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h82
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c117
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c146
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c180
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c105
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c134
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c1697
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c840
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h166
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c919
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c355
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c397
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h39
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h128
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c605
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c733
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c91
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c96
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c415
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h80
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c1221
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c380
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h13
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h449
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h51
-rw-r--r--drivers/net/ethernet/intel/igc/igc_diag.c186
-rw-r--r--drivers/net/ethernet/intel/igc/igc_diag.h30
-rw-r--r--drivers/net/ethernet/intel/igc/igc_dump.c113
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c783
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c1043
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h44
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c157
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c79
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c309
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c34
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c30
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c52
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c8
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig9
-rw-r--r--drivers/net/ethernet/mediatek/Makefile3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c1651
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c350
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c368
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c646
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c332
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h153
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c301
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c947
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c687
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c170
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c235
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c160
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c279
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c322
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c600
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c136
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c228
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c626
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h185
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c220
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c305
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c378
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c621
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c1324
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h7
-rw-r--r--drivers/net/ethernet/micrel/Kconfig2
-rw-r--r--drivers/net/ethernet/micrel/Makefile2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h151
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c (renamed from drivers/net/ethernet/micrel/ks8851.c)698
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c1393
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c357
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c485
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c5
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c17
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c81
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h6
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c2
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c5
-rw-r--r--drivers/net/ethernet/mscc/Makefile2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c237
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.c113
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.h5
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c30
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c29
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c324
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.h41
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c12
-rw-r--r--drivers/net/ethernet/neterion/Kconfig4
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c125
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h27
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c42
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c16
-rw-r--r--drivers/net/ethernet/ni/nixge.c3
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c14
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h17
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c20
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h1089
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c177
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c25
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c136
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c42
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c40
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c253
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c149
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h17
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c232
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c5
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.h5
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c3
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c25
-rw-r--r--drivers/net/ethernet/realtek/8139too.c26
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c1020
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c10
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c3
-rw-r--r--drivers/net/ethernet/seeq/ether3.c5
-rw-r--r--drivers/net/ethernet/sfc/ef10.c217
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c27
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c25
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h12
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c82
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h17
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.c8
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c7
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h10
-rw-r--r--drivers/net/ethernet/sfc/nic.h11
-rw-r--r--drivers/net/ethernet/sfc/ptp.c7
-rw-r--r--drivers/net/ethernet/sfc/rx.c3
-rw-r--r--drivers/net/ethernet/sfc/siena.c8
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c8
-rw-r--r--drivers/net/ethernet/smsc/Kconfig4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c9
-rw-r--r--drivers/net/ethernet/socionext/netsec.c32
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c315
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c160
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c146
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c74
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c3
-rw-r--r--drivers/net/ethernet/sun/cassini.c17
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c12
-rw-r--r--drivers/net/ethernet/ti/Kconfig41
-rw-r--r--drivers/net/ethernet/ti/Makefile5
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c36
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c208
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h13
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c626
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h29
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c1086
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h74
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c26
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c25
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c23
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.c422
-rw-r--r--drivers/net/ethernet/ti/cpts.h27
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c7
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/via/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c8
-rw-r--r--drivers/net/fddi/Kconfig2
-rw-r--r--drivers/net/hamradio/Kconfig14
-rw-r--r--drivers/net/hamradio/bpqether.c23
-rw-r--r--drivers/net/hamradio/scc.c2
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/ipa/gsi.c128
-rw-r--r--drivers/net/ipa/gsi.h15
-rw-r--r--drivers/net/ipa/gsi_trans.c5
-rw-r--r--drivers/net/ipa/ipa.h10
-rw-r--r--drivers/net/ipa/ipa_clock.c4
-rw-r--r--drivers/net/ipa/ipa_cmd.c73
-rw-r--r--drivers/net/ipa/ipa_cmd.h11
-rw-r--r--drivers/net/ipa/ipa_data-sc7180.c14
-rw-r--r--drivers/net/ipa/ipa_data-sdm845.c15
-rw-r--r--drivers/net/ipa/ipa_data.h29
-rw-r--r--drivers/net/ipa/ipa_endpoint.c176
-rw-r--r--drivers/net/ipa/ipa_endpoint.h3
-rw-r--r--drivers/net/ipa/ipa_main.c8
-rw-r--r--drivers/net/ipa/ipa_mem.c210
-rw-r--r--drivers/net/ipa/ipa_mem.h3
-rw-r--r--drivers/net/ipa/ipa_smp2p.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c2
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/macvlan.c14
-rw-r--r--drivers/net/net_failover.c3
-rw-r--r--drivers/net/netdevsim/dev.c13
-rw-r--r--drivers/net/phy/Kconfig21
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/at803x.c310
-rw-r--r--drivers/net/phy/bcm-phy-lib.c337
-rw-r--r--drivers/net/phy/bcm-phy-lib.h19
-rw-r--r--drivers/net/phy/bcm54140.c860
-rw-r--r--drivers/net/phy/bcm87xx.c2
-rw-r--r--drivers/net/phy/broadcom.c70
-rw-r--r--drivers/net/phy/cortina.c4
-rw-r--r--drivers/net/phy/dp83867.c4
-rw-r--r--drivers/net/phy/dp83869.c36
-rw-r--r--drivers/net/phy/marvell.c484
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/mdio-bcm-iproc.c4
-rw-r--r--drivers/net/phy/mdio-ipq4019.c160
-rw-r--r--drivers/net/phy/mdio-moxart.c1
-rw-r--r--drivers/net/phy/mdio-mscc-miim.c33
-rw-r--r--drivers/net/phy/mdio_bus.c25
-rw-r--r--drivers/net/phy/micrel.c128
-rw-r--r--drivers/net/phy/mscc/mscc.h3
-rw-r--r--drivers/net/phy/mscc/mscc_mac.h6
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c16
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.h3
-rw-r--r--drivers/net/phy/mscc/mscc_main.c106
-rw-r--r--drivers/net/phy/nxp-tja11xx.c412
-rw-r--r--drivers/net/phy/phy-c45.c1
-rw-r--r--drivers/net/phy/phy-core.c11
-rw-r--r--drivers/net/phy/phy.c196
-rw-r--r--drivers/net/phy/phy_device.c284
-rw-r--r--drivers/net/phy/phylink.c60
-rw-r--r--drivers/net/phy/realtek.c15
-rw-r--r--drivers/net/phy/swphy.c2
-rw-r--r--drivers/net/phy/teranetics.c1
-rw-r--r--drivers/net/plip/Kconfig2
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/rionet.c3
-rw-r--r--drivers/net/team/team.c1
-rw-r--r--drivers/net/tun.c18
-rw-r--r--drivers/net/usb/ax88179_178a.c79
-rw-r--r--drivers/net/usb/cdc_ether.c11
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c3
-rw-r--r--drivers/net/usb/r8152.c23
-rw-r--r--drivers/net/usb/sierra_net.c5
-rw-r--r--drivers/net/veth.c34
-rw-r--r--drivers/net/virtio_net.c25
-rw-r--r--drivers/net/vmxnet3/Makefile2
-rw-r--r--drivers/net/vmxnet3/upt1_defs.h5
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h31
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c191
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c277
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h25
-rw-r--r--drivers/net/vrf.c1
-rw-r--r--drivers/net/vxlan.c374
-rw-r--r--drivers/net/wan/Kconfig4
-rw-r--r--drivers/net/wireguard/messages.h2
-rw-r--r--drivers/net/wireguard/noise.c22
-rw-r--r--drivers/net/wireguard/noise.h14
-rw-r--r--drivers/net/wireguard/queueing.h10
-rw-r--r--drivers/net/wireguard/receive.c44
-rw-r--r--drivers/net/wireguard/selftest/counter.c17
-rw-r--r--drivers/net/wireguard/send.c19
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h38
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h20
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c399
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h40
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h66
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c51
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c329
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c71
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c74
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c216
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h27
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c186
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h40
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c142
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h116
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c52
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h61
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c61
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h22
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c47
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h25
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h22
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.c48
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c297
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c60
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c69
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c87
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c22
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h65
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c102
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c35
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c170
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h88
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c26
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h8
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c49
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c21
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h16
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h58
-rw-r--r--drivers/net/wireless/atmel/atmel.c3
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/sdio.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/xmit.c13
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c301
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c151
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c79
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h2
-rw-r--r--drivers/net/wireless/cisco/airo.c12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Kconfig4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c29
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h28
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h22
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c175
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/soc.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c220
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h128
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c105
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c175
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c143
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h142
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c132
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c22
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/spectrum_cs.c3
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_oid.h8
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_mgt.h2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c64
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h8
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h2
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h2
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c5
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c35
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c29
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c38
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c39
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h68
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c87
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c94
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c291
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c765
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c389
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c1579
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h314
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c73
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h190
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c135
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c174
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c184
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h108
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c447
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c145
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c93
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c463
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c285
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c243
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h125
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c702
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c1477
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h346
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c838
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c3182
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h1034
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h469
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c191
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h375
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h14
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c83
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h54
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c3
-rw-r--r--drivers/net/wireless/ray_cs.c3
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig26
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile28
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c24
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c27
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c55
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h32
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c437
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c43
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c82
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h101
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c82
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c94
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h108
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c2753
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h283
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d_table.c1196
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d_table.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c30
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c52
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c30
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c183
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h28
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c14666
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c30
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c20
-rw-r--r--drivers/net/wireless/rndis_wlan.c32
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c2
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_sdio.c9
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c37
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c1
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c10
-rw-r--r--drivers/nfc/st21nfca/dep.c4
-rw-r--r--drivers/ntb/core.c9
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c4
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c6
-rw-r--r--drivers/ntb/hw/intel/Makefile2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c49
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.h1
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.c13
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.h8
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c552
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.h100
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h12
-rw-r--r--drivers/ntb/test/ntb_perf.c49
-rw-r--r--drivers/ntb/test/ntb_pingpong.c14
-rw-r--r--drivers/ntb/test/ntb_tool.c9
-rw-r--r--drivers/nvdimm/blk.c6
-rw-r--r--drivers/nvdimm/btt.c6
-rw-r--r--drivers/nvdimm/nd.h19
-rw-r--r--drivers/nvdimm/pmem.c6
-rw-r--r--drivers/nvme/host/core.c324
-rw-r--r--drivers/nvme/host/fc.c577
-rw-r--r--drivers/nvme/host/fc.h227
-rw-r--r--drivers/nvme/host/lightnvm.c7
-rw-r--r--drivers/nvme/host/multipath.c16
-rw-r--r--drivers/nvme/host/nvme.h28
-rw-r--r--drivers/nvme/host/pci.c133
-rw-r--r--drivers/nvme/host/rdma.c321
-rw-r--r--drivers/nvme/host/tcp.c117
-rw-r--r--drivers/nvme/target/Kconfig1
-rw-r--r--drivers/nvme/target/admin-cmd.c42
-rw-r--r--drivers/nvme/target/configfs.c272
-rw-r--r--drivers/nvme/target/core.c166
-rw-r--r--drivers/nvme/target/discovery.c8
-rw-r--r--drivers/nvme/target/fabrics-cmd.c15
-rw-r--r--drivers/nvme/target/fc.c805
-rw-r--r--drivers/nvme/target/fcloop.c155
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c120
-rw-r--r--drivers/nvme/target/io-cmd-file.c23
-rw-r--r--drivers/nvme/target/nvmet.h36
-rw-r--r--drivers/nvme/target/rdma.c420
-rw-r--r--drivers/nvme/target/tcp.c107
-rw-r--r--drivers/nvme/target/trace.h28
-rw-r--r--drivers/nvmem/core.c104
-rw-r--r--drivers/nvmem/imx-ocotp.c9
-rw-r--r--drivers/nvmem/jz4780-efuse.c4
-rw-r--r--drivers/nvmem/qfprom.c14
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c11
-rw-r--r--drivers/of/dynamic.c3
-rw-r--r--drivers/of/fdt.c8
-rw-r--r--drivers/of/kobj.c3
-rw-r--r--drivers/of/of_mdio.c73
-rw-r--r--drivers/of/of_reserved_mem.c51
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/of/property.c20
-rw-r--r--drivers/oprofile/event_buffer.c2
-rw-r--r--drivers/parport/daisy.c29
-rw-r--r--drivers/parport/ieee1284.c94
-rw-r--r--drivers/parport/ieee1284_ops.c70
-rw-r--r--drivers/parport/parport_amiga.c22
-rw-r--r--drivers/parport/parport_atari.c2
-rw-r--r--drivers/parport/parport_cs.c6
-rw-r--r--drivers/parport/parport_gsc.c25
-rw-r--r--drivers/parport/parport_gsc.h21
-rw-r--r--drivers/parport/parport_ip32.c117
-rw-r--r--drivers/parport/parport_mfc3.c21
-rw-r--r--drivers/parport/parport_pc.c263
-rw-r--r--drivers/parport/parport_sunbpp.c2
-rw-r--r--drivers/parport/probe.c34
-rw-r--r--drivers/parport/procfs.c45
-rw-r--r--drivers/parport/share.c292
-rw-r--r--drivers/pci/controller/Kconfig32
-rw-r--r--drivers/pci/controller/Makefile4
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c2
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c10
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h6
-rw-r--r--drivers/pci/controller/dwc/Kconfig17
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c8
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c4
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c22
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h3
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c19
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c383
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c4
-rw-r--r--drivers/pci/controller/pci-aardvark.c266
-rw-r--r--drivers/pci/controller/pci-host-common.c18
-rw-r--r--drivers/pci/controller/pci-host-generic.c26
-rw-r--r--drivers/pci/controller/pci-hyperv.c126
-rw-r--r--drivers/pci/controller/pci-loongson.c247
-rw-r--r--drivers/pci/controller/pci-tegra.c7
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c14
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c16
-rw-r--r--drivers/pci/controller/pci-v3-semi.c6
-rw-r--r--drivers/pci/controller/pci-xgene.c4
-rw-r--r--drivers/pci/controller/pcie-altera.c2
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c37
-rw-r--r--drivers/pci/controller/pcie-mediatek.c3
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c563
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c1130
-rw-r--r--drivers/pci/controller/pcie-rcar.c1211
-rw-r--r--drivers/pci/controller/pcie-rcar.h140
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c2
-rw-r--r--drivers/pci/controller/pcie-tango.c13
-rw-r--r--drivers/pci/controller/vmd.c6
-rw-r--r--drivers/pci/ecam.c10
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c3
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c204
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c13
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/shpchp.h2
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c5
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/p2pdma.c2
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci-bridge-emul.c61
-rw-r--r--drivers/pci/pci-driver.c34
-rw-r--r--drivers/pci/pci-label.c4
-rw-r--r--drivers/pci/pci.c64
-rw-r--r--drivers/pci/pcie/Kconfig1
-rw-r--r--drivers/pci/pcie/aer.c340
-rw-r--r--drivers/pci/pcie/aspm.c10
-rw-r--r--drivers/pci/pcie/dpc.c3
-rw-r--r--drivers/pci/pcie/edr.c4
-rw-r--r--drivers/pci/pcie/pme.c4
-rw-r--r--drivers/pci/pcie/portdrv.h13
-rw-r--r--drivers/pci/pcie/portdrv_pci.c2
-rw-r--r--drivers/pci/pcie/ptm.c22
-rw-r--r--drivers/pci/probe.c67
-rw-r--r--drivers/pci/quirks.c50
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/setup-bus.c115
-rw-r--r--drivers/pci/setup-res.c9
-rw-r--r--drivers/pci/switch/switchtec.c2
-rw-r--r--drivers/pcmcia/cs_internal.h6
-rw-r--r--drivers/pcmcia/electra_cf.c45
-rw-r--r--drivers/pcmcia/pcmcia_cis.c6
-rw-r--r--drivers/pcmcia/yenta_socket.c40
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/arm_dsu_pmu.c4
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c5
-rw-r--r--drivers/perf/arm_spe_pmu.c8
-rw-r--r--drivers/perf/hisilicon/Kconfig7
-rw-r--r--drivers/perf/hisilicon/Makefile3
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c10
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c12
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c10
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c23
-rw-r--r--drivers/phy/amlogic/Kconfig15
-rw-r--r--drivers/phy/amlogic/Makefile1
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb3.c283
-rw-r--r--drivers/phy/amlogic/phy-meson8b-usb2.c149
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-usb.c57
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c16
-rw-r--r--drivers/phy/cadence/Kconfig9
-rw-r--r--drivers/phy/cadence/Makefile1
-rw-r--r--drivers/phy/cadence/phy-cadence-salvo.c325
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c27
-rw-r--r--drivers/phy/intel/Kconfig15
-rw-r--r--drivers/phy/intel/Makefile1
-rw-r--r--drivers/phy/intel/phy-intel-combo.c632
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/qualcomm/Kconfig17
-rw-r--r--drivers/phy/qualcomm/Makefile2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c148
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c254
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h238
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c287
-rw-r--r--drivers/phy/samsung/phy-s5pv210-usb2.c4
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c104
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c65
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c60
-rw-r--r--drivers/pinctrl/Kconfig17
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/actions/pinctrl-s700.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c80
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c26
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8dxl.c193
-rw-r--r--drivers/pinctrl/intel/Kconfig8
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c58
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c282
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c30
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c22
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h27
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c344
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c15
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c32
-rw-r--r--drivers/pinctrl/mediatek/Kconfig13
-rw-r--r--drivers/pinctrl/mediatek/Makefile5
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c9
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6765.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c28
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c14
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-ab8505.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c2
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c1
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c21
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c514
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.h52
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_i2c.c124
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_spi.c262
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c127
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c11
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c4
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c6
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c2
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c9
-rw-r--r--drivers/pinctrl/qcom/Kconfig9
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c28
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c1361
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c82
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig4
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c744
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c5
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h1
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c20
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c7
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c2
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c2
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c3
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c2
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c45
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c119
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c7
-rw-r--r--drivers/platform/mips/Kconfig6
-rw-r--r--drivers/platform/mips/Makefile1
-rw-r--r--drivers/platform/mips/rs780e-acpi.c169
-rw-r--r--drivers/platform/x86/Kconfig66
-rw-r--r--drivers/platform/x86/Makefile4
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/asus-laptop.c25
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c1
-rw-r--r--drivers/platform/x86/asus-wmi.c117
-rw-r--r--drivers/platform/x86/dcdbas.c43
-rw-r--r--drivers/platform/x86/dell-laptop.c11
-rw-r--r--drivers/platform/x86/dell-wmi.c10
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/hp-wmi.c30
-rw-r--r--drivers/platform/x86/intel-hid.c7
-rw-r--r--drivers/platform/x86/intel-vbtn.c104
-rw-r--r--drivers/platform/x86/intel-wmi-sbl-fw-update.c145
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_typec.c106
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c15
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c949
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c447
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c43
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c68
-rw-r--r--drivers/platform/x86/intel_scu_pltdrv.c60
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c11
-rw-r--r--drivers/platform/x86/intel_telemetry_core.c17
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c15
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c97
-rw-r--r--drivers/platform/x86/lg-laptop.c18
-rw-r--r--drivers/platform/x86/samsung-laptop.c3
-rw-r--r--drivers/platform/x86/sony-laptop.c60
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c173
-rw-r--r--drivers/platform/x86/toshiba_acpi.c26
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c113
-rw-r--r--drivers/platform/x86/wmi.c45
-rw-r--r--drivers/pnp/pnpbios/pnpbios.h2
-rw-r--r--drivers/power/reset/Kconfig2
-rw-r--r--drivers/power/reset/mt6323-poweroff.c2
-rw-r--r--drivers/power/reset/vexpress-poweroff.c8
-rw-r--r--drivers/power/supply/Kconfig10
-rw-r--r--drivers/power/supply/Makefile1
-rw-r--r--drivers/power/supply/bd70528-charger.c10
-rw-r--r--drivers/power/supply/mp2629_charger.c669
-rw-r--r--drivers/power/supply/test_power.c2
-rw-r--r--drivers/powercap/intel_rapl_common.c4
-rw-r--r--drivers/ps3/ps3-lpm.c8
-rw-r--r--drivers/ps3/ps3-vuart.c5
-rw-r--r--drivers/ptp/ptp_chardev.c1
-rw-r--r--drivers/ptp/ptp_clock.c9
-rw-r--r--drivers/ptp/ptp_clockmatrix.c94
-rw-r--r--drivers/ptp/ptp_clockmatrix.h8
-rw-r--r--drivers/ptp/ptp_idt82p33.c6
-rw-r--r--drivers/ptp/ptp_ines.c8
-rw-r--r--drivers/ptp/ptp_kvm.c2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c32
-rw-r--r--drivers/regulator/88pg86x.c4
-rw-r--r--drivers/regulator/88pm800-regulator.c4
-rw-r--r--drivers/regulator/Kconfig11
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/ab8500.c22
-rw-r--r--drivers/regulator/act8865-regulator.c4
-rw-r--r--drivers/regulator/act8945a-regulator.c2
-rw-r--r--drivers/regulator/arizona-ldo1.c2
-rw-r--r--drivers/regulator/arizona-micsupp.c4
-rw-r--r--drivers/regulator/as3711-regulator.c6
-rw-r--r--drivers/regulator/as3722-regulator.c4
-rw-r--r--drivers/regulator/axp20x-regulator.c16
-rw-r--r--drivers/regulator/bcm590xx-regulator.c8
-rw-r--r--drivers/regulator/bd70528-regulator.c8
-rw-r--r--drivers/regulator/bd71828-regulator.c10
-rw-r--r--drivers/regulator/bd718x7-regulator.c238
-rw-r--r--drivers/regulator/core.c59
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/regulator/db8500-prcmu.c2
-rw-r--r--drivers/regulator/helpers.c130
-rw-r--r--drivers/regulator/hi6421-regulator.c4
-rw-r--r--drivers/regulator/lochnagar-regulator.c4
-rw-r--r--drivers/regulator/lp873x-regulator.c4
-rw-r--r--drivers/regulator/lp87565-regulator.c2
-rw-r--r--drivers/regulator/lp8788-buck.c2
-rw-r--r--drivers/regulator/max77650-regulator.c2
-rw-r--r--drivers/regulator/max77826-regulator.c301
-rw-r--r--drivers/regulator/max8998.c105
-rw-r--r--drivers/regulator/mcp16502.c4
-rw-r--r--drivers/regulator/mp8859.c2
-rw-r--r--drivers/regulator/mt6323-regulator.c6
-rw-r--r--drivers/regulator/mt6358-regulator.c8
-rw-r--r--drivers/regulator/mt6380-regulator.c6
-rw-r--r--drivers/regulator/mt6397-regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c4
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c10
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c24
-rw-r--r--drivers/regulator/qcom_smd-regulator.c78
-rw-r--r--drivers/regulator/rk808-regulator.c10
-rw-r--r--drivers/regulator/s2mps11.c14
-rw-r--r--drivers/regulator/sky81452-regulator.c2
-rw-r--r--drivers/regulator/stpmic1_regulator.c18
-rw-r--r--drivers/regulator/tps65086-regulator.c10
-rw-r--r--drivers/regulator/tps65217-regulator.c4
-rw-r--r--drivers/regulator/tps65218-regulator.c6
-rw-r--r--drivers/regulator/tps65912-regulator.c4
-rw-r--r--drivers/regulator/tps80031-regulator.c7
-rw-r--r--drivers/regulator/twl-regulator.c4
-rw-r--r--drivers/regulator/twl6030-regulator.c2
-rw-r--r--drivers/regulator/wm831x-dcdc.c2
-rw-r--r--drivers/regulator/wm831x-ldo.c4
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/regulator/wm8400-regulator.c2
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c69
-rw-r--r--drivers/reset/reset-imx7.c101
-rw-r--r--drivers/reset/reset-zynqmp.c26
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/rtc-88pm860x.c6
-rw-r--r--drivers/rtc/rtc-abx80x.c66
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c10
-rw-r--r--drivers/rtc/rtc-goldfish.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c173
-rw-r--r--drivers/rtc/rtc-lpc24xx.c4
-rw-r--r--drivers/rtc/rtc-max77686.c22
-rw-r--r--drivers/rtc/rtc-mc13xxx.c4
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-mt2712.c16
-rw-r--r--drivers/rtc/rtc-mt6397.c18
-rw-r--r--drivers/rtc/rtc-pcf2127.c31
-rw-r--r--drivers/rtc/rtc-rc5t619.c4
-rw-r--r--drivers/rtc/rtc-rv3028.c2
-rw-r--r--drivers/rtc/rtc-snvs.c59
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c20
-rw-r--r--drivers/s390/block/dasd_ioctl.c76
-rw-r--r--drivers/s390/net/Kconfig9
-rw-r--r--drivers/s390/net/ctcm_main.c40
-rw-r--r--drivers/s390/net/ism_drv.c4
-rw-r--r--drivers/s390/net/lcs.c59
-rw-r--r--drivers/s390/net/netiucv.c104
-rw-r--r--drivers/s390/net/qeth_core.h49
-rw-r--r--drivers/s390/net/qeth_core_main.c496
-rw-r--r--drivers/s390/net/qeth_core_mpc.h25
-rw-r--r--drivers/s390/net/qeth_core_sys.c15
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c19
-rw-r--r--drivers/s390/net/smsgiucv.c65
-rw-r--r--drivers/s390/scsi/zfcp_aux.c5
-rw-r--r--drivers/s390/scsi/zfcp_diag.h6
-rw-r--r--drivers/s390/scsi/zfcp_erp.c84
-rw-r--r--drivers/s390/scsi/zfcp_ext.h11
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c76
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c19
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c131
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c16
-rw-r--r--drivers/scsi/BusLogic.c2
-rw-r--r--drivers/scsi/aacraid/aachba.c1
-rw-r--r--drivers/scsi/aacraid/commctrl.c13
-rw-r--r--drivers/scsi/aacraid/commsup.c4
-rw-r--r--drivers/scsi/aacraid/linit.c16
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c18
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c19
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c14
-rw-r--r--drivers/scsi/bfa/bfa_core.c2
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c4
-rw-r--r--drivers/scsi/bfa/bfa_svc.c7
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_attr.c4
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c18
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c7
-rw-r--r--drivers/scsi/cxlflash/main.c1
-rw-r--r--drivers/scsi/dpt_i2o.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c4
-rw-r--r--drivers/scsi/fnic/fnic_main.c4
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c6
-rw-r--r--drivers/scsi/fnic/vnic_dev.c12
-rw-r--r--drivers/scsi/fnic/vnic_wq.c4
-rw-r--r--drivers/scsi/gdth.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c17
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c26
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c4
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/isci/isci.h6
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/lpfc/lpfc.h25
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c111
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h9
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c528
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h180
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c841
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h158
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c173
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c12
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c81
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h6
-rw-r--r--drivers/scsi/mpt3sas/Makefile3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c266
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h21
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_debugfs.c157
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8
-rw-r--r--drivers/scsi/mvsas/mv_init.c6
-rw-r--r--drivers/scsi/pmcraid.c4
-rw-r--r--drivers/scsi/qedf/qedf.h6
-rw-r--r--drivers/scsi/qedf/qedf_els.c10
-rw-r--r--drivers/scsi/qedf/qedf_io.c48
-rw-r--r--drivers/scsi/qedf/qedf_main.c135
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c21
-rw-r--r--drivers/scsi/qedi/qedi_main.c22
-rw-r--r--drivers/scsi/qla1280.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c43
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c866
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h443
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h728
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h768
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h26
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c380
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c287
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c123
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c120
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h64
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c208
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h36
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c133
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c323
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c111
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h232
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c16
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_debug.c2048
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/scsi/scsi_ioctl.c20
-rw-r--r--drivers/scsi/scsi_lib.c313
-rw-r--r--drivers/scsi/scsi_pm.c10
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c68
-rw-r--r--drivers/scsi/sd.c35
-rw-r--r--drivers/scsi/sd.h43
-rw-r--r--drivers/scsi/sd_zbc.c402
-rw-r--r--drivers/scsi/sgiwd93.c2
-rw-r--r--drivers/scsi/snic/snic.h2
-rw-r--r--drivers/scsi/snic/snic_ctl.c5
-rw-r--r--drivers/scsi/sr.c29
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/storvsc_drv.c96
-rw-r--r--drivers/scsi/ufs/ti-j721e-ufs.c13
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c30
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c10
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c61
-rw-r--r--drivers/scsi/ufs/ufs.h43
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h7
-rw-r--r--drivers/scsi/ufs/ufshcd.c515
-rw-r--r--drivers/scsi/ufs/ufshcd.h45
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/slimbus/core.c6
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c5
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c112
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c6
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c13
-rw-r--r--drivers/soc/fsl/qbman/qman.c5
-rw-r--r--drivers/soc/fsl/qe/qe.c4
-rw-r--r--drivers/soc/fsl/qe/ucc.c2
-rw-r--r--drivers/soc/imx/Makefile3
-rw-r--r--drivers/soc/imx/soc-imx.c192
-rw-r--r--drivers/soc/imx/soc-imx8m.c7
-rw-r--r--drivers/soc/kendryte/k210-sysctl.c12
-rw-r--r--drivers/soc/mediatek/Kconfig7
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c4
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c378
-rw-r--r--drivers/soc/qcom/Kconfig16
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/cmd-db.c78
-rw-r--r--drivers/soc/qcom/pdr_interface.c4
-rw-r--r--drivers/soc/qcom/qcom_aoss.c1
-rw-r--r--drivers/soc/qcom/rpmh-internal.h59
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c746
-rw-r--r--drivers/soc/qcom/rpmh.c97
-rw-r--r--drivers/soc/qcom/rpmhpd.c24
-rw-r--r--drivers/soc/qcom/rpmpd.c5
-rw-r--r--drivers/soc/qcom/smp2p.c4
-rw-r--r--drivers/soc/qcom/socinfo.c6
-rw-r--r--drivers/soc/renesas/Kconfig11
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r8a7742-sysc.c42
-rw-r--r--drivers/soc/renesas/rcar-rst.c1
-rw-r--r--drivers/soc/renesas/rcar-sysc.c3
-rw-r--r--drivers/soc/renesas/rcar-sysc.h1
-rw-r--r--drivers/soc/sifive/sifive_l2_cache.c40
-rw-r--r--drivers/soc/tegra/Kconfig1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c57
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra20.c1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c6
-rw-r--r--drivers/soc/tegra/fuse/fuse.h8
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c32
-rw-r--r--drivers/soc/tegra/pmc.c3
-rw-r--r--drivers/soc/ti/Kconfig10
-rw-r--r--drivers/soc/ti/Makefile1
-rw-r--r--drivers/soc/ti/k3-socinfo.c152
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c2
-rw-r--r--drivers/soc/xilinx/zynqmp_pm_domains.c26
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c17
-rw-r--r--drivers/soundwire/Makefile8
-rw-r--r--drivers/soundwire/bus.c71
-rw-r--r--drivers/soundwire/bus.h4
-rw-r--r--drivers/soundwire/bus_type.c22
-rw-r--r--drivers/soundwire/cadence_master.c8
-rw-r--r--drivers/soundwire/debugfs.c2
-rw-r--r--drivers/soundwire/intel.c13
-rw-r--r--drivers/soundwire/intel_init.c4
-rw-r--r--drivers/soundwire/master.c172
-rw-r--r--drivers/soundwire/mipi_disco.c11
-rw-r--r--drivers/soundwire/qcom.c34
-rw-r--r--drivers/soundwire/slave.c10
-rw-r--r--drivers/soundwire/sysfs_local.h14
-rw-r--r--drivers/soundwire/sysfs_slave.c214
-rw-r--r--drivers/soundwire/sysfs_slave_dpn.c300
-rw-r--r--drivers/spi/Kconfig22
-rw-r--r--drivers/spi/Makefile6
-rw-r--r--drivers/spi/spi-amd.c315
-rw-r--r--drivers/spi/spi-armada-3700.c10
-rw-r--r--drivers/spi/spi-atmel.c1
-rw-r--r--drivers/spi/spi-axi-spi-engine.c32
-rw-r--r--drivers/spi/spi-bcm-qspi.c181
-rw-r--r--drivers/spi/spi-bcm2835.c26
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-dw-core.c (renamed from drivers/spi/spi-dw.c)233
-rw-r--r--drivers/spi/spi-dw-dma.c480
-rw-r--r--drivers/spi/spi-dw-mid.c322
-rw-r--r--drivers/spi/spi-dw-mmio.c86
-rw-r--r--drivers/spi/spi-dw-pci.c50
-rw-r--r--drivers/spi/spi-dw.h66
-rw-r--r--drivers/spi/spi-ep93xx.c8
-rw-r--r--drivers/spi/spi-fsl-dspi.c47
-rw-r--r--drivers/spi/spi-fsl-lpspi.c21
-rw-r--r--drivers/spi/spi-fsl-qspi.c11
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c26
-rw-r--r--drivers/spi/spi-imx.c31
-rw-r--r--drivers/spi/spi-mem.c10
-rw-r--r--drivers/spi/spi-mtk-nor.c2
-rw-r--r--drivers/spi/spi-mux.c8
-rw-r--r--drivers/spi/spi-orion.c70
-rw-r--r--drivers/spi/spi-pxa2xx.c6
-rw-r--r--drivers/spi/spi-rb4xx.c19
-rw-r--r--drivers/spi/spi-rockchip.c229
-rw-r--r--drivers/spi/spi-sc18is602.c2
-rw-r--r--drivers/spi/spi-sh-msiof.c2
-rw-r--r--drivers/spi/spi-sprd-adi.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c62
-rw-r--r--drivers/spi/spi-stm32.c19
-rw-r--r--drivers/spi/spi-sun6i.c1
-rw-r--r--drivers/spi/spi-tegra114.c1
-rw-r--r--drivers/spi/spi-tegra20-sflash.c1
-rw-r--r--drivers/spi/spi-tegra20-slink.c1
-rw-r--r--drivers/spi/spi-uniphier.c11
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c5
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/spi/spidev.c3
-rw-r--r--drivers/ssb/scan.c6
-rw-r--r--drivers/ssb/sprom.c12
-rw-r--r--drivers/staging/android/ion/ion_heap.c4
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c4
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c12
-rw-r--r--drivers/staging/comedi/Makefile1
-rw-r--r--drivers/staging/comedi/comedi_compat32.c455
-rw-r--r--drivers/staging/comedi/comedi_compat32.h28
-rw-r--r--drivers/staging/comedi/comedi_fops.c564
-rw-r--r--drivers/staging/comedi/comedi_internal.h6
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c4
-rw-r--r--drivers/staging/comedi/range.c17
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c32
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/README2
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c6
-rw-r--r--drivers/staging/gasket/gasket_page_table.c2
-rw-r--r--drivers/staging/gasket/gasket_sysfs.c2
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c2
-rw-r--r--drivers/staging/greybus/hid.c3
-rw-r--r--drivers/staging/greybus/light.c3
-rw-r--r--drivers/staging/greybus/loopback.c2
-rw-r--r--drivers/staging/greybus/sdio.c11
-rw-r--r--drivers/staging/greybus/uart.c23
-rw-r--r--drivers/staging/iio/Documentation/overview.txt2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c77
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c17
-rw-r--r--drivers/staging/kpc2000/kpc2000/core.c9
-rw-r--r--drivers/staging/media/Kconfig6
-rw-r--r--drivers/staging/media/Makefile3
-rw-r--r--drivers/staging/media/atomisp/Kconfig36
-rw-r--r--drivers/staging/media/atomisp/Makefile363
-rw-r--r--drivers/staging/media/atomisp/TODO89
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig86
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile18
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c1406
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c1139
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c207
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c972
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c1910
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c1340
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c1288
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h404
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h680
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h1791
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h845
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h1272
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Kconfig11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ad5823.h62
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c2006
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h1391
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm.h102
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_bo.h315
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_common.h96
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_pool.h115
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_vm.h65
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h1359
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h38
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h247
-rw-r--r--drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h27
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3554.h130
-rw-r--r--drivers/staging/media/atomisp/include/mmu/isp_mmu.h168
-rw-r--r--drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp-regs.h199
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_acc.c605
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_acc.h119
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c6629
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h442
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_common.h74
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat.h663
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c4706
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.h277
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c1177
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h367
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c426
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h58
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c205
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_file.c227
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_file.h43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c1306
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.h50
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c1081
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_helper.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h307
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c3094
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h66
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c1456
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h466
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tables.h187
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.c163
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_trace_event.h127
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c1997
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.h36
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h376
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h58
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h173
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c320
-rw-r--r--drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h83
-rw-r--r--drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c275
-rw-r--r--drivers/staging/media/atomisp/pci/bits.h104
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h297
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h51
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h39
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c873
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c118
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c50
-rw-r--r--drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h141
-rw-r--r--drivers/staging/media/atomisp/pci/camera/util/src/util.c225
-rw-r--r--drivers/staging/media/atomisp/pci/cell_params.h40
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_configs.c385
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_params.c3419
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_states.c223
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h68
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hrt/isp2400_mamoiada_params.h228
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h63
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_configs.c386
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_params.c3366
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_states.c223
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c40
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h62
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h305
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c22
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h58
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_private.h267
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c40
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h61
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c43
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h35
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h106
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c21
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h36
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h167
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h50
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h182
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h113
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h134
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h205
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h208
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h169
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h68
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h79
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h89
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h35
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h39
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h90
-rw-r--r--drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h198
-rw-r--r--drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h256
-rw-r--r--drivers/staging/media/atomisp/pci/css_trace.h278
-rw-r--r--drivers/staging/media/atomisp/pci/defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/dma_v2_defs.h199
-rw-r--r--drivers/staging/media/atomisp/pci/gdc_v2_defs.h163
-rw-r--r--drivers/staging/media/atomisp/pci/gp_timer_defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/gpio_block_defs.h41
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_2401_irq_types_hrt.h68
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h81
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h254
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h32
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h89
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h84
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h33
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c71
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h126
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c299
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h207
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h41
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c19
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h61
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h77
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c569
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h99
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h80
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c125
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c108
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h143
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c70
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h43
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h22
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h44
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c19
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h30
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c241
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h121
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c1849
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c451
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h134
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h44
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c128
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h57
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h160
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c81
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h101
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h166
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c74
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h34
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c276
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h57
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h114
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h109
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h22
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h93
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h54
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h34
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h28
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_defs.h411
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h73
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h24
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h42
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h177
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/error_support.h39
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h47
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gpio.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h135
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h98
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h72
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h79
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h110
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h59
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h58
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h33
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gpio_public.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h32
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ibuf_ctrl_public.h93
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h115
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h184
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h185
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h101
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h94
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h79
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h223
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h40
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h59
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/ibuf_ctrl.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_dma.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h39
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h153
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_access/memory_access.h174
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_realloc.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h26
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h39
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h36
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h41
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/string_support.h165
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/system_types.h24
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h44
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h40
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h36
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c91
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h22
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h35
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h56
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h26
-rw-r--r--drivers/staging/media/atomisp/pci/hive_types.h128
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c733
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c1511
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c233
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c252
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_vm.c212
-rw-r--r--drivers/staging/media/atomisp/pci/hrt/hive_isp_css_custom_host_hrt.h106
-rw-r--r--drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.c124
-rw-r--r--drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.h57
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css.h57
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_3a.h189
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_acc_types.h476
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_buffer.h85
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_control.h131
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_device_access.c95
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_device_access.h60
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_dvs.h297
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_env.h94
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_err.h63
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_event_public.h196
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_firmware.h64
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frac.h37
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_format.h101
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_public.h353
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_host_data.h45
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_input_port.h60
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_irq.h235
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_configs.h183
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_params.h394
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_states.h73
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_memory_access.c85
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_metadata.h72
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mipi.h82
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mmu.h32
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mmu_private.h29
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_morph.h39
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe.h189
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe_public.h569
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_prbs.h53
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_properties.h41
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_shading.h40
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream.h111
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_format.h29
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_public.h585
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_timer.h68
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_tpg.h78
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_types.h605
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_version.h40
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_version_data.h27
-rw-r--r--drivers/staging/media/atomisp/pci/if_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h53
-rw-r--r--drivers/staging/media/atomisp/pci/input_selector_defs.h88
-rw-r--r--drivers/staging/media/atomisp/pci/input_switch_2400_defs.h30
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h243
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_defs.h126
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_global.h10
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_local.h10
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_private.h10
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_public.h8
-rw-r--r--drivers/staging/media/atomisp/pci/irq_controller_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c61
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c55
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c66
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c196
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h64
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h106
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c131
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h71
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c64
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c73
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h54
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c64
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c127
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h54
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h78
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c121
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c157
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h48
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h54
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c58
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c214
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h110
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c78
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h42
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c53
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c131
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h48
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c65
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h51
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h59
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c301
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h60
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c338
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h45
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h153
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h87
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c63
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c88
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h52
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c117
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h65
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h61
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c213
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h97
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c109
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h79
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c131
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h54
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h59
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h70
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c93
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c93
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c80
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c74
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h73
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c49
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c51
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h23
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h63
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c15
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h18
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c76
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c154
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h53
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h68
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c163
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h75
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c61
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c135
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c76
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c386
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h77
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h53
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h221
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c158
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h77
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h42
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h134
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h101
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h220
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c437
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h101
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h55
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c350
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h95
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h75
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c74
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h52
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h63
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c120
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h56
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h57
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c138
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h37
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c86
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c65
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h50
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c81
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h70
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c248
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h83
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h97
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c217
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h60
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h49
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h80
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c118
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h56
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h45
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h93
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h37
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h180
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h79
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_global.h155
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_local.h539
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_private.h122
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_public.h369
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_support.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_system_global.h348
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_system_local.h325
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_global.h205
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_local.h106
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_private.h129
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_mamoiada_params.h228
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_system_global.h457
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_system_local.h406
-rw-r--r--drivers/staging/media/atomisp/pci/isp_acquisition_defs.h229
-rw-r--r--drivers/staging/media/atomisp/pci/isp_capture_defs.h278
-rw-r--r--drivers/staging/media/atomisp/pci/memory_realloc.c81
-rw-r--r--drivers/staging/media/atomisp/pci/mmu/isp_mmu.c566
-rw-r--r--drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c77
-rw-r--r--drivers/staging/media/atomisp/pci/mmu_defs.h23
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h228
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c1852
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h177
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h50
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c566
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h502
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h15
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h67
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c3540
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h30
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/event/src/event.c112
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h53
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c77
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h163
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h115
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c989
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h33
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c552
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h53
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c538
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h102
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h81
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c216
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h184
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h53
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c167
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h26
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c121
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h38
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c87
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h24
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c123
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c89
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h24
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c600
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c892
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h24
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h286
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h27
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c786
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h175
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h53
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c422
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c176
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h85
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h72
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h99
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c39
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c336
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h68
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h45
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c184
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h43
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c31
-rw-r--r--drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h20
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c11110
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_defs.h410
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_dvs_info.h36
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.c333
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.h55
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_frac.h40
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_host_data.c42
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_hrt.c85
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_hrt.h34
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h1061
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_legacy.h70
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metadata.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metrics.c175
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metrics.h55
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c757
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.h49
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mmu.c60
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_morph.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.c286
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.h85
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.c402
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.h34
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c5247
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.h188
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params_internal.h21
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_pipe.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_properties.c43
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_shading.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c1829
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.h248
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream_format.c76
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream_format.h23
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_struct.h85
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_uds.h37
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_version.c37
-rw-r--r--drivers/staging/media/atomisp/pci/str2mem_defs.h39
-rw-r--r--drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/system_global.h10
-rw-r--r--drivers/staging/media/atomisp/pci/system_local.h10
-rw-r--r--drivers/staging/media/atomisp/pci/timed_controller_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/version.h20
-rw-r--r--drivers/staging/media/hantro/Kconfig6
-rw-r--r--drivers/staging/media/hantro/Makefile2
-rw-r--r--drivers/staging/media/hantro/hantro.h7
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c28
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c237
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h31
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c111
-rw-r--r--drivers/staging/media/imx/Kconfig5
-rw-r--r--drivers/staging/media/imx/TODO29
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c15
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c14
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c42
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c13
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c223
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c50
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c2
-rw-r--r--drivers/staging/media/imx/imx-media-internal-sd.c6
-rw-r--r--drivers/staging/media/imx/imx-media-of.c114
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c550
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c12
-rw-r--r--drivers/staging/media/imx/imx-media.h63
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c93
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c177
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c582
-rw-r--r--drivers/staging/media/ipu3/Kconfig3
-rw-r--r--drivers/staging/media/ipu3/TODO6
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h7
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c14
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-pool.h4
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c7
-rw-r--r--drivers/staging/media/ipu3/ipu3-dmamap.c30
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.c10
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c92
-rw-r--r--drivers/staging/media/ipu3/ipu3.c5
-rw-r--r--drivers/staging/media/ipu3/ipu3.h4
-rw-r--r--drivers/staging/media/meson/vdec/codec_vp9.c31
-rw-r--r--drivers/staging/media/omap4iss/Kconfig4
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml76
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig2
-rw-r--r--drivers/staging/media/rkisp1/Kconfig6
-rw-r--r--drivers/staging/media/rkisp1/Makefile2
-rw-r--r--drivers/staging/media/rkisp1/TODO6
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-capture.c101
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-common.h16
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-dev.c114
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-isp.c94
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-resizer.c36
-rw-r--r--drivers/staging/media/rkvdec/Kconfig16
-rw-r--r--drivers/staging/media/rkvdec/Makefile3
-rw-r--r--drivers/staging/media/rkvdec/TODO11
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c1156
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-regs.h223
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c1103
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.h121
-rw-r--r--drivers/staging/media/soc_camera/soc-camera.rst171
-rw-r--r--drivers/staging/media/sunxi/cedrus/Kconfig5
-rw-r--r--drivers/staging/media/tegra-video/Kconfig12
-rw-r--r--drivers/staging/media/tegra-video/Makefile8
-rw-r--r--drivers/staging/media/tegra-video/TODO11
-rw-r--r--drivers/staging/media/tegra-video/csi.c539
-rw-r--r--drivers/staging/media/tegra-video/csi.h147
-rw-r--r--drivers/staging/media/tegra-video/tegra210.c978
-rw-r--r--drivers/staging/media/tegra-video/vi.c1074
-rw-r--r--drivers/staging/media/tegra-video/vi.h257
-rw-r--r--drivers/staging/media/tegra-video/video.c155
-rw-r--r--drivers/staging/media/tegra-video/video.h29
-rw-r--r--drivers/staging/media/usbvision/Kconfig2
-rw-r--r--drivers/staging/media/usbvision/usbvision-core.c2
-rw-r--r--drivers/staging/most/usb/Kconfig2
-rw-r--r--drivers/staging/most/usb/usb.c305
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi9
-rw-r--r--drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt28
-rw-r--r--drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml36
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c64
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c2
-rw-r--r--drivers/staging/pi433/pi433_if.c1
-rw-r--r--drivers/staging/qlge/qlge_dbg.c7
-rw-r--r--drivers/staging/qlge/qlge_main.c476
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c99
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c33
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c19
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c54
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_hwconfig.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c62
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c3
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c116
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c24
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c18
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c126
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c158
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h2
-rw-r--r--drivers/staging/rtl8712/usb_halinit.c2
-rw-r--r--drivers/staging/rtl8712/wifi.h9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c22
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c45
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c13
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h4
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c44
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_data.h8
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c58
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c33
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c26
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c8
-rw-r--r--drivers/staging/sm750fb/sm750.c154
-rw-r--r--drivers/staging/sm750fb/sm750.h21
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/speakup/speakup_decext.c4
-rw-r--r--drivers/staging/speakup/speakup_decpc.c4
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c5
-rw-r--r--drivers/staging/speakup/speakup_dummy.c4
-rw-r--r--drivers/staging/speakup/speakup_soft.c4
-rw-r--r--drivers/staging/speakup/spk_types.h3
-rw-r--r--drivers/staging/speakup/spkguide.txt7
-rw-r--r--drivers/staging/speakup/sysfs-driver-speakup6
-rw-r--r--drivers/staging/speakup/varhandlers.c1
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c383
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h62
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c97
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-common.h18
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h14
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h81
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c7
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c33
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c19
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h7
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c166
-rw-r--r--drivers/staging/vt6655/Makefile3
-rw-r--r--drivers/staging/vt6655/baseband.c320
-rw-r--r--drivers/staging/vt6655/baseband.h37
-rw-r--r--drivers/staging/vt6655/card.c145
-rw-r--r--drivers/staging/vt6655/card.h4
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/device_main.c37
-rw-r--r--drivers/staging/vt6655/rf.c4
-rw-r--r--drivers/staging/vt6655/rxtx.c252
-rw-r--r--drivers/staging/vt6656/Makefile6
-rw-r--r--drivers/staging/vt6656/baseband.c620
-rw-r--r--drivers/staging/vt6656/baseband.h17
-rw-r--r--drivers/staging/vt6656/card.c570
-rw-r--r--drivers/staging/vt6656/card.h20
-rw-r--r--drivers/staging/vt6656/device.h20
-rw-r--r--drivers/staging/vt6656/firmware.c106
-rw-r--r--drivers/staging/vt6656/firmware.h25
-rw-r--r--drivers/staging/vt6656/key.c47
-rw-r--r--drivers/staging/vt6656/key.h13
-rw-r--r--drivers/staging/vt6656/mac.c128
-rw-r--r--drivers/staging/vt6656/mac.h28
-rw-r--r--drivers/staging/vt6656/main_usb.c181
-rw-r--r--drivers/staging/vt6656/power.c34
-rw-r--r--drivers/staging/vt6656/power.h2
-rw-r--r--drivers/staging/vt6656/rf.c463
-rw-r--r--drivers/staging/vt6656/rf.h3
-rw-r--r--drivers/staging/vt6656/rxtx.c674
-rw-r--r--drivers/staging/vt6656/rxtx.h20
-rw-r--r--drivers/staging/vt6656/usbpipe.c70
-rw-r--r--drivers/staging/vt6656/usbpipe.h11
-rw-r--r--drivers/staging/vt6656/wcmd.c3
-rw-r--r--drivers/staging/wfx/Makefile1
-rw-r--r--drivers/staging/wfx/TODO51
-rw-r--r--drivers/staging/wfx/bh.c50
-rw-r--r--drivers/staging/wfx/bh.h1
-rw-r--r--drivers/staging/wfx/bus.h2
-rw-r--r--drivers/staging/wfx/bus_sdio.c86
-rw-r--r--drivers/staging/wfx/bus_spi.c44
-rw-r--r--drivers/staging/wfx/data_rx.c16
-rw-r--r--drivers/staging/wfx/data_rx.h3
-rw-r--r--drivers/staging/wfx/data_tx.c352
-rw-r--r--drivers/staging/wfx/data_tx.h8
-rw-r--r--drivers/staging/wfx/debug.c70
-rw-r--r--drivers/staging/wfx/fwio.c14
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h623
-rw-r--r--drivers/staging/wfx/hif_api_general.h495
-rw-r--r--drivers/staging/wfx/hif_api_mib.h671
-rw-r--r--drivers/staging/wfx/hif_rx.c221
-rw-r--r--drivers/staging/wfx/hif_tx.c119
-rw-r--r--drivers/staging/wfx/hif_tx.h10
-rw-r--r--drivers/staging/wfx/hif_tx_mib.c386
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h436
-rw-r--r--drivers/staging/wfx/hwio.c18
-rw-r--r--drivers/staging/wfx/key.c71
-rw-r--r--drivers/staging/wfx/key.h2
-rw-r--r--drivers/staging/wfx/main.c78
-rw-r--r--drivers/staging/wfx/main.h4
-rw-r--r--drivers/staging/wfx/queue.c533
-rw-r--r--drivers/staging/wfx/queue.h42
-rw-r--r--drivers/staging/wfx/scan.c17
-rw-r--r--drivers/staging/wfx/sta.c871
-rw-r--r--drivers/staging/wfx/sta.h38
-rw-r--r--drivers/staging/wfx/traces.h31
-rw-r--r--drivers/staging/wfx/wfx.h47
-rw-r--r--drivers/staging/wilc1000/cfg80211.c36
-rw-r--r--drivers/staging/wilc1000/cfg80211.h5
-rw-r--r--drivers/staging/wilc1000/hif.c4
-rw-r--r--drivers/staging/wilc1000/netdev.c21
-rw-r--r--drivers/staging/wilc1000/netdev.h9
-rw-r--r--drivers/target/iscsi/Kconfig2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c35
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c30
-rw-r--r--drivers/target/loopback/tcm_loop.c36
-rw-r--r--drivers/target/target_core_alua.c10
-rw-r--r--drivers/target/target_core_configfs.c82
-rw-r--r--drivers/target/target_core_device.c13
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_pscsi.c6
-rw-r--r--drivers/target/target_core_tpg.c3
-rw-r--r--drivers/target/target_core_transport.c7
-rw-r--r--drivers/target/target_core_user.c177
-rw-r--r--drivers/tee/Kconfig2
-rw-r--r--drivers/tee/optee/call.c6
-rw-r--r--drivers/tee/tee_core.c159
-rw-r--r--drivers/tee/tee_shm.c31
-rw-r--r--drivers/thermal/imx_sc_thermal.c2
-rw-r--r--drivers/thunderbolt/Kconfig1
-rw-r--r--drivers/thunderbolt/icm.c22
-rw-r--r--drivers/thunderbolt/nhi.c5
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/switch.c11
-rw-r--r--drivers/tty/hvc/hvc_console.c23
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/mxser.c7
-rw-r--r--drivers/tty/n_gsm.c39
-rw-r--r--drivers/tty/n_hdlc.c7
-rw-r--r--drivers/tty/rocket.c10
-rw-r--r--drivers/tty/serial/8250/8250_core.c18
-rw-r--r--drivers/tty/serial/8250/8250_early.c23
-rw-r--r--drivers/tty/serial/8250/8250_exar.c65
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c13
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/8250/8250_port.c9
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/8250/serial_cs.c6
-rw-r--r--drivers/tty/serial/Kconfig16
-rw-r--r--drivers/tty/serial/amba-pl011.c33
-rw-r--r--drivers/tty/serial/ar933x_uart.c6
-rw-r--r--drivers/tty/serial/atmel_serial.c6
-rw-r--r--drivers/tty/serial/fsl_lpuart.c27
-rw-r--r--drivers/tty/serial/imx.c13
-rw-r--r--drivers/tty/serial/kgdboc.c318
-rw-r--r--drivers/tty/serial/lantiq.c40
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c1
-rw-r--r--drivers/tty/serial/omap-serial.c52
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c39
-rw-r--r--drivers/tty/serial/samsung_tty.c84
-rw-r--r--drivers/tty/serial/sc16is7xx.c73
-rw-r--r--drivers/tty/serial/serial_core.c22
-rw-r--r--drivers/tty/serial/sh-sci.h1
-rw-r--r--drivers/tty/serial/sifive.c1
-rw-r--r--drivers/tty/serial/stm32-usart.c74
-rw-r--r--drivers/tty/serial/stm32-usart.h1
-rw-r--r--drivers/tty/serial/xilinx_uartps.c12
-rw-r--r--drivers/tty/sysrq.c68
-rw-r--r--drivers/tty/vt/keyboard.c26
-rw-r--r--drivers/tty/vt/selection.c133
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c3
-rw-r--r--drivers/uio/uio_hv_generic.c1
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c3
-rw-r--r--drivers/usb/cdns3/core.c47
-rw-r--r--drivers/usb/cdns3/core.h2
-rw-r--r--drivers/usb/cdns3/drd.c4
-rw-r--r--drivers/usb/cdns3/ep0.c7
-rw-r--r--drivers/usb/cdns3/gadget.c37
-rw-r--r--drivers/usb/chipidea/Kconfig37
-rw-r--r--drivers/usb/chipidea/Makefile13
-rw-r--r--drivers/usb/chipidea/ci.h1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c13
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c30
-rw-r--r--drivers/usb/chipidea/ci_hdrc_zevio.c67
-rw-r--r--drivers/usb/chipidea/core.c48
-rw-r--r--drivers/usb/chipidea/udc.c170
-rw-r--r--drivers/usb/chipidea/udc.h6
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c334
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/usblp.c5
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/devio.c25
-rw-r--r--drivers/usb/core/hcd-pci.c7
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/otg_whitelist.h2
-rw-r--r--drivers/usb/core/sysfs.c6
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc2/core.c23
-rw-r--r--drivers/usb/dwc2/core.h6
-rw-r--r--drivers/usb/dwc2/core_intr.c7
-rw-r--r--drivers/usb/dwc2/debug.h2
-rw-r--r--drivers/usb/dwc2/hcd.h2
-rw-r--r--drivers/usb/dwc2/hw.h3
-rw-r--r--drivers/usb/dwc2/params.c19
-rw-r--r--drivers/usb/dwc2/platform.c39
-rw-r--r--drivers/usb/dwc3/Kconfig1
-rw-r--r--drivers/usb/dwc3/core.c62
-rw-r--r--drivers/usb/dwc3/core.h83
-rw-r--r--drivers/usb/dwc3/debug.h4
-rw-r--r--drivers/usb/dwc3/debugfs.c14
-rw-r--r--drivers/usb/dwc3/drd.c6
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c41
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c422
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c30
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c1
-rw-r--r--drivers/usb/dwc3/gadget.c472
-rw-r--r--drivers/usb/dwc3/gadget.h2
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/dwc3/io.h2
-rw-r--r--drivers/usb/dwc3/trace.h2
-rw-r--r--drivers/usb/early/xhci-dbc.c1
-rw-r--r--drivers/usb/early/xhci-dbc.h2
-rw-r--r--drivers/usb/gadget/composite.c78
-rw-r--r--drivers/usb/gadget/configfs.c17
-rw-r--r--drivers/usb/gadget/function/f_acm.c16
-rw-r--r--drivers/usb/gadget/function/f_eem.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_hid.c6
-rw-r--r--drivers/usb/gadget/function/f_serial.c16
-rw-r--r--drivers/usb/gadget/function/f_tcm.c3
-rw-r--r--drivers/usb/gadget/function/f_uvc.h2
-rw-r--r--drivers/usb/gadget/function/rndis.h2
-rw-r--r--drivers/usb/gadget/function/u_audio.h2
-rw-r--r--drivers/usb/gadget/function/u_ecm.h2
-rw-r--r--drivers/usb/gadget/function/u_eem.h2
-rw-r--r--drivers/usb/gadget/function/u_ether.h2
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h2
-rw-r--r--drivers/usb/gadget/function/u_fs.h2
-rw-r--r--drivers/usb/gadget/function/u_gether.h2
-rw-r--r--drivers/usb/gadget/function/u_hid.h2
-rw-r--r--drivers/usb/gadget/function/u_midi.h2
-rw-r--r--drivers/usb/gadget/function/u_ncm.h2
-rw-r--r--drivers/usb/gadget/function/u_phonet.h2
-rw-r--r--drivers/usb/gadget/function/u_printer.h2
-rw-r--r--drivers/usb/gadget/function/u_rndis.h2
-rw-r--r--drivers/usb/gadget/function/u_serial.c57
-rw-r--r--drivers/usb/gadget/function/u_serial.h4
-rw-r--r--drivers/usb/gadget/function/u_tcm.h2
-rw-r--r--drivers/usb/gadget/function/u_uac1.h2
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.h2
-rw-r--r--drivers/usb/gadget/function/u_uac2.h2
-rw-r--r--drivers/usb/gadget/function/u_uvc.h2
-rw-r--r--drivers/usb/gadget/function/uvc.h4
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c4
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.h2
-rw-r--r--drivers/usb/gadget/function/uvc_video.c76
-rw-r--r--drivers/usb/gadget/function/uvc_video.h4
-rw-r--r--drivers/usb/gadget/legacy/audio.c4
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c4
-rw-r--r--drivers/usb/gadget/legacy/inode.c3
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c14
-rw-r--r--drivers/usb/gadget/legacy/ncm.c4
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c315
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c16
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c236
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h12
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c116
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h12
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c27
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c1
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c11
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c2
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c2
-rw-r--r--drivers/usb/gadget/udc/net2272.c4
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c2
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c4
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c148
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c1
-rw-r--r--drivers/usb/gadget/usbstring.c24
-rw-r--r--drivers/usb/host/Kconfig29
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-brcm.c280
-rw-r--r--drivers/usb/host/ehci-fsl.h2
-rw-r--r--drivers/usb/host/ehci-mv.c12
-rw-r--r--drivers/usb/host/ehci-mxc.c15
-rw-r--r--drivers/usb/host/ehci-pci.c6
-rw-r--r--drivers/usb/host/ehci-platform.c4
-rw-r--r--drivers/usb/host/ehci-tegra.c1
-rw-r--r--drivers/usb/host/ehci.h2
-rw-r--r--drivers/usb/host/fhci.h2
-rw-r--r--drivers/usb/host/imx21-hcd.h2
-rw-r--r--drivers/usb/host/ohci-pci.c9
-rw-r--r--drivers/usb/host/ohci-platform.c5
-rw-r--r--drivers/usb/host/ohci-sm501.c7
-rw-r--r--drivers/usb/host/ohci.h2
-rw-r--r--drivers/usb/host/pci-quirks.c24
-rw-r--r--drivers/usb/host/r8a66597.h2
-rw-r--r--drivers/usb/host/u132-hcd.c10
-rw-r--r--drivers/usb/host/uhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-debugfs.h2
-rw-r--r--drivers/usb/host/xhci-ext-caps.h2
-rw-r--r--drivers/usb/host/xhci-mtk.h2
-rw-r--r--drivers/usb/host/xhci-mvebu.h2
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c645
-rw-r--r--drivers/usb/host/xhci-pci.c47
-rw-r--r--drivers/usb/host/xhci-pci.h28
-rw-r--r--drivers/usb/host/xhci-plat.c24
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-rcar.h2
-rw-r--r--drivers/usb/host/xhci-ring.c4
-rw-r--r--drivers/usb/host/xhci-trace.h2
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/isp1760/isp1760-core.h2
-rw-r--r--drivers/usb/isp1760/isp1760-regs.h2
-rw-r--r--drivers/usb/isp1760/isp1760-udc.h2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.h2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_struct.h2
-rw-r--r--drivers/usb/misc/usb_u132.h2
-rw-r--r--drivers/usb/mtu3/mtu3.h2
-rw-r--r--drivers/usb/mtu3/mtu3_debug.h2
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c4
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h2
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h2
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h2
-rw-r--r--drivers/usb/mtu3/mtu3_trace.h2
-rw-r--r--drivers/usb/musb/davinci.h2
-rw-r--r--drivers/usb/musb/jz4740.c4
-rw-r--r--drivers/usb/musb/mediatek.c6
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_debug.h2
-rw-r--r--drivers/usb/musb/musb_debugfs.c10
-rw-r--r--drivers/usb/musb/musb_dma.h2
-rw-r--r--drivers/usb/musb/musb_gadget.h2
-rw-r--r--drivers/usb/musb/musb_host.c10
-rw-r--r--drivers/usb/musb/musb_host.h2
-rw-r--r--drivers/usb/musb/musb_io.h2
-rw-r--r--drivers/usb/musb/musb_regs.h2
-rw-r--r--drivers/usb/musb/musb_trace.h2
-rw-r--r--drivers/usb/musb/omap2430.h2
-rw-r--r--drivers/usb/musb/tusb6010.h2
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h2
-rw-r--r--drivers/usb/phy/phy-jz4770.c12
-rw-r--r--drivers/usb/phy/phy-mv-usb.h2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c12
-rw-r--r--drivers/usb/renesas_usbhs/common.h2
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h2
-rw-r--r--drivers/usb/renesas_usbhs/mod.h2
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h2
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.h2
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.h2
-rw-r--r--drivers/usb/renesas_usbhs/rza.h2
-rw-r--r--drivers/usb/roles/class.c4
-rw-r--r--drivers/usb/serial/belkin_sa.h2
-rw-r--r--drivers/usb/serial/ch341.c68
-rw-r--r--drivers/usb/serial/io_16654.h2
-rw-r--r--drivers/usb/serial/io_edgeport.h2
-rw-r--r--drivers/usb/serial/io_ionsp.h2
-rw-r--r--drivers/usb/serial/io_ti.h2
-rw-r--r--drivers/usb/serial/io_usbvend.h2
-rw-r--r--drivers/usb/serial/iuu_phoenix.h2
-rw-r--r--drivers/usb/serial/mct_u232.h2
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/oti6858.h2
-rw-r--r--drivers/usb/serial/pl2303.h2
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/serial/visor.h2
-rw-r--r--drivers/usb/serial/whiteheat.h2
-rw-r--r--drivers/usb/storage/debug.h2
-rw-r--r--drivers/usb/storage/initializers.h2
-rw-r--r--drivers/usb/storage/protocol.h2
-rw-r--r--drivers/usb/storage/scsiglue.h2
-rw-r--r--drivers/usb/storage/sierra_ms.c4
-rw-r--r--drivers/usb/storage/transport.h2
-rw-r--r--drivers/usb/storage/unusual_alauda.h2
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_datafab.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/storage/unusual_ene_ub6250.h2
-rw-r--r--drivers/usb/storage/unusual_freecom.h2
-rw-r--r--drivers/usb/storage/unusual_isd200.h2
-rw-r--r--drivers/usb/storage/unusual_jumpshot.h2
-rw-r--r--drivers/usb/storage/unusual_karma.h2
-rw-r--r--drivers/usb/storage/unusual_onetouch.h2
-rw-r--r--drivers/usb/storage/unusual_realtek.h2
-rw-r--r--drivers/usb/storage/unusual_sddr09.h2
-rw-r--r--drivers/usb/storage/unusual_sddr55.h2
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/usb/storage/unusual_usbat.h2
-rw-r--r--drivers/usb/storage/usb.h2
-rw-r--r--drivers/usb/typec/Kconfig3
-rw-r--r--drivers/usb/typec/class.c36
-rw-r--r--drivers/usb/typec/mux/Kconfig2
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c60
-rw-r--r--drivers/usb/typec/tcpm/Kconfig2
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c32
-rw-r--r--drivers/usb/typec/tcpm/fusb302_reg.h2
-rw-r--r--drivers/usb/typec/tps6598x.c64
-rw-r--r--drivers/usb/typec/ucsi/Makefile4
-rw-r--r--drivers/usb/typec/ucsi/psy.c241
-rw-r--r--drivers/usb/typec/ucsi/trace.c10
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c41
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h26
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c15
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c353
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c50
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c14
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h15
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c24
-rw-r--r--drivers/vfio/vfio.c13
-rw-r--r--drivers/vfio/vfio_iommu_type1.c609
-rw-r--r--drivers/vhost/net.c1
-rw-r--r--drivers/vhost/scsi.c1
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/video/backlight/backlight.c21
-rw-r--r--drivers/video/backlight/l4f00242t03.c45
-rw-r--r--drivers/video/backlight/lp855x_bl.c20
-rw-r--r--drivers/video/backlight/qcom-wled.c589
-rw-r--r--drivers/video/fbdev/Kconfig4
-rw-r--r--drivers/video/fbdev/amifb.c4
-rw-r--r--drivers/video/fbdev/arcfb.c10
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c1
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c14
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c6
-rw-r--r--drivers/video/fbdev/controlfb.c803
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/cyber2000fb.c2
-rw-r--r--drivers/video/fbdev/i810/i810_main.c10
-rw-r--r--drivers/video/fbdev/imxfb.c27
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.c22
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.h2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb_accel.c2
-rw-r--r--drivers/video/fbdev/mx3fb.c20
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c14
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c114
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h20
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c43
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c8
-rw-r--r--drivers/video/fbdev/pm2fb.c2
-rw-r--r--drivers/video/fbdev/pm3fb.c8
-rw-r--r--drivers/video/fbdev/ps3fb.c4
-rw-r--r--drivers/video/fbdev/pxa168fb.c5
-rw-r--r--drivers/video/fbdev/riva/riva_hw.c18
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c14
-rw-r--r--drivers/video/fbdev/sa1100fb.c20
-rw-r--r--drivers/video/fbdev/sa1100fb.h3
-rw-r--r--drivers/video/fbdev/savage/savagefb.h2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c102
-rw-r--r--drivers/video/fbdev/udlfb.c6
-rw-r--r--drivers/video/fbdev/uvesafb.c14
-rw-r--r--drivers/video/fbdev/valkyriefb.c4
-rw-r--r--drivers/video/fbdev/vesafb.c16
-rw-r--r--drivers/video/fbdev/via/debug.h6
-rw-r--r--drivers/video/fbdev/via/viafbdev.c2
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c1
-rw-r--r--drivers/video/fbdev/w100fb.c2
-rw-r--r--drivers/video/hdmi.c65
-rw-r--r--drivers/visorbus/controlvmchannel.h2
-rw-r--r--drivers/visorbus/vbuschannel.h2
-rw-r--r--drivers/visorbus/visorbus_private.h2
-rw-r--r--drivers/w1/masters/omap_hdq.c82
-rw-r--r--drivers/w1/slaves/w1_ds2430.c2
-rw-r--r--drivers/w1/slaves/w1_therm.c1624
-rw-r--r--drivers/watchdog/Kconfig15
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/arm_smc_wdt.c188
-rw-r--r--drivers/watchdog/da9062_wdt.c32
-rw-r--r--drivers/watchdog/da9063_wdt.c20
-rw-r--r--drivers/watchdog/iTCO_wdt.c25
-rw-r--r--drivers/watchdog/imx2_wdt.c2
-rw-r--r--drivers/watchdog/imx_sc_wdt.c5
-rw-r--r--drivers/watchdog/intel-mid_wdt.c53
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/xen/privcmd.c1
5547 files changed, 487910 insertions, 91309 deletions
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 7a265c2171c0..6041974c7627 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -745,7 +745,7 @@ static const struct acpi_debugger_ops acpi_aml_debugger = {
.notify_command_complete = acpi_aml_notify_command_complete,
};
-int __init acpi_aml_init(void)
+static int __init acpi_aml_init(void)
{
int ret;
@@ -771,7 +771,7 @@ int __init acpi_aml_init(void)
return 0;
}
-void __exit acpi_aml_exit(void)
+static void __exit acpi_aml_exit(void)
{
if (acpi_aml_initialized) {
acpi_unregister_debugger(&acpi_aml_debugger);
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 953437a216f6..48e5059d67ca 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -151,10 +151,11 @@ void acpi_init_lpit(void)
struct acpi_table_lpit *lpit;
status = acpi_get_table(ACPI_SIG_LPIT, 0, (struct acpi_table_header **)&lpit);
-
if (ACPI_FAILURE(status))
return;
lpit_process((u64)lpit + sizeof(*lpit),
(u64)lpit + lpit->header.length);
+
+ acpi_put_table((struct acpi_table_header *)lpit);
}
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index dee999938213..5e2bfbcf526f 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1041,7 +1041,7 @@ static int acpi_lpss_do_suspend_late(struct device *dev)
{
int ret;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_suspend_late(dev);
@@ -1093,6 +1093,9 @@ static int acpi_lpss_resume_early(struct device *dev)
if (pdata->dev_desc->resume_from_noirq)
return 0;
+ if (dev_pm_skip_resume(dev))
+ return 0;
+
return acpi_lpss_do_resume_early(dev);
}
@@ -1102,12 +1105,9 @@ static int acpi_lpss_resume_noirq(struct device *dev)
int ret;
/* Follow acpi_subsys_resume_noirq(). */
- if (dev_pm_may_skip_resume(dev))
+ if (dev_pm_skip_resume(dev))
return 0;
- if (dev_pm_smart_suspend_and_suspended(dev))
- pm_runtime_set_active(dev);
-
ret = pm_generic_resume_noirq(dev);
if (ret)
return ret;
@@ -1169,7 +1169,7 @@ static int acpi_lpss_poweroff_late(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
if (pdata->dev_desc->resume_from_noirq)
@@ -1182,7 +1182,7 @@ static int acpi_lpss_poweroff_noirq(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
if (pdata->dev_desc->resume_from_noirq) {
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 33a4bcdaa4d7..7d45cce0c3c1 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -624,7 +624,7 @@ static int acpi_tad_probe(struct platform_device *pdev)
*/
device_init_wakeup(dev, true);
dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED);
+ DPM_FLAG_MAY_SKIP_RESUME);
/*
* The platform bus type layer tells the ACPI PM domain powers up the
* device, so set the runtime PM status of it to "active".
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 6e9ec6e3fe47..5c1e9ea43123 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -73,6 +73,7 @@ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
}
if (acpi_watchdog_uses_rtc(wdat)) {
+ acpi_put_table((struct acpi_table_header *)wdat);
pr_info("Skipping WDAT on this system because it uses RTC SRAM\n");
return NULL;
}
@@ -117,12 +118,12 @@ void __init acpi_watchdog_init(void)
/* Watchdog disabled by BIOS */
if (!(wdat->flags & ACPI_WDAT_ENABLED))
- return;
+ goto fail_put_wdat;
/* Skip legacy PCI WDT devices */
if (wdat->pci_segment != 0xff || wdat->pci_bus != 0xff ||
wdat->pci_device != 0xff || wdat->pci_function != 0xff)
- return;
+ goto fail_put_wdat;
INIT_LIST_HEAD(&resource_list);
@@ -188,4 +189,6 @@ void __init acpi_watchdog_init(void)
fail_free_resource_list:
resource_list_free(&resource_list);
+fail_put_wdat:
+ acpi_put_table((struct acpi_table_header *)wdat);
}
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 38ffa2c0a496..1030a0ce1599 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -290,6 +290,7 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
#ifdef ACPI_DEBUGGER
ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_next_cmd_num, 1);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_ini_methods);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_region_support);
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index cd0f5df0ea23..2cbb56652f1c 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -640,10 +640,10 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_NIC", METHOD_0ARGS, /* ACPI 6.3 */
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
- {{"_NIG", METHOD_1ARGS(ACPI_TYPE_BUFFER), /* ACPI 6.3 */
+ {{"_NIG", METHOD_0ARGS, /* ACPI 6.3 */
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
- {{"_NIH", METHOD_0ARGS, /* ACPI 6.3 */
+ {{"_NIH", METHOD_1ARGS(ACPI_TYPE_BUFFER), /* ACPI 6.3 */
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
{{"_NTT", METHOD_0ARGS,
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index bb9600b867ee..f5fba14461a6 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -27,7 +27,6 @@ static HISTORY_INFO acpi_gbl_history_buffer[HISTORY_SIZE];
static u16 acpi_gbl_lo_history = 0;
static u16 acpi_gbl_num_history = 0;
static u16 acpi_gbl_next_history_index = 0;
-u32 acpi_gbl_next_cmd_num = 1;
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index c901f5aec739..fa768b3a989e 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -177,7 +177,10 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
arg->common.value.string, ACPI_TYPE_ANY,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
- if (ACPI_FAILURE(status)) {
+ if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE)
+ && status == AE_ALREADY_EXISTS) {
+ status = AE_OK;
+ } else if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(walk_state->scope_info,
arg->common.value.string, status);
return_ACPI_STATUS(status);
@@ -514,13 +517,20 @@ acpi_ds_create_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
if (info.region_node->object->region.space_id ==
- ACPI_ADR_SPACE_PLATFORM_COMM
- && !(region_node->object->field.internal_pcc_buffer =
- ACPI_ALLOCATE_ZEROED(info.region_node->object->region.
- length))) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ ACPI_ADR_SPACE_PLATFORM_COMM) {
+ region_node->object->field.internal_pcc_buffer =
+ ACPI_ALLOCATE_ZEROED(info.region_node->object->region.
+ length);
+ if (!region_node->object->field.internal_pcc_buffer) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
}
+
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index e85eb31e5075..3323a2ba6a31 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -22,7 +22,7 @@ ACPI_MODULE_NAME("exfield")
*/
#define ACPI_INVALID_PROTOCOL_ID 0x80
#define ACPI_MAX_PROTOCOL_ID 0x0F
-const u8 acpi_protocol_lengths[] = {
+static const u8 acpi_protocol_lengths[] = {
ACPI_INVALID_PROTOCOL_ID, /* 0 - reserved */
ACPI_INVALID_PROTOCOL_ID, /* 1 - reserved */
0x00, /* 2 - ATTRIB_QUICK */
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 1155fb9dcc3a..19e50fcbf4d6 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -119,7 +119,7 @@ static int __init bert_init(void)
rc = bert_check_table(bert_tab);
if (rc) {
pr_err(FW_BUG "table invalid.\n");
- return rc;
+ goto out_put_bert_tab;
}
region_len = bert_tab->region_length;
@@ -127,7 +127,7 @@ static int __init bert_init(void)
rc = apei_resources_add(&bert_resources, bert_tab->address,
region_len, true);
if (rc)
- return rc;
+ goto out_put_bert_tab;
rc = apei_resources_request(&bert_resources, "APEI BERT");
if (rc)
goto out_fini;
@@ -142,6 +142,8 @@ static int __init bert_init(void)
apei_resources_release(&bert_resources);
out_fini:
apei_resources_fini(&bert_resources);
+out_put_bert_tab:
+ acpi_put_table((struct acpi_table_header *)bert_tab);
return rc;
}
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 086373f8ccb1..133156759551 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -692,7 +692,7 @@ static int __init einj_init(void)
rc = einj_check_table(einj_tab);
if (rc) {
pr_warn(FW_BUG "Invalid EINJ table.\n");
- return -EINVAL;
+ goto err_put_table;
}
rc = -ENOMEM;
@@ -760,6 +760,8 @@ err_release:
err_fini:
apei_resources_fini(&einj_resources);
debugfs_remove_recursive(einj_debug_dir);
+err_put_table:
+ acpi_put_table((struct acpi_table_header *)einj_tab);
return rc;
}
@@ -780,6 +782,7 @@ static void __exit einj_exit(void)
apei_resources_release(&einj_resources);
apei_resources_fini(&einj_resources);
debugfs_remove_recursive(einj_debug_dir);
+ acpi_put_table((struct acpi_table_header *)einj_tab);
}
module_init(einj_init);
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 2015a0967cbb..2e0b0fcad960 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1122,7 +1122,7 @@ static int __init erst_init(void)
rc = erst_check_table(erst_tab);
if (rc) {
pr_err(FW_BUG "ERST table is invalid.\n");
- goto err;
+ goto err_put_erst_tab;
}
apei_resources_init(&erst_resources);
@@ -1196,6 +1196,8 @@ err_release:
apei_resources_release(&erst_resources);
err_fini:
apei_resources_fini(&erst_resources);
+err_put_erst_tab:
+ acpi_put_table((struct acpi_table_header *)erst_tab);
err:
erst_disable = 1;
return rc;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 24c9642e8fc7..81bf71b10d44 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -40,6 +40,7 @@
#include <linux/sched/clock.h>
#include <linux/uuid.h>
#include <linux/ras.h>
+#include <linux/task_work.h>
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
@@ -167,12 +168,6 @@ int ghes_estatus_pool_init(int num_ghes)
if (!addr)
goto err_pool_alloc;
- /*
- * New allocation must be visible in all pgd before it can be found by
- * an NMI allocating from the pool.
- */
- vmalloc_sync_mappings();
-
rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
if (rc)
goto err_pool_add;
@@ -414,23 +409,46 @@ static void ghes_clear_estatus(struct ghes *ghes,
ghes_ack_error(ghes->generic_v2);
}
-static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
+/*
+ * Called as task_work before returning to user-space.
+ * Ensure any queued work has been done before we return to the context that
+ * triggered the notification.
+ */
+static void ghes_kick_task_work(struct callback_head *head)
+{
+ struct acpi_hest_generic_status *estatus;
+ struct ghes_estatus_node *estatus_node;
+ u32 node_len;
+
+ estatus_node = container_of(head, struct ghes_estatus_node, task_work);
+ if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
+ memory_failure_queue_kick(estatus_node->task_work_cpu);
+
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
+}
+
+static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+ int sev)
{
-#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
unsigned long pfn;
int flags = -1;
int sec_sev = ghes_severity(gdata->error_severity);
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
+ return false;
+
if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
- return;
+ return false;
pfn = mem_err->physical_addr >> PAGE_SHIFT;
if (!pfn_valid(pfn)) {
pr_warn_ratelimited(FW_WARN GHES_PFX
"Invalid address in generic error data: %#llx\n",
mem_err->physical_addr);
- return;
+ return false;
}
/* iff following two events can be handled properly by now */
@@ -440,9 +458,12 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
flags = 0;
- if (flags != -1)
+ if (flags != -1) {
memory_failure_queue(pfn, flags);
-#endif
+ return true;
+ }
+
+ return false;
}
/*
@@ -490,7 +511,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
#endif
}
-static void ghes_do_proc(struct ghes *ghes,
+static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
@@ -498,6 +519,7 @@ static void ghes_do_proc(struct ghes *ghes,
guid_t *sec_type;
const guid_t *fru_id = &guid_null;
char *fru_text = "";
+ bool queued = false;
sev = ghes_severity(estatus->error_severity);
apei_estatus_for_each_section(estatus, gdata) {
@@ -515,7 +537,7 @@ static void ghes_do_proc(struct ghes *ghes,
ghes_edac_report_mem_error(sev, mem_err);
arch_apei_report_mem_error(sev, mem_err);
- ghes_handle_memory_failure(gdata, sev);
+ queued = ghes_handle_memory_failure(gdata, sev);
}
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
ghes_handle_aer(gdata);
@@ -532,6 +554,8 @@ static void ghes_do_proc(struct ghes *ghes,
gdata->error_data_length);
}
}
+
+ return queued;
}
static void __ghes_print_estatus(const char *pfx,
@@ -827,7 +851,9 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
+ bool task_work_pending;
u32 len, node_len;
+ int ret;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -842,14 +868,26 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
- ghes_do_proc(estatus_node->ghes, estatus);
+ task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
if (!ghes_estatus_cached(estatus)) {
generic = estatus_node->generic;
if (ghes_print_estatus(NULL, generic, estatus))
ghes_estatus_cache_add(generic, estatus);
}
- gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
- node_len);
+
+ if (task_work_pending && current->mm != &init_mm) {
+ estatus_node->task_work.func = ghes_kick_task_work;
+ estatus_node->task_work_cpu = smp_processor_id();
+ ret = task_work_add(current, &estatus_node->task_work,
+ true);
+ if (ret)
+ estatus_node->task_work.func = NULL;
+ }
+
+ if (!estatus_node->task_work.func)
+ gen_pool_free(ghes_estatus_pool,
+ (unsigned long)estatus_node, node_len);
+
llnode = next;
}
}
@@ -909,6 +947,7 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
+ estatus_node->task_work.func = NULL;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 822402480f7d..953a2fae8b15 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -243,8 +243,8 @@ void __init acpi_hest_init(void)
} else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
- rc = -EINVAL;
- goto err;
+ hest_disable = HEST_DISABLED;
+ return;
}
rc = apei_hest_parse(hest_parse_cmc, NULL);
@@ -266,4 +266,5 @@ void __init acpi_hest_init(void)
return;
err:
hest_disable = HEST_DISABLED;
+ acpi_put_table((struct acpi_table_header *)hest_tab);
}
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 01962c63a711..f2d0e5915dab 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -394,7 +394,7 @@ static int __init gtdt_sbsa_gwdt_init(void)
*/
ret = acpi_gtdt_init(table, &timer_count);
if (ret || !timer_count)
- return ret;
+ goto out_put_gtdt;
for_each_platform_timer(platform_timer) {
if (is_non_secure_watchdog(platform_timer)) {
@@ -408,6 +408,8 @@ static int __init gtdt_sbsa_gwdt_init(void)
if (gwdt_count)
pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count);
+out_put_gtdt:
+ acpi_put_table(table);
return ret;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 7d04424189df..28a6b387e80e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -299,61 +299,8 @@ out:
return status;
}
-struct iort_workaround_oem_info {
- char oem_id[ACPI_OEM_ID_SIZE + 1];
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
- u32 oem_revision;
-};
-
-static bool apply_id_count_workaround;
-
-static struct iort_workaround_oem_info wa_info[] __initdata = {
- {
- .oem_id = "HISI ",
- .oem_table_id = "HIP07 ",
- .oem_revision = 0,
- }, {
- .oem_id = "HISI ",
- .oem_table_id = "HIP08 ",
- .oem_revision = 0,
- }
-};
-
-static void __init
-iort_check_id_count_workaround(struct acpi_table_header *tbl)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
- if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
- !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- wa_info[i].oem_revision == tbl->oem_revision) {
- apply_id_count_workaround = true;
- pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n");
- break;
- }
- }
-}
-
-static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map)
-{
- u32 map_max = map->input_base + map->id_count;
-
- /*
- * The IORT specification revision D (Section 3, table 4, page 9) says
- * Number of IDs = The number of IDs in the range minus one, but the
- * IORT code ignored the "minus one", and some firmware did that too,
- * so apply a workaround here to keep compatible with both the spec
- * compliant and non-spec compliant firmwares.
- */
- if (apply_id_count_workaround)
- map_max--;
-
- return map_max;
-}
-
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
- u32 *rid_out)
+ u32 *rid_out, bool check_overlap)
{
/* Single mapping does not care for input id */
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
@@ -368,10 +315,37 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return -ENXIO;
}
- if (rid_in < map->input_base || rid_in > iort_get_map_max(map))
+ if (rid_in < map->input_base ||
+ (rid_in > map->input_base + map->id_count))
return -ENXIO;
+ if (check_overlap) {
+ /*
+ * We already found a mapping for this input ID at the end of
+ * another region. If it coincides with the start of this
+ * region, we assume the prior match was due to the off-by-1
+ * issue mentioned below, and allow it to be superseded.
+ * Otherwise, things are *really* broken, and we just disregard
+ * duplicate matches entirely to retain compatibility.
+ */
+ pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
+ map, rid_in);
+ if (rid_in != map->input_base)
+ return -ENXIO;
+
+ pr_err(FW_BUG "applying workaround.\n");
+ }
+
*rid_out = map->output_base + (rid_in - map->input_base);
+
+ /*
+ * Due to confusion regarding the meaning of the id_count field (which
+ * carries the number of IDs *minus 1*), we may have to disregard this
+ * match if it is at the end of the range, and overlaps with the start
+ * of another one.
+ */
+ if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
+ return -EAGAIN;
return 0;
}
@@ -414,6 +388,7 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
+ struct acpi_iort_pmcg *pmcg;
switch (node->type) {
case ACPI_IORT_NODE_SMMU_V3:
@@ -441,6 +416,10 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
return smmu->id_mapping_index;
case ACPI_IORT_NODE_PMCG:
+ pmcg = (struct acpi_iort_pmcg *)node->node_data;
+ if (pmcg->overflow_gsiv || node->mapping_count == 0)
+ return -EINVAL;
+
return 0;
default:
return -EINVAL;
@@ -456,7 +435,8 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
/* Parse the ID mapping tree to find specified node type */
while (node) {
struct acpi_iort_id_mapping *map;
- int i, index;
+ int i, index, rc = 0;
+ u32 out_ref = 0, map_id = id;
if (IORT_TYPE_MASK(node->type) & type_mask) {
if (id_out)
@@ -490,15 +470,18 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
if (i == index)
continue;
- if (!iort_id_map(map, node->type, id, &id))
+ rc = iort_id_map(map, node->type, map_id, &id, out_ref);
+ if (!rc)
break;
+ if (rc == -EAGAIN)
+ out_ref = map->output_reference;
}
- if (i == node->mapping_count)
+ if (i == node->mapping_count && !out_ref)
goto fail_map;
node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
- map->output_reference);
+ rc ? out_ref : map->output_reference);
}
fail_map:
@@ -789,15 +772,6 @@ void acpi_configure_pmsi_domain(struct device *dev)
dev_set_msi_domain(dev, msi_domain);
}
-static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
- void *data)
-{
- u32 *rid = data;
-
- *rid = alias;
- return 0;
-}
-
#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
@@ -1148,13 +1122,10 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
else
size = 1ULL << 32;
- if (dev_is_pci(dev)) {
- ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
- if (ret == -ENODEV)
- ret = rc_dma_get_range(dev, &size);
- } else {
- ret = nc_dma_get_range(dev, &size);
- }
+ ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+ if (ret == -ENODEV)
+ ret = dev_is_pci(dev) ? rc_dma_get_range(dev, &size)
+ : nc_dma_get_range(dev, &size);
if (!ret) {
/*
@@ -1692,6 +1663,10 @@ void __init acpi_iort_init(void)
{
acpi_status status;
+ /* iort_table will be used at runtime after the iort init,
+ * so we don't need to call acpi_put_table() to release
+ * the IORT table mapping.
+ */
status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
@@ -1703,6 +1678,5 @@ void __init acpi_iort_init(void)
return;
}
- iort_check_id_count_workaround(iort_table);
iort_init_platform_devices();
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 78cfc70cb320..3c35e57dd854 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -24,7 +24,6 @@
#define PREFIX "ACPI: "
#define ACPI_BUTTON_CLASS "button"
-#define ACPI_BUTTON_FILE_INFO "info"
#define ACPI_BUTTON_FILE_STATE "state"
#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
#define ACPI_BUTTON_NOTIFY_STATUS 0x80
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 8b2e89c20c11..7a99b19bb893 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -350,7 +350,7 @@ static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
*(u16 *)msg, ret);
}
-struct mbox_client cppc_mbox_cl = {
+static struct mbox_client cppc_mbox_cl = {
.tx_done = cppc_chan_tx_done,
.knows_txdone = true,
};
@@ -597,7 +597,7 @@ bool __weak cpc_ffh_supported(void)
*
* Return: 0 for success, errno for failure
*/
-int pcc_data_alloc(int pcc_ss_id)
+static int pcc_data_alloc(int pcc_ss_id)
{
if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
return -EINVAL;
@@ -846,6 +846,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
"acpi_cppc");
if (ret) {
per_cpu(cpc_desc_ptr, pr->id) = NULL;
+ kobject_put(&cpc_ptr->kobj);
goto out_free;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 5832bc10aca8..b44b12a931e7 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1084,7 +1084,7 @@ int acpi_subsys_suspend_late(struct device *dev)
{
int ret;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_suspend_late(dev);
@@ -1100,10 +1100,8 @@ int acpi_subsys_suspend_noirq(struct device *dev)
{
int ret;
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev->power.may_skip_resume = true;
+ if (dev_pm_skip_suspend(dev))
return 0;
- }
ret = pm_generic_suspend_noirq(dev);
if (ret)
@@ -1116,8 +1114,8 @@ int acpi_subsys_suspend_noirq(struct device *dev)
* acpi_subsys_complete() to take care of fixing up the device's state
* anyway, if need be.
*/
- dev->power.may_skip_resume = device_may_wakeup(dev) ||
- !device_can_wakeup(dev);
+ if (device_can_wakeup(dev) && !device_may_wakeup(dev))
+ dev->power.may_skip_resume = false;
return 0;
}
@@ -1129,17 +1127,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
*/
static int acpi_subsys_resume_noirq(struct device *dev)
{
- if (dev_pm_may_skip_resume(dev))
+ if (dev_pm_skip_resume(dev))
return 0;
- /*
- * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
- * during system suspend, so update their runtime PM status to "active"
- * as they will be put into D0 going forward.
- */
- if (dev_pm_smart_suspend_and_suspended(dev))
- pm_runtime_set_active(dev);
-
return pm_generic_resume_noirq(dev);
}
@@ -1153,7 +1143,12 @@ static int acpi_subsys_resume_noirq(struct device *dev)
*/
static int acpi_subsys_resume_early(struct device *dev)
{
- int ret = acpi_dev_resume(dev);
+ int ret;
+
+ if (dev_pm_skip_resume(dev))
+ return 0;
+
+ ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
@@ -1218,7 +1213,7 @@ static int acpi_subsys_poweroff_late(struct device *dev)
{
int ret;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_poweroff_late(dev);
@@ -1234,7 +1229,7 @@ static int acpi_subsys_poweroff_late(struct device *dev)
*/
static int acpi_subsys_poweroff_noirq(struct device *dev)
{
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
return pm_generic_poweroff_noirq(dev);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index e4e8b75d39f0..5fab7e350db8 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -10,12 +10,19 @@
#include <linux/platform_device.h>
/*
- * Presentation of attributes which are defined for INT3407. They are:
+ * Presentation of attributes which are defined for INT3407 and INT3532.
+ * They are:
* PMAX : Maximum platform power
* PSRC : Platform power source
* ARTG : Adapter rating
* CTYP : Charger type
* PBSS : Battery steady power
+ * PROP : Rest of worst case platform Power
+ * PBSS : Power Battery Steady State
+ * RBHF : High Frequency Impedance
+ * VBNL : Instantaneous No-Load Voltage
+ * CMPP : Current Discharge Capability
*/
#define DPTF_POWER_SHOW(name, object) \
static ssize_t name##_show(struct device *dev,\
@@ -39,12 +46,42 @@ DPTF_POWER_SHOW(platform_power_source, PSRC)
DPTF_POWER_SHOW(adapter_rating_mw, ARTG)
DPTF_POWER_SHOW(battery_steady_power_mw, PBSS)
DPTF_POWER_SHOW(charger_type, CTYP)
+DPTF_POWER_SHOW(rest_of_platform_power_mw, PROP)
+DPTF_POWER_SHOW(max_steady_state_power_mw, PBSS)
+DPTF_POWER_SHOW(high_freq_impedance_mohm, RBHF)
+DPTF_POWER_SHOW(no_load_voltage_mv, VBNL)
+DPTF_POWER_SHOW(current_discharge_capbility_ma, CMPP);
static DEVICE_ATTR_RO(max_platform_power_mw);
static DEVICE_ATTR_RO(platform_power_source);
static DEVICE_ATTR_RO(adapter_rating_mw);
static DEVICE_ATTR_RO(battery_steady_power_mw);
static DEVICE_ATTR_RO(charger_type);
+static DEVICE_ATTR_RO(rest_of_platform_power_mw);
+static DEVICE_ATTR_RO(max_steady_state_power_mw);
+static DEVICE_ATTR_RO(high_freq_impedance_mohm);
+static DEVICE_ATTR_RO(no_load_voltage_mv);
+static DEVICE_ATTR_RO(current_discharge_capbility_ma);
+
+static ssize_t prochot_confirm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi_dev = dev_get_drvdata(dev);
+ acpi_status status;
+ int seq_no;
+
+ if (kstrtouint(buf, 0, &seq_no) < 0)
+ return -EINVAL;
+
+ status = acpi_execute_simple_method(acpi_dev->handle, "PBOK", seq_no);
+ if (ACPI_SUCCESS(status))
+ return count;
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR_WO(prochot_confirm);
static struct attribute *dptf_power_attrs[] = {
&dev_attr_max_platform_power_mw.attr,
@@ -52,6 +89,8 @@ static struct attribute *dptf_power_attrs[] = {
&dev_attr_adapter_rating_mw.attr,
&dev_attr_battery_steady_power_mw.attr,
&dev_attr_charger_type.attr,
+ &dev_attr_rest_of_platform_power_mw.attr,
+ &dev_attr_prochot_confirm.attr,
NULL
};
@@ -60,10 +99,79 @@ static const struct attribute_group dptf_power_attribute_group = {
.name = "dptf_power"
};
+static struct attribute *dptf_battery_attrs[] = {
+ &dev_attr_max_platform_power_mw.attr,
+ &dev_attr_max_steady_state_power_mw.attr,
+ &dev_attr_high_freq_impedance_mohm.attr,
+ &dev_attr_no_load_voltage_mv.attr,
+ &dev_attr_current_discharge_capbility_ma.attr,
+ NULL
+};
+
+static const struct attribute_group dptf_battery_attribute_group = {
+ .attrs = dptf_battery_attrs,
+ .name = "dptf_battery"
+};
+
+#define MAX_POWER_CHANGED 0x80
+#define POWER_STATE_CHANGED 0x81
+#define STEADY_STATE_POWER_CHANGED 0x83
+#define POWER_PROP_CHANGE_EVENT 0x84
+#define IMPEDANCED_CHNGED 0x85
+#define VOLTAGE_CURRENT_CHANGED 0x86
+
+static long long dptf_participant_type(acpi_handle handle)
+{
+ unsigned long long ptype;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, "PTYP", NULL, &ptype);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return ptype;
+}
+
+static void dptf_power_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct platform_device *pdev = data;
+ char *attr;
+
+ switch (event) {
+ case POWER_STATE_CHANGED:
+ attr = "platform_power_source";
+ break;
+ case POWER_PROP_CHANGE_EVENT:
+ attr = "rest_of_platform_power_mw";
+ break;
+ case MAX_POWER_CHANGED:
+ attr = "max_platform_power_mw";
+ break;
+ case STEADY_STATE_POWER_CHANGED:
+ attr = "max_steady_state_power_mw";
+ break;
+ case VOLTAGE_CURRENT_CHANGED:
+ attr = "no_load_voltage_mv";
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported event [0x%x]\n", event);
+ return;
+ }
+
+ /*
+ * Notify that an attribute has changed, so that user space can read
+ * it again.
+ */
+ if (dptf_participant_type(handle) == 0x0CULL)
+ sysfs_notify(&pdev->dev.kobj, "dptf_battery", attr);
+ else
+ sysfs_notify(&pdev->dev.kobj, "dptf_power", attr);
+}
+
static int dptf_power_add(struct platform_device *pdev)
{
+ const struct attribute_group *attr_group;
struct acpi_device *acpi_dev;
- acpi_status status;
unsigned long long ptype;
int result;
@@ -71,17 +179,29 @@ static int dptf_power_add(struct platform_device *pdev)
if (!acpi_dev)
return -ENODEV;
- status = acpi_evaluate_integer(acpi_dev->handle, "PTYP", NULL, &ptype);
- if (ACPI_FAILURE(status))
+ ptype = dptf_participant_type(acpi_dev->handle);
+ if (ptype == 0x11)
+ attr_group = &dptf_power_attribute_group;
+ else if (ptype == 0x0C)
+ attr_group = &dptf_battery_attribute_group;
+ else
return -ENODEV;
- if (ptype != 0x11)
- return -ENODEV;
+ result = acpi_install_notify_handler(acpi_dev->handle,
+ ACPI_DEVICE_NOTIFY,
+ dptf_power_notify,
+ (void *)pdev);
+ if (result)
+ return result;
result = sysfs_create_group(&pdev->dev.kobj,
- &dptf_power_attribute_group);
- if (result)
+ attr_group);
+ if (result) {
+ acpi_remove_notify_handler(acpi_dev->handle,
+ ACPI_DEVICE_NOTIFY,
+ dptf_power_notify);
return result;
+ }
platform_set_drvdata(pdev, acpi_dev);
@@ -90,14 +210,23 @@ static int dptf_power_add(struct platform_device *pdev)
static int dptf_power_remove(struct platform_device *pdev)
{
+ struct acpi_device *acpi_dev = platform_get_drvdata(pdev);
+
+ acpi_remove_notify_handler(acpi_dev->handle,
+ ACPI_DEVICE_NOTIFY,
+ dptf_power_notify);
- sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group);
+ if (dptf_participant_type(acpi_dev->handle) == 0x0CULL)
+ sysfs_remove_group(&pdev->dev.kobj, &dptf_battery_attribute_group);
+ else
+ sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group);
return 0;
}
static const struct acpi_device_id int3407_device_ids[] = {
{"INT3407", 0},
+ {"INT3532", 0},
{"INTC1047", 0},
{"", 0},
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index b4c0152e92aa..04ce2b96c3da 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -34,7 +34,6 @@
#define ACPI_EC_CLASS "embedded_controller"
#define ACPI_EC_DEVICE_NAME "Embedded Controller"
-#define ACPI_EC_FILE_INFO "info"
/* EC status register */
#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
@@ -1783,13 +1782,14 @@ static void __init acpi_ec_ecdt_start(void)
return;
status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
- if (ACPI_FAILURE(status))
- return;
+ if (ACPI_SUCCESS(status)) {
+ boot_ec->handle = handle;
- boot_ec->handle = handle;
+ /* Add a special ACPI device object to represent the boot EC. */
+ acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
+ }
- /* Add a special ACPI device object to represent the boot EC. */
- acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
+ acpi_put_table((struct acpi_table_header *)ecdt_ptr);
}
/*
@@ -1891,12 +1891,12 @@ void __init acpi_ec_ecdt_probe(void)
* Asus X50GL:
* https://bugzilla.kernel.org/show_bug.cgi?id=11880
*/
- return;
+ goto out;
}
ec = acpi_ec_alloc();
if (!ec)
- return;
+ goto out;
if (EC_FLAGS_CORRECT_ECDT) {
ec->command_addr = ecdt_ptr->data.address;
@@ -1922,13 +1922,16 @@ void __init acpi_ec_ecdt_probe(void)
ret = acpi_ec_setup(ec, NULL);
if (ret) {
acpi_ec_free(ec);
- return;
+ goto out;
}
boot_ec = ec;
boot_ec_is_ecdt = true;
pr_info("Boot ECDT EC used to handle transactions\n");
+
+out:
+ acpi_put_table((struct acpi_table_header *)ecdt_ptr);
}
#ifdef CONFIG_PM_SLEEP
@@ -1994,23 +1997,35 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}
-bool acpi_ec_other_gpes_active(void)
-{
- return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
-}
-
bool acpi_ec_dispatch_gpe(void)
{
u32 ret;
if (!first_ec)
+ return acpi_any_gpe_status_set(U32_MAX);
+
+ /*
+ * Report wakeup if the status bit is set for any enabled GPE other
+ * than the EC one.
+ */
+ if (acpi_any_gpe_status_set(first_ec->gpe))
+ return true;
+
+ if (ec_no_wakeup)
return false;
+ /*
+ * Dispatch the EC GPE in-band, but do not report wakeup in any case
+ * to allow the caller to process events properly after that.
+ */
ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
if (ret == ACPI_INTERRUPT_HANDLED) {
- pm_pr_dbg("EC GPE dispatched\n");
- return true;
+ pm_pr_dbg("ACPI EC GPE dispatched\n");
+
+ /* Flush the event and query workqueues. */
+ acpi_ec_flush_work();
}
+
return false;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index aba0d0027586..ccd900690b6f 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -79,6 +79,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
struct resource r;
struct acpi_resource_irq *p = &ares->data.irq;
struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
+ char ev_name[5];
+ u8 trigger;
if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
return AE_OK;
@@ -87,14 +89,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
dev_err(dev, "unable to parse IRQ resource\n");
return AE_ERROR;
}
- if (ares->type == ACPI_RESOURCE_TYPE_IRQ)
+ if (ares->type == ACPI_RESOURCE_TYPE_IRQ) {
gsi = p->interrupts[0];
- else
+ trigger = p->triggering;
+ } else {
gsi = pext->interrupts[0];
+ trigger = pext->triggering;
+ }
irq = r.start;
- if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) {
+ switch (gsi) {
+ case 0 ... 255:
+ sprintf(ev_name, "_%c%02hhX",
+ trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
+
+ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
+ break;
+ /* fall through */
+ default:
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
+ break;
+
dev_err(dev, "cannot locate _EVT method\n");
return AE_ERROR;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e387517d3354..43411a7457cd 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -202,7 +202,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
#ifdef CONFIG_PM_SLEEP
void acpi_ec_flush_work(void);
-bool acpi_ec_other_gpes_active(void);
bool acpi_ec_dispatch_gpe(void);
#endif
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index ed3d2182cf2c..606da5d77ad3 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -31,8 +31,6 @@
ACPI_MODULE_NAME("pci_link");
#define ACPI_PCI_LINK_CLASS "pci_irq_routing"
#define ACPI_PCI_LINK_DEVICE_NAME "PCI Interrupt Link"
-#define ACPI_PCI_LINK_FILE_INFO "info"
-#define ACPI_PCI_LINK_FILE_STATUS "state"
#define ACPI_PCI_LINK_MAX_POSSIBLE 16
static int acpi_pci_link_add(struct acpi_device *device,
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 6b347d9920cc..54b36b7ad47d 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -29,7 +29,7 @@ struct mcfg_fixup {
u32 oem_revision;
u16 segment;
struct resource bus_range;
- struct pci_ecam_ops *ops;
+ const struct pci_ecam_ops *ops;
struct resource cfgres;
};
@@ -165,7 +165,7 @@ static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops)
+ const struct pci_ecam_ops **ecam_ops)
{
#ifdef CONFIG_PCI_QUIRKS
u16 segment = root->segment;
@@ -191,9 +191,9 @@ static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
static LIST_HEAD(pci_mcfg_list);
int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops)
+ const struct pci_ecam_ops **ecam_ops)
{
- struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
+ const struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
struct resource *bus_res = &root->secondary;
u16 seg = root->segment;
struct mcfg_entry *e;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ac8ad6cb82aa..f90e841c59f5 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -483,13 +483,8 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
- if (pci_aer_available()) {
- if (aer_acpi_firmware_first())
- dev_info(&device->dev,
- "PCIe AER handled by firmware\n");
- else
- control |= OSC_PCI_EXPRESS_AER_CONTROL;
- }
+ if (pci_aer_available())
+ control |= OSC_PCI_EXPRESS_AER_CONTROL;
/*
* Per the Downstream Port Containment Related Enhancements ECN to
@@ -938,7 +933,7 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
* assignments made by firmware for this host bridge.
*/
obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 1,
- IGNORE_PCI_BOOT_CONFIG_DSM, NULL);
+ DSM_PCI_PRESERVE_BOOT_CONFIG, NULL);
if (obj && obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 0)
host_bridge->preserve_config = 1;
ACPI_FREE(obj);
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 7ccd7d9660bc..a5101b07611a 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -102,6 +102,7 @@ static struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
.power_table_count = ARRAY_SIZE(chtdc_ti_power_table),
.thermal_table = chtdc_ti_thermal_table,
.thermal_table_count = ARRAY_SIZE(chtdc_ti_thermal_table),
+ .pmic_i2c_address = 0x5e,
};
static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev)
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index fe1e7bc91a5e..837b875d075e 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -36,8 +36,6 @@
ACPI_MODULE_NAME("power");
#define ACPI_POWER_CLASS "power_resource"
#define ACPI_POWER_DEVICE_NAME "Power Resource"
-#define ACPI_POWER_FILE_INFO "info"
-#define ACPI_POWER_FILE_STATUS "state"
#define ACPI_POWER_RESOURCE_STATE_OFF 0x00
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index dcc289e30166..75534c5b5433 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -308,11 +308,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
if (ret)
return ret;
- /*
- * It is expected that there will be at least 2 states, C1 and
- * something else (C2 or C3), so fail if that is not the case.
- */
- if (pr->power.count < 2)
+ if (!pr->power.count)
return -EFAULT;
pr->flags.has_cst = 1;
@@ -468,8 +464,7 @@ static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
if (pr->power.states[i].valid) {
pr->power.count = i;
- if (pr->power.states[i].type >= ACPI_STATE_C2)
- pr->flags.power = 1;
+ pr->flags.power = 1;
}
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 6e88224f60f0..f158b8c30113 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -28,9 +28,6 @@
#define ACPI_SBS_CLASS "sbs"
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_SBS_DEVICE_NAME "Smart Battery System"
-#define ACPI_SBS_FILE_INFO "info"
-#define ACPI_SBS_FILE_STATE "state"
-#define ACPI_SBS_FILE_ALARM "alarm"
#define ACPI_BATTERY_DIR_NAME "BAT%i"
#define ACPI_AC_DIR_NAME "AC0"
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 6d3448895382..5287ab98b8c1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2157,10 +2157,13 @@ static void __init acpi_get_spcr_uart_addr(void)
status = acpi_get_table(ACPI_SIG_SPCR, 0,
(struct acpi_table_header **)&spcr_ptr);
- if (ACPI_SUCCESS(status))
- spcr_uart_addr = spcr_ptr->serial_port.address;
- else
- printk(KERN_WARNING PREFIX "STAO table present, but SPCR is missing\n");
+ if (ACPI_FAILURE(status)) {
+ pr_warn(PREFIX "STAO table present, but SPCR is missing\n");
+ return;
+ }
+
+ spcr_uart_addr = spcr_ptr->serial_port.address;
+ acpi_put_table((struct acpi_table_header *)spcr_ptr);
}
static bool acpi_scan_initialized;
@@ -2196,10 +2199,12 @@ int __init acpi_scan_init(void)
(struct acpi_table_header **)&stao_ptr);
if (ACPI_SUCCESS(status)) {
if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
- printk(KERN_INFO PREFIX "STAO Name List not yet supported.");
+ pr_info(PREFIX "STAO Name List not yet supported.\n");
if (stao_ptr->ignore_uart)
acpi_get_spcr_uart_addr();
+
+ acpi_put_table((struct acpi_table_header *)stao_ptr);
}
acpi_gpe_apply_masked_gpes();
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 4edc8a3ce40f..aff13bf4d947 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -980,13 +980,6 @@ static int acpi_s2idle_prepare_late(void)
return 0;
}
-static void acpi_s2idle_sync(void)
-{
- /* The EC driver uses special workqueues that need to be flushed. */
- acpi_ec_flush_work();
- acpi_os_wait_events_complete(); /* synchronize Notify handling */
-}
-
static bool acpi_s2idle_wake(void)
{
if (!acpi_sci_irq_valid())
@@ -999,36 +992,34 @@ static bool acpi_s2idle_wake(void)
* wakeup is pending anyway and the SCI is not the source of
* it).
*/
- if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+ if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
+ pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
return true;
+ }
/*
* If the status bit of any enabled fixed event is set, the
* wakeup is regarded as valid.
*/
- if (acpi_any_fixed_event_status_set())
+ if (acpi_any_fixed_event_status_set()) {
+ pm_pr_dbg("ACPI fixed event wakeup\n");
return true;
+ }
/* Check wakeups from drivers sharing the SCI. */
- if (acpi_check_wakeup_handlers())
+ if (acpi_check_wakeup_handlers()) {
+ pm_pr_dbg("ACPI custom handler wakeup\n");
return true;
+ }
- /*
- * If the status bit is set for any enabled GPE other than the
- * EC one, the wakeup is regarded as a genuine one.
- */
- if (acpi_ec_other_gpes_active())
+ /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+ if (acpi_ec_dispatch_gpe()) {
+ pm_pr_dbg("ACPI non-EC GPE wakeup\n");
return true;
+ }
/*
- * If the EC GPE status bit has not been set, the wakeup is
- * regarded as a spurious one.
- */
- if (!acpi_ec_dispatch_gpe())
- return false;
-
- /*
- * Cancel the wakeup and process all pending events in case
+ * Cancel the SCI wakeup and process all pending events in case
* there are any wakeup ones in there.
*
* Note that if any non-EC GPEs are active at this point, the
@@ -1036,8 +1027,7 @@ static bool acpi_s2idle_wake(void)
* should be missed by canceling the wakeup here.
*/
pm_system_cancel_wakeup();
-
- acpi_s2idle_sync();
+ acpi_os_wait_events_complete();
/*
* The SCI is in the "suspended" state now and it cannot produce
@@ -1045,8 +1035,10 @@ static bool acpi_s2idle_wake(void)
* are pending here, they must be resulting from the processing
* of EC events above or coming from somewhere else.
*/
- if (pm_wakeup_pending())
+ if (pm_wakeup_pending()) {
+ pm_pr_dbg("Wakeup after ACPI Notify sync\n");
return true;
+ }
rearm_wake_irq(acpi_sci_irq);
}
@@ -1070,7 +1062,8 @@ static void acpi_s2idle_restore(void)
* of GPEs.
*/
acpi_os_wait_events_complete(); /* synchronize GPE processing */
- acpi_s2idle_sync();
+ acpi_ec_flush_work(); /* flush the EC driver's workqueues */
+ acpi_os_wait_events_complete(); /* synchronize Notify handling */
s2idle_wakeup = false;
@@ -1297,8 +1290,10 @@ static void acpi_sleep_hibernate_setup(void)
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
- if (facs)
+ if (facs) {
s4_hardware_signature = facs->hardware_signature;
+ acpi_put_table((struct acpi_table_header *)facs);
+ }
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
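
Taken together, the sleep.c hunks reduce acpi_s2idle_wake() to a linear triage, with a pm_pr_dbg() message naming the wakeup source at each exit. A stand-alone sketch of one pass of that triage, with stub predicates standing in for the ACPI helpers (the kernel loops, rearming the SCI wake IRQ after a spurious pass):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the ACPI helpers used by acpi_s2idle_wake(). */
static bool sci_wakeup_armed(void)         { return false; }
static bool fixed_event_pending(void)      { return false; }
static bool custom_handler_wakeup(void)    { return false; }
static bool non_ec_gpe_wakeup(void)        { return false; }
static bool wakeup_after_notify_sync(void) { return false; }

/* One pass: true means the wakeup is genuine, false means go back to sleep. */
static bool s2idle_wake_pass(void)
{
	if (sci_wakeup_armed())
		return true;	/* wakeup unrelated to the ACPI SCI */
	if (fixed_event_pending())
		return true;	/* ACPI fixed event wakeup */
	if (custom_handler_wakeup())
		return true;	/* a driver sharing the SCI woke us */
	if (non_ec_gpe_wakeup())
		return true;	/* non-EC GPE; the EC GPE was dispatched */
	/* Cancel the SCI wakeup, synchronize Notify handling, re-check. */
	return wakeup_after_notify_sync();
}

int main(void)
{
	printf("genuine wakeup: %d\n", s2idle_wake_pass());
	return 0;
}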
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index c60d2c6d31d6..3a89909b50a6 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -993,8 +993,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
error = kobject_init_and_add(&hotplug->kobj,
&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
- if (error)
+ if (error) {
+ kobject_put(&hotplug->kobj);
goto err_out;
+ }
kobject_uevent(&hotplug->kobj, KOBJ_ADD);
return;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 804ac0df58ec..838b719ec7ce 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -606,6 +606,31 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
}
/**
+ * acpi_evaluate_reg: Evaluate _REG method to register OpRegion presence
+ * @handle: ACPI device handle
+ * @space_id: ACPI address space id to register OpRegion presence for
+ * @function: Parameter to pass to _REG one of ACPI_REG_CONNECT or
+ * ACPI_REG_DISCONNECT
+ *
+ * Evaluate device's _REG method to register OpRegion presence.
+ */
+acpi_status acpi_evaluate_reg(acpi_handle handle, u8 space_id, u32 function)
+{
+ struct acpi_object_list arg_list;
+ union acpi_object params[2];
+
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = space_id;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = function;
+ arg_list.count = 2;
+ arg_list.pointer = params;
+
+ return acpi_evaluate_object(handle, "_REG", &arg_list, NULL);
+}
+EXPORT_SYMBOL(acpi_evaluate_reg);
+
+/**
* acpi_evaluate_dsm - evaluate device's _DSM method
* @handle: ACPI device handle
* @guid: GUID of requested functions, should be 16 bytes
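
The new acpi_evaluate_reg() helper wraps the two-argument _REG evaluation that several drivers previously open-coded. A hedged usage sketch, assuming a caller that already owns an ACPI device (ACPI_ADR_SPACE_EC and ACPI_REG_CONNECT are existing ACPICA constants; the helper itself is hypothetical):

#include <linux/acpi.h>
#include <linux/errno.h>

/* Sketch: tell AML that the EC address space handler is now available. */
static int example_connect_ec_opregion(struct acpi_device *adev)
{
	acpi_status status;

	status = acpi_evaluate_reg(adev->handle, ACPI_ADR_SPACE_EC,
				   ACPI_REG_CONNECT);
	return ACPI_FAILURE(status) ? -EIO : 0;
}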
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b4994e50608d..2499d7e3c710 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -361,6 +361,16 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "JV50"),
},
},
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */
+ .callback = video_detect_force_native,
+ .ident = "Acer TravelMate 5735Z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"),
+ DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 8558b629880b..ecc304149067 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -505,7 +505,7 @@ static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000))
-static void amba_deferred_retry_func(struct work_struct *dummy)
+static int amba_deferred_retry(void)
{
struct deferred_device *ddev, *tmp;
@@ -521,11 +521,19 @@ static void amba_deferred_retry_func(struct work_struct *dummy)
kfree(ddev);
}
+ mutex_unlock(&deferred_devices_lock);
+
+ return 0;
+}
+late_initcall(amba_deferred_retry);
+
+static void amba_deferred_retry_func(struct work_struct *dummy)
+{
+ amba_deferred_retry();
+
if (!list_empty(&deferred_devices))
schedule_delayed_work(&deferred_retry_work,
DEFERRED_DEVICE_TIMEOUT);
-
- mutex_unlock(&deferred_devices_lock);
}
/**
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 9ecad74183a3..7cf566aafe1f 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -650,7 +650,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
struct binderfs_info *info;
struct binderfs_mount_opts *ctx = fc->fs_private;
struct inode *inode = NULL;
- struct binderfs_device device_info = { 0 };
+ struct binderfs_device device_info = {};
const char *name;
size_t len;
@@ -747,7 +747,7 @@ static const struct fs_context_operations binderfs_fs_context_ops = {
static int binderfs_init_fs_context(struct fs_context *fc)
{
- struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_mount_opts *ctx;
ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
if (!ctx)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index beca5f91bb4c..69361ec43db5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5209,7 +5209,7 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
* sata_link_init_spd - Initialize link->sata_spd_limit
* @link: Link to configure sata_spd_limit for
*
- * Initialize @link->[hw_]sata_spd_limit to the currently
+ * Initialize ``link->[hw_]sata_spd_limit`` to the currently
* configured value.
*
* LOCKING:
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 36e588d88b95..435781a16875 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -649,7 +649,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
- qc->extrabytes = scmd->request->extra_len;
+ qc->extrabytes = scmd->extra_len;
qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
}
@@ -1017,16 +1017,11 @@ void ata_scsi_sdev_config(struct scsi_device *sdev)
* RETURNS:
 * 1 if the request needs draining; otherwise, 0.
*/
-static int atapi_drain_needed(struct request *rq)
+bool ata_scsi_dma_need_drain(struct request *rq)
{
- if (likely(!blk_rq_is_passthrough(rq)))
- return 0;
-
- if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
- return 0;
-
return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
}
+EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
@@ -1039,21 +1034,21 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
blk_queue_max_hw_sectors(q, dev->max_sectors);
if (dev->class == ATA_DEV_ATAPI) {
- void *buf;
-
sdev->sector_size = ATA_SECT_SIZE;
/* set DMA padding */
blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
- /* configure draining */
- buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
- if (!buf) {
+ /* make room for appending the drain */
+ blk_queue_max_segments(q, queue_max_segments(q) - 1);
+
+ sdev->dma_drain_len = ATAPI_MAX_DRAIN;
+ sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len,
+ q->bounce_gfp | GFP_KERNEL);
+ if (!sdev->dma_drain_buf) {
ata_dev_err(dev, "drain buffer allocation failed\n");
return -ENOMEM;
}
-
- blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
sdev->sector_size = ata_id_logical_sector_size(dev->id);
sdev->manage_start_stop = 1;
@@ -1135,7 +1130,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- struct request_queue *q = sdev->request_queue;
unsigned long flags;
struct ata_device *dev;
@@ -1152,9 +1146,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
}
spin_unlock_irqrestore(ap->lock, flags);
- kfree(q->dma_drain_buffer);
- q->dma_drain_buffer = NULL;
- q->dma_drain_size = 0;
+ kfree(sdev->dma_drain_buf);
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
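
With the block-layer drain machinery removed, the drain decision becomes a SCSI host template hook and the buffer becomes per-device state in struct scsi_device. A sketch of how a host template would wire up the newly exported helper; the surrounding fields are illustrative, and in-tree ATA templates are expected to pick this up via their shared template definitions:

#include <scsi/scsi_host.h>
#include <linux/libata.h>

/* Sketch: an ATA host template opting into per-device DMA draining. */
static struct scsi_host_template example_sht = {
	.name		= "example-ata",
	/* Called per request to decide whether drain bytes are needed. */
	.dma_need_drain	= ata_scsi_dma_need_drain,
	/* ... remaining fields as provided by the shared ATA templates ... */
};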
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 8c37294f1d1e..cfb0d16b60ad 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -306,7 +306,7 @@ config ATM_IA
for more info about the cards. Say Y (or M to compile as a module
named iphase) here if you have one of these cards.
- See the file <file:Documentation/networking/iphase.txt> for further
+ See the file <file:Documentation/networking/iphase.rst> for further
details.
config ATM_IA_DEBUG
@@ -336,7 +336,7 @@ config ATM_FORE200E
on PCI and SBUS hosts. Say Y (or M to compile as a module
named fore_200e) here if you have one of these ATM adapters.
- See the file <file:Documentation/networking/fore200e.txt> for
+ See the file <file:Documentation/networking/fore200e.rst> for
further details.
config ATM_FORE200E_USE_TASKLET
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 40fb069a8a7e..95c22c0f9036 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -153,6 +153,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
+extern void driver_deferred_probe_force_trigger(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 073045cb214e..67d39a90b45c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -49,6 +49,9 @@ static LIST_HEAD(wait_for_suppliers);
static DEFINE_MUTEX(wfs_lock);
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
+static unsigned int defer_fw_devlink_count;
+static DEFINE_MUTEX(defer_fw_devlink_lock);
+static bool fw_devlink_is_permissive(void);
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
@@ -365,6 +368,7 @@ struct device_link *device_link_add(struct device *consumer,
link->flags |= DL_FLAG_STATELESS;
goto reorder;
} else {
+ link->flags |= DL_FLAG_STATELESS;
goto out;
}
}
@@ -433,12 +437,16 @@ struct device_link *device_link_add(struct device *consumer,
flags & DL_FLAG_PM_RUNTIME)
pm_runtime_resume(supplier);
+ list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+ list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
if (flags & DL_FLAG_SYNC_STATE_ONLY) {
dev_dbg(consumer,
"Linked as a sync state only consumer to %s\n",
dev_name(supplier));
goto out;
}
+
reorder:
/*
* Move the consumer and all of the devices depending on it to the end
@@ -449,12 +457,9 @@ reorder:
*/
device_reorder_to_tail(consumer, NULL);
- list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
- list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
-
dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
- out:
+out:
device_pm_unlock();
device_links_write_unlock();
@@ -527,7 +532,7 @@ static void device_link_add_missing_supplier_links(void)
int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
if (!ret)
list_del_init(&dev->links.needs_suppliers);
- else if (ret != -ENODEV)
+ else if (ret != -ENODEV || fw_devlink_is_permissive())
dev->links.need_for_probe = false;
}
mutex_unlock(&wfs_lock);
@@ -641,9 +646,17 @@ static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
- list_for_each_entry(link, &dev->links.suppliers, c_node)
- if (link->status == DL_STATE_CONSUMER_PROBE)
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->status != DL_STATE_CONSUMER_PROBE)
+ continue;
+
+ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ } else {
+ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+ }
}
/**
@@ -682,11 +695,11 @@ int device_links_check_suppliers(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED) ||
- link->flags & DL_FLAG_SYNC_STATE_ONLY)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
- if (link->status != DL_STATE_AVAILABLE) {
+ if (link->status != DL_STATE_AVAILABLE &&
+ !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
device_links_missing_supplier(dev);
ret = -EPROBE_DEFER;
break;
@@ -829,6 +842,13 @@ static void __device_links_supplier_defer_sync(struct device *sup)
list_add_tail(&sup->links.defer_sync, &deferred_sync);
}
+static void device_link_drop_managed(struct device_link *link)
+{
+ link->flags &= ~DL_FLAG_MANAGED;
+ WRITE_ONCE(link->status, DL_STATE_NONE);
+ kref_put(&link->kref, __device_link_del);
+}
+
/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
@@ -842,7 +862,7 @@ static void __device_links_supplier_defer_sync(struct device *sup)
*/
void device_links_driver_bound(struct device *dev)
{
- struct device_link *link;
+ struct device_link *link, *ln;
LIST_HEAD(sync_list);
/*
@@ -882,18 +902,35 @@ void device_links_driver_bound(struct device *dev)
else
__device_links_queue_sync_state(dev, &sync_list);
- list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
+ struct device *supplier;
+
if (!(link->flags & DL_FLAG_MANAGED))
continue;
- WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
- WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ supplier = link->supplier;
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+ /*
+ * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
+ * other DL_MANAGED_LINK_FLAGS have been set. So, it's
+			 * safe to drop the managed link completely.
+ */
+ device_link_drop_managed(link);
+ } else {
+ WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+ WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ }
+ /*
+ * This needs to be done even for the deleted
+ * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
+ * device link that was preventing the supplier from getting a
+ * sync_state() call.
+ */
if (defer_sync_state_count)
- __device_links_supplier_defer_sync(link->supplier);
+ __device_links_supplier_defer_sync(supplier);
else
- __device_links_queue_sync_state(link->supplier,
- &sync_list);
+ __device_links_queue_sync_state(supplier, &sync_list);
}
dev->links.status = DL_DEV_DRIVER_BOUND;
@@ -903,13 +940,6 @@ void device_links_driver_bound(struct device *dev)
device_links_flush_sync_list(&sync_list, dev);
}
-static void device_link_drop_managed(struct device_link *link)
-{
- link->flags &= ~DL_FLAG_MANAGED;
- WRITE_ONCE(link->status, DL_STATE_NONE);
- kref_put(&link->kref, __device_link_del);
-}
-
/**
* __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
@@ -930,11 +960,21 @@ static void __device_links_no_driver(struct device *dev)
if (!(link->flags & DL_FLAG_MANAGED))
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
device_link_drop_managed(link);
- else if (link->status == DL_STATE_CONSUMER_PROBE ||
- link->status == DL_STATE_ACTIVE)
+ continue;
+ }
+
+ if (link->status != DL_STATE_CONSUMER_PROBE &&
+ link->status != DL_STATE_ACTIVE)
+ continue;
+
+ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ } else {
+ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
}
dev->links.status = DL_DEV_NO_DRIVER;
@@ -1143,6 +1183,150 @@ static void device_links_purge(struct device *dev)
device_links_write_unlock();
}
+static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+static int __init fw_devlink_setup(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "off") == 0) {
+ fw_devlink_flags = 0;
+ } else if (strcmp(arg, "permissive") == 0) {
+ fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+ } else if (strcmp(arg, "on") == 0) {
+ fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ } else if (strcmp(arg, "rpm") == 0) {
+ fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
+ DL_FLAG_PM_RUNTIME;
+ }
+ return 0;
+}
+early_param("fw_devlink", fw_devlink_setup);
+
+u32 fw_devlink_get_flags(void)
+{
+ return fw_devlink_flags;
+}
+
+static bool fw_devlink_is_permissive(void)
+{
+ return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
+}
+
+static void fw_devlink_link_device(struct device *dev)
+{
+ int fw_ret;
+
+ if (!fw_devlink_flags)
+ return;
+
+ mutex_lock(&defer_fw_devlink_lock);
+ if (!defer_fw_devlink_count)
+ device_link_add_missing_supplier_links();
+
+ /*
+ * The device's fwnode not having add_links() doesn't affect if other
+ * consumers can find this device as a supplier. So, this check is
+ * intentionally placed after device_link_add_missing_supplier_links().
+ */
+ if (!fwnode_has_op(dev->fwnode, add_links))
+ goto out;
+
+ /*
+ * If fw_devlink is being deferred, assume all devices have mandatory
+ * suppliers they need to link to later. Then, when the fw_devlink is
+ * resumed, all these devices will get a chance to try and link to any
+ * suppliers they have.
+ */
+ if (!defer_fw_devlink_count) {
+ fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
+ if (fw_ret == -ENODEV && fw_devlink_is_permissive())
+ fw_ret = -EAGAIN;
+ } else {
+ fw_ret = -ENODEV;
+ }
+
+ if (fw_ret == -ENODEV)
+ device_link_wait_for_mandatory_supplier(dev);
+ else if (fw_ret)
+ device_link_wait_for_optional_supplier(dev);
+
+out:
+ mutex_unlock(&defer_fw_devlink_lock);
+}
+
+/**
+ * fw_devlink_pause - Pause parsing of fwnode to create device links
+ *
+ * Calling this function defers any fwnode parsing to create device links until
+ * fw_devlink_resume() is called. Both these functions are ref counted and the
+ * caller needs to match the calls.
+ *
+ * While fw_devlink is paused:
+ * - Any device that is added won't have its fwnode parsed to create device
+ * links.
+ * - The probe of the device will also be deferred during this period.
+ * - Any devices that were already added but are waiting for suppliers won't
+ * able to link to newly added devices.
+ *
+ * Once fw_devlink_resume() is called:
+ * - All the fwnodes that were not parsed will be parsed.
+ * - All the devices that were deferred probing will be reattempted if they
+ * aren't waiting for any more suppliers.
+ *
+ * This pair of functions is mainly meant to optimize the parsing of fwnodes
+ * when a lot of devices that need to link to each other are added in a short
+ * interval of time. For example, adding all the top level devices in a system.
+ *
+ * For example, if N devices are added and:
+ * - All the consumers are added before their suppliers
+ * - All the suppliers of the N devices are part of the N devices
+ *
+ * Then:
+ *
+ * - With the use of fw_devlink_pause() and fw_devlink_resume(), each device
+ * will only need one parsing of its fwnode because it is guaranteed to find
+ * all the supplier devices already registered and ready to link to. It won't
+ * have to do another pass later to find one or more suppliers it couldn't
+ * find in the first parse of the fwnode. So, we'll only need O(N) fwnode
+ * parses.
+ *
+ * - Without the use of fw_devlink_pause() and fw_devlink_resume(), we would
+ * end up doing O(N^2) parses of fwnodes because every device that's added is
+ * guaranteed to trigger a parse of the fwnode of every device added before
+ * it. This O(N^2) parse is made worse by the fact that when a fwnode of a
+ * device is parsed, all its descendant devices might need to have their
+ * fwnodes parsed too (even if the devices themselves aren't added).
+ */
+void fw_devlink_pause(void)
+{
+ mutex_lock(&defer_fw_devlink_lock);
+ defer_fw_devlink_count++;
+ mutex_unlock(&defer_fw_devlink_lock);
+}
+
+/**
+ * fw_devlink_resume - Resume parsing of fwnode to create device links
+ *
+ * This function is used in conjunction with fw_devlink_pause() and is ref
+ * counted. See documentation for fw_devlink_pause() for more details.
+ */
+void fw_devlink_resume(void)
+{
+ mutex_lock(&defer_fw_devlink_lock);
+ if (!defer_fw_devlink_count) {
+ WARN(true, "Unmatched fw_devlink pause/resume!");
+ goto out;
+ }
+
+ defer_fw_devlink_count--;
+ if (defer_fw_devlink_count)
+ goto out;
+
+ device_link_add_missing_supplier_links();
+ driver_deferred_probe_force_trigger();
+out:
+ mutex_unlock(&defer_fw_devlink_lock);
+}
/* Device links support end. */
int (*platform_notify)(struct device *dev) = NULL;
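
As the kernel-doc above spells out, a caller that registers many interdependent devices in one burst can bracket the additions with the pause/resume pair to keep fwnode parsing at O(N). A minimal sketch of the intended calling pattern (hypothetical helper; device setup details elided):

#include <linux/device.h>

/* Sketch: batch a burst of device additions so each fwnode is parsed once. */
static void example_register_burst(struct device **devs, int ndevs)
{
	int i;

	fw_devlink_pause();
	for (i = 0; i < ndevs; i++)
		if (device_add(devs[i]))	/* links deferred while paused */
			dev_warn(devs[i], "device_add failed\n");
	fw_devlink_resume();	/* parse fwnodes, retry deferred probes */
}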
@@ -1374,7 +1558,7 @@ static void device_release(struct kobject *kobj)
else if (dev->class && dev->class->dev_release)
dev->class->dev_release(dev);
else
- WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
+ WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
dev_name(dev));
kfree(p);
}
@@ -2345,36 +2529,6 @@ static int device_private_init(struct device *dev)
return 0;
}
-static u32 fw_devlink_flags;
-static int __init fw_devlink_setup(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- if (strcmp(arg, "off") == 0) {
- fw_devlink_flags = 0;
- } else if (strcmp(arg, "permissive") == 0) {
- fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
- } else if (strcmp(arg, "on") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
- } else if (strcmp(arg, "rpm") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
- DL_FLAG_PM_RUNTIME;
- }
- return 0;
-}
-early_param("fw_devlink", fw_devlink_setup);
-
-u32 fw_devlink_get_flags(void)
-{
- return fw_devlink_flags;
-}
-
-static bool fw_devlink_is_permissive(void)
-{
- return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
-}
-
/**
* device_add - add device to device hierarchy.
* @dev: device.
@@ -2407,9 +2561,8 @@ int device_add(struct device *dev)
struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
- int error = -EINVAL, fw_ret;
+ int error = -EINVAL;
struct kobject *glue_dir = NULL;
- bool is_fwnode_dev = false;
dev = get_device(dev);
if (!dev)
@@ -2507,11 +2660,6 @@ int device_add(struct device *dev)
kobject_uevent(&dev->kobj, KOBJ_ADD);
- if (dev->fwnode && !dev->fwnode->dev) {
- dev->fwnode->dev = dev;
- is_fwnode_dev = true;
- }
-
/*
* Check if any of the other devices (consumers) have been waiting for
* this device (supplier) to be added so that they can create a device
@@ -2520,19 +2668,13 @@ int device_add(struct device *dev)
* This needs to happen after device_pm_add() because device_link_add()
* requires the supplier be registered before it's called.
*
- * But this also needs to happe before bus_probe_device() to make sure
+ * But this also needs to happen before bus_probe_device() to make sure
* waiting consumers can link to it before the driver is bound to the
* device and the driver sync_state callback is called for this device.
*/
- device_link_add_missing_supplier_links();
-
- if (fw_devlink_flags && is_fwnode_dev &&
- fwnode_has_op(dev->fwnode, add_links)) {
- fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
- if (fw_ret == -ENODEV && !fw_devlink_is_permissive())
- device_link_wait_for_mandatory_supplier(dev);
- else if (fw_ret)
- device_link_wait_for_optional_supplier(dev);
+ if (dev->fwnode && !dev->fwnode->dev) {
+ dev->fwnode->dev = dev;
+ fw_devlink_link_device(dev);
}
bus_probe_device(dev);
@@ -3194,40 +3336,6 @@ error:
}
/**
- * device_create_vargs - creates a device and registers it with sysfs
- * @class: pointer to the struct class that this device should be registered to
- * @parent: pointer to the parent struct device of this new device, if any
- * @devt: the dev_t for the char device to be added
- * @drvdata: the data to be added to the device for callbacks
- * @fmt: string for the device's name
- * @args: va_list for the device's name
- *
- * This function can be used by char device classes. A struct device
- * will be created in sysfs, registered to the specified class.
- *
- * A "dev" file will be created, showing the dev_t for the device, if
- * the dev_t is not 0,0.
- * If a pointer to a parent struct device is passed in, the newly created
- * struct device will be a child of that device in sysfs.
- * The pointer to the struct device will be returned from the call.
- * Any further sysfs files that might be required can be created using this
- * pointer.
- *
- * Returns &struct device pointer on success, or ERR_PTR() on error.
- *
- * Note: the struct class passed to this function must have previously
- * been created with a call to class_create().
- */
-struct device *device_create_vargs(struct class *class, struct device *parent,
- dev_t devt, void *drvdata, const char *fmt,
- va_list args)
-{
- return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
- fmt, args);
-}
-EXPORT_SYMBOL_GPL(device_create_vargs);
-
-/**
* device_create - creates a device and registers it with sysfs
* @class: pointer to the struct class that this device should be registered to
* @parent: pointer to the parent struct device of this new device, if any
@@ -3258,7 +3366,8 @@ struct device *device_create(struct class *class, struct device *parent,
struct device *dev;
va_start(vargs, fmt);
- dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
+ dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
+ fmt, vargs);
va_end(vargs);
return dev;
}
@@ -3896,6 +4005,7 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
else
dev->fwnode = fwnode;
}
+EXPORT_SYMBOL_GPL(set_secondary_fwnode);
/**
* device_set_of_node_from_dev - reuse device-tree node of another device
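
Several core.c hunks adjust device_link_add() bookkeeping: the early-return path for an existing stateless link now also sets DL_FLAG_STATELESS, and list insertion happens before the SYNC_STATE_ONLY shortcut. For orientation, a hedged consumer-side sketch of the API these fixes service (hypothetical helper; flags chosen purely for illustration):

#include <linux/device.h>
#include <linux/errno.h>

/* Sketch: tie a consumer's runtime PM and ordering to its supplier. */
static int example_link(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	return link ? 0 : -ENODEV;
}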
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 94037be7f5d7..9a1d940342ac 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -164,6 +164,11 @@ static void driver_deferred_probe_trigger(void)
if (!driver_deferred_probe_enable)
return;
+ driver_deferred_probe_force_trigger();
+}
+
+void driver_deferred_probe_force_trigger(void)
+{
/*
* A successful probe means that all the devices in the pending list
* should be triggered to be reprobed. Move all the deferred devices
@@ -254,12 +259,12 @@ __setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
int driver_deferred_probe_check_state(struct device *dev)
{
if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
- dev_warn(dev, "ignoring dependency for device, assuming no driver");
+ dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
return -ENODEV;
}
if (!driver_deferred_probe_timeout && initcalls_done) {
- dev_warn(dev, "deferred probe timeout, ignoring dependency");
+ dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
return -ETIMEDOUT;
}
@@ -275,7 +280,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
flush_work(&deferred_probe_work);
list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
- dev_info(private->device, "deferred probe pending");
+ dev_info(private->device, "deferred probe pending\n");
wake_up(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -336,7 +341,7 @@ bool device_is_bound(struct device *dev)
static void driver_bound(struct device *dev)
{
if (device_is_bound(dev)) {
- printk(KERN_WARNING "%s: device %s already bound\n",
+ pr_warn("%s: device %s already bound\n",
__func__, kobject_name(&dev->kobj));
return;
}
@@ -505,8 +510,8 @@ re_probe:
}
if (driver_sysfs_add(dev)) {
- printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
- __func__, dev_name(dev));
+ pr_err("%s: driver_sysfs_add(%s) failed\n",
+ __func__, dev_name(dev));
goto probe_failed;
}
@@ -597,9 +602,8 @@ pinctrl_bind_failed:
break;
default:
/* driver matched but the probe failed */
- printk(KERN_WARNING
- "%s: probe of %s failed with error %d\n",
- drv->name, dev_name(dev), ret);
+ pr_warn("%s: probe of %s failed with error %d\n",
+ drv->name, dev_name(dev), ret);
}
/*
* Ignore errors returned by ->probe so that the next driver can try
@@ -624,8 +628,8 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
ret = really_probe(dev, drv);
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
- printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
- dev_name(dev), ret, (s64) ktime_to_us(delta));
+ pr_debug("probe of %s returned %d after %lld usecs\n",
+ dev_name(dev), ret, (s64) ktime_to_us(delta));
return ret;
}
@@ -713,8 +717,7 @@ static inline bool cmdline_requested_async_probing(const char *drv_name)
static int __init save_async_options(char *buf)
{
if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
- printk(KERN_WARNING
- "Too long list of driver names for 'driver_async_probe'!\n");
+ pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
return 0;
@@ -789,7 +792,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
} else if (ret < 0) {
- dev_dbg(dev, "Bus failed to match device: %d", ret);
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
} /* ret > 0 means positive match */
@@ -1022,7 +1025,7 @@ static int __driver_attach(struct device *dev, void *data)
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
} else if (ret < 0) {
- dev_dbg(dev, "Bus failed to match device: %d", ret);
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
} /* ret > 0 means positive match */
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 1e9c96e3ed63..5327bfc6ba71 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -9,6 +9,7 @@
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "fallback.h"
#include "firmware.h"
@@ -17,6 +18,8 @@
* firmware fallback mechanism
*/
+MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+
extern struct firmware_fallback_config fw_fallback_config;
/* These getters are vetted to use int properly */
@@ -460,7 +463,7 @@ static const struct attribute_group *fw_dev_attr_groups[] = {
static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, enum fw_opt opt_flags)
+ struct device *device, u32 opt_flags)
{
struct fw_sysfs *fw_sysfs;
struct device *f_dev;
@@ -493,7 +496,7 @@ exit:
* In charge of constructing a sysfs fallback interface for firmware loading.
**/
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
- enum fw_opt opt_flags, long timeout)
+ u32 opt_flags, long timeout)
{
int retval = 0;
struct device *f_dev = &fw_sysfs->dev;
@@ -547,7 +550,7 @@ err_put_dev:
static int fw_load_from_user_helper(struct firmware *firmware,
const char *name, struct device *device,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
struct fw_sysfs *fw_sysfs;
long timeout;
@@ -588,7 +591,7 @@ out_unlock:
return ret;
}
-static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
+static bool fw_force_sysfs_fallback(u32 opt_flags)
{
if (fw_fallback_config.force_sysfs_fallback)
return true;
@@ -597,7 +600,7 @@ static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
return true;
}
-static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
+static bool fw_run_sysfs_fallback(u32 opt_flags)
{
int ret;
@@ -640,7 +643,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
**/
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
- enum fw_opt opt_flags,
+ u32 opt_flags,
int ret)
{
if (!fw_run_sysfs_fallback(opt_flags))
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 06f4577733a8..2afdb6adb23f 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -33,7 +33,7 @@ struct firmware_fallback_config {
#ifdef CONFIG_FW_LOADER_USER_HELPER
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
- enum fw_opt opt_flags,
+ u32 opt_flags,
int ret);
void kill_pending_fw_fallback_reqs(bool only_kill_custom);
@@ -45,7 +45,7 @@ void unregister_sysfs_loader(void);
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
- enum fw_opt opt_flags,
+ u32 opt_flags,
int ret)
{
/* Keep carrying over the same error */
@@ -67,10 +67,10 @@ static inline void unregister_sysfs_loader(void)
#endif /* CONFIG_FW_LOADER_USER_HELPER */
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
-int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags);
+int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags);
#else
static inline int firmware_fallback_platform(struct fw_priv *fw_priv,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
return -ENOENT;
}
diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c
index c88c745590fe..cdd2c9a9f38a 100644
--- a/drivers/base/firmware_loader/fallback_platform.c
+++ b/drivers/base/firmware_loader/fallback_platform.c
@@ -8,7 +8,7 @@
#include "fallback.h"
#include "firmware.h"
-int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags)
+int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags)
{
const u8 *data;
size_t size;
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index a182e318bd09..46a731dede6f 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -21,7 +21,7 @@ struct firmware_fallback_config fw_fallback_config = {
.loading_timeout = 60,
.old_timeout = 60,
};
-EXPORT_SYMBOL_GPL(fw_fallback_config);
+EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
#ifdef CONFIG_SYSCTL
struct ctl_table firmware_config_table[] = {
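
The fallback changes move fw_fallback_config into the FIRMWARE_LOADER_PRIVATE symbol namespace, so only modules that explicitly import that namespace can link against it. The general export/import pattern, sketched with hypothetical names:

/* exporter.c: place a GPL export into a named symbol namespace. */
#include <linux/module.h>

int example_private_value = 42;
EXPORT_SYMBOL_NS_GPL(example_private_value, EXAMPLE_PRIVATE);

/* importer.c: a module must opt in before it may link to the symbol. */
MODULE_IMPORT_NS(EXAMPLE_PRIVATE);

extern int example_private_value;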
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 25836a6afc9f..933e2192fbe8 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -136,8 +136,7 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
__fw_state_set(fw_priv, FW_STATUS_DONE);
}
-int assign_fw(struct firmware *fw, struct device *device,
- enum fw_opt opt_flags);
+int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags);
#ifdef CONFIG_FW_LOADER_PAGED_BUF
void fw_free_paged_buf(struct fw_priv *fw_priv);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 76f79913916d..ca871b13524e 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -210,7 +210,7 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
static int alloc_lookup_fw_priv(const char *fw_name,
struct firmware_cache *fwc,
struct fw_priv **fw_priv, void *dbuf,
- size_t size, enum fw_opt opt_flags)
+ size_t size, u32 opt_flags)
{
struct fw_priv *tmp;
@@ -548,9 +548,6 @@ static void firmware_free_data(const struct firmware *fw)
static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
fw->priv = fw_priv;
-#ifdef CONFIG_FW_LOADER_USER_HELPER
- fw->pages = fw_priv->pages;
-#endif
fw->size = fw_priv->size;
fw->data = fw_priv->data;
@@ -635,8 +632,7 @@ static int fw_add_devm_name(struct device *dev, const char *name)
}
#endif
-int assign_fw(struct firmware *fw, struct device *device,
- enum fw_opt opt_flags)
+int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags)
{
struct fw_priv *fw_priv = fw->priv;
int ret;
@@ -687,7 +683,7 @@ int assign_fw(struct firmware *fw, struct device *device,
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
struct device *device, void *dbuf, size_t size,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
struct firmware *firmware;
struct fw_priv *fw_priv;
@@ -753,7 +749,7 @@ static void fw_abort_batch_reqs(struct firmware *fw)
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device, void *buf, size_t size,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
struct firmware *fw = NULL;
int ret;
@@ -990,7 +986,7 @@ struct firmware_work {
struct device *device;
void *context;
void (*cont)(const struct firmware *fw, void *context);
- enum fw_opt opt_flags;
+ u32 opt_flags;
};
static void request_firmware_work_func(struct work_struct *work)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dbec3a05590a..2b09b68b9f78 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -21,6 +21,7 @@
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
+#include <linux/xarray.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -74,6 +75,13 @@ static struct bus_type memory_subsys = {
.offline = memory_subsys_offline,
};
+/*
+ * Memory blocks are cached in a local xarray to avoid
+ * a costly linear search for the corresponding device on
+ * the subsystem bus.
+ */
+static DEFINE_XARRAY(memory_blocks);
+
static BLOCKING_NOTIFIER_HEAD(memory_chain);
int register_memory_notifier(struct notifier_block *nb)
@@ -489,22 +497,23 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
return 0;
}
-/* A reference for the returned memory block device is acquired. */
+/*
+ * A reference for the returned memory block device is acquired.
+ *
+ * Called under device_hotplug_lock.
+ */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
- struct device *dev;
+ struct memory_block *mem;
- dev = subsys_find_device_by_id(&memory_subsys, block_id, NULL);
- return dev ? to_memory_block(dev) : NULL;
+ mem = xa_load(&memory_blocks, block_id);
+ if (mem)
+ get_device(&mem->dev);
+ return mem;
}
/*
- * For now, we have a linear search to go find the appropriate
- * memory_block corresponding to a particular phys_index. If
- * this gets to be a real problem, we can always use a radix
- * tree or something here.
- *
- * This could be made generic for all device subsystems.
+ * Called under device_hotplug_lock.
*/
struct memory_block *find_memory_block(struct mem_section *section)
{
@@ -548,9 +557,16 @@ int register_memory(struct memory_block *memory)
memory->dev.offline = memory->state == MEM_OFFLINE;
ret = device_register(&memory->dev);
- if (ret)
+ if (ret) {
put_device(&memory->dev);
-
+ return ret;
+ }
+ ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
+ GFP_KERNEL));
+ if (ret) {
+ put_device(&memory->dev);
+ device_unregister(&memory->dev);
+ }
return ret;
}
@@ -604,6 +620,8 @@ static void unregister_memory(struct memory_block *memory)
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
return;
+ WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);
+
/* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
@@ -750,6 +768,8 @@ void __init memory_dev_init(void)
*
* In case func() returns an error, walking is aborted and the error is
* returned.
+ *
+ * Called under device_hotplug_lock.
*/
int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func)
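
The memory.c hunks replace the linear subsystem-bus lookup with an xarray keyed by block id. A stand-alone sketch of the store/load/erase pattern mirrored above (hypothetical cache; kernel xarray API):

#include <linux/xarray.h>

static DEFINE_XARRAY(example_cache);

/* Sketch: cache an object under an integer id, as register_memory() does. */
static int example_cache_store(unsigned long id, void *obj)
{
	return xa_err(xa_store(&example_cache, id, obj, GFP_KERNEL));
}

/* Mirrors find_memory_block_by_id(): indexed lookup instead of a bus walk. */
static void *example_cache_find(unsigned long id)
{
	return xa_load(&example_cache, id);
}

/* Mirrors unregister_memory(): drop the cache entry. */
static void example_cache_erase(unsigned long id)
{
	xa_erase(&example_cache, id);
}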
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 10d7e818e118..5b02f69769e8 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -415,6 +415,9 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d AnonPages: %8lu kB\n"
"Node %d Shmem: %8lu kB\n"
"Node %d KernelStack: %8lu kB\n"
+#ifdef CONFIG_SHADOW_CALL_STACK
+ "Node %d ShadowCallStack:%8lu kB\n"
+#endif
"Node %d PageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
@@ -438,8 +441,11 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
nid, K(i.sharedram),
nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+ nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_KB),
+#endif
nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
- nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
+ nid, 0UL,
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
nid, K(sreclaimable +
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 8da314b81eab..c4a17e5edf8b 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -387,7 +387,7 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
*
* @domain: The platform-msi domain
* @virq: The base irq from which to perform the allocate operation
- * @nvec: How many interrupts to free from @virq
+ * @nr_irqs: How many interrupts to free from @virq
*
* Return 0 on success, or an error code on failure. Must be called
* with irq_domain_mutex held (which can only be done as part of a
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b27d0f6c18c9..c0d0a5490ac6 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -147,28 +147,30 @@ EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
* request_irq() APIs. This is the same as platform_get_irq(), except that it
* does not print an error message if an IRQ can not be obtained.
*
- * Example:
+ * For example::
+ *
* int irq = platform_get_irq_optional(pdev, 0);
* if (irq < 0)
* return irq;
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
+ int ret;
#ifdef CONFIG_SPARC
/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
if (!dev || num >= dev->archdata.num_irqs)
return -ENXIO;
- return dev->archdata.irqs[num];
+ ret = dev->archdata.irqs[num];
+ goto out;
#else
struct resource *r;
- int ret;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
ret = of_irq_get(dev->dev.of_node, num);
if (ret > 0 || ret == -EPROBE_DEFER)
- return ret;
+ goto out;
}
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
@@ -176,7 +178,7 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
if (r && r->flags & IORESOURCE_DISABLED) {
ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
if (ret)
- return ret;
+ goto out;
}
}
@@ -190,13 +192,17 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
struct irq_data *irqd;
irqd = irq_get_irq_data(r->start);
- if (!irqd)
- return -ENXIO;
+ if (!irqd) {
+ ret = -ENXIO;
+ goto out;
+ }
irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
}
- if (r)
- return r->start;
+ if (r) {
+ ret = r->start;
+ goto out;
+ }
/*
* For the index 0 interrupt, allow falling back to GpioInt
@@ -209,11 +215,14 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
- return ret;
+ goto out;
}
- return -ENXIO;
+ ret = -ENXIO;
#endif
+out:
+ WARN(ret == 0, "0 is an invalid IRQ number\n");
+ return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
@@ -226,12 +235,13 @@ EXPORT_SYMBOL_GPL(platform_get_irq_optional);
* IRQ fails. Device drivers should check the return value for errors so as to
* not pass a negative integer value to the request_irq() APIs.
*
- * Example:
+ * For example::
+ *
* int irq = platform_get_irq(pdev, 0);
* if (irq < 0)
* return irq;
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
@@ -303,8 +313,10 @@ static int __platform_get_irq_byname(struct platform_device *dev,
}
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
- if (r)
+ if (r) {
+ WARN(r->start == 0, "0 is an invalid IRQ number\n");
return r->start;
+ }
return -ENXIO;
}
@@ -316,7 +328,7 @@ static int __platform_get_irq_byname(struct platform_device *dev,
*
* Get an IRQ like platform_get_irq(), but then by name rather then by index.
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
@@ -338,7 +350,7 @@ EXPORT_SYMBOL_GPL(platform_get_irq_byname);
* Get an optional IRQ by name like platform_get_irq_byname(). Except that it
* does not print an error message if an IRQ can not be obtained.
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_byname_optional(struct platform_device *dev,
const char *name)
@@ -670,7 +682,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo)
{
- int ret = -ENOMEM;
+ int ret;
struct platform_device *pdev;
pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
@@ -851,6 +863,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv,
/* temporary section violation during probe() */
drv->probe = probe;
retval = code = __platform_driver_register(drv, module);
+ if (retval)
+ return retval;
/*
* Fixup that section violation, being paranoid about code scanning
@@ -975,7 +989,7 @@ EXPORT_SYMBOL_GPL(__platform_register_drivers);
* @drivers: an array of drivers to unregister
* @count: the number of drivers to unregister
*
- * Unegisters platform drivers specified by an array. This is typically used
+ * Unregisters platform drivers specified by an array. This is typically used
* to complement an earlier call to platform_register_drivers(). Drivers are
* unregistered in the reverse order in which they were registered.
*/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0e07e17c2def..bb98b813554f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -562,72 +562,26 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
- * suspend_event - Return a "suspend" message for given "resume" one.
- * @resume_msg: PM message representing a system-wide resume transition.
- */
-static pm_message_t suspend_event(pm_message_t resume_msg)
-{
- switch (resume_msg.event) {
- case PM_EVENT_RESUME:
- return PMSG_SUSPEND;
- case PM_EVENT_THAW:
- case PM_EVENT_RESTORE:
- return PMSG_FREEZE;
- case PM_EVENT_RECOVER:
- return PMSG_HIBERNATE;
- }
- return PMSG_ON;
-}
-
-/**
- * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * dev_pm_skip_resume - System-wide device resume optimization check.
* @dev: Target device.
*
- * Checks whether or not the device may be left in suspend after a system-wide
- * transition to the working state.
+ * Return:
+ * - %false if the transition under way is RESTORE.
+ * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
+ * - The logical negation of %power.must_resume otherwise (that is, when the
+ * transition under way is RESUME).
*/
-bool dev_pm_may_skip_resume(struct device *dev)
+bool dev_pm_skip_resume(struct device *dev)
{
- return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
-}
-
-static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
+ if (pm_transition.event == PM_EVENT_RESTORE)
+ return false;
- if (info_p)
- *info_p = info;
+ if (pm_transition.event == PM_EVENT_THAW)
+ return dev_pm_skip_suspend(dev);
- return callback;
+ return !dev->power.must_resume;
}
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p);
-
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
- pm_message_t state,
- const char **info_p);
-
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -639,8 +593,8 @@ static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
*/
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
bool skip_resume;
int error = 0;
@@ -656,37 +610,41 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dpm_wait_for_superior(dev, async))
goto Out;
- skip_resume = dev_pm_may_skip_resume(dev);
+ skip_resume = dev_pm_skip_resume(dev);
+ /*
+ * If the driver callback is skipped below or by the middle layer
+ * callback and device_resume_early() also skips the driver callback for
+ * this device later, it needs to appear as "suspended" to PM-runtime,
+ * so change its status accordingly.
+ *
+ * Otherwise, the device is going to be resumed, so set its PM-runtime
+ * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
+ * to avoid confusing drivers that don't use it.
+ */
+ if (skip_resume)
+ pm_runtime_set_suspended(dev);
+ else if (dev_pm_skip_suspend(dev))
+ pm_runtime_set_active(dev);
- callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
if (skip_resume)
goto Skip;
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- pm_message_t suspend_msg = suspend_event(state);
-
- /*
- * If "freeze" callbacks have been skipped during a transition
- * related to hibernation, the subsequent "thaw" callbacks must
- * be skipped too or bad things may happen. Otherwise, resume
- * callbacks are going to be run for the device, so its runtime
- * PM status must be changed to reflect the new state after the
- * transition under way.
- */
- if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
- !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
- if (state.event == PM_EVENT_THAW) {
- skip_resume = true;
- goto Skip;
- } else {
- pm_runtime_set_active(dev);
- }
- }
- }
-
if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
@@ -698,20 +656,6 @@ Run:
Skip:
dev->power.is_noirq_suspended = false;
- if (skip_resume) {
- /* Make the next phases of resume skip the device. */
- dev->power.is_late_suspended = false;
- dev->power.is_suspended = false;
- /*
- * The device is going to be left in suspend, but it might not
- * have been in runtime suspend before the system suspended, so
- * its runtime PM status needs to be updated to avoid confusing
- * the runtime PM framework when runtime PM is enabled for the
- * device again.
- */
- pm_runtime_set_suspended(dev);
- }
-
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
@@ -810,35 +754,6 @@ void dpm_resume_noirq(pm_message_t state)
cpuidle_resume();
}
-static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
-
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -849,8 +764,8 @@ static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
*/
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
@@ -865,17 +780,37 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dpm_wait_for_superior(dev, async))
goto Out;
- callback = dpm_subsys_resume_early_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_resume(dev))
+ goto Skip;
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev->driver && dev->driver->pm) {
info = "early driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
dev->power.is_late_suspended = false;
- Out:
+Out:
TRACE_RESUME(error);
pm_runtime_enable(dev);
@@ -1245,61 +1180,6 @@ static void dpm_superior_set_must_resume(struct device *dev)
device_links_read_unlock(idx);
}
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
-
-static bool device_must_resume(struct device *dev, pm_message_t state,
- bool no_subsys_suspend_noirq)
-{
- pm_message_t resume_msg = resume_event(state);
-
- /*
- * If all of the device driver's "noirq", "late" and "early" callbacks
- * are invoked directly by the core, the decision to allow the device to
- * stay in suspend can be based on its current runtime PM status and its
- * wakeup settings.
- */
- if (no_subsys_suspend_noirq &&
- !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
- !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
- !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
- return !pm_runtime_status_suspended(dev) &&
- (resume_msg.event != PM_EVENT_RESUME ||
- (device_can_wakeup(dev) && !device_may_wakeup(dev)));
-
- /*
- * The only safe strategy here is to require that if the device may not
- * be left in suspend, resume callbacks must be invoked for it.
- */
- return !dev->power.may_skip_resume;
-}
-
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1311,9 +1191,8 @@ static bool device_must_resume(struct device *dev, pm_message_t state,
*/
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
- bool no_subsys_cb = false;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
@@ -1327,13 +1206,23 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
- no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
-
- if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+ if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
@@ -1351,13 +1240,16 @@ Run:
Skip:
dev->power.is_noirq_suspended = true;
- if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
- dev->power.must_resume = dev->power.must_resume ||
- atomic_read(&dev->power.usage_count) > 1 ||
- device_must_resume(dev, state, no_subsys_cb);
- } else {
+ /*
+ * Skipping the resume of devices that were in use right before the
+ * system suspend (as indicated by their PM-runtime usage counters)
+ * would be suboptimal. Resume them as well when skipping their resume
+ * is not allowed.
+ */
+ if (atomic_read(&dev->power.usage_count) > 1 ||
+ !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume))
dev->power.must_resume = true;
- }
if (dev->power.must_resume)
dpm_superior_set_must_resume(dev);
@@ -1474,35 +1366,6 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
spin_unlock_irq(&parent->power.lock);
}
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
-
/**
* __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1513,8 +1376,8 @@ static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
*/
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
@@ -1535,12 +1398,23 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
- if (dev_pm_smart_suspend_and_suspended(dev) &&
- !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+ if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
@@ -1766,7 +1640,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.direct_complete = false;
}
- dev->power.may_skip_resume = false;
+ dev->power.may_skip_resume = true;
dev->power.must_resume = false;
dpm_watchdog_set(&wd, dev);
@@ -1970,7 +1844,7 @@ unlock:
spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
(ret > 0 || dev->power.no_pm_callbacks) &&
- !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
spin_unlock_irq(&dev->power.lock);
return 0;
}
@@ -2128,7 +2002,7 @@ void device_pm_check_callbacks(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
-bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+bool dev_pm_skip_suspend(struct device *dev)
{
return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
pm_runtime_status_suspended(dev);
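The renamed helpers pair with the driver flags: DPM_FLAG_SMART_SUSPEND feeds
dev_pm_skip_suspend(), DPM_FLAG_MAY_SKIP_RESUME feeds dev_pm_skip_resume(),
and DPM_FLAG_NO_DIRECT_COMPLETE opts out of the direct-complete path. A
minimal sketch of a driver opting in (the foo_* names and callback bodies are
hypothetical, not part of this patch):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Let the core skip suspend callbacks when the device is already
	 * runtime-suspended and leave it suspended across resume when
	 * nothing requires it to wake up. */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
					    DPM_FLAG_MAY_SKIP_RESUME);
	return 0;
}

static int __maybe_unused foo_resume_noirq(struct device *dev)
{
	/* Honor the core's decision instead of open-coding the checks. */
	if (dev_pm_skip_resume(dev))
		return 0;
	/* ... power the device back up ... */
	return 0;
}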
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 99c7da112c95..9f62790f644c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -523,13 +523,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
repeat:
retval = rpm_check_suspend_allowed(dev);
-
if (retval < 0)
- ; /* Conditions are wrong. */
+ goto out; /* Conditions are wrong. */
/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
- else if (dev->power.runtime_status == RPM_RESUMING &&
- !(rpmflags & RPM_ASYNC))
+ if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
retval = -EAGAIN;
if (retval)
goto out;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 2b99fe1eb207..24d25cf8ab14 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -666,7 +666,7 @@ int dpm_sysfs_add(struct device *dev)
if (rc)
return rc;
- if (pm_runtime_callbacks_present(dev)) {
+ if (!pm_runtime_has_no_callbacks(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
if (rc)
goto err_out;
@@ -709,7 +709,7 @@ int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
if (rc)
return rc;
- if (pm_runtime_callbacks_present(dev)) {
+ if (!pm_runtime_has_no_callbacks(dev)) {
rc = sysfs_group_change_owner(
&dev->kobj, &pm_runtime_attr_group, kuid, kgid);
if (rc)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 5f35c0ccf5e0..1e6d75e65938 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -708,14 +708,23 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
struct fwnode_handle *child)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct fwnode_handle *fwnode = NULL;
+ struct fwnode_handle *fwnode = NULL, *next;
if (dev->of_node)
fwnode = &dev->of_node->fwnode;
else if (adev)
fwnode = acpi_fwnode_handle(adev);
- return fwnode_get_next_child_node(fwnode, child);
+ /* Try to find a child in primary fwnode */
+ next = fwnode_get_next_child_node(fwnode, child);
+ if (next)
+ return next;
+
+ /* When no more children in primary, continue with secondary */
+ if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ next = fwnode_get_next_child_node(fwnode->secondary, child);
+
+ return next;
}
EXPORT_SYMBOL_GPL(device_get_next_child_node);
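With the secondary-fwnode fallback above, existing iterators pick up nodes
from both firmware node spaces without change. A hypothetical consumer,
counting children via the stock helper:

#include <linux/property.h>

static unsigned int foo_count_children(struct device *dev)
{
	struct fwnode_handle *child;
	unsigned int n = 0;

	/* Children of the primary fwnode are returned first, then those
	 * of its secondary (e.g. software nodes), if any. */
	device_for_each_child_node(dev, child)
		n++;

	return n;
}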
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index e72843fe41df..089e5dc7144a 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -227,6 +227,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (*ppos < 0 || !count)
return -EINVAL;
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -371,6 +374,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
if (*ppos < 0 || !count)
return -EINVAL;
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 008f8da69d97..62b95a9212ae 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -246,6 +246,63 @@ static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
.max_raw_write = I2C_SMBUS_BLOCK_MAX,
};
+static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (count < 2)
+ return -EINVAL;
+
+ count--;
+ return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
+ (u8 *)data + 1);
+}
+
+static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret, count, len = val_size;
+
+ if (reg_size != 2)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(i2c, ((u16 *)reg)[0] & 0xff,
+ ((u16 *)reg)[0] >> 8);
+ if (ret < 0)
+ return ret;
+
+ count = 0;
+ do {
+ /* Current Address Read */
+ ret = i2c_smbus_read_byte(i2c);
+ if (ret < 0)
+ break;
+
+ *((u8 *)val++) = ret;
+ count++;
+ len--;
+ } while (len > 0);
+
+ if (count == val_size)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
+ .write = regmap_i2c_smbus_i2c_write_reg16,
+ .read = regmap_i2c_smbus_i2c_read_reg16,
+ .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+ .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+};
+
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
const struct regmap_config *config)
{
@@ -255,6 +312,10 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
return &regmap_i2c_smbus_i2c_block;
+ else if (config->val_bits == 8 && config->reg_bits == 16 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return &regmap_i2c_smbus_i2c_block_reg16;
else if (config->val_bits == 16 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
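A client driver reaches the new reg16 bus simply by describing its registers;
a minimal, hypothetical configuration (foo_* names assumed):

#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 16,	/* selects regmap_i2c_smbus_i2c_block_reg16 when */
	.val_bits = 8,	/* the adapter lacks I2C_FUNC_I2C but offers
			 * I2C_FUNC_SMBUS_I2C_BLOCK */
};

static int foo_probe(struct i2c_client *client)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(client, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);
	return 0;
}

Note that the reg16 read path above issues a register write followed by
byte-wise Current Address Reads, so a multi-byte read is not one atomic
transfer.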
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 3d64c9331a82..4340e1d268b6 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -541,8 +541,9 @@ static const struct irq_domain_ops regmap_domain_ops = {
};
/**
- * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
+ * regmap_add_irq_chip_np() - Use standard regmap IRQ controller handling
*
+ * @np: The device_node to which the IRQ domain should be added.
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts.
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
@@ -556,9 +557,10 @@ static const struct irq_domain_ops regmap_domain_ops = {
* register cache. The chip driver is responsible for restoring the
* register values used by the IRQ controller over suspend and resume.
*/
-int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
- int irq_base, const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data)
+int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data *d;
int i;
@@ -769,12 +771,10 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
}
if (irq_base)
- d->domain = irq_domain_add_legacy(map->dev->of_node,
- chip->num_irqs, irq_base, 0,
- &regmap_domain_ops, d);
+ d->domain = irq_domain_add_legacy(np, chip->num_irqs, irq_base,
+ 0, &regmap_domain_ops, d);
else
- d->domain = irq_domain_add_linear(map->dev->of_node,
- chip->num_irqs,
+ d->domain = irq_domain_add_linear(np, chip->num_irqs,
&regmap_domain_ops, d);
if (!d->domain) {
dev_err(map->dev, "Failed to create IRQ domain\n");
@@ -808,6 +808,30 @@ err_alloc:
kfree(d);
return ret;
}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np);
+
+/**
+ * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
+ *
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * This is the same as regmap_add_irq_chip_np(), except that the device
+ * node of the regmap's underlying device is used.
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ return regmap_add_irq_chip_np(map->dev->of_node, map, irq, irq_flags,
+ irq_base, chip, data);
+}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
/**
@@ -875,9 +899,10 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
}
/**
- * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
+ * devm_regmap_add_irq_chip_np() - Resource manager regmap_add_irq_chip_np()
*
* @dev: The device to which the irq_chip belongs.
+ * @np: The device_node to which the IRQ domain should be added.
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
@@ -890,10 +915,11 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
* The &regmap_irq_chip_data will be automatically released when the device is
* unbound.
*/
-int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
- int irq_flags, int irq_base,
- const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data)
+int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np,
+ struct regmap *map, int irq, int irq_flags,
+ int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data **ptr, *d;
int ret;
@@ -903,8 +929,8 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
if (!ptr)
return -ENOMEM;
- ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
- chip, &d);
+ ret = regmap_add_irq_chip_np(np, map, irq, irq_flags, irq_base,
+ chip, &d);
if (ret < 0) {
devres_free(ptr);
return ret;
@@ -915,6 +941,32 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
*data = d;
return 0;
}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_np);
+
+/**
+ * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
+ *
+ * @dev: The device to which the irq_chip belongs.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The &regmap_irq_chip_data will be automatically released when the device is
+ * unbound.
+ */
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ return devm_regmap_add_irq_chip_np(dev, map->dev->of_node, map, irq,
+ irq_flags, irq_base, chip, data);
+}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
/**
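A sketch of a caller of the new _np variant: an MFD child (hypothetical
foo_* names) anchoring the IRQ domain on its own device_node instead of the
node of the device that owns the regmap:

	ret = devm_regmap_add_irq_chip_np(dev, dev->of_node, map, irq,
					  IRQF_ONESHOT, 0, &foo_irq_chip,
					  &foo_irq_data);
	if (ret) {
		dev_err(dev, "failed to add IRQ chip: %d\n", ret);
		return ret;
	}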
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 59f911e57719..c472f624382d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -827,6 +827,7 @@ struct regmap *__regmap_init(struct device *dev,
} else if (!bus->read || !bus->write) {
map->reg_read = _regmap_bus_reg_read;
map->reg_write = _regmap_bus_reg_write;
+ map->reg_update_bits = bus->reg_update_bits;
map->defer_caching = false;
goto skip_format_initialization;
@@ -2936,6 +2937,28 @@ int regmap_update_bits_base(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
+/**
+ * regmap_test_bits() - Check if all specified bits are set in a register.
+ *
+ * @map: Register map to operate on
+ * @reg: Register to read from
+ * @bits: Bits to test
+ *
+ * Returns the (negative) error code returned by the underlying regmap_read()
+ * if it fails, 0 if at least one of the tested bits is not set and 1 if all
+ * tested bits are set.
+ */
+int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(map, reg, &val);
+ if (ret)
+ return ret;
+
+ return (val & bits) == bits;
+}
+EXPORT_SYMBOL_GPL(regmap_test_bits);
+
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
struct regmap *map = async->map;
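The regmap_test_bits() helper added above condenses the common
read-and-mask pattern; a hypothetical ready-bit check (FOO_* names assumed):

	ret = regmap_test_bits(map, FOO_STATUS, FOO_READY | FOO_PLL_LOCK);
	if (ret < 0)
		return ret;	/* the underlying regmap_read() failed */
	if (!ret)
		return -EBUSY;	/* at least one tested bit is still clear */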
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 4af11a423475..a5bae551167d 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -46,7 +46,7 @@ static umode_t soc_attribute_mode(struct kobject *kobj,
struct attribute *attr,
int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
if ((attr == &dev_attr_machine.attr)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index de8d3543e8fe..e5eb27375416 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -712,19 +712,68 @@ EXPORT_SYMBOL_GPL(software_node_register_nodes);
* @nodes: Zero terminated array of software nodes to be unregistered
*
* Unregister multiple software nodes at once.
+ *
+ * NOTE: Be careful using this call if the nodes had parent pointers set up in
+ * them before registering. If so, it is wiser to remove the nodes
+ * individually, in the correct order (child before parent), instead of relying
+ * on the sequential order of the list of nodes in the array.
*/
void software_node_unregister_nodes(const struct software_node *nodes)
{
- struct swnode *swnode;
int i;
- for (i = 0; nodes[i].name; i++) {
- swnode = software_node_to_swnode(&nodes[i]);
+ for (i = 0; nodes[i].name; i++)
+ software_node_unregister(&nodes[i]);
+}
+EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
+
+/**
+ * software_node_register_node_group - Register a group of software nodes
+ * @node_group: NULL terminated array of software node pointers to be registered
+ *
+ * Register multiple software nodes at once.
+ */
+int software_node_register_node_group(const struct software_node **node_group)
+{
+ unsigned int i;
+ int ret;
+
+ if (!node_group)
+ return 0;
+
+ for (i = 0; node_group[i]; i++) {
+ ret = software_node_register(node_group[i]);
+ if (ret) {
+ software_node_unregister_node_group(node_group);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(software_node_register_node_group);
+
+/**
+ * software_node_unregister_node_group - Unregister a group of software nodes
+ * @node_group: NULL terminated array of software node pointers to be unregistered
+ *
+ * Unregister multiple software nodes at once.
+ */
+void software_node_unregister_node_group(const struct software_node **node_group)
+{
+ struct swnode *swnode;
+ unsigned int i;
+
+ if (!node_group)
+ return;
+
+ for (i = 0; node_group[i]; i++) {
+ swnode = software_node_to_swnode(node_group[i]);
if (swnode)
fwnode_remove_software_node(&swnode->fwnode);
}
}
-EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
+EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
/**
* software_node_register - Register static software node
@@ -741,6 +790,20 @@ int software_node_register(const struct software_node *node)
}
EXPORT_SYMBOL_GPL(software_node_register);
+/**
+ * software_node_unregister - Unregister static software node
+ * @node: The software node to be unregistered
+ */
+void software_node_unregister(const struct software_node *node)
+{
+ struct swnode *swnode;
+
+ swnode = software_node_to_swnode(node);
+ if (swnode)
+ fwnode_remove_software_node(&swnode->fwnode);
+}
+EXPORT_SYMBOL_GPL(software_node_unregister);
+
struct fwnode_handle *
fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent)
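The group helpers above take a NULL-terminated array of node pointers; a
minimal hypothetical group follows (the ordering caveat from the NOTE on
software_node_unregister_nodes() applies to teardown here as well, since the
array is walked front to back):

static const struct software_node foo_node = {
	.name = "foo",
};

static const struct software_node foo_child = {
	.name = "bar",
	.parent = &foo_node,
};

static const struct software_node *foo_group[] = {
	&foo_node,	/* parents first for registration */
	&foo_child,
	NULL
};

	ret = software_node_register_node_group(foo_group);
	/* ... on teardown ... */
	software_node_unregister_node_group(foo_group);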
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 025b1b77b11a..084b9efcefca 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -458,4 +458,6 @@ config BLK_DEV_RSXX
To compile this driver as a module, choose M here: the
module will be called rsxx.
+source "drivers/block/rnbd/Kconfig"
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 795facd8cf19..e1f63117ee94 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_ZRAM) += zram/
+obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
null_blk-objs := null_blk_main.o
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index a27804d71e12..5ca7216e9e01 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -407,7 +407,6 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info->name = "aoe";
q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 15e99697234a..df53dca5d02c 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -396,9 +396,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
bytes = sizeof(struct page *)*want;
new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
if (!new_pages) {
- new_pages = __vmalloc(bytes,
- GFP_NOIO | __GFP_ZERO,
- PAGE_KERNEL);
+ new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);
if (!new_pages)
return NULL;
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index aae99a2d7bd4..14345a87c7cc 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1570,34 +1570,6 @@ extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
-static inline void drbd_tcp_cork(struct socket *sock)
-{
- int val = 1;
- (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char*)&val, sizeof(val));
-}
-
-static inline void drbd_tcp_uncork(struct socket *sock)
-{
- int val = 0;
- (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char*)&val, sizeof(val));
-}
-
-static inline void drbd_tcp_nodelay(struct socket *sock)
-{
- int val = 1;
- (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char*)&val, sizeof(val));
-}
-
-static inline void drbd_tcp_quickack(struct socket *sock)
-{
- int val = 2;
- (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
- (char*)&val, sizeof(val));
-}
-
/* sets the number of 512 byte sectors of our virtual device */
void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index c094c3c2c5d4..45fbd526c453 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -660,7 +660,7 @@ static int __send_command(struct drbd_connection *connection, int vnr,
/* DRBD protocol "pings" are latency critical.
* This is supposed to trigger tcp_push_pending_frames() */
if (!err && (cmd == P_PING || cmd == P_PING_ACK))
- drbd_tcp_nodelay(sock->socket);
+ tcp_sock_set_nodelay(sock->socket->sk);
return err;
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c15e7083b13a..3a3f2b6a821f 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1051,8 +1051,8 @@ randomize:
/* we don't want delays.
* we use TCP_CORK where appropriate, though */
- drbd_tcp_nodelay(sock.socket);
- drbd_tcp_nodelay(msock.socket);
+ tcp_sock_set_nodelay(sock.socket->sk);
+ tcp_sock_set_nodelay(msock.socket->sk);
connection->data.socket = sock.socket;
connection->meta.socket = msock.socket;
@@ -1223,7 +1223,7 @@ static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, str
* quickly as possible, and let remote TCP know what we have
* received so far. */
if (err == -EAGAIN) {
- drbd_tcp_quickack(connection->data.socket);
+ tcp_sock_set_quickack(connection->data.socket->sk, 2);
drbd_unplug_all_devices(connection);
}
if (err > 0) {
@@ -4959,8 +4959,7 @@ static int receive_UnplugRemote(struct drbd_connection *connection, struct packe
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
- drbd_tcp_quickack(connection->data.socket);
-
+ tcp_sock_set_quickack(connection->data.socket->sk, 2);
return 0;
}
@@ -6162,7 +6161,7 @@ void drbd_send_acks_wf(struct work_struct *ws)
rcu_read_unlock();
if (tcp_cork)
- drbd_tcp_cork(connection->meta.socket);
+ tcp_sock_set_cork(connection->meta.socket->sk, true);
err = drbd_finish_peer_reqs(device);
kref_put(&device->kref, drbd_destroy_device);
@@ -6175,7 +6174,7 @@ void drbd_send_acks_wf(struct work_struct *ws)
}
if (tcp_cork)
- drbd_tcp_uncork(connection->meta.socket);
+ tcp_sock_set_cork(connection->meta.socket->sk, false);
return;
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 840c3aef3c5c..c80a2f1c3c2a 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -21,24 +21,6 @@
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
-/* Update disk stats at start of I/O request */
-static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
-{
- struct request_queue *q = device->rq_queue;
-
- generic_start_io_acct(q, bio_op(req->master_bio),
- req->i.size >> 9, &device->vdisk->part0);
-}
-
-/* Update disk stats when completing request upwards */
-static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
-{
- struct request_queue *q = device->rq_queue;
-
- generic_end_io_acct(q, bio_op(req->master_bio),
- &device->vdisk->part0, req->start_jif);
-}
-
static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
struct drbd_request *req;
@@ -263,7 +245,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
start_new_tl_epoch(first_peer_device(device)->connection);
/* Update disk stats */
- _drbd_end_io_acct(device, req);
+ bio_end_io_acct(req->master_bio, req->start_jif);
/* If READ failed,
* have it be pushed back to the retry work queue,
@@ -1222,16 +1204,15 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
bio_endio(bio);
return ERR_PTR(-ENOMEM);
}
- req->start_jif = start_jif;
+
+ /* Update disk stats */
+ req->start_jif = bio_start_io_acct(req->master_bio);
if (!get_ldev(device)) {
bio_put(req->private_bio);
req->private_bio = NULL;
}
- /* Update disk stats */
- _drbd_start_io_acct(device, req);
-
/* process discards always from our submitter thread */
if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
bio_op(bio) == REQ_OP_DISCARD)
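The conversion above moves drbd to the generic bio accounting helpers; a
hypothetical bio-based driver follows the same pattern:

#include <linux/blkdev.h>

static blk_qc_t foo_make_request(struct request_queue *q, struct bio *bio)
{
	unsigned long start_time = bio_start_io_acct(bio);

	/* ... service the bio ... */

	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
	return BLK_QC_T_NONE;
}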
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 0dc019da1f8d..2b89c9f2ca70 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -2098,7 +2098,7 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
if (uncork) {
mutex_lock(&connection->data.mutex);
if (connection->data.socket)
- drbd_tcp_uncork(connection->data.socket);
+ tcp_sock_set_cork(connection->data.socket->sk, false);
mutex_unlock(&connection->data.mutex);
}
@@ -2153,9 +2153,9 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
mutex_lock(&connection->data.mutex);
if (connection->data.socket) {
if (cork)
- drbd_tcp_cork(connection->data.socket);
+ tcp_sock_set_cork(connection->data.socket->sk, true);
else if (!uncork)
- drbd_tcp_uncork(connection->data.socket);
+ tcp_sock_set_cork(connection->data.socket->sk, false);
}
mutex_unlock(&connection->data.mutex);
}
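For reference, the tcp_sock_set_*() helpers used in the conversions above
operate directly on the struct sock and replace the removed
kernel_setsockopt() wrappers; condensed from the drbd usage (error handling
elided):

#include <linux/tcp.h>

	/* batch a run of ACK packets, then push them out in one burst */
	tcp_sock_set_cork(connection->meta.socket->sk, true);
	/* ... drbd_finish_peer_reqs() sends the queued packets ... */
	tcp_sock_set_cork(connection->meta.socket->sk, false);

	tcp_sock_set_nodelay(sock.socket->sk);			/* TCP_NODELAY = 1 */
	tcp_sock_set_quickack(connection->data.socket->sk, 2);	/* TCP_QUICKACK = 2 */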
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index c3daa64cb52c..3e9db22db2a8 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -337,8 +337,7 @@ static bool initialized;
/*
* globals used by 'result()'
*/
-#define MAX_REPLIES 16
-static unsigned char reply_buffer[MAX_REPLIES];
+static unsigned char reply_buffer[FD_RAW_REPLY_SIZE];
static int inr; /* size of reply buffer, when called from interrupt */
#define ST0 0
#define ST1 1
@@ -595,12 +594,12 @@ static unsigned char in_sector_offset; /* offset within physical sector,
static inline unsigned char fdc_inb(int fdc, int reg)
{
- return fd_inb(fdc_state[fdc].address + reg);
+ return fd_inb(fdc_state[fdc].address, reg);
}
static inline void fdc_outb(unsigned char value, int fdc, int reg)
{
- fd_outb(value, fdc_state[fdc].address + reg);
+ fd_outb(value, fdc_state[fdc].address, reg);
}
static inline bool drive_no_geom(int drive)
@@ -668,16 +667,12 @@ static struct output_log {
static int output_log_pos;
-#define current_reqD -1
#define MAXTIMEOUT -2
static void __reschedule_timeout(int drive, const char *message)
{
unsigned long delay;
- if (drive == current_reqD)
- drive = current_drive;
-
if (drive < 0 || drive >= N_DRIVE) {
delay = 20UL * HZ;
drive = 0;
@@ -827,59 +822,70 @@ static int set_dor(int fdc, char mask, char data)
return olddor;
}
-static void twaddle(void)
+static void twaddle(int fdc, int drive)
{
- if (drive_params[current_drive].select_delay)
+ if (drive_params[drive].select_delay)
return;
- fdc_outb(fdc_state[current_fdc].dor & ~(0x10 << UNIT(current_drive)),
- current_fdc, FD_DOR);
- fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
- drive_state[current_drive].select_date = jiffies;
+ fdc_outb(fdc_state[fdc].dor & ~(0x10 << UNIT(drive)),
+ fdc, FD_DOR);
+ fdc_outb(fdc_state[fdc].dor, fdc, FD_DOR);
+ drive_state[drive].select_date = jiffies;
}
/*
- * Reset all driver information about the current fdc.
+ * Reset all driver information about the specified fdc.
* This is needed after a reset, and after a raw command.
*/
-static void reset_fdc_info(int mode)
+static void reset_fdc_info(int fdc, int mode)
{
int drive;
- fdc_state[current_fdc].spec1 = fdc_state[current_fdc].spec2 = -1;
- fdc_state[current_fdc].need_configure = 1;
- fdc_state[current_fdc].perp_mode = 1;
- fdc_state[current_fdc].rawcmd = 0;
+ fdc_state[fdc].spec1 = fdc_state[fdc].spec2 = -1;
+ fdc_state[fdc].need_configure = 1;
+ fdc_state[fdc].perp_mode = 1;
+ fdc_state[fdc].rawcmd = 0;
for (drive = 0; drive < N_DRIVE; drive++)
- if (FDC(drive) == current_fdc &&
+ if (FDC(drive) == fdc &&
(mode || drive_state[drive].track != NEED_1_RECAL))
drive_state[drive].track = NEED_2_RECAL;
}
-/* selects the fdc and drive, and enables the fdc's input/dma. */
+/*
+ * selects the fdc and drive, and enables the fdc's input/dma.
+ * Both current_drive and current_fdc are changed to match the new drive.
+ */
static void set_fdc(int drive)
{
- unsigned int new_fdc = current_fdc;
+ unsigned int fdc;
- if (drive >= 0 && drive < N_DRIVE) {
- new_fdc = FDC(drive);
- current_drive = drive;
+ if (drive < 0 || drive >= N_DRIVE) {
+ pr_info("bad drive value %d\n", drive);
+ return;
}
- if (new_fdc >= N_FDC) {
+
+ fdc = FDC(drive);
+ if (fdc >= N_FDC) {
pr_info("bad fdc value\n");
return;
}
- current_fdc = new_fdc;
- set_dor(current_fdc, ~0, 8);
+
+ set_dor(fdc, ~0, 8);
#if N_FDC > 1
- set_dor(1 - current_fdc, ~8, 0);
+ set_dor(1 - fdc, ~8, 0);
#endif
- if (fdc_state[current_fdc].rawcmd == 2)
- reset_fdc_info(1);
- if (fdc_inb(current_fdc, FD_STATUS) != STATUS_READY)
- fdc_state[current_fdc].reset = 1;
+ if (fdc_state[fdc].rawcmd == 2)
+ reset_fdc_info(fdc, 1);
+ if (fdc_inb(fdc, FD_STATUS) != STATUS_READY)
+ fdc_state[fdc].reset = 1;
+
+ current_drive = drive;
+ current_fdc = fdc;
}
-/* locks the driver */
+/*
+ * locks the driver.
+ * Both current_drive and current_fdc are changed to match the new drive.
+ */
static int lock_fdc(int drive)
{
if (WARN(atomic_read(&usage_count) == 0,
@@ -1062,12 +1068,9 @@ static void setup_DMA(void)
unsigned long f;
if (raw_cmd->length == 0) {
- int i;
-
- pr_info("zero dma transfer size:");
- for (i = 0; i < raw_cmd->cmd_count; i++)
- pr_cont("%x,", raw_cmd->cmd[i]);
- pr_cont("\n");
+ print_hex_dump(KERN_INFO, "zero dma transfer size: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ raw_cmd->fullcmd, raw_cmd->cmd_count, false);
cont->done(0);
fdc_state[current_fdc].reset = 1;
return;
@@ -1104,62 +1107,62 @@ static void setup_DMA(void)
#endif
}
-static void show_floppy(void);
+static void show_floppy(int fdc);
/* waits until the fdc becomes ready */
-static int wait_til_ready(void)
+static int wait_til_ready(int fdc)
{
int status;
int counter;
- if (fdc_state[current_fdc].reset)
+ if (fdc_state[fdc].reset)
return -1;
for (counter = 0; counter < 10000; counter++) {
- status = fdc_inb(current_fdc, FD_STATUS);
+ status = fdc_inb(fdc, FD_STATUS);
if (status & STATUS_READY)
return status;
}
if (initialized) {
- DPRINT("Getstatus times out (%x) on fdc %d\n", status, current_fdc);
- show_floppy();
+ DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
+ show_floppy(fdc);
}
- fdc_state[current_fdc].reset = 1;
+ fdc_state[fdc].reset = 1;
return -1;
}
/* sends a command byte to the fdc */
-static int output_byte(char byte)
+static int output_byte(int fdc, char byte)
{
- int status = wait_til_ready();
+ int status = wait_til_ready(fdc);
if (status < 0)
return -1;
if (is_ready_state(status)) {
- fdc_outb(byte, current_fdc, FD_DATA);
+ fdc_outb(byte, fdc, FD_DATA);
output_log[output_log_pos].data = byte;
output_log[output_log_pos].status = status;
output_log[output_log_pos].jiffies = jiffies;
output_log_pos = (output_log_pos + 1) % OLOGSIZE;
return 0;
}
- fdc_state[current_fdc].reset = 1;
+ fdc_state[fdc].reset = 1;
if (initialized) {
DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
- byte, current_fdc, status);
- show_floppy();
+ byte, fdc, status);
+ show_floppy(fdc);
}
return -1;
}
/* gets the response from the fdc */
-static int result(void)
+static int result(int fdc)
{
int i;
int status = 0;
- for (i = 0; i < MAX_REPLIES; i++) {
- status = wait_til_ready();
+ for (i = 0; i < FD_RAW_REPLY_SIZE; i++) {
+ status = wait_til_ready(fdc);
if (status < 0)
break;
status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA;
@@ -1169,24 +1172,24 @@ static int result(void)
return i;
}
if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
- reply_buffer[i] = fdc_inb(current_fdc, FD_DATA);
+ reply_buffer[i] = fdc_inb(fdc, FD_DATA);
else
break;
}
if (initialized) {
DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
- current_fdc, status, i);
- show_floppy();
+ fdc, status, i);
+ show_floppy(fdc);
}
- fdc_state[current_fdc].reset = 1;
+ fdc_state[fdc].reset = 1;
return -1;
}
#define MORE_OUTPUT -2
/* does the fdc need more output? */
-static int need_more_output(void)
+static int need_more_output(int fdc)
{
- int status = wait_til_ready();
+ int status = wait_til_ready(fdc);
if (status < 0)
return -1;
@@ -1194,13 +1197,13 @@ static int need_more_output(void)
if (is_ready_state(status))
return MORE_OUTPUT;
- return result();
+ return result(fdc);
}
/* Set perpendicular mode as required, based on data rate, if supported.
* 82077 Now tested. 1Mbps data rate only possible with 82077-1.
*/
-static void perpendicular_mode(void)
+static void perpendicular_mode(int fdc)
{
unsigned char perp_mode;
@@ -1215,7 +1218,7 @@ static void perpendicular_mode(void)
default:
DPRINT("Invalid data rate for perpendicular mode!\n");
cont->done(0);
- fdc_state[current_fdc].reset = 1;
+ fdc_state[fdc].reset = 1;
/*
* convenient way to return to
* redo without too much hassle
@@ -1226,12 +1229,12 @@ static void perpendicular_mode(void)
} else
perp_mode = 0;
- if (fdc_state[current_fdc].perp_mode == perp_mode)
+ if (fdc_state[fdc].perp_mode == perp_mode)
return;
- if (fdc_state[current_fdc].version >= FDC_82077_ORIG) {
- output_byte(FD_PERPENDICULAR);
- output_byte(perp_mode);
- fdc_state[current_fdc].perp_mode = perp_mode;
+ if (fdc_state[fdc].version >= FDC_82077_ORIG) {
+ output_byte(fdc, FD_PERPENDICULAR);
+ output_byte(fdc, perp_mode);
+ fdc_state[fdc].perp_mode = perp_mode;
} else if (perp_mode) {
DPRINT("perpendicular mode not supported by this FDC.\n");
}
@@ -1240,16 +1243,15 @@ static void perpendicular_mode(void)
static int fifo_depth = 0xa;
static int no_fifo;
-static int fdc_configure(void)
+static int fdc_configure(int fdc)
{
/* Turn on FIFO */
- output_byte(FD_CONFIGURE);
- if (need_more_output() != MORE_OUTPUT)
+ output_byte(fdc, FD_CONFIGURE);
+ if (need_more_output(fdc) != MORE_OUTPUT)
return 0;
- output_byte(0);
- output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
- output_byte(0); /* pre-compensation from track
- 0 upwards */
+ output_byte(fdc, 0);
+ output_byte(fdc, 0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
+ output_byte(fdc, 0); /* pre-compensation from track 0 upwards */
return 1;
}
@@ -1274,7 +1276,7 @@ static int fdc_configure(void)
*
* These values are rounded up to the next highest available delay time.
*/
-static void fdc_specify(void)
+static void fdc_specify(int fdc, int drive)
{
unsigned char spec1;
unsigned char spec2;
@@ -1286,10 +1288,10 @@ static void fdc_specify(void)
int hlt_max_code = 0x7f;
int hut_max_code = 0xf;
- if (fdc_state[current_fdc].need_configure &&
- fdc_state[current_fdc].version >= FDC_82072A) {
- fdc_configure();
- fdc_state[current_fdc].need_configure = 0;
+ if (fdc_state[fdc].need_configure &&
+ fdc_state[fdc].version >= FDC_82072A) {
+ fdc_configure(fdc);
+ fdc_state[fdc].need_configure = 0;
}
switch (raw_cmd->rate & 0x03) {
@@ -1298,13 +1300,13 @@ static void fdc_specify(void)
break;
case 1:
dtr = 300;
- if (fdc_state[current_fdc].version >= FDC_82078) {
+ if (fdc_state[fdc].version >= FDC_82078) {
/* chose the default rate table, not the one
* where 1 = 2 Mbps */
- output_byte(FD_DRIVESPEC);
- if (need_more_output() == MORE_OUTPUT) {
- output_byte(UNIT(current_drive));
- output_byte(0xc0);
+ output_byte(fdc, FD_DRIVESPEC);
+ if (need_more_output(fdc) == MORE_OUTPUT) {
+ output_byte(fdc, UNIT(drive));
+ output_byte(fdc, 0xc0);
}
}
break;
@@ -1313,14 +1315,14 @@ static void fdc_specify(void)
break;
}
- if (fdc_state[current_fdc].version >= FDC_82072) {
+ if (fdc_state[fdc].version >= FDC_82072) {
scale_dtr = dtr;
hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
}
/* Convert step rate from microseconds to milliseconds and 4 bits */
- srt = 16 - DIV_ROUND_UP(drive_params[current_drive].srt * scale_dtr / 1000,
+ srt = 16 - DIV_ROUND_UP(drive_params[drive].srt * scale_dtr / 1000,
NOMINAL_DTR);
if (slow_floppy)
srt = srt / 4;
@@ -1328,14 +1330,14 @@ static void fdc_specify(void)
SUPBOUND(srt, 0xf);
INFBOUND(srt, 0);
- hlt = DIV_ROUND_UP(drive_params[current_drive].hlt * scale_dtr / 2,
+ hlt = DIV_ROUND_UP(drive_params[drive].hlt * scale_dtr / 2,
NOMINAL_DTR);
if (hlt < 0x01)
hlt = 0x01;
else if (hlt > 0x7f)
hlt = hlt_max_code;
- hut = DIV_ROUND_UP(drive_params[current_drive].hut * scale_dtr / 16,
+ hut = DIV_ROUND_UP(drive_params[drive].hut * scale_dtr / 16,
NOMINAL_DTR);
if (hut < 0x1)
hut = 0x1;
@@ -1346,12 +1348,12 @@ static void fdc_specify(void)
spec2 = (hlt << 1) | (use_virtual_dma & 1);
/* If these parameters did not change, just return with success */
- if (fdc_state[current_fdc].spec1 != spec1 ||
- fdc_state[current_fdc].spec2 != spec2) {
+ if (fdc_state[fdc].spec1 != spec1 ||
+ fdc_state[fdc].spec2 != spec2) {
/* Go ahead and set spec1 and spec2 */
- output_byte(FD_SPECIFY);
- output_byte(fdc_state[current_fdc].spec1 = spec1);
- output_byte(fdc_state[current_fdc].spec2 = spec2);
+ output_byte(fdc, FD_SPECIFY);
+ output_byte(fdc, fdc_state[fdc].spec1 = spec1);
+ output_byte(fdc, fdc_state[fdc].spec2 = spec2);
}
} /* fdc_specify */
@@ -1513,7 +1515,7 @@ static void setup_rw_floppy(void)
r = 0;
for (i = 0; i < raw_cmd->cmd_count; i++)
- r |= output_byte(raw_cmd->cmd[i]);
+ r |= output_byte(current_fdc, raw_cmd->fullcmd[i]);
debugt(__func__, "rw_command");
@@ -1524,7 +1526,7 @@ static void setup_rw_floppy(void)
}
if (!(flags & FD_RAW_INTR)) {
- inr = result();
+ inr = result(current_fdc);
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
fd_watchdog();
@@ -1562,29 +1564,29 @@ static void seek_interrupt(void)
floppy_ready();
}
-static void check_wp(void)
+static void check_wp(int fdc, int drive)
{
- if (test_bit(FD_VERIFY_BIT, &drive_state[current_drive].flags)) {
+ if (test_bit(FD_VERIFY_BIT, &drive_state[drive].flags)) {
/* check write protection */
- output_byte(FD_GETSTATUS);
- output_byte(UNIT(current_drive));
- if (result() != 1) {
- fdc_state[current_fdc].reset = 1;
+ output_byte(fdc, FD_GETSTATUS);
+ output_byte(fdc, UNIT(drive));
+ if (result(fdc) != 1) {
+ fdc_state[fdc].reset = 1;
return;
}
- clear_bit(FD_VERIFY_BIT, &drive_state[current_drive].flags);
+ clear_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
clear_bit(FD_NEED_TWADDLE_BIT,
- &drive_state[current_drive].flags);
- debug_dcl(drive_params[current_drive].flags,
+ &drive_state[drive].flags);
+ debug_dcl(drive_params[drive].flags,
"checking whether disk is write protected\n");
- debug_dcl(drive_params[current_drive].flags, "wp=%x\n",
+ debug_dcl(drive_params[drive].flags, "wp=%x\n",
reply_buffer[ST3] & 0x40);
if (!(reply_buffer[ST3] & 0x40))
set_bit(FD_DISK_WRITABLE_BIT,
- &drive_state[current_drive].flags);
+ &drive_state[drive].flags);
else
clear_bit(FD_DISK_WRITABLE_BIT,
- &drive_state[current_drive].flags);
+ &drive_state[drive].flags);
}
}
@@ -1628,7 +1630,7 @@ static void seek_floppy(void)
track = 1;
}
} else {
- check_wp();
+ check_wp(current_fdc, current_drive);
if (raw_cmd->track != drive_state[current_drive].track &&
(raw_cmd->flags & FD_RAW_NEED_SEEK))
track = raw_cmd->track;
@@ -1639,9 +1641,9 @@ static void seek_floppy(void)
}
do_floppy = seek_interrupt;
- output_byte(FD_SEEK);
- output_byte(UNIT(current_drive));
- if (output_byte(track) < 0) {
+ output_byte(current_fdc, FD_SEEK);
+ output_byte(current_fdc, UNIT(current_drive));
+ if (output_byte(current_fdc, track) < 0) {
reset_fdc();
return;
}
@@ -1742,14 +1744,14 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id)
do_print = !handler && print_unex && initialized;
- inr = result();
+ inr = result(current_fdc);
if (do_print)
print_result("unexpected interrupt", inr);
if (inr == 0) {
int max_sensei = 4;
do {
- output_byte(FD_SENSEI);
- inr = result();
+ output_byte(current_fdc, FD_SENSEI);
+ inr = result(current_fdc);
if (do_print)
print_result("sensei", inr);
max_sensei--;
@@ -1771,8 +1773,8 @@ static void recalibrate_floppy(void)
{
debugt(__func__, "");
do_floppy = recal_interrupt;
- output_byte(FD_RECALIBRATE);
- if (output_byte(UNIT(current_drive)) < 0)
+ output_byte(current_fdc, FD_RECALIBRATE);
+ if (output_byte(current_fdc, UNIT(current_drive)) < 0)
reset_fdc();
}
@@ -1782,7 +1784,7 @@ static void recalibrate_floppy(void)
static void reset_interrupt(void)
{
debugt(__func__, "");
- result(); /* get the status ready for set_fdc */
+ result(current_fdc); /* get the status ready for set_fdc */
if (fdc_state[current_fdc].reset) {
pr_info("reset set in interrupt, calling %ps\n", cont->error);
cont->error(); /* a reset just after a reset. BAD! */
@@ -1792,7 +1794,9 @@ static void reset_interrupt(void)
/*
* reset is done by pulling bit 2 of DOR low for a while (old FDCs),
- * or by setting the self clearing bit 7 of STATUS (newer FDCs)
+ * or by setting the self clearing bit 7 of STATUS (newer FDCs).
+ * This WILL trigger an interrupt, causing the handlers in the current
+ * cont's ->redo() to be called via reset_interrupt().
*/
static void reset_fdc(void)
{
@@ -1800,7 +1804,7 @@ static void reset_fdc(void)
do_floppy = reset_interrupt;
fdc_state[current_fdc].reset = 0;
- reset_fdc_info(0);
+ reset_fdc_info(current_fdc, 0);
/* Pseudo-DMA may intercept 'reset finished' interrupt. */
/* Irrelevant for systems with true DMA (i386). */
@@ -1819,7 +1823,7 @@ static void reset_fdc(void)
}
}
-static void show_floppy(void)
+static void show_floppy(int fdc)
{
int i;
@@ -1842,7 +1846,7 @@ static void show_floppy(void)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
reply_buffer, resultsize, true);
- pr_info("status=%x\n", fdc_inb(current_fdc, FD_STATUS));
+ pr_info("status=%x\n", fdc_inb(fdc, FD_STATUS));
pr_info("fdc_busy=%lu\n", fdc_busy);
if (do_floppy)
pr_info("do_floppy=%ps\n", do_floppy);
@@ -1868,7 +1872,7 @@ static void floppy_shutdown(struct work_struct *arg)
unsigned long flags;
if (initialized)
- show_floppy();
+ show_floppy(current_fdc);
cancel_activity();
flags = claim_dma_lock();
@@ -1934,7 +1938,7 @@ static void floppy_ready(void)
"calling disk change from floppy_ready\n");
if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
disk_change(current_drive) && !drive_params[current_drive].select_delay)
- twaddle(); /* this clears the dcl on certain
+ twaddle(current_fdc, current_drive); /* this clears the dcl on certain
* drive/controller combinations */
#ifdef fd_chose_dma_mode
@@ -1946,20 +1950,20 @@ static void floppy_ready(void)
#endif
if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) {
- perpendicular_mode();
- fdc_specify(); /* must be done here because of hut, hlt ... */
+ perpendicular_mode(current_fdc);
+ fdc_specify(current_fdc, current_drive); /* must be done here because of hut, hlt ... */
seek_floppy();
} else {
if ((raw_cmd->flags & FD_RAW_READ) ||
(raw_cmd->flags & FD_RAW_WRITE))
- fdc_specify();
+ fdc_specify(current_fdc, current_drive);
setup_rw_floppy();
}
}
static void floppy_start(void)
{
- reschedule_timeout(current_reqD, "floppy start");
+ reschedule_timeout(current_drive, "floppy start");
scandrives();
debug_dcl(drive_params[current_drive].flags,
@@ -2004,6 +2008,9 @@ static const struct cont_t intr_cont = {
.done = (done_f)empty
};
+/* schedules handler, waiting for completion. May be interrupted, will then
+ * return -EINTR, in which case the driver will automatically be unlocked.
+ */
static int wait_til_done(void (*handler)(void), bool interruptible)
{
int ret;
@@ -2059,18 +2066,19 @@ static void success_and_wakeup(void)
* ==========================
*/
-static int next_valid_format(void)
+static int next_valid_format(int drive)
{
int probed_format;
- probed_format = drive_state[current_drive].probed_format;
+ probed_format = drive_state[drive].probed_format;
while (1) {
- if (probed_format >= 8 || !drive_params[current_drive].autodetect[probed_format]) {
- drive_state[current_drive].probed_format = 0;
+ if (probed_format >= FD_AUTODETECT_SIZE ||
+ !drive_params[drive].autodetect[probed_format]) {
+ drive_state[drive].probed_format = 0;
return 1;
}
- if (floppy_type[drive_params[current_drive].autodetect[probed_format]].sect) {
- drive_state[current_drive].probed_format = probed_format;
+ if (floppy_type[drive_params[drive].autodetect[probed_format]].sect) {
+ drive_state[drive].probed_format = probed_format;
return 0;
}
probed_format++;
@@ -2083,7 +2091,7 @@ static void bad_flp_intr(void)
if (probing) {
drive_state[current_drive].probed_format++;
- if (!next_valid_format())
+ if (!next_valid_format(current_drive))
return;
}
err_count = ++(*errors);
@@ -2843,6 +2851,9 @@ static int set_next_request(void)
return current_req != NULL;
}
+/* Starts or continues processing requests. Will automatically unlock the
+ * driver at end of request.
+ */
static void redo_fd_request(void)
{
int drive;
@@ -2867,7 +2878,7 @@ do_request:
}
drive = (long)current_req->rq_disk->private_data;
set_fdc(drive);
- reschedule_timeout(current_reqD, "redo fd request");
+ reschedule_timeout(current_drive, "redo fd request");
set_floppy(drive);
raw_cmd = &default_raw_cmd;
@@ -2885,7 +2896,7 @@ do_request:
if (!_floppy) { /* Autodetection */
if (!probing) {
drive_state[current_drive].probed_format = 0;
- if (next_valid_format()) {
+ if (next_valid_format(current_drive)) {
DPRINT("no autodetectable formats\n");
_floppy = NULL;
request_done(0);
@@ -2904,7 +2915,7 @@ do_request:
}
if (test_bit(FD_NEED_TWADDLE_BIT, &drive_state[current_drive].flags))
- twaddle();
+ twaddle(current_fdc, current_drive);
schedule_bh(floppy_start);
debugt(__func__, "queue fd request");
return;
@@ -2917,6 +2928,7 @@ static const struct cont_t rw_cont = {
.done = request_done
};
+/* schedule the request and automatically unlock the driver on completion */
static void process_fd_request(void)
{
cont = &rw_cont;
@@ -2938,17 +2950,17 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
(unsigned long long) current_req->cmd_flags))
return BLK_STS_IOERR;
- spin_lock_irq(&floppy_lock);
- list_add_tail(&bd->rq->queuelist, &floppy_reqs);
- spin_unlock_irq(&floppy_lock);
-
if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
- return BLK_STS_OK;
+ return BLK_STS_RESOURCE;
}
+ spin_lock_irq(&floppy_lock);
+ list_add_tail(&bd->rq->queuelist, &floppy_reqs);
+ spin_unlock_irq(&floppy_lock);
+
command_status = FD_COMMAND_NONE;
__reschedule_timeout(MAXTIMEOUT, "fd_request");
set_fdc(0);
@@ -2996,6 +3008,10 @@ static const struct cont_t reset_cont = {
.done = generic_done
};
+/*
+ * Resets the FDC connected to drive <drive>.
+ * Both current_drive and current_fdc are changed to match the new drive.
+ */
static int user_reset_fdc(int drive, int arg, bool interruptible)
{
int ret;
@@ -3006,6 +3022,9 @@ static int user_reset_fdc(int drive, int arg, bool interruptible)
if (arg == FD_RESET_ALWAYS)
fdc_state[current_fdc].reset = 1;
if (fdc_state[current_fdc].reset) {
+ /* note: reset_fdc will take care of unlocking the driver
+ * on completion.
+ */
cont = &reset_cont;
ret = wait_til_done(reset_fdc, interruptible);
if (ret == -EINTR)
@@ -3059,7 +3078,7 @@ static void raw_cmd_done(int flag)
raw_cmd->flags |= FD_RAW_HARDFAILURE;
} else {
raw_cmd->reply_count = inr;
- if (raw_cmd->reply_count > MAX_REPLIES)
+ if (raw_cmd->reply_count > FD_RAW_REPLY_SIZE)
raw_cmd->reply_count = 0;
for (i = 0; i < raw_cmd->reply_count; i++)
raw_cmd->reply[i] = reply_buffer[i];
@@ -3170,18 +3189,10 @@ loop:
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
- if (ptr->cmd_count > 33)
- /* the command may now also take up the space
- * initially intended for the reply & the
- * reply count. Needed for long 82078 commands
- * such as RESTORE, which takes ... 17 command
- * bytes. Murphy's law #137: When you reserve
- * 16 bytes for a structure, you'll one day
- * discover that you really need 17...
- */
+ if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE)
return -EINVAL;
- for (i = 0; i < 16; i++)
+ for (i = 0; i < FD_RAW_REPLY_SIZE; i++)
ptr->reply[i] = 0;
ptr->resultcode = 0;
@@ -3423,13 +3434,13 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static bool valid_floppy_drive_params(const short autodetect[8],
+static bool valid_floppy_drive_params(const short autodetect[FD_AUTODETECT_SIZE],
int native_format)
{
size_t floppy_type_size = ARRAY_SIZE(floppy_type);
size_t i = 0;
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < FD_AUTODETECT_SIZE; ++i) {
if (autodetect[i] < 0 ||
autodetect[i] >= floppy_type_size)
return false;
@@ -3610,7 +3621,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
case FDTWADDLE:
if (lock_fdc(drive))
return -EINTR;
- twaddle();
+ twaddle(current_fdc, current_drive);
process_fd_request();
return 0;
default:
@@ -3654,7 +3665,7 @@ struct compat_floppy_drive_params {
struct floppy_max_errors max_errors;
char flags;
char read_track;
- short autodetect[8];
+ short autodetect[FD_AUTODETECT_SIZE];
compat_int_t checkfreq;
compat_int_t native_format;
};
@@ -4298,79 +4309,79 @@ static const struct block_device_operations floppy_fops = {
/* Determine the floppy disk controller type */
/* This routine was written by David C. Niemi */
-static char __init get_fdc_version(void)
+static char __init get_fdc_version(int fdc)
{
int r;
- output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
- if (fdc_state[current_fdc].reset)
+ output_byte(fdc, FD_DUMPREGS); /* 82072 and better know DUMPREGS */
+ if (fdc_state[fdc].reset)
return FDC_NONE;
- r = result();
+ r = result(fdc);
if (r <= 0x00)
return FDC_NONE; /* No FDC present ??? */
if ((r == 1) && (reply_buffer[0] == 0x80)) {
- pr_info("FDC %d is an 8272A\n", current_fdc);
+ pr_info("FDC %d is an 8272A\n", fdc);
return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
}
if (r != 10) {
pr_info("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
- current_fdc, r);
+ fdc, r);
return FDC_UNKNOWN;
}
- if (!fdc_configure()) {
- pr_info("FDC %d is an 82072\n", current_fdc);
+ if (!fdc_configure(fdc)) {
+ pr_info("FDC %d is an 82072\n", fdc);
return FDC_82072; /* 82072 doesn't know CONFIGURE */
}
- output_byte(FD_PERPENDICULAR);
- if (need_more_output() == MORE_OUTPUT) {
- output_byte(0);
+ output_byte(fdc, FD_PERPENDICULAR);
+ if (need_more_output(fdc) == MORE_OUTPUT) {
+ output_byte(fdc, 0);
} else {
- pr_info("FDC %d is an 82072A\n", current_fdc);
+ pr_info("FDC %d is an 82072A\n", fdc);
return FDC_82072A; /* 82072A as found on Sparcs. */
}
- output_byte(FD_UNLOCK);
- r = result();
+ output_byte(fdc, FD_UNLOCK);
+ r = result(fdc);
if ((r == 1) && (reply_buffer[0] == 0x80)) {
- pr_info("FDC %d is a pre-1991 82077\n", current_fdc);
+ pr_info("FDC %d is a pre-1991 82077\n", fdc);
return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
* LOCK/UNLOCK */
}
if ((r != 1) || (reply_buffer[0] != 0x00)) {
pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
- current_fdc, r);
+ fdc, r);
return FDC_UNKNOWN;
}
- output_byte(FD_PARTID);
- r = result();
+ output_byte(fdc, FD_PARTID);
+ r = result(fdc);
if (r != 1) {
pr_info("FDC %d init: PARTID: unexpected return of %d bytes.\n",
- current_fdc, r);
+ fdc, r);
return FDC_UNKNOWN;
}
if (reply_buffer[0] == 0x80) {
- pr_info("FDC %d is a post-1991 82077\n", current_fdc);
+ pr_info("FDC %d is a post-1991 82077\n", fdc);
return FDC_82077; /* Revised 82077AA passes all the tests */
}
switch (reply_buffer[0] >> 5) {
case 0x0:
/* Either a 82078-1 or a 82078SL running at 5Volt */
- pr_info("FDC %d is an 82078.\n", current_fdc);
+ pr_info("FDC %d is an 82078.\n", fdc);
return FDC_82078;
case 0x1:
- pr_info("FDC %d is a 44pin 82078\n", current_fdc);
+ pr_info("FDC %d is a 44pin 82078\n", fdc);
return FDC_82078;
case 0x2:
- pr_info("FDC %d is a S82078B\n", current_fdc);
+ pr_info("FDC %d is a S82078B\n", fdc);
return FDC_S82078B;
case 0x3:
- pr_info("FDC %d is a National Semiconductor PC87306\n", current_fdc);
+ pr_info("FDC %d is a National Semiconductor PC87306\n", fdc);
return FDC_87306;
default:
pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n",
- current_fdc, reply_buffer[0] >> 5);
+ fdc, reply_buffer[0] >> 5);
return FDC_82078_UNKN;
}
} /* get_fdc_version */
@@ -4534,11 +4545,13 @@ static void floppy_device_release(struct device *dev)
static int floppy_resume(struct device *dev)
{
int fdc;
+ int saved_drive;
+ saved_drive = current_drive;
for (fdc = 0; fdc < N_FDC; fdc++)
if (fdc_state[fdc].address != -1)
- user_reset_fdc(-1, FD_RESET_ALWAYS, false);
-
+ user_reset_fdc(REVDRIVE(fdc, 0), FD_RESET_ALWAYS, false);
+ set_fdc(saved_drive);
return 0;
}
@@ -4646,16 +4659,15 @@ static int __init do_floppy_init(void)
config_types();
for (i = 0; i < N_FDC; i++) {
- current_fdc = i;
- memset(&fdc_state[current_fdc], 0, sizeof(*fdc_state));
- fdc_state[current_fdc].dtr = -1;
- fdc_state[current_fdc].dor = 0x4;
+ memset(&fdc_state[i], 0, sizeof(*fdc_state));
+ fdc_state[i].dtr = -1;
+ fdc_state[i].dor = 0x4;
#if defined(__sparc__) || defined(__mc68000__)
/*sparcs/sun3x don't have a DOR reset which we can fall back on to */
#ifdef __mc68000__
if (MACH_IS_SUN3X)
#endif
- fdc_state[current_fdc].version = FDC_82072A;
+ fdc_state[i].version = FDC_82072A;
#endif
}
@@ -4697,30 +4709,29 @@ static int __init do_floppy_init(void)
msleep(10);
for (i = 0; i < N_FDC; i++) {
- current_fdc = i;
- fdc_state[current_fdc].driver_version = FD_DRIVER_VERSION;
+ fdc_state[i].driver_version = FD_DRIVER_VERSION;
for (unit = 0; unit < 4; unit++)
- fdc_state[current_fdc].track[unit] = 0;
- if (fdc_state[current_fdc].address == -1)
+ fdc_state[i].track[unit] = 0;
+ if (fdc_state[i].address == -1)
continue;
- fdc_state[current_fdc].rawcmd = 2;
- if (user_reset_fdc(-1, FD_RESET_ALWAYS, false)) {
+ fdc_state[i].rawcmd = 2;
+ if (user_reset_fdc(REVDRIVE(i, 0), FD_RESET_ALWAYS, false)) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
- floppy_release_regions(current_fdc);
- fdc_state[current_fdc].address = -1;
- fdc_state[current_fdc].version = FDC_NONE;
+ floppy_release_regions(i);
+ fdc_state[i].address = -1;
+ fdc_state[i].version = FDC_NONE;
continue;
}
/* Try to determine the floppy controller type */
- fdc_state[current_fdc].version = get_fdc_version();
- if (fdc_state[current_fdc].version == FDC_NONE) {
+ fdc_state[i].version = get_fdc_version(i);
+ if (fdc_state[i].version == FDC_NONE) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
- floppy_release_regions(current_fdc);
- fdc_state[current_fdc].address = -1;
+ floppy_release_regions(i);
+ fdc_state[i].address = -1;
continue;
}
if (can_use_virtual_dma == 2 &&
- fdc_state[current_fdc].version < FDC_82072A)
+ fdc_state[i].version < FDC_82072A)
can_use_virtual_dma = 0;
have_no_fdc = 0;
@@ -4728,7 +4739,7 @@ static int __init do_floppy_init(void)
* properly, so force a reset for the standard FDC clones,
* to avoid interrupt garbage.
*/
- user_reset_fdc(-1, FD_RESET_ALWAYS, false);
+ user_reset_fdc(REVDRIVE(i, 0), FD_RESET_ALWAYS, false);
}
current_fdc = 0;
cancel_delayed_work(&fd_timeout);
@@ -4855,6 +4866,8 @@ static void floppy_release_regions(int fdc)
static int floppy_grab_irq_and_dma(void)
{
+ int fdc;
+
if (atomic_inc_return(&usage_count) > 1)
return 0;
@@ -4882,24 +4895,24 @@ static int floppy_grab_irq_and_dma(void)
}
}
- for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) {
- if (fdc_state[current_fdc].address != -1) {
- if (floppy_request_regions(current_fdc))
+ for (fdc = 0; fdc < N_FDC; fdc++) {
+ if (fdc_state[fdc].address != -1) {
+ if (floppy_request_regions(fdc))
goto cleanup;
}
}
- for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) {
- if (fdc_state[current_fdc].address != -1) {
- reset_fdc_info(1);
- fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
+ for (fdc = 0; fdc < N_FDC; fdc++) {
+ if (fdc_state[fdc].address != -1) {
+ reset_fdc_info(fdc, 1);
+ fdc_outb(fdc_state[fdc].dor, fdc, FD_DOR);
}
}
- current_fdc = 0;
+
set_dor(0, ~0, 8); /* avoid immediate interrupt */
- for (current_fdc = 0; current_fdc < N_FDC; current_fdc++)
- if (fdc_state[current_fdc].address != -1)
- fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (fdc_state[fdc].address != -1)
+ fdc_outb(fdc_state[fdc].dor, fdc, FD_DOR);
/*
* The driver will try and free resources and relies on us
* to know if they were allocated or not.
@@ -4910,15 +4923,16 @@ static int floppy_grab_irq_and_dma(void)
cleanup:
fd_free_irq();
fd_free_dma();
- while (--current_fdc >= 0)
- floppy_release_regions(current_fdc);
+ while (--fdc >= 0)
+ floppy_release_regions(fdc);
+ current_fdc = 0;
atomic_dec(&usage_count);
return -1;
}
static void floppy_release_irq_and_dma(void)
{
- int old_fdc;
+ int fdc;
#ifndef __sparc__
int drive;
#endif
@@ -4959,11 +4973,9 @@ static void floppy_release_irq_and_dma(void)
pr_info("auxiliary floppy timer still active\n");
if (work_pending(&floppy_work))
pr_info("work still pending\n");
- old_fdc = current_fdc;
- for (current_fdc = 0; current_fdc < N_FDC; current_fdc++)
- if (fdc_state[current_fdc].address != -1)
- floppy_release_regions(current_fdc);
- current_fdc = old_fdc;
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (fdc_state[fdc].address != -1)
+ floppy_release_regions(fdc);
}
#ifdef MODULE
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index da693e6a834e..2e96d8b8758b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -228,26 +228,36 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
blk_mq_unfreeze_queue(lo->lo_queue);
}
+/**
+ * loop_validate_block_size() - validates the passed-in block size
+ * @bsize: size to validate
+ */
static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
+loop_validate_block_size(unsigned short bsize)
{
- loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
- sector_t x = (sector_t)size;
- struct block_device *bdev = lo->lo_device;
+ if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ return -EINVAL;
- if (unlikely((loff_t)x != size))
- return -EFBIG;
- if (lo->lo_offset != offset)
- lo->lo_offset = offset;
- if (lo->lo_sizelimit != sizelimit)
- lo->lo_sizelimit = sizelimit;
- set_capacity(lo->lo_disk, x);
- bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
- /* let user-space know about the new size */
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
return 0;
}
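+
+/*
+ * Illustrative only (not part of the original change): with a 4 KiB
+ * PAGE_SIZE the accepted block sizes are exactly 512, 1024, 2048 and
+ * 4096, so loop_validate_block_size(4096) returns 0 while e.g. 520 or
+ * 8192 yield -EINVAL.
+ */
+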
+/**
+ * loop_set_size() - sets device size and notifies userspace
+ * @lo: struct loop_device to set the size for
+ * @size: new size of the loop device
+ *
+ * Callers must validate that the size passed into this function fits into
+ * a sector_t, e.g. using loop_validate_size().
+ */
+static void loop_set_size(struct loop_device *lo, loff_t size)
+{
+ struct block_device *bdev = lo->lo_device;
+
+ bd_set_size(bdev, size << SECTOR_SHIFT);
+
+ set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
+}
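+
+/*
+ * Illustrative only: @size is in 512-byte sectors, so loop_set_size(lo,
+ * 2048) resizes the block device to 2048 << SECTOR_SHIFT = 1 MiB.
+ */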
+
static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
struct page *rpage, unsigned roffs,
@@ -634,8 +644,8 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
static inline void loop_update_dio(struct loop_device *lo)
{
- __loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
- lo->use_dio);
+ __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
+ lo->use_dio);
}
static void loop_reread_partitions(struct loop_device *lo,
@@ -919,7 +929,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
static int loop_kthread_worker_fn(void *worker_ptr)
{
- current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
return kthread_worker_fn(worker_ptr);
}
@@ -952,23 +962,125 @@ static void loop_update_rotational(struct loop_device *lo)
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
-static int loop_set_fd(struct loop_device *lo, fmode_t mode,
- struct block_device *bdev, unsigned int arg)
+static int
+loop_release_xfer(struct loop_device *lo)
+{
+ int err = 0;
+ struct loop_func_table *xfer = lo->lo_encryption;
+
+ if (xfer) {
+ if (xfer->release)
+ err = xfer->release(lo);
+ lo->transfer = NULL;
+ lo->lo_encryption = NULL;
+ module_put(xfer->owner);
+ }
+ return err;
+}
+
+static int
+loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
+ const struct loop_info64 *i)
+{
+ int err = 0;
+
+ if (xfer) {
+ struct module *owner = xfer->owner;
+
+ if (!try_module_get(owner))
+ return -EINVAL;
+ if (xfer->init)
+ err = xfer->init(lo, i);
+ if (err)
+ module_put(owner);
+ else
+ lo->lo_encryption = xfer;
+ }
+ return err;
+}
+
+/**
+ * loop_set_status_from_info - configure device from loop_info
+ * @lo: struct loop_device to configure
+ * @info: struct loop_info64 to configure the device with
+ *
+ * Configures the loop device parameters according to the passed-in
+ * loop_info64 configuration.
+ */
+static int
+loop_set_status_from_info(struct loop_device *lo,
+ const struct loop_info64 *info)
+{
+ int err;
+ struct loop_func_table *xfer;
+ kuid_t uid = current_uid();
+
+ if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
+ return -EINVAL;
+
+ err = loop_release_xfer(lo);
+ if (err)
+ return err;
+
+ if (info->lo_encrypt_type) {
+ unsigned int type = info->lo_encrypt_type;
+
+ if (type >= MAX_LO_CRYPT)
+ return -EINVAL;
+ xfer = xfer_funcs[type];
+ if (xfer == NULL)
+ return -EINVAL;
+ } else
+ xfer = NULL;
+
+ err = loop_init_xfer(lo, xfer, info);
+ if (err)
+ return err;
+
+ lo->lo_offset = info->lo_offset;
+ lo->lo_sizelimit = info->lo_sizelimit;
+ memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
+ memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
+ lo->lo_file_name[LO_NAME_SIZE-1] = 0;
+ lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
+
+ if (!xfer)
+ xfer = &none_funcs;
+ lo->transfer = xfer->transfer;
+ lo->ioctl = xfer->ioctl;
+
+ lo->lo_flags = info->lo_flags;
+
+ lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
+ lo->lo_init[0] = info->lo_init[0];
+ lo->lo_init[1] = info->lo_init[1];
+ if (info->lo_encrypt_key_size) {
+ memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
+ info->lo_encrypt_key_size);
+ lo->lo_key_owner = uid;
+ }
+
+ return 0;
+}
+
+static int loop_configure(struct loop_device *lo, fmode_t mode,
+ struct block_device *bdev,
+ const struct loop_config *config)
{
struct file *file;
struct inode *inode;
struct address_space *mapping;
struct block_device *claimed_bdev = NULL;
- int lo_flags = 0;
int error;
loff_t size;
bool partscan;
+ unsigned short bsize;
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
error = -EBADF;
- file = fget(arg);
+ file = fget(config->fd);
if (!file)
goto out;
@@ -977,7 +1089,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
* here to avoid changing device under exclusive owner.
*/
if (!(mode & FMODE_EXCL)) {
- claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
+ claimed_bdev = bd_start_claiming(bdev, loop_configure);
if (IS_ERR(claimed_bdev)) {
error = PTR_ERR(claimed_bdev);
goto out_putf;
@@ -999,52 +1111,58 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mapping = file->f_mapping;
inode = mapping->host;
+ size = get_loop_size(lo, file);
+
+ if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (config->block_size) {
+ error = loop_validate_block_size(config->block_size);
+ if (error)
+ goto out_unlock;
+ }
+
+ error = loop_set_status_from_info(lo, &config->info);
+ if (error)
+ goto out_unlock;
+
if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
!file->f_op->write_iter)
- lo_flags |= LO_FLAGS_READ_ONLY;
+ lo->lo_flags |= LO_FLAGS_READ_ONLY;
- error = -EFBIG;
- size = get_loop_size(lo, file);
- if ((loff_t)(sector_t)size != size)
- goto out_unlock;
error = loop_prepare_queue(lo);
if (error)
goto out_unlock;
- error = 0;
-
- set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
+ set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->use_dio = false;
+ lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
- lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
- lo->transfer = NULL;
- lo->ioctl = NULL;
- lo->lo_sizelimit = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
+ if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
- if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
+ if (config->block_size)
+ bsize = config->block_size;
+ else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
/* In case of direct I/O, match underlying block size */
- unsigned short bsize = bdev_logical_block_size(
- inode->i_sb->s_bdev);
+ bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
+ else
+ bsize = 512;
- blk_queue_logical_block_size(lo->lo_queue, bsize);
- blk_queue_physical_block_size(lo->lo_queue, bsize);
- blk_queue_io_min(lo->lo_queue, bsize);
- }
+ blk_queue_logical_block_size(lo->lo_queue, bsize);
+ blk_queue_physical_block_size(lo->lo_queue, bsize);
+ blk_queue_io_min(lo->lo_queue, bsize);
loop_update_rotational(lo);
loop_update_dio(lo);
- set_capacity(lo->lo_disk, size);
- bd_set_size(bdev, size << 9);
loop_sysfs_init(lo);
- /* let user-space know about the new size */
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+ loop_set_size(lo, size);
set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
block_size(inode->i_bdev) : PAGE_SIZE);
@@ -1062,14 +1180,14 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (partscan)
loop_reread_partitions(lo, bdev);
if (claimed_bdev)
- bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
+ bd_abort_claiming(bdev, claimed_bdev, loop_configure);
return 0;
out_unlock:
mutex_unlock(&loop_ctl_mutex);
out_bdev:
if (claimed_bdev)
- bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
+ bd_abort_claiming(bdev, claimed_bdev, loop_configure);
out_putf:
fput(file);
out:
@@ -1078,43 +1196,6 @@ out:
return error;
}
-static int
-loop_release_xfer(struct loop_device *lo)
-{
- int err = 0;
- struct loop_func_table *xfer = lo->lo_encryption;
-
- if (xfer) {
- if (xfer->release)
- err = xfer->release(lo);
- lo->transfer = NULL;
- lo->lo_encryption = NULL;
- module_put(xfer->owner);
- }
- return err;
-}
-
-static int
-loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
- const struct loop_info64 *i)
-{
- int err = 0;
-
- if (xfer) {
- struct module *owner = xfer->owner;
-
- if (!try_module_get(owner))
- return -EINVAL;
- if (xfer->init)
- err = xfer->init(lo, i);
- if (err)
- module_put(owner);
- else
- lo->lo_encryption = xfer;
- }
- return err;
-}
-
static int __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp = NULL;
@@ -1263,10 +1344,11 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- struct loop_func_table *xfer;
- kuid_t uid = current_uid();
struct block_device *bdev;
+ kuid_t uid = current_uid();
+ int prev_lo_flags;
bool partscan = false;
+ bool size_changed = false;
err = mutex_lock_killable(&loop_ctl_mutex);
if (err)
@@ -1281,13 +1363,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
err = -ENXIO;
goto out_unlock;
}
- if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
- err = -EINVAL;
- goto out_unlock;
- }
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
+ size_changed = true;
sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);
}
@@ -1295,79 +1374,44 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
- err = loop_release_xfer(lo);
- if (err)
+ if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
+ /* If any pages were dirtied after kill_bdev(), try again */
+ err = -EAGAIN;
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ __func__, lo->lo_number, lo->lo_file_name,
+ lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
+ }
- if (info->lo_encrypt_type) {
- unsigned int type = info->lo_encrypt_type;
-
- if (type >= MAX_LO_CRYPT) {
- err = -EINVAL;
- goto out_unfreeze;
- }
- xfer = xfer_funcs[type];
- if (xfer == NULL) {
- err = -EINVAL;
- goto out_unfreeze;
- }
- } else
- xfer = NULL;
+ prev_lo_flags = lo->lo_flags;
- err = loop_init_xfer(lo, xfer, info);
+ err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
- if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit) {
- /* kill_bdev should have truncated all the pages */
- if (lo->lo_device->bd_inode->i_mapping->nrpages) {
- err = -EAGAIN;
- pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
- __func__, lo->lo_number, lo->lo_file_name,
- lo->lo_device->bd_inode->i_mapping->nrpages);
- goto out_unfreeze;
- }
- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
- err = -EFBIG;
- goto out_unfreeze;
- }
+ /* Mask out flags that can't be set using LOOP_SET_STATUS. */
+ lo->lo_flags &= ~LOOP_SET_STATUS_SETTABLE_FLAGS;
+ /* For those flags, use the previous values instead */
+ lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
+ /* For flags that can't be cleared, use previous values too */
+ lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+
+ if (size_changed) {
+ loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
+ lo->lo_backing_file);
+ loop_set_size(lo, new_size);
}
loop_config_discard(lo);
- memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
- memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
- lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
-
- if (!xfer)
- xfer = &none_funcs;
- lo->transfer = xfer->transfer;
- lo->ioctl = xfer->ioctl;
-
- if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
- (info->lo_flags & LO_FLAGS_AUTOCLEAR))
- lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
-
- lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
- lo->lo_init[0] = info->lo_init[0];
- lo->lo_init[1] = info->lo_init[1];
- if (info->lo_encrypt_key_size) {
- memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
- lo->lo_key_owner = uid;
- }
-
/* update dio if lo_offset or transfer is changed */
__loop_update_dio(lo, lo->use_dio);
out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
- if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
- !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
- lo->lo_flags |= LO_FLAGS_PARTSCAN;
+ if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
+ !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
bdev = lo->lo_device;
partscan = true;
@@ -1531,10 +1575,15 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
static int loop_set_capacity(struct loop_device *lo)
{
+ loff_t size;
+
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
+ size = get_loop_size(lo, lo->lo_backing_file);
+ loop_set_size(lo, size);
+
+ return 0;
}
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
@@ -1558,8 +1607,9 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
- return -EINVAL;
+ err = loop_validate_block_size(arg);
+ if (err)
+ return err;
if (lo->lo_queue->limits.logical_block_size == arg)
return 0;
@@ -1617,11 +1667,31 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct loop_device *lo = bdev->bd_disk->private_data;
+ void __user *argp = (void __user *) arg;
int err;
switch (cmd) {
- case LOOP_SET_FD:
- return loop_set_fd(lo, mode, bdev, arg);
+ case LOOP_SET_FD: {
+ /*
+	 * Legacy case - pass in a zeroed out struct loop_config with
+	 * only the file descriptor set, which corresponds to the
+	 * default parameters we'd have used otherwise.
+ */
+ struct loop_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.fd = arg;
+
+ return loop_configure(lo, mode, bdev, &config);
+ }
+ case LOOP_CONFIGURE: {
+ struct loop_config config;
+
+ if (copy_from_user(&config, argp, sizeof(config)))
+ return -EFAULT;
+
+ return loop_configure(lo, mode, bdev, &config);
+ }
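+	/*
+	 * Illustrative userspace sketch (not part of this patch): the new
+	 * ioctl lets a single call bind the file and set parameters, e.g.
+	 *
+	 *   struct loop_config c = { .fd = backing_fd, .block_size = 4096 };
+	 *   ioctl(loop_fd, LOOP_CONFIGURE, &c);
+	 *
+	 * instead of a LOOP_SET_FD + LOOP_SET_STATUS64 + LOOP_SET_BLOCK_SIZE
+	 * sequence.
+	 */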
case LOOP_CHANGE_FD:
return loop_change_fd(lo, bdev, arg);
case LOOP_CLR_FD:
@@ -1629,21 +1699,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_SET_STATUS:
err = -EPERM;
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
- err = loop_set_status_old(lo,
- (struct loop_info __user *)arg);
+ err = loop_set_status_old(lo, argp);
}
break;
case LOOP_GET_STATUS:
- return loop_get_status_old(lo, (struct loop_info __user *) arg);
+ return loop_get_status_old(lo, argp);
case LOOP_SET_STATUS64:
err = -EPERM;
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
- err = loop_set_status64(lo,
- (struct loop_info64 __user *) arg);
+ err = loop_set_status64(lo, argp);
}
break;
case LOOP_GET_STATUS64:
- return loop_get_status64(lo, (struct loop_info64 __user *) arg);
+ return loop_get_status64(lo, argp);
case LOOP_SET_CAPACITY:
case LOOP_SET_DIRECT_IO:
case LOOP_SET_BLOCK_SIZE:
@@ -1795,6 +1863,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_CLR_FD:
case LOOP_GET_STATUS64:
case LOOP_SET_STATUS64:
+ case LOOP_CONFIGURE:
arg = (unsigned long) compat_ptr(arg);
/* fall through */
case LOOP_SET_FD:
@@ -2037,7 +2106,7 @@ static int loop_add(struct loop_device **l, int i)
lo->tag_set.queue_depth = 128;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
- lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 8efd8778e209..87b31f9ca362 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1250,8 +1250,34 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
return errno_to_blk_status(err);
}
+static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct bio *bio;
+
+ if (dev->memory_backed)
+ return;
+
+ if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) {
+ zero_fill_bio(cmd->bio);
+ } else if (req_op(cmd->rq) == REQ_OP_READ) {
+ __rq_for_each_bio(bio, cmd->rq)
+ zero_fill_bio(bio);
+ }
+}
+
static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
+ /*
+ * Since root privileges are required to configure the null_blk
+ * driver, it is fine that this driver does not initialize the
+ * data buffers of read commands. Zero-initialize these buffers
+	 * anyway if KMSAN is enabled to prevent KMSAN from complaining
+	 * about null_blk not initializing read data buffers.
+ */
+ if (IS_ENABLED(CONFIG_KMSAN))
+ nullb_zero_read_cmd_buffer(cmd);
+
/* Complete IO by inline, softirq or timer */
switch (cmd->nq->dev->irqmode) {
case NULL_IRQ_SOFTIRQ:
@@ -1397,7 +1423,7 @@ static bool should_requeue_request(struct request *rq)
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
pr_info("rq %p timed out\n", rq);
- blk_mq_complete_request(rq);
+ blk_mq_force_complete_rq(rq);
return BLK_EH_DONE;
}
@@ -1535,6 +1561,13 @@ static void null_config_discard(struct nullb *nullb)
{
if (nullb->dev->discard == false)
return;
+
+ if (nullb->dev->zoned) {
+ nullb->dev->discard = false;
+ pr_info("discard option is ignored in zoned mode\n");
+ return;
+ }
+
nullb->q->limits.discard_granularity = nullb->dev->blocksize;
nullb->q->limits.discard_alignment = nullb->dev->blocksize;
blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 9e4bcdad1a80..cc47606d8ffe 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -23,6 +23,10 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
pr_err("zone_size must be power-of-two\n");
return -EINVAL;
}
+ if (dev->zone_size > dev->size) {
+ pr_err("Zone size larger than device capacity\n");
+ return -EINVAL;
+ }
dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
dev->nr_zones = dev_size >>
@@ -70,13 +74,20 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
int null_register_zoned_dev(struct nullb *nullb)
{
+ struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- if (queue_is_mq(q))
- return blk_revalidate_disk_zones(nullb->disk);
+ if (queue_is_mq(q)) {
+ int ret = blk_revalidate_disk_zones(nullb->disk, NULL);
- blk_queue_chunk_sectors(q, nullb->dev->zone_size_sects);
- q->nr_zones = blkdev_nr_zones(nullb->disk);
+ if (ret)
+ return ret;
+ } else {
+ blk_queue_chunk_sectors(q, dev->zone_size_sects);
+ q->nr_zones = blkdev_nr_zones(nullb->disk);
+ }
+
+ blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
return 0;
}
@@ -138,7 +149,7 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
}
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
- unsigned int nr_sectors)
+ unsigned int nr_sectors, bool append)
{
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
@@ -158,9 +169,21 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
case BLK_ZONE_COND_CLOSED:
- /* Writes must be at the write pointer position */
- if (sector != zone->wp)
+ /*
+ * Regular writes must be at the write pointer position.
+		 * Zone append writes are automatically issued at the write
+		 * pointer, and the resulting position is returned via the
+		 * request or BIO sector.
+ */
+ if (append) {
+ sector = zone->wp;
+ if (cmd->bio)
+ cmd->bio->bi_iter.bi_sector = sector;
+ else
+ cmd->rq->__sector = sector;
+ } else if (sector != zone->wp) {
return BLK_STS_IOERR;
+ }
if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
@@ -242,7 +265,9 @@ blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
{
switch (op) {
case REQ_OP_WRITE:
- return null_zone_write(cmd, sector, nr_sectors);
+ return null_zone_write(cmd, sector, nr_sectors, false);
+ case REQ_OP_ZONE_APPEND:
+ return null_zone_write(cmd, sector, nr_sectors, true);
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index cda5cf917e9a..5124eca90e83 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -1032,7 +1032,7 @@ static int __init pcd_init(void)
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (cd->present) {
- register_cdrom(&cd->info);
+ register_cdrom(cd->disk, &cd->info);
cd->disk->private_data = cd;
add_disk(cd->disk);
}
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index c5c6487a19d5..7b55811c2a81 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -454,7 +454,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
queue->queuedata = dev;
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
- blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
diff --git a/drivers/block/rnbd/Kconfig b/drivers/block/rnbd/Kconfig
new file mode 100644
index 000000000000..4b6d3d816d1f
--- /dev/null
+++ b/drivers/block/rnbd/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config BLK_DEV_RNBD
+ bool
+
+config BLK_DEV_RNBD_CLIENT
+ tristate "RDMA Network Block Device driver client"
+ depends on INFINIBAND_RTRS_CLIENT
+ select BLK_DEV_RNBD
+ help
+	  RNBD client is a network block device driver using the RDMA transport.
+
+	  RNBD client allows for mapping remote block devices over the
+	  RTRS protocol from a target system where the RNBD server is running.
+
+ If unsure, say N.
+
+config BLK_DEV_RNBD_SERVER
+ tristate "RDMA Network Block Device driver server"
+ depends on INFINIBAND_RTRS_SERVER
+ select BLK_DEV_RNBD
+ help
+	  RNBD server is the server side of RNBD using the RDMA transport.
+
+	  RNBD server allows for exporting local block devices to a remote client
+	  over the RTRS protocol.
+
+ If unsure, say N.
diff --git a/drivers/block/rnbd/Makefile b/drivers/block/rnbd/Makefile
new file mode 100644
index 000000000000..5bb1a7ad1ada
--- /dev/null
+++ b/drivers/block/rnbd/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+ccflags-y := -I$(srctree)/drivers/infiniband/ulp/rtrs
+
+rnbd-client-y := rnbd-clt.o \
+ rnbd-clt-sysfs.o \
+ rnbd-common.o
+
+rnbd-server-y := rnbd-common.o \
+ rnbd-srv.o \
+ rnbd-srv-dev.o \
+ rnbd-srv-sysfs.o
+
+obj-$(CONFIG_BLK_DEV_RNBD_CLIENT) += rnbd-client.o
+obj-$(CONFIG_BLK_DEV_RNBD_SERVER) += rnbd-server.o
diff --git a/drivers/block/rnbd/README b/drivers/block/rnbd/README
new file mode 100644
index 000000000000..1773c0aa0bd4
--- /dev/null
+++ b/drivers/block/rnbd/README
@@ -0,0 +1,92 @@
+********************************
+RDMA Network Block Device (RNBD)
+********************************
+
+Introduction
+------------
+
+RNBD (RDMA Network Block Device) is a pair of kernel modules
+(client and server) that allow for remote access to a block device on
+the server over the RTRS protocol using the RDMA (InfiniBand, RoCE, iWARP)
+transport. After being mapped, the remote block devices can be accessed
+on the client side as local block devices.
+
+I/O is transferred between client and server by the RTRS transport
+modules. The administration of RNBD and RTRS modules is done via
+sysfs entries.
+
+Requirements
+------------
+
+ RTRS kernel modules
+
+Quick Start
+-----------
+
+Server side:
+ # modprobe rnbd_server
+
+Client side:
+ # modprobe rnbd_client
+ # echo "sessname=blya path=ip:10.50.100.66 device_path=/dev/ram0" > \
+ /sys/devices/virtual/rnbd-client/ctl/map_device
+
+  Where "sessname=" is a session name, a string that identifies the session
+  on both the client and the server side; "path=" is a destination IP
+  address or a pair of source and destination IP addresses, separated by a
+  comma. Multiple "path=" options can be specified in order to use
+  multipath (see the RTRS description for details); "device_path=" is the
+  block device to be mapped from the server side. After the session to the
+  server machine is established, the mapped device will appear on the
+  client side under /dev/rnbd<N>.
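+
+  For example, mapping a device over two paths for multipath (the second
+  address is illustrative; adjust the names and addresses to your setup):
+
+  # echo "sessname=blya path=ip:10.50.100.66 path=ip:10.50.100.67 \
+          device_path=/dev/ram0" > \
+          /sys/devices/virtual/rnbd-client/ctl/map_device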
+
+
+RNBD-Server Module Parameters
+=============================
+
+dev_search_path
+---------------
+
+When a device is mapped from the client, the server generates the path
+to the block device on the server side by concatenating dev_search_path
+and the "device_path" that was specified in the map_device operation.
+
+The default dev_search_path is: "/".
+
+The dev_search_path option can also contain %SESSNAME% in order to provide
+different device namespaces for different sessions. See the "device_path"
+option for details.
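+
+For example (hypothetical values): with dev_search_path set to
+"/rnbd-devs/%SESSNAME%", a client mapping device_path=ram0 over a session
+named "blya" makes the server open "/rnbd-devs/blya/ram0".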
+
+============================
+Protocol (rnbd/rnbd-proto.h)
+============================
+
+1. Before mapping the first device from a given server, the client sends an
+RNBD_MSG_SESS_INFO to the server. The server responds with
+RNBD_MSG_SESS_INFO_RSP. Currently the messages only contain the protocol
+version for backward compatibility.
+
+2. The client requests to open a device by sending an RNBD_MSG_OPEN message.
+It contains the path to the device and the access mode (read-only or
+writable). The server responds with RNBD_MSG_OPEN_RSP, which contains
+a 32-bit device id to be used for IOs and device "geometry" related
+information: size, max_hw_sectors, etc.
+
+3. The client attaches an RNBD_MSG_IO to each IO message sent to a device.
+This message contains the device id provided by the server in its
+rnbd_msg_open_rsp, the sector to be accessed, read-write flags and bi_size.
+
+4. The client closes a device by sending RNBD_MSG_CLOSE, which contains only
+the device id provided by the server.
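+
+The resulting message flow for a single device looks roughly as follows
+(sketch, not a normative trace):
+
+  client                                  server
+    RNBD_MSG_SESS_INFO       --->
+                             <---  RNBD_MSG_SESS_INFO_RSP
+    RNBD_MSG_OPEN            --->
+                             <---  RNBD_MSG_OPEN_RSP (device id, geometry)
+    RNBD_MSG_IO (device id, sector, flags, bi_size)  --->   (per IO)
+    RNBD_MSG_CLOSE (device id)  --->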
+
+=========================================
+Contributors List (in alphabetical order)
+=========================================
+Danil Kipnis <danil.kipnis@profitbricks.com>
+Fabian Holler <mail@fholler.de>
+Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
+Jack Wang <jinpu.wang@profitbricks.com>
+Kleber Souza <kleber.souza@profitbricks.com>
+Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
+Milind Dumbare <Milind.dumbare@gmail.com>
+Roman Penyaev <roman.penyaev@profitbricks.com>
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
new file mode 100644
index 000000000000..4f4474eecadb
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/parser.h>
+#include <linux/module.h>
+#include <linux/in6.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <rdma/ib.h>
+#include <rdma/rdma_cm.h>
+
+#include "rnbd-clt.h"
+
+static struct device *rnbd_dev;
+static struct class *rnbd_dev_class;
+static struct kobject *rnbd_devs_kobj;
+
+enum {
+ RNBD_OPT_ERR = 0,
+ RNBD_OPT_DEST_PORT = 1 << 0,
+ RNBD_OPT_PATH = 1 << 1,
+ RNBD_OPT_DEV_PATH = 1 << 2,
+ RNBD_OPT_ACCESS_MODE = 1 << 3,
+ RNBD_OPT_SESSNAME = 1 << 6,
+};
+
+static const unsigned int rnbd_opt_mandatory[] = {
+ RNBD_OPT_PATH,
+ RNBD_OPT_DEV_PATH,
+ RNBD_OPT_SESSNAME,
+};
+
+static const match_table_t rnbd_opt_tokens = {
+ {RNBD_OPT_PATH, "path=%s" },
+ {RNBD_OPT_DEV_PATH, "device_path=%s"},
+ {RNBD_OPT_DEST_PORT, "dest_port=%d" },
+ {RNBD_OPT_ACCESS_MODE, "access_mode=%s"},
+ {RNBD_OPT_SESSNAME, "sessname=%s" },
+ {RNBD_OPT_ERR, NULL },
+};
+
+struct rnbd_map_options {
+ char *sessname;
+ struct rtrs_addr *paths;
+ size_t *path_cnt;
+ char *pathname;
+ u16 *dest_port;
+ enum rnbd_access_mode *access_mode;
+};
+
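+/*
+ * Illustrative input (mirrors the sysfs usage string): a buffer like
+ * "sessname=blya path=ip:10.50.100.66 device_path=/dev/ram0 access_mode=rw"
+ * is parsed into opt->sessname, opt->paths[0], opt->pathname and
+ * *opt->access_mode.
+ */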
+static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
+ struct rnbd_map_options *opt)
+{
+ char *options, *sep_opt;
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int opt_mask = 0;
+ int token;
+ int ret = -EINVAL;
+ int i, dest_port;
+ int p_cnt = 0;
+
+ options = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ sep_opt = strstrip(options);
+ while ((p = strsep(&sep_opt, " ")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, rnbd_opt_tokens, args);
+ opt_mask |= token;
+
+ switch (token) {
+ case RNBD_OPT_SESSNAME:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) > NAME_MAX) {
+ pr_err("map_device: sessname too long\n");
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+ strlcpy(opt->sessname, p, NAME_MAX);
+ kfree(p);
+ break;
+
+ case RNBD_OPT_PATH:
+ if (p_cnt >= max_path_cnt) {
+ pr_err("map_device: too many (> %zu) paths provided\n",
+ max_path_cnt);
+ ret = -ENOMEM;
+ goto out;
+ }
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = rtrs_addr_to_sockaddr(p, strlen(p),
+ *opt->dest_port,
+ &opt->paths[p_cnt]);
+ if (ret) {
+ pr_err("Can't parse path %s: %d\n", p, ret);
+ kfree(p);
+ goto out;
+ }
+
+ p_cnt++;
+
+ kfree(p);
+ break;
+
+ case RNBD_OPT_DEV_PATH:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) > NAME_MAX) {
+ pr_err("map_device: Device path too long\n");
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+ strlcpy(opt->pathname, p, NAME_MAX);
+ kfree(p);
+ break;
+
+ case RNBD_OPT_DEST_PORT:
+ if (match_int(args, &dest_port) || dest_port < 0 ||
+ dest_port > 65535) {
+ pr_err("bad destination port number parameter '%d'\n",
+ dest_port);
+ ret = -EINVAL;
+ goto out;
+ }
+ *opt->dest_port = dest_port;
+ break;
+
+ case RNBD_OPT_ACCESS_MODE:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!strcmp(p, "ro")) {
+ *opt->access_mode = RNBD_ACCESS_RO;
+ } else if (!strcmp(p, "rw")) {
+ *opt->access_mode = RNBD_ACCESS_RW;
+ } else if (!strcmp(p, "migration")) {
+ *opt->access_mode = RNBD_ACCESS_MIGRATION;
+ } else {
+ pr_err("map_device: Invalid access_mode: '%s'\n",
+ p);
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+
+ kfree(p);
+ break;
+
+ default:
+ pr_err("map_device: Unknown parameter or missing value '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rnbd_opt_mandatory); i++) {
+ if ((opt_mask & rnbd_opt_mandatory[i])) {
+ ret = 0;
+ } else {
+ pr_err("map_device: Parameters missing\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ *opt->path_cnt = p_cnt;
+ kfree(options);
+ return ret;
+}
+
+static ssize_t state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ switch (dev->dev_state) {
+ case DEV_STATE_INIT:
+ return snprintf(page, PAGE_SIZE, "init\n");
+ case DEV_STATE_MAPPED:
+ /* TODO fix cli tool before changing to proper state */
+ return snprintf(page, PAGE_SIZE, "open\n");
+ case DEV_STATE_MAPPED_DISCONNECTED:
+ /* TODO fix cli tool before changing to proper state */
+ return snprintf(page, PAGE_SIZE, "closed\n");
+ case DEV_STATE_UNMAPPED:
+ return snprintf(page, PAGE_SIZE, "unmapped\n");
+ default:
+ return snprintf(page, PAGE_SIZE, "unknown\n");
+ }
+}
+
+static struct kobj_attribute rnbd_clt_state_attr = __ATTR_RO(state);
+
+static ssize_t mapping_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", dev->pathname);
+}
+
+static struct kobj_attribute rnbd_clt_mapping_path_attr =
+ __ATTR_RO(mapping_path);
+
+static ssize_t access_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ rnbd_access_mode_str(dev->access_mode));
+}
+
+static struct kobj_attribute rnbd_clt_access_mode =
+ __ATTR_RO(access_mode);
+
+static ssize_t rnbd_clt_unmap_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo <normal|force> > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rnbd_clt_unmap_dev_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_clt_dev *dev;
+ char *opt, *options;
+ bool force;
+ int err;
+
+ opt = kstrdup(buf, GFP_KERNEL);
+ if (!opt)
+ return -ENOMEM;
+
+ options = strstrip(opt);
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+ if (sysfs_streq(options, "normal")) {
+ force = false;
+ } else if (sysfs_streq(options, "force")) {
+ force = true;
+ } else {
+ rnbd_clt_err(dev,
+ "unmap_device: Invalid value: %s\n",
+ options);
+ err = -EINVAL;
+ goto out;
+ }
+
+ rnbd_clt_info(dev, "Unmapping device, option: %s.\n",
+ force ? "force" : "normal");
+
+ /*
+	 * We take an explicit module reference only for one reason: to avoid
+	 * racing with the lockless rnbd_destroy_sessions().
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ err = -ENODEV;
+ goto out;
+ }
+ err = rnbd_clt_unmap_device(dev, force, &attr->attr);
+ if (err) {
+ if (err != -EALREADY)
+ rnbd_clt_err(dev, "unmap_device: %d\n", err);
+ goto module_put;
+ }
+
+ /*
+	 * At this point the device may already have vanished!
+ */
+
+ err = count;
+
+module_put:
+ module_put(THIS_MODULE);
+out:
+ kfree(opt);
+
+ return err;
+}
+
+static struct kobj_attribute rnbd_clt_unmap_device_attr =
+ __ATTR(unmap_device, 0644, rnbd_clt_unmap_dev_show,
+ rnbd_clt_unmap_dev_store);
+
+static ssize_t rnbd_clt_resize_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo <new size in sectors> > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long sectors;
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ ret = kstrtoul(buf, 0, &sectors);
+ if (ret)
+ return ret;
+
+ ret = rnbd_clt_resize_disk(dev, (size_t)sectors);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rnbd_clt_resize_dev_attr =
+ __ATTR(resize, 0644, rnbd_clt_resize_dev_show,
+ rnbd_clt_resize_dev_store);
+
+static ssize_t rnbd_clt_remap_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo <1> > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rnbd_clt_remap_dev_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_clt_dev *dev;
+ char *opt, *options;
+ int err;
+
+ opt = kstrdup(buf, GFP_KERNEL);
+ if (!opt)
+ return -ENOMEM;
+
+ options = strstrip(opt);
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+ if (!sysfs_streq(options, "1")) {
+ rnbd_clt_err(dev,
+ "remap_device: Invalid value: %s\n",
+ options);
+ err = -EINVAL;
+ goto out;
+ }
+ err = rnbd_clt_remap_device(dev);
+ if (likely(!err))
+ err = count;
+
+out:
+ kfree(opt);
+
+ return err;
+}
+
+static struct kobj_attribute rnbd_clt_remap_device_attr =
+ __ATTR(remap_device, 0644, rnbd_clt_remap_dev_show,
+ rnbd_clt_remap_dev_store);
+
+static ssize_t session_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", dev->sess->sessname);
+}
+
+static struct kobj_attribute rnbd_clt_session_attr =
+ __ATTR_RO(session);
+
+static struct attribute *rnbd_dev_attrs[] = {
+ &rnbd_clt_unmap_device_attr.attr,
+ &rnbd_clt_resize_dev_attr.attr,
+ &rnbd_clt_remap_device_attr.attr,
+ &rnbd_clt_mapping_path_attr.attr,
+ &rnbd_clt_state_attr.attr,
+ &rnbd_clt_session_attr.attr,
+ &rnbd_clt_access_mode.attr,
+ NULL,
+};
+
+void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
+{
+ /*
+	 * The module unload path, rnbd_client_exit(), races with a manual
+	 * unmapping of the last device via sysfs, i.e. with
+	 * rnbd_clt_unmap_dev_store(), which leads to a sysfs warning
+	 * because the sysfs link has already been removed.
+ */
+ if (strlen(dev->blk_symlink_name) && try_module_get(THIS_MODULE)) {
+ sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
+ module_put(THIS_MODULE);
+ }
+}
+
+static struct kobj_type rnbd_dev_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = rnbd_dev_attrs,
+};
+
+static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
+{
+ int ret;
+ struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
+
+ ret = kobject_init_and_add(&dev->kobj, &rnbd_dev_ktype, gd_kobj, "%s",
+ "rnbd");
+ if (ret)
+ rnbd_clt_err(dev, "Failed to create device sysfs dir, err: %d\n",
+ ret);
+
+ return ret;
+}
+
+static ssize_t rnbd_clt_map_device_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
+}
+
+static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
+ size_t len)
+{
+ int ret;
+ char pathname[NAME_MAX], *s;
+
+ strlcpy(pathname, dev->pathname, sizeof(pathname));
+ while ((s = strchr(pathname, '/')))
+ s[0] = '!';
+
+ ret = snprintf(buf, len, "%s", pathname);
+ if (ret >= len)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+
+static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
+{
+ struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
+ int ret;
+
+ ret = rnbd_clt_get_path_name(dev, dev->blk_symlink_name,
+ sizeof(dev->blk_symlink_name));
+ if (ret) {
+ rnbd_clt_err(dev, "Failed to get /sys/block symlink path, err: %d\n",
+ ret);
+ goto out_err;
+ }
+
+ ret = sysfs_create_link(rnbd_devs_kobj, gd_kobj,
+ dev->blk_symlink_name);
+ if (ret) {
+ rnbd_clt_err(dev, "Creating /sys/block symlink failed, err: %d\n",
+ ret);
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ dev->blk_symlink_name[0] = '\0';
+ return ret;
+}
+
+static ssize_t rnbd_clt_map_device_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_clt_dev *dev;
+ struct rnbd_map_options opt;
+ int ret;
+ char pathname[NAME_MAX];
+ char sessname[NAME_MAX];
+ enum rnbd_access_mode access_mode = RNBD_ACCESS_RW;
+ u16 port_nr = RTRS_PORT;
+
+ struct sockaddr_storage *addrs;
+ struct rtrs_addr paths[6];
+ size_t path_cnt;
+
+ opt.sessname = sessname;
+ opt.paths = paths;
+ opt.path_cnt = &path_cnt;
+ opt.pathname = pathname;
+ opt.dest_port = &port_nr;
+ opt.access_mode = &access_mode;
+ addrs = kcalloc(ARRAY_SIZE(paths) * 2, sizeof(*addrs), GFP_KERNEL);
+ if (!addrs)
+ return -ENOMEM;
+
+ for (path_cnt = 0; path_cnt < ARRAY_SIZE(paths); path_cnt++) {
+ paths[path_cnt].src = &addrs[path_cnt * 2];
+ paths[path_cnt].dst = &addrs[path_cnt * 2 + 1];
+ }
+
+ ret = rnbd_clt_parse_map_options(buf, ARRAY_SIZE(paths), &opt);
+ if (ret)
+ goto out;
+
+ pr_info("Mapping device %s on session %s, (access_mode: %s)\n",
+ pathname, sessname,
+ rnbd_access_mode_str(access_mode));
+
+ dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr, pathname,
+ access_mode);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto out;
+ }
+
+ ret = rnbd_clt_add_dev_kobj(dev);
+ if (ret)
+ goto unmap_dev;
+
+ ret = rnbd_clt_add_dev_symlink(dev);
+ if (ret)
+ goto unmap_dev;
+
+ kfree(addrs);
+ return count;
+
+unmap_dev:
+ rnbd_clt_unmap_device(dev, true, NULL);
+out:
+ kfree(addrs);
+ return ret;
+}
+
+static struct kobj_attribute rnbd_clt_map_device_attr =
+ __ATTR(map_device, 0644,
+ rnbd_clt_map_device_show, rnbd_clt_map_device_store);
+
+static struct attribute *default_attrs[] = {
+ &rnbd_clt_map_device_attr.attr,
+ NULL,
+};
+
+static struct attribute_group default_attr_group = {
+ .attrs = default_attrs,
+};
+
+static const struct attribute_group *default_attr_groups[] = {
+ &default_attr_group,
+ NULL,
+};
+
+int rnbd_clt_create_sysfs_files(void)
+{
+ int err;
+
+ rnbd_dev_class = class_create(THIS_MODULE, "rnbd-client");
+ if (IS_ERR(rnbd_dev_class))
+ return PTR_ERR(rnbd_dev_class);
+
+ rnbd_dev = device_create_with_groups(rnbd_dev_class, NULL,
+ MKDEV(0, 0), NULL,
+ default_attr_groups, "ctl");
+ if (IS_ERR(rnbd_dev)) {
+ err = PTR_ERR(rnbd_dev);
+ goto cls_destroy;
+ }
+ rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
+ if (!rnbd_devs_kobj) {
+ err = -ENOMEM;
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+cls_destroy:
+ class_destroy(rnbd_dev_class);
+
+ return err;
+}
+
+void rnbd_clt_destroy_default_group(void)
+{
+ sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group);
+}
+
+void rnbd_clt_destroy_sysfs_files(void)
+{
+ kobject_del(rnbd_devs_kobj);
+ kobject_put(rnbd_devs_kobj);
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+ class_destroy(rnbd_dev_class);
+}
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
new file mode 100644
index 000000000000..cc6a4e2587ae
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -0,0 +1,1729 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/scatterlist.h>
+#include <linux/idr.h>
+
+#include "rnbd-clt.h"
+
+MODULE_DESCRIPTION("RDMA Network Block Device Client");
+MODULE_LICENSE("GPL");
+
+static int rnbd_client_major;
+static DEFINE_IDA(index_ida);
+static DEFINE_MUTEX(ida_lock);
+static DEFINE_MUTEX(sess_lock);
+static LIST_HEAD(sess_list);
+
+/*
+ * Maximum number of partitions an instance can have.
+ * 6 bits = 64 minors = 63 partitions (one minor is used for the device itself)
+ */
+#define RNBD_PART_BITS 6
+
+static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
+{
+ return refcount_inc_not_zero(&sess->refcount);
+}
+
+static void free_sess(struct rnbd_clt_session *sess);
+
+static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
+{
+ might_sleep();
+
+ if (refcount_dec_and_test(&sess->refcount))
+ free_sess(sess);
+}
+
+static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
+{
+ might_sleep();
+
+ if (!refcount_dec_and_test(&dev->refcount))
+ return;
+
+ mutex_lock(&ida_lock);
+ ida_simple_remove(&index_ida, dev->clt_device_id);
+ mutex_unlock(&ida_lock);
+ kfree(dev->hw_queues);
+ rnbd_clt_put_sess(dev->sess);
+ mutex_destroy(&dev->lock);
+ kfree(dev);
+}
+
+static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
+{
+ return refcount_inc_not_zero(&dev->refcount);
+}
+
+static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
+ const struct rnbd_msg_open_rsp *rsp)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+
+ if (!rsp->logical_block_size)
+ return -EINVAL;
+
+ dev->device_id = le32_to_cpu(rsp->device_id);
+ dev->nsectors = le64_to_cpu(rsp->nsectors);
+ dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
+ dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
+ dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
+ dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
+ dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
+ dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
+ dev->secure_discard = le16_to_cpu(rsp->secure_discard);
+ dev->rotational = rsp->rotational;
+
+ dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
+ dev->max_segments = BMAX_SEGMENTS;
+
+ dev->max_hw_sectors = min_t(u32, dev->max_hw_sectors,
+ le32_to_cpu(rsp->max_hw_sectors));
+ dev->max_segments = min_t(u16, dev->max_segments,
+ le16_to_cpu(rsp->max_segments));
+
+ return 0;
+}
+
+static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
+ size_t new_nsectors)
+{
+ int err = 0;
+
+ rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
+ dev->nsectors, new_nsectors);
+ dev->nsectors = new_nsectors;
+ set_capacity(dev->gd, dev->nsectors);
+ err = revalidate_disk(dev->gd);
+ if (err)
+ rnbd_clt_err(dev,
+ "Failed to change device size from %zu to %zu, err: %d\n",
+ dev->nsectors, new_nsectors, err);
+ return err;
+}
+
+static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp)
+{
+ int err = 0;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_UNMAPPED) {
+ rnbd_clt_info(dev,
+ "Ignoring Open-Response message from server for unmapped device\n");
+ err = -ENOENT;
+ goto out;
+ }
+ if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
+ u64 nsectors = le64_to_cpu(rsp->nsectors);
+
+ /*
+ * If the device was remapped and the size changed in the
+		 * meantime, we need to revalidate it.
+ */
+ if (dev->nsectors != nsectors)
+ rnbd_clt_change_capacity(dev, nsectors);
+ rnbd_clt_info(dev, "Device online, device remapped successfully\n");
+ }
+ err = rnbd_clt_set_dev_attr(dev, rsp);
+ if (err)
+ goto out;
+ dev->dev_state = DEV_STATE_MAPPED;
+
+out:
+ mutex_unlock(&dev->lock);
+
+ return err;
+}
+
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
+{
+ int ret = 0;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state != DEV_STATE_MAPPED) {
+ pr_err("Failed to set new size of the device, device is not opened\n");
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = rnbd_clt_change_capacity(dev, newsize);
+
+out:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
+{
+ if (WARN_ON(!q->hctx))
+ return;
+
+ /* We can come here from interrupt, thus async=true */
+ blk_mq_run_hw_queue(q->hctx, true);
+}
+
+enum {
+ RNBD_DELAY_IFBUSY = -1,
+};
+
+/**
+ * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
+ * @sess: Session to find a queue for
+ * @cpu: Cpu to start the search from
+ *
+ * Description:
+ *     Each CPU has a list of HW queues that need to be rerun. If such a
+ *     list is not empty, the corresponding bit is set in a bitmap. This
+ *     function finds the first set bit in the bitmap, starting from @cpu
+ *     and wrapping around, and returns the corresponding CPU list.
+ */
+static struct rnbd_cpu_qlist *
+rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
+{
+ int bit;
+
+ /* Search from cpu to nr_cpu_ids */
+ bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
+ if (bit < nr_cpu_ids) {
+ return per_cpu_ptr(sess->cpu_queues, bit);
+ } else if (cpu != 0) {
+ /* Search from 0 to cpu */
+ bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
+ if (bit < cpu)
+ return per_cpu_ptr(sess->cpu_queues, bit);
+ }
+
+ return NULL;
+}
+
+static inline int nxt_cpu(int cpu)
+{
+ return (cpu + 1) % nr_cpu_ids;
+}
+
+/**
+ * rnbd_rerun_if_needed() - rerun next queue marked as stopped
+ * @sess: Session to rerun a queue on
+ *
+ * Description:
+ * Each CPU has its own list of HW queues, which should be rerun.
+ * The function finds such a list of HW queues, takes the list lock, picks up
+ * the first HW queue from the list and requeues it.
+ *
+ * Return:
+ * True if the queue was requeued, false otherwise.
+ *
+ * Context:
+ * Does not matter.
+ */
+static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
+{
+ struct rnbd_queue *q = NULL;
+ struct rnbd_cpu_qlist *cpu_q;
+ unsigned long flags;
+ int *cpup;
+
+ /*
+ * To keep fairness and not let other queues starve, we always
+ * try to wake up someone else in a round-robin manner. That of course
+ * increases latency but queues always have a chance to be executed.
+ */
+ cpup = get_cpu_ptr(sess->cpu_rr);
+ for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
+ cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
+ if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
+ continue;
+ if (unlikely(!test_bit(cpu_q->cpu, sess->cpu_queues_bm)))
+ goto unlock;
+ q = list_first_entry_or_null(&cpu_q->requeue_list,
+ typeof(*q), requeue_list);
+ if (WARN_ON(!q))
+ goto clear_bit;
+ list_del_init(&q->requeue_list);
+ clear_bit_unlock(0, &q->in_list);
+
+ if (list_empty(&cpu_q->requeue_list)) {
+ /* Clear bit if nothing is left */
+clear_bit:
+ clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ }
+unlock:
+ spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
+
+ if (q)
+ break;
+ }
+
+ /*
+ * Saves the CPU that is going to be requeued on the per-cpu var. Just
+ * incrementing it doesn't work because rnbd_get_cpu_qlist() will
+ * always return the first CPU with something on the queue list when the
+ * value stored on the var is greater than the last CPU with something
+ * on the list.
+ */
+ if (cpu_q)
+ *cpup = cpu_q->cpu;
+ put_cpu_ptr(sess->cpu_rr);
+
+ if (q)
+ rnbd_clt_dev_requeue(q);
+
+ return q;
+}
+
+/**
+ * rnbd_rerun_all_if_idle() - rerun all queues left in the list if
+ * session is idling (there are no requests
+ * in-flight).
+ * @sess: Session to rerun the queues on
+ *
+ * Description:
+ * This function tries to rerun all stopped queues if there are no
+ * requests in-flight anymore. This function tries to solve an obvious
+ * problem, which arises when the number of tags is less than the number
+ * of queues (hctx) that are stopped and put to sleep. If the last permit,
+ * which has just been put, does not wake up all remaining queues (hctxs),
+ * IO requests hang forever.
+ *
+ * That can happen when all permits, say N, have been exhausted
+ * from one CPU, and we have many block devices per session, say M.
+ * Each block device has its own queue (hctx) for each CPU, so eventually
+ * we can put that many queues (hctxs) to sleep: M x nr_cpu_ids.
+ * If the number of permits N < M x nr_cpu_ids, we eventually get an IO hang.
+ *
+ * To avoid this hang last caller of rnbd_put_permit() (last caller is the
+ * one who observes sess->busy == 0) must wake up all remaining queues.
+ *
+ * Context:
+ * Does not matter.
+ */
+static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
+{
+ bool requeued;
+
+ do {
+ requeued = rnbd_rerun_if_needed(sess);
+ } while (atomic_read(&sess->busy) == 0 && requeued);
+}
+
+static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait)
+{
+ struct rtrs_permit *permit;
+
+ permit = rtrs_clt_get_permit(sess->rtrs, con_type,
+ wait ? RTRS_PERMIT_WAIT :
+ RTRS_PERMIT_NOWAIT);
+ if (likely(permit))
+ /* We have a subtle rare case here, when all permits can be
+ * consumed before the busy counter is increased. This is safe,
+ * because the loser will get NULL as a permit, observe a 0 busy
+ * counter and immediately restart the queue itself.
+ */
+ atomic_inc(&sess->busy);
+
+ return permit;
+}
+
+static void rnbd_put_permit(struct rnbd_clt_session *sess,
+ struct rtrs_permit *permit)
+{
+ rtrs_clt_put_permit(sess->rtrs, permit);
+ atomic_dec(&sess->busy);
+ /* Paired with rnbd_clt_dev_add_to_requeue(). Decrement first
+ * and then check queue bits.
+ */
+ smp_mb__after_atomic();
+ rnbd_rerun_all_if_idle(sess);
+}
+
+static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait)
+{
+ struct rnbd_iu *iu;
+ struct rtrs_permit *permit;
+
+ permit = rnbd_get_permit(sess, con_type,
+ wait ? RTRS_PERMIT_WAIT :
+ RTRS_PERMIT_NOWAIT);
+ if (unlikely(!permit))
+ return NULL;
+ iu = rtrs_permit_to_pdu(permit);
+ iu->permit = permit;
+ /*
+ * 1st reference is dropped after finishing sending a "user" message,
+ * 2nd reference is dropped after confirmation with the response is
+ * returned.
+ * 1st and 2nd can happen in any order, so the rnbd_iu should be
+ * released (rtrs_permit returned to RTRS) only after both
+ * are finished.
+ */
+ atomic_set(&iu->refcount, 2);
+ init_waitqueue_head(&iu->comp.wait);
+ iu->comp.errno = INT_MAX;
+
+ return iu;
+}
+
+static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
+{
+ if (atomic_dec_and_test(&iu->refcount))
+ rnbd_put_permit(sess, iu->permit);
+}
+
+static void rnbd_softirq_done_fn(struct request *rq)
+{
+ struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_iu *iu;
+
+ iu = blk_mq_rq_to_pdu(rq);
+ rnbd_put_permit(sess, iu->permit);
+ blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
+}
+
+static void msg_io_conf(void *priv, int errno)
+{
+ struct rnbd_iu *iu = priv;
+ struct rnbd_clt_dev *dev = iu->dev;
+ struct request *rq = iu->rq;
+ int rw = rq_data_dir(rq);
+
+ iu->errno = errno;
+
+ blk_mq_complete_request(rq);
+
+ if (errno)
+ rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n",
+ rw == READ ? "read" : "write", errno);
+}
+
+static void wake_up_iu_comp(struct rnbd_iu *iu, int errno)
+{
+ iu->comp.errno = errno;
+ wake_up(&iu->comp.wait);
+}
+
+static void msg_conf(void *priv, int errno)
+{
+ struct rnbd_iu *iu = priv;
+
+ iu->errno = errno;
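+ /* The conf handlers may sleep (e.g. process_msg_open_rsp() takes
+ * dev->lock), so run them from a work item instead of the RTRS
+ * completion context.
+ */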
+ schedule_work(&iu->work);
+}
+
+enum wait_type {
+ NO_WAIT = 0,
+ WAIT = 1
+};
+
+static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
+ struct rnbd_iu *iu, struct kvec *vec, size_t nr,
+ size_t len, struct scatterlist *sg, unsigned int sg_len,
+ void (*conf)(struct work_struct *work),
+ int *errno, enum wait_type wait)
+{
+ int err;
+ struct rtrs_clt_req_ops req_ops;
+
+ INIT_WORK(&iu->work, conf);
+ req_ops = (struct rtrs_clt_req_ops) {
+ .priv = iu,
+ .conf_fn = msg_conf,
+ };
+ err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
+ vec, nr, len, sg, sg_len);
+ if (!err && wait) {
+ wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
+ *errno = iu->comp.errno;
+ } else {
+ *errno = 0;
+ }
+
+ return err;
+}
+
+static void msg_close_conf(struct work_struct *work)
+{
+ struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
+ struct rnbd_clt_dev *dev = iu->dev;
+
+ wake_up_iu_comp(iu, iu->errno);
+ rnbd_put_iu(dev->sess, iu);
+ rnbd_clt_put_dev(dev);
+}
+
+static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_msg_close msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ int err, errno;
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu)
+ return -ENOMEM;
+
+ iu->buf = NULL;
+ iu->dev = dev;
+
+ sg_mark_end(&iu->sglist[0]);
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
+ msg.device_id = cpu_to_le32(device_id);
+
+ WARN_ON(!rnbd_clt_get_dev(dev));
+ err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 1, 0, NULL, 0,
+ msg_close_conf, &errno, wait);
+ if (err) {
+ rnbd_clt_put_dev(dev);
+ rnbd_put_iu(sess, iu);
+ } else {
+ err = errno;
+ }
+
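+ /* Drop the local reference; the second one is dropped either in
+ * msg_close_conf() on success or in the error branch above.
+ */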
+ rnbd_put_iu(sess, iu);
+ return err;
+}
+
+static void msg_open_conf(struct work_struct *work)
+{
+ struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
+ struct rnbd_msg_open_rsp *rsp = iu->buf;
+ struct rnbd_clt_dev *dev = iu->dev;
+ int errno = iu->errno;
+
+ if (errno) {
+ rnbd_clt_err(dev,
+ "Opening failed, server responded: %d\n",
+ errno);
+ } else {
+ errno = process_msg_open_rsp(dev, rsp);
+ if (errno) {
+ u32 device_id = le32_to_cpu(rsp->device_id);
+ /*
+ * If the server thinks it's fine, but we fail to process
+ * the response, be nice and send a close to the server.
+ */
+ (void)send_msg_close(dev, device_id, NO_WAIT);
+ }
+ }
+ kfree(rsp);
+ wake_up_iu_comp(iu, errno);
+ rnbd_put_iu(dev->sess, iu);
+ rnbd_clt_put_dev(dev);
+}
+
+static void msg_sess_info_conf(struct work_struct *work)
+{
+ struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
+ struct rnbd_msg_sess_info_rsp *rsp = iu->buf;
+ struct rnbd_clt_session *sess = iu->sess;
+
+ if (!iu->errno)
+ sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);
+
+ kfree(rsp);
+ wake_up_iu_comp(iu, iu->errno);
+ rnbd_put_iu(sess, iu);
+ rnbd_clt_put_sess(sess);
+}
+
+static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_msg_open_rsp *rsp;
+ struct rnbd_msg_open msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ int err, errno;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu) {
+ kfree(rsp);
+ return -ENOMEM;
+ }
+
+ iu->buf = rsp;
+ iu->dev = dev;
+
+ sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
+ msg.access_mode = dev->access_mode;
+ strlcpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
+
+ WARN_ON(!rnbd_clt_get_dev(dev));
+ err = send_usr_msg(sess->rtrs, READ, iu,
+ &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ msg_open_conf, &errno, wait);
+ if (err) {
+ rnbd_clt_put_dev(dev);
+ rnbd_put_iu(sess, iu);
+ kfree(rsp);
+ } else {
+ err = errno;
+ }
+
+ rnbd_put_iu(sess, iu);
+ return err;
+}
+
+static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
+{
+ struct rnbd_msg_sess_info_rsp *rsp;
+ struct rnbd_msg_sess_info msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ int err, errno;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu) {
+ kfree(rsp);
+ return -ENOMEM;
+ }
+
+ iu->buf = rsp;
+ iu->sess = sess;
+
+ sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
+ msg.ver = RNBD_PROTO_VER_MAJOR;
+
+ if (!rnbd_clt_get_sess(sess)) {
+ /*
+ * That can happen only in one case, when RTRS has reestablished
+ * the connection and link_ev() is called, but the session is almost
+ * dead, the last reference on the session is put and the caller is
+ * waiting for RTRS to close everything.
+ */
+ err = -ENODEV;
+ goto put_iu;
+ }
+ err = send_usr_msg(sess->rtrs, READ, iu,
+ &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ msg_sess_info_conf, &errno, wait);
+ if (err) {
+ rnbd_clt_put_sess(sess);
+put_iu:
+ rnbd_put_iu(sess, iu);
+ kfree(rsp);
+ } else {
+ err = errno;
+ }
+
+ rnbd_put_iu(sess, iu);
+ return err;
+}
+
+static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
+{
+ struct rnbd_clt_dev *dev;
+
+ mutex_lock(&sess->lock);
+ list_for_each_entry(dev, &sess->devs_list, list) {
+ rnbd_clt_err(dev, "Device disconnected.\n");
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_MAPPED)
+ dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED;
+ mutex_unlock(&dev->lock);
+ }
+ mutex_unlock(&sess->lock);
+}
+
+static void remap_devs(struct rnbd_clt_session *sess)
+{
+ struct rnbd_clt_dev *dev;
+ struct rtrs_attrs attrs;
+ int err;
+
+ /*
+ * Careful here: we are called from RTRS link event directly,
+ * thus we can't send any RTRS request and wait for response
+ * or RTRS will not be able to complete request with failure
+ * if something goes wrong (failing of outstanding requests
+ * happens exactly from the context where we are blocking now).
+ *
+ * So to avoid deadlocks, each user message sent from here must
+ * be asynchronous.
+ */
+
+ err = send_msg_sess_info(sess, NO_WAIT);
+ if (err) {
+ pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
+ return;
+ }
+
+ rtrs_clt_query(sess->rtrs, &attrs);
+ mutex_lock(&sess->lock);
+ sess->max_io_size = attrs.max_io_size;
+
+ list_for_each_entry(dev, &sess->devs_list, list) {
+ bool skip;
+
+ mutex_lock(&dev->lock);
+ skip = (dev->dev_state == DEV_STATE_INIT);
+ mutex_unlock(&dev->lock);
+ if (skip)
+ /*
+ * When the device is establishing a connection for the
+ * first time, do not remap - it will be closed soon.
+ */
+ continue;
+
+ rnbd_clt_info(dev, "session reconnected, remapping device\n");
+ err = send_msg_open(dev, NO_WAIT);
+ if (err) {
+ rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
+ break;
+ }
+ }
+ mutex_unlock(&sess->lock);
+}
+
+static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev)
+{
+ struct rnbd_clt_session *sess = priv;
+
+ switch (ev) {
+ case RTRS_CLT_LINK_EV_DISCONNECTED:
+ set_dev_states_to_disconnected(sess);
+ break;
+ case RTRS_CLT_LINK_EV_RECONNECTED:
+ remap_devs(sess);
+ break;
+ default:
+ pr_err("Unknown session event received (%d), session: %s\n",
+ ev, sess->sessname);
+ }
+}
+
+static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues)
+{
+ unsigned int cpu;
+ struct rnbd_cpu_qlist *cpu_q;
+
+ for_each_possible_cpu(cpu) {
+ cpu_q = per_cpu_ptr(cpu_queues, cpu);
+
+ cpu_q->cpu = cpu;
+ INIT_LIST_HEAD(&cpu_q->requeue_list);
+ spin_lock_init(&cpu_q->requeue_lock);
+ }
+}
+
+static void destroy_mq_tags(struct rnbd_clt_session *sess)
+{
+ if (sess->tag_set.tags)
+ blk_mq_free_tag_set(&sess->tag_set);
+}
+
+static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
+{
+ sess->rtrs_ready = true;
+ wake_up_all(&sess->rtrs_waitq);
+}
+
+static void close_rtrs(struct rnbd_clt_session *sess)
+{
+ might_sleep();
+
+ if (!IS_ERR_OR_NULL(sess->rtrs)) {
+ rtrs_clt_close(sess->rtrs);
+ sess->rtrs = NULL;
+ wake_up_rtrs_waiters(sess);
+ }
+}
+
+static void free_sess(struct rnbd_clt_session *sess)
+{
+ WARN_ON(!list_empty(&sess->devs_list));
+
+ might_sleep();
+
+ close_rtrs(sess);
+ destroy_mq_tags(sess);
+ if (!list_empty(&sess->list)) {
+ mutex_lock(&sess_lock);
+ list_del(&sess->list);
+ mutex_unlock(&sess_lock);
+ }
+ free_percpu(sess->cpu_queues);
+ free_percpu(sess->cpu_rr);
+ mutex_destroy(&sess->lock);
+ kfree(sess);
+}
+
+static struct rnbd_clt_session *alloc_sess(const char *sessname)
+{
+ struct rnbd_clt_session *sess;
+ int err, cpu;
+
+ sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
+ if (!sess)
+ return ERR_PTR(-ENOMEM);
+ strlcpy(sess->sessname, sessname, sizeof(sess->sessname));
+ atomic_set(&sess->busy, 0);
+ mutex_init(&sess->lock);
+ INIT_LIST_HEAD(&sess->devs_list);
+ INIT_LIST_HEAD(&sess->list);
+ bitmap_zero(sess->cpu_queues_bm, NR_CPUS);
+ init_waitqueue_head(&sess->rtrs_waitq);
+ refcount_set(&sess->refcount, 1);
+
+ sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
+ if (!sess->cpu_queues) {
+ err = -ENOMEM;
+ goto err;
+ }
+ rnbd_init_cpu_qlists(sess->cpu_queues);
+
+ /*
+ * This is a simple percpu variable which stores CPU indices, which are
+ * incremented on each access. We need that for the sake of fairness,
+ * to wake up queues in a round-robin manner.
+ */
+ sess->cpu_rr = alloc_percpu(int);
+ if (!sess->cpu_rr) {
+ err = -ENOMEM;
+ goto err;
+ }
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(sess->cpu_rr, cpu) = cpu;
+
+ return sess;
+
+err:
+ free_sess(sess);
+
+ return ERR_PTR(err);
+}
+
+static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
+{
+ wait_event(sess->rtrs_waitq, sess->rtrs_ready);
+ if (IS_ERR_OR_NULL(sess->rtrs))
+ return -ECONNRESET;
+
+ return 0;
+}
+
+static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
+ __releases(&sess_lock)
+ __acquires(&sess_lock)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
+ if (IS_ERR_OR_NULL(sess->rtrs)) {
+ finish_wait(&sess->rtrs_waitq, &wait);
+ return;
+ }
+ mutex_unlock(&sess_lock);
+ /* loop in caller, see __find_and_get_sess().
+ * You can't leave the mutex locked and call schedule(): you would
+ * deadlock with a caller of free_sess(), which has just put the last
+ * reference and is about to take the sess_lock in order to delete
+ * the session from the list.
+ */
+ schedule();
+ mutex_lock(&sess_lock);
+}
+
+static struct rnbd_clt_session *__find_and_get_sess(const char *sessname)
+ __releases(&sess_lock)
+ __acquires(&sess_lock)
+{
+ struct rnbd_clt_session *sess, *sn;
+ int err;
+
+again:
+ list_for_each_entry_safe(sess, sn, &sess_list, list) {
+ if (strcmp(sessname, sess->sessname))
+ continue;
+
+ if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
+ /*
+ * No RTRS connection, session is dying.
+ */
+ continue;
+
+ if (rnbd_clt_get_sess(sess)) {
+ /*
+ * Alive session is found, wait for RTRS connection.
+ */
+ mutex_unlock(&sess_lock);
+ err = wait_for_rtrs_connection(sess);
+ if (err)
+ rnbd_clt_put_sess(sess);
+ mutex_lock(&sess_lock);
+
+ if (err)
+ /* Session is dying, repeat the loop */
+ goto again;
+
+ return sess;
+ }
+ /*
+ * Ref is 0, session is dying, wait for RTRS disconnect
+ * in order to avoid session name clashes.
+ */
+ wait_for_rtrs_disconnection(sess);
+ /*
+ * RTRS is disconnected and soon session will be freed,
+ * so repeat the loop.
+ */
+ goto again;
+ }
+
+ return NULL;
+}
+
+static struct
+rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
+{
+ struct rnbd_clt_session *sess = NULL;
+
+ mutex_lock(&sess_lock);
+ sess = __find_and_get_sess(sessname);
+ if (!sess) {
+ sess = alloc_sess(sessname);
+ if (IS_ERR(sess)) {
+ mutex_unlock(&sess_lock);
+ return sess;
+ }
+ list_add(&sess->list, &sess_list);
+ *first = true;
+ } else
+ *first = false;
+ mutex_unlock(&sess_lock);
+
+ return sess;
+}
+
+static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
+{
+ struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+
+ if (dev->read_only && (mode & FMODE_WRITE))
+ return -EPERM;
+
+ if (dev->dev_state == DEV_STATE_UNMAPPED ||
+ !rnbd_clt_get_dev(dev))
+ return -EIO;
+
+ return 0;
+}
+
+static void rnbd_client_release(struct gendisk *gen, fmode_t mode)
+{
+ struct rnbd_clt_dev *dev = gen->private_data;
+
+ rnbd_clt_put_dev(dev);
+}
+
+static int rnbd_client_getgeo(struct block_device *block_device,
+ struct hd_geometry *geo)
+{
+ u64 size;
+ struct rnbd_clt_dev *dev;
+
+ dev = block_device->bd_disk->private_data;
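+ /* Report a synthetic CHS geometry: 4 heads x 16 sectors, cylinders derived from the size */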
+ size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
+ geo->cylinders = size >> 6; /* size/64 */
+ geo->heads = 4;
+ geo->sectors = 16;
+ geo->start = 0;
+
+ return 0;
+}
+
+static const struct block_device_operations rnbd_client_ops = {
+ .owner = THIS_MODULE,
+ .open = rnbd_client_open,
+ .release = rnbd_client_release,
+ .getgeo = rnbd_client_getgeo
+};
+
+/* The amount of data that belongs to an I/O and the amount of data that
+ * should be read or written to the disk (bi_size) can differ.
+ *
+ * E.g., when WRITE_SAME is used, only a small amount of data is
+ * transferred that is then written repeatedly over a lot of sectors.
+ *
+ * Get the size of data to be transferred via RTRS by summing up the size
+ * of the scatter-gather list entries.
+ */
+static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
+{
+ struct scatterlist *sg;
+ size_t tsize = 0;
+ int i;
+
+ for_each_sg(sglist, sg, len, i)
+ tsize += sg->length;
+ return tsize;
+}
+
+static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
+ struct request *rq,
+ struct rnbd_iu *iu)
+{
+ struct rtrs_clt *rtrs = dev->sess->rtrs;
+ struct rtrs_permit *permit = iu->permit;
+ struct rnbd_msg_io msg;
+ struct rtrs_clt_req_ops req_ops;
+ unsigned int sg_cnt = 0;
+ struct kvec vec;
+ size_t size;
+ int err;
+
+ iu->rq = rq;
+ iu->dev = dev;
+ msg.sector = cpu_to_le64(blk_rq_pos(rq));
+ msg.bi_size = cpu_to_le32(blk_rq_bytes(rq));
+ msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq));
+ msg.prio = cpu_to_le16(req_get_ioprio(rq));
+
+ /*
+ * We only support discards with single segment for now.
+ * See queue limits.
+ */
+ if (req_op(rq) != REQ_OP_DISCARD)
+ sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist);
+
+ if (sg_cnt == 0)
+ /* Do not forget to mark the end */
+ sg_mark_end(&iu->sglist[0]);
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
+ msg.device_id = cpu_to_le32(dev->device_id);
+
+ vec = (struct kvec) {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt);
+ req_ops = (struct rtrs_clt_req_ops) {
+ .priv = iu,
+ .conf_fn = msg_io_conf,
+ };
+ err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
+ &vec, 1, size, iu->sglist, sg_cnt);
+ if (unlikely(err)) {
+ rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
+ * @dev: Device to be checked
+ * @q: Queue to be added to the requeue list if required
+ *
+ * Description:
+ * If the session is busy, someone will requeue us when resources
+ * are freed. If the session is not doing anything, the device is not
+ * added to the list and %false is returned.
+ */
+static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
+ struct rnbd_queue *q)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_cpu_qlist *cpu_q;
+ unsigned long flags;
+ bool added = true;
+ bool need_set;
+
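+ /* get_cpu_ptr() disables preemption until put_cpu_ptr() below */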
+ cpu_q = get_cpu_ptr(sess->cpu_queues);
+ spin_lock_irqsave(&cpu_q->requeue_lock, flags);
+
+ if (likely(!test_and_set_bit_lock(0, &q->in_list))) {
+ if (WARN_ON(!list_empty(&q->requeue_list)))
+ goto unlock;
+
+ need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ if (need_set) {
+ set_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ /* Paired with rnbd_put_permit(). Set a bit first
+ * and then observe the busy counter.
+ */
+ smp_mb__before_atomic();
+ }
+ if (likely(atomic_read(&sess->busy))) {
+ list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
+ } else {
+ /* Very unlikely, but possible: busy counter was
+ * observed as zero. Drop all bits and return
+ * false to restart the queue by ourselves.
+ */
+ if (need_set)
+ clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ clear_bit_unlock(0, &q->in_list);
+ added = false;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
+ put_cpu_ptr(sess->cpu_queues);
+
+ return added;
+}
+
+static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
+ struct blk_mq_hw_ctx *hctx,
+ int delay)
+{
+ struct rnbd_queue *q = hctx->driver_data;
+
+ if (delay != RNBD_DELAY_IFBUSY)
+ blk_mq_delay_run_hw_queue(hctx, delay);
+ else if (unlikely(!rnbd_clt_dev_add_to_requeue(dev, q)))
+ /*
+ * If session is not busy we have to restart
+ * the queue ourselves.
+ */
+ blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);
+}
+
+static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
+ int err;
+
+ if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
+ return BLK_STS_IOERR;
+
+ iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
+ RTRS_PERMIT_NOWAIT);
+ if (unlikely(!iu->permit)) {
+ rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
+ return BLK_STS_RESOURCE;
+ }
+
+ blk_mq_start_request(rq);
+ err = rnbd_client_xfer_request(dev, rq, iu);
+ if (likely(err == 0))
+ return BLK_STS_OK;
+ if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
+ rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
+ rnbd_put_permit(dev->sess, iu->permit);
+ return BLK_STS_RESOURCE;
+ }
+
+ rnbd_put_permit(dev->sess, iu->permit);
+ return BLK_STS_IOERR;
+}
+
+static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
+
+ sg_init_table(iu->sglist, BMAX_SEGMENTS);
+ return 0;
+}
+
+static struct blk_mq_ops rnbd_mq_ops = {
+ .queue_rq = rnbd_queue_rq,
+ .init_request = rnbd_init_request,
+ .complete = rnbd_softirq_done_fn,
+};
+
+static int setup_mq_tags(struct rnbd_clt_session *sess)
+{
+ struct blk_mq_tag_set *tag_set = &sess->tag_set;
+
+ memset(tag_set, 0, sizeof(*tag_set));
+ tag_set->ops = &rnbd_mq_ops;
+ tag_set->queue_depth = sess->queue_depth;
+ tag_set->numa_node = NUMA_NO_NODE;
+ tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_TAG_SHARED;
+ tag_set->cmd_size = sizeof(struct rnbd_iu);
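+ /* One HW queue per online CPU; all devices of the session share this tag set */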
+ tag_set->nr_hw_queues = num_online_cpus();
+
+ return blk_mq_alloc_tag_set(tag_set);
+}
+
+static struct rnbd_clt_session *
+find_and_get_or_create_sess(const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t path_cnt, u16 port_nr)
+{
+ struct rnbd_clt_session *sess;
+ struct rtrs_attrs attrs;
+ int err;
+ bool first;
+ struct rtrs_clt_ops rtrs_ops;
+
+ sess = find_or_create_sess(sessname, &first);
+ if (sess == ERR_PTR(-ENOMEM))
+ return ERR_PTR(-ENOMEM);
+ else if (!first)
+ return sess;
+
+ rtrs_ops = (struct rtrs_clt_ops) {
+ .priv = sess,
+ .link_ev = rnbd_clt_link_ev,
+ };
+ /*
+ * Nothing was found, establish an RTRS connection and proceed further.
+ */
+ sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
+ paths, path_cnt, port_nr,
+ sizeof(struct rnbd_iu),
+ RECONNECT_DELAY, BMAX_SEGMENTS,
+ BLK_MAX_SEGMENT_SIZE,
+ MAX_RECONNECTS);
+ if (IS_ERR(sess->rtrs)) {
+ err = PTR_ERR(sess->rtrs);
+ goto wake_up_and_put;
+ }
+ rtrs_clt_query(sess->rtrs, &attrs);
+ sess->max_io_size = attrs.max_io_size;
+ sess->queue_depth = attrs.queue_depth;
+
+ err = setup_mq_tags(sess);
+ if (err)
+ goto close_rtrs;
+
+ err = send_msg_sess_info(sess, WAIT);
+ if (err)
+ goto close_rtrs;
+
+ wake_up_rtrs_waiters(sess);
+
+ return sess;
+
+close_rtrs:
+ close_rtrs(sess);
+put_sess:
+ rnbd_clt_put_sess(sess);
+
+ return ERR_PTR(err);
+
+wake_up_and_put:
+ wake_up_rtrs_waiters(sess);
+ goto put_sess;
+}
+
+static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
+ struct rnbd_queue *q,
+ struct blk_mq_hw_ctx *hctx)
+{
+ INIT_LIST_HEAD(&q->requeue_list);
+ q->dev = dev;
+ q->hctx = hctx;
+}
+
+static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
+{
+ int i;
+ struct blk_mq_hw_ctx *hctx;
+ struct rnbd_queue *q;
+
+ queue_for_each_hw_ctx(dev->queue, hctx, i) {
+ q = &dev->hw_queues[i];
+ rnbd_init_hw_queue(dev, q, hctx);
+ hctx->driver_data = q;
+ }
+}
+
+static int setup_mq_dev(struct rnbd_clt_dev *dev)
+{
+ dev->queue = blk_mq_init_queue(&dev->sess->tag_set);
+ if (IS_ERR(dev->queue)) {
+ rnbd_clt_err(dev, "Initializing multiqueue queue failed, err: %ld\n",
+ PTR_ERR(dev->queue));
+ return PTR_ERR(dev->queue);
+ }
+ rnbd_init_mq_hw_queues(dev);
+ return 0;
+}
+
+static void setup_request_queue(struct rnbd_clt_dev *dev)
+{
+ blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
+ blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
+ blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
+ blk_queue_max_write_same_sectors(dev->queue,
+ dev->max_write_same_sectors);
+
+ /*
+ * we don't support discards to "discontiguous" segments
+ * in one request
+ */
+ blk_queue_max_discard_segments(dev->queue, 1);
+
+ blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
+ dev->queue->limits.discard_granularity = dev->discard_granularity;
+ dev->queue->limits.discard_alignment = dev->discard_alignment;
+ if (dev->max_discard_sectors)
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
+ if (dev->secure_discard)
+ blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);
+
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
+ blk_queue_max_segments(dev->queue, dev->max_segments);
+ blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
+ blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
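+ /* Announce a volatile write cache and FUA support; flushes and FUA are forwarded to the server */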
+ blk_queue_write_cache(dev->queue, true, true);
+ dev->queue->queuedata = dev;
+}
+
+static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
+{
+ dev->gd->major = rnbd_client_major;
+ dev->gd->first_minor = idx << RNBD_PART_BITS;
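+ /* Each device reserves 1 << RNBD_PART_BITS minors to leave room for partitions */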
+ dev->gd->fops = &rnbd_client_ops;
+ dev->gd->queue = dev->queue;
+ dev->gd->private_data = dev;
+ snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
+ idx);
+ pr_debug("disk_name=%s, capacity=%zu\n",
+ dev->gd->disk_name,
+ dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
+ );
+
+ set_capacity(dev->gd, dev->nsectors);
+
+ if (dev->access_mode == RNBD_ACCESS_RO) {
+ dev->read_only = true;
+ set_disk_ro(dev->gd, true);
+ } else {
+ dev->read_only = false;
+ }
+
+ if (!dev->rotational)
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
+}
+
+static int rnbd_client_setup_device(struct rnbd_clt_session *sess,
+ struct rnbd_clt_dev *dev, int idx)
+{
+ int err;
+
+ dev->size = dev->nsectors * dev->logical_block_size;
+
+ err = setup_mq_dev(dev);
+ if (err)
+ return err;
+
+ setup_request_queue(dev);
+
+ dev->gd = alloc_disk_node(1 << RNBD_PART_BITS, NUMA_NO_NODE);
+ if (!dev->gd) {
+ blk_cleanup_queue(dev->queue);
+ return -ENOMEM;
+ }
+
+ rnbd_clt_setup_gen_disk(dev, idx);
+
+ return 0;
+}
+
+static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ enum rnbd_access_mode access_mode,
+ const char *pathname)
+{
+ struct rnbd_clt_dev *dev;
+ int ret;
+
+ dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->hw_queues = kcalloc(nr_cpu_ids, sizeof(*dev->hw_queues),
+ GFP_KERNEL);
+ if (!dev->hw_queues) {
+ ret = -ENOMEM;
+ goto out_alloc;
+ }
+
+ mutex_lock(&ida_lock);
+ ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
+ GFP_KERNEL);
+ mutex_unlock(&ida_lock);
+ if (ret < 0) {
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+ pathname, sess->sessname, ret);
+ goto out_queues;
+ }
+ dev->clt_device_id = ret;
+ dev->sess = sess;
+ dev->access_mode = access_mode;
+ strlcpy(dev->pathname, pathname, sizeof(dev->pathname));
+ mutex_init(&dev->lock);
+ refcount_set(&dev->refcount, 1);
+ dev->dev_state = DEV_STATE_INIT;
+
+ /*
+ * We are called here from a sysfs entry, thus clt-sysfs is
+ * responsible that the session will not disappear.
+ */
+ WARN_ON(!rnbd_clt_get_sess(sess));
+
+ return dev;
+
+out_queues:
+ kfree(dev->hw_queues);
+out_alloc:
+ kfree(dev);
+ return ERR_PTR(ret);
+}
+
+static bool __exists_dev(const char *pathname)
+{
+ struct rnbd_clt_session *sess;
+ struct rnbd_clt_dev *dev;
+ bool found = false;
+
+ list_for_each_entry(sess, &sess_list, list) {
+ mutex_lock(&sess->lock);
+ list_for_each_entry(dev, &sess->devs_list, list) {
+ if (!strncmp(dev->pathname, pathname,
+ sizeof(dev->pathname))) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&sess->lock);
+ if (found)
+ break;
+ }
+
+ return found;
+}
+
+static bool exists_devpath(const char *pathname)
+{
+ bool found;
+
+ mutex_lock(&sess_lock);
+ found = __exists_dev(pathname);
+ mutex_unlock(&sess_lock);
+
+ return found;
+}
+
+static bool insert_dev_if_not_exists_devpath(const char *pathname,
+ struct rnbd_clt_session *sess,
+ struct rnbd_clt_dev *dev)
+{
+ bool found;
+
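+ /* Check and insert under the same sess_lock so two identical mappings can't race */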
+ mutex_lock(&sess_lock);
+ found = __exists_dev(pathname);
+ if (!found) {
+ mutex_lock(&sess->lock);
+ list_add_tail(&dev->list, &sess->devs_list);
+ mutex_unlock(&sess->lock);
+ }
+ mutex_unlock(&sess_lock);
+
+ return found;
+}
+
+static void delete_dev(struct rnbd_clt_dev *dev)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+
+ mutex_lock(&sess->lock);
+ list_del(&dev->list);
+ mutex_unlock(&sess->lock);
+}
+
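+/*
+ * Map a remote device: find or create the session, register the device in
+ * the session's device list, ask the server to open it, then set up the
+ * request queue and gendisk locally.
+ */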
+struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
+ struct rtrs_addr *paths,
+ size_t path_cnt, u16 port_nr,
+ const char *pathname,
+ enum rnbd_access_mode access_mode)
+{
+ struct rnbd_clt_session *sess;
+ struct rnbd_clt_dev *dev;
+ int ret;
+
+ if (exists_devpath(pathname))
+ return ERR_PTR(-EEXIST);
+
+ sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr);
+ if (IS_ERR(sess))
+ return ERR_CAST(sess);
+
+ dev = init_dev(sess, access_mode, pathname);
+ if (IS_ERR(dev)) {
+ pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
+ pathname, sess->sessname, PTR_ERR(dev));
+ ret = PTR_ERR(dev);
+ goto put_sess;
+ }
+ if (insert_dev_if_not_exists_devpath(pathname, sess, dev)) {
+ ret = -EEXIST;
+ goto put_dev;
+ }
+ ret = send_msg_open(dev, WAIT);
+ if (ret) {
+ rnbd_clt_err(dev,
+ "map_device: failed, can't open remote device, err: %d\n",
+ ret);
+ goto del_dev;
+ }
+ mutex_lock(&dev->lock);
+ pr_debug("Opened remote device: session=%s, path='%s'\n",
+ sess->sessname, pathname);
+ ret = rnbd_client_setup_device(sess, dev, dev->clt_device_id);
+ if (ret) {
+ rnbd_clt_err(dev,
+ "map_device: Failed to configure device, err: %d\n",
+ ret);
+ mutex_unlock(&dev->lock);
+ goto del_dev;
+ }
+
+ rnbd_clt_info(dev,
+ "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n",
+ dev->gd->disk_name, dev->nsectors,
+ dev->logical_block_size, dev->physical_block_size,
+ dev->max_write_same_sectors, dev->max_discard_sectors,
+ dev->discard_granularity, dev->discard_alignment,
+ dev->secure_discard, dev->max_segments,
+ dev->max_hw_sectors, dev->rotational);
+
+ mutex_unlock(&dev->lock);
+
+ add_disk(dev->gd);
+ rnbd_clt_put_sess(sess);
+
+ return dev;
+
+del_dev:
+ delete_dev(dev);
+put_dev:
+ rnbd_clt_put_dev(dev);
+put_sess:
+ rnbd_clt_put_sess(sess);
+
+ return ERR_PTR(ret);
+}
+
+static void destroy_gen_disk(struct rnbd_clt_dev *dev)
+{
+ del_gendisk(dev->gd);
+ blk_cleanup_queue(dev->queue);
+ put_disk(dev->gd);
+}
+
+static void destroy_sysfs(struct rnbd_clt_dev *dev,
+ const struct attribute *sysfs_self)
+{
+ rnbd_clt_remove_dev_symlink(dev);
+ if (dev->kobj.state_initialized) {
+ if (sysfs_self)
+ /* To avoid deadlock firstly remove itself */
+ sysfs_remove_file_self(&dev->kobj, sysfs_self);
+ kobject_del(&dev->kobj);
+ kobject_put(&dev->kobj);
+ }
+}
+
+int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
+ const struct attribute *sysfs_self)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ int refcount, ret = 0;
+ bool was_mapped;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_UNMAPPED) {
+ rnbd_clt_info(dev, "Device is already being unmapped\n");
+ ret = -EALREADY;
+ goto err;
+ }
+ refcount = refcount_read(&dev->refcount);
+ if (!force && refcount > 1) {
+ rnbd_clt_err(dev,
+ "Closing device failed, device is in use, (%d device users)\n",
+ refcount - 1);
+ ret = -EBUSY;
+ goto err;
+ }
+ was_mapped = (dev->dev_state == DEV_STATE_MAPPED);
+ dev->dev_state = DEV_STATE_UNMAPPED;
+ mutex_unlock(&dev->lock);
+
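+ /* New opens fail from here on: the device state is UNMAPPED */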
+ delete_dev(dev);
+ destroy_sysfs(dev, sysfs_self);
+ destroy_gen_disk(dev);
+ if (was_mapped && sess->rtrs)
+ send_msg_close(dev, dev->device_id, WAIT);
+
+ rnbd_clt_info(dev, "Device is unmapped\n");
+
+ /* Likely last reference put */
+ rnbd_clt_put_dev(dev);
+
+ /*
+ * Here device and session can be vanished!
+ */
+
+ return 0;
+err:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+int rnbd_clt_remap_device(struct rnbd_clt_dev *dev)
+{
+ int err;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED)
+ err = 0;
+ else if (dev->dev_state == DEV_STATE_UNMAPPED)
+ err = -ENODEV;
+ else if (dev->dev_state == DEV_STATE_MAPPED)
+ err = -EALREADY;
+ else
+ err = -EBUSY;
+ mutex_unlock(&dev->lock);
+ if (!err) {
+ rnbd_clt_info(dev, "Remapping device.\n");
+ err = send_msg_open(dev, WAIT);
+ if (err)
+ rnbd_clt_err(dev, "remap_device: %d\n", err);
+ }
+
+ return err;
+}
+
+static void unmap_device_work(struct work_struct *work)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
+ rnbd_clt_unmap_device(dev, true, NULL);
+}
+
+static void rnbd_destroy_sessions(void)
+{
+ struct rnbd_clt_session *sess, *sn;
+ struct rnbd_clt_dev *dev, *tn;
+
+ /* Firstly forbid access through sysfs interface */
+ rnbd_clt_destroy_default_group();
+ rnbd_clt_destroy_sysfs_files();
+
+ /*
+ * At this point there is no concurrent access to the sessions list
+ * and the devices list:
+ * 1. New sessions or devices can't be created - session sysfs files
+ * are removed.
+ * 2. Devices or sessions can't be removed - the module reference is
+ * taken into account in the unmap device sysfs callback.
+ * 3. No IO requests are inflight - each file open of the block_dev
+ * increases the module reference in get_disk().
+ *
+ * But there can still be user requests inflight, which were sent by
+ * the asynchronous send_msg_*() functions; thus, before unmapping
+ * devices, the RTRS session must be explicitly closed.
+ */
+
+ list_for_each_entry_safe(sess, sn, &sess_list, list) {
+ WARN_ON(!rnbd_clt_get_sess(sess));
+ close_rtrs(sess);
+ list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
+ /*
+ * Here unmap happens in parallel for only one reason:
+ * blk_cleanup_queue() takes around half a second, so
+ * on a huge number of devices the whole module unload
+ * procedure takes minutes.
+ */
+ INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
+ queue_work(system_long_wq, &dev->unmap_on_rmmod_work);
+ }
+ rnbd_clt_put_sess(sess);
+ }
+ /* Wait for all scheduled unmap works */
+ flush_workqueue(system_long_wq);
+ WARN_ON(!list_empty(&sess_list));
+}
+
+static int __init rnbd_client_init(void)
+{
+ int err = 0;
+
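+ /* Sanity-check the wire format: these struct sizes are part of the
+ * network protocol and must not change.
+ */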
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
+ rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd");
+ if (rnbd_client_major <= 0) {
+ pr_err("Failed to load module, block device registration failed\n");
+ return -EBUSY;
+ }
+
+ err = rnbd_clt_create_sysfs_files();
+ if (err) {
+ pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
+ err);
+ unregister_blkdev(rnbd_client_major, "rnbd");
+ }
+
+ return err;
+}
+
+static void __exit rnbd_client_exit(void)
+{
+ rnbd_destroy_sessions();
+ unregister_blkdev(rnbd_client_major, "rnbd");
+ ida_destroy(&index_ida);
+}
+
+module_init(rnbd_client_init);
+module_exit(rnbd_client_exit);
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
new file mode 100644
index 000000000000..ed33654aa486
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RNBD_CLT_H
+#define RNBD_CLT_H
+
+#include <linux/wait.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/blk-mq.h>
+#include <linux/refcount.h>
+
+#include <rtrs.h>
+#include "rnbd-proto.h"
+#include "rnbd-log.h"
+
+/* Max. number of segments per IO request, Mellanox Connect X ~ Connect X5,
+ * choose the minimal 30 for all, minus 1 for the internal protocol, so 29.
+ */
+#define BMAX_SEGMENTS 29
+/* time in seconds between reconnect tries, default to 30 s */
+#define RECONNECT_DELAY 30
+/*
+ * Number of times to reconnect on error before giving up, 0 for disabled,
+ * -1 for forever
+ */
+#define MAX_RECONNECTS -1
+
+enum rnbd_clt_dev_state {
+ DEV_STATE_INIT,
+ DEV_STATE_MAPPED,
+ DEV_STATE_MAPPED_DISCONNECTED,
+ DEV_STATE_UNMAPPED,
+};
+
+struct rnbd_iu_comp {
+ wait_queue_head_t wait;
+ int errno;
+};
+
+struct rnbd_iu {
+ union {
+ struct request *rq; /* for block io */
+ void *buf; /* for user messages */
+ };
+ struct rtrs_permit *permit;
+ union {
+ /* use to send msg associated with a dev */
+ struct rnbd_clt_dev *dev;
+ /* use to send msg associated with a sess */
+ struct rnbd_clt_session *sess;
+ };
+ struct scatterlist sglist[BMAX_SEGMENTS];
+ struct work_struct work;
+ int errno;
+ struct rnbd_iu_comp comp;
+ atomic_t refcount;
+};
+
+struct rnbd_cpu_qlist {
+ struct list_head requeue_list;
+ spinlock_t requeue_lock;
+ unsigned int cpu;
+};
+
+struct rnbd_clt_session {
+ struct list_head list;
+ struct rtrs_clt *rtrs;
+ wait_queue_head_t rtrs_waitq;
+ bool rtrs_ready;
+ struct rnbd_cpu_qlist __percpu
+ *cpu_queues;
+ DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
+ int __percpu *cpu_rr; /* per-cpu var for CPU round-robin */
+ atomic_t busy;
+ int queue_depth;
+ u32 max_io_size;
+ struct blk_mq_tag_set tag_set;
+ struct mutex lock; /* protects state and devs_list */
+ struct list_head devs_list; /* list of struct rnbd_clt_dev */
+ refcount_t refcount;
+ char sessname[NAME_MAX];
+ u8 ver; /* protocol version */
+};
+
+/* Submission queues */
+struct rnbd_queue {
+ struct list_head requeue_list;
+ unsigned long in_list;
+ struct rnbd_clt_dev *dev;
+ struct blk_mq_hw_ctx *hctx;
+};
+
+struct rnbd_clt_dev {
+ struct rnbd_clt_session *sess;
+ struct request_queue *queue;
+ struct rnbd_queue *hw_queues;
+ u32 device_id;
+ /* local IDA index - used to track minor number allocations. */
+ u32 clt_device_id;
+ struct mutex lock;
+ enum rnbd_clt_dev_state dev_state;
+ char pathname[NAME_MAX];
+ enum rnbd_access_mode access_mode;
+ bool read_only;
+ bool rotational;
+ u32 max_hw_sectors;
+ u32 max_write_same_sectors;
+ u32 max_discard_sectors;
+ u32 discard_granularity;
+ u32 discard_alignment;
+ u16 secure_discard;
+ u16 physical_block_size;
+ u16 logical_block_size;
+ u16 max_segments;
+ size_t nsectors;
+ u64 size; /* device size in bytes */
+ struct list_head list;
+ struct gendisk *gd;
+ struct kobject kobj;
+ char blk_symlink_name[NAME_MAX];
+ refcount_t refcount;
+ struct work_struct unmap_on_rmmod_work;
+};
+
+/* rnbd-clt.c */
+
+struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
+ struct rtrs_addr *paths,
+ size_t path_cnt, u16 port_nr,
+ const char *pathname,
+ enum rnbd_access_mode access_mode);
+int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
+ const struct attribute *sysfs_self);
+
+int rnbd_clt_remap_device(struct rnbd_clt_dev *dev);
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize);
+
+/* rnbd-clt-sysfs.c */
+
+int rnbd_clt_create_sysfs_files(void);
+
+void rnbd_clt_destroy_sysfs_files(void);
+void rnbd_clt_destroy_default_group(void);
+
+void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev);
+
+#endif /* RNBD_CLT_H */
diff --git a/drivers/block/rnbd/rnbd-common.c b/drivers/block/rnbd/rnbd-common.c
new file mode 100644
index 000000000000..596c3f732403
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-common.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#include "rnbd-proto.h"
+
+const char *rnbd_access_mode_str(enum rnbd_access_mode mode)
+{
+ switch (mode) {
+ case RNBD_ACCESS_RO:
+ return "ro";
+ case RNBD_ACCESS_RW:
+ return "rw";
+ case RNBD_ACCESS_MIGRATION:
+ return "migration";
+ default:
+ return "unknown";
+ }
+}
diff --git a/drivers/block/rnbd/rnbd-log.h b/drivers/block/rnbd/rnbd-log.h
new file mode 100644
index 000000000000..136e7d6c3451
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-log.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_LOG_H
+#define RNBD_LOG_H
+
+#include "rnbd-clt.h"
+#include "rnbd-srv.h"
+
+#define rnbd_clt_log(fn, dev, fmt, ...) ( \
+ fn("<%s@%s> " fmt, (dev)->pathname, \
+ (dev)->sess->sessname, \
+ ##__VA_ARGS__))
+#define rnbd_srv_log(fn, dev, fmt, ...) ( \
+ fn("<%s@%s>: " fmt, (dev)->pathname, \
+ (dev)->sess->sessname, ##__VA_ARGS__))
+
+#define rnbd_clt_err(dev, fmt, ...) \
+ rnbd_clt_log(pr_err, dev, fmt, ##__VA_ARGS__)
+#define rnbd_clt_err_rl(dev, fmt, ...) \
+ rnbd_clt_log(pr_err_ratelimited, dev, fmt, ##__VA_ARGS__)
+#define rnbd_clt_info(dev, fmt, ...) \
+ rnbd_clt_log(pr_info, dev, fmt, ##__VA_ARGS__)
+#define rnbd_clt_info_rl(dev, fmt, ...) \
+ rnbd_clt_log(pr_info_ratelimited, dev, fmt, ##__VA_ARGS__)
+
+#define rnbd_srv_err(dev, fmt, ...) \
+ rnbd_srv_log(pr_err, dev, fmt, ##__VA_ARGS__)
+#define rnbd_srv_err_rl(dev, fmt, ...) \
+ rnbd_srv_log(pr_err_ratelimited, dev, fmt, ##__VA_ARGS__)
+#define rnbd_srv_info(dev, fmt, ...) \
+ rnbd_srv_log(pr_info, dev, fmt, ##__VA_ARGS__)
+#define rnbd_srv_info_rl(dev, fmt, ...) \
+ rnbd_srv_log(pr_info_ratelimited, dev, fmt, ##__VA_ARGS__)
+
+#endif /* RNBD_LOG_H */
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
new file mode 100644
index 000000000000..ca166241452c
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_PROTO_H
+#define RNBD_PROTO_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/limits.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <rdma/ib.h>
+
+#define RNBD_PROTO_VER_MAJOR 2
+#define RNBD_PROTO_VER_MINOR 0
+
+/* The default port number the RTRS server is listening on. */
+#define RTRS_PORT 1234
+
+/**
+ * enum rnbd_msg_types - RNBD message types
+ * @RNBD_MSG_SESS_INFO: initial session info from client to server
+ * @RNBD_MSG_SESS_INFO_RSP: initial session info from server to client
+ * @RNBD_MSG_OPEN: open (map) device request
+ * @RNBD_MSG_OPEN_RSP: response to an @RNBD_MSG_OPEN
+ * @RNBD_MSG_IO: block IO request operation
+ * @RNBD_MSG_CLOSE: close (unmap) device request
+ */
+enum rnbd_msg_type {
+ RNBD_MSG_SESS_INFO,
+ RNBD_MSG_SESS_INFO_RSP,
+ RNBD_MSG_OPEN,
+ RNBD_MSG_OPEN_RSP,
+ RNBD_MSG_IO,
+ RNBD_MSG_CLOSE,
+};
+
+/**
+ * struct rnbd_msg_hdr - header of RNBD messages
+ * @type: Message type, valid values see: enum rnbd_msg_types
+ */
+struct rnbd_msg_hdr {
+ __le16 type;
+ __le16 __padding;
+};
+
+/**
+ * We allow to map RO many times and RW only once. We allow to map yet another
+ * time RW, if MIGRATION is provided (second RW export can be required for
+ * example for VM migration)
+ */
+enum rnbd_access_mode {
+ RNBD_ACCESS_RO,
+ RNBD_ACCESS_RW,
+ RNBD_ACCESS_MIGRATION,
+};
+
+/**
+ * struct rnbd_msg_sess_info - initial session info from client to server
+ * @hdr: message header
+ * @ver: RNBD protocol version
+ */
+struct rnbd_msg_sess_info {
+ struct rnbd_msg_hdr hdr;
+ u8 ver;
+ u8 reserved[31];
+};
+
+/**
+ * struct rnbd_msg_sess_info_rsp - initial session info from server to client
+ * @hdr: message header
+ * @ver: RNBD protocol version
+ */
+struct rnbd_msg_sess_info_rsp {
+ struct rnbd_msg_hdr hdr;
+ u8 ver;
+ u8 reserved[31];
+};
+
+/**
+ * struct rnbd_msg_open - request to open a remote device.
+ * @hdr: message header
+ * @access_mode: the mode to open remote device, valid values see:
+ * enum rnbd_access_mode
+ * @device_name: device path on remote side
+ */
+struct rnbd_msg_open {
+ struct rnbd_msg_hdr hdr;
+ u8 access_mode;
+ u8 resv1;
+ s8 dev_name[NAME_MAX];
+ u8 reserved[3];
+};
+
+/**
+ * struct rnbd_msg_close - request to close a remote device.
+ * @hdr: message header
+ * @device_id: device_id on server side to identify the device
+ */
+struct rnbd_msg_close {
+ struct rnbd_msg_hdr hdr;
+ __le32 device_id;
+};
+
+/**
+ * struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
+ * @hdr: message header
+ * @device_id: device_id on server side to identify the device
+ * @nsectors: number of sectors in the usual 512b unit
+ * @max_hw_sectors: max hardware sectors in the usual 512b unit
+ * @max_write_same_sectors: max sectors for WRITE SAME in the 512b unit
+ * @max_discard_sectors: max. sectors that can be discarded at once in 512b
+ * unit.
+ * @discard_granularity: size of the internal discard allocation unit in bytes
+ * @discard_alignment: offset from internal allocation assignment in bytes
+ * @physical_block_size: physical block size device supports in bytes
+ * @logical_block_size: logical block size device supports in bytes
+ * @max_segments: max segments hardware support in one transfer
+ * @secure_discard: supports secure discard
+ * @rotational: is it a rotational disc?
+ */
+struct rnbd_msg_open_rsp {
+ struct rnbd_msg_hdr hdr;
+ __le32 device_id;
+ __le64 nsectors;
+ __le32 max_hw_sectors;
+ __le32 max_write_same_sectors;
+ __le32 max_discard_sectors;
+ __le32 discard_granularity;
+ __le32 discard_alignment;
+ __le16 physical_block_size;
+ __le16 logical_block_size;
+ __le16 max_segments;
+ __le16 secure_discard;
+ u8 rotational;
+ u8 reserved[11];
+};
+
+/**
+ * struct rnbd_msg_io - message for I/O read/write
+ * @hdr: message header
+ * @device_id: device_id on server side to find the right device
+ * @sector: bi_sector attribute from struct bio
+ * @rw: valid values are defined in enum rnbd_io_flags
+ * @bi_size: number of bytes for I/O read/write
+ * @prio: priority
+ */
+struct rnbd_msg_io {
+ struct rnbd_msg_hdr hdr;
+ __le32 device_id;
+ __le64 sector;
+ __le32 rw;
+ __le32 bi_size;
+ __le16 prio;
+};
+
+#define RNBD_OP_BITS 8
+#define RNBD_OP_MASK ((1 << RNBD_OP_BITS) - 1)
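+/* The low RNBD_OP_BITS bits of an rnbd_io_flags value carry the operation, the upper bits carry flags */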
+
+/**
+ * enum rnbd_io_flags - RNBD request types from rq_flag_bits
+ * @RNBD_OP_READ: read sectors from the device
+ * @RNBD_OP_WRITE: write sectors to the device
+ * @RNBD_OP_FLUSH: flush the volatile write cache
+ * @RNBD_OP_DISCARD: discard sectors
+ * @RNBD_OP_SECURE_ERASE: securely erase sectors
+ * @RNBD_OP_WRITE_SAME: write the same sectors many times
+ *
+ * @RNBD_F_SYNC: request is sync (sync write or read)
+ * @RNBD_F_FUA: forced unit access
+ */
+enum rnbd_io_flags {
+
+ /* Operations */
+
+ RNBD_OP_READ = 0,
+ RNBD_OP_WRITE = 1,
+ RNBD_OP_FLUSH = 2,
+ RNBD_OP_DISCARD = 3,
+ RNBD_OP_SECURE_ERASE = 4,
+ RNBD_OP_WRITE_SAME = 5,
+
+ RNBD_OP_LAST,
+
+ /* Flags */
+
+ RNBD_F_SYNC = 1<<(RNBD_OP_BITS + 0),
+ RNBD_F_FUA = 1<<(RNBD_OP_BITS + 1),
+
+ RNBD_F_ALL = (RNBD_F_SYNC | RNBD_F_FUA)
+
+};
+
+static inline u32 rnbd_op(u32 flags)
+{
+ return flags & RNBD_OP_MASK;
+}
+
+static inline u32 rnbd_flags(u32 flags)
+{
+ return flags & ~RNBD_OP_MASK;
+}
+
+static inline bool rnbd_flags_supported(u32 flags)
+{
+ u32 op;
+
+ op = rnbd_op(flags);
+ flags = rnbd_flags(flags);
+
+ if (op >= RNBD_OP_LAST)
+ return false;
+ if (flags & ~RNBD_F_ALL)
+ return false;
+
+ return true;
+}
+
+static inline u32 rnbd_to_bio_flags(u32 rnbd_opf)
+{
+ u32 bio_opf;
+
+ switch (rnbd_op(rnbd_opf)) {
+ case RNBD_OP_READ:
+ bio_opf = REQ_OP_READ;
+ break;
+ case RNBD_OP_WRITE:
+ bio_opf = REQ_OP_WRITE;
+ break;
+ case RNBD_OP_FLUSH:
+ bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
+ break;
+ case RNBD_OP_DISCARD:
+ bio_opf = REQ_OP_DISCARD;
+ break;
+ case RNBD_OP_SECURE_ERASE:
+ bio_opf = REQ_OP_SECURE_ERASE;
+ break;
+ case RNBD_OP_WRITE_SAME:
+ bio_opf = REQ_OP_WRITE_SAME;
+ break;
+ default:
+ WARN(1, "Unknown RNBD type: %d (flags %d)\n",
+ rnbd_op(rnbd_opf), rnbd_opf);
+ bio_opf = 0;
+ }
+
+ if (rnbd_opf & RNBD_F_SYNC)
+ bio_opf |= REQ_SYNC;
+
+ if (rnbd_opf & RNBD_F_FUA)
+ bio_opf |= REQ_FUA;
+
+ return bio_opf;
+}
+
+static inline u32 rq_to_rnbd_flags(struct request *rq)
+{
+ u32 rnbd_opf;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ rnbd_opf = RNBD_OP_READ;
+ break;
+ case REQ_OP_WRITE:
+ rnbd_opf = RNBD_OP_WRITE;
+ break;
+ case REQ_OP_DISCARD:
+ rnbd_opf = RNBD_OP_DISCARD;
+ break;
+ case REQ_OP_SECURE_ERASE:
+ rnbd_opf = RNBD_OP_SECURE_ERASE;
+ break;
+ case REQ_OP_WRITE_SAME:
+ rnbd_opf = RNBD_OP_WRITE_SAME;
+ break;
+ case REQ_OP_FLUSH:
+ rnbd_opf = RNBD_OP_FLUSH;
+ break;
+ default:
+ WARN(1, "Unknown request type %d (flags %llu)\n",
+ req_op(rq), (unsigned long long)rq->cmd_flags);
+ rnbd_opf = 0;
+ }
+
+ if (op_is_sync(rq->cmd_flags))
+ rnbd_opf |= RNBD_F_SYNC;
+
+ if (op_is_flush(rq->cmd_flags))
+ rnbd_opf |= RNBD_F_FUA;
+
+ return rnbd_opf;
+}
+
+const char *rnbd_access_mode_str(enum rnbd_access_mode mode);
+
+#endif /* RNBD_PROTO_H */
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
new file mode 100644
index 000000000000..5eddfd29ab64
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rnbd-srv-dev.h"
+#include "rnbd-log.h"
+
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
+ struct bio_set *bs)
+{
+ struct rnbd_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->blk_open_flags = flags;
+ dev->bdev = blkdev_get_by_path(path, flags, THIS_MODULE);
+ ret = PTR_ERR_OR_ZERO(dev->bdev);
+ if (ret)
+ goto err;
+
+ dev->blk_open_flags = flags;
+ bdevname(dev->bdev, dev->name);
+ dev->ibd_bio_set = bs;
+
+ return dev;
+
+err:
+ kfree(dev);
+ return ERR_PTR(ret);
+}
+
+void rnbd_dev_close(struct rnbd_dev *dev)
+{
+ blkdev_put(dev->bdev, dev->blk_open_flags);
+ kfree(dev);
+}
+
+static void rnbd_dev_bi_end_io(struct bio *bio)
+{
+ struct rnbd_dev_blk_io *io = bio->bi_private;
+
+ rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
+}
+
+/**
+ * rnbd_bio_map_kern - map kernel address into bio
+ * @data: pointer to buffer to map
+ * @bs: bio_set to use.
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+ unsigned int len, gfp_t gfp_mask)
+{
+ unsigned long kaddr = (unsigned long)data;
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
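+ /* Number of pages spanned by [data, data + len), including partial first and last pages */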
+ int offset, i;
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
+ offset = offset_in_page(kaddr);
+ for (i = 0; i < nr_pages; i++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ if (bio_add_page(bio, virt_to_page(data), bytes,
+ offset) < bytes) {
+ /* we don't support partial mappings */
+ bio_put(bio);
+ return ERR_PTR(-EINVAL);
+ }
+
+ data += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
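+ /* Default end_io only drops the bio; the caller overrides it, see rnbd_dev_submit_io() */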
+ bio->bi_end_io = bio_put;
+ return bio;
+}
+
+int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
+ size_t len, u32 bi_size, enum rnbd_io_flags flags,
+ short prio, void *priv)
+{
+ struct rnbd_dev_blk_io *io;
+ struct bio *bio;
+
+ /* Generate bio with pages pointing to the rdma buffer */
+ bio = rnbd_bio_map_kern(data, dev->ibd_bio_set, len, GFP_KERNEL);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ io = container_of(bio, struct rnbd_dev_blk_io, bio);
+
+ io->dev = dev;
+ io->priv = priv;
+
+ bio->bi_end_io = rnbd_dev_bi_end_io;
+ bio->bi_private = io;
+ bio->bi_opf = rnbd_to_bio_flags(flags);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_iter.bi_size = bi_size;
+ bio_set_prio(bio, prio);
+ bio_set_dev(bio, dev->bdev);
+
+ submit_bio(bio);
+
+ return 0;
+}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
new file mode 100644
index 000000000000..0f65b09a270e
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_SRV_DEV_H
+#define RNBD_SRV_DEV_H
+
+#include <linux/fs.h>
+#include "rnbd-proto.h"
+
+struct rnbd_dev {
+ struct block_device *bdev;
+ struct bio_set *ibd_bio_set;
+ fmode_t blk_open_flags;
+ char name[BDEVNAME_SIZE];
+};
+
+struct rnbd_dev_blk_io {
+ struct rnbd_dev *dev;
+ void *priv;
+	/* must be the last member for front_pad usage of bioset_init */
+ struct bio bio;
+};
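+
+/*
+ * Sketch of the intended allocation: the session's bio_set is set up as
+ * bioset_init(&bs, depth, offsetof(struct rnbd_dev_blk_io, bio),
+ * BIOSET_NEED_BVECS), so every bio allocated from it is preceded by its
+ * rnbd_dev_blk_io context in a single allocation.
+ */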
+
+/**
+ * rnbd_dev_open() - Open a device
+ * @path: path to the block device to open
+ * @flags: open flags
+ * @bs: bio_set to use during block I/O
+ */
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
+ struct bio_set *bs);
+
+/**
+ * rnbd_dev_close() - Close a device
+ * @dev: device to close
+ */
+void rnbd_dev_close(struct rnbd_dev *dev);
+
+void rnbd_endio(void *priv, int error);
+
+static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
+{
+ return queue_max_segments(bdev_get_queue(dev->bdev));
+}
+
+static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev)
+{
+ return queue_max_hw_sectors(bdev_get_queue(dev->bdev));
+}
+
+static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
+{
+ return blk_queue_secure_erase(bdev_get_queue(dev->bdev));
+}
+
+static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
+{
+ if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
+ return 0;
+
+ return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),
+ REQ_OP_DISCARD);
+}
+
+static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
+{
+ return bdev_get_queue(dev->bdev)->limits.discard_granularity;
+}
+
+static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
+{
+ return bdev_get_queue(dev->bdev)->limits.discard_alignment;
+}
+
+/**
+ * rnbd_dev_submit_io() - Submit an I/O to the disk
+ * @dev: device to which the I/O is submitted
+ * @sector: address to read/write data to
+ * @data: I/O data to write or buffer to read I/O data into
+ * @len: length of @data
+ * @bi_size: amount of data that will be read/written
+ * @flags: rnbd io flags, translated into bio op flags for submission
+ * @prio: IO priority
+ * @priv: private data, passed back to rnbd_endio() on completion
+ */
+int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
+ size_t len, u32 bi_size, enum rnbd_io_flags flags,
+ short prio, void *priv);
+
+#endif /* RNBD_SRV_DEV_H */
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
new file mode 100644
index 000000000000..106775c074d1
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <uapi/linux/limits.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/genhd.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+
+#include "rnbd-srv.h"
+
+static struct device *rnbd_dev;
+static struct class *rnbd_dev_class;
+static struct kobject *rnbd_devs_kobj;
+
+static void rnbd_srv_dev_release(struct kobject *kobj)
+{
+ struct rnbd_srv_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_srv_dev, dev_kobj);
+
+ kfree(dev);
+}
+
+static struct kobj_type dev_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rnbd_srv_dev_release
+};
+
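+/*
+ * Builds the per-device sysfs tree, typically
+ * /sys/devices/virtual/rnbd-server/ctl/devices/<dev_name>/ with a
+ * "sessions" directory and a "block_dev" symlink to the backing disk.
+ */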
+int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
+ struct block_device *bdev,
+ const char *dev_name)
+{
+ struct kobject *bdev_kobj;
+ int ret;
+
+ ret = kobject_init_and_add(&dev->dev_kobj, &dev_ktype,
+ rnbd_devs_kobj, dev_name);
+ if (ret)
+ return ret;
+
+ dev->dev_sessions_kobj = kobject_create_and_add("sessions",
+ &dev->dev_kobj);
+ if (!dev->dev_sessions_kobj)
+ goto put_dev_kobj;
+
+ bdev_kobj = &disk_to_dev(bdev->bd_disk)->kobj;
+ ret = sysfs_create_link(&dev->dev_kobj, bdev_kobj, "block_dev");
+ if (ret)
+ goto put_sess_kobj;
+
+ return 0;
+
+put_sess_kobj:
+ kobject_put(dev->dev_sessions_kobj);
+put_dev_kobj:
+ kobject_put(&dev->dev_kobj);
+ return ret;
+}
+
+void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev)
+{
+ sysfs_remove_link(&dev->dev_kobj, "block_dev");
+ kobject_del(dev->dev_sessions_kobj);
+ kobject_put(dev->dev_sessions_kobj);
+ kobject_del(&dev->dev_kobj);
+ kobject_put(&dev->dev_kobj);
+}
+
+static ssize_t read_only_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *page)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%d\n",
+ !(sess_dev->open_flags & FMODE_WRITE));
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_ro_attr =
+ __ATTR_RO(read_only);
+
+static ssize_t access_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n",
+ rnbd_access_mode_str(sess_dev->access_mode));
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_access_mode_attr =
+ __ATTR_RO(access_mode);
+
+static ssize_t mapping_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", sess_dev->pathname);
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_mapping_path_attr =
+ __ATTR_RO(mapping_path);
+
+static struct attribute *rnbd_srv_default_dev_sessions_attrs[] = {
+ &rnbd_srv_dev_session_access_mode_attr.attr,
+ &rnbd_srv_dev_session_ro_attr.attr,
+ &rnbd_srv_dev_session_mapping_path_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rnbd_srv_default_dev_session_attr_group = {
+ .attrs = rnbd_srv_default_dev_sessions_attrs,
+};
+
+void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev)
+{
+ sysfs_remove_group(&sess_dev->kobj,
+ &rnbd_srv_default_dev_session_attr_group);
+
+ kobject_del(&sess_dev->kobj);
+ kobject_put(&sess_dev->kobj);
+}
+
+static void rnbd_srv_sess_dev_release(struct kobject *kobj)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+ rnbd_destroy_sess_dev(sess_dev);
+}
+
+static struct kobj_type rnbd_srv_sess_dev_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rnbd_srv_sess_dev_release,
+};
+
+int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&sess_dev->kobj, &rnbd_srv_sess_dev_ktype,
+ sess_dev->dev->dev_sessions_kobj, "%s",
+ sess_dev->sess->sessname);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&sess_dev->kobj,
+ &rnbd_srv_default_dev_session_attr_group);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kobject_put(&sess_dev->kobj);
+
+ return ret;
+}
+
+int rnbd_srv_create_sysfs_files(void)
+{
+ int err;
+
+ rnbd_dev_class = class_create(THIS_MODULE, "rnbd-server");
+ if (IS_ERR(rnbd_dev_class))
+ return PTR_ERR(rnbd_dev_class);
+
+ rnbd_dev = device_create(rnbd_dev_class, NULL,
+ MKDEV(0, 0), NULL, "ctl");
+ if (IS_ERR(rnbd_dev)) {
+ err = PTR_ERR(rnbd_dev);
+ goto cls_destroy;
+ }
+ rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
+ if (!rnbd_devs_kobj) {
+ err = -ENOMEM;
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+cls_destroy:
+ class_destroy(rnbd_dev_class);
+
+ return err;
+}
+
+void rnbd_srv_destroy_sysfs_files(void)
+{
+ kobject_del(rnbd_devs_kobj);
+ kobject_put(rnbd_devs_kobj);
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+ class_destroy(rnbd_dev_class);
+}
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
new file mode 100644
index 000000000000..86e61523907b
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+
+#include "rnbd-srv.h"
+#include "rnbd-srv-dev.h"
+
+MODULE_DESCRIPTION("RDMA Network Block Device Server");
+MODULE_LICENSE("GPL");
+
+static u16 port_nr = RTRS_PORT;
+
+module_param_named(port_nr, port_nr, ushort, 0444);
+MODULE_PARM_DESC(port_nr,
+ "The port number the server is listening on (default: "
+ __stringify(RTRS_PORT)")");
+
+#define DEFAULT_DEV_SEARCH_PATH "/"
+
+static char dev_search_path[PATH_MAX] = DEFAULT_DEV_SEARCH_PATH;
+
+static int dev_search_path_set(const char *val, const struct kernel_param *kp)
+{
+ const char *p = strrchr(val, '\n') ? : val + strlen(val);
+
+ if (strlen(val) >= sizeof(dev_search_path))
+ return -EINVAL;
+
+ snprintf(dev_search_path, sizeof(dev_search_path), "%.*s",
+ (int)(p - val), val);
+
+ pr_info("dev_search_path changed to '%s'\n", dev_search_path);
+
+ return 0;
+}
+
+static struct kparam_string dev_search_path_kparam_str = {
+ .maxlen = sizeof(dev_search_path),
+ .string = dev_search_path
+};
+
+static const struct kernel_param_ops dev_search_path_ops = {
+ .set = dev_search_path_set,
+ .get = param_get_string,
+};
+
+module_param_cb(dev_search_path, &dev_search_path_ops,
+ &dev_search_path_kparam_str, 0444);
+MODULE_PARM_DESC(dev_search_path,
+		 "Sets the dev_search_path. When a device is mapped, this path is prepended to the device path from the map device operation. If %SESSNAME% is specified in the path, the device will be searched for in a session namespace. (default: "
+ DEFAULT_DEV_SEARCH_PATH ")");
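+/*
+ * Example: with dev_search_path=/rnbd/%SESSNAME%, a "ram0" mapping
+ * requested over session "clientA" resolves to /rnbd/clientA/ram0
+ * (see rnbd_srv_get_full_path()).
+ */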
+
+static DEFINE_MUTEX(sess_lock);
+static DEFINE_SPINLOCK(dev_lock);
+
+static LIST_HEAD(sess_list);
+static LIST_HEAD(dev_list);
+
+struct rnbd_io_private {
+ struct rtrs_srv_op *id;
+ struct rnbd_srv_sess_dev *sess_dev;
+};
+
+static void rnbd_sess_dev_release(struct kref *kref)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kref, struct rnbd_srv_sess_dev, kref);
+ complete(sess_dev->destroy_comp);
+}
+
+static inline void rnbd_put_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
+{
+ kref_put(&sess_dev->kref, rnbd_sess_dev_release);
+}
+
+void rnbd_endio(void *priv, int error)
+{
+ struct rnbd_io_private *rnbd_priv = priv;
+ struct rnbd_srv_sess_dev *sess_dev = rnbd_priv->sess_dev;
+
+ rnbd_put_sess_dev(sess_dev);
+
+ rtrs_srv_resp_rdma(rnbd_priv->id, error);
+
+ kfree(priv);
+}
+
+static struct rnbd_srv_sess_dev *
+rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+ int ret = 0;
+
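+	/*
+	 * Look up the device under RCU and take a reference only if it
+	 * is not already going away (kref already dropped to zero).
+	 */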
+ rcu_read_lock();
+ sess_dev = xa_load(&srv_sess->index_idr, dev_id);
+ if (likely(sess_dev))
+ ret = kref_get_unless_zero(&sess_dev->kref);
+ rcu_read_unlock();
+
+ if (!sess_dev || !ret)
+ return ERR_PTR(-ENXIO);
+
+ return sess_dev;
+}
+
+static int process_rdma(struct rtrs_srv *sess,
+ struct rnbd_srv_session *srv_sess,
+ struct rtrs_srv_op *id, void *data, u32 datalen,
+ const void *usr, size_t usrlen)
+{
+ const struct rnbd_msg_io *msg = usr;
+ struct rnbd_io_private *priv;
+ struct rnbd_srv_sess_dev *sess_dev;
+ u32 dev_id;
+ int err;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_id = le32_to_cpu(msg->device_id);
+
+ sess_dev = rnbd_get_sess_dev(dev_id, srv_sess);
+ if (IS_ERR(sess_dev)) {
+ pr_err_ratelimited("Got I/O request on session %s for unknown device id %d\n",
+ srv_sess->sessname, dev_id);
+ err = -ENOTCONN;
+ goto err;
+ }
+
+ priv->sess_dev = sess_dev;
+ priv->id = id;
+
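+	/*
+	 * Clients speaking an older protocol major version (or sending a
+	 * message too short to carry it) provide no priority, so fall
+	 * back to 0 for them.
+	 */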
+ err = rnbd_dev_submit_io(sess_dev->rnbd_dev, le64_to_cpu(msg->sector),
+ data, datalen, le32_to_cpu(msg->bi_size),
+ le32_to_cpu(msg->rw),
+ srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
+ usrlen < sizeof(*msg) ?
+ 0 : le16_to_cpu(msg->prio), priv);
+ if (unlikely(err)) {
+ rnbd_srv_err(sess_dev, "Submitting I/O to device failed, err: %d\n",
+ err);
+ goto sess_dev_put;
+ }
+
+ return 0;
+
+sess_dev_put:
+ rnbd_put_sess_dev(sess_dev);
+err:
+ kfree(priv);
+ return err;
+}
+
+static void destroy_device(struct rnbd_srv_dev *dev)
+{
+ WARN_ONCE(!list_empty(&dev->sess_dev_list),
+ "Device %s is being destroyed but still in use!\n",
+ dev->id);
+
+ spin_lock(&dev_lock);
+ list_del(&dev->list);
+ spin_unlock(&dev_lock);
+
+ mutex_destroy(&dev->lock);
+ if (dev->dev_kobj.state_in_sysfs)
+ /*
+ * Destroy kobj only if it was really created.
+ */
+ rnbd_srv_destroy_dev_sysfs(dev);
+ else
+ kfree(dev);
+}
+
+static void destroy_device_cb(struct kref *kref)
+{
+ struct rnbd_srv_dev *dev;
+
+ dev = container_of(kref, struct rnbd_srv_dev, kref);
+
+ destroy_device(dev);
+}
+
+static void rnbd_put_srv_dev(struct rnbd_srv_dev *dev)
+{
+ kref_put(&dev->kref, destroy_device_cb);
+}
+
+void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
+{
+ DECLARE_COMPLETION_ONSTACK(dc);
+
+ xa_erase(&sess_dev->sess->index_idr, sess_dev->device_id);
+ synchronize_rcu();
+ sess_dev->destroy_comp = &dc;
+ rnbd_put_sess_dev(sess_dev);
+ wait_for_completion(&dc); /* wait for inflights to drop to zero */
+
+ rnbd_dev_close(sess_dev->rnbd_dev);
+ list_del(&sess_dev->sess_list);
+ mutex_lock(&sess_dev->dev->lock);
+ list_del(&sess_dev->dev_list);
+ if (sess_dev->open_flags & FMODE_WRITE)
+ sess_dev->dev->open_write_cnt--;
+ mutex_unlock(&sess_dev->dev->lock);
+
+ rnbd_put_srv_dev(sess_dev->dev);
+
+ rnbd_srv_info(sess_dev, "Device closed\n");
+ kfree(sess_dev);
+}
+
+static void destroy_sess(struct rnbd_srv_session *srv_sess)
+{
+ struct rnbd_srv_sess_dev *sess_dev, *tmp;
+
+ if (list_empty(&srv_sess->sess_dev_list))
+ goto out;
+
+ mutex_lock(&srv_sess->lock);
+ list_for_each_entry_safe(sess_dev, tmp, &srv_sess->sess_dev_list,
+ sess_list)
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&srv_sess->lock);
+
+out:
+ xa_destroy(&srv_sess->index_idr);
+ bioset_exit(&srv_sess->sess_bio_set);
+
+ pr_info("RTRS Session %s disconnected\n", srv_sess->sessname);
+
+ mutex_lock(&sess_lock);
+ list_del(&srv_sess->list);
+ mutex_unlock(&sess_lock);
+
+ mutex_destroy(&srv_sess->lock);
+ kfree(srv_sess);
+}
+
+static int create_sess(struct rtrs_srv *rtrs)
+{
+ struct rnbd_srv_session *srv_sess;
+ char sessname[NAME_MAX];
+ int err;
+
+ err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname));
+ if (err) {
+ pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err);
+
+ return err;
+ }
+ srv_sess = kzalloc(sizeof(*srv_sess), GFP_KERNEL);
+ if (!srv_sess)
+ return -ENOMEM;
+
+ srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
+ err = bioset_init(&srv_sess->sess_bio_set, srv_sess->queue_depth,
+ offsetof(struct rnbd_dev_blk_io, bio),
+ BIOSET_NEED_BVECS);
+ if (err) {
+		pr_err("Allocating bio_set for session %s failed\n",
+ sessname);
+ kfree(srv_sess);
+ return err;
+ }
+
+ xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
+ INIT_LIST_HEAD(&srv_sess->sess_dev_list);
+ mutex_init(&srv_sess->lock);
+ mutex_lock(&sess_lock);
+ list_add(&srv_sess->list, &sess_list);
+ mutex_unlock(&sess_lock);
+
+ srv_sess->rtrs = rtrs;
+ strlcpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname));
+
+ rtrs_srv_set_sess_priv(rtrs, srv_sess);
+
+ return 0;
+}
+
+static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
+ enum rtrs_srv_link_ev ev, void *priv)
+{
+ struct rnbd_srv_session *srv_sess = priv;
+
+ switch (ev) {
+ case RTRS_SRV_LINK_EV_CONNECTED:
+ return create_sess(rtrs);
+
+ case RTRS_SRV_LINK_EV_DISCONNECTED:
+ if (WARN_ON_ONCE(!srv_sess))
+ return -EINVAL;
+
+ destroy_sess(srv_sess);
+ return 0;
+
+ default:
+ pr_warn("Received unknown RTRS session event %d from session %s\n",
+ ev, srv_sess->sessname);
+ return -EINVAL;
+ }
+}
+
+static int process_msg_close(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen)
+{
+ const struct rnbd_msg_close *close_msg = usr;
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = rnbd_get_sess_dev(le32_to_cpu(close_msg->device_id),
+ srv_sess);
+ if (IS_ERR(sess_dev))
+ return 0;
+
+ rnbd_put_sess_dev(sess_dev);
+ mutex_lock(&srv_sess->lock);
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&srv_sess->lock);
+ return 0;
+}
+
+static int process_msg_open(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen);
+
+static int process_msg_sess_info(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen);
+
+static int rnbd_srv_rdma_ev(struct rtrs_srv *rtrs, void *priv,
+ struct rtrs_srv_op *id, int dir,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen)
+{
+ struct rnbd_srv_session *srv_sess = priv;
+ const struct rnbd_msg_hdr *hdr = usr;
+ int ret = 0;
+ u16 type;
+
+ if (WARN_ON_ONCE(!srv_sess))
+ return -ENODEV;
+
+ type = le16_to_cpu(hdr->type);
+
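+	/*
+	 * RNBD_MSG_IO is completed asynchronously: rnbd_endio() sends the
+	 * RDMA response when the bio finishes, so return without replying
+	 * here. All other message types are answered synchronously below.
+	 */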
+ switch (type) {
+ case RNBD_MSG_IO:
+ return process_rdma(rtrs, srv_sess, id, data, datalen, usr,
+ usrlen);
+ case RNBD_MSG_CLOSE:
+ ret = process_msg_close(rtrs, srv_sess, data, datalen,
+ usr, usrlen);
+ break;
+ case RNBD_MSG_OPEN:
+ ret = process_msg_open(rtrs, srv_sess, usr, usrlen,
+ data, datalen);
+ break;
+ case RNBD_MSG_SESS_INFO:
+ ret = process_msg_sess_info(rtrs, srv_sess, usr, usrlen,
+ data, datalen);
+ break;
+ default:
+ pr_warn("Received unexpected message type %d with dir %d from session %s\n",
+ type, dir, srv_sess->sessname);
+ return -EINVAL;
+ }
+
+ rtrs_srv_resp_rdma(id, ret);
+ return 0;
+}
+
+static struct rnbd_srv_sess_dev
+*rnbd_sess_dev_alloc(struct rnbd_srv_session *srv_sess)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+ int error;
+
+ sess_dev = kzalloc(sizeof(*sess_dev), GFP_KERNEL);
+ if (!sess_dev)
+ return ERR_PTR(-ENOMEM);
+
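+	/*
+	 * Allocate the unique 32-bit device id that clients will use to
+	 * address this device in I/O and close messages.
+	 */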
+ error = xa_alloc(&srv_sess->index_idr, &sess_dev->device_id, sess_dev,
+ xa_limit_32b, GFP_NOWAIT);
+ if (error < 0) {
+		pr_warn("Allocating device_id failed, err: %d\n", error);
+ kfree(sess_dev);
+ return ERR_PTR(error);
+ }
+
+ return sess_dev;
+}
+
+static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
+{
+ struct rnbd_srv_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ strlcpy(dev->id, id, sizeof(dev->id));
+ kref_init(&dev->kref);
+ INIT_LIST_HEAD(&dev->sess_dev_list);
+ mutex_init(&dev->lock);
+
+ return dev;
+}
+
+static struct rnbd_srv_dev *
+rnbd_srv_find_or_add_srv_dev(struct rnbd_srv_dev *new_dev)
+{
+ struct rnbd_srv_dev *dev;
+
+ spin_lock(&dev_lock);
+ list_for_each_entry(dev, &dev_list, list) {
+ if (!strncmp(dev->id, new_dev->id, sizeof(dev->id))) {
+ if (!kref_get_unless_zero(&dev->kref))
+ /*
+ * We lost the race, device is almost dead.
+ * Continue traversing to find a valid one.
+ */
+ continue;
+ spin_unlock(&dev_lock);
+ return dev;
+ }
+ }
+ list_add(&new_dev->list, &dev_list);
+ spin_unlock(&dev_lock);
+
+ return new_dev;
+}
+
+static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev *srv_dev,
+ struct rnbd_srv_session *srv_sess,
+ enum rnbd_access_mode access_mode)
+{
+ int ret = -EPERM;
+
+ mutex_lock(&srv_dev->lock);
+
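+	/*
+	 * RNBD_ACCESS_RW allows a single writer; RNBD_ACCESS_MIGRATION
+	 * tolerates a second one so a device can be handed over between
+	 * two clients.
+	 */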
+ switch (access_mode) {
+ case RNBD_ACCESS_RO:
+ ret = 0;
+ break;
+ case RNBD_ACCESS_RW:
+ if (srv_dev->open_write_cnt == 0) {
+ srv_dev->open_write_cnt++;
+ ret = 0;
+ } else {
+ pr_err("Mapping device '%s' for session %s with RW permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
+ srv_dev->id, srv_sess->sessname,
+ srv_dev->open_write_cnt,
+ rnbd_access_mode_str(access_mode));
+ }
+ break;
+ case RNBD_ACCESS_MIGRATION:
+ if (srv_dev->open_write_cnt < 2) {
+ srv_dev->open_write_cnt++;
+ ret = 0;
+ } else {
+ pr_err("Mapping device '%s' for session %s with migration permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
+ srv_dev->id, srv_sess->sessname,
+ srv_dev->open_write_cnt,
+ rnbd_access_mode_str(access_mode));
+ }
+ break;
+ default:
+ pr_err("Received mapping request for device '%s' on session %s with invalid access mode: %d\n",
+ srv_dev->id, srv_sess->sessname, access_mode);
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&srv_dev->lock);
+
+ return ret;
+}
+
+static struct rnbd_srv_dev *
+rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
+ struct rnbd_srv_session *srv_sess,
+ enum rnbd_access_mode access_mode)
+{
+ int ret;
+ struct rnbd_srv_dev *new_dev, *dev;
+
+ new_dev = rnbd_srv_init_srv_dev(rnbd_dev->name);
+ if (IS_ERR(new_dev))
+ return new_dev;
+
+ dev = rnbd_srv_find_or_add_srv_dev(new_dev);
+ if (dev != new_dev)
+ kfree(new_dev);
+
+ ret = rnbd_srv_check_update_open_perm(dev, srv_sess, access_mode);
+ if (ret) {
+ rnbd_put_srv_dev(dev);
+ return ERR_PTR(ret);
+ }
+
+ return dev;
+}
+
+static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
+ struct rnbd_srv_sess_dev *sess_dev)
+{
+ struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
+
+ rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
+ rsp->device_id =
+ cpu_to_le32(sess_dev->device_id);
+ rsp->nsectors =
+ cpu_to_le64(get_capacity(rnbd_dev->bdev->bd_disk));
+ rsp->logical_block_size =
+ cpu_to_le16(bdev_logical_block_size(rnbd_dev->bdev));
+ rsp->physical_block_size =
+ cpu_to_le16(bdev_physical_block_size(rnbd_dev->bdev));
+ rsp->max_segments =
+ cpu_to_le16(rnbd_dev_get_max_segs(rnbd_dev));
+ rsp->max_hw_sectors =
+ cpu_to_le32(rnbd_dev_get_max_hw_sects(rnbd_dev));
+ rsp->max_write_same_sectors =
+ cpu_to_le32(bdev_write_same(rnbd_dev->bdev));
+ rsp->max_discard_sectors =
+ cpu_to_le32(rnbd_dev_get_max_discard_sects(rnbd_dev));
+ rsp->discard_granularity =
+ cpu_to_le32(rnbd_dev_get_discard_granularity(rnbd_dev));
+ rsp->discard_alignment =
+ cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
+ rsp->secure_discard =
+ cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
+ rsp->rotational =
+ !blk_queue_nonrot(bdev_get_queue(rnbd_dev->bdev));
+}
+
+static struct rnbd_srv_sess_dev *
+rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
+ const struct rnbd_msg_open *open_msg,
+ struct rnbd_dev *rnbd_dev, fmode_t open_flags,
+ struct rnbd_srv_dev *srv_dev)
+{
+ struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
+
+ if (IS_ERR(sdev))
+ return sdev;
+
+ kref_init(&sdev->kref);
+
+ strlcpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
+
+ sdev->rnbd_dev = rnbd_dev;
+ sdev->sess = srv_sess;
+ sdev->dev = srv_dev;
+ sdev->open_flags = open_flags;
+ sdev->access_mode = open_msg->access_mode;
+
+ return sdev;
+}
+
+static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
+ const char *dev_name)
+{
+ char *full_path;
+ char *a, *b;
+
+ full_path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!full_path)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Replace %SESSNAME% with a real session name in order to
+ * create device namespace.
+ */
+ a = strnstr(dev_search_path, "%SESSNAME%", sizeof(dev_search_path));
+ if (a) {
+ int len = a - dev_search_path;
+
+ len = snprintf(full_path, PATH_MAX, "%.*s/%s/%s", len,
+ dev_search_path, srv_sess->sessname, dev_name);
+ if (len >= PATH_MAX) {
+ pr_err("Too long path: %s, %s, %s\n",
+ dev_search_path, srv_sess->sessname, dev_name);
+ kfree(full_path);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ snprintf(full_path, PATH_MAX, "%s/%s",
+ dev_search_path, dev_name);
+ }
+
+	/* eliminate duplicated slashes in place */
+ a = strchr(full_path, '/');
+ b = a;
+ while (*b != '\0') {
+ if (*b == '/' && *a == '/') {
+ b++;
+ } else {
+ a++;
+ *a = *b;
+ b++;
+ }
+ }
+ a++;
+ *a = '\0';
+
+ return full_path;
+}
+
+static int process_msg_sess_info(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen)
+{
+ const struct rnbd_msg_sess_info *sess_info_msg = msg;
+ struct rnbd_msg_sess_info_rsp *rsp = data;
+
+ srv_sess->ver = min_t(u8, sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
+ pr_debug("Session %s using protocol version %d (client version: %d, server version: %d)\n",
+ srv_sess->sessname, srv_sess->ver,
+ sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
+
+ rsp->hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO_RSP);
+ rsp->ver = srv_sess->ver;
+
+ return 0;
+}
+
+/**
+ * find_srv_sess_dev() - check whether a device is already opened by a session
+ * @srv_sess: the session to search
+ * @dev_name: name of the device to look for
+ *
+ * Return: the struct rnbd_srv_sess_dev if @srv_sess has already opened
+ * @dev_name, NULL if the session didn't open the device yet.
+ */
+static struct rnbd_srv_sess_dev *
+find_srv_sess_dev(struct rnbd_srv_session *srv_sess, const char *dev_name)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ if (list_empty(&srv_sess->sess_dev_list))
+ return NULL;
+
+ list_for_each_entry(sess_dev, &srv_sess->sess_dev_list, sess_list)
+ if (!strcmp(sess_dev->pathname, dev_name))
+ return sess_dev;
+
+ return NULL;
+}
+
+static int process_msg_open(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen)
+{
+ int ret;
+ struct rnbd_srv_dev *srv_dev;
+ struct rnbd_srv_sess_dev *srv_sess_dev;
+ const struct rnbd_msg_open *open_msg = msg;
+ fmode_t open_flags;
+ char *full_path;
+ struct rnbd_dev *rnbd_dev;
+ struct rnbd_msg_open_rsp *rsp = data;
+
+ pr_debug("Open message received: session='%s' path='%s' access_mode=%d\n",
+ srv_sess->sessname, open_msg->dev_name,
+ open_msg->access_mode);
+ open_flags = FMODE_READ;
+ if (open_msg->access_mode != RNBD_ACCESS_RO)
+ open_flags |= FMODE_WRITE;
+
+ mutex_lock(&srv_sess->lock);
+
+ srv_sess_dev = find_srv_sess_dev(srv_sess, open_msg->dev_name);
+ if (srv_sess_dev)
+ goto fill_response;
+
+ if ((strlen(dev_search_path) + strlen(open_msg->dev_name))
+ >= PATH_MAX) {
+ pr_err("Opening device for session %s failed, device path too long. '%s/%s' is longer than PATH_MAX (%d)\n",
+ srv_sess->sessname, dev_search_path, open_msg->dev_name,
+ PATH_MAX);
+ ret = -EINVAL;
+ goto reject;
+ }
+ if (strstr(open_msg->dev_name, "..")) {
+ pr_err("Opening device for session %s failed, device path %s contains relative path ..\n",
+ srv_sess->sessname, open_msg->dev_name);
+ ret = -EINVAL;
+ goto reject;
+ }
+ full_path = rnbd_srv_get_full_path(srv_sess, open_msg->dev_name);
+ if (IS_ERR(full_path)) {
+ ret = PTR_ERR(full_path);
+ pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %d\n",
+ open_msg->dev_name, srv_sess->sessname, ret);
+ goto reject;
+ }
+
+ rnbd_dev = rnbd_dev_open(full_path, open_flags,
+ &srv_sess->sess_bio_set);
+ if (IS_ERR(rnbd_dev)) {
+ pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
+ full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
+ ret = PTR_ERR(rnbd_dev);
+ goto free_path;
+ }
+
+ srv_dev = rnbd_srv_get_or_create_srv_dev(rnbd_dev, srv_sess,
+ open_msg->access_mode);
+ if (IS_ERR(srv_dev)) {
+ pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
+ full_path, srv_sess->sessname, PTR_ERR(srv_dev));
+ ret = PTR_ERR(srv_dev);
+ goto rnbd_dev_close;
+ }
+
+ srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
+ rnbd_dev, open_flags,
+ srv_dev);
+ if (IS_ERR(srv_sess_dev)) {
+ pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
+ full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev));
+ ret = PTR_ERR(srv_sess_dev);
+ goto srv_dev_put;
+ }
+
+ /* Create the srv_dev sysfs files if they haven't been created yet. The
+ * reason to delay the creation is not to create the sysfs files before
+ * we are sure the device can be opened.
+ */
+ mutex_lock(&srv_dev->lock);
+ if (!srv_dev->dev_kobj.state_in_sysfs) {
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev,
+ rnbd_dev->name);
+ if (ret) {
+ mutex_unlock(&srv_dev->lock);
+ rnbd_srv_err(srv_sess_dev,
+ "Opening device failed, failed to create device sysfs files, err: %d\n",
+ ret);
+ goto free_srv_sess_dev;
+ }
+ }
+
+ ret = rnbd_srv_create_dev_session_sysfs(srv_sess_dev);
+ if (ret) {
+ mutex_unlock(&srv_dev->lock);
+ rnbd_srv_err(srv_sess_dev,
+ "Opening device failed, failed to create dev client sysfs files, err: %d\n",
+ ret);
+ goto free_srv_sess_dev;
+ }
+
+ list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list);
+ mutex_unlock(&srv_dev->lock);
+
+ list_add(&srv_sess_dev->sess_list, &srv_sess->sess_dev_list);
+
+ rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->id);
+
+ kfree(full_path);
+
+fill_response:
+ rnbd_srv_fill_msg_open_rsp(rsp, srv_sess_dev);
+ mutex_unlock(&srv_sess->lock);
+ return 0;
+
+free_srv_sess_dev:
+ xa_erase(&srv_sess->index_idr, srv_sess_dev->device_id);
+ synchronize_rcu();
+ kfree(srv_sess_dev);
+srv_dev_put:
+ if (open_msg->access_mode != RNBD_ACCESS_RO) {
+ mutex_lock(&srv_dev->lock);
+ srv_dev->open_write_cnt--;
+ mutex_unlock(&srv_dev->lock);
+ }
+ rnbd_put_srv_dev(srv_dev);
+rnbd_dev_close:
+ rnbd_dev_close(rnbd_dev);
+free_path:
+ kfree(full_path);
+reject:
+ mutex_unlock(&srv_sess->lock);
+ return ret;
+}
+
+static struct rtrs_srv_ctx *rtrs_ctx;
+
+static struct rtrs_srv_ops rtrs_ops;
+static int __init rnbd_srv_init_module(void)
+{
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
+ rtrs_ops = (struct rtrs_srv_ops) {
+ .rdma_ev = rnbd_srv_rdma_ev,
+ .link_ev = rnbd_srv_link_ev,
+ };
+ rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr);
+ if (IS_ERR(rtrs_ctx)) {
+ err = PTR_ERR(rtrs_ctx);
+ pr_err("rtrs_srv_open(), err: %d\n", err);
+ return err;
+ }
+
+ err = rnbd_srv_create_sysfs_files();
+ if (err) {
+ pr_err("rnbd_srv_create_sysfs_files(), err: %d\n", err);
+ rtrs_srv_close(rtrs_ctx);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit rnbd_srv_cleanup_module(void)
+{
+ rtrs_srv_close(rtrs_ctx);
+ WARN_ON(!list_empty(&sess_list));
+ rnbd_srv_destroy_sysfs_files();
+}
+
+module_init(rnbd_srv_init_module);
+module_exit(rnbd_srv_cleanup_module);
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
new file mode 100644
index 000000000000..5a8544b5e74f
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_SRV_H
+#define RNBD_SRV_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+
+#include <rtrs.h>
+#include "rnbd-proto.h"
+#include "rnbd-log.h"
+
+struct rnbd_srv_session {
+ /* Entry inside global sess_list */
+ struct list_head list;
+ struct rtrs_srv *rtrs;
+ char sessname[NAME_MAX];
+ int queue_depth;
+ struct bio_set sess_bio_set;
+
+ struct xarray index_idr;
+ /* List of struct rnbd_srv_sess_dev */
+ struct list_head sess_dev_list;
+ struct mutex lock;
+ u8 ver;
+};
+
+struct rnbd_srv_dev {
+ /* Entry inside global dev_list */
+ struct list_head list;
+ struct kobject dev_kobj;
+ struct kobject *dev_sessions_kobj;
+ struct kref kref;
+ char id[NAME_MAX];
+ /* List of rnbd_srv_sess_dev structs */
+ struct list_head sess_dev_list;
+ struct mutex lock;
+ int open_write_cnt;
+};
+
+/* Structure which binds N devices and N sessions */
+struct rnbd_srv_sess_dev {
+ /* Entry inside rnbd_srv_dev struct */
+ struct list_head dev_list;
+ /* Entry inside rnbd_srv_session struct */
+ struct list_head sess_list;
+ struct rnbd_dev *rnbd_dev;
+ struct rnbd_srv_session *sess;
+ struct rnbd_srv_dev *dev;
+ struct kobject kobj;
+ u32 device_id;
+ fmode_t open_flags;
+ struct kref kref;
+ struct completion *destroy_comp;
+ char pathname[NAME_MAX];
+ enum rnbd_access_mode access_mode;
+};
+
+/* rnbd-srv-sysfs.c */
+
+int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
+ struct block_device *bdev,
+ const char *dir_name);
+void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev);
+int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
+void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
+int rnbd_srv_create_sysfs_files(void);
+void rnbd_srv_destroy_sysfs_files(void);
+void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev);
+
+#endif /* RNBD_SRV_H */
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 8ffa8260dcaf..3ba07ab30c84 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -96,20 +96,6 @@ static const struct block_device_operations rsxx_fops = {
.ioctl = rsxx_blkdev_ioctl,
};
-static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
-{
- generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
- &card->gendisk->part0);
-}
-
-static void disk_stats_complete(struct rsxx_cardinfo *card,
- struct bio *bio,
- unsigned long start_time)
-{
- generic_end_io_acct(card->queue, bio_op(bio),
- &card->gendisk->part0, start_time);
-}
-
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
void *cb_data,
unsigned int error)
@@ -121,7 +107,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
if (atomic_dec_and_test(&meta->pending_dmas)) {
if (!card->eeh_state && card->gendisk)
- disk_stats_complete(card, meta->bio, meta->start_time);
+ bio_end_io_acct(meta->bio, meta->start_time);
if (atomic_read(&meta->error))
bio_io_error(meta->bio);
@@ -167,10 +153,9 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
bio_meta->bio = bio;
atomic_set(&bio_meta->error, 0);
atomic_set(&bio_meta->pending_dmas, 0);
- bio_meta->start_time = jiffies;
if (!unlikely(card->halt))
- disk_stats_start(card, bio);
+ bio_meta->start_time = bio_start_io_acct(bio);
dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
bio_data_dir(bio) ? 'W' : 'R', bio_meta,
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 4c297f69171d..dd34504382e5 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -327,7 +327,7 @@ static inline void swim_motor(struct swim __iomem *base,
swim_select(base, RELAX);
if (swim_readbit(base, MOTOR_ON))
break;
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
} else if (action == OFF) {
@@ -346,7 +346,7 @@ static inline void swim_eject(struct swim __iomem *base)
swim_select(base, RELAX);
if (!swim_readbit(base, DISK_IN))
break;
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
swim_select(base, RELAX);
@@ -370,7 +370,7 @@ static inline int swim_step(struct swim __iomem *base)
for (wait = 0; wait < HZ; wait++) {
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
swim_select(base, RELAX);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 1a8564a79d8d..33e3b76c4fa9 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -29,7 +29,6 @@ static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
"zstd",
#endif
- NULL
};
static void zcomp_strm_free(struct zcomp_strm *zstrm)
@@ -37,19 +36,16 @@ static void zcomp_strm_free(struct zcomp_strm *zstrm)
if (!IS_ERR_OR_NULL(zstrm->tfm))
crypto_free_comp(zstrm->tfm);
free_pages((unsigned long)zstrm->buffer, 1);
- kfree(zstrm);
+ zstrm->tfm = NULL;
+ zstrm->buffer = NULL;
}
/*
- * allocate new zcomp_strm structure with ->tfm initialized by
- * backend, return NULL on error
+ * Initialize zcomp_strm structure with ->tfm initialized by backend, and
+ * ->buffer. Return a negative value on error.
*/
-static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
{
- struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
- if (!zstrm)
- return NULL;
-
zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
@@ -58,16 +54,16 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
zcomp_strm_free(zstrm);
- zstrm = NULL;
+ return -ENOMEM;
}
- return zstrm;
+ return 0;
}
bool zcomp_available_algorithm(const char *comp)
{
int i;
- i = __sysfs_match_string(backends, -1, comp);
+ i = sysfs_match_string(backends, comp);
if (i >= 0)
return true;
@@ -86,9 +82,9 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
{
bool known_algorithm = false;
ssize_t sz = 0;
- int i = 0;
+ int i;
- for (; backends[i]; i++) {
+ for (i = 0; i < ARRAY_SIZE(backends); i++) {
if (!strcmp(comp, backends[i])) {
known_algorithm = true;
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
@@ -113,12 +109,13 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- return *get_cpu_ptr(comp->stream);
+ local_lock(&comp->stream->lock);
+ return this_cpu_ptr(comp->stream);
}
void zcomp_stream_put(struct zcomp *comp)
{
- put_cpu_ptr(comp->stream);
+ local_unlock(&comp->stream->lock);
}
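+
+/*
+ * The per-CPU stream is now protected by a local lock: it keeps the
+ * caller on its CPU while the stream is in use and, unlike the old
+ * get_cpu_ptr()/put_cpu_ptr() pair, stays preemptible on PREEMPT_RT.
+ */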
int zcomp_compress(struct zcomp_strm *zstrm,
@@ -159,17 +156,15 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
+ int ret;
- if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
- return 0;
+ zstrm = per_cpu_ptr(comp->stream, cpu);
+ local_lock_init(&zstrm->lock);
- zstrm = zcomp_strm_alloc(comp);
- if (IS_ERR_OR_NULL(zstrm)) {
+ ret = zcomp_strm_init(zstrm, comp);
+ if (ret)
pr_err("Can't allocate a compression stream\n");
- return -ENOMEM;
- }
- *per_cpu_ptr(comp->stream, cpu) = zstrm;
- return 0;
+ return ret;
}
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
@@ -177,10 +172,8 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
- zstrm = *per_cpu_ptr(comp->stream, cpu);
- if (!IS_ERR_OR_NULL(zstrm))
- zcomp_strm_free(zstrm);
- *per_cpu_ptr(comp->stream, cpu) = NULL;
+ zstrm = per_cpu_ptr(comp->stream, cpu);
+ zcomp_strm_free(zstrm);
return 0;
}
@@ -188,7 +181,7 @@ static int zcomp_init(struct zcomp *comp)
{
int ret;
- comp->stream = alloc_percpu(struct zcomp_strm *);
+ comp->stream = alloc_percpu(struct zcomp_strm);
if (!comp->stream)
return -ENOMEM;
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 1806475b919d..40f6420f4b2e 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -5,8 +5,11 @@
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
+#include <linux/local_lock.h>
struct zcomp_strm {
+ /* The members ->buffer and ->tfm are protected by ->lock. */
+ local_lock_t lock;
/* compression/decompression buffer */
void *buffer;
struct crypto_comp *tfm;
@@ -14,7 +17,7 @@ struct zcomp_strm {
/* dynamic per-device compression frontend */
struct zcomp {
- struct zcomp_strm * __percpu *stream;
+ struct zcomp_strm __percpu *stream;
const char *name;
struct hlist_node node;
};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index ebb234f36909..6e2ad90b17a3 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1510,13 +1510,8 @@ static void zram_bio_discard(struct zram *zram, u32 index,
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, unsigned int op, struct bio *bio)
{
- unsigned long start_time = jiffies;
- struct request_queue *q = zram->disk->queue;
int ret;
- generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
- &zram->disk->part0);
-
if (!op_is_write(op)) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
@@ -1526,8 +1521,6 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset, bio);
}
- generic_end_io_acct(q, op, &zram->disk->part0, start_time);
-
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
@@ -1548,6 +1541,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
u32 index;
struct bio_vec bvec;
struct bvec_iter iter;
+ unsigned long start_time;
index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
offset = (bio->bi_iter.bi_sector &
@@ -1563,6 +1557,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
break;
}
+ start_time = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
struct bio_vec bv = bvec;
unsigned int unwritten = bvec.bv_len;
@@ -1571,8 +1566,10 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
unwritten);
if (zram_bvec_rw(zram, &bv, index, offset,
- bio_op(bio), bio) < 0)
- goto out;
+ bio_op(bio), bio) < 0) {
+ bio->bi_status = BLK_STS_IOERR;
+ break;
+ }
bv.bv_offset += bv.bv_len;
unwritten -= bv.bv_len;
@@ -1580,12 +1577,8 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
update_position(&index, &offset, &bv);
} while (unwritten);
}
-
+ bio_end_io_acct(bio, start_time);
bio_endio(bio);
- return;
-
-out:
- bio_io_error(bio);
}
/*
@@ -1633,6 +1626,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
u32 index;
struct zram *zram;
struct bio_vec bv;
+ unsigned long start_time;
if (PageTransHuge(page))
return -ENOTSUPP;
@@ -1651,7 +1645,9 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
+ start_time = disk_start_io_acct(bdev->bd_disk, SECTORS_PER_PAGE, op);
ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
+ disk_end_io_acct(bdev->bd_disk, op, start_time);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1f498f358f60..1b9743b7f2ef 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -27,6 +27,11 @@
#define BDADDR_BCM4345C5 (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0xc5, 0x45, 0x43}})
#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
+#define BCM_FW_NAME_LEN 64
+#define BCM_FW_NAME_COUNT_MAX 2
+/* For kmalloc-ing the fw-name array instead of putting it on the stack */
+typedef char bcm_fw_name[BCM_FW_NAME_LEN];
+
int btbcm_check_bdaddr(struct hci_dev *hdev)
{
struct hci_rp_read_bd_addr *bda;
@@ -358,6 +363,13 @@ static int btbcm_read_info(struct hci_dev *hdev)
bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
kfree_skb(skb);
+ return 0;
+}
+
+static int btbcm_print_local_name(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (IS_ERR(skb))
@@ -380,6 +392,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4204, "BCM2076B1" }, /* 002.002.004 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
+ { 0x4606, "BCM4324B5" }, /* 002.006.006 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
{ 0x2122, "BCM4343A0" }, /* 001.001.034 */
@@ -395,27 +408,32 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
};
static const struct bcm_subver_table bcm_usb_subver_table[] = {
+ { 0x2105, "BCM20703A1" }, /* 001.001.005 */
{ 0x210b, "BCM43142A0" }, /* 001.001.011 */
{ 0x2112, "BCM4314A0" }, /* 001.001.018 */
{ 0x2118, "BCM20702A0" }, /* 001.001.024 */
{ 0x2126, "BCM4335A0" }, /* 001.001.038 */
{ 0x220e, "BCM20702A1" }, /* 001.002.014 */
- { 0x230f, "BCM4354A2" }, /* 001.003.015 */
+ { 0x230f, "BCM4356A2" }, /* 001.003.015 */
{ 0x4106, "BCM4335B0" }, /* 002.001.006 */
{ 0x410e, "BCM20702B0" }, /* 002.001.014 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
+ { 0x6607, "BCM4350C5" }, /* 003.006.007 */
{ }
};
-int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
- bool reinit)
+int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
{
u16 subver, rev, pid, vid;
- const char *hw_name = "BCM";
struct sk_buff *skb;
struct hci_rp_read_local_version *ver;
const struct bcm_subver_table *bcm_subver_table;
+ const char *hw_name = NULL;
+ char postfix[16] = "";
+ int fw_name_count = 0;
+ bcm_fw_name *fw_name;
+ const struct firmware *fw;
int i, err;
/* Reset */
@@ -434,15 +452,14 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
kfree_skb(skb);
/* Read controller information */
- if (!reinit) {
+ if (!(*fw_load_done)) {
err = btbcm_read_info(hdev);
if (err)
return err;
}
-
- /* Upper nibble of rev should be between 0 and 3? */
- if (((rev & 0xf000) >> 12) > 3)
- return 0;
+ err = btbcm_print_local_name(hdev);
+ if (err)
+ return err;
bcm_subver_table = (hdev->bus == HCI_USB) ? bcm_usb_subver_table :
bcm_uart_subver_table;
@@ -454,6 +471,13 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
}
}
+ bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
+ hw_name ? hw_name : "BCM", (subver & 0xe000) >> 13,
+ (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
+ if (*fw_load_done)
+ return 0;
+
if (hdev->bus == HCI_USB) {
/* Read USB Product Info */
skb = btbcm_read_usb_product(hdev);
@@ -464,85 +488,81 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
pid = get_unaligned_le16(skb->data + 3);
kfree_skb(skb);
- snprintf(fw_name, len, "brcm/%s-%4.4x-%4.4x.hcd",
- hw_name, vid, pid);
- } else {
- snprintf(fw_name, len, "brcm/%s.hcd", hw_name);
+ snprintf(postfix, sizeof(postfix), "-%4.4x-%4.4x", vid, pid);
}
- bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
- hw_name, (subver & 0xe000) >> 13,
- (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+ fw_name = kmalloc(BCM_FW_NAME_COUNT_MAX * BCM_FW_NAME_LEN, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+
+ if (hw_name) {
+ snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN,
+ "brcm/%s%s.hcd", hw_name, postfix);
+ fw_name_count++;
+ }
+
+ snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN,
+ "brcm/BCM%s.hcd", postfix);
+ fw_name_count++;
+
+ for (i = 0; i < fw_name_count; i++) {
+ err = firmware_request_nowarn(&fw, fw_name[i], &hdev->dev);
+ if (err == 0) {
+ bt_dev_info(hdev, "%s '%s' Patch",
+ hw_name ? hw_name : "BCM", fw_name[i]);
+ *fw_load_done = true;
+ break;
+ }
+ }
+
+ if (*fw_load_done) {
+ err = btbcm_patchram(hdev, fw);
+ if (err)
+ bt_dev_info(hdev, "BCM: Patch failed (%d)", err);
+
+ release_firmware(fw);
+ } else {
+ bt_dev_err(hdev, "BCM: firmware Patch file not found, tried:");
+ for (i = 0; i < fw_name_count; i++)
+ bt_dev_err(hdev, "BCM: '%s'", fw_name[i]);
+ }
+ kfree(fw_name);
return 0;
}
EXPORT_SYMBOL_GPL(btbcm_initialize);
-int btbcm_finalize(struct hci_dev *hdev)
+int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done)
{
- char fw_name[64];
int err;
- /* Re-initialize */
- err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true);
- if (err)
- return err;
+ /* Re-initialize if necessary */
+ if (*fw_load_done) {
+ err = btbcm_initialize(hdev, fw_load_done);
+ if (err)
+ return err;
+ }
btbcm_check_bdaddr(hdev);
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- /* Some devices ship with the controller default address.
- * Allow the bootloader to set a valid address through the
- * device tree.
- */
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-
return 0;
}
EXPORT_SYMBOL_GPL(btbcm_finalize);
int btbcm_setup_patchram(struct hci_dev *hdev)
{
- char fw_name[64];
- const struct firmware *fw;
- struct sk_buff *skb;
+ bool fw_load_done = false;
int err;
/* Initialize */
- err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), false);
- if (err)
- return err;
-
- err = request_firmware(&fw, fw_name, &hdev->dev);
- if (err < 0) {
- bt_dev_info(hdev, "BCM: Patch %s not found", fw_name);
- goto done;
- }
-
- btbcm_patchram(hdev, fw);
-
- release_firmware(fw);
-
- /* Re-initialize */
- err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true);
+ err = btbcm_initialize(hdev, &fw_load_done);
if (err)
return err;
- /* Read Local Name */
- skb = btbcm_read_local_name(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
- kfree_skb(skb);
-
-done:
- btbcm_check_bdaddr(hdev);
-
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
-
- return 0;
+ /* Re-initialize after loading Patch */
+ return btbcm_finalize(hdev, &fw_load_done);
}
EXPORT_SYMBOL_GPL(btbcm_setup_patchram);
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index 014ef847a486..8bf01565fdfc 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -62,9 +62,8 @@ int btbcm_write_pcm_int_params(struct hci_dev *hdev,
int btbcm_setup_patchram(struct hci_dev *hdev);
int btbcm_setup_apple(struct hci_dev *hdev);
-int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
- bool reinit);
-int btbcm_finalize(struct hci_dev *hdev);
+int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done);
+int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done);
#else
@@ -105,13 +104,12 @@ static inline int btbcm_setup_apple(struct hci_dev *hdev)
return 0;
}
-static inline int btbcm_initialize(struct hci_dev *hdev, char *fw_name,
- size_t len, bool reinit)
+static inline int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
{
return 0;
}
-static inline int btbcm_finalize(struct hci_dev *hdev)
+static inline int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done)
{
return 0;
}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0f3a020703ab..a296f8526433 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -355,31 +355,31 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8688 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8688_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8688 },
/* Marvell SD8787 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
/* Marvell SD8787 Bluetooth AMP device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787_BT_AMP),
.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
/* Marvell SD8797 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8797 },
/* Marvell SD8887 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9136),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8887 },
/* Marvell SD8897 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8897 },
/* Marvell SD8977 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9146),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8977 },
/* Marvell SD8987 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x914A),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8987 },
/* Marvell SD8997 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8997 },
{ } /* Terminating entry */
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 519788c442ca..bff095be2f97 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -51,9 +51,9 @@ static const struct btmtksdio_data mt7668_data = {
};
static const struct sdio_device_id btmtksdio_table[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7663),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7663),
.driver_data = (kernel_ulong_t)&mt7663_data },
- {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7668),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668),
.driver_data = (kernel_ulong_t)&mt7668_data },
{ } /* Terminating entry */
};
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index e11169ad8247..6c40bc75fb5b 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -695,8 +695,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
/* Send a dummy byte 0xff to activate the new baudrate */
param = 0xff;
- err = serdev_device_write(bdev->serdev, &param, sizeof(param),
- MAX_SCHEDULE_TIMEOUT);
+ err = serdev_device_write_buf(bdev->serdev, &param, sizeof(param));
if (err < 0 || err < sizeof(param))
return err;
@@ -1015,7 +1014,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
if (btmtkuart_is_standalone(bdev)) {
err = clk_prepare_enable(bdev->osc);
if (err < 0)
- return err;
+ goto err_hci_free_dev;
if (bdev->boot) {
gpiod_set_value_cansleep(bdev->boot, 1);
@@ -1028,10 +1027,8 @@ static int btmtkuart_probe(struct serdev_device *serdev)
/* Power on */
err = regulator_enable(bdev->vcc);
- if (err < 0) {
- clk_disable_unprepare(bdev->osc);
- return err;
- }
+ if (err < 0)
+ goto err_clk_disable_unprepare;
/* Reset if the reset-gpios is available otherwise the board
* -level design should be guaranteed.
@@ -1063,7 +1060,6 @@ static int btmtkuart_probe(struct serdev_device *serdev)
err = hci_register_dev(hdev);
if (err < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
- hci_free_dev(hdev);
goto err_regulator_disable;
}
@@ -1072,6 +1068,11 @@ static int btmtkuart_probe(struct serdev_device *serdev)
err_regulator_disable:
if (btmtkuart_is_standalone(bdev))
regulator_disable(bdev->vcc);
+err_clk_disable_unprepare:
+ if (btmtkuart_is_standalone(bdev))
+ clk_disable_unprepare(bdev->osc);
+err_hci_free_dev:
+ hci_free_dev(hdev);
return err;
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index a16845c0751d..c5984966f315 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -32,7 +32,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
* VSE event. WCN3991 sends version command response as a payload to
* command complete event.
*/
- if (soc_type == QCA_WCN3991) {
+ if (soc_type >= QCA_WCN3991) {
event_type = 0;
rlen += 1;
rtype = EDL_PATCH_VER_REQ_CMD;
@@ -69,22 +69,26 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
goto out;
}
- if (soc_type == QCA_WCN3991)
+ if (soc_type >= QCA_WCN3991)
memmove(&edl->data, &edl->data[1], sizeof(*ver));
ver = (struct qca_btsoc_version *)(edl->data);
- BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
- BT_DBG("%s: Patch :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
- BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rom_ver));
- BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+ bt_dev_info(hdev, "QCA Product ID :0x%08x",
+ le32_to_cpu(ver->product_id));
+ bt_dev_info(hdev, "QCA SOC Version :0x%08x",
+ le32_to_cpu(ver->soc_id));
+ bt_dev_info(hdev, "QCA ROM Version :0x%08x",
+ le16_to_cpu(ver->rom_ver));
+ bt_dev_info(hdev, "QCA Patch Version:0x%08x",
+ le16_to_cpu(ver->patch_ver));
/* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC
* and lower 2 bytes from patch will be used.
*/
*soc_version = (le32_to_cpu(ver->soc_id) << 16) |
- (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
+ (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
if (*soc_version == 0)
err = -EILSEQ;
@@ -217,7 +221,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
tlv_nvm->data[0] |= 0x80;
/* UART Baud Rate */
- if (soc_type == QCA_WCN3991)
+ if (soc_type >= QCA_WCN3991)
tlv_nvm->data[1] = nvm_baud_rate;
else
tlv_nvm->data[2] = nvm_baud_rate;
@@ -268,7 +272,7 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
* VSE event. WCN3991 sends version command response as a payload to
* command complete event.
*/
- if (soc_type == QCA_WCN3991) {
+ if (soc_type >= QCA_WCN3991) {
event_type = 0;
rlen = sizeof(*edl);
rtype = EDL_PATCH_TLV_REQ_CMD;
@@ -301,7 +305,7 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
err = -EIO;
}
- if (soc_type == QCA_WCN3991)
+ if (soc_type >= QCA_WCN3991)
goto out;
tlv_resp = (struct tlv_seg_resp *)(edl->data);
@@ -442,6 +446,11 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
(soc_ver & 0x0000000f);
snprintf(config.fwname, sizeof(config.fwname),
"qca/crbtfw%02x.tlv", rom_ver);
+ } else if (soc_type == QCA_QCA6390) {
+ rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
+ (soc_ver & 0x0000000f);
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htbtfw%02x.tlv", rom_ver);
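+		/* e.g. soc_ver 0x00000102 folds to rom_ver 0x12 and the
+		 * file requested becomes "qca/htbtfw12.tlv".
+		 */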
} else {
snprintf(config.fwname, sizeof(config.fwname),
"qca/rampatch_%08x.bin", soc_ver);
@@ -464,6 +473,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
else if (qca_is_wcn399x(soc_type))
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
+ else if (soc_type == QCA_QCA6390)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htnv%02x.bin", rom_ver);
else
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index e16a4d650597..6e1e62dd4b95 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -125,8 +125,9 @@ enum qca_btsoc_type {
QCA_AR3002,
QCA_ROME,
QCA_WCN3990,
- QCA_WCN3991,
QCA_WCN3998,
+ QCA_WCN3991,
+ QCA_QCA6390,
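+	/* Keep this ordering: the code tests soc_type >= QCA_WCN3991 to
+	 * cover WCN3991 and newer (QCA6390) controllers.
+	 */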
};
#if IS_ENABLED(CONFIG_BT_QCA)
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 67f4bc21e7c5..3a9afc905f24 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -130,12 +130,19 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8821c_config" },
/* 8761A */
- { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0,
+ { IC_INFO(RTL_ROM_LMP_8761A, 0xa),
.config_needed = false,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8761a_fw.bin",
.cfg_name = "rtl_bt/rtl8761a_config" },
+ /* 8761B */
+ { IC_INFO(RTL_ROM_LMP_8761A, 0xb),
+ .config_needed = false,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8761b_fw.bin",
+ .cfg_name = "rtl_bt/rtl8761b_config" },
+
/* 8822C with UART interface */
{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
IC_MATCH_FL_HCIBUS,
@@ -267,6 +274,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8723B, 9 }, /* 8723D */
{ RTL_ROM_LMP_8821A, 10 }, /* 8821C */
{ RTL_ROM_LMP_8822B, 13 }, /* 8822C */
+ { RTL_ROM_LMP_8761A, 14 }, /* 8761B */
};
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3bdec42c9612..5f022e9cf667 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -58,6 +58,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_CW6622 0x100000
#define BTUSB_MEDIATEK 0x200000
#define BTUSB_WIDEBAND_SPEECH 0x400000
+#define BTUSB_VALID_LE_STATES 0x800000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -335,11 +336,14 @@ static const struct usb_device_id blacklist_table[] = {
/* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW |
- BTUSB_WIDEBAND_SPEECH },
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_NEW |
+						     BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@ -348,7 +352,8 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW |
- BTUSB_WIDEBAND_SPEECH },
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -492,6 +497,8 @@ struct btusb_data {
__u8 cmdreq;
unsigned int sco_num;
+ unsigned int air_mode;
+ bool usb_alt6_packet_flow;
int isoc_altsetting;
int suspend_count;
@@ -983,6 +990,42 @@ static void btusb_isoc_complete(struct urb *urb)
}
}
+static inline void __fill_isoc_descriptor_msbc(struct urb *urb, int len,
+ int mtu, struct btusb_data *data)
+{
+ int i, offset = 0;
+ unsigned int interval;
+
+ BT_DBG("len %d mtu %d", len, mtu);
+
+	/* For the mSBC ALT 6 setting the host sends packets in a continuous
+	 * flow. Per Core spec 5, Vol 4, Part B, Table 2.1, the HCI packet
+	 * interval for ALT setting 6 should be 7.5 ms per USB packet. To
+	 * maintain that rate, 63-byte USB packets are sent alternately
+	 * every 7 ms and 8 ms: over two packets that is 15 ms, i.e. an
+	 * average interval of 7.5 ms.
+	 */
+ if (data->usb_alt6_packet_flow) {
+ interval = 7;
+ data->usb_alt6_packet_flow = false;
+ } else {
+ interval = 6;
+ data->usb_alt6_packet_flow = true;
+ }
+
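+	/* Queue 'interval' zero-length padding descriptors (offset is
+	 * never advanced, so length = offset = 0) ahead of the single
+	 * descriptor that carries the actual payload.
+	 */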
+ for (i = 0; i < interval; i++) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = offset;
+ }
+
+ if (len && i < BTUSB_MAX_ISOC_FRAMES) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = len;
+ i++;
+ }
+
+ urb->number_of_packets = i;
+}
+
static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
{
int i, offset = 0;
@@ -1386,9 +1429,13 @@ static struct urb *alloc_isoc_urb(struct hci_dev *hdev, struct sk_buff *skb)
urb->transfer_flags = URB_ISO_ASAP;
- __fill_isoc_descriptor(urb, skb->len,
- le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
-
+ if (data->isoc_altsetting == 6)
+ __fill_isoc_descriptor_msbc(urb, skb->len,
+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize),
+ data);
+ else
+ __fill_isoc_descriptor(urb, skb->len,
+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
skb->dev = (void *)hdev;
return urb;
@@ -1484,6 +1531,7 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
if (hci_conn_num(hdev, SCO_LINK) != data->sco_num) {
data->sco_num = hci_conn_num(hdev, SCO_LINK);
+ data->air_mode = evt;
schedule_work(&data->work);
}
}
@@ -1531,11 +1579,70 @@ static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
return 0;
}
+static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ int err;
+
+ if (data->isoc_altsetting != new_alts) {
+ unsigned long flags;
+
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
+
+ /* When isochronous alternate setting needs to be
+ * changed, because SCO connection has been added
+ * or removed, a packet fragment may be left in the
+ * reassembling state. This could lead to wrongly
+ * assembled fragments.
+ *
+ * Clear outstanding fragment when selecting a new
+ * alternate setting.
+ */
+ spin_lock_irqsave(&data->rxlock, flags);
+ kfree_skb(data->sco_skb);
+ data->sco_skb = NULL;
+ spin_unlock_irqrestore(&data->rxlock, flags);
+
+ err = __set_isoc_interface(hdev, new_alts);
+ if (err < 0)
+ return err;
+ }
+
+ if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
+ if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0)
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ else
+ btusb_submit_isoc_urb(hdev, GFP_KERNEL);
+ }
+
+ return 0;
+}
+
+static struct usb_host_interface *btusb_find_altsetting(struct btusb_data *data,
+ int alt)
+{
+ struct usb_interface *intf = data->isoc;
+ int i;
+
+ BT_DBG("Looking for Alt no :%d", alt);
+	BT_DBG("Looking for Alt setting %d", alt);
+ if (!intf)
+ return NULL;
+
+ for (i = 0; i < intf->num_altsetting; i++) {
+ if (intf->altsetting[i].desc.bAlternateSetting == alt)
+ return &intf->altsetting[i];
+ }
+
+ return NULL;
+}
+
static void btusb_work(struct work_struct *work)
{
struct btusb_data *data = container_of(work, struct btusb_data, work);
struct hci_dev *hdev = data->hdev;
- int new_alts;
+ int new_alts = 0;
int err;
if (data->sco_num > 0) {
@@ -1550,44 +1657,27 @@ static void btusb_work(struct work_struct *work)
set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
}
- if (hdev->voice_setting & 0x0020) {
- static const int alts[3] = { 2, 4, 5 };
-
- new_alts = alts[data->sco_num - 1];
- } else {
- new_alts = data->sco_num;
- }
-
- if (data->isoc_altsetting != new_alts) {
- unsigned long flags;
+ if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_CVSD) {
+ if (hdev->voice_setting & 0x0020) {
+ static const int alts[3] = { 2, 4, 5 };
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->isoc_anchor);
-
- /* When isochronous alternate setting needs to be
- * changed, because SCO connection has been added
- * or removed, a packet fragment may be left in the
- * reassembling state. This could lead to wrongly
- * assembled fragments.
- *
- * Clear outstanding fragment when selecting a new
- * alternate setting.
- */
- spin_lock_irqsave(&data->rxlock, flags);
- kfree_skb(data->sco_skb);
- data->sco_skb = NULL;
- spin_unlock_irqrestore(&data->rxlock, flags);
+ new_alts = alts[data->sco_num - 1];
+ } else {
+ new_alts = data->sco_num;
+ }
+ } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
- if (__set_isoc_interface(hdev, new_alts) < 0)
- return;
- }
+ data->usb_alt6_packet_flow = true;
- if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
- if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0)
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ /* Check if Alt 6 is supported for Transparent audio */
+ if (btusb_find_altsetting(data, 6))
+ new_alts = 6;
else
- btusb_submit_isoc_urb(hdev, GFP_KERNEL);
+ bt_dev_err(hdev, "Device does not support ALT setting 6");
}
+
+ if (btusb_switch_alt_setting(hdev, new_alts) < 0)
+			bt_dev_err(hdev, "Set USB ALT setting %d failed", new_alts);
} else {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -2252,7 +2342,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (ver.fw_variant == 0x23) {
clear_bit(BTUSB_BOOTLOADER, &data->flags);
btintel_check_bdaddr(hdev);
- return 0;
+ goto finish;
}
/* If the device is not in bootloader mode, then the only possible
@@ -2452,6 +2542,23 @@ done:
*/
btintel_load_ddc_config(hdev, fwname);
+ /* Read the Intel version information after loading the FW */
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+
+ btintel_version_info(hdev, &ver);
+
+finish:
+ /* All Intel controllers that support the Microsoft vendor
+ * extension are using 0xFC1E for VsMsftOpCode.
+ */
+ switch (ver.hw_variant) {
+ case 0x12: /* ThP */
+ hci_set_msft_opcode(hdev, 0xFC1E);
+ break;
+ }
+
/* Set the event mask for Intel specific vendor events. This enables
* a few extra events that are useful during general operation. It
* does not enable any debugging related events.
@@ -2461,13 +2568,6 @@ done:
*/
btintel_set_event_mask(hdev, false);
- /* Read the Intel version information after loading the FW */
- err = btintel_read_version(hdev, &ver);
- if (err)
- return err;
-
- btintel_version_info(hdev, &ver);
-
return 0;
}
@@ -3600,6 +3700,13 @@ static void btusb_check_needs_reset_resume(struct usb_interface *intf)
interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
+static bool btusb_prevent_wake(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+
+ return !device_may_wakeup(&data->udev->dev);
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3733,6 +3840,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
+ hdev->prevent_wake = btusb_prevent_wake;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -3877,6 +3985,9 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ if (id->driver_info & BTUSB_VALID_LE_STATES)
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index b236cb11c0dc..8ea5ca8d71d6 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -118,6 +118,7 @@ struct bcm_device {
u32 oper_speed;
int irq;
bool irq_active_low;
+ bool irq_acquired;
#ifdef CONFIG_PM
struct hci_uart *hu;
@@ -333,6 +334,8 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
+ bdev->irq_acquired = true;
+
device_init_wakeup(bdev->dev, true);
pm_runtime_set_autosuspend_delay(bdev->dev,
@@ -514,7 +517,7 @@ static int bcm_close(struct hci_uart *hu)
}
if (bdev) {
- if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
+ if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) {
devm_free_irq(bdev->dev, bdev->irq, bdev);
device_init_wakeup(bdev->dev, false);
pm_runtime_disable(bdev->dev);
@@ -550,8 +553,7 @@ static int bcm_flush(struct hci_uart *hu)
static int bcm_setup(struct hci_uart *hu)
{
struct bcm_data *bcm = hu->priv;
- char fw_name[64];
- const struct firmware *fw;
+ bool fw_load_done = false;
unsigned int speed;
int err;
@@ -560,21 +562,12 @@ static int bcm_setup(struct hci_uart *hu)
hu->hdev->set_diag = bcm_set_diag;
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
- err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name), false);
+ err = btbcm_initialize(hu->hdev, &fw_load_done);
if (err)
return err;
- err = request_firmware(&fw, fw_name, &hu->hdev->dev);
- if (err < 0) {
- bt_dev_info(hu->hdev, "BCM: Patch %s not found", fw_name);
+ if (!fw_load_done)
return 0;
- }
-
- err = btbcm_patchram(hu->hdev, fw);
- if (err) {
- bt_dev_info(hu->hdev, "BCM: Patch failed (%d)", err);
- goto finalize;
- }
/* Init speed if any */
if (hu->init_speed)
@@ -613,13 +606,16 @@ static int bcm_setup(struct hci_uart *hu)
btbcm_write_pcm_int_params(hu->hdev, &params);
}
-finalize:
- release_firmware(fw);
-
- err = btbcm_finalize(hu->hdev);
+ err = btbcm_finalize(hu->hdev, &fw_load_done);
if (err)
return err;
+ /* Some devices ship with the controller default address.
+ * Allow the bootloader to set a valid address through the
+ * device tree.
+ */
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks);
+
if (!bcm_request_irq(bcm))
err = bcm_setup_sleep(hu);
@@ -1153,7 +1149,8 @@ static int bcm_of_probe(struct bcm_device *bdev)
device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params",
bdev->pcm_int_params, 5);
bdev->irq = of_irq_get_byname(bdev->dev->of_node, "host-wakeup");
-
+ bdev->irq_active_low = irq_get_trigger_type(bdev->irq)
+ & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW);
return 0;
}
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 106c110efe56..e60b2e0773db 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -1018,6 +1018,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
{ .compatible = "realtek,rtl8822cs-bt",
.data = (const void *)&rtl_vnd },
+ { .compatible = "realtek,rtl8723bs-bt",
+ .data = (const void *)&rtl_vnd },
#endif
{ },
};
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 439392b1c043..81c3c38baba1 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -26,6 +26,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
@@ -74,6 +75,9 @@ enum qca_flags {
QCA_HW_ERROR_EVENT
};
+enum qca_capabilities {
+ QCA_CAP_WIDEBAND_SPEECH = BIT(0),
+};
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
@@ -110,6 +114,7 @@ struct qca_memdump_data {
char *memdump_buf_tail;
u32 current_seq_no;
u32 received_dump;
+ u32 ram_dump_size;
};
struct qca_memdump_event_hdr {
@@ -186,10 +191,11 @@ struct qca_vreg {
unsigned int load_uA;
};
-struct qca_vreg_data {
+struct qca_device_data {
enum qca_btsoc_type soc_type;
struct qca_vreg *vregs;
size_t num_vregs;
+ uint32_t capabilities;
};
/*
@@ -596,10 +602,12 @@ static int qca_open(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qca_is_wcn399x(qcadev->btsoc_type)) {
+
+ if (qca_is_wcn399x(qcadev->btsoc_type))
hu->init_speed = qcadev->init_speed;
+
+ if (qcadev->oper_speed)
hu->oper_speed = qcadev->oper_speed;
- }
}
timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
@@ -969,6 +977,8 @@ static void qca_controller_memdump(struct work_struct *work)
char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
u16 seq_no;
u32 dump_size;
+ u32 rx_size;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
@@ -1018,10 +1028,12 @@ static void qca_controller_memdump(struct work_struct *work)
dump_size);
queue_delayed_work(qca->workqueue,
&qca->ctrl_memdump_timeout,
- msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
+				   msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
skb_pull(skb, sizeof(dump_size));
memdump_buf = vmalloc(dump_size);
+ qca_memdump->ram_dump_size = dump_size;
qca_memdump->memdump_buf_head = memdump_buf;
qca_memdump->memdump_buf_tail = memdump_buf;
}
@@ -1044,26 +1056,57 @@ static void qca_controller_memdump(struct work_struct *work)
* the controller. In such cases let us store the dummy
* packets in the buffer.
*/
+		/* For QCA6390 the controller does not lose packets, but the
+		 * sequence number field of a packet sometimes has error
+		 * bits set, so skip the missing-packet check for it.
+		 */
while ((seq_no > qca_memdump->current_seq_no + 1) &&
- seq_no != QCA_LAST_SEQUENCE_NUM) {
+ (soc_type != QCA_QCA6390) &&
+ seq_no != QCA_LAST_SEQUENCE_NUM) {
bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
qca_memdump->current_seq_no);
+ rx_size = qca_memdump->received_dump;
+ rx_size += QCA_DUMP_PACKET_SIZE;
+ if (rx_size > qca_memdump->ram_dump_size) {
+ bt_dev_err(hu->hdev,
+ "QCA memdump received %d, no space for missed packet",
+ qca_memdump->received_dump);
+ break;
+ }
memcpy(memdump_buf, nullBuff, QCA_DUMP_PACKET_SIZE);
memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
qca_memdump->current_seq_no++;
}
- memcpy(memdump_buf, (unsigned char *) skb->data, skb->len);
- memdump_buf = memdump_buf + skb->len;
- qca_memdump->memdump_buf_tail = memdump_buf;
- qca_memdump->current_seq_no = seq_no + 1;
- qca_memdump->received_dump += skb->len;
+ rx_size = qca_memdump->received_dump + skb->len;
+ if (rx_size <= qca_memdump->ram_dump_size) {
+ if ((seq_no != QCA_LAST_SEQUENCE_NUM) &&
+ (seq_no != qca_memdump->current_seq_no))
+ bt_dev_err(hu->hdev,
+ "QCA memdump unexpected packet %d",
+ seq_no);
+ bt_dev_dbg(hu->hdev,
+ "QCA memdump packet %d with length %d",
+ seq_no, skb->len);
+ memcpy(memdump_buf, (unsigned char *)skb->data,
+ skb->len);
+ memdump_buf = memdump_buf + skb->len;
+ qca_memdump->memdump_buf_tail = memdump_buf;
+ qca_memdump->current_seq_no = seq_no + 1;
+ qca_memdump->received_dump += skb->len;
+ } else {
+ bt_dev_err(hu->hdev,
+ "QCA memdump received %d, no space for packet %d",
+ qca_memdump->received_dump, seq_no);
+ }
qca->qca_memdump = qca_memdump;
kfree_skb(skb);
if (seq_no == QCA_LAST_SEQUENCE_NUM) {
- bt_dev_info(hu->hdev, "QCA writing crash dump of size %d bytes",
- qca_memdump->received_dump);
+ bt_dev_info(hu->hdev,
+ "QCA memdump Done, received %d, total %d",
+ qca_memdump->received_dump,
+ qca_memdump->ram_dump_size);
memdump_buf = qca_memdump->memdump_buf_head;
dev_coredumpv(&hu->serdev->dev, memdump_buf,
qca_memdump->received_dump, GFP_KERNEL);
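A condensed, standalone sketch of the gap-filling and bounds logic above; the 255-byte packet size stands in for QCA_DUMP_PACKET_SIZE and is an assumption:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PKT_SZ 255

static uint8_t dump[16 * PKT_SZ];

/* Pad skipped sequence numbers with zeroed packets, but never write
 * past the advertised dump size.
 */
static size_t fill_gap(size_t received, uint16_t cur_seq, uint16_t seq_no,
		       size_t dump_size)
{
	while (seq_no > cur_seq + 1) {
		if (received + PKT_SZ > dump_size)
			break;	/* no space for the missed packet */
		memset(dump + received, 0, PKT_SZ);
		received += PKT_SZ;
		cur_seq++;
	}
	return received;
}

int main(void)
{
	/* Packets 3 and 4 were lost: two zeroed slots get inserted. */
	printf("%zu bytes buffered\n", fill_gap(2 * PKT_SZ, 2, 5, sizeof(dump)));
	return 0;
}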
@@ -1596,7 +1639,7 @@ static int qca_setup(struct hci_uart *hu)
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
bt_dev_info(hdev, "setting up %s",
- qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME");
+ qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");
retry:
ret = qca_power_on(hdev);
@@ -1665,10 +1708,10 @@ retry:
}
/* Setup bdaddr */
- if (qca_is_wcn399x(soc_type))
- hu->hdev->set_bdaddr = qca_set_bdaddr;
- else
+ if (soc_type == QCA_ROME)
hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+ else
+ hu->hdev->set_bdaddr = qca_set_bdaddr;
return ret;
}
@@ -1688,7 +1731,7 @@ static const struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
-static const struct qca_vreg_data qca_soc_data_wcn3990 = {
+static const struct qca_device_data qca_soc_data_wcn3990 = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
{ "vddio", 15000 },
@@ -1699,7 +1742,7 @@ static const struct qca_vreg_data qca_soc_data_wcn3990 = {
.num_vregs = 4,
};
-static const struct qca_vreg_data qca_soc_data_wcn3991 = {
+static const struct qca_device_data qca_soc_data_wcn3991 = {
.soc_type = QCA_WCN3991,
.vregs = (struct qca_vreg []) {
{ "vddio", 15000 },
@@ -1708,9 +1751,10 @@ static const struct qca_vreg_data qca_soc_data_wcn3991 = {
{ "vddch0", 450000 },
},
.num_vregs = 4,
+ .capabilities = QCA_CAP_WIDEBAND_SPEECH,
};
-static const struct qca_vreg_data qca_soc_data_wcn3998 = {
+static const struct qca_device_data qca_soc_data_wcn3998 = {
.soc_type = QCA_WCN3998,
.vregs = (struct qca_vreg []) {
{ "vddio", 10000 },
@@ -1721,6 +1765,11 @@ static const struct qca_vreg_data qca_soc_data_wcn3998 = {
.num_vregs = 4,
};
+static const struct qca_device_data qca_soc_data_qca6390 = {
+ .soc_type = QCA_QCA6390,
+ .num_vregs = 0,
+};
+
static void qca_power_shutdown(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
@@ -1764,7 +1813,7 @@ static int qca_power_off(struct hci_dev *hdev)
enum qca_btsoc_type soc_type = qca_soc_type(hu);
/* Stop sending shutdown command if soc crashes. */
- if (qca_is_wcn399x(soc_type)
+ if (soc_type != QCA_ROME
&& qca->memdump_state == QCA_MEMDUMP_IDLE) {
qca_send_pre_shutdown_cmd(hdev);
usleep_range(8000, 10000);
@@ -1852,7 +1901,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
struct hci_dev *hdev;
- const struct qca_vreg_data *data;
+ const struct qca_device_data *data;
int err;
bool power_ctrl_enabled = true;
@@ -1865,6 +1914,11 @@ static int qca_serdev_probe(struct serdev_device *serdev)
serdev_device_set_drvdata(serdev, qcadev);
device_property_read_string(&serdev->dev, "firmware-name",
&qcadev->firmware_name);
+ device_property_read_u32(&serdev->dev, "max-speed",
+ &qcadev->oper_speed);
+ if (!qcadev->oper_speed)
+ BT_DBG("UART will pick default operating speed");
+
if (data && qca_is_wcn399x(data->soc_type)) {
qcadev->btsoc_type = data->soc_type;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
@@ -1889,18 +1943,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return PTR_ERR(qcadev->susclk);
}
- device_property_read_u32(&serdev->dev, "max-speed",
- &qcadev->oper_speed);
- if (!qcadev->oper_speed)
- BT_DBG("UART will pick default operating speed");
-
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("wcn3990 serdev registration failed");
return err;
}
} else {
- qcadev->btsoc_type = QCA_ROME;
+ if (data)
+ qcadev->btsoc_type = data->soc_type;
+ else
+ qcadev->btsoc_type = QCA_ROME;
+
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (!qcadev->bt_en) {
@@ -1930,12 +1983,19 @@ static int qca_serdev_probe(struct serdev_device *serdev)
}
}
+ hdev = qcadev->serdev_hu.hdev;
+
if (power_ctrl_enabled) {
- hdev = qcadev->serdev_hu.hdev;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
hdev->shutdown = qca_power_off;
}
+ /* Wideband speech support must be set per driver since it can't be
+	 * queried via HCI.
+ */
+ if (data && (data->capabilities & QCA_CAP_WIDEBAND_SPEECH))
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+
return 0;
}
@@ -1951,10 +2011,43 @@ static void qca_serdev_remove(struct serdev_device *serdev)
hci_uart_unregister_device(&qcadev->serdev_hu);
}
+static void qca_serdev_shutdown(struct device *dev)
+{
+ int ret;
+ int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
+ struct serdev_device *serdev = to_serdev_device(dev);
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ const u8 ibs_wake_cmd[] = { 0xFD };
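+	/* Assumed H4 framing of edl_reset_soc_cmd below: 0x01 = HCI command
+	 * packet indicator, 0x00 0xFC = opcode 0xfc00 (vendor specific,
+	 * little-endian), 0x01 = parameter length, 0x05 = EDL_RESET_REQ.
+	 * ibs_wake_cmd is the single-byte HCI_IBS wake indication.
+	 */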
+ const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
+
+ if (qcadev->btsoc_type == QCA_QCA6390) {
+ serdev_device_write_flush(serdev);
+ ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
+ sizeof(ibs_wake_cmd));
+ if (ret < 0) {
+ BT_ERR("QCA send IBS_WAKE_IND error: %d", ret);
+ return;
+ }
+ serdev_device_wait_until_sent(serdev, timeout);
+ usleep_range(8000, 10000);
+
+ serdev_device_write_flush(serdev);
+ ret = serdev_device_write_buf(serdev, edl_reset_soc_cmd,
+ sizeof(edl_reset_soc_cmd));
+ if (ret < 0) {
+ BT_ERR("QCA send EDL_RESET_REQ error: %d", ret);
+ return;
+ }
+ serdev_device_wait_until_sent(serdev, timeout);
+ usleep_range(8000, 10000);
+ }
+}
+
static int __maybe_unused qca_suspend(struct device *dev)
{
- struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
- struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = to_serdev_device(dev);
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ struct hci_uart *hu = &qcadev->serdev_hu;
struct qca_data *qca = hu->priv;
unsigned long flags;
int ret = 0;
@@ -2033,8 +2126,9 @@ error:
static int __maybe_unused qca_resume(struct device *dev)
{
- struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
- struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = to_serdev_device(dev);
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ struct hci_uart *hu = &qcadev->serdev_hu;
struct qca_data *qca = hu->priv;
clear_bit(QCA_SUSPENDING, &qca->flags);
@@ -2044,21 +2138,39 @@ static int __maybe_unused qca_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
+#ifdef CONFIG_OF
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
+ { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
+ { .compatible = "qcom,qca9377-bt" },
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
+ { "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match);
+#endif
+
static struct serdev_device_driver qca_serdev_driver = {
.probe = qca_serdev_probe,
.remove = qca_serdev_remove,
.driver = {
.name = "hci_uart_qca",
- .of_match_table = qca_bluetooth_of_match,
+ .of_match_table = of_match_ptr(qca_bluetooth_of_match),
+ .acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match),
+ .shutdown = qca_serdev_shutdown,
.pm = &qca_pm_ops,
},
};
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 4652896d4990..599855e4c57c 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -21,8 +21,6 @@
#include "hci_uart.h"
-static struct serdev_device_ops hci_serdev_client_ops;
-
static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type)
{
struct hci_dev *hdev = hu->hdev;
@@ -260,7 +258,7 @@ static int hci_uart_receive_buf(struct serdev_device *serdev, const u8 *data,
return count;
}
-static struct serdev_device_ops hci_serdev_client_ops = {
+static const struct serdev_device_ops hci_serdev_client_ops = {
.receive_buf = hci_uart_receive_buf,
.write_wakeup = hci_uart_write_wakeup,
};
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6d4e4497b59b..c8818e3b1079 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -20,6 +20,15 @@ config ARM_CCI400_PORT_CTRL
Low level power management driver for CCI400 cache coherent
interconnect for ARM platforms.
+config ARM_INTEGRATOR_LM
+ bool "ARM Integrator Logic Module bus"
+ depends on HAS_IOMEM
+ depends on ARCH_INTEGRATOR || COMPILE_TEST
+ default ARCH_INTEGRATOR
+ help
+	  Say Y here to enable support for the ARM Logic Module bus
+	  found on the ARM Integrator AP (Application Platform).
+
config BRCMSTB_GISB_ARB
bool "Broadcom STB GISB bus arbiter"
depends on ARM || ARM64 || MIPS
@@ -29,6 +38,36 @@ config BRCMSTB_GISB_ARB
arbiter. This driver provides timeout and target abort error handling
and internal bus master decoding.
+config BT1_APB
+ bool "Baikal-T1 APB-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Baikal-T1 AXI-APB bridge is used to access the SoC subsystem CSRs.
+ IO requests are routed to this bus by means of the DW AMBA 3 AXI
+	  Interconnect. In case of an APB protocol collision, or if a slave
+	  device fails to respond within the timeout, an IRQ is raised and
+	  the erroneous address is reported to the APB terminator (APB
+	  Errors Handler Block). This driver provides the interrupt handler
+	  that detects the erroneous address, prints an error message about
+	  the address fault and updates an error counter. The counter and
+	  the APB-bus operation timeout can be accessed via the
+	  corresponding sysfs nodes.
+
+config BT1_AXI
+ bool "Baikal-T1 AXI-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ AXI3-bus is the main communication bus connecting all high-speed
+ peripheral IP-cores with RAM controller and with MIPS P5600 cores on
+ Baikal-T1 SoC. Traffic arbitration is done by means of DW AMBA 3 AXI
+ Interconnect (so called AXI Main Interconnect) routing IO requests
+	  from one SoC block to another. This driver provides a way to detect
+	  bus protocol errors and non-responding devices by means of an
+	  errors handler block (EHB) embedded on top of the interconnect.
+	  AXI Interconnect QoS arbitration tuning is currently unsupported.
+
config MOXTET
tristate "CZ.NIC Turris Mox module configuration bus"
depends on SPI_MASTER && OF
@@ -183,7 +222,7 @@ config UNIPHIER_SYSTEM_BUS
needed to use on-board devices connected to UniPhier SoCs.
config VEXPRESS_CONFIG
- bool "Versatile Express configuration bus"
+ tristate "Versatile Express configuration bus"
default y if ARCH_VEXPRESS
depends on ARM || ARM64
depends on OF
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 05f32cd694a4..397e35392bff 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -5,7 +5,7 @@
# Interconnect bus drivers for ARM platforms
obj-$(CONFIG_ARM_CCI) += arm-cci.o
-
+obj-$(CONFIG_ARM_INTEGRATOR_LM) += arm-integrator-lm.o
obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_MOXTET) += moxtet.o
@@ -13,6 +13,8 @@ obj-$(CONFIG_MOXTET) += moxtet.o
# DPAA2 fsl-mc bus
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
+obj-$(CONFIG_BT1_APB) += bt1-apb.o
+obj-$(CONFIG_BT1_AXI) += bt1-axi.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
new file mode 100644
index 000000000000..845b6c43fef8
--- /dev/null
+++ b/drivers/bus/arm-integrator-lm.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Integrator Logical Module bus driver
+ * Copyright (C) 2020 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* All information about the connected logic modules are in here */
+#define INTEGRATOR_SC_DEC_OFFSET 0x10
+
+/* Base address for the expansion modules */
+#define INTEGRATOR_AP_EXP_BASE 0xc0000000
+#define INTEGRATOR_AP_EXP_STRIDE 0x10000000
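+
+/* The four chipselects thus decode at 0xc0000000, 0xd0000000,
+ * 0xe0000000 and 0xf0000000, i.e. base + num * stride.
+ */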
+
+static int integrator_lm_populate(int num, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ u32 base;
+ int ret;
+
+ base = INTEGRATOR_AP_EXP_BASE + (num * INTEGRATOR_AP_EXP_STRIDE);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ struct resource res;
+
+ ret = of_address_to_resource(child, 0, &res);
+ if (ret) {
+ dev_info(dev, "no valid address on child\n");
+ continue;
+ }
+
+ /* First populate the syscon then any devices */
+ if (res.start == base) {
+ dev_info(dev, "populate module @0x%08x from DT\n",
+ base);
+ ret = of_platform_default_populate(child, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate module\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_syscon_match[] = {
+ { .compatible = "arm,integrator-ap-syscon"},
+ { },
+};
+
+static int integrator_ap_lm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *syscon;
+ static struct regmap *map;
+ u32 val;
+ int ret;
+ int i;
+
+ /* Look up the system controller */
+ syscon = of_find_matching_node(NULL, integrator_ap_syscon_match);
+ if (!syscon) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return -ENODEV;
+ }
+ map = syscon_node_to_regmap(syscon);
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val);
+ if (ret) {
+ dev_err(dev, "could not read from Integrator/AP syscon\n");
+ return ret;
+ }
+
+ /* Loop over the connected modules */
+ for (i = 0; i < 4; i++) {
+ if (!(val & BIT(4 + i)))
+ continue;
+
+ dev_info(dev, "detected module in slot %d\n", i);
+ ret = integrator_lm_populate(i, dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_lm_match[] = {
+ { .compatible = "arm,integrator-ap-lm"},
+ { },
+};
+
+static struct platform_driver integrator_ap_lm_driver = {
+ .probe = integrator_ap_lm_probe,
+ .driver = {
+ .name = "integratorap-lm",
+ .of_match_table = integrator_ap_lm_match,
+ },
+};
+module_platform_driver(integrator_ap_lm_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Integrator AP Logical Module driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
new file mode 100644
index 000000000000..b25ff941e7c7
--- /dev/null
+++ b/drivers/bus/bt1-apb.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 APB-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+#include <linux/clk.h>
+#include <linux/sysfs.h>
+
+#define APB_EHB_ISR 0x00
+#define APB_EHB_ISR_PENDING BIT(0)
+#define APB_EHB_ISR_MASK BIT(1)
+#define APB_EHB_ADDR 0x04
+#define APB_EHB_TIMEOUT 0x08
+
+#define APB_EHB_TIMEOUT_MIN 0x000003FFU
+#define APB_EHB_TIMEOUT_MAX 0xFFFFFFFFU
+
+/*
+ * struct bt1_apb - Baikal-T1 APB EHB private data
+ * @dev: Pointer to the device structure.
+ * @regs: APB EHB registers map.
+ * @res: No-device error injection memory region.
+ * @irq: Errors IRQ number.
+ * @rate: APB-bus reference clock rate.
+ * @pclk: APB-reference clock.
+ * @prst: APB domain reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_apb {
+ struct device *dev;
+
+ struct regmap *regs;
+ void __iomem *res;
+ int irq;
+
+ unsigned long rate;
+ struct clk *pclk;
+
+ struct reset_control *prst;
+
+ atomic_t count;
+};
+
+static const struct regmap_config bt1_apb_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = APB_EHB_TIMEOUT,
+ .fast_io = true
+};
+
+static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
+{
+ u64 timeout = (u64)n * USEC_PER_SEC;
+
+ do_div(timeout, apb->rate);
+
+	return timeout;
+}
+
+static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
+ unsigned long timeout)
+{
+ u64 n = (u64)timeout * apb->rate;
+
+ do_div(n, USEC_PER_SEC);
+
+	return n;
+}
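+
+/* Example, assuming a 125 MHz pclk: the minimum timeout field value
+ * 0x3FF maps to 1023 * 1000000 / 125000000 ~= 8 us, while a requested
+ * 100 us timeout maps back to n = 100 * 125000000 / 1000000 = 12500.
+ */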
+
+static irqreturn_t bt1_apb_isr(int irq, void *data)
+{
+ struct bt1_apb *apb = data;
+ u32 addr = 0;
+
+ regmap_read(apb->regs, APB_EHB_ADDR, &addr);
+
+ dev_crit_ratelimited(apb->dev,
+ "APB-bus fault %d: Slave access timeout at 0x%08x\n",
+ atomic_inc_return(&apb->count),
+ addr);
+
+ /*
+ * Print backtrace on each CPU. This might be pointless if the fault
+ * has happened on the same CPU as the IRQ handler is executed or
+ * the other core proceeded further execution despite the error.
+ * But if it's not, by looking at the trace we would get straight to
+ * the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_apb_clear_data(void *data)
+{
+ struct bt1_apb *apb = data;
+ struct platform_device *pdev = to_platform_device(apb->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
+ if (!apb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_apb_clear_data, apb);
+ if (ret) {
+ dev_err(dev, "Can't add APB EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ apb->dev = dev;
+ atomic_set(&apb->count, 0);
+ platform_set_drvdata(pdev, apb);
+
+ return apb;
+}
+
+static int bt1_apb_request_regs(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
+ if (IS_ERR(regs)) {
+ dev_err(apb->dev, "Couldn't map APB EHB registers\n");
+ return PTR_ERR(regs);
+ }
+
+ apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
+ if (IS_ERR(apb->regs)) {
+ dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
+ return PTR_ERR(apb->regs);
+ }
+
+ apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
+ if (IS_ERR(apb->res))
+ dev_err(apb->dev, "Couldn't map reserved region\n");
+
+ return PTR_ERR_OR_ZERO(apb->res);
+}
+
+static int bt1_apb_request_rst(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
+ if (IS_ERR(apb->prst)) {
+ dev_warn(apb->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(apb->prst);
+ }
+
+ ret = reset_control_deassert(apb->prst);
+ if (ret)
+ dev_err(apb->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_apb_disable_clk(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ clk_disable_unprepare(apb->pclk);
+}
+
+static int bt1_apb_request_clk(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->pclk = devm_clk_get(apb->dev, "pclk");
+ if (IS_ERR(apb->pclk)) {
+ dev_err(apb->dev, "Couldn't get APB clock descriptor\n");
+ return PTR_ERR(apb->pclk);
+ }
+
+ ret = clk_prepare_enable(apb->pclk);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't enable the APB clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
+ return ret;
+ }
+
+ apb->rate = clk_get_rate(apb->pclk);
+ if (!apb->rate) {
+ dev_err(apb->dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bt1_apb_clear_irq(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
+}
+
+static int bt1_apb_request_irq(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ int ret;
+
+ apb->irq = platform_get_irq(pdev, 0);
+ if (apb->irq < 0)
+ return apb->irq;
+
+ ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
+ "bt1-apb", apb);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
+ return ret;
+ }
+
+ ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
+ return ret;
+ }
+
+	/* Unmask the IRQ and clear its pending flag. */
+ regmap_update_bits(apb->regs, APB_EHB_ISR,
+ APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
+ APB_EHB_ISR_MASK);
+
+ return 0;
+}
+
+static ssize_t count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
+ if (ret)
+ return ret;
+
+ timeout = bt1_apb_n_to_timeout_us(apb, n);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
+}
+
+static ssize_t timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ if (kstrtoul(buf, 0, &timeout) < 0)
+ return -EINVAL;
+
+ n = bt1_apb_timeout_to_n_us(apb, timeout);
+ n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);
+
+ ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);
+
+ return ret ?: count;
+}
+static DEVICE_ATTR_RW(timeout);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ /*
+	 * Either perform a dummy read from the unmapped address in the APB
+	 * IO area or manually set the IRQ status.
+ */
+ if (sysfs_streq(data, "nodev"))
+ readl(apb->res);
+ else if (sysfs_streq(data, "irq"))
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
+ APB_EHB_ISR_PENDING);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
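+
+/* Usage sketch (the device path is illustrative):
+ *   echo nodev > /sys/devices/platform/.../inject_error
+ * performs the dummy read from the reserved region, while
+ *   echo irq > .../inject_error
+ * sets the pending bit directly; either way the EHB IRQ fires.
+ */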
+
+static struct attribute *bt1_apb_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_apb_sysfs);
+
+static void bt1_apb_remove_sysfs(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
+}
+
+static int bt1_apb_init_sysfs(struct bt1_apb *apb)
+{
+ int ret;
+
+ ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
+ if (ret) {
+ dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
+ if (ret)
+ dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_apb_probe(struct platform_device *pdev)
+{
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = bt1_apb_create_data(pdev);
+ if (IS_ERR(apb))
+ return PTR_ERR(apb);
+
+ ret = bt1_apb_request_regs(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_rst(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_clk(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_irq(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_init_sysfs(apb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_apb_of_match[] = {
+ { .compatible = "baikal,bt1-apb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_apb_of_match);
+
+static struct platform_driver bt1_apb_driver = {
+ .probe = bt1_apb_probe,
+ .driver = {
+ .name = "bt1-apb",
+ .of_match_table = bt1_apb_of_match
+ }
+};
+module_platform_driver(bt1_apb_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
new file mode 100644
index 000000000000..e7a6744acc7b
--- /dev/null
+++ b/drivers/bus/bt1-axi.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 AXI-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/sysfs.h>
+
+#define BT1_AXI_WERRL 0x110
+#define BT1_AXI_WERRH 0x114
+#define BT1_AXI_WERRH_TYPE BIT(23)
+#define BT1_AXI_WERRH_ADDR_FLD 24
+#define BT1_AXI_WERRH_ADDR_MASK GENMASK(31, BT1_AXI_WERRH_ADDR_FLD)
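+
+/* A WERRH value of e.g. 0x81800000 decodes as a "no slave" fault
+ * (bit 23 set) at an address with bits 31:24 = 0x81; see bt1_axi_isr().
+ */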
+
+/*
+ * struct bt1_axi - Baikal-T1 AXI-bus private data
+ * @dev: Pointer to the device structure.
+ * @qos_regs: AXI Interconnect QoS tuning registers.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @irq: Errors IRQ number.
+ * @aclk: AXI reference clock.
+ * @arst: AXI Interconnect reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_axi {
+ struct device *dev;
+
+ void __iomem *qos_regs;
+ struct regmap *sys_regs;
+ int irq;
+
+ struct clk *aclk;
+
+ struct reset_control *arst;
+
+ atomic_t count;
+};
+
+static irqreturn_t bt1_axi_isr(int irq, void *data)
+{
+ struct bt1_axi *axi = data;
+ u32 low = 0, high = 0;
+
+ regmap_read(axi->sys_regs, BT1_AXI_WERRL, &low);
+ regmap_read(axi->sys_regs, BT1_AXI_WERRH, &high);
+
+ dev_crit_ratelimited(axi->dev,
+ "AXI-bus fault %d: %s at 0x%x%08x\n",
+ atomic_inc_return(&axi->count),
+ high & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error",
+ high, low);
+
+ /*
+ * Print backtrace on each CPU. This might be pointless if the fault
+ * has happened on the same CPU as the IRQ handler is executed or
+ * the other core proceeded further execution despite the error.
+ * But if it's not, by looking at the trace we would get straight to
+ * the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_axi_clear_data(void *data)
+{
+ struct bt1_axi *axi = data;
+ struct platform_device *pdev = to_platform_device(axi->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_axi *bt1_axi_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = devm_kzalloc(dev, sizeof(*axi), GFP_KERNEL);
+ if (!axi)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_axi_clear_data, axi);
+ if (ret) {
+ dev_err(dev, "Can't add AXI EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ axi->dev = dev;
+ atomic_set(&axi->count, 0);
+ platform_set_drvdata(pdev, axi);
+
+ return axi;
+}
+
+static int bt1_axi_request_regs(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ struct device *dev = axi->dev;
+
+ axi->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(axi->sys_regs)) {
+ dev_err(dev, "Couldn't find syscon registers\n");
+ return PTR_ERR(axi->sys_regs);
+ }
+
+ axi->qos_regs = devm_platform_ioremap_resource_byname(pdev, "qos");
+ if (IS_ERR(axi->qos_regs))
+ dev_err(dev, "Couldn't map AXI-bus QoS registers\n");
+
+ return PTR_ERR_OR_ZERO(axi->qos_regs);
+}
+
+static int bt1_axi_request_rst(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
+ if (IS_ERR(axi->arst)) {
+ dev_warn(axi->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(axi->arst);
+ }
+
+ ret = reset_control_deassert(axi->arst);
+ if (ret)
+ dev_err(axi->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_axi_disable_clk(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ clk_disable_unprepare(axi->aclk);
+}
+
+static int bt1_axi_request_clk(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->aclk = devm_clk_get(axi->dev, "aclk");
+ if (IS_ERR(axi->aclk)) {
+ dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n");
+ return PTR_ERR(axi->aclk);
+ }
+
+ ret = clk_prepare_enable(axi->aclk);
+ if (ret) {
+ dev_err(axi->dev, "Couldn't enable the AXI clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI clock disable action\n");
+
+ return ret;
+}
+
+static int bt1_axi_request_irq(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ int ret;
+
+ axi->irq = platform_get_irq(pdev, 0);
+ if (axi->irq < 0)
+ return axi->irq;
+
+ ret = devm_request_irq(axi->dev, axi->irq, bt1_axi_isr, IRQF_SHARED,
+ "bt1-axi", axi);
+ if (ret)
+ dev_err(axi->dev, "Couldn't request AXI EHB IRQ\n");
+
+ return ret;
+}
+
+static ssize_t count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ /*
+	 * An unaligned read from this memory region causes a CM2 bus error,
+	 * while an unaligned write triggers the AXI-bus write error that is
+	 * handled by this driver.
+ */
+ if (sysfs_streq(data, "bus"))
+ readb(axi->qos_regs);
+ else if (sysfs_streq(data, "unaligned"))
+ writeb(0, axi->qos_regs);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_axi_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_axi_sysfs);
+
+static void bt1_axi_remove_sysfs(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ device_remove_groups(axi->dev, bt1_axi_sysfs_groups);
+}
+
+static int bt1_axi_init_sysfs(struct bt1_axi *axi)
+{
+ int ret;
+
+ ret = device_add_groups(axi->dev, bt1_axi_sysfs_groups);
+ if (ret) {
+ dev_err(axi->dev, "Failed to add sysfs files group\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_remove_sysfs, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_axi_probe(struct platform_device *pdev)
+{
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = bt1_axi_create_data(pdev);
+ if (IS_ERR(axi))
+ return PTR_ERR(axi);
+
+ ret = bt1_axi_request_regs(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_rst(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_clk(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_irq(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_init_sysfs(axi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_axi_of_match[] = {
+ { .compatible = "baikal,bt1-axi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_axi_of_match);
+
+static struct platform_driver bt1_axi_driver = {
+ .probe = bt1_axi_probe,
+ .driver = {
+ .name = "bt1-axi",
+ .of_match_table = bt1_axi_of_match
+ }
+};
+module_platform_driver(bt1_axi_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index ebad5eb48e5a..0b38014d040e 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -43,10 +43,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
- sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
-
- if (unlikely(!sequence_id))
- sequence_id = 1;
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
@@ -121,7 +118,8 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
ee = mhi_get_exec_env(mhi_cntrl);
}
- dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
+ dev_dbg(dev,
+ "Waiting for RDDM image download via BHIe, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
while (retry--) {
@@ -152,11 +150,14 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 rx_status;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
+ dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
+
/* Wait for the image download to complete */
wait_event_timeout(mhi_cntrl->state_event,
mhi_read_reg_field(mhi_cntrl, base,
@@ -174,8 +175,10 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status, sequence_id;
+ int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -183,6 +186,9 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
return -EIO;
}
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n",
+ sequence_id);
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
@@ -191,26 +197,25 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
- sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
sequence_id);
read_unlock_bh(pm_lock);
/* Wait for the image download to complete */
- wait_event_timeout(mhi_cntrl->state_event,
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
- mhi_read_reg_field(mhi_cntrl, base,
- BHIE_TXVECSTATUS_OFFS,
- BHIE_TXVECSTATUS_STATUS_BMSK,
- BHIE_TXVECSTATUS_STATUS_SHFT,
- &tx_status) || tx_status,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ BHIE_TXVECSTATUS_STATUS_SHFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
return -EIO;
- return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+ return (!ret) ? -ETIMEDOUT : 0;
}
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
@@ -239,14 +244,15 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
goto invalid_pm_state;
}
- dev_dbg(dev, "Starting SBL download via BHI\n");
+ session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n",
+ session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
- session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
@@ -377,30 +383,18 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
}
}
-void mhi_fw_load_worker(struct work_struct *work)
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
- struct mhi_controller *mhi_cntrl;
const struct firmware *firmware = NULL;
struct image_info *image_info;
- struct device *dev;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
const char *fw_name;
void *buf;
dma_addr_t dma_addr;
size_t size;
int ret;
- mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
- dev = &mhi_cntrl->mhi_dev->dev;
-
- dev_dbg(dev, "Waiting for device to enter PBL from: %s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee));
-
- ret = wait_event_timeout(mhi_cntrl->state_event,
- MHI_IN_PBL(mhi_cntrl->ee) ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
- if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device MHI is not in valid state\n");
return;
}
@@ -446,7 +440,12 @@ void mhi_fw_load_worker(struct work_struct *work)
release_firmware(firmware);
/* Error or in EDL mode, we're done */
- if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+ if (ret) {
+ dev_err(dev, "MHI did not load SBL, ret:%d\n", ret);
+ return;
+ }
+
+ if (mhi_cntrl->ee == MHI_EE_EDL)
return;
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -474,8 +473,10 @@ fw_load_ee_pthru:
if (!mhi_cntrl->fbc_download)
return;
- if (ret)
+ if (ret) {
+ dev_err(dev, "MHI did not enter READY state\n");
goto error_read;
+ }
/* Wait for the SBL event */
ret = wait_event_timeout(mhi_cntrl->state_event,
@@ -493,6 +494,8 @@ fw_load_ee_pthru:
ret = mhi_fw_load_amss(mhi_cntrl,
/* Vector table is the last entry */
&image_info->mhi_buf[image_info->entries - 1]);
+ if (ret)
+ dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
release_firmware(firmware);
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index eb2ab058a01d..e43a190a7a36 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -34,6 +34,8 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
[DEV_ST_TRANSITION_READY] = "READY",
[DEV_ST_TRANSITION_SBL] = "SBL",
[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
+ [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
+ [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};
const char * const mhi_state_str[MHI_STATE_MAX] = {
@@ -291,6 +293,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
}
/* Setup cmd context */
+ ret = -ENOMEM;
mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
sizeof(*mhi_ctxt->cmd_ctxt) *
NR_OF_CMD_RINGS,
@@ -834,8 +837,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
spin_lock_init(&mhi_cntrl->transition_lock);
spin_lock_init(&mhi_cntrl->wlock);
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
- INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
- INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
init_waitqueue_head(&mhi_cntrl->state_event);
mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -863,6 +864,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mutex_init(&mhi_chan->mutex);
init_completion(&mhi_chan->completion);
rwlock_init(&mhi_chan->lock);
+
+ /* used for setting the BEI field of the TRE */
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ mhi_chan->intmod = mhi_event->intmod;
}
if (mhi_cntrl->bounce_buf) {
@@ -1100,6 +1105,7 @@ static int mhi_driver_probe(struct device *dev)
}
}
+ ret = -EINVAL;
if (dl_chan) {
/*
* If channel supports LPM notifications then status_cb should
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 095d95bc0e37..b1f640b75a94 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -386,6 +386,8 @@ enum dev_st_transition {
DEV_ST_TRANSITION_READY,
DEV_ST_TRANSITION_SBL,
DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_SYS_ERR,
+ DEV_ST_TRANSITION_DISABLE,
DEV_ST_TRANSITION_MAX,
};
@@ -452,6 +454,7 @@ enum mhi_pm_state {
#define PRIMARY_CMD_RING 0
#define MHI_DEV_WAKE_DB 127
#define MHI_MAX_MTU 0xffff
+#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1)
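+/* prandom_u32_max(bmsk) returns a value in [0, bmsk), so the macro above
+ * always yields a nonzero ID in [1, bmsk], which is what the BHI/BHIe
+ * download paths rely on when programming the doorbell registers.
+ */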
enum mhi_er_type {
MHI_ER_TYPE_INVALID = 0x0,
@@ -586,7 +589,7 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
-void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
void mhi_fw_load_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
@@ -627,6 +630,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -670,8 +674,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
- void *buf, void *cb, size_t buf_len, enum mhi_flags flags);
-
+ struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 97e06cc586e4..1f622ce6be8b 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -258,7 +258,7 @@ int mhi_destroy_device(struct device *dev, void *data)
return 0;
}
-static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
@@ -270,6 +270,7 @@ static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
if (mhi_drv->status_cb)
mhi_drv->status_cb(mhi_dev, cb_reason);
}
+EXPORT_SYMBOL_GPL(mhi_notify);
/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
@@ -368,30 +369,37 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
return IRQ_HANDLED;
}
-irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
- struct mhi_controller *mhi_cntrl = dev;
+ struct mhi_controller *mhi_cntrl = priv;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state state = MHI_STATE_MAX;
enum mhi_pm_state pm_state = 0;
enum mhi_ee_type ee = 0;
write_lock_irq(&mhi_cntrl->pm_lock);
- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- state = mhi_get_mhi_state(mhi_cntrl);
- ee = mhi_cntrl->ee;
- mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto exit_intvec;
}
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_cntrl->ee;
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
+ TO_MHI_STATE_STR(state));
+
if (state == MHI_STATE_SYS_ERR) {
- dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
+ dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
}
write_unlock_irq(&mhi_cntrl->pm_lock);
- /* If device in RDDM don't bother processing SYS error */
- if (mhi_cntrl->ee == MHI_EE_RDDM) {
- if (mhi_cntrl->ee != ee) {
+ /* If device supports RDDM don't bother processing SYS error */
+ if (mhi_cntrl->rddm_image) {
+ if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
wake_up_all(&mhi_cntrl->state_event);
}
@@ -405,7 +413,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
if (MHI_IN_PBL(ee))
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
else
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
}
exit_intvec:
@@ -513,7 +521,10 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
result.buf_addr = buf_info->cb_buf;
- result.bytes_xferd = xfer_len;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd =
+ min_t(u16, xfer_len, buf_info->len);
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
@@ -597,7 +608,9 @@ static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
-EOVERFLOW : 0;
- result.bytes_xferd = xfer_len;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
result.buf_addr = buf_info->cb_buf;
result.dir = mhi_chan->dir;
@@ -722,13 +735,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
{
enum mhi_pm_state new_state;
+ /* skip SYS_ERROR handling if RDDM supported */
+ if (mhi_cntrl->ee == MHI_EE_RDDM ||
+ mhi_cntrl->rddm_image)
+ break;
+
dev_dbg(dev, "System error detected\n");
write_lock_irq(&mhi_cntrl->pm_lock);
new_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (new_state == MHI_PM_SYS_ERR_DETECT)
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
break;
}
default:
@@ -774,9 +792,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
}
case MHI_PKT_TYPE_TX_EVENT:
chan = MHI_TRE_GET_EV_CHID(local_rp);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
break;
default:
dev_err(dev, "Unhandled event type: %d\n", type);
@@ -819,14 +846,23 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
chan = MHI_TRE_GET_EV_CHID(local_rp);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
-
- if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
- } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
- parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
}
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
@@ -896,7 +932,7 @@ void mhi_ctrl_ev_task(unsigned long data)
}
write_unlock_irq(&mhi_cntrl->pm_lock);
if (pm_state == MHI_PM_SYS_ERR_DETECT)
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
}
}
@@ -918,9 +954,7 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
- struct mhi_buf_info *buf_info;
- struct mhi_tre *mhi_tre;
+ struct mhi_buf_info buf_info = { };
int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
@@ -945,27 +979,15 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
- /* Generate the TRE */
- buf_info = buf_ring->wp;
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
- buf_info->v_addr = skb->data;
- buf_info->cb_buf = skb;
- buf_info->wp = tre_ring->wp;
- buf_info->dir = mhi_chan->dir;
- buf_info->len = len;
- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
- goto map_error;
-
- mhi_tre = tre_ring->wp;
-
- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
-
- /* increment WP */
- mhi_add_ring_element(mhi_cntrl, tre_ring);
- mhi_add_ring_element(mhi_cntrl, buf_ring);
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
@@ -979,11 +1001,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
read_unlock_bh(&mhi_cntrl->pm_lock);
return 0;
-
-map_error:
- read_unlock_bh(&mhi_cntrl->pm_lock);
-
- return ret;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
@@ -995,9 +1012,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
mhi_dev->dl_chan;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
- struct mhi_buf_info *buf_info;
- struct mhi_tre *mhi_tre;
+ struct mhi_buf_info buf_info = { };
+ int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
@@ -1024,25 +1040,16 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
- /* Generate the TRE */
- buf_info = buf_ring->wp;
- WARN_ON(buf_info->used);
- buf_info->p_addr = mhi_buf->dma_addr;
- buf_info->pre_mapped = true;
- buf_info->cb_buf = mhi_buf;
- buf_info->wp = tre_ring->wp;
- buf_info->dir = mhi_chan->dir;
- buf_info->len = len;
-
- mhi_tre = tre_ring->wp;
-
- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+ buf_info.p_addr = mhi_buf->dma_addr;
+ buf_info.cb_buf = mhi_buf;
+ buf_info.pre_mapped = true;
+ buf_info.len = len;
- /* increment WP */
- mhi_add_ring_element(mhi_cntrl, tre_ring);
- mhi_add_ring_element(mhi_cntrl, buf_ring);
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
@@ -1060,7 +1067,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
EXPORT_SYMBOL_GPL(mhi_queue_dma);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
- void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
+ struct mhi_buf_info *info, enum mhi_flags flags)
{
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_tre *mhi_tre;
@@ -1072,15 +1079,22 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
tre_ring = &mhi_chan->tre_ring;
buf_info = buf_ring->wp;
- buf_info->v_addr = buf;
- buf_info->cb_buf = cb;
+ WARN_ON(buf_info->used);
+ buf_info->pre_mapped = info->pre_mapped;
+ if (info->pre_mapped)
+ buf_info->p_addr = info->p_addr;
+ else
+ buf_info->v_addr = info->v_addr;
+ buf_info->cb_buf = info->cb_buf;
buf_info->wp = tre_ring->wp;
buf_info->dir = mhi_chan->dir;
- buf_info->len = buf_len;
+ buf_info->len = info->len;
- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
- return ret;
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ return ret;
+ }
eob = !!(flags & MHI_EOB);
eot = !!(flags & MHI_EOT);
@@ -1089,7 +1103,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_tre = tre_ring->wp;
mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
/* increment WP */
@@ -1106,6 +1120,7 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring;
+ struct mhi_buf_info buf_info = { };
unsigned long flags;
int ret;
@@ -1121,7 +1136,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
+ buf_info.v_addr = buf;
+ buf_info.cb_buf = buf;
+ buf_info.len = len;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret))
return ret;
@@ -1322,7 +1341,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
while (nr_el--) {
void *buf;
-
+ struct mhi_buf_info info = { };
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
@@ -1330,8 +1349,10 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
}
/* Prepare transfer descriptors */
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
- len, MHI_EOT);
+ info.v_addr = buf;
+ info.cb_buf = buf;
+ info.len = len;
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
if (ret) {
kfree(buf);
goto error_pre_alloc;
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index dc83d65f7784..796098078083 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -288,14 +288,18 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- write_lock_irq(&mhi_chan->lock);
- if (mhi_chan->db_cfg.reset_req)
+ if (mhi_chan->db_cfg.reset_req) {
+ write_lock_irq(&mhi_chan->lock);
mhi_chan->db_cfg.db_mode = true;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ read_lock_irq(&mhi_chan->lock);
/* Only ring DB if ring is not empty */
if (tre_ring->base && tre_ring->wp != tre_ring->rp)
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- write_unlock_irq(&mhi_chan->lock);
+ read_unlock_irq(&mhi_chan->lock);
}
mhi_cntrl->wake_put(mhi_cntrl, false);
@@ -449,19 +453,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
to_mhi_pm_state_str(transition_state));
/* We must notify MHI control driver so it can clean up first */
- if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
- /*
- * If controller supports RDDM, we do not process
- * SYS error state, instead we will jump directly
- * to RDDM state
- */
- if (mhi_cntrl->rddm_image) {
- dev_dbg(dev,
- "Controller supports RDDM, so skip SYS_ERR\n");
- return;
- }
+ if (transition_state == MHI_PM_SYS_ERR_PROCESS)
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
- }
mutex_lock(&mhi_cntrl->pm_mutex);
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -527,8 +520,6 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
mutex_unlock(&mhi_cntrl->pm_mutex);
dev_dbg(dev, "Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
- flush_work(&mhi_cntrl->st_worker);
- flush_work(&mhi_cntrl->fw_worker);
dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
@@ -608,13 +599,17 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
}
/* SYS_ERR worker */
-void mhi_pm_sys_err_worker(struct work_struct *work)
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
- struct mhi_controller *mhi_cntrl = container_of(work,
- struct mhi_controller,
- syserr_worker);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* skip if controller supports RDDM */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
+ return;
+ }
- mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}
/* Device State Transition worker */
@@ -643,7 +638,7 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (MHI_IN_PBL(mhi_cntrl->ee))
- wake_up_all(&mhi_cntrl->state_event);
+ mhi_fw_load_handler(mhi_cntrl);
break;
case DEV_ST_TRANSITION_SBL:
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -662,6 +657,14 @@ void mhi_pm_st_worker(struct work_struct *work)
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
break;
+ case DEV_ST_TRANSITION_SYS_ERR:
+ mhi_pm_disable_transition
+ (mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ break;
+ case DEV_ST_TRANSITION_DISABLE:
+ mhi_pm_disable_transition
+ (mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+ break;
default:
break;
}
@@ -669,6 +672,149 @@ void mhi_pm_st_worker(struct work_struct *work)
}
}
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ int ret;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Return busy if there are any pending resources */
+ if (atomic_read(&mhi_cntrl->dev_wake))
+ return -EBUSY;
+
+ /* Take MHI out of M2 state */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M1 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Could not enter M0/M1 state");
+ return -EIO;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ if (atomic_read(&mhi_cntrl->dev_wake)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ return -EBUSY;
+ }
+
+ dev_info(dev, "Allowing M3 transition\n");
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M3 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev, "Wait for M3 completion\n");
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M3 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M3 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Notify clients about entering LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_suspend);
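+/* A minimal sketch of how a controller driver might wire this export into
+ * its system-sleep path (the foo_* names are illustrative, not part of
+ * this patch):
+ *
+ *	static int __maybe_unused foo_mhi_suspend(struct device *dev)
+ *	{
+ *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+ *
+ *		return mhi_pm_suspend(mhi_cntrl);
+ *	}
+ *	static SIMPLE_DEV_PM_OPS(foo_mhi_pm_ops, foo_mhi_suspend, foo_mhi_resume);
+ */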
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state cur_state;
+ int ret;
+
+ dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Notify clients about exiting LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+ if (cur_state != MHI_PM_M3_EXIT) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M0 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M0 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume);
+
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
int ret;
@@ -760,6 +906,7 @@ static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
+ enum mhi_state state;
enum mhi_ee_type current_ee;
enum dev_st_transition next_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -829,13 +976,36 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
goto error_bhi_offset;
}
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT,
+ &val) ||
+ !val,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ ret = -EIO;
+ dev_info(dev, "Failed to reset MHI due to syserr state\n");
+ goto error_bhi_offset;
+ }
+
+ /*
+ * device clears INTVEC as part of RESET processing;
+ * re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
/* Transition to next state */
next_state = MHI_IN_PBL(current_ee) ?
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
- if (next_state == DEV_ST_TRANSITION_PBL)
- schedule_work(&mhi_cntrl->fw_worker);
-
mhi_queue_state_transition(mhi_cntrl, next_state);
mutex_unlock(&mhi_cntrl->pm_mutex);
@@ -876,7 +1046,12 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
}
- mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+
+ /* Wait for shutdown to complete */
+ flush_work(&mhi_cntrl->st_worker);
+
mhi_deinit_free_irq(mhi_cntrl);
if (!mhi_cntrl->pre_init) {
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e5f5f48d69d2..3affd180baac 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1275,13 +1275,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
- 0),
- /* Some timers on omap4 and later */
- SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff,
- 0),
- SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff,
- 0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
@@ -1404,6 +1397,13 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
+ /* Some timers on omap4 and later */
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
@@ -2744,6 +2744,17 @@ static int sysc_init_soc(struct sysc *ddata)
if (match && match->data)
sysc_soc->soc = (int)match->data;
+ /* Ignore devices that are not available on HS and EMU SoCs */
+ if (!sysc_soc->general_purpose) {
+ switch (sysc_soc->soc) {
+ case SOC_3430 ... SOC_3630:
+ sysc_add_disabled(0x48304000); /* timer12 */
+ break;
+ default:
+ break;
+ }
+ }
+
match = soc_device_match(sysc_soc_feat_match);
if (!match)
return 0;
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index ff70575b2db6..a58ac0c8e282 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -6,10 +6,61 @@
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/vexpress.h>
+#define SYS_MISC 0x0
+#define SYS_MISC_MASTERSITE (1 << 14)
+
+#define SYS_PROCID0 0x24
+#define SYS_PROCID1 0x28
+#define SYS_HBI_MASK 0xfff
+#define SYS_PROCIDx_HBI_SHIFT 0
+
+#define SYS_CFGDATA 0x40
+
+#define SYS_CFGCTRL 0x44
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT 0x48
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+#define VEXPRESS_SITE_MB 0
+#define VEXPRESS_SITE_DB1 1
+#define VEXPRESS_SITE_DB2 2
+#define VEXPRESS_SITE_MASTER 0xf
+
+struct vexpress_syscfg {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+ struct list_head list;
+ struct vexpress_syscfg *syscfg;
+ struct regmap *regmap;
+ int num_templates;
+ u32 template[]; /* Keep it last! */
+};
+
+struct vexpress_config_bridge_ops {
+ struct regmap * (*regmap_init)(struct device *dev, void *context);
+ void (*regmap_exit)(struct regmap *regmap, void *context);
+};
struct vexpress_config_bridge {
struct vexpress_config_bridge_ops *ops;
@@ -18,26 +69,20 @@ struct vexpress_config_bridge {
static DEFINE_MUTEX(vexpress_config_mutex);
-static struct class *vexpress_config_class;
static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
-void vexpress_config_set_master(u32 site)
+static void vexpress_config_set_master(u32 site)
{
vexpress_config_site_master = site;
}
-u32 vexpress_config_get_master(void)
-{
- return vexpress_config_site_master;
-}
-
-void vexpress_config_lock(void *arg)
+static void vexpress_config_lock(void *arg)
{
mutex_lock(&vexpress_config_mutex);
}
-void vexpress_config_unlock(void *arg)
+static void vexpress_config_unlock(void *arg)
{
mutex_unlock(&vexpress_config_mutex);
}
@@ -59,7 +104,7 @@ static void vexpress_config_find_prop(struct device_node *node,
}
}
-int vexpress_config_get_topo(struct device_node *node, u32 *site,
+static int vexpress_config_get_topo(struct device_node *node, u32 *site,
u32 *position, u32 *dcc)
{
vexpress_config_find_prop(node, "arm,vexpress,site", site);
@@ -88,9 +133,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
struct regmap *regmap;
struct regmap **res;
- if (WARN_ON(dev->parent->class != vexpress_config_class))
- return ERR_PTR(-ENODEV);
-
bridge = dev_get_drvdata(dev->parent);
if (WARN_ON(!bridge))
return ERR_PTR(-EINVAL);
@@ -113,91 +155,265 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
-struct device *vexpress_config_bridge_register(struct device *parent,
- struct vexpress_config_bridge_ops *ops, void *context)
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int index, bool write, u32 *data)
{
- struct device *dev;
- struct vexpress_config_bridge *bridge;
+ struct vexpress_syscfg *syscfg = func->syscfg;
+ u32 command, status;
+ int tries;
+ long timeout;
- if (!vexpress_config_class) {
- vexpress_config_class = class_create(THIS_MODULE,
- "vexpress-config");
- if (IS_ERR(vexpress_config_class))
- return (void *)vexpress_config_class;
+ if (WARN_ON(index >= func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = func->template[index];
+ command |= SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
+ func, command, *data);
+ writel(*data, syscfg->base + SYS_CFGDATA);
+ writel(0, syscfg->base + SYS_CFGSTAT);
+ writel(command, syscfg->base + SYS_CFGCTRL);
+ mb();
+
+ /* The operation can take ages... Go to sleep, 100us initially */
+ tries = 100;
+ timeout = 100;
+ do {
+ if (!irqs_disabled()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(timeout));
+ if (signal_pending(current))
+ return -EINTR;
+ } else {
+ udelay(timeout);
+ }
+
+ status = readl(syscfg->base + SYS_CFGSTAT);
+ if (status & SYS_CFGSTAT_ERR)
+ return -EFAULT;
+
+ if (timeout > 20)
+ timeout -= 20;
+ } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
+ if (WARN_ON_ONCE(!tries))
+ return -ETIMEDOUT;
+
+ if (!write) {
+ *data = readl(syscfg->base + SYS_CFGDATA);
+ dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
}
- dev = device_create(vexpress_config_class, parent, 0,
- NULL, "%s.bridge", dev_name(parent));
+ return 0;
+}
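+/* Rough worst-case budget implied by the loop above: sleeps of 100, 80, 60
+ * and 40 us while the interval decays, then 96 iterations at the 20 us
+ * floor, i.e. about 2.2 ms in total before -ETIMEDOUT, ignoring scheduling
+ * overhead.
+ */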
+
+static int vexpress_syscfg_read(void *context, unsigned int index,
+ unsigned int *val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, false, val);
+}
+
+static int vexpress_syscfg_write(void *context, unsigned int index,
+ unsigned int val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, true, &val);
+}
+
+static struct regmap_config vexpress_syscfg_regmap_config = {
+ .lock = vexpress_config_lock,
+ .unlock = vexpress_config_unlock,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = vexpress_syscfg_read,
+ .reg_write = vexpress_syscfg_write,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
- if (IS_ERR(dev))
- return dev;
+static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
+ void *context)
+{
+ int err;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func;
+ struct property *prop;
+ const __be32 *val = NULL;
+ __be32 energy_quirk[4];
+ int num;
+ u32 site, position, dcc;
+ int i;
+
+ err = vexpress_config_get_topo(dev->of_node, &site,
+ &position, &dcc);
+ if (err)
+ return ERR_PTR(err);
+
+ prop = of_find_property(dev->of_node,
+ "arm,vexpress-sysreg,func", NULL);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
- bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- put_device(dev);
- device_unregister(dev);
+ num = prop->length / sizeof(u32) / 2;
+ val = prop->value;
+
+ /*
+ * "arm,vexpress-energy" function used to be described
+ * by its first device only, now it requires both
+ */
+ if (num == 1 && of_device_is_compatible(dev->of_node,
+ "arm,vexpress-energy")) {
+ num = 2;
+ energy_quirk[0] = *val;
+ energy_quirk[2] = *val++;
+ energy_quirk[1] = *val;
+ energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
+ val = energy_quirk;
+ }
+
+ func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
+ if (!func)
return ERR_PTR(-ENOMEM);
+
+ func->syscfg = syscfg;
+ func->num_templates = num;
+
+ for (i = 0; i < num; i++) {
+ u32 function, device;
+
+ function = be32_to_cpup(val++);
+ device = be32_to_cpup(val++);
+
+ dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
+ func, site, position, dcc,
+ function, device);
+
+ func->template[i] = SYS_CFGCTRL_DCC(dcc);
+ func->template[i] |= SYS_CFGCTRL_SITE(site);
+ func->template[i] |= SYS_CFGCTRL_POSITION(position);
+ func->template[i] |= SYS_CFGCTRL_FUNC(function);
+ func->template[i] |= SYS_CFGCTRL_DEVICE(device);
}
- bridge->ops = ops;
- bridge->context = context;
- dev_set_drvdata(dev, bridge);
+ vexpress_syscfg_regmap_config.max_register = num - 1;
- dev_dbg(parent, "Registered bridge '%s', parent node %p\n",
- dev_name(dev), parent->of_node);
+ func->regmap = regmap_init(dev, NULL, func,
+ &vexpress_syscfg_regmap_config);
- return dev;
-}
+ if (IS_ERR(func->regmap)) {
+ void *err = func->regmap;
+ kfree(func);
+ return err;
+ }
+
+ list_add(&func->list, &syscfg->funcs);
-static int vexpress_config_node_match(struct device *dev, const void *data)
+ return func->regmap;
+}
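+/* Consumers see the templates built above as plain register indices; a
+ * hypothetical user of the regmap returned through
+ * devm_regmap_init_vexpress_config() would read function 0 with:
+ *
+ *	unsigned int val;
+ *	int err = regmap_read(regmap, 0, &val);
+ */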
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
{
- const struct device_node *node = data;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func, *tmp;
- dev_dbg(dev, "Parent node %p, looking for %p\n",
- dev->parent->of_node, node);
+ regmap_exit(regmap);
- return dev->parent->of_node == node;
+ list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+ if (func->regmap == regmap) {
+ list_del(&func->list);
+ kfree(func);
+ break;
+ }
+ }
}
-static int vexpress_config_populate(struct device_node *node)
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+ .regmap_init = vexpress_syscfg_regmap_init,
+ .regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
+static int vexpress_syscfg_probe(struct platform_device *pdev)
{
- struct device_node *bridge;
- struct device *parent;
- int ret;
+ struct vexpress_syscfg *syscfg;
+ struct resource *res;
+ struct vexpress_config_bridge *bridge;
+ struct device_node *node;
+ int master;
+ u32 dt_hbi;
+
+ syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+ if (!syscfg)
+ return -ENOMEM;
+ syscfg->dev = &pdev->dev;
+ INIT_LIST_HEAD(&syscfg->funcs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ syscfg->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(syscfg->base))
+ return PTR_ERR(syscfg->base);
- bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ bridge = devm_kmalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return -EINVAL;
+ return -ENOMEM;
- parent = class_find_device(vexpress_config_class, NULL, bridge,
- vexpress_config_node_match);
- of_node_put(bridge);
- if (WARN_ON(!parent))
- return -ENODEV;
+ bridge->ops = &vexpress_syscfg_bridge_ops;
+ bridge->context = syscfg;
- ret = of_platform_populate(node, NULL, NULL, parent);
+ dev_set_drvdata(&pdev->dev, bridge);
- put_device(parent);
+ master = readl(syscfg->base + SYS_MISC) & SYS_MISC_MASTERSITE ?
+ VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
+ vexpress_config_set_master(master);
- return ret;
-}
+ /* Confirm board type against DT property, if available */
+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
+ u32 id = readl(syscfg->base + (master == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
+ u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
-static int __init vexpress_config_init(void)
-{
- int err = 0;
- struct device_node *node;
+ if (WARN_ON(dt_hbi != hbi))
+ dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n",
+ dt_hbi, hbi);
+ }
- /* Need the config devices early, before the "normal" devices... */
for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
- err = vexpress_config_populate(node);
- if (err) {
- of_node_put(node);
- break;
- }
+ struct device_node *bridge_np;
+
+ bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ if (bridge_np != pdev->dev.parent->of_node)
+ continue;
+
+ of_platform_populate(node, NULL, NULL, &pdev->dev);
}
- return err;
+ return 0;
}
-postcore_initcall(vexpress_config_init);
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+ { "vexpress-syscfg", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, vexpress_syscfg_id_table);
+
+static struct platform_driver vexpress_syscfg_driver = {
+ .driver.name = "vexpress-syscfg",
+ .id_table = vexpress_syscfg_id_table,
+ .probe = vexpress_syscfg_probe,
+};
+module_platform_driver(vexpress_syscfg_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index faca0f346fff..a0a7ae705de8 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -586,7 +586,7 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
return 0;
}
-int register_cdrom(struct cdrom_device_info *cdi)
+int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
{
static char banner_printed;
const struct cdrom_device_ops *cdo = cdi->ops;
@@ -601,6 +601,9 @@ int register_cdrom(struct cdrom_device_info *cdi)
cdrom_sysctl_register();
}
+ cdi->disk = disk;
+ disk->cdi = cdi;
+
ENSURE(cdo, drive_status, CDC_DRIVE_STATUS);
if (cdo->check_events == NULL && cdo->media_changed == NULL)
WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC));
@@ -2292,37 +2295,46 @@ retry:
return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
}
-static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
- void __user *argp)
+int cdrom_multisession(struct cdrom_device_info *cdi,
+ struct cdrom_multisession *info)
{
- struct cdrom_multisession ms_info;
u8 requested_format;
int ret;
- cd_dbg(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
-
if (!(cdi->ops->capability & CDC_MULTI_SESSION))
return -ENOSYS;
- if (copy_from_user(&ms_info, argp, sizeof(ms_info)))
- return -EFAULT;
-
- requested_format = ms_info.addr_format;
+ requested_format = info->addr_format;
if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
return -EINVAL;
- ms_info.addr_format = CDROM_LBA;
+ info->addr_format = CDROM_LBA;
- ret = cdi->ops->get_last_session(cdi, &ms_info);
- if (ret)
- return ret;
+ ret = cdi->ops->get_last_session(cdi, info);
+ if (!ret)
+ sanitize_format(&info->addr, &info->addr_format,
+ requested_format);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdrom_multisession);
- sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format);
+static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_multisession info;
+ int ret;
+
+ cd_dbg(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
- if (copy_to_user(argp, &ms_info, sizeof(ms_info)))
+ if (copy_from_user(&info, argp, sizeof(info)))
+ return -EFAULT;
+ ret = cdrom_multisession(cdi, &info);
+ if (ret)
+ return ret;
+ if (copy_to_user(argp, &info, sizeof(info)))
return -EFAULT;
cd_dbg(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
- return 0;
+ return ret;
}
static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
@@ -2663,32 +2675,37 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
return 0;
}
+int cdrom_read_tocentry(struct cdrom_device_info *cdi,
+ struct cdrom_tocentry *entry)
+{
+ u8 requested_format = entry->cdte_format;
+ int ret;
+
+ if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
+ return -EINVAL;
+
+ /* make interface to low-level uniform */
+ entry->cdte_format = CDROM_MSF;
+ ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, entry);
+ if (!ret)
+ sanitize_format(&entry->cdte_addr, &entry->cdte_format,
+ requested_format);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdrom_read_tocentry);
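+/* Sketch of a host driver using the newly exported helper directly, instead
+ * of going through the CDROMREADTOCENTRY ioctl (the leadout query here is
+ * only an example):
+ *
+ *	struct cdrom_tocentry entry = {
+ *		.cdte_track = CDROM_LEADOUT,
+ *		.cdte_format = CDROM_LBA,
+ *	};
+ *	int err = cdrom_read_tocentry(cdi, &entry);
+ */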
+
static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_tocentry entry;
- u8 requested_format;
int ret;
- /* cd_dbg(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
-
if (copy_from_user(&entry, argp, sizeof(entry)))
return -EFAULT;
-
- requested_format = entry.cdte_format;
- if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
- return -EINVAL;
- /* make interface to low-level uniform */
- entry.cdte_format = CDROM_MSF;
- ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry);
- if (ret)
- return ret;
- sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format);
-
- if (copy_to_user(argp, &entry, sizeof(entry)))
+ ret = cdrom_read_tocentry(cdi, &entry);
+ if (!ret && copy_to_user(argp, &entry, sizeof(entry)))
return -EFAULT;
- /* cd_dbg(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
- return 0;
+ return ret;
}
static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi,
@@ -3631,7 +3648,7 @@ static void cdrom_update_settings(void)
}
static int cdrom_sysctl_handler(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index c51292c2a131..09b0cd292720 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -770,7 +770,7 @@ static int probe_gdrom(struct platform_device *devptr)
goto probe_fail_no_disk;
}
probe_gdrom_setupdisk();
- if (register_cdrom(gd.cd_info)) {
+ if (register_cdrom(gd.disk, gd.cd_info)) {
err = -ENODEV;
goto probe_fail_cdrom_register;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d4665fe9ccd2..ac25833eb19e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -209,7 +209,7 @@ config DTLK
config XILINX_HWICAP
tristate "Xilinx HWICAP Support"
- depends on XILINX_VIRTEX || MICROBLAZE
+ depends on MICROBLAZE
help
This option enables support for Xilinx Internal Configuration
Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 66a62d17a3f5..4b34a5195c65 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr,
unsigned int flags)
{
intel_private.driver->write_entry(addr, pg, flags);
+ readl(intel_private.gtt + pg);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
j++;
}
}
- wmb();
+ readl(intel_private.gtt + j - 1);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void)
static void i9xx_chipset_flush(void)
{
+ wmb();
if (intel_private.i9xx_flush_page)
writel(1, intel_private.i9xx_flush_page);
}
@@ -1405,13 +1407,16 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
- mask = intel_private.driver->dma_mask_size;
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask %d-bit failed!\n", mask);
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(mask));
+ if (bridge) {
+ mask = intel_private.driver->dma_mask_size;
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask %d-bit failed!\n",
+ mask);
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(mask));
+ }
if (intel_gtt_init() != 0) {
intel_gmch_remove();
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 9bc46da8d77a..ac00d78ee9cc 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -474,6 +474,19 @@ config HW_RANDOM_KEYSTONE
help
This option enables Keystone's hardware random generator.
+config HW_RANDOM_CCTRNG
+ tristate "Arm CryptoCell True Random Number Generator support"
+ depends on HAS_IOMEM && OF
+ help
+ Say 'Y' to enable the True Random Number Generator driver for the
+ Arm TrustZone CryptoCell family of processors.
+ Currently the CryptoCell 713 and 703 are supported.
+ The driver is supported only on SoCs where a Trusted Execution
+ Environment is not used.
+ Choose 'M' to compile this driver as a module. The module
+ will be called cctrng.
+ If unsure, say 'N'.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index a7801b49ce6c..2c6724735345 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
+obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
new file mode 100644
index 000000000000..619148fb2dc9
--- /dev/null
+++ b/drivers/char/hw_random/cctrng.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/workqueue.h>
+#include <linux/circ_buf.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/fips.h>
+
+#include "cctrng.h"
+
+#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
+#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
+#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))
+
+#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \
+ (FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))
+
+#define CC_HW_RESET_LOOP_COUNT 10
+#define CC_TRNG_SUSPEND_TIMEOUT 3000
+
+/* data circular buffer in words must be:
+ * - of a power-of-2 size (limitation of circ_buf.h macros)
+ * - at least 6, the size generated in the EHR according to HW implementation
+ */
+#define CCTRNG_DATA_BUF_WORDS 32
+
+/* The timeout for the TRNG operation should be calculated with the formula:
+ * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
+ * where:
+ * - SAMPLE_CNT is an input value from the characterisation process
+ * - all the rest are constants
+ */
+#define EHR_NUM 1
+#define VN_COEFF 4
+#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
+#define SCALE_VALUE 2
+#define CCTRNG_TIMEOUT(smpl_cnt) \
+ (EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
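+/* Worked example, assuming the 192-bit EHR implied by a 6-word
+ * CC_TRNG_EHR_IN_WORDS and a hypothetical sampling ratio of 1000:
+ * 1 * 4 * 192 * 1000 * 2 = 1,536,000 ring-oscillator cycles.
+ */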
+
+struct cctrng_drvdata {
+ struct platform_device *pdev;
+ void __iomem *cc_base;
+ struct clk *clk;
+ struct hwrng rng;
+ u32 active_rosc;
+ /* Sampling interval for each ring oscillator:
+ * count of ring oscillator cycles between consecutive bits sampling.
+ * Value of 0 indicates non-valid rosc
+ */
+ u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];
+
+ u32 data_buf[CCTRNG_DATA_BUF_WORDS];
+ struct circ_buf circ;
+ struct work_struct compwork;
+ struct work_struct startwork;
+
+ /* pending_hw - 1 when HW is pending, 0 when it is idle */
+ atomic_t pending_hw;
+
+ /* protects against multiple concurrent consumers of data_buf */
+ spinlock_t read_lock;
+};
+
+
+/* functions for write/read CC registers */
+static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
+
+static int cc_trng_pm_get(struct device *dev)
+{
+ int rc = 0;
+
+ rc = pm_runtime_get_sync(dev);
+
+ /* pm_runtime_get_sync() can return 1 as a valid return code */
+ return (rc == 1 ? 0 : rc);
+}
+
+static void cc_trng_pm_put_suspend(struct device *dev)
+{
+ int rc = 0;
+
+ pm_runtime_mark_last_busy(dev);
+ rc = pm_runtime_put_autosuspend(dev);
+ if (rc)
+ dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
+}
+
+static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ /* must be before the enabling to avoid redundant suspending */
+ pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
+ return pm_runtime_set_active(dev);
+}
+
+static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ /* enable the PM module */
+ pm_runtime_enable(dev);
+}
+
+static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ pm_runtime_disable(dev);
+}
+
+
+static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+ struct device_node *np = drvdata->pdev->dev.of_node;
+ int rc;
+ int i;
+ /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
+ int ret = -EINVAL;
+
+ rc = of_property_read_u32_array(np, "arm,rosc-ratio",
+ drvdata->smpl_ratio,
+ CC_TRNG_NUM_OF_ROSCS);
+ if (rc) {
+ /* arm,rosc-ratio was not found in device tree */
+ return rc;
+ }
+
+ /* verify that at least one rosc has (sampling ratio > 0) */
+ for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
+ dev_dbg(dev, "rosc %d sampling ratio %u",
+ i, drvdata->smpl_ratio[i]);
+
+ if (drvdata->smpl_ratio[i] > 0)
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
+ drvdata->active_rosc += 1;
+
+ while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
+ if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
+ return 0;
+
+ drvdata->active_rosc += 1;
+ }
+ return -EINVAL;
+}
+
+
+static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
+{
+ u32 max_cycles;
+
+ /* Set watchdog threshold to maximal allowed time (in CPU cycles) */
+ max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
+ cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
+
+ /* enable the RND source */
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
+
+ /* unmask RNG interrupts */
+ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
+}
+
+
+/* increase circular data buffer index (head/tail) */
+static inline void circ_idx_inc(int *idx, int bytes)
+{
+ *idx += (bytes + 3) >> 2;
+ *idx &= (CCTRNG_DATA_BUF_WORDS - 1);
+}
+
+static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
+{
+ return CIRC_SPACE(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+
+}
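+/* The CIRC_* helpers from <linux/circ_buf.h> assume a power-of-2 size;
+ * for example, with head = 5, tail = 2 and a 32-word buffer,
+ * CIRC_SPACE(5, 2, 32) = (2 - 6) & 31 = 28 free words.
+ */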
+
+static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ /* current implementation ignores "wait" */
+
+ struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
+ struct device *dev = &(drvdata->pdev->dev);
+ u32 *buf = (u32 *)drvdata->circ.buf;
+ size_t copied = 0;
+ size_t cnt_w;
+ size_t size;
+ size_t left;
+
+ if (!spin_trylock(&drvdata->read_lock)) {
+ /* concurrent consumers from data_buf cannot be served */
+ dev_dbg_ratelimited(dev, "unable to hold lock\n");
+ return 0;
+ }
+
+ /* copy till end of data buffer (without wrap back) */
+ cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+ size = min((cnt_w<<2), max);
+ memcpy(data, &(buf[drvdata->circ.tail]), size);
+ copied = size;
+ circ_idx_inc(&drvdata->circ.tail, size);
+ /* copy rest of data in data buffer */
+ left = max - copied;
+ if (left > 0) {
+ cnt_w = CIRC_CNT(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+ size = min((cnt_w<<2), left);
+ memcpy(data + copied, &(buf[drvdata->circ.tail]), size);
+ copied += size;
+ circ_idx_inc(&drvdata->circ.tail, size);
+ }
+
+ spin_unlock(&drvdata->read_lock);
+
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
+ /* re-check space in buffer to avoid potential race */
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ /* increment device's usage counter */
+ int rc = cc_trng_pm_get(dev);
+
+ if (rc) {
+ dev_err(dev,
+ "cc_trng_pm_get returned %x\n",
+ rc);
+ return rc;
+ }
+
+ /* schedule execution of deferred work handler
+ * to fill the data buffer
+ */
+ schedule_work(&drvdata->startwork);
+ } else {
+ atomic_set(&drvdata->pending_hw, 0);
+ }
+ }
+ }
+
+ return copied;
+}
+
+static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
+{
+ u32 tmp_smpl_cnt = 0;
+ struct device *dev = &(drvdata->pdev->dev);
+
+ dev_dbg(dev, "cctrng hw trigger.\n");
+
+ /* enable the HW RND clock */
+ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
+
+ /* do software reset */
+ cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
+ /* in order to verify that the reset has completed,
+ * the sample count needs to be verified
+ */
+ do {
+ /* enable the HW RND clock */
+ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
+
+ /* set sampling ratio (rng_clocks) between consecutive bits */
+ cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
+ drvdata->smpl_ratio[drvdata->active_rosc]);
+
+ /* read the sampling ratio */
+ tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);
+
+ } while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);
+
+ /* disable the RND source for setting new parameters in HW */
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
+
+ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);
+
+ cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);
+
+ /* Debug Control register: set to 0 - no bypasses */
+ cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);
+
+ cc_trng_enable_rnd_source(drvdata);
+}
+
+static void cc_trng_compwork_handler(struct work_struct *w)
+{
+ u32 isr = 0;
+ u32 ehr_valid = 0;
+ struct cctrng_drvdata *drvdata =
+ container_of(w, struct cctrng_drvdata, compwork);
+ struct device *dev = &(drvdata->pdev->dev);
+ int i;
+
+ /* stop DMA and the RNG source */
+ cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
+
+ /* read RNG_ISR and check for errors */
+ isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
+ ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
+ dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);
+
+ if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
+ fips_fail_notify();
+ /* FIPS error is fatal */
+ panic("Got HW CRNGT error while fips is enabled!\n");
+ }
+
+ /* Clear all pending RNG interrupts */
+ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);
+
+
+ if (!ehr_valid) {
+ /* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
+ if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
+ CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
+ dev_dbg(dev, "cctrng autocorr/timeout error.\n");
+ goto next_rosc;
+ }
+
+ /* in case of VN error, ignore it */
+ }
+
+ /* read EHR data from registers */
+ for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
+ /* calc word ptr in data_buf */
+ u32 *buf = (u32 *)drvdata->circ.buf;
+
+ buf[drvdata->circ.head] = cc_ioread(drvdata,
+ CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));
+
+ /* EHR_DATA registers are cleared on read. If a zero value was
+ * read, restart the entropy collection.
+ */
+ if (buf[drvdata->circ.head] == 0) {
+ dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
+ drvdata->active_rosc);
+ goto next_rosc;
+ }
+
+ circ_idx_inc(&drvdata->circ.head, 1<<2);
+ }
+
+ atomic_set(&drvdata->pending_hw, 0);
+
+ /* continue to fill data buffer if needed */
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
+ /* Re-enable rnd source */
+ cc_trng_enable_rnd_source(drvdata);
+ return;
+ }
+ }
+
+ cc_trng_pm_put_suspend(dev);
+
+ dev_dbg(dev, "compwork handler done\n");
+ return;
+
+next_rosc:
+ if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
+ (cc_trng_change_rosc(drvdata) == 0)) {
+ /* trigger trng hw with next rosc */
+ cc_trng_hw_trigger(drvdata);
+ } else {
+ atomic_set(&drvdata->pending_hw, 0);
+ cc_trng_pm_put_suspend(dev);
+ }
+}
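+
+/* Note: the recovery strategy above escalates through the ring oscillators:
+ * AUTOCORR/WATCHDOG errors and all-zero EHR reads retry with the next ROSC
+ * via cc_trng_change_rosc(); once no ROSC is left (or the buffer is full),
+ * the pending_hw gate is released and the device may runtime-suspend.
+ */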
+
+static irqreturn_t cc_isr(int irq, void *dev_id)
+{
+ struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
+ struct device *dev = &(drvdata->pdev->dev);
+ u32 irr;
+
+ /* if the driver is suspended, return; this is probably a shared interrupt */
+ if (pm_runtime_suspended(dev))
+ return IRQ_NONE;
+
+ /* read the interrupt status */
+ irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
+ dev_dbg(dev, "Got IRR=0x%08X\n", irr);
+
+ if (irr == 0) /* Probably shared interrupt line */
+ return IRQ_NONE;
+
+ /* clear interrupt - must be before processing events */
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);
+
+ /* RNG interrupt - most probable */
+ if (irr & CC_HOST_RNG_IRQ_MASK) {
+ /* Mask RNG interrupts - will be unmasked in deferred work */
+ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);
+
+ /* Clear the RNG interrupt here, so that it does not fire
+ * again once RNG interrupts are unmasked.
+ */
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
+ CC_HOST_RNG_IRQ_MASK);
+
+ irr &= ~CC_HOST_RNG_IRQ_MASK;
+
+ /* schedule execution of deferred work handler */
+ schedule_work(&drvdata->compwork);
+ }
+
+ if (irr) {
+ dev_dbg_ratelimited(dev,
+ "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
+ /* Just warning */
+ }
+
+ return IRQ_HANDLED;
+}
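+
+/* Note: the IRQ line is requested with IRQF_SHARED (see cctrng_probe), so
+ * the IRQ_NONE returns above (device suspended, or IRR reads zero) let the
+ * kernel hand the interrupt to the other devices sharing the line.
+ */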
+
+static void cc_trng_startwork_handler(struct work_struct *w)
+{
+ struct cctrng_drvdata *drvdata =
+ container_of(w, struct cctrng_drvdata, startwork);
+
+ drvdata->active_rosc = 0;
+ cc_trng_hw_trigger(drvdata);
+}
+
+
+static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
+{
+ struct clk *clk;
+ struct device *dev = &(drvdata->pdev->dev);
+ int rc = 0;
+
+ clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "Error getting clock: %pe\n", clk);
+ return PTR_ERR(clk);
+ }
+ drvdata->clk = clk;
+
+ rc = clk_prepare_enable(drvdata->clk);
+ if (rc) {
+ dev_err(dev, "Failed to enable clock\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
+{
+ clk_disable_unprepare(drvdata->clk);
+}
+
+
+static int cctrng_probe(struct platform_device *pdev)
+{
+ struct resource *req_mem_cc_regs = NULL;
+ struct cctrng_drvdata *drvdata;
+ struct device *dev = &pdev->dev;
+ int rc = 0;
+ u32 val;
+ int irq;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!drvdata->rng.name)
+ return -ENOMEM;
+
+ drvdata->rng.read = cctrng_read;
+ drvdata->rng.priv = (unsigned long)drvdata;
+ drvdata->rng.quality = CC_TRNG_QUALITY;
+
+ platform_set_drvdata(pdev, drvdata);
+ drvdata->pdev = pdev;
+
+ drvdata->circ.buf = (char *)drvdata->data_buf;
+
+ /* Get device resources */
+ /* First, the CC register space */
+ req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* Map the register space */
+ drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ if (IS_ERR(drvdata->cc_base)) {
+ dev_err(dev, "Failed to ioremap registers");
+ return PTR_ERR(drvdata->cc_base);
+ }
+
+ dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
+ req_mem_cc_regs);
+ dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
+ &req_mem_cc_regs->start, drvdata->cc_base);
+
+ /* Then IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Failed getting IRQ resource\n");
+ return irq;
+ }
+
+ /* parse the sampling ratio from the device tree */
+ rc = cc_trng_parse_sampling_ratio(drvdata);
+ if (rc) {
+ dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
+ return rc;
+ }
+
+ rc = cc_trng_clk_init(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_trng_clk_init failed\n");
+ return rc;
+ }
+
+ INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
+ INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
+ spin_lock_init(&drvdata->read_lock);
+
+ /* register the driver isr function */
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n", irq);
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
+
+ /* Clear all pending interrupts */
+ val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
+ dev_dbg(dev, "IRR=0x%08X\n", val);
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);
+
+ /* unmask HOST RNG interrupt */
+ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
+ cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
+ ~CC_HOST_RNG_IRQ_MASK);
+
+ /* init PM */
+ rc = cc_trng_pm_init(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_trng_pm_init failed\n");
+ goto post_clk_err;
+ }
+
+ /* increment device's usage counter */
+ rc = cc_trng_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
+ goto post_pm_err;
+ }
+
+ /* set pending_hw to ensure that HW is not triggered from a read */
+ atomic_set(&drvdata->pending_hw, 1);
+
+ /* registration of the hwrng device */
+ rc = hwrng_register(&drvdata->rng);
+ if (rc) {
+ dev_err(dev, "Could not register hwrng device.\n");
+ goto post_pm_err;
+ }
+
+ /* trigger HW to start generating data */
+ drvdata->active_rosc = 0;
+ cc_trng_hw_trigger(drvdata);
+
+ /* All set, we can allow auto-suspend */
+ cc_trng_pm_go(drvdata);
+
+ dev_info(dev, "ARM cctrng device initialized\n");
+
+ return 0;
+
+post_pm_err:
+ cc_trng_pm_fini(drvdata);
+
+post_clk_err:
+ cc_trng_clk_fini(drvdata);
+
+ return rc;
+}
+
+static int cctrng_remove(struct platform_device *pdev)
+{
+ struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "Releasing cctrng resources...\n");
+
+ hwrng_unregister(&drvdata->rng);
+
+ cc_trng_pm_fini(drvdata);
+
+ cc_trng_clk_fini(drvdata);
+
+ dev_info(dev, "ARM cctrng device terminated\n");
+
+ return 0;
+}
+
+static int __maybe_unused cctrng_suspend(struct device *dev)
+{
+ struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
+ POWER_DOWN_ENABLE);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ return 0;
+}
+
+static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
+{
+ unsigned int val;
+ unsigned int i;
+
+ for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
+ /* in cc7x3, NVM_IS_IDLE indicates that the CC reset has
+ * completed and the device is fully functional
+ */
+ val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
+ if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
+ /* HW indicates that reset has completed */
+ return true;
+ }
+ /* allow other processes to be scheduled on this CPU */
+ schedule();
+ }
+ /* reset not completed */
+ return false;
+}
+
+static int __maybe_unused cctrng_resume(struct device *dev)
+{
+ struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
+ int rc;
+
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ /* Enable the device's source clock */
+ rc = clk_prepare_enable(drvdata->clk);
+ if (rc) {
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
+ return rc;
+ }
+
+ /* wait for CryptoCell reset completion */
+ if (!cctrng_wait_for_reset_completion(drvdata)) {
+ dev_err(dev, "CryptoCell reset not completed\n");
+ return -EBUSY;
+ }
+
+ /* unmask HOST RNG interrupt */
+ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
+ cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
+ ~CC_HOST_RNG_IRQ_MASK);
+
+ cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
+ POWER_DOWN_DISABLE);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);
+
+static const struct of_device_id arm_cctrng_dt_match[] = {
+ { .compatible = "arm,cryptocell-713-trng", },
+ { .compatible = "arm,cryptocell-703-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);
+
+static struct platform_driver cctrng_driver = {
+ .driver = {
+ .name = "cctrng",
+ .of_match_table = arm_cctrng_dt_match,
+ .pm = &cctrng_pm,
+ },
+ .probe = cctrng_probe,
+ .remove = cctrng_remove,
+};
+
+static int __init cctrng_mod_init(void)
+{
+ /* Compile time assertion checks */
+ BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
+ BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
+
+ return platform_driver_register(&cctrng_driver);
+}
+module_init(cctrng_mod_init);
+
+static void __exit cctrng_mod_exit(void)
+{
+ platform_driver_unregister(&cctrng_driver);
+}
+module_exit(cctrng_mod_exit);
+
+/* Module description */
+MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
+MODULE_AUTHOR("ARM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cctrng.h b/drivers/char/hw_random/cctrng.h
new file mode 100644
index 000000000000..1f2fde95adcb
--- /dev/null
+++ b/drivers/char/hw_random/cctrng.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
+
+#include <linux/bitops.h>
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+/* hwrng quality: bits of true entropy per 1024 bits of input */
+#define CC_TRNG_QUALITY 1024
+
+/* CryptoCell TRNG HW definitions */
+#define CC_TRNG_NUM_OF_ROSCS 4
+/* The number of words generated in the entropy holding register (EHR):
+ * 6 words (192 bits), per the HW implementation
+ */
+#define CC_TRNG_EHR_IN_WORDS 6
+#define CC_TRNG_EHR_IN_BITS (CC_TRNG_EHR_IN_WORDS * BITS_PER_TYPE(u32))
+
+#define CC_HOST_RNG_IRQ_MASK BIT(CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT)
+
+/* RNG interrupt mask */
+#define CC_RNG_INT_MASK (BIT(CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT))
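+
+/* Usage note: the interrupt-enable path unmasks exactly this set by
+ * writing the complement, (u32)~CC_RNG_INT_MASK, to CC_RNG_IMR_REG_OFFSET,
+ * and the ISR masks everything again (0xFFFFFFFF) until the deferred work
+ * has drained the EHR.
+ */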
+
+// --------------------------------------
+// BLOCK: RNG
+// --------------------------------------
+#define CC_RNG_IMR_REG_OFFSET 0x0100UL
+#define CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT 0x0UL
+#define CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT 0x1UL
+#define CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT 0x2UL
+#define CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT 0x3UL
+#define CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT 0x4UL
+#define CC_RNG_ISR_REG_OFFSET 0x0104UL
+#define CC_RNG_ISR_EHR_VALID_BIT_SHIFT 0x0UL
+#define CC_RNG_ISR_EHR_VALID_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SHIFT 0x1UL
+#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_CRNGT_ERR_BIT_SHIFT 0x2UL
+#define CC_RNG_ISR_CRNGT_ERR_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_WATCHDOG_BIT_SHIFT 0x4UL
+#define CC_RNG_ISR_WATCHDOG_BIT_SIZE 0x1UL
+#define CC_RNG_ICR_REG_OFFSET 0x0108UL
+#define CC_TRNG_CONFIG_REG_OFFSET 0x010CUL
+#define CC_EHR_DATA_0_REG_OFFSET 0x0114UL
+#define CC_RND_SOURCE_ENABLE_REG_OFFSET 0x012CUL
+#define CC_SAMPLE_CNT1_REG_OFFSET 0x0130UL
+#define CC_TRNG_DEBUG_CONTROL_REG_OFFSET 0x0138UL
+#define CC_RNG_SW_RESET_REG_OFFSET 0x0140UL
+#define CC_RNG_CLK_ENABLE_REG_OFFSET 0x01C4UL
+#define CC_RNG_DMA_ENABLE_REG_OFFSET 0x01C8UL
+#define CC_RNG_WATCHDOG_VAL_REG_OFFSET 0x01D8UL
+// --------------------------------------
+// BLOCK: SEC_HOST_RGF
+// --------------------------------------
+#define CC_HOST_RGF_IRR_REG_OFFSET 0x0A00UL
+#define CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_RGF_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_RGF_ICR_REG_OFFSET 0x0A08UL
+
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0x0A78UL
+
+// --------------------------------------
+// BLOCK: NVM
+// --------------------------------------
+#define CC_NVM_IS_IDLE_REG_OFFSET 0x0F10UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 65952393e1bb..7290c603fcb8 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -392,11 +392,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "%s: error getting IRQ resource - %d\n",
- __func__, irq);
+ if (irq < 0)
return irq;
- }
err = devm_request_irq(dev, irq, omap4_rng_irq,
IRQF_TRIGGER_NONE, dev_name(dev), priv);
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
index ddfbabaa5f8f..49b2e02537dd 100644
--- a/drivers/char/hw_random/optee-rng.c
+++ b/drivers/char/hw_random/optee-rng.c
@@ -226,7 +226,7 @@ static int optee_rng_probe(struct device *dev)
return -ENODEV;
/* Open session with hwrng Trusted App */
- memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ export_uuid(sess_arg.uuid, &rng_device->id.uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index d7516a446987..008e6db9ce01 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -328,10 +328,8 @@ static int xgene_rng_probe(struct platform_device *pdev)
return PTR_ERR(ctx->csr_base);
rc = platform_get_irq(pdev, 0);
- if (rc < 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
+ if (rc < 0)
return rc;
- }
ctx->irq = rc;
dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 7dc2c3ec4051..07847d9a459a 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -14,7 +14,7 @@ menuconfig IPMI_HANDLER
IPMI is a standard for managing sensors (temperature,
voltage, etc.) in a system.
- See <file:Documentation/IPMI.txt> for more details on the driver.
+ See <file:Documentation/driver-api/ipmi.rst> for more details on the driver.
If unsure, say N.
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index d36aeacb290e..a395e2e70dc5 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -399,15 +399,15 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
struct device *dev = &pdev->dev;
int rc;
- bt_bmc->irq = platform_get_irq(pdev, 0);
- if (!bt_bmc->irq)
- return -ENODEV;
+ bt_bmc->irq = platform_get_irq_optional(pdev, 0);
+ if (bt_bmc->irq < 0)
+ return bt_bmc->irq;
rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
DEVICE_NAME, bt_bmc);
if (rc < 0) {
dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
- bt_bmc->irq = 0;
+ bt_bmc->irq = rc;
return rc;
}
@@ -430,9 +430,6 @@ static int bt_bmc_probe(struct platform_device *pdev)
struct device *dev;
int rc;
- if (!pdev || !pdev->dev.of_node)
- return -ENODEV;
-
dev = &pdev->dev;
dev_info(dev, "Found bt bmc device\n");
@@ -466,9 +463,9 @@ static int bt_bmc_probe(struct platform_device *pdev)
init_waitqueue_head(&bt_bmc->queue);
bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR,
- bt_bmc->miscdev.name = DEVICE_NAME,
- bt_bmc->miscdev.fops = &bt_bmc_fops,
- bt_bmc->miscdev.parent = dev;
+ bt_bmc->miscdev.name = DEVICE_NAME,
+ bt_bmc->miscdev.fops = &bt_bmc_fops,
+ bt_bmc->miscdev.parent = dev;
rc = misc_register(&bt_bmc->miscdev);
if (rc) {
dev_err(dev, "Unable to register misc device\n");
@@ -477,7 +474,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
bt_bmc_config_irq(bt_bmc, pdev);
- if (bt_bmc->irq) {
+ if (bt_bmc->irq >= 0) {
dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
} else {
dev_info(dev, "No IRQ; using timer\n");
@@ -503,7 +500,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
misc_deregister(&bt_bmc->miscdev);
- if (!bt_bmc->irq)
+ if (bt_bmc->irq < 0)
del_timer_sync(&bt_bmc->poll_timer);
return 0;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c48d8f086382..e1b22fe0916c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -33,6 +33,7 @@
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
+#include <linux/vmalloc.h>
#define IPMI_DRIVER_VERSION "39.2"
@@ -1153,7 +1154,7 @@ static void free_user_work(struct work_struct *work)
remove_work);
cleanup_srcu_struct(&user->release_barrier);
- kfree(user);
+ vfree(user);
}
int ipmi_create_user(unsigned int if_num,
@@ -1185,7 +1186,7 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
+ new_user = vzalloc(sizeof(*new_user));
if (!new_user)
return -ENOMEM;
@@ -1232,7 +1233,7 @@ int ipmi_create_user(unsigned int if_num,
out_kfree:
srcu_read_unlock(&ipmi_interfaces_srcu, index);
- kfree(new_user);
+ vfree(new_user);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
@@ -3171,7 +3172,7 @@ static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
goto out;
}
- guid_copy(&bmc->fetch_guid, (guid_t *)(msg->msg.data + 1));
+ import_guid(&bmc->fetch_guid, msg->msg.data + 1);
/*
* Make sure the guid data is available before setting
* dyn_guid_set.
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
index 42a925f8cf69..4fbb4e18bae2 100644
--- a/drivers/char/ipmi/ipmi_si_hotmod.c
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -18,7 +18,7 @@ static int hotmod_handler(const char *val, const struct kernel_param *kp);
module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
- " Documentation/IPMI.txt in the kernel sources for the"
+ " Documentation/driver-api/ipmi.rst in the kernel sources for the"
" gory details.");
/*
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index c7cc8538b84a..77b8d551ae7f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -968,7 +968,7 @@ static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
* that are not BT and do not have interrupts. It starts spinning
* when an operation is complete or until max_busy tells it to stop
* (if that is enabled). See the paragraph on kimid_max_busy_us in
- * Documentation/IPMI.txt for details.
+ * Documentation/driver-api/ipmi.rst for details.
*/
static int ipmi_thread(void *data)
{
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 638c693e17ad..129b5713f187 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -393,6 +393,8 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
dev_info(io.dev, "%pR regsize %d spacing %d irq %d\n",
res, io.regsize, io.regspacing, io.irq);
+ request_module("acpi_ipmi");
+
return ipmi_si_add_smi(&io);
err_free:
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index b7145f370d3b..198b65d45c5e 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -189,8 +189,6 @@ struct ssif_addr_info {
struct device *dev;
struct i2c_client *client;
- struct i2c_client *added_client;
-
struct mutex clients_mutex;
struct list_head clients;
@@ -1472,6 +1470,7 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
if (acpi_handle) {
ssif_info->addr_source = SI_ACPI;
ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
+ request_module("acpi_ipmi");
return true;
}
#endif
@@ -1940,21 +1939,6 @@ out_remove_attr:
goto out;
}
-static int ssif_adapter_handler(struct device *adev, void *opaque)
-{
- struct ssif_addr_info *addr_info = opaque;
-
- if (adev->type != &i2c_adapter_type)
- return 0;
-
- addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
- &addr_info->binfo);
-
- if (!addr_info->adapter_name)
- return 1; /* Only try the first I2C adapter by default. */
- return 0;
-}
-
static int new_ssif_client(int addr, char *adapter_name,
int debug, int slave_addr,
enum ipmi_addr_src addr_src,
@@ -1998,9 +1982,7 @@ static int new_ssif_client(int addr, char *adapter_name,
list_add_tail(&addr_info->link, &ssif_infos);
- if (initialized)
- i2c_for_each_dev(addr_info, ssif_adapter_handler);
- /* Otherwise address list will get it */
+ /* Address list will get it */
out_unlock:
mutex_unlock(&ssif_infos_mutex);
@@ -2120,8 +2102,6 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
mutex_lock(&ssif_infos_mutex);
- i2c_unregister_device(addr_info->added_client);
-
list_del(&addr_info->link);
kfree(addr_info);
mutex_unlock(&ssif_infos_mutex);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 43dd0891ca1e..31cae88a730b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -31,11 +31,15 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
+#include <linux/pseudo_fs.h>
+#include <uapi/linux/magic.h>
+#include <linux/mount.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
+#define DEVMEM_MINOR 1
#define DEVPORT_MINOR 4
static inline unsigned long size_inside_page(unsigned long start,
@@ -805,12 +809,64 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
+static struct inode *devmem_inode;
+
+#ifdef CONFIG_IO_STRICT_DEVMEM
+void revoke_devmem(struct resource *res)
+{
+ struct inode *inode = READ_ONCE(devmem_inode);
+
+ /*
+ * Check that the initialization has completed. Losing the race
+ * is ok because it means drivers are claiming resources before
+ * the fs_initcall level of init and thereby prevent /dev/mem from
+ * establishing mappings.
+ */
+ if (!inode)
+ return;
+
+ /*
+ * The expectation is that the driver has successfully marked
+ * the resource busy by this point, so devmem_is_allowed()
+ * should start returning false; for performance, however, this
+ * check does not iterate over the entire resource range.
+ */
+ if (devmem_is_allowed(PHYS_PFN(res->start)) &&
+ devmem_is_allowed(PHYS_PFN(res->end))) {
+ /*
+ * *cringe* iomem=relaxed says "go ahead, what's the
+ * worst that can happen?"
+ */
+ return;
+ }
+
+ unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
+}
+#endif
+
static int open_port(struct inode *inode, struct file *filp)
{
+ int rc;
+
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- return security_locked_down(LOCKDOWN_DEV_MEM);
+ rc = security_locked_down(LOCKDOWN_DEV_MEM);
+ if (rc)
+ return rc;
+
+ if (iminor(inode) != DEVMEM_MINOR)
+ return 0;
+
+ /*
+ * Use a unified address space to have a single point to manage
+ * revocations when drivers want to take over a /dev/mem mapped
+ * range.
+ */
+ inode->i_mapping = devmem_inode->i_mapping;
+ filp->f_mapping = inode->i_mapping;
+
+ return 0;
}
#define zero_lseek null_lseek
@@ -885,7 +941,7 @@ static const struct memdev {
fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
- [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
+ [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
[2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
@@ -939,6 +995,45 @@ static char *mem_devnode(struct device *dev, umode_t *mode)
static struct class *mem_class;
+static int devmem_fs_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type devmem_fs_type = {
+ .name = "devmem",
+ .owner = THIS_MODULE,
+ .init_fs_context = devmem_fs_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static int devmem_init_inode(void)
+{
+ static struct vfsmount *devmem_vfs_mount;
+ static int devmem_fs_cnt;
+ struct inode *inode;
+ int rc;
+
+ rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
+ if (rc < 0) {
+ pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
+ return rc;
+ }
+
+ inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+ pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
+ simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
+ return rc;
+ }
+
+ /* publish /dev/mem initialized */
+ WRITE_ONCE(devmem_inode, inode);
+
+ return 0;
+}
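+
+/*
+ * Note: devmem_init_inode() pins a private pseudo-filesystem and publishes
+ * a single anonymous inode; open_port() then points every /dev/mem file at
+ * this shared i_mapping, giving revoke_devmem() one address_space to unmap
+ * when a driver claims a range.
+ */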
+
static int __init chr_dev_init(void)
{
int minor;
@@ -960,6 +1055,8 @@ static int __init chr_dev_init(void)
*/
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;
+ if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
+ continue;
device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
NULL, devlist[minor].name);
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 4667844eee69..8206412d25ba 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -232,8 +232,6 @@ static ssize_t nvram_misc_read(struct file *file, char __user *buf,
ssize_t ret;
- if (!access_ok(buf, count))
- return -EFAULT;
if (*ppos >= nvram_size)
return 0;
@@ -264,8 +262,6 @@ static ssize_t nvram_misc_write(struct file *file, const char __user *buf,
char *tmp;
ssize_t ret;
- if (!access_ok(buf, count))
- return -EFAULT;
if (*ppos >= nvram_size)
return 0;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 4edb4174a1e2..89681f07bc78 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1404,7 +1404,6 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
unsigned int iobase = dev->p_dev->resource[0]->start;
struct inode *inode = file_inode(filp);
struct pcmcia_device *link;
- int size;
int rc;
void __user *argp = (void __user *)arg;
#ifdef CM4000_DEBUG
@@ -1441,19 +1440,6 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
DEBUGP(4, dev, "iocnr mismatch\n");
goto out;
}
- size = _IOC_SIZE(cmd);
- rc = -EFAULT;
- DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n",
- _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd);
-
- if (_IOC_DIR(cmd) & _IOC_READ) {
- if (!access_ok(argp, size))
- goto out;
- }
- if (_IOC_DIR(cmd) & _IOC_WRITE) {
- if (!access_ok(argp, size))
- goto out;
- }
rc = 0;
switch (cmd) {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0d10e31fd342..a7cf6aa65908 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -327,7 +327,6 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
-#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
@@ -337,6 +336,7 @@
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
+#include <crypto/sha.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@@ -1397,14 +1397,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
__u32 w[5];
unsigned long l[LONGS(20)];
} hash;
- __u32 workspace[SHA_WORKSPACE_WORDS];
+ __u32 workspace[SHA1_WORKSPACE_WORDS];
unsigned long flags;
/*
* If we have an architectural hardware random number
* generator, use it for SHA's initial vector
*/
- sha_init(hash.w);
+ sha1_init(hash.w);
for (i = 0; i < LONGS(20); i++) {
unsigned long v;
if (!arch_get_random_long(&v))
@@ -1415,7 +1415,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
/* Generate a hash across the pool, 16 words (512 bits) at a time */
spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+ sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
* We mix the hash back into the pool to prevent backtracking
@@ -2057,7 +2057,7 @@ static char sysctl_bootid[16];
* sysctl system call, as 16 bytes of binary data.
*/
static int proc_do_uuid(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 6d81bb3bb503..896a3550fba9 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -777,17 +777,21 @@ static int __init tlclk_init(void)
{
int ret;
+ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
+ if (!alarm_events) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
if (ret < 0) {
printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+ kfree(alarm_events);
return ret;
}
tlclk_major = ret;
- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
- if (!alarm_events) {
- ret = -ENOMEM;
- goto out1;
- }
/* Read telecom clock IRQ number (Set by BIOS) */
if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
@@ -796,7 +800,6 @@ static int __init tlclk_init(void)
ret = -EBUSY;
goto out2;
}
- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
@@ -837,8 +840,8 @@ out3:
release_region(TLCLK_BASE, 8);
out2:
kfree(alarm_events);
-out1:
unregister_chrdev(tlclk_major, "telco_clock");
+out1:
return ret;
}
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index e741b1157525..37a05800980c 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -51,8 +51,7 @@ static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos)
int i;
event_header = addr;
- size = sizeof(struct tcg_pcr_event) - sizeof(event_header->event)
- + event_header->event_size;
+ size = struct_size(event_header, event, event_header->event_size);
if (*pos == 0) {
if (addr + size < limit) {
@@ -98,8 +97,8 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
event_header = log->bios_event_log;
if (v == SEQ_START_TOKEN) {
- event_size = sizeof(struct tcg_pcr_event) -
- sizeof(event_header->event) + event_header->event_size;
+ event_size = struct_size(event_header, event,
+ event_header->event_size);
marker = event_header;
} else {
event = v;
@@ -136,9 +135,8 @@ static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v)
size_t size;
if (v == SEQ_START_TOKEN) {
- size = sizeof(struct tcg_pcr_event) -
- sizeof(event_header->event) + event_header->event_size;
-
+ size = struct_size(event_header, event,
+ event_header->event_size);
temp_ptr = event_header;
if (size > 0)
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 22bf553ccf9d..2491a2cb54a2 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -241,7 +241,7 @@ static int ftpm_tee_probe(struct platform_device *pdev)
/* Open a session with fTPM TA */
memset(&sess_arg, 0, sizeof(sess_arg));
- memcpy(sess_arg.uuid, ftpm_ta_uuid.b, TEE_IOCTL_UUID_LEN);
+ export_uuid(sess_arg.uuid, &ftpm_ta_uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 3cbaec925606..00c5e3acee46 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -871,7 +871,7 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
return 0;
/* Try lock this page */
- if (pipe_buf_steal(pipe, buf) == 0) {
+ if (pipe_buf_try_steal(pipe, buf)) {
/* Get reference and unlock page for moving */
get_page(buf->page);
unlock_page(buf->page);
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index f4169cc2fd31..1b431110b6f5 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -105,7 +105,7 @@ obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
-obj-$(CONFIG_ARCH_SPRD) += sprd/
+obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
@@ -114,7 +114,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_U8500) += ux500/
-obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
+obj-y += versatile/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_X86) += x86/
endif
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index d5946f7486d6..374afcab89af 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -95,6 +95,7 @@ struct clockgen {
};
static struct clockgen clockgen;
+static bool add_cpufreq_dev __initdata;
static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
@@ -1019,7 +1020,7 @@ static void __init create_muxes(struct clockgen *cg)
}
}
-static void __init clockgen_init(struct device_node *np);
+static void __init _clockgen_init(struct device_node *np, bool legacy);
/*
* Legacy nodes may get probed before the parent clockgen node.
@@ -1030,7 +1031,7 @@ static void __init clockgen_init(struct device_node *np);
static void __init legacy_init_clockgen(struct device_node *np)
{
if (!clockgen.node)
- clockgen_init(of_get_parent(np));
+ _clockgen_init(of_get_parent(np), true);
}
/* Legacy node */
@@ -1447,7 +1448,7 @@ static bool __init has_erratum_a4510(void)
}
#endif
-static void __init clockgen_init(struct device_node *np)
+static void __init _clockgen_init(struct device_node *np, bool legacy)
{
int i, ret;
bool is_old_ls1021a = false;
@@ -1516,12 +1517,35 @@ static void __init clockgen_init(struct device_node *np)
__func__, np, ret);
}
+ /* Don't create cpufreq device for legacy clockgen blocks */
+ add_cpufreq_dev = !legacy;
+
return;
err:
iounmap(clockgen.regs);
clockgen.regs = NULL;
}
+static void __init clockgen_init(struct device_node *np)
+{
+ _clockgen_init(np, false);
+}
+
+static int __init clockgen_cpufreq_init(void)
+{
+ struct platform_device *pdev;
+
+ if (add_cpufreq_dev) {
+ pdev = platform_device_register_simple("qoriq-cpufreq", -1,
+ NULL, 0);
+ if (IS_ERR(pdev))
+ pr_err("Couldn't register qoriq-cpufreq err=%ld\n",
+ PTR_ERR(pdev));
+ }
+ return 0;
+}
+device_initcall(clockgen_cpufreq_init);
+
CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 39c59f063aa0..407f6919604c 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -114,7 +114,11 @@ static int clk_pm_runtime_get(struct clk_core *core)
return 0;
ret = pm_runtime_get_sync(core->dev);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ pm_runtime_put_noidle(core->dev);
+ return ret;
+ }
+ return 0;
}
static void clk_pm_runtime_put(struct clk_core *core)
@@ -3519,6 +3523,9 @@ static int __clk_core_init(struct clk_core *core)
out:
clk_pm_runtime_put(core);
unlock:
+ if (ret)
+ hlist_del_init(&core->child_node);
+
clk_prepare_unlock();
if (!ret)
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index ea3c70d1307e..9e28db8125cd 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -274,6 +274,13 @@ config COMMON_CLK_MT8173
---help---
This driver supports MediaTek MT8173 clocks.
+config COMMON_CLK_MT8173_MMSYS
+ bool "Clock driver for MediaTek MT8173 mmsys"
+ depends on COMMON_CLK_MT8173
+ default COMMON_CLK_MT8173
+ help
+ This driver supports MediaTek MT8173 mmsys clocks.
+
config COMMON_CLK_MT8183
bool "Clock driver for MediaTek MT8183"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 8cdb76a5cd71..bb0536942075 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
+obj-$(CONFIG_COMMON_CLK_MT8173_MMSYS) += clk-mt8173-mm.o
obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
obj-$(CONFIG_COMMON_CLK_MT8183_AUDIOSYS) += clk-mt8183-audio.o
obj-$(CONFIG_COMMON_CLK_MT8183_CAMSYS) += clk-mt8183-cam.o
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index 054b597d4a73..cb18e1849492 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -79,16 +79,12 @@ static const struct mtk_gate mm_clks[] = {
GATE_DISP1(CLK_MM_TVE_FMM, "mm_tve_fmm", "mm_sel", 14),
};
-static const struct of_device_id of_match_clk_mt2701_mm[] = {
- { .compatible = "mediatek,mt2701-mmsys", },
- {}
-};
-
static int clk_mt2701_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
@@ -108,7 +104,6 @@ static struct platform_driver clk_mt2701_mm_drv = {
.probe = clk_mt2701_mm_probe,
.driver = {
.name = "clk-mt2701-mm",
- .of_match_table = of_match_clk_mt2701_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index 1c5948be35f3..5519c3d68c1f 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -128,9 +128,10 @@ static const struct mtk_gate mm_clks[] = {
static int clk_mt2712_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -146,16 +147,10 @@ static int clk_mt2712_mm_probe(struct platform_device *pdev)
return r;
}
-static const struct of_device_id of_match_clk_mt2712_mm[] = {
- { .compatible = "mediatek,mt2712-mmsys", },
- {}
-};
-
static struct platform_driver clk_mt2712_mm_drv = {
.probe = clk_mt2712_mm_probe,
.driver = {
.name = "clk-mt2712-mm",
- .of_match_table = of_match_clk_mt2712_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index fb5fbb8e3e41..059c1a41ac7a 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -84,15 +84,11 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM1(CLK_MM_DISP_OVL_FBDC, "mm_disp_ovl_fbdc", "mm_sel", 16),
};
-static const struct of_device_id of_match_clk_mt6779_mm[] = {
- { .compatible = "mediatek,mt6779-mmsys", },
- {}
-};
-
static int clk_mt6779_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -106,7 +102,6 @@ static struct platform_driver clk_mt6779_mm_drv = {
.probe = clk_mt6779_mm_probe,
.driver = {
.name = "clk-mt6779-mm",
- .of_match_table = of_match_clk_mt6779_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index 8f05653b387d..01fdce287247 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -92,16 +92,12 @@ static const struct mtk_gate mm_clks[] = {
"clk26m", 3),
};
-static const struct of_device_id of_match_clk_mt6797_mm[] = {
- { .compatible = "mediatek,mt6797-mmsys", },
- {}
-};
-
static int clk_mt6797_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
@@ -121,7 +117,6 @@ static struct platform_driver clk_mt6797_mm_drv = {
.probe = clk_mt6797_mm_probe,
.driver = {
.name = "clk-mt6797-mm",
- .of_match_table = of_match_clk_mt6797_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
new file mode 100644
index 000000000000..36fa20be77b6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8173-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mt8173_mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "rtc_sel", 15),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_SPLIT1, "mm_disp_split1", "mm_sel", 29),
+ GATE_MM0(CLK_MM_DISP_MERGE, "mm_disp_merge", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DISP_PWM0MM, "mm_disp_pwm0mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DISP_PWM026M, "mm_disp_pwm026m", "pwm_sel", 1),
+ GATE_MM1(CLK_MM_DISP_PWM1MM, "mm_disp_pwm1mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DISP_PWM126M, "mm_disp_pwm126m", "pwm_sel", 3),
+ GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
+ GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_dig", 5),
+ GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
+ GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_dig", 7),
+ GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "dpi0_sel", 8),
+ GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
+ GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "lvds_pxl", 10),
+ GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
+ GATE_MM1(CLK_MM_HDMI_PIXEL, "mm_hdmi_pixel", "dpi0_sel", 12),
+ GATE_MM1(CLK_MM_HDMI_PLLCK, "mm_hdmi_pllck", "hdmi_sel", 13),
+ GATE_MM1(CLK_MM_HDMI_AUDIO, "mm_hdmi_audio", "apll1", 14),
+ GATE_MM1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll2", 15),
+ GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "lvds_pxl", 16),
+ GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvds_cts", 17),
+ GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
+ GATE_MM1(CLK_MM_HDMI_HDCP, "mm_hdmi_hdcp", "hdcp_sel", 19),
+ GATE_MM1(CLK_MM_HDMI_HDCP24M, "mm_hdmi_hdcp24m", "hdcp_24m_sel", 20),
+};
+
+struct clk_mt8173_mm_driver_data {
+ const struct mtk_gate *gates_clk;
+ int gates_num;
+};
+
+static const struct clk_mt8173_mm_driver_data mt8173_mmsys_driver_data = {
+ .gates_clk = mt8173_mm_clks,
+ .gates_num = ARRAY_SIZE(mt8173_mm_clks),
+};
+
+static int clk_mt8173_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ const struct clk_mt8173_mm_driver_data *data;
+ struct clk_onecell_data *clk_data;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ data = &mt8173_mmsys_driver_data;
+
+ ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
+ clk_data);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8173_mm_drv = {
+ .driver = {
+ .name = "clk-mt8173-mm",
+ },
+ .probe = clk_mt8173_mm_probe,
+};
+
+builtin_platform_driver(clk_mt8173_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 537a7f49b0f7..8f898ac476c0 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -753,93 +753,6 @@ static const struct mtk_gate img_clks[] __initconst = {
GATE_IMG(CLK_IMG_FD, "img_fd", "mm_sel", 11),
};
-static const struct mtk_gate_regs mm0_cg_regs __initconst = {
- .set_ofs = 0x0104,
- .clr_ofs = 0x0108,
- .sta_ofs = 0x0100,
-};
-
-static const struct mtk_gate_regs mm1_cg_regs __initconst = {
- .set_ofs = 0x0114,
- .clr_ofs = 0x0118,
- .sta_ofs = 0x0110,
-};
-
-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-static const struct mtk_gate mm_clks[] __initconst = {
- /* MM0 */
- GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
- GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
- GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
- GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
- GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
- GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
- GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
- GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
- GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
- GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
- GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
- GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
- GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
- GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
- GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "rtc_sel", 15),
- GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
- GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
- GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
- GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
- GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
- GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
- GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
- GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
- GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
- GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
- GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
- GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
- GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
- GATE_MM0(CLK_MM_DISP_SPLIT1, "mm_disp_split1", "mm_sel", 29),
- GATE_MM0(CLK_MM_DISP_MERGE, "mm_disp_merge", "mm_sel", 30),
- GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
- /* MM1 */
- GATE_MM1(CLK_MM_DISP_PWM0MM, "mm_disp_pwm0mm", "mm_sel", 0),
- GATE_MM1(CLK_MM_DISP_PWM026M, "mm_disp_pwm026m", "pwm_sel", 1),
- GATE_MM1(CLK_MM_DISP_PWM1MM, "mm_disp_pwm1mm", "mm_sel", 2),
- GATE_MM1(CLK_MM_DISP_PWM126M, "mm_disp_pwm126m", "pwm_sel", 3),
- GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
- GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_dig", 5),
- GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
- GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_dig", 7),
- GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "dpi0_sel", 8),
- GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
- GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "lvds_pxl", 10),
- GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
- GATE_MM1(CLK_MM_HDMI_PIXEL, "mm_hdmi_pixel", "dpi0_sel", 12),
- GATE_MM1(CLK_MM_HDMI_PLLCK, "mm_hdmi_pllck", "hdmi_sel", 13),
- GATE_MM1(CLK_MM_HDMI_AUDIO, "mm_hdmi_audio", "apll1", 14),
- GATE_MM1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll2", 15),
- GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "lvds_pxl", 16),
- GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvds_cts", 17),
- GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
- GATE_MM1(CLK_MM_HDMI_HDCP, "mm_hdmi_hdcp", "hdcp_sel", 19),
- GATE_MM1(CLK_MM_HDMI_HDCP24M, "mm_hdmi_hdcp24m", "hdcp_24m_sel", 20),
-};
-
static const struct mtk_gate_regs vdec0_cg_regs __initconst = {
.set_ofs = 0x0000,
.clr_ofs = 0x0004,
@@ -1144,23 +1057,6 @@ static void __init mtk_imgsys_init(struct device_node *node)
}
CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8173-imgsys", mtk_imgsys_init);
-static void __init mtk_mmsys_init(struct device_node *node)
-{
- struct clk_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
-
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_mmsys, "mediatek,mt8173-mmsys", mtk_mmsys_init);
-
static void __init mtk_vdecsys_init(struct device_node *node)
{
struct clk_onecell_data *clk_data;
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
index 720c696b506d..9d60e09619c1 100644
--- a/drivers/clk/mediatek/clk-mt8183-mm.c
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -84,8 +84,9 @@ static const struct mtk_gate mm_clks[] = {
static int clk_mt8183_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -95,16 +96,10 @@ static int clk_mt8183_mm_probe(struct platform_device *pdev)
return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
-static const struct of_device_id of_match_clk_mt8183_mm[] = {
- { .compatible = "mediatek,mt8183-mmsys", },
- {}
-};
-
static struct platform_driver clk_mt8183_mm_drv = {
.probe = clk_mt8183_mm_probe,
.driver = {
.name = "clk-mt8183-mm",
- .of_match_table = of_match_clk_mt8183_mm,
},
};
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 11ec6f466467..abb121f8de52 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -377,6 +377,7 @@ config SM_GCC_8150
config SM_GCC_8250
tristate "SM8250 Global Clock Controller"
+ select QCOM_GDSC
help
Support for the global clock controller on SM8250 devices.
Say Y if you want to use peripheral devices such as UART,
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index ef98fdc51755..732bc7c937e6 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -76,8 +76,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_even",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- .name = "bi_tcxo",
+ .hw = &gpll0.clkr.hw,
},
.num_parents = 1,
.ops = &clk_trion_pll_postdiv_ops,
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index d17cfb7a3ff4..d7243c09cc84 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -156,8 +156,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" };
PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" };
-PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" };
-
PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
@@ -468,16 +466,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
RK2928_CLKGATE_CON(2), 8, GFLAGS),
- GATE(0, "cpll_gpu", "cpll", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "gpll_gpu", "gpll", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "hdmiphy_gpu", "hdmiphy", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "usb480m_gpu", "usb480m", 0,
+ COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0,
+ RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 13, GFLAGS),
- COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0,
- RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS),
COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
@@ -582,8 +573,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),
/* PD_GPU */
- GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
- GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS),
+ GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
+ GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
/* PD_BUS */
GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 64e229ddf2a5..e931319dcc9d 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1292,7 +1292,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0 },
{ TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0 },
{ TEGRA124_CLK_UARTD, TEGRA124_CLK_PLL_P, 408000000, 0 },
- { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 0 },
+ { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 282240000, 0 },
{ TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA124_CLK_I2S0, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S1, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index e001b9bcb6bf..7dc30dd6c8d5 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -212,7 +212,7 @@ static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = {
};
static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = {
- { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+ { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk-24mhz-clkctrl:0000:0" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 7d215cdf9dda..9daf3825f289 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -73,6 +73,7 @@ static const char *enable_init_clks[] = {
"ddr_pll_clk1",
"ddr_pll_clk2",
"ddr_pll_clk3",
+ "sysclk6_ck",
};
int __init dm816x_dt_clk_init(void)
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 062266034d84..864c484bde1b 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -255,24 +255,53 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
return entry->clk;
}
+/* Get clkctrl clock base name based on clkctrl_name or dts node */
+static const char * __init clkctrl_get_clock_name(struct device_node *np,
+ const char *clkctrl_name,
+ int offset, int index,
+ bool legacy_naming)
+{
+ char *clock_name;
+
+ /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
+ if (clkctrl_name && !legacy_naming) {
+ clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
+ clkctrl_name, offset, index);
+ strreplace(clock_name, '_', '-');
+
+ return clock_name;
+ }
+
+ /* l4per:1234:0 old style naming based on clkctrl_name */
+ if (clkctrl_name)
+ return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
+ clkctrl_name, offset, index);
+
+ /* l4per_cm:1234:0 old style naming based on parent node name */
+ if (legacy_naming)
+ return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
+ np->parent, offset, index);
+
+ /* l4per-clkctrl:1234:0 style naming based on node name */
+ return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
+}
+
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
struct device_node *node, struct clk_hw *clk_hw,
u16 offset, u8 bit, const char * const *parents,
- int num_parents, const struct clk_ops *ops)
+ int num_parents, const struct clk_ops *ops,
+ const char *clkctrl_name)
{
struct clk_init_data init = { NULL };
struct clk *clk;
struct omap_clkctrl_clk *clkctrl_clk;
int ret = 0;
- if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
- init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
- node->parent, node, offset,
- bit);
- else
- init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node,
- offset, bit);
+ init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
+ ti_clk_get_features()->flags &
+ TI_CLK_CLKCTRL_COMPAT);
+
clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
if (!init.name || !clkctrl_clk) {
ret = -ENOMEM;
@@ -309,7 +338,7 @@ static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
struct device_node *node, u16 offset,
const struct omap_clkctrl_bit_data *data,
- void __iomem *reg)
+ void __iomem *reg, const char *clkctrl_name)
{
struct clk_hw_omap *clk_hw;
@@ -322,7 +351,7 @@ _ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
data->bit, data->parents, 1,
- &omap_gate_clk_ops))
+ &omap_gate_clk_ops, clkctrl_name))
kfree(clk_hw);
}
@@ -330,7 +359,7 @@ static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
struct device_node *node, u16 offset,
const struct omap_clkctrl_bit_data *data,
- void __iomem *reg)
+ void __iomem *reg, const char *clkctrl_name)
{
struct clk_omap_mux *mux;
int num_parents = 0;
@@ -357,7 +386,7 @@ _ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
data->bit, data->parents, num_parents,
- &ti_clk_mux_ops))
+ &ti_clk_mux_ops, clkctrl_name))
kfree(mux);
}
@@ -365,7 +394,7 @@ static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
struct device_node *node, u16 offset,
const struct omap_clkctrl_bit_data *data,
- void __iomem *reg)
+ void __iomem *reg, const char *clkctrl_name)
{
struct clk_omap_divider *div;
const struct omap_clkctrl_div_data *div_data = data->data;
@@ -393,7 +422,7 @@ _ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
data->bit, data->parents, 1,
- &ti_clk_divider_ops))
+ &ti_clk_divider_ops, clkctrl_name))
kfree(div);
}
@@ -401,7 +430,7 @@ static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
struct device_node *node,
const struct omap_clkctrl_reg_data *data,
- void __iomem *reg)
+ void __iomem *reg, const char *clkctrl_name)
{
const struct omap_clkctrl_bit_data *bits = data->bit_data;
@@ -412,17 +441,17 @@ _ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
switch (bits->type) {
case TI_CLK_GATE:
_ti_clkctrl_setup_gate(provider, node, data->offset,
- bits, reg);
+ bits, reg, clkctrl_name);
break;
case TI_CLK_DIVIDER:
_ti_clkctrl_setup_div(provider, node, data->offset,
- bits, reg);
+ bits, reg, clkctrl_name);
break;
case TI_CLK_MUX:
_ti_clkctrl_setup_mux(provider, node, data->offset,
- bits, reg);
+ bits, reg, clkctrl_name);
break;
default:
@@ -461,42 +490,10 @@ static char * __init clkctrl_get_name(struct device_node *np)
return name;
}
}
- of_node_put(np);
return NULL;
}
-/* Get clkctrl clock base name based on clkctrl_name or dts node */
-static const char * __init clkctrl_get_clock_name(struct device_node *np,
- const char *clkctrl_name,
- int offset, int index,
- bool legacy_naming)
-{
- char *clock_name;
-
- /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
- if (clkctrl_name && !legacy_naming) {
- clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
- clkctrl_name, offset, index);
- strreplace(clock_name, '_', '-');
-
- return clock_name;
- }
-
- /* l4per:1234:0 old style naming based on clkctrl_name */
- if (clkctrl_name)
- return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
- clkctrl_name, offset, index);
-
- /* l4per_cm:1234:0 old style naming based on parent node name */
- if (legacy_naming)
- return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
- np->parent, offset, index);
-
- /* l4per-clkctrl:1234:0 style naming based on node name */
- return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
-}
-
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
struct omap_clkctrl_provider *provider;
@@ -664,7 +661,7 @@ clkdm_found:
hw->enable_reg.ptr = provider->base + reg_data->offset;
_ti_clkctrl_setup_subclks(provider, node, reg_data,
- hw->enable_reg.ptr);
+ hw->enable_reg.ptr, clkctrl_name);
if (reg_data->flags & CLKF_SW_SUP)
hw->enable_bit = MODULEMODE_SWCTRL;
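Annotation (illustrative outputs, not part of the patch): with clkctrl_get_clock_name() now hoisted above its users, the four branches produce names like the following, assuming a hypothetical clkctrl_name of "l4_per", offset 0x1234, index 0:

/* clkctrl_name, new naming:    "l4-per-clkctrl:1234:0"  (after strreplace '_' -> '-') */
/* clkctrl_name, legacy naming: "l4_per_cm:clk:1234:0" */
/* no name, legacy naming:      "<parent node name>:clk:1234:0" */
/* no name, new naming:         "<node name>:1234:0" */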
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index c2618f1477a2..8c1b0e8e8d32 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -1,33 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
-config ICST
- bool
-config COMMON_CLK_VERSATILE
- bool "Clock driver for ARM Reference designs"
+menu "Clock driver for ARM Reference designs"
depends on ARCH_INTEGRATOR || ARCH_REALVIEW || \
- ARCH_VERSATILE || ARCH_VEXPRESS || ARM64 || \
- COMPILE_TEST
+ ARCH_VERSATILE || ARCH_VEXPRESS || COMPILE_TEST
+
+config ICST
+ bool "Clock driver for ARM Reference designs ICST"
select REGMAP_MMIO
---help---
Supports clocking on ARM Reference designs:
- Integrator/AP and Integrator/CP
- RealView PB1176, EB, PB11MP and PBX
- - Versatile Express
config CLK_SP810
bool "Clock driver for ARM SP810 System Controller"
- depends on COMMON_CLK_VERSATILE
- default y if ARCH_VEXPRESS
+ default y if (ARCH_VEXPRESS && ARM)
---help---
Supports clock muxing (REFCLK/TIMCLK to TIMERCLKEN0-3) capabilities
of the ARM SP810 System Controller cell.
config CLK_VEXPRESS_OSC
- bool "Clock driver for Versatile Express OSC clock generators"
- depends on COMMON_CLK_VERSATILE
+ tristate "Clock driver for Versatile Express OSC clock generators"
depends on VEXPRESS_CONFIG
+ select REGMAP_MMIO
default y if ARCH_VEXPRESS
---help---
Simple regmap-based driver driving clock generators on Versatile
Express platforms hidden behind its configuration infrastructure,
commonly known as OSCs.
+
+endmenu
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index b05da8516d4c..ca798249544d 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -8,7 +8,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/clk-integrator.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -20,26 +19,6 @@
#define IMPD1_OSC2 0x04
#define IMPD1_LOCK 0x08
-struct impd1_clk {
- char *pclkname;
- struct clk *pclk;
- char *vco1name;
- struct clk *vco1clk;
- char *vco2name;
- struct clk *vco2clk;
- struct clk *mmciclk;
- char *uartname;
- struct clk *uartclk;
- char *spiname;
- struct clk *spiclk;
- char *scname;
- struct clk *scclk;
- struct clk_lookup *clks[15];
-};
-
-/* One entry for each connected IM-PD1 LM */
-static struct impd1_clk impd1_clks[4];
-
/*
* There are two VCO's on the IM-PD1
*/
@@ -80,106 +59,6 @@ static const struct clk_icst_desc impd1_icst2_desc = {
.lock_offset = IMPD1_LOCK,
};
-/**
- * integrator_impd1_clk_init() - set up the integrator clock tree
- * @base: base address of the logic module (LM)
- * @id: the ID of this LM
- */
-void integrator_impd1_clk_init(void __iomem *base, unsigned int id)
-{
- struct impd1_clk *imc;
- struct clk *clk;
- struct clk *pclk;
- int i;
-
- if (id > 3) {
- pr_crit("no more than 4 LMs can be attached\n");
- return;
- }
- imc = &impd1_clks[id];
-
- /* Register the fixed rate PCLK */
- imc->pclkname = kasprintf(GFP_KERNEL, "lm%x-pclk", id);
- pclk = clk_register_fixed_rate(NULL, imc->pclkname, NULL, 0, 0);
- imc->pclk = pclk;
-
- imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id);
- clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, NULL,
- base);
- imc->vco1clk = clk;
- imc->clks[0] = clkdev_alloc(pclk, "apb_pclk", "lm%x:01000", id);
- imc->clks[1] = clkdev_alloc(clk, NULL, "lm%x:01000", id);
-
- /* VCO2 is also called "CLK2" */
- imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id);
- clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, NULL,
- base);
- imc->vco2clk = clk;
-
- /* MMCI uses CLK2 right off */
- imc->clks[2] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00700", id);
- imc->clks[3] = clkdev_alloc(clk, NULL, "lm%x:00700", id);
-
- /* UART reference clock divides CLK2 by a fixed factor 4 */
- imc->uartname = kasprintf(GFP_KERNEL, "lm%x-uartclk", id);
- clk = clk_register_fixed_factor(NULL, imc->uartname, imc->vco2name,
- CLK_IGNORE_UNUSED, 1, 4);
- imc->uartclk = clk;
- imc->clks[4] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00100", id);
- imc->clks[5] = clkdev_alloc(clk, NULL, "lm%x:00100", id);
- imc->clks[6] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00200", id);
- imc->clks[7] = clkdev_alloc(clk, NULL, "lm%x:00200", id);
-
- /* SPI PL022 clock divides CLK2 by a fixed factor 64 */
- imc->spiname = kasprintf(GFP_KERNEL, "lm%x-spiclk", id);
- clk = clk_register_fixed_factor(NULL, imc->spiname, imc->vco2name,
- CLK_IGNORE_UNUSED, 1, 64);
- imc->clks[8] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00300", id);
- imc->clks[9] = clkdev_alloc(clk, NULL, "lm%x:00300", id);
-
- /* The GPIO blocks and AACI have only PCLK */
- imc->clks[10] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00400", id);
- imc->clks[11] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00500", id);
- imc->clks[12] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00800", id);
-
- /* Smart Card clock divides CLK2 by a fixed factor 4 */
- imc->scname = kasprintf(GFP_KERNEL, "lm%x-scclk", id);
- clk = clk_register_fixed_factor(NULL, imc->scname, imc->vco2name,
- CLK_IGNORE_UNUSED, 1, 4);
- imc->scclk = clk;
- imc->clks[13] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00600", id);
- imc->clks[14] = clkdev_alloc(clk, NULL, "lm%x:00600", id);
-
- for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
- clkdev_add(imc->clks[i]);
-}
-EXPORT_SYMBOL_GPL(integrator_impd1_clk_init);
-
-void integrator_impd1_clk_exit(unsigned int id)
-{
- int i;
- struct impd1_clk *imc;
-
- if (id > 3)
- return;
- imc = &impd1_clks[id];
-
- for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
- clkdev_drop(imc->clks[i]);
- clk_unregister(imc->spiclk);
- clk_unregister(imc->uartclk);
- clk_unregister(imc->vco2clk);
- clk_unregister(imc->vco1clk);
- clk_unregister(imc->pclk);
- kfree(imc->scname);
- kfree(imc->spiname);
- kfree(imc->uartname);
- kfree(imc->vco2name);
- kfree(imc->vco1name);
- kfree(imc->pclkname);
-}
-EXPORT_SYMBOL_GPL(integrator_impd1_clk_exit);
-
static int integrator_impd1_clk_spawn(struct device *dev,
struct device_node *parent,
struct device_node *np)
@@ -206,6 +85,7 @@ static int integrator_impd1_clk_spawn(struct device *dev,
return -ENODEV;
}
+ of_property_read_string(np, "clock-output-names", &name);
parent_name = of_clk_get_parent_name(np, 0);
clk = icst_clk_setup(NULL, desc, name, parent_name, map,
ICST_INTEGRATOR_IM_PD1);
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 7ade146a3ea9..b2b32fa2d7c3 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -7,6 +7,7 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -65,8 +66,8 @@ static int vexpress_osc_probe(struct platform_device *pdev)
{
struct clk_init_data init;
struct vexpress_osc *osc;
- struct clk *clk;
u32 range[2];
+ int ret;
osc = devm_kzalloc(&pdev->dev, sizeof(*osc), GFP_KERNEL);
if (!osc)
@@ -92,11 +93,11 @@ static int vexpress_osc_probe(struct platform_device *pdev)
osc->hw.init = &init;
- clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(&pdev->dev, &osc->hw);
+ if (ret < 0)
+ return ret;
- of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk);
+ devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get, &osc->hw);
clk_hw_set_rate_range(&osc->hw, osc->rate_min, osc->rate_max);
dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name);
@@ -108,6 +109,7 @@ static const struct of_device_id vexpress_osc_of_match[] = {
{ .compatible = "arm,vexpress-osc", },
{}
};
+MODULE_DEVICE_TABLE(of, vexpress_osc_of_match);
static struct platform_driver vexpress_osc_driver = {
.driver = {
@@ -116,9 +118,5 @@ static struct platform_driver vexpress_osc_driver = {
},
.probe = vexpress_osc_probe,
};
-
-static int __init vexpress_osc_init(void)
-{
- return platform_driver_register(&vexpress_osc_driver);
-}
-core_initcall(vexpress_osc_init);
+module_platform_driver(vexpress_osc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/zynqmp/clk-gate-zynqmp.c b/drivers/clk/zynqmp/clk-gate-zynqmp.c
index 83b236f20fff..10c9b889324f 100644
--- a/drivers/clk/zynqmp/clk-gate-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-gate-zynqmp.c
@@ -37,9 +37,8 @@ static int zynqmp_clk_gate_enable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_enable(clk_id);
+ ret = zynqmp_pm_clock_enable(clk_id);
if (ret)
pr_warn_once("%s() clock enabled failed for %s, ret = %d\n",
@@ -58,9 +57,8 @@ static void zynqmp_clk_gate_disable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_disable(clk_id);
+ ret = zynqmp_pm_clock_disable(clk_id);
if (ret)
pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
@@ -79,9 +77,8 @@ static int zynqmp_clk_gate_is_enabled(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int state, ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getstate(clk_id, &state);
+ ret = zynqmp_pm_clock_getstate(clk_id, &state);
if (ret) {
pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
__func__, clk_name, ret);
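Annotation (code copied from the hunks above): every zynqmp conversion in this series is the same mechanical change, dropping the per-call ops-table lookup in favor of an exported firmware helper:

/* before: indirect call through the EEMI ops table */
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
ret = eemi_ops->clock_enable(clk_id);

/* after: direct call, no per-call lookup */
ret = zynqmp_pm_clock_enable(clk_id);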
diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
index 0af8f74c5fa5..06194149be83 100644
--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
@@ -47,9 +47,8 @@ static u8 zynqmp_clk_mux_get_parent(struct clk_hw *hw)
u32 clk_id = mux->clk_id;
u32 val;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getparent(clk_id, &val);
+ ret = zynqmp_pm_clock_getparent(clk_id, &val);
if (ret)
pr_warn_once("%s() getparent failed for clock: %s, ret = %d\n",
@@ -71,9 +70,8 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = mux->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_setparent(clk_id, index);
+ ret = zynqmp_pm_clock_setparent(clk_id, index);
if (ret)
pr_warn_once("%s() set parent failed for clock: %s, ret = %d\n",
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 10e89f23880b..5eed5ce10179 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -134,7 +134,6 @@ static struct clk_hw *(* const clk_topology[]) (const char *name, u32 clk_id,
static struct zynqmp_clock *clock;
static struct clk_hw_onecell_data *zynqmp_data;
static unsigned int clock_max_idx;
-static const struct zynqmp_eemi_ops *eemi_ops;
/**
* zynqmp_is_valid_clock() - Check whether clock is valid or not
@@ -206,7 +205,7 @@ static int zynqmp_pm_clock_get_num_clocks(u32 *nclocks)
qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
*nclocks = ret_payload[1];
return ret;
@@ -231,7 +230,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id,
qdata.qid = PM_QID_CLOCK_GET_NAME;
qdata.arg1 = clock_id;
- eemi_ops->query_data(qdata, ret_payload);
+ zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, ret_payload, sizeof(*response));
return 0;
@@ -265,7 +264,7 @@ static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index,
qdata.arg1 = clock_id;
qdata.arg2 = index;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -296,7 +295,7 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
qdata.qid = PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS;
qdata.arg1 = clk_id;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
if (ret)
return ERR_PTR(ret);
@@ -339,7 +338,7 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index,
qdata.arg1 = clock_id;
qdata.arg2 = index;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -364,7 +363,7 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id,
qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
qdata.arg1 = clock_id;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -738,10 +737,6 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
int ret;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
ret = zynqmp_clk_setup(dev->of_node);
return ret;
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 4be2cc76aa2e..8eed715707e3 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -83,9 +83,8 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
u32 div_type = divider->div_type;
u32 div, value;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getdivider(clk_id, &div);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &div);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
@@ -163,11 +162,10 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
u32 div_type = divider->div_type;
u32 bestdiv;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- ret = eemi_ops->clock_getdivider(clk_id, &bestdiv);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &bestdiv);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
@@ -219,7 +217,6 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
u32 div_type = divider->div_type;
u32 value, div;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
if (div_type == TYPE_DIV1) {
@@ -233,7 +230,7 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
div = __ffs(div);
- ret = eemi_ops->clock_setdivider(clk_id, div);
+ ret = zynqmp_pm_clock_setdivider(clk_id, div);
if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
@@ -258,7 +255,6 @@ static const struct clk_ops zynqmp_clk_divider_ops = {
*/
u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -266,7 +262,7 @@ u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
qdata.arg1 = clk_id;
qdata.arg2 = type;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
/*
* To maintain backward compatibility return maximum possible value
* (0xFFFF) if query for max divisor is not successful.
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 89b599530105..92f449ed38e5 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -50,10 +50,8 @@ static inline enum pll_mode zynqmp_pll_get_mode(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->ioctl(0, IOCTL_GET_PLL_FRAC_MODE, clk_id, 0,
- ret_payload);
+ ret = zynqmp_pm_get_pll_frac_mode(clk_id, ret_payload);
if (ret)
pr_warn_once("%s() PLL get frac mode failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -73,14 +71,13 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
const char *clk_name = clk_hw_get_name(hw);
int ret;
u32 mode;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (on)
mode = PLL_MODE_FRAC;
else
mode = PLL_MODE_INT;
- ret = eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_MODE, clk_id, mode, NULL);
+ ret = zynqmp_pm_set_pll_frac_mode(clk_id, mode);
if (ret)
pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -139,17 +136,15 @@ static unsigned long zynqmp_pll_recalc_rate(struct clk_hw *hw,
unsigned long rate, frac;
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getdivider(clk_id, &fbdiv);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &fbdiv);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
rate = parent_rate * fbdiv;
if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
- eemi_ops->ioctl(0, IOCTL_GET_PLL_FRAC_DATA, clk_id, 0,
- ret_payload);
+ zynqmp_pm_get_pll_frac_data(clk_id, ret_payload);
data = ret_payload[1];
frac = (parent_rate * data) / FRAC_DIV;
rate = rate + frac;
@@ -177,7 +172,6 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
u32 fbdiv;
long rate_div, frac, m, f;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
rate_div = (rate * FRAC_DIV) / parent_rate;
@@ -187,21 +181,21 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
rate = parent_rate * m;
frac = (parent_rate * f) / FRAC_DIV;
- ret = eemi_ops->clock_setdivider(clk_id, m);
+ ret = zynqmp_pm_clock_setdivider(clk_id, m);
if (ret == -EUSERS)
WARN(1, "More than allowed devices are using the %s, which is forbidden\n",
clk_name);
else if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
- eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_DATA, clk_id, f, NULL);
+ zynqmp_pm_set_pll_frac_data(clk_id, f);
return rate + frac;
}
fbdiv = DIV_ROUND_CLOSEST(rate, parent_rate);
fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- ret = eemi_ops->clock_setdivider(clk_id, fbdiv);
+ ret = zynqmp_pm_clock_setdivider(clk_id, fbdiv);
if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -222,9 +216,8 @@ static int zynqmp_pll_is_enabled(struct clk_hw *hw)
u32 clk_id = clk->clk_id;
unsigned int state;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getstate(clk_id, &state);
+ ret = zynqmp_pm_clock_getstate(clk_id, &state);
if (ret) {
pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -246,12 +239,11 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = clk->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (zynqmp_pll_is_enabled(hw))
return 0;
- ret = eemi_ops->clock_enable(clk_id);
+ ret = zynqmp_pm_clock_enable(clk_id);
if (ret)
pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -269,12 +261,11 @@ static void zynqmp_pll_disable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = clk->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (!zynqmp_pll_is_enabled(hw))
return;
- ret = eemi_ops->clock_disable(clk_id);
+ ret = zynqmp_pm_clock_disable(clk_id);
if (ret)
pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
__func__, clk_name, ret);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f2142e6bbea3..91418381fcd4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -120,7 +120,6 @@ config OWL_TIMER
config RDA_TIMER
bool "RDA timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
select TIMER_OF
help
@@ -562,16 +561,16 @@ config CLKSRC_VERSATILE
bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
select TIMER_OF
- default y if MFD_VEXPRESS_SYSREG
+ default y if (ARCH_VEXPRESS || ARCH_VERSATILE) && ARM
help
This option enables clock source based on free running
counter available in the "System Registers" block of
- ARM Versatile, RealView and Versatile Express reference
- platforms.
+ ARM Versatile and Versatile Express reference platforms.
config CLKSRC_MIPS_GIC
bool
depends on MIPS_GIC
+ select CLOCKSOURCE_WATCHDOG
select TIMER_OF
config CLKSRC_TANGO_XTAL
@@ -709,6 +708,7 @@ config MICROCHIP_PIT64B
bool "Microchip PIT64B support"
depends on OF || COMPILE_TEST
select CLKSRC_MMIO
+ select TIMER_OF
help
This option enables Microchip PIT64B timer for Atmel
based system. It supports the oneshot, the periodic
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 641ba5383ab5..bdda1a2e4097 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
+obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm-systimer.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index b29b5a75333e..de93dd1a8c7b 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -334,10 +334,8 @@ static int __init arc_clockevent_setup(struct device_node *node)
}
ret = arc_get_timer_clk(node);
- if (ret) {
- pr_err("clockevent: missing clk\n");
+ if (ret)
return ret;
- }
/* Needs apriori irq_set_percpu_devid() done in intc map function */
ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 2204a444e801..ecf7b7db2d05 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1588,10 +1588,8 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
arch_timers_present |= ARCH_TIMER_TYPE_CP15;
ret = acpi_gtdt_init(table, &platform_timer_count);
- if (ret) {
- pr_err("Failed to init GTDT table.\n");
+ if (ret)
return ret;
- }
arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index b207a77b0831..f5f24a95ee82 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -222,7 +222,8 @@ static int apbt_next_event(unsigned long delta,
/**
* dw_apb_clockevent_init() - use an APB timer as a clock_event_device
*
- * @cpu: The CPU the events will be targeted at.
+ * @cpu: The CPU the events will be targeted at, or -1 if no CPU
+ * affinity is required.
* @name: The name used for the timer and the IRQ for it.
* @rating: The rating to give the timer.
* @base: I/O base for the timer registers.
@@ -257,7 +258,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->ced.max_delta_ticks = 0x7fffffff;
dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
dw_ced->ced.min_delta_ticks = 5000;
- dw_ced->ced.cpumask = cpumask_of(cpu);
+ dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu);
dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
dw_ced->ced.set_state_shutdown = apbt_shutdown;
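Annotation (usage taken from the dw_apb_timer_of.c hunk below): callers that don't need per-CPU affinity now pass -1 and get an event device usable from any CPU:

ced = dw_apb_clockevent_init(-1, event_timer->name, 300, iobase, irq,
			     rate);
/* internally: cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu) */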
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 8c28b127759f..ab3ddebe8344 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -73,7 +73,7 @@ static void __init add_clockevent(struct device_node *event_timer)
timer_get_base_and_rate(event_timer, &iobase, &rate);
- ced = dw_apb_clockevent_init(0, event_timer->name, 300, iobase, irq,
+ ced = dw_apb_clockevent_init(-1, event_timer->name, 300, iobase, irq,
rate);
if (!ced)
panic("Unable to initialise clockevent device");
@@ -147,10 +147,6 @@ static int num_called;
static int __init dw_apb_timer_init(struct device_node *timer)
{
switch (num_called) {
- case 0:
- pr_debug("%s: found clockevent timer\n", __func__);
- add_clockevent(timer);
- break;
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
@@ -161,6 +157,8 @@ static int __init dw_apb_timer_init(struct device_node *timer)
#endif
break;
default:
+ pr_debug("%s: found clockevent timer\n", __func__);
+ add_clockevent(timer);
break;
}
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 8b5f8ae723cb..be4175f415ba 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -16,6 +16,7 @@
#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
+#include <linux/sched_clock.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <asm/mips-cps.h>
@@ -23,14 +24,14 @@
static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
static int gic_timer_irq;
static unsigned int gic_frequency;
+static bool __read_mostly gic_clock_unstable;
-static u64 notrace gic_read_count(void)
+static void gic_clocksource_unstable(char *reason);
+
+static u64 notrace gic_read_count_2x32(void)
{
unsigned int hi, hi2, lo;
- if (mips_cm_is64)
- return read_gic_counter();
-
do {
hi = read_gic_counter_32h();
lo = read_gic_counter_32l();
@@ -40,6 +41,19 @@ static u64 notrace gic_read_count(void)
return (((u64) hi) << 32) + lo;
}
+static u64 notrace gic_read_count_64(void)
+{
+ return read_gic_counter();
+}
+
+static u64 notrace gic_read_count(void)
+{
+ if (mips_cm_is64)
+ return gic_read_count_64();
+
+ return gic_read_count_2x32();
+}
+
static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
{
int cpu = cpumask_first(evt->cpumask);
@@ -114,8 +128,10 @@ static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
{
struct clk_notifier_data *cnd = data;
- if (action == POST_RATE_CHANGE)
+ if (action == POST_RATE_CHANGE) {
+ gic_clocksource_unstable("ref clock rate change");
on_each_cpu(gic_update_frequency, (void *)cnd->new_rate, 1);
+ }
return NOTIFY_OK;
}
@@ -161,6 +177,18 @@ static struct clocksource gic_clocksource = {
.vdso_clock_mode = VDSO_CLOCKMODE_GIC,
};
+static void gic_clocksource_unstable(char *reason)
+{
+ if (gic_clock_unstable)
+ return;
+
+ gic_clock_unstable = true;
+
+ pr_info("GIC timer is unstable due to %s\n", reason);
+
+ clocksource_mark_unstable(&gic_clocksource);
+}
+
static int __init __gic_clocksource_init(void)
{
unsigned int count_width;
@@ -228,6 +256,18 @@ static int __init gic_clocksource_of_init(struct device_node *node)
/* And finally start the counter */
clear_gic_config(GIC_CONFIG_COUNTSTOP);
+ /*
+ * It's safe to use the MIPS GIC timer as a sched clock source only if
+ * its ticks are stable, which is true either on platforms with a stable
+ * CPU frequency, or on platforms with a CM3 where CPU frequency changes
+ * are performed by the CPC core clocks divider.
+ */
+ if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) {
+ sched_clock_register(mips_cm_is64 ?
+ gic_read_count_64 : gic_read_count_2x32,
+ 64, gic_frequency);
+ }
+
return 0;
}
TIMER_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
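Annotation (call copied from the hunk above): splitting the counter reader lets the one-time registration select the exact-width function, keeping the mips_cm_is64 test out of the sched_clock fast path:

sched_clock_register(mips_cm_is64 ? gic_read_count_64 : gic_read_count_2x32,
		     64, gic_frequency);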
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index ab0aabfae5f0..73e8aee445da 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -139,7 +139,6 @@ static int
clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
{
u32 alm;
- int status = 0;
unsigned int val;
BUG_ON(delta < 2);
@@ -163,7 +162,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
alm += delta;
regmap_write(regmap_st, AT91_ST_RTAR, alm);
- return status;
+ return 0;
}
static struct clock_event_device clkevt = {
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index e421946a91c5..bb4eee31ae08 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -18,7 +18,7 @@
#include <clocksource/timer-davinci.h>
#undef pr_fmt
-#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+#define pr_fmt(fmt) "%s: " fmt, __func__
#define DAVINCI_TIMER_REG_TIM12 0x10
#define DAVINCI_TIMER_REG_TIM34 0x14
@@ -250,31 +250,29 @@ int __init davinci_timer_register(struct clk *clk,
rv = clk_prepare_enable(clk);
if (rv) {
- pr_err("Unable to prepare and enable the timer clock");
+ pr_err("Unable to prepare and enable the timer clock\n");
return rv;
}
if (!request_mem_region(timer_cfg->reg.start,
resource_size(&timer_cfg->reg),
"davinci-timer")) {
- pr_err("Unable to request memory region");
+ pr_err("Unable to request memory region\n");
return -EBUSY;
}
base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
if (!base) {
- pr_err("Unable to map the register range");
+ pr_err("Unable to map the register range\n");
return -ENOMEM;
}
davinci_timer_init(base);
tick_rate = clk_get_rate(clk);
- clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL | __GFP_NOFAIL);
- if (!clockevent) {
- pr_err("Error allocating memory for clockevent data");
+ clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
+ if (!clockevent)
return -ENOMEM;
- }
clockevent->dev.name = "tim12";
clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
@@ -298,7 +296,7 @@ int __init davinci_timer_register(struct clk *clk,
davinci_timer_irq_timer, IRQF_TIMER,
"clockevent/tim12", clockevent);
if (rv) {
- pr_err("Unable to request the clockevent interrupt");
+ pr_err("Unable to request the clockevent interrupt\n");
return rv;
}
@@ -325,7 +323,7 @@ int __init davinci_timer_register(struct clk *clk,
rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
if (rv) {
- pr_err("Unable to register clocksource");
+ pr_err("Unable to register clocksource\n");
return rv;
}
@@ -343,20 +341,20 @@ static int __init of_davinci_timer_register(struct device_node *np)
rv = of_address_to_resource(np, 0, &timer_cfg.reg);
if (rv) {
- pr_err("Unable to get the register range for timer");
+ pr_err("Unable to get the register range for timer\n");
return rv;
}
rv = of_irq_to_resource_table(np, timer_cfg.irq,
DAVINCI_TIMER_NUM_IRQS);
if (rv != DAVINCI_TIMER_NUM_IRQS) {
- pr_err("Unable to get the interrupts for timer");
+ pr_err("Unable to get the interrupts for timer\n");
return rv;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_err("Unable to get the timer clock");
+ pr_err("Unable to get the timer clock\n");
return PTR_ERR(clk);
}
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index 6334a35fdc2f..2cdc077a39f5 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -61,17 +61,19 @@ static inline void tpm_irq_acknowledge(void)
writel(TPM_STATUS_CH0F, timer_base + TPM_STATUS);
}
-static struct delay_timer tpm_delay_timer;
-
static inline unsigned long tpm_read_counter(void)
{
return readl(timer_base + TPM_CNT);
}
+#if defined(CONFIG_ARM)
+static struct delay_timer tpm_delay_timer;
+
static unsigned long tpm_read_current_timer(void)
{
return tpm_read_counter();
}
+#endif
static u64 notrace tpm_read_sched_clock(void)
{
@@ -144,9 +146,11 @@ static struct timer_of to_tpm = {
static int __init tpm_clocksource_init(void)
{
+#if defined(CONFIG_ARM)
tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
tpm_delay_timer.freq = timer_of_rate(&to_tpm) >> 3;
register_current_timer_delay(&tpm_delay_timer);
+#endif
sched_clock_register(tpm_read_sched_clock, counter_width,
timer_of_rate(&to_tpm) >> 3);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index abd5f158d6e2..ae12bbf3d68c 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -24,6 +24,7 @@
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
*/
+#include <linux/clk.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/sched_clock.h>
@@ -76,6 +77,49 @@ static u64 notrace omap_32k_read_sched_clock(void)
return ti_32k_read_cycles(&ti_32k_timer.cs);
}
+static void __init ti_32k_timer_enable_clock(struct device_node *np,
+ const char *name)
+{
+ struct clk *clock;
+ int error;
+
+ clock = of_clk_get_by_name(np->parent, name);
+ if (IS_ERR(clock)) {
+ /* Only some SoCs have a separate interface clock */
+ if (PTR_ERR(clock) == -EINVAL && !strncmp("ick", name, 3))
+ return;
+
+ pr_warn("%s: could not get clock %s %li\n",
+ __func__, name, PTR_ERR(clock));
+ return;
+ }
+
+ error = clk_prepare_enable(clock);
+ if (error) {
+ pr_warn("%s: could not enable %s: %i\n",
+ __func__, name, error);
+ return;
+ }
+}
+
+static void __init ti_32k_timer_module_init(struct device_node *np,
+ void __iomem *base)
+{
+ void __iomem *sysc = base + 4;
+
+ if (!of_device_is_compatible(np->parent, "ti,sysc"))
+ return;
+
+ ti_32k_timer_enable_clock(np, "fck");
+ ti_32k_timer_enable_clock(np, "ick");
+
+ /*
+ * Force idle module as wkup domain is active with MPU.
+ * No need to tag the module disabled for ti-sysc probe.
+ */
+ writel_relaxed(0, sysc);
+}
+
static int __init ti_32k_timer_init(struct device_node *np)
{
int ret;
@@ -90,6 +134,7 @@ static int __init ti_32k_timer_init(struct device_node *np)
ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
ti_32k_timer.counter = ti_32k_timer.base;
+ ti_32k_timer_module_init(np, ti_32k_timer.base);
/*
* 32k sync Counter IP register offsets vary between the highlander
@@ -104,6 +149,8 @@ static int __init ti_32k_timer_init(struct device_node *np)
else
ti_32k_timer.counter += OMAP2_32KSYNCNT_CR_OFF_LOW;
+ pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
if (ret) {
pr_err("32k_counter: can't register clocksource\n");
@@ -111,7 +158,6 @@ static int __init ti_32k_timer_init(struct device_node *np)
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
- pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
return 0;
}
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
new file mode 100644
index 000000000000..6fd1f219a512
--- /dev/null
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#include <linux/clk/clk-conf.h>
+
+#include <clocksource/timer-ti-dm.h>
+#include <dt-bindings/bus/ti-sysc.h>
+
+/* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
+#define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \
+ SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)
+
+#define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2)
+#define DMTIMER_RESET_WAIT 100000
+
+#define DMTIMER_INST_DONT_CARE ~0U
+
+static int counter_32k;
+static u32 clocksource;
+static u32 clockevent;
+
+/*
+ * Subset of the timer registers we use. Note that the register offsets
+ * depend on the timer revision detected.
+ */
+struct dmtimer_systimer {
+ void __iomem *base;
+ u8 sysc;
+ u8 irq_stat;
+ u8 irq_ena;
+ u8 pend;
+ u8 load;
+ u8 counter;
+ u8 ctrl;
+ u8 wakeup;
+ u8 ifctrl;
+ unsigned long rate;
+};
+
+struct dmtimer_clockevent {
+ struct clock_event_device dev;
+ struct dmtimer_systimer t;
+ u32 period;
+};
+
+struct dmtimer_clocksource {
+ struct clocksource dev;
+ struct dmtimer_systimer t;
+ unsigned int loadval;
+};
+
+/* Assumes v1 ip if bits [31:16] are zero */
+static bool dmtimer_systimer_revision1(struct dmtimer_systimer *t)
+{
+ u32 tidr = readl_relaxed(t->base);
+
+ return !(tidr >> 16);
+}
+
+static int __init dmtimer_systimer_type1_reset(struct dmtimer_systimer *t)
+{
+ void __iomem *syss = t->base + OMAP_TIMER_V1_SYS_STAT_OFFSET;
+ int ret;
+ u32 l;
+
+ writel_relaxed(BIT(1) | BIT(2), t->base + t->ifctrl);
+ ret = readl_poll_timeout_atomic(syss, l, l & BIT(0), 100,
+ DMTIMER_RESET_WAIT);
+
+ return ret;
+}
+
+/* Note we must use io_base instead of func_base for type2 OCP regs */
+static int __init dmtimer_systimer_type2_reset(struct dmtimer_systimer *t)
+{
+ void __iomem *sysc = t->base + t->sysc;
+ u32 l;
+
+ l = readl_relaxed(sysc);
+ l |= BIT(0);
+ writel_relaxed(l, sysc);
+
+ return readl_poll_timeout_atomic(sysc, l, !(l & BIT(0)), 100,
+ DMTIMER_RESET_WAIT);
+}
+
+static int __init dmtimer_systimer_reset(struct dmtimer_systimer *t)
+{
+ int ret;
+
+ if (dmtimer_systimer_revision1(t))
+ ret = dmtimer_systimer_type1_reset(t);
+ else
+ ret = dmtimer_systimer_type2_reset(t);
+ if (ret < 0) {
+ pr_err("%s failed with %i\n", __func__, ret);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id counter_match_table[] = {
+ { .compatible = "ti,omap-counter32k" },
+ { /* Sentinel */ },
+};
+
+/*
+ * Check if the SoC also has a usable working 32 KiHz counter. The 32 KiHz
+ * counter is handled by timer-ti-32k, but we need to detect it as it
+ * affects the preferred dmtimer system timer configuration. There is
+ * typically no use for a dmtimer clocksource if the 32 KiHz counter is
+ * present, except on am437x as described below.
+ */
+static void __init dmtimer_systimer_check_counter32k(void)
+{
+ struct device_node *np;
+
+ if (counter_32k)
+ return;
+
+ np = of_find_matching_node(NULL, counter_match_table);
+ if (!np) {
+ counter_32k = -ENODEV;
+
+ return;
+ }
+
+ if (of_device_is_available(np))
+ counter_32k = 1;
+ else
+ counter_32k = -ENODEV;
+
+ of_node_put(np);
+}
+
+static const struct of_device_id dmtimer_match_table[] = {
+ { .compatible = "ti,omap2420-timer", },
+ { .compatible = "ti,omap3430-timer", },
+ { .compatible = "ti,omap4430-timer", },
+ { .compatible = "ti,omap5430-timer", },
+ { .compatible = "ti,am335x-timer", },
+ { .compatible = "ti,am335x-timer-1ms", },
+ { .compatible = "ti,dm814-timer", },
+ { .compatible = "ti,dm816-timer", },
+ { /* Sentinel */ },
+};
+
+/*
+ * Checks that system timers are configured to not reset and idle during
+ * the generic timer-ti-dm device driver probe, and that the system timer
+ * source clocks are properly configured. Also, let's not hog any DSP- or
+ * PWM-capable timers unnecessarily as system timers.
+ */
+static bool __init dmtimer_is_preferred(struct device_node *np)
+{
+ if (!of_device_is_available(np))
+ return false;
+
+ if (!of_property_read_bool(np->parent,
+ "ti,no-reset-on-init"))
+ return false;
+
+ if (!of_property_read_bool(np->parent, "ti,no-idle"))
+ return false;
+
+ /* Secure gptimer12 is always clocked with a fixed source */
+ if (!of_property_read_bool(np, "ti,timer-secure")) {
+ if (!of_property_read_bool(np, "assigned-clocks"))
+ return false;
+
+ if (!of_property_read_bool(np, "assigned-clock-parents"))
+ return false;
+ }
+
+ if (of_property_read_bool(np, "ti,timer-dsp"))
+ return false;
+
+ if (of_property_read_bool(np, "ti,timer-pwm"))
+ return false;
+
+ return true;
+}
+
+/*
+ * Finds the first available usable always-on timer, and assigns it to either
+ * clockevent or clocksource depending on whether the counter_32k is
+ * available on the SoC or not.
+ *
+ * Some omap3 boards with unreliable oscillator must not use the counter_32k
+ * or dmtimer1 with 32 KiHz source. Additionally, the boards with unreliable
+ * oscillator should really set counter_32k as disabled and delete the dmtimer1
+ * ti,always-on property, but let's not count on it. For these quirky cases,
+ * we prefer using the always-on secure dmtimer12 with the internal 32 KiHz
+ * clock as the clocksource, and any available dmtimer as clockevent.
+ *
+ * For am437x, we are using am335x style dmtimer clocksource. It is unclear
+ * if this quirk handling is really needed, but let's change it separately
+ * based on testing as it might cause side effects.
+ */
+static void __init dmtimer_systimer_assign_alwon(void)
+{
+ struct device_node *np;
+ u32 pa = 0;
+ bool quirk_unreliable_oscillator = false;
+
+ /* Quirk unreliable 32 KiHz oscillator with incomplete dts */
+ if (of_machine_is_compatible("ti,omap3-beagle") ||
+ of_machine_is_compatible("timll,omap3-devkit8000")) {
+ quirk_unreliable_oscillator = true;
+ counter_32k = -ENODEV;
+ }
+
+ /* Quirk am437x using am335x style dmtimer clocksource */
+ if (of_machine_is_compatible("ti,am43"))
+ counter_32k = -ENODEV;
+
+ for_each_matching_node(np, dmtimer_match_table) {
+ if (!dmtimer_is_preferred(np))
+ continue;
+
+ if (of_property_read_bool(np, "ti,timer-alwon")) {
+ const __be32 *addr;
+
+ addr = of_get_address(np, 0, NULL, NULL);
+ pa = of_translate_address(np, addr);
+ if (pa) {
+ /* Quirky omap3 boards must use dmtimer12 */
+ if (quirk_unreliable_oscillator &&
+ pa == 0x48318000)
+ continue;
+
+ of_node_put(np);
+ break;
+ }
+ }
+ }
+
+ /* Usually no need for a dmtimer clocksource if we have the counter_32k */
+ if (counter_32k >= 0) {
+ clockevent = pa;
+ clocksource = 0;
+ } else {
+ clocksource = pa;
+ clockevent = DMTIMER_INST_DONT_CARE;
+ }
+}
+
+/* Finds the first usable dmtimer, used for the don't care case */
+static u32 __init dmtimer_systimer_find_first_available(void)
+{
+ struct device_node *np;
+ const __be32 *addr;
+ u32 pa = 0;
+
+ for_each_matching_node(np, dmtimer_match_table) {
+ if (!dmtimer_is_preferred(np))
+ continue;
+
+ addr = of_get_address(np, 0, NULL, NULL);
+ pa = of_translate_address(np, addr);
+ if (pa) {
+ if (pa == clocksource || pa == clockevent) {
+ pa = 0;
+ continue;
+ }
+
+ of_node_put(np);
+ break;
+ }
+ }
+
+ return pa;
+}
+
+/* Selects the best clocksource and clockevent to use */
+static void __init dmtimer_systimer_select_best(void)
+{
+ dmtimer_systimer_check_counter32k();
+ dmtimer_systimer_assign_alwon();
+
+ if (clockevent == DMTIMER_INST_DONT_CARE)
+ clockevent = dmtimer_systimer_find_first_available();
+
+ pr_debug("%s: counter_32k: %i clocksource: %08x clockevent: %08x\n",
+ __func__, counter_32k, clocksource, clockevent);
+}
+
+/* Interface clocks are only available on some SoCs variants */
+static int __init dmtimer_systimer_init_clock(struct device_node *np,
+ const char *name,
+ unsigned long *rate)
+{
+ struct clk *clock;
+ unsigned long r;
+ int error;
+
+ clock = of_clk_get_by_name(np, name);
+ if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3))
+ return 0;
+ else if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ error = clk_prepare_enable(clock);
+ if (error)
+ return error;
+
+ r = clk_get_rate(clock);
+ if (!r)
+ return -ENODEV;
+
+ *rate = r;
+
+ return 0;
+}
+
+static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
+{
+ u32 val;
+
+ if (dmtimer_systimer_revision1(t))
+ val = DMTIMER_TYPE1_ENABLE;
+ else
+ val = DMTIMER_TYPE2_ENABLE;
+
+ writel_relaxed(val, t->base + t->sysc);
+}
+
+static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
+{
+ writel_relaxed(0, t->base + t->sysc);
+}
+
+static int __init dmtimer_systimer_setup(struct device_node *np,
+ struct dmtimer_systimer *t)
+{
+ unsigned long rate;
+ u8 regbase;
+ int error;
+
+ if (!of_device_is_compatible(np->parent, "ti,sysc"))
+ return -EINVAL;
+
+ t->base = of_iomap(np, 0);
+ if (!t->base)
+ return -ENXIO;
+
+ /*
+ * Enable optional assigned-clock-parents configured at the timer
+ * node level. For regular device drivers, this is done automatically
+ * by bus related code such as platform_drv_probe().
+ */
+ error = of_clk_set_defaults(np, false);
+ if (error < 0)
+ pr_err("%s: clock source init failed: %i\n", __func__, error);
+
+ /* For ti-sysc, we have timer clocks at the parent module level */
+ error = dmtimer_systimer_init_clock(np->parent, "fck", &rate);
+ if (error)
+ goto err_unmap;
+
+ t->rate = rate;
+
+ error = dmtimer_systimer_init_clock(np->parent, "ick", &rate);
+ if (error)
+ goto err_unmap;
+
+ if (dmtimer_systimer_revision1(t)) {
+ t->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
+ t->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
+ t->pend = _OMAP_TIMER_WRITE_PEND_OFFSET;
+ regbase = 0;
+ } else {
+ t->irq_stat = OMAP_TIMER_V2_IRQSTATUS;
+ t->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET;
+ regbase = OMAP_TIMER_V2_FUNC_OFFSET;
+ t->pend = regbase + _OMAP_TIMER_WRITE_PEND_OFFSET;
+ }
+
+ t->sysc = OMAP_TIMER_OCP_CFG_OFFSET;
+ t->load = regbase + _OMAP_TIMER_LOAD_OFFSET;
+ t->counter = regbase + _OMAP_TIMER_COUNTER_OFFSET;
+ t->ctrl = regbase + _OMAP_TIMER_CTRL_OFFSET;
+ t->wakeup = regbase + _OMAP_TIMER_WAKEUP_EN_OFFSET;
+ t->ifctrl = regbase + _OMAP_TIMER_IF_CTRL_OFFSET;
+
+ dmtimer_systimer_enable(t);
+ dmtimer_systimer_reset(t);
+ pr_debug("dmtimer rev %08x sysc %08x\n", readl_relaxed(t->base),
+ readl_relaxed(t->base + t->sysc));
+
+ return 0;
+
+err_unmap:
+ iounmap(t->base);
+
+ return error;
+}
+
+/* Clockevent */
+static struct dmtimer_clockevent *
+to_dmtimer_clockevent(struct clock_event_device *clockevent)
+{
+ return container_of(clockevent, struct dmtimer_clockevent, dev);
+}
+
+static irqreturn_t dmtimer_clockevent_interrupt(int irq, void *data)
+{
+ struct dmtimer_clockevent *clkevt = data;
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
+ clkevt->dev.event_handler(&clkevt->dev);
+
+ return IRQ_HANDLED;
+}
+
+static int dmtimer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+ void __iomem *pend = t->base + t->pend;
+
+ writel_relaxed(0xffffffff - cycles, t->base + t->counter);
+ while (readl_relaxed(pend) & WP_TCRR)
+ cpu_relax();
+
+ writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
+ while (readl_relaxed(pend) & WP_TCLR)
+ cpu_relax();
+
+ return 0;
+}
+
+static int dmtimer_clockevent_shutdown(struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+ void __iomem *ctrl = t->base + t->ctrl;
+ u32 l;
+
+ l = readl_relaxed(ctrl);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ l &= ~BIT(0);
+ writel_relaxed(l, ctrl);
+ /* Flush posted write */
+ l = readl_relaxed(ctrl);
+ /* Wait for functional clock period x 3.5 */
+ udelay(3500000 / t->rate + 1);
+ }
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
+
+ return 0;
+}
+
+static int dmtimer_set_periodic(struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+ void __iomem *pend = t->base + t->pend;
+
+ dmtimer_clockevent_shutdown(evt);
+
+ /* Looks like we need to first set the load value separately */
+ writel_relaxed(clkevt->period, t->base + t->load);
+ while (readl_relaxed(pend) & WP_TLDR)
+ cpu_relax();
+
+ writel_relaxed(clkevt->period, t->base + t->counter);
+ while (readl_relaxed(pend) & WP_TCRR)
+ cpu_relax();
+
+ writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+ t->base + t->ctrl);
+ while (readl_relaxed(pend) & WP_TCLR)
+ cpu_relax();
+
+ return 0;
+}
+
+static void omap_clockevent_idle(struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ dmtimer_systimer_disable(t);
+}
+
+static void omap_clockevent_unidle(struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ dmtimer_systimer_enable(t);
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
+}
+
+static int __init dmtimer_clockevent_init(struct device_node *np)
+{
+ struct dmtimer_clockevent *clkevt;
+ struct clock_event_device *dev;
+ struct dmtimer_systimer *t;
+ int error;
+
+ clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+ if (!clkevt)
+ return -ENOMEM;
+
+ t = &clkevt->t;
+ dev = &clkevt->dev;
+
+ /*
+ * We mostly use cpuidle_coupled with ARM local timers for runtime,
+ * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
+ */
+ dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ dev->rating = 300;
+ dev->set_next_event = dmtimer_set_next_event;
+ dev->set_state_shutdown = dmtimer_clockevent_shutdown;
+ dev->set_state_periodic = dmtimer_set_periodic;
+ dev->set_state_oneshot = dmtimer_clockevent_shutdown;
+ dev->tick_resume = dmtimer_clockevent_shutdown;
+ dev->cpumask = cpu_possible_mask;
+
+ dev->irq = irq_of_parse_and_map(np, 0);
+ if (!dev->irq) {
+ error = -ENXIO;
+ goto err_out_free;
+ }
+
+ error = dmtimer_systimer_setup(np, &clkevt->t);
+ if (error)
+ goto err_out_free;
+
+ clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
+
+ /*
+ * For clock-event timers we never read the timer counter and
+ * so we are not impacted by errata i103 and i767. Therefore,
+ * we can safely ignore this errata for clock-event timers.
+ */
+ writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
+
+ error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
+ IRQF_TIMER, "clockevent", clkevt);
+ if (error)
+ goto err_out_unmap;
+
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
+
+ pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
+ of_find_property(np, "ti,timer-alwon", NULL) ?
+ "always-on " : "", t->rate, np->parent);
+
+ clockevents_config_and_register(dev, t->rate,
+ 3, /* Timer internal resynch latency */
+ 0xffffffff);
+
+ if (of_device_is_compatible(np, "ti,am33xx") ||
+ of_device_is_compatible(np, "ti,am43")) {
+ dev->suspend = omap_clockevent_idle;
+ dev->resume = omap_clockevent_unidle;
+ }
+
+ return 0;
+
+err_out_unmap:
+ iounmap(t->base);
+
+err_out_free:
+ kfree(clkevt);
+
+ return error;
+}
+
+/* Clocksource */
+static struct dmtimer_clocksource *
+to_dmtimer_clocksource(struct clocksource *cs)
+{
+ return container_of(cs, struct dmtimer_clocksource, dev);
+}
+
+static u64 dmtimer_clocksource_read_cycles(struct clocksource *cs)
+{
+ struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
+ struct dmtimer_systimer *t = &clksrc->t;
+
+ return (u64)readl_relaxed(t->base + t->counter);
+}
+
+static void __iomem *dmtimer_sched_clock_counter;
+
+static u64 notrace dmtimer_read_sched_clock(void)
+{
+ return readl_relaxed(dmtimer_sched_clock_counter);
+}
+
+static void dmtimer_clocksource_suspend(struct clocksource *cs)
+{
+ struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
+ struct dmtimer_systimer *t = &clksrc->t;
+
+ clksrc->loadval = readl_relaxed(t->base + t->counter);
+ dmtimer_systimer_disable(t);
+}
+
+static void dmtimer_clocksource_resume(struct clocksource *cs)
+{
+ struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
+ struct dmtimer_systimer *t = &clksrc->t;
+
+ dmtimer_systimer_enable(t);
+ writel_relaxed(clksrc->loadval, t->base + t->counter);
+ writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
+ t->base + t->ctrl);
+}
+
+static int __init dmtimer_clocksource_init(struct device_node *np)
+{
+ struct dmtimer_clocksource *clksrc;
+ struct dmtimer_systimer *t;
+ struct clocksource *dev;
+ int error;
+
+ clksrc = kzalloc(sizeof(*clksrc), GFP_KERNEL);
+ if (!clksrc)
+ return -ENOMEM;
+
+ dev = &clksrc->dev;
+ t = &clksrc->t;
+
+ error = dmtimer_systimer_setup(np, t);
+ if (error)
+ goto err_out_free;
+
+ dev->name = "dmtimer";
+ dev->rating = 300;
+ dev->read = dmtimer_clocksource_read_cycles;
+ dev->mask = CLOCKSOURCE_MASK(32);
+ dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ if (of_machine_is_compatible("ti,am33xx") ||
+     of_machine_is_compatible("ti,am43")) {
+ dev->suspend = dmtimer_clocksource_suspend;
+ dev->resume = dmtimer_clocksource_resume;
+ }
+
+ writel_relaxed(0, t->base + t->counter);
+ writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
+ t->base + t->ctrl);
+
+ pr_info("TI gptimer clocksource: %s%pOF\n",
+ of_find_property(np, "ti,timer-alwon", NULL) ?
+ "always-on " : "", np->parent);
+
+ if (!dmtimer_sched_clock_counter) {
+ dmtimer_sched_clock_counter = t->base + t->counter;
+ sched_clock_register(dmtimer_read_sched_clock, 32, t->rate);
+ }
+
+ if (clocksource_register_hz(dev, t->rate))
+ pr_err("Could not register clocksource %pOF\n", np);
+
+ return 0;
+
+err_out_free:
+ kfree(clksrc);
+
+ return -ENODEV;
+}
+
+/*
+ * To distinguish between a clocksource and a clockevent, we assume the device
+ * has no interrupts configured for a clocksource timer.
+ */
+static int __init dmtimer_systimer_init(struct device_node *np)
+{
+ const __be32 *addr;
+ u32 pa;
+
+ /* One time init for the preferred timer configuration */
+ if (!clocksource && !clockevent)
+ dmtimer_systimer_select_best();
+
+ if (!clocksource && !clockevent) {
+ pr_err("%s: unable to detect system timers, update dtb?\n",
+ __func__);
+
+ return -EINVAL;
+ }
+
+ addr = of_get_address(np, 0, NULL, NULL);
+ pa = of_translate_address(np, addr);
+ if (!pa)
+ return -EINVAL;
+
+ if (counter_32k <= 0 && clocksource == pa)
+ return dmtimer_clocksource_init(np);
+
+ if (clockevent == pa)
+ return dmtimer_clockevent_init(np);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(systimer_omap2, "ti,omap2420-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_omap3, "ti,omap3430-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_omap4, "ti,omap4430-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_omap5, "ti,omap5430-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_am33x, "ti,am335x-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_am3ms, "ti,am335x-timer-1ms", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_dm814, "ti,dm814-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_dm816, "ti,dm816-timer", dmtimer_systimer_init);
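
Note on clkevt->period above: the dmtimer counts up and interrupts on 32-bit
overflow, so loading 0xffffffff minus the per-tick cycle count makes it fire
once per tick. A minimal standalone sketch of that arithmetic (illustrative
only; the 24 MHz rate and HZ=250 are assumptions, not values from the patch):

	#include <stdio.h>
	#include <stdint.h>

	/* Round-to-nearest division, as the kernel's DIV_ROUND_CLOSEST() does */
	#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

	int main(void)
	{
		uint32_t rate = 24000000;  /* assumed functional clock, Hz */
		uint32_t hz = 250;         /* assumed scheduler tick rate */
		uint32_t cycles = DIV_ROUND_CLOSEST(rate, hz);
		uint32_t load = 0xffffffffU - cycles;

		/* The counter overflows after 'cycles' input-clock ticks */
		printf("load=0x%08x, overflow after %u cycles (%.3f ms)\n",
		       load, cycles, 1000.0 * cycles / rate);
		return 0;
	}
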
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 2531eab3d6d7..60aff087947a 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -258,9 +258,7 @@ static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
__omap_dm_timer_enable_posted(timer);
omap_dm_timer_disable(timer);
- rc = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
-
- return rc;
+ return 0;
}
static inline u32 omap_dm_timer_reserved_systimer(int id)
diff --git a/drivers/clocksource/timer-versatile.c b/drivers/clocksource/timer-versatile.c
index e4ebb656d005..f5d017b31afa 100644
--- a/drivers/clocksource/timer-versatile.c
+++ b/drivers/clocksource/timer-versatile.c
@@ -6,6 +6,7 @@
#include <linux/clocksource.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
@@ -22,6 +23,8 @@ static int __init versatile_sched_clock_init(struct device_node *node)
{
void __iomem *base = of_iomap(node, 0);
+ of_node_clear_flag(node, OF_POPULATED);
+
if (!base)
return -ENXIO;
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index d58ce664da84..646ad385e490 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -18,6 +18,7 @@
#include <linux/pid_namespace.h>
#include <linux/cn_proc.h>
+#include <linux/local_lock.h>
/*
* Size of a cn_msg followed by a proc_event structure. Since the
@@ -38,25 +39,31 @@ static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
-/* proc_event_counts is used as the sequence number of the netlink message */
-static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
+/* local_event.count is used as the sequence number of the netlink message */
+struct local_event {
+ local_lock_t lock;
+ __u32 count;
+};
+static DEFINE_PER_CPU(struct local_event, local_event) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
static inline void send_msg(struct cn_msg *msg)
{
- preempt_disable();
+ local_lock(&local_event.lock);
- msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
+ msg->seq = __this_cpu_inc_return(local_event.count) - 1;
((struct proc_event *)msg->data)->cpu = smp_processor_id();
/*
- * Preemption remains disabled during send to ensure the messages are
- * ordered according to their sequence numbers.
+ * local_lock() disables preemption during send to ensure the messages
+ * are ordered according to their sequence numbers.
*
* If cn_netlink_send() fails, the data is not sent.
*/
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
- preempt_enable();
+ local_unlock(&local_event.lock);
}
void proc_fork_connector(struct task_struct *task)
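
Note: the conversion above is the canonical local_lock idiom. On non-RT
kernels local_lock() still compiles down to preempt_disable(), but on
PREEMPT_RT it becomes a real per-CPU lock, keeping the sequence counter and
the send ordered without relying on implicit preemption semantics. A minimal
sketch of the pattern (illustrative names, assuming <linux/local_lock.h>):

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct my_pcpu_state {
		local_lock_t lock;
		u32 seq;
	};

	static DEFINE_PER_CPU(struct my_pcpu_state, my_state) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static u32 next_seq(void)
	{
		u32 seq;

		/* Serializes only against other contexts on this CPU */
		local_lock(&my_state.lock);
		seq = __this_cpu_inc_return(my_state.seq) - 1;
		local_unlock(&my_state.lock);

		return seq;
	}
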
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index c3e6bd59e920..e91750132552 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -323,7 +323,8 @@ endif
config QORIQ_CPUFREQ
tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
- depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
+ depends on OF && COMMON_CLK
+ depends on PPC_E500MC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
select CLK_QORIQ
help
This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 15c1a1231516..c6cbfc8baf72 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -295,11 +295,11 @@ config ARM_TANGO_CPUFREQ
default y
config ARM_TEGRA20_CPUFREQ
- tristate "Tegra20 CPUFreq support"
- depends on ARCH_TEGRA
+ tristate "Tegra20/30 CPUFreq support"
+ depends on ARCH_TEGRA && CPUFREQ_DT
default y
help
- This adds the CPUFreq driver support for Tegra20 SOCs.
+ This adds the CPUFreq driver support for Tegra20/30 SOCs.
config ARM_TEGRA124_CPUFREQ
bool "Tegra124 CPUFreq support"
@@ -317,6 +317,7 @@ config ARM_TEGRA186_CPUFREQ
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
depends on ARCH_OMAP2PLUS
+ default ARCH_OMAP2PLUS
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index cb9db16bea61..e8e20fef400b 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -53,6 +53,7 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "renesas,r7s72100", },
{ .compatible = "renesas,r8a73a4", },
{ .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7742", },
{ .compatible = "renesas,r8a7743", },
{ .compatible = "renesas,r8a7744", },
{ .compatible = "renesas,r8a7745", },
@@ -105,6 +106,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "calxeda,highbank", },
{ .compatible = "calxeda,ecx-2000", },
+ { .compatible = "fsl,imx7ulp", },
{ .compatible = "fsl,imx7d", },
{ .compatible = "fsl,imx8mq", },
{ .compatible = "fsl,imx8mm", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 045f9fe157ce..d03f250f68e4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2535,26 +2535,27 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
static int cpufreq_boost_set_sw(int state)
{
struct cpufreq_policy *policy;
- int ret = -EINVAL;
for_each_active_policy(policy) {
+ int ret;
+
if (!policy->freq_table)
- continue;
+ return -ENXIO;
ret = cpufreq_frequency_table_cpuinfo(policy,
policy->freq_table);
if (ret) {
pr_err("%s: Policy frequency update failed\n",
__func__);
- break;
+ return ret;
}
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
- break;
+ return ret;
}
- return ret;
+ return 0;
}
int cpufreq_boost_trigger_state(int state)
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index de206d2745fe..3fe9125156b4 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -3,7 +3,9 @@
* Copyright 2019 NXP
*/
+#include <linux/clk.h>
#include <linux/cpu.h>
+#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -12,8 +14,11 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include "cpufreq-dt.h"
+
#define OCOTP_CFG3_SPEED_GRADE_SHIFT 8
#define OCOTP_CFG3_SPEED_GRADE_MASK (0x3 << 8)
#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8)
@@ -22,20 +27,92 @@
#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT 5
#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 5)
+#define IMX7ULP_MAX_RUN_FREQ 528000
+
/* cpufreq-dt device registered by imx-cpufreq-dt */
static struct platform_device *cpufreq_dt_pdev;
static struct opp_table *cpufreq_opp_table;
+static struct device *cpu_dev;
+
+enum IMX7ULP_CPUFREQ_CLKS {
+ ARM,
+ CORE,
+ SCS_SEL,
+ HSRUN_CORE,
+ HSRUN_SCS_SEL,
+ FIRC,
+};
+
+static struct clk_bulk_data imx7ulp_clks[] = {
+ { .id = "arm" },
+ { .id = "core" },
+ { .id = "scs_sel" },
+ { .id = "hsrun_core" },
+ { .id = "hsrun_scs_sel" },
+ { .id = "firc" },
+};
+
+static unsigned int imx7ulp_get_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return clk_get_rate(imx7ulp_clks[FIRC].clk);
+}
+
+static int imx7ulp_target_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int newfreq = policy->freq_table[index].frequency;
+
+ clk_set_parent(imx7ulp_clks[SCS_SEL].clk, imx7ulp_clks[FIRC].clk);
+ clk_set_parent(imx7ulp_clks[HSRUN_SCS_SEL].clk, imx7ulp_clks[FIRC].clk);
+
+ if (newfreq > IMX7ULP_MAX_RUN_FREQ)
+ clk_set_parent(imx7ulp_clks[ARM].clk,
+ imx7ulp_clks[HSRUN_CORE].clk);
+ else
+ clk_set_parent(imx7ulp_clks[ARM].clk, imx7ulp_clks[CORE].clk);
+
+ return 0;
+}
+
+static struct cpufreq_dt_platform_data imx7ulp_data = {
+ .target_intermediate = imx7ulp_target_intermediate,
+ .get_intermediate = imx7ulp_get_intermediate,
+};
static int imx_cpufreq_dt_probe(struct platform_device *pdev)
{
- struct device *cpu_dev = get_cpu_device(0);
+ struct platform_device *dt_pdev;
u32 cell_value, supported_hw[2];
int speed_grade, mkt_segment;
int ret;
+ cpu_dev = get_cpu_device(0);
+
if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL))
return -ENODEV;
+ if (of_machine_is_compatible("fsl,imx7ulp")) {
+ ret = clk_bulk_get(cpu_dev, ARRAY_SIZE(imx7ulp_clks),
+ imx7ulp_clks);
+ if (ret)
+ return ret;
+
+ dt_pdev = platform_device_register_data(NULL, "cpufreq-dt",
+ -1, &imx7ulp_data,
+ sizeof(imx7ulp_data));
+ if (IS_ERR(dt_pdev)) {
+ clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
+ ret = PTR_ERR(dt_pdev);
+ dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
+ return ret;
+ }
+
+ cpufreq_dt_pdev = dt_pdev;
+
+ return 0;
+ }
+
ret = nvmem_cell_read_u32(cpu_dev, "speed_grade", &cell_value);
if (ret)
return ret;
@@ -98,7 +175,10 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
static int imx_cpufreq_dt_remove(struct platform_device *pdev)
{
platform_device_unregister(cpufreq_dt_pdev);
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ if (!of_machine_is_compatible("fsl,imx7ulp"))
+ dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ else
+ clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
return 0;
}
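
Note: get_intermediate()/target_intermediate() above hook into the cpufreq
core's two-step transition. Roughly, per transition the core asks for an
intermediate rate, switches to it, then lets target_index() finish the
change; on imx7ulp that parks the CPU on FIRC while the run/hsrun parents are
reprogrammed. A simplified sketch of that ordering (illustrative, not the
core's exact __target_index() code):

	/* 'drv' stands in for the registered struct cpufreq_driver */
	static int transition_to(struct cpufreq_driver *drv,
				 struct cpufreq_policy *policy,
				 unsigned int index)
	{
		int ret;

		if (drv->get_intermediate &&
		    drv->get_intermediate(policy, index)) {
			/* Park the CPU on a safe clock (FIRC on imx7ulp) */
			ret = drv->target_intermediate(policy, index);
			if (ret)
				return ret;
		}

		/* ...then perform the real parent/rate change */
		return drv->target_index(policy, index);
	}
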
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4d3429b2058f..8e23a698ce04 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2771,6 +2771,8 @@ static int __init intel_pstate_init(void)
pr_info("Invalid MSRs\n");
return -ENODEV;
}
+ /* Without HWP start in the passive mode. */
+ default_driver = &intel_cpufreq;
hwp_cpu_matched:
/*
@@ -2816,7 +2818,6 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "disable")) {
no_load = 1;
} else if (!strcmp(str, "passive")) {
- pr_info("Passive mode enabled\n");
default_driver = &intel_cpufreq;
no_hwp = 1;
}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 909f40fbcde2..d05e761d9572 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <asm/clock.h>
#include <asm/idle.h>
#include <asm/mach-loongson2ef/loongson.h>
@@ -58,29 +57,20 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
loongson2_clockmod_table[index].driver_data) / 8;
/* setting the cpu frequency */
- clk_set_rate(policy->clk, freq * 1000);
+ loongson2_cpu_set_rate(freq);
return 0;
}
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- struct clk *cpuclk;
int i;
unsigned long rate;
int ret;
- cpuclk = clk_get(NULL, "cpu_clk");
- if (IS_ERR(cpuclk)) {
- pr_err("couldn't get CPU clk\n");
- return PTR_ERR(cpuclk);
- }
-
rate = cpu_clock_freq / 1000;
- if (!rate) {
- clk_put(cpuclk);
+ if (!rate)
return -EINVAL;
- }
/* clock table init */
for (i = 2;
@@ -88,20 +78,16 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
- ret = clk_set_rate(cpuclk, rate * 1000);
- if (ret) {
- clk_put(cpuclk);
+ ret = loongson2_cpu_set_rate(rate);
+ if (ret)
return ret;
- }
- policy->clk = cpuclk;
cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
return 0;
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
- clk_put(policy->clk);
return 0;
}
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index a1b8238872a2..d06b37822c3d 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -277,7 +277,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np, "operating-points-v2-qcom-cpu");
+ ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
if (!ret) {
of_node_put(np);
return -ENOENT;
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 8e436dc75c8b..6b6b20da2bcf 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <linux/platform_device.h>
/**
* struct cpu_data
@@ -29,12 +30,6 @@ struct cpu_data {
struct cpufreq_frequency_table *table;
};
-/*
- * Don't use cpufreq on this SoC -- used when the SoC would have otherwise
- * matched a more generic compatible.
- */
-#define SOC_BLACKLIST 1
-
/**
* struct soc_data - SoC specific data
* @flags: SOC_xxx
@@ -264,64 +259,51 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
-static const struct soc_data blacklist = {
- .flags = SOC_BLACKLIST,
-};
-
-static const struct of_device_id node_matches[] __initconst = {
+static const struct of_device_id qoriq_cpufreq_blacklist[] = {
/* e6500 cannot use cpufreq due to erratum A-008083 */
- { .compatible = "fsl,b4420-clockgen", &blacklist },
- { .compatible = "fsl,b4860-clockgen", &blacklist },
- { .compatible = "fsl,t2080-clockgen", &blacklist },
- { .compatible = "fsl,t4240-clockgen", &blacklist },
-
- { .compatible = "fsl,ls1012a-clockgen", },
- { .compatible = "fsl,ls1021a-clockgen", },
- { .compatible = "fsl,ls1028a-clockgen", },
- { .compatible = "fsl,ls1043a-clockgen", },
- { .compatible = "fsl,ls1046a-clockgen", },
- { .compatible = "fsl,ls1088a-clockgen", },
- { .compatible = "fsl,ls2080a-clockgen", },
- { .compatible = "fsl,lx2160a-clockgen", },
- { .compatible = "fsl,p4080-clockgen", },
- { .compatible = "fsl,qoriq-clockgen-1.0", },
- { .compatible = "fsl,qoriq-clockgen-2.0", },
+ { .compatible = "fsl,b4420-clockgen", },
+ { .compatible = "fsl,b4860-clockgen", },
+ { .compatible = "fsl,t2080-clockgen", },
+ { .compatible = "fsl,t4240-clockgen", },
{}
};
-static int __init qoriq_cpufreq_init(void)
+static int qoriq_cpufreq_probe(struct platform_device *pdev)
{
int ret;
- struct device_node *np;
- const struct of_device_id *match;
- const struct soc_data *data;
-
- np = of_find_matching_node(NULL, node_matches);
- if (!np)
- return -ENODEV;
-
- match = of_match_node(node_matches, np);
- data = match->data;
-
- of_node_put(np);
+ struct device_node *np;
- if (data && data->flags & SOC_BLACKLIST)
+ np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist);
+ if (np) {
+ dev_info(&pdev->dev, "Disabling due to erratum A-008083");
return -ENODEV;
+ }
ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
- if (!ret)
- pr_info("Freescale QorIQ CPU frequency scaling driver\n");
+ if (ret)
+ return ret;
- return ret;
+ dev_info(&pdev->dev, "Freescale QorIQ CPU frequency scaling driver\n");
+ return 0;
}
-module_init(qoriq_cpufreq_init);
-static void __exit qoriq_cpufreq_exit(void)
+static int qoriq_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&qoriq_cpufreq_driver);
+
+ return 0;
}
-module_exit(qoriq_cpufreq_exit);
+static struct platform_driver qoriq_cpufreq_platform_driver = {
+ .driver = {
+ .name = "qoriq-cpufreq",
+ },
+ .probe = qoriq_cpufreq_probe,
+ .remove = qoriq_cpufreq_remove,
+};
+module_platform_driver(qoriq_cpufreq_platform_driver);
+
+MODULE_ALIAS("platform:qoriq-cpufreq");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs");
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index f84ecd22f488..8c893043953e 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -7,201 +7,96 @@
* Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
*/
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/types.h>
-static struct cpufreq_frequency_table freq_table[] = {
- { .frequency = 216000 },
- { .frequency = 312000 },
- { .frequency = 456000 },
- { .frequency = 608000 },
- { .frequency = 760000 },
- { .frequency = 816000 },
- { .frequency = 912000 },
- { .frequency = 1000000 },
- { .frequency = CPUFREQ_TABLE_END },
-};
-
-struct tegra20_cpufreq {
- struct device *dev;
- struct cpufreq_driver driver;
- struct clk *cpu_clk;
- struct clk *pll_x_clk;
- struct clk *pll_p_clk;
- bool pll_x_prepared;
-};
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
-static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
- unsigned int index)
+static bool cpu0_node_has_opp_v2_prop(void)
{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
- unsigned int ifreq = clk_get_rate(cpufreq->pll_p_clk) / 1000;
-
- /*
- * Don't switch to intermediate freq if:
- * - we are already at it, i.e. policy->cur == ifreq
- * - index corresponds to ifreq
- */
- if (freq_table[index].frequency == ifreq || policy->cur == ifreq)
- return 0;
-
- return ifreq;
-}
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
-static int tegra_target_intermediate(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
- int ret;
-
- /*
- * Take an extra reference to the main pll so it doesn't turn
- * off when we move the cpu off of it as enabling it again while we
- * switch to it from tegra_target() would take additional time.
- *
- * When target-freq is equal to intermediate freq we don't need to
- * switch to an intermediate freq and so this routine isn't called.
- * Also, we wouldn't be using pll_x anymore and must not take extra
- * reference to it, as it can be disabled now to save some power.
- */
- clk_prepare_enable(cpufreq->pll_x_clk);
-
- ret = clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_p_clk);
- if (ret)
- clk_disable_unprepare(cpufreq->pll_x_clk);
- else
- cpufreq->pll_x_prepared = true;
+ if (of_get_property(np, "operating-points-v2", NULL))
+ ret = true;
+ of_node_put(np);
return ret;
}
-static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
- unsigned long rate = freq_table[index].frequency;
- unsigned int ifreq = clk_get_rate(cpufreq->pll_p_clk) / 1000;
- int ret;
-
- /*
- * target freq == pll_p, don't need to take extra reference to pll_x_clk
- * as it isn't used anymore.
- */
- if (rate == ifreq)
- return clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_p_clk);
-
- ret = clk_set_rate(cpufreq->pll_x_clk, rate * 1000);
- /* Restore to earlier frequency on error, i.e. pll_x */
- if (ret)
- dev_err(cpufreq->dev, "Failed to change pll_x to %lu\n", rate);
-
- ret = clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_x_clk);
- /* This shouldn't fail while changing or restoring */
- WARN_ON(ret);
-
- /*
- * Drop count to pll_x clock only if we switched to intermediate freq
- * earlier while transitioning to a target frequency.
- */
- if (cpufreq->pll_x_prepared) {
- clk_disable_unprepare(cpufreq->pll_x_clk);
- cpufreq->pll_x_prepared = false;
- }
-
- return ret;
-}
-
-static int tegra_cpu_init(struct cpufreq_policy *policy)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
-
- clk_prepare_enable(cpufreq->cpu_clk);
-
- /* FIXME: what's the actual transition time? */
- cpufreq_generic_init(policy, freq_table, 300 * 1000);
- policy->clk = cpufreq->cpu_clk;
- policy->suspend_freq = freq_table[0].frequency;
- return 0;
-}
-
-static int tegra_cpu_exit(struct cpufreq_policy *policy)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
-
- clk_disable_unprepare(cpufreq->cpu_clk);
- return 0;
-}
-
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
- struct tegra20_cpufreq *cpufreq;
+ struct platform_device *cpufreq_dt;
+ struct opp_table *opp_table;
+ struct device *cpu_dev;
+ u32 versions[2];
int err;
- cpufreq = devm_kzalloc(&pdev->dev, sizeof(*cpufreq), GFP_KERNEL);
- if (!cpufreq)
- return -ENOMEM;
+ if (!cpu0_node_has_opp_v2_prop()) {
+ dev_err(&pdev->dev, "operating points not found\n");
+ dev_err(&pdev->dev, "please update your device tree\n");
+ return -ENODEV;
+ }
+
+ if (of_machine_is_compatible("nvidia,tegra20")) {
+ versions[0] = BIT(tegra_sku_info.cpu_process_id);
+ versions[1] = BIT(tegra_sku_info.soc_speedo_id);
+ } else {
+ versions[0] = BIT(tegra_sku_info.cpu_process_id);
+ versions[1] = BIT(tegra_sku_info.cpu_speedo_id);
+ }
+
+ dev_info(&pdev->dev, "hardware version 0x%x 0x%x\n",
+ versions[0], versions[1]);
- cpufreq->cpu_clk = clk_get_sys(NULL, "cclk");
- if (IS_ERR(cpufreq->cpu_clk))
- return PTR_ERR(cpufreq->cpu_clk);
+ cpu_dev = get_cpu_device(0);
+ if (WARN_ON(!cpu_dev))
+ return -ENODEV;
- cpufreq->pll_x_clk = clk_get_sys(NULL, "pll_x");
- if (IS_ERR(cpufreq->pll_x_clk)) {
- err = PTR_ERR(cpufreq->pll_x_clk);
- goto put_cpu;
+ opp_table = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
+ err = PTR_ERR_OR_ZERO(opp_table);
+ if (err) {
+ dev_err(&pdev->dev, "failed to set supported hw: %d\n", err);
+ return err;
}
- cpufreq->pll_p_clk = clk_get_sys(NULL, "pll_p");
- if (IS_ERR(cpufreq->pll_p_clk)) {
- err = PTR_ERR(cpufreq->pll_p_clk);
- goto put_pll_x;
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ err = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to create cpufreq-dt device: %d\n", err);
+ goto err_put_supported_hw;
}
- cpufreq->dev = &pdev->dev;
- cpufreq->driver.get = cpufreq_generic_get;
- cpufreq->driver.attr = cpufreq_generic_attr;
- cpufreq->driver.init = tegra_cpu_init;
- cpufreq->driver.exit = tegra_cpu_exit;
- cpufreq->driver.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK;
- cpufreq->driver.verify = cpufreq_generic_frequency_table_verify;
- cpufreq->driver.suspend = cpufreq_generic_suspend;
- cpufreq->driver.driver_data = cpufreq;
- cpufreq->driver.target_index = tegra_target;
- cpufreq->driver.get_intermediate = tegra_get_intermediate;
- cpufreq->driver.target_intermediate = tegra_target_intermediate;
- snprintf(cpufreq->driver.name, CPUFREQ_NAME_LEN, "tegra");
-
- err = cpufreq_register_driver(&cpufreq->driver);
- if (err)
- goto put_pll_p;
-
- platform_set_drvdata(pdev, cpufreq);
+ platform_set_drvdata(pdev, cpufreq_dt);
return 0;
-put_pll_p:
- clk_put(cpufreq->pll_p_clk);
-put_pll_x:
- clk_put(cpufreq->pll_x_clk);
-put_cpu:
- clk_put(cpufreq->cpu_clk);
+err_put_supported_hw:
+ dev_pm_opp_put_supported_hw(opp_table);
return err;
}
static int tegra20_cpufreq_remove(struct platform_device *pdev)
{
- struct tegra20_cpufreq *cpufreq = platform_get_drvdata(pdev);
+ struct platform_device *cpufreq_dt;
+ struct opp_table *opp_table;
- cpufreq_unregister_driver(&cpufreq->driver);
+ cpufreq_dt = platform_get_drvdata(pdev);
+ platform_device_unregister(cpufreq_dt);
- clk_put(cpufreq->pll_p_clk);
- clk_put(cpufreq->pll_x_clk);
- clk_put(cpufreq->cpu_clk);
+ opp_table = dev_pm_opp_get_opp_table(get_cpu_device(0));
+ dev_pm_opp_put_supported_hw(opp_table);
+ dev_pm_opp_put_opp_table(opp_table);
return 0;
}
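
Note: the versions[] pair above works together with opp-supported-hw
properties in the OPP nodes: the OPP core enables an OPP only when every DT
cell ANDs non-zero with the matching version word, so BIT(process_id) and
BIT(speedo_id) select the frequencies fused as safe for this chip. A small
illustrative sketch of that gating (not the OPP core's exact code):

	/* Illustrative: one u32 cell per version word, matched pairwise */
	static bool opp_is_supported(const u32 *opp_hw, const u32 *versions,
				     unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++)
			if (!(opp_hw[i] & versions[i]))
				return false;

		return true;
	}
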
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 99a2d72ac02b..51a7e89085c0 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -94,3 +94,16 @@ config ARM_TEGRA_CPUIDLE
select ARM_CPU_SUSPEND
help
Select this to enable cpuidle for NVIDIA Tegra20/30/114/124 SoCs.
+
+config ARM_QCOM_SPM_CPUIDLE
+ bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
+ depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64
+ select ARM_CPU_SUSPEND
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ select DT_IDLE_STATES
+ select QCOM_SCM
+ help
+ Select this to enable cpuidle for Qualcomm processors.
+ The Subsystem Power Manager (SPM) controls low power modes for the
+ CPU and L2 cores. It interfaces with various system drivers to put
+ the cores in low power modes.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 55a464f6a78b..f07800cbb43f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle_psci.o
cpuidle_psci-y := cpuidle-psci.o
cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
obj-$(CONFIG_ARM_TEGRA_CPUIDLE) += cpuidle-tegra.o
+obj-$(CONFIG_ARM_QCOM_SPM_CPUIDLE) += cpuidle-qcom-spm.o
###############################################################################
# MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index bae9140a65a5..d0fb585073c6 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -58,6 +58,10 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
u32 state;
int ret;
+ ret = cpu_pm_enter();
+ if (ret)
+ return -1;
+
/* Do runtime PM to manage a hierarchical CPU toplogy. */
pm_runtime_put_sync_suspend(pd_dev);
@@ -65,10 +69,12 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
if (!state)
state = states[idx];
- ret = psci_enter_state(idx, state);
+ ret = psci_cpu_suspend_enter(state) ? -1 : idx;
pm_runtime_get_sync(pd_dev);
+ cpu_pm_exit();
+
/* Clear the domain state to start fresh when back from idle. */
psci_set_domain_state(0);
return ret;
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 74c247972bb3..6513ef2af66a 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -19,6 +19,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>
+#include <asm/idle.h>
#include <asm/plpar_wrappers.h>
struct cpuidle_driver pseries_idle_driver = {
@@ -31,39 +32,15 @@ static struct cpuidle_state *cpuidle_state_table __read_mostly;
static u64 snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;
-static inline void idle_loop_prolog(unsigned long *in_purr)
-{
- ppc64_runlatch_off();
- *in_purr = mfspr(SPRN_PURR);
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- get_lppaca()->idle = 1;
-}
-
-static inline void idle_loop_epilog(unsigned long in_purr)
-{
- u64 wait_cycles;
-
- wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
- wait_cycles += mfspr(SPRN_PURR) - in_purr;
- get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
- get_lppaca()->idle = 0;
-
- ppc64_runlatch_on();
-}
-
static int snooze_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- unsigned long in_purr;
u64 snooze_exit_time;
set_thread_flag(TIF_POLLING_NRFLAG);
- idle_loop_prolog(&in_purr);
+ pseries_idle_prolog();
local_irq_enable();
snooze_exit_time = get_tb() + snooze_timeout;
@@ -87,7 +64,7 @@ static int snooze_loop(struct cpuidle_device *dev,
local_irq_disable();
- idle_loop_epilog(in_purr);
+ pseries_idle_epilog();
return index;
}
@@ -113,9 +90,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- unsigned long in_purr;
- idle_loop_prolog(&in_purr);
+ pseries_idle_prolog();
get_lppaca()->donate_dedicated_cpu = 1;
HMT_medium();
@@ -124,7 +100,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
local_irq_disable();
get_lppaca()->donate_dedicated_cpu = 0;
- idle_loop_epilog(in_purr);
+ pseries_idle_epilog();
return index;
}
@@ -133,9 +109,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- unsigned long in_purr;
- idle_loop_prolog(&in_purr);
+ pseries_idle_prolog();
/*
* Yield the processor to the hypervisor. We return if
@@ -147,7 +122,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
check_and_cede_processor();
local_irq_disable();
- idle_loop_epilog(in_purr);
+ pseries_idle_epilog();
return index;
}
diff --git a/drivers/soc/qcom/spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 8e10e02c6aa5..adf91a6e4d7d 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -19,10 +19,11 @@
#include <linux/cpu_pm.h>
#include <linux/qcom_scm.h>
-#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
+#include "dt_idle_states.h"
+
#define MAX_PMIC_DATA 2
#define MAX_SEQ_DATA 64
#define SPM_CTL_INDEX 0x7f
@@ -62,6 +63,7 @@ struct spm_reg_data {
};
struct spm_driver_data {
+ struct cpuidle_driver cpuidle_driver;
void __iomem *reg_base;
const struct spm_reg_data *reg_data;
};
@@ -107,11 +109,6 @@ static const struct spm_reg_data spm_reg_8064_cpu = {
.start_index[PM_SLEEP_MODE_SPC] = 2,
};
-static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv);
-
-typedef int (*idle_fn)(void);
-static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops);
-
static inline void spm_register_write(struct spm_driver_data *drv,
enum spm_reg reg, u32 val)
{
@@ -172,10 +169,9 @@ static int qcom_pm_collapse(unsigned long int unused)
return -1;
}
-static int qcom_cpu_spc(void)
+static int qcom_cpu_spc(struct spm_driver_data *drv)
{
int ret;
- struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv);
spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
ret = cpu_suspend(0, qcom_pm_collapse);
@@ -190,94 +186,49 @@ static int qcom_cpu_spc(void)
return ret;
}
-static int qcom_idle_enter(unsigned long index)
+static int spm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
- return __this_cpu_read(qcom_idle_ops)[index]();
+ struct spm_driver_data *data = container_of(drv, struct spm_driver_data,
+ cpuidle_driver);
+
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(qcom_cpu_spc, idx, data);
}
-static const struct of_device_id qcom_idle_state_match[] __initconst = {
- { .compatible = "qcom,idle-state-spc", .data = qcom_cpu_spc },
+static struct cpuidle_driver qcom_spm_idle_driver = {
+ .name = "qcom_spm",
+ .owner = THIS_MODULE,
+ .states[0] = {
+ .enter = spm_enter_idle_state,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+ .name = "WFI",
+ .desc = "ARM WFI",
+ }
+};
+
+static const struct of_device_id qcom_idle_state_match[] = {
+ { .compatible = "qcom,idle-state-spc", .data = spm_enter_idle_state },
{ },
};
-static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
+static int spm_cpuidle_init(struct cpuidle_driver *drv, int cpu)
{
- const struct of_device_id *match_id;
- struct device_node *state_node;
- int i;
- int state_count = 1;
- idle_fn idle_fns[CPUIDLE_STATE_MAX];
- idle_fn *fns;
- cpumask_t mask;
- bool use_scm_power_down = false;
-
- if (!qcom_scm_is_available())
- return -EPROBE_DEFER;
-
- for (i = 0; ; i++) {
- state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
- if (!state_node)
- break;
-
- if (!of_device_is_available(state_node))
- continue;
-
- if (i == CPUIDLE_STATE_MAX) {
- pr_warn("%s: cpuidle states reached max possible\n",
- __func__);
- break;
- }
-
- match_id = of_match_node(qcom_idle_state_match, state_node);
- if (!match_id)
- return -ENODEV;
-
- idle_fns[state_count] = match_id->data;
-
- /* Check if any of the states allow power down */
- if (match_id->data == qcom_cpu_spc)
- use_scm_power_down = true;
-
- state_count++;
- }
-
- if (state_count == 1)
- goto check_spm;
-
- fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns),
- GFP_KERNEL);
- if (!fns)
- return -ENOMEM;
-
- for (i = 1; i < state_count; i++)
- fns[i] = idle_fns[i];
+ int ret;
- if (use_scm_power_down) {
- /* We have atleast one power down mode */
- cpumask_clear(&mask);
- cpumask_set_cpu(cpu, &mask);
- qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
- }
+ memcpy(drv, &qcom_spm_idle_driver, sizeof(*drv));
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
- per_cpu(qcom_idle_ops, cpu) = fns;
+ /* Parse idle states from device tree */
+ ret = dt_init_idle_driver(drv, qcom_idle_state_match, 1);
+ if (ret <= 0)
+ return ret ? : -ENODEV;
- /*
- * SPM probe for the cpu should have happened by now, if the
- * SPM device does not exist, return -ENXIO to indicate that the
- * cpu does not support idle states.
- */
-check_spm:
- return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
+ /* We have at least one power down mode */
+ return qcom_scm_set_warm_boot_addr(cpu_resume_arm, drv->cpumask);
}
-static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
- .suspend = qcom_idle_enter,
- .init = qcom_cpuidle_init,
-};
-
-CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
-CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
-
static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
int *spm_cpu)
{
@@ -323,11 +274,15 @@ static int spm_dev_probe(struct platform_device *pdev)
struct resource *res;
const struct of_device_id *match_id;
void __iomem *addr;
- int cpu;
+ int cpu, ret;
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
drv = spm_get_drv(pdev, &cpu);
if (!drv)
return -EINVAL;
+ platform_set_drvdata(pdev, drv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
@@ -340,6 +295,10 @@ static int spm_dev_probe(struct platform_device *pdev)
drv->reg_data = match_id->data;
+ ret = spm_cpuidle_init(&drv->cpuidle_driver, cpu);
+ if (ret)
+ return ret;
+
/* Write the SPM sequences first.. */
addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
__iowrite32_copy(addr, drv->reg_data->seq,
@@ -362,13 +321,20 @@ static int spm_dev_probe(struct platform_device *pdev)
/* Set up Standby as the default low power mode */
spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
- per_cpu(cpu_spm_drv, cpu) = drv;
+ return cpuidle_register(&drv->cpuidle_driver, NULL);
+}
+
+static int spm_dev_remove(struct platform_device *pdev)
+{
+ struct spm_driver_data *drv = platform_get_drvdata(pdev);
+ cpuidle_unregister(&drv->cpuidle_driver);
return 0;
}
static struct platform_driver spm_driver = {
.probe = spm_dev_probe,
+ .remove = spm_dev_remove,
.driver = {
.name = "saw",
.of_match_table = spm_match_table,
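
Note: CPU_PM_CPU_IDLE_ENTER_PARAM() in spm_enter_idle_state() wraps the SPM
power-down call with the CPU PM notifier chain. Approximately (a sketch of
the pattern, not the exact macro expansion in <linux/cpuidle.h>):

	static int enter_with_cpu_pm(int (*fn)(struct spm_driver_data *),
				     int idx, struct spm_driver_data *data)
	{
		int ret;

		if (!idx) {
			cpu_do_idle();          /* state 0 stays a plain WFI */
			return idx;
		}

		ret = cpu_pm_enter();           /* notify GIC, VFP, ... */
		if (!ret) {
			ret = fn(data);  /* qcom_cpu_spc() -> cpu_suspend() */
			cpu_pm_exit();
		}

		return ret ? -1 : idx;
	}
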
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 313b0290e97b..150045849d78 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -365,7 +365,6 @@ static int tegra_cpuidle_probe(struct platform_device *pdev)
break;
case TEGRA30:
- tegra_cpuidle_disable_state(TEGRA_CC6);
break;
case TEGRA114:
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index cdeedbf02646..091d1caceb41 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -18,14 +18,6 @@
#include "cpuidle.h"
-static unsigned int sysfs_switch;
-static int __init cpuidle_sysfs_setup(char *unused)
-{
- sysfs_switch = 1;
- return 1;
-}
-__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
-
static ssize_t show_available_governors(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -35,10 +27,10 @@ static ssize_t show_available_governors(struct device *dev,
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
- if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) -
- CPUIDLE_NAME_LEN - 2))
+ if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name);
+
+ i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name);
}
out:
@@ -85,58 +77,43 @@ static ssize_t store_current_governor(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- char gov_name[CPUIDLE_NAME_LEN];
- int ret = -EINVAL;
- size_t len = count;
+ char gov_name[CPUIDLE_NAME_LEN + 1];
+ int ret;
struct cpuidle_governor *gov;
- if (!len || len >= sizeof(gov_name))
+ ret = sscanf(buf, "%" __stringify(CPUIDLE_NAME_LEN) "s", gov_name);
+ if (ret != 1)
return -EINVAL;
- memcpy(gov_name, buf, len);
- gov_name[len] = '\0';
- if (gov_name[len - 1] == '\n')
- gov_name[--len] = '\0';
-
mutex_lock(&cpuidle_lock);
-
+ ret = -EINVAL;
list_for_each_entry(gov, &cpuidle_governors, governor_list) {
- if (strlen(gov->name) == len && !strcmp(gov->name, gov_name)) {
+ if (!strncmp(gov->name, gov_name, CPUIDLE_NAME_LEN)) {
ret = cpuidle_switch_governor(gov);
break;
}
}
-
mutex_unlock(&cpuidle_lock);
- if (ret)
- return ret;
- else
- return count;
+ return ret ? ret : count;
}
-static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL);
-static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
-
-static struct attribute *cpuidle_default_attrs[] = {
- &dev_attr_current_driver.attr,
- &dev_attr_current_governor_ro.attr,
- NULL
-};
-
static DEVICE_ATTR(available_governors, 0444, show_available_governors, NULL);
+static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL);
static DEVICE_ATTR(current_governor, 0644, show_current_governor,
- store_current_governor);
+ store_current_governor);
+static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
-static struct attribute *cpuidle_switch_attrs[] = {
+static struct attribute *cpuidle_attrs[] = {
&dev_attr_available_governors.attr,
&dev_attr_current_driver.attr,
&dev_attr_current_governor.attr,
+ &dev_attr_current_governor_ro.attr,
NULL
};
static struct attribute_group cpuidle_attr_group = {
- .attrs = cpuidle_default_attrs,
+ .attrs = cpuidle_attrs,
.name = "cpuidle",
};
@@ -146,9 +123,6 @@ static struct attribute_group cpuidle_attr_group = {
*/
int cpuidle_add_interface(struct device *dev)
{
- if (sysfs_switch)
- cpuidle_attr_group.attrs = cpuidle_switch_attrs;
-
return sysfs_create_group(&dev->kobj, &cpuidle_attr_group);
}
@@ -167,11 +141,6 @@ struct cpuidle_attr {
ssize_t (*store)(struct cpuidle_device *, const char *, size_t count);
};
-#define define_one_ro(_name, show) \
- static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
-#define define_one_rw(_name, show, store) \
- static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store)
-
#define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr)
struct cpuidle_device_kobj {
@@ -431,12 +400,12 @@ static inline void cpuidle_remove_s2idle_attr_group(struct cpuidle_state_kobj *k
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr,
- char * buf)
+ char *buf)
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
- struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
+ struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
if (cattr->show)
ret = cattr->show(state, state_usage, buf);
@@ -515,7 +484,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
&kdev->kobj, "state%d", i);
if (ret) {
- kfree(kobj);
+ kobject_put(&kobj->kobj);
goto error_state;
}
cpuidle_add_s2idle_attr_group(kobj);
@@ -646,7 +615,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
&kdev->kobj, "driver");
if (ret) {
- kfree(kdrv);
+ kobject_put(&kdrv->kobj);
return ret;
}
@@ -740,7 +709,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (error) {
- kfree(kdev);
+ kobject_put(&kdev->kobj);
return error;
}
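
Note: the kfree() -> kobject_put() conversions above follow the kobject
lifetime rule: once kobject_init_and_add() has run, the object is refcounted
even if the call failed, and must be released via kobject_put() so the
ktype's ->release() does the freeing; a direct kfree() leaks the kobject name
and bypasses the refcount. Sketch of the correct pattern (illustrative types):

	#include <linux/kobject.h>
	#include <linux/slab.h>

	struct my_obj {
		struct kobject kobj;
	};

	static void my_release(struct kobject *kobj)
	{
		/* The release callback is the only place allowed to free it */
		kfree(container_of(kobj, struct my_obj, kobj));
	}

	static struct kobj_type my_ktype = { .release = my_release };

	static int my_add(struct kobject *parent)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		int ret;

		if (!obj)
			return -ENOMEM;

		ret = kobject_init_and_add(&obj->kobj, &my_ktype, parent,
					   "example");
		if (ret)
			/* not kfree(obj): drop the ref so ->release() runs */
			kobject_put(&obj->kobj);

		return ret;
	}
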
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index a5fd8975f3d3..a6abb701bfc6 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -8,7 +8,7 @@
* This file add support for AES cipher with 128,192,256 bits keysize in
* CBC and ECB mode.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include <linux/crypto.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 3e4e4bbda34c..b957061424a1 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -7,7 +7,7 @@
*
* Core file which registers crypto algorithms supported by the CryptoEngine.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include <linux/clk.h>
#include <linux/crypto.h>
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 84d52fc3a2da..c89cb2ee2496 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -8,7 +8,7 @@
* This file add support for AES cipher with 128,192,256 bits keysize in
* CBC and ECB mode.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include <linux/crypto.h>
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 6b301afffd11..5d9d0fedcb06 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -7,7 +7,7 @@
*
* Core file which registers crypto algorithms supported by the SecuritySystem
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -537,10 +537,8 @@ static int sun8i_ss_probe(struct platform_device *pdev)
return err;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n");
+ if (irq < 0)
return irq;
- }
ss->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(ss->reset)) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 9d4ead2f7ebb..411857fad8ba 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -253,10 +253,8 @@ static int meson_crypto_probe(struct platform_device *pdev)
mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
for (i = 0; i < MAXFLOW; i++) {
mc->irqs[i] = platform_get_irq(pdev, i);
- if (mc->irqs[i] < 0) {
- dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i);
+ if (mc->irqs[i] < 0)
return mc->irqs[i];
- }
err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0,
"gxl-crypto", mc);
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index e536e2a6bbd8..75ccf41a7cb9 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -31,7 +31,6 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index fcf1effc7661..62ba0325a618 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2239,16 +2239,12 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
if (keylen > blocksize) {
- SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
-
- hdesc->tfm = tfm_ctx->child_hash;
-
tfm_ctx->hmac_key_length = blocksize;
- ret = crypto_shash_digest(hdesc, key, keylen,
- tfm_ctx->hmac_key);
+
+ ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
+ tfm_ctx->hmac_key);
if (ret)
return ret;
-
} else {
memcpy(tfm_ctx->hmac_key, key, keylen);
tfm_ctx->hmac_key_length = keylen;
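
Note: this hunk and the ccp/ccree hunks below all switch to the same new
helper; crypto_shash_tfm_digest() essentially folds the removed boilerplate
into one call. A sketch of what it replaces (close to the actual
crypto/shash.c implementation):

	#include <crypto/hash.h>

	/* What crypto_shash_tfm_digest(tfm, data, len, out) stands for */
	static int shash_digest_open_coded(struct crypto_shash *tfm,
					   const u8 *data, unsigned int len,
					   u8 *out)
	{
		SHASH_DESC_ON_STACK(desc, tfm);
		int err;

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);  /* also wipes the on-stack desc */

		return err;
	}
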
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c8b9408541a9..a353217a0d33 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -308,9 +308,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
container_of(areq, struct skcipher_request, base);
struct iproc_ctx_s *ctx = rctx->ctx;
struct spu_cipher_parms cipher_parms;
- int err = 0;
- unsigned int chunksize = 0; /* Num bytes of request to submit */
- int remaining = 0; /* Bytes of request still to process */
+ int err;
+ unsigned int chunksize; /* Num bytes of request to submit */
+ int remaining; /* Bytes of request still to process */
int chunk_start; /* Beginning of data for current SPU msg */
/* IV or ctr value to use in this SPU msg */
@@ -698,7 +698,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
/* number of bytes still to be hashed in this req */
unsigned int nbytes_to_hash = 0;
- int err = 0;
+ int err;
unsigned int chunksize = 0; /* length of hash carry + new data */
/*
* length of new data, not from hash carry, to be submitted in
@@ -1664,7 +1664,7 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
struct spu_hw *spu = &iproc_priv.spu;
struct brcm_message *mssg = msg;
struct iproc_reqctx_s *rctx;
- int err = 0;
+ int err;
rctx = mssg->ctx;
if (unlikely(!rctx)) {
@@ -1967,7 +1967,7 @@ static int ahash_enqueue(struct ahash_request *req)
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
- int err = 0;
+ int err;
const char *alg_name;
flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
@@ -2299,7 +2299,7 @@ ahash_finup_exit:
static int ahash_digest(struct ahash_request *req)
{
- int err = 0;
+ int err;
flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
@@ -4436,7 +4436,7 @@ static int spu_mb_init(struct device *dev)
for (i = 0; i < iproc_priv.spu.num_chan; i++) {
iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
if (IS_ERR(iproc_priv.mbox[i])) {
- err = (int)PTR_ERR(iproc_priv.mbox[i]);
+ err = PTR_ERR(iproc_priv.mbox[i]);
dev_err(dev,
"Mbox channel %d request failed with err %d",
i, err);
@@ -4717,21 +4717,20 @@ static int spu_dt_read(struct platform_device *pdev)
matched_spu_type = of_device_get_match_data(dev);
if (!matched_spu_type) {
- dev_err(&pdev->dev, "Failed to match device\n");
+ dev_err(dev, "Failed to match device\n");
return -ENODEV;
}
spu->spu_type = matched_spu_type->type;
spu->spu_subtype = matched_spu_type->subtype;
- i = 0;
for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
if (IS_ERR(spu->reg_vbase[i])) {
err = PTR_ERR(spu->reg_vbase[i]);
- dev_err(&pdev->dev, "Failed to map registers: %d\n",
+ dev_err(dev, "Failed to map registers: %d\n",
err);
spu->reg_vbase[i] = NULL;
return err;
@@ -4747,7 +4746,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
- int err = 0;
+ int err;
iproc_priv.pdev = pdev;
platform_set_drvdata(iproc_priv.pdev,
@@ -4757,7 +4756,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto failure;
- err = spu_mb_init(&pdev->dev);
+ err = spu_mb_init(dev);
if (err < 0)
goto failure;
@@ -4766,7 +4765,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
else if (spu->spu_type == SPU_TYPE_SPU2)
iproc_priv.bcm_hdr_len = 0;
- spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
+ spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
spu_counters_init();
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index e91be9b8b083..788c6607078b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -346,7 +346,7 @@ static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
}
/**
- * nitrox_bist_check - Check NITORX BIST registers status
+ * nitrox_bist_check - Check NITROX BIST registers status
* @ndev: NITROX device
*/
static int nitrox_bist_check(struct nitrox_device *ndev)
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index e0a8bd15aa74..32268e239bf1 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD
config CRYPTO_DEV_SP_CCP
bool "Cryptographic Coprocessor device"
default y
- depends on CRYPTO_DEV_CCP_DD
+ depends on CRYPTO_DEV_CCP_DD && DMADEVICES
select HW_RANDOM
select DMA_ENGINE
- select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 474e6f1a6a84..b0cc2bd73af8 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -272,9 +272,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
-
- SHASH_DESC_ON_STACK(sdesc, shash);
-
unsigned int block_size = crypto_shash_blocksize(shash);
unsigned int digest_size = crypto_shash_digestsize(shash);
int i, ret;
@@ -289,10 +286,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
- sdesc->tfm = shash;
-
- ret = crypto_shash_digest(sdesc, key, key_len,
- ctx->u.sha.key);
+ ret = crypto_shash_tfm_digest(shash, key, key_len,
+ ctx->u.sha.key);
if (ret)
return -EINVAL;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 896f190b9a50..a2426334be61 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -20,6 +20,7 @@
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
+#include <linux/gfp.h>
#include <asm/smp.h>
@@ -44,6 +45,14 @@ MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during
static bool psp_dead;
static int psp_timeout;
+/* Trusted Memory Region (TMR):
+ * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
+ * to allocate the memory, which will return aligned memory for the specified
+ * allocation order.
+ */
+#define SEV_ES_TMR_SIZE (1024 * 1024)
+static void *sev_es_tmr;
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -214,6 +223,20 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
+ if (sev_es_tmr) {
+ u64 tmr_pa;
+
+ /*
+ * Do not include the encryption mask on the physical
+ * address of the TMR (firmware should clear it anyway).
+ */
+ tmr_pa = __pa(sev_es_tmr);
+
+ sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES;
+ sev->init_cmd_buf.tmr_address = tmr_pa;
+ sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE;
+ }
+
rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
if (rc)
return rc;
@@ -371,8 +394,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
goto cmd;
/* allocate a physically contiguous buffer to store the CSR blob */
- if (!access_ok(input.address, input.length) ||
- input.length > SEV_FW_BLOB_MAX_SIZE) {
+ if (input.length > SEV_FW_BLOB_MAX_SIZE) {
ret = -EFAULT;
goto e_free;
}
@@ -609,12 +631,6 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
- /* Check if we have write access to the userspace buffer */
- if (input.address &&
- input.length &&
- !access_ok(input.address, input.length))
- return -EFAULT;
-
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -730,15 +746,13 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
goto cmd;
/* Allocate a physically contiguous buffer to store the PDH blob. */
- if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
+ if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) {
ret = -EFAULT;
goto e_free;
}
/* Allocate a physically contiguous buffer to store the cert chain blob. */
- if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.cert_chain_address, input.cert_chain_len)) {
+ if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) {
ret = -EFAULT;
goto e_free;
}
@@ -1012,6 +1026,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
+ struct page *tmr_page;
int error, rc;
if (!sev)
@@ -1041,6 +1056,16 @@ void sev_pci_init(void)
sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
+ /* Obtain the TMR memory area for SEV-ES use */
+ tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE));
+ if (tmr_page) {
+ sev_es_tmr = page_address(tmr_page);
+ } else {
+ sev_es_tmr = NULL;
+ dev_warn(sev->dev,
+ "SEV: TMR allocation failed, SEV-ES support unavailable\n");
+ }
+
/* Initialize the platform */
rc = sev_platform_init(&error);
if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
@@ -1075,4 +1100,13 @@ void sev_pci_exit(void)
return;
sev_platform_shutdown(NULL);
+
+ if (sev_es_tmr) {
+ /* The TMR area was encrypted, flush it from the cache */
+ wbinvd_on_all_cpus();
+
+ free_pages((unsigned long)sev_es_tmr,
+ get_order(SEV_ES_TMR_SIZE));
+ sev_es_tmr = NULL;
+ }
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index a84335328f37..872ea3ff1c6b 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -427,12 +427,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
int key_len = keylen >> 1;
int err;
- SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
-
- desc->tfm = ctx_p->shash_tfm;
-
- err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
- ctx_p->user.key + key_len);
+ err = crypto_shash_tfm_digest(ctx_p->shash_tfm,
+ ctx_p->user.key, key_len,
+ ctx_p->user.key + key_len);
if (err) {
dev_err(dev, "Failed to hash ESSIV key.\n");
return err;
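crypto_shash_tfm_digest() folds the on-stack descriptor boilerplate that the removed lines spelled out into a single call. Roughly, the helper has this shape (a sketch for orientation, not the authoritative implementation):

    #include <crypto/hash.h>

    int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
                                unsigned int len, u8 *out)
    {
            SHASH_DESC_ON_STACK(desc, tfm);
            int err;

            desc->tfm = tfm;
            err = crypto_shash_digest(desc, data, len, out);
            shash_desc_zero(desc);  /* wipe the descriptor off the stack */

            return err;
    }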
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index c454afce7781..7083767602fc 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -26,7 +26,7 @@ static struct debugfs_reg32 ver_sig_regs[] = {
{ .name = "VERSION" }, /* Must be 1st */
};
-static struct debugfs_reg32 pid_cid_regs[] = {
+static const struct debugfs_reg32 pid_cid_regs[] = {
CC_DEBUG_REG(PERIPHERAL_ID_0),
CC_DEBUG_REG(PERIPHERAL_ID_1),
CC_DEBUG_REG(PERIPHERAL_ID_2),
@@ -38,7 +38,7 @@ static struct debugfs_reg32 pid_cid_regs[] = {
CC_DEBUG_REG(COMPONENT_ID_3),
};
-static struct debugfs_reg32 debug_regs[] = {
+static const struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),
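Constifying these tables is safe because debugfs only reads them: the regs member of struct debugfs_regset32 is a const struct debugfs_reg32 * pointer. A minimal usage sketch (the regset variable and the dir/cc_base names are hypothetical):

    static struct debugfs_regset32 pid_cid_regset = {
            .regs  = pid_cid_regs,  /* a const table is accepted here */
            .nregs = ARRAY_SIZE(pid_cid_regs),
    };

    /* later, once the MMIO base is mapped: */
    pid_cid_regset.base = cc_base;
    debugfs_create_regset32("pid_cid_regs", 0444, dir, &pid_cid_regset);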
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index c29b80dd30d8..f26a7a15551a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -44,7 +44,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
@@ -256,7 +255,7 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
return;
}
for (i = 0; i < nk; i++)
- w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+ w_ring[i] = get_unaligned_be32(&key[i * 4]);
i = 0;
temp = w_ring[nk - 1];
@@ -275,7 +274,7 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
}
i--;
for (k = 0, j = i % nk; k < nk; k++) {
- *((u32 *)dec_key + k) = htonl(w_ring[j]);
+ put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
j--;
if (j < 0)
j += nk;
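The switch to get_unaligned_be32()/put_unaligned_be32() matters on strict-alignment architectures, where the old u32 casts could fault (and are undefined behaviour in C regardless). A sketch of the equivalence (hypothetical buffer):

    #include <asm/unaligned.h>

    static u32 read_key_word(const u8 *key, int i)
    {
            /* old style: be32_to_cpu(*(u32 *)&key[4 * i]) dereferences a
             * possibly unaligned u32; the helper is safe on any arch. */
            return get_unaligned_be32(&key[4 * i]);
    }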
@@ -1054,8 +1053,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
u32 temp = be32_to_cpu(*--b);
temp = ~temp;
- c = (u64)temp + 1; // No of block can processed withou overflow
- if ((bytes / AES_BLOCK_SIZE) > c)
+ c = (u64)temp + 1; // number of blocks that can be processed without overflow
+ if ((bytes / AES_BLOCK_SIZE) >= c)
bytes = c * AES_BLOCK_SIZE;
return bytes;
}
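The '>' to '>=' change closes a corner case with a trailing partial block: bytes / AES_BLOCK_SIZE rounds down, so a request of exactly c full blocks plus a few extra bytes used to slip past the clamp and wrap the 32-bit counter mid-request. A worked example with hypothetical values:

    /* counter = 0xFFFFFFFE  ->  temp = ~counter = 1, so c = 2 blocks left
     * bytes   = 33          ->  33 / 16 = 2; old test: 2 > 2 is false,
     *                           all 33 bytes pass and the counter wraps.
     * New test: 2 >= 2 clamps bytes to 2 * 16 = 32. */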
@@ -1077,7 +1076,14 @@ static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
keylen = ablkctx->enckey_len / 2;
key = ablkctx->key + keylen;
- ret = aes_expandkey(&aes, key, keylen);
+ /* For a 192-bit key, remove the padding zeroes that were
+ * added in chcr_xts_setkey.
+ */
+ if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
+ == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
+ ret = aes_expandkey(&aes, key, keylen - 8);
+ else
+ ret = aes_expandkey(&aes, key, keylen);
if (ret)
return ret;
aes_encrypt(&aes, iv, iv);
@@ -1158,15 +1164,16 @@ static int chcr_final_cipher_iv(struct skcipher_request *req,
static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err)
{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chcr_context *ctx = c_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
- struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
- struct cipher_wr_param wrparam;
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct chcr_dev *dev = c_ctx(tfm)->dev;
+ struct chcr_context *ctx = c_ctx(tfm);
+ struct adapter *adap = padap(ctx->dev);
+ struct cipher_wr_param wrparam;
+ struct sk_buff *skb;
int bytes;
if (err)
@@ -1197,6 +1204,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
+ memcpy(req->iv, reqctx->init_iv, IV);
+ atomic_inc(&adap->chcr_stats.fallback);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
@@ -1248,20 +1257,28 @@ static int process_cipher(struct skcipher_request *req,
struct sk_buff **skb,
unsigned short op_type)
{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
- struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct adapter *adap = padap(c_ctx(tfm)->dev);
struct cipher_wr_param wrparam;
int bytes, err = -EINVAL;
+ int subtype;
reqctx->processed = 0;
reqctx->partial_req = 0;
if (!req->iv)
goto error;
+ subtype = get_cryptoalg_subtype(tfm);
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
(req->cryptlen == 0) ||
(req->cryptlen % crypto_skcipher_blocksize(tfm))) {
+ if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
+ goto fallback;
+ else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
+ subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+ goto fallback;
pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
ablkctx->enckey_len, req->cryptlen, ivsize);
goto error;
@@ -1302,12 +1319,10 @@ static int process_cipher(struct skcipher_request *req,
} else {
bytes = req->cryptlen;
}
- if (get_cryptoalg_subtype(tfm) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->iv, bytes);
}
- if (get_cryptoalg_subtype(tfm) ==
- CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
@@ -1315,20 +1330,25 @@ static int process_cipher(struct skcipher_request *req,
/* initialize counter portion of counter block */
*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ memcpy(reqctx->init_iv, reqctx->iv, IV);
} else {
memcpy(reqctx->iv, req->iv, IV);
+ memcpy(reqctx->init_iv, req->iv, IV);
}
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
+fallback: atomic_inc(&adap->chcr_stats.fallback);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
req->dst,
req->cryptlen,
- reqctx->iv,
+ subtype ==
+ CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
+ reqctx->iv : req->iv,
op_type);
goto error;
}
@@ -1443,6 +1463,7 @@ static int chcr_device_init(struct chcr_context *ctx)
if (!ctx->dev) {
u_ctx = assign_chcr_device();
if (!u_ctx) {
+ err = -ENXIO;
pr_err("chcr device assignment fails\n");
goto out;
}
@@ -1757,7 +1778,7 @@ static int chcr_ahash_final(struct ahash_request *req)
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
struct chcr_context *ctx = h_ctx(rtfm);
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- int error = -EINVAL;
+ int error;
unsigned int cpu;
cpu = get_cpu();
@@ -1984,7 +2005,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->data_len += params.bfr_len + params.sg_len;
if (req->nbytes == 0) {
- create_last_hash_block(req_ctx->reqbfr, bs, 0);
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.more = 1;
params.bfr_len = bs;
}
@@ -2250,12 +2271,28 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
ablkctx->enckey_len = key_len;
get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
+ /* Both XTS keys must be padded with zeroes to a 16-byte
+ * boundary, so each 24-byte key gets 8 zero bytes appended.
+ */
+ if (key_len == 48) {
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
+ + 16) >> 4;
+ memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
+ memset(ablkctx->key + 24, 0, 8);
+ memset(ablkctx->key + 56, 0, 8);
+ ablkctx->enckey_len = 64;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ } else {
+ ablkctx->key_ctx_hdr =
FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
CHCR_KEYCTX_NO_KEY, 1,
0, context_size);
+ }
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
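The 192-bit branch above rebuilds the key material in place. The resulting 64-byte layout (offsets in bytes, given that key1 and key2 were initially stored back to back, 24 bytes each):

    /*   0         24    32         56    64
     *   |  key1    | 0s  |  key2    | 0s  |
     *
     * memmove(key + 32, key + 24, 24) first parks key2 at offset 32,
     * then the two memset() calls zero bytes 24-31 and 56-63. */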
@@ -2556,7 +2593,7 @@ int chcr_aead_dma_map(struct device *dev,
int dst_size;
dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
+ 0 : authsize);
if (!req->cryptlen || !dst_size)
return 0;
reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
@@ -2603,15 +2640,16 @@ void chcr_aead_dma_unmap(struct device *dev,
int dst_size;
dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
+ 0 : authsize);
if (!req->cryptlen || !dst_size)
return;
dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
DMA_BIDIRECTIONAL);
if (req->src == req->dst) {
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src,
+ sg_nents_for_len(req->src, dst_size),
+ DMA_BIDIRECTIONAL);
} else {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
@@ -2888,8 +2926,7 @@ static int ccm_format_packet(struct aead_request *req,
memcpy(ivptr, req->iv, 16);
}
if (assoclen)
- *((unsigned short *)(reqctx->scratch_pad + 16)) =
- htons(assoclen);
+ put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
rc = generate_b0(req, ivptr, op_type);
/* zero the ctr value */
@@ -2910,7 +2947,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
unsigned int ccm_xtra;
- unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned int tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
@@ -3163,8 +3200,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
} else {
memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
}
- *((unsigned int *)(ivptr + 12)) = htonl(0x01);
-
+ put_unaligned_be32(0x01, &ivptr[12]);
ulptx = (struct ulptx_sgl *)(ivptr + 16);
chcr_add_aead_dst_ent(req, phys_cpl, qid);
@@ -3702,6 +3738,13 @@ static int chcr_aead_op(struct aead_request *req,
return -ENOSPC;
}
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+ crypto_ipsec_check_assoclen(req->assoclen) != 0) {
+ pr_err("RFC4106: Invalid value of assoclen %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index ffd4ec0c7374..bd8dac806e7a 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -33,6 +33,13 @@ static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+static const struct tlsdev_ops chcr_ktls_ops = {
+ .tls_dev_add = chcr_ktls_dev_add,
+ .tls_dev_del = chcr_ktls_dev_del,
+};
+#endif
+
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
@@ -56,6 +63,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+ .tlsdev_ops = &chcr_ktls_ops,
+#endif
};
static void detach_work_fn(struct work_struct *work)
@@ -207,11 +217,6 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
}
u_ctx->lldi = *lld;
chcr_dev_init(u_ctx);
-
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
- chcr_enable_ktls(padap(&u_ctx->dev));
-#endif
out:
return u_ctx;
}
@@ -348,20 +353,12 @@ static void __exit chcr_crypto_exit(void)
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
- chcr_disable_ktls(adap);
-#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
- chcr_disable_ktls(adap);
-#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2c09672e00a4..67d77abd6775 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -37,6 +37,7 @@
#define __CHCR_CORE_H__
#include <crypto/algapi.h>
+#include <net/tls.h>
#include "t4_hw.h"
#include "cxgb4.h"
#include "t4_msg.h"
@@ -223,10 +224,15 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
-void chcr_enable_ktls(struct adapter *adap);
-void chcr_disable_ktls(struct adapter *adap);
int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+extern int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn);
+extern void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction);
#endif
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 542bebae001f..b3fdbdc25acb 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -302,6 +302,7 @@ struct chcr_skcipher_req_ctx {
unsigned int op;
u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
u16 txqidx;
u16 rxqidx;
};
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 9fd3b9d1ec2f..967babd67a51 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -40,7 +40,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
@@ -294,9 +293,6 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
return false;
}
- /* Inline single pdu */
- if (skb_shinfo(skb)->gso_size)
- return false;
return true;
}
@@ -406,7 +402,7 @@ inline void *copy_esn_pktxt(struct sk_buff *skb,
xo = xfrm_offload(skb);
aadiv->spi = (esphdr->spi);
- seqlo = htonl(esphdr->seq_no);
+ seqlo = ntohl(esphdr->seq_no);
seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
memcpy(aadiv->seq_no, &seqno, 8);
iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
index 43d9e2420110..91dee616d15e 100644
--- a/drivers/crypto/chelsio/chcr_ktls.c
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -221,6 +221,7 @@ static int chcr_ktls_act_open_req(struct sock *sk,
return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
}
+#if IS_ENABLED(CONFIG_IPV6)
/*
* chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
* @sk - tcp socket.
@@ -270,6 +271,7 @@ static int chcr_ktls_act_open_req6(struct sock *sk,
return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
}
+#endif /* #if IS_ENABLED(CONFIG_IPV6) */
/*
* chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
@@ -290,20 +292,26 @@ static int chcr_setup_connection(struct sock *sk,
tx_info->atid = atid;
tx_info->ip_family = sk->sk_family;
- if (sk->sk_family == AF_INET ||
- (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ if (sk->sk_family == AF_INET) {
tx_info->ip_family = AF_INET;
ret = chcr_ktls_act_open_req(sk, tx_info, atid);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
- tx_info->ip_family = AF_INET6;
- ret =
- cxgb4_clip_get(tx_info->netdev,
- (const u32 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8,
- 1);
- if (ret)
- goto out;
- ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
+ if (!sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
+ tx_info->ip_family = AF_INET;
+ ret = chcr_ktls_act_open_req(sk, tx_info, atid);
+ } else {
+ tx_info->ip_family = AF_INET6;
+ ret = cxgb4_clip_get(tx_info->netdev,
+ (const u32 *)
+ &sk->sk_v6_rcv_saddr.s6_addr,
+ 1);
+ if (ret)
+ goto out;
+ ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
+ }
+#endif
}
/* if return type is NET_XMIT_CN, msg will be sent but delayed, mark ret
@@ -373,9 +381,9 @@ static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
* @tls_cts - tls context.
* @direction - TX/RX crypto direction
*/
-static void chcr_ktls_dev_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction)
+void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
{
struct chcr_ktls_ofld_ctx_tx *tx_ctx =
chcr_get_ktls_tx_context(tls_ctx);
@@ -394,11 +402,13 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
if (tx_info->l2te)
cxgb4_l2t_release(tx_info->l2te);
+#if IS_ENABLED(CONFIG_IPV6)
/* clear clip entry */
if (tx_info->ip_family == AF_INET6)
cxgb4_clip_release(netdev,
(const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
1);
+#endif
/* clear tid */
if (tx_info->tid != -1) {
@@ -411,6 +421,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
kvfree(tx_info);
tx_ctx->chcr_info = NULL;
+ /* release module refcount */
+ module_put(THIS_MODULE);
}
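The module_put() above pairs with a try_module_get() added in chcr_ktls_dev_add() further down; together they are the standard idiom for pinning a module while state it created is still live. A minimal sketch of the pairing (function names hypothetical):

    #include <linux/module.h>

    static int conn_open(void)
    {
            if (!try_module_get(THIS_MODULE))  /* pin: a connection lives */
                    return -ENODEV;
            /* ... set up the offloaded connection ... */
            return 0;
    }

    static void conn_close(void)
    {
            /* ... tear down the offloaded connection ... */
            module_put(THIS_MODULE);           /* unpin on the close path */
    }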
/*
@@ -422,10 +434,10 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
* @direction - TX/RX crypto direction
* return: SUCCESS/FAILURE.
*/
-static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn)
+int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
@@ -489,12 +501,16 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
goto out2;
/* get peer ip */
- if (sk->sk_family == AF_INET ||
- (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ if (sk->sk_family == AF_INET) {
memcpy(daaddr, &sk->sk_daddr, 4);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
- memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+ if (!sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)
+ memcpy(daaddr, &sk->sk_daddr, 4);
+ else
+ memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+#endif
}
/* get the l2t index */
@@ -528,6 +544,12 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
if (ret)
goto out2;
+ /* Driver must not be unloaded while any offloaded connection exists */
+ if (!try_module_get(THIS_MODULE)) {
+ ret = -EINVAL;
+ goto out2;
+ }
+
atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
return 0;
out2:
@@ -537,43 +559,6 @@ out:
return ret;
}
-static const struct tlsdev_ops chcr_ktls_ops = {
- .tls_dev_add = chcr_ktls_dev_add,
- .tls_dev_del = chcr_ktls_dev_del,
-};
-
-/*
- * chcr_enable_ktls: add NETIF_F_HW_TLS_TX flag in all the ports.
- */
-void chcr_enable_ktls(struct adapter *adap)
-{
- struct net_device *netdev;
- int i;
-
- for_each_port(adap, i) {
- netdev = adap->port[i];
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
- netdev->tlsdev_ops = &chcr_ktls_ops;
- }
-}
-
-/*
- * chcr_disable_ktls: remove NETIF_F_HW_TLS_TX flag from all the ports.
- */
-void chcr_disable_ktls(struct adapter *adap)
-{
- struct net_device *netdev;
- int i;
-
- for_each_port(adap, i) {
- netdev = adap->port[i];
- netdev->features &= ~NETIF_F_HW_TLS_TX;
- netdev->hw_features &= ~NETIF_F_HW_TLS_TX;
- netdev->tlsdev_ops = NULL;
- }
-}
-
/*
* chcr_init_tcb_fields: Initialize tcb fields to handle TCP seq number
* handling.
@@ -932,7 +917,9 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
u32 ctrl, iplen, maclen;
+#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *ip6;
+#endif
unsigned int ndesc;
struct tcphdr *tcp;
int len16, pktlen;
@@ -987,9 +974,11 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
+#endif
}
/* now take care of the tcp header, if fin is not set then clear push
* bit as well, and if fin is set, it will be sent at the last so we
diff --git a/drivers/crypto/chelsio/chcr_ktls.h b/drivers/crypto/chelsio/chcr_ktls.h
index 5a7ae2ca446e..5cbd84b1da05 100644
--- a/drivers/crypto/chelsio/chcr_ktls.h
+++ b/drivers/crypto/chelsio/chcr_ktls.h
@@ -89,10 +89,15 @@ static inline int chcr_get_first_rx_qid(struct adapter *adap)
return u_ctx->lldi.rxq_ids[0];
}
-void chcr_enable_ktls(struct adapter *adap);
-void chcr_disable_ktls(struct adapter *adap);
int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn);
+void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction);
#endif /* CONFIG_CHELSIO_TLS_DEVICE */
#endif /* __CHCR_KTLS_H__ */
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index d5720a859443..9a642c79a657 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -18,13 +18,20 @@
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#include <net/ip6_route.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <net/tls.h>
+#include <net/addrconf.h>
+#include <net/secure_seq.h>
#include "chtls.h"
#include "chtls_cm.h"
+#include "clip_tbl.h"
/*
* State transitions and actions for close. Note that if we are in SYN_SENT
@@ -82,15 +89,36 @@ static void chtls_sock_release(struct kref *ref)
kfree(csk);
}
-static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev,
+static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
struct sock *sk)
{
struct net_device *ndev = cdev->ports[0];
+ struct net_device *temp;
+ int addr_type;
+
+ switch (sk->sk_family) {
+ case PF_INET:
+ if (likely(!inet_sk(sk)->inet_rcv_saddr))
+ return ndev;
+ ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
+ break;
+ case PF_INET6:
+ addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+ if (likely(addr_type == IPV6_ADDR_ANY))
+ return ndev;
+
+ for_each_netdev_rcu(&init_net, temp) {
+ if (ipv6_chk_addr(&init_net, (struct in6_addr *)
+ &sk->sk_v6_rcv_saddr, temp, 1)) {
+ ndev = temp;
+ break;
+ }
+ }
+ break;
+ default:
+ return NULL;
+ }
- if (likely(!inet_sk(sk)->inet_rcv_saddr))
- return ndev;
-
- ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
if (!ndev)
return NULL;
@@ -446,7 +474,10 @@ void chtls_destroy_sock(struct sock *sk)
free_tls_keyid(sk);
kref_put(&csk->kref, chtls_sock_release);
csk->cdev = NULL;
- sk->sk_prot = &tcp_prot;
+ if (sk->sk_family == AF_INET)
+ sk->sk_prot = &tcp_prot;
+ else
+ sk->sk_prot = &tcpv6_prot;
sk->sk_prot->destroy(sk);
}
@@ -473,7 +504,8 @@ static void chtls_disconnect_acceptq(struct sock *listen_sk)
while (*pprev) {
struct request_sock *req = *pprev;
- if (req->rsk_ops == &chtls_rsk_ops) {
+ if (req->rsk_ops == &chtls_rsk_ops ||
+ req->rsk_ops == &chtls_rsk_opsv6) {
struct sock *child = req->sk;
*pprev = req->dl_next;
@@ -600,14 +632,13 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
struct listen_ctx *ctx;
struct adapter *adap;
struct port_info *pi;
+ bool clip_valid;
int stid;
int ret;
- if (sk->sk_family != PF_INET)
- return -EAGAIN;
-
+ clip_valid = false;
rcu_read_lock();
- ndev = chtls_ipv4_netdev(cdev, sk);
+ ndev = chtls_find_netdev(cdev, sk);
rcu_read_unlock();
if (!ndev)
return -EBADF;
@@ -638,16 +669,35 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
if (!listen_hash_add(cdev, sk, stid))
goto free_stid;
- ret = cxgb4_create_server(ndev, stid,
- inet_sk(sk)->inet_rcv_saddr,
- inet_sk(sk)->inet_sport, 0,
- cdev->lldi->rxq_ids[0]);
+ if (sk->sk_family == PF_INET) {
+ ret = cxgb4_create_server(ndev, stid,
+ inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_sport, 0,
+ cdev->lldi->rxq_ids[0]);
+ } else {
+ int addr_type;
+
+ addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+ if (addr_type != IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(ndev, (const u32 *)
+ &sk->sk_v6_rcv_saddr, 1);
+ if (ret)
+ goto del_hash;
+ clip_valid = true;
+ }
+ ret = cxgb4_create_server6(ndev, stid,
+ &sk->sk_v6_rcv_saddr,
+ inet_sk(sk)->inet_sport,
+ cdev->lldi->rxq_ids[0]);
+ }
if (ret > 0)
ret = net_xmit_errno(ret);
if (ret)
goto del_hash;
return 0;
del_hash:
+ if (clip_valid)
+ cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
listen_hash_del(cdev, sk);
free_stid:
cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
@@ -661,6 +711,8 @@ free_ctx:
void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
{
struct listen_ctx *listen_ctx;
+ struct chtls_sock *csk;
+ int addr_type = 0;
int stid;
stid = listen_hash_del(cdev, sk);
@@ -671,7 +723,16 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
chtls_reset_synq(listen_ctx);
cxgb4_remove_server(cdev->lldi->ports[0], stid,
- cdev->lldi->rxq_ids[0], 0);
+ cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);
+
+ if (sk->sk_family == PF_INET6) {
+ csk = rcu_dereference_sk_user_data(sk);
+ addr_type = ipv6_addr_type((const struct in6_addr *)
+ &sk->sk_v6_rcv_saddr);
+ if (addr_type != IPV6_ADDR_ANY)
+ cxgb4_clip_release(csk->egress_dev, (const u32 *)
+ &sk->sk_v6_rcv_saddr, 1);
+ }
chtls_disconnect_acceptq(sk);
}
@@ -880,7 +941,10 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
tp = tcp_sk(sk);
tcpoptsz = 0;
- iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
+ if (sk->sk_family == AF_INET6)
+ iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+ else
+ iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
if (req->tcpopt.tstamp)
tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
@@ -1045,11 +1109,29 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (!newsk)
goto free_oreq;
- dst = inet_csk_route_child_sock(lsk, newsk, oreq);
- if (!dst)
- goto free_sk;
+ if (lsk->sk_family == AF_INET) {
+ dst = inet_csk_route_child_sock(lsk, newsk, oreq);
+ if (!dst)
+ goto free_sk;
- n = dst_neigh_lookup(dst, &iph->saddr);
+ n = dst_neigh_lookup(dst, &iph->saddr);
+ } else {
+ const struct ipv6hdr *ip6h;
+ struct flowi6 fl6;
+
+ ip6h = (const struct ipv6hdr *)network_hdr;
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_TCP;
+ fl6.saddr = ip6h->daddr;
+ fl6.daddr = ip6h->saddr;
+ fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
+ fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
+ security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
+ dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
+ if (IS_ERR(dst))
+ goto free_sk;
+ n = dst_neigh_lookup(dst, &ip6h->saddr);
+ }
if (!n)
goto free_sk;
@@ -1072,9 +1154,28 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
tp = tcp_sk(newsk);
newinet = inet_sk(newsk);
- newinet->inet_daddr = iph->saddr;
- newinet->inet_rcv_saddr = iph->daddr;
- newinet->inet_saddr = iph->daddr;
+ if (iph->version == 0x4) {
+ newinet->inet_daddr = iph->saddr;
+ newinet->inet_rcv_saddr = iph->daddr;
+ newinet->inet_saddr = iph->daddr;
+ } else {
+ struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
+ struct inet_request_sock *treq = inet_rsk(oreq);
+ struct ipv6_pinfo *newnp = inet6_sk(newsk);
+ struct ipv6_pinfo *np = inet6_sk(lsk);
+
+ inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+ memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
+ newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
+ inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
+ newnp->ipv6_fl_list = NULL;
+ newnp->pktoptions = NULL;
+ newsk->sk_bound_dev_if = treq->ir_iif;
+ newinet->inet_opt = NULL;
+ newinet->inet_daddr = LOOPBACK4_IPV6;
+ newinet->inet_saddr = LOOPBACK4_IPV6;
+ }
oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
sk_setup_caps(newsk, dst);
@@ -1156,6 +1257,7 @@ static void chtls_pass_accept_request(struct sock *sk,
struct sk_buff *reply_skb;
struct chtls_sock *csk;
struct chtls_dev *cdev;
+ struct ipv6hdr *ip6h;
struct tcphdr *tcph;
struct sock *newsk;
struct ethhdr *eh;
@@ -1196,37 +1298,50 @@ static void chtls_pass_accept_request(struct sock *sk,
if (sk_acceptq_is_full(sk))
goto reject;
- oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
- if (!oreq)
- goto reject;
-
- oreq->rsk_rcv_wnd = 0;
- oreq->rsk_window_clamp = 0;
- oreq->cookie_ts = 0;
- oreq->mss = 0;
- oreq->ts_recent = 0;
eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
+ ip6h = (struct ipv6hdr *)(eh + 1);
network_hdr = (void *)(eh + 1);
} else {
vlan_eh = (struct vlan_ethhdr *)(req + 1);
iph = (struct iphdr *)(vlan_eh + 1);
+ ip6h = (struct ipv6hdr *)(vlan_eh + 1);
network_hdr = (void *)(vlan_eh + 1);
}
- if (iph->version != 0x4)
- goto free_oreq;
- tcph = (struct tcphdr *)(iph + 1);
- skb_set_network_header(skb, (void *)iph - (void *)req);
+ if (iph->version == 0x4) {
+ tcph = (struct tcphdr *)(iph + 1);
+ skb_set_network_header(skb, (void *)iph - (void *)req);
+ oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
+ } else {
+ tcph = (struct tcphdr *)(ip6h + 1);
+ skb_set_network_header(skb, (void *)ip6h - (void *)req);
+ oreq = inet_reqsk_alloc(&chtls_rsk_opsv6, sk, false);
+ }
+
+ if (!oreq)
+ goto reject;
+
+ oreq->rsk_rcv_wnd = 0;
+ oreq->rsk_window_clamp = 0;
+ oreq->cookie_ts = 0;
+ oreq->mss = 0;
+ oreq->ts_recent = 0;
tcp_rsk(oreq)->tfo_listener = false;
tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
chtls_set_req_port(oreq, tcph->source, tcph->dest);
- chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
- ip_dsfield = ipv4_get_dsfield(iph);
+ if (iph->version == 0x4) {
+ chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
+ ip_dsfield = ipv4_get_dsfield(iph);
+ } else {
+ inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb));
+ }
if (req->tcpopt.wsf <= 14 &&
sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
inet_rsk(oreq)->wscale_ok = 1;
@@ -1243,7 +1358,7 @@ static void chtls_pass_accept_request(struct sock *sk,
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
- goto reject;
+ goto free_oreq;
if (chtls_get_module(newsk))
goto reject;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 3fac0c74a41f..47ba81e42f5d 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -79,6 +79,7 @@ enum {
typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
extern struct request_sock_ops chtls_rsk_ops;
+extern struct request_sock_ops chtls_rsk_opsv6;
struct deferred_skb_cb {
defer_handler_t handler;
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index dccef3a2908b..e1401d9cc756 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp)
make_tx_data_wr(sk, skb, immdlen, len,
credits_needed, completion);
tp->snd_nxt += len;
- tp->lsndtime = tcp_time_stamp(tp);
+ tp->lsndtime = tcp_jiffies32;
if (completion)
ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
} else {
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 2110d0893bc7..7dfffdde9593 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -13,6 +13,8 @@
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
#include <net/tcp.h>
#include <net/tls.h>
@@ -30,8 +32,8 @@ static DEFINE_MUTEX(cdev_mutex);
static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
-static struct proto chtls_cpl_prot;
-struct request_sock_ops chtls_rsk_ops;
+static struct proto chtls_cpl_prot, chtls_cpl_protv6;
+struct request_sock_ops chtls_rsk_ops, chtls_rsk_opsv6;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;
static void register_listen_notifier(struct notifier_block *nb)
@@ -586,7 +588,10 @@ static struct cxgb4_uld_info chtls_uld_info = {
void chtls_install_cpl_ops(struct sock *sk)
{
- sk->sk_prot = &chtls_cpl_prot;
+ if (sk->sk_family == AF_INET)
+ sk->sk_prot = &chtls_cpl_prot;
+ else
+ sk->sk_prot = &chtls_cpl_protv6;
}
static void __init chtls_init_ulp_ops(void)
@@ -603,6 +608,9 @@ static void __init chtls_init_ulp_ops(void)
chtls_cpl_prot.recvmsg = chtls_recvmsg;
chtls_cpl_prot.setsockopt = chtls_setsockopt;
chtls_cpl_prot.getsockopt = chtls_getsockopt;
+ chtls_cpl_protv6 = chtls_cpl_prot;
+ chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6,
+ &tcpv6_prot, PF_INET6);
}
static int __init chtls_register(void)
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index f09c6cf7823e..9c3b3ca815e6 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -29,6 +29,7 @@ config CRYPTO_DEV_HISI_SEC2
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ACPI
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
It provides AES, SM4, and 3DES algorithms with ECB
@@ -42,6 +43,7 @@ config CRYPTO_DEV_HISI_QM
depends on ARM64 || COMPILE_TEST
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
+ depends on ACPI
help
HiSilicon accelerator engines use a common queue management
interface. Specific engine driver may use this module.
@@ -52,6 +54,7 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
depends on UACCE || UACCE=n
+ depends on ACPI
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon ZIP Driver
@@ -61,6 +64,7 @@ config CRYPTO_DEV_HISI_HPRE
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ACPI
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 03d512ec6336..ed730d173e95 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -25,6 +25,17 @@ enum hpre_ctrl_dbgfs_file {
HPRE_DEBUG_FILE_NUM,
};
+enum hpre_dfx_dbgfs_file {
+ HPRE_SEND_CNT,
+ HPRE_RECV_CNT,
+ HPRE_SEND_FAIL_CNT,
+ HPRE_SEND_BUSY_CNT,
+ HPRE_OVER_THRHLD_CNT,
+ HPRE_OVERTIME_THRHLD,
+ HPRE_INVALID_REQ_CNT,
+ HPRE_DFX_FILE_NUM
+};
+
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
struct hpre_debugfs_file {
@@ -34,6 +45,11 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
+struct hpre_dfx {
+ atomic64_t value;
+ enum hpre_dfx_dbgfs_file type;
+};
+
/*
* One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure.
@@ -41,13 +57,13 @@ struct hpre_debugfs_file {
*/
struct hpre_debug {
struct dentry *debug_root;
+ struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
struct hpre {
struct hisi_qm qm;
struct hpre_debug debug;
- u32 num_vfs;
unsigned long status;
};
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 65425250b2e9..7b5cb27d473d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
+#include <linux/time.h>
#include "hpre.h"
struct hpre_ctx;
@@ -32,6 +33,9 @@ struct hpre_ctx;
#define HPRE_SQE_DONE_SHIFT 30
#define HPRE_DH_MAX_P_SZ 512
+#define HPRE_DFX_SEC_TO_US 1000000
+#define HPRE_DFX_US_TO_NS 1000
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -68,6 +72,7 @@ struct hpre_dh_ctx {
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
+ struct hpre *hpre;
spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
@@ -90,6 +95,7 @@ struct hpre_asym_request {
int err;
int req_id;
hpre_cb cb;
+ struct timespec64 req_time;
};
static DEFINE_MUTEX(hpre_alg_lock);
@@ -119,6 +125,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
struct hpre_ctx *ctx;
+ struct hpre_dfx *dfx;
int id;
ctx = hpre_req->ctx;
@@ -129,6 +136,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx->req_list[id] = hpre_req;
hpre_req->req_id = id;
+ dfx = ctx->hpre->debug.dfx;
+ if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
+ ktime_get_ts64(&hpre_req->req_time);
+
return id;
}
@@ -309,12 +320,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
+ struct hpre *hpre;
+
if (!ctx || !qp || qlen < 0)
return -EINVAL;
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
+ hpre = container_of(ctx->qp->qm, struct hpre, qm);
+ ctx->hpre = hpre;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list)
return -ENOMEM;
@@ -337,38 +352,80 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
ctx->key_sz = 0;
}
+static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
+ u64 overtime_thrhld)
+{
+ struct timespec64 reply_time;
+ u64 time_use_us;
+
+ ktime_get_ts64(&reply_time);
+ time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
+ HPRE_DFX_SEC_TO_US +
+ (reply_time.tv_nsec - req->req_time.tv_nsec) /
+ HPRE_DFX_US_TO_NS;
+
+ if (time_use_us <= overtime_thrhld)
+ return false;
+
+ return true;
+}
+
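The conversion in hpre_is_bd_timeout() is plain timespec64 arithmetic folded to microseconds; the signed nanosecond term goes negative whenever the reply lands earlier within its second than the request did, and the signed math absorbs it:

    /*   us = (s2 - s1) * 1000000 + (ns2 - ns1) / 1000
     *
     * e.g. request at 1.000500s, reply at 2.000200s:
     *   (2 - 1) * 1000000 + (200000 - 500000) / 1000
     *   = 1000000 + (-300) = 999700 us */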
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct kpp_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
areq = req->areq.dh;
areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct akcipher_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
areq = req->areq.rsa;
areq->dst_len = ctx->key_sz;
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
akcipher_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
struct hpre_ctx *ctx = qp->qp_ctx;
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_sqe *sqe = resp;
+ struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
- ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
+
+ if (unlikely(!req)) {
+ atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
+ return;
+ }
+
+ req->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
@@ -436,6 +493,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
return 0;
}
+static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ int ctr = 0;
+ int ret;
+
+ do {
+ atomic64_inc(&dfx[HPRE_SEND_CNT].value);
+ ret = hisi_qp_send(ctx->qp, msg);
+ if (ret != -EBUSY)
+ break;
+ atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
+ } while (ctr++ < HPRE_TRY_SEND_TIMES);
+
+ if (likely(!ret))
+ return ret;
+
+ if (ret != -EBUSY)
+ atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
+
+ return ret;
+}
+
#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
@@ -444,7 +524,6 @@ static int hpre_dh_compute_value(struct kpp_request *req)
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
ret = hpre_msg_request_set(ctx, req, false);
@@ -465,11 +544,9 @@ static int hpre_dh_compute_value(struct kpp_request *req)
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -647,7 +724,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -677,11 +753,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -699,7 +772,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -736,11 +808,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 88be53bf4a38..a3ee127a70e3 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -59,10 +59,6 @@
#define HPRE_HAC_ECC2_CNT 0x301a08
#define HPRE_HAC_INT_STATUS 0x301800
#define HPRE_HAC_SOURCE_INT 0x301600
-#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
-#define MASTER_TRANS_RETURN_RW 3
-#define HPRE_MASTER_TRANS_RETURN 0x300150
-#define HPRE_MASTER_GLOBAL_CTRL 0x300000
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
@@ -80,7 +76,16 @@
#define HPRE_BD_USR_MASK 0x3
#define HPRE_CLUSTER_CORE_MASK 0xf
+#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
+#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
+#define HPRE_WR_MSI_PORT BIT(2)
+
+#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
+#define HPRE_OOO_ECC_2BIT_ERR BIT(5)
+
#define HPRE_VIA_MSI_DSM 1
+#define HPRE_SQE_MASK_OFFSET 8
+#define HPRE_SQE_MASK_LEN 24
static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
@@ -131,7 +136,7 @@ static const u64 hpre_cluster_offsets[] = {
HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};
-static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
+static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
@@ -139,7 +144,7 @@ static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};
-static struct debugfs_reg32 hpre_com_dfx_regs[] = {
+static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
{"AXQOS ", HPRE_VFG_AXQOS},
{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
@@ -156,44 +161,38 @@ static struct debugfs_reg32 hpre_com_dfx_regs[] = {
{"INT_STATUS ", HPRE_INT_STATUS},
};
-static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
-{
- struct pci_dev *pdev;
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
- if (!pdev) {
- q_num = HPRE_QUEUE_NUM_V2;
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- if (rev_id != QM_HW_V2)
- return -EINVAL;
-
- q_num = HPRE_QUEUE_NUM_V2;
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n == 0 || n > q_num)
- return -EINVAL;
+static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
+ "send_cnt",
+ "recv_cnt",
+ "send_fail_cnt",
+ "send_busy_cnt",
+ "over_thrhld_cnt",
+ "overtime_thrhld",
+ "invalid_req_cnt"
+};
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
- .set = hpre_pf_q_num_set,
+ .set = pf_q_num_set,
.get = param_get_int,
};
-static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
-module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
-MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)");
+static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
+
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
+
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
struct hisi_qp *hpre_create_qp(void)
{
@@ -232,9 +231,8 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
return 0;
}
-static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
unsigned long offset;
int ret, i;
@@ -324,17 +322,34 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
+ u32 val;
+
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+
+ /* disable HPRE block master OOO when m-bit error occur */
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
+ u32 val;
+
+ /* clear HPRE hw error source if having */
+ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+
/* enable hpre hw error interrupts */
writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+
+ /* enable HPRE block master OOO when m-bit error occur */
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -354,9 +369,7 @@ static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
- struct hpre_debug *debug = file->debug;
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- u32 num_vfs = hpre->num_vfs;
+ u32 num_vfs = qm->vfs_num;
u32 vfq_num, tmp;
@@ -523,6 +536,33 @@ static const struct file_operations hpre_ctrl_debug_fops = {
.write = hpre_ctrl_debug_write,
};
+static int hpre_debugfs_atomic64_get(void *data, u64 *val)
+{
+ struct hpre_dfx *dfx_item = data;
+
+ *val = atomic64_read(&dfx_item->value);
+
+ return 0;
+}
+
+static int hpre_debugfs_atomic64_set(void *data, u64 val)
+{
+ struct hpre_dfx *dfx_item = data;
+ struct hpre_dfx *hpre_dfx = NULL;
+
+ if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
+ hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+ atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
+ } else if (val) {
+ return -EINVAL;
+ }
+
+ atomic64_set(&dfx_item->value, val);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
+ hpre_debugfs_atomic64_set, "%llu\n");
+
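The element-to-base subtraction in the setter above is ordinary array arithmetic: a pointer to dfx[HPRE_OVERTIME_THRHLD], stepped back by HPRE_OVERTIME_THRHLD elements, is &dfx[0], from which the sibling over-threshold counter can be reset. Illustratively:

    struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
    struct hpre_dfx *item = &dfx[HPRE_OVERTIME_THRHLD];
    struct hpre_dfx *base = item - HPRE_OVERTIME_THRHLD;  /* == &dfx[0] */

    atomic64_set(&base[HPRE_OVER_THRHLD_CNT].value, 0);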
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
@@ -620,6 +660,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
+static void hpre_dfx_debug_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hpre_dfx *dfx = hpre->debug.dfx;
+ struct hisi_qm *qm = &hpre->qm;
+ struct dentry *parent;
+ int i;
+
+ parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
+ for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
+ dfx[i].type = i;
+ debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
+ &hpre_atomic64_ops);
+ }
+}
+
static int hpre_debugfs_init(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
@@ -629,6 +685,8 @@ static int hpre_debugfs_init(struct hpre *hpre)
dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
qm->debug.debug_root = dir;
+ qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
@@ -640,6 +698,9 @@ static int hpre_debugfs_init(struct hpre *hpre)
if (ret)
goto failed_to_create;
}
+
+ hpre_dfx_debug_init(&hpre->debug);
+
return 0;
failed_to_create:
@@ -654,32 +715,27 @@ static void hpre_debugfs_exit(struct hpre *hpre)
debugfs_remove_recursive(qm->debug.debug_root);
}
-static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id < 0)
- return -ENODEV;
-
- if (rev_id == QM_HW_V1) {
+ if (pdev->revision == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
qm->pdev = pdev;
- qm->ver = rev_id;
+ qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
+
qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
- QM_HW_PF : QM_HW_VF;
- if (pdev->is_physfn) {
+ QM_HW_PF : QM_HW_VF;
+ if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HPRE_PF_DEF_Q_BASE;
- qm->qp_num = hpre_pf_q_num;
+ qm->qp_num = pf_q_num;
+ qm->qm_list = &hpre_devices;
}
- qm->use_dma_api = true;
- return 0;
+ return hisi_qm_init(qm);
}
static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
@@ -693,8 +749,6 @@ static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
err->msg, err->int_msk);
err++;
}
-
- writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
@@ -702,16 +756,38 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + HPRE_HAC_INT_STATUS);
}
+static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+}
+
+static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 value;
+
+ value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+}
+
static const struct hisi_qm_err_ini hpre_err_ini = {
+ .hw_init = hpre_set_user_domain_and_cache,
.hw_err_enable = hpre_hw_error_enable,
.hw_err_disable = hpre_hw_error_disable,
.get_dev_hw_err_status = hpre_get_hw_err_status,
+ .clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
+ .open_axi_master_ooo = hpre_open_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
.fe = 0,
- .msi = QM_DB_RANDOM_INVALID,
+ .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+ HPRE_OOO_ECC_2BIT_ERR,
+ .msi_wr_port = HPRE_WR_MSI_PORT,
+ .acpi_rst = "HRST",
}
};
@@ -722,7 +798,7 @@ static int hpre_pf_probe_init(struct hpre *hpre)
qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
- ret = hpre_set_user_domain_and_cache(hpre);
+ ret = hpre_set_user_domain_and_cache(qm);
if (ret)
return ret;
@@ -732,6 +808,20 @@ static int hpre_pf_probe_init(struct hpre *hpre)
return 0;
}
+static int hpre_probe_init(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ int ret;
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = hpre_pf_probe_init(hpre);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_qm *qm;
@@ -742,26 +832,17 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hpre)
return -ENOMEM;
- pci_set_drvdata(pdev, hpre);
-
qm = &hpre->qm;
- ret = hpre_qm_pre_init(qm, pdev);
- if (ret)
- return ret;
-
- ret = hisi_qm_init(qm);
- if (ret)
+ ret = hpre_qm_init(qm, pdev);
+ if (ret) {
+ pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
return ret;
+ }
- if (pdev->is_physfn) {
- ret = hpre_pf_probe_init(hpre);
- if (ret)
- goto err_with_qm_init;
- } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_with_qm_init;
+ ret = hpre_probe_init(hpre);
+ if (ret) {
+ pci_err(pdev, "Failed to probe (%d)!\n", ret);
+ goto err_with_qm_init;
}
ret = hisi_qm_start(qm);
@@ -779,8 +860,18 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
}
+
+ if (qm->fun_type == QM_HW_PF && vfs_num) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_with_crypto_register;
+ }
+
return 0;
+err_with_crypto_register:
+ hpre_algs_unregister();
+
err_with_qm_start:
hisi_qm_del_from_list(qm, &hpre_devices);
hisi_qm_stop(qm);
@@ -794,107 +885,6 @@ err_with_qm_init:
return ret;
}
-static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 qp_num = qm->qp_num;
- int q_num, remain_q_num, i;
- u32 q_base = qp_num;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_qp_num - qp_num;
-
- /* If remaining queues are not enough, return error. */
- if (remain_q_num < num_vfs)
- return -EINVAL;
-
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
- if (ret)
- return ret;
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int hpre_clear_vft_config(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 num_vfs = hpre->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- hpre->num_vfs = 0;
-
- return 0;
-}
-
-static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
- if (pre_existing_vfs) {
- pci_err(pdev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
- ret = hpre_vf_q_assign(hpre, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hpre->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't enable VF!\n");
- hpre_clear_vft_config(hpre);
- return ret;
- }
-
- return num_vfs;
-}
-
-static int hpre_sriov_disable(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* remove in hpre_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return hpre_clear_vft_config(hpre);
-}
-
-static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- if (num_vfs)
- return hpre_sriov_enable(pdev, num_vfs);
- else
- return hpre_sriov_disable(pdev);
-}
-
static void hpre_remove(struct pci_dev *pdev)
{
struct hpre *hpre = pci_get_drvdata(pdev);
@@ -903,8 +893,8 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_algs_unregister();
hisi_qm_del_from_list(qm, &hpre_devices);
- if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
- ret = hpre_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
+ ret = hisi_qm_sriov_disable(pdev);
if (ret) {
pci_err(pdev, "Disable SRIOV fail!\n");
return;
@@ -924,6 +914,9 @@ static void hpre_remove(struct pci_dev *pdev)
static const struct pci_error_handlers hpre_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hpre_pci_driver = {
@@ -931,7 +924,7 @@ static struct pci_driver hpre_pci_driver = {
.id_table = hpre_dev_ids,
.probe = hpre_probe,
.remove = hpre_remove,
- .sriov_configure = hpre_sriov_configure,
+ .sriov_configure = hisi_qm_sriov_configure,
.err_handler = &hpre_err_handler,
};
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f795fb557630..9bb263cec6c3 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
+#include <linux/acpi.h>
+#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
@@ -53,6 +56,7 @@
#define QM_SQ_TYPE_SHIFT 8
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
+#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT 0
@@ -64,6 +68,7 @@
#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE 4
+#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
/* eqc shift */
#define QM_EQE_AEQE_SIZE (2UL << 12)
@@ -122,9 +127,11 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
+#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
#define QM_ABNORMAL_INT_STATUS 0x100008
+#define QM_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
@@ -140,6 +147,27 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4
+#define QM_DEV_RESET_FLAG 0
+#define QM_RESET_WAIT_TIMEOUT 400
+#define QM_PEH_VENDOR_ID 0x1000d8
+#define ACC_VENDOR_ID_VALUE 0x5a5a
+#define QM_PEH_DFX_INFO0 0x1000fc
+#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
+#define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
+#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
+#define ACC_MASTER_TRANS_RETURN_RW 3
+#define ACC_MASTER_TRANS_RETURN 0x300150
+#define ACC_MASTER_GLOBAL_CTRL 0x300000
+#define ACC_AM_CFG_PORT_WR_EN 0x30001c
+#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
+#define ACC_AM_ROB_ECC_INT_STS 0x300104
+#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
+
+#define POLL_PERIOD 10
+#define POLL_TIMEOUT 1000
+#define WAIT_PERIOD_US_MAX 200
+#define WAIT_PERIOD_US_MIN 100
+#define MAX_WAIT_COUNTS 1000
#define QM_CACHE_WB_START 0x204
#define QM_CACHE_WB_DONE 0x208
@@ -147,7 +175,12 @@
#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)
+#define QM_DBG_READ_LEN 256
+#define QM_DBG_WRITE_LEN 1024
#define QM_DBG_TMP_BUF_LEN 22
+#define QM_PCI_COMMAND_INVALID ~0
+
+#define QM_SQE_ADDR_MASK GENMASK(7, 0)
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
@@ -190,6 +223,12 @@ enum vft_type {
CQC_VFT,
};
+enum acc_err_result {
+ ACC_ERR_NONE,
+ ACC_ERR_NEED_RESET,
+ ACC_ERR_RECOVERED,
+};
+
struct qm_cqe {
__le32 rsvd0;
__le16 cmd_id;
@@ -284,10 +323,22 @@ struct hisi_qm_hw_ops {
u8 cmd, u16 index, u8 priority);
u32 (*get_irq_num)(struct hisi_qm *qm);
int (*debug_init)(struct hisi_qm *qm);
- void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi);
+ void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
void (*hw_error_uninit)(struct hisi_qm *qm);
- pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm);
+ enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
+};
+
+struct qm_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
+static struct qm_dfx_item qm_dfx_files[] = {
+ {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
+ {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
+ {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
+ {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
+ {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};
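
The qm_dfx_files[] table above pairs each debugfs file name with an offsetof() into struct qm_dfx, so one generic attribute can serve every counter. A minimal user-space sketch of that name/offset pattern (struct dfx and its fields are illustrative stand-ins, not the driver's types):

#include <stddef.h>
#include <stdio.h>

struct dfx { long err_irq_cnt; long aeq_irq_cnt; };

static const struct { const char *name; size_t offset; } files[] = {
	{ "err_irq", offsetof(struct dfx, err_irq_cnt) },
	{ "aeq_irq", offsetof(struct dfx, aeq_irq_cnt) },
};

int main(void)
{
	struct dfx d = { 3, 7 };
	size_t i;

	/* Resolve each offset against a live instance, as the debugfs
	 * setup later in this patch does with (uintptr_t)dfx + offset. */
	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		long *p = (long *)((char *)&d + files[i].offset);
		printf("%s = %ld\n", files[i].name, *p);
	}
	return 0;
}
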
static const char * const qm_debug_file_name[] = {
@@ -325,6 +376,93 @@ static const char * const qm_fifo_overflow[] = {
"cq", "eq", "aeq",
};
+static const char * const qm_s[] = {
+ "init", "start", "close", "stop",
+};
+
+static const char * const qp_s[] = {
+ "none", "init", "start", "stop", "close",
+};
+
+static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
+{
+ enum qm_state curr = atomic_read(&qm->status.flags);
+ bool avail = false;
+
+ switch (curr) {
+ case QM_INIT:
+ if (new == QM_START || new == QM_CLOSE)
+ avail = true;
+ break;
+ case QM_START:
+ if (new == QM_STOP)
+ avail = true;
+ break;
+ case QM_STOP:
+ if (new == QM_CLOSE || new == QM_START)
+ avail = true;
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
+ qm_s[curr], qm_s[new]);
+
+ if (!avail)
+ dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
+ qm_s[curr], qm_s[new]);
+
+ return avail;
+}
+
+static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
+ enum qp_state new)
+{
+ enum qm_state qm_curr = atomic_read(&qm->status.flags);
+ enum qp_state qp_curr = 0;
+ bool avail = false;
+
+ if (qp)
+ qp_curr = atomic_read(&qp->qp_status.flags);
+
+ switch (new) {
+ case QP_INIT:
+ if (qm_curr == QM_START || qm_curr == QM_INIT)
+ avail = true;
+ break;
+ case QP_START:
+ if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
+ (qm_curr == QM_START && qp_curr == QP_STOP))
+ avail = true;
+ break;
+ case QP_STOP:
+ if ((qm_curr == QM_START && qp_curr == QP_START) ||
+ (qp_curr == QP_INIT))
+ avail = true;
+ break;
+ case QP_CLOSE:
+ if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
+ (qm_curr == QM_START && qp_curr == QP_STOP) ||
+ (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
+ (qm_curr == QM_STOP && qp_curr == QP_INIT))
+ avail = true;
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
+ qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
+
+ if (!avail)
+ dev_warn(&qm->pdev->dev,
+ "Can not change qp state from %s to %s in QM %s\n",
+ qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
+
+ return avail;
+}
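
qm_avail_state() and qm_qp_avail_state() whitelist the legal transitions of the qm/qp state machines before any state change. A standalone sketch of the same check in table-driven form (the states and transition table are simplified stand-ins for the driver's enums):

#include <stdbool.h>
#include <stdio.h>

enum state { ST_INIT, ST_START, ST_STOP, ST_CLOSE, ST_MAX };

/* allowed[cur][next] mirrors the switch above: true means legal. */
static const bool allowed[ST_MAX][ST_MAX] = {
	[ST_INIT]  = { [ST_START] = true, [ST_CLOSE] = true },
	[ST_START] = { [ST_STOP]  = true },
	[ST_STOP]  = { [ST_START] = true, [ST_CLOSE] = true },
};

static bool avail_state(enum state cur, enum state next)
{
	return allowed[cur][next];
}

int main(void)
{
	printf("init->start: %d\n", avail_state(ST_INIT, ST_START));   /* 1 */
	printf("close->start: %d\n", avail_state(ST_CLOSE, ST_START)); /* 0 */
	return 0;
}
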
+
/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
@@ -393,6 +531,8 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
busy_unlock:
mutex_unlock(&qm->mailbox_lock);
+ if (ret)
+ atomic64_inc(&qm->debug.dfx.mb_err_cnt);
return ret;
}
@@ -460,7 +600,7 @@ static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
- return qm->qp_array[cqn];
+ return &qm->qp_array[cqn];
}
static void qm_cq_head_update(struct hisi_qp *qp)
@@ -510,8 +650,7 @@ static void qm_work_process(struct work_struct *work)
while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
eqe_num++;
qp = qm_to_hisi_qp(qm, eqe);
- if (qp)
- qm_poll_qp(qp, qm);
+ qm_poll_qp(qp, qm);
if (qm->status.eq_head == QM_Q_DEPTH - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -551,6 +690,7 @@ static irqreturn_t qm_irq(int irq, void *data)
if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
return do_qm_irq(irq, data);
+ atomic64_inc(&qm->debug.dfx.err_irq_cnt);
dev_err(&qm->pdev->dev, "invalid int source\n");
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
@@ -563,6 +703,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
u32 type;
+ atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
return IRQ_NONE;
@@ -590,79 +731,20 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t qm_abnormal_irq(int irq, void *data)
-{
- const struct hisi_qm_hw_error *err = qm_hw_error;
- struct hisi_qm *qm = data;
- struct device *dev = &qm->pdev->dev;
- u32 error_status, tmp;
-
- /* read err sts */
- tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
- error_status = qm->msi_mask & tmp;
-
- while (err->msg) {
- if (err->int_msk & error_status)
- dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
-
- err++;
- }
-
- /* clear err sts */
- writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
-
- return IRQ_HANDLED;
-}
-
-static int qm_irq_register(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, IRQF_SHARED, qm->dev_name, qm);
- if (ret)
- return ret;
-
- if (qm->ver == QM_HW_V2) {
- ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
- if (ret)
- goto err_aeq_irq;
-
- if (qm->fun_type == QM_HW_PF) {
- ret = request_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, IRQF_SHARED,
- qm->dev_name, qm);
- if (ret)
- goto err_abonormal_irq;
- }
- }
-
- return 0;
-
-err_abonormal_irq:
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-err_aeq_irq:
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- return ret;
-}
-
static void qm_irq_unregister(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- if (qm->ver == QM_HW_V2) {
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
+ if (qm->ver == QM_HW_V1)
+ return;
+
+ free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
- }
+ if (qm->fun_type == QM_HW_PF)
+ free_irq(pci_irq_vector(pdev,
+ QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
}
static void qm_init_qp_status(struct hisi_qp *qp)
@@ -672,7 +754,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
qp_status->sq_tail = 0;
qp_status->cq_head = 0;
qp_status->cqc_phase = true;
- qp_status->flags = 0;
+ atomic_set(&qp_status->flags, 0);
}
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
@@ -683,36 +765,26 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
if (number > 0) {
switch (type) {
case SQC_VFT:
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1) {
tmp = QM_SQC_VFT_BUF_SIZE |
QM_SQC_VFT_SQC_SIZE |
QM_SQC_VFT_INDEX_NUMBER |
QM_SQC_VFT_VALID |
(u64)base << QM_SQC_VFT_START_SQN_SHIFT;
- break;
- case QM_HW_V2:
+ } else {
tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
QM_SQC_VFT_VALID |
(u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
- break;
- case QM_HW_UNKNOWN:
- break;
}
break;
case CQC_VFT:
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1) {
tmp = QM_CQC_VFT_BUF_SIZE |
QM_CQC_VFT_SQC_SIZE |
QM_CQC_VFT_INDEX_NUMBER |
QM_CQC_VFT_VALID;
- break;
- case QM_HW_V2:
+ } else {
tmp = QM_CQC_VFT_VALID;
- break;
- case QM_HW_UNKNOWN:
- break;
}
break;
}
@@ -986,6 +1058,473 @@ static const struct file_operations qm_regs_fops = {
.release = single_release,
};
+static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ char buf[QM_DBG_READ_LEN];
+ int len;
+
+ if (*pos)
+ return 0;
+
+ if (count < QM_DBG_READ_LEN)
+ return -ENOSPC;
+
+ len = snprintf(buf, QM_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
+
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+
+ return (*pos = len);
+}
+
+static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
+ dma_addr_t *dma_addr)
+{
+ struct device *dev = &qm->pdev->dev;
+ void *ctx_addr;
+
+ ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
+ if (!ctx_addr)
+ return ERR_PTR(-ENOMEM);
+
+ *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, *dma_addr)) {
+ dev_err(dev, "DMA mapping error!\n");
+ kfree(ctx_addr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return ctx_addr;
+}
+
+static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
+ const void *ctx_addr, dma_addr_t *dma_addr)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
+ kfree(ctx_addr);
+}
+
+static int dump_show(struct hisi_qm *qm, void *info,
+ unsigned int info_size, char *info_name)
+{
+ struct device *dev = &qm->pdev->dev;
+ u8 *info_buf, *info_curr = info;
+ u32 i;
+#define BYTE_PER_DW 4
+
+ info_buf = kzalloc(info_size, GFP_KERNEL);
+ if (!info_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < info_size; i++, info_curr++) {
+ if (i % BYTE_PER_DW == 0)
+ info_buf[i + 3UL] = *info_curr;
+ else if (i % BYTE_PER_DW == 1)
+ info_buf[i + 1UL] = *info_curr;
+ else if (i % BYTE_PER_DW == 2)
+ info_buf[i - 1] = *info_curr;
+ else if (i % BYTE_PER_DW == 3)
+ info_buf[i - 3] = *info_curr;
+ }
+
+ dev_info(dev, "%s DUMP\n", info_name);
+ for (i = 0; i < info_size; i += BYTE_PER_DW) {
+ pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
+ info_buf[i], info_buf[i + 1UL],
+ info_buf[i + 2UL], info_buf[i + 3UL]);
+ }
+
+ kfree(info_buf);
+
+ return 0;
+}
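
dump_show() prints each 32-bit word of a context structure with its bytes reversed, so little-endian memory reads most-significant byte first. The index arithmetic above is equivalent to the per-word byte swap in this standalone sketch:

#include <stdint.h>
#include <stdio.h>

static void dump_words(const void *info, size_t size)
{
	const uint8_t *p = info;
	size_t i;

	/* Print bytes 3,2,1,0 of each word: a per-word byte reversal. */
	for (i = 0; i + 4 <= size; i += 4)
		printf("DW%zu: %02X%02X %02X%02X\n", i / 4,
		       p[i + 3], p[i + 2], p[i + 1], p[i]);
}

int main(void)
{
	uint32_t ctx[2] = { 0x11223344, 0xaabbccdd };

	dump_words(ctx, sizeof(ctx)); /* DW0: 1122 3344 on little-endian */
	return 0;
}
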
+
+static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+{
+ return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
+}
+
+static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+{
+ return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
+}
+
+static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_sqc *sqc, *sqc_curr;
+ dma_addr_t sqc_dma;
+ u32 qp_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &qp_id);
+ if (ret || qp_id >= qm->qp_num) {
+ dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ return -EINVAL;
+ }
+
+ sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
+ if (IS_ERR(sqc))
+ return PTR_ERR(sqc);
+
+ ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
+ if (ret) {
+ down_read(&qm->qps_lock);
+ if (qm->sqc) {
+ sqc_curr = qm->sqc + qp_id;
+
+ ret = dump_show(qm, sqc_curr, sizeof(*sqc),
+ "SOFT SQC");
+ if (ret)
+ dev_info(dev, "Show soft sqc failed!\n");
+ }
+ up_read(&qm->qps_lock);
+
+ goto err_free_ctx;
+ }
+
+ ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
+ if (ret)
+ dev_info(dev, "Show hw sqc failed!\n");
+
+err_free_ctx:
+ qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
+ return ret;
+}
+
+static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_cqc *cqc, *cqc_curr;
+ dma_addr_t cqc_dma;
+ u32 qp_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &qp_id);
+ if (ret || qp_id >= qm->qp_num) {
+ dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ return -EINVAL;
+ }
+
+ cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
+ if (IS_ERR(cqc))
+ return PTR_ERR(cqc);
+
+ ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
+ if (ret) {
+ down_read(&qm->qps_lock);
+ if (qm->cqc) {
+ cqc_curr = qm->cqc + qp_id;
+
+ ret = dump_show(qm, cqc_curr, sizeof(*cqc),
+ "SOFT CQC");
+ if (ret)
+ dev_info(dev, "Show soft cqc failed!\n");
+ }
+ up_read(&qm->qps_lock);
+
+ goto err_free_ctx;
+ }
+
+ ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
+ if (ret)
+ dev_info(dev, "Show hw cqc failed!\n");
+
+err_free_ctx:
+ qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
+ return ret;
+}
+
+static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
+ int cmd, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ dma_addr_t xeqc_dma;
+ void *xeqc;
+ int ret;
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
+ if (IS_ERR(xeqc))
+ return PTR_ERR(xeqc);
+
+ ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
+ if (ret)
+ goto err_free_ctx;
+
+ ret = dump_show(qm, xeqc, size, name);
+ if (ret)
+ dev_info(dev, "Show hw %s failed!\n", name);
+
+err_free_ctx:
+ qm_ctx_free(qm, size, xeqc, &xeqc_dma);
+ return ret;
+}
+
+static int q_dump_param_parse(struct hisi_qm *qm, char *s,
+ u32 *e_id, u32 *q_id)
+{
+ struct device *dev = &qm->pdev->dev;
+ unsigned int qp_num = qm->qp_num;
+ char *presult;
+ int ret;
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ dev_err(dev, "Please input qp number!\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(presult, 0, q_id);
+ if (ret || *q_id >= qp_num) {
+ dev_err(dev, "Please input qp num (0-%d)", qp_num - 1);
+ return -EINVAL;
+ }
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ dev_err(dev, "Please input sqe number!\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(presult, 0, e_id);
+ if (ret || *e_id >= QM_Q_DEPTH) {
+ dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
+ return -EINVAL;
+ }
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
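
q_dump_param_parse() tokenizes the command tail with strsep() and rejects trailing input. The same parse in a user-space sketch (glibc's strsep(); the bounds checks are simplified):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "<qp_id> <sqe_id>" and reject anything extra. */
static int parse_two(char *s, unsigned long *q_id, unsigned long *e_id)
{
	char *tok, *end;

	tok = strsep(&s, " ");
	if (!tok || !*tok)
		return -1;
	*q_id = strtoul(tok, &end, 0);
	if (*end)
		return -1;

	tok = strsep(&s, " ");
	if (!tok || !*tok)
		return -1;
	*e_id = strtoul(tok, &end, 0);
	if (*end)
		return -1;

	if (strsep(&s, " "))	/* extra characters: error, as above */
		return -1;
	return 0;
}

int main(void)
{
	char cmd[] = "3 42";
	unsigned long q, e;

	if (!parse_two(cmd, &q, &e))
		printf("qp=%lu sqe=%lu\n", q, e);
	return 0;
}
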
+
+static int qm_sq_dump(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ void *sqe, *sqe_curr;
+ struct hisi_qp *qp;
+ u32 qp_id, sqe_id;
+ int ret;
+
+ ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
+ if (ret)
+ return ret;
+
+ sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
+ if (!sqe)
+ return -ENOMEM;
+
+ qp = &qm->qp_array[qp_id];
+ memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
+ sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
+ memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
+ qm->debug.sqe_mask_len);
+
+ ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
+ if (ret)
+ dev_info(dev, "Show sqe failed!\n");
+
+ kfree(sqe);
+
+ return ret;
+}
+
+static int qm_cq_dump(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_cqe *cqe_curr;
+ struct hisi_qp *qp;
+ u32 qp_id, cqe_id;
+ int ret;
+
+ ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
+ if (ret)
+ return ret;
+
+ qp = &qm->qp_array[qp_id];
+ cqe_curr = qp->cqe + cqe_id;
+ ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
+ if (ret)
+ dev_info(dev, "Show cqe failed!\n");
+
+ return ret;
+}
+
+static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
+ size_t size, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ void *xeqe;
+ u32 xeqe_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &xeqe_id);
+ if (ret || xeqe_id >= QM_Q_DEPTH) {
+ dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
+ return -EINVAL;
+ }
+
+ down_read(&qm->qps_lock);
+
+ if (qm->eqe && !strcmp(name, "EQE")) {
+ xeqe = qm->eqe + xeqe_id;
+ } else if (qm->aeqe && !strcmp(name, "AEQE")) {
+ xeqe = qm->aeqe + xeqe_id;
+ } else {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = dump_show(qm, xeqe, size, name);
+ if (ret)
+ dev_info(dev, "Show %s failed!\n", name);
+
+err_unlock:
+ up_read(&qm->qps_lock);
+ return ret;
+}
+
+static int qm_dbg_help(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "available commands:\n");
+ dev_info(dev, "sqc <num>\n");
+ dev_info(dev, "cqc <num>\n");
+ dev_info(dev, "eqc\n");
+ dev_info(dev, "aeqc\n");
+ dev_info(dev, "sq <num> <e>\n");
+ dev_info(dev, "cq <num> <e>\n");
+ dev_info(dev, "eq <e>\n");
+ dev_info(dev, "aeq <e>\n");
+
+ return 0;
+}
+
+static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *presult, *s;
+ int ret;
+
+ s = kstrdup(cmd_buf, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ kfree(s);
+ return -EINVAL;
+ }
+
+ if (!strcmp(presult, "sqc"))
+ ret = qm_sqc_dump(qm, s);
+ else if (!strcmp(presult, "cqc"))
+ ret = qm_cqc_dump(qm, s);
+ else if (!strcmp(presult, "eqc"))
+ ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
+ QM_MB_CMD_EQC, "EQC");
+ else if (!strcmp(presult, "aeqc"))
+ ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
+ QM_MB_CMD_AEQC, "AEQC");
+ else if (!strcmp(presult, "sq"))
+ ret = qm_sq_dump(qm, s);
+ else if (!strcmp(presult, "cq"))
+ ret = qm_cq_dump(qm, s);
+ else if (!strcmp(presult, "eq"))
+ ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
+ else if (!strcmp(presult, "aeq"))
+ ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
+ else if (!strcmp(presult, "help"))
+ ret = qm_dbg_help(qm, s);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ dev_info(dev, "Please echo help\n");
+
+ kfree(s);
+
+ return ret;
+}
+
+static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp;
+ int ret;
+
+ if (*pos)
+ return 0;
+
+ /* Reject commands while the instance is stopped for a reset. */
+ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
+ return 0;
+
+ if (count > QM_DBG_WRITE_LEN)
+ return -ENOSPC;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ if (copy_from_user(cmd_buf, buffer, count)) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+
+ cmd_buf[count] = '\0';
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ ret = qm_cmd_write_dump(qm, cmd_buf);
+ if (ret) {
+ kfree(cmd_buf);
+ return ret;
+ }
+
+ kfree(cmd_buf);
+
+ return count;
+}
+
+static const struct file_operations qm_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_cmd_read,
+ .write = qm_cmd_write,
+};
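
Together these fops expose a one-shot text command interface; the dumps themselves go to the kernel log, and reading the file only returns a usage hint. A hedged user-space client (the debugfs path below is an assumption and depends on where the driver registers its directory):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; adjust to the device's debugfs directory. */
	const char *path = "/sys/kernel/debug/hpre/0000:79:00.0/qm/cmd";
	char buf[512];	/* must be >= QM_DBG_READ_LEN (256) */
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the driver to dump SQC for queue 0 into the kernel log. */
	if (write(fd, "sqc 0", strlen("sqc 0")) < 0)
		perror("write");

	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);	/* returns the help hint */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}
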
+
static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
{
struct dentry *qm_d = qm->debug.qm_d;
@@ -1001,20 +1540,21 @@ static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
return 0;
}
-static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
- u32 irq_enable = ce | nfe | fe | msi;
+ u32 irq_enable = ce | nfe | fe;
u32 irq_unmask = ~irq_enable;
qm->error_mask = ce | nfe | fe;
- qm->msi_mask = msi;
+
+ /* clear QM hw residual error source */
+ writel(QM_ABNORMAL_INT_SOURCE_CLR,
+ qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
@@ -1022,9 +1562,6 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
- /* use RAS irq default, so only set QM_RAS_MSI_INT_SEL for MSI */
- writel(msi, qm->io_base + QM_RAS_MSI_INT_SEL);
-
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
@@ -1071,7 +1608,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
}
}
-static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
+static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
u32 error_status, tmp;
@@ -1080,15 +1617,20 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
error_status = qm->error_mask & tmp;
if (error_status) {
- qm_log_hw_error(qm, error_status);
+ if (error_status & QM_ECC_MBIT)
+ qm->err_status.is_qm_ecc_mbit = true;
- /* clear err sts */
- writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ qm_log_hw_error(qm, error_status);
+ if (error_status == QM_DB_RANDOM_INVALID) {
+ writel(error_status, qm->io_base +
+ QM_ABNORMAL_INT_SOURCE);
+ return ACC_ERR_RECOVERED;
+ }
- return PCI_ERS_RESULT_NEED_RESET;
+ return ACC_ERR_NEED_RESET;
}
- return PCI_ERS_RESULT_RECOVERED;
+ return ACC_ERR_RECOVERED;
}
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
@@ -1117,68 +1659,61 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
return qp->sqe + sq_tail * qp->qm->sqe_size;
}
-/**
- * hisi_qm_create_qp() - Create a queue pair from qm.
- * @qm: The qm we create a qp from.
- * @alg_type: Accelerator specific algorithm type in sqc.
- *
- * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
- * qp memory fails.
- */
-struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
+static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp;
- int qp_id, ret;
-
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ int qp_id;
- write_lock(&qm->qps_lock);
+ if (!qm_qp_avail_state(qm, NULL, QP_INIT))
+ return ERR_PTR(-EPERM);
- qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num);
- if (qp_id >= qm->qp_num) {
- write_unlock(&qm->qps_lock);
- dev_info(&qm->pdev->dev, "QM all queues are busy!\n");
- ret = -EBUSY;
- goto err_free_qp;
+ if (qm->qp_in_used == qm->qp_num) {
+ dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
+ qm->qp_num);
+ atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
+ return ERR_PTR(-EBUSY);
}
- set_bit(qp_id, qm->qp_bitmap);
- qm->qp_array[qp_id] = qp;
- qm->qp_in_used++;
- write_unlock(&qm->qps_lock);
-
- qp->qm = qm;
+ qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
+ if (qp_id < 0) {
+ dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
+ qm->qp_num);
+ atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
+ return ERR_PTR(-EBUSY);
+ }
- if (qm->use_dma_api) {
- qp->qdma.size = qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH;
- qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size,
- &qp->qdma.dma, GFP_KERNEL);
- if (!qp->qdma.va) {
- ret = -ENOMEM;
- goto err_clear_bit;
- }
+ qp = &qm->qp_array[qp_id];
- dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n",
- qp->qdma.va, &qp->qdma.dma, qp->qdma.size);
- }
+ memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ qp->event_cb = NULL;
+ qp->req_cb = NULL;
qp->qp_id = qp_id;
qp->alg_type = alg_type;
+ qm->qp_in_used++;
+ atomic_set(&qp->qp_status.flags, QP_INIT);
return qp;
+}
+
+/**
+ * hisi_qm_create_qp() - Create a queue pair from qm.
+ * @qm: The qm we create a qp from.
+ * @alg_type: Accelerator specific algorithm type in sqc.
+ *
+ * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
+ * qp memory fails.
+ */
+struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
+{
+ struct hisi_qp *qp;
-err_clear_bit:
- write_lock(&qm->qps_lock);
- qm->qp_array[qp_id] = NULL;
- clear_bit(qp_id, qm->qp_bitmap);
- write_unlock(&qm->qps_lock);
-err_free_qp:
- kfree(qp);
- return ERR_PTR(ret);
+ down_write(&qm->qps_lock);
+ qp = qm_create_qp_nolock(qm, alg_type);
+ up_write(&qm->qps_lock);
+
+ return qp;
}
EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
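
The create path is now split into a *_nolock worker plus a locked wrapper, so reset code that already holds qps_lock can reuse the worker. A minimal sketch of this pattern with a pthread rwlock (names are illustrative; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t qps_lock = PTHREAD_RWLOCK_INITIALIZER;
static int qp_in_used;

/* Worker: caller must already hold qps_lock for writing. */
static int create_qp_nolock(void)
{
	return qp_in_used++;
}

/* Public wrapper: take the lock, delegate, release. */
static int create_qp(void)
{
	int id;

	pthread_rwlock_wrlock(&qps_lock);
	id = create_qp_nolock();
	pthread_rwlock_unlock(&qps_lock);
	return id;
}

int main(void)
{
	printf("qp %d\n", create_qp());
	return 0;
}
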
@@ -1191,19 +1726,18 @@ EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
void hisi_qm_release_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct qm_dma *qdma = &qp->qdma;
- struct device *dev = &qm->pdev->dev;
- if (qm->use_dma_api && qdma->va)
- dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
+ down_write(&qm->qps_lock);
+
+ if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
+ up_write(&qm->qps_lock);
+ return;
+ }
- write_lock(&qm->qps_lock);
- qm->qp_array[qp->qp_id] = NULL;
- clear_bit(qp->qp_id, qm->qp_bitmap);
qm->qp_in_used--;
- write_unlock(&qm->qps_lock);
+ idr_remove(&qm->qp_idr, qp->qp_id);
- kfree(qp);
+ up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
@@ -1234,7 +1768,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
if (ver == QM_HW_V1) {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
- } else if (ver == QM_HW_V2) {
+ } else {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
sqc->w8 = 0; /* rand_qc */
}
@@ -1261,7 +1795,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
if (ver == QM_HW_V1) {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
- } else if (ver == QM_HW_V2) {
+ } else {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
cqc->w8 = 0;
}
@@ -1274,6 +1808,27 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
return ret;
}
+static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
+{
+ struct hisi_qm *qm = qp->qm;
+ struct device *dev = &qm->pdev->dev;
+ int qp_id = qp->qp_id;
+ int pasid = arg;
+ int ret;
+
+ if (!qm_qp_avail_state(qm, qp, QP_START))
+ return -EPERM;
+
+ ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
+ if (ret)
+ return ret;
+
+ atomic_set(&qp->qp_status.flags, QP_START);
+ dev_dbg(dev, "queue %d started\n", qp_id);
+
+ return 0;
+}
+
/**
* hisi_qm_start_qp() - Start a qp into running.
* @qp: The qp we want to start to run.
@@ -1285,48 +1840,112 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
struct hisi_qm *qm = qp->qm;
- struct device *dev = &qm->pdev->dev;
- enum qm_hw_ver ver = qm->ver;
- int qp_id = qp->qp_id;
- int pasid = arg;
- size_t off = 0;
int ret;
-#define QP_INIT_BUF(qp, type, size) do { \
- (qp)->type = ((qp)->qdma.va + (off)); \
- (qp)->type##_dma = (qp)->qdma.dma + (off); \
- off += (size); \
-} while (0)
+ down_write(&qm->qps_lock);
+ ret = qm_start_qp_nolock(qp, arg);
+ up_write(&qm->qps_lock);
- if (!qp->qdma.dma) {
- dev_err(dev, "cannot get qm dma buffer\n");
- return -EINVAL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
+
+/*
+ * Determine whether the queue has been drained by comparing the tail
+ * pointers of the sq and cq.
+ */
+static int qm_drain_qp(struct hisi_qp *qp)
+{
+ size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
+ struct hisi_qm *qm = qp->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ dma_addr_t dma_addr;
+ int ret = 0, i = 0;
+ void *addr;
+
+ /*
+ * No need to judge if ECC multi-bit error occurs because the
+ * master OOO will be blocked.
+ */
+ if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
+ return 0;
+
+ addr = qm_ctx_alloc(qm, size, &dma_addr);
+ if (IS_ERR(addr)) {
+ dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
+ return -ENOMEM;
}
- /* sq need 128 bytes alignment */
- if (qp->qdma.dma & QM_SQE_DATA_ALIGN_MASK) {
- dev_err(dev, "qm sq is not aligned to 128 byte\n");
- return -EINVAL;
+ while (++i) {
+ ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to dump sqc!\n");
+ break;
+ }
+ sqc = addr;
+
+ ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
+ qp->qp_id);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to dump cqc!\n");
+ break;
+ }
+ cqc = addr + sizeof(struct qm_sqc);
+
+ if ((sqc->tail == cqc->tail) &&
+ (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
+ break;
+
+ if (i == MAX_WAIT_COUNTS) {
+ dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
+ ret = -EBUSY;
+ break;
+ }
+
+ usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
}
- QP_INIT_BUF(qp, sqe, qm->sqe_size * QM_Q_DEPTH);
- QP_INIT_BUF(qp, cqe, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ qm_ctx_free(qm, size, addr, &dma_addr);
- dev_dbg(dev, "init qp buffer(v%d):\n"
- " sqe (%pK, %lx)\n"
- " cqe (%pK, %lx)\n",
- ver, qp->sqe, (unsigned long)qp->sqe_dma,
- qp->cqe, (unsigned long)qp->cqe_dma);
+ return ret;
+}
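
qm_drain_qp() polls the hardware contexts with a bounded retry budget and a short sleep between samples, failing with -EBUSY once the budget runs out. The wait pattern in isolation (the predicate is a stand-in for the sqc/cqc tail comparison):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define WAIT_US		100	/* like WAIT_PERIOD_US_MIN */
#define MAX_WAITS	1000	/* like MAX_WAIT_COUNTS */

static int wait_for(bool (*cond)(void *), void *arg)
{
	int i;

	for (i = 0; i < MAX_WAITS; i++) {
		if (cond(arg))
			return 0;
		usleep(WAIT_US);
	}
	return -1;	/* the driver returns -EBUSY here */
}

static bool tails_equal(void *arg)
{
	int *tails = arg;	/* [0] = sq tail, [1] = cq tail */
	return tails[0] == tails[1];
}

int main(void)
{
	int tails[2] = { 5, 5 };

	printf("%d\n", wait_for(tails_equal, tails)); /* 0: drained */
	return 0;
}
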
- ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
+static int qm_stop_qp_nolock(struct hisi_qp *qp)
+{
+ struct device *dev = &qp->qm->pdev->dev;
+ int ret;
+
+ /*
+ * A qp may be stopped and released during reset. If a qp stopped by a
+ * reset is then released explicitly, clear the is_resetting flag so
+ * that the qp will not be restarted after the reset completes.
+ */
+ if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
+ qp->is_resetting = false;
+ return 0;
+ }
+
+ if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
+ return -EPERM;
+
+ atomic_set(&qp->qp_status.flags, QP_STOP);
+
+ ret = qm_drain_qp(qp);
if (ret)
- return ret;
+ dev_err(dev, "Failed to drain out data for stopping!\n");
- dev_dbg(dev, "queue %d started\n", qp_id);
+ if (qp->qm->wq)
+ flush_workqueue(qp->qm->wq);
+ else
+ flush_work(&qp->qm->work);
+
+ dev_dbg(dev, "stop queue %u!", qp->qp_id);
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/**
* hisi_qm_stop_qp() - Stop a qp in qm.
@@ -1336,27 +1955,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
*/
int hisi_qm_stop_qp(struct hisi_qp *qp)
{
- struct device *dev = &qp->qm->pdev->dev;
- int i = 0;
-
- /* it is stopped */
- if (test_bit(QP_STOP, &qp->qp_status.flags))
- return 0;
-
- while (atomic_read(&qp->qp_status.used)) {
- i++;
- msleep(20);
- if (i == 10) {
- dev_err(dev, "Cannot drain out data for stopping, Force to stop!\n");
- return 0;
- }
- }
-
- set_bit(QP_STOP, &qp->qp_status.flags);
+ int ret;
- dev_dbg(dev, "stop queue %u!", qp->qp_id);
+ down_write(&qp->qm->qps_lock);
+ ret = qm_stop_qp_nolock(qp);
+ up_write(&qp->qm->qps_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
@@ -1367,6 +1972,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
*
* This function will return -EBUSY if qp is currently full, and -EAGAIN
* if qp related qm is resetting.
+ *
+ * Note: This function may run concurrently with qm_irq_thread and an ACC
+ *       reset. It does not race with qm_irq_thread; however, an ACC reset
+ *       may happen during hisi_qp_send, and for performance no lock is
+ *       taken here. A reset can make the current qm_db write fail, or the
+ *       sent sqe may never be received. The QM sync/async receive paths
+ *       should handle such error sqes, and the ACC reset-done handler
+ *       should clear used sqes to 0.
*/
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
@@ -1375,7 +1987,9 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
void *sqe = qm_get_avail_sqe(qp);
- if (unlikely(test_bit(QP_STOP, &qp->qp_status.flags))) {
+ if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
+ atomic_read(&qp->qm->status.flags) == QM_STOP ||
+ qp->is_resetting)) {
dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
return -EAGAIN;
}
@@ -1397,12 +2011,13 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
{
unsigned int val;
- if (qm->ver == QM_HW_V2) {
- writel(0x1, qm->io_base + QM_CACHE_WB_START);
- if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
- val, val & BIT(0), 10, 1000))
- dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
- }
+ if (qm->ver == QM_HW_V1)
+ return;
+
+ writel(0x1, qm->io_base + QM_CACHE_WB_START);
+ if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
+ val, val & BIT(0), 10, 1000))
+ dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
}
static void qm_qp_event_notifier(struct hisi_qp *qp)
@@ -1412,16 +2027,7 @@ static void qm_qp_event_notifier(struct hisi_qp *qp)
static int hisi_qm_get_available_instances(struct uacce_device *uacce)
{
- int i, ret;
- struct hisi_qm *qm = uacce->priv;
-
- read_lock(&qm->qps_lock);
- for (i = 0, ret = 0; i < qm->qp_num; i++)
- if (!qm->qp_array[i])
- ret++;
- read_unlock(&qm->qps_lock);
-
- return ret;
+ return hisi_qm_get_free_qp_num(uacce->priv);
}
static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
@@ -1468,12 +2074,12 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
switch (qfr->type) {
case UACCE_QFRT_MMIO:
- if (qm->ver == QM_HW_V2) {
- if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
- QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
+ if (qm->ver == QM_HW_V1) {
+ if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
} else {
- if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
+ if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
+ QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
}
@@ -1519,9 +2125,9 @@ static int qm_set_sqctype(struct uacce_queue *q, u16 type)
struct hisi_qm *qm = q->uacce->priv;
struct hisi_qp *qp = q->priv;
- write_lock(&qm->qps_lock);
+ down_write(&qm->qps_lock);
qp->alg_type = type;
- write_unlock(&qm->qps_lock);
+ up_write(&qm->qps_lock);
return 0;
}
@@ -1623,107 +2229,121 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
{
int ret;
- read_lock(&qm->qps_lock);
+ down_read(&qm->qps_lock);
ret = qm->qp_num - qm->qp_in_used;
- read_unlock(&qm->qps_lock);
+ up_read(&qm->qps_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
-/**
- * hisi_qm_init() - Initialize configures about qm.
- * @qm: The qm needing init.
- *
- * This function init qm, then we can call hisi_qm_start to put qm into work.
- */
-int hisi_qm_init(struct hisi_qm *qm)
+static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
- unsigned int num_vec;
- int ret;
+ struct device *dev = &qm->pdev->dev;
+ struct qm_dma *qdma;
+ int i;
- switch (qm->ver) {
- case QM_HW_V1:
- qm->ops = &qm_hw_ops_v1;
- break;
- case QM_HW_V2:
- qm->ops = &qm_hw_ops_v2;
- break;
- default:
- return -EINVAL;
+ for (i = num - 1; i >= 0; i--) {
+ qdma = &qm->qp_array[i].qdma;
+ dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
}
- ret = qm_alloc_uacce(qm);
- if (ret < 0)
- dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
+ kfree(qm->qp_array);
+}
- ret = pci_enable_device_mem(pdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to enable device mem!\n");
- goto err_remove_uacce;
- }
+static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
+{
+ struct device *dev = &qm->pdev->dev;
+ size_t off = qm->sqe_size * QM_Q_DEPTH;
+ struct hisi_qp *qp;
- ret = pci_request_mem_regions(pdev, qm->dev_name);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to request mem regions!\n");
- goto err_disable_pcidev;
- }
+ qp = &qm->qp_array[id];
+ qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
+ GFP_KERNEL);
+ if (!qp->qdma.va)
+ return -ENOMEM;
- qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
- qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
- qm->io_base = ioremap(qm->phys_base, qm->phys_size);
- if (!qm->io_base) {
- ret = -EIO;
- goto err_release_mem_regions;
- }
+ qp->sqe = qp->qdma.va;
+ qp->sqe_dma = qp->qdma.dma;
+ qp->cqe = qp->qdma.va + off;
+ qp->cqe_dma = qp->qdma.dma + off;
+ qp->qdma.size = dma_size;
+ qp->qm = qm;
+ qp->qp_id = id;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (ret < 0)
- goto err_iounmap;
- pci_set_master(pdev);
+ return 0;
+}
- if (!qm->ops->get_irq_num) {
- ret = -EOPNOTSUPP;
- goto err_iounmap;
+static int hisi_qm_memory_init(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ size_t qp_dma_size, off = 0;
+ int i, ret = 0;
+
+#define QM_INIT_BUF(qm, type, num) do { \
+ (qm)->type = ((qm)->qdma.va + (off)); \
+ (qm)->type##_dma = (qm)->qdma.dma + (off); \
+ off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
+} while (0)
+
+ idr_init(&qm->qp_idr);
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
+ QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
+ qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
+ GFP_ATOMIC);
+ dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
+ if (!qm->qdma.va)
+ return -ENOMEM;
+
+ QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, sqc, qm->qp_num);
+ QM_INIT_BUF(qm, cqc, qm->qp_num);
+
+ qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
+ if (!qm->qp_array) {
+ ret = -ENOMEM;
+ goto err_alloc_qp_array;
}
- num_vec = qm->ops->get_irq_num(qm);
- ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
- if (ret < 0) {
- dev_err(dev, "Failed to enable MSI vectors!\n");
- goto err_iounmap;
+
+ /* one more page for device or qp statuses */
+ qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
+ sizeof(struct qm_cqe) * QM_Q_DEPTH;
+ qp_dma_size = PAGE_ALIGN(qp_dma_size);
+ for (i = 0; i < qm->qp_num; i++) {
+ ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+ if (ret)
+ goto err_init_qp_mem;
+
+ dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size);
}
- ret = qm_irq_register(qm);
- if (ret)
- goto err_free_irq_vectors;
+ return ret;
- qm->qp_in_used = 0;
- mutex_init(&qm->mailbox_lock);
- rwlock_init(&qm->qps_lock);
- INIT_WORK(&qm->work, qm_work_process);
+err_init_qp_mem:
+ hisi_qp_memory_uninit(qm, i);
+err_alloc_qp_array:
+ dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
- dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf",
- qm->use_dma_api ? "dma api" : "iommu api");
+ return ret;
+}
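
hisi_qm_memory_init() allocates one coherent region and carves it into the eqe/aeqe/sqc/cqc sub-buffers by accumulating 32-byte-aligned offsets, which is what QM_INIT_BUF does. The carving arithmetic in a standalone sketch (plain malloc stands in for dma_alloc_coherent):

#include <stdio.h>
#include <stdlib.h>

#define QMC_ALIGN(sz)	(((sz) + 31) & ~(size_t)31)	/* ALIGN(sz, 32) */

struct region { void *eqe, *aeqe, *sqc, *cqc; };

static void carve(struct region *r, void *base, size_t eqe_sz,
		  size_t aeqe_sz, size_t sqc_sz, size_t cqc_sz)
{
	size_t off = 0;

	r->eqe  = (char *)base + off; off += QMC_ALIGN(eqe_sz);
	r->aeqe = (char *)base + off; off += QMC_ALIGN(aeqe_sz);
	r->sqc  = (char *)base + off; off += QMC_ALIGN(sqc_sz);
	r->cqc  = (char *)base + off; off += QMC_ALIGN(cqc_sz);
}

int main(void)
{
	size_t total = QMC_ALIGN(1024) + QMC_ALIGN(1024) +
		       QMC_ALIGN(320) + QMC_ALIGN(320);
	void *base = malloc(total);
	struct region r;

	carve(&r, base, 1024, 1024, 320, 320);
	printf("sqc offset = %td\n", (char *)r.sqc - (char *)base);
	free(base);
	return 0;
}
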
- return 0;
+static void hisi_qm_pre_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
-err_free_irq_vectors:
- pci_free_irq_vectors(pdev);
-err_iounmap:
- iounmap(qm->io_base);
-err_release_mem_regions:
- pci_release_mem_regions(pdev);
-err_disable_pcidev:
- pci_disable_device(pdev);
-err_remove_uacce:
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
+ if (qm->ver == QM_HW_V1)
+ qm->ops = &qm_hw_ops_v1;
+ else
+ qm->ops = &qm_hw_ops_v2;
- return ret;
+ pci_set_drvdata(pdev, qm);
+ mutex_init(&qm->mailbox_lock);
+ init_rwsem(&qm->qps_lock);
+ qm->qp_in_used = 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_init);
/**
* hisi_qm_uninit() - Uninitialize qm.
@@ -1736,10 +2356,20 @@ void hisi_qm_uninit(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
+ down_write(&qm->qps_lock);
+
+ if (!qm_avail_state(qm, QM_CLOSE)) {
+ up_write(&qm->qps_lock);
+ return;
+ }
+
uacce_remove(qm->uacce);
qm->uacce = NULL;
- if (qm->use_dma_api && qm->qdma.va) {
+ hisi_qp_memory_uninit(qm, qm->qp_num);
+ idr_destroy(&qm->qp_idr);
+
+ if (qm->qdma.va) {
hisi_qm_cache_wb(qm);
dma_free_coherent(dev, qm->qdma.size,
qm->qdma.va, qm->qdma.dma);
@@ -1751,6 +2381,8 @@ void hisi_qm_uninit(struct hisi_qm *qm)
iounmap(qm->io_base);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
+
+ up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
@@ -1781,12 +2413,6 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
/**
- * hisi_qm_set_vft() - Set "virtual function table" for a qm.
- * @fun_num: Number of operated function.
- * @qm: The qm in which to set vft, alway in a PF.
- * @base: The base number of queue in vft.
- * @number: The number of queues in vft. 0 means invalid vft.
- *
 * This function is always called in the PF driver; it is used to assign queues
* among PF and VFs.
*
@@ -1794,7 +2420,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
* Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
* (VF function number 0x2)
*/
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
+static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
u32 number)
{
u32 max_q_num = qm->ctrl_qp_num;
@@ -1805,7 +2431,6 @@ int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_vft);
static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
@@ -1872,22 +2497,10 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
static int __hisi_qm_start(struct hisi_qm *qm)
{
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
- size_t off = 0;
int ret;
-#define QM_INIT_BUF(qm, type, num) do { \
- (qm)->type = ((qm)->qdma.va + (off)); \
- (qm)->type##_dma = (qm)->qdma.dma + (off); \
- off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
-} while (0)
-
WARN_ON(!qm->qdma.dma);
- if (qm->qp_num == 0)
- return -EINVAL;
-
if (qm->fun_type == QM_HW_PF) {
ret = qm_dev_mem_reset(qm);
if (ret)
@@ -1898,21 +2511,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
return ret;
}
- QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
- QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
- QM_INIT_BUF(qm, sqc, qm->qp_num);
- QM_INIT_BUF(qm, cqc, qm->qp_num);
-
- dev_dbg(dev, "init qm buffer:\n"
- " eqe (%pK, %lx)\n"
- " aeqe (%pK, %lx)\n"
- " sqc (%pK, %lx)\n"
- " cqc (%pK, %lx)\n",
- qm->eqe, (unsigned long)qm->eqe_dma,
- qm->aeqe, (unsigned long)qm->aeqe_dma,
- qm->sqc, (unsigned long)qm->sqc_dma,
- qm->cqc, (unsigned long)qm->cqc_dma);
-
ret = qm_eq_ctx_cfg(qm);
if (ret)
return ret;
@@ -1940,43 +2538,102 @@ static int __hisi_qm_start(struct hisi_qm *qm)
int hisi_qm_start(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
+ int ret = 0;
+
+ down_write(&qm->qps_lock);
+
+ if (!qm_avail_state(qm, QM_START)) {
+ up_write(&qm->qps_lock);
+ return -EPERM;
+ }
dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
if (!qm->qp_num) {
dev_err(dev, "qp_num should not be 0\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unlock;
}
- if (!qm->qp_bitmap) {
- qm->qp_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(qm->qp_num),
- sizeof(long), GFP_KERNEL);
- qm->qp_array = devm_kcalloc(dev, qm->qp_num,
- sizeof(struct hisi_qp *),
- GFP_KERNEL);
- if (!qm->qp_bitmap || !qm->qp_array)
- return -ENOMEM;
+ ret = __hisi_qm_start(qm);
+ if (!ret)
+ atomic_set(&qm->status.flags, QM_START);
+
+err_unlock:
+ up_write(&qm->qps_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_start);
+
+static int qm_restart(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct hisi_qp *qp;
+ int ret, i;
+
+ ret = hisi_qm_start(qm);
+ if (ret < 0)
+ return ret;
+
+ down_write(&qm->qps_lock);
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
+ qp->is_resetting == true) {
+ ret = qm_start_qp_nolock(qp, 0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start qp%d!\n", i);
+
+ up_write(&qm->qps_lock);
+ return ret;
+ }
+ qp->is_resetting = false;
+ }
}
+ up_write(&qm->qps_lock);
- if (!qm->use_dma_api) {
- dev_dbg(&qm->pdev->dev, "qm delay start\n");
- return 0;
- } else if (!qm->qdma.va) {
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
- QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
- qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size,
- &qm->qdma.dma, GFP_KERNEL);
- dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n",
- qm->qdma.va, &qm->qdma.dma, qm->qdma.size);
- if (!qm->qdma.va)
- return -ENOMEM;
+ return 0;
+}
+
+/* Stop started qps in reset flow */
+static int qm_stop_started_qp(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct hisi_qp *qp;
+ int i, ret;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
+ qp->is_resetting = true;
+ ret = qm_stop_qp_nolock(qp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to stop qp%d!\n", i);
+ return ret;
+ }
+ }
}
- return __hisi_qm_start(qm);
+ return 0;
+}
+
+/*
+ * Clear the memory of all queues in a qm. The accelerator reset flow can
+ * use this to wipe the queues.
+ */
+static void qm_clear_queues(struct hisi_qm *qm)
+{
+ struct hisi_qp *qp;
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (qp->is_resetting)
+ memset(qp->qdma.va, 0, qp->qdma.size);
+ }
+
+ memset(qm->qdma.va, 0, qm->qdma.size);
}
-EXPORT_SYMBOL_GPL(hisi_qm_start);
/**
* hisi_qm_stop() - Stop a qm.
@@ -1988,43 +2645,98 @@ EXPORT_SYMBOL_GPL(hisi_qm_start);
*/
int hisi_qm_stop(struct hisi_qm *qm)
{
- struct device *dev;
- struct hisi_qp *qp;
- int ret = 0, i;
+ struct device *dev = &qm->pdev->dev;
+ int ret = 0;
- if (!qm || !qm->pdev) {
- WARN_ON(1);
- return -EINVAL;
+ down_write(&qm->qps_lock);
+
+ if (!qm_avail_state(qm, QM_STOP)) {
+ ret = -EPERM;
+ goto err_unlock;
}
- dev = &qm->pdev->dev;
+ if (qm->status.stop_reason == QM_SOFT_RESET ||
+ qm->status.stop_reason == QM_FLR) {
+ ret = qm_stop_started_qp(qm);
+ if (ret < 0) {
+ dev_err(dev, "Failed to stop started qp!\n");
+ goto err_unlock;
+ }
+ }
/* Mask eq and aeq irq */
writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
- /* Stop all qps belong to this qm */
- for (i = 0; i < qm->qp_num; i++) {
- qp = qm->qp_array[i];
- if (qp) {
- ret = hisi_qm_stop_qp(qp);
- if (ret < 0) {
- dev_err(dev, "Failed to stop qp%d!\n", i);
- return -EBUSY;
- }
- }
- }
-
if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, 0, 0);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "Failed to set vft!\n");
+ ret = -EBUSY;
+ goto err_unlock;
+ }
}
+ qm_clear_queues(qm);
+ atomic_set(&qm->status.flags, QM_STOP);
+
+err_unlock:
+ up_write(&qm->qps_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop);
+static ssize_t qm_status_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char buf[QM_DBG_READ_LEN];
+ int val, cp_len, len;
+
+ if (*pos)
+ return 0;
+
+ if (count < QM_DBG_READ_LEN)
+ return -ENOSPC;
+
+ val = atomic_read(&qm->status.flags);
+ len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
+ if (!len)
+ return -EFAULT;
+
+ cp_len = copy_to_user(buffer, buf, len);
+ if (cp_len)
+ return -EFAULT;
+
+ return (*pos = len);
+}
+
+static const struct file_operations qm_status_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_status_read,
+};
+
+static int qm_debugfs_atomic64_set(void *data, u64 val)
+{
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
+ return 0;
+}
+
+static int qm_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
+ qm_debugfs_atomic64_set, "%llu\n");
+
/**
* hisi_qm_debug_init() - Initialize qm related debugfs files.
* @qm: The qm for which we want to add debugfs files.
@@ -2033,7 +2745,9 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop);
*/
int hisi_qm_debug_init(struct hisi_qm *qm)
{
+ struct qm_dfx *dfx = &qm->debug.dfx;
struct dentry *qm_d;
+ void *data;
int i, ret;
qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
@@ -2047,7 +2761,20 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+
+ debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops);
+
+ debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
+ &qm_status_fops);
+ for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
+ data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
+ debugfs_create_file(qm_dfx_files[i].name,
+ 0644,
+ qm_d,
+ data,
+ &qm_atomic64_ops);
+ }
return 0;
@@ -2095,8 +2822,7 @@ static void qm_hw_error_init(struct hisi_qm *qm)
return;
}
- qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe,
- err_info->fe, err_info->msi);
+ qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
}
static void qm_hw_error_uninit(struct hisi_qm *qm)
@@ -2109,36 +2835,17 @@ static void qm_hw_error_uninit(struct hisi_qm *qm)
qm->ops->hw_error_uninit(qm);
}
-static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm)
+static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
- return PCI_ERS_RESULT_NONE;
+ return ACC_ERR_NONE;
}
return qm->ops->hw_error_handle(qm);
}
/**
- * hisi_qm_get_hw_version() - Get hardware version of a qm.
- * @pdev: The device which hardware version we want to get.
- *
- * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN
- * if the hardware version is not supported.
- */
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
-{
- switch (pdev->revision) {
- case QM_HW_V1:
- case QM_HW_V2:
- return pdev->revision;
- default:
- return QM_HW_UNKNOWN;
- }
-}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
-
-/**
* hisi_qm_dev_err_init() - Initialize device error configuration.
* @qm: The qm for which we want to do error initialization.
*
@@ -2299,34 +3006,163 @@ err:
}
EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
-static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm)
+static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
+{
+ u32 remain_q_num, q_num, i, j;
+ u32 q_base = qm->qp_num;
+ int ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_qp_num - qm->qp_num;
+
+ /* If the remaining queues are not enough, return an error. */
+ if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
+ return -EINVAL;
+
+ q_num = remain_q_num / num_vfs;
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ if (ret) {
+ for (j = i; j > 0; j--)
+ hisi_qm_set_vft(qm, j, 0, 0);
+ return ret;
+ }
+ q_base += q_num;
+ }
+
+ return 0;
+}
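
qm_vf_q_assign() divides the queues left after the PF evenly across the VFs and gives the remainder to the last one; on any set_vft failure it rolls back the VFs configured so far. The distribution arithmetic alone (the numbers are made up):

#include <stdio.h>

int main(void)
{
	unsigned int ctrl_qp_num = 1024;	/* queues the device has */
	unsigned int pf_qp_num = 256;		/* queues kept by the PF */
	unsigned int num_vfs = 5;
	unsigned int remain = ctrl_qp_num - pf_qp_num;
	unsigned int q_base = pf_qp_num;
	unsigned int q_num = remain / num_vfs;
	unsigned int i;

	for (i = 1; i <= num_vfs; i++) {
		if (i == num_vfs)
			q_num += remain % num_vfs; /* last VF takes the rest */
		printf("VF%u: base=%u num=%u\n", i, q_base, q_num);
		q_base += q_num;
	}
	return 0;
}
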
+
+static int qm_clear_vft_config(struct hisi_qm *qm)
+{
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= qm->vfs_num; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+ qm->vfs_num = 0;
+
+ return 0;
+}
+
+/**
+ * hisi_qm_sriov_enable() - enable virtual functions
+ * @pdev: the PCIe device
+ * @max_vfs: the number of virtual functions to enable
+ *
+ * Returns the number of enabled VFs. Fails if VFs are already enabled or
+ * if max_vfs exceeds the number of VFs the device supports.
+ */
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int pre_existing_vfs, num_vfs, total_vfs, ret;
+
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+ pre_existing_vfs = pci_num_vf(pdev);
+ if (pre_existing_vfs) {
+ pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
+ pre_existing_vfs);
+ return 0;
+ }
+
+ num_vfs = min_t(int, max_vfs, total_vfs);
+ ret = qm_vf_q_assign(qm, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ qm->vfs_num = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ qm_clear_vft_config(qm);
+ return ret;
+ }
+
+ pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
+
+ return num_vfs;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
+
+/**
+ * hisi_qm_sriov_disable - disable virtual functions
+ * @pdev: the PCI device
+ *
+ * Return failure if there are VFs assigned already.
+ */
+int hisi_qm_sriov_disable(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
+ return -EPERM;
+ }
+
+ /* The PCI driver's remove() will be called to free VF resources */
+ pci_disable_sriov(pdev);
+ return qm_clear_vft_config(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
+
+/**
+ * hisi_qm_sriov_configure - configure the number of VFs
+ * @pdev: The PCI device
+ * @num_vfs: The number of VFs to enable
+ *
+ * Enable SR-IOV according to num_vfs, 0 means disable.
+ */
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return hisi_qm_sriov_disable(pdev);
+ else
+ return hisi_qm_sriov_enable(pdev, num_vfs);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
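
With .sriov_configure pointed at hisi_qm_sriov_configure (as the hpre hunk earlier in this patch now does), the PCI core drives enable/disable from the standard sysfs knob. A hedged user-space example (the device address and path are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical BDF; writing "0" instead would disable the VFs. */
	const char *path =
		"/sys/bus/pci/devices/0000:79:00.0/sriov_numvfs";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "2", 1) != 1)	/* request 2 VFs */
		perror("write");
	close(fd);
	return 0;
}
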
+
+static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
u32 err_sts;
if (!qm->err_ini->get_dev_hw_err_status) {
dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
- return PCI_ERS_RESULT_NONE;
+ return ACC_ERR_NONE;
}
/* get device hardware error status */
err_sts = qm->err_ini->get_dev_hw_err_status(qm);
if (err_sts) {
+ if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+
if (!qm->err_ini->log_dev_hw_err) {
dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
- return PCI_ERS_RESULT_NEED_RESET;
+ return ACC_ERR_NEED_RESET;
}
qm->err_ini->log_dev_hw_err(qm, err_sts);
- return PCI_ERS_RESULT_NEED_RESET;
+ return ACC_ERR_NEED_RESET;
}
- return PCI_ERS_RESULT_RECOVERED;
+ return ACC_ERR_RECOVERED;
}
-static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
+static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
{
- struct hisi_qm *qm = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, dev_ret;
+ enum acc_err_result qm_ret, dev_ret;
/* log qm error */
qm_ret = qm_hw_error_handle(qm);
@@ -2334,9 +3170,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
/* log device error */
dev_ret = qm_dev_err_handle(qm);
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+ return (qm_ret == ACC_ERR_NEED_RESET ||
+ dev_ret == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}
/**
@@ -2350,6 +3186,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ enum acc_err_result ret;
+
if (pdev->is_virtfn)
return PCI_ERS_RESULT_NONE;
@@ -2357,10 +3196,756 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- return qm_process_dev_error(pdev);
+ ret = qm_process_dev_error(qm);
+ if (ret == ACC_ERR_NEED_RESET)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
+static int qm_get_hw_error_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+}
+
+static int qm_check_req_recv(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
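+	/*
+	 * Write a test pattern to the PEH vendor ID register and poll it
+	 * back to confirm that outstanding doorbells and mailboxes have
+	 * been received and QM register accesses still complete.
+	 */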
+ writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
+ (val == ACC_VENDOR_ID_VALUE),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(&pdev->dev, "Fails to read QM reg!\n");
+ return ret;
+ }
+
+ writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
+ (val == PCI_VENDOR_ID_HUAWEI),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret)
+ dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
+
+ return ret;
+}
+
+static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u16 cmd;
+ int i;
+
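+	/*
+	 * Toggle the Memory Space Enable bit in PCI_COMMAND and poll until
+	 * the change takes effect. PCI_COMMAND_MEMORY is bit 1, hence the
+	 * ">> 1" in the comparison below.
+	 */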
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (set)
+ cmd |= PCI_COMMAND_MEMORY;
+ else
+ cmd &= ~PCI_COMMAND_MEMORY;
+
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ for (i = 0; i < MAX_WAIT_COUNTS; i++) {
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u16 sriov_ctrl;
+ int pos;
+ int i;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
+ if (set)
+ sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
+ else
+ sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
+ pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
+
+ for (i = 0; i < MAX_WAIT_COUNTS; i++) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
+ if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
+ ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int qm_set_msi(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+
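+	/*
+	 * Mask or unmask all MSI vectors of the function through its MSI
+	 * mask bits. When masking, QM_PEH_DFX_INFO0 is checked to make
+	 * sure no MSI write is still pending; the check is skipped while
+	 * an ECC mbit error is being handled.
+	 */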
+ if (set) {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ 0);
+ } else {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ ACC_PEH_MSI_DISABLE);
+ if (qm->err_status.is_qm_ecc_mbit ||
+ qm->err_status.is_dev_ecc_mbit)
+ return 0;
+
+ mdelay(1);
+ if (readl(qm->io_base + QM_PEH_DFX_INFO0))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int qm_vf_reset_prepare(struct hisi_qm *qm)
+{
+ struct hisi_qm_list *qm_list = qm->qm_list;
+ int stop_reason = qm->status.stop_reason;
+ struct pci_dev *pdev = qm->pdev;
+ struct pci_dev *virtfn;
+ struct hisi_qm *vf_qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
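+	/* Stop the QM of every VF that belongs to this PF before the reset. */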
+ list_for_each_entry(vf_qm, &qm_list->list, list) {
+ virtfn = vf_qm->pdev;
+ if (virtfn == pdev)
+ continue;
+
+ if (pci_physfn(virtfn) == pdev) {
+ vf_qm->status.stop_reason = stop_reason;
+ ret = hisi_qm_stop(vf_qm);
+ if (ret)
+ goto stop_fail;
+ }
+ }
+
+stop_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int delay = 0;
+
+ /* All reset requests need to be queued for processing */
+ while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qm_controller_reset_prepare(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset not ready!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(qm);
+ if (ret) {
+ pci_err(pdev, "Fails to stop VFs!\n");
+ return ret;
+ }
+ }
+
+ qm->status.stop_reason = QM_SOFT_RESET;
+ ret = hisi_qm_stop(qm);
+ if (ret) {
+ pci_err(pdev, "Fails to stop QM!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+{
+ u32 nfe_enb = 0;
+
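+	/*
+	 * Two cases are handled here: a QM ECC mbit error with a device
+	 * close_axi_master_ooo() hook closes the AXI master OOO directly;
+	 * a device ECC mbit error without such a hook gets the same effect
+	 * by masking mbit NFE reporting and injecting a QM ECC mbit error.
+	 */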
+ if (!qm->err_status.is_dev_ecc_mbit &&
+ qm->err_status.is_qm_ecc_mbit &&
+ qm->err_ini->close_axi_master_ooo) {
+
+ qm->err_ini->close_axi_master_ooo(qm);
+
+ } else if (qm->err_status.is_dev_ecc_mbit &&
+ !qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_ini->close_axi_master_ooo) {
+
+ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+ }
+}
+
+static int qm_soft_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
+	/* Ensure all doorbells and mailboxes have been received by the QM */
+ ret = qm_check_req_recv(qm);
+ if (ret)
+ return ret;
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable vf MSE bit.\n");
+ return ret;
+ }
+ }
+
+ ret = qm_set_msi(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable PEH MSI bit.\n");
+ return ret;
+ }
+
+ qm_dev_ecc_mbit_handle(qm);
+
+ /* OOO register set and check */
+ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
+ qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+
+	/* If the bus is locked, reset the chip */
+ ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+ val,
+ (val == ACC_MASTER_TRANS_RETURN_RW),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable pf MSE bit.\n");
+ return ret;
+ }
+
+	/* The reset-related sub-control registers are not in the PCI BAR */
+ if (ACPI_HANDLE(&pdev->dev)) {
+ unsigned long long value = 0;
+ acpi_status s;
+
+ s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ qm->err_ini->err_info.acpi_rst,
+ NULL, &value);
+ if (ACPI_FAILURE(s)) {
+ pci_err(pdev, "NO controller reset method!\n");
+ return -EIO;
+ }
+
+ if (value) {
+ pci_err(pdev, "Reset step %llu failed!\n", value);
+ return -EIO;
+ }
+ } else {
+ pci_err(pdev, "No reset method!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qm_vf_reset_done(struct hisi_qm *qm)
+{
+ struct hisi_qm_list *qm_list = qm->qm_list;
+ struct pci_dev *pdev = qm->pdev;
+ struct pci_dev *virtfn;
+ struct hisi_qm *vf_qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(vf_qm, &qm_list->list, list) {
+ virtfn = vf_qm->pdev;
+ if (virtfn == pdev)
+ continue;
+
+ if (pci_physfn(virtfn) == pdev) {
+ ret = qm_restart(vf_qm);
+ if (ret)
+ goto restart_fail;
+ }
+ }
+
+restart_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ return qm->err_ini->get_dev_hw_err_status(qm);
+}
+
+static int qm_dev_hw_init(struct hisi_qm *qm)
+{
+ return qm->err_ini->hw_init(qm);
+}
+
+static void qm_restart_prepare(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_status.is_dev_ecc_mbit)
+ return;
+
+	/* temporarily close the OOO port used by the PEH to write out MSIs */
+ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+ writel(value & ~qm->err_ini->err_info.msi_wr_port,
+ qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+
+	/* Clear the device ECC 2-bit error source, if present */
+ value = qm_get_dev_err_status(qm) &
+ qm->err_ini->err_info.ecc_2bits_mask;
+ if (value && qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm, value);
+
+ /* clear QM ecc mbit error source */
+ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+
+ /* clear AM Reorder Buffer ecc mbit source */
+ writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
+
+ if (qm->err_ini->open_axi_master_ooo)
+ qm->err_ini->open_axi_master_ooo(qm);
+}
+
+static void qm_restart_done(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_status.is_dev_ecc_mbit)
+ return;
+
+	/* open the OOO port for the PEH to write out MSIs */
+ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+ value |= qm->err_ini->err_info.msi_wr_port;
+ writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+
+ qm->err_status.is_qm_ecc_mbit = false;
+ qm->err_status.is_dev_ecc_mbit = false;
+}
+
+static int qm_controller_reset_done(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_set_msi(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable PEH MSI bit!\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable pf MSE bit!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable vf MSE bit!\n");
+ return ret;
+ }
+ }
+
+ ret = qm_dev_hw_init(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to init device\n");
+ return ret;
+ }
+
+ qm_restart_prepare(qm);
+
+ ret = qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "Failed to assign queue!\n");
+ return ret;
+ }
+ }
+
+ ret = qm_vf_reset_done(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs!\n");
+ return -EPERM;
+ }
+
+ hisi_qm_dev_err_init(qm);
+ qm_restart_done(qm);
+
+ clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
+
+ return 0;
+}
+
+static int qm_controller_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ pci_info(pdev, "Controller resetting...\n");
+
+ ret = qm_controller_reset_prepare(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_soft_reset(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qm_controller_reset_done(qm);
+ if (ret)
+ return ret;
+
+ pci_info(pdev, "Controller reset complete\n");
+
+ return 0;
+}
+
+/**
+ * hisi_qm_dev_slot_reset() - slot reset
+ * @pdev: the PCIe device
+ *
+ * This function offers the QM-related PCIe device reset interface. Drivers
+ * that use the QM can use it as the slot_reset callback in their struct
+ * pci_error_handlers.
+ */
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ pci_aer_clear_nonfatal_status(pdev);
+
+ /* reset pcie device controller */
+ ret = qm_controller_reset(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
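For context: in the AER recovery flow the PCI core first calls the error_detected() callback (hisi_qm_dev_err_detected() above); only when that returns PCI_ERS_RESULT_NEED_RESET does the core reset the slot and invoke this slot_reset() callback, which performs the controller reset on the PF.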
+
+/* Check whether the interrupt is an ECC mbit error */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
+ if (ret)
+ return ret;
+
+ return (qm_get_dev_err_status(qm) &
+ qm->err_ini->err_info.ecc_2bits_mask);
+}
+
+void hisi_qm_reset_prepare(struct pci_dev *pdev)
+{
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ u32 delay = 0;
+ int ret;
+
+ hisi_qm_dev_err_uninit(pf_qm);
+
+ /*
+	 * Check whether there is an ECC mbit error. If there is, wait
+	 * for the soft reset to fix it.
+ */
+ while (qm_check_dev_error(pf_qm)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return;
+ }
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "FLR not ready!\n");
+ return;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
+ ret);
+ return;
+ }
+ }
+
+ ret = hisi_qm_stop(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
+ return;
+ }
+
+ pci_info(pdev, "FLR resetting...\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
+
+static bool qm_flr_reset_complete(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_pdev = pci_physfn(pdev);
+ struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
+ u32 id;
+
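+	/* A QM_PCI_COMMAND_INVALID read means config space reads no longer complete. */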
+ pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
+ if (id == QM_PCI_COMMAND_INVALID) {
+ pci_err(pdev, "Device can not be used!\n");
+ return false;
+ }
+
+ clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
+
+ return true;
+}
+
+void hisi_qm_reset_done(struct pci_dev *pdev)
+{
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ hisi_qm_dev_err_init(pf_qm);
+
+ ret = qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
+ goto flr_done;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = qm_dev_hw_init(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
+ goto flr_done;
+ }
+
+ if (!qm->vfs_num)
+ goto flr_done;
+
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
+ goto flr_done;
+ }
+
+ ret = qm_vf_reset_done(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
+ goto flr_done;
+ }
+ }
+
+flr_done:
+ if (qm_flr_reset_complete(pdev))
+ pci_info(pdev, "FLR reset complete\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
+
+static irqreturn_t qm_abnormal_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+ enum acc_err_result ret;
+
+ atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
+ ret = qm_process_dev_error(qm);
+ if (ret == ACC_ERR_NEED_RESET)
+ schedule_work(&qm->rst_work);
+
+ return IRQ_HANDLED;
+}
+
+static int qm_irq_register(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
+ qm_irq, IRQF_SHARED, qm->dev_name, qm);
+ if (ret)
+ return ret;
+
+ if (qm->ver != QM_HW_V1) {
+ ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
+ qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
+ if (ret)
+ goto err_aeq_irq;
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = request_irq(pci_irq_vector(pdev,
+ QM_ABNORMAL_EVENT_IRQ_VECTOR),
+ qm_abnormal_irq, IRQF_SHARED,
+ qm->dev_name, qm);
+ if (ret)
+				goto err_abnormal_irq;
+ }
+ }
+
+ return 0;
+
+err_abnormal_irq:
+ free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
+err_aeq_irq:
+ free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
+ return ret;
+}
+
+static void hisi_qm_controller_reset(struct work_struct *rst_work)
+{
+ struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
+ int ret;
+
+ /* reset pcie device controller */
+ ret = qm_controller_reset(qm);
+ if (ret)
+ dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
+}
+
+/**
+ * hisi_qm_init() - Initialize the QM configuration.
+ * @qm: The qm to initialize.
+ *
+ * This function initializes the qm; hisi_qm_start() can then be called to
+ * put the qm to work.
+ */
+int hisi_qm_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned int num_vec;
+ int ret;
+
+ hisi_qm_pre_init(qm);
+
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable device mem!\n");
+ goto err_remove_uacce;
+ }
+
+ ret = pci_request_mem_regions(pdev, qm->dev_name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request mem regions!\n");
+ goto err_disable_pcidev;
+ }
+
+ qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
+ qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
+ qm->io_base = ioremap(qm->phys_base, qm->phys_size);
+ if (!qm->io_base) {
+ ret = -EIO;
+ goto err_release_mem_regions;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret < 0)
+ goto err_iounmap;
+ pci_set_master(pdev);
+
+ if (!qm->ops->get_irq_num) {
+ ret = -EOPNOTSUPP;
+ goto err_iounmap;
+ }
+ num_vec = qm->ops->get_irq_num(qm);
+ ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable MSI vectors!\n");
+ goto err_iounmap;
+ }
+
+ ret = qm_irq_register(qm);
+ if (ret)
+ goto err_free_irq_vectors;
+
+ if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
+		/* v2 and later support getting the VFT by mailbox */
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_irq_unregister;
+ }
+
+ ret = hisi_qm_memory_init(qm);
+ if (ret)
+ goto err_irq_unregister;
+
+ INIT_WORK(&qm->work, qm_work_process);
+ if (qm->fun_type == QM_HW_PF)
+ INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
+
+ atomic_set(&qm->status.flags, QM_INIT);
+
+ return 0;
+
+err_irq_unregister:
+ qm_irq_unregister(qm);
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+err_iounmap:
+ iounmap(qm->io_base);
+err_release_mem_regions:
+ pci_release_mem_regions(pdev);
+err_disable_pcidev:
+ pci_disable_device(pdev);
+err_remove_uacce:
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_init);
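As a usage sketch: a driver built on the QM library fills in the identification fields of its struct hisi_qm and then calls hisi_qm_init() from probe. The "my_acc" names and constants below are illustrative, not part of the QM API; the SEC and ZIP probe paths later in this patch follow the same shape.

	static int my_acc_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
	{
		qm->pdev = pdev;
		qm->ver = pdev->revision;
		qm->sqe_size = MY_ACC_SQE_SIZE;		/* hypothetical constant */
		qm->dev_name = "my_acc";
		qm->fun_type = (pdev->device == MY_ACC_PF_PCI_DEVICE_ID) ?
			       QM_HW_PF : QM_HW_VF;

		/* After this succeeds, hisi_qm_start() puts the qm to work. */
		return hisi_qm_init(qm);
	}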
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index ec5b6f48db6c..0a351de8d838 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -8,6 +8,10 @@
#include <linux/module.h>
#include <linux/pci.h>
+#define QM_QNUM_V1 4096
+#define QM_QNUM_V2 1024
+#define QM_MAX_VFS_NUM_V2 63
+
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@@ -70,7 +74,7 @@
#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
- QM_OF_FIFO_OF)
+ QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID)
#define QM_BASE_CE QM_ECC_1BIT
#define QM_Q_DEPTH 1024
@@ -80,14 +84,31 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
+enum qm_stop_reason {
+ QM_NORMAL,
+ QM_SOFT_RESET,
+ QM_FLR,
+};
+
+enum qm_state {
+ QM_INIT = 0,
+ QM_START,
+ QM_CLOSE,
+ QM_STOP,
+};
+
enum qp_state {
+ QP_INIT = 1,
+ QP_START,
QP_STOP,
+ QP_CLOSE,
};
enum qm_hw_ver {
QM_HW_UNKNOWN = -1,
QM_HW_V1 = 0x20,
QM_HW_V2 = 0x21,
+ QM_HW_V3 = 0x30,
};
enum qm_fun_type {
@@ -101,6 +122,14 @@ enum qm_debug_file {
DEBUG_FILE_NUM,
};
+struct qm_dfx {
+ atomic64_t err_irq_cnt;
+ atomic64_t aeq_irq_cnt;
+ atomic64_t abnormal_irq_cnt;
+ atomic64_t create_qp_err_cnt;
+ atomic64_t mb_err_cnt;
+};
+
struct debugfs_file {
enum qm_debug_file index;
struct mutex lock;
@@ -109,6 +138,9 @@ struct debugfs_file {
struct qm_debug {
u32 curr_qm_qp_num;
+ u32 sqe_mask_offset;
+ u32 sqe_mask_len;
+ struct qm_dfx dfx;
struct dentry *debug_root;
struct dentry *qm_d;
struct debugfs_file files[DEBUG_FILE_NUM];
@@ -125,22 +157,34 @@ struct hisi_qm_status {
bool eqc_phase;
u32 aeq_head;
bool aeqc_phase;
- unsigned long flags;
+ atomic_t flags;
+ int stop_reason;
};
struct hisi_qm;
struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ u32 ecc_2bits_mask;
u32 ce;
u32 nfe;
u32 fe;
- u32 msi;
+};
+
+struct hisi_qm_err_status {
+ u32 is_qm_ecc_mbit;
+ u32 is_dev_ecc_mbit;
};
struct hisi_qm_err_ini {
+ int (*hw_init)(struct hisi_qm *qm);
void (*hw_err_enable)(struct hisi_qm *qm);
void (*hw_err_disable)(struct hisi_qm *qm);
u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
struct hisi_qm_err_info err_info;
};
@@ -161,7 +205,9 @@ struct hisi_qm {
u32 qp_num;
u32 qp_in_used;
u32 ctrl_qp_num;
+ u32 vfs_num;
struct list_head list;
+ struct hisi_qm_list *qm_list;
struct qm_dma qdma;
struct qm_sqc *sqc;
@@ -175,10 +221,12 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_status err_status;
+ unsigned long reset_flag;
- rwlock_t qps_lock;
- unsigned long *qp_bitmap;
- struct hisi_qp **qp_array;
+ struct rw_semaphore qps_lock;
+ struct idr qp_idr;
+ struct hisi_qp *qp_array;
struct mutex mailbox_lock;
@@ -187,13 +235,12 @@ struct hisi_qm {
struct qm_debug debug;
u32 error_mask;
- u32 msi_mask;
struct workqueue_struct *wq;
struct work_struct work;
+ struct work_struct rst_work;
const char *algs;
- bool use_dma_api;
bool use_sva;
resource_size_t phys_base;
resource_size_t phys_size;
@@ -205,7 +252,7 @@ struct hisi_qp_status {
u16 sq_tail;
u16 cq_head;
bool cqc_phase;
- unsigned long flags;
+ atomic_t flags;
};
struct hisi_qp_ops {
@@ -230,10 +277,58 @@ struct hisi_qp {
void (*event_cb)(struct hisi_qp *qp);
struct hisi_qm *qm;
+ bool is_resetting;
u16 pasid;
struct uacce_queue *uacce_q;
};
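+/*
+ * Shared module-parameter setter for the drivers' pf_q_num parameters:
+ * validates the value against the queue limit of the hardware revision
+ * actually present, falling back to the smaller V1/V2 limit when no
+ * device is found.
+ */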
+static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ device, NULL);
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %d\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || !n || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
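+/*
+ * Module-parameter setter for the drivers' vfs_num parameter: accepts
+ * values in the range 0 to QM_MAX_VFS_NUM_V2.
+ */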
+static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM_V2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
INIT_LIST_HEAD(&qm_list->list);
@@ -267,14 +362,19 @@ void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev);
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 3598fa17beb2..7b64aca704d6 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -160,6 +160,10 @@ struct sec_debug_file {
struct sec_dfx {
atomic64_t send_cnt;
atomic64_t recv_cnt;
+ atomic64_t send_busy_cnt;
+ atomic64_t err_bd_cnt;
+ atomic64_t invalid_req_cnt;
+ atomic64_t done_flag_cnt;
};
struct sec_debug {
@@ -172,7 +176,6 @@ struct sec_dev {
struct sec_debug debug;
u32 ctx_q_num;
bool iommu_used;
- u32 num_vfs;
unsigned long status;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 7f1c6a31b82f..64614a9bdf21 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -148,6 +148,7 @@ static int sec_aead_verify(struct sec_req *req)
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
struct sec_sqe *bd = resp;
struct sec_ctx *ctx;
struct sec_req *req;
@@ -157,11 +158,16 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
type = bd->type_cipher_auth & SEC_TYPE_MASK;
if (unlikely(type != SEC_BD_TYPE2)) {
+ atomic64_inc(&dfx->err_bd_cnt);
pr_err("err bd type [%d]\n", type);
return;
}
req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ if (unlikely(!req)) {
+ atomic64_inc(&dfx->invalid_req_cnt);
+ return;
+ }
req->err_type = bd->type2.error_type;
ctx = req->ctx;
done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
@@ -174,12 +180,13 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
"err_type[%d],done[%d],flag[%d]\n",
req->err_type, done, flag);
err = -EIO;
+ atomic64_inc(&dfx->done_flag_cnt);
}
if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
err = sec_aead_verify(req);
- atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
+ atomic64_inc(&dfx->recv_cnt);
ctx->req_op->buf_unmap(ctx, req);
@@ -200,10 +207,12 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
return -ENOBUFS;
if (!ret) {
- if (req->fake_busy)
+ if (req->fake_busy) {
+ atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
ret = -EBUSY;
- else
+ } else {
ret = -EINPROGRESS;
+ }
}
return ret;
@@ -832,7 +841,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
struct crypto_authenc_keys *keys)
{
struct crypto_shash *hash_tfm = ctx->hash_tfm;
- SHASH_DESC_ON_STACK(shash, hash_tfm);
int blocksize, ret;
if (!keys->authkeylen) {
@@ -842,8 +850,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
blocksize = crypto_shash_blocksize(hash_tfm);
if (keys->authkeylen > blocksize) {
- ret = crypto_shash_digest(shash, keys->authkey,
- keys->authkeylen, ctx->a_key);
+ ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
+ keys->authkeylen, ctx->a_key);
if (ret) {
pr_err("hisi_sec2: aead auth digest error!\n");
return -EINVAL;
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 1f54ebe164b6..a4cb58b54b25 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -80,6 +80,9 @@
#define SEC_VF_CNT_MASK 0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN 20
+#define SEC_SQE_MASK_OFFSET 64
+#define SEC_SQE_MASK_LEN 48
+
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
@@ -88,6 +91,11 @@ struct sec_hw_error {
const char *msg;
};
+struct sec_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;
@@ -110,7 +118,16 @@ static const char * const sec_dbg_file_name[] = {
[SEC_CLEAR_ENABLE] = "clear_enable",
};
-static struct debugfs_reg32 sec_dfx_regs[] = {
+static struct sec_dfx_item sec_dfx_labels[] = {
+ {"send_cnt", offsetof(struct sec_dfx, send_cnt)},
+ {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
+ {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
+ {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
+ {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
+ {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
+};
+
+static const struct debugfs_reg32 sec_dfx_regs[] = {
{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"SEC_SAA_EN ", 0x301270},
{"SEC_BD_LATENCY_MIN ", 0x301600},
@@ -136,45 +153,14 @@ static struct debugfs_reg32 sec_dfx_regs[] = {
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev;
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- SEC_PF_PCI_DEVICE_ID, NULL);
- if (!pdev) {
- q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
- pr_info("No device, suppose queue number is %d!\n", q_num);
- } else {
- rev_id = pdev->revision;
-
- switch (rev_id) {
- case QM_HW_V1:
- q_num = SEC_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = SEC_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret || !n || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}
static const struct kernel_param_ops sec_pf_q_num_ops = {
.set = sec_pf_q_num_set,
.get = param_get_int,
};
+
static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
@@ -207,6 +193,15 @@ static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
+
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+
void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
hisi_qm_free_qps(qps, qp_num);
@@ -240,9 +235,8 @@ static const struct pci_device_id sec_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
-static u8 sec_get_endian(struct sec_dev *sec)
+static u8 sec_get_endian(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
u32 reg;
/*
@@ -270,9 +264,8 @@ static u8 sec_get_endian(struct sec_dev *sec)
return SEC_64BE;
}
-static int sec_engine_init(struct sec_dev *sec)
+static int sec_engine_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
int ret;
u32 reg;
@@ -315,7 +308,7 @@ static int sec_engine_init(struct sec_dev *sec)
/* config endian */
reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
- reg |= sec_get_endian(sec);
+ reg |= sec_get_endian(qm);
writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
/* Enable sm4 xts mode multiple iv */
@@ -325,10 +318,8 @@ static int sec_engine_init(struct sec_dev *sec)
return 0;
}
-static int sec_set_user_domain_and_cache(struct sec_dev *sec)
+static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
-
/* qm user domain */
writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
@@ -349,7 +340,7 @@ static int sec_set_user_domain_and_cache(struct sec_dev *sec)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
- return sec_engine_init(sec);
+ return sec_engine_init(qm);
}
/* sec_debug_regs_clear() - clear the sec debug regs */
@@ -424,23 +415,22 @@ static u32 sec_current_qm_read(struct sec_debug_file *file)
static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
struct hisi_qm *qm = file->qm;
- struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
u32 vfq_num;
u32 tmp;
- if (val > sec->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
if (!val) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs;
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
- if (val == sec->num_vfs)
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num =
qm->ctrl_qp_num - qm->qp_num -
- (sec->num_vfs - 1) * vfq_num;
+ (qm->vfs_num - 1) * vfq_num;
else
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -570,10 +560,22 @@ static const struct file_operations sec_dbg_fops = {
static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
*val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+static int sec_debugfs_atomic64_set(void *data, u64 val)
+{
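+	/* Writing 0 clears the counter; any other value is rejected. */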
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
return 0;
}
+
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
- NULL, "%lld\n");
+ sec_debugfs_atomic64_set, "%lld\n");
static int sec_core_debug_init(struct sec_dev *sec)
{
@@ -582,6 +584,7 @@ static int sec_core_debug_init(struct sec_dev *sec)
struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
+ int i;
tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);
@@ -593,13 +596,15 @@ static int sec_core_debug_init(struct sec_dev *sec)
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
regset->base = qm->io_base;
- debugfs_create_regset32("regs", 0444, tmp_d, regset);
-
- debugfs_create_file("send_cnt", 0444, tmp_d,
- &dfx->send_cnt, &sec_atomic64_ops);
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_file("recv_cnt", 0444, tmp_d,
- &dfx->recv_cnt, &sec_atomic64_ops);
+ for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
+ atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
+ sec_dfx_labels[i].offset);
+ debugfs_create_file(sec_dfx_labels[i].name, 0644,
+ tmp_d, data, &sec_atomic64_ops);
+ }
return 0;
}
@@ -630,6 +635,9 @@ static int sec_debugfs_init(struct sec_dev *sec)
qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
sec_debugfs_root);
+
+ qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
@@ -675,8 +683,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
errs++;
}
-
- writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}
static u32 sec_get_hw_err_status(struct hisi_qm *qm)
@@ -684,17 +690,36 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + SEC_CORE_INT_STATUS);
}
+static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+}
+
+static void sec_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+}
+
static const struct hisi_qm_err_ini sec_err_ini = {
+ .hw_init = sec_set_user_domain_and_cache,
.hw_err_enable = sec_hw_error_enable,
.hw_err_disable = sec_hw_error_disable,
.get_dev_hw_err_status = sec_get_hw_err_status,
+ .clear_dev_hw_err_status = sec_clear_hw_err_status,
.log_dev_hw_err = sec_log_hw_error,
+ .open_axi_master_ooo = sec_open_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
- .msi = QM_DB_RANDOM_INVALID,
+ .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
+ .msi_wr_port = BIT(0),
+ .acpi_rst = "SRST",
}
};
@@ -703,22 +728,14 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1)
qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
- break;
-
- case QM_HW_V2:
+ else
qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
- break;
-
- default:
- return -EINVAL;
- }
qm->err_ini = &sec_err_ini;
- ret = sec_set_user_domain_and_cache(sec);
+ ret = sec_set_user_domain_and_cache(qm);
if (ret)
return ret;
@@ -730,32 +747,30 @@ static int sec_pf_probe_init(struct sec_dev *sec)
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -ENODEV;
+ int ret;
qm->pdev = pdev;
- qm->ver = rev_id;
-
+ qm->ver = pdev->revision;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
+
qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
QM_HW_PF : QM_HW_VF;
- qm->use_dma_api = true;
-
- return hisi_qm_init(qm);
-}
-
-static void sec_qm_uninit(struct hisi_qm *qm)
-{
- hisi_qm_uninit(qm);
-}
-
-static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
-{
- int ret;
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = SEC_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &sec_devices;
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+		 * A VF has no way to get the qm configuration on v1
+		 * hardware, so for now force the PF to use SEC_PF_DEF_Q_NUM
+		 * and support only one VF on v1 hardware.
+		 * v2 hardware has no such problem.
+ */
+ qm->qp_base = SEC_PF_DEF_Q_NUM;
+ qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
+ }
/*
* WQ_HIGHPRI: SEC request must be low delayed,
@@ -763,47 +778,38 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
* WQ_UNBOUND: SEC task is likely with long
* running CPU intensive workloads.
*/
- qm->wq = alloc_workqueue("%s", WQ_HIGHPRI |
- WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
- pci_name(qm->pdev));
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
if (!qm->wq) {
pci_err(qm->pdev, "fail to alloc workqueue\n");
return -ENOMEM;
}
- if (qm->fun_type == QM_HW_PF) {
- qm->qp_base = SEC_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
+ ret = hisi_qm_init(qm);
+ if (ret)
+ destroy_workqueue(qm->wq);
+
+ return ret;
+}
+static void sec_qm_uninit(struct hisi_qm *qm)
+{
+ hisi_qm_uninit(qm);
+}
+
+static int sec_probe_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+
+ if (qm->fun_type == QM_HW_PF) {
ret = sec_pf_probe_init(sec);
if (ret)
- goto err_probe_uninit;
- } else if (qm->fun_type == QM_HW_VF) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
- * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
- * to trigger only one VF in v1 hardware.
- * v2 hardware has no such problem.
- */
- if (qm->ver == QM_HW_V1) {
- qm->qp_base = SEC_PF_DEF_Q_NUM;
- qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
- } else if (qm->ver == QM_HW_V2) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_probe_uninit;
- }
- } else {
- ret = -ENODEV;
- goto err_probe_uninit;
+ return ret;
}
return 0;
-err_probe_uninit:
- destroy_workqueue(qm->wq);
- return ret;
}
static void sec_probe_uninit(struct hisi_qm *qm)
@@ -840,20 +846,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!sec)
return -ENOMEM;
- pci_set_drvdata(pdev, sec);
-
- sec->ctx_q_num = ctx_q_num;
- sec_iommu_used_check(sec);
-
qm = &sec->qm;
-
ret = sec_qm_init(qm, pdev);
if (ret) {
- pci_err(pdev, "Failed to pre init qm!\n");
+ pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
return ret;
}
- ret = sec_probe_init(qm, sec);
+ sec->ctx_q_num = ctx_q_num;
+ sec_iommu_used_check(sec);
+
+ ret = sec_probe_init(sec);
if (ret) {
pci_err(pdev, "Failed to probe!\n");
goto err_qm_uninit;
@@ -877,8 +880,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_remove_from_list;
}
+ if (qm->fun_type == QM_HW_PF && vfs_num) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_crypto_unregister;
+ }
+
return 0;
+err_crypto_unregister:
+ sec_unregister_from_crypto();
+
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
sec_debugfs_exit(sec);
@@ -893,110 +905,6 @@ err_qm_uninit:
return ret;
}
-/* now we only support equal assignment */
-static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
-{
- struct hisi_qm *qm = &sec->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num;
- int i, j, ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_qp_num - qp_num;
- q_num = remain_q_num / num_vfs;
-
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret) {
- for (j = i; j > 0; j--)
- hisi_qm_set_vft(qm, j, 0, 0);
- return ret;
- }
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int sec_clear_vft_config(struct sec_dev *sec)
-{
- struct hisi_qm *qm = &sec->qm;
- u32 num_vfs = sec->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
-
- sec->num_vfs = 0;
-
- return 0;
-}
-
-static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct sec_dev *sec = pci_get_drvdata(pdev);
- int pre_existing_vfs, ret;
- u32 num_vfs;
-
- pre_existing_vfs = pci_num_vf(pdev);
-
- if (pre_existing_vfs) {
- pci_err(pdev, "Can't enable VF. Please disable at first!\n");
- return 0;
- }
-
- num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
-
- ret = sec_vf_q_assign(sec, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- sec->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't enable VF!\n");
- sec_clear_vft_config(sec);
- return ret;
- }
-
- return num_vfs;
-}
-
-static int sec_sriov_disable(struct pci_dev *pdev)
-{
- struct sec_dev *sec = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* remove in sec_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return sec_clear_vft_config(sec);
-}
-
-static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- if (num_vfs)
- return sec_sriov_enable(pdev, num_vfs);
- else
- return sec_sriov_disable(pdev);
-}
-
static void sec_remove(struct pci_dev *pdev)
{
struct sec_dev *sec = pci_get_drvdata(pdev);
@@ -1006,8 +914,8 @@ static void sec_remove(struct pci_dev *pdev)
hisi_qm_del_from_list(qm, &sec_devices);
- if (qm->fun_type == QM_HW_PF && sec->num_vfs)
- (void)sec_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev);
sec_debugfs_exit(sec);
@@ -1023,6 +931,9 @@ static void sec_remove(struct pci_dev *pdev)
static const struct pci_error_handlers sec_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver sec_pci_driver = {
@@ -1031,7 +942,7 @@ static struct pci_driver sec_pci_driver = {
.probe = sec_probe,
.remove = sec_remove,
.err_handler = &sec_err_handler,
- .sriov_configure = sec_sriov_configure,
+ .sriov_configure = hisi_qm_sriov_configure,
};
static void sec_register_debugfs(void)
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 82dc6f867171..f3ed4c0e5493 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -28,12 +28,20 @@ enum hisi_zip_error_type {
HZIP_NC_ERR = 0x0d,
};
+struct hisi_zip_dfx {
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
+ atomic64_t send_busy_cnt;
+ atomic64_t err_bd_cnt;
+};
+
struct hisi_zip_ctrl;
struct hisi_zip {
struct hisi_qm qm;
struct list_head list;
struct hisi_zip_ctrl *ctrl;
+ struct hisi_zip_dfx dfx;
};
struct hisi_zip_sqe {
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 369ec3220574..c73707c2e539 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -64,7 +64,6 @@ struct hisi_zip_req_q {
struct hisi_zip_qp_ctx {
struct hisi_qp *qp;
- struct hisi_zip_sqe zip_sqe;
struct hisi_zip_req_q req_q;
struct hisi_acc_sgl_pool *sgl_pool;
struct hisi_zip *zip_dev;
@@ -333,6 +332,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
struct hisi_zip_sqe *sqe = data;
struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *req = req_q->q + sqe->tag;
struct acomp_req *acomp_req = req->req;
@@ -340,12 +340,14 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
u32 status, dlen, head_size;
int err = 0;
+ atomic64_inc(&dfx->recv_cnt);
status = sqe->dw3 & HZIP_BD_STATUS_M;
if (status != 0 && status != HZIP_NC_ERR) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
sqe->produced);
+ atomic64_inc(&dfx->err_bd_cnt);
err = -EIO;
}
dlen = sqe->produced;
@@ -484,11 +486,12 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_qp_ctx *qp_ctx)
{
- struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct hisi_zip_sqe zip_sqe;
dma_addr_t input;
dma_addr_t output;
int ret;
@@ -511,15 +514,18 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
}
req->dma_dst = output;
- hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen,
+ hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen,
a_req->dlen, req->sskip, req->dskip);
- hisi_zip_config_buf_type(zip_sqe, HZIP_SGL);
- hisi_zip_config_tag(zip_sqe, req->req_id);
+ hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL);
+ hisi_zip_config_tag(&zip_sqe, req->req_id);
/* send command to start a task */
- ret = hisi_qp_send(qp, zip_sqe);
- if (ret < 0)
+ atomic64_inc(&dfx->send_cnt);
+ ret = hisi_qp_send(qp, &zip_sqe);
+ if (ret < 0) {
+ atomic64_inc(&dfx->send_busy_cnt);
goto err_unmap_output;
+ }
return -EINPROGRESS;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index fcc85d2dbd07..2229a21ae7c8 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -62,6 +62,7 @@
#define HZIP_CORE_INT_SOURCE 0x3010A0
#define HZIP_CORE_INT_MASK_REG 0x3010A4
+#define HZIP_CORE_INT_SET 0x3010A8
#define HZIP_CORE_INT_STATUS 0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
@@ -83,8 +84,13 @@
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
+#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
+#define HZIP_WR_PORT BIT(11)
#define HZIP_BUF_SIZE 22
+#define HZIP_SQE_MASK_OFFSET 64
+#define HZIP_SQE_MASK_LEN 48
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
@@ -95,6 +101,18 @@ struct hisi_zip_hw_error {
const char *msg;
};
+struct zip_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
+static struct zip_dfx_item zip_dfx_files[] = {
+ {"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
+ {"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
+ {"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
+ {"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
+};
+
static const struct hisi_zip_hw_error zip_hw_error[] = {
{ .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" },
{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
@@ -134,7 +152,6 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_zip_ctrl {
- u32 num_vfs;
struct hisi_zip *hisi_zip;
struct dentry *debug_root;
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
@@ -162,7 +179,7 @@ static const u64 core_offsets[] = {
[HZIP_DECOMP_CORE5] = 0x309000,
};
-static struct debugfs_reg32 hzip_dfx_regs[] = {
+static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_GET_BD_NUM ", 0x00ull},
{"HZIP_GET_RIGHT_BD ", 0x04ull},
{"HZIP_GET_ERROR_BD ", 0x08ull},
@@ -189,38 +206,7 @@ static struct debugfs_reg32 hzip_dfx_regs[] = {
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- PCI_DEVICE_ID_ZIP_PF, NULL);
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- if (!pdev) {
- q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2);
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = HZIP_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = HZIP_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > q_num || n == 0)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
@@ -232,9 +218,14 @@ static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
+
static u32 vfs_num;
-module_param(vfs_num, uint, 0444);
-MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)");
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
@@ -250,9 +241,9 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
-static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
+static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
- void __iomem *base = hisi_zip->qm.io_base;
+ void __iomem *base = qm->io_base;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -279,7 +270,7 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
- if (hisi_zip->qm.use_sva) {
+ if (qm->use_sva) {
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
@@ -295,10 +286,14 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+
+ return 0;
}
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
+ u32 val;
+
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -317,12 +312,24 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+	/* enable ZIP block master OOO when an mbit error occurs */
+	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+	val |= HZIP_AXI_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
+ u32 val;
+
/* disable ZIP hw error interrupts */
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+	/* disable ZIP block master OOO when an mbit error occurs */
+	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+	val &= ~HZIP_AXI_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -342,21 +349,20 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
- struct hisi_zip_ctrl *ctrl = file->ctrl;
u32 vfq_num;
u32 tmp;
- if (val > ctrl->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* Calculate curr_qm_qp_num and store */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs;
- if (val == ctrl->num_vfs)
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
- qm->qp_num - (ctrl->num_vfs - 1) * vfq_num;
+ qm->qp_num - (qm->vfs_num - 1) * vfq_num;
else
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -477,6 +483,27 @@ static const struct file_operations ctrl_debug_fops = {
.write = ctrl_debug_write,
};
+
+static int zip_debugfs_atomic64_set(void *data, u64 val)
+{
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
+ return 0;
+}
+
+static int zip_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
+ zip_debugfs_atomic64_set, "%llu\n");
+
static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
{
struct hisi_zip *hisi_zip = ctrl->hisi_zip;
@@ -508,6 +535,25 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
return 0;
}
+static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
+{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
+ struct hisi_zip_dfx *dfx = &zip->dfx;
+ struct dentry *tmp_dir;
+ void *data;
+ int i;
+
+ tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
+ for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
+ data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
+ debugfs_create_file(zip_dfx_files[i].name,
+ 0644,
+ tmp_dir,
+ data,
+ &zip_atomic64_ops);
+ }
+}
+
static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
{
int i;
@@ -534,6 +580,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
+ qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
qm->debug.debug_root = dev_d;
ret = hisi_qm_debug_init(qm);
if (ret)
@@ -546,6 +594,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
goto failed_to_create;
}
+ hisi_zip_dfx_debug_init(qm);
+
return 0;
failed_to_create:
@@ -598,8 +648,6 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
err++;
}
-
- writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}
static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
@@ -607,17 +655,55 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}
+static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+}
+
+static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
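+	/*
+	 * Pulse the AXI shutdown enable bit (clear, then set) to re-open
+	 * the ZIP master OOO path after error handling.
+	 */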
+ writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 nfe_enb;
+
+	/* Disable ECC mbit error reporting. */
+ nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+
+	/* Inject a ZIP ECC mbit error to block the master OOO. */
+ writel(HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_SET);
+}
+
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
+ .hw_init = hisi_zip_set_user_domain_and_cache,
.hw_err_enable = hisi_zip_hw_error_enable,
.hw_err_disable = hisi_zip_hw_error_disable,
.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
+ .clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
.log_dev_hw_err = hisi_zip_log_hw_error,
+ .open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
+ .close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE |
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
- .msi = QM_DB_RANDOM_INVALID,
+ .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
+ .msi_wr_port = HZIP_WR_PORT,
+ .acpi_rst = "ZRST",
}
};
@@ -633,177 +719,85 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1)
qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
- break;
-
- case QM_HW_V2:
+ else
qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
- break;
-
- default:
- return -EINVAL;
- }
qm->err_ini = &hisi_zip_err_ini;
- hisi_zip_set_user_domain_and_cache(hisi_zip);
+ hisi_zip_set_user_domain_and_cache(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(hisi_zip);
return 0;
}
-/* Currently we only support equal assignment */
-static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
+static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num, i;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_qp_num - qp_num;
- if (remain_q_num < num_vfs)
- return -EINVAL;
+ qm->pdev = pdev;
+ qm->ver = pdev->revision;
+ qm->algs = "zlib\ngzip";
+ qm->sqe_size = HZIP_SQE_SIZE;
+ qm->dev_name = hisi_zip_name;
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret)
- return ret;
- q_base += q_num;
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ?
+ QM_HW_PF : QM_HW_VF;
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = HZIP_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ qm->qm_list = &zip_devices;
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+ * v1 hardware has no way to get the QM configuration from inside
+ * a VM, so currently force the PF to use HZIP_PF_DEF_Q_NUM and
+ * allow only one VF on v1 hardware.
+ *
+ * v2 hardware has no such problem.
+ */
+ qm->qp_base = HZIP_PF_DEF_Q_NUM;
+ qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
}
- return 0;
+ return hisi_qm_init(qm);
}
-static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
+static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
{
- struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
struct hisi_qm *qm = &hisi_zip->qm;
- u32 i, num_vfs = ctrl->num_vfs;
int ret;
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (qm->fun_type == QM_HW_PF) {
+ ret = hisi_zip_pf_probe_init(hisi_zip);
if (ret)
return ret;
}
- ctrl->num_vfs = 0;
-
return 0;
}
-static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
-
- if (pre_existing_vfs) {
- dev_err(&pdev->dev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);
-
- ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hisi_zip->ctrl->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't enable VF!\n");
- hisi_zip_clear_vft_config(hisi_zip);
- return ret;
- }
-
- return num_vfs;
-}
-
-static int hisi_zip_sriov_disable(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- dev_err(&pdev->dev,
- "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* remove in hisi_zip_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return hisi_zip_clear_vft_config(hisi_zip);
-}
-
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_zip *hisi_zip;
- enum qm_hw_ver rev_id;
struct hisi_qm *qm;
int ret;
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
-
hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
if (!hisi_zip)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_zip);
qm = &hisi_zip->qm;
- qm->use_dma_api = true;
- qm->pdev = pdev;
- qm->ver = rev_id;
- qm->algs = "zlib\ngzip";
- qm->sqe_size = HZIP_SQE_SIZE;
- qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
- QM_HW_VF;
- ret = hisi_qm_init(qm);
+ ret = hisi_zip_qm_init(qm, pdev);
if (ret) {
- dev_err(&pdev->dev, "Failed to init qm!\n");
+ pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
return ret;
}
- if (qm->fun_type == QM_HW_PF) {
- ret = hisi_zip_pf_probe_init(hisi_zip);
- if (ret)
- return ret;
-
- qm->qp_base = HZIP_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- } else if (qm->fun_type == QM_HW_VF) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
- * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
- * to trigger only one VF in v1 hardware.
- *
- * v2 hardware has no such problem.
- */
- if (qm->ver == QM_HW_V1) {
- qm->qp_base = HZIP_PF_DEF_Q_NUM;
- qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
- } else if (qm->ver == QM_HW_V2)
- /* v2 starts to support get vft by mailbox */
- hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ ret = hisi_zip_probe_init(hisi_zip);
+ if (ret) {
+ pci_err(pdev, "Failed to probe (%d)!\n", ret);
+ goto err_qm_uninit;
}
ret = hisi_qm_start(qm);
@@ -823,7 +817,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
- ret = hisi_zip_sriov_enable(pdev, vfs_num);
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
goto err_remove_from_list;
}
@@ -836,15 +830,8 @@ err_remove_from_list:
hisi_qm_stop(qm);
err_qm_uninit:
hisi_qm_uninit(qm);
- return ret;
-}
-static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- if (num_vfs == 0)
- return hisi_zip_sriov_disable(pdev);
- else
- return hisi_zip_sriov_enable(pdev, num_vfs);
+ return ret;
}
static void hisi_zip_remove(struct pci_dev *pdev)
@@ -852,8 +839,8 @@ static void hisi_zip_remove(struct pci_dev *pdev)
struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
struct hisi_qm *qm = &hisi_zip->qm;
- if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
- hisi_zip_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev);
hisi_zip_debugfs_exit(hisi_zip);
hisi_qm_stop(qm);
@@ -865,6 +852,9 @@ static void hisi_zip_remove(struct pci_dev *pdev)
static const struct pci_error_handlers hisi_zip_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_zip_pci_driver = {
@@ -873,7 +863,7 @@ static struct pci_driver hisi_zip_pci_driver = {
.probe = hisi_zip_probe,
.remove = hisi_zip_remove,
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
- hisi_zip_sriov_configure : NULL,
+ hisi_qm_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
};
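
For reference, the zip_dfx counters added above use the stock debugfs attribute pattern. A minimal self-contained sketch of that pattern follows; the demo_* names are illustrative, not part of the patch:

#include <linux/atomic.h>
#include <linux/debugfs.h>

static atomic64_t demo_counter;

/* Read callback: report the current counter value. */
static int demo_counter_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);
	return 0;
}

/* Write callback: lets user space reset or preset the counter. */
static int demo_counter_set(void *data, u64 val)
{
	atomic64_set((atomic64_t *)data, val);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(demo_counter_ops, demo_counter_get,
			 demo_counter_set, "%llu\n");

static void demo_debugfs_init(struct dentry *root)
{
	/* 0644 matches the mode used for the zip_dfx files. */
	debugfs_create_file("demo_counter", 0644, root, &demo_counter,
			    &demo_counter_ops);
}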
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
index 200fb3303db0..34bb3063eb70 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
@@ -79,13 +79,13 @@ static int otx_cpt_device_init(struct otx_cpt_device *cpt)
/* Check BIST status */
bist = (u64)otx_cpt_check_bist_status(cpt);
if (bist) {
- dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
+ dev_err(dev, "RAM BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
bist = otx_cpt_check_exe_bist_status(cpt);
if (bist) {
- dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
+ dev_err(dev, "Engine BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
index a6774232e9a3..a9e3de65875a 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
@@ -63,11 +63,11 @@ static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
if (vf_id >= 0)
- pr_debug("MBOX opcode %s received from VF%d raw_data %s",
+ pr_debug("MBOX opcode %s received from VF%d raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), vf_id,
raw_data_str);
else
- pr_debug("MBOX opcode %s received from PF raw_data %s",
+ pr_debug("MBOX opcode %s received from PF raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}
@@ -140,20 +140,20 @@ static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
struct otx_cpt_ucode *ucode;
if (q >= cpt->max_vfs) {
- dev_err(dev, "Requested queue %d is > than maximum avail %d",
+ dev_err(dev, "Requested queue %d is > than maximum avail %d\n",
q, cpt->max_vfs);
return -EINVAL;
}
if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
- dev_err(dev, "Requested group %d is > than maximum avail %d",
+ dev_err(dev, "Requested group %d is > than maximum avail %d\n",
grp, OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
eng_grp = &cpt->eng_grps.grp[grp];
if (!eng_grp->is_enabled) {
- dev_err(dev, "Requested engine group %d is disabled", grp);
+ dev_err(dev, "Requested engine group %d is disabled\n", grp);
return -EINVAL;
}
@@ -212,7 +212,7 @@ static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
if ((vftype != OTX_CPT_AE_TYPES) &&
(vftype != OTX_CPT_SE_TYPES)) {
- dev_err(dev, "VF%d binding to eng group %llu failed",
+ dev_err(dev, "VF%d binding to eng group %llu failed\n",
vf, mbx.data);
otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
} else {
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index d04baa319592..fec8f3b9b112 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -62,7 +62,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
int i;
if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
- dev_err(dev, "unsupported number of engines %d on octeontx",
+ dev_err(dev, "unsupported number of engines %d on octeontx\n",
eng_grp->g->engs_num);
return bmap;
}
@@ -78,7 +78,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
}
if (!found)
- dev_err(dev, "No engines reserved for engine group %d",
+ dev_err(dev, "No engines reserved for engine group %d\n",
eng_grp->idx);
return bmap;
}
@@ -306,7 +306,7 @@ static int process_tar_file(struct device *dev,
ucode_size = ntohl(ucode_hdr->code_length) * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
- dev_err(dev, "Ucode %s invalid size", filename);
+ dev_err(dev, "Ucode %s invalid size\n", filename);
return -EINVAL;
}
@@ -379,18 +379,18 @@ static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
{
struct tar_ucode_info_t *curr;
- pr_debug("Tar archive filename %s", tar_filename);
- pr_debug("Tar archive pointer %p, size %ld", tar_arch->fw->data,
+ pr_debug("Tar archive filename %s\n", tar_filename);
+ pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
tar_arch->fw->size);
list_for_each_entry(curr, &tar_arch->ucodes, list) {
- pr_debug("Ucode filename %s", curr->ucode.filename);
- pr_debug("Ucode version string %s", curr->ucode.ver_str);
- pr_debug("Ucode version %d.%d.%d.%d",
+ pr_debug("Ucode filename %s\n", curr->ucode.filename);
+ pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n",
curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
- pr_debug("Ucode type (%d) %s", curr->ucode.type,
+ pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
get_ucode_type_str(curr->ucode.type));
- pr_debug("Ucode size %d", curr->ucode.size);
+ pr_debug("Ucode size %d\n", curr->ucode.size);
pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
}
}
@@ -417,14 +417,14 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
goto release_tar_arch;
if (tar_arch->fw->size < TAR_BLOCK_LEN) {
- dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
tar_size = tar_arch->fw->size;
tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
- dev_err(dev, "Unsupported format of tar archive %s",
+ dev_err(dev, "Unsupported format of tar archive %s\n",
tar_filename);
goto release_tar_arch;
}
@@ -437,7 +437,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
if (tar_offs + cur_size > tar_size ||
tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
- dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
@@ -458,7 +458,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
/* Check for the end of the archive */
if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
- dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
@@ -563,13 +563,13 @@ static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
- pr_debug("Ucode info");
- pr_debug("Ucode version string %s", ucode->ver_str);
- pr_debug("Ucode version %d.%d.%d.%d", ucode->ver_num.nn,
+ pr_debug("Ucode info\n");
+ pr_debug("Ucode version string %s\n", ucode->ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
- pr_debug("Ucode type %s", get_ucode_type_str(ucode->type));
- pr_debug("Ucode size %d", ucode->size);
- pr_debug("Ucode virt address %16.16llx", (u64)ucode->align_va);
+ pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
+ pr_debug("Ucode size %d\n", ucode->size);
+ pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}
@@ -600,19 +600,19 @@ static void print_dbg_info(struct device *dev,
u32 mask[4];
int i, j;
- pr_debug("Engine groups global info");
- pr_debug("max SE %d, max AE %d",
+ pr_debug("Engine groups global info\n");
+ pr_debug("max SE %d, max AE %d\n",
eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
- pr_debug("free SE %d", eng_grps->avail.se_cnt);
- pr_debug("free AE %d", eng_grps->avail.ae_cnt);
+ pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
+ pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
- pr_debug("engine_group%d, state %s", i, grp->is_enabled ?
+ pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
"enabled" : "disabled");
if (grp->is_enabled) {
mirrored_grp = &eng_grps->grp[grp->mirror.idx];
- pr_debug("Ucode0 filename %s, version %s",
+ pr_debug("Ucode0 filename %s, version %s\n",
grp->mirror.is_ena ?
mirrored_grp->ucode[0].filename :
grp->ucode[0].filename,
@@ -626,18 +626,18 @@ static void print_dbg_info(struct device *dev,
if (engs->type) {
print_engs_info(grp, engs_info,
2*OTX_CPT_UCODE_NAME_LENGTH, j);
- pr_debug("Slot%d: %s", j, engs_info);
+ pr_debug("Slot%d: %s\n", j, engs_info);
bitmap_to_arr32(mask, engs->bmap,
eng_grps->engs_num);
- pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
mask[3], mask[2], mask[1], mask[0]);
} else
- pr_debug("Slot%d not used", j);
+ pr_debug("Slot%d not used\n", j);
}
if (grp->is_enabled) {
cpt_print_engines_mask(grp, dev, engs_mask,
OTX_CPT_UCODE_NAME_LENGTH);
- pr_debug("Cmask: %s", engs_mask);
+ pr_debug("Cmask: %s\n", engs_mask);
}
}
}
@@ -766,7 +766,7 @@ static int check_engines_availability(struct device *dev,
if (avail_cnt < req_eng->count) {
dev_err(dev,
- "Error available %s engines %d < than requested %d",
+ "Error available %s engines %d < than requested %d\n",
get_eng_type_str(req_eng->type),
avail_cnt, req_eng->count);
return -EBUSY;
@@ -867,7 +867,7 @@ static int copy_ucode_to_dma_mem(struct device *dev,
OTX_CPT_UCODE_ALIGNMENT,
&ucode->dma, GFP_KERNEL);
if (!ucode->va) {
- dev_err(dev, "Unable to allocate space for microcode");
+ dev_err(dev, "Unable to allocate space for microcode\n");
return -ENOMEM;
}
ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
@@ -905,15 +905,15 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
ucode->size = ntohl(ucode_hdr->code_length) * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
- dev_err(dev, "Ucode %s invalid size", ucode_filename);
+ dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
ret = -EINVAL;
goto release_fw;
}
ret = get_ucode_type(ucode_hdr, &ucode->type);
if (ret) {
- dev_err(dev, "Microcode %s unknown type 0x%x", ucode->filename,
- ucode->type);
+ dev_err(dev, "Microcode %s unknown type 0x%x\n",
+ ucode->filename, ucode->type);
goto release_fw;
}
@@ -1083,7 +1083,7 @@ static int eng_grp_update_masks(struct device *dev,
break;
default:
- dev_err(dev, "Invalid engine type %d", engs->type);
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
@@ -1142,13 +1142,14 @@ static int delete_engine_group(struct device *dev,
return -EINVAL;
if (eng_grp->mirror.ref_count) {
- dev_err(dev, "Can't delete engine_group%d as it is used by:",
+ dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
eng_grp->idx);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
if (eng_grp->g->grp[i].mirror.is_ena &&
eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
- dev_err(dev, "engine_group%d", i);
+ pr_cont(" %d", i);
}
+ pr_cont("\n");
return -EINVAL;
}
@@ -1182,7 +1183,7 @@ static int validate_1_ucode_scenario(struct device *dev,
if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
engs[i].type)) {
dev_err(dev,
- "Microcode %s does not support %s engines",
+ "Microcode %s does not support %s engines\n",
eng_grp->ucode[0].filename,
get_eng_type_str(engs[i].type));
return -EINVAL;
@@ -1220,7 +1221,7 @@ static int create_engine_group(struct device *dev,
/* Validate if requested engine types are supported by this device */
for (i = 0; i < engs_cnt; i++)
if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
- dev_err(dev, "Device does not support %s engines",
+ dev_err(dev, "Device does not support %s engines\n",
get_eng_type_str(engs[i].type));
return -EPERM;
}
@@ -1228,7 +1229,7 @@ static int create_engine_group(struct device *dev,
/* Find engine group which is not used */
eng_grp = find_unused_eng_grp(eng_grps);
if (!eng_grp) {
- dev_err(dev, "Error all engine groups are being used");
+ dev_err(dev, "Error all engine groups are being used\n");
return -ENOSPC;
}
@@ -1298,11 +1299,11 @@ static int create_engine_group(struct device *dev,
eng_grp->is_enabled = true;
if (eng_grp->mirror.is_ena)
dev_info(dev,
- "Engine_group%d: reuse microcode %s from group %d",
+ "Engine_group%d: reuse microcode %s from group %d\n",
eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
mirrored_eng_grp->idx);
else
- dev_info(dev, "Engine_group%d: microcode loaded %s",
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
eng_grp->idx, eng_grp->ucode[0].ver_str);
return 0;
@@ -1412,14 +1413,14 @@ static ssize_t ucode_load_store(struct device *dev,
} else {
if (del_grp_idx < 0 ||
del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
- dev_err(dev, "Invalid engine group index %d",
+ dev_err(dev, "Invalid engine group index %d\n",
del_grp_idx);
ret = -EINVAL;
return ret;
}
if (!eng_grps->grp[del_grp_idx].is_enabled) {
- dev_err(dev, "Error engine_group%d is not configured",
+ dev_err(dev, "Error engine_group%d is not configured\n",
del_grp_idx);
ret = -EINVAL;
return ret;
@@ -1568,7 +1569,7 @@ void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
udelay(CSR_DELAY);
reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
if (timeout--) {
- dev_warn(&cpt->pdev->dev, "Cores still busy");
+ dev_warn(&cpt->pdev->dev, "Cores still busy\n");
break;
}
}
@@ -1626,7 +1627,7 @@ int otx_cpt_init_eng_grps(struct pci_dev *pdev,
eng_grps->avail.max_ae_cnt;
if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
dev_err(&pdev->dev,
- "Number of engines %d > than max supported %d",
+ "Number of engines %d > than max supported %d\n",
eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
ret = -EINVAL;
goto err;
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 06202bcffb33..60e744f680d3 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -1660,7 +1660,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
case OTX_CPT_SE_TYPES:
count = atomic_read(&se_devices.count);
if (count >= CPT_MAX_VF_NUM) {
- dev_err(&pdev->dev, "No space to add a new device");
+ dev_err(&pdev->dev, "No space to add a new device\n");
ret = -ENOSPC;
goto err;
}
@@ -1687,7 +1687,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
case OTX_CPT_AE_TYPES:
count = atomic_read(&ae_devices.count);
if (count >= CPT_MAX_VF_NUM) {
- dev_err(&pdev->dev, "No space to a add new device");
+ dev_err(&pdev->dev, "No space to a add new device\n");
ret = -ENOSPC;
goto err;
}
@@ -1728,7 +1728,7 @@ void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
}
if (!dev_found) {
- dev_err(&pdev->dev, "%s device not found", __func__);
+ dev_err(&pdev->dev, "%s device not found\n", __func__);
goto exit;
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index a91860b5dc77..ce3168327a39 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -584,7 +584,7 @@ static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
cptvf_write_vq_done_ack(cptvf, intr);
wqe = get_cptvf_vq_wqe(cptvf, 0);
if (unlikely(!wqe)) {
- dev_err(&pdev->dev, "No work to schedule for VF (%d)",
+ dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
cptvf->vfid);
return IRQ_NONE;
}
@@ -602,7 +602,7 @@ static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
GFP_KERNEL)) {
dev_err(&pdev->dev,
- "Allocation failed for affinity_mask for VF %d",
+ "Allocation failed for affinity_mask for VF %d\n",
cptvf->vfid);
return;
}
@@ -691,7 +691,7 @@ static ssize_t vf_engine_group_store(struct device *dev,
return -EINVAL;
if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
- dev_err(dev, "Engine group >= than max available groups %d",
+ dev_err(dev, "Engine group >= than max available groups %d\n",
OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
@@ -837,7 +837,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev,
cptvf_misc_intr_handler, 0, "CPT VF misc intr",
cptvf);
if (err) {
- dev_err(dev, "Failed to request misc irq");
+ dev_err(dev, "Failed to request misc irq\n");
goto free_vectors;
}
@@ -854,7 +854,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev,
cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
if (err) {
- dev_err(dev, "cptvf_sw_init() failed");
+ dev_err(dev, "cptvf_sw_init() failed\n");
goto free_misc_irq;
}
/* Convey VQ LEN to PF */
@@ -946,7 +946,7 @@ static void otx_cptvf_remove(struct pci_dev *pdev)
/* Convey DOWN to PF */
if (otx_cptvf_send_vf_down(cptvf)) {
- dev_err(&pdev->dev, "PF not responding to DOWN msg");
+ dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
} else {
sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
index df839b880354..239195cccf93 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
@@ -314,7 +314,7 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
GFP_ATOMIC;
ret = setup_sgio_list(pdev, &info, req, gfp);
if (unlikely(ret)) {
- dev_err(&pdev->dev, "Setting up SG list failed");
+ dev_err(&pdev->dev, "Setting up SG list failed\n");
goto request_cleanup;
}
cpt_req->dlen = info->dlen;
@@ -410,17 +410,17 @@ int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
if (!otx_cpt_device_ready(cptvf)) {
- dev_err(&pdev->dev, "CPT Device is not ready");
+ dev_err(&pdev->dev, "CPT Device is not ready\n");
return -ENODEV;
}
if ((cptvf->vftype == OTX_CPT_SE_TYPES) && (!req->ctrl.s.se_req)) {
- dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
+ dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n",
cptvf->vfid);
return -EINVAL;
} else if ((cptvf->vftype == OTX_CPT_AE_TYPES) &&
(req->ctrl.s.se_req)) {
- dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
+ dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n",
cptvf->vfid);
return -EINVAL;
}
@@ -461,7 +461,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
/* check for timeout */
if (time_after_eq(jiffies, cpt_info->time_in +
OTX_CPT_COMMAND_TIMEOUT * HZ))
- dev_warn(&pdev->dev, "Request timed out 0x%p", req);
+ dev_warn(&pdev->dev, "Request timed out 0x%p\n", req);
else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) {
cpt_info->time_in = jiffies;
cpt_info->extra_time++;
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index bd6309e57ab8..da3f0b8814aa 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -805,12 +805,9 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
size_t ds = crypto_shash_digestsize(bctx->shash);
int err, i;
- SHASH_DESC_ON_STACK(shash, bctx->shash);
-
- shash->tfm = bctx->shash;
-
if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
+ err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
+ bctx->ipad);
if (err)
return err;
keylen = ds;
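
This hunk, like the n2, omap-sham and s5p-sss conversions below, replaces the open-coded stack descriptor with the crypto_shash_tfm_digest() helper. A minimal sketch of the two equivalent forms, assuming a ready-to-use shash transform:

#include <crypto/hash.h>

/* Old form: build a one-shot descriptor on the stack by hand. */
static int digest_old(struct crypto_shash *tfm, const u8 *data,
		      unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	return crypto_shash_digest(shash, data, len, out);
}

/* New form: the helper sets up the descriptor internally. */
static int digest_new(struct crypto_shash *tfm, const u8 *data,
		      unsigned int len, u8 *out)
{
	return crypto_shash_tfm_digest(tfm, data, len, out);
}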
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index f5c468f2cc82..6a828bbecea4 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -462,7 +462,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child_shash = ctx->child_shash;
struct crypto_ahash *fallback_tfm;
- SHASH_DESC_ON_STACK(shash, child_shash);
int err, bs, ds;
fallback_tfm = ctx->base.fallback_tfm;
@@ -470,14 +469,12 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
if (err)
return err;
- shash->tfm = child_shash;
-
bs = crypto_shash_blocksize(child_shash);
ds = crypto_shash_digestsize(child_shash);
BUG_ON(ds > N2_HASH_KEY_MAX);
if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen,
- ctx->hash_key);
+ err = crypto_shash_tfm_digest(child_shash, key, keylen,
+ ctx->hash_key);
if (err)
return err;
keylen = ds;
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index 015155da59c2..bc89a20e5d9d 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -15,4 +15,4 @@ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compres
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
nx-compress-pseries-objs := nx-842-pseries.o
-nx-compress-powernv-objs := nx-842-powernv.o
+nx-compress-powernv-objs := nx-common-powernv.o
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index c037a2403b82..13c65deda8e9 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Driver for IBM PowerNV 842 compression accelerator
+ * Driver for IBM PowerNV compression accelerator
*
* Copyright (C) 2015 Dan Streetman, IBM Corp
*/
@@ -20,7 +20,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+MODULE_DESCRIPTION("H/W Compression driver for IBM PowerNV processors");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-nx");
@@ -40,9 +40,9 @@ struct nx842_workmem {
char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
} __packed __aligned(WORKMEM_ALIGN);
-struct nx842_coproc {
+struct nx_coproc {
unsigned int chip_id;
- unsigned int ct;
+ unsigned int ct; /* Can be 842 or GZIP high/normal */
unsigned int ci; /* Coprocessor instance, used with icswx */
struct {
struct vas_window *rxwin;
@@ -58,9 +58,16 @@ struct nx842_coproc {
static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);
/* no cpu hotplug on powernv, so this list never changes after init */
-static LIST_HEAD(nx842_coprocs);
+static LIST_HEAD(nx_coprocs);
static unsigned int nx842_ct; /* used in icswx function */
+/*
+ * Coprocessor type values, matching those used by skiboot and
+ * defined in the NX workbook.
+ */
+#define NX_CT_GZIP (2) /* on P9 and later */
+#define NX_CT_842 (3)
+
static int (*nx842_powernv_exec)(const unsigned char *in,
unsigned int inlen, unsigned char *out,
unsigned int *outlenp, void *workmem, int fc);
@@ -666,15 +673,15 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
wmem, CCW_FC_842_DECOMP_CRC);
}
-static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc,
+static inline void nx_add_coprocs_list(struct nx_coproc *coproc,
int chipid)
{
coproc->chip_id = chipid;
INIT_LIST_HEAD(&coproc->list);
- list_add(&coproc->list, &nx842_coprocs);
+ list_add(&coproc->list, &nx_coprocs);
}
-static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
+static struct vas_window *nx_alloc_txwin(struct nx_coproc *coproc)
{
struct vas_window *txwin = NULL;
struct vas_tx_win_attr txattr;
@@ -685,7 +692,6 @@ static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
*/
vas_init_tx_win_attr(&txattr, coproc->ct);
txattr.lpid = 0; /* lpid is 0 for kernel requests */
- txattr.pid = 0; /* pid is 0 for kernel requests */
/*
* Open a VAS send window which is used to send request to NX.
@@ -704,9 +710,9 @@ static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
* cpu_txwin is used in copy/paste operation for each compression /
* decompression request.
*/
-static int nx842_open_percpu_txwins(void)
+static int nx_open_percpu_txwins(void)
{
- struct nx842_coproc *coproc, *n;
+ struct nx_coproc *coproc, *n;
unsigned int i, chip_id;
for_each_possible_cpu(i) {
@@ -714,17 +720,18 @@ static int nx842_open_percpu_txwins(void)
chip_id = cpu_to_chip_id(i);
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+ list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
/*
* Kernel requests use only high priority FIFOs. So
* open send windows for these FIFOs.
+ * GZIP is not supported in the kernel right now.
*/
if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
continue;
if (coproc->chip_id == chip_id) {
- txwin = nx842_alloc_txwin(coproc);
+ txwin = nx_alloc_txwin(coproc);
if (IS_ERR(txwin))
return PTR_ERR(txwin);
@@ -743,13 +750,28 @@ static int nx842_open_percpu_txwins(void)
return 0;
}
+static int __init nx_set_ct(struct nx_coproc *coproc, const char *priority,
+ int high, int normal)
+{
+ if (!strcmp(priority, "High"))
+ coproc->ct = high;
+ else if (!strcmp(priority, "Normal"))
+ coproc->ct = normal;
+ else {
+ pr_err("Invalid RxFIFO priority value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
- int vasid, int *ct)
+ int vasid, int type, int *ct)
{
struct vas_window *rxwin = NULL;
struct vas_rx_win_attr rxattr;
- struct nx842_coproc *coproc;
u32 lpid, pid, tid, fifo_size;
+ struct nx_coproc *coproc;
u64 rx_fifo;
const char *priority;
int ret;
@@ -794,15 +816,15 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
if (!coproc)
return -ENOMEM;
- if (!strcmp(priority, "High"))
- coproc->ct = VAS_COP_TYPE_842_HIPRI;
- else if (!strcmp(priority, "Normal"))
- coproc->ct = VAS_COP_TYPE_842;
- else {
- pr_err("Invalid RxFIFO priority value\n");
- ret = -EINVAL;
+ if (type == NX_CT_842)
+ ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_842_HIPRI,
+ VAS_COP_TYPE_842);
+ else if (type == NX_CT_GZIP)
+ ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_GZIP_HIPRI,
+ VAS_COP_TYPE_GZIP);
+
+ if (ret)
goto err_out;
- }
vas_init_rx_win_attr(&rxattr, coproc->ct);
rxattr.rx_fifo = (void *)rx_fifo;
@@ -830,7 +852,7 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
coproc->vas.rxwin = rxwin;
coproc->vas.id = vasid;
- nx842_add_coprocs_list(coproc, chip_id);
+ nx_add_coprocs_list(coproc, chip_id);
/*
* (lpid, pid, tid) combination has to be unique for each
@@ -848,13 +870,47 @@ err_out:
return ret;
}
+static int __init nx_coproc_init(int chip_id, int ct_842, int ct_gzip)
+{
+ int ret = 0;
+
+ if (opal_check_token(OPAL_NX_COPROC_INIT)) {
+ ret = opal_nx_coproc_init(chip_id, ct_842);
+
+ if (!ret)
+ ret = opal_nx_coproc_init(chip_id, ct_gzip);
+
+ if (ret) {
+ ret = opal_error_code(ret);
+ pr_err("Failed to initialize NX for chip(%d): %d\n",
+ chip_id, ret);
+ }
+ } else
+ pr_warn("Firmware doesn't support NX initialization\n");
+
+ return ret;
+}
+
+static int __init find_nx_device_tree(struct device_node *dn, int chip_id,
+ int vasid, int type, char *devname,
+ int *ct)
+{
+ int ret = 0;
+
+ if (of_device_is_compatible(dn, devname)) {
+ ret = vas_cfg_coproc_info(dn, chip_id, vasid, type, ct);
+ if (ret)
+ of_node_put(dn);
+ }
+
+ return ret;
+}
-static int __init nx842_powernv_probe_vas(struct device_node *pn)
+static int __init nx_powernv_probe_vas(struct device_node *pn)
{
- struct device_node *dn;
int chip_id, vasid, ret = 0;
- int nx_fifo_found = 0;
- int uninitialized_var(ct);
+ int ct_842 = 0, ct_gzip = 0;
+ struct device_node *dn;
chip_id = of_get_ibm_chip_id(pn);
if (chip_id < 0) {
@@ -869,40 +925,33 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
}
for_each_child_of_node(pn, dn) {
- if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
- ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct);
- if (ret) {
- of_node_put(dn);
- return ret;
- }
- nx_fifo_found++;
- }
+ ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842,
+ "ibm,p9-nx-842", &ct_842);
+
+ if (!ret)
+ ret = find_nx_device_tree(dn, chip_id, vasid,
+ NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
+
+ if (ret)
+ return ret;
}
- if (!nx_fifo_found) {
- pr_err("NX842 FIFO nodes are missing\n");
+ if (!ct_842 || !ct_gzip) {
+ pr_err("NX FIFO nodes are missing\n");
return -EINVAL;
}
/*
* Initialize NX instance for both high and normal priority FIFOs.
*/
- if (opal_check_token(OPAL_NX_COPROC_INIT)) {
- ret = opal_nx_coproc_init(chip_id, ct);
- if (ret) {
- pr_err("Failed to initialize NX for chip(%d): %d\n",
- chip_id, ret);
- ret = opal_error_code(ret);
- }
- } else
- pr_warn("Firmware doesn't support NX initialization\n");
+ ret = nx_coproc_init(chip_id, ct_842, ct_gzip);
return ret;
}
static int __init nx842_powernv_probe(struct device_node *dn)
{
- struct nx842_coproc *coproc;
+ struct nx_coproc *coproc;
unsigned int ct, ci;
int chip_id;
@@ -922,13 +971,13 @@ static int __init nx842_powernv_probe(struct device_node *dn)
return -EINVAL;
}
- coproc = kmalloc(sizeof(*coproc), GFP_KERNEL);
+ coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
if (!coproc)
return -ENOMEM;
coproc->ct = ct;
coproc->ci = ci;
- nx842_add_coprocs_list(coproc, chip_id);
+ nx_add_coprocs_list(coproc, chip_id);
pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);
@@ -941,9 +990,9 @@ static int __init nx842_powernv_probe(struct device_node *dn)
return 0;
}
-static void nx842_delete_coprocs(void)
+static void nx_delete_coprocs(void)
{
- struct nx842_coproc *coproc, *n;
+ struct nx_coproc *coproc, *n;
struct vas_window *txwin;
int i;
@@ -955,10 +1004,10 @@ static void nx842_delete_coprocs(void)
if (txwin)
vas_win_close(txwin);
- per_cpu(cpu_txwin, i) = 0;
+ per_cpu(cpu_txwin, i) = NULL;
}
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+ list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
if (coproc->vas.rxwin)
vas_win_close(coproc->vas.rxwin);
@@ -1002,7 +1051,7 @@ static struct crypto_alg nx842_powernv_alg = {
.coa_decompress = nx842_crypto_decompress } }
};
-static __init int nx842_powernv_init(void)
+static __init int nx_compress_powernv_init(void)
{
struct device_node *dn;
int ret;
@@ -1017,15 +1066,15 @@ static __init int nx842_powernv_init(void)
BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
for_each_compatible_node(dn, NULL, "ibm,power9-nx") {
- ret = nx842_powernv_probe_vas(dn);
+ ret = nx_powernv_probe_vas(dn);
if (ret) {
- nx842_delete_coprocs();
+ nx_delete_coprocs();
of_node_put(dn);
return ret;
}
}
- if (list_empty(&nx842_coprocs)) {
+ if (list_empty(&nx_coprocs)) {
for_each_compatible_node(dn, NULL, "ibm,power-nx")
nx842_powernv_probe(dn);
@@ -1034,9 +1083,25 @@ static __init int nx842_powernv_init(void)
nx842_powernv_exec = nx842_exec_icswx;
} else {
- ret = nx842_open_percpu_txwins();
+ /*
+ * Register the VAS user-space API for NX GZIP so that user
+ * space can use the GZIP engine. High FIFO priority is used
+ * for kernel requests and normal FIFO priority is assigned
+ * to user space. 842 compression is supported only in the
+ * kernel.
+ */
+ ret = vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
+ "nx-gzip");
+
+ /*
+ * GZIP is not supported in the kernel right now,
+ * so open tx windows only for 842.
+ */
+ if (!ret)
+ ret = nx_open_percpu_txwins();
+
if (ret) {
- nx842_delete_coprocs();
+ nx_delete_coprocs();
return ret;
}
@@ -1045,18 +1110,27 @@ static __init int nx842_powernv_init(void)
ret = crypto_register_alg(&nx842_powernv_alg);
if (ret) {
- nx842_delete_coprocs();
+ nx_delete_coprocs();
return ret;
}
return 0;
}
-module_init(nx842_powernv_init);
+module_init(nx_compress_powernv_init);
-static void __exit nx842_powernv_exit(void)
+static void __exit nx_compress_powernv_exit(void)
{
+ /*
+ * The GZIP engine is supported only on POWER9 and later, while
+ * nx842_ct is used only on POWER8 (icswx). The VAS API for NX
+ * GZIP was registered during init for user-space use, so
+ * unregister it here.
+ */
+ if (!nx842_ct)
+ vas_unregister_coproc_api();
+
crypto_unregister_alg(&nx842_powernv_alg);
- nx842_delete_coprocs();
+ nx_delete_coprocs();
}
-module_exit(nx842_powernv_exit);
+module_exit(nx_compress_powernv_exit);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e4072cd38585..063ad5d03f33 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -33,7 +33,6 @@
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
@@ -1245,16 +1244,6 @@ static int omap_sham_update(struct ahash_request *req)
return omap_sham_enqueue(req, OP_UPDATE);
}
-static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
static int omap_sham_final_shash(struct ahash_request *req)
{
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
@@ -1270,9 +1259,8 @@ static int omap_sham_final_shash(struct ahash_request *req)
!test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
offset = get_block_size(ctx);
- return omap_sham_shash_digest(tctx->fallback, req->base.flags,
- ctx->buffer + offset,
- ctx->bufcnt - offset, req->result);
+ return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
+ ctx->bufcnt - offset, req->result);
}
static int omap_sham_final(struct ahash_request *req)
@@ -1351,9 +1339,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
return err;
if (keylen > bs) {
- err = omap_sham_shash_digest(bctx->shash,
- crypto_shash_get_flags(bctx->shash),
- key, keylen, bctx->ipad);
+ err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
+ bctx->ipad);
if (err)
return err;
keylen = ds;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 2a16800d2579..341433fbcc4a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1521,37 +1521,6 @@ static int s5p_hash_update(struct ahash_request *req)
}
/**
- * s5p_hash_shash_digest() - calculate shash digest
- * @tfm: crypto transformation
- * @flags: tfm flags
- * @data: input data
- * @len: length of data
- * @out: output buffer
- */
-static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
-/**
- * s5p_hash_final_shash() - calculate shash digest
- * @req: AHASH request
- */
-static int s5p_hash_final_shash(struct ahash_request *req)
-{
- struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
-
- return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
- ctx->buffer, ctx->bufcnt, req->result);
-}
-
-/**
* s5p_hash_final() - close up hash and calculate digest
* @req: AHASH request
*
@@ -1582,8 +1551,12 @@ static int s5p_hash_final(struct ahash_request *req)
if (ctx->error)
return -EINVAL; /* uncompleted hash is not needed */
- if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
- return s5p_hash_final_shash(req);
+ if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+
+ return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
+ ctx->bufcnt, req->result);
+ }
return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
}
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 8e92e4ac79f1..3ba41148c2a4 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -28,18 +28,23 @@
/* Registers values */
#define CRC_CR_RESET BIT(0)
-#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
-#define CRC_INIT_DEFAULT 0xFFFFFFFF
+#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
+#define CRC_CR_REV_IN_BYTE BIT(5)
+#define CRC_CR_REV_OUT BIT(7)
+#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
#define CRC_AUTOSUSPEND_DELAY 50
+static unsigned int burst_size;
+module_param(burst_size, uint, 0644);
+MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
+
struct stm32_crc {
struct list_head list;
struct device *dev;
void __iomem *regs;
struct clk *clk;
- u8 pending_data[sizeof(u32)];
- size_t nb_pending_bytes;
+ spinlock_t lock;
};
struct stm32_crc_list {
@@ -59,14 +64,13 @@ struct stm32_crc_ctx {
struct stm32_crc_desc_ctx {
u32 partial; /* crc32c: partial in first 4 bytes of that struct */
- struct stm32_crc *crc;
};
static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
- mctx->key = CRC_INIT_DEFAULT;
+ mctx->key = 0;
mctx->poly = CRC32_POLY_LE;
return 0;
}
@@ -75,7 +79,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
- mctx->key = CRC_INIT_DEFAULT;
+ mctx->key = CRC32C_INIT_DEFAULT;
mctx->poly = CRC32C_POLY_LE;
return 0;
}
@@ -92,87 +96,135 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
return 0;
}
+static struct stm32_crc *stm32_crc_get_next_crc(void)
+{
+ struct stm32_crc *crc;
+
+ spin_lock_bh(&crc_list.lock);
+ crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
+ if (crc)
+ list_move_tail(&crc->list, &crc_list.dev_list);
+ spin_unlock_bh(&crc_list.lock);
+
+ return crc;
+}
+
static int stm32_crc_init(struct shash_desc *desc)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
+ unsigned long flags;
- spin_lock_bh(&crc_list.lock);
- list_for_each_entry(crc, &crc_list.dev_list, list) {
- ctx->crc = crc;
- break;
- }
- spin_unlock_bh(&crc_list.lock);
+ crc = stm32_crc_get_next_crc();
+ if (!crc)
+ return -ENODEV;
+
+ pm_runtime_get_sync(crc->dev);
- pm_runtime_get_sync(ctx->crc->dev);
+ spin_lock_irqsave(&crc->lock, flags);
/* Reset, set key, poly and configure in bit reverse mode */
- writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+ writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
/* Store partial result */
- ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
- ctx->crc->nb_pending_bytes = 0;
+ ctx->partial = readl_relaxed(crc->regs + CRC_DR);
- pm_runtime_mark_last_busy(ctx->crc->dev);
- pm_runtime_put_autosuspend(ctx->crc->dev);
+ spin_unlock_irqrestore(&crc->lock, flags);
+
+ pm_runtime_mark_last_busy(crc->dev);
+ pm_runtime_put_autosuspend(crc->dev);
return 0;
}
-static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
- unsigned int length)
+static int burst_update(struct shash_desc *desc, const u8 *d8,
+ size_t length)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc *crc = ctx->crc;
- u32 *d32;
- unsigned int i;
+ struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ struct stm32_crc *crc;
+ unsigned long flags;
+
+ crc = stm32_crc_get_next_crc();
+ if (!crc)
+ return -ENODEV;
pm_runtime_get_sync(crc->dev);
- if (unlikely(crc->nb_pending_bytes)) {
- while (crc->nb_pending_bytes != sizeof(u32) && length) {
- /* Fill in pending data */
- crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+ spin_lock_irqsave(&crc->lock, flags);
+
+ /*
+ * Restore the CRC previously calculated for this context as the
+ * init value and restore the polynomial configuration; set the
+ * input register for word data and the output register for
+ * bit-reversed data.
+ */
+ writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+
+ if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
+ /* Configure for byte data */
+ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+ while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
+ writeb_relaxed(*d8++, crc->regs + CRC_DR);
length--;
}
-
- if (crc->nb_pending_bytes == sizeof(u32)) {
- /* Process completed pending data */
- writel_relaxed(*(u32 *)crc->pending_data,
- crc->regs + CRC_DR);
- crc->nb_pending_bytes = 0;
- }
+ /* Configure for word data */
+ writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
}
- d32 = (u32 *)d8;
- for (i = 0; i < length >> 2; i++)
- /* Process 32 bits data */
- writel_relaxed(*(d32++), crc->regs + CRC_DR);
+ for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
+ writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
+
+ if (length) {
+ /* Configure for byte data */
+ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+ while (length--)
+ writeb_relaxed(*d8++, crc->regs + CRC_DR);
+ }
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+ spin_unlock_irqrestore(&crc->lock, flags);
+
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
- /* Check for pending data (non 32 bits) */
- length &= 3;
- if (likely(!length))
- return 0;
+ return 0;
+}
- if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
- /* Shall not happen */
- dev_err(crc->dev, "Pending data overflow\n");
- return -EINVAL;
- }
+static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
+ unsigned int length)
+{
+ const unsigned int burst_sz = burst_size;
+ unsigned int rem_sz;
+ const u8 *cur;
+ size_t size;
+ int ret;
- d8 = (const u8 *)d32;
- for (i = 0; i < length; i++)
- /* Store pending data */
- crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+ if (!burst_sz)
+ return burst_update(desc, d8, length);
+
+	/* First pass also digests the leading bytes that are not 32-bit aligned */
+ size = min(length,
+ burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8,
+ sizeof(u32)));
+ for (rem_sz = length, cur = d8; rem_sz;
+ rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
+ ret = burst_update(desc, cur, size);
+ if (ret)
+ return ret;
+ }
return 0;
}
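
The stm32_crc_get_next_crc() helper above spreads requests across CRC instances by rotating the device list: each caller takes the head entry and moves it to the tail. The same idiom in isolation (a sketch with illustrative demo_* names, using list_first_entry_or_null() to make the empty-list case explicit):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_dev {
	struct list_head list;
};

static LIST_HEAD(demo_dev_list);
static DEFINE_SPINLOCK(demo_list_lock);

static struct demo_dev *demo_get_next(void)
{
	struct demo_dev *d;

	spin_lock_bh(&demo_list_lock);
	d = list_first_entry_or_null(&demo_dev_list, struct demo_dev, list);
	if (d)
		/* Rotate so the next caller gets the following device. */
		list_move_tail(&d->list, &demo_dev_list);
	spin_unlock_bh(&demo_list_lock);

	return d;
}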
@@ -202,6 +254,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
}
+static unsigned int refcnt;
+static DEFINE_MUTEX(refcnt_lock);
static struct shash_alg algs[] = {
/* CRC-32 */
{
@@ -284,20 +338,29 @@ static int stm32_crc_probe(struct platform_device *pdev)
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
+ pm_runtime_irq_safe(dev);
pm_runtime_enable(dev);
+ spin_lock_init(&crc->lock);
+
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);
list_add(&crc->list, &crc_list.dev_list);
spin_unlock(&crc_list.lock);
- ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
- if (ret) {
- dev_err(dev, "Failed to register\n");
- clk_disable_unprepare(crc->clk);
- return ret;
+ mutex_lock(&refcnt_lock);
+ if (!refcnt) {
+ ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+ if (ret) {
+ mutex_unlock(&refcnt_lock);
+ dev_err(dev, "Failed to register\n");
+ clk_disable_unprepare(crc->clk);
+ return ret;
+ }
}
+ refcnt++;
+ mutex_unlock(&refcnt_lock);
dev_info(dev, "Initialized\n");
@@ -318,7 +381,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ mutex_lock(&refcnt_lock);
+ if (!--refcnt)
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ mutex_unlock(&refcnt_lock);
pm_runtime_disable(crc->dev);
pm_runtime_put_noidle(crc->dev);
@@ -328,34 +394,60 @@ static int stm32_crc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int stm32_crc_runtime_suspend(struct device *dev)
+static int __maybe_unused stm32_crc_suspend(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
- clk_disable_unprepare(crc->clk);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ clk_unprepare(crc->clk);
return 0;
}
-static int stm32_crc_runtime_resume(struct device *dev)
+static int __maybe_unused stm32_crc_resume(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
int ret;
- ret = clk_prepare_enable(crc->clk);
+ ret = clk_prepare(crc->clk);
if (ret) {
- dev_err(crc->dev, "Failed to prepare_enable clock\n");
+ dev_err(crc->dev, "Failed to prepare clock\n");
+ return ret;
+ }
+
+ return pm_runtime_force_resume(dev);
+}
+
+static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+
+ clk_disable(crc->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_crc_runtime_resume(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(crc->clk);
+ if (ret) {
+ dev_err(crc->dev, "Failed to enable clock\n");
return ret;
}
return 0;
}
-#endif
static const struct dev_pm_ops stm32_crc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend,
+ stm32_crc_resume)
SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
stm32_crc_runtime_resume, NULL)
};
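
Because some SoCs have several CRC instances, algorithm registration above is now reference counted: the shashes are registered when the first device probes and unregistered when the last one is removed. Reduced to a sketch (algs stands for the driver's shash_alg table, as in the patch):

#include <linux/mutex.h>
#include <crypto/internal/hash.h>

extern struct shash_alg algs[2];	/* the driver's algorithm table */

static unsigned int refcnt;
static DEFINE_MUTEX(refcnt_lock);

static int demo_register_once(void)
{
	int ret = 0;

	mutex_lock(&refcnt_lock);
	if (!refcnt)
		ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
	if (!ret)
		refcnt++;
	mutex_unlock(&refcnt_lock);

	return ret;
}

static void demo_unregister_last(void)
{
	mutex_lock(&refcnt_lock);
	if (!--refcnt)
		crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
	mutex_unlock(&refcnt_lock);
}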
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 167b80eec437..03c5e6683805 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -507,6 +507,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
struct dma_slave_config dma_conf;
+ struct dma_chan *chan;
int err;
memset(&dma_conf, 0, sizeof(dma_conf));
@@ -518,11 +519,11 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.dst_maxburst = hdev->dma_maxburst;
dma_conf.device_fc = false;
- hdev->dma_lch = dma_request_chan(hdev->dev, "in");
- if (IS_ERR(hdev->dma_lch)) {
- dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return PTR_ERR(hdev->dma_lch);
- }
+ chan = dma_request_chan(hdev->dev, "in");
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ hdev->dma_lch = chan;
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
if (err) {
@@ -1463,8 +1464,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
hdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hdev->clk)) {
- dev_err(dev, "failed to get clock for hash (%lu)\n",
- PTR_ERR(hdev->clk));
+ if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get clock for hash (%lu)\n",
+ PTR_ERR(hdev->clk));
+ }
+
return PTR_ERR(hdev->clk);
}
@@ -1482,7 +1486,12 @@ static int stm32_hash_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (!IS_ERR(hdev->rst)) {
+ if (IS_ERR(hdev->rst)) {
+ if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_reset;
+ }
+ } else {
reset_control_assert(hdev->rst);
udelay(2);
reset_control_deassert(hdev->rst);
@@ -1493,8 +1502,15 @@ static int stm32_hash_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hdev);
ret = stm32_hash_dma_init(hdev);
- if (ret)
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOENT:
dev_dbg(dev, "DMA mode not available\n");
+ break;
+ default:
+ goto err_dma;
+ }
spin_lock(&stm32_hash.lock);
list_add_tail(&hdev->list, &stm32_hash.dev_list);
@@ -1532,10 +1548,10 @@ err_engine:
spin_lock(&stm32_hash.lock);
list_del(&hdev->list);
spin_unlock(&stm32_hash.lock);
-
+err_dma:
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
-
+err_reset:
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
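
The probe changes above follow the usual deferral-aware pattern: a -EPROBE_DEFER result is not logged as an error, since the driver core will simply retry the probe once the missing provider shows up. Sketched in isolation with illustrative names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_get_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*clk)) {
		/* Deferral is expected; only log real failures. */
		if (PTR_ERR(*clk) != -EPROBE_DEFER)
			dev_err(dev, "failed to get clock (%ld)\n",
				PTR_ERR(*clk));
		return PTR_ERR(*clk);
	}

	return 0;
}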
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 09f7f468eef8..cd11558893cd 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -46,7 +46,6 @@ struct zynqmp_aead_drv_ctx {
} alg;
struct device *dev;
struct crypto_engine *engine;
- const struct zynqmp_eemi_ops *eemi_ops;
};
struct zynqmp_aead_hw_req {
@@ -80,21 +79,15 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
struct device *dev = tfm_ctx->dev;
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct zynqmp_aead_drv_ctx *drv_ctx;
struct zynqmp_aead_hw_req *hwreq;
dma_addr_t dma_addr_data, dma_addr_hw_req;
unsigned int data_size;
unsigned int status;
+ int ret;
size_t dma_size;
char *kbuf;
int err;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
-
- if (!drv_ctx->eemi_ops->aes)
- return -ENOTSUPP;
-
if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
+ GCM_AES_IV_SIZE;
@@ -136,9 +129,12 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
hwreq->key = 0;
}
- drv_ctx->eemi_ops->aes(dma_addr_hw_req, &status);
+ ret = zynqmp_pm_aes_engine(dma_addr_hw_req, &status);
- if (status) {
+ if (ret) {
+ dev_err(dev, "ERROR: AES PM API failed\n");
+ err = ret;
+ } else if (status) {
switch (status) {
case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
dev_err(dev, "ERROR: Gcm Tag mismatch\n");
@@ -388,12 +384,6 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
else
return -ENODEV;
- aes_drv_ctx.eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(aes_drv_ctx.eemi_ops)) {
- dev_err(dev, "Failed to get ZynqMP EEMI interface\n");
- return PTR_ERR(aes_drv_ctx.eemi_ops);
- }
-
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
if (err < 0) {
dev_err(dev, "No usable DMA configuration\n");
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 3107ce80e809..16850d5388ab 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -44,6 +44,7 @@ struct dax_region {
* @dev - device core
* @pgmap - pgmap for memmap setup / lifetime (driver owned)
* @dax_mem_res: physical address range of hotadded DAX memory
+ * @dax_mem_name: name for hotadded DAX memory via add_memory_driver_managed()
*/
struct dev_dax {
struct dax_region *region;
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 1af823b2fe6b..4c0af2eb7e19 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -377,6 +377,7 @@ static int dax_open(struct inode *inode, struct file *filp)
inode->i_mapping->a_ops = &dev_dax_aops;
filp->f_mapping = inode->i_mapping;
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
+ filp->f_sb_err = file_sample_sb_err(filp);
filp->private_data = dev_dax;
inode->i_flags = S_DAX;
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 3d0a7e702c94..275aa5f87399 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -14,6 +14,11 @@
#include "dax-private.h"
#include "bus.h"
+/* Memory resource name used for add_memory_driver_managed(). */
+static const char *kmem_name;
+/* Set if any memory will remain added when the driver is unloaded. */
+static bool any_hotremove_failed;
+
int dev_dax_kmem_probe(struct device *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
@@ -22,6 +27,7 @@ int dev_dax_kmem_probe(struct device *dev)
resource_size_t kmem_size;
resource_size_t kmem_end;
struct resource *new_res;
+ const char *new_res_name;
int numa_node;
int rc;
@@ -48,11 +54,16 @@ int dev_dax_kmem_probe(struct device *dev)
kmem_size &= ~(memory_block_size_bytes() - 1);
kmem_end = kmem_start + kmem_size;
- /* Region is permanently reserved. Hot-remove not yet implemented. */
- new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev));
+ new_res_name = kstrdup(dev_name(dev), GFP_KERNEL);
+ if (!new_res_name)
+ return -ENOMEM;
+
+ /* Region is permanently reserved if hotremove fails. */
+ new_res = request_mem_region(kmem_start, kmem_size, new_res_name);
if (!new_res) {
dev_warn(dev, "could not reserve region [%pa-%pa]\n",
&kmem_start, &kmem_end);
+ kfree(new_res_name);
return -EBUSY;
}
@@ -63,12 +74,17 @@ int dev_dax_kmem_probe(struct device *dev)
* unknown to us that will break add_memory() below.
*/
new_res->flags = IORESOURCE_SYSTEM_RAM;
- new_res->name = dev_name(dev);
- rc = add_memory(numa_node, new_res->start, resource_size(new_res));
+ /*
+ * Ensure that future kexec'd kernels will not treat this as RAM
+ * automatically.
+ */
+ rc = add_memory_driver_managed(numa_node, new_res->start,
+ resource_size(new_res), kmem_name);
if (rc) {
release_resource(new_res);
kfree(new_res);
+ kfree(new_res_name);
return rc;
}
dev_dax->dax_kmem_res = new_res;
@@ -83,6 +99,7 @@ static int dev_dax_kmem_remove(struct device *dev)
struct resource *res = dev_dax->dax_kmem_res;
resource_size_t kmem_start = res->start;
resource_size_t kmem_size = resource_size(res);
+ const char *res_name = res->name;
int rc;
/*
@@ -93,6 +110,7 @@ static int dev_dax_kmem_remove(struct device *dev)
*/
rc = remove_memory(dev_dax->target_node, kmem_start, kmem_size);
if (rc) {
+ any_hotremove_failed = true;
dev_err(dev,
"DAX region %pR cannot be hotremoved until the next reboot\n",
res);
@@ -102,6 +120,7 @@ static int dev_dax_kmem_remove(struct device *dev)
/* Release and free dax resources */
release_resource(res);
kfree(res);
+ kfree(res_name);
dev_dax->dax_kmem_res = NULL;
return 0;
@@ -116,6 +135,7 @@ static int dev_dax_kmem_remove(struct device *dev)
* permanently pinned as reserved by the unreleased
* request_mem_region().
*/
+ any_hotremove_failed = true;
return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
@@ -129,12 +149,24 @@ static struct dax_device_driver device_dax_kmem_driver = {
static int __init dax_kmem_init(void)
{
- return dax_driver_register(&device_dax_kmem_driver);
+ int rc;
+
+ /* Resource name is permanently allocated if any hotremove fails. */
+ kmem_name = kstrdup_const("System RAM (kmem)", GFP_KERNEL);
+ if (!kmem_name)
+ return -ENOMEM;
+
+ rc = dax_driver_register(&device_dax_kmem_driver);
+ if (rc)
+ kfree_const(kmem_name);
+ return rc;
}
static void __exit dax_kmem_exit(void)
{
dax_driver_unregister(&device_dax_kmem_driver);
+ if (!any_hotremove_failed)
+ kfree_const(kmem_name);
}
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index eb25627b059d..21ebd0af268b 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -24,9 +24,7 @@ int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
cd = device_create(dca_class, dca->cd, MKDEV(0, slot + 1), NULL,
"requester%d", req_count++);
- if (IS_ERR(cd))
- return PTR_ERR(cd);
- return 0;
+ return PTR_ERR_OR_ZERO(cd);
}
void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
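PTR_ERR_OR_ZERO() is a straight collapse of the IS_ERR()/PTR_ERR() pair; the two forms below are equivalent:

    /* before */
    if (IS_ERR(cd))
            return PTR_ERR(cd);
    return 0;

    /* after */
    return PTR_ERR_OR_ZERO(cd);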
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 0b1df12e0f21..37dc40d1fcfb 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -91,6 +91,14 @@ config ARM_EXYNOS_BUS_DEVFREQ
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_IMX_BUS_DEVFREQ
+ tristate "i.MX Generic Bus DEVFREQ Driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ select DEVFREQ_GOV_USERSPACE
+ help
+ This adds the generic DEVFREQ driver for i.MX interconnects. It
+ allows adjusting NIC/NOC frequency.
+
config ARM_IMX8M_DDRC_DEVFREQ
tristate "i.MX8M DDRC DEVFREQ Driver"
depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 3eb4d5e6635c..3ca1ad0ecb97 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o
obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 6fecd11dafdd..52b9c3e141f3 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -60,12 +60,12 @@ static struct devfreq *find_device_devfreq(struct device *dev)
{
struct devfreq *tmp_devfreq;
+ lockdep_assert_held(&devfreq_list_lock);
+
if (IS_ERR_OR_NULL(dev)) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- WARN(!mutex_is_locked(&devfreq_list_lock),
- "devfreq_list_lock must be locked.");
list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
if (tmp_devfreq->dev.parent == dev)
@@ -258,12 +258,12 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
{
struct devfreq_governor *tmp_governor;
+ lockdep_assert_held(&devfreq_list_lock);
+
if (IS_ERR_OR_NULL(name)) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- WARN(!mutex_is_locked(&devfreq_list_lock),
- "devfreq_list_lock must be locked.");
list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
@@ -289,12 +289,12 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
struct devfreq_governor *governor;
int err = 0;
+ lockdep_assert_held(&devfreq_list_lock);
+
if (IS_ERR_OR_NULL(name)) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- WARN(!mutex_is_locked(&devfreq_list_lock),
- "devfreq_list_lock must be locked.");
governor = find_devfreq_governor(name);
if (IS_ERR(governor)) {
@@ -392,10 +392,7 @@ int update_devfreq(struct devfreq *devfreq)
int err = 0;
u32 flags = 0;
- if (!mutex_is_locked(&devfreq->lock)) {
- WARN(true, "devfreq->lock must be locked by the caller.\n");
- return -EINVAL;
- }
+ lockdep_assert_held(&devfreq->lock);
if (!devfreq->governor)
return -EINVAL;
@@ -768,7 +765,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->dev.release = devfreq_dev_release;
INIT_LIST_HEAD(&devfreq->node);
devfreq->profile = profile;
- strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
+ strscpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
devfreq->last_status.current_frequency = profile->initial_freq;
devfreq->data = data;
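lockdep_assert_held() is stronger and cheaper than the open-coded WARN(!mutex_is_locked(...)): it compiles to nothing without lockdep, and with lockdep it verifies that the *current* task holds the lock, whereas mutex_is_locked() only tests that somebody does. A minimal sketch:

    static void example(struct mutex *lock)
    {
            /*
             * No-op without CONFIG_LOCKDEP; with lockdep it warns unless
             * the current task holds @lock.
             */
            lockdep_assert_held(lock);
    }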
diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c
new file mode 100644
index 000000000000..4f38455ad742
--- /dev/null
+++ b/drivers/devfreq/imx-bus.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct imx_bus {
+ struct devfreq_dev_profile profile;
+ struct devfreq *devfreq;
+ struct clk *clk;
+ struct platform_device *icc_pdev;
+};
+
+static int imx_bus_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ struct dev_pm_opp *new_opp;
+ int ret;
+
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ ret = PTR_ERR(new_opp);
+ dev_err(dev, "failed to get recommended opp: %d\n", ret);
+ return ret;
+ }
+ dev_pm_opp_put(new_opp);
+
+ return dev_pm_opp_set_rate(dev, *freq);
+}
+
+static int imx_bus_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct imx_bus *priv = dev_get_drvdata(dev);
+
+ *freq = clk_get_rate(priv->clk);
+
+ return 0;
+}
+
+static int imx_bus_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct imx_bus *priv = dev_get_drvdata(dev);
+
+ stat->busy_time = 0;
+ stat->total_time = 0;
+ stat->current_frequency = clk_get_rate(priv->clk);
+
+ return 0;
+}
+
+static void imx_bus_exit(struct device *dev)
+{
+ struct imx_bus *priv = dev_get_drvdata(dev);
+
+ dev_pm_opp_of_remove_table(dev);
+ platform_device_unregister(priv->icc_pdev);
+}
+
+/* imx_bus_init_icc() - register matching icc provider if required */
+static int imx_bus_init_icc(struct device *dev)
+{
+ struct imx_bus *priv = dev_get_drvdata(dev);
+ const char *icc_driver_name;
+
+ if (!of_get_property(dev->of_node, "#interconnect-cells", 0))
+ return 0;
+ if (!IS_ENABLED(CONFIG_INTERCONNECT_IMX)) {
+ dev_warn(dev, "imx interconnect drivers disabled\n");
+ return 0;
+ }
+
+ icc_driver_name = of_device_get_match_data(dev);
+ if (!icc_driver_name) {
+ dev_err(dev, "unknown interconnect driver\n");
+ return 0;
+ }
+
+ priv->icc_pdev = platform_device_register_data(
+ dev, icc_driver_name, -1, NULL, 0);
+ if (IS_ERR(priv->icc_pdev)) {
+ dev_err(dev, "failed to register icc provider %s: %ld\n",
+ icc_driver_name, PTR_ERR(priv->icc_pdev));
+ return PTR_ERR(priv->icc_pdev);
+ }
+
+ return 0;
+}
+
+static int imx_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_bus *priv;
+ const char *gov = DEVFREQ_GOV_USERSPACE;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+	 * Fetch the clock to adjust, but don't explicitly enable it.
+	 *
+	 * For the i.MX bus clocks, clk_set_rate() is safe whether the clock
+	 * is on or off, and some peripheral side-buses may stay off unless
+	 * enabled by drivers for devices on those specific buses.
+	 *
+	 * A rate adjustment on a disabled bus clock simply takes effect once
+	 * the clock is running.
+ */
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(dev, "failed to fetch clk: %d\n", ret);
+ return ret;
+ }
+ platform_set_drvdata(pdev, priv);
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get OPP table\n");
+ return ret;
+ }
+
+ priv->profile.polling_ms = 1000;
+ priv->profile.target = imx_bus_target;
+ priv->profile.get_dev_status = imx_bus_get_dev_status;
+ priv->profile.exit = imx_bus_exit;
+ priv->profile.get_cur_freq = imx_bus_get_cur_freq;
+ priv->profile.initial_freq = clk_get_rate(priv->clk);
+
+ priv->devfreq = devm_devfreq_add_device(dev, &priv->profile,
+ gov, NULL);
+ if (IS_ERR(priv->devfreq)) {
+ ret = PTR_ERR(priv->devfreq);
+ dev_err(dev, "failed to add devfreq device: %d\n", ret);
+ goto err;
+ }
+
+ ret = imx_bus_init_icc(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_pm_opp_of_remove_table(dev);
+ return ret;
+}
+
+static const struct of_device_id imx_bus_of_match[] = {
+ { .compatible = "fsl,imx8mq-noc", .data = "imx8mq-interconnect", },
+ { .compatible = "fsl,imx8mm-noc", .data = "imx8mm-interconnect", },
+ { .compatible = "fsl,imx8mn-noc", .data = "imx8mn-interconnect", },
+ { .compatible = "fsl,imx8m-noc", },
+ { .compatible = "fsl,imx8m-nic", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx_bus_of_match);
+
+static struct platform_driver imx_bus_platdrv = {
+ .probe = imx_bus_probe,
+ .driver = {
+ .name = "imx-bus-devfreq",
+ .of_match_table = of_match_ptr(imx_bus_of_match),
+ },
+};
+module_platform_driver(imx_bus_platdrv);
+
+MODULE_DESCRIPTION("Generic i.MX bus frequency scaling driver");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
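imx_bus_init_icc() spawns the matching interconnect provider as a child platform device whose driver name comes from the OF match data. A minimal sketch of that parent/child registration pattern, using the same helpers as the driver:

    const char *child_name = of_device_get_match_data(dev);
    struct platform_device *child;

    if (child_name)
            child = platform_device_register_data(dev, child_name,
                                                  PLATFORM_DEVID_NONE,
                                                  NULL, 0);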
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 28b2c7ca416e..e94a27804c20 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -420,7 +420,7 @@ tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
- if (dev_freq >= static_cpu_emc_freq)
+ if (dev_freq + actmon_dev->boost_freq >= static_cpu_emc_freq)
return 0;
return static_cpu_emc_freq;
@@ -807,10 +807,9 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
}
err = platform_get_irq(pdev, 0);
- if (err < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
+ if (err < 0)
return err;
- }
+
tegra->irq = err;
irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 9c190026bfab..995e05f609ff 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_UDMABUF) += udmabuf.o
dmabuf_selftests-y := \
selftest.o \
- st-dma-fence.o
+ st-dma-fence.o \
+ st-dma-fence-chain.o
obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 07df88f2e305..01ce125f8e8d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -691,6 +691,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
attach->dev = dev;
attach->dmabuf = dmabuf;
+ if (importer_ops)
+ attach->peer2peer = importer_ops->allow_peer2peer;
attach->importer_ops = importer_ops;
attach->importer_priv = importer_priv;
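With this change an importer opts in to peer-to-peer DMA through its attach ops rather than per-attachment flags. A minimal sketch of such an importer (my_move_notify is a placeholder callback):

    /* Sketch: my_move_notify is a placeholder. */
    static const struct dma_buf_attach_ops my_importer_ops = {
            .allow_peer2peer = true,        /* importer copes with PCIe P2P addresses */
            .move_notify = my_move_notify,  /* required for dynamic attachments */
    };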
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 44a741677d25..c435bbba851c 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -62,7 +62,8 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
replacement = NULL;
}
- tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
+ tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
+ prev, replacement);
if (tmp == prev)
dma_fence_put(tmp);
else
@@ -98,6 +99,12 @@ int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
return -EINVAL;
dma_fence_chain_for_each(*pfence, &chain->base) {
+ if ((*pfence)->seqno < seqno) { /* already signaled */
+ dma_fence_put(*pfence);
+ *pfence = NULL;
+ break;
+ }
+
if ((*pfence)->context != chain->base.context ||
to_dma_fence_chain(*pfence)->prev_seqno < seqno)
break;
@@ -221,6 +228,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops);
* @chain: the chain node to initialize
* @prev: the previous fence
* @fence: the current fence
+ * @seqno: the sequence number (syncpt) of the fence within the chain
*
* Initialize a new chain node and either start a new chain or add the node to
* the existing chain of the previous fence.
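With @seqno documented, building a chain node looks like the selftest added later in this series — a minimal sketch (dma_fence_chain_init() takes over the two references):

    struct dma_fence_chain *node = kmalloc(sizeof(*node), GFP_KERNEL);

    if (node)
            dma_fence_chain_init(node, dma_fence_get(prev),
                                 dma_fence_get(fence), seqno);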
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 052a41e2451c..90edf2b281b0 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(dma_fence_get_stub);
u64 dma_fence_context_alloc(unsigned num)
{
WARN_ON(!num);
- return atomic64_add_return(num, &dma_fence_context_counter) - num;
+ return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);
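atomic64_fetch_add() returns the value *before* the addition, so it yields the same result as atomic64_add_return() minus the addend, without the extra subtraction:

    /* Both forms return the pre-increment counter value: */
    old = atomic64_add_return(num, &ctr) - num;     /* new value, minus num */
    old = atomic64_fetch_add(num, &ctr);            /* old value directly */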
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index 5320386f02e5..55918ef9adab 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -11,3 +11,4 @@
*/
selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
selftest(dma_fence, dma_fence)
+selftest(dma_fence_chain, dma_fence_chain)
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
new file mode 100644
index 000000000000..5d45ba7ba3cd
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+static struct kmem_cache *slab_fences;
+
+static inline struct mock_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+ return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+ kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+ .release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+ return &f->base;
+}
+
+static inline struct mock_chain {
+ struct dma_fence_chain base;
+} *to_mock_chain(struct dma_fence *f) {
+ return container_of(f, struct mock_chain, base.base);
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+ struct dma_fence *fence,
+ u64 seqno)
+{
+ struct mock_chain *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ dma_fence_chain_init(&f->base,
+ dma_fence_get(prev),
+ dma_fence_get(fence),
+ seqno);
+
+ return &f->base.base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f, *chain;
+ int err = 0;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ chain = mock_chain(NULL, f, 1);
+ if (!chain)
+ err = -ENOMEM;
+
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+ dma_fence_put(chain);
+
+ return err;
+}
+
+struct fence_chains {
+ unsigned int chain_length;
+ struct dma_fence **fences;
+ struct dma_fence **chains;
+
+ struct dma_fence *tail;
+};
+
+static uint64_t seqno_inc(unsigned int i)
+{
+ return i + 1;
+}
+
+static int fence_chains_init(struct fence_chains *fc, unsigned int count,
+ uint64_t (*seqno_fn)(unsigned int))
+{
+ unsigned int i;
+ int err = 0;
+
+ fc->chains = kvmalloc_array(count, sizeof(*fc->chains),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!fc->chains)
+ return -ENOMEM;
+
+ fc->fences = kvmalloc_array(count, sizeof(*fc->fences),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!fc->fences) {
+ err = -ENOMEM;
+ goto err_chains;
+ }
+
+ fc->tail = NULL;
+ for (i = 0; i < count; i++) {
+ fc->fences[i] = mock_fence();
+ if (!fc->fences[i]) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+ fc->chains[i] = mock_chain(fc->tail,
+ fc->fences[i],
+ seqno_fn(i));
+ if (!fc->chains[i]) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+ fc->tail = fc->chains[i];
+ }
+
+ fc->chain_length = i;
+ return 0;
+
+unwind:
+ for (i = 0; i < count; i++) {
+ dma_fence_put(fc->fences[i]);
+ dma_fence_put(fc->chains[i]);
+ }
+ kvfree(fc->fences);
+err_chains:
+ kvfree(fc->chains);
+ return err;
+}
+
+static void fence_chains_fini(struct fence_chains *fc)
+{
+ unsigned int i;
+
+ for (i = 0; i < fc->chain_length; i++) {
+ dma_fence_signal(fc->fences[i]);
+ dma_fence_put(fc->fences[i]);
+ }
+ kvfree(fc->fences);
+
+ for (i = 0; i < fc->chain_length; i++)
+ dma_fence_put(fc->chains[i]);
+ kvfree(fc->chains);
+}
+
+static int find_seqno(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc);
+ if (err)
+ return err;
+
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 0);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno(0)!\n", err);
+ goto err;
+ }
+
+ for (i = 0; i < fc.chain_length; i++) {
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, i + 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno(%d:%d)!\n",
+ err, fc.chain_length + 1, i + 1);
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+ fc.chain_length + 1, i + 1);
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, i + 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Error reported for finding self\n");
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence reported by find self\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, i + 2);
+ dma_fence_put(fence);
+ if (!err) {
+ pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n",
+ i + 1, i + 2);
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, i);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Error reported for previous fence!\n");
+ goto err;
+ }
+ if (i > 0 && fence != fc.chains[i - 1]) {
+ pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+ i + 1, i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int find_signaled(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+
+ err = fence_chains_init(&fc, 2, seqno_inc);
+ if (err)
+ return err;
+
+ dma_fence_signal(fc.fences[0]);
+
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno()!\n", err);
+ goto err;
+ }
+
+ if (fence && fence != fc.chains[0]) {
+ pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:1\n",
+ fence->seqno);
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, 1);
+ dma_fence_put(fence);
+ if (err)
+ pr_err("Reported %d for finding self!\n", err);
+
+ err = -EINVAL;
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int find_out_of_order(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+
+ err = fence_chains_init(&fc, 3, seqno_inc);
+ if (err)
+ return err;
+
+ dma_fence_signal(fc.fences[1]);
+
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 2);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno()!\n", err);
+ goto err;
+ }
+
+ if (fence && fence != fc.chains[1]) {
+ pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:2\n",
+ fence->seqno);
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, 2);
+ dma_fence_put(fence);
+ if (err)
+ pr_err("Reported %d for finding self!\n", err);
+
+ err = -EINVAL;
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static uint64_t seqno_inc2(unsigned int i)
+{
+ return 2 * i + 2;
+}
+
+static int find_gap(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc2);
+ if (err)
+ return err;
+
+ for (i = 0; i < fc.chain_length; i++) {
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 2 * i + 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno(%d:%d)!\n",
+ err, fc.chain_length + 1, 2 * i + 1);
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence.seqno:%lld reported by find_seqno(%d:%d)\n",
+ fence->seqno,
+ fc.chain_length + 1,
+ 2 * i + 1);
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, 2 * i + 2);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Error reported for finding self\n");
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence reported by find self\n");
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+struct find_race {
+ struct fence_chains fc;
+ atomic_t children;
+};
+
+static int __find_race(void *arg)
+{
+ struct find_race *data = arg;
+ int err = 0;
+
+ while (!kthread_should_stop()) {
+ struct dma_fence *fence = dma_fence_get(data->fc.tail);
+ int seqno;
+
+ seqno = prandom_u32_max(data->fc.chain_length) + 1;
+
+ err = dma_fence_chain_find_seqno(&fence, seqno);
+ if (err) {
+ pr_err("Failed to find fence seqno:%d\n",
+ seqno);
+ dma_fence_put(fence);
+ break;
+ }
+ if (!fence)
+ goto signal;
+
+ err = dma_fence_chain_find_seqno(&fence, seqno);
+ if (err) {
+ pr_err("Reported an invalid fence for find-self:%d\n",
+ seqno);
+ dma_fence_put(fence);
+ break;
+ }
+
+ if (fence->seqno < seqno) {
+ pr_err("Reported an earlier fence.seqno:%lld for seqno:%d\n",
+ fence->seqno, seqno);
+ err = -EINVAL;
+ dma_fence_put(fence);
+ break;
+ }
+
+ dma_fence_put(fence);
+
+signal:
+ seqno = prandom_u32_max(data->fc.chain_length - 1);
+ dma_fence_signal(data->fc.fences[seqno]);
+ cond_resched();
+ }
+
+ if (atomic_dec_and_test(&data->children))
+ wake_up_var(&data->children);
+ return err;
+}
+
+static int find_race(void *arg)
+{
+ struct find_race data;
+ int ncpus = num_online_cpus();
+ struct task_struct **threads;
+ unsigned long count;
+ int err;
+ int i;
+
+ err = fence_chains_init(&data.fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+ if (!threads) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ atomic_set(&data.children, 0);
+ for (i = 0; i < ncpus; i++) {
+ threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i);
+ if (IS_ERR(threads[i])) {
+ ncpus = i;
+ break;
+ }
+ atomic_inc(&data.children);
+ get_task_struct(threads[i]);
+ }
+
+ wait_var_event_timeout(&data.children,
+ !atomic_read(&data.children),
+ 5 * HZ);
+
+ for (i = 0; i < ncpus; i++) {
+ int ret;
+
+ ret = kthread_stop(threads[i]);
+ if (ret && !err)
+ err = ret;
+ put_task_struct(threads[i]);
+ }
+ kfree(threads);
+
+ count = 0;
+ for (i = 0; i < data.fc.chain_length; i++)
+ if (dma_fence_is_signaled(data.fc.fences[i]))
+ count++;
+ pr_info("Completed %lu cycles\n", count);
+
+err:
+ fence_chains_fini(&data.fc);
+ return err;
+}
+
+static int signal_forward(void *arg)
+{
+ struct fence_chains fc;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc);
+ if (err)
+ return err;
+
+ for (i = 0; i < fc.chain_length; i++) {
+ dma_fence_signal(fc.fences[i]);
+
+ if (!dma_fence_is_signaled(fc.chains[i])) {
+ pr_err("chain[%d] not signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (i + 1 < fc.chain_length &&
+ dma_fence_is_signaled(fc.chains[i + 1])) {
+ pr_err("chain[%d] is signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int signal_backward(void *arg)
+{
+ struct fence_chains fc;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc);
+ if (err)
+ return err;
+
+ for (i = fc.chain_length; i--; ) {
+ dma_fence_signal(fc.fences[i]);
+
+ if (i > 0 && dma_fence_is_signaled(fc.chains[i])) {
+ pr_err("chain[%d] is signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ for (i = 0; i < fc.chain_length; i++) {
+ if (!dma_fence_is_signaled(fc.chains[i])) {
+ pr_err("chain[%d] was not signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int __wait_fence_chains(void *arg)
+{
+ struct fence_chains *fc = arg;
+
+ if (dma_fence_wait(fc->tail, false))
+ return -EIO;
+
+ return 0;
+}
+
+static int wait_forward(void *arg)
+{
+ struct fence_chains fc;
+ struct task_struct *tsk;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto err;
+ }
+ get_task_struct(tsk);
+ yield_to(tsk, true);
+
+ for (i = 0; i < fc.chain_length; i++)
+ dma_fence_signal(fc.fences[i]);
+
+ err = kthread_stop(tsk);
+ put_task_struct(tsk);
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int wait_backward(void *arg)
+{
+ struct fence_chains fc;
+ struct task_struct *tsk;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto err;
+ }
+ get_task_struct(tsk);
+ yield_to(tsk, true);
+
+ for (i = fc.chain_length; i--; )
+ dma_fence_signal(fc.fences[i]);
+
+ err = kthread_stop(tsk);
+ put_task_struct(tsk);
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static void randomise_fences(struct fence_chains *fc)
+{
+ unsigned int count = fc->chain_length;
+
+ /* Fisher-Yates shuffle courtesy of Knuth */
+ while (--count) {
+ unsigned int swp;
+
+ swp = prandom_u32_max(count + 1);
+ if (swp == count)
+ continue;
+
+ swap(fc->fences[count], fc->fences[swp]);
+ }
+}
+
+static int wait_random(void *arg)
+{
+ struct fence_chains fc;
+ struct task_struct *tsk;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ randomise_fences(&fc);
+
+ tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto err;
+ }
+ get_task_struct(tsk);
+ yield_to(tsk, true);
+
+ for (i = 0; i < fc.chain_length; i++)
+ dma_fence_signal(fc.fences[i]);
+
+ err = kthread_stop(tsk);
+ put_task_struct(tsk);
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+int dma_fence_chain(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(find_seqno),
+ SUBTEST(find_signaled),
+ SUBTEST(find_out_of_order),
+ SUBTEST(find_gap),
+ SUBTEST(find_race),
+ SUBTEST(signal_forward),
+ SUBTEST(signal_backward),
+ SUBTEST(wait_forward),
+ SUBTEST(wait_backward),
+ SUBTEST(wait_random),
+ };
+ int ret;
+
+ pr_info("sizeof(dma_fence_chain)=%zu\n",
+ sizeof(struct dma_fence_chain));
+
+ slab_fences = KMEM_CACHE(mock_fence,
+ SLAB_TYPESAFE_BY_RCU |
+ SLAB_HWCACHE_ALIGN);
+ if (!slab_fences)
+ return -ENOMEM;
+
+ ret = subtests(tests, NULL);
+
+ kmem_cache_destroy(slab_fences);
+ return ret;
+}
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 364dd34799d4..0425984db118 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -1166,10 +1166,11 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
mutex_unlock(&info->lock);
return ret;
} else if (dmatest_run) {
- if (is_threaded_test_pending(info))
- start_threaded_tests(info);
- else
- pr_info("Could not start test, no channels configured\n");
+ if (!is_threaded_test_pending(info)) {
+ pr_info("No channels configured, continue with any\n");
+ add_threaded_test(info);
+ }
+ start_threaded_tests(info);
} else {
stop_threaded_test(info);
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index f6f49f0f6fae..8d79a8787104 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -62,6 +62,13 @@ int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
perm.ignore = 0;
iowrite32(perm.bits, idxd->reg_base + offset);
+ /*
+ * A readback from the device ensures that any previously generated
+ * completion record writes are visible to software based on PCI
+ * ordering rules.
+ */
+ perm.bits = ioread32(idxd->reg_base + offset);
+
return 0;
}
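The readback here is the standard flush for posted MMIO writes: reading any register from the same device forces the write out and, per PCI ordering rules, makes earlier DMA writes from the device visible first. The generic shape (REG_PERM and val are placeholders):

    iowrite32(val, base + REG_PERM);
    (void)ioread32(base + REG_PERM);        /* flush posted write, order DMA */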
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index d6fcd2e60103..6510791b9921 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -173,6 +173,7 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
struct llist_node *head;
int queued = 0;
+ *processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
return 0;
@@ -197,6 +198,7 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
struct list_head *node, *next;
int queued = 0;
+ *processed = 0;
if (list_empty(&irq_entry->work_list))
return 0;
@@ -218,10 +220,9 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
return queued;
}
-irqreturn_t idxd_wq_thread(int irq, void *data)
+static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
- struct idxd_irq_entry *irq_entry = data;
- int rc, processed = 0, retry = 0;
+ int rc, processed, total = 0;
/*
* There are two lists we are processing. The pending_llist is where
@@ -244,15 +245,26 @@ irqreturn_t idxd_wq_thread(int irq, void *data)
*/
do {
rc = irq_process_work_list(irq_entry, &processed);
- if (rc != 0) {
- retry++;
+ total += processed;
+ if (rc != 0)
continue;
- }
rc = irq_process_pending_llist(irq_entry, &processed);
- } while (rc != 0 && retry != 10);
+ total += processed;
+ } while (rc != 0);
+
+ return total;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ int processed;
+ processed = idxd_desc_process(irq_entry);
idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+ /* catch anything unprocessed after unmasking */
+ processed += idxd_desc_process(irq_entry);
if (processed == 0)
return IRQ_NONE;
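Draining once more after the unmask closes the window where a completion lands between the last scan and the vector being re-enabled. The general drain/unmask/re-drain shape, with placeholder helpers:

    /* Sketch: drain_completions() and unmask_vector() are placeholders. */
    processed = drain_completions(irqctx);
    unmask_vector(irqctx);
    processed += drain_completions(irqctx); /* catch late arrivals */
    return processed ? IRQ_HANDLED : IRQ_NONE;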
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index c683051257fd..66ef70b00ec0 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -175,13 +175,11 @@ struct owl_dma_txd {
* @id: physical index to this channel
* @base: virtual memory base for the dma channel
* @vchan: the virtual channel currently being served by this physical channel
- * @lock: a lock to use when altering an instance of this struct
*/
struct owl_dma_pchan {
u32 id;
void __iomem *base;
struct owl_dma_vchan *vchan;
- spinlock_t lock;
};
/**
@@ -437,14 +435,14 @@ static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
for (i = 0; i < od->nr_pchans; i++) {
pchan = &od->pchans[i];
- spin_lock_irqsave(&pchan->lock, flags);
+ spin_lock_irqsave(&od->lock, flags);
if (!pchan->vchan) {
pchan->vchan = vchan;
- spin_unlock_irqrestore(&pchan->lock, flags);
+ spin_unlock_irqrestore(&od->lock, flags);
break;
}
- spin_unlock_irqrestore(&pchan->lock, flags);
+ spin_unlock_irqrestore(&od->lock, flags);
}
return pchan;
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index c4ce5dfb149b..db58d7e4f9fe 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -900,7 +900,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
ret = dma_async_device_register(&tdma->dma_dev);
if (ret < 0) {
dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
- goto irq_dispose;
+ goto rpm_put;
}
ret = of_dma_controller_register(pdev->dev.of_node,
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index a9c0251adf1a..a90e154b0ae0 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2156,7 +2156,8 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
d->residue += sg_dma_len(sgent);
}
- cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
+ CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
return d;
}
@@ -2733,7 +2734,8 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
tr_req[1].dicnt3 = 1;
}
- cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
+ CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
if (uc->config.metadata_size)
d->vd.tx.metadata_ops = &metadata_ops;
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d47749a35863..ff253696d183 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -434,6 +434,7 @@ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
struct zynqmp_dma_desc_sw *child, *next;
chan->desc_free_cnt++;
+ list_del(&sdesc->node);
list_add_tail(&sdesc->node, &chan->free_list);
list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
chan->desc_free_cnt++;
@@ -608,8 +609,6 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
dma_async_tx_callback callback;
void *callback_param;
- list_del(&desc->node);
-
callback = desc->async_tx.callback;
callback_param = desc->async_tx.callback_param;
if (callback) {
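The list_del() added before list_add_tail() removes the descriptor from whatever list it is still on, preventing list corruption; the pair is exactly what list_move_tail() does:

    list_del(&sdesc->node);
    list_add_tail(&sdesc->node, &chan->free_list);
    /* equivalent to: */
    list_move_tail(&sdesc->node, &chan->free_list);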
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index f91f3bc1e0b2..9cf7cc1f3f72 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3403,7 +3403,7 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
static int hw_info_get(struct amd64_pvt *pvt)
{
u16 pci_id1, pci_id2;
- int ret = -EINVAL;
+ int ret;
if (pvt->fam >= 0x17) {
pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index 93c82bc17493..169353710982 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -44,14 +44,6 @@ static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
" PCI Access Write Error at 0x%x\n", reg);
}
-static char * const bridge_str[] = {
- [NORTH_A] = "NORTH A",
- [NORTH_B] = "NORTH B",
- [SOUTH_A] = "SOUTH A",
- [SOUTH_B] = "SOUTH B",
- [NO_BRIDGE] = "NO BRIDGE",
-};
-
/* Support up to two AMD8131 chipsets on a platform */
static struct amd8131_dev_info amd8131_devices[] = {
{
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index a7502ebe9bdc..e3e757513d1b 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -78,7 +78,7 @@ struct axp_mc_drvdata {
char msg[128];
};
-/* derived from "DRAM Address Multiplexing" in the ARAMDA XP Functional Spec */
+/* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */
static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata,
uint8_t cs, uint8_t bank, uint16_t row,
uint16_t col)
@@ -160,12 +160,12 @@ static void axp_mc_check(struct mem_ctl_info *mci)
if (cnt_sbe)
cnt_sbe--;
else
- dev_warn(mci->pdev, "inconsistent SBE count detected");
+ dev_warn(mci->pdev, "inconsistent SBE count detected\n");
} else {
if (cnt_dbe)
cnt_dbe--;
else
- dev_warn(mci->pdev, "inconsistent DBE count detected");
+ dev_warn(mci->pdev, "inconsistent DBE count detected\n");
}
/* report earlier errors */
@@ -304,7 +304,7 @@ static int axp_mc_probe(struct platform_device *pdev)
config = readl(base + SDRAM_CONFIG_REG);
if (!(config & SDRAM_CONFIG_ECC_MASK)) {
- dev_warn(&pdev->dev, "SDRAM ECC is not enabled");
+ dev_warn(&pdev->dev, "SDRAM ECC is not enabled\n");
return -EINVAL;
}
@@ -532,9 +532,9 @@ static int aurora_l2_probe(struct platform_device *pdev)
l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL);
if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN))
- dev_warn(&pdev->dev, "tag parity is not enabled");
+ dev_warn(&pdev->dev, "tag parity is not enabled\n");
if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN))
- dev_warn(&pdev->dev, "data ECC is not enabled");
+ dev_warn(&pdev->dev, "data ECC is not enabled\n");
dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
"cpu", 1, "L", 1, 2, NULL, 0, 0);
@@ -618,7 +618,7 @@ static int __init armada_xp_edac_init(void)
res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (res)
- pr_warn("Aramda XP EDAC drivers fail to register\n");
+ pr_warn("Armada XP EDAC drivers fail to register\n");
return 0;
}
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index df08de963d10..9b0044cd21cd 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -122,10 +122,22 @@ static int i10nm_get_all_munits(void)
return 0;
}
+static struct res_config i10nm_cfg0 = {
+ .type = I10NM,
+ .decs_did = 0x3452,
+ .busno_cfg_offset = 0xcc,
+};
+
+static struct res_config i10nm_cfg1 = {
+ .type = I10NM,
+ .decs_did = 0x3452,
+ .busno_cfg_offset = 0xd0,
+};
+
static const struct x86_cpu_id i10nm_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &i10nm_cfg0),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &i10nm_cfg0),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &i10nm_cfg1),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -161,7 +173,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
mtr, mcddrtcfg, imc->mc, i, j);
if (IS_DIMM_PRESENT(mtr))
- ndimms += skx_get_dimm_info(mtr, 0, dimm,
+ ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
imc, i, j);
else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
@@ -234,6 +246,7 @@ static int __init i10nm_init(void)
{
u8 mc = 0, src_id = 0, node_id = 0;
const struct x86_cpu_id *id;
+ struct res_config *cfg;
const char *owner;
struct skx_dev *d;
int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
@@ -249,11 +262,17 @@ static int __init i10nm_init(void)
if (!id)
return -ENODEV;
+ cfg = (struct res_config *)id->driver_data;
+
+ /* Newer steppings have different offset for ATOM_TREMONT_D/ICELAKE_X */
+ if (boot_cpu_data.x86_stepping >= 4)
+ cfg->busno_cfg_offset = 0xd0;
+
rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
if (rc)
return rc;
- rc = skx_get_all_bus_mappings(0x3452, 0xcc, I10NM, &i10nm_edac_list);
+ rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
if (rc < 0)
goto fail;
if (rc == 0) {
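The refactor replaces the loose (did, offset, type) argument triple with a parameter object hung off the CPU match table, so per-model quirks travel together. A compact sketch of the lookup:

    /* Sketch: per-model config via x86_cpu_id.driver_data. */
    const struct x86_cpu_id *id = x86_match_cpu(i10nm_cpuids);
    struct res_config *cfg;

    if (!id)
            return -ENODEV;
    cfg = (struct res_config *)id->driver_data;
    /* cfg->decs_did, cfg->busno_cfg_offset and cfg->type now stay in sync */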
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 46a3a3440f5e..b907a0f4ece6 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -157,33 +157,35 @@ fail:
return -ENODEV;
}
+static struct res_config skx_cfg = {
+ .type = SKX,
+ .decs_did = 0x2016,
+ .busno_cfg_offset = 0xcc,
+};
+
static const struct x86_cpu_id skx_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
-#define SKX_GET_MTMTR(dev, reg) \
- pci_read_config_dword((dev), 0x87c, &(reg))
-
-static bool skx_check_ecc(struct pci_dev *pdev)
+static bool skx_check_ecc(u32 mcmtr)
{
- u32 mtmtr;
-
- SKX_GET_MTMTR(pdev, mtmtr);
-
- return !!GET_BITFIELD(mtmtr, 2, 2);
+ return !!GET_BITFIELD(mcmtr, 2, 2);
}
static int skx_get_dimm_config(struct mem_ctl_info *mci)
{
struct skx_pvt *pvt = mci->pvt_info;
+ u32 mtr, mcmtr, amap, mcddrtcfg;
struct skx_imc *imc = pvt->imc;
- u32 mtr, amap, mcddrtcfg;
struct dimm_info *dimm;
int i, j;
int ndimms;
+ /* Only the mcmtr on the first channel is effective */
+ pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
+
for (i = 0; i < SKX_NUM_CHANNELS; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
@@ -193,14 +195,14 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
if (IS_DIMM_PRESENT(mtr)) {
- ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j);
+ ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j);
} else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) {
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
EDAC_MOD_STR);
nvdimm_count++;
}
}
- if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
+ if (ndimms && !skx_check_ecc(mcmtr)) {
skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
return -ENODEV;
}
@@ -641,6 +643,7 @@ static inline void teardown_skx_debug(void) {}
static int __init skx_init(void)
{
const struct x86_cpu_id *id;
+ struct res_config *cfg;
const struct munit *m;
const char *owner;
int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
@@ -657,11 +660,13 @@ static int __init skx_init(void)
if (!id)
return -ENODEV;
+ cfg = (struct res_config *)id->driver_data;
+
rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
if (rc)
return rc;
- rc = skx_get_all_bus_mappings(0x2016, 0xcc, SKX, &skx_edac_list);
+ rc = skx_get_all_bus_mappings(cfg, &skx_edac_list);
if (rc < 0)
goto fail;
if (rc == 0) {
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 99bbaf629b8d..46be1a77bd1d 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -197,12 +197,11 @@ static int get_width(u32 mtr)
}
/*
- * We use the per-socket device @did to count how many sockets are present,
+ * We use the per-socket device @cfg->did to count how many sockets are present,
 * and to determine which PCI buses are associated with each socket. Allocate
* and build the full list of all the skx_dev structures that we need here.
*/
-int skx_get_all_bus_mappings(unsigned int did, int off, enum type type,
- struct list_head **list)
+int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
{
struct pci_dev *pdev, *prev;
struct skx_dev *d;
@@ -211,7 +210,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type type,
prev = NULL;
for (;;) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, prev);
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, cfg->decs_did, prev);
if (!pdev)
break;
ndev++;
@@ -221,7 +220,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type type,
return -ENOMEM;
}
- if (pci_read_config_dword(pdev, off, &reg)) {
+ if (pci_read_config_dword(pdev, cfg->busno_cfg_offset, &reg)) {
kfree(d);
pci_dev_put(pdev);
skx_printk(KERN_ERR, "Failed to read bus idx\n");
@@ -230,7 +229,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type type,
d->bus[0] = GET_BITFIELD(reg, 0, 7);
d->bus[1] = GET_BITFIELD(reg, 8, 15);
- if (type == SKX) {
+ if (cfg->type == SKX) {
d->seg = pci_domain_nr(pdev->bus);
d->bus[2] = GET_BITFIELD(reg, 16, 23);
d->bus[3] = GET_BITFIELD(reg, 24, 31);
@@ -304,7 +303,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows")
#define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols")
-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno)
{
int banks = 16, ranks, rows, cols, npages;
@@ -324,8 +323,8 @@ int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
imc->mc, chan, dimmno, size, npages,
banks, 1 << ranks, rows, cols);
- imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
- imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+ imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0);
+ imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9);
imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
imc->chan[chan].dimms[dimmno].rowbits = rows;
imc->chan[chan].dimms[dimmno].colbits = cols;
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 60d1ea669afd..78f8c1de0b71 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -112,6 +112,14 @@ struct decoded_addr {
int bank_group;
};
+struct res_config {
+ enum type type;
+ /* Configuration agent device ID */
+ unsigned int decs_did;
+ /* Default bus number configuration register offset */
+ int busno_cfg_offset;
+};
+
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci);
typedef bool (*skx_decode_f)(struct decoded_addr *res);
typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len);
@@ -123,12 +131,11 @@ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
int skx_get_node_id(struct skx_dev *d, u8 *id);
-int skx_get_all_bus_mappings(unsigned int did, int off, enum type,
- struct list_head **list);
+int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list);
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm);
-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno);
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 34be60fe6892..4af9744cc6d0 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1278,7 +1278,7 @@ OCX_DEBUGFS_ATTR(lne23_badcnt, OCX_LNE_BAD_CNT(23));
OCX_DEBUGFS_ATTR(com_int, OCX_COM_INT_W1S);
-struct debugfs_entry *ocx_dfs_ents[] = {
+static struct debugfs_entry *ocx_dfs_ents[] = {
&debugfs_tlk0_ecc_ctl,
&debugfs_tlk1_ecc_ctl,
&debugfs_tlk2_ecc_ctl,
@@ -1919,19 +1919,19 @@ err_free:
L2C_DEBUGFS_ATTR(tad_int, L2C_TAD_INT_W1S);
-struct debugfs_entry *l2c_tad_dfs_ents[] = {
+static struct debugfs_entry *l2c_tad_dfs_ents[] = {
&debugfs_tad_int,
};
L2C_DEBUGFS_ATTR(cbc_int, L2C_CBC_INT_W1S);
-struct debugfs_entry *l2c_cbc_dfs_ents[] = {
+static struct debugfs_entry *l2c_cbc_dfs_ents[] = {
&debugfs_cbc_int,
};
L2C_DEBUGFS_ATTR(mci_int, L2C_MCI_INT_W1S);
-struct debugfs_entry *l2c_mci_dfs_ents[] = {
+static struct debugfs_entry *l2c_mci_dfs_ents[] = {
&debugfs_mci_int,
};
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index e4a1032ba0b5..1d2c27a00a4a 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1349,7 +1349,6 @@ static int xgene_edac_l3_remove(struct xgene_edac_dev_ctx *l3)
#define WORD_ALIGNED_ERR_MASK BIT(28)
#define PAGE_ACCESS_ERR_MASK BIT(27)
#define WRITE_ACCESS_MASK BIT(26)
-#define RBERRADDR_RD(src) ((src) & 0x03FFFFFF)
static const char * const soc_mem_err_v1[] = {
"10GbE0",
@@ -1483,13 +1482,11 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
return;
if (reg & STICKYERR_MASK) {
bool write;
- u32 address;
dev_err(edac_dev->dev, "IOB bus access error(s)\n");
if (regmap_read(ctx->edac->rb_map, RBEIR, &reg))
return;
write = reg & WRITE_ACCESS_MASK ? 1 : 0;
- address = RBERRADDR_RD(reg);
if (reg & AGENT_OFFLINE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to offline agent error\n",
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index ad02dc6747a4..0317b614b680 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -124,7 +124,7 @@ static int adc_jack_probe(struct platform_device *pdev)
for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++);
data->num_conditions = i;
- data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel);
+ data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel);
if (IS_ERR(data->chan))
return PTR_ERR(data->chan);
@@ -164,7 +164,6 @@ static int adc_jack_remove(struct platform_device *pdev)
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
- iio_channel_release(data->chan);
return 0;
}
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 7401733db08b..aae82db542a5 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1460,7 +1460,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (!info->input) {
dev_err(arizona->dev, "Can't allocate input dev\n");
ret = -ENOMEM;
- goto err_register;
+ return ret;
}
info->input->name = "Headset";
@@ -1492,7 +1492,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
pdata->micd_pol_gpio, ret);
- goto err_register;
+ return ret;
}
info->micd_pol_gpio = gpio_to_desc(pdata->micd_pol_gpio);
@@ -1515,7 +1515,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
dev_err(arizona->dev,
"Failed to get microphone polarity GPIO: %d\n",
ret);
- goto err_register;
+ return ret;
}
}
@@ -1672,7 +1672,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
ret);
- goto err_gpio;
+ goto err_pm;
}
ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
@@ -1721,14 +1721,14 @@ static int arizona_extcon_probe(struct platform_device *pdev)
dev_warn(arizona->dev, "Failed to set MICVDD to bypass: %d\n",
ret);
- pm_runtime_put(&pdev->dev);
-
ret = input_register_device(info->input);
if (ret) {
dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
goto err_hpdet;
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
err_hpdet:
@@ -1743,10 +1743,11 @@ err_rise_wake:
arizona_set_irq_wake(arizona, jack_irq_rise, 0);
err_rise:
arizona_free_irq(arizona, jack_irq_rise, info);
+err_pm:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
err_gpio:
gpiod_put(info->micd_pol_gpio);
-err_register:
- pm_runtime_disable(&pdev->dev);
return ret;
}
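The relabelled error paths restore strict reverse-order unwinding: pm_runtime_put()/pm_runtime_disable() now undo the runtime-PM setup at the point in the chain where it happened, instead of being split across unrelated labels. The general shape, as a schematic sketch with placeholder helpers:

    ret = acquire_a();
    if (ret)
            return ret;
    ret = acquire_b();
    if (ret)
            goto err_a;
    ret = acquire_c();
    if (ret)
            goto err_b;
    return 0;

    err_b:
            release_b();
    err_a:
            release_a();
            return ret;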
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 32f663436e6e..cc47d626095c 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -782,9 +782,19 @@ static const struct platform_device_id max14577_muic_id[] = {
};
MODULE_DEVICE_TABLE(platform, max14577_muic_id);
+static const struct of_device_id of_max14577_muic_dt_match[] = {
+ { .compatible = "maxim,max14577-muic",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX14577, },
+ { .compatible = "maxim,max77836-muic",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX77836, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_max14577_muic_dt_match);
+
static struct platform_driver max14577_muic_driver = {
.driver = {
.name = "max14577-muic",
+ .of_match_table = of_max14577_muic_dt_match,
},
.probe = max14577_muic_probe,
.remove = max14577_muic_remove,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 2dfbfec572f9..0a6438cbb3f3 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -900,7 +900,7 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
{
unsigned long flags;
- int ret, idx = -EINVAL;
+ int ret, idx;
if (!edev || !nb)
return -EINVAL;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 6e291d8f3a27..c7ea4f2d5ca6 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1081,8 +1081,6 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
return -EINVAL;
p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
- if (!access_ok(p, a->size))
- return -EFAULT;
end = (void __user *)p + a->size;
count = 0;
@@ -1120,7 +1118,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
&p->header[transmit_header_bytes / 4];
if (next > end)
return -EINVAL;
- if (__copy_from_user
+ if (copy_from_user
(u.packet.header, p->header, transmit_header_bytes))
return -EFAULT;
if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
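Dropping the separate access_ok() is safe because copy_from_user() validates the whole user range itself (unlike the raw __copy_from_user() it replaces), so the caller's pattern reduces to:

    if (copy_from_user(dst, user_src, len))
            return -EFAULT;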
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 8007d4aa76dc..fbd785dd0513 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -178,8 +178,9 @@ config ISCSI_IBFT
Otherwise, say N.
config RASPBERRYPI_FIRMWARE
- tristate "Raspberry Pi Firmware Driver"
+ bool "Raspberry Pi Firmware Driver"
depends on BCM2835_MBOX
+ default USB_PCI
help
This option enables support for communicating with the firmware on the
Raspberry Pi.
@@ -295,15 +296,13 @@ config TURRIS_MOX_RWTM
other manufacturing data and also utilize the Entropy Bit Generator
for hardware random number generation.
-config HAVE_ARM_SMCCC
- bool
-
-source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/imx/Kconfig"
source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/psci/Kconfig"
+source "drivers/firmware/smccc/Kconfig"
source "drivers/firmware/tegra/Kconfig"
source "drivers/firmware/xilinx/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index e9fb838af4df..99510be9f5ed 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -23,12 +23,13 @@ obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
-obj-y += psci/
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
obj-y += imx/
+obj-y += psci/
+obj-y += smccc/
obj-y += tegra/
obj-y += xilinx/
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 6694d0d908d6..1cad32b38b29 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -2,6 +2,8 @@
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
-scmi-transport-y = mailbox.o shmem.o
+scmi-transport-y = shmem.o
+scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
+scmi-transport-$(CONFIG_ARM_PSCI_FW) += smc.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index f804e8af6521..ce7d9203e41b 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -14,6 +14,13 @@ enum scmi_base_protocol_cmd {
BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
BASE_DISCOVER_AGENT = 0x7,
BASE_NOTIFY_ERRORS = 0x8,
+ BASE_SET_DEVICE_PERMISSIONS = 0x9,
+ BASE_SET_PROTOCOL_PERMISSIONS = 0xa,
+ BASE_RESET_AGENT_CONFIGURATION = 0xb,
+};
+
+enum scmi_base_protocol_notify {
+ BASE_ERROR_EVENT = 0x0,
};
struct scmi_msg_resp_base_attributes {
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 5ac06469b01c..31fe5a22a011 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -178,6 +178,8 @@ struct scmi_chan_info {
* @send_message: Callback to send a message
* @mark_txdone: Callback to mark tx as done
* @fetch_response: Callback to fetch response
+ * @fetch_notification: Callback to fetch notification
+ * @clear_channel: Callback to clear a channel
* @poll_done: Callback to poll transfer status
*/
struct scmi_transport_ops {
@@ -190,6 +192,9 @@ struct scmi_transport_ops {
void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
void (*fetch_response)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
+ void (*fetch_notification)(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer);
+ void (*clear_channel)(struct scmi_chan_info *cinfo);
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
@@ -210,6 +215,9 @@ struct scmi_desc {
};
extern const struct scmi_desc scmi_mailbox_desc;
+#ifdef CONFIG_HAVE_ARM_SMCCC
+extern const struct scmi_desc scmi_smc_desc;
+#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
@@ -222,5 +230,8 @@ void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer);
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
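The two new callbacks extend the vtable through which the SCMI core drives any transport, which is what lets the mailbox and smc back ends coexist behind one interface. A self-contained sketch of the same function-pointer dispatch; the names below are simplified, not the actual SCMI types:

#include <stdio.h>

struct chan;

struct transport_ops {
        int  (*send)(struct chan *c, int msg);
        void (*fetch_notification)(struct chan *c, int max_len);
        void (*clear_channel)(struct chan *c);
};

struct chan {
        const struct transport_ops *ops;
        const char *name;
};

static int mbox_send(struct chan *c, int msg)
{
        printf("%s: send %d\n", c->name, msg);
        return 0;
}

static void mbox_fetch_notification(struct chan *c, int max_len)
{
        printf("%s: fetch notification (<= %d bytes)\n", c->name, max_len);
}

static void mbox_clear_channel(struct chan *c)
{
        printf("%s: channel marked free\n", c->name);
}

static const struct transport_ops mbox_ops = {
        .send                   = mbox_send,
        .fetch_notification     = mbox_fetch_notification,
        .clear_channel          = mbox_clear_channel,
};

int main(void)
{
        struct chan c = { .ops = &mbox_ops, .name = "mailbox" };

        /* The core only ever dispatches through the ops table. */
        c.ops->send(&c, 42);
        c.ops->fetch_notification(&c, 128);
        c.ops->clear_channel(&c);
        return 0;
}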
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index dbec767222e9..7483cacf63f9 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -76,6 +76,7 @@ struct scmi_xfers_info {
* implementation version and (sub-)vendor identification.
* @handle: Instance of SCMI handle to send to clients
* @tx_minfo: Universal Transmit Message management info
+ * @rx_minfo: Universal Receive Message management info
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
* @protocols_imp: List of protocols implemented, currently maximum of
@@ -89,6 +90,7 @@ struct scmi_info {
struct scmi_revision_info version;
struct scmi_handle handle;
struct scmi_xfers_info tx_minfo;
+ struct scmi_xfers_info rx_minfo;
struct idr tx_idr;
struct idr rx_idr;
u8 *protocols_imp;
@@ -200,37 +202,66 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
-/**
- * scmi_rx_callback() - callback for receiving messages
- *
- * @cinfo: SCMI channel info
- * @msg_hdr: Message header
- *
- * Processes one received message to appropriate transfer information and
- * signals completion of the transfer.
- *
- * NOTE: This function will be invoked in IRQ context, hence should be
- * as optimal as possible.
- */
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
- struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct scmi_xfers_info *minfo = &info->tx_minfo;
- u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
- u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
- struct device *dev = cinfo->dev;
struct scmi_xfer *xfer;
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->rx_minfo;
- if (msg_type == MSG_TYPE_NOTIFICATION)
- return; /* Notifications not yet supported */
+ xfer = scmi_xfer_get(cinfo->handle, minfo);
+ if (IS_ERR(xfer)) {
+ dev_err(dev, "failed to get free message slot (%ld)\n",
+ PTR_ERR(xfer));
+ info->desc->ops->clear_channel(cinfo);
+ return;
+ }
+
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ scmi_dump_header_dbg(dev, &xfer->hdr);
+ info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
+ xfer);
+
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ MSG_TYPE_NOTIFICATION);
+
+ __scmi_xfer_put(minfo, xfer);
+
+ info->desc->ops->clear_channel(cinfo);
+}
+
+static void scmi_handle_response(struct scmi_chan_info *cinfo,
+ u16 xfer_id, u8 msg_type)
+{
+ struct scmi_xfer *xfer;
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
/* Are we even expecting this? */
if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
dev_err(dev, "message for %d is not expected!\n", xfer_id);
+ info->desc->ops->clear_channel(cinfo);
return;
}
xfer = &minfo->xfer_block[xfer_id];
+ /*
+ * Even if a response was indeed expected on this slot at this point,
+ * a buggy platform could wrongly reply feeding us an unexpected
+ * delayed response we're not prepared to handle: bail-out safely
+ * blaming firmware.
+ */
+ if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
+ dev_err(dev,
+ "Delayed Response for %d not expected! Buggy F/W ?\n",
+ xfer_id);
+ info->desc->ops->clear_channel(cinfo);
+ /* It was unexpected, so nobody will clear the xfer if not us */
+ __scmi_xfer_put(minfo, xfer);
+ return;
+ }
scmi_dump_header_dbg(dev, &xfer->hdr);
@@ -240,10 +271,43 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
xfer->hdr.protocol_id, xfer->hdr.seq,
msg_type);
- if (msg_type == MSG_TYPE_DELAYED_RESP)
+ if (msg_type == MSG_TYPE_DELAYED_RESP) {
+ info->desc->ops->clear_channel(cinfo);
complete(xfer->async_done);
- else
+ } else {
complete(&xfer->done);
+ }
+}
+
+/**
+ * scmi_rx_callback() - callback for receiving messages
+ *
+ * @cinfo: SCMI channel info
+ * @msg_hdr: Message header
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+ u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+
+ switch (msg_type) {
+ case MSG_TYPE_NOTIFICATION:
+ scmi_handle_notification(cinfo, msg_hdr);
+ break;
+ case MSG_TYPE_COMMAND:
+ case MSG_TYPE_DELAYED_RESP:
+ scmi_handle_response(cinfo, xfer_id, msg_type);
+ break;
+ default:
+ WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+ break;
+ }
}
/**
@@ -525,13 +589,13 @@ int scmi_handle_put(const struct scmi_handle *handle)
return 0;
}
-static int scmi_xfer_info_init(struct scmi_info *sinfo)
+static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+ struct scmi_xfers_info *info)
{
int i;
struct scmi_xfer *xfer;
struct device *dev = sinfo->dev;
const struct scmi_desc *desc = sinfo->desc;
- struct scmi_xfers_info *info = &sinfo->tx_minfo;
/* Pre-allocated messages, no more than what hdr.seq can support */
if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
@@ -566,6 +630,16 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return 0;
}
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
+{
+ int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+
+ if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+ ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+
+ return ret;
+}
+
static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
int prot_id, bool tx)
{
@@ -699,10 +773,6 @@ static int scmi_probe(struct platform_device *pdev)
info->desc = desc;
INIT_LIST_HEAD(&info->node);
- ret = scmi_xfer_info_init(info);
- if (ret)
- return ret;
-
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
idr_init(&info->rx_idr);
@@ -715,6 +785,10 @@ static int scmi_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = scmi_xfer_info_init(info);
+ if (ret)
+ return ret;
+
ret = scmi_base_protocol_init(handle);
if (ret) {
dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
@@ -827,6 +901,9 @@ ATTRIBUTE_GROUPS(versions);
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
+#ifdef CONFIG_HAVE_ARM_SMCCC
+ { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
+#endif
{ /* Sentinel */ },
};
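After this refactor, scmi_rx_callback() only extracts the message type and token from the 32-bit header and routes to the right handler, keeping the IRQ-context path as light as the kernel-doc NOTE asks. A runnable model of that dispatch, assuming the usual SCMI header layout (type in bits [9:8], token in bits [27:18]; the kernel wraps the same shifts in its MSG_XTRACT_* macros):

#include <stdio.h>
#include <stdint.h>

/* SCMI-style header fields (bit positions per the SCMI spec). */
#define MSG_TYPE(hdr)   (((hdr) >> 8) & 0x3)
#define MSG_TOKEN(hdr)  (((hdr) >> 18) & 0x3ff)

enum { MSG_CMD = 0, MSG_DELAYED_RESP = 2, MSG_NOTIFICATION = 3 };

static void rx_callback(uint32_t hdr)
{
        switch (MSG_TYPE(hdr)) {
        case MSG_NOTIFICATION:
                printf("notification\n");
                break;
        case MSG_CMD:
        case MSG_DELAYED_RESP:
                printf("response for token %u\n", MSG_TOKEN(hdr));
                break;
        default:
                printf("unknown msg_type\n");
                break;
        }
}

int main(void)
{
        rx_callback(3u << 8);           /* notification */
        rx_callback(5u << 18);          /* command response, token 5 */
        return 0;
}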
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 73077bbc4ad9..6998dc86b5ce 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -158,6 +158,21 @@ static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
shmem_fetch_response(smbox->shmem, xfer);
}
+static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_fetch_notification(smbox->shmem, max_len, xfer);
+}
+
+static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_clear_channel(smbox->shmem);
+}
+
static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
@@ -173,6 +188,8 @@ static struct scmi_transport_ops scmi_mailbox_ops = {
.send_message = mailbox_send_message,
.mark_txdone = mailbox_mark_txdone,
.fetch_response = mailbox_fetch_response,
+ .fetch_notification = mailbox_fetch_notification,
+ .clear_channel = mailbox_clear_channel,
.poll_done = mailbox_poll_done,
};
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 34f3a917dd8d..eadc171e254b 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -27,6 +27,11 @@ enum scmi_performance_protocol_cmd {
PERF_DESCRIBE_FASTCHANNEL = 0xb,
};
+enum scmi_performance_protocol_notify {
+ PERFORMANCE_LIMITS_CHANGED = 0x0,
+ PERFORMANCE_LEVEL_CHANGED = 0x1,
+};
+
struct scmi_opp {
u32 perf;
u32 power;
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 214886ce84f1..cf7f0312381b 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -12,6 +12,12 @@ enum scmi_power_protocol_cmd {
POWER_STATE_SET = 0x4,
POWER_STATE_GET = 0x5,
POWER_STATE_NOTIFY = 0x6,
+ POWER_STATE_CHANGE_REQUESTED_NOTIFY = 0x7,
+};
+
+enum scmi_power_protocol_notify {
+ POWER_STATE_CHANGED = 0x0,
+ POWER_STATE_CHANGE_REQUESTED = 0x1,
};
struct scmi_msg_resp_power_attributes {
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index eba61b9c1f53..db1b1ab303da 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -14,6 +14,10 @@ enum scmi_sensor_protocol_cmd {
SENSOR_READING_GET = 0x6,
};
+enum scmi_sensor_protocol_notify {
+ SENSOR_TRIP_POINT_EVENT = 0x0,
+};
+
struct scmi_msg_resp_sensor_attributes {
__le16 num_sensors;
u8 max_requests;
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index e1e816e0018c..0e3eaea5d852 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -67,6 +67,21 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ /* Skip only the length of the message header in the shmem area, i.e. 4 bytes */
+ xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+
+ /* Take a copy to the rx buffer. */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+}
+
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
+{
+ iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
+}
+
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
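shmem_fetch_notification() sizes the payload as the shared-memory message length minus the 4-byte header, clamped to the receiver's maximum. A plain-memory model of the same arithmetic (ordinary structs instead of the __iomem accessors):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct shared_mem {
        uint32_t length;                /* header + payload, in bytes */
        uint8_t  msg_payload[128];
};

static size_t fetch_notification(const struct shared_mem *shm,
                                 uint8_t *rx, size_t max_len)
{
        /* Skip only the 4-byte message header when sizing the payload. */
        size_t len = shm->length - 4;

        if (len > max_len)
                len = max_len;
        memcpy(rx, shm->msg_payload, len);
        return len;
}

int main(void)
{
        struct shared_mem shm = { .length = 4 + 6 };
        uint8_t rx[16];

        memcpy(shm.msg_payload, "evt001", 6);
        printf("copied %zu bytes\n", fetch_notification(&shm, rx, sizeof(rx)));
        return 0;
}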
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
new file mode 100644
index 000000000000..49bc4b0e8428
--- /dev/null
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message SMC/HVC
+ * Transport driver
+ *
+ * Copyright 2020 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_smc - Structure representing a SCMI smc transport
+ *
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ * @shmem_lock: Lock to protect access to Tx/Rx shared memory area
+ * @func_id: smc/hvc call function id
+ */
+struct scmi_smc {
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+ struct mutex shmem_lock;
+ u32 func_id;
+};
+
+static bool smc_chan_available(struct device *dev, int idx)
+{
+ struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
+ if (!np)
+ return false;
+
+ of_node_put(np);
+ return true;
+}
+
+static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ struct device *cdev = cinfo->dev;
+ struct scmi_smc *scmi_info;
+ resource_size_t size;
+ struct resource res;
+ struct device_node *np;
+ u32 func_id;
+ int ret;
+
+ if (!tx)
+ return -ENODEV;
+
+ scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
+ if (!scmi_info)
+ return -ENOMEM;
+
+ np = of_parse_phandle(cdev->of_node, "shmem", 0);
+ ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI Tx shared memory\n");
+ return ret;
+ }
+
+ size = resource_size(&res);
+ scmi_info->shmem = devm_ioremap(dev, res.start, size);
+ if (!scmi_info->shmem) {
+ dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
+ if (ret < 0)
+ return ret;
+
+ scmi_info->func_id = func_id;
+ scmi_info->cinfo = cinfo;
+ mutex_init(&scmi_info->shmem_lock);
+ cinfo->transport_info = scmi_info;
+
+ return 0;
+}
+
+static int smc_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ cinfo->transport_info = NULL;
+ scmi_info->cinfo = NULL;
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static int smc_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+ struct arm_smccc_res res;
+
+ mutex_lock(&scmi_info->shmem_lock);
+
+ shmem_tx_prepare(scmi_info->shmem, xfer);
+
+ arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+ scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
+
+ mutex_unlock(&scmi_info->shmem_lock);
+
+ /* Only SMCCC_RET_NOT_SUPPORTED is a valid error code */
+ if (res.a0)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void smc_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ shmem_fetch_response(scmi_info->shmem, xfer);
+}
+
+static bool
+smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ return shmem_poll_done(scmi_info->shmem, xfer);
+}
+
+static struct scmi_transport_ops scmi_smc_ops = {
+ .chan_available = smc_chan_available,
+ .chan_setup = smc_chan_setup,
+ .chan_free = smc_chan_free,
+ .send_message = smc_send_message,
+ .fetch_response = smc_fetch_response,
+ .poll_done = smc_poll_done,
+};
+
+const struct scmi_desc scmi_smc_desc = {
+ .ops = &scmi_smc_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 1,
+ .max_msg_size = 128,
+};
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 334c8be0c11f..e7e36aab2386 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -429,7 +429,6 @@ int sdei_event_enable(u32 event_num)
return err;
}
-EXPORT_SYMBOL(sdei_event_enable);
static int sdei_api_event_disable(u32 event_num)
{
@@ -471,7 +470,6 @@ int sdei_event_disable(u32 event_num)
return err;
}
-EXPORT_SYMBOL(sdei_event_disable);
static int sdei_api_event_unregister(u32 event_num)
{
@@ -533,7 +531,6 @@ int sdei_event_unregister(u32 event_num)
return err;
}
-EXPORT_SYMBOL(sdei_event_unregister);
/*
* unregister events, but don't destroy them as they are re-registered by
@@ -643,7 +640,6 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
return err;
}
-EXPORT_SYMBOL(sdei_event_register);
static int sdei_reregister_event_llocked(struct sdei_event *event)
{
@@ -1079,26 +1075,9 @@ static struct platform_driver sdei_driver = {
.probe = sdei_probe,
};
-static bool __init sdei_present_dt(void)
-{
- struct device_node *np, *fw_np;
-
- fw_np = of_find_node_by_name(NULL, "firmware");
- if (!fw_np)
- return false;
-
- np = of_find_matching_node(fw_np, sdei_of_match);
- if (!np)
- return false;
- of_node_put(np);
-
- return true;
-}
-
static bool __init sdei_present_acpi(void)
{
acpi_status status;
- struct platform_device *pdev;
struct acpi_table_header *sdei_table_header;
if (acpi_disabled)
@@ -1113,20 +1092,26 @@ static bool __init sdei_present_acpi(void)
if (ACPI_FAILURE(status))
return false;
- pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
- 0);
- if (IS_ERR(pdev))
- return false;
+ acpi_put_table(sdei_table_header);
return true;
}
static int __init sdei_init(void)
{
- if (sdei_present_dt() || sdei_present_acpi())
- platform_driver_register(&sdei_driver);
+ int ret = platform_driver_register(&sdei_driver);
- return 0;
+ if (!ret && sdei_present_acpi()) {
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple(sdei_driver.driver.name,
+ 0, NULL, 0);
+ if (IS_ERR(pdev))
+ pr_info("Failed to register ACPI:SDEI platform device %ld\n",
+ PTR_ERR(pdev));
+ }
+
+ return ret;
}
/*
@@ -1143,6 +1128,14 @@ int sdei_event_handler(struct pt_regs *regs,
mm_segment_t orig_addr_limit;
u32 event_num = arg->event_num;
+ /*
+ * Save and restore 'fs'.
+ * The architecture's entry code saves and restores 'fs' when taking an
+ * exception from the kernel. This ensures addr_limit isn't inherited
+ * if you interrupted something that allowed the uaccess routines to
+ * access kernel memory.
+ * Do the same here because this doesn't come via the same entry code.
+ */
orig_addr_limit = get_fs();
set_fs(USER_DS);
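The comment added above documents the save/modify/restore discipline around the handler body. A minimal standalone model of that pattern, with a plain global standing in for the task's addr_limit and hypothetical get/set helpers in place of get_fs()/set_fs():

#include <stdio.h>

/* Stand-in for the per-task addr_limit that get_fs()/set_fs() touch. */
static unsigned long addr_limit = 0xffffffffUL;     /* KERNEL_DS-like */

static unsigned long get_limit(void) { return addr_limit; }
static void set_limit(unsigned long l) { addr_limit = l; }

static void event_handler(void)
{
        /* Save and restore around the handler, as entry code would. */
        unsigned long orig = get_limit();

        set_limit(0x7fffffffUL);        /* USER_DS-like */
        printf("handler runs with limit 0x%lx\n", get_limit());
        set_limit(orig);
}

int main(void)
{
        event_handler();
        printf("restored limit 0x%lx\n", get_limit());
        return 0;
}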
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index ff39f64f2aae..86d71b0212b1 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -42,6 +42,8 @@ DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor, 0444, DMI_BIOS_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(bios_version, 0444, DMI_BIOS_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(bios_date, 0444, DMI_BIOS_DATE);
DEFINE_DMI_ATTR_WITH_SHOW(sys_vendor, 0444, DMI_SYS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_release, 0444, DMI_BIOS_RELEASE);
+DEFINE_DMI_ATTR_WITH_SHOW(ec_firmware_release, 0444, DMI_EC_FIRMWARE_RELEASE);
DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
@@ -78,6 +80,8 @@ static ssize_t get_modalias(char *buffer, size_t buffer_size)
{ "bvn", DMI_BIOS_VENDOR },
{ "bvr", DMI_BIOS_VERSION },
{ "bd", DMI_BIOS_DATE },
+ { "br", DMI_BIOS_RELEASE },
+ { "efr", DMI_EC_FIRMWARE_RELEASE },
{ "svn", DMI_SYS_VENDOR },
{ "pn", DMI_PRODUCT_NAME },
{ "pvr", DMI_PRODUCT_VERSION },
@@ -187,6 +191,8 @@ static void __init dmi_id_init_attr_table(void)
ADD_DMI_ATTR(bios_vendor, DMI_BIOS_VENDOR);
ADD_DMI_ATTR(bios_version, DMI_BIOS_VERSION);
ADD_DMI_ATTR(bios_date, DMI_BIOS_DATE);
+ ADD_DMI_ATTR(bios_release, DMI_BIOS_RELEASE);
+ ADD_DMI_ATTR(ec_firmware_release, DMI_EC_FIRMWARE_RELEASE);
ADD_DMI_ATTR(sys_vendor, DMI_SYS_VENDOR);
ADD_DMI_ATTR(product_name, DMI_PRODUCT_NAME);
ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f59163cb7cba..5066d1f1d687 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -186,6 +186,34 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
dmi_ident[slot] = p;
}
+static void __init dmi_save_release(const struct dmi_header *dm, int slot,
+ int index)
+{
+ const u8 *minor, *major;
+ char *s;
+
+ /* If the table doesn't have the field, let's return */
+ if (dmi_ident[slot] || dm->length < index)
+ return;
+
+ minor = (u8 *) dm + index;
+ major = (u8 *) dm + index - 1;
+
+ /*
+ * As per the spec, if the system doesn't support this field,
+ * the value is 0xFF.
+ */
+ if (*major == 0xFF && *minor == 0xFF)
+ return;
+
+ s = dmi_alloc(8);
+ if (!s)
+ return;
+
+ sprintf(s, "%u.%u", *major, *minor);
+
+ dmi_ident[slot] = s;
+}
+
static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
int index)
{
@@ -444,6 +472,8 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
dmi_save_ident(dm, DMI_BIOS_DATE, 8);
+ dmi_save_release(dm, DMI_BIOS_RELEASE, 21);
+ dmi_save_release(dm, DMI_EC_FIRMWARE_RELEASE, 23);
break;
case 1: /* System Information */
dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
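dmi_save_release() reads the minor byte at the given index and the major byte immediately before it, so the calls above cover offsets 20/21 (BIOS release) and 22/23 (embedded controller release) of the SMBIOS type 0 record, skipping the 0xFF/0xFF "not supported" sentinel. A runnable model of the decoding:

#include <stdio.h>
#include <stdint.h>

/* Format "major.minor" from two adjacent bytes, or NULL if unsupported. */
static const char *decode_release(const uint8_t *rec, int index,
                                  char *buf, size_t len)
{
        uint8_t minor = rec[index];
        uint8_t major = rec[index - 1];

        /* Per the SMBIOS spec, 0xFF in both bytes means "not supported". */
        if (major == 0xFF && minor == 0xFF)
                return NULL;
        snprintf(buf, len, "%u.%u", major, minor);
        return buf;
}

int main(void)
{
        uint8_t type0[32] = { 0 };
        char buf[8];
        const char *r;

        type0[20] = 1;          /* BIOS major release */
        type0[21] = 12;         /* BIOS minor release */
        r = decode_release(type0, 21, buf, sizeof(buf));
        printf("bios_release: %s\n", r ? r : "n/a");
        return 0;
}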
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 613828d3f106..6b38f9e5d203 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -106,12 +106,12 @@ config EFI_PARAMS_FROM_FDT
config EFI_RUNTIME_WRAPPERS
bool
-config EFI_ARMSTUB
+config EFI_GENERIC_STUB
bool
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
- depends on EFI_ARMSTUB
+ depends on EFI_GENERIC_STUB
default y
help
Select this config option to add support for the dtb= command
@@ -124,6 +124,17 @@ config EFI_ARMSTUB_DTB_LOADER
functionality for bootloaders that do not have such support
this option is necessary.
+config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
+ bool "Enable the command line initrd loader" if !X86
+ depends on EFI_STUB && (EFI_GENERIC_STUB || X86)
+ default y
+ help
+ Select this config option to add support for the initrd= command
+ line parameter, allowing an initrd that resides on the same volume
+ as the kernel image to be loaded into memory.
+
+ This method is deprecated.
+
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
depends on EFI_VARS
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 9e5e62f5f94d..c697e70ca7e7 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -54,8 +54,8 @@ static phys_addr_t __init efi_to_phys(unsigned long addr)
static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
static const efi_config_table_type_t arch_tables[] __initconst = {
- {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, NULL, &screen_info_table},
- {NULL_GUID, NULL, NULL}
+ {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table},
+ {}
};
static void __init init_screen_info(void)
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 9d2512913d25..f564e15fbc7e 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -407,6 +407,58 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
}
}
+static const char * const fw_err_rec_type_strs[] = {
+ "IPF SAL Error Record",
+ "SOC Firmware Error Record Type1 (Legacy CrashLog Support)",
+ "SOC Firmware Error Record Type2",
+};
+
+static void cper_print_fw_err(const char *pfx,
+ struct acpi_hest_generic_data *gdata,
+ const struct cper_sec_fw_err_rec_ref *fw_err)
+{
+ void *buf = acpi_hest_get_payload(gdata);
+ u32 offset, length = gdata->error_data_length;
+
+ printk("%s""Firmware Error Record Type: %s\n", pfx,
+ fw_err->record_type < ARRAY_SIZE(fw_err_rec_type_strs) ?
+ fw_err_rec_type_strs[fw_err->record_type] : "unknown");
+ printk("%s""Revision: %d\n", pfx, fw_err->revision);
+
+ /* Record Type based on UEFI 2.7 */
+ if (fw_err->revision == 0) {
+ printk("%s""Record Identifier: %08llx\n", pfx,
+ fw_err->record_identifier);
+ } else if (fw_err->revision == 2) {
+ printk("%s""Record Identifier: %pUl\n", pfx,
+ &fw_err->record_identifier_guid);
+ }
+
+ /*
+ * The FW error record may contain trailing data beyond the
+ * structure defined by the specification. As the fields
+ * defined (and hence the offset of any trailing data) vary
+ * with the revision, set the offset to account for this
+ * variation.
+ */
+ if (fw_err->revision == 0) {
+ /* record_identifier_guid not defined */
+ offset = offsetof(struct cper_sec_fw_err_rec_ref,
+ record_identifier_guid);
+ } else if (fw_err->revision == 1) {
+ /* record_identifier not defined */
+ offset = offsetof(struct cper_sec_fw_err_rec_ref,
+ record_identifier);
+ } else {
+ offset = sizeof(*fw_err);
+ }
+
+ buf += offset;
+ length -= offset;
+
+ print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, buf, length, true);
+}
+
static void cper_print_tstamp(const char *pfx,
struct acpi_hest_generic_data_v300 *gdata)
{
@@ -494,6 +546,16 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
#endif
+ } else if (guid_equal(sec_type, &CPER_SEC_FW_ERR_REC_REF)) {
+ struct cper_sec_fw_err_rec_ref *fw_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: Firmware Error Record Reference\n",
+ newpfx);
+ /* The minimal FW Error Record contains 16 bytes */
+ if (gdata->error_data_length >= SZ_16)
+ cper_print_fw_err(newpfx, gdata, fw_err);
+ else
+ goto err_section_too_small;
} else {
const void *err = acpi_hest_get_payload(gdata);
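The offset computation above exists because the record's defined fields, and hence where trailing data begins, vary with the revision. A standalone sketch of the offsetof() arithmetic, using a simplified layout assumed to mirror the UEFI record (8-byte common header, then a 64-bit identifier, then a 16-byte GUID):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct fw_err_rec {
        uint16_t record_type;
        uint8_t  revision;
        uint8_t  reserved[5];
        uint64_t record_identifier;             /* not defined in rev 1 */
        uint8_t  record_identifier_guid[16];    /* not defined in rev 0 */
};

static size_t trailing_data_offset(uint8_t revision)
{
        if (revision == 0)      /* record ends before the GUID field */
                return offsetof(struct fw_err_rec, record_identifier_guid);
        if (revision == 1)      /* layout diverges after the common header */
                return offsetof(struct fw_err_rec, record_identifier);
        return sizeof(struct fw_err_rec);       /* revision 2: full header */
}

int main(void)
{
        for (uint8_t rev = 0; rev <= 2; rev++)
                printf("rev %u: trailing data at offset %zu\n",
                       rev, trailing_data_offset(rev));
        return 0;
}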
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index 5d4f84781aa0..a52236e11e5f 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -114,14 +114,16 @@ static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h)
const u32 color_black = 0x00000000;
const u32 color_white = 0x00ffffff;
const u8 *src;
- u8 s8;
- int m;
+ int m, n, bytes;
+ u8 x;
- src = font->data + c * font->height;
- s8 = *(src + h);
+ bytes = BITS_TO_BYTES(font->width);
+ src = font->data + c * font->height * bytes + h * bytes;
- for (m = 0; m < 8; m++) {
- if ((s8 >> (7 - m)) & 1)
+ for (m = 0; m < font->width; m++) {
+ n = m % 8;
+ x = *(src + m / 8);
+ if ((x >> (7 - n)) & 1)
*dst = color_white;
else
*dst = color_black;
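The rewritten inner loop supports fonts wider than 8 pixels: each glyph row occupies BITS_TO_BYTES(width) bytes, and pixel m is bit 7 - (m % 8) of byte m / 8. A self-contained rendering of one byte-padded glyph row under those assumptions:

#include <stdio.h>

#define BITS_TO_BYTES(b) (((b) + 7) / 8)

/* Print one row of a glyph whose bitmap rows are byte-padded. */
static void render_row(const unsigned char *row, int width)
{
        for (int m = 0; m < width; m++) {
                unsigned char x = row[m / 8];
                int n = m % 8;

                putchar(((x >> (7 - n)) & 1) ? '#' : '.');
        }
        putchar('\n');
}

int main(void)
{
        /* 12-pixel-wide row: 2 bytes per row, high bit is leftmost pixel. */
        const unsigned char row[2] = { 0xF0, 0xF0 };

        render_row(row, 12);    /* prints ####....#### */
        return 0;
}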
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 911a2bd0f6b7..9357d6b6e87c 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -130,11 +130,8 @@ static ssize_t systab_show(struct kobject *kobj,
if (efi.smbios != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
- if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86)) {
- extern char *efi_systab_show_arch(char *str);
-
+ if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
str = efi_systab_show_arch(str);
- }
return str - buf;
}
@@ -502,21 +499,21 @@ void __init efi_mem_reserve(phys_addr_t addr, u64 size)
}
static const efi_config_table_type_t common_tables[] __initconst = {
- {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
- {ACPI_TABLE_GUID, "ACPI", &efi.acpi},
- {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
- {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
- {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
- {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi_mem_attr_table},
- {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi_rng_seed},
- {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
- {LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
- {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &mem_reserve},
- {EFI_RT_PROPERTIES_TABLE_GUID, "RTPROP", &rt_prop},
+ {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
+ {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
+ {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
+ {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
+ {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
+ {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
+ {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
+ {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
+ {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
+ {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
+ {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
- {DELLEMC_EFI_RCI2_TABLE_GUID, NULL, &rci2_table_phys},
+ {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
- {NULL_GUID, NULL, NULL},
+ {},
};
static __init int match_config_table(const efi_guid_t *guid,
@@ -525,15 +522,13 @@ static __init int match_config_table(const efi_guid_t *guid,
{
int i;
- if (table_types) {
- for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
- if (!efi_guidcmp(*guid, table_types[i].guid)) {
- *(table_types[i].ptr) = table;
- if (table_types[i].name)
- pr_cont(" %s=0x%lx ",
- table_types[i].name, table);
- return 1;
- }
+ for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
+ if (!efi_guidcmp(*guid, table_types[i].guid)) {
+ *(table_types[i].ptr) = table;
+ if (table_types[i].name[0])
+ pr_cont("%s=0x%lx ",
+ table_types[i].name, table);
+ return 1;
}
}
@@ -570,7 +565,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
table = tbl32[i].table;
}
- if (!match_config_table(guid, table, common_tables))
+ if (!match_config_table(guid, table, common_tables) && arch_tables)
match_config_table(guid, table, arch_tables);
}
pr_cont("\n");
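With the name moved to a fixed-size array at the end of the entry, the table can be terminated by an empty initializer, and the loop walks entries until it reaches the all-zero GUID. A compact userspace model of sentinel-terminated matching (toy 16-byte GUIDs instead of efi_guid_t):

#include <stdio.h>
#include <string.h>

struct guid { unsigned char b[16]; };

struct table_type {
        struct guid guid;
        unsigned long *ptr;
        char name[16];
};

static unsigned long acpi20, smbios;

static const struct table_type tables[] = {
        { {{0x01}}, &acpi20, "ACPI 2.0" },
        { {{0x02}}, &smbios, "SMBIOS"   },
        {}                              /* zero-GUID sentinel */
};

static int match_table(const struct guid *g, unsigned long addr)
{
        static const struct guid null_guid;

        for (int i = 0; memcmp(&tables[i].guid, &null_guid, 16); i++) {
                if (!memcmp(g, &tables[i].guid, 16)) {
                        *tables[i].ptr = addr;
                        printf("%s=0x%lx\n", tables[i].name, addr);
                        return 1;
                }
        }
        return 0;
}

int main(void)
{
        struct guid g = {{0x02}};

        match_table(&g, 0xfeed0000UL);
        return 0;
}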
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 78ad1ba8c987..26528a46d99e 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -522,8 +522,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
NULL, "%s", short_name);
kfree(short_name);
- if (ret)
+ if (ret) {
+ kobject_put(&new_var->kobj);
return ret;
+ }
kobject_uevent(&new_var->kobj, KOBJ_ADD);
if (efivar_entry_add(new_var, &efivar_sysfs_list)) {
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 094eabdecfe6..cce4a7436052 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -7,7 +7,7 @@
#
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
@@ -23,15 +23,19 @@ cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
-cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
+cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt
-KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
+KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
-include $(srctree)/drivers/firmware/efi/libstub/hidden.h \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector) \
+ $(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS
+# remove SCS flags from all objects in this directory
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
@@ -42,16 +46,17 @@ KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
file.o mem.o random.o randomalloc.o pci.o \
- skip_spaces.o lib-cmdline.o lib-ctype.o
+ skip_spaces.o lib-cmdline.o lib-ctype.o \
+ alignedmem.o relocate.o vsprintf.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
-arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+efi-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
- $(patsubst %.c,lib-%.o,$(arm-deps-y))
+lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o fdt.o string.o \
+ $(patsubst %.c,lib-%.o,$(efi-deps-y))
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
@@ -60,6 +65,25 @@ CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
+# For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the
+# .bss section, so the .bss section of the EFI stub needs to be included in the
+# .data section of the compressed kernel to ensure initialization. Rename the
+# .bss section here so it's easy to pick out in the linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_X86) += --rename-section .bss=.bss.efistub,load,alloc
+STUBCOPY_RELOC-$(CONFIG_X86_32) := R_386_32
+STUBCOPY_RELOC-$(CONFIG_X86_64) := R_X86_64_64
+
+#
+# ARM discards the .data section because it disallows r/w data in the
+# decompressor. So move our .data to .data.efistub and .bss to .bss.efistub,
+# which are preserved explicitly by the decompressor linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
+ --rename-section .bss=.bss.efistub,load,alloc
+STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
+
+#
# arm64 puts the stub in the kernel proper, which will unnecessarily retain all
# code indefinitely unless it is annotated as __init/__initdata/__initconst etc.
# So let's apply the __init annotations at the section level, by prefixing
@@ -73,8 +97,8 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
# a verification pass to see if any absolute relocations exist in any of the
# object files.
#
-extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y)
-lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y))
+extra-y := $(lib-y)
+lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
@@ -97,11 +121,3 @@ quiet_cmd_stubcopy = STUBCPY $@
/bin/false; \
fi; \
$(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
-
-#
-# ARM discards the .data section because it disallows r/w data in the
-# decompressor. So move our .data to .data.efistub, which is preserved
-# explicitly by the decompressor linker script.
-#
-STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
-STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
new file mode 100644
index 000000000000..cc89c4d6196f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/alignedmem.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_allocate_pages_aligned() - Allocate memory pages
+ * @size: minimum number of bytes to allocate
+ * @addr: On return the address of the first allocated page. The first
+ * allocated page has alignment EFI_ALLOC_ALIGN which is an
+ * architecture dependent multiple of the page size.
+ * @max: the address that the last allocated memory page shall not
+ * exceed
+ * @align: minimum alignment of the base of the allocation
+ *
+ * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
+ * to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
+ * not exceed the address given by @max.
+ *
+ * Return: status code
+ */
+efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+ unsigned long max, unsigned long align)
+{
+ efi_physical_addr_t alloc_addr;
+ efi_status_t status;
+ int slack;
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ alloc_addr = ALIGN_DOWN(max + 1, align) - 1;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ slack = align / EFI_PAGE_SIZE - 1;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
+ &alloc_addr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *addr = ALIGN((unsigned long)alloc_addr, align);
+
+ if (slack > 0) {
+ int l = (alloc_addr % align) / EFI_PAGE_SIZE;
+
+ if (l) {
+ efi_bs_call(free_pages, alloc_addr, slack - l + 1);
+ slack = l - 1;
+ }
+ if (slack)
+ efi_bs_call(free_pages, *addr + size, slack);
+ }
+ return EFI_SUCCESS;
+}
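efi_allocate_pages_aligned() over-allocates by slack = align / EFI_PAGE_SIZE - 1 pages so that an aligned base is guaranteed to fall inside the range, then frees the unused head and tail. A worked model of the page arithmetic with concrete numbers (no boot services involved):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * Model the head/tail trimming: allocate size + slack pages, align the
 * base up, then release the unused pages on both sides.
 */
static void trim(unsigned long alloc_addr, unsigned long size,
                 unsigned long align)
{
        unsigned long slack = align / PAGE_SIZE - 1;
        unsigned long addr = (alloc_addr + align - 1) & ~(align - 1);
        unsigned long head = (alloc_addr % align) / PAGE_SIZE;

        printf("aligned base: 0x%lx\n", addr);
        if (head) {
                printf("free %lu page(s) at head\n", slack - head + 1);
                slack = head - 1;
        }
        if (slack)
                printf("free %lu page(s) at tail after 0x%lx\n",
                       slack, addr + size);
}

int main(void)
{
        /* 64 KiB alignment: 15 pages of slack around a 32 KiB request. */
        trim(0x1003000UL, 0x8000UL, 0x10000UL);
        return 0;
}

For these sample inputs, 13 head pages and 2 tail pages are returned, leaving exactly the 8 aligned pages that were requested.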
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 7826553af2ba..40243f524556 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -18,7 +18,7 @@ efi_status_t check_platform_features(void)
/* LPAE kernels need compatible hardware */
block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
if (block < 5) {
- pr_efi_err("This LPAE kernel is not supported by your CPU\n");
+ efi_err("This LPAE kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
@@ -120,7 +120,7 @@ static efi_status_t reserve_kernel_base(unsigned long dram_base,
*/
status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n");
+ efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n");
return status;
}
@@ -162,7 +162,7 @@ static efi_status_t reserve_kernel_base(unsigned long dram_base,
(end - start) / EFI_PAGE_SIZE,
&start);
if (status != EFI_SUCCESS) {
- pr_efi_err("reserve_kernel_base(): alloc failed.\n");
+ efi_err("reserve_kernel_base(): alloc failed.\n");
goto out;
}
break;
@@ -199,14 +199,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long kernel_base;
efi_status_t status;
- /*
- * Verify that the DRAM base address is compatible with the ARM
- * boot protocol, which determines the base of DRAM by masking
- * off the low 27 bits of the address at which the zImage is
- * loaded. These assumptions are made by the decompressor,
- * before any memory map is available.
- */
- kernel_base = round_up(dram_base, SZ_128M);
+ /* use a 16 MiB aligned base for the decompressed kernel */
+ kernel_base = round_up(dram_base, SZ_16M) + TEXT_OFFSET;
/*
* Note that some platforms (notably, the Raspberry Pi 2) put
@@ -215,41 +209,14 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
* base of the kernel image is only partially used at the moment.
* (Up to 5 pages are used for the swapper page tables)
*/
- kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
-
- status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size);
- if (status != EFI_SUCCESS) {
- pr_efi_err("Unable to allocate memory for uncompressed kernel.\n");
- return status;
- }
-
- /*
- * Relocate the zImage, so that it appears in the lowest 128 MB
- * memory window.
- */
- *image_addr = (unsigned long)image->image_base;
- *image_size = image->image_size;
- status = efi_relocate_kernel(image_addr, *image_size, *image_size,
- kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
+ status = reserve_kernel_base(kernel_base - 5 * PAGE_SIZE, reserve_addr,
+ reserve_size);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to relocate kernel.\n");
- efi_free(*reserve_size, *reserve_addr);
- *reserve_size = 0;
+ efi_err("Unable to allocate memory for uncompressed kernel.\n");
return status;
}
- /*
- * Check to see if we were able to allocate memory low enough
- * in memory. The kernel determines the base of DRAM from the
- * address at which the zImage is loaded.
- */
- if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
- pr_efi_err("Failed to relocate kernel, no low memory available.\n");
- efi_free(*reserve_size, *reserve_addr);
- *reserve_size = 0;
- efi_free(*image_size, *image_addr);
- *image_size = 0;
- return EFI_LOAD_ERROR;
- }
+ *image_addr = kernel_base;
+ *image_size = 0;
return EFI_SUCCESS;
}
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index fc9f8ab533a7..7f6a57dec513 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -26,14 +26,23 @@ efi_status_t check_platform_features(void)
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
- pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n");
+ efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else
- pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n");
+ efi_err("This 16 KB granular kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
}
+/*
+ * Relocatable kernels can fix up the misalignment with respect to
+ * MIN_KIMG_ALIGN, so they only require a minimum alignment of EFI_KIMG_ALIGN
+ * (which accounts for the alignment of statically allocated objects such as
+ * the swapper stack.)
+ */
+static const u64 min_kimg_align = IS_ENABLED(CONFIG_RELOCATABLE) ? EFI_KIMG_ALIGN
+ : MIN_KIMG_ALIGN;
+
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
@@ -43,106 +52,63 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
- unsigned long preferred_offset;
- u64 phys_seed = 0;
+ u32 phys_seed = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- if (!nokaslr()) {
+ if (!efi_nokaslr) {
status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
- pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+ efi_info("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
} else if (status != EFI_SUCCESS) {
- pr_efi_err("efi_get_random_bytes() failed\n");
+ efi_err("efi_get_random_bytes() failed\n");
return status;
}
} else {
- pr_efi("KASLR disabled on kernel command line\n");
+ efi_info("KASLR disabled on kernel command line\n");
}
}
- /*
- * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
- * a 2 MB aligned base, which itself may be lower than dram_base, as
- * long as the resulting offset equals or exceeds it.
- */
- preferred_offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;
- if (preferred_offset < dram_base)
- preferred_offset += MIN_KIMG_ALIGN;
+ if (image->image_base != _text)
+ efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata);
+ *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
- * Produce a displacement in the interval [0, MIN_KIMG_ALIGN)
- * that doesn't violate this kernel's de-facto alignment
- * constraints.
- */
- u32 mask = (MIN_KIMG_ALIGN - 1) & ~(EFI_KIMG_ALIGN - 1);
- u32 offset = (phys_seed >> 32) & mask;
-
- /*
- * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
- * be a multiple of EFI_KIMG_ALIGN, and we must ensure that
- * we preserve the misalignment of 'offset' relative to
- * EFI_KIMG_ALIGN so that statically allocated objects whose
- * alignment exceeds PAGE_SIZE appear correctly aligned in
- * memory.
- */
- offset |= TEXT_OFFSET % EFI_KIMG_ALIGN;
-
- /*
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
- *reserve_size = kernel_memsize + offset;
- status = efi_random_alloc(*reserve_size,
- MIN_KIMG_ALIGN, reserve_addr,
- (u32)phys_seed);
-
- *image_addr = *reserve_addr + offset;
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+ reserve_addr, phys_seed);
} else {
- /*
- * Else, try a straight allocation at the preferred offset.
- * This will work around the issue where, if dram_base == 0x0,
- * efi_low_alloc() refuses to allocate at 0x0 (to prevent the
- * address of the allocation to be mistaken for a FAIL return
- * value or a NULL pointer). It will also ensure that, on
- * platforms where the [dram_base, dram_base + TEXT_OFFSET)
- * interval is partially occupied by the firmware (like on APM
- * Mustang), we can still place the kernel at the address
- * 'dram_base + TEXT_OFFSET'.
- */
- *image_addr = (unsigned long)_text;
- if (*image_addr == preferred_offset)
- return EFI_SUCCESS;
-
- *image_addr = *reserve_addr = preferred_offset;
- *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
-
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- *reserve_size / EFI_PAGE_SIZE,
- (efi_physical_addr_t *)reserve_addr);
+ status = EFI_OUT_OF_RESOURCES;
}
if (status != EFI_SUCCESS) {
- *reserve_size = kernel_memsize + TEXT_OFFSET;
- status = efi_low_alloc(*reserve_size,
- MIN_KIMG_ALIGN, reserve_addr);
+ if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align)) {
+ /*
+ * Just execute from wherever we were loaded by the
+ * UEFI PE/COFF loader if the alignment is suitable.
+ */
+ *image_addr = (u64)_text;
+ *reserve_size = 0;
+ return EFI_SUCCESS;
+ }
+
+ status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
+ ULONG_MAX, min_kimg_align);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to relocate kernel\n");
+ efi_err("Failed to relocate kernel\n");
*reserve_size = 0;
return status;
}
- *image_addr = *reserve_addr + TEXT_OFFSET;
}
- if (image->image_base != _text)
- pr_efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
-
+ *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align;
memcpy((void *)*image_addr, _text, kernel_size);
return EFI_SUCCESS;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 9f34c7242939..89f075275300 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -7,60 +7,151 @@
* Copyright 2011 Intel Corporation; author Matt Fleming
*/
+#include <stdarg.h>
+
+#include <linux/ctype.h>
#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */
#include <asm/efi.h>
+#include <asm/setup.h>
#include "efistub.h"
-static bool __efistub_global efi_nochunk;
-static bool __efistub_global efi_nokaslr;
-static bool __efistub_global efi_noinitrd;
-static bool __efistub_global efi_quiet;
-static bool __efistub_global efi_novamap;
-static bool __efistub_global efi_nosoftreserve;
-static bool __efistub_global efi_disable_pci_dma =
- IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+bool efi_nochunk;
+bool efi_nokaslr;
+bool efi_noinitrd;
+int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+bool efi_novamap;
-bool __pure nochunk(void)
-{
- return efi_nochunk;
-}
-bool __pure nokaslr(void)
-{
- return efi_nokaslr;
-}
-bool __pure noinitrd(void)
+static bool efi_nosoftreserve;
+static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+
+bool __pure __efi_soft_reserve_enabled(void)
{
- return efi_noinitrd;
+ return !efi_nosoftreserve;
}
-bool __pure is_quiet(void)
+
+void efi_char16_puts(efi_char16_t *str)
{
- return efi_quiet;
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, str);
}
-bool __pure novamap(void)
+
+static
+u32 utf8_to_utf32(const u8 **s8)
{
- return efi_novamap;
+ u32 c32;
+ u8 c0, cx;
+ size_t clen, i;
+
+ c0 = cx = *(*s8)++;
+ /*
+ * The position of the most-significant 0 bit gives us the length of
+ * a multi-octet encoding.
+ */
+ for (clen = 0; cx & 0x80; ++clen)
+ cx <<= 1;
+ /*
+ * If the 0 bit is in position 8, this is a valid single-octet
+ * encoding. If the 0 bit is in position 7 or positions 1-3, the
+ * encoding is invalid.
+ * In either case, we just return the first octet.
+ */
+ if (clen < 2 || clen > 4)
+ return c0;
+ /* Get the bits from the first octet. */
+ c32 = cx >> clen--;
+ for (i = 0; i < clen; ++i) {
+ /* Trailing octets must have 10 in most significant bits. */
+ cx = (*s8)[i] ^ 0x80;
+ if (cx & 0xc0)
+ return c0;
+ c32 = (c32 << 6) | cx;
+ }
+ /*
+ * Check for validity:
+ * - The character must be in the Unicode range.
+ * - It must not be a surrogate.
+ * - It must be encoded using the correct number of octets.
+ */
+ if (c32 > 0x10ffff ||
+ (c32 & 0xf800) == 0xd800 ||
+ clen != (c32 >= 0x80) + (c32 >= 0x800) + (c32 >= 0x10000))
+ return c0;
+ *s8 += clen;
+ return c32;
}
-bool __pure __efi_soft_reserve_enabled(void)
+
+void efi_puts(const char *str)
{
- return !efi_nosoftreserve;
+ efi_char16_t buf[128];
+ size_t pos = 0, lim = ARRAY_SIZE(buf);
+ const u8 *s8 = (const u8 *)str;
+ u32 c32;
+
+ while (*s8) {
+ if (*s8 == '\n')
+ buf[pos++] = L'\r';
+ c32 = utf8_to_utf32(&s8);
+ if (c32 < 0x10000) {
+ /* Characters in plane 0 use a single word. */
+ buf[pos++] = c32;
+ } else {
+ /*
+ * Characters in other planes encode into a surrogate
+ * pair.
+ */
+ buf[pos++] = (0xd800 - (0x10000 >> 10)) + (c32 >> 10);
+ buf[pos++] = 0xdc00 + (c32 & 0x3ff);
+ }
+ if (*s8 == '\0' || pos >= lim - 2) {
+ buf[pos] = L'\0';
+ efi_char16_puts(buf);
+ pos = 0;
+ }
+ }
}
-void efi_printk(char *str)
+int efi_printk(const char *fmt, ...)
{
- char *s8;
+ char printf_buf[256];
+ va_list args;
+ int printed;
+ int loglevel = printk_get_level(fmt);
+
+ switch (loglevel) {
+ case '0' ... '9':
+ loglevel -= '0';
+ break;
+ default:
+ /*
+ * Use loglevel -1 for cases where we just want to print to
+ * the screen.
+ */
+ loglevel = -1;
+ break;
+ }
- for (s8 = str; *s8; s8++) {
- efi_char16_t ch[2] = { 0 };
+ if (loglevel >= efi_loglevel)
+ return 0;
- ch[0] = *s8;
- if (*s8 == '\n') {
- efi_char16_t nl[2] = { '\r', 0 };
- efi_char16_printk(nl);
- }
+ if (loglevel >= 0)
+ efi_puts("EFI stub: ");
+
+ fmt = printk_skip_level(fmt);
+
+ va_start(args, fmt);
+ printed = vsnprintf(printf_buf, sizeof(printf_buf), fmt, args);
+ va_end(args);
- efi_char16_printk(ch);
+ efi_puts(printf_buf);
+ if (printed >= sizeof(printf_buf)) {
+ efi_puts("[Message truncated]\n");
+ return -1;
}
+
+ return printed;
}
/*
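utf8_to_utf32() above derives the sequence length from the position of the most significant 0 bit in the first octet. A tiny standalone check of that length detection; for example, the Euro sign U+20AC encodes as E2 82 AC, a three-octet sequence:

#include <stdio.h>

/* Leading-ones count gives the length of a UTF-8 multi-octet sequence. */
static int seq_len(unsigned char c0)
{
        int len = 0;

        while (c0 & 0x80) {
                c0 <<= 1;
                len++;
        }
        return len;     /* 0: ASCII, 2-4: multi-octet, 1 or >4: invalid */
}

int main(void)
{
        printf("len(0xE2) = %d\n", seq_len(0xE2));      /* 3 */
        printf("len(0x41) = %d\n", seq_len(0x41));      /* 0 -> single octet */
        printf("len(0x80) = %d\n", seq_len(0x80));      /* 1 -> invalid */
        return 0;
}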
@@ -91,7 +182,7 @@ efi_status_t efi_parse_options(char const *cmdline)
if (!strcmp(param, "nokaslr")) {
efi_nokaslr = true;
} else if (!strcmp(param, "quiet")) {
- efi_quiet = true;
+ efi_loglevel = CONSOLE_LOGLEVEL_QUIET;
} else if (!strcmp(param, "noinitrd")) {
efi_noinitrd = true;
} else if (!strcmp(param, "efi") && val) {
@@ -105,6 +196,11 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_disable_pci_dma = true;
if (parse_option_str(val, "no_disable_early_pci_dma"))
efi_disable_pci_dma = false;
+ if (parse_option_str(val, "debug"))
+ efi_loglevel = CONSOLE_LOGLEVEL_DEBUG;
+ } else if (!strcmp(param, "video") &&
+ val && strstarts(val, "efifb:")) {
+ efi_parse_option_graphics(val + strlen("efifb:"));
}
}
efi_bs_call(free_pool, buf);
@@ -112,97 +208,79 @@ efi_status_t efi_parse_options(char const *cmdline)
}
/*
- * Get the number of UTF-8 bytes corresponding to an UTF-16 character.
- * This overestimates for surrogates, but that is okay.
- */
-static int efi_utf8_bytes(u16 c)
-{
- return 1 + (c >= 0x80) + (c >= 0x800);
-}
-
-/*
- * Convert an UTF-16 string, not necessarily null terminated, to UTF-8.
- */
-static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
-{
- unsigned int c;
-
- while (n--) {
- c = *src++;
- if (n && c >= 0xd800 && c <= 0xdbff &&
- *src >= 0xdc00 && *src <= 0xdfff) {
- c = 0x10000 + ((c & 0x3ff) << 10) + (*src & 0x3ff);
- src++;
- n--;
- }
- if (c >= 0xd800 && c <= 0xdfff)
- c = 0xfffd; /* Unmatched surrogate */
- if (c < 0x80) {
- *dst++ = c;
- continue;
- }
- if (c < 0x800) {
- *dst++ = 0xc0 + (c >> 6);
- goto t1;
- }
- if (c < 0x10000) {
- *dst++ = 0xe0 + (c >> 12);
- goto t2;
- }
- *dst++ = 0xf0 + (c >> 18);
- *dst++ = 0x80 + ((c >> 12) & 0x3f);
- t2:
- *dst++ = 0x80 + ((c >> 6) & 0x3f);
- t1:
- *dst++ = 0x80 + (c & 0x3f);
- }
-
- return dst;
-}
-
-/*
* Convert the unicode UEFI command line to ASCII to pass to kernel.
* Size of memory allocated return in *cmd_line_len.
* Returns NULL on error.
*/
-char *efi_convert_cmdline(efi_loaded_image_t *image,
- int *cmd_line_len, unsigned long max_addr)
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
{
const u16 *s2;
- u8 *s1 = NULL;
unsigned long cmdline_addr = 0;
- int load_options_chars = efi_table_attr(image, load_options_size) / 2;
+ int options_chars = efi_table_attr(image, load_options_size) / 2;
const u16 *options = efi_table_attr(image, load_options);
- int options_bytes = 0; /* UTF-8 bytes */
- int options_chars = 0; /* UTF-16 chars */
+ int options_bytes = 0, safe_options_bytes = 0; /* UTF-8 bytes */
+ bool in_quote = false;
efi_status_t status;
- u16 zero = 0;
if (options) {
s2 = options;
- while (*s2 && *s2 != '\n'
- && options_chars < load_options_chars) {
- options_bytes += efi_utf8_bytes(*s2++);
- options_chars++;
+ while (options_bytes < COMMAND_LINE_SIZE && options_chars--) {
+ u16 c = *s2++;
+
+ if (c < 0x80) {
+ if (c == L'\0' || c == L'\n')
+ break;
+ if (c == L'"')
+ in_quote = !in_quote;
+ else if (!in_quote && isspace((char)c))
+ safe_options_bytes = options_bytes;
+
+ options_bytes++;
+ continue;
+ }
+
+ /*
+ * Get the number of UTF-8 bytes corresponding to a
+ * UTF-16 character.
+ * The first part handles everything in the BMP.
+ */
+ options_bytes += 2 + (c >= 0x800);
+ /*
+ * Add one more byte for valid surrogate pairs. Invalid
+ * surrogates will be replaced with 0xfffd and take up
+ * only 3 bytes.
+ */
+ if ((c & 0xfc00) == 0xd800) {
+ /*
+ * If the very last word is a high surrogate,
+ * we must ignore it since we can't access the
+ * low surrogate.
+ */
+ if (!options_chars) {
+ options_bytes -= 3;
+ } else if ((*s2 & 0xfc00) == 0xdc00) {
+ options_bytes++;
+ options_chars--;
+ s2++;
+ }
+ }
+ }
+ if (options_bytes >= COMMAND_LINE_SIZE) {
+ options_bytes = safe_options_bytes;
+ efi_err("Command line is too long: truncated to %d bytes\n",
+ options_bytes);
}
- }
-
- if (!options_chars) {
- /* No command line options, so return empty string*/
- options = &zero;
}
options_bytes++; /* NUL termination */
- status = efi_allocate_pages(options_bytes, &cmdline_addr, max_addr);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, options_bytes,
+ (void **)&cmdline_addr);
if (status != EFI_SUCCESS)
return NULL;
- s1 = (u8 *)cmdline_addr;
- s2 = (const u16 *)options;
-
- s1 = efi_utf16_to_utf8(s1, s2, options_chars);
- *s1 = '\0';
+ snprintf((char *)cmdline_addr, options_bytes, "%.*ls",
+ options_bytes - 1, options);
*cmd_line_len = options_bytes;
return (char *)cmdline_addr;
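The sizing pass in efi_convert_cmdline() counts one UTF-8 byte for ASCII, 2 + (c >= 0x800) for the rest of the BMP, and one extra byte when a valid surrogate pair is consumed. A runnable mirror of that accounting:

#include <stdio.h>
#include <stdint.h>

/* UTF-8 bytes needed for a UTF-16 sequence, mirroring the counting pass. */
static int utf16_to_utf8_bytes(const uint16_t *s, int n)
{
        int bytes = 0;

        while (n--) {
                uint16_t c = *s++;

                if (c < 0x80) {
                        bytes += 1;
                } else {
                        bytes += 2 + (c >= 0x800);
                        /* A valid surrogate pair adds one more byte. */
                        if ((c & 0xfc00) == 0xd800 && n &&
                            (*s & 0xfc00) == 0xdc00) {
                                bytes++;
                                s++;
                                n--;
                        }
                }
        }
        return bytes;
}

int main(void)
{
        const uint16_t cmdline[] = { 'r', 'o', ' ', 0x20ac,     /* euro */
                                     0xd83d, 0xde00 };          /* emoji */

        /* 3 ASCII bytes + 3 for U+20AC + 4 for the pair = 10 */
        printf("%d UTF-8 bytes\n", utf16_to_utf8_bytes(cmdline, 6));
        return 0;
}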
@@ -285,8 +363,8 @@ fail:
void *get_efi_config_table(efi_guid_t guid)
{
- unsigned long tables = efi_table_attr(efi_system_table(), tables);
- int nr_tables = efi_table_attr(efi_system_table(), nr_tables);
+ unsigned long tables = efi_table_attr(efi_system_table, tables);
+ int nr_tables = efi_table_attr(efi_system_table, nr_tables);
int i;
for (i = 0; i < nr_tables; i++) {
@@ -301,12 +379,6 @@ void *get_efi_config_table(efi_guid_t guid)
return NULL;
}
-void efi_char16_printk(efi_char16_t *str)
-{
- efi_call_proto(efi_table_attr(efi_system_table(), con_out),
- output_string, str);
-}
-
/*
* The LINUX_EFI_INITRD_MEDIA_GUID vendor media device path below provides a way
* for the firmware or bootloader to expose the initrd data directly to the stub
@@ -348,6 +420,7 @@ static const struct {
* %EFI_OUT_OF_RESOURCES if memory allocation failed
* %EFI_LOAD_ERROR in all other cases
*/
+static
efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
unsigned long *load_size,
unsigned long max)
@@ -360,9 +433,6 @@ efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
efi_handle_t handle;
efi_status_t status;
- if (!load_addr || !load_size)
- return EFI_INVALID_PARAMETER;
-
dp = (efi_device_path_protocol_t *)&initrd_dev_path;
status = efi_bs_call(locate_device_path, &lf2_proto_guid, &dp, &handle);
if (status != EFI_SUCCESS)
@@ -392,3 +462,80 @@ efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
*load_size = initrd_size;
return EFI_SUCCESS;
}
+
+static
+efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit)
+{
+ if (!IS_ENABLED(CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER) ||
+ (IS_ENABLED(CONFIG_X86) && (!efi_is_native() || image == NULL))) {
+ *load_addr = *load_size = 0;
+ return EFI_SUCCESS;
+ }
+
+ return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
+ soft_limit, hard_limit,
+ load_addr, load_size);
+}
+
+efi_status_t efi_load_initrd(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit)
+{
+ efi_status_t status;
+
+ if (!load_addr || !load_size)
+ return EFI_INVALID_PARAMETER;
+
+ status = efi_load_initrd_dev_path(load_addr, load_size, hard_limit);
+ if (status == EFI_SUCCESS) {
+ efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
+ } else if (status == EFI_NOT_FOUND) {
+ status = efi_load_initrd_cmdline(image, load_addr, load_size,
+ soft_limit, hard_limit);
+ if (status == EFI_SUCCESS && *load_size > 0)
+ efi_info("Loaded initrd from command line option\n");
+ }
+
+ return status;
+}
+
+efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
+{
+ efi_event_t events[2], timer;
+ unsigned long index;
+ efi_simple_text_input_protocol_t *con_in;
+ efi_status_t status;
+
+ con_in = efi_table_attr(efi_system_table, con_in);
+ if (!con_in)
+ return EFI_UNSUPPORTED;
+ efi_set_event_at(events, 0, efi_table_attr(con_in, wait_for_key));
+
+ status = efi_bs_call(create_event, EFI_EVT_TIMER, 0, NULL, NULL, &timer);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(set_timer, timer, EfiTimerRelative,
+ EFI_100NSEC_PER_USEC * usec);
+ if (status != EFI_SUCCESS)
+ return status;
+ efi_set_event_at(events, 1, timer);
+
+ status = efi_bs_call(wait_for_event, 2, events, &index);
+ if (status == EFI_SUCCESS) {
+ if (index == 0)
+ status = efi_call_proto(con_in, read_keystroke, key);
+ else
+ status = EFI_TIMEOUT;
+ }
+
+ efi_bs_call(close_event, timer);
+
+ return status;
+}
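
Illustration (not part of the patch): a hypothetical caller could use the new
helper to poll for a keystroke with a timeout, e.g.:

	efi_input_key_t key;
	efi_status_t status;

	/* wait up to five seconds for a keystroke */
	status = efi_wait_for_key(5 * EFI_USEC_PER_SEC, &key);
	if (status == EFI_SUCCESS)
		efi_info("scan code %u, char %u\n",
			 key.scan_code, key.unicode_char);
	else if (status == EFI_TIMEOUT)
		efi_info("no key pressed\n");
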
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 99a5cde7c2d8..e97370bdfdb0 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -36,14 +36,9 @@
#endif
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-static bool __efistub_global flat_va_mapping;
+static bool flat_va_mapping;
-static efi_system_table_t *__efistub_global sys_table;
-
-__pure efi_system_table_t *efi_system_table(void)
-{
- return sys_table;
-}
+const efi_system_table_t *efi_system_table;
static struct screen_info *setup_graphics(void)
{
@@ -60,12 +55,16 @@ static struct screen_info *setup_graphics(void)
si = alloc_screen_info();
if (!si)
return NULL;
- efi_setup_gop(si, &gop_proto, size);
+ status = efi_setup_gop(si, &gop_proto, size);
+ if (status != EFI_SUCCESS) {
+ free_screen_info(si);
+ return NULL;
+ }
}
return si;
}
-void install_memreserve_table(void)
+static void install_memreserve_table(void)
{
struct linux_efi_memreserve *rsv;
efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
@@ -74,7 +73,7 @@ void install_memreserve_table(void)
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
(void **)&rsv);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to allocate memreserve entry!\n");
+ efi_err("Failed to allocate memreserve entry!\n");
return;
}
@@ -85,7 +84,7 @@ void install_memreserve_table(void)
status = efi_bs_call(install_configuration_table,
&memreserve_table_guid, rsv);
if (status != EFI_SUCCESS)
- pr_efi_err("Failed to install memreserve config table!\n");
+ efi_err("Failed to install memreserve config table!\n");
}
static unsigned long get_dram_base(void)
@@ -145,7 +144,8 @@ asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
* for both architectures, with the arch-specific code provided in the
* handle_kernel_image() function.
*/
-efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
{
efi_loaded_image_t *image;
efi_status_t status;
@@ -167,10 +167,10 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
efi_properties_table_t *prop_tbl;
unsigned long max_addr;
- sys_table = sys_table_arg;
+ efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
- if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
status = EFI_INVALID_PARAMETER;
goto fail;
}
@@ -184,16 +184,16 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
* information about the running image, such as size and the command
* line.
*/
- status = sys_table->boottime->handle_protocol(handle,
+ status = efi_system_table->boottime->handle_protocol(handle,
&loaded_image_proto, (void *)&image);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to get loaded image protocol\n");
+ efi_err("Failed to get loaded image protocol\n");
goto fail;
}
dram_base = get_dram_base();
if (dram_base == EFI_ERROR) {
- pr_efi_err("Failed to find DRAM base\n");
+ efi_err("Failed to find DRAM base\n");
status = EFI_LOAD_ERROR;
goto fail;
}
@@ -203,22 +203,32 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
* protocol. We are going to copy the command line into the
* device tree, so this can be allocated anywhere.
*/
- cmdline_ptr = efi_convert_cmdline(image, &cmdline_size, ULONG_MAX);
+ cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
if (!cmdline_ptr) {
- pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
+ efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
status = EFI_OUT_OF_RESOURCES;
goto fail;
}
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
- cmdline_size == 0)
- efi_parse_options(CONFIG_CMDLINE);
+ cmdline_size == 0) {
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail_free_cmdline;
+ }
+ }
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
- efi_parse_options(cmdline_ptr);
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
+ status = efi_parse_options(cmdline_ptr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail_free_cmdline;
+ }
+ }
- pr_efi("Booting Linux Kernel...\n");
+ efi_info("Booting Linux Kernel...\n");
si = setup_graphics();
@@ -227,8 +237,8 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
&reserve_size,
dram_base, image);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to relocate kernel\n");
- goto fail_free_cmdline;
+ efi_err("Failed to relocate kernel\n");
+ goto fail_free_screeninfo;
}
efi_retrieve_tpm2_eventlog();
@@ -246,42 +256,34 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
secure_boot != efi_secureboot_mode_disabled) {
if (strstr(cmdline_ptr, "dtb="))
- pr_efi("Ignoring DTB from command line.\n");
+ efi_err("Ignoring DTB from command line.\n");
} else {
status = efi_load_dtb(image, &fdt_addr, &fdt_size);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to load device tree!\n");
+ efi_err("Failed to load device tree!\n");
goto fail_free_image;
}
}
if (fdt_addr) {
- pr_efi("Using DTB from command line\n");
+ efi_info("Using DTB from command line\n");
} else {
/* Look for a device tree configuration table entry. */
fdt_addr = (uintptr_t)get_fdt(&fdt_size);
if (fdt_addr)
- pr_efi("Using DTB from configuration table\n");
+ efi_info("Using DTB from configuration table\n");
}
if (!fdt_addr)
- pr_efi("Generating empty DTB\n");
+ efi_info("Generating empty DTB\n");
- if (!noinitrd()) {
+ if (!efi_noinitrd) {
max_addr = efi_get_max_initrd_addr(dram_base, image_addr);
- status = efi_load_initrd_dev_path(&initrd_addr, &initrd_size,
- max_addr);
- if (status == EFI_SUCCESS) {
- pr_efi("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
- } else if (status == EFI_NOT_FOUND) {
- status = efi_load_initrd(image, &initrd_addr, &initrd_size,
- ULONG_MAX, max_addr);
- if (status == EFI_SUCCESS && initrd_size > 0)
- pr_efi("Loaded initrd from command line option\n");
- }
+ status = efi_load_initrd(image, &initrd_addr, &initrd_size,
+ ULONG_MAX, max_addr);
if (status != EFI_SUCCESS)
- pr_efi_err("Failed to load initrd!\n");
+ efi_err("Failed to load initrd!\n");
}
efi_random_get_seed();
@@ -299,7 +301,7 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
/* hibernation expects the runtime regions to stay in the same place */
- if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr() && !flat_va_mapping) {
+ if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) {
/*
* Randomize the base of the UEFI runtime services region.
* Preserve the 2 MB alignment of the region by taking a
@@ -331,7 +333,7 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
/* not reached */
fail_free_initrd:
- pr_efi_err("Failed to update FDT and exit boot services\n");
+ efi_err("Failed to update FDT and exit boot services\n");
efi_free(initrd_size, initrd_addr);
efi_free(fdt_size, fdt_addr);
@@ -339,9 +341,10 @@ fail_free_initrd:
fail_free_image:
efi_free(image_size, image_addr);
efi_free(reserve_size, reserve_addr);
-fail_free_cmdline:
+fail_free_screeninfo:
free_screen_info(si);
- efi_free(cmdline_size, (unsigned long)cmdline_ptr);
+fail_free_cmdline:
+ efi_bs_call(free_pool, cmdline_ptr);
fail:
return status;
}
@@ -372,7 +375,7 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
size = in->num_pages * EFI_PAGE_SIZE;
in->virt_addr = in->phys_addr;
- if (novamap()) {
+ if (efi_novamap) {
continue;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 67d26949fd26..bcd8c0a785f0 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -3,6 +3,13 @@
#ifndef _DRIVERS_FIRMWARE_EFI_EFISTUB_H
#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
+#include <linux/compiler.h>
+#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/kern_levels.h>
+#include <linux/types.h>
+#include <asm/efi.h>
+
/* error code which can't be mistaken for valid address */
#define EFI_ERROR (~0UL)
@@ -25,25 +32,33 @@
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
-#if defined(CONFIG_ARM) || defined(CONFIG_X86)
-#define __efistub_global __section(.data)
-#else
-#define __efistub_global
-#endif
+extern bool efi_nochunk;
+extern bool efi_nokaslr;
+extern bool efi_noinitrd;
+extern int efi_loglevel;
+extern bool efi_novamap;
-extern bool __pure nochunk(void);
-extern bool __pure nokaslr(void);
-extern bool __pure noinitrd(void);
-extern bool __pure is_quiet(void);
-extern bool __pure novamap(void);
+extern const efi_system_table_t *efi_system_table;
-extern __pure efi_system_table_t *efi_system_table(void);
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg);
-#define pr_efi(msg) do { \
- if (!is_quiet()) efi_printk("EFI stub: "msg); \
-} while (0)
+#ifndef ARCH_HAS_EFISTUB_WRAPPERS
-#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg)
+#define efi_is_native() (true)
+#define efi_bs_call(func, ...) efi_system_table->boottime->func(__VA_ARGS__)
+#define efi_rt_call(func, ...) efi_system_table->runtime->func(__VA_ARGS__)
+#define efi_table_attr(inst, attr) (inst->attr)
+#define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__)
+
+#endif
+
+#define efi_info(fmt, ...) \
+ efi_printk(KERN_INFO fmt, ##__VA_ARGS__)
+#define efi_err(fmt, ...) \
+ efi_printk(KERN_ERR "ERROR: " fmt, ##__VA_ARGS__)
+#define efi_debug(fmt, ...) \
+ efi_printk(KERN_DEBUG "DEBUG: " fmt, ##__VA_ARGS__)
/* Helper macros for the usual case of using simple C variables: */
#ifndef fdt_setprop_inplace_var
@@ -77,6 +92,13 @@ extern __pure efi_system_table_t *efi_system_table(void);
((handle = efi_get_handle_at((array), i)) || true); \
i++)
+static inline
+void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
+{
+ *lo = lower_32_bits(data);
+ *hi = upper_32_bits(data);
+}
+
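
A minimal usage sketch for the new helper (values invented for illustration;
the GOP code later in this patch uses it to fill in lfb_base/ext_lfb_base):

	u32 lo, hi;

	efi_set_u64_split(0x0000000123400000ULL, &lo, &hi);
	/* lo == 0x23400000, hi == 0x00000001 */
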
/*
* Allocation types for calls to boottime->allocate_pages.
*/
@@ -92,6 +114,29 @@ extern __pure efi_system_table_t *efi_system_table(void);
#define EFI_LOCATE_BY_REGISTER_NOTIFY 1
#define EFI_LOCATE_BY_PROTOCOL 2
+/*
+ * boottime->stall takes the time period in microseconds
+ */
+#define EFI_USEC_PER_SEC 1000000
+
+/*
+ * boottime->set_timer takes the time in 100ns units
+ */
+#define EFI_100NSEC_PER_USEC ((u64)10)
+
+/*
+ * An efi_boot_memmap is used by efi_get_memory_map() to return the
+ * EFI memory map in a dynamically allocated buffer.
+ *
+ * The buffer allocated for the EFI memory map includes extra room for
+ * a minimum of EFI_MMAP_NR_SLACK_SLOTS additional EFI memory descriptors.
+ * This facilitates the reuse of the EFI memory map buffer when a second
+ * call to ExitBootServices() is needed because of intervening changes to
+ * the EFI memory map. Other related structures, e.g. x86 e820ext, need
+ * to factor in this headroom requirement as well.
+ */
+#define EFI_MMAP_NR_SLACK_SLOTS 8
+
struct efi_boot_memmap {
efi_memory_desc_t **map;
unsigned long *map_size;
@@ -103,6 +148,39 @@ struct efi_boot_memmap {
typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+typedef void *efi_event_t;
+/* Note that notifications won't work in mixed mode */
+typedef void (__efiapi *efi_event_notify_t)(efi_event_t, void *);
+
+#define EFI_EVT_TIMER 0x80000000U
+#define EFI_EVT_RUNTIME 0x40000000U
+#define EFI_EVT_NOTIFY_WAIT 0x00000100U
+#define EFI_EVT_NOTIFY_SIGNAL 0x00000200U
+
+/*
+ * boottime->wait_for_event takes an array of events as input.
+ * Provide a helper to set it up correctly for mixed mode.
+ */
+static inline
+void efi_set_event_at(efi_event_t *events, size_t idx, efi_event_t event)
+{
+ if (efi_is_native())
+ events[idx] = event;
+ else
+ ((u32 *)events)[idx] = (u32)(unsigned long)event;
+}
+
+#define EFI_TPL_APPLICATION 4
+#define EFI_TPL_CALLBACK 8
+#define EFI_TPL_NOTIFY 16
+#define EFI_TPL_HIGH_LEVEL 31
+
+typedef enum {
+ EfiTimerCancel,
+ EfiTimerPeriodic,
+ EfiTimerRelative
+} EFI_TIMER_DELAY;
+
/*
* EFI Boot Services table
*/
@@ -121,11 +199,16 @@ union efi_boot_services {
efi_status_t (__efiapi *allocate_pool)(int, unsigned long,
void **);
efi_status_t (__efiapi *free_pool)(void *);
- void *create_event;
- void *set_timer;
- void *wait_for_event;
+ efi_status_t (__efiapi *create_event)(u32, unsigned long,
+ efi_event_notify_t, void *,
+ efi_event_t *);
+ efi_status_t (__efiapi *set_timer)(efi_event_t,
+ EFI_TIMER_DELAY, u64);
+ efi_status_t (__efiapi *wait_for_event)(unsigned long,
+ efi_event_t *,
+ unsigned long *);
void *signal_event;
- void *close_event;
+ efi_status_t (__efiapi *close_event)(efi_event_t);
void *check_event;
void *install_protocol_interface;
void *reinstall_protocol_interface;
@@ -152,7 +235,7 @@ union efi_boot_services {
efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
unsigned long);
void *get_next_monotonic_count;
- void *stall;
+ efi_status_t (__efiapi *stall)(unsigned long);
void *set_watchdog_timer;
void *connect_controller;
efi_status_t (__efiapi *disconnect_controller)(efi_handle_t,
@@ -237,6 +320,27 @@ union efi_uga_draw_protocol {
} mixed_mode;
};
+typedef struct {
+ u16 scan_code;
+ efi_char16_t unicode_char;
+} efi_input_key_t;
+
+union efi_simple_text_input_protocol {
+ struct {
+ void *reset;
+ efi_status_t (__efiapi *read_keystroke)(efi_simple_text_input_protocol_t *,
+ efi_input_key_t *);
+ efi_event_t wait_for_key;
+ };
+ struct {
+ u32 reset;
+ u32 read_keystroke;
+ u32 wait_for_key;
+ } mixed_mode;
+};
+
+efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key);
+
union efi_simple_text_output_protocol {
struct {
void *reset;
@@ -298,8 +402,10 @@ typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t;
union efi_graphics_output_protocol {
struct {
- void *query_mode;
- void *set_mode;
+ efi_status_t (__efiapi *query_mode)(efi_graphics_output_protocol_t *,
+ u32, unsigned long *,
+ efi_graphics_output_mode_info_t **);
+ efi_status_t (__efiapi *set_mode) (efi_graphics_output_protocol_t *, u32);
void *blt;
efi_graphics_output_protocol_mode_t *mode;
};
@@ -587,8 +693,6 @@ efi_status_t efi_exit_boot_services(void *handle,
void *priv,
efi_exit_boot_map_processing priv_func);
-void efi_char16_printk(efi_char16_t *);
-
efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
unsigned long max_addr,
@@ -612,33 +716,24 @@ efi_status_t check_platform_features(void);
void *get_efi_config_table(efi_guid_t guid);
-void efi_printk(char *str);
+/* NOTE: These functions do not print a trailing newline after the string */
+void efi_char16_puts(efi_char16_t *);
+void efi_puts(const char *str);
+
+__printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
-char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len,
- unsigned long max_addr);
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
-efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long min);
-
-static inline
-efi_status_t efi_low_alloc(unsigned long size, unsigned long align,
- unsigned long *addr)
-{
- /*
- * Don't allocate at 0x0. It will confuse code that
- * checks pointers against NULL. Skip the first 8
- * bytes so we start at a nice even number.
- */
- return efi_low_alloc_above(size, align, addr, 0x8);
-}
-
efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
unsigned long max);
+efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+ unsigned long max, unsigned long align);
+
efi_status_t efi_relocate_kernel(unsigned long *image_addr,
unsigned long image_size,
unsigned long alloc_size,
@@ -648,12 +743,27 @@ efi_status_t efi_relocate_kernel(unsigned long *image_addr,
efi_status_t efi_parse_options(char const *cmdline);
+void efi_parse_option_graphics(char *option);
+
efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
unsigned long size);
-efi_status_t efi_load_dtb(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size);
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
+ const efi_char16_t *optstr,
+ int optstr_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ unsigned long *load_addr,
+ unsigned long *load_size);
+
+static inline efi_status_t efi_load_dtb(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ return handle_cmdline_files(image, L"dtb=", sizeof(L"dtb=") - 2,
+ ULONG_MAX, ULONG_MAX, load_addr, load_size);
+}
efi_status_t efi_load_initrd(efi_loaded_image_t *image,
unsigned long *load_addr,
@@ -661,8 +771,4 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
unsigned long soft_limit,
unsigned long hard_limit);
-efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
- unsigned long *load_size,
- unsigned long max);
-
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 46cffac7a5f1..11ecf3c4640e 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -39,7 +39,7 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
/* Do some checks on provided FDT, if it exists: */
if (orig_fdt) {
if (fdt_check_header(orig_fdt)) {
- pr_efi_err("Device Tree header not valid!\n");
+ efi_err("Device Tree header not valid!\n");
return EFI_LOAD_ERROR;
}
/*
@@ -47,7 +47,7 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
* configuration table:
*/
if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
- pr_efi_err("Truncated device tree! foo!\n");
+ efi_err("Truncated device tree! foo!\n");
return EFI_LOAD_ERROR;
}
}
@@ -110,7 +110,7 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
- fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table());
+ fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table);
status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
if (status)
@@ -270,16 +270,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
*/
status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err("Unable to retrieve UEFI memory map.\n");
+ efi_err("Unable to retrieve UEFI memory map.\n");
return status;
}
- pr_efi("Exiting boot services and installing virtual address map...\n");
+ efi_info("Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err("Unable to allocate memory for new device tree.\n");
+ efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
}
@@ -296,7 +296,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
initrd_addr, initrd_size);
if (status != EFI_SUCCESS) {
- pr_efi_err("Unable to construct new device tree.\n");
+ efi_err("Unable to construct new device tree.\n");
goto fail_free_new_fdt;
}
@@ -310,11 +310,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
- if (novamap())
+ if (efi_novamap)
return EFI_SUCCESS;
/* Install the new virtual address map */
- svam = efi_system_table()->runtime->set_virtual_address_map;
+ svam = efi_system_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
desc_ver, runtime_map);
@@ -342,13 +342,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
return EFI_SUCCESS;
}
- pr_efi_err("Exit boot services failed.\n");
+ efi_err("Exit boot services failed.\n");
fail_free_new_fdt:
efi_free(MAX_FDT_SIZE, *new_fdt_addr);
fail:
- efi_system_table()->boottime->free_pool(runtime_map);
+ efi_system_table->boottime->free_pool(runtime_map);
return EFI_LOAD_ERROR;
}
@@ -363,7 +363,7 @@ void *get_fdt(unsigned long *fdt_size)
return NULL;
if (fdt_check_header(fdt) != 0) {
- pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
return NULL;
}
*fdt_size = fdt_totalsize(fdt);
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index ea66b1f16a79..2005e33b33d5 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -46,16 +46,14 @@ static efi_status_t efi_open_file(efi_file_protocol_t *volume,
status = volume->open(volume, &fh, fi->filename, EFI_FILE_MODE_READ, 0);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to open file: ");
- efi_char16_printk(fi->filename);
- efi_printk("\n");
+ efi_err("Failed to open file: %ls\n", fi->filename);
return status;
}
info_sz = sizeof(struct finfo);
status = fh->get_info(fh, &info_guid, &info_sz, fi);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to get file info\n");
+ efi_err("Failed to get file info\n");
fh->close(fh);
return status;
}
@@ -75,13 +73,13 @@ static efi_status_t efi_open_volume(efi_loaded_image_t *image,
status = efi_bs_call(handle_protocol, image->device_handle, &fs_proto,
(void **)&io);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to handle fs_proto\n");
+ efi_err("Failed to handle fs_proto\n");
return status;
}
status = io->open_volume(io, fh);
if (status != EFI_SUCCESS)
- pr_efi_err("Failed to open volume\n");
+ efi_err("Failed to open volume\n");
return status;
}
@@ -121,13 +119,13 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
* We only support loading a file from the same filesystem as
* the kernel image.
*/
-static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
- const efi_char16_t *optstr,
- int optstr_size,
- unsigned long soft_limit,
- unsigned long hard_limit,
- unsigned long *load_addr,
- unsigned long *load_size)
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
+ const efi_char16_t *optstr,
+ int optstr_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ unsigned long *load_addr,
+ unsigned long *load_size)
{
const efi_char16_t *cmdline = image->load_options;
int cmdline_len = image->load_options_size / 2;
@@ -142,7 +140,7 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
if (!load_addr || !load_size)
return EFI_INVALID_PARAMETER;
- if (IS_ENABLED(CONFIG_X86) && !nochunk())
+ if (IS_ENABLED(CONFIG_X86) && !efi_nochunk)
efi_chunk_size = EFI_READ_CHUNK_SIZE;
alloc_addr = alloc_size = 0;
@@ -191,7 +189,7 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
&alloc_addr,
hard_limit);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to allocate memory for files\n");
+ efi_err("Failed to allocate memory for files\n");
goto err_close_file;
}
@@ -215,7 +213,7 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
status = file->read(file, &chunksize, addr);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to read file\n");
+ efi_err("Failed to read file\n");
goto err_close_file;
}
addr += chunksize;
@@ -239,21 +237,3 @@ err_close_volume:
efi_free(alloc_size, alloc_addr);
return status;
}
-
-efi_status_t efi_load_dtb(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size)
-{
- return handle_cmdline_files(image, L"dtb=", sizeof(L"dtb=") - 2,
- ULONG_MAX, ULONG_MAX, load_addr, load_size);
-}
-
-efi_status_t efi_load_initrd(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size,
- unsigned long soft_limit,
- unsigned long hard_limit)
-{
- return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
- soft_limit, hard_limit, load_addr, load_size);
-}
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 55e6b3f286fe..ea5da307d542 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -5,169 +5,546 @@
*
* ----------------------------------------------------------------------- */
+#include <linux/bitops.h>
+#include <linux/ctype.h>
#include <linux/efi.h>
#include <linux/screen_info.h>
+#include <linux/string.h>
#include <asm/efi.h>
#include <asm/setup.h>
#include "efistub.h"
-static void find_bits(unsigned long mask, u8 *pos, u8 *size)
+enum efi_cmdline_option {
+ EFI_CMDLINE_NONE,
+ EFI_CMDLINE_MODE_NUM,
+ EFI_CMDLINE_RES,
+ EFI_CMDLINE_AUTO,
+ EFI_CMDLINE_LIST
+};
+
+static struct {
+ enum efi_cmdline_option option;
+ union {
+ u32 mode;
+ struct {
+ u32 width, height;
+ int format;
+ u8 depth;
+ } res;
+ };
+} cmdline = { .option = EFI_CMDLINE_NONE };
+
+static bool parse_modenum(char *option, char **next)
+{
+ u32 m;
+
+ if (!strstarts(option, "mode="))
+ return false;
+ option += strlen("mode=");
+ m = simple_strtoull(option, &option, 0);
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_MODE_NUM;
+ cmdline.mode = m;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_res(char *option, char **next)
+{
+ u32 w, h, d = 0;
+ int pf = -1;
+
+ if (!isdigit(*option))
+ return false;
+ w = simple_strtoull(option, &option, 10);
+ if (*option++ != 'x' || !isdigit(*option))
+ return false;
+ h = simple_strtoull(option, &option, 10);
+ if (*option == '-') {
+ option++;
+ if (strstarts(option, "rgb")) {
+ option += strlen("rgb");
+ pf = PIXEL_RGB_RESERVED_8BIT_PER_COLOR;
+ } else if (strstarts(option, "bgr")) {
+ option += strlen("bgr");
+ pf = PIXEL_BGR_RESERVED_8BIT_PER_COLOR;
+ } else if (isdigit(*option))
+ d = simple_strtoull(option, &option, 10);
+ else
+ return false;
+ }
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_RES;
+ cmdline.res.width = w;
+ cmdline.res.height = h;
+ cmdline.res.format = pf;
+ cmdline.res.depth = d;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_auto(char *option, char **next)
+{
+ if (!strstarts(option, "auto"))
+ return false;
+ option += strlen("auto");
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_AUTO;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_list(char *option, char **next)
{
- u8 first, len;
+ if (!strstarts(option, "list"))
+ return false;
+ option += strlen("list");
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_LIST;
+
+ *next = option;
+ return true;
+}
+
+void efi_parse_option_graphics(char *option)
+{
+ while (*option) {
+ if (parse_modenum(option, &option))
+ continue;
+ if (parse_res(option, &option))
+ continue;
+ if (parse_auto(option, &option))
+ continue;
+ if (parse_list(option, &option))
+ continue;
+
+ while (*option && *option++ != ',')
+ ;
+ }
+}
+
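
To make the parser behavior concrete, a sketch with hypothetical option
strings (whatever prefix the stub strips before calling this function is
omitted). Within a comma-separated list the last recognized option wins,
since each parser overwrites the same cmdline struct:

	efi_parse_option_graphics("mode=5");      /* EFI_CMDLINE_MODE_NUM, mode 5 */
	efi_parse_option_graphics("1024x768-32"); /* EFI_CMDLINE_RES, 32 bpp      */
	efi_parse_option_graphics("800x600-bgr"); /* EFI_CMDLINE_RES, bgr format  */
	efi_parse_option_graphics("auto");        /* EFI_CMDLINE_AUTO             */
	efi_parse_option_graphics("list");        /* EFI_CMDLINE_LIST             */
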
+static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ if (cmdline.mode == cur_mode)
+ return cur_mode;
+
+ max_mode = efi_table_attr(mode, max_mode);
+ if (cmdline.mode >= max_mode) {
+ efi_err("Requested mode is invalid\n");
+ return cur_mode;
+ }
+
+ status = efi_call_proto(gop, query_mode, cmdline.mode,
+ &info_size, &info);
+ if (status != EFI_SUCCESS) {
+ efi_err("Couldn't get mode information\n");
+ return cur_mode;
+ }
+
+ pf = info->pixel_format;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) {
+ efi_err("Invalid PixelFormat\n");
+ return cur_mode;
+ }
+
+ return cmdline.mode;
+}
+
+static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
+{
+ if (pixel_format == PIXEL_BIT_MASK) {
+ u32 mask = pixel_info.red_mask | pixel_info.green_mask |
+ pixel_info.blue_mask | pixel_info.reserved_mask;
+ if (!mask)
+ return 0;
+ return __fls(mask) - __ffs(mask) + 1;
+ } else
+ return 32;
+}
+
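
A worked example for pixel_bpp(), assuming a hypothetical RGB565 mode
reported via PIXEL_BIT_MASK (any other pixel format simply reports 32 bpp):

	efi_pixel_bitmask_t pi = {
		.red_mask   = 0xf800,
		.green_mask = 0x07e0,
		.blue_mask  = 0x001f,
	};
	/* combined mask 0xffff: __fls() == 15, __ffs() == 0, so bpp == 16 */
	u8 bpp = pixel_bpp(PIXEL_BIT_MASK, pi);
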
+static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ info = efi_table_attr(mode, info);
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ if (w == cmdline.res.width && h == cmdline.res.height &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ return cur_mode;
+
+ max_mode = efi_table_attr(mode, max_mode);
+
+ for (m = 0; m < max_mode; m++) {
+ if (m == cur_mode)
+ continue;
+
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ continue;
+ if (w == cmdline.res.width && h == cmdline.res.height &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ return m;
+ }
+
+ efi_err("Couldn't find requested mode\n");
+
+ return cur_mode;
+}
+
+static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode, best_mode, area;
+ u8 depth;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h, a;
+ u8 d;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ max_mode = efi_table_attr(mode, max_mode);
- first = 0;
- len = 0;
+ info = efi_table_attr(mode, info);
- if (mask) {
- while (!(mask & 0x1)) {
- mask = mask >> 1;
- first++;
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ best_mode = cur_mode;
+ area = w * h;
+ depth = pixel_bpp(pf, pi);
+
+ for (m = 0; m < max_mode; m++) {
+ if (m == cur_mode)
+ continue;
+
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ continue;
+ a = w * h;
+ if (a < area)
+ continue;
+ d = pixel_bpp(pf, pi);
+ if (a > area || d > depth) {
+ best_mode = m;
+ area = a;
+ depth = d;
}
+ }
+
+ return best_mode;
+}
+
+static u32 choose_mode_list(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h;
+ u8 d;
+ const char *dstr;
+ bool valid;
+ efi_input_key_t key;
- while (mask & 0x1) {
- mask = mask >> 1;
- len++;
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ max_mode = efi_table_attr(mode, max_mode);
+
+ efi_printk("Available graphics modes are 0-%u\n", max_mode-1);
+ efi_puts(" * = current mode\n"
+ " - = unusable mode\n");
+ for (m = 0; m < max_mode; m++) {
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
+ d = 0;
+ switch (pf) {
+ case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
+ dstr = "rgb";
+ break;
+ case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
+ dstr = "bgr";
+ break;
+ case PIXEL_BIT_MASK:
+ dstr = "";
+ d = pixel_bpp(pf, pi);
+ break;
+ case PIXEL_BLT_ONLY:
+ dstr = "blt";
+ break;
+ default:
+ dstr = "xxx";
+ break;
}
+
+ efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
+ m,
+ m == cur_mode ? '*' : ' ',
+ !valid ? '-' : ' ',
+ w, h, dstr, d);
+ }
+
+ efi_puts("\nPress any key to continue (or wait 10 seconds)\n");
+ status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key);
+ if (status != EFI_SUCCESS && status != EFI_TIMEOUT) {
+ efi_err("Unable to read key, continuing in 10 seconds\n");
+ efi_bs_call(stall, 10 * EFI_USEC_PER_SEC);
+ }
+
+ return cur_mode;
+}
+
+static void set_mode(efi_graphics_output_protocol_t *gop)
+{
+ efi_graphics_output_protocol_mode_t *mode;
+ u32 cur_mode, new_mode;
+
+ switch (cmdline.option) {
+ case EFI_CMDLINE_MODE_NUM:
+ new_mode = choose_mode_modenum(gop);
+ break;
+ case EFI_CMDLINE_RES:
+ new_mode = choose_mode_res(gop);
+ break;
+ case EFI_CMDLINE_AUTO:
+ new_mode = choose_mode_auto(gop);
+ break;
+ case EFI_CMDLINE_LIST:
+ new_mode = choose_mode_list(gop);
+ break;
+ default:
+ return;
+ }
+
+ mode = efi_table_attr(gop, mode);
+ cur_mode = efi_table_attr(mode, mode);
+
+ if (new_mode == cur_mode)
+ return;
+
+ if (efi_call_proto(gop, set_mode, new_mode) != EFI_SUCCESS)
+ efi_err("Failed to set requested mode\n");
+}
+
+static void find_bits(u32 mask, u8 *pos, u8 *size)
+{
+ if (!mask) {
+ *pos = *size = 0;
+ return;
}
- *pos = first;
- *size = len;
+ /* UEFI spec guarantees that the set bits are contiguous */
+ *pos = __ffs(mask);
+ *size = __fls(mask) - *pos + 1;
}
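
For example (illustrative values):

	u8 pos, size;

	find_bits(0x00ff0000, &pos, &size);	/* pos == 16, size == 8 */
	find_bits(0x0000001f, &pos, &size);	/* pos == 0,  size == 5 */
	find_bits(0, &pos, &size);		/* pos == 0,  size == 0 */
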
static void
setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
efi_pixel_bitmask_t pixel_info, int pixel_format)
{
- if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
- si->lfb_depth = 32;
- si->lfb_linelength = pixels_per_scan_line * 4;
- si->red_size = 8;
- si->red_pos = 0;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 16;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
- } else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) {
- si->lfb_depth = 32;
- si->lfb_linelength = pixels_per_scan_line * 4;
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
- } else if (pixel_format == PIXEL_BIT_MASK) {
- find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size);
- find_bits(pixel_info.green_mask, &si->green_pos,
- &si->green_size);
- find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size);
- find_bits(pixel_info.reserved_mask, &si->rsvd_pos,
- &si->rsvd_size);
+ if (pixel_format == PIXEL_BIT_MASK) {
+ find_bits(pixel_info.red_mask,
+ &si->red_pos, &si->red_size);
+ find_bits(pixel_info.green_mask,
+ &si->green_pos, &si->green_size);
+ find_bits(pixel_info.blue_mask,
+ &si->blue_pos, &si->blue_size);
+ find_bits(pixel_info.reserved_mask,
+ &si->rsvd_pos, &si->rsvd_size);
si->lfb_depth = si->red_size + si->green_size +
si->blue_size + si->rsvd_size;
si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
} else {
- si->lfb_depth = 4;
- si->lfb_linelength = si->lfb_width / 2;
- si->red_size = 0;
- si->red_pos = 0;
- si->green_size = 0;
- si->green_pos = 0;
- si->blue_size = 0;
- si->blue_pos = 0;
- si->rsvd_size = 0;
- si->rsvd_pos = 0;
+ if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+ si->red_pos = 0;
+ si->blue_pos = 16;
+ } else /* PIXEL_BGR_RESERVED_8BIT_PER_COLOR */ {
+ si->blue_pos = 0;
+ si->red_pos = 16;
+ }
+
+ si->green_pos = 8;
+ si->rsvd_pos = 24;
+ si->red_size = si->green_size =
+ si->blue_size = si->rsvd_size = 8;
+
+ si->lfb_depth = 32;
+ si->lfb_linelength = pixels_per_scan_line * 4;
}
}
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size, void **handles)
+static efi_graphics_output_protocol_t *
+find_gop(efi_guid_t *proto, unsigned long size, void **handles)
{
- efi_graphics_output_protocol_t *gop, *first_gop;
- u16 width, height;
- u32 pixels_per_scan_line;
- u32 ext_lfb_base;
- efi_physical_addr_t fb_base;
- efi_pixel_bitmask_t pixel_info;
- int pixel_format;
- efi_status_t status;
+ efi_graphics_output_protocol_t *first_gop;
efi_handle_t h;
int i;
first_gop = NULL;
- gop = NULL;
for_each_efi_handle(h, handles, size, i) {
+ efi_status_t status;
+
+ efi_graphics_output_protocol_t *gop;
efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info = NULL;
+ efi_graphics_output_mode_info_t *info;
+
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
- bool conout_found = false;
void *dummy = NULL;
- efi_physical_addr_t current_fb_base;
status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
if (status != EFI_SUCCESS)
continue;
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+ if (info->pixel_format == PIXEL_BLT_ONLY ||
+ info->pixel_format >= PIXEL_FORMAT_MAX)
+ continue;
+
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ *
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
if (status == EFI_SUCCESS)
- conout_found = true;
+ return gop;
- mode = efi_table_attr(gop, mode);
- info = efi_table_attr(mode, info);
- current_fb_base = efi_table_attr(mode, frame_buffer_base);
-
- if ((!first_gop || conout_found) &&
- info->pixel_format != PIXEL_BLT_ONLY) {
- /*
- * Systems that use the UEFI Console Splitter may
- * provide multiple GOP devices, not all of which are
- * backed by real hardware. The workaround is to search
- * for a GOP implementing the ConOut protocol, and if
- * one isn't found, to just fall back to the first GOP.
- */
- width = info->horizontal_resolution;
- height = info->vertical_resolution;
- pixel_format = info->pixel_format;
- pixel_info = info->pixel_information;
- pixels_per_scan_line = info->pixels_per_scan_line;
- fb_base = current_fb_base;
-
- /*
- * Once we've found a GOP supporting ConOut,
- * don't bother looking any further.
- */
+ if (!first_gop)
first_gop = gop;
- if (conout_found)
- break;
- }
}
+ return first_gop;
+}
+
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size, void **handles)
+{
+ efi_graphics_output_protocol_t *gop;
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+
+ gop = find_gop(proto, size, handles);
+
/* Did we find any GOPs? */
- if (!first_gop)
+ if (!gop)
return EFI_NOT_FOUND;
+ /* Change mode if requested */
+ set_mode(gop);
+
/* EFI framebuffer */
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+
si->orig_video_isVGA = VIDEO_TYPE_EFI;
- si->lfb_width = width;
- si->lfb_height = height;
- si->lfb_base = fb_base;
+ si->lfb_width = info->horizontal_resolution;
+ si->lfb_height = info->vertical_resolution;
- ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
- if (ext_lfb_base) {
+ efi_set_u64_split(efi_table_attr(mode, frame_buffer_base),
+ &si->lfb_base, &si->ext_lfb_base);
+ if (si->ext_lfb_base)
si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
- si->ext_lfb_base = ext_lfb_base;
- }
si->pages = 1;
- setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+ setup_pixel_info(si, info->pixels_per_scan_line,
+ info->pixel_information, info->pixel_format);
si->lfb_size = si->lfb_linelength * si->lfb_height;
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
index 869a79c8946f..feef8d4be113 100644
--- a/drivers/firmware/efi/libstub/mem.c
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -5,8 +5,6 @@
#include "efistub.h"
-#define EFI_MMAP_NR_SLACK_SLOTS 8
-
static inline bool mmap_has_headroom(unsigned long buff_size,
unsigned long map_size,
unsigned long desc_size)
@@ -93,120 +91,23 @@ fail:
efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
unsigned long max)
{
- efi_physical_addr_t alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
- int slack = EFI_ALLOC_ALIGN / EFI_PAGE_SIZE - 1;
+ efi_physical_addr_t alloc_addr;
efi_status_t status;
- size = round_up(size, EFI_ALLOC_ALIGN);
+ if (EFI_ALLOC_ALIGN > EFI_PAGE_SIZE)
+ return efi_allocate_pages_aligned(size, addr, max,
+ EFI_ALLOC_ALIGN);
+
+ alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
- EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
+ EFI_LOADER_DATA, DIV_ROUND_UP(size, EFI_PAGE_SIZE),
&alloc_addr);
if (status != EFI_SUCCESS)
return status;
- *addr = ALIGN((unsigned long)alloc_addr, EFI_ALLOC_ALIGN);
-
- if (slack > 0) {
- int l = (alloc_addr % EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
-
- if (l) {
- efi_bs_call(free_pages, alloc_addr, slack - l + 1);
- slack = l - 1;
- }
- if (slack)
- efi_bs_call(free_pages, *addr + size, slack);
- }
+ *addr = alloc_addr;
return EFI_SUCCESS;
}
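
A quick arithmetic check of the DIV_ROUND_UP() change (sketch): the old code
rounded the byte count up to EFI_ALLOC_ALIGN and freed the slack pages
afterwards; the new code requests exactly the pages needed whenever
EFI_ALLOC_ALIGN == EFI_PAGE_SIZE, and defers to efi_allocate_pages_aligned()
otherwise:

	/* e.g. size == 5000 with 4 KiB EFI pages */
	unsigned long pages = DIV_ROUND_UP(5000, 4096);	/* == 2, no slack */
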
-/**
- * efi_low_alloc_above() - allocate pages at or above given address
- * @size: size of the memory area to allocate
- * @align: minimum alignment of the allocated memory area. It should
- * a power of two.
- * @addr: on exit the address of the allocated memory
- * @min: minimum address to used for the memory allocation
- *
- * Allocate at the lowest possible address that is not below @min as
- * EFI_LOADER_DATA. The allocated pages are aligned according to @align but at
- * least EFI_ALLOC_ALIGN. The first allocated page will not below the address
- * given by @min.
- *
- * Return: status code
- */
-efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long min)
-{
- unsigned long map_size, desc_size, buff_size;
- efi_memory_desc_t *map;
- efi_status_t status;
- unsigned long nr_pages;
- int i;
- struct efi_boot_memmap boot_map;
-
- boot_map.map = &map;
- boot_map.map_size = &map_size;
- boot_map.desc_size = &desc_size;
- boot_map.desc_ver = NULL;
- boot_map.key_ptr = NULL;
- boot_map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&boot_map);
- if (status != EFI_SUCCESS)
- goto fail;
-
- /*
- * Enforce minimum alignment that EFI or Linux requires when
- * requesting a specific address. We are doing page-based (or
- * larger) allocations, and both the address and size must meet
- * alignment constraints.
- */
- if (align < EFI_ALLOC_ALIGN)
- align = EFI_ALLOC_ALIGN;
-
- size = round_up(size, EFI_ALLOC_ALIGN);
- nr_pages = size / EFI_PAGE_SIZE;
- for (i = 0; i < map_size / desc_size; i++) {
- efi_memory_desc_t *desc;
- unsigned long m = (unsigned long)map;
- u64 start, end;
-
- desc = efi_early_memdesc_ptr(m, desc_size, i);
-
- if (desc->type != EFI_CONVENTIONAL_MEMORY)
- continue;
-
- if (efi_soft_reserve_enabled() &&
- (desc->attribute & EFI_MEMORY_SP))
- continue;
-
- if (desc->num_pages < nr_pages)
- continue;
-
- start = desc->phys_addr;
- end = start + desc->num_pages * EFI_PAGE_SIZE;
-
- if (start < min)
- start = min;
-
- start = round_up(start, align);
- if ((start + size) > end)
- continue;
-
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages, &start);
- if (status == EFI_SUCCESS) {
- *addr = start;
- break;
- }
- }
-
- if (i == map_size / desc_size)
- status = EFI_NOT_FOUND;
-
- efi_bs_call(free_pool, map);
-fail:
- return status;
-}
/**
* efi_free() - free memory pages
@@ -229,81 +130,3 @@ void efi_free(unsigned long size, unsigned long addr)
nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
efi_bs_call(free_pages, addr, nr_pages);
}
-
-/**
- * efi_relocate_kernel() - copy memory area
- * @image_addr: pointer to address of memory area to copy
- * @image_size: size of memory area to copy
- * @alloc_size: minimum size of memory to allocate, must be greater or
- * equal to image_size
- * @preferred_addr: preferred target address
- * @alignment: minimum alignment of the allocated memory area. It
- * should be a power of two.
- * @min_addr: minimum target address
- *
- * Copy a memory area to a newly allocated memory area aligned according
- * to @alignment but at least EFI_ALLOC_ALIGN. If the preferred address
- * is not available, the allocated address will not be below @min_addr.
- * On exit, @image_addr is updated to the target copy address that was used.
- *
- * This function is used to copy the Linux kernel verbatim. It does not apply
- * any relocation changes.
- *
- * Return: status code
- */
-efi_status_t efi_relocate_kernel(unsigned long *image_addr,
- unsigned long image_size,
- unsigned long alloc_size,
- unsigned long preferred_addr,
- unsigned long alignment,
- unsigned long min_addr)
-{
- unsigned long cur_image_addr;
- unsigned long new_addr = 0;
- efi_status_t status;
- unsigned long nr_pages;
- efi_physical_addr_t efi_addr = preferred_addr;
-
- if (!image_addr || !image_size || !alloc_size)
- return EFI_INVALID_PARAMETER;
- if (alloc_size < image_size)
- return EFI_INVALID_PARAMETER;
-
- cur_image_addr = *image_addr;
-
- /*
- * The EFI firmware loader could have placed the kernel image
- * anywhere in memory, but the kernel has restrictions on the
- * max physical address it can run at. Some architectures
- * also have a prefered address, so first try to relocate
- * to the preferred address. If that fails, allocate as low
- * as possible while respecting the required alignment.
- */
- nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages, &efi_addr);
- new_addr = efi_addr;
- /*
- * If preferred address allocation failed allocate as low as
- * possible.
- */
- if (status != EFI_SUCCESS) {
- status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
- min_addr);
- }
- if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to allocate usable memory for kernel.\n");
- return status;
- }
-
- /*
- * We know source/dest won't overlap since both memory ranges
- * have been allocated by UEFI, so we can safely use memcpy.
- */
- memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
-
- /* Return the new address of the relocated image. */
- *image_addr = new_addr;
-
- return status;
-}
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
index b025e59b94df..99fb25d2bcf5 100644
--- a/drivers/firmware/efi/libstub/pci.c
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -28,21 +28,21 @@ void efi_pci_disable_bridge_busmaster(void)
if (status != EFI_BUFFER_TOO_SMALL) {
if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
- pr_efi_err("Failed to locate PCI I/O handles'\n");
+ efi_err("Failed to locate PCI I/O handles'\n");
return;
}
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
(void **)&pci_handle);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to allocate memory for 'pci_handle'\n");
+ efi_err("Failed to allocate memory for 'pci_handle'\n");
return;
}
status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
NULL, &pci_handle_size, pci_handle);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to locate PCI I/O handles'\n");
+ efi_err("Failed to locate PCI I/O handles'\n");
goto free_handle;
}
@@ -69,7 +69,7 @@ void efi_pci_disable_bridge_busmaster(void)
* access to the framebuffer. Drivers for true PCIe graphics
* controllers that are behind a PCIe root port do not use
* DMA to implement the GOP framebuffer anyway [although they
- * may use it in their implentation of Gop->Blt()], and so
+ * may use it in their implementation of Gop->Blt()], and so
* disabling DMA in the PCI bridge should not interfere with
* normal operation of the device.
*/
@@ -106,7 +106,7 @@ void efi_pci_disable_bridge_busmaster(void)
status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16,
PCI_COMMAND, 1, &command);
if (status != EFI_SUCCESS)
- pr_efi_err("Failed to disable PCI busmastering\n");
+ efi_err("Failed to disable PCI busmastering\n");
}
free_handle:
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 4578f59e160c..a408df474d83 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -74,6 +74,8 @@ efi_status_t efi_random_alloc(unsigned long size,
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+
/* count the suitable slots in each memory map entry */
for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
efi_memory_desc_t *md = (void *)memory_map + map_offset;
@@ -85,7 +87,7 @@ efi_status_t efi_random_alloc(unsigned long size,
}
/* find a random number between 0 and total_slots */
- target_slot = (total_slots * (u16)random_seed) >> 16;
+ target_slot = (total_slots * (u64)(random_seed & U32_MAX)) >> 32;
/*
* target_slot is now a value in the range [0, total_slots), and so
@@ -109,7 +111,7 @@ efi_status_t efi_random_alloc(unsigned long size,
}
target = round_up(md->phys_addr, align) + target_slot * align;
- pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ pages = size / EFI_PAGE_SIZE;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
EFI_LOADER_DATA, pages, &target);
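
The widened fixed-point scaling can be sanity-checked (illustrative sketch):
the old expression consumed only 16 bits of the seed, while the new one uses
the full low 32 bits and still yields a slot index in [0, total_slots):

	u64 seed = 0xffffffff;		/* worst case: all-ones 32-bit seed */
	unsigned long slots = 1000;
	unsigned long slot = (slots * seed) >> 32;	/* == 999, never 1000 */
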
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
new file mode 100644
index 000000000000..9b1aaf8b123f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_low_alloc_above() - allocate pages at or above given address
+ * @size: size of the memory area to allocate
+ * @align: minimum alignment of the allocated memory area. It should
+ * be a power of two.
+ * @addr: on exit the address of the allocated memory
+ * @min: minimum address to be used for the memory allocation
+ *
+ * Allocate at the lowest possible address that is not below @min as
+ * EFI_LOADER_DATA. The allocated pages are aligned according to @align but at
+ * least EFI_ALLOC_ALIGN. The first allocated page will not be below the
+ * address given by @min.
+ *
+ * Return: status code
+ */
+static efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min)
+{
+ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&boot_map);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
+ */
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
+ for (i = 0; i < map_size / desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map;
+ u64 start, end;
+
+ desc = efi_early_memdesc_ptr(m, desc_size, i);
+
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
+
+ if (start < min)
+ start = min;
+
+ start = round_up(start, align);
+ if ((start + size) > end)
+ continue;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &start);
+ if (status == EFI_SUCCESS) {
+ *addr = start;
+ break;
+ }
+ }
+
+ if (i == map_size / desc_size)
+ status = EFI_NOT_FOUND;
+
+ efi_bs_call(free_pool, map);
+fail:
+ return status;
+}
+
+/**
+ * efi_relocate_kernel() - copy memory area
+ * @image_addr: pointer to address of memory area to copy
+ * @image_size: size of memory area to copy
+ * @alloc_size: minimum size of memory to allocate, must be greater than or
+ * equal to image_size
+ * @preferred_addr: preferred target address
+ * @alignment: minimum alignment of the allocated memory area. It
+ * should be a power of two.
+ * @min_addr: minimum target address
+ *
+ * Copy a memory area to a newly allocated memory area aligned according
+ * to @alignment but at least EFI_ALLOC_ALIGN. If the preferred address
+ * is not available, the allocated address will not be below @min_addr.
+ * On exit, @image_addr is updated to the target copy address that was used.
+ *
+ * This function is used to copy the Linux kernel verbatim. It does not apply
+ * any relocation changes.
+ *
+ * Return: status code
+ */
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment,
+ unsigned long min_addr)
+{
+ unsigned long cur_image_addr;
+ unsigned long new_addr = 0;
+ efi_status_t status;
+ unsigned long nr_pages;
+ efi_physical_addr_t efi_addr = preferred_addr;
+
+ if (!image_addr || !image_size || !alloc_size)
+ return EFI_INVALID_PARAMETER;
+ if (alloc_size < image_size)
+ return EFI_INVALID_PARAMETER;
+
+ cur_image_addr = *image_addr;
+
+ /*
+ * The EFI firmware loader could have placed the kernel image
+ * anywhere in memory, but the kernel has restrictions on the
+ * max physical address it can run at. Some architectures
+ * also have a preferred address, so first try to relocate
+ * to the preferred address. If that fails, allocate as low
+ * as possible while respecting the required alignment.
+ */
+ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &efi_addr);
+ new_addr = efi_addr;
+ /*
+ * If preferred address allocation failed allocate as low as
+ * possible.
+ */
+ if (status != EFI_SUCCESS) {
+ status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
+ min_addr);
+ }
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate usable memory for kernel.\n");
+ return status;
+ }
+
+ /*
+ * We know source/dest won't overlap since both memory ranges
+ * have been allocated by UEFI, so we can safely use memcpy.
+ */
+ memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
+
+ /* Return the new address of the relocated image. */
+ *image_addr = new_addr;
+
+ return status;
+}
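
A hypothetical call site for the relocation helper (sketch only; real callers
pass their own arch-specific constraints): copy an image to a 2 MiB aligned
area at or above 16 MiB, preferring the address it already occupies:

	#include <linux/sizes.h>

	static efi_status_t relocate_in_place_or_above_16m(unsigned long *addr,
							   unsigned long size)
	{
		/* alloc_size == image_size: no extra room requested */
		return efi_relocate_kernel(addr, size, size, *addr,
					   SZ_2M, SZ_16M);
	}
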
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index a765378ad18c..5efc524b14be 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -67,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(void)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
- pr_efi("UEFI Secure Boot is enabled.\n");
+ efi_info("UEFI Secure Boot is enabled.\n");
return efi_secureboot_mode_enabled;
out_efi_err:
- pr_efi_err("Could not determine UEFI Secure Boot status.\n");
+ efi_err("Could not determine UEFI Secure Boot status.\n");
return efi_secureboot_mode_unknown;
}
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 1d59e103a2e3..7acbac16eae0 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -54,7 +54,7 @@ void efi_retrieve_tpm2_eventlog(void)
efi_status_t status;
efi_physical_addr_t log_location = 0, log_last_entry = 0;
struct linux_efi_tpm_eventlog *log_tbl = NULL;
- struct efi_tcg2_final_events_table *final_events_table;
+ struct efi_tcg2_final_events_table *final_events_table = NULL;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
efi_bool_t truncated;
@@ -119,7 +119,7 @@ void efi_retrieve_tpm2_eventlog(void)
sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) {
- efi_printk("Unable to allocate memory for event log\n");
+ efi_err("Unable to allocate memory for event log\n");
return;
}
@@ -127,7 +127,8 @@ void efi_retrieve_tpm2_eventlog(void)
* Figure out whether any events have already been logged to the
* final events structure, and if so how much space they take up
*/
- final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
+ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
if (final_events_table && final_events_table->nr_events) {
struct tcg_pcr_event2_head *header;
int offset;
diff --git a/drivers/firmware/efi/libstub/vsprintf.c b/drivers/firmware/efi/libstub/vsprintf.c
new file mode 100644
index 000000000000..e65ef49a54cd
--- /dev/null
+++ b/drivers/firmware/efi/libstub/vsprintf.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright 2007 rPath, Inc. - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * Oh, it's a waste of space, but oh-so-yummy for debugging.
+ */
+
+#include <stdarg.h>
+
+#include <linux/compiler.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+static
+int skip_atoi(const char **s)
+{
+ int i = 0;
+
+ while (isdigit(**s))
+ i = i * 10 + *((*s)++) - '0';
+ return i;
+}
+
+/*
+ * put_dec_full4 handles numbers in the range 0 <= r < 10000.
+ * The multiplier 0xccd is round(2^15/10), and the approximation
+ * r/10 == (r * 0xccd) >> 15 is exact for all r < 16389.
+ */
+static
+void put_dec_full4(char *end, unsigned int r)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ unsigned int q = (r * 0xccd) >> 15;
+ *--end = '0' + (r - q * 10);
+ r = q;
+ }
+ *--end = '0' + r;
+}
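
The stated bound is easy to verify exhaustively; a standalone host-side check (not part of the stub) might look like:

    #include <assert.h>

    int main(void)
    {
            unsigned int r;

            /* 0xccd == 3277 == round(2^15 / 10) */
            for (r = 0; r < 16389; r++)
                    assert(((r * 0xccd) >> 15) == r / 10);
            return 0;
    }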
+
+/* put_dec is copied from lib/vsprintf.c with small modifications */
+
+/*
+ * Call put_dec_full4 on x % 10000, return x / 10000.
+ * The approximation x/10000 == (x * 0x346DC5D7) >> 43
+ * holds for all x < 1,128,869,999. The largest value this
+ * helper will ever be asked to convert is 1,125,520,955.
+ * (second call in the put_dec code, assuming n is all-ones).
+ */
+static
+unsigned int put_dec_helper4(char *end, unsigned int x)
+{
+ unsigned int q = (x * 0x346DC5D7ULL) >> 43;
+
+ put_dec_full4(end, x - q * 10000);
+ return q;
+}
+
+/* Based on code by Douglas W. Jones found at
+ * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
+ * (with permission from the author).
+ * Performs no 64-bit division and hence should be fast on 32-bit machines.
+ */
+static
+char *put_dec(char *end, unsigned long long n)
+{
+ unsigned int d3, d2, d1, q, h;
+ char *p = end;
+
+ d1 = ((unsigned int)n >> 16); /* implicit "& 0xffff" */
+ h = (n >> 32);
+ d2 = (h ) & 0xffff;
+ d3 = (h >> 16); /* implicit "& 0xffff" */
+
+ /* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
+ = 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
+ q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((unsigned int)n & 0xffff);
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 7671 * d3 + 9496 * d2 + 6 * d1;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 4749 * d3 + 42 * d2;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 281 * d3;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ put_dec_full4(p, q);
+ p -= 4;
+
+ /* strip off the extra 0's we printed */
+ while (p < end && *p == '0')
+ ++p;
+
+ return p;
+}
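
A quick sanity check of the decomposition: for n = 2^32 we have d3 = 0, d2 = 1, d1 = d0 = 0, so the four helper passes compute 7296, 9496, 42 and 0 with no carries, emitting the digit groups 7296, 9496, 0042 and 0000; the final put_dec_full4() adds another 0000, and stripping the leading zeros leaves 4294967296 = 42_9496_7296, as expected.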
+
+static
+char *number(char *end, unsigned long long num, int base, char locase)
+{
+ /*
+ * locase = 0 or 0x20. ORing digits or letters with 'locase'
+ * produces same digits or (maybe lowercased) letters
+ */
+
+ /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
+ static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
+
+ switch (base) {
+ case 10:
+ if (num != 0)
+ end = put_dec(end, num);
+ break;
+ case 8:
+ for (; num != 0; num >>= 3)
+ *--end = '0' + (num & 07);
+ break;
+ case 16:
+ for (; num != 0; num >>= 4)
+ *--end = digits[num & 0xf] | locase;
+ break;
+ default:
+ unreachable();
+ }
+
+ return end;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SMALL 32 /* Must be 32 == 0x20 */
+#define SPECIAL 64 /* 0x */
+#define WIDE 128 /* UTF-16 string */
+
+static
+int get_flags(const char **fmt)
+{
+ int flags = 0;
+
+ do {
+ switch (**fmt) {
+ case '-':
+ flags |= LEFT;
+ break;
+ case '+':
+ flags |= PLUS;
+ break;
+ case ' ':
+ flags |= SPACE;
+ break;
+ case '#':
+ flags |= SPECIAL;
+ break;
+ case '0':
+ flags |= ZEROPAD;
+ break;
+ default:
+ return flags;
+ }
+ ++(*fmt);
+ } while (1);
+}
+
+static
+int get_int(const char **fmt, va_list *ap)
+{
+ if (isdigit(**fmt))
+ return skip_atoi(fmt);
+ if (**fmt == '*') {
+ ++(*fmt);
+ /* it's the next argument */
+ return va_arg(*ap, int);
+ }
+ return 0;
+}
+
+static
+unsigned long long get_number(int sign, int qualifier, va_list *ap)
+{
+ if (sign) {
+ switch (qualifier) {
+ case 'L':
+ return va_arg(*ap, long long);
+ case 'l':
+ return va_arg(*ap, long);
+ case 'h':
+ return (short)va_arg(*ap, int);
+ case 'H':
+ return (signed char)va_arg(*ap, int);
+ default:
+ return va_arg(*ap, int);
+ }
+ } else {
+ switch (qualifier) {
+ case 'L':
+ return va_arg(*ap, unsigned long long);
+ case 'l':
+ return va_arg(*ap, unsigned long);
+ case 'h':
+ return (unsigned short)va_arg(*ap, int);
+ case 'H':
+ return (unsigned char)va_arg(*ap, int);
+ default:
+ return va_arg(*ap, unsigned int);
+ }
+ }
+}
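
The explicit narrowing casts are what make the 'h'/'H' qualifiers behave: varargs promote short and char arguments to int, so the cast truncates back to the declared width (and sign-extends for signed conversions). A hedged fragment:

    char buf[16];

    /* 'h' truncates to 16 bits: prints "5678" */
    snprintf(buf, sizeof(buf), "%hx", 0x12345678);
    /* ...and (short) sign-extends: prints "-1" */
    snprintf(buf, sizeof(buf), "%hd", 0xffff);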
+
+static
+char get_sign(long long *num, int flags)
+{
+ if (!(flags & SIGN))
+ return 0;
+ if (*num < 0) {
+ *num = -(*num);
+ return '-';
+ }
+ if (flags & PLUS)
+ return '+';
+ if (flags & SPACE)
+ return ' ';
+ return 0;
+}
+
+static
+size_t utf16s_utf8nlen(const u16 *s16, size_t maxlen)
+{
+ size_t len, clen;
+
+ for (len = 0; len < maxlen && *s16; len += clen) {
+ u16 c0 = *s16++;
+
+ /* First, get the length for a BMP character */
+ clen = 1 + (c0 >= 0x80) + (c0 >= 0x800);
+ if (len + clen > maxlen)
+ break;
+ /*
+ * If this is a high surrogate, and we're already at maxlen, we
+ * can't include the character if it's a valid surrogate pair.
+ * Avoid accessing one extra word just to check if it's valid
+ * or not.
+ */
+ if ((c0 & 0xfc00) == 0xd800) {
+ if (len + clen == maxlen)
+ break;
+ if ((*s16 & 0xfc00) == 0xdc00) {
+ ++s16;
+ ++clen;
+ }
+ }
+ }
+
+ return len;
+}
+
+static
+u32 utf16_to_utf32(const u16 **s16)
+{
+ u16 c0, c1;
+
+ c0 = *(*s16)++;
+ /* not a surrogate */
+ if ((c0 & 0xf800) != 0xd800)
+ return c0;
+ /* invalid: low surrogate instead of high */
+ if (c0 & 0x0400)
+ return 0xfffd;
+ c1 = **s16;
+ /* invalid: missing low surrogate */
+ if ((c1 & 0xfc00) != 0xdc00)
+ return 0xfffd;
+ /* valid surrogate pair */
+ ++(*s16);
+ return (0x10000 - (0xd800 << 10) - 0xdc00) + (c0 << 10) + c1;
+}
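
A worked example: the pair (0xd83d, 0xde00) encodes U+1F600. The constant simply folds the two bias subtractions of the textbook formula ((c0 - 0xd800) << 10) + (c1 - 0xdc00) + 0x10000 into one term; with these inputs it evaluates to (0x3d << 10) + 0x200 + 0x10000 = 0x1f600.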
+
+#define PUTC(c) \
+do { \
+ if (pos < size) \
+ buf[pos] = (c); \
+ ++pos; \
+} while (0)
+
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list ap)
+{
+ /* The maximum space required is to print a 64-bit number in octal */
+ char tmp[(sizeof(unsigned long long) * 8 + 2) / 3];
+ char *tmp_end = &tmp[ARRAY_SIZE(tmp)];
+ long long num;
+ int base;
+ const char *s;
+ size_t len, pos;
+ char sign;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars from string */
+ int qualifier; /* 'h', 'hh', 'l' or 'll' for integer fields */
+
+ va_list args;
+
+ /*
+ * We want to pass our input va_list to helper functions by reference,
+ * but there's an annoying edge case. If va_list was originally passed
+ * to us by value, we could just pass &ap down to the helpers. This is
+ * the case on, for example, X86_32.
+ * However, on X86_64 (and possibly others), va_list is actually a
+ * size-1 array containing a structure. Our function parameter ap has
+ * decayed from T[1] to T*, and &ap has type T** rather than T(*)[1],
+ * which is what will be expected by a function taking a va_list *
+ * parameter.
+ * One standard way to solve this mess is by creating a copy in a local
+ * variable of type va_list and then passing a pointer to that local
+ * copy instead, which is what we do here.
+ */
+ va_copy(args, ap);
+
+ for (pos = 0; *fmt; ++fmt) {
+ if (*fmt != '%' || *++fmt == '%') {
+ PUTC(*fmt);
+ continue;
+ }
+
+ /* process flags */
+ flags = get_flags(&fmt);
+
+ /* get field width */
+ field_width = get_int(&fmt, &args);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+
+ if (flags & LEFT)
+ flags &= ~ZEROPAD;
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ precision = get_int(&fmt, &args);
+ if (precision >= 0)
+ flags &= ~ZEROPAD;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l') {
+ qualifier = *fmt;
+ ++fmt;
+ if (qualifier == *fmt) {
+ qualifier -= 'a'-'A';
+ ++fmt;
+ }
+ }
+
+ sign = 0;
+
+ switch (*fmt) {
+ case 'c':
+ flags &= LEFT;
+ s = tmp;
+ if (qualifier == 'l') {
+ ((u16 *)tmp)[0] = (u16)va_arg(args, unsigned int);
+ ((u16 *)tmp)[1] = L'\0';
+ precision = INT_MAX;
+ goto wstring;
+ } else {
+ tmp[0] = (unsigned char)va_arg(args, int);
+ precision = len = 1;
+ }
+ goto output;
+
+ case 's':
+ flags &= LEFT;
+ if (precision < 0)
+ precision = INT_MAX;
+ s = va_arg(args, void *);
+ if (!s)
+ s = precision < 6 ? "" : "(null)";
+ else if (qualifier == 'l') {
+ wstring:
+ flags |= WIDE;
+ precision = len = utf16s_utf8nlen((const u16 *)s, precision);
+ goto output;
+ }
+ precision = len = strnlen(s, precision);
+ goto output;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'p':
+ if (precision < 0)
+ precision = 2 * sizeof(void *);
+ fallthrough;
+ case 'x':
+ flags |= SMALL;
+ fallthrough;
+ case 'X':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ fallthrough;
+ case 'u':
+ flags &= ~SPECIAL;
+ base = 10;
+ break;
+
+ default:
+ /*
+ * Bail out if the conversion specifier is invalid.
+ * There's probably a typo in the format string and the
+ * remaining specifiers are unlikely to match up with
+ * the arguments.
+ */
+ goto fail;
+ }
+ if (*fmt == 'p') {
+ num = (unsigned long)va_arg(args, void *);
+ } else {
+ num = get_number(flags & SIGN, qualifier, &args);
+ }
+
+ sign = get_sign(&num, flags);
+ if (sign)
+ --field_width;
+
+ s = number(tmp_end, num, base, flags & SMALL);
+ len = tmp_end - s;
+ /* default precision is 1 */
+ if (precision < 0)
+ precision = 1;
+ /* precision is minimum number of digits to print */
+ if (precision < len)
+ precision = len;
+ if (flags & SPECIAL) {
+ /*
+ * For octal, a leading 0 is printed only if necessary,
+ * i.e. if it's not already there because of the
+ * precision.
+ */
+ if (base == 8 && precision == len)
+ ++precision;
+ /*
+ * For hexadecimal, the leading 0x is skipped if the
+ * output is empty, i.e. both the number and the
+ * precision are 0.
+ */
+ if (base == 16 && precision > 0)
+ field_width -= 2;
+ else
+ flags &= ~SPECIAL;
+ }
+ /*
+ * For zero padding, increase the precision to fill the field
+ * width.
+ */
+ if ((flags & ZEROPAD) && field_width > precision)
+ precision = field_width;
+
+output:
+ /* Calculate the padding necessary */
+ field_width -= precision;
+ /* Leading padding with ' ' */
+ if (!(flags & LEFT))
+ while (field_width-- > 0)
+ PUTC(' ');
+ /* sign */
+ if (sign)
+ PUTC(sign);
+ /* 0x/0X for hexadecimal */
+ if (flags & SPECIAL) {
+ PUTC('0');
+ PUTC('X' | (flags & SMALL));
+ }
+ /* Zero padding and excess precision */
+ while (precision-- > len)
+ PUTC('0');
+ /* Actual output */
+ if (flags & WIDE) {
+ const u16 *ws = (const u16 *)s;
+
+ while (len-- > 0) {
+ u32 c32 = utf16_to_utf32(&ws);
+ u8 *s8;
+ size_t clen;
+
+ if (c32 < 0x80) {
+ PUTC(c32);
+ continue;
+ }
+
+ /* Number of trailing octets */
+ clen = 1 + (c32 >= 0x800) + (c32 >= 0x10000);
+
+ len -= clen;
+ s8 = (u8 *)&buf[pos];
+
+ /* Avoid writing partial character */
+ PUTC('\0');
+ pos += clen;
+ if (pos >= size)
+ continue;
+
+ /* Set high bits of leading octet */
+ *s8 = (0xf00 >> 1) >> clen;
+ /* Write trailing octets in reverse order */
+ for (s8 += clen; clen; --clen, c32 >>= 6)
+ *s8-- = 0x80 | (c32 & 0x3f);
+ /* Set low bits of leading octet */
+ *s8 |= c32;
+ }
+ } else {
+ while (len-- > 0)
+ PUTC(*s++);
+ }
+ /* Trailing padding with ' ' */
+ while (field_width-- > 0)
+ PUTC(' ');
+ }
+fail:
+ va_end(args);
+
+ if (size)
+ buf[min(pos, size-1)] = '\0';
+
+ return pos;
+}
+
+int snprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+ return i;
+}
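
A hedged usage sketch of this printf subset; note that %ls consumes a UTF-16 (u16) string and transcodes it to UTF-8 on output, which is what makes it useful against EFI firmware strings:

    /* Illustrative fragment; vendor[] stands in for a firmware string. */
    static const u16 vendor[] = { 'E', 'D', 'K', ' ', 'I', 'I', 0 };
    char buf[64];

    snprintf(buf, sizeof(buf), "rev %u.%u, vendor %ls", 2u, 70u, vendor);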
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 05ccb229fb45..5a48d996ed71 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -20,21 +20,9 @@
/* Maximum physical address for 64-bit kernel with 4-level paging */
#define MAXMEM_X86_64_4LEVEL (1ull << 46)
-static efi_system_table_t *sys_table __efistub_global;
-extern const bool efi_is64;
+const efi_system_table_t *efi_system_table;
extern u32 image_offset;
-
-__pure efi_system_table_t *efi_system_table(void)
-{
- return sys_table;
-}
-
-__attribute_const__ bool efi_is_64bit(void)
-{
- if (IS_ENABLED(CONFIG_EFI_MIXED))
- return efi_is64;
- return IS_ENABLED(CONFIG_X86_64);
-}
+static efi_loaded_image_t *image = NULL;
static efi_status_t
preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
@@ -62,7 +50,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
(void **)&rom);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to allocate memory for 'rom'\n");
+ efi_err("Failed to allocate memory for 'rom'\n");
return status;
}
@@ -78,7 +66,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
PCI_VENDOR_ID, 1, &rom->vendor);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to read rom->vendor\n");
+ efi_err("Failed to read rom->vendor\n");
goto free_struct;
}
@@ -86,7 +74,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
PCI_DEVICE_ID, 1, &rom->devid);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to read rom->devid\n");
+ efi_err("Failed to read rom->devid\n");
goto free_struct;
}
@@ -131,7 +119,7 @@ static void setup_efi_pci(struct boot_params *params)
(void **)&pci_handle);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to allocate memory for 'pci_handle'\n");
+ efi_err("Failed to allocate memory for 'pci_handle'\n");
return;
}
@@ -185,7 +173,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
return;
if (efi_table_attr(p, version) != 0x10000) {
- efi_printk("Unsupported properties proto version\n");
+ efi_err("Unsupported properties proto version\n");
return;
}
@@ -198,7 +186,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
size + sizeof(struct setup_data),
(void **)&new);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to allocate memory for 'properties'\n");
+ efi_err("Failed to allocate memory for 'properties'\n");
return;
}
@@ -227,7 +215,7 @@ static const efi_char16_t apple[] = L"Apple";
static void setup_quirks(struct boot_params *boot_params)
{
efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
- efi_table_attr(efi_system_table(), fw_vendor);
+ efi_table_attr(efi_system_table, fw_vendor);
if (!memcmp(fw_vendor, apple, sizeof(apple))) {
if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
@@ -368,7 +356,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
{
struct boot_params *boot_params;
struct setup_header *hdr;
- efi_loaded_image_t *image;
void *image_base;
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
int options_size = 0;
@@ -377,28 +364,29 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
unsigned long ramdisk_addr;
unsigned long ramdisk_size;
- sys_table = sys_table_arg;
+ efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
- if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
+ efi_err("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
efi_exit(handle, status);
}
image_base = efi_table_attr(image, image_base);
image_offset = (void *)startup_32 - image_base;
- status = efi_allocate_pages(0x4000, (unsigned long *)&boot_params, ULONG_MAX);
+ status = efi_allocate_pages(sizeof(struct boot_params),
+ (unsigned long *)&boot_params, ULONG_MAX);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to allocate lowmem for boot params\n");
+ efi_err("Failed to allocate lowmem for boot params\n");
efi_exit(handle, status);
}
- memset(boot_params, 0x0, 0x4000);
+ memset(boot_params, 0x0, sizeof(struct boot_params));
hdr = &boot_params->hdr;
@@ -416,43 +404,21 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->type_of_loader = 0x21;
/* Convert unicode cmdline to ascii */
- cmdline_ptr = efi_convert_cmdline(image, &options_size, ULONG_MAX);
+ cmdline_ptr = efi_convert_cmdline(image, &options_size);
if (!cmdline_ptr)
goto fail;
- hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
- /* Fill in upper bits of command line address, NOP on 32 bit */
- boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
+ efi_set_u64_split((unsigned long)cmdline_ptr,
+ &hdr->cmd_line_ptr, &boot_params->ext_cmd_line_ptr);
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
- if (efi_is_native()) {
- status = efi_parse_options(cmdline_ptr);
- if (status != EFI_SUCCESS)
- goto fail2;
-
- if (!noinitrd()) {
- status = efi_load_initrd(image, &ramdisk_addr,
- &ramdisk_size,
- hdr->initrd_addr_max,
- ULONG_MAX);
- if (status != EFI_SUCCESS)
- goto fail2;
- hdr->ramdisk_image = ramdisk_addr & 0xffffffff;
- hdr->ramdisk_size = ramdisk_size & 0xffffffff;
- boot_params->ext_ramdisk_image = (u64)ramdisk_addr >> 32;
- boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32;
- }
- }
-
- efi_stub_entry(handle, sys_table, boot_params);
+ efi_stub_entry(handle, sys_table_arg, boot_params);
/* not reached */
-fail2:
- efi_free(options_size, (unsigned long)cmdline_ptr);
fail:
- efi_free(0x4000, (unsigned long)boot_params);
+ efi_free(sizeof(struct boot_params), (unsigned long)boot_params);
efi_exit(handle, status);
}
@@ -606,24 +572,18 @@ static efi_status_t allocate_e820(struct boot_params *params,
struct setup_data **e820ext,
u32 *e820ext_size)
{
- unsigned long map_size, desc_size, buff_size;
- struct efi_boot_memmap boot_map;
- efi_memory_desc_t *map;
+ unsigned long map_size, desc_size, map_key;
efi_status_t status;
- __u32 nr_desc;
+ __u32 nr_desc, desc_version;
- boot_map.map = &map;
- boot_map.map_size = &map_size;
- boot_map.desc_size = &desc_size;
- boot_map.desc_ver = NULL;
- boot_map.key_ptr = NULL;
- boot_map.buff_size = &buff_size;
+ /* Only need the size of the mem map and size of each mem descriptor */
+ map_size = 0;
+ status = efi_bs_call(get_memory_map, &map_size, NULL, &map_key,
+ &desc_size, &desc_version);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return (status != EFI_SUCCESS) ? status : EFI_UNSUPPORTED;
- status = efi_get_memory_map(&boot_map);
- if (status != EFI_SUCCESS)
- return status;
-
- nr_desc = buff_size / desc_size;
+ nr_desc = map_size / desc_size + EFI_MMAP_NR_SLACK_SLOTS;
if (nr_desc > ARRAY_SIZE(params->e820_table)) {
u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
@@ -651,17 +611,14 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
: EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
- p->efi->efi_systab = (unsigned long)efi_system_table();
+ efi_set_u64_split((unsigned long)efi_system_table,
+ &p->efi->efi_systab, &p->efi->efi_systab_hi);
p->efi->efi_memdesc_size = *map->desc_size;
p->efi->efi_memdesc_version = *map->desc_ver;
- p->efi->efi_memmap = (unsigned long)*map->map;
+ efi_set_u64_split((unsigned long)*map->map,
+ &p->efi->efi_memmap, &p->efi->efi_memmap_hi);
p->efi->efi_memmap_size = *map->map_size;
-#ifdef CONFIG_X86_64
- p->efi->efi_systab_hi = (unsigned long)efi_system_table() >> 32;
- p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
-#endif
-
return EFI_SUCCESS;
}
@@ -717,12 +674,11 @@ unsigned long efi_main(efi_handle_t handle,
unsigned long buffer_start, buffer_end;
struct setup_header *hdr = &boot_params->hdr;
efi_status_t status;
- unsigned long cmdline_paddr;
- sys_table = sys_table_arg;
+ efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
- if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
/*
@@ -765,7 +721,7 @@ unsigned long efi_main(efi_handle_t handle,
hdr->kernel_alignment,
LOAD_PHYSICAL_ADDR);
if (status != EFI_SUCCESS) {
- efi_printk("efi_relocate_kernel() failed!\n");
+ efi_err("efi_relocate_kernel() failed!\n");
goto fail;
}
/*
@@ -776,35 +732,48 @@ unsigned long efi_main(efi_handle_t handle,
image_offset = 0;
}
- /*
- * efi_pe_entry() may have been called before efi_main(), in which
- * case this is the second time we parse the cmdline. This is ok,
- * parsing the cmdline multiple times does not have side-effects.
- */
- cmdline_paddr = ((u64)hdr->cmd_line_ptr |
- ((u64)boot_params->ext_cmd_line_ptr << 32));
- efi_parse_options((char *)cmdline_paddr);
+#ifdef CONFIG_CMDLINE_BOOL
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+ }
+#endif
+ if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr |
+ ((u64)boot_params->ext_cmd_line_ptr << 32));
+ status = efi_parse_options((char *)cmdline_paddr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+ }
+ }
/*
- * At this point, an initrd may already have been loaded, either by
- * the bootloader and passed via bootparams, or loaded from a initrd=
- * command line option by efi_pe_entry() above. In either case, we
- * permit an initrd loaded from the LINUX_EFI_INITRD_MEDIA_GUID device
- * path to supersede it.
+ * At this point, an initrd may already have been loaded by the
+ * bootloader and passed via bootparams. We permit an initrd loaded
+ * from the LINUX_EFI_INITRD_MEDIA_GUID device path to supersede it.
+ *
+ * If the device path is not present, any command-line initrd=
+ * arguments will be processed only if image is not NULL, which will be
+ * the case only if we were loaded via the PE entry point.
*/
- if (!noinitrd()) {
+ if (!efi_noinitrd) {
unsigned long addr, size;
- status = efi_load_initrd_dev_path(&addr, &size, ULONG_MAX);
- if (status == EFI_SUCCESS) {
- hdr->ramdisk_image = (u32)addr;
- hdr->ramdisk_size = (u32)size;
- boot_params->ext_ramdisk_image = (u64)addr >> 32;
- boot_params->ext_ramdisk_size = (u64)size >> 32;
- } else if (status != EFI_NOT_FOUND) {
- efi_printk("efi_load_initrd_dev_path() failed!\n");
+ status = efi_load_initrd(image, &addr, &size,
+ hdr->initrd_addr_max, ULONG_MAX);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to load initrd!\n");
goto fail;
}
+ if (size > 0) {
+ efi_set_u64_split(addr, &hdr->ramdisk_image,
+ &boot_params->ext_ramdisk_image);
+ efi_set_u64_split(size, &hdr->ramdisk_size,
+ &boot_params->ext_ramdisk_size);
+ }
}
/*
@@ -829,13 +798,13 @@ unsigned long efi_main(efi_handle_t handle,
status = exit_boot(boot_params, handle);
if (status != EFI_SUCCESS) {
- efi_printk("exit_boot() failed!\n");
+ efi_err("exit_boot() failed!\n");
goto fail;
}
return bzimage_addr;
fail:
- efi_printk("efi_main() failed!\n");
+ efi_err("efi_main() failed!\n");
efi_exit(handle, status);
}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 7baf48c01e72..ddf9eae396fe 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -70,9 +70,6 @@ copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src,
return 0;
}
- if (!access_ok(src, 1))
- return -EFAULT;
-
buf = memdup_user(src, len);
if (IS_ERR(buf)) {
*dst = NULL;
@@ -91,9 +88,6 @@ copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src,
static inline int
get_ucs2_strsize_from_user(efi_char16_t __user *src, size_t *len)
{
- if (!access_ok(src, 1))
- return -EFAULT;
-
*len = user_ucs2_strsize(src);
if (*len == 0)
return -EFAULT;
@@ -118,9 +112,6 @@ copy_ucs2_from_user(efi_char16_t **dst, efi_char16_t __user *src)
{
size_t len;
- if (!access_ok(src, 1))
- return -EFAULT;
-
len = user_ucs2_strsize(src);
if (len == 0)
return -EFAULT;
@@ -142,9 +133,6 @@ copy_ucs2_to_user_len(efi_char16_t __user *dst, efi_char16_t *src, size_t len)
if (!src)
return 0;
- if (!access_ok(dst, 1))
- return -EFAULT;
-
return copy_to_user(dst, src, len);
}
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 55b031d2c989..c1955d320fec 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -62,8 +62,11 @@ int __init efi_tpm_eventlog_init(void)
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);
- if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR)
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+ log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
goto out;
+ }
final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index f71eaa5bf52d..2ab048222fe9 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -8,7 +8,6 @@
*/
#include <linux/err.h>
-#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
@@ -38,6 +37,7 @@ struct imx_sc_ipc {
struct device *dev;
struct mutex lock;
struct completion done;
+ bool fast_ipc;
/* temporarily store the SCU msg */
u32 *msg;
@@ -115,6 +115,7 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
struct imx_sc_rpc_msg *hdr;
u32 *data = msg;
+ int i;
if (!sc_ipc->msg) {
dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
@@ -122,6 +123,19 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
return;
}
+ if (sc_ipc->fast_ipc) {
+ hdr = msg;
+ sc_ipc->rx_size = hdr->size;
+ sc_ipc->msg[0] = *data++;
+
+ for (i = 1; i < sc_ipc->rx_size; i++)
+ sc_ipc->msg[i] = *data++;
+
+ complete(&sc_ipc->done);
+
+ return;
+ }
+
if (sc_chan->idx == 0) {
hdr = msg;
sc_ipc->rx_size = hdr->size;
@@ -143,20 +157,22 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
{
- struct imx_sc_rpc_msg *hdr = msg;
+ struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg;
struct imx_sc_chan *sc_chan;
u32 *data = msg;
int ret;
+ int size;
int i;
/* Check size */
- if (hdr->size > IMX_SC_RPC_MAX_MSG)
+ if (hdr.size > IMX_SC_RPC_MAX_MSG)
return -EINVAL;
- dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc,
- hdr->func, hdr->size);
+ dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc,
+ hdr.func, hdr.size);
- for (i = 0; i < hdr->size; i++) {
+ size = sc_ipc->fast_ipc ? 1 : hdr.size;
+ for (i = 0; i < size; i++) {
sc_chan = &sc_ipc->chans[i % 4];
/*
@@ -168,8 +184,10 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
* Wait for tx_done before every send to ensure that no
* queueing happens at the mailbox channel level.
*/
- wait_for_completion(&sc_chan->tx_done);
- reinit_completion(&sc_chan->tx_done);
+ if (!sc_ipc->fast_ipc) {
+ wait_for_completion(&sc_chan->tx_done);
+ reinit_completion(&sc_chan->tx_done);
+ }
ret = mbox_send_message(sc_chan->ch, &data[i]);
if (ret < 0)
@@ -246,6 +264,8 @@ static int imx_scu_probe(struct platform_device *pdev)
struct imx_sc_chan *sc_chan;
struct mbox_client *cl;
char *chan_name;
+ struct of_phandle_args args;
+ int num_channel;
int ret;
int i;
@@ -253,11 +273,20 @@ static int imx_scu_probe(struct platform_device *pdev)
if (!sc_ipc)
return -ENOMEM;
- for (i = 0; i < SCU_MU_CHAN_NUM; i++) {
- if (i < 4)
+ ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
+ "#mbox-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+
+ num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
+ for (i = 0; i < num_channel; i++) {
+ if (i < num_channel / 2)
chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
else
- chan_name = kasprintf(GFP_KERNEL, "rx%d", i - 4);
+ chan_name = kasprintf(GFP_KERNEL, "rx%d",
+ i - num_channel / 2);
if (!chan_name)
return -ENOMEM;
@@ -269,19 +298,22 @@ static int imx_scu_probe(struct platform_device *pdev)
cl->knows_txdone = true;
cl->rx_callback = imx_scu_rx_callback;
- /* Initial tx_done completion as "done" */
- cl->tx_done = imx_scu_tx_done;
- init_completion(&sc_chan->tx_done);
- complete(&sc_chan->tx_done);
+ if (!sc_ipc->fast_ipc) {
+ /* Initial tx_done completion as "done" */
+ cl->tx_done = imx_scu_tx_done;
+ init_completion(&sc_chan->tx_done);
+ complete(&sc_chan->tx_done);
+ }
sc_chan->sc_ipc = sc_ipc;
- sc_chan->idx = i % 4;
+ sc_chan->idx = i % (num_channel / 2);
sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(sc_chan->ch)) {
ret = PTR_ERR(sc_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
chan_name, ret);
+ kfree(chan_name);
return ret;
}
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 2937d44b5df4..92013ecc2d9e 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -46,25 +46,14 @@
* require cooperation with a Trusted OS driver.
*/
static int resident_cpu = -1;
+struct psci_operations psci_ops;
+static enum arm_smccc_conduit psci_conduit = SMCCC_CONDUIT_NONE;
bool psci_tos_resident_on(int cpu)
{
return cpu == resident_cpu;
}
-struct psci_operations psci_ops = {
- .conduit = SMCCC_CONDUIT_NONE,
- .smccc_version = SMCCC_VERSION_1_0,
-};
-
-enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
-{
- if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
- return SMCCC_CONDUIT_NONE;
-
- return psci_ops.conduit;
-}
-
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
static psci_fn *invoke_psci_fn;
@@ -242,7 +231,7 @@ static void set_conduit(enum arm_smccc_conduit conduit)
WARN(1, "Unexpected PSCI conduit %d\n", conduit);
}
- psci_ops.conduit = conduit;
+ psci_conduit = conduit;
}
static int get_set_conduit_method(struct device_node *np)
@@ -411,8 +400,8 @@ static void __init psci_init_smccc(void)
if (feature != PSCI_RET_NOT_SUPPORTED) {
u32 ret;
ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
- if (ret == ARM_SMCCC_VERSION_1_1) {
- psci_ops.smccc_version = SMCCC_VERSION_1_1;
+ if (ret >= ARM_SMCCC_VERSION_1_1) {
+ arm_smccc_version_init(ret, psci_conduit);
ver = ret;
}
}
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index 8532e7c78ef7..eba6b60bfb61 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -56,7 +56,7 @@ struct scm_legacy_command {
__le32 buf_offset;
__le32 resp_hdr_offset;
__le32 id;
- __le32 buf[0];
+ __le32 buf[];
};
/**
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 059bb0fbae9e..0e7233a20f34 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -6,7 +6,6 @@
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
-#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -806,8 +805,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
struct qcom_scm_mem_map_info *mem_to_map;
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
- phys_addr_t ptr_phys;
- dma_addr_t ptr_dma;
+ dma_addr_t ptr_phys;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
@@ -824,10 +822,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
ALIGN(dest_sz, SZ_64);
- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
- ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
/* Fill source vmid detail */
src = ptr;
@@ -855,7 +852,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
ptr_phys, src_sz, dest_phys, dest_sz);
- dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
+ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
@@ -943,7 +940,7 @@ bool qcom_scm_hdcp_available(void)
qcom_scm_clk_disable();
- return ret > 0 ? true : false;
+ return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index da26a584dca0..ef8098856a47 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -12,6 +12,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf))
@@ -19,6 +21,8 @@
#define MBOX_DATA28(msg) ((msg) & ~0xf)
#define MBOX_CHAN_PROPERTY 8
+#define VL805_PCI_CONFIG_VERSION_OFFSET 0x50
+
static struct platform_device *rpi_hwmon;
static struct platform_device *rpi_clk;
@@ -182,16 +186,10 @@ rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
RPI_FIRMWARE_GET_FIRMWARE_REVISION,
&packet, sizeof(packet));
- if (ret == 0) {
- struct tm tm;
-
- time64_to_tm(packet, 0, &tm);
+ if (ret)
+ return;
- dev_info(fw->cl.dev,
- "Attached to firmware from %04ld-%02d-%02d %02d:%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
- tm.tm_hour, tm.tm_min);
- }
+ dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &packet);
}
static void
@@ -286,6 +284,63 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
}
EXPORT_SYMBOL_GPL(rpi_firmware_get);
+/*
+ * The Raspberry Pi 4 gets its USB functionality from VL805, a PCIe chip that
+ * implements xHCI. After a PCI reset, VL805's firmware may either be loaded
+ * directly from an EEPROM or, if not present, by the SoC's co-processor,
+ * VideoCore. RPi4's VideoCore OS contains both the non-public firmware load
+ * logic and the VL805 firmware blob. This function triggers that loading
+ * process.
+ */
+int rpi_firmware_init_vl805(struct pci_dev *pdev)
+{
+ struct device_node *fw_np;
+ struct rpi_firmware *fw;
+ u32 dev_addr, version;
+ int ret;
+
+ fw_np = of_find_compatible_node(NULL, NULL,
+ "raspberrypi,bcm2835-firmware");
+ if (!fw_np)
+ return 0;
+
+ fw = rpi_firmware_get(fw_np);
+ of_node_put(fw_np);
+ if (!fw)
+ return -ENODEV;
+
+ /*
+ * Make sure we don't trigger a firmware load unnecessarily.
+ *
+ * If something went wrong with PCI, this whole exercise would be
+ * futile anyway, as VideoCore expects a fully configured PCI bus from
+ * us. Just take the faulty version (likely ~0) and let xHCI's
+ * registration fail further down the line.
+ */
+ pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET, &version);
+ if (version)
+ goto exit;
+
+ dev_addr = pdev->bus->number << 20 | PCI_SLOT(pdev->devfn) << 15 |
+ PCI_FUNC(pdev->devfn) << 12;
+
+ ret = rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_XHCI_RESET,
+ &dev_addr, sizeof(dev_addr));
+ if (ret)
+ return ret;
+
+ /* Wait for VL805 to start up */
+ usleep_range(200, 1000);
+
+ pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET,
+ &version);
+exit:
+ pci_info(pdev, "VL805 firmware version %08x\n", version);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_init_vl805);
+
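A hedged sketch of the intended consumer: an xHCI PCI reset quirk (the function name below is hypothetical) would call this after reset, before the controller is handed to the driver:

    /* Hypothetical quirk hook; real wiring belongs in the xHCI PCI code. */
    static void example_vl805_reset_quirk(struct pci_dev *pdev)
    {
            int ret = rpi_firmware_init_vl805(pdev);

            if (ret)
                    pci_err(pdev, "Failed to init VL805 firmware: %d\n", ret);
    }
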
static const struct of_device_id rpi_firmware_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-firmware", },
{},
diff --git a/drivers/firmware/smccc/Kconfig b/drivers/firmware/smccc/Kconfig
new file mode 100644
index 000000000000..27b675d76235
--- /dev/null
+++ b/drivers/firmware/smccc/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HAVE_ARM_SMCCC
+ bool
+ help
+ Include support for the Secure Monitor Call (SMC) and Hypervisor
+ Call (HVC) instructions on Armv7 and above architectures.
+
+config HAVE_ARM_SMCCC_DISCOVERY
+ bool
+ depends on ARM_PSCI_FW
+ default y
+ help
+ SMCCC v1.0 lacked discoverability and hence PSCI v1.0 was updated
+ to add an SMCCC discovery mechanism through the PSCI firmware
+ implementation of PSCI_FEATURES(SMCCC_VERSION), which returns
+ success on firmware compliant with SMCCC v1.1 and above.
diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile
new file mode 100644
index 000000000000..6f369fe3f0b9
--- /dev/null
+++ b/drivers/firmware/smccc/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
new file mode 100644
index 000000000000..4e80921ee212
--- /dev/null
+++ b/drivers/firmware/smccc/smccc.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Arm Limited
+ */
+
+#define pr_fmt(fmt) "smccc: " fmt
+
+#include <linux/init.h>
+#include <linux/arm-smccc.h>
+
+static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
+static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
+
+void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
+{
+ smccc_version = version;
+ smccc_conduit = conduit;
+}
+
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
+{
+ if (smccc_version < ARM_SMCCC_VERSION_1_1)
+ return SMCCC_CONDUIT_NONE;
+
+ return smccc_conduit;
+}
+
+u32 arm_smccc_get_version(void)
+{
+ return smccc_version;
+}
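
With the version and conduit now cached here, callers can gate SMCCC v1.1 fast calls on the conduit; a minimal sketch, assuming the usual <linux/arm-smccc.h> definitions:

    /* Sketch: decide whether the v1.1 calling convention is usable. */
    static bool smccc_11_available(void)
    {
            switch (arm_smccc_1_1_get_conduit()) {
            case SMCCC_CONDUIT_SMC:
            case SMCCC_CONDUIT_HVC:
                    return true;    /* arm_smccc_1_1_invoke() may be used */
            default:
                    return false;   /* fall back to per-call discovery */
            }
    }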
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index f8533338b018..4379475c99ed 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -72,7 +72,7 @@ static void rsu_status_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
- if (data->status == BIT(SVC_STATUS_RSU_OK)) {
+ if (data->status == BIT(SVC_STATUS_OK)) {
priv->status.version = FIELD_GET(RSU_VERSION_MASK,
res->a2);
priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
@@ -108,9 +108,9 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
{
struct stratix10_rsu_priv *priv = client->priv;
- if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support notify\n");
- else if (data->status == BIT(SVC_STATUS_RSU_ERROR))
+ else if (data->status == BIT(SVC_STATUS_ERROR))
dev_err(client->dev, "Failure, returned status is %lu\n",
BIT(data->status));
@@ -133,9 +133,9 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
unsigned int *counter = (unsigned int *)data->kaddr1;
- if (data->status == BIT(SVC_STATUS_RSU_OK))
+ if (data->status == BIT(SVC_STATUS_OK))
priv->retry_counter = *counter;
- else if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
dev_err(client->dev, "Failed to get retry counter %lu\n",
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index d5f0769f3761..e0db8dbfc9d1 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -214,7 +214,7 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
complete(&ctrl->complete_status);
break;
}
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_DONE);
+ cb_data->status = BIT(SVC_STATUS_BUFFER_DONE);
cb_data->kaddr1 = svc_pa_to_va(res.a1);
cb_data->kaddr2 = (res.a2) ?
svc_pa_to_va(res.a2) : NULL;
@@ -227,7 +227,7 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
__func__);
}
} while (res.a0 == INTEL_SIP_SMC_STATUS_OK ||
- res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY ||
+ res.a0 == INTEL_SIP_SMC_STATUS_BUSY ||
wait_for_completion_timeout(&ctrl->complete_status, timeout));
}
@@ -250,7 +250,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
cb_data->kaddr1 = NULL;
cb_data->kaddr2 = NULL;
cb_data->kaddr3 = NULL;
- cb_data->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+ cb_data->status = BIT(SVC_STATUS_ERROR);
pr_debug("%s: polling config status\n", __func__);
@@ -259,7 +259,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
0, 0, 0, 0, 0, 0, 0, &res);
if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
- (res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR))
+ (res.a0 == INTEL_SIP_SMC_STATUS_ERROR))
break;
/*
@@ -271,7 +271,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
}
if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
- cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
}
@@ -294,24 +294,18 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
switch (p_data->command) {
case COMMAND_RECONFIG:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_REQUEST_OK);
+ case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ cb_data->status = BIT(SVC_STATUS_OK);
break;
case COMMAND_RECONFIG_DATA_SUBMIT:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
- break;
- case COMMAND_NOOP:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
- cb_data->kaddr1 = svc_pa_to_va(res.a1);
+ cb_data->status = BIT(SVC_STATUS_BUFFER_SUBMITTED);
break;
case COMMAND_RECONFIG_STATUS:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
- break;
- case COMMAND_RSU_UPDATE:
- case COMMAND_RSU_NOTIFY:
- cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
break;
case COMMAND_RSU_RETRY:
- cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
default:
@@ -430,9 +424,9 @@ static int svc_normal_to_secure_thread(void *data)
if (pdata->command == COMMAND_RSU_STATUS) {
if (res.a0 == INTEL_SIP_SMC_RSU_ERROR)
- cbdata->status = BIT(SVC_STATUS_RSU_ERROR);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
else
- cbdata->status = BIT(SVC_STATUS_RSU_OK);
+ cbdata->status = BIT(SVC_STATUS_OK);
cbdata->kaddr1 = &res;
cbdata->kaddr2 = NULL;
@@ -445,7 +439,7 @@ static int svc_normal_to_secure_thread(void *data)
case INTEL_SIP_SMC_STATUS_OK:
svc_thread_recv_status_ok(pdata, cbdata, res);
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
+ case INTEL_SIP_SMC_STATUS_BUSY:
switch (pdata->command) {
case COMMAND_RECONFIG_DATA_SUBMIT:
svc_thread_cmd_data_claim(ctrl,
@@ -460,33 +454,13 @@ static int svc_normal_to_secure_thread(void *data)
break;
}
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED:
+ case INTEL_SIP_SMC_STATUS_REJECTED:
pr_debug("%s: STATUS_REJECTED\n", __func__);
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
+ case INTEL_SIP_SMC_STATUS_ERROR:
case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
- switch (pdata->command) {
- /* for FPGA mgr */
- case COMMAND_RECONFIG_DATA_CLAIM:
- case COMMAND_RECONFIG:
- case COMMAND_RECONFIG_DATA_SUBMIT:
- case COMMAND_RECONFIG_STATUS:
- cbdata->status =
- BIT(SVC_STATUS_RECONFIG_ERROR);
- break;
-
- /* for RSU */
- case COMMAND_RSU_STATUS:
- case COMMAND_RSU_UPDATE:
- case COMMAND_RSU_NOTIFY:
- case COMMAND_RSU_RETRY:
- cbdata->status =
- BIT(SVC_STATUS_RSU_ERROR);
- break;
- }
-
- cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
@@ -502,7 +476,7 @@ static int svc_normal_to_secure_thread(void *data)
if ((pdata->command == COMMAND_RSU_RETRY) ||
(pdata->command == COMMAND_RSU_NOTIFY)) {
cbdata->status =
- BIT(SVC_STATUS_RSU_NO_SUPPORT);
+ BIT(SVC_STATUS_NO_SUPPORT);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index ea308751635f..63ab21d89c2c 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -176,7 +176,7 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
if (!priv->tx.pool) {
dev_err(bpmp->dev, "TX shmem pool not found\n");
- return -ENOMEM;
+ return -EPROBE_DEFER;
}
priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
@@ -188,7 +188,7 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
if (!priv->rx.pool) {
dev_err(bpmp->dev, "RX shmem pool not found\n");
- err = -ENOMEM;
+ err = -EPROBE_DEFER;
goto free_tx;
}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 6741fcda0c37..fe6702df24bf 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -6,6 +6,7 @@
#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -869,12 +870,8 @@ static struct platform_driver tegra_bpmp_driver = {
.name = "tegra-bpmp",
.of_match_table = tegra_bpmp_match,
.pm = &tegra_bpmp_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = tegra_bpmp_probe,
};
-
-static int __init tegra_bpmp_init(void)
-{
- return platform_driver_register(&tegra_bpmp_driver);
-}
-core_initcall(tegra_bpmp_init);
+builtin_platform_driver(tegra_bpmp_driver);
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
index fc544e19b0a1..1389fa9418a7 100644
--- a/drivers/firmware/trusted_foundations.c
+++ b/drivers/firmware/trusted_foundations.c
@@ -19,6 +19,7 @@
#define TF_CACHE_ENABLE 1
#define TF_CACHE_DISABLE 2
+#define TF_CACHE_REENABLE 4
#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
@@ -29,6 +30,7 @@
#define TF_CPU_PM_S1 0xffffffe4
#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
+static unsigned long tf_idle_mode = TF_PM_MODE_NONE;
static unsigned long cpu_boot_addr;
static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
@@ -85,25 +87,40 @@ static int tf_prepare_idle(unsigned long mode)
cpu_boot_addr);
break;
+ case TF_PM_MODE_NONE:
+ break;
+
default:
return -EINVAL;
}
+ tf_idle_mode = mode;
+
return 0;
}
#ifdef CONFIG_CACHE_L2X0
static void tf_cache_write_sec(unsigned long val, unsigned int reg)
{
- u32 l2x0_way_mask = 0xff;
+ u32 enable_op, l2x0_way_mask = 0xff;
switch (reg) {
case L2X0_CTRL:
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
l2x0_way_mask = 0xffff;
+ switch (tf_idle_mode) {
+ case TF_PM_MODE_LP2:
+ enable_op = TF_CACHE_REENABLE;
+ break;
+
+ default:
+ enable_op = TF_CACHE_ENABLE;
+ break;
+ }
+
if (val == L2X0_CTRL_EN)
- tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_ENABLE,
+ tf_generic_smc(TF_CACHE_MAINT, enable_op,
l2x0_saved_regs.aux_ctrl);
else
tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 43bc6cfdab45..99606b34975e 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -85,14 +85,13 @@ static int get_pm_api_id(char *pm_api_req, u32 *pm_id)
static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 pm_api_version;
int ret;
struct zynqmp_pm_query_data qdata = {0};
switch (pm_id) {
case PM_GET_API_VERSION:
- ret = eemi_ops->get_api_version(&pm_api_version);
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
@@ -102,7 +101,7 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
qdata.arg2 = pm_api_arg[2];
qdata.arg3 = pm_api_arg[3];
- ret = eemi_ops->query_data(qdata, pm_api_ret);
+ ret = zynqmp_pm_query_data(qdata, pm_api_ret);
if (ret)
break;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 41b65164a367..8d1ff2454e2e 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2014-2020 Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -24,8 +24,6 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
-static const struct zynqmp_eemi_ops *eemi_ops_tbl;
-
static bool feature_check_enabled;
static u32 zynqmp_pm_features[PM_API_MAX];
@@ -219,7 +217,7 @@ static u32 pm_tz_version;
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_get_api_version(u32 *version)
+int zynqmp_pm_get_api_version(u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -237,6 +235,7 @@ static int zynqmp_pm_get_api_version(u32 *version)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_api_version);
/**
* zynqmp_pm_get_chipid - Get silicon ID registers
@@ -246,7 +245,7 @@ static int zynqmp_pm_get_api_version(u32 *version)
* Return: Returns the status of the operation and the idcode and version
* registers in @idcode and @version.
*/
-static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -260,6 +259,7 @@ static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
@@ -324,7 +324,7 @@ static int get_set_conduit_method(struct device_node *np)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
{
int ret;
@@ -338,6 +338,7 @@ static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
*/
return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_query_data);
/**
* zynqmp_pm_clock_enable() - Enable the clock for given id
@@ -348,10 +349,11 @@ static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_enable(u32 clock_id)
+int zynqmp_pm_clock_enable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
/**
* zynqmp_pm_clock_disable() - Disable the clock for given id
@@ -362,10 +364,11 @@ static int zynqmp_pm_clock_enable(u32 clock_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_disable(u32 clock_id)
+int zynqmp_pm_clock_disable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable);
/**
* zynqmp_pm_clock_getstate() - Get the clock state for given id
@@ -377,7 +380,7 @@ static int zynqmp_pm_clock_disable(u32 clock_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -388,6 +391,7 @@ static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate);
/**
* zynqmp_pm_clock_setdivider() - Set the clock divider for given id
@@ -399,11 +403,12 @@ static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider);
/**
* zynqmp_pm_clock_getdivider() - Get the clock divider for given id
@@ -415,7 +420,7 @@ static int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -426,6 +431,7 @@ static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider);
/**
* zynqmp_pm_clock_setrate() - Set the clock rate for given id
@@ -436,13 +442,14 @@ static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
lower_32_bits(rate),
upper_32_bits(rate),
0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate);
/**
* zynqmp_pm_clock_getrate() - Get the clock rate for given id
@@ -454,7 +461,7 @@ static int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -465,6 +472,7 @@ static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
/**
* zynqmp_pm_clock_setparent() - Set the clock parent for given id
@@ -475,11 +483,12 @@ static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
parent_id, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent);
/**
* zynqmp_pm_clock_getparent() - Get the clock parent for given id
@@ -491,7 +500,7 @@ static int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -502,48 +511,191 @@ static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent);
/**
- * zynqmp_is_valid_ioctl() - Check whether IOCTL ID is valid or not
- * @ioctl_id: IOCTL ID
+ * zynqmp_pm_set_pll_frac_mode() - PM API for setting PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode (PLL_MODE_FRAC/PLL_MODE_INT)
+ *
+ * This function sets the PLL mode.
*
- * Return: 1 if IOCTL is valid else 0
+ * Return: Returns status, either success or error+reason
*/
-static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
{
- switch (ioctl_id) {
- case IOCTL_SD_DLL_RESET:
- case IOCTL_SET_SD_TAPDELAY:
- case IOCTL_SET_PLL_FRAC_MODE:
- case IOCTL_GET_PLL_FRAC_MODE:
- case IOCTL_SET_PLL_FRAC_DATA:
- case IOCTL_GET_PLL_FRAC_DATA:
- return 1;
- default:
- return 0;
- }
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE,
+ clk_id, mode, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
/**
- * zynqmp_pm_ioctl() - PM IOCTL API for device control and configs
- * @node_id: Node ID of the device
- * @ioctl_id: ID of the requested IOCTL
- * @arg1: Argument 1 to requested IOCTL call
- * @arg2: Argument 2 to requested IOCTL call
- * @out: Returned output value
+ * zynqmp_pm_get_pll_frac_mode() - PM API for getting PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode
*
- * This function calls IOCTL to firmware for device control and configuration.
+ * This function returns the current PLL mode.
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
- u32 *out)
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
{
- if (!zynqmp_is_valid_ioctl(ioctl_id))
- return -EINVAL;
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE,
+ clk_id, 0, mode);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
+
+/**
+ * zynqmp_pm_set_pll_frac_data() - PM API for setting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function sets the fraction data; it is valid only in fractional mode.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA,
+ clk_id, data, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
+
+/**
+ * zynqmp_pm_get_pll_frac_data() - PM API for getting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function returns the fraction data value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA,
+ clk_id, 0, data);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
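
Taken together, the four PLL helpers support the usual sequence of switching a PLL to fractional mode and then programming its fraction. A sketch, under the assumption that PLL_MODE_FRAC (the constant named in the kernel-doc above) is visible to the caller, with an arbitrary example fraction:

static int example_enable_frac_mode(u32 pll_id)
{
	int ret;

	ret = zynqmp_pm_set_pll_frac_mode(pll_id, PLL_MODE_FRAC);
	if (ret)
		return ret;

	/* 0x1000 is an illustrative fraction, valid only in frac mode. */
	return zynqmp_pm_set_pll_frac_data(pll_id, 0x1000);
}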
+
+/**
+ * zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
+ *
+ * @node_id: Node ID of the device
+ * @type: Type of tap delay to set (input/output)
+ * @value: Value to set for the tap delay
+ *
+ * This function sets input/output tap delay for the SD device.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
+ type, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
+
+/**
+ * zynqmp_pm_sd_dll_reset() - Reset DLL logic
+ *
+ * @node_id: Node ID of the device
+ * @type: Reset type
+ *
+ * This function resets DLL logic for the SD device.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+	return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
+ type, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
+
+/**
+ * zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
+ * @index: GGS register index
+ * @value: Register value to be written
+ *
+ * This function writes the value to the given GGS register.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS,
+ index, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
+
+/**
+ * zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs)
+ * @index: GGS register index
+ * @value: Register value read from GGS
+ *
+ * This function returns the GGS register value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS,
+ index, 0, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
+
+/**
+ * zynqmp_pm_write_pggs() - PM API for writing persistent global general
+ * storage (pggs)
+ * @index: PGGS register index
+ * @value: Register value to be written
+ *
+ * This function writes the value to the given PGGS register.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
+
+/**
+ * zynqmp_pm_read_pggs() - PM API for reading persistent global general
+ *			    storage (pggs)
+ * @index: PGGS register index
+ * @value: Register value read from PGGS
+ *
+ * This function returns the PGGS register value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0,
+ value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
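
Note that the read helpers hand the @value pointer straight to zynqmp_pm_invoke_fn() as the return payload, so callers must pass a PAYLOAD_ARG_CNT-sized buffer and pick the register contents out of word 1, exactly as the sysfs handlers below do. A read-modify-write sketch for GGS register 0 (the index is illustrative):

static int example_ggs_set_bit0(void)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	ret = zynqmp_pm_read_ggs(0, ret_payload);
	if (ret)
		return ret;

	/* The register contents arrive in the second payload word. */
	return zynqmp_pm_write_ggs(0, ret_payload[1] | BIT(0));
}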
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, ioctl_id,
- arg1, arg2, out);
+/**
+ * zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
+ * @value: Status value to be written
+ *
+ * This function sets the healthy bit value to indicate boot health status
+ * to the firmware.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS,
+ value, 0, NULL);
}
/**
@@ -554,12 +706,13 @@ static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
- const enum zynqmp_pm_reset_action assert_flag)
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
{
return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
/**
* zynqmp_pm_reset_get_status - Get status of the reset
@@ -568,8 +721,7 @@ static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
- u32 *status)
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -583,6 +735,7 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_get_status);
/**
* zynqmp_pm_fpga_load - Perform the fpga load
@@ -597,12 +750,12 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
- const u32 flags)
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
{
return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
upper_32_bits(address), size, flags, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_load);
/**
* zynqmp_pm_fpga_get_status - Read value from PCAP status register
@@ -613,7 +766,7 @@ static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_fpga_get_status(u32 *value)
+int zynqmp_pm_fpga_get_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -626,6 +779,7 @@ static int zynqmp_pm_fpga_get_status(u32 *value)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
@@ -636,10 +790,11 @@ static int zynqmp_pm_fpga_get_status(u32 *value)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_init_finalize(void)
+int zynqmp_pm_init_finalize(void)
{
return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
/**
* zynqmp_pm_set_suspend_mode() - Set system suspend mode
@@ -649,10 +804,11 @@ static int zynqmp_pm_init_finalize(void)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_set_suspend_mode(u32 mode)
+int zynqmp_pm_set_suspend_mode(u32 mode)
{
return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
/**
* zynqmp_pm_request_node() - Request a node with specific capabilities
@@ -666,13 +822,13 @@ static int zynqmp_pm_set_suspend_mode(u32 mode)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack)
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
qos, ack, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
/**
* zynqmp_pm_release_node() - Release a node
@@ -684,10 +840,11 @@ static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_release_node(const u32 node)
+int zynqmp_pm_release_node(const u32 node)
{
return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);
/**
* zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
@@ -701,13 +858,14 @@ static int zynqmp_pm_release_node(const u32 node)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack)
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
qos, ack, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
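
The request/requirement/release triple follows the usual EEMI lifecycle for a PM slave. A sketch using the capability and ack constants from the firmware header; the node id is whatever the caller's device maps to:

static int example_claim_node(u32 node_id)
{
	int ret;

	ret = zynqmp_pm_request_node(node_id, ZYNQMP_PM_CAPABILITY_ACCESS,
				     0, ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	if (ret)
		return ret;

	ret = zynqmp_pm_set_requirement(node_id, ZYNQMP_PM_CAPABILITY_ACCESS,
					0, ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	if (ret)
		zynqmp_pm_release_node(node_id);

	return ret;
}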
/**
* zynqmp_pm_aes - Access AES hardware to encrypt/decrypt the data using
@@ -717,7 +875,7 @@ static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
*
* Return: Returns status, either success or error code.
*/
-static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+int zynqmp_pm_aes_engine(const u64 address, u32 *out)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -732,47 +890,304 @@ static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
-static const struct zynqmp_eemi_ops eemi_ops = {
- .get_api_version = zynqmp_pm_get_api_version,
- .get_chipid = zynqmp_pm_get_chipid,
- .query_data = zynqmp_pm_query_data,
- .clock_enable = zynqmp_pm_clock_enable,
- .clock_disable = zynqmp_pm_clock_disable,
- .clock_getstate = zynqmp_pm_clock_getstate,
- .clock_setdivider = zynqmp_pm_clock_setdivider,
- .clock_getdivider = zynqmp_pm_clock_getdivider,
- .clock_setrate = zynqmp_pm_clock_setrate,
- .clock_getrate = zynqmp_pm_clock_getrate,
- .clock_setparent = zynqmp_pm_clock_setparent,
- .clock_getparent = zynqmp_pm_clock_getparent,
- .ioctl = zynqmp_pm_ioctl,
- .reset_assert = zynqmp_pm_reset_assert,
- .reset_get_status = zynqmp_pm_reset_get_status,
- .init_finalize = zynqmp_pm_init_finalize,
- .set_suspend_mode = zynqmp_pm_set_suspend_mode,
- .request_node = zynqmp_pm_request_node,
- .release_node = zynqmp_pm_release_node,
- .set_requirement = zynqmp_pm_set_requirement,
- .fpga_load = zynqmp_pm_fpga_load,
- .fpga_get_status = zynqmp_pm_fpga_get_status,
- .aes = zynqmp_pm_aes_engine,
+/**
+ * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
+ * @type: Shutdown or restart? 0 for shutdown, 1 for restart
+ * @subtype: Specifies which system should be restarted or shut down
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
+ 0, 0, NULL);
+}
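
A caller sketch, assuming ZYNQMP_PM_SHUTDOWN_TYPE_RESET from the same enum family as the ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY value used below:

/* Restart only the processing system, leaving the PL configured. */
static int example_ps_only_restart(void)
{
	return zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
					 ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY);
}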
+
+/**
+ * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
+ * @subtype: Shutdown subtype
+ * @name: Matching string for scope argument
+ *
+ * This struct encapsulates mapping between shutdown scope ID and string.
+ */
+struct zynqmp_pm_shutdown_scope {
+ const enum zynqmp_pm_shutdown_subtype subtype;
+ const char *name;
+};
+
+static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ .name = "subsystem",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ .name = "ps_only",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ .name = "system",
+ },
};
+static struct zynqmp_pm_shutdown_scope *selected_scope =
+ &shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
+
/**
- * zynqmp_pm_get_eemi_ops - Get eemi ops functions
+ * zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
+ * @scope_string: Shutdown scope string
*
- * Return: Pointer of eemi_ops structure
+ * Return: Pointer to the matching shutdown scope struct from the array of
+ *	   available options if the string is valid, otherwise NULL.
*/
-const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
+static struct zynqmp_pm_shutdown_scope*
+ zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
+{
+ int count;
+
+ for (count = 0; count < ARRAY_SIZE(shutdown_scopes); count++)
+ if (sysfs_streq(scope_string, shutdown_scopes[count].name))
+ return &shutdown_scopes[count];
+
+ return NULL;
+}
+
+static ssize_t shutdown_scope_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
+ if (&shutdown_scopes[i] == selected_scope) {
+ strcat(buf, "[");
+ strcat(buf, shutdown_scopes[i].name);
+ strcat(buf, "]");
+ } else {
+ strcat(buf, shutdown_scopes[i].name);
+ }
+ strcat(buf, " ");
+ }
+ strcat(buf, "\n");
+
+ return strlen(buf);
+}
+
+static ssize_t shutdown_scope_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct zynqmp_pm_shutdown_scope *scope;
+
+ scope = zynqmp_pm_is_shutdown_scope_valid(buf);
+ if (!scope)
+ return -EINVAL;
+
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ scope->subtype);
+ if (ret) {
+ pr_err("unable to set shutdown scope %s\n", buf);
+ return ret;
+ }
+
+ selected_scope = scope;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(shutdown_scope);
+
+static ssize_t health_status_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_boot_health_status(value);
+ if (ret) {
+ dev_err(device, "unable to set healthy bit value to %u\n",
+ value);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(health_status);
+
+static ssize_t ggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_ggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t ggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
+{
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_ggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+err:
+ return count;
+}
+
+/* GGS register show functions */
+#define GGS0_SHOW(N) \
+ ssize_t ggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return ggs_show(device, attr, buf, N); \
+ }
+
+static GGS0_SHOW(0);
+static GGS0_SHOW(1);
+static GGS0_SHOW(2);
+static GGS0_SHOW(3);
+
+/* GGS register store function */
+#define GGS0_STORE(N) \
+ ssize_t ggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return ggs_store(device, attr, buf, count, N); \
+ }
+
+static GGS0_STORE(0);
+static GGS0_STORE(1);
+static GGS0_STORE(2);
+static GGS0_STORE(3);
+
+static ssize_t pggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_pggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t pggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
{
- if (eemi_ops_tbl)
- return eemi_ops_tbl;
- else
- return ERR_PTR(-EPROBE_DEFER);
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_pggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+
+err:
+ return count;
}
-EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
+
+#define PGGS0_SHOW(N) \
+ ssize_t pggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return pggs_show(device, attr, buf, N); \
+ }
+
+#define PGGS0_STORE(N) \
+ ssize_t pggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return pggs_store(device, attr, buf, count, N); \
+ }
+
+/* PGGS register show functions */
+static PGGS0_SHOW(0);
+static PGGS0_SHOW(1);
+static PGGS0_SHOW(2);
+static PGGS0_SHOW(3);
+
+/* PGGS register store functions */
+static PGGS0_STORE(0);
+static PGGS0_STORE(1);
+static PGGS0_STORE(2);
+static PGGS0_STORE(3);
+
+/* GGS register attributes */
+static DEVICE_ATTR_RW(ggs0);
+static DEVICE_ATTR_RW(ggs1);
+static DEVICE_ATTR_RW(ggs2);
+static DEVICE_ATTR_RW(ggs3);
+
+/* PGGS register attributes */
+static DEVICE_ATTR_RW(pggs0);
+static DEVICE_ATTR_RW(pggs1);
+static DEVICE_ATTR_RW(pggs2);
+static DEVICE_ATTR_RW(pggs3);
+
+static struct attribute *zynqmp_firmware_attrs[] = {
+ &dev_attr_ggs0.attr,
+ &dev_attr_ggs1.attr,
+ &dev_attr_ggs2.attr,
+ &dev_attr_ggs3.attr,
+ &dev_attr_pggs0.attr,
+ &dev_attr_pggs1.attr,
+ &dev_attr_pggs2.attr,
+ &dev_attr_pggs3.attr,
+ &dev_attr_shutdown_scope.attr,
+ &dev_attr_health_status.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(zynqmp_firmware);
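
For reference, ATTRIBUTE_GROUPS(zynqmp_firmware) expands to roughly the following, producing the zynqmp_firmware_groups array that is wired into the driver's dev_groups further down:

static const struct attribute_group zynqmp_firmware_group = {
	.attrs = zynqmp_firmware_attrs,
};

static const struct attribute_group *zynqmp_firmware_groups[] = {
	&zynqmp_firmware_group,
	NULL,
};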
static int zynqmp_firmware_probe(struct platform_device *pdev)
{
@@ -820,11 +1235,6 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Trustzone version v%d.%d\n", __func__,
pm_tz_version >> 16, pm_tz_version & 0xFFFF);
- /* Assign eemi_ops_table */
- eemi_ops_tbl = &eemi_ops;
-
- zynqmp_pm_api_debugfs_init();
-
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
if (ret) {
@@ -832,6 +1242,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
return ret;
}
+ zynqmp_pm_api_debugfs_init();
+
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
@@ -854,6 +1266,7 @@ static struct platform_driver zynqmp_firmware_driver = {
.driver = {
.name = "zynqmp_firmware",
.of_match_table = zynqmp_firmware_of_match,
+ .dev_groups = zynqmp_firmware_groups,
},
.probe = zynqmp_firmware_probe,
.remove = zynqmp_firmware_remove,
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 72380e1d31c7..b2408a710662 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -156,7 +156,7 @@ config FPGA_DFL
config FPGA_DFL_FME
tristate "FPGA DFL FME Driver"
- depends on FPGA_DFL && HWMON
+ depends on FPGA_DFL && HWMON && PERF_EVENTS
help
The FPGA Management Engine (FME) is a feature device implemented
under Device Feature List (DFL) framework. Select this option to
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 4865b74b00a4..d8e21dfc6778 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
+dfl-fme-objs += dfl-fme-perf.o
dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
dfl-afu-objs += dfl-afu-error.o
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 62f924489db5..02d8cbad1ae2 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -61,10 +61,10 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
region->pages);
if (pinned < 0) {
ret = pinned;
- goto put_pages;
+ goto free_pages;
} else if (pinned != npages) {
ret = -EFAULT;
- goto free_pages;
+ goto put_pages;
}
dev_dbg(dev, "%d pages pinned\n", pinned);
@@ -324,10 +324,6 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
if (user_addr + length < user_addr)
return -EINVAL;
- if (!access_ok((void __user *)(unsigned long)user_addr,
- length))
- return -EINVAL;
-
region = kzalloc(sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 65437b6a6842..b0c31789a909 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -561,14 +561,16 @@ static int afu_open(struct inode *inode, struct file *filp)
if (WARN_ON(!pdata))
return -ENODEV;
- ret = dfl_feature_dev_use_begin(pdata);
- if (ret)
- return ret;
-
- dev_dbg(&fdev->dev, "Device File Open\n");
- filp->private_data = fdev;
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ if (!ret) {
+ dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+ dfl_feature_dev_use_count(pdata));
+ filp->private_data = fdev;
+ }
+ mutex_unlock(&pdata->lock);
- return 0;
+ return ret;
}
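
With this change an AFU device can be opened by several users at once; passing O_EXCL claims it exclusively. A userspace sketch against a hypothetical /dev/dfl-port.0 node:

#include <fcntl.h>

/* Shared open: succeeds as long as nobody holds the device exclusively. */
int fd = open("/dev/dfl-port.0", O_RDWR);

/* Exclusive open: fails with EBUSY once any other open is outstanding. */
int xfd = open("/dev/dfl-port.0", O_RDWR | O_EXCL);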
static int afu_release(struct inode *inode, struct file *filp)
@@ -581,12 +583,14 @@ static int afu_release(struct inode *inode, struct file *filp)
pdata = dev_get_platdata(&pdev->dev);
mutex_lock(&pdata->lock);
- __port_reset(pdev);
- afu_dma_region_destroy(pdata);
- mutex_unlock(&pdata->lock);
-
dfl_feature_dev_use_end(pdata);
+ if (!dfl_feature_dev_use_count(pdata)) {
+ __port_reset(pdev);
+ afu_dma_region_destroy(pdata);
+ }
+ mutex_unlock(&pdata->lock);
+
return 0;
}
@@ -746,6 +750,12 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
+static const struct vm_operations_struct afu_vma_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct platform_device *pdev = filp->private_data;
@@ -775,6 +785,9 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
!(region.flags & DFL_PORT_REGION_WRITE))
return -EPERM;
+ /* Support debug access to the mapping */
+ vma->vm_ops = &afu_vma_ops;
+
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start,
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 1d4690c99268..fc210d4e1863 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -580,6 +580,10 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
.ops = &fme_power_mgmt_ops,
},
{
+ .id_table = fme_perf_id_table,
+ .ops = &fme_perf_ops,
+ },
+ {
.ops = NULL,
},
};
@@ -600,14 +604,16 @@ static int fme_open(struct inode *inode, struct file *filp)
if (WARN_ON(!pdata))
return -ENODEV;
- ret = dfl_feature_dev_use_begin(pdata);
- if (ret)
- return ret;
-
- dev_dbg(&fdev->dev, "Device File Open\n");
- filp->private_data = pdata;
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ if (!ret) {
+ dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+ dfl_feature_dev_use_count(pdata));
+ filp->private_data = pdata;
+ }
+ mutex_unlock(&pdata->lock);
- return 0;
+ return ret;
}
static int fme_release(struct inode *inode, struct file *filp)
@@ -616,7 +622,10 @@ static int fme_release(struct inode *inode, struct file *filp)
struct platform_device *pdev = pdata->dev;
dev_dbg(&pdev->dev, "Device File Release\n");
+
+ mutex_lock(&pdata->lock);
dfl_feature_dev_use_end(pdata);
+ mutex_unlock(&pdata->lock);
return 0;
}
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
new file mode 100644
index 000000000000..6ce1ed222ea4
--- /dev/null
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -0,0 +1,1020 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Global Performance Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xu Yilun <yilun.xu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel, Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/perf_event.h>
+#include "dfl.h"
+#include "dfl-fme.h"
+
+/*
+ * Performance Counter Registers for Cache.
+ *
+ * Cache Events are listed below as CACHE_EVNT_*.
+ */
+#define CACHE_CTRL 0x8
+#define CACHE_RESET_CNTR BIT_ULL(0)
+#define CACHE_FREEZE_CNTR BIT_ULL(8)
+#define CACHE_CTRL_EVNT GENMASK_ULL(19, 16)
+#define CACHE_EVNT_RD_HIT 0x0
+#define CACHE_EVNT_WR_HIT 0x1
+#define CACHE_EVNT_RD_MISS 0x2
+#define CACHE_EVNT_WR_MISS 0x3
+#define CACHE_EVNT_RSVD 0x4
+#define CACHE_EVNT_HOLD_REQ 0x5
+#define CACHE_EVNT_DATA_WR_PORT_CONTEN 0x6
+#define CACHE_EVNT_TAG_WR_PORT_CONTEN 0x7
+#define CACHE_EVNT_TX_REQ_STALL 0x8
+#define CACHE_EVNT_RX_REQ_STALL 0x9
+#define CACHE_EVNT_EVICTIONS 0xa
+#define CACHE_EVNT_MAX CACHE_EVNT_EVICTIONS
+#define CACHE_CHANNEL_SEL BIT_ULL(20)
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+#define CACHE_CNTR0 0x10
+#define CACHE_CNTR1 0x18
+#define CACHE_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define CACHE_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Fabric.
+ *
+ * Fabric Events are listed below as FAB_EVNT_*
+ */
+#define FAB_CTRL 0x20
+#define FAB_RESET_CNTR BIT_ULL(0)
+#define FAB_FREEZE_CNTR BIT_ULL(8)
+#define FAB_CTRL_EVNT GENMASK_ULL(19, 16)
+#define FAB_EVNT_PCIE0_RD 0x0
+#define FAB_EVNT_PCIE0_WR 0x1
+#define FAB_EVNT_PCIE1_RD 0x2
+#define FAB_EVNT_PCIE1_WR 0x3
+#define FAB_EVNT_UPI_RD 0x4
+#define FAB_EVNT_UPI_WR 0x5
+#define FAB_EVNT_MMIO_RD 0x6
+#define FAB_EVNT_MMIO_WR 0x7
+#define FAB_EVNT_MAX FAB_EVNT_MMIO_WR
+#define FAB_PORT_ID GENMASK_ULL(21, 20)
+#define FAB_PORT_FILTER BIT_ULL(23)
+#define FAB_PORT_FILTER_DISABLE 0
+#define FAB_PORT_FILTER_ENABLE 1
+#define FAB_CNTR 0x28
+#define FAB_CNTR_EVNT_CNTR GENMASK_ULL(59, 0)
+#define FAB_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Clock.
+ *
+ * Clock Counter can't be reset or frozen by SW.
+ */
+#define CLK_CNTR 0x30
+#define BASIC_EVNT_CLK 0x0
+#define BASIC_EVNT_MAX BASIC_EVNT_CLK
+
+/*
+ * Performance Counter Registers for IOMMU / VT-D.
+ *
+ * VT-D Events are listed below as VTD_EVNT_* and VTD_SIP_EVNT_*
+ */
+#define VTD_CTRL 0x38
+#define VTD_RESET_CNTR BIT_ULL(0)
+#define VTD_FREEZE_CNTR BIT_ULL(8)
+#define VTD_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_EVNT_AFU_MEM_RD_TRANS 0x0
+#define VTD_EVNT_AFU_MEM_WR_TRANS 0x1
+#define VTD_EVNT_AFU_DEVTLB_RD_HIT 0x2
+#define VTD_EVNT_AFU_DEVTLB_WR_HIT 0x3
+#define VTD_EVNT_DEVTLB_4K_FILL 0x4
+#define VTD_EVNT_DEVTLB_2M_FILL 0x5
+#define VTD_EVNT_DEVTLB_1G_FILL 0x6
+#define VTD_EVNT_MAX VTD_EVNT_DEVTLB_1G_FILL
+#define VTD_CNTR 0x40
+#define VTD_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define VTD_SIP_CTRL 0x48
+#define VTD_SIP_RESET_CNTR BIT_ULL(0)
+#define VTD_SIP_FREEZE_CNTR BIT_ULL(8)
+#define VTD_SIP_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_SIP_EVNT_IOTLB_4K_HIT 0x0
+#define VTD_SIP_EVNT_IOTLB_2M_HIT 0x1
+#define VTD_SIP_EVNT_IOTLB_1G_HIT 0x2
+#define VTD_SIP_EVNT_SLPWC_L3_HIT 0x3
+#define VTD_SIP_EVNT_SLPWC_L4_HIT 0x4
+#define VTD_SIP_EVNT_RCC_HIT 0x5
+#define VTD_SIP_EVNT_IOTLB_4K_MISS 0x6
+#define VTD_SIP_EVNT_IOTLB_2M_MISS 0x7
+#define VTD_SIP_EVNT_IOTLB_1G_MISS 0x8
+#define VTD_SIP_EVNT_SLPWC_L3_MISS 0x9
+#define VTD_SIP_EVNT_SLPWC_L4_MISS 0xa
+#define VTD_SIP_EVNT_RCC_MISS 0xb
+#define VTD_SIP_EVNT_MAX VTD_SIP_EVNT_RCC_MISS
+#define VTD_SIP_CNTR 0x50
+#define VTD_SIP_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_SIP_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define PERF_TIMEOUT 30
+
+#define PERF_MAX_PORT_NUM 1U
+
+/**
+ * struct fme_perf_priv - priv data structure for fme perf driver
+ *
+ * @dev: parent device.
+ * @ioaddr: mapped base address of mmio region.
+ * @pmu: pmu data structure for fme perf counters.
+ * @id: id of this fme performance report private feature.
+ * @fab_users: current user number on fabric counters.
+ * @fab_port_id: used to indicate current working mode of fabric counters.
+ * @fab_lock: lock to protect fabric counters working mode.
+ * @cpu: active CPU to which the PMU is bound for accesses.
+ * @node: node for CPU hotplug notifier link.
+ * @cpuhp_state: state for CPU hotplug notification.
+ */
+struct fme_perf_priv {
+ struct device *dev;
+ void __iomem *ioaddr;
+ struct pmu pmu;
+ u64 id;
+
+ u32 fab_users;
+ u32 fab_port_id;
+ spinlock_t fab_lock;
+
+ unsigned int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
+};
+
+/**
+ * struct fme_perf_event_ops - callbacks for fme perf events
+ *
+ * @event_init: callback invoked during event init.
+ * @event_destroy: callback invoked during event destroy.
+ * @read_counter: callback to read hardware counters.
+ */
+struct fme_perf_event_ops {
+ int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid);
+ void (*event_destroy)(struct fme_perf_priv *priv, u32 event,
+ u32 portid);
+ u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid);
+};
+
+#define to_fme_perf_priv(_pmu) container_of(_pmu, struct fme_perf_priv, pmu)
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct fme_perf_priv *priv;
+
+ priv = to_fme_perf_priv(pmu);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *fme_perf_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group fme_perf_cpumask_group = {
+ .attrs = fme_perf_cpumask_attrs,
+};
+
+#define FME_EVENT_MASK GENMASK_ULL(11, 0)
+#define FME_EVENT_SHIFT 0
+#define FME_EVTYPE_MASK GENMASK_ULL(15, 12)
+#define FME_EVTYPE_SHIFT 12
+#define FME_EVTYPE_BASIC 0
+#define FME_EVTYPE_CACHE 1
+#define FME_EVTYPE_FABRIC 2
+#define FME_EVTYPE_VTD 3
+#define FME_EVTYPE_VTD_SIP 4
+#define FME_EVTYPE_MAX FME_EVTYPE_VTD_SIP
+#define FME_PORTID_MASK GENMASK_ULL(23, 16)
+#define FME_PORTID_SHIFT 16
+#define FME_PORTID_ROOT (0xffU)
+
+#define get_event(_config) FIELD_GET(FME_EVENT_MASK, _config)
+#define get_evtype(_config) FIELD_GET(FME_EVTYPE_MASK, _config)
+#define get_portid(_config) FIELD_GET(FME_PORTID_MASK, _config)
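
As a worked example of this layout: fabric mmio_read counted FPGA-wide is event 0x6, evtype 0x2 and portid 0xff (root), i.e. config 0xff2006; the same event filtered to port 0 clears the portid field, giving 0x2006. Decoding with the helpers above:

u64 config = 0xff2006;			/* fab_mmio_read, FPGA-wide */
u32 event  = get_event(config);		/* 0x6  == FAB_EVNT_MMIO_RD */
u32 evtype = get_evtype(config);	/* 0x2  == FME_EVTYPE_FABRIC */
u32 portid = get_portid(config);	/* 0xff == FME_PORTID_ROOT */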
+
+PMU_FORMAT_ATTR(event, "config:0-11");
+PMU_FORMAT_ATTR(evtype, "config:12-15");
+PMU_FORMAT_ATTR(portid, "config:16-23");
+
+static struct attribute *fme_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_evtype.attr,
+ &format_attr_portid.attr,
+ NULL,
+};
+
+static struct attribute_group fme_perf_format_group = {
+ .name = "format",
+ .attrs = fme_perf_format_attrs,
+};
+
+/*
+ * There are no default events, but we need to create
+ * "events" group (with empty attrs) before updating
+ * it with detected events (using pmu->attr_update).
+ */
+static struct attribute *fme_perf_events_attrs_empty[] = {
+ NULL,
+};
+
+static struct attribute_group fme_perf_events_group = {
+ .name = "events",
+ .attrs = fme_perf_events_attrs_empty,
+};
+
+static const struct attribute_group *fme_perf_groups[] = {
+ &fme_perf_format_group,
+ &fme_perf_cpumask_group,
+ &fme_perf_events_group,
+ NULL,
+};
+
+static bool is_portid_root(u32 portid)
+{
+ return portid == FME_PORTID_ROOT;
+}
+
+static bool is_portid_port(u32 portid)
+{
+ return portid < PERF_MAX_PORT_NUM;
+}
+
+static bool is_portid_root_or_port(u32 portid)
+{
+ return is_portid_root(portid) || is_portid_port(portid);
+}
+
+static u64 fme_read_perf_cntr_reg(void __iomem *addr)
+{
+ u32 low;
+ u64 v;
+
+ /*
+	 * For 64bit counter registers, the counter may increment and carry
+	 * out of bit [31] between the two 32bit reads. So read the register
+	 * again until the low word did not wrap in between, to avoid
+	 * returning a torn value. This only happens on platforms that don't
+	 * support 64bit reads - readq is split into 2 readl.
+ */
+ do {
+ v = readq(addr);
+ low = readl(addr);
+ } while (((u32)v) > low);
+
+ return v;
+}
+
+static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (event <= BASIC_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 basic_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+
+ return fme_read_perf_cntr_reg(base + CLK_CNTR);
+}
+
+static int cache_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= CACHE_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 cache_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v, count;
+ u8 channel;
+
+ if (event == CACHE_EVNT_WR_HIT || event == CACHE_EVNT_WR_MISS ||
+ event == CACHE_EVNT_DATA_WR_PORT_CONTEN ||
+ event == CACHE_EVNT_TAG_WR_PORT_CONTEN)
+ channel = CACHE_CHANNEL_WR;
+ else
+ channel = CACHE_CHANNEL_RD;
+
+ /* set channel access type and cache event code. */
+ v = readq(base + CACHE_CTRL);
+ v &= ~(CACHE_CHANNEL_SEL | CACHE_CTRL_EVNT);
+ v |= FIELD_PREP(CACHE_CHANNEL_SEL, channel);
+ v |= FIELD_PREP(CACHE_CTRL_EVNT, event);
+ writeq(v, base + CACHE_CTRL);
+
+ if (readq_poll_timeout_atomic(base + CACHE_CNTR0, v,
+ FIELD_GET(CACHE_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched cache event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR0);
+ count = FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR1);
+ count += FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+
+ return count;
+}
+
+static bool is_fabric_event_supported(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ if (event > FAB_EVNT_MAX || !is_portid_root_or_port(portid))
+ return false;
+
+ if (priv->id == FME_FEATURE_ID_GLOBAL_DPERF &&
+ (event == FAB_EVNT_PCIE1_RD || event == FAB_EVNT_UPI_RD ||
+ event == FAB_EVNT_PCIE1_WR || event == FAB_EVNT_UPI_WR))
+ return false;
+
+ return true;
+}
+
+static int fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ int ret = 0;
+ u64 v;
+
+ if (!is_fabric_event_supported(priv, event, portid))
+ return -EINVAL;
+
+ /*
+	 * The fabric counter set can only be in either overall or port mode.
+	 * In overall mode it counts overall data for the FPGA, and in port
+	 * mode it is configured to monitor one individual port.
+	 *
+	 * So every time a new event is initialized, the driver checks the
+	 * current working mode and whether someone else is already using
+	 * this counter set.
+ */
+ spin_lock(&priv->fab_lock);
+ if (priv->fab_users && priv->fab_port_id != portid) {
+ dev_dbg(priv->dev, "conflict fabric event monitoring mode.\n");
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ priv->fab_users++;
+
+ /*
+ * skip if current working mode matches, otherwise change the working
+ * mode per input port_id, to monitor overall data or another port.
+ */
+ if (priv->fab_port_id == portid)
+ goto exit;
+
+ priv->fab_port_id = portid;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~(FAB_PORT_FILTER | FAB_PORT_ID);
+
+ if (is_portid_root(portid)) {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_DISABLE);
+ } else {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_ENABLE);
+ v |= FIELD_PREP(FAB_PORT_ID, portid);
+ }
+ writeq(v, base + FAB_CTRL);
+
+exit:
+ spin_unlock(&priv->fab_lock);
+ return ret;
+}
+
+static void fabric_event_destroy(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ spin_lock(&priv->fab_lock);
+ priv->fab_users--;
+ spin_unlock(&priv->fab_lock);
+}
+
+static u64 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~FAB_CTRL_EVNT;
+ v |= FIELD_PREP(FAB_CTRL_EVNT, event);
+ writeq(v, base + FAB_CTRL);
+
+ if (readq_poll_timeout_atomic(base + FAB_CNTR, v,
+ FIELD_GET(FAB_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched fab event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + FAB_CNTR);
+ return FIELD_GET(FAB_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_EVNT_MAX && is_portid_port(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ event += (portid * (VTD_EVNT_MAX + 1));
+
+ v = readq(base + VTD_CTRL);
+ v &= ~VTD_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_CTRL_EVNT, event);
+ writeq(v, base + VTD_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_CNTR, v,
+ FIELD_GET(VTD_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_CNTR);
+ return FIELD_GET(VTD_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_SIP_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + VTD_SIP_CTRL);
+ v &= ~VTD_SIP_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_SIP_CTRL_EVNT, event);
+ writeq(v, base + VTD_SIP_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_SIP_CNTR, v,
+ FIELD_GET(VTD_SIP_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd sip event code in counter register\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_SIP_CNTR);
+ return FIELD_GET(VTD_SIP_CNTR_EVNT_CNTR, v);
+}
+
+static struct fme_perf_event_ops fme_perf_event_ops[] = {
+ [FME_EVTYPE_BASIC] = {.event_init = basic_event_init,
+ .read_counter = basic_read_event_counter,},
+ [FME_EVTYPE_CACHE] = {.event_init = cache_event_init,
+ .read_counter = cache_read_event_counter,},
+ [FME_EVTYPE_FABRIC] = {.event_init = fabric_event_init,
+ .event_destroy = fabric_event_destroy,
+ .read_counter = fabric_read_event_counter,},
+ [FME_EVTYPE_VTD] = {.event_init = vtd_event_init,
+ .read_counter = vtd_read_event_counter,},
+ [FME_EVTYPE_VTD_SIP] = {.event_init = vtd_sip_event_init,
+ .read_counter = vtd_sip_read_event_counter,},
+};
+
+static ssize_t fme_perf_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+ unsigned long config;
+ char *ptr = buf;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ config = (unsigned long)eattr->var;
+
+ ptr += sprintf(ptr, "event=0x%02x", (unsigned int)get_event(config));
+ ptr += sprintf(ptr, ",evtype=0x%02x", (unsigned int)get_evtype(config));
+
+ if (is_portid_root(get_portid(config)))
+ ptr += sprintf(ptr, ",portid=0x%02x\n", FME_PORTID_ROOT);
+ else
+ ptr += sprintf(ptr, ",portid=?\n");
+
+ return (ssize_t)(ptr - buf);
+}
+
+#define FME_EVENT_ATTR(_name) \
+ __ATTR(_name, 0444, fme_perf_event_show, NULL)
+
+#define FME_PORT_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK))
+
+#define FME_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK) | \
+ (FME_PORTID_ROOT << FME_PORTID_SHIFT))
+
+/* FME Perf Basic Events */
+#define FME_EVENT_BASIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_##_name = { \
+ .attr = FME_EVENT_ATTR(_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_BASIC), \
+}
+
+FME_EVENT_BASIC(clock, BASIC_EVNT_CLK);
+
+static struct attribute *fme_perf_basic_events_attrs[] = {
+ &fme_perf_event_clock.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_basic_events_group = {
+ .name = "events",
+ .attrs = fme_perf_basic_events_attrs,
+};
+
+/* FME Perf Cache Events */
+#define FME_EVENT_CACHE(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_cache_##_name = { \
+ .attr = FME_EVENT_ATTR(cache_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_CACHE), \
+}
+
+FME_EVENT_CACHE(read_hit, CACHE_EVNT_RD_HIT);
+FME_EVENT_CACHE(read_miss, CACHE_EVNT_RD_MISS);
+FME_EVENT_CACHE(write_hit, CACHE_EVNT_WR_HIT);
+FME_EVENT_CACHE(write_miss, CACHE_EVNT_WR_MISS);
+FME_EVENT_CACHE(hold_request, CACHE_EVNT_HOLD_REQ);
+FME_EVENT_CACHE(tx_req_stall, CACHE_EVNT_TX_REQ_STALL);
+FME_EVENT_CACHE(rx_req_stall, CACHE_EVNT_RX_REQ_STALL);
+FME_EVENT_CACHE(eviction, CACHE_EVNT_EVICTIONS);
+FME_EVENT_CACHE(data_write_port_contention, CACHE_EVNT_DATA_WR_PORT_CONTEN);
+FME_EVENT_CACHE(tag_write_port_contention, CACHE_EVNT_TAG_WR_PORT_CONTEN);
+
+static struct attribute *fme_perf_cache_events_attrs[] = {
+ &fme_perf_event_cache_read_hit.attr.attr,
+ &fme_perf_event_cache_read_miss.attr.attr,
+ &fme_perf_event_cache_write_hit.attr.attr,
+ &fme_perf_event_cache_write_miss.attr.attr,
+ &fme_perf_event_cache_hold_request.attr.attr,
+ &fme_perf_event_cache_tx_req_stall.attr.attr,
+ &fme_perf_event_cache_rx_req_stall.attr.attr,
+ &fme_perf_event_cache_eviction.attr.attr,
+ &fme_perf_event_cache_data_write_port_contention.attr.attr,
+ &fme_perf_event_cache_tag_write_port_contention.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+
+ return (priv->id == FME_FEATURE_ID_GLOBAL_IPERF) ? attr->mode : 0;
+}
+
+static const struct attribute_group fme_perf_cache_events_group = {
+ .name = "events",
+ .attrs = fme_perf_cache_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf Fabric Events */
+#define FME_EVENT_FABRIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+#define FME_EVENT_FABRIC_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_port_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+FME_EVENT_FABRIC(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC(mmio_write, FAB_EVNT_MMIO_WR);
+
+FME_EVENT_FABRIC_PORT(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC_PORT(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC_PORT(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC_PORT(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC_PORT(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC_PORT(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC_PORT(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC_PORT(mmio_write, FAB_EVNT_MMIO_WR);
+
+static struct attribute *fme_perf_fabric_events_attrs[] = {
+ &fme_perf_event_fab_pcie0_read.attr.attr,
+ &fme_perf_event_fab_pcie0_write.attr.attr,
+ &fme_perf_event_fab_pcie1_read.attr.attr,
+ &fme_perf_event_fab_pcie1_write.attr.attr,
+ &fme_perf_event_fab_upi_read.attr.attr,
+ &fme_perf_event_fab_upi_write.attr.attr,
+ &fme_perf_event_fab_mmio_read.attr.attr,
+ &fme_perf_event_fab_mmio_write.attr.attr,
+ &fme_perf_event_fab_port_pcie0_read.attr.attr,
+ &fme_perf_event_fab_port_pcie0_write.attr.attr,
+ &fme_perf_event_fab_port_pcie1_read.attr.attr,
+ &fme_perf_event_fab_port_pcie1_write.attr.attr,
+ &fme_perf_event_fab_port_upi_read.attr.attr,
+ &fme_perf_event_fab_port_upi_write.attr.attr,
+ &fme_perf_event_fab_port_mmio_read.attr.attr,
+ &fme_perf_event_fab_port_mmio_write.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_fabric_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+ struct dev_ext_attribute *eattr;
+ unsigned long var;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr.attr);
+ var = (unsigned long)eattr->var;
+
+ if (is_fabric_event_supported(priv, get_event(var), get_portid(var)))
+ return attr->mode;
+
+ return 0;
+}
+
+static const struct attribute_group fme_perf_fabric_events_group = {
+ .name = "events",
+ .attrs = fme_perf_fabric_events_attrs,
+ .is_visible = fme_perf_fabric_events_visible,
+};
+
+/* FME Perf VTD Events */
+#define FME_EVENT_VTD_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_port_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_VTD), \
+}
+
+FME_EVENT_VTD_PORT(read_transaction, VTD_EVNT_AFU_MEM_RD_TRANS);
+FME_EVENT_VTD_PORT(write_transaction, VTD_EVNT_AFU_MEM_WR_TRANS);
+FME_EVENT_VTD_PORT(devtlb_read_hit, VTD_EVNT_AFU_DEVTLB_RD_HIT);
+FME_EVENT_VTD_PORT(devtlb_write_hit, VTD_EVNT_AFU_DEVTLB_WR_HIT);
+FME_EVENT_VTD_PORT(devtlb_4k_fill, VTD_EVNT_DEVTLB_4K_FILL);
+FME_EVENT_VTD_PORT(devtlb_2m_fill, VTD_EVNT_DEVTLB_2M_FILL);
+FME_EVENT_VTD_PORT(devtlb_1g_fill, VTD_EVNT_DEVTLB_1G_FILL);
+
+static struct attribute *fme_perf_vtd_events_attrs[] = {
+ &fme_perf_event_vtd_port_read_transaction.attr.attr,
+ &fme_perf_event_vtd_port_write_transaction.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_read_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_write_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_4k_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_2m_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_1g_fill.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf VTD SIP Events */
+#define FME_EVENT_VTD_SIP(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_sip_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_sip_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_VTD_SIP), \
+}
+
+FME_EVENT_VTD_SIP(iotlb_4k_hit, VTD_SIP_EVNT_IOTLB_4K_HIT);
+FME_EVENT_VTD_SIP(iotlb_2m_hit, VTD_SIP_EVNT_IOTLB_2M_HIT);
+FME_EVENT_VTD_SIP(iotlb_1g_hit, VTD_SIP_EVNT_IOTLB_1G_HIT);
+FME_EVENT_VTD_SIP(slpwc_l3_hit, VTD_SIP_EVNT_SLPWC_L3_HIT);
+FME_EVENT_VTD_SIP(slpwc_l4_hit, VTD_SIP_EVNT_SLPWC_L4_HIT);
+FME_EVENT_VTD_SIP(rcc_hit, VTD_SIP_EVNT_RCC_HIT);
+FME_EVENT_VTD_SIP(iotlb_4k_miss, VTD_SIP_EVNT_IOTLB_4K_MISS);
+FME_EVENT_VTD_SIP(iotlb_2m_miss, VTD_SIP_EVNT_IOTLB_2M_MISS);
+FME_EVENT_VTD_SIP(iotlb_1g_miss, VTD_SIP_EVNT_IOTLB_1G_MISS);
+FME_EVENT_VTD_SIP(slpwc_l3_miss, VTD_SIP_EVNT_SLPWC_L3_MISS);
+FME_EVENT_VTD_SIP(slpwc_l4_miss, VTD_SIP_EVNT_SLPWC_L4_MISS);
+FME_EVENT_VTD_SIP(rcc_miss, VTD_SIP_EVNT_RCC_MISS);
+
+static struct attribute *fme_perf_vtd_sip_events_attrs[] = {
+ &fme_perf_event_vtd_sip_iotlb_4k_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_hit.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_4k_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_miss.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_miss.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_sip_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_sip_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+static const struct attribute_group *fme_perf_events_groups[] = {
+ &fme_perf_basic_events_group,
+ &fme_perf_cache_events_group,
+ &fme_perf_fabric_events_group,
+ &fme_perf_vtd_events_group,
+ &fme_perf_vtd_sip_events_group,
+ NULL,
+};
+
+static struct fme_perf_event_ops *get_event_ops(u32 evtype)
+{
+ if (evtype > FME_EVTYPE_MAX)
+ return NULL;
+
+ return &fme_perf_event_ops[evtype];
+}
+
+static void fme_perf_event_destroy(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+
+ if (ops->event_destroy)
+ ops->event_destroy(priv, event->hw.idx, event->hw.config_base);
+}
+
+static int fme_perf_event_init(struct perf_event *event)
+{
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct fme_perf_event_ops *ops;
+ u32 eventid, evtype, portid;
+
+ /* test the event attr type check for PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * fme counters are shared across all cores.
+ * Therefore, it does not support per-process mode.
+ * Also, it does not support event sampling mode.
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ if (event->cpu != priv->cpu)
+ return -EINVAL;
+
+ eventid = get_event(event->attr.config);
+ portid = get_portid(event->attr.config);
+ evtype = get_evtype(event->attr.config);
+ if (evtype > FME_EVTYPE_MAX)
+ return -EINVAL;
+
+ hwc->event_base = evtype;
+ hwc->idx = (int)eventid;
+ hwc->config_base = portid;
+
+ event->destroy = fme_perf_event_destroy;
+
+	dev_dbg(priv->dev, "%s event=0x%x, evtype=0x%x, portid=0x%x\n",
+ __func__, eventid, evtype, portid);
+
+ ops = get_event_ops(evtype);
+ if (ops->event_init)
+ return ops->event_init(priv, eventid, portid);
+
+ return 0;
+}
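
From userspace the same config encoding feeds perf_event_attr. A hedged sketch of opening the FPGA-wide fab_mmio_read counter from the example above; the dynamic PMU type must be read from /sys/bus/event_source/devices/dfl_fmeX/type, and the cpu argument should be the one advertised in the PMU's cpumask attribute:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_fme_counter(int pmu_type, int cpu)
{
	struct perf_event_attr attr = {
		.type = pmu_type,	/* from .../dfl_fmeX/type */
		.size = sizeof(attr),
		.config = 0xff2006,	/* event=0x6, evtype=fabric, portid=root */
	};

	/* System-wide counter: pid = -1, bound to the PMU's active cpu. */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}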
+
+static void fme_perf_event_update(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 now, prev, delta;
+
+ now = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ prev = local64_read(&hwc->prev_count);
+ delta = now - prev;
+
+ local64_add(delta, &event->count);
+}
+
+static void fme_perf_event_start(struct perf_event *event, int flags)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
+ count = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ local64_set(&hwc->prev_count, count);
+}
+
+static void fme_perf_event_stop(struct perf_event *event, int flags)
+{
+ fme_perf_event_update(event);
+}
+
+static int fme_perf_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ fme_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void fme_perf_event_del(struct perf_event *event, int flags)
+{
+ fme_perf_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void fme_perf_event_read(struct perf_event *event)
+{
+ fme_perf_event_update(event);
+}
+
+static void fme_perf_setup_hardware(struct fme_perf_priv *priv)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ /* read and save current working mode for fabric counters */
+ v = readq(base + FAB_CTRL);
+
+ if (FIELD_GET(FAB_PORT_FILTER, v) == FAB_PORT_FILTER_DISABLE)
+ priv->fab_port_id = FME_PORTID_ROOT;
+ else
+ priv->fab_port_id = FIELD_GET(FAB_PORT_ID, v);
+}
+
+static int fme_perf_pmu_register(struct platform_device *pdev,
+ struct fme_perf_priv *priv)
+{
+ struct pmu *pmu = &priv->pmu;
+ char *name;
+ int ret;
+
+ spin_lock_init(&priv->fab_lock);
+
+ fme_perf_setup_hardware(priv);
+
+ pmu->task_ctx_nr = perf_invalid_context;
+ pmu->attr_groups = fme_perf_groups;
+ pmu->attr_update = fme_perf_events_groups;
+ pmu->event_init = fme_perf_event_init;
+ pmu->add = fme_perf_event_add;
+ pmu->del = fme_perf_event_del;
+ pmu->start = fme_perf_event_start;
+ pmu->stop = fme_perf_event_stop;
+ pmu->read = fme_perf_event_read;
+ pmu->capabilities = PERF_PMU_CAP_NO_INTERRUPT |
+ PERF_PMU_CAP_NO_EXCLUDE;
+
+	name = devm_kasprintf(priv->dev, GFP_KERNEL, "dfl_fme%d", pdev->id);
+	if (!name)
+		return -ENOMEM;
+
+ ret = perf_pmu_register(pmu, name, -1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void fme_perf_pmu_unregister(struct fme_perf_priv *priv)
+{
+ perf_pmu_unregister(&priv->pmu);
+}
+
+static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct fme_perf_priv *priv;
+ int target;
+
+ priv = hlist_entry_safe(node, struct fme_perf_priv, node);
+
+ if (cpu != priv->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ priv->cpu = target;
+ return 0;
+}
+
+static int fme_perf_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->ioaddr = feature->ioaddr;
+ priv->id = feature->id;
+ priv->cpu = raw_smp_processor_id();
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/fpga/dfl_fme:online",
+ NULL, fme_perf_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ priv->cpuhp_state = ret;
+
+ /* Register the pmu instance for cpu hotplug */
+ ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node);
+ if (ret)
+ goto cpuhp_instance_err;
+
+ ret = fme_perf_pmu_register(pdev, priv);
+ if (ret)
+ goto pmu_register_err;
+
+ feature->priv = priv;
+ return 0;
+
+pmu_register_err:
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+ return ret;
+}
+
+static void fme_perf_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv = feature->priv;
+
+ fme_perf_pmu_unregister(priv);
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+}
+
+const struct dfl_feature_id fme_perf_id_table[] = {
+ {.id = FME_FEATURE_ID_GLOBAL_IPERF,},
+ {.id = FME_FEATURE_ID_GLOBAL_DPERF,},
+ {0,}
+};
+
+const struct dfl_feature_ops fme_perf_ops = {
+ .init = fme_perf_init,
+ .uinit = fme_perf_uinit,
+};
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index a233a53db708..1194c0e850e0 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -97,10 +97,6 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
return -EINVAL;
}
- if (!access_ok((void __user *)(unsigned long)port_pr.buffer_address,
- port_pr.buffer_size))
- return -EFAULT;
-
/*
* align PR buffer per PR bandwidth, as HW ignores the extra padding
* data automatically.
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 6685c8ef965b..4195dd68193e 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -38,5 +38,7 @@ extern const struct dfl_feature_id fme_pr_mgmt_id_table[];
extern const struct dfl_feature_ops fme_global_err_ops;
extern const struct dfl_feature_id fme_global_err_id_table[];
extern const struct attribute_group fme_global_err_group;
+extern const struct dfl_feature_ops fme_perf_ops;
+extern const struct dfl_feature_id fme_perf_id_table[];
#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 96a2b8274a33..990994874bf1 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1079,6 +1079,7 @@ static int __init dfl_fpga_init(void)
*/
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
+ struct dfl_feature_platform_data *pdata;
struct platform_device *port_pdev;
int ret = -ENODEV;
@@ -1093,7 +1094,11 @@ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
goto put_dev_exit;
}
- ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev));
+ pdata = dev_get_platdata(&port_pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, true);
+ mutex_unlock(&pdata->lock);
if (ret)
goto put_dev_exit;
@@ -1120,6 +1125,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
*/
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
+ struct dfl_feature_platform_data *pdata;
struct platform_device *port_pdev;
int ret = -ENODEV;
@@ -1138,7 +1144,12 @@ int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
if (ret)
goto put_dev_exit;
- dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev));
+ pdata = dev_get_platdata(&port_pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ dfl_feature_dev_use_end(pdata);
+ mutex_unlock(&pdata->lock);
+
cdev->released_port_num--;
put_dev_exit:
put_device(&port_pdev->dev);
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 9f0e656de720..2f5d3052e36e 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -197,16 +197,16 @@ struct dfl_feature_driver {
 * feature dev (platform device)'s resources.
* @ioaddr: mapped mmio resource address.
* @ops: ops of this sub feature.
+ * @priv: priv data of this feature.
*/
struct dfl_feature {
u64 id;
int resource_index;
void __iomem *ioaddr;
const struct dfl_feature_ops *ops;
+ void *priv;
};
-#define DEV_STATUS_IN_USE 0
-
#define FEATURE_DEV_ID_UNUSED (-1)
/**
@@ -219,8 +219,9 @@ struct dfl_feature {
* @dfl_cdev: ptr to container device.
* @id: id used for this feature device.
* @disable_count: count for port disable.
+ * @excl_open: set on feature device exclusive open.
+ * @open_count: count for feature device open.
* @num: number for sub features.
- * @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
* @private: ptr to feature dev private data.
* @features: sub features of this feature dev.
*/
@@ -232,26 +233,46 @@ struct dfl_feature_platform_data {
struct dfl_fpga_cdev *dfl_cdev;
int id;
unsigned int disable_count;
- unsigned long dev_status;
+ bool excl_open;
+ int open_count;
void *private;
int num;
- struct dfl_feature features[0];
+ struct dfl_feature features[];
};
static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+ bool excl)
{
- /* Test and set IN_USE flags to ensure file is exclusively used */
- if (test_and_set_bit_lock(DEV_STATUS_IN_USE, &pdata->dev_status))
+ if (pdata->excl_open)
return -EBUSY;
+ if (excl) {
+ if (pdata->open_count)
+ return -EBUSY;
+
+ pdata->excl_open = true;
+ }
+ pdata->open_count++;
+
return 0;
}
static inline
void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
{
- clear_bit_unlock(DEV_STATUS_IN_USE, &pdata->dev_status);
+ pdata->excl_open = false;
+
+ if (WARN_ON(pdata->open_count <= 0))
+ return;
+
+ pdata->open_count--;
+}
+
+static inline
+int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->open_count;
}
static inline
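
The new helpers replace the single IN_USE bit with shared/exclusive accounting: an exclusive open is refused while any opener exists, and every open is refused while an exclusive one is active; callers such as dfl_fpga_cdev_release_port() hold pdata->lock around them. A small userspace model of the same rules (hypothetical names, locking omitted, -1 playing the role of -EBUSY):

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_state {
            bool excl_open;
            int open_count;
    };

    /* Mirrors dfl_feature_dev_use_begin(). */
    static int use_begin(struct dev_state *s, bool excl)
    {
            if (s->excl_open)
                    return -1;
            if (excl && s->open_count)
                    return -1;
            if (excl)
                    s->excl_open = true;
            s->open_count++;
            return 0;
    }

    /* Mirrors dfl_feature_dev_use_end(), minus the WARN_ON. */
    static void use_end(struct dev_state *s)
    {
            s->excl_open = false;
            if (s->open_count > 0)
                    s->open_count--;
    }

    int main(void)
    {
            struct dev_state s = { 0 };

            printf("shared: %d\n", use_begin(&s, false));   /* 0  */
            printf("excl:   %d\n", use_begin(&s, true));    /* -1 */
            use_end(&s);
            printf("excl:   %d\n", use_begin(&s, true));    /* 0  */
            printf("shared: %d\n", use_begin(&s, false));   /* -1 */
            return 0;
    }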
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 56e112e14a10..8d689fea0dab 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -46,10 +46,16 @@ static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
struct spi_message message;
struct spi_transfer assert_cs_then_reset_delay = {
.cs_change = 1,
- .delay_usecs = ICE40_SPI_RESET_DELAY
+ .delay = {
+ .value = ICE40_SPI_RESET_DELAY,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
};
struct spi_transfer housekeeping_delay_then_release_cs = {
- .delay_usecs = ICE40_SPI_HOUSEKEEPING_DELAY
+ .delay = {
+ .value = ICE40_SPI_HOUSEKEEPING_DELAY,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
};
int ret;
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index 4d8a87641587..b316369156fe 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -157,7 +157,8 @@ static int machxo2_cleanup(struct fpga_manager *mgr)
spi_message_init(&msg);
tx[1].tx_buf = &refresh;
tx[1].len = sizeof(refresh);
- tx[1].delay_usecs = MACHXO2_REFRESH_USEC;
+ tx[1].delay.value = MACHXO2_REFRESH_USEC;
+ tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx[1], &msg);
ret = spi_sync(spi, &msg);
if (ret)
@@ -208,7 +209,8 @@ static int machxo2_write_init(struct fpga_manager *mgr,
spi_message_init(&msg);
tx[0].tx_buf = &enable;
tx[0].len = sizeof(enable);
- tx[0].delay_usecs = MACHXO2_LOW_DELAY_USEC;
+ tx[0].delay.value = MACHXO2_LOW_DELAY_USEC;
+ tx[0].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx[0], &msg);
tx[1].tx_buf = &erase;
@@ -269,7 +271,8 @@ static int machxo2_write(struct fpga_manager *mgr, const char *buf,
spi_message_init(&msg);
tx.tx_buf = payload;
tx.len = MACHXO2_BUF_SIZE;
- tx.delay_usecs = MACHXO2_HIGH_DELAY_USEC;
+ tx.delay.value = MACHXO2_HIGH_DELAY_USEC;
+ tx.delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx, &msg);
ret = spi_sync(spi, &msg);
if (ret) {
@@ -317,7 +320,8 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
spi_message_init(&msg);
tx[1].tx_buf = &refresh;
tx[1].len = sizeof(refresh);
- tx[1].delay_usecs = MACHXO2_REFRESH_USEC;
+ tx[1].delay.value = MACHXO2_REFRESH_USEC;
+ tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx[1], &msg);
ret = spi_sync(spi, &msg);
if (ret)
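
All four machxo2 hunks, like the ice40 one before them, are the same mechanical conversion from the deprecated per-transfer delay_usecs field to the unit-aware struct spi_delay. The pattern in isolation (MY_DELAY_USEC is a hypothetical constant):

    #include <linux/spi/spi.h>

    #define MY_DELAY_USEC   5000    /* hypothetical value */

    static void my_init_xfer(struct spi_transfer *xfer)
    {
            /* was: xfer->delay_usecs = MY_DELAY_USEC; */
            xfer->delay.value = MY_DELAY_USEC;
            xfer->delay.unit = SPI_DELAY_UNIT_USECS;
    }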
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 215d33789c74..44b7c569d4dc 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -154,11 +154,11 @@ static void s10_receive_callback(struct stratix10_svc_client *client,
* Here we set status bits as we receive them. Elsewhere, we always use
* test_and_clear_bit() to check status in priv->status
*/
- for (i = 0; i <= SVC_STATUS_RECONFIG_ERROR; i++)
+ for (i = 0; i <= SVC_STATUS_ERROR; i++)
if (status & (1 << i))
set_bit(i, &priv->status);
- if (status & BIT(SVC_STATUS_RECONFIG_BUFFER_DONE)) {
+ if (status & BIT(SVC_STATUS_BUFFER_DONE)) {
s10_unlock_bufs(priv, data->kaddr1);
s10_unlock_bufs(priv, data->kaddr2);
s10_unlock_bufs(priv, data->kaddr3);
@@ -209,8 +209,7 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
}
ret = 0;
- if (!test_and_clear_bit(SVC_STATUS_RECONFIG_REQUEST_OK,
- &priv->status)) {
+ if (!test_and_clear_bit(SVC_STATUS_OK, &priv->status)) {
ret = -ETIMEDOUT;
goto init_done;
}
@@ -323,17 +322,15 @@ static int s10_ops_write(struct fpga_manager *mgr, const char *buf,
&priv->status_return_completion,
S10_BUFFER_TIMEOUT);
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_DONE,
- &priv->status) ||
- test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
+ if (test_and_clear_bit(SVC_STATUS_BUFFER_DONE, &priv->status) ||
+ test_and_clear_bit(SVC_STATUS_BUFFER_SUBMITTED,
&priv->status)) {
ret = 0;
continue;
}
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
- &priv->status)) {
- dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
ret = -EFAULT;
break;
}
@@ -393,13 +390,11 @@ static int s10_ops_write_complete(struct fpga_manager *mgr,
timeout = ret;
ret = 0;
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_COMPLETED,
- &priv->status))
+ if (test_and_clear_bit(SVC_STATUS_COMPLETED, &priv->status))
break;
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
- &priv->status)) {
- dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
ret = -EFAULT;
break;
}
@@ -482,7 +477,8 @@ static int s10_remove(struct platform_device *pdev)
}
static const struct of_device_id s10_of_match[] = {
- { .compatible = "intel,stratix10-soc-fpga-mgr", },
+ {.compatible = "intel,stratix10-soc-fpga-mgr"},
+ {.compatible = "intel,agilex-soc-fpga-mgr"},
{},
};
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index b8a88d21d038..4a1139e05280 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -40,16 +40,12 @@ static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
const char *buf, size_t size)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_fpga_priv *priv;
dma_addr_t dma_addr;
u32 eemi_flags = 0;
char *kbuf;
int ret;
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
- return -ENXIO;
-
priv = mgr->priv;
kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
@@ -63,7 +59,7 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
- ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+ ret = zynqmp_pm_fpga_load(dma_addr, size, eemi_flags);
dma_free_coherent(priv->dev, size, kbuf, dma_addr);
@@ -78,13 +74,9 @@ static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr,
static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- u32 status;
-
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
- return FPGA_MGR_STATE_UNKNOWN;
+ u32 status = 0;
- eemi_ops->fpga_get_status(&status);
+ zynqmp_pm_fpga_get_status(&status);
if (status & IXR_FPGA_DONE_MASK)
return FPGA_MGR_STATE_OPERATING;
diff --git a/drivers/gnss/serial.h b/drivers/gnss/serial.h
index 980ffdc86c2a..621953f7821d 100644
--- a/drivers/gnss/serial.h
+++ b/drivers/gnss/serial.h
@@ -16,7 +16,7 @@ struct gnss_serial {
struct gnss_device *gdev;
speed_t speed;
const struct gnss_serial_ops *ops;
- unsigned long drvdata[0];
+ unsigned long drvdata[];
};
enum gnss_serial_pm_state {
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index effed3a8d398..2ecb1d3e8eeb 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -439,14 +439,18 @@ static int sirf_probe(struct serdev_device *serdev)
data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
GPIOD_OUT_LOW);
- if (IS_ERR(data->on_off))
+ if (IS_ERR(data->on_off)) {
+ ret = PTR_ERR(data->on_off);
goto err_put_device;
+ }
if (data->on_off) {
data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
GPIOD_IN);
- if (IS_ERR(data->wakeup))
+ if (IS_ERR(data->wakeup)) {
+ ret = PTR_ERR(data->wakeup);
goto err_put_device;
+ }
ret = regulator_enable(data->vcc);
if (ret)
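
Both sirf hunks fix the same error-path bug: the code jumped to err_put_device without loading ret first, so a failed gpiod lookup could return whatever value ret happened to hold. The shape of the bug and the fix, distilled (get_resource() and err_put are placeholders):

    /* buggy: 'ret' still holds a stale value at the jump */
    p = get_resource();
    if (IS_ERR(p))
            goto err_put;

    /* fixed: capture the encoded errno before jumping */
    p = get_resource();
    if (IS_ERR(p)) {
            ret = PTR_ERR(p);
            goto err_put;
    }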
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1b96169d84f7..bcacd9c74aa8 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -73,6 +73,10 @@ config GPIO_GENERIC
depends on HAS_IOMEM # Only for IOMEM drivers
tristate
+config GPIO_REGMAP
+ depends on REGMAP
+ tristate
+
# put drivers in the right section, in alphabetical order
# This symbol is selected by both I2C and SPI expanders
@@ -422,7 +426,7 @@ config GPIO_OMAP
Say yes here to enable GPIO support for TI OMAP SoCs.
config GPIO_PL061
- bool "PrimeCell PL061 GPIO support"
+ tristate "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
select IRQ_DOMAIN
select GPIOLIB_IRQCHIP
@@ -439,7 +443,7 @@ config GPIO_PMIC_EIC_SPRD
config GPIO_PXA
bool "PXA GPIO support"
- depends on ARCH_PXA || ARCH_MMP
+ depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
help
Say yes here to support the PXA GPIO device
@@ -638,7 +642,7 @@ config GPIO_XGENE
config GPIO_XGENE_SB
tristate "APM X-Gene GPIO standby controller support"
- depends on ARCH_XGENE && OF_GPIO
+ depends on (ARCH_XGENE || COMPILE_TEST)
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -952,7 +956,7 @@ config GPIO_PCA953X
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
- depends on GPIO_PCA953X=y
+ depends on GPIO_PCA953X
select GPIOLIB_IRQCHIP
help
Say yes here to enable the pca953x to be used as an interrupt
@@ -1541,6 +1545,18 @@ config GPIO_VIPERBOARD
endmenu
+config GPIO_AGGREGATOR
+ tristate "GPIO Aggregator"
+ help
+ Say yes here to enable the GPIO Aggregator, which provides a way to
+ aggregate existing GPIO lines into a new virtual GPIO chip.
+ This can serve the following purposes:
+ - Assign permissions for a collection of GPIO lines to a user,
+ - Export a collection of GPIO lines to a virtual machine,
+ - Provide a generic driver for a GPIO-operated device in an
+ industrial control context, to be operated from userspace using
+ the GPIO chardev interface.
+
config GPIO_MOCKUP
tristate "GPIO Testing Driver"
select IRQ_SIM
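
The help text is terse on usage: the aggregator is configured entirely through driver sysfs attributes, implemented by new_device_store() and delete_device_store() in the new driver further below. A minimal userspace sketch of creating an aggregator, assuming the usual platform-driver sysfs path and an existing chip labelled "gpiochip0" (both are system-dependent):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Path and GPIO spec are examples; adjust for the target system. */
            const char *path =
                    "/sys/bus/platform/drivers/gpio-aggregator/new_device";
            const char *spec = "gpiochip0 0,2-3 gpiochip1 4";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, spec, strlen(spec)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }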
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b2cfc21a97f3..1e4894e0bf0f 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
# Device drivers. Generally keep list sorted alphabetically
+obj-$(CONFIG_GPIO_REGMAP) += gpio-regmap.o
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
# directly supported by gpio-generic
@@ -25,6 +26,7 @@ obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
+obj-$(CONFIG_GPIO_AGGREGATOR) += gpio-aggregator.o
obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 3a44e6ae52bd..b989c9352da2 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -99,6 +99,10 @@ similar and probe a proper driver in the gpiolib subsystem.
In some cases it makes sense to create a GPIO chip from the local driver
for a few GPIOs. Those should stay where they are.
+At the same time it makes sense to get rid of code duplication in existing or
+newly added drivers. For example, gpio-ml-ioh should be incorporated into
+gpio-pch; in the same way, gpio-intel-mid should be folded into gpio-pxa.
+
Generic MMIO GPIO
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
new file mode 100644
index 000000000000..9b0adbdddbfc
--- /dev/null
+++ b/drivers/gpio/gpio-aggregator.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// GPIO Aggregator
+//
+// Copyright (C) 2019-2020 Glider bv
+
+#define DRV_NAME "gpio-aggregator"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+
+/*
+ * GPIO Aggregator sysfs interface
+ */
+
+struct gpio_aggregator {
+ struct gpiod_lookup_table *lookups;
+ struct platform_device *pdev;
+ char args[];
+};
+
+static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
+static DEFINE_IDR(gpio_aggregator_idr);
+
+static char *get_arg(char **args)
+{
+ char *start = *args, *end;
+
+ start = skip_spaces(start);
+ if (!*start)
+ return NULL;
+
+ if (*start == '"') {
+ /* Quoted arg */
+ end = strchr(++start, '"');
+ if (!end)
+ return ERR_PTR(-EINVAL);
+ } else {
+ /* Unquoted arg */
+ for (end = start; *end && !isspace(*end); end++) ;
+ }
+
+ if (*end)
+ *end++ = '\0';
+
+ *args = end;
+ return start;
+}
+
+static bool isrange(const char *s)
+{
+ size_t n;
+
+ if (IS_ERR_OR_NULL(s))
+ return false;
+
+ while (1) {
+ n = strspn(s, "0123456789");
+ if (!n)
+ return false;
+
+ s += n;
+
+ switch (*s++) {
+ case '\0':
+ return true;
+
+ case '-':
+ case ',':
+ break;
+
+ default:
+ return false;
+ }
+ }
+}
+
+static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
+ int hwnum, unsigned int *n)
+{
+ struct gpiod_lookup_table *lookups;
+
+ lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
+ GFP_KERNEL);
+ if (!lookups)
+ return -ENOMEM;
+
+ lookups->table[*n] =
+ (struct gpiod_lookup)GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
+
+ (*n)++;
+ memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
+
+ aggr->lookups = lookups;
+ return 0;
+}
+
+static int aggr_parse(struct gpio_aggregator *aggr)
+{
+ unsigned int first_index, last_index, i, n = 0;
+ char *name, *offsets, *first, *last, *next;
+ char *args = aggr->args;
+ int error;
+
+ for (name = get_arg(&args), offsets = get_arg(&args); name;
+ offsets = get_arg(&args)) {
+ if (IS_ERR(name)) {
+ pr_err("Cannot get GPIO specifier: %pe\n", name);
+ return PTR_ERR(name);
+ }
+
+ if (!isrange(offsets)) {
+ /* Named GPIO line */
+ error = aggr_add_gpio(aggr, name, U16_MAX, &n);
+ if (error)
+ return error;
+
+ name = offsets;
+ continue;
+ }
+
+ /* GPIO chip + offset(s) */
+ for (first = offsets; *first; first = next) {
+ next = strchrnul(first, ',');
+ if (*next)
+ *next++ = '\0';
+
+ last = strchr(first, '-');
+ if (last)
+ *last++ = '\0';
+
+ if (kstrtouint(first, 10, &first_index)) {
+ pr_err("Cannot parse GPIO index %s\n", first);
+ return -EINVAL;
+ }
+
+ if (!last) {
+ last_index = first_index;
+ } else if (kstrtouint(last, 10, &last_index)) {
+ pr_err("Cannot parse GPIO index %s\n", last);
+ return -EINVAL;
+ }
+
+ for (i = first_index; i <= last_index; i++) {
+ error = aggr_add_gpio(aggr, name, i, &n);
+ if (error)
+ return error;
+ }
+ }
+
+ name = get_arg(&args);
+ }
+
+ if (!n) {
+ pr_err("No GPIOs specified\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t new_device_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct gpio_aggregator *aggr;
+ struct platform_device *pdev;
+ int res, id;
+
+ /* kernfs guarantees string termination, so count + 1 is safe */
+ aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
+ if (!aggr)
+ return -ENOMEM;
+
+ memcpy(aggr->args, buf, count + 1);
+
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups) {
+ res = -ENOMEM;
+ goto free_ga;
+ }
+
+ mutex_lock(&gpio_aggregator_lock);
+ id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL);
+ mutex_unlock(&gpio_aggregator_lock);
+
+ if (id < 0) {
+ res = id;
+ goto free_table;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id);
+ if (!aggr->lookups->dev_id) {
+ res = -ENOMEM;
+ goto remove_idr;
+ }
+
+ res = aggr_parse(aggr);
+ if (res)
+ goto free_dev_id;
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0);
+ if (IS_ERR(pdev)) {
+ res = PTR_ERR(pdev);
+ goto remove_table;
+ }
+
+ aggr->pdev = pdev;
+ return count;
+
+remove_table:
+ gpiod_remove_lookup_table(aggr->lookups);
+free_dev_id:
+ kfree(aggr->lookups->dev_id);
+remove_idr:
+ mutex_lock(&gpio_aggregator_lock);
+ idr_remove(&gpio_aggregator_idr, id);
+ mutex_unlock(&gpio_aggregator_lock);
+free_table:
+ kfree(aggr->lookups);
+free_ga:
+ kfree(aggr);
+ return res;
+}
+
+static DRIVER_ATTR_WO(new_device);
+
+static void gpio_aggregator_free(struct gpio_aggregator *aggr)
+{
+ platform_device_unregister(aggr->pdev);
+ gpiod_remove_lookup_table(aggr->lookups);
+ kfree(aggr->lookups->dev_id);
+ kfree(aggr->lookups);
+ kfree(aggr);
+}
+
+static ssize_t delete_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator *aggr;
+ unsigned int id;
+ int error;
+
+ if (!str_has_prefix(buf, DRV_NAME "."))
+ return -EINVAL;
+
+ error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
+ if (error)
+ return error;
+
+ mutex_lock(&gpio_aggregator_lock);
+ aggr = idr_remove(&gpio_aggregator_idr, id);
+ mutex_unlock(&gpio_aggregator_lock);
+ if (!aggr)
+ return -ENOENT;
+
+ gpio_aggregator_free(aggr);
+ return count;
+}
+static DRIVER_ATTR_WO(delete_device);
+
+static struct attribute *gpio_aggregator_attrs[] = {
+ &driver_attr_new_device.attr,
+ &driver_attr_delete_device.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gpio_aggregator);
+
+static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+{
+ gpio_aggregator_free(p);
+ return 0;
+}
+
+static void __exit gpio_aggregator_remove_all(void)
+{
+ mutex_lock(&gpio_aggregator_lock);
+ idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
+ idr_destroy(&gpio_aggregator_idr);
+ mutex_unlock(&gpio_aggregator_lock);
+}
+
+
+/*
+ * GPIO Forwarder
+ */
+
+struct gpiochip_fwd {
+ struct gpio_chip chip;
+ struct gpio_desc **descs;
+ union {
+ struct mutex mlock; /* protects tmp[] if can_sleep */
+ spinlock_t slock; /* protects tmp[] if !can_sleep */
+ };
+ unsigned long tmp[]; /* values and descs for multiple ops */
+};
+
+static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_get_direction(fwd->descs[offset]);
+}
+
+static int gpio_fwd_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_direction_input(fwd->descs[offset]);
+}
+
+static int gpio_fwd_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_direction_output(fwd->descs[offset], value);
+}
+
+static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_get_value(fwd->descs[offset]);
+}
+
+static int gpio_fwd_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ unsigned long *values, flags = 0;
+ struct gpio_desc **descs;
+ unsigned int i, j = 0;
+ int error;
+
+ if (chip->can_sleep)
+ mutex_lock(&fwd->mlock);
+ else
+ spin_lock_irqsave(&fwd->slock, flags);
+
+ /* Both values bitmap and desc pointers are stored in tmp[] */
+ values = &fwd->tmp[0];
+ descs = (void *)&fwd->tmp[BITS_TO_LONGS(fwd->chip.ngpio)];
+
+ bitmap_clear(values, 0, fwd->chip.ngpio);
+ for_each_set_bit(i, mask, fwd->chip.ngpio)
+ descs[j++] = fwd->descs[i];
+
+ error = gpiod_get_array_value(j, descs, NULL, values);
+ if (!error) {
+ j = 0;
+ for_each_set_bit(i, mask, fwd->chip.ngpio)
+ __assign_bit(i, bits, test_bit(j++, values));
+ }
+
+ if (chip->can_sleep)
+ mutex_unlock(&fwd->mlock);
+ else
+ spin_unlock_irqrestore(&fwd->slock, flags);
+
+ return error;
+}
+
+static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ gpiod_set_value(fwd->descs[offset], value);
+}
+
+static void gpio_fwd_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ unsigned long *values, flags = 0;
+ struct gpio_desc **descs;
+ unsigned int i, j = 0;
+
+ if (chip->can_sleep)
+ mutex_lock(&fwd->mlock);
+ else
+ spin_lock_irqsave(&fwd->slock, flags);
+
+ /* Both values bitmap and desc pointers are stored in tmp[] */
+ values = &fwd->tmp[0];
+ descs = (void *)&fwd->tmp[BITS_TO_LONGS(fwd->chip.ngpio)];
+
+ for_each_set_bit(i, mask, fwd->chip.ngpio) {
+ __assign_bit(j, values, test_bit(i, bits));
+ descs[j++] = fwd->descs[i];
+ }
+
+ gpiod_set_array_value(j, descs, NULL, values);
+
+ if (chip->can_sleep)
+ mutex_unlock(&fwd->mlock);
+ else
+ spin_unlock_irqrestore(&fwd->slock, flags);
+}
+
+static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_set_config(fwd->descs[offset], config);
+}
+
+/**
+ * gpiochip_fwd_create() - Create a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder.
+ * @descs: Array containing the GPIO descriptors to forward to.
+ * This array must contain @ngpios entries, and must not be deallocated
+ * before the forwarder has been destroyed again.
+ *
+ * This function creates a new gpiochip, which forwards all GPIO operations to
+ * the passed GPIO descriptors.
+ *
+ * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
+ unsigned int ngpios,
+ struct gpio_desc *descs[])
+{
+ const char *label = dev_name(dev);
+ struct gpiochip_fwd *fwd;
+ struct gpio_chip *chip;
+ unsigned int i;
+ int error;
+
+ fwd = devm_kzalloc(dev, struct_size(fwd, tmp,
+ BITS_TO_LONGS(ngpios) + ngpios), GFP_KERNEL);
+ if (!fwd)
+ return ERR_PTR(-ENOMEM);
+
+ chip = &fwd->chip;
+
+ /*
+ * If any of the GPIO lines are sleeping, then the entire forwarder
+ * will be sleeping.
+ * If any of the chips support .set_config(), then the forwarder will
+ * support setting configs.
+ */
+ for (i = 0; i < ngpios; i++) {
+ struct gpio_chip *parent = gpiod_to_chip(descs[i]);
+
+ dev_dbg(dev, "%u => gpio-%d\n", i, desc_to_gpio(descs[i]));
+
+ if (gpiod_cansleep(descs[i]))
+ chip->can_sleep = true;
+ if (parent && parent->set_config)
+ chip->set_config = gpio_fwd_set_config;
+ }
+
+ chip->label = label;
+ chip->parent = dev;
+ chip->owner = THIS_MODULE;
+ chip->get_direction = gpio_fwd_get_direction;
+ chip->direction_input = gpio_fwd_direction_input;
+ chip->direction_output = gpio_fwd_direction_output;
+ chip->get = gpio_fwd_get;
+ chip->get_multiple = gpio_fwd_get_multiple;
+ chip->set = gpio_fwd_set;
+ chip->set_multiple = gpio_fwd_set_multiple;
+ chip->base = -1;
+ chip->ngpio = ngpios;
+ fwd->descs = descs;
+
+ if (chip->can_sleep)
+ mutex_init(&fwd->mlock);
+ else
+ spin_lock_init(&fwd->slock);
+
+ error = devm_gpiochip_add_data(dev, chip, fwd);
+ if (error)
+ return ERR_PTR(error);
+
+ return fwd;
+}
+
+
+/*
+ * GPIO Aggregator platform device
+ */
+
+static int gpio_aggregator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gpio_desc **descs;
+ struct gpiochip_fwd *fwd;
+ int i, n;
+
+ n = gpiod_count(dev, NULL);
+ if (n < 0)
+ return n;
+
+ descs = devm_kmalloc_array(dev, n, sizeof(*descs), GFP_KERNEL);
+ if (!descs)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+ if (IS_ERR(descs[i]))
+ return PTR_ERR(descs[i]);
+ }
+
+ fwd = gpiochip_fwd_create(dev, n, descs);
+ if (IS_ERR(fwd))
+ return PTR_ERR(fwd);
+
+ platform_set_drvdata(pdev, fwd);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_aggregator_dt_ids[] = {
+ /*
+ * Add GPIO-operated devices controlled from userspace below,
+ * or use "driver_override" in sysfs
+ */
+ {},
+};
+MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);
+#endif
+
+static struct platform_driver gpio_aggregator_driver = {
+ .probe = gpio_aggregator_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .groups = gpio_aggregator_groups,
+ .of_match_table = of_match_ptr(gpio_aggregator_dt_ids),
+ },
+};
+
+static int __init gpio_aggregator_init(void)
+{
+ return platform_driver_register(&gpio_aggregator_driver);
+}
+module_init(gpio_aggregator_init);
+
+static void __exit gpio_aggregator_exit(void)
+{
+ gpio_aggregator_remove_all();
+ platform_driver_unregister(&gpio_aggregator_driver);
+}
+module_exit(gpio_aggregator_exit);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("GPIO Aggregator");
+MODULE_LICENSE("GPL v2");
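
One detail worth noting in gpiochip_fwd_create(): a single flexible tmp[] array is sized to hold the values bitmap (BITS_TO_LONGS(ngpios) longs) immediately followed by ngpios descriptor pointers, so gpio_fwd_get_multiple() and gpio_fwd_set_multiple() never allocate; this relies on a pointer fitting in an unsigned long. A standalone model of that carving (a toy pointer type in place of struct gpio_desc):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BITS_PER_LONG    (8 * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct fwd {
            unsigned int ngpio;
            unsigned long tmp[];    /* bitmap words, then ngpio pointers */
    };

    int main(void)
    {
            unsigned int ngpio = 40;
            size_t words = BITS_TO_LONGS(ngpio);
            struct fwd *fwd = calloc(1, sizeof(*fwd) +
                                     (words + ngpio) * sizeof(unsigned long));

            if (!fwd)
                    return 1;
            fwd->ngpio = ngpio;

            /* Carve the two regions out of the one allocation. */
            unsigned long *values = &fwd->tmp[0];
            void **descs = (void **)&fwd->tmp[words];

            memset(values, 0, words * sizeof(*values));
            descs[0] = &fwd->ngpio;         /* both regions usable independently */
            printf("values at %p, descs at %p\n", (void *)values, (void *)descs);
            free(fwd);
            return 0;
    }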
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index baee8c3f06ad..cf3687a7925f 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -625,7 +625,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kona_gpio->reg_base)) {
- ret = -ENXIO;
+ ret = PTR_ERR(kona_gpio->reg_base);
goto err_irq_domain;
}
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 92e127e74813..1d8d55bd63aa 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -49,7 +49,9 @@
#define GPIO_EXT_PORTC 0x58
#define GPIO_EXT_PORTD 0x5c
+#define DWAPB_DRIVER_NAME "gpio-dwapb"
#define DWAPB_MAX_PORTS 4
+
#define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */
#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */
#define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */
@@ -62,6 +64,8 @@
#define GPIO_INTSTATUS_V2 0x3c
#define GPIO_PORTA_EOI_V2 0x40
+#define DWAPB_NR_CLOCKS 2
+
struct dwapb_gpio;
#ifdef CONFIG_PM_SLEEP
@@ -97,7 +101,7 @@ struct dwapb_gpio {
struct irq_domain *domain;
unsigned int flags;
struct reset_control *rst;
- struct clk *clk;
+ struct clk_bulk_data clks[DWAPB_NR_CLOCKS];
};
static inline u32 gpio_reg_v2_convert(unsigned int offset)
@@ -189,22 +193,21 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
{
- u32 irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
- u32 ret = irq_status;
+ unsigned long irq_status;
+ irq_hw_number_t hwirq;
- while (irq_status) {
- int hwirq = fls(irq_status) - 1;
+ irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
+ for_each_set_bit(hwirq, &irq_status, 32) {
int gpio_irq = irq_find_mapping(gpio->domain, hwirq);
+ u32 irq_type = irq_get_trigger_type(gpio_irq);
generic_handle_irq(gpio_irq);
- irq_status &= ~BIT(hwirq);
- if ((irq_get_trigger_type(gpio_irq) & IRQ_TYPE_SENSE_MASK)
- == IRQ_TYPE_EDGE_BOTH)
+ if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
dwapb_toggle_trigger(gpio, hwirq);
}
- return ret;
+ return irq_status;
}
static void dwapb_irq_handler(struct irq_desc *desc)
@@ -212,10 +215,9 @@ static void dwapb_irq_handler(struct irq_desc *desc)
struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ chained_irq_enter(chip, desc);
dwapb_do_irq(gpio);
-
- if (chip->irq_eoi)
- chip->irq_eoi(irq_desc_get_irq_data(desc));
+ chained_irq_exit(chip, desc);
}
static void dwapb_irq_enable(struct irq_data *d)
@@ -228,7 +230,7 @@ static void dwapb_irq_enable(struct irq_data *d)
spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
- val |= BIT(d->hwirq);
+ val |= BIT(irqd_to_hwirq(d));
dwapb_write(gpio, GPIO_INTEN, val);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
@@ -243,46 +245,20 @@ static void dwapb_irq_disable(struct irq_data *d)
spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
- val &= ~BIT(d->hwirq);
+ val &= ~BIT(irqd_to_hwirq(d));
dwapb_write(gpio, GPIO_INTEN, val);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
-static int dwapb_irq_reqres(struct irq_data *d)
-{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
- struct gpio_chip *gc = &gpio->ports[0].gc;
- int ret;
-
- ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d));
- if (ret) {
- dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n",
- irqd_to_hwirq(d));
- return ret;
- }
- return 0;
-}
-
-static void dwapb_irq_relres(struct irq_data *d)
-{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
- struct gpio_chip *gc = &gpio->ports[0].gc;
-
- gpiochip_unlock_as_irq(gc, irqd_to_hwirq(d));
-}
-
static int dwapb_irq_set_type(struct irq_data *d, u32 type)
{
struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = igc->private;
struct gpio_chip *gc = &gpio->ports[0].gc;
- int bit = d->hwirq;
+ irq_hw_number_t bit = irqd_to_hwirq(d);
unsigned long level, polarity, flags;
- if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
- IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
spin_lock_irqsave(&gc->bgpio_lock, flags);
@@ -328,11 +304,12 @@ static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = igc->private;
struct dwapb_context *ctx = gpio->ports[0].ctx;
+ irq_hw_number_t bit = irqd_to_hwirq(d);
if (enable)
- ctx->wake_en |= BIT(d->hwirq);
+ ctx->wake_en |= BIT(bit);
else
- ctx->wake_en &= ~BIT(d->hwirq);
+ ctx->wake_en &= ~BIT(bit);
return 0;
}
@@ -350,9 +327,10 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
val_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
if (debounce)
- dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb | mask);
+ val_deb |= mask;
else
- dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb & ~mask);
+ val_deb &= ~mask;
+ dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -373,12 +351,7 @@ static int dwapb_gpio_set_config(struct gpio_chip *gc, unsigned offset,
static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
{
- u32 worked;
- struct dwapb_gpio *gpio = dev_id;
-
- worked = dwapb_do_irq(gpio);
-
- return worked ? IRQ_HANDLED : IRQ_NONE;
+ return IRQ_RETVAL(dwapb_do_irq(dev_id));
}
static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
@@ -388,17 +361,23 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct gpio_chip *gc = &port->gc;
struct fwnode_handle *fwnode = pp->fwnode;
struct irq_chip_generic *irq_gc = NULL;
- unsigned int hwirq, ngpio = gc->ngpio;
+ unsigned int ngpio = gc->ngpio;
struct irq_chip_type *ct;
+ irq_hw_number_t hwirq;
int err, i;
+ if (memchr_inv(pp->irq, 0, sizeof(pp->irq)) == NULL) {
+ dev_warn(gpio->dev, "no IRQ for port%d\n", pp->idx);
+ return;
+ }
+
gpio->domain = irq_domain_create_linear(fwnode, ngpio,
&irq_generic_chip_ops, gpio);
if (!gpio->domain)
return;
err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2,
- "gpio-dwapb", handle_level_irq,
+ DWAPB_DRIVER_NAME, handle_bad_irq,
IRQ_NOREQUEST, 0,
IRQ_GC_INIT_NESTED_LOCK);
if (err) {
@@ -426,8 +405,6 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
ct->chip.irq_set_type = dwapb_irq_set_type;
ct->chip.irq_enable = dwapb_irq_enable;
ct->chip.irq_disable = dwapb_irq_disable;
- ct->chip.irq_request_resources = dwapb_irq_reqres;
- ct->chip.irq_release_resources = dwapb_irq_relres;
#ifdef CONFIG_PM_SLEEP
ct->chip.irq_set_wake = dwapb_irq_set_wake;
#endif
@@ -437,6 +414,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
}
irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
+ irq_gc->chip_types[0].handler = handle_level_irq;
irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
irq_gc->chip_types[1].handler = handle_edge_irq;
@@ -444,7 +422,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
int i;
for (i = 0; i < pp->ngpio; i++) {
- if (pp->irq[i] >= 0)
+ if (pp->irq[i])
irq_set_chained_handler_and_data(pp->irq[i],
dwapb_irq_handler, gpio);
}
@@ -455,7 +433,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
*/
err = devm_request_irq(gpio->dev, pp->irq[0],
dwapb_irq_handler_mfd,
- IRQF_SHARED, "gpio-dwapb-mfd", gpio);
+ IRQF_SHARED, DWAPB_DRIVER_NAME, gpio);
if (err) {
dev_err(gpio->dev, "error requesting IRQ\n");
irq_domain_remove(gpio->domain);
@@ -464,7 +442,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
}
}
- for (hwirq = 0 ; hwirq < ngpio ; hwirq++)
+ for (hwirq = 0; hwirq < ngpio; hwirq++)
irq_create_mapping(gpio->domain, hwirq);
port->gc.to_irq = dwapb_gpio_to_irq;
@@ -480,7 +458,7 @@ static void dwapb_irq_teardown(struct dwapb_gpio *gpio)
if (!gpio->domain)
return;
- for (hwirq = 0 ; hwirq < ngpio ; hwirq++)
+ for (hwirq = 0; hwirq < ngpio; hwirq++)
irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq));
irq_domain_remove(gpio->domain);
@@ -505,10 +483,9 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
return -ENOMEM;
#endif
- dat = gpio->regs + GPIO_EXT_PORTA + (pp->idx * GPIO_EXT_PORT_STRIDE);
- set = gpio->regs + GPIO_SWPORTA_DR + (pp->idx * GPIO_SWPORT_DR_STRIDE);
- dirout = gpio->regs + GPIO_SWPORTA_DDR +
- (pp->idx * GPIO_SWPORT_DDR_STRIDE);
+ dat = gpio->regs + GPIO_EXT_PORTA + pp->idx * GPIO_EXT_PORT_STRIDE;
+ set = gpio->regs + GPIO_SWPORTA_DR + pp->idx * GPIO_SWPORT_DR_STRIDE;
+ dirout = gpio->regs + GPIO_SWPORTA_DDR + pp->idx * GPIO_SWPORT_DDR_STRIDE;
/* This registers 32 GPIO lines per port */
err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
@@ -529,40 +506,66 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
if (pp->idx == 0)
port->gc.set_config = dwapb_gpio_set_config;
- if (pp->has_irq)
+ /* Only port A can provide interrupts in all configurations of the IP */
+ if (pp->idx == 0)
dwapb_configure_irqs(gpio, port, pp);
err = gpiochip_add_data(&port->gc, port);
- if (err)
+ if (err) {
dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
port->idx);
- else
- port->is_registered = true;
+ return err;
+ }
/* Add GPIO-signaled ACPI event support */
- if (pp->has_irq)
- acpi_gpiochip_request_interrupts(&port->gc);
+ acpi_gpiochip_request_interrupts(&port->gc);
- return err;
+ port->is_registered = true;
+
+ return 0;
}
static void dwapb_gpio_unregister(struct dwapb_gpio *gpio)
{
unsigned int m;
- for (m = 0; m < gpio->nr_ports; ++m)
- if (gpio->ports[m].is_registered)
- gpiochip_remove(&gpio->ports[m].gc);
+ for (m = 0; m < gpio->nr_ports; ++m) {
+ struct dwapb_gpio_port *port = &gpio->ports[m];
+
+ if (!port->is_registered)
+ continue;
+
+ acpi_gpiochip_free_interrupts(&port->gc);
+ gpiochip_remove(&port->gc);
+ }
+}
+
+static void dwapb_get_irq(struct device *dev, struct fwnode_handle *fwnode,
+ struct dwapb_port_property *pp)
+{
+ struct device_node *np = NULL;
+ int irq = -ENXIO, j;
+
+ if (fwnode_property_read_bool(fwnode, "interrupt-controller"))
+ np = to_of_node(fwnode);
+
+ for (j = 0; j < pp->ngpio; j++) {
+ if (np)
+ irq = of_irq_get(np, j);
+ else if (has_acpi_companion(dev))
+ irq = platform_get_irq_optional(to_platform_device(dev), j);
+ if (irq > 0)
+ pp->irq[j] = irq;
+ }
}
-static struct dwapb_platform_data *
-dwapb_gpio_get_pdata(struct device *dev)
+static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
{
struct fwnode_handle *fwnode;
struct dwapb_platform_data *pdata;
struct dwapb_port_property *pp;
int nports;
- int i, j;
+ int i;
nports = device_get_child_node_count(dev);
if (nports == 0)
@@ -580,8 +583,6 @@ dwapb_gpio_get_pdata(struct device *dev)
i = 0;
device_for_each_child_node(dev, fwnode) {
- struct device_node *np = NULL;
-
pp = &pdata->properties[i++];
pp->fwnode = fwnode;
@@ -593,8 +594,7 @@ dwapb_gpio_get_pdata(struct device *dev)
return ERR_PTR(-EINVAL);
}
- if (fwnode_property_read_u32(fwnode, "snps,nr-gpios",
- &pp->ngpio)) {
+ if (fwnode_property_read_u32(fwnode, "snps,nr-gpios", &pp->ngpio)) {
dev_info(dev,
"failed to get number of gpios for port%d\n",
i);
@@ -608,28 +608,8 @@ dwapb_gpio_get_pdata(struct device *dev)
* Only port A can provide interrupts in all configurations of
* the IP.
*/
- if (pp->idx != 0)
- continue;
-
- if (dev->of_node && fwnode_property_read_bool(fwnode,
- "interrupt-controller")) {
- np = to_of_node(fwnode);
- }
-
- for (j = 0; j < pp->ngpio; j++) {
- pp->irq[j] = -ENXIO;
-
- if (np)
- pp->irq[j] = of_irq_get(np, j);
- else if (has_acpi_companion(dev))
- pp->irq[j] = platform_get_irq(to_platform_device(dev), j);
-
- if (pp->irq[j] >= 0)
- pp->has_irq = true;
- }
-
- if (!pp->has_irq)
- dev_warn(dev, "no irq for port%d\n", pp->idx);
+ if (pp->idx == 0)
+ dwapb_get_irq(dev, fwnode, pp);
}
return pdata;
@@ -689,29 +669,24 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio->regs))
return PTR_ERR(gpio->regs);
- /* Optional bus clock */
- gpio->clk = devm_clk_get(&pdev->dev, "bus");
- if (!IS_ERR(gpio->clk)) {
- err = clk_prepare_enable(gpio->clk);
- if (err) {
- dev_info(&pdev->dev, "Cannot enable clock\n");
- return err;
- }
+ /* Optional bus and debounce clocks */
+ gpio->clks[0].id = "bus";
+ gpio->clks[1].id = "db";
+ err = devm_clk_bulk_get_optional(&pdev->dev, DWAPB_NR_CLOCKS,
+ gpio->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get APB/Debounce clocks\n");
+ return err;
}
- gpio->flags = 0;
- if (dev->of_node) {
- gpio->flags = (uintptr_t)of_device_get_match_data(dev);
- } else if (has_acpi_companion(dev)) {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(dwapb_acpi_match, dev);
- if (acpi_id) {
- if (acpi_id->driver_data)
- gpio->flags = acpi_id->driver_data;
- }
+ err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable APB/Debounce clocks\n");
+ return err;
}
+ gpio->flags = (uintptr_t)device_get_match_data(dev);
+
for (i = 0; i < gpio->nr_ports; i++) {
err = dwapb_gpio_add_port(gpio, &pdata->properties[i], i);
if (err)
@@ -724,7 +699,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
out_unregister:
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
- clk_disable_unprepare(gpio->clk);
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return err;
}
@@ -736,7 +711,7 @@ static int dwapb_gpio_remove(struct platform_device *pdev)
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
reset_control_assert(gpio->rst);
- clk_disable_unprepare(gpio->clk);
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return 0;
}
@@ -755,8 +730,6 @@ static int dwapb_gpio_suspend(struct device *dev)
unsigned int idx = gpio->ports[i].idx;
struct dwapb_context *ctx = gpio->ports[i].ctx;
- BUG_ON(!ctx);
-
offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
ctx->dir = dwapb_read(gpio, offset);
@@ -775,13 +748,12 @@ static int dwapb_gpio_suspend(struct device *dev)
ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
/* Mask out interrupts */
- dwapb_write(gpio, GPIO_INTMASK,
- 0xffffffff & ~ctx->wake_en);
+ dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
}
}
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
- clk_disable_unprepare(gpio->clk);
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return 0;
}
@@ -791,10 +763,13 @@ static int dwapb_gpio_resume(struct device *dev)
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
struct gpio_chip *gc = &gpio->ports[0].gc;
unsigned long flags;
- int i;
+ int i, err;
- if (!IS_ERR(gpio->clk))
- clk_prepare_enable(gpio->clk);
+ err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
+ if (err) {
+ dev_err(gpio->dev, "Cannot reenable APB/Debounce clocks\n");
+ return err;
+ }
spin_lock_irqsave(&gc->bgpio_lock, flags);
for (i = 0; i < gpio->nr_ports; i++) {
@@ -802,8 +777,6 @@ static int dwapb_gpio_resume(struct device *dev)
unsigned int idx = gpio->ports[i].idx;
struct dwapb_context *ctx = gpio->ports[i].ctx;
- BUG_ON(!ctx);
-
offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
dwapb_write(gpio, offset, ctx->data);
@@ -836,10 +809,10 @@ static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend,
static struct platform_driver dwapb_gpio_driver = {
.driver = {
- .name = "gpio-dwapb",
+ .name = DWAPB_DRIVER_NAME,
.pm = &dwapb_gpio_pm_ops,
- .of_match_table = of_match_ptr(dwapb_of_match),
- .acpi_match_table = ACPI_PTR(dwapb_acpi_match),
+ .of_match_table = dwapb_of_match,
+ .acpi_match_table = dwapb_acpi_match,
},
.probe = dwapb_gpio_probe,
.remove = dwapb_gpio_remove,
@@ -850,3 +823,4 @@ module_platform_driver(dwapb_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver");
+MODULE_ALIAS("platform:" DWAPB_DRIVER_NAME);
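
Two refactors above deserve a note: dwapb_do_irq() now walks the status word with for_each_set_bit() rather than repeated fls(), and dwapb_irq_handler_mfd() shrinks to IRQ_RETVAL(), which maps a nonzero status to IRQ_HANDLED and zero to IRQ_NONE. A standalone model of the bit walk (a trivial helper stands in for the kernel macro):

    #include <stdio.h>

    /* stand-in for the kernel's for_each_set_bit() over one 32-bit word */
    static int next_set_bit(unsigned long word, int from)
    {
            for (int i = from; i < 32; i++)
                    if (word & (1UL << i))
                            return i;
            return 32;
    }

    int main(void)
    {
            unsigned long status = 0x10010004;      /* example interrupt status */

            for (int bit = next_set_bit(status, 0); bit < 32;
                 bit = next_set_bit(status, bit + 1))
                    printf("handle hwirq %d\n", bit); /* generic_handle_irq() analogue */
            return 0;
    }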
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index da1ef0b1c291..b1accfba017d 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev)
mutex_init(&exar_gpio->lock);
index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
- if (index < 0)
- goto err_destroy;
+ if (index < 0) {
+ ret = index;
+ goto err_mutex_destroy;
+ }
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
@@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
err_destroy:
ida_simple_remove(&ida_index, index);
+err_mutex_destroy:
mutex_destroy(&exar_gpio->lock);
return ret;
}
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index cadd02993539..18a3147f5a42 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -36,9 +36,19 @@
#define SIO_F71889A_ID 0x1005 /* F71889A chipset ID */
#define SIO_F81866_ID 0x1010 /* F81866 chipset ID */
#define SIO_F81804_ID 0x1502 /* F81804 chipset ID, same for f81966 */
-
-
-enum chips { f71869, f71869a, f71882fg, f71889a, f71889f, f81866, f81804 };
+#define SIO_F81865_ID 0x0704 /* F81865 chipset ID */
+
+
+enum chips {
+ f71869,
+ f71869a,
+ f71882fg,
+ f71889a,
+ f71889f,
+ f81866,
+ f81804,
+ f81865,
+};
static const char * const f7188x_names[] = {
"f71869",
@@ -48,6 +58,7 @@ static const char * const f7188x_names[] = {
"f71889f",
"f81866",
"f81804",
+ "f81865",
};
struct f7188x_sio {
@@ -233,6 +244,15 @@ static struct f7188x_gpio_bank f81804_gpio_bank[] = {
F7188X_GPIO_BANK(90, 8, 0x98),
};
+static struct f7188x_gpio_bank f81865_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 8, 0xF0),
+ F7188X_GPIO_BANK(10, 8, 0xE0),
+ F7188X_GPIO_BANK(20, 8, 0xD0),
+ F7188X_GPIO_BANK(30, 8, 0xC0),
+ F7188X_GPIO_BANK(40, 8, 0xB0),
+ F7188X_GPIO_BANK(50, 8, 0xA0),
+ F7188X_GPIO_BANK(60, 5, 0x90),
+};
static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
@@ -425,6 +445,10 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
data->nr_bank = ARRAY_SIZE(f81804_gpio_bank);
data->bank = f81804_gpio_bank;
break;
+ case f81865:
+ data->nr_bank = ARRAY_SIZE(f81865_gpio_bank);
+ data->bank = f81865_gpio_bank;
+ break;
default:
return -ENODEV;
}
@@ -490,6 +514,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
case SIO_F81804_ID:
sio->type = f81804;
break;
+ case SIO_F81865_ID:
+ sio->type = f81865;
+ break;
default:
pr_info(DRVNAME ": Unsupported Fintek device 0x%04x\n", devid);
goto err;
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index fbddb1662428..4031164780f7 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -193,7 +193,7 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
if (val == deb_div) {
/*
* The debounce timer happens to already be set to the
- * desireable value, what a coincidence! We can just enable
+ * desirable value, what a coincidence! We can just enable
* debounce on this GPIO line and return. This happens more
* often than you think, for example when all GPIO keys
* on a system are requesting the same debounce interval.
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 2f086d0aa1f4..9960bb8b0f5b 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -89,7 +89,7 @@ static struct {
struct device *dev;
struct gpio_chip chip;
struct resource *gpio_base; /* GPIO IO base */
- struct resource *pm_base; /* Power Mangagment IO base */
+ struct resource *pm_base; /* Power Management IO base */
struct ichx_desc *desc; /* Pointer to chipset-specific description */
u32 orig_gpio_ctrl; /* Orig CTRL value, used to restore on exit */
u8 use_gpio; /* Which GPIO groups are usable */
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 1e1935c51096..b8c1fe20f49a 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -47,7 +47,7 @@
static int max7301_direction_input(struct gpio_chip *chip, unsigned offset)
{
- struct max7301 *ts = gpiochip_get_data(chip);
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
u8 *config;
u8 offset_bits, pin_config;
int ret;
@@ -89,7 +89,7 @@ static int __max7301_set(struct max7301 *ts, unsigned offset, int value)
static int max7301_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
- struct max7301 *ts = gpiochip_get_data(chip);
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
u8 *config;
u8 offset_bits;
int ret;
@@ -189,10 +189,6 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.parent = dev;
ts->chip.owner = THIS_MODULE;
- ret = gpiochip_add_data(&ts->chip, ts);
- if (ret)
- goto exit_destroy;
-
/*
* initialize pullups according to platform data and cache the
* register values for later use.
@@ -214,7 +210,9 @@ int __max730x_probe(struct max7301 *ts)
}
}
- return ret;
+ ret = gpiochip_add_data(&ts->chip, ts);
+ if (!ret)
+ return ret;
exit_destroy:
mutex_destroy(&ts->lock);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 501e89548f53..37c5363e391e 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -145,7 +145,9 @@ static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
for (index = 0;; index++) {
irq = platform_get_irq(to_platform_device(gc->parent), index);
- if (irq <= 0)
+ if (irq < 0)
+ return irq;
+ if (irq == 0)
break;
if (irq_get_irq_data(irq)->hwirq == offset)
return irq;
@@ -168,15 +170,13 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gchip->base))
return PTR_ERR(gchip->base);
- if (!has_acpi_companion(&pdev->dev)) {
- gchip->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(gchip->clk))
- return PTR_ERR(gchip->clk);
+ gchip->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(gchip->clk))
+ return PTR_ERR(gchip->clk);
- ret = clk_prepare_enable(gchip->clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(gchip->clk);
+ if (ret)
+ return ret;
spin_lock_init(&gchip->lock);
@@ -186,15 +186,13 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
gchip->gc.set = mb86s70_gpio_set;
+ gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
gchip->gc.owner = THIS_MODULE;
gchip->gc.parent = &pdev->dev;
gchip->gc.base = -1;
- if (has_acpi_companion(&pdev->dev))
- gchip->gc.to_irq = mb86s70_gpio_to_irq;
-
ret = gpiochip_add_data(&gchip->gc, gchip);
if (ret) {
dev_err(&pdev->dev, "couldn't register gpio driver\n");
@@ -202,8 +200,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
return ret;
}
- if (has_acpi_companion(&pdev->dev))
- acpi_gpiochip_request_interrupts(&gchip->gc);
+ acpi_gpiochip_request_interrupts(&gchip->gc);
return 0;
}
@@ -212,8 +209,7 @@ static int mb86s70_gpio_remove(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip = platform_get_drvdata(pdev);
- if (has_acpi_companion(&pdev->dev))
- acpi_gpiochip_free_interrupts(&gchip->gc);
+ acpi_gpiochip_free_interrupts(&gchip->gc);
gpiochip_remove(&gchip->gc);
clk_disable_unprepare(gchip->clk);
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 48918a016cd8..706687fab634 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -443,8 +443,8 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
base = pcim_iomap_table(pdev)[1];
- irq_base = readl(base);
- gpio_base = readl(sizeof(u32) + base);
+ irq_base = readl(base + 0 * sizeof(u32));
+ gpio_base = readl(base + 1 * sizeof(u32));
/* Release the IO mapping, since we already get the info from BAR1 */
pcim_iounmap_regions(pdev, BIT(1));
@@ -473,6 +473,10 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
raw_spin_lock_init(&priv->lock);
+ retval = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (retval < 0)
+ return retval;
+
girq = &priv->chip.irq;
girq->chip = &mrfld_irqchip;
girq->init_hw = mrfld_irq_init_hw;
@@ -482,7 +486,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
- girq->parents[0] = pdev->irq;
+ girq->parents[0] = pci_irq_vector(pdev, 0);
girq->first = irq_base;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 7b7085050219..94d5efce1721 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -14,7 +14,6 @@
#include <linux/resource.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/version.h>
/*
* There are 3 YU GPIO blocks:
@@ -110,8 +109,8 @@ static int mlxbf2_gpio_get_lock_res(struct platform_device *pdev)
}
yu_arm_gpio_lock_param.io = devm_ioremap(dev, res->start, size);
- if (IS_ERR(yu_arm_gpio_lock_param.io))
- ret = PTR_ERR(yu_arm_gpio_lock_param.io);
+ if (!yu_arm_gpio_lock_param.io)
+ ret = -ENOMEM;
exit:
mutex_unlock(yu_arm_gpio_lock_param.lock);
@@ -127,8 +126,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
{
u32 arm_gpio_lock_val;
- spin_lock(&gs->gc.bgpio_lock);
mutex_lock(yu_arm_gpio_lock_param.lock);
+ spin_lock(&gs->gc.bgpio_lock);
arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
@@ -136,8 +135,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
* When lock active bit[31] is set, ModeX is write enabled
*/
if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
- mutex_unlock(yu_arm_gpio_lock_param.lock);
spin_unlock(&gs->gc.bgpio_lock);
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
return -EINVAL;
}
@@ -152,8 +151,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
{
writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
- mutex_unlock(yu_arm_gpio_lock_param.lock);
spin_unlock(&gs->gc.bgpio_lock);
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
}
/*
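
The mlxbf2 hunks are a lock-ordering fix: the sleeping mutex must be taken before the spinlock and released after it, because sleeping while holding a spinlock is invalid in atomic context. The corrected shape, modelled with pthreads (the kernel primitives differ, but the ordering rule is the point):

    #include <pthread.h>

    static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;  /* sleeping lock */
    static pthread_spinlock_t inner;                           /* busy-wait lock */

    static void critical(void)
    {
            pthread_mutex_lock(&outer);     /* sleeping lock first */
            pthread_spin_lock(&inner);      /* then the spinlock */
            /* ... register access ... */
            pthread_spin_unlock(&inner);    /* unlock in strict reverse order */
            pthread_mutex_unlock(&outer);
    }

    int main(void)
    {
            pthread_spin_init(&inner, PTHREAD_PROCESS_PRIVATE);
            critical();
            pthread_spin_destroy(&inner);
            return 0;
    }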
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index f460d71b0c92..538e31fe8903 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -36,7 +36,7 @@ struct ltq_mm {
* @chip: Pointer to our private data structure.
*
* Write the shadow value to the EBU to set the gpios. We need to set the
- * global EBU lock to make sure that PCI/MTD dont break.
+ * global EBU lock to make sure that PCI/MTD don't break.
*/
static void ltq_mm_apply(struct ltq_mm *chip)
{
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 3eb94f3740d1..bc345185db26 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -48,7 +49,7 @@ struct gpio_mockup_line_status {
struct gpio_mockup_chip {
struct gpio_chip gc;
struct gpio_mockup_line_status *lines;
- struct irq_sim irqsim;
+ struct irq_domain *irq_sim_domain;
struct dentry *dbg_dir;
struct mutex lock;
};
@@ -144,14 +145,12 @@ static void gpio_mockup_set_multiple(struct gpio_chip *gc,
static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
unsigned int offset, int value)
{
+ int curr, irq, irq_type, ret = 0;
struct gpio_desc *desc;
struct gpio_chip *gc;
- struct irq_sim *sim;
- int curr, irq, irq_type;
gc = &chip->gc;
desc = &gc->gpiodev->descs[offset];
- sim = &chip->irqsim;
mutex_lock(&chip->lock);
@@ -161,14 +160,28 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
if (curr == value)
goto out;
- irq = irq_sim_irqnum(sim, offset);
+ irq = irq_find_mapping(chip->irq_sim_domain, offset);
+ if (!irq)
+ /*
+ * This is fine - it just means nobody is listening
+ * for interrupts on this line; otherwise
+ * irq_create_mapping() would have been called from
+ * the to_irq() callback.
+ */
+ goto set_value;
+
irq_type = irq_get_trigger_type(irq);
if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
- (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
- irq_sim_fire(sim, offset);
+ (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) {
+ ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
+ true);
+ if (ret)
+ goto out;
+ }
}
+set_value:
/* Change the value unless we're actively driving the line. */
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
!test_bit(FLAG_IS_OUT, &desc->flags))
@@ -177,7 +190,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
out:
chip->lines[offset].pull = value;
mutex_unlock(&chip->lock);
- return 0;
+ return ret;
}
static int gpio_mockup_set_config(struct gpio_chip *gc,
@@ -236,7 +249,7 @@ static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
- return irq_sim_irqnum(&chip->irqsim, offset);
+ return irq_create_mapping(chip->irq_sim_domain, offset);
}
static void gpio_mockup_free(struct gpio_chip *gc, unsigned int offset)
@@ -389,6 +402,19 @@ static int gpio_mockup_name_lines(struct device *dev,
return 0;
}
+static void gpio_mockup_dispose_mappings(void *data)
+{
+ struct gpio_mockup_chip *chip = data;
+ struct gpio_chip *gc = &chip->gc;
+ int i, irq;
+
+ for (i = 0; i < gc->ngpio; i++) {
+ irq = irq_find_mapping(chip->irq_sim_domain, i);
+ if (irq)
+ irq_dispose_mapping(irq);
+ }
+}
+
static int gpio_mockup_probe(struct platform_device *pdev)
{
struct gpio_mockup_chip *chip;
@@ -456,8 +482,13 @@ static int gpio_mockup_probe(struct platform_device *pdev)
return rv;
}
- rv = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
- if (rv < 0)
+ chip->irq_sim_domain = devm_irq_domain_create_sim(dev, NULL,
+ gc->ngpio);
+ if (IS_ERR(chip->irq_sim_domain))
+ return PTR_ERR(chip->irq_sim_domain);
+
+ rv = devm_add_action_or_reset(dev, gpio_mockup_dispose_mappings, chip);
+ if (rv)
return rv;
rv = devm_gpiochip_add_data(dev, &chip->gc, chip);
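
The mockup conversion also makes interrupt mappings lazy: gpio_mockup_to_irq() calls irq_create_mapping(), while the pull path only does irq_find_mapping() and skips lines that nobody mapped. A standalone model of that find-or-create split (a plain table stands in for the irq_domain):

    #include <stdio.h>

    #define NLINES 4
    static int mapping[NLINES];     /* 0 = no virq mapped, else virq number */
    static int next_virq = 100;

    static int find_mapping(int line) { return mapping[line]; }

    static int create_mapping(int line)  /* to_irq() analogue */
    {
            if (!mapping[line])
                    mapping[line] = next_virq++;
            return mapping[line];
    }

    int main(void)
    {
            printf("find before: %d\n", find_mapping(2));   /* 0: skip the line */
            printf("create:      %d\n", create_mapping(2)); /* allocates 100 */
            printf("find after:  %d\n", find_mapping(2));   /* 100: deliver irq */
            return 0;
    }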
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 3c9f4fb3d5a2..bd65114eb170 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -782,6 +782,15 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
"marvell,armada-370-gpio"))
return 0;
+ /*
+ * There are only two sets of PWM configuration registers for
+ * all the GPIO lines on those SoCs which this driver reserves
+ * for the first two GPIO chips. So if the resource is missing
+ * we can't treat it as an error.
+ */
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
+ return 0;
+
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
@@ -804,12 +813,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
- /*
- * There are only two sets of PWM configuration registers for
- * all the GPIO lines on those SoCs which this driver reserves
- * for the first two GPIO chips. So if the resource is missing
- * we can't treat it as an error.
- */
mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
if (IS_ERR(mvpwm->membase))
return PTR_ERR(mvpwm->membase);
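
Probing the named resource before the clock keeps a missing PWM block an
early, silent exit. A hedged sketch of the general optional-resource pattern
this hunk follows (function name hypothetical):

	/* Sketch: treat a missing named MEM resource as "feature absent". */
	static int probe_optional_pwm(struct platform_device *pdev,
				      void __iomem **base)
	{
		if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
			return 0;	/* resource absent: not an error */

		*base = devm_platform_ioremap_resource_byname(pdev, "pwm");
		return PTR_ERR_OR_ZERO(*base);
	}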
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 4269ea9a817e..1fca8dd7824f 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -306,37 +306,39 @@ static const struct regmap_config pca953x_i2c_regmap = {
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
+ .disable_locking = true,
.cache_type = REGCACHE_RBTREE,
- /* REVISIT: should be 0x7f but some 24 bit chips use REG_ADDR_AI */
- .max_register = 0xff,
+ .max_register = 0x7f,
};
-static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off,
- bool write, bool addrinc)
+static const struct regmap_config pca953x_ai_i2c_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .read_flag_mask = REG_ADDR_AI,
+ .write_flag_mask = REG_ADDR_AI,
+
+ .readable_reg = pca953x_readable_register,
+ .writeable_reg = pca953x_writeable_register,
+ .volatile_reg = pca953x_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 0x7f,
+};
+
+static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off)
{
int bank_shift = pca953x_bank_shift(chip);
int addr = (reg & PCAL_GPIO_MASK) << bank_shift;
int pinctrl = (reg & PCAL_PINCTRL_MASK) << 1;
u8 regaddr = pinctrl | addr | (off / BANK_SZ);
- /* Single byte read doesn't need AI bit set. */
- if (!addrinc)
- return regaddr;
-
- /* Chips with 24 and more GPIOs always support Auto Increment */
- if (write && NBANK(chip) > 2)
- regaddr |= REG_ADDR_AI;
-
- /* PCA9575 needs address-increment on multi-byte writes */
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE)
- regaddr |= REG_ADDR_AI;
-
return regaddr;
}
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0, true, true);
+ u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -354,7 +356,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long
static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0, false, true);
+ u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -373,8 +375,7 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off,
- true, false);
+ u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -388,10 +389,8 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off,
- true, false);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off,
- true, false);
+ u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
+ u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -411,8 +410,7 @@ exit:
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 inreg = pca953x_recalc_addr(chip, chip->regs->input, off,
- true, false);
+ u8 inreg = pca953x_recalc_addr(chip, chip->regs->input, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -436,8 +434,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off,
- true, false);
+ u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
mutex_lock(&chip->i2c_lock);
@@ -448,8 +445,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off,
- true, false);
+ u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -466,6 +462,23 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
return GPIO_LINE_DIRECTION_OUT;
}
+static int pca953x_gpio_get_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct pca953x_chip *chip = gpiochip_get_data(gc);
+ DECLARE_BITMAP(reg_val, MAX_LINE);
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+ ret = pca953x_read_regs(chip, chip->regs->input, reg_val);
+ mutex_unlock(&chip->i2c_lock);
+ if (ret)
+ return ret;
+
+ bitmap_replace(bits, bits, reg_val, mask, gc->ngpio);
+ return 0;
+}
+
static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
@@ -489,10 +502,8 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
unsigned int offset,
unsigned long config)
{
- u8 pull_en_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_EN, offset,
- true, false);
- u8 pull_sel_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_SEL, offset,
- true, false);
+ u8 pull_en_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_EN, offset);
+ u8 pull_sel_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_SEL, offset);
u8 bit = BIT(offset % BANK_SZ);
int ret;
@@ -551,6 +562,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->get = pca953x_gpio_get_value;
gc->set = pca953x_gpio_set_value;
gc->get_direction = pca953x_gpio_get_direction;
+ gc->get_multiple = pca953x_gpio_get_multiple;
gc->set_multiple = pca953x_gpio_set_multiple;
gc->set_config = pca953x_gpio_set_config;
gc->can_sleep = true;
@@ -863,6 +875,7 @@ static int pca953x_probe(struct i2c_client *client,
int ret;
u32 invert = 0;
struct regulator *reg;
+ const struct regmap_config *regmap_config;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -925,7 +938,17 @@ static int pca953x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- chip->regmap = devm_regmap_init_i2c(client, &pca953x_i2c_regmap);
+ pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
+
+ if (NBANK(chip) > 2 || PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ dev_info(&client->dev, "using AI\n");
+ regmap_config = &pca953x_ai_i2c_regmap;
+ } else {
+ dev_info(&client->dev, "using no AI\n");
+ regmap_config = &pca953x_i2c_regmap;
+ }
+
+ chip->regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
goto err_exit;
@@ -956,7 +979,6 @@ static int pca953x_probe(struct i2c_client *client,
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) {
chip->regs = &pca953x_regs;
@@ -1154,7 +1176,7 @@ static struct i2c_driver pca953x_driver = {
.name = "pca953x",
.pm = &pca953x_pm_ops,
.of_match_table = pca953x_dt_ids,
- .acpi_match_table = ACPI_PTR(pca953x_acpi_ids),
+ .acpi_match_table = pca953x_acpi_ids,
},
.probe = pca953x_probe,
.remove = pca953x_remove,
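
The probe-time regmap selection above reduces to choosing one of two static
configs, using the same predicates the old recalc_addr() applied per access.
A condensed sketch of that decision (helper name hypothetical):

	/* Sketch: chips with >2 banks or PCA957x parts need REG_ADDR_AI. */
	static const struct regmap_config *
	pca953x_pick_regmap(struct pca953x_chip *chip)
	{
		if (NBANK(chip) > 2 ||
		    PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE)
			return &pca953x_ai_i2c_regmap;

		return &pca953x_i2c_regmap;
	}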
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 3f3d9a94b709..e96d28bf43b4 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
+#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -11,11 +12,11 @@
#include <linux/slab.h>
#define PCH_EDGE_FALLING 0
-#define PCH_EDGE_RISING BIT(0)
-#define PCH_LEVEL_L BIT(1)
-#define PCH_LEVEL_H (BIT(0) | BIT(1))
-#define PCH_EDGE_BOTH BIT(2)
-#define PCH_IM_MASK (BIT(0) | BIT(1) | BIT(2))
+#define PCH_EDGE_RISING 1
+#define PCH_LEVEL_L 2
+#define PCH_LEVEL_H 3
+#define PCH_EDGE_BOTH 4
+#define PCH_IM_MASK GENMASK(2, 0)
#define PCH_IRQ_BASE 24
@@ -103,9 +104,9 @@ static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
spin_lock_irqsave(&chip->spinlock, flags);
reg_val = ioread32(&chip->reg->po);
if (val)
- reg_val |= (1 << nr);
+ reg_val |= BIT(nr);
else
- reg_val &= ~(1 << nr);
+ reg_val &= ~BIT(nr);
iowrite32(reg_val, &chip->reg->po);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -115,7 +116,7 @@ static int pch_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct pch_gpio *chip = gpiochip_get_data(gpio);
- return (ioread32(&chip->reg->pi) >> nr) & 1;
+ return !!(ioread32(&chip->reg->pi) & BIT(nr));
}
static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
@@ -130,13 +131,14 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
reg_val = ioread32(&chip->reg->po);
if (val)
- reg_val |= (1 << nr);
+ reg_val |= BIT(nr);
else
- reg_val &= ~(1 << nr);
+ reg_val &= ~BIT(nr);
iowrite32(reg_val, &chip->reg->po);
- pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
- pm |= (1 << nr);
+ pm = ioread32(&chip->reg->pm);
+ pm &= BIT(gpio_pins[chip->ioh]) - 1;
+ pm |= BIT(nr);
iowrite32(pm, &chip->reg->pm);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -151,8 +153,9 @@ static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
- pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
- pm &= ~(1 << nr);
+ pm = ioread32(&chip->reg->pm);
+ pm &= BIT(gpio_pins[chip->ioh]) - 1;
+ pm &= ~BIT(nr);
iowrite32(pm, &chip->reg->pm);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -226,17 +229,15 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
int ch, irq = d->irq;
ch = irq - chip->irq_base;
- if (irq <= chip->irq_base + 7) {
+ if (irq < chip->irq_base + 8) {
im_reg = &chip->reg->im0;
- im_pos = ch;
+ im_pos = ch - 0;
} else {
im_reg = &chip->reg->im1;
im_pos = ch - 8;
}
dev_dbg(chip->dev, "irq=%d type=%d ch=%d pos=%d\n", irq, type, ch, im_pos);
- spin_lock_irqsave(&chip->spinlock, flags);
-
switch (type) {
case IRQ_TYPE_EDGE_RISING:
val = PCH_EDGE_RISING;
@@ -254,20 +255,21 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
val = PCH_LEVEL_L;
break;
default:
- goto unlock;
+ return 0;
}
+ spin_lock_irqsave(&chip->spinlock, flags);
+
/* Set interrupt mode */
im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
iowrite32(im | (val << (im_pos * 4)), im_reg);
/* And the handler */
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ else if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_handler_locked(d, handle_edge_irq);
-unlock:
spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
}
@@ -277,7 +279,7 @@ static void pch_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imaskclr);
+ iowrite32(BIT(d->irq - chip->irq_base), &chip->reg->imaskclr);
}
static void pch_irq_mask(struct irq_data *d)
@@ -285,7 +287,7 @@ static void pch_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask);
+ iowrite32(BIT(d->irq - chip->irq_base), &chip->reg->imask);
}
static void pch_irq_ack(struct irq_data *d)
@@ -293,21 +295,22 @@ static void pch_irq_ack(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr);
+ iowrite32(BIT(d->irq - chip->irq_base), &chip->reg->iclr);
}
static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
{
struct pch_gpio *chip = dev_id;
unsigned long reg_val = ioread32(&chip->reg->istatus);
- int i, ret = IRQ_NONE;
+ int i;
+
+ dev_dbg(chip->dev, "irq=%d status=0x%lx\n", irq, reg_val);
- for_each_set_bit(i, &reg_val, gpio_pins[chip->ioh]) {
- dev_dbg(chip->dev, "[%d]:irq=%d status=0x%lx\n", i, irq, reg_val);
+ reg_val &= BIT(gpio_pins[chip->ioh]) - 1;
+ for_each_set_bit(i, &reg_val, gpio_pins[chip->ioh])
generic_handle_irq(chip->irq_base + i);
- ret = IRQ_HANDLED;
- }
- return ret;
+
+ return IRQ_RETVAL(reg_val);
}
static int pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
@@ -344,7 +347,6 @@ static int pch_gpio_probe(struct pci_dev *pdev,
s32 ret;
struct pch_gpio *chip;
int irq_base;
- u32 msk;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -357,7 +359,7 @@ static int pch_gpio_probe(struct pci_dev *pdev,
return ret;
}
- ret = pcim_iomap_regions(pdev, 1 << 1, KBUILD_MODNAME);
+ ret = pcim_iomap_regions(pdev, BIT(1), KBUILD_MODNAME);
if (ret) {
dev_err(&pdev->dev, "pci_request_regions FAILED-%d", ret);
return ret;
@@ -393,9 +395,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
chip->irq_base = irq_base;
/* Mask all interrupts, but enable them */
- msk = (1 << gpio_pins[chip->ioh]) - 1;
- iowrite32(msk, &chip->reg->imask);
- iowrite32(msk, &chip->reg->ien);
+ iowrite32(BIT(gpio_pins[chip->ioh]) - 1, &chip->reg->imask);
+ iowrite32(BIT(gpio_pins[chip->ioh]) - 1, &chip->reg->ien);
ret = devm_request_irq(&pdev->dev, pdev->irq, pch_gpio_handler,
IRQF_SHARED, KBUILD_MODNAME, chip);
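
The BIT()/GENMASK() conversions are value-preserving; the real cleanup is
that PCH_EDGE_RISING and friends are enumerated field values rather than bit
masks, so spelling them 1..4 is more honest than BIT() arithmetic. A sketch
of the equivalences, assuming <linux/bits.h> and <linux/build_bug.h>:

	/* Sketch: the new macros encode the same numbers as the old ones. */
	static_assert(GENMASK(2, 0) == (BIT(0) | BIT(1) | BIT(2)));	/* 0x7 */
	static_assert(BIT(5) == (1UL << 5));
	/* old pin mask: (1 << gpio_pins[i]) - 1 == BIT(gpio_pins[i]) - 1 */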
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index e241fb884c12..f1b53dd1df1a 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/device.h>
@@ -408,6 +409,7 @@ static const struct amba_id pl061_ids[] = {
},
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl061_ids);
static struct amba_driver pl061_gpio_driver = {
.drv = {
@@ -419,9 +421,6 @@ static struct amba_driver pl061_gpio_driver = {
.id_table = pl061_ids,
.probe = pl061_probe,
};
+module_amba_driver(pl061_gpio_driver);
-static int __init pl061_gpio_init(void)
-{
- return amba_driver_register(&pl061_gpio_driver);
-}
-device_initcall(pl061_gpio_init);
+MODULE_LICENSE("GPL v2");
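
module_amba_driver() replaces the open-coded initcall; roughly (per the
module_driver() helper it wraps) it expands to paired init/exit stubs, so the
conversion also gains a proper unregister path on module removal:

	/* Sketch: approximately what module_amba_driver(pl061_gpio_driver)
	 * generates via module_driver().
	 */
	static int __init pl061_gpio_driver_init(void)
	{
		return amba_driver_register(&pl061_gpio_driver);
	}
	module_init(pl061_gpio_driver_init);

	static void __exit pl061_gpio_driver_exit(void)
	{
		amba_driver_unregister(&pl061_gpio_driver);
	}
	module_exit(pl061_gpio_driver_exit);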
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 1361270ecf8c..0cb6600b8eee 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -660,8 +660,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
pchip->irq1 = irq1;
gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!gpio_reg_base)
- return -EINVAL;
+ if (IS_ERR(gpio_reg_base))
+ return PTR_ERR(gpio_reg_base);
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 7284473c9fe3..eac1582c70da 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -250,8 +250,10 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
int error;
error = pm_runtime_get_sync(p->dev);
- if (error < 0)
+ if (error < 0) {
+ pm_runtime_put(p->dev);
return error;
+ }
error = pinctrl_gpio_request(chip->base + offset);
if (error)
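
pm_runtime_get_sync() increments the usage counter even when it fails, so the
error path must drop that reference - which is the whole fix here. A sketch
of the balanced pattern (newer kernels also offer pm_runtime_resume_and_get()
to hide this pitfall):

	/* Sketch: keep the runtime-PM usage count balanced on failure. */
	static int rcar_runtime_get(struct device *dev)
	{
		int error = pm_runtime_get_sync(dev);

		if (error < 0) {
			pm_runtime_put(dev);	/* undo the counted get */
			return error;
		}

		return 0;
	}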
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
new file mode 100644
index 000000000000..5412cb3b0b2a
--- /dev/null
+++ b/drivers/gpio/gpio-regmap.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * regmap based generic GPIO driver
+ *
+ * Copyright 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+struct gpio_regmap {
+ struct device *parent;
+ struct regmap *regmap;
+ struct gpio_chip gpio_chip;
+
+ int reg_stride;
+ int ngpio_per_reg;
+ unsigned int reg_dat_base;
+ unsigned int reg_set_base;
+ unsigned int reg_clr_base;
+ unsigned int reg_dir_in_base;
+ unsigned int reg_dir_out_base;
+
+ int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask);
+
+ void *driver_data;
+};
+
+static unsigned int gpio_regmap_addr(unsigned int addr)
+{
+ if (addr == GPIO_REGMAP_ADDR_ZERO)
+ return 0;
+
+ return addr;
+}
+
+static int gpio_regmap_simple_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ unsigned int line = offset % gpio->ngpio_per_reg;
+ unsigned int stride = offset / gpio->ngpio_per_reg;
+
+ *reg = base + stride * gpio->reg_stride;
+ *mask = BIT(line);
+
+ return 0;
+}
+
+static int gpio_regmap_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, val, reg, mask;
+ int ret;
+
+ /* we might not have a data register if we are output only */
+ if (gpio->reg_dat_base)
+ base = gpio_regmap_addr(gpio->reg_dat_base);
+ else
+ base = gpio_regmap_addr(gpio->reg_set_base);
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(gpio->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & mask);
+}
+
+static void gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base = gpio_regmap_addr(gpio->reg_set_base);
+ unsigned int reg, mask;
+
+ gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (val)
+ regmap_update_bits(gpio->regmap, reg, mask, mask);
+ else
+ regmap_update_bits(gpio->regmap, reg, mask, 0);
+}
+
+static void gpio_regmap_set_with_clear(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, reg, mask;
+
+ if (val)
+ base = gpio_regmap_addr(gpio->reg_set_base);
+ else
+ base = gpio_regmap_addr(gpio->reg_clr_base);
+
+ gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ regmap_write(gpio->regmap, reg, mask);
+}
+
+static int gpio_regmap_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, val, reg, mask;
+ int invert, ret;
+
+ if (gpio->reg_dir_out_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_out_base);
+ invert = 0;
+ } else if (gpio->reg_dir_in_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_in_base);
+ invert = 1;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(gpio->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ if (!!(val & mask) ^ invert)
+ return GPIO_LINE_DIRECTION_OUT;
+ else
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int gpio_regmap_set_direction(struct gpio_chip *chip,
+ unsigned int offset, bool output)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, val, reg, mask;
+ int invert, ret;
+
+ if (gpio->reg_dir_out_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_out_base);
+ invert = 0;
+ } else if (gpio->reg_dir_in_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_in_base);
+ invert = 1;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ if (invert)
+ val = output ? 0 : mask;
+ else
+ val = output ? mask : 0;
+
+ return regmap_update_bits(gpio->regmap, reg, mask, val);
+}
+
+static int gpio_regmap_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return gpio_regmap_set_direction(chip, offset, false);
+}
+
+static int gpio_regmap_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ gpio_regmap_set(chip, offset, value);
+
+ return gpio_regmap_set_direction(chip, offset, true);
+}
+
+void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data)
+{
+ gpio->driver_data = data;
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_set_drvdata);
+
+void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio)
+{
+ return gpio->driver_data;
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata);
+
+/**
+ * gpio_regmap_register() - Register a generic regmap GPIO controller
+ * @config: configuration for gpio_regmap
+ *
+ * Return: A pointer to the registered gpio_regmap or ERR_PTR error value.
+ */
+struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config)
+{
+ struct gpio_regmap *gpio;
+ struct gpio_chip *chip;
+ int ret;
+
+ if (!config->parent)
+ return ERR_PTR(-EINVAL);
+
+ if (!config->ngpio)
+ return ERR_PTR(-EINVAL);
+
+ /* we need at least one of the data or set register bases */
+ if (!config->reg_dat_base && !config->reg_set_base)
+ return ERR_PTR(-EINVAL);
+
+ /* if we have a direction register we need both data (in) and set (out) */
+ if ((config->reg_dir_out_base || config->reg_dir_in_base) &&
+ (!config->reg_dat_base || !config->reg_set_base))
+ return ERR_PTR(-EINVAL);
+
+ /* we don't support having both direction registers simultaneously for now */
+ if (config->reg_dir_out_base && config->reg_dir_in_base)
+ return ERR_PTR(-EINVAL);
+
+ gpio = kzalloc(sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return ERR_PTR(-ENOMEM);
+
+ gpio->parent = config->parent;
+ gpio->regmap = config->regmap;
+ gpio->ngpio_per_reg = config->ngpio_per_reg;
+ gpio->reg_stride = config->reg_stride;
+ gpio->reg_mask_xlate = config->reg_mask_xlate;
+ gpio->reg_dat_base = config->reg_dat_base;
+ gpio->reg_set_base = config->reg_set_base;
+ gpio->reg_clr_base = config->reg_clr_base;
+ gpio->reg_dir_in_base = config->reg_dir_in_base;
+ gpio->reg_dir_out_base = config->reg_dir_out_base;
+
+ /* if not set, assume there is only one register */
+ if (!gpio->ngpio_per_reg)
+ gpio->ngpio_per_reg = config->ngpio;
+
+ /* if not set, assume they are consecutive */
+ if (!gpio->reg_stride)
+ gpio->reg_stride = 1;
+
+ if (!gpio->reg_mask_xlate)
+ gpio->reg_mask_xlate = gpio_regmap_simple_xlate;
+
+ chip = &gpio->gpio_chip;
+ chip->parent = config->parent;
+ chip->base = -1;
+ chip->ngpio = config->ngpio;
+ chip->names = config->names;
+ chip->label = config->label ?: dev_name(config->parent);
+
+ /*
+ * If our regmap is fast_io we should probably set can_sleep to false.
+ * Right now, the regmap doesn't save this property, nor is there any
+ * access function for it.
+ * The only regmap type which uses fast_io is regmap-mmio. For now,
+ * assume a safe default of true here.
+ */
+ chip->can_sleep = true;
+
+ chip->get = gpio_regmap_get;
+ if (gpio->reg_set_base && gpio->reg_clr_base)
+ chip->set = gpio_regmap_set_with_clear;
+ else if (gpio->reg_set_base)
+ chip->set = gpio_regmap_set;
+
+ if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
+ chip->get_direction = gpio_regmap_get_direction;
+ chip->direction_input = gpio_regmap_direction_input;
+ chip->direction_output = gpio_regmap_direction_output;
+ }
+
+ ret = gpiochip_add_data(chip, gpio);
+ if (ret < 0)
+ goto err_free_gpio;
+
+ if (config->irq_domain) {
+ ret = gpiochip_irqchip_add_domain(chip, config->irq_domain);
+ if (ret)
+ goto err_remove_gpiochip;
+ }
+
+ return gpio;
+
+err_remove_gpiochip:
+ gpiochip_remove(chip);
+err_free_gpio:
+ kfree(gpio);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_register);
+
+/**
+ * gpio_regmap_unregister() - Unregister a generic regmap GPIO controller
+ * @gpio: gpio_regmap device to unregister
+ */
+void gpio_regmap_unregister(struct gpio_regmap *gpio)
+{
+ gpiochip_remove(&gpio->gpio_chip);
+ kfree(gpio);
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
+
+static void devm_gpio_regmap_unregister(struct device *dev, void *res)
+{
+ gpio_regmap_unregister(*(struct gpio_regmap **)res);
+}
+
+/**
+ * devm_gpio_regmap_register() - resource managed gpio_regmap_register()
+ * @dev: device that is registering this GPIO device
+ * @config: configuration for gpio_regmap
+ *
+ * Managed gpio_regmap_register(). For generic regmap GPIO device registered by
+ * this function, gpio_regmap_unregister() is automatically called on driver
+ * detach. See gpio_regmap_register() for more information.
+ *
+ * Return: A pointer to the registered gpio_regmap or ERR_PTR error value.
+ */
+struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
+ const struct gpio_regmap_config *config)
+{
+ struct gpio_regmap **ptr, *gpio;
+
+ ptr = devres_alloc(devm_gpio_regmap_unregister, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ gpio = gpio_regmap_register(config);
+ if (!IS_ERR(gpio)) {
+ *ptr = gpio;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return gpio;
+}
+EXPORT_SYMBOL_GPL(devm_gpio_regmap_register);
+
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_DESCRIPTION("GPIO generic regmap driver core");
+MODULE_LICENSE("GPL");
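
A hedged usage sketch for the new core: a hypothetical MMIO GPIO block with
data, set and direction registers, registered through
devm_gpio_regmap_register(). All offsets and names are invented for
illustration; a real driver supplies its own regmap:

	/* Sketch: minimal gpio-regmap consumer; offsets are hypothetical. */
	static int foo_gpio_probe(struct platform_device *pdev)
	{
		struct gpio_regmap_config config = {
			.parent = &pdev->dev,
			.regmap = dev_get_regmap(pdev->dev.parent, NULL),
			.ngpio = 32,
			.reg_dat_base = 0x04,		/* input data */
			.reg_set_base = 0x08,		/* output data */
			.reg_dir_out_base = 0x0c,	/* 1 = output */
		};

		return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(&pdev->dev,
								 &config));
	}

With only these three bases set, the defaults apply: ngpio_per_reg equals
ngpio, the stride is one register, and gpio_regmap_simple_xlate() computes
the register/mask pair for each line.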
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 79b553dc39a3..178e9128ded0 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -894,6 +894,7 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, tegra186_gpio_of_match);
static struct platform_driver tegra186_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 25d86441666e..a809609ee957 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/driver.h>
#include <linux/acpi.h>
@@ -122,7 +122,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
fwspec.fwnode = gc->parent->fwnode;
fwspec.param_count = 2;
fwspec.param[0] = GPIO_TO_HWIRQ(priv, gpio);
- fwspec.param[1] = IRQ_TYPE_NONE;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
return irq_create_fwspec_mapping(&fwspec);
}
@@ -290,10 +290,8 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
- if (priv->nirq > 0) {
- /* Register interrupt handlers for gpio signaled acpi events */
- acpi_gpiochip_request_interrupts(&priv->gc);
- }
+ /* Register interrupt handlers for GPIO signaled ACPI events */
+ acpi_gpiochip_request_interrupts(&priv->gc);
return ret;
}
@@ -302,9 +300,7 @@ static int xgene_gpio_sb_remove(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv = platform_get_drvdata(pdev);
- if (priv->nirq > 0) {
- acpi_gpiochip_free_interrupts(&priv->gc);
- }
+ acpi_gpiochip_free_interrupts(&priv->gc);
irq_domain_remove(priv->irq_domain);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 0017367e94ee..9276051663da 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1353,7 +1353,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
}
/* Run deferred acpi_gpiochip_request_irqs() */
-static int acpi_gpio_handle_deferred_request_irqs(void)
+static int __init acpi_gpio_handle_deferred_request_irqs(void)
{
struct acpi_gpio_chip *acpi_gpio, *tmp;
@@ -1371,7 +1371,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-static const struct dmi_system_id gpiolib_acpi_quirks[] = {
+static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
{
/*
* The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
@@ -1455,7 +1455,7 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
{} /* Terminating entry */
};
-static int acpi_gpio_setup_params(void)
+static int __init acpi_gpio_setup_params(void)
{
const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
const struct dmi_system_id *id;
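
Marking the quirk table __initconst and its users __init lets the kernel
discard both once boot finishes, since the DMI matching only runs from
initcalls. A sketch of the annotation pattern, with an invented vendor
string:

	/* Sketch: boot-only DMI data is freed after init. */
	static const struct dmi_system_id quirks[] __initconst = {
		{
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			},
		},
		{}	/* terminating entry */
	};

	static int __init apply_quirks(void)
	{
		if (dmi_check_system(quirks))
			pr_info("quirk applied\n");
		return 0;
	}
	late_initcall(apply_quirks);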
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index 53781b253986..26741032fa9e 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -37,8 +37,11 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip,
if (count < 0)
return;
- if (count > gdev->ngpio)
+ if (count > gdev->ngpio) {
+ dev_warn(&gdev->dev, "gpio-line-names is length %d but should be at most length %d\n",
+ count, gdev->ngpio);
count = gdev->ngpio;
+ }
names = kcalloc(count, sizeof(*names), GFP_KERNEL);
if (!names)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ccc449df3792..219eb0054233 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -344,6 +344,12 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
if (transitory)
lflags |= GPIO_TRANSITORY;
+ if (flags & OF_GPIO_PULL_UP)
+ lflags |= GPIO_PULL_UP;
+
+ if (flags & OF_GPIO_PULL_DOWN)
+ lflags |= GPIO_PULL_DOWN;
+
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
@@ -460,6 +466,24 @@ static struct gpio_desc *of_find_arizona_gpio(struct device *dev,
return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
}
+static struct gpio_desc *of_find_usb_gpio(struct device *dev,
+ const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ /*
+ * Currently this USB quirk is only for the Fairchild FUSB302 host, which uses
+ * an undocumented DT GPIO line named "fcs,int_n" without the mandatory "-gpios"
+ * suffix.
+ */
+ if (!IS_ENABLED(CONFIG_TYPEC_FUSB302))
+ return ERR_PTR(-ENOENT);
+
+ if (!con_id || strcmp(con_id, "fcs,int_n"))
+ return ERR_PTR(-ENOENT);
+
+ return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
+}
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
{
@@ -504,6 +528,9 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
if (PTR_ERR(desc) == -ENOENT)
desc = of_find_arizona_gpio(dev, con_id, &of_flags);
+ if (PTR_ERR(desc) == -ENOENT)
+ desc = of_find_usb_gpio(dev, con_id, &of_flags);
+
if (IS_ERR(desc))
return desc;
@@ -585,6 +612,10 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*lflags |= GPIO_ACTIVE_LOW;
if (xlate_flags & OF_GPIO_TRANSITORY)
*lflags |= GPIO_TRANSITORY;
+ if (xlate_flags & OF_GPIO_PULL_UP)
+ *lflags |= GPIO_PULL_UP;
+ if (xlate_flags & OF_GPIO_PULL_DOWN)
+ *lflags |= GPIO_PULL_DOWN;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 182136d98b97..4fa075d49fbc 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -296,6 +296,9 @@ static int gpiodev_add_to_list(struct gpio_device *gdev)
/*
* Convert a GPIO name to its descriptor
+ * Note that there is no guarantee that GPIO names are globally unique!
+ * Hence this function returns a reference to the first GPIO line found that
+ * matches the given name, if one exists.
*/
static struct gpio_desc *gpio_name_to_desc(const char * const name)
{
@@ -329,10 +332,12 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
}
/*
- * Takes the names from gc->names and checks if they are all unique. If they
- * are, they are assigned to their gpio descriptors.
+ * Take the names from gc->names and assign them to their GPIO descriptors.
+ * Warn if a name is already used for a GPIO line on a different GPIO chip.
*
- * Warning if one of the names is already used for a different GPIO.
+ * Note that:
+ * 1. Non-unique names are still accepted,
+ * 2. Name collisions within the same GPIO chip are not reported.
*/
static int gpiochip_set_desc_names(struct gpio_chip *gc)
{
@@ -729,6 +734,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_descs;
}
+
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
offset);
}
@@ -1083,6 +1092,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_desc;
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
le->irq = gpiod_to_irq(desc);
if (le->irq <= 0) {
ret = -ENODEV;
@@ -1260,8 +1272,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
return -EFAULT;
return 0;
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
- cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
struct gpioline_info lineinfo;
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
@@ -1273,23 +1284,37 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
hwgpio = gpio_chip_hwgpio(desc);
- if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL &&
- test_bit(hwgpio, priv->watched_lines))
- return -EBUSY;
-
gpio_desc_to_lineinfo(desc, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
-
- if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL)
- set_bit(hwgpio, priv->watched_lines);
-
return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
+ } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ struct gpioline_info lineinfo;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+
+ desc = gpiochip_get_desc(gc, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ hwgpio = gpio_chip_hwgpio(desc);
+
+ if (test_bit(hwgpio, priv->watched_lines))
+ return -EBUSY;
+
+ gpio_desc_to_lineinfo(desc, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
+ return -EFAULT;
+
+ set_bit(hwgpio, priv->watched_lines);
+ return 0;
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
if (copy_from_user(&offset, ip, sizeof(offset)))
return -EFAULT;
@@ -1531,9 +1556,8 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
/* From this point, the .release() function cleans up gpio_device */
gdev->dev.release = gpiodevice_release;
- pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
- __func__, gdev->base, gdev->base + gdev->ngpio - 1,
- dev_name(&gdev->dev), gdev->chip->label ? : "generic");
+ dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base,
+ gdev->base + gdev->ngpio - 1, gdev->chip->label ? : "generic");
return 0;
@@ -1549,8 +1573,8 @@ static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
desc = gpiochip_get_desc(gc, hog->chip_hwnum);
if (IS_ERR(desc)) {
- pr_err("%s: unable to get GPIO desc: %ld\n",
- __func__, PTR_ERR(desc));
+ chip_err(gc, "%s: unable to get GPIO desc: %ld\n", __func__,
+ PTR_ERR(desc));
return;
}
@@ -1559,8 +1583,8 @@ static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags);
if (rv)
- pr_err("%s: unable to hog GPIO line (%s:%u): %d\n",
- __func__, gc->label, hog->chip_hwnum, rv);
+ gpiod_err(desc, "%s: unable to hog GPIO line (%s:%u): %d\n",
+ __func__, gc->label, hog->chip_hwnum, rv);
}
static void machine_gpiochip_add(struct gpio_chip *gc)
@@ -1585,8 +1609,8 @@ static void gpiochip_setup_devs(void)
list_for_each_entry(gdev, &gpio_devices, list) {
ret = gpiochip_setup_dev(gdev);
if (ret)
- pr_err("%s: Failed to initialize gpio device (%d)\n",
- dev_name(&gdev->dev), ret);
+ dev_err(&gdev->dev,
+ "Failed to initialize gpio device (%d)\n", ret);
}
}
@@ -2454,32 +2478,37 @@ static void gpiochip_irq_relres(struct irq_data *d)
gpiochip_relres_irq(gc, d->hwirq);
}
+static void gpiochip_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ if (gc->irq.irq_mask)
+ gc->irq.irq_mask(d);
+ gpiochip_disable_irq(gc, d->hwirq);
+}
+
+static void gpiochip_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(gc, d->hwirq);
+ if (gc->irq.irq_unmask)
+ gc->irq.irq_unmask(d);
+}
+
static void gpiochip_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
gpiochip_enable_irq(gc, d->hwirq);
- if (gc->irq.irq_enable)
- gc->irq.irq_enable(d);
- else
- gc->irq.chip->irq_unmask(d);
+ gc->irq.irq_enable(d);
}
static void gpiochip_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- /*
- * Since we override .irq_disable() we need to mimic the
- * behaviour of __irq_disable() in irq/chip.c.
- * First call .irq_disable() if it exists, else mimic the
- * behaviour of mask_irq() which calls .irq_mask() if
- * it exists.
- */
- if (gc->irq.irq_disable)
- gc->irq.irq_disable(d);
- else if (gc->irq.chip->irq_mask)
- gc->irq.chip->irq_mask(d);
+ gc->irq.irq_disable(d);
gpiochip_disable_irq(gc, d->hwirq);
}
@@ -2504,10 +2533,22 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
"detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
return;
}
- gc->irq.irq_enable = irqchip->irq_enable;
- gc->irq.irq_disable = irqchip->irq_disable;
- irqchip->irq_enable = gpiochip_irq_enable;
- irqchip->irq_disable = gpiochip_irq_disable;
+
+ if (irqchip->irq_disable) {
+ gc->irq.irq_disable = irqchip->irq_disable;
+ irqchip->irq_disable = gpiochip_irq_disable;
+ } else {
+ gc->irq.irq_mask = irqchip->irq_mask;
+ irqchip->irq_mask = gpiochip_irq_mask;
+ }
+
+ if (irqchip->irq_enable) {
+ gc->irq.irq_enable = irqchip->irq_enable;
+ irqchip->irq_enable = gpiochip_irq_enable;
+ } else {
+ gc->irq.irq_unmask = irqchip->irq_unmask;
+ irqchip->irq_unmask = gpiochip_irq_unmask;
+ }
}
/**
@@ -2695,7 +2736,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc,
return -EINVAL;
if (!gc->parent) {
- pr_err("missing gpiochip .dev parent pointer\n");
+ chip_err(gc, "missing gpiochip .dev parent pointer\n");
return -EINVAL;
}
gc->irq.threaded = threaded;
@@ -2745,6 +2786,26 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc,
}
EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
+/**
+ * gpiochip_irqchip_add_domain() - adds an irqdomain to a gpiochip
+ * @gc: the gpiochip to add the irqchip to
+ * @domain: the irqdomain to add to the gpiochip
+ *
+ * This function adds an IRQ domain to the gpiochip.
+ */
+int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain)
+{
+ if (!domain)
+ return -EINVAL;
+
+ gc->to_irq = gpiochip_to_irq;
+ gc->irq.domain = domain;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_domain);
+
#else /* CONFIG_GPIOLIB_IRQCHIP */
static inline int gpiochip_add_irqchip(struct gpio_chip *gc,
@@ -2998,8 +3059,6 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
}
done:
spin_unlock_irqrestore(&gpio_lock, flags);
- atomic_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
return ret;
}
@@ -4215,7 +4274,9 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
}
}
- if (test_bit(FLAG_IS_OUT, &desc->flags)) {
+ /* To be valid for an IRQ, the line needs to be input or open drain */
+ if (test_bit(FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
chip_err(gc,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
@@ -4278,7 +4339,12 @@ void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset)
if (!IS_ERR(desc) &&
!WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
- WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags));
+ /*
+ * The line must not be set as output when used for an
+ * IRQ, unless it is open drain.
+ */
+ WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(FLAG_OPEN_DRAIN, &desc->flags));
set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
}
}
@@ -4641,7 +4707,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (!table)
return desc;
- for (p = &table->table[0]; p->chip_label; p++) {
+ for (p = &table->table[0]; p->key; p++) {
struct gpio_chip *gc;
/* idx must always match exactly */
@@ -4652,18 +4718,30 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (p->con_id && (!con_id || strcmp(p->con_id, con_id)))
continue;
- gc = find_chip_by_name(p->chip_label);
+ if (p->chip_hwnum == U16_MAX) {
+ desc = gpio_name_to_desc(p->key);
+ if (desc) {
+ *flags = p->flags;
+ return desc;
+ }
+
+ dev_warn(dev, "cannot find GPIO line %s, deferring\n",
+ p->key);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ gc = find_chip_by_name(p->key);
if (!gc) {
/*
* As the lookup table indicates a chip with
- * p->chip_label should exist, assume it may
+ * p->key should exist, assume it may
* still appear later and let the interested
* consumer be probed again or let the Deferred
* Probe infrastructure handle the error.
*/
dev_warn(dev, "cannot find GPIO chip %s, deferring\n",
- p->chip_label);
+ p->key);
return ERR_PTR(-EPROBE_DEFER);
}
@@ -4694,7 +4772,7 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
if (!table)
return -ENOENT;
- for (p = &table->table[0]; p->chip_label; p++) {
+ for (p = &table->table[0]; p->key; p++) {
if ((con_id && p->con_id && !strcmp(con_id, p->con_id)) ||
(!con_id && !p->con_id))
count++;
@@ -4865,7 +4943,7 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
- pr_debug("no flags found for %s\n", con_id);
+ gpiod_dbg(desc, "no flags found for %s\n", con_id);
return 0;
}
@@ -4961,6 +5039,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return ERR_PTR(ret);
}
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
return desc;
}
EXPORT_SYMBOL_GPL(gpiod_get_index);
@@ -5026,6 +5107,9 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
return ERR_PTR(ret);
}
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
return desc;
}
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -5090,8 +5174,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/* Mark GPIO as hogged so it can be identified and removed later */
set_bit(FLAG_IS_HOGGED, &desc->flags);
- pr_info("GPIO line %d (%s) hogged as %s%s\n",
- desc_to_gpio(desc), name,
+ gpiod_info(desc, "hogged as %s%s\n",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ?
(dflags & GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low" : "");
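
With GPIO_GET_LINEINFO_WATCH_IOCTL split into its own branch and the
REQUESTED notifier now emitted from every request path, a userspace watcher
sees one event per request, release or config change. A hedged sketch against
the v1 character-device uAPI in <linux/gpio.h> (chip path and line offset
arbitrary):

	/* Sketch: watch line 3 of gpiochip0 for state changes. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/gpio.h>

	int main(void)
	{
		struct gpioline_info_changed event;
		struct gpioline_info info;
		int fd = open("/dev/gpiochip0", O_RDONLY);

		if (fd < 0)
			return 1;

		memset(&info, 0, sizeof(info));
		info.line_offset = 3;
		if (ioctl(fd, GPIO_GET_LINEINFO_WATCH_IOCTL, &info) < 0)
			return 1;

		/* each change on the watched line yields one structure */
		while (read(fd, &event, sizeof(event)) == sizeof(event))
			printf("line %u: event %u\n",
			       event.info.line_offset, event.event_type);

		return 0;
	}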
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 853ce681b4a4..9ed242316414 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -81,8 +81,7 @@ struct gpio_array {
unsigned long invert_mask[];
};
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
- unsigned int hwnum);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
@@ -163,18 +162,18 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
/* With chip prefix */
-#define chip_emerg(chip, fmt, ...) \
- dev_emerg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_crit(chip, fmt, ...) \
- dev_crit(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_err(chip, fmt, ...) \
- dev_err(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_warn(chip, fmt, ...) \
- dev_warn(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_info(chip, fmt, ...) \
- dev_info(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_dbg(chip, fmt, ...) \
- dev_dbg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
+#define chip_emerg(gc, fmt, ...) \
+ dev_emerg(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_crit(gc, fmt, ...) \
+ dev_crit(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_err(gc, fmt, ...) \
+ dev_err(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_warn(gc, fmt, ...) \
+ dev_warn(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_info(gc, fmt, ...) \
+ dev_info(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_dbg(gc, fmt, ...) \
+ dev_dbg(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
#ifdef CONFIG_GPIO_SYSFS
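
Only the parameter name changes; usage of the chip-prefixed log helpers stays
the same (the offset here is illustrative):

	/* Sketch: the macros still take the chip and a printf-style format. */
	chip_dbg(gc, "requested line %u\n", offset);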
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 43594978958e..c4fd57d8b717 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -161,7 +161,7 @@ config DRM_LOAD_EDID_FIRMWARE
monitor are unable to provide appropriate EDID data. Since this
feature is provided as a workaround for broken hardware, the
default case is N. Details and instructions how to build your own
- EDID data are given in Documentation/driver-api/edid.rst.
+ EDID data are given in Documentation/admin-guide/edid.rst.
config DRM_DP_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
@@ -310,8 +310,6 @@ source "drivers/gpu/drm/ast/Kconfig"
source "drivers/gpu/drm/mgag200/Kconfig"
-source "drivers/gpu/drm/cirrus/Kconfig"
-
source "drivers/gpu/drm/armada/Kconfig"
source "drivers/gpu/drm/atmel-hlcdc/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 7f72ef5e7811..2c0e5a7e5953 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -17,7 +17,8 @@ drm-y := drm_auth.o drm_cache.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
- drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o
+ drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
+ drm_managed.o
drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
@@ -32,8 +33,7 @@ drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_vram_helper-y := drm_gem_vram_helper.o \
- drm_vram_helper_common.o
+drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
drm_ttm_helper-y := drm_gem_ttm_helper.o
@@ -74,7 +74,6 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
obj-$(CONFIG_DRM_V3D) += v3d/
obj-$(CONFIG_DRM_VC4) += vc4/
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c2bbcdd9c875..210d57a4afc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -55,7 +55,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
- amdgpu_umc.o smu_v11_0_i2c.o
+ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2992a49ad4a5..cd913986863e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -28,6 +28,18 @@
#ifndef __AMDGPU_H__
#define __AMDGPU_H__
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "amdgpu: " fmt
+
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+
+#define dev_fmt(fmt) "amdgpu: " fmt
+
#include "amdgpu_ctx.h"
#include <linux/atomic.h>
@@ -161,6 +173,7 @@ extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dm_abm_level;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
@@ -177,6 +190,8 @@ extern int sched_policy;
static const int sched_policy = KFD_SCHED_POLICY_HWS;
#endif
+extern int amdgpu_tmz;
+
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
@@ -190,8 +205,6 @@ extern int amdgpu_cik_support;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
-/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
-#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 16
@@ -439,7 +452,9 @@ struct amdgpu_fpriv {
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size, struct amdgpu_ib *ib);
+ unsigned size,
+ enum amdgpu_ib_pool_type pool,
+ struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
@@ -512,7 +527,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
-#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 256 /* Reserve at most 256 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@@ -724,6 +739,7 @@ struct amdgpu_device {
uint32_t rev_id;
uint32_t external_rev_id;
unsigned long flags;
+ unsigned long apu_flags;
int usec_timeout;
const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown;
@@ -751,7 +767,6 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
- struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -843,7 +858,8 @@ struct amdgpu_device {
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
- struct amdgpu_sa_manager ring_tmp_bo;
+ struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
+ struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
/* interrupts */
struct amdgpu_irq irq;
@@ -903,7 +919,9 @@ struct amdgpu_device {
struct amdgpu_display_manager dm;
/* discovery */
- uint8_t *discovery;
+ uint8_t *discovery_bin;
+ uint32_t discovery_tmr_size;
+ struct amdgpu_bo *discovery_memory;
/* mes */
bool enable_mes;
@@ -923,7 +941,7 @@ struct amdgpu_device {
atomic64_t gart_pin_size;
/* soc15 register offset based on ip, instance and segment */
- uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -935,9 +953,6 @@ struct amdgpu_device {
/* link all shadow bo */
struct list_head shadow_list;
struct mutex shadow_list_lock;
- /* keep an lru list of rings by HW IP */
- struct list_head ring_lru_list;
- spinlock_t ring_lru_list_lock;
/* record hw reset is performed */
bool has_hw_reset;
@@ -945,9 +960,8 @@ struct amdgpu_device {
/* s3/s4 mask */
bool in_suspend;
+ bool in_hibernate;
- /* record last mm index being written through WREG32*/
- unsigned long last_mm_index;
bool in_gpu_reset;
enum pp_mp1_state mp1_state;
struct mutex lock_reset;
@@ -966,14 +980,19 @@ struct amdgpu_device {
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
- /* device pstate */
- int pstate;
/* enable runtime pm on the device */
bool runpm;
bool in_runpm;
bool pm_sysfs_en;
bool ucode_sysfs_en;
+
+ /* Chip product information */
+ char product_number[16];
+ char product_name[32];
+ char serial[16];
+
+ struct amdgpu_autodump autodump;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -990,10 +1009,10 @@ int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t acc_flags);
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
@@ -1010,25 +1029,20 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
/*
* Registers read & write functions.
*/
-
-#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)
-#define AMDGPU_REGS_KIQ (1<<2)
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
-#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
-#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
+#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
-#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
@@ -1065,7 +1079,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
@@ -1248,5 +1262,9 @@ _name##_show(struct device *dev, \
\
static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)
-#endif
+static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
+{
+ return adev->gmc.tmz_enabled;
+}
+#endif
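
Defining pr_fmt()/dev_fmt() before any includes gives every pr_*() and
dev_*() call in amdgpu a uniform "amdgpu: " prefix without touching the call
sites. A sketch of the effect (log text approximate):

	/* Sketch: with pr_fmt/dev_fmt set as above ... */
	pr_err("ring %d test failed\n", 2);
	/* logs roughly: "amdgpu: ring 2 test failed" */
	dev_err(adev->dev, "ring %d test failed\n", 2);
	/* logs roughly: "<device>: amdgpu: ring 2 test failed" */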
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 1e41367ef74e..956cbbda4793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -444,7 +444,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
- /* todo: add DC handling */
if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
!amdgpu_device_has_dc_support(adev)) {
struct amdgpu_encoder *enc = atif->encoder_for_bl;
@@ -463,6 +462,27 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
#endif
}
}
+#if defined(CONFIG_DRM_AMD_DC)
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+ amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct backlight_device *bd = dm->backlight_dev;
+
+ if (bd) {
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
+
+ /*
+ * XXX backlight_device_set_brightness() is
+ * hardwired to post BACKLIGHT_UPDATE_SYSFS.
+ * It probably should accept 'reason' parameter.
+ */
+ backlight_device_set_brightness(bd, req.backlight_level);
+ }
+ }
+#endif
+#endif
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev->ddev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index abfbe89e805e..ad59ac4423b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -564,6 +564,13 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
return adev->gds.gws_size;
}
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->rev_id;
+}
+
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 13feb313e9b3..3f2b695cf19e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -65,6 +65,7 @@ struct kgd_mem {
struct amdgpu_sync sync;
bool aql_queue;
+ bool is_imported;
};
/* KFD Memory Eviction */
@@ -148,6 +149,9 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+ int queue_bit);
+
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@@ -175,6 +179,7 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
/* Read user wptr from a specified user address space with page fault
@@ -218,7 +223,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
void *vm, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem);
+ struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 4ec6d0c03201..691c89705bcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -543,6 +543,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v10_compute_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
#if 0
unsigned long flags;
int retry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 9dff792c9290..68e6e1bc8f3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -362,13 +362,13 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
&param);
if (ret) {
- pr_err("amdgpu: failed to validate PT BOs\n");
+ pr_err("failed to validate PT BOs\n");
return ret;
}
ret = amdgpu_amdkfd_validate(&param, pd);
if (ret) {
- pr_err("amdgpu: failed to validate PD\n");
+ pr_err("failed to validate PD\n");
return ret;
}
@@ -377,7 +377,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
if (vm->use_cpu_for_update) {
ret = amdgpu_bo_kmap(pd, NULL);
if (ret) {
- pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
+ pr_err("failed to kmap PD, ret=%d\n", ret);
return ret;
}
}
@@ -660,15 +660,15 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates);
- if (!ret)
- ctx->reserved = true;
- else {
- pr_err("Failed to reserve buffers in ttm\n");
+ if (ret) {
+ pr_err("Failed to reserve buffers in ttm.\n");
kfree(ctx->vm_pd);
ctx->vm_pd = NULL;
+ return ret;
}
- return ret;
+ ctx->reserved = true;
+ return 0;
}
/**
@@ -733,17 +733,15 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates);
- if (!ret)
- ctx->reserved = true;
- else
- pr_err("Failed to reserve buffers in ttm.\n");
-
if (ret) {
+ pr_err("Failed to reserve buffers in ttm.\n");
kfree(ctx->vm_pd);
ctx->vm_pd = NULL;
+ return ret;
}
- return ret;
+ ctx->reserved = true;
+ return 0;
}
/**
@@ -1279,31 +1277,30 @@ err:
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem)
+ struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
unsigned long bo_size = mem->bo->tbo.mem.size;
struct kfd_bo_va_list *entry, *tmp;
struct bo_vm_reservation_context ctx;
struct ttm_validate_buffer *bo_list_entry;
+ unsigned int mapped_to_gpu_memory;
int ret;
+ bool is_imported = false;
mutex_lock(&mem->lock);
-
- if (mem->mapped_to_gpu_memory > 0) {
- pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
- mem->va, bo_size);
- mutex_unlock(&mem->lock);
- return -EBUSY;
- }
-
+ mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
+ is_imported = mem->is_imported;
mutex_unlock(&mem->lock);
/* lock is not needed after this, since mem is unused and will
* be freed anyway
*/
- /* No more MMU notifiers */
- amdgpu_mn_unregister(mem->bo);
+ if (mapped_to_gpu_memory > 0) {
+ pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
+ mem->va, bo_size);
+ return -EBUSY;
+ }
/* Make sure restore workers don't access the BO any more */
bo_list_entry = &mem->validate_list;
@@ -1311,6 +1308,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
list_del(&bo_list_entry->head);
mutex_unlock(&process_info->lock);
+ /* No more MMU notifiers */
+ amdgpu_mn_unregister(mem->bo);
+
ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
if (unlikely(ret))
return ret;
@@ -1342,8 +1342,19 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
kfree(mem->bo->tbo.sg);
}
+ /* Update the size of the BO being freed if it was allocated from
+ * VRAM and is not imported.
+ */
+ if (size) {
+ if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
+ (!is_imported))
+ *size = bo_size;
+ else
+ *size = 0;
+ }
+
/* Free the BO */
- amdgpu_bo_unref(&mem->bo);
+ drm_gem_object_put_unlocked(&mem->bo->tbo.base);
mutex_destroy(&mem->lock);
kfree(mem);
@@ -1688,7 +1699,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
- (*mem)->bo = amdgpu_bo_ref(bo);
+ drm_gem_object_get(&bo->tbo.base);
+ (*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
@@ -1696,6 +1708,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
(*mem)->process_info = avm->process_info;
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
amdgpu_sync_create(&(*mem)->sync);
+ (*mem)->is_imported = true;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index d1495e1c9289..d9b35df33806 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
- false, false);
+ false, false, false);
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 031b094607bd..78ac6dbe70d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -60,8 +60,6 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
{
CGS_FUNC_ADEV;
switch (space) {
- case CGS_IND_REG__MMIO:
- return RREG32_IDX(index);
case CGS_IND_REG__PCIE:
return RREG32_PCIE(index);
case CGS_IND_REG__SMC:
@@ -77,6 +75,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
+ default:
+ BUG();
}
WARN(1, "Invalid indirect register space");
return 0;
@@ -88,8 +88,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
{
CGS_FUNC_ADEV;
switch (space) {
- case CGS_IND_REG__MMIO:
- return WREG32_IDX(index, value);
case CGS_IND_REG__PCIE:
return WREG32_PCIE(index, value);
case CGS_IND_REG__SMC:
@@ -105,6 +103,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
+ default:
+ BUG();
}
WARN(1, "Invalid indirect register space");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index af91627b19b0..19070226a945 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -924,7 +924,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ring = to_amdgpu_ring(entity->rq->sched);
r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
- chunk_ib->ib_bytes : 0, ib);
+ chunk_ib->ib_bytes : 0,
+ AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -1207,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_sched_entity *entity = p->entity;
- enum drm_sched_priority priority;
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
@@ -1257,7 +1257,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
- priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6ed36a2c5f73..8842c55d4490 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
+#include <linux/nospec.h>
#define to_amdgpu_ctx_entity(e) \
container_of((e), struct amdgpu_ctx_entity, entity)
@@ -72,13 +73,30 @@ static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sch
}
}
-static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
+static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
+ enum drm_sched_priority prio,
+ u32 hw_ip)
+{
+ unsigned int hw_prio;
+
+ hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
+ amdgpu_ctx_sched_prio_to_compute_prio(prio) :
+ AMDGPU_RING_PRIO_DEFAULT;
+ hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+ if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
+ hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+
+ return hw_prio;
+}
+
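array_index_nospec() clamps hw_ip to [0, AMDGPU_HW_IP_NUM) even under branch misspeculation, so a Spectre-v1 style mispredicted bounds check cannot be used to read adjacent gpu_sched entries. A generic, self-contained sketch of the pattern, independent of this driver:

#include <linux/nospec.h>
#include <linux/types.h>

/* Generic Spectre-v1 hardening sketch: validate, then sanitize the index. */
static u32 read_table_entry(const u32 *table, size_t nr_entries, size_t idx)
{
	if (idx >= nr_entries)
		return 0;

	/* idx stays in [0, nr_entries) even under branch misspeculation. */
	idx = array_index_nospec(idx, nr_entries);
	return table[idx];
}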
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
+ const u32 ring)
{
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
- enum gfx_pipe_priority hw_prio;
+ unsigned int hw_prio;
enum drm_sched_priority priority;
int r;
@@ -90,52 +108,16 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
entity->sequence = 1;
priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
- switch (hw_ip) {
- case AMDGPU_HW_IP_GFX:
- sched = &adev->gfx.gfx_ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
- scheds = adev->gfx.compute_prio_sched[hw_prio];
- num_scheds = adev->gfx.num_compute_sched[hw_prio];
- break;
- case AMDGPU_HW_IP_DMA:
- scheds = adev->sdma.sdma_sched;
- num_scheds = adev->sdma.num_sdma_sched;
- break;
- case AMDGPU_HW_IP_UVD:
- sched = &adev->uvd.inst[0].ring.sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCE:
- sched = &adev->vce.ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- sched = &adev->uvd.inst[0].ring_enc[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
- adev->vcn.num_vcn_dec_sched);
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
- adev->vcn.num_vcn_enc_sched);
+ hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+
+ hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+ scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+ num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+
+ if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+ sched = drm_sched_pick_best(scheds, num_scheds);
scheds = &sched;
num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_JPEG:
- scheds = adev->jpeg.jpeg_sched;
- num_scheds = adev->jpeg.num_jpeg_sched;
- break;
}
r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -178,7 +160,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
return 0;
-
}
static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
@@ -525,7 +506,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
struct amdgpu_device *adev = ctx->adev;
- enum gfx_pipe_priority hw_prio;
+ unsigned int hw_prio;
struct drm_gpu_scheduler **scheds = NULL;
unsigned num_scheds;
@@ -534,9 +515,11 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
/* set hw priority */
if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
- hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
- scheds = adev->gfx.compute_prio_sched[hw_prio];
- num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
+ AMDGPU_HW_IP_COMPUTE);
+ hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
+ scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+ num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
drm_sched_entity_modify_sched(&aentity->entity, scheds,
num_scheds);
}
@@ -665,78 +648,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
-
-
-static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
-{
- int num_compute_sched_normal = 0;
- int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
- int i;
-
- /* use one drm sched array, gfx.compute_sched to store both high and
- * normal priority drm compute schedulers */
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- if (!adev->gfx.compute_ring[i].has_high_prio)
- adev->gfx.compute_sched[num_compute_sched_normal++] =
- &adev->gfx.compute_ring[i].sched;
- else
- adev->gfx.compute_sched[num_compute_sched_high--] =
- &adev->gfx.compute_ring[i].sched;
- }
-
- /* compute ring only has two priority for now */
- i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
- adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
- adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-
- i = AMDGPU_GFX_PIPE_PRIO_HIGH;
- if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
- /* When compute has no high priority rings then use */
- /* normal priority sched array */
- adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
- adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
- } else {
- adev->gfx.compute_prio_sched[i] =
- &adev->gfx.compute_sched[num_compute_sched_high - 1];
- adev->gfx.num_compute_sched[i] =
- adev->gfx.num_compute_rings - num_compute_sched_normal;
- }
-}
-
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
-{
- int i, j;
-
- amdgpu_ctx_init_compute_sched(adev);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
- adev->gfx.num_gfx_sched++;
- }
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
- adev->sdma.num_sdma_sched++;
- }
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
- &adev->vcn.inst[i].ring_dec.sched;
- }
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j)
- adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
- &adev->vcn.inst[i].ring_enc[j].sched;
- }
-
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (adev->jpeg.harvest_config & (1 << i))
- continue;
- adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
- &adev->jpeg.inst[i].ring_dec.sched;
- }
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index de490f183af2..f54e10314661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
-
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index c0f9a651dc06..d33cb344be69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -27,7 +27,7 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
-
+#include <linux/poll.h>
#include <drm/drm_debugfs.h>
#include "amdgpu.h"
@@ -74,8 +74,82 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
return 0;
}
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned long timeout = 600 * HZ;
+ int ret;
+
+ wake_up_interruptible(&adev->autodump.gpu_hang);
+
+ ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
+ if (ret == 0) {
+ pr_err("autodump: timed out, moving on to gpu recovery\n");
+ return -ETIMEDOUT;
+ }
+#endif
+ return 0;
+}
+
#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
+{
+ struct amdgpu_device *adev = inode->i_private;
+ int ret;
+
+ file->private_data = adev;
+
+ mutex_lock(&adev->lock_reset);
+ if (adev->autodump.dumping.done) {
+ reinit_completion(&adev->autodump.dumping);
+ ret = 0;
+ } else {
+ ret = -EBUSY;
+ }
+ mutex_unlock(&adev->lock_reset);
+
+ return ret;
+}
+
+static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
+{
+ struct amdgpu_device *adev = file->private_data;
+
+ complete_all(&adev->autodump.dumping);
+ return 0;
+}
+
+static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
+{
+ struct amdgpu_device *adev = file->private_data;
+
+ poll_wait(file, &adev->autodump.gpu_hang, poll_table);
+
+ if (adev->in_gpu_reset)
+ return POLLIN | POLLRDNORM | POLLWRNORM;
+
+ return 0;
+}
+
+static const struct file_operations autodump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_debugfs_autodump_open,
+ .poll = amdgpu_debugfs_autodump_poll,
+ .release = amdgpu_debugfs_autodump_release,
+};
+
+static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
+{
+ init_completion(&adev->autodump.dumping);
+ complete_all(&adev->autodump.dumping);
+ init_waitqueue_head(&adev->autodump.gpu_hang);
+
+ debugfs_create_file("amdgpu_autodump", 0600,
+ adev->ddev->primary->debugfs_root,
+ adev, &autodump_debug_fops);
+}
+
/**
* amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
*
@@ -152,11 +226,16 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
}
mutex_lock(&adev->grbm_idx_mutex);
@@ -207,6 +286,7 @@ end:
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -255,6 +335,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -263,6 +347,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -275,6 +360,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -304,6 +390,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -311,6 +401,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -325,6 +416,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -354,6 +446,10 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -362,6 +458,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -374,6 +471,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -403,6 +501,10 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -410,6 +512,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -424,6 +527,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -453,6 +557,10 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -461,6 +569,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -473,6 +582,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -502,6 +612,10 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -509,6 +623,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -523,6 +638,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -651,16 +767,24 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
- if (r)
+ if (r) {
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
+ }
- if (size > valuesize)
+ if (size > valuesize) {
+ amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
+ }
outsize = 0;
x = 0;
@@ -673,6 +797,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
}
}
+ amdgpu_virt_disable_access_debugfs(adev);
return !r ? outsize : r;
}
@@ -720,6 +845,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -734,16 +863,20 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
- if (!x)
+ if (!x) {
+ amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
+ }
while (size && (offset < x * 4)) {
uint32_t value;
value = data[offset >> 2];
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
+ }
result += 4;
buf += 4;
@@ -751,6 +884,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
size -= 4;
}
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -805,6 +939,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -840,6 +978,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
err:
kfree(data);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -1369,6 +1508,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_ras_debugfs_create_all(adev);
+ amdgpu_debugfs_autodump_init(adev);
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index de12d1101526..2803884d338d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -31,6 +31,11 @@ struct amdgpu_debugfs {
unsigned num_files;
};
+struct amdgpu_autodump {
+ struct completion dumping;
+ struct wait_queue_head gpu_hang;
+};
+
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
void amdgpu_debugfs_fini(struct amdgpu_device *adev);
@@ -40,3 +45,4 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);
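
Taken together, these pieces form a small handshake: a userspace dump tool opens amdgpu_autodump and holds it open; on a GPU hang the reset path wakes autodump.gpu_hang and then blocks in amdgpu_debugfs_wait_dump() (for up to 600 seconds) until the tool releases the file, which completes autodump.dumping. A minimal sketch of the userspace side; the debugfs path assumes the GPU is DRM card 0:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumes the GPU is DRM card 0; adjust the index as needed. */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_autodump", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return 1;

	/* Blocks until the driver wakes autodump.gpu_hang on a GPU hang. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		printf("GPU hang detected, dumping state...\n");

	/* ...collect state via other debugfs files while the reset waits... */

	close(fd); /* release() completes the dump and unblocks recovery */
	return 0;
}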
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index affde2de2a0d..a027a8f7b281 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -64,9 +64,11 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
+#include "amdgpu_fru_eeprom.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
+#include <linux/pm_runtime.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -138,6 +140,72 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/**
+ * DOC: product_name
+ *
+ * The amdgpu driver provides a sysfs API for reporting the product name
+ * for the device.
+ * The file product_name is used for this and returns the product name
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
+}
+
+static DEVICE_ATTR(product_name, S_IRUGO,
+ amdgpu_device_get_product_name, NULL);
+
+/**
+ * DOC: product_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the part number
+ * for the device.
+ * The file product_number is used for this and returns the part number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_number(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
+}
+
+static DEVICE_ATTR(product_number, S_IRUGO,
+ amdgpu_device_get_product_number, NULL);
+
+/**
+ * DOC: serial_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the serial number
+ * for the device.
+ * The file serial_number is used for this and returns the serial number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_serial_number(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
+}
+
+static DEVICE_ATTR(serial_number, S_IRUGO,
+ amdgpu_device_get_serial_number, NULL);
+
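From userspace these attributes show up as plain text files in the device's sysfs directory. A small sketch reading them; the /sys/class/drm/card0/device/ path is illustrative and assumes the GPU is card 0:

#include <stdio.h>

static void print_attr(const char *name)
{
	char path[128], buf[128];
	FILE *f;

	/* Assumes the GPU is DRM card 0. */
	snprintf(path, sizeof(path), "/sys/class/drm/card0/device/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", name, buf);
		fclose(f);
	}
}

int main(void)
{
	print_attr("product_name");
	print_attr("product_number");
	print_attr("serial_number");
	return 0;
}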
+/**
* amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
*
* @dev: drm_device pointer
@@ -231,10 +299,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
}
/*
- * MMIO register access helper functions.
+ * device register access helper functions.
*/
/**
- * amdgpu_mm_rreg - read a memory mapped IO register
+ * amdgpu_device_rreg - read a register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -242,25 +310,19 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
*
* Returns the 32 bit value from the offset specified.
*/
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
- uint32_t acc_flags)
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t acc_flags)
{
uint32_t ret;
- if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_kiq_rreg(adev, reg);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ if ((reg * 4) < adev->rmmio_size)
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
- trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+ else
+ ret = adev->pcie_rreg(adev, (reg * 4));
+ trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
return ret;
}
@@ -306,28 +368,19 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
BUG();
}
-void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+static inline void amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t v, uint32_t acc_flags)
{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+ trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ if ((reg * 4) < adev->rmmio_size)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
-
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
- }
+ else
+ adev->pcie_wreg(adev, (reg * 4), v);
}
/**
- * amdgpu_mm_wreg - write to a memory mapped IO register
+ * amdgpu_device_wreg - write to a register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -336,17 +389,13 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg,
*
* Writes the value specified to the offset specified.
*/
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags)
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
{
- if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
- adev->last_mm_index = v;
- }
-
- if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_kiq_wreg(adev, reg, v);
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+ amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
}
/*
@@ -365,7 +414,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
}
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+ amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
}
/**
@@ -397,20 +446,12 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
*/
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
- if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
- adev->last_mm_index = v;
- }
-
if ((reg * 4) < adev->rio_mem_size)
iowrite32(v, adev->rio_mem + (reg * 4));
else {
iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
}
-
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
- }
}
/**
@@ -1126,6 +1167,8 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
+ amdgpu_gmc_tmz_set(adev);
+
return 0;
}
@@ -1147,7 +1190,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
return;
if (state == VGA_SWITCHEROO_ON) {
- pr_info("amdgpu: switched on\n");
+ pr_info("switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1161,7 +1204,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
- pr_info("amdgpu: switched off\n");
+ pr_info("switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_suspend(dev, true);
@@ -1524,9 +1567,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "vega12";
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -1574,8 +1617,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
+ amdgpu_discovery_get_gfx_info(adev);
goto parse_soc_bounding_box;
+ }
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
@@ -1721,19 +1766,31 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_device_parse_gpu_info_fw(adev);
- if (r)
- return r;
-
- if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
- amdgpu_discovery_get_gfx_info(adev);
-
amdgpu_amdkfd_device_probe(adev);
if (amdgpu_sriov_vf(adev)) {
+ /* handle vbios stuff prior to full access mode for new handshake */
+ if (adev->virt.req_init_data_ver == 1) {
+ if (!amdgpu_get_bios(adev)) {
+ DRM_ERROR("failed to get vbios\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_atombios_init(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+ return r;
+ }
+ }
+ }
+
+ /* we need to send REQ_GPU here for the legacy handshake, otherwise the vbios
+ * will not be prepared by host for this VF */
+ if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver < 1) {
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
- return -EAGAIN;
+ return r;
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
@@ -1763,6 +1820,14 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
/* get the vbios after the asic_funcs are set up */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ r = amdgpu_device_parse_gpu_info_fw(adev);
+ if (r)
+ return r;
+
+ /* skip vbios handling for new handshake */
+ if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver == 1)
+ continue;
+
/* Read BIOS */
if (!amdgpu_get_bios(adev))
return -EINVAL;
@@ -1889,6 +1954,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
return r;
+ if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver > 0) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return -EAGAIN;
+ }
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1975,6 +2046,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
+ amdgpu_fru_get_product_info(adev);
+
init_failed:
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, true);
@@ -2171,6 +2244,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = true;
}
+ amdgpu_ras_set_error_query_ready(adev, true);
+
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
@@ -2203,7 +2278,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (gpu_instance->adev->flags & AMD_IS_APU)
continue;
- r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+ r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
+ AMDGPU_XGMI_PSTATE_MIN);
if (r) {
DRM_ERROR("pstate setting failed (%d).\n", r);
break;
@@ -2785,12 +2861,12 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
* By default timeout for non compute jobs is 10000.
* And there is no timeout enforced on compute jobs.
* In SR-IOV or passthrough mode, timeout for compute
- * jobs are 10000 by default.
+ * jobs is 60000 by default.
*/
adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
- adev->compute_timeout = adev->gfx_timeout;
+ adev->compute_timeout = msecs_to_jiffies(60000);
else
adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
@@ -2841,6 +2917,14 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
return ret;
}
+static const struct attribute *amdgpu_dev_attributes[] = {
+ &dev_attr_product_name.attr,
+ &dev_attr_product_number.attr,
+ &dev_attr_serial_number.attr,
+ &dev_attr_pcie_replay_count.attr,
+ NULL
+};
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2942,9 +3026,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
- INIT_LIST_HEAD(&adev->ring_lru_list);
- spin_lock_init(&adev->ring_lru_list_lock);
-
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -2953,7 +3034,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
adev->gfx.gfx_off_req_count = 1;
- adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0;
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -3002,18 +3083,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
adev->enable_mes = true;
- if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
- r = amdgpu_discovery_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_discovery_init failed\n");
- return r;
- }
- }
-
- /* early init functions */
- r = amdgpu_device_ip_early_init(adev);
- if (r)
- return r;
+ /* detect hw virtualization here */
+ amdgpu_detect_virtualization(adev);
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
@@ -3021,6 +3092,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
+ /* early init functions */
+ r = amdgpu_device_ip_early_init(adev);
+ if (r)
+ return r;
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3127,14 +3203,13 @@ fence_driver_init:
goto failed;
}
- DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+ dev_info(adev->dev,
+ "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
adev->gfx.config.max_shader_engines,
adev->gfx.config.max_sh_per_se,
adev->gfx.config.max_cu_per_sh,
adev->gfx.cu_info.number);
- amdgpu_ctx_init_sched(adev);
-
adev->accel_working = true;
amdgpu_vm_check_compute_bug(adev);
@@ -3199,9 +3274,9 @@ fence_driver_init:
queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
- r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
+ r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (r) {
- dev_err(adev->dev, "Could not create pcie_replay_count");
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
return r;
}
@@ -3284,9 +3359,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
+
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
@@ -3754,6 +3830,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
@@ -3848,6 +3926,8 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
int i, r = 0;
bool need_full_reset = *need_full_reset_arg;
+ amdgpu_debugfs_wait_dump(adev);
+
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -4052,6 +4132,64 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
mutex_unlock(&adev->lock_reset);
}
+static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
+{
+ struct pci_dev *p = NULL;
+
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (p) {
+ pm_runtime_enable(&(p->dev));
+ pm_runtime_resume(&(p->dev));
+ }
+}
+
+static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+{
+ enum amd_reset_method reset_method;
+ struct pci_dev *p = NULL;
+ u64 expires;
+
+ /*
+ * For now, only BACO and mode1 reset are confirmed
+ * to suffer from the audio issue if audio is not properly suspended.
+ */
+ reset_method = amdgpu_asic_reset_method(adev);
+ if ((reset_method != AMD_RESET_METHOD_BACO) &&
+ (reset_method != AMD_RESET_METHOD_MODE1))
+ return -EINVAL;
+
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (!p)
+ return -ENODEV;
+
+ expires = pm_runtime_autosuspend_expiration(&(p->dev));
+ if (!expires)
+ /*
+ * If we cannot get the audio device autosuspend delay,
+ * a fixed 4s interval is used. Since 3s is the audio
+ * controller's default autosuspend delay setting, 4s
+ * is guaranteed to cover it.
+ */
+ expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
+
+ while (!pm_runtime_status_suspended(&(p->dev))) {
+ if (!pm_runtime_suspend(&(p->dev)))
+ break;
+
+ if (expires < ktime_get_mono_fast_ns()) {
+ dev_warn(adev->dev, "failed to suspend display audio\n");
+ /* TODO: abort the succeeding gpu reset? */
+ return -ETIMEDOUT;
+ }
+ }
+
+ pm_runtime_disable(&(p->dev));
+
+ return 0;
+}
+
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -4067,7 +4205,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job *job)
{
struct list_head device_list, *device_list_handle = NULL;
- bool need_full_reset, job_signaled;
+ bool need_full_reset = false;
+ bool job_signaled = false;
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
@@ -4075,6 +4214,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool use_baco =
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
true : false;
+ bool audio_suspended = false;
/*
* Flush RAM to disk so that after reboot
@@ -4088,16 +4228,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
emergency_restart();
}
- need_full_reset = job_signaled = false;
- INIT_LIST_HEAD(&device_list);
-
dev_info(adev->dev, "GPU %s begin!\n",
(in_ras_intr && !use_baco) ? "jobs stop":"reset");
- cancel_delayed_work_sync(&adev->delayed_init_work);
-
- hive = amdgpu_get_xgmi_hive(adev, false);
-
/*
* Here we trylock to avoid chain of resets executing from
* either trigger by jobs on different adevs in XGMI hive or jobs on
@@ -4105,39 +4238,25 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* We always reset all schedulers for device and all devices for XGMI
* hive so that should take care of them too.
*/
-
+ hive = amdgpu_get_xgmi_hive(adev, true);
if (hive && !mutex_trylock(&hive->reset_lock)) {
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
job ? job->base.id : -1, hive->hive_id);
+ mutex_unlock(&hive->hive_lock);
return 0;
}
- /* Start with adev pre asic reset first for soft reset check.*/
- if (!amdgpu_device_lock_adev(adev, !hive)) {
- DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
- job ? job->base.id : -1);
- return 0;
- }
-
- /* Block kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_pre_reset(adev);
-
- /* Build list of devices to reset */
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!hive) {
- /*unlock kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_post_reset(adev);
- amdgpu_device_unlock_adev(adev);
+ /*
+ * Build list of devices to reset.
+ * In case we are in XGMI hive mode, resort the device list
+ * to put adev in the 1st position.
+ */
+ INIT_LIST_HEAD(&device_list);
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive)
return -ENODEV;
- }
-
- /*
- * In case we are in XGMI hive mode device reset is done for all the
- * nodes in the hive to retrain all XGMI links and hence the reset
- * sequence is executed in loop on all nodes.
- */
+ if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
+ list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
device_list_handle = &hive->device_list;
} else {
list_add_tail(&adev->gmc.xgmi.head, &device_list);
@@ -4146,19 +4265,40 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- if (tmp_adev != adev) {
- amdgpu_device_lock_adev(tmp_adev, false);
- if (!amdgpu_sriov_vf(tmp_adev))
- amdgpu_amdkfd_pre_reset(tmp_adev);
+ if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
+ DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+ job ? job->base.id : -1);
+ mutex_unlock(&hive->hive_lock);
+ return 0;
}
/*
+ * Try to put the audio codec into suspend state
+ * before the gpu reset starts.
+ *
+ * Because the power domain of the graphics device
+ * is shared with the AZ power domain, without this
+ * we may change the audio hardware from behind
+ * the audio driver's back and trigger audio codec
+ * errors.
+ */
+ if (!amdgpu_device_suspend_display_audio(tmp_adev))
+ audio_suspended = true;
+
+ amdgpu_ras_set_error_query_ready(tmp_adev, false);
+
+ cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
+
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
+
+ /*
* Mark these ASICs to be reset as untracked first
* And add them back after reset completed
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- amdgpu_fbdev_set_suspend(adev, 1);
+ amdgpu_fbdev_set_suspend(tmp_adev, 1);
/* disable ras on ALL IPs */
if (!(in_ras_intr && !use_baco) &&
@@ -4178,7 +4318,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
}
}
-
if (in_ras_intr && !use_baco)
goto skip_sched_resume;
@@ -4189,30 +4328,14 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* job->base holds a reference to parent fence
*/
if (job && job->base.s_fence->parent &&
- dma_fence_is_signaled(job->base.s_fence->parent))
+ dma_fence_is_signaled(job->base.s_fence->parent)) {
job_signaled = true;
-
- if (job_signaled) {
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
}
-
- /* Guilty job will be freed after this*/
- r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
- if (r) {
- /*TODO Should we stop ?*/
- DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
- r, adev->ddev->unique);
- adev->asic_reset_res = r;
- }
-
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-
- if (tmp_adev == adev)
- continue;
-
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
&need_full_reset);
@@ -4274,11 +4397,15 @@ skip_sched_resume:
/*unlock kfd: SRIOV would do it separately */
if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
+ if (audio_suspended)
+ amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}
- if (hive)
+ if (hive) {
mutex_unlock(&hive->reset_lock);
+ mutex_unlock(&hive->hive_lock);
+ }
if (r)
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
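
The XGMI branch above relies on list_is_first()/list_rotate_to_front() so that the triggering adev is the first hive member walked by the reset loop. A generic, self-contained sketch of that list primitive, independent of this driver:

#include <linux/list.h>

struct hive_node {
	int id;
	struct list_head head;
};

/* Rotate 'list' so that 'entry' becomes the first element walked. */
static void make_first(struct hive_node *entry, struct list_head *list)
{
	if (!list_is_first(&entry->head, list))
		list_rotate_to_front(&entry->head, list);
}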
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 057f6ea645d7..61a26c15c8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -52,9 +52,6 @@ struct amdgpu_df_funcs {
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val);
- uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
- uint32_t df_inst);
- uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
};
struct amdgpu_df {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 27d8ae19a7a4..b5d6274952a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -23,9 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_discovery.h"
-#include "soc15_common.h"
#include "soc15_hw_ip.h"
-#include "nbio/nbio_2_3_offset.h"
#include "discovery.h"
#define mmRCC_CONFIG_MEMSIZE 0xde3
@@ -135,9 +133,10 @@ static int hw_id_map[MAX_HWIP] = {
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+ uint64_t pos = vram_size - adev->discovery_tmr_size;
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->discovery_tmr_size, false);
return 0;
}
@@ -158,7 +157,7 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size
return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}
-int amdgpu_discovery_init(struct amdgpu_device *adev)
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
@@ -169,17 +168,18 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
- if (!adev->discovery)
+ adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
+ adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL);
+ if (!adev->discovery_bin)
return -ENOMEM;
- r = amdgpu_discovery_read_binary(adev, adev->discovery);
+ r = amdgpu_discovery_read_binary(adev, adev->discovery_bin);
if (r) {
DRM_ERROR("failed to read ip discovery binary\n");
goto out;
}
- bhdr = (struct binary_header *)adev->discovery;
+ bhdr = (struct binary_header *)adev->discovery_bin;
if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
DRM_ERROR("invalid ip discovery binary signature\n");
@@ -192,7 +192,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
size = bhdr->binary_size - offset;
checksum = bhdr->binary_checksum;
- if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
size, checksum)) {
DRM_ERROR("invalid ip discovery binary checksum\n");
r = -EINVAL;
@@ -202,7 +202,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[IP_DISCOVERY];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ihdr = (struct ip_discovery_header *)(adev->discovery + offset);
+ ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
DRM_ERROR("invalid ip discovery data table signature\n");
@@ -210,7 +210,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
ihdr->size, checksum)) {
DRM_ERROR("invalid ip discovery data table checksum\n");
r = -EINVAL;
@@ -220,9 +220,9 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[GC];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ghdr = (struct gpu_info_header *)(adev->discovery + offset);
+ ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset);
- if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
ghdr->size, checksum)) {
DRM_ERROR("invalid gc data table checksum\n");
r = -EINVAL;
@@ -232,16 +232,16 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->discovery);
- adev->discovery = NULL;
+ kfree(adev->discovery_bin);
+ adev->discovery_bin = NULL;
return r;
}
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
- kfree(adev->discovery);
- adev->discovery = NULL;
+ kfree(adev->discovery_bin);
+ adev->discovery_bin = NULL;
}
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
@@ -257,14 +257,16 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
uint8_t num_base_address;
int hw_ip;
int i, j, k;
+ int r;
- if (!adev->discovery) {
- DRM_ERROR("ip discovery uninitialized\n");
- return -EINVAL;
+ r = amdgpu_discovery_init(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_discovery_init failed\n");
+ return r;
}
- bhdr = (struct binary_header *)adev->discovery;
- ihdr = (struct ip_discovery_header *)(adev->discovery +
+ bhdr = (struct binary_header *)adev->discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
@@ -272,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery + die_offset);
+ dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -286,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery + ip_offset);
+ ip = (struct ip *)(adev->discovery_bin + ip_offset);
num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -335,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
uint16_t num_ips;
int i, j;
- if (!adev->discovery) {
+ if (!adev->discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery;
- ihdr = (struct ip_discovery_header *)(adev->discovery +
+ bhdr = (struct binary_header *)adev->discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery + die_offset);
+ dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery + ip_offset);
+ ip = (struct ip *)(adev->discovery_bin + ip_offset);
if (le16_to_cpu(ip->hw_id) == hw_id) {
if (major)
@@ -375,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct gc_info_v1_0 *gc_info;
- if (!adev->discovery) {
+ if (!adev->discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery;
- gc_info = (struct gc_info_v1_0 *)(adev->discovery +
+ bhdr = (struct binary_header *)adev->discovery_bin;
+ gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
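The discovery path above repeats one pattern: overlay a packed header on the raw blob, validate a signature, then verify a checksum over a region the header describes (fields are byte-swapped with le16_to_cpu/le32_to_cpu where needed). A self-contained userspace sketch of that pattern follows; the header layout, magic value, and byte-sum checksum are illustrative assumptions, not the real amdgpu discovery format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_binary_header {
	uint32_t signature;	/* magic identifying the blob */
	uint16_t offset;	/* where the payload starts */
	uint16_t size;		/* payload length in bytes */
	uint16_t checksum;	/* expected sum over the payload */
};

#define FAKE_SIGNATURE 0x28211407u	/* hypothetical magic */

static uint16_t sum16(const uint8_t *p, size_t n)
{
	uint16_t s = 0;

	while (n--)
		s += *p++;
	return s;
}

int main(void)
{
	uint8_t blob[64] = {0};
	struct fake_binary_header h = {
		.signature = FAKE_SIGNATURE,
		.offset = sizeof(h),
		.size = 4,
	};
	struct fake_binary_header bhdr;

	/* Build a fake "discovery binary": header, then 4 payload bytes. */
	memcpy(blob + h.offset, "\x01\x02\x03\x04", h.size);
	h.checksum = sum16(blob + h.offset, h.size);
	memcpy(blob, &h, sizeof(h));

	/* Validate it the way amdgpu_discovery_init() validates its blob
	 * (the driver casts in place; memcpy keeps the sketch portable). */
	memcpy(&bhdr, blob, sizeof(bhdr));
	if (bhdr.signature != FAKE_SIGNATURE)
		return fprintf(stderr, "bad signature\n"), 1;
	if (sum16(blob + bhdr.offset, bhdr.size) != bhdr.checksum)
		return fprintf(stderr, "bad checksum\n"), 1;
	puts("discovery blob validated");
	return 0;
}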
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index ba78e15d9b05..d50d597c45ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -26,7 +26,6 @@
#define DISCOVERY_TMR_SIZE (64 << 10)
-int amdgpu_discovery_init(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 84cee27cd7ef..f7143d927b6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -523,7 +523,8 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
break;
case CHIP_RAVEN:
/* enable S/G on PCO and RV2 */
- if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+ (adev->apu_flags & AMD_APU_IS_PICASSO))
domain |= AMDGPU_GEM_DOMAIN_GTT;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ffeb20f11c07..43d8ed7dbd00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -38,6 +38,7 @@
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
+#include <linux/pci-p2pdma.h>
/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
@@ -179,6 +180,9 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
+ if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
+ attach->peer2peer = false;
+
if (attach->dev->driver == adev->dev->driver)
return 0;
@@ -272,14 +276,21 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct sg_table *sgt;
long r;
if (!bo->pin_count) {
- /* move buffer into GTT */
+ /* move buffer into GTT or VRAM */
struct ttm_operation_ctx ctx = { false, false };
+ unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+ attach->peer2peer) {
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ domains |= AMDGPU_GEM_DOMAIN_VRAM;
+ }
+ amdgpu_bo_placement_from_domain(bo, domains);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return ERR_PTR(r);
@@ -289,20 +300,34 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
return ERR_PTR(-EBUSY);
}
- sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
- if (IS_ERR(sgt))
- return sgt;
-
- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC))
- goto error_free;
+ switch (bo->tbo.mem.mem_type) {
+ case TTM_PL_TT:
+ sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+ bo->tbo.num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
+
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
+ break;
+
+ case TTM_PL_VRAM:
+ r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
+ dir, &sgt);
+ if (r)
+ return ERR_PTR(r);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
return sgt;
error_free:
sg_free_table(sgt);
kfree(sgt);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EBUSY);
}
/**
@@ -318,9 +343,18 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- sg_free_table(sgt);
- kfree(sgt);
+ struct dma_buf *dma_buf = attach->dmabuf;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (sgt->sgl->page_link) {
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+ } else {
+ amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
+ }
}
/**
@@ -514,6 +548,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
}
static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
.move_notify = amdgpu_dma_buf_move_notify
};
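A small sketch of the placement decision the new amdgpu_dma_buf_map() hunk encodes: GTT is always an allowed fallback, and VRAM stays allowed only when the exporter prefers VRAM and the importer negotiated peer-to-peer access. The flag values below are hypothetical stand-ins for the AMDGPU_GEM_DOMAIN_* bits; only the branching logic mirrors the hunk.

#include <stdio.h>

#define DOM_GTT  0x1	/* stand-in for AMDGPU_GEM_DOMAIN_GTT */
#define DOM_VRAM 0x2	/* stand-in for AMDGPU_GEM_DOMAIN_VRAM */

static unsigned int pick_domains(unsigned int preferred, int peer2peer)
{
	unsigned int domains = DOM_GTT;		/* always-valid fallback */

	/* VRAM is added only for a P2P-capable importer of a
	 * VRAM-preferring BO; otherwise ttm_bo_validate() migrates the
	 * buffer to GTT as before. */
	if ((preferred & DOM_VRAM) && peer2peer)
		domains |= DOM_VRAM;
	return domains;
}

int main(void)
{
	printf("p2p importer,   VRAM bo -> 0x%x\n", pick_domains(DOM_VRAM, 1));
	printf("plain importer, VRAM bo -> 0x%x\n", pick_domains(DOM_VRAM, 0));
	printf("any importer,   GTT bo  -> 0x%x\n", pick_domains(DOM_GTT, 1));
	return 0;
}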
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index ba1bb95a3cf9..d2a105e3bf7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -856,7 +856,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
}
} else {
DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
@@ -1188,3 +1188,13 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
+{
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev))
+ return smu_allow_xgmi_power_down(smu, en);
+
+ return 0;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 936d85aa0fbc..6a8aae70a0e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -450,6 +450,7 @@ struct amdgpu_pm {
/* Used for I2C access to various EEPROMs on relevant ASICs */
struct i2c_adapter smu_i2c;
+ struct list_head pm_attr_list;
};
#define R600_SSTU_DFLT 0
@@ -538,4 +539,6 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
uint32_t cstate);
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 466bfe541e45..126e74758a34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -86,9 +86,10 @@
* - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
* - 3.36.0 - Allow reading more status registers on si/cik
* - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
+ * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 37
+#define KMS_DRIVER_MINOR 38
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -139,12 +140,14 @@ int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
/* FBC (bit 0) disabled by default*/
uint amdgpu_dc_feature_mask = 0;
+uint amdgpu_dc_debug_mask = 0;
int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry;
int amdgpu_force_asic_type = -1;
+int amdgpu_tmz = 0;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
@@ -688,13 +691,12 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
/**
* DOC: hws_gws_support(bool)
- * Whether HWS support gws barriers. Default value: false (not supported)
- * This will be replaced with a MEC firmware version check once firmware
- * is ready
+ * Assume that HWS supports GWS barriers regardless of what the firmware version
+ * check says. Default value: false (rely on MEC2 firmware version check).
*/
bool hws_gws_support;
module_param(hws_gws_support, bool, 0444);
-MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supported (Default), true = supported)");
+MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
* DOC: queue_preemption_timeout_ms (int)
@@ -714,6 +716,13 @@ MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
+ * DOC: dcdebugmask (uint)
+ * Override which display debug features are enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ */
+MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
+module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
+
+/**
* DOC: abmlevel (uint)
* Override the default ABM (Adaptive Backlight Management) level used for DC
* enabled hardware. Requires DMCU to be supported and loaded.
@@ -729,6 +738,16 @@ uint amdgpu_dm_abm_level = 0;
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
+/**
+ * DOC: tmz (int)
+ * Trusted Memory Zone (TMZ) is a method to protect data being written
+ * to or read from memory.
+ *
+ * The default value is 0 (off). TODO: change the default to auto once the feature is complete.
+ */
+MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto, 0 = off (default), 1 = on)");
+module_param_named(tmz, amdgpu_tmz, int, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1164,14 +1183,6 @@ static int amdgpu_pmops_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- /* GPU comes up enabled by the bios on resume */
- if (amdgpu_device_supports_boco(drm_dev) ||
- amdgpu_device_supports_baco(drm_dev)) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
return amdgpu_device_resume(drm_dev, true);
}
@@ -1181,7 +1192,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_dev->dev_private;
int r;
+ adev->in_hibernate = true;
r = amdgpu_device_suspend(drm_dev, true);
+ adev->in_hibernate = false;
if (r)
return r;
return amdgpu_asic_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9ae7b61f696a..25ddb482466a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
u32 cpp;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ AMDGPU_GEM_CREATE_VRAM_CLEARED;
info = drm_get_format_info(adev->ddev, mode_cmd);
cpp = info->cpp[0];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 7531527067df..d878fe7fee51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -192,14 +192,22 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
* Used For polling fence.
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+ uint32_t timeout)
{
uint32_t seq;
+ signed long r;
if (!s)
return -EINVAL;
seq = ++ring->fence_drv.sync_seq;
+ r = amdgpu_fence_wait_polling(ring,
+ seq - ring->fence_drv.num_fences_mask,
+ timeout);
+ if (r < 1)
+ return -ETIMEDOUT;
+
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, 0);
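Why amdgpu_fence_emit_polling() now waits before emitting: the fence driver has only num_fences_mask + 1 seqno slots, so sequence N may be handed out only once N - num_fences_mask has signalled, otherwise the new fence would alias a still-live slot. A userspace mock of that window check (the names and the signalling model are made up; the real code polls a writeback location with a timeout):

#include <stdint.h>
#include <stdio.h>

#define NUM_FENCES   8u			/* power of two, like the driver */
#define FENCES_MASK  (NUM_FENCES - 1)

static uint32_t last_signalled;		/* pretend hardware progress */

static int wait_polling(uint32_t seq)
{
	/* Wrap-safe "has seq signalled yet?" test. */
	return (int32_t)(last_signalled - seq) >= 0 ? 0 : -1;
}

static int emit_polling(uint32_t *sync_seq, uint32_t *out)
{
	uint32_t seq = ++(*sync_seq);

	/* Fence N may only go out once N - mask has completed, or the
	 * new fence would reuse a slot that is still in flight. */
	if (wait_polling(seq - FENCES_MASK))
		return -1;		/* stands in for -ETIMEDOUT */
	*out = seq;
	return 0;
}

int main(void)
{
	uint32_t sync_seq = 0, seq;

	while (!emit_polling(&sync_seq, &seq))
		printf("emitted fence %u\n", seq);
	printf("stalled at %u until fence %u signals\n",
	       sync_seq, sync_seq - FENCES_MASK);
	return 0;
}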
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
new file mode 100644
index 000000000000..815c072ac4da
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "smu_v11_0_i2c.h"
+#include "atom.h"
+
+#define I2C_PRODUCT_INFO_ADDR 0xAC
+#define I2C_PRODUCT_INFO_ADDR_SIZE 0x2
+#define I2C_PRODUCT_INFO_OFFSET 0xC0
+
+bool is_fru_eeprom_supported(struct amdgpu_device *adev)
+{
+ /* TODO: Gaming SKUs don't have the FRU EEPROM.
+ * Use this hack to address hangs on modprobe on gaming SKUs
+ * until a proper solution can be implemented by only supporting
+ * the explicit chip IDs for VG20 Server cards
+ *
+ * TODO: Add list of supported Arcturus DIDs once confirmed
+ */
+ if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) ||
+ (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) ||
+ (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4))
+ return true;
+ return false;
+}
+
+int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
+ unsigned char *buff)
+{
+ int ret, size;
+ struct i2c_msg msg = {
+ .addr = I2C_PRODUCT_INFO_ADDR,
+ .flags = I2C_M_RD,
+ .buf = buff,
+ };
+ buff[0] = 0;
+ buff[1] = addrptr;
+ msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + 1;
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+ if (ret < 1) {
+ DRM_WARN("FRU: Failed to get size field");
+ return ret;
+ }
+
+ /* The size byte returned over i2c requires subtraction of 0xC0, since
+ * the size is apparently always reported as 0xC0 + the actual size.
+ */
+ size = buff[2] - I2C_PRODUCT_INFO_OFFSET;
+ /* Add 1 since address field was 1 byte */
+ buff[1] = addrptr + 1;
+
+ msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + size;
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+ if (ret < 1) {
+ DRM_WARN("FRU: Failed to get data field");
+ return ret;
+ }
+
+ return size;
+}
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+{
+ unsigned char buff[34];
+ int addrptr = 0, size = 0;
+
+ if (!is_fru_eeprom_supported(adev))
+ return 0;
+
+ /* If algo exists, it means that the i2c_adapter's initialized */
+ if (!adev->pm.smu_i2c.algo) {
+ DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+ return 0;
+ }
+
+ /* There's a lot of repetition here. This is due to the FRU having
+ * variable-length fields. To get the information, we have to find the
+ * size of each field, and then keep reading along
+ * until we get all of the data that we want. We use addrptr to track
+ * the address as we go.
+ */
+
+ /* The first fields are all 1 byte in size. Bytes 0-7 are offsets that
+ * contain information that isn't useful to us.
+ * Bytes 8-0xa are also 1 byte each and hold the size of the entire
+ * struct and the language field, so just start from 0xb, the manufacturer size
+ */
+ addrptr = 0xb;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
+ return size;
+ }
+
+ /* Increment the addrptr by the size of the field, and 1 due to the
+ * size field being 1 byte. This pattern continues below.
+ */
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU product name, ret:%d", size);
+ return size;
+ }
+
+ /* Product name should only be 32 characters. Any more,
+ * and something could be wrong. Cap it at 32 to be safe
+ */
+ if (size > 32) {
+ DRM_WARN("FRU Product Name is larger than 32 characters. This is likely a mistake");
+ size = 32;
+ }
+ /* Start at 2 due to buff using fields 0 and 1 for the address */
+ memcpy(adev->product_name, &buff[2], size);
+ adev->product_name[size] = '\0';
+
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU product number, ret:%d", size);
+ return size;
+ }
+
+ /* Product number should only be 16 characters. Any more,
+ * and something could be wrong. Cap it at 16 to be safe
+ */
+ if (size > 16) {
+ DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
+ size = 16;
+ }
+ memcpy(adev->product_number, &buff[2], size);
+ adev->product_number[size] = '\0';
+
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU product version, ret:%d", size);
+ return size;
+ }
+
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
+ return size;
+ }
+
+ /* Serial number should only be 16 characters. Any more,
+ * and something could be wrong. Cap it at 16 to be safe
+ */
+ if (size > 16) {
+ DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
+ size = 16;
+ }
+ memcpy(adev->serial, &buff[2], size);
+ adev->serial[size] = '\0';
+
+ return 0;
+}
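The field walk above condenses to a few lines: every FRU field starts with a type/length byte of the form 0xC0 + length, and the reader hops from field to field by size + 1. A userspace sketch with a made-up buffer (the 0xC0 constant matches I2C_PRODUCT_INFO_OFFSET; everything else is illustrative):

#include <stdio.h>
#include <string.h>

#define FIELD_LEN_OFFSET 0xC0	/* same constant the driver subtracts */

int main(void)
{
	/* [0xC0+len][len bytes] repeated: "AMD", "Sample Board", "V1" */
	const unsigned char fru[] = {
		0xC3, 'A', 'M', 'D',
		0xCC, 'S', 'a', 'm', 'p', 'l', 'e', ' ',
		      'B', 'o', 'a', 'r', 'd',
		0xC2, 'V', '1',
	};
	size_t addrptr = 0;

	while (addrptr < sizeof(fru)) {
		int size = fru[addrptr] - FIELD_LEN_OFFSET;
		char buf[33];

		if (size < 1 || size > 32)
			break;			/* cap, like the driver */
		memcpy(buf, &fru[addrptr + 1], size);
		buf[size] = '\0';
		printf("field: %s\n", buf);
		addrptr += size + 1;		/* data + the length byte */
	}
	return 0;
}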
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
index f78cbae9db88..968115c97e33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,19 +19,11 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#ifndef __DC_COMMON_DEFS_H__
-#define __DC_COMMON_DEFS_H__
-
-#include "dm_services.h"
-#include "dc_features.h"
-#include "display_mode_structs.h"
-#include "display_mode_enums.h"
-
+#ifndef __AMDGPU_PRODINFO_H__
+#define __AMDGPU_PRODINFO_H__
-double dml_round(double a);
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
-#endif /* __DC_COMMON_DEFS_H__ */
+#endif // __AMDGPU_PRODINFO_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4277125a79ee..4ed9958af94e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
@@ -161,16 +162,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_bo_list_entry vm_pd;
struct list_head list, duplicates;
+ struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
- int r;
+ long r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.num_shared = 1;
+ tv.num_shared = 2;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -178,28 +180,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
- "we fail to reserve bo (%d)\n", r);
+ "we fail to reserve bo (%ld)\n", r);
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
- if (bo_va && --bo_va->ref_count == 0) {
- amdgpu_vm_bo_rmv(adev, bo_va);
-
- if (amdgpu_vm_ready(vm)) {
- struct dma_fence *fence = NULL;
+ if (!bo_va || --bo_va->ref_count)
+ goto out_unlock;
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (unlikely(r)) {
- dev_err(adev->dev, "failed to clear page "
- "tables on GEM object close (%d)\n", r);
- }
+ amdgpu_vm_bo_rmv(adev, bo_va);
+ if (!amdgpu_vm_ready(vm))
+ goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
- }
- }
+ fence = dma_resv_get_excl(bo->tbo.base.resv);
+ if (fence) {
+ amdgpu_bo_fence(bo, fence, true);
+ fence = NULL;
}
+
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
+ if (r || !fence)
+ goto out_unlock;
+
+ amdgpu_bo_fence(bo, fence, true);
+ dma_fence_put(fence);
+
+out_unlock:
+ if (unlikely(r < 0))
+ dev_err(adev->dev, "failed to clear page "
+ "tables on GEM object close (%ld)\n", r);
ttm_eu_backoff_reservation(&ticket, &list);
}
@@ -226,7 +234,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
- AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
+ AMDGPU_GEM_CREATE_ENCRYPTED))
return -EINVAL;
@@ -234,6 +243,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
return -EINVAL;
+ if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+ DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+ return -EINVAL;
+ }
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -854,7 +868,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
attachment = READ_ONCE(bo->tbo.base.import_attach);
if (attachment)
- seq_printf(m, " imported from %p", dma_buf);
+ seq_printf(m, " imported from %p%s", dma_buf,
+ attachment->peer2peer ? " P2P" : "");
else if (dma_buf)
seq_printf(m, " exported as %p", dma_buf);
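The amdgpu_gem_object_close() rewrite above replaces nested conditionals with the kernel's single-exit style: every early-out funnels through one out_unlock label, so the reservation is backed off exactly once and the error message is printed in one place. A minimal standalone illustration of that shape (all names and the fake error value are hypothetical):

#include <stdio.h>

static long reserve(void)  { return 0; }	/* pretend lock succeeds */
static void backoff(void)  { puts("backoff"); }

static long do_close(int have_bo_va, int vm_ready)
{
	long r = reserve();

	if (r)
		return r;		/* nothing reserved, nothing to undo */

	if (!have_bo_va)
		goto out_unlock;	/* early outs share the single exit */
	if (!vm_ready)
		goto out_unlock;

	r = -22;			/* pretend clear_freed failed */

out_unlock:
	if (r < 0)
		fprintf(stderr, "failed to clear page tables (%ld)\n", r);
	backoff();			/* reservation dropped exactly once */
	return r;
}

int main(void)
{
	do_close(0, 1);		/* early out: still backs off */
	do_close(1, 1);		/* error path: message printed once */
	return 0;
}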
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 6b9c9193cdfa..d612033a23ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -48,7 +48,7 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
return bit;
}
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue)
{
*queue = bit % adev->gfx.mec.num_queue_per_pipe;
@@ -274,7 +274,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
continue;
- amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+ amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
/*
* 1. Using pipes 2/3 from MEC 2 seems cause problems.
@@ -304,10 +304,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
- if (r)
- return r;
-
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -318,9 +314,11 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
return r;
ring->eop_gpu_addr = kiq->eop_gpu_addr;
+ ring->no_scheduler = true;
sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
r = amdgpu_ring_init(adev, ring, 1024,
- irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+ irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
@@ -329,7 +327,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
- amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
amdgpu_ring_fini(ring);
}
@@ -488,6 +485,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
return amdgpu_ring_test_helper(kiq_ring);
}
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+ int queue_bit)
+{
+ int mec, pipe, queue;
+ int set_resource_bit = 0;
+
+ amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+
+ set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
+
+ return set_resource_bit;
+}
+
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -510,7 +520,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
break;
}
- queue_mask |= (1ull << i);
+ queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
}
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
@@ -670,16 +680,23 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r, cnt = 0;
unsigned long flags;
- uint32_t seq;
+ uint32_t seq, reg_val_offs = 0, value = 0;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+ pr_err("critical bug! too many kiq readers\n");
+ goto failed_unlock;
+ }
amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -705,9 +722,18 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;
- return adev->wb.wb[kiq->reg_val_offs];
+ mb();
+ value = adev->wb.wb[reg_val_offs];
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ return value;
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
+ if (reg_val_offs)
+ amdgpu_device_wb_free(adev, reg_val_offs);
pr_err("failed to read reg:%x\n", reg);
return ~0;
}
@@ -725,7 +751,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -754,6 +783,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
return;
+failed_undo:
+ amdgpu_ring_undo(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
pr_err("failed to write reg:%x\n", reg);
}
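The queue_mask fix above exists because the driver's linear queue bit and the firmware's SET_RESOURCES bit layout differ: the driver numbers queues by the ASIC's actual geometry, while the firmware expects the fixed mec * 4 * 8 + pipe * 8 + queue encoding used in amdgpu_queue_mask_bit_to_set_resource_bit(). A sketch of the remap; QUEUES_PER_PIPE and PIPES_PER_MEC are example geometry values, not constants from the driver:

#include <stdio.h>

#define QUEUES_PER_PIPE 2	/* example ASIC geometry */
#define PIPES_PER_MEC   4

static void bit_to_mpq(int bit, int *mec, int *pipe, int *queue)
{
	*queue = bit % QUEUES_PER_PIPE;
	*pipe  = (bit / QUEUES_PER_PIPE) % PIPES_PER_MEC;
	*mec   = bit / (QUEUES_PER_PIPE * PIPES_PER_MEC);
}

static int to_set_resource_bit(int bit)
{
	int mec, pipe, queue;

	bit_to_mpq(bit, &mec, &pipe, &queue);
	return mec * 4 * 8 + pipe * 8 + queue;	/* fixed FW layout */
}

int main(void)
{
	/* The mapping is not the identity, which is exactly why
	 * "queue_mask |= 1ull << i" was wrong. */
	for (int bit = 0; bit < 8; bit++)
		printf("driver bit %d -> fw bit %d\n",
		       bit, to_set_resource_bit(bit));
	return 0;
}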
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 5825692d07e4..d43c11671a38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -103,7 +103,6 @@ struct amdgpu_kiq {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
const struct kiq_pm4_funcs *pmf;
- uint32_t reg_val_offs;
};
/*
@@ -286,13 +285,8 @@ struct amdgpu_gfx {
bool me_fw_write_wait;
bool cp_fw_write_wait;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
- struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS];
- uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
- struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
- struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
- uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
@@ -370,7 +364,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
int pipe, int queue);
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 5884ab590486..acabb57aa8af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -136,8 +136,8 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
/**
* amdgpu_gmc_vram_location - try to find VRAM location
*
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
* @base: base address at which to put VRAM
*
* Function will try to place VRAM at base address provided
@@ -165,8 +165,8 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
/**
* amdgpu_gmc_gart_location - try to find GART location
*
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
*
* Function will try to place GART before or after VRAM.
*
@@ -207,8 +207,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
/**
* amdgpu_gmc_agp_location - try to find AGP location
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
*
* Function will try to find a place for the AGP BAR in the MC address
* space.
@@ -373,3 +373,38 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
+
+/**
+ * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
+ * @adev: amdgpu_device pointer
+ *
+ * Check and set if the device @adev supports Trusted Memory
+ * Zones (TMZ).
+ */
+void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ /* Don't enable it by default yet.
+ */
+ if (amdgpu_tmz < 1) {
+ adev->gmc.tmz_enabled = false;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
+ } else {
+ adev->gmc.tmz_enabled = true;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
+ }
+ break;
+ default:
+ adev->gmc.tmz_enabled = false;
+ dev_warn(adev->dev,
+ "Trusted Memory Zone (TMZ) feature not supported\n");
+ break;
+ }
+}
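amdgpu_gmc_tmz_set() reads the tmz module parameter as a tri-state (-1 = auto, 0 = off, 1 = on) but, while the feature is experimental, folds auto into off via the amdgpu_tmz < 1 test. A tiny mock of that interpretation (the function name here is hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* tmz module parameter: -1 = auto, 0 = off, 1 = on */
static bool tmz_enabled(int param, bool asic_supports)
{
	if (!asic_supports)
		return false;	/* unsupported ASICs never enable TMZ */
	return param >= 1;	/* "auto" is treated as off for now */
}

int main(void)
{
	const int params[] = { -1, 0, 1 };

	for (unsigned int i = 0; i < 3; i++)
		printf("tmz=%2d -> %s\n", params[i],
		       tmz_enabled(params[i], true) ? "on" : "off");
	return 0;
}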
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 7546da0cc70c..2bd9423c1dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -213,6 +213,8 @@ struct amdgpu_gmc {
} fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
+ bool tmz_enabled;
+
const struct amdgpu_gmc_funcs *gmc_funcs;
struct amdgpu_xgmi xgmi;
@@ -276,4 +278,6 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
+extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ccbd7acfc4cb..b91853fd66d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -61,12 +61,13 @@
* Returns 0 on success, error on failure.
*/
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size, struct amdgpu_ib *ib)
+ unsigned size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_ib *ib)
{
int r;
if (size) {
- r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
+ r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
&ib->sa_bo, size, 256);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -131,6 +132,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
unsigned fence_flags = 0;
+ bool secure;
unsigned i;
int r = 0;
@@ -159,6 +161,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
+ if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
+ (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
+ dev_err(adev->dev, "secure submissions not supported on compute rings\n");
+ return -EINVAL;
+ }
+
alloc_size = ring->funcs->emit_frame_size + num_ibs *
ring->funcs->emit_ib_size;
@@ -181,6 +189,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
dma_fence_put(tmp);
}
+ if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
+ ring->funcs->emit_mem_sync(ring);
+
if (ring->funcs->insert_start)
ring->funcs->insert_start(ring);
@@ -215,6 +226,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
amdgpu_ring_emit_cntxcntl(ring, status);
}
+ /* Set up initial TMZiness and send it off.
+ */
+ secure = false;
+ if (job && ring->funcs->emit_frame_cntl) {
+ secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
+ amdgpu_ring_emit_frame_cntl(ring, true, secure);
+ }
+
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
@@ -226,12 +245,20 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
+ if (job && ring->funcs->emit_frame_cntl) {
+ if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
+ amdgpu_ring_emit_frame_cntl(ring, false, secure);
+ secure = !secure;
+ amdgpu_ring_emit_frame_cntl(ring, true, secure);
+ }
+ }
+
amdgpu_ring_emit_ib(ring, job, ib, status);
status &= ~AMDGPU_HAVE_CTX_SWITCH;
}
- if (ring->funcs->emit_tmz)
- amdgpu_ring_emit_tmz(ring, false);
+ if (job && ring->funcs->emit_frame_cntl)
+ amdgpu_ring_emit_frame_cntl(ring, false, secure);
#ifdef CONFIG_X86_64
if (!(adev->flags & AMD_IS_APU))
@@ -280,22 +307,32 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
*/
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
- int r;
+ unsigned size;
+ int r, i;
- if (adev->ib_pool_ready) {
+ if (adev->ib_pool_ready)
return 0;
- }
- r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
- AMDGPU_IB_POOL_SIZE*64*1024,
- AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT);
- if (r) {
- return r;
- }
+ for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
+ if (i == AMDGPU_IB_POOL_DIRECT)
+ size = PAGE_SIZE * 2;
+ else
+ size = AMDGPU_IB_POOL_SIZE;
+
+ r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
+ size, AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto error;
+ }
adev->ib_pool_ready = true;
return 0;
+
+error:
+ while (i--)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ return r;
}
/**
@@ -308,10 +345,14 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
*/
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
- if (adev->ib_pool_ready) {
- amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
- adev->ib_pool_ready = false;
- }
+ int i;
+
+ if (!adev->ib_pool_ready)
+ return;
+
+ for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ adev->ib_pool_ready = false;
}
/**
@@ -326,9 +367,9 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
*/
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
- unsigned i;
- int r, ret = 0;
long tmo_gfx, tmo_mm;
+ int r, ret = 0;
+ unsigned i;
tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
if (amdgpu_sriov_vf(adev)) {
@@ -406,10 +447,16 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
+ seq_printf(m, "--------------------- DELAYED --------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+ m);
+ seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+ m);
+ seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
return 0;
-
}
static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
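The pool conversion above trades the single ring_tmp_bo allocator for an array indexed by amdgpu_ib_pool_type, with the classic while (i--) unwind so a mid-loop failure frees only the pools that were actually created. A userspace mock of that init/fini shape (pool names and sizes are illustrative; malloc stands in for amdgpu_sa_bo_manager_init):

#include <stdio.h>
#include <stdlib.h>

enum ib_pool_type { POOL_DELAYED, POOL_IMMEDIATE, POOL_DIRECT, POOL_MAX };

static void *pools[POOL_MAX];

static int pool_init(int i)
{
	/* The direct pool is kept small, the others larger. */
	size_t size = (i == POOL_DIRECT) ? 4096 * 2 : 64 * 1024;

	pools[i] = malloc(size);
	return pools[i] ? 0 : -1;
}

static void pool_fini(int i)
{
	free(pools[i]);
	pools[i] = NULL;
}

int main(void)
{
	int i, r = 0;

	for (i = 0; i < POOL_MAX; i++) {
		r = pool_init(i);
		if (r)
			goto error;
	}
	puts("all IB pools ready");
	for (i = 0; i < POOL_MAX; i++)
		pool_fini(i);
	return 0;

error:
	while (i--)			/* unwind only what succeeded */
		pool_fini(i);
	return 1;
}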
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3a67f6c046d4..fe92dcd94d4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
- if ((*id)->owner != vm->direct.fence_context ||
+ if ((*id)->owner != vm->immediate.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
- if ((*id)->owner != vm->direct.fence_context)
+ if ((*id)->owner != vm->immediate.fence_context)
continue;
if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -448,7 +448,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->direct.fence_context;
+ id->owner = vm->immediate.fence_context;
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 5ed4227f304b..0cc4c67f95f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -260,7 +260,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
if (nvec > 0) {
adev->irq.msi_enabled = true;
- dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
+ dev_dbg(adev->dev, "using MSI/MSI-X.\n");
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4981e443a884..47207188c569 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -33,6 +33,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
struct amdgpu_task_info ti;
+ struct amdgpu_device *adev = ring->adev;
memset(&ti, 0, sizeof(struct amdgpu_task_info));
@@ -49,10 +50,13 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
ti.process_name, ti.tgid, ti.task_name, ti.pid);
- if (amdgpu_device_should_recover_gpu(ring->adev))
+ if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
- else
+ } else {
drm_sched_suspend_timeout(&ring->sched);
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.tdr_debug = true;
+ }
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -87,7 +91,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job)
+ enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job)
{
int r;
@@ -95,7 +100,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
if (r)
return r;
- r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+ r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
if (r)
kfree(*job);
@@ -140,7 +145,6 @@ void amdgpu_job_free(struct amdgpu_job *job)
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f)
{
- enum drm_sched_priority priority;
int r;
if (!f)
@@ -152,7 +156,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 3f7b8433d179..81caac9b958a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -38,6 +38,7 @@
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
struct amdgpu_fence;
+enum amdgpu_ib_pool_type;
struct amdgpu_job {
struct drm_sched_job base;
@@ -61,14 +62,12 @@ struct amdgpu_job {
/* user fence handling */
uint64_t uf_addr;
uint64_t uf_sequence;
-
};
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job);
-
+ enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 5727f00afc8e..d31d65e6b039 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -144,7 +144,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
const unsigned ib_size_dw = 16;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index bd9ef9cc86de..5131a0a1bc8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -43,8 +43,6 @@ struct amdgpu_jpeg {
uint8_t num_jpeg_inst;
struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
struct amdgpu_jpeg_reg internal;
- struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
- uint32_t num_jpeg_sched;
unsigned harvest_config;
struct delayed_work idle_work;
enum amd_powergating_state cur_state;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index fd1dc3236eca..d7e17e34fee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -183,18 +183,18 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
/* Call ACPI methods: require modeset init
* but failure is not fatal
*/
- if (!r) {
- acpi_status = amdgpu_acpi_init(adev);
- if (acpi_status)
- dev_dbg(&dev->pdev->dev,
- "Error during ACPI methods call\n");
- }
+
+ acpi_status = amdgpu_acpi_init(adev);
+ if (acpi_status)
+ dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
if (adev->runpm) {
- dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
+ /* only need to skip on ATPX */
+ if (amdgpu_device_supports_boco(dev) &&
+ !amdgpu_is_atpx_hybrid())
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
- pm_runtime_set_active(dev->dev);
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 919bd566ba3c..edaac242ff85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -77,7 +77,6 @@ struct amdgpu_nbio_funcs {
u32 *flags);
void (*ih_control)(struct amdgpu_device *adev);
void (*init_registers)(struct amdgpu_device *adev);
- void (*detect_hw_virt)(struct amdgpu_device *adev);
void (*remap_hdp_registers)(struct amdgpu_device *adev);
void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index c687f5415b3f..3d822eba9a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -753,7 +753,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
amdgpu_bo_size(shadow), NULL, fence,
- true, false);
+ true, false, false);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5e39ecd8cc28..7d41f7b9a340 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -229,6 +229,17 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
+/**
+ * amdgpu_bo_encrypted - test if the BO is encrypted
+ * @bo: pointer to a buffer object
+ *
+ * Return true if the buffer object is encrypted, false otherwise.
+ */
+static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
+{
+ return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
+}
+
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index abe94a55ecad..d7646cbce346 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -154,18 +154,15 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
*
*/
-static ssize_t amdgpu_get_dpm_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -189,19 +186,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
-static ssize_t amdgpu_set_dpm_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -294,18 +288,15 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
*
*/
-static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -332,10 +323,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
"unknown");
}
-static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
@@ -343,9 +334,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -383,6 +371,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
return count;
}
+ if (adev->asic_type == CHIP_RAVEN) {
+ if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+ if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, false);
+ else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, true);
+ }
+ }
+
/* profile_exit setting is valid only when current mode is in profile mode */
if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
@@ -444,8 +441,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
ret = smu_get_power_num_states(&adev->smu, &data);
if (ret)
return ret;
- } else if (adev->powerplay.pp_funcs->get_pp_num_states)
+ } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
amdgpu_dpm_get_pp_num_states(adev, &data);
+ } else {
+ memset(&data, 0, sizeof(data));
+ }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -472,9 +472,6 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -511,9 +508,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
if (adev->pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -531,9 +525,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
else if (is_support_sw_smu(adev))
@@ -589,9 +580,6 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -631,9 +619,6 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -736,9 +721,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (count > 127)
return -EINVAL;
@@ -828,9 +810,6 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -870,19 +849,16 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
* the corresponding bit from original ppfeature masks and input the
* new ppfeature masks.
*/
-static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t amdgpu_set_pp_features(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint64_t featuremask;
int ret;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
@@ -914,18 +890,15 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
return count;
}
-static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_pp_features(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -982,9 +955,6 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1048,9 +1018,6 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1082,9 +1049,6 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1112,9 +1076,6 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
uint32_t mask = 0;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1146,9 +1107,6 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1176,9 +1134,6 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1212,9 +1167,6 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1242,9 +1194,6 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1278,9 +1227,6 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1308,9 +1254,6 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1344,9 +1287,6 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1374,9 +1314,6 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1410,9 +1347,6 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1438,9 +1372,6 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1479,9 +1410,6 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1507,9 +1435,6 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_sriov_vf(adev))
- return 0;
-
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1568,9 +1493,6 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1612,9 +1534,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
if (ret)
return -EINVAL;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (count < 2 || count > 127)
return -EINVAL;
@@ -1660,17 +1579,14 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
* The SMU firmware computes a percentage of load based on the
* aggregate activity level in the IP cores.
*/
-static ssize_t amdgpu_get_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
return r;
@@ -1696,17 +1612,14 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
* The SMU firmware computes a percentage of load based on the
* aggregate activity level in the IP cores.
*/
-static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
return r;
@@ -1742,11 +1655,14 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- uint64_t count0, count1;
+ uint64_t count0 = 0, count1 = 0;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->flags & AMD_IS_APU)
+ return -ENODATA;
+
+ if (!adev->asic_funcs->get_pcie_usage)
+ return -ENODATA;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1778,66 +1694,191 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
if (adev->unique_id)
return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
return 0;
}
-static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
-static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
- amdgpu_get_dpm_forced_performance_level,
- amdgpu_set_dpm_forced_performance_level);
-static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
-static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
-static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_force_state,
- amdgpu_set_pp_force_state);
-static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_table,
- amdgpu_set_pp_table);
-static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_sclk,
- amdgpu_set_pp_dpm_sclk);
-static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_mclk,
- amdgpu_set_pp_dpm_mclk);
-static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_socclk,
- amdgpu_set_pp_dpm_socclk);
-static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_fclk,
- amdgpu_set_pp_dpm_fclk);
-static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_dcefclk,
- amdgpu_set_pp_dpm_dcefclk);
-static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_pcie,
- amdgpu_set_pp_dpm_pcie);
-static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_sclk_od,
- amdgpu_set_pp_sclk_od);
-static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_mclk_od,
- amdgpu_set_pp_mclk_od);
-static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_power_profile_mode,
- amdgpu_set_pp_power_profile_mode);
-static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_od_clk_voltage,
- amdgpu_set_pp_od_clk_voltage);
-static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
- amdgpu_get_busy_percent, NULL);
-static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
- amdgpu_get_memory_busy_percent, NULL);
-static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
-static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_feature_status,
- amdgpu_set_pp_feature_status);
-static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
+static struct amdgpu_device_attr amdgpu_device_attrs[] = {
+ AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
+};
+
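+/* Default visibility check for a device attribute: mark it unsupported
+ * when its flags are filtered out by the SRIOV mask or the ASIC lacks
+ * the feature, and drop write permission where only reads make sense.
+ */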
+static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ const char *attr_name = dev_attr->attr.name;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ enum amd_asic_type asic_type = adev->asic_type;
+
+ if (!(attr->flags & mask)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
+
+ if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+ if (asic_type < CHIP_VEGA10)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+ if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ if (asic_type < CHIP_VEGA20)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+ if (asic_type == CHIP_ARCTURUS)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+ *states = ATTR_STATE_SUPPORTED;
+ } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+ if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pcie_bw)) {
+ /* PCIe Perf counters won't work on APU nodes */
+ if (adev->flags & AMD_IS_APU)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(unique_id)) {
+ if (!adev->unique_id)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_features)) {
+ if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
+ *states = ATTR_STATE_UNSUPPORTED;
+ }
+
+ if (asic_type == CHIP_ARCTURUS) {
+ /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+ if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+ DEVICE_ATTR_IS(pp_dpm_socclk) ||
+ DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+ }
+
+#undef DEVICE_ATTR_IS
+
+ return 0;
+}
+
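+/* Create one sysfs file for an amdgpu_device_attr, honouring the mode
+ * mask, and remember it on attr_list so it can be removed later.
+ */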
+static int amdgpu_device_attr_create(struct amdgpu_device *adev,
+ struct amdgpu_device_attr *attr,
+ uint32_t mask, struct list_head *attr_list)
+{
+ int ret = 0;
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ const char *name = dev_attr->attr.name;
+ enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
+ struct amdgpu_device_attr_entry *attr_entry;
+
+ int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
+
+ BUG_ON(!attr);
+
+ attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
+
+ ret = attr_update(adev, attr, mask, &attr_states);
+ if (ret) {
+ dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
+ name, ret);
+ return ret;
+ }
+
+ if (attr_states == ATTR_STATE_UNSUPPORTED)
+ return 0;
+
+ ret = device_create_file(adev->dev, dev_attr);
+ if (ret) {
+ dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
+ name, ret);
+ return ret;
+ }
+
+ attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
+ if (!attr_entry)
+ return -ENOMEM;
+
+ attr_entry->attr = attr;
+ INIT_LIST_HEAD(&attr_entry->entry);
+
+ list_add_tail(&attr_entry->entry, attr_list);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
+{
+ struct device_attribute *dev_attr = &attr->dev_attr;
+
+ device_remove_file(adev->dev, dev_attr);
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+ struct list_head *attr_list);
+
+static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
+ struct amdgpu_device_attr *attrs,
+ uint32_t counts,
+ uint32_t mask,
+ struct list_head *attr_list)
+{
+ int ret = 0;
+ uint32_t i = 0;
+
+ for (i = 0; i < counts; i++) {
+ ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
+ if (ret)
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ amdgpu_device_attr_remove_groups(adev, attr_list);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+ struct list_head *attr_list)
+{
+ struct amdgpu_device_attr_entry *entry, *entry_tmp;
+
+ if (list_empty(attr_list))
+ return;
+
+ list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
+ amdgpu_device_attr_remove(adev, entry->attr);
+ list_del(&entry->entry);
+ kfree(entry);
+ }
+}
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -3238,8 +3279,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
+ uint32_t mask = 0;
if (adev->pm.sysfs_initialized)
return 0;
@@ -3247,6 +3288,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled == 0)
return 0;
+ INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
hwmon_groups);
@@ -3257,160 +3300,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
-
-
- ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
- if (ret) {
- DRM_ERROR("failed to create device file pp_num_states\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_cur_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_force_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_table);
- if (ret) {
- DRM_ERROR("failed to create device file pp_table\n");
- return ret;
- }
-
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
-
- /* Arcturus does not support standalone mclk/socclk/fclk level setting */
- if (adev->asic_type == CHIP_ARCTURUS) {
- dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
- dev_attr_pp_dpm_mclk.store = NULL;
-
- dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
- dev_attr_pp_dpm_socclk.store = NULL;
-
- dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
- dev_attr_pp_dpm_fclk.store = NULL;
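+ /* Pick which attributes this function mode may expose: one-VF
+ * SRIOV gets only ATTR_FLAG_ONEVF attributes, multi-VF gets none,
+ * and bare metal gets everything.
+ */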
+ switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+ case SRIOV_VF_MODE_ONE_VF:
+ mask = ATTR_FLAG_ONEVF;
+ break;
+ case SRIOV_VF_MODE_MULTI_VF:
+ mask = 0;
+ break;
+ case SRIOV_VF_MODE_BARE_METAL:
+ default:
+ mask = ATTR_FLAG_MASK_ALL;
+ break;
}
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
- if (adev->asic_type >= CHIP_VEGA10) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_socclk\n");
- return ret;
- }
- if (adev->asic_type != CHIP_ARCTURUS) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
- return ret;
- }
- }
- }
- if (adev->asic_type >= CHIP_VEGA20) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_fclk\n");
- return ret;
- }
- }
- if (adev->asic_type != CHIP_ARCTURUS) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_pcie\n");
- return ret;
- }
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
- if (ret) {
- DRM_ERROR("failed to create device file pp_sclk_od\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
- if (ret) {
- DRM_ERROR("failed to create device file pp_mclk_od\n");
- return ret;
- }
- ret = device_create_file(adev->dev,
- &dev_attr_pp_power_profile_mode);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_power_profile_mode\n");
- return ret;
- }
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
- ret = device_create_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_od_clk_voltage\n");
- return ret;
- }
- }
- ret = device_create_file(adev->dev,
- &dev_attr_gpu_busy_percent);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "gpu_busy_level\n");
- return ret;
- }
- /* APU does not have its own dedicated memory */
- if (!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type != CHIP_VEGA10)) {
- ret = device_create_file(adev->dev,
- &dev_attr_mem_busy_percent);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "mem_busy_percent\n");
- return ret;
- }
- }
- /* PCIe Perf counters won't work on APU nodes */
- if (!(adev->flags & AMD_IS_APU)) {
- ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
- if (ret) {
- DRM_ERROR("failed to create device file pcie_bw\n");
- return ret;
- }
- }
- if (adev->unique_id)
- ret = device_create_file(adev->dev, &dev_attr_unique_id);
- if (ret) {
- DRM_ERROR("failed to create device file unique_id\n");
+ ret = amdgpu_device_attr_create_groups(adev,
+ amdgpu_device_attrs,
+ ARRAY_SIZE(amdgpu_device_attrs),
+ mask,
+ &adev->pm.pm_attr_list);
+ if (ret)
return ret;
- }
-
- if ((adev->asic_type >= CHIP_VEGA10) &&
- !(adev->flags & AMD_IS_APU)) {
- ret = device_create_file(adev->dev,
- &dev_attr_pp_features);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_features\n");
- return ret;
- }
- }
adev->pm.sysfs_initialized = true;
@@ -3419,51 +3328,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
if (adev->pm.dpm_enabled == 0)
return;
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
- device_remove_file(adev->dev, &dev_attr_power_dpm_state);
- device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-
- device_remove_file(adev->dev, &dev_attr_pp_num_states);
- device_remove_file(adev->dev, &dev_attr_pp_cur_state);
- device_remove_file(adev->dev, &dev_attr_pp_force_state);
- device_remove_file(adev->dev, &dev_attr_pp_table);
-
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (adev->asic_type >= CHIP_VEGA10) {
- device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
- if (adev->asic_type != CHIP_ARCTURUS)
- device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
- }
- if (adev->asic_type != CHIP_ARCTURUS)
- device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (adev->asic_type >= CHIP_VEGA20)
- device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
- device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
- device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
- device_remove_file(adev->dev,
- &dev_attr_pp_power_profile_mode);
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
- device_remove_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
- if (!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type != CHIP_VEGA10))
- device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
- if (!(adev->flags & AMD_IS_APU))
- device_remove_file(adev->dev, &dev_attr_pcie_bw);
- if (adev->unique_id)
- device_remove_file(adev->dev, &dev_attr_unique_id);
- if ((adev->asic_type >= CHIP_VEGA10) &&
- !(adev->flags & AMD_IS_APU))
- device_remove_file(adev->dev, &dev_attr_pp_features);
+
+ amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index 5db0ef86e84c..d9ae2b49a402 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -30,6 +30,55 @@ struct cg_flag_name
const char *name;
};
+enum amdgpu_device_attr_flags {
+ ATTR_FLAG_BASIC = (1 << 0),
+ ATTR_FLAG_ONEVF = (1 << 16),
+};
+
+#define ATTR_FLAG_TYPE_MASK (0x0000ffff)
+#define ATTR_FLAG_MODE_MASK (0xffff0000)
+#define ATTR_FLAG_MASK_ALL (0xffffffff)
+
+enum amdgpu_device_attr_states {
+ ATTR_STATE_UNSUPPORTED = 0,
+ ATTR_STATE_SUPPORTED,
+};
+
+struct amdgpu_device_attr {
+ struct device_attribute dev_attr;
+ enum amdgpu_device_attr_flags flags;
+ int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states);
+
+};
+
+struct amdgpu_device_attr_entry {
+ struct list_head entry;
+ struct amdgpu_device_attr *attr;
+};
+
+#define to_amdgpu_device_attr(_dev_attr) \
+ container_of(_dev_attr, struct amdgpu_device_attr, dev_attr)
+
+#define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .flags = _flags, \
+ ##__VA_ARGS__, }
+
+#define AMDGPU_DEVICE_ATTR(_name, _mode, _flags, ...) \
+ __AMDGPU_DEVICE_ATTR(_name, _mode, \
+ amdgpu_get_##_name, amdgpu_set_##_name, \
+ _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RW(_name, _flags, ...) \
+ AMDGPU_DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
+ _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RO(_name, _flags, ...) \
+ __AMDGPU_DEVICE_ATTR(_name, S_IRUGO, \
+ amdgpu_get_##_name, NULL, \
+ _flags, ##__VA_ARGS__)
+
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index deaa26808841..7301fdcfb8bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -37,11 +37,11 @@
#include "amdgpu_ras.h"
-static void psp_set_funcs(struct amdgpu_device *adev);
-
static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
+static int psp_load_smu_fw(struct psp_context *psp);
+
/*
* Due to DF Cstate management centralized to PMFW, the firmware
* loading sequence will be updated as below:
@@ -80,8 +80,6 @@ static int psp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- psp_set_funcs(adev);
-
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -201,6 +199,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
int index;
int timeout = 2000;
bool ras_intr = false;
+ bool skip_unsupport = false;
mutex_lock(&psp->mutex);
@@ -232,6 +231,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
+ /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */
+ skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev);
+
/* In some cases, psp response status is not 0 even there is no
* problem while the command is submitted. Some version of PSP FW
* doesn't write 0 to that field.
@@ -239,7 +241,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
+ if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
@@ -268,7 +270,7 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
- if (psp_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
else
cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
@@ -662,6 +664,121 @@ int psp_xgmi_initialize(struct psp_context *psp)
return ret;
}
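+
+/* The XGMI TA helpers below share one pattern: stage a command in the
+ * TA shared buffer, invoke the TA, then read the reply back from the
+ * same buffer.
+ */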
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+ /* Invoke xgmi ta to get hive id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return ret;
+
+ *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+ return 0;
+}
+
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+ /* Invoke xgmi ta to get the node id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return ret;
+
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+ return 0;
+}
+
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+ int i;
+ int ret;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ /* Fill in the shared memory with topology information as input */
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to get the topology information */
+ ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+ if (ret)
+ return ret;
+
+ /* Read the output topology information from the shared memory */
+ topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+ topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+ for (i = 0; i < topology->num_nodes; i++) {
+ topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+ topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+ topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+ topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+ }
+
+ return 0;
+}
+
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ int i;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = 1;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to set topology information */
+ return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
+}
+
// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
@@ -744,13 +861,40 @@ static int psp_ras_unload(struct psp_context *psp)
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+ ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret;
+
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
+ DRM_WARN("RAS: Unsupported Interface\n");
+ return -EINVAL;
+ }
+
+ if (!ret) {
+ if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+ dev_warn(psp->adev->dev, "ECC switch disabled\n");
+
+ ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
+ } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
+ dev_warn(psp->adev->dev,
+ "RAS internal register access blocked\n");
+ }
+
+ return ret;
}
int psp_ras_enable_features(struct psp_context *psp,
@@ -834,6 +978,33 @@ static int psp_ras_initialize(struct psp_context *psp)
return 0;
}
+
+int psp_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+ ras_cmd->ras_in_message.trigger_error = *info;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ /* If err_event_athub occurs, the error injection was successful;
+ * however, the return status from the TA is no longer reliable.
+ */
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ return ras_cmd->ras_status;
+}
// ras end
// HDCP start
@@ -884,6 +1055,7 @@ static int psp_hdcp_load(struct psp_context *psp)
if (!ret) {
psp->hdcp_context.hdcp_initialized = true;
psp->hdcp_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->hdcp_context.mutex);
}
kfree(cmd);
@@ -1029,6 +1201,7 @@ static int psp_dtm_load(struct psp_context *psp)
if (!ret) {
psp->dtm_context.dtm_initialized = true;
psp->dtm_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->dtm_context.mutex);
}
kfree(cmd);
@@ -1169,16 +1342,20 @@ static int psp_hw_start(struct psp_context *psp)
}
/*
- * For those ASICs with DF Cstate management centralized
+ * For ASICs with DF Cstate management centralized
* to PMFW, TMR setup should be performed after PMFW
* loaded and before other non-psp firmware loaded.
*/
- if (!psp->pmfw_centralized_cstate_management) {
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
+ if (psp->pmfw_centralized_cstate_management) {
+ ret = psp_load_smu_fw(psp);
+ if (ret)
return ret;
- }
+ }
+
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
}
return 0;
@@ -1355,7 +1532,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
}
static int psp_execute_np_fw_load(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode)
+ struct amdgpu_firmware_info *ucode)
{
int ret = 0;
@@ -1369,64 +1546,96 @@ static int psp_execute_np_fw_load(struct psp_context *psp,
return ret;
}
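+
+/* Load the SMU firmware through PSP; during a RAS-capable GPU reset,
+ * put MP1 into UNLOAD state first so the reload starts clean.
+ */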
+static int psp_load_smu_fw(struct psp_context *psp)
+{
+ int ret;
+ struct amdgpu_device *adev = psp->adev;
+ struct amdgpu_firmware_info *ucode =
+ &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ struct amdgpu_ras *ras = psp->ras.ras;
+
+ if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (adev->in_gpu_reset && ras && ras->supported) {
+ ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
+ if (ret) {
+ DRM_WARN("Failed to set MP1 state prepare for reload\n");
+ }
+ }
+
+ ret = psp_execute_np_fw_load(psp, ucode);
+
+ if (ret)
+ DRM_ERROR("PSP load smu failed!\n");
+
+ return ret;
+}
+
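+/* Decide whether PSP should skip loading a ucode image: missing
+ * images, SMC images handled elsewhere, host-owned firmwares under
+ * SRIOV, and MEC JT images when RLC autoload covers them.
+ */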
+static bool fw_load_skip_check(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode)
+{
+ if (!ucode->fw)
+ return true;
+
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
+ (psp_smu_reload_quirk(psp) ||
+ psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management))
+ return true;
+
+ if (amdgpu_sriov_vf(psp->adev) &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
+ /* skip ucode loading in SRIOV VF */
+ return true;
+
+ if (psp->autoload_supported &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
+ ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
+ /* skip mec JT when autoload is enabled */
+ return true;
+
+ return false;
+}
+
static int psp_np_fw_load(struct psp_context *psp)
{
int i, ret;
struct amdgpu_firmware_info *ucode;
struct amdgpu_device* adev = psp->adev;
- if (psp->autoload_supported ||
- psp->pmfw_centralized_cstate_management) {
- ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
- if (!ucode->fw || amdgpu_sriov_vf(adev))
- goto out;
-
- ret = psp_execute_np_fw_load(psp, ucode);
+ if (psp->autoload_supported &&
+ !psp->pmfw_centralized_cstate_management) {
+ ret = psp_load_smu_fw(psp);
if (ret)
return ret;
}
- if (psp->pmfw_centralized_cstate_management) {
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
- return ret;
- }
- }
-
-out:
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
- if (!ucode->fw)
- continue;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
- (psp_smu_reload_quirk(psp) ||
- psp->autoload_supported ||
- psp->pmfw_centralized_cstate_management))
- continue;
-
- if (amdgpu_sriov_vf(adev) &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
- /*skip ucode loading in SRIOV VF */
+ !fw_load_skip_check(psp, ucode)) {
+ ret = psp_load_smu_fw(psp);
+ if (ret)
+ return ret;
continue;
+ }
- if (psp->autoload_supported &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
- /* skip mec JT when autoload is enabled */
+ if (fw_load_skip_check(psp, ucode))
continue;
psp_print_fw_hdr(psp, ucode);
@@ -1438,17 +1647,12 @@ out:
/* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
- ret = psp_rlc_autoload(psp);
+ ret = psp_rlc_autoload_start(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
return ret;
}
}
-#if 0
- /* check if firmware loaded sucessfully */
- if (!amdgpu_psp_check_fw_loading_status(adev, i))
- return -EINVAL;
-#endif
}
return 0;
@@ -1806,19 +2010,110 @@ int psp_ring_cmd_submit(struct psp_context *psp,
return 0;
}
-static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
- enum AMDGPU_UCODE_ID ucode_type)
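+/* Shared microcode loaders: fetch amdgpu/<chip>_asd.bin or _sos.bin,
+ * validate it, and cache versions, sizes and start addresses in the
+ * psp context for later loading.
+ */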
+int psp_init_asd_microcode(struct psp_context *psp,
+ const char *chip_name)
{
- struct amdgpu_firmware_info *ucode = NULL;
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[30];
+ const struct psp_firmware_header_v1_0 *asd_hdr;
+ int err = 0;
- if (!adev->firmware.fw_size)
- return false;
+ if (!chip_name) {
+ dev_err(adev->dev, "invalid chip name for asd microcode\n");
+ return -EINVAL;
+ }
- ucode = &adev->firmware.ucode[ucode_type];
- if (!ucode->fw || !ucode->ucode_size)
- return false;
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+ return 0;
+out:
+ dev_err(adev->dev, "fail to initialize asd microcode\n");
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
+ return err;
+}
+
+int psp_init_sos_microcode(struct psp_context *psp,
+ const char *chip_name)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[30];
+ const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
+ const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
+ int err = 0;
+
+ if (!chip_name) {
+ dev_err(adev->dev, "invalid chip name for sos microcode\n");
+ return -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
+ err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ if (err)
+ goto out;
+
+ sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
+
+ switch (sos_hdr->header.header_version_major) {
+ case 1:
+ adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+ adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+ adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+ adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
+ adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+ adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr->sos_offset_bytes);
+ if (sos_hdr->header.header_version_minor == 1) {
+ sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
+ adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
+ adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
+ adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
+ }
+ if (sos_hdr->header.header_version_minor == 2) {
+ sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
+ adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "unsupported psp sos firmware\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(adev->dev,
+ "failed to init sos firmware\n");
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
- return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
+ return err;
}
static int psp_set_clockgating_state(void *handle,
@@ -1957,16 +2252,6 @@ static void psp_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
}
-static const struct amdgpu_psp_funcs psp_funcs = {
- .check_fw_loading_status = psp_check_fw_loading_status,
-};
-
-static void psp_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->firmware.funcs)
- adev->firmware.funcs = &psp_funcs;
-}
-
const struct amdgpu_ip_block_version psp_v3_1_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_PSP,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 297435c0c7c1..2a56ad996d83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -93,22 +93,8 @@ struct psp_funcs
enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
- bool (*compare_sram_data)(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
- int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
- int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
- int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
- bool (*support_vmr_ring)(struct psp_context *psp);
- int (*ras_trigger_error)(struct psp_context *psp,
- struct ta_ras_trigger_error_input *info);
- int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
- int (*rlc_autoload_start)(struct psp_context *psp);
int (*mem_training_init)(struct psp_context *psp);
void (*mem_training_fini)(struct psp_context *psp);
int (*mem_training)(struct psp_context *psp, uint32_t ops);
@@ -161,6 +147,7 @@ struct psp_hdcp_context {
struct amdgpu_bo *hdcp_shared_bo;
uint64_t hdcp_shared_mc_addr;
void *hdcp_shared_buf;
+ struct mutex mutex;
};
struct psp_dtm_context {
@@ -169,6 +156,7 @@ struct psp_dtm_context {
struct amdgpu_bo *dtm_shared_bo;
uint64_t dtm_shared_mc_addr;
void *dtm_shared_buf;
+ struct mutex mutex;
};
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
@@ -306,8 +294,6 @@ struct amdgpu_psp_funcs {
#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
-#define psp_compare_sram_data(psp, ucode, type) \
- (psp)->funcs->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \
((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
#define psp_bootloader_load_kdb(psp) \
@@ -318,22 +304,8 @@ struct amdgpu_psp_funcs {
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
-#define psp_support_vmr_ring(psp) \
- ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp, node_id) \
- ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
-#define psp_xgmi_get_hive_id(psp, hive_id) \
- ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
-#define psp_xgmi_get_topology_info(psp, num_device, topology) \
- ((psp)->funcs->xgmi_get_topology_info ? \
- (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_xgmi_set_topology_info(psp, num_device, topology) \
- ((psp)->funcs->xgmi_set_topology_info ? \
- (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_rlc_autoload(psp) \
- ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
#define psp_mem_training_init(psp) \
((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
#define psp_mem_training_fini(psp) \
@@ -341,15 +313,6 @@ struct amdgpu_psp_funcs {
#define psp_mem_training(psp, ops) \
((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
-#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
-
-#define psp_ras_trigger_error(psp, info) \
- ((psp)->funcs->ras_trigger_error ? \
- (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
-#define psp_ras_cure_posion(psp, addr) \
- ((psp)->funcs->ras_cure_posion ? \
- (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
-
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
@@ -377,10 +340,21 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
int psp_xgmi_initialize(struct psp_context *psp);
int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology);
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
+int psp_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info);
+
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
@@ -393,4 +367,8 @@ int psp_ring_cmd_submit(struct psp_context *psp,
uint64_t cmd_buf_mc_addr,
uint64_t fence_mc_addr,
int index);
+int psp_init_asd_microcode(struct psp_context *psp,
+ const char *chip_name);
+int psp_init_sos_microcode(struct psp_context *psp,
+ const char *chip_name);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ab379b44679c..50fe08bf2f72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -80,6 +80,20 @@ atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
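+
+/* RAS error queries and injection are refused until this flag is set;
+ * the sysfs and debugfs entry points check it before doing anything.
+ */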
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ amdgpu_ras_get_context(adev)->error_query_ready = ready;
+}
+
+bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ return amdgpu_ras_get_context(adev)->error_query_ready;
+
+ return false;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -281,8 +295,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
struct ras_debug_if data;
int ret = 0;
- if (amdgpu_ras_intr_triggered()) {
- DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+ if (!amdgpu_ras_get_error_query_ready(adev)) {
+ dev_warn(adev->dev, "RAS WARN: error injection "
+ "currently inaccessible\n");
return size;
}
@@ -310,7 +325,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
/* umc ce/ue error injection for a bad page is not allowed */
if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+ dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
+ "as bad before error injection!\n",
data.inject.address);
break;
}
@@ -399,7 +415,7 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
.head = obj->head,
};
- if (amdgpu_ras_intr_triggered())
+ if (!amdgpu_ras_get_error_query_ready(obj->adev))
return snprintf(buf, PAGE_SIZE,
"Query currently inaccessible\n");
@@ -486,6 +502,29 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
}
/* obj end */
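+
+/* Map a TA RAS status onto a log line: success stays silent,
+ * "not available" warns, and anything else is reported as an error.
+ */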
+void amdgpu_ras_parse_status_code(struct amdgpu_device *adev,
+ const char *invoke_type,
+ const char *block_name,
+ enum ta_ras_status ret)
+{
+ switch (ret) {
+ case TA_RAS_STATUS__SUCCESS:
+ return;
+ case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
+ dev_warn(adev->dev,
+ "RAS WARN: %s %s currently unavailable\n",
+ invoke_type,
+ block_name);
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS ERROR: %s %s error failed ret 0x%X\n",
+ invoke_type,
+ block_name,
+ ret);
+ }
+}
+
/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
struct ras_common_if *head)
@@ -549,19 +588,23 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- union ta_ras_cmd_input info;
+ union ta_ras_cmd_input *info;
int ret;
if (!con)
return -EINVAL;
+ info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
if (!enable) {
- info.disable_features = (struct ta_ras_disable_features_input) {
+ info->disable_features = (struct ta_ras_disable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
} else {
- info.enable_features = (struct ta_ras_enable_features_input) {
+ info->enable_features = (struct ta_ras_enable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
@@ -570,26 +613,33 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* Do not enable if it is not allowed. */
WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
/* Are we alerady in that state we are going to set? */
- if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
- return 0;
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
+ ret = 0;
+ goto out;
+ }
if (!amdgpu_ras_intr_triggered()) {
- ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
- DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
- enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
+ amdgpu_ras_parse_status_code(adev,
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ (enum ta_ras_status)ret);
if (ret == TA_RAS_STATUS__RESET_NEEDED)
- return -EAGAIN;
- return -EINVAL;
+ ret = -EAGAIN;
+ else
+ ret = -EINVAL;
+
+ goto out;
}
}
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
-
- return 0;
+ ret = 0;
+out:
+ kfree(info);
+ return ret;
}
/* Only used in device probe stage and called only once. */
@@ -618,7 +668,8 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
if (ret == -EINVAL) {
ret = __amdgpu_ras_feature_enable(adev, head, 1);
if (!ret)
- DRM_INFO("RAS INFO: %s setup object\n",
+ dev_info(adev->dev,
+ "RAS INFO: %s setup object\n",
ras_block_str(head->block));
}
} else {
@@ -744,17 +795,48 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
info->ce_count = obj->err_data.ce_count;
if (err_data.ce_count) {
- dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
- obj->err_data.ce_count, ras_block_str(info->head.block));
+ dev_info(adev->dev, "%ld correctable hardware errors "
+ "detected in %s block, no user "
+ "action is needed.\n",
+ obj->err_data.ce_count,
+ ras_block_str(info->head.block));
}
if (err_data.ue_count) {
- dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
- obj->err_data.ue_count, ras_block_str(info->head.block));
+ dev_info(adev->dev, "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ obj->err_data.ue_count,
+ ras_block_str(info->head.block));
}
return 0;
}
+/* Trigger XGMI/WAFL error */
+int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+ struct ta_ras_trigger_error_input *block_info)
+{
+ int ret;
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ dev_warn(adev->dev, "Failed to disallow df cstate");
+
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
+ dev_warn(adev->dev, "Failed to disallow XGMI power down");
+
+ ret = psp_ras_trigger_error(&adev->psp, block_info);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret;
+
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+ dev_warn(adev->dev, "Failed to allow XGMI power down");
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "Failed to allow df cstate");
+
+ return ret;
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -788,20 +870,22 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
break;
case AMDGPU_RAS_BLOCK__UMC:
case AMDGPU_RAS_BLOCK__MMHUB:
- case AMDGPU_RAS_BLOCK__XGMI_WAFL:
case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
+ break;
default:
- DRM_INFO("%s error injection is not supported yet\n",
+ dev_info(adev->dev, "%s error injection is not supported yet\n",
ras_block_str(info->head.block));
ret = -EINVAL;
}
- if (ret)
- DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
- ras_block_str(info->head.block),
- ret);
+ amdgpu_ras_parse_status_code(adev,
+ "inject",
+ ras_block_str(info->head.block),
+ (enum ta_ras_status)ret);
return ret;
}
@@ -1430,9 +1514,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
/* Build list of devices to query RAS related errors */
if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
device_list_handle = &hive->device_list;
} else {
+ INIT_LIST_HEAD(&device_list);
list_add_tail(&adev->gmc.xgmi.head, &device_list);
device_list_handle = &device_list;
}
@@ -1535,7 +1620,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
&data->bps[control->num_recs],
true,
save_count)) {
- DRM_ERROR("Failed to save EEPROM table data!");
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
@@ -1563,7 +1648,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
if (amdgpu_ras_eeprom_process_recods(control, bps, false,
control->num_recs)) {
- DRM_ERROR("Failed to load EEPROM table records!");
+ dev_err(adev->dev, "Failed to load EEPROM table records!");
ret = -EIO;
goto out;
}
@@ -1637,7 +1722,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL))
- DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ dev_warn(adev->dev, "RAS WARN: reserve vram for "
+ "retired page %llx fail\n", bp);
data->bps_bo[i] = bo;
data->last_reserved = i + 1;
@@ -1725,7 +1811,7 @@ free:
kfree(*data);
con->eh_data = NULL;
out:
- DRM_WARN("Failed to initialize ras recovery!\n");
+ dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
return ret;
}
@@ -1787,18 +1873,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
return;
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
- DRM_INFO("HBM ECC is active.\n");
+ dev_info(adev->dev, "HBM ECC is active.\n");
*hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
} else
- DRM_INFO("HBM ECC is not presented.\n");
+ dev_info(adev->dev, "HBM ECC is not presented.\n");
if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
- DRM_INFO("SRAM ECC is active.\n");
+ dev_info(adev->dev, "SRAM ECC is active.\n");
*hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
} else
- DRM_INFO("SRAM ECC is not presented.\n");
+ dev_info(adev->dev, "SRAM ECC is not presented.\n");
/* hw_supported needs to be aligned with RAS block mask. */
*hw_supported &= AMDGPU_RAS_BLOCK_MASK;
@@ -1855,7 +1941,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (amdgpu_ras_fs_init(adev))
goto fs_out;
- DRM_INFO("RAS INFO: ras initialized successfully, "
+ dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
@@ -2037,7 +2123,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
return;
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
- DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+ dev_info(adev->dev, "uncorrectable hardware error"
+ "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
amdgpu_ras_reset_gpu(adev);
}
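
The atomic_cmpxchg() guard above is what collapses a burst of ATHUB error interrupts into a single recovery: only the first caller wins the 0 -> 1 transition, and later interrupts see 1 and bail out. A minimal sketch of the same one-shot pattern, with hypothetical helper names:

#include <linux/atomic.h>

static atomic_t error_in_flight = ATOMIC_INIT(0);

static void schedule_recovery(void);	/* hypothetical recovery hook */

static void handle_fatal_error(void)
{
	/* Only the first interrupt observes 0 and swaps in 1; any
	 * interrupt that fires while recovery is pending returns. */
	if (atomic_cmpxchg(&error_in_flight, 0, 1) == 0)
		schedule_recovery();
}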
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 55c3eceb390d..e7df5d8429f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -334,6 +334,8 @@ struct amdgpu_ras {
uint32_t flags;
bool reboot;
struct amdgpu_ras_eeprom_control eeprom_control;
+
+ bool error_query_ready;
};
struct ras_fs_data {
@@ -629,4 +631,6 @@ static inline void amdgpu_ras_intr_cleared(void)
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
+
#endif
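
The INIT_LIST_HEAD() added to amdgpu_ras_do_recovery() above is not cosmetic: a stack-allocated list_head holds garbage until initialized, so list_add_tail() on it would chase wild pointers. A sketch of the single-device fallback path, assuming the same field names as the driver:

#include <linux/list.h>

static void build_single_device_list(struct amdgpu_device *adev,
				     struct list_head *device_list)
{
	/* Point prev/next at the head itself before linking anything. */
	INIT_LIST_HEAD(device_list);
	list_add_tail(&adev->gmc.xgmi.head, device_list);
}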
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a7e1d0425ed0..13ea8ebc421c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -162,11 +162,13 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned max_dw, struct amdgpu_irq_src *irq_src,
- unsigned irq_type)
+ unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int hw_prio)
{
int r, i;
int sched_hw_submission = amdgpu_sched_hw_submission;
+ u32 *num_sched;
+ u32 hw_ip;
/* Set the hw submission limit higher for KIQ because
* it's used for a number of gfx/compute tasks by both
@@ -258,6 +260,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->priority = DRM_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
+ if (!ring->no_scheduler) {
+ hw_ip = ring->funcs->type;
+ num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+ &ring->sched;
+ }
+
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 9a443013d70d..be218754629a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -30,11 +30,15 @@
/* max number of rings */
#define AMDGPU_MAX_RINGS 28
+#define AMDGPU_MAX_HWIP_RINGS 8
#define AMDGPU_MAX_GFX_RINGS 2
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
#define AMDGPU_MAX_UVD_ENC_RINGS 2
+#define AMDGPU_RING_PRIO_DEFAULT 1
+#define AMDGPU_RING_PRIO_MAX AMDGPU_GFX_PIPE_PRIO_MAX
+
/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void *)1ul)
@@ -46,17 +50,30 @@
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
+
enum amdgpu_ring_type {
- AMDGPU_RING_TYPE_GFX,
- AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_RING_TYPE_SDMA,
- AMDGPU_RING_TYPE_UVD,
- AMDGPU_RING_TYPE_VCE,
- AMDGPU_RING_TYPE_KIQ,
- AMDGPU_RING_TYPE_UVD_ENC,
- AMDGPU_RING_TYPE_VCN_DEC,
- AMDGPU_RING_TYPE_VCN_ENC,
- AMDGPU_RING_TYPE_VCN_JPEG
+ AMDGPU_RING_TYPE_GFX = AMDGPU_HW_IP_GFX,
+ AMDGPU_RING_TYPE_COMPUTE = AMDGPU_HW_IP_COMPUTE,
+ AMDGPU_RING_TYPE_SDMA = AMDGPU_HW_IP_DMA,
+ AMDGPU_RING_TYPE_UVD = AMDGPU_HW_IP_UVD,
+ AMDGPU_RING_TYPE_VCE = AMDGPU_HW_IP_VCE,
+ AMDGPU_RING_TYPE_UVD_ENC = AMDGPU_HW_IP_UVD_ENC,
+ AMDGPU_RING_TYPE_VCN_DEC = AMDGPU_HW_IP_VCN_DEC,
+ AMDGPU_RING_TYPE_VCN_ENC = AMDGPU_HW_IP_VCN_ENC,
+ AMDGPU_RING_TYPE_VCN_JPEG = AMDGPU_HW_IP_VCN_JPEG,
+ AMDGPU_RING_TYPE_KIQ
+};
+
+enum amdgpu_ib_pool_type {
+ /* Normal submissions to the top of the pipeline. */
+ AMDGPU_IB_POOL_DELAYED,
+ /* Immediate submissions to the bottom of the pipeline. */
+ AMDGPU_IB_POOL_IMMEDIATE,
+ /* Direct submission to the ring buffer during init and reset. */
+ AMDGPU_IB_POOL_DIRECT,
+
+ AMDGPU_IB_POOL_MAX
};
struct amdgpu_device;
@@ -65,6 +82,11 @@ struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
+struct amdgpu_sched {
+ u32 num_scheds;
+ struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS];
+};
+
/*
* Fences.
*/
@@ -96,7 +118,8 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
unsigned flags);
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+ uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
@@ -159,17 +182,20 @@ struct amdgpu_ring_funcs {
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
- void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
+ void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
- void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+ void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
+ bool secure);
/* Try to soft recover the ring to make the fence signal */
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
+ void (*emit_mem_sync)(struct amdgpu_ring *ring);
};
struct amdgpu_ring {
@@ -214,12 +240,12 @@ struct amdgpu_ring {
unsigned vm_inv_eng;
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
+ bool no_scheduler;
atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
- bool has_high_prio;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
@@ -241,11 +267,11 @@ struct amdgpu_ring {
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
-#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
+#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
-#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
+#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
@@ -257,8 +283,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, struct amdgpu_irq_src *irq_src,
- unsigned irq_type);
+ unsigned int ring_size, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int prio);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t val0,
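
amdgpu_ring_init() now publishes each ring's drm_gpu_scheduler into a per-HW-IP, per-priority table (adev->gpu_sched[hw_ip][hw_prio]), so context creation can hand entities the whole set of schedulers for an IP/priority pair instead of per-engine arrays. A simplified sketch of that registration, with trimmed types:

#define MAX_HWIP_RINGS 8	/* mirrors AMDGPU_MAX_HWIP_RINGS */

struct drm_gpu_scheduler;	/* opaque for this sketch */

struct sched_slot {
	unsigned int num_scheds;
	struct drm_gpu_scheduler *sched[MAX_HWIP_RINGS];
};

static void register_ring_sched(struct sched_slot *slot,
				struct drm_gpu_scheduler *sched)
{
	/* Append this ring's scheduler; entities created later can
	 * load-balance across every ring of the same IP and priority. */
	slot->sched[slot->num_scheds++] = sched;
}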
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 4b352206354b..e5b8fb8e75c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -61,8 +61,6 @@ struct amdgpu_sdma_ras_funcs {
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
- struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
- uint32_t num_sdma_sched;
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
struct amdgpu_irq_src ecc_irq;
@@ -91,7 +89,8 @@ struct amdgpu_buffer_funcs {
/* dst addr in bytes */
uint64_t dst_offset,
/* number of byte to transfer */
- uint32_t byte_count);
+ uint32_t byte_count,
+ bool tmz);
/* maximum bytes in a single operation */
uint32_t fill_max_bytes;
@@ -109,7 +108,7 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
struct amdgpu_sdma_instance *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index b86392253696..b87ca171986a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -249,6 +249,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
+ /* Never sync to VM updates either. */
+ if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+ owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ continue;
+
/* Ignore fences depending on the sync mode */
switch (mode) {
case AMDGPU_SYNC_ALWAYS:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b158230af8db..2f4d5ca9894f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -44,7 +44,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+ n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
if (adev->rings[i])
n -= adev->rings[i]->ring_size;
@@ -124,7 +124,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]);
r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
- size, NULL, &fence, false, false);
+ size, NULL, &fence, false, false, false);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -170,7 +170,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(vram_obj);
r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
- size, NULL, &fence, false, false);
+ size, NULL, &fence, false, false, false);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 63e734a125fb..5da20fc166d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -35,7 +35,7 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
@@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
(unsigned long)__entry->value)
);
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6309ff72bd78..9cbecd5ba814 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -62,11 +62,6 @@
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem, unsigned num_pages,
- uint64_t offset, unsigned window,
- struct amdgpu_ring *ring,
- uint64_t *addr);
/**
* amdgpu_init_mem_type - Initialize a memory manager for a specific type of
@@ -277,7 +272,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
*
*/
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
- unsigned long *offset)
+ uint64_t *offset)
{
struct drm_mm_node *mm_node = mem->mm_node;
@@ -289,91 +284,191 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
}
/**
+ * amdgpu_ttm_map_buffer - Map memory into the GART windows
+ * @bo: buffer object to map
+ * @mem: memory object to map
+ * @mm_node: drm_mm node object to map
+ * @num_pages: number of pages to map
+ * @offset: offset into @mm_node where to start
+ * @window: which GART window to use
+ * @ring: DMA ring to use for the copy
+ * @tmz: if we should set up a TMZ-enabled mapping
+ * @addr: resulting address inside the MC address space
+ *
+ * Setup one of the GART windows to access a specific piece of memory or return
+ * the physical address for local memory.
+ */
+static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem,
+ struct drm_mm_node *mm_node,
+ unsigned num_pages, uint64_t offset,
+ unsigned window, struct amdgpu_ring *ring,
+ bool tmz, uint64_t *addr)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ unsigned num_dw, num_bytes;
+ struct dma_fence *fence;
+ uint64_t src_addr, dst_addr;
+ void *cpu_addr;
+ uint64_t flags;
+ unsigned int i;
+ int r;
+
+ BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+ AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+ /* Map only what can't be accessed directly */
+ if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
+ *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+ return 0;
+ }
+
+ *addr = adev->gmc.gart_start;
+ *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE;
+ *addr += offset & ~PAGE_MASK;
+
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+ num_bytes = num_pages * 8;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_DELAYED, &job);
+ if (r)
+ return r;
+
+ src_addr = num_dw * 4;
+ src_addr += job->ibs[0].gpu_addr;
+
+ dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+ dst_addr, num_bytes, false);
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
+ if (tmz)
+ flags |= AMDGPU_PTE_TMZ;
+
+ cpu_addr = &job->ibs[0].ptr[num_dw];
+
+ if (mem->mem_type == TTM_PL_TT) {
+ struct ttm_dma_tt *dma;
+ dma_addr_t *dma_address;
+
+ dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+ dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+ r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+ cpu_addr);
+ if (r)
+ goto error_free;
+ } else {
+ dma_addr_t dma_address;
+
+ dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+ dma_address += adev->vm_manager.vram_base_offset;
+
+ for (i = 0; i < num_pages; ++i) {
+ r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
+ &dma_address, flags, cpu_addr);
+ if (r)
+ goto error_free;
+
+ dma_address += PAGE_SIZE;
+ }
+ }
+
+ r = amdgpu_job_submit(job, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ if (r)
+ goto error_free;
+
+ dma_fence_put(fence);
+
+ return r;
+
+error_free:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
* amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * @adev: amdgpu device
+ * @src: buffer/address where to read from
+ * @dst: buffer/address where to write to
+ * @size: number of bytes to copy
+ * @tmz: if a secure copy should be used
+ * @resv: resv object to sync to
+ * @f: Returns the last fence if multiple jobs are submitted.
*
* The function copies @size bytes from {src->mem + src->offset} to
* {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
* move and different for a BO to BO copy.
*
- * @f: Returns the last fence if multiple jobs are submitted.
*/
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- struct amdgpu_copy_mem *src,
- struct amdgpu_copy_mem *dst,
- uint64_t size,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
struct dma_resv *resv,
struct dma_fence **f)
{
+ const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE);
+
+ uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *src_mm, *dst_mm;
- uint64_t src_node_start, dst_node_start, src_node_size,
- dst_node_size, src_page_offset, dst_page_offset;
struct dma_fence *fence = NULL;
int r = 0;
- const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
- AMDGPU_GPU_PAGE_SIZE);
if (!adev->mman.buffer_funcs_enabled) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
- src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
- src->offset;
- src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
- src_page_offset = src_node_start & (PAGE_SIZE - 1);
+ src_offset = src->offset;
+ src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+ src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
- dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
- dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
- dst->offset;
- dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
- dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+ dst_offset = dst->offset;
+ dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+ dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
mutex_lock(&adev->mman.gtt_window_lock);
while (size) {
- unsigned long cur_size;
- uint64_t from = src_node_start, to = dst_node_start;
+ uint32_t src_page_offset = src_offset & ~PAGE_MASK;
+ uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
struct dma_fence *next;
+ uint32_t cur_size;
+ uint64_t from, to;
/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
* begins at an offset, then adjust the size accordingly
*/
- cur_size = min3(min(src_node_size, dst_node_size), size,
- GTT_MAX_BYTES);
- if (cur_size + src_page_offset > GTT_MAX_BYTES ||
- cur_size + dst_page_offset > GTT_MAX_BYTES)
- cur_size -= max(src_page_offset, dst_page_offset);
-
- /* Map only what needs to be accessed. Map src to window 0 and
- * dst to window 1
- */
- if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
- r = amdgpu_map_buffer(src->bo, src->mem,
- PFN_UP(cur_size + src_page_offset),
- src_node_start, 0, ring,
- &from);
- if (r)
- goto error;
- /* Adjust the offset because amdgpu_map_buffer returns
- * start of mapped page
- */
- from += src_page_offset;
- }
+ cur_size = max(src_page_offset, dst_page_offset);
+ cur_size = min(min3(src_node_size, dst_node_size, size),
+ (uint64_t)(GTT_MAX_BYTES - cur_size));
+
+ /* Map src to window 0 and dst to window 1. */
+ r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+ PFN_UP(cur_size + src_page_offset),
+ src_offset, 0, ring, tmz, &from);
+ if (r)
+ goto error;
- if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
- r = amdgpu_map_buffer(dst->bo, dst->mem,
- PFN_UP(cur_size + dst_page_offset),
- dst_node_start, 1, ring,
- &to);
- if (r)
- goto error;
- to += dst_page_offset;
- }
+ r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+ PFN_UP(cur_size + dst_page_offset),
+ dst_offset, 1, ring, tmz, &to);
+ if (r)
+ goto error;
r = amdgpu_copy_buffer(ring, from, to, cur_size,
- resv, &next, false, true);
+ resv, &next, false, true, tmz);
if (r)
goto error;
@@ -386,23 +481,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
src_node_size -= cur_size;
if (!src_node_size) {
- src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
- src->mem);
- src_node_size = (src_mm->size << PAGE_SHIFT);
- src_page_offset = 0;
+ ++src_mm;
+ src_node_size = src_mm->size << PAGE_SHIFT;
+ src_offset = 0;
} else {
- src_node_start += cur_size;
- src_page_offset = src_node_start & (PAGE_SIZE - 1);
+ src_offset += cur_size;
}
+
dst_node_size -= cur_size;
if (!dst_node_size) {
- dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
- dst->mem);
- dst_node_size = (dst_mm->size << PAGE_SHIFT);
- dst_page_offset = 0;
+ ++dst_mm;
+ dst_node_size = dst_mm->size << PAGE_SHIFT;
+ dst_offset = 0;
} else {
- dst_node_start += cur_size;
- dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+ dst_offset += cur_size;
}
}
error:
@@ -425,6 +517,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_copy_mem src, dst;
struct dma_fence *fence = NULL;
int r;
@@ -438,14 +531,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
new_mem->num_pages << PAGE_SHIFT,
+ amdgpu_bo_encrypted(abo),
bo->base.resv, &fence);
if (r)
goto error;
/* clear the space being freed */
if (old_mem->mem_type == TTM_PL_VRAM &&
- (ttm_to_amdgpu_bo(bo)->flags &
- AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+ (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
struct dma_fence *wipe_fence = NULL;
r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
@@ -742,8 +835,8 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
+ uint64_t offset = (page_offset << PAGE_SHIFT);
struct drm_mm_node *mm;
- unsigned long offset = (page_offset << PAGE_SHIFT);
mm = amdgpu_find_mm_node(&bo->mem, &offset);
return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
@@ -766,18 +859,6 @@ struct amdgpu_ttm_tt {
};
#ifdef CONFIG_DRM_AMDGPU_USERPTR
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
/**
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
* memory and start HMM tracking CPU page table update
@@ -816,18 +897,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
goto out;
}
range->notifier = &bo->notifier;
- range->flags = hmm_range_flags;
- range->values = hmm_range_values;
- range->pfn_shift = PAGE_SHIFT;
range->start = bo->notifier.interval_tree.start;
range->end = bo->notifier.interval_tree.last + 1;
- range->default_flags = hmm_range_flags[HMM_PFN_VALID];
+ range->default_flags = HMM_PFN_REQ_FAULT;
if (!amdgpu_ttm_tt_is_readonly(ttm))
- range->default_flags |= range->flags[HMM_PFN_WRITE];
+ range->default_flags |= HMM_PFN_REQ_WRITE;
- range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
- GFP_KERNEL);
- if (unlikely(!range->pfns)) {
+ range->hmm_pfns = kvmalloc_array(ttm->num_pages,
+ sizeof(*range->hmm_pfns), GFP_KERNEL);
+ if (unlikely(!range->hmm_pfns)) {
r = -ENOMEM;
goto out_free_ranges;
}
@@ -852,27 +930,23 @@ retry:
down_read(&mm->mmap_sem);
r = hmm_range_fault(range);
up_read(&mm->mmap_sem);
- if (unlikely(r <= 0)) {
+ if (unlikely(r)) {
/*
* FIXME: This timeout should encompass the retry from
* mmu_interval_read_retry() as well.
*/
- if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
+ if (r == -EBUSY && !time_after(jiffies, timeout))
goto retry;
goto out_free_pfns;
}
- for (i = 0; i < ttm->num_pages; i++) {
- /* FIXME: The pages cannot be touched outside the notifier_lock */
- pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
- if (unlikely(!pages[i])) {
- pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
- i, range->pfns[i]);
- r = -ENOMEM;
-
- goto out_free_pfns;
- }
- }
+ /*
+ * Due to default_flags, all pages are HMM_PFN_VALID or
+ * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
+ * the notifier_lock, and mmu_interval_read_retry() must be done first.
+ */
+ for (i = 0; i < ttm->num_pages; i++)
+ pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
gtt->range = range;
mmput(mm);
@@ -882,7 +956,7 @@ retry:
out_unlock:
up_read(&mm->mmap_sem);
out_free_pfns:
- kvfree(range->pfns);
+ kvfree(range->hmm_pfns);
out_free_ranges:
kfree(range);
out:
@@ -907,7 +981,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
gtt->userptr, ttm->num_pages);
- WARN_ONCE(!gtt->range || !gtt->range->pfns,
+ WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
"No user pages to check\n");
if (gtt->range) {
@@ -917,7 +991,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
*/
r = mmu_interval_read_retry(gtt->range->notifier,
gtt->range->notifier_seq);
- kvfree(gtt->range->pfns);
+ kvfree(gtt->range->hmm_pfns);
kfree(gtt->range);
gtt->range = NULL;
}
@@ -1008,8 +1082,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (ttm->pages[i] !=
- hmm_device_entry_to_page(gtt->range,
- gtt->range->pfns[i]))
+ hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
break;
}
@@ -1027,6 +1100,9 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
+ if (amdgpu_bo_encrypted(abo))
+ flags |= AMDGPU_PTE_TMZ;
+
if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
uint64_t page_idx = 1;
@@ -1539,6 +1615,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
switch (bo->mem.mem_type) {
case TTM_PL_TT:
+ if (amdgpu_bo_is_amdgpu_bo(bo) &&
+ amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
+ return false;
return true;
case TTM_PL_VRAM:
@@ -1587,8 +1666,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO;
- nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
- pos = (nodes->start << PAGE_SHIFT) + offset;
+ pos = offset;
+ nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
+ pos += (nodes->start << PAGE_SHIFT);
while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3;
@@ -1857,17 +1937,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
/*
- * reserve one TMR (64K) memory at the top of VRAM which holds
+ * reserve TMR memory at the top of VRAM which holds
* IP Discovery data and is protected by PSP.
*/
- r = amdgpu_bo_create_kernel_at(adev,
- adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
- DISCOVERY_TMR_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->discovery_memory,
- NULL);
- if (r)
- return r;
+ if (adev->discovery_tmr_size > 0) {
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->gmc.real_vram_size - adev->discovery_tmr_size,
+ adev->discovery_tmr_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->discovery_memory,
+ NULL);
+ if (r)
+ return r;
+ }
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -2015,75 +2097,14 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem, unsigned num_pages,
- uint64_t offset, unsigned window,
- struct amdgpu_ring *ring,
- uint64_t *addr)
-{
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- struct amdgpu_device *adev = ring->adev;
- struct ttm_tt *ttm = bo->ttm;
- struct amdgpu_job *job;
- unsigned num_dw, num_bytes;
- dma_addr_t *dma_address;
- struct dma_fence *fence;
- uint64_t src_addr, dst_addr;
- uint64_t flags;
- int r;
-
- BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
- AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-
- *addr = adev->gmc.gart_start;
- *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
- AMDGPU_GPU_PAGE_SIZE;
-
- num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- num_bytes = num_pages * 8;
-
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
- if (r)
- return r;
-
- src_addr = num_dw * 4;
- src_addr += job->ibs[0].gpu_addr;
-
- dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
- dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
- amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
- dst_addr, num_bytes);
-
- amdgpu_ring_pad_ib(ring, &job->ibs[0]);
- WARN_ON(job->ibs[0].length_dw > num_dw);
-
- dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
- flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
- r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
- &job->ibs[0].ptr[num_dw]);
- if (r)
- goto error_free;
-
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_free;
-
- dma_fence_put(fence);
-
- return r;
-
-error_free:
- amdgpu_job_free(job);
- return r;
-}
-
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush)
+ bool vm_needs_flush, bool tmz)
{
+ enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -2101,7 +2122,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
if (r)
return r;
@@ -2123,7 +2144,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
- dst_offset, cur_size_in_bytes);
+ dst_offset, cur_size_in_bytes, tmz);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
@@ -2190,7 +2211,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
/* for IB padding */
num_dw += 64;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
return r;
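
The window addressing in amdgpu_ttm_map_buffer() above reduces to: start of the GART, plus a fixed-size slice per window, plus the byte offset inside the first mapped page. A standalone sketch of that math; the transfer-size constant is an assumption, not taken from the header:

#include <stdint.h>

#define GPU_PAGE_SIZE      4096ULL
#define MAX_TRANSFER_PAGES 512ULL	/* stand-in for AMDGPU_GTT_MAX_TRANSFER_SIZE */

static uint64_t window_addr(uint64_t gart_start, unsigned int window,
			    uint64_t offset)
{
	uint64_t addr = gart_start;

	/* Each window owns a fixed MAX_TRANSFER_PAGES slice of the GART. */
	addr += (uint64_t)window * MAX_TRANSFER_PAGES * GPU_PAGE_SIZE;
	/* Keep the sub-page offset so unaligned copies still line up. */
	addr += offset & (GPU_PAGE_SIZE - 1);
	return addr;
}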
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index bd05bbb4878d..4351d02644a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -24,8 +24,9 @@
#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__
-#include "amdgpu.h"
+#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
+#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
@@ -74,6 +75,15 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+ struct ttm_mem_reg *mem,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt);
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table *sgt);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
@@ -87,11 +97,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush);
+ bool vm_needs_flush, bool tmz);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- struct amdgpu_copy_mem *src,
- struct amdgpu_copy_mem *dst,
- uint64_t size,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
struct dma_resv *resv,
struct dma_fence **f);
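
Callers of the reworked amdgpu_ttm_copy_mem_to_mem() are expected to derive the new tmz argument from the BO, as amdgpu_move_blit() does. A hedged usage sketch (error handling trimmed):

static int copy_bo_maybe_secure(struct amdgpu_device *adev,
				struct amdgpu_bo *abo,
				const struct amdgpu_copy_mem *src,
				const struct amdgpu_copy_mem *dst,
				uint64_t size, struct dma_fence **f)
{
	/* An encrypted BO forces a TMZ copy so the data never leaves
	 * the protected domain in the clear. */
	return amdgpu_ttm_copy_mem_to_mem(adev, src, dst, size,
					  amdgpu_bo_encrypted(abo),
					  abo->tbo.base.resv, f);
}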
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 9ef312428231..65bb25e31d45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -403,8 +403,8 @@ FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 9dd51f0d2c11..af1b1ccf613c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -110,7 +110,8 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
* even NOMEM error is encountered
*/
if(!err_data->err_addr)
- DRM_WARN("Failed to alloc memory for umc error address record!\n");
+ dev_warn(adev->dev, "Failed to alloc memory for "
+ "umc error address record!\n");
/* umc query_ras_error_address is also responsible for clearing
* error status
@@ -120,10 +121,14 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
/* only uncorrectable error needs gpu reset */
if (err_data->ue_count) {
+ dev_info(adev->dev, "%ld uncorrectable hardware errors "
+ "detected in UMC block\n",
+ err_data->ue_count);
+
if (err_data->err_addr_cnt &&
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt))
- DRM_WARN("Failed to add ras bad page!\n");
+ dev_warn(adev->dev, "Failed to add ras bad page!\n");
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 5fd32ad1c575..5100ebe8858d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1056,7 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err;
}
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 59ddba137946..ecaa2d7483b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -446,7 +446,8 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -524,7 +525,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence *f = NULL;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
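
The UVD/VCE/VCN hunks above all repeat the same idiom when allocating an IB: direct submissions bypass the scheduler (init, reset), so they must come from the pool that remains usable in those paths. As a one-line sketch:

static enum amdgpu_ib_pool_type pick_ib_pool(bool direct)
{
	/* Direct submissions cannot wait on the scheduler, so they
	 * draw from the pool reserved for init/reset paths. */
	return direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_DELAYED;
}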
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a41272fbcba2..2badbc0355f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -56,19 +56,23 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
- unsigned long bo_size;
+ unsigned long bo_size, fw_shared_bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned char fw_check;
int i, r;
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
+ mutex_init(&adev->vcn.vcn_pg_lock);
+ atomic_set(&adev->vcn.total_submission_cnt, 0);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
switch (adev->asic_type) {
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
fw_name = FIRMWARE_RAVEN2;
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
fw_name = FIRMWARE_PICASSO;
else
fw_name = FIRMWARE_RAVEN;
@@ -178,6 +182,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
return r;
}
}
+
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
+ &adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
+ return r;
+ }
+
+ fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+ adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
}
return 0;
@@ -192,6 +207,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+
+ kvfree(adev->vcn.inst[j].saved_shm_bo);
+ amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
+ &adev->vcn.inst[j].fw_shared_gpu_addr,
+ (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
+
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
&adev->vcn.inst[j].dpg_sram_gpu_addr,
@@ -210,6 +231,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
}
release_firmware(adev->vcn.fw);
+ mutex_destroy(&adev->vcn.vcn_pg_lock);
return 0;
}
@@ -236,6 +258,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
return -ENOMEM;
memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+
+ if (adev->vcn.inst[i].fw_shared_bo == NULL)
+ return 0;
+
+ if (!adev->vcn.inst[i].saved_shm_bo)
+ return -ENOMEM;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+ ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+ memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
}
return 0;
}
@@ -273,6 +306,17 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
}
memset_io(ptr, 0, size);
}
+
+ if (adev->vcn.inst[i].fw_shared_bo == NULL)
+ return -EINVAL;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+ ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+ if (adev->vcn.inst[i].saved_shm_bo != NULL)
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
+ else
+ memset_io(ptr, 0, size);
}
return 0;
}
@@ -295,7 +339,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
- if (fence[j])
+ if (fence[j] ||
+ unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
@@ -307,8 +352,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += fence[j];
}
- if (fences == 0) {
- amdgpu_gfx_off_ctrl(adev, true);
+ if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
} else {
@@ -319,36 +363,46 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (set_clocks) {
- amdgpu_gfx_off_ctrl(adev, false);
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
- }
+ atomic_inc(&adev->vcn.total_submission_cnt);
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ mutex_lock(&adev->vcn.vcn_pg_lock);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
- unsigned int fences = 0;
- unsigned int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
- }
- if (fences)
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+ atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else
- new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ } else {
+ unsigned int fences = 0;
+ unsigned int i;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
- new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+
+ if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ }
adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
}
+ mutex_unlock(&adev->vcn.vcn_pg_lock);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
+ if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+ atomic_dec(&ring->adev->vcn.total_submission_cnt);
+
schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
@@ -390,7 +444,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 64,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
goto err;
@@ -557,7 +612,8 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -610,7 +666,8 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 6fe057329de2..90aa12b22725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -132,6 +132,13 @@
} \
} while (0)
+#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
+
+enum fw_queue_mode {
+ FW_QUEUE_RING_RESET = 1,
+ FW_QUEUE_DPG_HOLD_OFF = 2,
+};
+
enum engine_status_constants {
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
@@ -179,10 +186,15 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
+ struct amdgpu_bo *fw_shared_bo;
struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
+ atomic_t dpg_enc_submission_cnt;
+ void *fw_shared_cpu_addr;
+ uint64_t fw_shared_gpu_addr;
+ void *saved_shm_bo;
};
struct amdgpu_vcn {
@@ -196,16 +208,28 @@ struct amdgpu_vcn {
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
- struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
- struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
- uint32_t num_vcn_enc_sched;
- uint32_t num_vcn_dec_sched;
+ struct mutex vcn_pg_lock;
+ atomic_t total_submission_cnt;
unsigned harvest_config;
int (*pause_dpg_mode)(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
};
+struct amdgpu_fw_shared_multi_queue {
+ uint8_t decode_queue_mode;
+ uint8_t encode_generalpurpose_queue_mode;
+ uint8_t encode_lowlatency_queue_mode;
+ uint8_t encode_realtime_queue_mode;
+ uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared {
+ uint32_t present_flag_0;
+ uint8_t pad[53];
+ struct amdgpu_fw_shared_multi_queue multi_queue;
+} __attribute__((__packed__));
+
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
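
VCN power gating now counts in-flight submissions instead of sampling fences: begin_use increments total_submission_cnt (plus the per-instance encoder count for DPG), end_use decrements, and the idle worker only gates once everything has drained. A minimal sketch of that refcount shape, with hypothetical helpers:

#include <linux/atomic.h>

static atomic_t submissions = ATOMIC_INIT(0);

static void power_ungate(void);		/* hypothetical */
static void schedule_idle_check(void);	/* hypothetical; gates at zero */

static void begin_use(void)
{
	/* Raise the count first so a racing idle worker cannot gate. */
	atomic_inc(&submissions);
	power_ungate();
}

static void end_use(void)
{
	atomic_dec(&submissions);
	schedule_idle_check();
}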
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index adc813cde8e2..f3b38c9e04ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -38,7 +38,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
/* enable virtual display */
- adev->mode_info.num_crtc = 1;
+ if (adev->mode_info.num_crtc == 0)
+ adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
@@ -59,7 +60,10 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
ref, mask);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -81,6 +85,9 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
return;
+failed_undo:
+ amdgpu_ring_undo(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}
@@ -152,6 +159,19 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_init_data)
+ virt->ops->req_init_data(adev);
+
+ if (adev->virt.req_init_data_ver > 0)
+ DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+ else
+ DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+}
+
/**
* amdgpu_virt_wait_reset() - wait for reset gpu completed
* @amdgpu: amdgpu device.
@@ -287,3 +307,82 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
}
+
+void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+{
+ uint32_t reg;
+
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_ARCTURUS:
+ reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
+ break;
+ default: /* other chip doesn't support SRIOV */
+ reg = 0;
+ break;
+ }
+
+ if (reg & 1)
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+ if (reg & 0x80000000)
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+ if (!reg) {
+ if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
+}
+
+bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_is_debug(adev);
+}
+
+bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_is_normal(adev);
+}
+
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev) ||
+ amdgpu_virt_access_debugfs_is_kiq(adev))
+ return 0;
+
+ if (amdgpu_virt_access_debugfs_is_mmio(adev))
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
+}
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
+{
+ enum amdgpu_sriov_vf_mode mode;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ mode = SRIOV_VF_MODE_ONE_VF;
+ else
+ mode = SRIOV_VF_MODE_MULTI_VF;
+ } else {
+ mode = SRIOV_VF_MODE_BARE_METAL;
+ }
+
+ return mode;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f0128f745bd2..b90e822cebd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -30,6 +30,17 @@
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
+/* all asic after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
+enum amdgpu_sriov_vf_mode {
+ SRIOV_VF_MODE_BARE_METAL = 0,
+ SRIOV_VF_MODE_ONE_VF,
+ SRIOV_VF_MODE_MULTI_VF,
+};
+
struct amdgpu_mm_table {
struct amdgpu_bo *bo;
uint32_t *cpu_addr;
@@ -54,6 +65,7 @@ struct amdgpu_vf_error_buffer {
struct amdgpu_virt_ops {
int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+ int (*req_init_data)(struct amdgpu_device *adev);
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
@@ -83,6 +95,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+ /* MM bandwidth */
+ AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
/* PP ONE VF MODE in GIM */
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
};
@@ -256,6 +270,8 @@ struct amdgpu_virt {
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
uint32_t reg_access_mode;
+ int req_init_data_ver;
+ bool tdr_debug;
};
#define amdgpu_sriov_enabled(adev) \
@@ -287,6 +303,10 @@ static inline bool is_virtual_machine(void)
#define amdgpu_sriov_is_pp_one_vf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_is_debug(adev) \
+ ((!adev->in_gpu_reset) && adev->virt.tdr_debug)
+#define amdgpu_sriov_is_normal(adev) \
+ ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
@@ -296,6 +316,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
@@ -303,4 +324,11 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+
+bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);
#endif
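
amdgpu_detect_virtualization() above decodes the IOV function-identifier register: bit 0 marks this function as an SR-IOV VF, bit 31 says IOV is enabled, and a zero read inside a VM means plain passthrough. A sketch of the decode, reusing the caps flags from this header:

static u32 decode_virt_caps(u32 reg, bool in_vm)
{
	u32 caps = 0;

	if (reg & 1)			/* function is an SR-IOV VF */
		caps |= AMDGPU_SRIOV_CAPS_IS_VF;
	if (reg & 0x80000000)		/* SR-IOV enabled on the device */
		caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	if (!reg && in_vm)		/* VM but no SR-IOV: passthrough */
		caps |= AMDGPU_PASSTHROUGH_MODE;

	return caps;
}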
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6d9252a27916..7417754e9141 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -82,7 +82,7 @@ struct amdgpu_prt_cb {
struct dma_fence_cb cb;
};
-/**
+/*
* vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
* happens while holding this lock anywhere to prevent deadlocks when
* an MMU notifier runs in reclaim-FS context.
@@ -726,7 +726,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
- * @direct: use a direct update
+ * @immediate: use an immediate update
*
* Root PD needs to be reserved when calling this.
*
@@ -736,7 +736,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
- bool direct)
+ bool immediate)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
@@ -795,7 +795,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.direct = direct;
+ params.immediate = immediate;
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
@@ -850,11 +850,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
* @adev: amdgpu_device pointer
* @vm: requesting vm
* @level: the page table level
- * @direct: use a direct update
+ * @immediate: use an immediate update
* @bp: resulting BO allocation parameters
*/
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, bool direct,
+ int level, bool immediate,
struct amdgpu_bo_param *bp)
{
memset(bp, 0, sizeof(*bp));
@@ -870,7 +870,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
bp->type = ttm_bo_type_kernel;
- bp->no_wait_gpu = direct;
+ bp->no_wait_gpu = immediate;
if (vm->root.base.bo)
bp->resv = vm->root.base.bo->tbo.base.resv;
}
@@ -881,7 +881,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
* @cursor: Which page table to allocate
- * @direct: use a direct update
+ * @immediate: use an immediate update
*
* Make sure a specific page table or directory is allocated.
*
@@ -892,7 +892,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_vm_pt_cursor *cursor,
- bool direct)
+ bool immediate)
{
struct amdgpu_vm_pt *entry = cursor->entry;
struct amdgpu_bo_param bp;
@@ -913,7 +913,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (entry->base.bo)
return 0;
- amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
+ amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp);
r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
@@ -925,7 +925,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
+ r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
if (r)
goto error_free_pt;
@@ -1276,7 +1276,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @direct: submit directly to the paging queue
+ * @immediate: submit immediately to the paging queue
*
* Makes sure all directories are up to date.
*
@@ -1284,7 +1284,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
* 0 for success, error for failure.
*/
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, bool direct)
+ struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
int r;
@@ -1295,7 +1295,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.direct = direct;
+ params.immediate = immediate;
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
@@ -1446,20 +1446,24 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ if (!params->unlocked) {
/* make sure that the page tables covering the
* address range are actually allocated
*/
r = amdgpu_vm_alloc_pts(params->adev, params->vm,
- &cursor, params->direct);
+ &cursor, params->immediate);
if (r)
return r;
}
shift = amdgpu_vm_level_shift(adev, cursor.level);
parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
- if (adev->asic_type < CHIP_VEGA10 &&
- (flags & AMDGPU_PTE_VALID)) {
+ if (params->unlocked) {
+ /* Unlocked updates are only allowed on the leaves */
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (adev->asic_type < CHIP_VEGA10 &&
+ (flags & AMDGPU_PTE_VALID)) {
/* No huge page support before GMC v9 */
if (cursor.level != AMDGPU_VM_PTB) {
if (!amdgpu_vm_pt_descendant(adev, &cursor))
@@ -1557,7 +1561,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @direct: direct submission in a page fault
+ * @immediate: immediate submission in a page fault
+ * @unlocked: unlocked invalidation during MM callback
* @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
@@ -1572,8 +1577,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, bool direct,
- struct dma_resv *resv,
+ struct amdgpu_vm *vm, bool immediate,
+ bool unlocked, struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
dma_addr_t *pages_addr,
@@ -1586,8 +1591,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.direct = direct;
+ params.immediate = immediate;
params.pages_addr = pages_addr;
+ params.unlocked = unlocked;
/* Implicitly sync to command submissions in the same VM before
* unmapping. Sync to moving fences before mapping.
@@ -1603,11 +1609,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
goto error_unlock;
}
- if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
- struct amdgpu_bo *root = vm->root.base.bo;
+ if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
+ struct dma_fence *tmp = dma_fence_get_stub();
- if (!dma_fence_is_signaled(vm->last_direct))
- amdgpu_bo_fence(root, vm->last_direct, true);
+ amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
+ swap(vm->last_unlocked, tmp);
+ dma_fence_put(tmp);
}
r = vm->update_funcs->prepare(&params, resv, sync_mode);
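The fence juggling above is easy to misread, so here is the same retire-and-replace pattern in isolation; a minimal sketch assuming only the generic dma_fence API, not code from the patch:

#include <linux/dma-fence.h>

/* Replace a tracked fence with an already-signaled stub and drop the
 * reference to the fence it previously held.
 */
static void retire_tracked_fence(struct dma_fence **tracked)
{
	struct dma_fence *stub = dma_fence_get_stub();

	swap(*tracked, stub);	/* *tracked now points at the stub */
	dma_fence_put(stub);	/* stub now names the old fence; release it */
}

In the hunk above the same steps run inline: vm->last_unlocked is first attached to the root BO's reservation via amdgpu_bo_fence() so other waiters keep seeing it, then swapped out for the stub and released.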
@@ -1721,7 +1728,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
start, last, flags, addr,
dma_addr, fence);
if (r)
@@ -1784,6 +1791,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (bo) {
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+
+ if (amdgpu_bo_encrypted(bo))
+ flags |= AMDGPU_PTE_TMZ;
+
bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
} else {
flags = 0x0;
@@ -2014,7 +2025,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
mapping->start, mapping->last,
init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2124,11 +2135,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
(bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
bo_va->is_xgmi = true;
- mutex_lock(&adev->vm_manager.lock_pstate);
/* Power up XGMI if it can be potentially used */
- if (++adev->vm_manager.xgmi_map_counter == 1)
- amdgpu_xgmi_set_pstate(adev, 1);
- mutex_unlock(&adev->vm_manager.lock_pstate);
+ amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
}
return bo_va;
@@ -2551,12 +2559,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
dma_fence_put(bo_va->last_pt_update);
- if (bo && bo_va->is_xgmi) {
- mutex_lock(&adev->vm_manager.lock_pstate);
- if (--adev->vm_manager.xgmi_map_counter == 0)
- amdgpu_xgmi_set_pstate(adev, 0);
- mutex_unlock(&adev->vm_manager.lock_pstate);
- }
+ if (bo && bo_va->is_xgmi)
+ amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
kfree(bo_va);
}
@@ -2585,7 +2589,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false;
/* Don't evict VM page tables while they are updated */
- if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
+ if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
amdgpu_vm_eviction_unlock(bo_base->vm);
return false;
}
@@ -2762,7 +2766,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
if (timeout <= 0)
return timeout;
- return dma_fence_wait_timeout(vm->last_direct, true, timeout);
+ return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
}
/**
@@ -2798,7 +2802,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
/* create scheduler entities for page table updates */
- r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+ r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
adev->vm_manager.vm_pte_scheds,
adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
@@ -2808,7 +2812,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
adev->vm_manager.vm_pte_scheds,
adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
- goto error_free_direct;
+ goto error_free_immediate;
vm->pte_support_ats = false;
vm->is_compute_context = false;
@@ -2834,7 +2838,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
- vm->last_direct = dma_fence_get_stub();
+ vm->last_unlocked = dma_fence_get_stub();
mutex_init(&vm->eviction_lock);
vm->evicting = false;
@@ -2888,11 +2892,11 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_delayed:
- dma_fence_put(vm->last_direct);
+ dma_fence_put(vm->last_unlocked);
drm_sched_entity_destroy(&vm->delayed);
-error_free_direct:
- drm_sched_entity_destroy(&vm->direct);
+error_free_immediate:
+ drm_sched_entity_destroy(&vm->immediate);
return r;
}
@@ -2996,10 +3000,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
- if (vm->use_cpu_for_update)
+ if (vm->use_cpu_for_update) {
+ /* Sync with last SDMA update/clear before switching to CPU */
+ r = amdgpu_bo_sync_wait(vm->root.base.bo,
+ AMDGPU_FENCE_OWNER_UNDEFINED, true);
+ if (r)
+ goto free_idr;
+
vm->update_funcs = &amdgpu_vm_cpu_funcs;
- else
+ } else {
vm->update_funcs = &amdgpu_vm_sdma_funcs;
+ }
dma_fence_put(vm->last_update);
vm->last_update = NULL;
vm->is_compute_context = true;
@@ -3089,8 +3100,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pasid = 0;
}
- dma_fence_wait(vm->last_direct, false);
- dma_fence_put(vm->last_direct);
+ dma_fence_wait(vm->last_unlocked, false);
+ dma_fence_put(vm->last_unlocked);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3107,7 +3118,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&root);
WARN_ON(vm->root.base.bo);
- drm_sched_entity_destroy(&vm->direct);
+ drm_sched_entity_destroy(&vm->immediate);
drm_sched_entity_destroy(&vm->delayed);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -3166,9 +3177,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
idr_init(&adev->vm_manager.pasid_idr);
spin_lock_init(&adev->vm_manager.pasid_lock);
-
- adev->vm_manager.xgmi_map_counter = 0;
- mutex_init(&adev->vm_manager.lock_pstate);
}
/**
@@ -3343,8 +3351,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
value = 0;
}
- r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
- flags, value, NULL, NULL);
+ r = amdgpu_vm_bo_update_mapping(adev, vm, true, false, NULL, addr,
+ addr + 1, flags, value, NULL, NULL);
if (r)
goto error_unlock;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 06fe30e1492d..c8e68d7890bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -54,6 +54,9 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_SYSTEM (1ULL << 1)
#define AMDGPU_PTE_SNOOPED (1ULL << 2)
+/* RV+ */
+#define AMDGPU_PTE_TMZ (1ULL << 3)
+
/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1ULL << 4)
@@ -203,9 +206,14 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm *vm;
/**
- * @direct: if changes should be made directly
+ * @immediate: if changes should be made immediately
*/
- bool direct;
+ bool immediate;
+
+ /**
+ * @unlocked: true if the root BO is not locked
+ */
+ bool unlocked;
/**
* @pages_addr:
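The two flags are orthogonal, and the call sites in this patch combine them in exactly three ways. A hypothetical helper, purely illustrative (the enum and names below are not from the driver):

#include <linux/types.h>

enum vm_update_path {
	VM_UPDATE_CS,		/* regular command submission */
	VM_UPDATE_FAULT,	/* page-fault handler */
	VM_UPDATE_MMU_CB,	/* MM notifier callback */
};

static void vm_update_flags(enum vm_update_path path,
			    bool *immediate, bool *unlocked)
{
	/* only faults are served immediately from the paging queue */
	*immediate = (path == VM_UPDATE_FAULT);
	/* only MM callbacks update without the root BO reserved */
	*unlocked = (path == VM_UPDATE_MMU_CB);
}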
@@ -271,11 +279,11 @@ struct amdgpu_vm {
struct dma_fence *last_update;
/* Scheduler entities for page table updates */
- struct drm_sched_entity direct;
+ struct drm_sched_entity immediate;
struct drm_sched_entity delayed;
- /* Last submission to the scheduler entities */
- struct dma_fence *last_direct;
+ /* Last unlocked submission to the scheduler entities */
+ struct dma_fence *last_unlocked;
unsigned int pasid;
/* dedicated to vm */
@@ -349,10 +357,6 @@ struct amdgpu_vm_manager {
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
-
- /* counter of mapped memory through xgmi */
- uint32_t xgmi_map_counter;
- struct mutex lock_pstate;
};
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
@@ -380,7 +384,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, bool direct);
+ struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index e38516304070..39c704a1fb0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -84,7 +84,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
pe += (unsigned long)amdgpu_bo_kptr(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
for (i = 0; i < count; i++) {
value = p->pages_addr ?
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index cf96c335b258..8d9c6feba660 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -61,10 +61,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{
+ enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+ : AMDGPU_IB_POOL_DELAYED;
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r;
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
if (r)
return r;
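Both IB allocation sites in this file now repeat the same ternary; factored into a helper here purely for illustration (driver-internal types assumed):

static enum amdgpu_ib_pool_type
vm_sdma_ib_pool(const struct amdgpu_vm_update_params *p)
{
	/* immediate page-table updates draw from the dedicated pool */
	return p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
			    : AMDGPU_IB_POOL_DELAYED;
}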
@@ -90,11 +92,11 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
{
struct amdgpu_ib *ib = p->job->ibs;
struct drm_sched_entity *entity;
- struct dma_fence *f, *tmp;
struct amdgpu_ring *ring;
+ struct dma_fence *f;
int r;
- entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+ entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
WARN_ON(ib->length_dw == 0);
@@ -104,15 +106,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (r)
goto error;
- if (p->direct) {
- tmp = dma_fence_get(f);
- swap(p->vm->last_direct, tmp);
+ if (p->unlocked) {
+ struct dma_fence *tmp = dma_fence_get(f);
+
+ swap(p->vm->last_unlocked, f);
dma_fence_put(tmp);
} else {
- dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+ amdgpu_bo_fence(p->vm->root.base.bo, f, true);
}
- if (fence && !p->direct)
+ if (fence && !p->immediate)
swap(*fence, f);
dma_fence_put(f);
return 0;
@@ -142,7 +145,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
- trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
+ trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
@@ -169,7 +172,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
struct amdgpu_ib *ib = p->job->ibs;
pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
count, incr);
@@ -198,6 +201,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags)
{
+ enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+ : AMDGPU_IB_POOL_DELAYED;
unsigned int i, ndw, nptes;
uint64_t *pte;
int r;
@@ -223,7 +228,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
+ &p->job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 82a3299e53c0..d399e5893170 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -22,6 +22,7 @@
* Authors: Christian König
*/
+#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_atomfirmware.h"
@@ -148,6 +149,15 @@ static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
amdgpu_mem_info_vram_vendor, NULL);
+static const struct attribute *amdgpu_vram_mgr_attributes[] = {
+ &dev_attr_mem_info_vram_total.attr,
+ &dev_attr_mem_info_vis_vram_total.attr,
+ &dev_attr_mem_info_vram_used.attr,
+ &dev_attr_mem_info_vis_vram_used.attr,
+ &dev_attr_mem_info_vram_vendor.attr,
+ NULL
+};
+
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
@@ -172,31 +182,9 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
man->priv = mgr;
/* Register the VRAM-related sysfs attributes */
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vram_total\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vram_used\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
- return ret;
- }
+ ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
+ if (ret)
+ DRM_ERROR("Failed to register sysfs\n");
return 0;
}
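The conversion above swaps five device_create_file()/device_remove_file() pairs for a single NULL-terminated attribute array. A minimal self-contained sketch of the same pattern, using a hypothetical read-only attribute named foo:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);	/* expands to dev_attr_foo, using foo_show */

static const struct attribute *foo_attrs[] = {
	&dev_attr_foo.attr,
	NULL,	/* sysfs_create_files() stops at the terminator */
};

/* registration and teardown then mirror each other:
 *	sysfs_create_files(&dev->kobj, foo_attrs);
 *	sysfs_remove_files(&dev->kobj, foo_attrs);
 */

One array keeps create and remove in sync automatically, which the per-file calls above could not guarantee.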
@@ -219,11 +207,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
- device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
- device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
- device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
- device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
- device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
return 0;
}
@@ -459,6 +443,104 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
}
/**
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @mem: TTM memory object
+ * @dev: device the sg table is mapped for
+ * @dir: dma direction
+ * @sgt: resulting sg table
+ *
+ * Allocate and fill a sg table from a VRAM allocation.
+ */
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+ struct ttm_mem_reg *mem,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt)
+{
+ struct drm_mm_node *node;
+ struct scatterlist *sg;
+ int num_entries = 0;
+ unsigned int pages;
+ int i, r;
+
+ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
+ if (!*sgt)
+ return -ENOMEM;
+
+ for (pages = mem->num_pages, node = mem->mm_node;
+ pages; pages -= node->size, ++node)
+ ++num_entries;
+
+ r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
+ if (r)
+ goto error_free;
+
+ for_each_sg((*sgt)->sgl, sg, num_entries, i)
+ sg->length = 0;
+
+ node = mem->mm_node;
+ for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+ phys_addr_t phys = (node->start << PAGE_SHIFT) +
+ adev->gmc.aper_base;
+ size_t size = node->size << PAGE_SHIFT;
+ dma_addr_t addr;
+
+ ++node;
+ addr = dma_map_resource(dev, phys, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ r = dma_mapping_error(dev, addr);
+ if (r)
+ goto error_unmap;
+
+ sg_set_page(sg, NULL, size, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = size;
+ }
+ return 0;
+
+error_unmap:
+ for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+ if (!sg->length)
+ continue;
+
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ sg_free_table(*sgt);
+
+error_free:
+ kfree(*sgt);
+ return r;
+}
+
+/**
+ * amdgpu_vram_mgr_free_sgt - unmap and free a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @dev: device the sg table was mapped for
+ * @dir: dma direction
+ * @sgt: sg table to free
+ *
+ * Free a previously allocated sg table and unmap its DMA addresses.
+ */
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
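A hedged usage sketch of the new pair, not from the patch; the surrounding objects (bo, peer_dev) and the dma-buf-style call site are assumed:

static int example_export_vram(struct amdgpu_device *adev,
			       struct amdgpu_bo *bo,
			       struct device *peer_dev)
{
	struct sg_table *sgt;
	int r;

	r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, peer_dev,
				      DMA_BIDIRECTIONAL, &sgt);
	if (r)
		return r;

	/* ... peer_dev programs its DMA engine from sg_dma_address()
	 * and sg_dma_len() of each mapped entry ... */

	amdgpu_vram_mgr_free_sgt(adev, peer_dev, DMA_BIDIRECTIONAL, sgt);
	return 0;
}

Note the table carries DMA addresses only (sg_set_page() stores a NULL page above), so a consumer must not touch sg_page() on these entries.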
+
+/**
* amdgpu_vram_mgr_usage - how many bytes are used in this domain
*
* @man: TTM memory type manager
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 95b3327168ac..91837a991319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -325,9 +325,18 @@ success:
static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive)
{
+ char node[10];
+
+ memset(node, 0, sizeof(node));
+
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
- sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
- sysfs_remove_link(hive->kobj, adev->ddev->unique);
+ device_remove_file(adev->dev, &dev_attr_xgmi_error);
+
+ if (adev != hive->adev)
+ sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");
+
+ snprintf(node, sizeof(node), "node%d", hive->number_devices);
+ sysfs_remove_link(hive->kobj, node);
}
@@ -373,7 +382,13 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
if (lock)
mutex_lock(&tmp->hive_lock);
- tmp->pstate = -1;
+ tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+ tmp->hi_req_gpu = NULL;
+ /*
+ * The hive pstate is high on boot for Vega20, so we have to go
+ * to the low pstate after boot.
+ */
+ tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
mutex_unlock(&xgmi_mutex);
return tmp;
@@ -383,56 +398,59 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
- struct amdgpu_device *tmp_adev;
- bool update_hive_pstate = true;
- bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
+ struct amdgpu_device *request_adev;
+ bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
+ bool init_low;
- if (!hive)
+ /* fw bug: temporarily disable pstate switching */
+ return 0;
+
+ if (!hive || adev->asic_type != CHIP_VEGA20)
return 0;
+
+ /* hive is valid from here on; safe to dereference */
+ request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
+ init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
mutex_lock(&hive->hive_lock);
- if (hive->pstate == pstate) {
- adev->pstate = is_high_pstate ? pstate : adev->pstate;
+ if (is_hi_req)
+ hive->hi_req_count++;
+ else
+ hive->hi_req_count--;
+
+ /*
+ * Vega20 only needs a single peer to request the high pstate for the
+ * hive to go high, but all peers must request the low pstate for the
+ * hive to go low.
+ */
+ if (hive->pstate == pstate ||
+ (!is_hi_req && hive->hi_req_count && !init_low))
goto out;
- }
- dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+ dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);
- ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
+ ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
if (ret) {
- dev_err(adev->dev,
+ dev_err(request_adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
- adev->gmc.xgmi.node_id,
- adev->gmc.xgmi.hive_id, ret);
+ request_adev->gmc.xgmi.node_id,
+ request_adev->gmc.xgmi.hive_id, ret);
goto out;
}
- /* Update device pstate */
- adev->pstate = pstate;
-
- /*
- * Update the hive pstate only all devices of the hive
- * are in the same pstate
- */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- if (tmp_adev->pstate != adev->pstate) {
- update_hive_pstate = false;
- break;
- }
- }
- if (update_hive_pstate || is_high_pstate)
+ if (init_low)
+ hive->pstate = hive->hi_req_count ?
+ hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
+ else {
hive->pstate = pstate;
-
+ hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
+ adev : NULL;
+ }
out:
mutex_unlock(&hive->hive_lock);
-
return ret;
}
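The voting rule encoded in hi_req_count above, restated as two tiny predicates (illustrative only, not driver code):

#include <linux/types.h>

/* a single peer requesting high is enough to raise the whole hive ... */
static bool hive_should_go_high(int hi_req_count)
{
	return hi_req_count > 0;
}

/* ... but the hive may only drop once every request has been withdrawn */
static bool hive_may_go_low(int hi_req_count)
{
	return hi_req_count == 0;
}

Seeding hi_req_count with AMDGPU_MAX_XGMI_DEVICE_PER_HIVE at hive creation makes the boot-time high pstate look like a full set of outstanding requests, so the hive only drops after every device has come up and voted low.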
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
- int ret = -EINVAL;
+ int ret;
/* Each psp needs to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
@@ -507,9 +525,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit;
}
- /* Set default device pstate */
- adev->pstate = -1;
-
top_info = &adev->psp.xgmi_context.top_info;
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -577,14 +592,14 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
if (!hive)
return -EINVAL;
- if (!(hive->number_devices--)) {
+ task_barrier_rem_task(&hive->tb);
+ amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
+ mutex_unlock(&hive->hive_lock);
+
+ if (!(--hive->number_devices)) {
amdgpu_xgmi_sysfs_destroy(adev, hive);
mutex_destroy(&hive->hive_lock);
mutex_destroy(&hive->reset_lock);
- } else {
- task_barrier_rem_task(&hive->tb);
- amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
- mutex_unlock(&hive->hive_lock);
}
return psp_xgmi_terminate(&adev->psp);
@@ -604,6 +619,8 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
adev->gmc.xgmi.num_physical_nodes == 0)
return 0;
+ amdgpu_xgmi_reset_ras_error_count(adev);
+
if (!adev->gmc.xgmi.ras_if) {
adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
if (!adev->gmc.xgmi.ras_if)
@@ -641,31 +658,34 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr)
{
- uint32_t df_inst_id;
- uint64_t dram_base_addr = 0;
- const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
-
- if ((!df_funcs) ||
- (!df_funcs->get_df_inst_id) ||
- (!df_funcs->get_dram_base_addr)) {
- dev_warn(adev->dev,
- "XGMI: relative phy_addr algorithm is not supported\n");
- return addr;
- }
-
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
- dev_warn(adev->dev,
- "failed to disable DF-Cstate, DF register may not be accessible\n");
- return addr;
- }
+ struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+ return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
- df_inst_id = df_funcs->get_df_inst_id(adev);
- dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
+{
+ WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
+ WREG32_PCIE(pcs_status_reg, 0);
+}
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
- dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
- return addr + dram_base_addr;
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
+ pcs_clear_status(adev,
+ xgmi_pcs_err_status_reg_arct[i]);
+ break;
+ case CHIP_VEGA20:
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
+ pcs_clear_status(adev,
+ xgmi_pcs_err_status_reg_vg20[i]);
+ break;
+ default:
+ break;
+ }
}
static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
@@ -758,6 +778,8 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
break;
}
+ amdgpu_xgmi_reset_ras_error_count(adev);
+
err_data->ue_count += ue_cnt;
err_data->ce_count += ce_cnt;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 4a92067fe595..6999eab16a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -25,6 +25,7 @@
#include <drm/task_barrier.h>
#include "amdgpu_psp.h"
+
struct amdgpu_hive_info {
uint64_t hive_id;
struct list_head device_list;
@@ -33,8 +34,14 @@ struct amdgpu_hive_info {
struct kobject *kobj;
struct device_attribute dev_attr;
struct amdgpu_device *adev;
- int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+ int hi_req_count;
+ struct amdgpu_device *hi_req_gpu;
struct task_barrier tb;
+ enum {
+ AMDGPU_XGMI_PSTATE_MIN,
+ AMDGPU_XGMI_PSTATE_MAX_VEGA20,
+ AMDGPU_XGMI_PSTATE_UNKNOWN
+ } pstate;
};
struct amdgpu_pcs_ras_field {
@@ -56,6 +63,7 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index cae426c7c086..4cfc786699c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -54,6 +54,8 @@
#define PLL_INDEX 2
#define PLL_DATA 3
+#define ATOM_CMD_TIMEOUT_SEC 20
+
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
@@ -744,8 +746,9 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 10000)) {
- DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+ if (jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC * 1000) {
+ DRM_ERROR("atombios stuck in loop for more than %d secs, aborting\n",
+ ATOM_CMD_TIMEOUT_SEC);
ctx->abort = true;
}
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 62635e58e45e..fe306d0f73f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1809,12 +1809,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
-{
- if (is_virtual_machine()) /* passthrough mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
@@ -2177,8 +2171,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
- cik_detect_hw_virtualization(adev);
-
switch (adev->asic_type) {
case CHIP_BONAIRE:
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 580d3f93d670..20f108818b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -320,8 +320,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
}
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -679,7 +677,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -980,7 +979,8 @@ static int cik_sdma_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1313,7 +1313,8 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
ib->ptr[ib->length_dw++] = byte_count;
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index cee6e8a3ad9c..5f3f6ebfb387 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -450,7 +450,7 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 2512e7ebfedf..e38744d06f4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2303,9 +2303,9 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_device *adev = crtc->dev->dev_private;
u32 tmp;
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
@@ -2319,10 +2319,10 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 0dde22db9848..2584ff74423b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2382,9 +2382,9 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_device *adev = crtc->dev->dev_private;
u32 tmp;
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
@@ -2398,10 +2398,10 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 84219534bd38..d05c39f9ae40 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2194,9 +2194,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
@@ -2211,10 +2211,10 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- CUR_CONTROL__CURSOR_EN_MASK |
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3a640702d7d1..ad0f8adb6a2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2205,9 +2205,9 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
@@ -2220,10 +2220,10 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- CUR_CONTROL__CURSOR_EN_MASK |
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 13e12be667fc..d5ff7b6331ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -172,8 +172,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ drm_crtc_vblank_off(crtc);
+ amdgpu_crtc->enabled = false;
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
@@ -286,7 +287,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
static const struct mode_size {
int w;
int h;
- } common_modes[17] = {
+ } common_modes[21] = {
{ 640, 480},
{ 720, 480},
{ 800, 600},
@@ -303,10 +304,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
{1680, 1050},
{1600, 1200},
{1920, 1080},
- {1920, 1200}
+ {1920, 1200},
+ {4096, 3112},
+ {3656, 2664},
+ {3840, 2160},
+ {4096, 2160},
};
- for (i = 0; i < 17; i++) {
+ for (i = 0; i < 21; i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
drm_mode_probed_add(connector, mode);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 5a1bd8ed1a6c..a7b8292cefee 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -686,58 +686,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
}
}
-static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
- uint32_t df_inst)
-{
- uint32_t base_addr_reg_val = 0;
- uint64_t base_addr = 0;
-
- base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
- df_inst * DF_3_6_SMN_REG_INST_DIST);
-
- if (REG_GET_FIELD(base_addr_reg_val,
- DF_CS_UMC_AON0_DramBaseAddress0,
- AddrRngVal) == 0) {
- DRM_WARN("address range not valid");
- return 0;
- }
-
- base_addr = REG_GET_FIELD(base_addr_reg_val,
- DF_CS_UMC_AON0_DramBaseAddress0,
- DramBaseAddr);
-
- return base_addr << 28;
-}
-
-static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
-{
- uint32_t xgmi_node_id = 0;
- uint32_t df_inst_id = 0;
-
- /* Walk through DF dst nodes to find current XGMI node */
- for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
-
- xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
- df_inst_id * DF_3_6_SMN_REG_INST_DIST);
- xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
- DF_CS_UMC_AON0_DramLimitAddress0,
- DstFabricID);
-
- /* TODO: establish reason dest fabric id is offset by 7 */
- xgmi_node_id = xgmi_node_id >> 7;
-
- if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
- break;
- }
-
- if (df_inst_id == DF_3_6_INST_CNT) {
- DRM_WARN("cant match df dst id with gpu node");
- return 0;
- }
-
- return df_inst_id;
-}
-
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
.sw_fini = df_v3_6_sw_fini,
@@ -752,6 +700,4 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.pmc_get_count = df_v3_6_pmc_get_count,
.get_fica = df_v3_6_get_fica,
.set_fica = df_v3_6_set_fica,
- .get_dram_base_addr = df_v3_6_get_dram_base_addr,
- .get_df_inst_id = df_v3_6_get_df_inst_id
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f92c158d89a1..bd5dd4f64311 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -138,6 +138,1062 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
/* Pending on emulation bring up */
};
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x33),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
@@ -272,11 +1328,1691 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
/* Pending on emulation bring up */
};

+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1ac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1bc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1cc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x26),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x25),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x3b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
{
/* Pending on emulation bring up */
};

+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x22),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x35),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
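/*
 * Editor's note (not part of the patch): each SOC15_REG_GOLDEN_VALUE(ip, inst,
 * reg, and_mask, or_mask) entry above describes one register write. The table
 * interleaves GRBM_GFX_INDEX updates (0x0 selects shader engine 0, 0x10000
 * sets SE_INDEX to 1, and the final 0xe0000000 restores the broadcast bits)
 * with RLC_SPM_*_SAMPLEDELAY_IND_ADDR/_IND_DATA pairs, so each SPM sample
 * delay is programmed per shader engine through the indirect interface.
 */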
+
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -301,7 +3037,7 @@ static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -431,6 +3167,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_0_nv10,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
break;
case CHIP_NAVI14:
soc15_program_register_sequence(adev,
@@ -439,6 +3178,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_nv14,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
break;
case CHIP_NAVI12:
soc15_program_register_sequence(adev,
@@ -447,6 +3189,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_2_nv12,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
break;
default:
break;
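/*
 * Sketch of what soc15_program_register_sequence() does with these tables,
 * based on the soc15.c helper of this era (an assumption, not part of the
 * patch):
 *
 *	for (i = 0; i < array_size; i++) {
 *		const struct soc15_reg_golden *e = &regs[i];
 *		u32 reg = adev->reg_offset[e->hwip][e->instance][e->segment]
 *			  + e->reg;
 *		u32 tmp;
 *
 *		if (e->and_mask == 0xffffffff)	/* straight write */
 *			tmp = e->or_mask;
 *		else				/* read-modify-write */
 *			tmp = (RREG32(reg) & ~e->and_mask) |
 *			      (e->or_mask & e->and_mask);
 *		WREG32(reg, tmp);
 *	}
 */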
@@ -557,7 +3302,8 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
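/*
 * Note (not part of the patch): amdgpu_ib_get() gains an IB-pool argument in
 * this series; the ring/IB self-tests pass AMDGPU_IB_POOL_DIRECT because they
 * submit straight to the hardware ring rather than through the GPU scheduler.
 */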
@@ -1298,7 +4044,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
return 0;
@@ -1309,7 +4056,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
{
int r;
unsigned irq_type;
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ struct amdgpu_ring *ring;
+ unsigned int hw_prio;
ring = &adev->gfx.compute_ring[ring_id];
@@ -1328,10 +4076,11 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
-
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type, hw_prio);
if (r)
return r;
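/*
 * Note (not part of the patch): instead of a fixed AMDGPU_RING_PRIO_DEFAULT,
 * each compute ring is now created with the pipe priority that
 * amdgpu_gfx_is_high_priority_compute_queue() derives from its queue id, so
 * high-priority queues are distinguished at ring-init time.
 */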
@@ -1829,9 +4578,9 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
/* csib */
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
- adev->gfx.rlc.clear_state_gpu_addr >> 32);
+ adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
- adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+ adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
return 0;
@@ -2441,10 +5190,6 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (!enable) {
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -2923,16 +5668,12 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
} else {
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK |
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -3268,11 +6009,8 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
- ring->has_high_prio = true;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
- } else {
- ring->has_high_prio = false;
}
}
}
@@ -3802,14 +6540,16 @@ static int gfx_v10_0_hw_init(void *handle)
* loaded first, so with the direct loading type the SMC ucode has to be
* loaded here before the RLC.
*/
- r = smu_load_microcode(&adev->smu);
- if (r)
- return r;
+ if (adev->smu.ppt_funcs != NULL) {
+ r = smu_load_microcode(&adev->smu);
+ if (r)
+ return r;
- r = smu_check_fw_status(&adev->smu);
- if (r) {
- pr_err("SMC firmware status is not correct\n");
- return r;
+ r = smu_check_fw_status(&adev->smu);
+ if (r) {
+ pr_err("SMC firmware status is not correct\n");
+ return r;
+ }
}
}
@@ -4273,7 +7013,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === CGCG /CGLS for GFX 3D Only === */
gfx_v10_0_update_3d_clock_gating(adev, enable);
/* === MGCG + MGLS === */
- /* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
+ gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
}
if (adev->cg_flags &
@@ -4292,14 +7032,21 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
- u32 data;
+ u32 reg, data;
- data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(reg);
+ else
+ data = RREG32(reg);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
- WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ else
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
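/*
 * Note (not part of the patch): in SR-IOV "one VF" mode the VF owns the GPU
 * exclusively, so RLC_SPM_MC_CNTL can be accessed with the *_NO_KIQ accessors,
 * which bypass the kernel interface queue instead of routing every register
 * access through it.
 */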
static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -4341,6 +7088,20 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.reset = gfx_v10_0_rlc_reset,
.start = gfx_v10_0_rlc_start,
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
+ .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v10_0_set_safe_mode,
+ .unset_safe_mode = gfx_v10_0_unset_safe_mode,
+ .init = gfx_v10_0_rlc_init,
+ .get_csb_size = gfx_v10_0_get_csb_size,
+ .get_csb_buffer = gfx_v10_0_get_csb_buffer,
+ .resume = gfx_v10_0_rlc_resume,
+ .stop = gfx_v10_0_rlc_stop,
+ .reset = gfx_v10_0_rlc_reset,
+ .start = gfx_v10_0_rlc_start,
+ .update_spm_vmid = gfx_v10_0_update_spm_vmid,
.rlcg_wreg = gfx_v10_rlcg_wreg,
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
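/*
 * Note (not part of the patch): the table is duplicated so that only the
 * SR-IOV variant wires up .rlcg_wreg/.is_rlcg_access_range; bare-metal parts
 * keep the plain gfx_v10_0_rlc_funcs and never route register writes through
 * the RLC gateway.
 */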
@@ -4350,14 +7111,14 @@ static int gfx_v10_0_set_powergating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
- if (!enable) {
- amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- } else
- amdgpu_gfx_off_ctrl(adev, true);
+ amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
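/*
 * Note (an assumption, not part of the patch): the open-coded disable path
 * could drop its cancel_delayed_work_sync() because the gfxoff delayed work
 * re-checks the request count taken by amdgpu_gfx_off_ctrl() before enabling
 * GFXOFF, which makes the explicit cancel redundant.
 */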
@@ -4370,6 +7131,9 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -4682,7 +7446,8 @@ static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0);
}
-static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
+ uint32_t flags)
{
uint32_t dw2 = 0;
@@ -4690,8 +7455,6 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
gfx_v10_0_ring_emit_ce_meta(ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
- gfx_v10_0_ring_emit_tmz(ring, true);
-
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -4848,16 +7611,19 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
sizeof(de_payload) >> 2);
}
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+ bool secure)
{
+ uint32_t v = secure ? FRAME_TMZ : 0;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+ amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}
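/*
 * Illustrative use of the renamed hook (an assumption, not part of the
 * patch): the ring backend brackets command submission with
 *
 *	amdgpu_ring_emit_frame_cntl(ring, true, secure);   /* FRAME_CMD(0), begin */
 *	/* ... emit IBs ... */
 *	amdgpu_ring_emit_frame_cntl(ring, false, secure);  /* FRAME_CMD(1), end */
 *
 * and with 'secure' set, FRAME_TMZ is OR'ed into the payload so the CP treats
 * the frame as trusted-memory-zone (encrypted) work.
 */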
-static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -4866,9 +7632,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -4918,6 +7684,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
+static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
+ unsigned vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t value = 0;
+
+ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+}
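/*
 * Note (not part of the patch): soft recovery writes SQ_CMD with what appears
 * to be the "kill" command (0x3) in broadcast mode, restricted by CHECK_VMID
 * to the guilty VMID, so only the hanging process's waves are terminated
 * instead of resetting the whole ring.
 */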
+
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -5241,6 +8020,29 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ const unsigned int gcr_cntl =
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
+
+ /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
+ amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+ amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
+}
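/*
 * Note (not part of the patch): on gfx10 the legacy CP_COHER_CNTL flags are
 * replaced by the GCR_CNTL field of ACQUIRE_MEM; the mask built above writes
 * back and invalidates GL2 and invalidates the GLM, GL1, GLV, GLK and GLI
 * caches over the full address range (CP_COHER_SIZE all ones, base 0).
 */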
+
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.name = "gfx_v10_0",
.early_init = gfx_v10_0_early_init,
@@ -5288,7 +8090,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 8, /* gfx_v10_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5305,10 +8108,12 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
.preempt_ib = gfx_v10_0_ring_preempt_ib,
- .emit_tmz = gfx_v10_0_ring_emit_tmz,
+ .emit_frame_cntl = gfx_v10_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v10_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v10_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5328,7 +8133,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v10_0_ring_emit_vm_flush */
- 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+ 8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+ 8, /* gfx_v10_0_emit_mem_sync */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5343,6 +8149,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .emit_mem_sync = gfx_v10_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5429,9 +8236,11 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
- case CHIP_NAVI12:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
+ case CHIP_NAVI12:
+ adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 31f44d05e606..79c52c7a02e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1914,7 +1914,8 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1950,7 +1951,6 @@ err1:
static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
if (enable) {
WREG32(mmCP_ME_CNTL, 0);
} else {
@@ -1958,10 +1958,6 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
CP_ME_CNTL__PFP_HALT_MASK |
CP_ME_CNTL__CE_HALT_MASK));
WREG32(mmSCRATCH_UMSK, 0);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -3114,7 +3110,9 @@ static int gfx_v6_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -3136,7 +3134,8 @@ static int gfx_v6_0_sw_init(void *handle)
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -3466,6 +3465,18 @@ static int gfx_v6_0_set_powergating_state(void *handle,
return 0;
}
+static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
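/*
 * Note (not part of the patch): on SI, SURFACE_SYNC is a 5-dword packet (no
 * CP_COHER_SIZE_HI/CP_COHER_BASE_HI), which is why the emit_frame_size
 * updates below grow by 5 for this helper.
 */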
+
static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
@@ -3496,7 +3507,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
- 3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+ 3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3507,6 +3519,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
.emit_wreg = gfx_v6_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v6_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
@@ -3520,7 +3533,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
5 + 5 + /* hdp flush / invalidate */
7 + /* gfx_v6_0_ring_emit_pipeline_sync */
SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
- 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3530,6 +3544,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.emit_wreg = gfx_v6_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v6_0_emit_mem_sync,
};
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 733d398c61cc..0cc011f9190d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2364,7 +2364,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -2431,15 +2432,12 @@ err1:
*/
static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
- if (enable) {
+ if (enable)
WREG32(mmCP_ME_CNTL, 0);
- } else {
- WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
+ else
+ WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
+ CP_ME_CNTL__PFP_HALT_MASK |
+ CP_ME_CNTL__CE_HALT_MASK));
udelay(50);
}
@@ -2700,15 +2698,11 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
*/
static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
- if (enable) {
+ if (enable)
WREG32(mmCP_MEC_CNTL, 0);
- } else {
- WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
- }
+ else
+ WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+ CP_MEC_CNTL__MEC_ME2_HALT_MASK));
udelay(50);
}
@@ -4439,7 +4433,8 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -4511,7 +4506,9 @@ static int gfx_v7_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -5001,6 +4998,32 @@ static int gfx_v7_0_set_powergating_state(void *handle,
return 0;
}
+static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
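/*
 * Note (an assumption, not part of the patch): the compute variant uses
 * ACQUIRE_MEM rather than SURFACE_SYNC, presumably because the latter is a
 * gfx-ring packet; the extra CP_COHER_SIZE_HI/CP_COHER_BASE_HI dwords account
 * for the 7 (not 5) dwords added to the compute emit_frame_size.
 */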
+
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -5033,7 +5056,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
- 3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+ 3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
@@ -5048,6 +5072,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v7_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5064,7 +5089,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5 + /* hdp invalidate */
7 + /* gfx_v7_0_ring_emit_pipeline_sync */
CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
- 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7, /* gfx_v7_0_emit_mem_sync_compute */
.emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
@@ -5077,6 +5103,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v7_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fc32586ef80b..1d4128227ffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -888,7 +888,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1550,7 +1551,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+ r = amdgpu_ib_get(adev, NULL, total_size,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
@@ -1892,6 +1894,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ unsigned int hw_prio;
ring = &adev->gfx.compute_ring[ring_id];
@@ -1911,9 +1914,11 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type, hw_prio);
if (r)
return r;
@@ -2017,7 +2022,8 @@ static int gfx_v8_0_sw_init(void *handle)
}
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
- AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -4120,7 +4126,6 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
u32 tmp = RREG32(mmCP_ME_CNTL);
if (enable) {
@@ -4131,8 +4136,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32(mmCP_ME_CNTL, tmp);
udelay(50);
@@ -4320,14 +4323,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32(mmCP_MEC_CNTL, 0);
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -4437,11 +4436,8 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
- ring->has_high_prio = true;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
- } else {
- ring->has_high_prio = false;
}
}
}
@@ -5619,12 +5615,18 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 data;
- data = RREG32(mmRLC_SPM_VMID);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
+ else
+ data = RREG32(mmRLC_SPM_VMID);
data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
- WREG32(mmRLC_SPM_VMID, data);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
+ else
+ WREG32(mmRLC_SPM_VMID, data);
}
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
@@ -6387,10 +6389,10 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
-static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -6399,9 +6401,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6815,6 +6817,34 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA |
+ PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA |
+ PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
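/*
 * Note (not part of the patch): unlike the gfx_v6/v7 helpers, the VI versions
 * also set PACKET3_TC_WB_ACTION_ENA, so the texture cache is written back to
 * memory in addition to being invalidated.
 */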
+
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6861,7 +6891,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
12 + 12 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
@@ -6879,6 +6910,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v8_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6895,7 +6927,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
5 + /* hdp_invalidate */
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
- 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7, /* gfx_v8_0_emit_mem_sync_compute */
.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
@@ -6908,6 +6941,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 0c390485bc10..711e9dd19705 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -50,18 +50,14 @@
#include "gfx_v9_4.h"
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
-#define mmPWR_MISC_CNTL_STATUS 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
-
#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0
@@ -511,8 +507,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
@@ -963,7 +959,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
case CHIP_RAVEN:
soc15_program_register_sequence(adev, golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rv2,
ARRAY_SIZE(golden_settings_gc_9_1_rv2));
@@ -1082,7 +1078,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1236,6 +1233,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
+ /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
{ 0, 0, 0, 0, 0 },
};
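/*
 * Note (not part of the patch): each quirk entry is { chip vendor, chip
 * device, subsystem vendor, subsystem device, revision }; the new row matches
 * Raven parts with PCI ID 1002:15dd, subsystem 1002:15dd, revision 0xc6, for
 * which the gfxoff quirk check will now keep GFXOFF disabled.
 */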
@@ -1275,7 +1274,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+ if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+ (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
((!is_raven_kicker(adev) &&
adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_feature_version < 1) ||
@@ -1618,9 +1618,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
chip_name = "vega20";
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -2120,7 +2120,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
else
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
@@ -2197,6 +2197,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ unsigned int hw_prio;
ring = &adev->gfx.compute_ring[ring_id];
@@ -2215,10 +2216,11 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
-
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type, hw_prio);
if (r)
return r;
@@ -2312,7 +2314,9 @@ static int gfx_v9_0_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -2530,7 +2534,7 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
break;
default:
break;
- };
+ }
}
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
@@ -2965,8 +2969,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
if (adev->asic_type == CHIP_VEGA12 ||
- (adev->asic_type == CHIP_RAVEN &&
- adev->rev_id >= 8))
+ (adev->apu_flags & AMD_APU_IS_RAVEN2))
gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
}
@@ -3102,16 +3105,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (!enable) {
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
}
@@ -3307,15 +3305,11 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
} else {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -3385,11 +3379,8 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
- ring->has_high_prio = true;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
- } else {
- ring->has_high_prio = false;
}
}
}
@@ -4056,13 +4047,18 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
{
signed long r, cnt = 0;
unsigned long flags;
- uint32_t seq;
+ uint32_t seq, reg_val_offs = 0;
+ uint64_t value = 0;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+ pr_err("critical bug! too many kiq readers\n");
+ goto failed_unlock;
+ }
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 9 | /* src: register*/
@@ -4072,10 +4068,13 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
- amdgpu_fence_emit_polling(ring, &seq);
+ reg_val_offs * 4));
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -4101,10 +4100,19 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;
- return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
- (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL;
+ mb();
+ value = (uint64_t)adev->wb.wb[reg_val_offs] |
+ (uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ return value;
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
+ if (reg_val_offs)
+ amdgpu_device_wb_free(adev, reg_val_offs);
pr_err("failed to read gpu clock\n");
return ~0;
}
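The reworked gfx_v9_0_kiq_read_clock() replaces the single shared kiq->reg_val_offs slot with a per-caller writeback slot and adds strict unwinding: roll the ring back before dropping the lock, and free the slot last. A simplified user-space sketch of that acquire/commit/unwind shape, with stub helpers standing in for the amdgpu calls:

#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for the amdgpu helpers; names are illustrative. */
static int wb_get(uint32_t *slot) { *slot = 1; return 0; }
static void wb_free(uint32_t slot) { (void)slot; }
static void lock(void) {}
static void unlock(void) {}
static void ring_alloc(void) {}
static void ring_undo(void) {}
static void ring_commit(void) {}
static int fence_emit_polling(uint32_t *seq) { *seq = 42; return 0; }

static long read_clock(void)
{
	uint32_t slot = 0, seq;

	lock();
	if (wb_get(&slot))		/* no free slot: fail under the lock */
		goto failed_unlock;
	ring_alloc();
	if (fence_emit_polling(&seq))
		goto failed_undo;	/* roll the ring back before unlocking */
	ring_commit();
	unlock();
	/* ... poll the fence, then read the two writeback dwords ... */
	wb_free(slot);			/* release the slot only after the read */
	return 0;

failed_undo:
	ring_undo();
failed_unlock:
	unlock();
	if (slot)
		wb_free(slot);
	fprintf(stderr, "failed to read gpu clock\n");
	return -1;
}

int main(void)
{
	return read_clock() ? 1 : 0;
}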
@@ -4489,7 +4497,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+ r = amdgpu_ib_get(adev, NULL, total_size,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
@@ -4960,14 +4969,21 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
- u32 data;
+ u32 reg, data;
- data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(reg);
+ else
+ data = RREG32(reg);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
- WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ else
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -5025,10 +5041,9 @@ static int gfx_v9_0_set_powergating_state(void *handle,
switch (adev->asic_type) {
case CHIP_RAVEN:
case CHIP_RENOIR:
- if (!enable) {
+ if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- }
+
if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
@@ -5052,12 +5067,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
amdgpu_gfx_off_ctrl(adev, true);
break;
case CHIP_VEGA12:
- if (!enable) {
- amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- } else {
- amdgpu_gfx_off_ctrl(adev, true);
- }
+ amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
@@ -5428,10 +5438,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+ bool secure)
{
+ uint32_t v = secure ? FRAME_TMZ : 0;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+ amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}
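emit_frame_cntl() folds the TMZ (trusted memory zone) flag into the same FRAME_CONTROL payload dword that carries the frame begin/end command. A small sketch of the dword composition; the FRAME_TMZ bit position here is an assumption for illustration, not the real soc15 encoding:

#include <stdint.h>
#include <stdio.h>

#define FRAME_TMZ_BIT	(1u << 0)	/* assumed bit, for illustration */
#define FRAME_CMD(x)	((uint32_t)(x) << 28)

static uint32_t frame_cntl_payload(int start, int secure)
{
	uint32_t v = secure ? FRAME_TMZ_BIT : 0;

	/* frame_begin = 0, frame_end = 1, as in the driver code above */
	return v | FRAME_CMD(start ? 0 : 1);
}

int main(void)
{
	printf("secure frame end: 0x%08x\n", frame_cntl_payload(0, 1));
	return 0;
}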
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
@@ -5441,8 +5454,6 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_ring_emit_ce_meta(ring);
- gfx_v9_0_ring_emit_tmz(ring, true);
-
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -5493,10 +5504,10 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
-static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -5505,9 +5516,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6408,15 +6419,15 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- vml2_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- vml2_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6428,16 +6439,16 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- vml2_walker_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, vml2_walker_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- vml2_walker_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, vml2_walker_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6448,8 +6459,9 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- atc_l2_cache_2m_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, atc_l2_cache_2m_mems[i],
+ sec_count);
err_data->ce_count += sec_count;
}
}
@@ -6460,15 +6472,17 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- atc_l2_cache_4k_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, atc_l2_cache_4k_mems[i],
+ sec_count);
err_data->ce_count += sec_count;
}
ded_count = (data & 0x00018000L) >> 0xf;
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- atc_l2_cache_4k_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, atc_l2_cache_4k_mems[i],
+ ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6481,7 +6495,8 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id, uint32_t value,
uint32_t *sec_count, uint32_t *ded_count)
{
@@ -6498,7 +6513,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
gfx_v9_0_ras_fields[i].sec_count_mask) >>
gfx_v9_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ dev_info(adev->dev, "GFX SubBlock %s, "
+ "Instance[%d][%d], SEC %d\n",
gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
sec_cnt);
@@ -6509,7 +6525,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
gfx_v9_0_ras_fields[i].ded_count_mask) >>
gfx_v9_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ dev_info(adev->dev, "GFX SubBlock %s, "
+ "Instance[%d][%d], DED %d\n",
gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
ded_cnt);
@@ -6598,9 +6615,10 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
- gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
- j, k, reg_value,
- &sec_count, &ded_count);
+ gfx_v9_0_ras_error_count(adev,
+ &gfx_v9_0_edc_counter_regs[i],
+ j, k, reg_value,
+ &sec_count, &ded_count);
}
}
}
@@ -6616,6 +6634,25 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ const unsigned int cp_coher_cntl =
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+ /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
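gfx_v9_0_emit_mem_sync() emits one PACKET3 header plus six payload dwords, which is exactly the 7 added to emit_frame_size for both the gfx and compute ring funcs below. A tiny check of that accounting (the PACKET3 count semantics noted in the comment are an assumption):

#include <assert.h>

/* Assumption: PACKET3(op, n) is followed by n + 1 payload dwords. */
#define ACQUIRE_MEM_BODY_COUNT	5	/* the count passed to PACKET3 above */

int main(void)
{
	int emitted = 1 /* header */ + (ACQUIRE_MEM_BODY_COUNT + 1);

	assert(emitted == 7);	/* must match the emit_frame_size bump */
	return 0;
}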
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -6662,7 +6699,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 7, /* gfx_v9_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6678,11 +6716,12 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
- .emit_tmz = gfx_v9_0_ring_emit_tmz,
+ .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -6702,7 +6741,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
- 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 7, /* gfx_v9_0_emit_mem_sync */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6717,6 +6757,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -6840,7 +6881,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
adev->gds.gds_compute_max_wave_id = 0x27f;
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
else
adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index dce945ef21a5..46351db36922 100644..100755
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -732,7 +732,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
vml2_walker_mems[i], sec_count);
err_data->ce_count += sec_count;
}
@@ -740,7 +741,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
vml2_walker_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -752,14 +754,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
utcl2_router_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
utcl2_router_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -772,7 +776,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
atc_l2_cache_2m_mems[i], sec_count);
err_data->ce_count += sec_count;
}
@@ -780,7 +785,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
atc_l2_cache_2m_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -793,7 +799,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
atc_l2_cache_4k_mems[i], sec_count);
err_data->ce_count += sec_count;
}
@@ -801,7 +808,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
atc_l2_cache_4k_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -816,7 +824,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id,
uint32_t value, uint32_t *sec_count,
uint32_t *ded_count)
@@ -833,7 +842,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
gfx_v9_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
gfx_v9_4_ras_fields[i].name, se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
@@ -842,7 +852,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
gfx_v9_4_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], DED %d\n",
gfx_v9_4_ras_fields[i].name, se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
@@ -876,7 +887,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_edc_counter_regs[i]));
if (reg_value)
- gfx_v9_4_ras_error_count(
+ gfx_v9_4_ras_error_count(adev,
&gfx_v9_4_edc_counter_regs[i],
j, k, reg_value, &sec_count,
&ded_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 1a2f18b908fe..6682b843bafe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -80,7 +80,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the
* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9775eca6fe43..ba2b7ac0c02d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -170,6 +170,9 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev,
"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID));
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -369,7 +372,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* translation. Avoid this by doing the invalidation from the SDMA
* itself.
*/
- r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+ &job);
if (r)
goto error_alloc;
@@ -423,7 +427,13 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ return -ETIME;
+ }
+
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
@@ -676,17 +686,23 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
- /* Could aper size report 0 ? */
- adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
- adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+ int r;
/* size in MB on si */
adev->gmc.mc_vram_size =
adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
- adev->gmc.visible_vram_size = adev->gmc.aper_size;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_device_resize_fb_bar(adev);
+ if (r)
+ return r;
+ }
+ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
/* In case the PCI BAR is larger than the actual amount of vram */
+ adev->gmc.visible_vram_size = adev->gmc.aper_size;
if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
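The gmc_v10_0_mc_init() reorder matters: aper_base and aper_size are snapshots of the PCI BAR, so the dGPU-only FB BAR resize must happen before they are cached, and the visible size is then clamped in case the resized BAR exceeds real VRAM. A hedged sketch of that clamp with example sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t real_vram = 8ull << 30;	/* 8 GiB of VRAM (example) */
	uint64_t aper_size = 16ull << 30;	/* resized BAR may be larger */
	uint64_t visible = aper_size;

	if (visible > real_vram)	/* same clamp as gmc_v10_0_mc_init() */
		visible = real_vram;
	printf("visible vram: %llu MiB\n", (unsigned long long)(visible >> 20));
	return 0;
}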
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b205039350b6..a75e472b4a81 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -61,17 +61,6 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__HBM 0x60000000
#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
-
-static const u32 crtc_offsets[6] =
-{
- SI_CRTC0_REGISTER_OFFSET,
- SI_CRTC1_REGISTER_OFFSET,
- SI_CRTC2_REGISTER_OFFSET,
- SI_CRTC3_REGISTER_OFFSET,
- SI_CRTC4_REGISTER_OFFSET,
- SI_CRTC5_REGISTER_OFFSET
-};
-
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
@@ -858,7 +847,7 @@ static int gmc_v6_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
- dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+ dev_warn(adev->dev, "No suitable DMA available.\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(44);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 9da9596a3638..bcd4baecfe11 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -762,6 +762,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
*
* Print human readable fault information (CIK).
*/
@@ -1019,7 +1020,7 @@ static int gmc_v7_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
- pr_warn("amdgpu: No suitable DMA available\n");
+ pr_warn("No suitable DMA available\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(40);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 27d83204fa2b..26976e50e2a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1005,6 +1005,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
*
* Print human readable fault information (VI).
*/
@@ -1144,7 +1145,7 @@ static int gmc_v8_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
- pr_warn("amdgpu: No suitable DMA available\n");
+ pr_warn("No suitable DMA available\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(40);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 8606f877478f..11e93a82131d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -362,6 +362,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev,
"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+ REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, CID));
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -438,9 +441,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
return ((vmhub == AMDGPU_MMHUB_0 ||
vmhub == AMDGPU_MMHUB_1) &&
(!amdgpu_sriov_vf(adev)) &&
- (!(adev->asic_type == CHIP_RAVEN &&
- adev->rev_id < 0x8 &&
- adev->pdev->device == 0x15d8)));
+ (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
+ (adev->apu_flags & AMD_APU_IS_PICASSO))));
}
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
@@ -618,7 +620,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
pasid, 2, all_hub);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ return -ETIME;
+ }
+
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 0debfd9f428c..b10c95cad9a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -480,7 +480,8 @@ int jpeg_v1_0_sw_init(void *handle)
ring = &adev->jpeg.inst->ring_dec;
sprintf(ring->name, "jpeg_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 6173951db7b4..e67d09cb1b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -106,7 +106,8 @@ static int jpeg_v2_0_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
sprintf(ring->name, "jpeg_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -169,14 +170,11 @@ static int jpeg_v2_0_hw_init(void *handle)
static int jpeg_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index c04c2078a7c1..713c32560445 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -118,7 +118,8 @@ static int jpeg_v2_5_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
sprintf(ring->name, "jpeg_dec_%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -267,7 +268,6 @@ static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device* adev, int inst)
data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
| JPEG_CGC_GATE__JPEG2_DEC_MASK
- | JPEG_CGC_GATE__JPEG_ENC_MASK
| JPEG_CGC_GATE__JMCIF_MASK
| JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 396c2a624de0..405767208a4d 100644..100755
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -96,7 +96,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -690,7 +690,8 @@ static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
};
-static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
+static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
{
uint32_t i;
@@ -704,7 +705,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v1_0_ras_fields[i].sec_count_mask) >>
mmhub_v1_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ dev_info(adev->dev,
+ "MMHUB SubBlock %s, SEC %d\n",
mmhub_v1_0_ras_fields[i].name,
sec_cnt);
*sec_count += sec_cnt;
@@ -714,7 +716,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v1_0_ras_fields[i].ded_count_mask) >>
mmhub_v1_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ dev_info(adev->dev,
+ "MMHUB SubBlock %s, DED %d\n",
mmhub_v1_0_ras_fields[i].name,
ded_cnt);
*ded_count += ded_cnt;
@@ -739,7 +742,8 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
if (reg_value)
- mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i],
+ mmhub_v1_0_get_ras_error_count(adev,
+ &mmhub_v1_0_edc_cnt_regs[i],
reg_value, &sec_count, &ded_count);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 37dbe0f2142f..83b453f5d717 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -26,7 +26,7 @@
#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
-#define AI_MAILBOX_POLL_FLR_TIMEDOUT 500
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -46,7 +46,8 @@ enum idh_event {
IDH_SUCCESS,
IDH_FAIL,
IDH_QUERY_ALIVE,
- IDH_EVENT_MAX
+
+ IDH_TEXT_MESSAGE = 255,
};
extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 237fa5e16b7c..ce2bf1fb79ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -30,7 +30,6 @@
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"
-#include "mxgpu_ai.h"
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
@@ -53,8 +52,7 @@ static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
*/
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
- return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}
@@ -63,8 +61,7 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
{
u32 reg;
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg != event)
return -ENOENT;
@@ -110,7 +107,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
timeout -= 10;
} while (timeout > 1);
- pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return -ETIME;
}
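xgpu_nv_poll_msg() now fails quietly with -ETIME and leaves logging to the caller, since a missing reply is expected for the optional init-data handshake. A generic sketch of that bounded 10 ms poll loop, with stand-in helpers:

#include <errno.h>
#include <stdbool.h>

/* Stand-ins for the mailbox peek and the 10 ms sleep between polls. */
static bool msg_arrived(int event) { (void)event; return false; }
static void sleep_10ms(void) {}

static int poll_msg(int event, int timeout_ms)
{
	do {
		if (msg_arrived(event))
			return 0;
		sleep_10ms();
		timeout_ms -= 10;
	} while (timeout_ms > 1);

	return -ETIME;	/* quiet: the caller logs only when it is fatal */
}

int main(void)
{
	return poll_msg(1, 6000) ? 0 : 1;
}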
@@ -118,7 +114,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3)
{
- u32 reg;
int r;
uint8_t trn;
@@ -137,19 +132,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
}
} while (trn);
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
- reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
- MSGBUF_DATA, req);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
- reg);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
- data1);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
- data2);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
- data3);
-
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
xgpu_nv_mailbox_set_valid(adev, true);
/* start to poll ack */
@@ -164,23 +150,48 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
int r;
+ enum idh_event event = -1;
xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
- /* start to check msg if request is idh_req_gpu_init_access */
- if (req == IDH_REQ_GPU_INIT_ACCESS ||
- req == IDH_REQ_GPU_FINI_ACCESS ||
- req == IDH_REQ_GPU_RESET_ACCESS) {
- r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ switch (req) {
+ case IDH_REQ_GPU_INIT_ACCESS:
+ case IDH_REQ_GPU_FINI_ACCESS:
+ case IDH_REQ_GPU_RESET_ACCESS:
+ event = IDH_READY_TO_ACCESS_GPU;
+ break;
+ case IDH_REQ_GPU_INIT_DATA:
+ event = IDH_REQ_GPU_INIT_DATA_READY;
+ break;
+ default:
+ break;
+ }
+
+ if (event != -1) {
+ r = xgpu_nv_poll_msg(adev, event);
if (r) {
- pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
- return r;
+ if (req != IDH_REQ_GPU_INIT_DATA) {
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+ return r;
+ } else /* host doesn't support REQ_GPU_INIT_DATA handshake */
+ adev->virt.req_init_data_ver = 0;
+ } else {
+ if (req == IDH_REQ_GPU_INIT_DATA) {
+ adev->virt.req_init_data_ver =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
+
+ /* assume V1 in case host doesn't set version number */
+ if (adev->virt.req_init_data_ver < 1)
+ adev->virt.req_init_data_ver = 1;
+ }
}
+
/* Retrieve checksum from mailbox2 */
if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
- RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
}
}
@@ -213,6 +224,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
return r;
}
+static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
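REQ_GPU_INIT_DATA is an optional handshake: if the host never replies, the VF records version 0 and carries on; if the host replies without setting a version in the receive register, V1 is assumed. A compact stand-alone sketch of that negotiation (names and the stub reply are illustrative):

#include <stdio.h>

static int host_reply_ver;	/* stand-in for mailbox RCV_DW1 */
static int poll_ready(void) { return -1; }	/* -1: host never replied */

static void request_init_data(int *ver)
{
	if (poll_ready()) {
		*ver = 0;	/* host lacks the handshake: not fatal */
		return;
	}
	*ver = host_reply_ver;
	if (*ver < 1)
		*ver = 1;	/* assume V1 when the host sets no version */
}

int main(void)
{
	int ver;

	request_init_data(&ver);
	printf("req_init_data_ver = %d\n", ver);
	return 0;
}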
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -226,11 +242,14 @@ static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+ u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ tmp |= 2;
+ else
+ tmp &= ~2;
- tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
- (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+ WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
@@ -282,11 +301,14 @@ static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+ u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ tmp |= 1;
+ else
+ tmp &= ~1;
- tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
- (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+ WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
@@ -378,6 +400,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
+ .req_init_data = xgpu_nv_request_init_data,
.reset_gpu = xgpu_nv_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_nv_mailbox_trans_msg,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 99b15f6865cb..52605e14a1a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -25,8 +25,32 @@
#define __MXGPU_NV_H__
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
-#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000
-#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000
+
+enum idh_request {
+ IDH_REQ_GPU_INIT_ACCESS = 1,
+ IDH_REL_GPU_INIT_ACCESS,
+ IDH_REQ_GPU_FINI_ACCESS,
+ IDH_REL_GPU_FINI_ACCESS,
+ IDH_REQ_GPU_RESET_ACCESS,
+ IDH_REQ_GPU_INIT_DATA,
+
+ IDH_LOG_VF_ERROR = 200,
+};
+
+enum idh_event {
+ IDH_CLR_MSG_BUF = 0,
+ IDH_READY_TO_ACCESS_GPU,
+ IDH_FLR_NOTIFICATION,
+ IDH_FLR_NOTIFICATION_CMPL,
+ IDH_SUCCESS,
+ IDH_FAIL,
+ IDH_QUERY_ALIVE,
+ IDH_REQ_GPU_INIT_DATA_READY,
+
+ IDH_TEXT_MESSAGE = 255,
+};
extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
@@ -35,7 +59,21 @@ int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
-#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
-#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
+#define mmMAILBOX_CONTROL 0xE5E
+
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (mmMAILBOX_CONTROL * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE + 1)
+
+#define mmMAILBOX_MSGBUF_TRN_DW0 0xE56
+#define mmMAILBOX_MSGBUF_TRN_DW1 0xE57
+#define mmMAILBOX_MSGBUF_TRN_DW2 0xE58
+#define mmMAILBOX_MSGBUF_TRN_DW3 0xE59
+
+#define mmMAILBOX_MSGBUF_RCV_DW0 0xE5A
+#define mmMAILBOX_MSGBUF_RCV_DW1 0xE5B
+#define mmMAILBOX_MSGBUF_RCV_DW2 0xE5C
+#define mmMAILBOX_MSGBUF_RCV_DW3 0xE5D
+
+#define mmMAILBOX_INT_CNTL 0xE5F
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
index f13dc6cc158f..713ee66a4d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
@@ -43,7 +43,8 @@ enum idh_event {
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
- IDH_EVENT_MAX
+
+ IDH_TEXT_MESSAGE = 255
};
extern const struct amdgpu_virt_ops xgpu_vi_virt_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index e08245a446fc..f97857ed3c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -49,8 +49,48 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+
adev->irq.ih.enabled = true;
+
+ if (adev->irq.ih1.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+ adev->irq.ih1.enabled = true;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+ adev->irq.ih2.enabled = true;
+ }
}
/**
@@ -66,12 +106,61 @@ static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
adev->irq.ih.enabled = false;
adev->irq.ih.rptr = 0;
+
+ if (adev->irq.ih1.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 0);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ adev->irq.ih1.enabled = false;
+ adev->irq.ih1.rptr = 0;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 0);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ adev->irq.ih2.enabled = false;
+ adev->irq.ih2.rptr = 0;
+ }
+
}
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -97,6 +186,43 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
return ih_rb_cntl;
}
+static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
+static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Reroute to IH ring 1 for VMC */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+ /* Reroute IH ring 1 for UMC */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+}
+
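navi10_ih_reroute_ih() programs an index/data register pair: select a client with IH_CLIENT_CFG_INDEX, then read-modify-write IH_CLIENT_CFG_DATA. A generic sketch of that indirect-window pattern over a fake register file; the field layout is an assumption, only the client indices 0x12 (VMC) and 0x1B (UMC) come from the code above:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[256];	/* fake register file */
static uint32_t index_reg;

static void wreg_index(uint32_t i) { index_reg = i; }
static uint32_t rreg_data(void) { return regs[index_reg]; }
static void wreg_data(uint32_t v) { regs[index_reg] = v; }

#define RING_ID_MASK	0x3u	/* assumed field layout */

/* Route one IH client to a ring through the index/data window. */
static void reroute_client(uint32_t client, uint32_t ring)
{
	uint32_t tmp;

	wreg_index(client);
	tmp = rreg_data();
	tmp = (tmp & ~RING_ID_MASK) | (ring & RING_ID_MASK);
	wreg_data(tmp);
}

int main(void)
{
	reroute_client(0x12, 1);	/* VMC -> ring 1 */
	reroute_client(0x1B, 1);	/* UMC -> ring 1 */
	printf("client 0x12 cfg: 0x%x\n", regs[0x12]);
	return 0;
}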
/**
* navi10_ih_irq_init - init and enable the interrupt ring
*
@@ -111,7 +237,7 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih = &adev->irq.ih;
- u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
+ u32 ih_rb_cntl, ih_chicken;
u32 tmp;
/* disable irqs */
@@ -127,6 +253,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
!!adev->irq.msi_enabled);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+ navi10_ih_reroute_ih(adev);
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
if (ih->use_bus_addr) {
@@ -137,8 +272,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
}
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-
/* set the writeback address whether it's enabled or not */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
lower_32_bits(ih->wptr_addr));
@@ -149,22 +282,68 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
- if (ih->use_doorbell) {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, OFFSET,
- ih->doorbell_index);
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, ENABLE, 1);
- } else {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, ENABLE, 0);
- }
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
+ navi10_ih_doorbell_rptr(ih));
adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
ih->doorbell_index);
+ ih = &adev->irq.ih1;
+ if (ih->ring_size) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
+ (ih->gpu_addr >> 40) & 0xff);
+
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ RB_FULL_DRAIN_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
+ navi10_ih_doorbell_rptr(ih));
+ }
+
+ ih = &adev->irq.ih2;
+ if (ih->ring_size) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
+ (ih->gpu_addr >> 40) & 0xff);
+
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
+ navi10_ih_doorbell_rptr(ih));
+ }
+
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
@@ -217,7 +396,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ if (ih == &adev->irq.ih)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ else if (ih == &adev->irq.ih1)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ else
+ BUG();
+
wptr = RREG32_NO_KIQ(reg);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -233,7 +420,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
wptr, ih->rptr, tmp);
ih->rptr = tmp;
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ if (ih == &adev->irq.ih)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ else if (ih == &adev->irq.ih1)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ else
+ BUG();
+
tmp = RREG32_NO_KIQ(reg);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(reg, tmp);
@@ -333,8 +528,52 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
navi10_ih_irq_rearm(adev, ih);
- } else
+ } else if (ih == &adev->irq.ih) {
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
+ } else if (ih == &adev->irq.ih1) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
+ } else if (ih == &adev->irq.ih2) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ }
+}
+
+/**
+ * navi10_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int navi10_ih_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ case 2:
+ *adev->irq.ih2.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih2_work);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
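The self-interrupt handler only captures the WPTR carried in the IV and defers draining IH ring 1 or 2 to process context via work items. A minimal sketch of that dispatch with stand-in storage and scheduling:

#include <stdint.h>
#include <stdio.h>

static uint32_t ih1_wptr, ih2_wptr;
static void schedule_work_stub(const char *w) { printf("scheduled %s\n", w); }

/* Mirror of the switch above: stash the WPTR, punt the drain to a worker. */
static int self_irq(int ring_id, uint32_t wptr)
{
	switch (ring_id) {
	case 1:
		ih1_wptr = wptr;
		schedule_work_stub("ih1_work");
		break;
	case 2:
		ih2_wptr = wptr;
		schedule_work_stub("ih2_work");
		break;
	default:
		break;
	}
	return 0;
}

int main(void)
{
	self_irq(1, 0x40);
	printf("ih1 wptr %u, ih2 wptr %u\n", ih1_wptr, ih2_wptr);
	return 0;
}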
+static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
+ .process = navi10_ih_self_irq,
+};
+
+static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
}
static int navi10_ih_early_init(void *handle)
@@ -342,6 +581,7 @@ static int navi10_ih_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
navi10_ih_set_interrupt_funcs(adev);
+ navi10_ih_set_self_irq_funcs(adev);
return 0;
}
@@ -351,6 +591,12 @@ static int navi10_ih_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool use_bus_addr;
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+
+ if (r)
+ return r;
+
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
@@ -363,6 +609,20 @@ static int navi10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
r = amdgpu_irq_init(adev);
return r;
@@ -373,6 +633,8 @@ static int navi10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index f3a3fe746222..cbcf04578b99 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -290,23 +290,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
-static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev)
-{
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER);
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
- if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -338,6 +321,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_clockgating_state = nbio_v2_3_get_clockgating_state,
.ih_control = nbio_v2_3_ih_control,
.init_registers = nbio_v2_3_init_registers,
- .detect_hw_virt = nbio_v2_3_detect_hw_virt,
.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 635d9e1fc0a3..7b2fb050407d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -241,23 +241,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};
-static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
-{
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
- if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -294,5 +277,4 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_clockgating_state = nbio_v6_1_get_clockgating_state,
.ih_control = nbio_v6_1_ih_control,
.init_registers = nbio_v6_1_init_registers,
- .detect_hw_virt = nbio_v6_1_detect_hw_virt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index d6cbf26074bc..d34628e113fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -280,12 +280,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
-static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
-{
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
@@ -310,6 +304,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
.get_clockgating_state = nbio_v7_0_get_clockgating_state,
.ih_control = nbio_v7_0_ih_control,
.init_registers = nbio_v7_0_init_registers,
- .detect_hw_virt = nbio_v7_0_detect_hw_virt,
.remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 149d386590df..e629156173d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -185,7 +185,7 @@ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
- ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
@@ -292,23 +292,6 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};
-static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
-{
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
- if (!reg) {
- if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
@@ -340,14 +323,20 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
obj->err_data.ce_count += err_data.ce_count;
if (err_data.ce_count)
- DRM_INFO("%ld correctable errors detected in %s block\n",
- obj->err_data.ce_count, adev->nbio.ras_if->name);
+ dev_info(adev->dev, "%ld correctable hardware "
+ "errors detected in %s block, "
+ "no user action is needed.\n",
+ obj->err_data.ce_count,
+ adev->nbio.ras_if->name);
if (err_data.ue_count)
- DRM_INFO("%ld uncorrectable errors detected in %s block\n",
- obj->err_data.ue_count, adev->nbio.ras_if->name);
+ dev_info(adev->dev, "%ld uncorrectable hardware "
+ "errors detected in %s block\n",
+ obj->err_data.ue_count,
+ adev->nbio.ras_if->name);
- DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
+ dev_info(adev->dev, "RAS controller interrupt triggered "
+ "by NBIF error\n");
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
@@ -561,7 +550,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_clockgating_state = nbio_v7_4_get_clockgating_state,
.ih_control = nbio_v7_4_ih_control,
.init_registers = nbio_v7_4_init_registers,
- .detect_hw_virt = nbio_v7_4_detect_hw_virt,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 52318b03c424..6655dd2009b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -453,18 +453,19 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
- /* Set IP register base before any HW register access */
- r = nv_reg_base_init(adev);
- if (r)
- return r;
-
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- adev->nbio.funcs->detect_hw_virt(adev);
-
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
adev->virt.ops = &xgpu_nv_virt_ops;
+ /* try to send a GPU_INIT_DATA request to the host */
+ amdgpu_virt_request_init_data(adev);
+ }
+
+ /* Set IP register base before any HW register access */
+ r = nv_reg_base_init(adev);
+ if (r)
+ return r;
switch (adev->asic_type) {
case CHIP_NAVI10:
@@ -497,8 +498,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- !amdgpu_sriov_vf(adev))
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -548,13 +548,6 @@ static bool nv_need_full_reset(struct amdgpu_device *adev)
return true;
}
-static void nv_get_pcie_usage(struct amdgpu_device *adev,
- uint64_t *count0,
- uint64_t *count1)
-{
- /*TODO*/
-}
-
static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
@@ -629,7 +622,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.invalidate_hdp = &nv_invalidate_hdp,
.init_doorbell_index = &nv_init_doorbell_index,
.need_full_reset = &nv_need_full_reset,
- .get_pcie_usage = &nv_get_pcie_usage,
.need_reset_on_init = &nv_need_reset_on_init,
.get_pcie_replay_count = &nv_get_pcie_replay_count,
.supports_baco = &nv_asic_supports_baco,
diff --git a/drivers/gpu/drm/amd/amdgpu/nvd.h b/drivers/gpu/drm/amd/amdgpu/nvd.h
index 1de984647dbb..fd6b58243b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/nvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/nvd.h
@@ -256,6 +256,54 @@
#define PACKET3_BLK_CNTX_UPDATE 0x53
#define PACKET3_INCR_UPDT_STATE 0x55
#define PACKET3_ACQUIRE_MEM 0x58
+/* 1. HEADER
+ * 2. COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 3. COHER_SIZE [31:0]
+ * 4. COHER_SIZE_HI [7:0]
+ * 5. COHER_BASE_LO [31:0]
+ * 6. COHER_BASE_HI [23:0]
+ * 7. POLL_INTERVAL [15:0]
+ * 8. GCR_CNTL [18:0]
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(x) ((x) << 0)
+ /*
+ * 0:NOP
+ * 1:ALL
+ * 2:RANGE
+ * 3:FIRST_LAST
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_RANGE(x) ((x) << 2)
+ /*
+ * 0:ALL
+ * 1:reserved
+ * 2:RANGE
+ * 3:FIRST_LAST
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(x) ((x) << 4)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(x) ((x) << 5)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_WB(x) ((x) << 6)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(x) ((x) << 7)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(x) ((x) << 8)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(x) ((x) << 9)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_US(x) ((x) << 10)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_RANGE(x) ((x) << 11)
+ /*
+ * 0:ALL
+ * 1:VOL
+ * 2:RANGE
+ * 3:FIRST_LAST
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_DISCARD(x) ((x) << 13)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(x) ((x) << 14)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(x) ((x) << 15)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_SEQ(x) ((x) << 16)
+ /*
+ * 0: PARALLEL
+ * 1: FORWARD
+ * 2: REVERSE
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_RANGE_IS_PA (1 << 18)
#define PACKET3_REWIND 0x59
#define PACKET3_INTERRUPT 0x5A
#define PACKET3_GEN_PDEPTE 0x5B
@@ -306,6 +354,7 @@
#define PACKET3_GET_LOD_STATS 0x8E
#define PACKET3_DRAW_MULTI_PREAMBLE 0x8F
#define PACKET3_FRAME_CONTROL 0x90
+# define FRAME_TMZ (1 << 0)
# define FRAME_CMD(x) ((x) << 28)
/*
* x=0: tmz_begin
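/*
 * An illustrative (hypothetical) use of the GCR_CNTL helpers added
 * above: encoding a GL2 writeback + invalidate that also drops the
 * GLV/GL1 caches, with forward sequencing. The field values are
 * examples only, taken from the value tables in the comments above.
 */
uint32_t gcr_cntl = PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
		    PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
		    PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
		    PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
		    PACKET3_ACQUIRE_MEM_GCR_CNTL_SEQ(1);	/* 1: FORWARD */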
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 7539104175e8..d7f92634eba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -50,15 +50,14 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
const char *chip_name;
char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_RAVEN:
- if (adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -66,22 +65,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ err = psp_init_asd_microcode(psp, chip_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
- if (err)
- goto out;
-
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -126,8 +113,6 @@ out:
dev_err(adev->dev,
"psp v10.0: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
}
return err;
@@ -230,129 +215,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v10_0_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v10_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
-
static int psp_v10_0_mode1_reset(struct psp_context *psp)
{
DRM_INFO("psp mode 1 reset not supported now! \n");
@@ -379,7 +241,6 @@ static const struct psp_funcs psp_v10_0_funcs = {
.ring_create = psp_v10_0_ring_create,
.ring_stop = psp_v10_0_ring_stop,
.ring_destroy = psp_v10_0_ring_destroy,
- .compare_sram_data = psp_v10_0_compare_sram_data,
.mode1_reset = psp_v10_0_mode1_reset,
.ring_get_wptr = psp_v10_0_ring_get_wptr,
.ring_set_wptr = psp_v10_0_ring_set_wptr,
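/*
 * The ASD loading boilerplate deleted above is folded into the shared
 * psp_init_asd_microcode() helper. A sketch of what that helper
 * plausibly does, reconstructed from the removed lines (the exact
 * upstream implementation may differ):
 */
static int psp_init_asd_microcode_sketch(struct psp_context *psp,
					 const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	const struct psp_firmware_header_v1_0 *hdr;
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->psp.asd_fw);
	if (err)
		goto out;

	hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
	adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
	adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
	adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
	adev->psp.asd_start_addr = (uint8_t *)hdr +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes);
	return 0;

out:
	release_firmware(adev->psp.asd_fw);
	adev->psp.asd_fw = NULL;
	return err;
}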
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0afd610a1263..1de89cc3c355 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -75,10 +75,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
const char *chip_name;
char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *sos_hdr;
- const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
- const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
- const struct psp_firmware_header_v1_0 *asd_hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
@@ -103,66 +99,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+ err = psp_init_sos_microcode(psp, chip_name);
if (err)
- goto out;
+ return err;
- err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ err = psp_init_asd_microcode(psp, chip_name);
if (err)
- goto out;
-
- sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
- amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
-
- switch (sos_hdr->header.header_version_major) {
- case 1:
- adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
- adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
- le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
- adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr->sos_offset_bytes);
- if (sos_hdr->header.header_version_minor == 1) {
- sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
- adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
- adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
- adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
- adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
- }
- if (sos_hdr->header.header_version_minor == 2) {
- sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
- adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
- adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
- }
- break;
- default:
- dev_err(adev->dev,
- "Unsupported psp sos firmware\n");
- err = -EINVAL;
- goto out;
- }
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out1;
-
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
- if (err)
- goto out1;
-
- asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
- le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+ return err;
switch (adev->asic_type) {
case CHIP_VEGA20:
@@ -194,6 +137,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ if (amdgpu_sriov_vf(adev))
+ break;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -229,15 +174,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
out2:
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
-out1:
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
-out:
- dev_err(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
-
return err;
}
@@ -283,11 +219,8 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
- if (psp_v11_0_is_sos_alive(psp)) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
- }
ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
@@ -319,11 +252,8 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
- if (psp_v11_0_is_sos_alive(psp)) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
- }
ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
@@ -446,13 +376,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
return 0;
}
-static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
- return true;
- return false;
-}
-
static int psp_v11_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -460,7 +383,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
/* Write the ring destroy command*/
- if (psp_v11_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
else
@@ -471,7 +394,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) */
- if (psp_v11_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
else
@@ -489,7 +412,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- if (psp_v11_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
ret = psp_v11_0_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
@@ -567,138 +490,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v11_0_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- if (adev->asic_type < CHIP_NAVI10) {
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- } else {
- *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10;
- *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_DATA_NV10;
- }
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- if (adev->asic_type < CHIP_NAVI10) {
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- } else {
- *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10;
- *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10;
- }
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v11_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
static int psp_v11_0_mode1_reset(struct psp_context *psp)
{
int ret;
@@ -733,181 +524,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
return 0;
}
-/* TODO: Fill in the following functions once the PSP firmware interface for XGMI is ready.
- * For now, return success and hack the hive_id so high level code can
- * start testing
- */
-static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
- int number_devices, struct psp_xgmi_topology_info *topology)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
- struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
- int i;
- int ret;
-
- if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
- return -EINVAL;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- /* Fill in the shared memory with topology information as input */
- topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
- topology_info_input->num_nodes = number_devices;
-
- for (i = 0; i < topology_info_input->num_nodes; i++) {
- topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
- topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
- topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
- topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
- }
-
- /* Invoke xgmi ta to get the topology information */
- ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
- if (ret)
- return ret;
-
- /* Read the output topology information from the shared memory */
- topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
- topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
- for (i = 0; i < topology->num_nodes; i++) {
- topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
- topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
- topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
- topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
- }
-
- return 0;
-}
-
-static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
- int number_devices, struct psp_xgmi_topology_info *topology)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
- int i;
-
- if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
- return -EINVAL;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
- topology_info_input->num_nodes = number_devices;
-
- for (i = 0; i < topology_info_input->num_nodes; i++) {
- topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
- topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
- topology_info_input->nodes[i].is_sharing_enabled = 1;
- topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
- }
-
- /* Invoke xgmi ta to set topology information */
- return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
-}
-
-static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- int ret;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
-
- /* Invoke xgmi ta to get hive id */
- ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
- if (ret)
- return ret;
-
- *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
-
- return 0;
-}
-
-static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- int ret;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
-
- /* Invoke xgmi ta to get the node id */
- ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
- if (ret)
- return ret;
-
- *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
-
- return 0;
-}
-
-static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
- struct ta_ras_trigger_error_input *info)
-{
- struct ta_ras_shared_memory *ras_cmd;
- int ret;
-
- if (!psp->ras.ras_initialized)
- return -EINVAL;
-
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
- ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
- ras_cmd->ras_in_message.trigger_error = *info;
-
- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
- if (ret)
- return -EINVAL;
-
- /* If err_event_athub occurs, the error injection was successful; however,
- the return status from the TA is no longer reliable */
- if (amdgpu_ras_intr_triggered())
- return 0;
-
- return ras_cmd->ras_status;
-}
-
-static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
-{
-#if 0
- // not supported yet.
- struct ta_ras_shared_memory *ras_cmd;
- int ret;
-
- if (!psp->ras.ras_initialized)
- return -EINVAL;
-
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
- ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
- ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
-
- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
- if (ret)
- return -EINVAL;
-
- return ras_cmd->ras_status;
-#else
- return -EINVAL;
-#endif
-}
-
-static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
-{
- return psp_rlc_autoload_start(psp);
-}
-
static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
{
int ret;
@@ -1099,7 +715,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
uint32_t data;
struct amdgpu_device *adev = psp->adev;
- if (psp_v11_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -1111,7 +727,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
- if (psp_v11_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
} else
@@ -1203,16 +819,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_create = psp_v11_0_ring_create,
.ring_stop = psp_v11_0_ring_stop,
.ring_destroy = psp_v11_0_ring_destroy,
- .compare_sram_data = psp_v11_0_compare_sram_data,
.mode1_reset = psp_v11_0_mode1_reset,
- .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
- .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
- .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
- .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
- .support_vmr_ring = psp_v11_0_support_vmr_ring,
- .ras_trigger_error = psp_v11_0_ras_trigger_error,
- .ras_cure_posion = psp_v11_0_ras_cure_posion,
- .rlc_autoload_start = psp_v11_0_rlc_autoload_start,
.mem_training_init = psp_v11_0_memory_training_init,
.mem_training_fini = psp_v11_0_memory_training_fini,
.mem_training = psp_v11_0_memory_training,
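/*
 * Behavioral note on the VMR-ring change above (illustration only):
 * the removed psp_v11_0_support_vmr_ring() gated the C2PMSG_101/102
 * mailbox pair on both SR-IOV and a minimum sOS version:
 *
 *	amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045
 *
 * After this patch the driver keys purely off amdgpu_sriov_vf(), so a
 * VF always uses C2PMSG_101/102 and bare metal keeps C2PMSG_64/67.
 */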
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 58d8b6d732e8..6c9614f77d33 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -45,11 +45,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
- char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *asd_hdr;
-
- DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_RENOIR:
@@ -59,28 +55,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out1;
-
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
- if (err)
- goto out1;
-
- asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
- le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
-
- return 0;
-
-out1:
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
-
+ err = psp_init_asd_microcode(psp, chip_name);
return err;
}
@@ -95,11 +70,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
* have already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (sol_reg)
return 0;
- }
/* Wait for the bootloader to signify that it is ready by setting bit 31 of C2PMSG_35 to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -228,13 +200,6 @@ static int psp_v12_0_ring_init(struct psp_context *psp,
return 0;
}
-static bool psp_v12_0_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
- return true;
- return false;
-}
-
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -243,7 +208,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- if (psp_v12_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(psp->adev)) {
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -295,7 +260,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
/* Write the ring destroy command*/
- if (psp_v12_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
else
@@ -306,7 +271,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) */
- if (psp_v12_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
else
@@ -334,128 +299,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v12_0_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v12_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v12_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
static int psp_v12_0_mode1_reset(struct psp_context *psp)
{
int ret;
@@ -495,7 +338,7 @@ static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp)
uint32_t data;
struct amdgpu_device *adev = psp->adev;
- if (psp_v12_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -507,7 +350,7 @@ static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
- if (psp_v12_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
} else
@@ -522,7 +365,6 @@ static const struct psp_funcs psp_v12_0_funcs = {
.ring_create = psp_v12_0_ring_create,
.ring_stop = psp_v12_0_ring_stop,
.ring_destroy = psp_v12_0_ring_destroy,
- .compare_sram_data = psp_v12_0_compare_sram_data,
.mode1_reset = psp_v12_0_mode1_reset,
.ring_get_wptr = psp_v12_0_ring_get_wptr,
.ring_set_wptr = psp_v12_0_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 735c43c7daab..f2e725f72d2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -50,9 +50,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
#define smnMP1_FIRMWARE_FLAGS 0x3010028
-static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
-
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp);
static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -60,9 +57,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
- char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -76,55 +71,15 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ err = psp_init_sos_microcode(psp, chip_name);
if (err)
- goto out;
-
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
- adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
- le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
- adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(hdr->sos_offset_bytes);
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out;
+ return err;
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ err = psp_init_asd_microcode(psp, chip_name);
if (err)
- goto out;
-
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ return err;
return 0;
-out:
- if (err) {
- dev_err(adev->dev,
- "psp v3.1: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
- }
-
- return err;
}
static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
@@ -168,41 +123,19 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
return ret;
}
-static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
-{
- int i;
-
- if (ver == adev->psp.sos_fw_version)
- return true;
-
- /*
- * Double check against the latest four legacy versions.
- * If yes, it is still the right version.
- */
- for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) {
- if (sos_old_versions[i] == adev->psp.sos_fw_version)
- return true;
- }
-
- return false;
-}
-
static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
{
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg, ver;
+ uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (sol_reg)
return 0;
- }
/* Wait for the bootloader to signify that it is ready by setting bit 31 of C2PMSG_35 to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -227,11 +160,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
0, true);
-
- ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- if (!psp_v3_1_match_version(adev, ver))
- DRM_WARN("SOS version doesn't match\n");
-
return ret;
}
@@ -302,7 +230,7 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
psp_v3_1_reroute_ih(psp);
- if (psp_v3_1_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
ret = psp_v3_1_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n");
@@ -360,34 +288,26 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- if (psp_v3_1_support_vmr_ring(psp)) {
- /* Write the Destroy GPCOM ring command to C2PMSG_101 */
- psp_ring_reg = GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg);
-
- /* there might be handshake issue which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
- } else {
- /* Write the ring destroy command to C2PMSG_64 */
- psp_ring_reg = 3 << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+ /* Write the ring destroy command*/
+ if (amdgpu_sriov_vf(adev))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
- /* there might be handshake issue which needs delay */
- mdelay(20);
+ /* there might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
- }
+ /* Wait for response flag (bit 31) */
+ if (amdgpu_sriov_vf(adev))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
return ret;
}
@@ -410,128 +330,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v3_1_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v3_1_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -575,20 +373,12 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
return 0;
}
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev))
- return true;
-
- return false;
-}
-
static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
{
uint32_t data;
struct amdgpu_device *adev = psp->adev;
- if (psp_v3_1_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -599,7 +389,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
- if (psp_v3_1_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
/* send interrupt to PSP for SRIOV ring write pointer update */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
@@ -616,10 +406,8 @@ static const struct psp_funcs psp_v3_1_funcs = {
.ring_create = psp_v3_1_ring_create,
.ring_stop = psp_v3_1_ring_stop,
.ring_destroy = psp_v3_1_ring_destroy,
- .compare_sram_data = psp_v3_1_compare_sram_data,
.smu_reload_quirk = psp_v3_1_smu_reload_quirk,
.mode1_reset = psp_v3_1_mode1_reset,
- .support_vmr_ring = psp_v3_1_support_vmr_ring,
.ring_get_wptr = psp_v3_1_ring_get_wptr,
.ring_set_wptr = psp_v3_1_ring_set_wptr,
};
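/*
 * The psp_wait_for(..., 0x80000000, 0x80000000, false) calls in the
 * ring paths above poll for the response flag in bit 31. A sketch of
 * the assumed semantics (the helper lives elsewhere in the driver and
 * may differ): with check_changed == false it spins until
 *
 *	(RREG32(reg_index) & mask) == reg_val
 *
 * or a timeout expires, i.e. here: until bit 31 reads back as 1.
 */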
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 7d509a40076f..5f304d61999e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -355,8 +355,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -614,7 +612,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -874,7 +873,8 @@ static int sdma_v2_4_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1200,7 +1200,8 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b6109a99fc43..c59f6f6f4c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -529,8 +529,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -886,7 +884,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -1158,7 +1157,8 @@ static int sdma_v3_0_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1638,7 +1638,8 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 5f3a5ee2a3f4..33501c6c7189 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -115,17 +115,21 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
@@ -174,6 +178,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
@@ -203,6 +208,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@ -222,27 +228,35 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
- SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002)
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};
static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
@@ -472,7 +486,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
ARRAY_SIZE(golden_settings_sdma_4_1));
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
soc15_program_register_sequence(adev,
golden_settings_sdma_rv2,
ARRAY_SIZE(golden_settings_sdma_rv2));
@@ -561,9 +575,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
chip_name = "vega20";
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -923,8 +937,6 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
- sdma[i]->sched.ready = false;
}
}
@@ -971,8 +983,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
- sdma[i]->sched.ready = false;
}
}
@@ -1539,7 +1549,8 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -1840,7 +1851,7 @@ static int sdma_v4_0_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = true;
- DRM_INFO("use_doorbell being set to: [%s]\n",
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
/* doorbell size is 2 dwords, get DWORD offset */
@@ -1848,7 +1859,8 @@ static int sdma_v4_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -1866,7 +1878,8 @@ static int sdma_v4_0_sw_init(void *handle)
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -2445,10 +2458,12 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
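The copy-buffer emit callbacks above gain a tmz flag so moves through protected (TMZ) memory can set the protection bit in the packet header. Below is a minimal standalone sketch of that header composition; the macro bodies and the bit-18 TMZ position are illustrative stand-ins, not authoritative hardware definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the SDMA packet macros used above. */
#define SDMA_OP_COPY				1
#define SDMA_SUBOP_COPY_LINEAR			0
#define SDMA_PKT_HEADER_OP(x)			(((x) & 0xff) << 0)
#define SDMA_PKT_HEADER_SUB_OP(x)		(((x) & 0xff) << 8)
#define SDMA_PKT_COPY_LINEAR_HEADER_TMZ(x)	(((x) & 0x1) << 18) /* assumed bit */

/* Same OR-composition as sdma_v4_0_emit_copy_buffer(): opcode,
 * sub-opcode, plus the optional TMZ protection bit. */
static uint32_t copy_linear_header(int tmz)
{
	return SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
	       SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
	       SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
}

int main(void)
{
	printf("plain 0x%08x, tmz 0x%08x\n",
	       (unsigned int)copy_linear_header(0),
	       (unsigned int)copy_linear_header(1));
	return 0;
}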
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index d2840c2f6286..b544baf306f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -88,6 +88,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
};
+static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+};
+
static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -141,9 +164,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
break;
case CHIP_NAVI12:
- soc15_program_register_sequence(adev,
- golden_settings_sdma_5,
- (const u32)ARRAY_SIZE(golden_settings_sdma_5));
+ if (amdgpu_sriov_vf(adev))
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_5_sriov,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
+ else
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_5,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_5));
soc15_program_register_sequence(adev,
golden_settings_sdma_nv12,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
@@ -514,9 +542,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -541,7 +566,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
- u32 f32_cntl, phase_quantum = 0;
+ u32 f32_cntl = 0, phase_quantum = 0;
int i;
if (amdgpu_sdma_phase_quantum) {
@@ -569,9 +594,12 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
- AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ }
+
if (enable && amdgpu_sdma_phase_quantum) {
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
@@ -580,7 +608,8 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
}
}
@@ -603,6 +632,9 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
sdma_v5_0_rlc_stop(adev);
}
+ if (amdgpu_sriov_vf(adev))
+ return;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
@@ -635,7 +667,8 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -711,26 +744,28 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
/* set minor_ptr_update to 0 after wptr programmed */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ }
if (!amdgpu_sriov_vf(adev)) {
/* unhalt engine */
@@ -960,7 +995,8 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
@@ -1236,7 +1272,7 @@ static int sdma_v5_0_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = true;
- DRM_INFO("use_doorbell being set to: [%s]\n",
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
ring->doorbell_index = (i == 0) ?
@@ -1248,7 +1284,8 @@ static int sdma_v5_0_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1399,14 +1436,16 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
{
u32 sdma_cntl;
- u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
- sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
- sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
+ if (!amdgpu_sriov_vf(adev)) {
+ u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
+ sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
+ sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
- sdma_cntl = RREG32(reg_offset);
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_cntl);
+ }
return 0;
}
@@ -1667,10 +1706,12 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
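A recurring pattern in the sdma_v5_0.c hunks above is guarding host-owned register accesses with amdgpu_sriov_vf(): under SR-IOV the host programs these registers, so the VF skips the read-modify-write while still running the rest of the sequence. A compile-and-run sketch of that shape, with toy accessors standing in for RREG32/WREG32:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sriov_vf;		/* models amdgpu_sriov_vf(adev) */
static uint32_t mmio[1];	/* models one MMIO register     */

static uint32_t rreg32(unsigned int off) { return mmio[off]; }
static void wreg32(unsigned int off, uint32_t v) { mmio[off] = v; }

/* Same shape as the sdma_v5_0_ctx_switch_enable() change: skip the
 * read and the final write on a VF, keep the shared middle part. */
static void set_auto_ctxsw(unsigned int off, bool enable)
{
	uint32_t v = 0;

	if (!sriov_vf) {
		v = rreg32(off);
		v = enable ? (v | 0x1) : (v & ~0x1u); /* toy enable bit */
	}
	/* ...phase-quantum programming happens here in either mode... */
	if (!sriov_vf)
		wreg32(off, v);
}

int main(void)
{
	set_auto_ctxsw(0, true);
	printf("bare metal: 0x%x\n", (unsigned int)mmio[0]);
	sriov_vf = true;
	set_auto_ctxsw(0, false);
	printf("sriov vf:   0x%x\n", (unsigned int)mmio[0]);
	return 0;
}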
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 4d415bfdb42f..153db3f763bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1249,12 +1249,6 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
return 0;
}
-static void si_detect_hw_virtualization(struct amdgpu_device *adev)
-{
- if (is_virtual_machine()) /* passthrough mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
@@ -2165,8 +2159,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
int si_set_ip_blocks(struct amdgpu_device *adev)
{
- si_detect_hw_virtualization(adev);
-
switch (adev->asic_type) {
case CHIP_VERDE:
case CHIP_TAHITI:
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 42d5601b6bf3..7d2bbcbe547b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -124,7 +124,6 @@ static void si_dma_stop(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
- ring->sched.ready = false;
}
}
@@ -267,7 +266,8 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -504,7 +504,8 @@ static int si_dma_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -775,7 +776,8 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
1, 0, 0, byte_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 0860e85a2d35..c00ba4b23c9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -345,26 +345,6 @@ static const struct si_dte_data dte_data_tahiti =
false
};
-#if 0
-static const struct si_dte_data dte_data_tahiti_le =
-{
- { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
- { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
- 0x5,
- 0xAFC8,
- 0x64,
- 0x32,
- 1,
- 0,
- 0x10,
- { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
- { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
- { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
- 85,
- true
-};
-#endif
-
static const struct si_dte_data dte_data_tahiti_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index d42a8d8a0dea..c7c9e07962b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -564,7 +564,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
+ !(adev->apu_flags & AMD_APU_IS_RAVEN2))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -708,7 +709,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->df.funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
- adev->nbio.funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
@@ -1130,16 +1130,23 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RAVEN:
adev->asic_funcs = &soc15_asic_funcs;
+ if (adev->pdev->device == 0x15dd)
+ adev->apu_flags |= AMD_APU_IS_RAVEN;
+ if (adev->pdev->device == 0x15d8)
+ adev->apu_flags |= AMD_APU_IS_PICASSO;
if (adev->rev_id >= 0x8)
+ adev->apu_flags |= AMD_APU_IS_RAVEN2;
+
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->external_rev_id = adev->rev_id + 0x79;
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
adev->external_rev_id = adev->rev_id + 0x41;
else if (adev->rev_id == 1)
adev->external_rev_id = adev->rev_id + 0x20;
else
adev->external_rev_id = adev->rev_id + 0x01;
- if (adev->rev_id >= 0x8) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1157,7 +1164,7 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
- } else if (adev->pdev->device == 0x15d8) {
+ } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1218,11 +1225,12 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x32;
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
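The soc15.c hunks replace scattered pdev->device/rev_id comparisons with apu_flags bits classified once in early init and tested everywhere else. A small sketch of that classify-once idiom; the device IDs and revision cutoff mirror the diff, but the flag values and helper are hypothetical stand-ins:

#include <stdint.h>
#include <stdio.h>

#define APU_IS_RAVEN	(1u << 0)	/* stand-in for AMD_APU_IS_RAVEN   */
#define APU_IS_RAVEN2	(1u << 1)	/* stand-in for AMD_APU_IS_RAVEN2  */
#define APU_IS_PICASSO	(1u << 2)	/* stand-in for AMD_APU_IS_PICASSO */

/* Mirrors soc15_common_early_init(): classify once from PCI device ID
 * and revision, then test flags instead of repeating raw ID checks. */
static uint32_t classify_raven(uint16_t pci_device, uint8_t rev_id)
{
	uint32_t flags = 0;

	if (pci_device == 0x15dd)
		flags |= APU_IS_RAVEN;
	if (pci_device == 0x15d8)
		flags |= APU_IS_PICASSO;
	if (rev_id >= 0x8)
		flags |= APU_IS_RAVEN2;
	return flags;
}

int main(void)
{
	printf("0x15d8 rev 0 -> %#x (Picasso)\n",
	       (unsigned int)classify_raven(0x15d8, 0));
	printf("0x15dd rev 9 -> %#x (Raven2)\n",
	       (unsigned int)classify_raven(0x15dd, 9));
	return 0;
}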
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index c893c645a4b2..56d02aa690a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -35,6 +35,9 @@
#define RREG32_SOC15(ip, inst, reg) \
RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
+ RREG32_NO_KIQ(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index edfe50821cd9..799925d22fc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -253,7 +253,30 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
+/* 1. HEADER
+ * 2. COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 3. COHER_SIZE [31:0]
+ * 4. COHER_SIZE_HI [7:0]
+ * 5. COHER_BASE_LO [31:0]
+ * 6. COHER_BASE_HI [23:0]
+ * 7. POLL_INTERVAL [15:0]
+ */
+/* COHER_CNTL fields for CP_COHER_CNTL */
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_NC_ACTION_ENA(x) ((x) << 3)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WC_ACTION_ENA(x) ((x) << 4)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_INV_METADATA_ACTION_ENA(x) ((x) << 5)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_VOL_ACTION_ENA(x) ((x) << 15)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(x) ((x) << 18)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(x) ((x) << 22)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(x) ((x) << 23)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_CB_ACTION_ENA(x) ((x) << 25)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_DB_ACTION_ENA(x) ((x) << 26)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(x) ((x) << 27)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_VOL_ACTION_ENA(x) ((x) << 28)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(x) ((x) << 29)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_WB_ACTION_ENA(x) ((x) << 30)
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
@@ -286,6 +309,7 @@
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SWITCH_BUFFER 0x8B
#define PACKET3_FRAME_CONTROL 0x90
+# define FRAME_TMZ (1 << 0)
# define FRAME_CMD(x) ((x) << 28)
/*
* x=0: tmz_begin
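The new PACKET3_ACQUIRE_MEM field macros let callers compose the COHER_CNTL dword symbolically instead of hard-coding a magic constant. A toy composition using abbreviated copies of three of the shifts from the diff; the particular set of actions requested is only an example:

#include <stdint.h>
#include <stdio.h>

/* Abbreviated copies of three COHER_CNTL field shifts from the diff. */
#define TC_WB_ACTION_ENA(x)	((x) << 18)
#define TCL1_ACTION_ENA(x)	((x) << 22)
#define TC_ACTION_ENA(x)	((x) << 23)

int main(void)
{
	/* Example only: request an L1 + L2 flush/invalidate in the
	 * COHER_CNTL dword (dword 2 of PACKET3_ACQUIRE_MEM). */
	uint32_t coher_cntl = TC_WB_ACTION_ENA(1) |
			      TCL1_ACTION_ENA(1) |
			      TC_ACTION_ENA(1);

	printf("COHER_CNTL = 0x%08x\n", (unsigned int)coher_cntl);
	return 0;
}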
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index ca7d05993ca2..745ed0fba1ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -24,6 +24,8 @@
#ifndef _TA_RAS_IF_H
#define _TA_RAS_IF_H
+#define RAS_TA_HOST_IF_VER 0
+
/* Responses have bit 31 set */
#define RSP_ID_MASK (1U << 31)
#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
@@ -36,18 +38,24 @@ enum ras_command {
TA_RAS_COMMAND__TRIGGER_ERROR,
};
-enum ta_ras_status {
- TA_RAS_STATUS__SUCCESS = 0x00,
- TA_RAS_STATUS__RESET_NEEDED = 0x01,
- TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0x02,
- TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0x03,
- TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0x04,
- TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0x05,
- TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0x06,
- TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0x07,
- TA_RAS_STATUS__ERROR_TIMEOUT = 0x08,
- TA_RAS_STATUS__ERROR_BLOCK_DISABLED = 0x09,
- TA_RAS_STATUS__ERROR_GENERIC = 0x10,
+enum ta_ras_status
+{
+ TA_RAS_STATUS__SUCCESS = 0x00,
+ TA_RAS_STATUS__RESET_NEEDED = 0xA001,
+ TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
+ TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
+ TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004,
+ TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0xA005,
+ TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0xA006,
+ TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007,
+ TA_RAS_STATUS__ERROR_TIMEOUT = 0xA008,
+ TA_RAS_STATUS__ERROR_BLOCK_DISABLED = 0xA009,
+ TA_RAS_STATUS__ERROR_GENERIC = 0xA00A,
+ TA_RAS_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B,
+ TA_RAS_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
+ TA_RAS_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
+ TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F
};
enum ta_ras_block {
@@ -97,22 +105,39 @@ struct ta_ras_trigger_error_input {
uint64_t value; // method of error injection, i.e. persistent, coherent, etc.
};
+struct ta_ras_output_flags
+{
+ uint8_t ras_init_success_flag;
+ uint8_t err_inject_switch_disable_flag;
+ uint8_t reg_access_failure_flag;
+};
+
/* Common input structure for RAS callbacks */
/**********************************************************/
union ta_ras_cmd_input {
struct ta_ras_enable_features_input enable_features;
struct ta_ras_disable_features_input disable_features;
struct ta_ras_trigger_error_input trigger_error;
+
+ uint32_t reserve_pad[256];
+};
+
+union ta_ras_cmd_output
+{
+ struct ta_ras_output_flags flags;
+
+ uint32_t reserve_pad[256];
};
/* Shared Memory structures */
/**********************************************************/
struct ta_ras_shared_memory {
- uint32_t cmd_id;
- uint32_t resp_id;
- enum ta_ras_status ras_status;
- uint32_t reserved;
- union ta_ras_cmd_input ras_in_message;
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ uint32_t ras_status;
+ uint32_t if_version;
+ union ta_ras_cmd_input ras_in_message;
+ union ta_ras_cmd_output ras_out_message;
};
#endif // _TA_RAS_IF_H
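The reworked ta_ras_if.h grows the shared buffer with an interface version and an output union, and widens ras_status to a raw u32 so new TA status codes need no kernel-side enum change. A host-side sketch of filling and checking that layout; the field names are abbreviated stand-ins and the padding arrays are omitted:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct ras_output_flags {		/* models ta_ras_output_flags */
	uint8_t ras_init_success_flag;
	uint8_t err_inject_switch_disable_flag;
	uint8_t reg_access_failure_flag;
};

struct ras_shared_memory {		/* models ta_ras_shared_memory */
	uint32_t cmd_id;
	uint32_t resp_id;
	uint32_t ras_status;		/* raw status, no longer an enum  */
	uint32_t if_version;		/* stamped with RAS_TA_HOST_IF_VER */
	struct ras_output_flags out;	/* models ras_out_message.flags   */
};

int main(void)
{
	struct ras_shared_memory shm;

	memset(&shm, 0, sizeof(shm));
	shm.if_version = 0;		/* RAS_TA_HOST_IF_VER */
	shm.cmd_id = 0;			/* e.g. enable-features */

	/* The TA fills ras_status and the flags; the host now checks
	 * both rather than the status word alone. */
	if (shm.ras_status == 0x00 /* SUCCESS */ &&
	    !shm.out.reg_access_failure_flag)
		printf("RAS command accepted\n");
	return 0;
}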
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 14d346321a5f..418cf097c918 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -56,24 +56,43 @@ const uint32_t
static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
{
- WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+ rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+ rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN, 1);
+
+ WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}
static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
{
- WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+ rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+ rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN, 0);
+
+ WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}
static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
{
- uint32_t rsmu_umc_index;
+ uint32_t rsmu_umc_addr, rsmu_umc_val;
- rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+ rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
- return REG_GET_FIELD(rsmu_umc_index,
+ return REG_GET_FIELD(rsmu_umc_val,
RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN);
}
@@ -85,6 +104,81 @@ static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
}
+static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCntSel_ARCT);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCnt_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCnt);
+ }
+
+ /* select the lower chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear lower chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_1_CE_CNT_INIT);
+
+ /* select the higher chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear higher chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_1_CE_CNT_INIT);
+}
+
+static void umc_v6_1_clear_error_count(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+ uint32_t rsmu_umc_index_state =
+ umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_clear_error_count_per_channel(adev,
+ umc_reg_offset);
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
+}
+
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
@@ -117,23 +211,21 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 0);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
- /* clear the lower chip err count */
- WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* select the higher chip and check the err counter */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 1);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
- /* clear the higher chip err count */
- WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* check for SRAM correctable error;
MCUMC_STATUS is a 64-bit register */
@@ -209,6 +301,8 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
+
+ umc_v6_1_clear_error_count(adev);
}
static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
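The umc_v6_1.c hunks move count clearing out of the per-channel query path into a dedicated umc_v6_1_clear_error_count() called once at the end of query_ras_error_count(). Its flow: save the RSMU index-mode state, drop to direct addressing, clear both chip selects of every channel, then restore the mode. A runnable model of that bracket-and-loop, with plain arrays in place of the PCIE register accessors:

#include <stdbool.h>
#include <stdio.h>

#define NUM_CHANNELS	4	/* illustrative; the real count is per ASIC */
#define CE_CNT_INIT	0	/* models UMC_V6_1_CE_CNT_INIT */

static bool index_mode = true;			/* RSMU index-mode state  */
static unsigned int err_cnt[NUM_CHANNELS][2];	/* [channel][chip select] */

/* Same bracket as umc_v6_1_clear_error_count(): remember whether index
 * mode was on, force it off for direct access, clear both chip selects
 * of every channel, then restore the original mode. */
static void clear_all_error_counts(void)
{
	bool was_on = index_mode;
	unsigned int ch, cs;

	if (was_on)
		index_mode = false;
	for (ch = 0; ch < NUM_CHANNELS; ch++)
		for (cs = 0; cs < 2; cs++)	/* lower, then higher chip */
			err_cnt[ch][cs] = CE_CNT_INIT;
	if (was_on)
		index_mode = true;
}

int main(void)
{
	err_cnt[1][0] = 7;
	clear_all_error_counts();
	printf("ch1/cs0 after clear: %u (index mode %s)\n",
	       err_cnt[1][0], index_mode ? "restored" : "off");
	return 0;
}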
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 82abd8e728ab..3cafba726587 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -118,7 +118,8 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -210,13 +211,10 @@ done:
static int uvd_v4_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 0fa8aae2d78e..a566ff926e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -116,7 +116,8 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -208,13 +209,10 @@ done:
static int uvd_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0aadcaf6c8b..0a880bc101b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -216,7 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -279,7 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -416,7 +418,8 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -428,7 +431,9 @@ static int uvd_v6_0_sw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -535,13 +540,10 @@ done:
static int uvd_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 0995378d8263..7a55457e6f9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -224,7 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -286,7 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -450,7 +452,9 @@ static int uvd_v7_0_sw_init(void *handle)
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
sprintf(ring->name, "uvd_%d", ring->me);
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst[j].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -469,7 +473,9 @@ static int uvd_v7_0_sw_init(void *handle)
else
ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
}
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst[j].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -598,7 +604,6 @@ done:
static int uvd_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
if (!amdgpu_sriov_vf(adev))
uvd_v7_0_stop(adev);
@@ -607,12 +612,6 @@ static int uvd_v7_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
- if (adev->uvd.harvest_config & (1 << i))
- continue;
- adev->uvd.inst[i].ring.sched.ready = false;
- }
-
return 0;
}
@@ -1694,7 +1693,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
uvd_v7_0_set_bypass_mode(adev, enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index b6837fcfdba7..0e2945baf0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -434,7 +434,8 @@ static int vce_v2_0_sw_init(void *handle)
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512,
- &adev->vce.irq, 0);
+ &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 217db187207c..6d9108fa22e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -442,7 +442,8 @@ static int vce_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 3fd102efb7af..a0fb119240f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -476,7 +476,8 @@ static int vce_v4_0_sw_init(void *handle)
else
ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
- r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -539,7 +540,6 @@ static int vce_v4_0_hw_init(void *handle)
static int vce_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
if (!amdgpu_sriov_vf(adev)) {
/* vce_v4_0_wait_for_idle(handle); */
@@ -549,9 +549,6 @@ static int vce_v4_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 09b0572b838d..1ad79155ed00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -127,7 +127,8 @@ static int vcn_v1_0_sw_init(void *handle)
ring = &adev->vcn.inst->ring_dec;
sprintf(ring->name, "vcn_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -145,7 +146,8 @@ static int vcn_v1_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
sprintf(ring->name, "vcn_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -227,14 +229,11 @@ done:
static int vcn_v1_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
RREG32_SOC15(VCN, 0, mmUVD_STATUS))
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index ec8091a661df..90ed773695ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,6 +92,7 @@ static int vcn_v2_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ volatile struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -133,7 +134,8 @@ static int vcn_v2_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
sprintf(ring->name, "vcn_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -163,7 +165,8 @@ static int vcn_v2_0_sw_init(void *handle)
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -174,6 +177,8 @@ static int vcn_v2_0_sw_init(void *handle)
if (r)
return r;
+ fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
return 0;
}
@@ -188,6 +193,9 @@ static int vcn_v2_0_sw_fini(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+
+ fw_shared->present_flag_0 = 0;
amdgpu_virt_free_mm_table(adev);
@@ -223,6 +231,10 @@ static int vcn_v2_0_hw_init(void *handle)
if (r)
goto done;
+ // Disable VCN decode for SR-IOV
+ if (amdgpu_sriov_vf(adev))
+ ring->sched.ready = false;
+
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
@@ -248,21 +260,12 @@ done:
static int vcn_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
- int i;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = false;
- }
-
return 0;
}
@@ -359,6 +362,15 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+ /* non-cache window */
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
+
WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
@@ -442,13 +454,16 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/* non-cache window */
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
@@ -773,6 +788,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
@@ -872,6 +888,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
+
/* set the write pointer delay */
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
@@ -894,11 +916,16 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
return 0;
}
static int vcn_v2_0_start(struct amdgpu_device *adev)
{
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1033,6 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* program the RB_BASE for the ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
@@ -1045,20 +1073,25 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[0];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[1];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
return 0;
}
@@ -1180,6 +1213,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
if (!ret_code) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -1189,23 +1223,38 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* Restore */
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[0];
+ ring->wptr = 0;
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[1];
+ ring->wptr = 0;
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
@@ -1796,7 +1845,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
uint32_t table_size = 0;
struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
- struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
struct mmsch_v2_0_cmd_end end = { {0} };
struct mmsch_v2_0_init_header *header;
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
@@ -1806,8 +1854,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
- direct_poll.cmd_header.command_type =
- MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
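The vcn_v2_0.c changes wrap every ring-pointer reprogramming in a FW_QUEUE_RING_RESET handshake through the new fw_shared window, so firmware knows the WPTR/RPTR are transiently inconsistent. A minimal model of the set/program/clear sequence; the bit value is an illustrative stand-in:

#include <stdint.h>
#include <stdio.h>

#define FW_QUEUE_RING_RESET	(1u << 0)	/* illustrative bit */

/* Models the decode_queue_mode word in the fw_shared window that the
 * VCN firmware polls. */
static volatile uint32_t decode_queue_mode;

static void program_decode_ring(uint32_t wptr)
{
	/* Flag the reset before touching the pointers, as
	 * vcn_v2_0_start() does around the RPTR/WPTR writes... */
	decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* ...program RB base/size and the read/write pointers here... */
	(void)wptr;

	/* ...and clear the flag once the ring state is consistent. */
	decode_queue_mode &= ~FW_QUEUE_RING_RESET;
}

int main(void)
{
	program_decode_ring(0);
	printf("mode after resume: 0x%x\n",
	       (unsigned int)decode_queue_mode);
	return 0;
}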
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index c6363f5ad564..3c6eafb62ee6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -86,7 +86,7 @@ static int vcn_v2_5_early_init(void *handle)
adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+ harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
adev->vcn.harvest_config |= 1 << i;
}
@@ -165,6 +165,8 @@ static int vcn_v2_5_sw_init(void *handle)
return r;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+ volatile struct amdgpu_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << j))
continue;
adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
@@ -175,15 +177,15 @@ static int vcn_v2_5_sw_init(void *handle)
adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
+ adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
+ adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
+ adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
+ adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
+ adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
ring = &adev->vcn.inst[j].ring_dec;
ring->use_doorbell = true;
@@ -191,7 +193,8 @@ static int vcn_v2_5_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
sprintf(ring->name, "vcn_dec_%d", j);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -203,10 +206,15 @@ static int vcn_v2_5_sw_init(void *handle)
(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->vcn.inst[j].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
+
+ fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
}
if (amdgpu_sriov_vf(adev)) {
@@ -230,8 +238,16 @@ static int vcn_v2_5_sw_init(void *handle)
*/
static int vcn_v2_5_sw_fini(void *handle)
{
- int r;
+ int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ volatile struct amdgpu_fw_shared *fw_shared;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ }
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
@@ -308,25 +324,16 @@ done:
static int vcn_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
- int i, j;
+ int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- ring = &adev->vcn.inst[i].ring_dec;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
- ring->sched.ready = false;
-
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
- ring = &adev->vcn.inst[i].ring_enc[j];
- ring->sched.ready = false;
- }
}
return 0;
@@ -392,38 +399,47 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
continue;
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
/* cache window 1: stack */
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
/* cache window 2: context */
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
}
@@ -436,88 +452,91 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
offset = size;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
if (!indirect)
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
else
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
/* cache window 1: stack */
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
}
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
/* cache window 2: context */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
/* non-cache window */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
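Every write in the function above goes through WREG32_SOC15_DPG_MODE_2_0, whose trailing flag selects between an immediate MMIO write and an indirect one. A rough sketch of the indirect path, with an invented helper name: when 'indirect' is set, the (offset, value) pair is appended to the instance's DPG scratch buffer (the dpg_sram_curr_addr cursor seen earlier) and replayed by firmware at power-up instead of being written out directly.

static void dpg_sram_queue_write(struct amdgpu_device *adev, int inst_idx,
				 uint32_t reg_offset, uint32_t value)
{
	/* pairs are consumed in order when the block powers up */
	*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = reg_offset;
	*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value;
}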
/**
@@ -671,19 +690,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__MMSCH_MODE_MASK);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+ VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
/* turn off clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+ VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
/* turn on SUVD clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+ VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
/* turn on sw mode in UVD_SUVD_CGC_CTRL */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+ VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
@@ -750,17 +769,18 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
/* disable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* enable dynamic power gating mode */
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
- WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
if (indirect)
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
@@ -773,11 +793,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* disable master interrupt */

WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
+ VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
/* setup mmUVD_LMI_CTRL */
tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
@@ -789,28 +809,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0x00100000L);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
+ VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_CNTL),
+ VCN, 0, mmUVD_MPC_CNTL),
0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_SET_MUXA0),
+ VCN, 0, mmUVD_MPC_SET_MUXA0),
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_SET_MUXB0),
+ VCN, 0, mmUVD_MPC_SET_MUXB0),
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_SET_MUX),
+ VCN, 0, mmUVD_MPC_SET_MUX),
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
@@ -818,26 +838,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+ VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+ VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
/* enable LMI MC and UMC channels */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
/* unblock VCPU register access */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+ VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* enable master interrupt */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MASTINT_EN),
+ VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
@@ -853,30 +873,41 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* set the write pointer delay */
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
/* set the wb address */
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
/* program the RB_BASE for ring buffer */
- WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
- WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
- ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+ ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
return 0;
}
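The decode-ring setup just added follows one fixed shape: stall DPG power-up, raise the queue's RING_RESET flag in the shared buffer, reprogram the ring registers, then clear the flag and unstall. vcn_v2_5_start() and the pause handler further down repeat the same shape for the encode rings (without the stall bit). Condensed into a sketch with an invented helper name:

static void vcn_ring_reset_handshake(volatile uint8_t *queue_mode)
{
	*queue_mode |= FW_QUEUE_RING_RESET;	/* firmware: pointers in flux */
	/* ... program RB base, RPTR and WPTR for this ring ... */
	*queue_mode &= ~FW_QUEUE_RING_RESET;	/* firmware: safe to fetch */
}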
@@ -898,12 +929,12 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
}
/* disable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* set uvd status busy */
- tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
@@ -916,44 +947,44 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
/* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
/* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* setup mmUVD_LMI_CTRL */
- tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
tmp &= ~0xff;
- WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
/* setup mmUVD_MPC_CNTL */
- tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
/* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
/* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
/* setup mmUVD_MPC_SET_MUX */
- WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
@@ -962,30 +993,31 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
vcn_v2_5_mc_resume(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
if (adev->vcn.harvest_config & (1 << i))
continue;
/* VCN global tiling registers */
- WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
- WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
for (k = 0; k < 10; ++k) {
uint32_t status;
for (j = 0; j < 100; ++j) {
- status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
+ status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
if (status & 2)
break;
if (amdgpu_emu_mode == 1)
@@ -998,11 +1030,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
break;
DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
@@ -1015,15 +1047,15 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
}
/* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
ring = &adev->vcn.inst[i].ring_dec;
/* force RBC into idle state */
@@ -1033,33 +1065,40 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* program the RB_BASE for ring buffer */
- WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
- ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
+ ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[i].ring_enc[1];
- WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
}
return 0;
@@ -1079,33 +1118,33 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
* memory descriptor location
*/
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
/* 2, update vmid of descriptor */
- data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
/* use domain0 for MM scheduler */
data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
/* 3, notify mmsch about the size of this descriptor */
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
/* 4, set resp to zero */
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
/*
* 5, kick off the initialization and wait until
* VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
*/
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
- data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
loop = 10;
while ((data & 0x10000002) != 0x10000002) {
udelay(100);
- data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
loop--;
if (!loop)
break;
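Steps 1-5 above are the host half of the MMSCH handshake; the loop gives the scheduler firmware about a millisecond (10 polls x 100 us) to write the completion pattern back. Restated as a standalone helper for clarity, adding an explicit error return that the inline loop leaves to its caller (the magic values come straight from the code above):

static int vcn_v2_5_mmsch_wait_done(struct amdgpu_device *adev)
{
	uint32_t data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	int loop = 10;

	while ((data & 0x10000002) != 0x10000002) {
		if (--loop == 0)
			return -EBUSY;	/* firmware never acknowledged */
		udelay(100);
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	}
	return 0;
}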
@@ -1128,14 +1167,12 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
uint32_t table_size = 0;
struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
- struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
struct mmsch_v1_0_cmd_end end = { { 0 } };
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
- direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
header->version = MMSCH_VERSION;
@@ -1150,93 +1187,93 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
table_size = 0;
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
/* mc resume */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
offset = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
size);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
AMDGPU_VCN_STACK_SIZE);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
ring = &adev->vcn.inst[i].ring_enc[0];
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
ring->ring_size / 4);
ring = &adev->vcn.inst[i].ring_dec;
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
upper_32_bits(ring->gpu_addr));
@@ -1248,7 +1285,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
@@ -1269,24 +1306,24 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
uint32_t tmp;
/* Wait for power status to be 1 */
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* wait for read ptr to be equal to write ptr */
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* disable dynamic power gating mode */
- WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
return 0;
@@ -1330,17 +1367,17 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
return r;
/* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
/* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
/* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~(UVD_VCPU_CNTL__CLK_EN_MASK));
/* clear status */
@@ -1349,7 +1386,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
vcn_v2_5_enable_clock_gating(adev);
/* enable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
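WREG32_P, used throughout the stop path above, is a masked read-modify-write: its third argument names the bits to keep. A sketch of the equivalent behaviour (the real macro lives in amdgpu.h), which is why callers pass the complement of a field mask to update just that field:

static void wreg32_p_equivalent(uint32_t reg, uint32_t val, uint32_t keep)
{
	uint32_t tmp = RREG32(reg);

	tmp &= keep;		/* preserve the bits in 'keep' */
	tmp |= (val & ~keep);	/* replace everything else     */
	WREG32(reg, tmp);
}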
@@ -1365,55 +1402,69 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
{
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
- int ret_code;
+ int ret_code = 0;
/* pause/unpause if state is changed */
if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
- reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+ reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
- ret_code = 0;
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
if (!ret_code) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
/* wait for ACK */
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
/* Restore */
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-
+ ring->wptr = 0;
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[1];
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
-
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+ ring->wptr = 0;
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
- /* unpause dpg, no need to wait */
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
@@ -1432,7 +1483,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
@@ -1449,7 +1500,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
@@ -1463,15 +1514,11 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
- lower_32_bits(ring->wptr) | 0x80000000);
-
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
@@ -1517,9 +1564,9 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}
/**
@@ -1537,12 +1584,12 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
} else {
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
}
}
@@ -1562,14 +1609,14 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
}
} else {
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
}
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 3ce10e05d0d6..af8986a55354 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -448,27 +448,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
-{
- uint32_t reg = 0;
-
- if (adev->asic_type == CHIP_TONGA ||
- adev->asic_type == CHIP_FIJI) {
- reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
- /* bit0: 0 means pf and 1 means vf */
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
- /* bit31: 0 means disable IOV and 1 means enable */
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
- }
-
- if (reg == 0) {
- if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
{mmGRBM_STATUS},
{mmGRBM_STATUS2},
@@ -1728,9 +1707,6 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
- /* in early init stage, vbios code won't work */
- vi_detect_hw_virtualization(adev);
-
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_vi_virt_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 19ddd2312e00..7a01e6133798 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -332,7 +332,7 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 0ec5f25adf56..cf0017f4d9d5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -215,6 +215,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
}
q_properties->is_interop = false;
+ q_properties->is_gws = false;
q_properties->queue_percent = args->queue_percentage;
q_properties->priority = args->queue_priority;
q_properties->queue_address = args->ring_base_address;
@@ -1322,6 +1323,10 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_free;
}
+ /* Update the VRAM usage count */
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+ WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
+
mutex_unlock(&p->mutex);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1337,7 +1342,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
return err;
@@ -1351,6 +1356,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
void *mem;
struct kfd_dev *dev;
int ret;
+ uint64_t size = 0;
dev = kfd_device_by_id(GET_GPU_ID(args->handle));
if (!dev)
@@ -1373,7 +1379,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
}
ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
- (struct kgd_mem *)mem);
+ (struct kgd_mem *)mem, &size);
/* If freeing the buffer failed, leave the handle in place for
* clean-up during process tear-down.
@@ -1382,6 +1388,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
+ WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+
err_unlock:
mutex_unlock(&p->mutex);
return ret;
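vram_usage is updated under the process mutex but read with no lock at all from the procfs handler added later in this patch, so both sides use the ONCE accessors to keep each access single and untorn. The pairing in isolation:

/* writer, caller holds p->mutex (as in the alloc/free ioctls above) */
static void vram_usage_sub(struct kfd_process_device *pdd, uint64_t size)
{
	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
}

/* lockless reader (as in kfd_procfs_show() later in this patch) */
static uint64_t vram_usage_get(struct kfd_process_device *pdd)
{
	return READ_ONCE(pdd->vram_usage);
}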
@@ -1584,6 +1592,45 @@ copy_from_user_failed:
return err;
}
+static int kfd_ioctl_alloc_queue_gws(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ int retval;
+ struct kfd_ioctl_alloc_queue_gws_args *args = data;
+ struct queue *q;
+ struct kfd_dev *dev;
+
+ mutex_lock(&p->mutex);
+ q = pqm_get_user_queue(&p->pqm, args->queue_id);
+
+ if (q) {
+ dev = q->device;
+ } else {
+ retval = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!dev->gws) {
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+
+ retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
+ mutex_unlock(&p->mutex);
+
+ args->first_gws = 0;
+ return retval;
+
+out_unlock:
+ mutex_unlock(&p->mutex);
+ return retval;
+}
+
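From userspace the new handler is reached through the KFD character device. A hedged sketch of a caller, assuming the matching uapi additions to kfd_ioctl.h (the args struct and the AMDKFD_IOC_ALLOC_QUEUE_GWS macro are not part of this hunk); passing num_gws = 0 maps to the pqm_set_gws(..., NULL) call above and releases the queue's GWS:

#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int alloc_queue_gws(int kfd_fd, unsigned int queue_id,
			   unsigned int num_gws)
{
	struct kfd_ioctl_alloc_queue_gws_args args = {
		.queue_id = queue_id,	/* queue to attach GWS to */
		.num_gws = num_gws,	/* 0 releases the allocation */
	};

	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_QUEUE_GWS, &args) < 0)
		return -1;
	return (int)args.first_gws;	/* always 0 in this implementation */
}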
static int kfd_ioctl_get_dmabuf_info(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1687,7 +1734,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
return r;
@@ -1786,6 +1833,8 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
kfd_ioctl_import_dmabuf, 0),
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
+ kfd_ioctl_alloc_queue_gws, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index de9f68d5c312..1009a3b8dcc2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -502,7 +502,7 @@ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
num_nodes = crat_table->num_domains;
image_len = crat_table->length;
- pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+ pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
for (node_id = 0; node_id < num_nodes; node_id++) {
top_dev = kfd_create_topology_device(device_list);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 05bc6d96ec52..0491ab2b4a9b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -569,6 +569,23 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
}
+static int kfd_gws_init(struct kfd_dev *kfd)
+{
+ int ret = 0;
+
+ if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
+ return 0;
+
+ if (hws_gws_support
+ || (kfd->device_info->asic_family >= CHIP_VEGA10
+ && kfd->device_info->asic_family <= CHIP_RAVEN
+ && kfd->mec2_fw_version >= 0x1b3))
+ ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
+ amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);
+
+ return ret;
+}
+
bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
@@ -578,6 +595,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->ddev = ddev;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
+ kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+ KGD_ENGINE_MEC2);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
@@ -598,13 +617,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
} else
kfd->max_proc_per_quantum = hws_max_conc_proc;
- /* Allocate global GWS that is shared by all KFD processes */
- if (hws_gws_support && amdgpu_amdkfd_alloc_gws(kfd->kgd,
- amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws)) {
- dev_err(kfd_device, "Could not allocate %d gws\n",
- amdgpu_amdkfd_get_num_gws(kfd->kgd));
- goto out;
- }
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
@@ -662,6 +674,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto device_queue_manager_error;
}
+ /* If supported on this device, allocate global GWS that is shared
+ * by all KFD processes
+ */
+ if (kfd_gws_init(kfd)) {
+ dev_err(kfd_device, "Could not allocate %d gws\n",
+ amdgpu_amdkfd_get_num_gws(kfd->kgd));
+ goto gws_error;
+ }
+
if (kfd_iommu_device_init(kfd)) {
dev_err(kfd_device, "Error initializing iommuv2\n");
goto device_iommu_error;
@@ -691,6 +712,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
+gws_error:
device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
kfd_interrupt_exit(kfd);
@@ -701,7 +723,7 @@ kfd_doorbell_error:
kfd_gtt_sa_init_error:
amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
alloc_gtt_mem_failure:
- if (hws_gws_support)
+ if (kfd->gws)
amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
@@ -720,7 +742,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
- if (hws_gws_support)
+ if (kfd->gws)
amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 77ea0f0cb163..e9c4867abeff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -505,8 +505,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_vmid(dqm, qpd, q);
}
qpd->queue_count--;
- if (q->properties.is_active)
+ if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
+ }
return retval;
}
@@ -583,6 +588,20 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
else if (!q->properties.is_active && prev_active)
decrement_queue_count(dqm, q->properties.type);
+ if (q->gws && !q->properties.is_gws) {
+ if (q->properties.is_active) {
+ dqm->gws_queue_count++;
+ pdd->qpd.mapped_gws_queue = true;
+ }
+ q->properties.is_gws = true;
+ } else if (!q->gws && q->properties.is_gws) {
+ if (q->properties.is_active) {
+ dqm->gws_queue_count--;
+ pdd->qpd.mapped_gws_queue = false;
+ }
+ q->properties.is_gws = false;
+ }
+
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active &&
@@ -631,6 +650,10 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
q->properties.type)];
q->properties.is_active = false;
decrement_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -744,6 +767,10 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
q->properties.type)];
q->properties.is_active = true;
increment_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count++;
+ qpd->mapped_gws_queue = true;
+ }
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -913,6 +940,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
dqm->active_cp_queue_count = 0;
+ dqm->gws_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -1061,7 +1089,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
break;
}
- res.queue_mask |= (1ull << i);
+ res.queue_mask |= 1ull
+ << amdgpu_queue_mask_bit_to_set_resource_bit(
+ (struct amdgpu_device *)dqm->dev->kgd, i);
}
res.gws_mask = ~0ull;
res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
@@ -1082,7 +1112,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->processes_count = 0;
dqm->active_cp_queue_count = 0;
-
+ dqm->gws_queue_count = 0;
dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1432,6 +1462,10 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
}
/*
@@ -1650,8 +1684,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- if (q->properties.is_active)
+ if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
+ }
dqm->total_queue_count--;
}
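Every hunk in this file keeps one invariant: gws_queue_count counts queues that are both GWS-capable and active, and qpd->mapped_gws_queue mirrors that state per process. The repeated increment/decrement pairs above all reduce to a sketch like the following (helper name invented for illustration):

static void update_gws_accounting(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  bool mapped)
{
	/* called with the DQM lock held whenever a GWS-capable queue
	 * becomes active or inactive */
	if (mapped) {
		dqm->gws_queue_count++;
		qpd->mapped_gws_queue = true;
	} else {
		dqm->gws_queue_count--;
		qpd->mapped_gws_queue = false;
	}
}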
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 50d919f814e9..4afa015c69b1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -182,6 +182,7 @@ struct device_queue_manager {
unsigned int processes_count;
unsigned int active_queue_count;
unsigned int active_cp_queue_count;
+ unsigned int gws_queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index e05d75ecda21..fce6ccabe38b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -37,7 +37,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
if (vmid < dev->vm_info.first_vmid_kfd ||
vmid > dev->vm_info.last_vmid_kfd)
- return 0;
+ return false;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
@@ -69,7 +69,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
- return 0;
+ return false;
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 8d871514671e..7c8786b9eb0a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -192,7 +192,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
dev_warn_ratelimited(kfd_device,
"Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
- PCI_BUS_NUM(pdev->devfn),
+ pdev->bus->number,
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
pasid,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index bae706462f96..a2b77d1df854 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -126,6 +126,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.queue_size = queue_size;
prop.is_interop = false;
+ prop.is_gws = false;
prop.priority = 1;
prop.queue_percent = 100;
prop.type = type;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index efdb75e7677b..685ca82d42fe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -41,7 +41,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
bool *over_subscription)
{
- unsigned int process_count, queue_count, compute_queue_count;
+ unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
unsigned int max_proc_per_quantum = 1;
struct kfd_dev *dev = pm->dqm->dev;
@@ -49,6 +49,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->active_queue_count;
compute_queue_count = pm->dqm->active_cp_queue_count;
+ gws_queue_count = pm->dqm->gws_queue_count;
/* check if there is over subscription
* Note: the arbitration between the number of VMIDs and
@@ -61,7 +62,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
max_proc_per_quantum = dev->max_proc_per_quantum;
if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_cp_queues_num(pm->dqm)) {
+ compute_queue_count > get_cp_queues_num(pm->dqm) ||
+ gws_queue_count > 1) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 2de01009f1b6..bdca9dc5f118 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->bitfields2.pasid = qpd->pqm->process->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
- packet->bitfields14.num_gws = qpd->num_gws;
+ packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
packet->bitfields14.num_oac = qpd->num_oac;
packet->bitfields14.sdma_enable = 1;
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 4a3049841086..f0587d94294d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -282,6 +282,7 @@ struct kfd_dev {
/* Firmware versions */
uint16_t mec_fw_version;
+ uint16_t mec2_fw_version;
uint16_t sdma_fw_version;
/* Maximum process number mapped to HW scheduler */
@@ -410,6 +411,10 @@ enum KFD_QUEUE_PRIORITY {
* @is_active: Defines if the queue is active or not. @is_active and
* @is_evicted are protected by the DQM lock.
*
+ * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
+ * @is_gws should be protected by the DQM lock, since changing it also
+ * updates the DQM's count of mapped GWS queues.
+ *
* @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
* of the queue.
*
@@ -432,6 +437,7 @@ struct queue_properties {
bool is_interop;
bool is_evicted;
bool is_active;
+ bool is_gws;
/* Not relevant for user mode queues in cp scheduling */
unsigned int vmid;
/* Relevant only for sdma queues*/
@@ -563,6 +569,14 @@ struct qcm_process_device {
*/
bool reset_wavefronts;
+ /* This flag tells us if this process has a GWS-capable
+ * queue that will be mapped into the runlist. It's
+ * possible to request a GWS BO, but not have the queue
+ * currently mapped, and this changes how the MAP_PROCESS
+ * PM4 packet is configured.
+ */
+ bool mapped_gws_queue;
+
/*
* All the memory management data should be here too
*/
@@ -615,6 +629,8 @@ enum kfd_pdd_bound {
PDD_BOUND_SUSPENDED,
};
+#define MAX_VRAM_FILENAME_LEN 11
+
/* Data that is per-process-per device. */
struct kfd_process_device {
/*
@@ -657,6 +673,11 @@ struct kfd_process_device {
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
enum kfd_pdd_bound bound;
+
+ /* VRAM usage */
+ uint64_t vram_usage;
+ struct attribute attr_vram;
+ char vram_filename[MAX_VRAM_FILENAME_LEN];
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -923,6 +944,8 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
unsigned int qid);
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+ unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
unsigned int qid,
void __user *ctl_stack,
@@ -1050,7 +1073,7 @@ void kfd_dec_compute_active(struct kfd_dev *dev);
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
{
-#if defined(CONFIG_CGROUP_DEVICE)
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = kfd->ddev;
return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
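
The new MAX_VRAM_FILENAME_LEN of 11 above appears sized for the "vram_%u" names built in kfd_procfs_add_vram_usage() below: 5 characters of prefix, up to 5 decimal digits of gpu id, plus the terminating NUL. That digit count is an assumption; a larger id would be truncated (safely) by snprintf:

    #include <stdio.h>

    int main(void)
    {
            char name[11];          /* MAX_VRAM_FILENAME_LEN */
            int n = snprintf(name, sizeof(name), "vram_%u", 65535u);

            /* prints: vram_65535 needs 10 chars */
            printf("%s needs %d chars\n", name, n);
            return 0;
    }
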
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index fe0cd49d4ea7..d27221ddcdeb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -79,18 +79,22 @@ static struct kfd_procfs_tree procfs;
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- int val = 0;
-
if (strcmp(attr->name, "pasid") == 0) {
struct kfd_process *p = container_of(attr, struct kfd_process,
attr_pasid);
- val = p->pasid;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
+ } else if (strncmp(attr->name, "vram_", 5) == 0) {
+ struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
+ attr_vram);
+ if (pdd)
+ return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
} else {
pr_err("Invalid attribute");
return -EINVAL;
}
- return snprintf(buffer, PAGE_SIZE, "%d\n", val);
+ return 0;
}
static void kfd_procfs_kobj_release(struct kobject *kobj)
@@ -206,6 +210,34 @@ int kfd_procfs_add_queue(struct queue *q)
return 0;
}
+int kfd_procfs_add_vram_usage(struct kfd_process *p)
+{
+ int ret = 0;
+ struct kfd_process_device *pdd;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!p->kobj)
+ return -EFAULT;
+
+ /* Create proc/<pid>/vram_<gpuid> file for each GPU */
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ snprintf(pdd->vram_filename, MAX_VRAM_FILENAME_LEN, "vram_%u",
+ pdd->dev->id);
+ pdd->attr_vram.name = pdd->vram_filename;
+ pdd->attr_vram.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&pdd->attr_vram);
+ ret = sysfs_create_file(p->kobj, &pdd->attr_vram);
+ if (ret)
+ pr_warn("Creating vram usage for gpu id %d failed",
+ (int)pdd->dev->id);
+ }
+
+ return ret;
+}
+
+
void kfd_procfs_del_queue(struct queue *q)
{
if (!q)
@@ -248,7 +280,7 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_dev *dev = pdd->dev;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -312,7 +344,7 @@ sync_memory_failed:
return err;
err_map_mem:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
err_alloc_mem:
*kptr = NULL;
return err;
@@ -411,6 +443,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
+
+ ret = kfd_procfs_add_vram_usage(process);
+ if (ret)
+ pr_warn("Creating vram usage file for pid %d failed",
+ (int)process->lead_thread->pid);
}
out:
if (!IS_ERR(process))
@@ -488,7 +525,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
peer_pdd->dev->kgd, mem, peer_pdd->vm);
}
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
kfd_process_device_remove_obj_handle(pdd, id);
}
}
@@ -551,6 +588,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+ struct kfd_process_device *pdd;
/* Remove the procfs files */
if (p->kobj) {
@@ -558,6 +596,10 @@ static void kfd_process_wq_release(struct work_struct *work)
kobject_del(p->kobj_queues);
kobject_put(p->kobj_queues);
p->kobj_queues = NULL;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ sysfs_remove_file(p->kobj, &pdd->attr_vram);
+
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
@@ -858,10 +900,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->qpd.dqm = dev->dqm;
pdd->qpd.pqm = &p->pqm;
pdd->qpd.evicted = 0;
+ pdd->qpd.mapped_gws_queue = false;
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
+ pdd->vram_usage = 0;
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
@@ -1078,7 +1122,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
return p;
}
-/* process_evict_queues - Evict all user queues of a process
+/* kfd_process_evict_queues - Evict all user queues of a process
*
* Eviction is reference-counted per process-device. This means multiple
* evictions from different sources can be nested safely.
@@ -1118,7 +1162,7 @@ fail:
return r;
}
-/* process_restore_queues - Restore all user queues of a process */
+/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
struct kfd_process_device *pdd;
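
kfd_process_evict_queues() is documented above as reference-counted per process-device, so evictions from different sources can nest safely. A minimal sketch of that counting pattern (illustrative; the driver keeps the count in its per-device data, not shown in this hunk):

    #include <stdbool.h>

    struct pdd { unsigned int evicted; };

    /* Only the first evict actually stops the queues... */
    static bool evict(struct pdd *pdd)
    {
            return pdd->evicted++ == 0;
    }

    /* ...and only the matching last restore restarts them. */
    static bool restore(struct pdd *pdd)
    {
            return --pdd->evicted == 0;
    }

    int main(void)
    {
            struct pdd pdd = { 0 };

            evict(&pdd);            /* first evict: stops queues */
            evict(&pdd);            /* nested evict: no-op */
            restore(&pdd);          /* inner restore: no-op */
            return restore(&pdd) ? 0 : 1;   /* last restore restarts */
    }
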
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 084c35f55d59..eb1635ac8988 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -476,6 +476,15 @@ struct kernel_queue *pqm_get_kernel_queue(
return NULL;
}
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+ unsigned int qid)
+{
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ return pqn ? pqn->q : NULL;
+}
+
int pqm_get_wave_state(struct process_queue_manager *pqm,
unsigned int qid,
void __user *ctl_stack,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index aa0bfa78a667..bb77f7af2b6d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -478,6 +478,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.device_id);
sysfs_show_32bit_prop(buffer, "location_id",
dev->node_props.location_id);
+ sysfs_show_32bit_prop(buffer, "domain",
+ dev->node_props.domain);
sysfs_show_32bit_prop(buffer, "drm_render_minor",
dev->node_props.drm_render_minor);
sysfs_show_64bit_prop(buffer, "hive_id",
@@ -787,7 +789,6 @@ static int kfd_topology_update_sysfs(void)
{
int ret;
- pr_info("Creating topology SYSFS entries\n");
if (!sys_props.kobj_topology) {
sys_props.kobj_topology =
kfd_alloc_struct(sys_props.kobj_topology);
@@ -1048,7 +1049,6 @@ int kfd_topology_init(void)
sys_props.generation_count++;
kfd_update_system_properties();
kfd_debug_print_topology();
- pr_info("Finished initializing topology\n");
} else
pr_err("Failed to update topology in sysfs ret=%d\n", ret);
@@ -1303,7 +1303,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
+ dev->node_props.capability |=
+ ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) <<
+ HSA_CAP_ASIC_REVISION_SHIFT) &
+ HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->pdev);
+ dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =
@@ -1317,7 +1322,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
gpu->device_info->num_xgmi_sdma_engines;
dev->node_props.num_sdma_queues_per_engine =
gpu->device_info->num_sdma_queues_per_engine;
- dev->node_props.num_gws = (hws_gws_support &&
+ dev->node_props.num_gws = (dev->gpu->gws &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
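
The capability word now carries the ASIC revision in bits 22-25, using the HSA_CAP_ASIC_REVISION_* definitions added to kfd_topology.h in the next hunk. A small self-contained check of the pack/unpack round trip (the revision value here is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define HSA_CAP_ASIC_REVISION_MASK  0x03c00000
    #define HSA_CAP_ASIC_REVISION_SHIFT 22

    int main(void)
    {
            uint32_t rev = 0x5;     /* hypothetical asic_rev_id */
            uint32_t cap = 0;

            cap |= (rev << HSA_CAP_ASIC_REVISION_SHIFT) &
                   HSA_CAP_ASIC_REVISION_MASK;

            /* prints: revision read back: 5 */
            printf("revision read back: %u\n",
                   (cap & HSA_CAP_ASIC_REVISION_MASK) >>
                   HSA_CAP_ASIC_REVISION_SHIFT);
            return 0;
    }
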
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 46eeecaf1b68..326d9b26b7aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -41,7 +41,6 @@
#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
-#define HSA_CAP_RESERVED 0xffffc000
#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
@@ -51,6 +50,10 @@
#define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
#define HSA_CAP_RASEVENTNOTIFY 0x00200000
+#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000
+#define HSA_CAP_ASIC_REVISION_SHIFT 22
+
+#define HSA_CAP_RESERVED 0xfc078000
struct kfd_node_properties {
uint64_t hive_id;
@@ -77,6 +80,7 @@ struct kfd_node_properties {
uint32_t vendor_id;
uint32_t device_id;
uint32_t location_id;
+ uint32_t domain;
uint32_t max_engine_clk_fcompute;
uint32_t max_engine_clk_ccompute;
int32_t drm_render_minor;
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 87858bc57e64..1911a34cc060 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -21,16 +21,12 @@ config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC"
depends on DRM_AMD_DC
help
- Choose this option
- if you want to support
- HDCP authentication
+ Choose this option if you want to support HDCP authentication.
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
help
- Choose this option
- if you want to hit
- kdgb_break in assert.
+ Choose this option if you want to hit kgdb_break in assert.
endmenu
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9c83c1303f08..bdba0bfd6df1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -30,7 +30,7 @@
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
@@ -441,7 +441,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
/**
* dm_crtc_high_irq() - Handles CRTC interrupt
- * @interrupt_params: ignored
+ * @interrupt_params: used for determining the CRTC instance
*
* Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
* event handler.
@@ -455,70 +455,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
- if (acrtc) {
- acrtc_state = to_dm_crtc_state(acrtc->base.state);
-
- DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
- acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
-
- /* Core vblank handling at start of front-porch is only possible
- * in non-vrr mode, as only there vblank timestamping will give
- * valid results while done in front-porch. Otherwise defer it
- * to dm_vupdate_high_irq after end of front-porch.
- */
- if (!amdgpu_dm_vrr_active(acrtc_state))
- drm_crtc_handle_vblank(&acrtc->base);
-
- /* Following stuff must happen at start of vblank, for crc
- * computation and below-the-range btr support in vrr mode.
- */
- amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
- if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
- acrtc_state->vrr_params.supported &&
- acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
- mod_freesync_handle_v_update(
- adev->dm.freesync_module,
- acrtc_state->stream,
- &acrtc_state->vrr_params);
-
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc_state->stream,
- &acrtc_state->vrr_params.adjust);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- }
- }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-/**
- * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
- * @interrupt params - interrupt parameters
- *
- * Notify DRM's vblank event handler at VSTARTUP
- *
- * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
- * * We are close enough to VUPDATE - the point of no return for hw
- * * We are in the fixed portion of variable front porch when vrr is enabled
- * * We are before VUPDATE, where double-buffered vrr registers are swapped
- *
- * It is therefore the correct place to signal vblank, send user flip events,
- * and update VRR.
- */
-static void dm_dcn_crtc_high_irq(void *interrupt_params)
-{
- struct common_irq_params *irq_params = interrupt_params;
- struct amdgpu_device *adev = irq_params->adev;
- struct amdgpu_crtc *acrtc;
- struct dm_crtc_state *acrtc_state;
- unsigned long flags;
-
- acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
if (!acrtc)
return;
@@ -528,22 +464,35 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
amdgpu_dm_vrr_active(acrtc_state),
acrtc_state->active_planes);
+ /**
+ * Core vblank handling at start of front-porch is only possible
+ * in non-vrr mode, as only there vblank timestamping will give
+ * valid results while done in front-porch. Otherwise defer it
+ * to dm_vupdate_high_irq after end of front-porch.
+ */
+ if (!amdgpu_dm_vrr_active(acrtc_state))
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ /**
+ * Following stuff must happen at start of vblank, for crc
+ * computation and below-the-range btr support in vrr mode.
+ */
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
- drm_crtc_handle_vblank(&acrtc->base);
+
+ /* BTR updates need to happen before VUPDATE on Vega and above. */
+ if (adev->family < AMDGPU_FAMILY_AI)
+ return;
spin_lock_irqsave(&adev->ddev->event_lock, flags);
- if (acrtc_state->vrr_params.supported &&
+ if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
- mod_freesync_handle_v_update(
- adev->dm.freesync_module,
- acrtc_state->stream,
- &acrtc_state->vrr_params);
+ mod_freesync_handle_v_update(adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc_state->stream,
- &acrtc_state->vrr_params.adjust);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
}
/*
@@ -556,7 +505,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
* avoid race conditions between flip programming and completion,
* which could cause too early flip completion events.
*/
- if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+ if (adev->family >= AMDGPU_FAMILY_RV &&
+ acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
acrtc_state->active_planes == 0) {
if (acrtc->event) {
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
@@ -568,7 +518,6 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
-#endif
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
@@ -825,8 +774,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
fw_inst_const_size);
}
- memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
- fw_bss_data_size);
+ if (fw_bss_data_size)
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+ fw_bss_data, fw_bss_data_size);
/* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
@@ -968,6 +918,23 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
+ adev->dm.dc->debug.force_single_disp_pipe_split = false;
+ adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+ adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ adev->dm.dc->debug.disable_dsc = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+ adev->dm.dc->debug.disable_clock_gate = true;
+
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1265,6 +1232,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
le32_to_cpu(hdr->inst_const_bytes);
+ region_params.fw_inst_const =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
@@ -1384,9 +1355,14 @@ static int dm_late_init(void *handle)
struct dmcu_iram_parameters params;
unsigned int linear_lut[16];
int i;
- struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ struct dmcu *dmcu = NULL;
bool ret = false;
+ if (!adev->dm.fw_dmcu)
+ return detect_mst_link_for_all_connectors(adev->ddev);
+
+ dmcu = adev->dm.dc->res_pool->dmcu;
+
for (i = 0; i < 16; i++)
linear_lut[i] = 0xFFFF * i / 15;
@@ -1562,12 +1538,115 @@ static int dm_hw_fini(void *handle)
return 0;
}
+
+static int dm_enable_vblank(struct drm_crtc *crtc);
+static void dm_disable_vblank(struct drm_crtc *crtc);
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+ struct dc_state *state, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc;
+ int rc = -EBUSY;
+ int i = 0;
+
+ for (i = 0; i < state->stream_count; i++) {
+ acrtc = get_crtc_by_otg_inst(
+ adev, state->stream_status[i].primary_otg_inst);
+
+ if (acrtc && state->stream_status[i].plane_count != 0) {
+ irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+ DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
+ if (rc)
+ DRM_WARN("Failed to %s pflip interrupts\n",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ rc = dm_enable_vblank(&acrtc->base);
+ if (rc)
+ DRM_WARN("Failed to enable vblank interrupts\n");
+ } else {
+ dm_disable_vblank(&acrtc->base);
+ }
+
+ }
+ }
+
+}
+
+enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+ struct dc_state *context = NULL;
+ enum dc_status res = DC_ERROR_UNEXPECTED;
+ int i;
+ struct dc_stream_state *del_streams[MAX_PIPES];
+ int del_streams_count = 0;
+
+ memset(del_streams, 0, sizeof(del_streams));
+
+ context = dc_create_state(dc);
+ if (context == NULL)
+ goto context_alloc_fail;
+
+ dc_resource_state_copy_construct_current(dc, context);
+
+ /* First remove from context all streams */
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ del_streams[del_streams_count++] = stream;
+ }
+
+ /* Remove all planes for removed streams and then remove the streams */
+ for (i = 0; i < del_streams_count; i++) {
+ if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+ if (res != DC_OK)
+ goto fail;
+ }
+
+
+ res = dc_validate_global_state(dc, context, false);
+
+ if (res != DC_OK) {
+ DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
+ goto fail;
+ }
+
+ res = dc_commit_state(dc, context);
+
+fail:
+ dc_release_state(context);
+
+context_alloc_fail:
+ return res;
+}
+
static int dm_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
+ if (adev->in_gpu_reset) {
+ mutex_lock(&dm->dc_lock);
+ dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+ amdgpu_dm_commit_zero_streams(dm->dc);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ return ret;
+ }
+
WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
@@ -1578,7 +1657,7 @@ static int dm_suspend(void *handle)
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
- return ret;
+ return 0;
}
static struct amdgpu_dm_connector *
@@ -1682,6 +1761,46 @@ static void emulated_link_detect(struct dc_link *link)
}
+static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ struct amdgpu_display_manager *dm)
+{
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } * bundle;
+ int k, m;
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ dm_error("Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ for (k = 0; k < dc_state->stream_count; k++) {
+ bundle->stream_update.stream = dc_state->streams[k];
+
+ for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ bundle->surface_updates[m].surface =
+ dc_state->stream_status->plane_states[m];
+ bundle->surface_updates[m].surface->force_full_update =
+ true;
+ }
+ dc_commit_updates_for_stream(
+ dm->dc, bundle->surface_updates,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k], &bundle->stream_update, dc_state);
+ }
+
+cleanup:
+ kfree(bundle);
+
+ return;
+}
+
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -1698,8 +1817,44 @@ static int dm_resume(void *handle)
struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
- int i, r;
+ struct dc_state *dc_state;
+ int i, r, j;
+
+ if (adev->in_gpu_reset) {
+ dc_state = dm->cached_dc_state;
+
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+ dc_resume(dm->dc);
+
+ amdgpu_dm_irq_resume_early(adev);
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ dc_state->streams[i]->mode_changed = true;
+ for (j = 0; j < dc_state->stream_status->plane_count; j++) {
+ dc_state->stream_status->plane_states[j]->update_flags.raw
+ = 0xffffffff;
+ }
+ }
+
+ WARN_ON(!dc_commit_state(dm->dc, dc_state));
+
+ dm_gpureset_commit_state(dm->cached_dc_state, dm);
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+
+ dc_release_state(dm->cached_dc_state);
+ dm->cached_dc_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ mutex_unlock(&dm->dc_lock);
+
+ return 0;
+ }
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
dm_state->context = dc_create_state(dm->dc);
@@ -2445,8 +2600,36 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
+ amdgpu_dm_irq_register_interrupt(
+ adev, &int_params, dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at end of each vblank, regardless of state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_dcn_crtc_high_irq, c_irq_params);
+ dm_vupdate_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
@@ -3036,9 +3219,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
- if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
- dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
-
/* No userspace support. */
dm->dc->debug.disable_tri_buf = true;
@@ -3309,7 +3489,7 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
}
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
- uint64_t *tiling_flags)
+ uint64_t *tiling_flags, bool *tmz_surface)
{
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
int r = amdgpu_bo_reserve(rbo, false);
@@ -3324,6 +3504,9 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
if (tiling_flags)
amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+ if (tmz_surface)
+ *tmz_surface = amdgpu_bo_encrypted(rbo);
+
amdgpu_bo_unreserve(rbo);
return r;
@@ -3411,6 +3594,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
+ bool tmz_surface,
bool force_disable_dcc)
{
const struct drm_framebuffer *fb = &afb->base;
@@ -3421,6 +3605,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
memset(dcc, 0, sizeof(*dcc));
memset(address, 0, sizeof(*address));
+ address->tmz_surface = tmz_surface;
+
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
plane_size->surface_size.x = 0;
plane_size->surface_size.y = 0;
@@ -3611,6 +3797,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const uint64_t tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
+ bool tmz_surface,
bool force_disable_dcc)
{
const struct drm_framebuffer *fb = plane_state->fb;
@@ -3654,6 +3841,14 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
case DRM_FORMAT_P010:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
break;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
+ break;
default:
DRM_ERROR(
"Unsupported screen format %s\n",
@@ -3693,7 +3888,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
- &plane_info->dcc, address,
+ &plane_info->dcc, address, tmz_surface,
force_disable_dcc);
if (ret)
return ret;
@@ -3717,6 +3912,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_plane_info plane_info;
uint64_t tiling_flags;
int ret;
+ bool tmz_surface = false;
bool force_disable_dcc = false;
ret = fill_dc_scaling_info(plane_state, &scaling_info);
@@ -3728,7 +3924,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->clip_rect = scaling_info.clip_rect;
dc_plane_state->scaling_quality = scaling_info.scaling_quality;
- ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
if (ret)
return ret;
@@ -3736,6 +3932,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
&plane_info,
&dc_plane_state->address,
+ tmz_surface,
force_disable_dcc);
if (ret)
return ret;
@@ -3823,8 +4020,7 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
- const struct drm_connector_state *state,
- bool is_y420)
+ bool is_y420, int requested_bpc)
{
uint8_t bpc;
@@ -3844,10 +4040,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
bpc = bpc ? bpc : 8;
}
- if (!state)
- state = connector->state;
-
- if (state) {
+ if (requested_bpc > 0) {
/*
* Cap display bpc based on the user requested value.
*
@@ -3856,7 +4049,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
* or if this was called outside of atomic check, so it
* can't be used directly.
*/
- bpc = min(bpc, state->max_requested_bpc);
+ bpc = min_t(u8, bpc, requested_bpc);
/* Round down to the nearest even number. */
bpc = bpc - (bpc & 1);
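
The two lines above cap the depth at the caller's requested maximum and round down to an even value. The same transform as a tiny helper, names made up for clarity:

    #include <stdio.h>

    static unsigned int cap_even_bpc(unsigned int bpc, unsigned int max)
    {
            if (bpc > max)
                    bpc = max;
            return bpc & ~1u;       /* round down to even */
    }

    int main(void)
    {
            /* prints: 10 10 8 */
            printf("%u %u %u\n", cap_even_bpc(12, 10),
                   cap_even_bpc(11, 16), cap_even_bpc(9, 16));
            return 0;
    }
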
@@ -3978,7 +4171,8 @@ static void fill_stream_properties_from_drm_display_mode(
const struct drm_display_mode *mode_in,
const struct drm_connector *connector,
const struct drm_connector_state *connector_state,
- const struct dc_stream_state *old_stream)
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
@@ -4008,8 +4202,9 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
timing_out->display_color_depth = convert_color_depth_from_display_info(
- connector, connector_state,
- (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
+ connector,
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
+ requested_bpc);
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
@@ -4215,7 +4410,8 @@ static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
- const struct dc_stream_state *old_stream)
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
{
struct drm_display_mode *preferred_mode = NULL;
struct drm_connector *drm_connector;
@@ -4300,10 +4496,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, con_state, NULL);
+ &mode, &aconnector->base, con_state, NULL, requested_bpc);
else
fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, con_state, old_stream);
+ &mode, &aconnector->base, con_state, old_stream, requested_bpc);
stream->timing.flags.DSC = 0;
@@ -4340,14 +4536,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
- if (stream->link->psr_feature_enabled) {
+ if (stream->link->psr_settings.psr_feature_enabled) {
struct dc *core_dc = stream->link->ctx->dc;
if (dc_is_dmcu_initialized(core_dc)) {
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
- stream->psr_version = dmcu->dmcu_version.psr_version;
-
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
@@ -4453,10 +4645,6 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
struct amdgpu_device *adev = crtc->dev->dev_private;
int rc;
- /* Do not set vupdate for DCN hardware */
- if (adev->family > AMDGPU_FAMILY_AI)
- return 0;
-
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -4830,16 +5018,54 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
create_eml_sink(aconnector);
}
+static struct dc_stream_state *
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct dc_stream_state *stream;
+ int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+ enum dc_status dc_result = DC_OK;
+
+ do {
+ stream = create_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream,
+ requested_bpc);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ break;
+ }
+
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+
+ if (dc_result != DC_OK) {
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay,
+ drm_mode->clock,
+ dc_result);
+
+ dc_stream_release(stream);
+ stream = NULL;
+ requested_bpc -= 2; /* lower bpc to retry validation */
+ }
+
+ } while (stream == NULL && requested_bpc >= 6);
+
+ return stream;
+}
+
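
create_validate_stream_for_sink() above wraps stream creation in a retry loop, stepping the requested bpc down by 2 until DC validation passes or the 6 bpc floor is reached. The control flow in isolation, with a stub standing in for dc_validate_stream():

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub: pretend the sink only validates at 8 bpc or below. */
    static bool validate_stream(int bpc)
    {
            return bpc <= 8;
    }

    int main(void)
    {
            int requested_bpc = 12; /* e.g. connector state's max_requested_bpc */

            while (requested_bpc >= 6) {
                    if (validate_stream(requested_bpc)) {
                            printf("validated at %d bpc\n", requested_bpc);
                            return 0;
                    }
                    requested_bpc -= 2;     /* lower bpc to retry validation */
            }
            printf("no bpc validated\n");
            return 1;
    }
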
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int result = MODE_ERROR;
struct dc_sink *dc_sink;
- struct amdgpu_device *adev = connector->dev->dev_private;
/* TODO: Unhardcode stream count */
struct dc_stream_state *stream;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- enum dc_status dc_result = DC_OK;
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN))
@@ -4860,24 +5086,11 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
goto fail;
}
- stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
- if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
- goto fail;
- }
-
- dc_result = dc_validate_stream(adev->dm.dc, stream);
-
- if (dc_result == DC_OK)
+ stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
+ if (stream) {
+ dc_stream_release(stream);
result = MODE_OK;
- else
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
- mode->hdisplay,
- mode->vdisplay,
- mode->clock,
- dc_result);
-
- dc_stream_release(stream);
+ }
fail:
/* TODO: error handling*/
@@ -5200,10 +5413,12 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
return 0;
if (!state->duplicated) {
+ int max_bpc = conn_state->max_requested_bpc;
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
aconnector->force_yuv420_output;
- color_depth = convert_color_depth_from_display_info(connector, conn_state,
- is_y420);
+ color_depth = convert_color_depth_from_display_info(connector,
+ is_y420,
+ max_bpc);
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
clock = adjusted_mode->clock;
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
@@ -5358,6 +5573,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
uint64_t tiling_flags;
uint32_t domain;
int r;
+ bool tmz_surface = false;
bool force_disable_dcc = false;
dm_plane_state_old = to_dm_plane_state(plane->state);
@@ -5407,6 +5623,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+ tmz_surface = amdgpu_bo_encrypted(rbo);
+
ttm_eu_backoff_reservation(&ticket, &list);
afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -5422,7 +5640,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
adev, afb, plane_state->format, plane_state->rotation,
tiling_flags, &plane_state->tiling_info,
&plane_state->plane_size, &plane_state->dcc,
- &plane_state->address,
+ &plane_state->address, tmz_surface,
force_disable_dcc);
}
@@ -5569,6 +5787,12 @@ static int get_plane_formats(const struct drm_plane *plane,
formats[num_formats++] = DRM_FORMAT_NV12;
if (plane_cap && plane_cap->pixel_format_support.p010)
formats[num_formats++] = DRM_FORMAT_P010;
+ if (plane_cap && plane_cap->pixel_format_support.fp16) {
+ formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
+ formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
+ formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
+ formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
+ }
break;
case DRM_PLANE_TYPE_OVERLAY:
@@ -6596,6 +6820,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
unsigned long flags;
struct amdgpu_bo *abo;
uint64_t tiling_flags;
+ bool tmz_surface = false;
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
@@ -6648,6 +6873,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
}
fill_dc_scaling_info(new_plane_state,
@@ -6690,12 +6916,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ tmz_surface = amdgpu_bo_encrypted(abo);
+
amdgpu_bo_unreserve(abo);
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
&bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address,
+ tmz_surface,
false);
DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
@@ -6841,7 +7070,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
mutex_lock(&dm->dc_lock);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_allow_active)
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
dc_commit_updates_for_stream(dm->dc,
@@ -6852,12 +7081,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_state);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->psr_version &&
- !acrtc_state->stream->link->psr_feature_enabled)
+ acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_feature_enabled &&
- !acrtc_state->stream->link->psr_allow_active) {
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
+ !acrtc_state->stream->link->psr_settings.psr_allow_active) {
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -7171,7 +7400,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
/* i.e. reset mode */
if (dm_old_crtc_state->stream) {
- if (dm_old_crtc_state->stream->link->psr_allow_active)
+ if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@ -7619,10 +7848,10 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- new_stream = create_stream_for_sink(aconnector,
- &new_crtc_state->mode,
- dm_new_conn_state,
- dm_old_crtc_state->stream);
+ new_stream = create_validate_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
/*
* we can have no stream on ACTION_SET if a display
@@ -7882,6 +8111,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ struct amdgpu_crtc *new_acrtc;
bool needs_reset;
int ret = 0;
@@ -7891,9 +8121,23 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state = to_dm_plane_state(new_plane_state);
dm_old_plane_state = to_dm_plane_state(old_plane_state);
- /*TODO Implement atomic check for cursor plane */
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ /*TODO Implement better atomic check for cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
+ (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
+ DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
+ new_plane_state->crtc_w, new_plane_state->crtc_h);
+ return -EINVAL;
+ }
+
return 0;
+ }
needs_reset = should_reset_plane(state, plane, old_plane_state,
new_plane_state);
@@ -8068,6 +8312,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
uint64_t tiling_flags;
+ bool tmz_surface = false;
new_plane_crtc = new_plane_state->crtc;
new_dm_plane_state = to_dm_plane_state(new_plane_state);
@@ -8097,6 +8342,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
new_dm_plane_state->dc_state->gamma_correction;
bundle->surface_updates[num_plane].in_transfer_func =
new_dm_plane_state->dc_state->in_transfer_func;
+ bundle->surface_updates[num_plane].gamut_remap_matrix =
+ &new_dm_plane_state->dc_state->gamut_remap_matrix;
bundle->stream_update.gamut_remap =
&new_dm_crtc_state->stream->gamut_remap_matrix;
bundle->stream_update.output_csc_transform =
@@ -8113,14 +8360,14 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
bundle->surface_updates[num_plane].scaling_info = scaling_info;
if (amdgpu_fb) {
- ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
if (ret)
goto cleanup;
ret = fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
plane_info,
- &flip_addr->address,
+ &flip_addr->address, tmz_surface,
false);
if (ret)
goto cleanup;
@@ -8621,8 +8868,17 @@ static void amdgpu_dm_set_psr_caps(struct dc_link *link)
return;
if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
dpcd_data, sizeof(dpcd_data))) {
- link->psr_feature_enabled = dpcd_data[0] ? true:false;
- DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+ link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+
+ if (dpcd_data[0] == 0) {
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->psr_settings.psr_feature_enabled = false;
+ } else {
+ link->psr_settings.psr_version = DC_PSR_VERSION_1;
+ link->psr_settings.psr_feature_enabled = true;
+ }
+
+ DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
}
}
@@ -8637,16 +8893,14 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
struct dc_link *link = NULL;
struct psr_config psr_config = {0};
struct psr_context psr_context = {0};
- struct dc *dc = NULL;
bool ret = false;
if (stream == NULL)
return false;
link = stream->link;
- dc = link->ctx->dc;
- psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+ psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
if (psr_config.psr_version > 0) {
psr_config.psr_exit_link_training_required = 0x1;
@@ -8658,7 +8912,7 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
}
- DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 5cab3e65d992..d61186ff411d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -315,6 +315,7 @@ struct amdgpu_display_manager {
#endif
struct drm_atomic_state *cached_state;
+ struct dc_state *cached_dc_state;
struct dm_comressor_info compressor;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 2233d293a707..4dfb6b55bb2e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -239,7 +239,8 @@ static int __set_output_tf(struct dc_transfer_func *func,
* instead to simulate this.
*/
gamma->type = GAMMA_CUSTOM;
- res = mod_color_calculate_degamma_params(func, gamma, true);
+ res = mod_color_calculate_degamma_params(NULL, func,
+ gamma, true);
} else {
/*
* Assume sRGB. The actual mapping will depend on whether the
@@ -271,7 +272,7 @@ static int __set_input_tf(struct dc_transfer_func *func,
__drm_lut_to_dc_gamma(lut, gamma, false);
- res = mod_color_calculate_degamma_params(func, gamma, true);
+ res = mod_color_calculate_degamma_params(NULL, func, gamma, true);
dc_gamma_release(&gamma);
return res ? 0 : -ENOMEM;
@@ -419,9 +420,21 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state)
{
const struct drm_color_lut *degamma_lut;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
uint32_t degamma_size;
int r;
+ /* Get the correct base transfer function for implicit degamma. */
+ switch (dc_plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ /* DC doesn't have a transfer function for BT601 specifically. */
+ tf = TRANSFER_FUNCTION_BT709;
+ break;
+ default:
+ break;
+ }
+
if (crtc->cm_has_degamma) {
degamma_lut = __extract_blob_lut(crtc->base.degamma_lut,
&degamma_size);
@@ -455,8 +468,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
* map these to the atomic one instead.
*/
if (crtc->cm_is_degamma_srgb)
- dc_plane_state->in_transfer_func->tf =
- TRANSFER_FUNCTION_SRGB;
+ dc_plane_state->in_transfer_func->tf = tf;
else
dc_plane_state->in_transfer_func->tf =
TRANSFER_FUNCTION_LINEAR;
@@ -471,7 +483,12 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
* in linear space. Assume that the input is sRGB.
*/
dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ dc_plane_state->in_transfer_func->tf = tf;
+
+ if (tf != TRANSFER_FUNCTION_SRGB &&
+ !mod_color_calculate_degamma_params(NULL,
+ dc_plane_state->in_transfer_func, NULL, false))
+ return -ENOMEM;
} else {
/* ...Otherwise we can just bypass the DGM block. */
dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0461fecd68db..076af267b488 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -32,7 +32,7 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
struct dmub_debugfs_trace_header {
uint32_t entry_count;
@@ -838,6 +838,44 @@ static int vrr_range_show(struct seq_file *m, void *data)
return 0;
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+/*
+ * Returns the HDCP capability of the Display (1.4 for now).
+ *
+ * Note: Not all HDMI displays report their HDCP caps even when they are capable.
+ * Since it's rare for a display to not be HDCP 1.4 capable, we set HDMI as always capable.
+ *
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability
+ * or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability
+ */
+static int hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ bool hdcp_cap, hdcp2_cap;
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
+
+ hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link);
+ hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link);
+
+
+ if (hdcp_cap)
+ seq_printf(m, "%s ", "HDCP1.4");
+ if (hdcp2_cap)
+ seq_printf(m, "%s ", "HDCP2.2");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_printf(m, "%s ", "None");
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
+#endif
/* function description
*
* generic SDP message access for testing
@@ -964,6 +1002,9 @@ DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(vrr_range);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
+#endif
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
@@ -1019,12 +1060,23 @@ static const struct {
{"test_pattern", &dp_phy_test_pattern_fops},
{"output_bpc", &output_bpc_fops},
{"vrr_range", &vrr_range_fops},
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+#endif
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+} hdmi_debugfs_entries[] = {
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+};
+#endif
/*
* Force YUV420 output if available from the given mode
*/
@@ -1093,6 +1145,15 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
+ debugfs_create_file(hdmi_debugfs_entries[i].name,
+ 0644, dir, connector,
+ hdmi_debugfs_entries[i].fops);
+ }
+ }
+#endif
}
/*
@@ -1167,8 +1228,9 @@ static int current_backlight_read(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct dc *dc = adev->dm.dc;
- unsigned int backlight = dc_get_current_backlight_pwm(dc);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link);
seq_printf(m, "0x%x\n", backlight);
return 0;
@@ -1184,8 +1246,9 @@ static int target_backlight_read(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct dc *dc = adev->dm.dc;
- unsigned int backlight = dc_get_target_backlight_pwm(dc);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link);
seq_printf(m, "0x%x\n", backlight);
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 78e1c11d4ae5..dcf84a61de37 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -398,15 +398,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
- memset(display, 0, sizeof(*display));
- memset(link, 0, sizeof(*link));
-
- display->index = aconnector->base.index;
-
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
return;
}
+
+ memset(display, 0, sizeof(*display));
+ memset(link, 0, sizeof(*link));
+
+ display->index = aconnector->base.index;
display->state = MOD_HDCP_DISPLAY_ACTIVE;
if (aconnector->dc_sink != NULL)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index c20fb08c450b..b086d5c906e0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to find connector for link!");
+ DC_LOG_DC("Failed to find connector for link!\n");
return false;
}
@@ -554,6 +554,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_sink *sink)
{
struct amdgpu_dm_connector *aconnector = link->priv;
+ struct drm_connector *connector = &aconnector->base;
struct i2c_adapter *ddc;
int retry = 3;
enum dc_edid_status edid_status;
@@ -571,6 +572,15 @@ enum dc_edid_status dm_helpers_read_local_edid(
edid = drm_get_edid(&aconnector->base, ddc);
+ /* DP Compliance Test 4.2.2.6 */
+ if (link->aux_mode && connector->edid_corrupt)
+ drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
+
+ if (!edid && connector->edid_corrupt) {
+ connector->edid_corrupt = false;
+ return EDID_BAD_CHECKSUM;
+ }
+
if (!edid)
return EDID_NO_RESPONSE;
@@ -605,34 +615,10 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
- if (link->aux_mode) {
- union test_request test_request = { {0} };
- union test_response test_response = { {0} };
-
- dm_helpers_dp_read_dpcd(ctx,
- link,
- DP_TEST_REQUEST,
- &test_request.raw,
- sizeof(union test_request));
-
- if (!test_request.bits.EDID_READ)
- return edid_status;
- test_response.bits.EDID_CHECKSUM_WRITE = 1;
-
- dm_helpers_dp_write_dpcd(ctx,
- link,
- DP_TEST_EDID_CHECKSUM,
- &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
- 1);
-
- dm_helpers_dp_write_dpcd(ctx,
- link,
- DP_TEST_RESPONSE,
- &test_response.raw,
- sizeof(test_response));
-
- }
+ /* DP Compliance Test 4.2.2.3 */
+ if (link->aux_mode)
+ drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
return edid_status;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d2917759b7ab..ae0a7ef1d595 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -41,53 +41,10 @@
#include "amdgpu_dm_debugfs.h"
#endif
-
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif
-/* #define TRACE_DPCD */
-
-#ifdef TRACE_DPCD
-#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
-
-static inline char *side_band_msg_type_to_str(uint32_t address)
-{
- static char str[10] = {0};
-
- if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
- strcpy(str, "DOWN_REQ");
- else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
- strcpy(str, "UP_REP");
- else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
- strcpy(str, "DOWN_REP");
- else
- strcpy(str, "UP_REQ");
-
- return str;
-}
-
-static void log_dpcd(uint8_t type,
- uint32_t address,
- uint8_t *data,
- uint32_t size,
- bool res)
-{
- DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
- (type == DP_AUX_NATIVE_READ) ||
- (type == DP_AUX_I2C_READ) ?
- "Read" : "Write",
- address,
- SIDE_BAND_MSG(address) ?
- side_band_msg_type_to_str(address) : "Nop",
- res ? "OK" : "Fail");
-
- if (res) {
- print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
- }
-}
-#endif
-
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -136,17 +93,23 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
- struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+ struct amdgpu_dm_connector *aconnector =
+ to_amdgpu_dm_connector(connector);
+ struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
- kfree(amdgpu_dm_connector->edid);
- amdgpu_dm_connector->edid = NULL;
+ if (aconnector->dc_sink) {
+ dc_link_remove_remote_sink(aconnector->dc_link,
+ aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
+ }
+
+ kfree(aconnector->edid);
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
- drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
- kfree(amdgpu_dm_connector);
+ drm_dp_mst_put_port_malloc(aconnector->port);
+ kfree(aconnector);
}
static int
@@ -435,40 +398,13 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
- DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
drm_dp_mst_get_port_malloc(port);
- DRM_DEBUG_KMS(":%d\n", connector->base.id);
-
return connector;
}
-static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
- if (aconnector->dc_sink) {
- amdgpu_dm_update_freesync_caps(connector, NULL);
- dc_link_remove_remote_sink(aconnector->dc_link,
- aconnector->dc_sink);
- dc_sink_release(aconnector->dc_sink);
- aconnector->dc_sink = NULL;
- aconnector->dc_link->cur_link_settings.lane_count = 0;
- }
-
- drm_connector_unregister(connector);
- drm_connector_put(connector);
-}
-
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
- .destroy_connector = dm_dp_destroy_mst_connector,
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index 7ad0cad0f4ef..01b99e0d788e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -24,8 +24,7 @@
# It provides the general basic services required by other DAL
# subcomponents.
-BASICS = conversion.o fixpt31_32.o \
- log_helpers.o vector.o dc_common.o
+BASICS = conversion.o fixpt31_32.o vector.o dc_common.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 8edc2506d49e..bed91572f82a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -113,13 +113,19 @@ static void encoder_control_dmcub(
struct dc_dmub_srv *dmcub,
struct dig_encoder_stream_setup_parameters_v1_5 *dig)
{
- struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 };
+ union dmub_rb_cmd cmd;
- encoder_control.header.type = DMUB_CMD__VBIOS;
- encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
- encoder_control.encoder_control.dig.stream_param = *dig;
+ memset(&cmd, 0, sizeof(cmd));
- dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header);
+ cmd.digx_encoder_control.header.type = DMUB_CMD__VBIOS;
+ cmd.digx_encoder_control.header.sub_type =
+ DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
+ cmd.digx_encoder_control.header.payload_bytes =
+ sizeof(cmd.digx_encoder_control) -
+ sizeof(cmd.digx_encoder_control.header);
+ cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
+
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
@@ -238,14 +244,19 @@ static void transmitter_control_dmcub(
struct dc_dmub_srv *dmcub,
struct dig_transmitter_control_parameters_v1_6 *dig)
{
- struct dmub_rb_cmd_dig1_transmitter_control transmitter_control;
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
- transmitter_control.header.type = DMUB_CMD__VBIOS;
- transmitter_control.header.sub_type =
+ cmd.dig1_transmitter_control.header.type = DMUB_CMD__VBIOS;
+ cmd.dig1_transmitter_control.header.sub_type =
DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
- transmitter_control.transmitter_control.dig = *dig;
+ cmd.dig1_transmitter_control.header.payload_bytes =
+ sizeof(cmd.dig1_transmitter_control) -
+ sizeof(cmd.dig1_transmitter_control.header);
+ cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
- dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
@@ -339,13 +350,18 @@ static void set_pixel_clock_dmcub(
struct dc_dmub_srv *dmcub,
struct set_pixel_clock_parameter_v1_7 *clk)
{
- struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 };
+ union dmub_rb_cmd cmd;
- pixel_clock.header.type = DMUB_CMD__VBIOS;
- pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
- pixel_clock.pixel_clock.clk = *clk;
+ memset(&cmd, 0, sizeof(cmd));
- dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header);
+ cmd.set_pixel_clock.header.type = DMUB_CMD__VBIOS;
+ cmd.set_pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
+ cmd.set_pixel_clock.header.payload_bytes =
+ sizeof(cmd.set_pixel_clock) -
+ sizeof(cmd.set_pixel_clock.header);
+ cmd.set_pixel_clock.pixel_clock.clk = *clk;
+
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
@@ -705,13 +721,19 @@ static void enable_disp_power_gating_dmcub(
struct dc_dmub_srv *dmcub,
struct enable_disp_power_gating_parameters_v2_1 *pwr)
{
- struct dmub_rb_cmd_enable_disp_power_gating power_gating;
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
- power_gating.header.type = DMUB_CMD__VBIOS;
- power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
- power_gating.power_gating.pwr = *pwr;
+ cmd.enable_disp_power_gating.header.type = DMUB_CMD__VBIOS;
+ cmd.enable_disp_power_gating.header.sub_type =
+ DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
+ cmd.enable_disp_power_gating.header.payload_bytes =
+ sizeof(cmd.enable_disp_power_gating) -
+ sizeof(cmd.enable_disp_power_gating.header);
+ cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
- dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
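
All four dmcub helpers above were converted to the same shape: zero a whole union dmub_rb_cmd, fill in the member's header (type, sub_type, and payload_bytes computed as the member size minus its embedded header), then queue the union itself. A minimal stand-alone model of that pattern follows; the struct layouts and the numeric type/sub_type values are simplified assumptions for illustration, not the real DMUB definitions.

/* Sketch of the "one zeroed union + explicit payload_bytes" pattern used
 * by the dmcub helpers above. All types here are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cmd_header {
	uint8_t type;
	uint8_t sub_type;
	uint8_t payload_bytes;
	uint8_t reserved;
};

struct cmd_set_pixel_clock {
	struct cmd_header header;
	uint32_t clk_100hz;
};

union rb_cmd {
	struct cmd_header header;          /* common prefix of every member */
	struct cmd_set_pixel_clock set_pixel_clock;
};

static void cmd_queue(const union rb_cmd *cmd)
{
	/* A real ring buffer would copy header + payload_bytes. */
	printf("queued type=%u sub_type=%u payload=%u bytes\n",
	       cmd->header.type, cmd->header.sub_type,
	       cmd->header.payload_bytes);
}

int main(void)
{
	union rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));              /* zero the whole union */
	cmd.set_pixel_clock.header.type = 1;       /* e.g. a "VBIOS" class */
	cmd.set_pixel_clock.header.sub_type = 2;   /* e.g. "set pixel clock" */
	/* payload size = member size minus its embedded header */
	cmd.set_pixel_clock.header.payload_bytes =
		sizeof(cmd.set_pixel_clock) - sizeof(cmd.set_pixel_clock.header);
	cmd.set_pixel_clock.clk_100hz = 1485000;   /* 148.5 MHz */

	cmd_queue(&cmd);
	return 0;
}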
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3960a8db94cb..1e5a92b192a1 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -690,6 +690,26 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
struct dc_debug_options *dbg,
struct dc_state *context)
{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		/* Workaround for avoiding pipe-split in cases where we'd split
+		 * planes that are too small, resulting in splits that aren't
+		 * valid for the scaler.
+		 */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16)) {
+ hack_disable_optional_pipe_split(v);
+ return;
+ }
+ }
+
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
hack_disable_optional_pipe_split(v);
@@ -702,7 +722,6 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
-
unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */
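
The hack_bounding_box() workaround above refuses optional pipe splitting whenever any plane's source or destination rectangle is 16 pixels or smaller in either dimension, because half of such a plane would fall below what the scaler accepts. A stand-alone model of that guard, with simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

struct rect { int width, height; };
struct plane { struct rect src_rect, dst_rect; };
struct pipe { const struct plane *plane; };

/* Mirror of the check added to hack_bounding_box(): splitting a plane
 * this small would produce halves the scaler cannot handle.
 */
static bool must_avoid_split(const struct pipe pipes[], int n)
{
	for (int i = 0; i < n; i++) {
		const struct plane *p = pipes[i].plane;

		if (p && (p->dst_rect.width  <= 16 || p->dst_rect.height <= 16 ||
			  p->src_rect.width  <= 16 || p->src_rect.height <= 16))
			return true;
	}
	return false;
}

int main(void)
{
	struct plane cursor_like = { { 12, 12 }, { 12, 12 } };
	struct pipe pipes[MAX_PIPES] = { { &cursor_like } };

	printf("avoid split: %d\n", must_avoid_split(pipes, MAX_PIPES));
	return 0;
}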
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 8ec2dfe45d40..a5c2114e4292 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -90,7 +90,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
if (edp_link) {
- clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+ clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
dc_link_set_psr_allow_active(edp_link, false, false);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26db1c5d4e4d..b210f8e9d592 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -131,7 +131,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
int dprefclk_src_sel;
- int dp_ref_clk_khz = 600000;
+ int dp_ref_clk_khz;
int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 97b7f32294fd..c320b7af7d34 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -97,9 +97,6 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
VBIOSSMC_MSG_SetDispclkFreq,
requested_dispclk_khz / 1000);
- /* Actual dispclk set is returned in the parameter register */
- actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
-
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 47431ca6986d..45cfb7c45566 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -66,6 +66,8 @@
#include "dce/dce_i2c.h"
+#include "dmub/dmub_srv.h"
+
#define CTX \
dc->ctx
@@ -348,7 +350,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream == stream)
+ if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
break;
}
/* Stream not found */
@@ -365,6 +367,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
param.windowb_x_end = pipe->stream->timing.h_addressable;
param.windowb_y_end = pipe->stream->timing.v_addressable;
+ param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
+ param.odm_mode = pipe->next_odm_pipe ? 1:0;
+
/* Default to the union of both windows */
param.selection = UNION_WINDOW_A_B;
param.continuous_mode = continuous;
@@ -2204,7 +2209,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (should_program_abm) {
if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
- pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
} else {
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
@@ -2640,33 +2645,12 @@ void dc_set_power_state(
void dc_resume(struct dc *dc)
{
-
uint32_t i;
for (i = 0; i < dc->link_count; i++)
core_link_resume(dc->links[i]);
}
-unsigned int dc_get_current_backlight_pwm(struct dc *dc)
-{
- struct abm *abm = dc->res_pool->abm;
-
- if (abm)
- return abm->funcs->get_current_backlight(abm);
-
- return 0;
-}
-
-unsigned int dc_get_target_backlight_pwm(struct dc *dc)
-{
- struct abm *abm = dc->res_pool->abm;
-
- if (abm)
- return abm->funcs->get_target_backlight(abm);
-
- return 0;
-}
-
bool dc_is_dmcu_initialized(struct dc *dc)
{
struct dmcu *dmcu = dc->res_pool->dmcu;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 67cfff1586e9..48ab51533d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include "dm_services.h"
-#include "atom.h"
+#include "atomfirmware.h"
#include "dm_helpers.h"
#include "dc.h"
#include "grph_object_id.h"
@@ -46,10 +46,11 @@
#include "dmcu.h"
#include "hw/clk_mgr.h"
#include "dce/dmub_psr.h"
+#include "dmub/dmub_srv.h"
+#include "inc/hw/panel_cntl.h"
#define DC_LOGGER_INIT(logger)
-
#define LINK_INFO(...) \
DC_LOG_HW_HOTPLUG( \
__VA_ARGS__)
@@ -64,11 +65,11 @@
enum {
PEAK_FACTOR_X1000 = 1006,
/*
- * Some receivers fail to train on first try and are good
- * on subsequent tries. 2 retries should be plenty. If we
- * don't have a successful training then we don't expect to
- * ever get one.
- */
+ * Some receivers fail to train on first try and are good
+ * on subsequent tries. 2 retries should be plenty. If we
+ * don't have a successful training then we don't expect to
+ * ever get one.
+ */
LINK_TRAINING_MAX_VERIFY_RETRY = 2
};
@@ -79,7 +80,7 @@ static void dc_link_destruct(struct dc_link *link)
{
int i;
- if (link->hpd_gpio != NULL) {
+ if (link->hpd_gpio) {
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -87,7 +88,10 @@ static void dc_link_destruct(struct dc_link *link)
if (link->ddc)
dal_ddc_service_destroy(&link->ddc);
- if(link->link_enc)
+ if (link->panel_cntl)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+
+ if (link->link_enc)
link->link_enc->funcs->destroy(&link->link_enc);
if (link->local_sink)
@@ -98,8 +102,8 @@ static void dc_link_destruct(struct dc_link *link)
}
struct gpio *get_hpd_gpio(struct dc_bios *dcb,
- struct graphics_object_id link_id,
- struct gpio_service *gpio_service)
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service)
{
enum bp_result bp_result;
struct graphics_object_hpd_info hpd_info;
@@ -116,10 +120,9 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
return NULL;
}
- return dal_gpio_service_create_irq(
- gpio_service,
- pin_info.offset,
- pin_info.mask);
+ return dal_gpio_service_create_irq(gpio_service,
+ pin_info.offset,
+ pin_info.mask);
}
/*
@@ -134,13 +137,10 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
* @return
* true on success, false otherwise
*/
-static bool program_hpd_filter(
- const struct dc_link *link)
+static bool program_hpd_filter(const struct dc_link *link)
{
bool result = false;
-
struct gpio *hpd;
-
int delay_on_connect_in_ms = 0;
int delay_on_disconnect_in_ms = 0;
@@ -159,10 +159,10 @@ static bool program_hpd_filter(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
/* Program hpd filter to allow DP signal to settle */
/* 500: not able to detect MST <-> SST switch as HPD is low for
- * only 100ms on DELL U2413
- * 0: some passive dongle still show aux mode instead of i2c
- * 20-50:not enough to hide bouncing HPD with passive dongle.
- * also see intermittent i2c read issues.
+ * only 100ms on DELL U2413
+	 * 0: some passive dongles still show aux mode instead of i2c
+ * 20-50: not enough to hide bouncing HPD with passive dongle.
+ * also see intermittent i2c read issues.
*/
delay_on_connect_in_ms = 80;
delay_on_disconnect_in_ms = 0;
@@ -175,7 +175,8 @@ static bool program_hpd_filter(
}
/* Obtain HPD handle */
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
if (!hpd)
return result;
@@ -226,8 +227,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
}
/* todo: may need to lock gpio access */
- hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
- if (hpd_pin == NULL)
+ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+ if (!hpd_pin)
goto hpd_gpio_failure;
dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
@@ -248,8 +250,7 @@ hpd_gpio_failure:
return false;
}
-static enum ddc_transaction_type get_ddc_transaction_type(
- enum signal_type sink_signal)
+static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal)
{
enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
@@ -270,7 +271,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
/* MST does not use I2COverAux, but there is the
* SPECIAL use case for "immediate dwnstrm device
- * access" (EPR#370830). */
+ * access" (EPR#370830).
+ */
transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
break;
@@ -281,9 +283,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
return transaction_type;
}
-static enum signal_type get_basic_signal_type(
- struct graphics_object_id encoder,
- struct graphics_object_id downstream)
+static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
+ struct graphics_object_id downstream)
{
if (downstream.type == OBJECT_TYPE_CONNECTOR) {
switch (downstream.id) {
@@ -369,10 +370,11 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
/* Open GPIO and set it to I2C mode */
/* Note: this GpioMode_Input will be converted
* to GpioConfigType_I2cAuxDualMode in GPIO component,
- * which indicates we need additional delay */
+ * which indicates we need additional delay
+ */
- if (GPIO_RESULT_OK != dal_ddc_open(
- ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+ if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
dal_ddc_close(ddc);
return present;
@@ -406,25 +408,25 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
* @brief
* Detect output sink type
*/
-static enum signal_type link_detect_sink(
- struct dc_link *link,
- enum dc_detect_reason reason)
+static enum signal_type link_detect_sink(struct dc_link *link,
+ enum dc_detect_reason reason)
{
- enum signal_type result = get_basic_signal_type(
- link->link_enc->id, link->link_id);
+ enum signal_type result = get_basic_signal_type(link->link_enc->id,
+ link->link_id);
/* Internal digital encoder will detect only dongles
- * that require digital signal */
+ * that require digital signal
+ */
/* Detection mechanism is different
* for different native connectors.
* LVDS connector supports only LVDS signal;
* PCIE is a bus slot, the actual connector needs to be detected first;
* eDP connector supports only eDP signal;
- * HDMI should check straps for audio */
+ * HDMI should check straps for audio
+ */
/* PCIE detects the actual connector on add-on board */
-
if (link->link_id.id == CONNECTOR_ID_PCIE) {
/* ZAZTODO implement PCIE add-on card detection */
}
@@ -432,8 +434,10 @@ static enum signal_type link_detect_sink(
switch (link->link_id.id) {
case CONNECTOR_ID_HDMI_TYPE_A: {
/* check audio support:
- * if native HDMI is not supported, switch to DVI */
- struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+ * if native HDMI is not supported, switch to DVI
+ */
+ struct audio_support *aud_support =
+ &link->dc->res_pool->audio_support;
if (!aud_support->hdmi_audio_native)
if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
@@ -461,16 +465,15 @@ static enum signal_type link_detect_sink(
return result;
}
-static enum signal_type decide_signal_from_strap_and_dongle_type(
- enum display_dongle_type dongle_type,
- struct audio_support *audio_support)
+static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type,
+ struct audio_support *audio_support)
{
enum signal_type signal = SIGNAL_TYPE_NONE;
switch (dongle_type) {
case DISPLAY_DONGLE_DP_HDMI_DONGLE:
if (audio_support->hdmi_audio_on_dongle)
- signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
else
signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
break;
@@ -491,16 +494,14 @@ static enum signal_type decide_signal_from_strap_and_dongle_type(
return signal;
}
-static enum signal_type dp_passive_dongle_detection(
- struct ddc_service *ddc,
- struct display_sink_capability *sink_cap,
- struct audio_support *audio_support)
+static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap,
+ struct audio_support *audio_support)
{
- dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
- ddc, sink_cap);
- return decide_signal_from_strap_and_dongle_type(
- sink_cap->dongle_type,
- audio_support);
+ dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap);
+
+ return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
+ audio_support);
}
static void link_disconnect_sink(struct dc_link *link)
@@ -519,6 +520,96 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
link->local_sink = prev_sink;
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+bool dc_link_is_hdcp14(struct dc_link *link)
+{
+ bool ret = false;
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+		/* HDMI doesn't tell us its HDCP(1.4) capability, so assume it is
+		 * always capable. We can poll for bksv, but some displays have an
+		 * issue with this. Since it is so rare for a display not to be
+		 * 1.4 capable, this assumption is OK.
+		 */
+ ret = true;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+bool dc_link_is_hdcp22(struct dc_link *link)
+{
+ bool ret = false;
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
+ link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable &&
+ (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
+{
+ struct hdcp_protection_message msg22;
+ struct hdcp_protection_message msg14;
+
+ memset(&msg22, 0, sizeof(struct hdcp_protection_message));
+ memset(&msg14, 0, sizeof(struct hdcp_protection_message));
+ memset(link->hdcp_caps.rx_caps.raw, 0,
+ sizeof(link->hdcp_caps.rx_caps.raw));
+
+ if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->ddc->transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+ link->connector_signal == SIGNAL_TYPE_EDP) {
+ msg22.data = link->hdcp_caps.rx_caps.raw;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+ msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+ } else {
+ msg22.data = &link->hdcp_caps.rx_caps.fields.version;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+ msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+ }
+ msg22.version = HDCP_VERSION_22;
+ msg22.link = HDCP_LINK_PRIMARY;
+ msg22.max_retries = 5;
+ dc_process_hdcp_msg(signal, link, &msg22);
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
+
+ msg14.data = &link->hdcp_caps.bcaps.raw;
+ msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+ msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+ msg14.version = HDCP_VERSION_14;
+ msg14.link = HDCP_LINK_PRIMARY;
+ msg14.max_retries = 5;
+
+ status = dc_process_hdcp_msg(signal, link, &msg14);
+ }
+}
+#endif
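
dc_link_is_hdcp14()/dc_link_is_hdcp22() above reduce to a decision over the connector signal plus the cached bcaps/rx_caps. A compressed stand-alone model of that decision; the field names are flattened stand-ins for the real DPCD capability unions:

#include <stdbool.h>
#include <stdio.h>

enum signal { SIG_DP, SIG_DP_MST, SIG_HDMI, SIG_DVI };

struct hdcp_caps {
	bool bcaps_hdcp_capable;   /* DP: BCAPS capable bit        */
	bool rx_hdcp2_capable;     /* DP: RX_CAPS capable bit      */
	int  rx_version;           /* 0x2 over DP, 0x4 over HDMI   */
};

static bool is_hdcp14(enum signal s, const struct hdcp_caps *c)
{
	switch (s) {
	case SIG_DP:
	case SIG_DP_MST:
		return c->bcaps_hdcp_capable;
	case SIG_HDMI:
	case SIG_DVI:
		/* HDMI sinks don't report 1.4 capability; polling bksv is
		 * unreliable on some panels, so 1.4 is assumed present. */
		return true;
	}
	return false;
}

static bool is_hdcp22(enum signal s, const struct hdcp_caps *c)
{
	switch (s) {
	case SIG_DP:
	case SIG_DP_MST:
		return c->bcaps_hdcp_capable && c->rx_hdcp2_capable &&
		       c->rx_version == 0x2;
	case SIG_HDMI:
	case SIG_DVI:
		return c->rx_version == 0x4;
	}
	return false;
}

int main(void)
{
	struct hdcp_caps dp = { true, true, 0x2 };

	printf("DP: hdcp1.4=%d hdcp2.2=%d\n",
	       is_hdcp14(SIG_DP, &dp), is_hdcp22(SIG_DP, &dp));
	return 0;
}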
static void read_current_link_settings_on_detect(struct dc_link *link)
{
@@ -532,18 +623,18 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
// Read DPCD 00101h to find out the number of lanes currently set
for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_LANE_COUNT_SET,
- &lane_count_set.raw,
- sizeof(lane_count_set));
+ status = core_link_read_dpcd(link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
/* First DPCD read after VDD ON can fail if the particular board
* does not have HPD pin wired correctly. So if DPCD read fails,
	 * which should never happen, retry a few times. Target worst
* case scenario of 80 ms.
*/
if (status == DC_OK) {
- link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+ link->cur_link_settings.lane_count =
+ lane_count_set.bits.LANE_COUNT_SET;
break;
}
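
The comment above is the whole story: the first DPCD read after VDD-on can fail on boards with a miswired HPD pin, so the read is retried a handful of times with a worst case of roughly 80 ms. A compilable model of that bounded retry; the failing-twice dpcd_read() stub, the retry count, and the register value are illustrative assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum status { OK, IO_FAIL };

/* Stand-in for core_link_read_dpcd(): fails twice, then succeeds,
 * modeling a flaky first read right after panel VDD comes up. */
static enum status dpcd_read(uint32_t addr, uint8_t *out)
{
	static int calls;

	(void)addr;
	if (calls++ < 2)
		return IO_FAIL;
	*out = 4; /* pretend LANE_COUNT_SET reads back 4 lanes */
	return OK;
}

/* Bounded retry; the real loop targets ~80 ms worst case. */
static bool read_lane_count(uint8_t *lane_count)
{
	for (int i = 0; i < 4; i++)
		if (dpcd_read(0x101 /* DP_LANE_COUNT_SET */, lane_count) == OK)
			return true;
	return false;
}

int main(void)
{
	uint8_t lanes = 0;

	printf("ok=%d lanes=%u\n", read_lane_count(&lanes), lanes);
	return 0;
}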
@@ -552,7 +643,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
// Read DPCD 00100h to find if standard link rates are set
core_link_read_dpcd(link, DP_LINK_BW_SET,
- &link_bw_set, sizeof(link_bw_set));
+ &link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
if (link->connector_signal == SIGNAL_TYPE_EDP) {
@@ -560,12 +651,12 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
* Read DPCD 00115h to find the edp link rate set used
*/
core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
+ &link_rate_set, sizeof(link_rate_set));
// edp_supported_link_rates_count = 0 for DP
if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
link->cur_link_settings.link_rate_set = link_rate_set;
link->cur_link_settings.use_link_rate_set = true;
}
@@ -579,7 +670,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
}
// Read DPCD 00003h to find the max down spread.
core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
- &max_down_spread.raw, sizeof(max_down_spread));
+ &max_down_spread.raw, sizeof(max_down_spread));
link->cur_link_settings.link_spread =
max_down_spread.bits.MAX_DOWN_SPREAD ?
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
@@ -612,6 +703,12 @@ static bool detect_dp(struct dc_link *link,
dal_ddc_service_set_transaction_type(link->ddc,
sink_caps->transaction_type);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ /* In case of fallback to SST when topology discovery below fails
+ * HDCP caps will be querried again later by the upper layer (caller
+ * of this function). */
+ query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link);
+#endif
/*
* This call will initiate MST topology discovery. Which
* will detect MST ports and add new DRM connector DRM
@@ -683,12 +780,12 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
if (new_edid->length == 0)
return false;
- return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
+ return (memcmp(old_edid->raw_edid,
+ new_edid->raw_edid, new_edid->length) == 0);
}
-static bool wait_for_alt_mode(struct dc_link *link)
+static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
{
-
	/*
	 * something is terribly wrong if the timeout is > 200ms. (5Hz)
	 * 500 microseconds * 400 tries gives us 200 ms
@@ -703,7 +800,7 @@ static bool wait_for_alt_mode(struct dc_link *link)
DC_LOGGER_INIT(link->ctx->logger);
- if (link->link_enc->funcs->is_in_alt_mode == NULL)
+ if (!link->link_enc->funcs->is_in_alt_mode)
return true;
is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
@@ -718,21 +815,21 @@ static bool wait_for_alt_mode(struct dc_link *link)
udelay(sleep_time_in_microseconds);
/* ask the link if alt mode is enabled, if so return ok */
if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
-
finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(
- link->ctx, finish_timestamp, enter_timestamp);
+ time_taken_in_ns =
+ dm_get_elapse_time_in_ns(link->ctx,
+ finish_timestamp,
+ enter_timestamp);
DC_LOG_WARNING("Alt mode entered finished after %llu ms\n",
div_u64(time_taken_in_ns, 1000000));
return true;
}
-
}
finish_timestamp = dm_get_timestamp(link->ctx);
time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
enter_timestamp);
DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
- div_u64(time_taken_in_ns, 1000000));
+ div_u64(time_taken_in_ns, 1000000));
return false;
}
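
wait_for_entering_dp_alt_mode() above is a poll-with-timeout: check once up front, then poll the encoder every 500 us for up to 400 tries (the 200 ms budget from the comment), logging how long entry took. A stand-alone model of that shape; is_in_alt_mode() and udelay() are stubs, and the threshold in the stub is arbitrary:

#include <stdbool.h>
#include <stdio.h>

static int poll_count;
static bool is_in_alt_mode(void) { return ++poll_count > 50; }
static void udelay(unsigned us) { (void)us; }

/* 400 tries x 500 us == 200 ms budget, as in the driver. */
static bool wait_for_alt_mode(void)
{
	const unsigned sleep_us = 500, tries = 400;

	if (is_in_alt_mode())
		return true;           /* already entered, nothing to wait for */

	for (unsigned i = 0; i < tries; i++) {
		udelay(sleep_us);
		if (is_in_alt_mode())
			return true;   /* entered within budget */
	}
	return false;                  /* timed out after ~200 ms */
}

int main(void)
{
	printf("alt mode entered: %d\n", wait_for_alt_mode());
	return 0;
}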
@@ -768,30 +865,30 @@ static bool dc_link_detect_helper(struct dc_link *link,
return false;
if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
- link->connector_signal == SIGNAL_TYPE_EDP) &&
- link->local_sink) {
-
+ link->connector_signal == SIGNAL_TYPE_EDP) &&
+ link->local_sink) {
// need to re-write OUI and brightness in resume case
if (link->connector_signal == SIGNAL_TYPE_EDP) {
dpcd_set_source_specific_data(link);
- dc_link_set_default_brightness_aux(link); //TODO: use cached
+ dc_link_set_default_brightness_aux(link);
+ //TODO: use cached
}
return true;
}
- if (false == dc_link_detect_sink(link, &new_connection_type)) {
+ if (!dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
return false;
}
prev_sink = link->local_sink;
- if (prev_sink != NULL) {
+ if (prev_sink) {
dc_sink_retain(prev_sink);
memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
}
- link_disconnect_sink(link);
+ link_disconnect_sink(link);
if (new_connection_type != dc_connection_none) {
link->type = new_connection_type;
link->link_state_valid = false;
@@ -838,35 +935,31 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_DISPLAY_PORT: {
-
/* wa HPD high coming too early*/
if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
-
/* if alt mode times out, return false */
- if (wait_for_alt_mode(link) == false) {
+ if (!wait_for_entering_dp_alt_mode(link))
return false;
- }
}
- if (!detect_dp(
- link,
- &sink_caps,
- &converter_disable_audio,
- aud_support, reason)) {
- if (prev_sink != NULL)
+ if (!detect_dp(link, &sink_caps,
+ &converter_disable_audio,
+ aud_support, reason)) {
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
	// Check if dpcd block is the same
- if (prev_sink != NULL) {
- if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
+ if (prev_sink) {
+ if (memcmp(&link->dpcd_caps, &prev_dpcd_caps,
+ sizeof(struct dpcd_caps)))
same_dpcd = false;
}
/* Active dongle downstream unplug*/
if (link->type == dc_connection_active_dongle &&
- link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
- if (prev_sink != NULL)
+ link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
+ if (prev_sink)
/* Downstream unplug */
dc_sink_release(prev_sink);
return true;
@@ -874,7 +967,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
if (link->type == dc_connection_mst_branch) {
LINK_INFO("link=%d, mst branch is now Connected\n",
- link->link_index);
+ link->link_index);
/* Need to setup mst link_cap struct here
* otherwise dc_link_detect() will leave mst link_cap
* empty which leads to allocate_mst_payload() has "0"
@@ -882,15 +975,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
*/
dp_verify_mst_link_cap(link);
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
// For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified.
if (reason == DETECT_REASON_BOOT &&
- dc_ctx->dc->config.power_down_display_on_boot == false &&
- link->link_status.link_active == true)
+ !dc_ctx->dc->config.power_down_display_on_boot &&
+ link->link_status.link_active)
perform_dp_seamless_boot = true;
if (perform_dp_seamless_boot) {
@@ -903,24 +996,23 @@ static bool dc_link_detect_helper(struct dc_link *link,
default:
DC_ERROR("Invalid connector type! signal:%d\n",
- link->connector_signal);
- if (prev_sink != NULL)
+ link->connector_signal);
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
} /* switch() */
if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
- link->dpcd_sink_count = link->dpcd_caps.sink_count.
- bits.SINK_COUNT;
+ link->dpcd_sink_count =
+ link->dpcd_caps.sink_count.bits.SINK_COUNT;
else
link->dpcd_sink_count = 1;
- dal_ddc_service_set_transaction_type(
- link->ddc,
- sink_caps.transaction_type);
+ dal_ddc_service_set_transaction_type(link->ddc,
+ sink_caps.transaction_type);
- link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode(
- link->ddc);
+ link->aux_mode =
+ dal_ddc_service_is_in_aux_transaction_mode(link->ddc);
sink_init_data.link = link;
sink_init_data.sink_signal = sink_caps.signal;
@@ -928,7 +1020,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
sink = dc_sink_create(&sink_init_data);
if (!sink) {
DC_ERROR("Failed to create sink!\n");
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
@@ -939,10 +1031,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
/* dc_sink_create returns a new reference */
link->local_sink = sink;
- edid_status = dm_helpers_read_local_edid(
- link->ctx,
- link,
- sink);
+ edid_status = dm_helpers_read_local_edid(link->ctx,
+ link, sink);
switch (edid_status) {
case EDID_BAD_CHECKSUM:
@@ -950,7 +1040,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
case EDID_NO_RESPONSE:
DC_LOG_ERROR("No EDID read.\n");
-
/*
* Abort detection for non-DP connectors if we have
* no EDID
@@ -961,7 +1050,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
*/
if (dc_is_hdmi_signal(link->connector_signal) ||
dc_is_dvi_signal(link->connector_signal)) {
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
@@ -974,45 +1063,53 @@ static bool dc_link_detect_helper(struct dc_link *link,
link->ctx->dc->debug.disable_fec = true;
// Check if edid is the same
- if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
- same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+ if ((prev_sink) &&
+ (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
+ same_edid = is_same_edid(&prev_sink->dc_edid,
+ &sink->dc_edid);
if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
link->ctx->dc->debug.hdmi20_disable = true;
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ sink_caps.transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
* TODO debug why Dell 2413 doesn't like
* two link trainings
*/
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ query_hdcp_capability(sink->sink_signal, link);
+#endif
// verify link cap for SST non-seamless boot
if (!perform_dp_seamless_boot)
dp_verify_link_cap_with_retries(link,
- &link->reported_link_cap,
- LINK_TRAINING_MAX_VERIFY_RETRY);
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
link_disconnect_remap(prev_sink, link);
sink = prev_sink;
prev_sink = NULL;
-
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ query_hdcp_capability(sink->sink_signal, link);
+#endif
}
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
- !sink->edid_caps.edid_hdmi)
+ !sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
/* Connectivity log: detection */
for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
- &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
- DC_EDID_BLOCK_SIZE,
- "%s: [Block %d] ", sink->edid_caps.display_name, i);
+ &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+ DC_EDID_BLOCK_SIZE,
+ "%s: [Block %d] ", sink->edid_caps.display_name, i);
}
DC_LOG_DETECTION_EDID_PARSER("%s: "
@@ -1047,17 +1144,18 @@ static bool dc_link_detect_helper(struct dc_link *link,
sink->edid_caps.audio_modes[i].sample_rate,
sink->edid_caps.audio_modes[i].sample_size);
}
-
} else {
/* From Connected-to-Disconnected. */
if (link->type == dc_connection_mst_branch) {
LINK_INFO("link=%d, mst branch is now Disconnected\n",
- link->link_index);
+ link->link_index);
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
link->mst_stream_alloc_table.stream_count = 0;
- memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
+ memset(link->mst_stream_alloc_table.stream_allocations,
+ 0,
+ sizeof(link->mst_stream_alloc_table.stream_allocations));
}
link->type = dc_connection_none;
@@ -1071,16 +1169,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
- link->link_index, sink,
- (sink_caps.signal == SIGNAL_TYPE_NONE ?
- "Disconnected":"Connected"), prev_sink,
- same_dpcd, same_edid);
+ link->link_index, sink,
+ (sink_caps.signal ==
+ SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
+ prev_sink, same_dpcd, same_edid);
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return true;
-
}
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
@@ -1110,13 +1207,13 @@ bool dc_link_get_hpd_state(struct dc_link *dc_link)
return state;
}
-static enum hpd_source_id get_hpd_line(
- struct dc_link *link)
+static enum hpd_source_id get_hpd_line(struct dc_link *link)
{
struct gpio *hpd;
enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
if (hpd) {
switch (dal_irq_get_source(hpd)) {
@@ -1191,8 +1288,7 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
-static enum transmitter translate_encoder_to_transmitter(
- struct graphics_object_id encoder)
+static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
{
switch (encoder.id) {
case ENCODER_ID_INTERNAL_UNIPHY:
@@ -1256,17 +1352,18 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
-static bool dc_link_construct(
- struct dc_link *link,
- const struct link_init_data *init_params)
+static bool dc_link_construct(struct dc_link *link,
+ const struct link_init_data *init_params)
{
uint8_t i;
struct ddc_service_init_data ddc_service_init_data = { { 0 } };
struct dc_context *dc_ctx = init_params->ctx;
struct encoder_init_data enc_init_data = { 0 };
+ struct panel_cntl_init_data panel_cntl_init_data = { 0 };
struct integrated_info info = {{{ 0 }}};
struct dc_bios *bios = init_params->dc->ctx->dc_bios;
const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
DC_LOGGER_INIT(dc_ctx->logger);
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
@@ -1278,23 +1375,27 @@ static bool dc_link_construct(
link->ctx = dc_ctx;
link->link_index = init_params->link_index;
- memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides));
- memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings));
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
- link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+ link->link_id =
+ bios->funcs->get_connector_id(bios, init_params->connector_index);
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
- __func__, init_params->connector_index,
- link->link_id.type, OBJECT_TYPE_CONNECTOR);
+ __func__, init_params->connector_index,
+ link->link_id.type, OBJECT_TYPE_CONNECTOR);
goto create_fail;
}
if (link->dc->res_pool->funcs->link_init)
link->dc->res_pool->funcs->link_init(link);
- link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
- if (link->hpd_gpio != NULL) {
+ link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+ if (link->hpd_gpio) {
dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
dal_gpio_unlock_pin(link->hpd_gpio);
link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
@@ -1314,9 +1415,9 @@ static bool dc_link_construct(
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
case CONNECTOR_ID_DISPLAY_PORT:
- link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (link->hpd_gpio != NULL)
+ if (link->hpd_gpio)
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
@@ -1324,42 +1425,60 @@ static bool dc_link_construct(
case CONNECTOR_ID_EDP:
link->connector_signal = SIGNAL_TYPE_EDP;
- if (link->hpd_gpio != NULL) {
+ if (link->hpd_gpio) {
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
}
+
break;
case CONNECTOR_ID_LVDS:
link->connector_signal = SIGNAL_TYPE_LVDS;
break;
default:
- DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id);
+ DC_LOG_WARNING("Unsupported Connector type:%d!\n",
+ link->link_id.id);
goto create_fail;
}
/* TODO: #DAL3 Implement id to str function.*/
LINK_INFO("Connector[%d] description:"
- "signal %d\n",
- init_params->connector_index,
- link->connector_signal);
+ "signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
ddc_service_init_data.ctx = link->ctx;
ddc_service_init_data.id = link->link_id;
ddc_service_init_data.link = link;
link->ddc = dal_ddc_service_create(&ddc_service_init_data);
- if (link->ddc == NULL) {
+ if (!link->ddc) {
DC_ERROR("Failed to create ddc_service!\n");
goto ddc_create_fail;
}
link->ddc_hw_inst =
- dal_ddc_get_line(
- dal_ddc_service_get_ddc_pin(link->ddc));
+ dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
+
+
+ if (link->dc->res_pool->funcs->panel_cntl_create &&
+ (link->link_id.id == CONNECTOR_ID_EDP ||
+ link->link_id.id == CONNECTOR_ID_LVDS)) {
+ panel_cntl_init_data.ctx = dc_ctx;
+ panel_cntl_init_data.inst = 0;
+ link->panel_cntl =
+ link->dc->res_pool->funcs->panel_cntl_create(
+ &panel_cntl_init_data);
+
+ if (link->panel_cntl == NULL) {
+ DC_ERROR("Failed to create link panel_cntl!\n");
+ goto panel_cntl_create_fail;
+ }
+ }
enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+ bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
+ &enc_init_data.encoder);
enc_init_data.connector = link->link_id;
enc_init_data.channel = get_ddc_line(link);
enc_init_data.hpd_source = get_hpd_line(link);
@@ -1367,11 +1486,11 @@ static bool dc_link_construct(
link->hpd_src = enc_init_data.hpd_source;
enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
- link->link_enc = link->dc->res_pool->funcs->link_enc_create(
- &enc_init_data);
+ translate_encoder_to_transmitter(enc_init_data.encoder);
+ link->link_enc =
+ link->dc->res_pool->funcs->link_enc_create(&enc_init_data);
- if (link->link_enc == NULL) {
+ if (!link->link_enc) {
DC_ERROR("Failed to create link encoder!\n");
goto link_enc_create_fail;
}
@@ -1379,8 +1498,9 @@ static bool dc_link_construct(
link->link_enc_hw_inst = link->link_enc->transmitter;
for (i = 0; i < 4; i++) {
- if (BP_RESULT_OK !=
- bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+ if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+ link->link_id, i,
+ &link->device_tag) != BP_RESULT_OK) {
DC_ERROR("Failed to find device tag!\n");
goto device_tag_fail;
}
@@ -1388,13 +1508,14 @@ static bool dc_link_construct(
/* Look for device tag that matches connector signal,
	 * CRT for rgb, LCD for other supported signal types
*/
- if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
+ link->device_tag.dev_id))
continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
- && link->connector_signal != SIGNAL_TYPE_RGB)
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
+ link->connector_signal != SIGNAL_TYPE_RGB)
continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
- && link->connector_signal == SIGNAL_TYPE_RGB)
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
+ link->connector_signal == SIGNAL_TYPE_RGB)
continue;
break;
}
@@ -1406,16 +1527,16 @@ static bool dc_link_construct(
for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
struct external_display_path *path =
&info.ext_disp_conn_info.path[i];
- if (path->device_connector_id.enum_id == link->link_id.enum_id
- && path->device_connector_id.id == link->link_id.id
- && path->device_connector_id.type == link->link_id.type) {
- if (link->device_tag.acpi_device != 0
- && path->device_acpi_enum == link->device_tag.acpi_device) {
+ if (path->device_connector_id.enum_id == link->link_id.enum_id &&
+ path->device_connector_id.id == link->link_id.id &&
+ path->device_connector_id.type == link->link_id.type) {
+ if (link->device_tag.acpi_device != 0 &&
+ path->device_acpi_enum == link->device_tag.acpi_device) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
} else if (path->device_tag ==
- link->device_tag.dev_id.raw_device_tag) {
+ link->device_tag.dev_id.raw_device_tag) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
}
@@ -1431,15 +1552,20 @@ static bool dc_link_construct(
*/
program_hpd_filter(link);
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
return true;
device_tag_fail:
link->link_enc->funcs->destroy(&link->link_enc);
link_enc_create_fail:
+ if (link->panel_cntl != NULL)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+panel_cntl_create_fail:
dal_ddc_service_destroy(&link->ddc);
ddc_create_fail:
create_fail:
- if (link->hpd_gpio != NULL) {
+ if (link->hpd_gpio) {
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -2339,9 +2465,28 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
}
+static struct abm *get_abm_from_stream_res(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx.stream;
+
+ if (stream && stream->link == link) {
+ abm = pipe_ctx.stream_res.abm;
+ break;
+ }
+ }
+ return abm;
+}
+
int dc_link_get_backlight_level(const struct dc_link *link)
{
- struct abm *abm = link->ctx->dc->res_pool->abm;
+	struct abm *abm = get_abm_from_stream_res(link);
if (abm == NULL || abm->funcs->get_current_backlight == NULL)
return DC_ERROR_UNEXPECTED;
@@ -2349,71 +2494,63 @@ int dc_link_get_backlight_level(const struct dc_link *link)
return (int) abm->funcs->get_current_backlight(abm);
}
-bool dc_link_set_backlight_level(const struct dc_link *link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
+int dc_link_get_target_backlight_pwm(const struct dc_link *link)
{
- struct dc *dc = link->ctx->dc;
- struct abm *abm = dc->res_pool->abm;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- unsigned int controller_id = 0;
- bool use_smooth_brightness = true;
- int i;
- DC_LOGGER_INIT(link->ctx->logger);
+ struct abm *abm = get_abm_from_stream_res(link);
- if ((dmcu == NULL) ||
- (abm == NULL) ||
- (abm->funcs->set_backlight_level_pwm == NULL))
- return false;
+ if (abm == NULL || abm->funcs->get_target_backlight == NULL)
+ return DC_ERROR_UNEXPECTED;
- use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+ return (int) abm->funcs->get_target_backlight(abm);
+}
- DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
- backlight_pwm_u16_16, backlight_pwm_u16_16);
+static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
- if (dc_is_embedded_signal(link->connector_signal)) {
- for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
- if (dc->current_state->res_ctx.
- pipe_ctx[i].stream->link
- == link) {
- /* DMCU -1 for all controller id values,
- * therefore +1 here
- */
- controller_id =
- dc->current_state->
- res_ctx.pipe_ctx[i].stream_res.tg->inst +
- 1;
-
- /* Disable brightness ramping when the display is blanked
- * as it can hang the DMCU
- */
- if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
- frame_ramp = 0;
- }
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ break;
}
}
- abm->funcs->set_backlight_level_pwm(
- abm,
- backlight_pwm_u16_16,
- frame_ramp,
- controller_id,
- use_smooth_brightness);
}
- return true;
+ return pipe_ctx;
}
-bool dc_link_set_abm_disable(const struct dc_link *link)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
{
struct dc *dc = link->ctx->dc;
- struct abm *abm = dc->res_pool->abm;
- if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
- return false;
+ DC_LOGGER_INIT(link->ctx->logger);
+ DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+ backlight_pwm_u16_16, backlight_pwm_u16_16);
+
+ if (dc_is_embedded_signal(link->connector_signal)) {
+ struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
- abm->funcs->set_abm_immediate_disable(abm);
+ if (pipe_ctx) {
+ /* Disable brightness ramping when the display is blanked
+ * as it can hang the DMCU
+ */
+ if (pipe_ctx->plane_state == NULL)
+ frame_ramp = 0;
+ } else {
+ ASSERT(false);
+ return false;
+ }
+ dc->hwss.set_backlight_level(
+ pipe_ctx,
+ backlight_pwm_u16_16,
+ frame_ramp);
+ }
return true;
}
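
After this hunk, backlight control resolves the pipe (and, for the read paths, the ABM) through whichever stream currently drives the given link, instead of using the single res_pool instance. A stand-alone model of that lookup, with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

#define MAX_PIPES 6

struct abm { int id; };
struct stream { const void *link; };
struct pipe_ctx { struct stream *stream; struct abm *abm; };

/* Model of get_abm_from_stream_res(): walk the pipes and pick the ABM
 * owned by whichever stream drives this link. */
static struct abm *abm_for_link(struct pipe_ctx pipes[], const void *link)
{
	for (int i = 0; i < MAX_PIPES; i++)
		if (pipes[i].stream && pipes[i].stream->link == link)
			return pipes[i].abm;
	return NULL;
}

int main(void)
{
	int link;
	struct abm abm1 = { 1 };
	struct stream s = { &link };
	struct pipe_ctx pipes[MAX_PIPES] = { { &s, &abm1 } };
	struct abm *abm = abm_for_link(pipes, &link);

	printf("abm id: %d\n", abm ? abm->id : -1);
	return 0;
}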
@@ -2423,12 +2560,12 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if (psr != NULL && link->psr_feature_enabled)
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
psr->funcs->psr_enable(psr, allow_active);
- else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
- link->psr_allow_active = allow_active;
+ link->psr_settings.psr_allow_active = allow_active;
return true;
}
@@ -2439,9 +2576,9 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if (psr != NULL && link->psr_feature_enabled)
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
psr->funcs->psr_get_state(psr, psr_state);
- else if (dmcu != NULL && link->psr_feature_enabled)
+ else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state);
return true;
@@ -2612,14 +2749,14 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->frame_delay = 0;
if (psr)
- link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
+ link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
else
- link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
/* psr_enabled == 0 indicates setup_psr did not succeed, but this
* should not happen since firmware should be running at this point
*/
- if (link->psr_feature_enabled == 0)
+ if (link->psr_settings.psr_feature_enabled == 0)
ASSERT(0);
return true;
@@ -2966,7 +3103,7 @@ void core_link_enable_stream(
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
@@ -3040,6 +3177,18 @@ void core_link_enable_stream(
if (pipe_ctx->stream->dpms_off)
return;
+ /* Have to setup DSC before DIG FE and BE are connected (which happens before the
+ * link training). This is to make sure the bandwidth sent to DIG BE won't be
+ * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+ * will be automatically set at a later time when the video is enabled
+ * (DP_VID_STREAM_EN = 1).
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, true);
+ }
+
status = enable_link(state, pipe_ctx);
if (status != DC_OK) {
@@ -3067,11 +3216,6 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
- }
dc->hwss.enable_stream(pipe_ctx);
/* Set DPS PPS SDP (AKA "info frames") */
@@ -3101,6 +3245,10 @@ void core_link_enable_stream(
dp_set_dsc_enable(pipe_ctx, true);
}
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		core_link_set_avmute(pipe_ctx, false);
}
void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
@@ -3109,10 +3257,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		core_link_set_avmute(pipe_ctx, true);
+
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
#endif
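
The reordering in core_link_enable_stream() above is the point of the hunk: DSC must be programmed before enable_link() connects DIG FE and BE, so the bandwidth presented to the DIG BE never exceeds what it can carry; the VBID[6] flag then follows automatically once the video stream is enabled. A stand-alone ordering sketch; every function is a stub named after the corresponding driver step:

#include <stdbool.h>
#include <stdio.h>

static void dp_set_dsc_enable(bool on) { printf("dsc %s\n", on ? "on" : "off"); }
static bool enable_link(void)          { printf("link training\n"); return true; }
static void enable_stream(void)        { printf("stream on\n"); }

/* DSC configuration now precedes link training, mirroring the move of
 * dp_set_dsc_enable() ahead of enable_link() in the hunk above. */
int main(void)
{
	bool timing_uses_dsc = true;

	if (timing_uses_dsc)
		dp_set_dsc_enable(true);  /* moved ahead of enable_link() */
	if (!enable_link())
		return 1;
	enable_stream();
	return 0;
}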
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 256889eed93e..aefd29a440b5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -599,7 +599,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
- payload->length ? true : false;
+ payload->length;
current_payload.address = payload->address;
current_payload.data = &payload->data[retrieved];
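
dal_ddc_submit_aux_command() walks the payload in DEFAULT_AUX_MAX_DATA_SIZE chunks, flagging the piece that covers the remainder as end-of-payload. A simplified stand-alone model of that chunking; the chunk size here is arbitrary and the >= end test is a slight simplification of the driver's > comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AUX_MAX_DATA 16 /* stand-in for DEFAULT_AUX_MAX_DATA_SIZE */

/* Send a payload in AUX_MAX_DATA pieces; only the final piece is
 * flagged as end-of-payload. */
static void submit_aux(const uint8_t *data, unsigned length)
{
	unsigned retrieved = 0;

	(void)data;
	while (retrieved < length) {
		bool end = (retrieved + AUX_MAX_DATA) >= length;
		unsigned n = end ? length - retrieved : AUX_MAX_DATA;

		printf("chunk at %u, %u bytes, end=%d\n", retrieved, n, end);
		retrieved += n;
	}
}

int main(void)
{
	uint8_t edid[37] = { 0 };

	submit_aux(edid, sizeof(edid));
	return 0;
}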
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 27a7d2a58079..91cd884d6f25 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -13,7 +13,6 @@
#include "core_status.h"
#include "dpcd_defs.h"
-#include "resource.h"
#define DC_LOGGER \
link->ctx->logger
@@ -220,6 +219,30 @@ static enum dpcd_training_patterns
return dpcd_tr_pattern;
}
+static uint8_t dc_dp_initialize_scrambling_data_symbols(
+	struct dc_link *link,
+	enum dc_dp_training_pattern pattern)
+{
+	uint8_t disable_scrambled_data_symbols = 0;
+
+	switch (pattern) {
+	case DP_TRAINING_PATTERN_SEQUENCE_1:
+	case DP_TRAINING_PATTERN_SEQUENCE_2:
+	case DP_TRAINING_PATTERN_SEQUENCE_3:
+		disable_scrambled_data_symbols = 1;
+		break;
+	case DP_TRAINING_PATTERN_SEQUENCE_4:
+		disable_scrambled_data_symbols = 0;
+		break;
+	default:
+		ASSERT(0);
+		DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
+			__func__, pattern);
+		break;
+	}
+	return disable_scrambled_data_symbols;
+}
+
static inline bool is_repeater(struct dc_link *link, uint32_t offset)
{
return (!link->is_lttpr_mode_transparent && offset != 0);
@@ -252,6 +275,9 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
+ dc_dp_initialize_scrambling_data_symbols(link, pattern);
+
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
@@ -1710,19 +1736,10 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
- /* Set Default link settings */
- struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
- LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
-
- /* Higher link settings based on feature supported */
- if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
- max_link_cap.link_rate = LINK_RATE_HIGH2;
-
- if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
- max_link_cap.link_rate = LINK_RATE_HIGH3;
+ struct dc_link_settings max_link_cap = {0};
- if (link->link_enc->funcs->get_max_link_cap)
- link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+ /* get max link encoder capability */
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -2426,7 +2443,7 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
union dpcd_psr_configuration psr_configuration;
- if (!link->psr_feature_enabled)
+ if (!link->psr_settings.psr_feature_enabled)
return false;
dm_helpers_dp_read_dpcd(
@@ -2530,7 +2547,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
/* get phy test pattern and pattern parameters from DP receiver */
core_link_read_dpcd(
link,
- DP_TEST_PHY_PATTERN,
+ DP_PHY_TEST_PATTERN,
&dpcd_test_pattern.raw,
sizeof(dpcd_test_pattern));
core_link_read_dpcd(
@@ -4240,7 +4257,7 @@ void dpcd_set_source_specific_data(struct dc_link *link)
{
const uint32_t post_oui_delay = 30; // 30ms
uint8_t dspc = 0;
- enum dc_status ret = DC_ERROR_UNEXPECTED;
+ enum dc_status ret;
ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
sizeof(dspc));
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 51e0ee6e7695..6590f51caefa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -400,7 +400,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
struct dc_stream_state *stream = pipe_ctx->stream;
bool result = false;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
result = true;
else
result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index f4bcc71b2920..0c5619364e7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -532,6 +532,24 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
+int get_num_mpc_splits(struct pipe_ctx *pipe)
+{
+ int mpc_split_count = 0;
+ struct pipe_ctx *other_pipe = pipe->bottom_pipe;
+
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->bottom_pipe;
+ }
+ other_pipe = pipe->top_pipe;
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->top_pipe;
+ }
+
+ return mpc_split_count;
+}
+
int get_num_odm_splits(struct pipe_ctx *pipe)
{
int odm_split_count = 0;
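get_num_mpc_splits() walks both directions of the vertically linked pipe list and counts neighbours that share the same plane_state. A self-contained model of that walk, using hypothetical trimmed-down types for illustration:

struct pipe {
	struct pipe *top_pipe;
	struct pipe *bottom_pipe;
	const void *plane_state;	/* pipes in one MPC combine share this */
};

static int count_mpc_splits(const struct pipe *pipe)
{
	int n = 0;
	const struct pipe *p;

	/* splits below, then splits above; 0 means the plane is unsplit */
	for (p = pipe->bottom_pipe; p && p->plane_state == pipe->plane_state; p = p->bottom_pipe)
		n++;
	for (p = pipe->top_pipe; p && p->plane_state == pipe->plane_state; p = p->top_pipe)
		n++;
	return n;
}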
@@ -556,16 +574,11 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli
/*Check for mpc split*/
struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+ *split_count = get_num_mpc_splits(pipe_ctx);
while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
(*split_idx)++;
- (*split_count)++;
split_pipe = split_pipe->top_pipe;
}
- split_pipe = pipe_ctx->bottom_pipe;
- while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
- (*split_count)++;
- split_pipe = split_pipe->bottom_pipe;
- }
} else {
/*Get odm split index*/
struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
@@ -692,6 +705,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
/* Round up, assume original video size always even dimensions */
data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+
+ data->viewport_unadjusted = data->viewport;
+ data->viewport_c_unadjusted = data->viewport_c;
}
static void calculate_recout(struct pipe_ctx *pipe_ctx)
@@ -1061,8 +1077,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
calculate_viewport(pipe_ctx);
- if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
- pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 12 ||
+ pipe_ctx->plane_res.scl_data.viewport.width < 12) {
if (store_h_border_left) {
restore_border_left_from_dst(pipe_ctx,
store_h_border_left);
@@ -1358,9 +1374,6 @@ bool dc_add_plane_to_context(
dc_plane_state_retain(plane_state);
while (head_pipe) {
- tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
- ASSERT(tail_pipe);
-
free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -1378,6 +1391,8 @@ bool dc_add_plane_to_context(
free_pipe->plane_state = plane_state;
if (head_pipe != free_pipe) {
+ tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
+ ASSERT(tail_pipe);
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1545,35 +1560,6 @@ bool dc_add_all_planes_for_stream(
return add_all_planes_for_stream(dc, stream, &set, 1, context);
}
-
-static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
- struct dc_stream_state *new_stream)
-{
- if (cur_stream == NULL)
- return true;
-
- if (memcmp(&cur_stream->hdr_static_metadata,
- &new_stream->hdr_static_metadata,
- sizeof(struct dc_info_packet)) != 0)
- return true;
-
- return false;
-}
-
-static bool is_vsc_info_packet_changed(struct dc_stream_state *cur_stream,
- struct dc_stream_state *new_stream)
-{
- if (cur_stream == NULL)
- return true;
-
- if (memcmp(&cur_stream->vsc_infopacket,
- &new_stream->vsc_infopacket,
- sizeof(struct dc_info_packet)) != 0)
- return true;
-
- return false;
-}
-
static bool is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
@@ -1608,15 +1594,9 @@ static bool are_stream_backends_same(
if (is_timing_changed(stream_a, stream_b))
return false;
- if (is_hdr_static_meta_changed(stream_a, stream_b))
- return false;
-
if (stream_a->dpms_off != stream_b->dpms_off)
return false;
- if (is_vsc_info_packet_changed(stream_a, stream_b))
- return false;
-
return true;
}
@@ -1756,21 +1736,6 @@ static struct audio *find_first_free_audio(
return 0;
}
-bool resource_is_stream_unchanged(
- struct dc_state *old_context, struct dc_stream_state *stream)
-{
- int i;
-
- for (i = 0; i < old_context->stream_count; i++) {
- struct dc_stream_state *old_stream = old_context->streams[i];
-
- if (are_stream_backends_same(old_stream, stream))
- return true;
- }
-
- return false;
-}
-
/**
* dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
*/
@@ -2025,17 +1990,6 @@ enum dc_status resource_map_pool_resources(
int pipe_idx = -1;
struct dc_bios *dcb = dc->ctx->dc_bios;
- /* TODO Check if this is needed */
- /*if (!resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- stream->bit_depth_params =
- old_context->streams[i]->bit_depth_params;
- stream->clamping = old_context->streams[i]->clamping;
- continue;
- }
- }
- */
-
calculate_phy_pix_clks(stream);
/* TODO: Check Linux */
@@ -2718,19 +2672,16 @@ bool pipe_need_reprogram(
if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
- if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
- return true;
-
if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
return true;
- if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
- return true;
-
if (false == pipe_ctx_old->stream->link->link_state_valid &&
false == pipe_ctx_old->stream->dpms_off)
return true;
+ if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
+ return true;
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index a249a0e5edd0..9e16af22e4aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -54,6 +54,7 @@ static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_da
sink->ctx = link->ctx;
sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
sink->converter_disable_audio = init_params->converter_disable_audio;
+ sink->is_mst_legacy = init_params->sink_is_legacy;
sink->dc_container_id = NULL;
sink->sink_id = init_params->link->ctx->dc_sink_id_count;
// increment dc_sink_id_count because we don't want two sinks with same ID
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1935cf6601eb..85908561c741 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -29,6 +29,9 @@
#include "dc_types.h"
#include "grph_object_defs.h"
#include "logger_types.h"
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+#include "hdcp_types.h"
+#endif
#include "gpio_types.h"
#include "link_service_types.h"
#include "grph_object_ctrl_defs.h"
@@ -39,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.76"
+#define DC_VER "3.2.84"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -95,6 +98,49 @@ struct dc_plane_cap {
} max_downscale_factor;
};
+// Color management caps (DPP and MPC)
+struct rom_curve_caps {
+ uint16_t srgb : 1;
+ uint16_t bt2020 : 1;
+ uint16_t gamma2_2 : 1;
+ uint16_t pq : 1;
+ uint16_t hlg : 1;
+};
+
+struct dpp_color_caps {
+ uint16_t dcn_arch : 1; // all DCE generations treated the same
+ // the input LUT differs from most LUTs: it is a plain 256-entry lookup
+ uint16_t input_lut_shared : 1; // shared with DGAM
+ uint16_t icsc : 1;
+ uint16_t dgam_ram : 1;
+ uint16_t post_csc : 1; // before gamut remap
+ uint16_t gamma_corr : 1;
+
+ // hdr_mult and gamut remap always available in DPP (in that order)
+ // 3d lut implies shaper LUT,
+ // it may be shared with MPC - check MPC:shared_3d_lut flag
+ uint16_t hw_3d_lut : 1;
+ uint16_t ogam_ram : 1; // blnd gam
+ uint16_t ocsc : 1;
+ struct rom_curve_caps dgam_rom_caps;
+ struct rom_curve_caps ogam_rom_caps;
+};
+
+struct mpc_color_caps {
+ uint16_t gamut_remap : 1;
+ uint16_t ogam_ram : 1;
+ uint16_t ocsc : 1;
+ uint16_t num_3dluts : 3; // 3D LUT always assumes a preceding shaper LUT
+ uint16_t shared_3d_lut : 1; // can be in either DPP or MPC, but single instance
+
+ struct rom_curve_caps ogam_rom_caps;
+};
+
+struct dc_color_caps {
+ struct dpp_color_caps dpp;
+ struct mpc_color_caps mpc;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -117,9 +163,9 @@ struct dc_caps {
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
bool dmcub_support;
- bool hw_3d_lut;
enum dp_protocol_version max_dp_protocol_version;
struct dc_plane_cap planes[MAX_PLANES];
+ struct dc_color_caps color;
};
struct dc_bug_wa {
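The flat dc_caps.hw_3d_lut flag is replaced by per-block color caps. A hedged example of the corresponding capability check (the helper name is illustrative):

static bool stream_can_use_3dlut(const struct dc *dc)
{
	/* previously dc->caps.hw_3d_lut; the 3D LUT may live in the DPP
	 * or, when shared, in the MPC (see mpc_color_caps.shared_3d_lut) */
	return dc->caps.color.dpp.hw_3d_lut || dc->caps.color.mpc.num_3dluts > 0;
}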
@@ -230,7 +276,8 @@ struct dc_config {
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
- bool psr_on_dmub;
+ bool disable_dmcu;
+ bool enable_4to1MPC;
};
enum visual_confirm {
@@ -238,6 +285,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
VISUAL_CONFIRM_MPCTREE = 4,
+ VISUAL_CONFIRM_PSR = 5,
};
enum dcc_option {
@@ -429,6 +477,7 @@ struct dc_debug_options {
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
bool disable_dsc;
+ bool enable_dram_clock_change_one_display_vactive;
};
struct dc_debug_data {
@@ -474,6 +523,7 @@ struct dc_bounding_box_overrides {
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
+ int dummy_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
@@ -987,6 +1037,7 @@ struct dpcd_caps {
union dpcd_fec_capability fec_cap;
struct dpcd_dsc_capabilities dsc_caps;
struct dc_lttpr_caps lttpr_caps;
+ struct psr_caps psr_caps;
};
@@ -1004,6 +1055,35 @@ union dpcd_sink_ext_caps {
uint8_t raw;
};
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+union hdcp_rx_caps {
+ struct {
+ uint8_t version;
+ uint8_t reserved;
+ struct {
+ uint8_t repeater : 1;
+ uint8_t hdcp_capable : 1;
+ uint8_t reserved : 6;
+ } byte0;
+ } fields;
+ uint8_t raw[3];
+};
+
+union hdcp_bcaps {
+ struct {
+ uint8_t HDCP_CAPABLE:1;
+ uint8_t REPEATER:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+struct hdcp_caps {
+ union hdcp_rx_caps rx_caps;
+ union hdcp_bcaps bcaps;
+};
+#endif
+
#include "dc_link.h"
/*******************************************************************************
@@ -1046,7 +1126,7 @@ struct dc_sink {
void *priv;
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
-
+ bool is_mst_legacy;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
@@ -1073,6 +1153,7 @@ struct dc_sink_init_data {
struct dc_link *link;
uint32_t dongle_max_pix_clk;
bool converter_disable_audio;
+ bool sink_is_legacy;
};
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
@@ -1104,9 +1185,16 @@ void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state);
void dc_resume(struct dc *dc);
-unsigned int dc_get_current_backlight_pwm(struct dc *dc);
-unsigned int dc_get_target_backlight_pwm(struct dc *dc);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+/*
+ * HDCP Interfaces
+ */
+enum hdcp_message_status dc_process_hdcp_msg(
+ enum signal_type signal,
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info);
+#endif
bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 59c298a6484f..eea2429ac67d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -25,7 +25,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
-#include "../dmub/inc/dmub_srv.h"
+#include "../dmub/dmub_srv.h"
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)
@@ -58,7 +58,7 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- struct dmub_cmd_header *cmd)
+ union dmub_rb_cmd *cmd)
{
struct dmub_srv *dmub = dc_dmub_srv->dmub;
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 754b6077539c..a3a09ccb6d26 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -27,10 +27,9 @@
#define _DMUB_DC_SRV_H_
#include "os_types.h"
-#include "../dmub/inc/dmub_cmd.h"
+#include "dmub/dmub_srv.h"
struct dmub_srv;
-struct dmub_cmd_header;
struct dc_reg_helper_state {
bool gather_in_progress;
@@ -49,7 +48,7 @@ struct dc_dmub_srv {
};
void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- struct dmub_cmd_header *cmd);
+ union dmub_rb_cmd *cmd);
void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index bb2730e9521e..af177c087d3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -740,5 +740,11 @@ struct dpcd_dsc_capabilities {
union dpcd_dsc_ext_capabilities dsc_ext_caps;
};
+/* These parameters come from the PSR capabilities reported in the sink's DPCD */
+struct psr_caps {
+ unsigned char psr_version;
+ unsigned int psr_rfb_setup_time;
+ bool psr_exit_link_training_required;
+};
#endif /* DC_DP_TYPES_H */
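A sketch of filling the new struct psr_caps from the eDP PSR DPCD registers (DP_PSR_SUPPORT at 0x070 and DP_PSR_CAPS at 0x071, as defined in drm_dp_helper.h). The 55 us-per-step setup-time decode follows the DP_PSR_SETUP_TIME_* encoding; the helper itself and the exact mapping are assumptions here, not the driver's code:

#include <drm/drm_dp_helper.h>

static void parse_psr_caps(struct psr_caps *caps, const unsigned char dpcd[2])
{
	caps->psr_version = dpcd[0];			/* DP_PSR_SUPPORT */
	/* DP_PSR_CAPS bits 3:1: 0 -> 330 us, each step subtracts 55 us */
	caps->psr_rfb_setup_time = 55 *
		(6 - ((dpcd[1] & DP_PSR_SETUP_TIME_MASK) >> DP_PSR_SETUP_TIME_SHIFT));
	caps->psr_exit_link_training_required =
		!(dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT);
}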
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 737048d8a96c..85a0170be544 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write(
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
@@ -73,7 +73,7 @@ static inline void submit_dmub_burst_write(
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
@@ -92,7 +92,7 @@ static inline void submit_dmub_reg_wait(
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
memset(cmd_buf, 0, sizeof(*cmd_buf));
offload->reg_seq_count = 0;
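dc_dmub_srv_cmd_queue() now takes the whole union dmub_rb_cmd rather than a header pointer: the DMUB ring buffer stores fixed-size command slots, so enqueueing must copy sizeof(union), which a bare header pointer cannot guarantee. A minimal standalone model of that design, with hypothetical types:

#include <string.h>

struct cmd_header { unsigned char type, sub_type; unsigned short payload_bytes; };

union rb_cmd {
	struct cmd_header header;
	unsigned char raw[64];		/* every command fills one ring slot */
};

struct cmd_ring {
	union rb_cmd slot[16];
	unsigned int wptr;
};

static void ring_queue(struct cmd_ring *ring, const union rb_cmd *cmd)
{
	/* full-slot copy: safe because the caller hands over the whole union */
	memcpy(&ring->slot[ring->wptr], cmd, sizeof(*cmd));
	ring->wptr = (ring->wptr + 1) % 16;
}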
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 00ff5e98278c..f63fc25aa6c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -66,6 +66,22 @@ struct time_stamp {
struct link_trace {
struct time_stamp time_stamp;
};
+
+/* PSR feature flags */
+struct psr_settings {
+ bool psr_feature_enabled; // PSR is supported by sink
+ bool psr_allow_active; // PSR is currently active
+ enum dc_psr_version psr_version; // Internal PSR version, determined from DPCD
+
+ /* These parameters are calculated by the driver from the display timing
+ * and the sink's capabilities. If the VBLANK region is too small and the
+ * sink takes a long time to set up its RFB, entering the PSR state may
+ * take an extra frame.
+ */
+ bool psr_frame_capture_indication_req;
+ unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
/*
* A link contains one or more sinks and their connected status.
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -118,6 +134,7 @@ struct dc_link {
struct dc_context *ctx;
+ struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
struct graphics_object_id link_id;
union ddi_channel_mapping ddi_channel_mapping;
@@ -126,11 +143,14 @@ struct dc_link {
uint32_t dongle_max_pix_clk;
unsigned short chip_caps;
unsigned int dpcd_sink_count;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ struct hdcp_caps hdcp_caps;
+#endif
enum edp_revision edp_revision;
- bool psr_feature_enabled;
- bool psr_allow_active;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+ struct psr_settings psr_settings;
+
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
@@ -197,7 +217,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link);
int dc_link_get_backlight_level(const struct dc_link *dc_link);
-bool dc_link_set_abm_disable(const struct dc_link *dc_link);
+int dc_link_get_target_backlight_pwm(const struct dc_link *link);
bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
@@ -290,6 +310,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
* DPCD access interfaces
*/
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+bool dc_link_is_hdcp14(struct dc_link *link);
+bool dc_link_is_hdcp22(struct dc_link *link);
+#endif
void dc_link_set_drive_settings(struct dc *dc,
struct link_training_settings *lt_settings,
const struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index a5c7ef47b8d3..49aad691e687 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -167,8 +167,6 @@ struct dc_stream_state {
/* TODO: custom INFO packets */
/* TODO: ABM info (DMCU) */
- /* PSR info */
- unsigned char psr_version;
/* TODO: CEA VIC */
/* DMCU info */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 0d210104ba0a..f236da1c1859 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -862,4 +862,9 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
+enum dc_psr_version {
+ DC_PSR_VERSION_1 = 0,
+ DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
+};
+
#endif /* DC_TYPES_H_ */
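An illustrative use of the new enum when interpreting the sink's reported PSR version (the DP_PSR_SUPPORT byte); the mapping below is an assumption for the sketch, not the driver's exact logic:

static void link_set_psr_version(struct dc_link *link, unsigned char dpcd_psr_version)
{
	link->psr_settings.psr_version = dpcd_psr_version != 0 ?
		DC_PSR_VERSION_1 : DC_PSR_VERSION_UNSUPPORTED;
}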
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index fbfcff700971..f704a8fd52e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index b8a3fc505c9b..4e87e70237e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -55,7 +55,7 @@
#define MCP_DISABLE_ABM_IMMEDIATELY 255
-static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
+static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id, uint32_t panel_inst)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
uint32_t rampingBoundary = 0xFFFF;
@@ -83,125 +83,12 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
return true;
}
-static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
-{
- uint64_t current_backlight;
- uint32_t round_result;
- uint32_t pwm_period_cntl, bl_period, bl_int_count;
- uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
- uint32_t bl_period_mask, bl_pwm_mask;
-
- pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
- REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
- REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
-
- bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
- REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
- REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
-
- if (bl_int_count == 0)
- bl_int_count = 16;
-
- bl_period_mask = (1 << bl_int_count) - 1;
- bl_period &= bl_period_mask;
-
- bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
-
- if (fractional_duty_cycle_en == 0)
- bl_pwm &= bl_pwm_mask;
- else
- bl_pwm &= 0xFFFF;
-
- current_backlight = bl_pwm << (1 + bl_int_count);
-
- if (bl_period == 0)
- bl_period = 0xFFFF;
-
- current_backlight = div_u64(current_backlight, bl_period);
- current_backlight = (current_backlight + 1) >> 1;
-
- current_backlight = (uint64_t)(current_backlight) * bl_period;
-
- round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
- round_result = (round_result >> (bl_int_count-1)) & 1;
-
- current_backlight >>= bl_int_count;
- current_backlight += round_result;
-
- return (uint32_t)(current_backlight);
-}
-
-static void driver_set_backlight_level(struct dce_abm *abm_dce,
- uint32_t backlight_pwm_u16_16)
-{
- uint32_t backlight_16bit;
- uint32_t masked_pwm_period;
- uint8_t bit_count;
- uint64_t active_duty_cycle;
- uint32_t pwm_period_bitcnt;
-
- /*
- * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
- * active duty cycle <= backlight period
- */
-
- /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
- */
- REG_GET_2(BL_PWM_PERIOD_CNTL,
- BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
- BL_PWM_PERIOD, &masked_pwm_period);
-
- if (pwm_period_bitcnt == 0)
- bit_count = 16;
- else
- bit_count = pwm_period_bitcnt;
-
- /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
- masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
-
- /* 1.2 Calculate integer active duty cycle required upper 16 bits
- * contain integer component, lower 16 bits contain fractional component
- * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
- */
- active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
-
- /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
- * components shift by bitCount then mask 16 bits and add rounding bit
- * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
- */
- backlight_16bit = active_duty_cycle >> bit_count;
- backlight_16bit &= 0xFFFF;
- backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
-
- /*
- * 2. Program register with updated value
- */
-
- /* 2.1 Lock group 2 backlight registers */
-
- REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
- BL_PWM_GRP1_REG_LOCK, 1);
-
- // 2.2 Write new active duty cycle
- REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
-
- /* 2.3 Unlock group 2 backlight registers */
- REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_REG_LOCK, 0);
-
- /* 3 Wait for pending bit to be cleared */
- REG_WAIT(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
- 1, 10000);
-}
-
static void dmcu_set_backlight_level(
struct dce_abm *abm_dce,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp,
- uint32_t controller_id)
+ uint32_t controller_id,
+ uint32_t panel_id)
{
unsigned int backlight_8_bit = 0;
uint32_t s2;
@@ -213,7 +100,7 @@ static void dmcu_set_backlight_level(
// Take MSB of fractional part since backlight is not max
backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
- dce_abm_set_pipe(&abm_dce->base, controller_id);
+ dce_abm_set_pipe(&abm_dce->base, controller_id, panel_id);
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
@@ -248,10 +135,9 @@ static void dmcu_set_backlight_level(
0, 1, 80000);
}
-static void dce_abm_init(struct abm *abm)
+static void dce_abm_init(struct abm *abm, uint32_t backlight)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -331,86 +217,12 @@ static bool dce_abm_set_level(struct abm *abm, uint32_t level)
return true;
}
-static bool dce_abm_immediate_disable(struct abm *abm)
+static bool dce_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
{
- struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-
if (abm->dmcu_is_running == false)
return true;
- dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
-
- abm->stored_backlight_registers.BL_PWM_CNTL =
- REG_READ(BL_PWM_CNTL);
- abm->stored_backlight_registers.BL_PWM_CNTL2 =
- REG_READ(BL_PWM_CNTL2);
- abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
- REG_READ(BL_PWM_PERIOD_CNTL);
-
- REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
- &abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
- return true;
-}
-
-static bool dce_abm_init_backlight(struct abm *abm)
-{
- struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- uint32_t value;
-
- /* It must not be 0, so we have to restore them
- * Bios bug w/a - period resets to zero,
- * restoring to cache values which is always correct
- */
- REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
- if (value == 0 || value == 1) {
- if (abm->stored_backlight_registers.BL_PWM_CNTL != 0) {
- REG_WRITE(BL_PWM_CNTL,
- abm->stored_backlight_registers.BL_PWM_CNTL);
- REG_WRITE(BL_PWM_CNTL2,
- abm->stored_backlight_registers.BL_PWM_CNTL2);
- REG_WRITE(BL_PWM_PERIOD_CNTL,
- abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
- REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
- BL_PWM_REF_DIV,
- abm->stored_backlight_registers.
- LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
- } else {
- /* TODO: Note: This should not really happen since VBIOS
- * should have initialized PWM registers on boot.
- */
- REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
- REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
- }
- } else {
- abm->stored_backlight_registers.BL_PWM_CNTL =
- REG_READ(BL_PWM_CNTL);
- abm->stored_backlight_registers.BL_PWM_CNTL2 =
- REG_READ(BL_PWM_CNTL2);
- abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
- REG_READ(BL_PWM_PERIOD_CNTL);
-
- REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
- &abm->stored_backlight_registers.
- LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
- }
-
- /* Have driver take backlight control
- * TakeBacklightControl(true)
- */
- value = REG_READ(BIOS_SCRATCH_2);
- value |= ATOM_S2_VRI_BRIGHT_ENABLE;
- REG_WRITE(BIOS_SCRATCH_2, value);
-
- /* Enable the backlight output */
- REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
-
- /* Disable fractional pwm if configured */
- REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
- abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
-
- /* Unlock group 2 backlight registers */
- REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_REG_LOCK, 0);
+ dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY, panel_inst);
return true;
}
@@ -420,21 +232,18 @@ static bool dce_abm_set_backlight_level_pwm(
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
- bool use_smooth_brightness)
+ unsigned int panel_inst)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
- /* If DMCU is in reset state, DMCU is uninitialized */
- if (use_smooth_brightness)
- dmcu_set_backlight_level(abm_dce,
- backlight_pwm_u16_16,
- frame_ramp,
- controller_id);
- else
- driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
+ dmcu_set_backlight_level(abm_dce,
+ backlight_pwm_u16_16,
+ frame_ramp,
+ controller_id,
+ panel_inst);
return true;
}
@@ -442,12 +251,12 @@ static bool dce_abm_set_backlight_level_pwm(
static const struct abm_funcs dce_funcs = {
.abm_init = dce_abm_init,
.set_abm_level = dce_abm_set_level,
- .init_backlight = dce_abm_init_backlight,
.set_pipe = dce_abm_set_pipe,
.set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
.get_current_backlight = dce_abm_get_current_backlight,
.get_target_backlight = dce_abm_get_target_backlight,
- .set_abm_immediate_disable = dce_abm_immediate_disable
+ .init_abm_config = NULL,
+ .set_abm_immediate_disable = dce_abm_immediate_disable,
};
static void dce_abm_construct(
@@ -461,10 +270,6 @@ static void dce_abm_construct(
base->ctx = ctx;
base->funcs = &dce_funcs;
- base->stored_backlight_registers.BL_PWM_CNTL = 0;
- base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
- base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
- base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
base->dmcu_is_running = false;
abm_dce->regs = regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index ba0caaffa24b..9718a4823372 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -30,11 +30,6 @@
#include "abm.h"
#define ABM_COMMON_REG_LIST_DCE_BASE() \
- SR(BL_PWM_PERIOD_CNTL), \
- SR(BL_PWM_CNTL), \
- SR(BL_PWM_CNTL2), \
- SR(BL_PWM_GRP1_REG_LOCK), \
- SR(LVTMA_PWRSEQ_REF_DIV), \
SR(MASTER_COMM_CNTL_REG), \
SR(MASTER_COMM_CMD_REG), \
SR(MASTER_COMM_DATA_REG1)
@@ -85,15 +80,6 @@
.field_name = reg_name ## __ ## field_name ## post_fix
#define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
- ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
- ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
- ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
- ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
- ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
- ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
- ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
- ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
- ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
@@ -178,19 +164,10 @@
type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
- type BL_PWM_PERIOD; \
- type BL_PWM_PERIOD_BITCNT; \
- type BL_ACTIVE_INT_FRAC_CNT; \
- type BL_PWM_FRACTIONAL_EN; \
type MASTER_COMM_INTERRUPT; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_CMD_REG_BYTE1; \
- type MASTER_COMM_CMD_REG_BYTE2; \
- type BL_PWM_REF_DIV; \
- type BL_PWM_EN; \
- type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
- type BL_PWM_GRP1_REG_LOCK; \
- type BL_PWM_GRP1_REG_UPDATE_PENDING
+ type MASTER_COMM_CMD_REG_BYTE2
struct dce_abm_shift {
ABM_REG_FIELD_LIST(uint8_t);
@@ -201,10 +178,6 @@ struct dce_abm_mask {
};
struct dce_abm_registers {
- uint32_t BL_PWM_PERIOD_CNTL;
- uint32_t BL_PWM_CNTL;
- uint32_t BL_PWM_CNTL2;
- uint32_t LVTMA_PWRSEQ_REF_DIV;
uint32_t DC_ABM1_HG_SAMPLE_RATE;
uint32_t DC_ABM1_LS_SAMPLE_RATE;
uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
@@ -219,7 +192,6 @@ struct dce_abm_registers {
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_DATA_REG1;
uint32_t BIOS_SCRATCH_2;
- uint32_t BL_PWM_GRP1_REG_LOCK;
};
struct dce_abm {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 2e992fbc0d71..d2ad0504b0de 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1014,39 +1014,6 @@ struct pixel_rate_range_table_entry {
unsigned short div_factor;
};
-static const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
- // /1.001 rates
- {25170, 25180, 25200, 1000, 1001}, //25.2MHz -> 25.17
- {59340, 59350, 59400, 1000, 1001}, //59.4Mhz -> 59.340
- {74170, 74180, 74250, 1000, 1001}, //74.25Mhz -> 74.1758
- {125870, 125880, 126000, 1000, 1001}, //126Mhz -> 125.87
- {148350, 148360, 148500, 1000, 1001}, //148.5Mhz -> 148.3516
- {167830, 167840, 168000, 1000, 1001}, //168Mhz -> 167.83
- {222520, 222530, 222750, 1000, 1001}, //222.75Mhz -> 222.527
- {257140, 257150, 257400, 1000, 1001}, //257.4Mhz -> 257.1429
- {296700, 296710, 297000, 1000, 1001}, //297Mhz -> 296.7033
- {342850, 342860, 343200, 1000, 1001}, //343.2Mhz -> 342.857
- {395600, 395610, 396000, 1000, 1001}, //396Mhz -> 395.6
- {409090, 409100, 409500, 1000, 1001}, //409.5Mhz -> 409.091
- {445050, 445060, 445500, 1000, 1001}, //445.5Mhz -> 445.055
- {467530, 467540, 468000, 1000, 1001}, //468Mhz -> 467.5325
- {519230, 519240, 519750, 1000, 1001}, //519.75Mhz -> 519.231
- {525970, 525980, 526500, 1000, 1001}, //526.5Mhz -> 525.974
- {545450, 545460, 546000, 1000, 1001}, //546Mhz -> 545.455
- {593400, 593410, 594000, 1000, 1001}, //594Mhz -> 593.4066
- {623370, 623380, 624000, 1000, 1001}, //624Mhz -> 623.377
- {692300, 692310, 693000, 1000, 1001}, //693Mhz -> 692.308
- {701290, 701300, 702000, 1000, 1001}, //702Mhz -> 701.2987
- {791200, 791210, 792000, 1000, 1001}, //792Mhz -> 791.209
- {890100, 890110, 891000, 1000, 1001}, //891Mhz -> 890.1099
- {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz -> 1186.8131
-
- // *1.001 rates
- {27020, 27030, 27000, 1001, 1000}, //27Mhz
- {54050, 54060, 54000, 1001, 1000}, //54Mhz
- {108100, 108110, 108000, 1001, 1000},//108Mhz
-};
-
static bool dcn20_program_pix_clk(
struct clock_source *clock_source,
struct pixel_clk_params *pix_clk_params,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index c5aa1f48593a..5479d959ec62 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -27,10 +27,6 @@
#include "dc_types.h"
-#define BL_REG_LIST()\
- SR(LVTMA_PWRSEQ_CNTL), \
- SR(LVTMA_PWRSEQ_STATE)
-
#define HWSEQ_DCEF_REG_LIST_DCE8() \
.DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
.DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
@@ -94,20 +90,17 @@
SRII(BLND_CONTROL, BLND, 0),\
SRII(BLND_CONTROL, BLND, 1),\
SR(BLNDV_CONTROL),\
- HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
- BL_REG_LIST()
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
#define HWSEQ_DCE8_REG_LIST() \
HWSEQ_DCEF_REG_LIST_DCE8(), \
HWSEQ_BLND_REG_LIST(), \
- HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
- BL_REG_LIST()
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
#define HWSEQ_DCE10_REG_LIST() \
HWSEQ_DCEF_REG_LIST(), \
HWSEQ_BLND_REG_LIST(), \
- HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
- BL_REG_LIST()
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
#define HWSEQ_ST_REG_LIST() \
HWSEQ_DCE11_REG_LIST_BASE(), \
@@ -134,8 +127,7 @@
SR(DCHUB_FB_LOCATION),\
SR(DCHUB_AGP_BASE),\
SR(DCHUB_AGP_BOT),\
- SR(DCHUB_AGP_TOP), \
- BL_REG_LIST()
+ SR(DCHUB_AGP_TOP)
#define HWSEQ_VG20_REG_LIST() \
HWSEQ_DCE120_REG_LIST(),\
@@ -144,8 +136,7 @@
#define HWSEQ_DCE112_REG_LIST() \
HWSEQ_DCE10_REG_LIST(), \
HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
- HWSEQ_PHYPLL_REG_LIST(CRTC), \
- BL_REG_LIST()
+ HWSEQ_PHYPLL_REG_LIST(CRTC)
#define HWSEQ_DCN_REG_LIST()\
SR(REFCLK_CNTL), \
@@ -207,8 +198,7 @@
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
SR(VGA_TEST_CONTROL), \
- SR(DC_IP_REQUEST_CNTL), \
- BL_REG_LIST()
+ SR(DC_IP_REQUEST_CNTL)
#define HWSEQ_DCN2_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
@@ -273,8 +263,7 @@
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
- SR(DC_IP_REQUEST_CNTL), \
- BL_REG_LIST()
+ SR(DC_IP_REQUEST_CNTL)
#define HWSEQ_DCN21_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
@@ -324,15 +313,9 @@
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
- SR(DC_IP_REQUEST_CNTL), \
- BL_REG_LIST()
+ SR(DC_IP_REQUEST_CNTL)
struct dce_hwseq_registers {
-
- /* Backlight registers */
- uint32_t LVTMA_PWRSEQ_CNTL;
- uint32_t LVTMA_PWRSEQ_STATE;
-
uint32_t DCFE_CLOCK_CONTROL[6];
uint32_t DCFEV_CLOCK_CONTROL;
uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
@@ -465,26 +448,18 @@ struct dce_hwseq_registers {
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
-#define HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
-
#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
.DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
- HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
- HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
@@ -507,8 +482,7 @@ struct dce_hwseq_registers {
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)
#define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\
@@ -570,8 +544,7 @@ struct dce_hwseq_registers {
HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
- HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh)
#define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -630,8 +603,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \
- HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
#define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -671,10 +643,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN16_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
- HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
#define HWSEQ_REG_FIELD_LIST(type) \
type DCFE_CLOCK_ENABLE; \
@@ -706,11 +675,7 @@ struct dce_hwseq_registers {
type PF_LFB_REGION;\
type PF_MAX_REGION;\
type ENABLE_L1_TLB;\
- type SYSTEM_ACCESS_MODE;\
- type LVTMA_BLON;\
- type LVTMA_DIGON;\
- type LVTMA_DIGON_OVRD;\
- type LVTMA_PWRSEQ_TARGET_STATE_R;
+ type SYSTEM_ACCESS_MODE;
#define HWSEQ_DCN_REG_FIELD_LIST(type) \
type HUBP_VTG_SEL; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 8527cce81c6f..8d8c84c81b34 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -118,7 +118,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
.enable_hpd = dce110_link_encoder_enable_hpd,
.disable_hpd = dce110_link_encoder_disable_hpd,
.is_dig_enabled = dce110_is_dig_enabled,
- .destroy = dce110_link_encoder_destroy
+ .destroy = dce110_link_encoder_destroy,
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap
};
static enum bp_result link_transmitter_control(
@@ -1389,3 +1390,20 @@ void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
}
+
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ /* Set Default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+ /* Higher link settings based on feature supported */
+ if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+ if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ *link_settings = max_link_cap;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 3c9368df4093..384389f0e2c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -271,4 +271,7 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
bool dce110_is_dig_enabled(struct link_encoder *enc);
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
#endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
new file mode 100644
index 000000000000..ebff9b1e312e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dc_dmub_srv.h"
+#include "panel_cntl.h"
+#include "dce_panel_cntl.h"
+#include "atom.h"
+
+#define TO_DCE_PANEL_CNTL(panel_cntl)\
+ container_of(panel_cntl, struct dce_panel_cntl, base)
+
+#define CTX \
+ dce_panel_cntl->base.ctx
+
+#define DC_LOGGER \
+ dce_panel_cntl->base.ctx->logger
+
+#define REG(reg)\
+ dce_panel_cntl->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name
+
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *dce_panel_cntl)
+{
+ uint64_t current_backlight;
+ uint32_t round_result;
+ uint32_t pwm_period_cntl, bl_period, bl_int_count;
+ uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+ uint32_t bl_period_mask, bl_pwm_mask;
+
+ pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
+
+ bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
+ REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
+
+ if (bl_int_count == 0)
+ bl_int_count = 16;
+
+ bl_period_mask = (1 << bl_int_count) - 1;
+ bl_period &= bl_period_mask;
+
+ bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
+
+ if (fractional_duty_cycle_en == 0)
+ bl_pwm &= bl_pwm_mask;
+ else
+ bl_pwm &= 0xFFFF;
+
+ current_backlight = bl_pwm << (1 + bl_int_count);
+
+ if (bl_period == 0)
+ bl_period = 0xFFFF;
+
+ current_backlight = div_u64(current_backlight, bl_period);
+ current_backlight = (current_backlight + 1) >> 1;
+
+ current_backlight = (uint64_t)(current_backlight) * bl_period;
+
+ round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+
+ round_result = (round_result >> (bl_int_count-1)) & 1;
+
+ current_backlight >>= bl_int_count;
+ current_backlight += round_result;
+
+ return (uint32_t)(current_backlight);
+}
+
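A standalone arithmetic model of the readback above, assuming fractional duty is enabled so no PWM mask is applied (sample values are for illustration only): with period bit count 8, period 0xC0 and duty 0x6000, the conversion round-trips to 0x6000.

#include <stdint.h>

static uint32_t pwm_to_backlight(uint32_t bl_pwm, uint32_t bl_period,
				 uint32_t bl_int_count)
{
	uint64_t bl;
	uint32_t round_bit;

	if (bl_int_count == 0)
		bl_int_count = 16;
	bl_period &= (1u << bl_int_count) - 1;

	bl = (uint64_t)bl_pwm << (1 + bl_int_count);
	if (bl_period == 0)
		bl_period = 0xFFFF;
	bl /= bl_period;		/* scale the duty against the period */
	bl = (bl + 1) >> 1;		/* round to nearest */

	bl *= bl_period;		/* re-expand to recover the rounding bit */
	round_bit = (uint32_t)((bl >> (bl_int_count - 1)) & 1);
	return (uint32_t)((bl >> bl_int_count) + round_bit);
}

/* e.g. pwm_to_backlight(0x6000, 0xC0, 8) == 0x6000 */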
+uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+ uint32_t value;
+ uint32_t current_backlight;
+
+ /* The period must not be 0, so the cached values have to be restored.
+ * BIOS bug w/a - the period resets to zero, so restore from the
+ * cached values, which are always correct.
+ */
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
+
+ if (value == 0 || value == 1) {
+ if (panel_cntl->stored_backlight_registers.BL_PWM_CNTL != 0) {
+ REG_WRITE(BL_PWM_CNTL,
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL);
+ REG_WRITE(BL_PWM_CNTL2,
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2);
+ REG_WRITE(BL_PWM_PERIOD_CNTL,
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+ REG_UPDATE(PWRSEQ_REF_DIV,
+ BL_PWM_REF_DIV,
+ panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ } else {
+ /* TODO: Note: This should not really happen since VBIOS
+ * should have initialized PWM registers on boot.
+ */
+ REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
+ REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
+ }
+ } else {
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ }
+
+ // Have driver take backlight control
+ // TakeBacklightControl(true)
+ value = REG_READ(BIOS_SCRATCH_2);
+ value |= ATOM_S2_VRI_BRIGHT_ENABLE;
+ REG_WRITE(BIOS_SCRATCH_2, value);
+
+ // Enable the backlight output
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+ // Unlock group 2 backlight registers
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ current_backlight = calculate_16_bit_backlight_from_pwm(dce_panel_cntl);
+
+ return current_backlight;
+}
+
+bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+ uint32_t value;
+
+ REG_GET(PWRSEQ_CNTL, LVTMA_BLON, &value);
+
+ return value;
+}
+
+bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+ uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
+
+ REG_GET(PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
+
+ REG_GET_2(PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
+
+ return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
+}
+
+void dce_store_backlight_level(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+}
+
+void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
+ uint32_t backlight_pwm_u16_16)
+{
+ uint32_t backlight_16bit;
+ uint32_t masked_pwm_period;
+ uint8_t bit_count;
+ uint64_t active_duty_cycle;
+ uint32_t pwm_period_bitcnt;
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+ /*
+ * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
+ * active duty cycle <= backlight period
+ */
+
+ /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
+ */
+ REG_GET_2(BL_PWM_PERIOD_CNTL,
+ BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
+ BL_PWM_PERIOD, &masked_pwm_period);
+
+ if (pwm_period_bitcnt == 0)
+ bit_count = 16;
+ else
+ bit_count = pwm_period_bitcnt;
+
+ /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+ masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
+
+ /* 1.2 Calculate integer active duty cycle required upper 16 bits
+ * contain integer component, lower 16 bits contain fractional component
+ * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
+ */
+ active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
+
+ /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
+ * components shift by bitCount then mask 16 bits and add rounding bit
+ * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFFF) + 0
+ */
+ backlight_16bit = active_duty_cycle >> bit_count;
+ backlight_16bit &= 0xFFFF;
+ backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+ /*
+ * 2. Program register with updated value
+ */
+
+ /* 2.1 Lock group 2 backlight registers */
+
+ REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
+ BL_PWM_GRP1_REG_LOCK, 1);
+
+ // 2.2 Write new active duty cycle
+ REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+ /* 2.3 Unlock group 2 backlight registers */
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ /* 3 Wait for pending bit to be cleared */
+ REG_WAIT(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
+ 1, 10000);
+}
+
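A quick check of the worked example in the comments above (duty 0xEFF0, masked period 0x24, bit count 6), as a self-contained assertion:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t active_duty_cycle = (uint64_t)0xEFF0 * 0x24;	/* 0x21BDC0 */
	uint32_t backlight_16bit = (active_duty_cycle >> 6) & 0xFFFF;

	backlight_16bit += (active_duty_cycle >> 5) & 0x1;	/* rounding bit */
	assert(active_duty_cycle == 0x21BDC0);
	assert(backlight_16bit == 0x86F7);
	return 0;
}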
+static void dce_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(*panel_cntl);
+
+ kfree(dce_panel_cntl);
+ *panel_cntl = NULL;
+}
+
+static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = {
+ .destroy = dce_panel_cntl_destroy,
+ .hw_init = dce_panel_cntl_hw_init,
+ .is_panel_backlight_on = dce_is_panel_backlight_on,
+ .is_panel_powered_on = dce_is_panel_powered_on,
+ .store_backlight_level = dce_store_backlight_level,
+ .driver_set_backlight = dce_driver_set_backlight,
+};
+
+void dce_panel_cntl_construct(
+ struct dce_panel_cntl *dce_panel_cntl,
+ const struct panel_cntl_init_data *init_data,
+ const struct dce_panel_cntl_registers *regs,
+ const struct dce_panel_cntl_shift *shift,
+ const struct dce_panel_cntl_mask *mask)
+{
+ struct panel_cntl *base = &dce_panel_cntl->base;
+
+ base->stored_backlight_registers.BL_PWM_CNTL = 0;
+ base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
+ base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
+ base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
+
+ dce_panel_cntl->regs = regs;
+ dce_panel_cntl->shift = shift;
+ dce_panel_cntl->mask = mask;
+
+ dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
+ dce_panel_cntl->base.ctx = init_data->ctx;
+ dce_panel_cntl->base.inst = init_data->inst;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
new file mode 100644
index 000000000000..70ec691e14d2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_PANEL_CNTL__DCE_H__
+#define __DC_PANEL_CNTL__DCE_H__
+
+#include "panel_cntl.h"
+
+/* set register offset with instance */
+#define DCE_PANEL_CNTL_SR(reg_name, block)\
+ .reg_name = mm ## block ## _ ## reg_name
+
+#define DCE_PANEL_CNTL_REG_LIST()\
+ DCE_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+ DCE_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+ DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_PERIOD_CNTL), \
+ SR(BL_PWM_GRP1_REG_LOCK), \
+ SR(BIOS_SCRATCH_2)
+
+#define DCN_PANEL_CNTL_SR(reg_name, block)\
+ .reg_name = BASE(mm ## block ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## _ ## reg_name
+
+#define DCN_PANEL_CNTL_REG_LIST()\
+ DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+ DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+ DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_PERIOD_CNTL), \
+ SR(BL_PWM_GRP1_REG_LOCK), \
+ SR(BIOS_SCRATCH_2)
+
+#define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh), \
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh)
+
+#define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \
+ type LVTMA_BLON;\
+ type LVTMA_DIGON;\
+ type LVTMA_DIGON_OVRD;\
+ type LVTMA_PWRSEQ_TARGET_STATE_R; \
+ type BL_PWM_REF_DIV; \
+ type BL_PWM_EN; \
+ type BL_ACTIVE_INT_FRAC_CNT; \
+ type BL_PWM_FRACTIONAL_EN; \
+ type BL_PWM_PERIOD; \
+ type BL_PWM_PERIOD_BITCNT; \
+ type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
+ type BL_PWM_GRP1_REG_LOCK; \
+ type BL_PWM_GRP1_REG_UPDATE_PENDING
+
+struct dce_panel_cntl_shift {
+ DCE_PANEL_CNTL_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_panel_cntl_mask {
+ DCE_PANEL_CNTL_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_panel_cntl_registers {
+ uint32_t PWRSEQ_CNTL;
+ uint32_t PWRSEQ_STATE;
+ uint32_t BL_PWM_CNTL;
+ uint32_t BL_PWM_CNTL2;
+ uint32_t BL_PWM_PERIOD_CNTL;
+ uint32_t BL_PWM_GRP1_REG_LOCK;
+ uint32_t PWRSEQ_REF_DIV;
+ uint32_t BIOS_SCRATCH_2;
+};
+
+struct dce_panel_cntl {
+ struct panel_cntl base;
+ const struct dce_panel_cntl_registers *regs;
+ const struct dce_panel_cntl_shift *shift;
+ const struct dce_panel_cntl_mask *mask;
+};
+
+void dce_panel_cntl_construct(
+ struct dce_panel_cntl *panel_cntl,
+ const struct panel_cntl_init_data *init_data,
+ const struct dce_panel_cntl_registers *regs,
+ const struct dce_panel_cntl_shift *shift,
+ const struct dce_panel_cntl_mask *mask);
+
+#endif /* __DC_PANEL_CNTL__DCE_H__ */
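A hedged sketch of how a resource file would typically instantiate these macros and construct the panel control object; the create helper, the kzalloc usage and the __SHIFT/_MASK postfixes follow the surrounding DC conventions but are assumptions here:

#include <linux/slab.h>
#include "dce/dce_panel_cntl.h"

static const struct dce_panel_cntl_registers panel_cntl_regs = {
	DCE_PANEL_CNTL_REG_LIST()
};

static const struct dce_panel_cntl_shift panel_cntl_shift = {
	DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
};

static const struct dce_panel_cntl_mask panel_cntl_mask = {
	DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
};

static struct panel_cntl *panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
	struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(*panel_cntl), GFP_KERNEL);

	if (!panel_cntl)
		return NULL;
	dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs,
				 &panel_cntl_shift, &panel_cntl_mask);
	return &panel_cntl->base;
}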
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 451574971b96..4cdaaf4d881c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -1336,7 +1336,6 @@ static void dce110_se_audio_setup(
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
- uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@@ -1344,7 +1343,6 @@ static void dce110_se_audio_setup(
/* This should not happen, but it does; bail out so we don't BSOD */
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
new file mode 100644
index 000000000000..da0b29abfbda
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_abm.h"
+#include "dce_abm.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
+#include "core_types.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed31_32.h"
+
+#include "atom.h"
+
+#define TO_DMUB_ABM(abm)\
+ container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+ (dce_abm->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name
+
+#define CTX \
+ dce_abm->base.ctx
+
+#define DISABLE_ABM_IMMEDIATELY 255
+
+static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint32_t ramping_boundary = 0xFFFF;
+
+ cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+ cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+ cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
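Every ABM helper in this file submits work to the DMUB firmware with the same three-step pattern visible above: fill in a union dmub_rb_cmd, queue it on the ring buffer, kick execution, and block until the firmware drains the queue. A minimal sketch of that pattern as a standalone helper follows (the wrapper name is hypothetical and not part of this patch; the three dc_dmub_srv_* calls are the ones used above):

/* Hypothetical wrapper illustrating the queue/execute/wait pattern
 * shared by all DMUB ABM commands in this file.
 */
static void dmub_send_cmd_and_wait(struct dc_context *ctx, union dmub_rb_cmd *cmd)
{
	dc_dmub_srv_cmd_queue(ctx->dmub_srv, cmd);	/* place command in ring buffer */
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);		/* tell firmware to consume it */
	dc_dmub_srv_wait_idle(ctx->dmub_srv);		/* block until firmware is done */
}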
+
+static void dmcub_set_backlight_level(
+ struct dce_abm *dce_abm,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp,
+ uint32_t otg_inst,
+ uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dce_abm->base.ctx;
+ unsigned int backlight_8_bit = 0;
+ uint32_t s2;
+
+ if (backlight_pwm_u16_16 & 0x10000)
+ // Check for max backlight condition
+ backlight_8_bit = 0xFF;
+ else
+ // Take MSB of fractional part since backlight is not max
+ backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
+ dmub_abm_set_pipe(&dce_abm->base, otg_inst, panel_inst);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
+
+ if (otg_inst == 0)
+ frame_ramp = 0;
+
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ // Update requested backlight level
+ s2 = REG_READ(BIOS_SCRATCH_2);
+
+ s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+ ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+ s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ REG_WRITE(BIOS_SCRATCH_2, s2);
+}
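The conversion at the top of dmcub_set_backlight_level treats backlight_pwm_u16_16 as unsigned 16.16 fixed point, where 0x10000 represents full brightness. A self-contained sketch of that mapping (helper name is illustrative only):

/* Sketch: u16.16 backlight -> 8-bit BIOS scratch encoding, mirroring
 * the logic above. 0x10000 (1.0) and anything above clamps to 0xFF.
 */
static unsigned int backlight_u16_16_to_8bit(uint32_t bl_u16_16)
{
	if (bl_u16_16 & 0x10000)		/* integer bit set: max backlight */
		return 0xFF;
	return (bl_u16_16 >> 8) & 0xFF;		/* MSB of the fractional part */
}

For example, a 50% level of 0x8000 maps to 0x80 (128).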
+
+static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+{
+ union dmub_rb_cmd cmd;
+	uint32_t fractional_pwm = dc->dc->config.disable_fractional_pwm ? 0 : 1;
+
+ cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
+ cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+static void dmub_abm_init(struct abm *abm, uint32_t backlight)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101);
+ REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101);
+
+ REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+ ABM1_HG_NUM_OF_BINS_SEL, 0,
+ ABM1_HG_VMAX_SEL, 1,
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+ REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+ ABM1_IPCSC_COEFF_SEL_R, 2,
+ ABM1_IPCSC_COEFF_SEL_G, 4,
+ ABM1_IPCSC_COEFF_SEL_B, 2);
+
+ REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+ BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+ BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL,
+ BL1_PWM_USER_LEVEL, backlight);
+
+ REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+ REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+
+ dmub_abm_enable_fractional_pwm(abm->ctx);
+}
+
+static unsigned int dmub_abm_get_current_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+	/* Return backlight in hardware format: unsigned 17 bits, with a
+	 * 1-bit integer part and a 16-bit fractional part
+ */
+ return backlight;
+}
+
+static unsigned int dmub_abm_get_target_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+	/* Return backlight in hardware format: unsigned 17 bits, with a
+	 * 1-bit integer part and a 16-bit fractional part
+ */
+ return backlight;
+}
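Both getters return the raw register value in the unsigned 1.16 fixed-point format described in the comments. A small sketch of how a caller might turn that into an integer percentage (illustrative helper, not part of the patch):

/* Sketch: unsigned 1.16 fixed-point backlight -> integer percent.
 * 0x10000 corresponds to 100%, so 0x8000 yields 50.
 */
static unsigned int backlight_hw_to_percent(unsigned int bl_1_16)
{
	return (bl_1_16 * 100) / 0x10000;
}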
+
+static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ cmd.abm_set_level.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
+ cmd.abm_set_level.abm_set_level_data.level = level;
+ cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static bool dmub_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
+{
+ dmub_abm_set_pipe(abm, DISABLE_ABM_IMMEDIATELY, panel_inst);
+
+ return true;
+}
+
+static bool dmub_abm_set_backlight_level_pwm(
+ struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+ unsigned int otg_inst,
+ uint32_t panel_inst)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+ dmcub_set_backlight_level(dce_abm,
+ backlight_pwm_u16_16,
+ frame_ramp,
+ otg_inst,
+ panel_inst);
+
+ return true;
+}
+
+static bool dmub_abm_init_config(struct abm *abm,
+ const char *src,
+ unsigned int bytes)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ // TODO: Optimize by only reading back final 4 bytes
+ dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+ // Copy iramtable into cw7
+ memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+
+ // Fw will copy from cw7 to fw_state
+ cmd.abm_init_config.header.type = DMUB_CMD__ABM;
+ cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
+ cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.abm_init_config.abm_init_config_data.bytes = bytes;
+ cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
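dmub_abm_init_config stages the IRAM table in the cw7 scratch framebuffer and then hands the firmware its GPU address; the memcpy itself trusts the caller's byte count. A defensive sketch, assuming the scratch region exposes a size field (that field name is an assumption here, not confirmed by this patch):

/* Sketch: bounds-checked variant of the staging copy above. The
 * scratch_mem_fb.size field is assumed for illustration.
 */
if (bytes > dc->dmub_srv->dmub->scratch_mem_fb.size)
	return false;		/* reject oversized IRAM tables */
memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, src, bytes);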
+
+static const struct abm_funcs abm_funcs = {
+ .abm_init = dmub_abm_init,
+ .set_abm_level = dmub_abm_set_level,
+ .set_pipe = dmub_abm_set_pipe,
+ .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm,
+ .get_current_backlight = dmub_abm_get_current_backlight,
+ .get_target_backlight = dmub_abm_get_target_backlight,
+ .set_abm_immediate_disable = dmub_abm_immediate_disable,
+ .init_abm_config = dmub_abm_init_config,
+};
+
+static void dmub_abm_construct(
+ struct dce_abm *abm_dce,
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct abm *base = &abm_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &abm_funcs;
+ base->dmcu_is_running = false;
+
+ abm_dce->regs = regs;
+ abm_dce->abm_shift = abm_shift;
+ abm_dce->abm_mask = abm_mask;
+}
+
+struct abm *dmub_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+
+ if (abm_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+
+ return &abm_dce->base;
+}
+
+void dmub_abm_destroy(struct abm **abm)
+{
+ struct dce_abm *abm_dce = TO_DMUB_ABM(*abm);
+
+ kfree(abm_dce);
+ *abm = NULL;
+}
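Taken together, dmub_abm_create(), the abm_funcs table, and dmub_abm_destroy() give resource code a drop-in ABM backed by DMUB. A sketch of the expected lifecycle (the register/shift/mask variable names are placeholders; MAX_BACKLIGHT_LEVEL is the default used by the init_hw changes later in this series):

/* Sketch: create/use/destroy lifecycle for the DMUB-backed ABM. */
struct abm *abm = dmub_abm_create(ctx, &abm_regs, &abm_shift, &abm_mask);

if (abm) {
	abm->funcs->abm_init(abm, MAX_BACKLIGHT_LEVEL);
	abm->funcs->set_abm_level(abm, 2);	/* moderate power-saving level */
	dmub_abm_destroy(&abm);			/* frees and NULLs the pointer */
}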
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h
index 26583f346c39..3a5d5ac7a86e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,17 +23,18 @@
*
*/
-#include "core_types.h"
-#include "logger.h"
-#include "include/logger_interface.h"
-#include "dm_helpers.h"
+#ifndef __DMUB_ABM_H__
+#define __DMUB_ABM_H__
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
-{
- int i;
+#include "abm.h"
+#include "dce_abm.h"
- if (hex_data)
- for (i = 0; i < hex_data_count; i++)
- DC_LOG_DEBUG("%2.2X ", hex_data[i]);
-}
+struct abm *dmub_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask);
+void dmub_abm_destroy(struct abm **abm);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index bc109d4fc6e6..044a0133ebb1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -26,13 +26,51 @@
#include "dmub_psr.h"
#include "dc.h"
#include "dc_dmub_srv.h"
-#include "../../dmub/inc/dmub_srv.h"
-#include "../../dmub/inc/dmub_gpint_cmd.h"
+#include "dmub/dmub_srv.h"
#include "core_types.h"
#define MAX_PIPES 6
/**
+ * Convert dmcub psr state to dmcu psr state.
+ */
+static void convert_psr_state(uint32_t *psr_state)
+{
+ if (*psr_state == 0)
+ *psr_state = 0;
+ else if (*psr_state == 0x10)
+ *psr_state = 1;
+ else if (*psr_state == 0x11)
+ *psr_state = 2;
+ else if (*psr_state == 0x20)
+ *psr_state = 3;
+ else if (*psr_state == 0x21)
+ *psr_state = 4;
+ else if (*psr_state == 0x30)
+ *psr_state = 5;
+ else if (*psr_state == 0x31)
+ *psr_state = 6;
+ else if (*psr_state == 0x40)
+ *psr_state = 7;
+ else if (*psr_state == 0x41)
+ *psr_state = 8;
+ else if (*psr_state == 0x42)
+ *psr_state = 9;
+ else if (*psr_state == 0x43)
+ *psr_state = 10;
+ else if (*psr_state == 0x44)
+ *psr_state = 11;
+ else if (*psr_state == 0x50)
+ *psr_state = 12;
+ else if (*psr_state == 0x51)
+ *psr_state = 13;
+ else if (*psr_state == 0x52)
+ *psr_state = 14;
+ else if (*psr_state == 0x53)
+ *psr_state = 15;
+}
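convert_psr_state() maps the firmware's sparse encoding (high nibble = major state, low nibble = substate) onto the dense 0-15 range the DMCU interface expects. An equivalent table-driven form, shown only as a sketch of the same mapping:

/* Sketch: table-driven equivalent of convert_psr_state(). Unmatched
 * raw values are left untouched, matching the if/else chain above.
 */
static const struct {
	uint32_t raw;
	uint32_t state;
} psr_state_map[] = {
	{0x00, 0},  {0x10, 1},  {0x11, 2},  {0x20, 3},
	{0x21, 4},  {0x30, 5},  {0x31, 6},  {0x40, 7},
	{0x41, 8},  {0x42, 9},  {0x43, 10}, {0x44, 11},
	{0x50, 12}, {0x51, 13}, {0x52, 14}, {0x53, 15},
};

static void convert_psr_state_tabular(uint32_t *psr_state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(psr_state_map); i++) {
		if (psr_state_map[i].raw == *psr_state) {
			*psr_state = psr_state_map[i].state;
			return;
		}
	}
}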
+
+/**
* Get PSR state from firmware.
*/
static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
@@ -43,6 +81,8 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
dmub_srv_get_gpint_response(srv, psr_state);
+
+ convert_psr_state(psr_state);
}
/**
@@ -53,19 +93,23 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ return false;
+
cmd.psr_set_version.header.type = DMUB_CMD__PSR;
cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
-
- if (stream->psr_version == 0x0) // Unsupported
- return false;
- else if (stream->psr_version == 0x1)
+ switch (stream->link->psr_settings.psr_version) {
+ case DC_PSR_VERSION_1:
cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
- else if (stream->psr_version == 0x2)
- cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
-
- cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+ break;
+ case DC_PSR_VERSION_UNSUPPORTED:
+ default:
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
+ break;
+ }
+ cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
@@ -89,7 +133,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
cmd.psr_enable.header.payload_bytes = 0; // Send header only
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
@@ -113,7 +157,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
@@ -162,7 +206,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
// Hw insts
- copy_settings_data->dpphy_inst = psr_context->phyType;
+ copy_settings_data->dpphy_inst = psr_context->transmitterId;
copy_settings_data->aux_inst = psr_context->channel;
copy_settings_data->digfe_inst = psr_context->engineId;
copy_settings_data->digbe_inst = psr_context->transmitterId;
@@ -187,8 +231,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
+	copy_settings_data->debug.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8f78bf9abbca..a28c4ae0f259 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -46,6 +46,7 @@
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "dce100/dce100_hw_sequencer.h"
+#include "dce/dce_panel_cntl.h"
#include "reg_helper.h"
@@ -249,6 +250,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_100_REG_LIST(id),\
@@ -627,6 +640,23 @@ struct link_encoder *dce100_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
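The new panel_cntl_create hooks follow one pattern across all the DCE/DCN pools in this patch: allocate, construct with the per-instance register set, return the embedded base. A sketch of how link code might invoke the hook (the .ctx field and the exact call site are assumptions; the init_data->inst indexing matches the code above):

/* Sketch (assumed call site): instantiating a panel_cntl for a link. */
struct panel_cntl_init_data init_data = {
	.ctx = dc->ctx,		/* assumed field */
	.inst = 0,		/* first panel instance */
};

link->panel_cntl = dc->res_pool->funcs->panel_cntl_create(&init_data);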
struct output_pixel_processor *dce100_opp_create(
struct dc_context *ctx,
uint32_t inst)
@@ -943,6 +973,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
static const struct resource_funcs dce100_res_pool_funcs = {
.destroy = dce100_destroy_resource_pool,
.link_enc_create = dce100_link_encoder_create,
+ .panel_cntl_create = dce100_panel_cntl_create,
.validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 10527593868c..b77e9dc16086 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -53,6 +53,7 @@
#include "abm.h"
#include "audio.h"
#include "reg_helper.h"
+#include "panel_cntl.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
@@ -697,31 +698,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
}
-/*todo: cloned in stream enc, fix*/
-bool dce110_is_panel_backlight_on(struct dc_link *link)
-{
- struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hws = ctx->dc->hwseq;
- uint32_t value;
-
- REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
-
- return value;
-}
-
-bool dce110_is_panel_powered_on(struct dc_link *link)
-{
- struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hws = ctx->dc->hwseq;
- uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
-
- REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
-
- REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
-
- return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
-}
-
static enum bp_result link_transmitter_control(
struct dc_bios *bios,
struct bp_transmitter_control *cntl)
@@ -810,7 +786,6 @@ void dce110_edp_power_control(
bool power_up)
{
struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hwseq = ctx->dc->hwseq;
struct bp_transmitter_control cntl = { 0 };
enum bp_result bp_result;
@@ -821,7 +796,11 @@ void dce110_edp_power_control(
return;
}
- if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
+ if (!link->panel_cntl)
+ return;
+
+ if (power_up !=
+ link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) {
/* Send VBIOS command to prompt eDP panel power */
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -892,7 +871,6 @@ void dce110_edp_backlight_control(
bool enable)
{
struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hws = ctx->dc->hwseq;
struct bp_transmitter_control cntl = { 0 };
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
@@ -901,7 +879,8 @@ void dce110_edp_backlight_control(
return;
}
- if (enable && hws->funcs.is_panel_backlight_on(link)) {
+ if (enable && link->panel_cntl &&
+ link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl)) {
DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n",
__func__);
@@ -1087,7 +1066,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
hws->funcs.edp_backlight_control(link, false);
- dc_link_set_abm_disable(link);
+ link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1432,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
- pipe_ctx->stream->link->psr_feature_enabled = false;
+ pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
return DC_OK;
}
@@ -1838,7 +1817,7 @@ static bool should_enable_fbc(struct dc *dc,
return false;
/* PSR should not be enabled */
- if (pipe_ctx->stream->link->psr_feature_enabled)
+ if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled)
return false;
/* Nothing to compress */
@@ -2376,6 +2355,7 @@ static void init_hw(struct dc *dc)
struct abm *abm;
struct dmcu *dmcu;
struct dce_hwseq *hws = dc->hwseq;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2422,12 +2402,17 @@ static void init_hw(struct dc *dc)
audio->funcs->hw_init(audio);
}
- abm = dc->res_pool->abm;
- if (abm != NULL) {
- abm->funcs->init_backlight(abm);
- abm->funcs->abm_init(abm);
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->panel_cntl)
+ backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
+ abm = dc->res_pool->abm;
+ if (abm != NULL)
+ abm->funcs->abm_init(abm, backlight);
+
dmcu = dc->res_pool->dmcu;
if (dmcu != NULL && abm != NULL)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
@@ -2735,6 +2720,53 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.xfm, attributes);
}
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ struct panel_cntl *panel_cntl = link->panel_cntl;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ bool fw_set_brightness = true;
+ /* DMCU -1 for all controller id values,
+	/* DMCU internally subtracts 1 from all controller id values,
+ */
+ uint32_t controller_id = pipe_ctx->stream_res.tg->inst + 1;
+
+ if (abm == NULL || panel_cntl == NULL || (abm->funcs->set_backlight_level_pwm == NULL))
+ return false;
+
+ if (dmcu)
+ fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
+ if (!fw_set_brightness && panel_cntl->funcs->driver_set_backlight)
+ panel_cntl->funcs->driver_set_backlight(panel_cntl, backlight_pwm_u16_16);
+ else
+ abm->funcs->set_backlight_level_pwm(
+ abm,
+ backlight_pwm_u16_16,
+ frame_ramp,
+ controller_id,
+ link->panel_cntl->inst);
+
+ return true;
+}
+
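dce110_set_backlight_level routes brightness through the ABM firmware path when the DMCU is initialized and falls back to direct panel programming otherwise. The decision in isolation, with illustrative variable names:

/* Sketch: the brightness routing decision above. */
bool use_fw = dmcu && dmcu->funcs->is_dmcu_initialized(dmcu);

if (!use_fw && panel_cntl->funcs->driver_set_backlight)
	panel_cntl->funcs->driver_set_backlight(panel_cntl, level_u16_16);
else
	abm->funcs->set_backlight_level_pwm(abm, level_u16_16, frame_ramp,
					    tg_inst + 1, panel_cntl->inst);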
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+{
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+
+	if (abm && panel_cntl)
+		abm->funcs->set_abm_immediate_disable(abm, panel_cntl->inst);
+
+ if (panel_cntl)
+ panel_cntl->funcs->store_backlight_level(panel_cntl);
+}
+
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
@@ -2769,7 +2801,9 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dce110_set_cursor_position,
- .set_cursor_attribute = dce110_set_cursor_attribute
+ .set_cursor_attribute = dce110_set_cursor_attribute,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
@@ -2785,8 +2819,6 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
};
void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 34be166e8ff0..fe5326df00f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -85,9 +85,10 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up);
-bool dce110_is_panel_backlight_on(struct dc_link *link);
-
-bool dce110_is_panel_powered_on(struct dc_link *link);
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
index 4245e1f818a3..e096d2b95ef9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -679,8 +679,7 @@ void dce110_opp_v_set_csc_default(
if (default_adjust->force_hw_default == false) {
const struct out_csc_color_matrix *elm;
/* currently parameter not in use */
- enum grph_color_adjust_option option =
- GRPH_COLOR_MATRIX_HW_DEFAULT;
+ enum grph_color_adjust_option option;
uint32_t i;
/*
* HW default false we program locally defined matrix
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index bf14e9ab040c..9597fc79d7fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -53,6 +53,7 @@
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
#define DC_LOGGER \
dc->ctx->logger
@@ -275,6 +276,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
static const struct dce110_aux_registers_shift aux_shift = {
DCE_AUX_MASK_SH_LIST(__SHIFT)
};
@@ -673,6 +686,23 @@ static struct link_encoder *dce110_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce110_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
static struct output_pixel_processor *dce110_opp_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1203,6 +1233,7 @@ struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link(
static const struct resource_funcs dce110_res_pool_funcs = {
.destroy = dce110_destroy_resource_pool,
.link_enc_create = dce110_link_encoder_create,
+ .panel_cntl_create = dce110_panel_cntl_create,
.validate_bandwidth = dce110_validate_bandwidth,
.validate_plane = dce110_validate_plane,
.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 700ad8b3e54b..51b3fe502670 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -51,6 +51,7 @@
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
#include "reg_helper.h"
@@ -238,6 +239,18 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(5)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
@@ -398,7 +411,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -631,6 +644,23 @@ struct link_encoder *dce112_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce112_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
static struct input_pixel_processor *dce112_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -1021,6 +1051,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce112_res_pool_funcs = {
.destroy = dce112_destroy_resource_pool,
.link_enc_create = dce112_link_encoder_create,
+ .panel_cntl_create = dce112_panel_cntl_create,
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce112_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 53ab88ef71f5..8f362e8c1787 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -44,6 +44,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_ipp.h"
#include "dce/dce_mem_input.h"
+#include "dce/dce_panel_cntl.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dce120/dce120_hw_sequencer.h"
@@ -293,6 +294,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
static const struct dce110_aux_registers_shift aux_shift = {
DCE12_AUX_MASK_SH_LIST(__SHIFT)
};
@@ -503,7 +516,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -715,6 +728,23 @@ static struct link_encoder *dce120_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce120_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
static struct input_pixel_processor *dce120_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -880,6 +910,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce120_res_pool_funcs = {
.destroy = dce120_destroy_resource_pool,
.link_enc_create = dce120_link_encoder_create,
+ .panel_cntl_create = dce120_panel_cntl_create,
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce112_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index 893261c81854..d2ceebdbdf51 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -36,34 +36,6 @@
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
-struct dce80_hw_seq_reg_offsets {
- uint32_t crtc;
-};
-
-static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
-{
- .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-}
-};
-
-#define HW_REG_CRTC(reg, id)\
- (reg + reg_offsets[id].crtc)
-
/*******************************************************************************
* Private definitions
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 2ad5c28c6e66..a19be9de2df7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -50,6 +50,7 @@
#include "dce/dce_hwseq.h"
#include "dce80/dce80_hw_sequencer.h"
#include "dce100/dce100_resource.h"
+#include "dce/dce_panel_cntl.h"
#include "reg_helper.h"
@@ -266,6 +267,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_80_REG_LIST(id),\
@@ -728,6 +741,23 @@ struct link_encoder *dce80_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct clock_source *dce80_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -909,6 +939,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce80_res_pool_funcs = {
.destroy = dce80_destroy_resource_pool,
.link_enc_create = dce80_link_encoder_create,
+ .panel_cntl_create = dce80_panel_cntl_create,
.validate_bandwidth = dce80_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 0e682b5aa3eb..7f8456b9988b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -134,13 +134,6 @@ bool dpp1_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
 /* Some ASICs do not support FP16 scaling, so we reject modes that require it */
if (scl_data->format == PIXEL_FORMAT_FP16 &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index deccab0228d2..75637c291e75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -93,7 +93,6 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
-
/*
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 31b64733d693..319366ebb44f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1139,6 +1139,8 @@ void hubp1_cursor_set_position(
int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
int x_hotspot = pos->x_hotspot;
int y_hotspot = pos->y_hotspot;
+ int cursor_height = (int)hubp->curs_attr.height;
+ int cursor_width = (int)hubp->curs_attr.width;
uint32_t dst_x_offset;
uint32_t cur_en = pos->enable ? 1 : 0;
@@ -1152,10 +1154,16 @@ void hubp1_cursor_set_position(
if (hubp->curs_attr.address.quad_part == 0)
return;
+ // Rotated cursor width/height and hotspots tweaks for offset calculation
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
- y_hotspot = pos->x_hotspot;
- x_hotspot = pos->y_hotspot;
+ swap(cursor_height, cursor_width);
+ if (param->rotation == ROTATION_ANGLE_90) {
+ src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
+ src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+ }
+ } else if (param->rotation == ROTATION_ANGLE_180) {
+ src_x_offset = pos->x - param->viewport.x;
+ src_y_offset = pos->y - param->viewport.y;
}
if (param->mirror) {
@@ -1177,13 +1185,13 @@ void hubp1_cursor_set_position(
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
- if (src_x_offset + (int)hubp->curs_attr.width <= 0)
+ if (src_x_offset + cursor_width <= 0)
cur_en = 0; /* not visible beyond left edge*/
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset + (int)hubp->curs_attr.height <= 0)
+ if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 085c1a39b313..77f16921e7f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -737,7 +737,8 @@ void dcn10_bios_golden_init(struct dc *dc)
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
if (allow_self_fresh_force_enable == false &&
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
- dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
@@ -826,6 +827,14 @@ enum dc_status dcn10_enable_stream_timing(
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
+	/*
+	 * The way 420 is packed, two channels carry the Y component and
+	 * one channel alternates between Cb and Cr, so both channels need
+	 * the pixel value for Y
+	 */
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ black_color.color_r_cr = black_color.color_g_y;
+
if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
pipe_ctx->stream_res.tg->funcs->set_blank_color(
pipe_ctx->stream_res.tg,
@@ -903,7 +912,7 @@ static void dcn10_reset_back_end_for_pipe(
if (pipe_ctx->top_pipe == NULL) {
if (pipe_ctx->stream_res.abm)
- pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
@@ -1238,12 +1247,13 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
void dcn10_init_hw(struct dc *dc)
{
- int i;
+ int i, j;
struct abm *abm = dc->res_pool->abm;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -1333,17 +1343,28 @@ void dcn10_init_hw(struct dc *dc)
continue;
/*
- * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
- * which needs to read dpcd info with the help of aconnector.
- * If aconnector (dc->links[i]->prev) is NULL, then dpcd status
- * cannot be read.
+ * If any of the displays are lit up turn them off.
+ * The reason is that some MST hubs cannot be turned off
+ * completely until we tell them to do so.
+ * If not turned off, then displays connected to MST hub
+ * won't light up.
*/
- if (dc->links[i]->priv) {
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
- dp_receiver_power_ctrl(dc->links[i], false);
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+ /* blank dp stream before power off receiver*/
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+ dp_receiver_power_ctrl(dc->links[i], false);
}
}
}
@@ -1361,17 +1382,54 @@ void dcn10_init_hw(struct dc *dc)
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
+	/* In headless boot cases, DIG may be turned on, which causes
+	 * HW/SW discrepancies. To avoid this, power down the hardware
+	 * on boot if DIG is turned on and seamless boot is not enabled.
+	 */
+ if (dc->config.power_down_display_on_boot) {
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link &&
+ edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ dc->hwss.edp_backlight_control &&
+ dc->hwss.power_down &&
+ dc->hwss.edp_power_control) {
+ dc->hwss.edp_backlight_control(edp_link, false);
+ dc->hwss.power_down(dc);
+ dc->hwss.edp_power_control(edp_link, false);
+ } else {
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+ dc->hwss.power_down) {
+ dc->hwss.power_down(dc);
+ break;
+ }
+
+ }
+ }
+ }
+
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
- if (abm != NULL) {
- abm->funcs->init_backlight(abm);
- abm->funcs->abm_init(abm);
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->panel_cntl)
+ backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
+ if (abm != NULL)
+ abm->funcs->abm_init(abm, backlight);
+
if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
@@ -1625,12 +1683,81 @@ void dcn10_pipe_control_lock(
hws->funcs.verify_allow_pstate_change_high(dc);
}
+/**
+ * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
+ *
+ * Software keepout workaround to prevent cursor update locking from stalling
+ * cursor updates indefinitely, or from retaining stale values when the
+ * viewport changes in the same frame as the cursor.
+ *
+ * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
+ * too close to VUPDATE, then stall out until VUPDATE finishes.
+ *
+ * TODO: Optimize cursor programming to be once per frame before VUPDATE
+ * to avoid the need for this workaround.
+ */
+static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct crtc_position position;
+ uint32_t vupdate_start, vupdate_end;
+ unsigned int lines_to_vupdate, us_to_vupdate, vpos;
+ unsigned int us_per_line, us_vupdate;
+
+ if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
+ return;
+
+ if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
+ return;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ /* Avoid wraparound calculation issues */
+ vupdate_start += stream->timing.v_total;
+ vupdate_end += stream->timing.v_total;
+ vpos += stream->timing.v_total;
+
+ if (vpos <= vupdate_start) {
+ /* VPOS is in VACTIVE or back porch. */
+ lines_to_vupdate = vupdate_start - vpos;
+ } else if (vpos > vupdate_end) {
+ /* VPOS is in the front porch. */
+ return;
+ } else {
+ /* VPOS is in VUPDATE. */
+ lines_to_vupdate = 0;
+ }
+
+ /* Calculate time until VUPDATE in microseconds. */
+ us_per_line =
+ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+	/* 70 us is a conservative estimate of cursor update time */
+ if (us_to_vupdate > 70)
+ return;
+
+ /* Stall out until the cursor update completes. */
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+ udelay(us_to_vupdate + us_vupdate);
+}
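To make the keepout math concrete, here is the us_per_line computation with assumed CTA-861 1080p60 timing (h_total = 2200, pixel clock 148.5 MHz, i.e. pix_clk_100hz = 1485000); these numbers are illustrative, not from the patch:

/* Sketch: us_per_line arithmetic with assumed 1080p60 timing. */
unsigned int h_total = 2200;
unsigned int pix_clk_100hz = 1485000;				/* 148.5 MHz */
unsigned int us_per_line = h_total * 10000u / pix_clk_100hz;	/* = 14 us */

/* 5 lines ahead of VUPDATE is ~70 us: right at the stall threshold. */
unsigned int us_to_vupdate = 5 * us_per_line;			/* = 70 us */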
+
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
if (!pipe || pipe->top_pipe)
return;
+ /* Prevent cursor lock from stalling out cursor updates. */
+ if (lock)
+ delay_cursor_until_vupdate(dc, pipe);
+
dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
pipe->stream_res.opp->inst, lock);
}
@@ -2095,25 +2222,25 @@ void dcn10_get_surface_visual_confirm_color(
switch (pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB8888:
- /* set boarder color to red */
+ /* set border color to red */
color->color_r_cr = color_value;
break;
case PIXEL_FORMAT_ARGB2101010:
- /* set boarder color to blue */
+ /* set border color to blue */
color->color_b_cb = color_value;
break;
case PIXEL_FORMAT_420BPP8:
- /* set boarder color to green */
+ /* set border color to green */
color->color_g_y = color_value;
break;
case PIXEL_FORMAT_420BPP10:
- /* set boarder color to yellow */
+ /* set border color to yellow */
color->color_g_y = color_value;
color->color_r_cr = color_value;
break;
case PIXEL_FORMAT_FP16:
- /* set boarder color to white */
+ /* set border color to white */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
color->color_g_y = color_value;
@@ -2138,25 +2265,25 @@ void dcn10_get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
- /* HDR10, ARGB2101010 - set boarder color to red */
+ /* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
- /* FreeSync 2 ARGB2101010 - set boarder color to pink */
+ /* FreeSync 2 ARGB2101010 - set border color to pink */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
}
break;
case PIXEL_FORMAT_FP16:
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
- /* HDR10, FP16 - set boarder color to blue */
+ /* HDR10, FP16 - set border color to blue */
color->color_b_cb = color_value;
} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
- /* FreeSync 2 HDR - set boarder color to green */
+ /* FreeSync 2 HDR - set border color to green */
color->color_g_y = color_value;
}
break;
default:
- /* SDR - set boarder color to Gray */
+ /* SDR - set border color to Gray */
color->color_r_cr = color_value/2;
color->color_b_cb = color_value/2;
color->color_g_y = color_value/2;
@@ -2205,6 +2332,14 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
&blnd_cfg.black_color);
}
+	/*
+	 * The way 420 is packed, two channels carry the Y component and
+	 * one channel alternates between Cb and Cr, so both channels need
+	 * the pixel value for Y
+	 */
+ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ blnd_cfg.black_color.color_r_cr = blnd_cfg.black_color.color_g_y;
+
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
@@ -2441,12 +2576,12 @@ void dcn10_blank_pixel_data(
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+ stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+ stream->link->panel_cntl->inst);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
} else if (blank) {
- if (stream_res->abm)
- stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
}
@@ -3236,7 +3371,7 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
-static void dcn10_calc_vupdate_position(
+void dcn10_calc_vupdate_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index af51424315d5..42b6e016d71e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -34,6 +34,11 @@ struct dc;
void dcn10_hw_sequencer_construct(struct dc *dc);
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void dcn10_calc_vupdate_position(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ uint32_t *start_line,
+ uint32_t *end_line);
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
enum dc_status dcn10_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 700509bdf503..7cb8c3fb2665 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -72,6 +72,9 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
@@ -88,8 +91,6 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index d3617d6785a7..7fd385be3f3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -90,7 +90,8 @@ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
.is_dig_enabled = dcn10_is_dig_enabled,
.get_dig_frontend = dcn10_get_dig_frontend,
.get_dig_mode = dcn10_get_dig_mode,
- .destroy = dcn10_link_encoder_destroy
+ .destroy = dcn10_link_encoder_destroy,
+ .get_max_link_cap = dcn10_link_encoder_get_max_link_cap,
};
static enum bp_result link_transmitter_control(
@@ -1370,7 +1371,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
DC_HPD_EN, 0);
}
-
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
@@ -1425,3 +1425,19 @@ enum signal_type dcn10_get_dig_mode(
return SIGNAL_TYPE_NONE;
}
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+	/* Set default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+	/* Raise the link rate based on supported features */
+ if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+ if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ *link_settings = max_link_cap;
+}
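The cap above tops out at HBR3 (8.1 Gbps per lane) on four lanes. Since DP 1.x uses 8b/10b channel coding, the payload bandwidth implied by that cap works out as follows (arithmetic shown for illustration only):

/* Sketch: payload bandwidth implied by LANE_COUNT_FOUR + HBR3. */
unsigned int lanes = 4;
unsigned int rate_mbps_per_lane = 8100;		/* HBR3 = 8.1 Gbps */
unsigned int payload_mbps =
	rate_mbps_per_lane * lanes * 8 / 10;	/* = 25920 Mbps */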
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 762109174fb8..68395bcc24fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -575,4 +575,7 @@ void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
enum signal_type dcn10_get_dig_mode(
struct link_encoder *enc);
+
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
#endif /* __DC_LINK_ENCODER__DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 17d96ec6acd8..ec0ab42becba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -299,6 +299,7 @@ void optc1_set_vtg_params(struct timing_generator *optc,
uint32_t asic_blank_end;
uint32_t v_init;
uint32_t v_fp2 = 0;
+ int32_t vertical_line_start;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -315,8 +316,9 @@ void optc1_set_vtg_params(struct timing_generator *optc,
patched_crtc_timing.v_border_top;
/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
- if (optc1->vstartup_start > asic_blank_end)
- v_fp2 = optc1->vstartup_start - asic_blank_end;
+ vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
+ if (vertical_line_start < 0)
+ v_fp2 = -vertical_line_start;
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
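The rewritten computation derives FP2 from vertical_line_start, which yields one line less than the old vstartup_start - asic_blank_end form. With assumed line counts (illustrative only):

/* Sketch: the new v_fp2 sign convention with assumed numbers. */
int asic_blank_end = 36;
int vstartup_start = 40;	/* VSTARTUP lands before blank end */
int vertical_line_start = asic_blank_end - vstartup_start + 1;		/* = -3 */
uint32_t v_fp2 = vertical_line_start < 0 ? -vertical_line_start : 0;	/* = 3 */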
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 9a459a8fe8a0..8d1e52fb0393 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -158,6 +158,7 @@ struct dcn_optc_registers {
uint32_t OTG_GSL_WINDOW_Y;
uint32_t OTG_VUPDATE_KEEPOUT;
uint32_t OTG_CRC_CNTL;
+ uint32_t OTG_CRC_CNTL2;
uint32_t OTG_CRC0_DATA_RG;
uint32_t OTG_CRC0_DATA_B;
uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
@@ -475,7 +476,11 @@ struct dcn_optc_registers {
type OPTC_DSC_SLICE_WIDTH;\
type OPTC_SEGMENT_WIDTH;\
type OPTC_DWB0_SOURCE_SELECT;\
- type OPTC_DWB1_SOURCE_SELECT;
+ type OPTC_DWB1_SOURCE_SELECT;\
+ type OTG_CRC_DSC_MODE;\
+ type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
+ type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
+ type OTG_CRC_DATA_FORMAT;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index ba849aa31e6e..17d5cb422025 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -51,6 +51,7 @@
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
+#include "dce/dce_panel_cntl.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
@@ -329,6 +330,18 @@ static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
static const struct dce110_aux_registers_shift aux_shift = {
DCN10_AUX_MASK_SH_LIST(__SHIFT)
};
@@ -817,6 +830,23 @@ struct link_encoder *dcn10_link_encoder_create(
return &enc10->base;
}
+static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct clock_source *dcn10_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -1091,24 +1121,6 @@ static enum dc_status build_mapped_resource(
{
struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
- /*TODO Seems unneeded anymore */
- /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- todo: shouldn't have to copy missing parameter here
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- stream->clamping.pixel_encoding =
- stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- build_clamping_params(stream);
-
- continue;
- }
- }
- */
-
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1301,6 +1313,7 @@ static const struct dc_cap_funcs cap_funcs = {
static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
+ .panel_cntl_create = dcn10_panel_cntl_create,
.validate_bandwidth = dcn_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
@@ -1363,6 +1376,40 @@ static bool dcn10_resource_construct(
/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 1;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 1; // RGAM on DCN1
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 1;
+
+ /* no post-blend color operations */
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 0;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 0;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 7eba9333c328..07b2f9399671 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1274,7 +1274,6 @@ static void enc1_se_audio_setup(
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@@ -1282,7 +1281,6 @@ static void enc1_se_audio_setup(
/* This should not happen; when it does, return so we don't get a BSOD */
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 501532dd523a..c478213ba7ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -80,6 +80,7 @@ struct dcn20_hubbub {
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
+ int num_vmid;
struct dcn20_vmid vmid[16];
unsigned int detile_buf_size;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index a023a4d59f41..da5333d165ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -961,8 +961,7 @@ void dcn20_blank_pixel_data(
width = width / odm_cnt;
if (blank) {
- if (stream_res->abm)
- stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
@@ -997,7 +996,8 @@ void dcn20_blank_pixel_data(
if (!blank)
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+ stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+ stream->link->panel_cntl->inst);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
}
@@ -1478,8 +1478,11 @@ static void dcn20_program_pipe(
if (pipe_ctx->update_flags.bits.odm)
hws->funcs.update_odm(dc, context, pipe_ctx);
- if (pipe_ctx->update_flags.bits.enable)
+ if (pipe_ctx->update_flags.bits.enable) {
dcn20_enable_plane(dc, pipe_ctx, context);
+ if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+ dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+ }
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
@@ -2037,8 +2040,7 @@ static void dcn20_reset_back_end_for_pipe(
*/
if (pipe_ctx->top_pipe == NULL) {
- if (pipe_ctx->stream_res.abm)
- pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
@@ -2171,6 +2173,13 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
*/
mpcc_id = hubp->inst;
+ /* If there is no full update, don't need to touch MPC tree*/
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
+ mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+ return;
+ }
+
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 6a21228893ee..2fbde4241559 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -83,6 +83,9 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.init_vm_ctx = dcn20_init_vm_ctx,
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
@@ -98,8 +101,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index e4ac73035c84..8d209dae66e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -49,6 +49,12 @@
#define IND_REG(index) \
(enc10->link_regs->index)
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
static struct mpll_cfg dcn2_mpll_cfg[] = {
// RBR
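
The MIN/MAX definitions above follow the classic guarded, fully parenthesized C idiom. One caveat worth a standalone note: the arguments are evaluated twice, so expressions with side effects must not be passed in.

#include <stdio.h>

#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif

int main(void)
{
	int a = 1;

	printf("%d\n", MIN(2, 4));      /* 2 */
	printf("%d\n", MIN(a + 1, 4));  /* 2; a + 1 has no side effects */
	/* MIN(a++, 4) would expand to ((a++) < (4) ? (a++) : (4)) and
	 * increment 'a' twice whenever a < 4 -- avoid. */
	return 0;
}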
@@ -260,6 +266,38 @@ void dcn20_link_encoder_enable_dp_output(
}
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t is_in_usb_c_dp4_mode = 0;
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+ /* in usb c dp2 mode, max lane count is 2 */
+ if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+ }
+
+}
+
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable = 0;
+ bool is_usb_c_alt_mode = false;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ /* if value == 1 alt mode is disabled, otherwise it is enabled */
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+ }
+
+ return is_usb_c_alt_mode;
+}
+
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
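
A standalone sketch (hypothetical helper, simplified types) of the clamping rule dcn20_link_encoder_get_max_link_cap encodes: in USB-C DP alt mode with a 2-lane pin assignment (DPALT_DP4 clear), the reported maximum lane count is capped at two; otherwise the PHY maximum stands.

#include <stdbool.h>
#include <stdio.h>

enum lane_count { LANE_COUNT_TWO = 2, LANE_COUNT_FOUR = 4 };

static enum lane_count max_lanes(bool in_alt_mode, bool dp4_pins,
				 enum lane_count phy_max)
{
	if (in_alt_mode && !dp4_pins && phy_max > LANE_COUNT_TWO)
		return LANE_COUNT_TWO;   /* USB-C DP2 pin assignment */
	return phy_max;
}

int main(void)
{
	printf("%d\n", max_lanes(true, false, LANE_COUNT_FOUR));  /* 2 */
	printf("%d\n", max_lanes(true, true, LANE_COUNT_FOUR));   /* 4 */
	printf("%d\n", max_lanes(false, false, LANE_COUNT_FOUR)); /* 4 */
	return 0;
}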
@@ -338,6 +376,8 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
.fec_is_active = enc2_fec_is_active,
.get_dig_mode = dcn10_get_dig_mode,
.get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn20_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 8cab8107fd94..284a1ee4d249 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -343,6 +343,10 @@ void dcn20_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
void dcn20_link_encoder_construct(
struct dcn20_link_encoder *enc20,
const struct encoder_init_data *init_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 570dfd9a243f..99cc095dc33c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -452,7 +452,7 @@ void mpc2_set_output_gamma(
next_mode = LUT_RAM_A;
mpc20_power_on_ogam_lut(mpc, mpcc_id, true);
- mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A ? true:false);
+ mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_A)
mpc2_program_luta(mpc, mpcc_id, params);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index d875b0c38fde..8c16967fe018 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -409,6 +409,18 @@ void optc2_program_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_MANUAL_TRIG, 1);
}
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_2(OTG_CRC_CNTL2, 0,
+ OTG_CRC_DSC_MODE, params->dsc_mode,
+ OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode);
+
+ return optc1_configure_crc(optc, params);
+}
+
static struct timing_generator_funcs dcn20_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
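
A standalone sketch (invented types and fields) of the delegation pattern in optc2_configure_crc: program only the DCN2-specific OTG_CRC_CNTL2 fields, then reuse the DCN1 routine for the common CRC window and source programming.

#include <stdbool.h>
#include <stdio.h>

struct crc_params { int dsc_mode; int odm_mode; int windowa_x_start; };

static bool optc1_configure_crc_sketch(const struct crc_params *p)
{
	printf("dcn1 common part: windowa_x_start=%d\n", p->windowa_x_start);
	return true;
}

static bool optc2_configure_crc_sketch(const struct crc_params *p)
{
	/* new DCN2-only fields first ... */
	printf("dcn2 part: dsc_mode=%d odm_mode=%d\n", p->dsc_mode, p->odm_mode);
	/* ... then delegate the rest to the DCN1 routine */
	return optc1_configure_crc_sketch(p);
}

int main(void)
{
	struct crc_params p = { .dsc_mode = 1, .odm_mode = 0,
				.windowa_x_start = 16 };

	return optc2_configure_crc_sketch(&p) ? 0 : 1;
}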
@@ -452,7 +464,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
- .configure_crc = optc1_configure_crc,
+ .configure_crc = optc2_configure_crc,
.set_dsc_config = optc2_set_dsc_config,
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index 239cc40ae474..e0a0a8a8e2c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -36,6 +36,7 @@
SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
SRI(OTG_DSC_START_POSITION, OTG, inst),\
+ SRI(OTG_CRC_CNTL2, OTG, inst),\
SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
@@ -62,6 +63,10 @@
SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
@@ -109,4 +114,6 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
void optc2_setup_manual_trigger(struct timing_generator *optc);
void optc2_program_manual_trigger(struct timing_generator *optc);
bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params);
#endif /* __DC_OPTC_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e4348e3b6389..cef1aa938ab5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -61,6 +61,7 @@
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
#include "dc_link_ddc.h"
+#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
@@ -691,6 +692,18 @@ static const struct dcn10_link_enc_mask le_mask = {
DPCS_DCN2_MASK_SH_LIST(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN20(id),\
@@ -1293,6 +1306,23 @@ struct link_encoder *dcn20_link_encoder_create(
return &enc20->enc10.base;
}
+static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct clock_source *dcn20_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -1623,24 +1653,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
enum dc_status status = DC_OK;
struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
- /*TODO Seems unneeded anymore */
- /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- todo: shouldn't have to copy missing parameter here
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- stream->clamping.pixel_encoding =
- stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- build_clamping_params(stream);
-
- continue;
- }
- }
- */
-
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1651,22 +1663,32 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
}
-static void acquire_dsc(struct resource_context *res_ctx,
- const struct resource_pool *pool,
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
struct display_stream_compressor **dsc,
int pipe_idx)
{
int i;
+ const struct resource_pool *pool = dc->res_pool;
+ struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
- ASSERT(*dsc == NULL);
+ ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
*dsc = NULL;
+ /* Always do 1-to-1 mapping when the number of DSCs matches the number of OPPs */
if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
*dsc = pool->dscs[pipe_idx];
res_ctx->is_dsc_acquired[pipe_idx] = true;
return;
}
+ /* Return old DSC to avoid the need for re-programming */
+ if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+ *dsc = dsc_old;
+ res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+ return;
+ }
+
/* Find first free DSC */
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (!res_ctx->is_dsc_acquired[i]) {
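
A standalone model (made-up pool sizes, simplified state) of the three-tier acquisition policy in dcn20_acquire_dsc: identity mapping when every OPP has a dedicated DSC, else reuse the DSC the pipe held in the current state to skip re-programming, else take the first free one.

#include <stdbool.h>
#include <stdio.h>

#define NUM_DSC 3   /* hypothetical pool sizes */
#define NUM_OPP 6

static int acquire_dsc(bool acquired[NUM_DSC], int pipe_idx, int old_dsc)
{
	int i;

	if (NUM_DSC == NUM_OPP) {                 /* tier 1: identity map */
		acquired[pipe_idx] = true;
		return pipe_idx;
	}
	if (old_dsc >= 0 && !acquired[old_dsc]) { /* tier 2: keep old DSC */
		acquired[old_dsc] = true;
		return old_dsc;
	}
	for (i = 0; i < NUM_DSC; i++)             /* tier 3: first free */
		if (!acquired[i]) {
			acquired[i] = true;
			return i;
		}
	return -1;                                /* none available */
}

int main(void)
{
	bool acquired[NUM_DSC] = { false };

	printf("%d\n", acquire_dsc(acquired, 0, 2));  /* reuses DSC 2 */
	printf("%d\n", acquire_dsc(acquired, 1, -1)); /* first free: 0 */
	return 0;
}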
@@ -1698,7 +1720,6 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
{
enum dc_status result = DC_OK;
int i;
- const struct resource_pool *pool = dc->res_pool;
/* Get a DSC if required and available */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1710,7 +1731,7 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream_res.dsc)
continue;
- acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
+ dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
@@ -1838,12 +1859,13 @@ static void swizzle_to_dml_params(
}
bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
struct resource_context *res_ctx,
- const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
struct pipe_ctx *next_odm_pipe)
{
int pipe_idx = next_odm_pipe->pipe_idx;
+ const struct resource_pool *pool = dc->res_pool;
*next_odm_pipe = *prev_odm_pipe;
@@ -1901,7 +1923,7 @@ bool dcn20_split_stream_for_odm(
}
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
if (next_odm_pipe->stream->timing.flags.DSC == 1) {
- acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
+ dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
return false;
@@ -1939,8 +1961,6 @@ void dcn20_split_stream_for_mpc(
secondary_pipe->top_pipe = primary_pipe;
ASSERT(primary_pipe->plane_state);
- resource_build_scaling_params(primary_pipe);
- resource_build_scaling_params(secondary_pipe);
}
void dcn20_populate_dml_writeback_from_context(
@@ -2216,12 +2236,12 @@ int dcn20_populate_dml_pipes_from_context(
|| pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
- pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
- pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
- pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
- pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
- pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
- pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+ pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
+ pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c_unadjusted.y;
+ pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport_unadjusted.width;
+ pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c_unadjusted.width;
+ pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport_unadjusted.height;
+ pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c_unadjusted.height;
pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
@@ -2570,13 +2590,15 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split,
+ int *split,
bool *merge)
{
int i, pipe_idx, vlevel_split;
int plane_count = 0;
bool force_split = false;
bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
+ struct vba_vars_st *v = &context->bw_ctx.dml.vba;
+ int max_mpc_comb = v->maxMpcComb;
if (context->stream_count > 1) {
if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
@@ -2584,10 +2606,22 @@ int dcn20_validate_apply_pipe_split_flags(
} else if (dc->debug.force_single_disp_pipe_split)
force_split = true;
- /* TODO: fix dc bugs and remove this split threshold thing */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ /**
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16))
+ avoid_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
if (pipe->stream && !pipe->prev_odm_pipe &&
(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
++plane_count;
@@ -2602,26 +2636,35 @@ int dcn20_validate_apply_pipe_split_flags(
continue;
for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
- if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
+ if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+ v->ModeSupport[vlevel][0])
break;
/* Impossible to not split this pipe */
if (vlevel > context->bw_ctx.dml.soc.num_states)
vlevel = vlevel_split;
+ else
+ max_mpc_comb = 0;
pipe_idx++;
}
- context->bw_ctx.dml.vba.maxMpcComb = 0;
+ v->maxMpcComb = max_mpc_comb;
}
/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
+ int pipe_plane = v->pipe_plane[pipe_idx];
+ bool split4mpc = context->stream_count == 1 && plane_count == 1
+ && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
- split[i] = true;
+ if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] > 1) {
+ if (split4mpc)
+ split[i] = 4;
+ else
+ split[i] = 2;
+ }
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
pipe->stream->view_format ==
@@ -2630,50 +2673,75 @@ int dcn20_validate_apply_pipe_split_flags(
TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
pipe->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_SIDE_BY_SIDE))
- split[i] = true;
+ split[i] = 2;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+ split[i] = 2;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
-
- if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
- /*Already split odm pipe tree, don't try to split again*/
- split[i] = false;
- split[pipe->prev_odm_pipe->pipe_idx] = false;
- } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
- && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
- /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
- split[i] = false;
- split[pipe->top_pipe->pipe_idx] = false;
- } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
- if (split[i] == false) {
- /*Exiting mpc/odm combine*/
- merge[i] = true;
+ v->ODMCombineEnabled[pipe_plane] =
+ v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ if (get_num_mpc_splits(pipe) == 1) {
+ /* Need an MPC split, but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 MPC */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 MPC */
+ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 2 -> 1 MPC */
+ } else if (get_num_mpc_splits(pipe) == 3) {
+ /* Need an MPC split, but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
+ || !pipe->bottom_pipe)) {
+ merge[i] = true; /* 4 -> 2 MPC */
+ } else if (split[i] == 0 && pipe->top_pipe &&
+ pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 4 -> 1 MPC */
+ split[i] = 0;
+ } else if (get_num_odm_splits(pipe)) {
+ /* ODM -> MPC transition */
+ ASSERT(0); /* NOT expected yet */
if (pipe->prev_odm_pipe) {
- ASSERT(0); /*should not actually happen yet*/
- merge[pipe->prev_odm_pipe->pipe_idx] = true;
- } else
- merge[pipe->top_pipe->pipe_idx] = true;
- } else {
- /*Transition from mpc combine to odm combine or vice versa*/
- ASSERT(0); /*should not actually happen yet*/
- split[i] = true;
- merge[i] = true;
- if (pipe->prev_odm_pipe) {
- split[pipe->prev_odm_pipe->pipe_idx] = true;
- merge[pipe->prev_odm_pipe->pipe_idx] = true;
- } else {
- split[pipe->top_pipe->pipe_idx] = true;
- merge[pipe->top_pipe->pipe_idx] = true;
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ } else {
+ if (get_num_odm_splits(pipe) == 1) {
+ /* Need an ODM split, but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 ODM */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 ODM */
+ else if (pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ } else if (get_num_odm_splits(pipe) == 3) {
+ /* Need an ODM split, but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
+ || !pipe->next_odm_pipe)) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* 4 -> 2 ODM */
+ } else if (split[i] == 0 && pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ split[i] = 0;
+ } else if (get_num_mpc_splits(pipe)) {
+ /* MPC -> ODM transition */
+ ASSERT(0); /* NOT expected yet */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ split[i] = 0;
+ merge[i] = true;
}
}
}
/* Adjust dppclk when split is forced, do not bother with dispclk */
- if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
+ if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
+ v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
pipe_idx++;
}
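
With split[] now an int rather than a bool, the values used throughout the hunks above form a small encoding; a standalone sketch of how a consumer reads it (names hypothetical, values taken from this function):

#include <stdio.h>

static const char *split_desc(int split)
{
	switch (split) {
	case 0:  return "keep as-is";
	case 2:  return "2-way split (MPC or ODM)";
	case 4:  return "4-way split (4-to-1 MPC case)";
	default: return "invalid";
	}
}

int main(void)
{
	int split[3] = { 0, 2, 4 };
	int i;

	for (i = 0; i < 3; i++)
		printf("pipe %d: %s\n", i, split_desc(split[i]));
	return 0;
}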
@@ -2689,7 +2757,7 @@ bool dcn20_fast_validate_bw(
int *vlevel_out)
{
bool out = false;
- bool split[MAX_PIPES] = { false };
+ int split[MAX_PIPES] = { 0 };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@@ -2731,7 +2799,7 @@ bool dcn20_fast_validate_bw(
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
if (!dcn20_split_stream_for_odm(
- &context->res_ctx, dc->res_pool,
+ dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
@@ -2749,7 +2817,7 @@ bool dcn20_fast_validate_bw(
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
- if (split[i]) {
+ if (split[i] == 2) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
@@ -2760,14 +2828,17 @@ bool dcn20_fast_validate_bw(
}
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
- &context->res_ctx, dc->res_pool,
+ dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
dcn20_build_mapped_resource(dc, context, pipe->stream);
- } else
+ } else {
dcn20_split_stream_for_mpc(
- &context->res_ctx, dc->res_pool,
- pipe, hsplit_pipe);
+ &context->res_ctx, dc->res_pool,
+ pipe, hsplit_pipe);
+ if (!resource_build_scaling_params(pipe) || !resource_build_scaling_params(hsplit_pipe))
+ goto validate_fail;
+ }
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
@@ -3007,7 +3078,7 @@ void dcn20_calculate_dlg_params(
pipe_idx,
cstate_en,
context->bw_ctx.bw.dcn.clk.p_state_change_support,
- false, false, false);
+ false, false, true);
context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].rq_regs,
@@ -3091,6 +3162,8 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
dc->debug.disable_dram_clock_change_vactive_support;
+ context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
+ dc->debug.enable_dram_clock_change_one_display_vactive;
if (fast_validate) {
return dcn20_validate_bandwidth_internal(dc, context, true);
@@ -3189,8 +3262,6 @@ static struct dc_cap_funcs cap_funcs = {
enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
- enum dc_status result = DC_OK;
-
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
@@ -3202,12 +3273,13 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
swizzle = DC_SW_64KB_S;
plane_state->tiling_info.gfx9.swizzle = swizzle;
- return result;
+ return DC_OK;
}
static struct resource_funcs dcn20_res_pool_funcs = {
.destroy = dcn20_destroy_resource_pool,
.link_enc_create = dcn20_link_encoder_create,
+ .panel_cntl_create = dcn20_panel_cntl_create,
.validate_bandwidth = dcn20_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -3446,6 +3518,13 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
bb->dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+
+ if ((int)(bb->dummy_pstate_latency_us * 1000)
+ != dc->bb_overrides.dummy_clock_change_latency_ns
+ && dc->bb_overrides.dummy_clock_change_latency_ns) {
+ bb->dummy_pstate_latency_us =
+ dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
+ }
}
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
@@ -3681,9 +3760,42 @@ static bool dcn20_resource_construct(
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
- dc->caps.hw_3d_lut = true;
dc->caps.extended_aux_timeout_support = true;
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2, only MPC ROM
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index 9d5bff9455fd..2c1959845c29 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -123,7 +123,7 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split,
+ int *split,
bool *merge);
void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -135,10 +135,14 @@ void dcn20_split_stream_for_mpc(
struct pipe_ctx *primary_pipe,
struct pipe_ctx *secondary_pipe);
bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
struct resource_context *res_ctx,
- const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
struct pipe_ctx *next_odm_pipe);
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx);
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 5e2d14b897af..129f0b62f751 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -49,11 +49,6 @@
#define FN(reg_name, field_name) \
hubbub1->shifts->field_name, hubbub1->masks->field_name
-#ifdef NUM_VMID
-#undef NUM_VMID
-#endif
-#define NUM_VMID 16
-
static uint32_t convert_and_clamp(
uint32_t wm_ns,
uint32_t refclk_mhz,
@@ -138,7 +133,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
dcn21_dchvm_init(hubbub);
- return NUM_VMID;
+ return hubbub1->num_vmid;
}
bool hubbub21_program_urgent_watermarks(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index d285ba622d61..960a0716dde5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -778,21 +778,28 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_
{
struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 };
-
- PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
- PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
- PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
- PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
- PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
- PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
+ cmd.PLAT_54186_wa.header.payload_bytes = sizeof(cmd.PLAT_54186_wa.flip);
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ cmd.PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
+ cmd.PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
+ cmd.PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
+ cmd.PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
+ cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
PERF_TRACE(); // TODO: remove after performance is stable.
- dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header);
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
PERF_TRACE(); // TODO: remove after performance is stable.
dc_dmub_srv_cmd_execute(dmcub);
PERF_TRACE(); // TODO: remove after performance is stable.
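
A standalone sketch (invented command layout; not the real dmub_rb_cmd) of the pattern the workaround now uses: build the command inside a zeroed union that spans the whole ring-buffer slot and record the payload size in the header, so no stale bytes ride along past the flip parameters.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cmd_header { uint8_t type; uint8_t payload_bytes; };
struct cmd_flip { struct cmd_header header; uint32_t addr; uint8_t immediate; };

union rb_cmd {
	struct cmd_header header;
	struct cmd_flip flip;
	uint8_t raw[64];                 /* full ring-buffer slot */
};

int main(void)
{
	union rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));    /* no stale slot contents */
	cmd.flip.header.type = 1;        /* hypothetical command id */
	cmd.flip.header.payload_bytes =
		sizeof(cmd.flip) - sizeof(struct cmd_header);
	cmd.flip.addr = 0x1000;
	cmd.flip.immediate = 1;
	printf("type=%u payload=%u\n", cmd.header.type,
	       cmd.header.payload_bytes);
	return 0;
}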
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 707ce0f28fab..a5baef7e7a7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -86,11 +86,10 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
- .set_cursor_position = dcn10_set_cursor_position,
- .set_cursor_attribute = dcn10_set_cursor_attribute,
- .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
- .optimize_pwr_state = dcn21_optimize_pwr_state,
- .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
@@ -106,8 +105,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
index e45683ac871a..aa46c35b05a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -203,29 +203,6 @@ static bool update_cfg_data(
return true;
}
-void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
- struct dc_link_settings *link_settings)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value;
-
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
-
- if (!value && link_settings->lane_count > LANE_COUNT_TWO)
- link_settings->lane_count = LANE_COUNT_TWO;
-}
-
-bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value;
-
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
-
- // if value == 1 alt mode is disabled, otherwise it is enabled
- return !value;
-}
-
bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -348,8 +325,8 @@ static const struct link_encoder_funcs dcn21_link_enc_funcs = {
.fec_set_ready = enc2_fec_set_ready,
.fec_is_active = enc2_fec_is_active,
.get_dig_frontend = dcn10_get_dig_frontend,
- .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
- .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn21_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index a721bb401ef0..f00a56835084 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -61,6 +61,7 @@
#include "dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "dce110/dce110_resource.h"
+#include "dce/dce_panel_cntl.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
@@ -85,6 +86,7 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "dce/dmub_psr.h"
+#include "dce/dmub_abm.h"
#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -803,7 +805,7 @@ static const struct resource_caps res_cap_rn = {
.num_pll = 5, // maybe 3 because the last two used for USB-c
.num_dwb = 1,
.num_ddc = 5,
- .num_vmid = 1,
+ .num_vmid = 16,
.num_dsc = 3,
};
@@ -995,9 +997,12 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
pool->base.dp_clock_source = NULL;
}
-
- if (pool->base.abm != NULL)
- dce_abm_destroy(&pool->base.abm);
+ if (pool->base.abm != NULL) {
+ if (pool->base.abm->ctx->dc->config.disable_dmcu)
+ dmub_abm_destroy(&pool->base.abm);
+ else
+ dce_abm_destroy(&pool->base.abm);
+ }
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
@@ -1290,6 +1295,7 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
+ hubbub->num_vmid = res_cap_rn.num_vmid;
return &hubbub->base;
}
@@ -1379,7 +1385,8 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, j, closest_clk_lvl;
+ unsigned int i, closest_clk_lvl;
+ int j;
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -1591,6 +1598,18 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(4, E),
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
@@ -1676,6 +1695,24 @@ static struct link_encoder *dcn21_link_encoder_create(
return &enc21->enc10.base;
}
+
+static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
#define CTX ctx
#define REG(reg_name) \
@@ -1694,12 +1731,8 @@ static int dcn21_populate_dml_pipes_from_context(
{
uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes);
int i;
- struct resource_context *res_ctx = &context->res_ctx;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
+ for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.hostvm = 1;
pipes[i].pipe.src.gpuvm = 1;
@@ -1724,6 +1757,7 @@ enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_stat
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
+ .panel_cntl_create = dcn21_panel_cntl_create,
.validate_bandwidth = dcn21_validate_bandwidth,
.populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -1770,7 +1804,6 @@ static bool dcn21_resource_construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 256;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.hw_3d_lut = true;
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
@@ -1779,6 +1812,40 @@ static bool dcn21_resource_construct(
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
@@ -1831,17 +1898,19 @@ static bool dcn21_resource_construct(
goto create_fail;
}
- pool->base.dmcu = dcn21_dmcu_create(ctx,
- &dmcu_regs,
- &dmcu_shift,
- &dmcu_mask);
- if (pool->base.dmcu == NULL) {
- dm_error("DC: failed to create dmcu!\n");
- BREAK_TO_DEBUGGER();
- goto create_fail;
+ if (!dc->config.disable_dmcu) {
+ pool->base.dmcu = dcn21_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
}
- if (dc->debug.disable_dmcu) {
+ if (dc->config.disable_dmcu) {
pool->base.psr = dmub_psr_create(ctx);
if (pool->base.psr == NULL) {
@@ -1851,15 +1920,16 @@ static bool dcn21_resource_construct(
}
}
- pool->base.abm = dce_abm_create(ctx,
+ if (dc->config.disable_dmcu)
+ pool->base.abm = dmub_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ else
+ pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
- if (pool->base.abm == NULL) {
- dm_error("DC: failed to create abm!\n");
- BREAK_TO_DEBUGGER();
- goto create_fail;
- }
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
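
A standalone sketch of the selection logic the constructor now applies (hypothetical print stand-ins for the real constructors): with DMCU disabled via dc->config, ABM and PSR come from the DMUB microcontroller paths; otherwise the legacy DMCU and DMCU-backed ABM are created, and PSR presumably stays with the DMCU firmware.

#include <stdbool.h>
#include <stdio.h>

static void create_power_features(bool disable_dmcu)
{
	if (disable_dmcu) {
		printf("psr: dmub_psr_create\n");
		printf("abm: dmub_abm_create\n");
	} else {
		printf("dmcu: dcn21_dmcu_create\n");
		printf("abm: dce_abm_create\n");
	}
}

int main(void)
{
	create_power_features(true);   /* DMUB-based path */
	create_power_features(false);  /* legacy DMCU path */
	return 0;
}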
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 7ee8b8460a9b..e34c3376efc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -63,10 +63,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
- dml_common_defs.o
ifdef CONFIG_DRM_AMD_DC_DCN
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 5bbbafacc720..80170f9721ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2599,21 +2599,44 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
}
+ {
+ float SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
+ int PlaneWithMinActiveDRAMClockChangeMargin = -1;
+
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ PlaneWithMinActiveDRAMClockChangeMargin = k;
+ } else {
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j) {
+ PlaneWithMinActiveDRAMClockChangeMargin = j;
+ }
+ }
+ }
}
}
mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (mode_lib->vba.BlendingAndTiming[k] == k))
+ && !(mode_lib->vba.BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+ < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank =
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ }
+ }
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
@@ -2622,13 +2645,17 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
- mode_lib->vba.DRAMClockChangeWatermark += 25;
+
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
- if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ if ((mode_lib->vba.SynchronizedVBlank
+ || mode_lib->vba.NumberOfActivePlanes == 1
+ || (SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0 &&
+ mode_lib->vba.AllowDramClockChangeOneDisplayVactive))
+ && mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
@@ -2640,6 +2667,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported;
}
}
+ }
for (k = 0; k <= mode_lib->vba.soc.num_states; k++)
for (j = 0; j < 2; j++)
mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0];
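
A standalone model (invented margins) of the relaxed vblank rule added above: a DRAM clock change in vblank is now also allowed when every plane except the one with the smallest margin could absorb the switch in vactive (the tracked second-smallest margin is positive) and the new allow_dram_clock_one_display_vactive knob is set; prefetch mode 0 is still required.

#include <stdbool.h>
#include <stdio.h>

static bool vblank_change_ok(bool sync_vblank, int nplanes,
			     double second_min_margin, bool allow_one_vactive,
			     int prefetch_mode)
{
	return (sync_vblank || nplanes == 1 ||
		(second_min_margin > 0 && allow_one_vactive)) &&
	       prefetch_mode == 0;
}

int main(void)
{
	/* two unsynchronized displays; one can take the switch in vactive */
	printf("%d\n", vblank_change_ok(false, 2, 12.5, true, 0));  /* 1 */
	/* same setup with the knob off: unsupported, as before */
	printf("%d\n", vblank_change_ok(false, 2, 12.5, false, 0)); /* 0 */
	return 0;
}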
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
index 8c86b63ddf07..1e557ddcb638 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
@@ -26,7 +26,6 @@
#ifndef __DML20_DISPLAY_RQ_DLG_CALC_H__
#define __DML20_DISPLAY_RQ_DLG_CALC_H__
-#include "../dml_common_defs.h"
#include "../display_rq_dlg_helpers.h"
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
index 0378406bf7e7..0d53e871a9d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
@@ -26,7 +26,6 @@
#ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__
#define __DML20V2_DISPLAY_RQ_DLG_CALC_H__
-#include "../dml_common_defs.h"
#include "../display_rq_dlg_helpers.h"
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index e6617c958bb8..a576eed94d9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -3190,6 +3190,7 @@ static void CalculateFlipSchedule(
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
double HostVMInefficiencyFactor;
+ double VRatioClamped;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor =
@@ -3222,31 +3223,32 @@ static void CalculateFlipSchedule(
*DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
*final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), (MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
+ VRatioClamped = (VRatio < 1.0) ? 1.0 : VRatio;
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dml_min(
- dpte_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / (VRatio / 2));
+ dpte_row_height * LineTime / VRatioClamped,
+ dpte_row_height_chroma * LineTime / (VRatioClamped / 2));
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = dml_min(
- meta_row_height * LineTime / VRatio,
- meta_row_height_chroma * LineTime / (VRatio / 2));
+ meta_row_height * LineTime / VRatioClamped,
+ meta_row_height_chroma * LineTime / (VRatioClamped / 2));
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / (VRatio / 2),
- meta_row_height_chroma * LineTime / (VRatio / 2));
+ dpte_row_height * LineTime / VRatioClamped,
+ meta_row_height * LineTime / VRatioClamped,
+ dpte_row_height_chroma * LineTime / (VRatioClamped / 2),
+ meta_row_height_chroma * LineTime / (VRatioClamped / 2));
}
} else {
if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
+ min_row_time = dpte_row_height * LineTime / VRatioClamped;
} else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ min_row_time = meta_row_height * LineTime / VRatioClamped;
} else {
min_row_time = dml_min(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio);
+ dpte_row_height * LineTime / VRatioClamped,
+ meta_row_height * LineTime / VRatioClamped);
}
}
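
A standalone sketch (made-up numbers) of why VRatioClamped is introduced: for downscale ratios below 1.0, dividing the row height by the raw VRatio inflates min_row_time; clamping the divisor at 1.0 keeps the flip schedule from overestimating the time available per row.

#include <stdio.h>

int main(void)
{
	double dpte_row_height = 8.0, line_time_us = 10.0;
	double vratio = 0.5;                       /* downscaling case */
	double vratio_clamped = vratio < 1.0 ? 1.0 : vratio;

	printf("unclamped min_row_time = %.1f us\n",
	       dpte_row_height * line_time_us / vratio);         /* 160.0 */
	printf("clamped   min_row_time = %.1f us\n",
	       dpte_row_height * line_time_us / vratio_clamped); /* 80.0 */
	return 0;
}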
@@ -5944,7 +5946,7 @@ static void CalculateMetaAndPTETimes(
* PixelPTEReqHeightY[k];
}
dpte_groups_per_row_luma_ub = dml_ceil(
- dpte_row_width_luma_ub[k] / dpte_group_width_luma,
+ (float) dpte_row_width_luma_ub[k] / dpte_group_width_luma,
1);
time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k]
/ PixelClock[k] / dpte_groups_per_row_luma_ub;
@@ -5968,7 +5970,7 @@ static void CalculateMetaAndPTETimes(
* PixelPTEReqHeightC[k];
}
dpte_groups_per_row_chroma_ub = dml_ceil(
- dpte_row_width_chroma_ub[k]
+ (float) dpte_row_width_chroma_ub[k]
/ dpte_group_width_chroma,
1);
time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k]
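
A standalone sketch of the truncation the (float) casts fix (dml_ceil modeled as plain ceil): with two integer operands the quotient truncates before ceil can round up, so the partial PTE group at the end of a row was lost.

#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned int row_width_ub = 10, group_width = 4;

	/* integer division truncates 2.5 -> 2; ceil(2.0) == 2 (wrong) */
	printf("%g\n", ceil((double)(row_width_ub / group_width)));
	/* cast first: ceil(2.5) == 3, one extra group for the remainder */
	printf("%g\n", ceil((float)row_width_ub / group_width));
	return 0;
}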
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index b8ec08e3b7a3..90a5fefef05b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -1490,19 +1490,30 @@ static void dml_rq_dlg_get_dlg_params(
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
- ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
+ if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l >= (unsigned int)dml_pow(2, 13))
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l = (1 << 13) - 1;
+ else
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
* (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
- ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+ if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c >= (unsigned int)dml_pow(2, 13))
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c = (1 << 13) - 1;
+ else
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
< (unsigned int)dml_pow(2, 13));
}
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ if (src->dcc)
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ else
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = 0;
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
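
The register fields being programmed above are 13 bits wide, so on low-refclk configurations the patch saturates the value to (1 << 13) - 1 = 8191 instead of tripping the assert. The saturation idiom, isolated with a parameterized field width:

/* Sketch: saturate a computed value to an N-bit register field instead
 * of asserting. Here N = 13 would match the refcyc_per_pte_group
 * fields above. */
static unsigned int clamp_to_field(unsigned int val, unsigned int bits)
{
        unsigned int max = (1u << bits) - 1;

        return (val > max) ? max : val;
}
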
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
index 83e95f8cbff2..e8f7785e3fc6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
@@ -26,7 +26,7 @@
#ifndef __DML21_DISPLAY_RQ_DLG_CALC_H__
#define __DML21_DISPLAY_RQ_DLG_CALC_H__
-#include "../dml_common_defs.h"
+#include "dm_services.h"
#include "../display_rq_dlg_helpers.h"
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index cf2758ca5b02..c77c3d827e4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -25,8 +25,10 @@
#ifndef __DISPLAY_MODE_LIB_H__
#define __DISPLAY_MODE_LIB_H__
-
-#include "dml_common_defs.h"
+#include "dm_services.h"
+#include "dc_features.h"
+#include "display_mode_structs.h"
+#include "display_mode_enums.h"
#include "display_mode_vba.h"
enum dml_project {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 687010c17324..439ffd04be34 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -118,9 +118,11 @@ struct _vcs_dpi_soc_bounding_box_st {
double urgent_latency_adjustment_fabric_clock_component_us;
double urgent_latency_adjustment_fabric_clock_reference_mhz;
bool disable_dram_clock_change_vactive_support;
+ bool allow_dram_clock_one_display_vactive;
};
struct _vcs_dpi_ip_params_st {
+ bool use_min_dcfclk;
bool gpuvm_enable;
bool hostvm_enable;
unsigned int gpuvm_max_page_table_levels;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 6b525c52124c..b19988f54721 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -224,6 +224,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
mode_lib->vba.DummyPStateCheck;
+ mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive;
mode_lib->vba.Downspreading = soc->downspread_percent;
mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
@@ -280,6 +281,7 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
ip_params_st *ip = &mode_lib->vba.ip;
// IP Parameters
+ mode_lib->vba.UseMinimumRequiredDCFCLK = ip->use_min_dcfclk;
mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
mode_lib->vba.MaxNumOTG = ip->max_num_otg;
mode_lib->vba.MaxNumHDMIFRLOutputs = ip->max_num_hdmi_frl_outputs;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 5d82fc5a7ed7..3f559e725ab1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -27,8 +27,6 @@
#ifndef __DML2_DISPLAY_MODE_VBA_H__
#define __DML2_DISPLAY_MODE_VBA_H__
-#include "dml_common_defs.h"
-
struct display_mode_lib;
void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);
@@ -898,6 +896,8 @@ struct vba_vars_st {
bool dummystring[DC__NUM_DPP__MAX];
double BPP;
enum odm_combine_policy ODMCombinePolicy;
+ bool UseMinimumRequiredDCFCLK;
+ bool AllowDramClockChangeOneDisplayVactive;
};
bool CalculateMinAndMaxPrefetchMode(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
index 1f24db830737..2555ef0358c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -26,7 +26,6 @@
#ifndef __DISPLAY_RQ_DLG_HELPERS_H__
#define __DISPLAY_RQ_DLG_HELPERS_H__
-#include "dml_common_defs.h"
#include "display_mode_lib.h"
/* Function: Printer functions
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
index 304164986bd8..9c06913ad767 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -26,8 +26,6 @@
#ifndef __DISPLAY_RQ_DLG_CALC_H__
#define __DISPLAY_RQ_DLG_CALC_H__
-#include "dml_common_defs.h"
-
struct display_mode_lib;
#include "display_rq_dlg_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index ded71ea82413..02e06c9b3230 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -26,7 +26,6 @@
#ifndef __DML_INLINE_DEFS_H__
#define __DML_INLINE_DEFS_H__
-#include "dml_common_defs.h"
#include "dcn_calc_math.h"
#include "dml_logger.h"
@@ -75,6 +74,18 @@ static inline double dml_floor(double a, double granularity)
return (double) dcn_bw_floor2(a, granularity);
}
+static inline double dml_round(double a)
+{
+ double round_pt = 0.5;
+ double ceil = dml_ceil(a, 1);
+ double floor = dml_floor(a, 1);
+
+ if (a - floor >= round_pt)
+ return ceil;
+ else
+ return floor;
+}
+
static inline int dml_log2(double x)
{
return dml_round((double)dcn_bw_log(x, 2));
@@ -112,7 +123,7 @@ static inline double dml_log(double x, double base)
static inline unsigned int dml_round_to_multiple(unsigned int num,
unsigned int multiple,
- bool up)
+ unsigned char up)
{
unsigned int remainder;
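
The signature change from bool to unsigned char keeps this header self-contained now that dml_common_defs.h is gone. The function body is cut off above; a plausible reading of round-to-multiple, offered as an assumption rather than the actual implementation:

/* Sketch (assumed behavior; the real body is elided above): round num
 * down to the nearest multiple, or up when 'up' is nonzero. */
static inline unsigned int round_to_multiple_sketch(unsigned int num,
                                                    unsigned int multiple,
                                                    unsigned char up)
{
        unsigned int remainder = num % multiple;

        if (remainder == 0)
                return num;

        return up ? num + multiple - remainder : num - remainder;
}
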
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 87d682d25278..0ea6662a1563 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -129,7 +129,7 @@ static bool dsc_line_buff_depth_from_dpcd(int dpcd_line_buff_bit_depth, int *lin
static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
{
switch (dpcd_throughput) {
- case DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED:
+ case DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED:
*throughput = 0;
break;
case DP_DSC_THROUGHPUT_MODE_0_170:
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 6f730b5bfe42..5e384a8a83dc 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -322,3 +322,92 @@ static const struct protection_properties dp_11_protection = {
.process_transaction = dp_11_process_transaction
};
+static const struct protection_properties *get_protection_properties_by_signal(
+ struct dc_link *link,
+ enum signal_type st,
+ enum hdcp_version version)
+{
+ switch (version) {
+ case HDCP_VERSION_14:
+ switch (st) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return &hdmi_14_protection;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ if (link &&
+ (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ link->dpcd_caps.dongle_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER)) {
+ return &non_supported_protection;
+ }
+ return &dp_11_protection;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ return &dp_11_protection;
+ default:
+ return &non_supported_protection;
+ }
+ break;
+ case HDCP_VERSION_22:
+ switch (st) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return &hdmi_14_protection; // TODO: HDCP 2.2
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ return &dp_11_protection; // TODO: HDCP 2.2
+ default:
+ return &non_supported_protection;
+ }
+ break;
+ default:
+ return &non_supported_protection;
+ }
+}
+
+enum hdcp_message_status dc_process_hdcp_msg(
+ enum signal_type signal,
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ enum hdcp_message_status status = HDCP_MESSAGE_FAILURE;
+ uint32_t i = 0;
+
+ const struct protection_properties *protection_props;
+
+ if (!message_info)
+ return HDCP_MESSAGE_UNSUPPORTED;
+
+ if (message_info->msg_id < HDCP_MESSAGE_ID_READ_BKSV ||
+ message_info->msg_id >= HDCP_MESSAGE_ID_MAX)
+ return HDCP_MESSAGE_UNSUPPORTED;
+
+ protection_props =
+ get_protection_properties_by_signal(
+ link,
+ signal,
+ message_info->version);
+
+ if (!protection_props->supported)
+ return HDCP_MESSAGE_UNSUPPORTED;
+
+ if (protection_props->process_transaction(
+ link,
+ message_info)) {
+ status = HDCP_MESSAGE_SUCCESS;
+ } else {
+ for (i = 0; i < message_info->max_retries; i++) {
+ if (protection_props->process_transaction(
+ link,
+ message_info)) {
+ status = HDCP_MESSAGE_SUCCESS;
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
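
dc_process_hdcp_msg() tries the transaction once and then retries up to message_info->max_retries more times, so a message with max_retries = N is attempted at most N + 1 times before HDCP_MESSAGE_FAILURE is returned. A hypothetical caller, to show the contract (the link and buffer here are placeholders, not real driver state):

/* Sketch: hypothetical caller of dc_process_hdcp_msg(). */
struct hdcp_protection_message msg = {
        .version = HDCP_VERSION_14,
        .msg_id = HDCP_MESSAGE_ID_READ_BKSV,
        .max_retries = 3,           /* up to 4 attempts in total */
        .length = 5,
        .data = bksv_buf,           /* assumed 5-byte buffer */
};

if (dc_process_hdcp_msg(SIGNAL_TYPE_HDMI_TYPE_A, link, &msg) !=
    HDCP_MESSAGE_SUCCESS)
        /* report failure / fall back */;
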
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d523fc9547e7..c7fd702a4a87 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -38,6 +38,7 @@
#endif
#include "dwb.h"
#include "mcif_wb.h"
+#include "panel_cntl.h"
#define MAX_CLOCK_SOURCES 7
@@ -92,6 +93,8 @@ struct clk_bw_params;
struct resource_funcs {
void (*destroy)(struct resource_pool **pool);
void (*link_init)(struct dc_link *link);
+ struct panel_cntl*(*panel_cntl_create)(
+ const struct panel_cntl_init_data *panel_cntl_init_data);
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
bool (*validate_bandwidth)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index d607b3191954..e8ce8c85adf1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -27,27 +27,17 @@
#include "dm_services_types.h"
-struct abm_backlight_registers {
- unsigned int BL_PWM_CNTL;
- unsigned int BL_PWM_CNTL2;
- unsigned int BL_PWM_PERIOD_CNTL;
- unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
-};
-
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
bool dmcu_is_running;
- /* registers setting needs to be saved and restored at InitBacklight */
- struct abm_backlight_registers stored_backlight_registers;
};
struct abm_funcs {
- void (*abm_init)(struct abm *abm);
+ void (*abm_init)(struct abm *abm, uint32_t back_light);
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
- bool (*set_abm_immediate_disable)(struct abm *abm);
- bool (*set_pipe)(struct abm *abm, unsigned int controller_id);
- bool (*init_backlight)(struct abm *abm);
+ bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
+ bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
/* backlight_pwm_u16_16 is unsigned 32 bit,
* 16 bit integer + 16 fractional, where 1.0 is max backlight value.
@@ -56,10 +46,13 @@ struct abm_funcs {
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
- bool use_smooth_brightness);
+ unsigned int panel_inst);
unsigned int (*get_current_backlight)(struct abm *abm);
unsigned int (*get_target_backlight)(struct abm *abm);
+ bool (*init_abm_config)(struct abm *abm,
+ const char *src,
+ unsigned int bytes);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index f5dd0cc73c63..47a566d82d6e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -144,6 +144,8 @@ struct hubbub_funcs {
void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
+
+ void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
new file mode 100644
index 000000000000..f9ab5abb6462
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * panel_cntl.h
+ *
+ * Created on: Oct 6, 2015
+ * Author: yonsun
+ */
+
+#ifndef DC_PANEL_CNTL_H_
+#define DC_PANEL_CNTL_H_
+
+#include "dc_types.h"
+
+#define MAX_BACKLIGHT_LEVEL 0xFFFF
+
+struct panel_cntl_backlight_registers {
+ unsigned int BL_PWM_CNTL;
+ unsigned int BL_PWM_CNTL2;
+ unsigned int BL_PWM_PERIOD_CNTL;
+ unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+struct panel_cntl_funcs {
+ void (*destroy)(struct panel_cntl **panel_cntl);
+ uint32_t (*hw_init)(struct panel_cntl *panel_cntl);
+ bool (*is_panel_backlight_on)(struct panel_cntl *panel_cntl);
+ bool (*is_panel_powered_on)(struct panel_cntl *panel_cntl);
+ void (*store_backlight_level)(struct panel_cntl *panel_cntl);
+ void (*driver_set_backlight)(struct panel_cntl *panel_cntl,
+ uint32_t backlight_pwm_u16_16);
+};
+
+struct panel_cntl_init_data {
+ struct dc_context *ctx;
+ uint32_t inst;
+};
+
+struct panel_cntl {
+ const struct panel_cntl_funcs *funcs;
+ struct dc_context *ctx;
+ uint32_t inst;
+ /* register settings need to be saved and restored at InitBacklight */
+ struct panel_cntl_backlight_registers stored_backlight_registers;
+};
+
+#endif /* DC_PANEL_CNTL_H_ */
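
The new panel_cntl object carries the per-panel backlight state that used to live in struct abm (note stored_backlight_registers moving here), and resource pools construct one through the new panel_cntl_create() hook added to resource_funcs. A hedged sketch of how a hardware backend might implement that hook; the dcn10_-prefixed names are hypothetical, not the actual implementation:

/* Sketch: hypothetical backend constructor wired into resource_funcs. */
static struct panel_cntl *dcn10_panel_cntl_create(
        const struct panel_cntl_init_data *init_data)
{
        struct panel_cntl *panel_cntl =
                kzalloc(sizeof(*panel_cntl), GFP_KERNEL);

        if (!panel_cntl)
                return NULL;

        panel_cntl->funcs = &dcn10_panel_cntl_funcs; /* assumed funcs table */
        panel_cntl->ctx = init_data->ctx;
        panel_cntl->inst = init_data->inst;

        return panel_cntl;
}
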
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index e5e7d94026fc..f803191e3134 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -117,6 +117,9 @@ struct crc_params {
enum crc_selection selection;
+ uint8_t dsc_mode;
+ uint8_t odm_mode;
+
bool continuous_mode;
bool enable;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index fecc80c47c26..2947d1b15512 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -173,6 +173,8 @@ struct scaler_data {
struct scaling_taps taps;
struct rect viewport;
struct rect viewport_c;
+ struct rect viewport_unadjusted;
+ struct rect viewport_c_unadjusted;
struct rect recout;
struct scaling_ratios ratios;
struct scl_inits inits;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index e57467d99d66..8e72f077e552 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -75,9 +75,13 @@ struct hw_sequencer_funcs {
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
+ void (*edp_backlight_control)(
+ struct dc_link *link,
+ bool enable);
void (*program_triplebuffer)(const struct dc *dc,
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
+ void (*power_down)(struct dc *dc);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
@@ -92,6 +96,11 @@ struct hw_sequencer_funcs {
void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
struct crtc_position *position);
int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx);
+ void (*calc_vupdate_position)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ uint32_t *start_line,
+ uint32_t *end_line);
void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
int group_size, struct pipe_ctx *grouped_pipes[]);
void (*enable_timing_synchronization)(struct dc *dc,
@@ -188,6 +197,12 @@ struct hw_sequencer_funcs {
unsigned int bufSize, unsigned int mask);
void (*clear_status_bits)(struct dc *dc, unsigned int mask);
+ bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+
+ void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx);
+
};
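
Both new entries are optional members of hw_sequencer_funcs, so callers are expected to check the pointer before dispatching, matching how the rest of the hwss table is consumed. A minimal sketch of the call-site pattern:

/* Sketch: optional-hook dispatch, as used throughout the hwss layer. */
if (dc->hwss.set_abm_immediate_disable)
        dc->hwss.set_abm_immediate_disable(pipe_ctx);

if (dc->hwss.set_backlight_level &&
    !dc->hwss.set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp))
        /* handle failure */;
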
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 52a26e6be066..36e906bb6bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -100,8 +100,6 @@ struct hwseq_private_funcs {
struct dc *dc);
void (*edp_backlight_control)(struct dc_link *link,
bool enable);
- bool (*is_panel_backlight_on)(struct dc_link *link);
- bool (*is_panel_powered_on)(struct dc_link *link);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index ca4c36c0c9bc..a9be495af922 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -138,9 +138,6 @@ struct pipe_ctx *find_idle_secondary_pipe(
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe);
-bool resource_is_stream_unchanged(
- struct dc_state *old_context, struct dc_stream_state *stream);
-
bool resource_validate_attach_surfaces(
const struct dc_validation_set set[],
int set_count,
@@ -180,6 +177,8 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
+int get_num_mpc_splits(struct pipe_ctx *pipe);
+
int get_num_odm_splits(struct pipe_ctx *pipe);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 3464b2d5b89a..348e9a600a72 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -84,6 +84,14 @@ static void virtual_link_encoder_destroy(struct link_encoder **enc)
*enc = NULL;
}
+static void virtual_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ /* Set Default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+ *link_settings = max_link_cap;
+}
static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
.validate_output_with_stream =
@@ -94,6 +102,7 @@ static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
.enable_dp_output = virtual_link_encoder_enable_dp_output,
.enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
.disable_output = virtual_link_encoder_disable_output,
+ .get_max_link_cap = virtual_link_encoder_get_max_link_cap,
.dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index c2671f2616c8..26d94eb5ab58 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -64,10 +64,11 @@
* other component within DAL.
*/
-#include "dmub_types.h"
-#include "dmub_cmd.h"
-#include "dmub_gpint_cmd.h"
-#include "dmub_rb.h"
+#include "inc/dmub_types.h"
+#include "inc/dmub_cmd.h"
+#include "inc/dmub_gpint_cmd.h"
+#include "inc/dmub_cmd_dal.h"
+#include "inc/dmub_rb.h"
#if defined(__cplusplus)
extern "C" {
@@ -75,7 +76,6 @@ extern "C" {
/* Forward declarations */
struct dmub_srv;
-struct dmub_cmd_header;
struct dmub_srv_common_regs;
/* enum dmub_status - return code for dmcub functions */
@@ -151,6 +151,7 @@ struct dmub_srv_region_params {
uint32_t inst_const_size;
uint32_t bss_data_size;
uint32_t vbios_size;
+ const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data;
};
@@ -457,7 +458,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
* DMUB_STATUS_INVALID - unspecified error
*/
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd);
+ const union dmub_rb_cmd *cmd);
/**
* dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
@@ -565,6 +566,16 @@ dmub_srv_send_gpint_command(struct dmub_srv *dmub,
enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
uint32_t *response);
+/**
+ * dmub_flush_buffer_mem() - Read back entire frame buffer region.
+ * This ensures that the write from x86 has been flushed and will not
+ * hang the DMCUB.
+ * @fb: frame buffer to flush
+ *
+ * Can be called after software initialization.
+ */
+void dmub_flush_buffer_mem(const struct dmub_fb *fb);
+
#if defined(__cplusplus)
}
#endif
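
With dmub_srv_cmd_queue() now taking the full union dmub_rb_cmd, callers build the whole command on the stack and hand it over by pointer; the ring buffer copies a fixed-size slot per command, so the union does the framing. A hedged usage sketch (the command type value and the src_addr/bytes locals are assumptions for illustration):

/* Sketch: queueing an ABM init-config command through the new API. */
union dmub_rb_cmd cmd = { 0 };

cmd.abm_init_config.header.type = DMUB_CMD__ABM;         /* assumed enum */
cmd.abm_init_config.abm_init_config_data.src = src_addr; /* union dmub_addr */
cmd.abm_init_config.abm_init_config_data.bytes = bytes;

if (dmub_srv_cmd_queue(dmub, &cmd) == DMUB_STATUS_OK)
        dmub_srv_cmd_execute(dmub);
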
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 10b5fa9d2588..599bf2055bcb 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -228,6 +228,7 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t smu_optimizations_en;
uint8_t frame_delay;
uint8_t frame_cap_ind;
+ struct dmub_psr_debug_flags debug;
};
struct dmub_rb_cmd_psr_copy_settings {
@@ -260,6 +261,8 @@ struct dmub_rb_cmd_psr_set_version {
struct dmub_cmd_abm_set_pipe_data {
uint32_t ramping_boundary;
uint32_t otg_inst;
+ uint32_t panel_inst;
+ uint32_t set_pipe_option;
};
struct dmub_rb_cmd_abm_set_pipe {
@@ -303,6 +306,16 @@ struct dmub_rb_cmd_abm_set_pwm_frac {
struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
};
+struct dmub_cmd_abm_init_config_data {
+ union dmub_addr src;
+ uint16_t bytes;
+};
+
+struct dmub_rb_cmd_abm_init_config {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_init_config_data abm_init_config_data;
+};
+
union dmub_rb_cmd {
struct dmub_rb_cmd_read_modify_write read_modify_write;
struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
@@ -324,6 +337,7 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_set_level abm_set_level;
struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
+ struct dmub_rb_cmd_abm_init_config abm_init_config;
};
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index d37535d21928..e42de9ded275 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,17 +32,16 @@
*/
enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_SET_VERSION = 0,
- DMUB_CMD__PSR_COPY_SETTINGS = 1,
- DMUB_CMD__PSR_ENABLE = 2,
- DMUB_CMD__PSR_DISABLE = 3,
- DMUB_CMD__PSR_SET_LEVEL = 4,
+ DMUB_CMD__PSR_SET_VERSION = 0,
+ DMUB_CMD__PSR_COPY_SETTINGS = 1,
+ DMUB_CMD__PSR_ENABLE = 2,
+ DMUB_CMD__PSR_DISABLE = 3,
+ DMUB_CMD__PSR_SET_LEVEL = 4,
};
enum psr_version {
- PSR_VERSION_1 = 0x10, // PSR Version 1
- PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
- PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+ PSR_VERSION_1 = 0,
+ PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
enum dmub_cmd_abm_type {
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
index df875fdd2ab0..2ae48c18bb5b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -33,8 +33,6 @@
extern "C" {
#endif
-struct dmub_cmd_header;
-
struct dmub_rb_init_params {
void *ctx;
void *base_address;
@@ -71,7 +69,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
}
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
- const struct dmub_cmd_header *cmd)
+ const union dmub_rb_cmd *cmd)
{
uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
const uint64_t *src = (const uint64_t *)cmd;
@@ -93,7 +91,7 @@ static inline bool dmub_rb_push_front(struct dmub_rb *rb,
}
static inline bool dmub_rb_front(struct dmub_rb *rb,
- struct dmub_cmd_header *cmd)
+ union dmub_rb_cmd *cmd)
{
uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
index 41d524b0db2f..bed5b023a396 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
@@ -49,6 +49,12 @@ extern "C" {
#define dmub_udelay(microseconds) udelay(microseconds)
#endif
+/* Maximum number of streams on any ASIC. */
+#define DMUB_MAX_STREAMS 6
+
+/* Maximum number of planes on any ASIC. */
+#define DMUB_MAX_PLANES 6
+
union dmub_addr {
struct {
uint32_t low_part;
@@ -57,6 +63,11 @@ union dmub_addr {
uint64_t quad_part;
};
+struct dmub_psr_debug_flags {
+ uint8_t visual_confirm : 1;
+ uint8_t reserved : 7;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 63bb9e2c81de..edc73d6d7ba2 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -23,7 +23,7 @@
*
*/
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn20.h"
@@ -186,14 +186,22 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
- dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
-
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
- REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
- REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
- DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
- DMCUB_REGION3_CW2_ENABLE, 1);
+ if (cw2->region.base != cw2->region.top) {
+ dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset,
+ &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+ REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+ DMCUB_REGION3_CW2_ENABLE, 1);
+ } else {
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_TOP_ADDRESS, 0);
+ }
dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
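
CW2 is the only window that may legitimately be empty here, so the new branch zeroes its registers and leaves it disabled rather than enabling a zero-length region. Reduced to its shape (program_cw/clear_cw are hypothetical helpers):

/* Sketch: program a DMCUB cache window only for a non-empty region;
 * an empty one (base == top) stays disabled with zeroed registers. */
if (cw->region.base != cw->region.top)
        program_cw(cw);
else
        clear_cw();
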
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
index 5bed9fcd6b5c..e8f488232e34 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -23,7 +23,7 @@
*
*/
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn21.h"
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
index 4094eca212f0..ca0c8a54b635 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
@@ -24,7 +24,7 @@
*/
#include "dmub_reg.h"
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
struct dmub_reg_value_masks {
uint32_t value;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index ce32cc7933c4..0e3751d94cb0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -23,7 +23,7 @@
*
*/
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
#include "dmub_fw_meta.h"
@@ -70,7 +70,7 @@ static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
return (val + factor - 1) / factor * factor;
}
-static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+void dmub_flush_buffer_mem(const struct dmub_fb *fb)
{
const uint8_t *base = (const uint8_t *)fb->cpu_addr;
uint8_t buf[64];
@@ -91,18 +91,32 @@ static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
}
static const struct dmub_fw_meta_info *
-dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
+dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
{
const union dmub_fw_meta *meta;
+ const uint8_t *blob = NULL;
+ uint32_t blob_size = 0;
+ uint32_t meta_offset = 0;
+
+ if (params->fw_bss_data) {
+ /* Legacy metadata region. */
+ blob = params->fw_bss_data;
+ blob_size = params->bss_data_size;
+ meta_offset = DMUB_FW_META_OFFSET;
+ } else if (params->fw_inst_const) {
+ /* Combined metadata region. */
+ blob = params->fw_inst_const;
+ blob_size = params->inst_const_size;
+ meta_offset = 0;
+ }
- if (fw_bss_data == NULL)
+ if (!blob || !blob_size)
return NULL;
- if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
+ if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
return NULL;
- meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
- DMUB_FW_META_OFFSET -
+ meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
sizeof(union dmub_fw_meta));
if (meta->info.magic_value != DMUB_FW_META_MAGIC)
@@ -247,8 +261,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
mail->base = dmub_align(bios->top, 256);
mail->top = mail->base + DMUB_MAILBOX_SIZE;
- fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
- params->bss_data_size);
+ fw_info = dmub_get_fw_meta_info(params);
if (fw_info) {
fw_state_size = fw_info->fw_region_size;
@@ -449,7 +462,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
}
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd)
+ const union dmub_rb_cmd *cmd)
{
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
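
dmub_get_fw_meta_info() now looks for the metadata trailer in whichever blob the caller supplies: DMUB_FW_META_OFFSET bytes before the end of the legacy BSS/data blob, or flush against the end of a combined inst_const blob (offset 0). The address arithmetic, isolated:

/* Sketch: locate a fixed-size metadata trailer placed 'offset' bytes
 * before the end of a firmware blob. Mirrors the lookup above. */
static const union dmub_fw_meta *find_meta(const uint8_t *blob,
                                           uint32_t blob_size,
                                           uint32_t offset)
{
        if (!blob || blob_size < sizeof(union dmub_fw_meta) + offset)
                return NULL;

        return (const union dmub_fw_meta *)
                (blob + blob_size - offset - sizeof(union dmub_fw_meta));
}
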
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h
index f31e6befc8d6..42229b4effdc 100644
--- a/drivers/gpu/drm/amd/display/include/hdcp_types.h
+++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h
@@ -83,6 +83,12 @@ enum hdcp_link {
HDCP_LINK_SECONDARY
};
+enum hdcp_message_status {
+ HDCP_MESSAGE_SUCCESS,
+ HDCP_MESSAGE_FAILURE,
+ HDCP_MESSAGE_UNSUPPORTED
+};
+
struct hdcp_protection_message {
enum hdcp_version version;
/* relevant only for DVI */
@@ -91,6 +97,7 @@ struct hdcp_protection_message {
uint32_t length;
uint8_t max_retries;
uint8_t *data;
+ enum hdcp_message_status status;
};
#endif
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 6e008de25629..02c23b04d34b 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,8 +40,6 @@ struct dc_state;
*
*/
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
-
void pre_surface_trace(
struct dc *dc,
const struct dc_plane_state *const *plane_states,
@@ -102,14 +100,12 @@ void context_clock_trace(
#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
do { \
(void)(link); \
- dc_conn_log_hex_linux(hex_data, hex_len); \
DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
} while (0)
#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
do { \
(void)(link); \
- dc_conn_log_hex_linux(hex_data, hex_len); \
DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
} while (0)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index cac09d500fda..9431b48aecb4 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1782,7 +1782,8 @@ rgb_user_alloc_fail:
return ret;
}
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+ struct dc_transfer_func *input_tf,
const struct dc_gamma *ramp, bool mapUserRamp)
{
struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
@@ -1801,11 +1802,29 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
/* we can use hardcoded curve for plain SRGB TF
* If linear, it's bypass if on user ramp
*/
- if (input_tf->type == TF_TYPE_PREDEFINED &&
- (input_tf->tf == TRANSFER_FUNCTION_SRGB ||
- input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
- !mapUserRamp)
- return true;
+ if (input_tf->type == TF_TYPE_PREDEFINED) {
+ if ((input_tf->tf == TRANSFER_FUNCTION_SRGB ||
+ input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
+ !mapUserRamp)
+ return true;
+
+ if (dc_caps != NULL &&
+ dc_caps->dpp.dcn_arch == 1) {
+
+ if (input_tf->tf == TRANSFER_FUNCTION_PQ &&
+ dc_caps->dpp.dgam_rom_caps.pq == 1)
+ return true;
+
+ if (input_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
+ dc_caps->dpp.dgam_rom_caps.gamma2_2 == 1)
+ return true;
+
+ // HLG OOTF not accounted for
+ if (input_tf->tf == TRANSFER_FUNCTION_HLG &&
+ dc_caps->dpp.dgam_rom_caps.hlg == 1)
+ return true;
+ }
+ }
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
@@ -1902,7 +1921,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
- if (ramp->type == GAMMA_CUSTOM)
+ if (ramp && ramp->type == GAMMA_CUSTOM)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 9994817a9a03..7f56226ba77a 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -30,6 +30,7 @@ struct dc_transfer_func;
struct dc_gamma;
struct dc_transfer_func_distributed_points;
struct dc_rgb_fixed;
+struct dc_color_caps;
enum dc_transfer_func_predefined;
/* For SetRegamma ADL interface support
@@ -100,7 +101,8 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
const struct freesync_hdr_tf_params *fs_params);
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+ struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index c33454a9e0b4..eb7421e83b86 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -443,7 +443,7 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync,
return true;
} else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED &&
in_vrr->fixed.target_refresh_in_uhz !=
- in_config->min_refresh_in_uhz) {
+ in_config->fixed_refresh_in_uhz) {
return true;
} else if (in_vrr->min_refresh_in_uhz != min_refresh_in_uhz) {
return true;
@@ -491,7 +491,7 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
return false;
}
-static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
+static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket)
{
/* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
@@ -523,14 +523,74 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
vrr->state == VRR_STATE_ACTIVE_FIXED)
infopacket->sb[6] |= 0x04;
+ // For v1 and v2 infoframes, program the nominal range when not in FreeSync mode, otherwise the full range
/* PB7 = FreeSync Minimum refresh rate (Hz) */
- infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+ } else {
+ infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ }
/* PB8 = FreeSync Maximum refresh rate (Hz)
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ //FreeSync HDR
+ infopacket->sb[9] = 0;
+ infopacket->sb[10] = 0;
+}
+
+static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
+ struct dc_info_packet *infopacket)
+{
+ /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+ infopacket->sb[1] = 0x1A;
+
+ /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+ infopacket->sb[2] = 0x00;
+
+ /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+ infopacket->sb[3] = 0x00;
+
+ /* PB4 = Reserved */
+
+ /* PB5 = Reserved */
+
+ /* PB6 = [Bits 7:3 = Reserved] */
+
+ /* PB6 = [Bit 0 = FreeSync Supported] */
+ if (vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x01;
+
+ /* PB6 = [Bit 1 = FreeSync Enabled] */
+ if (vrr->state != VRR_STATE_DISABLED &&
+ vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x02;
+
+ /* PB6 = [Bit 2 = FreeSync Active] */
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ vrr->state == VRR_STATE_ACTIVE_FIXED)
+ infopacket->sb[6] |= 0x04;
+
+ if (vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+ /* PB8 = FreeSync Maximum refresh rate (Hz) */
+ infopacket->sb[8] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+ } else if (vrr->state == VRR_STATE_ACTIVE_VARIABLE) {
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+ /* PB8 = FreeSync Maximum refresh rate (Hz) */
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ } else {
+ // Non-FreeSync case: program the nominal range
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ /* PB8 = FreeSync Maximum refresh rate (Hz) */
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ }
//FreeSync HDR
infopacket->sb[9] = 0;
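
All of the PB7/PB8 programming above uses the same round-to-nearest conversion from micro-Hertz to whole Hertz: add half a Hertz (500000 uHz) before the integer divide. As a helper:

/* Sketch: round a refresh rate in micro-Hertz to the nearest whole Hz,
 * as done for infopacket bytes PB7/PB8 above. */
static unsigned char refresh_uhz_to_hz(unsigned int refresh_in_uhz)
{
        return (unsigned char)((refresh_in_uhz + 500000) / 1000000);
}
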
@@ -678,7 +738,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
unsigned int payload_size = 0;
build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
- build_vrr_infopacket_data(vrr, infopacket);
+ build_vrr_infopacket_data_v1(vrr, infopacket);
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
@@ -692,7 +752,24 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
unsigned int payload_size = 0;
build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
- build_vrr_infopacket_data(vrr, infopacket);
+ build_vrr_infopacket_data_v1(vrr, infopacket);
+
+ build_vrr_infopacket_fs2_data(app_tf, infopacket);
+
+ build_vrr_infopacket_checksum(&payload_size, infopacket);
+
+ infopacket->valid = true;
+}
+
+static void build_vrr_infopacket_v3(enum signal_type signal,
+ const struct mod_vrr_params *vrr,
+ enum color_transfer_func app_tf,
+ struct dc_info_packet *infopacket)
+{
+ unsigned int payload_size = 0;
+
+ build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
+ build_vrr_infopacket_data_v3(vrr, infopacket);
build_vrr_infopacket_fs2_data(app_tf, infopacket);
@@ -717,11 +794,14 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
return;
switch (packet_type) {
- case PACKET_TYPE_FS2:
+ case PACKET_TYPE_FS_V3:
+ build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
+ break;
+ case PACKET_TYPE_FS_V2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
break;
case PACKET_TYPE_VRR:
- case PACKET_TYPE_FS1:
+ case PACKET_TYPE_FS_V1:
default:
build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
}
@@ -793,6 +873,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
calc_duration_in_us_from_refresh_in_uhz(
(unsigned int)max_refresh_in_uhz);
+ if (in_config->state == VRR_STATE_ACTIVE_FIXED)
+ in_out_vrr->fixed_refresh_in_uhz = in_config->fixed_refresh_in_uhz;
+ else
+ in_out_vrr->fixed_refresh_in_uhz = 0;
+
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
@@ -843,7 +928,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->min_refresh_in_uhz);
} else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
in_out_vrr->fixed.target_refresh_in_uhz =
- in_out_vrr->min_refresh_in_uhz;
+ in_out_vrr->fixed_refresh_in_uhz;
if (in_out_vrr->fixed.ramping_active &&
in_out_vrr->fixed.fixed_active) {
/* Do not update vtotals if ramping is already active
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index cc1d3f470b99..e9fbd94f8635 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -328,7 +328,8 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* add display to connection */
hdcp->connection.link = *link;
*display_container = *display;
- status = mod_hdcp_add_display_to_topology(hdcp, display->index);
+ status = mod_hdcp_add_display_to_topology(hdcp, display_container);
+
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
@@ -374,7 +375,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
status = mod_hdcp_remove_display_from_topology(hdcp, index);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ memset(display, 0, sizeof(struct mod_hdcp_display));
/* request authentication when connection is not reset */
if (current_state(hdcp) != HDCP_UNINITIALIZED)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 5cb4546be0ef..b0cefed2eb02 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
/* psp functions */
enum mod_hdcp_status mod_hdcp_add_display_to_topology(
- struct mod_hdcp *hdcp, uint8_t index);
+ struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
@@ -357,8 +357,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(
struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
- enum mod_hdcp_encryption_status *encryption_status);
/* ddc functions */
enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
@@ -503,11 +501,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
}
-static inline uint8_t is_display_added(struct mod_hdcp_display *display)
-{
- return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-}
-
static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -515,34 +508,23 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
{
- uint8_t added_count = 0;
+ uint8_t active_count = 0;
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_active(&hdcp->displays[i]))
- added_count++;
- return added_count;
-}
-
-static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
-{
- uint8_t added_count = 0;
- uint8_t i;
-
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->displays[i]))
- added_count++;
- return added_count;
+ active_count++;
+ return active_count;
}
-static inline struct mod_hdcp_display *get_first_added_display(
+static inline struct mod_hdcp_display *get_first_active_display(
struct mod_hdcp *hdcp)
{
uint8_t i;
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->displays[i])) {
+ if (is_display_active(&hdcp->displays[i])) {
display = &hdcp->displays[i];
break;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 37c8c05497d6..f244b72e74e0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 491c00f48026..549c113abcf7 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 44956f9ba178..fb6a19d020f9 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -98,8 +98,8 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
- case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
- return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
@@ -158,8 +158,8 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
- case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION:
- return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index c2929815c3ee..fb1161dd7ea8 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -51,12 +51,15 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct ta_dtm_shared_memory *dtm_cmd;
struct mod_hdcp_display *display =
get_active_display_at_index(hdcp, index);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- if (!display || !is_display_added(display))
+ if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ mutex_lock(&psp->dtm_context.mutex);
+
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -66,34 +69,33 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ } else {
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+ }
- display->state = MOD_HDCP_DISPLAY_ACTIVE;
- HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
-
- return MOD_HDCP_STATUS_SUCCESS;
-
+ mutex_unlock(&psp->dtm_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
- uint8_t index)
+ struct mod_hdcp_display *display)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display =
- get_active_display_at_index(hdcp, index);
struct mod_hdcp_link *link = &hdcp->connection.link;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
- if (!display || is_display_added(display))
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+ mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -113,21 +115,24 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
- display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ } else {
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->dtm_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
@@ -135,6 +140,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
}
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ mutex_lock(&psp->hdcp_context.mutex);
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
@@ -144,16 +151,18 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
-
- hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
- memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
- sizeof(hdcp->auth.msg.hdcp1.aksv));
- memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
- sizeof(hdcp->auth.msg.hdcp1.an));
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+ } else {
+ hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+ memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+ memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
@@ -162,7 +171,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
uint8_t i = 0;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -171,27 +182,30 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
-
- HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_encryption_enabled(
- &hdcp->displays[i])) {
- hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_HDCP1_DISABLED_TRACE(hdcp,
- hdcp->displays[i].index);
- }
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+ } else {
+ HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(&hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP1_DISABLED_TRACE(
+ hdcp, hdcp->displays[i].index);
+ }
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -206,10 +220,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
- if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
/* needs second part of authentication */
hdcp->connection.is_repeater = 1;
@@ -219,20 +232,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
hdcp->connection.is_hdcp1_revoked = 1;
- return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
+ status = MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
} else
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -241,14 +256,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
-
- if (!is_dp_mst_hdcp(hdcp)) {
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE;
+ } else if (!is_dp_mst_hdcp(hdcp)) {
display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
}
- return MOD_HDCP_STATUS_SUCCESS;
+
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
@@ -257,6 +273,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -287,6 +304,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
}
+ mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
@@ -296,14 +314,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
int i = 0;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->displays[i].adjust.disable)
- continue;
+ if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+ continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -313,21 +332,26 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+ break;
+ }
hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
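
Inside the per-display loops the early return becomes a break for the same reason: control must still reach the unlock at the bottom of the function. The control flow, distilled:

	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
		/* ... issue the per-display command ... */
		if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
			status = MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
			break;	/* stop early but still fall through to the unlock */
		}
		hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
		HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
	}

	mutex_unlock(&psp->hdcp_context.mutex);
	return status;
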
enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -339,12 +363,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+ hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level != 1)
+ status = MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
- return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
- ? MOD_HDCP_STATUS_SUCCESS
- : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
@@ -364,19 +388,23 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
return MOD_HDCP_STATUS_FAILURE;
}
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
- memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ mutex_lock(&psp->hdcp_context.mutex);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
@@ -393,12 +421,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
- hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
+ else
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
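
Note the ordering in mod_hdcp_hdcp2_create_session: the TA-initialized and display-present checks stay ahead of mutex_lock(), since failing them touches no shared state and therefore needs no unlock path:

	if (!psp->hdcp_context.hdcp_initialized)
		return MOD_HDCP_STATUS_FAILURE;		/* nothing locked yet */
	if (!display)
		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;

	mutex_lock(&psp->hdcp_context.mutex);		/* shared buffer access starts here */
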
enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
@@ -406,7 +436,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
uint8_t i = 0;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -415,20 +447,21 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
-
- HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_encryption_enabled(
- &hdcp->displays[i])) {
- hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_HDCP2_DISABLED_TRACE(hdcp,
- hdcp->displays[i].index);
- }
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
+ } else {
+ HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(&hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP2_DISABLED_TRACE(
+ hdcp, hdcp->displays[i].index);
+ }
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
@@ -437,7 +470,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -452,12 +487,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
-
- memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.ake_init));
+ status = MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
+ else
+ memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_init));
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
@@ -466,7 +502,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -488,26 +526,32 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
-
- memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
- &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
- sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
-
- if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
- hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
- hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
- return MOD_HDCP_STATUS_SUCCESS;
- } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
- hdcp->connection.is_hdcp2_revoked = 1;
- return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+ } else {
+ memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+
+ memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+
+ if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored =
+ msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater =
+ msg_out->process.is_repeater ? 1 : 0;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+ }
}
-
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -516,7 +560,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -543,16 +589,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
-
- if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+ else if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
else if (!hdcp->connection.is_km_stored &&
- msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
-
+ msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
@@ -561,7 +606,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -577,12 +624,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.lc_init));
+ status = MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
+ else
+ memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.lc_init));
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
@@ -591,7 +639,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -610,13 +660,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
-
- if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+ msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
@@ -625,7 +674,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -642,48 +693,55 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.ske_eks));
- msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks);
-
- if (is_dp_hdcp(hdcp)) {
- memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
- &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
- sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
+ } else {
+ memcpy(hdcp->auth.msg.hdcp2.ske_eks,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+ msg_out->prepare.msg1_desc.msg_size =
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks);
+
+ if (is_dp_hdcp(hdcp)) {
+ memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+ }
}
+ mutex_unlock(&psp->hdcp_context.mutex);
- return MOD_HDCP_STATUS_SUCCESS;
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
-
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
- memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ mutex_lock(&psp->hdcp_context.mutex);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
-
- if (!is_dp_mst_hdcp(hdcp)) {
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
+ } else if (!is_dp_mst_hdcp(hdcp)) {
display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
}
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
@@ -692,6 +750,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -712,23 +773,26 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
-
- if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
- hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
- hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
- return MOD_HDCP_STATUS_SUCCESS;
- } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
- hdcp->connection.is_hdcp2_revoked = 1;
- return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+ } else {
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+
+ if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+ }
}
-
-
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -737,7 +801,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
uint8_t i;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -747,9 +813,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->displays[i].adjust.disable)
- continue;
+ if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+ continue;
+
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
@@ -763,8 +829,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
- return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
- : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION;
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
@@ -774,7 +845,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -789,15 +862,17 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
-
- hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
-
- memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
+ } else {
+ hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
- return MOD_HDCP_STATUS_SUCCESS;
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+ }
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
@@ -806,7 +881,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -825,38 +902,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
- (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- ? MOD_HDCP_STATUS_SUCCESS
- : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
-}
-
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
- enum mod_hdcp_encryption_status *encryption_status)
-{
- struct psp_context *psp = hdcp->config.psp.handle;
- struct ta_hdcp_shared_memory *hdcp_cmd;
-
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
-
- memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
- hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id;
- hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
- hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-
- psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
-
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_FAILURE;
-
- if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) {
- if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1)
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
- else
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
- }
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+ msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
+
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dbe7835aabcf..0ba3cf7f336a 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -83,6 +83,8 @@ struct mod_freesync_config {
bool btr;
unsigned int min_refresh_in_uhz;
unsigned int max_refresh_in_uhz;
+ unsigned int fixed_refresh_in_uhz;
+
};
struct mod_vrr_params_btr {
@@ -112,6 +114,7 @@ struct mod_vrr_params {
uint32_t max_duration_in_us;
uint32_t max_refresh_in_uhz;
uint32_t min_duration_in_us;
+ uint32_t fixed_refresh_in_uhz;
struct dc_crtc_timing_adjust adjust;
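
fixed_refresh_in_uhz follows the existing convention of this header: rates are carried in micro-hertz, so 60 Hz is stored as 60,000,000. A sketch of the corresponding duration conversion, presumably matching how the min/max durations are derived elsewhere in the freesync module (helper name hypothetical; assumes a nonzero rate):

	#include <linux/math64.h>

	/* 1 s = 10^6 us and the rate is in 10^-6 Hz, so the scale factor is 10^12 */
	static unsigned int duration_us_from_refresh_uhz(unsigned int refresh_in_uhz)
	{
		return (unsigned int)div64_u64(1000000000000ULL, refresh_in_uhz);
	}
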
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index c088602bc1a0..eed560eecbab 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -60,7 +60,7 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
@@ -90,7 +90,7 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
@@ -117,7 +117,6 @@ enum mod_hdcp_operation_mode {
enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_INACTIVE = 0,
MOD_HDCP_DISPLAY_ACTIVE,
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
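
With MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED gone, the separate "added" step is folded into activation and the remaining states form a single ladder, which is why the destroy-session hunks above park encrypted displays back at MOD_HDCP_DISPLAY_ACTIVE. A summary of the resulting transitions (the helper is hypothetical, not part of this patch):

	/*
	 * INACTIVE -> ACTIVE              display joins the HDCP topology
	 * ACTIVE -> ENCRYPTION_ENABLED    *_enable_encryption() succeeded
	 * ENCRYPTION_ENABLED -> ACTIVE    *_destroy_session() tore encryption down
	 * any -> INACTIVE                 display removed
	 */
	static bool is_display_active_or_encrypted(const struct mod_hdcp_display *display)
	{
		return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
	}
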
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index fe2117904329..198c0e64d13a 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -40,8 +40,9 @@ enum color_transfer_func {
enum vrr_packet_type {
PACKET_TYPE_VRR,
- PACKET_TYPE_FS1,
- PACKET_TYPE_FS2,
+ PACKET_TYPE_FS_V1,
+ PACKET_TYPE_FS_V2,
+ PACKET_TYPE_FS_V3,
PACKET_TYPE_VTEM
};
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index cff3ab15fc0c..7cd8a43d1889 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -144,7 +144,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/*VSC packet set to 2 when DP revision >= 1.2*/
- if (stream->psr_version != 0)
+ if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e75a4bb94488..8c37bcc27132 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -24,6 +24,9 @@
#include "power_helpers.h"
#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc.h"
+#include "core_types.h"
#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
@@ -237,7 +240,7 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
}
static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters params,
- struct iram_table_v_2_2 *table)
+ struct iram_table_v_2_2 *table, bool big_endian)
{
unsigned int i;
unsigned int num_entries = NUM_BL_CURVE_SEGS;
@@ -261,10 +264,12 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
ASSERT(lut_index < params.backlight_lut_array_size);
- table->backlight_thresholds[i] =
- cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
- table->backlight_offsets[i] =
- cpu_to_be16(params.backlight_lut_array[lut_index]);
+ table->backlight_thresholds[i] = (big_endian) ?
+ cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)) :
+ cpu_to_le16(DIV_ROUNDUP((i * 65536), num_entries));
+ table->backlight_offsets[i] = (big_endian) ?
+ cpu_to_be16(params.backlight_lut_array[lut_index]) :
+ cpu_to_le16(params.backlight_lut_array[lut_index]);
}
}
@@ -584,18 +589,18 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->crgb_slope[7] = cpu_to_be16(0x1910);
fill_backlight_transform_table_v_2_2(
- params, ram_table);
+ params, ram_table, true);
}
-void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
{
unsigned int i, j;
unsigned int set = params.set;
ram_table->flags = 0x0;
-
- ram_table->min_abm_backlight =
- cpu_to_be16(params.min_abm_backlight);
+ ram_table->min_abm_backlight = (big_endian) ?
+ cpu_to_be16(params.min_abm_backlight) :
+ cpu_to_le16(params.min_abm_backlight);
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
@@ -619,33 +624,51 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->iir_curve[4] = 0x65;
//Gamma 2.2
- ram_table->crgb_thresh[0] = cpu_to_be16(0x127c);
- ram_table->crgb_thresh[1] = cpu_to_be16(0x151b);
- ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5);
- ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56);
- ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83);
- ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72);
- ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0);
- ram_table->crgb_thresh[7] = cpu_to_be16(0x232b);
- ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
- ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
- ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
- ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
- ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
- ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
- ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
- ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
- ram_table->crgb_slope[0] = cpu_to_be16(0x3609);
- ram_table->crgb_slope[1] = cpu_to_be16(0x2dfa);
- ram_table->crgb_slope[2] = cpu_to_be16(0x27ea);
- ram_table->crgb_slope[3] = cpu_to_be16(0x235d);
- ram_table->crgb_slope[4] = cpu_to_be16(0x2042);
- ram_table->crgb_slope[5] = cpu_to_be16(0x1dc3);
- ram_table->crgb_slope[6] = cpu_to_be16(0x1b1a);
- ram_table->crgb_slope[7] = cpu_to_be16(0x1910);
+ ram_table->crgb_thresh[0] = (big_endian) ? cpu_to_be16(0x127c) : cpu_to_le16(0x127c);
+ ram_table->crgb_thresh[1] = (big_endian) ? cpu_to_be16(0x151b) : cpu_to_le16(0x151b);
+ ram_table->crgb_thresh[2] = (big_endian) ? cpu_to_be16(0x17d5) : cpu_to_le16(0x17d5);
+ ram_table->crgb_thresh[3] = (big_endian) ? cpu_to_be16(0x1a56) : cpu_to_le16(0x1a56);
+ ram_table->crgb_thresh[4] = (big_endian) ? cpu_to_be16(0x1c83) : cpu_to_le16(0x1c83);
+ ram_table->crgb_thresh[5] = (big_endian) ? cpu_to_be16(0x1e72) : cpu_to_le16(0x1e72);
+ ram_table->crgb_thresh[6] = (big_endian) ? cpu_to_be16(0x20f0) : cpu_to_le16(0x20f0);
+ ram_table->crgb_thresh[7] = (big_endian) ? cpu_to_be16(0x232b) : cpu_to_le16(0x232b);
+ ram_table->crgb_offset[0] = (big_endian) ? cpu_to_be16(0x2999) : cpu_to_le16(0x2999);
+ ram_table->crgb_offset[1] = (big_endian) ? cpu_to_be16(0x3999) : cpu_to_le16(0x3999);
+ ram_table->crgb_offset[2] = (big_endian) ? cpu_to_be16(0x4666) : cpu_to_le16(0x4666);
+ ram_table->crgb_offset[3] = (big_endian) ? cpu_to_be16(0x5999) : cpu_to_le16(0x5999);
+ ram_table->crgb_offset[4] = (big_endian) ? cpu_to_be16(0x6333) : cpu_to_le16(0x6333);
+ ram_table->crgb_offset[5] = (big_endian) ? cpu_to_be16(0x7800) : cpu_to_le16(0x7800);
+ ram_table->crgb_offset[6] = (big_endian) ? cpu_to_be16(0x8c00) : cpu_to_le16(0x8c00);
+ ram_table->crgb_offset[7] = (big_endian) ? cpu_to_be16(0xa000) : cpu_to_le16(0xa000);
+ ram_table->crgb_slope[0] = (big_endian) ? cpu_to_be16(0x3609) : cpu_to_le16(0x3609);
+ ram_table->crgb_slope[1] = (big_endian) ? cpu_to_be16(0x2dfa) : cpu_to_le16(0x2dfa);
+ ram_table->crgb_slope[2] = (big_endian) ? cpu_to_be16(0x27ea) : cpu_to_le16(0x27ea);
+ ram_table->crgb_slope[3] = (big_endian) ? cpu_to_be16(0x235d) : cpu_to_le16(0x235d);
+ ram_table->crgb_slope[4] = (big_endian) ? cpu_to_be16(0x2042) : cpu_to_le16(0x2042);
+ ram_table->crgb_slope[5] = (big_endian) ? cpu_to_be16(0x1dc3) : cpu_to_le16(0x1dc3);
+ ram_table->crgb_slope[6] = (big_endian) ? cpu_to_be16(0x1b1a) : cpu_to_le16(0x1b1a);
+ ram_table->crgb_slope[7] = (big_endian) ? cpu_to_be16(0x1910) : cpu_to_le16(0x1910);
fill_backlight_transform_table_v_2_2(
- params, ram_table);
+ params, ram_table, big_endian);
+}
+
+bool dmub_init_abm_config(struct abm *abm,
+ struct dmcu_iram_parameters params)
+{
+ unsigned char ram_table[IRAM_SIZE];
+ bool result = false;
+
+ if (abm == NULL)
+ return false;
+
+ memset(&ram_table, 0, sizeof(ram_table));
+
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, false);
+ result = abm->funcs->init_abm_config(
+ abm, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+
+ return result;
}
bool dmcu_load_iram(struct dmcu *dmcu,
@@ -657,17 +680,17 @@ bool dmcu_load_iram(struct dmcu *dmcu,
if (dmcu == NULL)
return false;
- if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+ if (dmcu && !dmcu->funcs->is_dmcu_initialized(dmcu))
return true;
memset(&ram_table, 0, sizeof(ram_table));
if (dmcu->dmcu_version.abm_version == 0x24) {
- fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
- result = dmcu->funcs->load_iram(
- dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
+ result = dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
} else if (dmcu->dmcu_version.abm_version == 0x23) {
- fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
result = dmcu->funcs->load_iram(
dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
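
The new big_endian flag exists because two consumers share the iram_table_v_2_2 layout but disagree on byte order: the DMCU firmware paths above keep big-endian tables (both callers pass true), while dmub_init_abm_config fills the table in little-endian for the DMUB-based ABM (passes false). The repeated ternaries could be collapsed into a helper along these lines (name hypothetical; like the open-coded version, storing __be16/__le16 into plain 16-bit fields needs __force casts to keep sparse quiet):

	static u16 bl_cpu_to_fw16(unsigned int v, bool big_endian)
	{
		return big_endian ? (__force u16)cpu_to_be16(v)
				  : (__force u16)cpu_to_le16(v);
	}

	ram_table->crgb_thresh[0] = bl_cpu_to_fw16(0x127c, big_endian);
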
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index e54157026330..46fbca2e2cd1 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -26,6 +26,7 @@
#define MODULES_POWER_POWER_HELPERS_H_
#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
enum abm_defines {
@@ -44,5 +45,7 @@ struct dmcu_iram_parameters {
bool dmcu_load_iram(struct dmcu *dmcu,
struct dmcu_iram_parameters params);
+bool dmub_init_abm_config(struct abm *abm,
+ struct dmcu_iram_parameters params);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
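
A caller picks the entry point by which microcontroller owns ABM on the ASIC: legacy DMCU hardware keeps dmcu_load_iram(), DMUB-based hardware uses the new dmub_init_abm_config(). A caller-side sketch (the surrounding variables are assumptions about the consuming driver, not part of this header):

	struct dmcu_iram_parameters params = { /* backlight curve, abm set, ... */ };
	bool ok;

	if (dmcu)
		ok = dmcu_load_iram(dmcu, params);	/* big-endian IRAM image */
	else if (abm)
		ok = dmub_init_abm_config(abm, params);	/* CPU/little-endian image */
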
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
deleted file mode 100644
index 03121ca64fe4..000000000000
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "mod_stats.h"
-#include "dm_services.h"
-#include "dc.h"
-#include "core_types.h"
-
-#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000000
-#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
-
-#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
-#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
-#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
-
-#define DAL_STATS_EVENT_ENTRIES_DEFAULT 0x00000100
-
-#define MOD_STATS_NUM_VSYNCS 5
-#define MOD_STATS_EVENT_STRING_MAX 512
-
-struct stats_time_cache {
- unsigned int entry_id;
-
- unsigned long flip_timestamp_in_ns;
- unsigned long vupdate_timestamp_in_ns;
-
- unsigned int render_time_in_us;
- unsigned int avg_render_time_in_us_last_ten;
- unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
- unsigned int num_vsync_between_flips;
-
- unsigned int flip_to_vsync_time_in_us;
- unsigned int vsync_to_flip_time_in_us;
-
- unsigned int min_window;
- unsigned int max_window;
- unsigned int v_total_min;
- unsigned int v_total_max;
- unsigned int event_triggers;
-
- unsigned int lfc_mid_point_in_us;
- unsigned int num_frames_inserted;
- unsigned int inserted_duration_in_us;
-
- unsigned int flags;
-};
-
-struct stats_event_cache {
- unsigned int entry_id;
- char event_string[MOD_STATS_EVENT_STRING_MAX];
-};
-
-struct core_stats {
- struct mod_stats public;
- struct dc *dc;
-
- bool enabled;
- unsigned int entries;
- unsigned int event_entries;
- unsigned int entry_id;
-
- struct stats_time_cache *time;
- unsigned int index;
-
- struct stats_event_cache *events;
- unsigned int event_index;
-
-};
-
-#define MOD_STATS_TO_CORE(mod_stats)\
- container_of(mod_stats, struct core_stats, public)
-
-bool mod_stats_init(struct mod_stats *mod_stats)
-{
- bool result = false;
- struct core_stats *core_stats = NULL;
- struct dc *dc = NULL;
-
- if (mod_stats == NULL)
- return false;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
- dc = core_stats->dc;
-
- return result;
-}
-
-struct mod_stats *mod_stats_create(struct dc *dc)
-{
- struct core_stats *core_stats = NULL;
- struct persistent_data_flag flag;
- unsigned int reg_data;
- int i = 0;
-
- if (dc == NULL)
- goto fail_construct;
-
- core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
-
- if (core_stats == NULL)
- goto fail_construct;
-
- core_stats->dc = dc;
-
- core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
- if (dm_read_persistent_data(dc->ctx, NULL, NULL,
- DAL_STATS_ENABLE_REGKEY,
- &reg_data, sizeof(unsigned int), &flag))
- core_stats->enabled = reg_data;
-
- if (core_stats->enabled) {
- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
- if (dm_read_persistent_data(dc->ctx, NULL, NULL,
- DAL_STATS_ENTRIES_REGKEY,
- &reg_data, sizeof(unsigned int), &flag)) {
- if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
- else
- core_stats->entries = reg_data;
- }
- core_stats->time = kcalloc(core_stats->entries,
- sizeof(struct stats_time_cache),
- GFP_KERNEL);
-
- if (core_stats->time == NULL)
- goto fail_construct_time;
-
- core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
- core_stats->events = kcalloc(core_stats->event_entries,
- sizeof(struct stats_event_cache),
- GFP_KERNEL);
-
- if (core_stats->events == NULL)
- goto fail_construct_events;
-
- } else {
- core_stats->entries = 0;
- }
-
- /* Purposely leave index 0 unused so we don't need special logic to
- * handle calculation cases that depend on previous flip data.
- */
- core_stats->index = 1;
- core_stats->event_index = 0;
-
- // Keeps track of ordering within the different stats structures
- core_stats->entry_id = 0;
-
- return &core_stats->public;
-
-fail_construct_events:
- kfree(core_stats->time);
-
-fail_construct_time:
- kfree(core_stats);
-
-fail_construct:
- return NULL;
-}
-
-void mod_stats_destroy(struct mod_stats *mod_stats)
-{
- if (mod_stats != NULL) {
- struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- kfree(core_stats->time);
- kfree(core_stats->events);
- kfree(core_stats);
- }
-}
-
-void mod_stats_dump(struct mod_stats *mod_stats)
-{
- struct dc *dc = NULL;
- struct dal_logger *logger = NULL;
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- struct stats_event_cache *events = NULL;
- unsigned int time_index = 1;
- unsigned int event_index = 0;
- unsigned int index = 0;
- struct log_entry log_entry;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
- dc = core_stats->dc;
- logger = dc->ctx->logger;
- time = core_stats->time;
- events = core_stats->events;
-
- DISPLAY_STATS_BEGIN(log_entry);
-
- DISPLAY_STATS("==Display Caps==\n");
-
- DISPLAY_STATS("==Display Stats==\n");
-
- DISPLAY_STATS("%10s %10s %10s %10s %10s"
- " %11s %11s %17s %10s %14s"
- " %10s %10s %10s %10s %10s"
- " %10s %10s %10s %10s\n",
- "render", "avgRender",
- "minWindow", "midPoint", "maxWindow",
- "vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
- "numFrame", "insertDuration",
- "vTotalMin", "vTotalMax", "eventTrigs",
- "vSyncTime1", "vSyncTime2", "vSyncTime3",
- "vSyncTime4", "vSyncTime5", "flags");
-
- for (int i = 0; i < core_stats->entry_id; i++) {
- if (event_index < core_stats->event_index &&
- i == events[event_index].entry_id) {
- DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
- event_index++;
- } else if (time_index < core_stats->index &&
- i == time[time_index].entry_id) {
- DISPLAY_STATS("%10u %10u %10u %10u %10u"
- " %11u %11u %17u %10u %14u"
- " %10u %10u %10u %10u %10u"
- " %10u %10u %10u %10u\n",
- time[time_index].render_time_in_us,
- time[time_index].avg_render_time_in_us_last_ten,
- time[time_index].min_window,
- time[time_index].lfc_mid_point_in_us,
- time[time_index].max_window,
- time[time_index].vsync_to_flip_time_in_us,
- time[time_index].flip_to_vsync_time_in_us,
- time[time_index].num_vsync_between_flips,
- time[time_index].num_frames_inserted,
- time[time_index].inserted_duration_in_us,
- time[time_index].v_total_min,
- time[time_index].v_total_max,
- time[time_index].event_triggers,
- time[time_index].v_sync_time_in_us[0],
- time[time_index].v_sync_time_in_us[1],
- time[time_index].v_sync_time_in_us[2],
- time[time_index].v_sync_time_in_us[3],
- time[time_index].v_sync_time_in_us[4],
- time[time_index].flags);
-
- time_index++;
- }
- }
-
- DISPLAY_STATS_END(log_entry);
-}
-
-void mod_stats_reset_data(struct mod_stats *mod_stats)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- memset(core_stats->time, 0,
- sizeof(struct stats_time_cache) * core_stats->entries);
-
- memset(core_stats->events, 0,
- sizeof(struct stats_event_cache) * core_stats->event_entries);
-
- core_stats->index = 1;
- core_stats->event_index = 0;
-
- // Keeps track of ordering within the different stats structures
- core_stats->entry_id = 0;
-}
-
-void mod_stats_update_event(struct mod_stats *mod_stats,
- char *event_string,
- unsigned int length)
-{
- struct core_stats *core_stats = NULL;
- struct stats_event_cache *events = NULL;
- unsigned int index = 0;
- unsigned int copy_length = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->event_index >= core_stats->event_entries)
- return;
-
- events = core_stats->events;
- index = core_stats->event_index;
-
- copy_length = length;
- if (length > MOD_STATS_EVENT_STRING_MAX)
- copy_length = MOD_STATS_EVENT_STRING_MAX;
-
- memcpy(&events[index].event_string, event_string, copy_length);
- events[index].event_string[copy_length - 1] = '\0';
-
- events[index].entry_id = core_stats->entry_id;
- core_stats->event_index++;
- core_stats->entry_id++;
-}
-
-void mod_stats_update_flip(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->index >= core_stats->entries)
- return;
-
- time = core_stats->time;
- index = core_stats->index;
-
- time[index].flip_timestamp_in_ns = timestamp_in_ns;
- time[index].render_time_in_us =
- (timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000;
-
- if (index >= 10) {
- for (unsigned int i = 0; i < 10; i++)
- time[index].avg_render_time_in_us_last_ten +=
- time[index - i].render_time_in_us;
- time[index].avg_render_time_in_us_last_ten /= 10;
- }
-
- if (time[index].num_vsync_between_flips > 0)
- time[index].vsync_to_flip_time_in_us =
- (timestamp_in_ns -
- time[index].vupdate_timestamp_in_ns) / 1000;
- else
- time[index].vsync_to_flip_time_in_us =
- (timestamp_in_ns -
- time[index - 1].vupdate_timestamp_in_ns) / 1000;
-
- time[index].entry_id = core_stats->entry_id;
- core_stats->index++;
- core_stats->entry_id++;
-}
-
-void mod_stats_update_vupdate(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
- unsigned int num_vsyncs = 0;
- unsigned int prev_vsync_in_ns = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->index >= core_stats->entries)
- return;
-
- time = core_stats->time;
- index = core_stats->index;
- num_vsyncs = time[index].num_vsync_between_flips;
-
- if (num_vsyncs < MOD_STATS_NUM_VSYNCS) {
- if (num_vsyncs == 0) {
- prev_vsync_in_ns =
- time[index - 1].vupdate_timestamp_in_ns;
-
- time[index].flip_to_vsync_time_in_us =
- (timestamp_in_ns -
- time[index - 1].flip_timestamp_in_ns) /
- 1000;
- } else {
- prev_vsync_in_ns =
- time[index].vupdate_timestamp_in_ns;
- }
-
- time[index].v_sync_time_in_us[num_vsyncs] =
- (timestamp_in_ns - prev_vsync_in_ns) / 1000;
- }
-
- time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
- time[index].num_vsync_between_flips++;
-}
-
-void mod_stats_update_freesync(struct mod_stats *mod_stats,
- unsigned int v_total_min,
- unsigned int v_total_max,
- unsigned int event_triggers,
- unsigned int window_min,
- unsigned int window_max,
- unsigned int lfc_mid_point_in_us,
- unsigned int inserted_frames,
- unsigned int inserted_duration_in_us)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->index >= core_stats->entries)
- return;
-
- time = core_stats->time;
- index = core_stats->index;
-
- time[index].v_total_min = v_total_min;
- time[index].v_total_max = v_total_max;
- time[index].event_triggers = event_triggers;
- time[index].min_window = window_min;
- time[index].max_window = window_max;
- time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
- time[index].num_frames_inserted = inserted_frames;
- time[index].inserted_duration_in_us = inserted_duration_in_us;
-}
-
diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
index 00f132f8ad55..61ee4be35d27 100644
--- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
+++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
@@ -112,9 +112,12 @@ uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
evict_vmids(core_vmid);
vmid = get_next_available_vmid(core_vmid);
- add_ptb_to_table(core_vmid, vmid, ptb);
+ if (vmid != -1) {
+ add_ptb_to_table(core_vmid, vmid, ptb);
- dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
+ dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
+ } else
+ ASSERT(0);
}
return vmid;
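
The guard covers the case where eviction still leaves no VMID free: a -1 from get_next_available_vmid() previously flowed straight into add_ptb_to_table() and dc_setup_vm_context(). Note the -1 still escapes through the function's uint8_t return as 0xff, so a caller presumably needs its own check; a sketch (the error handling is hypothetical):

	uint8_t vmid = mod_vmid_get_for_ptb(mod_vmid, ptb);

	if (vmid == 0xff)	/* -1 truncated through the uint8_t return */
		return -ENOSPC;
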
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index d655a76bedc6..e98c84ef206f 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -40,6 +40,13 @@ enum amd_chip_flags {
AMD_EXP_HW_SUPPORT = 0x00080000UL,
};
+enum amd_apu_flags {
+ AMD_APU_IS_RAVEN = 0x00000001UL,
+ AMD_APU_IS_RAVEN2 = 0x00000002UL,
+ AMD_APU_IS_PICASSO = 0x00000004UL,
+ AMD_APU_IS_RENOIR = 0x00000008UL,
+};
+
enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_COMMON,
AMD_IP_BLOCK_TYPE_GMC,
@@ -150,6 +157,13 @@ enum DC_FEATURE_MASK {
DC_PSR_MASK = 0x8,
};
+enum DC_DEBUG_MASK {
+ DC_DISABLE_PIPE_SPLIT = 0x1,
+ DC_DISABLE_STUTTER = 0x2,
+ DC_DISABLE_DSC = 0x4,
+ DC_DISABLE_CLOCK_GATING = 0x8
+};
+
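
Both additions are power-of-two bit flags meant to be OR-ed into a mask and tested individually. Typical usage in the consuming driver might look like this sketch (adev->apu_flags and the amdgpu_dc_debug_mask module parameter are assumptions about the callers, not defined in this header):

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/* apply a Raven2-specific workaround */;

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT)
		init_data.flags.disable_pipe_split = true;
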
enum amd_dpm_forced_level;
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
index e7db6f9f9c86..8b0b9a2a8fed 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
@@ -5599,6 +5599,7 @@
#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
//GRBM_STATUS
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
@@ -5619,6 +5620,7 @@
#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
@@ -5832,6 +5834,7 @@
#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
//GRBM_READ_ERROR2
#define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT 0x10
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
@@ -5847,6 +5850,7 @@
#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
#define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK 0x00010000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
index 68d0ffad28c7..92fd27c26a77 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
@@ -1162,8 +1162,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 0
#define mmRCC_CONFIG_RESERVED 0x0de4 // duplicate
#define mmRCC_CONFIG_RESERVED_BASE_IDX 0
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x0de5 // duplicate
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 0
+#endif
// addressBlock: syshub_mmreg_ind_syshubdec
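
mmRCC_IOV_FUNC_IDENTIFIER is defined by several generated register headers with different values (0x0de5 with base index 0 here, 0x00c5 with base index 2 in the nbio 7.x headers below), so each definition gets the same guard:

	#ifndef mmRCC_IOV_FUNC_IDENTIFIER
	#define mmRCC_IOV_FUNC_IDENTIFIER		0x0de5	// duplicate
	#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX	0
	#endif

If a translation unit includes more than one of these headers, the first definition seen wins for both the offset and the base index, which presumably is acceptable because any given build uses exactly one of the conflicting headers at a time.
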
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
index 435462294fbc..a7cd760ebf8f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
@@ -4251,8 +4251,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2
#define mmRCC_CONFIG_RESERVED 0x00c4
#define mmRCC_CONFIG_RESERVED_BASE_IDX 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+#endif
// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
index ce5830ebe095..0c5a08bc034a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
@@ -2687,8 +2687,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2
#define mmRCC_CONFIG_RESERVED 0x00c4
#define mmRCC_CONFIG_RESERVED_BASE_IDX 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+#endif
// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
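The #ifndef guard exists because mmRCC_IOV_FUNC_IDENTIFIER is now defined by several per-IP headers (nbif_6_1 above, nbio_7_0 and nbio_7_4 below); a translation unit that includes more than one of them would otherwise hit macro redefinitions. A sketch of the situation the guard handles, with hypothetical includes:

/* Without the guards, the second include would redefine the macro. */
#include "asic_reg/nbif/nbif_6_1_offset.h"  /* defines it first */
#include "asic_reg/nbio/nbio_7_0_offset.h"  /* skipped via #ifndef */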
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h
new file mode 100644
index 000000000000..e87c359ea1fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_OFFSET_HEADER
+#define _pwr_10_0_OFFSET_HEADER
+
+#define mmPWR_MISC_CNTL_STATUS 0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h
new file mode 100644
index 000000000000..8a000c21651c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_SH_MASK_HEADER
+#define _pwr_10_0_SH_MASK_HEADER
+
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
+
+#endif
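These new pwr_10_0 headers lift the PWR_MISC_CNTL_STATUS definitions out of smu10_hwmgr.c (see the removal in the smu10 hunk below). A hedged sketch of their use, following the pattern already in the smu10 code and assuming the usual SOC15 register accessors:

/* GFXOFF status is the 2-bit field at bits [2:1]; the smu10 code
 * treats the value 2 as "GFX is on". */
bool gfx_on = false;
uint32_t reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
    (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
	gfx_on = true;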
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h
new file mode 100644
index 000000000000..9bf73284ad73
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_OFFSET_HEADER
+#define _smuio_12_0_0_OFFSET_HEADER
+
+#define mmSMUIO_GFX_MISC_CNTL 0x00c8
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+
+#define mmPWR_MISC_CNTL_STATUS 0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h
new file mode 100644
index 000000000000..26556fa3d054
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_SH_MASK_HEADER
+#define _smuio_12_0_0_SH_MASK_HEADER
+
+//SMUIO_GFX_MISC_CNTL
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 70146518174c..b36ea8340afa 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -972,11 +972,13 @@ struct atom_ext_display_path
};
//usCaps
-enum ext_display_path_cap_def
-{
- EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE =0x0001,
- EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN =0x0002,
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK =0x007C,
+enum ext_display_path_cap_def {
+ EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE = 0x0001,
+ EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = 0x0002,
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007C,
+ EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x01 << 2), //PI redriver chip
+ EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x02 << 2), //TI retimer chip
+ EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x03 << 2) //Parade DP->HDMI converter chip
};
struct atom_external_display_connection_info
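Note the three new HDMI 2.0 entries are values of the multi-bit external-chip field rather than independent flag bits, so a consumer must mask with EXT_CHIP_MASK before comparing. A hypothetical decode of a caps word taken from atom_ext_display_path:

uint16_t caps = 0; /* assumed: read from the path's caps field */
switch (caps & EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) {
case EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204:      /* PI redriver */
case EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT: /* TI retimer */
case EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175:    /* Parade converter */
	/* an external HDMI 2.0 chip sits on this display path */
	break;
}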
@@ -1876,6 +1878,108 @@ struct atom_smc_dpm_info_v4_6
uint32_t boardreserved[10];
};
+struct atom_smc_dpm_info_v4_7
+{
+ struct atom_common_table_header table_header;
+ // SECTION: BOARD PARAMETERS
+ // I2C Control
+ struct smudpm_i2c_controller_config_v2 I2cControllers[8];
+
+ // SVI2 Board Parameters
+ uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+ uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+
+ uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields
+
+ uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+ uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+ uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN)
+ uint8_t Padding8_V;
+
+ // Telemetry Settings
+ uint16_t GfxMaxCurrent; // in Amps
+ uint8_t GfxOffset; // in Amps
+ uint8_t Padding_TelemetryGfx;
+ uint16_t SocMaxCurrent; // in Amps
+ uint8_t SocOffset; // in Amps
+ uint8_t Padding_TelemetrySoc;
+
+ uint16_t Mem0MaxCurrent; // in Amps
+ uint8_t Mem0Offset; // in Amps
+ uint8_t Padding_TelemetryMem0;
+
+ uint16_t Mem1MaxCurrent; // in Amps
+ uint8_t Mem1Offset; // in Amps
+ uint8_t Padding_TelemetryMem1;
+
+ // GPIO Settings
+ uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching
+ uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching
+ uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event
+ uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event
+
+ uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event
+ uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event
+ uint8_t GthrGpio; // GPIO pin configured for GTHR Event
+ uint8_t GthrPolarity; // replace GPIO polarity for GTHR
+
+ // LED Display Settings
+ uint8_t LedPin0; // GPIO number for LedPin[0]
+ uint8_t LedPin1; // GPIO number for LedPin[1]
+ uint8_t LedPin2; // GPIO number for LedPin[2]
+ uint8_t padding8_4;
+
+ // GFXCLK PLL Spread Spectrum
+ uint8_t PllGfxclkSpreadEnabled; // on or off
+ uint8_t PllGfxclkSpreadPercent; // Q4.4
+ uint16_t PllGfxclkSpreadFreq; // kHz
+
+ // GFXCLK DFLL Spread Spectrum
+ uint8_t DfllGfxclkSpreadEnabled; // on or off
+ uint8_t DfllGfxclkSpreadPercent; // Q4.4
+ uint16_t DfllGfxclkSpreadFreq; // kHz
+
+ // UCLK Spread Spectrum
+ uint8_t UclkSpreadEnabled; // on or off
+ uint8_t UclkSpreadPercent; // Q4.4
+ uint16_t UclkSpreadFreq; // kHz
+
+ // SOCCLK Spread Spectrum
+ uint8_t SoclkSpreadEnabled; // on or off
+ uint8_t SocclkSpreadPercent; // Q4.4
+ uint16_t SocclkSpreadFreq; // kHz
+
+ // Total board power
+ uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
+ uint16_t BoardPadding;
+
+ // Mvdd Svi2 Div Ratio Setting
+ uint32_t MvddRatio; // This is used for MVDD Vid workaround. It has 16 fractional bits (Q16.16)
+
+ // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence
+ uint8_t GpioI2cScl; // Serial Clock
+ uint8_t GpioI2cSda; // Serial Data
+ uint16_t GpioPadding;
+
+ // Additional LED Display Settings
+ uint8_t LedPin3; // GPIO number for LedPin[3] - PCIE GEN Speed
+ uint8_t LedPin4; // GPIO number for LedPin[4] - PMFW Error Status
+ uint16_t LedEnableMask;
+
+ // Power Limit Scalars
+ uint8_t PowerLimitScalar[4]; //[PPT_THROTTLER_COUNT]
+
+ uint8_t MvddUlvPhaseSheddingMask;
+ uint8_t VddciUlvPhaseSheddingMask;
+ uint8_t Padding8_Psi1;
+ uint8_t Padding8_Psi2;
+
+ uint32_t BoardReserved[5];
+};
+
/*
***************************************************************************
Data Table asic_profiling_info structure
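The new v4_7 table mixes fixed-point encodings: the spread-spectrum percents are Q4.4 and MvddRatio is Q16.16, per the field comments. A sketch of the conversions those comments imply (the scale factors are assumptions drawn from the annotations, not from separate firmware documentation):

struct atom_smc_dpm_info_v4_7 *info; /* assumed: parsed from VBIOS */
float spread_pct = info->PllGfxclkSpreadPercent / 16.0f; /* Q4.4   */
float mvdd_ratio = info->MvddRatio / 65536.0f;           /* Q16.16 */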
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index a69deb3a2ac0..60a6536ff656 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -32,7 +32,6 @@ struct cgs_device;
* enum cgs_ind_reg - Indirect register spaces
*/
enum cgs_ind_reg {
- CGS_IND_REG__MMIO,
CGS_IND_REG__PCIE,
CGS_IND_REG__SMC,
CGS_IND_REG__UVD_CTX,
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index e4e5a53b2b4e..7e6dcdf7df73 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -50,6 +50,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
mutex_init(&hwmgr->smu_lock);
+ mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
hwmgr->feature_mask = adev->pm.pp_feature;
@@ -64,6 +65,8 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ mutex_destroy(&hwmgr->msg_lock);
+
kfree(hwmgr->hardcode_pp_table);
hwmgr->hardcode_pp_table = NULL;
@@ -319,12 +322,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
hwmgr->en_umd_pstate = true;
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index e8b27fab6aa1..8c684a6e0156 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -62,6 +62,7 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
size_t size = 0;
int ret = 0, i = 0;
uint32_t feature_mask[2] = { 0 };
@@ -70,6 +71,9 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
@@ -110,9 +114,6 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
uint32_t feature_low = 0, feature_high = 0;
int ret = 0;
- if (!smu->pm_enabled)
- return ret;
-
feature_low = (feature_mask >> 0 ) & 0xffffffff;
feature_high = (feature_mask >> 32) & 0xffffffff;
@@ -155,6 +156,10 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
uint64_t feature_2_enabled = 0;
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
mutex_lock(&smu->mutex);
@@ -191,16 +196,31 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
if (!if_version && !smu_version)
return -EINVAL;
+ if (smu->smc_fw_if_version && smu->smc_fw_version) {
+ if (if_version)
+ *if_version = smu->smc_fw_if_version;
+
+ if (smu_version)
+ *smu_version = smu->smc_fw_version;
+
+ return 0;
+ }
+
if (if_version) {
ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
if (ret)
return ret;
+
+ smu->smc_fw_if_version = *if_version;
}
if (smu_version) {
ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
if (ret)
return ret;
+
+ smu->smc_fw_version = *smu_version;
}
return ret;
@@ -327,13 +347,13 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
- param, &param);
+ param, value);
if (ret)
return ret;
/* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
* which we do not support for now */
- *value = param & 0x7fffffff;
+ *value = *value & 0x7fffffff;
return ret;
}
@@ -417,8 +437,12 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
ret = smu_dpm_set_uvd_enable(smu, !gate);
@@ -511,7 +535,6 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
int table_id = smu_table_get_index(smu, table_index);
uint32_t table_size;
int ret = 0;
-
if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
@@ -547,12 +570,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false;
else if (adev->asic_type >= CHIP_ARCTURUS) {
- if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
- return false;
- else
+ if (amdgpu_sriov_is_pp_one_vf(adev) || !amdgpu_sriov_vf(adev))
return true;
- } else
- return false;
+ }
+ return false;
}
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
@@ -569,8 +590,12 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
uint32_t powerplay_table_size;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
@@ -591,11 +616,13 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
int ret = 0;
- if (!smu->pm_enabled)
+ if (!adev->pm.dpm_enabled)
return -EINVAL;
+
if (header->usStructureSize != size) {
pr_err("pp table size not matched !\n");
return -EIO;
@@ -636,8 +663,6 @@ int smu_feature_init_dpm(struct smu_context *smu)
int ret = 0;
uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
- if (!smu->pm_enabled)
- return ret;
mutex_lock(&feature->mutex);
bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
@@ -665,7 +690,6 @@ int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
if (smu->is_apu)
return 1;
-
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
return 0;
@@ -932,13 +956,6 @@ static int smu_sw_init(void *handle)
return ret;
}
- if (adev->smu.ppt_funcs->i2c_eeprom_init) {
- ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
-
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -948,9 +965,6 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
- if (adev->smu.ppt_funcs->i2c_eeprom_fini)
- smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
-
kfree(smu->irq_source);
smu->irq_source = NULL;
@@ -1323,6 +1337,9 @@ static int smu_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
ret = smu_start_smc_engine(smu);
if (ret) {
pr_err("SMU is not ready yet!\n");
@@ -1336,9 +1353,6 @@ static int smu_hw_init(void *handle)
smu_set_gfx_cgpg(&adev->smu, true);
}
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
if (!smu->pm_enabled)
return 0;
@@ -1366,10 +1380,11 @@ static int smu_hw_init(void *handle)
if (ret)
goto failed;
- if (!smu->pm_enabled)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true; /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+ if (ret)
+ goto failed;
+
+ adev->pm.dpm_enabled = true;
pr_info("SMU is initialized successfully!\n");
@@ -1381,6 +1396,9 @@ failed:
static int smu_stop_dpms(struct smu_context *smu)
{
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
return smu_system_features_control(smu, false);
}
@@ -1403,6 +1421,10 @@ static int smu_hw_fini(void *handle)
if (!smu->pm_enabled)
return 0;
+ adev->pm.dpm_enabled = false;
+
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
if (!amdgpu_sriov_vf(adev)){
ret = smu_stop_thermal_control(smu);
if (ret) {
@@ -1476,7 +1498,7 @@ static int smu_disable_dpm(struct smu_context *smu)
bool use_baco = !smu->is_apu &&
((adev->in_gpu_reset &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
- (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+ ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
ret = smu_get_smc_version(smu, NULL, &smu_version);
if (ret) {
@@ -1542,6 +1564,10 @@ static int smu_suspend(void *handle)
if (!smu->pm_enabled)
return 0;
+ adev->pm.dpm_enabled = false;
+
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
if(!amdgpu_sriov_vf(adev)) {
ret = smu_disable_dpm(smu);
if (ret)
@@ -1587,11 +1613,17 @@ static int smu_resume(void *handle)
if (ret)
goto failed;
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+ if (ret)
+ goto failed;
+
if (smu->is_apu)
smu_set_gfx_cgpg(&adev->smu, true);
smu->disable_uclk_switch = 0;
+ adev->pm.dpm_enabled = true;
+
pr_info("SMU is resumed successfully!\n");
return 0;
@@ -1603,10 +1635,14 @@ failed:
int smu_display_configuration_change(struct smu_context *smu,
const struct amd_pp_display_configuration *display_config)
{
+ struct amdgpu_device *adev = smu->adev;
int index = 0;
int num_of_active_display = 0;
- if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
+ if (!is_support_sw_smu(smu->adev))
return -EINVAL;
if (!display_config)
@@ -1668,12 +1704,16 @@ int smu_get_current_clocks(struct smu_context *smu,
struct amd_pp_clock_info *clocks)
{
struct amd_pp_simple_clock_info simple_clocks = {0};
+ struct amdgpu_device *adev = smu->adev;
struct smu_clock_info hw_clocks;
int ret = 0;
if (!is_support_sw_smu(smu->adev))
return -EINVAL;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
smu_get_dal_power_level(smu, &simple_clocks);
@@ -1736,7 +1776,7 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1744,12 +1784,12 @@ static int smu_enable_umd_pstate(void *handle,
if (*level & profile_mode_mask) {
smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
smu_dpm_ctx->enable_umd_pstate = true;
- amdgpu_device_ip_set_clockgating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -1778,9 +1818,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
long workload;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->pm_enabled)
- return -EINVAL;
-
if (!skip_display_settings) {
ret = smu_display_config_changed(smu);
if (ret) {
@@ -1831,8 +1868,12 @@ int smu_handle_task(struct smu_context *smu,
enum amd_pp_task task_id,
bool lock_needed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (lock_needed)
mutex_lock(&smu->mutex);
@@ -1866,10 +1907,11 @@ int smu_switch_power_profile(struct smu_context *smu,
bool en)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
long workload;
uint32_t index;
- if (!smu->pm_enabled)
+ if (!adev->pm.dpm_enabled)
return -EINVAL;
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
@@ -1900,8 +1942,12 @@ int smu_switch_power_profile(struct smu_context *smu,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
enum amd_dpm_forced_level level;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
@@ -1915,8 +1961,12 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
@@ -1939,8 +1989,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
ret = smu_init_display_count(smu, count);
mutex_unlock(&smu->mutex);
@@ -1954,8 +2008,12 @@ int smu_force_clk_levels(struct smu_context *smu,
bool lock_needed)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("force clock level is for dpm manual mode only.\n");
return -EINVAL;
@@ -1973,20 +2031,19 @@ int smu_force_clk_levels(struct smu_context *smu,
return ret;
}
+/*
+ * On system suspend or reset, the dpm_enabled flag is
+ * cleared, so the SMU services that are no longer
+ * supported get gated.
+ * However, the mp1 state setting should still be granted
+ * even with dpm_enabled cleared.
+ */
int smu_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
uint16_t msg;
int ret;
- /*
- * The SMC is not fully ready. That may be
- * expected as the IP may be masked.
- * So, just return without error.
- */
- if (!smu->pm_enabled)
- return 0;
-
mutex_lock(&smu->mutex);
switch (mp1_state) {
@@ -2023,15 +2080,11 @@ int smu_set_mp1_state(struct smu_context *smu,
int smu_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /*
- * The SMC is not fully ready. That may be
- * expected as the IP may be masked.
- * So, just return without error.
- */
- if (!smu->pm_enabled)
- return 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
return 0;
@@ -2047,6 +2100,28 @@ int smu_set_df_cstate(struct smu_context *smu,
return ret;
}
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
+ if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
+ if (ret)
+ pr_err("[AllowXgmiPowerDown] failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
int smu_write_watermarks_table(struct smu_context *smu)
{
void *watermarks_table = smu->smu_table.watermarks_table;
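smu_allow_xgmi_power_down() follows the same gate-then-dispatch shape as the other wrappers here: bail out with -EINVAL while DPM is disabled, take smu->mutex, and forward to the ppt hook. A hypothetical caller (for example, XGMI topology or RAS code deciding link power policy):

int ret = smu_allow_xgmi_power_down(smu, true); /* permit GMI power-down */
if (ret)
	pr_warn("XGMI power-down policy not applied (%d)\n", ret);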
@@ -2065,6 +2140,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
void *table = smu->smu_table.watermarks_table;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
if (!table)
return -EINVAL;
@@ -2089,8 +2168,12 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
int smu_set_ac_dc(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
/* controlled by firmware */
if (smu->dc_controlled_by_gpio)
return 0;
@@ -2149,8 +2232,12 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
int smu_load_microcode(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->load_microcode)
@@ -2163,8 +2250,12 @@ int smu_load_microcode(struct smu_context *smu)
int smu_check_fw_status(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->check_fw_status)
@@ -2191,8 +2282,12 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_speed_rpm)
@@ -2208,10 +2303,15 @@ int smu_get_power_limit(struct smu_context *smu,
bool def,
bool lock_needed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (lock_needed)
+ if (lock_needed) {
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
+ }
if (smu->ppt_funcs->get_power_limit)
ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
@@ -2224,8 +2324,12 @@ int smu_get_power_limit(struct smu_context *smu,
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_power_limit)
@@ -2238,8 +2342,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->print_clk_levels)
@@ -2252,8 +2360,12 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch
int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_od_percentage)
@@ -2266,8 +2378,12 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_od_percentage)
@@ -2282,8 +2398,12 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->od_edit_dpm_table)
@@ -2298,8 +2418,12 @@ int smu_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->read_sensor)
@@ -2312,8 +2436,12 @@ int smu_read_sensor(struct smu_context *smu,
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_power_profile_mode)
@@ -2329,8 +2457,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
uint32_t param_size,
bool lock_needed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (lock_needed)
mutex_lock(&smu->mutex);
@@ -2346,8 +2478,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
int smu_get_fan_control_mode(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_fan_control_mode)
@@ -2360,8 +2496,12 @@ int smu_get_fan_control_mode(struct smu_context *smu)
int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_control_mode)
@@ -2374,8 +2514,12 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_fan_speed_percent)
@@ -2388,8 +2532,12 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_speed_percent)
@@ -2402,8 +2550,12 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_fan_speed_rpm)
@@ -2416,8 +2568,12 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_deep_sleep_dcefclk)
@@ -2430,8 +2586,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (smu->ppt_funcs->set_active_display_count)
ret = smu->ppt_funcs->set_active_display_count(smu, count);
@@ -2442,8 +2602,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
enum amd_pp_clock_type type,
struct amd_pp_clocks *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_clock_by_type)
@@ -2457,8 +2621,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
int smu_get_max_high_clocks(struct smu_context *smu,
struct amd_pp_simple_clock_info *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_max_high_clocks)
@@ -2473,8 +2641,12 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
enum smu_clk_type clk_type,
struct pp_clock_levels_with_latency *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_clock_by_type_with_latency)
@@ -2489,8 +2661,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_clock_by_type_with_voltage)
@@ -2505,8 +2681,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
int smu_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request *clock_req)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->display_clock_voltage_request)
@@ -2520,8 +2700,12 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = -EINVAL;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->display_disable_memory_clock_switch)
@@ -2534,8 +2718,12 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->notify_smu_enable_pwe)
@@ -2549,8 +2737,12 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu)
int smu_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_xgmi_pstate)
@@ -2563,8 +2755,12 @@ int smu_set_xgmi_pstate(struct smu_context *smu,
int smu_set_azalia_d3_pme(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_azalia_d3_pme)
@@ -2575,6 +2771,14 @@ int smu_set_azalia_d3_pme(struct smu_context *smu)
return ret;
}
+/*
+ * On system suspend or reset, the dpm_enabled flag is
+ * cleared, so the SMU services that are no longer
+ * supported get gated.
+ *
+ * However, the baco/mode1 reset should still be granted
+ * as they are still supported and necessary.
+ */
bool smu_baco_is_support(struct smu_context *smu)
{
bool ret = false;
@@ -2646,8 +2850,12 @@ int smu_mode2_reset(struct smu_context *smu)
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
@@ -2662,8 +2870,12 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
unsigned int *clock_values_in_khz,
unsigned int *num_states)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_uclk_dpm_states)
@@ -2677,6 +2889,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
mutex_lock(&smu->mutex);
@@ -2691,8 +2907,12 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
int smu_get_dpm_clock_table(struct smu_context *smu,
struct dpm_clocks *clock_table)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_dpm_clock_table)
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 1ef0923f7190..27c5fc9572b2 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -128,6 +128,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode),
MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable),
MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl),
+ MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -622,6 +623,9 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct arcturus_dpm_table *dpm_table = NULL;
+ if (amdgpu_ras_intr_triggered())
+ return snprintf(buf, PAGE_SIZE, "unavailable\n");
+
dpm_table = smu_dpm->dpm_context;
switch (type) {
@@ -997,6 +1001,9 @@ static int arcturus_read_sensor(struct smu_context *smu,
PPTable_t *pptable = table_context->driver_pptable;
int ret = 0;
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
if (!data || !size)
return -EINVAL;
@@ -2226,12 +2233,8 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
- struct smu_context *smu = &adev->smu;
int res;
- if (!smu->pm_enabled)
- return -EOPNOTSUPP;
-
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
@@ -2247,12 +2250,6 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- struct smu_context *smu = &adev->smu;
-
- if (!smu->pm_enabled)
- return;
-
i2c_del_adapter(control);
}
@@ -2261,7 +2258,7 @@ static bool arcturus_is_baco_supported(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t val;
- if (!smu_v11_0_baco_is_support(smu))
+ if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev))
return false;
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
@@ -2296,6 +2293,35 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
+static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_GmiPwrDnControl is supported from PMFW 54.23.0 onwards */
+ if (smu_version < 0x00361700) {
+ pr_err("XGMI power down control is only supported by PMFW 54.23.0 and onwards\n");
+ return -EINVAL;
+ }
+
+ if (en)
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ 1,
+ NULL);
+
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ 0,
+ NULL);
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
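The 0x00361700 threshold packs the PMFW version as major/minor/patch bytes: 0x36 = 54, 0x17 = 23, 0x00 = 0, i.e. the 54.23.0 named in the error message. A sketch of the assumed decoding:

uint8_t major = (smu_version >> 16) & 0xff; /* 0x36 -> 54 */
uint8_t minor = (smu_version >> 8) & 0xff;  /* 0x17 -> 23 */
uint8_t patch = smu_version & 0xff;         /* 0x00 -> 0  */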
@@ -2389,6 +2415,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.get_pptable_power_limit = arcturus_get_pptable_power_limit,
.set_df_cstate = arcturus_set_df_cstate,
+ .allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 689072a312a7..c9cfe90a2947 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -36,6 +36,8 @@
#include "power_state.h"
#include "soc15_common.h"
#include "smu10.h"
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8MHz, the low boundary of engine clock allowed on this chip */
@@ -43,13 +45,6 @@
#define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100MHz */
#define SMC_RAM_END 0x40000
-#define mmPWR_MISC_CNTL_STATUS 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
-
static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
@@ -81,7 +76,7 @@ static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
return -EINVAL;
}
- smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
+ smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL);
return 0;
}
@@ -214,7 +209,8 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo
smu10_data->deep_sleep_dcefclk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- smu10_data->deep_sleep_dcefclk);
+ smu10_data->deep_sleep_dcefclk,
+ NULL);
}
return 0;
}
@@ -228,7 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
smu10_data->dcf_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinDcefclkByFreq,
- smu10_data->dcf_actual_hard_min_freq);
+ smu10_data->dcf_actual_hard_min_freq,
+ NULL);
}
return 0;
}
@@ -242,7 +239,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
smu10_data->f_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- smu10_data->f_actual_hard_min_freq);
+ smu10_data->f_actual_hard_min_freq,
+ NULL);
}
return 0;
}
@@ -255,7 +253,8 @@ static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count
smu10_data->num_active_display = count;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplayCount,
- smu10_data->num_active_display);
+ smu10_data->num_active_display,
+ NULL);
}
return 0;
@@ -278,7 +277,8 @@ static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetGfxCGPG,
- true);
+ true,
+ NULL);
else
return 0;
}
@@ -324,7 +324,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL);
/* confirm gfx is back to "on" state */
while (!smu10_is_gfx_on(hwmgr))
@@ -344,7 +344,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL);
return 0;
}
@@ -410,12 +410,10 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table **pptable,
uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
- uint32_t table_size, i;
+ uint32_t i;
struct smu10_voltage_dependency_table *ptable;
- table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
- ptable = kzalloc(table_size, GFP_KERNEL);
-
+ ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
if (NULL == ptable)
return -ENOMEM;
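struct_size() (from linux/overflow.h) replaces the open-coded arithmetic and pairs with the flexible-array-member change to smu10_voltage_dependency_table in the header hunk below. Roughly what it computes here, minus the overflow saturation it adds:

size_t bytes = sizeof(struct smu10_voltage_dependency_table) +
	num_entry * sizeof(struct smu10_clock_voltage_dependency_record);
/* the old code used sizeof(uint32_t) for the header part, ignoring padding */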
@@ -479,12 +477,10 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
- result = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result);
smu10_data->gfx_min_freq_limit = result / 10 * 1000;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
- result = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result);
smu10_data->gfx_max_freq_limit = result / 10 * 1000;
return 0;
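The file-wide churn below reflects a signature change: smum_send_msg_to_smc() and smum_send_msg_to_smc_with_parameter() now take an output pointer, folding the old send-then-smum_get_argument() pair into one call (serialized by the new hwmgr->msg_lock added in amd_powerplay.c above); callers pass NULL when no response is wanted. A sketch of old versus new:

uint32_t freq;
/* old, racy without external locking:
 *   smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
 *   freq = smum_get_argument(hwmgr);
 */
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL); /* reply unused */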
@@ -588,116 +584,148 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- SMU10_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- min_sclk);
+ min_sclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- min_sclk);
+ min_sclk,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- SMU10_UMD_PSTATE_GFXCLK);
+ SMU10_UMD_PSTATE_GFXCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- SMU10_UMD_PSTATE_FCLK);
+ SMU10_UMD_PSTATE_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- SMU10_UMD_PSTATE_SOCCLK);
+ SMU10_UMD_PSTATE_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- SMU10_UMD_PSTATE_GFXCLK);
+ SMU10_UMD_PSTATE_GFXCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_FCLK);
+ SMU10_UMD_PSTATE_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- SMU10_UMD_PSTATE_SOCCLK);
+ SMU10_UMD_PSTATE_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- min_sclk);
+ min_sclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
SMU10_UMD_PSTATE_PEAK_FCLK :
- min_mclk);
+ min_mclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- SMU10_UMD_PSTATE_MIN_SOCCLK);
+ SMU10_UMD_PSTATE_MIN_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_MIN_VCE);
+ SMU10_UMD_PSTATE_MIN_VCE,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- data->gfx_min_freq_limit/100);
+ data->gfx_min_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_min_freq_limit/100);
+ data->gfx_min_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -849,13 +877,15 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
low == 2 ? data->gfx_max_freq_limit/100 :
low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
- data->gfx_min_freq_limit/100);
+ data->gfx_min_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
high == 0 ? data->gfx_min_freq_limit/100 :
high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
break;
case PP_MCLK:
@@ -864,11 +894,13 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- mclk_table->entries[low].clk/100);
+ mclk_table->entries[low].clk/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- mclk_table->entries[high].clk/100);
+ mclk_table->entries[high].clk/100,
+ NULL);
break;
case PP_PCIE:
@@ -888,8 +920,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
if (now == data->gfx_max_freq_limit/100)
@@ -910,8 +941,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
i == 2 ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -1122,15 +1152,13 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
- sclk = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk);
/* in units of 10KHZ */
*((uint32_t *)value) = sclk * 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
- mclk = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk);
/* in units of 10KHZ */
*((uint32_t *)value) = mclk * 100;
*size = 4;
@@ -1166,20 +1194,20 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL);
}
static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
}
static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
if (gate)
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL);
else
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL);
}
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
@@ -1191,11 +1219,11 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerDownVcn, 0);
+ PPSMC_MSG_PowerDownVcn, 0, NULL);
smu10_data->vcn_power_gated = true;
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerUpVcn, 0);
+ PPSMC_MSG_PowerUpVcn, 0, NULL);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -1274,8 +1302,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- if ((adev->asic_type == CHIP_RAVEN) &&
- (adev->rev_id != 0x15d8) &&
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
(hwmgr->smu_version >= 0x41e2b))
return true;
else
@@ -1304,7 +1331,8 @@ static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uin
hwmgr->gfxoff_state_changed_by_workload = true;
}
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (!result)
hwmgr->power_profile_mode = input[size];
if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
@@ -1319,13 +1347,13 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DeviceDriverReset,
- mode);
+ mode,
+ NULL);
}
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
.backend_fini = smu10_hwmgr_backend_fini,
- .asic_setup = NULL,
.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
.force_dpm_level = smu10_dpm_force_dpm_level,
.get_power_state_size = smu10_get_power_state_size,
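
The change repeated throughout these hunks is a new calling convention for the SMU message helpers: rather than sending a message and then fetching the response separately via smum_get_argument() (or a raw read of mmSMC_MSG_ARG_0), callers now hand the helper a response pointer, or NULL when no response is wanted. A minimal sketch of the presumed updated prototypes, inferred from the call sites in this series rather than quoted from smumgr.h:

	/* Sketch only -- parameter types inferred from the callers in this
	 * series; resp may be NULL when the SMC response is not needed. */
	int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg,
				 uint32_t *resp);
	int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
						uint16_t msg, uint32_t parameter,
						uint32_t *resp);

Folding the response read into the send presumably lets one lock cover the whole message/response pair, which a separate smum_get_argument() call could not guarantee when several senders race.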
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index 1fb296a996f3..0f969de10fab 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -192,7 +192,7 @@ struct smu10_clock_voltage_dependency_record {
struct smu10_voltage_dependency_table {
uint32_t count;
- struct smu10_clock_voltage_dependency_record entries[1];
+ struct smu10_clock_voltage_dependency_record entries[];
};
struct smu10_clock_voltage_information {
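
The entries[1] to entries[] change above replaces a pre-C99 one-element array with a flexible array member. Allocation sites would then size the table with the struct_size() helper from <linux/overflow.h>; a hypothetical example (the table/count variable names are illustrative, not taken from this file):

	/* Hypothetical: allocate a smu10_voltage_dependency_table holding
	 * 'count' records; struct_size() does the sizeof-plus-multiply with
	 * overflow checking. */
	table = kzalloc(struct_size(table, entries, count), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	table->count = count;

Beyond sizing, the flexible-array form exposes the real bounds to the compiler and fortify checks, which the old entries[1] idiom hid.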
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 683b29a99366..f2bda3bcbbde 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -29,14 +29,16 @@ static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_UVDDPM_Enable :
- PPSMC_MSG_UVDDPM_Disable);
+ PPSMC_MSG_UVDDPM_Disable,
+ NULL);
}
static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_VCEDPM_Enable :
- PPSMC_MSG_VCEDPM_Disable);
+ PPSMC_MSG_VCEDPM_Disable,
+ NULL);
}
static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
@@ -57,7 +59,8 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_UVDPowerOFF);
+ PPSMC_MSG_UVDPowerOFF,
+ NULL);
return 0;
}
@@ -67,10 +70,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_UVDPowerON, 1);
+ PPSMC_MSG_UVDPowerON, 1, NULL);
} else {
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_UVDPowerON, 0);
+ PPSMC_MSG_UVDPowerON, 0, NULL);
}
}
@@ -81,7 +84,8 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerOFF);
+ PPSMC_MSG_VCEPowerOFF,
+ NULL);
return 0;
}
@@ -89,7 +93,8 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerON);
+ PPSMC_MSG_VCEPowerON,
+ NULL);
return 0;
}
@@ -181,7 +186,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -191,7 +196,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -204,7 +209,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -215,7 +220,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -228,7 +233,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_RLC_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -241,7 +246,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CP_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -255,7 +260,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
CG_GFX_OTHERS_MGCG_MASK);
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -275,7 +280,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -285,7 +290,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -298,7 +303,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -309,7 +314,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -322,7 +327,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -332,7 +337,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -345,7 +350,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -356,7 +361,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -369,7 +374,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -380,7 +385,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -393,7 +398,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_ROM_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -423,8 +428,10 @@ int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
if (enable)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GFX_CU_PG_ENABLE,
- adev->gfx.cu_info.number);
+ adev->gfx.cu_info.number,
+ NULL);
else
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GFX_CU_PG_DISABLE);
+ PPSMC_MSG_GFX_CU_PG_DISABLE,
+ NULL);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 4795eb66b2b2..753cb2cf6b77 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -186,7 +186,7 @@ static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
}
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);
return 0;
}
@@ -493,7 +493,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}
/**
@@ -979,7 +979,8 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableVRHotGPIOInterrupt);
+ PPSMC_MSG_EnableVRHotGPIOInterrupt,
+ NULL);
return 0;
}
@@ -996,7 +997,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
return 0;
}
@@ -1006,7 +1007,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
return 0;
}
@@ -1015,13 +1016,14 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
PP_ASSERT_WITH_CODE(false,
"Attempt to enable Master Deep Sleep switch failed!",
return -EINVAL);
} else {
if (smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PPSMC_MSG_MASTER_DeepSleep_OFF,
+ NULL)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
return -EINVAL);
@@ -1036,7 +1038,8 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
if (smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PPSMC_MSG_MASTER_DeepSleep_OFF,
+ NULL)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
return -EINVAL);
@@ -1089,7 +1092,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
smu7_disable_sclk_vce_handshake(hwmgr);
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
+ (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
"Failed to enable SCLK DPM during DPM Start Function!",
return -EINVAL);
}
@@ -1101,7 +1104,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MCLKDPM_Enable)),
+ PPSMC_MSG_MCLKDPM_Enable,
+ NULL)),
"Failed to enable MCLK DPM during DPM Start Function!",
return -EINVAL);
@@ -1172,7 +1176,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PCIeDPM_Enable)),
+ PPSMC_MSG_PCIeDPM_Enable,
+ NULL)),
"Failed to enable pcie DPM during DPM Start Function!",
return -EINVAL);
}
@@ -1180,7 +1185,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_Falcon_QuickTransition)) {
PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableACDCGPIOInterrupt)),
+ PPSMC_MSG_EnableACDCGPIOInterrupt,
+ NULL)),
"Failed to enable AC DC GPIO Interrupt!",
);
}
@@ -1197,7 +1203,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable SCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
}
/* disable MCLK dpm */
@@ -1205,7 +1211,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable MCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
}
return 0;
@@ -1226,7 +1232,8 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
if (!data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
(smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PCIeDPM_Disable) == 0),
+ PPSMC_MSG_PCIeDPM_Disable,
+ NULL) == 0),
"Failed to disable pcie DPM during DPM Stop Function!",
return -EINVAL);
}
@@ -1237,7 +1244,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
"Trying to disable voltage DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
return 0;
}
@@ -1388,7 +1395,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
+ smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1446,14 +1453,14 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr, PPSMC_MSG_EnableAvfs),
+ hwmgr, PPSMC_MSG_EnableAvfs, NULL),
"Failed to enable AVFS!",
return -EINVAL);
}
} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr, PPSMC_MSG_DisableAvfs),
+ hwmgr, PPSMC_MSG_DisableAvfs, NULL),
"Failed to disable AVFS!",
return -EINVAL);
}
@@ -2609,7 +2616,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
if (level)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PCIeDPM_ForceLevel, level);
+ PPSMC_MSG_PCIeDPM_ForceLevel, level,
+ NULL);
}
}
@@ -2623,7 +2631,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
if (level)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
}
@@ -2637,7 +2646,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
if (level)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
}
@@ -2656,14 +2666,16 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask,
+ NULL);
}
if (!data->mclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask,
+ NULL);
}
return 0;
@@ -2678,7 +2690,8 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
if (!data->pcie_dpm_key_disabled) {
smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
+ PPSMC_MSG_PCIeDPM_UnForceLevel,
+ NULL);
}
return smu7_upload_dpm_level_enable_mask(hwmgr);
@@ -2696,7 +2709,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_level_enable_mask.sclk_dpm_enable_mask);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
@@ -2706,7 +2720,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_level_enable_mask.mclk_dpm_enable_mask);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
}
@@ -2716,7 +2731,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_level_enable_mask.pcie_dpm_enable_mask);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
- (level));
+ (level),
+ NULL);
}
}
@@ -3495,21 +3511,20 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
(adev->asic_type != CHIP_BONAIRE) &&
(adev->asic_type != CHIP_FIJI) &&
(adev->asic_type != CHIP_TONGA)) {
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
*query = tmp;
if (tmp != 0)
return 0;
}
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_PM_STATUS_95, 0);
for (i = 0; i < 10; i++) {
msleep(500);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
tmp = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC,
ixSMU_PM_STATUS_95);
@@ -3534,14 +3549,12 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
*((uint32_t *)value) = mclk;
*size = 4;
return 0;
@@ -3730,7 +3743,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
+ PPSMC_MSG_SCLKDPM_FreezeLevel,
+ NULL),
"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3742,7 +3756,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
+ PPSMC_MSG_MCLKDPM_FreezeLevel,
+ NULL),
"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3884,7 +3899,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_SCLKDPM_UnfreezeLevel,
+ NULL),
"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3896,7 +3912,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel,
+ NULL),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3949,12 +3966,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
if (hwmgr->chip_id == CHIP_VEGAM)
smum_send_msg_to_smc_with_parameter(hwmgr,
- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
+ NULL);
else
smum_send_msg_to_smc_with_parameter(hwmgr,
- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
+ NULL);
}
- return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+ return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL;
}
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
@@ -4040,7 +4059,8 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+ PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
+ NULL);
}
static int
@@ -4048,7 +4068,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
- return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
+ return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ? 0 : -1;
}
static int
@@ -4132,7 +4152,8 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+ PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
+ NULL);
}
static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
@@ -4262,14 +4283,14 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
if ((hwmgr->chip_id == CHIP_POLARIS10) ||
(hwmgr->chip_id == CHIP_POLARIS11) ||
(hwmgr->chip_id == CHIP_POLARIS12))
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
} else {
data->mem_latency_high = 330;
data->mem_latency_low = 330;
if ((hwmgr->chip_id == CHIP_POLARIS10) ||
(hwmgr->chip_id == CHIP_POLARIS11) ||
(hwmgr->chip_id == CHIP_POLARIS12))
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
}
return 0;
@@ -4413,13 +4434,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
if (!data->sclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
+ NULL);
break;
case PP_MCLK:
if (!data->mclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
+ NULL);
break;
case PP_PCIE:
{
@@ -4427,11 +4450,13 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
if (!data->pcie_dpm_key_disabled) {
if (fls(tmp) != ffs(tmp))
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
+ NULL);
else
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
- fls(tmp) - 1);
+ fls(tmp) - 1,
+ NULL);
}
break;
}
@@ -4457,8 +4482,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
for (i = 0; i < sclk_table->count; i++) {
if (clock > sclk_table->dpm_levels[i].value)
@@ -4473,8 +4497,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
for (i = 0; i < mclk_table->count; i++) {
if (clock > mclk_table->dpm_levels[i].value)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 58f5589aaf12..5d4971576111 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -887,7 +887,10 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
didt_block |= block_en << TCP_Enable_SHIFT;
if (enable)
- result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_Didt_Block_Function,
+ didt_block,
+ NULL);
return result;
}
@@ -1009,7 +1012,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id == CHIP_POLARIS11) {
result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_EnableDpmDidt));
+ (uint16_t)(PPSMC_MSG_EnableDpmDidt),
+ NULL);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to enable DPM DIDT.", goto error);
}
@@ -1042,7 +1046,8 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
goto error);
if (hwmgr->chip_id == CHIP_POLARIS11) {
result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_DisableDpmDidt));
+ (uint16_t)(PPSMC_MSG_DisableDpmDidt),
+ NULL);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", goto error);
}
@@ -1063,7 +1068,8 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_CAC)) {
int smc_result;
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_EnableCac));
+ (uint16_t)(PPSMC_MSG_EnableCac),
+ NULL);
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
@@ -1079,7 +1085,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) {
int smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_DisableCac));
+ (uint16_t)(PPSMC_MSG_DisableCac),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable CAC in SMC.", result = -1);
@@ -1095,7 +1102,9 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PkgPwrSetLimit, n<<8);
+ PPSMC_MSG_PkgPwrSetLimit,
+ n<<8,
+ NULL);
return 0;
}
@@ -1103,7 +1112,9 @@ static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
uint32_t target_tdp)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+ PPSMC_MSG_OverDriveSetTargetTdp,
+ target_tdp,
+ NULL);
}
int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
@@ -1124,7 +1135,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->enable_tdc_limit_feature) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_TDCLimitEnable));
+ (uint16_t)(PPSMC_MSG_TDCLimitEnable),
+ NULL);
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable TDCLimit in SMC.", result = -1;);
if (0 == smc_result)
@@ -1134,7 +1146,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
if (data->enable_pkg_pwr_tracking_feature) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable),
+ NULL);
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
@@ -1163,7 +1176,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_TDCLimit) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+ (uint16_t)(PPSMC_MSG_TDCLimitDisable),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable TDCLimit in SMC.",
result = smc_result);
@@ -1172,7 +1186,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_DTE) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_DisableDTE));
+ (uint16_t)(PPSMC_MSG_DisableDTE),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable DTE in SMC.",
result = smc_result);
@@ -1181,7 +1196,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable PkgPwrTracking in SMC.",
result = smc_result);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 5bdc0df5a9f4..0b30f73649a8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -151,8 +151,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int result;
if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+ FAN_CONTROL_FUZZY, NULL);
if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM))
hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
@@ -164,8 +164,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
advanceFanControlParameters.usMaxFanPWM);
} else {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+ FAN_CONTROL_TABLE, NULL);
}
if (!result && hwmgr->thermal_controller.
@@ -173,7 +173,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
- advanceFanControlParameters.ucTargetTemperature);
+ advanceFanControlParameters.ucTargetTemperature,
+ NULL);
hwmgr->fan_ctrl_enabled = true;
return result;
@@ -183,7 +184,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
hwmgr->fan_ctrl_enabled = false;
- return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl, NULL);
}
/**
@@ -372,7 +373,7 @@ static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable, NULL);
}
/**
@@ -390,7 +391,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable, NULL);
}
/**
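
In smu7_fan_ctrl_start_smc_fan_control() above, the fan-control mode now travels as the message parameter instead of being staged in mmSMC_MSG_ARG_0 before the send. The practical difference, sketched under the same assumed prototypes as earlier:

	/* Before: two steps another sender could interleave between. */
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
	result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);

	/* After: argument and message issued as one helper call. */
	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
						     FAN_CONTROL_FUZZY, NULL);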
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 019d6a206492..a6c6a793e98e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -162,8 +162,10 @@ static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
if (data->max_sclk_level == 0) {
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
- data->max_sclk_level = smum_get_argument(hwmgr) + 1;
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMaxSclkLevel,
+ &data->max_sclk_level);
+ data->max_sclk_level += 1;
}
return data->max_sclk_level;
@@ -580,7 +582,8 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_uvd_clock_voltage_dependency_table *table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
- unsigned long clock = 0, level;
+ unsigned long clock = 0;
+ uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
@@ -588,8 +591,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
data->uvd_dpm.soft_min_clk = 0;
data->uvd_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
- level = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
if (level < table->count)
clock = table->entries[level].vclk;
@@ -607,7 +609,8 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_vce_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
- unsigned long clock = 0, level;
+ unsigned long clock = 0;
+ uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
@@ -615,8 +618,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
data->vce_dpm.soft_min_clk = 0;
data->vce_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
- level = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
if (level < table->count)
clock = table->entries[level].ecclk;
@@ -634,7 +636,8 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_acp_clock_voltage_dependency_table *table =
hwmgr->dyn_state.acp_clock_voltage_dependency_table;
- unsigned long clock = 0, level;
+ unsigned long clock = 0;
+ uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
@@ -642,8 +645,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
data->acp_dpm.soft_min_clk = 0;
data->acp_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
- level = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
if (level < table->count)
clock = table->entries[level].acpclk;
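
The unsigned long to uint32_t narrowing of `level` in the three smu8 hunks above is required by the new helper, not cosmetic: the response is stored through a uint32_t *, so passing the address of an unsigned long (64-bit on most kernel targets) would fill only the low half and leave the rest stale. A sketch of the bug the type change avoids:

	unsigned long level;	/* 64-bit on LP64 kernels */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel,
			     (uint32_t *)&level);	/* writes only 4 bytes */
	/* bits 32..63 of 'level' remain uninitialized here */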
@@ -665,7 +667,7 @@ static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
#ifdef CONFIG_DRM_AMD_ACP
data->acp_power_gated = false;
#else
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
data->acp_power_gated = true;
#endif
@@ -708,7 +710,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkHardMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.hard_min_clk,
- PPSMC_MSG_SetSclkHardMin));
+ PPSMC_MSG_SetSclkHardMin),
+ NULL);
}
clock = data->sclk_dpm.soft_min_clk;
@@ -731,7 +734,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -742,7 +746,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
}
return 0;
@@ -760,7 +765,8 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepSclk,
- clks);
+ clks,
+ NULL);
}
return 0;
@@ -773,7 +779,8 @@ static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetWatermarkFrequency,
- data->sclk_dpm.soft_max_clk);
+ data->sclk_dpm.soft_max_clk,
+ NULL);
return 0;
}
@@ -788,13 +795,15 @@ static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable,
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableLowMemoryPstate,
- (lock ? 1 : 0));
+ (lock ? 1 : 0),
+ NULL);
} else {
PP_DBG_LOG("disable Low Memory PState.\n");
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableLowMemoryPstate,
- (lock ? 1 : 0));
+ (lock ? 1 : 0),
+ NULL);
}
}
@@ -814,7 +823,8 @@ static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
- dpm_features);
+ dpm_features,
+ NULL);
if (ret == 0)
data->is_nb_dpm_enabled = false;
}
@@ -835,7 +845,8 @@ static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
- dpm_features);
+ dpm_features,
+ NULL);
if (ret == 0)
data->is_nb_dpm_enabled = true;
}
@@ -953,7 +964,8 @@ static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
- SCLK_DPM_MASK);
+ SCLK_DPM_MASK,
+ NULL);
}
static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
@@ -967,7 +979,8 @@ static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
- dpm_features);
+ dpm_features,
+ NULL);
}
return ret;
}
@@ -983,13 +996,15 @@ static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
return 0;
}
@@ -1127,13 +1142,15 @@ static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
return 0;
}
@@ -1167,13 +1184,15 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
return 0;
}
@@ -1186,13 +1205,15 @@ static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
return 0;
}
@@ -1227,7 +1248,7 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL);
return 0;
}
@@ -1237,7 +1258,8 @@ static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_UVDPowerON,
- PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
+ PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0,
+ NULL);
}
return 0;
@@ -1259,15 +1281,20 @@ static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetEclkHardMin,
smu8_get_eclk_level(hwmgr,
data->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
+ PPSMC_MSG_SetEclkHardMin),
+ NULL);
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkHardMin, 0);
+ PPSMC_MSG_SetEclkHardMin,
+ 0,
+ NULL);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkSoftMin, 1);
+ PPSMC_MSG_SetEclkSoftMin,
+ 1,
+ NULL);
}
return 0;
}
@@ -1276,7 +1303,8 @@ static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerOFF);
+ PPSMC_MSG_VCEPowerOFF,
+ NULL);
return 0;
}
@@ -1284,7 +1312,8 @@ static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerON);
+ PPSMC_MSG_VCEPowerON,
+ NULL);
return 0;
}
@@ -1435,7 +1464,8 @@ static void smu8_hw_print_display_cfg(
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplaySizePowerParams,
- data);
+ data,
+ NULL);
}
return 0;
@@ -1497,10 +1527,12 @@ static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
case PP_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- mask);
+ mask,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- mask);
+ mask,
+ NULL);
break;
default:
break;
@@ -1753,9 +1785,10 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
+ result = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetAverageGraphicsActivity,
+ &activity_percent);
if (0 == result) {
- activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
activity_percent = activity_percent > 100 ? 100 : activity_percent;
} else {
activity_percent = 50;
@@ -1785,20 +1818,25 @@ static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrHiVirtual,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrLoVirtual,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrHiPhysical,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrLoPhysical,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramBufferSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -1827,12 +1865,16 @@ static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
data->dpm_flags |= DPMFlags_UVD_Enabled;
dpm_features |= UVD_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features,
+ NULL);
} else {
dpm_features |= UVD_DPM_MASK;
data->dpm_flags &= ~DPMFlags_UVD_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_DisableAllSmuFeatures,
+ dpm_features,
+ NULL);
}
return 0;
}
@@ -1854,7 +1896,8 @@ int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
PPSMC_MSG_SetUvdHardMin,
smu8_get_uvd_level(hwmgr,
data->uvd_dpm.hard_min_clk,
- PPSMC_MSG_SetUvdHardMin));
+ PPSMC_MSG_SetUvdHardMin),
+ NULL);
smu8_enable_disable_uvd_dpm(hwmgr, true);
} else {
@@ -1878,12 +1921,16 @@ static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
data->dpm_flags |= DPMFlags_VCE_Enabled;
dpm_features |= VCE_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features,
+ NULL);
} else {
dpm_features |= VCE_DPM_MASK;
data->dpm_flags &= ~DPMFlags_VCE_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_DisableAllSmuFeatures,
+ dpm_features,
+ NULL);
}
return 0;
@@ -1898,9 +1945,9 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
return;
if (bgate)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
else
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
}
static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index d09690fca452..60b5ca974356 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -22,6 +22,7 @@
*/
#include <linux/pci.h>
+#include <linux/reboot.h>
#include "hwmgr.h"
#include "pp_debug.h"
@@ -557,7 +558,9 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
if (req_vddc <= vddc_table->entries[i].vddc) {
req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_VddC_Request, req_volt);
+ PPSMC_MSG_VddC_Request,
+ req_volt,
+ NULL);
return;
}
}
@@ -593,37 +596,43 @@ int phm_irq_process(struct amdgpu_device *adev,
uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
- if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
- pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
- pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
- pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ /*
+ * SW CTF just occurred.
+ * Try to do a graceful shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+ } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+ else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
+ dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ /*
+ * HW CTF just occurred. Shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+ orderly_poweroff(true);
+ }
} else if (client_id == SOC15_IH_CLIENTID_THM) {
- if (src_id == 0)
- pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- else
- pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO)
- pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ if (src_id == 0) {
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ /*
+ * SW CTF just occurred.
+ * Try to do a graceful shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+ } else
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+ } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+ dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ /*
+ * HW CTF just occurred. Shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+ orderly_poweroff(true);
+ }
return 0;
}
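
The phm_irq_process() rework above upgrades the thermal warnings from per-PCIe-address pr_warn() messages to dev_emerg() logging and, for both the software CTF (over-temperature) and hardware CTF interrupt sources, immediately requests a shutdown so further thermal damage is avoided; the new <linux/reboot.h> include supplies the declaration used for that. The shared pattern, condensed (message text abbreviated):

	dev_emerg(adev->dev, "ERROR: GPU ... CTF detected!\n");
	/* orderly_poweroff(true): try a graceful userspace-driven shutdown,
	 * forcing a kernel power-off if that orderly path fails. */
	orderly_poweroff(true);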
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
index d168af4a4d78..46bb16c29cf6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
@@ -98,7 +98,7 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if (state == BACO_STATE_IN) {
if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
ARRAY_SIZE(pre_baco_tbl))) {
- if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco, NULL))
return -EINVAL;
if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f29f95be1e56..675c7cab7cfc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -484,8 +484,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- hwmgr->smu_version = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetSmuVersion,
+ &hwmgr->smu_version);
/* ACG firmware has major version 5 */
if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
@@ -503,10 +504,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_PCC_LIMIT].supported = true;
/* Get the SN to turn into a Unique ID */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
@@ -993,7 +992,10 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to set up led dpm config!",
return -EINVAL);
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_NumOfDisplays,
+ 0,
+ NULL);
return 0;
}
@@ -2303,16 +2305,15 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
- agc_btc_response = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop, NULL);
else if (2 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop, NULL);
if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = true;
@@ -2429,11 +2430,9 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = hwmgr->backend;
AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
serial_number = ((uint64_t)bottom32 << 32) | top32;
@@ -2610,14 +2609,16 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
if (0 != boot_up_values.usVddc) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage,
- (boot_up_values.usVddc * 4));
+ (boot_up_values.usVddc * 4),
+ NULL);
data->vbios_boot_state.bsoc_vddc_lock = true;
} else {
data->vbios_boot_state.bsoc_vddc_lock = false;
}
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+ NULL);
}
result = vega10_populate_avfs_parameters(hwmgr);
@@ -2904,7 +2905,8 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
if (data->vbios_boot_state.bsoc_vddc_lock) {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetFloorSocVoltage, 0);
+ PPSMC_MSG_SetFloorSocVoltage, 0,
+ NULL);
data->vbios_boot_state.bsoc_vddc_lock = false;
}
@@ -2947,7 +2949,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
vega10_enable_disable_PCC_limit_feature(hwmgr, true);
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
+ PPSMC_MSG_ConfigureTelemetry, data->config_telemetry,
+ NULL);
tmp_result = vega10_construct_voltage_tables(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3528,7 +3531,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
data->dpm_table.gfx_table.dpm_state.soft_min_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
- data->smc_state_table.gfx_boot_level);
+ data->smc_state_table.gfx_boot_level,
+ NULL);
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->smc_state_table.gfx_boot_level;
@@ -3543,11 +3547,13 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
- socclk_idx);
+ socclk_idx,
+ NULL);
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
- data->smc_state_table.mem_boot_level);
+ data->smc_state_table.mem_boot_level,
+ NULL);
}
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->smc_state_table.mem_boot_level;
@@ -3562,7 +3568,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_min_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
- data->smc_state_table.soc_boot_level);
+ data->smc_state_table.soc_boot_level,
+ NULL);
data->dpm_table.soc_table.dpm_state.soft_min_level =
data->smc_state_table.soc_boot_level;
}
@@ -3582,7 +3589,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
data->dpm_table.gfx_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxclkByIndex,
- data->smc_state_table.gfx_max_level);
+ data->smc_state_table.gfx_max_level,
+ NULL);
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->smc_state_table.gfx_max_level;
}
@@ -3593,7 +3601,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxUclkByIndex,
- data->smc_state_table.mem_max_level);
+ data->smc_state_table.mem_max_level,
+ NULL);
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->smc_state_table.mem_max_level;
}
@@ -3607,7 +3616,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByIndex,
- data->smc_state_table.soc_max_level);
+ data->smc_state_table.soc_max_level,
+ NULL);
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->smc_state_table.soc_max_level;
}
@@ -3694,7 +3704,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
/* This message will also enable SmcToHost Interrupt */
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetLowGfxclkInterruptThreshold,
- (uint32_t)low_sclk_interrupt_threshold);
+ (uint32_t)low_sclk_interrupt_threshold,
+ NULL);
}
return 0;
@@ -3801,8 +3812,7 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
if (!query)
return -EINVAL;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
- value = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
/* SMC returns actual watts; to stay consistent with legacy ASICs, the low 8 bits are treated as 8 fractional bits */
*query = value << 8;
@@ -3822,13 +3832,11 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
- sclk_mhz = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz);
*((uint32_t *)value) = sclk_mhz * 100;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
- mclk_idx = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx);
if (mclk_idx < dpm_table->mem_table.count) {
*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
*size = 4;
@@ -3837,8 +3845,8 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
}
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
- activity_percent = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0,
+ &activity_percent);
*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
*size = 4;
break;
@@ -3847,14 +3855,14 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
- *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot, (uint32_t *)value);
+ *((uint32_t *)value) = *((uint32_t *)value) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
- *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM, (uint32_t *)value);
+ *((uint32_t *)value) = *((uint32_t *)value) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
@@ -3893,7 +3901,8 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
+ has_disp ? 1 : 0,
+ NULL);
}
int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3928,7 +3937,8 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
clk_request = (clk_freq << 16) | clk_select;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_RequestDisplayClockByFreq,
- clk_request);
+ clk_request,
+ NULL);
}
return result;
@@ -3990,7 +4000,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR / 100);
+ min_clocks.dcefClockInSR / 100,
+ NULL);
} else {
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
@@ -4000,7 +4011,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (min_clocks.memoryClock != 0) {
idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx,
+ NULL);
data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
}
@@ -4541,8 +4553,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.sclk_dpm_key_disabled)
break;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
if (hwmgr->pp_one_vf &&
(hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
@@ -4558,8 +4569,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.mclk_dpm_key_disabled)
break;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4570,8 +4580,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.socclk_dpm_key_disabled)
break;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
for (i = 0; i < soc_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4583,8 +4592,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK);
- now = smum_get_argument(hwmgr);
+ PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
for (i = 0; i < dcef_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4593,8 +4601,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
"*" : "");
break;
case PP_PCIE:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
for (i = 0; i < pcie_table->count; i++)
size += sprintf(buf + size, "%d: %s %s\n", i,
@@ -4658,7 +4665,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if (data->water_marks_bitmap & WaterMarksLoaded) {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+ PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+ NULL);
}
return result;
@@ -4924,21 +4932,26 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -5040,12 +5053,14 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetCustomGfxDpmParameters,
busy_set_point | FPS<<8 |
- use_rlc_busy << 16 | min_active_level<<24);
+ use_rlc_busy << 16 | min_active_level<<24,
+ NULL);
}
out:
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1 << power_profile_mode);
+ 1 << power_profile_mode,
+ NULL);
hwmgr->power_profile_mode = power_profile_mode;
return 0;
@@ -5302,7 +5317,7 @@ static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
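The conversion above follows a single pattern throughout vega10_hwmgr.c: smum_send_msg_to_smc() and smum_send_msg_to_smc_with_parameter() gain a trailing response pointer, so reads that previously took two steps (send the message, then fetch the argument register with smum_get_argument()) collapse into one call, while write-only messages pass NULL. A minimal sketch of the two shapes; the wrapper functions here are illustrative only and not part of the patch:

	/* Read: the response comes back through the new pointer, fetched
	 * as part of the same call that issued the request (see the
	 * msg_lock added to struct pp_hwmgr further down).
	 */
	static int example_read_sclk(struct pp_hwmgr *hwmgr, uint32_t *sclk_mhz)
	{
	        /* Old, two-step form:
	         *   smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
	         *   *sclk_mhz = smum_get_argument(hwmgr);
	         */
	        return smum_send_msg_to_smc(hwmgr,
	                        PPSMC_MSG_GetAverageGfxclkActualFrequency,
	                        sclk_mhz);
	}

	/* Write-only: no reply wanted, so the response slot is NULL. */
	static int example_set_ppt_limit(struct pp_hwmgr *hwmgr, uint32_t n)
	{
	        return smum_send_msg_to_smc_with_parameter(hwmgr,
	                        PPSMC_MSG_SetPptLimit, n, NULL);
	}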
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 0a677d4bc87b..9757d47dd6b8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -651,18 +651,6 @@ static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMSEEDCThresholdConfig_Vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC THRESHOLD */
- { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] =
{
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -707,17 +695,6 @@ static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMGCEDCThresholdConfig_vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] =
{
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -925,7 +902,8 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
/* For Vega10, SMC does not support any mask yet. */
if (enable)
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info,
+ NULL);
}
@@ -1327,7 +1305,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->registry_data.enable_pkg_pwr_tracking_feature)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetPptLimit, n);
+ PPSMC_MSG_SetPptLimit, n,
+ NULL);
return 0;
}
@@ -1393,7 +1372,8 @@ static void vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+ NULL);
}
int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index ba8763daa380..7783c7fd7ccb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -31,8 +31,7 @@
static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm);
- *current_rpm = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm, current_rpm);
return 0;
}
@@ -520,7 +519,8 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
- (uint32_t)table->FanTargetTemperature);
+ (uint32_t)table->FanTargetTemperature,
+ NULL);
table->FanPwmMin = hwmgr->thermal_controller.
advanceFanControlParameters.usPWMMin * 255 / 100;
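In vega10_thermal.c the fan-RPM getter becomes a single call that writes straight into the caller's buffer. Note it still returns 0 unconditionally; a hypothetical variant (not in the patch) could now propagate the send status for free:

	static int vega10_get_current_rpm_checked(struct pp_hwmgr *hwmgr,
	                uint32_t *current_rpm)
	{
	        /* Return the messaging error instead of discarding it. */
	        return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm,
	                        current_rpm);
	}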
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
index 9d8ca94a8f0c..bc53cce4f32d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
@@ -96,7 +96,7 @@ int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if (state == BACO_STATE_IN) {
if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
ARRAY_SIZE(pre_baco_tbl))) {
- if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
+ if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0, NULL))
return -EINVAL;
if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index aca61d1ff3c2..f4d1692cccf3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -357,10 +357,8 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
}
/* Get the SN to turn into a Unique ID */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
@@ -483,16 +481,12 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ num_of_levels);
PP_ASSERT_WITH_CODE(!ret,
"[GetNumOfDpmLevel] failed to get dpm levels!",
return ret);
- *num_of_levels = smum_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(*num_of_levels > 0,
- "[GetNumOfDpmLevel] number of clk levels is invalid!",
- return -EINVAL);
-
return ret;
}
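Worth noting in this hunk: besides folding smum_get_argument() into the send, the "*num_of_levels > 0" assertion is dropped, so callers now see the raw SMC reply. If a zero level count must still be rejected, the check can be re-created at the call site — an illustrative snippet, assuming the function keeps its existing clk_id/num_of_levels signature:

	uint32_t levels = 0;
	int ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &levels);

	if (!ret && levels == 0)
	        ret = -EINVAL;  /* SMC answered, but reported no DPM levels */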
@@ -504,12 +498,11 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
*Lower 16 bits specify the level
*/
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
+ PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index),
+ clock) == 0,
"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
return -EINVAL);
- *clock = smum_get_argument(hwmgr);
-
return 0;
}
@@ -749,7 +742,8 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.vclock = boot_up_values.ulVClk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+ NULL);
}
memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
@@ -767,11 +761,10 @@ static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
uint32_t result;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &result) == 0,
"[Run_ACG_BTC] Attempt to run ACG BTC failed!",
return -EINVAL);
- result = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(result == 1,
"Failed to run ACG BTC!", return -EINVAL);
@@ -792,12 +785,14 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
(allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high,
+ NULL) == 0,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
return -1);
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low,
+ NULL) == 0,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
return -1);
@@ -828,7 +823,7 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
bool enabled;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, NULL) == 0,
"[EnableAllSMUFeatures] Failed to enable all smu features!",
return -1);
@@ -854,7 +849,7 @@ static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
bool enabled;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, NULL) == 0,
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return -1);
@@ -879,7 +874,8 @@ static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+ NULL);
}
static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
@@ -902,24 +898,24 @@ static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
{
/* AC Max */
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16),
+ &(clock->ACMax)) == 0,
"[GetClockRanges] Failed to get max ac clock from SMC!",
return -EINVAL);
- clock->ACMax = smum_get_argument(hwmgr);
/* AC Min */
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16),
+ &(clock->ACMin)) == 0,
"[GetClockRanges] Failed to get min ac clock from SMC!",
return -EINVAL);
- clock->ACMin = smum_get_argument(hwmgr);
/* DC Max */
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16),
+ &(clock->DCMax)) == 0,
"[GetClockRanges] Failed to get max dc clock from SMC!",
return -EINVAL);
- clock->DCMax = smum_get_argument(hwmgr);
return 0;
}
@@ -944,7 +940,7 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
int tmp_result, result = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0, NULL);
result = vega12_set_allowed_featuresmask(hwmgr);
PP_ASSERT_WITH_CODE(result == 0,
@@ -1043,7 +1039,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min gfxclk !",
return ret);
}
@@ -1052,14 +1049,16 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min memclk !",
return ret);
min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set hard min memclk !",
return ret);
}
@@ -1069,7 +1068,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min vclk!",
return ret);
@@ -1077,7 +1077,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min dclk!",
return ret);
}
@@ -1087,7 +1088,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min eclk!",
return ret);
}
@@ -1097,7 +1099,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min socclk!",
return ret);
}
@@ -1107,7 +1110,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set hard min dcefclk!",
return ret);
}
@@ -1127,7 +1131,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max gfxclk!",
return ret);
}
@@ -1137,7 +1142,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max memclk!",
return ret);
}
@@ -1147,14 +1153,16 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max vclk!",
return ret);
max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max dclk!",
return ret);
}
@@ -1164,7 +1172,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max eclk!",
return ret);
}
@@ -1174,7 +1183,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max socclk!",
return ret);
}
@@ -1287,10 +1297,10 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
*gfx_freq = 0;
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16),
+ &gfx_clk) == 0,
"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
return -EINVAL);
- gfx_clk = smum_get_argument(hwmgr);
*gfx_freq = gfx_clk * 100;
@@ -1304,10 +1314,10 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
*mclk_freq = 0;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16),
+ &mem_clk) == 0,
"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
return -EINVAL);
- mem_clk = smum_get_argument(hwmgr);
*mclk_freq = mem_clk * 100;
@@ -1420,7 +1430,8 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
+ has_disp ? 1 : 0,
+ NULL);
return 0;
}
@@ -1459,7 +1470,8 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
clk_request = (clk_select << 16) | clk_freq;
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- clk_request);
+ clk_request,
+ NULL);
}
}
@@ -1493,7 +1505,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR /100),
+ min_clocks.dcefClockInSR /100,
+ NULL),
"Attempt to set divider for DCEFCLK Failed!",
return -1);
} else {
@@ -2124,10 +2137,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
case PP_SOCCLK:
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16),
+ &now) == 0,
"Attempt to get Current SOCCLK Frequency Failed!",
return -EINVAL);
- now = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(
vega12_get_socclocks(hwmgr, &clocks) == 0,
@@ -2142,10 +2155,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
case PP_DCEFCLK:
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16),
+ &now) == 0,
"Attempt to get Current DCEFCLK Frequency Failed!",
return -EINVAL);
- now = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(
vega12_get_dcefclocks(hwmgr, &clocks) == 0,
@@ -2343,7 +2356,8 @@ static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ NULL)),
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
return ret);
}
@@ -2357,7 +2371,8 @@ static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
int ret = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0,
+ NULL);
ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
&data->dpm_table.mem_table);
@@ -2383,7 +2398,8 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
data->smu_features[GNLD_DPM_SOCCLK].supported)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+ PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+ NULL);
return result;
}
@@ -2555,21 +2571,26 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -2605,7 +2626,7 @@ static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
int ret = 0;
if (data->gfxoff_controlled_by_driver)
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff, NULL);
return ret;
}
@@ -2617,7 +2638,7 @@ static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
int ret = 0;
if (data->gfxoff_controlled_by_driver)
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff, NULL);
return ret;
}
@@ -2654,7 +2675,7 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
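The vega12 getters above all share one shape: send with a parameter, receive one uint32_t, report on failure. A hypothetical convenience wrapper (not part of the patch) that factors this out:

	static int smu_read_u32(struct pp_hwmgr *hwmgr, uint16_t msg,
	                uint32_t param, uint32_t *out)
	{
	        int ret = smum_send_msg_to_smc_with_parameter(hwmgr, msg,
	                        param, out);

	        if (ret)
	                pr_err("SMC message 0x%x failed: %d\n", msg, ret);
	        return ret;
	}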
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index 904eb2c9155b..c85806a6f62e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -32,10 +32,10 @@
static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentRpm),
+ PPSMC_MSG_GetCurrentRpm,
+ current_rpm),
"Attempt to get current RPM from SMC Failed!",
return -EINVAL);
- *current_rpm = smum_get_argument(hwmgr);
return 0;
}
@@ -259,7 +259,8 @@ int vega12_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
- (uint32_t)table->FanTargetTemperature);
+ (uint32_t)table->FanTargetTemperature,
+ NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index 9b5e72bdceca..2a28c9df15a0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -91,16 +91,16 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
if(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnterBaco, 0))
+ PPSMC_MSG_EnterBaco, 0, NULL))
return -EINVAL;
} else {
if(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnterBaco, 1))
+ PPSMC_MSG_EnterBaco, 1, NULL))
return -EINVAL;
}
} else if (state == BACO_STATE_OUT) {
- if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco, NULL))
return -EINVAL;
if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl,
ARRAY_SIZE(clean_baco_tbl)))
@@ -118,5 +118,5 @@ int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr)
if (ret)
return ret;
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI, NULL);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 08b6ba39a6d7..9ff470f1b826 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -92,8 +92,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
*/
data->registry_data.disallowed_features = 0xE0041C00;
/* ECC feature should be disabled on old SMUs */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- hwmgr->smu_version = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
if (hwmgr->smu_version < 0x282100)
data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
@@ -400,10 +399,8 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
}
/* Get the SN to turn into a Unique ID */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
@@ -527,16 +524,12 @@ static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ num_of_levels);
PP_ASSERT_WITH_CODE(!ret,
"[GetNumOfDpmLevel] failed to get dpm levels!",
return ret);
- *num_of_levels = smum_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(*num_of_levels > 0,
- "[GetNumOfDpmLevel] number of clk levels is invalid!",
- return -EINVAL);
-
return ret;
}
@@ -547,16 +540,12 @@ static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | index));
+ (clk_id << 16 | index),
+ clk);
PP_ASSERT_WITH_CODE(!ret,
"[GetDpmFreqByIndex] failed to get dpm freq by index!",
return ret);
- *clk = smum_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(*clk,
- "[GetDpmFreqByIndex] clk value is invalid!",
- return -EINVAL);
-
return ret;
}
@@ -813,7 +802,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+ NULL);
memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
@@ -868,7 +858,8 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
*/
smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverridePcieParameters, smu_pcie_arg);
+ PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+ NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
@@ -899,13 +890,13 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
& 0xFFFFFFFF));
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high);
+ PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
PP_ASSERT_WITH_CODE(!ret,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
return ret);
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low);
+ PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
PP_ASSERT_WITH_CODE(!ret,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
return ret);
@@ -915,12 +906,12 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
static int vega20_run_btc(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
}
static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
}
static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -933,7 +924,8 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures)) == 0,
+ PPSMC_MSG_EnableAllSmuFeatures,
+ NULL)) == 0,
"[EnableAllSMUFeatures] Failed to enable all smu features!",
return ret);
@@ -966,7 +958,8 @@ static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- 1);
+ 1,
+ NULL);
return 0;
}
@@ -978,7 +971,8 @@ static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFclkGfxClkRatio,
- data->registry_data.fclk_gfxclk_ratio);
+ data->registry_data.fclk_gfxclk_ratio,
+ NULL);
}
static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -991,7 +985,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures)) == 0,
+ PPSMC_MSG_DisableAllSmuFeatures,
+ NULL)) == 0,
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return ret);
@@ -1199,12 +1194,12 @@ static int vega20_od8_get_gfx_clock_base_voltage(
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetAVFSVoltageByDpm,
- ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+ voltage);
PP_ASSERT_WITH_CODE(!ret,
"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
return ret);
- *voltage = smum_get_argument(hwmgr);
*voltage = *voltage / VOLTAGE_SCALE;
return 0;
@@ -1560,19 +1555,19 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDcModeMaxDpmFreq,
- (clock_select << 16))) == 0,
+ (clock_select << 16),
+ clock)) == 0,
"[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
/* if DC limit is zero, return AC limit */
if (*clock == 0) {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetMaxDpmFreq,
- (clock_select << 16))) == 0,
+ (clock_select << 16),
+ clock)) == 0,
"[GetMaxSustainableClock] failed to get max AC clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
}
return 0;
@@ -1641,7 +1636,8 @@ static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
int result;
result = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SetMGpuFanBoostLimitRpm);
+ PPSMC_MSG_SetMGpuFanBoostLimitRpm,
+ NULL);
PP_ASSERT_WITH_CODE(!result,
"[EnableMgpuFan] Failed to enable mgpu fan boost!",
return result);
@@ -1669,7 +1665,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
int result = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0, NULL);
result = vega20_set_allowed_featuresmask(hwmgr);
PP_ASSERT_WITH_CODE(!result,
@@ -1740,12 +1736,12 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result);
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
- POWER_SOURCE_AC << 16);
+ POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
PP_ASSERT_WITH_CODE(!result,
"[GetPptLimit] get default PPT limit failed!",
return result);
hwmgr->power_limit =
- hwmgr->default_power_limit = smum_get_argument(hwmgr);
+ hwmgr->default_power_limit;
return 0;
}
@@ -1806,7 +1802,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min gfxclk !",
return ret);
}
@@ -1816,7 +1813,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min memclk !",
return ret);
}
@@ -1827,7 +1825,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min vclk!",
return ret);
@@ -1835,7 +1834,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min dclk!",
return ret);
}
@@ -1846,7 +1846,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min eclk!",
return ret);
}
@@ -1857,7 +1858,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min socclk!",
return ret);
}
@@ -1868,7 +1870,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_FCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_FCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min fclk!",
return ret);
}
@@ -1879,7 +1882,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set hard min dcefclk!",
return ret);
}
@@ -1900,7 +1904,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max gfxclk!",
return ret);
}
@@ -1911,7 +1916,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max memclk!",
return ret);
}
@@ -1922,14 +1928,16 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max vclk!",
return ret);
max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max dclk!",
return ret);
}
@@ -1940,7 +1948,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max eclk!",
return ret);
}
@@ -1951,7 +1960,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max socclk!",
return ret);
}
@@ -1962,7 +1972,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_FCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_FCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max fclk!",
return ret);
}
@@ -2006,17 +2017,17 @@ static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
if (max) {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
+ PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16),
+ clock)) == 0,
"[GetClockRanges] Failed to get max clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
} else {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetMinDpmFreq,
- (clock_select << 16))) == 0,
+ (clock_select << 16),
+ clock)) == 0,
"[GetClockRanges] Failed to get min clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
}
return 0;
@@ -2122,10 +2133,10 @@ static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
*clk_freq = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (clk_id << 16),
+ clk_freq)) == 0,
"[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
return ret);
- *clk_freq = smum_get_argument(hwmgr);
*clk_freq = *clk_freq * 100;
@@ -2276,7 +2287,8 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
clk_request = (clk_select << 16) | clk_freq;
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- clk_request);
+ clk_request,
+ NULL);
}
}
@@ -2312,7 +2324,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
if (data->smu_features[GNLD_DS_DCEFCLK].supported)
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR / 100)) == 0,
+ min_clocks.dcefClockInSR / 100,
+ NULL)) == 0,
"Attempt to set divider for DCEFCLK Failed!",
return ret);
} else {
@@ -2324,7 +2337,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ NULL)),
"[SetHardMinFreq] Set hard min uclk failed!",
return ret);
}
@@ -2656,7 +2670,8 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
return -EINVAL;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level,
+ NULL);
PP_ASSERT_WITH_CODE(!ret,
"Failed to set min link dpm level!",
return ret);
@@ -3140,7 +3155,7 @@ static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
@@ -3495,7 +3510,8 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ NULL)),
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
return ret);
}
@@ -3520,7 +3536,8 @@ static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)),
+ (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level,
+ NULL)),
"[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
return ret);
}
@@ -3534,7 +3551,7 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
int ret = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0, NULL);
ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
&data->dpm_table.mem_table);
@@ -3565,7 +3582,8 @@ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DPM_SOCCLK].supported) {
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays,
- hwmgr->display_config->num_display);
+ hwmgr->display_config->num_display,
+ NULL);
}
return result;
@@ -4082,7 +4100,8 @@ out:
workload_type =
conv_power_profile_to_pplib_workload(power_profile_mode);
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
hwmgr->power_profile_mode = power_profile_mode;
@@ -4098,21 +4117,26 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -4153,7 +4177,8 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
(acquire ?
PPSMC_MSG_RequestI2CBus :
PPSMC_MSG_ReleaseI2CBus),
- 0);
+ 0,
+ NULL);
PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
return res;
@@ -4170,7 +4195,8 @@ static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
+ NULL);
if (ret)
pr_err("SetDfCstate failed!\n");
@@ -4184,7 +4210,8 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetXgmiMode,
- pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+ NULL);
if (ret)
pr_err("SetXgmiPstate failed!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
index a0bfb65cc5d6..d7cc3d2d9e17 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
@@ -36,7 +36,8 @@ int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->smu_features[GNLD_PPT].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetPptLimit, n);
+ PPSMC_MSG_SetPptLimit, n,
+ NULL);
return 0;
}
@@ -51,7 +52,8 @@ static int vega20_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+ NULL);
}
int vega20_power_control_set_level(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
index ede54e87e287..7add2f60f49c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
@@ -106,10 +106,10 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentRpm)) == 0,
+ PPSMC_MSG_GetCurrentRpm,
+ current_rpm)) == 0,
"Attempt to get current RPM from SMC Failed!",
return ret);
- *current_rpm = smum_get_argument(hwmgr);
return 0;
}
@@ -329,7 +329,8 @@ static int vega20_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
- (uint32_t)table->FanTargetTemperature);
+ (uint32_t)table->FanTargetTemperature,
+ NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index ae2c318dd6fa..4d1c2a44a8b6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -405,7 +405,9 @@ struct smu_context
bool pm_enabled;
bool is_apu;
- uint32_t smc_if_version;
+ uint32_t smc_driver_if_version;
+ uint32_t smc_fw_if_version;
+ uint32_t smc_fw_version;
bool uploading_custom_pp_table;
bool dc_controlled_by_gpio;
@@ -489,6 +491,7 @@ struct pptable_funcs {
int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t dpm_level, uint32_t *freq);
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+ int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
int (*i2c_eeprom_init)(struct i2c_adapter *control);
void (*i2c_eeprom_fini)(struct i2c_adapter *control);
@@ -580,11 +583,6 @@ int smu_check_fw_status(struct smu_context *smu);
int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
-#define smu_i2c_eeprom_init(smu, control) \
- ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
-#define smu_i2c_eeprom_fini(smu, control) \
- ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
-
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
int smu_get_power_limit(struct smu_context *smu,
@@ -734,6 +732,7 @@ int smu_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
int smu_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state);
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
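amdgpu_smu.h grows a per-ASIC allow_xgmi_power_down hook plus a smu_allow_xgmi_power_down() entry point, splits the single smc_if_version into driver-IF/firmware-IF/firmware-version fields, and drops the smu_i2c_eeprom_init/fini macros. A plausible shape for the new wrapper — assumed here, since the actual body lives in amdgpu_smu.c and is not shown in this hunk:

	int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
	{
	        int ret = 0;

	        mutex_lock(&smu->mutex);
	        if (smu->ppt_funcs && smu->ppt_funcs->allow_xgmi_power_down)
	                ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	        mutex_unlock(&smu->mutex);

	        return ret;
	}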
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index f736d773f9d6..e07478b6ac04 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -114,7 +114,8 @@
#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A
#define PPSMC_MSG_DFCstateControl 0x3B
-#define PPSMC_Message_Count 0x3C
+#define PPSMC_MSG_GmiPwrDnControl 0x3D
+#define PPSMC_Message_Count 0x3E
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
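Arcturus gains a GmiPwrDnControl message at 0x3D (0x3C is left unassigned, presumably to stay aligned with the firmware's numbering) and the message count moves to 0x3E. A hypothetical use from the swSMU side, with the 0/1 enable encoding assumed by analogy with the neighbouring DFCstateControl message, and assuming the swSMU send helper mirrors the four-argument convention introduced here:

	/* en = true allows GMI (XGMI link) power-down, false forbids it. */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
	                en ? 1 : 0, NULL);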
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 2ffb666b97e6..15ed6cbdf366 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -743,6 +743,7 @@ struct pp_hwmgr {
bool pm_en;
bool pp_one_vf;
struct mutex smu_lock;
+ struct mutex msg_lock;
uint32_t pp_table_version;
void *device;
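The new msg_lock in struct pp_hwmgr pairs with the API change above: now that the response register is read inside the same call that issued the request, a mutex can keep the request/response exchange atomic against concurrent senders. A sketch of how the messaging core can use it — assumed, since smumgr.c itself is outside this excerpt:

	int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg,
	                uint32_t *resp)
	{
	        int ret;

	        mutex_lock(&hwmgr->msg_lock);
	        ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
	        if (!ret && resp)
	                *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
	        mutex_unlock(&hwmgr->msg_lock);

	        return ret;
	}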
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index ce5b5011c122..8b82059d97e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -82,8 +82,8 @@
// Other
#define FEATURE_OUT_OF_BAND_MONITOR_BIT 24
#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25
+#define FEATURE_PER_PART_VMIN_BIT 26
-#define FEATURE_SPARE_26_BIT 26
#define FEATURE_SPARE_27_BIT 27
#define FEATURE_SPARE_28_BIT 28
#define FEATURE_SPARE_29_BIT 29
@@ -154,6 +154,7 @@
#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
+#define FEATURE_PER_PART_VMIN_MASK (1 << FEATURE_PER_PART_VMIN_BIT )
//FIXME need updating
@@ -628,8 +629,14 @@ typedef struct {
uint16_t BasePerformanceFrequencyCap; //In Mhz
uint16_t MaxPerformanceFrequencyCap; //In Mhz
+ // Per-Part Vmin
+ uint16_t VDDGFX_VminLow; // mv Q2
+ uint16_t VDDGFX_TVminLow; //Celcius
+ uint16_t VDDGFX_VminLow_HiTemp; // mv Q2
+ uint16_t VDDGFX_VminLow_LoTemp; // mv Q2
+
// SECTION: Reserved
- uint32_t Reserved[9];
+ uint32_t Reserved[7];
// SECTION: BOARD PARAMETERS
@@ -869,6 +876,10 @@ typedef struct {
uint8_t Mem_DownHystLimit;
uint16_t Mem_Fps;
+ uint32_t BusyThreshold; // Q16
+ uint32_t BusyHyst;
+ uint32_t IdleHyst;
+
uint32_t MmHubPadding[8]; // SMU internal use
} DpmActivityMonitorCoeffInt_t;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
index 2f85a34c0591..e9315eb5b48e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU12_DRIVER_IF_VERSION 11
+#define SMU12_DRIVER_IF_VERSION 14
typedef struct {
int32_t value;
@@ -154,15 +154,19 @@ typedef enum {
} CLOCK_IDs_e;
// Throttler Status Bitmask
-#define THROTTLER_STATUS_BIT_SPL 0
-#define THROTTLER_STATUS_BIT_FPPT 1
-#define THROTTLER_STATUS_BIT_SPPT 2
-#define THROTTLER_STATUS_BIT_SPPT_APU 3
-#define THROTTLER_STATUS_BIT_THM_CORE 4
-#define THROTTLER_STATUS_BIT_THM_GFX 5
-#define THROTTLER_STATUS_BIT_THM_SOC 6
-#define THROTTLER_STATUS_BIT_TDC_VDD 7
-#define THROTTLER_STATUS_BIT_TDC_SOC 8
+#define THROTTLER_STATUS_BIT_SPL 0
+#define THROTTLER_STATUS_BIT_FPPT 1
+#define THROTTLER_STATUS_BIT_SPPT 2
+#define THROTTLER_STATUS_BIT_SPPT_APU 3
+#define THROTTLER_STATUS_BIT_THM_CORE 4
+#define THROTTLER_STATUS_BIT_THM_GFX 5
+#define THROTTLER_STATUS_BIT_THM_SOC 6
+#define THROTTLER_STATUS_BIT_TDC_VDD 7
+#define THROTTLER_STATUS_BIT_TDC_SOC 8
+#define THROTTLER_STATUS_BIT_PROCHOT_CPU 9
+#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10
+#define THROTTLER_STATUS_BIT_EDC_CPU 11
+#define THROTTLER_STATUS_BIT_EDC_GFX 12
typedef struct {
uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz]
@@ -180,7 +184,7 @@ typedef struct {
uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC
uint16_t FanPwm; //[milli]
- uint16_t CurrentSocketPower; //[mW]
+ uint16_t CurrentSocketPower; //[W]
uint16_t CoreFrequency[8]; //[MHz]
uint16_t CorePower[8]; //[mW]
@@ -193,10 +197,16 @@ typedef struct {
uint16_t ThrottlerStatus;
uint16_t spare;
- uint16_t StapmOriginalLimit; //[mW]
- uint16_t StapmCurrentLimit; //[mW]
- uint16_t ApuPower; //[mW]
- uint16_t dGpuPower; //[mW]
+ uint16_t StapmOriginalLimit; //[W]
+ uint16_t StapmCurrentLimit; //[W]
+ uint16_t ApuPower; //[W]
+ uint16_t dGpuPower; //[W]
+
+ uint16_t VddTdcValue; //[mA]
+ uint16_t SocTdcValue; //[mA]
+ uint16_t VddEdcValue; //[mA]
+ uint16_t SocEdcValue; //[mA]
+ uint16_t reserve[2];
} SmuMetrics_t;
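The SMU12 metrics interface bumps to version 14: four PROCHOT/EDC throttler bits are appended, TDC/EDC current telemetry is added, and — easy to miss — CurrentSocketPower, the STAPM limits, ApuPower and dGpuPower all change units from mW to W, so any consumer still scaling by 1000 needs updating. An illustrative decode helper for the widened bitmask (not part of the interface header):

	#include <linux/bits.h>

	static bool smu12_throttled_by_prochot(const SmuMetrics_t *m)
	{
	        const uint16_t prochot = BIT(THROTTLER_STATUS_BIT_PROCHOT_CPU) |
	                                 BIT(THROTTLER_STATUS_BIT_PROCHOT_GFX);

	        return m->ThrottlerStatus & prochot;
	}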
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index a5b4df146713..ee7dac4693d4 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -170,6 +170,7 @@
__SMU_DUMMY_MAP(SetSoftMinJpeg), \
__SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
__SMU_DUMMY_MAP(DFCstateControl), \
+ __SMU_DUMMY_MAP(GmiPwrDnControl), \
__SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
__SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 674e426ed59b..6b3b451a8018 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,8 +27,8 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x14
+#define SMU11_DRIVER_IF_VERSION_NV10 0x36
#define SMU11_DRIVER_IF_VERSION_NV12 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
@@ -37,7 +37,6 @@
#define MP0_SRAM 0x03900000
#define MP1_Public 0x03b00000
#define MP1_SRAM 0x03c00004
-#define MP1_SMC_SIZE 0x40000
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index c5288831aa15..ad100b533d04 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -81,16 +81,15 @@ enum SMU10_TABLE_ID {
SMU10_CLOCKTABLE,
};
-extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
-
extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr);
-extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp);
extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter);
+ uint16_t msg, uint32_t parameter,
+ uint32_t *resp);
extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
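
[Editor's note] The prototype change above threads an optional response pointer through the smumgr layer: passing NULL keeps the old fire-and-forget behaviour, while a non-NULL pointer asks the wrapper to read back the SMC argument register. A hedged usage sketch against these signatures; it assumes the kernel context of this patch and is not standalone, and the two message IDs are borrowed from elsewhere in this series purely for illustration:

/* Sketch only -- relies on the pp_hwmgr context and message IDs
 * appearing elsewhere in this patch; not a standalone program. */
static int example_query_smu(struct pp_hwmgr *hwmgr)
{
        uint32_t version;
        int ret;

        /* No response wanted: pass NULL, as the converted callers do. */
        ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
        if (ret)
                return ret;

        /* Response wanted: the wrapper reads the argument register back. */
        return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion,
                                    &version);
}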
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 15030284b444..0c9be864d072 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -423,6 +423,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *smc_pptable = table_context->driver_pptable;
struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
+ struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
int index, ret;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -433,77 +434,33 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
if (ret)
return ret;
- memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
- sizeof(I2cControllerConfig_t) * NUM_I2C_CONTROLLERS);
-
- /* SVI2 Board Parameters */
- smc_pptable->MaxVoltageStepGfx = smc_dpm_table->MaxVoltageStepGfx;
- smc_pptable->MaxVoltageStepSoc = smc_dpm_table->MaxVoltageStepSoc;
- smc_pptable->VddGfxVrMapping = smc_dpm_table->VddGfxVrMapping;
- smc_pptable->VddSocVrMapping = smc_dpm_table->VddSocVrMapping;
- smc_pptable->VddMem0VrMapping = smc_dpm_table->VddMem0VrMapping;
- smc_pptable->VddMem1VrMapping = smc_dpm_table->VddMem1VrMapping;
- smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->GfxUlvPhaseSheddingMask;
- smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->SocUlvPhaseSheddingMask;
- smc_pptable->ExternalSensorPresent = smc_dpm_table->ExternalSensorPresent;
- smc_pptable->Padding8_V = smc_dpm_table->Padding8_V;
-
- /* Telemetry Settings */
- smc_pptable->GfxMaxCurrent = smc_dpm_table->GfxMaxCurrent;
- smc_pptable->GfxOffset = smc_dpm_table->GfxOffset;
- smc_pptable->Padding_TelemetryGfx = smc_dpm_table->Padding_TelemetryGfx;
- smc_pptable->SocMaxCurrent = smc_dpm_table->SocMaxCurrent;
- smc_pptable->SocOffset = smc_dpm_table->SocOffset;
- smc_pptable->Padding_TelemetrySoc = smc_dpm_table->Padding_TelemetrySoc;
- smc_pptable->Mem0MaxCurrent = smc_dpm_table->Mem0MaxCurrent;
- smc_pptable->Mem0Offset = smc_dpm_table->Mem0Offset;
- smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->Padding_TelemetryMem0;
- smc_pptable->Mem1MaxCurrent = smc_dpm_table->Mem1MaxCurrent;
- smc_pptable->Mem1Offset = smc_dpm_table->Mem1Offset;
- smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->Padding_TelemetryMem1;
-
- /* GPIO Settings */
- smc_pptable->AcDcGpio = smc_dpm_table->AcDcGpio;
- smc_pptable->AcDcPolarity = smc_dpm_table->AcDcPolarity;
- smc_pptable->VR0HotGpio = smc_dpm_table->VR0HotGpio;
- smc_pptable->VR0HotPolarity = smc_dpm_table->VR0HotPolarity;
- smc_pptable->VR1HotGpio = smc_dpm_table->VR1HotGpio;
- smc_pptable->VR1HotPolarity = smc_dpm_table->VR1HotPolarity;
- smc_pptable->GthrGpio = smc_dpm_table->GthrGpio;
- smc_pptable->GthrPolarity = smc_dpm_table->GthrPolarity;
-
- /* LED Display Settings */
- smc_pptable->LedPin0 = smc_dpm_table->LedPin0;
- smc_pptable->LedPin1 = smc_dpm_table->LedPin1;
- smc_pptable->LedPin2 = smc_dpm_table->LedPin2;
- smc_pptable->padding8_4 = smc_dpm_table->padding8_4;
-
- /* GFXCLK PLL Spread Spectrum */
- smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->PllGfxclkSpreadEnabled;
- smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->PllGfxclkSpreadPercent;
- smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->PllGfxclkSpreadFreq;
-
- /* GFXCLK DFLL Spread Spectrum */
- smc_pptable->DfllGfxclkSpreadEnabled = smc_dpm_table->DfllGfxclkSpreadEnabled;
- smc_pptable->DfllGfxclkSpreadPercent = smc_dpm_table->DfllGfxclkSpreadPercent;
- smc_pptable->DfllGfxclkSpreadFreq = smc_dpm_table->DfllGfxclkSpreadFreq;
-
- /* UCLK Spread Spectrum */
- smc_pptable->UclkSpreadEnabled = smc_dpm_table->UclkSpreadEnabled;
- smc_pptable->UclkSpreadPercent = smc_dpm_table->UclkSpreadPercent;
- smc_pptable->UclkSpreadFreq = smc_dpm_table->UclkSpreadFreq;
-
- /* SOCCLK Spread Spectrum */
- smc_pptable->SoclkSpreadEnabled = smc_dpm_table->SoclkSpreadEnabled;
- smc_pptable->SocclkSpreadPercent = smc_dpm_table->SocclkSpreadPercent;
- smc_pptable->SocclkSpreadFreq = smc_dpm_table->SocclkSpreadFreq;
-
- /* Total board power */
- smc_pptable->TotalBoardPower = smc_dpm_table->TotalBoardPower;
- smc_pptable->BoardPadding = smc_dpm_table->BoardPadding;
-
- /* Mvdd Svi2 Div Ratio Setting */
- smc_pptable->MvddRatio = smc_dpm_table->MvddRatio;
+ pr_info("smc_dpm_info table revision(format.content): %d.%d\n",
+ smc_dpm_table->table_header.format_revision,
+ smc_dpm_table->table_header.content_revision);
+
+ if (smc_dpm_table->table_header.format_revision != 4) {
+ pr_err("smc_dpm_info table format revision is not 4!\n");
+ return -EINVAL;
+ }
+
+ switch (smc_dpm_table->table_header.content_revision) {
+ case 5: /* nv10 and nv14 */
+ memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
+ sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+ break;
+ case 7: /* nv12 */
+ ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table_v4_7);
+ if (ret)
+ return ret;
+ memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
+ sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
+ break;
+ default:
+ pr_err("smc_dpm_info with unsupported content revision %d!\n",
+ smc_dpm_table->table_header.content_revision);
+ return -EINVAL;
+ }
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
/* TODO: remove it once SMU fw fix it */
@@ -1336,8 +1293,6 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
}
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size < 0)
- return -EINVAL;
ret = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
@@ -1860,7 +1815,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
int power_src;
if (!smu->power_limit) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT) &&
+ !amdgpu_sriov_vf(smu->adev)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
return -EINVAL;
@@ -2003,6 +1959,9 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
OverDriveTable_t *od_table, *boot_od_table;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
if (ret)
return ret;
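
[Editor's note] The dispatch added above accepts only format revision 4 with content revision 5 (nv10/nv14) or 7 (nv12), then copies everything after table_header in a single memcpy starting at I2cControllers. A standalone sketch of just the revision gate, with the values taken from the patch; the helper is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the revision dispatch rule encoded above. */
static int smc_dpm_info_supported(uint8_t format_rev, uint8_t content_rev)
{
        if (format_rev != 4)
                return 0;               /* driver returns -EINVAL */
        return content_rev == 5 ||      /* nv10 / nv14 */
               content_rev == 7;        /* nv12 */
}

int main(void)
{
        printf("4.5 supported: %d\n", smc_dpm_info_supported(4, 5));
        printf("4.6 supported: %d\n", smc_dpm_info_supported(4, 6));
        printf("3.5 supported: %d\n", smc_dpm_info_supported(3, 5));
        return 0;
}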
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index b0ed1b3fe79a..67476047c067 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -296,6 +296,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
for (i = 0; i < count; i++) {
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
+ if (!value)
+ continue;
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
if (cur_value == value)
@@ -847,7 +849,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
uint32_t i, size = 0;
int16_t workload_type = 0;
- if (!smu->pm_enabled || !buf)
+ if (!buf)
return -EINVAL;
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -898,7 +900,7 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
/*
- * Util now, the pmfw hasn't exported the interface of SMU
+ * Until now, the pmfw hasn't exported the interface of SMU
* feature mask to APU SKU so just force on all the feature
* at early initial stage.
*/
@@ -955,6 +957,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
void renoir_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &renoir_ppt_funcs;
- smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
+ smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 40c35bcc5a0a..c97444841abc 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -214,4 +214,9 @@ static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_typ
#define smu_set_power_source(smu, power_src) \
((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
+#define smu_i2c_eeprom_init(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : 0)
+#define smu_i2c_eeprom_fini(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : 0)
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 655ba4fb05dc..ae0361e225bb 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -23,6 +23,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/reboot.h>
#define SMU_11_0_PARTIAL_PPTABLE
@@ -57,7 +58,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
return 0;
}
@@ -65,7 +66,7 @@ static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
return 0;
}
@@ -75,7 +76,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
for (i = 0; i < timeout; i++) {
- cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
return cur_value == 0x1 ? 0 : -EIO;
@@ -83,7 +84,10 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
}
/* timeout means wrong logic */
- return -ETIME;
+ if (i == timeout)
+ return -ETIME;
+
+ return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
int
@@ -107,9 +111,9 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
goto out;
}
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
@@ -119,6 +123,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
smu_get_message_name(smu, msg), index, param, ret);
goto out;
}
+
if (read_arg) {
ret = smu_v11_0_read_arg(smu, read_arg);
if (ret) {
@@ -201,13 +206,15 @@ int smu_v11_0_load_microcode(struct smu_context *smu)
const struct smc_firmware_header_v1_0 *hdr;
uint32_t addr_start = MP1_SRAM;
uint32_t i;
+ uint32_t smc_fw_size;
uint32_t mp1_fw_flags;
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
src = (const uint32_t *)(adev->pm.fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ smc_fw_size = hdr->header.ucode_size_bytes;
- for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
+ for (i = 1; i < smc_fw_size/4 - 1; i++) {
WREG32_PCIE(addr_start, src[i]);
addr_start += 4;
}
@@ -264,23 +271,23 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
switch (smu->adev->asic_type) {
case CHIP_VEGA20:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20;
break;
case CHIP_ARCTURUS:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
break;
case CHIP_NAVI10:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
case CHIP_NAVI12:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
break;
case CHIP_NAVI14:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
default:
pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
break;
}
@@ -292,10 +299,10 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
* Considering above, we just leave user a warning message instead
* of halt driver loading.
*/
- if (if_version != smu->smc_if_version) {
+ if (if_version != smu->smc_driver_if_version) {
pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_if_version, if_version,
+ smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
pr_warn("SMU driver if version not matched\n");
}
@@ -479,8 +486,6 @@ int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (!smu->pm_enabled)
- return 0;
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
@@ -497,8 +502,6 @@ int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (!smu->pm_enabled)
- return 0;
if (!smu_power->power_context || smu_power->power_context_size == 0)
return -EINVAL;
@@ -730,8 +733,9 @@ int smu_v11_0_parse_pptable(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
+ /* during TDR we need to free and alloc the pptable */
if (table_context->driver_pptable)
- return -EINVAL;
+ kfree(table_context->driver_pptable);
table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
@@ -771,6 +775,9 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
@@ -783,8 +790,6 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
- if (!smu->pm_enabled)
- return 0;
if (!table_context)
return -EINVAL;
@@ -816,6 +821,9 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (tool_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
@@ -835,6 +843,9 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!smu->pm_enabled)
return ret;
@@ -849,6 +860,9 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
mutex_lock(&feature->mutex);
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
goto failed;
@@ -877,6 +891,9 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev) && !amdgpu_sriov_is_pp_one_vf(smu->adev))
+ return 0;
+
if (!feature_mask || num < 2)
return -EINVAL;
@@ -932,8 +949,12 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!smu->pm_enabled)
return ret;
+
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@ -948,9 +969,6 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
int ret = 0;
int clk_id;
- if (!smu->pm_enabled)
- return ret;
-
if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
(smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
return 0;
@@ -1096,6 +1114,9 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
int ret = 0;
uint32_t max_power_limit;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
max_power_limit = smu_v11_0_get_max_power_limit(smu);
if (n > max_power_limit) {
@@ -1205,9 +1226,6 @@ int smu_v11_0_start_thermal_control(struct smu_context *smu)
struct smu_temperature_range range;
struct amdgpu_device *adev = smu->adev;
- if (!smu->pm_enabled)
- return ret;
-
memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
ret = smu_get_thermal_temperature_range(smu, &range);
@@ -1321,9 +1339,6 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
enum smu_clk_type clk_select = 0;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
- if (!smu->pm_enabled)
- return -EINVAL;
-
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
switch (clk_type) {
@@ -1533,39 +1548,59 @@ static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+#define SMUIO_11_0__SRCID__SMUIO_GPIO19 83
+
static int smu_v11_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
+ /*
+ * ctxid is used to distinguish different
+ * events for SMCToHost interrupt.
+ */
+ uint32_t ctxid = entry->src_data[0];
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
- pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ /*
+ * SW CTF just occurred.
+ * Try to do a graceful shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
- pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
break;
default:
- pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
- src_id,
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
+ src_id);
break;
-
}
+ } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+ dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ /*
+ * HW CTF just occurred. Shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+ orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
- if (src_id == 0xfe)
- smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ if (src_id == 0xfe) {
+ switch (ctxid) {
+ case 0x3:
+ dev_dbg(adev->dev, "Switched to AC mode!\n");
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ break;
+ case 0x4:
+ dev_dbg(adev->dev, "Switched to DC mode!\n");
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ break;
+ }
+ }
}
return 0;
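
[Editor's note] The irq_process rework above now keys MP1 events off src_data[0] (ctxid) instead of acking src_id 0xfe unconditionally. A minimal standalone model of that dispatch, with printf standing in for the dev_dbg plus smu_v11_0_ack_ac_dc_interrupt() calls in the real handler:

#include <stdint.h>
#include <stdio.h>

/* Model of the new MP1 SMCToHost dispatch: src_id 0xfe carries the
 * event in ctxid; 0x3 = switched to AC, 0x4 = switched to DC. */
static void handle_mp1_irq(uint32_t src_id, uint32_t ctxid)
{
        if (src_id != 0xfe)
                return;
        switch (ctxid) {
        case 0x3:
                printf("Switched to AC mode, acking AC/DC interrupt\n");
                break;
        case 0x4:
                printf("Switched to DC mode, acking AC/DC interrupt\n");
                break;
        default:
                break;  /* other ctxids are ignored by this handler */
        }
}

int main(void)
{
        handle_mp1_irq(0xfe, 0x3);
        handle_mp1_irq(0xfe, 0x4);
        return 0;
}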
@@ -1605,6 +1640,13 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
if (ret)
return ret;
+ /* Register CTF(GPIO_19) interrupt */
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
+ SMUIO_11_0__SRCID__SMUIO_GPIO19,
+ irq_src);
+ if (ret)
+ return ret;
+
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
0xfe,
irq_src);
@@ -1833,6 +1875,9 @@ int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
uint32_t pcie_gen = 0, pcie_width = 0;
int ret;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 169ebdad87b8..4023d10fb49b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -32,13 +32,15 @@
#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
+#include "asic_reg/smuio/smuio_12_0_0_offset.h"
+#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"
-#define smnMP1_FIRMWARE_FLAGS 0x3010024
+// because some SMU12 based ASICs use older ip offset tables
+// we should undefine this register from the smuio12 header
+// to prevent confusion down the road
+#undef mmPWR_MISC_CNTL_STATUS
-#define mmSMUIO_GFX_MISC_CNTL 0x00c8
-#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
@@ -158,10 +160,10 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
* Considering above, we just leave user a warning message instead
* of halt driver loading.
*/
- if (if_version != smu->smc_if_version) {
+ if (if_version != smu->smc_driver_if_version) {
pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_if_version, if_version,
+ smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
pr_warn("SMU driver if version not matched\n");
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 868e2d5f6e62..85e5b1ed22c2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2780,7 +2780,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -2810,12 +2810,12 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -2845,7 +2845,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -2881,8 +2881,9 @@ static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
break;
}
- ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
- data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask,
+ NULL);
return 0;
}
@@ -2912,8 +2913,9 @@ static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
break;
}
- ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
- data->dpm_level_enable_mask.vce_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.vce_dpm_enable_mask,
+ NULL);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 32ebb383c456..ecb9ee46d6b3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -137,9 +137,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
INTERRUPTS_ENABLED, 1);
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
/* Wait for done bit to be set */
PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
@@ -203,8 +201,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+ if (0 != smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+ NULL)) {
pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
result = -EINVAL;
}
@@ -1913,7 +1912,8 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
if (mask)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_LedConfig,
- mask);
+ mask,
+ NULL);
return 0;
}
@@ -2220,14 +2220,16 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
+ advanceFanControlParameters.ucMinimumPWMLimit,
+ NULL);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+ NULL);
if (res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2242,7 +2244,7 @@ static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
if (!hwmgr->avfs_supported)
return 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
return 0;
}
@@ -2390,7 +2392,8 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -2422,7 +2425,8 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -2569,7 +2573,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -2599,12 +2603,12 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -2634,7 +2638,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -2649,6 +2653,7 @@ const struct pp_smumgr_func fiji_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = fiji_update_smc_table,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 732005c03a82..431ad2fd38df 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -2669,6 +2669,7 @@ const struct pp_smumgr_func iceland_smu_funcs = {
.request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.get_offsetof = iceland_get_offsetof,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 23c12018dbc1..c3d2e6dcf62a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -99,7 +99,8 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+ if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+ NULL)) {
pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@@ -2049,15 +2050,16 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
return 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting,
+ NULL);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
/* Apply avfs cks-off voltages to avoid the overshoot
* when switching to the highest sclk frequency
*/
if (data->apply_avfs_cks_off_voltage)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage, NULL);
return 0;
}
@@ -2158,14 +2160,16 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
+ advanceFanControlParameters.ucMinimumPWMLimit,
+ NULL);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+ NULL);
if (res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2202,7 +2206,8 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -2234,7 +2239,8 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -2485,7 +2491,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -2515,12 +2521,12 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -2550,7 +2556,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -2565,6 +2571,7 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = polaris10_update_smc_table,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 2319400a3fcb..ea2279bb8cbf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -126,15 +126,18 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL;);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL;);
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
/* flush hdp cache */
amdgpu_asic_flush_hdp(adev, NULL);
@@ -164,15 +167,18 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
return 0;
}
@@ -181,9 +187,9 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
- smu10_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion);
- smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion,
+ &smc_driver_if_version);
if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) &&
(smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) {
@@ -217,11 +223,11 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
adev->pm.fw_version = hwmgr->smu_version >> 8;
- if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
+ if (!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
+ (adev->apu_flags & AMD_APU_IS_RAVEN) &&
adev->pm.fw_version < 0x1e45)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 3f51d545e8ff..aae25243eb10 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -191,13 +191,6 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
return 0;
}
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
@@ -207,25 +200,14 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, ui
return smu7_send_msg_to_smc(hwmgr, msg);
}
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
+ return cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
}
int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
-
- cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
-
- if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
- pr_info("Failed to send Message.\n");
-
- return 0;
+ return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
}
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
@@ -353,12 +335,14 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
if (hwmgr->not_vf) {
- smu7_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI,
- upper_32_bits(smu_data->smu_buffer.mc_addr));
- smu7_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(smu_data->smu_buffer.mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_LO,
- lower_32_bits(smu_data->smu_buffer.mc_addr));
+ lower_32_bits(smu_data->smu_buffer.mc_addr),
+ NULL);
}
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
@@ -423,10 +407,16 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
}
memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
sizeof(struct SMU_DRAMData_TOC));
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
-
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DRV_DRAM_ADDR_HI,
+ upper_32_bits(smu_data->header_buffer.mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DRV_DRAM_ADDR_LO,
+ lower_32_bits(smu_data->header_buffer.mc_addr),
+ NULL);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load, NULL);
r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
if (!r)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 01f0538fba6b..e7303dc8c260 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -60,11 +60,9 @@ int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr);
int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg);
int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
uint32_t parameter);
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter);
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr);
int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 7dca04a89217..76d4f12ceedf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -610,18 +610,21 @@ static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
*table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
- upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
- lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_clock_table);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table,
+ NULL);
- smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram, NULL);
return 0;
}
@@ -637,18 +640,21 @@ static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
break;
}
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
- upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
- lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_clock_table);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table,
+ NULL);
- smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu, NULL);
return 0;
}
@@ -671,25 +677,30 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrHi,
- upper_32_bits(smu8_smu->toc_buffer.mc_addr));
+ upper_32_bits(smu8_smu->toc_buffer.mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrLo,
- lower_32_bits(smu8_smu->toc_buffer.mc_addr));
+ lower_32_bits(smu8_smu->toc_buffer.mc_addr),
+ NULL);
- smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs, NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_aram);
- smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_power_profiling_index);
+ smu8_smu->toc_entry_aram,
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_power_profiling_index,
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_initialize_index);
+ smu8_smu->toc_entry_initialize_index,
+ NULL);
fw_to_check = UCODE_ID_RLC_G_MASK |
UCODE_ID_SDMA0_MASK |
@@ -860,11 +871,13 @@ static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
unsigned long check_feature)
{
int result;
- unsigned long features;
+ uint32_t features;
- result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetFeatureStatus,
+ 0,
+ &features);
if (result == 0) {
- features = smum_get_argument(hwmgr);
if (features & check_feature)
return true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 4240aeec9000..b6fb48066841 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -103,14 +103,6 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
return 0;
}
-uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
-{
- if (NULL != hwmgr->smumgr_funcs->get_argument)
- return hwmgr->smumgr_funcs->get_argument(hwmgr);
-
- return 0;
-}
-
uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value)
{
if (NULL != hwmgr->smumgr_funcs->get_mac_definition)
@@ -135,22 +127,58 @@ int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr)
return 0;
}
-int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp)
{
- if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
+ int ret = 0;
+
+ if (hwmgr == NULL ||
+ hwmgr->smumgr_funcs->send_msg_to_smc == NULL ||
+ (resp && !hwmgr->smumgr_funcs->get_argument))
return -EINVAL;
- return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+ mutex_lock(&hwmgr->msg_lock);
+
+ ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+ if (ret) {
+ mutex_unlock(&hwmgr->msg_lock);
+ return ret;
+ }
+
+ if (resp)
+ *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+ mutex_unlock(&hwmgr->msg_lock);
+
+ return ret;
}
int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
+ uint16_t msg,
+ uint32_t parameter,
+ uint32_t *resp)
{
+ int ret = 0;
+
if (hwmgr == NULL ||
- hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
+ hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL ||
+ (resp && !hwmgr->smumgr_funcs->get_argument))
return -EINVAL;
- return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+
+ mutex_lock(&hwmgr->msg_lock);
+
+ ret = hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
hwmgr, msg, parameter);
+ if (ret) {
+ mutex_unlock(&hwmgr->msg_lock);
+ return ret;
+ }
+
+ if (resp)
+ *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+ mutex_unlock(&hwmgr->msg_lock);
+
+ return ret;
}
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
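
[Editor's note] The reworked wrappers above serialize each message under hwmgr->msg_lock and read the argument register only when the caller supplied a resp pointer, rejecting resp requests up front when the backend lacks a get_argument hook. A condensed standalone model of that control flow; the types are simplified stand-ins, and the lock is noted in comments rather than modeled:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the smumgr backend hooks. */
struct backend {
        int      (*send_msg)(uint16_t msg);
        uint32_t (*get_argument)(void);
};

/* Model of smum_send_msg_to_smc(): resp is optional, but asking for a
 * response without a get_argument hook is an immediate -EINVAL.
 * (The real code holds hwmgr->msg_lock across the send and the read.) */
static int send_msg(struct backend *be, uint16_t msg, uint32_t *resp)
{
        int ret;

        if (!be || !be->send_msg || (resp && !be->get_argument))
                return -EINVAL;

        ret = be->send_msg(msg);        /* lock taken here in the driver */
        if (ret)
                return ret;
        if (resp)
                *resp = be->get_argument();
        return 0;                       /* lock released in the driver */
}

static int ok_send(uint16_t msg) { (void)msg; return 0; }
static uint32_t fixed_arg(void) { return 0x2a; }

int main(void)
{
        struct backend be = { .send_msg = ok_send, .get_argument = fixed_arg };
        uint32_t resp;
        int ret = send_msg(&be, 1, &resp);

        printf("ret=%d resp=0x%x\n", ret, resp);
        return 0;
}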
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index f19bac7ef7ba..398e7e3587de 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -2702,7 +2702,8 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -2733,7 +2734,8 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -3168,7 +3170,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -3198,12 +3200,12 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -3233,7 +3235,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -3248,6 +3250,7 @@ const struct pp_smumgr_func tonga_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = tonga_update_smc_table,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 715564009089..1e222c5d91a4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -47,15 +47,18 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
/* flush hdp cache */
amdgpu_asic_flush_hdp(adev, NULL);
@@ -90,15 +93,18 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
return 0;
}
@@ -118,17 +124,21 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
return 0;
return smum_send_msg_to_smc_with_parameter(hwmgr,
- msg, feature_mask);
+ msg, feature_mask, NULL);
}
int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
uint64_t *features_enabled)
{
+ uint32_t enabled_features;
+
if (features_enabled == NULL)
return -EINVAL;
- smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
- *features_enabled = smu9_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeatures,
+ &enabled_features);
+ *features_enabled = enabled_features;
return 0;
}
@@ -150,12 +160,14 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
struct vega10_smumgr *priv = hwmgr->smu_backend;
if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
+ lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+ NULL);
}
return 0;
}
@@ -167,11 +179,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion),
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion,
+ &smc_driver_if_version),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- smc_driver_if_version = smu9_get_argument(hwmgr);
dev_id = adev->pdev->device;
rev_id = adev->pdev->revision;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 275dbf65f1a0..f54df76537e4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -50,18 +50,21 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- table_id) == 0,
+ table_id,
+ NULL) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -EINVAL);
@@ -98,19 +101,22 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- table_id) == 0,
+ table_id,
+ NULL) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
return -EINVAL);
@@ -126,21 +132,21 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
if (enable) {
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return -EINVAL);
} else {
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return -EINVAL);
}
@@ -156,17 +162,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesLow,
+ &smc_features_low) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return -EINVAL);
- smc_features_low = smu9_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+ &smc_features_high) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return -EINVAL);
- smc_features_high = smu9_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -192,12 +198,14 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
(struct vega12_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
- if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
+ if (!smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL);
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 16aa171971d3..2fb97554134f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -175,18 +175,20 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return ret);
@@ -224,18 +226,20 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
return ret);
@@ -255,18 +259,22 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu,
+ TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16),
+ NULL)) == 0,
"[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
return ret);
@@ -281,19 +289,21 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+ TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0,
"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
return ret);
@@ -316,21 +326,21 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
if (enable) {
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return ret);
} else {
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return ret);
}
@@ -347,16 +357,16 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesLow,
+ &smc_features_low)) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return ret);
- smc_features_low = vega20_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+ &smc_features_high)) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return ret);
- smc_features_high = vega20_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -371,13 +381,15 @@ static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
int ret = 0;
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
- ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL);
if (!ret)
- ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL);
}
return ret;
@@ -389,14 +401,16 @@ int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
(struct vega20_smumgr *)(hwmgr->smu_backend);
int ret = 0;
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+ NULL)) == 0,
"[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+ NULL)) == 0,
"[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!",
return ret);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index b0e0d67cd54b..3da71a088b92 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -356,7 +356,8 @@ static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -388,7 +389,8 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -1906,7 +1908,8 @@ static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableModeSwitchRLCNotification,
- adev->gfx.cu_info.number);
+ adev->gfx.cu_info.number,
+ NULL);
return 0;
}
@@ -2060,7 +2063,7 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition) &&
- !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme))
+ !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme, NULL))
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
} else {
@@ -2250,10 +2253,12 @@ int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
if (!hwmgr->avfs_supported)
return 0;
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
if (!ret) {
if (data->apply_avfs_cks_off_voltage)
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+ ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_ApplyAvfsCksOffVoltage,
+ NULL);
}
return ret;
@@ -2279,6 +2284,7 @@ const struct pp_smumgr_func vegam_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.process_firmware_header = vegam_process_firmware_header,
.is_dpm_running = vegam_is_dpm_running,
.get_mac_definition = vegam_get_mac_definition,
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 3f1044326dcb..61923530b2e4 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -1796,7 +1796,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
"PD_Data_error_rate_coeff"};
int result = 0;
- if (!smu->pm_enabled || !buf)
+ if (!buf)
return -EINVAL;
size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
@@ -1887,8 +1887,6 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
smu->power_profile_mode = input[size];
- if (!smu->pm_enabled)
- return ret;
if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
return -EINVAL;
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index d6a6692db0ac..c05d001163e0 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -137,10 +137,11 @@ static struct drm_info_list arcpgu_debugfs_list[] = {
{ "clocks", arcpgu_show_pxlclock, 0 },
};
-static int arcpgu_debugfs_init(struct drm_minor *minor)
+static void arcpgu_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(arcpgu_debugfs_list,
- ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor);
+ drm_debugfs_create_files(arcpgu_debugfs_list,
+ ARRAY_SIZE(arcpgu_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 442d4656150a..6b85d5f4caa8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -14,6 +14,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -260,17 +261,16 @@ static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
- struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
+ struct komeda_kms_dev *kms;
struct drm_device *drm;
int err;
- if (!kms)
- return ERR_PTR(-ENOMEM);
+ kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
+ struct komeda_kms_dev, base);
+ if (IS_ERR(kms))
+ return kms;
drm = &kms->base;
- err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
- if (err)
- goto free_kms;
drm->dev_private = mdev;
@@ -327,9 +327,6 @@ cleanup_mode_config:
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
- drm_dev_put(drm);
-free_kms:
- kfree(kms);
return ERR_PTR(err);
}
@@ -346,5 +343,4 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
- drm_dev_put(drm);
}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 2e053815b54a..194419f47c5e 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -224,10 +224,11 @@ static struct drm_info_list hdlcd_debugfs_list[] = {
{ "clocks", hdlcd_show_pxlclock, 0 },
};
-static int hdlcd_debugfs_init(struct drm_minor *minor)
+static void hdlcd_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(hdlcd_debugfs_list,
- ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor);
+ drm_debugfs_create_files(hdlcd_debugfs_list,
+ ARRAY_SIZE(hdlcd_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 37d92a06318e..def8c9ffafca 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -548,7 +548,7 @@ static const struct file_operations malidp_debugfs_fops = {
.release = single_release,
};
-static int malidp_debugfs_init(struct drm_minor *minor)
+static void malidp_debugfs_init(struct drm_minor *minor)
{
struct malidp_drm *malidp = minor->dev->dev_private;
@@ -557,7 +557,6 @@ static int malidp_debugfs_init(struct drm_minor *minor)
spin_lock_init(&malidp->errors_lock);
debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
minor->dev, &malidp_debugfs_fops);
- return 0;
}
#endif //CONFIG_DEBUG_FS
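For context, a sketch of the helper prototype these debugfs_init conversions target; drm_debugfs_create_files() now returns void, so the minor's debugfs_init hook can no longer fail and debugfs setup errors are non-fatal:

	void drm_debugfs_create_files(const struct drm_info_list *files,
				      int count, struct dentry *root,
				      struct drm_minor *minor);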
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 197dca3fc84c..5fc25c3f445c 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
@@ -103,6 +104,7 @@ static int armada_drm_bind(struct device *dev)
kfree(priv);
return ret;
}
+ drmm_add_final_kfree(&priv->drm, priv);
/* Remove early framebuffers */
ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
@@ -311,7 +313,7 @@ static void __exit armada_drm_exit(void)
}
module_exit(armada_drm_exit);
-MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_DESCRIPTION("Armada DRM Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h
index a10358bb61ec..e7ca95827ae8 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx.h
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h
@@ -5,6 +5,7 @@
#include <drm/drm_simple_kms_helper.h>
struct aspeed_gfx {
+ struct drm_device drm;
void __iomem *base;
struct clk *clk;
struct reset_control *rst;
@@ -12,8 +13,8 @@ struct aspeed_gfx {
struct drm_simple_display_pipe pipe;
struct drm_connector connector;
- struct drm_fbdev_cma *fbdev;
};
+#define to_aspeed_gfx(x) container_of(x, struct aspeed_gfx, drm)
int aspeed_gfx_create_pipe(struct drm_device *drm);
int aspeed_gfx_create_output(struct drm_device *drm);
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index 2184b8be6fd4..e54686c31a90 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -231,7 +231,7 @@ static const uint32_t aspeed_gfx_formats[] = {
int aspeed_gfx_create_pipe(struct drm_device *drm)
{
- struct aspeed_gfx *priv = drm->dev_private;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs,
aspeed_gfx_formats,
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index ada2f6aca906..6b27242b9ee3 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -77,7 +77,7 @@ static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
{
struct drm_device *drm = data;
- struct aspeed_gfx *priv = drm->dev_private;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
u32 reg;
reg = readl(priv->base + CRT_CTRL1);
@@ -96,15 +96,10 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
static int aspeed_gfx_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
- struct aspeed_gfx *priv;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct resource *res;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- drm->dev_private = priv;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(drm->dev, res);
if (IS_ERR(priv->base))
@@ -187,8 +182,6 @@ static void aspeed_gfx_unload(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
-
- drm->dev_private = NULL;
}
DEFINE_DRM_GEM_CMA_FOPS(fops);
@@ -216,27 +209,26 @@ static const struct of_device_id aspeed_gfx_match[] = {
static int aspeed_gfx_probe(struct platform_device *pdev)
{
- struct drm_device *drm;
+ struct aspeed_gfx *priv;
int ret;
- drm = drm_dev_alloc(&aspeed_gfx_driver, &pdev->dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ priv = devm_drm_dev_alloc(&pdev->dev, &aspeed_gfx_driver,
+ struct aspeed_gfx, drm);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
- ret = aspeed_gfx_load(drm);
+ ret = aspeed_gfx_load(&priv->drm);
if (ret)
- goto err_free;
+ return ret;
- ret = drm_dev_register(drm, 0);
+ ret = drm_dev_register(&priv->drm, 0);
if (ret)
goto err_unload;
return 0;
err_unload:
- aspeed_gfx_unload(drm);
-err_free:
- drm_dev_put(drm);
+ aspeed_gfx_unload(&priv->drm);
return ret;
}
@@ -247,7 +239,6 @@ static int aspeed_gfx_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
aspeed_gfx_unload(drm);
- drm_dev_put(drm);
return 0;
}
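For reference, a minimal sketch (names hypothetical, not part of the patch) of the devm_drm_dev_alloc() pattern used in the komeda and aspeed conversions above: the drm_device is embedded in the driver structure and both are freed automatically with the last reference, which is why the drm_dev_put()/kfree() error paths disappear.

	struct my_priv {
		struct drm_device drm;	/* embedded, not a pointer */
		void __iomem *base;
	};

	priv = devm_drm_dev_alloc(&pdev->dev, &my_driver,
				  struct my_priv, drm);
	if (IS_ERR(priv))
		return PTR_ERR(priv);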
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
index 67ee5fa10055..6759cb88415a 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
@@ -28,7 +28,7 @@ static const struct drm_connector_funcs aspeed_gfx_connector_funcs = {
int aspeed_gfx_create_output(struct drm_device *drm)
{
- struct aspeed_gfx *priv = drm->dev_private;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
int ret;
priv->connector.dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 30aa73a5d9b7..b7ba22dddcad 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_probe_helper.h>
@@ -111,6 +112,8 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_ast_driver_unload;
+ drm_fbdev_generic_setup(dev, 32);
+
return 0;
err_ast_driver_unload:
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 18a0a4ce00f6..e5398e3dabe7 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -30,7 +30,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
@@ -512,10 +511,6 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_reset(dev);
- ret = drm_fbdev_generic_setup(dev, 32);
- if (ret)
- goto out_free;
-
return 0;
out_free:
kfree(ast);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index cdd6c46d6557..7d39b858c9f1 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -561,8 +561,9 @@ static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
return 0;
}
-void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void
+ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct ast_private *ast = plane->dev->dev_private;
struct drm_plane_state *state = plane->state;
@@ -801,6 +802,9 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->enable)
+ return 0; /* no mode checks if CRTC is being disabled */
+
ast_state = to_ast_crtc_state(state);
format = ast_state->format;
@@ -881,6 +885,17 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.atomic_disable = ast_crtc_helper_atomic_disable,
};
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+ struct ast_crtc_state *ast_state =
+ kzalloc(sizeof(*ast_state), GFP_KERNEL);
+
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+}
+
static void ast_crtc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
@@ -919,8 +934,7 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs ast_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
- .set_config = drm_crtc_helper_set_config,
+ .reset = ast_crtc_reset,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = ast_crtc_destroy,
.set_config = drm_atomic_helper_set_config,
@@ -1069,7 +1083,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
ast_i2c_destroy(ast_connector->i2c);
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1112,8 +1125,6 @@ static int ast_connector_init(struct drm_device *dev)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_connector_register(connector);
-
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index e2019fe97fff..43bc709e3523 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -11,9 +11,10 @@
#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
-#include <drm/drm_bridge.h>
+#include <drm/drm_simple_kms_helper.h>
#include "atmel_hlcdc_dc.h"
@@ -22,10 +23,6 @@ struct atmel_hlcdc_rgb_output {
int bus_fmt;
};
-static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static struct atmel_hlcdc_rgb_output *
atmel_hlcdc_encoder_to_rgb_output(struct drm_encoder *encoder)
{
@@ -98,9 +95,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
return -EINVAL;
}
- ret = drm_encoder_init(dev, &output->encoder,
- &atmel_hlcdc_panel_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(dev, &output->encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 917767173ee6..e5bd1d517a18 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -92,7 +92,6 @@ void bochs_mm_fini(struct bochs_device *bochs);
/* bochs_kms.c */
int bochs_kms_init(struct bochs_device *bochs);
-void bochs_kms_fini(struct bochs_device *bochs);
/* bochs_fbdev.c */
extern const struct drm_mode_config_funcs bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index addb0568c1af..e18c51de1196 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_managed.h>
#include "bochs.h"
@@ -21,10 +22,7 @@ static void bochs_unload(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
- bochs_kms_fini(bochs);
bochs_mm_fini(bochs);
- kfree(bochs);
- dev->dev_private = NULL;
}
static int bochs_load(struct drm_device *dev)
@@ -32,7 +30,7 @@ static int bochs_load(struct drm_device *dev)
struct bochs_device *bochs;
int ret;
- bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+ bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL);
if (bochs == NULL)
return -ENOMEM;
dev->dev_private = bochs;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 8066d7d370d5..05d8373888e8 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -104,7 +104,6 @@ static void bochs_connector_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector,
&bochs_connector_connector_helper_funcs);
- drm_connector_register(connector);
bochs_hw_load_edid(bochs);
if (bochs->edid) {
@@ -134,7 +133,11 @@ const struct drm_mode_config_funcs bochs_mode_funcs = {
int bochs_kms_init(struct bochs_device *bochs)
{
- drm_mode_config_init(bochs->dev);
+ int ret;
+
+ ret = drmm_mode_config_init(bochs->dev);
+ if (ret)
+ return ret;
bochs->dev->mode_config.max_width = 8192;
bochs->dev->mode_config.max_height = 8192;
@@ -160,12 +163,3 @@ int bochs_kms_init(struct bochs_device *bochs)
return 0;
}
-
-void bochs_kms_fini(struct bochs_device *bochs)
-{
- if (!bochs->dev->mode_config.num_connector)
- return;
-
- drm_atomic_helper_shutdown(bochs->dev);
- drm_mode_config_cleanup(bochs->dev);
-}
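For reference (sketch, not part of the patch): drmm_mode_config_init() registers drm_mode_config_cleanup() as a managed release action on the drm_device, which is why the explicit cleanup in bochs_kms_fini() becomes unnecessary.

	ret = drmm_mode_config_init(dev);
	if (ret)
		return ret;
	/* no explicit drm_mode_config_cleanup() needed on teardown */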
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index aaed2347ace9..04f876e985de 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -27,6 +27,16 @@ config DRM_CDNS_DSI
Support Cadence DPI to DSI bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.
+config DRM_CHRONTEL_CH7033
+ tristate "Chrontel CH7033 Video Encoder"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Enable support for the Chrontel CH7033 VGA/DVI/HDMI Encoder, as
+ found in the Dell Wyse 3020 thin client.
+
+ If in doubt, say "N".
+
config DRM_DISPLAY_CONNECTOR
tristate "Display connector support"
depends on OF
@@ -58,6 +68,22 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
to DP++. This is used with the i.MX6 imx-ldb
driver. You are likely to say N here.
+config DRM_NWL_MIPI_DSI
+ tristate "Northwest Logic MIPI DSI Host controller"
+ depends on DRM
+ depends on COMMON_CLK
+ depends on OF && HAS_IOMEM
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ select GENERIC_PHY_MIPI_DPHY
+ select MFD_SYSCON
+ select MULTIPLEXER
+ select REGMAP_MMIO
+ help
+	  This enables the Northwest Logic MIPI DSI Host controller found,
+	  for example, on NXP's i.MX8 processors.
+
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 6fb062b5b0f0..d63d4b7e4347 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
+obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
+obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
obj-y += analogix/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 47d4eb9e845d..f46a5e26b5dd 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -6,7 +6,7 @@ config DRM_I2C_ADV7511
select REGMAP_I2C
select DRM_MIPI_DSI
help
- Support for the Analog Device ADV7511(W)/13/33/35 HDMI encoders.
+ Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
config DRM_I2C_ADV7511_AUDIO
bool "ADV7511 HDMI Audio driver"
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index a428185be2c1..f101dd2819b5 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
{
switch (fs) {
case 32000:
- *n = 4096;
+ case 48000:
+ case 96000:
+ case 192000:
+ *n = fs * 128 / 1000;
break;
case 44100:
- *n = 6272;
- break;
- case 48000:
- *n = 6144;
+ case 88200:
+ case 176400:
+ *n = fs * 128 / 900;
break;
}
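A worked check of the new N computation against the HDMI specification's recommended values: for the 48 kHz family, 32000 * 128 / 1000 = 4096, 48000 * 128 / 1000 = 6144 and 96000 * 128 / 1000 = 12288; for the 44.1 kHz family, 44100 * 128 / 900 = 6272 and 88200 * 128 / 900 = 12544. These reproduce the constants (4096, 6272, 6144) that the old switch cases hard-coded, while extending coverage to the 88.2/96/176.4/192 kHz rates.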
@@ -119,6 +121,9 @@ int adv7511_hdmi_hw_params(struct device *dev, void *data,
audio_source = ADV7511_AUDIO_SOURCE_I2S;
i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
break;
+ case HDMI_SPDIF:
+ audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+ break;
default:
return -EINVAL;
}
@@ -175,11 +180,21 @@ static int audio_startup(struct device *dev, void *data)
/* use Audio infoframe updated info */
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
BIT(5), 0);
+ /* enable SPDIF receiver */
+ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), BIT(7));
+
return 0;
}
static void audio_shutdown(struct device *dev, void *data)
{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
}
static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
@@ -213,6 +228,7 @@ static const struct hdmi_codec_pdata codec_data = {
.ops = &adv7511_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
+ .spdif = 1,
};
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
new file mode 100644
index 000000000000..f8675d82974b
--- /dev/null
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Chrontel CH7033 Video Encoder Driver
+ *
+ * Copyright (C) 2019,2020 Lubomir Rintel
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* Page 0, Register 0x07 */
+enum {
+ DRI_PD = BIT(3),
+ IO_PD = BIT(5),
+};
+
+/* Page 0, Register 0x08 */
+enum {
+ DRI_PDDRI = GENMASK(7, 4),
+ PDDAC = GENMASK(3, 1),
+ PANEN = BIT(0),
+};
+
+/* Page 0, Register 0x09 */
+enum {
+ DPD = BIT(7),
+ GCKOFF = BIT(6),
+ TV_BP = BIT(5),
+ SCLPD = BIT(4),
+ SDPD = BIT(3),
+ VGA_PD = BIT(2),
+ HDBKPD = BIT(1),
+ HDMI_PD = BIT(0),
+};
+
+/* Page 0, Register 0x0a */
+enum {
+ MEMINIT = BIT(7),
+ MEMIDLE = BIT(6),
+ MEMPD = BIT(5),
+ STOP = BIT(4),
+ LVDS_PD = BIT(3),
+ HD_DVIB = BIT(2),
+ HDCP_PD = BIT(1),
+ MCU_PD = BIT(0),
+};
+
+/* Page 0, Register 0x18 */
+enum {
+ IDF = GENMASK(7, 4),
+ INTEN = BIT(3),
+ SWAP = GENMASK(2, 0),
+};
+
+enum {
+ BYTE_SWAP_RGB = 0,
+ BYTE_SWAP_RBG = 1,
+ BYTE_SWAP_GRB = 2,
+ BYTE_SWAP_GBR = 3,
+ BYTE_SWAP_BRG = 4,
+ BYTE_SWAP_BGR = 5,
+};
+
+/* Page 0, Register 0x19 */
+enum {
+ HPO_I = BIT(5),
+ VPO_I = BIT(4),
+ DEPO_I = BIT(3),
+ CRYS_EN = BIT(2),
+ GCLKFREQ = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2e */
+enum {
+ HFLIP = BIT(7),
+ VFLIP = BIT(6),
+ DEPO_O = BIT(5),
+ HPO_O = BIT(4),
+ VPO_O = BIT(3),
+ TE = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2b */
+enum {
+ SWAPS = GENMASK(7, 4),
+ VFMT = GENMASK(3, 0),
+};
+
+/* Page 0, Register 0x54 */
+enum {
+ COMP_BP = BIT(7),
+ DAC_EN_T = BIT(6),
+ HWO_HDMI_HI = GENMASK(5, 3),
+ HOO_HDMI_HI = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x57 */
+enum {
+ FLDSEN = BIT(7),
+ VWO_HDMI_HI = GENMASK(5, 3),
+ VOO_HDMI_HI = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x7e */
+enum {
+ HDMI_LVDS_SEL = BIT(7),
+ DE_GEN = BIT(6),
+ PWM_INDEX_HI = BIT(5),
+ USE_DE = BIT(4),
+ R_INT = GENMASK(3, 0),
+};
+
+/* Page 1, Register 0x07 */
+enum {
+ BPCKSEL = BIT(7),
+ DRI_CMFB_EN = BIT(6),
+ CEC_PUEN = BIT(5),
+ CEC_T = BIT(3),
+ CKINV = BIT(2),
+ CK_TVINV = BIT(1),
+ DRI_CKS2 = BIT(0),
+};
+
+/* Page 1, Register 0x08 */
+enum {
+ DACG = BIT(6),
+ DACKTST = BIT(5),
+ DEDGEB = BIT(4),
+ SYO = BIT(3),
+ DRI_IT_LVDS = GENMASK(2, 1),
+ DISPON = BIT(0),
+};
+
+/* Page 1, Register 0x0c */
+enum {
+ DRI_PLL_CP = GENMASK(7, 6),
+ DRI_PLL_DIVSEL = BIT(5),
+ DRI_PLL_N1_1 = BIT(4),
+ DRI_PLL_N1_0 = BIT(3),
+ DRI_PLL_N3_1 = BIT(2),
+ DRI_PLL_N3_0 = BIT(1),
+ DRI_PLL_CKTSTEN = BIT(0),
+};
+
+/* Page 1, Register 0x6b */
+enum {
+ VCO3CS = GENMASK(7, 6),
+ ICPGBK2_0 = GENMASK(5, 3),
+ DRI_VCO357SC = BIT(2),
+ PDPLL2 = BIT(1),
+ DRI_PD_SER = BIT(0),
+};
+
+/* Page 1, Register 0x6c */
+enum {
+ PLL2N11 = GENMASK(7, 4),
+ PLL2N5_4 = BIT(3),
+ PLL2N5_TOP = BIT(2),
+ DRI_PLL_PD = BIT(1),
+ PD_I2CM = BIT(0),
+};
+
+/* Page 3, Register 0x28 */
+enum {
+ DIFF_EN = GENMASK(7, 6),
+ CORREC_EN = GENMASK(5, 4),
+ VGACLK_BP = BIT(3),
+ HM_LV_SEL = BIT(2),
+ HD_VGA_SEL = BIT(1),
+};
+
+/* Page 3, Register 0x2a */
+enum {
+ LVDSCLK_BP = BIT(7),
+ HDTVCLK_BP = BIT(6),
+ HDMICLK_BP = BIT(5),
+ HDTV_BP = BIT(4),
+ HDMI_BP = BIT(3),
+ THRWL = GENMASK(2, 0),
+};
+
+/* Page 4, Register 0x52 */
+enum {
+ PGM_ARSTB = BIT(7),
+ MCU_ARSTB = BIT(6),
+ MCU_RETB = BIT(2),
+ RESETIB = BIT(1),
+ RESETDB = BIT(0),
+};
+
+struct ch7033_priv {
+ struct regmap *regmap;
+ struct drm_bridge *next_bridge;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+};
+
+#define conn_to_ch7033_priv(x) \
+ container_of(x, struct ch7033_priv, connector)
+#define bridge_to_ch7033_priv(x) \
+ container_of(x, struct ch7033_priv, bridge)
+
+
+static enum drm_connector_status ch7033_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+ return drm_bridge_detect(priv->next_bridge);
+}
+
+static const struct drm_connector_funcs ch7033_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ch7033_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ch7033_connector_get_modes(struct drm_connector *connector)
+{
+ struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_bridge_get_edid(priv->next_bridge, connector);
+ drm_connector_update_edid_property(connector, edid);
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ } else {
+ ret = drm_add_modes_noedid(connector, 1920, 1080);
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return ret;
+}
+
+static struct drm_encoder *ch7033_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+ return priv->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ch7033_connector_helper_funcs = {
+ .get_modes = ch7033_connector_get_modes,
+ .best_encoder = ch7033_connector_best_encoder,
+};
+
+static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
+{
+ struct ch7033_priv *priv = arg;
+
+ if (priv->bridge.dev)
+ drm_helper_hpd_irq_event(priv->connector.dev);
+}
+
+static int ch7033_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
+ if (priv->next_bridge->ops & DRM_BRIDGE_OP_DETECT) {
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+
+ if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_enable(priv->next_bridge, ch7033_hpd_event,
+ priv);
+ }
+
+ drm_connector_helper_add(connector,
+ &ch7033_connector_helper_funcs);
+ ret = drm_connector_init_with_ddc(bridge->dev, &priv->connector,
+ &ch7033_connector_funcs,
+ priv->next_bridge->type,
+ priv->next_bridge->ddc);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+}
+
+static void ch7033_bridge_detach(struct drm_bridge *bridge)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+ if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_bridge_hpd_disable(priv->next_bridge);
+ drm_connector_cleanup(&priv->connector);
+}
+
+static enum drm_mode_status ch7033_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->hdisplay >= 1920)
+ return MODE_BAD_HVALUE;
+ if (mode->vdisplay >= 1080)
+ return MODE_BAD_VVALUE;
+ return MODE_OK;
+}
+
+static void ch7033_bridge_disable(struct drm_bridge *bridge)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
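+	/* Page 4: clear RESETDB to disable the display output. */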
+ regmap_write(priv->regmap, 0x03, 0x04);
+ regmap_update_bits(priv->regmap, 0x52, RESETDB, 0x00);
+}
+
+static void ch7033_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
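+	/* Page 4: set RESETDB to bring the display output back up. */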
+ regmap_write(priv->regmap, 0x03, 0x04);
+ regmap_update_bits(priv->regmap, 0x52, RESETDB, RESETDB);
+}
+
+static void ch7033_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+ int hbporch = mode->hsync_start - mode->hdisplay;
+ int hsynclen = mode->hsync_end - mode->hsync_start;
+ int vbporch = mode->vsync_start - mode->vdisplay;
+ int vsynclen = mode->vsync_end - mode->vsync_start;
+
+ /*
+ * Page 4
+ */
+ regmap_write(priv->regmap, 0x03, 0x04);
+
+ /* Turn everything off to set all the registers to their defaults. */
+ regmap_write(priv->regmap, 0x52, 0x00);
+ /* Bring I/O block up. */
+ regmap_write(priv->regmap, 0x52, RESETIB);
+
+ /*
+ * Page 0
+ */
+ regmap_write(priv->regmap, 0x03, 0x00);
+
+ /* Bring up parts we need from the power down. */
+ regmap_update_bits(priv->regmap, 0x07, DRI_PD | IO_PD, 0);
+ regmap_update_bits(priv->regmap, 0x08, DRI_PDDRI | PDDAC | PANEN, 0);
+ regmap_update_bits(priv->regmap, 0x09, DPD | GCKOFF |
+ HDMI_PD | VGA_PD, 0);
+ regmap_update_bits(priv->regmap, 0x0a, HD_DVIB, 0);
+
+ /* Horizontal input timing. */
+ regmap_write(priv->regmap, 0x0b, (mode->htotal >> 8) << 3 |
+ (mode->hdisplay >> 8));
+ regmap_write(priv->regmap, 0x0c, mode->hdisplay);
+ regmap_write(priv->regmap, 0x0d, mode->htotal);
+ regmap_write(priv->regmap, 0x0e, (hsynclen >> 8) << 3 |
+ (hbporch >> 8));
+ regmap_write(priv->regmap, 0x0f, hbporch);
+ regmap_write(priv->regmap, 0x10, hsynclen);
+
+ /* Vertical input timing. */
+ regmap_write(priv->regmap, 0x11, (mode->vtotal >> 8) << 3 |
+ (mode->vdisplay >> 8));
+ regmap_write(priv->regmap, 0x12, mode->vdisplay);
+ regmap_write(priv->regmap, 0x13, mode->vtotal);
+ regmap_write(priv->regmap, 0x14, ((vsynclen >> 8) << 3) |
+ (vbporch >> 8));
+ regmap_write(priv->regmap, 0x15, vbporch);
+ regmap_write(priv->regmap, 0x16, vsynclen);
+
+ /* Input color swap. */
+ regmap_update_bits(priv->regmap, 0x18, SWAP, BYTE_SWAP_BGR);
+
+ /* Input clock and sync polarity. */
+ regmap_update_bits(priv->regmap, 0x19, 0x1, mode->clock >> 16);
+ regmap_update_bits(priv->regmap, 0x19, HPO_I | VPO_I | GCLKFREQ,
+			   ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_I : 0) |
+			   ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_I : 0) |
+			   (mode->clock >> 16));
+ regmap_write(priv->regmap, 0x1a, mode->clock >> 8);
+ regmap_write(priv->regmap, 0x1b, mode->clock);
+
+ /* Horizontal output timing. */
+ regmap_write(priv->regmap, 0x1f, (mode->htotal >> 8) << 3 |
+ (mode->hdisplay >> 8));
+ regmap_write(priv->regmap, 0x20, mode->hdisplay);
+ regmap_write(priv->regmap, 0x21, mode->htotal);
+
+ /* Vertical output timing. */
+ regmap_write(priv->regmap, 0x25, (mode->vtotal >> 8) << 3 |
+ (mode->vdisplay >> 8));
+ regmap_write(priv->regmap, 0x26, mode->vdisplay);
+ regmap_write(priv->regmap, 0x27, mode->vtotal);
+
+ /* VGA channel bypass */
+ regmap_update_bits(priv->regmap, 0x2b, VFMT, 9);
+
+ /* Output sync polarity. */
+ regmap_update_bits(priv->regmap, 0x2e, HPO_O | VPO_O,
+			   ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_O : 0) |
+			   ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_O : 0));
+
+ /* HDMI horizontal output timing. */
+ regmap_update_bits(priv->regmap, 0x54, HWO_HDMI_HI | HOO_HDMI_HI,
+ (hsynclen >> 8) << 3 |
+ (hbporch >> 8));
+ regmap_write(priv->regmap, 0x55, hbporch);
+ regmap_write(priv->regmap, 0x56, hsynclen);
+
+ /* HDMI vertical output timing. */
+ regmap_update_bits(priv->regmap, 0x57, VWO_HDMI_HI | VOO_HDMI_HI,
+ (vsynclen >> 8) << 3 |
+ (vbporch >> 8));
+ regmap_write(priv->regmap, 0x58, vbporch);
+ regmap_write(priv->regmap, 0x59, vsynclen);
+
+ /* Pick HDMI, not LVDS. */
+ regmap_update_bits(priv->regmap, 0x7e, HDMI_LVDS_SEL, HDMI_LVDS_SEL);
+
+ /*
+ * Page 1
+ */
+ regmap_write(priv->regmap, 0x03, 0x01);
+
+ /* No idea what these do, but VGA is wobbly and blinky without them. */
+ regmap_update_bits(priv->regmap, 0x07, CKINV, CKINV);
+ regmap_update_bits(priv->regmap, 0x08, DISPON, DISPON);
+
+ /* DRI PLL */
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_DIVSEL, DRI_PLL_DIVSEL);
+ if (mode->clock <= 40000) {
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+ DRI_PLL_N1_0 |
+ DRI_PLL_N3_1 |
+ DRI_PLL_N3_0,
+ 0);
+ } else if (mode->clock < 80000) {
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+ DRI_PLL_N1_0 |
+ DRI_PLL_N3_1 |
+ DRI_PLL_N3_0,
+ DRI_PLL_N3_0 |
+ DRI_PLL_N1_0);
+ } else {
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+ DRI_PLL_N1_0 |
+ DRI_PLL_N3_1 |
+ DRI_PLL_N3_0,
+ DRI_PLL_N3_1 |
+ DRI_PLL_N1_1);
+ }
+
+ /* This seems to be color calibration for VGA. */
+ regmap_write(priv->regmap, 0x64, 0x29); /* LSB Blue */
+ regmap_write(priv->regmap, 0x65, 0x29); /* LSB Green */
+ regmap_write(priv->regmap, 0x66, 0x29); /* LSB Red */
+ regmap_write(priv->regmap, 0x67, 0x00); /* MSB Blue */
+ regmap_write(priv->regmap, 0x68, 0x00); /* MSB Green */
+ regmap_write(priv->regmap, 0x69, 0x00); /* MSB Red */
+
+ regmap_update_bits(priv->regmap, 0x6b, DRI_PD_SER, 0x00);
+ regmap_update_bits(priv->regmap, 0x6c, DRI_PLL_PD, 0x00);
+
+ /*
+ * Page 3
+ */
+ regmap_write(priv->regmap, 0x03, 0x03);
+
+ /* More bypasses and apparently another HDMI/LVDS selector. */
+ regmap_update_bits(priv->regmap, 0x28, VGACLK_BP | HM_LV_SEL,
+ VGACLK_BP | HM_LV_SEL);
+ regmap_update_bits(priv->regmap, 0x2a, HDMICLK_BP | HDMI_BP,
+ HDMICLK_BP | HDMI_BP);
+
+ /*
+ * Page 4
+ */
+ regmap_write(priv->regmap, 0x03, 0x04);
+
+ /* Output clock. */
+ regmap_write(priv->regmap, 0x10, mode->clock >> 16);
+ regmap_write(priv->regmap, 0x11, mode->clock >> 8);
+ regmap_write(priv->regmap, 0x12, mode->clock);
+}
+
+static const struct drm_bridge_funcs ch7033_bridge_funcs = {
+ .attach = ch7033_bridge_attach,
+ .detach = ch7033_bridge_detach,
+ .mode_valid = ch7033_bridge_mode_valid,
+ .disable = ch7033_bridge_disable,
+ .enable = ch7033_bridge_enable,
+ .mode_set = ch7033_bridge_mode_set,
+};
+
+static const struct regmap_config ch7033_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x7f,
+};
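+
+/*
+ * The CH7033 register map is paged: writes to register 0x03 select the
+ * active page, and the other addresses used above are page-relative (see
+ * the "Page N" markers in ch7033_bridge_mode_set()).
+ */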
+
+static int ch7033_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ch7033_priv *priv;
+ unsigned int val;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, NULL,
+ &priv->next_bridge);
+ if (ret)
+ return ret;
+
+ priv->regmap = devm_regmap_init_i2c(client, &ch7033_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(&client->dev, "regmap init failed\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = regmap_read(priv->regmap, 0x00, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading the model id: %d\n", ret);
+ return ret;
+ }
+ if ((val & 0xf7) != 0x56) {
+ dev_err(&client->dev, "the device is not a ch7033\n");
+ return -ENODEV;
+ }
+
+ regmap_write(priv->regmap, 0x03, 0x04);
+ ret = regmap_read(priv->regmap, 0x51, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading the model id: %d\n", ret);
+ return ret;
+ }
+ if ((val & 0x0f) != 3) {
+ dev_err(&client->dev, "unknown revision %u\n", val);
+ return -ENODEV;
+ }
+
+ INIT_LIST_HEAD(&priv->bridge.list);
+ priv->bridge.funcs = &ch7033_bridge_funcs;
+ priv->bridge.of_node = dev->of_node;
+ drm_bridge_add(&priv->bridge);
+
+ dev_info(dev, "Chrontel CH7033 Video Encoder\n");
+ return 0;
+}
+
+static int ch7033_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ch7033_priv *priv = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&priv->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ch7033_dt_ids[] = {
+ { .compatible = "chrontel,ch7033", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ch7033_dt_ids);
+
+static const struct i2c_device_id ch7033_ids[] = {
+ { "ch7033", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ch7033_ids);
+
+static struct i2c_driver ch7033_driver = {
+ .probe = ch7033_probe,
+ .remove = ch7033_remove,
+ .driver = {
+ .name = "ch7033",
+ .of_match_table = of_match_ptr(ch7033_dt_ids),
+ },
+ .id_table = ch7033_ids,
+};
+
+module_i2c_driver(ch7033_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Chrontel CH7033 Video Encoder Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
new file mode 100644
index 000000000000..b14d725bf609
--- /dev/null
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX8 NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2020 Purism SPC
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sys_soc.h>
+#include <linux/time64.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/mipi_display.h>
+
+#include "nwl-dsi.h"
+
+#define DRV_NAME "nwl-dsi"
+
+/* i.MX8 NWL quirks */
+/* i.MX8MQ errata E11418 */
+#define E11418_HS_MODE_QUIRK BIT(0)
+
+#define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500)
+
+enum transfer_direction {
+ DSI_PACKET_SEND,
+ DSI_PACKET_RECEIVE,
+};
+
+#define NWL_DSI_ENDPOINT_LCDIF 0
+#define NWL_DSI_ENDPOINT_DCSS 1
+
+struct nwl_dsi_plat_clk_config {
+ const char *id;
+ struct clk *clk;
+ bool present;
+};
+
+struct nwl_dsi_transfer {
+ const struct mipi_dsi_msg *msg;
+ struct mipi_dsi_packet packet;
+ struct completion completed;
+
+ int status; /* status of transmission */
+ enum transfer_direction direction;
+ bool need_bta;
+ u8 cmd;
+ u16 rx_word_count;
+ size_t tx_len; /* in bytes */
+ size_t rx_len; /* in bytes */
+};
+
+struct nwl_dsi {
+ struct drm_bridge bridge;
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge *panel_bridge;
+ struct device *dev;
+ struct phy *phy;
+ union phy_configure_opts phy_cfg;
+ unsigned int quirks;
+
+ struct regmap *regmap;
+ int irq;
+ /*
+ * The DSI host controller needs this reset sequence according to NWL:
+ * 1. Deassert pclk reset to get access to DSI regs
+ * 2. Configure DSI Host and DPHY and enable DPHY
+ * 3. Deassert ESC and BYTE resets to allow host TX operations
+ * 4. Send DSI cmds to configure peripheral (handled by panel drv)
+ * 5. Deassert DPI reset so DPI receives pixels and starts sending
+ * DSI data
+ *
+ * TODO: Since panel_bridges do their DSI setup in enable we
+ * currently have 4. and 5. swapped.
+ */
+ struct reset_control *rst_byte;
+ struct reset_control *rst_esc;
+ struct reset_control *rst_dpi;
+ struct reset_control *rst_pclk;
+ struct mux_control *mux;
+
+ /* DSI clocks */
+ struct clk *phy_ref_clk;
+ struct clk *rx_esc_clk;
+ struct clk *tx_esc_clk;
+ struct clk *core_clk;
+ /*
+ * hardware bug: the i.MX8MQ needs this clock on during reset
+ * even when not using LCDIF.
+ */
+ struct clk *lcdif_clk;
+
+ /* dsi lanes */
+ u32 lanes;
+ enum mipi_dsi_pixel_format format;
+ struct drm_display_mode mode;
+ unsigned long dsi_mode_flags;
+ int error;
+
+ struct nwl_dsi_transfer *xfer;
+};
+
+static const struct regmap_config nwl_dsi_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = NWL_DSI_IRQ_MASK2,
+ .name = DRV_NAME,
+};
+
+static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct nwl_dsi, bridge);
+}
+
+static int nwl_dsi_clear_error(struct nwl_dsi *dsi)
+{
+ int ret = dsi->error;
+
+ dsi->error = 0;
+ return ret;
+}
+
+static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val)
+{
+ int ret;
+
+ if (dsi->error)
+ return;
+
+ ret = regmap_write(dsi->regmap, reg, val);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev,
+ "Failed to write NWL DSI reg 0x%x: %d\n", reg,
+ ret);
+ dsi->error = ret;
+ }
+}
+
+static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg)
+{
+ unsigned int val;
+ int ret;
+
+ if (dsi->error)
+ return 0;
+
+ ret = regmap_read(dsi->regmap, reg, &val);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n",
+ reg, ret);
+ dsi->error = ret;
+ }
+ return val;
+}
+
+static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB565:
+ return NWL_DSI_PIXEL_FORMAT_16;
+ case MIPI_DSI_FMT_RGB666:
+ return NWL_DSI_PIXEL_FORMAT_18L;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return NWL_DSI_PIXEL_FORMAT_18;
+ case MIPI_DSI_FMT_RGB888:
+ return NWL_DSI_PIXEL_FORMAT_24;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * ps2bc - Picoseconds to byte clock cycles
+ */
+static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
+{
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+ return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
+ dsi->lanes * 8 * NSEC_PER_SEC);
+}
+
+/*
+ * ui2bc - UI time periods to byte clock cycles
+ */
+static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+{
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+ return DIV64_U64_ROUND_UP(ui * dsi->lanes,
+ dsi->mode.clock * 1000 * bpp);
+}
+
+/*
+ * us2lp - microseconds to LP clock cycles
+ */
+static u32 us2lp(u32 lp_clk_rate, unsigned long us)
+{
+ return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
+}
+
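A quick numeric check of the three conversion helpers above (an editor's sketch; the mode numbers are illustrative assumptions, not taken from this driver). For a 1080p60 pixel clock of 148500 kHz, RGB888 (24 bpp) on 4 lanes, the byte clock is 148500 * 24 / (4 * 8) = 111375 kHz, so a 100 ns interval rounds up to 12 byte-clock cycles and a 1 ms wakeup at a 20 MHz LP clock is 20000 LP cycles:

#include <stdio.h>

/* Standalone restatement of the ps2bc()/us2lp() arithmetic. */
int main(void)
{
	unsigned long long ps = 100000;        /* 100 ns, in picoseconds */
	unsigned long long clock_khz = 148500; /* 1080p60 pixel clock */
	unsigned long long bpp = 24, lanes = 4;
	unsigned long long num = ps * clock_khz * bpp;
	unsigned long long den = lanes * 8 * 1000000000ULL; /* NSEC_PER_SEC */

	/* DIV64_U64_ROUND_UP equivalent: prints 12 */
	printf("ps2bc: %llu\n", (num + den - 1) / den);

	/* DIV_ROUND_UP equivalent for us2lp(20 MHz, 1000 us): prints 20000 */
	printf("us2lp: %llu\n", (1000ULL * 20000000ULL + 999999) / 1000000);
	return 0;
}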
+static int nwl_dsi_config_host(struct nwl_dsi *dsi)
+{
+ u32 cycles;
+ struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;
+
+ if (dsi->lanes < 1 || dsi->lanes > 4)
+ return -EINVAL;
+
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);
+
+ if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+ nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
+ } else {
+ nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
+ }
+
+ /* values in byte clock cycles */
+ cycles = ui2bc(dsi, cfg->clk_pre);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
+ cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
+ cycles += ui2bc(dsi, cfg->clk_pre);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
+ cycles = ps2bc(dsi, cfg->hs_exit);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);
+
+ nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
+ /* In LP clock cycles */
+ cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);
+
+ return nwl_dsi_clear_error(dsi);
+}
+
+static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
+{
+ u32 mode;
+ int color_format;
+ bool burst_mode;
+ int hfront_porch, hback_porch, vfront_porch, vback_porch;
+ int hsync_len, vsync_len;
+
+ hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
+ hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
+ hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;
+
+ vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
+ vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
+ vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;
+
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);
+
+ color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
+ if (color_format < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
+ dsi->format);
+ return color_format;
+ }
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);
+
+ nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
+ /*
+ * Adjusting input polarity based on the video mode results in
+ * a black screen so always pick active low:
+ */
+ nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
+ NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
+ nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
+ NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);
+
+ burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
+ !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);
+
+ if (burst_mode) {
+ nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
+ } else {
+ mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
+ NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
+ NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
+ nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
+ dsi->mode.hdisplay);
+ }
+
+ nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
+ nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
+ nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);
+
+ nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
+ nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
+ nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
+ nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);
+
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
+ nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
+ nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
+ nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);
+
+ return nwl_dsi_clear_error(dsi);
+}
+
+static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi)
+{
+ u32 irq_enable;
+
+ nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff);
+ nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7);
+
+ irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK |
+ NWL_DSI_RX_PKT_HDR_RCVD_MASK |
+ NWL_DSI_TX_FIFO_OVFLW_MASK |
+ NWL_DSI_HS_TX_TIMEOUT_MASK);
+
+ nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable);
+
+ return nwl_dsi_clear_error(dsi);
+}
+
+static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host,
+ struct mipi_dsi_device *device)
+{
+ struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+ struct device *dev = dsi->dev;
+
+ DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes,
+ device->format, device->mode_flags);
+
+ if (device->lanes < 1 || device->lanes > 4)
+ return -EINVAL;
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->dsi_mode_flags = device->mode_flags;
+
+ return 0;
+}
+
+static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
+{
+ struct device *dev = dsi->dev;
+ struct nwl_dsi_transfer *xfer = dsi->xfer;
+ int err;
+ u8 *payload = xfer->msg->rx_buf;
+ u32 val;
+ u16 word_count;
+ u8 channel;
+ u8 data_type;
+
+ xfer->status = 0;
+
+ if (xfer->rx_word_count == 0) {
+ if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
+ return false;
+ /* Get the RX header and parse it */
+ val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
+ err = nwl_dsi_clear_error(dsi);
+ if (err)
+ xfer->status = err;
+ word_count = NWL_DSI_WC(val);
+ channel = NWL_DSI_RX_VC(val);
+ data_type = NWL_DSI_RX_DT(val);
+
+ if (channel != xfer->msg->channel) {
+ DRM_DEV_ERROR(dev,
+ "[%02X] Channel mismatch (%u != %u)\n",
+ xfer->cmd, channel, xfer->msg->channel);
+ xfer->status = -EINVAL;
+ return true;
+ }
+
+ switch (data_type) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ fallthrough;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ if (xfer->msg->rx_len > 1) {
+ /* read second byte */
+ payload[1] = word_count >> 8;
+ ++xfer->rx_len;
+ }
+ fallthrough;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ fallthrough;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ if (xfer->msg->rx_len > 0) {
+ /* read first byte */
+ payload[0] = word_count & 0xff;
+ ++xfer->rx_len;
+ }
+ xfer->status = xfer->rx_len;
+ return true;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ word_count &= 0xff;
+ DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
+ xfer->cmd, word_count);
+ xfer->status = -EPROTO;
+ return true;
+ }
+
+ if (word_count > xfer->msg->rx_len) {
+ DRM_DEV_ERROR(dev,
+ "[%02X] Receive buffer too small: %zu (< %u)\n",
+ xfer->cmd, xfer->msg->rx_len, word_count);
+ xfer->status = -EINVAL;
+ return true;
+ }
+
+ xfer->rx_word_count = word_count;
+ } else {
+ /* Set word_count from previous header read */
+ word_count = xfer->rx_word_count;
+ }
+
+ /* If RX payload is not yet received, wait for it */
+ if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
+ return false;
+
+ /* Read the RX payload */
+ while (word_count >= 4) {
+ val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+ payload[0] = (val >> 0) & 0xff;
+ payload[1] = (val >> 8) & 0xff;
+ payload[2] = (val >> 16) & 0xff;
+ payload[3] = (val >> 24) & 0xff;
+ payload += 4;
+ xfer->rx_len += 4;
+ word_count -= 4;
+ }
+
+ if (word_count > 0) {
+ val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+ switch (word_count) {
+ case 3:
+ payload[2] = (val >> 16) & 0xff;
+ ++xfer->rx_len;
+ fallthrough;
+ case 2:
+ payload[1] = (val >> 8) & 0xff;
+ ++xfer->rx_len;
+ fallthrough;
+ case 1:
+ payload[0] = (val >> 0) & 0xff;
+ ++xfer->rx_len;
+ break;
+ }
+ }
+
+ xfer->status = xfer->rx_len;
+ err = nwl_dsi_clear_error(dsi);
+ if (err)
+ xfer->status = err;
+
+ return true;
+}
+
+static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status)
+{
+ struct nwl_dsi_transfer *xfer = dsi->xfer;
+ bool end_packet = false;
+
+ if (!xfer)
+ return;
+
+ if (xfer->direction == DSI_PACKET_SEND &&
+ status & NWL_DSI_TX_PKT_DONE) {
+ xfer->status = xfer->tx_len;
+ end_packet = true;
+ } else if (status & NWL_DSI_DPHY_DIRECTION &&
+ ((status & (NWL_DSI_RX_PKT_HDR_RCVD |
+ NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) {
+ end_packet = nwl_dsi_read_packet(dsi, status);
+ }
+
+ if (end_packet)
+ complete(&xfer->completed);
+}
+
+static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
+{
+ struct nwl_dsi_transfer *xfer = dsi->xfer;
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+ const u8 *payload;
+ size_t length;
+ u16 word_count;
+ u8 hs_mode;
+ u32 val;
+ u32 hs_workaround = 0;
+
+ /* Send the payload, if any */
+ length = pkt->payload_length;
+ payload = pkt->payload;
+
+ while (length >= 4) {
+ val = *(u32 *)payload;
+ hs_workaround |= !(val & 0xFFFF00);
+ nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+ payload += 4;
+ length -= 4;
+ }
+ /* Send the rest of the payload */
+ val = 0;
+ switch (length) {
+ case 3:
+ val |= payload[2] << 16;
+ fallthrough;
+ case 2:
+ val |= payload[1] << 8;
+ hs_workaround |= !(val & 0xFFFF00);
+ fallthrough;
+ case 1:
+ val |= payload[0];
+ nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+ break;
+ }
+ xfer->tx_len = pkt->payload_length;
+
+ /*
+ * Send the header
+ * header[0] = Virtual Channel + Data Type
+ * header[1] = Word Count LSB (LP) or first param (SP)
+ * header[2] = Word Count MSB (LP) or second param (SP)
+ */
+ word_count = pkt->header[1] | (pkt->header[2] << 8);
+ if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
+ DRM_DEV_DEBUG_DRIVER(dsi->dev,
+ "Using hs mode workaround for cmd 0x%x\n",
+ xfer->cmd);
+ hs_mode = 1;
+ } else {
+ hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
+ }
+ val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
+ NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
+ NWL_DSI_BTA_TX(xfer->need_bta);
+ nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);
+
+ /* Send packet command */
+ nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
+}
+
+static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+ struct nwl_dsi_transfer xfer;
+ ssize_t ret = 0;
+
+ /* Create packet to be sent */
+ dsi->xfer = &xfer;
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0) {
+ dsi->xfer = NULL;
+ return ret;
+ }
+
+ if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM ||
+ msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM ||
+ msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM ||
+ msg->type & MIPI_DSI_DCS_READ) &&
+ msg->rx_len > 0 && msg->rx_buf)
+ xfer.direction = DSI_PACKET_RECEIVE;
+ else
+ xfer.direction = DSI_PACKET_SEND;
+
+ xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE);
+ xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0;
+ xfer.msg = msg;
+ xfer.status = -ETIMEDOUT;
+ xfer.rx_word_count = 0;
+ xfer.rx_len = 0;
+ xfer.cmd = 0x00;
+ if (msg->tx_len > 0)
+ xfer.cmd = ((u8 *)(msg->tx_buf))[0];
+ init_completion(&xfer.completed);
+
+ ret = clk_prepare_enable(dsi->rx_esc_clk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n",
+ ret);
+ return ret;
+ }
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n",
+ clk_get_rate(dsi->rx_esc_clk));
+
+ /* Initiate the DSI packet transmission */
+ nwl_dsi_begin_transmission(dsi);
+
+ if (!wait_for_completion_timeout(&xfer.completed,
+ NWL_DSI_MIPI_FIFO_TIMEOUT)) {
+ DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n",
+ xfer.cmd);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = xfer.status;
+ }
+
+ clk_disable_unprepare(dsi->rx_esc_clk);
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
+ .attach = nwl_dsi_host_attach,
+ .transfer = nwl_dsi_host_transfer,
+};
+
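For context, this is how a downstream panel driver ends up in these ops (a hypothetical sketch, not part of this patch; it only uses the standard helpers from include/drm/drm_mipi_dsi.h):

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

/* Hypothetical panel code: both calls funnel into nwl_dsi_host_transfer(). */
static int example_panel_enable(struct mipi_dsi_device *dsi)
{
	u8 id;
	ssize_t len;
	int ret;

	/* DCS short write -> the DSI_PACKET_SEND path */
	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0)
		return ret;

	/* DCS read -> the DSI_PACKET_RECEIVE path, which requests a BTA */
	len = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_ID, &id, sizeof(id));
	return len < 0 ? (int)len : 0;
}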
+static irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
+{
+ u32 irq_status;
+ struct nwl_dsi *dsi = data;
+
+ irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS);
+
+ if (irq_status & NWL_DSI_TX_FIFO_OVFLW)
+ DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n");
+
+ if (irq_status & NWL_DSI_HS_TX_TIMEOUT)
+ DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n");
+
+ if (irq_status & NWL_DSI_TX_PKT_DONE ||
+ irq_status & NWL_DSI_RX_PKT_HDR_RCVD ||
+ irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)
+ nwl_dsi_finish_transmission(dsi, irq_status);
+
+ return IRQ_HANDLED;
+}
+
+static int nwl_dsi_enable(struct nwl_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
+ int ret;
+
+ if (!dsi->lanes) {
+ DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
+ return -EINVAL;
+ }
+
+ ret = phy_init(dsi->phy);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
+ return ret;
+ }
+
+ ret = phy_configure(dsi->phy, phy_cfg);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
+ goto uninit_phy;
+ }
+
+ ret = clk_prepare_enable(dsi->tx_esc_clk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
+ ret);
+ goto uninit_phy;
+ }
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
+ clk_get_rate(dsi->tx_esc_clk));
+
+ ret = nwl_dsi_config_host(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret);
+ goto disable_clock;
+ }
+
+ ret = nwl_dsi_config_dpi(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret);
+ goto disable_clock;
+ }
+
+ ret = phy_power_on(dsi->phy);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
+ goto disable_clock;
+ }
+
+ ret = nwl_dsi_init_interrupts(dsi);
+ if (ret < 0)
+ goto power_off_phy;
+
+ return ret;
+
+power_off_phy:
+ phy_power_off(dsi->phy);
+disable_clock:
+ clk_disable_unprepare(dsi->tx_esc_clk);
+uninit_phy:
+ phy_exit(dsi->phy);
+
+ return ret;
+}
+
+static int nwl_dsi_disable(struct nwl_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n");
+
+ phy_power_off(dsi->phy);
+ phy_exit(dsi->phy);
+
+ /* Disabling the clock before the phy breaks enabling dsi again */
+ clk_disable_unprepare(dsi->tx_esc_clk);
+
+ return 0;
+}
+
+static void nwl_dsi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ nwl_dsi_disable(dsi);
+
+ ret = reset_control_assert(dsi->rst_dpi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret);
+ return;
+ }
+ ret = reset_control_assert(dsi->rst_byte);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret);
+ return;
+ }
+ ret = reset_control_assert(dsi->rst_esc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret);
+ return;
+ }
+ ret = reset_control_assert(dsi->rst_pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret);
+ return;
+ }
+
+ clk_disable_unprepare(dsi->core_clk);
+ clk_disable_unprepare(dsi->lcdif_clk);
+
+ pm_runtime_put(dsi->dev);
+}
+
+static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
+ const struct drm_display_mode *mode,
+ union phy_configure_opts *phy_opts)
+{
+ unsigned long rate;
+ int ret;
+
+ if (dsi->lanes < 1 || dsi->lanes > 4)
+ return -EINVAL;
+
+ /*
+ * So far the DPHY spec minimal timings work for both the Mixel
+ * DPHY and the NWL DSI host
+ */
+ ret = phy_mipi_dphy_get_default_config(mode->clock * 1000,
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes,
+ &phy_opts->mipi_dphy);
+ if (ret < 0)
+ return ret;
+
+ rate = clk_get_rate(dsi->tx_esc_clk);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate);
+ phy_opts->mipi_dphy.lp_clk_rate = rate;
+
+ return 0;
+}
+
+static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* At least LCDIF + NWL needs active high sync */
+ adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+
+ return true;
+}
+
+static enum drm_mode_status
+nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+ if (mode->clock * bpp > 15000000 * dsi->lanes)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock * bpp < 80000 * dsi->lanes)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
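Plugging in numbers (illustrative): RGB888 gives bpp = 24, so a 4-lane 1080p60 mode with clock = 148500 kHz yields 148500 * 24 = 3564000, comfortably between the lower bound 80000 * 4 = 320000 and the upper bound 15000000 * 4 = 60000000, and is therefore reported as MODE_OK.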
+static void
+nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ struct device *dev = dsi->dev;
+ union phy_configure_opts new_cfg;
+ unsigned long phy_ref_rate;
+ int ret;
+
+ ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg);
+ if (ret < 0)
+ return;
+
+ /*
+ * If hs clock is unchanged, we're all good - all parameters are
+ * derived from it atm.
+ */
+ if (new_cfg.mipi_dphy.hs_clk_rate == dsi->phy_cfg.mipi_dphy.hs_clk_rate)
+ return;
+
+ phy_ref_rate = clk_get_rate(dsi->phy_ref_clk);
+ DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate);
+ /* Save the new desired phy config */
+ memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg));
+
+ memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
+ drm_mode_debug_printmodeline(adjusted_mode);
+}
+
+static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ pm_runtime_get_sync(dsi->dev);
+
+ if (clk_prepare_enable(dsi->lcdif_clk) < 0)
+ return;
+ if (clk_prepare_enable(dsi->core_clk) < 0)
+ return;
+
+ /* Step 1 from DSI reset-out instructions */
+ ret = reset_control_deassert(dsi->rst_pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert PCLK: %d\n", ret);
+ return;
+ }
+
+ /* Step 2 from DSI reset-out instructions */
+ nwl_dsi_enable(dsi);
+
+ /* Step 3 from DSI reset-out instructions */
+ ret = reset_control_deassert(dsi->rst_esc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert ESC: %d\n", ret);
+ return;
+ }
+ ret = reset_control_deassert(dsi->rst_byte);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert BYTE: %d\n", ret);
+ return;
+ }
+}
+
+static void nwl_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ /* Step 5 from DSI reset-out instructions */
+ ret = reset_control_deassert(dsi->rst_dpi);
+ if (ret < 0)
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret);
+}
+
+static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ struct drm_bridge *panel_bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
+ ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
+ &panel_bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ panel_bridge = drm_panel_bridge_add(panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+ }
+ dsi->panel_bridge = panel_bridge;
+
+ if (!dsi->panel_bridge)
+ return -EPROBE_DEFER;
+
+ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ flags);
+}
+
+static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+
+ drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0);
+}
+
+static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
+ .pre_enable = nwl_dsi_bridge_pre_enable,
+ .enable = nwl_dsi_bridge_enable,
+ .disable = nwl_dsi_bridge_disable,
+ .mode_fixup = nwl_dsi_bridge_mode_fixup,
+ .mode_set = nwl_dsi_bridge_mode_set,
+ .mode_valid = nwl_dsi_bridge_mode_valid,
+ .attach = nwl_dsi_bridge_attach,
+ .detach = nwl_dsi_bridge_detach,
+};
+
+static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
+{
+ struct platform_device *pdev = to_platform_device(dsi->dev);
+ struct clk *clk;
+ void __iomem *base;
+ int ret;
+
+ dsi->phy = devm_phy_get(dsi->dev, "dphy");
+ if (IS_ERR(dsi->phy)) {
+ ret = PTR_ERR(dsi->phy);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret);
+ return ret;
+ }
+
+ clk = devm_clk_get(dsi->dev, "lcdif");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->lcdif_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "core");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->core_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "phy_ref");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->phy_ref_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "rx_esc");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->rx_esc_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "tx_esc");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->tx_esc_clk = clk;
+
+ dsi->mux = devm_mux_control_get(dsi->dev, NULL);
+ if (IS_ERR(dsi->mux)) {
+ ret = PTR_ERR(dsi->mux);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret);
+ return ret;
+ }
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ dsi->regmap =
+ devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config);
+ if (IS_ERR(dsi->regmap)) {
+ ret = PTR_ERR(dsi->regmap);
+ DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n",
+ ret);
+ return ret;
+ }
+
+ dsi->irq = platform_get_irq(pdev, 0);
+ if (dsi->irq < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n",
+ dsi->irq);
+ return dsi->irq;
+ }
+
+ dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk");
+ if (IS_ERR(dsi->rst_pclk)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n",
+ PTR_ERR(dsi->rst_pclk));
+ return PTR_ERR(dsi->rst_pclk);
+ }
+ dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte");
+ if (IS_ERR(dsi->rst_byte)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n",
+ PTR_ERR(dsi->rst_byte));
+ return PTR_ERR(dsi->rst_byte);
+ }
+ dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc");
+ if (IS_ERR(dsi->rst_esc)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n",
+ PTR_ERR(dsi->rst_esc));
+ return PTR_ERR(dsi->rst_esc);
+ }
+ dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi");
+ if (IS_ERR(dsi->rst_dpi)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n",
+ PTR_ERR(dsi->rst_dpi));
+ return PTR_ERR(dsi->rst_dpi);
+ }
+ return 0;
+}
+
+static int nwl_dsi_select_input(struct nwl_dsi *dsi)
+{
+ struct device_node *remote;
+ u32 use_dcss = 1;
+ int ret;
+
+ remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+ NWL_DSI_ENDPOINT_LCDIF);
+ if (remote) {
+ use_dcss = 0;
+ } else {
+ remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+ NWL_DSI_ENDPOINT_DCSS);
+ if (!remote) {
+ DRM_DEV_ERROR(dsi->dev,
+ "No valid input endpoint found\n");
+ return -EINVAL;
+ }
+ }
+
+ DRM_DEV_INFO(dsi->dev, "Using %s as input source\n",
+ (use_dcss) ? "DCSS" : "LCDIF");
+ ret = mux_control_try_select(dsi->mux, use_dcss);
+ if (ret < 0)
+ DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret);
+
+ of_node_put(remote);
+ return ret;
+}
+
+static int nwl_dsi_deselect_input(struct nwl_dsi *dsi)
+{
+ int ret;
+
+ ret = mux_control_deselect(dsi->mux);
+ if (ret < 0)
+ DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret);
+
+ return ret;
+}
+
+static const struct drm_bridge_timings nwl_dsi_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_DE_LOW,
+};
+
+static const struct of_device_id nwl_dsi_dt_ids[] = {
+ { .compatible = "fsl,imx8mq-nwl-dsi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);
+
+static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
+ { .soc_id = "i.MX8MQ", .revision = "2.0",
+ .data = (void *)E11418_HS_MODE_QUIRK },
+ { /* sentinel. */ },
+};
+
+static int nwl_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct soc_device_attribute *attr;
+ struct nwl_dsi *dsi;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->dev = dev;
+
+ ret = nwl_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0,
+ dev_name(dev), dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq,
+ ret);
+ return ret;
+ }
+
+ dsi->dsi_host.ops = &nwl_dsi_host_ops;
+ dsi->dsi_host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
+ return ret;
+ }
+
+ attr = soc_device_match(nwl_dsi_quirks_match);
+ if (attr)
+ dsi->quirks = (uintptr_t)attr->data;
+
+ dsi->bridge.driver_private = dsi;
+ dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
+ dsi->bridge.of_node = dev->of_node;
+ dsi->bridge.timings = &nwl_dsi_timings;
+
+ dev_set_drvdata(dev, dsi);
+ pm_runtime_enable(dev);
+
+ ret = nwl_dsi_select_input(dsi);
+ if (ret < 0) {
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ return ret;
+ }
+
+ drm_bridge_add(&dsi->bridge);
+ return 0;
+}
+
+static int nwl_dsi_remove(struct platform_device *pdev)
+{
+ struct nwl_dsi *dsi = platform_get_drvdata(pdev);
+
+ nwl_dsi_deselect_input(dsi);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ drm_bridge_remove(&dsi->bridge);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver nwl_dsi_driver = {
+ .probe = nwl_dsi_probe,
+ .remove = nwl_dsi_remove,
+ .driver = {
+ .of_match_table = nwl_dsi_dt_ids,
+ .name = DRV_NAME,
+ },
+};
+
+module_platform_driver(nwl_dsi_driver);
+
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_AUTHOR("Purism SPC");
+MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver");
+MODULE_LICENSE("GPL"); /* GPLv2 or later */
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.h b/drivers/gpu/drm/bridge/nwl-dsi.h
new file mode 100644
index 000000000000..a247a8a11c7c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/nwl-dsi.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2019 Purism SPC
+ */
+#ifndef __NWL_DSI_H__
+#define __NWL_DSI_H__
+
+/* DSI HOST registers */
+#define NWL_DSI_CFG_NUM_LANES 0x0
+#define NWL_DSI_CFG_NONCONTINUOUS_CLK 0x4
+#define NWL_DSI_CFG_T_PRE 0x8
+#define NWL_DSI_CFG_T_POST 0xc
+#define NWL_DSI_CFG_TX_GAP 0x10
+#define NWL_DSI_CFG_AUTOINSERT_EOTP 0x14
+#define NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP 0x18
+#define NWL_DSI_CFG_HTX_TO_COUNT 0x1c
+#define NWL_DSI_CFG_LRX_H_TO_COUNT 0x20
+#define NWL_DSI_CFG_BTA_H_TO_COUNT 0x24
+#define NWL_DSI_CFG_TWAKEUP 0x28
+#define NWL_DSI_CFG_STATUS_OUT 0x2c
+#define NWL_DSI_RX_ERROR_STATUS 0x30
+
+/* DSI DPI registers */
+#define NWL_DSI_PIXEL_PAYLOAD_SIZE 0x200
+#define NWL_DSI_PIXEL_FIFO_SEND_LEVEL 0x204
+#define NWL_DSI_INTERFACE_COLOR_CODING 0x208
+#define NWL_DSI_PIXEL_FORMAT 0x20c
+#define NWL_DSI_VSYNC_POLARITY 0x210
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW 0
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+
+#define NWL_DSI_HSYNC_POLARITY 0x214
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW 0
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+
+#define NWL_DSI_VIDEO_MODE 0x218
+#define NWL_DSI_HFP 0x21c
+#define NWL_DSI_HBP 0x220
+#define NWL_DSI_HSA 0x224
+#define NWL_DSI_ENABLE_MULT_PKTS 0x228
+#define NWL_DSI_VBP 0x22c
+#define NWL_DSI_VFP 0x230
+#define NWL_DSI_BLLP_MODE 0x234
+#define NWL_DSI_USE_NULL_PKT_BLLP 0x238
+#define NWL_DSI_VACTIVE 0x23c
+#define NWL_DSI_VC 0x240
+
+/* DSI APB PKT control */
+#define NWL_DSI_TX_PAYLOAD 0x280
+#define NWL_DSI_PKT_CONTROL 0x284
+#define NWL_DSI_SEND_PACKET 0x288
+#define NWL_DSI_PKT_STATUS 0x28c
+#define NWL_DSI_PKT_FIFO_WR_LEVEL 0x290
+#define NWL_DSI_PKT_FIFO_RD_LEVEL 0x294
+#define NWL_DSI_RX_PAYLOAD 0x298
+#define NWL_DSI_RX_PKT_HEADER 0x29c
+
+/* DSI IRQ handling */
+#define NWL_DSI_IRQ_STATUS 0x2a0
+#define NWL_DSI_SM_NOT_IDLE BIT(0)
+#define NWL_DSI_TX_PKT_DONE BIT(1)
+#define NWL_DSI_DPHY_DIRECTION BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD BIT(8)
+#define NWL_DSI_BTA_TIMEOUT BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT BIT(31)
+
+#define NWL_DSI_IRQ_STATUS2 0x2a4
+#define NWL_DSI_SINGLE_BIT_ECC_ERR BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR BIT(1)
+#define NWL_DSI_CRC_ERR BIT(2)
+
+#define NWL_DSI_IRQ_MASK 0x2a8
+#define NWL_DSI_SM_NOT_IDLE_MASK BIT(0)
+#define NWL_DSI_TX_PKT_DONE_MASK BIT(1)
+#define NWL_DSI_DPHY_DIRECTION_MASK BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW_MASK BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW_MASK BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW_MASK BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW_MASK BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD_MASK BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD_MASK BIT(8)
+#define NWL_DSI_BTA_TIMEOUT_MASK BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT_MASK BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT_MASK BIT(31)
+
+#define NWL_DSI_IRQ_MASK2 0x2ac
+#define NWL_DSI_SINGLE_BIT_ECC_ERR_MASK BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR_MASK BIT(1)
+#define NWL_DSI_CRC_ERR_MASK BIT(2)
+
+/*
+ * PKT_CONTROL format:
+ * [15: 0] - word count
+ * [17:16] - virtual channel
+ * [23:18] - data type
+ * [24] - LP or HS select (0 - LP, 1 - HS)
+ * [25] - perform BTA after packet is sent
+ * [26] - perform BTA only, no packet tx
+ */
+#define NWL_DSI_WC(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define NWL_DSI_TX_VC(x) FIELD_PREP(GENMASK(17, 16), (x))
+#define NWL_DSI_TX_DT(x) FIELD_PREP(GENMASK(23, 18), (x))
+#define NWL_DSI_HS_SEL(x) FIELD_PREP(GENMASK(24, 24), (x))
+#define NWL_DSI_BTA_TX(x) FIELD_PREP(GENMASK(25, 25), (x))
+#define NWL_DSI_BTA_NO_TX(x) FIELD_PREP(GENMASK(26, 26), (x))
+
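As an illustration of the PKT_CONTROL layout (an editor's sketch mirroring what nwl_dsi_begin_transmission() writes in nwl-dsi.c; MIPI_DSI_DCS_LONG_WRITE comes from video/mipi_display.h):

/* Compose a PKT_CONTROL word: 4-byte DCS long write on virtual channel 0,
 * sent in HS mode, with a BTA requested after the packet.
 */
static inline u32 nwl_dsi_example_pkt_control(void)
{
	return NWL_DSI_WC(4) | NWL_DSI_TX_VC(0) |
	       NWL_DSI_TX_DT(MIPI_DSI_DCS_LONG_WRITE) |
	       NWL_DSI_HS_SEL(1) | NWL_DSI_BTA_TX(1);
}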
+/*
+ * RX_PKT_HEADER format:
+ * [15: 0] - word count
+ * [21:16] - data type
+ * [23:22] - virtual channel
+ */
+#define NWL_DSI_RX_DT(x) FIELD_GET(GENMASK(21, 16), (x))
+#define NWL_DSI_RX_VC(x) FIELD_GET(GENMASK(23, 22), (x))
+
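Decoding goes the other way (illustrative; this matches the extraction done in nwl_dsi_read_packet() in nwl-dsi.c):

/* Split a word read from NWL_DSI_RX_PKT_HEADER into its fields. */
static inline void nwl_dsi_example_decode_hdr(u32 hdr, u16 *wc, u8 *dt, u8 *vc)
{
	*wc = NWL_DSI_WC(hdr); /* bits 15:0: shift is 0, so PREP == extract */
	*dt = NWL_DSI_RX_DT(hdr);
	*vc = NWL_DSI_RX_VC(hdr);
}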
+/* DSI Video mode */
+#define NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES 0
+#define NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS BIT(0)
+#define NWL_DSI_VM_BURST_MODE BIT(1)
+
+/* DPI color coding */
+#define NWL_DSI_DPI_16_BIT_565_PACKED 0
+#define NWL_DSI_DPI_16_BIT_565_ALIGNED 1
+#define NWL_DSI_DPI_16_BIT_565_SHIFTED 2
+#define NWL_DSI_DPI_18_BIT_PACKED 3
+#define NWL_DSI_DPI_18_BIT_ALIGNED 4
+#define NWL_DSI_DPI_24_BIT 5
+
+/* DPI pixel format */
+#define NWL_DSI_PIXEL_FORMAT_16 0
+#define NWL_DSI_PIXEL_FORMAT_18 BIT(0)
+#define NWL_DSI_PIXEL_FORMAT_18L BIT(1)
+#define NWL_DSI_PIXEL_FORMAT_24 (BIT(0) | BIT(1))
+
+#endif /* __NWL_DSI_H__ */
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 8461ee8304ba..1e63ed6b18aa 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -166,7 +166,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
*
* The connector type is set to @panel->connector_type, which must be set to a
* known type. Calling this function with a panel whose connector type is
- * DRM_MODE_CONNECTOR_Unknown will return NULL.
+ * DRM_MODE_CONNECTOR_Unknown will return ERR_PTR(-EINVAL).
*
* See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
@@ -174,7 +174,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
{
if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
- return NULL;
+ return ERR_PTR(-EINVAL);
return drm_panel_bridge_add_typed(panel, panel->connector_type);
}
@@ -265,7 +265,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_panel *panel)
{
if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
- return NULL;
+ return ERR_PTR(-EINVAL);
return devm_drm_panel_bridge_add_typed(dev, panel,
panel->connector_type);
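With this change callers must test the return value with IS_ERR() instead of comparing against NULL; the expected pattern (matching how the ps8640 driver below consumes it) is:

bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(bridge))
	return PTR_ERR(bridge); /* -EINVAL for an unknown connector type */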
@@ -311,6 +311,7 @@ EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
/**
* drm_panel_bridge_connector - return the connector for the panel bridge
+ * @bridge: The drm_bridge.
*
* drm_panel_bridge creates the connector.
* This function gives external access to the connector.
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index d3a53442d449..4b099196afeb 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -268,8 +268,6 @@ static int ps8640_probe(struct i2c_client *client)
if (!panel)
return -ENODEV;
- panel->connector_type = DRM_MODE_CONNECTOR_eDP;
-
ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(ps_bridge->panel_bridge))
return PTR_ERR(ps_bridge->panel_bridge);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index f81f81b7051f..b1258f0ed205 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -836,7 +836,8 @@ static int sii9234_init_resources(struct sii9234 *ctx,
ctx->supplies[3].supply = "cvcc12";
ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies);
if (ret) {
- dev_err(ctx->dev, "regulator_bulk failed\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(ctx->dev, "regulator_bulk failed\n");
return ret;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index dd56996fe9c7..d0db1acf11d7 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -630,7 +630,7 @@ static struct platform_driver snd_dw_hdmi_driver = {
module_platform_driver(snd_dw_hdmi_driver);
-MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_DESCRIPTION("Synopsis Designware HDMI AHB ALSA interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 383b1073d7de..30681398cfb0 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -92,6 +92,12 @@ static const u16 csc_coeff_rgb_in_eitu709[3][4] = {
{ 0x6756, 0x78ab, 0x2000, 0x0200 }
};
+static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = {
+ { 0x1b7c, 0x0000, 0x0000, 0x0020 },
+ { 0x0000, 0x1b7c, 0x0000, 0x0020 },
+ { 0x0000, 0x0000, 0x1b7c, 0x0020 }
+};
+
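The new coefficients can be sanity-checked against the fixed-point format of the neighbouring tables, where 0x2000 represents 1.0 (see csc_coeff_default): 0x1b7c / 0x2000 = 7036 / 8192 ≈ 0.8589 ≈ 219 / 255, i.e. the scale factor that compresses full range (0-255) into limited range (16-235); the fourth column (0x0020) then appears to supply the corresponding black-level offset in the controller's offset encoding.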
struct hdmi_vmode {
bool mdataenablepolarity;
@@ -109,6 +115,7 @@ struct hdmi_data_info {
unsigned int pix_repet_factor;
unsigned int hdcp_enable;
struct hdmi_vmode video_mode;
+ bool rgb_limited_range;
};
struct dw_hdmi_i2c {
@@ -956,7 +963,14 @@ static void hdmi_video_sample(struct dw_hdmi *hdmi)
static int is_color_space_conversion(struct dw_hdmi *hdmi)
{
- return hdmi->hdmi_data.enc_in_bus_format != hdmi->hdmi_data.enc_out_bus_format;
+ struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
+ bool is_input_rgb, is_output_rgb;
+
+ is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_in_bus_format);
+ is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format);
+
+ return (is_input_rgb != is_output_rgb) ||
+ (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range);
}
static int is_color_space_decimation(struct dw_hdmi *hdmi)
@@ -983,28 +997,37 @@ static int is_color_space_interpolation(struct dw_hdmi *hdmi)
return 0;
}
+static bool is_csc_needed(struct dw_hdmi *hdmi)
+{
+ return is_color_space_conversion(hdmi) ||
+ is_color_space_decimation(hdmi) ||
+ is_color_space_interpolation(hdmi);
+}
+
static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi)
{
const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
+ bool is_input_rgb, is_output_rgb;
unsigned i;
u32 csc_scale = 1;
- if (is_color_space_conversion(hdmi)) {
- if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
- if (hdmi->hdmi_data.enc_out_encoding ==
- V4L2_YCBCR_ENC_601)
- csc_coeff = &csc_coeff_rgb_out_eitu601;
- else
- csc_coeff = &csc_coeff_rgb_out_eitu709;
- } else if (hdmi_bus_fmt_is_rgb(
- hdmi->hdmi_data.enc_in_bus_format)) {
- if (hdmi->hdmi_data.enc_out_encoding ==
- V4L2_YCBCR_ENC_601)
- csc_coeff = &csc_coeff_rgb_in_eitu601;
- else
- csc_coeff = &csc_coeff_rgb_in_eitu709;
- csc_scale = 0;
- }
+ is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format);
+ is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format);
+
+ if (!is_input_rgb && is_output_rgb) {
+ if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+ csc_coeff = &csc_coeff_rgb_out_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_out_eitu709;
+ } else if (is_input_rgb && !is_output_rgb) {
+ if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+ csc_coeff = &csc_coeff_rgb_in_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_in_eitu709;
+ csc_scale = 0;
+ } else if (is_input_rgb && is_output_rgb &&
+ hdmi->hdmi_data.rgb_limited_range) {
+ csc_coeff = &csc_coeff_rgb_full_to_rgb_limited;
}
/* The CSC registers are sequential, alternating MSB then LSB */
@@ -1614,6 +1637,18 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
drm_hdmi_avi_infoframe_from_display_mode(&frame,
&hdmi->connector, mode);
+ if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+ drm_hdmi_avi_infoframe_quant_range(&frame, &hdmi->connector,
+ mode,
+ hdmi->hdmi_data.rgb_limited_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
frame.colorspace = HDMI_COLORSPACE_YUV444;
else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
@@ -1654,8 +1689,6 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
}
- frame.scan_mode = HDMI_SCAN_MODE_NONE;
-
/*
* The Designware IP uses a different byte format from standard
* AVI info frames, though generally the bits are in the correct
@@ -2010,18 +2043,19 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
/* Enable csc path */
- if (is_color_space_conversion(hdmi)) {
+ if (is_csc_needed(hdmi)) {
hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
- }
- /* Enable color space conversion if needed */
- if (is_color_space_conversion(hdmi))
hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH,
HDMI_MC_FLOWCTRL);
- else
+ } else {
+ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+
hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS,
HDMI_MC_FLOWCTRL);
+ }
}
/* Workaround to clear the overflow condition */
@@ -2119,6 +2153,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi &&
+ drm_default_rgb_quant_range(mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+
hdmi->hdmi_data.pix_repet_factor = 0;
hdmi->hdmi_data.hdcp_enable = 0;
hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 1b39e8d37834..6650fe4cfc20 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -178,6 +178,8 @@ static int tc358768_clear_error(struct tc358768_priv *priv)
static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
{
+ /* work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+ int tmpval = val;
size_t count = 2;
if (priv->error)
@@ -187,7 +189,7 @@ static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
if (reg < 0x100 || reg >= 0x600)
count = 1;
- priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+ priv->error = regmap_bulk_write(priv->regmap, reg, &tmpval, count);
}
static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
deleted file mode 100644
index c6bbd988b0e5..000000000000
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_CIRRUS_QEMU
- tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI && MMU
- select DRM_KMS_HELPER
- select DRM_GEM_SHMEM_HELPER
- help
- This is a KMS driver for emulated cirrus device in qemu.
- It is *NOT* intended for real cirrus devices. This requires
- the modesetting userspace X.org driver.
-
- Cirrus is obsolete, the hardware was designed in the 90ies
- and can't keep up with todays needs. More background:
- https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
-
- Better alternatives are:
- - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
- - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
- - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
deleted file mode 100644
index 0c1ed3f99725..000000000000
--- a/drivers/gpu/drm/cirrus/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9ccfbf213d72..965173fd0ac2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1641,10 +1641,10 @@ static const struct drm_info_list drm_atomic_debugfs_list[] = {
{"state", drm_state_info, 0},
};
-int drm_atomic_debugfs_init(struct drm_minor *minor)
+void drm_atomic_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(drm_atomic_debugfs_list,
- ARRAY_SIZE(drm_atomic_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 531b876d0ed8..800ac39f3213 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -135,6 +135,7 @@ static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
}
}
+ fpriv->was_master = (ret == 0);
return ret;
}
@@ -174,17 +175,77 @@ out_err:
return ret;
}
+/*
+ * In the olden days the SET/DROP_MASTER ioctls used to return EACCES when
+ * CAP_SYS_ADMIN was not set. This was used to prevent rogue applications
+ * from becoming master and/or failing to release it.
+ *
+ * At the same time, the first client (for a given VT) is _always_ master.
+ * Thus in order for the ioctls to succeed, one had to _explicitly_ run the
+ * application as root or flip the setuid bit.
+ *
+ * If CAP_SYS_ADMIN was missing, no other client could become master...
+ * EVER :-( Leading to a) the graphics session dying badly or b) a completely
+ * locked session.
+ *
+ *
+ * At some point systemd-logind was introduced to orchestrate and delegate
+ * master as applicable. It does so by opening the fd and passing it to users
+ * while logind itself a) does the set/drop master per the user's request and
+ * b) implicitly drops master on VT switch.
+ *
+ * Even though logind looks like the future, there are a few issues:
+ * - some platforms don't have an equivalent (Android, CrOS, some BSDs), so
+ * root is required _solely_ for SET/DROP MASTER.
+ * - applications may not be updated to use it,
+ * - any client which fails to drop master* can DoS the application using
+ * logind, to a varying degree.
+ *
+ * * Either due to a missing CAP_SYS_ADMIN or simply not calling DROP_MASTER.
+ *
+ *
+ * Here we implement the next best thing:
+ * - ensure the logind style of fd passing works unchanged, and
+ * - allow a client to drop/set master, iff it is/was master at a given point
+ * in time.
+ *
+ * Note: DROP_MASTER cannot be free for all, as an arbitrary user could:
+ * - DoS/crash the arbitrator - details would be implementation specific
+ * - open the node, become master implicitly and cause issues
+ *
+ * As a result this fixes the following when using a root-less build w/o logind:
+ * - startx
+ * - weston
+ * - various compositors based on wlroots
+ */
+static int
+drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
+{
+ if (file_priv->pid == task_pid(current) && file_priv->was_master)
+ return 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return 0;
+}
+
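In userspace terms this permits the following flow without CAP_SYS_ADMIN (an illustrative sketch using libdrm's drmSetMaster()/drmDropMaster() wrappers; error handling omitted):

#include <fcntl.h>
#include <xf86drm.h> /* libdrm */

static void vt_switch_example(void)
{
	int fd = open("/dev/dri/card0", O_RDWR); /* first open: implicit master */

	drmDropMaster(fd); /* allowed: this fd is master right now */
	/* ... another compositor runs, then we regain the VT ... */
	drmSetMaster(fd);  /* allowed: this fd was master before */
}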
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret = 0;
mutex_lock(&dev->master_mutex);
+
+ ret = drm_master_check_perm(dev, file_priv);
+ if (ret)
+ goto out_unlock;
+
if (drm_is_current_master(file_priv))
goto out_unlock;
if (dev->master) {
- ret = -EINVAL;
+ ret = -EBUSY;
goto out_unlock;
}
@@ -224,6 +285,12 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
int ret = -EINVAL;
mutex_lock(&dev->master_mutex);
+
+ ret = drm_master_check_perm(dev, file_priv);
+ if (ret)
+ goto out_unlock;
+
+ ret = -EINVAL;
if (!drm_is_current_master(file_priv))
goto out_unlock;
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 121481f6aa71..f1dcad96f341 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -135,7 +135,9 @@
* are underneath planes with higher Z position values. Two planes with the
* same Z position value have undefined ordering. Note that the Z position
* value can also be immutable, to inform userspace about the hard-coded
- * stacking of planes, see drm_plane_create_zpos_immutable_property().
+ * stacking of planes, see drm_plane_create_zpos_immutable_property(). If
+ * any plane has a zpos property (either mutable or immutable), then all
+ * planes shall have a zpos property.
*
* pixel blend mode:
* Pixel blend mode is set up with drm_plane_create_blend_mode_property().
@@ -183,6 +185,12 @@
* plane does not expose the "alpha" property, then this is
* assumed to be 1.0
*
+ * IN_FORMATS:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane. The blob is a drm_format_modifier_blob
+ * struct. Without this property the plane doesn't support buffers with
+ * modifiers. Userspace cannot change this property.
+ *
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
@@ -338,10 +346,10 @@ EXPORT_SYMBOL(drm_rotation_simplify);
* should be set to 0 and max to maximal number of planes for given crtc - 1.
*
* If zpos of some planes cannot be changed (like fixed background or
- * cursor/topmost planes), driver should adjust min/max values and assign those
- * planes immutable zpos property with lower or higher values (for more
+ * cursor/topmost planes), drivers shall adjust the min/max values and assign
+ * those planes immutable zpos properties with lower or higher values (for more
* information, see drm_plane_create_zpos_immutable_property() function). In such
- * case driver should also assign proper initial zpos values for all planes in
+ * case drivers shall also assign proper initial zpos values for all planes in
* its plane_reset() callback, so the planes will be always sorted properly.
*
* See also drm_atomic_normalize_zpos().
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index dcabf5698333..ef26ac57f039 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -33,6 +33,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
@@ -43,7 +44,6 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 6b0c6ef8b9b3..8cb93f5209a4 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -457,10 +457,10 @@ static const struct drm_info_list drm_client_debugfs_list[] = {
{ "internal_clients", drm_client_debugfs_internal_clients, 0 },
};
-int drm_client_debugfs_init(struct drm_minor *minor)
+void drm_client_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(drm_client_debugfs_list,
- ARRAY_SIZE(drm_client_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 644f0ad10671..b1099e1251a2 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1970,6 +1970,8 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
else
drm_reset_display_info(connector);
+ drm_update_tile_info(connector, edid);
+
drm_object_property_set_value(&connector->base,
dev->mode_config.non_desktop_property,
connector->display_info.non_desktop);
@@ -2392,7 +2394,7 @@ EXPORT_SYMBOL(drm_mode_put_tile_group);
* tile group or NULL if not found.
*/
struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8])
+ const char topology[8])
{
struct drm_tile_group *tg;
int id;
@@ -2422,7 +2424,7 @@ EXPORT_SYMBOL(drm_mode_get_tile_group);
* new tile group or NULL.
*/
struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8])
+ const char topology[8])
{
struct drm_tile_group *tg;
int ret;
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 16f2413403aa..da96b2f64d7e 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -82,6 +82,7 @@ int drm_mode_setcrtc(struct drm_device *dev,
/* drm_mode_config.c */
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
+void drm_mode_config_validate(struct drm_device *dev);
/* drm_modes.c */
const char *drm_get_mode_status_name(enum drm_mode_status status);
@@ -224,7 +225,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
/* drm_atomic.c */
#ifdef CONFIG_DEBUG_FS
struct drm_minor;
-int drm_atomic_debugfs_init(struct drm_minor *minor);
+void drm_atomic_debugfs_init(struct drm_minor *minor);
#endif
int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
@@ -278,3 +279,4 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
void drm_reset_display_info(struct drm_connector *connector);
u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
+void drm_update_tile_info(struct drm_connector *connector, const struct edid *edid);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 4e673d318503..2bea22130703 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -172,8 +172,8 @@ static const struct file_operations drm_debugfs_fops = {
* &struct drm_info_list in the given root directory. These files will be removed
* automatically on drm_debugfs_cleanup().
*/
-int drm_debugfs_create_files(const struct drm_info_list *files, int count,
- struct dentry *root, struct drm_minor *minor)
+void drm_debugfs_create_files(const struct drm_info_list *files, int count,
+ struct dentry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct drm_info_node *tmp;
@@ -199,7 +199,6 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
list_add(&tmp->list, &minor->debugfs_list);
mutex_unlock(&minor->debugfs_lock);
}
- return 0;
}
EXPORT_SYMBOL(drm_debugfs_create_files);
@@ -208,52 +207,28 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
{
struct drm_device *dev = minor->dev;
char name[64];
- int ret;
INIT_LIST_HEAD(&minor->debugfs_list);
mutex_init(&minor->debugfs_lock);
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
- ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
- if (ret) {
- debugfs_remove(minor->debugfs_root);
- minor->debugfs_root = NULL;
- DRM_ERROR("Failed to create core drm debugfs files\n");
- return ret;
- }
+ drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
if (drm_drv_uses_atomic_modeset(dev)) {
- ret = drm_atomic_debugfs_init(minor);
- if (ret) {
- DRM_ERROR("Failed to create atomic debugfs files\n");
- return ret;
- }
+ drm_atomic_debugfs_init(minor);
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_framebuffer_debugfs_init(minor);
- if (ret) {
- DRM_ERROR("Failed to create framebuffer debugfs file\n");
- return ret;
- }
+ drm_framebuffer_debugfs_init(minor);
- ret = drm_client_debugfs_init(minor);
- if (ret) {
- DRM_ERROR("Failed to create client debugfs file\n");
- return ret;
- }
+ drm_client_debugfs_init(minor);
}
- if (dev->driver->debugfs_init) {
- ret = dev->driver->debugfs_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/sys/kernel/debug/dri.\n");
- return ret;
- }
- }
+ if (dev->driver->debugfs_init)
+ dev->driver->debugfs_init(minor);
+
return 0;
}
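
The conversion above follows the debugfs convention that file creation is
best-effort and must never fail device setup. A minimal sketch of what a
driver-side debugfs hook looks like once it, too, returns void (the hook
name, info list and show function here are hypothetical, not part of this
patch):

    static void foo_debugfs_init(struct drm_minor *minor)
    {
            static const struct drm_info_list foo_debugfs_list[] = {
                    { "foo_state", foo_state_show, 0 },
            };

            /* No return value to check; failures only affect debugfs. */
            drm_debugfs_create_files(foo_debugfs_list,
                                     ARRAY_SIZE(foo_debugfs_list),
                                     minor->debugfs_root, minor);
    }
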
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index a7add55a85b4..d07ba54ec945 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -34,9 +34,9 @@
*/
#include <linux/export.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index c6fbe6e6bc9d..19c99dddcb99 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1238,6 +1238,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
/* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
+ /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
+ { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
#undef OUI
@@ -1313,6 +1315,7 @@ static const struct edid_quirk edid_quirk_list[] = {
{ MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
{ MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
{ MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
};
#undef MFG
@@ -1533,3 +1536,271 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S
return num_bpc;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
+
+/**
+ * drm_dp_get_phy_test_pattern() - get the requested pattern from the sink.
+ * @aux: DisplayPort AUX channel
+ * @data: DP phy compliance test parameters.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data)
+{
+ int err;
+ u8 rate, lanes;
+
+ err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+ if (err < 0)
+ return err;
+ data->link_rate = drm_dp_bw_code_to_link_rate(rate);
+
+ err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+ if (err < 0)
+ return err;
+ data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
+
+ if (lanes & DP_ENHANCED_FRAME_CAP)
+ data->enhanced_frame_cap = true;
+
+ err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+ if (err < 0)
+ return err;
+
+ switch (data->phy_pattern) {
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ &data->custom80, sizeof(data->custom80));
+ if (err < 0)
+ return err;
+
+ break;
+ case DP_PHY_TEST_PATTERN_CP2520:
+ err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+ &data->hbr2_reset,
+ sizeof(data->hbr2_reset));
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_get_phy_test_pattern);
+
+/**
+ * drm_dp_set_phy_test_pattern() - set the pattern to the sink.
+ * @aux: DisplayPort AUX channel
+ * @data: DP phy compliance test parameters.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data, u8 dp_rev)
+{
+ int err, i;
+ u8 link_config[2];
+ u8 test_pattern;
+
+ link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate);
+ link_config[1] = data->num_lanes;
+ if (data->enhanced_frame_cap)
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2);
+ if (err < 0)
+ return err;
+
+ test_pattern = data->phy_pattern;
+ if (dp_rev < 0x12) {
+ test_pattern = (test_pattern << 2) &
+ DP_LINK_QUAL_PATTERN_11_MASK;
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
+ test_pattern);
+ if (err < 0)
+ return err;
+ } else {
+ for (i = 0; i < data->num_lanes; i++) {
+ err = drm_dp_dpcd_writeb(aux,
+ DP_LINK_QUAL_LANE0_SET + i,
+ test_pattern);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_set_phy_test_pattern);
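
A sketch of how a source driver might consume these two new helpers when
servicing a DP PHY compliance test request (the handler name is
hypothetical; the link reconfiguration step is driver-specific and elided):

    static void foo_handle_phy_test(struct drm_dp_aux *aux, u8 dpcd_rev)
    {
            struct drm_dp_phy_test_params data = {};

            if (drm_dp_get_phy_test_pattern(aux, &data) < 0)
                    return;

            /* Retrain the link at data.link_rate / data.num_lanes here,
             * then program the requested pattern into the sink. */
            drm_dp_set_phy_test_pattern(aux, &data, dpcd_rev);
    }
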
+
+static const char *dp_pixelformat_get_name(enum dp_pixelformat pixelformat)
+{
+ if (pixelformat < 0 || pixelformat > DP_PIXELFORMAT_RESERVED)
+ return "Invalid";
+
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "RGB";
+ case DP_PIXELFORMAT_YUV444:
+ return "YUV444";
+ case DP_PIXELFORMAT_YUV422:
+ return "YUV422";
+ case DP_PIXELFORMAT_YUV420:
+ return "YUV420";
+ case DP_PIXELFORMAT_Y_ONLY:
+ return "Y_ONLY";
+ case DP_PIXELFORMAT_RAW:
+ return "RAW";
+ default:
+ return "Reserved";
+ }
+}
+
+static const char *dp_colorimetry_get_name(enum dp_pixelformat pixelformat,
+ enum dp_colorimetry colorimetry)
+{
+ if (pixelformat < 0 || pixelformat > DP_PIXELFORMAT_RESERVED)
+ return "Invalid";
+
+ switch (colorimetry) {
+ case DP_COLORIMETRY_DEFAULT:
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "sRGB";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "BT.601";
+ case DP_PIXELFORMAT_Y_ONLY:
+ return "DICOM PS3.14";
+ case DP_PIXELFORMAT_RAW:
+ return "Custom Color Profile";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_RGB_WIDE_FIXED: /* and DP_COLORIMETRY_BT709_YCC */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "Wide Fixed";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "BT.709";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_RGB_WIDE_FLOAT: /* and DP_COLORIMETRY_XVYCC_601 */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "Wide Float";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "xvYCC 601";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_OPRGB: /* and DP_COLORIMETRY_XVYCC_709 */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "OpRGB";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "xvYCC 709";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_DCI_P3_RGB: /* and DP_COLORIMETRY_SYCC_601 */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "DCI-P3";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "sYCC 601";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_RGB_CUSTOM: /* and DP_COLORIMETRY_OPYCC_601 */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "Custom Profile";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "OpYCC 601";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_BT2020_RGB: /* and DP_COLORIMETRY_BT2020_CYCC */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "BT.2020 RGB";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "BT.2020 CYCC";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_BT2020_YCC:
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "BT.2020 YCC";
+ default:
+ return "Reserved";
+ }
+ default:
+ return "Invalid";
+ }
+}
+
+static const char *dp_dynamic_range_get_name(enum dp_dynamic_range dynamic_range)
+{
+ switch (dynamic_range) {
+ case DP_DYNAMIC_RANGE_VESA:
+ return "VESA range";
+ case DP_DYNAMIC_RANGE_CTA:
+ return "CTA range";
+ default:
+ return "Invalid";
+ }
+}
+
+static const char *dp_content_type_get_name(enum dp_content_type content_type)
+{
+ switch (content_type) {
+ case DP_CONTENT_TYPE_NOT_DEFINED:
+ return "Not defined";
+ case DP_CONTENT_TYPE_GRAPHICS:
+ return "Graphics";
+ case DP_CONTENT_TYPE_PHOTO:
+ return "Photo";
+ case DP_CONTENT_TYPE_VIDEO:
+ return "Video";
+ case DP_CONTENT_TYPE_GAME:
+ return "Game";
+ default:
+ return "Reserved";
+ }
+}
+
+void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
+ const struct drm_dp_vsc_sdp *vsc)
+{
+#define DP_SDP_LOG(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
+ DP_SDP_LOG("DP SDP: %s, revision %u, length %u\n", "VSC",
+ vsc->revision, vsc->length);
+ DP_SDP_LOG(" pixelformat: %s\n",
+ dp_pixelformat_get_name(vsc->pixelformat));
+ DP_SDP_LOG(" colorimetry: %s\n",
+ dp_colorimetry_get_name(vsc->pixelformat, vsc->colorimetry));
+ DP_SDP_LOG(" bpc: %u\n", vsc->bpc);
+ DP_SDP_LOG(" dynamic range: %s\n",
+ dp_dynamic_range_get_name(vsc->dynamic_range));
+ DP_SDP_LOG(" content type: %s\n",
+ dp_content_type_get_name(vsc->content_type));
+#undef DP_SDP_LOG
+}
+EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
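
Usage is a one-liner; a hedged example of dumping a decoded VSC SDP, e.g.
from a driver's atomic state checker (the field values are illustrative
only):

    struct drm_dp_vsc_sdp vsc = {
            .revision = 0x5, .length = 0x13,
            .pixelformat = DP_PIXELFORMAT_YUV420,
            .colorimetry = DP_COLORIMETRY_BT2020_YCC,
            .bpc = 10,
            .dynamic_range = DP_DYNAMIC_RANGE_CTA,
            .content_type = DP_CONTENT_TYPE_GRAPHICS,
    };

    drm_dp_vsc_sdp_log(KERN_DEBUG, dev->dev, &vsc);
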
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 9d89ebf3a749..1e26b89628f9 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <linux/iopoll.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
@@ -687,51 +688,45 @@ static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *
raw->cur_len = idx;
}
-/* this adds a chunk of msg to the builder to get the final msg */
-static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
- u8 *replybuf, u8 replybuflen, bool hdr)
+static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
+ struct drm_dp_sideband_msg_hdr *hdr,
+ u8 hdrlen)
{
- int ret;
- u8 crc4;
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!hdr->somt && !msg->have_somt)
+ return false;
- if (hdr) {
- u8 hdrlen;
- struct drm_dp_sideband_msg_hdr recv_hdr;
- ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
- if (ret == false) {
- print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
- return false;
- }
+ /* get length contained in this portion */
+ msg->curchunk_idx = 0;
+ msg->curchunk_len = hdr->msg_len;
+ msg->curchunk_hdrlen = hdrlen;
- /*
- * ignore out-of-order messages or messages that are part of a
- * failed transaction
- */
- if (!recv_hdr.somt && !msg->have_somt)
- return false;
+ /* we have already gotten an somt - don't bother parsing */
+ if (hdr->somt && msg->have_somt)
+ return false;
- /* get length contained in this portion */
- msg->curchunk_len = recv_hdr.msg_len;
- msg->curchunk_hdrlen = hdrlen;
+ if (hdr->somt) {
+ memcpy(&msg->initial_hdr, hdr,
+ sizeof(struct drm_dp_sideband_msg_hdr));
+ msg->have_somt = true;
+ }
+ if (hdr->eomt)
+ msg->have_eomt = true;
- /* we have already gotten an somt - don't bother parsing */
- if (recv_hdr.somt && msg->have_somt)
- return false;
+ return true;
+}
- if (recv_hdr.somt) {
- memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
- msg->have_somt = true;
- }
- if (recv_hdr.eomt)
- msg->have_eomt = true;
+/* this adds a chunk of msg to the builder to get the final msg */
+static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
+ u8 *replybuf, u8 replybuflen)
+{
+ u8 crc4;
- /* copy the bytes for the remainder of this header chunk */
- msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
- memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
- } else {
- memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
- msg->curchunk_idx += replybuflen;
- }
+ memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+ msg->curchunk_idx += replybuflen;
if (msg->curchunk_idx >= msg->curchunk_len) {
/* do CRC */
@@ -1060,13 +1055,12 @@ static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
drm_dp_encode_sideband_req(&req, msg);
}
-static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
drm_dp_encode_sideband_req(&req, msg);
- return 0;
}
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
@@ -1203,16 +1197,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
/* remove from q */
if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
- txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+ txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND)
list_del(&txmsg->next);
- }
-
- if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
- txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
- mstb->tx_slots[txmsg->seqno] = NULL;
- }
- mgr->is_waiting_for_dwn_reply = false;
-
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -2691,22 +2677,6 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
struct drm_dp_mst_branch *mstb = txmsg->dst;
u8 req_type;
- /* both msg slots are full */
- if (txmsg->seqno == -1) {
- if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
- DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
- return -EAGAIN;
- }
- if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
- txmsg->seqno = mstb->last_seqno;
- mstb->last_seqno ^= 1;
- } else if (mstb->tx_slots[0] == NULL)
- txmsg->seqno = 0;
- else
- txmsg->seqno = 1;
- mstb->tx_slots[txmsg->seqno] = txmsg;
- }
-
req_type = txmsg->msg[0] & 0x7f;
if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
req_type == DP_RESOURCE_STATUS_NOTIFY)
@@ -2718,7 +2688,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
hdr->lcr = mstb->lct - 1;
if (mstb->lct > 1)
memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
- hdr->seqno = txmsg->seqno;
+
return 0;
}
/*
@@ -2733,15 +2703,15 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
int len, space, idx, tosend;
int ret;
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
+ return 0;
+
memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
- if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
- txmsg->seqno = -1;
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
- }
- /* make hdr from dst mst - for replies use seqno
- otherwise assign one */
+ /* make hdr from dst mst */
ret = set_hdr_from_dst_qlock(&hdr, txmsg);
if (ret < 0)
return ret;
@@ -2794,42 +2764,17 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
if (list_empty(&mgr->tx_msg_downq))
return;
- txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
+ txmsg = list_first_entry(&mgr->tx_msg_downq,
+ struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
- if (ret == 1) {
- /* txmsg is sent it should be in the slots now */
- mgr->is_waiting_for_dwn_reply = true;
- list_del(&txmsg->next);
- } else if (ret) {
+ if (ret < 0) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
- mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next);
- if (txmsg->seqno != -1)
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
wake_up_all(&mgr->tx_waitq);
}
}
-/* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_sideband_msg_tx *txmsg)
-{
- int ret;
-
- /* construct a chunk from the first msg in the tx_msg queue */
- ret = process_single_tx_qlock(mgr, txmsg, true);
-
- if (ret != 1)
- DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-
- if (txmsg->seqno != -1) {
- WARN_ON((unsigned int)txmsg->seqno >
- ARRAY_SIZE(txmsg->dst->tx_slots));
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
- }
-}
-
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
@@ -2842,8 +2787,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
- if (list_is_singular(&mgr->tx_msg_downq) &&
- !mgr->is_waiting_for_dwn_reply)
+ if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -3467,7 +3411,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
- int req_type, int seqno, bool broadcast)
+ int req_type, bool broadcast)
{
struct drm_dp_sideband_msg_tx *txmsg;
@@ -3476,13 +3420,11 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- txmsg->seqno = seqno;
drm_dp_encode_up_ack_reply(txmsg, req_type);
mutex_lock(&mgr->qlock);
-
- process_single_up_tx_qlock(mgr, txmsg);
-
+ /* construct a chunk from the first msg in the tx_msg queue */
+ process_single_tx_qlock(mgr, txmsg, true);
mutex_unlock(&mgr->qlock);
kfree(txmsg);
@@ -3707,31 +3649,63 @@ out_fail:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool
+drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
+ struct drm_dp_mst_branch **mstb)
{
int len;
u8 replyblock[32];
int replylen, curreply;
int ret;
- struct drm_dp_sideband_msg_rx *msg;
- int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
- msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+ u8 hdrlen;
+ struct drm_dp_sideband_msg_hdr hdr;
+ struct drm_dp_sideband_msg_rx *msg =
+ up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+ int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
+ DP_SIDEBAND_MSG_DOWN_REP_BASE;
+
+ if (!up)
+ *mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg,
- replyblock, len);
+ ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
- ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+
+ ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
+ if (ret == false) {
+ print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
+ 1, replyblock, len, false);
+ DRM_DEBUG_KMS("ERROR: failed header\n");
+ return false;
+ }
+
+ if (!up) {
+ /* Caller is responsible for giving back this reference */
+ *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
+ if (!*mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr.lct);
+ return false;
+ }
+ }
+
+ if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
+ DRM_DEBUG_KMS("sideband msg set header failed %d\n",
+ replyblock[0]);
+ return false;
+ }
+
+ replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
+ ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
return false;
}
- replylen = msg->curchunk_len + msg->curchunk_hdrlen;
- replylen -= len;
+ replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
@@ -3743,7 +3717,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
return false;
}
- ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+ ret = drm_dp_sideband_append_payload(msg, replyblock, len);
if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
return false;
@@ -3758,67 +3732,60 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
- struct drm_dp_mst_branch *mstb;
- struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
- int slot = -1;
-
- if (!drm_dp_get_one_sb_msg(mgr, false))
- goto clear_down_rep_recv;
+ struct drm_dp_mst_branch *mstb = NULL;
+ struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
- if (!mgr->down_rep_recv.have_eomt)
- return 0;
+ if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
+ goto out;
- mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
- hdr->lct);
- goto clear_down_rep_recv;
- }
+ /* Multi-packet message transmission, don't clear the reply */
+ if (!msg->have_eomt)
+ goto out;
/* find the message */
- slot = hdr->seqno;
mutex_lock(&mgr->qlock);
- txmsg = mstb->tx_slots[slot];
- /* remove from slots */
+ txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
+ struct drm_dp_sideband_msg_tx, next);
mutex_unlock(&mgr->qlock);
- if (!txmsg) {
+ /* Were we actually expecting a response, and from this mstb? */
+ if (!txmsg || txmsg->dst != mstb) {
+ struct drm_dp_sideband_msg_hdr *hdr;
+ hdr = &msg->initial_hdr;
DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
mstb, hdr->seqno, hdr->lct, hdr->rad[0],
- mgr->down_rep_recv.msg[0]);
- goto no_msg;
+ msg->msg[0]);
+ goto out_clear_reply;
}
- drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ drm_dp_sideband_parse_reply(msg, &txmsg->reply);
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
txmsg->reply.req_type,
drm_dp_mst_req_type_str(txmsg->reply.req_type),
txmsg->reply.u.nak.reason,
drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
txmsg->reply.u.nak.nak_data);
+ }
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
drm_dp_mst_topology_put_mstb(mstb);
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
- mstb->tx_slots[slot] = NULL;
- mgr->is_waiting_for_dwn_reply = false;
+ list_del(&txmsg->next);
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
return 0;
-no_msg:
- drm_dp_mst_topology_put_mstb(mstb);
-clear_down_rep_recv:
- mutex_lock(&mgr->qlock);
- mgr->is_waiting_for_dwn_reply = false;
- mutex_unlock(&mgr->qlock);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out_clear_reply:
+ memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out:
+ if (mstb)
+ drm_dp_mst_topology_put_mstb(mstb);
return 0;
}
@@ -3894,11 +3861,9 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
struct drm_dp_pending_up_req *up_req;
- bool seqno;
- if (!drm_dp_get_one_sb_msg(mgr, true))
+ if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
goto out;
if (!mgr->up_req_recv.have_eomt)
@@ -3911,7 +3876,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
}
INIT_LIST_HEAD(&up_req->next);
- seqno = hdr->seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
@@ -3923,7 +3887,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
}
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
- seqno, false);
+ false);
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
@@ -3945,7 +3909,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
res_stat->available_pbn);
}
- up_req->hdr = *hdr;
+ up_req->hdr = mgr->up_req_recv.initial_hdr;
mutex_lock(&mgr->up_req_lock);
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
@@ -4051,27 +4015,6 @@ out:
EXPORT_SYMBOL(drm_dp_mst_detect_port);
/**
- * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
- * @mgr: manager for this port
- * @port: unverified pointer to a port.
- *
- * This returns whether the port supports audio or not.
- */
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port)
-{
- bool ret = false;
-
- port = drm_dp_mst_topology_get_port_validated(mgr, port);
- if (!port)
- return ret;
- ret = port->has_audio;
- drm_dp_mst_topology_put_port(port);
- return ret;
-}
-EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
-
-/**
* drm_dp_mst_get_edid() - get EDID for an MST port
* @connector: toplevel connector to get EDID for
* @mgr: manager for this port
@@ -4448,42 +4391,58 @@ fail:
return ret;
}
+static int do_get_act_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
/**
- * drm_dp_check_act_status() - Check ACT handled status.
+ * drm_dp_check_act_status() - Polls for ACT handled status.
* @mgr: manager to use
*
- * Check the payload status bits in the DPCD for ACT handled completion.
+ * Tries waiting for the MST hub to finish updating its payload table by
+ * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
+ * take that long).
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
*/
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
- u8 status;
- int ret;
- int count = 0;
-
- do {
- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
-
- if (ret < 0) {
- DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
- goto fail;
- }
-
- if (status & DP_PAYLOAD_ACT_HANDLED)
- break;
- count++;
- udelay(100);
-
- } while (count < 30);
-
- if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
- DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
- ret = -EINVAL;
- goto fail;
+ /*
+ * There doesn't seem to be any recommended retry count or timeout in
+ * the MST specification. Since some hubs have been observed to take
+ * over 1 second to update their payload allocations under certain
+ * conditions, we use a rather large timeout value.
+ */
+ const int timeout_ms = 3000;
+ int ret, status;
+
+ ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
+ status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+ 200, timeout_ms * USEC_PER_MSEC);
+ if (ret < 0 && status >= 0) {
+ DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
+ timeout_ms, status);
+ return -EINVAL;
+ } else if (status < 0) {
+ /*
+ * Failure here isn't unexpected - the hub may have
+ * just been unplugged
+ */
+ DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
+ status);
+ return status;
}
+
return 0;
-fail:
- return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
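
The poll above leans on a small trick: do_get_act_status() folds the AUX
errno and the DPCD status byte into a single int, so one exit condition
covers both "ACT handled" and "read failed". readx_poll_timeout() comes
from the <linux/iopoll.h> include added earlier in this file; its general
shape (argument names per that header) is:

    /* val = op(addr) is re-evaluated every sleep_us microseconds until
     * cond becomes true or timeout_us elapses; returns 0 on success or
     * -ETIMEDOUT, with val holding the last value read. */
    ret = readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us);
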
@@ -4674,28 +4633,18 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
+ if (!list_empty(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
-static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- if (!port->connector)
- return;
-
- if (port->mgr->cbs->destroy_connector) {
- port->mgr->cbs->destroy_connector(port->mgr, port->connector);
- } else {
+ if (port->connector) {
drm_connector_unregister(port->connector);
drm_connector_put(port->connector);
}
-}
-
-static inline void
-drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
-{
- drm_dp_destroy_connector(port);
drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
@@ -4705,26 +4654,25 @@ static inline void
drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
- struct drm_dp_mst_port *port, *tmp;
+ struct drm_dp_mst_port *port, *port_tmp;
+ struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
bool wake_tx = false;
mutex_lock(&mgr->lock);
- list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
list_del(&port->next);
drm_dp_mst_topology_put_port(port);
}
mutex_unlock(&mgr->lock);
- /* drop any tx slots msg */
+ /* drop any tx slot msg */
mutex_lock(&mstb->mgr->qlock);
- if (mstb->tx_slots[0]) {
- mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[0] = NULL;
- wake_tx = true;
- }
- if (mstb->tx_slots[1]) {
- mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[1] = NULL;
+ list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
+ if (txmsg->dst != mstb)
+ continue;
+
+ txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ list_del(&txmsg->next);
wake_tx = true;
}
mutex_unlock(&mstb->mgr->qlock);
@@ -5499,7 +5447,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *immediate_upstream_port;
struct drm_dp_mst_port *fec_port;
- struct drm_dp_desc desc = { 0 };
+ struct drm_dp_desc desc = { };
u8 endpoint_fec;
u8 endpoint_dsc;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7b1a628d1f6e..bc38322f306e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -39,6 +39,7 @@
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>
@@ -92,13 +93,27 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
}
}
+static void drm_minor_alloc_release(struct drm_device *dev, void *data)
+{
+ struct drm_minor *minor = data;
+ unsigned long flags;
+
+ WARN_ON(dev != minor->dev);
+
+ put_device(minor->kdev);
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+}
+
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
struct drm_minor *minor;
unsigned long flags;
int r;
- minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+ minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
if (!minor)
return -ENOMEM;
@@ -116,46 +131,20 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
idr_preload_end();
if (r < 0)
- goto err_free;
+ return r;
minor->index = r;
+ r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
+ if (r)
+ return r;
+
minor->kdev = drm_sysfs_minor_alloc(minor);
- if (IS_ERR(minor->kdev)) {
- r = PTR_ERR(minor->kdev);
- goto err_index;
- }
+ if (IS_ERR(minor->kdev))
+ return PTR_ERR(minor->kdev);
*drm_minor_get_slot(dev, type) = minor;
return 0;
-
-err_index:
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-err_free:
- kfree(minor);
- return r;
-}
-
-static void drm_minor_free(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor **slot, *minor;
- unsigned long flags;
-
- slot = drm_minor_get_slot(dev, type);
- minor = *slot;
- if (!minor)
- return;
-
- put_device(minor->kdev);
-
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-
- kfree(minor);
- *slot = NULL;
}
static int drm_minor_register(struct drm_device *dev, unsigned int type)
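
The minor allocation now follows the general drmm_ pattern from
drm_managed.c: allocate with drmm_kzalloc() so the memory lives until the
last drm_dev_put(), and register a release action for any teardown that
still has to happen by hand. A hedged sketch of the pattern, with
hypothetical foo_* names:

    static void foo_release(struct drm_device *dev, void *data)
    {
            struct foo *foo = data;

            foo_hw_fini(foo);   /* hypothetical manual teardown */
    }

    static int foo_init(struct drm_device *dev)
    {
            struct foo *foo = drmm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);

            if (!foo)
                    return -ENOMEM;

            /* Runs foo_release() immediately if registration fails. */
            return drmm_add_action_or_reset(dev, foo_release, foo);
    }
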
@@ -270,17 +259,22 @@ void drm_minor_release(struct drm_minor *minor)
* any other resources allocated at device initialization and drop the driver's
* reference to &drm_device using drm_dev_put().
*
- * Note that the lifetime rules for &drm_device instance has still a lot of
- * historical baggage. Hence use the reference counting provided by
- * drm_dev_get() and drm_dev_put() only carefully.
+ * Note that any allocation or resource which is visible to userspace must be
+ * released only when the final drm_dev_put() is called, and not when the
+ * driver is unbound from the underlying physical struct &device. Best to use
+ * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
+ * related functions.
+ *
+ * devres managed resources like devm_kmalloc() can only be used for resources
+ * directly related to the underlying hardware device, and only used in code
+ * paths fully protected by drm_dev_enter() and drm_dev_exit().
*
* Display driver example
* ~~~~~~~~~~~~~~~~~~~~~~
*
* The following example shows a typical structure of a DRM display driver.
* The example focuses on the probe() function and the other functions that are
- * almost always present and serves as a demonstration of devm_drm_dev_init()
- * usage with its accompanying drm_driver->release callback.
+ * almost always present, and serves as a demonstration of devm_drm_dev_init().
*
* .. code-block:: c
*
@@ -290,19 +284,8 @@ void drm_minor_release(struct drm_minor *minor)
* struct clk *pclk;
* };
*
- * static void driver_drm_release(struct drm_device *drm)
- * {
- * struct driver_device *priv = container_of(...);
- *
- * drm_mode_config_cleanup(drm);
- * drm_dev_fini(drm);
- * kfree(priv->userspace_facing);
- * kfree(priv);
- * }
- *
* static struct drm_driver driver_drm_driver = {
* [...]
- * .release = driver_drm_release,
* };
*
* static int driver_probe(struct platform_device *pdev)
@@ -322,13 +305,16 @@ void drm_minor_release(struct drm_minor *minor)
*
* ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
* if (ret) {
- * kfree(drm);
+ * kfree(priv);
* return ret;
* }
+ * drmm_add_final_kfree(drm, priv);
*
- * drm_mode_config_init(drm);
+ * ret = drmm_mode_config_init(drm);
+ * if (ret)
+ * return ret;
*
- * priv->userspace_facing = kzalloc(..., GFP_KERNEL);
+ * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
* if (!priv->userspace_facing)
* return -ENOMEM;
*
@@ -580,6 +566,23 @@ static void drm_fs_inode_free(struct inode *inode)
* used.
*/
+static void drm_dev_init_release(struct drm_device *dev, void *res)
+{
+ drm_legacy_ctxbitmap_cleanup(dev);
+ drm_legacy_remove_map_hash(dev);
+ drm_fs_inode_free(dev->anon_inode);
+
+ put_device(dev->dev);
+ /* Prevent use-after-free in drm_managed_release when debugging is
+ * enabled. Slightly awkward, but can't really be helped. */
+ dev->dev = NULL;
+ mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
+ drm_legacy_destroy_members(dev);
+}
+
/**
* drm_dev_init - Initialise new DRM device
* @dev: DRM device
@@ -608,6 +611,9 @@ static void drm_fs_inode_free(struct inode *inode)
* arbitrary offset, you must supply a &drm_driver.release callback and control
* the finalization explicitly.
*
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
+ *
* RETURNS:
* 0 on success, or error code on failure.
*/
@@ -629,6 +635,9 @@ int drm_dev_init(struct drm_device *dev,
dev->dev = get_device(parent);
dev->driver = driver;
+ INIT_LIST_HEAD(&dev->managed.resources);
+ spin_lock_init(&dev->managed.lock);
+
/* no per-device feature limits by default */
dev->driver_features = ~0u;
@@ -644,26 +653,30 @@ int drm_dev_init(struct drm_device *dev,
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
+ ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+ if (ret)
+ return ret;
+
dev->anon_inode = drm_fs_inode_new();
if (IS_ERR(dev->anon_inode)) {
ret = PTR_ERR(dev->anon_inode);
DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
- goto err_free;
+ goto err;
}
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
if (ret)
- goto err_minors;
+ goto err;
}
ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
if (ret)
- goto err_minors;
+ goto err;
ret = drm_legacy_create_map_hash(dev);
if (ret)
- goto err_minors;
+ goto err;
drm_legacy_ctxbitmap_init(dev);
@@ -671,33 +684,19 @@ int drm_dev_init(struct drm_device *dev,
ret = drm_gem_init(dev);
if (ret) {
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
- goto err_ctxbitmap;
+ goto err;
}
}
ret = drm_dev_set_unique(dev, dev_name(parent));
if (ret)
- goto err_setunique;
+ goto err;
return 0;
-err_setunique:
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_destroy(dev);
-err_ctxbitmap:
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_legacy_remove_map_hash(dev);
-err_minors:
- drm_minor_free(dev, DRM_MINOR_PRIMARY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_fs_inode_free(dev->anon_inode);
-err_free:
- put_device(dev->dev);
- mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->clientlist_mutex);
- mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
- drm_legacy_destroy_members(dev);
+err:
+ drm_managed_release(dev);
+
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
@@ -714,8 +713,10 @@ static void devm_drm_dev_init_release(void *data)
* @driver: DRM driver
*
* Managed drm_dev_init(). The DRM device initialized with this function is
- * automatically put on driver detach using drm_dev_put(). You must supply a
- * &drm_driver.release callback to control the finalization explicitly.
+ * automatically put on driver detach using drm_dev_put().
+ *
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
*
* RETURNS:
* 0 on success, or error code on failure.
@@ -726,9 +727,6 @@ int devm_drm_dev_init(struct device *parent,
{
int ret;
- if (WARN_ON(!driver->release))
- return -EINVAL;
-
ret = drm_dev_init(dev, driver, parent);
if (ret)
return ret;
@@ -741,42 +739,28 @@ int devm_drm_dev_init(struct device *parent,
}
EXPORT_SYMBOL(devm_drm_dev_init);
-/**
- * drm_dev_fini - Finalize a dead DRM device
- * @dev: DRM device
- *
- * Finalize a dead DRM device. This is the converse to drm_dev_init() and
- * frees up all data allocated by it. All driver private data should be
- * finalized first. Note that this function does not free the @dev, that is
- * left to the caller.
- *
- * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
- * from a &drm_driver.release callback.
- */
-void drm_dev_fini(struct drm_device *dev)
+void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
+ size_t size, size_t offset)
{
- drm_vblank_cleanup(dev);
-
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_destroy(dev);
-
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_legacy_remove_map_hash(dev);
- drm_fs_inode_free(dev->anon_inode);
+ void *container;
+ struct drm_device *drm;
+ int ret;
- drm_minor_free(dev, DRM_MINOR_PRIMARY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
- put_device(dev->dev);
+ drm = container + offset;
+ ret = devm_drm_dev_init(parent, drm, driver);
+ if (ret) {
+ kfree(container);
+ return ERR_PTR(ret);
+ }
+ drmm_add_final_kfree(drm, container);
- mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->clientlist_mutex);
- mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
- drm_legacy_destroy_members(dev);
- kfree(dev->unique);
+ return container;
}
-EXPORT_SYMBOL(drm_dev_fini);
+EXPORT_SYMBOL(__devm_drm_dev_alloc);
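
Drivers are not expected to call __devm_drm_dev_alloc() directly; the
devm_drm_dev_alloc() macro in <drm/drm_drv.h> wraps it with the usual
container_of-style size/offset arithmetic. A sketch of the intended call
site (struct foo_device and foo_driver are hypothetical):

    struct foo_device {
            struct drm_device drm;
            /* driver-private state */
    };

    priv = devm_drm_dev_alloc(&pdev->dev, &foo_driver,
                              struct foo_device, drm);
    if (IS_ERR(priv))
            return PTR_ERR(priv);
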
/**
* drm_dev_alloc - Allocate new DRM device
@@ -816,6 +800,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
return ERR_PTR(ret);
}
+ drmm_add_final_kfree(dev, dev);
+
return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);
@@ -824,12 +810,13 @@ static void drm_dev_release(struct kref *ref)
{
struct drm_device *dev = container_of(ref, struct drm_device, ref);
- if (dev->driver->release) {
+ if (dev->driver->release)
dev->driver->release(dev);
- } else {
- drm_dev_fini(dev);
- kfree(dev);
- }
+
+ drm_managed_release(dev);
+
+ if (dev->managed.final_kfree)
+ kfree(dev->managed.final_kfree);
}
/**
@@ -946,6 +933,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
struct drm_driver *driver = dev->driver;
int ret;
+ if (!driver->load)
+ drm_mode_config_validate(dev);
+
+ WARN_ON(!dev->managed.final_kfree);
+
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
@@ -1046,8 +1038,8 @@ EXPORT_SYMBOL(drm_dev_unregister);
*/
int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
- kfree(dev->unique);
- dev->unique = kstrdup(name, GFP_KERNEL);
+ drmm_kfree(dev, dev->unique);
+ dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
return dev->unique ? 0 : -ENOMEM;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 4ede08a84e37..fed653f13c26 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -191,10 +191,11 @@ static const struct edid_quirk {
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
- /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+ /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
{ "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
{ "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
/* Windows Mixed Reality Headsets */
{ "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
@@ -1583,8 +1584,6 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
-static void drm_get_displayid(struct drm_connector *connector,
- struct edid *edid);
static int validate_displayid(u8 *displayid, int length, int idx);
static int drm_edid_block_checksum(const u8 *raw_edid)
@@ -2018,18 +2017,13 @@ EXPORT_SYMBOL(drm_probe_ddc);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
- struct edid *edid;
-
if (connector->force == DRM_FORCE_OFF)
return NULL;
if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter))
return NULL;
- edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
- if (edid)
- drm_get_displayid(connector, edid);
- return edid;
+ return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
}
EXPORT_SYMBOL(drm_get_edid);
@@ -2387,6 +2381,14 @@ bad_std_timing(u8 a, u8 b)
(a == 0x20 && b == 0x20);
}
+static int drm_mode_hsync(const struct drm_display_mode *mode)
+{
+ if (mode->htotal <= 0)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(mode->clock, mode->htotal);
+}
+
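
As a worked example for the helper just added: the common 1920x1080@60 CEA
mode has a pixel clock of 148500 kHz and an htotal of 2200 pixels, so
drm_mode_hsync() returns DIV_ROUND_CLOSEST(148500, 2200) = 68 kHz (the
exact horizontal rate is 67.5 kHz).
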
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
* @connector: connector of for the EDID block
@@ -3212,16 +3214,33 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
}
-static u8 *drm_find_displayid_extension(const struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid,
+ int *length, int *idx)
{
- return drm_find_edid_extension(edid, DISPLAYID_EXT);
+ u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT);
+ struct displayid_hdr *base;
+ int ret;
+
+ if (!displayid)
+ return NULL;
+
+ /* EDID extensions block checksum isn't for us */
+ *length = EDID_LENGTH - 1;
+ *idx = 1;
+
+ ret = validate_displayid(displayid, *length, *idx);
+ if (ret)
+ return NULL;
+
+ base = (struct displayid_hdr *)&displayid[*idx];
+ *length = *idx + sizeof(*base) + base->bytes;
+
+ return displayid;
}
static u8 *drm_find_cea_extension(const struct edid *edid)
{
- int ret;
- int idx = 1;
- int length = EDID_LENGTH;
+ int length, idx;
struct displayid_block *block;
u8 *cea;
u8 *displayid;
@@ -3232,14 +3251,10 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
/* CEA blocks can also be found embedded in a DisplayID block */
- displayid = drm_find_displayid_extension(edid);
+ displayid = drm_find_displayid_extension(edid, &length, &idx);
if (!displayid)
return NULL;
- ret = validate_displayid(displayid, length, idx);
- if (ret)
- return NULL;
-
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
if (block->tag == DATA_BLOCK_CTA) {
@@ -5084,7 +5099,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
static int validate_displayid(u8 *displayid, int length, int idx)
{
- int i;
+ int i, dispid_length;
u8 csum = 0;
struct displayid_hdr *base;
@@ -5093,15 +5108,18 @@ static int validate_displayid(u8 *displayid, int length, int idx)
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
- if (base->bytes + 5 > length - idx)
+ /* +1 for DispID checksum */
+ dispid_length = sizeof(*base) + base->bytes + 1;
+ if (dispid_length > length - idx)
return -EINVAL;
- for (i = idx; i <= base->bytes + 5; i++) {
- csum += displayid[i];
- }
+
+ for (i = 0; i < dispid_length; i++)
+ csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
return -EINVAL;
}
+
return 0;
}
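
The invariant being enforced: a DisplayID section is a 4-byte header,
base->bytes of payload, and one trailing checksum byte, and the byte sum
over that whole span must be 0 modulo 256. A minimal standalone sketch of
the same check:

    static bool displayid_csum_ok(const u8 *section, int len)
    {
            u8 csum = 0;
            int i;

            for (i = 0; i < len; i++)
                    csum += section[i];

            /* The checksum byte makes the full sum wrap to zero. */
            return csum == 0;
    }
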
@@ -5180,20 +5198,14 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
struct edid *edid)
{
u8 *displayid;
- int ret;
- int idx = 1;
- int length = EDID_LENGTH;
+ int length, idx;
struct displayid_block *block;
int num_modes = 0;
- displayid = drm_find_displayid_extension(edid);
+ displayid = drm_find_displayid_extension(edid, &length, &idx);
if (!displayid)
return 0;
- ret = validate_displayid(displayid, length, idx);
- if (ret)
- return 0;
-
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
switch (block->tag) {
@@ -5782,9 +5794,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
static int drm_parse_tiled_block(struct drm_connector *connector,
- struct displayid_block *block)
+ const struct displayid_block *block)
{
- struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+ const struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
u16 w, h;
u8 tile_v_loc, tile_h_loc;
u8 num_v_tile, num_h_tile;
@@ -5835,22 +5847,12 @@ static int drm_parse_tiled_block(struct drm_connector *connector,
return 0;
}
-static int drm_parse_display_id(struct drm_connector *connector,
- u8 *displayid, int length,
- bool is_edid_extension)
+static int drm_displayid_parse_tiled(struct drm_connector *connector,
+ const u8 *displayid, int length, int idx)
{
- /* if this is an EDID extension the first byte will be 0x70 */
- int idx = 0;
- struct displayid_block *block;
+ const struct displayid_block *block;
int ret;
- if (is_edid_extension)
- idx = 1;
-
- ret = validate_displayid(displayid, length, idx);
- if (ret)
- return ret;
-
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
@@ -5862,12 +5864,6 @@ static int drm_parse_display_id(struct drm_connector *connector,
if (ret)
return ret;
break;
- case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
- /* handled in mode gathering code. */
- break;
- case DATA_BLOCK_CTA:
- /* handled in the cea parser code. */
- break;
default:
DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
break;
@@ -5876,19 +5872,21 @@ static int drm_parse_display_id(struct drm_connector *connector,
return 0;
}
-static void drm_get_displayid(struct drm_connector *connector,
- struct edid *edid)
+void drm_update_tile_info(struct drm_connector *connector,
+ const struct edid *edid)
{
- void *displayid = NULL;
+ const void *displayid = NULL;
+ int length, idx;
int ret;
+
connector->has_tile = false;
- displayid = drm_find_displayid_extension(edid);
+ displayid = drm_find_displayid_extension(edid, &length, &idx);
if (!displayid) {
/* drop reference to any tile group we had */
goto out_drop_ref;
}
- ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
+ ret = drm_displayid_parse_tiled(connector, displayid, length, idx);
if (ret < 0)
goto out_drop_ref;
if (!connector->has_tile)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 9801c0333eca..cb2349ad338d 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -2,7 +2,7 @@
/*
* drm kms/fb cma (contiguous memory allocator) helper functions
*
- * Copyright (C) 2012 Analog Device Inc.
+ * Copyright (C) 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Based on udl_fbdev.c
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a9771de4d17e..170aa7689110 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -307,13 +307,13 @@ static void drm_fb_helper_sysrq(int dummy1)
schedule_work(&drm_fb_helper_restore_work);
}
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.handler = drm_fb_helper_sysrq,
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
#else
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
+static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
@@ -514,6 +514,14 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
if (ret)
goto err_release;
+ /*
+	 * TODO: We really should be smarter here and alloc an aperture
+	 * for each IORESOURCE_MEM resource helper->dev->dev has and also
+	 * init the ranges of the apertures based on the resources.
+	 * Note some drivers currently count on there being only 1 empty
+	 * aperture and fill this themselves; these will need to be dealt
+ * with somehow when fixing this.
+ */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
@@ -2162,6 +2170,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* This function sets up generic fbdev emulation for drivers that support
* dumb buffers with a virtual address and that can be mmap'ed.
+ * drm_fbdev_generic_setup() shall be called after the DRM driver has
+ * registered the new DRM device with drm_dev_register().
*
* Restore, hotplug events and teardown are all taken care of. Drivers that do
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -2178,29 +2188,30 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* Setup will be retried on the next hotplug event.
*
* The fbdev is destroyed by drm_dev_unregister().
- *
- * Returns:
- * Zero on success or negative error code on failure.
*/
-int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+void drm_fbdev_generic_setup(struct drm_device *dev,
+ unsigned int preferred_bpp)
{
struct drm_fb_helper *fb_helper;
int ret;
- WARN(dev->fb_helper, "fb_helper is already set!\n");
+ drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+ drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
if (!drm_fbdev_emulation)
- return 0;
+ return;
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper)
- return -ENOMEM;
+ if (!fb_helper) {
+ drm_err(dev, "Failed to allocate fb_helper\n");
+ return;
+ }
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
kfree(fb_helper);
drm_err(dev, "Failed to register client: %d\n", ret);
- return ret;
+ return;
}
if (!preferred_bpp)
@@ -2214,8 +2225,6 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
drm_client_register(&fb_helper->client);
-
- return 0;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
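
With the void return, the only thing callers must get right is ordering; a
hedged sketch of a probe tail after this change:

    ret = drm_dev_register(drm, 0);
    if (ret)
            return ret;

    /* Only after registration; failures are logged, never fatal. */
    drm_fbdev_generic_setup(drm, 32);

    return 0;
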
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index eb009d3ab48f..2f12b8c1d01c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -569,9 +569,6 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
struct drm_device *dev = file_priv->minor->dev;
ssize_t ret;
- if (!access_ok(buffer, count))
- return -EFAULT;
-
ret = mutex_lock_interruptible(&file_priv->event_read_lock);
if (ret)
return ret;
@@ -613,7 +610,8 @@ put_back_event:
file_priv->event_space -= length;
list_add(&e->link, &file_priv->event_list);
spin_unlock_irq(&dev->event_lock);
- wake_up_interruptible(&file_priv->event_wait);
+ wake_up_interruptible_poll(&file_priv->event_wait,
+ EPOLLIN | EPOLLRDNORM);
break;
}
@@ -809,7 +807,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
list_del(&e->pending_link);
list_add_tail(&e->link,
&e->file_priv->event_list);
- wake_up_interruptible(&e->file_priv->event_wait);
+ wake_up_interruptible_poll(&e->file_priv->event_wait,
+ EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL(drm_send_event_locked);
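
Passing EPOLLIN | EPOLLRDNORM means epoll waiters whose mask doesn't
include readability (e.g. EPOLLOUT-only) are no longer woken for every
delivered event, while readers see no behavioural change. The userspace
contract is unchanged, since DRM events make the device fd readable; a
sketch using the libdrm event helper:

    /* Userspace sketch: wait for vblank/page-flip events. */
    struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };

    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
            drmHandleEvent(drm_fd, &evctx); /* reads and dispatches */
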
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 57ac94ce9b9e..0375b3d7f8d0 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -1207,10 +1207,10 @@ static const struct drm_info_list drm_framebuffer_debugfs_list[] = {
{ "framebuffer", drm_framebuffer_info, 0 },
};
-int drm_framebuffer_debugfs_init(struct drm_minor *minor)
+void drm_framebuffer_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(drm_framebuffer_debugfs_list,
- ARRAY_SIZE(drm_framebuffer_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(drm_framebuffer_debugfs_list,
+ ARRAY_SIZE(drm_framebuffer_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 37627d06fb06..7bf628e13023 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -44,6 +44,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
@@ -77,6 +78,12 @@
* up at a later date, and as our interface with shmfs for memory allocation.
*/
+static void
+drm_gem_init_release(struct drm_device *dev, void *ptr)
+{
+ drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+}
+
/**
* drm_gem_init - Initialize the GEM device fields
* @dev: drm_device structure to initialize
@@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
mutex_init(&dev->object_name_lock);
idr_init_base(&dev->object_name_idr, 1);
- vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+ vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
+ GFP_KERNEL);
if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
@@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
- return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
-
- drm_vma_offset_manager_destroy(dev->vma_offset_manager);
- kfree(dev->vma_offset_manager);
- dev->vma_offset_manager = NULL;
+ return drmm_add_action(dev, drm_gem_init_release, NULL);
}
/**
@@ -432,7 +431,7 @@ err_unref:
* drm_gem_handle_create - create a gem handle for an object
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
- * @handlep: pionter to return the created handle to the caller
+ * @handlep: pointer to return the created handle to the caller
*
* Create a handle for this object. This adds a handle reference to the object,
* which includes a regular reference count. Callers will likely want to
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 3a7ace19a902..ccc2c71fa491 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -21,6 +21,13 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#define AFBC_HEADER_SIZE 16
+#define AFBC_TH_LAYOUT_ALIGNMENT 8
+#define AFBC_HDR_ALIGN 64
+#define AFBC_SUPERBLOCK_PIXELS 256
+#define AFBC_SUPERBLOCK_ALIGNMENT 128
+#define AFBC_TH_BODY_START_ALIGNMENT 4096
+
/**
* DOC: overview
*
@@ -54,32 +61,25 @@ struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
-static struct drm_framebuffer *
-drm_gem_fb_alloc(struct drm_device *dev,
+static int
+drm_gem_fb_init(struct drm_device *dev,
+ struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes,
const struct drm_framebuffer_funcs *funcs)
{
- struct drm_framebuffer *fb;
int ret, i;
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb)
- return ERR_PTR(-ENOMEM);
-
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
for (i = 0; i < num_planes; i++)
fb->obj[i] = obj[i];
ret = drm_framebuffer_init(dev, fb, funcs);
- if (ret) {
+ if (ret)
drm_err(dev, "Failed to init framebuffer: %d\n", ret);
- kfree(fb);
- return ERR_PTR(ret);
- }
- return fb;
+ return ret;
}
/**
@@ -123,10 +123,13 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
EXPORT_SYMBOL(drm_gem_fb_create_handle);
/**
- * drm_gem_fb_create_with_funcs() - Helper function for the
- * &drm_mode_config_funcs.fb_create
- * callback
+ * drm_gem_fb_init_with_funcs() - Helper function for implementing
+ * &drm_mode_config_funcs.fb_create
+ * callback in cases when the driver
+ * allocates a subclass of
+ * struct drm_framebuffer
* @dev: DRM device
+ * @fb: framebuffer object
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
@@ -134,23 +137,26 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
* This function can be used to set &drm_framebuffer_funcs for drivers that need
* custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
* change &drm_framebuffer_funcs. The function does buffer size validation.
+ * The buffer size validation covers the general case, though, so users should
+ * make sure the checks are appropriate for their use case or, at least, do
+ * not conflict with it.
*
* Returns:
- * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ * Zero or a negative error code.
*/
-struct drm_framebuffer *
-drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- const struct drm_framebuffer_funcs *funcs)
+int drm_gem_fb_init_with_funcs(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
{
const struct drm_format_info *info;
struct drm_gem_object *objs[4];
- struct drm_framebuffer *fb;
int ret, i;
info = drm_get_format_info(dev, mode_cmd);
if (!info)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
for (i = 0; i < info->num_planes; i++) {
unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
@@ -175,19 +181,55 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
}
}
- fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
+ ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
+ if (ret)
goto err_gem_object_put;
- }
- return fb;
+ return 0;
err_gem_object_put:
for (i--; i >= 0; i--)
drm_gem_object_put_unlocked(objs[i]);
- return ERR_PTR(ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
+
+/**
+ * drm_gem_fb_create_with_funcs() - Helper function for the
+ * &drm_mode_config_funcs.fb_create
+ * callback
+ * @dev: DRM device
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This function can be used to set &drm_framebuffer_funcs for drivers that need
+ * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
+ * change &drm_framebuffer_funcs. The function does buffer size validation.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_framebuffer *fb;
+ int ret;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
+ if (ret) {
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs);
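Drivers that wrap struct drm_framebuffer in a subclass can now call the init variant directly, with drm_gem_fb_create_with_funcs() remaining as the kzalloc()-based wrapper. A sketch, assuming hypothetical my_fb/my_fb_funcs names:

.. code-block:: c

	struct my_fb {
		struct drm_framebuffer base;
		/* driver-private framebuffer state */
	};

	static struct drm_framebuffer *
	my_fb_create(struct drm_device *dev, struct drm_file *file,
		     const struct drm_mode_fb_cmd2 *mode_cmd)
	{
		struct my_fb *fb;
		int ret;

		fb = kzalloc(sizeof(*fb), GFP_KERNEL);
		if (!fb)
			return ERR_PTR(-ENOMEM);

		ret = drm_gem_fb_init_with_funcs(dev, &fb->base, file,
						 mode_cmd, &my_fb_funcs);
		if (ret) {
			kfree(fb);
			return ERR_PTR(ret);
		}

		return &fb->base;
	}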
@@ -265,6 +307,132 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
+static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct drm_format_info *info;
+
+ info = drm_get_format_info(dev, mode_cmd);
+
+ /* use whatever a driver has set */
+ if (info->cpp[0])
+ return info->cpp[0] * 8;
+
+ /* guess otherwise */
+ switch (info->format) {
+ case DRM_FORMAT_YUV420_8BIT:
+ return 12;
+ case DRM_FORMAT_YUV420_10BIT:
+ return 15;
+ case DRM_FORMAT_VUY101010:
+ return 30;
+ default:
+ break;
+ }
+
+ /* all attempts failed */
+ return 0;
+}
+
+static int drm_gem_afbc_min_size(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_afbc_framebuffer *afbc_fb)
+{
+ __u32 n_blocks, w_alignment, h_alignment, hdr_alignment;
+ /* remove bpp when all users properly encode cpp in drm_format_info */
+ __u32 bpp;
+
+ switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ afbc_fb->block_width = 16;
+ afbc_fb->block_height = 16;
+ break;
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
+ afbc_fb->block_width = 32;
+ afbc_fb->block_height = 8;
+ break;
+ /* no user exists yet - fall through */
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4:
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4:
+ default:
+ drm_dbg_kms(dev, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n",
+ mode_cmd->modifier[0]
+ & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
+ return -EINVAL;
+ }
+
+ /* tiled header afbc */
+ w_alignment = afbc_fb->block_width;
+ h_alignment = afbc_fb->block_height;
+ hdr_alignment = AFBC_HDR_ALIGN;
+ if (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_TILED) {
+ w_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+ h_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+ hdr_alignment = AFBC_TH_BODY_START_ALIGNMENT;
+ }
+
+ afbc_fb->aligned_width = ALIGN(mode_cmd->width, w_alignment);
+ afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
+ afbc_fb->offset = mode_cmd->offsets[0];
+
+ bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
+ if (!bpp) {
+ drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
+ return -EINVAL;
+ }
+
+ n_blocks = (afbc_fb->aligned_width * afbc_fb->aligned_height)
+ / AFBC_SUPERBLOCK_PIXELS;
+ afbc_fb->afbc_size = ALIGN(n_blocks * AFBC_HEADER_SIZE, hdr_alignment);
+ afbc_fb->afbc_size += n_blocks * ALIGN(bpp * AFBC_SUPERBLOCK_PIXELS / 8,
+ AFBC_SUPERBLOCK_ALIGNMENT);
+
+ return 0;
+}
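As a worked example of the size computation above (numbers are illustrative, not from the patch):

.. code-block:: c

	/*
	 * A 1920x1080 request with 16x16 superblocks, linear headers
	 * and 32 bpp:
	 *
	 *   aligned_width  = ALIGN(1920, 16) = 1920
	 *   aligned_height = ALIGN(1080, 16) = 1088
	 *   n_blocks       = 1920 * 1088 / 256 = 8160
	 *   header bytes   = ALIGN(8160 * 16, 64) = 130560
	 *   payload bytes  = 8160 * ALIGN(32 * 256 / 8, 128) = 8355840
	 *   afbc_size      = 130560 + 8355840 = 8486400
	 */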
+
+/**
+ * drm_gem_fb_afbc_init() - Helper function for drivers using afbc to
+ * fill and validate all the afbc-specific
+ * struct drm_afbc_framebuffer members
+ *
+ * @dev: DRM device
+ * @afbc_fb: afbc-specific framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @afbc_fb: afbc framebuffer
+ *
+ * This function can be used by drivers which support afbc to complete
+ * the preparation of struct drm_afbc_framebuffer. It must be called after
+ * allocating the said struct and calling drm_gem_fb_init_with_funcs().
+ * It is the caller's responsibility to put afbc_fb->base.obj objects in case
+ * the call is unsuccessful.
+ *
+ * Returns:
+ * Zero on success or a negative error value on failure.
+ */
+int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_afbc_framebuffer *afbc_fb)
+{
+ const struct drm_format_info *info;
+ struct drm_gem_object **objs;
+ int ret;
+
+ objs = afbc_fb->base.obj;
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return -EINVAL;
+
+ ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
+ if (ret < 0)
+ return ret;
+
+ if (objs[0]->size < afbc_fb->afbc_size)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init);
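Putting both helpers together in an fb_create path might look roughly like this (error unwinding elided; my_fb_funcs is hypothetical):

.. code-block:: c

	ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file,
					 mode_cmd, &my_fb_funcs);
	if (!ret)
		ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
	/* on failure, the caller puts afbc_fb->base.obj[] itself */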
+
/**
* drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
* @plane: Plane
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 92a11bb42365..8b2d5c945c95 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
@@ -18,13 +21,93 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
/**
* DOC: overview
*
- * This library provides a GEM buffer object that is backed by video RAM
- * (VRAM). It can be used for framebuffer devices with dedicated memory.
+ * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
+ * buffer object that is backed by video RAM (VRAM). It can be used for
+ * framebuffer devices with dedicated memory.
*
* The data structure &struct drm_vram_mm and its helpers implement a memory
- * manager for simple framebuffer devices with dedicated video memory. Buffer
- * objects are either placed in video RAM or evicted to system memory. The rsp.
- * buffer object is provided by &struct drm_gem_vram_object.
+ * manager for simple framebuffer devices with dedicated video memory. GEM
+ * VRAM buffer objects are either placed in the video memory or remain evicted
+ * to system memory.
+ *
+ * With the GEM interface userspace applications create, manage and destroy
+ * graphics buffers, such as an on-screen framebuffer. GEM does not provide
+ * an implementation of these interfaces. It's up to the DRM driver to
+ * provide an implementation that suits the hardware. If the hardware device
+ * contains dedicated video memory, the DRM driver can use the VRAM helper
+ * library. Each active buffer object is stored in video RAM. Active
+ * buffers are used for drawing the current frame, typically something like
+ * the frame's scanout buffer or the cursor image. If there's no more space
+ * left in VRAM, inactive GEM objects can be moved to system memory.
+ *
+ * The easiest way to use the VRAM helper library is to call
+ * drm_vram_helper_alloc_mm(). The function allocates and initializes an
+ * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use
+ * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
+ * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
+ * as illustrated below.
+ *
+ * .. code-block:: c
+ *
+ * struct file_operations fops = {
+ * .owner = THIS_MODULE,
+ * DRM_VRAM_MM_FILE_OPERATIONS
+ * };
+ * struct drm_driver drv = {
+ * .driver_feature = DRM_ ... ,
+ * .fops = &fops,
+ * DRM_GEM_VRAM_DRIVER
+ * };
+ *
+ * int init_drm_driver()
+ * {
+ * struct drm_device *dev;
+ * uint64_t vram_base;
+ * unsigned long vram_size;
+ * int ret;
+ *
+ * // setup device, vram base and size
+ * // ...
+ *
+ * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
+ * if (ret)
+ * return ret;
+ * return 0;
+ * }
+ *
+ * This creates an instance of &struct drm_vram_mm, exports DRM userspace
+ * interfaces for GEM buffer management and initializes file operations to
+ * allow for accessing created GEM buffers. With this setup, the DRM driver
+ * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
+ * to userspace.
+ *
+ * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
+ * in the driver's clean-up code.
+ *
+ * .. code-block:: c
+ *
+ * void fini_drm_driver()
+ * {
+ * struct drm_device *dev = ...;
+ *
+ * drm_vram_helper_release_mm(dev);
+ * }
+ *
+ * For drawing or scanout operations, buffer objects have to be pinned in video
+ * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
+ * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
+ * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
+ *
+ * A buffer object that is pinned in video RAM has a fixed address within that
+ * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
+ * it's used to program the hardware's scanout engine for framebuffers, set
+ * the cursor overlay's image for a mouse cursor, or use it as input to the
+ * hardware's drawing engine.
+ *
+ * To access a buffer object's memory from the DRM driver, call
+ * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
+ * space and returns the memory address. Use drm_gem_vram_kunmap() to
+ * release the mapping.
*/
/*
@@ -670,9 +753,9 @@ EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
* @plane: a DRM plane
* @new_state: the plane's new state
*
- * During plane updates, this function pins the GEM VRAM
- * objects of the plane's new framebuffer to VRAM. Call
- * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ * During plane updates, this function sets the plane's fence and
+ * pins the GEM VRAM objects of the plane's new framebuffer to VRAM.
+ * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
*
* Returns:
* 0 on success, or
@@ -698,6 +781,10 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
goto err_drm_gem_vram_unpin;
}
+ ret = drm_gem_fb_prepare_fb(plane, new_state);
+ if (ret)
+ goto err_drm_gem_vram_unpin;
+
return 0;
err_drm_gem_vram_unpin:
@@ -1018,7 +1105,6 @@ static struct ttm_bo_driver bo_driver = {
* struct drm_vram_mm
*/
-#if defined(CONFIG_DEBUG_FS)
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1035,27 +1121,18 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
-#endif
/**
* drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
*
* @minor: drm minor device.
*
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
*/
-int drm_vram_mm_debugfs_init(struct drm_minor *minor)
+void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
- int ret = 0;
-
-#if defined(CONFIG_DEBUG_FS)
- ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
- ARRAY_SIZE(drm_vram_mm_debugfs_list),
- minor->debugfs_root, minor);
-#endif
- return ret;
+ drm_debugfs_create_files(drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list),
+ minor->debugfs_root, minor);
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
@@ -1202,3 +1279,6 @@ drm_vram_helper_mode_valid(struct drm_device *dev,
return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
}
EXPORT_SYMBOL(drm_vram_helper_mode_valid);
+
+MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 5714a78365ac..2470a352730b 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -89,9 +89,11 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
+/* drm_managed.c */
+void drm_managed_release(struct drm_device *dev);
+
/* drm_vblank.c */
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
-void drm_vblank_cleanup(struct drm_device *dev);
/* IOCTLS */
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
@@ -141,7 +143,6 @@ void drm_sysfs_lease_event(struct drm_device *dev);
/* drm_gem.c */
struct drm_gem_object;
int drm_gem_init(struct drm_device *dev);
-void drm_gem_destroy(struct drm_device *dev);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -235,4 +236,4 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
/* drm_framebuffer.c */
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
-int drm_framebuffer_debugfs_init(struct drm_minor *minor);
+void drm_framebuffer_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9e41972c4bbc..328502aafaf7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -599,8 +599,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -741,7 +741,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
* };
*
* Please make sure that you follow all the best practices from
- * ``Documentation/ioctl/botching-up-ioctls.rst``. Note that drm_ioctl()
+ * ``Documentation/process/botching-up-ioctls.rst``. Note that drm_ioctl()
* automatically zero-extends structures, hence make sure you can add more stuff
* at the end, i.e. don't put a variable sized array there.
*
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
new file mode 100644
index 000000000000..9cebfe370a65
--- /dev/null
+++ b/drivers/gpu/drm/drm_managed.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel
+ *
+ * Based on drivers/base/devres.c
+ */
+
+#include <drm/drm_managed.h>
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: managed resources
+ *
+ * Inspired by &struct device managed resources, but tied to the lifetime of
+ * &struct drm_device, which can outlive the underlying physical device, usually
+ * when userspace still has open files and other handles to its resources.
+ *
+ * Release actions can be added with drmm_add_action(), memory allocations can
+ * be done directly with drmm_kmalloc() and the related functions. Everything
+ * will be released on the final drm_dev_put() in reverse order of how the
+ * release actions have been added and memory has been allocated since driver
+ * loading started with drm_dev_init().
+ *
+ * Note that release actions and managed memory can also be added and removed
+ * during the lifetime of the driver, all the functions are fully concurrent
+ * safe. But it is recommended to use managed resources only for resources that
+ * change rarely, if ever, during the lifetime of the &drm_device instance.
+ */
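A rough usage sketch (hypothetical driver code; priv and lut_size are not from this patch):

.. code-block:: c

	/* managed allocations: freed automatically on the final drm_dev_put() */
	priv->label = drmm_kstrdup(dev, "pipe-A", GFP_KERNEL);
	priv->lut = drmm_kmalloc(dev, lut_size, GFP_KERNEL);
	if (!priv->label || !priv->lut)
		return -ENOMEM;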
+
+struct drmres_node {
+ struct list_head entry;
+ drmres_release_t release;
+ const char *name;
+ size_t size;
+};
+
+struct drmres {
+ struct drmres_node node;
+ /*
+ * Some archs want to perform DMA into kmalloc caches
+ * and need a guaranteed alignment larger than
+ * the alignment of a 64-bit integer.
+ * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+ * buffer alignment as if it was allocated by plain kmalloc().
+ */
+ u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
+};
+
+static void free_dr(struct drmres *dr)
+{
+ kfree_const(dr->node.name);
+ kfree(dr);
+}
+
+void drm_managed_release(struct drm_device *dev)
+{
+ struct drmres *dr, *tmp;
+
+ drm_dbg_drmres(dev, "drmres release begin\n");
+ list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
+ drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
+ dr, dr->node.name, dr->node.size);
+
+ if (dr->node.release)
+ dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);
+
+ list_del(&dr->node.entry);
+ free_dr(dr);
+ }
+ drm_dbg_drmres(dev, "drmres release end\n");
+}
+
+/*
+ * Always inline so that kmalloc_track_caller tracks the actual interesting
+ * caller outside of drm_managed.c.
+ */
+static __always_inline struct drmres *alloc_dr(drmres_release_t release,
+ size_t size, gfp_t gfp, int nid)
+{
+ size_t tot_size;
+ struct drmres *dr;
+
+ /* We must catch any near-SIZE_MAX cases that could overflow. */
+ if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
+ return NULL;
+
+ dr = kmalloc_node_track_caller(tot_size, gfp, nid);
+ if (unlikely(!dr))
+ return NULL;
+
+ memset(dr, 0, offsetof(struct drmres, data));
+
+ INIT_LIST_HEAD(&dr->node.entry);
+ dr->node.release = release;
+ dr->node.size = size;
+
+ return dr;
+}
+
+static void del_dr(struct drm_device *dev, struct drmres *dr)
+{
+ list_del_init(&dr->node.entry);
+
+ drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
+ dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+static void add_dr(struct drm_device *dev, struct drmres *dr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->managed.lock, flags);
+ list_add(&dr->node.entry, &dev->managed.resources);
+ spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+ drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
+ dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+/**
+ * drmm_add_final_kfree - add release action for the final kfree()
+ * @dev: DRM device
+ * @container: pointer to the kmalloc allocation containing @dev
+ *
+ * Since the allocation containing the struct &drm_device must be allocated
+ * before it can be initialized with drm_dev_init() there's no way to allocate
+ * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the
+ * pointer for this final kfree() must be specified by calling this function. It
+ * will be released in the final drm_dev_put() for @dev, after all other release
+ * actions installed through drmm_add_action() have been processed.
+ */
+void drmm_add_final_kfree(struct drm_device *dev, void *container)
+{
+ WARN_ON(dev->managed.final_kfree);
+ WARN_ON(dev < (struct drm_device *) container);
+ WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
+ dev->managed.final_kfree = container;
+}
+EXPORT_SYMBOL(drmm_add_final_kfree);
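The intended call sequence, sketched for a driver that embeds struct drm_device in a larger allocation (my_device and my_driver are hypothetical):

.. code-block:: c

	struct my_device {
		struct drm_device drm;
		/* ... */
	};

	mydev = kzalloc(sizeof(*mydev), GFP_KERNEL);
	if (!mydev)
		return -ENOMEM;

	ret = drm_dev_init(&mydev->drm, &my_driver, parent);
	if (ret) {
		kfree(mydev);
		return ret;
	}
	drmm_add_final_kfree(&mydev->drm, mydev);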
+
+int __drmm_add_action(struct drm_device *dev,
+ drmres_release_t action,
+ void *data, const char *name)
+{
+ struct drmres *dr;
+ void **void_ptr;
+
+ dr = alloc_dr(action, data ? sizeof(void*) : 0,
+ GFP_KERNEL | __GFP_ZERO,
+ dev_to_node(dev->dev));
+ if (!dr) {
+ drm_dbg_drmres(dev, "failed to add action %s for %p\n",
+ name, data);
+ return -ENOMEM;
+ }
+
+ dr->node.name = kstrdup_const(name, GFP_KERNEL);
+ if (data) {
+ void_ptr = (void **)&dr->data;
+ *void_ptr = data;
+ }
+
+ add_dr(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL(__drmm_add_action);
+
+int __drmm_add_action_or_reset(struct drm_device *dev,
+ drmres_release_t action,
+ void *data, const char *name)
+{
+ int ret;
+
+ ret = __drmm_add_action(dev, action, data, name);
+ if (ret)
+ action(dev, data);
+
+ return ret;
+}
+EXPORT_SYMBOL(__drmm_add_action_or_reset);
+
+/**
+ * drmm_kmalloc - &drm_device managed kmalloc()
+ * @dev: DRM device
+ * @size: size of the memory allocation
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kmalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put(). Memory can also be freed
+ * before the final drm_dev_put() by calling drmm_kfree().
+ */
+void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
+{
+ struct drmres *dr;
+
+ dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
+ if (!dr) {
+ drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
+ size, gfp);
+ return NULL;
+ }
+ dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);
+
+ add_dr(dev, dr);
+
+ return dr->data;
+}
+EXPORT_SYMBOL(drmm_kmalloc);
+
+/**
+ * drmm_kstrdup - &drm_device managed kstrdup()
+ * @dev: DRM device
+ * @s: 0-terminated string to be duplicated
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kstrdup(). The allocated memory is
+ * automatically freed on the final drm_dev_put() and works exactly like a
+ * memory allocation obtained by drmm_kmalloc().
+ */
+char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
+{
+ size_t size;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ size = strlen(s) + 1;
+ buf = drmm_kmalloc(dev, size, gfp);
+ if (buf)
+ memcpy(buf, s, size);
+ return buf;
+}
+EXPORT_SYMBOL_GPL(drmm_kstrdup);
+
+/**
+ * drmm_kfree - &drm_device managed kfree()
+ * @dev: DRM device
+ * @data: memory allocation to be freed
+ *
+ * This is a &drm_device managed version of kfree() which can be used to
+ * release memory allocated through drmm_kmalloc() or any of its related
+ * functions before the final drm_dev_put() of @dev.
+ */
+void drmm_kfree(struct drm_device *dev, void *data)
+{
+ struct drmres *dr_match = NULL, *dr;
+ unsigned long flags;
+
+ if (!data)
+ return;
+
+ spin_lock_irqsave(&dev->managed.lock, flags);
+ list_for_each_entry(dr, &dev->managed.resources, node.entry) {
+ if (dr->data == data) {
+ dr_match = dr;
+ del_dr(dev, dr_match);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+ if (WARN_ON(!dr_match))
+ return;
+
+ free_dr(dr_match);
+}
+EXPORT_SYMBOL(drmm_kfree);
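Since drmm_kfree() looks the allocation up by pointer under the managed-resources lock, an early free is symmetric to the allocation (sketch):

.. code-block:: c

	buf = drmm_kmalloc(dev, len, GFP_KERNEL);
	/* ... */
	drmm_kfree(dev, buf); /* optional; otherwise freed on final drm_dev_put() */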
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 558baf989f5a..bb27c82757f1 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -169,7 +169,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
EXPORT_SYMBOL(mipi_dbi_command_buf);
/* This should only be used by mipi_dbi_command() */
-int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
+ size_t len)
{
u8 *buf;
int ret;
@@ -510,6 +511,10 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
if (!dbidev->dbi.command)
return -EINVAL;
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL);
if (!dbidev->tx_buf)
return -ENOMEM;
@@ -579,26 +584,6 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
EXPORT_SYMBOL(mipi_dbi_dev_init);
/**
- * mipi_dbi_release - DRM driver release helper
- * @drm: DRM device
- *
- * This function finalizes and frees &mipi_dbi.
- *
- * Drivers can use this as their &drm_driver->release callback.
- */
-void mipi_dbi_release(struct drm_device *drm)
-{
- struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(drm);
-
- DRM_DEBUG_DRIVER("\n");
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(dbidev);
-}
-EXPORT_SYMBOL(mipi_dbi_release);
-
-/**
* mipi_dbi_hw_reset - Hardware reset of controller
* @dbi: MIPI DBI structure
*
@@ -1308,10 +1293,8 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
* controller or getting the read command values.
* Drivers can use this as their &drm_driver->debugfs_init callback.
*
- * Returns:
- * Zero on success, negative error code on failure.
*/
-int mipi_dbi_debugfs_init(struct drm_minor *minor)
+void mipi_dbi_debugfs_init(struct drm_minor *minor)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev);
umode_t mode = S_IFREG | S_IWUSR;
@@ -1320,8 +1303,6 @@ int mipi_dbi_debugfs_init(struct drm_minor *minor)
mode |= S_IRUGO;
debugfs_create_file("command", mode, minor->debugfs_root, dbidev,
&mipi_dbi_debugfs_command_fops);
-
- return 0;
}
EXPORT_SYMBOL(mipi_dbi_debugfs_init);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 8981abe8b7c9..f4ca1ff80af9 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -212,20 +212,6 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
&drm_mm_interval_tree_augment);
}
-#define RB_INSERT(root, member, expr) do { \
- struct rb_node **link = &root.rb_node, *rb = NULL; \
- u64 x = expr(node); \
- while (*link) { \
- rb = *link; \
- if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
- link = &rb->rb_left; \
- else \
- link = &rb->rb_right; \
- } \
- rb_link_node(&node->member, rb, link); \
- rb_insert_color(&node->member, &root); \
-} while (0)
-
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
@@ -255,16 +241,42 @@ static void insert_hole_size(struct rb_root_cached *root,
rb_insert_color_cached(&node->rb_hole_size, root, first);
}
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct drm_mm_node, rb_hole_addr,
+ u64, subtree_max_hole, HOLE_SIZE)
+
+static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
+{
+ struct rb_node **link = &root->rb_node, *rb_parent = NULL;
+ u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
+ struct drm_mm_node *parent;
+
+ while (*link) {
+ rb_parent = *link;
+ parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
+ if (parent->subtree_max_hole < subtree_max_hole)
+ parent->subtree_max_hole = subtree_max_hole;
+ if (start < HOLE_ADDR(parent))
+ link = &parent->rb_hole_addr.rb_left;
+ else
+ link = &parent->rb_hole_addr.rb_right;
+ }
+
+ rb_link_node(&node->rb_hole_addr, rb_parent, link);
+ rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
+}
+
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
node->hole_size =
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+ node->subtree_max_hole = node->hole_size;
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
insert_hole_size(&mm->holes_size, node);
- RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
+ insert_hole_addr(&mm->holes_addr, node);
list_add(&node->hole_stack, &mm->hole_stack);
}
@@ -275,8 +287,10 @@ static void rm_hole(struct drm_mm_node *node)
list_del(&node->hole_stack);
rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
- rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+ rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
+ &augment_callbacks);
node->hole_size = 0;
+ node->subtree_max_hole = 0;
DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}
@@ -361,9 +375,90 @@ first_hole(struct drm_mm *mm,
}
}
+/**
+ * next_hole_high_addr - returns next hole for a DRM_MM_INSERT_HIGH mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of the hole needed for the request
+ *
+ * This function will verify whether the left subtree of @entry has a hole big
+ * enough to fit the requested size. If so, it will return the previous node of
+ * @entry, or else it will return the parent node of @entry.
+ *
+ * It will also skip the complete left subtree if the subtree_max_hole of that
+ * subtree is the same as the subtree_max_hole of @entry.
+ *
+ * Returns:
+ * the previous node of @entry if the left subtree of @entry can serve the
+ * request, or else the parent of @entry
+ */
+static struct drm_mm_node *
+next_hole_high_addr(struct drm_mm_node *entry, u64 size)
+{
+ struct rb_node *rb_node, *left_rb_node, *parent_rb_node;
+ struct drm_mm_node *left_node;
+
+ if (!entry)
+ return NULL;
+
+ rb_node = &entry->rb_hole_addr;
+ if (rb_node->rb_left) {
+ left_rb_node = rb_node->rb_left;
+ parent_rb_node = rb_parent(rb_node);
+ left_node = rb_entry(left_rb_node,
+ struct drm_mm_node, rb_hole_addr);
+ if ((left_node->subtree_max_hole < size ||
+ entry->size == entry->subtree_max_hole) &&
+ parent_rb_node && parent_rb_node->rb_left != rb_node)
+ return rb_hole_addr_to_node(parent_rb_node);
+ }
+
+ return rb_hole_addr_to_node(rb_prev(rb_node));
+}
+
+/**
+ * next_hole_low_addr - returns next hole for a DRM_MM_INSERT_LOW mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of the hole needed for the request
+ *
+ * This function will verify whether the right subtree of @entry has a hole big
+ * enough to fit the requested size. If so, it will return the next node of
+ * @entry, or else it will return the parent node of @entry.
+ *
+ * It will also skip the complete right subtree if the subtree_max_hole of that
+ * subtree is the same as the subtree_max_hole of @entry.
+ *
+ * Returns:
+ * the next node of @entry if the right subtree of @entry can serve the
+ * request, or else the parent of @entry
+ */
+static struct drm_mm_node *
+next_hole_low_addr(struct drm_mm_node *entry, u64 size)
+{
+ struct rb_node *rb_node, *right_rb_node, *parent_rb_node;
+ struct drm_mm_node *right_node;
+
+ if (!entry)
+ return NULL;
+
+ rb_node = &entry->rb_hole_addr;
+ if (rb_node->rb_right) {
+ right_rb_node = rb_node->rb_right;
+ parent_rb_node = rb_parent(rb_node);
+ right_node = rb_entry(right_rb_node,
+ struct drm_mm_node, rb_hole_addr);
+ if ((right_node->subtree_max_hole < size ||
+ entry->size == entry->subtree_max_hole) &&
+ parent_rb_node && parent_rb_node->rb_right != rb_node)
+ return rb_hole_addr_to_node(parent_rb_node);
+ }
+
+ return rb_hole_addr_to_node(rb_next(rb_node));
+}
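These traversal helpers only change how drm_mm walks the hole tree; callers still request placement through drm_mm_insert_node_in_range() as before (an illustration, with mm and end coming from the surrounding driver):

.. code-block:: c

	struct drm_mm_node node = {};
	int ret;

	/* prefer the lowest suitable address in [0, end) for a 64 KiB hole */
	ret = drm_mm_insert_node_in_range(mm, &node, SZ_64K, 0, 0,
					  0, end, DRM_MM_INSERT_LOW);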
+
static struct drm_mm_node *
next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
+ u64 size,
enum drm_mm_insert_mode mode)
{
switch (mode) {
@@ -372,10 +467,10 @@ next_hole(struct drm_mm *mm,
return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
- return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+ return next_hole_low_addr(node, size);
case DRM_MM_INSERT_HIGH:
- return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+ return next_hole_high_addr(node, size);
case DRM_MM_INSERT_EVICT:
node = list_next_entry(node, hole_stack);
@@ -489,7 +584,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
for (hole = first_hole(mm, range_start, range_end, size, mode);
hole;
- hole = once ? NULL : next_hole(mm, hole, mode)) {
+ hole = once ? NULL : next_hole(mm, hole, size, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 08e6eff6a179..5761f838a057 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -25,6 +25,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
#include <linux/dma-resv.h>
@@ -373,8 +374,14 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return 0;
}
+static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
+{
+ drm_mode_config_cleanup(dev);
+}
+
/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
+ * drmm_mode_config_init - managed DRM mode_configuration structure
+ * initialization
* @dev: DRM device
*
* Initialize @dev's mode_config structure, used for tracking the graphics
@@ -384,8 +391,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
* problem, since this should happen single threaded at init time. It is the
* driver's problem to ensure this guarantee.
*
+ * Cleanup is automatically handled through registering drm_mode_config_cleanup
+ * with drmm_add_action().
+ *
+ * Returns: 0 on success, negative error value on failure.
*/
-void drm_mode_config_init(struct drm_device *dev)
+int drmm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
@@ -443,8 +454,11 @@ void drm_mode_config_init(struct drm_device *dev)
drm_modeset_acquire_fini(&modeset_ctx);
dma_resv_fini(&resv);
}
+
+ return drmm_add_action_or_reset(dev, drm_mode_config_init_release,
+ NULL);
}
-EXPORT_SYMBOL(drm_mode_config_init);
+EXPORT_SYMBOL(drmm_mode_config_init);
/**
* drm_mode_config_cleanup - free up DRM mode_config info
@@ -456,6 +470,9 @@ EXPORT_SYMBOL(drm_mode_config_init);
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
+ *
+ * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for
+ * drivers to explicitly call this function.
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
@@ -532,3 +549,90 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+static u32 full_encoder_mask(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ u32 encoder_mask = 0;
+
+ drm_for_each_encoder(encoder, dev)
+ encoder_mask |= drm_encoder_mask(encoder);
+
+ return encoder_mask;
+}
+
+/*
+ * For some reason we want the encoder itself included in
+ * possible_clones. Make life easy for drivers by allowing them
+ * to leave possible_clones unset if no cloning is possible.
+ */
+static void fixup_encoder_possible_clones(struct drm_encoder *encoder)
+{
+ if (encoder->possible_clones == 0)
+ encoder->possible_clones = drm_encoder_mask(encoder);
+}
+
+static void validate_encoder_possible_clones(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ u32 encoder_mask = full_encoder_mask(dev);
+ struct drm_encoder *other;
+
+ drm_for_each_encoder(other, dev) {
+ WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) !=
+ !!(other->possible_clones & drm_encoder_mask(encoder)),
+ "possible_clones mismatch: "
+ "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. "
+ "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n",
+ encoder->base.id, encoder->name,
+ drm_encoder_mask(encoder), encoder->possible_clones,
+ other->base.id, other->name,
+ drm_encoder_mask(other), other->possible_clones);
+ }
+
+ WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 ||
+ (encoder->possible_clones & ~encoder_mask) != 0,
+ "Bogus possible_clones: "
+ "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n",
+ encoder->base.id, encoder->name,
+ encoder->possible_clones, encoder_mask);
+}
+
+static u32 full_crtc_mask(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ u32 crtc_mask = 0;
+
+ drm_for_each_crtc(crtc, dev)
+ crtc_mask |= drm_crtc_mask(crtc);
+
+ return crtc_mask;
+}
+
+static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
+{
+ u32 crtc_mask = full_crtc_mask(encoder->dev);
+
+ WARN((encoder->possible_crtcs & crtc_mask) == 0 ||
+ (encoder->possible_crtcs & ~crtc_mask) != 0,
+ "Bogus possible_crtcs: "
+ "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n",
+ encoder->base.id, encoder->name,
+ encoder->possible_crtcs, crtc_mask);
+}
+
+void drm_mode_config_validate(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ drm_for_each_encoder(encoder, dev)
+ fixup_encoder_possible_clones(encoder);
+
+ drm_for_each_encoder(encoder, dev) {
+ validate_encoder_possible_clones(encoder);
+ validate_encoder_possible_crtcs(encoder);
+ }
+}
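With the managed variant, a driver's modeset initialization reduces to roughly the following (sketch; the limits and my_mode_config_funcs are hypothetical):

.. code-block:: c

	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;
	drm->mode_config.max_width = 4096;	/* hypothetical limits */
	drm->mode_config.max_height = 4096;
	drm->mode_config.funcs = &my_mode_config_funcs;
	/* no drm_mode_config_cleanup() needed in the error or teardown paths */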
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 35c2719407a8..901b078abf40 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -402,12 +402,13 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
{
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
+ struct drm_modeset_acquire_ctx ctx;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
- drm_modeset_lock_all(dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!obj) {
@@ -427,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
out_unref:
drm_mode_object_put(obj);
out:
- drm_modeset_unlock_all(dev);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
@@ -449,12 +450,13 @@ static int set_property_legacy(struct drm_mode_object *obj,
{
struct drm_device *dev = prop->dev;
struct drm_mode_object *ref;
+ struct drm_modeset_acquire_ctx ctx;
int ret = -EINVAL;
if (!drm_property_change_valid_get(prop, prop_value, &ref))
return -EINVAL;
- drm_modeset_lock_all(dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
ret = drm_connector_set_obj_prop(obj, prop, prop_value);
@@ -468,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
break;
}
drm_property_change_valid_put(prop, ref);
- drm_modeset_unlock_all(dev);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
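The two ioctl paths above now follow the standard acquire-context retry pattern; its general shape, mirroring the hunks with the body elided, is:

.. code-block:: c

	struct drm_modeset_acquire_ctx ctx;
	int ret = 0;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
	/* look up and modify modeset state; the block is retried
	 * automatically on ww-mutex deadlock */
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	return ret;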
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d4d64518e11b..fec1c33b3045 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -748,32 +748,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_set_name);
/**
- * drm_mode_hsync - get the hsync of a mode
- * @mode: mode
- *
- * Returns:
- * @modes's hsync rate in kHz, rounded to the nearest integer. Calculates the
- * value first if it is not yet set.
- */
-int drm_mode_hsync(const struct drm_display_mode *mode)
-{
- unsigned int calc_val;
-
- if (mode->hsync)
- return mode->hsync;
-
- if (mode->htotal <= 0)
- return 0;
-
- calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
- calc_val += 500; /* round to 1000Hz */
- calc_val /= 1000; /* truncate to kHz */
-
- return calc_val;
-}
-EXPORT_SYMBOL(drm_mode_hsync);
-
-/**
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 81aa21561982..75e2b7053f35 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -30,12 +30,13 @@
#include <drm/drm.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
+#ifdef CONFIG_DRM_LEGACY
+
/**
* drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
* @dev: DRM device
@@ -93,6 +94,7 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
}
EXPORT_SYMBOL(drm_pci_free);
+#endif
static int drm_get_pci_domain(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index d6ad60ab0d38..4af173ced327 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -289,6 +289,8 @@ EXPORT_SYMBOL(drm_universal_plane_init);
int drm_plane_register_all(struct drm_device *dev)
{
+ unsigned int num_planes = 0;
+ unsigned int num_zpos = 0;
struct drm_plane *plane;
int ret = 0;
@@ -297,8 +299,15 @@ int drm_plane_register_all(struct drm_device *dev)
ret = plane->funcs->late_register(plane);
if (ret)
return ret;
+
+ if (plane->zpos_property)
+ num_zpos++;
+ num_planes++;
}
+ drm_WARN(dev, num_zpos && num_planes != num_zpos,
+ "Mixing planes with and without zpos property is invalid\n");
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index ca520028b2cb..f4e6184d1877 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -43,15 +43,6 @@
#define DEBUG_SCATTER 0
-static inline void *drm_vmalloc_dma(unsigned long size)
-{
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
-#else
- return vmalloc_32(size);
-#endif
-}
-
static void drm_sg_cleanup(struct drm_sg_mem * entry)
{
struct page *page;
@@ -126,7 +117,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
return -ENOMEM;
}
- entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+ entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
if (!entry->virtual) {
kfree(entry->busaddr);
kfree(entry->pagelist);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index da7b0b0c1090..2d5ce690d214 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -40,6 +41,69 @@
/**
* DOC: vblank handling
*
+ * From the computer's perspective, every time the monitor displays
+ * a new frame the scanout engine has "scanned out" the display image
+ * from top to bottom, one row of pixels at a time. The current row
+ * of pixels is referred to as the current scanline.
+ *
+ * In addition to the display's visible area, there are usually a couple of
+ * extra scanlines which aren't actually displayed on the screen.
+ * These extra scanlines don't contain image data and are occasionally used
+ * for features like audio and infoframes. The region made up of these
+ * scanlines is referred to as the vertical blanking region, or vblank for
+ * short.
+ *
+ * For historical reference, the vertical blanking period was designed to
+ * give the electron gun (on CRTs) enough time to move back to the top of
+ * the screen to start scanning out the next frame. Similar for horizontal
+ * blanking periods. They were designed to give the electron gun enough
+ * time to move back to the other side of the screen to start scanning the
+ * next scanline.
+ *
+ * ::
+ *
+ *
+ * physical → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ * top of | |
+ * display | |
+ * | New frame |
+ * | |
+ * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|
+ * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline,
+ * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the
+ * | | frame as it
+ * | | travels down
+ * | | ("scan out")
+ * | Old frame |
+ * | |
+ * | |
+ * | |
+ * | | physical
+ * | | bottom of
+ * vertical |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display
+ * blanking ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ * region → ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ * ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ * start of → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ * new frame
+ *
+ * "Physical top of display" is the reference point for the high-precision/
+ * corrected timestamp.
+ *
+ * On a lot of display hardware, programming needs to take effect during the
+ * vertical blanking period so that settings like gamma, the image buffer
+ * to be scanned out, etc. can safely be changed without showing
+ * any visual artifacts on the screen. In some unforgiving hardware, some of
+ * this programming has to both start and end in the same vblank. To help
+ * with the timing of the hardware programming, an interrupt is usually
+ * available to notify the driver when it can start the updating of registers.
+ * The interrupt is in this context named the vblank interrupt.
+ *
+ * The vblank interrupt may be fired at different points depending on the
+ * hardware. Some hardware implementations will fire the interrupt when the
+ * new frame starts, other implementations will fire the interrupt at different
+ * points in time.
+ *
* Vertical blanking plays a major role in graphics rendering. To achieve
* tear-free display, users must synchronize page flips and/or rendering to
* vertical blanking. The DRM API offers ioctls to perform page flips
@@ -278,8 +342,8 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%llu, diff=%u, hw=%u hw_last=%u\n",
- pipe, atomic64_read(&vblank->count), diff,
- cur_vblank, vblank->last);
+ pipe, (unsigned long long)atomic64_read(&vblank->count),
+ diff, cur_vblank, vblank->last);
if (diff == 0) {
WARN_ON_ONCE(cur_vblank != vblank->last);
@@ -425,14 +489,10 @@ static void vblank_disable_fn(struct timer_list *t)
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-void drm_vblank_cleanup(struct drm_device *dev)
+static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
{
unsigned int pipe;
- /* Bail if the driver didn't call drm_vblank_init() */
- if (dev->num_crtcs == 0)
- return;
-
for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -441,10 +501,6 @@ void drm_vblank_cleanup(struct drm_device *dev)
del_timer_sync(&vblank->disable_timer);
}
-
- kfree(dev->vblank);
-
- dev->num_crtcs = 0;
}
/**
@@ -453,25 +509,29 @@ void drm_vblank_cleanup(struct drm_device *dev)
* @num_crtcs: number of CRTCs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
- * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for
- * drivers with a &drm_driver.release callback.
+ * Cleanup is handled automatically through a cleanup function added with
+ * drmm_add_action().
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
{
- int ret = -ENOMEM;
+ int ret;
unsigned int i;
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
+ dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+ if (!dev->vblank)
+ return -ENOMEM;
+
dev->num_crtcs = num_crtcs;
- dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
- if (!dev->vblank)
- goto err;
+ ret = drmm_add_action(dev, drm_vblank_init_release, NULL);
+ if (ret)
+ return ret;
for (i = 0; i < num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
@@ -486,10 +546,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
return 0;
-
-err:
- dev->num_crtcs = 0;
- return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
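A typical call site is unchanged for drivers, e.g. during driver load (sketch):

.. code-block:: c

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret)
		return ret;
	/* no explicit vblank cleanup needed anymore */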
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index aa88911bbc06..56197ae0b2f9 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -595,8 +595,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_ops;
break;
}
+ fallthrough; /* to _DRM_FRAME_BUFFER... */
#endif
- /* fall through - to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = drm_core_get_reg_ofs(dev);
@@ -621,7 +621,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
- /* fall through - to _DRM_SHM */
+ fallthrough; /* to _DRM_SHM */
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
deleted file mode 100644
index 2000d9b33fd5..000000000000
--- a/drivers/gpu/drm/drm_vram_helper_common.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <linux/module.h>
-
-/**
- * DOC: overview
- *
- * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
- * buffer object that is backed by video RAM. It can be used for
- * framebuffer devices with dedicated memory. The video RAM is managed
- * by &struct drm_vram_mm (VRAM MM).
- *
- * With the GEM interface userspace applications create, manage and destroy
- * graphics buffers, such as an on-screen framebuffer. GEM does not provide
- * an implementation of these interfaces. It's up to the DRM driver to
- * provide an implementation that suits the hardware. If the hardware device
- * contains dedicated video memory, the DRM driver can use the VRAM helper
- * library. Each active buffer object is stored in video RAM. Active
- * buffer are used for drawing the current frame, typically something like
- * the frame's scanout buffer or the cursor image. If there's no more space
- * left in VRAM, inactive GEM objects can be moved to system memory.
- *
- * The easiest way to use the VRAM helper library is to call
- * drm_vram_helper_alloc_mm(). The function allocates and initializes an
- * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use
- * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
- * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
- * as illustrated below.
- *
- * .. code-block:: c
- *
- * struct file_operations fops ={
- * .owner = THIS_MODULE,
- * DRM_VRAM_MM_FILE_OPERATION
- * };
- * struct drm_driver drv = {
- * .driver_feature = DRM_ ... ,
- * .fops = &fops,
- * DRM_GEM_VRAM_DRIVER
- * };
- *
- * int init_drm_driver()
- * {
- * struct drm_device *dev;
- * uint64_t vram_base;
- * unsigned long vram_size;
- * int ret;
- *
- * // setup device, vram base and size
- * // ...
- *
- * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
- * if (ret)
- * return ret;
- * return 0;
- * }
- *
- * This creates an instance of &struct drm_vram_mm, exports DRM userspace
- * interfaces for GEM buffer management and initializes file operations to
- * allow for accessing created GEM buffers. With this setup, the DRM driver
- * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
- * to userspace.
- *
- * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
- * in the driver's clean-up code.
- *
- * .. code-block:: c
- *
- * void fini_drm_driver()
- * {
- * struct drm_device *dev = ...;
- *
- * drm_vram_helper_release_mm(dev);
- * }
- *
- * For drawing or scanout operations, buffer object have to be pinned in video
- * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
- * A buffer object that is pinned in video RAM has a fixed address within that
- * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
- * it's used to program the hardware's scanout engine for framebuffers, set
- * the cursor overlay's image for a mouse cursor, or use it as input to the
- * hardware's draing engine.
- *
- * To access a buffer object's memory from the DRM driver, call
- * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
- * space and returns the memory address. Use drm_gem_vram_kunmap() to
- * release the mapping.
- */
-
-MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index a8685b2e1803..f9afe11c50f0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -231,21 +231,11 @@ static struct drm_info_list etnaviv_debugfs_list[] = {
{"ring", show_each_gpu, 0, etnaviv_ring_show},
};
-static int etnaviv_debugfs_init(struct drm_minor *minor)
+static void etnaviv_debugfs_init(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(etnaviv_debugfs_list,
- ARRAY_SIZE(etnaviv_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
- return ret;
- }
-
- return ret;
+ drm_debugfs_create_files(etnaviv_debugfs_list,
+ ARRAY_SIZE(etnaviv_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
@@ -736,7 +726,7 @@ static void __exit etnaviv_exit(void)
module_exit(etnaviv_exit);
MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
-MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 648cf0207309..706af0304ca4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -154,8 +154,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
file_size += sizeof(*iter.hdr) * n_obj;
/* Allocate the file in vmalloc memory, it's likely to be big */
- iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
- PAGE_KERNEL);
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NORETRY);
if (!iter.start) {
mutex_unlock(&gpu->mmu_context->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3b0afa156d92..54def341c1db 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -238,8 +238,10 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
}
if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
- submit->bos[i].va != mapping->iova)
+ submit->bos[i].va != mapping->iova) {
+ etnaviv_gem_mapping_unreference(mapping);
return -EINVAL;
+ }
atomic_inc(&etnaviv_obj->gpu_active);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index e6795bafcbb9..75f9db8f7bec 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -453,7 +453,7 @@ static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
if (!(gpu->identity.features & meta->feature))
continue;
- if (meta->nr_domains < (index - offset)) {
+ if (index - offset >= meta->nr_domains) {
offset += meta->nr_domains;
continue;
}
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 5ee090691390..9ac51b6ab34b 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -25,6 +25,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
@@ -135,10 +136,6 @@ static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
.disable = exynos_dp_nop,
};
-static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
@@ -167,8 +164,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 43fa0f26c052..7ba5354e7d94 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -14,6 +14,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
@@ -149,10 +150,6 @@ static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
.disable = exynos_dpi_disable,
};
-static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
enum {
FIMD_PORT_IN0,
FIMD_PORT_IN1,
@@ -201,8 +198,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
{
int ret;
- drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 57defeb44522..dbd80f1e4c78 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -76,7 +76,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
}
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
- .fault = exynos_drm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e080aa92338c..ee96a95fb6be 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -30,6 +30,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
@@ -211,7 +212,7 @@
#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-static char *clk_names[5] = { "bus_clk", "sclk_mipi",
+static const char *const clk_names[5] = { "bus_clk", "sclk_mipi",
"phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
"sclk_rgb_vclk_to_dsim0" };
@@ -1523,10 +1524,6 @@ static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
.disable = exynos_dsi_disable,
};
-static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
@@ -1704,8 +1701,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
struct drm_bridge *in_bridge;
int ret;
- drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
@@ -1763,10 +1759,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->dev = dev;
dsi->driver_data = of_device_get_match_data(dev);
- ret = exynos_dsi_parse_dt(dsi);
- if (ret)
- return ret;
-
dsi->supplies[0].supply = "vddcore";
dsi->supplies[1].supply = "vddio";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
@@ -1813,10 +1805,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
}
dsi->irq = platform_get_irq(pdev, 0);
- if (dsi->irq < 0) {
- dev_err(dev, "failed to request dsi irq resource\n");
+ if (dsi->irq < 0)
return dsi->irq;
- }
irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
@@ -1827,11 +1817,25 @@ static int exynos_dsi_probe(struct platform_device *pdev)
return ret;
}
+ ret = exynos_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, &dsi->encoder);
pm_runtime_enable(dev);
- return component_add(dev, &exynos_dsi_component_ops);
+ ret = component_add(dev, &exynos_dsi_component_ops);
+ if (ret)
+ goto err_disable_runtime;
+
+ return 0;
+
+err_disable_runtime:
+ pm_runtime_disable(dev);
+ of_node_put(dsi->in_bridge_node);
+
+ return ret;
}
static int exynos_dsi_remove(struct platform_device *pdev)
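
The exynos_dsi_probe() reordering above moves exynos_dsi_parse_dt()
(which takes a reference on the bridge OF node) after IRQ setup, and
adds an unwind path so a component_add() failure releases runtime PM
and that node reference. A minimal sketch of the acquire-in-order,
release-in-reverse probe idiom this follows; the foo_* names are
hypothetical:

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = foo_acquire_resources(pdev);	/* clocks, regulators, irq */
		if (ret)
			return ret;	/* devm-managed resources unwind on their own */

		pm_runtime_enable(&pdev->dev);

		ret = component_add(&pdev->dev, &foo_component_ops);
		if (ret)
			goto err_disable_runtime;

		return 0;

	err_disable_runtime:
		pm_runtime_disable(&pdev->dev);
		foo_put_refs(pdev);	/* e.g. of_node_put() on held nodes */
		return ret;
	}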
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e6ceaf36fb04..56a2b47e1af7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -76,7 +76,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct fb_info *fbi;
struct drm_framebuffer *fb = helper->fb;
unsigned int size = fb->width * fb->height * fb->format->cpp[0];
- unsigned int nr_pages;
unsigned long offset;
fbi = drm_fb_helper_alloc_fbi(helper);
@@ -90,16 +89,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(fbi, helper, sizes);
- nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
- exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
- if (!exynos_gem->kvaddr) {
- DRM_DEV_ERROR(to_dma_dev(helper->dev),
- "failed to map pages to kernel space.\n");
- return -EIO;
- }
-
offset = fbi->var.xoffset * fb->format->cpp[0];
offset += fbi->var.yoffset * fb->pitches[0];
@@ -133,18 +122,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
- exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
- /*
- * If physically contiguous memory allocation fails and if IOMMU is
- * supported then try to get buffer from non physically contiguous
- * memory area.
- */
- if (IS_ERR(exynos_gem) && is_drm_iommu_supported(dev)) {
- dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
- exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
- size);
- }
-
+ exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_WC, size, true);
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
@@ -229,12 +207,8 @@ err_init:
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
- struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
- struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
struct drm_framebuffer *fb;
- vunmap(exynos_gem->kvaddr);
-
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d734d9d51762..0df57ee34144 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -17,28 +17,23 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
struct drm_device *dev = exynos_gem->base.dev;
- unsigned long attr;
- unsigned int nr_pages;
- struct sg_table sgt;
- int ret = -ENOMEM;
+ unsigned long attr = 0;
if (exynos_gem->dma_addr) {
DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
return 0;
}
- exynos_gem->dma_attrs = 0;
-
/*
* if EXYNOS_BO_CONTIG, fully physically contiguous memory
* region will be allocated else physically contiguous
* as possible.
*/
if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
- exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
+ attr |= DMA_ATTR_FORCE_CONTIGUOUS;
/*
* if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
@@ -46,61 +41,29 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
*/
if (exynos_gem->flags & EXYNOS_BO_WC ||
!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
- attr = DMA_ATTR_WRITE_COMBINE;
+ attr |= DMA_ATTR_WRITE_COMBINE;
else
- attr = DMA_ATTR_NON_CONSISTENT;
-
- exynos_gem->dma_attrs |= attr;
- exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+ attr |= DMA_ATTR_NON_CONSISTENT;
- nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
- exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
- if (!exynos_gem->pages) {
- DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
- return -ENOMEM;
- }
+ /* FBDev emulation requires kernel mapping */
+ if (!kvmap)
+ attr |= DMA_ATTR_NO_KERNEL_MAPPING;
+ exynos_gem->dma_attrs = attr;
exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
&exynos_gem->dma_addr, GFP_KERNEL,
exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
- goto err_free;
- }
-
- ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
- exynos_gem->dma_addr, exynos_gem->size,
- exynos_gem->dma_attrs);
- if (ret < 0) {
- DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
- goto err_dma_free;
- }
-
- if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
- nr_pages)) {
- DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
- ret = -EINVAL;
- goto err_sgt_free;
+ return -ENOMEM;
}
- sg_free_table(&sgt);
+ if (kvmap)
+ exynos_gem->kvaddr = exynos_gem->cookie;
DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
-
return 0;
-
-err_sgt_free:
- sg_free_table(&sgt);
-err_dma_free:
- dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
- exynos_gem->dma_addr, exynos_gem->dma_attrs);
-err_free:
- kvfree(exynos_gem->pages);
-
- return ret;
}
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
@@ -118,8 +81,6 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
(dma_addr_t)exynos_gem->dma_addr,
exynos_gem->dma_attrs);
-
- kvfree(exynos_gem->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -203,7 +164,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
- unsigned long size)
+ unsigned long size,
+ bool kvmap)
{
struct exynos_drm_gem *exynos_gem;
int ret;
@@ -237,7 +199,7 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem->flags = flags;
- ret = exynos_drm_alloc_buf(exynos_gem);
+ ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
if (ret < 0) {
drm_gem_object_release(&exynos_gem->base);
kfree(exynos_gem);
@@ -254,7 +216,7 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
struct exynos_drm_gem *exynos_gem;
int ret;
- exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
+ exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
@@ -365,7 +327,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
else
flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
- exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
+ exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
if (IS_ERR(exynos_gem)) {
dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem);
@@ -381,26 +343,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
return 0;
}
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
- unsigned long pfn;
- pgoff_t page_offset;
-
- page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
- if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
- DRM_ERROR("invalid page offset\n");
- return VM_FAULT_SIGBUS;
- }
-
- pfn = page_to_pfn(exynos_gem->pages[page_offset]);
- return vmf_insert_mixed(vma, vmf->address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
-}
-
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
@@ -462,11 +404,24 @@ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
- int npages;
+ struct drm_device *drm_dev = obj->dev;
+ struct sg_table *sgt;
+ int ret;
- npages = exynos_gem->size >> PAGE_SHIFT;
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
- return drm_prime_pages_to_sg(exynos_gem->pages, npages);
+ ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
+ exynos_gem->dma_addr, exynos_gem->size,
+ exynos_gem->dma_attrs);
+ if (ret) {
+ DRM_ERROR("failed to get sgtable, %d\n", ret);
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
}
struct drm_gem_object *
@@ -475,52 +430,47 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct exynos_drm_gem *exynos_gem;
- int npages;
- int ret;
-
- exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
- if (IS_ERR(exynos_gem)) {
- ret = PTR_ERR(exynos_gem);
- return ERR_PTR(ret);
- }
- exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+ if (sgt->nents < 1)
+ return ERR_PTR(-EINVAL);
- npages = exynos_gem->size >> PAGE_SHIFT;
- exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!exynos_gem->pages) {
- ret = -ENOMEM;
- goto err;
+ /*
+ * Check if the provided buffer has been mapped as contiguous
+ * into DMA address space.
+ */
+ if (sgt->nents > 1) {
+ dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (!sg_dma_len(s))
+ break;
+ if (sg_dma_address(s) != next_addr) {
+ DRM_ERROR("buffer chunks must be mapped contiguously");
+ return ERR_PTR(-EINVAL);
+ }
+ next_addr = sg_dma_address(s) + sg_dma_len(s);
+ }
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
- npages);
- if (ret < 0)
- goto err_free_large;
-
- exynos_gem->sgt = sgt;
+ exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
+ if (IS_ERR(exynos_gem))
+ return ERR_CAST(exynos_gem);
- if (sgt->nents == 1) {
- /* always physically continuous memory if sgt->nents is 1. */
- exynos_gem->flags |= EXYNOS_BO_CONTIG;
- } else {
- /*
- * this case could be CONTIG or NONCONTIG type but for now
- * sets NONCONTIG.
- * TODO. we have to find a way that exporter can notify
- * the type of its own buffer to importer.
- */
+ /*
+ * Buffer has been mapped as contiguous into DMA address space,
+ * but if an IOMMU is present, it can be either CONTIG or NONCONTIG.
+ * We apply the simplified logic below:
+ */
+ if (is_drm_iommu_supported(dev))
exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
- }
+ else
+ exynos_gem->flags |= EXYNOS_BO_CONTIG;
+ exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+ exynos_gem->sgt = sgt;
return &exynos_gem->base;
-
-err_free_large:
- kvfree(exynos_gem->pages);
-err:
- drm_gem_object_release(&exynos_gem->base);
- kfree(exynos_gem);
- return ERR_PTR(ret);
}
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
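
The import path above now rejects sg_tables that are not mapped to a
single contiguous DMA address range. A standalone sketch of that
contiguity walk, assuming an already DMA-mapped table; the helper name
is hypothetical:

	/* True if the mapped table covers one contiguous IOVA range. */
	static bool sgt_is_dma_contiguous(struct sg_table *sgt)
	{
		dma_addr_t expected = sg_dma_address(sgt->sgl);
		struct scatterlist *s;
		unsigned int i;

		for_each_sg(sgt->sgl, s, sgt->nents, i) {
			if (!sg_dma_len(s))
				break;
			if (sg_dma_address(s) != expected)
				return false;
			expected = sg_dma_address(s) + sg_dma_len(s);
		}
		return true;
	}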
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 42ec67bc262d..6ef001f890aa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -21,20 +21,15 @@
* @base: a gem object.
* - a new handle to this gem object would be created
* by drm_gem_handle_create().
- * @buffer: a pointer to exynos_drm_gem_buffer object.
- * - contain the information to memory region allocated
- * by user request or at framebuffer creation.
- * continuous memory region allocated by user request
- * or at framebuffer creation.
* @flags: indicate memory type of allocated buffer and cache attribute.
* @size: size requested from user, in bytes; this size is aligned
*	in page units.
* @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
+ * @kvaddr: kernel virtual address to allocated memory region (for fbdev)
* @dma_addr: bus address (accessed by DMA) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
- * @pages: Array of backing pages.
+ * @dma_attrs: attrs passed to the DMA mapping framework
* @sgt: Imported sg_table.
*
* P.S. this object would be transferred to user as kms_bo.handle so
@@ -48,7 +43,6 @@ struct exynos_drm_gem {
void __iomem *kvaddr;
dma_addr_t dma_addr;
unsigned long dma_attrs;
- struct page **pages;
struct sg_table *sgt;
};
@@ -58,7 +52,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
/* create a new buffer with gem object */
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
- unsigned long size);
+ unsigned long size,
+ bool kvmap);
/*
* request gem object creation and buffer allocation as the size
@@ -101,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
-
/* set vm_flags and we can change the vm attribute to other one at here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index f41d75923557..a86abc173605 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -88,7 +88,7 @@
#define MIC_BS_SIZE_2D(x) ((x) & 0x3fff)
-static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
+static const char *const clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
#define NUM_CLKS ARRAY_SIZE(clk_names)
static DEFINE_MUTEX(mic_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index dafa87b82052..2d94afba031e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -293,10 +293,8 @@ static int rotator_probe(struct platform_device *pdev)
return PTR_ERR(rot->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev),
rot);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 93c43c8d914e..ce1857138f89 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -502,10 +502,8 @@ static int scaler_probe(struct platform_device *pdev)
return PTR_ERR(scaler->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler,
IRQF_ONESHOT, "drm_scaler", scaler);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index b320b3a21ad4..e5662bdcbbde 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
@@ -213,6 +214,12 @@ static ssize_t vidi_store_connection(struct device *dev,
static DEVICE_ATTR(connection, 0644, vidi_show_connection,
vidi_store_connection);
+static struct attribute *vidi_attrs[] = {
+ &dev_attr_connection.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vidi);
+
int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file_priv)
{
@@ -369,10 +376,6 @@ static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs =
.disable = exynos_vidi_disable,
};
-static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -406,8 +409,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
@@ -443,7 +445,6 @@ static int vidi_probe(struct platform_device *pdev)
{
struct vidi_context *ctx;
struct device *dev = &pdev->dev;
- int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -457,23 +458,7 @@ static int vidi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ret = device_create_file(dev, &dev_attr_connection);
- if (ret < 0) {
- DRM_DEV_ERROR(dev,
- "failed to create connection sysfs.\n");
- return ret;
- }
-
- ret = component_add(dev, &vidi_component_ops);
- if (ret)
- goto err_remove_file;
-
- return ret;
-
-err_remove_file:
- device_remove_file(dev, &dev_attr_connection);
-
- return ret;
+ return component_add(dev, &vidi_component_ops);
}
static int vidi_remove(struct platform_device *pdev)
@@ -498,5 +483,6 @@ struct platform_driver vidi_driver = {
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
+ .dev_groups = vidi_groups,
},
};
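
The vidi change above swaps manual device_create_file() /
device_remove_file() calls for driver-core managed attributes, which
removes the error unwinding from probe entirely. A minimal sketch of
the pattern; the foo_* names and the show/store callbacks are assumed:

	static DEVICE_ATTR(connection, 0644, foo_show_connection,
			   foo_store_connection);

	static struct attribute *foo_attrs[] = {
		&dev_attr_connection.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(foo);			/* generates foo_groups */

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			.dev_groups = foo_groups,	/* created/removed by the core */
		},
	};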
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 1a7c828fc41d..95dd399aa9cc 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "regs-hdmi.h"
@@ -1559,10 +1560,6 @@ static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs =
.disable = hdmi_disable,
};
-static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void hdmi_audio_shutdown(struct device *dev, void *data)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
@@ -1843,8 +1840,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdata->phy_clk.enable = hdmiphy_clk_enable;
- drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 21b726baedea..c7e2e2ebc327 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1244,9 +1244,11 @@ static int mixer_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
+ pm_runtime_enable(dev);
+
ret = component_add(&pdev->dev, &mixer_component_ops);
- if (!ret)
- pm_runtime_enable(dev);
+ if (ret)
+ pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index cff344367f81..9b0c4736c21a 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -13,19 +13,11 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
-static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = fsl_dcu_drm_encoder_destroy,
-};
-
int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
struct drm_crtc *crtc)
{
@@ -38,8 +30,8 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
if (fsl_dev->tcon)
fsl_tcon_bypass_enable(fsl_dev->tcon);
- ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(fsl_dev->drm, encoder,
+ DRM_MODE_ENCODER_LVDS);
if (ret < 0)
return ret;
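
Nearly every encoder conversion in this series has the same shape: a
drm_encoder_funcs struct whose only callback was drm_encoder_cleanup()
is deleted in favor of drm_simple_encoder_init(), which supplies that
destroy hook itself. A before/after sketch:

	/* Before: boilerplate funcs struct just to wire up cleanup */
	static const struct drm_encoder_funcs funcs = {
		.destroy = drm_encoder_cleanup,
	};
	ret = drm_encoder_init(drm, encoder, &funcs,
			       DRM_MODE_ENCODER_LVDS, NULL);

	/* After: the simple-KMS helper provides the destroy callback */
	ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);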
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 29c36d63b20e..88535f5aacc5 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -28,6 +28,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "cdv_device.h"
#include "intel_bios.h"
#include "power.h"
@@ -237,15 +239,6 @@ static const struct drm_connector_helper_funcs
.best_encoder = gma_best_encoder,
};
-static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
- .destroy = cdv_intel_crt_enc_destroy,
-};
-
void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
@@ -271,8 +264,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
encoder = &gma_encoder->base;
- drm_encoder_init(dev, encoder,
- &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
gma_connector_attach_encoder(gma_connector, gma_encoder);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 5772b2dce0d6..f41cbb753bb4 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
#include "psb_drv.h"
@@ -1271,37 +1272,8 @@ cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZ
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
-
-#if 0
-static char *voltage_names[] = {
- "0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char *pre_emph_names[] = {
- "0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char *link_train_names[] = {
- "pattern 1", "pattern 2", "idle", "off"
-};
-#endif
-
#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
-/*
-static uint8_t
-cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
-{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
-}
-*/
+
static void
cdv_intel_get_adjust_train(struct gma_encoder *encoder)
{
@@ -1908,11 +1880,6 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
kfree(connector);
}
-static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
.dpms = cdv_intel_dp_dpms,
.mode_fixup = cdv_intel_dp_mode_fixup,
@@ -1935,11 +1902,6 @@ static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_fun
.best_encoder = gma_best_encoder,
};
-static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
- .destroy = cdv_intel_dp_encoder_destroy,
-};
-
-
static void cdv_intel_dp_add_properties(struct drm_connector *connector)
{
cdv_intel_attach_force_audio_property(connector);
@@ -2016,8 +1978,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
encoder = &gma_encoder->base;
drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
- drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
@@ -2120,7 +2081,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
if (ret == 0) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- cdv_intel_dp_encoder_destroy(encoder);
+ drm_encoder_cleanup(encoder);
cdv_intel_dp_destroy(connector);
goto err_priv;
} else {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 1711a41acc16..0d12c6ffbc40 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -32,6 +32,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
#include "psb_drv.h"
@@ -311,8 +312,7 @@ void cdv_hdmi_init(struct drm_device *dev,
&cdv_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
- drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index ea0a5d9a0acc..eaaf4efec217 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -12,6 +12,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "cdv_device.h"
#include "intel_bios.h"
#include "power.h"
@@ -72,89 +74,6 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
return retval;
}
-#if 0
-/*
- * Set LVDS backlight level by I2C command
- */
-static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
- unsigned int level)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
- u8 out_buf[2];
- unsigned int blc_i2c_brightness;
-
- struct i2c_msg msgs[] = {
- {
- .addr = lvds_i2c_bus->slave_addr,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- }
- };
-
- blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
- BRIGHTNESS_MASK /
- BRIGHTNESS_MAX_LEVEL);
-
- if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
- blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
-
- out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
- out_buf[1] = (u8)blc_i2c_brightness;
-
- if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
- return 0;
-
- DRM_ERROR("I2C transfer error\n");
- return -1;
-}
-
-
-static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- u32 max_pwm_blc;
- u32 blc_pwm_duty_cycle;
-
- max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
-
- /*BLC_PWM_CTL Should be initiated while backlight device init*/
- BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
-
- blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
-
- if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
- blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
-
- blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
- REG_WRITE(BLC_PWM_CTL,
- (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
- (blc_pwm_duty_cycle));
-
- return 0;
-}
-
-/*
- * Set LVDS backlight level either by I2C or PWM
- */
-void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->lvds_bl) {
- DRM_ERROR("NO LVDS Backlight Info\n");
- return;
- }
-
- if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
- cdv_lvds_i2c_set_brightness(dev, level);
- else
- cdv_lvds_pwm_set_brightness(dev, level);
-}
-#endif
-
/**
* Sets the backlight level.
*
@@ -499,16 +418,6 @@ static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
.destroy = cdv_intel_lvds_destroy,
};
-
-static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
- .destroy = cdv_intel_lvds_enc_destroy,
-};
-
/*
* Enumerate the child dev array parsed from VBT to check whether
* the LVDS is present.
@@ -616,10 +525,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
&cdv_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, encoder,
- &cdv_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
-
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1d8f67e4795a..23a78d755382 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -577,31 +577,31 @@ static void psb_setup_outputs(struct drm_device *dev)
break;
case INTEL_OUTPUT_SDVO:
crtc_mask = dev_priv->ops->sdvo_mask;
- clone_mask = (1 << INTEL_OUTPUT_SDVO);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_LVDS:
- crtc_mask = dev_priv->ops->lvds_mask;
- clone_mask = (1 << INTEL_OUTPUT_LVDS);
+ crtc_mask = dev_priv->ops->lvds_mask;
+ clone_mask = 0;
break;
case INTEL_OUTPUT_MIPI:
crtc_mask = (1 << 0);
- clone_mask = (1 << INTEL_OUTPUT_MIPI);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_MIPI2:
crtc_mask = (1 << 2);
- clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_HDMI:
- crtc_mask = dev_priv->ops->hdmi_mask;
+ crtc_mask = dev_priv->ops->hdmi_mask;
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
case INTEL_OUTPUT_DISPLAYPORT:
crtc_mask = (1 << 0) | (1 << 1);
- clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_EDP:
crtc_mask = (1 << 1);
- clone_mask = (1 << INTEL_OUTPUT_EDP);
+ clone_mask = 0;
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones =
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d4c65f268922..c976a9dd9240 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -27,6 +27,8 @@
#include <linux/delay.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "mdfld_dsi_dpi.h"
#include "mdfld_dsi_pkg_sender.h"
#include "mdfld_output.h"
@@ -993,10 +995,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/*create drm encoder object*/
connector = &dsi_connector->base.base;
encoder = &dpi_output->base.base.base;
- drm_encoder_init(dev,
- encoder,
- p_funcs->encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(encoder,
p_funcs->encoder_helper_funcs);
@@ -1006,10 +1005,10 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/*set possible crtcs and clones*/
if (dsi_connector->pipe) {
encoder->possible_crtcs = (1 << 2);
- encoder->possible_clones = (1 << 1);
+ encoder->possible_clones = 0;
} else {
encoder->possible_crtcs = (1 << 0);
- encoder->possible_clones = (1 << 0);
+ encoder->possible_clones = 0;
}
dsi_connector->base.encoder = &dpi_output->base.base;
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 4fff110c4921..aae2d358364c 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -658,16 +658,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
-#if 0
- if (pipe == 1) {
- if (!gma_power_begin(dev, true))
- return 0;
- android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
- x, y, old_fb);
- goto mrst_crtc_mode_set_exit;
- }
-#endif
-
ret = check_fb(crtc->primary->fb);
if (ret)
return ret;
@@ -918,14 +908,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
dpll = 0;
-#if 0 /* FIXME revisit later */
- if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
- ksel == KSEL_BYPASS_25)
- dpll &= ~MDFLD_INPUT_REF_SEL;
- else if (ksel == KSEL_BYPASS_83_100)
- dpll |= MDFLD_INPUT_REF_SEL;
-#endif /* FIXME revisit later */
-
if (is_hdmi)
dpll |= MDFLD_VCO_SEL;
@@ -935,20 +917,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
/* compute bitmask from p1 value */
dpll |= (1 << (clock.p1 - 2)) << 17;
-#if 0 /* 1080p30 & 720p */
- dpll = 0x00050000;
- fp = 0x000001be;
-#endif
-#if 0 /* 480p */
- dpll = 0x02010000;
- fp = 0x000000d2;
-#endif
} else {
-#if 0 /*DBI_TPO_480x864*/
- dpll = 0x00020000;
- fp = 0x00000156;
-#endif /* DBI_TPO_480x864 */ /* get from spec. */
-
dpll = 0x00800000;
fp = 0x000000c1;
}
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
index ab2b27c0f037..17a944d70add 100644
--- a/drivers/gpu/drm/gma500/mdfld_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_output.h
@@ -51,7 +51,6 @@ struct panel_info {
};
struct panel_funcs {
- const struct drm_encoder_funcs *encoder_funcs;
const struct drm_encoder_helper_funcs *encoder_helper_funcs;
struct drm_display_mode * (*get_config_mode)(struct drm_device *);
int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
index 49c92debb7b2..25e897b98f86 100644
--- a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
+++ b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
@@ -188,13 +188,7 @@ static const struct drm_encoder_helper_funcs
.commit = mdfld_dsi_dpi_commit,
};
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
const struct panel_funcs mdfld_tmd_vid_funcs = {
- .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
.get_config_mode = &tmd_vid_get_config_mode,
.get_panel_info = tmd_vid_get_panel_info,
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
index a9420bf9a419..11845978fb0a 100644
--- a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
+++ b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
@@ -76,13 +76,7 @@ static const struct drm_encoder_helper_funcs
.commit = mdfld_dsi_dpi_commit,
};
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
const struct panel_funcs mdfld_tpo_vid_funcs = {
- .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
.get_config_mode = &tpo_vid_get_config_mode,
.get_panel_info = tpo_vid_get_panel_info,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f4370232767d..a097a59a9eae 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <drm/drm.h>
+#include <drm/drm_simple_kms_helper.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
@@ -620,15 +621,6 @@ static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
.destroy = oaktrail_hdmi_destroy,
};
-static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
- .destroy = oaktrail_hdmi_enc_destroy,
-};
-
void oaktrail_hdmi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
@@ -651,9 +643,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
&oaktrail_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
- drm_encoder_init(dev, encoder,
- &oaktrail_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
@@ -673,11 +663,6 @@ failed_connector:
kfree(gma_encoder);
}
-static const struct pci_device_id hdmi_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
- { 0 }
-};
-
void oaktrail_hdmi_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 582e09597500..2828360153d1 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -13,6 +13,8 @@
#include <asm/intel-mid.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
@@ -311,8 +313,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 16c6136f778b..fb601983cef0 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -252,7 +252,6 @@ extern int psb_intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value);
extern void psb_intel_lvds_destroy(struct drm_connector *connector);
-extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
/* intel_gmbus.c */
extern void gma_intel_i2c_reset(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index afaebab7bc17..063c66bb946d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -11,6 +11,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
@@ -621,18 +623,6 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
.destroy = psb_intel_lvds_destroy,
};
-
-static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
- .destroy = psb_intel_lvds_enc_destroy,
-};
-
-
-
/**
* psb_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -683,9 +673,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, encoder,
- &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 264d7ad004b4..68fb3d7c172b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -864,36 +864,6 @@ static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sd
DRM_INFO("HDMI is not supported yet");
return false;
-#if 0
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
- uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
- uint8_t set_buf_index[2] = { 1, 0 };
- uint64_t *data = (uint64_t *)&avi_if;
- unsigned i;
-
- intel_dip_infoframe_csum(&avi_if);
-
- if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
- SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
-
- for (i = 0; i < sizeof(avi_if); i += 8) {
- if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
- SDVO_CMD_SET_HBUF_DATA,
- data, 8))
- return false;
- data++;
- }
-
- return psb_intel_sdvo_set_value(psb_intel_sdvo,
- SDVO_CMD_SET_HBUF_TXRATE,
- &tx_rate, 1);
-#endif
}
static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
@@ -1227,75 +1197,6 @@ static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdv
return true;
}
-/* No use! */
-#if 0
-struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
- struct drm_connector *connector = NULL;
- struct psb_intel_sdvo *iout = NULL;
- struct psb_intel_sdvo *sdvo;
-
- /* find the sdvo connector */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_psb_intel_sdvo(connector);
-
- if (iout->type != INTEL_OUTPUT_SDVO)
- continue;
-
- sdvo = iout->dev_priv;
-
- if (sdvo->sdvo_reg == SDVOB && sdvoB)
- return connector;
-
- if (sdvo->sdvo_reg == SDVOC && !sdvoB)
- return connector;
-
- }
-
- return NULL;
-}
-
-int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
-{
- u8 response[2];
- u8 status;
- struct psb_intel_sdvo *psb_intel_sdvo;
- DRM_DEBUG_KMS("\n");
-
- if (!connector)
- return 0;
-
- psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
- return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &response, 2) && response[0];
-}
-
-void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
-{
- u8 response[2];
- u8 status;
- struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
- if (on) {
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- } else {
- response[0] = 0;
- response[1] = 0;
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- }
-
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-}
-#endif
-
static bool
psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
{
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 9e8224456ea2..e5bdd99ad453 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -747,11 +747,11 @@ static int cmi_lcd_hack_create_device(void)
return -EINVAL;
}
- client = i2c_new_device(adapter, &info);
- if (!client) {
- pr_err("%s: i2c_new_device() failed\n", __func__);
+ client = i2c_new_client_device(adapter, &info);
+ if (IS_ERR(client)) {
+ pr_err("%s: creating I2C device failed\n", __func__);
i2c_put_adapter(adapter);
- return -EINVAL;
+ return PTR_ERR(client);
}
return 0;
@@ -765,12 +765,7 @@ static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
.commit = mdfld_dsi_dpi_commit,
};
-static const struct drm_encoder_funcs tc35876x_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
const struct panel_funcs mdfld_tc35876x_funcs = {
- .encoder_funcs = &tc35876x_encoder_funcs,
.encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
.get_config_mode = tc35876x_get_config_mode,
.get_panel_info = tc35876x_get_panel_info,
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 55b46a7150a5..cc70e836522f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -94,6 +94,10 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (state->fb->pitches[0] % 128 != 0) {
+ DRM_DEBUG_ATOMIC("wrong stride with 128-byte aligned\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -119,11 +123,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
reg = state->fb->width * (state->fb->format->cpp[0]);
- /* now line_pad is 16 */
- reg = PADDING(16, reg);
- line_l = state->fb->width * state->fb->format->cpp[0];
- line_l = PADDING(16, line_l);
+ line_l = state->fb->pitches[0];
writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
priv->mmio + HIBMC_CRT_FB_WIDTH);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 222356a4f9a8..a6fd0c29e5b8 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -94,7 +94,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
priv->dev->mode_config.max_height = 1200;
priv->dev->mode_config.fb_base = priv->fb_base;
- priv->dev->mode_config.preferred_depth = 24;
+ priv->dev->mode_config.preferred_depth = 32;
priv->dev->mode_config.prefer_shadow = 1;
priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -307,11 +307,7 @@ static int hibmc_load(struct drm_device *dev)
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
- ret = drm_fbdev_generic_setup(dev, 16);
- if (ret) {
- DRM_ERROR("failed to initialize fbdev: %d\n", ret);
- goto err;
- }
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
return 0;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 99397ac3b363..322bd542e89d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -50,7 +50,7 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
}
const struct drm_mode_config_funcs hibmc_mode_funcs = {
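
The two hibmc changes above work together: dumb buffers are now
allocated with 128-byte-aligned pitches, and atomic_check rejects any
framebuffer whose pitch breaks that rule. A sketch of the pitch
derivation; the helper name is hypothetical:

	/* width in pixels, cpp in bytes per pixel */
	static u32 hibmc_aligned_pitch(u32 width, u32 cpp)
	{
		return ALIGN(width * cpp, 128);	/* 128-byte-aligned scanlines */
	}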
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index f31068d74b18..00e87c290796 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -20,11 +20,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "dw_dsi_reg.h"
@@ -696,10 +696,6 @@ static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
.disable = dsi_encoder_disable
};
-static const struct drm_encoder_funcs dw_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int dw_drm_encoder_init(struct device *dev,
struct drm_device *drm_dev,
struct drm_encoder *encoder)
@@ -713,8 +709,7 @@ static int dw_drm_encoder_init(struct device *dev,
}
encoder->possible_crtcs = crtc_mask;
- ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("failed to init dsi encoder\n");
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 86000127d4ee..c339e632522a 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -940,7 +940,6 @@ static struct drm_driver ade_driver = {
};
struct kirin_drm_data ade_driver_data = {
- .register_connects = false,
.num_planes = ADE_CH_NUM,
.prim_plane = ADE_CH1,
.channel_formats = channel_formats,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index d3145ae877d7..4349da3e2379 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -219,40 +219,6 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
return 0;
}
-static int kirin_drm_connectors_register(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct drm_connector *failed_connector;
- struct drm_connector_list_iter conn_iter;
- int ret;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- ret = drm_connector_register(connector);
- if (ret) {
- failed_connector = connector;
- goto err;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
-
- return 0;
-
-err:
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (failed_connector == connector)
- break;
- drm_connector_unregister(connector);
- }
- drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
static int kirin_drm_bind(struct device *dev)
{
struct kirin_drm_data *driver_data;
@@ -279,17 +245,8 @@ static int kirin_drm_bind(struct device *dev)
drm_fbdev_generic_setup(drm_dev, 32);
- /* connectors should be registered after drm device register */
- if (driver_data->register_connects) {
- ret = kirin_drm_connectors_register(drm_dev);
- if (ret)
- goto err_drm_dev_unregister;
- }
-
return 0;
-err_drm_dev_unregister:
- drm_dev_unregister(drm_dev);
err_kms_cleanup:
kirin_drm_kms_cleanup(drm_dev);
err_drm_dev_put:
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
index 4d5c05a24065..dee8ec2f7f2e 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -37,7 +37,6 @@ struct kirin_drm_data {
u32 channel_formats_cnt;
int config_max_width;
int config_max_height;
- bool register_connects;
u32 num_planes;
u32 prim_plane;
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index a839f78a4c8a..741886b54419 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -393,7 +393,7 @@ sil164_detect_slave(struct i2c_client *client)
return NULL;
}
- return i2c_new_device(adap, &info);
+ return i2c_new_client_device(adap, &info);
}
static int
@@ -402,6 +402,7 @@ sil164_encoder_init(struct i2c_client *client,
struct drm_encoder_slave *encoder)
{
struct sil164_priv *priv;
+ struct i2c_client *slave_client;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -410,7 +411,9 @@ sil164_encoder_init(struct i2c_client *client,
encoder->slave_priv = priv;
encoder->slave_funcs = &sil164_encoder_funcs;
- priv->duallink_slave = sil164_detect_slave(client);
+ slave_client = sil164_detect_slave(client);
+ if (!IS_ERR(slave_client))
+ priv->duallink_slave = slave_client;
return 0;
}
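
The i2c_new_device() conversions in this series all follow from the
same API change: the old call returned NULL on failure, while
i2c_new_client_device() returns an ERR_PTR carrying the real error
code, hence the IS_ERR() checks above. The updated calling convention,
in sketch form:

	struct i2c_client *client;

	client = i2c_new_client_device(adapter, &info);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* propagate the actual error */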
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index c3332209f27a..9517f522dcb9 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -19,6 +19,7 @@
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/i2c/tda998x.h>
#include <media/cec-notifier.h>
@@ -1132,7 +1133,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data)
mutex_unlock(&priv->audio_mutex);
}
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int tda998x_audio_digital_mute(struct device *dev, void *data,
+ bool enable)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -1949,9 +1951,9 @@ static int tda998x_create(struct device *dev)
cec_info.platform_data = &priv->cec_glue;
cec_info.irq = client->irq;
- priv->cec = i2c_new_device(client->adapter, &cec_info);
- if (!priv->cec) {
- ret = -ENODEV;
+ priv->cec = i2c_new_client_device(client->adapter, &cec_info);
+ if (IS_ERR(priv->cec)) {
+ ret = PTR_ERR(priv->cec);
goto fail;
}
@@ -1997,15 +1999,6 @@ err_irq:
/* DRM encoder functions */
-static void tda998x_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs tda998x_encoder_funcs = {
- .destroy = tda998x_encoder_destroy,
-};
-
static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -2023,8 +2016,8 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
priv->encoder.possible_crtcs = crtcs;
- ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm, &priv->encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret)
goto err_encoder;
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 0bfd276c19fe..35bbe2b80596 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -1,3 +1,15 @@
+config DRM_I915_FENCE_TIMEOUT
+ int "Timeout for unsignaled foreign fences (ms, jiffy granularity)"
+ default 10000 # milliseconds
+ help
+ When listening to a foreign fence, we install a supplementary timer
+ to ensure that we are always signaled and our userspace is able to
+ make forward progress. This value specifies the timeout used for an
+ unsignaled foreign fence.
+
+ May be 0 to disable the timeout, and rely on the foreign fence being
+ eventually signaled.
+
config DRM_I915_USERFAULT_AUTOSUSPEND
int "Runtime autosuspend delay for userspace GGTT mmaps (ms)"
default 250 # milliseconds
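
The new DRM_I915_FENCE_TIMEOUT option bounds how long i915 waits on an
unsignaled foreign fence before forcing forward progress. A minimal
sketch of how such a jiffy-granularity Kconfig value might be
consumed; the helper name is hypothetical (the Makefile change below
adds i915_config.o, where a helper like this would live):

	static unsigned long fence_timeout_jiffies(void)
	{
		if (!CONFIG_DRM_I915_FENCE_TIMEOUT)
			return MAX_SCHEDULE_TIMEOUT;	/* 0 disables the timeout */

		return msecs_to_jiffies(CONFIG_DRM_I915_FENCE_TIMEOUT);
	}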
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6cd1f6253814..b0da6ea6e3f1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
+subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
@@ -34,6 +35,7 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# core driver code
i915-y += i915_drv.o \
+ i915_config.o \
i915_irq.o \
i915_getparam.o \
i915_params.o \
@@ -86,10 +88,12 @@ gt-y += \
gt/intel_engine_cs.o \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
- gt/intel_engine_pool.o \
gt/intel_engine_user.o \
gt/intel_ggtt.o \
+ gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
+ gt/intel_gt_buffer_pool.o \
+ gt/intel_gt_clock_utils.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
@@ -108,6 +112,7 @@ gt-y += \
gt/intel_sseu.o \
gt/intel_timeline.o \
gt/intel_workarounds.o \
+ gt/shmem_utils.o \
gt/sysfs_engines.o
# autogenerated null render state
gt-y += \
@@ -150,7 +155,6 @@ i915-y += \
i915_buddy.o \
i915_cmd_parser.o \
i915_gem_evict.o \
- i915_gem_fence_reg.o \
i915_gem_gtt.o \
i915_gem.o \
i915_globals.o \
@@ -164,14 +168,18 @@ i915-y += \
# general-purpose microcontroller (GuC) support
i915-y += gt/uc/intel_uc.o \
+ gt/uc/intel_uc_debugfs.o \
gt/uc/intel_uc_fw.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_ct.o \
+ gt/uc/intel_guc_debugfs.o \
gt/uc/intel_guc_fw.o \
gt/uc/intel_guc_log.o \
+ gt/uc/intel_guc_log_debugfs.o \
gt/uc/intel_guc_submission.o \
gt/uc/intel_huc.o \
+ gt/uc/intel_huc_debugfs.o \
gt/uc/intel_huc_fw.o
# modesetting core code
@@ -240,23 +248,6 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
-# perf code
-i915-y += \
- oa/i915_oa_hsw.o \
- oa/i915_oa_bdw.o \
- oa/i915_oa_chv.o \
- oa/i915_oa_sklgt2.o \
- oa/i915_oa_sklgt3.o \
- oa/i915_oa_sklgt4.o \
- oa/i915_oa_bxt.o \
- oa/i915_oa_kblgt2.o \
- oa/i915_oa_kblgt3.o \
- oa/i915_oa_glk.o \
- oa/i915_oa_cflgt2.o \
- oa/i915_oa_cflgt3.o \
- oa/i915_oa_cnl.o \
- oa/i915_oa_icl.o \
- oa/i915_oa_tgl.o
i915-y += i915_perf.o
# Post-mortem debug and GPU hang state capture
@@ -270,7 +261,8 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/igt_live_test.o \
selftests/igt_mmap.o \
selftests/igt_reset.o \
- selftests/igt_spinner.o
+ selftests/igt_spinner.o \
+ selftests/librapl.o
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 17cee6f80d8b..4fec5bd64920 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -36,15 +36,15 @@
#include "intel_panel.h"
#include "intel_vdsc.h"
-static inline int header_credits_available(struct drm_i915_private *dev_priv,
- enum transcoder dsi_trans)
+static int header_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
{
return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
>> FREE_HEADER_CREDIT_SHIFT;
}
-static inline int payload_credits_available(struct drm_i915_private *dev_priv,
- enum transcoder dsi_trans)
+static int payload_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
{
return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
>> FREE_PLOAD_CREDIT_SHIFT;
@@ -186,16 +186,19 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
static int dsi_send_pkt_payld(struct intel_dsi_host *host,
struct mipi_dsi_packet pkt)
{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
/* payload queue can accept *256 bytes*, check limit */
if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
- DRM_ERROR("payload size exceeds max queue limit\n");
+ drm_err(&i915->drm, "payload size exceeds max queue limit\n");
return -1;
}
/* load data into command payload queue */
if (!add_payld_to_queue(host, pkt.payload,
pkt.payload_length)) {
- DRM_ERROR("adding payload to queue failed\n");
+ drm_err(&i915->drm, "adding payload to queue failed\n");
return -1;
}
@@ -744,6 +747,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
tmp |= VIDEO_MODE_SYNC_PULSE;
break;
}
+ } else {
+ /*
+ * FIXME: Retrieve this info from VBT.
+ * Per the spec, when the DSI transcoder operates
+ * in TE GATE mode, TE comes from a GPIO,
+ * which is the UTIL PIN for DSI 0.
+ * We also assume this GPIO is not used for
+ * any other purpose.
+ */
+ tmp &= ~OP_MODE_MASK;
+ tmp |= CMD_MODE_TE_GATE;
+ tmp |= TE_SOURCE_GPIO;
}
intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
@@ -837,14 +852,33 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
}
hactive = adjusted_mode->crtc_hdisplay;
- htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+
+ if (is_vid_mode(intel_dsi))
+ htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ else
+ htotal = DIV_ROUND_UP((hactive + 160) * mul, div);
+
hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
hsync_size = hsync_end - hsync_start;
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
vactive = adjusted_mode->crtc_vdisplay;
- vtotal = adjusted_mode->crtc_vtotal;
+
+ if (is_vid_mode(intel_dsi)) {
+ vtotal = adjusted_mode->crtc_vtotal;
+ } else {
+ int bpp, line_time_us, byte_clk_period_ns;
+
+ if (crtc_state->dsc.compression_enable)
+ bpp = crtc_state->dsc.compressed_bpp;
+ else
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ byte_clk_period_ns = 1000000 / afe_clk(encoder, crtc_state);
+ line_time_us = (htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * intel_dsi->lane_count);
+ vtotal = vactive + DIV_ROUND_UP(400, line_time_us);
+ }
vsync_start = adjusted_mode->crtc_vsync_start;
vsync_end = adjusted_mode->crtc_vsync_end;
vsync_shift = hsync_start - htotal / 2;
@@ -873,7 +907,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
}
/* TRANS_HSYNC register to be programmed only for video mode */
- if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+ if (is_vid_mode(intel_dsi)) {
if (intel_dsi->video_mode_format ==
VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
/* BSPEC: hsync size should be at least 16 pixels */
@@ -916,22 +950,27 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (vsync_start < vactive)
drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
- /* program TRANS_VSYNC register */
- for_each_dsi_port(port, intel_dsi->ports) {
- dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ /* program TRANS_VSYNC register for video mode only */
+ if (is_vid_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
+ }
}
/*
- * FIXME: It has to be programmed only for interlaced
+ * FIXME: It has to be programmed only for video modes and interlaced
* modes. Put the check condition here once interlaced
* info is available as described above.
* program TRANS_VSYNCSHIFT register
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift);
+ if (is_vid_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
+ vsync_shift);
+ }
}
/* program TRANS_VBLANK register, should be same as vtotal programmed */
@@ -1016,6 +1055,32 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
}
}
+static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+
+ /*
+ * The UTIL pin is used as the TE input for DSI0;
+ * for dual link/DSI1, TE comes from the slave DSI1
+ * through a GPIO.
+ */
+ if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B)))
+ return;
+
+ tmp = intel_de_read(dev_priv, UTIL_PIN_CTL);
+
+ if (enable) {
+ tmp |= UTIL_PIN_DIRECTION_INPUT;
+ tmp |= UTIL_PIN_ENABLE;
+ } else {
+ tmp &= ~UTIL_PIN_ENABLE;
+ }
+ intel_de_write(dev_priv, UTIL_PIN_CTL, tmp);
+}
+
static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
@@ -1037,6 +1102,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
/* setup D-PHY timings */
gen11_dsi_setup_dphy_timings(encoder, crtc_state);
+ /* Since transcoder is configured to take events from GPIO */
+ gen11_dsi_config_util_pin(encoder, true);
+
/* step 4h: setup DSI protocol timeouts */
gen11_dsi_setup_timeouts(encoder, crtc_state);
@@ -1088,7 +1156,8 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
wait_for_cmds_dispatched_to_panel(encoder);
}
-static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1099,7 +1168,8 @@ static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
gen11_dsi_program_esc_clk_div(encoder, crtc_state);
}
-static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1118,13 +1188,14 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
}
-static void gen11_dsi_enable(struct intel_encoder *encoder,
+static void gen11_dsi_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
@@ -1180,6 +1251,15 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
enum transcoder dsi_trans;
u32 tmp;
+ /* disable periodic update mode */
+ if (is_cmd_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
+ tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
+ intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
+ }
+ }
+
/* put dsi link in ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
@@ -1264,7 +1344,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_disable(struct intel_encoder *encoder,
+static void gen11_dsi_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -1286,11 +1367,14 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
/* step3: disable port */
gen11_dsi_disable_port(encoder);
+ gen11_dsi_config_util_pin(encoder, false);
+
/* step4: disable IO power */
gen11_dsi_disable_io_power(encoder);
}
-static void gen11_dsi_post_disable(struct intel_encoder *encoder,
+static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -1347,6 +1431,22 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
}
+static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder dsi_trans;
+ u32 val;
+
+ if (intel_dsi->ports == BIT(PORT_B))
+ dsi_trans = TRANSCODER_DSI_1;
+ else
+ dsi_trans = TRANSCODER_DSI_0;
+
+ val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
+ return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
+}
+
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -1367,6 +1467,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
+ if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
}
static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
@@ -1417,18 +1521,22 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode =
- intel_connector->panel.fixed_mode;
+ intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
+ int ret;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
- intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+ ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
adjusted_mode->flags = 0;
@@ -1446,10 +1554,32 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->clock_set = true;
if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
- DRM_DEBUG_KMS("Attempting to use DSC failed\n");
+ drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n");
pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
+ /* We do not operate in periodic command mode */
+ pipe_config->hw.adjusted_mode.private_flags &=
+ ~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
+
+ /*
+ * In TE GATE command mode, we receive TE
+ * from the slave when dual link is enabled.
+ */
+ if (is_cmd_mode(intel_dsi)) {
+ if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_USE_TE1 |
+ I915_MODE_FLAG_DSI_USE_TE0;
+ else if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_USE_TE1;
+ else
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_USE_TE0;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 457b258683d3..79032701873a 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -125,7 +125,7 @@ intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct intel_plane_state *plane_state = to_intel_plane_state(state);
- WARN_ON(plane_state->vma);
+ drm_WARN_ON(plane->dev, plane_state->vma);
__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
if (plane_state->hw.fb)
@@ -264,6 +264,20 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
plane_state->hw.color_range = from_plane_state->uapi.color_range;
}
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->nv12_planes &= ~BIT(plane->id);
+ crtc_state->c8_planes &= ~BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
+
+ plane_state->uapi.visible = false;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -273,12 +287,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
const struct drm_framebuffer *fb = new_plane_state->hw.fb;
int ret;
- new_crtc_state->active_planes &= ~BIT(plane->id);
- new_crtc_state->nv12_planes &= ~BIT(plane->id);
- new_crtc_state->c8_planes &= ~BIT(plane->id);
- new_crtc_state->data_rate[plane->id] = 0;
- new_crtc_state->min_cdclk[plane->id] = 0;
- new_plane_state->uapi.visible = false;
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
@@ -387,7 +396,7 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
}
/* should never happen */
- WARN_ON(1);
+ drm_WARN_ON(state->base.dev, 1);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index a6bbf42bae1f..59dd1fbb02ea 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -52,5 +52,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc);
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 62f234f641de..ad4aa66fd676 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -252,14 +252,16 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
i = ARRAY_SIZE(hdmi_audio_clock);
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
- adjusted_mode->crtc_clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
+ adjusted_mode->crtc_clock);
i = 1;
}
- DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
- hdmi_audio_clock[i].clock,
- hdmi_audio_clock[i].config);
+ drm_dbg_kms(&dev_priv->drm,
+ "Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
return hdmi_audio_clock[i].config;
}
@@ -512,6 +514,124 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
mutex_unlock(&dev_priv->av_mutex);
}
+static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ unsigned int link_clks_available, link_clks_required;
+ unsigned int tu_data, tu_line, link_clks_active;
+ unsigned int h_active, h_total, hblank_delta, pixel_clk;
+ unsigned int fec_coeff, cdclk, vdsc_bpp;
+ unsigned int link_clk, lanes;
+ unsigned int hblank_rise;
+
+ h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+ h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
+ pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
+ vdsc_bpp = crtc_state->dsc.compressed_bpp;
+ cdclk = i915->cdclk.hw.cdclk;
+ /* fec = 0.972261, using a rounding multiplier of 1000000 */
+ fec_coeff = 972261;
+ link_clk = crtc_state->port_clock;
+ lanes = crtc_state->lane_count;
+
+ drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u "
+ "lanes = %u vdsc_bpp = %u cdclk = %u\n",
+ h_active, link_clk, lanes, vdsc_bpp, cdclk);
+
+ if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk))
+ return 0;
+
+ link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28;
+ link_clks_required = DIV_ROUND_UP(192000 * h_total, 1000 * pixel_clk) * (48 / lanes + 2);
+
+ if (link_clks_available > link_clks_required)
+ hblank_delta = 32;
+ else
+ hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk),
+ mul_u32_u32(link_clk, cdclk));
+
+ tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000),
+ mul_u32_u32(link_clk * lanes, fec_coeff));
+ tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff),
+ mul_u32_u32(64 * pixel_clk, 1000000));
+ link_clks_active = (tu_line - 1) * 64 + tu_data;
+
+ hblank_rise = (link_clks_active + 6 * DIV_ROUND_UP(link_clks_active, 250) + 4) * pixel_clk / link_clk;
+
+ return h_active - hblank_rise + hblank_delta;
+}
+
+static unsigned int calc_samples_room(const struct intel_crtc_state *crtc_state)
+{
+ unsigned int h_active, h_total, pixel_clk;
+ unsigned int link_clk, lanes;
+
+ h_active = crtc_state->hw.adjusted_mode.hdisplay;
+ h_total = crtc_state->hw.adjusted_mode.htotal;
+ pixel_clk = crtc_state->hw.adjusted_mode.clock;
+ link_clk = crtc_state->port_clock;
+ lanes = crtc_state->lane_count;
+
+ return ((h_total - h_active) * link_clk - 12 * pixel_clk) /
+ (pixel_clk * (48 / lanes + 2));
+}
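
For intuition, a worked example with assumed numbers (not taken from the patch): a 4k mode with h_active = 3840, h_total = 4000, a 594000 kHz pixel clock, an HBR3 link (port_clock = 810000) and 4 lanes gives:

/*
 * samples_room = ((4000 - 3840) * 810000 - 12 * 594000)
 *              / (594000 * (48 / 4 + 2))
 *              = 122472000 / 8316000
 *              = 14 (integer division)
 *
 * 14 >= 3, so enable_audio_dsc_wa() below would program 0, i.e.
 * "all samples available in buffer".
 */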
+
+static void enable_audio_dsc_wa(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ unsigned int hblank_early_prog, samples_room;
+ unsigned int val;
+
+ if (INTEL_GEN(i915) < 11)
+ return;
+
+ val = intel_de_read(i915, AUD_CONFIG_BE);
+
+ if (INTEL_GEN(i915) == 11)
+ val |= HBLANK_EARLY_ENABLE_ICL(pipe);
+ else if (INTEL_GEN(i915) >= 12)
+ val |= HBLANK_EARLY_ENABLE_TGL(pipe);
+
+ if (crtc_state->dsc.compression_enable &&
+ (crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
+ crtc_state->hw.adjusted_mode.vdisplay >= 2160)) {
+ /* Get hblank early enable value required */
+ hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state);
+ if (hblank_early_prog < 32) {
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
+ } else if (hblank_early_prog < 64) {
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
+ } else if (hblank_early_prog < 96) {
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
+ } else {
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
+ }
+
+ /* Get samples room value required */
+ samples_room = calc_samples_room(crtc_state);
+ if (samples_room < 3) {
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
+ } else {
+ /* Program 0, i.e. "All samples available in buffer" */
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
+ }
+ }
+
+ intel_de_write(i915, AUD_CONFIG_BE, val);
+}
+
+#undef ROUNDING_FACTOR
+
static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -529,6 +649,10 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
mutex_lock(&dev_priv->av_mutex);
+ /* Enable Audio WA for 4k DSC usecases */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+ enable_audio_dsc_wa(encoder, crtc_state);
+
/* Enable audio presence detect, invalidate ELD */
tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
@@ -891,7 +1015,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
if (dev_priv->audio_power_refcount++ == 0) {
- if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_de_write(dev_priv, AUD_FREQ_CNTRL,
dev_priv->audio_freq_cntrl);
drm_dbg_kms(&dev_priv->drm,
@@ -931,7 +1055,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
unsigned long cookie;
u32 tmp;
- if (!IS_GEN(dev_priv, 9))
+ if (INTEL_GEN(dev_priv) < 9)
return;
cookie = i915_audio_component_get_power(kdev);
@@ -1136,6 +1260,10 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
drm_modeset_unlock_all(&dev_priv->drm);
device_link_remove(hda_kdev, i915_kdev);
+
+ if (dev_priv->audio_power_refcount)
+ drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
+ dev_priv->audio_power_refcount);
}
static const struct component_ops i915_audio_component_bind_ops = {
@@ -1173,7 +1301,7 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
return;
}
- if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
AUD_FREQ_CNTRL);
drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 58b264bc318d..fef04e2d954e 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -8,6 +8,9 @@
#include "intel_bw.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
+#include "intel_atomic.h"
+#include "intel_pm.h"
+
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -113,6 +116,26 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+ u32 points_mask)
+{
+ int ret;
+
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_POINTS_RESTRICTED_MASK,
+ ICL_PCODE_POINTS_RESTRICTED,
+ 1);
+
+ if (ret < 0) {
+ drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi)
{
@@ -240,6 +263,16 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
break;
}
+ /*
+ * If SAGV is disabled in the BIOS, we always get 1
+ * SAGV point, but we can't send PCode commands to restrict it
+ * as they would fail and be pointless anyway.
+ */
+ if (qi.num_points == 1)
+ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ else
+ dev_priv->sagv_status = I915_SAGV_ENABLED;
+
return 0;
}
@@ -248,6 +281,11 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
{
int i;
+ /*
+ * Let's return max bw for 0 planes
+ */
+ num_planes = max(1, num_planes);
+
for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
const struct intel_bw_info *bi =
&dev_priv->max_bw[i];
@@ -277,34 +315,6 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
icl_get_bw_info(dev_priv, &icl_sa_info);
}
-static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
- int num_planes)
-{
- if (INTEL_GEN(dev_priv) >= 11) {
- /*
- * Any bw group has same amount of QGV points
- */
- const struct intel_bw_info *bi =
- &dev_priv->max_bw[0];
- unsigned int min_bw = UINT_MAX;
- int i;
-
- /*
- * FIXME with SAGV disabled maybe we can assume
- * point 1 will always be used? Seems to match
- * the behaviour observed in the wild.
- */
- for (i = 0; i < bi->num_qgv_points; i++) {
- unsigned int bw = icl_max_bw(dev_priv, num_planes, i);
-
- min_bw = min(bw, min_bw);
- }
- return min_bw;
- } else {
- return UINT_MAX;
- }
-}
-
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
/*
@@ -338,16 +348,17 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
- DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
+ drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
}
static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
@@ -374,7 +385,29 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
return data_rate;
}
-static struct intel_bw_state *
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *bw_state;
+
+ bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);
+
+ return to_intel_bw_state(bw_state);
+}
+
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *bw_state;
+
+ bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);
+
+ return to_intel_bw_state(bw_state);
+}
+
+struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -391,11 +424,16 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
- struct intel_bw_state *bw_state = NULL;
- unsigned int data_rate, max_data_rate;
+ struct intel_bw_state *new_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ unsigned int data_rate;
unsigned int num_active_planes;
struct intel_crtc *crtc;
int i, ret;
+ u32 allowed_points = 0;
+ unsigned int max_bw_point = 0, max_bw = 0;
+ unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
+ u32 mask = (1 << num_qgv_points) - 1;
/* FIXME earlier gens need some checks too */
if (INTEL_GEN(dev_priv) < 11)
@@ -420,41 +458,93 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
old_active_planes == new_active_planes)
continue;
- bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(bw_state))
- return PTR_ERR(bw_state);
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
- bw_state->data_rate[crtc->pipe] = new_data_rate;
- bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+ new_bw_state->data_rate[crtc->pipe] = new_data_rate;
+ new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
drm_dbg_kms(&dev_priv->drm,
"pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
+ new_bw_state->data_rate[crtc->pipe],
+ new_bw_state->num_active_planes[crtc->pipe]);
}
- if (!bw_state)
+ if (!new_bw_state)
return 0;
- ret = intel_atomic_lock_global_state(&bw_state->base);
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
if (ret)
return ret;
- data_rate = intel_bw_data_rate(dev_priv, bw_state);
- num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
+ data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
+ data_rate = DIV_ROUND_UP(data_rate, 1000);
- max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
+ num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);
- data_rate = DIV_ROUND_UP(data_rate, 1000);
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int max_data_rate;
- if (data_rate > max_data_rate) {
- drm_dbg_kms(&dev_priv->drm,
- "Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
- data_rate, max_data_rate, num_active_planes);
+ max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
+ /*
+ * We need to know which QGV point gives us
+ * maximum bandwidth in order to disable SAGV
+ * if we find that we exceed SAGV block time
+ * with watermarks. By this point we already
+ * have those, as they are calculated earlier in
+ * intel_atomic_check.
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = i;
+ max_bw = max_data_rate;
+ }
+ if (max_data_rate >= data_rate)
+ allowed_points |= BIT(i);
+ drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
+ i, max_data_rate, data_rate);
+ }
+
+ /*
+ * BSpec states that we should always have at least one allowed point
+ * left, so if we don't, simply reject the configuration.
+ */
+ if (allowed_points == 0) {
+ drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
+ " bandwidth %d for display configuration(%d active planes).\n",
+ data_rate, num_active_planes);
return -EINVAL;
}
+ /*
+ * If we can't enable SAGV due to the increased memory latency it may
+ * cause, leave only the single point with the highest bandwidth.
+ */
+ if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ allowed_points = BIT(max_bw_point);
+ drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
+ max_bw_point);
+ }
+ /*
+ * We store the ones which need to be masked as that is what PCode
+ * actually accepts as a parameter.
+ */
+ new_bw_state->qgv_points_mask = ~allowed_points & mask;
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * If the actual mask has changed, we need to make sure that
+ * the commits are serialized (in case this is a nomodeset,
+ * nonblocking commit).
+ */
+ if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
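
A small worked example of the masking logic above, with assumed values (three QGV points, of which only point 2 satisfies the required data rate):

/*
 * num_qgv_points = 3        -> mask           = 0b111
 * only point 2 fits         -> allowed_points = 0b100
 * qgv_points_mask = ~allowed_points & mask    = 0b011
 *
 * Points 0 and 1 are reported to PCode as restricted; if SAGV must
 * stay off, allowed_points first collapses to BIT(max_bw_point).
 */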
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index a8aa7624c5aa..bbcaaa73ec1b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -18,16 +18,43 @@ struct intel_crtc_state;
struct intel_bw_state {
struct intel_global_state base;
+ /*
+ * Contains a bit mask used to determine whether the
+ * corresponding pipe allows SAGV or not.
+ */
+ u8 pipe_sagv_reject;
+
+ /*
+ * Current QGV points mask, which restricts
+ * some particular SAGV states; not to be confused
+ * with pipe_sagv_reject above.
+ */
+ u8 qgv_points_mask;
+
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
};
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state);
+
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+ u32 points_mask);
#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index c1cce93a1c25..98ece9cd7cdd 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -460,6 +460,16 @@ static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}
+static void icl_lut_multi_seg_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, ldw);
+ entry->green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, ldw);
+}
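
To make the bit-splitting above concrete, a worked example with an assumed value: each 16-bit channel keeps its low 6 bits in the LDW register and its high 10 bits in the UDW register, and packing reverses that split.

/*
 * Example for red = 0xABCD:
 *   LDW field (low 6 bits)   -> 0x0D
 *   UDW field (high 10 bits) -> 0x2AF
 *   repacked: (0x2AF << 6) | 0x0D == 0xABCD
 */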
+
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -893,7 +903,7 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
+ /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
@@ -1630,6 +1640,24 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
}
}
+static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ return 0;
+
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ return 16;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1641,7 +1669,9 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat
else
return i9xx_gamma_precision(crtc_state);
} else {
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ return icl_gamma_precision(crtc_state);
+ else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
return glk_gamma_precision(crtc_state);
else if (IS_IRONLAKE(dev_priv))
return ilk_gamma_precision(crtc_state);
@@ -1658,9 +1688,9 @@ static bool err_check(struct drm_color_lut *lut1,
((abs((long)lut2->green - lut1->green)) <= err);
}
-static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
- struct drm_color_lut *lut2,
- int lut_size, u32 err)
+static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
{
int i;
@@ -1690,16 +1720,8 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
lut_size2 = drm_color_lut_size(blob2);
/* check sw and hw lut size */
- switch (gamma_mode) {
- case GAMMA_MODE_MODE_8BIT:
- case GAMMA_MODE_MODE_10BIT:
- if (lut_size1 != lut_size2)
- return false;
- break;
- default:
- MISSING_CASE(gamma_mode);
- return false;
- }
+ if (lut_size1 != lut_size2)
+ return false;
lut1 = blob1->data;
lut2 = blob2->data;
@@ -1707,11 +1729,16 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
err = 0xffff >> bit_precision;
/* check sw and hw lut entry to be equal */
- switch (gamma_mode) {
+ switch (gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
case GAMMA_MODE_MODE_10BIT:
- if (!intel_color_lut_entry_equal(lut1, lut2,
- lut_size2, err))
+ if (!intel_color_lut_entries_equal(lut1, lut2,
+ lut_size2, err))
+ return false;
+ break;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ if (!intel_color_lut_entries_equal(lut1, lut2,
+ 9, err))
return false;
break;
default:
@@ -1946,6 +1973,63 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
}
+static struct drm_property_blob *
+icl_read_lut_multi_segment(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < 9; i++) {
+ u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+ u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+
+ icl_lut_multi_seg_pack(&lut[i], ldw, udw);
+ }
+
+ intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
+
+ /*
+ * FIXME: readouts from the PAL_PREC_DATA register aren't giving
+ * correct values for the fine and coarse segments.
+ * Readouts are restricted to the super fine segment only for now.
+ */
+
+ return blob;
+}
+
+static void icl_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ return;
+
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc);
+ break;
+ default:
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+ }
+}
+
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1989,6 +2073,7 @@ void intel_color_init(struct intel_crtc *crtc)
if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
+ dev_priv->display.read_luts = icl_read_luts;
} else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
dev_priv->display.load_luts = glk_load_luts;
dev_priv->display.read_luts = glk_read_luts;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 903e49659f56..406e96785c76 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
+#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
@@ -123,6 +124,8 @@ int intel_connector_register(struct drm_connector *connector)
goto err_backlight;
}
+ intel_connector_debugfs_add(connector);
+
return 0;
err_backlight:
@@ -290,7 +293,7 @@ intel_attach_colorspace_property(struct drm_connector *connector)
return;
break;
default:
- DRM_DEBUG_KMS("Colorspace property not supported\n");
+ MISSING_CASE(connector->connector_type);
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 78f9b6cde810..2f5b9a4baafd 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -203,27 +203,31 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
intel_de_write(dev_priv, crt->adpa_reg, adpa);
}
-static void intel_disable_crt(struct intel_encoder *encoder,
+static void intel_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
}
-static void pch_disable_crt(struct intel_encoder *encoder,
+static void pch_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}
-static void pch_post_disable_crt(struct intel_encoder *encoder,
+static void pch_post_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_crt(encoder, old_crtc_state, old_conn_state);
+ intel_disable_crt(state, encoder, old_crtc_state, old_conn_state);
}
-static void hsw_disable_crt(struct intel_encoder *encoder,
+static void hsw_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -234,7 +238,8 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
-static void hsw_post_disable_crt(struct intel_encoder *encoder,
+static void hsw_post_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -250,19 +255,20 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
intel_ddi_disable_pipe_clock(old_crtc_state);
- pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
+ pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
lpt_disable_pch_transcoder(dev_priv);
lpt_disable_iclkip(dev_priv);
- intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+ intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state);
drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
-static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
+static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -273,7 +279,8 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
-static void hsw_pre_enable_crt(struct intel_encoder *encoder,
+static void hsw_pre_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -287,10 +294,11 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
hsw_fdi_link_train(encoder, crtc_state);
- intel_ddi_enable_pipe_clock(crtc_state);
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
}
-static void hsw_enable_crt(struct intel_encoder *encoder,
+static void hsw_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -300,6 +308,8 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+ intel_ddi_enable_transcoder_func(encoder, crtc_state);
+
intel_enable_pipe(crtc_state);
lpt_pch_enable(crtc_state);
@@ -314,7 +324,8 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
-static void intel_enable_crt(struct intel_encoder *encoder,
+static void intel_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -594,7 +605,8 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector,
edid = drm_get_edid(connector, i2c);
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
- DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ drm_dbg_kms(connector->dev,
+ "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 52db7852827b..aa22465bb56e 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -568,7 +568,7 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
};
-static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = {
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
/* NT mV Trans mV db */
{ 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
@@ -583,23 +583,51 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[]
};
struct icl_mg_phy_ddi_buf_trans {
- u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_11_6;
+ u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_17_12;
};
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
+ /* Voltage swing pre-emphasis */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x21, 0x00, 0x00 }, /* 1 0 */
+ { 0x2B, 0x00, 0x08 }, /* 1 1 */
+ { 0x30, 0x00, 0x0F }, /* 1 2 */
+ { 0x31, 0x00, 0x03 }, /* 2 0 */
+ { 0x34, 0x00, 0x0B }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
/* Voltage swing pre-emphasis */
- { 0x0, 0x1B, 0x00 }, /* 0 0 */
- { 0x0, 0x23, 0x08 }, /* 0 1 */
- { 0x0, 0x2D, 0x12 }, /* 0 2 */
- { 0x0, 0x00, 0x00 }, /* 0 3 */
- { 0x0, 0x23, 0x00 }, /* 1 0 */
- { 0x0, 0x2B, 0x09 }, /* 1 1 */
- { 0x0, 0x2E, 0x11 }, /* 1 2 */
- { 0x0, 0x2F, 0x00 }, /* 2 0 */
- { 0x0, 0x33, 0x0C }, /* 2 1 */
- { 0x0, 0x00, 0x00 }, /* 3 0 */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x26, 0x00, 0x00 }, /* 1 0 */
+ { 0x2C, 0x00, 0x07 }, /* 1 1 */
+ { 0x33, 0x00, 0x0C }, /* 1 2 */
+ { 0x2E, 0x00, 0x00 }, /* 2 0 */
+ { 0x36, 0x00, 0x09 }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
+ /* HDMI Preset VS Pre-emph */
+ { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */
+ { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */
+ { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */
+ { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */
+ { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */
+ { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */
+ { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */
+ { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */
+ { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */
+ { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */
};
struct tgl_dkl_phy_ddi_buf_trans {
@@ -943,14 +971,29 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return icl_combo_phy_ddi_translations_dp_hbr2;
}
+static const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
+{
+ if (type == INTEL_OUTPUT_HDMI) {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
+ return icl_mg_phy_ddi_translations_hdmi;
+ } else if (rate > 270000) {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
+ return icl_mg_phy_ddi_translations_hbr2_hbr3;
+ }
+
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
+ return icl_mg_phy_ddi_translations_rbr_hbr;
+}
+
static const struct cnl_ddi_buf_trans *
ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
int *n_entries)
{
- if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP &&
- rate > 270000) {
- *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
- return ehl_combo_phy_ddi_translations_hbr2_hbr3;
+ if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
+ *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
+ return ehl_combo_phy_ddi_translations_dp;
}
return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
@@ -989,7 +1032,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
- n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+ &n_entries);
default_entry = n_entries - 1;
} else if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
@@ -1103,7 +1147,8 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
return;
}
- DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+ drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n",
+ port_name(port));
}
static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
@@ -1216,7 +1261,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE);
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
* DDI E does not support port reversal, the functionality is
@@ -1250,7 +1298,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI link training done on step %d\n", i);
break;
}
@@ -1259,7 +1308,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
* Results in less fireworks from the state checker.
*/
if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) {
- DRM_ERROR("FDI link training failed!\n");
+ drm_err(&dev_priv->drm, "FDI link training failed!\n");
break;
}
@@ -1291,7 +1340,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
/* Enable normal pixel sending for FDI */
intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE);
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
}
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -1305,27 +1357,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
}
-static struct intel_encoder *
-intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder, *ret = NULL;
- int num_encoders = 0;
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- ret = encoder;
- num_encoders++;
- }
-
- if (num_encoders != 1)
- drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
- num_encoders,
- pipe_name(crtc->pipe));
-
- BUG_ON(ret == NULL);
- return ret;
-}
-
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -1451,6 +1482,14 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
}
+static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
+{
+ if (master_transcoder == TRANSCODER_EDP)
+ return 0;
+ else
+ return master_transcoder + 1;
+}
+
/*
* Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
*
@@ -1458,10 +1497,10 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
* intel_ddi_config_transcoder_func().
*/
static u32
-intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
+intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1551,20 +1590,46 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
+ if (IS_GEN_RANGE(dev_priv, 8, 10) &&
+ crtc_state->master_transcoder != INVALID_TRANSCODER) {
+ u8 master_select =
+ bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
+
+ temp |= TRANS_DDI_PORT_SYNC_ENABLE |
+ TRANS_DDI_PORT_SYNC_MASTER_SELECT(master_select);
+ }
+
return temp;
}
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 temp;
+ u32 ctl;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ enum transcoder master_transcoder = crtc_state->master_transcoder;
+ u32 ctl2 = 0;
+
+ if (master_transcoder != INVALID_TRANSCODER) {
+ u8 master_select =
+ bdw_trans_port_sync_master_select(master_transcoder);
+
+ ctl2 |= PORT_SYNC_MODE_ENABLE |
+ PORT_SYNC_MODE_MASTER_SELECT(master_select);
+ }
- temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
+ }
+
+ ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
- temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
}
/*
@@ -1572,16 +1637,17 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
* bit.
*/
static void
-intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
+intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 temp;
+ u32 ctl;
- temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
- temp &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
+ ctl &= ~TRANS_DDI_FUNC_ENABLE;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
}
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
@@ -1589,24 +1655,35 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 val;
+ u32 ctl;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder), 0);
- val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~TRANS_DDI_FUNC_ENABLE;
+ ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ ctl &= ~TRANS_DDI_FUNC_ENABLE;
+
+ if (IS_GEN_RANGE(dev_priv, 8, 10))
+ ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
+ TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
if (INTEL_GEN(dev_priv) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
- val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
}
} else {
- val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
+ ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Quirk Increase DDI disabled time\n");
/* Quirk time at 100ms for reliable operation */
msleep(100);
}
@@ -1667,7 +1744,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
@@ -1729,7 +1806,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
tmp = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -1787,20 +1864,23 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
- encoder->base.base.id, encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "No pipe for [ENCODER:%d:%s] found\n",
+ encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
- encoder->base.base.id, encoder->base.name,
- *pipe_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask);
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
- encoder->base.base.id, encoder->base.name,
- *pipe_mask, mst_pipe_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask, mst_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -1810,9 +1890,9 @@ out:
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
- "(PHY_CTL %08x)\n", encoder->base.base.id,
- encoder->base.name, tmp);
+ drm_err(&dev_priv->drm,
+ "[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
+ encoder->base.base.id, encoder->base.name, tmp);
}
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -1834,7 +1914,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
return true;
}
-static inline enum intel_display_power_domain
+static enum intel_display_power_domain
intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
{
/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -1893,11 +1973,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
intel_dsc_power_domain(crtc_state));
}
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1978,7 +2058,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
- DRM_ERROR("Invalid I_boost value %u\n", iboost);
+ drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
return;
}
@@ -2037,7 +2117,8 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
- n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ icl_get_mg_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
} else if (IS_CANNONLAKE(dev_priv)) {
if (encoder->type == INTEL_OUTPUT_EDP)
cnl_get_buf_trans_edp(dev_priv, &n_entries);
@@ -2237,7 +2318,9 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
return;
if (level >= n_entries) {
- DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
+ drm_dbg_kms(&dev_priv->drm,
+ "DDI translation not found for level %d. Using %d instead.",
+ level, n_entries - 1);
level = n_entries - 1;
}
@@ -2350,21 +2433,28 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- int link_clock,
- u32 level)
+ int link_clock, u32 level,
+ enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val;
- int ln;
+ int ln, rate = 0;
+
+ if (type != INTEL_OUTPUT_HDMI) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ rate = intel_dp->link_rate;
+ }
- n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
- ddi_translations = icl_mg_phy_ddi_translations;
+ ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate,
+ &n_entries);
/* The table does not have values for level 3 and level 9. */
if (level >= n_entries || level == 3 || level == 9) {
- DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 2);
+ drm_dbg_kms(&dev_priv->drm,
+ "DDI translation not found for level %d. Using %d instead.",
+ level, n_entries - 2);
level = n_entries - 2;
}
@@ -2483,7 +2573,8 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
if (intel_phy_is_combo(dev_priv, phy))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
- icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
+ icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level,
+ type);
}
static void
@@ -2550,8 +2641,9 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
-static u32 translate_signal_level(int signal_levels)
+static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i;
for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
@@ -2559,8 +2651,9 @@ static u32 translate_signal_level(int signal_levels)
return i;
}
- WARN(1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
- signal_levels);
+ drm_WARN(&i915->drm, 1,
+ "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+ signal_levels);
return 0;
}
@@ -2571,46 +2664,73 @@ static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
- return translate_signal_level(signal_levels);
+ return translate_signal_level(intel_dp, signal_levels);
}
-u32 bxt_signal_levels(struct intel_dp *intel_dp)
+static void
+tgl_set_signal_levels(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- struct intel_encoder *encoder = &dport->base;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int level = intel_ddi_dp_level(intel_dp);
- if (INTEL_GEN(dev_priv) >= 12)
- tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
- level, encoder->type);
- else if (INTEL_GEN(dev_priv) >= 11)
- icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
- level, encoder->type);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_vswing_sequence(encoder, level, encoder->type);
- else
- bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+ tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+ level, encoder->type);
+}
- return 0;
+static void
+icl_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int level = intel_ddi_dp_level(intel_dp);
+
+ icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+ level, encoder->type);
}
-u32 ddi_signal_levels(struct intel_dp *intel_dp)
+static void
+cnl_set_signal_levels(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- struct intel_encoder *encoder = &dport->base;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int level = intel_ddi_dp_level(intel_dp);
+ cnl_ddi_vswing_sequence(encoder, level, encoder->type);
+}
+
+static void
+bxt_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int level = intel_ddi_dp_level(intel_dp);
+
+ bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+}
+
+static void
+hsw_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int level = intel_ddi_dp_level(intel_dp);
+ enum port port = encoder->port;
+ u32 signal_levels;
+
+ signal_levels = DDI_BUF_TRANS_SELECT(level);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~DDI_BUF_EMP_MASK;
+ intel_dp->DP |= signal_levels;
+
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, encoder->type);
- return DDI_BUF_TRANS_SELECT(level);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
}
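Editorial note: these per-platform hooks replace the old bxt_signal_levels()/ddi_signal_levels() helpers, which branched on INTEL_GEN() at every call; the hook is now chosen once in intel_ddi_init_dp_connector() further below. A reduced model of the dispatch pattern, with hypothetical hook names and a simplified struct:

#include <stdio.h>

struct dp_port {
	void (*set_signal_levels)(struct dp_port *port);
	int level;
};

static void tgl_hook(struct dp_port *p) { printf("tgl, level %d\n", p->level); }
static void hsw_hook(struct dp_port *p) { printf("hsw, level %d\n", p->level); }

static void dp_port_init(struct dp_port *p, int gen)
{
	/* chosen once at init, like the hooks installed in the patch */
	p->set_signal_levels = gen >= 12 ? tgl_hook : hsw_hook;
}

int main(void)
{
	struct dp_port p = { .level = 2 };

	dp_port_init(&p, 12);
	p.set_signal_levels(&p);	/* dispatches to tgl_hook */
	return 0;
}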
-static inline
-u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
- enum phy phy)
+static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
+ enum phy phy)
{
if (intel_phy_is_combo(dev_priv, phy)) {
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
@@ -2698,8 +2818,9 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
continue;
- DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- phy_name(phy));
+ drm_notice(&dev_priv->drm,
+ "PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ phy_name(phy));
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
}
@@ -2936,11 +3057,14 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
if (!crtc_state->fec_enable)
return;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
- DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to set FEC_READY in the sink\n");
}
static void intel_ddi_enable_fec(struct intel_encoder *encoder,
@@ -2960,7 +3084,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
- DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for FEC Enable Status\n");
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -2980,7 +3105,8 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
}
-static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3048,13 +3174,13 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
* 7.a Configure Transcoder Clock Select to direct the Port clock to the
* Transcoder.
*/
- intel_ddi_enable_pipe_clock(crtc_state);
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
/*
* 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
* Transport Select
*/
- intel_ddi_config_transcoder_func(crtc_state);
+ intel_ddi_config_transcoder_func(encoder, crtc_state);
/*
* 7.c Configure & enable DP_TP_CTL with link training pattern 1
@@ -3120,7 +3246,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dsc_enable(encoder, crtc_state);
}
-static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3185,21 +3312,22 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_enable_fec(encoder, crtc_state);
if (!is_mst)
- intel_ddi_enable_pipe_clock(crtc_state);
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
intel_dsc_enable(encoder, crtc_state);
}
-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (INTEL_GEN(dev_priv) >= 12)
- tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
- hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
/* MST will set the MSA after allocating the Virtual Channel
 * from the MST encoder's pre_enable callback.
@@ -3211,7 +3339,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
}
}
-static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3244,14 +3373,15 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
- intel_ddi_enable_pipe_clock(crtc_state);
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
intel_dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
}
-static void intel_ddi_pre_enable(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3280,12 +3410,14 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
+ intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
+ conn_state);
} else {
struct intel_lspcon *lspcon =
enc_to_intel_lspcon(encoder);
- intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ intel_ddi_pre_enable_dp(state, encoder, crtc_state,
+ conn_state);
if (lspcon->active) {
struct intel_digital_port *dig_port =
enc_to_dig_port(encoder);
@@ -3328,7 +3460,8 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
intel_wait_ddi_buf_idle(dev_priv, port);
}
-static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3339,6 +3472,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
INTEL_OUTPUT_DP_MST);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ intel_dp_set_infoframes(encoder, false, old_crtc_state, old_conn_state);
+
/*
* Power down sink before disabling the port, otherwise we end
* up getting interrupts from the sink on detecting link loss.
@@ -3384,7 +3519,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_ddi_clk_disable(encoder);
}
-static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3407,22 +3543,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
- return;
-
- DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
- transcoder_name(old_crtc_state->cpu_transcoder));
-
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
-}
-
-static void intel_ddi_post_disable(struct intel_encoder *encoder,
+static void intel_ddi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3436,9 +3558,6 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
intel_disable_pipe(old_crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_disable_transcoder_port_sync(old_crtc_state);
-
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);
@@ -3463,11 +3582,11 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
*/
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- intel_ddi_post_disable_hdmi(encoder,
- old_crtc_state, old_conn_state);
+ intel_ddi_post_disable_hdmi(state, encoder, old_crtc_state,
+ old_conn_state);
else
- intel_ddi_post_disable_dp(encoder,
- old_crtc_state, old_conn_state);
+ intel_ddi_post_disable_dp(state, encoder, old_crtc_state,
+ old_conn_state);
if (INTEL_GEN(dev_priv) >= 11)
icl_unmap_plls_to_ports(encoder);
@@ -3480,7 +3599,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
intel_tc_port_put_link(dig_port);
}
-void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3514,7 +3634,43 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}
-static void intel_enable_ddi_dp(struct intel_encoder *encoder,
+static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ if (!crtc_state->sync_mode_slaves_mask)
+ return;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *slave_encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ struct intel_crtc *slave_crtc = to_intel_crtc(conn_state->crtc);
+ const struct intel_crtc_state *slave_crtc_state;
+
+ if (!slave_crtc)
+ continue;
+
+ slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+
+ if (slave_crtc_state->master_transcoder !=
+ crtc_state->cpu_transcoder)
+ continue;
+
+ intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder));
+ }
+
+ usleep_range(200, 400);
+
+ intel_dp_stop_link_train(enc_to_intel_dp(encoder));
+}
+
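Editorial note: the ordering in trans_port_sync_stop_link_train() is the point of the function: every slave transcoder leaves the training pattern first, a short settle delay follows, and only then does the master stop. A simplified stand-alone model, with an array standing in for the atomic-state walk:

#include <stdio.h>
#include <unistd.h>

struct crtc { const char *name; };

static void stop_link_train(const struct crtc *c)
{
	printf("stop link training on %s\n", c->name);
}

static void port_sync_stop_link_train(const struct crtc *master,
				      const struct crtc *slaves, int n_slaves)
{
	for (int i = 0; i < n_slaves; i++)	/* all slaves first */
		stop_link_train(&slaves[i]);

	usleep(300);	/* settle window, mirroring the 200-400us above */

	stop_link_train(master);		/* the master goes last */
}

int main(void)
{
	struct crtc master = { "crtc A" };
	struct crtc slaves[] = { { "crtc B" }, { "crtc C" } };

	port_sync_stop_link_train(&master, slaves, 2);
	return 0;
}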
+static void intel_enable_ddi_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3526,13 +3682,14 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(crtc_state, conn_state);
- intel_psr_enable(intel_dp, crtc_state);
- intel_dp_vsc_enable(intel_dp, crtc_state, conn_state);
- intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state);
+ intel_psr_enable(intel_dp, crtc_state, conn_state);
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
intel_audio_codec_enable(encoder, crtc_state, conn_state);
+
+ trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
static i915_reg_t
@@ -3555,7 +3712,8 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
return CHICKEN_TRANS(trans[port]);
}
-static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3567,9 +3725,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
- "scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
@@ -3617,20 +3775,23 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
-static void intel_enable_ddi(struct intel_encoder *encoder,
+static void intel_enable_ddi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+
+ intel_ddi_enable_transcoder_func(encoder, crtc_state);
intel_enable_pipe(crtc_state);
intel_crtc_vblank_on(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
+ intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
else
- intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+ intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
/* Enable hdcp if it's desired */
if (conn_state->content_protection ==
@@ -3640,7 +3801,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
(u8)conn_state->hdcp_content_type);
}
-static void intel_disable_ddi_dp(struct intel_encoder *encoder,
+static void intel_disable_ddi_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3660,10 +3822,12 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
false);
}
-static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = old_conn_state->connector;
if (old_crtc_state->has_audio)
@@ -3672,23 +3836,28 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
}
-static void intel_disable_ddi(struct intel_encoder *encoder,
+static void intel_disable_ddi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
+ intel_disable_ddi_hdmi(state, encoder, old_crtc_state,
+ old_conn_state);
else
- intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
+ intel_disable_ddi_dp(state, encoder, old_crtc_state,
+ old_conn_state);
}
-static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3696,21 +3865,24 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
intel_ddi_set_dp_msa(crtc_state, conn_state);
- intel_psr_update(intel_dp, crtc_state);
+ intel_psr_update(intel_dp, crtc_state, conn_state);
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
- intel_panel_update_backlight(encoder, crtc_state, conn_state);
+ intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
}
-static void intel_ddi_update_pipe(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+ intel_ddi_update_pipe_dp(state, encoder, crtc_state,
+ conn_state);
- intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
+ intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state);
}
static void
@@ -3722,7 +3894,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
int required_lanes = crtc_state ? crtc_state->lane_count : 1;
- WARN_ON(crtc && crtc->active);
+ drm_WARN_ON(state->base.dev, crtc && crtc->active);
intel_tc_port_get_link(enc_to_dig_port(encoder),
required_lanes);
@@ -3739,7 +3911,8 @@ intel_ddi_update_complete(struct intel_atomic_state *state,
}
static void
-intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3813,6 +3986,74 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600);
}
+static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
+ u8 dp_train_pat)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+
+ if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+ temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+ else
+ temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (dp_train_pat & train_pat_mask) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ case DP_TRAINING_PATTERN_4:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+ break;
+ }
+
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
+
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+}
+
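Editorial note: intel_ddi_set_link_train() above uses the usual read-modify-write idiom: clear the whole multi-bit link-training field before ORing in the new pattern, so unrelated bits in the register survive. A self-contained sketch with placeholder field positions (not the real DP_TP_CTL layout):

#include <stdint.h>
#include <assert.h>

#define LINK_TRAIN_MASK   (7u << 8)			/* placeholder field */
#define LINK_TRAIN_PAT(n) ((uint32_t)(n) << 8)

static uint32_t set_train_pattern(uint32_t reg, unsigned int pat)
{
	reg &= ~LINK_TRAIN_MASK;	/* never OR into a stale field */
	reg |= LINK_TRAIN_PAT(pat);
	return reg;
}

int main(void)
{
	uint32_t reg = LINK_TRAIN_PAT(2) | 1;	/* old pattern + another bit */

	reg = set_train_pattern(reg, 4);
	assert((reg & LINK_TRAIN_MASK) == LINK_TRAIN_PAT(4));
	assert(reg & 1);			/* unrelated bits preserved */
	return 0;
}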
+static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+ val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
+
+ /*
+ * Until TGL, PORT_A can only drive eDP in SST mode. There the only
+ * reason we need to set idle transmission mode is to work around a
+ * HW issue where we enable the pipe while not in idle link-training
+ * mode. In this case there is a requirement to wait for a minimum
+ * number of idle patterns to be sent.
+ */
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
+ return;
+
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_IDLE_DONE, 1))
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DP idle patterns\n");
+}
+
static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
@@ -3839,6 +4080,66 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
crtc_state->min_voltage_level = 2;
}
+static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 master_select;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ u32 ctl2 = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+ if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
+ } else {
+ u32 ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = REG_FIELD_GET(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, ctl);
+ }
+
+ if (master_select == 0)
+ return TRANSCODER_EDP;
+ else
+ return master_select - 1;
+}
+
+static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
+ enum transcoder cpu_transcoder;
+
+ crtc_state->master_transcoder =
+ bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t trans_wakeref;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+
+ if (!trans_wakeref)
+ continue;
+
+ if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ crtc_state->cpu_transcoder)
+ crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+ intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ }
+
+ drm_WARN_ON(&dev_priv->drm,
+ crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
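Editorial note: bdw_get_trans_port_sync_config() above inverts the relationship recorded in hardware: each transcoder only knows which master it follows, so a master's slave mask must be rebuilt by polling every transcoder. A reduced model of that collection step, minus the power-domain handling and with a fixed table standing in for register readout:

#include <stdint.h>
#include <stdio.h>

#define N_TRANS 4

static int master_of[N_TRANS] = { -1, 0, 0, -1 };	/* B and C follow A */

static uint32_t slaves_mask_for(int me)
{
	uint32_t mask = 0;

	for (int t = 0; t < N_TRANS; t++)
		if (master_of[t] == me)
			mask |= 1u << t;
	return mask;
}

int main(void)
{
	printf("slaves of A: %#x\n", (unsigned)slaves_mask_for(0)); /* 0x6 */
	return 0;
}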
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -3930,11 +4231,15 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->fec_enable =
intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
- encoder->base.base.id, encoder->base.name,
- pipe_config->fec_enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] Fec status: %u\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_config->fec_enable);
}
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -3946,6 +4251,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
intel_dp_get_m_n(intel_crtc, pipe_config);
+
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
break;
default:
break;
@@ -3969,8 +4277,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
- DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
@@ -3996,6 +4305,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ bdw_get_trans_port_sync_config(pipe_config);
+
+ intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
+ intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
}
static enum intel_output_type
@@ -4025,7 +4340,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
enum port port = encoder->port;
int ret;
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4097,7 +4412,11 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
u8 transcoders = 0;
int i;
- if (INTEL_GEN(dev_priv) < 11)
+ /*
+ * We don't enable port sync on BDW due to missing workarounds and
+ * due to not having adjusted the modeset sequence appropriately.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4129,12 +4448,13 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]",
- encoder->base.base.id, encoder->base.name,
- crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]",
+ encoder->base.base.id, encoder->base.name,
+ crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
if (connector->has_tile)
port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state,
@@ -4187,6 +4507,20 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_dig_port->dp.prepare_link_retrain =
intel_ddi_prepare_link_retrain;
+ intel_dig_port->dp.set_link_train = intel_ddi_set_link_train;
+ intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ intel_dig_port->dp.set_signal_levels = icl_set_signal_levels;
+ else if (IS_CANNONLAKE(dev_priv))
+ intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels;
+ else if (IS_GEN9_LP(dev_priv))
+ intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels;
+ else
+ intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+
if (INTEL_GEN(dev_priv) < 12) {
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
@@ -4278,7 +4612,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+ drm_err(&dev_priv->drm, "Failed to read TMDS config: %d\n",
+ ret);
return 0;
}
@@ -4302,15 +4637,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
static enum intel_hotplug_state
intel_ddi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool is_tc = intel_phy_is_tc(i915, phy);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
- state = intel_encoder_hotplug(encoder, connector, irq_received);
+ state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -4348,14 +4685,45 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
* valid EDID. To solve this schedule another detection cycle if this
* time around we didn't detect any change in the sink's connection
* status.
+ *
+ * Type-c connectors which get their HPD signal deasserted then
+ * reasserted, without unplugging/replugging the sink from the
+ * connector, introduce a delay until the AUX channel communication
+ * becomes functional. Retry the detection for 5 seconds on type-c
+ * connectors to account for this delay.
*/
- if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
+ if (state == INTEL_HOTPLUG_UNCHANGED &&
+ connector->hotplug_retries < (is_tc ? 5 : 1) &&
!dig_port->dp.is_mst)
state = INTEL_HOTPLUG_RETRY;
return state;
}
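Editorial note: the retry budget above is 5 for Type-C ports (to ride out the post-HPD AUX-channel delay) and 1 otherwise. The kernel reschedules detection rather than looping, but the policy can be modelled as a loop; the detect() stub below is hypothetical:

#include <stdbool.h>
#include <stdio.h>

enum hotplug_state { HOTPLUG_UNCHANGED, HOTPLUG_CHANGED };

/* stand-in probe: reports a change only from the third attempt on */
static enum hotplug_state detect(int attempt)
{
	return attempt >= 2 ? HOTPLUG_CHANGED : HOTPLUG_UNCHANGED;
}

static enum hotplug_state hotplug(bool is_tc)
{
	int budget = is_tc ? 5 : 1;	/* retries allowed, as in the patch */
	enum hotplug_state state = HOTPLUG_UNCHANGED;

	for (int attempt = 0; attempt <= budget; attempt++) {
		state = detect(attempt);
		if (state != HOTPLUG_UNCHANGED)
			break;
	}
	return state;
}

int main(void)
{
	/* Type-C keeps retrying long enough to see the change; fixed ports don't */
	printf("type-c: %d, fixed: %d\n", hotplug(true), hotplug(false));
	return 0;
}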
+static bool lpt_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, SDEISR) & bit;
+}
+
+static bool hsw_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, DEISR) & bit;
+}
+
+static bool bdw_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+}
+
static struct intel_connector *
intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
{
@@ -4424,7 +4792,8 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
* so we use the proper lane count for our calculations.
*/
if (intel_ddi_a_force_4_lanes(intel_dport)) {
- DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Forcing DDI_A_4_LANES for port A\n");
intel_dport->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
@@ -4452,12 +4821,14 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
init_dp = true;
init_lspcon = true;
init_hdmi = false;
- DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ port_name(port));
}
if (!init_dp && !init_hdmi) {
- DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
+ port_name(port));
return;
}
@@ -4536,17 +4907,36 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
if (init_lspcon) {
if (lspcon_init(intel_dig_port))
/* TODO: handle hdmi info frame part */
- DRM_DEBUG_KMS("LSPCON init success on port %c\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "LSPCON init success on port %c\n",
+ port_name(port));
else
/*
* LSPCON init failed, but DP init was successful, so
* let's try to drive it as a DP++ port.
*/
- DRM_ERROR("LSPCON init failed on port %c\n",
+ drm_err(&dev_priv->drm,
+ "LSPCON init failed on port %c\n",
port_name(port));
}
+ if (INTEL_GEN(dev_priv) >= 11) {
+ if (intel_phy_is_tc(dev_priv, phy))
+ intel_dig_port->connected = intel_tc_port_connected;
+ else
+ intel_dig_port->connected = lpt_digital_port_connected;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ if (port == PORT_A || IS_GEN9_LP(dev_priv))
+ intel_dig_port->connected = bdw_digital_port_connected;
+ else
+ intel_dig_port->connected = lpt_digital_port_connected;
+ } else {
+ if (port == PORT_A)
+ intel_dig_port->connected = hsw_digital_port_connected;
+ else
+ intel_dig_port->connected = lpt_digital_port_connected;
+ }
+
intel_infoframe_init(intel_dig_port);
return;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 55fd72b901fe..fbdf8ddde486 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -17,16 +17,19 @@ struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 346846609f45..9ea1a397d1b5 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -238,9 +238,9 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
-static inline u32 /* units of 100MHz */
-intel_fdi_link_freq(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
+/* units of 100MHz */
+static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
{
if (HAS_DDI(dev_priv))
return pipe_config->port_clock; /* SPLL */
@@ -525,7 +525,7 @@ skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
-/* Wa_2006604312:icl */
+/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
@@ -544,17 +544,23 @@ needs_modeset(const struct intel_crtc_state *state)
return drm_atomic_crtc_needs_modeset(&state->uapi);
}
-bool
-is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+static bool
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
- return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
- crtc_state->sync_mode_slaves_mask);
+ return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
static bool
-is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
+is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->master_transcoder != INVALID_TRANSCODER;
+ return crtc_state->sync_mode_slaves_mask != 0;
+}
+
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+ return is_trans_port_sync_master(crtc_state) ||
+ is_trans_port_sync_slave(crtc_state);
}
/*
@@ -620,45 +626,43 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot / 5;
}
-#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
-
/*
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
{
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid("n out of range\n");
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid("p1 out of range\n");
- if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- INTELPllInvalid("m2 out of range\n");
- if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- INTELPllInvalid("m1 out of range\n");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ return false;
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ return false;
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ return false;
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ return false;
if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
if (clock->m1 <= clock->m2)
- INTELPllInvalid("m1 <= m2\n");
+ return false;
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
!IS_GEN9_LP(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
+ return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid("m out of range\n");
+ return false;
}
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid("vco out of range\n");
+ return false;
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
* connector, etc., rather than just a single range.
*/
if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid("dot out of range\n");
+ return false;
return true;
}
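Editorial note: the INTELPllInvalid() macro removed above expanded to a hidden "return false" at the call site, which is why the patch replaces it with explicit ifs that read the same but make the control flow visible. A toy version of the same validation shape, with made-up limits:

#include <stdbool.h>

struct range { int min, max; };

static bool in_range(int v, struct range r)
{
	return v >= r.min && v <= r.max;
}

static bool clock_is_valid(int n, int m)
{
	struct range n_lim = { 1, 8 }, m_lim = { 2, 256 };

	if (!in_range(n, n_lim))	/* was: INTELPllInvalid("n out of range\n") */
		return false;
	if (!in_range(m, m_lim))
		return false;

	return true;
}

int main(void)
{
	return clock_is_valid(4, 100) ? 0 : 1;
}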
@@ -725,7 +729,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -781,7 +785,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -842,7 +846,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -939,7 +943,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -1008,7 +1012,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
+ if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
continue;
if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -1969,16 +1973,16 @@ static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
{
- WARN_ON(!is_ccs_modifier(fb->modifier) ||
- (main_plane && main_plane >= fb->format->num_planes / 2));
+ drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ (main_plane && main_plane >= fb->format->num_planes / 2));
return fb->format->num_planes / 2 + main_plane;
}
static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
{
- WARN_ON(!is_ccs_modifier(fb->modifier) ||
- ccs_plane < fb->format->num_planes / 2);
+ drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ ccs_plane < fb->format->num_planes / 2);
return ccs_plane - fb->format->num_planes / 2;
}
@@ -2910,6 +2914,7 @@ intel_fb_plane_get_subsampling(int *hsub, int *vsub,
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
int main_plane;
int hsub, vsub;
@@ -2938,7 +2943,8 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
* x/y offsets must match between CCS and the main surface.
*/
if (main_x != ccs_x || main_y != ccs_y) {
- DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ drm_dbg_kms(&i915->drm,
+ "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
main_x, main_y,
ccs_x, ccs_y,
intel_fb->normal[main_plane].x,
@@ -2986,7 +2992,7 @@ setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 0;
- if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
+ if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
return 0;
rot_info->plane[plane] = *plane_info;
@@ -3336,6 +3342,8 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_RGB565;
case PLANE_CTL_FORMAT_NV12:
return DRM_FORMAT_NV12;
+ case PLANE_CTL_FORMAT_XYUV:
+ return DRM_FORMAT_XYUV8888;
case PLANE_CTL_FORMAT_P010:
return DRM_FORMAT_P010;
case PLANE_CTL_FORMAT_P012:
@@ -4580,6 +4588,8 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
return PLANE_CTL_FORMAT_XRGB_16161616F;
+ case DRM_FORMAT_XYUV8888:
+ return PLANE_CTL_FORMAT_XYUV;
case DRM_FORMAT_YUYV:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
case DRM_FORMAT_YVYU:
@@ -4998,37 +5008,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
-static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 trans_ddi_func_ctl2_val;
- u8 master_select;
-
- /*
- * Configure the master select and enable Transcoder Port Sync for
- * Slave CRTCs transcoder.
- */
- if (crtc_state->master_transcoder == INVALID_TRANSCODER)
- return;
-
- if (crtc_state->master_transcoder == TRANSCODER_EDP)
- master_select = 0;
- else
- master_select = crtc_state->master_transcoder + 1;
-
- /* Set the master select bits for Tranascoder Port Sync */
- trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
- PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
- PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
- /* Enable Transcoder Port Sync */
- trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
-
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
- trans_ddi_func_ctl2_val);
-}
-
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -6110,30 +6089,26 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
-/**
- * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
- *
- * @state: crtc's scaler state
- *
- * Return
- * 0 - scaler_usage updated successfully
- * error - requested scaling cannot be supported or other error condition
- */
-int skl_update_scaler_crtc(struct intel_crtc_state *state)
+static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
{
- const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
- bool need_scaler = false;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int width, height;
- if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- state->pch_pfit.enabled)
- need_scaler = true;
+ if (crtc_state->pch_pfit.enabled) {
+ width = drm_rect_width(&crtc_state->pch_pfit.dst);
+ height = drm_rect_height(&crtc_state->pch_pfit.dst);
+ } else {
+ width = adjusted_mode->crtc_hdisplay;
+ height = adjusted_mode->crtc_vdisplay;
+ }
- return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id,
- state->pipe_src_w, state->pipe_src_h,
- adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_vdisplay, NULL, 0,
- need_scaler);
+ return skl_update_scaler(crtc_state, !crtc_state->hw.active,
+ SKL_CRTC_INDEX,
+ &crtc_state->scaler_state.scaler_id,
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ width, height, NULL, 0,
+ crtc_state->pch_pfit.enabled);
}
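Editorial note: the reworked skl_update_scaler_crtc() sizes the scaler output from the panel-fitter destination rectangle when pfit is enabled, falling back to the full timings otherwise. That selection, reduced to a sketch with stand-in types:

#include <stdio.h>

struct size { int w, h; };

static struct size scaler_dst(int pfit_enabled, struct size pfit_win,
			      struct size mode)
{
	return pfit_enabled ? pfit_win : mode;
}

int main(void)
{
	struct size mode = { 1920, 1080 }, pfit = { 1600, 900 };
	struct size dst = scaler_dst(1, pfit, mode);

	printf("scaler dst: %dx%d\n", dst.w, dst.h);	/* 1600x900 */
	return 0;
}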
/**
@@ -6200,6 +6175,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
@@ -6241,70 +6217,80 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
+ struct drm_rect src = {
+ .x2 = crtc_state->pipe_src_w << 16,
+ .y2 = crtc_state->pipe_src_h << 16,
+ };
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ u16 uv_rgb_hphase, uv_rgb_vphase;
+ enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
+ int hscale, vscale;
+ unsigned long irqflags;
+ int id;
- if (crtc_state->pch_pfit.enabled) {
- u16 uv_rgb_hphase, uv_rgb_vphase;
- int pfit_w, pfit_h, hscale, vscale;
- unsigned long irqflags;
- int id;
-
- if (drm_WARN_ON(&dev_priv->drm,
- crtc_state->scaler_state.scaler_id < 0))
- return;
+ if (!crtc_state->pch_pfit.enabled)
+ return;
- pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
- pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
+ if (drm_WARN_ON(&dev_priv->drm,
+ crtc_state->scaler_state.scaler_id < 0))
+ return;
- hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
- vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
+ hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
- uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
- id = scaler_state->scaler_id;
+ id = scaler_state->scaler_id;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
- PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
- crtc_state->pch_pfit.pos);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
- crtc_state->pch_pfit.size);
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+ PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ x << 16 | y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ width << 16 | height);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
- }
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
- if (crtc_state->pch_pfit.enabled) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- intel_de_write(dev_priv, PF_CTL(pipe),
- PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
- else
- intel_de_write(dev_priv, PF_CTL(pipe),
- PF_ENABLE | PF_FILTER_MED_3x3);
- intel_de_write(dev_priv, PF_WIN_POS(pipe),
- crtc_state->pch_pfit.pos);
- intel_de_write(dev_priv, PF_WIN_SZ(pipe),
- crtc_state->pch_pfit.size);
- }
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
+ intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
+ else
+ intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
@@ -6463,8 +6449,8 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- /* Wa_2006604312:icl */
- if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
+ /* Wa_2006604312:icl,ehl */
+ if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
return true;
return false;
@@ -6534,7 +6520,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
needs_nv12_wa(new_crtc_state))
skl_wa_827(dev_priv, pipe, true);
- /* Wa_2006604312:icl */
+ /* Wa_2006604312:icl,ehl */
if (!needs_scalerclk_wa(old_crtc_state) &&
needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, true);
@@ -6646,7 +6632,7 @@ intel_connector_primary_encoder(struct intel_connector *connector)
return &dp_to_dig_port(connector->mst_port)->base;
encoder = intel_attached_encoder(connector);
- WARN_ON(!encoder);
+ drm_WARN_ON(connector->base.dev, !encoder);
return encoder;
}
@@ -6720,7 +6706,8 @@ static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
continue;
if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+ encoder->pre_pll_enable(state, encoder,
+ crtc_state, conn_state);
}
}
@@ -6741,7 +6728,8 @@ static void intel_encoders_pre_enable(struct intel_atomic_state *state,
continue;
if (encoder->pre_enable)
- encoder->pre_enable(encoder, crtc_state, conn_state);
+ encoder->pre_enable(state, encoder,
+ crtc_state, conn_state);
}
}
@@ -6762,7 +6750,8 @@ static void intel_encoders_enable(struct intel_atomic_state *state,
continue;
if (encoder->enable)
- encoder->enable(encoder, crtc_state, conn_state);
+ encoder->enable(state, encoder,
+ crtc_state, conn_state);
intel_opregion_notify_encoder(encoder, true);
}
}
@@ -6785,7 +6774,8 @@ static void intel_encoders_disable(struct intel_atomic_state *state,
intel_opregion_notify_encoder(encoder, false);
if (encoder->disable)
- encoder->disable(encoder, old_crtc_state, old_conn_state);
+ encoder->disable(state, encoder,
+ old_crtc_state, old_conn_state);
}
}
@@ -6806,7 +6796,8 @@ static void intel_encoders_post_disable(struct intel_atomic_state *state,
continue;
if (encoder->post_disable)
- encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+ encoder->post_disable(state, encoder,
+ old_crtc_state, old_conn_state);
}
}
@@ -6827,7 +6818,8 @@ static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
continue;
if (encoder->post_pll_disable)
- encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+ encoder->post_pll_disable(state, encoder,
+ old_crtc_state, old_conn_state);
}
}
@@ -6848,7 +6840,8 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state,
continue;
if (encoder->update_pipe)
- encoder->update_pipe(encoder, crtc_state, conn_state);
+ encoder->update_pipe(state, encoder,
+ crtc_state, conn_state);
}
}
@@ -7037,9 +7030,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(new_crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_enable_trans_port_sync(new_crtc_state);
-
intel_set_pipe_src_size(new_crtc_state);
if (cpu_transcoder != TRANSCODER_EDP &&
@@ -7087,9 +7077,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_transcoder_func(new_crtc_state);
-
if (dev_priv->display.initial_watermarks)
dev_priv->display.initial_watermarks(state, crtc);
@@ -7120,11 +7107,12 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (old_crtc_state->pch_pfit.enabled) {
- intel_de_write(dev_priv, PF_CTL(pipe), 0);
- intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
- intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
- }
+ if (!old_crtc_state->pch_pfit.enabled)
+ return;
+
+ intel_de_write(dev_priv, PF_CTL(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
static void ilk_crtc_disable(struct intel_atomic_state *state,
@@ -7312,7 +7300,17 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
}
}
- switch (dig_port->aux_ch) {
+ return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+}
+
+/*
+ * Converts aux_ch to power_domain without caring about TBT ports; for TBT
+ * ports use intel_aux_power_domain() instead.
+ */
+enum intel_display_power_domain
+intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
+{
+ switch (aux_ch) {
case AUX_CH_A:
return POWER_DOMAIN_AUX_A;
case AUX_CH_B:
@@ -7328,7 +7326,7 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
case AUX_CH_G:
return POWER_DOMAIN_AUX_G;
default:
- MISSING_CASE(dig_port->aux_ch);
+ MISSING_CASE(aux_ch);
return POWER_DOMAIN_AUX_A;
}
}
@@ -7942,39 +7940,36 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
-static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
- u32 pixel_rate;
-
- pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
+ u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
+ unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
/*
* We only use IF-ID interlacing. If we ever use
* PF-ID we'll need to adjust the pixel_rate here.
*/
- if (pipe_config->pch_pfit.enabled) {
- u64 pipe_w, pipe_h, pfit_w, pfit_h;
- u32 pfit_size = pipe_config->pch_pfit.size;
+ if (!crtc_state->pch_pfit.enabled)
+ return pixel_rate;
- pipe_w = pipe_config->pipe_src_w;
- pipe_h = pipe_config->pipe_src_h;
+ pipe_w = crtc_state->pipe_src_w;
+ pipe_h = crtc_state->pipe_src_h;
- pfit_w = (pfit_size >> 16) & 0xFFFF;
- pfit_h = pfit_size & 0xFFFF;
- if (pipe_w < pfit_w)
- pipe_w = pfit_w;
- if (pipe_h < pfit_h)
- pipe_h = pfit_h;
+ pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
+ pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
- if (WARN_ON(!pfit_w || !pfit_h))
- return pixel_rate;
+ if (pipe_w < pfit_w)
+ pipe_w = pfit_w;
+ if (pipe_h < pfit_h)
+ pipe_h = pfit_h;
- pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
- pfit_w * pfit_h);
- }
+ if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
+ !pfit_w || !pfit_h))
+ return pixel_rate;
- return pixel_rate;
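+	/*
+	 * When the pfit window is smaller than the pipe, the pipe has to
+	 * fetch pipe_w * pipe_h pixels in the time the smaller window is
+	 * scanned out, so the effective rate grows by
+	 * (pipe_w * pipe_h) / (pfit_w * pfit_h); e.g. downscaling 1920x1080
+	 * into a 1600x900 window raises it by 1.44x. The product is computed
+	 * in 64 bits to avoid intermediate overflow.
+	 */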
+ return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
+ pfit_w * pfit_h);
}
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
@@ -8143,7 +8138,7 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
}
}
-static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_modparams.panel_use_ssc >= 0)
return i915_modparams.panel_use_ssc != 0;
@@ -8891,7 +8886,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
- mode->hsync = drm_mode_hsync(mode);
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
}
@@ -9168,9 +9162,9 @@ static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
-static void i9xx_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
@@ -9190,9 +9184,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
return;
}
- pipe_config->gmch_pfit.control = tmp;
- pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
- PFIT_PGM_RATIOS);
+ crtc_state->gmch_pfit.control = tmp;
+ crtc_state->gmch_pfit.pgm_ratios =
+ intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9398,7 +9392,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
@@ -9443,7 +9436,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- i9xx_get_pfit_config(crtc, pipe_config);
+ i9xx_get_pfit_config(pipe_config);
if (INTEL_GEN(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
@@ -10413,37 +10406,47 @@ static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
&pipe_config->fdi_m_n, NULL);
}
-static void skl_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
+ u32 pos, u32 size)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
- u32 ps_ctrl = 0;
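+	/*
+	 * The pos/size registers store the horizontal component in bits
+	 * 31:16 and the vertical one in bits 15:0, e.g. a 1920x1080 window
+	 * at (8, 4) reads back as pos = 0x00080004, size = 0x07800438.
+	 */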
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ pos >> 16, pos & 0xffff,
+ size >> 16, size & 0xffff);
+}
+
+static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
int id = -1;
int i;
/* find scaler attached to this pipe */
for (i = 0; i < crtc->num_scalers; i++) {
- ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
- if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
- id = i;
- pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
- SKL_PS_WIN_POS(crtc->pipe, i));
- pipe_config->pch_pfit.size = intel_de_read(dev_priv,
- SKL_PS_WIN_SZ(crtc->pipe, i));
- scaler_state->scalers[i].in_use = true;
- break;
- }
+ u32 ctl, pos, size;
+
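+		/*
+		 * A scaler is bound to the pipe (rather than to a plane)
+		 * when it is enabled and PS_PLANE_SEL is zero, which is
+		 * what this combined mask check tests in one comparison.
+		 */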
+ ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
+ if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
+ continue;
+
+ id = i;
+ crtc_state->pch_pfit.enabled = true;
+
+ pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
+ size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
+
+ ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+ scaler_state->scalers[i].in_use = true;
+ break;
}
scaler_state->scaler_id = id;
- if (id >= 0) {
+ if (id >= 0)
scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
- } else {
+ else
scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
- }
}
static void
@@ -10579,30 +10582,30 @@ error:
kfree(intel_fb);
}
-static void ilk_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 tmp;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 ctl, pos, size;
- tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
-
- if (tmp & PF_ENABLE) {
- pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
- PF_WIN_POS(crtc->pipe));
- pipe_config->pch_pfit.size = intel_de_read(dev_priv,
- PF_WIN_SZ(crtc->pipe));
-
- /* We currently do not free assignements of panel fitters on
- * ivb/hsw (since we don't use the higher upscaling modes which
- * differentiates them) so just WARN about this case for now. */
- if (IS_GEN(dev_priv, 7)) {
- drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
- PF_PIPE_SEL_IVB(crtc->pipe));
- }
- }
+ ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
+ if ((ctl & PF_ENABLE) == 0)
+ return;
+
+ crtc_state->pch_pfit.enabled = true;
+
+ pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
+ size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
+
+ ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+ /*
+	 * We currently do not free assignments of panel fitters on
+	 * ivb/hsw (since we don't use the higher upscaling modes which
+	 * differentiate them), so just WARN about this case for now.
+ */
+ drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
+ (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
@@ -10622,7 +10625,6 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
@@ -10714,7 +10716,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- ilk_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(pipe_config);
ret = true;
@@ -10891,7 +10893,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
- if (HAS_TRANSCODER_EDP(dev_priv))
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
panel_transcoder_mask |= BIT(TRANSCODER_EDP);
/*
@@ -11085,61 +11087,6 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
}
}
-static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- u32 trans_port_sync, master_select;
-
- trans_port_sync = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(cpu_transcoder));
-
- if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
- return INVALID_TRANSCODER;
-
- master_select = trans_port_sync &
- PORT_SYNC_MODE_MASTER_SELECT_MASK;
- if (master_select == 0)
- return TRANSCODER_EDP;
- else
- return master_select - 1;
-}
-
-static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 transcoders;
- enum transcoder cpu_transcoder;
-
- crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
- crtc_state->cpu_transcoder);
-
- transcoders = BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C) |
- BIT(TRANSCODER_D);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- enum intel_display_power_domain power_domain;
- intel_wakeref_t trans_wakeref;
-
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
-
- if (!trans_wakeref)
- continue;
-
- if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
- crtc_state->cpu_transcoder)
- crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
-
- intel_display_power_put(dev_priv, power_domain, trans_wakeref);
- }
-
- drm_WARN_ON(&dev_priv->drm,
- crtc_state->master_transcoder != INVALID_TRANSCODER &&
- crtc_state->sync_mode_slaves_mask);
-}
-
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -11243,9 +11190,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
- skl_get_pfit_config(crtc, pipe_config);
+ skl_get_pfit_config(pipe_config);
else
- ilk_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(pipe_config);
}
if (hsw_crtc_supports_ips(crtc)) {
@@ -11271,10 +11218,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- if (INTEL_GEN(dev_priv) >= 11 &&
- !transcoder_is_dsi(pipe_config->cpu_transcoder))
- icl_get_trans_port_sync_config(pipe_config);
-
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv,
@@ -12377,10 +12320,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- plane_state->uapi.visible = visible = false;
- crtc_state->active_planes &= ~BIT(plane->id);
- crtc_state->data_rate[plane->id] = 0;
- crtc_state->min_cdclk[plane->id] = 0;
+ intel_plane_set_invisible(crtc_state, plane_state);
+ visible = false;
}
if (!was_visible && !visible)
@@ -12510,8 +12451,10 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
if (IS_ERR(linked_plane_state))
return PTR_ERR(linked_plane_state);
- WARN_ON(linked_plane_state->planar_linked_plane != plane);
- WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
+ drm_WARN_ON(state->base.dev,
+ linked_plane_state->planar_linked_plane != plane);
+ drm_WARN_ON(state->base.dev,
+ linked_plane_state->planar_slave == plane_state->planar_slave);
}
return 0;
@@ -12886,19 +12829,20 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
return 0;
}
-static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+ const struct drm_display_mode *mode)
{
- DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->type, mode->flags);
+ drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->type, mode->flags);
}
-static inline void
+static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
@@ -12922,6 +12866,16 @@ intel_dump_infoframe(struct drm_i915_private *dev_priv,
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
+static void
+intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
+ const struct drm_dp_vsc_sdp *vsc)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
+}
+
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -13042,6 +12996,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
+ drm_dbg_kms(&dev_priv->drm,
+ "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+ transcoder_name(pipe_config->master_transcoder),
+ pipe_config->sync_mode_slaves_mask);
+
if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
pipe_config->fdi_lanes,
@@ -13074,12 +13033,21 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(DP_SDP_VSC))
+ intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
- intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
+ intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
drm_dbg_kms(&dev_priv->drm,
"port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
pipe_config->port_clock,
@@ -13104,9 +13072,8 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
pipe_config->gmch_pfit.lvds_border_bits);
else
drm_dbg_kms(&dev_priv->drm,
- "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
+ "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
+ DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
enableddisabled(pipe_config->pch_pfit.enabled),
yesno(pipe_config->pch_pfit.force_thru));
@@ -13228,7 +13195,8 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
{
crtc_state->uapi.enable = crtc_state->hw.enable;
crtc_state->uapi.active = crtc_state->hw.active;
- WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+ drm_WARN_ON(crtc_state->uapi.crtc->dev,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
@@ -13521,6 +13489,13 @@ intel_compare_infoframe(const union hdmi_infoframe *a,
return memcmp(a, b, sizeof(*a)) == 0;
}
+static bool
+intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
+ const struct drm_dp_vsc_sdp *b)
+{
+ return memcmp(a, b, sizeof(*a)) == 0;
+}
+
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
bool fastset, const char *name,
@@ -13546,6 +13521,31 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
}
}
+static void
+pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
+ bool fastset, const char *name,
+ const struct drm_dp_vsc_sdp *a,
+ const struct drm_dp_vsc_sdp *b)
+{
+ if (fastset) {
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s dp sdp\n", name);
+ drm_dbg_kms(&dev_priv->drm, "expected:\n");
+ drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
+ drm_dbg_kms(&dev_priv->drm, "found:\n");
+ drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ } else {
+ drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
+ drm_err(&dev_priv->drm, "expected:\n");
+ drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
+ drm_err(&dev_priv->drm, "found:\n");
+ drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
+ }
+}
+
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
const char *name, const char *format, ...)
@@ -13747,6 +13747,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
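+/*
+ * The VSC SDP is driven by the PSR hardware while PSR is enabled, so it can
+ * only be meaningfully compared when PSR is disabled in both the current and
+ * the new state.
+ */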
+#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
+ if (!current_config->has_psr && !pipe_config->has_psr && \
+ !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
if (current_config->name1 != pipe_config->name1) { \
pipe_config_mismatch(fastset, crtc, __stringify(name1), \
@@ -13847,8 +13858,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
- PIPE_CONF_CHECK_X(pch_pfit.pos);
- PIPE_CONF_CHECK_X(pch_pfit.size);
+ PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
+ PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
+ PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
+ PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
}
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
@@ -13922,6 +13935,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(spd);
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
@@ -14010,7 +14024,9 @@ static void verify_wm_state(struct intel_crtc *crtc,
/* Watermarks */
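+		/*
+		 * Level 0 may legitimately hold the SAGV watermark instead
+		 * of the regular WM0, so accept either value as a match.
+		 */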
for (level = 0; level <= max_level; level++) {
if (skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->wm[level]))
+ &sw_plane_wm->wm[level]) ||
+ (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->sagv_wm0)))
continue;
drm_err(&dev_priv->drm,
@@ -14065,7 +14081,9 @@ static void verify_wm_state(struct intel_crtc *crtc,
/* Watermarks */
for (level = 0; level <= max_level; level++) {
if (skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->wm[level]))
+ &sw_plane_wm->wm[level]) ||
+ (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->sagv_wm0)))
continue;
drm_err(&dev_priv->drm,
@@ -14999,11 +15017,13 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
}
static void commit_pipe_config(struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = needs_modeset(new_crtc_state);
/*
@@ -15029,22 +15049,35 @@ static void commit_pipe_config(struct intel_atomic_state *state,
dev_priv->display.atomic_update_watermarks(state, crtc);
}
-static void intel_update_crtc(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+static void intel_enable_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- bool modeset = needs_modeset(new_crtc_state);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
- if (modeset) {
- intel_crtc_update_active_timings(new_crtc_state);
+ if (!needs_modeset(new_crtc_state))
+ return;
- dev_priv->display.crtc_enable(state, crtc);
+ intel_crtc_update_active_timings(new_crtc_state);
- /* vblanks work again, re-enable pipe CRC. */
- intel_crtc_enable_pipe_crc(crtc);
- } else {
+ dev_priv->display.crtc_enable(state, crtc);
+
+ /* vblanks work again, re-enable pipe CRC. */
+ intel_crtc_enable_pipe_crc(crtc);
+}
+
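+/*
+ * Unlike intel_enable_crtc(), which only performs the full modeset enable,
+ * this handles the plane updates and fastset programming needed by every
+ * active pipe, whether or not it was just enabled.
+ */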
+static void intel_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = needs_modeset(new_crtc_state);
+
+ if (!modeset) {
if (new_crtc_state->preload_luts &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@@ -15064,7 +15097,7 @@ static void intel_update_crtc(struct intel_crtc *crtc,
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
- commit_pipe_config(state, old_crtc_state, new_crtc_state);
+ commit_pipe_config(state, crtc);
if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(state, crtc);
@@ -15084,18 +15117,6 @@ static void intel_update_crtc(struct intel_crtc *crtc,
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
-static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
- enum transcoder slave_transcoder;
-
- drm_WARN_ON(&dev_priv->drm,
- !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
-
- slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
- return intel_get_crtc_for_pipe(dev_priv,
- (enum pipe)slave_transcoder);
-}
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
@@ -15171,129 +15192,19 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
int i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (!new_crtc_state->hw.active)
continue;
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
+ intel_enable_crtc(state, crtc);
+ intel_update_crtc(state, crtc);
}
}
-static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
- intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(state, crtc);
- intel_crtc_enable_pipe_crc(crtc);
-}
-
-static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
- struct intel_atomic_state *state)
-{
- struct drm_connector *uninitialized_var(conn);
- struct drm_connector_state *conn_state;
- struct intel_dp *intel_dp;
- int i;
-
- for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
- if (conn_state->crtc == &crtc->base)
- break;
- }
- intel_dp = intel_attached_dp(to_intel_connector(conn));
- intel_dp_stop_link_train(intel_dp);
-}
-
-/*
- * TODO: This is only called from port sync and it is identical to what will be
- * executed again in intel_update_crtc() over port sync pipes
- */
-static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
- struct intel_atomic_state *state)
-{
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
-
- if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
- intel_fbc_disable(crtc);
- else
- intel_fbc_enable(state, crtc);
-
- /* Perform vblank evasion around commit operation */
- intel_pipe_update_start(new_crtc_state);
- commit_pipe_config(state, old_crtc_state, new_crtc_state);
- skl_update_planes_on_crtc(state, crtc);
- intel_pipe_update_end(new_crtc_state);
-
- /*
- * We usually enable FIFO underrun interrupts as part of the
- * CRTC enable sequence during modesets. But when we inherit a
- * valid pipe configuration from the BIOS we need to take care
- * of enabling them on the CRTC's first fastset.
- */
- if (new_crtc_state->update_pipe && !modeset &&
- old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
-}
-
-static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
- struct intel_crtc_state *new_slave_crtc_state =
- intel_atomic_get_new_crtc_state(state, slave_crtc);
- struct intel_crtc_state *old_slave_crtc_state =
- intel_atomic_get_old_crtc_state(state, slave_crtc);
-
- drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
- !old_slave_crtc_state);
-
- drm_dbg_kms(&i915->drm,
- "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
- crtc->base.base.id, crtc->base.name,
- slave_crtc->base.base.id, slave_crtc->base.name);
-
- /* Enable seq for slave with with DP_TP_CTL left Idle until the
- * master is ready
- */
- intel_crtc_enable_trans_port_sync(slave_crtc,
- state,
- new_slave_crtc_state);
-
- /* Enable seq for master with with DP_TP_CTL left Idle */
- intel_crtc_enable_trans_port_sync(crtc,
- state,
- new_crtc_state);
-
- /* Set Slave's DP_TP_CTL to Normal */
- intel_set_dp_tp_ctl_normal(slave_crtc,
- state);
-
- /* Set Master's DP_TP_CTL To Normal */
- usleep_range(200, 400);
- intel_set_dp_tp_ctl_normal(crtc,
- state);
-
- /* Now do the post crtc enable for all master and slaves */
- intel_post_crtc_enable_updates(slave_crtc,
- state);
- intel_post_crtc_enable_updates(crtc,
- state);
-}
-
static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -15365,8 +15276,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
entries[pipe] = new_crtc_state->wm.skl.ddb;
update_pipes &= ~BIT(pipe);
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
+ intel_update_crtc(state, crtc);
/*
* If this is an already active pipe, it's DDB changed,
@@ -15381,67 +15291,62 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
}
}
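+	/*
+	 * The modeset pipes get enabled in the loops below, but their plane
+	 * updates only run in the final loop, so reuse update_pipes to track
+	 * which pipes that last pass still has to handle.
+	 */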
+ update_pipes = modeset_pipes;
+
/*
* Enable all pipes that needs a modeset and do not depends on other
* pipes
*/
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
- is_trans_port_sync_slave(new_crtc_state))
+ is_trans_port_sync_master(new_crtc_state))
continue;
- drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, I915_MAX_PIPES, pipe));
-
- entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
- if (is_trans_port_sync_mode(new_crtc_state)) {
- struct intel_crtc *slave_crtc;
+ intel_enable_crtc(state, crtc);
+ }
- intel_update_trans_port_sync_crtcs(crtc, state,
- old_crtc_state,
- new_crtc_state);
+ /*
+ * Then we enable all remaining pipes that depend on other
+ * pipes: MST slaves and port sync masters.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
- slave_crtc = intel_get_slave_crtc(new_crtc_state);
- /* TODO: update entries[] of slave */
- modeset_pipes &= ~BIT(slave_crtc->pipe);
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
- } else {
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
- }
+ modeset_pipes &= ~BIT(pipe);
+
+ intel_enable_crtc(state, crtc);
}
/*
- * Finally enable all pipes that needs a modeset and depends on
- * other pipes, right now it is only MST slaves as both port sync slave
- * and master are enabled together
+ * Finally we do the plane updates/etc. for all pipes that got enabled.
*/
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
- if ((modeset_pipes & BIT(pipe)) == 0)
+ if ((update_pipes & BIT(pipe)) == 0)
continue;
drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
- modeset_pipes &= ~BIT(pipe);
+ update_pipes &= ~BIT(pipe);
- intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
+ intel_update_crtc(state, crtc);
}
drm_WARN_ON(&dev_priv->drm, modeset_pipes);
-
+ drm_WARN_ON(&dev_priv->drm, update_pipes);
}
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -15540,16 +15445,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_set_cdclk_pre_plane_update(state);
- /*
- * SKL workaround: bspec recommends we disable the SAGV when we
- * have more then one pipe enabled
- */
- if (!intel_can_enable_sagv(state))
- intel_disable_sagv(dev_priv);
-
intel_modeset_verify_disabled(dev_priv, state);
}
+ intel_sagv_pre_plane_update(state);
+
/* Complete the events for pipes that have now been disabled */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
bool modeset = needs_modeset(new_crtc_state);
@@ -15645,8 +15545,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
intel_verify_planes(state);
- if (state->modeset && intel_can_enable_sagv(state))
- intel_enable_sagv(dev_priv);
+ intel_sagv_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -15982,7 +15881,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (new_plane_state->uapi.fence) { /* explicit fencing */
ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
new_plane_state->uapi.fence,
- I915_FENCE_TIMEOUT,
+ i915_fence_timeout(dev_priv),
GFP_KERNEL);
if (ret < 0)
return ret;
@@ -16009,7 +15908,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
ret = i915_sw_fence_await_reservation(&state->commit_ready,
obj->base.resv, NULL,
- false, I915_FENCE_TIMEOUT,
+ false,
+ i915_fence_timeout(dev_priv),
GFP_KERNEL);
if (ret < 0)
goto unpin_fb;
@@ -18261,11 +18161,12 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
best_encoder = connector->base.state->best_encoder;
connector->base.state->best_encoder = &encoder->base;
+ /* FIXME NULL atomic state passed! */
if (encoder->disable)
- encoder->disable(encoder, crtc_state,
+ encoder->disable(NULL, encoder, crtc_state,
connector->base.state);
if (encoder->post_disable)
- encoder->post_disable(encoder, crtc_state,
+ encoder->post_disable(NULL, encoder, crtc_state,
connector->base.state);
connector->base.state->best_encoder = best_encoder;
@@ -18802,15 +18703,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-static bool
-has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
-{
- if (cpu_transcoder == TRANSCODER_EDP)
- return HAS_TRANSCODER_EDP(dev_priv);
- else
- return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
-}
-
struct intel_display_error_state {
u32 power_well_driver;
@@ -18919,7 +18811,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!has_transcoder(dev_priv, cpu_transcoder))
+ if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
continue;
error->transcoder[i].available = true;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index adb1225a3480..efb4da205ea2 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -320,9 +320,13 @@ enum phy_fia {
for_each_pipe(__dev_priv, __p) \
for_each_if((__mask) & BIT(__p))
-#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+#define for_each_cpu_transcoder(__dev_priv, __t) \
for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
- for_each_if ((__mask) & (1 << (__t)))
+ for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
+
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+ for_each_cpu_transcoder(__dev_priv, __t) \
+ for_each_if ((__mask) & BIT(__t))
#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
@@ -579,13 +583,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
+enum intel_display_power_domain
+intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
-int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 1e6eb7f2f72d..70525623bcdf 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -9,6 +9,7 @@
#include "i915_debugfs.h"
#include "intel_csr.h"
#include "intel_display_debugfs.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_fbc.h"
@@ -631,15 +632,9 @@ static void intel_dp_info(struct seq_file *m,
}
static void intel_dp_mst_info(struct seq_file *m,
- struct intel_connector *intel_connector)
+ struct intel_connector *intel_connector)
{
- struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
- struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(intel_encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
- intel_connector->port);
+ bool has_audio = intel_connector->port->has_audio;
seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
@@ -1149,6 +1144,51 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
+#define LPSP_STATUS(COND) ((COND) ? seq_puts(m, "LPSP: enabled\n") : \
+			   seq_puts(m, "LPSP: disabled\n"))
+
+static bool
+intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
+ enum i915_power_well_id power_well_id)
+{
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ is_enabled = intel_display_power_well_is_enabled(i915,
+ power_well_id);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return is_enabled;
+}
+
+static int i915_lpsp_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+
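+	/*
+	 * LPSP is in effect exactly when the platform's wider display power
+	 * well is shut off, hence the negated power well checks below.
+	 */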
+ switch (INTEL_GEN(i915)) {
+ case 12:
+ case 11:
+ LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
+ break;
+ case 10:
+ case 9:
+ LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
+ break;
+ default:
+ /*
+ * Apart from HASWELL/BROADWELL other legacy platform doesn't
+ * support lpsp.
+ */
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
+ else
+ seq_puts(m, "LPSP: not supported\n");
+ }
+
+ return 0;
+}
+
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1326,6 +1366,16 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
intel_dp->compliance.test_data.vdisplay);
seq_printf(m, "bpc: %u\n",
intel_dp->compliance.test_data.bpc);
+ } else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_PHY_TEST_PATTERN) {
+ seq_printf(m, "pattern: %d\n",
+ intel_dp->compliance.test_data.phytest.phy_pattern);
+ seq_printf(m, "Number of lanes: %d\n",
+ intel_dp->compliance.test_data.phytest.num_lanes);
+ seq_printf(m, "Link Rate: %d\n",
+ intel_dp->compliance.test_data.phytest.link_rate);
+ seq_printf(m, "level: %02x\n",
+ intel_dp->train_set[0]);
}
} else
seq_puts(m, "0");
@@ -1358,7 +1408,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
if (encoder && connector->status == connector_status_connected) {
intel_dp = enc_to_intel_dp(encoder);
- seq_printf(m, "%02lx", intel_dp->compliance.test_type);
+ seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
}
@@ -1906,6 +1956,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
{"i915_drrs_status", i915_drrs_status, 0},
+ {"i915_lpsp_status", i915_lpsp_status, 0},
};
static const struct {
@@ -1927,7 +1978,7 @@ static const struct {
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
-int intel_display_debugfs_register(struct drm_i915_private *i915)
+void intel_display_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
int i;
@@ -1940,9 +1991,9 @@ int intel_display_debugfs_register(struct drm_i915_private *i915)
intel_display_debugfs_files[i].fops);
}
- return drm_debugfs_create_files(intel_display_debugfs_list,
- ARRAY_SIZE(intel_display_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(intel_display_debugfs_list,
+ ARRAY_SIZE(intel_display_debugfs_list),
+ minor->debugfs_root, minor);
}
static int i915_panel_show(struct seq_file *m, void *data)
@@ -1987,6 +2038,48 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+#define LPSP_CAPABLE(COND) ((COND) ? seq_puts(m, "LPSP: capable\n") : \
+			    seq_puts(m, "LPSP: incapable\n"))
+
+static int i915_lpsp_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_encoder *encoder =
+ intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ switch (INTEL_GEN(i915)) {
+ case 12:
+ /*
+		 * TGL can actually drive LPSP on ports up to DDI_C, but
+		 * there is no physically connected DDI_C on TGL SKUs, and
+		 * the driver does not even initialize the DDI_C port on gen12.
+ */
+ LPSP_CAPABLE(encoder->port <= PORT_B);
+ break;
+ case 11:
+ LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ break;
+ case 10:
+ case 9:
+ LPSP_CAPABLE(encoder->port == PORT_A &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+ break;
+ default:
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
+
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@@ -2130,5 +2223,16 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
connector, &i915_dsc_fec_support_fops);
+	/* Legacy panels don't support LPSP on any platform */
+ if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv)) &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
+ debugfs_create_file("i915_lpsp_capability", 0444, root,
+ connector, &i915_lpsp_capability_fops);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index a3bea1ce04c2..c922c1745bfe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -10,10 +10,10 @@ struct drm_connector;
struct drm_i915_private;
#ifdef CONFIG_DEBUG_FS
-int intel_display_debugfs_register(struct drm_i915_private *i915);
+void intel_display_debugfs_register(struct drm_i915_private *i915);
int intel_connector_debugfs_add(struct drm_connector *connector);
#else
-static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; }
+static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 84ecf8e58523..49998906cc61 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -151,6 +151,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "GT_IRQ";
case POWER_DOMAIN_DPLL_DC_OFF:
return "DPLL_DC_OFF";
+ case POWER_DOMAIN_TC_COLD_OFF:
+ return "TC_COLD_OFF";
default:
MISSING_CASE(domain);
return "?";
@@ -282,8 +284,51 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
+static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->hsw.idx;
+
+ return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
+}
+
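+/*
+ * Look up the digital port currently using the given AUX channel, or NULL
+ * if no non-MST encoder claims it.
+ */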
+static struct intel_digital_port *
+aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+ enum aux_ch aux_ch)
+{
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ /* We'll check the MST primary port */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ dig_port = enc_to_dig_port(encoder);
+ if (!dig_port)
+ continue;
+
+ if (dig_port->aux_ch != aux_ch) {
+ dig_port = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ return dig_port;
+}
+
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+ struct i915_power_well *power_well,
+ bool timeout_expected)
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
@@ -294,8 +339,8 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
power_well->desc->name);
- /* An AUX timeout is expected if the TBT DP tunnel is down. */
- drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
+ drm_WARN_ON(&dev_priv->drm, !timeout_expected);
}
}
@@ -358,11 +403,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- bool wait_fuses = power_well->desc->hsw.has_fuses;
- enum skl_power_gate uninitialized_var(pg);
u32 val;
- if (wait_fuses) {
+ if (power_well->desc->hsw.has_fuses) {
+ enum skl_power_gate pg;
+
pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
/*
@@ -379,19 +424,27 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, regs->driver);
intel_de_write(dev_priv, regs->driver,
val | HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: cnl */
if (IS_CANNONLAKE(dev_priv) &&
pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
+ u32 val;
+
val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
}
- if (wait_fuses)
+ if (power_well->desc->hsw.has_fuses) {
+ enum skl_power_gate pg;
+
+ pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
gen9_wait_for_power_well_fuses(dev_priv, pg);
+ }
hsw_power_well_post_enable(dev_priv,
power_well->desc->hsw.irq_pipe_mask,
@@ -437,7 +490,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
val | ICL_LANE_ENABLE_AUX);
}
- hsw_wait_for_power_well_enable(dev_priv, power_well);
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
@@ -470,21 +523,6 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
-
-#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
-
-static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int pw_idx = power_well->desc->hsw.idx;
-
- return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
- ICL_AUX_PW_TO_CH(pw_idx);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
@@ -501,51 +539,28 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
}
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
{
- enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
- struct intel_digital_port *dig_port = NULL;
- struct intel_encoder *encoder;
-
/* Bypass the check if all references are released asynchronously */
if (power_well_async_ref_count(dev_priv, power_well) ==
power_well->count)
return;
- aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (!intel_phy_is_tc(dev_priv, phy))
- continue;
-
- /* We'll check the MST primary port */
- if (encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- dig_port = enc_to_dig_port(encoder);
- if (drm_WARN_ON(&dev_priv->drm, !dig_port))
- continue;
-
- if (dig_port->aux_ch != aux_ch) {
- dig_port = NULL;
- continue;
- }
-
- break;
- }
-
if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
+ if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
+ return;
+
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}
#else
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
{
}
@@ -553,24 +568,65 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
+static void icl_tc_cold_exit(struct drm_i915_private *i915)
+{
+ int ret, tries = 0;
+
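+	/*
+	 * Pcode may still be busy with a previous request; retry the TC cold
+	 * exit up to three times when it returns -EAGAIN.
+	 */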
+ while (1) {
+ ret = sandybridge_pcode_write_timeout(i915,
+ ICL_PCODE_EXIT_TCCOLD,
+ 0, 250, 1);
+ if (ret != -EAGAIN || ++tries == 3)
+ break;
+ msleep(1);
+ }
+
+ /* Spec states that TC cold exit can take up to 1ms to complete */
+ if (!ret)
+ msleep(1);
+
+	/* TODO: turn failure into an error as soon as i915 CI updates the ICL IFWI */
+	drm_dbg_kms(&i915->drm, "TC cold exit %s\n", ret ? "failed" :
+		    "succeeded");
+}
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+ bool timeout_expected;
u32 val;
- icl_tc_port_assert_ref_held(dev_priv, power_well);
+ icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (power_well->desc->hsw.is_tc_tbt)
+ if (is_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
- hsw_power_well_enable(dev_priv, power_well);
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
+
+ /*
+ * An AUX timeout is expected if the TBT DP tunnel is down,
+	 * or when we need to enable AUX on a legacy TypeC port as part of the
+	 * TC-cold exit sequence.
+ */
+ timeout_expected = is_tbt;
+ if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
+ icl_tc_cold_exit(dev_priv);
+ timeout_expected = true;
+ }
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
- if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
+ if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
@@ -588,11 +644,48 @@ static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- icl_tc_port_assert_ref_held(dev_priv, power_well);
+ enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+
+ icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
hsw_power_well_disable(dev_priv, power_well);
}
+static void
+icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->hsw.idx;
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
+ bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+
+ if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+ return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_enable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->hsw.idx;
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
+ bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+
+ if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+ return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_disable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_disable(dev_priv, power_well);
+}
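
[Editor's note: the dispatch in the two new functions above, summarized as a comment (derived directly from the code; the TBT check comes first because ICL_AUX_PW_TO_PHY() is only meaningful for non-TBT well indices, per the "non-TBT only" annotation):

	/*
	 * icl_aux_power_well_{enable,disable}() dispatch:
	 *
	 *   is_tc_tbt || TC PHY  ->  icl_tc_phy_aux_power_well_*    (TC/TBT AUX)
	 *   IS_ICELAKE           ->  icl_combo_phy_aux_power_well_* (combo PHY)
	 *   otherwise            ->  hsw_power_well_*               (plain well)
	 */
]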
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -943,7 +1036,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
/* Power wells at this level and above must be disabled for DC5 entry */
if (INTEL_GEN(dev_priv) >= 12)
- high_pg = TGL_DISP_PW_3;
+ high_pg = ICL_DISP_PW_3;
else
high_pg = SKL_DISP_PW_2;
@@ -1873,20 +1966,27 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, u64 mask)
{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
enum intel_display_power_domain domain;
- DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
+ drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
for_each_power_domain(domain, mask)
- DRM_DEBUG_DRIVER("%s use_count %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg(&i915->drm, "%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
- DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
- power_domains->async_put_wakeref);
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+
+ drm_dbg(&i915->drm, "async_put_wakeref %u\n",
+ power_domains->async_put_wakeref);
print_power_domains(power_domains, "async_put_domains[0]",
power_domains->async_put_domains[0]);
@@ -2798,6 +2898,21 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
+#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
+ BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
+
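[Editor's note: TGL_TC_COLD_OFF_POWER_DOMAINS ties every TypeC AUX domain — native and TBT — plus POWER_DOMAIN_TC_COLD_OFF to the new "TC cold off" power well defined later in this patch, so holding any of them keeps TC-cold blocked. A minimal usage sketch, assuming the usual get/put pairing from intel_display_power.h (illustrative only, not code from this patch):

	intel_wakeref_t wakeref;

	/* Taking any domain in the mask, e.g. AUX D, implicitly enables the
	 * "TC cold off" well, which sends the PCODE block request. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_D);
	/* ... TypeC PHY access / AUX transfers are safe here ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_D, wakeref);
]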
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -3496,17 +3611,10 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
},
};
-static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = icl_combo_phy_aux_power_well_enable,
- .disable = icl_combo_phy_aux_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+static const struct i915_power_well_ops icl_aux_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
- .enable = icl_tc_phy_aux_power_well_enable,
- .disable = icl_tc_phy_aux_power_well_disable,
+ .enable = icl_aux_power_well_enable,
+ .disable = icl_aux_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
@@ -3564,7 +3672,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
.name = "power well 3",
.domains = ICL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = ICL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -3636,7 +3744,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX A",
.domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3646,7 +3754,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX B",
.domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3656,7 +3764,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX C TC1",
.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3667,7 +3775,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX D TC2",
.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3678,7 +3786,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX E TC3",
.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3689,7 +3797,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX F TC4",
.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3700,7 +3808,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX C TBT1",
.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3711,7 +3819,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX D TBT2",
.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3722,7 +3830,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX E TBT3",
.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3733,7 +3841,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX F TBT4",
.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3755,149 +3863,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
};
-static const struct i915_power_well_desc ehl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = ICL_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = ICL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO",
- .domains = ICL_DDI_IO_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
- },
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "AUX D",
- .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
- },
- },
- {
- .name = "power well 4",
- .domains = ICL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- },
- },
+static void
+tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+{
+ u8 tries = 0;
+ int ret;
+
+ while (1) {
+ u32 low_val = 0, high_val;
+
+ if (block)
+ high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
+ else
+ high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
+
+ /*
+		 * The spec states that the request should time out after 200us,
+		 * but the function below times out after 500us
+ */
+ ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
+ &high_val);
+ if (ret == 0) {
+ if (block &&
+ (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
+ ret = -EIO;
+ else
+ break;
+ }
+
+ if (++tries == 3)
+ break;
+
+ if (ret == -EAGAIN)
+ msleep(1);
+ }
+
+ if (ret)
+ drm_err(&i915->drm, "TC cold %sblock failed\n",
+ block ? "" : "un");
+ else
+ drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+ block ? "" : "un");
+}
+
+static void
+tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, true);
+}
+
+static void
+tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, false);
+}
+
+static void
+tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ if (power_well->count > 0)
+ tgl_tc_cold_off_power_well_enable(i915, power_well);
+ else
+ tgl_tc_cold_off_power_well_disable(i915, power_well);
+}
+
+static bool
+tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /*
+	 * Not the correct implementation, but there is no way to read the
+	 * state back from PCODE, so return the refcount to avoid state
+	 * mismatch errors
+ */
+ return power_well->count;
+}
+
+static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
+ .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
+ .enable = tgl_tc_cold_off_power_well_enable,
+ .disable = tgl_tc_cold_off_power_well_disable,
+ .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
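
[Editor's note: the ops set above is purely software-state driven — there is no power well register behind it:

	/*
	 * tgl_tc_cold_off_ops at a glance:
	 *   .enable     -> tgl_tc_cold_request(i915, true)   block TC-cold
	 *   .disable    -> tgl_tc_cold_request(i915, false)  unblock TC-cold
	 *   .sync_hw    -> replay the request from power_well->count at init
	 *   .is_enabled -> power_well->count (PCODE state is not readable)
	 */
]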
static const struct i915_power_well_desc tgl_power_wells[] = {
@@ -3942,7 +3990,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "power well 3",
.domains = TGL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = TGL_DISP_PW_3,
+ .id = ICL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4044,7 +4092,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX A",
.domains = TGL_AUX_A_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4054,7 +4102,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX B",
.domains = TGL_AUX_B_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4064,7 +4112,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX C",
.domains = TGL_AUX_C_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4074,7 +4122,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX D TC1",
.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4085,7 +4133,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX E TC2",
.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4096,7 +4144,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX F TC3",
.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4107,7 +4155,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX G TC4",
.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4118,7 +4166,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX H TC5",
.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4129,7 +4177,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX I TC6",
.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4140,7 +4188,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX D TBT1",
.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4151,7 +4199,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX E TBT2",
.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4162,7 +4210,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX F TBT3",
.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4173,7 +4221,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX G TBT4",
.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4184,7 +4232,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX H TBT5",
.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4195,7 +4243,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX I TBT6",
.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4227,6 +4275,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.hsw.irq_pipe_mask = BIT(PIPE_D),
},
},
+ {
+ .name = "TC cold off",
+ .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
+ .ops = &tgl_tc_cold_off_ops,
+ .id = DISP_PW_ID_NONE,
+ },
};
static int
@@ -4376,8 +4430,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_GEN(dev_priv, 12)) {
err = set_power_wells(power_domains, tgl_power_wells);
- } else if (IS_ELKHARTLAKE(dev_priv)) {
- err = set_power_wells(power_domains, ehl_power_wells);
} else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -4439,9 +4491,8 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
-static inline
-bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
- i915_reg_t reg, bool enable)
+static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, bool enable)
{
u32 val, status;
@@ -4480,7 +4531,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
"Invalid number of dbuf slices requested\n");
- DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
+ drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
+ req_slices);
/*
* Might be running this in parallel to gen9_dc_off_power_well_enable
@@ -5016,7 +5068,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
const struct buddy_page_mask *table;
int i;
- if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
/* Wa_1409767108: tgl */
table = wa_1409767108_buddy_page_masks;
else
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index da64a5edae7a..6c917699293b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -76,6 +76,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
POWER_DOMAIN_DPLL_DC_OFF,
+ POWER_DOMAIN_TC_COLD_OFF,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -100,7 +101,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
- TGL_DISP_PW_3,
+ ICL_DISP_PW_3,
SKL_DISP_DC_OFF,
};
@@ -266,6 +267,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 5e00e611f077..2bf3d4cb4ea9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -132,8 +132,7 @@ struct intel_encoder {
u16 cloneable;
u8 pipe_mask;
enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received);
+ struct intel_connector *connector);
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
@@ -146,28 +145,35 @@ struct intel_encoder {
void (*update_prepare)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
- void (*pre_pll_enable)(struct intel_encoder *,
+ void (*pre_pll_enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*pre_enable)(struct intel_encoder *,
+ void (*pre_enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*enable)(struct intel_encoder *,
+ void (*enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*update_complete)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
- void (*disable)(struct intel_encoder *,
+ void (*disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*post_disable)(struct intel_encoder *,
+ void (*post_disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*post_pll_disable)(struct intel_encoder *,
+ void (*post_pll_disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*update_pipe)(struct intel_encoder *,
+ void (*update_pipe)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
/* Read out the current hw state of this connector, returning true if
@@ -425,11 +431,14 @@ struct intel_connector {
struct edid *edid;
struct edid *detect_edid;
+ /* Number of times hotplug detection was tried after an HPD interrupt */
+ int hotplug_retries;
+
/* Since POLL and HPD connectors may use the same HPD line, keep the native
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
- void *port; /* store this opaque as its illegal to dereference it */
+ struct drm_dp_mst_port *port;
struct intel_dp *mst_port;
@@ -640,6 +649,16 @@ struct intel_crtc_scaler_state {
#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
/* Flag to use the scanline counter instead of the pixel counter */
#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+/*
+ * TE0 or TE1 flag is set if the crtc has a DSI encoder which
+ * is operating in command mode.
+ * Flag to use TE from DSI0 instead of VBI in command mode
+ */
+#define I915_MODE_FLAG_DSI_USE_TE0 (1<<3)
+/* Flag to use TE from DSI1 instead of VBI in command mode */
+#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
+/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
+#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
struct intel_wm_level {
bool enable;
@@ -669,11 +688,13 @@ struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
struct skl_wm_level trans_wm;
+ struct skl_wm_level sagv_wm0;
bool is_planar;
};
struct skl_pipe_wm {
struct skl_plane_wm planes[I915_MAX_PLANES];
+ bool use_sagv_wm;
};
enum vlv_wm_level {
@@ -955,8 +976,7 @@ struct intel_crtc_state {
/* Panel fitter placement and size for Ironlake+ */
struct {
- u32 pos;
- u32 size;
+ struct drm_rect dst;
bool enabled;
bool force_thru;
} pch_pfit;
@@ -1015,6 +1035,7 @@ struct intel_crtc_state {
union hdmi_infoframe spd;
union hdmi_infoframe hdmi;
union hdmi_infoframe drm;
+ struct drm_dp_vsc_sdp vsc;
} infoframes;
/* HDMI scrambling status */
@@ -1238,6 +1259,7 @@ struct intel_dp_compliance_data {
u8 video_pattern;
u16 hdisplay, vdisplay;
u8 bpc;
+ struct drm_dp_phy_test_params phytest;
};
struct intel_dp_compliance {
@@ -1347,6 +1369,9 @@ struct intel_dp {
/* This is called before link training is started */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
+ void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat);
+ void (*set_idle_link_train)(struct intel_dp *intel_dp);
+ void (*set_signal_levels)(struct intel_dp *intel_dp);
/* Displayport compliance testing */
struct intel_dp_compliance compliance;
@@ -1401,6 +1426,7 @@ struct intel_digital_port {
const struct drm_connector_state *conn_state);
u32 (*infoframes_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
+ bool (*connected)(struct intel_encoder *encoder);
};
struct intel_dp_mst_encoder {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index a2fafd4499f2..40d42dcff0b7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -48,7 +48,6 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
-#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -164,6 +163,17 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
};
int i, max_rate;
+ if (drm_dp_has_quirk(&intel_dp->desc, 0,
+ DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+ /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
+ static const int quirk_rates[] = { 162000, 270000, 324000 };
+
+ memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
+ intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
+
+ return;
+ }
+
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
@@ -452,6 +462,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, u8 lane_count)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int index;
index = intel_dp_rate_index(intel_dp->common_rates,
@@ -462,7 +473,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
!intel_dp_can_link_train_fallback_for_edp(intel_dp,
intel_dp->common_rates[index - 1],
lane_count)) {
- DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with same parameters\n");
return 0;
}
intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
@@ -472,13 +484,14 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
!intel_dp_can_link_train_fallback_for_edp(intel_dp,
intel_dp_max_common_rate(intel_dp),
lane_count >> 1)) {
- DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with same parameters\n");
return 0;
}
intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
intel_dp->max_link_lane_count = lane_count >> 1;
} else {
- DRM_ERROR("Link Training Unsuccessful\n");
+ drm_err(&i915->drm, "Link Training Unsuccessful\n");
return -1;
}
@@ -553,6 +566,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
int mode_clock, int mode_hdisplay)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 min_slice_count, i;
int max_slice_width;
@@ -565,8 +579,9 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
- DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
- max_slice_width);
+ drm_dbg_kms(&i915->drm,
+ "Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
return 0;
}
/* Also take into account max slice width */
@@ -584,7 +599,8 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return valid_dsc_slicecount[i];
}
- DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+ drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
+ min_slice_count);
return 0;
}
@@ -1343,8 +1359,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
- enum intel_display_power_domain aux_domain =
- intel_aux_power_domain(intel_dig_port);
+ enum intel_display_power_domain aux_domain;
intel_wakeref_t aux_wakeref;
intel_wakeref_t pps_wakeref;
int i, ret, recv_bytes;
@@ -1359,6 +1374,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
if (is_tc_port)
intel_tc_port_lock(intel_dig_port);
+ aux_domain = intel_aux_power_domain(intel_dig_port);
+
aux_wakeref = intel_display_power_get(i915, aux_domain);
pps_wakeref = pps_lock(intel_dp);
@@ -1832,6 +1849,7 @@ static void snprintf_int_array(char *str, size_t len,
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
char str[128]; /* FIXME: too big for stack? */
if (!drm_debug_enabled(DRM_UT_KMS))
@@ -1839,15 +1857,15 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
snprintf_int_array(str, sizeof(str),
intel_dp->source_rates, intel_dp->num_source_rates);
- DRM_DEBUG_KMS("source rates: %s\n", str);
+ drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
snprintf_int_array(str, sizeof(str),
intel_dp->sink_rates, intel_dp->num_sink_rates);
- DRM_DEBUG_KMS("sink rates: %s\n", str);
+ drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
snprintf_int_array(str, sizeof(str),
intel_dp->common_rates, intel_dp->num_common_rates);
- DRM_DEBUG_KMS("common rates: %s\n", str);
+ drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
int
@@ -1954,6 +1972,8 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
/* For DP Compliance we override the computed bpp for the pipe */
if (intel_dp->compliance.test_data.bpc != 0) {
int bpp = 3 * intel_dp->compliance.test_data.bpc;
@@ -1961,7 +1981,7 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
limits->min_bpp = limits->max_bpp = bpp;
pipe_config->dither_force_disable = bpp == 6 * 3;
- DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
+ drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
}
/* Use values requested by Compliance Test Request */
@@ -2055,6 +2075,7 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u8 line_buf_depth;
@@ -2089,7 +2110,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
if (!line_buf_depth) {
- DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ drm_dbg_kms(&i915->drm,
+ "DSC Sink Line Buffer Depth invalid\n");
return -EINVAL;
}
@@ -2114,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
u8 dsc_max_bpc;
int pipe_bpp;
int ret;
@@ -2229,7 +2252,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
int common_len;
@@ -2264,11 +2289,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
- DRM_DEBUG_KMS("DP link computation with max lane count %i "
- "max rate %d max bpp %d pixel clock %iKHz\n",
- limits.max_lane_count,
- intel_dp->common_rates[limits.max_clock],
- limits.max_bpp, adjusted_mode->crtc_clock);
+ drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
+ "max rate %d max bpp %d pixel clock %iKHz\n",
+ limits.max_lane_count,
+ intel_dp->common_rates[limits.max_clock],
+ limits.max_bpp, adjusted_mode->crtc_clock);
/*
* Optimize for slow and wide. This is the place to add alternative
@@ -2277,7 +2302,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
- DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
+ drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
if (ret || intel_dp->force_dsc_en) {
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits);
@@ -2286,40 +2311,42 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
}
if (pipe_config->dsc.compression_enable) {
- DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
-
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc.compressed_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ drm_dbg_kms(&i915->drm,
+ "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp);
+
+ drm_dbg_kms(&i915->drm,
+ "DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->dsc.compressed_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
} else {
- DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ drm_dbg_kms(&i915->drm,
+ "DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
}
return 0;
}
static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
- struct drm_connector *connector,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
+ struct drm_connector *connector = conn_state->connector;
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- int ret;
if (!drm_mode_is_420_only(info, adjusted_mode) ||
!intel_dp_get_colorimetry_status(intel_dp) ||
@@ -2328,16 +2355,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- /* YCBCR 420 output conversion needs a scaler */
- ret = skl_update_scaler_crtc(crtc_state);
- if (ret) {
- DRM_DEBUG_KMS("Scaler allocation for output failed\n");
- return ret;
- }
-
- intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
-
- return 0;
+ return intel_pch_panel_fitting(crtc_state, conn_state);
}
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
@@ -2384,6 +2402,164 @@ static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
return true;
}
+static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /*
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication.
+ */
+ vsc->revision = 0x5;
+ vsc->length = 0x13;
+
+ /* DP 1.4a spec, Table 2-120 */
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ vsc->pixelformat = DP_PIXELFORMAT_YUV444;
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ vsc->pixelformat = DP_PIXELFORMAT_YUV420;
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ default:
+ vsc->pixelformat = DP_PIXELFORMAT_RGB;
+ }
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_709:
+ vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
+ break;
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
+ break;
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
+ break;
+ default:
+ /*
+ * RGB->YCBCR color conversion uses the BT.709
+ * color space.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+ else
+ vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
+ break;
+ }
+
+ vsc->bpc = crtc_state->pipe_bpp / 3;
+
+ /* only RGB pixelformat supports 6 bpc */
+ drm_WARN_ON(&dev_priv->drm,
+ vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
+
+ /* all YCbCr are always limited range */
+ vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
+ vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
+}
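
[Editor's note: for the common case — 8 bpc RGB output with default colorimetry — the function above yields an SDP like the following (an illustrative designated-initializer literal, not code from the patch; .sdp_type is filled in by the callers below):

	struct drm_dp_vsc_sdp vsc = {
		.sdp_type = DP_SDP_VSC,
		.revision = 0x5,	/* DP 1.4 spec, Table 2-118 */
		.length = 0x13,
		.pixelformat = DP_PIXELFORMAT_RGB,
		.colorimetry = DP_COLORIMETRY_DEFAULT,
		.bpc = 8,		/* pipe_bpp == 24 */
		.dynamic_range = DP_DYNAMIC_RANGE_CTA,
		.content_type = DP_CONTENT_TYPE_NOT_DEFINED,
	};
]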
+
+static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
+
+ /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
+ if (crtc_state->has_psr)
+ return;
+
+ if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ return;
+
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+ vsc->sdp_type = DP_SDP_VSC;
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ &crtc_state->infoframes.vsc);
+}
+
+void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ vsc->sdp_type = DP_SDP_VSC;
+
+ if (dev_priv->psr.psr2_enabled) {
+ if (dev_priv->psr.colorimetry_support &&
+ intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
+ /* [PSR2, +Colorimetry] */
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ vsc);
+ } else {
+ /*
+ * [PSR2, -Colorimetry]
+ * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
+ * 3D stereo + PSR/PSR2 + Y-coordinate.
+ */
+ vsc->revision = 0x4;
+ vsc->length = 0xe;
+ }
+ } else {
+ /*
+ * [PSR1]
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
+ * higher).
+ */
+ vsc->revision = 0x2;
+ vsc->length = 0x8;
+ }
+}
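
[Editor's note: the revision/length pairs chosen above, per the spec tables cited in the comments:

	PSR1                 : revision 0x2, length 0x8
	PSR2, no colorimetry : revision 0x4, length 0xe
	PSR2 + colorimetry   : revision 0x5, length 0x13 (via intel_dp_compute_vsc_colorimetry())
]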
+
+static void
+intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ int ret;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
+
+ if (!conn_state->hdr_output_metadata)
+ return;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
+
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
+ return;
+ }
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2394,7 +2570,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
enum port port = encoder->port;
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2410,9 +2585,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (lspcon->active)
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
else
- ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
- pipe_config);
-
+ ret = intel_dp_ycbcr420_config(intel_dp, pipe_config,
+ conn_state);
if (ret)
return ret;
@@ -2428,18 +2602,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (INTEL_GEN(dev_priv) >= 9) {
- ret = skl_update_scaler_crtc(pipe_config);
- if (ret)
- return ret;
- }
-
if (HAS_GMCH(dev_priv))
- intel_gmch_panel_fitting(intel_crtc, pipe_config,
- conn_state->scaling_mode);
+ ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
- intel_pch_panel_fitting(intel_crtc, pipe_config,
- conn_state->scaling_mode);
+ ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -2489,6 +2657,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_set_clock(encoder, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config);
+ intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
+ intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
return 0;
}
@@ -2630,22 +2800,27 @@ static void wait_panel_status(struct intel_dp *intel_dp,
static void wait_panel_on(struct intel_dp *intel_dp)
{
- DRM_DEBUG_KMS("Wait for panel power on\n");
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void wait_panel_off(struct intel_dp *intel_dp)
{
- DRM_DEBUG_KMS("Wait for panel power off time\n");
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
ktime_t panel_power_on_time;
s64 panel_power_off_duration;
- DRM_DEBUG_KMS("Wait for panel power cycle\n");
+ drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
/* Take the difference of the current time and the panel power off time,
 * then make the panel wait for t11_t12 if needed. */
@@ -3009,11 +3184,12 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
intel_panel_enable_backlight(crtc_state, conn_state);
_intel_edp_backlight_on(intel_dp);
@@ -3047,11 +3223,12 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
_intel_edp_backlight_off(intel_dp);
intel_panel_disable_backlight(old_conn_state);
@@ -3064,6 +3241,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
@@ -3074,8 +3252,8 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
if (is_enabled == enable)
return;
- DRM_DEBUG_KMS("panel power control backlight %s\n",
- enable ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ enable ? "enable" : "disable");
if (enable)
_intel_edp_backlight_on(intel_dp);
@@ -3185,6 +3363,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int ret;
if (!crtc_state->dsc.compression_enable)
@@ -3193,13 +3372,15 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
enable ? DP_DECOMPRESSION_EN : 0);
if (ret < 0)
- DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
- enable ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s sink decompression state\n",
+ enable ? "enable" : "disable");
}
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int ret, i;
/* Should have a valid DPCD by this point */
@@ -3232,8 +3413,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
}
if (ret != 1)
- DRM_DEBUG_KMS("failed to %s sink power state\n",
- mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm, "failed to %s sink power state\n",
+ mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
@@ -3390,7 +3571,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static void intel_disable_dp(struct intel_encoder *encoder,
+static void intel_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3410,21 +3592,24 @@ static void intel_disable_dp(struct intel_encoder *encoder,
intel_edp_panel_off(intel_dp);
}
-static void g4x_disable_dp(struct intel_encoder *encoder,
+static void g4x_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
-static void vlv_disable_dp(struct intel_encoder *encoder,
+static void vlv_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
-static void g4x_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3444,14 +3629,16 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
ilk_edp_pll_off(intel_dp, old_crtc_state);
}
-static void vlv_post_disable_dp(struct intel_encoder *encoder,
+static void vlv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_dp_link_down(encoder, old_crtc_state);
}
-static void chv_post_disable_dp(struct intel_encoder *encoder,
+static void chv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3468,90 +3655,63 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
}
static void
-_intel_dp_set_link_train(struct intel_dp *intel_dp,
- u32 *DP,
- u8 dp_train_pat)
+cpt_set_link_train(struct intel_dp *intel_dp,
+ u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->base.port;
- u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
-
- if (dp_train_pat & train_pat_mask)
- drm_dbg_kms(&dev_priv->drm,
- "Using DP training pattern TPS%d\n",
- dp_train_pat & train_pat_mask);
-
- if (HAS_DDI(dev_priv)) {
- u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+ u32 *DP = &intel_dp->DP;
- if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
- temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
- else
- temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
-
- temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- switch (dp_train_pat & train_pat_mask) {
- case DP_TRAINING_PATTERN_DISABLE:
- temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+ *DP &= ~DP_LINK_TRAIN_MASK_CPT;
- break;
- case DP_TRAINING_PATTERN_1:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- break;
- case DP_TRAINING_PATTERN_2:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
- break;
- case DP_TRAINING_PATTERN_3:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
- break;
- case DP_TRAINING_PATTERN_4:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
- break;
- }
- intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ *DP |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ *DP |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ }
- } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
- *DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
- switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
- case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF_CPT;
- break;
- case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1_CPT;
- break;
- case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2_CPT;
- break;
- case DP_TRAINING_PATTERN_3:
- drm_dbg_kms(&dev_priv->drm,
- "TPS3 not supported, using TPS2 instead\n");
- *DP |= DP_LINK_TRAIN_PAT_2_CPT;
- break;
- }
+static void
+g4x_set_link_train(struct intel_dp *intel_dp,
+ u8 dp_train_pat)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 *DP = &intel_dp->DP;
- } else {
- *DP &= ~DP_LINK_TRAIN_MASK;
+ *DP &= ~DP_LINK_TRAIN_MASK;
- switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
- case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF;
- break;
- case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1;
- break;
- case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2;
- break;
- case DP_TRAINING_PATTERN_3:
- drm_dbg_kms(&dev_priv->drm,
- "TPS3 not supported, using TPS2 instead\n");
- *DP |= DP_LINK_TRAIN_PAT_2;
- break;
- }
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ *DP |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ *DP |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ *DP |= DP_LINK_TRAIN_PAT_2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
+ *DP |= DP_LINK_TRAIN_PAT_2;
+ break;
}
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
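
[Editor's note: with the DDI, CPT and g4x branches of _intel_dp_set_link_train() split into standalone functions, each becomes a candidate for the new intel_dp->set_link_train() hook added to struct intel_dp above. A hypothetical wiring sketch mirroring the removed platform checks (the actual assignment happens at encoder init, outside this excerpt):

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		intel_dp->set_link_train = cpt_set_link_train;
	else
		intel_dp->set_link_train = g4x_set_link_train;

	/* link training then dispatches through the hook */
	intel_dp->set_link_train(intel_dp, DP_TRAINING_PATTERN_1);
]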
static void intel_dp_enable_port(struct intel_dp *intel_dp,
@@ -3577,7 +3737,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-static void intel_enable_dp(struct intel_encoder *encoder,
+static void intel_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3623,22 +3784,25 @@ static void intel_enable_dp(struct intel_encoder *encoder,
}
}
-static void g4x_enable_dp(struct intel_encoder *encoder,
+static void g4x_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- intel_enable_dp(encoder, pipe_config, conn_state);
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
-static void vlv_enable_dp(struct intel_encoder *encoder,
+static void vlv_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_edp_backlight_on(pipe_config, conn_state);
}
-static void g4x_pre_enable_dp(struct intel_encoder *encoder,
+static void g4x_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3758,16 +3922,18 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
-static void vlv_pre_enable_dp(struct intel_encoder *encoder,
+static void vlv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
vlv_phy_pre_encoder_enable(encoder, pipe_config);
- intel_enable_dp(encoder, pipe_config, conn_state);
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
}
-static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
+static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3776,19 +3942,21 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
vlv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_pre_enable_dp(struct intel_encoder *encoder,
+static void chv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
chv_phy_pre_encoder_enable(encoder, pipe_config);
- intel_enable_dp(encoder, pipe_config, conn_state);
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
}
-static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
+static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3797,7 +3965,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
+static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3881,7 +4050,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
}
}
-static u32 vlv_signal_levels(struct intel_dp *intel_dp)
+static void vlv_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
unsigned long demph_reg_value, preemph_reg_value,
@@ -3909,7 +4078,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x5598DA3A;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_1:
@@ -3928,7 +4097,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_2:
@@ -3943,7 +4112,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_3:
@@ -3954,20 +4123,18 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
- return 0;
+ return;
}
break;
default:
- return 0;
+ return;
}
vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value, 0);
-
- return 0;
}
-static u32 chv_signal_levels(struct intel_dp *intel_dp)
+static void chv_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u32 deemph_reg_value, margin_reg_value;
@@ -3995,7 +4162,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
uniq_trans_scale = true;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_1:
@@ -4013,7 +4180,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_2:
@@ -4027,7 +4194,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_3:
@@ -4037,21 +4204,18 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
- return 0;
+ return;
}
break;
default:
- return 0;
+ return;
}
chv_set_phy_signal_level(encoder, deemph_reg_value,
margin_reg_value, uniq_trans_scale);
-
- return 0;
}
-static u32
-g4x_signal_levels(u8 train_set)
+static u32 g4x_signal_levels(u8 train_set)
{
u32 signal_levels = 0;
@@ -4088,12 +4252,31 @@ g4x_signal_levels(u8 train_set)
return signal_levels;
}
+static void
+g4x_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = g4x_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
/* SNB CPU eDP voltage swing and pre-emphasis control */
-static u32
-snb_cpu_edp_signal_levels(u8 train_set)
+static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -4116,12 +4299,31 @@ snb_cpu_edp_signal_levels(u8 train_set)
}
}
+static void
+snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = snb_cpu_edp_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
/* IVB CPU eDP voltage swing and pre-emphasis control */
-static u32
-ivb_cpu_edp_signal_levels(u8 train_set)
+static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return EDP_LINK_TRAIN_400MV_0DB_IVB;
@@ -4147,38 +4349,29 @@ ivb_cpu_edp_signal_levels(u8 train_set)
}
}
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp)
+static void
+ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->base.port;
- u32 signal_levels, mask = 0;
u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
- if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- signal_levels = bxt_signal_levels(intel_dp);
- } else if (HAS_DDI(dev_priv)) {
- signal_levels = ddi_signal_levels(intel_dp);
- mask = DDI_BUF_EMP_MASK;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- signal_levels = chv_signal_levels(intel_dp);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
- signal_levels = ivb_cpu_edp_signal_levels(train_set);
- mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
- } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
- signal_levels = snb_cpu_edp_signal_levels(train_set);
- mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
- } else {
- signal_levels = g4x_signal_levels(train_set);
- mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
- }
+ signal_levels = ivb_cpu_edp_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+ intel_dp->DP |= signal_levels;
- if (mask)
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
- signal_levels);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
@@ -4189,55 +4382,28 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
" (max)" : "");
- intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
-
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_dp->set_signal_levels(intel_dp);
}
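/*
 * Illustrative sketch, not in this hunk: the removed if/else ladder
 * above suggests the new ->set_signal_levels() hook is selected per
 * platform at init time, roughly as follows (the assignment site is an
 * assumption; DDI and bxt platforms presumably get their hooks from
 * the DDI code):
 *
 *	if (IS_CHERRYVIEW(dev_priv))
 *		intel_dp->set_signal_levels = chv_set_signal_levels;
 *	else if (IS_VALLEYVIEW(dev_priv))
 *		intel_dp->set_signal_levels = vlv_set_signal_levels;
 *	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
 *		intel_dp->set_signal_levels = ivb_cpu_edp_set_signal_levels;
 *	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
 *		intel_dp->set_signal_levels = snb_cpu_edp_set_signal_levels;
 *	else
 *		intel_dp->set_signal_levels = g4x_set_signal_levels;
 */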
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
- _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
+ if (dp_train_pat & train_pat_mask)
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DP training pattern TPS%d\n",
+ dp_train_pat & train_pat_mask);
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_dp->set_link_train(intel_dp, dp_train_pat);
}
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->base.port;
- u32 val;
-
- if (!HAS_DDI(dev_priv))
- return;
-
- val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
- val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
-
- /*
- * Until TGL on PORT_A we can have only eDP in SST mode. There the only
- * reason we need to set idle transmission mode is to work around a HW
- * issue where we enable the pipe while not in idle link-training mode.
- * In this case there is requirement to wait for a minimum number of
- * idle patterns to be sent.
- */
- if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
- return;
-
- if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
- DP_TP_STATUS_IDLE_DONE, 1))
- drm_err(&dev_priv->drm,
- "Timed out waiting for DP idle patterns\n");
+ if (intel_dp->set_idle_link_train)
+ intel_dp->set_idle_link_train(intel_dp);
}
static void
@@ -4316,6 +4482,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
static void
intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 dpcd_ext[6];
/*
@@ -4331,20 +4498,22 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
&dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
- DRM_ERROR("DPCD failed read at extended capabilities\n");
+ drm_err(&i915->drm,
+ "DPCD failed read at extended capabilities\n");
return;
}
if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
- DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
+ drm_dbg_kms(&i915->drm,
+ "DPCD extended DPCD rev less than base DPCD rev\n");
return;
}
if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
return;
- DRM_DEBUG_KMS("Base DPCD: %*ph\n",
- (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
+ drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
}
@@ -4352,13 +4521,16 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
intel_dp_extended_receiver_capabilities(intel_dp);
- DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
+ drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd),
+ intel_dp->dpcd);
return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
@@ -4375,6 +4547,8 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
/*
* Clear the cached register set to avoid using stale values
* for the sinks that do not support DSC.
@@ -4390,20 +4564,23 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
intel_dp->dsc_dpcd,
sizeof(intel_dp->dsc_dpcd)) < 0)
- DRM_ERROR("Failed to read DPCD register 0x%x\n",
- DP_DSC_SUPPORT);
+ drm_err(&i915->drm,
+ "Failed to read DPCD register 0x%x\n",
+ DP_DSC_SUPPORT);
- DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
- (int)sizeof(intel_dp->dsc_dpcd),
- intel_dp->dsc_dpcd);
+ drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dsc_dpcd),
+ intel_dp->dsc_dpcd);
/* FEC is supported only on DP 1.4 */
if (!intel_dp_is_edp(intel_dp) &&
drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
&intel_dp->fec_capable) < 0)
- DRM_ERROR("Failed to read FEC DPCD register\n");
+ drm_err(&i915->drm,
+ "Failed to read FEC DPCD register\n");
- DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
+ drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
+ intel_dp->fec_capable);
}
}
@@ -4577,14 +4754,16 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder =
&dp_to_dig_port(intel_dp)->base;
bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
- DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
- encoder->base.base.id, encoder->base.name,
- yesno(intel_dp->can_mst), yesno(sink_can_mst),
- yesno(i915_modparams.enable_dp_mst));
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
@@ -4630,158 +4809,92 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
return false;
}
-static void
-intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
+ struct dp_sdp *sdp, size_t size)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct dp_sdp vsc_sdp = {};
+ size_t length = sizeof(struct dp_sdp);
- /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
- vsc_sdp.sdp_header.HB0 = 0;
- vsc_sdp.sdp_header.HB1 = 0x7;
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
/*
- * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
- * Colorimetry Format indication.
+ * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
+ * VSC SDP Header Bytes
*/
- vsc_sdp.sdp_header.HB2 = 0x5;
+ sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
+ sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
+ sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
+ sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
/*
- * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/
- * Colorimetry Format indication (HB2 = 05h).
+ * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
+ * per DP 1.4a spec.
*/
- vsc_sdp.sdp_header.HB3 = 0x13;
-
- /* DP 1.4a spec, Table 2-120 */
- switch (crtc_state->output_format) {
- case INTEL_OUTPUT_FORMAT_YCBCR444:
- vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
- break;
- case INTEL_OUTPUT_FORMAT_YCBCR420:
- vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
- break;
- case INTEL_OUTPUT_FORMAT_RGB:
- default:
- /* RGB: DB16[7:4] = 0h */
- break;
- }
+ if (vsc->revision != 0x5)
+ goto out;
- switch (conn_state->colorspace) {
- case DRM_MODE_COLORIMETRY_BT709_YCC:
- vsc_sdp.db[16] |= 0x1;
- break;
- case DRM_MODE_COLORIMETRY_XVYCC_601:
- vsc_sdp.db[16] |= 0x2;
- break;
- case DRM_MODE_COLORIMETRY_XVYCC_709:
- vsc_sdp.db[16] |= 0x3;
- break;
- case DRM_MODE_COLORIMETRY_SYCC_601:
- vsc_sdp.db[16] |= 0x4;
- break;
- case DRM_MODE_COLORIMETRY_OPYCC_601:
- vsc_sdp.db[16] |= 0x5;
- break;
- case DRM_MODE_COLORIMETRY_BT2020_CYCC:
- case DRM_MODE_COLORIMETRY_BT2020_RGB:
- vsc_sdp.db[16] |= 0x6;
- break;
- case DRM_MODE_COLORIMETRY_BT2020_YCC:
- vsc_sdp.db[16] |= 0x7;
- break;
- case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
- case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
- vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
- break;
- default:
- /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */
+ /* VSC SDP Payload for DB16 through DB18 */
+ /* Pixel Encoding and Colorimetry Formats */
+ sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
+ sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
- /* RGB->YCBCR color conversion uses the BT.709 color space. */
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ switch (vsc->bpc) {
+ case 6:
+ /* 6bpc: 0x0 */
break;
- }
-
- /*
- * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
- * the following Component Bit Depth values are defined:
- * 001b = 8bpc.
- * 010b = 10bpc.
- * 011b = 12bpc.
- * 100b = 16bpc.
- */
- switch (crtc_state->pipe_bpp) {
- case 24: /* 8bpc */
- vsc_sdp.db[17] = 0x1;
+ case 8:
+ sdp->db[17] = 0x1; /* DB17[3:0] */
break;
- case 30: /* 10bpc */
- vsc_sdp.db[17] = 0x2;
+ case 10:
+ sdp->db[17] = 0x2;
break;
- case 36: /* 12bpc */
- vsc_sdp.db[17] = 0x3;
+ case 12:
+ sdp->db[17] = 0x3;
break;
- case 48: /* 16bpc */
- vsc_sdp.db[17] = 0x4;
+ case 16:
+ sdp->db[17] = 0x4;
break;
default:
- MISSING_CASE(crtc_state->pipe_bpp);
+ MISSING_CASE(vsc->bpc);
break;
}
+ /* Dynamic Range and Component Bit Depth */
+ if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
+ sdp->db[17] |= 0x80; /* DB17[7] */
- /*
- * Dynamic Range (Bit 7)
- * 0 = VESA range, 1 = CTA range.
- * all YCbCr are always limited range
- */
- vsc_sdp.db[17] |= 0x80;
-
- /*
- * Content Type (Bits 2:0)
- * 000b = Not defined.
- * 001b = Graphics.
- * 010b = Photo.
- * 011b = Video.
- * 100b = Game
- * All other values are RESERVED.
- * Note: See CTA-861-G for the definition and expected
- * processing by a stream sink for the above contect types.
- */
- vsc_sdp.db[18] = 0;
+ /* Content Type */
+ sdp->db[18] = vsc->content_type & 0x7;
- intel_dig_port->write_infoframe(&intel_dig_port->base,
- crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
+out:
+ return length;
}
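/*
 * Worked example for the pack above (illustrative values): a revision
 * 0x5 VSC SDP describing YCbCr 4:2:0 (pixelformat 0x3), BT.709
 * colorimetry (0x1), 10 bpc, CTA dynamic range and content type 0
 * packs as header HB0..HB3 = 00h/07h/05h/13h with
 * DB16 = (0x3 << 4) | 0x1 = 0x31, DB17 = 0x2 | 0x80 = 0x82 and
 * DB18 = 0x00.
 */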
-static void
-intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static ssize_t
+intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
+ struct dp_sdp *sdp,
+ size_t size)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct dp_sdp infoframe_sdp = {};
- struct hdmi_drm_infoframe drm_infoframe = {};
+ size_t length = sizeof(struct dp_sdp);
const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
ssize_t len;
- int ret;
- ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
- if (ret) {
- DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
- return;
- }
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
- len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
+ len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
if (len < 0) {
DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
- return;
+ return -ENOSPC;
}
if (len != infoframe_size) {
DRM_DEBUG_KMS("wrong static hdr metadata size\n");
- return;
+ return -ENOSPC;
}
/*
@@ -4790,34 +4903,37 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
* Table 2-100 and Table 2-101
*/
- /* Packet ID, 00h for non-Audio INFOFRAME */
- infoframe_sdp.sdp_header.HB0 = 0;
+ /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
+ sdp->sdp_header.HB0 = 0;
/*
* Packet Type 80h + Non-audio INFOFRAME Type value
- * HDMI_INFOFRAME_TYPE_DRM: 0x87,
+ * HDMI_INFOFRAME_TYPE_DRM: 0x87
+ * - 80h + Non-audio INFOFRAME Type value
+ * - InfoFrame Type: 0x07
+ * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
*/
- infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
+ sdp->sdp_header.HB1 = drm_infoframe->type;
/*
* Least Significant Eight Bits of (Data Byte Count – 1)
- * infoframe_size - 1,
+ * infoframe_size - 1
*/
- infoframe_sdp.sdp_header.HB2 = 0x1D;
+ sdp->sdp_header.HB2 = 0x1D;
/* INFOFRAME SDP Version Number */
- infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
+ sdp->sdp_header.HB3 = (0x13 << 2);
/* CTA Header Byte 2 (INFOFRAME Version Number) */
- infoframe_sdp.db[0] = drm_infoframe.version;
+ sdp->db[0] = drm_infoframe->version;
/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
- infoframe_sdp.db[1] = drm_infoframe.length;
+ sdp->db[1] = drm_infoframe->length;
/*
* Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
* HDMI_INFOFRAME_HEADER_SIZE
*/
- BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
- memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
+ memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
HDMI_DRM_INFOFRAME_SIZE);
/*
- * Size of DP infoframe sdp packet for HDR static metadata is consist of
+ * Size of DP infoframe sdp packet for HDR static metadata consists of
* - DP SDP Header(struct dp_sdp_header): 4 bytes
* - Two Data Blocks: 2 bytes
* CTA Header Byte2 (INFOFRAME Version Number)
@@ -4828,36 +4944,286 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
* infoframe size. But GEN11+ has larger than that size, write_infoframe
* will pad rest of the size.
*/
- intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
- HDMI_PACKET_TYPE_GAMUT_METADATA,
- &infoframe_sdp,
- sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
+ return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
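/*
 * Size check for the return value above, assuming the usual kernel
 * definitions (struct dp_sdp_header is 4 bytes, HDMI_DRM_INFOFRAME_SIZE
 * is 26): 4 + 2 + 26 = 32 bytes are handed to ->write_infoframe(),
 * even though the full struct dp_sdp buffer is larger.
 */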
-void intel_dp_vsc_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void intel_write_dp_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type)
{
- if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct dp_sdp sdp = {};
+ ssize_t len;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
+ switch (type) {
+ case DP_SDP_VSC:
+ len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
+ sizeof(sdp));
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
+ &sdp, sizeof(sdp));
+ break;
+ default:
+ MISSING_CASE(type);
+ return;
+ }
+
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ return;
+
+ intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
-void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct drm_dp_vsc_sdp *vsc)
{
- if (!conn_state->hdr_output_metadata)
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct dp_sdp sdp = {};
+ ssize_t len;
+
+ len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
+
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ return;
+
+ intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
+ &sdp, len);
+}
+
+void intel_dp_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
+ u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
+ VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
+ u32 val = intel_de_read(dev_priv, reg);
+
+ /* TODO: Add DSC case (DIP_ENABLE_PPS) */
+ /* When PSR is enabled, this routine doesn't disable VSC DIP */
+ if (intel_psr_enabled(intel_dp))
+ val &= ~dip_enable;
+ else
+ val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
+
+ if (!enable) {
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+ return;
+ }
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ /* When PSR is enabled, VSC SDP is handled by PSR routine */
+ if (!intel_psr_enabled(intel_dp))
+ intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+
+ intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
+static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
+ const void *buffer, size_t size)
+{
+ const struct dp_sdp *sdp = buffer;
+
+ if (size < sizeof(struct dp_sdp))
+ return -EINVAL;
+
+ memset(vsc, 0, sizeof(*vsc));
+
+ if (sdp->sdp_header.HB0 != 0)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB1 != DP_SDP_VSC)
+ return -EINVAL;
+
+ vsc->sdp_type = sdp->sdp_header.HB1;
+ vsc->revision = sdp->sdp_header.HB2;
+ vsc->length = sdp->sdp_header.HB3;
+
+ if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
+ (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
+ /*
+ * - HB2 = 0x2, HB3 = 0x8
+ * VSC SDP supporting 3D stereo + PSR
+ * - HB2 = 0x4, HB3 = 0xe
+ * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
+ * first scan line of the SU region (applies to eDP v1.4b
+ * and higher).
+ */
+ return 0;
+ } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
+ /*
+ * - HB2 = 0x5, HB3 = 0x13
+ * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
+ * Format.
+ */
+ vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
+ vsc->colorimetry = sdp->db[16] & 0xf;
+ vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
+
+ switch (sdp->db[17] & 0x7) {
+ case 0x0:
+ vsc->bpc = 6;
+ break;
+ case 0x1:
+ vsc->bpc = 8;
+ break;
+ case 0x2:
+ vsc->bpc = 10;
+ break;
+ case 0x3:
+ vsc->bpc = 12;
+ break;
+ case 0x4:
+ vsc->bpc = 16;
+ break;
+ default:
+ MISSING_CASE(sdp->db[17] & 0x7);
+ return -EINVAL;
+ }
+
+ vsc->content_type = sdp->db[18] & 0x7;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
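/*
 * Sanity-check sketch: pack and unpack are intended to be inverses for
 * revision 0x5 packets. The DB16 = 0x31 / DB17 = 0x82 example above
 * unpacks back to pixelformat 0x3, colorimetry 0x1, dynamic_range 1
 * and bpc = 10. The 0x2/0x8 and 0x4/0xe header combinations carry no
 * pixel-encoding payload, so only the header fields are recovered for
 * those revisions.
 */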
+
+static int
+intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
+ const void *buffer, size_t size)
+{
+ int ret;
+
+ const struct dp_sdp *sdp = buffer;
+
+ if (size < sizeof(struct dp_sdp))
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB0 != 0)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
+ return -EINVAL;
+
+ /*
+ * Least Significant Eight Bits of (Data Byte Count – 1)
+ * 1Dh (i.e., Data Byte Count = 30 bytes).
+ */
+ if (sdp->sdp_header.HB2 != 0x1D)
+ return -EINVAL;
+
+ /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
+ if ((sdp->sdp_header.HB3 & 0x3) != 0)
+ return -EINVAL;
+
+ /* INFOFRAME SDP Version Number */
+ if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
+ return -EINVAL;
+
+ /* CTA Header Byte 2 (INFOFRAME Version Number) */
+ if (sdp->db[0] != 1)
+ return -EINVAL;
+
+ /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+ if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
+ HDMI_DRM_INFOFRAME_SIZE);
+
+ return ret;
+}
+
+static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ unsigned int type = DP_SDP_VSC;
+ struct dp_sdp sdp = {};
+ int ret;
+
+ /* When PSR is enabled, VSC SDP is handled by PSR routine */
+ if (intel_psr_enabled(intel_dp))
+ return;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
+
+ ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
+
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
+}
+
+static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct hdmi_drm_infoframe *drm_infoframe)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
+ struct dp_sdp sdp = {};
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
- crtc_state,
- conn_state);
+ intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+ sizeof(sdp));
+
+ ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
+ sizeof(sdp));
+
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to unpack DP HDR Metadata Infoframe SDP\n");
+}
+
+void intel_read_dp_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ unsigned int type)
+{
+ switch (type) {
+ case DP_SDP_VSC:
+ intel_read_dp_vsc_sdp(encoder, crtc_state,
+ &crtc_state->infoframes.vsc);
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
+ &crtc_state->infoframes.drm.drm);
+ break;
+ default:
+ MISSING_CASE(type);
+ break;
+ }
}
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int status = 0;
int test_link_rate;
u8 test_lane_count, test_link_bw;
@@ -4869,7 +5235,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
&test_lane_count);
if (status <= 0) {
- DRM_DEBUG_KMS("Lane count read failed\n");
+ drm_dbg_kms(&i915->drm, "Lane count read failed\n");
return DP_TEST_NAK;
}
test_lane_count &= DP_MAX_LANE_COUNT_MASK;
@@ -4877,7 +5243,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
&test_link_bw);
if (status <= 0) {
- DRM_DEBUG_KMS("Link Rate read failed\n");
+ drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
return DP_TEST_NAK;
}
test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
@@ -4895,6 +5261,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 test_pattern;
u8 test_misc;
__be16 h_width, v_height;
@@ -4904,7 +5271,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
&test_pattern);
if (status <= 0) {
- DRM_DEBUG_KMS("Test pattern read failed\n");
+ drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
return DP_TEST_NAK;
}
if (test_pattern != DP_COLOR_RAMP)
@@ -4913,21 +5280,21 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
&h_width, 2);
if (status <= 0) {
- DRM_DEBUG_KMS("H Width read failed\n");
+ drm_dbg_kms(&i915->drm, "H Width read failed\n");
return DP_TEST_NAK;
}
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
&v_height, 2);
if (status <= 0) {
- DRM_DEBUG_KMS("V Height read failed\n");
+ drm_dbg_kms(&i915->drm, "V Height read failed\n");
return DP_TEST_NAK;
}
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
&test_misc);
if (status <= 0) {
- DRM_DEBUG_KMS("TEST MISC read failed\n");
+ drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
return DP_TEST_NAK;
}
if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
@@ -4956,6 +5323,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 test_result = DP_TEST_ACK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
@@ -4972,9 +5340,10 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
*/
if (intel_dp->aux.i2c_nack_count > 0 ||
intel_dp->aux.i2c_defer_count > 0)
- DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
- intel_dp->aux.i2c_nack_count,
- intel_dp->aux.i2c_defer_count);
+ drm_dbg_kms(&i915->drm,
+ "EDID read had %d NACKs, %d DEFERs\n",
+ intel_dp->aux.i2c_nack_count,
+ intel_dp->aux.i2c_defer_count);
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
struct edid *block = intel_connector->detect_edid;
@@ -4986,7 +5355,8 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
block->checksum) <= 0)
- DRM_DEBUG_KMS("Failed to write EDID checksum\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
@@ -4998,43 +5368,217 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
return test_result;
}
+static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
+{
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+
+ if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
+ DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
+ return DP_TEST_NAK;
+ }
+
+ /*
+ * link_mst is set to false to avoid executing MST-related code
+ * during compliance testing.
+ */
+ intel_dp->link_mst = false;
+
+ return DP_TEST_ACK;
+}
+
+static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 pattern_val;
+
+ switch (data->phy_pattern) {
+ case DP_PHY_TEST_PATTERN_NONE:
+ DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+ break;
+ case DP_PHY_TEST_PATTERN_D10_2:
+ DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
+ break;
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_SCRAMBLED_0);
+ break;
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
+ break;
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ /*
+ * FIXME: Ideally the pattern should come from DPCD 0x250. Since
+ * the current DPR-100 firmware cannot set it, hardcode the pattern
+ * here for the compliance test.
+ */
+ DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
+ pattern_val = 0x3e0f83e0;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
+ pattern_val = 0x0f83e0f8;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
+ pattern_val = 0x0000f83e;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_CUSTOM80);
+ break;
+ case DP_PHY_TEST_PATTERN_CP2520:
+ /*
+ * FIXME: Ideally the pattern should come from DPCD 0x24A. Since
+ * the current DPR-100 firmware cannot set it, hardcode the pattern
+ * here for the compliance test.
+ */
+ DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
+ pattern_val = 0xFB;
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
+ pattern_val);
+ break;
+ default:
+ WARN(1, "Invalid Phy Test Pattern\n");
+ }
+}
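/*
 * Note on the custom pattern above: the 80-bit pattern is split across
 * three 32-bit DDI_DP_COMP_PAT registers as 32 + 32 + 16 bits, which
 * is why only the low 16 bits (0xf83e) of the final write are
 * meaningful.
 */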
+
+static void
+intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+
+ trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe));
+ trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+ dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+
+ trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
+ TGL_TRANS_DDI_PORT_MASK);
+ trans_conf_value &= ~PIPECONF_ENABLE;
+ dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
+
+ intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+ trans_ddi_func_ctl_value);
+ intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+}
+
+static void
+intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, u8 lane_cnt)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_dig_port->base.port;
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+
+ trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe));
+ trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+ dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+
+ trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
+ TGL_TRANS_DDI_SELECT_PORT(port);
+ trans_conf_value |= PIPECONF_ENABLE;
+ dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
+
+ intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+ intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+ trans_ddi_func_ctl_value);
+}
+
+void intel_dp_process_phy_request(struct intel_dp *intel_dp)
+{
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_DEBUG_KMS("failed to get link status\n");
+ return;
+ }
+
+ /* retrieve vswing & pre-emphasis setting */
+ intel_dp_get_adjust_train(intel_dp, link_status);
+
+ intel_dp_autotest_phy_ddi_disable(intel_dp);
+
+ intel_dp_set_signal_levels(intel_dp);
+
+ intel_dp_phy_pattern_update(intel_dp);
+
+ intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);
+
+ drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
+ link_status[DP_DPCD_REV]);
+}
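/*
 * The request above is serviced in a fixed order: read the requested
 * vswing/pre-emphasis from the link status, quiesce the DDI, program
 * the source signal levels and pattern generator, re-enable the DDI,
 * and only then tell the sink via DPCD which pattern to expect, so the
 * sink never measures a half-programmed source.
 */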
+
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
- u8 test_result = DP_TEST_NAK;
+ u8 test_result;
+
+ test_result = intel_dp_prepare_phytest(intel_dp);
+ if (test_result != DP_TEST_ACK)
+ DRM_ERROR("Phy test preparation failed\n");
+
+ intel_dp_process_phy_request(intel_dp);
+
return test_result;
}
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 response = DP_TEST_NAK;
u8 request = 0;
int status;
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
if (status <= 0) {
- DRM_DEBUG_KMS("Could not read test request from sink\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not read test request from sink\n");
goto update_status;
}
switch (request) {
case DP_TEST_LINK_TRAINING:
- DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
+ drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
response = intel_dp_autotest_link_training(intel_dp);
break;
case DP_TEST_LINK_VIDEO_PATTERN:
- DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
+ drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
response = intel_dp_autotest_video_pattern(intel_dp);
break;
case DP_TEST_LINK_EDID_READ:
- DRM_DEBUG_KMS("EDID test requested\n");
+ drm_dbg_kms(&i915->drm, "EDID test requested\n");
response = intel_dp_autotest_edid(intel_dp);
break;
case DP_TEST_LINK_PHY_TEST_PATTERN:
- DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+ drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
response = intel_dp_autotest_phy_pattern(intel_dp);
break;
default:
- DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
+ drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
+ request);
break;
}
@@ -5044,64 +5588,59 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
update_status:
status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
if (status <= 0)
- DRM_DEBUG_KMS("Could not write test response to sink\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not write test response to sink\n");
}
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
- bool bret;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool need_retrain = false;
- if (intel_dp->is_mst) {
- u8 esi[DP_DPRX_ESI_LEN] = { 0 };
- int ret = 0;
+ if (!intel_dp->is_mst)
+ return -EINVAL;
+
+ WARN_ON_ONCE(intel_dp->active_mst_links < 0);
+
+ for (;;) {
+ u8 esi[DP_DPRX_ESI_LEN] = {};
+ bool bret, handled;
int retry;
- bool handled;
- WARN_ON_ONCE(intel_dp->active_mst_links < 0);
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
-go_again:
- if (bret == true) {
-
- /* check link status - esi[10] = 0x200c */
- if (intel_dp->active_mst_links > 0 &&
- !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
- DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
- }
+ if (!bret) {
+ drm_dbg_kms(&i915->drm,
+ "failed to get ESI - device may have failed\n");
+ return -EINVAL;
+ }
- DRM_DEBUG_KMS("got esi %3ph\n", esi);
- ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
-
- if (handled) {
- for (retry = 0; retry < 3; retry++) {
- int wret;
- wret = drm_dp_dpcd_write(&intel_dp->aux,
- DP_SINK_COUNT_ESI+1,
- &esi[1], 3);
- if (wret == 3) {
- break;
- }
- }
+ /* check link status - esi[10] = 0x200c */
+ if (intel_dp->active_mst_links > 0 && !need_retrain &&
+ !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "channel EQ not ok, retraining\n");
+ need_retrain = true;
+ }
- bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
- if (bret == true) {
- DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
- goto go_again;
- }
- } else
- ret = 0;
+ drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
- return ret;
- } else {
- DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
+ drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+ if (!handled)
+ break;
+
+ for (retry = 0; retry < 3; retry++) {
+ int wret;
+
+ wret = drm_dp_dpcd_write(&intel_dp->aux,
+ DP_SINK_COUNT_ESI + 1,
+ &esi[1], 3);
+ if (wret == 3)
+ break;
}
}
- return -EINVAL;
+
+ return need_retrain;
}
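/*
 * Hedged usage sketch (the caller is not in this hunk): a negative
 * return means the ESI read failed or the port is not in MST mode,
 * 0 means nothing more to do, and a positive value means channel EQ
 * failed and the caller is now expected to retrain, e.g.:
 *
 *	ret = intel_dp_check_mst_status(intel_dp);
 *	if (ret > 0)
 *		intel_dp_retrain_link(encoder, ctx);
 */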
static bool
@@ -5138,20 +5677,102 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
+static bool intel_dp_has_connector(struct intel_dp *intel_dp,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder;
+ enum pipe pipe;
+
+ if (!conn_state->best_encoder)
+ return false;
+
+ /* SST */
+ encoder = &dp_to_dig_port(intel_dp)->base;
+ if (conn_state->best_encoder == &encoder->base)
+ return true;
+
+ /* MST */
+ for_each_pipe(i915, pipe) {
+ encoder = &intel_dp->mst_encoders[pipe]->base;
+ if (conn_state->best_encoder == &encoder->base)
+ return true;
+ }
+
+ return false;
+}
+
+static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
+ struct drm_modeset_acquire_ctx *ctx,
+ u32 *crtc_mask)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ int ret = 0;
+
+ *crtc_mask = 0;
+
+ if (!intel_dp_needs_link_retrain(intel_dp))
+ return 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state =
+ connector->base.state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!intel_dp_has_connector(intel_dp, conn_state))
+ continue;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ continue;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ break;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ continue;
+
+ *crtc_mask |= drm_crtc_mask(&crtc->base);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!intel_dp_needs_link_retrain(intel_dp))
+ *crtc_mask = 0;
+
+ return ret;
+}
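/*
 * Unlike the old single-connector path, the mask built here covers
 * every active CRTC driven by this DP port, both the SST encoder and
 * any MST stream encoders, so the underrun suppression and retraining
 * in intel_dp_retrain_link() can be applied to all affected pipes.
 */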
+
+static bool intel_dp_is_connected(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ return connector->base.status == connector_status_connected ||
+ intel_dp->is_mst;
+}
+
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_connector_state *conn_state;
- struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
+ u32 crtc_mask;
int ret;
- /* FIXME handle the MST connectors as well */
-
- if (!connector || connector->base.status != connector_status_connected)
+ if (!intel_dp_is_connected(intel_dp))
return 0;
ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
@@ -5159,46 +5780,42 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
if (ret)
return ret;
- conn_state = connector->base.state;
-
- crtc = to_intel_crtc(conn_state->crtc);
- if (!crtc)
- return 0;
-
- ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
if (ret)
return ret;
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
-
- if (!crtc_state->hw.active)
+ if (crtc_mask == 0)
return 0;
- if (conn_state->commit &&
- !try_wait_for_completion(&conn_state->commit->hw_done))
- return 0;
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
+ encoder->base.base.id, encoder->base.name);
- if (!intel_dp_needs_link_retrain(intel_dp))
- return 0;
+ for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
- /* Suppress underruns caused by re-training */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- if (crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), false);
+ /* Suppress underruns caused by re-training */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ if (crtc_state->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), false);
+ }
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
- /* Keep underrun reporting disabled until things are stable */
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), true);
+ /* Keep underrun reporting disabled until things are stable */
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (crtc_state->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), true);
+ }
return 0;
}
@@ -5217,14 +5834,13 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
*/
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
- state = intel_encoder_hotplug(encoder, connector, irq_received);
+ state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -5248,7 +5864,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
* Keeping it consistent with intel_ddi_hotplug() and
* intel_hdmi_hotplug().
*/
- if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
state = INTEL_HOTPLUG_RETRY;
return state;
@@ -5256,6 +5872,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
@@ -5274,7 +5891,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
- DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+ drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
/*
@@ -5341,6 +5958,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
u8 *dpcd = intel_dp->dpcd;
u8 type;
@@ -5388,7 +6006,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
}
/* Anything else is out of spec, warn and ignore */
- DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
+ drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
return connector_status_disconnected;
}
@@ -5401,64 +6019,7 @@ edp_detect(struct intel_dp *intel_dp)
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = SDE_PORTB_HOTPLUG;
- break;
- case HPD_PORT_C:
- bit = SDE_PORTC_HOTPLUG;
- break;
- case HPD_PORT_D:
- bit = SDE_PORTD_HOTPLUG;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, SDEISR) & bit;
-}
-
-static bool cpt_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = SDE_PORTB_HOTPLUG_CPT;
- break;
- case HPD_PORT_C:
- bit = SDE_PORTC_HOTPLUG_CPT;
- break;
- case HPD_PORT_D:
- bit = SDE_PORTD_HOTPLUG_CPT;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, SDEISR) & bit;
-}
-
-static bool spt_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_A:
- bit = SDE_PORTA_HOTPLUG_SPT;
- break;
- case HPD_PORT_E:
- bit = SDE_PORTE_HOTPLUG_SPT;
- break;
- default:
- return cpt_digital_port_connected(encoder);
- }
+ u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, SDEISR) & bit;
}
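/*
 * The per-PCH switch statements removed above are folded into the
 * hotplug.pch_hpd[] lookup table; on CPT, for example, the table is
 * expected to map HPD_PORT_B to SDE_PORTB_HOTPLUG_CPT, matching the
 * removed cpt variant (the table initialization is not part of this
 * hunk).
 */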
@@ -5512,89 +6073,9 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
- if (encoder->hpd_pin == HPD_PORT_A)
- return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
- else
- return ibx_digital_port_connected(encoder);
-}
-
-static bool snb_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (encoder->hpd_pin == HPD_PORT_A)
- return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
- else
- return cpt_digital_port_connected(encoder);
-}
-
-static bool ivb_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (encoder->hpd_pin == HPD_PORT_A)
- return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB;
- else
- return cpt_digital_port_connected(encoder);
-}
-
-static bool bdw_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (encoder->hpd_pin == HPD_PORT_A)
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
- else
- return cpt_digital_port_connected(encoder);
-}
-
-static bool bxt_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_A:
- bit = BXT_DE_PORT_HP_DDIA;
- break;
- case HPD_PORT_B:
- bit = BXT_DE_PORT_HP_DDIB;
- break;
- case HPD_PORT_C:
- bit = BXT_DE_PORT_HP_DDIC;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
-}
-
-static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
- enum phy phy)
-{
- if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
- return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
-
- return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
-}
-
-static bool icp_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (intel_phy_is_combo(dev_priv, phy))
- return intel_combo_phy_connected(dev_priv, phy);
- else if (intel_phy_is_tc(dev_priv, phy))
- return intel_tc_port_connected(dig_port);
- else
- MISSING_CASE(encoder->hpd_pin);
-
- return false;
+ return intel_de_read(dev_priv, DEISR) & bit;
}
/*
@@ -5608,44 +6089,15 @@ static bool icp_digital_port_connected(struct intel_encoder *encoder)
*
* Return %true if port is connected, %false otherwise.
*/
-static bool __intel_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (HAS_GMCH(dev_priv)) {
- if (IS_GM45(dev_priv))
- return gm45_digital_port_connected(encoder);
- else
- return g4x_digital_port_connected(encoder);
- }
-
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- return icp_digital_port_connected(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- return spt_digital_port_connected(encoder);
- else if (IS_GEN9_LP(dev_priv))
- return bxt_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 8))
- return bdw_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 7))
- return ivb_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 6))
- return snb_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 5))
- return ilk_digital_port_connected(encoder);
-
- MISSING_CASE(INTEL_GEN(dev_priv));
- return false;
-}
-
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_connected = false;
intel_wakeref_t wakeref;
with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- is_connected = __intel_digital_port_connected(encoder);
+ is_connected = dig_port->connected(encoder);
return is_connected;
}
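/*
 * Illustrative sketch, not in this hunk: ->connected() replaces the
 * removed __intel_digital_port_connected() ladder, so the hook is
 * presumably assigned per platform at encoder init, e.g. the gm45/g4x
 * variants on GMCH systems and the ilk/ibx variants kept above on PCH
 * platforms (the assignment site is an assumption).
 */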
@@ -5860,6 +6312,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static int
intel_dp_connector_register(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
int ret;
@@ -5867,10 +6320,8 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- intel_connector_debugfs_add(connector);
-
- DRM_DEBUG_KMS("registering %s bus for %s\n",
- intel_dp->aux.name, connector->kdev->kobj.name);
+ drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
+ intel_dp->aux.name, connector->kdev->kobj.name);
intel_dp->aux.dev = connector->kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
@@ -5956,6 +6407,7 @@ static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
static const struct drm_dp_aux_msg msg = {
.request = DP_AUX_NATIVE_WRITE,
@@ -5970,8 +6422,9 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
an, DRM_HDCP_AN_LEN);
if (dpcd_ret != DRM_HDCP_AN_LEN) {
- DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
- dpcd_ret);
+ drm_dbg_kms(&i915->drm,
+ "Failed to write An over DP/AUX (%zd)\n",
+ dpcd_ret);
return dpcd_ret >= 0 ? -EIO : dpcd_ret;
}
@@ -5987,17 +6440,19 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
rxbuf, sizeof(rxbuf),
DP_AUX_CH_CTL_AUX_AKSV_SELECT);
if (ret < 0) {
- DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Write Aksv over DP/AUX failed (%d)\n", ret);
return ret;
} else if (ret == 0) {
- DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
+ drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n");
return -EIO;
}
reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
if (reply != DP_AUX_NATIVE_REPLY_ACK) {
- DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
- reply);
+ drm_dbg_kms(&i915->drm,
+ "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
+ reply);
return -EIO;
}
return 0;
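/*
 * Editor's note: every DPCD helper below normalizes a short AUX transfer with
 * "ret >= 0 ? -EIO : ret". A tiny self-contained illustration of that idiom;
 * do_xfer() is a made-up stand-in for drm_dp_dpcd_read()/write():
 */
#include <errno.h>
#include <stdio.h>

static long do_xfer(long bytes_moved)
{
	return bytes_moved;	/* >= 0: bytes moved, < 0: -errno */
}

static int xfer_exact(long want, long result)
{
	long ret = do_xfer(result);

	if (ret != want)
		/* a short transfer becomes -EIO, a real errno passes through */
		return ret >= 0 ? -EIO : (int)ret;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       xfer_exact(8, 8),		/* 0 */
	       xfer_exact(8, 3),		/* -EIO */
	       xfer_exact(8, -ENODEV));		/* -ENODEV */
	return 0;
}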
@@ -6006,11 +6461,14 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
+
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret != DRM_HDCP_KSV_LEN) {
- DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read Bksv from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6019,7 +6477,9 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
u8 *bstatus)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
+
/*
* For some reason the HDMI and DP HDCP specs call this register
* definition by different names. In the HDMI spec, it's called BSTATUS,
@@ -6028,7 +6488,8 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret != DRM_HDCP_BSTATUS_LEN) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6038,12 +6499,14 @@ static
int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
u8 *bcaps)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
- DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bcaps from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -6069,11 +6532,14 @@ static
int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
u8 *ri_prime)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
+
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret != DRM_HDCP_RI_LEN) {
- DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
+ ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6083,12 +6549,15 @@ static
int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
bool *ksv_ready)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
+
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
*ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -6099,6 +6568,7 @@ static
int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
int num_downstream, u8 *ksv_fifo)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
int i;
@@ -6110,8 +6580,9 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
ksv_fifo + i * DRM_HDCP_KSV_LEN,
len);
if (ret != len) {
- DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
- i, ret);
+ drm_dbg_kms(&i915->drm,
+ "Read ksv[%d] from DP/AUX failed (%zd)\n",
+ i, ret);
return ret >= 0 ? -EIO : ret;
}
}
@@ -6122,6 +6593,7 @@ static
int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
int i, u32 *part)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -6131,7 +6603,8 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
DP_AUX_HDCP_V_PRIME(i), part,
DRM_HDCP_V_PRIME_PART_LEN);
if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
- DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ drm_dbg_kms(&i915->drm,
+ "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6148,13 +6621,15 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
static
bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return false;
}
@@ -6225,17 +6700,19 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
0, 0 },
};
-static inline
-int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
- u8 *rx_status)
+static int
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+ u8 *rx_status)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
HDCP_2_2_DP_RXSTATUS_LEN);
if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -6279,6 +6756,7 @@ static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
@@ -6310,8 +6788,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
}
if (ret)
- DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
- hdcp2_msg_data->msg_id, ret, timeout);
+ drm_dbg_kms(&i915->drm,
+ "msg_id %d, ret %d, timeout(mSec): %d\n",
+ hdcp2_msg_data->msg_id, ret, timeout);
return ret;
}
@@ -6397,6 +6876,7 @@ static
int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, void *buf, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@@ -6430,7 +6910,8 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
(void *)byte, len);
if (ret < 0) {
- DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
+ drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
+ msg_id, ret);
return ret;
}
@@ -6721,7 +7202,11 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (ret)
return ret;
- if (INTEL_GEN(dev_priv) < 11)
+ /*
+ * We don't enable port sync on BDW due to missing w/as and
+ * because the modeset sequence hasn't been adjusted appropriately.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
if (!intel_connector_needs_modeset(state, conn))
@@ -6760,28 +7245,45 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
+static bool intel_edp_have_power(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool have_power = false;
+
+ with_pps_lock(intel_dp, wakeref) {
+ have_power = edp_have_panel_power(intel_dp) &&
+ edp_have_panel_vdd(intel_dp);
+ }
+
+ return have_power;
+}
+
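/*
 * Editor's note: the helper above plus the reworked guard below ignore any
 * eDP pulse, long or short, while panel power/vdd are down, which breaks the
 * "vdd off -> hpd -> vdd on -> detect -> vdd off" loop. A hedged sketch of
 * just that predicate (struct edp_state is invented for illustration):
 */
#include <stdbool.h>
#include <stdio.h>

struct edp_state {
	bool panel_power;
	bool vdd;
};

static bool edp_have_power(const struct edp_state *e)
{
	return e->panel_power && e->vdd;
}

static bool ignore_edp_hpd(const struct edp_state *e, bool long_hpd)
{
	return long_hpd || !edp_have_power(e);
}

int main(void)
{
	struct edp_state off = { .panel_power = false, .vdd = false };

	/* with vdd down even a short pulse is ignored */
	printf("ignore short pulse: %d\n", ignore_edp_hpd(&off, false));
	return 0;
}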
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
+ if (intel_dig_port->base.type == INTEL_OUTPUT_EDP &&
+ (long_hpd || !intel_edp_have_power(intel_dp))) {
/*
- * vdd off can generate a long pulse on eDP which
+ * vdd off can generate a long/short pulse on eDP which
* would require vdd on to handle it, and thus we
* would end up in an endless cycle of
- * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
+ * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
*/
- DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&i915->drm,
+ "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
+ long_hpd ? "long" : "short",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return IRQ_HANDLED;
}
- DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
- long_hpd ? "long" : "short");
+ drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ long_hpd ? "long" : "short");
if (long_hpd) {
intel_dp->reset_link_params = true;
@@ -6789,18 +7291,25 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (intel_dp->is_mst) {
- if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+ switch (intel_dp_check_mst_status(intel_dp)) {
+ case -EINVAL:
/*
* If we were in MST mode, and device is not
* there, get out of MST mode
*/
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ drm_dbg_kms(&i915->drm,
+ "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst,
+ intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
return IRQ_NONE;
+ case 1:
+ return IRQ_NONE;
+ default:
+ break;
}
}
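/*
 * Editor's note: the new switch above maps intel_dp_check_mst_status() results
 * to IRQ returns; approximately, an error tears MST down and reports IRQ_NONE,
 * a positive value means "nothing further to do" (also IRQ_NONE), and 0 falls
 * through to the normal IRQ_HANDLED path. A compact standalone rendering:
 */
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static enum irqreturn mst_status_to_irq(int status)
{
	if (status < 0)	/* e.g. -EINVAL: device gone, leave MST mode */
		return IRQ_NONE;
	if (status > 0)	/* already handled, nothing left to process */
		return IRQ_NONE;
	return IRQ_HANDLED;
}

int main(void)
{
	printf("%d %d %d\n",
	       mst_status_to_irq(-22),	/* -EINVAL */
	       mst_status_to_irq(1),
	       mst_status_to_irq(0));
	return 0;
}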
@@ -7831,6 +8340,23 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->post_disable = g4x_post_disable_dp;
}
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+ intel_dig_port->dp.set_link_train = cpt_set_link_train;
+ else
+ intel_dig_port->dp.set_link_train = g4x_set_link_train;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ intel_dig_port->dp.set_signal_levels = chv_set_signal_levels;
+ else if (IS_VALLEYVIEW(dev_priv))
+ intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+ else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+ else if (IS_GEN(dev_priv, 6) && port == PORT_A)
+ intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+ else
+ intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
@@ -7851,6 +8377,18 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_GM45(dev_priv))
+ intel_dig_port->connected = gm45_digital_port_connected;
+ else
+ intel_dig_port->connected = g4x_digital_port_connected;
+ } else {
+ if (port == PORT_A)
+ intel_dig_port->connected = ilk_digital_port_connected;
+ else
+ intel_dig_port->connected = ibx_digital_port_connected;
+ }
+
if (port != PORT_A)
intel_infoframe_init(intel_dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 0c7be8ed1423..1702959ca079 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -16,6 +16,7 @@ struct drm_connector_state;
struct drm_encoder;
struct drm_i915_private;
struct drm_modeset_acquire_ctx;
+struct drm_dp_vsc_sdp;
struct intel_connector;
struct intel_crtc_state;
struct intel_digital_port;
@@ -108,13 +109,21 @@ int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-void intel_dp_vsc_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc);
+void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct drm_dp_vsc_sdp *vsc);
+void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_read_dp_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ unsigned int type);
bool intel_digital_port_connected(struct intel_encoder *encoder);
+void intel_dp_process_phy_request(struct intel_dp *intel_dp);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index dbfa6895795b..0722540d64ad 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -27,6 +27,7 @@
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 reg_val = 0;
	/* Early return when the display uses another mechanism to enable the backlight. */
@@ -35,8 +36,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
&reg_val) < 0) {
- DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_DISPLAY_CONTROL_REGISTER);
+ drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_EDP_DISPLAY_CONTROL_REGISTER);
return;
}
if (enable)
@@ -46,8 +47,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
reg_val) != 1) {
- DRM_DEBUG_KMS("Failed to %s aux backlight\n",
- enable ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm, "Failed to %s aux backlight\n",
+ enable ? "enable" : "disable");
}
}
@@ -58,6 +59,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 read_val[2] = { 0x0 };
u8 mode_reg;
u16 level = 0;
@@ -65,8 +67,9 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
&mode_reg) != 1) {
- DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ drm_dbg_kms(&i915->drm,
+ "Failed to read the DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
return 0;
}
@@ -80,8 +83,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
- DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+ drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
return 0;
}
level = read_val[0];
@@ -100,6 +103,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -111,7 +115,8 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
}
if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
vals, sizeof(vals)) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux backlight level\n");
return;
}
}
@@ -133,7 +138,8 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
freq = dev_priv->vbt.backlight.pwm_freq_hz;
if (!freq) {
- DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Use panel default backlight frequency\n");
return false;
}
@@ -146,13 +152,14 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
- DRM_DEBUG_KMS("Actual frequency out of range\n");
+ drm_dbg_kms(&dev_priv->drm, "Actual frequency out of range\n");
return false;
}
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to write aux backlight freq\n");
return false;
}
return true;
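/*
 * Editor's note: the frequency check above accepts an achievable PWM frequency
 * within roughly +/-25% of the VBT-requested one (fxp * 3/4 .. fxp * 5/4,
 * rounded). A minimal sketch of that window test using the kernel's
 * DIV_ROUND_CLOSEST definition for unsigned operands:
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int freq_in_range(unsigned int want, unsigned int got)
{
	unsigned int lo = DIV_ROUND_CLOSEST(want * 3, 4);
	unsigned int hi = DIV_ROUND_CLOSEST(want * 5, 4);

	return lo <= got && got <= hi;
}

int main(void)
{
	/* 800 is inside [750, 1250], 700 is not */
	printf("%d %d\n", freq_in_range(1000, 800), freq_in_range(1000, 700));
	return 0;
}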
@@ -163,13 +170,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
- DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
return;
}
@@ -186,7 +194,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT,
panel->backlight.pwmgen_bit_count) < 0)
- DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux pwmgen bit count\n");
break;
@@ -203,7 +212,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (new_dpcd_buf != dpcd_buf) {
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux backlight mode\n");
}
}
@@ -237,9 +247,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
 * minimum value will be applied automatically, so there is no need to check that.
*/
freq = i915->vbt.backlight.pwm_freq_hz;
- DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+ drm_dbg_kms(&i915->drm, "VBT defined backlight frequency %u Hz\n",
+ freq);
if (!freq) {
- DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ drm_dbg_kms(&i915->drm,
+ "Use panel default backlight frequency\n");
return max_backlight;
}
@@ -254,12 +266,14 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
*/
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to read pwmgen bit count cap min\n");
return max_backlight;
}
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to read pwmgen bit count cap max\n");
return max_backlight;
}
pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
@@ -268,7 +282,8 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
- DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT defined backlight frequency out of range\n");
return max_backlight;
}
@@ -279,10 +294,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
break;
}
- DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn);
+ drm_dbg_kms(&i915->drm, "Using eDP pwmgen bit count of %d\n", pn);
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
- DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux pwmgen bit count\n");
return max_backlight;
}
panel->backlight.pwmgen_bit_count = pn;
@@ -312,6 +328,7 @@ static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
@@ -319,7 +336,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
!(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
- DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+ drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
return true;
}
return false;
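/*
 * Editor's note: the capability test above requires two DPCD feature bits to
 * be set and one to be clear before AUX backlight control is used. The same
 * shape of test, sketched with invented capability bits:
 */
#include <stdbool.h>
#include <stdio.h>

#define CAP_TCON_ADJ	0x01
#define CAP_AUX_SET	0x02
#define CAP_PWM_PIN	0x04

static bool aux_backlight_capable(unsigned int caps)
{
	return (caps & CAP_TCON_ADJ) &&
	       (caps & CAP_AUX_SET) &&
	       !(caps & CAP_PWM_PIN);
}

int main(void)
{
	printf("%d %d\n",
	       aux_backlight_capable(CAP_TCON_ADJ | CAP_AUX_SET),
	       aux_backlight_capable(CAP_TCON_ADJ | CAP_AUX_SET | CAP_PWM_PIN));
	return 0;
}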
@@ -329,8 +346,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
- struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (i915_modparams.enable_dpcd_backlight == 0 ||
!intel_dp_aux_display_control_capable(intel_connector))
@@ -340,18 +356,18 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
 * There are a lot of machines that don't properly advertise the
 * backlight control interface in their VBIOS. :\
*/
- if (dev_priv->vbt.backlight.type !=
+ if (i915->vbt.backlight.type !=
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
i915_modparams.enable_dpcd_backlight != 1 &&
!drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
- DRM_DEV_INFO(dev->dev,
- "Panel advertises DPCD backlight support, but "
- "VBT disagrees. If your backlight controls "
- "don't work try booting with "
- "i915.enable_dpcd_backlight=1. If your machine "
- "needs this, please file a _new_ bug report on "
- "drm/i915, see " FDO_BUG_URL " for details.\n");
+ drm_info(&i915->drm,
+ "Panel advertises DPCD backlight support, but "
+ "VBT disagrees. If your backlight controls "
+ "don't work try booting with "
+ "i915.enable_dpcd_backlight=1. If your machine "
+ "needs this, please file a _new_ bug report on "
+ "drm/i915, see " FDO_BUG_URL " for details.\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index a7defb37ab00..e4f1843170b7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,9 +34,8 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
-static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
- const u8 link_status[DP_LINK_STATUS_SIZE])
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
{
u8 v = 0;
u8 p = 0;
@@ -219,7 +218,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Update training set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ intel_dp_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
drm_err(&i915->drm,
"failed to update link training\n");
@@ -338,7 +337,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
}
/* Update training set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ intel_dp_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
drm_err(&i915->drm,
"failed to update link training\n");
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 174566adcc92..01f1dabbb060 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -6,8 +6,12 @@
#ifndef __INTEL_DP_LINK_TRAINING_H__
#define __INTEL_DP_LINK_TRAINING_H__
+#include <drm/drm_dp_helper.h>
+
struct intel_dp;
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const u8 link_status[DP_LINK_STATUS_SIZE]);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 44f3fd251ca1..d18b406f2a7d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -47,9 +47,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- void *port = connector->port;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
@@ -65,7 +65,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
false);
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
- port, crtc_state->pbn, 0);
+ connector->port,
+ crtc_state->pbn, 0);
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -73,7 +74,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
}
if (slots < 0) {
- DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
+ drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
+ slots);
return slots;
}
@@ -88,56 +90,10 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
return 0;
}
-/*
- * Iterate over all connectors and return the smallest transcoder in the MST
- * stream
- */
-static enum transcoder
-intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
- struct intel_dp *mst_port)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_digital_connector_state *conn_state;
- struct intel_connector *connector;
- enum pipe ret = I915_MAX_PIPES;
- int i;
-
- if (INTEL_GEN(dev_priv) < 12)
- return INVALID_TRANSCODER;
-
- for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- if (connector->mst_port != mst_port || !conn_state->base.crtc)
- continue;
-
- crtc = to_intel_crtc(conn_state->base.crtc);
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- if (!crtc_state->uapi.active)
- continue;
-
- /*
- * Using crtc->pipe because crtc_state->cpu_transcoder is
- * computed, so others CRTCs could have non-computed
- * cpu_transcoder
- */
- if (crtc->pipe < ret)
- ret = crtc->pipe;
- }
-
- if (ret == I915_MAX_PIPES)
- return INVALID_TRANSCODER;
-
- /* Simple cast works because TGL don't have a eDP transcoder */
- return (enum transcoder)ret;
-}
-
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
@@ -147,7 +103,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- void *port = connector->port;
struct link_config_limits limits;
int ret;
@@ -158,8 +113,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio =
- drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port);
+ pipe_config->has_audio = connector->port->has_audio;
else
pipe_config->has_audio =
intel_conn_state->force_audio == HDMI_AUDIO_ON;
@@ -201,7 +155,56 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
- pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
+ return 0;
+}
+
+/*
+ * Iterate over all connectors and return a mask of
+ * all CPU transcoders streaming over the same DP link.
+ */
+static unsigned int
+intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
+ struct intel_dp *mst_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ u8 transcoders = 0;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return 0;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (connector->mst_port != mst_port || !conn_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_state->base.crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ transcoders |= BIT(crtc_state->cpu_transcoder);
+ }
+
+ return transcoders;
+}
+
+static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+
+ /* lowest numbered transcoder will be designated master */
+ crtc_state->mst_master_transcoder =
+ ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
return 0;
}
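/*
 * Editor's note: the new compute_config_late hook ORs each active stream's
 * transcoder into a bitmask and designates the lowest-numbered one master via
 * ffs() - 1. The same selection in a self-contained sketch:
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int transcoders = 0;
	int active[] = { 3, 1, 2 };	/* hypothetical active transcoders */

	for (int i = 0; i < 3; i++)
		transcoders |= 1u << active[i];

	/* lowest set bit wins: prints 1 */
	printf("master transcoder: %d\n", ffs((int)transcoders) - 1);
	return 0;
}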
@@ -313,7 +316,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
return ret;
}
-static void intel_mst_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -322,22 +326,25 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&i915->drm, "active links %d\n",
+ intel_dp->active_mst_links);
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
- DRM_DEBUG_KMS("failed to update payload %d\n", ret);
+ drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
}
-static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -371,7 +378,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
- DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for ACT sent when disabling\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
@@ -402,13 +410,15 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_mst->connector = NULL;
if (last_mst_stream)
- intel_dig_port->base.post_disable(&intel_dig_port->base,
+ intel_dig_port->base.post_disable(state, &intel_dig_port->base,
old_crtc_state, NULL);
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
}
-static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -417,11 +427,12 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
if (intel_dp->active_mst_links == 0)
- intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
+ intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base,
pipe_config, NULL);
}
-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -445,7 +456,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
if (first_mst_stream)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -453,7 +465,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
if (first_mst_stream)
- intel_dig_port->base.pre_enable(&intel_dig_port->base,
+ intel_dig_port->base.pre_enable(state, &intel_dig_port->base,
pipe_config, NULL);
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
@@ -461,7 +473,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
pipe_config->pbn,
pipe_config->dp_m_n.tu);
if (!ret)
- DRM_ERROR("failed to allocate vcpi\n");
+ drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
intel_dp->active_mst_links++;
temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
@@ -477,14 +489,15 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
* here for the following ones.
*/
if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
- intel_ddi_enable_pipe_clock(pipe_config);
+ intel_ddi_enable_pipe_clock(encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
intel_dp_set_m_n(pipe_config, M1_N1);
}
-static void intel_mst_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -495,19 +508,23 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
- intel_enable_pipe(pipe_config);
-
- intel_crtc_vblank_on(pipe_config);
+ intel_ddi_enable_transcoder_func(encoder, pipe_config);
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
- DRM_ERROR("Timed out waiting for ACT sent\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+ intel_enable_pipe(pipe_config);
+
+ intel_crtc_vblank_on(pipe_config);
+
if (pipe_config->has_audio)
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
@@ -786,6 +803,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->pipe_mask = ~0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
+ intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 2d47f1f756a2..b45185b80bec 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -80,7 +80,7 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
- WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+ drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
if (!state->dpll_set) {
state->dpll_set = true;
@@ -979,7 +979,7 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+ if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
return NULL;
crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
@@ -1616,7 +1616,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
ref_clock / 0x8000;
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
return 0;
return dco_freq / (p0 * p1 * p2 * 5);
@@ -2074,7 +2074,7 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
clk_div->p1 = best_clock.p1;
clk_div->p2 = best_clock.p2;
- WARN_ON(best_clock.m1 != 2);
+ drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
clk_div->n = best_clock.n;
clk_div->m2_int = best_clock.m2 >> 22;
clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index d7a6bf2277df..29fec6a92d17 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -34,7 +34,7 @@
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
-static inline bool is_dsb_busy(struct intel_dsb *dsb)
+static bool is_dsb_busy(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -43,7 +43,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb)
return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
}
-static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_enable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -63,7 +63,7 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
return true;
}
-static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_disable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index a2a937109a5a..afa4e6817e8c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -31,20 +31,21 @@ int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
int intel_dsi_get_modes(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *mode;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
if (!intel_connector->panel.fixed_mode) {
- DRM_DEBUG_KMS("no fixed mode\n");
+ drm_dbg_kms(&i915->drm, "no fixed mode\n");
return 0;
}
mode = drm_mode_duplicate(connector->dev,
intel_connector->panel.fixed_mode);
if (!mode) {
- DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+ drm_dbg_kms(&i915->drm, "drm_mode_duplicate failed\n");
return 0;
}
@@ -60,7 +61,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 574dcfec9577..eed037ec0b29 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -121,7 +121,7 @@ struct i2c_adapter_lookup {
#define ICL_GPIO_DDPA_CTRLCLK_2 8
#define ICL_GPIO_DDPA_CTRLDATA_2 9
-static inline enum port intel_dsi_seq_port_to_port(u8 port)
+static enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
}
@@ -453,8 +453,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct device *dev = &drm_dev->pdev->dev;
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
@@ -471,7 +470,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
if (!adapter) {
- DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+ drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
goto err_bus;
}
@@ -489,9 +488,9 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
ret = i2c_transfer(adapter, &msg, 1);
if (ret < 0)
- DRM_DEV_ERROR(dev,
- "Failed to xfer payload of size (%u) to reg (%u)\n",
- payload_size, reg_offset);
+ drm_err(&i915->drm,
+ "Failed to xfer payload of size (%u) to reg (%u)\n",
+ payload_size, reg_offset);
kfree(payload_data);
err_alloc:
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 341d5ce8b062..5cd09034519b 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -183,7 +183,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
-static void intel_disable_dvo(struct intel_encoder *encoder,
+static void intel_disable_dvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -197,7 +198,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
intel_de_read(dev_priv, dvo_reg);
}
-static void intel_enable_dvo(struct intel_encoder *encoder,
+static void intel_enable_dvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -272,7 +274,8 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
return 0;
}
-static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+static void intel_dvo_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 2e5d835a9eaa..1c26673acb2d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -104,7 +104,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
/* Wait for compressing bit to clear */
if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
FBC_STAT_COMPRESSING, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
+ drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
return;
}
}
@@ -485,8 +485,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
if (!ret)
goto err_llb;
else if (ret > 1) {
- DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
+ drm_info_once(&dev_priv->drm,
+ "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
fbc->threshold = ret;
@@ -521,8 +521,9 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
dev_priv->dsm.start + compressed_llb->start);
}
- DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
- fbc->compressed_fb.size, fbc->threshold);
+ drm_dbg_kms(&dev_priv->drm,
+ "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
+ fbc->compressed_fb.size, fbc->threshold);
return 0;
@@ -531,7 +532,7 @@ err_fb:
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
if (drm_mm_initialized(&dev_priv->mm.stolen))
- pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@@ -539,6 +540,9 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
+ if (WARN_ON(intel_fbc_hw_is_active(dev_priv)))
+ return;
+
if (!drm_mm_node_allocated(&fbc->compressed_fb))
return;
@@ -563,7 +567,7 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
}
static bool stride_is_valid(struct drm_i915_private *dev_priv,
- unsigned int stride)
+ u64 modifier, unsigned int stride)
{
/* This should have been caught earlier. */
if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
@@ -579,6 +583,11 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
return false;
+ /* Display WA #1105: skl,bxt,kbl,cfl,glk */
+ if (IS_GEN(dev_priv, 9) &&
+ modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
+ return false;
+
if (stride > 16384)
return false;
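/*
 * Editor's note: "stride & 511" in the workaround above rejects any linear
 * stride that is not a multiple of 512 bytes (Display WA #1105). The same
 * alignment test, standalone:
 */
#include <stdio.h>

static int stride_wa_ok(unsigned int stride)
{
	return (stride & 511) == 0;	/* true iff stride % 512 == 0 */
}

int main(void)
{
	printf("%d %d\n", stride_wa_ok(4096), stride_wa_ok(4100));
	return 0;
}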
@@ -606,6 +615,19 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
}
}
+static bool rotation_is_valid(struct drm_i915_private *dev_priv,
+ u32 pixel_format, unsigned int rotation)
+{
+ if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
+ drm_rotation_90_or_270(rotation))
+ return false;
+ else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
+ rotation != DRM_MODE_ROTATE_0)
+ return false;
+
+ return true;
+}
+
/*
* For some reason, the hardware tracking starts looking at whatever we
* programmed as the display plane base address register. It does not look at
@@ -640,6 +662,22 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
+static bool tiling_is_valid(struct drm_i915_private *dev_priv,
+ uint64_t modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ if (INTEL_GEN(dev_priv) >= 9)
+ return true;
+ return false;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ return true;
+ default:
+ return false;
+ }
+}
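/*
 * Editor's note: tiling_is_valid() above admits linear scanout only on gen9+
 * and X/Y tiling everywhere, matching the new fence-optional FBC path. A
 * hedged standalone sketch of that gate (enum values invented here):
 */
#include <stdbool.h>
#include <stdio.h>

enum fb_modifier { MOD_LINEAR, MOD_X_TILED, MOD_Y_TILED, MOD_OTHER };

static bool tiling_ok(int gen, enum fb_modifier mod)
{
	switch (mod) {
	case MOD_LINEAR:
		return gen >= 9;	/* linear relies on software tracking */
	case MOD_X_TILED:
	case MOD_Y_TILED:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("gen8 linear: %d, gen9 linear: %d\n",
	       tiling_ok(8, MOD_LINEAR), tiling_ok(9, MOD_LINEAR));
	return 0;
}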
+
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -673,6 +711,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
+ cache->fb.modifier = fb->modifier;
drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
!plane_state->vma->fence);
@@ -746,30 +785,40 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
+ /* The use of a CPU fence is one of two ways to detect writes by the
+ * CPU to the scanout and trigger updates to the FBC.
+ *
+ * The other method is by software tracking (see
+ * intel_fbc_invalidate/flush()): it manually notifies FBC, nukes
+ * the current compressed buffer and recompresses it.
*
 * Note that it is possible for a tiled surface to be unmappable (and
- * so have no fence associated with it) due to aperture constaints
+ * so have no fence associated with it) due to aperture constraints
* at the time of pinning.
*
* FIXME with 90/270 degree rotation we should use the fence on
* the normal GTT view (the rotated view doesn't even have a
* fence). Would need changes to the FBC fence Y offset as well.
- * For now this will effecively disable FBC with 90/270 degree
+ * For now this will effectively disable FBC with 90/270 degree
* rotation.
*/
- if (cache->fence_id < 0) {
+ if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
- if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
- cache->plane.rotation != DRM_MODE_ROTATE_0) {
+
+ if (!rotation_is_valid(dev_priv, cache->fb.format->format,
+ cache->plane.rotation)) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
}
- if (!stride_is_valid(dev_priv, cache->fb.stride)) {
+ if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
+ fbc->no_fbc_reason = "tiling unsupported";
+ return false;
+ }
+
+ if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
fbc->no_fbc_reason = "framebuffer stride not supported";
return false;
}
@@ -948,7 +997,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
drm_WARN_ON(&dev_priv->drm, fbc->active);
- DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
+ pipe_name(crtc->pipe));
__intel_fbc_cleanup_cfb(dev_priv);
@@ -1176,7 +1226,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
else
cache->gen9_wa_cfb_stride = 0;
- DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
+ pipe_name(crtc->pipe));
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
fbc->crtc = crtc;
@@ -1238,7 +1289,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
if (fbc->underrun_detected || !fbc->crtc)
goto out;
- DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
fbc->underrun_detected = true;
intel_fbc_deactivate(dev_priv, "FIFO underrun");
@@ -1264,7 +1315,8 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
return ret;
if (dev_priv->fbc.underrun_detected) {
- DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Re-allowing FBC after fifo underrun\n");
dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
}
@@ -1335,7 +1387,8 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
if (intel_vtd_active() &&
(IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+ drm_info(&dev_priv->drm,
+ "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
return true;
}
@@ -1363,8 +1416,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
mkwrite_device_info(dev_priv)->display.has_fbc = false;
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
- DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
- i915_modparams.enable_fbc);
+ drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
+ i915_modparams.enable_fbc);
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 3bc804212a99..bd39eb6a21b8 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -146,7 +146,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
- DRM_ERROR("failed to allocate framebuffer\n");
+ drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
return PTR_ERR(obj);
}
@@ -183,21 +183,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {
- DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
- " releasing it\n",
- intel_fb->base.width, intel_fb->base.height,
- sizes->fb_width, sizes->fb_height);
+ drm_dbg_kms(&dev_priv->drm,
+ "BIOS fb too small (%dx%d), we require (%dx%d),"
+ " releasing it\n",
+ intel_fb->base.width, intel_fb->base.height,
+ sizes->fb_width, sizes->fb_height);
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
- DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
return ret;
intel_fb = ifbdev->fb;
} else {
- DRM_DEBUG_KMS("re-using BIOS fb\n");
+ drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = intel_fb->base.width;
sizes->fb_height = intel_fb->base.height;
@@ -220,7 +222,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
- DRM_ERROR("Failed to allocate fb_info\n");
+ drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
ret = PTR_ERR(info);
goto out_unpin;
}
@@ -240,7 +242,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
+ drm_err(&dev_priv->drm,
+ "Failed to remap framebuffer into virtual memory\n");
ret = PTR_ERR(vaddr);
goto out_unpin;
}
@@ -258,9 +261,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
- ifbdev->fb->base.width, ifbdev->fb->base.height,
- i915_ggtt_offset(vma));
+ drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
+ ifbdev->fb->base.width, ifbdev->fb->base.height,
+ i915_ggtt_offset(vma));
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
@@ -309,6 +312,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
@@ -321,21 +325,24 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
intel_crtc = to_intel_crtc(crtc);
if (!crtc->state->active || !obj) {
- DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm,
+ "pipe %c not active or no fb, skipping\n",
+ pipe_name(intel_crtc->pipe));
continue;
}
if (obj->base.size > max_size) {
- DRM_DEBUG_KMS("found possible fb from plane %c\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm,
+ "found possible fb from plane %c\n",
+ pipe_name(intel_crtc->pipe));
fb = to_intel_framebuffer(crtc->primary->state->fb);
max_size = obj->base.size;
}
}
if (!fb) {
- DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+ drm_dbg_kms(&i915->drm,
+ "no active fbs found, not using BIOS config\n");
goto out;
}
@@ -346,13 +353,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
intel_crtc = to_intel_crtc(crtc);
if (!crtc->state->active) {
- DRM_DEBUG_KMS("pipe %c not active, skipping\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm,
+ "pipe %c not active, skipping\n",
+ pipe_name(intel_crtc->pipe));
continue;
}
- DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm, "checking plane %c for BIOS fb\n",
+ pipe_name(intel_crtc->pipe));
/*
* See if the plane fb we found above will fit on this
@@ -362,9 +370,10 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
- DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
- pipe_name(intel_crtc->pipe),
- cur_size, fb->base.pitches[0]);
+ drm_dbg_kms(&i915->drm,
+ "fb not wide enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, fb->base.pitches[0]);
fb = NULL;
break;
}
@@ -372,28 +381,32 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
- DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
- pipe_name(intel_crtc->pipe),
- crtc->state->adjusted_mode.crtc_hdisplay,
- crtc->state->adjusted_mode.crtc_vdisplay,
- fb->base.format->cpp[0] * 8,
- cur_size);
+ drm_dbg_kms(&i915->drm,
+ "pipe %c area: %dx%d, bpp: %d, size: %d\n",
+ pipe_name(intel_crtc->pipe),
+ crtc->state->adjusted_mode.crtc_hdisplay,
+ crtc->state->adjusted_mode.crtc_vdisplay,
+ fb->base.format->cpp[0] * 8,
+ cur_size);
if (cur_size > max_size) {
- DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
- pipe_name(intel_crtc->pipe),
- cur_size, max_size);
+ drm_dbg_kms(&i915->drm,
+ "fb not big enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, max_size);
fb = NULL;
break;
}
- DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
- pipe_name(intel_crtc->pipe),
- max_size, cur_size);
+ drm_dbg_kms(&i915->drm,
+ "fb big enough for plane %c (%d >= %d)\n",
+ pipe_name(intel_crtc->pipe),
+ max_size, cur_size);
}
if (!fb) {
- DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+ drm_dbg_kms(&i915->drm,
+ "BIOS fb not suitable for all pipes, not using\n");
goto out;
}
@@ -415,7 +428,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
- DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+ drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
return true;
out:
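The selection logic in the intel_fbdev_init_bios() hunks above reduces to a two-pass scan: pick the largest framebuffer left behind by the BIOS, then verify that every active pipe fits inside it (stride first, then total size). A minimal standalone model of that fit check; the struct names here are stand-ins, not i915 types:

#include <stdbool.h>

struct fake_fb { int pitch; int size; };
struct fake_pipe { bool active; int hdisplay; int vdisplay; int cpp; };

static bool bios_fb_fits_all_pipes(const struct fake_fb *fb,
				   const struct fake_pipe *pipe, int n)
{
	for (int i = 0; i < n; i++) {
		int cur_size;

		if (!pipe[i].active)
			continue; /* inactive pipes impose no constraint */

		/* the fb stride must cover the pipe's width in bytes ... */
		cur_size = pipe[i].hdisplay * pipe[i].cpp;
		if (fb->pitch < cur_size)
			return false;

		/* ... and the fb must be big enough for all scanlines */
		cur_size = pipe[i].vdisplay * fb->pitch;
		if (cur_size > fb->size)
			return false;
	}

	return true;
}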
@@ -522,8 +535,9 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
* processing, fbdev will perform a full connector reprobe if a hotplug event
* was received while HPD was suspended.
*/
-static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
+ struct intel_fbdev *ifbdev = i915->fbdev;
bool send_hpd = false;
mutex_lock(&ifbdev->hpd_lock);
@@ -533,7 +547,7 @@ static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
mutex_unlock(&ifbdev->hpd_lock);
if (send_hpd) {
- DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+ drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
}
@@ -588,7 +602,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
- intel_fbdev_hpd_set_suspend(ifbdev, state);
+ intel_fbdev_hpd_set_suspend(dev_priv, state);
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 6cb02c912acc..2979ed2588eb 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -302,12 +302,14 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
BITS_PER_TYPE(atomic_t));
if (old) {
- WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
+ drm_WARN_ON(old->obj->base.dev,
+ !(atomic_read(&old->bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->bits);
}
if (new) {
- WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
+ drm_WARN_ON(new->obj->base.dev,
+ atomic_read(&new->bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->bits);
}
}
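The converted intel_frontbuffer_track() above moves plane ownership bits between frontbuffers atomically, and the new drm_WARN_ONs assert that each bit is owned by exactly one buffer at a time. A self-contained sketch of the same invariant with C11 atomics, where assert() stands in for the WARNs:

#include <assert.h>
#include <stdatomic.h>

static void fb_track(atomic_uint *old_bits, atomic_uint *new_bits,
		     unsigned int bits)
{
	if (old_bits) {
		/* the old buffer must currently own these bits */
		assert(atomic_load(old_bits) & bits);
		atomic_fetch_and(old_bits, ~bits);
	}
	if (new_bits) {
		/* the new buffer must not own them yet */
		assert(!(atomic_load(new_bits) & bits));
		atomic_fetch_or(new_bits, bits);
	}
}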
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index a0cc894c3868..212d4ee68205 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -64,13 +64,14 @@ static void assert_global_state_read_locked(struct intel_atomic_state *state)
return;
}
- WARN(1, "Global state not read locked\n");
+ drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
}
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_global_obj *obj)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
int index, num_objs, i;
size_t size;
struct __intel_global_objs_state *arr;
@@ -106,8 +107,8 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
state->num_global_objs = num_objs;
- DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
- obj, obj_state, state);
+ drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
+ obj, obj_state, state);
return obj_state;
}
@@ -147,7 +148,7 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
new_obj_state, i) {
- WARN_ON(obj->state != old_obj_state);
+ drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);
/*
* If the new state wasn't modified (and properly
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 1fd3a5a6296b..a8d119b6b45c 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -379,8 +379,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
return ret;
}
-static inline
-unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
{
return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
GMBUS_BYTE_COUNT_MAX;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index ee0f27ea2810..2cbc4619b4ce 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -109,18 +109,16 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return capable;
}
-static inline
-bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder, enum port port)
+static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
return intel_de_read(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
HDCP_STATUS_ENC;
}
-static inline
-bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder, enum port port)
+static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
return intel_de_read(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
@@ -853,8 +851,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
return ret;
}
-static inline
-struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
return container_of(hdcp, struct intel_connector, hdcp);
}
@@ -1391,6 +1388,7 @@ static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1431,7 +1429,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
hdcp->seq_num_m++;
if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
- DRM_DEBUG_KMS("seq_num_m roll over.\n");
+ drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
return -1;
}
@@ -1855,8 +1853,7 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
-static inline
-enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
{
switch (port) {
case PORT_A:
@@ -1868,8 +1865,7 @@ enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
}
}
-static inline
-enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
switch (cpu_transcoder) {
case TRANSCODER_A ... TRANSCODER_D:
@@ -1879,8 +1875,8 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
}
-static inline int initialize_hdcp_port_data(struct intel_connector *connector,
- const struct intel_hdcp_shim *shim)
+static int initialize_hdcp_port_data(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -2075,7 +2071,8 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}
-void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 7c12ad609b1f..86bbaec120cc 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -11,6 +11,7 @@
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
@@ -26,7 +27,8 @@ int intel_hdcp_init(struct intel_connector *connector,
int intel_hdcp_enable(struct intel_connector *connector,
enum transcoder cpu_transcoder, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
-void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 821411b93dac..010f37240710 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -44,7 +44,6 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
-#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -707,13 +706,15 @@ void intel_read_infoframe(struct intel_encoder *encoder,
/* see comment above for the reason for this offset */
ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
if (ret) {
- DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
+ drm_dbg_kms(encoder->base.dev,
+ "Failed to unpack infoframe type 0x%02x\n", type);
return;
}
if (frame->any.type != type)
- DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
- frame->any.type, type);
+ drm_dbg_kms(encoder->base.dev,
+ "Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, type);
}
static bool
@@ -853,7 +854,8 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
if (ret < 0) {
- DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "couldn't set HDR metadata in infoframe\n");
return false;
}
@@ -893,8 +895,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (!(val & VIDEO_DIP_ENABLE))
return;
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_dbg_kms(&dev_priv->drm,
+ "video DIP still enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
@@ -906,8 +909,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
- DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_dbg_kms(&dev_priv->drm,
+ "video DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~VIDEO_DIP_PORT_MASK;
@@ -1264,8 +1268,8 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;
- DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
- enable ? "Enabling" : "Disabling");
+ drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
+ enable ? "Enabling" : "Disabling");
drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
adapter, enable);
@@ -1346,13 +1350,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
- DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
+ ret);
return ret;
}
ret = intel_gmbus_output_aksv(adapter);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -1361,11 +1366,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
- DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
+ ret);
return ret;
}
@@ -1373,11 +1381,14 @@ static
int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
u8 *bstatus)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
- DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
+ ret);
return ret;
}
@@ -1385,12 +1396,14 @@ static
int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
bool *repeater_present)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ ret);
return ret;
}
*repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1401,11 +1414,14 @@ static
int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
u8 *ri_prime)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
- DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
+ ret);
return ret;
}
@@ -1413,12 +1429,14 @@ static
int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
bool *ksv_ready)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ ret);
return ret;
}
*ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1429,11 +1447,13 @@ static
int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
int num_downstream, u8 *ksv_fifo)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
- DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
return 0;
@@ -1443,6 +1463,7 @@ static
int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
int i, u32 *part)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -1451,7 +1472,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
- DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
+ drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
+ i, ret);
return ret;
}
@@ -1474,12 +1496,14 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
if (ret) {
- DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
if (ret) {
- DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Enable HDCP signalling failed (%d)\n", ret);
return ret;
}
@@ -1500,8 +1524,8 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
if (ret) {
- DRM_ERROR("%s HDCP signalling failed (%d)\n",
- enable ? "Enable" : "Disable", ret);
+ drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
return ret;
}
@@ -1539,8 +1563,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
- DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
+ drm_err(&i915->drm,
+ "Ri' mismatch detected, link check failed (%x)\n",
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
+ port)));
return false;
}
return true;
@@ -1588,17 +1614,19 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
return -EINVAL;
}
-static inline
-int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
- u8 msg_id, bool *msg_ready,
- ssize_t *msg_sz)
+static int
+hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, bool *msg_ready,
+ ssize_t *msg_sz)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
- ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
+ ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
if (ret < 0) {
- DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
+ drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
+ ret);
return ret;
}
@@ -1618,6 +1646,7 @@ static ssize_t
intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool paired)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
bool msg_ready = false;
int timeout, ret;
ssize_t msg_sz = 0;
@@ -1632,8 +1661,8 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
!ret && msg_ready && msg_sz, timeout * 1000,
1000, 5 * 1000);
if (ret)
- DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
- msg_id, ret, timeout);
+ drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n",
+ msg_id, ret, timeout);
return ret ? ret : msg_sz;
}
@@ -1652,6 +1681,7 @@ static
int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, void *buf, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
unsigned int offset;
@@ -1667,15 +1697,17 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
* available buffer.
*/
if (ret > size) {
- DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
- ret, size);
+ drm_dbg_kms(&i915->drm,
+ "msg_sz(%zd) is more than exp size(%zu)\n",
+ ret, size);
return -1;
}
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
if (ret)
- DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
+ drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
+ msg_id, ret);
return ret;
}
@@ -1718,12 +1750,6 @@ int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
return ret;
}
-static inline
-enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
-{
- return HDCP_PROTOCOL_HDMI;
-}
-
static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
.read_bksv = intel_hdmi_hdcp_read_bksv,
@@ -1871,15 +1897,17 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink);
- DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_WARN_ON(&i915->drm, !pipe_config->has_hdmi_sink);
+ drm_dbg_kms(&i915->drm, "Enabling HDMI audio on pipe %c\n",
+ pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
-static void g4x_enable_hdmi(struct intel_encoder *encoder,
+static void g4x_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1901,7 +1929,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
-static void ibx_enable_hdmi(struct intel_encoder *encoder,
+static void ibx_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1952,7 +1981,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
-static void cpt_enable_hdmi(struct intel_encoder *encoder,
+static void cpt_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2005,13 +2035,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
-static void vlv_enable_hdmi(struct intel_encoder *encoder,
+static void vlv_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
}
-static void intel_disable_hdmi(struct intel_encoder *encoder,
+static void intel_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2069,7 +2101,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void g4x_disable_hdmi(struct intel_encoder *encoder,
+static void g4x_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2077,10 +2110,11 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
- intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
-static void pch_disable_hdmi(struct intel_encoder *encoder,
+static void pch_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2089,11 +2123,12 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
old_crtc_state, old_conn_state);
}
-static void pch_post_disable_hdmi(struct intel_encoder *encoder,
+static void pch_post_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
@@ -2286,29 +2321,27 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
return true;
}
-static bool
-intel_hdmi_ycbcr420_config(struct drm_connector *connector,
- struct intel_crtc_state *config)
+static int
+intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
-
- if (!connector->ycbcr_420_allowed) {
- DRM_ERROR("Platform doesn't support YCBCR420 output\n");
- return false;
- }
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
- config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+ if (!drm_mode_is_420_only(&connector->display_info, adjusted_mode))
+ return 0;
- /* YCBCR 420 output conversion needs a scaler */
- if (skl_update_scaler_crtc(config)) {
- DRM_DEBUG_KMS("Scaler allocation for output failed\n");
- return false;
+ if (!connector->ycbcr_420_allowed) {
+ drm_err(&i915->drm,
+ "Platform doesn't support YCBCR420 output\n");
+ return -EINVAL;
}
- intel_pch_panel_fitting(intel_crtc, config,
- DRM_MODE_SCALE_FULLSCREEN);
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- return true;
+ return intel_pch_panel_fitting(crtc_state, conn_state);
}
static int intel_hdmi_port_clock(int clock, int bpc)
@@ -2342,6 +2375,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -2366,13 +2400,15 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
if (crtc_state->pipe_bpp > bpc * 3)
crtc_state->pipe_bpp = bpc * 3;
- DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
- bpc, crtc_state->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "picking %d bpc for HDMI output (pipe bpp: %d)\n",
+ bpc, crtc_state->pipe_bpp);
if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
false, crtc_state->has_hdmi_sink) != MODE_OK) {
- DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
- crtc_state->port_clock);
+ drm_dbg_kms(&i915->drm,
+ "unsupported HDMI clock (%d kHz), rejecting mode\n",
+ crtc_state->port_clock);
return -EINVAL;
}
@@ -2433,12 +2469,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
- if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
- if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
- DRM_ERROR("Can't support YCBCR420 output\n");
- return -EINVAL;
- }
- }
+ ret = intel_hdmi_ycbcr420_config(pipe_config, conn_state);
+ if (ret)
+ return ret;
pipe_config->limited_color_range =
intel_hdmi_limited_color_range(pipe_config, conn_state);
@@ -2475,25 +2508,26 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
- intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
+ intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
+ conn_state);
if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad AVI infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad SPD infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad HDMI infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad DRM infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n");
return -EINVAL;
}
@@ -2543,7 +2577,8 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
*/
if (has_edid && !connector->override_edid &&
intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
- DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Assuming DP dual mode adaptor presence based on VBT\n");
type = DRM_DP_DUAL_MODE_TYPE1_DVI;
} else {
type = DRM_DP_DUAL_MODE_NONE;
@@ -2557,9 +2592,10 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
hdmi->dp_dual_mode.max_tmds_clock =
drm_dp_dual_mode_max_tmds_clock(type, adapter);
- DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
- drm_dp_get_dual_mode_type_name(type),
- hdmi->dp_dual_mode.max_tmds_clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
+ drm_dp_get_dual_mode_type_name(type),
+ hdmi->dp_dual_mode.max_tmds_clock);
}
static bool
@@ -2579,7 +2615,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
edid = drm_get_edid(connector, i2c);
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
- DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
@@ -2611,8 +2648,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
@@ -2643,8 +2680,10 @@ out:
static void
intel_hdmi_force(struct drm_connector *connector)
{
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
intel_hdmi_unset_edid(connector);
@@ -2665,7 +2704,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
return intel_connector_update_modes(connector, edid);
}
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
+static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2679,7 +2719,8 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config, conn_state);
}
-static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
+static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2696,12 +2737,13 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
- g4x_enable_hdmi(encoder, pipe_config, conn_state);
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
vlv_wait_port_ready(dev_priv, dport, 0x0);
}
-static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2710,7 +2752,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
vlv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2719,14 +2762,16 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
+static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
chv_phy_post_pll_disable(encoder, old_crtc_state);
}
-static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
+static void vlv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2734,7 +2779,8 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
vlv_phy_reset_lanes(encoder, old_crtc_state);
}
-static void chv_hdmi_post_disable(struct intel_encoder *encoder,
+static void chv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2749,7 +2795,8 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
vlv_dpio_put(dev_priv);
}
-static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
+static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2767,7 +2814,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
- g4x_enable_hdmi(encoder, pipe_config, conn_state);
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
vlv_wait_port_ready(dev_priv, dport, 0x0);
@@ -2786,6 +2833,7 @@ intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
struct kobject *i2c_kobj = &adapter->dev.kobj;
struct kobject *connector_kobj = &connector->kdev->kobj;
@@ -2793,7 +2841,7 @@ static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name);
if (ret)
- DRM_ERROR("Failed to create i2c symlink (%d)\n", ret);
+ drm_err(&i915->drm, "Failed to create i2c symlink (%d)\n", ret);
}
static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector)
@@ -2814,8 +2862,6 @@ intel_hdmi_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- intel_connector_debugfs_add(connector);
-
intel_hdmi_create_i2c_symlink(connector);
return ret;
@@ -2922,9 +2968,10 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
if (!sink_scrambling->supported)
return true;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
- connector->base.id, connector->name,
- yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
+ connector->base.id, connector->name,
+ yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
return drm_scdc_set_high_tmds_clock_ratio(adapter,
@@ -3066,8 +3113,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = intel_bios_alternate_ddc_pin(encoder);
if (ddc_pin) {
- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
- ddc_pin, port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DDC pin 0x%x for port %c (VBT)\n",
+ ddc_pin, port_name(port));
return ddc_pin;
}
@@ -3084,8 +3132,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
- ddc_pin, port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DDC pin 0x%x for port %c (platform default)\n",
+ ddc_pin, port_name(port));
return ddc_pin;
}
@@ -3142,8 +3191,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_encoder->port;
struct cec_connector_info conn_info;
- DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
- intel_encoder->base.base.id, intel_encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Adding HDMI connector on [ENCODER:%d:%s]\n",
+ intel_encoder->base.base.id, intel_encoder->base.name);
if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
@@ -3187,7 +3237,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
int ret = intel_hdcp_init(intel_connector,
&intel_hdmi_hdcp_shim);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP init failed, skipping.\n");
}
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -3206,16 +3257,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
cec_notifier_conn_register(dev->dev, port_identifier(port),
&conn_info);
if (!intel_hdmi->cec_notifier)
- DRM_DEBUG_KMS("CEC notifier get failed\n");
+ drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
}
static enum intel_hotplug_state
intel_hdmi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector, bool irq_received)
+ struct intel_connector *connector)
{
enum intel_hotplug_state state;
- state = intel_encoder_hotplug(encoder, connector, irq_received);
+ state = intel_encoder_hotplug(encoder, connector);
/*
* On many platforms the HDMI live state signal is known to be
@@ -3229,7 +3280,7 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
* time around we didn't detect any change in the sink's connection
* status.
*/
- if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
state = INTEL_HOTPLUG_RETRY;
return state;
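With irq_received dropped from the hotplug hook signature, the retry decision now keys off connector->hotplug_retries, which the work function in intel_hotplug.c below resets on a fresh IRQ and bumps on each reprobe pass. A compact model of the combined flow; the types and names here are hypothetical:

#include <stdbool.h>

enum hp_state { HP_UNCHANGED, HP_CHANGED, HP_RETRY };

struct conn { int hotplug_retries; };

static enum hp_state handle_hotplug(struct conn *c, bool irq_received,
				    enum hp_state detected)
{
	/* bookkeeping done by the work function for every event */
	if (irq_received)
		c->hotplug_retries = 0;
	else
		c->hotplug_retries++;

	/*
	 * HDMI live state is unreliable right after an IRQ, so an
	 * "unchanged" result on the first pass earns exactly one
	 * retry; the retry pass itself can never requeue.
	 */
	if (detected == HP_UNCHANGED && !c->hotplug_retries)
		return HP_RETRY;

	return detected;
}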
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index a091442efba4..4f6f560e093e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -270,8 +270,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
@@ -392,12 +391,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct intel_encoder *encoder =
intel_attached_encoder(connector);
+ if (hpd_event_bits & hpd_bit)
+ connector->hotplug_retries = 0;
+ else
+ connector->hotplug_retries++;
+
drm_dbg_kms(&dev_priv->drm,
- "Connector %s (pin %i) received hotplug event.\n",
- connector->base.name, pin);
+ "Connector %s (pin %i) received hotplug event. (retry %d)\n",
+ connector->base.name, pin,
+ connector->hotplug_retries);
- switch (encoder->hotplug(encoder, connector,
- hpd_event_bits & hpd_bit)) {
+ switch (encoder->hotplug(encoder, connector)) {
case INTEL_HOTPLUG_UNCHANGED:
break;
case INTEL_HOTPLUG_CHANGED:
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 1e6b4fda2900..777b0743257e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -15,8 +15,7 @@ enum port;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received);
+ struct intel_connector *connector);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index d807c5648c87..6ff7b226f0a1 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
/* FIXME actually read this from the hw */
- return enc_to_intel_lspcon(encoder)->active;
+ return 0;
}
void lspcon_resume(struct intel_lspcon *lspcon)
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 9a067effcfa0..872f2a489339 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -220,7 +220,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}
-static void intel_pre_enable_lvds(struct intel_encoder *encoder,
+static void intel_pre_enable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -301,7 +302,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
/*
* Sets the power state for the panel.
*/
-static void intel_enable_lvds(struct intel_encoder *encoder,
+static void intel_enable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -323,7 +325,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
intel_panel_enable_backlight(pipe_config, conn_state);
}
-static void intel_disable_lvds(struct intel_encoder *encoder,
+static void intel_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -341,28 +344,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
-static void gmch_disable_lvds(struct intel_encoder *encoder,
+static void gmch_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
- intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
+ intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}
-static void pch_disable_lvds(struct intel_encoder *encoder,
+static void pch_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
}
-static void pch_post_disable_lvds(struct intel_encoder *encoder,
+static void pch_post_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
+ intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}
static enum drm_mode_status
@@ -397,6 +403,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
unsigned int lvds_bpp;
+ int ret;
/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
@@ -430,16 +437,15 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
- intel_pch_panel_fitting(intel_crtc, pipe_config,
- conn_state->scaling_mode);
- } else {
- intel_gmch_panel_fitting(intel_crtc, pipe_config,
- conn_state->scaling_mode);
-
- }
+ if (HAS_GMCH(dev_priv))
+ ret = intel_gmch_panel_fitting(pipe_config, conn_state);
+ else
+ ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
/*
* XXX: It would be nice to support lower refresh rates on the
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 481187223101..66711e62fa71 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -281,7 +281,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *from = NULL, *to = NULL;
- WARN_ON(overlay->old_vma);
+ drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
if (overlay->vma)
from = intel_frontbuffer_get(overlay->vma->obj);
@@ -350,7 +350,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
- if (WARN_ON(!vma))
+ if (drm_WARN_ON(&overlay->i915->drm, !vma))
return;
intel_frontbuffer_flip_complete(overlay->i915,
@@ -396,7 +396,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;
- WARN_ON(!overlay->active);
+ drm_WARN_ON(&overlay->i915->drm, !overlay->active);
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
@@ -1342,7 +1342,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;
- engine = dev_priv->engine[RCS0];
+ engine = dev_priv->gt.engine[RCS0];
if (!engine || !engine->kernel_context)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 276f43870802..3c5056dbf607 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -176,24 +176,23 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
}
/* adjusted_mode has been preset to be the panel's fixed mode */
-void
-intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode)
+int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int x = 0, y = 0, width = 0, height = 0;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int x, y, width, height;
/* Native modes don't need fitting */
- if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
- adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
- pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
- goto done;
+ if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ return 0;
- switch (fitting_mode) {
+ switch (conn_state->scaling_mode) {
case DRM_MODE_SCALE_CENTER:
- width = pipe_config->pipe_src_w;
- height = pipe_config->pipe_src_h;
+ width = crtc_state->pipe_src_w;
+ height = crtc_state->pipe_src_h;
x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
break;
@@ -202,18 +201,18 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
/* Scale but preserve the aspect ratio */
{
u32 scaled_width = adjusted_mode->crtc_hdisplay
- * pipe_config->pipe_src_h;
- u32 scaled_height = pipe_config->pipe_src_w
+ * crtc_state->pipe_src_h;
+ u32 scaled_height = crtc_state->pipe_src_w
* adjusted_mode->crtc_vdisplay;
if (scaled_width > scaled_height) { /* pillar */
- width = scaled_height / pipe_config->pipe_src_h;
+ width = scaled_height / crtc_state->pipe_src_h;
if (width & 1)
width++;
x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->crtc_vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
- height = scaled_width / pipe_config->pipe_src_w;
+ height = scaled_width / crtc_state->pipe_src_w;
if (height & 1)
height++;
y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
@@ -227,6 +226,10 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
+ case DRM_MODE_SCALE_NONE:
+ WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w);
+ WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h);
+ /* fall through */
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->crtc_hdisplay;
@@ -234,14 +237,15 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
break;
default:
- WARN(1, "bad panel fit mode: %d\n", fitting_mode);
- return;
+ MISSING_CASE(conn_state->scaling_mode);
+ return -EINVAL;
}
-done:
- pipe_config->pch_pfit.pos = (x << 16) | y;
- pipe_config->pch_pfit.size = (width << 16) | height;
- pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ x, y, width, height);
+ crtc_state->pch_pfit.enabled = true;
+
+ return 0;
}
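The DRM_MODE_SCALE_ASPECT branch above chooses pillarbox or letterbox by cross-multiplying the two aspect ratios, deferring division until the final size. A simplified sketch of that computation (the hunks above additionally round width/height up to even values, which this omits):

static void fit_aspect(int src_w, int src_h, int dst_w, int dst_h,
		       int *x, int *y, int *w, int *h)
{
	/* compare dst_w/dst_h against src_w/src_h without dividing */
	long scaled_w = (long)dst_w * src_h;
	long scaled_h = (long)src_w * dst_h;

	if (scaled_w > scaled_h) {        /* pillarbox: bars left/right */
		*w = scaled_h / src_h;
		*h = dst_h;
	} else if (scaled_w < scaled_h) { /* letterbox: bars top/bottom */
		*w = dst_w;
		*h = scaled_w / src_w;
	} else {                          /* aspect already matches */
		*w = dst_w;
		*h = dst_h;
	}

	/* centre the fitted rectangle on the panel */
	*x = (dst_w - *w + 1) / 2;
	*y = (dst_h - *h + 1) / 2;
}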
static void
@@ -287,7 +291,7 @@ centre_vertically(struct drm_display_mode *adjusted_mode,
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
}
-static inline u32 panel_fitter_scaling(u32 source, u32 target)
+static u32 panel_fitter_scaling(u32 source, u32 target)
{
/*
* Floating point operation is not supported. So the FACTOR
@@ -300,13 +304,14 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
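panel_fitter_scaling() above works in fixed point because this path cannot use floating point; in the upstream helper ACCURACY is 12, so one ratio unit is 1/4096. A runnable illustration (the constants are copied from that helper as an assumption, they are not part of this diff):

#include <stdio.h>

#define ACCURACY 12
#define FACTOR (1u << ACCURACY)

static unsigned int panel_fitter_scaling(unsigned int source,
					 unsigned int target)
{
	/* ratio in 12-bit fixed point, then rounded back down */
	unsigned int ratio = source * FACTOR / target;

	return (FACTOR * ratio + FACTOR / 2) / FACTOR;
}

int main(void)
{
	/* 1024-wide source on a 1280-wide panel:
	 * 1024 * 4096 / 1280 = 3276, i.e. ~0.80 in fixed point */
	printf("%u\n", panel_fitter_scaling(1024, 1280));
	return 0;
}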
-static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
+static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
u32 *pfit_control)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
- pipe_config->pipe_src_h;
- u32 scaled_height = pipe_config->pipe_src_w *
+ crtc_state->pipe_src_h;
+ u32 scaled_height = crtc_state->pipe_src_w *
adjusted_mode->crtc_vdisplay;
/* 965+ is easy, it does everything in hw */
@@ -316,18 +321,18 @@ static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
else if (scaled_width < scaled_height)
*pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
- else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w)
+ else if (adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w)
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}
-static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
+static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
- pipe_config->pipe_src_h;
- u32 scaled_height = pipe_config->pipe_src_w *
+ crtc_state->pipe_src_h;
+ u32 scaled_height = crtc_state->pipe_src_w *
adjusted_mode->crtc_vdisplay;
u32 bits;
@@ -339,11 +344,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode,
scaled_height /
- pipe_config->pipe_src_h);
+ crtc_state->pipe_src_h);
*border = LVDS_BORDER_ENABLE;
- if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) {
- bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+ if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay) {
+ bits = panel_fitter_scaling(crtc_state->pipe_src_h,
adjusted_mode->crtc_vdisplay);
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -355,11 +360,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
scaled_width /
- pipe_config->pipe_src_w);
+ crtc_state->pipe_src_w);
*border = LVDS_BORDER_ENABLE;
- if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
- bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+ if (crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ bits = panel_fitter_scaling(crtc_state->pipe_src_w,
adjusted_mode->crtc_hdisplay);
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -377,35 +382,35 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
}
}
-void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode)
+int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
/* Native modes don't need fitting */
- if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
- adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
+ if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h)
goto out;
- switch (fitting_mode) {
+ switch (conn_state->scaling_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
- centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
- centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
+ centre_horizontally(adjusted_mode, crtc_state->pipe_src_w);
+ centre_vertically(adjusted_mode, crtc_state->pipe_src_h);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_GEN(dev_priv) >= 4)
- i965_scale_aspect(pipe_config, &pfit_control);
+ i965_scale_aspect(crtc_state, &pfit_control);
else
- i9xx_scale_aspect(pipe_config, &pfit_control,
+ i9xx_scale_aspect(crtc_state, &pfit_control,
&pfit_pgm_ratios, &border);
break;
case DRM_MODE_SCALE_FULLSCREEN:
@@ -413,8 +418,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
- if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
- pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay ||
+ crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
@@ -426,15 +431,14 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
default:
- drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n",
- fitting_mode);
- return;
+ MISSING_CASE(conn_state->scaling_mode);
+ return -EINVAL;
}
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_GEN(dev_priv) >= 4)
- pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
+ pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY;
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
@@ -443,12 +447,14 @@ out:
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
+ if (INTEL_GEN(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- pipe_config->gmch_pfit.control = pfit_control;
- pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
- pipe_config->gmch_pfit.lvds_border_bits = border;
+ crtc_state->gmch_pfit.control = pfit_control;
+ crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
+ crtc_state->gmch_pfit.lvds_border_bits = border;
+
+ return 0;
}
/**
@@ -483,20 +489,10 @@ static u32 scale(u32 source_val,
return target_val;
}
-/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
-static inline u32 scale_user_to_hw(struct intel_connector *connector,
- u32 user_level, u32 user_max)
-{
- struct intel_panel *panel = &connector->panel;
-
- return scale(user_level, 0, user_max,
- panel->backlight.min, panel->backlight.max);
-}
-
/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
* to [hw_min..hw_max]. */
-static inline u32 clamp_user_to_hw(struct intel_connector *connector,
- u32 user_level, u32 user_max)
+static u32 clamp_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
u32 hw_level;
@@ -508,8 +504,8 @@ static inline u32 clamp_user_to_hw(struct intel_connector *connector,
}
/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
-static inline u32 scale_hw_to_user(struct intel_connector *connector,
- u32 hw_level, u32 user_max)
+static u32 scale_hw_to_user(struct intel_connector *connector,
+ u32 hw_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
@@ -684,9 +680,10 @@ static void
intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(connector, level);
panel->backlight.set(conn_state, level);
@@ -867,8 +864,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
* another client is not activated.
*/
if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg(&dev_priv->drm,
- "Skipping backlight disable on vga switch\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Skipping backlight disable on vga switch\n");
return;
}
@@ -1244,10 +1241,20 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_unlock(&dev_priv->backlight_lock);
- drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val);
+ drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
}
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static u32 scale_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(user_level, 0, user_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
/* set backlight brightness to level in range [0..max], scaling wrt hw min */
static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
u32 user_level, u32 user_max)
@@ -1335,6 +1342,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
int intel_backlight_device_register(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
@@ -1374,14 +1382,15 @@ int intel_backlight_device_register(struct intel_connector *connector)
&intel_backlight_device_ops, &props);
if (IS_ERR(panel->backlight.device)) {
- DRM_ERROR("Failed to register backlight: %ld\n",
- PTR_ERR(panel->backlight.device));
+ drm_err(&i915->drm, "Failed to register backlight: %ld\n",
+ PTR_ERR(panel->backlight.device));
panel->backlight.device = NULL;
return -ENODEV;
}
- DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
- connector->base.name);
+ drm_dbg_kms(&i915->drm,
+ "Connector %s backlight sysfs interface registered\n",
+ connector->base.name);
return 0;
}
@@ -1931,7 +1940,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}
-void intel_panel_update_backlight(struct intel_encoder *encoder,
+void intel_panel_update_backlight(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
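
[Editor's note] The backlight helpers above (scale_user_to_hw, clamp_user_to_hw, scale_hw_to_user) all delegate to scale(), whose body lies outside these hunks. A minimal sketch of the linear remap it performs, assuming the caller guarantees source_max > source_min; the _sketch suffix marks this as illustrative, not the kernel source:

#include <linux/kernel.h>	/* clamp() */
#include <linux/math64.h>	/* mul_u32_u32(), do_div() */

/* Remap source_val from [source_min..source_max] to
 * [target_min..target_max] with a 64-bit intermediate so the
 * multiply cannot overflow 32 bits. */
static u32 scale_sketch(u32 source_val,
			u32 source_min, u32 source_max,
			u32 target_min, u32 target_max)
{
	u64 target_val;

	/* Defensive clamp so the subtraction cannot underflow. */
	source_val = clamp(source_val, source_min, source_max);

	target_val = mul_u32_u32(source_val - source_min,
				 target_max - target_min);
	do_div(target_val, source_max - source_min);

	return target_min + target_val;
}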
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index cedeea443336..968b95281cb4 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -25,19 +25,18 @@ int intel_panel_init(struct intel_panel *panel,
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
-void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
-void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
+int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector,
enum pipe pipe);
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-void intel_panel_update_backlight(struct intel_encoder *encoder,
+void intel_panel_update_backlight(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index fd9b146e3aba..b7a2c102648a 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -30,6 +30,7 @@
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "intel_hdmi.h"
/**
* DOC: Panel Self Refresh (PSR/SRD)
@@ -137,41 +138,42 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, imr_reg, val);
}
-static void psr_event_print(u32 val, bool psr2_enabled)
+static void psr_event_print(struct drm_i915_private *i915,
+ u32 val, bool psr2_enabled)
{
- DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
+ drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
- DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
+ drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
- DRM_DEBUG_KMS("\tPSR2 disabled\n");
+ drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
- DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
+ drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
- DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
+ drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
if (val & PSR_EVENT_GRAPHICS_RESET)
- DRM_DEBUG_KMS("\tGraphics reset\n");
+ drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
if (val & PSR_EVENT_PCH_INTERRUPT)
- DRM_DEBUG_KMS("\tPCH interrupt\n");
+ drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
if (val & PSR_EVENT_MEMORY_UP)
- DRM_DEBUG_KMS("\tMemory up\n");
+ drm_dbg_kms(&i915->drm, "\tMemory up\n");
if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
- DRM_DEBUG_KMS("\tFront buffer modification\n");
+ drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
if (val & PSR_EVENT_WD_TIMER_EXPIRE)
- DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
+ drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
- DRM_DEBUG_KMS("\tPIPE registers updated\n");
+ drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
if (val & PSR_EVENT_REGISTER_UPDATE)
- DRM_DEBUG_KMS("\tRegister updated\n");
+ drm_dbg_kms(&i915->drm, "\tRegister updated\n");
if (val & PSR_EVENT_HDCP_ENABLE)
- DRM_DEBUG_KMS("\tHDCP enabled\n");
+ drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
- DRM_DEBUG_KMS("\tKVMR session enabled\n");
+ drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
if (val & PSR_EVENT_VBI_ENABLE)
- DRM_DEBUG_KMS("\tVBI enabled\n");
+ drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
if (val & PSR_EVENT_LPSP_MODE_EXIT)
- DRM_DEBUG_KMS("\tLPSP mode exited\n");
+ drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
- DRM_DEBUG_KMS("\tPSR disabled\n");
+ drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
@@ -209,7 +211,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
val);
- psr_event_print(val, psr2_enabled);
+ psr_event_print(dev_priv, val, psr2_enabled);
}
}
@@ -249,18 +251,21 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
+ drm_dbg_kms(&i915->drm,
+ "Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 val;
ssize_t r;
@@ -273,7 +278,8 @@ static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
if (r != 2)
- DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+ drm_dbg_kms(&i915->drm,
+ "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
/*
* Spec says that if the value read is 0 the default granularity should
@@ -352,39 +358,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
}
}
-static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct dp_sdp psr_vsc;
-
- if (dev_priv->psr.psr2_enabled) {
- /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- if (dev_priv->psr.colorimetry_support) {
- psr_vsc.sdp_header.HB2 = 0x5;
- psr_vsc.sdp_header.HB3 = 0x13;
- } else {
- psr_vsc.sdp_header.HB2 = 0x4;
- psr_vsc.sdp_header.HB3 = 0xe;
- }
- } else {
- /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
- }
-
- intel_dig_port->write_infoframe(&intel_dig_port->base,
- crtc_state,
- DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
-}
-
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -751,6 +724,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
if (intel_dp != dev_priv->psr.dp)
return;
+ if (!psr_global_enabled(dev_priv))
+ return;
/*
* HSW spec explicitly says PSR is tied to port A.
* BDW+ platforms have an instance of PSR registers per transcoder but
@@ -793,6 +768,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
}
static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -875,9 +851,12 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &intel_dig_port->base;
u32 val;
drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
@@ -916,7 +895,9 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
- intel_psr_setup_vsc(intel_dp, crtc_state);
+ intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
+ &dev_priv->psr.vsc);
+ intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
dev_priv->psr.enabled = true;
@@ -928,11 +909,13 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
* @crtc_state: new CRTC state
+ * @conn_state: new connector state
*
* This function can only be called after the pipe is fully trained and enabled.
*/
void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -953,7 +936,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
goto unlock;
}
- intel_psr_enable_locked(dev_priv, crtc_state);
+ intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -1086,13 +1069,15 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
* intel_psr_update - Update PSR state
* @intel_dp: Intel DP
* @crtc_state: new CRTC state
+ * @conn_state: new connector state
*
* This function will update PSR states, disabling, enabling or switching PSR
* version when executing fastsets. For full modeset, intel_psr_disable() and
* intel_psr_enable() should be called instead.
*/
void intel_psr_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct i915_psr *psr = &dev_priv->psr;
@@ -1129,7 +1114,7 @@ void intel_psr_update(struct intel_dp *intel_dp,
intel_psr_disable_locked(intel_dp);
if (enable)
- intel_psr_enable_locked(dev_priv, crtc_state);
+ intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
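
[Editor's note] Most of the intel_psr.c churn above is the same mechanical conversion from the global DRM_DEBUG_KMS() macro to the device-aware drm_dbg_kms(), which tags each message with the drm device it came from. A before/after sketch of the pattern (function names and message are hypothetical):

/* Before: a global macro; the log line cannot be tied to a GPU. */
static void report_old(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("sink latency unknown\n");
}

/* After: derive the device once, then log through it. */
static void report_new(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "sink latency unknown\n");
}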
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 274fc6bb6221..b4515186d5f4 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -17,11 +17,13 @@ struct intel_dp;
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
void intel_psr_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 637d8fe2f8c2..bc6c26818e15 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1430,7 +1430,8 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
#undef UPDATE_PROPERTY
}
-static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
+static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *intel_encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1727,7 +1728,8 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
SDVO_AUDIO_PRESENCE_DETECT);
}
-static void intel_disable_sdvo(struct intel_encoder *encoder,
+static void intel_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1775,20 +1777,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
}
}
-static void pch_disable_sdvo(struct intel_encoder *encoder,
+static void pch_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}
-static void pch_post_disable_sdvo(struct intel_encoder *encoder,
+static void pch_post_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
+ intel_disable_sdvo(state, encoder, old_crtc_state, old_conn_state);
}
-static void intel_enable_sdvo(struct intel_encoder *encoder,
+static void intel_enable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1934,12 +1939,11 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
static enum intel_hotplug_state
intel_sdvo_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
intel_sdvo_enable_hotplug(encoder);
- return intel_encoder_hotplug(encoder, connector, irq_received);
+ return intel_encoder_hotplug(encoder, connector);
}
static bool
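
[Editor's note] Each encoder hook converted in this series (SDVO here, TV and DSI below) gains a leading struct intel_atomic_state argument. A minimal sketch of the new hook shape, with a hypothetical encoder name; bxt_dsi_enable() below shows the payoff, reaching the drm device through the state for drm_WARN_ON():

static void sketch_encoder_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	/* The hook can now consult the whole atomic commit, e.g.: */
	drm_WARN_ON(state->base.dev, !crtc_state->hw.active);
}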
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 33d886141138..0000ec7055f7 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -2503,6 +2503,7 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
+ DRM_FORMAT_XYUV8888,
};
static const u32 skl_planar_formats[] = {
@@ -2521,6 +2522,7 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
};
static const u32 glk_planar_formats[] = {
@@ -2539,6 +2541,7 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_P010,
DRM_FORMAT_P012,
DRM_FORMAT_P016,
@@ -2562,6 +2565,7 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2589,6 +2593,7 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2620,6 +2625,7 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2790,6 +2796,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
@@ -2860,6 +2867,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 9b850c11aa78..b161c15baf86 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -34,6 +34,7 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
if (INTEL_INFO(i915)->display.has_modular_fia) {
modular_fia = intel_uncore_read(&i915->uncore,
PORT_TX_DFLEXDPSP(FIA1));
+ drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
modular_fia &= MODULAR_FIA_MASK;
} else {
modular_fia = 0;
@@ -52,6 +53,62 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
}
}
+static enum intel_display_power_domain
+tc_cold_get_power_domain(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (INTEL_GEN(i915) == 11)
+ return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+ else
+ return POWER_DOMAIN_TC_COLD_OFF;
+}
+
+static intel_wakeref_t
+tc_cold_block(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum intel_display_power_domain domain;
+
+ if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
+ return 0;
+
+ domain = tc_cold_get_power_domain(dig_port);
+ return intel_display_power_get(i915, domain);
+}
+
+static void
+tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum intel_display_power_domain domain;
+
+ /*
+ * wakeref == -1 means an error happened while saving
+ * save_depot_stack, but power should still be put down; 0 is an
+ * invalid save_depot_stack id, so it can be used to skip the put
+ * for non-TC-legacy ports.
+ */
+ if (wakeref == 0)
+ return;
+
+ domain = tc_cold_get_power_domain(dig_port);
+ intel_display_power_put_async(i915, domain, wakeref);
+}
+
+static void
+assert_tc_cold_blocked(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ bool enabled;
+
+ if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
+ return;
+
+ enabled = intel_display_power_is_enabled(i915,
+ tc_cold_get_power_domain(dig_port));
+ drm_WARN_ON(&i915->drm, !enabled);
+}
+
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -62,6 +119,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
+ assert_tc_cold_blocked(dig_port);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -77,6 +135,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
+ assert_tc_cold_blocked(dig_port);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -91,6 +150,8 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
if (dig_port->tc_mode != TC_PORT_DP_ALT)
return 4;
+ assert_tc_cold_blocked(dig_port);
+
lane_mask = 0;
with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
lane_mask = intel_tc_port_get_lane_mask(dig_port);
@@ -123,6 +184,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
drm_WARN_ON(&i915->drm,
lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+ assert_tc_cold_blocked(dig_port);
+
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
@@ -152,6 +215,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
u32 live_status_mask)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 valid_hpd_mask;
if (dig_port->tc_legacy_port)
@@ -164,8 +228,9 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
return;
/* If live status mismatches the VBT flag, trust the live status. */
- DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
- dig_port->tc_port_name, live_status_mask);
+ drm_err(&i915->drm,
+ "Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
+ dig_port->tc_port_name, live_status_mask);
dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}
@@ -173,8 +238,8 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
+ u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
u32 mask = 0;
u32 val;
@@ -193,7 +258,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_DP_ALT);
- if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
+ if (intel_uncore_read(uncore, SDEISR) & isr_bit)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
@@ -233,8 +298,7 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
- dig_port->tc_port_name,
- enableddisabled(enable));
+ dig_port->tc_port_name, enableddisabled(enable));
return false;
}
@@ -286,11 +350,12 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
int required_lanes)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int max_lanes;
if (!icl_tc_phy_status_complete(dig_port)) {
- DRM_DEBUG_KMS("Port %s: PHY not ready\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ dig_port->tc_port_name);
goto out_set_tbt_alt_mode;
}
@@ -311,15 +376,16 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
* became disconnected. Not necessary for legacy mode.
*/
if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
- DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+ dig_port->tc_port_name);
goto out_set_safe_mode;
}
if (max_lanes < required_lanes) {
- DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
- dig_port->tc_port_name,
- max_lanes, required_lanes);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY max lanes %d < required lanes %d\n",
+ dig_port->tc_port_name,
+ max_lanes, required_lanes);
goto out_set_safe_mode;
}
@@ -357,15 +423,17 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
if (!icl_tc_phy_status_complete(dig_port)) {
- DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
+ dig_port->tc_port_name);
return dig_port->tc_mode == TC_PORT_TBT_ALT;
}
if (icl_tc_phy_is_in_safe_mode(dig_port)) {
- DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n",
+ dig_port->tc_port_name);
return false;
}
@@ -415,9 +483,14 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
- drm_WARN_ON(&i915->drm,
- intel_display_power_is_enabled(i915,
- intel_aux_power_domain(dig_port)));
+ if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) {
+ enum intel_display_power_domain aux_domain;
+ bool aux_powered;
+
+ aux_domain = intel_aux_power_domain(dig_port);
+ aux_powered = intel_display_power_is_enabled(i915, aux_domain);
+ drm_WARN_ON(&i915->drm, aux_powered);
+ }
icl_tc_phy_disconnect(dig_port);
icl_tc_phy_connect(dig_port, required_lanes);
@@ -438,10 +511,13 @@ intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
+ intel_wakeref_t tc_cold_wref;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
+ tc_cold_wref = tc_cold_block(dig_port);
dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
if (dig_port->dp.is_mst)
@@ -451,8 +527,9 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
- DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
- dig_port->tc_port_name, active_links);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
intel_tc_port_link_init_refcount(dig_port, active_links);
goto out;
@@ -462,10 +539,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
icl_tc_phy_connect(dig_port, 1);
out:
- DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
+ drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+ tc_cold_unblock(dig_port, tc_cold_wref);
mutex_unlock(&dig_port->tc_lock);
}
@@ -484,13 +562,19 @@ static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
-bool intel_tc_port_connected(struct intel_digital_port *dig_port)
+bool intel_tc_port_connected(struct intel_encoder *encoder)
{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_connected;
+ intel_wakeref_t tc_cold_wref;
intel_tc_port_lock(dig_port);
+ tc_cold_wref = tc_cold_block(dig_port);
+
is_connected = tc_port_live_status_mask(dig_port) &
BIT(dig_port->tc_mode);
+
+ tc_cold_unblock(dig_port, tc_cold_wref);
intel_tc_port_unlock(dig_port);
return is_connected;
@@ -506,9 +590,16 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
mutex_lock(&dig_port->tc_lock);
- if (!dig_port->tc_link_refcount &&
- intel_tc_port_needs_reset(dig_port))
- intel_tc_port_reset_mode(dig_port, required_lanes);
+ if (!dig_port->tc_link_refcount) {
+ intel_wakeref_t tc_cold_wref;
+
+ tc_cold_wref = tc_cold_block(dig_port);
+
+ if (intel_tc_port_needs_reset(dig_port))
+ intel_tc_port_reset_mode(dig_port, required_lanes);
+
+ tc_cold_unblock(dig_port, tc_cold_wref);
+ }
drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
dig_port->tc_lock_wakeref = wakeref;
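
[Editor's note] tc_cold_block()/tc_cold_unblock() bracket every FIA register access so the PHY cannot drop into TCCOLD mid-read; on gen11 non-legacy ports both are no-ops (wakeref 0). The usage pattern the hunks above repeat, as a minimal sketch:

static u32 read_fia_sketch(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t tc_cold_wref;
	u32 val;

	tc_cold_wref = tc_cold_block(dig_port);

	/* assert_tc_cold_blocked() would catch a missing block here. */
	val = intel_uncore_read(&i915->uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	tc_cold_unblock(dig_port, tc_cold_wref);

	return val;
}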
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 463f1b3c836f..b619e4736f85 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -10,8 +10,9 @@
#include <linux/types.h>
struct intel_digital_port;
+struct intel_encoder;
-bool intel_tc_port_connected(struct intel_digital_port *dig_port);
+bool intel_tc_port_connected(struct intel_encoder *encoder);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index d2e3a3a323e9..fbe12aad7d58 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -914,7 +914,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
}
static void
-intel_enable_tv(struct intel_encoder *encoder,
+intel_enable_tv(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -930,7 +931,8 @@ intel_enable_tv(struct intel_encoder *encoder,
}
static void
-intel_disable_tv(struct intel_encoder *encoder,
+intel_disable_tv(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -1414,7 +1416,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
(color_conversion->bv << 16) | color_conversion->av);
}
-static void intel_tv_pre_enable(struct intel_encoder *encoder,
+static void intel_tv_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1698,13 +1701,13 @@ intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, connector->name,
- force);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name, force);
if (force) {
struct intel_load_detect_pipe tmp;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 05c7cbe32eb4..aef7fe932d1a 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -462,7 +462,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* defs->child_dev_size;
*/
- u8 devices[0];
+ u8 devices[];
} __packed;
/*
@@ -839,7 +839,7 @@ struct bdb_mipi_config {
struct bdb_mipi_sequence {
u8 version;
- u8 data[0]; /* up to 6 variable length blocks */
+ u8 data[]; /* up to 6 variable length blocks */
} __packed;
/*
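
[Editor's note] u8 data[0] is the old GNU zero-length-array idiom; the C99 flexible array member u8 data[] gives the compiler and fortify checks real bounds information. Allocations then typically go through the overflow-checked struct_size() helper, the same one the execbuffer changes below use. A minimal sketch with a hypothetical struct:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

struct blob {
	u8 version;
	u8 data[];	/* flexible array member: must be last */
};

static struct blob *blob_alloc(size_t n)
{
	struct blob *b;

	/* struct_size() computes sizeof(*b) + n * sizeof(b->data[0]),
	 * saturating to SIZE_MAX on arithmetic overflow. */
	b = kzalloc(struct_size(b, data, n), GFP_KERNEL);
	return b;
}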
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f4c362dc6e15..f582ab52f0b0 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -267,7 +267,6 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
@@ -279,11 +278,11 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
if (HAS_GMCH(dev_priv))
- intel_gmch_panel_fitting(crtc, pipe_config,
- conn_state->scaling_mode);
+ ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
- intel_pch_panel_fitting(crtc, pipe_config,
- conn_state->scaling_mode);
+ ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -759,7 +758,8 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder);
* DSI port enable has to be done before pipe and plane enable, so we do it in
* the pre_enable hook instead of the enable hook.
*/
-static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -858,11 +858,12 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
-static void bxt_dsi_enable(struct intel_encoder *encoder,
+static void bxt_dsi_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
intel_crtc_vblank_on(crtc_state);
}
@@ -871,14 +872,16 @@ static void bxt_dsi_enable(struct intel_encoder *encoder,
* DSI port disable has to be done after pipe and plane disable, so we do it in
* the post_disable hook.
*/
-static void intel_dsi_disable(struct intel_encoder *encoder,
+static void intel_dsi_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_panel_disable_backlight(old_conn_state);
@@ -906,7 +909,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
vlv_dsi_clear_device_ready(encoder);
}
-static void intel_dsi_post_disable(struct intel_encoder *encoder,
+static void intel_dsi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 34be4c0ee7c5..bc0223716906 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -108,7 +108,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain,
obj->base.resv, NULL, true,
- I915_FENCE_TIMEOUT,
+ i915_fence_timeout(to_i915(obj->base.dev)),
I915_FENCE_GFP);
dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
dma_fence_work_commit(&clflush->base);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 0598e5382a1d..d3a86a4d5c04 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -6,7 +6,6 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
@@ -289,8 +288,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
i915_gem_object_lock(obj);
err = i915_sw_fence_await_reservation(&work->wait,
- obj->base.resv, NULL,
- true, I915_FENCE_TIMEOUT,
+ obj->base.resv, NULL, true, 0,
I915_FENCE_GFP);
if (err < 0) {
dma_fence_set_error(&work->dma, err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 68326ad3b2e0..900ea8b7fc8f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -130,9 +130,7 @@ static void lut_close(struct i915_gem_context *ctx)
if (&lut->obj_link != &obj->lut_list) {
i915_lut_handle_free(lut);
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
- if (atomic_dec_and_test(&vma->open_count) &&
- !i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
+ i915_vma_close(vma);
i915_gem_object_put(obj);
}
@@ -570,23 +568,19 @@ static void engines_idle_release(struct i915_gem_context *ctx,
engines->ctx = i915_gem_context_get(ctx);
for_each_gem_engine(ce, engines, it) {
- struct dma_fence *fence;
- int err = 0;
+ int err;
/* serialises with execbuf */
set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
if (!intel_context_pin_if_active(ce))
continue;
- fence = i915_active_fence_get(&ce->timeline->last_request);
- if (fence) {
- err = i915_sw_fence_await_dma_fence(&engines->fence,
- fence, 0,
- GFP_KERNEL);
- dma_fence_put(fence);
- }
+ /* Wait until context is finally scheduled out and retired */
+ err = i915_sw_fence_await_active(&engines->fence,
+ &ce->active,
+ I915_ACTIVE_AWAIT_BARRIER);
intel_context_unpin(ce);
- if (err < 0)
+ if (err)
goto kill;
}
@@ -757,21 +751,46 @@ err_free:
return ERR_PTR(err);
}
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *engines;
+
+ rcu_read_lock();
+ do {
+ engines = rcu_dereference(ctx->engines);
+ GEM_BUG_ON(!engines);
+
+ if (unlikely(!i915_sw_fence_await(&engines->fence)))
+ continue;
+
+ if (likely(engines == rcu_access_pointer(ctx->engines)))
+ break;
+
+ i915_sw_fence_complete(&engines->fence);
+ } while (1);
+ rcu_read_unlock();
+
+ return engines;
+}
+
static int
context_apply_all(struct i915_gem_context *ctx,
int (*fn)(struct intel_context *ce, void *data),
void *data)
{
struct i915_gem_engines_iter it;
+ struct i915_gem_engines *e;
struct intel_context *ce;
int err = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ e = __context_engines_await(ctx);
+ for_each_gem_engine(ce, e, it) {
err = fn(ce, data);
if (err)
break;
}
- i915_gem_context_unlock_engines(ctx);
+ i915_sw_fence_complete(&e->fence);
return err;
}
@@ -786,11 +805,13 @@ static int __apply_ppgtt(struct intel_context *ce, void *vm)
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
- struct i915_address_space *old = i915_gem_context_vm(ctx);
+ struct i915_address_space *old;
+ old = rcu_replace_pointer(ctx->vm,
+ i915_vm_open(vm),
+ lockdep_is_held(&ctx->mutex));
GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
- rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
context_apply_all(ctx, __apply_ppgtt, vm);
return old;
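
[Editor's note] __set_ppgtt() now swaps ctx->vm with rcu_replace_pointer(), which publishes the new pointer and returns the old one in a single lockdep-annotated step, replacing the separate read plus rcu_assign_pointer(). The idiom in isolation, with hypothetical types:

struct foo;

struct bar {
	struct mutex mutex;
	struct foo __rcu *ptr;
};

/* Swap an RCU-protected pointer while holding its guarding mutex;
 * the caller disposes of the returned old value once readers are
 * done (e.g. after a grace period). */
static struct foo *swap_ptr(struct bar *bar, struct foo *new)
{
	lockdep_assert_held(&bar->mutex);

	return rcu_replace_pointer(bar->ptr, new,
				   lockdep_is_held(&bar->mutex));
}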
@@ -1069,30 +1090,6 @@ static void cb_retire(struct i915_active *base)
kfree(cb);
}
-static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
-{
- struct i915_gem_engines *engines;
-
- rcu_read_lock();
- do {
- engines = rcu_dereference(ctx->engines);
- if (unlikely(!engines))
- break;
-
- if (unlikely(!i915_sw_fence_await(&engines->fence)))
- continue;
-
- if (likely(engines == rcu_access_pointer(ctx->engines)))
- break;
-
- i915_sw_fence_complete(&engines->fence);
- } while (1);
- rcu_read_unlock();
-
- return engines;
-}
-
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
@@ -1401,10 +1398,10 @@ static int get_ringsize(struct i915_gem_context *ctx,
return 0;
}
-static int
-user_to_context_sseu(struct drm_i915_private *i915,
- const struct drm_i915_gem_context_param_sseu *user,
- struct intel_sseu *context)
+int
+i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+ const struct drm_i915_gem_context_param_sseu *user,
+ struct intel_sseu *context)
{
const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
@@ -1539,7 +1536,7 @@ static int set_sseu(struct i915_gem_context *ctx,
goto out_ce;
}
- ret = user_to_context_sseu(i915, &user_sseu, &sseu);
+ ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
if (ret)
goto out_ce;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index f1d884d304bd..3702b2fb27ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -225,4 +225,8 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
+int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+ const struct drm_i915_gem_context_param_sseu *user,
+ struct intel_sseu *context);
+
#endif /* !__I915_GEM_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 0cc40e77bbd2..7f76fc68f498 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -368,8 +368,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- if (!atomic_read(&obj->bind_count))
+ if (list_empty(&obj->vma.list))
return;
mutex_lock(&i915->ggtt.vm.mutex);
@@ -400,12 +399,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj = vma->obj;
-
- assert_object_held(obj);
-
/* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(obj);
+ i915_gem_object_bump_inactive_ggtt(vma->obj);
i915_vma_unpin(vma);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b7440f06c5e2..3ce185670ca4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -15,8 +15,8 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
-#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
@@ -40,6 +40,11 @@ struct eb_vma {
u32 handle;
};
+struct eb_vma_array {
+ struct kref kref;
+ struct eb_vma vma[];
+};
+
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
@@ -52,7 +57,6 @@ enum {
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
-#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
#define __EXEC_INTERNAL_FLAGS (~0u << 31)
@@ -264,7 +268,9 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
+ struct i915_vma *target;
struct i915_request *rq;
+ struct i915_vma *rq_vma;
u32 *rq_cmd;
unsigned int rq_size;
} reloc_cache;
@@ -283,6 +289,7 @@ struct i915_execbuffer {
*/
int lut_size;
struct hlist_head *buckets; /** ht for relocation handles */
+ struct eb_vma_array *array;
};
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
@@ -292,8 +299,62 @@ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
eb->args->batch_len);
}
+static struct eb_vma_array *eb_vma_array_create(unsigned int count)
+{
+ struct eb_vma_array *arr;
+
+ arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN);
+ if (!arr)
+ return NULL;
+
+ kref_init(&arr->kref);
+ arr->vma[0].vma = NULL;
+
+ return arr;
+}
+
+static inline void eb_unreserve_vma(struct eb_vma *ev)
+{
+ struct i915_vma *vma = ev->vma;
+
+ if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
+ __i915_vma_unpin_fence(vma);
+
+ if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+ __i915_vma_unpin(vma);
+
+ ev->flags &= ~(__EXEC_OBJECT_HAS_PIN |
+ __EXEC_OBJECT_HAS_FENCE);
+}
+
+static void eb_vma_array_destroy(struct kref *kref)
+{
+ struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref);
+ struct eb_vma *ev = arr->vma;
+
+ while (ev->vma) {
+ eb_unreserve_vma(ev);
+ i915_vma_put(ev->vma);
+ ev++;
+ }
+
+ kvfree(arr);
+}
+
+static void eb_vma_array_put(struct eb_vma_array *arr)
+{
+ kref_put(&arr->kref, eb_vma_array_destroy);
+}
+
static int eb_create(struct i915_execbuffer *eb)
{
+ /* Allocate an extra slot for use by the command parser + sentinel */
+ eb->array = eb_vma_array_create(eb->buffer_count + 2);
+ if (!eb->array)
+ return -ENOMEM;
+
+ eb->vma = eb->array->vma;
+
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
unsigned int size = 1 + ilog2(eb->buffer_count);
@@ -327,8 +388,10 @@ static int eb_create(struct i915_execbuffer *eb)
break;
} while (--size);
- if (unlikely(!size))
+ if (unlikely(!size)) {
+ eb_vma_array_put(eb->array);
return -ENOMEM;
+ }
eb->lut_size = size;
} else {
@@ -368,6 +431,32 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return false;
}
+static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
+ unsigned int exec_flags)
+{
+ u64 pin_flags = 0;
+
+ if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
+ pin_flags |= PIN_GLOBAL;
+
+ /*
+ * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
+ * limit address to the first 4GBs for unflagged objects.
+ */
+ if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+ pin_flags |= PIN_ZONE_4G;
+
+ if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
+ pin_flags |= PIN_MAPPABLE;
+
+ if (exec_flags & EXEC_OBJECT_PINNED)
+ pin_flags |= entry->offset | PIN_OFFSET_FIXED;
+ else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
+ pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+ return pin_flags;
+}
+
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
@@ -385,8 +474,19 @@ eb_pin_vma(struct i915_execbuffer *eb,
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
- if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
- return false;
+ /* Attempt to reuse the current location if available */
+ if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) {
+ if (entry->flags & EXEC_OBJECT_PINNED)
+ return false;
+
+ /* Failing that, pick any _free_ space if suitable */
+ if (unlikely(i915_vma_pin(vma,
+ entry->pad_to_size,
+ entry->alignment,
+ eb_pin_flags(entry, ev->flags) |
+ PIN_USER | PIN_NOEVICT)))
+ return false;
+ }
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_pin_fence(vma))) {
@@ -402,26 +502,6 @@ eb_pin_vma(struct i915_execbuffer *eb,
return !eb_vma_misplaced(entry, vma, ev->flags);
}
-static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
-{
- GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
-
- if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
- __i915_vma_unpin_fence(vma);
-
- __i915_vma_unpin(vma);
-}
-
-static inline void
-eb_unreserve_vma(struct eb_vma *ev)
-{
- if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
- return;
-
- __eb_unreserve_vma(ev->vma, ev->flags);
- ev->flags &= ~__EXEC_OBJECT_RESERVED;
-}
-
static int
eb_validate_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry,
@@ -481,7 +561,7 @@ eb_add_vma(struct i915_execbuffer *eb,
GEM_BUG_ON(i915_vma_is_closed(vma));
- ev->vma = i915_vma_get(vma);
+ ev->vma = vma;
ev->exec = entry;
ev->flags = entry->flags;
@@ -547,28 +627,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
u64 pin_flags)
{
struct drm_i915_gem_exec_object2 *entry = ev->exec;
- unsigned int exec_flags = ev->flags;
struct i915_vma *vma = ev->vma;
int err;
- if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
- pin_flags |= PIN_GLOBAL;
-
- /*
- * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
- * limit address to the first 4GBs for unflagged objects.
- */
- if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
- pin_flags |= PIN_ZONE_4G;
-
- if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
- pin_flags |= PIN_MAPPABLE;
-
- if (exec_flags & EXEC_OBJECT_PINNED)
- pin_flags |= entry->offset | PIN_OFFSET_FIXED;
- else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
- pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
-
if (drm_mm_node_allocated(&vma->node) &&
eb_vma_misplaced(entry, vma, ev->flags)) {
err = i915_vma_unbind(vma);
@@ -578,7 +639,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
err = i915_vma_pin(vma,
entry->pad_to_size, entry->alignment,
- pin_flags);
+ eb_pin_flags(entry, ev->flags) | pin_flags);
if (err)
return err;
@@ -587,7 +648,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
eb->args->flags |= __EXEC_HAS_RELOC;
}
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_pin_fence(vma);
if (unlikely(err)) {
i915_vma_unpin(vma);
@@ -595,10 +656,10 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
}
if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+ ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
- ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ ev->flags |= __EXEC_OBJECT_HAS_PIN;
GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
return 0;
@@ -728,77 +789,117 @@ static int eb_select_context(struct i915_execbuffer *eb)
return 0;
}
-static int eb_lookup_vmas(struct i915_execbuffer *eb)
+static int __eb_add_lut(struct i915_execbuffer *eb,
+ u32 handle, struct i915_vma *vma)
{
- struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
- struct drm_i915_gem_object *obj;
- unsigned int i, batch;
+ struct i915_gem_context *ctx = eb->gem_context;
+ struct i915_lut_handle *lut;
int err;
- if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
- return -ENOENT;
+ lut = i915_lut_handle_alloc();
+ if (unlikely(!lut))
+ return -ENOMEM;
- INIT_LIST_HEAD(&eb->relocs);
- INIT_LIST_HEAD(&eb->unbound);
+ i915_vma_get(vma);
+ if (!atomic_fetch_inc(&vma->open_count))
+ i915_vma_reopen(vma);
+ lut->handle = handle;
+ lut->ctx = ctx;
+
+ /* Check that the context hasn't been closed in the meantime */
+ err = -EINTR;
+ if (!mutex_lock_interruptible(&ctx->mutex)) {
+ err = -ENOENT;
+ if (likely(!i915_gem_context_is_closed(ctx)))
+ err = radix_tree_insert(&ctx->handles_vma, handle, vma);
+ if (err == 0) { /* and that the handle still refers to this object */
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ i915_gem_object_lock(obj);
+ if (idr_find(&eb->file->object_idr, handle) == obj) {
+ list_add(&lut->obj_link, &obj->lut_list);
+ } else {
+ radix_tree_delete(&ctx->handles_vma, handle);
+ err = -ENOENT;
+ }
+ i915_gem_object_unlock(obj);
+ }
+ mutex_unlock(&ctx->mutex);
+ }
+ if (unlikely(err))
+ goto err;
- batch = eb_batch_index(eb);
+ return 0;
- for (i = 0; i < eb->buffer_count; i++) {
- u32 handle = eb->exec[i].handle;
- struct i915_lut_handle *lut;
+err:
+ i915_vma_close(vma);
+ i915_vma_put(vma);
+ i915_lut_handle_free(lut);
+ return err;
+}
+
+static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
+{
+ do {
+ struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ int err;
- vma = radix_tree_lookup(handles_vma, handle);
+ rcu_read_lock();
+ vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
if (likely(vma))
- goto add_vma;
+ vma = i915_vma_tryget(vma);
+ rcu_read_unlock();
+ if (likely(vma))
+ return vma;
obj = i915_gem_object_lookup(eb->file, handle);
- if (unlikely(!obj)) {
- err = -ENOENT;
- goto err_vma;
- }
+ if (unlikely(!obj))
+ return ERR_PTR(-ENOENT);
vma = i915_vma_instance(obj, eb->context->vm, NULL);
if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
+ i915_gem_object_put(obj);
+ return vma;
}
- lut = i915_lut_handle_alloc();
- if (unlikely(!lut)) {
- err = -ENOMEM;
- goto err_obj;
- }
+ err = __eb_add_lut(eb, handle, vma);
+ if (likely(!err))
+ return vma;
- err = radix_tree_insert(handles_vma, handle, vma);
- if (unlikely(err)) {
- i915_lut_handle_free(lut);
- goto err_obj;
- }
+ i915_gem_object_put(obj);
+ if (err != -EEXIST)
+ return ERR_PTR(err);
+ } while (1);
+}
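
eb_lookup_vma() is an optimistic lookup-or-create loop: try the RCU-protected lookup first, otherwise create and insert, and if the insertion loses the race (-EEXIST) drop the local copy and retry from the top. A self-contained sketch of the same shape, with toy lookup()/create()/insert() helpers standing in for the radix tree and __eb_add_lut():

/* Sketch only: lookup-or-create with retry after a lost insertion race. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { unsigned int handle; };

static struct obj *table[16]; /* toy handle -> object cache */

static struct obj *lookup(unsigned int handle)
{
	return table[handle & 15];
}

static struct obj *create(unsigned int handle)
{
	struct obj *o = malloc(sizeof(*o));

	if (o)
		o->handle = handle;
	return o;
}

static int insert(unsigned int handle, struct obj *o)
{
	if (table[handle & 15])
		return -EEXIST; /* somebody beat us to it */
	table[handle & 15] = o;
	return 0;
}

static struct obj *lookup_or_create(unsigned int handle)
{
	do {
		struct obj *o = lookup(handle); /* fast path */
		int err;

		if (o)
			return o;

		o = create(handle);
		if (!o)
			return NULL;

		err = insert(handle, o);
		if (!err)
			return o;

		free(o); /* lost the race */
		if (err != -EEXIST)
			return NULL;
	} while (1); /* -EEXIST: someone else inserted; retry the lookup */
}

int main(void)
{
	printf("handle %u\n", lookup_or_create(3)->handle);
	return 0;
}
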
- /* transfer ref to lut */
- if (!atomic_fetch_inc(&vma->open_count))
- i915_vma_reopen(vma);
- lut->handle = handle;
- lut->ctx = eb->gem_context;
+static int eb_lookup_vmas(struct i915_execbuffer *eb)
+{
+ unsigned int batch = eb_batch_index(eb);
+ unsigned int i;
+ int err = 0;
- i915_gem_object_lock(obj);
- list_add(&lut->obj_link, &obj->lut_list);
- i915_gem_object_unlock(obj);
+ INIT_LIST_HEAD(&eb->relocs);
+ INIT_LIST_HEAD(&eb->unbound);
+
+ for (i = 0; i < eb->buffer_count; i++) {
+ struct i915_vma *vma;
+
+ vma = eb_lookup_vma(eb, eb->exec[i].handle);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ break;
+ }
-add_vma:
err = eb_validate_vma(eb, &eb->exec[i], vma);
- if (unlikely(err))
- goto err_vma;
+ if (unlikely(err)) {
+ i915_vma_put(vma);
+ break;
+ }
eb_add_vma(eb, i, batch, vma);
}
- return 0;
-
-err_obj:
- i915_gem_object_put(obj);
-err_vma:
eb->vma[i].vma = NULL;
return err;
}
@@ -823,31 +924,13 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
}
}
-static void eb_release_vmas(const struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- struct eb_vma *ev = &eb->vma[i];
- struct i915_vma *vma = ev->vma;
-
- if (!vma)
- break;
-
- eb->vma[i].vma = NULL;
-
- if (ev->flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, ev->flags);
-
- i915_vma_put(vma);
- }
-}
-
static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
+ if (eb->array)
+ eb_vma_array_put(eb->array);
+
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -872,7 +955,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.flags = 0;
cache->rq = NULL;
- cache->rq_size = 0;
+ cache->target = NULL;
}
static inline void *unmask_page(unsigned long p)
@@ -894,29 +977,122 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
return &i915->ggtt;
}
-static void reloc_gpu_flush(struct reloc_cache *cache)
+#define RELOC_TAIL 4
+
+static int reloc_gpu_chain(struct reloc_cache *cache)
+{
+ struct intel_gt_buffer_pool_node *pool;
+ struct i915_request *rq = cache->rq;
+ struct i915_vma *batch;
+ u32 *cmd;
+ int err;
+
+ pool = intel_gt_get_buffer_pool(rq->engine->gt, PAGE_SIZE);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ batch = i915_vma_instance(pool->obj, rq->context->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_pool;
+ }
+
+ err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
+ if (err)
+ goto out_pool;
+
+ GEM_BUG_ON(cache->rq_size + RELOC_TAIL > PAGE_SIZE / sizeof(u32));
+ cmd = cache->rq_cmd + cache->rq_size;
+ *cmd++ = MI_ARB_CHECK;
+ if (cache->gen >= 8)
+ *cmd++ = MI_BATCH_BUFFER_START_GEN8;
+ else if (cache->gen >= 6)
+ *cmd++ = MI_BATCH_BUFFER_START;
+ else
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cmd++ = lower_32_bits(batch->node.start);
+ *cmd++ = upper_32_bits(batch->node.start); /* Always 0 for gen<8 */
+ i915_gem_object_flush_map(cache->rq_vma->obj);
+ i915_gem_object_unpin_map(cache->rq_vma->obj);
+ cache->rq_vma = NULL;
+
+ err = intel_gt_buffer_pool_mark_active(pool, rq);
+ if (err == 0) {
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ }
+ i915_vma_unpin(batch);
+ if (err)
+ goto out_pool;
+
+ cmd = i915_gem_object_pin_map(batch->obj,
+ cache->has_llc ?
+ I915_MAP_FORCE_WB :
+ I915_MAP_FORCE_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_pool;
+ }
+
+ /* Return with batch mapping (cmd) still pinned */
+ cache->rq_cmd = cmd;
+ cache->rq_size = 0;
+ cache->rq_vma = batch;
+
+out_pool:
+ intel_gt_buffer_pool_put(pool);
+ return err;
+}
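
reloc_gpu_chain() always leaves RELOC_TAIL (4) dwords free at the end of the page-sized batch: MI_ARB_CHECK plus MI_BATCH_BUFFER_START and its address dwords. A quick sketch of the resulting per-page budget (assuming 4KiB pages and a 4-byte u32):

#include <stdio.h>

typedef unsigned int u32;

#define PAGE_SIZE  4096u
#define RELOC_TAIL 4u /* MI_ARB_CHECK + MI_BATCH_BUFFER_START + address */

int main(void)
{
	u32 dwords = PAGE_SIZE / sizeof(u32); /* 1024 */
	u32 usable = dwords - RELOC_TAIL;     /* 1020 */

	/* The largest single relocation emitted below is 8 dwords
	 * (gen8+, qword-misaligned), so each page fits at least
	 * 1020 / 8 = 127 worst-case relocations before reloc_gpu()
	 * must chain to a fresh buffer.
	 */
	printf("%u dwords/page, %u usable, >= %u worst-case relocs\n",
	       dwords, usable, usable / 8u);
	return 0;
}
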
+
+static unsigned int reloc_bb_flags(const struct reloc_cache *cache)
{
- struct drm_i915_gem_object *obj = cache->rq->batch->obj;
+ return cache->gen > 5 ? 0 : I915_DISPATCH_SECURE;
+}
+
+static int reloc_gpu_flush(struct reloc_cache *cache)
+{
+ struct i915_request *rq;
+ int err;
- GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
- cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+ rq = fetch_and_zero(&cache->rq);
+ if (!rq)
+ return 0;
- __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
- i915_gem_object_unpin_map(obj);
+ if (cache->rq_vma) {
+ struct drm_i915_gem_object *obj = cache->rq_vma->obj;
- intel_gt_chipset_flush(cache->rq->engine->gt);
+ GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
+ cache->rq_cmd[cache->rq_size++] = MI_BATCH_BUFFER_END;
- i915_request_add(cache->rq);
- cache->rq = NULL;
+ __i915_gem_object_flush_map(obj,
+ 0, sizeof(u32) * cache->rq_size);
+ i915_gem_object_unpin_map(obj);
+ }
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ rq->batch->node.start,
+ PAGE_SIZE,
+ reloc_bb_flags(cache));
+ if (err)
+ i915_request_set_error_once(rq, err);
+
+ intel_gt_chipset_flush(rq->engine->gt);
+ i915_request_add(rq);
+
+ return err;
}
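
fetch_and_zero() hands ownership of cache->rq to the flush exactly once: it returns the old pointer and leaves NULL behind, making any second flush a no-op. A minimal sketch of the (non-atomic, GNU statement-expression) idiom:

#include <assert.h>
#include <stddef.h>

/* Minimal sketch of the kernel's fetch_and_zero(): not atomic, it
 * simply transfers ownership of the pointer and leaves NULL behind.
 */
#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __T = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__T;					\
})

int main(void)
{
	int x = 42, *rq = &x;
	int *old = fetch_and_zero(&rq);

	assert(old == &x);
	assert(rq == NULL); /* a second flush would now see NULL and bail */
	return 0;
}
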
static void reloc_cache_reset(struct reloc_cache *cache)
{
void *vaddr;
- if (cache->rq)
- reloc_gpu_flush(cache);
-
if (!cache->vaddr)
return;
@@ -1109,17 +1285,17 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
}
static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+ struct intel_engine_cs *engine,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
- pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
+ pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
if (IS_ERR(pool))
return PTR_ERR(pool);
@@ -1132,7 +1308,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto out_pool;
}
- batch = i915_vma_instance(pool->obj, vma->vm, NULL);
+ batch = i915_vma_instance(pool->obj, eb->context->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_unmap;
@@ -1142,26 +1318,32 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = i915_request_create(eb->context);
+ if (engine == eb->context->engine) {
+ rq = i915_request_create(eb->context);
+ } else {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err_unpin;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(eb->context->vm);
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ }
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
- err = intel_engine_pool_mark_active(pool, rq);
+ err = intel_gt_buffer_pool_mark_active(pool, rq);
if (err)
goto err_request;
- err = reloc_move_to_gpu(rq, vma);
- if (err)
- goto err_request;
-
- err = eb->engine->emit_bb_start(rq,
- batch->node.start, PAGE_SIZE,
- cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
- if (err)
- goto skip_request;
-
i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
@@ -1176,6 +1358,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
cache->rq = rq;
cache->rq_cmd = cmd;
cache->rq_size = 0;
+ cache->rq_vma = batch;
/* Return with batch mapping (cmd) still pinned */
goto out_pool;
@@ -1189,124 +1372,206 @@ err_unpin:
err_unmap:
i915_gem_object_unpin_map(pool->obj);
out_pool:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
return err;
}
+static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
+{
+ return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6);
+}
+
static u32 *reloc_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
u32 *cmd;
-
- if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
- reloc_gpu_flush(cache);
+ int err;
if (unlikely(!cache->rq)) {
- int err;
+ struct intel_engine_cs *engine = eb->engine;
- if (!intel_engine_can_store_dword(eb->engine))
- return ERR_PTR(-ENODEV);
+ if (!reloc_can_use_engine(engine)) {
+ engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
+ if (!engine)
+ return ERR_PTR(-ENODEV);
+ }
- err = __reloc_gpu_alloc(eb, vma, len);
+ err = __reloc_gpu_alloc(eb, engine, len);
if (unlikely(err))
return ERR_PTR(err);
}
+ if (vma != cache->target) {
+ err = reloc_move_to_gpu(cache->rq, vma);
+ if (unlikely(err)) {
+ i915_request_set_error_once(cache->rq, err);
+ return ERR_PTR(err);
+ }
+
+ cache->target = vma;
+ }
+
+ if (unlikely(cache->rq_size + len >
+ PAGE_SIZE / sizeof(u32) - RELOC_TAIL)) {
+ err = reloc_gpu_chain(cache);
+ if (unlikely(err)) {
+ i915_request_set_error_once(cache->rq, err);
+ return ERR_PTR(err);
+ }
+ }
+
+ GEM_BUG_ON(cache->rq_size + len >= PAGE_SIZE / sizeof(u32));
cmd = cache->rq_cmd + cache->rq_size;
cache->rq_size += len;
return cmd;
}
-static u64
-relocate_entry(struct i915_vma *vma,
- const struct drm_i915_gem_relocation_entry *reloc,
- struct i915_execbuffer *eb,
- const struct i915_vma *target)
+static inline bool use_reloc_gpu(struct i915_vma *vma)
{
- u64 offset = reloc->offset;
- u64 target_offset = relocation_target(reloc, target);
- bool wide = eb->reloc_cache.use_64bit_reloc;
- void *vaddr;
+ if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
+ return true;
- if (!eb->reloc_cache.vaddr &&
- (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !dma_resv_test_signaled_rcu(vma->resv, true))) {
- const unsigned int gen = eb->reloc_cache.gen;
- unsigned int len;
- u32 *batch;
- u64 addr;
-
- if (wide)
- len = offset & 7 ? 8 : 5;
- else if (gen >= 4)
- len = 4;
- else
- len = 3;
+ if (DBG_FORCE_RELOC)
+ return false;
- batch = reloc_gpu(eb, vma, len);
- if (IS_ERR(batch))
- goto repeat;
+ return !dma_resv_test_signaled_rcu(vma->resv, true);
+}
- addr = gen8_canonical_addr(vma->node.start + offset);
- if (wide) {
- if (offset & 7) {
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(addr);
- *batch++ = upper_32_bits(addr);
- *batch++ = lower_32_bits(target_offset);
-
- addr = gen8_canonical_addr(addr + 4);
-
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(addr);
- *batch++ = upper_32_bits(addr);
- *batch++ = upper_32_bits(target_offset);
- } else {
- *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
- *batch++ = lower_32_bits(addr);
- *batch++ = upper_32_bits(addr);
- *batch++ = lower_32_bits(target_offset);
- *batch++ = upper_32_bits(target_offset);
- }
- } else if (gen >= 6) {
+static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
+{
+ struct page *page;
+ unsigned long addr;
+
+ GEM_BUG_ON(vma->pages != vma->obj->mm.pages);
+
+ page = i915_gem_object_get_page(vma->obj, offset >> PAGE_SHIFT);
+ addr = PFN_PHYS(page_to_pfn(page));
+ GEM_BUG_ON(overflows_type(addr, u32)); /* expected dma32 */
+
+ return addr + offset_in_page(offset);
+}
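
vma_phys_addr() relies on the backing pages being DMA-addressable below 4GiB (hence the overflows_type(addr, u32) assertion) and composes the address from the page frame number and the in-page offset. The arithmetic in isolation (sketch, assuming PAGE_SHIFT == 12):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  ((1u << PAGE_SHIFT) - 1)

int main(void)
{
	uint64_t pfn = 0x1234;    /* frame number of the page holding offset */
	uint32_t offset = 0x5678; /* byte offset into the object */

	uint64_t addr = (pfn << PAGE_SHIFT)    /* PFN_PHYS(page_to_pfn(page)) */
		      + (offset & PAGE_MASK);  /* offset_in_page(offset) */

	printf("phys = 0x%llx\n", (unsigned long long)addr); /* 0x1234678 */
	return 0;
}
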
+
+static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
+{
+ const unsigned int gen = eb->reloc_cache.gen;
+ unsigned int len;
+ u32 *batch;
+ u64 addr;
+
+ if (gen >= 8)
+ len = offset & 7 ? 8 : 5;
+ else if (gen >= 4)
+ len = 4;
+ else
+ len = 3;
+
+ batch = reloc_gpu(eb, vma, len);
+ if (IS_ERR(batch))
+ return false;
+
+ addr = gen8_canonical_addr(vma->node.start + offset);
+ if (gen >= 8) {
+ if (offset & 7) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = 0;
- *batch++ = addr;
- *batch++ = target_offset;
- } else if (gen >= 4) {
- *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *batch++ = 0;
- *batch++ = addr;
- *batch++ = target_offset;
+ *batch++ = lower_32_bits(addr);
+ *batch++ = upper_32_bits(addr);
+ *batch++ = lower_32_bits(target_addr);
+
+ addr = gen8_canonical_addr(addr + 4);
+
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(addr);
+ *batch++ = upper_32_bits(addr);
+ *batch++ = upper_32_bits(target_addr);
} else {
- *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *batch++ = addr;
- *batch++ = target_offset;
+ *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
+ *batch++ = lower_32_bits(addr);
+ *batch++ = upper_32_bits(addr);
+ *batch++ = lower_32_bits(target_addr);
+ *batch++ = upper_32_bits(target_addr);
}
-
- goto out;
+ } else if (gen >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = addr;
+ *batch++ = target_addr;
+ } else if (IS_I965G(eb->i915)) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = vma_phys_addr(vma, offset);
+ *batch++ = target_addr;
+ } else if (gen >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *batch++ = 0;
+ *batch++ = addr;
+ *batch++ = target_addr;
+ } else if (gen >= 3 &&
+ !(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = addr;
+ *batch++ = target_addr;
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM;
+ *batch++ = vma_phys_addr(vma, offset);
+ *batch++ = target_addr;
}
+ return true;
+}
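
The command length chosen at the top of __reloc_entry_gpu() follows directly from the store forms below: gen8+ needs two 4-dword MI_STORE_DWORD_IMMs for a qword-misaligned offset but a single 5-dword qword store otherwise, gen4-gen7 use 4 dwords, and older parts 3. A sketch mirroring that selection:

#include <stdio.h>

/* Mirror of the dword-count selection in __reloc_entry_gpu(): a
 * qword-aligned gen8+ write is one 5-dword MI_STORE_DWORD_IMM carrying
 * both halves, a misaligned one needs two 4-dword stores.
 */
static unsigned int reloc_len(int gen, unsigned long long offset)
{
	if (gen >= 8)
		return offset & 7 ? 8 : 5;
	else if (gen >= 4)
		return 4;
	else
		return 3;
}

int main(void)
{
	printf("gen8: aligned=%u misaligned=%u; gen6=%u; gen3=%u\n",
	       reloc_len(8, 0), reloc_len(8, 4),
	       reloc_len(6, 0), reloc_len(3, 0));
	return 0;
}
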
+
+static bool reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
+{
+ if (eb->reloc_cache.vaddr)
+ return false;
+
+ if (!use_reloc_gpu(vma))
+ return false;
+
+ return __reloc_entry_gpu(eb, vma, offset, target_addr);
+}
+
+static u64
+relocate_entry(struct i915_vma *vma,
+ const struct drm_i915_gem_relocation_entry *reloc,
+ struct i915_execbuffer *eb,
+ const struct i915_vma *target)
+{
+ u64 target_addr = relocation_target(reloc, target);
+ u64 offset = reloc->offset;
+
+ if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
+ bool wide = eb->reloc_cache.use_64bit_reloc;
+ void *vaddr;
+
repeat:
- vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
+ vaddr = reloc_vaddr(vma->obj,
+ &eb->reloc_cache,
+ offset >> PAGE_SHIFT);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
- clflush_write32(vaddr + offset_in_page(offset),
- lower_32_bits(target_offset),
- eb->reloc_cache.vaddr);
+ GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
+ clflush_write32(vaddr + offset_in_page(offset),
+ lower_32_bits(target_addr),
+ eb->reloc_cache.vaddr);
- if (wide) {
- offset += sizeof(u32);
- target_offset >>= 32;
- wide = false;
- goto repeat;
+ if (wide) {
+ offset += sizeof(u32);
+ target_addr >>= 32;
+ wide = false;
+ goto repeat;
+ }
}
-out:
return target->node.start | UPDATE;
}
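
On the CPU path, a 64-bit relocation (use_64bit_reloc) is written as two 32-bit stores: the low dword at offset, then, via the repeat label, the high dword at offset + 4. The same split in isolation (sketch; little-endian host assumed):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Write a 64-bit relocation as two 32-bit stores, mirroring the
 * wide/repeat structure of relocate_entry() above.
 */
static void write_reloc64(void *vaddr, uint64_t offset, uint64_t target)
{
	int wide = 1;
repeat:
	memcpy((char *)vaddr + offset, &(uint32_t){ (uint32_t)target },
	       sizeof(uint32_t));
	if (wide) {
		offset += sizeof(uint32_t); /* high dword lands at offset + 4 */
		target >>= 32;
		wide = 0;
		goto repeat;
	}
}

int main(void)
{
	uint8_t page[16] = { 0 };
	uint64_t v;

	write_reloc64(page, 4, 0x1122334455667788ull);
	memcpy(&v, page + 4, sizeof(v));
	assert(v == 0x1122334455667788ull); /* little-endian host */
	return 0;
}
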
@@ -1411,12 +1676,11 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
- struct drm_i915_gem_relocation_entry __user *urelocs;
const struct drm_i915_gem_exec_object2 *entry = ev->exec;
- unsigned int remain;
+ struct drm_i915_gem_relocation_entry __user *urelocs =
+ u64_to_user_ptr(entry->relocs_ptr);
+ unsigned long remain = entry->relocation_count;
- urelocs = u64_to_user_ptr(entry->relocs_ptr);
- remain = entry->relocation_count;
if (unlikely(remain > N_RELOC(ULONG_MAX)))
return -EINVAL;
@@ -1425,13 +1689,13 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* to read. However, if the array is not writable, the user loses
* the updated relocation values.
*/
- if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
+ if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
return -EFAULT;
do {
struct drm_i915_gem_relocation_entry *r = stack;
unsigned int count =
- min_t(unsigned int, remain, ARRAY_SIZE(stack));
+ min_t(unsigned long, remain, ARRAY_SIZE(stack));
unsigned int copied;
/*
@@ -1494,9 +1758,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
{
int err;
- mutex_lock(&eb->gem_context->mutex);
err = eb_lookup_vmas(eb);
- mutex_unlock(&eb->gem_context->mutex);
if (err)
return err;
@@ -1509,15 +1771,20 @@ static int eb_relocate(struct i915_execbuffer *eb)
/* The objects are in their final locations, apply the relocations. */
if (eb->args->flags & __EXEC_HAS_RELOC) {
struct eb_vma *ev;
+ int flush;
list_for_each_entry(ev, &eb->relocs, reloc_link) {
err = eb_relocate_vma(eb, ev);
if (err)
- return err;
+ break;
}
+
+ flush = reloc_gpu_flush(&eb->reloc_cache);
+ if (!err)
+ err = flush;
}
- return 0;
+ return err;
}
static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1597,19 +1864,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
err = i915_vma_move_to_active(vma, eb->request, flags);
i915_vma_unlock(vma);
-
- __eb_unreserve_vma(vma, flags);
- i915_vma_put(vma);
-
- ev->vma = NULL;
+ eb_unreserve_vma(ev);
}
ww_acquire_fini(&acquire);
+ eb_vma_array_put(fetch_and_zero(&eb->array));
+
if (unlikely(err))
goto err_skip;
- eb->exec = NULL;
-
/* Unconditionally flush any chipset caches (for streaming writes). */
intel_gt_chipset_flush(eb->engine->gt);
return 0;
@@ -1784,7 +2047,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
dma_resv_unlock(shadow->resv);
- dma_fence_work_commit(&pw->base);
+ dma_fence_work_commit_imm(&pw->base);
return 0;
err_batch_unlock:
@@ -1804,7 +2067,7 @@ err_free:
static int eb_parse(struct i915_execbuffer *eb)
{
struct drm_i915_private *i915 = eb->i915;
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_vma *shadow, *trampoline;
unsigned int len;
int err;
@@ -1827,7 +2090,7 @@ static int eb_parse(struct i915_execbuffer *eb)
len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
}
- pool = intel_engine_get_pool(eb->engine, len);
+ pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
if (IS_ERR(pool))
return PTR_ERR(pool);
@@ -1861,6 +2124,7 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
eb->batch = &eb->vma[eb->buffer_count++];
+ eb->vma[eb->buffer_count].vma = NULL;
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
@@ -1874,7 +2138,7 @@ err_trampoline:
err_shadow:
i915_vma_unpin(shadow);
err:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
return err;
}
@@ -2318,39 +2582,13 @@ static void eb_request_add(struct i915_execbuffer *eb)
/* Check that the context wasn't destroyed before submission */
if (likely(!intel_context_is_closed(eb->context))) {
attr = eb->gem_context->sched;
-
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
- if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
- attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
- if (list_empty(&rq->sched.signalers_list))
- attr.priority |= I915_PRIORITY_WAIT;
} else {
/* Serialise with context_close via the add_to_timeline */
i915_request_set_error_once(rq, -ENOENT);
__i915_request_skip(rq);
}
- local_bh_disable();
__i915_request_queue(rq, &attr);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
/* Try to clean up the client's timeline after submitting the request */
if (prev)
@@ -2369,7 +2607,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_i915_private *i915 = to_i915(dev);
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
- struct dma_fence *exec_fence = NULL;
struct sync_file *out_fence = NULL;
struct i915_vma *batch;
int out_fence_fd = -1;
@@ -2386,8 +2623,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
- eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
- eb.vma[0].vma = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2414,30 +2649,22 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (args->flags & I915_EXEC_IS_PINNED)
eb.batch_flags |= I915_DISPATCH_PINNED;
- if (args->flags & I915_EXEC_FENCE_IN) {
+#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
+ if (args->flags & IN_FENCES) {
+ if ((args->flags & IN_FENCES) == IN_FENCES)
+ return -EINVAL;
+
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
if (!in_fence)
return -EINVAL;
}
-
- if (args->flags & I915_EXEC_FENCE_SUBMIT) {
- if (in_fence) {
- err = -EINVAL;
- goto err_in_fence;
- }
-
- exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
- if (!exec_fence) {
- err = -EINVAL;
- goto err_in_fence;
- }
- }
+#undef IN_FENCES
if (args->flags & I915_EXEC_FENCE_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
err = out_fence_fd;
- goto err_exec_fence;
+ goto err_in_fence;
}
}
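
The IN_FENCES mask above makes FENCE_IN and FENCE_SUBMIT share one copy-in path while staying mutually exclusive: if both bits are set, the masked value equals the whole mask and the ioctl is rejected. In miniature (sketch; flag values assumed from the i915 uapi header):

#include <stdio.h>

/* Flag values assumed from uapi/drm/i915_drm.h */
#define I915_EXEC_FENCE_IN     (1u << 16)
#define I915_EXEC_FENCE_SUBMIT (1u << 20)
#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)

static int check(unsigned int flags)
{
	if (flags & IN_FENCES) {
		if ((flags & IN_FENCES) == IN_FENCES)
			return -1; /* both set: mutually exclusive */
		return 1;          /* exactly one: fetch the fence from rsvd2 */
	}
	return 0;                  /* neither */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       check(0),
	       check(I915_EXEC_FENCE_IN),
	       check(I915_EXEC_FENCE_SUBMIT),
	       check(IN_FENCES)); /* 0 1 1 -1 */
	return 0;
}
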
@@ -2528,14 +2755,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
}
if (in_fence) {
- err = i915_request_await_dma_fence(eb.request, in_fence);
- if (err < 0)
- goto err_request;
- }
-
- if (exec_fence) {
- err = i915_request_await_execution(eb.request, exec_fence,
- eb.engine->bond_execute);
+ if (args->flags & I915_EXEC_FENCE_SUBMIT)
+ err = i915_request_await_execution(eb.request,
+ in_fence,
+ eb.engine->bond_execute);
+ else
+ err = i915_request_await_dma_fence(eb.request,
+ in_fence);
if (err < 0)
goto err_request;
}
@@ -2563,7 +2789,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
*/
eb.request->batch = batch;
if (batch->private)
- intel_engine_pool_mark_active(batch->private, eb.request);
+ intel_gt_buffer_pool_mark_active(batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb, batch);
@@ -2592,10 +2818,8 @@ err_batch_unpin:
i915_vma_unpin(batch);
err_parse:
if (batch->private)
- intel_engine_pool_put(batch->private);
+ intel_gt_buffer_pool_put(batch->private);
err_vma:
- if (eb.exec)
- eb_release_vmas(&eb);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
eb_unpin_engine(&eb);
@@ -2606,8 +2830,6 @@ err_destroy:
err_out_fence:
if (out_fence_fd != -1)
put_unused_fd(out_fence_fd);
-err_exec_fence:
- dma_fence_put(exec_fence);
err_in_fence:
dma_fence_put(in_fence);
return err;
@@ -2615,7 +2837,7 @@ err_in_fence:
static size_t eb_element_size(void)
{
- return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
+ return sizeof(struct drm_i915_gem_exec_object2);
}
static bool check_buffer_count(size_t count)
@@ -2671,7 +2893,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
/* Copy in the exec list from userland */
exec_list = kvmalloc_array(count, sizeof(*exec_list),
__GFP_NOWARN | GFP_KERNEL);
- exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+ exec2_list = kvmalloc_array(count, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
drm_dbg(&i915->drm,
@@ -2749,8 +2971,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
- /* Allocate an extra slot for use by the command parser */
- exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+ exec2_list = kvmalloc_array(count, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
@@ -2794,7 +3015,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
* And this range already got effectively checked earlier
* when we did the "copy_from_user()" above.
*/
- if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
+ if (!user_write_access_begin(user_exec_list,
+ count * sizeof(*user_exec_list)))
goto end;
for (i = 0; i < args->buffer_count; i++) {
@@ -2808,7 +3030,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
end_user);
}
end_user:
- user_access_end();
+ user_write_access_end();
end:;
}
@@ -2817,3 +3039,7 @@ end:;
kvfree(exec2_list);
return err;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_execbuffer.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
index 2f6100ec2608..8ab842c80f99 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -72,8 +72,8 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
0, 0);
if (i915_sw_fence_await_reservation(&stub->chain,
- obj->base.resv, NULL,
- true, I915_FENCE_TIMEOUT,
+ obj->base.resv, NULL, true,
+ i915_fence_timeout(to_i915(obj->base.dev)),
I915_FENCE_GFP) < 0)
goto err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b39c24dae64e..70f5f82da288 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -396,6 +396,38 @@ err:
return i915_error_to_vmf_fault(ret);
}
+static int
+vm_access(struct vm_area_struct *area, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+ void *vaddr;
+
+ if (i915_gem_object_is_readonly(obj) && write)
+ return -EACCES;
+
+ addr -= area->vm_start;
+ if (addr >= obj->base.size)
+ return -EINVAL;
+
+ /* As this is primarily for debugging, let's focus on simplicity */
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ if (write) {
+ memcpy(vaddr + addr, buf, len);
+ __i915_gem_object_flush_map(obj, addr, len);
+ } else {
+ memcpy(buf, vaddr + addr, len);
+ }
+
+ i915_gem_object_unpin_map(obj);
+
+ return len;
+}
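
The new .access hook is what lets a debugger peek and poke these mappings: access_process_vm(), which backs ptrace and /proc/<pid>/mem, falls back to vma->vm_ops->access() when it cannot follow the pages directly. A rough userspace illustration via /proc/self/mem (whether a given read actually reaches vm_access() depends on the mapping type; the anonymous memory here is only a stand-in):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Anonymous memory as a stand-in; for i915 this would be an mmap
	 * of a GEM object, where such remote reads reach vm_access().
	 */
	char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char buf[16] = { 0 };
	int fd;

	if (map == MAP_FAILED)
		return 1;
	strcpy(map, "hello");

	fd = open("/proc/self/mem", O_RDONLY);
	if (fd < 0)
		return 1;
	if (pread(fd, buf, sizeof(buf) - 1, (off_t)(uintptr_t)map) > 0)
		printf("%s\n", buf); /* hello */
	close(fd);
	return 0;
}
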
+
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@@ -745,12 +777,14 @@ static void vm_close(struct vm_area_struct *vma)
static const struct vm_operations_struct vm_ops_gtt = {
.fault = vm_fault_gtt,
+ .access = vm_access,
.open = vm_open,
.close = vm_close,
};
static const struct vm_operations_struct vm_ops_cpu = {
.fault = vm_fault_cpu,
+ .access = vm_access,
.open = vm_open,
.close = vm_close,
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 5da9f9e534b9..99356c00c19e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -135,9 +135,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
if (vma) {
GEM_BUG_ON(vma->obj != obj);
GEM_BUG_ON(!atomic_read(&vma->open_count));
- if (atomic_dec_and_test(&vma->open_count) &&
- !i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
+ i915_vma_close(vma);
}
mutex_unlock(&ctx->mutex);
@@ -164,9 +162,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;
- intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_mmap_offset *mmo, *mn;
@@ -206,7 +202,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
}
obj->mmo.offsets = RB_ROOT;
- GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
@@ -227,7 +222,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
cond_resched();
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index e00792158f13..f457d7130491 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -6,8 +6,8 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_ring.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_blt.h"
@@ -18,7 +18,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
{
struct drm_i915_private *i915 = ce->vm->i915;
const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_vma *batch;
u64 offset;
u64 count;
@@ -33,7 +33,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_get_pool(ce->engine, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -78,10 +78,12 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
} while (rem);
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(ce->vm->gt);
+ i915_gem_object_flush_map(pool->obj);
i915_gem_object_unpin_map(pool->obj);
+ intel_gt_chipset_flush(ce->vm->gt);
+
batch = i915_vma_instance(pool->obj, ce->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -96,7 +98,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
return batch;
out_put:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
out_pm:
intel_engine_pm_put(ce->engine);
return ERR_PTR(err);
@@ -114,13 +116,13 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
if (unlikely(err))
return err;
- return intel_engine_pool_mark_active(vma->private, rq);
+ return intel_gt_buffer_pool_mark_active(vma->private, rq);
}
void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
{
i915_vma_unpin(vma);
- intel_engine_pool_put(vma->private);
+ intel_gt_buffer_pool_put(vma->private);
intel_engine_pm_put(ce->engine);
}
@@ -213,7 +215,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
{
struct drm_i915_private *i915 = ce->vm->i915;
const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_vma *batch;
u64 src_offset, dst_offset;
u64 count, rem;
@@ -228,7 +230,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_get_pool(ce->engine, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -289,10 +291,12 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
} while (rem);
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(ce->vm->gt);
+ i915_gem_object_flush_map(pool->obj);
i915_gem_object_unpin_map(pool->obj);
+ intel_gt_chipset_flush(ce->vm->gt);
+
batch = i915_vma_instance(pool->obj, ce->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -307,7 +311,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
return batch;
out_put:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
out_pm:
intel_engine_pm_put(ce->engine);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
index 243a43a87824..8bcd336a90dc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -10,7 +10,6 @@
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
#include "i915_vma.h"
struct drm_i915_gem_object;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index a0b10bcd8d8a..54ee658bb168 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -179,9 +179,6 @@ struct drm_i915_gem_object {
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
- /** Count of VMA actually bound by this object */
- atomic_t bind_count;
-
struct {
/*
* Protects the pages and their use. Do not use directly, but
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 24f4cadea114..af9e48ee4a33 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -199,8 +199,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY;
- GEM_BUG_ON(atomic_read(&obj->bind_count));
-
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock(&obj->mm.lock);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
@@ -393,6 +391,7 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
offset, size, obj->base.size));
+ wmb(); /* let all previous writes be visible to coherent partners */
obj->mm.dirty = true;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 698e22420dc5..7fe9831aa9ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -10,8 +10,6 @@
#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
-#include <drm/drm_legacy.h> /* for drm_pci.h! */
-#include <drm/drm_pci.h>
#include "gt/intel_gt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 03e5eb4c99d1..5b65ce738b16 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -27,18 +27,6 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return false;
/*
- * Only report true if by unbinding the object and putting its pages
- * we can actually make forward progress towards freeing physical
- * pages.
- *
- * If the pages are pinned for any other reason than being bound
- * to the GPU, simply unbinding from the GPU is not going to succeed
- * in releasing our pin count on the pages themselves.
- */
- if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
- return false;
-
- /*
* We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
@@ -54,6 +42,8 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
flags = 0;
if (shrink & I915_SHRINK_ACTIVE)
flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+ if (!(shrink & I915_SHRINK_BOUND))
+ flags = I915_GEM_OBJECT_UNBIND_TEST;
if (i915_gem_object_unbind(obj, flags) == 0)
__i915_gem_object_put_pages(obj);
@@ -194,10 +184,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
i915_gem_object_is_framebuffer(obj))
continue;
- if (!(shrink & I915_SHRINK_BOUND) &&
- atomic_read(&obj->bind_count))
- continue;
-
if (!can_release_pages(obj))
continue;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 5557dfa83a7b..dc250278bd2c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -381,14 +381,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
mutex_init(&i915->mm.stolen_lock);
if (intel_vgpu_active(i915)) {
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"iGVT-g active");
return 0;
}
if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"DMAR active");
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 7ffd7afeb7a5..33776b3f3fa5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -471,7 +471,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
down_read(&mm->mmap_sem);
locked = 1;
}
- ret = get_user_pages_remote
+ ret = pin_user_pages_remote
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
mutex_unlock(&obj->mm.lock);
- release_pages(pvec, pinned);
+ unpin_user_pages(pvec, pinned);
kvfree(pvec);
i915_gem_object_put(obj);
@@ -564,6 +564,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
struct sg_table *pages;
bool active;
int pinned;
+ unsigned int gup_flags = 0;
/* If userspace should engineer that these pages are replaced in
* the vma between us binding this page into the GTT and completion
@@ -598,11 +599,22 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_KERNEL |
__GFP_NORETRY |
__GFP_NOWARN);
- if (pvec) /* defer to worker if malloc fails */
- pinned = __get_user_pages_fast(obj->userptr.ptr,
- num_pages,
- !i915_gem_object_is_readonly(obj),
- pvec);
+ /*
+ * Using pin_user_pages_fast_only() with a read-only
+ * access is questionable. A read-only page may be
+ * COW-broken, and then this might end up giving
+ * the wrong side of the COW.
+ *
+ * We may or may not care.
+ */
+ if (pvec) {
+ /* defer to worker if malloc fails */
+ if (!i915_gem_object_is_readonly(obj))
+ gup_flags |= FOLL_WRITE;
+ pinned = pin_user_pages_fast_only(obj->userptr.ptr,
+ num_pages, gup_flags,
+ pvec);
+ }
}
active = false;
@@ -620,7 +632,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
__i915_gem_userptr_set_active(obj, true);
if (IS_ERR(pages))
- release_pages(pvec, pinned);
+ unpin_user_pages(pvec, pinned);
kvfree(pvec);
return PTR_ERR_OR_ZERO(pages);
@@ -675,7 +687,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
}
mark_page_accessed(page);
- put_page(page);
+ unpin_user_page(page);
}
obj->mm.dirty = false;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index fa16f2c3f3ac..2b46c6530da9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -88,8 +88,7 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops huge_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = huge_get_pages,
.put_pages = huge_put_pages,
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index d4f94ca9ae0d..c9988b6d5c88 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -421,7 +421,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- goto out_close;
+ goto out_put;
err = igt_check_page_sizes(vma);
@@ -432,8 +432,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
}
i915_vma_unpin(vma);
- i915_vma_close(vma);
-
i915_gem_object_put(obj);
if (err)
@@ -443,8 +441,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
goto out_device;
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_device:
@@ -492,7 +488,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- goto out_close;
+ goto out_put;
err = igt_check_page_sizes(vma);
if (err)
@@ -515,8 +511,6 @@ static int igt_mock_memory_region_huge_pages(void *arg)
}
i915_vma_unpin(vma);
- i915_vma_close(vma);
-
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -526,8 +520,6 @@ static int igt_mock_memory_region_huge_pages(void *arg)
out_unpin:
i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_region:
@@ -587,10 +579,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
}
err = i915_vma_pin(vma, 0, 0, flags);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
err = igt_check_page_sizes(vma);
@@ -603,10 +593,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_unpin(vma);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
/*
* Try all the other valid offsets until the next
@@ -615,16 +603,12 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
*/
for (offset = 4096; offset < page_size; offset += 4096) {
err = i915_vma_unbind(vma);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
err = i915_vma_pin(vma, 0, 0, flags | offset);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
err = igt_check_page_sizes(vma);
@@ -636,10 +620,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_unpin(vma);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
if (igt_timeout(end_time,
"%s timed out at offset %x with page-size %x\n",
@@ -647,8 +629,6 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
break;
}
- i915_vma_close(vma);
-
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
@@ -670,12 +650,6 @@ static void close_object_list(struct list_head *objects,
struct drm_i915_gem_object *obj, *on;
list_for_each_entry_safe(obj, on, objects, st_link) {
- struct i915_vma *vma;
-
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (!IS_ERR(vma))
- i915_vma_close(vma);
-
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
@@ -912,7 +886,7 @@ static int igt_mock_ppgtt_64K(void *arg)
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_vma_close;
+ goto out_object_unpin;
err = igt_check_page_sizes(vma);
if (err)
@@ -945,8 +919,6 @@ static int igt_mock_ppgtt_64K(void *arg)
}
i915_vma_unpin(vma);
- i915_vma_close(vma);
-
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
@@ -957,8 +929,6 @@ static int igt_mock_ppgtt_64K(void *arg)
out_vma_unpin:
i915_vma_unpin(vma);
-out_vma_close:
- i915_vma_close(vma);
out_object_unpin:
i915_gem_object_unpin_pages(obj);
out_object_put:
@@ -1070,7 +1040,7 @@ static int __igt_write_huge(struct intel_context *ce,
err = i915_vma_unbind(vma);
if (err)
- goto out_vma_close;
+ return err;
err = i915_vma_pin(vma, size, 0, flags | offset);
if (err) {
@@ -1081,7 +1051,7 @@ static int __igt_write_huge(struct intel_context *ce,
if (err == -ENOSPC && i915_is_ggtt(ce->vm))
err = 0;
- goto out_vma_close;
+ return err;
}
err = igt_check_page_sizes(vma);
@@ -1102,8 +1072,6 @@ static int __igt_write_huge(struct intel_context *ce,
out_vma_unpin:
i915_vma_unpin(vma);
-out_vma_close:
- __i915_vma_put(vma);
return err;
}
@@ -1490,7 +1458,7 @@ static int igt_ppgtt_pin_update(void *arg)
err = i915_vma_pin(vma, SZ_2M, 0, flags);
if (err)
- goto out_close;
+ goto out_put;
if (vma->page_sizes.sg < page_size) {
pr_info("Unable to allocate page-size %x, finishing test early\n",
@@ -1527,8 +1495,6 @@ static int igt_ppgtt_pin_update(void *arg)
goto out_unpin;
i915_vma_unpin(vma);
- i915_vma_close(vma);
-
i915_gem_object_put(obj);
}
@@ -1546,7 +1512,7 @@ static int igt_ppgtt_pin_update(void *arg)
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_close;
+ goto out_put;
/*
* Make sure we don't end up with something like where the pde is still
@@ -1576,8 +1542,6 @@ static int igt_ppgtt_pin_update(void *arg)
out_unpin:
i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_vm:
@@ -1629,13 +1593,11 @@ static int igt_tmpfs_fallback(void *arg)
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- goto out_close;
+ goto out_put;
err = igt_check_page_sizes(vma);
i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_restore:
@@ -1682,7 +1644,7 @@ static int igt_shrink_thp(void *arg)
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_close;
+ goto out_put;
if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
pr_info("failed to allocate THP, finishing test early\n");
@@ -1706,7 +1668,7 @@ static int igt_shrink_thp(void *arg)
i915_gem_context_unlock_engines(ctx);
i915_vma_unpin(vma);
if (err)
- goto out_close;
+ goto out_put;
/*
* Now that the pages are *unpinned* shrink-all should invoke
@@ -1716,18 +1678,18 @@ static int igt_shrink_thp(void *arg)
if (i915_gem_object_has_pages(obj)) {
pr_err("shrink-all didn't truncate the pages\n");
err = -EINVAL;
- goto out_close;
+ goto out_put;
}
if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
pr_err("residual page-size bits left\n");
err = -EINVAL;
- goto out_close;
+ goto out_put;
}
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_close;
+ goto out_put;
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
@@ -1737,8 +1699,6 @@ static int igt_shrink_thp(void *arg)
out_unpin:
i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_vm:
@@ -1777,21 +1737,20 @@ int i915_gem_huge_page_mock_selftests(void)
if (!i915_vm_is_4lvl(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
- goto out_close;
+ goto out_put;
}
/* If we ever hit this then it's time to mock the 64K scratch */
if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
pr_err("PPGTT missing 64K scratch page\n");
err = -EINVAL;
- goto out_close;
+ goto out_put;
}
err = i915_subtests(tests, ppgtt);
-out_close:
+out_put:
i915_vm_put(&ppgtt->vm);
-
out_unlock:
drm_dev_put(&dev_priv->drm);
return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index b972be165e85..8fe3ad2ee34e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -7,9 +7,12 @@
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gpu_commands.h"
+#include "gem/i915_gem_lmem.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
+#include "selftests/i915_random.h"
#include "huge_gem_object.h"
#include "mock_context.h"
@@ -127,10 +130,573 @@ static int igt_client_fill(void *arg)
} while (1);
}
+#define WIDTH 512
+#define HEIGHT 32
+
+struct blit_buffer {
+ struct i915_vma *vma;
+ u32 start_val;
+ u32 tiling;
+};
+
+struct tiled_blits {
+ struct intel_context *ce;
+ struct blit_buffer buffers[3];
+ struct blit_buffer scratch;
+ struct i915_vma *batch;
+ u64 hole;
+ u32 width;
+ u32 height;
+};
+
+static int prepare_blit(const struct tiled_blits *t,
+ struct blit_buffer *dst,
+ struct blit_buffer *src,
+ struct drm_i915_gem_object *batch)
+{
+ const int gen = INTEL_GEN(to_i915(batch->base.dev));
+ bool use_64b_reloc = gen >= 8;
+ u32 src_pitch, dst_pitch;
+ u32 cmd, *cs;
+
+ cs = i915_gem_object_pin_map(batch, I915_MAP_WC);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
+ cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
+ if (src->tiling == I915_TILING_Y)
+ cmd |= BCS_SRC_Y;
+ if (dst->tiling == I915_TILING_Y)
+ cmd |= BCS_DST_Y;
+ *cs++ = cmd;
+
+ cmd = MI_FLUSH_DW;
+ if (gen >= 8)
+ cmd++;
+ *cs++ = cmd;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
+ if (gen >= 8)
+ cmd += 2;
+
+ src_pitch = t->width * 4;
+ if (src->tiling) {
+ cmd |= XY_SRC_COPY_BLT_SRC_TILED;
+ src_pitch /= 4;
+ }
+
+ dst_pitch = t->width * 4;
+ if (dst->tiling) {
+ cmd |= XY_SRC_COPY_BLT_DST_TILED;
+ dst_pitch /= 4;
+ }
+
+ *cs++ = cmd;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
+ *cs++ = 0;
+ *cs++ = t->height << 16 | t->width;
+ *cs++ = lower_32_bits(dst->vma->node.start);
+ if (use_64b_reloc)
+ *cs++ = upper_32_bits(dst->vma->node.start);
+ *cs++ = 0;
+ *cs++ = src_pitch;
+ *cs++ = lower_32_bits(src->vma->node.start);
+ if (use_64b_reloc)
+ *cs++ = upper_32_bits(src->vma->node.start);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch);
+ i915_gem_object_unpin_map(batch);
+
+ return 0;
+}
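
Note the pitch convention in prepare_blit(): linear surfaces program the blitter pitch in bytes (width * 4 at 32bpp), while tiled surfaces divide it by four, i.e. program it in dwords, per the /= 4 above. Spelled out (sketch):

#include <stdio.h>

int main(void)
{
	unsigned int width = 512;              /* pixels at 32bpp */
	unsigned int linear_pitch = width * 4; /* bytes */
	unsigned int tiled_pitch = linear_pitch / 4; /* dwords when tiled */

	printf("linear pitch %u bytes, tiled pitch field %u\n",
	       linear_pitch, tiled_pitch);
	return 0;
}
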
+
+static void tiled_blits_destroy_buffers(struct tiled_blits *t)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(t->buffers); i++)
+ i915_vma_put(t->buffers[i].vma);
+
+ i915_vma_put(t->scratch.vma);
+ i915_vma_put(t->batch);
+}
+
+static struct i915_vma *
+__create_vma(struct tiled_blits *t, size_t size, bool lmem)
+{
+ struct drm_i915_private *i915 = t->ce->vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ if (lmem)
+ obj = i915_gem_object_create_lmem(i915, size, 0);
+ else
+ obj = i915_gem_object_create_shmem(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, t->ce->vm, NULL);
+ if (IS_ERR(vma))
+ i915_gem_object_put(obj);
+
+ return vma;
+}
+
+static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem)
+{
+ return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), lmem);
+}
+
+static int tiled_blits_create_buffers(struct tiled_blits *t,
+ int width, int height,
+ struct rnd_state *prng)
+{
+ struct drm_i915_private *i915 = t->ce->engine->i915;
+ int i;
+
+ t->width = width;
+ t->height = height;
+
+ t->batch = __create_vma(t, PAGE_SIZE, false);
+ if (IS_ERR(t->batch))
+ return PTR_ERR(t->batch);
+
+ t->scratch.vma = create_vma(t, false);
+ if (IS_ERR(t->scratch.vma)) {
+ i915_vma_put(t->batch);
+ return PTR_ERR(t->scratch.vma);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
+ struct i915_vma *vma;
+
+ vma = create_vma(t, HAS_LMEM(i915) && i % 2);
+ if (IS_ERR(vma)) {
+ tiled_blits_destroy_buffers(t);
+ return PTR_ERR(vma);
+ }
+
+ t->buffers[i].vma = vma;
+ t->buffers[i].tiling =
+ i915_prandom_u32_max_state(I915_TILING_Y + 1, prng);
+ }
+
+ return 0;
+}
+
+static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val)
+{
+ int i;
+
+ t->scratch.start_val = val;
+ for (i = 0; i < t->width * t->height; i++)
+ vaddr[i] = val++;
+
+ i915_gem_object_flush_map(t->scratch.vma->obj);
+}
+
+static u64 swizzle_bit(unsigned int bit, u64 offset)
+{
+ return (offset & BIT_ULL(bit)) >> (bit - 6);
+}
+
+static u64 tiled_offset(const struct intel_gt *gt,
+ u64 v,
+ unsigned int stride,
+ unsigned int tiling)
+{
+ unsigned int swizzle;
+ u64 x, y;
+
+ if (tiling == I915_TILING_NONE)
+ return v;
+
+ y = div64_u64_rem(v, stride, &x);
+
+ if (tiling == I915_TILING_X) {
+ v = div64_u64_rem(y, 8, &y) * stride * 8;
+ v += y * 512;
+ v += div64_u64_rem(x, 512, &x) << 12;
+ v += x;
+
+ swizzle = gt->ggtt->bit_6_swizzle_x;
+ } else {
+ const unsigned int ytile_span = 16;
+ const unsigned int ytile_height = 512;
+
+ v = div64_u64_rem(y, 32, &y) * stride * 32;
+ v += y * ytile_span;
+ v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+ v += x;
+
+ swizzle = gt->ggtt->bit_6_swizzle_y;
+ }
+
+ switch (swizzle) {
+ case I915_BIT_6_SWIZZLE_9:
+ v ^= swizzle_bit(9, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_10:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_11:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
+ break;
+ }
+
+ return v;
+}
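
swizzle_bit() folds one upper address bit down into bit 6: (offset & BIT(bit)) >> (bit - 6) is either 0 or 0x40, and XORing such terms reproduces the hardware's bank-address swizzle. A worked example for the 9/10 swizzle (sketch):

#include <stdint.h>
#include <stdio.h>

static uint64_t swizzle_bit(unsigned int bit, uint64_t offset)
{
	return (offset & (1ull << bit)) >> (bit - 6); /* 0 or 0x40 */
}

int main(void)
{
	uint64_t v = 0x200; /* bit 9 set, bits 6 and 10 clear */

	/* I915_BIT_6_SWIZZLE_9_10: bit 6 ^= bit 9 ^ bit 10 */
	v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
	printf("0x%llx\n", (unsigned long long)v); /* 0x240: bit 6 flipped */
	return 0;
}
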
+
+static const char *repr_tiling(int tiling)
+{
+ switch (tiling) {
+ case I915_TILING_NONE: return "linear";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ default: return "unknown";
+ }
+}
+
+static int verify_buffer(const struct tiled_blits *t,
+ struct blit_buffer *buf,
+ struct rnd_state *prng)
+{
+ const u32 *vaddr;
+ int ret = 0;
+ int x, y, p;
+
+ x = i915_prandom_u32_max_state(t->width, prng);
+ y = i915_prandom_u32_max_state(t->height, prng);
+ p = y * t->width + x;
+
+ vaddr = i915_gem_object_pin_map(buf->vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ if (vaddr[0] != buf->start_val) {
+ ret = -EINVAL;
+ } else {
+ u64 v = tiled_offset(buf->vma->vm->gt,
+ p * 4, t->width * 4,
+ buf->tiling);
+
+ if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
+ ret = -EINVAL;
+ }
+ if (ret) {
+ pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n",
+ repr_tiling(buf->tiling),
+ x, y, buf->start_val);
+ igt_hexdump(vaddr, 4096);
+ }
+
+ i915_gem_object_unpin_map(buf->vma->obj);
+ return ret;
+}
+
+static int move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+static int pin_buffer(struct i915_vma *vma, u64 addr)
+{
+ int err;
+
+ if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
+ err = i915_vma_unbind(vma);
+ if (err)
+ return err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+tiled_blit(struct tiled_blits *t,
+ struct blit_buffer *dst, u64 dst_addr,
+ struct blit_buffer *src, u64 src_addr)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = pin_buffer(src->vma, src_addr);
+ if (err) {
+ pr_err("Cannot pin src @ %llx\n", src_addr);
+ return err;
+ }
+
+ err = pin_buffer(dst->vma, dst_addr);
+ if (err) {
+ pr_err("Cannot pin dst @ %llx\n", dst_addr);
+ goto err_src;
+ }
+
+ err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH);
+ if (err) {
+ pr_err("cannot pin batch\n");
+ goto err_dst;
+ }
+
+ err = prepare_blit(t, dst, src, t->batch->obj);
+ if (err)
+ goto err_bb;
+
+ rq = intel_context_create_request(t->ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_bb;
+ }
+
+ err = move_to_active(t->batch, rq, 0);
+ if (!err)
+ err = move_to_active(src->vma, rq, 0);
+ if (!err)
+ err = move_to_active(dst->vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ t->batch->node.start,
+ t->batch->node.size,
+ 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ dst->start_val = src->start_val;
+err_bb:
+ i915_vma_unpin(t->batch);
+err_dst:
+ i915_vma_unpin(dst->vma);
+err_src:
+ i915_vma_unpin(src->vma);
+ return err;
+}
+
+static struct tiled_blits *
+tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+ struct drm_mm_node hole;
+ struct tiled_blits *t;
+ u64 hole_size;
+ int err;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+
+ t->ce = intel_context_create(engine);
+ if (IS_ERR(t->ce)) {
+ err = PTR_ERR(t->ce);
+ goto err_free;
+ }
+
+ hole_size = 2 * PAGE_ALIGN(WIDTH * HEIGHT * 4);
+ hole_size *= 2; /* room to maneuver */
+ hole_size += 2 * I915_GTT_MIN_ALIGNMENT;
+
+ mutex_lock(&t->ce->vm->mutex);
+ memset(&hole, 0, sizeof(hole));
+ err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
+ hole_size, 0, I915_COLOR_UNEVICTABLE,
+ 0, U64_MAX,
+ DRM_MM_INSERT_BEST);
+ if (!err)
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&t->ce->vm->mutex);
+ if (err) {
+ err = -ENODEV;
+ goto err_put;
+ }
+
+ t->hole = hole.start + I915_GTT_MIN_ALIGNMENT;
+ pr_info("Using hole at %llx\n", t->hole);
+
+ err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng);
+ if (err)
+ goto err_put;
+
+ return t;
+
+err_put:
+ intel_context_put(t->ce);
+err_free:
+ kfree(t);
+ return ERR_PTR(err);
+}
+
+static void tiled_blits_destroy(struct tiled_blits *t)
+{
+ tiled_blits_destroy_buffers(t);
+
+ intel_context_put(t->ce);
+ kfree(t);
+}
+
+static int tiled_blits_prepare(struct tiled_blits *t,
+ struct rnd_state *prng)
+{
+ u64 offset = PAGE_ALIGN(t->width * t->height * 4);
+ u32 *map;
+ int err;
+ int i;
+
+ map = i915_gem_object_pin_map(t->scratch.vma->obj, I915_MAP_WC);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ /* Use scratch to fill objects */
+ for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
+ fill_scratch(t, map, prandom_u32_state(prng));
+ GEM_BUG_ON(verify_buffer(t, &t->scratch, prng));
+
+ err = tiled_blit(t,
+ &t->buffers[i], t->hole + offset,
+ &t->scratch, t->hole);
+ if (err == 0)
+ err = verify_buffer(t, &t->buffers[i], prng);
+ if (err) {
+ pr_err("Failed to create buffer %d\n", i);
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(t->scratch.vma->obj);
+ return err;
+}
+
+static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
+{
+ u64 offset =
+ round_up(t->width * t->height * 4, 2 * I915_GTT_MIN_ALIGNMENT);
+ int err;
+
+ /* We want to check position-invariant tiling across GTT eviction */
+
+ err = tiled_blit(t,
+ &t->buffers[1], t->hole + offset / 2,
+ &t->buffers[0], t->hole + 2 * offset);
+ if (err)
+ return err;
+
+ /* Reposition so that we overlap the old addresses, but slightly offset */
+ err = tiled_blit(t,
+ &t->buffers[2], t->hole + I915_GTT_MIN_ALIGNMENT,
+ &t->buffers[1], t->hole + 3 * offset / 2);
+ if (err)
+ return err;
+
+ err = verify_buffer(t, &t->buffers[2], prng);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int __igt_client_tiled_blits(struct intel_engine_cs *engine,
+ struct rnd_state *prng)
+{
+ struct tiled_blits *t;
+ int err;
+
+ t = tiled_blits_create(engine, prng);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+
+ err = tiled_blits_prepare(t, prng);
+ if (err)
+ goto out;
+
+ err = tiled_blits_bounce(t, prng);
+
+out:
+ tiled_blits_destroy(t);
+ return err;
+}
+
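+/*
+ * Bit17 swizzling depends on the physical address of each page, which a
+ * GPU blit cannot reproduce, so the test restricts itself to the sane,
+ * sub-page (bit6-only) swizzle modes.
+ */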
+static bool has_bit17_swizzle(int sw)
+{
+ return (sw == I915_BIT_6_SWIZZLE_9_10_17 ||
+ sw == I915_BIT_6_SWIZZLE_9_17);
+}
+
+static bool bad_swizzling(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ return true;
+
+ if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
+ has_bit17_swizzle(ggtt->bit_6_swizzle_y))
+ return true;
+
+ return false;
+}
+
+static int igt_client_tiled_blits(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ I915_RND_STATE(prng);
+ int inst = 0;
+
+ /* Test requires explicit BLT tiling controls */
+ if (INTEL_GEN(i915) < 4)
+ return 0;
+
+ if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
+ return 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ int err;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ err = __igt_client_tiled_blits(engine, &prng);
+ if (err == -ENODEV)
+ err = 0;
+ if (err)
+ return err;
+ } while (1);
+}
+
int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_client_fill),
+ SUBTEST(igt_client_tiled_blits),
};
if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 3f6079e1dfb6..87d7d8aa080f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -158,6 +158,8 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v)
return PTR_ERR(map);
map[offset / sizeof(*map)] = v;
+
+ __i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
i915_gem_object_unpin_map(ctx->obj);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 54b86cf7f5d2..b81978890641 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -972,12 +972,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto err_batch;
}
- err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
- if (err)
- goto err_request;
-
i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
@@ -994,6 +988,18 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err)
goto skip_request;
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto skip_request;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
+ if (err)
+ goto skip_request;
+
i915_vma_unpin_and_release(&batch, 0);
i915_vma_unpin(vma);
@@ -1005,7 +1011,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
skip_request:
i915_request_set_error_once(rq, err);
-err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin_and_release(&batch, 0);
@@ -1541,10 +1546,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
- if (err)
- goto err_request;
-
i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, false);
if (err == 0)
@@ -1553,6 +1554,16 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto skip_request;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ if (err)
+ goto skip_request;
+
i915_vma_unpin(vma);
i915_request_add(rq);
@@ -1560,7 +1571,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
i915_request_set_error_once(rq, err);
-err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
@@ -1674,10 +1684,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
- if (err)
- goto err_request;
-
i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
@@ -1686,8 +1692,17 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto skip_request;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+ if (err)
+ goto skip_request;
+
i915_vma_unpin(vma);
- i915_vma_close(vma);
i915_request_add(rq);
@@ -1709,7 +1724,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
i915_request_set_error_once(rq, err);
-err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
@@ -1925,7 +1939,7 @@ static int mock_context_barrier(void *arg)
goto out;
}
- rq = igt_request_alloc(ctx, i915->engine[RCS0]);
+ rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]);
if (IS_ERR(rq)) {
pr_err("Request allocation failed!\n");
goto out;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
new file mode 100644
index 000000000000..a49016f8ee0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "gt/intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
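+/*
+ * Relocations may land at unaligned offsets, so assemble the 64b value
+ * with memcpy before applying the mask.
+ */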
+static u64 read_reloc(const u32 *map, int x, const u64 mask)
+{
+ u64 reloc;
+
+ memcpy(&reloc, &map[x], sizeof(reloc));
+ return reloc & mask;
+}
+
+static int __igt_gpu_reloc(struct i915_execbuffer *eb,
+ struct drm_i915_gem_object *obj)
+{
+ const unsigned int offsets[] = { 8, 3, 0 };
+ const u64 mask =
+ GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0);
+ const u32 *map = page_mask_bits(obj->mm.mapping);
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
+ int i;
+
+ vma = i915_vma_instance(obj, eb->context->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err)
+ return err;
+
+ /* 8-Byte aligned */
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[0] * sizeof(u32),
+ 0)) {
+ err = -EIO;
+ goto unpin_vma;
+ }
+
+ /* !8-Byte aligned */
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[1] * sizeof(u32),
+ 1)) {
+ err = -EIO;
+ goto unpin_vma;
+ }
+
+ /* Skip to the end of the cmd page */
+ i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
+ i -= eb->reloc_cache.rq_size;
+ memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
+ MI_NOOP, i);
+ eb->reloc_cache.rq_size += i;
+
+ /* Force batch chaining */
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[2] * sizeof(u32),
+ 2)) {
+ err = -EIO;
+ goto unpin_vma;
+ }
+
+ GEM_BUG_ON(!eb->reloc_cache.rq);
+ rq = i915_request_get(eb->reloc_cache.rq);
+ err = reloc_gpu_flush(&eb->reloc_cache);
+ if (err)
+ goto put_rq;
+ GEM_BUG_ON(eb->reloc_cache.rq);
+
+ err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
+ if (err) {
+ intel_gt_set_wedged(eb->engine->gt);
+ goto put_rq;
+ }
+
+ if (!i915_request_completed(rq)) {
+ pr_err("%s: did not wait for relocations!\n", eb->engine->name);
+ err = -EINVAL;
+ goto put_rq;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(offsets); i++) {
+ u64 reloc = read_reloc(map, offsets[i], mask);
+
+ if (reloc != i) {
+ pr_err("%s[%d]: map[%d] %llx != %x\n",
+ eb->engine->name, i, offsets[i], reloc, i);
+ err = -EINVAL;
+ }
+ }
+ if (err)
+ igt_hexdump(map, 4096);
+
+put_rq:
+ i915_request_put(rq);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
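+/*
+ * Emit GPU relocations at aligned, unaligned and batch-chaining offsets
+ * into a poisoned scratch object on each user engine, then verify the
+ * values written.
+ */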
+static int igt_gpu_reloc(void *arg)
+{
+ struct i915_execbuffer eb;
+ struct drm_i915_gem_object *scratch;
+ int err = 0;
+ u32 *map;
+
+ eb.i915 = arg;
+
+ scratch = i915_gem_object_create_internal(eb.i915, 4096);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ map = i915_gem_object_pin_map(scratch, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_scratch;
+ }
+
+ for_each_uabi_engine(eb.engine, eb.i915) {
+ reloc_cache_init(&eb.reloc_cache, eb.i915);
+ memset(map, POISON_INUSE, 4096);
+
+ intel_engine_pm_get(eb.engine);
+ eb.context = intel_context_create(eb.engine);
+ if (IS_ERR(eb.context)) {
+ err = PTR_ERR(eb.context);
+ goto err_pm;
+ }
+
+ err = intel_context_pin(eb.context);
+ if (err)
+ goto err_put;
+
+ err = __igt_gpu_reloc(&eb, scratch);
+
+ intel_context_unpin(eb.context);
+err_put:
+ intel_context_put(eb.context);
+err_pm:
+ intel_engine_pm_put(eb.engine);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(eb.i915))
+ err = -EIO;
+
+err_scratch:
+ i915_gem_object_put(scratch);
+ return err;
+}
+
+int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gpu_reloc),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 43912e9b683d..9c7402ce5bf9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -952,6 +952,129 @@ static int igt_mmap(void *arg)
return 0;
}
+static const char *repr_mmap_type(enum i915_mmap_type type)
+{
+ switch (type) {
+ case I915_MMAP_TYPE_GTT: return "gtt";
+ case I915_MMAP_TYPE_WB: return "wb";
+ case I915_MMAP_TYPE_WC: return "wc";
+ case I915_MMAP_TYPE_UC: return "uc";
+ default: return "unknown";
+ }
+}
+
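+/* Direct CPU access needs a backing store we can map: struct pages or iomem */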
+static bool can_access(const struct drm_i915_gem_object *obj)
+{
+ unsigned int flags =
+ I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+
+ return i915_gem_object_type_has(obj, flags);
+}
+
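+/*
+ * Write through the user mmap with __put_user, then access the same page
+ * via access_process_vm() (the ptrace path) and check both views agree.
+ */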
+static int __igt_mmap_access(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
+{
+ struct i915_mmap_offset *mmo;
+ unsigned long __user *ptr;
+ unsigned long A, B;
+ unsigned long x, y;
+ unsigned long addr;
+ int err;
+
+ memset(&A, 0xAA, sizeof(A));
+ memset(&B, 0xBB, sizeof(B));
+
+ if (!can_mmap(obj, type) || !can_access(obj))
+ return 0;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ ptr = (unsigned long __user *)addr;
+
+ err = __put_user(A, ptr);
+ if (err) {
+ pr_err("%s(%s): failed to write into user mmap\n",
+ obj->mm.region->name, repr_mmap_type(type));
+ goto out_unmap;
+ }
+
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ err = access_process_vm(current, addr, &x, sizeof(x), 0);
+ if (err != sizeof(x)) {
+ pr_err("%s(%s): access_process_vm() read failed\n",
+ obj->mm.region->name, repr_mmap_type(type));
+ goto out_unmap;
+ }
+
+ err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
+ if (err != sizeof(B)) {
+ pr_err("%s(%s): access_process_vm() write failed\n",
+ obj->mm.region->name, repr_mmap_type(type));
+ goto out_unmap;
+ }
+
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ err = __get_user(y, ptr);
+ if (err) {
+ pr_err("%s(%s): failed to read from user mmap\n",
+ obj->mm.region->name, repr_mmap_type(type));
+ goto out_unmap;
+ }
+
+ if (x != A || y != B) {
+ pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
+ obj->mm.region->name, repr_mmap_type(type),
+ x, y);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+ return err;
+}
+
+static int igt_mmap_access(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
+ if (err == 0)
+ err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
+ if (err == 0)
+ err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __igt_mmap_gpu(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
enum i915_mmap_type type)
@@ -1156,9 +1279,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
if (err)
goto out_unmap;
- GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
- !atomic_read(&obj->bind_count));
-
err = check_present(addr, obj->base.size);
if (err) {
pr_err("%s: was not present\n", obj->mm.region->name);
@@ -1175,7 +1295,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
pr_err("Failed to unbind object!\n");
goto out_unmap;
}
- GEM_BUG_ON(atomic_read(&obj->bind_count));
if (type != I915_MMAP_TYPE_GTT) {
__i915_gem_object_put_pages(obj);
@@ -1233,6 +1352,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
SUBTEST(igt_mmap),
+ SUBTEST(igt_mmap_access),
SUBTEST(igt_mmap_revoke),
SUBTEST(igt_mmap_gpu),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index 2b6db6f799de..faa5b6d91795 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -14,7 +14,7 @@ static int igt_gem_object(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
- int err = -ENOMEM;
+ int err;
/* Basic test to ensure we can create an object */
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 772d8cba7da9..e21b5023ca7d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -83,6 +83,8 @@ igt_emit_store_dw(struct i915_vma *vma,
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(vma->vm->gt);
@@ -126,16 +128,6 @@ int igt_gpu_fill_dw(struct intel_context *ce,
goto err_batch;
}
- flags = 0;
- if (INTEL_GEN(ce->vm->i915) <= 5)
- flags |= I915_DISPATCH_SECURE;
-
- err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
- if (err)
- goto err_request;
-
i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
@@ -152,15 +144,17 @@ int igt_gpu_fill_dw(struct intel_context *ce,
if (err)
goto skip_request;
- i915_request_add(rq);
-
- i915_vma_unpin_and_release(&batch, 0);
+ flags = 0;
+ if (INTEL_GEN(ce->vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
- return 0;
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
skip_request:
- i915_request_set_error_once(rq, err);
-err_request:
+ if (err)
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
err_batch:
i915_vma_unpin_and_release(&batch, 0);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index 9272bef57092..debaf7b18ab5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -66,7 +66,7 @@ static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
- return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
+ return vm_map_ram(mock->pages, mock->npages, 0);
}
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/debugfs_engines.c
index 6a5e9ab20b94..5e3725e62241 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.c
@@ -32,5 +32,5 @@ void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
{ "engines", &engines_fops },
};
- debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
index 75255aaacaed..1de5fbaa1cf9 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -9,6 +9,7 @@
#include "debugfs_engines.h"
#include "debugfs_gt.h"
#include "debugfs_gt_pm.h"
+#include "uc/intel_uc_debugfs.h"
#include "i915_drv.h"
void debugfs_gt_register(struct intel_gt *gt)
@@ -24,17 +25,19 @@ void debugfs_gt_register(struct intel_gt *gt)
debugfs_engines_register(gt, root);
debugfs_gt_pm_register(gt, root);
+
+ intel_uc_debugfs_register(&gt->uc, root);
}
-void debugfs_gt_register_files(struct intel_gt *gt,
- struct dentry *root,
- const struct debugfs_gt_file *files,
- unsigned long count)
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count, void *data)
{
while (count--) {
- if (!files->eval || files->eval(gt))
+ umode_t mode = files->fops->write ? 0644 : 0444;
+
+ if (!files->eval || files->eval(data))
debugfs_create_file(files->name,
- 0444, root, gt,
+ mode, root, data,
files->fops);
files++;
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/debugfs_gt.h
index 4ea0f06cda8f..f77540f727e9 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.h
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.h
@@ -28,12 +28,11 @@ void debugfs_gt_register(struct intel_gt *gt);
struct debugfs_gt_file {
const char *name;
const struct file_operations *fops;
- bool (*eval)(const struct intel_gt *gt);
+ bool (*eval)(void *data);
};
-void debugfs_gt_register_files(struct intel_gt *gt,
- struct dentry *root,
- const struct debugfs_gt_file *files,
- unsigned long count);
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count, void *data);
#endif /* DEBUGFS_GT_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index 059c9e5c002e..174a24553322 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -10,6 +10,7 @@
#include "debugfs_gt_pm.h"
#include "i915_drv.h"
#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
#include "intel_llc.h"
#include "intel_rc6.h"
#include "intel_rps.h"
@@ -268,7 +269,7 @@ static int frequency_show(struct seq_file *m, void *unused)
yesno(rpmodectl & GEN6_RP_ENABLE));
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
+ GEN6_RP_MEDIA_SW_MODE));
vlv_punit_get(i915);
freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
@@ -300,8 +301,9 @@ static int frequency_show(struct seq_file *m, void *unused)
u32 rp_state_cap;
u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
- u32 rpupei, rpcurup, rpprevup;
- u32 rpdownei, rpcurdown, rpprevdown;
+ u32 rpcurupei, rpcurup, rpprevup;
+ u32 rpcurdownei, rpcurdown, rpprevdown;
+ u32 rpupei, rpupt, rpdownei, rpdownt;
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
@@ -334,12 +336,19 @@ static int frequency_show(struct seq_file *m, void *unused)
rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
- rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
- rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+
+ rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+ rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
+
+ rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+ rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
+
cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
@@ -372,7 +381,7 @@ static int frequency_show(struct seq_file *m, void *unused)
yesno(rpmodectl & GEN6_RP_ENABLE));
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
+ GEN6_RP_MEDIA_SW_MODE));
seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_mask);
@@ -394,23 +403,35 @@ static int frequency_show(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
- rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
- seq_printf(m, "RP CUR UP: %d (%dus)\n",
- rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dus)\n",
- rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
+ seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ rpcurupei,
+ intel_gt_pm_interval_to_ns(gt, rpcurupei));
+ seq_printf(m, "RP CUR UP: %d (%dns)\n",
+ rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
+ seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
-
- seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
- rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
- rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
- rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
+ seq_printf(m, "RP UP EI: %d (%dns)\n",
+ rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
+ seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
+ rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
+
+ seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ rpcurdownei,
+ intel_gt_pm_interval_to_ns(gt, rpcurdownei));
+ seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ rpcurdown,
+ intel_gt_pm_interval_to_ns(gt, rpcurdown));
+ seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ rpprevdown,
+ intel_gt_pm_interval_to_ns(gt, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
rps->power.down_threshold);
+ seq_printf(m, "RP DOWN EI: %d (%dns)\n",
+ rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
+ seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
+ rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
@@ -506,8 +527,10 @@ static int llc_show(struct seq_file *m, void *data)
return 0;
}
-static bool llc_eval(const struct intel_gt *gt)
+static bool llc_eval(void *data)
{
+ struct intel_gt *gt = data;
+
return HAS_LLC(gt->i915);
}
@@ -533,7 +556,8 @@ static int rps_boost_show(struct seq_file *m, void *data)
struct drm_i915_private *i915 = gt->i915;
struct intel_rps *rps = &gt->rps;
- seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+ seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
+ seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
@@ -553,7 +577,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
- if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
+ if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
struct intel_uncore *uncore = gt->uncore;
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -580,8 +604,10 @@ static int rps_boost_show(struct seq_file *m, void *data)
return 0;
}
-static bool rps_eval(const struct intel_gt *gt)
+static bool rps_eval(void *data)
{
+ struct intel_gt *gt = data;
+
return HAS_RPS(gt->i915);
}
@@ -597,5 +623,5 @@ void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
{ "rps_boost", &rps_boost_fops, rps_eval },
};
- debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 94e746af8926..699125928272 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -389,6 +389,16 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm,
return err;
}
+static __always_inline void
+write_pte(gen8_pte_t *pte, const gen8_pte_t val)
+{
+ /* Magic delays? Or can we refine these to flush all in one pass? */
+ *pte = val;
+ wmb(); /* cpu to cache */
+ clflush(pte); /* cache to memory */
+ wmb(); /* visible to all */
+}
+
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
struct i915_page_directory *pdp,
@@ -405,7 +415,8 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
- vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
+ write_pte(&vaddr[gen8_pd_index(idx, 0)],
+ pte_encode | iter->dma);
iter->dma += I915_GTT_PAGE_SIZE;
if (iter->dma >= iter->max) {
@@ -487,7 +498,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
do {
GEM_BUG_ON(iter->sg->length < page_size);
- vaddr[index++] = encode | iter->dma;
+ write_pte(&vaddr[index++], encode | iter->dma);
start += page_size;
iter->dma += page_size;
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index cbad7fe722ce..d907d538176e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -64,7 +64,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
if (!--b->irq_enabled)
irq_disable(engine);
- b->irq_armed = false;
+ WRITE_ONCE(b->irq_armed, false);
intel_gt_pm_put_async(engine->gt);
}
@@ -73,7 +73,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
- if (!b->irq_armed)
+ if (!READ_ONCE(b->irq_armed))
return;
spin_lock_irqsave(&b->irq_lock, flags);
@@ -142,6 +142,18 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(engine, tl);
}
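+/*
+ * Dequeue the request from the signaling machinery; if its fence was not
+ * already signaled, take a reference and queue it on @signals for the
+ * caller to complete outside the irq_lock.
+ */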
+static void __signal_request(struct i915_request *rq, struct list_head *signals)
+{
+ GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+ if (!__dma_fence_signal(&rq->fence))
+ return;
+
+ i915_request_get(rq);
+ list_add_tail(&rq->signal_link, signals);
+}
+
static void signal_irq_work(struct irq_work *work)
{
struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
@@ -155,6 +167,8 @@ static void signal_irq_work(struct irq_work *work)
if (b->irq_armed && list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
+ list_splice_init(&b->signaled_requests, &signal);
+
list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
GEM_BUG_ON(list_empty(&ce->signals));
@@ -163,24 +177,15 @@ static void signal_irq_work(struct irq_work *work)
list_entry(pos, typeof(*rq), signal_link);
GEM_BUG_ON(!check_signal_order(ce, rq));
-
if (!__request_completed(rq))
break;
- GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
- &rq->fence.flags));
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
- if (!__dma_fence_signal(&rq->fence))
- continue;
-
/*
* Queue for execution after dropping the signaling
* spinlock as the callback chain may end up adding
* more signalers to the same context or engine.
*/
- i915_request_get(rq);
- list_add_tail(&rq->signal_link, &signal);
+ __signal_request(rq, &signal);
}
/*
@@ -233,7 +238,7 @@ static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
- b->irq_armed = true;
+ WRITE_ONCE(b->irq_armed, true);
/*
* Since we are waiting on a request, the GPU should be busy
@@ -255,6 +260,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
spin_lock_init(&b->irq_lock);
INIT_LIST_HEAD(&b->signalers);
+ INIT_LIST_HEAD(&b->signaled_requests);
init_irq_work(&b->irq_work, signal_irq_work);
}
@@ -274,6 +280,32 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&b->irq_lock, flags);
}
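+/*
+ * Move a context's already-completed signals onto the engine's
+ * signaled_requests list and kick the irq_work to run their callbacks.
+ */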
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+ struct intel_context *ce)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&b->irq_lock, flags);
+ if (!list_empty(&ce->signals)) {
+ struct i915_request *rq, *next;
+
+ /* Queue for executing the signal callbacks in the irq_work */
+ list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
+ GEM_BUG_ON(rq->engine != engine);
+ GEM_BUG_ON(!__request_completed(rq));
+
+ __signal_request(rq, &b->signaled_requests);
+ }
+
+ INIT_LIST_HEAD(&ce->signals);
+ list_del_init(&ce->signal_link);
+
+ irq_work_queue(&b->irq_work);
+ }
+ spin_unlock_irqrestore(&b->irq_lock, flags);
+}
+
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index aea992e46c42..74ddb49b2941 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -114,6 +114,11 @@ int __intel_context_do_pin(struct intel_context *ce)
goto out_release;
}
+ if (unlikely(intel_context_is_closed(ce))) {
+ err = -ENOENT;
+ goto out_unlock;
+ }
+
if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
err = intel_context_active_acquire(ce);
if (unlikely(err))
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index 57a30956c922..487299cb91f2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -25,8 +25,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
return PTR_ERR(cs);
offset = i915_ggtt_offset(ce->state) +
- LRC_STATE_PN * PAGE_SIZE +
- CTX_R_PWR_CLK_STATE * 4;
+ LRC_STATE_OFFSET + CTX_R_PWR_CLK_STATE * 4;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 07cb83a0d017..4954b0df4864 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -69,7 +69,13 @@ struct intel_context {
#define CONTEXT_NOPREEMPT 7
u32 *lrc_reg_state;
- u64 lrc_desc;
+ union {
+ struct {
+ u32 lrca;
+ u32 ccid;
+ };
+ u64 desc;
+ } lrc;
u32 tag; /* cookie passed to HW to track this context on submission */
/* Time on GPU as tracked by the hw. */
@@ -96,6 +102,8 @@ struct intel_context {
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
+
+ u8 wa_bb_page; /* if set, page num reserved for context workarounds */
};
#endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index b469de0dd9b6..9bf6d4989968 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -199,6 +199,8 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
int intel_engines_init_mmio(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);
+void intel_engine_free_request_pool(struct intel_engine_cs *engine);
+
void intel_engines_release(struct intel_gt *gt);
void intel_engines_free(struct intel_gt *gt);
@@ -236,22 +238,35 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+ struct intel_context *ce);
+
void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
struct drm_printer *p);
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
memset(batch, 0, 6 * sizeof(u32));
- batch[0] = GFX_OP_PIPE_CONTROL(6);
- batch[1] = flags;
+ batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
+ batch[1] = flags1;
batch[2] = offset;
return batch + 6;
}
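+/*
+ * flags0 is merged into the PIPE_CONTROL command dword itself and carries
+ * the extra control bits introduced on gen12; earlier gens pass 0.
+ */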
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, 0, flags, offset);
+}
+
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+}
+
static inline u32 *
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
{
/* We're using qword write, offset should be aligned to 8 bytes. */
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
@@ -260,8 +275,8 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- *cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
+ *cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
*cs++ = gtt_offset;
*cs++ = 0;
*cs++ = value;
@@ -271,6 +286,18 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
return cs;
}
+static inline u32 *
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags);
+}
+
+static inline u32 *
+gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+{
+ return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1);
+}
+
static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
@@ -308,9 +335,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-int intel_enable_engine_stats(struct intel_engine_cs *engine);
-void intel_disable_engine_stats(struct intel_engine_cs *engine);
-
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
struct i915_request *
@@ -333,13 +357,4 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
return intel_engine_has_preemption(engine);
}
-static inline bool
-intel_engine_has_timeslices(const struct intel_engine_cs *engine)
-{
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return false;
-
- return intel_engine_has_semaphores(engine);
-}
-
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 3aa8a652c16d..da5b61085257 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -31,7 +31,6 @@
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
@@ -327,6 +326,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
engine->props.preempt_timeout_ms = 0;
+ engine->defaults = engine->props; /* never to change again */
+
engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
@@ -347,8 +348,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- i915->engine[id] = engine;
-
return 0;
}
@@ -425,17 +424,27 @@ void intel_engines_release(struct intel_gt *gt)
engine->release = NULL;
memset(&engine->reset, 0, sizeof(engine->reset));
-
- gt->i915->engine[id] = NULL;
}
}
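+/* Return the request kept in reserve for emergency use back to the slab */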
+void intel_engine_free_request_pool(struct intel_engine_cs *engine)
+{
+ if (!engine->request_pool)
+ return;
+
+ kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
+}
+
void intel_engines_free(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /* Free the requests! dma-resv keeps fences around for an eternity */
+ rcu_barrier();
+
for_each_engine(engine, gt, id) {
+ intel_engine_free_request_pool(engine);
kfree(engine);
gt->engine[id] = NULL;
}
@@ -623,8 +632,6 @@ static int engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
- intel_engine_pool_init(&engine->pool);
-
/* Use the whole device by default */
engine->sseu =
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
@@ -821,12 +828,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
cleanup_status_page(engine);
intel_engine_fini_retire(engine);
- intel_engine_pool_fini(&engine->pool);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
if (engine->default_state)
- i915_gem_object_put(engine->default_state);
+ fput(engine->default_state);
if (engine->kernel_context) {
intel_context_unpin(engine->kernel_context);
@@ -1225,6 +1231,49 @@ static void print_request(struct drm_printer *m,
name);
}
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *tl;
+
+ /*
+ * Even though we are holding the engine->active.lock here, there
+ * is no control over the submission queue per-se and we are
+ * inspecting the active state at a random point in time, with an
+ * unknown queue. Play safe and make sure the timeline remains valid.
+ * (Only being used for pretty printing, one extra kref shouldn't
+ * cause a camel stampede!)
+ */
+ rcu_read_lock();
+ tl = rcu_dereference(rq->timeline);
+ if (!kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+
+ return tl;
+}
+
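+/* Append a one-line summary of the request's ring state to @buf */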
+static int print_ring(char *buf, int sz, struct i915_request *rq)
+{
+ int len = 0;
+
+ if (!i915_request_signaled(rq)) {
+ struct intel_timeline *tl = get_timeline(rq);
+
+ len = scnprintf(buf, sz,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
+
+ if (tl)
+ intel_timeline_put(tl);
+ }
+
+ return len;
+}
+
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
const size_t rowsize = 8 * sizeof(u32);
@@ -1254,27 +1303,6 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
-static struct intel_timeline *get_timeline(struct i915_request *rq)
-{
- struct intel_timeline *tl;
-
- /*
- * Even though we are holding the engine->active.lock here, there
- * is no control over the submission queue per-se and we are
- * inspecting the active state at a random point in time, with an
- * unknown queue. Play safe and make sure the timeline remains valid.
- * (Only being used for pretty printing, one extra kref shouldn't
- * cause a camel stampede!)
- */
- rcu_read_lock();
- tl = rcu_dereference(rq->timeline);
- if (!kref_get_unless_zero(&tl->kref))
- tl = NULL;
- rcu_read_unlock();
-
- return tl;
-}
-
static const char *repr_timer(const struct timer_list *t)
{
if (!READ_ONCE(t->expires))
@@ -1295,6 +1323,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
+ if (HAS_EXECLISTS(dev_priv)) {
+ drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
+ drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
+ }
drm_printf(m, "\tRING_START: 0x%08x\n",
ENGINE_READ(engine, RING_START));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
@@ -1387,39 +1421,24 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = scnprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ",
- (int)(port - execlists->active));
- if (!i915_request_signaled(rq)) {
- struct intel_timeline *tl = get_timeline(rq);
-
- len += scnprintf(hdr + len, sizeof(hdr) - len,
- "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq),
- DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
- 1000 * 1000));
-
- if (tl)
- intel_timeline_put(tl);
- }
+ "\t\tActive[%d]: ccid:%08x, ",
+ (int)(port - execlists->active),
+ rq->context->lrc.ccid);
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
- struct intel_timeline *tl = get_timeline(rq);
- char hdr[80];
-
- snprintf(hdr, sizeof(hdr),
- "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
- (int)(port - execlists->pending),
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq));
- print_request(m, rq, hdr);
+ char hdr[160];
+ int len;
- if (tl)
- intel_timeline_put(tl);
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tPending[%d]: ccid:%08x, ",
+ (int)(port - execlists->pending),
+ rq->context->lrc.ccid);
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ print_request(m, rq, hdr);
}
rcu_read_unlock();
execlists_active_unlock_bh(execlists);
@@ -1568,58 +1587,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
-/**
- * intel_enable_engine_stats() - Enable engine busy tracking on engine
- * @engine: engine to enable stats collection
- *
- * Start collecting the engine busyness data for @engine.
- *
- * Returns 0 on success or a negative error code.
- */
-int intel_enable_engine_stats(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned long flags;
- int err = 0;
-
- if (!intel_engine_supports_stats(engine))
- return -ENODEV;
-
- execlists_active_lock_bh(execlists);
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (unlikely(engine->stats.enabled == ~0)) {
- err = -EBUSY;
- goto unlock;
- }
-
- if (engine->stats.enabled++ == 0) {
- struct i915_request * const *port;
- struct i915_request *rq;
-
- engine->stats.enabled_at = ktime_get();
-
- /* XXX submission method oblivious? */
- for (port = execlists->active; (rq = *port); port++)
- engine->stats.active++;
-
- for (port = execlists->pending; (rq = *port); port++) {
- /* Exclude any contexts already counted in active */
- if (!intel_context_inflight_count(rq->context))
- engine->stats.active++;
- }
-
- if (engine->stats.active)
- engine->stats.start = engine->stats.enabled_at;
- }
-
-unlock:
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
- execlists_active_unlock_bh(execlists);
-
- return err;
-}
-
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
ktime_t total = engine->stats.total;
@@ -1628,7 +1595,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
* If the engine is executing something at the moment
* add it to the total.
*/
- if (engine->stats.active)
+ if (atomic_read(&engine->stats.active))
total = ktime_add(total,
ktime_sub(ktime_get(), engine->stats.start));
@@ -1654,28 +1621,6 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
return total;
}
-/**
- * intel_disable_engine_stats() - Disable engine busy tracking on engine
- * @engine: engine to disable stats collection
- *
- * Stops collecting the engine busyness data for @engine.
- */
-void intel_disable_engine_stats(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (!intel_engine_supports_stats(engine))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- WARN_ON_ONCE(engine->stats.enabled == 0);
- if (--engine->stats.enabled == 0) {
- engine->stats.total = __intel_engine_get_busy_time(engine);
- engine->stats.active = 0;
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
static bool match_ring(struct i915_request *rq)
{
u32 ring = ENGINE_READ(rq->engine, RING_START);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index dd825718e4e5..5136c8bf112d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -31,7 +31,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
delay = msecs_to_jiffies_timeout(delay);
if (delay >= HZ)
delay = round_jiffies_up_relative(delay);
- schedule_delayed_work(&engine->heartbeat.work, delay);
+ mod_delayed_work(system_wq, &engine->heartbeat.work, delay);
return true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index b6cf284e3a2d..d0a1078ef632 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -10,31 +10,22 @@
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
+#include "shmem_utils.h"
static int __engine_unpark(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
struct intel_context *ce;
- void *map;
ENGINE_TRACE(engine, "\n");
intel_gt_pm_get(engine->gt);
- /* Pin the default state for fast resets from atomic context. */
- map = NULL;
- if (engine->default_state)
- map = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (!IS_ERR_OR_NULL(map))
- engine->pinned_default_state = map;
-
/* Discard stale context state from across idling */
ce = engine->kernel_context;
if (ce) {
@@ -44,6 +35,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
struct drm_i915_gem_object *obj = ce->state->obj;
int type = i915_coherent_map_type(engine->i915);
+ void *map;
map = i915_gem_object_pin_map(obj, type);
if (!IS_ERR(map)) {
@@ -181,7 +173,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* Ergo, if we put ourselves on the timelines.active_list
* (see intel_timeline_enter()) before we increment the
* engine->wakeref.count, we may see the request completion and retire
- * it causing an undeflow of the engine->wakeref.
+ * it causing an underflow of the engine->wakeref.
*/
flags = __timeline_mark_lock(ce);
GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
@@ -255,7 +247,6 @@ static int __engine_park(struct intel_wakeref *wf)
intel_engine_park_heartbeat(engine);
intel_engine_disarm_breadcrumbs(engine);
- intel_engine_pool_park(&engine->pool);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
@@ -263,11 +254,6 @@ static int __engine_park(struct intel_wakeref *wf)
if (engine->park)
engine->park(engine);
- if (engine->pinned_default_state) {
- i915_gem_object_unpin_map(engine->default_state);
- engine->pinned_default_state = NULL;
- }
-
engine->execlists.no_priolist = false;
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index e52c2b0cb245..418df0a13145 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
intel_wakeref_put_async(&engine->wakeref);
}
+static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ intel_wakeref_put_delay(&engine->wakeref, delay);
+}
+
static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
{
intel_wakeref_unlock_wait(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
deleted file mode 100644
index 1bd89cadc3b7..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef INTEL_ENGINE_POOL_H
-#define INTEL_ENGINE_POOL_H
-
-#include "intel_engine_pool_types.h"
-#include "i915_active.h"
-#include "i915_request.h"
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
-
-static inline int
-intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
- struct i915_request *rq)
-{
- return i915_active_add_request(&node->active, rq);
-}
-
-static inline void
-intel_engine_pool_put(struct intel_engine_pool_node *node)
-{
- i915_active_release(&node->active);
-}
-
-void intel_engine_pool_init(struct intel_engine_pool *pool);
-void intel_engine_pool_park(struct intel_engine_pool *pool);
-void intel_engine_pool_fini(struct intel_engine_pool *pool);
-
-#endif /* INTEL_ENGINE_POOL_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 80cdde712842..2b6cdf47d428 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -22,7 +22,6 @@
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
-#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_wakeref.h"
@@ -157,6 +156,20 @@ struct intel_engine_execlists {
struct i915_priolist default_priolist;
/**
+ * @ccid: identifier for contexts submitted to this engine
+ */
+ u32 ccid;
+
+ /**
+ * @yield: CCID at the time of the last semaphore-wait interrupt.
+ *
+ * Instead of leaving a semaphore busy-spinning on an engine, we would
+ * like to switch to another ready context, i.e. yielding the semaphore
+ * timeslice.
+ */
+ u32 yield;
+
+ /**
* @error_interrupt: CS Master EIR
*
* The CS generates an interrupt when it detects an error. We capture
@@ -167,6 +180,11 @@ struct intel_engine_execlists {
u32 error_interrupt;
/**
+ * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
+ */
+ u32 reset_ccid;
+
+ /**
* @no_priolist: priority lists disabled
*/
bool no_priolist;
@@ -295,8 +313,7 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
- unsigned int context_tag;
-#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
+ unsigned long context_tag;
struct rb_node uabi_node;
@@ -308,6 +325,9 @@ struct intel_engine_cs {
struct list_head hold; /* ready requests, but on hold */
} active;
+ /* keep a request in reserve for a [pm] barrier under oom */
+ struct i915_request *request_pool;
+
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
@@ -323,8 +343,7 @@ struct intel_engine_cs {
unsigned long wakeref_serial;
struct intel_wakeref wakeref;
- struct drm_i915_gem_object *default_state;
- void *pinned_default_state;
+ struct file *default_state;
struct {
struct intel_ring *ring;
@@ -358,6 +377,8 @@ struct intel_engine_cs {
spinlock_t irq_lock;
struct list_head signalers;
+ struct list_head signaled_requests;
+
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
@@ -389,13 +410,6 @@ struct intel_engine_cs {
struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
} pmu;
- /*
- * A pool of objects to use as shadow copies of client batch buffers
- * when the command parser is enabled. Prevents the client from
- * modifying the batch contents after software parsing.
- */
- struct intel_engine_pool pool;
-
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
struct i915_wa_list ctx_wa_list;
@@ -407,6 +421,7 @@ struct intel_engine_cs {
void (*irq_enable)(struct intel_engine_cs *engine);
void (*irq_disable)(struct intel_engine_cs *engine);
+ void (*sanitize)(struct intel_engine_cs *engine);
int (*resume)(struct intel_engine_cs *engine);
struct {
@@ -483,10 +498,11 @@ struct intel_engine_cs {
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
-#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
-#define I915_ENGINE_IS_VIRTUAL BIT(5)
-#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
-#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
+#define I915_ENGINE_HAS_TIMESLICES BIT(4)
+#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5)
+#define I915_ENGINE_IS_VIRTUAL BIT(6)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8)
unsigned int flags;
/*
@@ -515,34 +531,34 @@ struct intel_engine_cs {
struct {
/**
- * @lock: Lock protecting the below fields.
- */
- seqlock_t lock;
- /**
- * @enabled: Reference count indicating number of listeners.
+ * @active: Number of contexts currently scheduled in.
*/
- unsigned int enabled;
+ atomic_t active;
+
/**
- * @active: Number of contexts currently scheduled in.
+ * @lock: Lock protecting the below fields.
*/
- unsigned int active;
+ seqlock_t lock;
+
/**
- * @enabled_at: Timestamp when busy stats were enabled.
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where engine is currently busy (active > 0).
*/
- ktime_t enabled_at;
+ ktime_t total;
+
/**
* @start: Timestamp of the last idle to active transition.
*
* Idle is defined as active == 0, active is active > 0.
*/
ktime_t start;
+
/**
- * @total: Total time this engine was busy.
- *
- * Accumulated time not counting the most recent block in cases
- * where engine is currently busy (active > 0).
+ * @rps: Utilisation at last RPS sampling.
*/
- ktime_t total;
+ ktime_t rps;
} stats;
struct {
@@ -551,7 +567,7 @@ struct intel_engine_cs {
unsigned long preempt_timeout_ms;
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
- } props;
+ } props, defaults;
};
static inline bool
@@ -585,6 +601,15 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine)
}
static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return false;
+
+ return engine->flags & I915_ENGINE_HAS_TIMESLICES;
+}
+
+static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 4c5a209cb669..66165b10256e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -65,7 +65,7 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
ggtt->mappable_end);
}
- i915_ggtt_init_fences(ggtt);
+ intel_ggtt_init_fences(ggtt);
return 0;
}
@@ -715,11 +715,13 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
*/
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct pagevec *pvec;
- fini_aliasing_ppgtt(&i915->ggtt);
+ fini_aliasing_ppgtt(ggtt);
- ggtt_cleanup_hw(&i915->ggtt);
+ intel_ggtt_fini_fences(ggtt);
+ ggtt_cleanup_hw(ggtt);
pvec = &i915->mm.wc_stash.pvec;
if (pvec->nr) {
@@ -784,13 +786,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
else
ggtt->gsm = ioremap_wc(phys_addr, size);
if (!ggtt->gsm) {
- DRM_ERROR("Failed to map the ggtt page table\n");
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
return -ENOMEM;
}
ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
if (ret) {
- DRM_ERROR("Scratch setup failed\n");
+ drm_err(&i915->drm, "Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(ggtt->gsm);
return ret;
@@ -838,7 +840,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
struct pci_dev *pdev = i915->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
- int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
if (!IS_DGFX(i915)) {
@@ -846,12 +847,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->mappable_end = resource_size(&ggtt->gmadr);
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
-
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (IS_CHERRYVIEW(i915))
size = chv_get_total_gtt_size(snb_gmch_ctl);
@@ -987,7 +982,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
struct pci_dev *pdev = i915->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
- int err;
ggtt->gmadr = pci_resource(pdev, 2);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
@@ -997,15 +991,11 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
* just a coarse sanity check.
*/
if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
+ drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+ &ggtt->mappable_end);
return -ENXIO;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
@@ -1052,7 +1042,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
+ drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
}
@@ -1075,7 +1065,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"Applying Ironlake quirks for intel_iommu\n");
return 0;
@@ -1100,26 +1090,29 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
return ret;
if ((ggtt->vm.total - 1) >> 32) {
- DRM_ERROR("We never expected a Global GTT with more than 32bits"
- " of address space! Found %lldM!\n",
- ggtt->vm.total >> 20);
+ drm_err(&i915->drm,
+ "We never expected a Global GTT with more than 32bits"
+ " of address space! Found %lldM!\n",
+ ggtt->vm.total >> 20);
ggtt->vm.total = 1ULL << 32;
ggtt->mappable_end =
min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
if (ggtt->mappable_end > ggtt->vm.total) {
- DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->vm.total);
+ drm_err(&i915->drm,
+ "mappable aperture extends past end of GGTT,"
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->vm.total);
ggtt->mappable_end = ggtt->vm.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("DSM size = %lluM\n",
- (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+ drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
+ drm_dbg(&i915->drm, "GMADR size = %lluM\n",
+ (u64)ggtt->mappable_end >> 20);
+ drm_dbg(&i915->drm, "DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
return 0;
}
@@ -1137,7 +1130,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
return ret;
if (intel_vtd_active())
- dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+ drm_info(&i915->drm, "VT-d active for gfx access\n");
return 0;
}
@@ -1212,6 +1205,8 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
if (INTEL_GEN(ggtt->vm.i915) >= 8)
setup_private_pat(ggtt->vm.gt->uncore);
+
+ intel_ggtt_restore_fences(ggtt);
}
static struct scatterlist *
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index d152b648c73c..7fb36b12fe7a 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -68,8 +68,7 @@ static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
return fence->ggtt->vm.gt->uncore;
}
-static void i965_write_fence_reg(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void i965_write_fence_reg(struct i915_fence_reg *fence)
{
i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift;
@@ -87,18 +86,16 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
val = 0;
- if (vma) {
- unsigned int stride = i915_gem_object_get_stride(vma->obj);
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
- GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
- GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
GEM_BUG_ON(!IS_ALIGNED(stride, 128));
- val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
- val |= vma->node.start;
+ val = fence->start + fence->size - I965_FENCE_PAGE;
+ val <<= 32;
+ val |= fence->start;
val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
- if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+ if (fence->tiling == I915_TILING_Y)
val |= BIT(I965_FENCE_TILING_Y_SHIFT);
val |= I965_FENCE_REG_VALID;
}
@@ -125,21 +122,15 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
}
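[Editor's note] With the vma argument gone, the register value is built purely from the parameters cached on the fence by fence_update(). The i965+ packing above, restated as a standalone sketch (fence_pitch_shift is 2 before SNB, 32 from SNB on):

static u64 sketch_i965_fence_val(const struct i915_fence_reg *fence,
				 int fence_pitch_shift)
{
	u64 val = 0;

	if (fence->tiling) {
		/* [start, start + size) bounds of the fenced GGTT range */
		val = fence->start + fence->size - I965_FENCE_PAGE;
		val <<= 32;
		val |= fence->start;

		/* pitch in 128B units, minus one, at the per-gen shift */
		val |= (u64)((fence->stride / 128) - 1) << fence_pitch_shift;

		if (fence->tiling == I915_TILING_Y)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}

	return val;
}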
-static void i915_write_fence_reg(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void i915_write_fence_reg(struct i915_fence_reg *fence)
{
u32 val;
val = 0;
- if (vma) {
- unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
+ unsigned int tiling = fence->tiling;
bool is_y_tiled = tiling == I915_TILING_Y;
- unsigned int stride = i915_gem_object_get_stride(vma->obj);
-
- GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
- GEM_BUG_ON(!is_power_of_2(vma->fence_size));
- GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
stride /= 128;
@@ -147,10 +138,10 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
stride /= 512;
GEM_BUG_ON(!is_power_of_2(stride));
- val = vma->node.start;
+ val = fence->start;
if (is_y_tiled)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I915_FENCE_SIZE_BITS(vma->fence_size);
+ val |= I915_FENCE_SIZE_BITS(fence->size);
val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
@@ -165,25 +156,18 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
}
}
-static void i830_write_fence_reg(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void i830_write_fence_reg(struct i915_fence_reg *fence)
{
u32 val;
val = 0;
- if (vma) {
- unsigned int stride = i915_gem_object_get_stride(vma->obj);
-
- GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
- GEM_BUG_ON(!is_power_of_2(vma->fence_size));
- GEM_BUG_ON(!is_power_of_2(stride / 128));
- GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
- val = vma->node.start;
- if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+ val = fence->start;
+ if (fence->tiling == I915_TILING_Y)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I830_FENCE_SIZE_BITS(vma->fence_size);
+ val |= I830_FENCE_SIZE_BITS(fence->size);
val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
}
@@ -197,8 +181,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
}
}
-static void fence_write(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void fence_write(struct i915_fence_reg *fence)
{
struct drm_i915_private *i915 = fence_to_i915(fence);
@@ -209,18 +192,21 @@ static void fence_write(struct i915_fence_reg *fence,
*/
if (IS_GEN(i915, 2))
- i830_write_fence_reg(fence, vma);
+ i830_write_fence_reg(fence);
else if (IS_GEN(i915, 3))
- i915_write_fence_reg(fence, vma);
+ i915_write_fence_reg(fence);
else
- i965_write_fence_reg(fence, vma);
+ i965_write_fence_reg(fence);
/*
* Access through the fenced region afterwards is
* ordered by the posting reads whilst writing the registers.
*/
+}
- fence->dirty = false;
+static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
+{
+ return INTEL_GEN(fence_to_i915(fence)) < 4;
}
static int fence_update(struct i915_fence_reg *fence,
@@ -232,27 +218,32 @@ static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *old;
int ret;
+ fence->tiling = 0;
if (vma) {
+ GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
+ !i915_gem_object_get_tiling(vma->obj));
+
if (!i915_vma_is_map_and_fenceable(vma))
return -EINVAL;
- if (drm_WARN(&uncore->i915->drm,
- !i915_gem_object_get_stride(vma->obj) ||
- !i915_gem_object_get_tiling(vma->obj),
- "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
- i915_gem_object_get_stride(vma->obj),
- i915_gem_object_get_tiling(vma->obj)))
- return -EINVAL;
+ if (gpu_uses_fence_registers(fence)) {
+ /* implicit 'unfenced' GPU blits */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+ }
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
+ fence->start = vma->node.start;
+ fence->size = vma->fence_size;
+ fence->stride = i915_gem_object_get_stride(vma->obj);
+ fence->tiling = i915_gem_object_get_tiling(vma->obj);
}
+ WRITE_ONCE(fence->dirty, false);
old = xchg(&fence->vma, NULL);
if (old) {
/* XXX Ideally we would move the waiting to outside the mutex */
- ret = i915_vma_sync(old);
+ ret = i915_active_wait(&fence->active);
if (ret) {
fence->vma = old;
return ret;
@@ -276,7 +267,7 @@ static int fence_update(struct i915_fence_reg *fence,
/*
* We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
- * to the runtime resume, see i915_gem_restore_fences().
+ * to the runtime resume, see intel_ggtt_restore_fences().
*
* This only works for removing the fence register, on acquisition
* the caller must hold the rpm wakeref. The fence register must
@@ -290,7 +281,7 @@ static int fence_update(struct i915_fence_reg *fence,
}
WRITE_ONCE(fence->vma, vma);
- fence_write(fence, vma);
+ fence_write(fence);
if (vma) {
vma->fence = fence;
@@ -307,23 +298,26 @@ static int fence_update(struct i915_fence_reg *fence,
*
* This function force-removes any fence from the given object, which is useful
* if the kernel wants to do untiled GTT access.
- *
- * Returns:
- *
- * 0 on success, negative error code on failure.
*/
-int i915_vma_revoke_fence(struct i915_vma *vma)
+void i915_vma_revoke_fence(struct i915_vma *vma)
{
struct i915_fence_reg *fence = vma->fence;
+ intel_wakeref_t wakeref;
lockdep_assert_held(&vma->vm->mutex);
if (!fence)
- return 0;
+ return;
- if (atomic_read(&fence->pin_count))
- return -EBUSY;
+ GEM_BUG_ON(fence->vma != vma);
+ GEM_BUG_ON(!i915_active_is_idle(&fence->active));
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
- return fence_update(fence, NULL);
+ fence->tiling = 0;
+ WRITE_ONCE(fence->vma, NULL);
+ vma->fence = NULL;
+
+ with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+ fence_write(fence);
}
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
@@ -487,34 +481,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence)
}
/**
- * i915_gem_restore_fences - restore fence state
+ * intel_ggtt_restore_fences - restore fence state
* @ggtt: Global GTT
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
{
int i;
- rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < ggtt->num_fences; i++) {
- struct i915_fence_reg *reg = &ggtt->fence_regs[i];
- struct i915_vma *vma = READ_ONCE(reg->vma);
-
- GEM_BUG_ON(vma && vma->fence != reg);
-
- /*
- * Commit delayed tiling changes if we have an object still
- * attached to the fence, otherwise just clear the fence.
- */
- if (vma && !i915_gem_object_is_tiled(vma->obj))
- vma = NULL;
-
- fence_write(reg, vma);
- }
- rcu_read_unlock();
+ for (i = 0; i < ggtt->num_fences; i++)
+ fence_write(&ggtt->fence_regs[i]);
}
/**
@@ -746,7 +725,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static void i915_gem_swizzle_page(struct page *page)
+static void swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
@@ -791,7 +770,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(page);
+ swizzle_page(page);
set_page_dirty(page);
}
i++;
@@ -836,7 +815,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
}
}
-void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -864,18 +843,37 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
if (intel_vgpu_active(i915))
num_fences = intel_uncore_read(uncore,
vgtif_reg(avail_rs.fence_num));
+ ggtt->fence_regs = kcalloc(num_fences,
+ sizeof(*ggtt->fence_regs),
+ GFP_KERNEL);
+ if (!ggtt->fence_regs)
+ num_fences = 0;
/* Initialize fence registers to zero */
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+ i915_active_init(&fence->active, NULL, NULL);
fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
}
ggtt->num_fences = num_fences;
- i915_gem_restore_fences(ggtt);
+ intel_ggtt_restore_fences(ggtt);
+}
+
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
+{
+ int i;
+
+ for (i = 0; i < ggtt->num_fences; i++) {
+ struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+ i915_active_fini(&fence->active);
+ }
+
+ kfree(ggtt->fence_regs);
}
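[Editor's note] fence_regs is now allocated dynamically (kcalloc in the init hunk above, degrading to num_fences == 0 on allocation failure rather than failing init), so teardown becomes mandatory; the i915_ggtt_driver_release() hunk earlier in this patch adds the matching call before the GGTT is cleaned up:

	intel_ggtt_fini_fences(ggtt);	/* fini each fence->active, free array */
	ggtt_cleanup_hw(ggtt);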
void intel_gt_init_swizzling(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
index 7bd521cd7cd7..9eef679e1311 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
@@ -22,12 +22,14 @@
*
*/
-#ifndef __I915_FENCE_REG_H__
-#define __I915_FENCE_REG_H__
+#ifndef __INTEL_GGTT_FENCING_H__
+#define __INTEL_GGTT_FENCING_H__
#include <linux/list.h>
#include <linux/types.h>
+#include "i915_active.h"
+
struct drm_i915_gem_object;
struct i915_ggtt;
struct i915_vma;
@@ -41,6 +43,7 @@ struct i915_fence_reg {
struct i915_ggtt *ggtt;
struct i915_vma *vma;
atomic_t pin_count;
+ struct i915_active active;
int id;
/**
* Whether the tiling parameters for the currently
@@ -51,20 +54,24 @@ struct i915_fence_reg {
* command (such as BLT on gen2/3), as a "fence".
*/
bool dirty;
+ u32 start;
+ u32 size;
+ u32 tiling;
+ u32 stride;
};
-/* i915_gem_fence_reg.c */
struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
void i915_unreserve_fence(struct i915_fence_reg *fence);
-void i915_gem_restore_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
-void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt);
void intel_gt_init_swizzling(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index f04214a54f75..534e435f20bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -138,7 +138,7 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
-#define MI_LRI_CS_MMIO (1<<19)
+#define MI_LRI_LRM_CS_MMIO REG_BIT(19)
#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
@@ -156,6 +156,7 @@
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1)
+#define MI_LRR_SOURCE_CS_MMIO REG_BIT(18)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -235,9 +236,8 @@
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
-#define PIPE_CONTROL_L3_RO_CACHE_INVALIDATE REG_BIT(10) /* gen12 */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
-#define PIPE_CONTROL_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
+#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d09f7596cb98..f069551e412f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -7,6 +7,8 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_gt_buffer_pool.h"
+#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
@@ -15,6 +17,7 @@
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"
+#include "shmem_utils.h"
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@@ -26,6 +29,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
+ intel_gt_init_buffer_pool(gt);
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
@@ -370,18 +374,6 @@ static struct i915_address_space *kernel_vm(struct intel_gt *gt)
return i915_vm_get(&gt->ggtt->vm);
}
-static int __intel_context_flush_retire(struct intel_context *ce)
-{
- struct intel_timeline *tl;
-
- tl = intel_context_timeline_lock(ce);
- if (IS_ERR(tl))
- return PTR_ERR(tl);
-
- intel_context_timeline_unlock(tl);
- return 0;
-}
-
static int __engines_record_defaults(struct intel_gt *gt)
{
struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -447,8 +439,7 @@ err_rq:
for (id = 0; id < ARRAY_SIZE(requests); id++) {
struct i915_request *rq;
- struct i915_vma *state;
- void *vaddr;
+ struct file *state;
rq = requests[id];
if (!rq)
@@ -460,48 +451,16 @@ err_rq:
}
GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
- state = rq->context->state;
- if (!state)
+ if (!rq->context->state)
continue;
- /* Serialise with retirement on another CPU */
- GEM_BUG_ON(!i915_request_completed(rq));
- err = __intel_context_flush_retire(rq->context);
- if (err)
- goto out;
-
- /* We want to be able to unbind the state from the GGTT */
- GEM_BUG_ON(intel_context_is_pinned(rq->context));
-
- /*
- * As we will hold a reference to the logical state, it will
- * not be torn down with the context, and importantly the
- * object will hold onto its vma (making it possible for a
- * stray GTT write to corrupt our defaults). Unmap the vma
- * from the GTT to prevent such accidents and reclaim the
- * space.
- */
- err = i915_vma_unbind(state);
- if (err)
- goto out;
-
- i915_gem_object_lock(state->obj);
- err = i915_gem_object_set_to_cpu_domain(state->obj, false);
- i915_gem_object_unlock(state->obj);
- if (err)
- goto out;
-
- i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
-
- /* Check we can acquire the image of the context state */
- vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
+ /* Keep a copy of the state's backing pages; free the obj */
+ state = shmem_create_from_object(rq->context->state->obj);
+ if (IS_ERR(state)) {
+ err = PTR_ERR(state);
goto out;
}
-
- rq->engine->default_state = i915_gem_object_get(state->obj);
- i915_gem_object_unpin_map(state->obj);
+ rq->engine->default_state = state;
}
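[Editor's note] engine->default_state changes type here from a pinned GEM object to a plain shmem file holding a copy of the context image, which is why the unbind/set-domain/pin-map dance above is deleted: the object and its GGTT vma can be freed outright. Consumers would read the image back through the shmem_utils helpers, roughly (hypothetical fragment, assuming shmem_read() from the new shmem_utils.h):

	err = shmem_read(engine->default_state, 0,
			 vaddr, engine->context_size);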
out:
@@ -576,6 +535,8 @@ int intel_gt_init(struct intel_gt *gt)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ intel_gt_init_clock_frequency(gt);
+
err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
if (err)
goto out_fw;
@@ -635,8 +596,7 @@ void intel_gt_driver_remove(struct intel_gt *gt)
{
__intel_gt_disable(gt);
- intel_uc_fini_hw(&gt->uc);
- intel_uc_fini(&gt->uc);
+ intel_uc_driver_remove(&gt->uc);
intel_engines_release(gt);
}
@@ -663,6 +623,7 @@ void intel_gt_driver_release(struct intel_gt *gt)
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
+ intel_gt_fini_buffer_pool(gt);
}
void intel_gt_driver_late_release(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 397186818305..1495054a4305 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
@@ -8,15 +7,15 @@
#include "i915_drv.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
+#include "intel_gt_buffer_pool.h"
-static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
{
- return container_of(pool, struct intel_engine_cs, pool);
+ return container_of(pool, struct intel_gt, buffer_pool);
}
static struct list_head *
-bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
int n;
@@ -32,16 +31,50 @@ bucket_for_size(struct intel_engine_pool *pool, size_t sz)
return &pool->cache_list[n];
}
-static void node_free(struct intel_engine_pool_node *node)
+static void node_free(struct intel_gt_buffer_pool_node *node)
{
i915_gem_object_put(node->obj);
i915_active_fini(&node->active);
kfree(node);
}
+static void pool_free_work(struct work_struct *wrk)
+{
+ struct intel_gt_buffer_pool *pool =
+ container_of(wrk, typeof(*pool), work.work);
+ struct intel_gt_buffer_pool_node *node, *next;
+ unsigned long old = jiffies - HZ;
+ bool active = false;
+ LIST_HEAD(stale);
+ int n;
+
+ /* Free buffers that have not been used in the past second */
+ spin_lock_irq(&pool->lock);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct list_head *list = &pool->cache_list[n];
+
+ /* Most recent at head; oldest at tail */
+ list_for_each_entry_safe_reverse(node, next, list, link) {
+ if (time_before(node->age, old))
+ break;
+
+ list_move(&node->link, &stale);
+ }
+ active |= !list_empty(list);
+ }
+ spin_unlock_irq(&pool->lock);
+
+ list_for_each_entry_safe(node, next, &stale, link)
+ node_free(node);
+
+ if (active)
+ schedule_delayed_work(&pool->work,
+ round_jiffies_up_relative(HZ));
+}
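[Editor's note] The reaper above depends on two invariants set up in pool_retire() below: each node is stamped with node->age = jiffies as it is returned, and list_add() keeps every bucket ordered newest-first, so walking from the tail can stop at the first young node and a steadily reused bucket is never emptied. The aging test, as a sketch (hypothetical helper):

static bool sketch_node_is_stale(const struct intel_gt_buffer_pool_node *node)
{
	/* stale once untouched for a full second */
	return time_before(node->age, jiffies - HZ);
}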
+
static int pool_active(struct i915_active *ref)
{
- struct intel_engine_pool_node *node =
+ struct intel_gt_buffer_pool_node *node =
container_of(ref, typeof(*node), active);
struct dma_resv *resv = node->obj->base.resv;
int err;
@@ -64,29 +97,31 @@ static int pool_active(struct i915_active *ref)
__i915_active_call
static void pool_retire(struct i915_active *ref)
{
- struct intel_engine_pool_node *node =
+ struct intel_gt_buffer_pool_node *node =
container_of(ref, typeof(*node), active);
- struct intel_engine_pool *pool = node->pool;
+ struct intel_gt_buffer_pool *pool = node->pool;
struct list_head *list = bucket_for_size(pool, node->obj->base.size);
unsigned long flags;
- GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
i915_gem_object_unpin_pages(node->obj);
/* Return this object to the shrinker pool */
i915_gem_object_make_purgeable(node->obj);
spin_lock_irqsave(&pool->lock, flags);
+ node->age = jiffies;
list_add(&node->link, list);
spin_unlock_irqrestore(&pool->lock, flags);
+
+ schedule_delayed_work(&pool->work,
+ round_jiffies_up_relative(HZ));
}
-static struct intel_engine_pool_node *
-node_create(struct intel_engine_pool *pool, size_t sz)
+static struct intel_gt_buffer_pool_node *
+node_create(struct intel_gt_buffer_pool *pool, size_t sz)
{
- struct intel_engine_cs *engine = to_engine(pool);
- struct intel_engine_pool_node *node;
+ struct intel_gt *gt = to_gt(pool);
+ struct intel_gt_buffer_pool_node *node;
struct drm_i915_gem_object *obj;
node = kmalloc(sizeof(*node),
@@ -97,7 +132,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
node->pool = pool;
i915_active_init(&node->active, pool_active, pool_retire);
- obj = i915_gem_object_create_internal(engine->i915, sz);
+ obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj)) {
i915_active_fini(&node->active);
kfree(node);
@@ -110,26 +145,15 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return node;
}
-static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
{
- if (intel_engine_is_virtual(engine))
- engine = intel_virtual_engine_get_sibling(engine, 0);
-
- GEM_BUG_ON(!engine);
- return &engine->pool;
-}
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
-{
- struct intel_engine_pool *pool = lookup_pool(engine);
- struct intel_engine_pool_node *node;
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+ struct intel_gt_buffer_pool_node *node;
struct list_head *list;
unsigned long flags;
int ret;
- GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
size = PAGE_ALIGN(size);
list = bucket_for_size(pool, size);
@@ -157,34 +181,48 @@ intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
return node;
}
-void intel_engine_pool_init(struct intel_engine_pool *pool)
+void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
int n;
spin_lock_init(&pool->lock);
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
INIT_LIST_HEAD(&pool->cache_list[n]);
+ INIT_DELAYED_WORK(&pool->work, pool_free_work);
}
-void intel_engine_pool_park(struct intel_engine_pool *pool)
+static void pool_free_imm(struct intel_gt_buffer_pool *pool)
{
int n;
+ spin_lock_irq(&pool->lock);
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct intel_gt_buffer_pool_node *node, *next;
struct list_head *list = &pool->cache_list[n];
- struct intel_engine_pool_node *node, *nn;
- list_for_each_entry_safe(node, nn, list, link)
+ list_for_each_entry_safe(node, next, list, link)
node_free(node);
-
INIT_LIST_HEAD(list);
}
+ spin_unlock_irq(&pool->lock);
+}
+
+void intel_gt_flush_buffer_pool(struct intel_gt *gt)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+
+ if (cancel_delayed_work_sync(&pool->work))
+ pool_free_imm(pool);
}
-void intel_engine_pool_fini(struct intel_engine_pool *pool)
+void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
int n;
+ intel_gt_flush_buffer_pool(gt);
+
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
new file mode 100644
index 000000000000..42cbac003e8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_GT_BUFFER_POOL_H
+#define INTEL_GT_BUFFER_POOL_H
+
+#include <linux/types.h>
+
+#include "i915_active.h"
+#include "intel_gt_buffer_pool_types.h"
+
+struct intel_gt;
+struct i915_request;
+
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+
+static inline int
+intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
+ struct i915_request *rq)
+{
+ return i915_active_add_request(&node->active, rq);
+}
+
+static inline void
+intel_gt_buffer_pool_put(struct intel_gt_buffer_pool_node *node)
+{
+ i915_active_release(&node->active);
+}
+
+void intel_gt_init_buffer_pool(struct intel_gt *gt);
+void intel_gt_flush_buffer_pool(struct intel_gt *gt);
+void intel_gt_fini_buffer_pool(struct intel_gt *gt);
+
+#endif /* INTEL_GT_BUFFER_POOL_H */
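[Editor's note] A sketch of the expected calling pattern for the gt-wide pool (hypothetical caller, error handling trimmed):

static int sketch_use_buffer_pool(struct intel_gt *gt,
				  struct i915_request *rq)
{
	struct intel_gt_buffer_pool_node *node;
	int err;

	/* reuse (or create) a purgeable object of at least this size */
	node = intel_gt_get_buffer_pool(gt, SZ_4K);
	if (IS_ERR(node))
		return PTR_ERR(node);

	/* keep the node alive until rq is retired... */
	err = intel_gt_buffer_pool_mark_active(node, rq);

	/* ...then drop our reference; retire returns it to the bucket */
	intel_gt_buffer_pool_put(node);
	return err;
}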
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index e31ee361b76f..e28bdda771ed 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -4,26 +4,29 @@
* Copyright © 2014-2018 Intel Corporation
*/
-#ifndef INTEL_ENGINE_POOL_TYPES_H
-#define INTEL_ENGINE_POOL_TYPES_H
+#ifndef INTEL_GT_BUFFER_POOL_TYPES_H
+#define INTEL_GT_BUFFER_POOL_TYPES_H
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include "i915_active_types.h"
struct drm_i915_gem_object;
-struct intel_engine_pool {
+struct intel_gt_buffer_pool {
spinlock_t lock;
struct list_head cache_list[4];
+ struct delayed_work work;
};
-struct intel_engine_pool_node {
+struct intel_gt_buffer_pool_node {
struct i915_active active;
struct drm_i915_gem_object *obj;
struct list_head link;
- struct intel_engine_pool *pool;
+ struct intel_gt_buffer_pool *pool;
+ unsigned long age;
};
-#endif /* INTEL_ENGINE_POOL_TYPES_H */
+#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
new file mode 100644
index 000000000000..999079686846
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
+
+#define MHZ_12 12000000 /* 12MHz (24MHz/2), 83.333ns */
+#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */
+#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */
+
+static u32 read_clock_frequency(const struct intel_gt *gt)
+{
+ if (INTEL_GEN(gt->i915) >= 11) {
+ u32 config;
+
+ config = intel_uncore_read(gt->uncore, RPM_CONFIG0);
+ config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
+ config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+ switch (config) {
+ case 0: return MHZ_12;
+ case 1:
+ case 2: return MHZ_19_2;
+ default:
+ case 3: return MHZ_12_5;
+ }
+ } else if (INTEL_GEN(gt->i915) >= 9) {
+ if (IS_GEN9_LP(gt->i915))
+ return MHZ_19_2;
+ else
+ return MHZ_12;
+ } else {
+ return MHZ_12_5;
+ }
+}
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt)
+{
+ /*
+ * Note that on gen11+, the clock frequency may be reconfigured.
+ * We do not reconfigure it, and we assume nobody else does.
+ */
+ gt->clock_frequency = read_clock_frequency(gt);
+ GT_TRACE(gt,
+ "Using clock frequency: %dkHz\n",
+ gt->clock_frequency / 1000);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_gt_check_clock_frequency(const struct intel_gt *gt)
+{
+ if (gt->clock_frequency != read_clock_frequency(gt)) {
+ dev_err(gt->i915->drm.dev,
+ "GT clock frequency changed, was %uHz, now %uHz!\n",
+ gt->clock_frequency,
+ read_clock_frequency(gt));
+ }
+}
+#endif
+
+static u64 div_u64_roundup(u64 nom, u32 den)
+{
+ return div_u64(nom + den - 1, den);
+}
+
+u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count)
+{
+ return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000),
+ gt->clock_frequency);
+}
+
+u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count)
+{
+ return intel_gt_clock_interval_to_ns(gt, 16 * count);
+}
+
+u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns)
+{
+ return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns),
+ 1000 * 1000 * 1000);
+}
+
+u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
+{
+ u32 val;
+
+ /*
+ * Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+ val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16);
+ if (IS_GEN(gt->i915, 6))
+ val = roundup(val, 25);
+
+ return val;
+}
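[Editor's note] A worked example of the conversions above, assuming the 19.2MHz crystal (MHZ_19_2), where one CS timestamp tick is ~52.083ns and PM intervals tick 16x slower:

	intel_gt_clock_interval_to_ns(gt, 1000);
	/* = DIV_ROUND_UP(1000 * NSEC_PER_SEC, 19200000) = 52084ns */

	intel_gt_pm_interval_to_ns(gt, 1000);	/* == 16000 clock ticks */
	/* = DIV_ROUND_UP(16000 * NSEC_PER_SEC, 19200000) = 833334ns */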
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
new file mode 100644
index 000000000000..f793c89f2cbd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CLOCK_UTILS_H__
+#define __INTEL_GT_CLOCK_UTILS_H__
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_gt_check_clock_frequency(const struct intel_gt *gt);
+#else
+static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
+#endif
+
+u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count);
+u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count);
+
+u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns);
+u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns);
+
+#endif /* __INTEL_GT_CLOCK_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f0e7fd95165a..0cc7dd54f4f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -39,6 +39,15 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
}
}
+ if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+ WRITE_ONCE(engine->execlists.yield,
+ ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+ ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+ engine->execlists.yield);
+ if (del_timer(&engine->execlists.timer))
+ tasklet = true;
+ }
+
if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
tasklet = true;
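[Editor's note] The GT_WAIT_SEMAPHORE_INTERRUPT block above implements semaphore yielding: when a context starts busy-waiting on MI_SEMAPHORE_WAIT, the handler latches the stalled context's ccid (from EXECLIST_STATUS_HI) into execlists->yield, and if del_timer() caught the timeslice timer still pending, the submission tasklet is kicked so another runnable context can be scheduled in its place. The consumer presumably compares the hint against the running request, along the lines of (sketch; the actual check lives in intel_lrc.c, outside this excerpt):

static bool sketch_timeslice_yield(const struct intel_engine_execlists *el,
				   const struct i915_request *rq)
{
	/* yield only if the stalled context is the one holding the HW */
	return rq->context->lrc.ccid == READ_ONCE(el->yield);
}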
@@ -228,7 +237,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
const u32 irqs =
GT_CS_MASTER_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT |
- GT_CONTEXT_SWITCH_INTERRUPT;
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
struct intel_uncore *uncore = gt->uncore;
const u32 dmask = irqs << 16 | irqs;
const u32 smask = irqs << 16;
@@ -366,7 +376,8 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
const u32 irqs =
GT_CS_MASTER_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT |
- GT_CONTEXT_SWITCH_INTERRUPT;
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
const u32 gt_interrupts[] = {
irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 8b653c0f5e5f..6bdb434a442d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -12,6 +12,7 @@
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
@@ -138,6 +139,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ intel_gt_check_clock_frequency(gt);
+
/*
* As we have just resumed the machine and woken the device up from
* deep PCI sleep (presumably D3_cold), assume the HW has been reset
@@ -155,6 +158,10 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
intel_uc_reset_prepare(&gt->uc);
+ for_each_engine(engine, gt, id)
+ if (engine->sanitize)
+ engine->sanitize(engine);
+
if (reset_engines(gt) || force) {
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, false);
@@ -164,6 +171,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
if (engine->reset.finish)
engine->reset.finish(engine);
+ intel_rps_sanitize(&gt->rps);
+
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
@@ -191,11 +200,12 @@ int intel_gt_resume(struct intel_gt *gt)
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
+ gt_sanitize(gt, true);
+
intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
- gt_sanitize(gt, true);
if (intel_gt_is_wedged(gt)) {
err = -EIO;
goto out_fw;
@@ -204,7 +214,7 @@ int intel_gt_resume(struct intel_gt *gt)
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
- dev_err(gt->i915->drm.dev,
+ drm_err(&gt->i915->drm,
"Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}
@@ -220,7 +230,7 @@ int intel_gt_resume(struct intel_gt *gt)
intel_engine_pm_put(engine);
if (err) {
- dev_err(gt->i915->drm.dev,
+ drm_err(&gt->i915->drm,
"Failed to restart %s (%d)\n",
engine->name, err);
goto err_wedged;
@@ -324,6 +334,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
{
GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
+ intel_ggtt_restore_fences(gt->ggtt);
return intel_uc_runtime_resume(&gt->uc);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 24c99d0838af..16ff47c83bd5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -26,6 +26,11 @@ static bool retire_requests(struct intel_timeline *tl)
return !i915_active_fence_isset(&tl->last_request);
}
+static bool engine_active(const struct intel_engine_cs *engine)
+{
+ return !list_empty(&engine->kernel_context->timeline->requests);
+}
+
static bool flush_submission(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
@@ -37,8 +42,13 @@ static bool flush_submission(struct intel_gt *gt)
for_each_engine(engine, gt, id) {
intel_engine_flush_submission(engine);
- active |= flush_work(&engine->retire_work);
- active |= flush_work(&engine->wakeref.work);
+
+ /* Flush the background retirement and idle barriers */
+ flush_work(&engine->retire_work);
+ flush_delayed_work(&engine->wakeref.work);
+
+ /* Is the idle barrier still outstanding? */
+ active |= engine_active(engine);
}
return active;
@@ -162,7 +172,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
}
}
- if (!retire_requests(tl) || flush_submission(gt))
+ if (!retire_requests(tl))
active_count++;
mutex_unlock(&tl->mutex);
@@ -173,7 +183,6 @@ out_active: spin_lock(&timelines->lock);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
-
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
GEM_BUG_ON(atomic_read(&tl->active_count));
@@ -185,6 +194,9 @@ out_active: spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
+ if (flush_submission(gt)) /* Wait, there's more! */
+ active_count++;
+
return active_count ? timeout : 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 96890dd12b5f..0cc1d6b185dc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -17,6 +17,7 @@
#include "i915_vma.h"
#include "intel_engine_types.h"
+#include "intel_gt_buffer_pool_types.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
@@ -61,6 +62,7 @@ struct intel_gt {
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */
+ ktime_t last_init_time;
struct intel_reset reset;
/**
@@ -72,14 +74,12 @@ struct intel_gt {
*/
intel_wakeref_t awake;
+ u32 clock_frequency;
+
struct intel_llc llc;
struct intel_rc6 rc6;
struct intel_rps rps;
- ktime_t last_init_time;
-
- struct i915_vma *scratch;
-
spinlock_t irq_lock;
u32 gt_imr;
u32 pm_ier;
@@ -97,6 +97,18 @@ struct intel_gt {
* Reserved for exclusive use by the kernel.
*/
struct i915_address_space *vm;
+
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ *
+ * Buffers older than 1s are periodically reaped from the pool,
+ * or may be reclaimed by the shrinker before then.
+ */
+ struct intel_gt_buffer_pool buffer_pool;
+
+ struct i915_vma *scratch;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index b3116fe8d180..d93ebdf3fa0e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -26,7 +26,6 @@
#include <drm/drm_mm.h>
#include "gt/intel_reset.h"
-#include "i915_gem_fence_reg.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"
@@ -135,6 +134,8 @@ typedef u64 gen8_pte_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
+struct i915_fence_reg;
+
#define for_each_sgt_daddr(__dp, __iter, __sgt) \
__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
@@ -333,7 +334,7 @@ struct i915_ggtt {
u32 pin_bias;
unsigned int num_fences;
- struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ struct i915_fence_reg *fence_regs;
struct list_head fence_list;
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 683014e7bc51..87e6c5bdd2dc 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -147,6 +147,7 @@
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
+#include "shmem_utils.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
@@ -216,7 +217,7 @@ struct virtual_engine {
/* And finally, which physical engines this virtual engine maps onto. */
unsigned int num_siblings;
- struct intel_engine_cs *siblings[0];
+ struct intel_engine_cs *siblings[];
};
static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
@@ -238,6 +239,123 @@ __execlists_update_reg_state(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 head);
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
+}
+
+static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x74;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x68;
+ else if (engine->class == RENDER_CLASS)
+ return 0xd8;
+ else
+ return -1;
+}
+
+static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x12;
+ else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+ return 0x18;
+ else
+ return -1;
+}
+
+static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
+{
+ int x;
+
+ x = lrc_ring_wa_bb_per_ctx(engine);
+ if (x < 0)
+ return x;
+
+ return x + 2;
+}
+
+static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
+{
+ int x;
+
+ x = lrc_ring_indirect_ptr(engine);
+ if (x < 0)
+ return x;
+
+ return x + 2;
+}
+
+static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
+{
+ if (engine->class != RENDER_CLASS)
+ return -1;
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0xb6;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return 0xaa;
+ else
+ return -1;
+}
+
+static u32
+lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ default:
+ MISSING_CASE(INTEL_GEN(engine->i915));
+ fallthrough;
+ case 12:
+ return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 11:
+ return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 10:
+ return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 9:
+ return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 8:
+ return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ }
+}
+
+static void
+lrc_ring_setup_indirect_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr,
+ u32 size)
+{
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
+ GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
+ regs[lrc_ring_indirect_ptr(engine) + 1] =
+ ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
+
+ GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
+ regs[lrc_ring_indirect_offset(engine) + 1] =
+ lrc_ring_indirect_offset_default(engine) << 6;
+}
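[Editor's note] The lrc_ring_*() helpers above encode the per-gen layout of the context image as dword indices of MI_LOAD_REGISTER_IMM slots, each slot being a (reg, value) pair; hence the repeated "+ 2" stepping. Worked example for gen12:

	lrc_ring_wa_bb_per_ctx(engine);		/* -> 0x12 */
	lrc_ring_indirect_ptr(engine);		/* -> 0x14 (wa_bb + 2) */
	lrc_ring_indirect_offset(engine);	/* -> 0x16 (ptr + 2) */
	lrc_ring_cmd_buf_cctl(engine);		/* -> 0xb6, render only */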
+
+static u32 intel_context_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
static void mark_eio(struct i915_request *rq)
{
if (i915_request_completed(rq))
@@ -311,18 +429,7 @@ static int effective_prio(const struct i915_request *rq)
if (i915_request_has_nopreempt(rq))
prio = I915_PRIORITY_UNPREEMPTABLE;
- /*
- * On unwinding the active request, we give it a priority bump
- * if it has completed waiting on any semaphore. If we know that
- * the request has already started, we can prevent an unwanted
- * preempt-to-idle cycle by taking that into account now.
- */
- if (__i915_request_has_started(rq))
- prio |= I915_PRIORITY_NOSEMAPHORE;
-
- /* Restrict mere WAIT boosts from triggering preemption */
- BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
- return prio | __NO_PREEMPTION;
+ return prio;
}
static int queue_prio(const struct intel_engine_execlists *execlists)
@@ -456,10 +563,10 @@ assert_priority_queue(const struct i915_request *prev,
* engine info, SW context ID and SW counter need to form a unique number
* (Context ID) per lrc.
*/
-static u64
+static u32
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
- u64 desc;
+ u32 desc;
desc = INTEL_LEGACY_32B_CONTEXT;
if (i915_vm_is_4lvl(ce->vm))
@@ -470,21 +577,7 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
if (IS_GEN(engine->i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
- desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */
- /*
- * The following 32bits are copied into the OA reports (dword 2).
- * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
- * anything below.
- */
- if (INTEL_GEN(engine->i915) >= 11) {
- desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
- /* bits 48-53 */
-
- desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
- /* bits 61-63 */
- }
-
- return desc;
+ return i915_ggtt_offset(ce->state) | desc;
}
static inline unsigned int dword_in_page(void *addr)
@@ -503,7 +596,7 @@ static void set_offsets(u32 *regs,
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END(x) 0, (x)
+#define END(total_state_size) 0, (total_state_size)
{
const u32 base = engine->mmio_base;
@@ -526,7 +619,7 @@ static void set_offsets(u32 *regs,
if (flags & POSTED)
*regs |= MI_LRI_FORCE_POSTED;
if (INTEL_GEN(engine->i915) >= 11)
- *regs |= MI_LRI_CS_MMIO;
+ *regs |= MI_LRI_LRM_CS_MMIO;
regs++;
GEM_BUG_ON(!count);
@@ -911,8 +1004,63 @@ static const u8 gen12_rcs_offsets[] = {
NOP(6),
LRI(1, 0),
REG(0x0c8),
+ NOP(3 + 9 + 1),
+
+ LRI(51, POSTED),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG(0x028),
+ REG(0x09c),
+ REG(0x0c0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x068),
+ REG(0x084),
+ NOP(1),
- END(80)
+ END(192)
};
#undef END
@@ -1040,17 +1188,14 @@ static void intel_engine_context_in(struct intel_engine_cs *engine)
{
unsigned long flags;
- if (READ_ONCE(engine->stats.enabled) == 0)
+ if (atomic_add_unless(&engine->stats.active, 1, 0))
return;
write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- if (engine->stats.active++ == 0)
- engine->stats.start = ktime_get();
- GEM_BUG_ON(engine->stats.active == 0);
+ if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
+ engine->stats.start = ktime_get();
+ atomic_inc(&engine->stats.active);
}
-
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
@@ -1058,51 +1203,20 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
{
unsigned long flags;
- if (READ_ONCE(engine->stats.enabled) == 0)
+ GEM_BUG_ON(!atomic_read(&engine->stats.active));
+
+ if (atomic_add_unless(&engine->stats.active, -1, 1))
return;
write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- ktime_t last;
-
- if (engine->stats.active && --engine->stats.active == 0) {
- /*
- * Decrement the active context count and in case GPU
- * is now idle add up to the running total.
- */
- last = ktime_sub(ktime_get(), engine->stats.start);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- } else if (engine->stats.active == 0) {
- /*
- * After turning on engine stats, context out might be
- * the first event in which case we account from the
- * time stats gathering was turned on.
- */
- last = ktime_sub(ktime_get(), engine->stats.enabled_at);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- }
+ if (atomic_dec_and_test(&engine->stats.active)) {
+ engine->stats.total =
+ ktime_add(engine->stats.total,
+ ktime_sub(ktime_get(), engine->stats.start));
}
-
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
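[Editor's note] The rewritten context_in/out pair above is an edge-triggered counter: atomic_add_unless(&active, 1, 0) and its (-1, 1) twin succeed only away from the idle boundary, so nested entries and exits stay lock-free and the seqlock write side is taken just for the 0->1 and 1->0 transitions that open stats.start and accumulate stats.total. One subtlety worth spelling out (editorial comment):

	/*
	 * Why context_in rechecks under the lock: two CPUs can both see
	 * active == 0 and miss the fast path; whichever takes stats.lock
	 * first opens the busy block and bumps active, so the loser's
	 * recheck via atomic_add_unless() now succeeds and it must not
	 * overwrite stats.start.
	 */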
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x60;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x54;
- else if (engine->class == RENDER_CLASS)
- return 0x58;
- else
- return -1;
-}
-
static void
execlists_check_context(const struct intel_context *ce,
const struct intel_engine_cs *engine)
@@ -1146,14 +1260,12 @@ execlists_check_context(const struct intel_context *ce,
static void restore_default_state(struct intel_context *ce,
struct intel_engine_cs *engine)
{
- u32 *regs = ce->lrc_reg_state;
+ u32 *regs;
- if (engine->pinned_default_state)
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
+ regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
+ execlists_init_reg_state(regs, ce, engine, ce->ring, true);
- execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+ ce->runtime.last = intel_context_get_runtime(ce);
}
static void reset_active(struct i915_request *rq,
@@ -1192,18 +1304,7 @@ static void reset_active(struct i915_request *rq,
__execlists_update_reg_state(ce, engine, head);
/* We've switched away, so this should be a no-op, but intent matters */
- ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
- /*
- * We can use either ppHWSP[16] which is recorded before the context
- * switch (and so excludes the cost of context switches) or use the
- * value from the context image itself, which is saved/restored earlier
- * and so includes the cost of the save.
- */
- return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+ ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
}
static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
@@ -1251,18 +1352,23 @@ __execlists_schedule_in(struct i915_request *rq)
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
execlists_check_context(ce, engine);
- ce->lrc_desc &= ~GENMASK_ULL(47, 37);
if (ce->tag) {
/* Use a fixed tag for OA and friends */
- ce->lrc_desc |= (u64)ce->tag << 32;
+ GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+ ce->lrc.ccid = ce->tag;
} else {
/* We don't need a strict matching tag, just different values */
- ce->lrc_desc |=
- (u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
- GEN11_SW_CTX_ID_SHIFT;
- BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
+ unsigned int tag = ffs(READ_ONCE(engine->context_tag));
+
+ GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
+ clear_bit(tag - 1, &engine->context_tag);
+ ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+ BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
}
+ ce->lrc.ccid |= engine->execlists.ccid;
+
__intel_gt_pm_get(engine->gt);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@@ -1302,7 +1408,8 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
static inline void
__execlists_schedule_out(struct i915_request *rq,
- struct intel_engine_cs * const engine)
+ struct intel_engine_cs * const engine,
+ unsigned int ccid)
{
struct intel_context * const ce = rq->context;
@@ -1320,6 +1427,14 @@ __execlists_schedule_out(struct i915_request *rq,
i915_request_completed(rq))
intel_engine_add_retire(engine, ce->timeline);
+ ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+ ccid &= GEN12_MAX_CONTEXT_HW_ID;
+ if (ccid < BITS_PER_LONG) {
+ GEM_BUG_ON(ccid == 0);
+ GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+ set_bit(ccid - 1, &engine->context_tag);
+ }
+
intel_context_update_runtime(ce);
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
@@ -1345,15 +1460,17 @@ execlists_schedule_out(struct i915_request *rq)
{
struct intel_context * const ce = rq->context;
struct intel_engine_cs *cur, *old;
+ u32 ccid;
trace_i915_request_out(rq);
+ ccid = rq->context->lrc.ccid;
old = READ_ONCE(ce->inflight);
do
cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
while (!try_cmpxchg(&ce->inflight, &old, cur));
if (!cur)
- __execlists_schedule_out(rq, old);
+ __execlists_schedule_out(rq, old, ccid);
i915_request_put(rq);
}
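[Editor's note] Taken together, the schedule_in/out hunks above form a small tag allocator: engine->context_tag is a bitmask of free software context IDs, a tag is claimed with ffs()/clear_bit() on the way in, carried in ce->lrc.ccid (latched into a local in execlists_schedule_out before the context may be resubmitted), and returned with set_bit() on the final schedule-out. As a standalone pair (illustrative; fixed OA tags sit above BITS_PER_LONG and are never recycled):

static u32 sketch_ccid_get(struct intel_engine_cs *engine)
{
	unsigned int tag = ffs(READ_ONCE(engine->context_tag));

	GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
	clear_bit(tag - 1, &engine->context_tag);	/* claim */

	return tag << (GEN11_SW_CTX_ID_SHIFT - 32);
}

static void sketch_ccid_put(struct intel_engine_cs *engine, u32 ccid)
{
	ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
	ccid &= GEN12_MAX_CONTEXT_HW_ID;
	if (ccid < BITS_PER_LONG)	/* skip the fixed OA tags */
		set_bit(ccid - 1, &engine->context_tag);
}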
@@ -1361,7 +1478,7 @@ execlists_schedule_out(struct i915_request *rq)
static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
- u64 desc = ce->lrc_desc;
+ u64 desc = ce->lrc.desc;
u32 tail, prev;
/*
@@ -1400,7 +1517,7 @@ static u64 execlists_update_context(struct i915_request *rq)
*/
wmb();
- ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+ ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
return desc;
}
@@ -1415,6 +1532,24 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc
}
}
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+ if (!rq)
+ return "";
+
+ snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
+ prefix,
+ rq->context->lrc.ccid,
+ rq->fence.context, rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ rq_prio(rq));
+
+ return buf;
+}
+
static __maybe_unused void
trace_ports(const struct intel_engine_execlists *execlists,
const char *msg,
@@ -1422,18 +1557,14 @@ trace_ports(const struct intel_engine_execlists *execlists,
{
const struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
+ char __maybe_unused p0[40], p1[40];
if (!ports[0])
return;
- ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
- ports[0]->fence.context,
- ports[0]->fence.seqno,
- i915_request_completed(ports[0]) ? "!" :
- i915_request_started(ports[0]) ? "*" :
- "",
- ports[1] ? ports[1]->fence.context : 0,
- ports[1] ? ports[1]->fence.seqno : 0);
+ ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+ dump_port(p0, sizeof(p0), "", ports[0]),
+ dump_port(p1, sizeof(p1), ", ", ports[1]));
}
static inline bool
@@ -1446,9 +1577,12 @@ static __maybe_unused bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
const char *msg)
{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
struct i915_request * const *port, *rq;
struct intel_context *ce = NULL;
bool sentinel = false;
+ u32 ccid = -1;
trace_ports(execlists, msg, execlists->pending);
@@ -1457,13 +1591,14 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
return true;
if (!execlists->pending[0]) {
- GEM_TRACE_ERR("Nothing pending for promotion!\n");
+ GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
+ engine->name);
return false;
}
if (execlists->pending[execlists_num_ports(execlists)]) {
- GEM_TRACE_ERR("Excess pending[%d] for promotion!\n",
- execlists_num_ports(execlists));
+ GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
+ engine->name, execlists_num_ports(execlists));
return false;
}
@@ -1475,20 +1610,31 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
GEM_BUG_ON(!i915_request_is_active(rq));
if (ce == rq->context) {
- GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
}
ce = rq->context;
+ if (ccid == ce->lrc.ccid) {
+ GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
+ engine->name,
+ ccid, ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ccid = ce->lrc.ccid;
+
/*
* Sentinels are supposed to be lonely so they flush the
* current execution off the HW. Check that they are the
* only request in the pending submission.
*/
if (sentinel) {
- GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
@@ -1496,7 +1642,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
sentinel = i915_request_has_sentinel(rq);
if (sentinel && port != execlists->pending) {
- GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n",
+ GEM_TRACE_ERR("%s: sentinel context:%llx not in prime position[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
@@ -1511,7 +1658,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
if (i915_active_is_idle(&ce->active) &&
!intel_context_is_barrier(ce)) {
- GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
ok = false;
@@ -1519,7 +1667,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
if (!i915_vma_is_pinned(ce->state)) {
- GEM_TRACE_ERR("Unpinned context:%llx in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
ok = false;
@@ -1527,7 +1676,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
if (!i915_vma_is_pinned(ce->ring->vma)) {
- GEM_TRACE_ERR("Unpinned ring:%llx in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
ok = false;
@@ -1662,30 +1812,16 @@ static bool virtual_matches(const struct virtual_engine *ve,
return true;
}
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
- struct i915_request *rq)
+static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
{
- struct intel_engine_cs *old = ve->siblings[0];
-
- /* All unattached (rq->engine == old) must already be completed */
-
- spin_lock(&old->breadcrumbs.irq_lock);
- if (!list_empty(&ve->context.signal_link)) {
- list_del_init(&ve->context.signal_link);
-
- /*
- * We cannot acquire the new engine->breadcrumbs.irq_lock
- * (as we are holding a breadcrumbs.irq_lock already),
- * so attach this request to the signaler on submission.
- * The queued irq_work will occur when we finally drop
- * the engine->active.lock after dequeue.
- */
- set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
-
- /* Also transfer the pending irq_work for the old breadcrumb. */
- intel_engine_signal_breadcrumbs(rq->engine);
- }
- spin_unlock(&old->breadcrumbs.irq_lock);
+ /*
+ * All the outstanding signals on ve->siblings[0] must have
+ * been completed, just pending the interrupt handler. As those
+ * signals still refer to the old sibling (via rq->engine), we must
+ * transfer those to the old irq_worker to keep our locking
+ * consistent.
+ */
+ intel_engine_transfer_stale_breadcrumbs(ve->siblings[0], &ve->context);
}
#define for_each_waiter(p__, rq__) \
@@ -1719,12 +1855,16 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
/* Leave semaphores spinning on the other engines */
if (w->engine != rq->engine)
continue;
/* No waiter should start before its signaler */
- GEM_BUG_ON(i915_request_started(w) &&
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+ i915_request_started(w) &&
!i915_request_completed(rq));
GEM_BUG_ON(i915_request_is_active(w));
@@ -1754,7 +1894,8 @@ static void defer_active(struct intel_engine_cs *engine)
}
static bool
-need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+need_timeslice(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
int hint;
@@ -1768,6 +1909,32 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
return hint >= effective_prio(rq);
}
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ /*
+ * Once bitten, forever smitten!
+ *
+ * If the active context ever busy-waited on a semaphore,
+ * it will be treated as a hog until the end of its timeslice (i.e.
+ * until it is scheduled out and replaced by a new submission,
+ * possibly even its own lite-restore). The HW only sends an interrupt
+ * on the first miss, and we do not know whether that semaphore has
+ * since been signaled, or whether it is now stuck on another semaphore. Play
+ * safe, yield if it might be stuck -- it will be given a fresh
+ * timeslice in the near future.
+ */
+ return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool
+timeslice_expired(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
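Per the rest of this series (note GT_WAIT_SEMAPHORE_INTERRUPT joining irq_keep_mask further down, and the yield field being reset to -1 on each submission), execlists->yield is written from the CS interrupt handler with the ccid reported in the EXECLIST_STATUS register. A sketch of that producer/consumer pairing, with all names assumed:

#include <stdio.h>

/* Assumed names; the real driver uses WRITE_ONCE/READ_ONCE on
 * execlists->yield and reads the ccid from RING_EXECLIST_STATUS_HI. */
struct el_sketch {
        unsigned int yield;     /* ccid caught busy-waiting; -1u if none */
};

/* interrupt side: record the hog and (in the driver) kick the tasklet */
static void on_semaphore_wait_irq(struct el_sketch *el, unsigned int status_ccid)
{
        el->yield = status_ccid;
}

/* scheduler side: mirrors timeslice_yield() above */
static int should_yield(const struct el_sketch *el, unsigned int active_ccid)
{
        return active_ccid == el->yield;
}

int main(void)
{
        struct el_sketch el = { .yield = -1u };

        printf("%d\n", should_yield(&el, 0x42)); /* 0: no yield requested */
        on_semaphore_wait_irq(&el, 0x42);
        printf("%d\n", should_yield(&el, 0x42)); /* 1: active context yields */
        return 0;
}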
static int
switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
{
@@ -1783,8 +1950,7 @@ timeslice(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.timeslice_duration_ms);
}
-static unsigned long
-active_timeslice(const struct intel_engine_cs *engine)
+static unsigned long active_timeslice(const struct intel_engine_cs *engine)
{
const struct intel_engine_execlists *execlists = &engine->execlists;
const struct i915_request *rq = *execlists->active;
@@ -1800,16 +1966,25 @@ active_timeslice(const struct intel_engine_cs *engine)
static void set_timeslice(struct intel_engine_cs *engine)
{
+ unsigned long duration;
+
if (!intel_engine_has_timeslices(engine))
return;
- set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
+ duration = active_timeslice(engine);
+ ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+
+ set_timer_ms(&engine->execlists.timer, duration);
}
static void start_timeslice(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- int prio = queue_prio(execlists);
+ const int prio = queue_prio(execlists);
+ unsigned long duration;
+
+ if (!intel_engine_has_timeslices(engine))
+ return;
WRITE_ONCE(execlists->switch_priority_hint, prio);
if (prio == INT_MIN)
@@ -1818,7 +1993,12 @@ static void start_timeslice(struct intel_engine_cs *engine)
if (timer_pending(&execlists->timer))
return;
- set_timer_ms(&execlists->timer, timeslice(engine));
+ duration = timeslice(engine);
+ ENGINE_TRACE(engine,
+ "start timeslicing, prio:%d, interval:%lu",
+ prio, duration);
+
+ set_timer_ms(&execlists->timer, duration);
}
static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1915,11 +2095,26 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* of trouble.
*/
active = READ_ONCE(execlists->active);
- while ((last = *active) && i915_request_completed(last))
- active++;
- if (last) {
+ /*
+ * In theory we can skip over completed contexts that have not
+ * yet been processed by events (as those events are in flight):
+ *
+ * while ((last = *active) && i915_request_completed(last))
+ * active++;
+ *
+ * However, the GPU cannot handle this as it will ultimately
+ * find itself trying to jump back into a context it has just
+ * completed and barf.
+ */
+
+ if ((last = *active)) {
if (need_preempt(engine, last, rb)) {
+ if (i915_request_completed(last)) {
+ tasklet_hi_schedule(&execlists->tasklet);
+ return;
+ }
+
ENGINE_TRACE(engine,
"preempting last=%llx:%lld, prio=%d, hint=%d\n",
last->fence.context,
@@ -1946,13 +2141,19 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last = NULL;
} else if (need_timeslice(engine, last) &&
- timer_expired(&engine->execlists.timer)) {
+ timeslice_expired(execlists, last)) {
+ if (i915_request_completed(last)) {
+ tasklet_hi_schedule(&execlists->tasklet);
+ return;
+ }
+
ENGINE_TRACE(engine,
- "expired last=%llx:%lld, prio=%d, hint=%d\n",
+ "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
last->fence.context,
last->fence.seqno,
last->sched.attr.priority,
- execlists->queue_priority_hint);
+ execlists->queue_priority_hint,
+ yesno(timeslice_yield(execlists, last)));
ring_set_paused(engine, 1);
defer_active(engine);
@@ -2055,7 +2256,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
engine);
if (!list_empty(&ve->context.signals))
- virtual_xfer_breadcrumbs(ve, rq);
+ virtual_xfer_breadcrumbs(ve);
/*
* Move the bound engine to the top of the list
@@ -2213,8 +2414,9 @@ done:
}
clear_ports(port + 1, last_port - port);
- execlists_submit_ports(engine);
+ WRITE_ONCE(execlists->yield, -1);
set_preempt_timeout(engine, *active);
+ execlists_submit_ports(engine);
} else {
skip_submit:
ring_set_paused(engine, 0);
@@ -2384,8 +2586,6 @@ static void process_csb(struct intel_engine_cs *engine)
if (promote) {
struct i915_request * const *old = execlists->active;
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
-
ring_set_paused(engine, 0);
/* Point active to the new ELSP; prevent overwriting */
@@ -2398,6 +2598,7 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_schedule_out(*old++);
/* switch pending to inflight */
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
memcpy(execlists->inflight,
execlists->pending,
execlists_num_ports(execlists) *
@@ -2416,17 +2617,21 @@ static void process_csb(struct intel_engine_cs *engine)
* We rely on the hardware being strongly
* ordered, that the breadcrumb write is
* coherent (visible from the CPU) before the
- * user interrupt and CSB is processed.
+ * user interrupt is processed. One might assume
+ * that the breadcrumb write, being ordered before
+ * the user interrupt and the CS event for the
+ * context switch, would therefore also be visible
+ * before the CS event itself...
*/
if (GEM_SHOW_DEBUG() &&
- !i915_request_completed(*execlists->active) &&
- !reset_in_progress(execlists)) {
- struct i915_request *rq __maybe_unused =
- *execlists->active;
+ !i915_request_completed(*execlists->active)) {
+ struct i915_request *rq = *execlists->active;
const u32 *regs __maybe_unused =
rq->context->lrc_reg_state;
ENGINE_TRACE(engine,
+ "context completed before request!\n");
+ ENGINE_TRACE(engine,
"ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
ENGINE_READ(engine, RING_START),
ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
@@ -2445,8 +2650,6 @@ static void process_csb(struct intel_engine_cs *engine)
regs[CTX_RING_START],
regs[CTX_RING_HEAD],
regs[CTX_RING_TAIL]);
-
- GEM_BUG_ON("context completed before request");
}
execlists_schedule_out(*execlists->active++);
@@ -2736,6 +2939,45 @@ err_cap:
return NULL;
}
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+ const struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request * const *port, *rq;
+
+ /*
+ * Use the most recent result from process_csb(), but just in case
+ * we trigger an error (via interrupt) before the first CS event has
+ * been written, peek at the next submission.
+ */
+
+ for (port = el->active; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid found at active:%zd\n",
+ port - el->active);
+ return rq;
+ }
+ }
+
+ for (port = el->pending; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid found at pending:%zd\n",
+ port - el->pending);
+ return rq;
+ }
+ }
+
+ ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+ return NULL;
+}
+
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+ return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
static bool execlists_capture(struct intel_engine_cs *engine)
{
struct execlists_capture *cap;
@@ -2753,7 +2995,7 @@ static bool execlists_capture(struct intel_engine_cs *engine)
return true;
spin_lock_irq(&engine->active.lock);
- cap->rq = execlists_active(&engine->execlists);
+ cap->rq = active_context(engine, active_ccid(engine));
if (cap->rq) {
cap->rq = active_request(cap->rq->context->timeline, cap->rq);
cap->rq = i915_request_get_rcu(cap->rq);
@@ -2901,10 +3143,14 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
if (reset_in_progress(execlists))
return; /* defer until we restart the engine following reset */
- if (execlists->tasklet.func == execlists_submission_tasklet)
- __execlists_submission_tasklet(engine);
- else
- tasklet_hi_schedule(&execlists->tasklet);
+ /* Hopefully we clear execlists->pending[] to let us through */
+ if (READ_ONCE(execlists->pending[0]) &&
+ tasklet_trylock(&execlists->tasklet)) {
+ process_csb(engine);
+ tasklet_unlock(&execlists->tasklet);
+ }
+
+ __execlists_submission_tasklet(engine);
}
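The rewritten __submit_queue_imm() first tries to drain any outstanding CSB events under the tasklet lock, so that execlists->pending[] is hopefully clear by the time the direct submission runs. A hedged sketch of the trylock-drain pattern, with a C11 atomic_flag standing in for the tasklet lock and a bool for pending[0] (names and types are assumptions):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag tasklet_lock = ATOMIC_FLAG_INIT;
static _Atomic bool pending = true;

static void process_csb_sketch(void) { atomic_store(&pending, false); }

static void submit_queue_imm_sketch(void)
{
        /* Drain pending events if we can grab the lock uncontended. */
        if (atomic_load(&pending) && !atomic_flag_test_and_set(&tasklet_lock)) {
                process_csb_sketch();
                atomic_flag_clear(&tasklet_lock);
        }

        /* direct submission runs regardless; dequeue rechecks pending */
        printf("dequeue with pending=%d\n", (int)atomic_load(&pending));
}

int main(void)
{
        submit_queue_imm_sketch();
        return 0;
}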
static void submit_queue(struct intel_engine_cs *engine,
@@ -2990,19 +3236,139 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
vaddr += engine->context_size;
if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
- dev_err_once(engine->i915->drm.dev,
+ drm_err_once(&engine->i915->drm,
"%s context redzone overwritten!\n",
engine->name);
}
static void execlists_context_unpin(struct intel_context *ce)
{
- check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE,
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
ce->engine);
i915_gem_object_unpin_map(ce->state->obj);
}
+static u32 *
+gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+ CTX_TIMESTAMP * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_REG |
+ MI_LRR_SOURCE_CS_MMIO |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
+
+ *cs++ = MI_LOAD_REGISTER_REG |
+ MI_LRR_SOURCE_CS_MMIO |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
+{
+ GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);
+
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+ (lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
+{
+ GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);
+
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+ (lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_REG |
+ MI_LRR_SOURCE_CS_MMIO |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
+{
+ cs = gen12_emit_timestamp_wa(ce, cs);
+ cs = gen12_emit_cmd_buf_wa(ce, cs);
+ cs = gen12_emit_restore_scratch(ce, cs);
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
+{
+ cs = gen12_emit_timestamp_wa(ce, cs);
+ cs = gen12_emit_restore_scratch(ce, cs);
+
+ return cs;
+}
+
+static inline u32 context_wa_bb_offset(const struct intel_context *ce)
+{
+ return PAGE_SIZE * ce->wa_bb_page;
+}
+
+static u32 *context_indirect_bb(const struct intel_context *ce)
+{
+ void *ptr;
+
+ GEM_BUG_ON(!ce->wa_bb_page);
+
+ ptr = ce->lrc_reg_state;
+ ptr -= LRC_STATE_OFFSET; /* back to start of context image */
+ ptr += context_wa_bb_offset(ce);
+
+ return ptr;
+}
+
+static void
+setup_indirect_ctx_bb(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 *(*emit)(const struct intel_context *, u32 *))
+{
+ u32 * const start = context_indirect_bb(ce);
+ u32 *cs;
+
+ cs = emit(ce, start);
+ GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+ while ((unsigned long)cs % CACHELINE_BYTES)
+ *cs++ = MI_NOOP;
+
+ lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine,
+ i915_ggtt_offset(ce->state) +
+ context_wa_bb_offset(ce),
+ (cs - start) * sizeof(*cs));
+}
+
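setup_indirect_ctx_bb() pads the emitted stream with MI_NOOPs because the INDIRECT_CTX register encodes the batch length in whole cachelines. A worked sketch of the padding arithmetic, assuming CACHELINE_BYTES is 64 and a cacheline-aligned buffer:

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64
#define MI_NOOP         0

/* Pad a dword stream to a whole number of cachelines, as done above
 * before programming INDIRECT_CTX. */
static uint32_t *pad_to_cacheline(uint32_t *cs)
{
        while ((uintptr_t)cs % CACHELINE_BYTES)
                *cs++ = MI_NOOP;
        return cs;
}

int main(void)
{
        /* Assume a cacheline-aligned buffer and 13 dwords emitted so far. */
        static uint32_t buf[32] __attribute__((aligned(CACHELINE_BYTES)));
        uint32_t *end = pad_to_cacheline(buf + 13);

        printf("emitted %zu dwords (%zu cacheline(s))\n",
               (size_t)(end - buf),
               (size_t)(end - buf) * sizeof(*buf) / CACHELINE_BYTES);
        return 0; /* 13 dwords -> padded to 16, i.e. one cacheline */
}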
static void
__execlists_update_reg_state(const struct intel_context *ce,
const struct intel_engine_cs *engine,
@@ -3026,6 +3392,18 @@ __execlists_update_reg_state(const struct intel_context *ce,
i915_oa_init_reg_state(ce, engine);
}
+
+ if (ce->wa_bb_page) {
+ u32 *(*fn)(const struct intel_context *ce, u32 *cs);
+
+ fn = gen12_emit_indirect_ctx_xcs;
+ if (ce->engine->class == RENDER_CLASS)
+ fn = gen12_emit_indirect_ctx_rcs;
+
+ /* Mutually exclusive wrt to global indirect bb */
+ GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
+ setup_indirect_ctx_bb(ce, engine, fn);
+ }
}
static int
@@ -3043,8 +3421,8 @@ __execlists_context_pin(struct intel_context *ce,
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
- ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
+ ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
__execlists_update_reg_state(ce, engine, ce->ring->tail);
return 0;
@@ -3072,7 +3450,7 @@ static void execlists_context_reset(struct intel_context *ce)
ce, ce->engine, ce->ring, true);
__execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
- ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
}
static const struct intel_context_ops execlists_context_ops = {
@@ -3092,6 +3470,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
if (!i915_request_timeline(rq)->has_initial_breadcrumb)
return 0;
@@ -3118,6 +3497,56 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
/* Record the updated position of the request's payload */
rq->infix = intel_ring_offset(rq, cs);
+ __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+
+ return 0;
+}
+
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+ int err, i;
+ u32 *cs;
+
+ GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+ /*
+ * Beware ye of the dragons, this sequence is magic!
+ *
+ * Small changes to this sequence can cause anything from
+ * GPU hangs to forcewake errors and machine lockups!
+ */
+
+ /* Flush any residual operations from the context load */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Ensure the LRI have landed before we invalidate & continue */
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
return 0;
}
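Each iteration of the LRI loop in emit_pdps() writes one page-directory address as two 32-bit halves, upper dword first. The split itself is the usual upper_32_bits()/lower_32_bits() arithmetic; a tiny sketch with an assumed example address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n)        ((uint32_t)((n) >> 32))
#define lower_32_bits(n)        ((uint32_t)(n))

int main(void)
{
        /* Assumed example address of one page directory. */
        uint64_t pd_daddr = 0x0000000123456000ull;

        /* emit_pdps() stores these via LRI into PDP_UDW/PDP_LDW. */
        printf("UDW=0x%08" PRIx32 " LDW=0x%08" PRIx32 "\n",
               upper_32_bits(pd_daddr), lower_32_bits(pd_daddr));
        return 0; /* UDW=0x00000001 LDW=0x23456000 */
}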
@@ -3142,6 +3571,12 @@ static int execlists_request_alloc(struct i915_request *request)
* to cancel/unwind this request now.
*/
+ if (!i915_vm_is_4lvl(request->context->vm)) {
+ ret = emit_pdps(request);
+ if (ret)
+ return ret;
+ }
+
/* Unconditionally invalidate GPU caches and TLBs. */
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
@@ -3442,7 +3877,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
ret = lrc_setup_wa_ctx(engine);
if (ret) {
- DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+ drm_dbg(&engine->i915->drm,
+ "Failed to setup context WA page: %d\n", ret);
return ret;
}
@@ -3475,6 +3911,72 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ ring_set_paused(engine, 0);
+
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with an mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[reset_value]);
+
+ /* Once more for luck and our trusty paranoia */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
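A sketch of why csb_head is parked on the *last* entry: process_csb() consumes entries strictly between the cached head and the hardware write pointer, so pointing both at csb_size - 1 makes entry [0] the first one examined after reset. Simplified standalone model (entry count assumed; the real code also handles the 16-bit register fields and cacheline invalidation):

#include <stdio.h>

#define CSB_SIZE 12     /* GEN11_CSB_ENTRIES, as an assumed example */

/* Consume entries in (head, write] on the circular status buffer,
 * the same walk process_csb() performs. */
static void consume(unsigned int *head, unsigned int write)
{
        while (*head != write) {
                *head = (*head + 1) % CSB_SIZE;
                printf("process CSB[%u]\n", *head);
        }
}

int main(void)
{
        unsigned int head = CSB_SIZE - 1;       /* reset_csb_pointers() */
        unsigned int hw_write = CSB_SIZE - 1;   /* faked HW write pointer */

        consume(&head, hw_write);       /* nothing to do before first event */

        hw_write = 1;                   /* HW writes entries [0] and [1] */
        consume(&head, hw_write);       /* processes CSB[0], then CSB[1] */
        return 0;
}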
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ reset_csb_pointers(engine);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ intel_timeline_reset_seqno(engine->kernel_context->timeline);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
static void enable_error_interrupt(struct intel_engine_cs *engine)
{
u32 status;
@@ -3485,7 +3987,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine)
status = ENGINE_READ(engine, RING_ESR);
if (unlikely(status)) {
- dev_err(engine->i915->drm.dev,
+ drm_err(&engine->i915->drm,
"engine '%s' resumed still in error: %08x\n",
engine->name, status);
__intel_gt_reset(engine->gt, engine->mask);
@@ -3541,7 +4043,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
enable_error_interrupt(engine);
- engine->context_tag = 0;
+ engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
}
static bool unexpected_starting_state(struct intel_engine_cs *engine)
@@ -3549,7 +4051,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
bool unexpected = false;
if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
- DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+ drm_dbg(&engine->i915->drm,
+ "STOP_RING still set in RING_MI_MODE\n");
unexpected = true;
}
@@ -3609,39 +4112,10 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
*
* FIXME: Wa for more modern gens needs to be validated
*/
+ ring_set_paused(engine, 1);
intel_engine_stop_cs(engine);
-}
-
-static void reset_csb_pointers(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- const unsigned int reset_value = execlists->csb_size - 1;
- ring_set_paused(engine, 0);
-
- /*
- * After a reset, the HW starts writing into CSB entry [0]. We
- * therefore have to set our HEAD pointer back one entry so that
- * the *first* entry we check is entry 0. To complicate this further,
- * as we don't wait for the first interrupt after reset, we have to
- * fake the HW write to point back to the last entry so that our
- * inline comparison of our cached head position against the last HW
- * write works even before the first interrupt.
- */
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
- wmb(); /* Make sure this is visible to HW (paranoia?) */
-
- /*
- * Sometimes Icelake forgets to reset its pointers on a GPU reset.
- * Bludgeon them with a mmio update to be sure.
- */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
+ engine->execlists.reset_ccid = active_ccid(engine);
}
static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
@@ -3684,7 +4158,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* its request, it was still running at the time of the
* reset and will have been clobbered.
*/
- rq = execlists_active(execlists);
+ rq = active_context(engine, engine->execlists.reset_ccid);
if (!rq)
goto unwind;
@@ -3734,8 +4208,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* image back to the expected values to skip over the guilty request.
*/
__i915_request_reset(rq, stalled);
- if (!stalled)
- goto out_replay;
/*
* We want a simple context + ring to execute the breadcrumb update.
@@ -3745,15 +4217,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- GEM_BUG_ON(!intel_context_is_pinned(ce));
- restore_default_state(ce, engine);
-
out_replay:
ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
head, ce->ring->tail);
__execlists_reset_reg_state(ce, engine);
__execlists_update_reg_state(ce, engine, head);
- ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
+ ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
unwind:
/* Push back any incomplete requests for replay after the reset. */
@@ -4113,6 +4582,42 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
+static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+{
+ static const i915_reg_t vd[] = {
+ GEN12_VD0_AUX_NV,
+ GEN12_VD1_AUX_NV,
+ GEN12_VD2_AUX_NV,
+ GEN12_VD3_AUX_NV,
+ };
+
+ static const i915_reg_t ve[] = {
+ GEN12_VE0_AUX_NV,
+ GEN12_VE1_AUX_NV,
+ };
+
+ if (engine->class == VIDEO_DECODE_CLASS)
+ return vd[engine->instance];
+
+ if (engine->class == VIDEO_ENHANCEMENT_CLASS)
+ return ve[engine->instance];
+
+ GEM_BUG_ON("unknown aux_inv_reg\n");
+
+ return INVALID_MMIO_REG;
+}
+
+static u32 *
+gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(inv_reg);
+ *cs++ = AUX_INV;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
static int gen12_emit_flush_render(struct i915_request *request,
u32 mode)
{
@@ -4121,13 +4626,13 @@ static int gen12_emit_flush_render(struct i915_request *request,
u32 *cs;
flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_FLUSH_L3;
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/* Wa_1409600907:tgl */
flags |= PIPE_CONTROL_DEPTH_STALL;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
flags |= PIPE_CONTROL_STORE_DATA_INDEX;
flags |= PIPE_CONTROL_QW_WRITE;
@@ -4138,7 +4643,9 @@ static int gen12_emit_flush_render(struct i915_request *request,
if (IS_ERR(cs))
return PTR_ERR(cs);
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ cs = gen12_emit_pipe_control(cs,
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ flags, LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(request, cs);
}
@@ -4153,14 +4660,13 @@ static int gen12_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_L3_RO_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STORE_DATA_INDEX;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_CS_STALL;
- cs = intel_ring_begin(request, 8);
+ cs = intel_ring_begin(request, 8 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -4173,6 +4679,9 @@ static int gen12_emit_flush_render(struct i915_request *request,
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ /* hsdes: 1809175790 */
+ cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+
*cs++ = preparser_disable(false);
intel_ring_advance(request, cs);
}
@@ -4180,6 +4689,56 @@ static int gen12_emit_flush_render(struct i915_request *request,
return 0;
}
+static int gen12_emit_flush(struct i915_request *request, u32 mode)
+{
+ intel_engine_mask_t aux_inv = 0;
+ u32 cmd, *cs;
+
+ if (mode & EMIT_INVALIDATE)
+ aux_inv = request->engine->mask & ~BIT(BCS0);
+
+ cs = intel_ring_begin(request,
+ 4 + (aux_inv ? 2 * hweight8(aux_inv) + 2 : 0));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (request->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+
+ if (aux_inv) { /* hsdes: 1809175790 */
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
+ for_each_engine_masked(engine, request->engine->gt,
+ aux_inv, tmp) {
+ *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
+ *cs++ = AUX_INV;
+ }
+ *cs++ = MI_NOOP;
+ }
+ intel_ring_advance(request, cs);
+
+ return 0;
+}
+
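The intel_ring_begin() count in gen12_emit_flush() is worth unpacking: 4 dwords for the MI_FLUSH_DW packet plus, when AUX invalidation is needed, one LRI header, two dwords (register offset and AUX_INV value) per engine, and a trailing MI_NOOP. A worked example with an assumed two-engine mask:

#include <stdio.h>

/* GCC builtin population count stands in for hweight8(). */
static unsigned int hweight8(unsigned int v) { return __builtin_popcount(v & 0xff); }

int main(void)
{
        unsigned int aux_inv = 0x06;    /* assumed: two engines' bits set */
        unsigned int dwords = 4 + (aux_inv ? 2 * hweight8(aux_inv) + 2 : 0);

        /* 4 (MI_FLUSH_DW) + 1 (LRI header) + 2*2 (reg/value) + 1 (NOOP) */
        printf("ring space: %u dwords\n", dwords);      /* 10 */
        return 0;
}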
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
@@ -4209,8 +4768,7 @@ static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
}
static __always_inline u32*
-gen8_emit_fini_breadcrumb_footer(struct i915_request *request,
- u32 *cs)
+gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
{
*cs++ = MI_USER_INTERRUPT;
@@ -4224,14 +4782,16 @@ gen8_emit_fini_breadcrumb_footer(struct i915_request *request,
return gen8_emit_wa_tail(request, cs);
}
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
{
- cs = gen8_emit_ggtt_write(cs,
- request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
- 0);
+ u32 addr = i915_request_active_timeline(request)->hwsp_offset;
+
+ return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
+}
- return gen8_emit_fini_breadcrumb_footer(request, cs);
+static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
@@ -4249,7 +4809,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL);
- return gen8_emit_fini_breadcrumb_footer(request, cs);
+ return gen8_emit_fini_breadcrumb_tail(request, cs);
}
static u32 *
@@ -4265,7 +4825,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE);
- return gen8_emit_fini_breadcrumb_footer(request, cs);
+ return gen8_emit_fini_breadcrumb_tail(request, cs);
}
/*
@@ -4303,7 +4863,7 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
}
static __always_inline u32*
-gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
+gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
{
*cs++ = MI_USER_INTERRUPT;
@@ -4317,33 +4877,29 @@ gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
return gen8_emit_wa_tail(request, cs);
}
-static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
{
- cs = gen8_emit_ggtt_write(cs,
- request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
- 0);
-
- return gen12_emit_fini_breadcrumb_footer(request, cs);
+ return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}
static u32 *
gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- /* Wa_1409600907:tgl */
- PIPE_CONTROL_DEPTH_STALL |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_HDC_PIPELINE_FLUSH);
+ cs = gen12_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
- return gen12_emit_fini_breadcrumb_footer(request, cs);
+ return gen12_emit_fini_breadcrumb_tail(request, cs);
}
static void execlists_park(struct intel_engine_cs *engine)
@@ -4369,8 +4925,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
if (!intel_vgpu_active(engine->i915)) {
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ }
}
if (INTEL_GEN(engine->i915) >= 12)
@@ -4392,6 +4951,8 @@ static void execlists_shutdown(struct intel_engine_cs *engine)
static void execlists_release(struct intel_engine_cs *engine)
{
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
execlists_shutdown(engine);
intel_engine_cleanup_common(engine);
@@ -4411,9 +4972,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen8_emit_flush;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
- if (INTEL_GEN(engine->i915) >= 12)
+ if (INTEL_GEN(engine->i915) >= 12) {
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
-
+ engine->emit_flush = gen12_emit_flush;
+ }
engine->set_default_submission = intel_execlists_set_default_submission;
if (INTEL_GEN(engine->i915) < 11) {
@@ -4449,6 +5011,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4493,7 +5056,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
* because we only expect rare glitches but nothing
* critical to prevent us from using GPU
*/
- DRM_ERROR("WA batch buffer initialization failed\n");
+ drm_err(&i915->drm, "WA batch buffer initialization failed\n");
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = uncore->regs +
@@ -4516,48 +5079,18 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
else
execlists->csb_size = GEN11_CSB_ENTRIES;
- reset_csb_pointers(engine);
+ if (INTEL_GEN(engine->i915) >= 11) {
+ execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ }
/* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = execlists_sanitize;
engine->release = execlists_release;
return 0;
}
-static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
-{
- u32 indirect_ctx_offset;
-
- switch (INTEL_GEN(engine->i915)) {
- default:
- MISSING_CASE(INTEL_GEN(engine->i915));
- /* fall through */
- case 12:
- indirect_ctx_offset =
- GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- case 11:
- indirect_ctx_offset =
- GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- case 10:
- indirect_ctx_offset =
- GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- case 9:
- indirect_ctx_offset =
- GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- case 8:
- indirect_ctx_offset =
- GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- }
-
- return indirect_ctx_offset;
-}
-
-
static void init_common_reg_state(u32 * const regs,
const struct intel_engine_cs *engine,
const struct intel_ring *ring,
@@ -4575,30 +5108,27 @@ static void init_common_reg_state(u32 * const regs,
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ regs[CTX_TIMESTAMP] = 0;
}
static void init_wa_bb_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine,
- u32 pos_bb_per_ctx)
+ const struct intel_engine_cs *engine)
{
const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
if (wa_ctx->per_ctx.size) {
const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
- regs[pos_bb_per_ctx] =
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
if (wa_ctx->indirect_ctx.size) {
- const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- regs[pos_bb_per_ctx + 2] =
- (ggtt_offset + wa_ctx->indirect_ctx.offset) |
- (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
-
- regs[pos_bb_per_ctx + 4] =
- intel_lr_indirect_ctx_offset(engine) << 6;
+ lrc_ring_setup_indirect_ctx(regs, engine,
+ i915_ggtt_offset(wa_ctx->vma) +
+ wa_ctx->indirect_ctx.offset,
+ wa_ctx->indirect_ctx.size);
}
}
@@ -4647,10 +5177,7 @@ static void execlists_init_reg_state(u32 *regs,
init_common_reg_state(regs, engine, ring, inhibit);
init_ppgtt_reg_state(regs, vm_alias(ce->vm));
- init_wa_bb_reg_state(regs, engine,
- INTEL_GEN(engine->i915) >= 12 ?
- GEN12_CTX_BB_PER_CTX_PTR :
- CTX_BB_PER_CTX_PTR);
+ init_wa_bb_reg_state(regs, engine);
__reset_stop_ring(regs, engine);
}
@@ -4663,29 +5190,18 @@ populate_lr_context(struct intel_context *ce,
{
bool inhibit = true;
void *vaddr;
- int ret;
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
- return ret;
+ drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
+ return PTR_ERR(vaddr);
}
set_redzone(vaddr, engine);
if (engine->default_state) {
- void *defaults;
-
- defaults = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (IS_ERR(defaults)) {
- ret = PTR_ERR(defaults);
- goto err_unpin_ctx;
- }
-
- memcpy(vaddr, defaults, engine->context_size);
- i915_gem_object_unpin_map(engine->default_state);
+ shmem_read(engine->default_state, 0,
+ vaddr, engine->context_size);
__set_bit(CONTEXT_VALID_BIT, &ce->flags);
inhibit = false;
}
@@ -4697,14 +5213,12 @@ populate_lr_context(struct intel_context *ce,
* The second page of the context object contains some registers which
* must be set up prior to the first execution.
*/
- execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+ execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
ce, engine, ring, inhibit);
- ret = 0;
-err_unpin_ctx:
__i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
- return ret;
+ return 0;
}
static int __execlists_context_alloc(struct intel_context *ce,
@@ -4722,6 +5236,11 @@ static int __execlists_context_alloc(struct intel_context *ce,
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
context_size += I915_GTT_PAGE_SIZE; /* for redzone */
+ if (INTEL_GEN(engine->i915) == 12) {
+ ce->wa_bb_page = context_size / PAGE_SIZE;
+ context_size += PAGE_SIZE;
+ }
+
ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
@@ -4761,7 +5280,8 @@ static int __execlists_context_alloc(struct intel_context *ce,
ret = populate_lr_context(ce, ctx_obj, engine, ring);
if (ret) {
- DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
+ drm_dbg(&engine->i915->drm,
+ "Failed to populate LRC: %d\n", ret);
goto error_ring_free;
}
@@ -4814,6 +5334,8 @@ static void virtual_context_destroy(struct kref *kref)
__execlists_context_fini(&ve->context);
intel_context_fini(&ve->context);
+ intel_engine_free_request_pool(&ve->base);
+
kfree(ve->bonds);
kfree(ve);
}
@@ -4938,12 +5460,15 @@ static void virtual_submission_tasklet(unsigned long data)
return;
local_irq_disable();
- for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = ve->siblings[n];
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
struct ve_node * const node = &ve->nodes[sibling->id];
struct rb_node **parent, *rb;
bool first;
+ if (!READ_ONCE(ve->request))
+ break; /* already handled by a sibling's tasklet */
+
if (unlikely(!(mask & sibling->mask))) {
if (!RB_EMPTY_NODE(&node->rb)) {
spin_lock(&sibling->active.lock);
@@ -4994,10 +5519,8 @@ static void virtual_submission_tasklet(unsigned long data)
submit_engine:
GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
node->prio = prio;
- if (first && prio > sibling->execlists.queue_priority_hint) {
- sibling->execlists.queue_priority_hint = prio;
+ if (first && prio > sibling->execlists.queue_priority_hint)
tasklet_hi_schedule(&sibling->execlists.tasklet);
- }
spin_unlock(&sibling->active.lock);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index dfbc214e14f5..91fd8e452d9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -90,6 +90,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
#define LRC_PPHWSP_SZ (1)
/* After the PPHWSP we have the logical state for the context */
#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
+#define LRC_STATE_OFFSET (LRC_STATE_PN * PAGE_SIZE)
/* Space within PPHWSP reserved to be used as scratch */
#define LRC_PPHWSP_SCRATCH 0x34
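The new LRC_STATE_OFFSET is the old page-number arithmetic folded into bytes: with the per-process HWSP occupying the first page of the context image, the register state starts one page in. A sketch of the arithmetic (LRC_PPHWSP_PN assumed to be 0 here, as in this era of the driver):

#include <stdio.h>

#define PAGE_SIZE        4096
#define LRC_PPHWSP_PN    0      /* assumed: PPHWSP is the first page */
#define LRC_PPHWSP_SZ    1
#define LRC_STATE_PN     (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
#define LRC_STATE_OFFSET (LRC_STATE_PN * PAGE_SIZE)

int main(void)
{
        /* lrc_reg_state = vaddr + LRC_STATE_OFFSET in __execlists_context_pin() */
        printf("state offset: %d bytes (page %d)\n",
               LRC_STATE_OFFSET, LRC_STATE_PN); /* 4096 bytes, page 1 */
        return 0;
}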
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index d39b72590e40..93cb6c460508 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,14 +9,13 @@
#include <linux/types.h>
-/* GEN8 to GEN11 Reg State Context */
+/* GEN8 to GEN12 Reg State Context */
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
#define CTX_RING_TAIL (0x06 + 1)
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_STATE (0x10 + 1)
-#define CTX_BB_PER_CTX_PTR (0x18 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
#define CTX_PDP3_UDW (0x24 + 1)
#define CTX_PDP3_LDW (0x26 + 1)
@@ -30,9 +29,6 @@
#define GEN9_CTX_RING_MI_MODE 0x54
-/* GEN12+ Reg State Context */
-#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1)
-
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 3847ee44b181..ab675d35030d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -113,7 +113,6 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 rc6_mode;
/* 2b: Program RC6 thresholds.*/
if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
@@ -165,16 +164,11 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
/* 3a: Enable RC6 */
set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
- /* WaRsUseTimeoutMode:cnl (pre-prod) */
- if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- else
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_RC6_ENABLE |
- rc6_mode;
+ GEN6_RC_CTL_EI_MODE(1);
/*
* WaRsDisableCoarsePowerGating:skl,cnl
@@ -246,16 +240,18 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
&rc6vids, NULL);
if (IS_GEN(i915, 6) && ret) {
- DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
} else if (IS_GEN(i915, 6) &&
(GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
- DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
- GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ drm_dbg(&i915->drm,
+ "You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
if (ret)
- DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ drm_err(&i915->drm,
+ "Couldn't fix incorrect rc6 voltage\n");
}
}
@@ -263,14 +259,15 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
static int chv_rc6_init(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
resource_size_t pctx_paddr, paddr;
resource_size_t pctx_size = 32 * SZ_1K;
u32 pcbr;
pcbr = intel_uncore_read(uncore, VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+ drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
+ paddr = i915->dsm.end + 1 - pctx_size;
GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & ~4095);
@@ -304,7 +301,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
goto out;
}
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+ drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
/*
* From the Gunit register HAS:
@@ -316,7 +313,8 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
*/
pctx = i915_gem_object_create_stolen(i915, pctx_size);
if (IS_ERR(pctx)) {
- DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+ drm_dbg(&i915->drm,
+ "not enough stolen space for PCTX, disabling\n");
return PTR_ERR(pctx);
}
@@ -398,14 +396,14 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
rc_sw_target &= RC_SW_TARGET_STATE_MASK;
rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
- DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+ drm_dbg(&i915->drm, "BIOS enabled RC states: "
"HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
rc_sw_target);
if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
- DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+ drm_dbg(&i915->drm, "RC6 Base location not set properly.\n");
enable_rc6 = false;
}
@@ -417,7 +415,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
- DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+ drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -425,24 +423,25 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
(intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
(intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
(intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
- DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+ drm_dbg(&i915->drm,
+ "Engine Idle wait time not set properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
!intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
!intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
- DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+ drm_dbg(&i915->drm, "Pushbus not setup properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
- DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+ drm_dbg(&i915->drm, "GFX pause not setup properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
- DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+ drm_dbg(&i915->drm, "GPM control not setup properly.\n");
enable_rc6 = false;
}
@@ -463,7 +462,7 @@ static bool rc6_supported(struct intel_rc6 *rc6)
return false;
if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"RC6 and powersaving disabled by BIOS\n");
return false;
}
@@ -495,7 +494,7 @@ static bool pctx_corrupted(struct intel_rc6 *rc6)
if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
return false;
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"RC6 context corruption, disabling runtime power management\n");
return true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 5954ecc3207f..f59e7875cc5e 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -102,7 +102,7 @@ static int render_state_setup(struct intel_renderstate *so,
}
if (rodata->reloc[reloc_index] != -1) {
- DRM_ERROR("only %d relocs resolved\n", reloc_index);
+ drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
goto err;
}
@@ -194,7 +194,7 @@ int intel_renderstate_init(struct intel_renderstate *so,
err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
- goto err_vma;
+ goto err_obj;
err = render_state_setup(so, engine->i915);
if (err)
@@ -204,8 +204,6 @@ int intel_renderstate_init(struct intel_renderstate *so,
err_unpin:
i915_vma_unpin(so->vma);
-err_vma:
- i915_vma_close(so->vma);
err_obj:
i915_gem_object_put(obj);
so->vma = NULL;
@@ -221,6 +219,14 @@ int intel_renderstate_emit(struct intel_renderstate *so,
if (!so->vma)
return 0;
+ i915_vma_lock(so->vma);
+ err = i915_request_await_object(rq, so->vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(so->vma, rq, 0);
+ i915_vma_unlock(so->vma);
+ if (err)
+ return err;
+
err = engine->emit_bb_start(rq,
so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
@@ -235,13 +241,7 @@ int intel_renderstate_emit(struct intel_renderstate *so,
return err;
}
- i915_vma_lock(so->vma);
- err = i915_request_await_object(rq, so->vma->obj, false);
- if (err == 0)
- err = i915_vma_move_to_active(so->vma, rq, 0);
- i915_vma_unlock(so->vma);
-
- return err;
+ return 0;
}
void intel_renderstate_fini(struct intel_renderstate *so)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 80db3c9d785e..39070b514e65 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -109,7 +109,7 @@ static bool mark_guilty(struct i915_request *rq)
goto out;
}
- dev_notice(ctx->i915->drm.dev,
+ drm_notice(&ctx->i915->drm,
"%s context reset due to GPU hang\n",
ctx->name);
@@ -755,7 +755,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
- i915_gem_restore_fences(gt->ggtt);
+ intel_ggtt_restore_fences(gt->ggtt);
return err;
}
@@ -1031,7 +1031,7 @@ void intel_gt_reset(struct intel_gt *gt,
goto unlock;
if (reason)
- dev_notice(gt->i915->drm.dev,
+ drm_notice(&gt->i915->drm,
"Resetting chip for %s\n", reason);
atomic_inc(&gt->i915->gpu_error.reset_count);
@@ -1039,7 +1039,7 @@ void intel_gt_reset(struct intel_gt *gt,
if (!intel_has_gpu_reset(gt)) {
if (i915_modparams.reset)
- dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
+ drm_err(&gt->i915->drm, "GPU reset not supported\n");
else
drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
goto error;
@@ -1049,7 +1049,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_runtime_pm_disable_interrupts(gt->i915);
if (do_reset(gt, stalled_mask)) {
- dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
+ drm_err(&gt->i915->drm, "Failed to reset chip\n");
goto taint;
}
@@ -1111,7 +1111,7 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
/**
* intel_engine_reset - reset GPU engine to recover from a hang
* @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no dev_notice()
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
*
* Reset a specific GPU engine. Useful if a hang is detected.
* Returns zero on successful reset or otherwise an error code.
@@ -1136,7 +1136,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
reset_prepare_engine(engine);
if (msg)
- dev_notice(engine->i915->drm.dev,
+ drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
@@ -1381,7 +1381,7 @@ static void intel_wedge_me(struct work_struct *work)
{
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
- dev_err(w->gt->i915->drm.dev,
+ drm_err(&w->gt->i915->drm,
"%s timed out, cancelling all in-flight rendering.\n",
w->name);
intel_gt_set_wedged(w->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index 5bdce24994aa..cc0ebca65167 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -88,6 +88,8 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
+ unsigned int head = READ_ONCE(ring->head);
+
GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
/*
@@ -105,8 +107,7 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
* into the same cacheline as ring->head.
*/
#define cacheline(a) round_down(a, CACHELINE_BYTES)
- GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
- tail < ring->head);
+ GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
#undef cacheline
}
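The assertion forbids a tail that lands in the same cacheline as head while sitting behind it, since the HW may then read back a stale head from that shared cacheline. A worked sketch with CACHELINE_BYTES assumed to be 64:

#include <stdio.h>

#define CACHELINE_BYTES 64
#define cacheline(a) ((a) & ~(CACHELINE_BYTES - 1))     /* round_down */

static int tail_valid(unsigned int head, unsigned int tail)
{
        return !(cacheline(tail) == cacheline(head) && tail < head);
}

int main(void)
{
        printf("%d\n", tail_valid(0x44, 0x48)); /* 1: same line, tail ahead */
        printf("%d\n", tail_valid(0x44, 0x40)); /* 0: same line, tail behind */
        printf("%d\n", tail_valid(0x44, 0x84)); /* 1: different cachelines */
        return 0;
}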
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index fdc3f10e12aa..ca7286e58409 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -42,6 +42,7 @@
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
+#include "shmem_utils.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
@@ -577,8 +578,9 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
RING_INSTPM(engine->mmio_base),
INSTPM_SYNC_FLUSH, 0,
1000))
- DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- engine->name);
+ drm_err(&dev_priv->drm,
+ "%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ engine->name);
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
@@ -601,8 +603,9 @@ static bool stop_ring(struct intel_engine_cs *engine)
MODE_IDLE,
MODE_IDLE,
1000)) {
- DRM_ERROR("%s : timed out trying to stop ring\n",
- engine->name);
+ drm_err(&dev_priv->drm,
+ "%s : timed out trying to stop ring\n",
+ engine->name);
/*
* Sometimes we observe that the idle flag is not
@@ -661,22 +664,23 @@ static int xcs_resume(struct intel_engine_cs *engine)
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
- DRM_DEBUG_DRIVER("%s head not reset to zero "
+ drm_dbg(&dev_priv->drm, "%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_HEAD),
+ ENGINE_READ(engine, RING_TAIL),
+ ENGINE_READ(engine, RING_START));
+
+ if (!stop_ring(engine)) {
+ drm_err(&dev_priv->drm,
+ "failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_HEAD),
ENGINE_READ(engine, RING_TAIL),
ENGINE_READ(engine, RING_START));
-
- if (!stop_ring(engine)) {
- DRM_ERROR("failed to set %s head to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
ret = -EIO;
goto out;
}
@@ -719,7 +723,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
50)) {
- DRM_ERROR("%s initialization failed "
+ drm_err(&dev_priv->drm, "%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
ENGINE_READ(engine, RING_CTL),
@@ -1238,7 +1242,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
if (engine->default_state) {
- void *defaults, *vaddr;
+ void *vaddr;
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
@@ -1246,15 +1250,8 @@ alloc_context_vma(struct intel_engine_cs *engine)
goto err_obj;
}
- defaults = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (IS_ERR(defaults)) {
- err = PTR_ERR(defaults);
- goto err_map;
- }
-
- memcpy(vaddr, defaults, engine->context_size);
- i915_gem_object_unpin_map(engine->default_state);
+ shmem_read(engine->default_state, 0,
+ vaddr, engine->context_size);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
@@ -1268,8 +1265,6 @@ alloc_context_vma(struct intel_engine_cs *engine)
return vma;
-err_map:
- i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
return ERR_PTR(err);
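
With engine->default_state now backed by a shmem file rather than a pinned GEM object, the default-context copy collapses to one call and the err_map unwind path disappears. A sketch of the helper contract assumed here (shmem_read() comes from the new shmem_utils.h; the exact signature below is an assumption):

    /* Assumed: copy @len bytes from @file, starting at @off, into @dst. */
    int shmem_read(struct file *file, loff_t off, void *dst, size_t len);

    /* As used in alloc_context_vma() above: */
    shmem_read(engine->default_state, 0, vaddr, engine->context_size);
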
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 19542fd9e207..2f59fc6df3c2 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -8,12 +8,15 @@
#include "i915_drv.h"
#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+#define BUSY_MAX_EI 20u /* ms */
+
/*
* Lock protecting IPS related data structures
*/
@@ -44,6 +47,100 @@ static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
intel_uncore_write_fw(uncore, reg, val);
}
+static void rps_timer(struct timer_list *t)
+{
+ struct intel_rps *rps = from_timer(rps, t, timer);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ s64 max_busy[3] = {};
+ ktime_t dt, last;
+
+ for_each_engine(engine, rps_to_gt(rps), id) {
+ s64 busy;
+ int i;
+
+ dt = intel_engine_get_busy_time(engine);
+ last = engine->stats.rps;
+ engine->stats.rps = dt;
+
+ busy = ktime_to_ns(ktime_sub(dt, last));
+ for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
+ if (busy > max_busy[i])
+ swap(busy, max_busy[i]);
+ }
+ }
+
+ dt = ktime_get();
+ last = rps->pm_timestamp;
+ rps->pm_timestamp = dt;
+
+ if (intel_rps_is_active(rps)) {
+ s64 busy;
+ int i;
+
+ dt = ktime_sub(dt, last);
+
+ /*
+ * Our goal is to evaluate each engine independently, so we run
+ * at the lowest clocks required to sustain the heaviest
+ * workload. However, a task may be split into sequential
+ * dependent operations across a set of engines, such that
+ * the independent contributions do not account for high load,
+ * but overall the task is GPU bound. For example, consider
+ * video decode on vcs followed by colour post-processing
+ * on vecs, followed by general post-processing on rcs.
+ * Since multiple engines being active does not necessarily imply
+ * a single continuous workload across all engines, we hedge our
+ * bets by only contributing a factor of the distributed
+ * load into our busyness calculation.
+ */
+ busy = max_busy[0];
+ for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
+ if (!max_busy[i])
+ break;
+
+ busy += div_u64(max_busy[i], 1 << i);
+ }
+ GT_TRACE(rps_to_gt(rps),
+ "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
+ busy, (int)div64_u64(100 * busy, dt),
+ max_busy[0], max_busy[1], max_busy[2],
+ rps->pm_interval);
+
+ if (100 * busy > rps->power.up_threshold * dt &&
+ rps->cur_freq < rps->max_freq_softlimit) {
+ rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
+ rps->pm_interval = 1;
+ schedule_work(&rps->work);
+ } else if (100 * busy < rps->power.down_threshold * dt &&
+ rps->cur_freq > rps->min_freq_softlimit) {
+ rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
+ rps->pm_interval = 1;
+ schedule_work(&rps->work);
+ } else {
+ rps->last_adj = 0;
+ }
+
+ mod_timer(&rps->timer,
+ jiffies + msecs_to_jiffies(rps->pm_interval));
+ rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
+ }
+}
+
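
The accumulation above counts the busiest engine in full and each next-busiest at half the previous weight, i.e. busy = max[0] + max[1]/2 + max[2]/4, so a pipeline split across engines still registers as load without fully double-counting independent clients. A worked sketch with assumed per-interval samples:

    s64 max_busy[3] = { 8000000, 4000000, 2000000 }; /* ns: 8ms, 4ms, 2ms */
    s64 busy = max_busy[0];                          /* 8.0ms, full weight */
    int i;

    for (i = 1; i < ARRAY_SIZE(max_busy); i++)
            busy += div_u64(max_busy[i], 1 << i);    /* +2.0ms, +0.5ms */

    /* busy == 10.5ms; then 100 * busy is compared against threshold * dt */
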
+static void rps_start_timer(struct intel_rps *rps)
+{
+ rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
+ rps->pm_interval = 1;
+ mod_timer(&rps->timer, jiffies + 1);
+}
+
+static void rps_stop_timer(struct intel_rps *rps)
+{
+ del_timer_sync(&rps->timer);
+ rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
+ cancel_work_sync(&rps->work);
+}
+
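
Both helpers apply the same fold, pm_timestamp = now - pm_timestamp, letting one field serve two roles: while the timer runs it is an effective start time, while parked it holds the active time accumulated since the last sample. Tracing one park/unpark cycle (t0 = previous sample):

    /* stop  at t1: pm_timestamp = t1 - t0              (active time)   */
    /* start at t2: pm_timestamp = t2 - (t1 - t0)       (shifted start) */
    /* sample at t3: dt - last   = (t3 - t2) + (t1 - t0)                */
    /* so the parked gap (t2 - t1) never inflates the busyness window.  */
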
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
u32 mask = 0;
@@ -57,7 +154,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask &= READ_ONCE(rps->pm_events);
+ mask &= rps->pm_events;
return rps_pm_sanitize_mask(rps, ~mask);
}
@@ -70,18 +167,11 @@ static void rps_reset_ei(struct intel_rps *rps)
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- u32 events;
- rps_reset_ei(rps);
+ GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
+ rps->pm_events, rps_pm_mask(rps, rps->last_freq));
- if (IS_VALLEYVIEW(gt->i915))
- /* WaGsvRC0ResidencyMethod:vlv */
- events = GEN6_PM_RP_UP_EI_EXPIRED;
- else
- events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
- WRITE_ONCE(rps->pm_events, events);
+ rps_reset_ei(rps);
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
@@ -120,8 +210,6 @@ static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- WRITE_ONCE(rps->pm_events, 0);
-
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
@@ -140,6 +228,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
cancel_work_sync(&rps->work);
rps_reset_interrupts(rps);
+ GT_TRACE(gt, "interrupts:off\n");
}
static const struct cparams {
@@ -186,14 +275,12 @@ static void gen5_rps_init(struct intel_rps *rps)
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
+ drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
rps->min_freq = fmax;
+ rps->efficient_freq = fstart;
rps->max_freq = fmin;
-
- rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
}
static unsigned long
@@ -456,7 +543,8 @@ static bool gen5_rps_enable(struct intel_rps *rps)
if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
+ drm_err(&uncore->i915->drm,
+ "stuck trying to change perf mode\n");
mdelay(1);
gen5_rps_set(rps, rps->cur_freq);
@@ -533,8 +621,8 @@ static u32 rps_limits(struct intel_rps *rps, u8 val)
static void rps_set_power(struct intel_rps *rps, int new_power)
{
- struct intel_uncore *uncore = rps_to_uncore(rps);
- struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_gt *gt = rps_to_gt(rps);
+ struct intel_uncore *uncore = gt->uncore;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
@@ -543,55 +631,49 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
if (new_power == rps->power.mode)
return;
+ threshold_up = 95;
+ threshold_down = 85;
+
/* Note the units here are not exactly 1us, but 1280ns. */
switch (new_power) {
case LOW_POWER:
- /* Upclock if more than 95% busy over 16ms */
ei_up = 16000;
- threshold_up = 95;
-
- /* Downclock if less than 85% busy over 32ms */
ei_down = 32000;
- threshold_down = 85;
break;
case BETWEEN:
- /* Upclock if more than 90% busy over 13ms */
ei_up = 13000;
- threshold_up = 90;
-
- /* Downclock if less than 75% busy over 32ms */
ei_down = 32000;
- threshold_down = 75;
break;
case HIGH_POWER:
- /* Upclock if more than 85% busy over 10ms */
ei_up = 10000;
- threshold_up = 85;
-
- /* Downclock if less than 60% busy over 32ms */
ei_down = 32000;
- threshold_down = 60;
break;
}
/* When byt can survive without system hang with dynamic
* sw freq adjustments, this restriction can be lifted.
*/
- if (IS_VALLEYVIEW(i915))
+ if (IS_VALLEYVIEW(gt->i915))
goto skip_hw_write;
- set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
+ GT_TRACE(gt,
+ "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
+ new_power, threshold_up, ei_up, threshold_down, ei_down);
+
+ set(uncore, GEN6_RP_UP_EI,
+ intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
set(uncore, GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
+ intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));
- set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
+ set(uncore, GEN6_RP_DOWN_EI,
+ intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
set(uncore, GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));
+ intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
set(uncore, GEN6_RP_CONTROL,
- (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
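
The conversions preserve the old semantics in new units: GT_INTERVAL_FROM_US(i915, us) becomes intel_gt_ns_to_pm_interval(gt, us * 1000), and the threshold writes fold the percentage into the factor, since us * pct * 10 == (us * 1000) * pct / 100. Worked through for LOW_POWER (ei_up = 16000us at threshold_up = 95%):

    /* EI:        16000us        -> 16000 * 1000    = 16,000,000ns */
    /* threshold: 95% of the EI  -> 16000 * 95 * 10 = 15,200,000ns */
    set(uncore, GEN6_RP_UP_EI,
        intel_gt_ns_to_pm_interval(gt, 16000 * 1000));
    set(uncore, GEN6_RP_UP_THRESHOLD,
        intel_gt_ns_to_pm_interval(gt, 16000 * 95 * 10));
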
@@ -646,9 +728,11 @@ static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
+ GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));
+
mutex_lock(&rps->power.mutex);
if (interactive) {
- if (!rps->power.interactive++ && READ_ONCE(rps->active))
+ if (!rps->power.interactive++ && intel_rps_is_active(rps))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
@@ -673,6 +757,9 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
GEN6_AGGRESSIVE_TURBO);
set(uncore, GEN6_RPNSWREQ, swreq);
+ GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
+ val, intel_gpu_freq(rps, val), swreq);
+
return 0;
}
@@ -685,6 +772,9 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
vlv_punit_put(i915);
+ GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
+ val, intel_gpu_freq(rps, val));
+
return err;
}
@@ -715,29 +805,30 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
void intel_rps_unpark(struct intel_rps *rps)
{
- u8 freq;
-
- if (!rps->enabled)
+ if (!intel_rps_is_enabled(rps))
return;
+ GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);
+
/*
* Use the user's desired frequency as a guide, but for better
* performance, jump directly to RPe as our starting frequency.
*/
mutex_lock(&rps->lock);
- WRITE_ONCE(rps->active, true);
-
- freq = max(rps->cur_freq, rps->efficient_freq),
- freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
- intel_rps_set(rps, freq);
-
- rps->last_adj = 0;
+ intel_rps_set_active(rps);
+ intel_rps_set(rps,
+ clamp(rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit));
mutex_unlock(&rps->lock);
- if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ rps->pm_iir = 0;
+ if (intel_rps_has_interrupts(rps))
rps_enable_interrupts(rps);
+ if (intel_rps_uses_timer(rps))
+ rps_start_timer(rps);
if (IS_GEN(rps_to_i915(rps), 5))
gen5_rps_update(rps);
@@ -745,15 +836,16 @@ void intel_rps_unpark(struct intel_rps *rps)
void intel_rps_park(struct intel_rps *rps)
{
- struct drm_i915_private *i915 = rps_to_i915(rps);
+ int adj;
- if (!rps->enabled)
+ if (!intel_rps_clear_active(rps))
return;
- if (INTEL_GEN(i915) >= 6)
+ if (intel_rps_uses_timer(rps))
+ rps_stop_timer(rps);
+ if (intel_rps_has_interrupts(rps))
rps_disable_interrupts(rps);
- WRITE_ONCE(rps->active, false);
if (rps->last_freq <= rps->idle_freq)
return;
@@ -784,8 +876,15 @@ void intel_rps_park(struct intel_rps *rps)
* (Note we accommodate Cherryview's limitation of only using an
* even bin by applying it to all.)
*/
- rps->cur_freq =
- max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
+ adj = rps->last_adj;
+ if (adj < 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = -2;
+ rps->last_adj = adj;
+ rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
+
+ GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
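
Parking now reuses last_adj as a geometric back-off instead of the old fixed one-bin step: each successive park doubles the downward step, and the ramp starts at -2 so Cherryview's even-bin encoding is always honoured. Assuming no intervening up-clock resets last_adj, the sequence runs:

    /* park #1: last_adj =  0 -> adj = -2, cur_freq -= 2 */
    /* park #2: last_adj = -2 -> adj = -4, cur_freq -= 4 */
    /* park #3: last_adj = -4 -> adj = -8, cur_freq -= 8 */
    /* ... always clamped to rps->min_freq               */
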
void intel_rps_boost(struct i915_request *rq)
@@ -793,7 +892,7 @@ void intel_rps_boost(struct i915_request *rq)
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
unsigned long flags;
- if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
+ if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
return;
/* Serializes with i915_request_retire() */
@@ -802,6 +901,9 @@ void intel_rps_boost(struct i915_request *rq)
!dma_fence_is_signaled_locked(&rq->fence)) {
set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
+ GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
+ rq->fence.context, rq->fence.seqno);
+
if (!atomic_fetch_inc(&rps->num_waiters) &&
READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
@@ -819,7 +921,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);
- if (rps->active) {
+ if (intel_rps_is_active(rps)) {
err = rps_set(rps, val, true);
if (err)
return err;
@@ -828,7 +930,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
- if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
+ if (intel_rps_has_interrupts(rps)) {
struct intel_uncore *uncore = rps_to_uncore(rps);
set(uncore,
@@ -896,12 +998,14 @@ static void gen6_rps_init(struct intel_rps *rps)
static bool rps_reset(struct intel_rps *rps)
{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
/* force a reset */
rps->power.mode = -1;
rps->last_freq = -1;
if (rps_set(rps, rps->min_freq, true)) {
- DRM_ERROR("Failed to reset RPS to initial values\n");
+ drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
return false;
}
@@ -912,20 +1016,18 @@ static bool rps_reset(struct intel_rps *rps)
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
- struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct intel_gt *gt = rps_to_gt(rps);
+ struct intel_uncore *uncore = gt->uncore;
/* Program defaults and thresholds for RPS */
- if (IS_GEN(i915, 9))
+ if (IS_GEN(gt->i915, 9))
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(rps->rp1_freq));
- /* 1 second timeout */
- intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
- GT_INTERVAL_FROM_US(i915, 1000000));
-
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
+ rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
return rps_reset(rps);
}
@@ -936,12 +1038,10 @@ static bool gen8_rps_enable(struct intel_rps *rps)
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
HSW_FREQUENCY(rps->rp1_freq));
- /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
- intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
- 100000000 / 128); /* 1 second timeout */
-
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+ rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
return rps_reset(rps);
}
@@ -953,6 +1053,10 @@ static bool gen6_rps_enable(struct intel_rps *rps)
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
return rps_reset(rps);
}
@@ -1038,6 +1142,10 @@ static bool chv_rps_enable(struct intel_rps *rps)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
/* Setting Fixed Bias */
vlv_punit_get(i915);
@@ -1052,8 +1160,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
}
@@ -1136,6 +1244,9 @@ static bool vlv_rps_enable(struct intel_rps *rps)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
+ /* WaGsvRC0ResidencyMethod:vlv */
+ rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+
vlv_punit_get(i915);
/* Setting Fixed Bias */
@@ -1150,8 +1261,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
}
@@ -1194,33 +1305,71 @@ static unsigned long __ips_gfx_val(struct intel_ips *ips)
return ips->gfx_power + state2;
}
+static bool has_busy_stats(struct intel_rps *rps)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, rps_to_gt(rps), id) {
+ if (!intel_engine_supports_stats(engine))
+ return false;
+ }
+
+ return true;
+}
+
void intel_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
+ bool enabled = false;
+
+ if (!HAS_RPS(i915))
+ return;
+
+ intel_gt_check_clock_frequency(rps_to_gt(rps));
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- if (IS_CHERRYVIEW(i915))
- rps->enabled = chv_rps_enable(rps);
+ if (rps->max_freq <= rps->min_freq)
+ /* leave disabled, no room for dynamic reclocking */;
+ else if (IS_CHERRYVIEW(i915))
+ enabled = chv_rps_enable(rps);
else if (IS_VALLEYVIEW(i915))
- rps->enabled = vlv_rps_enable(rps);
+ enabled = vlv_rps_enable(rps);
else if (INTEL_GEN(i915) >= 9)
- rps->enabled = gen9_rps_enable(rps);
+ enabled = gen9_rps_enable(rps);
else if (INTEL_GEN(i915) >= 8)
- rps->enabled = gen8_rps_enable(rps);
+ enabled = gen8_rps_enable(rps);
else if (INTEL_GEN(i915) >= 6)
- rps->enabled = gen6_rps_enable(rps);
+ enabled = gen6_rps_enable(rps);
else if (IS_IRONLAKE_M(i915))
- rps->enabled = gen5_rps_enable(rps);
+ enabled = gen5_rps_enable(rps);
+ else
+ MISSING_CASE(INTEL_GEN(i915));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
- if (!rps->enabled)
+ if (!enabled)
return;
- drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
- drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
+ GT_TRACE(rps_to_gt(rps),
+ "min:%x, max:%x, freq:[%d, %d]\n",
+ rps->min_freq, rps->max_freq,
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->max_freq));
- drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
- drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
+ GEM_BUG_ON(rps->max_freq < rps->min_freq);
+ GEM_BUG_ON(rps->idle_freq > rps->max_freq);
+
+ GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
+ GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
+
+ if (has_busy_stats(rps))
+ intel_rps_set_timer(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ intel_rps_set_interrupts(rps);
+ else
+ /* Ironlake currently uses intel_ips.ko */ {}
+
+ intel_rps_set_enabled(rps);
}
static void gen6_rps_disable(struct intel_rps *rps)
@@ -1232,7 +1381,9 @@ void intel_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- rps->enabled = false;
+ intel_rps_clear_enabled(rps);
+ intel_rps_clear_interrupts(rps);
+ intel_rps_clear_timer(rps);
if (INTEL_GEN(i915) >= 6)
gen6_rps_disable(rps);
@@ -1308,7 +1459,8 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
CCK_GPLL_CLOCK_CONTROL,
i915->czclk_freq);
- DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+ drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
+ rps->gpll_ref_freq);
}
static void vlv_rps_init(struct intel_rps *rps)
@@ -1336,28 +1488,24 @@ static void vlv_rps_init(struct intel_rps *rps)
i915->mem_freq = 1333;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->max_freq),
- rps->max_freq);
+ drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
rps->efficient_freq = vlv_rps_rpe_freq(rps);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->efficient_freq),
- rps->efficient_freq);
+ drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
rps->rp1_freq = vlv_rps_guar_freq(rps);
- DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->rp1_freq),
- rps->rp1_freq);
+ drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
rps->min_freq = vlv_rps_min_freq(rps);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->min_freq),
- rps->min_freq);
+ drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
vlv_iosf_sb_put(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1387,28 +1535,24 @@ static void chv_rps_init(struct intel_rps *rps)
i915->mem_freq = 1600;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->max_freq),
- rps->max_freq);
+ drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
rps->efficient_freq = chv_rps_rpe_freq(rps);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->efficient_freq),
- rps->efficient_freq);
+ drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
rps->rp1_freq = chv_rps_guar_freq(rps);
- DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->rp1_freq),
- rps->rp1_freq);
+ drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
rps->min_freq = chv_rps_min_freq(rps);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->min_freq),
- rps->min_freq);
+ drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
vlv_iosf_sb_put(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1471,12 +1615,13 @@ static void rps_work(struct work_struct *work)
{
struct intel_rps *rps = container_of(work, typeof(*rps), work);
struct intel_gt *gt = rps_to_gt(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
bool client_boost = false;
int new_freq, adj, min, max;
u32 pm_iir = 0;
spin_lock_irq(&gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
+ pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(&gt->irq_lock);
@@ -1485,6 +1630,10 @@ static void rps_work(struct work_struct *work)
goto out;
mutex_lock(&rps->lock);
+ if (!intel_rps_is_active(rps)) {
+ mutex_unlock(&rps->lock);
+ return;
+ }
pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
@@ -1494,6 +1643,12 @@ static void rps_work(struct work_struct *work)
max = rps->max_freq_softlimit;
if (client_boost)
max = rps->max_freq;
+
+ GT_TRACE(gt,
+ "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
+ pm_iir, yesno(client_boost),
+ adj, new_freq, min, max);
+
if (client_boost && new_freq < rps->boost_freq) {
new_freq = rps->boost_freq;
adj = 0;
@@ -1525,30 +1680,18 @@ static void rps_work(struct work_struct *work)
adj = 0;
}
- rps->last_adj = adj;
-
/*
- * Limit deboosting and boosting to keep ourselves at the extremes
- * when in the respective power modes (i.e. slowly decrease frequencies
- * while in the HIGH_POWER zone and slowly increase frequencies while
- * in the LOW_POWER zone). On idle, we will hit the timeout and drop
- * to the next level quickly, and conversely if busy we expect to
- * hit a waitboost and rapidly switch into max power.
- */
- if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
- (adj > 0 && rps->power.mode == LOW_POWER))
- rps->last_adj = 0;
-
- /* sysfs frequency interfaces may have snuck in while servicing the
- * interrupt
+ * sysfs frequency limits may have snuck in while
+ * servicing the interrupt
*/
new_freq += adj;
new_freq = clamp_t(int, new_freq, min, max);
if (intel_rps_set(rps, new_freq)) {
- DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
- rps->last_adj = 0;
+ drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
+ adj = 0;
}
+ rps->last_adj = adj;
mutex_unlock(&rps->lock);
@@ -1568,6 +1711,8 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
if (unlikely(!events))
return;
+ GT_TRACE(gt, "irq events:%x\n", events);
+
gen6_gt_pm_mask_irq(gt, events);
rps->pm_iir |= events;
@@ -1579,10 +1724,12 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
struct intel_gt *gt = rps_to_gt(rps);
u32 events;
- events = pm_iir & READ_ONCE(rps->pm_events);
+ events = pm_iir & rps->pm_events;
if (events) {
spin_lock(&gt->irq_lock);
+ GT_TRACE(gt, "irq events:%x\n", events);
+
gen6_gt_pm_mask_irq(gt, events);
rps->pm_iir |= events;
@@ -1640,6 +1787,7 @@ void intel_rps_init_early(struct intel_rps *rps)
mutex_init(&rps->power.mutex);
INIT_WORK(&rps->work, rps_work);
+ timer_setup(&rps->timer, rps_timer, 0);
atomic_set(&rps->num_waiters, 0);
}
@@ -1668,9 +1816,10 @@ void intel_rps_init(struct intel_rps *rps)
sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
&params, NULL);
if (params & BIT(31)) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
- (rps->max_freq & 0xff) * 50,
- (params & 0xff) * 50);
+ drm_dbg(&i915->drm,
+ "Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+ (rps->max_freq & 0xff) * 50,
+ (params & 0xff) * 50);
rps->max_freq = params & 0xff;
}
}
@@ -1678,7 +1827,9 @@ void intel_rps_init(struct intel_rps *rps)
/* Finally allow us to boost to max by default */
rps->boost_freq = rps->max_freq;
rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
+
+ /* Start in the middle, from here we will autotune based on workload */
+ rps->cur_freq = rps->efficient_freq;
rps->pm_intrmsk_mbz = 0;
@@ -1695,6 +1846,12 @@ void intel_rps_init(struct intel_rps *rps)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
+void intel_rps_sanitize(struct intel_rps *rps)
+{
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ rps_disable_interrupts(rps);
+}
+
u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
@@ -1722,7 +1879,7 @@ static u32 read_cagf(struct intel_rps *rps)
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
} else {
- freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
+ freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1);
}
return intel_rps_get_cagf(rps, freq);
@@ -1730,7 +1887,7 @@ static u32 read_cagf(struct intel_rps *rps)
u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
- struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
+ struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
intel_wakeref_t wakeref;
u32 freq = 0;
@@ -1930,3 +2087,7 @@ bool i915_gpu_turbo_disable(void)
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rps.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index dfa98194f3b2..8d3c9d663662 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -13,6 +13,7 @@ struct i915_request;
void intel_rps_init_early(struct intel_rps *rps);
void intel_rps_init(struct intel_rps *rps);
+void intel_rps_sanitize(struct intel_rps *rps);
void intel_rps_driver_register(struct intel_rps *rps);
void intel_rps_driver_unregister(struct intel_rps *rps);
@@ -36,4 +37,64 @@ void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+static inline bool intel_rps_is_enabled(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_set_enabled(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_clear_enabled(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline bool intel_rps_is_active(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline void intel_rps_set_active(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline bool intel_rps_clear_active(struct intel_rps *rps)
+{
+ return test_and_clear_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline bool intel_rps_has_interrupts(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline void intel_rps_set_interrupts(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline void intel_rps_clear_interrupts(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline bool intel_rps_uses_timer(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
+static inline void intel_rps_set_timer(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
+static inline void intel_rps_clear_timer(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
#endif /* INTEL_RPS_H */
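
These helpers replace the old enabled/active booleans with atomic bitops on rps->flags, so readers need no lock and park can use the test-and-clear form to run its teardown exactly once. A minimal usage sketch mirroring unpark/park:

    if (intel_rps_is_enabled(rps)) {
            intel_rps_set_active(rps);          /* unpark */
            /* ... submit work ... */
            if (intel_rps_clear_active(rps))    /* park: true only once */
                    rps_stop_timer(rps);        /* or disable interrupts */
    }
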
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index c2e279154bd5..38083f0402d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -31,6 +31,13 @@ struct intel_rps_ei {
u32 media_c0;
};
+enum {
+ INTEL_RPS_ENABLED = 0,
+ INTEL_RPS_ACTIVE,
+ INTEL_RPS_INTERRUPTS,
+ INTEL_RPS_TIMER,
+};
+
struct intel_rps {
struct mutex lock; /* protects enabling and the worker */
@@ -38,9 +45,12 @@ struct intel_rps {
* work, interrupts_enabled and pm_iir are protected by
* dev_priv->irq_lock
*/
+ struct timer_list timer;
struct work_struct work;
- bool enabled;
- bool active;
+ unsigned long flags;
+
+ ktime_t pm_timestamp;
+ u32 pm_interval;
u32 pm_iir;
/* PM interrupt bits that should never be masked */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 74f793423231..d173271c7397 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -65,7 +65,6 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
bool subslice_pg = sseu->has_subslice_pg;
- struct intel_sseu ctx_sseu;
u8 slices, subslices;
u32 rpcs = 0;
@@ -78,31 +77,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
/*
* If i915/perf is active, we want a stable powergating configuration
- * on the system.
- *
- * We could choose full enablement, but on ICL we know there are use
- * cases which disable slices for functional, apart for performance
- * reasons. So in this case we select a known stable subset.
+ * on the system. Use the configuration pinned by i915/perf.
*/
- if (!i915->perf.exclusive_stream) {
- ctx_sseu = *req_sseu;
- } else {
- ctx_sseu = intel_sseu_from_device_info(sseu);
-
- if (IS_GEN(i915, 11)) {
- /*
- * We only need subslice count so it doesn't matter
- * which ones we select - just turn off low bits in the
- * amount of half of all available subslices per slice.
- */
- ctx_sseu.subslice_mask =
- ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
- ctx_sseu.slice_mask = 0x1;
- }
- }
+ if (i915->perf.exclusive_stream)
+ req_sseu = &i915->perf.sseu;
- slices = hweight8(ctx_sseu.slice_mask);
- subslices = hweight8(ctx_sseu.subslice_mask);
+ slices = hweight8(req_sseu->slice_mask);
+ subslices = hweight8(req_sseu->subslice_mask);
/*
* Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
@@ -175,13 +156,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
if (sseu->has_eu_pg) {
u32 val;
- val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
+ val = req_sseu->min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
val &= GEN8_RPCS_EU_MIN_MASK;
rpcs |= val;
- val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
+ val = req_sseu->max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
val &= GEN8_RPCS_EU_MAX_MASK;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 08b56d7ab4f4..4546284fede1 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -119,6 +119,15 @@ static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
+static void __rcu_cacheline_free(struct rcu_head *rcu)
+{
+ struct intel_timeline_cacheline *cl =
+ container_of(rcu, typeof(*cl), rcu);
+
+ i915_active_fini(&cl->active);
+ kfree(cl);
+}
+
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
@@ -127,8 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
i915_vma_put(cl->hwsp->vma);
__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
- i915_active_fini(&cl->active);
- kfree_rcu(cl, rcu);
+ call_rcu(&cl->rcu, __rcu_cacheline_free);
}
__i915_active_call
@@ -203,9 +211,9 @@ static void cacheline_free(struct intel_timeline_cacheline *cl)
i915_active_release(&cl->active);
}
-int intel_timeline_init(struct intel_timeline *timeline,
- struct intel_gt *gt,
- struct i915_vma *hwsp)
+static int intel_timeline_init(struct intel_timeline *timeline,
+ struct intel_gt *gt,
+ struct i915_vma *hwsp)
{
void *vaddr;
@@ -272,7 +280,7 @@ void intel_gt_init_timelines(struct intel_gt *gt)
INIT_LIST_HEAD(&timelines->hwsp_free_list);
}
-void intel_timeline_fini(struct intel_timeline *timeline)
+static void intel_timeline_fini(struct intel_timeline *timeline)
{
GEM_BUG_ON(atomic_read(&timeline->pin_count));
GEM_BUG_ON(!list_empty(&timeline->requests));
@@ -329,6 +337,13 @@ int intel_timeline_pin(struct intel_timeline *tl)
return 0;
}
+void intel_timeline_reset_seqno(const struct intel_timeline *tl)
+{
+ /* Must be pinned to be writable, and no requests in flight. */
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+}
+
void intel_timeline_enter(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
@@ -357,8 +372,16 @@ void intel_timeline_enter(struct intel_timeline *tl)
return;
spin_lock(&timelines->lock);
- if (!atomic_fetch_inc(&tl->active_count))
+ if (!atomic_fetch_inc(&tl->active_count)) {
+ /*
+ * The HWSP is volatile, and may have been lost while inactive,
+ * e.g. across suspend/resume. Be paranoid, and ensure that
+ * the HWSP value matches our seqno so we don't proclaim
+ * the next request as already complete.
+ */
+ intel_timeline_reset_seqno(tl);
list_add_tail(&tl->link, &timelines->active_list);
+ }
spin_unlock(&timelines->lock);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index f5b7eade3809..4298b9ac7327 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -31,11 +31,6 @@
#include "i915_syncmap.h"
#include "gt/intel_timeline_types.h"
-int intel_timeline_init(struct intel_timeline *tl,
- struct intel_gt *gt,
- struct i915_vma *hwsp);
-void intel_timeline_fini(struct intel_timeline *tl);
-
struct intel_timeline *
intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
@@ -84,6 +79,8 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
void intel_timeline_exit(struct intel_timeline *tl);
void intel_timeline_unpin(struct intel_timeline *tl);
+void intel_timeline_reset_seqno(const struct intel_timeline *tl);
+
int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *until,
u32 *hwsp_offset);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 5176ad1a3976..90a2b9e399b0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -485,25 +485,14 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = engine->i915;
-
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
- /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
- if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
-
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
- /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
- if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
-
/* WaPushConstantDereferenceHoldDisable:cnl */
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
@@ -837,7 +826,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
GEN10_L3BANK_MASK;
- DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
+ drm_dbg(&i915->drm, "L3 fuse = %x\n", l3_fuse);
l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
} else {
l3_en = ~0;
@@ -846,7 +835,8 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
slice = fls(sseu->slice_mask) - 1;
subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
if (!subslice) {
- DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
+ drm_warn(&i915->drm,
+ "No common index found between subslice mask %x and L3 bank mask %x!\n",
intel_sseu_get_subslices(sseu, slice), l3_en);
subslice = fls(l3_en);
drm_WARN_ON(&i915->drm, !subslice);
@@ -861,7 +851,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
}
- DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);
+ drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
@@ -871,12 +861,6 @@ cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
wa_init_mcr(i915, wal);
- /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
- wa_write_or(wal,
- GAMT_CHKN_BIT_REG,
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
-
/* WaInPlaceDecompressionHang:cnl */
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
@@ -933,15 +917,20 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
- /* Wa_1607087056:icl */
- wa_write_or(wal,
- SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+ /* Wa_1607087056:icl,ehl,jsl */
+ if (IS_ICELAKE(i915) ||
+ IS_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) {
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+ }
}
static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
+ wa_init_mcr(i915, wal);
+
/* Wa_1409420604:tgl */
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
@@ -1379,12 +1368,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
- /*
- * Wa_1409085225:tgl
- * Wa_14010229206:tgl
- */
- wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
-
/* Wa_1408615072:tgl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
@@ -1402,6 +1385,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
FF_DOP_CLOCK_GATE_DISABLE);
+
+ /*
+ * Wa_1409085225:tgl
+ * Wa_14010229206:tgl
+ */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
if (IS_GEN(i915, 11)) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 4a53ded7c2dd..b8dd3cbc8696 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -28,7 +28,6 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
#include "mock_engine.h"
#include "selftests/mock_request.h"
@@ -328,7 +327,6 @@ int mock_engine_init(struct intel_engine_cs *engine)
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
- intel_engine_pool_init(&engine->pool);
ce = create_kernel_context(engine);
if (IS_ERR(ce))
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index e874dfaa5316..52af1cee9a94 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -24,6 +24,7 @@ static int request_sync(struct i915_request *rq)
/* Opencode i915_request_add() so we can keep the timeline locked. */
__i915_request_commit(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
__i915_request_queue(rq, NULL);
timeout = i915_request_wait(rq, 0, HZ / 10);
@@ -154,10 +155,7 @@ static int live_context_size(void *arg)
*/
for_each_engine(engine, gt, id) {
- struct {
- struct drm_i915_gem_object *state;
- void *pinned;
- } saved;
+ struct file *saved;
if (!engine->context_size)
continue;
@@ -171,8 +169,7 @@ static int live_context_size(void *arg)
* active state is sufficient, we are only checking that we
* don't use more than we planned.
*/
- saved.state = fetch_and_zero(&engine->default_state);
- saved.pinned = fetch_and_zero(&engine->pinned_default_state);
+ saved = fetch_and_zero(&engine->default_state);
/* Overlaps with the execlists redzone */
engine->context_size += I915_GTT_PAGE_SIZE;
@@ -181,8 +178,7 @@ static int live_context_size(void *arg)
engine->context_size -= I915_GTT_PAGE_SIZE;
- engine->pinned_default_state = saved.pinned;
- engine->default_state = saved.state;
+ engine->default_state = saved;
intel_engine_pm_put(engine);
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 09ff8e4f88af..242181a5214c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -7,6 +7,7 @@
#include "selftest_llc.h"
#include "selftest_rc6.h"
+#include "selftest_rps.h"
static int live_gt_resume(void *arg)
{
@@ -52,6 +53,13 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_rc6_manual),
+ SUBTEST(live_rps_clock_interval),
+ SUBTEST(live_rps_control),
+ SUBTEST(live_rps_frequency_cs),
+ SUBTEST(live_rps_frequency_srm),
+ SUBTEST(live_rps_power),
+ SUBTEST(live_rps_interrupt),
+ SUBTEST(live_rps_dynamic),
SUBTEST(live_gt_resume),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 6f06ba750a0a..824f99c4cc7c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -21,7 +21,8 @@
#include "gem/selftests/mock_context.h"
#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
-#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
@@ -68,26 +69,41 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine,
engine->props.heartbeat_interval_ms = saved;
}
+static bool is_active(struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return true;
+
+ if (i915_request_on_hold(rq))
+ return true;
+
+ if (i915_request_started(rq))
+ return true;
+
+ return false;
+}
+
static int wait_for_submit(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
timeout += jiffies;
do {
- cond_resched();
- intel_engine_flush_submission(engine);
+ bool done = time_after(jiffies, timeout);
- if (READ_ONCE(engine->execlists.pending[0]))
- continue;
-
- if (i915_request_is_active(rq))
+ if (i915_request_completed(rq)) /* that was quick! */
return 0;
- if (i915_request_started(rq)) /* that was quick! */
+ /* Wait until the HW has acknowledged the submission (or err) */
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
- } while (time_before(jiffies, timeout));
- return -ETIME;
+ if (done)
+ return -ETIME;
+
+ cond_resched();
+ } while (1);
}
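
Note how the rewritten loop latches the deadline into done before making the final checks, so a request that completes or becomes active exactly as the timeout expires is still reported as success rather than -ETIME. The shape of the pattern (condition() is a placeholder):

    do {
            bool done = time_after(jiffies, timeout);

            if (condition())        /* still evaluated on the last pass */
                    return 0;
            if (done)
                    return -ETIME;

            cond_resched();
    } while (1);
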
static int wait_for_reset(struct intel_engine_cs *engine,
@@ -634,9 +650,9 @@ static int live_error_interrupt(void *arg)
error_repr(p->error[i]));
if (!i915_request_started(client[i])) {
- pr_debug("%s: %s request not stated!\n",
- engine->name,
- error_repr(p->error[i]));
+ pr_err("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
err = -ETIME;
goto out;
}
@@ -644,9 +660,10 @@ static int live_error_interrupt(void *arg)
/* Kick the tasklet to process the error */
intel_engine_flush_submission(engine);
if (client[i]->fence.error != p->error[i]) {
- pr_err("%s: %s request completed with wrong error code: %d\n",
+ pr_err("%s: %s request (%s) with wrong error code: %d\n",
engine->name,
error_repr(p->error[i]),
+ i915_request_completed(client[i]) ? "completed" : "running",
client[i]->fence.error);
err = -EINVAL;
goto out;
@@ -929,7 +946,7 @@ create_rewinder(struct intel_context *ce,
goto err;
}
- cs = intel_ring_begin(rq, 10);
+ cs = intel_ring_begin(rq, 14);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err;
@@ -941,8 +958,8 @@ create_rewinder(struct intel_context *ce,
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_NEQ_SDD;
- *cs++ = 0;
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = idx;
*cs++ = offset;
*cs++ = 0;
@@ -951,6 +968,11 @@ create_rewinder(struct intel_context *ce,
*cs++ = offset + idx * sizeof(u32);
*cs++ = 0;
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = idx + 1;
+
intel_ring_advance(rq, cs);
rq->sched.attr.priority = I915_PRIORITY_MASK;
@@ -984,7 +1006,7 @@ static int live_timeslice_rewind(void *arg)
for_each_engine(engine, gt, id) {
enum { A1, A2, B1 };
- enum { X = 1, Y, Z };
+ enum { X = 1, Z, Y };
struct i915_request *rq[3] = {};
struct intel_context *ce;
unsigned long heartbeat;
@@ -1017,13 +1039,13 @@ static int live_timeslice_rewind(void *arg)
goto err;
}
- rq[0] = create_rewinder(ce, NULL, slot, 1);
+ rq[0] = create_rewinder(ce, NULL, slot, X);
if (IS_ERR(rq[0])) {
intel_context_put(ce);
goto err;
}
- rq[1] = create_rewinder(ce, NULL, slot, 2);
+ rq[1] = create_rewinder(ce, NULL, slot, Y);
intel_context_put(ce);
if (IS_ERR(rq[1]))
goto err;
@@ -1041,7 +1063,7 @@ static int live_timeslice_rewind(void *arg)
goto err;
}
- rq[2] = create_rewinder(ce, rq[0], slot, 3);
+ rq[2] = create_rewinder(ce, rq[0], slot, Z);
intel_context_put(ce);
if (IS_ERR(rq[2]))
goto err;
@@ -1052,18 +1074,14 @@ static int live_timeslice_rewind(void *arg)
engine->name);
goto err;
}
- GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
- GEM_BUG_ON(!i915_request_is_active(rq[A1]));
- GEM_BUG_ON(!i915_request_is_active(rq[A2]));
- GEM_BUG_ON(!i915_request_is_active(rq[B1]));
-
- /* Wait for the timeslice to kick in */
- del_timer(&engine->execlists.timer);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- intel_engine_flush_submission(engine);
-
+ if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_flush_submission(engine);
+ }
/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
GEM_BUG_ON(!i915_request_is_active(rq[A1]));
GEM_BUG_ON(!i915_request_is_active(rq[B1]));
@@ -1228,8 +1246,14 @@ static int live_timeslice_queue(void *arg)
if (err)
goto err_rq;
- intel_engine_flush_submission(engine);
+ /* Wait until we ack the release_queue and start timeslicing */
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (READ_ONCE(engine->execlists.pending[0]));
+
if (!READ_ONCE(engine->execlists.timer.expires) &&
+ execlists_active(&engine->execlists) == rq &&
!i915_request_completed(rq)) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
@@ -2030,6 +2054,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
return 0;
+ if (!intel_has_reset_engine(arg->engine->gt))
+ return 0;
+
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
@@ -2630,7 +2657,7 @@ static int create_gang(struct intel_engine_cs *engine,
if (IS_ERR(rq))
goto err_obj;
- rq->batch = vma;
+ rq->batch = i915_vma_get(vma);
i915_request_get(rq);
i915_vma_lock(vma);
@@ -2654,6 +2681,7 @@ static int create_gang(struct intel_engine_cs *engine,
return 0;
err_rq:
+ i915_vma_put(rq->batch);
i915_request_put(rq);
err_obj:
i915_gem_object_put(obj);
@@ -2750,6 +2778,7 @@ static int live_preempt_gang(void *arg)
err = -ETIME;
}
+ i915_vma_put(rq->batch);
i915_request_put(rq);
rq = n;
}
@@ -2763,6 +2792,331 @@ static int live_preempt_gang(void *arg)
return 0;
}
+static struct i915_vma *
+create_gpr_user(struct intel_engine_cs *engine,
+ struct i915_vma *result,
+ unsigned int offset)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+ int i;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, result->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(vma);
+ return ERR_CAST(cs);
+ }
+
+ /* All GPR are clear for new contexts. We use GPR(0) as a constant */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, 0);
+ *cs++ = 1;
+
+ for (i = 1; i < NUM_GPR; i++) {
+ u64 addr;
+
+ /*
+ * Perform: GPR[i]++
+ *
+ * As we read and write into the context saved GPR[i], if
+ * we restart this batch buffer from an earlier point, we
+ * will repeat the increment and store a value > 1.
+ */
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
+
+ addr = result->node.start + offset + i * sizeof(*cs);
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = CS_GPR(engine, 2 * i);
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = i;
+ *cs++ = lower_32_bits(result->node.start);
+ *cs++ = upper_32_bits(result->node.start);
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ return vma;
+}
+
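
Each iteration of the loop above emits three pieces: an MI_MATH block computing GPR[i] += GPR[0] (with GPR[0] preloaded to 1), an MI_STORE_REGISTER_MEM of the incremented value, and an MI_SEMAPHORE_WAIT that stalls until the preempting writer stores a value >= i into the start of the result page. Schematically, per i in 1..NUM_GPR-1:

    /* MI_MATH:            ACCU = GPR[i] + GPR[0]; GPR[i] = ACCU  (GPR[i]++) */
    /* MI_SRM:             result[offset/4 + i] = GPR[i] (lower dword)       */
    /* MI_SEMAPHORE_WAIT:  poll until *result >= i  (preemption point)       */

A rerun of the batch from an earlier arbitration point would repeat an increment, which live_preempt_user() below detects as a stored value other than 1.
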
+static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_ggtt_pin(vma, 0, 0);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_request *
+create_gpr_client(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ unsigned int offset)
+{
+ struct i915_vma *batch, *vma;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ vma = i915_vma_instance(global->obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_ce;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_ce;
+
+ batch = create_gpr_user(engine, vma, offset);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_vma;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ i915_vma_lock(batch);
+ if (!err)
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(batch);
+ i915_vma_unpin(batch);
+
+ if (!err)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+out_batch:
+ i915_vma_put(batch);
+out_vma:
+ i915_vma_unpin(vma);
+out_ce:
+ intel_context_put(ce);
+ return err ? ERR_PTR(err) : rq;
+}
+
+static int preempt_user(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ int id)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_PRIORITY_MAX
+ };
+ struct i915_request *rq;
+ int err = 0;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(global);
+ *cs++ = 0;
+ *cs++ = id;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ engine->schedule(rq, &attr);
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int live_preempt_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *global;
+ enum intel_engine_id id;
+ u32 *result;
+ int err = 0;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ /*
+ * In our other tests, we look at preemption in carefully
+ * controlled conditions in the ringbuffer. Since most of the
+ * time is spent in user batches, most of our preemptions naturally
+ * occur there. We want to verify that when we preempt inside a batch
+ * we continue on from the current instruction and do not roll back
+ * to the start, or another earlier arbitration point.
+ *
+ * To verify this, we create a batch which is a mixture of
+ * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
+ * a few preempting contexts thrown into the mix, we look for any
+ * repeated instructions (which show up as incorrect values).
+ */
+
+ global = create_global(gt, 4096);
+ if (IS_ERR(global))
+ return PTR_ERR(global);
+
+ result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ i915_vma_unpin_and_release(&global, 0);
+ return PTR_ERR(result);
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *client[3] = {};
+ struct igt_live_test t;
+ int i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+ continue; /* we need per-context GPR */
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ memset(result, 0, 4096);
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_request *rq;
+
+ rq = create_gpr_client(engine, global,
+ NUM_GPR * i * sizeof(u32));
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end_test;
+ }
+
+ client[i] = rq;
+ }
+
+ /* Continuously preempt the set of 3 running contexts */
+ for (i = 1; i <= NUM_GPR; i++) {
+ err = preempt_user(engine, global, i);
+ if (err)
+ goto end_test;
+ }
+
+ if (READ_ONCE(result[0]) != NUM_GPR) {
+ pr_err("%s: Failed to release semaphore\n",
+ engine->name);
+ err = -EIO;
+ goto end_test;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ int gpr;
+
+ if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
+ err = -ETIME;
+ goto end_test;
+ }
+
+ for (gpr = 1; gpr < NUM_GPR; gpr++) {
+ if (result[NUM_GPR * i + gpr] != 1) {
+ pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
+ engine->name,
+ i, gpr, result[NUM_GPR * i + gpr]);
+ err = -EINVAL;
+ goto end_test;
+ }
+ }
+ }
+
+end_test:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i])
+ break;
+
+ i915_request_put(client[i]);
+ }
+
+ /* Flush the semaphores on error */
+ smp_store_mb(result[0], -1);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
static int live_preempt_timeout(void *arg)
{
struct intel_gt *gt = arg;
@@ -3970,6 +4324,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_timeout),
+ SUBTEST(live_preempt_user),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
@@ -3987,35 +4342,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
return intel_gt_live_subtests(tests, &i915->gt);
}
-static void hexdump(const void *buf, size_t len)
-{
- const size_t rowsize = 8 * sizeof(u32);
- const void *prev = NULL;
- bool skip = false;
- size_t pos;
-
- for (pos = 0; pos < len; pos += rowsize) {
- char line[128];
-
- if (prev && !memcmp(prev, buf + pos, rowsize)) {
- if (!skip) {
- pr_info("*\n");
- skip = true;
- }
- continue;
- }
-
- WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
- rowsize, sizeof(u32),
- line, sizeof(line),
- false) >= sizeof(line));
- pr_info("[%04zx] %s\n", pos, line);
-
- prev = buf + pos;
- skip = false;
- }
-}
-
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
const u32 offset =
@@ -4097,13 +4423,12 @@ static int live_lrc_layout(void *arg)
if (!engine->default_state)
continue;
- hw = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
+ hw = shmem_pin_map(engine->default_state);
if (IS_ERR(hw)) {
err = PTR_ERR(hw);
break;
}
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
engine->kernel_context,
@@ -4164,13 +4489,13 @@ static int live_lrc_layout(void *arg)
if (err) {
pr_info("%s: HW register image:\n", engine->name);
- hexdump(hw, PAGE_SIZE);
+ igt_hexdump(hw, PAGE_SIZE);
pr_info("%s: SW register image:\n", engine->name);
- hexdump(lrc, PAGE_SIZE);
+ igt_hexdump(lrc, PAGE_SIZE);
}
- i915_gem_object_unpin_map(engine->default_state);
+ shmem_unpin_map(engine->default_state, hw);
if (err)
break;
}
@@ -4239,10 +4564,35 @@ static int live_lrc_fixed(void *arg)
"BB_STATE"
},
{
+ i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)),
+ lrc_ring_wa_bb_per_ctx(engine),
+ "RING_BB_PER_CTX_PTR"
+ },
+ {
+ i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)),
+ lrc_ring_indirect_ptr(engine),
+ "RING_INDIRECT_CTX_PTR"
+ },
+ {
+ i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)),
+ lrc_ring_indirect_offset(engine),
+ "RING_INDIRECT_CTX_OFFSET"
+ },
+ {
i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
CTX_TIMESTAMP - 1,
"RING_CTX_TIMESTAMP"
},
+ {
+ i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)),
+ lrc_ring_gpr0(engine),
+ "RING_CS_GPR0"
+ },
+ {
+ i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)),
+ lrc_ring_cmd_buf_cctl(engine),
+ "RING_CMD_BUF_CCTL"
+ },
{ },
}, *t;
u32 *hw;
@@ -4250,13 +4600,12 @@ static int live_lrc_fixed(void *arg)
if (!engine->default_state)
continue;
- hw = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
+ hw = shmem_pin_map(engine->default_state);
if (IS_ERR(hw)) {
err = PTR_ERR(hw);
break;
}
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
for (t = tbl; t->name; t++) {
int dw = find_offset(hw, t->reg);
@@ -4272,7 +4621,7 @@ static int live_lrc_fixed(void *arg)
}
}
- i915_gem_object_unpin_map(engine->default_state);
+ shmem_unpin_map(engine->default_state, hw);
}
return err;
@@ -4828,6 +5177,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
{
struct i915_vma *batch;
u32 dw, x, *cs, *hw;
+ u32 *defaults;
batch = create_user_vma(ce->vm, SZ_64K);
if (IS_ERR(batch))
@@ -4839,10 +5189,17 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
return ERR_CAST(cs);
}
+ defaults = shmem_pin_map(ce->engine->default_state);
+ if (!defaults) {
+ i915_gem_object_unpin_map(batch->obj);
+ i915_vma_put(batch);
+ return ERR_PTR(-ENOMEM);
+ }
+
x = 0;
dw = 0;
- hw = ce->engine->pinned_default_state;
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw = defaults;
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & 0x7f;
@@ -4872,6 +5229,8 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
*cs++ = MI_BATCH_BUFFER_END;
+ shmem_unpin_map(ce->engine->default_state, defaults);
+
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
@@ -4982,6 +5341,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
{
struct i915_vma *batch;
u32 dw, *cs, *hw;
+ u32 *defaults;
batch = create_user_vma(ce->vm, SZ_64K);
if (IS_ERR(batch))
@@ -4993,9 +5353,16 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
return ERR_CAST(cs);
}
+ defaults = shmem_pin_map(ce->engine->default_state);
+ if (!defaults) {
+ i915_gem_object_unpin_map(batch->obj);
+ i915_vma_put(batch);
+ return ERR_PTR(-ENOMEM);
+ }
+
dw = 0;
- hw = ce->engine->pinned_default_state;
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw = defaults;
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & 0x7f;
@@ -5022,6 +5389,8 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
*cs++ = MI_BATCH_BUFFER_END;
+ shmem_unpin_map(ce->engine->default_state, defaults);
+
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
@@ -5089,6 +5458,7 @@ static int compare_isolation(struct intel_engine_cs *engine,
{
u32 x, dw, *hw, *lrc;
u32 *A[2], *B[2];
+ u32 *defaults;
int err = 0;
A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
@@ -5119,12 +5489,18 @@ static int compare_isolation(struct intel_engine_cs *engine,
err = PTR_ERR(lrc);
goto err_B1;
}
- lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ lrc += LRC_STATE_OFFSET / sizeof(*hw);
+
+ defaults = shmem_pin_map(ce->engine->default_state);
+ if (!defaults) {
+ err = -ENOMEM;
+ goto err_lrc;
+ }
x = 0;
dw = 0;
- hw = engine->pinned_default_state;
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw = defaults;
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & 0x7f;
@@ -5155,7 +5531,6 @@ static int compare_isolation(struct intel_engine_cs *engine,
A[0][x], B[0][x], B[1][x],
poison, lrc[dw + 1]);
err = -EINVAL;
- break;
}
}
dw += 2;
@@ -5164,6 +5539,8 @@ static int compare_isolation(struct intel_engine_cs *engine,
} while (dw < PAGE_SIZE / sizeof(u32) &&
(hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+ shmem_unpin_map(ce->engine->default_state, defaults);
+err_lrc:
i915_gem_object_unpin_map(ce->state->obj);
err_B1:
i915_gem_object_unpin_map(result[1]->obj);
@@ -5294,6 +5671,7 @@ static int live_lrc_isolation(void *arg)
0xffffffff,
0xffff0000,
};
+ int err = 0;
/*
* Our goal is try and verify that per-context state cannot be
@@ -5304,7 +5682,6 @@ static int live_lrc_isolation(void *arg)
*/
for_each_engine(engine, gt, id) {
- int err = 0;
int i;
/* Just don't even ask */
@@ -5313,25 +5690,180 @@ static int live_lrc_isolation(void *arg)
continue;
intel_engine_pm_get(engine);
- if (engine->pinned_default_state) {
- for (i = 0; i < ARRAY_SIZE(poison); i++) {
- err = __lrc_isolation(engine, poison[i]);
- if (err)
- break;
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ int result;
- err = __lrc_isolation(engine, ~poison[i]);
- if (err)
- break;
- }
+ result = __lrc_isolation(engine, poison[i]);
+ if (result && !err)
+ err = result;
+
+ result = __lrc_isolation(engine, ~poison[i]);
+ if (result && !err)
+ err = result;
+ }
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
}
+ }
+
+ return err;
+}
+
+static int indirect_ctx_submit_req(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
+
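+/* The canary dword lives at offset 3K within the reserved wa_bb page */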
+#define CTX_BB_CANARY_OFFSET (3 * 1024)
+#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
+
+static u32 *
+emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(RING_START(0));
+ *cs++ = i915_ggtt_offset(ce->state) +
+ context_wa_bb_offset(ce) +
+ CTX_BB_CANARY_OFFSET;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static void
+indirect_ctx_bb_setup(struct intel_context *ce)
+{
+ u32 *cs = context_indirect_bb(ce);
+
+ cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
+
+ setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
+}
+
+static bool check_ring_start(struct intel_context *ce)
+{
+ const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
+ LRC_STATE_OFFSET + context_wa_bb_offset(ce);
+
+ if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
+ return true;
+
+ pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n",
+ ctx_bb[CTX_BB_CANARY_INDEX],
+ ce->lrc_reg_state[CTX_RING_START]);
+
+ return false;
+}
+
+static int indirect_ctx_bb_check(struct intel_context *ce)
+{
+ int err;
+
+ err = indirect_ctx_submit_req(ce);
+ if (err)
+ return err;
+
+ if (!check_ring_start(ce))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
+{
+ struct intel_context *a, *b;
+ int err;
+
+ a = intel_context_create(engine);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+ err = intel_context_pin(a);
+ if (err)
+ goto put_a;
+
+ b = intel_context_create(engine);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto unpin_a;
+ }
+ err = intel_context_pin(b);
+ if (err)
+ goto put_b;
+
+ /* We use the already reserved extra page in context state */
+ if (!a->wa_bb_page) {
+ GEM_BUG_ON(b->wa_bb_page);
+ GEM_BUG_ON(INTEL_GEN(engine->i915) == 12);
+ goto unpin_b;
+ }
+
+ /*
+ * In order to test that our per-context bb is truly per context, and
+ * executes at the intended point of the context restore process, make
+ * the batch store the ring start value to memory. As the ring start is
+ * restored prior to running the indirect ctx bb, and as it differs for
+ * each context, it suits this purpose.
+ */
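+ /*
+ * Rough sequence exercised below (a sketch of the intent, inferred
+ * from the helpers above rather than quoted from the patch):
+ *
+ * setup: poison the canary slot (0xdeadf00d) and emit an SRM of
+ * RING_START into it from the indirect ctx bb
+ * submit: a nop request forces a context save/restore cycle
+ * check: the canary must now match lrc_reg_state[CTX_RING_START]
+ */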
+ indirect_ctx_bb_setup(a);
+ indirect_ctx_bb_setup(b);
+
+ err = indirect_ctx_bb_check(a);
+ if (err)
+ goto unpin_b;
+
+ err = indirect_ctx_bb_check(b);
+
+unpin_b:
+ intel_context_unpin(b);
+put_b:
+ intel_context_put(b);
+unpin_a:
+ intel_context_unpin(a);
+put_a:
+ intel_context_put(a);
+
+ return err;
+}
+
+static int live_lrc_indirect_ctx_bb(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_lrc_indirect_ctx_bb(engine);
intel_engine_pm_put(engine);
+
if (igt_flush_test(gt->i915))
err = -EIO;
+
if (err)
- return err;
+ break;
}
- return 0;
+ return err;
}
static void garbage_reset(struct intel_engine_cs *engine,
@@ -5365,7 +5897,7 @@ static struct i915_request *garbage(struct intel_context *ce,
prandom_bytes_state(prng,
ce->lrc_reg_state,
ce->engine->context_size -
- LRC_STATE_PN * PAGE_SIZE);
+ LRC_STATE_OFFSET);
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
@@ -5569,6 +6101,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_timestamp),
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
+ SUBTEST(live_lrc_indirect_ctx_bb),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 95b165faeba7..2dc460624bbc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -11,6 +11,7 @@
#include "selftest_rc6.h"
#include "selftests/i915_random.h"
+#include "selftests/librapl.h"
static u64 rc6_residency(struct intel_rc6 *rc6)
{
@@ -31,7 +32,9 @@ int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
struct intel_rc6 *rc6 = &gt->rc6;
+ u64 rc0_power, rc6_power;
intel_wakeref_t wakeref;
+ ktime_t dt;
u64 res[2];
int err = 0;
@@ -54,7 +57,12 @@ int live_rc6_manual(void *arg)
msleep(1); /* wakeup is not immediate, takes about 100us on icl */
res[0] = rc6_residency(rc6);
+
+ dt = ktime_get();
+ rc0_power = librapl_energy_uJ();
msleep(250);
+ rc0_power = librapl_energy_uJ() - rc0_power;
+ dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
@@ -63,13 +71,24 @@ int live_rc6_manual(void *arg)
goto out_unlock;
}
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt));
+ if (!rc0_power) {
+ pr_err("No power measured while in RC0\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
/* Manually enter RC6 */
intel_rc6_park(rc6);
res[0] = rc6_residency(rc6);
+ intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
+ dt = ktime_get();
+ rc6_power = librapl_energy_uJ();
msleep(100);
+ rc6_power = librapl_energy_uJ() - rc6_power;
+ dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
-
if (res[1] == res[0]) {
pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
@@ -78,6 +97,15 @@ int live_rc6_manual(void *arg)
err = -EINVAL;
}
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, ktime_to_ns(dt));
+ pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ rc0_power, rc6_power);
+ if (2 * rc6_power > rc0_power) {
+ pr_err("GPU leaked energy while in RC6!\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
/* Restore what should have been the original state! */
intel_rc6_unpark(rc6);
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 9995faadd7e8..3350e7c995bc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -54,6 +54,8 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
*cs++ = STACK_MAGIC;
*cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
vma->private = intel_context_create(engine); /* dummy residuals */
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
new file mode 100644
index 000000000000..6275d69aa9cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -0,0 +1,1331 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/sort.h>
+
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "selftest_rps.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/librapl.h"
+
+/* Try to isolate the impact of cstates from determining the frequency response */
+#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
+
+static unsigned long engine_heartbeat_disable(struct intel_engine_cs *engine)
+{
+ unsigned long old;
+
+ old = fetch_and_zero(&engine->props.heartbeat_interval_ms);
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+
+ return old;
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
+static void dummy_rps_work(struct work_struct *wrk)
+{
+}
+
+static int cmp_u64(const void *A, const void *B)
+{
+ const u64 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static struct i915_vma *
+create_spin_counter(struct intel_engine_cs *engine,
+ struct i915_address_space *vm,
+ bool srm,
+ u32 **cancel,
+ u32 **counter)
+{
+ enum {
+ COUNT,
+ INC,
+ __NGPR__,
+ };
+#define CS_GPR(x) GEN8_RING_CS_GPR(engine->mmio_base, x)
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned long end;
+ u32 *base, *cs;
+ int loop, i;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, 64 << 10);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ end = obj->base.size / sizeof(u32) - 1;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ base = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(base)) {
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+ return ERR_CAST(base);
+ }
+ cs = base;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(__NGPR__ * 2);
+ for (i = 0; i < __NGPR__; i++) {
+ *cs++ = i915_mmio_reg_offset(CS_GPR(i));
+ *cs++ = 0;
+ *cs++ = i915_mmio_reg_offset(CS_GPR(i)) + 4;
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(INC));
+ *cs++ = 1;
+
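+ /* Remember the loop entry; the MI_BB_START below jumps back here */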
+ loop = cs - base;
+
+ /* Unroll the loop to avoid MI_BB_START stalls impacting measurements */
+ for (i = 0; i < 1024; i++) {
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(COUNT));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(INC));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(COUNT), MI_MATH_REG_ACCU);
+
+ if (srm) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT));
+ *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs));
+ *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs));
+ }
+ }
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8;
+ *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs));
+ *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs));
+ GEM_BUG_ON(cs - base > end);
+
+ i915_gem_object_flush_map(obj);
+
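+ /* Writing MI_BATCH_BUFFER_END via *cancel breaks the infinite loop */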
+ *cancel = base + loop;
+ *counter = srm ? memset32(base + end, 0, 1) : NULL;
+ return vma;
+}
+
+static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms)
+{
+ u8 history[64], i;
+ unsigned long end;
+ int sleep;
+
+ i = 0;
+ memset(history, freq, sizeof(history));
+ sleep = 20;
+
+ /* The PCU does not change instantly, but drifts towards the goal? */
+ end = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ u8 act;
+
+ act = read_cagf(rps);
+ if (time_after(jiffies, end))
+ return act;
+
+ /* Target acquired */
+ if (act == freq)
+ return act;
+
+ /* Any change within the last N samples? */
+ if (!memchr_inv(history, act, sizeof(history)))
+ return act;
+
+ history[i] = act;
+ i = (i + 1) % ARRAY_SIZE(history);
+
+ usleep_range(sleep, 2 * sleep);
+ sleep *= 2;
+ if (sleep > timeout_ms * 20)
+ sleep = timeout_ms * 20;
+ } while (1);
+}
+
+static u8 rps_set_check(struct intel_rps *rps, u8 freq)
+{
+ mutex_lock(&rps->lock);
+ GEM_BUG_ON(!intel_rps_is_active(rps));
+ intel_rps_set(rps, freq);
+ GEM_BUG_ON(rps->last_freq != freq);
+ mutex_unlock(&rps->lock);
+
+ return wait_for_freq(rps, freq, 50);
+}
+
+static void show_pstate_limits(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (IS_BROXTON(i915)) {
+ pr_info("P_STATE_CAP[%x]: 0x%08x\n",
+ i915_mmio_reg_offset(BXT_RP_STATE_CAP),
+ intel_uncore_read(rps_to_uncore(rps),
+ BXT_RP_STATE_CAP));
+ } else if (IS_GEN(i915, 9)) {
+ pr_info("P_STATE_LIMITS[%x]: 0x%08x\n",
+ i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS),
+ intel_uncore_read(rps_to_uncore(rps),
+ GEN9_RP_STATE_LIMITS));
+ }
+}
+
+int live_rps_clock_interval(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ intel_gt_pm_get(gt);
+ intel_rps_disable(&gt->rps);
+
+ intel_gt_check_clock_frequency(gt);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ u32 cycles;
+ u64 dt;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ intel_uncore_write_fw(gt->uncore, GEN6_RP_CUR_UP_EI, 0);
+
+ /* Set the evaluation interval to infinity! */
+ intel_uncore_write_fw(gt->uncore,
+ GEN6_RP_UP_EI, 0xffffffff);
+ intel_uncore_write_fw(gt->uncore,
+ GEN6_RP_UP_THRESHOLD, 0xffffffff);
+
+ intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL,
+ GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG);
+
+ if (wait_for(intel_uncore_read_fw(gt->uncore,
+ GEN6_RP_CUR_UP_EI),
+ 10)) {
+ /* Just skip the test; assume lack of HW support */
+ pr_notice("%s: rps evaluation interval not ticking\n",
+ engine->name);
+ err = -ENODEV;
+ } else {
+ ktime_t dt_[5];
+ u32 cycles_[5];
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ preempt_disable();
+
+ dt_[i] = ktime_get();
+ cycles_[i] = -intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+
+ udelay(1000);
+
+ dt_[i] = ktime_sub(ktime_get(), dt_[i]);
+ cycles_[i] += intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+
+ preempt_enable();
+ }
+
+ /* Use a weighted median of both cycles/dt; close enough */
+ sort(cycles_, 5, sizeof(*cycles_), cmp_u32, NULL);
+ cycles = (cycles_[1] + 2 * cycles_[2] + cycles_[3]) / 4;
+ sort(dt_, 5, sizeof(*dt_), cmp_u64, NULL);
+ dt = div_u64(dt_[1] + 2 * dt_[2] + dt_[3], 4);
+ }
+
+ intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL, 0);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+
+ if (err == 0) {
+ u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
+ u32 expected =
+ intel_gt_ns_to_pm_interval(gt, dt);
+
+ pr_info("%s: rps counted %d C0 cycles [%lldns] in %lldns [%d cycles], using GT clock frequency of %uKHz\n",
+ engine->name, cycles, time, dt, expected,
+ gt->clock_frequency / 1000);
+
+ if (10 * time < 8 * dt ||
+ 8 * time > 10 * dt) {
+ pr_err("%s: rps clock time does not match walltime!\n",
+ engine->name);
+ err = -EINVAL;
+ }
+
+ if (10 * expected < 8 * cycles ||
+ 8 * expected > 10 * cycles) {
+ pr_err("%s: walltime does not match rps clock ticks!\n",
+ engine->name);
+ err = -EINVAL;
+ }
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ break; /* once is enough */
+ }
+
+ intel_rps_enable(&gt->rps);
+ intel_gt_pm_put(gt);
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ if (err == -ENODEV) /* skipped, don't report a fail */
+ err = 0;
+
+ return err;
+}
+
+int live_rps_control(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * Check that the actual frequency matches our requested frequency,
+ * to verify our control mechanism. We have to be careful that the
+ * PCU may throttle the GPU, in which case the actual frequency used
+ * will be lower than requested.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (IS_CHERRYVIEW(gt->i915)) /* XXX fragile PCU */
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ intel_gt_pm_get(gt);
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ ktime_t min_dt, max_dt;
+ int f, limit;
+ int min, max;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ if (rps_set_check(rps, rps->min_freq) != rps->min_freq) {
+ pr_err("%s: could not set minimum frequency [%x], only %x!\n",
+ engine->name, rps->min_freq, read_cagf(rps));
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ show_pstate_limits(rps);
+ err = -EINVAL;
+ break;
+ }
+
+ for (f = rps->min_freq + 1; f < rps->max_freq; f++) {
+ if (rps_set_check(rps, f) < f)
+ break;
+ }
+
+ limit = rps_set_check(rps, f);
+
+ if (rps_set_check(rps, rps->min_freq) != rps->min_freq) {
+ pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
+ engine->name, rps->min_freq, read_cagf(rps));
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ show_pstate_limits(rps);
+ err = -EINVAL;
+ break;
+ }
+
+ max_dt = ktime_get();
+ max = rps_set_check(rps, limit);
+ max_dt = ktime_sub(ktime_get(), max_dt);
+
+ min_dt = ktime_get();
+ min = rps_set_check(rps, rps->min_freq);
+ min_dt = ktime_sub(ktime_get(), min_dt);
+
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+
+ pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
+ engine->name,
+ rps->min_freq, intel_gpu_freq(rps, rps->min_freq),
+ rps->max_freq, intel_gpu_freq(rps, rps->max_freq),
+ limit, intel_gpu_freq(rps, limit),
+ min, max, ktime_to_ns(min_dt), ktime_to_ns(max_dt));
+
+ if (limit == rps->min_freq) {
+ pr_err("%s: GPU throttled to minimum!\n",
+ engine->name);
+ show_pstate_limits(rps);
+ err = -ENODEV;
+ break;
+ }
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+ intel_gt_pm_put(gt);
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
+
+static void show_pcu_config(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ unsigned int max_gpu_freq, min_gpu_freq;
+ intel_wakeref_t wakeref;
+ int gpu_freq;
+
+ if (!HAS_LLC(i915))
+ return;
+
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
+ if (INTEL_GEN(i915) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ min_gpu_freq /= GEN9_FREQ_SCALER;
+ max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm);
+
+ pr_info("%5s %5s %5s\n", "GPU", "eCPU", "eRing");
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+ int ia_freq = gpu_freq;
+
+ sandybridge_pcode_read(i915,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq, NULL);
+
+ pr_info("%5d %5d %5d\n",
+ gpu_freq * 50,
+ ((ia_freq >> 0) & 0xff) * 100,
+ ((ia_freq >> 8) & 0xff) * 100);
+ }
+
+ intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref);
+}
+
+static u64 __measure_frequency(u32 *cntr, int duration_ms)
+{
+ u64 dc, dt;
+
+ dt = ktime_get();
+ dc = READ_ONCE(*cntr);
+ usleep_range(1000 * duration_ms, 2000 * duration_ms);
+ dc = READ_ONCE(*cntr) - dc;
+ dt = ktime_get() - dt;
+
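+ /* dc events in dt ns; scale to events per millisecond, i.e. KHz */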
+ return div64_u64(1000 * 1000 * dc, dt);
+}
+
+static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq)
+{
+ u64 x[5];
+ int i;
+
+ *freq = rps_set_check(rps, *freq);
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_frequency(cntr, 2);
+ *freq = (*freq + read_cagf(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+static u64 __measure_cs_frequency(struct intel_engine_cs *engine,
+ int duration_ms)
+{
+ u64 dc, dt;
+
+ dt = ktime_get();
+ dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0));
+ usleep_range(1000 * duration_ms, 2000 * duration_ms);
+ dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc;
+ dt = ktime_get() - dt;
+
+ return div64_u64(1000 * 1000 * dc, dt);
+}
+
+static u64 measure_cs_frequency_at(struct intel_rps *rps,
+ struct intel_engine_cs *engine,
+ int *freq)
+{
+ u64 x[5];
+ int i;
+
+ *freq = rps_set_check(rps, *freq);
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_cs_frequency(engine, 2);
+ *freq = (*freq + read_cagf(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
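+/* Check that x/y stays within the ratio window (f_n/f_d, f_d/f_n) */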
+static bool scaled_within(u64 x, u64 y, u32 f_n, u32 f_d)
+{
+ return f_d * x > f_n * y && f_n * x < f_d * y;
+}
+
+int live_rps_frequency_cs(void *arg)
+{
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * The premise is that the GPU does change frequency at our behest.
+ * Let's check there is a correspondence between the requested
+ * frequency, the actual frequency, and the observed clock rate.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+ return 0;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_add_request(&qos, CPU_LATENCY);
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cancel, *cntr;
+ struct {
+ u64 count;
+ int freq;
+ } min, max;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ vma = create_spin_counter(engine,
+ engine->kernel_context->vm, false,
+ &cancel, &cntr);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ break;
+ }
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_vma;
+
+ if (wait_for(intel_uncore_read(engine->uncore, CS_GPR(0)),
+ 10)) {
+ pr_err("%s: timed loop did not start\n",
+ engine->name);
+ err = -ETIME;
+ goto err_vma;
+ }
+
+ min.freq = rps->min_freq;
+ min.count = measure_cs_frequency_at(rps, engine, &min.freq);
+
+ max.freq = rps->max_freq;
+ max.count = measure_cs_frequency_at(rps, engine, &max.freq);
+
+ pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n",
+ engine->name,
+ min.count, intel_gpu_freq(rps, min.freq),
+ max.count, intel_gpu_freq(rps, max.freq),
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count,
+ max.freq * min.count));
+
+ if (!scaled_within(max.freq * min.count,
+ min.freq * max.count,
+ 2, 3)) {
+ int f;
+
+ pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n",
+ engine->name,
+ max.freq * min.count,
+ min.freq * max.count);
+ show_pcu_config(rps);
+
+ for (f = min.freq + 1; f <= rps->max_freq; f++) {
+ int act = f;
+ u64 count;
+
+ count = measure_cs_frequency_at(rps, engine, &act);
+ if (act < f)
+ break;
+
+ pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n",
+ engine->name,
+ act, intel_gpu_freq(rps, act), count,
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count,
+ act * min.count));
+
+ f = act; /* may skip ahead [pcu granularity] */
+ }
+
+ err = -EINVAL;
+ }
+
+err_vma:
+ *cancel = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_remove_request(&qos);
+
+ return err;
+}
+
+int live_rps_frequency_srm(void *arg)
+{
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * The premise is that the GPU does change frequency at our behest.
+ * Let's check there is a correspondence between the requested
+ * frequency, the actual frequency, and the observed clock rate.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+ return 0;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_add_request(&qos, CPU_LATENCY);
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cancel, *cntr;
+ struct {
+ u64 count;
+ int freq;
+ } min, max;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ vma = create_spin_counter(engine,
+ engine->kernel_context->vm, true,
+ &cancel, &cntr);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ break;
+ }
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_vma;
+
+ if (wait_for(READ_ONCE(*cntr), 10)) {
+ pr_err("%s: timed loop did not start\n",
+ engine->name);
+ err = -ETIME;
+ goto err_vma;
+ }
+
+ min.freq = rps->min_freq;
+ min.count = measure_frequency_at(rps, cntr, &min.freq);
+
+ max.freq = rps->max_freq;
+ max.count = measure_frequency_at(rps, cntr, &max.freq);
+
+ pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n",
+ engine->name,
+ min.count, intel_gpu_freq(rps, min.freq),
+ max.count, intel_gpu_freq(rps, max.freq),
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count,
+ max.freq * min.count));
+
+ if (!scaled_within(max.freq * min.count,
+ min.freq * max.count,
+ 1, 2)) {
+ int f;
+
+ pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n",
+ engine->name,
+ max.freq * min.count,
+ min.freq * max.count);
+ show_pcu_config(rps);
+
+ for (f = min.freq + 1; f <= rps->max_freq; f++) {
+ int act = f;
+ u64 count;
+
+ count = measure_frequency_at(rps, cntr, &act);
+ if (act < f)
+ break;
+
+ pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n",
+ engine->name,
+ act, intel_gpu_freq(rps, act), count,
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count,
+ act * min.count));
+
+ f = act; /* may skip ahead [pcu granularity] */
+ }
+
+ err = -EINVAL;
+ }
+
+err_vma:
+ *cancel = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_remove_request(&qos);
+
+ return err;
+}
+
+static void sleep_for_ei(struct intel_rps *rps, int timeout_us)
+{
+ /* Flush any previous EI */
+ usleep_range(timeout_us, 2 * timeout_us);
+
+ /* Reset the interrupt status */
+ rps_disable_interrupts(rps);
+ GEM_BUG_ON(rps->pm_iir);
+ rps_enable_interrupts(rps);
+
+ /* And then wait for the timeout, for real this time */
+ usleep_range(2 * timeout_us, 3 * timeout_us);
+}
+
+static int __rps_up_interrupt(struct intel_rps *rps,
+ struct intel_engine_cs *engine,
+ struct igt_spinner *spin)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ struct i915_request *rq;
+ u32 timeout;
+
+ if (!intel_engine_can_store_dword(engine))
+ return 0;
+
+ rps_set_check(rps, rps->min_freq);
+
+ rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ i915_request_put(rq);
+ intel_gt_set_wedged(engine->gt);
+ return -EIO;
+ }
+
+ if (!intel_rps_is_active(rps)) {
+ pr_err("%s: RPS not enabled on starting spinner\n",
+ engine->name);
+ igt_spinner_end(spin);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) {
+ pr_err("%s: RPS did not register UP interrupt\n",
+ engine->name);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ if (rps->last_freq != rps->min_freq) {
+ pr_err("%s: RPS did not program min frequency\n",
+ engine->name);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ timeout = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+ timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout);
+ timeout = DIV_ROUND_UP(timeout, 1000);
+
+ sleep_for_ei(rps, timeout);
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ igt_spinner_end(spin);
+ i915_request_put(rq);
+
+ if (rps->cur_freq != rps->min_freq) {
+ pr_err("%s: Frequency unexpectedly changed [up], now %d!\n",
+ engine->name, intel_rps_read_actual_frequency(rps));
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) {
+ pr_err("%s: UP interrupt not recorded for spinner, pm_iir:%x, prev_up:%x, up_threshold:%x, up_ei:%x\n",
+ engine->name, rps->pm_iir,
+ intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+ intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_UP_EI));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __rps_down_interrupt(struct intel_rps *rps,
+ struct intel_engine_cs *engine)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ u32 timeout;
+
+ rps_set_check(rps, rps->max_freq);
+
+ if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) {
+ pr_err("%s: RPS did not register DOWN interrupt\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ if (rps->last_freq != rps->max_freq) {
+ pr_err("%s: RPS did not program max frequency\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+ timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout);
+ timeout = DIV_ROUND_UP(timeout, 1000);
+
+ sleep_for_ei(rps, timeout);
+
+ if (rps->cur_freq != rps->max_freq) {
+ pr_err("%s: Frequency unexpectedly changed [down], now %d!\n",
+ engine->name,
+ intel_rps_read_actual_frequency(rps));
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) {
+ pr_err("%s: DOWN interrupt not recorded for idle, pm_iir:%x, prev_down:%x, down_threshold:%x, down_ei:%x [prev_up:%x, up_threshold:%x, up_ei:%x]\n",
+ engine->name, rps->pm_iir,
+ intel_uncore_read(uncore, GEN6_RP_PREV_DOWN),
+ intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_DOWN_EI),
+ intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+ intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_UP_EI));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int live_rps_interrupt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ u32 pm_events;
+ int err = 0;
+
+ /*
+ * First, let's check whether or not we are receiving interrupts.
+ */
+
+ if (!intel_rps_has_interrupts(rps))
+ return 0;
+
+ intel_gt_pm_get(gt);
+ pm_events = rps->pm_events;
+ intel_gt_pm_put(gt);
+ if (!pm_events) {
+ pr_err("No RPS PM events registered, but RPS is enabled?\n");
+ return -ENODEV;
+ }
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ /* Keep the engine busy with a spinner; expect an UP! */
+ if (pm_events & GEN6_PM_RP_UP_THRESHOLD) {
+ unsigned long saved_heartbeat;
+
+ intel_gt_pm_wait_for_idle(engine->gt);
+ GEM_BUG_ON(intel_rps_is_active(rps));
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ err = __rps_up_interrupt(rps, engine, &spin);
+
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ if (err)
+ goto out;
+
+ intel_gt_pm_wait_for_idle(engine->gt);
+ }
+
+ /* Keep the engine awake but idle and check for DOWN */
+ if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
+ unsigned long saved_heartbeat;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+ intel_rc6_disable(&gt->rc6);
+
+ err = __rps_down_interrupt(rps, engine);
+
+ intel_rc6_enable(&gt->rc6);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
+
+static u64 __measure_power(int duration_ms)
+{
+ u64 dE, dt;
+
+ dt = ktime_get();
+ dE = librapl_energy_uJ();
+ usleep_range(1000 * duration_ms, 2000 * duration_ms);
+ dE = librapl_energy_uJ() - dE;
+ dt = ktime_get() - dt;
+
+ return div64_u64(1000 * 1000 * dE, dt);
+}
+
+static u64 measure_power_at(struct intel_rps *rps, int *freq)
+{
+ u64 x[5];
+ int i;
+
+ *freq = rps_set_check(rps, *freq);
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_power(5);
+ *freq = (*freq + read_cagf(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+int live_rps_power(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * Our fundamental assumption is that running at lower frequency
+ * actually saves power. Let's see if our RAPL measurement supports
+ * that theory.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (!librapl_energy_uJ())
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ struct {
+ u64 power;
+ int freq;
+ } min, max;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ max.freq = rps->max_freq;
+ max.power = measure_power_at(rps, &max.freq);
+
+ min.freq = rps->min_freq;
+ min.power = measure_power_at(rps, &min.freq);
+
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+
+ pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
+ engine->name,
+ min.power, intel_gpu_freq(rps, min.freq),
+ max.power, intel_gpu_freq(rps, max.freq));
+
+ if (10 * min.freq >= 9 * max.freq) {
+ pr_notice("Could not control frequency, ran at [%d:%uMHz, %d:%uMhz]\n",
+ min.freq, intel_gpu_freq(rps, min.freq),
+ max.freq, intel_gpu_freq(rps, max.freq));
+ continue;
+ }
+
+ if (11 * min.power > 10 * max.power) {
+ pr_err("%s: did not conserve power when setting lower frequency!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
+
+int live_rps_dynamic(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * We've looked at the basics, and have established that we
+ * can change the clock frequency and that the HW will generate
+ * interrupts based on load. Now we check how we integrate those
+ * moving parts into dynamic reclocking based on load.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ struct {
+ ktime_t dt;
+ u8 freq;
+ } min, max;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ intel_gt_pm_wait_for_idle(gt);
+ GEM_BUG_ON(intel_rps_is_active(rps));
+ rps->cur_freq = rps->min_freq;
+
+ intel_engine_pm_get(engine);
+ intel_rc6_disable(&gt->rc6);
+ GEM_BUG_ON(rps->last_freq != rps->min_freq);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ i915_request_add(rq);
+
+ max.dt = ktime_get();
+ max.freq = wait_for_freq(rps, rps->max_freq, 500);
+ max.dt = ktime_sub(ktime_get(), max.dt);
+
+ igt_spinner_end(&spin);
+
+ min.dt = ktime_get();
+ min.freq = wait_for_freq(rps, rps->min_freq, 2000);
+ min.dt = ktime_sub(ktime_get(), min.dt);
+
+ pr_info("%s: dynamically reclocked to %u:%uMHz while busy in %lluns, and %u:%uMHz while idle in %lluns\n",
+ engine->name,
+ max.freq, intel_gpu_freq(rps, max.freq),
+ ktime_to_ns(max.dt),
+ min.freq, intel_gpu_freq(rps, min.freq),
+ ktime_to_ns(min.dt));
+ if (min.freq >= max.freq) {
+ pr_err("%s: dynamic reclocking of spinner failed\n!",
+ engine->name);
+ err = -EINVAL;
+ }
+
+err:
+ intel_rc6_enable(&gt->rc6);
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h
new file mode 100644
index 000000000000..6e82a631cfa1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_RPS_H
+#define SELFTEST_RPS_H
+
+int live_rps_control(void *arg);
+int live_rps_clock_interval(void *arg);
+int live_rps_frequency_cs(void *arg);
+int live_rps_frequency_srm(void *arg);
+int live_rps_power(void *arg);
+int live_rps_interrupt(void *arg);
+int live_rps_dynamic(void *arg);
+
+#endif /* SELFTEST_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
new file mode 100644
index 000000000000..43c7acbdc79d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+
+#include "gem/i915_gem_object.h"
+#include "shmem_utils.h"
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len)
+{
+ struct file *file;
+ int err;
+
+ file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
+ if (IS_ERR(file))
+ return file;
+
+ err = shmem_write(file, 0, data, len);
+ if (err) {
+ fput(file);
+ return ERR_PTR(err);
+ }
+
+ return file;
+}
+
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
+{
+ struct file *file;
+ void *ptr;
+
+ if (obj->ops == &i915_gem_shmem_ops) {
+ file = obj->base.filp;
+ atomic_long_inc(&file->f_count);
+ return file;
+ }
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
+
+ file = shmem_create_from_data("", ptr, obj->base.size);
+ i915_gem_object_unpin_map(obj);
+
+ return file;
+}
+
+static size_t shmem_npte(struct file *file)
+{
+ return file->f_mapping->host->i_size >> PAGE_SHIFT;
+}
+
+static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
+{
+ unsigned long pfn;
+
+ vunmap(ptr);
+
+ for (pfn = 0; pfn < n_pte; pfn++) {
+ struct page *page;
+
+ page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+ GFP_KERNEL);
+ if (!WARN_ON(IS_ERR(page))) {
+ put_page(page);
+ put_page(page);
+ }
+ }
+}
+
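+/*
+ * Map the whole shmem file contiguously into kernel space: allocate a
+ * vmalloc area, read in each page and install its pte directly. The
+ * pages stay pinned (and unevictable) until shmem_unpin_map(). A
+ * minimal usage sketch (hypothetical caller, for illustration):
+ *
+ * u32 *map = shmem_pin_map(file);
+ * if (map) {
+ * use(map);
+ * shmem_unpin_map(file, map);
+ * }
+ */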
+void *shmem_pin_map(struct file *file)
+{
+ const size_t n_pte = shmem_npte(file);
+ pte_t *stack[32], **ptes, **mem;
+ struct vm_struct *area;
+ unsigned long pfn;
+
+ mem = stack;
+ if (n_pte > ARRAY_SIZE(stack)) {
+ mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return NULL;
+ }
+
+ area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
+ if (!area) {
+ if (mem != stack)
+ kvfree(mem);
+ return NULL;
+ }
+
+ ptes = mem;
+ for (pfn = 0; pfn < n_pte; pfn++) {
+ struct page *page;
+
+ page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+ GFP_KERNEL);
+ if (IS_ERR(page))
+ goto err_page;
+
+ **ptes++ = mk_pte(page, PAGE_KERNEL);
+ }
+
+ if (mem != stack)
+ kvfree(mem);
+
+ mapping_set_unevictable(file->f_mapping);
+ return area->addr;
+
+err_page:
+ if (mem != stack)
+ kvfree(mem);
+
+ __shmem_unpin_map(file, area->addr, pfn);
+ return NULL;
+}
+
+void shmem_unpin_map(struct file *file, void *ptr)
+{
+ mapping_clear_unevictable(file->f_mapping);
+ __shmem_unpin_map(file, ptr, shmem_npte(file));
+}
+
+static int __shmem_rw(struct file *file, loff_t off,
+ void *ptr, size_t len,
+ bool write)
+{
+ unsigned long pfn;
+
+ for (pfn = off >> PAGE_SHIFT; len; pfn++) {
+ unsigned int this =
+ min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
+ struct page *page;
+ void *vaddr;
+
+ page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+ GFP_KERNEL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap(page);
+ if (write)
+ memcpy(vaddr + offset_in_page(off), ptr, this);
+ else
+ memcpy(ptr, vaddr + offset_in_page(off), this);
+ kunmap(page);
+ put_page(page);
+
+ len -= this;
+ ptr += this;
+ off = 0;
+ }
+
+ return 0;
+}
+
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
+{
+ return __shmem_rw(file, off, dst, len, false);
+}
+
+int shmem_write(struct file *file, loff_t off, void *src, size_t len)
+{
+ return __shmem_rw(file, off, src, len, true);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "st_shmem_utils.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.h b/drivers/gpu/drm/i915/gt/shmem_utils.h
new file mode 100644
index 000000000000..c1669170c351
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SHMEM_UTILS_H
+#define SHMEM_UTILS_H
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct file;
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len);
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj);
+
+void *shmem_pin_map(struct file *file);
+void shmem_unpin_map(struct file *file, void *ptr);
+
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len);
+int shmem_write(struct file *file, loff_t off, void *src, size_t len);
+
+#endif /* SHMEM_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gt/st_shmem_utils.c b/drivers/gpu/drm/i915/gt/st_shmem_utils.c
new file mode 100644
index 000000000000..b279fe88b70e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/st_shmem_utils.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/* Just a quick and casual check of the shmem_utils API */
+
+static int igt_shmem_basic(void *ignored)
+{
+ u32 datum = 0xdeadbeef, result;
+ struct file *file;
+ u32 *map;
+ int err;
+
+ file = shmem_create_from_data("mock", &datum, sizeof(datum));
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ result = 0;
+ err = shmem_read(file, 0, &result, sizeof(result));
+ if (err)
+ goto out_file;
+
+ if (result != datum) {
+ pr_err("Incorrect read back from shmemfs: %x != %x\n",
+ result, datum);
+ err = -EINVAL;
+ goto out_file;
+ }
+
+ result = 0xc0ffee;
+ err = shmem_write(file, 0, &result, sizeof(result));
+ if (err)
+ goto out_file;
+
+ map = shmem_pin_map(file);
+ if (!map) {
+ err = -ENOMEM;
+ goto out_file;
+ }
+
+ if (*map != result) {
+ pr_err("Incorrect read back via mmap of last write: %x != %x\n",
+ *map, result);
+ err = -EINVAL;
+ goto out_map;
+ }
+
+out_map:
+ shmem_unpin_map(file, map);
+out_file:
+ fput(file);
+ return err;
+}
+
+int shmem_utils_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_shmem_basic),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index 8f9b2f33dbaf..535cc1169e54 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -192,6 +192,17 @@ static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
static ssize_t
+max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_def =
+__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
+
+static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
@@ -234,6 +245,17 @@ static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
static ssize_t
+timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_def =
+__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
+
+static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
@@ -273,6 +295,17 @@ static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
static ssize_t
+stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_def =
+__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
+
+static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
@@ -317,6 +350,18 @@ static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
static ssize_t
+preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_def =
+__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
+
+static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
@@ -359,6 +404,17 @@ heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
+static ssize_t
+heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_def =
+__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
+
static void kobj_engine_release(struct kobject *kobj)
{
kfree(kobj);
@@ -390,6 +446,42 @@ kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
return &ke->base;
}
+static void add_defaults(struct kobj_engine *parent)
+{
+ static const struct attribute *files[] = {
+ &max_spin_def.attr,
+ &stop_timeout_def.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+ &heartbeat_interval_def.attr,
+#endif
+ NULL
+ };
+ struct kobj_engine *ke;
+
+ ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+ if (!ke)
+ return;
+
+ kobject_init(&ke->base, &kobj_engine_type);
+ ke->engine = parent->engine;
+
+ if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
+ kobject_put(&ke->base);
+ return;
+ }
+
+ if (sysfs_create_files(&ke->base, files))
+ return;
+
+ if (intel_engine_has_timeslices(ke->engine) &&
+ sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
+ return;
+
+ if (intel_engine_has_preempt_reset(ke->engine) &&
+ sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
+ return;
+}
+
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
static const struct attribute *files[] = {
@@ -433,6 +525,8 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
sysfs_create_file(kobj, &preempt_timeout_attr.attr))
goto err_engine;
+ add_defaults(container_of(kobj, struct kobj_engine, base));
+
if (0) {
err_object:
kobject_put(kobj);
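The new read-only ".defaults" directory mirrors each writable engine attribute with the value the driver shipped with, so tooling can restore a knob after experimentation. A minimal userspace sketch of that idea — paths and helper name are illustrative, not part of this patch:

	#include <stdio.h>

	/* Copy the read-only .defaults value back into the writable
	 * attribute, e.g. attr = "timeslice_duration_ms". */
	static int restore_engine_default(const char *engine_dir,
					  const char *attr)
	{
		char path[256], buf[64];
		FILE *f;

		snprintf(path, sizeof(path), "%s/.defaults/%s",
			 engine_dir, attr);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (!fgets(buf, sizeof(buf), f)) {
			fclose(f);
			return -1;
		}
		fclose(f);

		snprintf(path, sizeof(path), "%s/%s", engine_dir, attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(buf, f);
		fclose(f);
		return 0;
	}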
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 819f09ef51fc..861657897c0f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -169,7 +169,7 @@ void intel_guc_init_early(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
- intel_guc_fw_init_early(guc);
+ intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
intel_guc_ct_init_early(&guc->ct);
intel_guc_log_init_early(&guc->log);
intel_guc_submission_init_early(guc);
@@ -723,3 +723,47 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
return 0;
}
+
+/**
+ * intel_guc_load_status - dump information about GuC load status
+ * @guc: the GuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC load status.
+ */
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ intel_wakeref_t wakeref;
+
+ if (!intel_guc_is_supported(guc)) {
+ drm_printf(p, "GuC not supported\n");
+ return;
+ }
+
+ if (!intel_guc_is_wanted(guc)) {
+ drm_printf(p, "GuC disabled\n");
+ return;
+ }
+
+ intel_uc_fw_dump(&guc->fw, p);
+
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ u32 status = intel_uncore_read(uncore, GUC_STATUS);
+ u32 i;
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < 16; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
+ }
+ }
+}
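The load-status printer above decodes GUC_STATUS with open-coded mask/shift pairs. Since the masks are compile-time constants, the same decode could also be expressed with the kernel's FIELD_GET() helper; a sketch, not part of this patch:

	#include <linux/bitfield.h>

	/* FIELD_GET() derives the shift from the mask at compile time. */
	u32 bootrom = FIELD_GET(GS_BOOTROM_MASK, status);
	u32 ukernel = FIELD_GET(GS_UKERNEL_MASK, status);
	u32 mia     = FIELD_GET(GS_MIA_MASK, status);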
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 4594ccbeaa34..e84ab67b317d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -74,6 +74,11 @@ struct intel_guc {
struct mutex send_mutex;
};
+static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
+{
+ return container_of(log, struct intel_guc, log);
+}
+
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
@@ -190,4 +195,6 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine);
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
new file mode 100644
index 000000000000..fe7cb7b29a1e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_guc.h"
+#include "intel_guc_debugfs.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_info_show(struct seq_file *m, void *data)
+{
+ struct intel_guc *guc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_guc_is_supported(guc))
+ return -ENODEV;
+
+ intel_guc_load_status(guc, &p);
+ drm_puts(&p, "\n");
+ intel_guc_log_info(&guc->log, &p);
+
+ /* Add more as required ... */
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info);
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "guc_info", &guc_info_fops, NULL },
+ };
+
+ if (!intel_guc_is_supported(guc))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), guc);
+ intel_guc_log_debugfs_register(&guc->log, root);
+}
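DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info) generates the guc_info_fops used above. Assuming it follows the common DEFINE_SHOW_ATTRIBUTE() pattern, a rough equivalent expansion would be:

	/* Hypothetical expansion: a single-open seq_file wrapper whose
	 * ->open() forwards inode->i_private to guc_info_show(). */
	static int guc_info_open(struct inode *inode, struct file *file)
	{
		return single_open(file, guc_info_show, inode->i_private);
	}

	static const struct file_operations guc_info_fops = {
		.owner   = THIS_MODULE,
		.open    = guc_info_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};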
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h
new file mode 100644
index 000000000000..424c26665cf1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_H
+#define DEBUGFS_GUC_H
+
+struct intel_guc;
+struct dentry;
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root);
+
+#endif /* DEBUGFS_GUC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 3a1c47d600ea..d4a87f4c9421 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -13,20 +13,6 @@
#include "intel_guc_fw.h"
#include "i915_drv.h"
-/**
- * intel_guc_fw_init_early() - initializes GuC firmware struct
- * @guc: intel_guc struct
- *
- * On platforms with GuC selects firmware for uploading
- */
-void intel_guc_fw_init_early(struct intel_guc *guc)
-{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
- intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915),
- INTEL_INFO(i915)->platform, INTEL_REVID(i915));
-}
-
static void guc_prepare_xfer(struct intel_uncore *uncore)
{
u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
index b5ab639d7259..0b4d2a9c9435 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
@@ -8,7 +8,6 @@
struct intel_guc;
-void intel_guc_fw_init_early(struct intel_guc *guc);
int intel_guc_fw_upload(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index caed0d57e704..fb10f3597ea5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -55,11 +55,6 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
-{
- return container_of(log, struct intel_guc, log);
-}
-
static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
intel_guc_enable_msg(log_to_guc(log),
@@ -672,3 +667,95 @@ void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
queue_work(system_highpri_wq, &log->relay.flush_work);
}
+
+static const char *
+stringify_guc_log_type(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return "ISR";
+ case GUC_DPC_LOG_BUFFER:
+ return "DPC";
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return "CRASH";
+ default:
+ MISSING_CASE(type);
+ }
+
+ return "";
+}
+
+/**
+ * intel_guc_log_info - dump information about GuC log relay
+ * @log: the GuC log
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC log info
+ */
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
+{
+ enum guc_log_buffer_type type;
+
+ if (!intel_guc_log_relay_created(log)) {
+ drm_puts(p, "GuC log relay not created\n");
+ return;
+ }
+
+ drm_puts(p, "GuC logging stats:\n");
+
+ drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);
+
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
+ stringify_guc_log_type(type),
+ log->stats[type].flush,
+ log->stats[type].sampled_overflow);
+ }
+}
+
+/**
+ * intel_guc_log_dump - dump the contents of the GuC log
+ * @log: the GuC log
+ * @p: the &drm_printer
+ * @dump_load_err: dump the log saved on GuC load error
+ *
+ * Pretty printer for the GuC log
+ */
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+ bool dump_load_err)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+ struct drm_i915_gem_object *obj = NULL;
+ u32 *map;
+ int i = 0;
+
+ if (!intel_guc_is_supported(guc))
+ return -ENODEV;
+
+ if (dump_load_err)
+ obj = uc->load_err_log;
+ else if (guc->log.vma)
+ obj = guc->log.vma->obj;
+
+ if (!obj)
+ return 0;
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ DRM_DEBUG("Failed to pin object\n");
+ drm_puts(p, "(log data inaccessible)\n");
+ return PTR_ERR(map);
+ }
+
+ for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
+ drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(map + i), *(map + i + 1),
+ *(map + i + 2), *(map + i + 3));
+
+ drm_puts(p, "\n");
+
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
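intel_guc_log_dump() above prints the pinned log object four dwords per line through the drm_printer. For comparison, the kernel's generic hex dumper can produce a similar dump, though it targets the kernel log rather than a drm_printer; a sketch, not part of this patch:

	/* Same data, via the generic helper from <linux/printk.h>. */
	print_hex_dump(KERN_DEBUG, "guc_log: ", DUMP_PREFIX_OFFSET,
		       16, 4, map, obj->base.size, false);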
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index c252c022c5fc..11fccd0b2294 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -79,4 +79,8 @@ static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
return log->level;
}
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p);
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+ bool dump_load_err);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
new file mode 100644
index 000000000000..129e0cf7dfe2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/fs.h>
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_guc.h"
+#include "intel_guc_log.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_log_dump_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ return intel_guc_log_dump(m->private, &p, false);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
+
+static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ return intel_guc_log_dump(m->private, &p, true);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
+
+static int guc_log_level_get(void *data, u64 *val)
+{
+ struct intel_guc_log *log = data;
+
+ if (!intel_guc_is_used(log_to_guc(log)))
+ return -ENODEV;
+
+ *val = intel_guc_log_get_level(log);
+
+ return 0;
+}
+
+static int guc_log_level_set(void *data, u64 val)
+{
+ struct intel_guc_log *log = data;
+
+ if (!intel_guc_is_used(log_to_guc(log)))
+ return -ENODEV;
+
+ return intel_guc_log_set_level(log, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(guc_log_level_fops,
+ guc_log_level_get, guc_log_level_set,
+ "%lld\n");
+
+static int guc_log_relay_open(struct inode *inode, struct file *file)
+{
+ struct intel_guc_log *log = inode->i_private;
+
+ if (!intel_guc_is_ready(log_to_guc(log)))
+ return -ENODEV;
+
+ file->private_data = log;
+
+ return intel_guc_log_relay_open(log);
+}
+
+static ssize_t
+guc_log_relay_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct intel_guc_log *log = filp->private_data;
+ int val;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Enable and start the GuC log relay when the value written is 1;
+ * flush the log relay for any other value.
+ */
+ if (val == 1)
+ ret = intel_guc_log_relay_start(log);
+ else
+ intel_guc_log_relay_flush(log);
+
+ return ret ?: cnt;
+}
+
+static int guc_log_relay_release(struct inode *inode, struct file *file)
+{
+ struct intel_guc_log *log = inode->i_private;
+
+ intel_guc_log_relay_close(log);
+ return 0;
+}
+
+static const struct file_operations guc_log_relay_fops = {
+ .owner = THIS_MODULE,
+ .open = guc_log_relay_open,
+ .write = guc_log_relay_write,
+ .release = guc_log_relay_release,
+};
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+ struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "guc_log_dump", &guc_log_dump_fops, NULL },
+ { "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL },
+ { "guc_log_level", &guc_log_level_fops, NULL },
+ { "guc_log_relay", &guc_log_relay_fops, NULL },
+ };
+
+ if (!intel_guc_is_supported(log_to_guc(log)))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), log);
+}
+
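Taken together, the relay interface above is driven entirely from userspace: opening guc_log_relay enables the relay, writing 1 starts it, writing any other value flushes it, and closing the file tears it down, while guc_log_level reads and sets verbosity through the DEFINE_SIMPLE_ATTRIBUTE() get/set pair. For example (debugfs paths illustrative), echo 1 > /sys/kernel/debug/dri/0/gt/uc/guc_log_relay starts relaying, and cat /sys/kernel/debug/dri/0/gt/uc/guc_log_level reports the current level.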
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h
new file mode 100644
index 000000000000..e8900e3d74ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_LOG_H
+#define DEBUGFS_GUC_LOG_H
+
+struct intel_guc_log;
+struct dentry;
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+ struct dentry *root);
+
+#endif /* DEBUGFS_GUC_LOG_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index fe7778c28d2d..94eb63f309ce 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -217,7 +217,7 @@ static void guc_wq_item_append(struct intel_guc *guc,
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
+ u32 ctx_desc = rq->context->lrc.ccid;
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
guc_wq_item_append(guc, engine->guc_id, ctx_desc,
@@ -258,7 +258,7 @@ static void guc_submit(struct intel_engine_cs *engine,
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->sched.attr.priority | __NO_PREEMPTION;
+ return rq->sched.attr.priority;
}
static struct i915_request *schedule_in(struct i915_request *rq, int idx)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index a74b65694512..65eeb44b397d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -41,7 +41,7 @@ void intel_huc_init_early(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
- intel_huc_fw_init_early(huc);
+ intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
if (INTEL_GEN(i915) >= 11) {
huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
@@ -200,9 +200,13 @@ fail:
* This function reads status register to verify if HuC
* firmware was successfully loaded.
*
- * Returns: 1 if HuC firmware is loaded and verified,
- * 0 if HuC firmware is not loaded and -ENODEV if HuC
- * is not present on this platform.
+ * Returns:
+ * * -ENODEV if HuC is not present on this platform,
+ * * -EOPNOTSUPP if HuC firmware is disabled,
+ * * -ENOPKG if HuC firmware was not installed,
+ * * -ENOEXEC if HuC firmware is invalid or mismatched,
+ * * 0 if HuC firmware is not running,
+ * * 1 if HuC firmware is authenticated and running.
*/
int intel_huc_check_status(struct intel_huc *huc)
{
@@ -210,11 +214,50 @@ int intel_huc_check_status(struct intel_huc *huc)
intel_wakeref_t wakeref;
u32 status = 0;
- if (!intel_huc_is_supported(huc))
+ switch (__intel_uc_fw_status(&huc->fw)) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
return -ENODEV;
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return -EOPNOTSUPP;
+ case INTEL_UC_FIRMWARE_MISSING:
+ return -ENOPKG;
+ case INTEL_UC_FIRMWARE_ERROR:
+ return -ENOEXEC;
+ default:
+ break;
+ }
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
status = intel_uncore_read(gt->uncore, huc->status.reg);
return (status & huc->status.mask) == huc->status.value;
}
+
+/**
+ * intel_huc_load_status - dump information about HuC load status
+ * @huc: the HuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for HuC load status.
+ */
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ intel_wakeref_t wakeref;
+
+ if (!intel_huc_is_supported(huc)) {
+ drm_printf(p, "HuC not supported\n");
+ return;
+ }
+
+ if (!intel_huc_is_wanted(huc)) {
+ drm_printf(p, "HuC disabled\n");
+ return;
+ }
+
+ intel_uc_fw_dump(&huc->fw, p);
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ drm_printf(p, "HuC status: 0x%08x\n",
+ intel_uncore_read(gt->uncore, huc->status.reg));
+}
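With the richer return contract documented above, a caller can distinguish why HuC is unavailable. A hypothetical consumer mapping the documented return values to messages (the helper name is illustrative):

	static const char *huc_status_msg(int ret)
	{
		switch (ret) {
		case -ENODEV:
			return "HuC not present on this platform";
		case -EOPNOTSUPP:
			return "HuC firmware disabled";
		case -ENOPKG:
			return "HuC firmware not installed";
		case -ENOEXEC:
			return "HuC firmware invalid or mismatched";
		case 0:
			return "HuC firmware not running";
		case 1:
			return "HuC firmware authenticated and running";
		default:
			return "unknown HuC status";
		}
	}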
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index a40b9cfc6c22..daee43b661d4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -57,4 +57,6 @@ static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
return intel_uc_fw_is_running(&huc->fw);
}
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
new file mode 100644
index 000000000000..5733c15fd123
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_huc.h"
+#include "intel_huc_debugfs.h"
+
+static int huc_info_show(struct seq_file *m, void *data)
+{
+ struct intel_huc *huc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_huc_is_supported(huc))
+ return -ENODEV;
+
+ intel_huc_load_status(huc, &p);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(huc_info);
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "huc_info", &huc_info_fops, NULL },
+ };
+
+ if (!intel_huc_is_supported(huc))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), huc);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h
new file mode 100644
index 000000000000..be79e992f976
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_HUC_H
+#define DEBUGFS_HUC_H
+
+struct intel_huc;
+struct dentry;
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root);
+
+#endif /* DEBUGFS_HUC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 9cdf4cbe691c..e5ef509c70e8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -8,23 +8,6 @@
#include "i915_drv.h"
/**
- * intel_huc_fw_init_early() - initializes HuC firmware struct
- * @huc: intel_huc struct
- *
- * On platforms with HuC selects firmware for uploading
- */
-void intel_huc_fw_init_early(struct intel_huc *huc)
-{
- struct intel_gt *gt = huc_to_gt(huc);
- struct intel_uc *uc = &gt->uc;
- struct drm_i915_private *i915 = gt->i915;
-
- intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
- intel_uc_wants_guc(uc),
- INTEL_INFO(i915)->platform, INTEL_REVID(i915));
-}
-
-/**
* intel_huc_fw_upload() - load HuC uCode to device
* @huc: intel_huc structure
*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
index b791269ce923..12f264ee3e0b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
@@ -8,7 +8,6 @@
struct intel_huc;
-void intel_huc_fw_init_early(struct intel_huc *huc);
int intel_huc_fw_upload(struct intel_huc *huc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index a4cbe06e06bd..f518fe05c6f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -45,12 +45,12 @@ static void __confirm_options(struct intel_uc *uc)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
- "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
- i915_modparams.enable_guc,
- yesno(intel_uc_wants_guc(uc)),
- yesno(intel_uc_wants_guc_submission(uc)),
- yesno(intel_uc_wants_huc(uc)));
+ drm_dbg(&i915->drm,
+ "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_wants_guc(uc)),
+ yesno(intel_uc_wants_guc_submission(uc)),
+ yesno(intel_uc_wants_huc(uc)));
if (i915_modparams.enable_guc == -1)
return;
@@ -63,25 +63,25 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "GuC is not supported!");
if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "HuC is not supported!");
if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "GuC submission is N/A");
if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
ENABLE_GUC_LOAD_HUC))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "undocumented flag");
}
@@ -131,6 +131,13 @@ static void __uc_free_load_err_log(struct intel_uc *uc)
i915_gem_object_put(log);
}
+void intel_uc_driver_remove(struct intel_uc *uc)
+{
+ intel_uc_fini_hw(uc);
+ intel_uc_fini(uc);
+ __uc_free_load_err_log(uc);
+}
+
static inline bool guc_communication_enabled(struct intel_guc *guc)
{
return intel_guc_ct_enabled(&guc->ct);
@@ -311,8 +318,6 @@ static void __uc_fini(struct intel_uc *uc)
{
intel_huc_fini(&uc->huc);
intel_guc_fini(&uc->guc);
-
- __uc_free_load_err_log(uc);
}
static int __uc_sanitize(struct intel_uc *uc)
@@ -475,14 +480,14 @@ static int __uc_init_hw(struct intel_uc *uc)
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
- dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
guc->fw.major_ver_found, guc->fw.minor_ver_found,
"submission",
enableddisabled(intel_uc_uses_guc_submission(uc)));
if (intel_uc_uses_huc(uc)) {
- dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
huc->fw.path,
huc->fw.major_ver_found, huc->fw.minor_ver_found,
@@ -503,7 +508,7 @@ err_out:
__uc_sanitize(uc);
if (!ret) {
- dev_notice(i915->drm.dev, "GuC is uninitialized\n");
+ drm_notice(&i915->drm, "GuC is uninitialized\n");
/* We want to run without GuC submission */
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 5ae7b50b7dc1..9c954c589edf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -34,6 +34,7 @@ struct intel_uc {
void intel_uc_init_early(struct intel_uc *uc);
void intel_uc_driver_late_release(struct intel_uc *uc);
+void intel_uc_driver_remove(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
new file mode 100644
index 000000000000..9d16b784aa0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "intel_guc_debugfs.h"
+#include "intel_huc_debugfs.h"
+#include "intel_uc.h"
+#include "intel_uc_debugfs.h"
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
+{
+ struct dentry *root;
+
+ if (!gt_root)
+ return;
+
+ /* GuC and HuC always come as a pair; no need to check both */
+ if (!intel_uc_supports_guc(uc))
+ return;
+
+ root = debugfs_create_dir("uc", gt_root);
+ if (IS_ERR(root))
+ return;
+
+ intel_guc_debugfs_register(&uc->guc, root);
+ intel_huc_debugfs_register(&uc->huc, root);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h
new file mode 100644
index 000000000000..010ce250d223
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_UC_H
+#define DEBUGFS_UC_H
+
+struct intel_uc;
+struct dentry;
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root);
+
+#endif /* DEBUGFS_UC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 18c755203688..e1caae93996d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,26 +11,32 @@
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
-static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+static inline struct intel_gt *
+____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
- GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
- if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+ if (type == INTEL_UC_FW_TYPE_GUC)
return container_of(uc_fw, struct intel_gt, uc.guc.fw);
- GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC);
+ GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}
+static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+{
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ return ____uc_fw_to_gt(uc_fw, uc_fw->type);
+}
+
#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
uc_fw->__status = status;
- DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev,
- "%s firmware -> %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->path : intel_uc_fw_status_repr(status));
+ drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
+ "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif
@@ -187,17 +193,15 @@ static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
* intel_uc_fw_init_early - initialize the uC object and select the firmware
* @uc_fw: uC firmware
* @type: type of uC
- * @supported: is uC support possible
- * @platform: platform identifier
- * @rev: hardware revision
*
* Initialize the state of our uC object and relevant tracking and select the
* firmware to fetch and load.
*/
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type, bool supported,
- enum intel_platform platform, u8 rev)
+ enum intel_uc_fw_type type)
{
+ struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
+
/*
* we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
* before we've looked at the HW caps to see if we have uc support
@@ -208,8 +212,10 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
uc_fw->type = type;
- if (supported) {
- __uc_fw_auto_select(uc_fw, platform, rev);
+ if (HAS_GT_UC(i915)) {
+ __uc_fw_auto_select(uc_fw,
+ INTEL_INFO(i915)->platform,
+ INTEL_REVID(i915));
__uc_fw_user_override(uc_fw);
}
@@ -290,7 +296,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
- dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
err = -ENODATA;
@@ -303,7 +309,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
css->exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
- dev_warn(dev,
+ drm_warn(&i915->drm,
"%s firmware %s: unexpected header size: %zu != %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
@@ -316,7 +322,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* now RSA */
if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
- dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n",
+ drm_warn(&i915->drm, "%s firmware %s: unexpected key size: %u != %u\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
err = -EPROTO;
@@ -327,7 +333,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
- dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, size);
err = -ENOEXEC;
@@ -337,7 +343,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Sanity check whether this fw is not larger than whole WOPCM memory */
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= i915->wopcm.size)) {
- dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n",
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
size, (size_t)i915->wopcm.size);
err = -E2BIG;
@@ -352,7 +358,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
uc_fw->major_ver_found, uc_fw->minor_ver_found,
uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
@@ -380,9 +386,9 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- dev_notice(dev, "%s firmware %s: fetch failed with error %d\n",
+ drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- dev_info(dev, "%s firmware(s) can be downloaded from %s\n",
+ drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
@@ -467,7 +473,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
if (ret)
- dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
intel_uc_fw_type_repr(uc_fw->type),
intel_uncore_read_fw(uncore, DMA_CTRL));
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 888ff0de0244..23d3a423ac0f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -239,8 +239,7 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
}
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type, bool supported,
- enum intel_platform platform, u8 rev);
+ enum intel_uc_fw_type type);
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw);
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 8b13f091cee2..0d6d59871308 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -35,7 +35,7 @@
*/
#include "i915_drv.h"
-#include "i915_gem_fence_reg.h"
+#include "gt/intel_ggtt_fencing.h"
#include "gvt.h"
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index a3cc080a46c6..8b87f130f7f1 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -882,6 +882,47 @@ static int mocs_cmd_reg_handler(struct parser_exec_state *s,
return 0;
}
+static int is_cmd_update_pdps(unsigned int offset,
+ struct parser_exec_state *s)
+{
+ u32 base = s->workload->engine->mmio_base;
+ return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
+}
+
+static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
+ struct intel_vgpu_mm *mm;
+ u64 pdps[GEN8_3LVL_PDPES];
+
+ if (shadow_mm->ppgtt_mm.root_entry_type ==
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ pdps[0] = (u64)cmd_val(s, 2) << 32;
+ pdps[0] |= cmd_val(s, 4);
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
+ if (!mm) {
+ gvt_vgpu_err("failed to get the 4-level shadow vm\n");
+ return -EINVAL;
+ }
+ intel_vgpu_mm_get(mm);
+ list_add_tail(&mm->ppgtt_mm.link,
+ &s->workload->lri_shadow_mm);
+ *cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+ *cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+ } else {
+ /*
+ * Currently all guests use the 4-level PML4 table; a guest with a
+ * 3-level table that updates the PPGTT via LRI cannot occur, so
+ * this path is simply untestable.
+ */
+ GEM_BUG_ON(1);
+ gvt_vgpu_err("invalid shared shadow vm type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
@@ -920,6 +961,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
+ if (is_cmd_update_pdps(offset, s) &&
+ cmd_pdp_mmio_update_handler(s, offset, index))
+ return -EINVAL;
+
/* TODO
* In order to let workload with inhibit context to generate
* correct image data into memory, vregs values will be loaded to
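For reference, the cmd_val() indices in cmd_pdp_mmio_update_handler() imply an LRI sequence of roughly this shape (reconstructed for illustration, not quoted from the patch):

	/*
	 *   dw0: MI_LOAD_REGISTER_IMM(2)
	 *   dw1: GEN8_RING_PDP_UDW(base, 0)   dw2: upper 32 bits of PDP0
	 *   dw3: GEN8_RING_PDP_LDW(base, 0)   dw4: lower 32 bits of PDP0
	 *
	 * hence pdps[0] = (u64)cmd_val(s, 2) << 32 | cmd_val(s, 4), and
	 * the handler patches dw2/dw4 with the shadow PDP before the
	 * workload is submitted.
	 */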
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a83df2f84eb9..a1696e9ce4b6 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -208,14 +208,41 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
- vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
- LCPLL_PLL_ENABLE |
- LCPLL_PLL_LOCK;
- vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
-
+ /*
+ * Only one pipe is enabled in the current vGPU display, and PIPE_A
+ * is tied to TRANSCODER_A in HW, so it is safe to assume PIPE_A and
+ * TRANSCODER_A can be enabled. PORT_x depends on the input of
+ * setup_virtual_dp_monitor; since we can bind DPLL0 to any PORT_x,
+ * we fix on DPLL0 here.
+ * Set up DPLL0: DP link clk 1620 MHz, non-SSC, DP Mode
+ */
+ vgpu_vreg_t(vgpu, DPLL_CTRL1) =
+ DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0);
+ vgpu_vreg_t(vgpu, DPLL_CTRL1) |=
+ DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0);
+ vgpu_vreg_t(vgpu, LCPLL1_CTL) =
+ LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK;
+ vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0);
+ /*
+ * Golden M/N are calculated based on:
+ * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
+ * DP link clk 1620 MHz and non-constant_n.
+ * TODO: calculate DP link symbol clk and stream clk m/n.
+ */
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
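+ /*
+  * Sanity check of the golden data M/N above, assuming the standard
+  * DP data M/N formula (a sketch, not part of this patch):
+  *   data M/N = (bpp * pixel_clk) / (8 * nlanes * link_clk)
+  *            = (24 * 154000) / (8 * 4 * 162000)
+  *            = 3696000 / 5184000 ~= 0.71296
+  * and indeed 0x5b425e / 0x800000 = 5980766 / 8388608 ~= 0.71296.
+  * (The TU_SIZE field value 63 encodes a transfer unit of 64.)
+  */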
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -236,6 +263,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -256,6 +289,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index dd25c3024370..158873f269b1 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -424,8 +424,6 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
out:
- intel_vgpu_unpin_mm(workload->shadow_mm);
- intel_vgpu_destroy_workload(workload);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2a4b23f8aa74..210016192ce7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1900,6 +1900,7 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
INIT_LIST_HEAD(&mm->ppgtt_mm.list);
INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
+ INIT_LIST_HEAD(&mm->ppgtt_mm.link);
if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
mm->ppgtt_mm.guest_pdps[0] = pdps[0];
@@ -2341,12 +2342,27 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
int ret;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_engine_cs *engine;
+ int i;
if (bytes != 4 && bytes != 8)
return -EINVAL;
off -= info->gtt_start_offset;
ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
+
+ /* If the GGTT entry of the last submitted context is written to,
+ * that context has probably been unpinned. Mark the last shadowed
+ * context as invalid.
+ */
+ for_each_engine(engine, vgpu->gvt->gt, i) {
+ if (!s->last_ctx[i].valid)
+ continue;
+
+ if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
+ s->last_ctx[i].valid = false;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 88789316807d..320b8d6ad92f 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -160,6 +160,7 @@ struct intel_vgpu_mm {
struct list_head list;
struct list_head lru_list;
+ struct list_head link; /* possible LRI shadow mm list */
} ppgtt_mm;
struct {
void *virtual_ggtt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 9e1787867894..c7c561237883 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -31,7 +31,6 @@
*/
#include <linux/types.h>
-#include <xen/xen.h>
#include <linux/kthread.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 58c2c7932e3f..a4a6db6b7f90 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -163,6 +163,11 @@ struct intel_vgpu_submission {
const struct intel_vgpu_submission_ops *ops;
int virtual_submission_interface;
bool active;
+ struct {
+ u32 lrca;
+ bool valid;
+ u64 ring_context_gpa;
+ } last_ctx[I915_NUM_ENGINES];
};
struct intel_vgpu {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 2faf50e1b051..3e88e3b5c43a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2812,7 +2812,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
#define RING_REG(base) _MMIO((base) + 0x270)
- MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index b17c4a1599cd..b79da5124f83 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -79,6 +79,4 @@ struct intel_gvt_mpt {
bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
};
-extern struct intel_gvt_mpt xengt_mpt;
-
#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index cb11c3184085..0fb1df71c637 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -58,10 +58,8 @@ static void set_context_pdp_root_pointer(
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
- struct drm_i915_gem_object *ctx_obj =
- workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
- struct page *page;
+ struct intel_context *ctx = workload->req->context;
if (WARN_ON(!workload->shadow_mm))
return;
@@ -69,11 +67,9 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
return;
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap(page);
+ shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
set_context_pdp_root_pointer(shadow_ring_context,
(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
- kunmap(page);
}
/*
@@ -128,16 +124,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_gem_object *ctx_obj =
- workload->req->context->state->obj;
+ struct intel_context *ctx = workload->req->context;
struct execlist_ring_context *shadow_ring_context;
- struct page *page;
void *dst;
+ void *context_base;
unsigned long context_gpa, context_page_num;
+ unsigned long gpa_base; /* first gpa of consecutive GPAs */
+ unsigned long gpa_size; /* size of consecutive GPAs */
+ struct intel_vgpu_submission *s = &vgpu->submission;
int i;
+ bool skip = false;
+ int ring_id = workload->engine->id;
+
+ GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
+ context_base = (void *) ctx->lrc_reg_state -
+ (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap(page);
+ shadow_ring_context = (void *) ctx->lrc_reg_state;
sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
@@ -169,23 +173,43 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
- kunmap(page);
- if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
- return 0;
+ gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
+ workload->engine->name, workload->ctx_desc.lrca,
+ workload->ctx_desc.context_id,
+ workload->ring_context_gpa);
- gvt_dbg_sched("ring %s workload lrca %x",
- workload->engine->name,
- workload->ctx_desc.lrca);
+ /* We only need to ensure this context is not pinned/unpinned during
+ * the period from the last submission to this submission.
+ * Upon reaching this function, the currently submitted context is not
+ * supposed to get unpinned. If a misbehaving guest driver ever does
+ * this, it would corrupt itself.
+ */
+ if (s->last_ctx[ring_id].valid &&
+ (s->last_ctx[ring_id].lrca ==
+ workload->ctx_desc.lrca) &&
+ (s->last_ctx[ring_id].ring_context_gpa ==
+ workload->ring_context_gpa))
+ skip = true;
+
+ s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
+ s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
+ if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
+ return 0;
+
+ s->last_ctx[ring_id].valid = false;
context_page_num = workload->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
context_page_num = 19;
- i = 2;
- while (i < context_page_num) {
+ /* Find runs of consecutive GPAs from the gma until the first
+ * non-consecutive GPA, then read each contiguous run into the dst
+ * virtual address in one shot.
+ */
+ gpa_size = 0;
+ for (i = 2; i < context_page_num; i++) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
I915_GTT_PAGE_SHIFT));
@@ -194,13 +218,26 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EFAULT;
}
- page = i915_gem_object_get_page(ctx_obj, i);
- dst = kmap(page);
- intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
- I915_GTT_PAGE_SIZE);
- kunmap(page);
- i++;
+ if (gpa_size == 0) {
+ gpa_base = context_gpa;
+ dst = context_base + (i << I915_GTT_PAGE_SHIFT);
+ } else if (context_gpa != gpa_base + gpa_size)
+ goto read;
+
+ gpa_size += I915_GTT_PAGE_SIZE;
+
+ if (i == context_page_num - 1)
+ goto read;
+
+ continue;
+
+read:
+ intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+ gpa_base = context_gpa;
+ gpa_size = I915_GTT_PAGE_SIZE;
+ dst = context_base + (i << I915_GTT_PAGE_SHIFT);
}
+ s->last_ctx[ring_id].valid = true;
return 0;
}
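The rewritten loop above no longer copies the guest context one page at a time: it accumulates maximal runs of contiguous guest physical pages and issues a single hypervisor read per run (update_guest_context() below mirrors this for writes). A self-contained sketch of the run-coalescing idea, with hypothetical names:

	#include <stddef.h>
	#include <stdint.h>

	#define EX_PAGE_SIZE 4096u

	/* Invoke one copy() per maximal run of contiguous page
	 * addresses instead of one copy() per page. */
	static void coalesce_runs(const uint64_t *gpa, size_t n,
				  void (*copy)(uint64_t base, size_t len))
	{
		uint64_t base = 0;
		size_t len = 0, i;

		for (i = 0; i < n; i++) {
			if (len && gpa[i] != base + len) {
				copy(base, len);	/* flush previous run */
				len = 0;
			}
			if (!len)
				base = gpa[i];
			len += EX_PAGE_SIZE;
		}
		if (len)
			copy(base, len);		/* final run */
	}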
@@ -290,7 +327,7 @@ static void
shadow_context_descriptor_update(struct intel_context *ce,
struct intel_vgpu_workload *workload)
{
- u64 desc = ce->lrc_desc;
+ u64 desc = ce->lrc.desc;
/*
* Update bits 0-11 of the context descriptor which includes flags
@@ -300,7 +337,7 @@ shadow_context_descriptor_update(struct intel_context *ce,
desc |= (u64)workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- ce->lrc_desc = desc;
+ ce->lrc.desc = desc;
}
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
@@ -379,7 +416,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
struct i915_page_directory * const pd =
i915_pd_entry(ppgtt->pd, i);
-
+ /* Skip for now: the current i915 ppgtt allocator does not
+ * allocate a top-level pdp for non-4-level tables, which does
+ * not impact the shadow ppgtt.
+ */
+ if (!pd)
+ break;
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
@@ -595,10 +636,9 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
- if (bb->vma && !IS_ERR(bb->vma)) {
+ if (bb->vma && !IS_ERR(bb->vma))
i915_vma_unpin(bb->vma);
- i915_vma_close(bb->vma);
- }
+
i915_gem_object_put(bb->obj);
}
list_del(&bb->list);
@@ -606,10 +646,11 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
}
}
-static int prepare_workload(struct intel_vgpu_workload *workload)
+static int
+intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_mm *m;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -624,6 +665,52 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
return -EINVAL;
}
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ list_for_each_entry(m, &workload->lri_shadow_mm,
+ ppgtt_mm.link) {
+ ret = intel_vgpu_pin_mm(m);
+ if (ret) {
+ list_for_each_entry_from_reverse(m,
+ &workload->lri_shadow_mm,
+ ppgtt_mm.link)
+ intel_vgpu_unpin_mm(m);
+ gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
+ break;
+ }
+ }
+ }
+
+ if (ret)
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+
+ return ret;
+}
+
+static void
+intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu_mm *m;
+
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ list_for_each_entry(m, &workload->lri_shadow_mm,
+ ppgtt_mm.link)
+ intel_vgpu_unpin_mm(m);
+ }
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+}
+
+static int prepare_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ int ret = 0;
+
+ ret = intel_vgpu_shadow_mm_pin(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to pin shadow mm\n");
+ return ret;
+ }
+
update_shadow_pdps(workload);
set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
@@ -670,7 +757,7 @@ err_shadow_wa_ctx:
err_shadow_batch:
release_shadow_batch_buffer(workload);
err_unpin_mm:
- intel_vgpu_unpin_mm(workload->shadow_mm);
+ intel_vgpu_shadow_mm_unpin(workload);
return ret;
}
@@ -780,15 +867,48 @@ out:
return workload;
}
+static void update_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+{
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
+
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static __maybe_unused bool
+check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
+{
+ if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
+
+ if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
+ gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
+ return false;
+ }
+ return true;
+ } else {
+ /* see comment in LRI handler in cmd_parser.c */
+ gvt_dbg_mm("invalid shadow mm type\n");
+ return false;
+ }
+}
+
static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
- struct page *page;
+ struct intel_context *ctx = workload->req->context;
+ void *context_base;
void *src;
unsigned long context_gpa, context_page_num;
+ unsigned long gpa_base; /* first gpa of consecutive GPAs */
+ unsigned long gpa_size; /* size of consecutive GPAs */
int i;
u32 ring_base;
u32 head, tail;
@@ -797,6 +917,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
workload->ctx_desc.lrca);
+ GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
head = workload->rb_head;
tail = workload->rb_tail;
wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
@@ -820,9 +942,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19;
- i = 2;
+ context_base = (void *) ctx->lrc_reg_state -
+ (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
- while (i < context_page_num) {
+ /* Find runs of consecutive GPAs from the gma until the first
+ * non-consecutive GPA, then write each contiguous run from the src
+ * virtual address in one shot.
+ */
+ gpa_size = 0;
+ for (i = 2; i < context_page_num; i++) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
I915_GTT_PAGE_SHIFT));
@@ -831,19 +958,39 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
- page = i915_gem_object_get_page(ctx_obj, i);
- src = kmap(page);
- intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
- I915_GTT_PAGE_SIZE);
- kunmap(page);
- i++;
+ if (gpa_size == 0) {
+ gpa_base = context_gpa;
+ src = context_base + (i << I915_GTT_PAGE_SHIFT);
+ } else if (context_gpa != gpa_base + gpa_size)
+ goto write;
+
+ gpa_size += I915_GTT_PAGE_SIZE;
+
+ if (i == context_page_num - 1)
+ goto write;
+
+ continue;
+
+write:
+ intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+ gpa_base = context_gpa;
+ gpa_size = I915_GTT_PAGE_SIZE;
+ src = context_base + (i << I915_GTT_PAGE_SHIFT);
}
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap(page);
+ shadow_ring_context = (void *) ctx->lrc_reg_state;
+
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
+ struct intel_vgpu_mm,
+ ppgtt_mm.link);
+ GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
+ update_guest_pdps(vgpu, workload->ring_context_gpa,
+ (void *)m->ppgtt_mm.guest_pdps);
+ }
#define COPY_REG(name) \
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -860,8 +1007,6 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
-
- kunmap(page);
}
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
@@ -955,6 +1100,9 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->complete(workload);
+ intel_vgpu_shadow_mm_unpin(workload);
+ intel_vgpu_destroy_workload(workload);
+
atomic_dec(&s->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
@@ -1260,6 +1408,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
+ memset(s->last_ctx, 0, sizeof(s->last_ctx));
+
i915_vm_put(&ppgtt->vm);
return 0;
@@ -1346,6 +1496,16 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ struct intel_vgpu_mm *m, *mm;
+ list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
+ ppgtt_mm.link) {
+ list_del(&m->ppgtt_mm.link);
+ intel_vgpu_mm_put(m);
+ }
+ }
+
+ GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
if (workload->shadow_mm)
intel_vgpu_mm_put(workload->shadow_mm);
@@ -1364,6 +1524,7 @@ alloc_workload(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&workload->list);
INIT_LIST_HEAD(&workload->shadow_bb);
+ INIT_LIST_HEAD(&workload->lri_shadow_mm);
init_waitqueue_head(&workload->shadow_ctx_status_wq);
atomic_set(&workload->shadow_ctx_active, 0);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index bf7fc0ca4cb1..15d317f2a4a4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -87,6 +87,7 @@ struct intel_vgpu_workload {
int status;
struct intel_vgpu_mm *shadow_mm;
+ struct list_head lri_shadow_mm; /* For PPGTT load cmd */
/* different submission model may need different handler */
int (*prepare)(struct intel_vgpu_workload *);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index c4048628188a..d960d0be5bd2 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -496,7 +496,7 @@ static int flush_lazy_signals(struct i915_active *ref)
return err;
}
-int i915_active_wait(struct i915_active *ref)
+int __i915_active_wait(struct i915_active *ref, int state)
{
int err;
@@ -511,7 +511,9 @@ int i915_active_wait(struct i915_active *ref)
if (err)
return err;
- if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
+ if (!i915_active_is_idle(ref) &&
+ ___wait_var_event(ref, i915_active_is_idle(ref),
+ state, 0, 0, schedule()))
return -EINTR;
flush_work(&ref->work);
@@ -540,34 +542,88 @@ static int __await_active(struct i915_active_fence *active,
return 0;
}
+struct wait_barrier {
+ struct wait_queue_entry base;
+ struct i915_active *ref;
+};
+
+static int
+barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
+{
+ struct wait_barrier *wb = container_of(wq, typeof(*wb), base);
+
+ if (i915_active_is_idle(wb->ref)) {
+ list_del(&wq->entry);
+ i915_sw_fence_complete(wq->private);
+ kfree(wq);
+ }
+
+ return 0;
+}
+
+static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
+{
+ struct wait_barrier *wb;
+
+ wb = kmalloc(sizeof(*wb), GFP_KERNEL);
+ if (unlikely(!wb))
+ return -ENOMEM;
+
+ GEM_BUG_ON(i915_active_is_idle(ref));
+ if (!i915_sw_fence_await(fence)) {
+ kfree(wb);
+ return -EINVAL;
+ }
+
+ wb->base.flags = 0;
+ wb->base.func = barrier_wake;
+ wb->base.private = fence;
+ wb->ref = ref;
+
+ add_wait_queue(__var_waitqueue(ref), &wb->base);
+ return 0;
+}
+
static int await_active(struct i915_active *ref,
unsigned int flags,
int (*fn)(void *arg, struct dma_fence *fence),
- void *arg)
+ void *arg, struct i915_sw_fence *barrier)
{
int err = 0;
- /* We must always wait for the exclusive fence! */
- if (rcu_access_pointer(ref->excl.fence)) {
+ if (!i915_active_acquire_if_busy(ref))
+ return 0;
+
+ if (flags & I915_ACTIVE_AWAIT_EXCL &&
+ rcu_access_pointer(ref->excl.fence)) {
err = __await_active(&ref->excl, fn, arg);
if (err)
- return err;
+ goto out;
}
- if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+ if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
struct active_node *it, *n;
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
err = __await_active(&it->base, fn, arg);
if (err)
- break;
+ goto out;
}
- i915_active_release(ref);
+ }
+
+ if (flags & I915_ACTIVE_AWAIT_BARRIER) {
+ err = flush_lazy_signals(ref);
if (err)
- return err;
+ goto out;
+
+ err = __await_barrier(ref, barrier);
+ if (err)
+ goto out;
}
- return 0;
+out:
+ i915_active_release(ref);
+ return err;
}
static int rq_await_fence(void *arg, struct dma_fence *fence)
@@ -579,7 +635,7 @@ int i915_request_await_active(struct i915_request *rq,
struct i915_active *ref,
unsigned int flags)
{
- return await_active(ref, flags, rq_await_fence, rq);
+ return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}
static int sw_await_fence(void *arg, struct dma_fence *fence)
@@ -592,7 +648,7 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
struct i915_active *ref,
unsigned int flags)
{
- return await_active(ref, flags, sw_await_fence, fence);
+ return await_active(ref, flags, sw_await_fence, fence, fence);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -818,7 +874,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
- intel_engine_pm_put(engine);
+ intel_engine_pm_put_delay(engine, 1);
}
}
@@ -937,6 +993,59 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
active_fence_cb(fence, cb);
}
+struct auto_active {
+ struct i915_active base;
+ struct kref ref;
+};
+
+struct i915_active *i915_active_get(struct i915_active *ref)
+{
+ struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+ kref_get(&aa->ref);
+ return &aa->base;
+}
+
+static void auto_release(struct kref *ref)
+{
+ struct auto_active *aa = container_of(ref, typeof(*aa), ref);
+
+ i915_active_fini(&aa->base);
+ kfree(aa);
+}
+
+void i915_active_put(struct i915_active *ref)
+{
+ struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+ kref_put(&aa->ref, auto_release);
+}
+
+static int auto_active(struct i915_active *ref)
+{
+ i915_active_get(ref);
+ return 0;
+}
+
+static void auto_retire(struct i915_active *ref)
+{
+ i915_active_put(ref);
+}
+
+struct i915_active *i915_active_create(void)
+{
+ struct auto_active *aa;
+
+ aa = kmalloc(sizeof(*aa), GFP_KERNEL);
+ if (!aa)
+ return NULL;
+
+ kref_init(&aa->ref);
+ i915_active_init(&aa->base, auto_active, auto_retire);
+
+ return &aa->base;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif
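[Editor's note: i915_active_create()/i915_active_get()/i915_active_put() above wrap an i915_active in a kref so the tracker can outlive its creator: auto_active() takes a reference when the tracker becomes busy and auto_retire() drops it again on retirement. A hedged usage sketch, error paths trimmed, using only the acquire/release API declared in i915_active.h below:]

static int use_auto_tracker(void)
{
	struct i915_active *ref;
	int err;

	ref = i915_active_create();	/* creator holds one kref */
	if (!ref)
		return -ENOMEM;

	err = i915_active_acquire(ref);	/* first acquire -> auto_active() */
	if (err) {
		i915_active_put(ref);
		return err;
	}

	/* ... record fences against ref while it is held active ... */

	i915_active_release(ref);	/* retirement -> auto_retire() */
	i915_active_put(ref);		/* drop the creator's reference */
	return 0;
}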
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index b3282ae7913c..cf4058150966 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -181,7 +181,11 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
return rcu_access_pointer(ref->excl.fence);
}
-int i915_active_wait(struct i915_active *ref);
+int __i915_active_wait(struct i915_active *ref, int state);
+static inline int i915_active_wait(struct i915_active *ref)
+{
+ return __i915_active_wait(ref, TASK_INTERRUPTIBLE);
+}
int i915_sw_fence_await_active(struct i915_sw_fence *fence,
struct i915_active *ref,
@@ -189,7 +193,9 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
int i915_request_await_active(struct i915_request *rq,
struct i915_active *ref,
unsigned int flags);
-#define I915_ACTIVE_AWAIT_ALL BIT(0)
+#define I915_ACTIVE_AWAIT_EXCL BIT(0)
+#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
+#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
@@ -221,4 +227,8 @@ void i915_request_add_active_barriers(struct i915_request *rq);
void i915_active_print(struct i915_active *ref, struct drm_printer *m);
void i915_active_unlock_wait(struct i915_active *ref);
+struct i915_active *i915_active_create(void);
+struct i915_active *i915_active_get(struct i915_active *ref);
+void i915_active_put(struct i915_active *ref);
+
#endif /* _I915_ACTIVE_H_ */
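[Editor's note: with the flag split above, I915_ACTIVE_AWAIT_ALL is gone and the exclusive fence is no longer awaited unconditionally; each class must be requested explicitly. To keep the old "wait on everything tracked" behaviour, a caller would now pass both flags:]

err = i915_request_await_active(rq, ref,
				I915_ACTIVE_AWAIT_EXCL |
				I915_ACTIVE_AWAIT_ACTIVE);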
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
new file mode 100644
index 000000000000..b79b5f6d2cfa
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+unsigned long
+i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
+{
+ if (context && IS_ACTIVE(CONFIG_DRM_I915_FENCE_TIMEOUT))
+ return msecs_to_jiffies_timeout(CONFIG_DRM_I915_FENCE_TIMEOUT);
+
+ return 0;
+}
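[Editor's note: the helper above maps every user fence context (context != 0) to the Kconfig'd CONFIG_DRM_I915_FENCE_TIMEOUT in jiffies, while the kernel context (context == 0) stays unbounded. A hedged sketch of a plausible call site — bounding a request's wait on a foreign fence — assuming an i915_sw_fence_await_dma_fence() variant that takes a jiffies timeout as its third argument:]

err = i915_sw_fence_await_dma_fence(&rq->submit, fence,
				    i915_fence_context_timeout(i915,
								fence->context),
				    GFP_KERNEL);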
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6ca797128aa1..bca036ac6621 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,12 +32,13 @@
#include <drm/drm_debugfs.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_buffer_pool.h"
+#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
-#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
@@ -218,7 +219,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct file_stats {
struct i915_address_space *vm;
unsigned long count;
- u64 total, unbound;
+ u64 total;
u64 active, inactive;
u64 closed;
};
@@ -234,8 +235,6 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (!atomic_read(&obj->bind_count))
- stats->unbound += obj->base.size;
spin_lock(&obj->vma.lock);
if (!stats->vm) {
@@ -285,13 +284,12 @@ static int per_file_stats(int id, void *ptr, void *data)
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
name, \
stats.count, \
stats.total, \
stats.active, \
stats.inactive, \
- stats.unbound, \
stats.closed); \
} while (0)
@@ -745,7 +743,7 @@ i915_error_state_write(struct file *filp,
if (!error)
return 0;
- DRM_DEBUG_DRIVER("Resetting error state\n");
+ drm_dbg(&error->i915->drm, "Resetting error state\n");
i915_reset_error_state(error->i915);
return cnt;
@@ -930,21 +928,30 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
- rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
- seq_printf(m, "RP CUR UP: %d (%dus)\n",
- rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dus)\n",
- rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
+ seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ rpupei,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
+ seq_printf(m, "RP CUR UP: %d (%dns)\n",
+ rpcurup,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
+ seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ rpprevup,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
- rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
- rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
- rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
+ seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ rpdownei,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt,
+ rpdownei));
+ seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ rpcurdown,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt,
+ rpcurdown));
+ seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ rpprevdown,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt,
+ rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
rps->power.down_threshold);
@@ -1193,7 +1200,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt.rps;
- seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+ seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
+ seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
@@ -1213,7 +1221,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
- if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
+ if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -1251,286 +1259,6 @@ static int i915_llc(struct seq_file *m, void *data)
return 0;
}
-static int i915_huc_load_status_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct drm_printer p;
-
- if (!HAS_GT_UC(dev_priv))
- return -ENODEV;
-
- p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
-
- return 0;
-}
-
-static int i915_guc_load_status_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct drm_printer p;
-
- if (!HAS_GT_UC(dev_priv))
- return -ENODEV;
-
- p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- u32 tmp = I915_READ(GUC_STATUS);
- u32 i;
-
- seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
- seq_printf(m, "\tBootrom status = 0x%x\n",
- (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
- seq_printf(m, "\tuKernel status = 0x%x\n",
- (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
- seq_printf(m, "\tMIA Core status = 0x%x\n",
- (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
- seq_puts(m, "\nScratch registers:\n");
- for (i = 0; i < 16; i++) {
- seq_printf(m, "\t%2d: \t0x%x\n",
- i, I915_READ(SOFT_SCRATCH(i)));
- }
- }
-
- return 0;
-}
-
-static const char *
-stringify_guc_log_type(enum guc_log_buffer_type type)
-{
- switch (type) {
- case GUC_ISR_LOG_BUFFER:
- return "ISR";
- case GUC_DPC_LOG_BUFFER:
- return "DPC";
- case GUC_CRASH_DUMP_LOG_BUFFER:
- return "CRASH";
- default:
- MISSING_CASE(type);
- }
-
- return "";
-}
-
-static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
-{
- enum guc_log_buffer_type type;
-
- if (!intel_guc_log_relay_created(log)) {
- seq_puts(m, "GuC log relay not created\n");
- return;
- }
-
- seq_puts(m, "GuC logging stats:\n");
-
- seq_printf(m, "\tRelay full count: %u\n",
- log->relay.full_count);
-
- for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
- seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
- stringify_guc_log_type(type),
- log->stats[type].flush,
- log->stats[type].sampled_overflow);
- }
-}
-
-static int i915_guc_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc *uc = &dev_priv->gt.uc;
-
- if (!intel_uc_uses_guc(uc))
- return -ENODEV;
-
- i915_guc_log_info(m, &uc->guc.log);
-
- /* Add more as required ... */
-
- return 0;
-}
-
-static int i915_guc_stage_pool(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc *uc = &dev_priv->gt.uc;
- struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
- int index;
-
- if (!intel_uc_uses_guc_submission(uc))
- return -ENODEV;
-
- for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
- struct intel_engine_cs *engine;
-
- if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
- continue;
-
- seq_printf(m, "GuC stage descriptor %u:\n", index);
- seq_printf(m, "\tIndex: %u\n", desc->stage_id);
- seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
- seq_printf(m, "\tPriority: %d\n", desc->priority);
- seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
- seq_printf(m, "\tEngines used: 0x%x\n",
- desc->engines_used);
- seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
- desc->db_trigger_phy,
- desc->db_trigger_cpu,
- desc->db_trigger_uk);
- seq_printf(m, "\tProcess descriptor: 0x%x\n",
- desc->process_desc);
- seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
- desc->wq_addr, desc->wq_size);
- seq_putc(m, '\n');
-
- for_each_uabi_engine(engine, dev_priv) {
- u32 guc_engine_id = engine->guc_id;
- struct guc_execlist_context *lrc =
- &desc->lrc[guc_engine_id];
-
- seq_printf(m, "\t%s LRC:\n", engine->name);
- seq_printf(m, "\t\tContext desc: 0x%x\n",
- lrc->context_desc);
- seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
- seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
- seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
- seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
- seq_putc(m, '\n');
- }
- }
-
- return 0;
-}
-
-static int i915_guc_log_dump(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = node_to_i915(node);
- bool dump_load_err = !!node->info_ent->data;
- struct drm_i915_gem_object *obj = NULL;
- u32 *log;
- int i = 0;
-
- if (!HAS_GT_UC(dev_priv))
- return -ENODEV;
-
- if (dump_load_err)
- obj = dev_priv->gt.uc.load_err_log;
- else if (dev_priv->gt.uc.guc.log.vma)
- obj = dev_priv->gt.uc.guc.log.vma->obj;
-
- if (!obj)
- return 0;
-
- log = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(log)) {
- DRM_DEBUG("Failed to pin object\n");
- seq_puts(m, "(log data unaccessible)\n");
- return PTR_ERR(log);
- }
-
- for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
- seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(log + i), *(log + i + 1),
- *(log + i + 2), *(log + i + 3));
-
- seq_putc(m, '\n');
-
- i915_gem_object_unpin_map(obj);
-
- return 0;
-}
-
-static int i915_guc_log_level_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_uc *uc = &dev_priv->gt.uc;
-
- if (!intel_uc_uses_guc(uc))
- return -ENODEV;
-
- *val = intel_guc_log_get_level(&uc->guc.log);
-
- return 0;
-}
-
-static int i915_guc_log_level_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_uc *uc = &dev_priv->gt.uc;
-
- if (!intel_uc_uses_guc(uc))
- return -ENODEV;
-
- return intel_guc_log_set_level(&uc->guc.log, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
- i915_guc_log_level_get, i915_guc_log_level_set,
- "%lld\n");
-
-static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct intel_guc *guc = &i915->gt.uc.guc;
- struct intel_guc_log *log = &guc->log;
-
- if (!intel_guc_is_ready(guc))
- return -ENODEV;
-
- file->private_data = log;
-
- return intel_guc_log_relay_open(log);
-}
-
-static ssize_t
-i915_guc_log_relay_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct intel_guc_log *log = filp->private_data;
- int val;
- int ret;
-
- ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
- if (ret < 0)
- return ret;
-
- /*
- * Enable and start the guc log relay on value of 1.
- * Flush log relay for any other value.
- */
- if (val == 1)
- ret = intel_guc_log_relay_start(log);
- else
- intel_guc_log_relay_flush(log);
-
- return ret ?: cnt;
-}
-
-static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct intel_guc *guc = &i915->gt.uc.guc;
-
- intel_guc_log_relay_close(&guc->log);
- return 0;
-}
-
-static const struct file_operations i915_guc_log_relay_fops = {
- .owner = THIS_MODULE,
- .open = i915_guc_log_relay_open,
- .write = i915_guc_log_relay_write,
- .release = i915_guc_log_relay_release,
-};
-
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1576,8 +1304,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "GT awake? %s [%d]\n",
yesno(dev_priv->gt.awake),
atomic_read(&dev_priv->gt.wakeref.count));
- seq_printf(m, "CS timestamp frequency: %u kHz\n",
- RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
+ seq_printf(m, "CS timestamp frequency: %u Hz\n",
+ RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
p = drm_seq_file_printer(m);
for_each_uabi_engine(engine, dev_priv)
@@ -1676,13 +1404,12 @@ static int
i915_perf_noa_delay_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
/*
* This would lead to infinite waits as we're doing timestamp
* difference on the CS with only 32bits.
*/
- if (val > mul_u32_u32(U32_MAX, clk))
+ if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
return -EINVAL;
atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -1757,6 +1484,9 @@ gt_drop_caches(struct intel_gt *gt, u64 val)
if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
+ if (val & DROP_FREED)
+ intel_gt_flush_buffer_pool(gt);
+
return 0;
}
@@ -2139,12 +1869,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_guc_info", i915_guc_info, 0},
- {"i915_guc_load_status", i915_guc_load_status_info, 0},
- {"i915_guc_log_dump", i915_guc_log_dump, 0},
- {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
- {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
- {"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_context_status", i915_context_status, 0},
@@ -2172,11 +1896,9 @@ static const struct i915_debugfs_files {
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
- {"i915_guc_log_level", &i915_guc_log_level_fops},
- {"i915_guc_log_relay", &i915_guc_log_relay_fops},
};
-int i915_debugfs_register(struct drm_i915_private *dev_priv)
+void i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
int i;
@@ -2193,7 +1915,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
i915_debugfs_files[i].fops);
}
- return drm_debugfs_create_files(i915_debugfs_list,
- I915_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(i915_debugfs_list,
+ I915_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
index 6da39c76ab5e..1de2736f1248 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.h
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -12,10 +12,10 @@ struct drm_i915_private;
struct seq_file;
#ifdef CONFIG_DEBUG_FS
-int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_register(struct drm_i915_private *dev_priv);
void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj);
#else
-static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
+static inline void i915_debugfs_register(struct drm_i915_private *dev_priv) {}
static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 81a4621853db..34ee12f3f02d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,6 +43,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "display/intel_acpi.h"
@@ -227,14 +228,14 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
ret = drm_vblank_init(&i915->drm,
INTEL_NUM_PIPES(i915));
if (ret)
- goto out;
+ return ret;
}
intel_bios_init(i915);
ret = intel_vga_register(i915);
if (ret)
- goto out;
+ goto cleanup_bios;
intel_power_domains_init_hw(i915, false);
@@ -242,13 +243,16 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
ret = intel_modeset_init_noirq(i915);
if (ret)
- goto cleanup_vga_client;
+ goto cleanup_vga_client_pw_domain_csr;
return 0;
-cleanup_vga_client:
+cleanup_vga_client_pw_domain_csr:
+ intel_csr_ucode_fini(i915);
+ intel_power_domains_driver_remove(i915);
intel_vga_unregister(i915);
-out:
+cleanup_bios:
+ intel_bios_driver_remove(i915);
return ret;
}
@@ -307,13 +311,13 @@ static void i915_driver_modeset_remove(struct drm_i915_private *i915)
/* part #2: call after irq uninstall */
static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
{
- intel_modeset_driver_remove_noirq(i915);
+ intel_csr_ucode_fini(i915);
- intel_bios_driver_remove(i915);
+ intel_power_domains_driver_remove(i915);
intel_vga_unregister(i915);
- intel_csr_ucode_fini(i915);
+ intel_bios_driver_remove(i915);
}
static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -566,6 +570,62 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
}
/**
+ * i915_set_dma_info - set all relevant PCI DMA info as configured for the platform
+ * @i915: valid i915 instance
+ *
+ * Set the DMA max segment size, device and coherent masks. The DMA mask set
+ * needs to occur before i915_ggtt_probe_hw().
+ *
+ * A couple of platforms have special needs. Address them as well.
+ */
+static int i915_set_dma_info(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
+ int ret;
+
+ GEM_BUG_ON(!mask_size);
+
+ /*
+ * We don't have a max segment size, so set it to the max so sg's
+ * debugging layer doesn't complain
+ */
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+ if (ret)
+ goto mask_err;
+
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN(i915, 2))
+ mask_size = 30;
+
+ /*
+ * 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_I965G(i915) || IS_I965GM(i915))
+ mask_size = 32;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+ if (ret)
+ goto mask_err;
+
+ return 0;
+
+mask_err:
+ drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
+ return ret;
+}
+
+/**
* i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
*
@@ -610,6 +670,10 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
/* needs to be done before ggtt probe */
intel_dram_edram_detect(dev_priv);
+ ret = i915_set_dma_info(dev_priv);
+ if (ret)
+ return ret;
+
i915_perf_init(dev_priv);
ret = i915_ggtt_probe_hw(dev_priv);
@@ -638,40 +702,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- /*
- * We don't have a max segment size, so set it to the max so sg's
- * debugging layer doesn't complain
- */
- dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-
- /* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN(dev_priv, 2)) {
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
- if (ret) {
- drm_err(&dev_priv->drm, "failed to set DMA mask\n");
-
- goto err_mem_regions;
- }
- }
-
- /* 965GM sometimes incorrectly writes to hardware status page (HWS)
- * using 32bit addressing, overwriting memory if HWS is located
- * above 4GB.
- *
- * The documentation also mentions an issue with undefined
- * behaviour if any general state is accessed within a page above 4GB,
- * which also needs to be handled carefully.
- */
- if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
- if (ret) {
- drm_err(&dev_priv->drm, "failed to set DMA mask\n");
-
- goto err_mem_regions;
- }
- }
-
cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_gt_init_workarounds(dev_priv);
@@ -876,17 +906,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
struct drm_i915_private *i915;
- int err;
-
- i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
- if (!i915)
- return ERR_PTR(-ENOMEM);
- err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
- if (err) {
- kfree(i915);
- return ERR_PTR(err);
- }
+ i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
+ struct drm_i915_private, drm);
+ if (IS_ERR(i915))
+ return i915;
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
@@ -901,17 +925,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return i915;
}
-static void i915_driver_destroy(struct drm_i915_private *i915)
-{
- struct pci_dev *pdev = i915->drm.pdev;
-
- drm_dev_fini(&i915->drm);
- kfree(i915);
-
- /* And make sure we never chase our dangling pointer from pci_dev */
- pci_set_drvdata(pdev, NULL);
-}
-
/**
* i915_driver_probe - setup chip and create an initial config
* @pdev: PCI device
@@ -993,12 +1006,14 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i915_welcome_messages(i915);
+ i915->do_release = true;
+
return 0;
out_cleanup_irq:
intel_irq_uninstall(i915);
out_cleanup_modeset:
- /* FIXME */
+ i915_driver_modeset_remove_noirq(i915);
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -1012,7 +1027,6 @@ out_pci_disable:
pci_disable_device(pdev);
out_fini:
i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
- i915_driver_destroy(i915);
return ret;
}
@@ -1035,12 +1049,12 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_irq_uninstall(i915);
- i915_driver_modeset_remove_noirq(i915);
+ intel_modeset_driver_remove_noirq(i915);
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
- intel_power_domains_driver_remove(i915);
+ i915_driver_modeset_remove_noirq(i915);
i915_driver_hw_remove(i915);
@@ -1052,6 +1066,9 @@ static void i915_driver_release(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ if (!dev_priv->do_release)
+ return;
+
disable_rpm_wakeref_asserts(rpm);
i915_gem_driver_release(dev_priv);
@@ -1065,7 +1082,6 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
- i915_driver_destroy(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1286,7 +1302,6 @@ static int i915_drm_resume(struct drm_device *dev)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
i915_ggtt_resume(&dev_priv->ggtt);
- i915_gem_restore_fences(&dev_priv->ggtt);
intel_csr_ucode_resume(dev_priv);
@@ -1604,8 +1619,6 @@ static int intel_runtime_suspend(struct device *kdev)
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(&dev_priv->ggtt);
-
enable_rpm_wakeref_asserts(rpm);
return ret;
@@ -1685,7 +1698,6 @@ static int intel_runtime_resume(struct device *kdev)
* we can do is to hope that things will still work (and disable RPM).
*/
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(&dev_priv->ggtt);
/*
* On VLV/CHV display interrupts are part of the display
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 62b901ffabf9..adb9bf34cf97 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -92,7 +92,6 @@
#include "intel_wopcm.h"
#include "i915_gem.h"
-#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_perf_types.h"
@@ -109,8 +108,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200313"
-#define DRIVER_TIMESTAMP 1584144591
+#define DRIVER_DATE "20200515"
+#define DRIVER_TIMESTAMP 1589543364
struct drm_i915_gem_object;
@@ -149,6 +148,8 @@ enum hpd_pin {
struct i915_hotplug {
struct delayed_work hotplug_work;
+ const u32 *hpd, *pch_hpd;
+
struct {
unsigned long last_jiffies;
int count;
@@ -417,6 +418,7 @@ struct intel_fbc {
struct {
const struct drm_format_info *format;
unsigned int stride;
+ u64 modifier;
} fb;
u16 gen9_wa_cfb_stride;
s8 fence_id;
@@ -510,6 +512,7 @@ struct i915_psr {
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
bool force_mode_changed;
+ struct drm_dp_vsc_sdp vsc;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -540,7 +543,6 @@ struct i915_suspend_saved_registers {
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF3[3];
- u64 saveFENCE[I915_MAX_NUM_FENCES];
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@@ -615,13 +617,14 @@ struct i915_gem_mm {
#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
-#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
-#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
-
-#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
-#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
+unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
+ u64 context);
-#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
+static inline unsigned long
+i915_fence_timeout(const struct drm_i915_private *i915)
+{
+ return i915_fence_context_timeout(i915, U64_MAX);
+}
/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8
@@ -823,6 +826,9 @@ struct i915_selftest_stash {
struct drm_i915_private {
struct drm_device drm;
+ /* FIXME: Device release actions should all be moved to drmm_ */
+ bool do_release;
+
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
@@ -885,7 +891,6 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
- struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct rb_root uabi_engines;
struct resource mch_res;
@@ -1506,6 +1511,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ICL_REVID(p, since, until) \
(IS_ICELAKE(p) && IS_REVID(p, since, until))
+#define EHL_REVID_A0 0x0
+
+#define IS_EHL_REVID(p, since, until) \
+ (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
+
#define TGL_REVID_A0 0x0
#define TGL_REVID_B0 0x1
#define TGL_REVID_C0 0x2
@@ -1606,7 +1616,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
-#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
+#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -1740,6 +1750,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -1913,4 +1924,16 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
+static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
+{
+ return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
+ 1000000000);
+}
+
+static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
+{
+ return div_u64(val * 1000000000,
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
+}
+
#endif
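[Editor's note: a worked example of the two conversion helpers above, assuming a hypothetical 12 MHz command-streamer clock, i.e. cs_timestamp_frequency_hz == 12000000:]

/*
 * i915_cs_timestamp_ns_to_ticks(i915, 1000)
 *	= DIV_ROUND_UP_ULL(1000 * 12000000, 1000000000) = 12 ticks
 *
 * i915_cs_timestamp_ticks_to_ns(i915, 12)
 *	= div_u64(12 * 1000000000, 12000000) = 1000 ns
 *
 * The round-up in ns_to_ticks() means the programmed tick count is never
 * shorter than the requested delay, which is presumably why the
 * i915_perf_noa_delay_set() bound check above compares it against U32_MAX.
 */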
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ca5420012a22..0cbcb9f54e7d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -118,7 +118,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if (!atomic_read(&obj->bind_count))
+ if (list_empty(&obj->vma.list))
return 0;
/*
@@ -141,6 +141,11 @@ try_again:
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
continue;
+ if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
+ ret = -EBUSY;
+ break;
+ }
+
ret = -EAGAIN;
if (!i915_vm_tryopen(vm))
break;
@@ -993,18 +998,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
+ ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ if (ret)
+ return ERR_PTR(ret);
+
if (vma->fence && !i915_gem_object_is_tiled(obj)) {
mutex_lock(&ggtt->vm.mutex);
- ret = i915_vma_revoke_fence(vma);
+ i915_vma_revoke_fence(vma);
mutex_unlock(&ggtt->vm.mutex);
- if (ret)
- return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
- if (ret)
- return ERR_PTR(ret);
-
ret = i915_vma_wait_for_bind(vma);
if (ret) {
i915_vma_unpin(vma);
@@ -1156,7 +1159,6 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_ggtt_resume(&dev_priv->ggtt);
- i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
}
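[Editor's note: I915_GEM_OBJECT_UNBIND_TEST, added above, turns i915_gem_object_unbind() into a side-effect-free probe: the first still-bound vma makes it return -EBUSY instead of waiting or unbinding. A minimal sketch of a caller, with a hypothetical helper name:]

static bool unbind_would_block(struct drm_i915_gem_object *obj)
{
	/* No waits, no unbinds: -EBUSY reports "a vma is still bound". */
	return i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_TEST) == -EBUSY;
}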
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4518b9b35c3d..6501939929d5 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -128,6 +128,13 @@ search_again:
active = NULL;
INIT_LIST_HEAD(&eviction_list);
list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
+ if (vma == active) { /* now seen this vma twice */
+ if (flags & PIN_NONBLOCK)
+ break;
+
+ active = ERR_PTR(-EAGAIN);
+ }
+
/*
* We keep this list in a rough least-recently scanned order
* of active elements (inactive elements are cheap to reap).
@@ -143,21 +150,12 @@ search_again:
* To notice when we complete one full cycle, we record the
* first active element seen, before moving it to the tail.
*/
- if (i915_vma_is_active(vma)) {
- if (vma == active) {
- if (flags & PIN_NONBLOCK)
- break;
-
- active = ERR_PTR(-EAGAIN);
- }
-
- if (active != ERR_PTR(-EAGAIN)) {
- if (!active)
- active = vma;
+ if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
+ if (!active)
+ active = vma;
- list_move_tail(&vma->vm_link, &vm->bound_list);
- continue;
- }
+ list_move_tail(&vma->vm_link, &vm->bound_list);
+ continue;
}
if (mark_free(&scan, vma, flags, &eviction_list))
@@ -228,7 +226,12 @@ found:
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
- ret = __i915_vma_unbind(vma);
+
+ /* If we find any non-objects (!vma), we cannot evict them */
+ if (vma->node.color != I915_COLOR_UNEVICTABLE)
+ ret = __i915_vma_unbind(vma);
+ else
+ ret = -ENOSPC; /* XXX search failed, try again? */
}
return ret;
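[Editor's note: the eviction rework above hoists the "seen this vma twice" test to the top of the loop, so the scan also terminates when busy entries keep being rotated to the tail of the bound list. The general shape of that cycle detector, sketched with hypothetical list-item types:]

struct item *first_busy = NULL;
struct item *it, *next;

list_for_each_entry_safe(it, next, &lru, link) {
	if (it == first_busy)		/* one full rotation completed */
		break;			/* (or switch to blocking, as above) */

	if (item_is_busy(it)) {
		if (!first_busy)
			first_busy = it;	/* remember where the cycle began */
		list_move_tail(&it->link, &lru);
		continue;
	}

	/* ... it is idle: try to reclaim it ... */
}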
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 54fce81d5724..d042644b9cd2 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -153,7 +153,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = 1000 * RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+ value = RUNTIME_INFO(i915)->cs_timestamp_frequency_hz;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2a4cd0ba5464..eec292d06f11 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -467,14 +467,14 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n",
+ err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
prefix, erq->pid, erq->context, erq->seqno,
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&erq->flags) ? "+" : "",
erq->sched_attr.priority,
- erq->start, erq->head, erq->tail);
+ erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
@@ -1207,21 +1207,22 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
static void record_request(const struct i915_request *request,
struct i915_request_coredump *erq)
{
- const struct i915_gem_context *ctx;
-
erq->flags = request->fence.flags;
erq->context = request->fence.context;
erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
- erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
erq->tail = request->tail;
erq->pid = 0;
rcu_read_lock();
- ctx = rcu_dereference(request->context->gem_context);
- if (ctx)
- erq->pid = pid_nr(ctx->pid);
+ if (!intel_context_is_closed(request->context)) {
+ const struct i915_gem_context *ctx;
+
+ ctx = rcu_dereference(request->context->gem_context);
+ if (ctx)
+ erq->pid = pid_nr(ctx->pid);
+ }
rcu_read_unlock();
}
@@ -1319,26 +1320,6 @@ capture_user(struct intel_engine_capture_vma *capture,
return capture;
}
-static struct i915_vma_coredump *
-capture_object(const struct intel_gt *gt,
- struct drm_i915_gem_object *obj,
- const char *name,
- struct i915_vma_compress *compress)
-{
- if (obj && i915_gem_object_has_pages(obj)) {
- struct i915_vma fake = {
- .node = { .start = U64_MAX, .size = obj->base.size },
- .size = obj->base.size,
- .pages = obj->mm.pages,
- .obj = obj,
- };
-
- return i915_vma_coredump_create(gt, &fake, name, compress);
- } else {
- return NULL;
- }
-}
-
static void add_vma(struct intel_engine_coredump *ee,
struct i915_vma_coredump *vma)
{
@@ -1427,12 +1408,6 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
engine->wa_ctx.vma,
"WA context",
compress));
-
- add_vma(ee,
- capture_object(engine->gt,
- engine->default_state,
- "NULL context",
- compress));
}
static struct intel_engine_coredump *
@@ -1858,7 +1833,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
return;
i915 = error->i915;
- dev_info(i915->drm.dev, "%s\n", error_msg(error));
+ drm_info(&i915->drm, "%s\n", error_msg(error));
if (error->simulated ||
cmpxchg(&i915->gpu_error.first_error, NULL, error))
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 0d1f6c8ff355..76b80fbfb7e9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -42,7 +42,7 @@ struct i915_vma_coredump {
int num_pages;
int page_count;
int unused;
- u32 *pages[0];
+ u32 *pages[];
};
struct i915_request_coredump {
@@ -50,7 +50,6 @@ struct i915_request_coredump {
pid_t pid;
u32 context;
u32 seqno;
- u32 start;
u32 head;
u32 tail;
struct i915_sched_attr sched_attr;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d91557d842dc..4dc601dffc08 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -124,7 +124,6 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
-/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
@@ -168,6 +167,49 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
+static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
+{
+ struct i915_hotplug *hpd = &dev_priv->hotplug;
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv))
+ hpd->hpd = hpd_status_g4x;
+ else
+ hpd->hpd = hpd_status_i915;
+ return;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ hpd->hpd = hpd_gen12;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ hpd->hpd = hpd_gen11;
+ else if (IS_GEN9_LP(dev_priv))
+ hpd->hpd = hpd_bxt;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ hpd->hpd = hpd_bdw;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ hpd->hpd = hpd_ivb;
+ else
+ hpd->hpd = hpd_ilk;
+
+ if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
+ return;
+
+ if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv))
+ hpd->pch_hpd = hpd_tgp;
+ else if (HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
+ hpd->pch_hpd = hpd_icp;
+ else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
+ hpd->pch_hpd = hpd_spt;
+ else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
+ hpd->pch_hpd = hpd_cpt;
+ else if (HAS_PCH_IBX(dev_priv))
+ hpd->pch_hpd = hpd_ibx;
+ else
+ MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
+}
+
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1504,33 +1546,27 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_status)
{
u32 pin_mask = 0, long_mask = 0;
+ u32 hotplug_trigger;
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
-
- if (hotplug_trigger) {
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- hotplug_trigger, hotplug_trigger,
- hpd_status_g4x,
- i9xx_port_hotplug_long_detect);
+ if (IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+ else
+ hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
- }
+ if (hotplug_trigger) {
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, hotplug_trigger,
+ dev_priv->hotplug.hpd,
+ i9xx_port_hotplug_long_detect);
- if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev_priv);
- } else {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
-
- if (hotplug_trigger) {
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- hotplug_trigger, hotplug_trigger,
- hpd_status_i915,
- i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
- }
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
+
+ if ((IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev_priv);
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1696,8 +1732,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
}
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
- u32 hotplug_trigger,
- const u32 hpd[HPD_NUM_PINS])
+ u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
@@ -1720,8 +1755,9 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!hotplug_trigger)
return;
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
- dig_hotplug_reg, hpd,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
pch_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1732,7 +1768,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1820,7 +1856,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -1857,22 +1893,18 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
u32 ddi_hotplug_trigger, tc_hotplug_trigger;
u32 pin_mask = 0, long_mask = 0;
bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
- const u32 *pins;
if (HAS_PCH_TGP(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
- pins = hpd_tgp;
} else if (HAS_PCH_JSP(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
tc_hotplug_trigger = 0;
- pins = hpd_tgp;
} else if (HAS_PCH_MCC(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
- pins = hpd_icp;
} else {
drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
"Unrecognized PCH type 0x%x\n",
@@ -1881,7 +1913,6 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
- pins = hpd_icp;
}
if (ddi_hotplug_trigger) {
@@ -1891,8 +1922,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- ddi_hotplug_trigger,
- dig_hotplug_reg, pins,
+ ddi_hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
@@ -1903,8 +1934,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- tc_hotplug_trigger,
- dig_hotplug_reg, pins,
+ tc_hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
tc_port_hotplug_long_detect);
}
@@ -1929,7 +1960,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- hotplug_trigger, dig_hotplug_reg, hpd_spt,
+ hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
@@ -1940,7 +1972,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- hotplug2_trigger, dig_hotplug_reg, hpd_spt,
+ hotplug2_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
@@ -1952,16 +1985,16 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
}
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
- u32 hotplug_trigger,
- const u32 hpd[HPD_NUM_PINS])
+ u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
- dig_hotplug_reg, hpd,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.hpd,
ilk_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1974,7 +2007,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev_priv);
@@ -2020,7 +2053,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev_priv);
@@ -2130,16 +2163,16 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
}
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
- u32 hotplug_trigger,
- const u32 hpd[HPD_NUM_PINS])
+ u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
- dig_hotplug_reg, hpd,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.hpd,
bxt_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -2151,15 +2184,11 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
long_pulse_detect_func long_pulse_detect;
- const u32 *hpd;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (INTEL_GEN(dev_priv) >= 12)
long_pulse_detect = gen12_port_hotplug_long_detect;
- hpd = hpd_gen12;
- } else {
+ else
long_pulse_detect = gen11_port_hotplug_long_detect;
- hpd = hpd_gen11;
- }
if (trigger_tc) {
u32 dig_hotplug_reg;
@@ -2167,8 +2196,10 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
- dig_hotplug_reg, hpd, long_pulse_detect);
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ trigger_tc, dig_hotplug_reg,
+ dev_priv->hotplug.hpd,
+ long_pulse_detect);
}
if (trigger_tbt) {
@@ -2177,8 +2208,10 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
- dig_hotplug_reg, hpd, long_pulse_detect);
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ trigger_tbt, dig_hotplug_reg,
+ dev_priv->hotplug.hpd,
+ long_pulse_detect);
}
if (pin_mask)
@@ -2309,15 +2342,13 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (IS_GEN9_LP(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
- bxt_hpd_irq_handler(dev_priv, tmp_mask,
- hpd_bxt);
+ bxt_hpd_irq_handler(dev_priv, tmp_mask);
found = true;
}
} else if (IS_BROADWELL(dev_priv)) {
tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
if (tmp_mask) {
- ilk_hpd_irq_handler(dev_priv,
- tmp_mask, hpd_bdw);
+ ilk_hpd_irq_handler(dev_priv, tmp_mask);
found = true;
}
}
@@ -2870,6 +2901,14 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
+
+ /* Wa_14010685332:icl */
+ if (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) {
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
+ SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
+ SBCLK_RUN_REFCLK_DIS, 0);
+ }
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
@@ -2989,13 +3028,12 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(dev_priv))
hotplug_irqs = SDE_HOTPLUG_MASK;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
- } else {
+ else
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
- }
+
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3021,13 +3059,12 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
u32 sde_ddi_mask, u32 sde_tc_mask,
- u32 ddi_enable_mask, u32 tc_enable_mask,
- const u32 *pins)
+ u32 ddi_enable_mask, u32 tc_enable_mask)
{
u32 hotplug_irqs, enabled_irqs;
hotplug_irqs = sde_ddi_mask | sde_tc_mask;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
@@ -3044,8 +3081,7 @@ static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
icp_hpd_irq_setup(dev_priv,
SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
- ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
- hpd_icp);
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
}
/*
@@ -3057,8 +3093,7 @@ static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
icp_hpd_irq_setup(dev_priv,
SDE_DDI_MASK_TGP, 0,
- TGP_DDI_HPD_ENABLE_MASK, 0,
- hpd_tgp);
+ TGP_DDI_HPD_ENABLE_MASK, 0);
}
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3083,11 +3118,9 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- const u32 *hpd;
u32 val;
- hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
val = I915_READ(GEN11_DE_HPD_IMR);
@@ -3099,12 +3132,10 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
- TGP_DDI_HPD_ENABLE_MASK,
- TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
+ TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
- ICP_DDI_HPD_ENABLE_MASK,
- ICP_TC_HPD_ENABLE_MASK, hpd_icp);
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
}
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3140,7 +3171,7 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3169,17 +3200,17 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 8) {
hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
} else if (INTEL_GEN(dev_priv) >= 7) {
hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
} else {
hotplug_irqs = DE_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
}
@@ -3230,7 +3261,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3361,7 +3392,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
- u32 de_port_masked = GEN8_AUX_CHANNEL_A;
+ u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
enum pipe pipe;
@@ -3369,18 +3400,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) <= 10)
de_misc_masked |= GEN8_DE_MISC_GSE;
- if (INTEL_GEN(dev_priv) >= 9) {
- de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
- GEN9_AUX_CHANNEL_D;
- if (IS_GEN9_LP(dev_priv))
- de_port_masked |= BXT_DE_PORT_GMBUS;
- }
-
- if (INTEL_GEN(dev_priv) >= 11)
- de_port_masked |= ICL_AUX_CHANNEL_E;
-
- if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
- de_port_masked |= CNL_AUX_CHANNEL_F;
+ if (IS_GEN9_LP(dev_priv))
+ de_port_masked |= BXT_DE_PORT_GMBUS;
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
GEN8_PIPE_FIFO_UNDERRUN;
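The open-coded AUX interrupt mask accumulation removed above moves into the gen8_de_port_aux_mask() helper. Folding exactly the conditionals deleted here gives a standalone sketch of what that helper has to compute (the bit values are stand-ins for the demo; the real ones live in i915_reg.h, and the in-tree helper may also cover gen12 TBT AUX channels):

    #include <stdint.h>

    #define GEN8_AUX_CHANNEL_A (1u << 0)  /* stand-in values */
    #define GEN9_AUX_CHANNEL_B (1u << 1)
    #define GEN9_AUX_CHANNEL_C (1u << 2)
    #define GEN9_AUX_CHANNEL_D (1u << 3)
    #define ICL_AUX_CHANNEL_E  (1u << 4)
    #define CNL_AUX_CHANNEL_F  (1u << 5)

    static uint32_t de_port_aux_mask(int gen, int has_port_f)
    {
        uint32_t mask = GEN8_AUX_CHANNEL_A;

        if (gen >= 9)
            mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
                    GEN9_AUX_CHANNEL_D;
        if (gen >= 11)
            mask |= ICL_AUX_CHANNEL_E;
        if (has_port_f || gen >= 11)
            mask |= CNL_AUX_CHANNEL_F;

        return mask;
    }

Note that BXT_DE_PORT_GMBUS stays in the caller, as the surviving IS_GEN9_LP() branch above shows.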
@@ -3656,7 +3677,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3761,7 +3782,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3903,10 +3924,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3934,6 +3955,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
int i;
+ intel_hpd_init_pins(dev_priv);
+
intel_hpd_init_work(dev_priv);
INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
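The new intel_hpd_init_pins() call is what makes the dev_priv->hotplug.hpd / .pch_hpd reads in the hunks above possible: the per-platform pin tables are chosen once at init instead of in every *_hpd_irq_setup() path. A minimal standalone sketch of the shape of that change (names and table contents are illustrative, not the driver's actual tables):

    #include <stdint.h>

    struct hotplug_pins {
        const uint32_t *hpd;      /* CPU/DE hotplug pins */
        const uint32_t *pch_hpd;  /* PCH hotplug pins */
    };

    static const uint32_t hpd_gen11[] = { 0 /* ... */ };
    static const uint32_t hpd_icp[]   = { 0 /* ... */ };

    /* All platform checks collapse into this one spot; the IRQ
     * setup paths then just dereference the cached pointers. */
    static void hpd_init_pins(struct hotplug_pins *pins)
    {
        pins->hpd = hpd_gen11;
        pins->pch_hpd = hpd_icp;
    }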
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 2c80a0194c80..eb0b5be7c35d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -160,6 +160,7 @@
GEN(2), \
.is_mobile = 1, \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -170,6 +171,7 @@
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
+ .dma_mask_size = 32, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
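The new dma_mask_size field moves the per-platform DMA addressing width into the device info tables; presumably the probe path consumes it along these lines (a sketch of the likely consumer, not the series' actual code):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* sketch: pick up the per-platform addressable bits at probe time */
    static int i915_set_dma_mask(struct pci_dev *pdev,
                                 unsigned int dma_mask_size)
    {
        return dma_set_mask_and_coherent(&pdev->dev,
                                         DMA_BIT_MASK(dma_mask_size));
    }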
@@ -179,6 +181,7 @@
#define I845_FEATURES \
GEN(2), \
.pipe_mask = BIT(PIPE_A), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -188,6 +191,7 @@
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
+ .dma_mask_size = 32, \
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
I9XX_COLORS, \
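The cpu_transcoder_mask entries being added throughout this file are plain bitmasks over enum transcoder, mirroring how pipe_mask already works. A standalone demonstration of the arithmetic (enum values assumed to start at 0, as in the driver; the real enum also has TRANSCODER_EDP and DSI entries):

    #include <stdio.h>

    enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_D };
    #define BIT(n) (1UL << (n))

    int main(void)
    {
        unsigned long mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B);

        for (int t = TRANSCODER_A; t <= TRANSCODER_D; t++)
            if (mask & BIT(t))
                printf("transcoder %c present\n", 'A' + t);
        return 0;
    }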
@@ -218,11 +222,13 @@ static const struct intel_device_info i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
+ .dma_mask_size = 32, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
@@ -283,6 +289,7 @@ static const struct intel_device_info g33_info = {
PLATFORM(INTEL_G33),
.display.has_hotplug = 1,
.display.has_overlay = 1,
+ .dma_mask_size = 36,
};
static const struct intel_device_info pnv_g_info = {
@@ -290,6 +297,7 @@ static const struct intel_device_info pnv_g_info = {
PLATFORM(INTEL_PINEVIEW),
.display.has_hotplug = 1,
.display.has_overlay = 1,
+ .dma_mask_size = 36,
};
static const struct intel_device_info pnv_m_info = {
@@ -298,17 +306,20 @@ static const struct intel_device_info pnv_m_info = {
.is_mobile = 1,
.display.has_hotplug = 1,
.display.has_overlay = 1,
+ .dma_mask_size = 36,
};
#define GEN4_FEATURES \
GEN(4), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
+ .dma_mask_size = 36, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I965_COLORS, \
@@ -354,12 +365,14 @@ static const struct intel_device_info gm45_info = {
#define GEN5_FEATURES \
GEN(5), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
.has_rc6 = 0, \
+ .dma_mask_size = 36, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
@@ -381,6 +394,7 @@ static const struct intel_device_info ilk_m_info = {
#define GEN6_FEATURES \
GEN(6), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -389,6 +403,7 @@ static const struct intel_device_info ilk_m_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
+ .dma_mask_size = 40, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
@@ -430,6 +445,7 @@ static const struct intel_device_info snb_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -438,6 +454,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
+ .dma_mask_size = 40, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
@@ -482,6 +499,7 @@ static const struct intel_device_info ivb_q_info = {
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
.pipe_mask = 0, /* legal, last one wins */
+ .cpu_transcoder_mask = 0,
.has_l3_dpf = 1,
};
@@ -490,11 +508,13 @@ static const struct intel_device_info vlv_info = {
GEN(7),
.is_lp = 1,
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
+ .dma_mask_size = 40,
.ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 31,
.has_snoop = true,
@@ -511,6 +531,8 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_psr = 1, \
@@ -543,6 +565,7 @@ static const struct intel_device_info hsw_gt3_info = {
G75_FEATURES, \
GEN(8), \
.has_logical_ring_contexts = 1, \
+ .dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
.has_64bit_reloc = 1, \
@@ -581,6 +604,7 @@ static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
@@ -590,7 +614,8 @@ static const struct intel_device_info chv_info = {
.has_rps = true,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
- .ppgtt_type = INTEL_PPGTT_ALIASING,
+ .dma_mask_size = 39,
+ .ppgtt_type = INTEL_PPGTT_FULL,
.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
@@ -656,6 +681,9 @@ static const struct intel_device_info skl_gt4_info = {
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
@@ -670,6 +698,7 @@ static const struct intel_device_info skl_gt4_info = {
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_gt_uc = 1, \
+ .dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
.has_reset_engine = 1, \
@@ -759,6 +788,9 @@ static const struct intel_device_info cnl_info = {
#define GEN11_FEATURES \
GEN10_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -799,6 +831,10 @@ static const struct intel_device_info ehl_info = {
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -822,7 +858,6 @@ static const struct intel_device_info ehl_info = {
static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
@@ -920,8 +955,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
i915_driver_remove(i915);
pci_set_drvdata(pdev, NULL);
-
- drm_dev_put(&i915->drm);
}
/* is device_id present in comma separated list of ids */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 66a46e41d5ef..75c60c2afb7e 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -204,21 +204,6 @@
#include "i915_drv.h"
#include "i915_perf.h"
-#include "oa/i915_oa_hsw.h"
-#include "oa/i915_oa_bdw.h"
-#include "oa/i915_oa_chv.h"
-#include "oa/i915_oa_sklgt2.h"
-#include "oa/i915_oa_sklgt3.h"
-#include "oa/i915_oa_sklgt4.h"
-#include "oa/i915_oa_bxt.h"
-#include "oa/i915_oa_kblgt2.h"
-#include "oa/i915_oa_kblgt3.h"
-#include "oa/i915_oa_glk.h"
-#include "oa/i915_oa_cflgt2.h"
-#include "oa/i915_oa_cflgt3.h"
-#include "oa/i915_oa_cnl.h"
-#include "oa/i915_oa_icl.h"
-#include "oa/i915_oa_tgl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -238,26 +223,17 @@
*
* Although this can be observed explicitly while copying reports to userspace
* by checking for a zeroed report-id field in tail reports, we want to account
- * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
- * read() attempts.
- *
- * In effect we define a tail pointer for reading that lags the real tail
- * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
- * time for the corresponding reports to become visible to the CPU.
- *
- * To manage this we actually track two tail pointers:
- * 1) An 'aging' tail with an associated timestamp that is tracked until we
- * can trust the corresponding data is visible to the CPU; at which point
- * it is considered 'aged'.
- * 2) An 'aged' tail that can be used for read()ing.
- *
- * The two separate pointers let us decouple read()s from tail pointer aging.
- *
- * The tail pointers are checked and updated at a limited rate within a hrtimer
- * callback (the same callback that is used for delivering EPOLLIN events)
- *
- * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
- * indicates that an updated tail pointer is needed.
+ * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of
+ * redundant read() attempts.
+ *
+ * We workaround this issue in oa_buffer_check_unlocked() by reading the reports
+ * in the OA buffer, starting from the tail reported by the HW until we find a
+ * report with its first 2 dwords not 0 meaning its previous report is
+ * completely in memory and ready to be read. Those dwords are also set to 0
+ * once read and the whole buffer is cleared upon OA buffer initialization. The
+ * first dword is the reason for this report while the second is the timestamp,
+ * making the chances of having those 2 fields at 0 fairly unlikely. A more
+ * detailed explanation is available in oa_buffer_check_unlocked().
*
* Most of the implementation details for this workaround are in
* oa_buffer_check_unlocked() and _append_oa_reports()
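Distilled to standalone C, the tail validation described above looks like the sketch below (names like landed_tail are mine, OA_BUFFER_SIZE is shrunk for the demo, and offsets are assumed already buffer-relative and 64-byte aligned as the HW guarantees):

    #include <stdint.h>

    #define OA_BUFFER_SIZE (16 << 10)  /* demo value; must be a power of two */
    #define OA_TAKEN(tail, head) (((tail) - (head)) & (OA_BUFFER_SIZE - 1))

    /* Walk backward from the HW-reported tail until a report whose
     * first two dwords are non-zero is found; everything before that
     * point is guaranteed to have fully landed in memory. */
    static uint32_t landed_tail(const uint8_t *buf, uint32_t hw_tail,
                                uint32_t prev_tail, uint32_t report_size)
    {
        uint32_t tail = hw_tail;

        while (OA_TAKEN(tail, prev_tail) >= report_size) {
            const uint32_t *report32 = (const uint32_t *)(buf + tail);

            if (report32[0] != 0 || report32[1] != 0)
                break;
            tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
        }
        return tail;
    }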
@@ -272,11 +248,11 @@
#define OA_TAIL_MARGIN_NSEC 100000ULL
#define INVALID_TAIL_PTR 0xffffffff
-/* frequency for checking whether the OA unit has written new reports to the
- * circular OA buffer...
+/* The default frequency for checking whether the OA unit has written new
+ * reports to the circular OA buffer...
*/
-#define POLL_FREQUENCY 200
-#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
+#define DEFAULT_POLL_FREQUENCY_HZ 200
+#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
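The default behaviour is unchanged by the rename: NSEC_PER_SEC / 200 Hz = 5,000,000 ns, a 5 ms check interval. It just becomes a per-stream default that DRM_I915_PERF_PROP_POLL_OA_PERIOD (added later in this patch) can override. A one-line compile-time check of the arithmetic:

    _Static_assert(1000000000ULL / 200 == 5000000ULL,
                   "default OA poll period is 5ms");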
/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;
@@ -359,6 +335,12 @@ static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
* @oa_periodic: Whether to enable periodic OA unit sampling
* @oa_period_exponent: The OA unit sampling period is derived from this
* @engine: The engine (typically rcs0) being monitored by the OA unit
+ * @has_sseu: Whether @sseu was specified by userspace
+ * @sseu: internal SSEU configuration computed either from the
+ * userspace-specified configuration in the opening parameters or a
+ * default value (see get_default_sseu_config())
+ * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
+ * data availability
*
* As read_properties_unlocked() enumerates and validates the properties given
* to open a stream of metrics the configuration is built up in the structure
@@ -378,6 +360,11 @@ struct perf_open_properties {
int oa_period_exponent;
struct intel_engine_cs *engine;
+
+ bool has_sseu;
+ struct intel_sseu sseu;
+
+ u64 poll_oa_period;
};
struct i915_oa_config_bo {
@@ -409,10 +396,7 @@ i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
struct i915_oa_config *oa_config;
rcu_read_lock();
- if (metrics_set == 1)
- oa_config = &perf->test_config;
- else
- oa_config = idr_find(&perf->metrics_idr, metrics_set);
+ oa_config = idr_find(&perf->metrics_idr, metrics_set);
if (oa_config)
oa_config = i915_oa_config_get(oa_config);
rcu_read_unlock();
@@ -465,8 +449,8 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
* (See description of OA_TAIL_MARGIN_NSEC above for further details.)
*
* Besides returning true when there is data available to read() this function
- * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
- * and .aged_tail_idx state used for reading.
+ * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
+ * object.
*
* Note: It's safe to read OA config state here unlocked, assuming that this is
* only called while the stream is enabled, while the global OA configuration
@@ -476,28 +460,19 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
*/
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
int report_size = stream->oa_buffer.format_size;
unsigned long flags;
- unsigned int aged_idx;
- u32 head, hw_tail, aged_tail, aging_tail;
+ bool pollin;
+ u32 hw_tail;
u64 now;
/* We have to consider the (unlikely) possibility that read() errors
- * could result in an OA buffer reset which might reset the head,
- * tails[] and aged_tail state.
+ * could result in an OA buffer reset which might reset the head and
+ * tail state.
*/
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- /* NB: The head we observe here might effectively be a little out of
- * date (between head and tails[aged_idx].offset if there is currently
- * a read() in progress.
- */
- head = stream->oa_buffer.head;
-
- aged_idx = stream->oa_buffer.aged_tail_idx;
- aged_tail = stream->oa_buffer.tails[aged_idx].offset;
- aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
-
hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
/* The tail pointer increases in 64 byte increments,
@@ -507,64 +482,63 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
now = ktime_get_mono_fast_ns();
- /* Update the aged tail
- *
- * Flip the tail pointer available for read()s once the aging tail is
- * old enough to trust that the corresponding data will be visible to
- * the CPU...
- *
- * Do this before updating the aging pointer in case we may be able to
- * immediately start aging a new pointer too (if new data has become
- * available) without needing to wait for a later hrtimer callback.
- */
- if (aging_tail != INVALID_TAIL_PTR &&
- ((now - stream->oa_buffer.aging_timestamp) >
- OA_TAIL_MARGIN_NSEC)) {
-
- aged_idx ^= 1;
- stream->oa_buffer.aged_tail_idx = aged_idx;
+ if (hw_tail == stream->oa_buffer.aging_tail &&
+ (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
+ /* If the HW tail hasn't moved since the last check and the HW
+ * tail has been aging for long enough, declare it the new
+ * tail.
+ */
+ stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
+ } else {
+ u32 head, tail, aged_tail;
- aged_tail = aging_tail;
+ /* NB: The head we observe here might effectively be a little
+ * out of date. If a read() is in progress, the head could be
+ * anywhere between this head and stream->oa_buffer.tail.
+ */
+ head = stream->oa_buffer.head - gtt_offset;
+ aged_tail = stream->oa_buffer.tail - gtt_offset;
+
+ hw_tail -= gtt_offset;
+ tail = hw_tail;
+
+ /* Walk the stream backward until we find a report with dwords 0
+ * & 1 not both 0. Since the circular buffer pointers progress in
+ * increments of 64 bytes and reports can be up to 256 bytes
+ * long, we can't tell whether a report has fully landed in
+ * memory before the first 2 dwords of the following report have
+ * effectively landed.
+ *
+ * This assumes that the OA unit's writes land in memory in the
+ * order they were issued.
+ * If not : (╯°□°)╯︵ ┻━┻
+ */
+ while (OA_TAKEN(tail, aged_tail) >= report_size) {
+ u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);
- /* Mark that we need a new pointer to start aging... */
- stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
- aging_tail = INVALID_TAIL_PTR;
- }
+ if (report32[0] != 0 || report32[1] != 0)
+ break;
- /* Update the aging tail
- *
- * We throttle aging tail updates until we have a new tail that
- * represents >= one report more data than is already available for
- * reading. This ensures there will be enough data for a successful
- * read once this new pointer has aged and ensures we will give the new
- * pointer time to age.
- */
- if (aging_tail == INVALID_TAIL_PTR &&
- (aged_tail == INVALID_TAIL_PTR ||
- OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
- struct i915_vma *vma = stream->oa_buffer.vma;
- u32 gtt_offset = i915_ggtt_offset(vma);
-
- /* Be paranoid and do a bounds check on the pointer read back
- * from hardware, just in case some spurious hardware condition
- * could put the tail out of bounds...
- */
- if (hw_tail >= gtt_offset &&
- hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
- stream->oa_buffer.tails[!aged_idx].offset =
- aging_tail = hw_tail;
- stream->oa_buffer.aging_timestamp = now;
- } else {
- drm_err(&stream->perf->i915->drm,
- "Ignoring spurious out of range OA buffer tail pointer = %x\n",
- hw_tail);
+ tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
}
+
+ if (OA_TAKEN(hw_tail, tail) > report_size &&
+ __ratelimit(&stream->perf->tail_pointer_race))
+ DRM_NOTE("unlanded report(s) head=0x%x "
+ "tail=0x%x hw_tail=0x%x\n",
+ head, tail, hw_tail);
+
+ stream->oa_buffer.tail = gtt_offset + tail;
+ stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
+ stream->oa_buffer.aging_timestamp = now;
}
+ pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
+ stream->oa_buffer.head - gtt_offset) >= report_size;
+
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- return aged_tail == INVALID_TAIL_PTR ?
- false : OA_TAKEN(aged_tail, head) >= report_size;
+ return pollin;
}
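OA_TAKEN(), used throughout the new code, amounts to the usual power-of-two circular-buffer distance, ((tail) - (head)) & (OA_BUFFER_SIZE - 1). A runnable refresher (16M is the size the driver generally assumes, per the comment near the top of the file):

    #include <assert.h>

    #define OA_BUFFER_SIZE (16u << 20)
    #define OA_TAKEN(tail, head) (((tail) - (head)) & (OA_BUFFER_SIZE - 1))

    int main(void)
    {
        /* no wrap: plain distance */
        assert(OA_TAKEN(4096u, 1024u) == 3072u);
        /* wrap: tail restarted at the buffer base */
        assert(OA_TAKEN(256u, OA_BUFFER_SIZE - 256u) == 512u);
        return 0;
    }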
/**
@@ -682,7 +656,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
- unsigned int aged_tail_idx;
u32 head, tail;
u32 taken;
int ret = 0;
@@ -693,19 +666,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
head = stream->oa_buffer.head;
- aged_tail_idx = stream->oa_buffer.aged_tail_idx;
- tail = stream->oa_buffer.tails[aged_tail_idx].offset;
+ tail = stream->oa_buffer.tail;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
- * An invalid tail pointer here means we're still waiting for the poll
- * hrtimer callback to give us a pointer
- */
- if (tail == INVALID_TAIL_PTR)
- return -EAGAIN;
-
- /*
* NB: oa_buffer.head/tail include the gtt_offset which we don't want
* while indexing relative to oa_buf_base.
*/
@@ -838,13 +803,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
}
/*
- * The above reason field sanity check is based on
- * the assumption that the OA buffer is initially
- * zeroed and we reset the field after copying so the
- * check is still meaningful once old reports start
- * being overwritten.
+ * Clear out the first 2 dwords as a means to detect unlanded
+ * reports.
*/
report32[0] = 0;
+ report32[1] = 0;
}
if (start_offset != *offset) {
@@ -985,7 +948,6 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
- unsigned int aged_tail_idx;
u32 head, tail;
u32 taken;
int ret = 0;
@@ -996,17 +958,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
head = stream->oa_buffer.head;
- aged_tail_idx = stream->oa_buffer.aged_tail_idx;
- tail = stream->oa_buffer.tails[aged_tail_idx].offset;
+ tail = stream->oa_buffer.tail;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- /* An invalid tail pointer here means we're still waiting for the poll
- * hrtimer callback to give us a pointer
- */
- if (tail == INVALID_TAIL_PTR)
- return -EAGAIN;
-
/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
* while indexing relative to oa_buf_base.
*/
@@ -1064,13 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
if (ret)
break;
- /* The above report-id field sanity check is based on
- * the assumption that the OA buffer is initially
- * zeroed and we reset the field after copying so the
- * check is still meaningful once old reports start
- * being overwritten.
+ /* Clear out the first 2 dwords as a means to detect unlanded
+ * reports.
*/
report32[0] = 0;
+ report32[1] = 0;
}
if (start_offset != *offset) {
@@ -1310,8 +1263,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
* dropped by GuC. They won't be part of the context
* ID in the OA reports, so squash those lower bits.
*/
- stream->specific_ctx_id =
- lower_32_bits(ce->lrc_desc) >> 12;
+ stream->specific_ctx_id = ce->lrc.lrca >> 12;
/*
* GuC uses the top bit to signal proxy submission, so
@@ -1328,11 +1280,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
/*
* Pick an unused context id
- * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
+ * 0 - (BITS_PER_LONG - 1) are used by other contexts
* GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
*/
stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
- BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG);
break;
}
@@ -1449,8 +1400,8 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
gtt_offset | OABUFFER_SIZE_16M);
/* Mark that we need updated tail pointers to read from... */
- stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = gtt_offset;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -1472,8 +1423,6 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
* memory...
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
-
- stream->pollin = false;
}
static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
@@ -1503,8 +1452,8 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = gtt_offset;
/*
* Reset state used to recognise context switches, affecting which
@@ -1528,8 +1477,6 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
* memory...
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
-
- stream->pollin = false;
}
static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
@@ -1559,8 +1506,8 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
gtt_offset & GEN12_OAG_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = gtt_offset;
/*
* Reset state used to recognise context switches, affecting which
@@ -1585,8 +1532,6 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
*/
memset(stream->oa_buffer.vaddr, 0,
stream->oa_buffer.vma->size);
-
- stream->pollin = false;
}
static int alloc_oa_buffer(struct i915_perf_stream *stream)
@@ -1667,10 +1612,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
- DIV64_U64_ROUND_UP(
- atomic64_read(&stream->perf->noa_programming_delay) *
- RUNTIME_INFO(i915)->cs_timestamp_frequency_khz,
- 1000000ull);
+ i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
@@ -1972,10 +1914,11 @@ out:
return i915_vma_get(oa_bo->vma);
}
-static struct i915_request *
+static int
emit_oa_config(struct i915_perf_stream *stream,
struct i915_oa_config *oa_config,
- struct intel_context *ce)
+ struct intel_context *ce,
+ struct i915_active *active)
{
struct i915_request *rq;
struct i915_vma *vma;
@@ -1983,7 +1926,7 @@ emit_oa_config(struct i915_perf_stream *stream,
vma = get_oa_vma(stream, oa_config);
if (IS_ERR(vma))
- return ERR_CAST(vma);
+ return PTR_ERR(vma);
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
@@ -1997,6 +1940,18 @@ emit_oa_config(struct i915_perf_stream *stream,
goto err_vma_unpin;
}
+ if (!IS_ERR_OR_NULL(active)) {
+ /* After all individual context modifications */
+ err = i915_request_await_active(rq, active,
+ I915_ACTIVE_AWAIT_ACTIVE);
+ if (err)
+ goto err_add_request;
+
+ err = i915_active_add_request(active, rq);
+ if (err)
+ goto err_add_request;
+ }
+
i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, 0);
if (!err)
@@ -2011,14 +1966,13 @@ emit_oa_config(struct i915_perf_stream *stream,
if (err)
goto err_add_request;
- i915_request_get(rq);
err_add_request:
i915_request_add(rq);
err_vma_unpin:
i915_vma_unpin(vma);
err_vma_put:
i915_vma_put(vma);
- return err ? ERR_PTR(err) : rq;
+ return err;
}
static struct intel_context *oa_context(struct i915_perf_stream *stream)
@@ -2026,8 +1980,9 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
return stream->pinned_ctx ?: stream->engine->kernel_context;
}
-static struct i915_request *
-hsw_enable_metric_set(struct i915_perf_stream *stream)
+static int
+hsw_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
struct intel_uncore *uncore = stream->uncore;
@@ -2046,7 +2001,9 @@ hsw_enable_metric_set(struct i915_perf_stream *stream)
intel_uncore_rmw(uncore, GEN6_UCGCTL1,
0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
- return emit_oa_config(stream, stream->oa_config, oa_context(stream));
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
static void hsw_disable_metric_set(struct i915_perf_stream *stream)
@@ -2116,9 +2073,6 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce,
for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
reg_state[ctx_flexeu0 + i * 2 + 1] =
oa_config_flex_reg(stream->oa_config, flex_regs[i]);
-
- reg_state[CTX_R_PWR_CLK_STATE] =
- intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu);
}
struct flex {
@@ -2139,7 +2093,7 @@ gen8_store_flex(struct i915_request *rq,
if (IS_ERR(cs))
return PTR_ERR(cs);
- offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+ offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
do {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = offset + flex->offset * sizeof(u32);
@@ -2196,8 +2150,10 @@ static int gen8_modify_context(struct intel_context *ce,
return err;
}
-static int gen8_modify_self(struct intel_context *ce,
- const struct flex *flex, unsigned int count)
+static int
+gen8_modify_self(struct intel_context *ce,
+ const struct flex *flex, unsigned int count,
+ struct i915_active *active)
{
struct i915_request *rq;
int err;
@@ -2208,8 +2164,17 @@ static int gen8_modify_self(struct intel_context *ce,
if (IS_ERR(rq))
return PTR_ERR(rq);
+ if (!IS_ERR_OR_NULL(active)) {
+ err = i915_active_add_request(active, rq);
+ if (err)
+ goto err_add_request;
+ }
+
err = gen8_load_flex(rq, ce, flex, count);
+ if (err)
+ goto err_add_request;
+err_add_request:
i915_request_add(rq);
return err;
}
@@ -2243,7 +2208,8 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
return err;
}
-static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
+static int gen12_configure_oar_context(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
int err;
struct intel_context *ce = stream->pinned_ctx;
@@ -2252,7 +2218,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
{
GEN8_OACTXCONTROL,
stream->perf->ctx_oactxctrl_offset + 1,
- enable ? GEN8_OA_COUNTER_RESUME : 0,
+ active ? GEN8_OA_COUNTER_RESUME : 0,
},
};
/* Offsets in regs_lri are not used since this configuration is only
@@ -2264,13 +2230,13 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
GEN12_OAR_OACONTROL,
GEN12_OAR_OACONTROL_OFFSET + 1,
(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
- (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
+ (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
},
{
RING_CONTEXT_CONTROL(ce->engine->mmio_base),
CTX_CONTEXT_CONTROL,
_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
- enable ?
+ active ?
GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
0)
},
@@ -2287,7 +2253,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
return err;
/* Apply regs_lri using LRI with pinned context */
- return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
+ return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
}
/*
@@ -2315,9 +2281,11 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
* Note: it's only the RCS/Render context that has any OA state.
* Note: the first flex register passed must always be R_PWR_CLK_STATE
*/
-static int oa_configure_all_contexts(struct i915_perf_stream *stream,
- struct flex *regs,
- size_t num_regs)
+static int
+oa_configure_all_contexts(struct i915_perf_stream *stream,
+ struct flex *regs,
+ size_t num_regs,
+ struct i915_active *active)
{
struct drm_i915_private *i915 = stream->perf->i915;
struct intel_engine_cs *engine;
@@ -2374,7 +2342,7 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
- err = gen8_modify_self(ce, regs, num_regs);
+ err = gen8_modify_self(ce, regs, num_regs, active);
if (err)
return err;
}
@@ -2382,8 +2350,10 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
return 0;
}
-static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int
+gen12_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config,
+ struct i915_active *active)
{
struct flex regs[] = {
{
@@ -2392,11 +2362,15 @@ static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
},
};
- return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+ return oa_configure_all_contexts(stream,
+ regs, ARRAY_SIZE(regs),
+ active);
}
-static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int
+lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config,
+ struct i915_active *active)
{
/* The MMIO offsets for Flex EU registers aren't contiguous */
const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
@@ -2429,11 +2403,14 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
for (i = 2; i < ARRAY_SIZE(regs); i++)
regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
- return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+ return oa_configure_all_contexts(stream,
+ regs, ARRAY_SIZE(regs),
+ active);
}
-static struct i915_request *
-gen8_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen8_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2473,11 +2450,13 @@ gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = lrc_configure_all_contexts(stream, oa_config);
+ ret = lrc_configure_all_contexts(stream, oa_config, active);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- return emit_oa_config(stream, oa_config, oa_context(stream));
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
@@ -2487,8 +2466,9 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
-static struct i915_request *
-gen12_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen12_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2517,9 +2497,9 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen12_configure_all_contexts(stream, oa_config);
+ ret = gen12_configure_all_contexts(stream, oa_config, active);
if (ret)
- return ERR_PTR(ret);
+ return ret;
/*
* For Gen12, performance counters are context
@@ -2527,12 +2507,14 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
* requested this.
*/
if (stream->ctx) {
- ret = gen12_configure_oar_context(stream, true);
+ ret = gen12_configure_oar_context(stream, active);
if (ret)
- return ERR_PTR(ret);
+ return ret;
}
- return emit_oa_config(stream, oa_config, oa_context(stream));
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
static void gen8_disable_metric_set(struct i915_perf_stream *stream)
@@ -2540,7 +2522,7 @@ static void gen8_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- lrc_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL, NULL);
intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
@@ -2550,7 +2532,7 @@ static void gen10_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- lrc_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL, NULL);
/* Make sure we disable noa to save power. */
intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2561,11 +2543,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- gen12_configure_all_contexts(stream, NULL);
+ gen12_configure_all_contexts(stream, NULL, NULL);
/* disable the context save/restore or OAR counters */
if (stream->ctx)
- gen12_configure_oar_context(stream, false);
+ gen12_configure_oar_context(stream, NULL);
/* Make sure we disable noa to save power. */
intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2657,11 +2639,13 @@ static void gen12_oa_enable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
+ stream->pollin = false;
+
stream->perf->ops.oa_enable(stream);
if (stream->periodic)
hrtimer_start(&stream->poll_check_timer,
- ns_to_ktime(POLL_PERIOD),
+ ns_to_ktime(stream->poll_oa_period),
HRTIMER_MODE_REL_PINNED);
}
@@ -2737,16 +2721,52 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
{
- struct i915_request *rq;
+ struct i915_active *active;
+ int err;
- rq = stream->perf->ops.enable_metric_set(stream);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ active = i915_active_create();
+ if (!active)
+ return -ENOMEM;
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
+ err = stream->perf->ops.enable_metric_set(stream, active);
+ if (err == 0)
+ __i915_active_wait(active, TASK_UNINTERRUPTIBLE);
- return 0;
+ i915_active_put(active);
+ return err;
+}
+
+static void
+get_default_sseu_config(struct intel_sseu *out_sseu,
+ struct intel_engine_cs *engine)
+{
+ const struct sseu_dev_info *devinfo_sseu =
+ &RUNTIME_INFO(engine->i915)->sseu;
+
+ *out_sseu = intel_sseu_from_device_info(devinfo_sseu);
+
+ if (IS_GEN(engine->i915, 11)) {
+ /*
+ * We only need the subslice count, so it doesn't matter which
+ * subslices we select - keep just the low half of the bits of
+ * all available subslices per slice.
+ */
+ out_sseu->subslice_mask =
+ ~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
+ out_sseu->slice_mask = 0x1;
+ }
+}
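A worked example of the gen11 subslice trick above: with all 8 subslices available, hweight8(mask) / 2 == 4, and ~(~0u << 4) keeps the low four. Standalone:

    #include <assert.h>
    #include <stdint.h>

    /* popcount stand-in for the kernel's hweight8() */
    static unsigned int hweight8(uint8_t v)
    {
        unsigned int n = 0;

        for (; v; v >>= 1)
            n += v & 1;
        return n;
    }

    int main(void)
    {
        uint8_t subslice_mask = 0xff;            /* 8 subslices */
        uint8_t half = ~(~0u << (hweight8(subslice_mask) / 2));

        assert(half == 0x0f);                    /* low 4 kept */
        return 0;
    }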
+
+static int
+get_sseu_config(struct intel_sseu *out_sseu,
+ struct intel_engine_cs *engine,
+ const struct drm_i915_gem_context_param_sseu *drm_sseu)
+{
+ if (drm_sseu->engine.engine_class != engine->uabi_class ||
+ drm_sseu->engine.engine_instance != engine->uabi_instance)
+ return -EINVAL;
+
+ return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu);
}
/**
@@ -2881,6 +2901,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_oa_buf_alloc;
stream->ops = &i915_oa_stream_ops;
+
+ perf->sseu = props->sseu;
WRITE_ONCE(perf->exclusive_stream, stream);
ret = i915_perf_stream_enable_sync(stream);
@@ -2932,10 +2954,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
stream = READ_ONCE(engine->i915->perf.exclusive_stream);
- /*
- * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
- * is already doing that, so nothing to be done for gen12 here.
- */
if (stream && INTEL_GEN(stream->perf->i915) < 12)
gen8_update_reg_state_unlocked(ce, stream);
}
@@ -3026,7 +3044,8 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
wake_up(&stream->poll_wq);
}
- hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
+ hrtimer_forward_now(hrtimer,
+ ns_to_ktime(stream->poll_oa_period));
return HRTIMER_RESTART;
}
@@ -3157,7 +3176,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
return -EINVAL;
if (config != stream->oa_config) {
- struct i915_request *rq;
+ int err;
/*
* If OA is bound to a specific context, emit the
@@ -3168,13 +3187,11 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
* When set globally, we use a low priority kernel context,
* so it will effectively take effect when idle.
*/
- rq = emit_oa_config(stream, config, oa_context(stream));
- if (!IS_ERR(rq)) {
+ err = emit_oa_config(stream, config, oa_context(stream), NULL);
+ if (!err)
config = xchg(&stream->oa_config, config);
- i915_request_put(rq);
- } else {
- ret = PTR_ERR(rq);
- }
+ else
+ ret = err;
}
i915_oa_config_put(config);
@@ -3387,13 +3404,21 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
privileged_op = true;
}
+ /*
+ * Asking for SSEU configuration is a privileged operation.
+ */
+ if (props->has_sseu)
+ privileged_op = true;
+ else
+ get_default_sseu_config(&props->sseu, props->engine);
+
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
* to determine if it's ok to access system wide OA counters
- * without CAP_SYS_ADMIN privileges.
+ * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
*/
if (privileged_op &&
- i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
@@ -3407,6 +3432,7 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
stream->perf = perf;
stream->ctx = specific_ctx;
+ stream->poll_oa_period = props->poll_oa_period;
ret = i915_oa_stream_init(stream, param, props);
if (ret)
@@ -3456,8 +3482,7 @@ err:
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return div64_u64(1000000000ULL * (2ULL << exponent),
- 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
+ return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
}
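The same conversion in runnable form: the sampling period is (2 << exponent) command-streamer timestamp ticks. The 12 MHz clock below is only an assumption for the example; real values come from RUNTIME_INFO:

    #include <stdint.h>
    #include <stdio.h>

    /* sketch of i915_cs_timestamp_ticks_to_ns() applied to 2 << exponent */
    static uint64_t oa_exponent_to_ns_demo(uint64_t freq_hz, int exponent)
    {
        return (2ULL << exponent) * 1000000000ULL / freq_hz;
    }

    int main(void)
    {
        /* assuming a 12 MHz CS timestamp clock */
        printf("%llu ns\n",
               (unsigned long long)oa_exponent_to_ns_demo(12000000, 16));
        /* ~10922666 ns, i.e. ~10.9 ms between periodic OA reports */
        return 0;
    }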
/**
@@ -3482,8 +3507,10 @@ static int read_properties_unlocked(struct i915_perf *perf,
{
u64 __user *uprop = uprops;
u32 i;
+ int ret;
memset(props, 0, sizeof(struct perf_open_properties));
+ props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
if (!n_props) {
DRM_DEBUG("No i915 perf properties given\n");
@@ -3513,7 +3540,6 @@ static int read_properties_unlocked(struct i915_perf *perf,
for (i = 0; i < n_props; i++) {
u64 oa_period, oa_freq_hz;
u64 id, value;
- int ret;
ret = get_user(id, uprop);
if (ret)
@@ -3586,9 +3612,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
} else
oa_freq_hz = 0;
- if (oa_freq_hz > i915_oa_max_sample_rate &&
- !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
+ if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
+ DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
i915_oa_max_sample_rate);
return -EACCES;
}
@@ -3599,6 +3624,32 @@ static int read_properties_unlocked(struct i915_perf *perf,
case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
props->hold_preemption = !!value;
break;
+ case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
+ struct drm_i915_gem_context_param_sseu user_sseu;
+
+ if (copy_from_user(&user_sseu,
+ u64_to_user_ptr(value),
+ sizeof(user_sseu))) {
+ DRM_DEBUG("Unable to copy global sseu parameter\n");
+ return -EFAULT;
+ }
+
+ ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
+ if (ret) {
+ DRM_DEBUG("Invalid SSEU configuration\n");
+ return ret;
+ }
+ props->has_sseu = true;
+ break;
+ }
+ case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
+ if (value < 100000 /* 100us */) {
+ DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
+ value);
+ return -EINVAL;
+ }
+ props->poll_oa_period = value;
+ break;
case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
return -EINVAL;
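For context, properties reach this switch as (id, value) u64 pairs from the DRM_IOCTL_I915_PERF_OPEN parameters. A hedged userspace sketch of opting into the new poll period (property IDs and the open_param layout per include/uapi/drm/i915_drm.h; the metrics-set id, format, and exponent here are placeholders):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int open_oa_stream(int drm_fd, uint64_t metrics_set_id)
    {
        uint64_t properties[] = {
            DRM_I915_PERF_PROP_SAMPLE_OA, 1,
            DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
            DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
            DRM_I915_PERF_PROP_OA_EXPONENT, 16,
            /* new in perf revision 5: check for OA data every
             * 500us instead of the 5ms default (minimum 100us) */
            DRM_I915_PERF_PROP_POLL_OA_PERIOD, 500 * 1000,
        };
        struct drm_i915_perf_open_param param = {
            .flags = I915_PERF_FLAG_FD_CLOEXEC,
            .num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
            .properties_ptr = (uintptr_t)properties,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
    }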
@@ -3681,7 +3732,6 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
void i915_perf_register(struct drm_i915_private *i915)
{
struct i915_perf *perf = &i915->perf;
- int ret;
if (!perf->i915)
return;
@@ -3695,64 +3745,7 @@ void i915_perf_register(struct drm_i915_private *i915)
perf->metrics_kobj =
kobject_create_and_add("metrics",
&i915->drm.primary->kdev->kobj);
- if (!perf->metrics_kobj)
- goto exit;
-
- sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr);
-
- if (IS_TIGERLAKE(i915)) {
- i915_perf_load_test_config_tgl(i915);
- } else if (INTEL_GEN(i915) >= 11) {
- i915_perf_load_test_config_icl(i915);
- } else if (IS_CANNONLAKE(i915)) {
- i915_perf_load_test_config_cnl(i915);
- } else if (IS_COFFEELAKE(i915)) {
- if (IS_CFL_GT2(i915))
- i915_perf_load_test_config_cflgt2(i915);
- if (IS_CFL_GT3(i915))
- i915_perf_load_test_config_cflgt3(i915);
- } else if (IS_GEMINILAKE(i915)) {
- i915_perf_load_test_config_glk(i915);
- } else if (IS_KABYLAKE(i915)) {
- if (IS_KBL_GT2(i915))
- i915_perf_load_test_config_kblgt2(i915);
- else if (IS_KBL_GT3(i915))
- i915_perf_load_test_config_kblgt3(i915);
- } else if (IS_BROXTON(i915)) {
- i915_perf_load_test_config_bxt(i915);
- } else if (IS_SKYLAKE(i915)) {
- if (IS_SKL_GT2(i915))
- i915_perf_load_test_config_sklgt2(i915);
- else if (IS_SKL_GT3(i915))
- i915_perf_load_test_config_sklgt3(i915);
- else if (IS_SKL_GT4(i915))
- i915_perf_load_test_config_sklgt4(i915);
- } else if (IS_CHERRYVIEW(i915)) {
- i915_perf_load_test_config_chv(i915);
- } else if (IS_BROADWELL(i915)) {
- i915_perf_load_test_config_bdw(i915);
- } else if (IS_HASWELL(i915)) {
- i915_perf_load_test_config_hsw(i915);
- }
-
- if (perf->test_config.id == 0)
- goto sysfs_error;
-
- ret = sysfs_create_group(perf->metrics_kobj,
- &perf->test_config.sysfs_metric);
- if (ret)
- goto sysfs_error;
-
- perf->test_config.perf = perf;
- kref_init(&perf->test_config.ref);
-
- goto exit;
-sysfs_error:
- kobject_put(perf->metrics_kobj);
- perf->metrics_kobj = NULL;
-
-exit:
mutex_unlock(&perf->lock);
}
@@ -3772,9 +3765,6 @@ void i915_perf_unregister(struct drm_i915_private *i915)
if (!perf->metrics_kobj)
return;
- sysfs_remove_group(perf->metrics_kobj,
- &perf->test_config.sysfs_metric);
-
kobject_put(perf->metrics_kobj);
perf->metrics_kobj = NULL;
}
@@ -4009,7 +3999,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
return -EACCES;
}
@@ -4156,7 +4146,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
return -ENOTSUPP;
}
- if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
return -EACCES;
}
@@ -4349,8 +4339,8 @@ void i915_perf_init(struct drm_i915_private *i915)
if (perf->ops.enable_metric_set) {
mutex_init(&perf->lock);
- oa_sample_rate_hard_limit = 1000 *
- (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
+ oa_sample_rate_hard_limit =
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
mutex_init(&perf->metrics_lock);
idr_init(&perf->metrics_idr);
@@ -4373,6 +4363,11 @@ void i915_perf_init(struct drm_i915_private *i915)
ratelimit_set_flags(&perf->spurious_report_rs,
RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_state_init(&perf->tail_pointer_race,
+ 5 * HZ, 10);
+ ratelimit_set_flags(&perf->tail_pointer_race,
+ RATELIMIT_MSG_ON_RELEASE);
+
atomic64_set(&perf->noa_programming_delay,
500 * 1000 /* 500us */);
@@ -4433,8 +4428,15 @@ int i915_perf_ioctl_version(void)
* preemption on a particular context so that performance data is
* accessible from a delta of MI_RPC reports without looking at the
* OA buffer.
+ *
+ * 4: Add DRM_I915_PERF_PROP_GLOBAL_SSEU to specify the SSEU
+ * configuration applied to all contexts for the duration of the
+ * performance recording.
+ *
+ * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
+ * interval for the hrtimer used to check for OA data.
*/
- return 3;
+ return 5;
}
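Userspace can discover this revision through the I915_PARAM_PERF_REVISION getparam before relying on the newer properties; a short sketch (the fallback to 1 on failure is an assumption about pre-revision kernels):

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int i915_perf_revision(int drm_fd)
    {
        int value = 0;
        struct drm_i915_getparam gp = {
            .param = I915_PARAM_PERF_REVISION,
            .value = &value,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
            return 1; /* assume pre-revision kernel */
        return value;
    }

    /* e.g.: poll_oa_period is only honoured when revision >= 5 */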
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index a0e22f00f6cf..a36a455ae336 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -16,11 +16,13 @@
#include <linux/uuid.h>
#include <linux/wait.h>
+#include "gt/intel_sseu.h"
#include "i915_reg.h"
#include "intel_wakeref.h"
struct drm_i915_private;
struct file;
+struct i915_active;
struct i915_gem_context;
struct i915_perf;
struct i915_vma;
@@ -272,21 +274,10 @@ struct i915_perf_stream {
spinlock_t ptr_lock;
/**
- * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready to
- * used for reading.
- *
- * Initial values of 0xffffffff are invalid and imply that an
- * update is required (and should be ignored by an attempted
- * read)
- */
- struct {
- u32 offset;
- } tails[2];
-
- /**
- * @aged_tail_idx: Index for the aged tail ready to read() data up to.
+ * @aging_tail: The last tail pointer reported by the HW. The data
+ * might not have made it to memory yet though.
*/
- unsigned int aged_tail_idx;
+ u32 aging_tail;
/**
* @aging_timestamp: A monotonic timestamp for when the current aging tail pointer
@@ -302,6 +293,11 @@ struct i915_perf_stream {
* OA buffer data to userspace.
*/
u32 head;
+
+ /**
+ * @tail: The last verified tail that can be read by userspace.
+ */
+ u32 tail;
} oa_buffer;
/**
@@ -309,6 +305,12 @@ struct i915_perf_stream {
* reprogrammed.
*/
struct i915_vma *noa_wait;
+
+ /**
+ * @poll_oa_period: The period in nanoseconds at which the OA
+ * buffer should be checked for available data.
+ */
+ u64 poll_oa_period;
};
/**
@@ -339,8 +341,8 @@ struct i915_oa_ops {
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- struct i915_request *
- (*enable_metric_set)(struct i915_perf_stream *stream);
+ int (*enable_metric_set)(struct i915_perf_stream *stream,
+ struct i915_active *active);
/**
* @disable_metric_set: Remove system constraints associated with using
@@ -408,12 +410,22 @@ struct i915_perf {
struct i915_perf_stream *exclusive_stream;
/**
+ * @sseu: sseu configuration selected to run while perf is active,
+ * applies to all contexts.
+ */
+ struct intel_sseu sseu;
+
+ /**
* For rate limiting any notifications of spurious
* invalid OA reports
*/
struct ratelimit_state spurious_report_rs;
- struct i915_oa_config test_config;
+ /**
+ * For rate limiting any notifications of tail pointer
+ * race.
+ */
+ struct ratelimit_state tail_pointer_race;
u32 gen7_latched_oastatus1;
u32 ctx_oactxctrl_offset;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 2c062534eac1..e991a707bdb7 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -439,29 +439,10 @@ static u64 count_interrupts(struct drm_i915_private *i915)
return sum;
}
-static void engine_event_destroy(struct perf_event *event)
-{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct intel_engine_cs *engine;
-
- engine = intel_engine_lookup_user(i915,
- engine_event_class(event),
- engine_event_instance(event));
- if (drm_WARN_ON_ONCE(&i915->drm, !engine))
- return;
-
- if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
- intel_engine_supports_stats(engine))
- intel_disable_engine_stats(engine);
-}
-
static void i915_pmu_event_destroy(struct perf_event *event)
{
WARN_ON(event->parent);
-
- if (is_engine_event(event))
- engine_event_destroy(event);
+ module_put(THIS_MODULE);
}
static int
@@ -514,23 +495,13 @@ static int engine_event_init(struct perf_event *event)
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct intel_engine_cs *engine;
- u8 sample;
- int ret;
engine = intel_engine_lookup_user(i915, engine_event_class(event),
engine_event_instance(event));
if (!engine)
return -ENODEV;
- sample = engine_event_sample(event);
- ret = engine_event_status(engine, sample);
- if (ret)
- return ret;
-
- if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
- ret = intel_enable_engine_stats(engine);
-
- return ret;
+ return engine_event_status(engine, engine_event_sample(event));
}
static int i915_pmu_event_init(struct perf_event *event)
@@ -563,8 +534,10 @@ static int i915_pmu_event_init(struct perf_event *event)
if (ret)
return ret;
- if (!event->parent)
+ if (!event->parent) {
+ __module_get(THIS_MODULE);
event->destroy = i915_pmu_event_destroy;
+ }
return 0;
}
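The __module_get()/module_put() pairing above pins i915 for as long as any perf_event it created is alive, replacing the per-engine stats bookkeeping that was removed. The pattern in isolation (a generic sketch, not driver code; the unconditional __module_get() in the hunk above is safe because event init runs while the module is known live):

    #include <linux/module.h>

    static void my_obj_destroy(void)
    {
        module_put(THIS_MODULE);  /* drop the reference taken at init */
    }

    /* any object whose destructor lives in this module must pin
     * the module while the object exists */
    static int my_obj_init(void (**destroy)(void))
    {
        if (!try_module_get(THIS_MODULE))
            return -ENODEV;
        *destroy = my_obj_destroy;
        return 0;
    }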
@@ -1115,7 +1088,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
- dev_info(i915->drm.dev, "PMU not supported for this GPU.");
+ drm_info(&i915->drm, "PMU not supported for this GPU.");
return;
}
@@ -1178,7 +1151,7 @@ err_name:
if (!is_igp(i915))
kfree(pmu->name);
err:
- dev_notice(i915->drm.dev, "Failed to register PMU!\n");
+ drm_notice(&i915->drm, "Failed to register PMU!\n");
}
void i915_pmu_unregister(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 732aad148881..5003a71113cb 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -24,15 +24,12 @@ enum {
I915_PRIORITY_DISPLAY,
};
-#define I915_USER_PRIORITY_SHIFT 2
+#define I915_USER_PRIORITY_SHIFT 0
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
-#define I915_PRIORITY_WAIT ((u8)BIT(0))
-#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(1))
-
/* Smallest priority value that cannot be bumped. */
#define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
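With the shift dropped to 0 the derived macros collapse; a compile-time check of what that means for struct i915_priolist below:

    #define BIT(n) (1UL << (n))
    #define I915_USER_PRIORITY_SHIFT 0
    #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
    #define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
    #define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)

    _Static_assert(I915_PRIORITY_COUNT == 1,
                   "requests[] in i915_priolist is now a single list");
    _Static_assert(I915_PRIORITY_MASK == 0,
                   "no internal sub-priority bits (WAIT/NOSEMAPHORE) remain");
    _Static_assert(I915_USER_PRIORITY(2) == 2,
                   "user priorities now map 1:1 onto internal values");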
@@ -47,8 +44,6 @@ enum {
#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
#define I915_PRIORITY_BARRIER INT_MAX
-#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
-
struct i915_priolist {
struct list_head requests[I915_PRIORITY_COUNT];
struct rb_node node;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e0c6021fdaf9..6c076a24eb82 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -561,6 +561,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Registers used only by the command parser
*/
#define BCS_SWCTRL _MMIO(0x22200)
+#define BCS_SRC_Y REG_BIT(0)
+#define BCS_DST_Y REG_BIT(1)
/* There are 16 GPR registers */
#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
@@ -2555,6 +2557,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
+#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208)
+#define GEN12_VD0_AUX_NV _MMIO(0x4218)
+#define GEN12_VD1_AUX_NV _MMIO(0x4228)
+#define GEN12_VD2_AUX_NV _MMIO(0x4298)
+#define GEN12_VD3_AUX_NV _MMIO(0x42A8)
+#define GEN12_VE0_AUX_NV _MMIO(0x4238)
+#define GEN12_VE1_AUX_NV _MMIO(0x42B8)
+#define AUX_INV REG_BIT(0)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
#define RING_ACTHD(base) _MMIO((base) + 0x74)
@@ -2657,6 +2667,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */
#define RING_INSTPM(base) _MMIO((base) + 0xc0)
#define RING_MI_MODE(base) _MMIO((base) + 0x9c)
+#define RING_CMD_BUF_CCTL(base) _MMIO((base) + 0x84)
#define INSTPS _MMIO(0x2070) /* 965+ only */
#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
#define ACTHD_I965 _MMIO(0x2074)
@@ -3094,6 +3105,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) /* bdw+ */
#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
@@ -4012,31 +4024,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
-
-/*
- * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
- * 8300) freezing up around GPU hangs. Looks as if even
- * scheduling/timer interrupts start misbehaving if the RPS
- * EI/thresholds are "bad", leading to a very sluggish or even
- * frozen machine.
- */
-#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
-#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
-#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
-#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \
- (IS_GEN9_LP(dev_priv) ? \
- INTERVAL_0_833_US(us) : \
- INTERVAL_1_33_US(us)) : \
- INTERVAL_1_28_US(us))
-
-#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
-#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
-#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
-#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \
- (IS_GEN9_LP(dev_priv) ? \
- INTERVAL_0_833_TO_US(interval) : \
- INTERVAL_1_33_TO_US(interval)) : \
- INTERVAL_1_28_TO_US(interval))
+#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
/*
* Logical Context regs
@@ -4324,6 +4312,96 @@ enum {
#define EXITLINE_MASK REG_GENMASK(12, 0)
#define EXITLINE_SHIFT 0
+/* VRR registers */
+#define _TRANS_VRR_CTL_A 0x60420
+#define _TRANS_VRR_CTL_B 0x61420
+#define _TRANS_VRR_CTL_C 0x62420
+#define _TRANS_VRR_CTL_D 0x63420
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0)
+
+#define _TRANS_VRR_VMAX_A 0x60424
+#define _TRANS_VRR_VMAX_B 0x61424
+#define _TRANS_VRR_VMAX_C 0x62424
+#define _TRANS_VRR_VMAX_D 0x63424
+#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMAX_A)
+#define VRR_VMAX_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_VMIN_A 0x60434
+#define _TRANS_VRR_VMIN_B 0x61434
+#define _TRANS_VRR_VMIN_C 0x62434
+#define _TRANS_VRR_VMIN_D 0x63434
+#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMIN_A)
+#define VRR_VMIN_MASK REG_GENMASK(15, 0)
+
+#define _TRANS_VRR_VMAXSHIFT_A 0x60428
+#define _TRANS_VRR_VMAXSHIFT_B 0x61428
+#define _TRANS_VRR_VMAXSHIFT_C 0x62428
+#define _TRANS_VRR_VMAXSHIFT_D 0x63428
+#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(trans, \
+ _TRANS_VRR_VMAXSHIFT_A)
+#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
+#define VRR_VMAXSHIFT_DEC REG_BIT(16)
+#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0)
+
+#define _TRANS_VRR_STATUS_A 0x6042C
+#define _TRANS_VRR_STATUS_B 0x6142C
+#define _TRANS_VRR_STATUS_C 0x6242C
+#define _TRANS_VRR_STATUS_D 0x6342C
+#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS_A)
+#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
+#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
+#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
+#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28)
+#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27)
+#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26)
+#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20)
+#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
+#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
+#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
+#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
+#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
+#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
+#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
+
+#define _TRANS_VRR_VTOTAL_PREV_A 0x60480
+#define _TRANS_VRR_VTOTAL_PREV_B 0x61480
+#define _TRANS_VRR_VTOTAL_PREV_C 0x62480
+#define _TRANS_VRR_VTOTAL_PREV_D 0x63480
+#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(trans, \
+ _TRANS_VRR_VTOTAL_PREV_A)
+#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
+#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
+#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
+#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_FLIPLINE_A 0x60438
+#define _TRANS_VRR_FLIPLINE_B 0x61438
+#define _TRANS_VRR_FLIPLINE_C 0x62438
+#define _TRANS_VRR_FLIPLINE_D 0x63438
+#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(trans, \
+ _TRANS_VRR_FLIPLINE_A)
+#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_STATUS2_A 0x6043C
+#define _TRANS_VRR_STATUS2_B 0x6143C
+#define _TRANS_VRR_STATUS2_C 0x6243C
+#define _TRANS_VRR_STATUS2_D 0x6343C
+#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS2_A)
+#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_PUSH_A 0x60A70
+#define _TRANS_PUSH_B 0x61A70
+#define _TRANS_PUSH_C 0x62A70
+#define _TRANS_PUSH_D 0x63A70
+#define TRANS_PUSH(trans) _MMIO_TRANS2(trans, _TRANS_PUSH_A)
+#define TRANS_PUSH_EN REG_BIT(31)
+#define TRANS_PUSH_SEND REG_BIT(30)
+
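The new VRR definitions follow the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() idiom used throughout i915_reg.h: a mask names the field and values are shifted into place through the mask. A rough userspace sketch of how such a register value is composed, using simplified stand-ins for the kernel helpers (no compile-time type checking; __builtin_ctz is a GCC/Clang builtin):

#include <stdint.h>
#include <stdio.h>

#define SK_BIT(n)		(UINT32_C(1) << (n))
#define SK_GENMASK(h, l)	((~UINT32_C(0) << (l)) & (~UINT32_C(0) >> (31 - (h))))
#define SK_FIELD_PREP(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t ctl = 0;

	ctl |= SK_BIT(31);				/* VRR_CTL_VRR_ENABLE */
	ctl |= SK_FIELD_PREP(SK_GENMASK(10, 3), 0x20);	/* line count field */
	printf("TRANS_VRR_CTL = 0x%x\n", (unsigned int)ctl);
	return 0;
}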
/*
* HSW+ eDP PSR registers
*
@@ -6764,7 +6842,7 @@ enum {
#define PLANE_CTL_FORMAT_P012 (5 << 24)
#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
#define PLANE_CTL_FORMAT_P016 (7 << 24)
-#define PLANE_CTL_FORMAT_AYUV (8 << 24)
+#define PLANE_CTL_FORMAT_XYUV (8 << 24)
#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
@@ -8503,6 +8581,7 @@ enum {
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
+#define SBCLK_RUN_REFCLK_DIS (1 << 7)
#define SPT_PWM_GRANULARITY (1 << 0)
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
@@ -8994,6 +9073,7 @@ enum {
#define GEN7_PCODE_ILLEGAL_DATA 0x3
#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
#define GEN11_PCODE_LOCKED 0x6
+#define GEN11_PCODE_REJECTED 0x11
#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
@@ -9015,10 +9095,18 @@ enum {
#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
+#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
+#define ICL_PCODE_POINTS_RESTRICTED 0x0
+#define ICL_PCODE_POINTS_RESTRICTED_MASK 0x1
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
+#define ICL_PCODE_EXIT_TCCOLD 0x12
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
+#define TGL_PCODE_TCCOLD 0x26
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0)
+#define TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ 0
+#define TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ REG_BIT(0)
/* See also IPS_CTL */
#define IPS_PCODE_CONTROL (1 << 30)
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
@@ -9305,6 +9393,22 @@ enum {
#define AUD_PIN_BUF_CTL _MMIO(0x48414)
#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+/* Display Audio Config Reg */
+#define AUD_CONFIG_BE _MMIO(0x65ef0)
+#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
+#define HBLANK_EARLY_ENABLE_TGL(pipe) (0x1 << (24 + (pipe)))
+#define HBLANK_START_COUNT_MASK(pipe) (0x7 << (3 + ((pipe) * 6)))
+#define HBLANK_START_COUNT(pipe, val) (((val) & 0x7) << (3 + ((pipe)) * 6))
+#define NUMBER_SAMPLES_PER_LINE_MASK(pipe) (0x3 << ((pipe) * 6))
+#define NUMBER_SAMPLES_PER_LINE(pipe, val) (((val) & 0x3) << ((pipe) * 6))
+
+#define HBLANK_START_COUNT_8 0
+#define HBLANK_START_COUNT_16 1
+#define HBLANK_START_COUNT_32 2
+#define HBLANK_START_COUNT_64 3
+#define HBLANK_START_COUNT_96 4
+#define HBLANK_START_COUNT_128 5
+
/*
* HSW - ICL power wells
*
@@ -9700,8 +9804,11 @@ enum skl_power_gate {
#define TRANS_DDI_BPC_10 (1 << 20)
#define TRANS_DDI_BPC_6 (2 << 20)
#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18) /* bdw-cnl */
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
#define TRANS_DDI_PVSYNC (1 << 17)
#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15) /* bdw-cnl */
#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
@@ -9728,12 +9835,10 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
-#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
- _TRANS_DDI_FUNC_CTL2_A)
-#define PORT_SYNC_MODE_ENABLE (1 << 4)
-#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
-#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
-#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
+#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
@@ -9794,6 +9899,24 @@ enum skl_power_gate {
#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
+/* DDI DP Compliance Control */
+#define _DDI_DP_COMP_CTL_A 0x605F0
+#define _DDI_DP_COMP_CTL_B 0x615F0
+#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
+#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
+#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
+#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
+#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
+#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
+#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
+
+/* DDI DP Compliance Pattern */
+#define _DDI_DP_COMP_PAT_A 0x605F4
+#define _DDI_DP_COMP_PAT_B 0x615F4
+#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
+
/* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA,
* which contains the payload */
@@ -10741,6 +10864,12 @@ enum skl_power_gate {
#define _PAL_PREC_MULTI_SEG_DATA_A 0x4A40C
#define _PAL_PREC_MULTI_SEG_DATA_B 0x4AC0C
+#define PAL_PREC_MULTI_SEG_RED_LDW_MASK REG_GENMASK(29, 24)
+#define PAL_PREC_MULTI_SEG_RED_UDW_MASK REG_GENMASK(29, 20)
+#define PAL_PREC_MULTI_SEG_GREEN_LDW_MASK REG_GENMASK(19, 14)
+#define PAL_PREC_MULTI_SEG_GREEN_UDW_MASK REG_GENMASK(19, 10)
+#define PAL_PREC_MULTI_SEG_BLUE_LDW_MASK REG_GENMASK(9, 4)
+#define PAL_PREC_MULTI_SEG_BLUE_UDW_MASK REG_GENMASK(9, 0)
#define PREC_PAL_MULTI_SEG_INDEX(pipe) _MMIO_PIPE(pipe, \
_PAL_PREC_MULTI_SEG_INDEX_A, \
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c0df71d7d0ff..526c1e9acbd5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -23,6 +23,7 @@
*/
#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
@@ -101,6 +102,11 @@ static signed long i915_fence_wait(struct dma_fence *fence,
timeout);
}
+struct kmem_cache *i915_request_slab_cache(void)
+{
+ return global.slab_requests;
+}
+
static void i915_fence_release(struct dma_fence *fence)
{
struct i915_request *rq = to_request(fence);
@@ -115,6 +121,10 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->submit);
i915_sw_fence_fini(&rq->semaphore);
+	/* Keep one request on each engine for reserved use under memory pressure */
+ if (!cmpxchg(&rq->engine->request_pool, NULL, rq))
+ return;
+
kmem_cache_free(global.slab_requests, rq);
}
@@ -358,8 +368,6 @@ __await_execution(struct i915_request *rq,
}
spin_unlock_irq(&signal->lock);
- /* Copy across semaphore status as we need the same behaviour */
- rq->sched.flags |= signal->sched.flags;
return 0;
}
@@ -527,10 +535,8 @@ void __i915_request_unsubmit(struct i915_request *request)
spin_unlock(&request->lock);
/* We've already spun, don't charge on resubmitting. */
- if (request->sched.semaphores && i915_request_started(request)) {
- request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+ if (request->sched.semaphores && i915_request_started(request))
request->sched.semaphores = 0;
- }
/*
* We don't need to wake_up any waiters on request->execute, they
@@ -588,15 +594,6 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
-static void irq_semaphore_cb(struct irq_work *wrk)
-{
- struct i915_request *rq =
- container_of(wrk, typeof(*rq), semaphore_work);
-
- i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
- i915_request_put(rq);
-}
-
static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
@@ -604,11 +601,6 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
switch (state) {
case FENCE_COMPLETE:
- if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
- i915_request_get(rq);
- init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
- irq_work_queue(&rq->semaphore_work);
- }
break;
case FENCE_FREE:
@@ -629,14 +621,22 @@ static void retire_requests(struct intel_timeline *tl)
}
static noinline struct i915_request *
-request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl,
+ struct i915_request **rsvd,
+ gfp_t gfp)
{
struct i915_request *rq;
- if (list_empty(&tl->requests))
- goto out;
+ /* If we cannot wait, dip into our reserves */
+ if (!gfpflags_allow_blocking(gfp)) {
+ rq = xchg(rsvd, NULL);
+ if (!rq) /* Use the normal failure path for one final WARN */
+ goto out;
- if (!gfpflags_allow_blocking(gfp))
+ return rq;
+ }
+
+ if (list_empty(&tl->requests))
goto out;
/* Move our oldest request to the slab-cache (if not in use!) */
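request_alloc_slow() above dips into a one-slot emergency reserve: the release path parks at most one request per engine with cmpxchg() (see i915_fence_release), and the atomic allocation path reclaims it with xchg(). The shape of that pattern in a standalone sketch; all names here are illustrative:

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(void *) reserve_slot;	/* one parked object, or NULL */

static void obj_free(void *obj)
{
	void *expected = NULL;

	/* Park the first freed object as the reserve; free the rest. */
	if (atomic_compare_exchange_strong(&reserve_slot, &expected, obj))
		return;
	free(obj);
}

static void *obj_alloc_atomic(void)
{
	void *obj = malloc(64);

	if (!obj)	/* cannot block, so dip into the reserve */
		obj = atomic_exchange(&reserve_slot, NULL);
	return obj;	/* may still be NULL: both sources exhausted */
}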
@@ -721,7 +721,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq = kmem_cache_alloc(global.slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = request_alloc_slow(tl, gfp);
+ rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
@@ -933,6 +933,7 @@ __emit_semaphore_wait(struct i915_request *to,
u32 *cs;
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
/* We need to pin the signaler's HWSP until we are finished reading. */
err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
@@ -978,13 +979,26 @@ emit_semaphore_wait(struct i915_request *to,
gfp_t gfp)
{
const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+ struct i915_sw_fence *wait = &to->submit;
if (!intel_context_use_semaphores(to->context))
goto await_fence;
+ if (i915_request_has_initial_breadcrumb(to))
+ goto await_fence;
+
if (!rcu_access_pointer(from->hwsp_cacheline))
goto await_fence;
+ /*
+ * If this or its dependents are waiting on an external fence
+ * that may fail catastrophically, then we want to avoid using
+	 * semaphores as they bypass the fence signaling metadata, and we
+ * lose the fence->error propagation.
+ */
+ if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
+ goto await_fence;
+
/* Just emit the first semaphore we see as request space is limited. */
if (already_busywaiting(to) & mask)
goto await_fence;
@@ -1000,11 +1014,10 @@ emit_semaphore_wait(struct i915_request *to,
goto await_fence;
to->sched.semaphores |= mask;
- to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
- return 0;
+ wait = &to->semaphore;
await_fence:
- return i915_sw_fence_await_dma_fence(&to->submit,
+ return i915_sw_fence_await_dma_fence(wait,
&from->fence, 0,
I915_FENCE_GFP);
}
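The await_fence fallback matters because fence->error only travels along the software-fence signaling path; a hardware semaphore wait would run the payload regardless of a fatal error upstream. A minimal model of the set-error-once semantics that path preserves (hypothetical names, not the i915_sw_fence implementation):

#include <stdatomic.h>

struct sw_fence { atomic_int error; };

static void fence_set_error_once(struct sw_fence *f, int error)
{
	int expected = 0;

	/* The first non-zero error latched on the fence wins. */
	if (error)
		atomic_compare_exchange_strong(&f->error, &expected, error);
}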
@@ -1017,11 +1030,15 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
GEM_BUG_ON(to == from);
GEM_BUG_ON(to->timeline == from->timeline);
- if (i915_request_completed(from))
+ if (i915_request_completed(from)) {
+ i915_sw_fence_set_error_once(&to->submit, from->fence.error);
return 0;
+ }
if (to->engine->schedule) {
- ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
+ ret = i915_sched_node_add_dependency(&to->sched,
+ &from->sched,
+ I915_DEPENDENCY_EXTERNAL);
if (ret < 0)
return ret;
}
@@ -1035,15 +1052,56 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
if (ret < 0)
return ret;
- if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
- ret = i915_sw_fence_await_dma_fence(&to->semaphore,
- &from->fence, 0,
- I915_FENCE_GFP);
- if (ret < 0)
- return ret;
+ return 0;
+}
+
+static void mark_external(struct i915_request *rq)
+{
+ /*
+ * The downside of using semaphores is that we lose metadata passing
+ * along the signaling chain. This is particularly nasty when we
+ * need to pass along a fatal error such as EFAULT or EDEADLK. For
+ * fatal errors we want to scrub the request before it is executed,
+ * which means that we cannot preload the request onto HW and have
+ * it wait upon a semaphore.
+ */
+ rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
+}
+
+static int
+__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+ mark_external(rq);
+ return i915_sw_fence_await_dma_fence(&rq->submit, fence,
+ i915_fence_context_timeout(rq->i915,
+ fence->context),
+ I915_FENCE_GFP);
+}
+
+static int
+i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+ struct dma_fence *iter;
+ int err = 0;
+
+ if (!to_dma_fence_chain(fence))
+ return __i915_request_await_external(rq, fence);
+
+ dma_fence_chain_for_each(iter, fence) {
+ struct dma_fence_chain *chain = to_dma_fence_chain(iter);
+
+ if (!dma_fence_is_i915(chain->fence)) {
+ err = __i915_request_await_external(rq, iter);
+ break;
+ }
+
+ err = i915_request_await_dma_fence(rq, chain->fence);
+ if (err < 0)
+ break;
}
- return 0;
+ dma_fence_put(iter);
+ return err;
}
int
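i915_request_await_external() above peels a dma_fence_chain link by link: native fences get the lightweight request-to-request wait, and the first foreign link triggers one heavyweight external wait that covers the remainder of the chain. The control flow, reduced to a plain linked-list walk with purely illustrative types:

struct chain_node {
	struct chain_node *prev;	/* older link in the chain */
	int is_native;			/* dma_fence_is_i915() analogue */
};

static int await_native(struct chain_node *n)   { (void)n; return 0; }
static int await_external(struct chain_node *n) { (void)n; return 0; }

static int await_chain(struct chain_node *newest)
{
	struct chain_node *it;

	for (it = newest; it; it = it->prev) {
		if (!it->is_native)
			/* one external wait covers this and all older links */
			return await_external(it);
		if (await_native(it) < 0)
			return -1;
	}
	return 0;
}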
@@ -1093,9 +1151,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
if (dma_fence_is_i915(fence))
ret = i915_request_await_request(rq, to_request(fence));
else
- ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- fence->context ? I915_FENCE_TIMEOUT : 0,
- I915_FENCE_GFP);
+ ret = i915_request_await_external(rq, fence);
if (ret < 0)
return ret;
@@ -1175,7 +1231,8 @@ __i915_request_await_execution(struct i915_request *to,
* immediate execution, and so we must wait until it reaches the
* active slot.
*/
- if (intel_engine_has_semaphores(to->engine)) {
+ if (intel_engine_has_semaphores(to->engine) &&
+ !i915_request_has_initial_breadcrumb(to)) {
err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
if (err < 0)
return err;
@@ -1183,7 +1240,9 @@ __i915_request_await_execution(struct i915_request *to,
/* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
- err = i915_sched_node_add_dependency(&to->sched, &from->sched);
+ err = i915_sched_node_add_dependency(&to->sched,
+ &from->sched,
+ I915_DEPENDENCY_WEAK);
if (err < 0)
return err;
}
@@ -1219,6 +1278,9 @@ i915_request_await_execution(struct i915_request *rq,
continue;
}
+ if (fence->context == rq->fence.context)
+ continue;
+
/*
* We don't squash repeated fence dependencies here as we
* want to run our callback in all cases.
@@ -1229,9 +1291,7 @@ i915_request_await_execution(struct i915_request *rq,
to_request(fence),
hook);
else
- ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- I915_FENCE_TIMEOUT,
- GFP_KERNEL);
+ ret = i915_request_await_external(rq, fence);
if (ret < 0)
return ret;
} while (--nchild);
@@ -1439,14 +1499,7 @@ void i915_request_add(struct i915_request *rq)
attr = ctx->sched;
rcu_read_unlock();
- if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
- attr.priority |= I915_PRIORITY_NOSEMAPHORE;
- if (list_empty(&rq->sched.signalers_list))
- attr.priority |= I915_PRIORITY_WAIT;
-
- local_bh_disable();
__i915_request_queue(rq, &attr);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
mutex_unlock(&tl->mutex);
}
@@ -1630,7 +1683,6 @@ long i915_request_wait(struct i915_request *rq,
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
intel_rps_boost(rq);
- i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
}
wait.tsk = current;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 3c552bfea67a..8ec7ee4dbadc 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -84,19 +84,26 @@ enum {
I915_FENCE_FLAG_PQUEUE,
/*
- * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
+ * I915_FENCE_FLAG_HOLD - this request is currently on hold
*
- * Internal bookkeeping used by the breadcrumb code to track when
- * a request is on the various signal_list.
+ * This request has been suspended, pending an ongoing investigation.
*/
- I915_FENCE_FLAG_SIGNAL,
+ I915_FENCE_FLAG_HOLD,
/*
- * I915_FENCE_FLAG_HOLD - this request is currently on hold
+ * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
+ * breadcrumb that marks the end of semaphore waits and start of the
+ * user payload.
+ */
+ I915_FENCE_FLAG_INITIAL_BREADCRUMB,
+
+ /*
+ * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
*
- * This request has been suspended, pending an ongoing investigation.
+ * Internal bookkeeping used by the breadcrumb code to track when
+ * a request is on the various signal_list.
*/
- I915_FENCE_FLAG_HOLD,
+ I915_FENCE_FLAG_SIGNAL,
/*
* I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
@@ -209,7 +216,6 @@ struct i915_request {
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
- struct irq_work semaphore_work;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -300,6 +306,8 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
return fence->ops == &i915_fence_ops;
}
+struct kmem_cache *i915_request_slab_cache(void);
+
struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
@@ -388,6 +396,12 @@ static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
+static inline bool
+i915_request_has_initial_breadcrumb(const struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+}
+
/**
* Returns true if seq1 is later than seq2.
*/
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 68b06a7ba667..f4ea318781f0 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -51,11 +51,11 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
rb_first(&execlists->queue.rb_root));
- last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
+ last_prio = INT_MAX;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
const struct i915_priolist *p = to_priolist(rb);
- GEM_BUG_ON(p->priority >= last_prio);
+ GEM_BUG_ON(p->priority > last_prio);
last_prio = p->priority;
GEM_BUG_ON(!p->used);
@@ -174,7 +174,7 @@ sched_lock_engine(const struct i915_sched_node *node,
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->sched.attr.priority | __NO_PREEMPTION;
+ return rq->sched.attr.priority;
}
static inline bool need_preempt(int prio, int active)
@@ -209,6 +209,12 @@ static void kick_submission(struct intel_engine_cs *engine,
if (!inflight)
goto unlock;
+ ENGINE_TRACE(engine,
+ "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+ prio,
+ rq->fence.context, rq->fence.seqno,
+ inflight->fence.context, inflight->fence.seqno,
+ inflight->sched.attr.priority);
engine->execlists.queue_priority_hint = prio;
/*
@@ -428,25 +434,12 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
dep->waiter = node;
dep->flags = flags;
- /* Keep track of whether anyone on this chain has a semaphore */
- if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
- !node_started(signal))
- node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-
/* All set, now publish. Beware the lockless walkers. */
list_add_rcu(&dep->signal_link, &node->signalers_list);
list_add_rcu(&dep->wait_link, &signal->waiters_list);
- /*
- * As we do not allow WAIT to preempt inflight requests,
- * once we have executed a request, along with triggering
- * any execution callbacks, we must preserve its ordering
- * within the non-preemptible FIFO.
- */
- BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
- if (flags & I915_DEPENDENCY_EXTERNAL)
- __bump_priority(signal, __NO_PREEMPTION);
-
+ /* Propagate the chains */
+ node->flags |= signal->flags;
ret = true;
}
@@ -456,7 +449,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
}
int i915_sched_node_add_dependency(struct i915_sched_node *node,
- struct i915_sched_node *signal)
+ struct i915_sched_node *signal,
+ unsigned long flags)
{
struct i915_dependency *dep;
@@ -464,11 +458,14 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!dep)
return -ENOMEM;
+ local_bh_disable();
+
if (!__i915_sched_node_add_dependency(node, signal, dep,
- I915_DEPENDENCY_EXTERNAL |
- I915_DEPENDENCY_ALLOC))
+ flags | I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
+ local_bh_enable(); /* kick submission tasklet */
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index d1dc4efef77b..6f0bf00fc569 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -34,7 +34,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
unsigned long flags);
int i915_sched_node_add_dependency(struct i915_sched_node *node,
- struct i915_sched_node *signal);
+ struct i915_sched_node *signal,
+ unsigned long flags);
void i915_sched_node_fini(struct i915_sched_node *node);
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index d18e70550054..f72e6c397b08 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -65,7 +65,7 @@ struct i915_sched_node {
struct list_head link;
struct i915_sched_attr attr;
unsigned int flags;
-#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0)
+#define I915_SCHED_HAS_EXTERNAL_CHAIN BIT(0)
intel_engine_mask_t semaphores;
};
@@ -78,6 +78,7 @@ struct i915_dependency {
unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
#define I915_DEPENDENCY_EXTERNAL BIT(1)
+#define I915_DEPENDENCY_WEAK BIT(2)
};
#endif /* _I915_SCHEDULER_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 98bcb6fa0ab4..d53d207ab6eb 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -133,4 +133,6 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
__igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+void igt_hexdump(const void *buf, size_t len);
+
#endif /* !__I915_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index a3d38e089b6e..295b9829e2da 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -421,7 +421,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
if (!fence)
return;
- pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n",
+ pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno,
@@ -546,13 +546,11 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
cb->fence = fence;
i915_sw_fence_await(fence);
- ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
- if (ret == 0) {
- ret = 1;
- } else {
+ ret = 1;
+ if (dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake)) {
+ /* fence already signaled */
__dma_i915_sw_fence_wake(dma, &cb->base);
- if (ret == -ENOENT) /* fence already signaled */
- ret = 0;
+ ret = 0;
}
return ret;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index 997b2998f1f2..a3a81bb8f2c3 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -38,7 +38,10 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
if (!f->dma.error) {
dma_fence_get(&f->dma);
- queue_work(system_unbound_wq, &f->work);
+ if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
+ fence_work(&f->work);
+ else
+ queue_work(system_unbound_wq, &f->work);
} else {
fence_complete(f);
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h
index 3a22b287e201..2c409f11c5c5 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h
@@ -32,6 +32,10 @@ struct dma_fence_work {
const struct dma_fence_work_ops *ops;
};
+enum {
+ DMA_FENCE_WORK_IMM = DMA_FENCE_FLAG_USER_BITS,
+};
+
void dma_fence_work_init(struct dma_fence_work *f,
const struct dma_fence_work_ops *ops);
int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
@@ -41,4 +45,23 @@ static inline void dma_fence_work_commit(struct dma_fence_work *f)
i915_sw_fence_commit(&f->chain);
}
+/**
+ * dma_fence_work_commit_imm: Commit the fence, and if possible execute locally.
+ * @f: the fenced worker
+ *
+ * Instead of always scheduling a worker to execute the callback (see
+ * dma_fence_work_commit()), we try to execute the callback immediately in
+ * the local context. It is required that the fence be committed before it
+ * is published, and that no other threads try to tamper with the number
+ * of asynchronous waits on the fence (or else the callback will be
+ * executed in the wrong context, i.e. not the caller's).
+ */
+static inline void dma_fence_work_commit_imm(struct dma_fence_work *f)
+{
+ if (atomic_read(&f->chain.pending) <= 1)
+ __set_bit(DMA_FENCE_WORK_IMM, &f->dma.flags);
+
+ dma_fence_work_commit(f);
+}
+
#endif /* I915_SW_FENCE_WORK_H */
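dma_fence_work_commit_imm() turns the pending count into a cheap heuristic: if the caller holds the only reference keeping the fence open, the callback can run synchronously instead of bouncing through system_unbound_wq. The decision in miniature, with userspace-flavoured stand-ins for the i915 types:

#include <stdatomic.h>
#include <stdbool.h>

struct fence_work {
	atomic_int pending;	/* outstanding holds on the fence */
	bool run_immediately;	/* stands in for DMA_FENCE_WORK_IMM */
};

static void fence_work_commit(struct fence_work *w)
{
	/* run w inline or queue it to a worker, depending on the flag */
	(void)w;
}

static void fence_work_commit_imm(struct fence_work *w)
{
	/* Only safe before the fence is published to other threads. */
	if (atomic_load(&w->pending) <= 1)
		w->run_immediately = true;
	fence_work_commit(w);
}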
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index ed69b5d4a375..b3a24eac21f1 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -20,14 +20,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
}
if (state == VGA_SWITCHEROO_ON) {
- pr_info("switched on\n");
+ drm_info(&i915->drm, "switched on\n");
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(pdev, PCI_D0);
i915_resume_switcheroo(i915);
i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- pr_info("switched off\n");
+ drm_info(&i915->drm, "switched off\n");
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend_switcheroo(i915, pmm);
i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 029854ae65fc..e28eae4a8f70 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -101,5 +101,6 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout)
*/
barrier();
- mod_timer(t, jiffies + timeout);
+ /* Keep t->expires = 0 reserved to indicate a canceled timer. */
+ mod_timer(t, jiffies + timeout ?: 1);
}
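The "x ?: y" form above is the GNU C conditional with an omitted middle operand: it evaluates x once and yields it unless the result is zero, in which case it yields y. That is what keeps a computed expiry of 0, the value reserved for a canceled timer, from ever being handed to mod_timer(). A two-line demonstration (GCC/Clang extension):

#include <stdio.h>

int main(void)
{
	unsigned long expires = 0;

	printf("%lu\n", expires ?: 1UL);	/* prints 1, never the reserved 0 */
	return 0;
}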
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 82e3bc280622..fc14ebf9a0b7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -522,7 +522,6 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
GEM_BUG_ON(!obj);
i915_vma_unpin(vma);
- i915_vma_close(vma);
if (flags & I915_VMA_RELEASE_MAP)
i915_gem_object_unpin_map(obj);
@@ -610,18 +609,6 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
return true;
}
-static void assert_bind_count(const struct drm_i915_gem_object *obj)
-{
- /*
- * Combine the assertion that the object is bound and that we have
- * pinned its pages. But we should never have bound the object
- * more than we have pinned its pages. (For complete accuracy, we
- * assume that no else is pinning the pages, but as a rough assertion
- * that we will not run into problems later, this will do!)
- */
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
-}
-
/**
* i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma
@@ -740,12 +727,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- atomic_inc(&obj->bind_count);
- assert_bind_count(obj);
- }
list_add_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
@@ -763,12 +744,6 @@ i915_vma_detach(struct i915_vma *vma)
* it to be reaped by the shrinker.
*/
list_del(&vma->vm_link);
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- assert_bind_count(obj);
- atomic_dec(&obj->bind_count);
- }
}
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
@@ -915,11 +890,30 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
- /* No more allocations allowed once we hold vm->mutex */
- err = mutex_lock_interruptible(&vma->vm->mutex);
+ /*
+ * Differentiate between user/kernel vma inside the aliasing-ppgtt.
+ *
+ * We conflate the Global GTT with the user's vma when using the
+ * aliasing-ppgtt, but it is still vitally important to try and
+ * keep the use cases distinct. For example, userptr objects are
+ * not allowed inside the Global GTT as that will cause lock
+	 * inversions when we have to evict them from the mmu_notifier callbacks -
+ * but they are allowed to be part of the user ppGTT which can never
+ * be mapped. As such we try to give the distinct users of the same
+	 * mutex distinct lockclasses [equivalent to how we keep i915_ggtt
+ * and i915_ppgtt separate].
+ *
+ * NB this may cause us to mask real lock inversions -- while the
+ * code is safe today, lockdep may not be able to spot future
+ * transgressions.
+ */
+ err = mutex_lock_interruptible_nested(&vma->vm->mutex,
+ !(flags & PIN_GLOBAL));
if (err)
goto err_fence;
+ /* No more allocations allowed now we hold vm->mutex */
+
if (unlikely(i915_vma_is_closed(vma))) {
err = -ENOENT;
goto err_unlock;
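mutex_lock_interruptible_nested() takes a lockdep subclass as its second argument, so the expression !(flags & PIN_GLOBAL) maps GGTT pins to subclass 0 and user ppGTT pins to subclass 1. The mapping on its own as a runnable check; vm_lock_subclass is a hypothetical helper, not i915 code:

#include <assert.h>
#include <stdbool.h>

static unsigned int vm_lock_subclass(bool pin_global)
{
	return pin_global ? 0 : 1;	/* mirrors !(flags & PIN_GLOBAL) */
}

int main(void)
{
	assert(vm_lock_subclass(true) == 0);	/* kernel/GGTT path */
	assert(vm_lock_subclass(false) == 1);	/* user/ppGTT path */
	return 0;
}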
@@ -982,7 +976,7 @@ err_unlock:
mutex_unlock(&vma->vm->mutex);
err_fence:
if (work)
- dma_fence_work_commit(&work->base);
+ dma_fence_work_commit_imm(&work->base);
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
@@ -1028,13 +1022,8 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
} while (1);
}
-void i915_vma_close(struct i915_vma *vma)
+static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
- struct intel_gt *gt = vma->vm->gt;
- unsigned long flags;
-
- GEM_BUG_ON(i915_vma_is_closed(vma));
-
/*
* We defer actually closing, unbinding and destroying the VMA until
* the next idle point, or if the object is freed in the meantime. By
@@ -1047,9 +1036,25 @@ void i915_vma_close(struct i915_vma *vma)
* causing us to rebind the VMA once more. This ends up being a lot
* of wasted work for the steady state.
*/
- spin_lock_irqsave(&gt->closed_lock, flags);
+ GEM_BUG_ON(i915_vma_is_closed(vma));
list_add(&vma->closed_link, &gt->closed_vma);
- spin_unlock_irqrestore(&gt->closed_lock, flags);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+ struct intel_gt *gt = vma->vm->gt;
+ unsigned long flags;
+
+ if (i915_vma_is_ggtt(vma))
+ return;
+
+ GEM_BUG_ON(!atomic_read(&vma->open_count));
+ if (atomic_dec_and_lock_irqsave(&vma->open_count,
+ &gt->closed_lock,
+ flags)) {
+ __vma_close(vma, gt);
+ spin_unlock_irqrestore(&gt->closed_lock, flags);
+ }
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
@@ -1174,7 +1179,8 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */
- err = i915_request_await_active(rq, &vma->active, 0);
+ err = i915_request_await_active(rq, &vma->active,
+ I915_ACTIVE_AWAIT_EXCL);
if (err)
return err;
@@ -1215,6 +1221,10 @@ int i915_vma_move_to_active(struct i915_vma *vma,
dma_resv_add_shared_fence(vma->resv, &rq->fence);
obj->write_domain = 0;
}
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
+ i915_active_add_request(&vma->fence->active, rq);
+
obj->read_domains |= I915_GEM_GPU_DOMAINS;
obj->mm.dirty = true;
@@ -1228,18 +1238,6 @@ int __i915_vma_unbind(struct i915_vma *vma)
lockdep_assert_held(&vma->vm->mutex);
- /*
- * First wait upon any activity as retiring the request may
- * have side-effects such as unpinning or even unbinding this vma.
- *
- * XXX Actually waiting under the vm->mutex is a hinderance and
- * should be pipelined wherever possible. In cases where that is
- * unavoidable, we should lift the wait to before the mutex.
- */
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
-
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
@@ -1261,6 +1259,9 @@ int __i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_map_and_fenceable(vma)) {
+ /* Force a pagefault for domain tracking on next user access */
+ i915_vma_revoke_mmap(vma);
+
/*
* Check that we have flushed all writes through the GGTT
 * before the unbind; otherwise, due to the non-strict nature of those
 * indirect writes, they may end up referencing the GGTT PTE
 * after the unbind.
 */
@@ -1277,12 +1278,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_flush_writes(vma);
/* release the fence reg _after_ flushing */
- ret = i915_vma_revoke_fence(vma);
- if (ret)
- return ret;
-
- /* Force a pagefault for domain tracking on next user access */
- i915_vma_revoke_mmap(vma);
+ i915_vma_revoke_fence(vma);
__i915_vma_iounmap(vma);
clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
@@ -1313,16 +1309,21 @@ int i915_vma_unbind(struct i915_vma *vma)
if (!drm_mm_node_allocated(&vma->node))
return 0;
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
- /* XXX not always required: nop_clear_range */
- wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
/* Optimistic wait before taking the mutex */
err = i915_vma_sync(vma);
if (err)
goto out_rpm;
- err = mutex_lock_interruptible(&vm->mutex);
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ /* XXX not always required: nop_clear_range */
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
if (err)
goto out_rpm;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index e1ced1df13e1..8ad1daabcd58 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,10 +30,10 @@
#include <drm/drm_mm.h>
+#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"
#include "i915_gem_gtt.h"
-#include "i915_gem_fence_reg.h"
#include "i915_active.h"
#include "i915_request.h"
@@ -326,7 +326,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
* True if the vma has a fence, false otherwise.
*/
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
-int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
+void i915_vma_revoke_fence(struct i915_vma *vma);
int __i915_vma_pin_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d7fe12734db8..8a635bd4d5d8 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -98,6 +98,7 @@ void intel_device_info_print_static(const struct intel_device_info *info,
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+ drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
@@ -135,8 +136,8 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
sseu_dump(&info->sseu, p);
drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
- drm_printf(p, "CS timestamp frequency: %u kHz\n",
- info->cs_timestamp_frequency_khz);
+ drm_printf(p, "CS timestamp frequency: %u Hz\n",
+ info->cs_timestamp_frequency_hz);
}
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
@@ -677,12 +678,12 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
- base_freq *= 1000;
+ base_freq *= 1000000;
frac_freq = ((ts_override &
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
- frac_freq = 1000 / (frac_freq + 1);
+ frac_freq = 1000000 / (frac_freq + 1);
return base_freq + frac_freq;
}
@@ -690,8 +691,8 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
u32 rpm_config_reg)
{
- u32 f19_2_mhz = 19200;
- u32 f24_mhz = 24000;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
u32 crystal_clock = (rpm_config_reg &
GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
@@ -710,10 +711,10 @@ static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
u32 rpm_config_reg)
{
- u32 f19_2_mhz = 19200;
- u32 f24_mhz = 24000;
- u32 f25_mhz = 25000;
- u32 f38_4_mhz = 38400;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 f25_mhz = 25000000;
+ u32 f38_4_mhz = 38400000;
u32 crystal_clock = (rpm_config_reg &
GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
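These constants switch from kHz to Hz, which is why every value gains three zeros and the gen4 path multiplies rawclk_freq (still kept in kHz) by 1000. A worked example of the unit change for the 38.4 MHz crystal:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t f38_4_mhz = 38400000;			/* was 38400 in kHz */
	uint64_t period_ns = 1000000000ull / f38_4_mhz;	/* ~26 ns per tick */

	printf("tick period = %llu ns\n", (unsigned long long)period_ns);
	return 0;
}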
@@ -735,9 +736,9 @@ static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
- u32 f12_5_mhz = 12500;
- u32 f19_2_mhz = 19200;
- u32 f24_mhz = 24000;
+ u32 f12_5_mhz = 12500000;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
if (INTEL_GEN(dev_priv) <= 4) {
/* PRMs say:
@@ -746,7 +747,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
* hclks." (through the “Clocking Configuration”
* (“CLKCFG”) MCHBAR register)
*/
- return RUNTIME_INFO(dev_priv)->rawclk_freq / 16;
+ return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
} else if (INTEL_GEN(dev_priv) <= 8) {
/* PRMs say:
*
@@ -980,35 +981,32 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
info->pipe_mask = 0;
+ info->cpu_transcoder_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
info->pipe_mask &= ~BIT(PIPE_C);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
- u8 enabled_mask = info->pipe_mask;
-
- if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
- enabled_mask &= ~BIT(PIPE_A);
- if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
- enabled_mask &= ~BIT(PIPE_B);
- if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
- enabled_mask &= ~BIT(PIPE_C);
- if (INTEL_GEN(dev_priv) >= 12 &&
- (dfsm & TGL_DFSM_PIPE_D_DISABLE))
- enabled_mask &= ~BIT(PIPE_D);
- /*
- * At least one pipe should be enabled and if there are
- * disabled pipes, they should be the last ones, with no holes
- * in the mask.
- */
- if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
- drm_err(&dev_priv->drm,
- "invalid pipe fuse configuration: enabled_mask=0x%x\n",
- enabled_mask);
- else
- info->pipe_mask = enabled_mask;
+ if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
+ info->pipe_mask &= ~BIT(PIPE_A);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+ }
+ if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
+ info->pipe_mask &= ~BIT(PIPE_B);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+ }
+ if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
+ info->pipe_mask &= ~BIT(PIPE_C);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ }
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
+ info->pipe_mask &= ~BIT(PIPE_D);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+ }
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
info->display.has_hdcp = 0;
@@ -1050,11 +1048,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
/* Initialize command stream timestamp frequency */
- runtime->cs_timestamp_frequency_khz =
+ runtime->cs_timestamp_frequency_hz =
read_timestamp_frequency(dev_priv);
- if (runtime->cs_timestamp_frequency_khz) {
+ if (runtime->cs_timestamp_frequency_hz) {
runtime->cs_timestamp_period_ns =
- div_u64(1e6, runtime->cs_timestamp_frequency_khz);
+ i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
drm_dbg(&dev_priv->drm,
"CS timestamp wraparound in %lldms\n",
div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 1ecb9df2de91..62e03ffa377e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -158,6 +158,8 @@ struct intel_device_info {
enum intel_platform platform;
+ unsigned int dma_mask_size; /* available DMA address bits */
+
enum intel_ppgtt_type ppgtt_type;
unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
@@ -168,6 +170,7 @@ struct intel_device_info {
u32 display_mmio_offset;
u8 pipe_mask;
+ u8 cpu_transcoder_mask;
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
@@ -218,7 +221,7 @@ struct intel_runtime_info {
u32 rawclk_freq;
- u32 cs_timestamp_frequency_khz;
+ u32 cs_timestamp_frequency_hz;
u32 cs_timestamp_period_ns;
/* Media engine access to SFC per instance */
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 6b922efb1d7c..8aa12cad93ce 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -495,6 +495,5 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
else
i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
- dev_info(i915->drm.dev,
- "Found %uMB of eDRAM\n", i915->edram_size_mb);
+ drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8375054ba27d..696491d71a1d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -43,6 +43,7 @@
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
+#include "display/intel_bw.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
@@ -3637,10 +3638,6 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
- /* HACK! */
- if (IS_GEN(dev_priv, 12))
- return false;
-
return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
@@ -3757,42 +3754,120 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-bool intel_can_enable_sagv(struct intel_atomic_state *state)
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc;
- struct intel_plane *plane;
- struct intel_crtc_state *crtc_state;
- enum pipe pipe;
- int level, latency;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state;
+ const struct intel_bw_state *old_bw_state;
+ u32 new_mask = 0;
+ /*
+ * Just return if we can't control SAGV or don't have it.
+	 * This is different from the situation where we have SAGV but just
+	 * can't afford it due to DBuf limitations - if SAGV is completely
+	 * disabled in the BIOS, we are not even allowed to send a PCode
+	 * request, as it will throw an error. So we have to check it here.
+ */
if (!intel_has_sagv(dev_priv))
- return false;
+ return;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+ if (!new_bw_state)
+ return;
+ if (INTEL_GEN(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ intel_disable_sagv(dev_priv);
+ return;
+ }
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
/*
- * If there are no active CRTCs, no additional checks need be performed
+ * Nothing to mask
*/
- if (hweight8(state->active_pipes) == 0)
- return true;
+ if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+ return;
+
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ /*
+	 * If the new mask is zero, there is nothing to mask here;
+	 * we can only unmask, which is done in the post-plane update.
+ */
+ if (!new_mask)
+ return;
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+}
+
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state;
+ const struct intel_bw_state *old_bw_state;
+ u32 new_mask = 0;
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+	 * This is different from the situation where we have SAGV but just
+	 * can't afford it due to DBuf limitations - if SAGV is completely
+	 * disabled in the BIOS, we are not even allowed to send a PCode
+	 * request, as it will throw an error. So we have to check it here.
+ */
+ if (!intel_has_sagv(dev_priv))
+ return;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+ if (!new_bw_state)
+ return;
+
+ if (INTEL_GEN(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ intel_enable_sagv(dev_priv);
+ return;
+ }
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * Nothing to unmask
+ */
+ if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+ return;
+
+ new_mask = new_bw_state->qgv_points_mask;
/*
- * SKL+ workaround: bspec recommends we disable SAGV when we have
- * more then one pipe enabled
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
*/
- if (hweight8(state->active_pipes) > 1)
+ icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+}
+
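The pre- and post-plane updates bracket the modeset with a widen-then-shrink sequence: before the update the masked QGV set becomes the union of old and new (only tightening), and afterwards it shrinks to the new set (only relaxing), since BSpec forbids masking and unmasking in one request. The ordering in toy form; hw_mask merely models the PCode-side state:

static unsigned int hw_mask;	/* illustrative stand-in for PCode state */

static void pre_plane_update(unsigned int old_mask, unsigned int new_mask)
{
	if (old_mask | new_mask)
		hw_mask = old_mask | new_mask;	/* tighten first: union */
}

static void post_plane_update(unsigned int new_mask)
{
	hw_mask = new_mask;	/* relax to the final set afterwards */
}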
+static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
+ int level, latency;
+
+ if (!intel_has_sagv(dev_priv))
return false;
- /* Since we're now guaranteed to only have one active CRTC... */
- pipe = ffs(state->active_pipes) - 1;
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ if (!crtc_state->hw.active)
+ return true;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- struct skl_plane_wm *wm =
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane->id];
/* Skip this plane if it's not enabled */
@@ -3807,7 +3882,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
latency = dev_priv->wm.skl_latency[level];
if (skl_needs_memory_bw_wa(dev_priv) &&
- plane->base.state->fb->modifier ==
+ plane_state->uapi.fb->modifier ==
I915_FORMAT_MOD_X_TILED)
latency += 15;
@@ -3823,6 +3898,112 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
return true;
}
+static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *plane_alloc =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (skl_ddb_entry_size(plane_alloc) < wm->sagv_wm0.min_ddb_alloc)
+ return false;
+ }
+
+ return true;
+}
+
+static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ return tgl_crtc_can_enable_sagv(crtc_state);
+ else
+ return skl_crtc_can_enable_sagv(crtc_state);
+}
+
+bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state)
+{
+ if (INTEL_GEN(dev_priv) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
+
+static int intel_compute_sagv_mask(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int ret;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_bw_state *new_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case)
+ */
+ pipe_wm->use_sagv_wm = INTEL_GEN(dev_priv) >= 12 &&
+ intel_can_enable_sagv(dev_priv, new_bw_state);
+ }
+
+ if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+ intel_can_enable_sagv(dev_priv, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
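
The tail of intel_compute_sagv_mask() encodes the global-state rules: a flip in the final SAGV decision must serialize this commit against all others, while a change confined to the per-pipe reject mask only needs the global-state lock. A condensed sketch of that decision, assuming the two helpers behave as their names suggest:

/* Sketch only; mirrors the branch structure above. */
static int sagv_sync_global_state(struct intel_bw_state *new_bw,
				  const struct intel_bw_state *old_bw,
				  bool old_sagv, bool new_sagv)
{
	if (new_sagv != old_sagv)
		return intel_atomic_serialize_global_state(&new_bw->base);

	if (new_bw->pipe_sagv_reject != old_bw->pipe_sagv_reject)
		return intel_atomic_lock_global_state(&new_bw->base);

	return 0;
}
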
+
/*
* Calculate initial DBuf slice offset, based on slice size
* and mask (i.e. if slice size is 1024 and the second slice is enabled
@@ -4016,6 +4197,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int color_plane);
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
+ unsigned int latency,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
@@ -4038,7 +4220,9 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
drm_WARN_ON(&dev_priv->drm, ret);
for (level = 0; level <= max_level; level++) {
- skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
+ unsigned int latency = dev_priv->wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
break;
@@ -4544,6 +4728,20 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
return total_data_rate;
}
+static const struct skl_wm_level *
+skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id,
+ int level)
+{
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (level == 0 && pipe_wm->use_sagv_wm)
+ return &wm->sagv_wm0;
+
+ return &wm->wm[level];
+}
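
skl_plane_wm_level() centralizes the SAGV substitution: when use_sagv_wm is set, level 0 transparently resolves to sagv_wm0, so the register-writing paths further down never special-case SAGV. A hypothetical caller (sketch):

/* Sketch: read the effective level-0 watermark for a plane. */
static bool plane_wm0_enabled(const struct intel_crtc_state *crtc_state,
			      enum plane_id plane_id)
{
	return skl_plane_wm_level(crtc_state, plane_id, 0)->plane_en;
}
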
+
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
{
@@ -4580,7 +4778,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
plane_data_rate,
uv_plane_data_rate);
-
skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
@@ -4780,7 +4977,7 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
wm_intermediate_val = latency * pixel_rate * cpp;
ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
ret = add_fixed16_u32(ret, 1);
return ret;
@@ -4915,18 +5112,19 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines,
wp->dbuf_block_size);
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
wp->y_min_scanlines);
- } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
- wp->dbuf_block_size);
- wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
} else {
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
- wp->dbuf_block_size) + 1;
+ wp->dbuf_block_size);
+
+ if (!wp->x_tiled ||
+ INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ interm_pbpl++;
+
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
}
@@ -4972,12 +5170,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
+ unsigned int latency,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
u32 res_blocks, res_lines, min_ddb_alloc = 0;
@@ -4992,7 +5190,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
+ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
dev_priv->ipc_enabled)
latency += 4;
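
Note the operator fix above: with the old OR, the +4 us WaIncreaseLatencyIPCEnabled adjustment fired on any platform that had IPC enabled; with AND it is confined to Kaby Lake/Coffee Lake with IPC enabled, as the workaround intends. A standalone truth-table check with illustrative values only:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative: a non-KBL/CFL platform with IPC enabled. */
	bool is_kbl_or_cfl = false;
	bool ipc_enabled = true;

	printf("old (||): %d  new (&&): %d\n",
	       is_kbl_or_cfl || ipc_enabled,	/* 1: WA wrongly applied */
	       is_kbl_or_cfl && ipc_enabled);	/* 0: WA correctly skipped */
	return 0;
}
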
@@ -5106,14 +5304,29 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
+ unsigned int latency = dev_priv->wm.skl_latency[level];
- skl_compute_plane_wm(crtc_state, level, wm_params,
- result_prev, result);
+ skl_compute_plane_wm(crtc_state, level, latency,
+ wm_params, result_prev, result);
result_prev = result;
}
}
+static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
+ const struct skl_wm_params *wm_params,
+ struct skl_plane_wm *plane_wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wm_level *sagv_wm = &plane_wm->sagv_wm0;
+ struct skl_wm_level *levels = plane_wm->wm;
+ unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us;
+
+ skl_compute_plane_wm(crtc_state, 0, latency,
+ wm_params, &levels[0],
+ sagv_wm);
+}
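
tgl_compute_sagv_wm() is an ordinary level-0 computation with the platform's SAGV block time folded into the latency. With hypothetical numbers, a 2 us level-0 latency and a 14 us sagv_block_time_us give a 16 us effective latency for the SAGV watermark:

#include <stdio.h>

int main(void)
{
	unsigned int skl_latency0_us = 2;	/* hypothetical */
	unsigned int sagv_block_time_us = 14;	/* hypothetical */

	/* Effective latency handed to skl_compute_plane_wm() above. */
	printf("sagv wm0 latency: %u us\n",
	       skl_latency0_us + sagv_block_time_us);
	return 0;
}
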
+
static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
@@ -5166,10 +5379,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
trans_offset_b;
} else {
res_blocks = wm0_sel_res_b + trans_offset_b;
-
- /* WA BUG:1938466 add one block for non y-tile planes */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
- res_blocks += 1;
}
/*
@@ -5185,6 +5394,8 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
enum plane_id plane_id, int color_plane)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
struct skl_wm_params wm_params;
int ret;
@@ -5195,6 +5406,10 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
return ret;
skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_compute_sagv_wm(crtc_state, &wm_params, wm);
+
skl_compute_transition_wm(crtc_state, &wm_params, wm);
return 0;
@@ -5354,8 +5569,12 @@ void skl_write_plane_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
+ const struct skl_wm_level *wm_level;
+
+ wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
+
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
- &wm->wm[level]);
+ wm_level);
}
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
@@ -5388,8 +5607,12 @@ void skl_write_cursor_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
+ const struct skl_wm_level *wm_level;
+
+ wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
+
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
- &wm->wm[level]);
+ wm_level);
}
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
@@ -5424,8 +5647,8 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
-static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
- const struct skl_ddb_entry *b)
+static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
{
return a->start < b->end && b->start < a->end;
}
@@ -5553,23 +5776,25 @@ skl_print_wm_changes(struct intel_atomic_state *state)
continue;
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
enast(old_wm->trans_wm.plane_en),
+ enast(old_wm->sagv_wm0.plane_en),
enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
- enast(new_wm->trans_wm.plane_en));
+ enast(new_wm->trans_wm.plane_en),
+ enast(new_wm->sagv_wm0.plane_en));
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
@@ -5580,6 +5805,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+ enast(old_wm->sagv_wm0.ignore_lines), old_wm->sagv_wm0.plane_res_l,
enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
@@ -5589,37 +5815,42 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l,
+ enast(new_wm->sagv_wm0.ignore_lines), new_wm->sagv_wm0.plane_res_l);
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
old_wm->trans_wm.plane_res_b,
+ old_wm->sagv_wm0.plane_res_b,
new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
- new_wm->trans_wm.plane_res_b);
+ new_wm->trans_wm.plane_res_b,
+ new_wm->sagv_wm0.plane_res_b);
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
old_wm->trans_wm.min_ddb_alloc,
+ old_wm->sagv_wm0.min_ddb_alloc,
new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc);
+ new_wm->trans_wm.min_ddb_alloc,
+ new_wm->sagv_wm0.min_ddb_alloc);
}
}
}
@@ -5780,6 +6011,10 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
+ ret = intel_compute_sagv_mask(state);
+ if (ret)
+ return ret;
+
/*
* skl_compute_ddb() will have adjusted the final watermarks
* based on how much ddb is available. Now we can actually
@@ -5876,8 +6111,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static inline void skl_wm_level_from_reg_val(u32 val,
- struct skl_wm_level *level)
+static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
@@ -5909,6 +6143,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
+ if (INTEL_GEN(dev_priv) >= 12)
+ wm->sagv_wm0 = wm->wm[0];
+
if (plane_id != PLANE_CURSOR)
val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
else
@@ -6850,6 +7087,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
TGL_VRH_GATING_DIS);
+
+ /* Wa_14011059788:tgl */
+ intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
+ 0, DFR_DISABLE);
}
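
The Wa_14011059788 hunk relies on read-modify-write semantics: intel_uncore_rmw(uncore, reg, clear, set) reads the register, clears the clear mask, ORs in the set mask, and writes the result back; with clear == 0 it is a plain bit-set. A minimal model of that value transform (sketch, standard C):

#include <stdint.h>

/* Model of the rmw helper's value transform (sketch). */
static uint32_t rmw_value(uint32_t old, uint32_t clear, uint32_t set)
{
	return (old & ~clear) | set;	/* clear == 0 above: pure bit-set */
}
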
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6882,9 +7123,6 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
/* ReadHitWriteOnlyDisable:cnl */
val |= RCCUNIT_CLKGATE_DIS;
- /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
- val |= SARBUNIT_CLKGATE_DIS;
I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
/* Wa_2201832410:cnl */
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index d60a85421c5a..614ac7f8d4cc 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "i915_reg.h"
+#include "display/intel_bw.h"
struct drm_device;
struct drm_i915_private;
@@ -41,9 +42,12 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct intel_atomic_state *state);
+bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
+void intel_sagv_post_plane_update(struct intel_atomic_state *state);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ad719c9602af..9cb2d7548daa 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -549,7 +549,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
* because the HDA driver may require us to enable the audio power
* domain during system suspend.
*/
- dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
pm_runtime_mark_last_busy(kdev);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 3f13baaef058..916ccd1c0e96 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -336,7 +336,7 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
intel_sbi_rw(i915, reg, destination, &value, false);
}
-static inline int gen6_check_mailbox_status(u32 mbox)
+static int gen6_check_mailbox_status(u32 mbox)
{
switch (mbox & GEN6_PCODE_ERROR_MASK) {
case GEN6_PCODE_SUCCESS:
@@ -356,7 +356,7 @@ static inline int gen6_check_mailbox_status(u32 mbox)
}
}
-static inline int gen7_check_mailbox_status(u32 mbox)
+static int gen7_check_mailbox_status(u32 mbox)
{
switch (mbox & GEN6_PCODE_ERROR_MASK) {
case GEN6_PCODE_SUCCESS:
@@ -371,6 +371,8 @@ static inline int gen7_check_mailbox_status(u32 mbox)
return -ENXIO;
case GEN11_PCODE_LOCKED:
return -EBUSY;
+ case GEN11_PCODE_REJECTED:
+ return -EACCES;
case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
return -EOVERFLOW;
default:
@@ -429,7 +431,7 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
mutex_lock(&i915->sb_lock);
err = __sandybridge_pcode_rw(i915, mbox, val, val1,
- 500, 0,
+ 500, 20,
true);
mutex_unlock(&i915->sb_lock);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index abb18b90d7c3..a61cb8ca4d50 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -665,7 +665,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
mmio_debug_resume(uncore->debug);
if (check_for_unclaimed_mmio(uncore))
- dev_info(uncore->i915->drm.dev,
+ drm_info(&uncore->i915->drm,
"Invalid mmio detected during user access\n");
spin_unlock(&uncore->debug->lock);
@@ -735,6 +735,28 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
}
/**
+ * intel_uncore_forcewake_flush - flush the delayed release
+ * @uncore: the intel_uncore structure
+ * @fw_domains: forcewake domains to flush
+ */
+void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
+
+ if (!uncore->funcs.force_wake_put)
+ return;
+
+ fw_domains &= uncore->fw_domains;
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ WRITE_ONCE(domain->active, false);
+ if (hrtimer_cancel(&domain->timer))
+ intel_uncore_fw_release_timer(&domain->timer);
+ }
+}
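
intel_uncore_forcewake_flush() marks each selected domain inactive before cancelling its timer, so a concurrently firing callback cannot re-arm the delayed release; if the timer was still pending, the release runs synchronously. A hypothetical call site (sketch):

/* Hypothetical (sketch): drain delayed releases before checking state. */
static void quiesce_forcewake(struct intel_uncore *uncore)
{
	intel_uncore_forcewake_flush(uncore, FORCEWAKE_ALL);
}
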
+
+/**
* intel_uncore_forcewake_put__locked - release forcewake domain references
* @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to put references on
@@ -877,11 +899,6 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
#define GEN_FW_RANGE(s, e, d) \
{ .start = (s), .end = (e), .domains = (d) }
-#define HAS_FWTABLE(dev_priv) \
- (INTEL_GEN(dev_priv) >= 9 || \
- IS_CHERRYVIEW(dev_priv) || \
- IS_VALLEYVIEW(dev_priv))
-
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
@@ -1070,8 +1087,7 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
- GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
- GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
@@ -1081,27 +1097,31 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8800, 0x8bff, 0),
GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
- GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
- GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x9560, 0x95ff, 0),
+ GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER),
- GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24000, 0x2407f, 0),
+ GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x40000, 0x1bffff, 0),
GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
- GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
- GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
- GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
+ GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
- GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
- GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+ GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};
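
These range tables must stay sorted by offset because the lookup bisects them; the note above each table points at intel_fw_table_check(), which verifies the ordering in self-tests. A sketch of a sorted-range lookup over the same layout (the driver's actual find_fw_domain() is the authoritative version):

/* Sketch: binary search over sorted, non-overlapping ranges. */
static u32 fw_range_lookup(const struct intel_forcewake_range *ranges,
			   unsigned int count, u32 offset)
{
	unsigned int lo = 0, hi = count;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (offset < ranges[mid].start)
			hi = mid;
		else if (offset > ranges[mid].end)
			lo = mid + 1;
		else
			return ranges[mid].domains;
	}

	return 0;	/* offset needs no forcewake */
}
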
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index dcfa243892c6..8d3aa8b9acf9 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -209,7 +209,11 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
-/* Like above but the caller must manage the uncore.lock itself.
+void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
+ enum forcewake_domains fw_domains);
+
+/*
+ * Like above but the caller must manage the uncore.lock itself.
* Must be used with I915_READ_FW and friends.
*/
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 8fbf6f4d3f26..dfd87d082218 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -70,11 +70,12 @@ unlock:
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
- INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
+ INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));
/* Assume we are not in process context and so cannot sleep. */
if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
- schedule_work(&wf->work);
+ mod_delayed_work(system_wq, &wf->work,
+ FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
return;
}
@@ -83,7 +84,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
- struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
+ struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);
if (atomic_add_unless(&wf->count, -1, 1))
return;
@@ -104,8 +105,9 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
atomic_set(&wf->count, 0);
wf->wakeref = 0;
- INIT_WORK(&wf->work, __intel_wakeref_put_work);
- lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
+ INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
+ lockdep_init_map(&wf->work.work.lockdep_map,
+ "wakeref.work", &key->work, 0);
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 7d1e676b71ef..545c8f277c46 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -8,6 +8,7 @@
#define INTEL_WAKEREF_H
#include <linux/atomic.h>
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
@@ -41,7 +42,7 @@ struct intel_wakeref {
struct intel_runtime_pm *rpm;
const struct intel_wakeref_ops *ops;
- struct work_struct work;
+ struct delayed_work work;
};
struct intel_wakeref_lockclass {
@@ -117,6 +118,11 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
return atomic_inc_not_zero(&wf->count);
}
+enum {
+ INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
+ __INTEL_WAKEREF_PUT_LAST_BIT__
+};
+
/**
* intel_wakeref_put_flags: Release the wakeref
* @wf: the wakeref
@@ -134,7 +140,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
*/
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
-#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
+#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
+#define INTEL_WAKEREF_PUT_DELAY \
+ GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
{
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
@@ -154,6 +162,14 @@ intel_wakeref_put_async(struct intel_wakeref *wf)
__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
+static inline void
+intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
+{
+ __intel_wakeref_put(wf,
+ INTEL_WAKEREF_PUT_ASYNC |
+ FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
+}
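
The put flags pack two things into one unsigned long: bit 0 is the async request and the remaining bits carry the delay, extracted on the consumer side with FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags). A standalone model of the packing (hypothetical macro names, standard C):

#include <stdint.h>
#include <stdio.h>

#define PUT_ASYNC	(1UL << 0)	/* mirrors INTEL_WAKEREF_PUT_ASYNC */
#define DELAY_SHIFT	1		/* delay lives in the remaining bits */

int main(void)
{
	unsigned long delay = 250;	/* hypothetical delay in jiffies */
	unsigned long flags = PUT_ASYNC | (delay << DELAY_SHIFT);

	printf("async=%lu delay=%lu\n",
	       flags & PUT_ASYNC, flags >> DELAY_SHIFT);
	return 0;
}
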
+
/**
* intel_wakeref_lock: Lock the wakeref (mutex)
* @wf: the wakeref
@@ -194,7 +210,7 @@ intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
mutex_lock(&wf->mutex);
mutex_unlock(&wf->mutex);
- flush_work(&wf->work);
+ flush_delayed_work(&wf->work);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 2bb9f9f9a50a..ec776591e1cf 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -86,10 +86,10 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
else
wopcm->size = GEN9_WOPCM_SIZE;
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "WOPCM: %uK\n", wopcm->size / 1024);
+ drm_dbg(&i915->drm, "WOPCM: %uK\n", wopcm->size / 1024);
}
-static inline u32 context_reserved_size(struct drm_i915_private *i915)
+static u32 context_reserved_size(struct drm_i915_private *i915)
{
if (IS_GEN9_LP(i915))
return BXT_WOPCM_RC6_CTX_RESERVED;
@@ -99,8 +99,8 @@ static inline u32 context_reserved_size(struct drm_i915_private *i915)
return 0;
}
-static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
- u32 guc_wopcm_base, u32 guc_wopcm_size)
+static bool gen9_check_dword_gap(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size)
{
u32 offset;
@@ -112,7 +112,7 @@ static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
if (offset > guc_wopcm_size ||
(guc_wopcm_size - offset) < sizeof(u32)) {
- dev_err(i915->drm.dev,
+ drm_err(&i915->drm,
"WOPCM: invalid GuC region size: %uK < %uK\n",
guc_wopcm_size / SZ_1K,
(u32)(offset + sizeof(u32)) / SZ_1K);
@@ -122,8 +122,8 @@ static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
return true;
}
-static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
- u32 guc_wopcm_size, u32 huc_fw_size)
+static bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
+ u32 guc_wopcm_size, u32 huc_fw_size)
{
/*
* On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
@@ -131,7 +131,7 @@ static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
* firmware uploading would fail.
*/
if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
- dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
(guc_wopcm_size - GUC_WOPCM_RESERVED) / SZ_1K,
huc_fw_size / 1024);
@@ -141,32 +141,31 @@ static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
return true;
}
-static inline bool check_hw_restrictions(struct drm_i915_private *i915,
- u32 guc_wopcm_base, u32 guc_wopcm_size,
- u32 huc_fw_size)
+static bool check_hw_restrictions(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 huc_fw_size)
{
if (IS_GEN(i915, 9) && !gen9_check_dword_gap(i915, guc_wopcm_base,
guc_wopcm_size))
return false;
- if ((IS_GEN(i915, 9) ||
- IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)) &&
+ if (IS_GEN(i915, 9) &&
!gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
return false;
return true;
}
-static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
- u32 guc_wopcm_base, u32 guc_wopcm_size,
- u32 guc_fw_size, u32 huc_fw_size)
+static bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 guc_fw_size, u32 huc_fw_size)
{
const u32 ctx_rsvd = context_reserved_size(i915);
u32 size;
size = wopcm_size - ctx_rsvd;
if (unlikely(range_overflows(guc_wopcm_base, guc_wopcm_size, size))) {
- dev_err(i915->drm.dev,
+ drm_err(&i915->drm,
"WOPCM: invalid GuC region layout: %uK + %uK > %uK\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K,
size / SZ_1K);
@@ -175,7 +174,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
if (unlikely(guc_wopcm_size < size)) {
- dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC),
guc_wopcm_size / SZ_1K, size / SZ_1K);
return false;
@@ -183,7 +182,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
size = huc_fw_size + WOPCM_RESERVED_SIZE;
if (unlikely(guc_wopcm_base < size)) {
- dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
guc_wopcm_base / SZ_1K, size / SZ_1K);
return false;
@@ -242,10 +241,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
return;
if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
- "GuC WOPCM is already locked [%uK, %uK)\n",
- guc_wopcm_base / SZ_1K,
- guc_wopcm_size / SZ_1K);
+ drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
goto check;
}
@@ -266,8 +263,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "Calculated GuC WOPCM [%uK, %uK)\n",
- guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
+ drm_dbg(&i915->drm, "Calculated GuC WOPCM [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
check:
if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size,
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
deleted file mode 100644
index 14da5c3b569d..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_bdw.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x000000a0 },
- { _MMIO(0x9888), 0x198b0000 },
- { _MMIO(0x9888), 0x078b0066 },
- { _MMIO(0x9888), 0x118b0000 },
- { _MMIO(0x9888), 0x258b0000 },
- { _MMIO(0x9888), 0x21850008 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9840), 0x00000080 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "d6de6f55-e526-4f79-a6a6-d7315c09044e",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
deleted file mode 100644
index 0cee3334f0a6..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BDW_H__
-#define __I915_OA_BDW_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
deleted file mode 100644
index 3e785bafcf99..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_bxt.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x19800000 },
- { _MMIO(0x9888), 0x07800063 },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x23810008 },
- { _MMIO(0x9888), 0x1d950400 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "5ee72f5c-092f-421e-8b70-225f7c3e9612",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
deleted file mode 100644
index 0bdf391323ec..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BXT_H__
-#define __I915_OA_BXT_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
deleted file mode 100644
index 0ea86f70a06c..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cflgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "74fb4902-d3d3-4237-9e90-cbdc68d0a446",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
deleted file mode 100644
index 6b862280ab78..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT2_H__
-#define __I915_OA_CFLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
deleted file mode 100644
index fc632dd890bf..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cflgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "577e8e2c-3fa0-4875-8743-3538d585e3b0",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
deleted file mode 100644
index 4ca9d8f89b2f..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT3_H__
-#define __I915_OA_CFLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
deleted file mode 100644
index 6cd4e9921a8a..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_chv.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_chv.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x000000a0 },
- { _MMIO(0x9888), 0x59800000 },
- { _MMIO(0x9888), 0x59800001 },
- { _MMIO(0x9888), 0x338b0000 },
- { _MMIO(0x9888), 0x258b0066 },
- { _MMIO(0x9888), 0x058b0000 },
- { _MMIO(0x9888), 0x038b0000 },
- { _MMIO(0x9888), 0x03844000 },
- { _MMIO(0x9888), 0x47800080 },
- { _MMIO(0x9888), 0x57800000 },
- { _MMIO(0x1823a4), 0x00000000 },
- { _MMIO(0x9888), 0x59800000 },
- { _MMIO(0x9840), 0x00000080 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "4a534b07-cba3-414d-8d60-874830e883aa",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
deleted file mode 100644
index 3cac7bbc9c71..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_chv.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CHV_H__
-#define __I915_OA_CHV_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
deleted file mode 100644
index 1041e8914993..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cnl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x0000ffff },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x0000ffff },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x0000ffff },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0xd04), 0x00000200 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x17060000 },
- { _MMIO(0x9840), 0x00000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x13034000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x07060066 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x05060000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x0f080040 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x07091000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x0f041000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x1d004000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x35000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x49000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x3d000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x31000000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "db41edd4-d8e7-4730-ad11-b9a2d6833503",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
deleted file mode 100644
index db379f5fcbb9..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CNL_H__
-#define __I915_OA_CNL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
deleted file mode 100644
index bd15ebe9aeeb..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_glk.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_glk.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x19800000 },
- { _MMIO(0x9888), 0x07800063 },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x23810008 },
- { _MMIO(0x9888), 0x1d950400 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "dd3fd789-e783-4204-8cd0-b671bbccb0cf",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
deleted file mode 100644
index 779f343efd11..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_glk.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_GLK_H__
-#define __I915_OA_GLK_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
deleted file mode 100644
index 133721a8619f..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_hsw.h"
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x253a4), 0x01600000 },
- { _MMIO(0x25440), 0x00100000 },
- { _MMIO(0x25128), 0x00000000 },
- { _MMIO(0x2691c), 0x00000800 },
- { _MMIO(0x26aa0), 0x01500000 },
- { _MMIO(0x26b9c), 0x00006000 },
- { _MMIO(0x2791c), 0x00000800 },
- { _MMIO(0x27aa0), 0x01500000 },
- { _MMIO(0x27b9c), 0x00006000 },
- { _MMIO(0x2641c), 0x00000400 },
- { _MMIO(0x25380), 0x00000010 },
- { _MMIO(0x2538c), 0x00000000 },
- { _MMIO(0x25384), 0x0800aaaa },
- { _MMIO(0x25400), 0x00000004 },
- { _MMIO(0x2540c), 0x06029000 },
- { _MMIO(0x25410), 0x00000002 },
- { _MMIO(0x25404), 0x5c30ffff },
- { _MMIO(0x25100), 0x00000016 },
- { _MMIO(0x25110), 0x00000400 },
- { _MMIO(0x25104), 0x00000000 },
- { _MMIO(0x26804), 0x00001211 },
- { _MMIO(0x26884), 0x00000100 },
- { _MMIO(0x26900), 0x00000002 },
- { _MMIO(0x26908), 0x00700000 },
- { _MMIO(0x26904), 0x00000000 },
- { _MMIO(0x26984), 0x00001022 },
- { _MMIO(0x26a04), 0x00000011 },
- { _MMIO(0x26a80), 0x00000006 },
- { _MMIO(0x26a88), 0x00000c02 },
- { _MMIO(0x26a84), 0x00000000 },
- { _MMIO(0x26b04), 0x00001000 },
- { _MMIO(0x26b80), 0x00000002 },
- { _MMIO(0x26b8c), 0x00000007 },
- { _MMIO(0x26b84), 0x00000000 },
- { _MMIO(0x27804), 0x00004844 },
- { _MMIO(0x27884), 0x00000400 },
- { _MMIO(0x27900), 0x00000002 },
- { _MMIO(0x27908), 0x0e000000 },
- { _MMIO(0x27904), 0x00000000 },
- { _MMIO(0x27984), 0x00004088 },
- { _MMIO(0x27a04), 0x00000044 },
- { _MMIO(0x27a80), 0x00000006 },
- { _MMIO(0x27a88), 0x00018040 },
- { _MMIO(0x27a84), 0x00000000 },
- { _MMIO(0x27b04), 0x00004000 },
- { _MMIO(0x27b80), 0x00000002 },
- { _MMIO(0x27b8c), 0x000000e0 },
- { _MMIO(0x27b84), 0x00000000 },
- { _MMIO(0x26104), 0x00002222 },
- { _MMIO(0x26184), 0x0c006666 },
- { _MMIO(0x26284), 0x04000000 },
- { _MMIO(0x26304), 0x04000000 },
- { _MMIO(0x26400), 0x00000002 },
- { _MMIO(0x26410), 0x000000a0 },
- { _MMIO(0x26404), 0x00000000 },
- { _MMIO(0x25420), 0x04108020 },
- { _MMIO(0x25424), 0x1284a420 },
- { _MMIO(0x2541c), 0x00000000 },
- { _MMIO(0x25428), 0x00042049 },
-};
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "403d8832-1a27-4aa6-a64e-f5389ce7b212",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_render_basic;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_render_basic;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_render_basic;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
-
- dev_priv->perf.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_render_basic_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
deleted file mode 100644
index ba97f732f136..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_HSW_H__
-#define __I915_OA_HSW_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
deleted file mode 100644
index 2d92041b754f..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_icl.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_icl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x0000ffff },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x0000ffff },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x0000ffff },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0xd04), 0x00000200 },
- { _MMIO(0x9840), 0x00000000 },
- { _MMIO(0x9884), 0x00000000 },
- { _MMIO(0x9888), 0x10060000 },
- { _MMIO(0x9888), 0x22060000 },
- { _MMIO(0x9888), 0x16060000 },
- { _MMIO(0x9888), 0x24060000 },
- { _MMIO(0x9888), 0x18060000 },
- { _MMIO(0x9888), 0x1a060000 },
- { _MMIO(0x9888), 0x12060000 },
- { _MMIO(0x9888), 0x14060000 },
- { _MMIO(0x9888), 0x10060000 },
- { _MMIO(0x9888), 0x22060000 },
- { _MMIO(0x9884), 0x00000003 },
- { _MMIO(0x9888), 0x16130000 },
- { _MMIO(0x9888), 0x24000001 },
- { _MMIO(0x9888), 0x0e130056 },
- { _MMIO(0x9888), 0x10130000 },
- { _MMIO(0x9888), 0x1a130000 },
- { _MMIO(0x9888), 0x541f0001 },
- { _MMIO(0x9888), 0x181f0000 },
- { _MMIO(0x9888), 0x4c1f0000 },
- { _MMIO(0x9888), 0x301f0000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "a291665e-244b-4b76-9b9a-01de9d3c8068",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
deleted file mode 100644
index 5c64112d720e..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_icl.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_ICL_H__
-#define __I915_OA_ICL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
deleted file mode 100644
index 1c3a67c9cfe0..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_kblgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "baa3c7e4-52b6-4b85-801e-465a94b746dd",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
deleted file mode 100644
index 810532fa6b63..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT2_H__
-#define __I915_OA_KBLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
deleted file mode 100644
index ebbe5a9c9fdc..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_kblgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "f1792f32-6db2-4b50-b4b2-557128f1688d",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
deleted file mode 100644
index 13d70456fabd..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT3_H__
-#define __I915_OA_KBLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
deleted file mode 100644
index 1bc359ed34e8..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810016 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "1651949f-0ac0-4cb1-a06f-dafd74a407d1",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
deleted file mode 100644
index fda70c51a6ec..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT2_H__
-#define __I915_OA_SKLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
deleted file mode 100644
index 6e352f881310..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "2b985803-d3c9-4629-8a4f-634bfecba0e8",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
deleted file mode 100644
index df74eba5799e..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT3_H__
-#define __I915_OA_SKLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
deleted file mode 100644
index 8f345115a306..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt4.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "882fa433-1f4a-4a67-a962-c741888fe5f5",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
deleted file mode 100644
index 378ab7ab78d5..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT4_H__
-#define __I915_OA_SKLGT4_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
deleted file mode 100644
index a29d93707345..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_tgl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0xD920), 0x00000000 },
- { _MMIO(0xD900), 0x00000000 },
- { _MMIO(0xD904), 0xF0800000 },
- { _MMIO(0xD910), 0x00000000 },
- { _MMIO(0xD914), 0xF0800000 },
- { _MMIO(0xDC40), 0x00FF0000 },
- { _MMIO(0xD940), 0x00000004 },
- { _MMIO(0xD944), 0x0000FFFF },
- { _MMIO(0xDC00), 0x00000004 },
- { _MMIO(0xDC04), 0x0000FFFF },
- { _MMIO(0xD948), 0x00000003 },
- { _MMIO(0xD94C), 0x0000FFFF },
- { _MMIO(0xDC08), 0x00000003 },
- { _MMIO(0xDC0C), 0x0000FFFF },
- { _MMIO(0xD950), 0x00000007 },
- { _MMIO(0xD954), 0x0000FFFF },
- { _MMIO(0xDC10), 0x00000007 },
- { _MMIO(0xDC14), 0x0000FFFF },
- { _MMIO(0xD958), 0x00100002 },
- { _MMIO(0xD95C), 0x0000FFF7 },
- { _MMIO(0xDC18), 0x00100002 },
- { _MMIO(0xDC1C), 0x0000FFF7 },
- { _MMIO(0xD960), 0x00100002 },
- { _MMIO(0xD964), 0x0000FFCF },
- { _MMIO(0xDC20), 0x00100002 },
- { _MMIO(0xDC24), 0x0000FFCF },
- { _MMIO(0xD968), 0x00100082 },
- { _MMIO(0xD96C), 0x0000FFEF },
- { _MMIO(0xDC28), 0x00100082 },
- { _MMIO(0xDC2C), 0x0000FFEF },
- { _MMIO(0xD970), 0x001000C2 },
- { _MMIO(0xD974), 0x0000FFE7 },
- { _MMIO(0xDC30), 0x001000C2 },
- { _MMIO(0xDC34), 0x0000FFE7 },
- { _MMIO(0xD978), 0x00100001 },
- { _MMIO(0xD97C), 0x0000FFE7 },
- { _MMIO(0xDC38), 0x00100001 },
- { _MMIO(0xDC3C), 0x0000FFE7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x0D04), 0x00000200 },
- { _MMIO(0x9840), 0x00000000 },
- { _MMIO(0x9884), 0x00000000 },
- { _MMIO(0x9888), 0x280E0000 },
- { _MMIO(0x9888), 0x1E0E0147 },
- { _MMIO(0x9888), 0x180E0000 },
- { _MMIO(0x9888), 0x160E0000 },
- { _MMIO(0x9888), 0x1E0F1000 },
- { _MMIO(0x9888), 0x1E104000 },
- { _MMIO(0x9888), 0x2E020100 },
- { _MMIO(0x9888), 0x2C030004 },
- { _MMIO(0x9888), 0x38003000 },
- { _MMIO(0x9888), 0x1E0A8000 },
- { _MMIO(0x9884), 0x00000003 },
- { _MMIO(0x9888), 0x49110000 },
- { _MMIO(0x9888), 0x5D101400 },
- { _MMIO(0x9888), 0x1D140020 },
- { _MMIO(0x9888), 0x1D1103A3 },
- { _MMIO(0x9888), 0x01110000 },
- { _MMIO(0x9888), 0x61111000 },
- { _MMIO(0x9888), 0x1F128000 },
- { _MMIO(0x9888), 0x17100000 },
- { _MMIO(0x9888), 0x55100630 },
- { _MMIO(0x9888), 0x57100000 },
- { _MMIO(0x9888), 0x31100000 },
- { _MMIO(0x9884), 0x00000003 },
- { _MMIO(0x9888), 0x65100002 },
- { _MMIO(0x9884), 0x00000000 },
- { _MMIO(0x9888), 0x42000001 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "80a833f0-2504-4321-8894-e9277844ce7b",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
deleted file mode 100644
index 4c25f0be825c..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_TGL_H__
-#define __I915_OA_TGL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 68bbb1580162..4002c984c2e0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -153,7 +153,7 @@ static int live_active_wait(void *arg)
if (IS_ERR(active))
return PTR_ERR(active);
- i915_active_wait(&active->base);
+ __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(active->retired)) {
struct drm_printer p = drm_err_printer(__func__);
@@ -228,11 +228,11 @@ static int live_active_barrier(void *arg)
}
i915_active_release(&active->base);
+ if (err)
+ goto out;
- if (err == 0)
- err = i915_active_wait(&active->base);
-
- if (err == 0 && !READ_ONCE(active->retired)) {
+ __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after flushing barriers!\n");
err = -EINVAL;
}
@@ -277,7 +277,7 @@ static struct intel_engine_cs *node_to_barrier(struct active_node *it)
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
- drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+ drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
drm_printf(m, "\tpreallocated barriers? %s\n",
yesno(!llist_empty(&ref->preallocated_barriers)));
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 623759b73bb4..88d400b9df88 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -125,8 +125,6 @@ static void pm_resume(struct drm_i915_private *i915)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_ggtt_resume(&i915->ggtt);
- i915_gem_restore_fences(&i915->ggtt);
-
i915_gem_resume(i915);
}
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 06ef88510209..028baae9631f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -45,8 +45,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
- unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
+ unsigned long count;
count = 0;
do {
@@ -72,30 +72,6 @@ static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
pr_debug("Filled GGTT with %lu pages [%llu total]\n",
count, ggtt->vm.total / PAGE_SIZE);
- bound = 0;
- unbound = 0;
- list_for_each_entry(obj, objects, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
-
- if (atomic_read(&obj->bind_count))
- bound++;
- else
- unbound++;
- }
- GEM_BUG_ON(bound + unbound != count);
-
- if (unbound) {
- pr_err("%s: Found %lu objects unbound, expected %u!\n",
- __func__, unbound, 0);
- return -EINVAL;
- }
-
- if (bound != count) {
- pr_err("%s: Found %lu objects bound, expected %lu!\n",
- __func__, bound, count);
- return -EINVAL;
- }
-
if (list_empty(&ggtt->vm.bound_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index b342bef5e7c9..2e471500a646 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -331,9 +331,6 @@ static void close_object_list(struct list_head *objects,
vma = i915_vma_instance(obj, vm, NULL);
if (!IS_ERR(vma))
ignored = i915_vma_unbind(vma);
- /* Only ppgtt vma may be closed before the object is freed */
- if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
list_del(&obj->st_link);
i915_gem_object_put(obj);
@@ -591,7 +588,7 @@ static int walk_hole(struct i915_address_space *vm,
pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
__func__, addr, vma->size,
hole_start, hole_end, err);
- goto err_close;
+ goto err_put;
}
i915_vma_unpin(vma);
@@ -600,14 +597,14 @@ static int walk_hole(struct i915_address_space *vm,
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, vma->size);
err = -EINVAL;
- goto err_close;
+ goto err_put;
}
err = i915_vma_unbind(vma);
if (err) {
pr_err("%s unbind failed at %llx + %llx with err=%d\n",
__func__, addr, vma->size, err);
- goto err_close;
+ goto err_put;
}
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -616,13 +613,10 @@ static int walk_hole(struct i915_address_space *vm,
"%s timed out at %llx\n",
__func__, addr)) {
err = -EINTR;
- goto err_close;
+ goto err_put;
}
}
-err_close:
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
err_put:
i915_gem_object_put(obj);
if (err)
@@ -675,7 +669,7 @@ static int pot_hole(struct i915_address_space *vm,
addr,
hole_start, hole_end,
err);
- goto err;
+ goto err_obj;
}
if (!drm_mm_node_allocated(&vma->node) ||
@@ -685,7 +679,7 @@ static int pot_hole(struct i915_address_space *vm,
i915_vma_unpin(vma);
err = i915_vma_unbind(vma);
err = -EINVAL;
- goto err;
+ goto err_obj;
}
i915_vma_unpin(vma);
@@ -697,13 +691,10 @@ static int pot_hole(struct i915_address_space *vm,
"%s timed out after %d/%d\n",
__func__, pot, fls64(hole_end - 1) - 1)) {
err = -EINTR;
- goto err;
+ goto err_obj;
}
}
-err:
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
err_obj:
i915_gem_object_put(obj);
return err;
@@ -778,7 +769,7 @@ static int drunk_hole(struct i915_address_space *vm,
addr, BIT_ULL(size),
hole_start, hole_end,
err);
- goto err;
+ goto err_obj;
}
if (!drm_mm_node_allocated(&vma->node) ||
@@ -788,7 +779,7 @@ static int drunk_hole(struct i915_address_space *vm,
i915_vma_unpin(vma);
err = i915_vma_unbind(vma);
err = -EINVAL;
- goto err;
+ goto err_obj;
}
i915_vma_unpin(vma);
@@ -799,13 +790,10 @@ static int drunk_hole(struct i915_address_space *vm,
"%s timed out after %d/%d\n",
__func__, n, count)) {
err = -EINTR;
- goto err;
+ goto err_obj;
}
}
-err:
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
err_obj:
i915_gem_object_put(obj);
kfree(order);
@@ -1229,7 +1217,6 @@ static void track_vma_bind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
GEM_BUG_ON(vma->pages);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 0a953bfc0585..5dd5d81646c4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -37,6 +37,7 @@ selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(gem_contexts, i915_gem_context_live_selftests)
+selftest(gem_execbuf, i915_gem_execbuffer_live_selftests)
selftest(blt, i915_gem_object_blt_live_selftests)
selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 5b39bab4da1d..6a2be7d0dd95 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -16,6 +16,7 @@
* Tests are executed in order by igt/drv_selftest
*/
selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
+selftest(shmem, shmem_utils_mock_selftests)
selftest(fence, i915_sw_fence_mock_selftests)
selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index d1a1568c47ba..8eb3108f1767 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -14,10 +14,85 @@
#include "igt_flush_test.h"
#include "lib_sw_fence.h"
+#define TEST_OA_CONFIG_UUID "12345678-1234-1234-1234-1234567890ab"
+
+static int
+alloc_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config;
+
+ oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
+ if (!oa_config)
+ return -ENOMEM;
+
+ oa_config->perf = perf;
+ kref_init(&oa_config->ref);
+
+ strlcpy(oa_config->uuid, TEST_OA_CONFIG_UUID, sizeof(oa_config->uuid));
+
+ mutex_lock(&perf->metrics_lock);
+
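+ /* Claim the first free metrics id at or above 2 for this config */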
+ oa_config->id = idr_alloc(&perf->metrics_idr, oa_config, 2, 0, GFP_KERNEL);
+ if (oa_config->id < 0) {
+ mutex_unlock(&perf->metrics_lock);
+ i915_oa_config_put(oa_config);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&perf->metrics_lock);
+
+ return 0;
+}
+
+static void
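+/* Find the selftest config by its UUID and drop the idr's reference */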
+destroy_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config = NULL, *tmp;
+ int id;
+
+ mutex_lock(&perf->metrics_lock);
+
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+ oa_config = tmp;
+ break;
+ }
+ }
+
+ if (oa_config)
+ idr_remove(&perf->metrics_idr, oa_config->id);
+
+ mutex_unlock(&perf->metrics_lock);
+
+ if (oa_config)
+ i915_oa_config_put(oa_config);
+}
+
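+/* Return an extra reference to the selftest config, looked up by UUID */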
+static struct i915_oa_config *
+get_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config = NULL, *tmp;
+ int id;
+
+ mutex_lock(&perf->metrics_lock);
+
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+ oa_config = i915_oa_config_get(tmp);
+ break;
+ }
+ }
+
+ mutex_unlock(&perf->metrics_lock);
+
+ return oa_config;
+}
+
static struct i915_perf_stream *
test_stream(struct i915_perf *perf)
{
struct drm_i915_perf_open_param param = {};
+ struct i915_oa_config *oa_config = get_empty_config(perf);
struct perf_open_properties props = {
.engine = intel_engine_lookup_user(perf->i915,
I915_ENGINE_CLASS_RENDER,
@@ -25,13 +100,19 @@ test_stream(struct i915_perf *perf)
.sample_flags = SAMPLE_OA_REPORT,
.oa_format = IS_GEN(perf->i915, 12) ?
I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
- .metrics_set = 1,
};
struct i915_perf_stream *stream;
+ if (!oa_config)
+ return NULL;
+
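+ /* Use the config we allocated instead of a hardcoded metrics set id */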
+ props.metrics_set = oa_config->id;
+
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
- if (!stream)
+ if (!stream) {
+ i915_oa_config_put(oa_config);
return NULL;
+ }
stream->perf = perf;
@@ -42,6 +123,8 @@ test_stream(struct i915_perf *perf)
}
mutex_unlock(&perf->lock);
+ i915_oa_config_put(oa_config);
+
return stream;
}
@@ -138,8 +221,7 @@ static int live_noa_delay(void *arg)
goto out;
}
- if (rq->engine->emit_init_breadcrumb &&
- i915_request_timeline(rq)->has_initial_breadcrumb) {
+ if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
if (err) {
i915_request_add(rq);
@@ -180,8 +262,7 @@ static int live_noa_delay(void *arg)
delay = intel_read_status_page(stream->engine, 0x102);
delay -= intel_read_status_page(stream->engine, 0x100);
- delay = div_u64(mul_u32_u32(delay, 1000 * 1000),
- RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
+ delay = i915_cs_timestamp_ticks_to_ns(i915, delay);
pr_info("GPU delay: %uns, expected %lluns\n",
delay, expected);
@@ -206,6 +287,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_noa_delay),
};
struct i915_perf *perf = &i915->perf;
+ int err;
if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
return 0;
@@ -213,5 +295,13 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ err = alloc_empty_config(&i915->perf);
+ if (err)
+ return err;
+
+ err = i915_subtests(tests, i915);
+
+ destroy_empty_config(&i915->perf);
+
+ return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index 3bf7f53e9924..d8da142985eb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -16,5 +16,6 @@
* Tests are executed in order by igt/i915_selftest
*/
selftest(engine_cs, intel_engine_cs_perf_selftests)
+selftest(request, i915_request_perf_selftests)
selftest(blt, i915_gem_object_blt_perf_selftests)
selftest(region, intel_memory_region_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index f89d9c42f1fa..6014e8dfcbb1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -23,11 +23,13 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/pm_qos.h>
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_random.h"
@@ -51,6 +53,11 @@ static unsigned int num_uabi_engines(struct drm_i915_private *i915)
return count;
}
+static struct intel_engine_cs *rcs0(struct drm_i915_private *i915)
+{
+ return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0);
+}
+
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -58,7 +65,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
- request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
+ request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
if (!request)
return -ENOMEM;
@@ -76,7 +83,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -145,7 +152,7 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -420,7 +427,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
struct smoketest t = {
- .engine = i915->engine[RCS0],
+ .engine = rcs0(i915),
.ncontexts = 1024,
.max_batch = 1024,
.request_alloc = __mock_request_alloc
@@ -809,10 +816,12 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(batch->vm->gt);
+ __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd));
i915_gem_object_unpin_map(batch->obj);
+ intel_gt_chipset_flush(batch->vm->gt);
+
return 0;
}
@@ -858,13 +867,6 @@ static int live_all_engines(void *arg)
goto out_request;
}
- err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
- 0);
- GEM_BUG_ON(err);
- request[idx]->batch = batch;
-
i915_vma_lock(batch);
err = i915_request_await_object(request[idx], batch->obj, 0);
if (err == 0)
@@ -872,6 +874,13 @@ static int live_all_engines(void *arg)
i915_vma_unlock(batch);
GEM_BUG_ON(err);
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
i915_request_get(request[idx]);
i915_request_add(request[idx]);
idx++;
@@ -986,13 +995,6 @@ static int live_sequential_engines(void *arg)
}
}
- err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
- 0);
- GEM_BUG_ON(err);
- request[idx]->batch = batch;
-
i915_vma_lock(batch);
err = i915_request_await_object(request[idx],
batch->obj, false);
@@ -1001,6 +1003,13 @@ static int live_sequential_engines(void *arg)
i915_vma_unlock(batch);
GEM_BUG_ON(err);
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
i915_request_get(request[idx]);
i915_request_add(request[idx]);
@@ -1053,9 +1062,12 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(engine->gt);
+ __i915_gem_object_flush_map(request[idx]->batch->obj,
+ 0, sizeof(*cmd));
i915_gem_object_unpin_map(request[idx]->batch->obj);
+
+ intel_gt_chipset_flush(engine->gt);
}
i915_vma_put(request[idx]->batch);
@@ -1233,7 +1245,7 @@ static int live_parallel_engines(void *arg)
struct igt_live_test t;
unsigned int idx;
- snprintf(name, sizeof(name), "%pS", fn);
+ snprintf(name, sizeof(name), "%ps", *fn);
err = igt_live_test_begin(&t, i915, __func__, name);
if (err)
break;
@@ -1470,3 +1482,572 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
return i915_subtests(tests, i915);
}
+
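+/*
+ * Barrier: chain a kernel request after the context's last request, wait
+ * for it, then flush submission until the engine idles so the runtime
+ * counters sampled afterwards are stable.
+ */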
+static int switch_to_kernel_sync(struct intel_context *ce, int err)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ while (!err && !intel_engine_is_idle(ce->engine))
+ intel_engine_flush_submission(ce->engine);
+
+ return err;
+}
+
+struct perf_stats {
+ struct intel_engine_cs *engine;
+ unsigned long count;
+ ktime_t time;
+ ktime_t busy;
+ u64 runtime;
+};
+
+struct perf_series {
+ struct drm_i915_private *i915;
+ unsigned int nengines;
+ struct intel_context *ce[];
+};
+
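+/*
+ * Three submission patterns across the same contexts: s_sync0 waits on each
+ * request in turn, s_sync1 keeps one request in flight, and s_many submits
+ * without waiting at all.
+ */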
+static int s_sync0(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
+static int s_sync1(void *arg)
+{
+ struct perf_series *ps = arg;
+ struct i915_request *prev = NULL;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+
+ return err;
+}
+
+static int s_many(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return 0;
+}
+
+static int perf_series_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ s_sync0,
+ s_sync1,
+ s_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct pm_qos_request qos;
+ struct perf_stats *stats;
+ struct perf_series *ps;
+ unsigned int idx;
+ int err = 0;
+
+ stats = kcalloc(nengines, sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL);
+ if (!ps) {
+ kfree(stats);
+ return -ENOMEM;
+ }
+
+ cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
+
+ ps->i915 = i915;
+ ps->nengines = nengines;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ ps->ce[idx++] = ce;
+ }
+ GEM_BUG_ON(idx != ps->nengines);
+
+ for (fn = func; *fn && !err; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p =
+ memset(&stats[idx], 0, sizeof(stats[idx]));
+ struct intel_context *ce = ps->ce[idx];
+
+ p->engine = ps->ce[idx]->engine;
+ intel_engine_pm_get(p->engine);
+
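+ /* Bias busy-time by +1 so a zero reading still flags stats support */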
+ if (intel_engine_supports_stats(p->engine))
+ p->busy = intel_engine_get_busy_time(p->engine) + 1;
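+ /* Negate now; adding the final reading later yields the delta */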
+ p->runtime = -intel_context_get_total_runtime_ns(ce);
+ p->time = ktime_get();
+ }
+
+ err = (*fn)(ps);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p = &stats[idx];
+ struct intel_context *ce = ps->ce[idx];
+ int integer, decimal;
+ u64 busy, dt;
+
+ p->time = ktime_sub(ktime_get(), p->time);
+ if (p->busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(p->engine),
+ p->busy - 1);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime += intel_context_get_total_runtime_ns(ce);
+ intel_engine_pm_put(p->engine);
+
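+ /* Print busyness as a fixed-point percentage without floating point */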
+ busy = 100 * ktime_to_ns(p->busy);
+ dt = ktime_to_ns(p->time);
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ pr_info("%s %5s: { seqno:%d, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, p->engine->name, ce->timeline->seqno,
+ integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ }
+ }
+
+out:
+ for (idx = 0; idx < nengines; idx++) {
+ if (IS_ERR_OR_NULL(ps->ce[idx]))
+ break;
+
+ intel_context_unpin(ps->ce[idx]);
+ intel_context_put(ps->ce[idx]);
+ }
+ kfree(ps);
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(stats);
+ return err;
+}
+
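+/* Per-engine versions of the same three patterns, one kthread per engine */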
+static int p_sync0(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ p->time = ktime_get();
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
+static int p_sync1(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct i915_request *prev = NULL;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ p->time = ktime_get();
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
+static int p_many(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int err = 0;
+ bool busy;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ count = 0;
+ p->time = ktime_get();
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
+static int perf_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ p_sync0,
+ p_sync1,
+ p_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct pm_qos_request qos;
+ struct {
+ struct perf_stats p;
+ struct task_struct *tsk;
+ } *engines;
+ int err = 0;
+
+ engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
+ if (!engines)
+ return -ENOMEM;
+
+ cpu_latency_qos_add_request(&qos, 0);
+
+ for (fn = func; *fn; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+ unsigned int idx;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ atomic_set(&i915->selftest.counter, nengines);
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ intel_engine_pm_get(engine);
+
+ memset(&engines[idx].p, 0, sizeof(engines[idx].p));
+ engines[idx].p.engine = engine;
+
+ engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
+ "igt:%s", engine->name);
+ if (IS_ERR(engines[idx].tsk)) {
+ err = PTR_ERR(engines[idx].tsk);
+ intel_engine_pm_put(engine);
+ break;
+ }
+ get_task_struct(engines[idx++].tsk);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (IS_ERR(engines[idx].tsk))
+ break;
+
+ status = kthread_stop(engines[idx].tsk);
+ if (status && !err)
+ err = status;
+
+ intel_engine_pm_put(engine);
+ put_task_struct(engines[idx++].tsk);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct perf_stats *p = &engines[idx].p;
+ u64 busy = 100 * ktime_to_ns(p->busy);
+ u64 dt = ktime_to_ns(p->time);
+ int integer, decimal;
+
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ GEM_BUG_ON(engine != p->engine);
+ pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, engine->name, p->count, integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ idx++;
+ }
+ }
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(engines);
+ return err;
+}
+
+int i915_request_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_series_engines),
+ SUBTEST(perf_parallel_engines),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
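
The busy percentages printed by these selftests are computed entirely in 64-bit integer arithmetic, since floating point is unavailable in kernel code: scale the busy time by 100, take the integer quotient, then reuse the remainder for two decimal digits. A minimal userspace sketch of the same computation, assuming only that the busy time still fits in 64 bits once scaled by 100:

#include <stdint.h>
#include <stdio.h>

static void busy_percent(uint64_t busy_ns, uint64_t total_ns,
			 unsigned int *integer, unsigned int *decimal)
{
	uint64_t busy = 100 * busy_ns;	/* scale first, divide once */

	if (!total_ns) {		/* the dt == 0 guard, as above */
		*integer = 0;
		*decimal = 0;
		return;
	}
	*integer = busy / total_ns;
	busy -= (uint64_t)*integer * total_ns;
	*decimal = (100 * busy) / total_ns;
}

int main(void)
{
	unsigned int i, d;

	busy_percent(123456789, 1000000000, &i, &d);
	printf("%u.%02u%%\n", i, d);	/* -> 12.34% */
	return 0;
}
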
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index d3bf9eefb682..1bc11c09faef 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -396,6 +396,35 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
return true;
}
+void igt_hexdump(const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ pr_info("*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ pr_info("[%04zx] %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
+
module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400);
module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400);
module_param_named(st_filter, i915_selftest.filter, charp, 0400);
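
igt_hexdump() above folds runs of identical 32-byte rows into a single "*" line, the same convention hexdump(1) -C uses. A standalone userspace sketch of that folding; one assumption differs from the kernel helper, which memcmp()s a full rowsize even for a short tail row, while this sketch only compares complete rows:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void hexdump_fold(const unsigned char *buf, size_t len)
{
	const size_t rowsize = 32;	/* 8 * sizeof(u32), as above */
	const unsigned char *prev = NULL;
	bool skip = false;
	size_t pos, i;

	for (pos = 0; pos < len; pos += rowsize) {
		size_t n = len - pos < rowsize ? len - pos : rowsize;

		if (prev && n == rowsize &&
		    !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				puts("*");	/* fold repeated rows */
				skip = true;
			}
			continue;
		}
		printf("[%04zx]", pos);
		for (i = 0; i < n; i++)
			printf(" %02x", buf[pos + i]);
		putchar('\n');
		prev = buf + pos;
		skip = false;
	}
}
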
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 58b5f40a07dd..af89c7fc8f59 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -173,7 +173,7 @@ static int igt_vma_create(void *arg)
}
nc = 0;
- for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) {
+ for_each_prime_number(num_ctx, 2 * BITS_PER_LONG) {
for (; nc < num_ctx; nc++) {
ctx = mock_context(i915, "mock");
if (!ctx)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 9ad4ab088466..e35ba5f9e73f 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -169,8 +169,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
intel_gt_chipset_flush(engine->gt);
- if (engine->emit_init_breadcrumb &&
- i915_request_timeline(rq)->has_initial_breadcrumb) {
+ if (engine->emit_init_breadcrumb) {
err = engine->emit_init_breadcrumb(rq);
if (err)
goto cancel_rq;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2a1d4ba1f9f3..6e80d99048e4 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -594,8 +594,11 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
void *addr;
obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
+ return ERR_PTR(-ENODEV);
return obj;
+ }
addr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(addr)) {
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
new file mode 100644
index 000000000000..58710ac3f979
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/librapl.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <asm/msr.h>
+
+#include "librapl.h"
+
+u64 librapl_energy_uJ(void)
+{
+ unsigned long long power;
+ u32 units;
+
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+ return 0;
+
+ units = (power & 0x1f00) >> 8;
+
+ if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+ return 0;
+
+ return (1000000 * power) >> units; /* convert to uJ */
+}
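
librapl_energy_uJ() decodes bits 12:8 of MSR_RAPL_POWER_UNIT as the energy status unit: one counter tick equals 2^-units joule, so scaling the PP1 counter by 10^6 before the right shift yields microjoules. A worked example with hypothetical register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t unit_msr = 0x000a0e03;	/* hypothetical MSR value */
	uint32_t units = (unit_msr & 0x1f00) >> 8;	/* -> 14 */
	uint64_t raw = 123456;	/* hypothetical PP1 energy count */

	/* 123456 * 2^-14 J ~= 7.53 J ~= 7534912 uJ */
	printf("%llu uJ\n",
	       (unsigned long long)((1000000 * raw) >> units));
	return 0;
}
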
diff --git a/drivers/gpu/drm/i915/selftests/librapl.h b/drivers/gpu/drm/i915/selftests/librapl.h
new file mode 100644
index 000000000000..887f3e91dd05
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/librapl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_LIBRAPL_H
+#define SELFTEST_LIBRAPL_H
+
+#include <linux/types.h>
+
+u64 librapl_energy_uJ(void);
+
+#endif /* SELFTEST_LIBRAPL_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 754d0eb6beaa..9b105b811f1f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,6 +25,8 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_managed.h>
+
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
@@ -55,6 +57,9 @@ static void mock_device_release(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
+ if (!i915->do_release)
+ goto out;
+
mock_device_flush(i915);
intel_gt_driver_remove(&i915->gt);
@@ -71,8 +76,9 @@ static void mock_device_release(struct drm_device *dev)
drm_mode_config_cleanup(&i915->drm);
- drm_dev_fini(&i915->drm);
+out:
put_device(&i915->drm.pdev->dev);
+ i915->drm.pdev = NULL;
}
static struct drm_driver mock_driver = {
@@ -114,9 +120,14 @@ struct drm_i915_private *mock_gem_device(void)
struct pci_dev *pdev;
int err;
- pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
+ pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
if (!pdev)
- goto err;
+ return NULL;
+ i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
+ if (!i915) {
+ kfree(pdev);
+ return NULL;
+ }
device_initialize(&pdev->dev);
pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
@@ -129,7 +140,6 @@ struct drm_i915_private *mock_gem_device(void)
pdev->dev.archdata.iommu = (void *)-1;
#endif
- i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
dev_pm_domain_set(&pdev->dev, &pm_domain);
@@ -141,9 +151,13 @@ struct drm_i915_private *mock_gem_device(void)
err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
if (err) {
pr_err("Failed to initialise mock GEM device: err=%d\n", err);
- goto put_device;
+ put_device(&pdev->dev);
+ kfree(i915);
+
+ return NULL;
}
i915->drm.pdev = pdev;
+ drmm_add_final_kfree(&i915->drm, i915);
intel_runtime_pm_init_early(&i915->runtime_pm);
@@ -178,16 +192,18 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->engine_mask = BIT(0);
- i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
- if (!i915->engine[RCS0])
+ i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!i915->gt.engine[RCS0])
goto err_unlock;
- if (mock_engine_init(i915->engine[RCS0]))
+ if (mock_engine_init(i915->gt.engine[RCS0]))
goto err_context;
__clear_bit(I915_WEDGED, &i915->gt.reset.flags);
intel_engines_driver_register(i915);
+ i915->do_release = true;
+
return i915;
err_context:
@@ -198,9 +214,7 @@ err_drv:
intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
- drm_dev_fini(&i915->drm);
-put_device:
- put_device(&pdev->dev);
-err:
+ drm_dev_put(&i915->drm);
+
return NULL;
}
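
The mock device now allocates the pci_dev and drm_i915_private separately and hands ownership of the latter to the DRM managed-release machinery: drmm_add_final_kfree() registers the allocation containing the drm_device so the final drm_dev_put() kfree()s it, which is what lets the old drm_dev_fini() call and much of the .release hook go away. A schematic kernel-style sketch of the pattern, with hypothetical type and function names:

struct my_device {
	struct drm_device drm;	/* must be embedded */
	int stuff;
};

static struct my_device *my_device_create(struct device *parent,
					  struct drm_driver *drv)
{
	struct my_device *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	err = drm_dev_init(&priv->drm, drv, parent);
	if (err) {
		kfree(priv);
		return ERR_PTR(err);
	}
	/* freed automatically on the final drm_dev_put() */
	drmm_add_final_kfree(&priv->drm, priv);

	return priv;
}
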
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index f22cfbf9353e..ba4ca17fd4d8 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -18,6 +18,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -143,10 +144,6 @@ static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs =
.atomic_check = dw_hdmi_imx_atomic_check,
};
-static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_mode_status
imx6q_hdmi_mode_valid(struct drm_connector *con,
const struct drm_display_mode *mode)
@@ -236,8 +233,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
return ret;
drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
platform_set_drvdata(pdev, hdmi);
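
This and the following imx/ingenic conversions rely on drm_simple_encoder_init(), which exists precisely to replace encoder funcs whose only callback is the default cleanup. Schematically, the helper behaves like the boilerplate it removes:

static const struct drm_encoder_funcs simple_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int open_coded_simple_encoder_init(struct drm_device *dev,
					  struct drm_encoder *encoder,
					  int encoder_type)
{
	return drm_encoder_init(dev, encoder, &simple_encoder_funcs,
				encoder_type, NULL);
}
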
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index da87c70e413b..2e38f1a5cf8d 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -42,12 +42,6 @@ void imx_drm_connector_destroy(struct drm_connector *connector)
}
EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
-void imx_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
-
static int imx_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -139,8 +133,8 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
encoder->possible_crtcs = crtc_mask;
- /* FIXME: this is the mask of outputs which can clone this output. */
- encoder->possible_clones = ~0;
+ /* FIXME: cloning support not clear, disable it all for now */
+ encoder->possible_clones = 0;
return 0;
}
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index ab9c6f706eb3..c3e1a3f14d30 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -38,7 +38,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
void imx_drm_connector_destroy(struct drm_connector *connector);
-void imx_drm_encoder_destroy(struct drm_encoder *encoder);
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 4da22a94790c..66ea68e8da87 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -26,6 +26,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -393,10 +394,6 @@ static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs =
.best_encoder = imx_ldb_connector_best_encoder,
};
-static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
.atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
.enable = imx_ldb_encoder_enable,
@@ -441,8 +438,7 @@ static int imx_ldb_register(struct drm_device *drm,
}
drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
if (imx_ldb_ch->bridge) {
ret = drm_bridge_attach(&imx_ldb_ch->encoder,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 5bbfaa2cd0f4..ee63782c77e9 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -348,10 +349,6 @@ static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs =
.mode_valid = imx_tve_connector_mode_valid,
};
-static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
.mode_set = imx_tve_encoder_mode_set,
.enable = imx_tve_encoder_enable,
@@ -479,8 +476,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
return ret;
drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
- drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
- encoder_type, NULL);
+ drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
drm_connector_helper_add(&tve->connector,
&imx_tve_connector_helper_funcs);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 08fafa4bf8c2..ac916c84a631 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -18,6 +18,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -256,10 +257,6 @@ static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
.best_encoder = imx_pd_connector_best_encoder,
};
-static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
.enable = imx_pd_bridge_enable,
.disable = imx_pd_bridge_disable,
@@ -288,8 +285,7 @@ static int imx_pd_register(struct drm_device *drm,
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
imxpd->bridge.funcs = &imx_pd_bridge_funcs;
drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 1754c0547069..55b49a31729b 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -23,11 +23,13 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#define JZ_REG_LCD_CFG 0x00
@@ -328,8 +330,8 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
if (!drm_atomic_crtc_needs_modeset(state))
return 0;
- if (state->mode.hdisplay > priv->soc_info->max_height ||
- state->mode.vdisplay > priv->soc_info->max_width)
+ if (state->mode.hdisplay > priv->soc_info->max_width ||
+ state->mode.vdisplay > priv->soc_info->max_height)
return -EINVAL;
rate = clk_round_rate(priv->pix_clk,
@@ -474,7 +476,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
{
- struct ingenic_drm *priv = arg;
+ struct ingenic_drm *priv = drm_device_get_priv(arg);
unsigned int state;
regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
@@ -488,15 +490,6 @@ static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void ingenic_drm_release(struct drm_device *drm)
-{
- struct ingenic_drm *priv = drm_device_get_priv(drm);
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(priv);
-}
-
static int ingenic_drm_enable_vblank(struct drm_crtc *crtc)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
@@ -540,7 +533,6 @@ static struct drm_driver ingenic_drm_driver_data = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.irq_handler = ingenic_drm_irq_handler,
- .release = ingenic_drm_release,
};
static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
@@ -592,10 +584,6 @@ static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static const struct drm_encoder_funcs ingenic_drm_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void ingenic_drm_free_dma_hwdesc(void *d)
{
struct ingenic_drm *priv = d;
@@ -623,24 +611,21 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data,
+ struct ingenic_drm, drm);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->soc_info = soc_info;
priv->dev = dev;
drm = &priv->drm;
- drm->dev_private = priv;
platform_set_drvdata(pdev, priv);
- ret = devm_drm_dev_init(dev, drm, &ingenic_drm_driver_data);
- if (ret) {
- kfree(priv);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
return ret;
- }
- drm_mode_config_init(drm);
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = soc_info->max_width;
@@ -661,10 +646,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Failed to get platform irq");
+ if (irq < 0)
return irq;
- }
if (soc_info->needs_dev_clk) {
priv->lcd_clk = devm_clk_get(dev, "lcd");
@@ -730,8 +713,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
drm_encoder_helper_add(&priv->encoder,
&ingenic_drm_encoder_helper_funcs);
- ret = drm_encoder_init(drm, &priv->encoder, &ingenic_drm_encoder_funcs,
- DRM_MODE_ENCODER_DPI, NULL);
+ ret = drm_simple_encoder_init(drm, &priv->encoder,
+ DRM_MODE_ENCODER_DPI);
if (ret) {
dev_err(dev, "Failed to init encoder: %i", ret);
return ret;
@@ -791,9 +774,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
goto err_devclk_disable;
}
- ret = drm_fbdev_generic_setup(drm, 32);
- if (ret)
- dev_warn(dev, "Unable to start fbdev emulation: %i", ret);
+ drm_fbdev_generic_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index d589f09d04d9..fa1d4f5df31e 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -10,5 +10,7 @@ config DRM_LIMA
depends on OF
select DRM_SCHED
select DRM_GEM_SHMEM_HELPER
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
help
DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
index a85444b0a1d4..ca2097b8e1ad 100644
--- a/drivers/gpu/drm/lima/Makefile
+++ b/drivers/gpu/drm/lima/Makefile
@@ -14,6 +14,8 @@ lima-y := \
lima_sched.o \
lima_ctx.o \
lima_dlbu.o \
- lima_bcast.o
+ lima_bcast.o \
+ lima_trace.o \
+ lima_devfreq.o
obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
index 288398027bfa..fbc43f243c54 100644
--- a/drivers/gpu/drm/lima/lima_bcast.c
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -26,18 +26,33 @@ void lima_bcast_enable(struct lima_device *dev, int num_pp)
bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
}
+static int lima_bcast_hw_init(struct lima_ip *ip)
+{
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, ip->data.mask << 16);
+ bcast_write(LIMA_BCAST_INTERRUPT_MASK, ip->data.mask);
+ return 0;
+}
+
+int lima_bcast_resume(struct lima_ip *ip)
+{
+ return lima_bcast_hw_init(ip);
+}
+
+void lima_bcast_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_bcast_init(struct lima_ip *ip)
{
- int i, mask = 0;
+ int i;
for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
if (ip->dev->ip[i].present)
- mask |= 1 << (i - lima_ip_pp0);
+ ip->data.mask |= 1 << (i - lima_ip_pp0);
}
- bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
- bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
- return 0;
+ return lima_bcast_hw_init(ip);
}
void lima_bcast_fini(struct lima_ip *ip)
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
index c47e58563d0a..465ee587bceb 100644
--- a/drivers/gpu/drm/lima/lima_bcast.h
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -6,6 +6,8 @@
struct lima_ip;
+int lima_bcast_resume(struct lima_ip *ip);
+void lima_bcast_suspend(struct lima_ip *ip);
int lima_bcast_init(struct lima_ip *ip);
void lima_bcast_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
index 22fff6caa961..891d5cd5019a 100644
--- a/drivers/gpu/drm/lima/lima_ctx.c
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -27,6 +27,9 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
if (err < 0)
goto err_out0;
+ ctx->pid = task_pid_nr(current);
+ get_task_comm(ctx->pname, current);
+
return 0;
err_out0:
diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h
index 6154e5c9bfe4..74e2be09090f 100644
--- a/drivers/gpu/drm/lima/lima_ctx.h
+++ b/drivers/gpu/drm/lima/lima_ctx.h
@@ -5,6 +5,7 @@
#define __LIMA_CTX_H__
#include <linux/xarray.h>
+#include <linux/sched.h>
#include "lima_device.h"
@@ -13,6 +14,10 @@ struct lima_ctx {
struct lima_device *dev;
struct lima_sched_context context[lima_pipe_num];
atomic_t guilty;
+
+ /* debug info */
+ char pname[TASK_COMM_LEN];
+ pid_t pid;
};
struct lima_ctx_mgr {
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
new file mode 100644
index 000000000000..bbe02817721b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * Based on panfrost_devfreq.c:
+ * Copyright 2019 Collabora ltd.
+ */
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/property.h>
+
+#include "lima_device.h"
+#include "lima_devfreq.h"
+
+static void lima_devfreq_update_utilization(struct lima_devfreq *devfreq)
+{
+ ktime_t now, last;
+
+ now = ktime_get();
+ last = devfreq->time_last_update;
+
+ if (devfreq->busy_count > 0)
+ devfreq->busy_time += ktime_sub(now, last);
+ else
+ devfreq->idle_time += ktime_sub(now, last);
+
+ devfreq->time_last_update = now;
+}
+
+static int lima_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct dev_pm_opp *opp;
+ int err;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ dev_pm_opp_put(opp);
+
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void lima_devfreq_reset(struct lima_devfreq *devfreq)
+{
+ devfreq->busy_time = 0;
+ devfreq->idle_time = 0;
+ devfreq->time_last_update = ktime_get();
+}
+
+static int lima_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ struct lima_devfreq *devfreq = &ldev->devfreq;
+ unsigned long irqflags;
+
+ status->current_frequency = clk_get_rate(ldev->clk_gpu);
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_update_utilization(devfreq);
+
+ status->total_time = ktime_to_ns(ktime_add(devfreq->busy_time,
+ devfreq->idle_time));
+ status->busy_time = ktime_to_ns(devfreq->busy_time);
+
+ lima_devfreq_reset(devfreq);
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+ dev_dbg(ldev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ status->busy_time, status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile lima_devfreq_profile = {
+ .polling_ms = 50, /* ~3 frames */
+ .target = lima_devfreq_target,
+ .get_dev_status = lima_devfreq_get_dev_status,
+};
+
+void lima_devfreq_fini(struct lima_device *ldev)
+{
+ struct lima_devfreq *devfreq = &ldev->devfreq;
+
+ if (devfreq->cooling) {
+ devfreq_cooling_unregister(devfreq->cooling);
+ devfreq->cooling = NULL;
+ }
+
+ if (devfreq->devfreq) {
+ devm_devfreq_remove_device(ldev->dev, devfreq->devfreq);
+ devfreq->devfreq = NULL;
+ }
+
+ if (devfreq->opp_of_table_added) {
+ dev_pm_opp_of_remove_table(ldev->dev);
+ devfreq->opp_of_table_added = false;
+ }
+
+ if (devfreq->regulators_opp_table) {
+ dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
+ devfreq->regulators_opp_table = NULL;
+ }
+
+ if (devfreq->clkname_opp_table) {
+ dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
+ devfreq->clkname_opp_table = NULL;
+ }
+}
+
+int lima_devfreq_init(struct lima_device *ldev)
+{
+ struct thermal_cooling_device *cooling;
+ struct device *dev = ldev->dev;
+ struct opp_table *opp_table;
+ struct devfreq *devfreq;
+ struct lima_devfreq *ldevfreq = &ldev->devfreq;
+ struct dev_pm_opp *opp;
+ unsigned long cur_freq;
+ int ret;
+
+ if (!device_property_present(dev, "operating-points-v2"))
+ /* Optional, continue without devfreq */
+ return 0;
+
+ spin_lock_init(&ldevfreq->lock);
+
+ opp_table = dev_pm_opp_set_clkname(dev, "core");
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+ goto err_fini;
+ }
+
+ ldevfreq->clkname_opp_table = opp_table;
+
+ opp_table = dev_pm_opp_set_regulators(dev,
+ (const char *[]){ "mali" },
+ 1);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+
+ /* Continue if the optional regulator is missing */
+ if (ret != -ENODEV)
+ goto err_fini;
+ } else {
+ ldevfreq->regulators_opp_table = opp_table;
+ }
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret)
+ goto err_fini;
+ ldevfreq->opp_of_table_added = true;
+
+ lima_devfreq_reset(ldevfreq);
+
+ cur_freq = clk_get_rate(ldev->clk_gpu);
+
+ opp = devfreq_recommended_opp(dev, &cur_freq, 0);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto err_fini;
+ }
+
+ lima_devfreq_profile.initial_freq = cur_freq;
+ dev_pm_opp_put(opp);
+
+ devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+ if (IS_ERR(devfreq)) {
+ dev_err(dev, "Couldn't initialize GPU devfreq\n");
+ ret = PTR_ERR(devfreq);
+ goto err_fini;
+ }
+
+ ldevfreq->devfreq = devfreq;
+
+ cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
+ if (IS_ERR(cooling))
+ dev_info(dev, "Failed to register cooling device\n");
+ else
+ ldevfreq->cooling = cooling;
+
+ return 0;
+
+err_fini:
+ lima_devfreq_fini(ldev);
+ return ret;
+}
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq)
+{
+ unsigned long irqflags;
+
+ if (!devfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_update_utilization(devfreq);
+
+ devfreq->busy_count++;
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq)
+{
+ unsigned long irqflags;
+
+ if (!devfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_update_utilization(devfreq);
+
+ WARN_ON(--devfreq->busy_count < 0);
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq)
+{
+ unsigned long irqflags;
+
+ if (!devfreq->devfreq)
+ return 0;
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_reset(devfreq);
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+ return devfreq_resume_device(devfreq->devfreq);
+}
+
+int lima_devfreq_suspend(struct lima_devfreq *devfreq)
+{
+ if (!devfreq->devfreq)
+ return 0;
+
+ return devfreq_suspend_device(devfreq->devfreq);
+}
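
The busy/idle split reported by lima_devfreq_get_dev_status() only works if the scheduler brackets hardware activity with the record helpers above. A hedged sketch of the expected call pattern; lima_run_hw_job() is a hypothetical stand-in for the real task submission path:

static void lima_run_hw_job(struct lima_device *ldev,
			    struct lima_sched_task *task)
{
	/* utilization clock starts counting as busy time */
	lima_devfreq_record_busy(&ldev->devfreq);

	/* ... program the GP/PP and wait for the IRQ/fence ... */

	/* back to idle accounting; WARNs if calls are unbalanced */
	lima_devfreq_record_idle(&ldev->devfreq);
}
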
diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h
new file mode 100644
index 000000000000..5eed2975a375
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_devfreq.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com> */
+
+#ifndef __LIMA_DEVFREQ_H__
+#define __LIMA_DEVFREQ_H__
+
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+
+struct devfreq;
+struct opp_table;
+struct thermal_cooling_device;
+
+struct lima_device;
+
+struct lima_devfreq {
+ struct devfreq *devfreq;
+ struct opp_table *clkname_opp_table;
+ struct opp_table *regulators_opp_table;
+ struct thermal_cooling_device *cooling;
+ bool opp_of_table_added;
+
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ int busy_count;
+ /*
+ * Protect busy_time, idle_time, time_last_update and busy_count
+ * because these can be updated concurrently, for example by the GP
+ * and PP interrupts.
+ */
+ spinlock_t lock;
+};
+
+int lima_devfreq_init(struct lima_device *ldev);
+void lima_devfreq_fini(struct lima_device *ldev);
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq);
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq);
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq);
+int lima_devfreq_suspend(struct lima_devfreq *devfreq);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index 19829b543024..65fdca366e41 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -25,6 +25,8 @@ struct lima_ip_desc {
int (*init)(struct lima_ip *ip);
void (*fini)(struct lima_ip *ip);
+ int (*resume)(struct lima_ip *ip);
+ void (*suspend)(struct lima_ip *ip);
};
#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
@@ -41,6 +43,8 @@ struct lima_ip_desc {
}, \
.init = lima_##func##_init, \
.fini = lima_##func##_fini, \
+ .resume = lima_##func##_resume, \
+ .suspend = lima_##func##_suspend, \
}
static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
@@ -77,26 +81,10 @@ const char *lima_ip_name(struct lima_ip *ip)
return lima_ip_desc[ip->id].name;
}
-static int lima_clk_init(struct lima_device *dev)
+static int lima_clk_enable(struct lima_device *dev)
{
int err;
- dev->clk_bus = devm_clk_get(dev->dev, "bus");
- if (IS_ERR(dev->clk_bus)) {
- err = PTR_ERR(dev->clk_bus);
- if (err != -EPROBE_DEFER)
- dev_err(dev->dev, "get bus clk failed %d\n", err);
- return err;
- }
-
- dev->clk_gpu = devm_clk_get(dev->dev, "core");
- if (IS_ERR(dev->clk_gpu)) {
- err = PTR_ERR(dev->clk_gpu);
- if (err != -EPROBE_DEFER)
- dev_err(dev->dev, "get core clk failed %d\n", err);
- return err;
- }
-
err = clk_prepare_enable(dev->clk_bus);
if (err)
return err;
@@ -105,15 +93,7 @@ static int lima_clk_init(struct lima_device *dev)
if (err)
goto error_out0;
- dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
-
- if (IS_ERR(dev->reset)) {
- err = PTR_ERR(dev->reset);
- if (err != -EPROBE_DEFER)
- dev_err(dev->dev, "get reset controller failed %d\n",
- err);
- goto error_out1;
- } else if (dev->reset != NULL) {
+ if (dev->reset) {
err = reset_control_deassert(dev->reset);
if (err) {
dev_err(dev->dev,
@@ -131,14 +111,76 @@ error_out0:
return err;
}
-static void lima_clk_fini(struct lima_device *dev)
+static void lima_clk_disable(struct lima_device *dev)
{
- if (dev->reset != NULL)
+ if (dev->reset)
reset_control_assert(dev->reset);
clk_disable_unprepare(dev->clk_gpu);
clk_disable_unprepare(dev->clk_bus);
}
+static int lima_clk_init(struct lima_device *dev)
+{
+ int err;
+
+ dev->clk_bus = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->clk_bus)) {
+ err = PTR_ERR(dev->clk_bus);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get bus clk failed %d\n", err);
+ dev->clk_bus = NULL;
+ return err;
+ }
+
+ dev->clk_gpu = devm_clk_get(dev->dev, "core");
+ if (IS_ERR(dev->clk_gpu)) {
+ err = PTR_ERR(dev->clk_gpu);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get core clk failed %d\n", err);
+ dev->clk_gpu = NULL;
+ return err;
+ }
+
+ dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
+ if (IS_ERR(dev->reset)) {
+ err = PTR_ERR(dev->reset);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get reset controller failed %d\n",
+ err);
+ dev->reset = NULL;
+ return err;
+ }
+
+ return lima_clk_enable(dev);
+}
+
+static void lima_clk_fini(struct lima_device *dev)
+{
+ lima_clk_disable(dev);
+}
+
+static int lima_regulator_enable(struct lima_device *dev)
+{
+ int ret;
+
+ if (!dev->regulator)
+ return 0;
+
+ ret = regulator_enable(dev->regulator);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lima_regulator_disable(struct lima_device *dev)
+{
+ if (dev->regulator)
+ regulator_disable(dev->regulator);
+}
+
static int lima_regulator_init(struct lima_device *dev)
{
int ret;
@@ -154,25 +196,20 @@ static int lima_regulator_init(struct lima_device *dev)
return ret;
}
- ret = regulator_enable(dev->regulator);
- if (ret < 0) {
- dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return lima_regulator_enable(dev);
}
static void lima_regulator_fini(struct lima_device *dev)
{
- if (dev->regulator)
- regulator_disable(dev->regulator);
+ lima_regulator_disable(dev);
}
static int lima_init_ip(struct lima_device *dev, int index)
{
+ struct platform_device *pdev = to_platform_device(dev->dev);
struct lima_ip_desc *desc = lima_ip_desc + index;
struct lima_ip *ip = dev->ip + index;
+ const char *irq_name = desc->irq_name;
int offset = desc->offset[dev->id];
bool must = desc->must_have[dev->id];
int err;
@@ -183,8 +220,9 @@ static int lima_init_ip(struct lima_device *dev, int index)
ip->dev = dev;
ip->id = index;
ip->iomem = dev->iomem + offset;
- if (desc->irq_name) {
- err = platform_get_irq_byname(dev->pdev, desc->irq_name);
+ if (irq_name) {
+ err = must ? platform_get_irq_byname(pdev, irq_name) :
+ platform_get_irq_byname_optional(pdev, irq_name);
if (err < 0)
goto out;
ip->irq = err;
@@ -209,11 +247,34 @@ static void lima_fini_ip(struct lima_device *ldev, int index)
desc->fini(ip);
}
+static int lima_resume_ip(struct lima_device *ldev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = ldev->ip + index;
+ int ret = 0;
+
+ if (ip->present)
+ ret = desc->resume(ip);
+
+ return ret;
+}
+
+static void lima_suspend_ip(struct lima_device *ldev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = ldev->ip + index;
+
+ if (ip->present)
+ desc->suspend(ip);
+}
+
static int lima_init_gp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
int err;
+ pipe->ldev = dev;
+
err = lima_sched_pipe_init(pipe, "gp");
if (err)
return err;
@@ -244,6 +305,8 @@ static int lima_init_pp_pipe(struct lima_device *dev)
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
int err, i;
+ pipe->ldev = dev;
+
err = lima_sched_pipe_init(pipe, "pp");
if (err)
return err;
@@ -290,8 +353,8 @@ static void lima_fini_pp_pipe(struct lima_device *dev)
int lima_device_init(struct lima_device *ldev)
{
+ struct platform_device *pdev = to_platform_device(ldev->dev);
int err, i;
- struct resource *res;
dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
@@ -322,8 +385,7 @@ int lima_device_init(struct lima_device *ldev)
} else
ldev->va_end = LIMA_VA_RESERVE_END;
- res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
- ldev->iomem = devm_ioremap_resource(ldev->dev, res);
+ ldev->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ldev->iomem)) {
dev_err(ldev->dev, "fail to ioremap iomem\n");
err = PTR_ERR(ldev->iomem);
@@ -344,6 +406,12 @@ int lima_device_init(struct lima_device *ldev)
if (err)
goto err_out5;
+ ldev->dump.magic = LIMA_DUMP_MAGIC;
+ ldev->dump.version_major = LIMA_DUMP_MAJOR;
+ ldev->dump.version_minor = LIMA_DUMP_MINOR;
+ INIT_LIST_HEAD(&ldev->error_task_list);
+ mutex_init(&ldev->error_task_list_lock);
+
dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus));
dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu));
@@ -370,6 +438,13 @@ err_out0:
void lima_device_fini(struct lima_device *ldev)
{
int i;
+ struct lima_sched_error_task *et, *tmp;
+
+ list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+ list_del(&et->list);
+ kvfree(et);
+ }
+ mutex_destroy(&ldev->error_task_list_lock);
lima_fini_pp_pipe(ldev);
lima_fini_gp_pipe(ldev);
@@ -387,3 +462,72 @@ void lima_device_fini(struct lima_device *ldev)
lima_clk_fini(ldev);
}
+
+int lima_device_resume(struct device *dev)
+{
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ int i, err;
+
+ err = lima_clk_enable(ldev);
+ if (err) {
+ dev_err(dev, "resume clk fail %d\n", err);
+ return err;
+ }
+
+ err = lima_regulator_enable(ldev);
+ if (err) {
+ dev_err(dev, "resume regulator fail %d\n", err);
+ goto err_out0;
+ }
+
+ for (i = 0; i < lima_ip_num; i++) {
+ err = lima_resume_ip(ldev, i);
+ if (err) {
+ dev_err(dev, "resume ip %d fail\n", i);
+ goto err_out1;
+ }
+ }
+
+ err = lima_devfreq_resume(&ldev->devfreq);
+ if (err) {
+ dev_err(dev, "devfreq resume fail\n");
+ goto err_out1;
+ }
+
+ return 0;
+
+err_out1:
+ while (--i >= 0)
+ lima_suspend_ip(ldev, i);
+ lima_regulator_disable(ldev);
+err_out0:
+ lima_clk_disable(ldev);
+ return err;
+}
+
+int lima_device_suspend(struct device *dev)
+{
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ int i, err;
+
+	/* refuse to suspend while any task is still running on the hardware */
+ for (i = 0; i < lima_pipe_num; i++) {
+ if (atomic_read(&ldev->pipe[i].base.hw_rq_count))
+ return -EBUSY;
+ }
+
+ err = lima_devfreq_suspend(&ldev->devfreq);
+ if (err) {
+ dev_err(dev, "devfreq suspend fail\n");
+ return err;
+ }
+
+ for (i = lima_ip_num - 1; i >= 0; i--)
+ lima_suspend_ip(ldev, i);
+
+ lima_regulator_disable(ldev);
+
+ lima_clk_disable(ldev);
+
+ return 0;
+}
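
lima_device_suspend() returns -EBUSY while work is still queued on the hardware, so runtime PM simply retries autosuspend later; submission paths are expected to hold a runtime PM reference across each job. A sketch of that handshake under the 200 ms autosuspend delay configured at probe time; only the pm_runtime_* calls are real API, the surrounding functions are hypothetical:

static int lima_begin_job(struct lima_device *ldev)
{
	/* resumes via lima_device_resume() if the GPU was suspended */
	return pm_runtime_get_sync(ldev->dev);
}

static void lima_end_job(struct lima_device *ldev)
{
	/* restart the autosuspend timer, then drop our reference */
	pm_runtime_mark_last_busy(ldev->dev);
	pm_runtime_put_autosuspend(ldev->dev);
}
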
diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h
index 31158d86271c..41b9d7b4bcc7 100644
--- a/drivers/gpu/drm/lima/lima_device.h
+++ b/drivers/gpu/drm/lima/lima_device.h
@@ -6,8 +6,12 @@
#include <drm/drm_device.h>
#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include "lima_sched.h"
+#include "lima_dump.h"
+#include "lima_devfreq.h"
enum lima_gpu_id {
lima_gpu_mali400 = 0,
@@ -60,6 +64,8 @@ struct lima_ip {
bool async_reset;
/* l2 cache */
spinlock_t lock;
+ /* pmu/bcast */
+ u32 mask;
} data;
};
@@ -72,7 +78,6 @@ enum lima_pipe_id {
struct lima_device {
struct device *dev;
struct drm_device *ddev;
- struct platform_device *pdev;
enum lima_gpu_id id;
u32 gp_version;
@@ -94,6 +99,13 @@ struct lima_device {
u32 *dlbu_cpu;
dma_addr_t dlbu_dma;
+
+ struct lima_devfreq devfreq;
+
+ /* debug info */
+ struct lima_dump_head dump;
+ struct list_head error_task_list;
+ struct mutex error_task_list_lock;
};
static inline struct lima_device *
@@ -128,4 +140,7 @@ static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
return 0;
}
+int lima_device_suspend(struct device *dev);
+int lima_device_resume(struct device *dev);
+
#endif
diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
index 8399ceffb94b..c1d5ea35daa7 100644
--- a/drivers/gpu/drm/lima/lima_dlbu.c
+++ b/drivers/gpu/drm/lima/lima_dlbu.c
@@ -42,7 +42,7 @@ void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
}
-int lima_dlbu_init(struct lima_ip *ip)
+static int lima_dlbu_hw_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
@@ -52,6 +52,21 @@ int lima_dlbu_init(struct lima_ip *ip)
return 0;
}
+int lima_dlbu_resume(struct lima_ip *ip)
+{
+ return lima_dlbu_hw_init(ip);
+}
+
+void lima_dlbu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_dlbu_init(struct lima_ip *ip)
+{
+ return lima_dlbu_hw_init(ip);
+}
+
void lima_dlbu_fini(struct lima_ip *ip)
{
diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
index 16f877984466..be71daaaee89 100644
--- a/drivers/gpu/drm/lima/lima_dlbu.h
+++ b/drivers/gpu/drm/lima/lima_dlbu.h
@@ -12,6 +12,8 @@ void lima_dlbu_disable(struct lima_device *dev);
void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
+int lima_dlbu_resume(struct lima_ip *ip);
+void lima_dlbu_suspend(struct lima_ip *ip);
int lima_dlbu_init(struct lima_ip *ip);
void lima_dlbu_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 2daac64d8955..a831565af813 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -5,17 +5,20 @@
#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/lima_drm.h>
+#include "lima_device.h"
#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"
int lima_sched_timeout_ms;
uint lima_heap_init_nr_pages = 8;
+uint lima_max_error_tasks;
MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
@@ -23,6 +26,9 @@ module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
MODULE_PARM_DESC(heap_init_nr_pages, "heap buffer init number of pages");
module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
+MODULE_PARM_DESC(max_error_tasks, "max number of error tasks to save");
+module_param_named(max_error_tasks, lima_max_error_tasks, uint, 0644);
+
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_get_param *args = data;
@@ -272,6 +278,93 @@ static struct drm_driver lima_drm_driver = {
.gem_prime_mmap = drm_gem_prime_mmap,
};
+struct lima_block_reader {
+ void *dst;
+ size_t base;
+ size_t count;
+ size_t off;
+ ssize_t read;
+};
+
+static bool lima_read_block(struct lima_block_reader *reader,
+ void *src, size_t src_size)
+{
+ size_t max_off = reader->base + src_size;
+
+ if (reader->off < max_off) {
+ size_t size = min_t(size_t, max_off - reader->off,
+ reader->count);
+
+ memcpy(reader->dst, src + (reader->off - reader->base), size);
+
+ reader->dst += size;
+ reader->off += size;
+ reader->read += size;
+ reader->count -= size;
+ }
+
+ reader->base = max_off;
+
+ return !!reader->count;
+}
+
+static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ struct lima_sched_error_task *et;
+ struct lima_block_reader reader = {
+ .dst = buf,
+ .count = count,
+ .off = off,
+ };
+
+ mutex_lock(&ldev->error_task_list_lock);
+
+ if (lima_read_block(&reader, &ldev->dump, sizeof(ldev->dump))) {
+ list_for_each_entry(et, &ldev->error_task_list, list) {
+ if (!lima_read_block(&reader, et->data, et->size))
+ break;
+ }
+ }
+
+ mutex_unlock(&ldev->error_task_list_lock);
+ return reader.read;
+}
+
+static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ struct lima_sched_error_task *et, *tmp;
+
+ mutex_lock(&ldev->error_task_list_lock);
+
+ list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+ list_del(&et->list);
+ kvfree(et);
+ }
+
+ ldev->dump.size = 0;
+ ldev->dump.num_tasks = 0;
+
+ mutex_unlock(&ldev->error_task_list_lock);
+
+ return count;
+}
+
+static const struct bin_attribute lima_error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = 0600,
+ .size = 0,
+ .read = lima_error_state_read,
+ .write = lima_error_state_write,
+};
+
static int lima_pdev_probe(struct platform_device *pdev)
{
struct lima_device *ldev;
@@ -288,7 +381,6 @@ static int lima_pdev_probe(struct platform_device *pdev)
goto err_out0;
}
- ldev->pdev = pdev;
ldev->dev = &pdev->dev;
ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
@@ -306,16 +398,34 @@ static int lima_pdev_probe(struct platform_device *pdev)
if (err)
goto err_out1;
+ err = lima_devfreq_init(ldev);
+ if (err) {
+ dev_err(&pdev->dev, "Fatal error during devfreq init\n");
+ goto err_out2;
+ }
+
+ pm_runtime_set_active(ldev->dev);
+ pm_runtime_mark_last_busy(ldev->dev);
+ pm_runtime_set_autosuspend_delay(ldev->dev, 200);
+ pm_runtime_use_autosuspend(ldev->dev);
+ pm_runtime_enable(ldev->dev);
+
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
err = drm_dev_register(ddev, 0);
if (err < 0)
- goto err_out2;
+ goto err_out3;
+
+ if (sysfs_create_bin_file(&ldev->dev->kobj, &lima_error_state_attr))
+ dev_warn(ldev->dev, "fail to create error state sysfs\n");
return 0;
+err_out3:
+ pm_runtime_disable(ldev->dev);
+ lima_devfreq_fini(ldev);
err_out2:
lima_device_fini(ldev);
err_out1:
@@ -330,8 +440,17 @@ static int lima_pdev_remove(struct platform_device *pdev)
struct lima_device *ldev = platform_get_drvdata(pdev);
struct drm_device *ddev = ldev->ddev;
+ sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr);
+
drm_dev_unregister(ddev);
+
+ /* stop autosuspend to make sure device is in active state */
+ pm_runtime_set_autosuspend_delay(ldev->dev, -1);
+ pm_runtime_disable(ldev->dev);
+
+ lima_devfreq_fini(ldev);
lima_device_fini(ldev);
+
drm_dev_put(ddev);
lima_sched_slab_fini();
return 0;
@@ -344,26 +463,22 @@ static const struct of_device_id dt_match[] = {
};
MODULE_DEVICE_TABLE(of, dt_match);
+static const struct dev_pm_ops lima_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(lima_device_suspend, lima_device_resume, NULL)
+};
+
static struct platform_driver lima_platform_driver = {
.probe = lima_pdev_probe,
.remove = lima_pdev_remove,
.driver = {
.name = "lima",
+ .pm = &lima_pm_ops,
.of_match_table = dt_match,
},
};
-static int __init lima_init(void)
-{
- return platform_driver_register(&lima_platform_driver);
-}
-module_init(lima_init);
-
-static void __exit lima_exit(void)
-{
- platform_driver_unregister(&lima_platform_driver);
-}
-module_exit(lima_exit);
+module_platform_driver(lima_platform_driver);
MODULE_AUTHOR("Lima Project Developers");
MODULE_DESCRIPTION("Lima DRM Driver");
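
The new "error" bin attribute can be drained and reset from userspace; per lima_error_state_write() above, any write clears the saved tasks. A minimal userspace sketch, assuming a hypothetical sysfs path (the real one depends on the platform device name):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in, out;

	in = open("/sys/devices/platform/1c40000.gpu/error", O_RDWR);
	out = open("lima.dump", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0)
		return 1;

	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n)
			return 1;

	if (write(in, "1", 1) < 0)	/* any write clears the dump */
		return 1;

	close(out);
	close(in);
	return 0;
}
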
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index f492ecc6a5d9..fdbd4077c768 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -10,6 +10,7 @@
extern int lima_sched_timeout_ms;
extern uint lima_heap_init_nr_pages;
+extern uint lima_max_error_tasks;
struct lima_vm;
struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_dump.h b/drivers/gpu/drm/lima/lima_dump.h
new file mode 100644
index 000000000000..ca243d99c51b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dump.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DUMP_H__
+#define __LIMA_DUMP_H__
+
+#include <linux/types.h>
+
+/**
+ * Dump file format for all the information needed to start a lima task.
+ *
+ * top level format
+ * | magic code "LIMA" | format version | num tasks | data size |
+ * | reserved | reserved | reserved | reserved |
+ * | task 1 ID | task 1 size | num chunks | reserved | task 1 data |
+ * | task 2 ID | task 2 size | num chunks | reserved | task 2 data |
+ * ...
+ *
+ * task data format
+ * | chunk 1 ID | chunk 1 size | reserved | reserved | chunk 1 data |
+ * | chunk 2 ID | chunk 2 size | reserved | reserved | chunk 2 data |
+ * ...
+ *
+ */
+
+#define LIMA_DUMP_MAJOR 1
+#define LIMA_DUMP_MINOR 0
+
+#define LIMA_DUMP_MAGIC 0x414d494c
+
+struct lima_dump_head {
+ __u32 magic;
+ __u16 version_major;
+ __u16 version_minor;
+ __u32 num_tasks;
+ __u32 size;
+ __u32 reserved[4];
+};
+
+#define LIMA_DUMP_TASK_GP 0
+#define LIMA_DUMP_TASK_PP 1
+#define LIMA_DUMP_TASK_NUM 2
+
+struct lima_dump_task {
+ __u32 id;
+ __u32 size;
+ __u32 num_chunks;
+ __u32 reserved;
+};
+
+#define LIMA_DUMP_CHUNK_FRAME 0
+#define LIMA_DUMP_CHUNK_BUFFER 1
+#define LIMA_DUMP_CHUNK_PROCESS_NAME 2
+#define LIMA_DUMP_CHUNK_PROCESS_ID 3
+#define LIMA_DUMP_CHUNK_NUM 4
+
+struct lima_dump_chunk {
+ __u32 id;
+ __u32 size;
+ __u32 reserved[2];
+};
+
+struct lima_dump_chunk_buffer {
+ __u32 id;
+ __u32 size;
+ __u32 va;
+ __u32 reserved;
+};
+
+struct lima_dump_chunk_pid {
+ __u32 id;
+ __u32 size;
+ __u32 pid;
+ __u32 reserved;
+};
+
+#endif
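
A consumer can walk this layout with the three fixed headers. A hedged userspace sketch: the semantics of the size fields (task size covering the chunk headers and data, chunk data unpadded) are inferred from the layout comment above, and no bounds checking is done:

#include <stdint.h>
#include <stdio.h>

struct dump_head { uint32_t magic; uint16_t major, minor;
		   uint32_t num_tasks, size, reserved[4]; };
struct dump_task { uint32_t id, size, num_chunks, reserved; };
struct dump_chunk { uint32_t id, size, reserved[2]; };

static void walk_dump(const uint8_t *p)
{
	const struct dump_head *h = (const void *)p;
	uint32_t t, c;

	if (h->magic != 0x414d494c)	/* "LIMA", little endian */
		return;
	p += sizeof(*h);
	for (t = 0; t < h->num_tasks; t++) {
		const struct dump_task *task = (const void *)p;
		const uint8_t *data = p + sizeof(*task);

		printf("task %u: %u bytes, %u chunks\n",
		       task->id, task->size, task->num_chunks);
		for (c = 0; c < task->num_chunks; c++) {
			const struct dump_chunk *ch = (const void *)data;

			printf("  chunk %u: %u bytes\n", ch->id, ch->size);
			data += sizeof(*ch) + ch->size;
		}
		p += sizeof(*task) + task->size;
	}
}
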
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index d8841c870d90..8dd501b7a3d0 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -274,6 +274,23 @@ static void lima_gp_print_version(struct lima_ip *ip)
static struct kmem_cache *lima_gp_task_slab;
static int lima_gp_task_slab_refcnt;
+static int lima_gp_hw_init(struct lima_ip *ip)
+{
+ ip->data.async_reset = false;
+ lima_gp_soft_reset_async(ip);
+ return lima_gp_soft_reset_async_wait(ip);
+}
+
+int lima_gp_resume(struct lima_ip *ip)
+{
+ return lima_gp_hw_init(ip);
+}
+
+void lima_gp_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_gp_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
@@ -281,9 +298,7 @@ int lima_gp_init(struct lima_ip *ip)
lima_gp_print_version(ip);
- ip->data.async_reset = false;
- lima_gp_soft_reset_async(ip);
- err = lima_gp_soft_reset_async_wait(ip);
+ err = lima_gp_hw_init(ip);
if (err)
return err;
diff --git a/drivers/gpu/drm/lima/lima_gp.h b/drivers/gpu/drm/lima/lima_gp.h
index 516e5c1babbb..02ec9af78a51 100644
--- a/drivers/gpu/drm/lima/lima_gp.h
+++ b/drivers/gpu/drm/lima/lima_gp.h
@@ -7,6 +7,8 @@
struct lima_ip;
struct lima_device;
+int lima_gp_resume(struct lima_ip *ip);
+void lima_gp_suspend(struct lima_ip *ip);
int lima_gp_init(struct lima_ip *ip);
void lima_gp_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
index 6873a7af5a5c..c4080a02957b 100644
--- a/drivers/gpu/drm/lima/lima_l2_cache.c
+++ b/drivers/gpu/drm/lima/lima_l2_cache.c
@@ -38,9 +38,35 @@ int lima_l2_cache_flush(struct lima_ip *ip)
return ret;
}
+static int lima_l2_cache_hw_init(struct lima_ip *ip)
+{
+ int err;
+
+ err = lima_l2_cache_flush(ip);
+ if (err)
+ return err;
+
+ l2_cache_write(LIMA_L2_CACHE_ENABLE,
+ LIMA_L2_CACHE_ENABLE_ACCESS |
+ LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
+ l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
+
+ return 0;
+}
+
+int lima_l2_cache_resume(struct lima_ip *ip)
+{
+ return lima_l2_cache_hw_init(ip);
+}
+
+void lima_l2_cache_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_l2_cache_init(struct lima_ip *ip)
{
- int i, err;
+ int i;
u32 size;
struct lima_device *dev = ip->dev;
@@ -63,15 +89,7 @@ int lima_l2_cache_init(struct lima_ip *ip)
1 << (size & 0xff),
1 << ((size >> 24) & 0xff));
- err = lima_l2_cache_flush(ip);
- if (err)
- return err;
-
- l2_cache_write(LIMA_L2_CACHE_ENABLE,
- LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
- l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
-
- return 0;
+ return lima_l2_cache_hw_init(ip);
}
void lima_l2_cache_fini(struct lima_ip *ip)
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
index c63fb676ff14..1aeeefd53fb9 100644
--- a/drivers/gpu/drm/lima/lima_l2_cache.h
+++ b/drivers/gpu/drm/lima/lima_l2_cache.h
@@ -6,6 +6,8 @@
struct lima_ip;
+int lima_l2_cache_resume(struct lima_ip *ip);
+void lima_l2_cache_suspend(struct lima_ip *ip);
int lima_l2_cache_init(struct lima_ip *ip);
void lima_l2_cache_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index f79d2af427e7..a1ae6c252dc2 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -59,12 +59,44 @@ static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-int lima_mmu_init(struct lima_ip *ip)
+static int lima_mmu_hw_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
int err;
u32 v;
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
+ err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+ LIMA_MMU_DTE_ADDR, v, v == 0);
+ if (err)
+ return err;
+
+ mmu_write(LIMA_MMU_INT_MASK,
+ LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+ mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+ return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_PAGING_ENABLED);
+}
+
+int lima_mmu_resume(struct lima_ip *ip)
+{
+ if (ip->id == lima_ip_ppmmu_bcast)
+ return 0;
+
+ return lima_mmu_hw_init(ip);
+}
+
+void lima_mmu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_mmu_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
if (ip->id == lima_ip_ppmmu_bcast)
return 0;
@@ -74,12 +106,6 @@ int lima_mmu_init(struct lima_ip *ip)
return -EIO;
}
- mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
- err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
- LIMA_MMU_DTE_ADDR, v, v == 0);
- if (err)
- return err;
-
err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
@@ -87,11 +113,7 @@ int lima_mmu_init(struct lima_ip *ip)
return err;
}
- mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
- mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
- return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
- LIMA_MMU_STATUS, v,
- v & LIMA_MMU_STATUS_PAGING_ENABLED);
+ return lima_mmu_hw_init(ip);
}
void lima_mmu_fini(struct lima_ip *ip)
@@ -113,8 +135,7 @@ void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
LIMA_MMU_STATUS, v,
v & LIMA_MMU_STATUS_STALL_ACTIVE);
- if (vm)
- mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
+ mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
/* flush the TLB */
mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
index 4f8ccbebcba1..f0c97ac75ea0 100644
--- a/drivers/gpu/drm/lima/lima_mmu.h
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -7,6 +7,8 @@
struct lima_ip;
struct lima_vm;
+int lima_mmu_resume(struct lima_ip *ip);
+void lima_mmu_suspend(struct lima_ip *ip);
int lima_mmu_init(struct lima_ip *ip);
void lima_mmu_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c
index 571f6d661581..e397e1146e96 100644
--- a/drivers/gpu/drm/lima/lima_pmu.c
+++ b/drivers/gpu/drm/lima/lima_pmu.c
@@ -21,7 +21,7 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
v, v & LIMA_PMU_INT_CMD_MASK,
100, 100000);
if (err) {
- dev_err(dev->dev, "timeout wait pmd cmd\n");
+ dev_err(dev->dev, "timeout wait pmu cmd\n");
return err;
}
@@ -29,7 +29,41 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
return 0;
}
-int lima_pmu_init(struct lima_ip *ip)
+static u32 lima_pmu_get_ip_mask(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ u32 ret = 0;
+ int i;
+
+ ret |= LIMA_PMU_POWER_GP0_MASK;
+
+ if (dev->id == lima_gpu_mali400) {
+ ret |= LIMA_PMU_POWER_L2_MASK;
+ for (i = 0; i < 4; i++) {
+ if (dev->ip[lima_ip_pp0 + i].present)
+ ret |= LIMA_PMU_POWER_PP_MASK(i);
+ }
+ } else {
+ if (dev->ip[lima_ip_pp0].present)
+ ret |= LIMA450_PMU_POWER_PP0_MASK;
+ for (i = lima_ip_pp1; i <= lima_ip_pp3; i++) {
+ if (dev->ip[i].present) {
+ ret |= LIMA450_PMU_POWER_PP13_MASK;
+ break;
+ }
+ }
+ for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
+ if (dev->ip[i].present) {
+ ret |= LIMA450_PMU_POWER_PP47_MASK;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int lima_pmu_hw_init(struct lima_ip *ip)
{
int err;
u32 stat;
@@ -54,7 +88,44 @@ int lima_pmu_init(struct lima_ip *ip)
return 0;
}
-void lima_pmu_fini(struct lima_ip *ip)
+static void lima_pmu_hw_fini(struct lima_ip *ip)
{
+ u32 stat;
+
+ if (!ip->data.mask)
+ ip->data.mask = lima_pmu_get_ip_mask(ip);
+ stat = ~pmu_read(LIMA_PMU_STATUS) & ip->data.mask;
+ if (stat) {
+ pmu_write(LIMA_PMU_POWER_DOWN, stat);
+
+ /* Don't wait for interrupt on Mali400 if all domains are
+ * powered off because the HW won't generate an interrupt
+ * in this case.
+ */
+ if (ip->dev->id == lima_gpu_mali400)
+ pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
+ else
+ lima_pmu_wait_cmd(ip);
+ }
+}
+
+int lima_pmu_resume(struct lima_ip *ip)
+{
+ return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_suspend(struct lima_ip *ip)
+{
+ lima_pmu_hw_fini(ip);
+}
+
+int lima_pmu_init(struct lima_ip *ip)
+{
+ return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_fini(struct lima_ip *ip)
+{
+ lima_pmu_hw_fini(ip);
}
diff --git a/drivers/gpu/drm/lima/lima_pmu.h b/drivers/gpu/drm/lima/lima_pmu.h
index a2a18775eb07..652dc7af3047 100644
--- a/drivers/gpu/drm/lima/lima_pmu.h
+++ b/drivers/gpu/drm/lima/lima_pmu.h
@@ -6,6 +6,8 @@
struct lima_ip;
+int lima_pmu_resume(struct lima_ip *ip);
+void lima_pmu_suspend(struct lima_ip *ip);
int lima_pmu_init(struct lima_ip *ip);
void lima_pmu_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index 8fef224b93c8..33f01383409c 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -223,6 +223,23 @@ static void lima_pp_print_version(struct lima_ip *ip)
lima_ip_name(ip), name, major, minor);
}
+static int lima_pp_hw_init(struct lima_ip *ip)
+{
+ ip->data.async_reset = false;
+ lima_pp_soft_reset_async(ip);
+ return lima_pp_soft_reset_async_wait(ip);
+}
+
+int lima_pp_resume(struct lima_ip *ip)
+{
+ return lima_pp_hw_init(ip);
+}
+
+void lima_pp_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_pp_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
@@ -230,9 +247,7 @@ int lima_pp_init(struct lima_ip *ip)
lima_pp_print_version(ip);
- ip->data.async_reset = false;
- lima_pp_soft_reset_async(ip);
- err = lima_pp_soft_reset_async_wait(ip);
+ err = lima_pp_hw_init(ip);
if (err)
return err;
@@ -254,6 +269,16 @@ void lima_pp_fini(struct lima_ip *ip)
}
+int lima_pp_bcast_resume(struct lima_ip *ip)
+{
+ return 0;
+}
+
+void lima_pp_bcast_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_pp_bcast_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
diff --git a/drivers/gpu/drm/lima/lima_pp.h b/drivers/gpu/drm/lima/lima_pp.h
index bf60c77b2633..16ec96de15a9 100644
--- a/drivers/gpu/drm/lima/lima_pp.h
+++ b/drivers/gpu/drm/lima/lima_pp.h
@@ -7,9 +7,13 @@
struct lima_ip;
struct lima_device;
+int lima_pp_resume(struct lima_ip *ip);
+void lima_pp_suspend(struct lima_ip *ip);
int lima_pp_init(struct lima_ip *ip);
void lima_pp_fini(struct lima_ip *ip);
+int lima_pp_bcast_resume(struct lima_ip *ip);
+void lima_pp_bcast_suspend(struct lima_ip *ip);
int lima_pp_bcast_init(struct lima_ip *ip);
void lima_pp_bcast_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 3886999b4533..e6cefda00279 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -3,14 +3,17 @@
#include <linux/kthread.h>
#include <linux/slab.h>
-#include <linux/xarray.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_gem.h"
+#include "lima_trace.h"
struct lima_fence {
struct dma_fence base;
@@ -176,6 +179,7 @@ struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *conte
{
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
+ trace_lima_task_submit(task);
drm_sched_entity_push_job(&task->base, &context->base);
return fence;
}
@@ -191,14 +195,36 @@ static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
return NULL;
}
+static int lima_pm_busy(struct lima_device *ldev)
+{
+ int ret;
+
+ /* resume GPU if it has been suspended by runtime PM */
+ ret = pm_runtime_get_sync(ldev->dev);
+ if (ret < 0)
+ return ret;
+
+ lima_devfreq_record_busy(&ldev->devfreq);
+ return 0;
+}
+
+static void lima_pm_idle(struct lima_device *ldev)
+{
+ lima_devfreq_record_idle(&ldev->devfreq);
+
+ /* GPU can do auto runtime suspend */
+ pm_runtime_mark_last_busy(ldev->dev);
+ pm_runtime_put_autosuspend(ldev->dev);
+}
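
The lima_pm_busy()/lima_pm_idle() helpers bracket each job with a runtime-PM reference and a devfreq busy/idle record. For pm_runtime_put_autosuspend() to actually power the GPU down after the last job, the device must opt in to autosuspend during probe; a minimal sketch of that setup follows, in which the helper name and the 50 ms idle window are illustrative assumptions, not taken from this patch:

#include <linux/pm_runtime.h>

/* Sketch only: enable autosuspend so pm_runtime_put_autosuspend()
 * defers the real suspend until the GPU has been idle for a while.
 * lima_pm_setup() and the 50 ms delay are assumed for illustration.
 */
static void lima_pm_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);	/* hardware is already up after init */
	pm_runtime_enable(dev);
}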
+
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
struct lima_sched_task *task = to_lima_task(job);
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_device *ldev = pipe->ldev;
struct lima_fence *fence;
struct dma_fence *ret;
- struct lima_vm *vm = NULL, *last_vm = NULL;
- int i;
+ int i, err;
/* after GPU reset */
if (job->s_fence->finished.error < 0)
@@ -207,6 +233,13 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
fence = lima_fence_create(pipe);
if (!fence)
return NULL;
+
+ err = lima_pm_busy(ldev);
+ if (err < 0) {
+ dma_fence_put(&fence->base);
+ return NULL;
+ }
+
task->fence = &fence->base;
/* for caller usage of the fence, otherwise irq handler
@@ -234,21 +267,17 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
for (i = 0; i < pipe->num_l2_cache; i++)
lima_l2_cache_flush(pipe->l2_cache[i]);
- if (task->vm != pipe->current_vm) {
- vm = lima_vm_get(task->vm);
- last_vm = pipe->current_vm;
- pipe->current_vm = task->vm;
- }
+ lima_vm_put(pipe->current_vm);
+ pipe->current_vm = lima_vm_get(task->vm);
if (pipe->bcast_mmu)
- lima_mmu_switch_vm(pipe->bcast_mmu, vm);
+ lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
else {
for (i = 0; i < pipe->num_mmu; i++)
- lima_mmu_switch_vm(pipe->mmu[i], vm);
+ lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
}
- if (last_vm)
- lima_vm_put(last_vm);
+ trace_lima_task_run(task);
pipe->error = false;
pipe->task_run(pipe, task);
@@ -256,10 +285,139 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
return task->fence;
}
+static void lima_sched_build_error_task_list(struct lima_sched_task *task)
+{
+ struct lima_sched_error_task *et;
+ struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
+ struct lima_ip *ip = pipe->processor[0];
+ int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_context *sched_ctx =
+ container_of(task->base.entity,
+ struct lima_sched_context, base);
+ struct lima_ctx *ctx =
+ container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
+ struct lima_dump_task *dt;
+ struct lima_dump_chunk *chunk;
+ struct lima_dump_chunk_pid *pid_chunk;
+ struct lima_dump_chunk_buffer *buffer_chunk;
+ u32 size, task_size, mem_size;
+ int i;
+
+ mutex_lock(&dev->error_task_list_lock);
+
+ if (dev->dump.num_tasks >= lima_max_error_tasks) {
+		dev_info(dev->dev, "failed to save task state from %s pid %d: error task list is full\n",
+			 ctx->pname, ctx->pid);
+ goto out;
+ }
+
+ /* frame chunk */
+ size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
+ /* process name chunk */
+ size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
+ /* pid chunk */
+ size += sizeof(struct lima_dump_chunk);
+ /* buffer chunks */
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+
+ size += sizeof(struct lima_dump_chunk);
+ size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
+ }
+
+ task_size = size + sizeof(struct lima_dump_task);
+ mem_size = task_size + sizeof(*et);
+ et = kvmalloc(mem_size, GFP_KERNEL);
+ if (!et) {
+		dev_err(dev->dev, "failed to allocate task dump buffer of size %x\n",
+ mem_size);
+ goto out;
+ }
+
+ et->data = et + 1;
+ et->size = task_size;
+
+ dt = et->data;
+ memset(dt, 0, sizeof(*dt));
+ dt->id = pipe_id;
+ dt->size = size;
+
+ chunk = (struct lima_dump_chunk *)(dt + 1);
+ memset(chunk, 0, sizeof(*chunk));
+ chunk->id = LIMA_DUMP_CHUNK_FRAME;
+ chunk->size = pipe->frame_size;
+ memcpy(chunk + 1, task->frame, pipe->frame_size);
+ dt->num_chunks++;
+
+ chunk = (void *)(chunk + 1) + chunk->size;
+ memset(chunk, 0, sizeof(*chunk));
+ chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
+ chunk->size = sizeof(ctx->pname);
+ memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
+ dt->num_chunks++;
+
+ pid_chunk = (void *)(chunk + 1) + chunk->size;
+ memset(pid_chunk, 0, sizeof(*pid_chunk));
+ pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
+ pid_chunk->pid = ctx->pid;
+ dt->num_chunks++;
+
+ buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+ void *data;
+
+ memset(buffer_chunk, 0, sizeof(*buffer_chunk));
+ buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
+ buffer_chunk->va = lima_vm_get_va(task->vm, bo);
+
+ if (bo->heap_size) {
+ buffer_chunk->size = bo->heap_size;
+
+ data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (!data) {
+ kvfree(et);
+ goto out;
+ }
+
+ memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+ vunmap(data);
+ } else {
+ buffer_chunk->size = lima_bo_size(bo);
+
+ data = drm_gem_shmem_vmap(&bo->base.base);
+ if (IS_ERR_OR_NULL(data)) {
+ kvfree(et);
+ goto out;
+ }
+
+ memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+ drm_gem_shmem_vunmap(&bo->base.base, data);
+ }
+
+ buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
+ dt->num_chunks++;
+ }
+
+ list_add(&et->list, &dev->error_task_list);
+ dev->dump.size += et->size;
+ dev->dump.num_tasks++;
+
+	dev_info(dev->dev, "successfully saved error task state\n");
+
+out:
+ mutex_unlock(&dev->error_task_list_lock);
+}
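
The dump built here is one packed byte stream: a lima_dump_task header, then dt->num_chunks chunk headers, each immediately followed by its payload and advanced with (void *)(chunk + 1) + chunk->size (the pid chunk keeps its payload in the header, so its size stays 0). A hypothetical reader-side walk, sketched without the bounds checks a real consumer would need:

/* Sketch: iterate the packed chunks of one dumped task. The walk
 * mirrors the writer above; walk_dump_task() is a hypothetical
 * helper, not part of this patch.
 */
static void walk_dump_task(struct lima_dump_task *dt)
{
	struct lima_dump_chunk *chunk = (struct lima_dump_chunk *)(dt + 1);
	int i;

	for (i = 0; i < dt->num_chunks; i++) {
		/* chunk->id is FRAME, PROCESS_NAME, PROCESS_ID or BUFFER */
		chunk = (void *)(chunk + 1) + chunk->size;
	}
}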
+
static void lima_sched_timedout_job(struct drm_sched_job *job)
{
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_sched_task *task = to_lima_task(job);
+ struct lima_device *ldev = pipe->ldev;
if (!pipe->error)
DRM_ERROR("lima job timeout\n");
@@ -268,6 +426,8 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
drm_sched_increase_karma(&task->base);
+ lima_sched_build_error_task_list(task);
+
pipe->task_error(pipe);
if (pipe->bcast_mmu)
@@ -279,12 +439,12 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
lima_mmu_page_fault_resume(pipe->mmu[i]);
}
- if (pipe->current_vm)
- lima_vm_put(pipe->current_vm);
-
+ lima_vm_put(pipe->current_vm);
pipe->current_vm = NULL;
pipe->current_task = NULL;
+ lima_pm_idle(ldev);
+
drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, true);
}
@@ -355,6 +515,7 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
struct lima_sched_task *task = pipe->current_task;
+ struct lima_device *ldev = pipe->ldev;
if (pipe->error) {
if (task && task->recoverable)
@@ -364,5 +525,7 @@ void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
} else {
pipe->task_fini(pipe);
dma_fence_signal(task->fence);
+
+ lima_pm_idle(ldev);
}
}
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index d64393fb50a9..90f03c48ef4a 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -5,9 +5,18 @@
#define __LIMA_SCHED_H__
#include <drm/gpu_scheduler.h>
+#include <linux/list.h>
+#include <linux/xarray.h>
+struct lima_device;
struct lima_vm;
+struct lima_sched_error_task {
+ struct list_head list;
+ void *data;
+ u32 size;
+};
+
struct lima_sched_task {
struct drm_sched_job base;
@@ -44,6 +53,8 @@ struct lima_sched_pipe {
u32 fence_seqno;
spinlock_t fence_lock;
+ struct lima_device *ldev;
+
struct lima_sched_task *current_task;
struct lima_vm *current_vm;
diff --git a/drivers/gpu/drm/lima/lima_trace.c b/drivers/gpu/drm/lima/lima_trace.c
new file mode 100644
index 000000000000..ea1c7289bebc
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_trace.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#include "lima_sched.h"
+
+#define CREATE_TRACE_POINTS
+#include "lima_trace.h"
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
new file mode 100644
index 000000000000..3a430e93d384
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#if !defined(_LIMA_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _LIMA_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lima
+#define TRACE_INCLUDE_FILE lima_trace
+
+DECLARE_EVENT_CLASS(lima_task,
+ TP_PROTO(struct lima_sched_task *task),
+ TP_ARGS(task),
+ TP_STRUCT__entry(
+ __field(uint64_t, task_id)
+ __field(unsigned int, context)
+ __field(unsigned int, seqno)
+ __string(pipe, task->base.sched->name)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->base.id;
+ __entry->context = task->base.s_fence->finished.context;
+ __entry->seqno = task->base.s_fence->finished.seqno;
+ __assign_str(pipe, task->base.sched->name)
+ ),
+
+ TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
+ __entry->task_id, __entry->context, __entry->seqno,
+ __get_str(pipe))
+);
+
+DEFINE_EVENT(lima_task, lima_task_submit,
+ TP_PROTO(struct lima_sched_task *task),
+ TP_ARGS(task)
+);
+
+DEFINE_EVENT(lima_task, lima_task_run,
+ TP_PROTO(struct lima_sched_task *task),
+ TP_ARGS(task)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/lima
+#include <trace/define_trace.h>
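
Each DEFINE_EVENT above expands into a static inline hook that the scheduler calls directly (trace_lima_task_submit()/trace_lima_task_run() in lima_sched.c); the call sites cost almost nothing unless the events are enabled under the lima directory of tracefs. Roughly, and only as a simplified view of what the tracepoint macros emit:

/* Simplified: the macros generate inline entry points of this shape. */
static inline void trace_lima_task_submit(struct lima_sched_task *task);
static inline void trace_lima_task_run(struct lima_sched_task *task);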
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index 22aeec77d84d..3a7c74822d8b 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -54,7 +54,8 @@ static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
static inline void lima_vm_put(struct lima_vm *vm)
{
- kref_put(&vm->refcount, lima_vm_release);
+ if (vm)
+ kref_put(&vm->refcount, lima_vm_release);
}
void lima_vm_print(struct lima_vm *vm);
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index e59907e68854..04e1d38d41f7 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -948,7 +948,7 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
struct drm_pending_vblank_event *event;
drm_crtc_vblank_off(crtc);
@@ -1020,7 +1020,7 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
struct drm_pending_vblank_event *event = crtc->state->event;
struct drm_plane *plane = &pipe->plane;
struct drm_plane_state *pstate = plane->state;
@@ -1078,7 +1078,7 @@ static int mcde_display_enable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
u32 val;
/* Enable all VBLANK IRQs */
@@ -1097,7 +1097,7 @@ static void mcde_display_disable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
/* Disable all VBLANK IRQs */
writel(0, mcde->regs + MCDE_IMSCPP);
@@ -1117,7 +1117,7 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
int mcde_display_init(struct drm_device *drm)
{
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
int ret;
static const u32 formats[] = {
DRM_FORMAT_ARGB8888,
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
index 80edd6628979..679c2c4e6d9d 100644
--- a/drivers/gpu/drm/mcde/mcde_drm.h
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -34,6 +34,8 @@ struct mcde {
struct regulator *vana;
};
+#define to_mcde(dev) container_of(dev, struct mcde, drm)
+
bool mcde_dsi_irq(struct mipi_dsi_device *mdsi);
void mcde_dsi_te_request(struct mipi_dsi_device *mdsi);
extern struct platform_driver mcde_dsi_driver;
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index f28cb7a576ba..84f3e2dbd77b 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -72,6 +72,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_panel.h>
@@ -163,7 +164,7 @@ static irqreturn_t mcde_irq(int irq, void *data)
static int mcde_modeset_init(struct drm_device *drm)
{
struct drm_mode_config *mode_config;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
int ret;
if (!mcde->bridge) {
@@ -183,13 +184,13 @@ static int mcde_modeset_init(struct drm_device *drm)
ret = drm_vblank_init(drm, 1);
if (ret) {
dev_err(drm->dev, "failed to init vblank\n");
- goto out_config;
+ return ret;
}
ret = mcde_display_init(drm);
if (ret) {
dev_err(drm->dev, "failed to init display\n");
- goto out_config;
+ return ret;
}
/*
@@ -203,7 +204,7 @@ static int mcde_modeset_init(struct drm_device *drm)
mcde->bridge);
if (ret) {
dev_err(drm->dev, "failed to attach display output bridge\n");
- goto out_config;
+ return ret;
}
drm_mode_config_reset(drm);
@@ -211,19 +212,6 @@ static int mcde_modeset_init(struct drm_device *drm)
drm_fbdev_generic_setup(drm, 32);
return 0;
-
-out_config:
- drm_mode_config_cleanup(drm);
- return ret;
-}
-
-static void mcde_release(struct drm_device *drm)
-{
- struct mcde *mcde = drm->dev_private;
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(mcde);
}
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
@@ -231,7 +219,6 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver mcde_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .release = mcde_release,
.lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
.fops = &drm_fops,
@@ -259,7 +246,9 @@ static int mcde_drm_bind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
int ret;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
ret = component_bind_all(drm->dev, drm);
if (ret) {
@@ -318,35 +307,27 @@ static int mcde_probe(struct platform_device *pdev)
int ret;
int i;
- mcde = kzalloc(sizeof(*mcde), GFP_KERNEL);
- if (!mcde)
- return -ENOMEM;
- mcde->dev = dev;
-
- ret = drm_dev_init(&mcde->drm, &mcde_drm_driver, dev);
- if (ret) {
- kfree(mcde);
- return ret;
- }
+ mcde = devm_drm_dev_alloc(dev, &mcde_drm_driver, struct mcde, drm);
+ if (IS_ERR(mcde))
+ return PTR_ERR(mcde);
drm = &mcde->drm;
- drm->dev_private = mcde;
+ mcde->dev = dev;
platform_set_drvdata(pdev, drm);
	/* Enable continuous updates: this is what Linux's framebuffer expects */
mcde->oneshot_mode = false;
- drm->dev_private = mcde;
/* First obtain and turn on the main power */
mcde->epod = devm_regulator_get(dev, "epod");
if (IS_ERR(mcde->epod)) {
ret = PTR_ERR(mcde->epod);
dev_err(dev, "can't get EPOD regulator\n");
- goto dev_unref;
+ return ret;
}
ret = regulator_enable(mcde->epod);
if (ret) {
dev_err(dev, "can't enable EPOD regulator\n");
- goto dev_unref;
+ return ret;
}
mcde->vana = devm_regulator_get(dev, "vana");
if (IS_ERR(mcde->vana)) {
@@ -497,8 +478,6 @@ regulator_off:
regulator_disable(mcde->vana);
regulator_epod_off:
regulator_disable(mcde->epod);
-dev_unref:
- drm_dev_put(drm);
return ret;
}
@@ -506,13 +485,12 @@ dev_unref:
static int mcde_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
component_master_del(&pdev->dev, &mcde_drm_comp_ops);
clk_disable_unprepare(mcde->mcde_clk);
regulator_disable(mcde->vana);
regulator_disable(mcde->epod);
- drm_dev_put(drm);
return 0;
}
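
Taken together, the mcde changes move the driver to managed DRM allocation: devm_drm_dev_alloc() embeds and reference-counts the drm_device, and drmm_mode_config_init() registers its own teardown, which is why mcde_release(), the out_config unwind label and the final drm_dev_put() all disappear. Reduced to a skeleton, the resulting probe shape looks like this (a sketch under those assumptions, not the full driver):

static int example_probe(struct platform_device *pdev)
{
	struct mcde *mcde;
	int ret;

	/* allocation and DRM teardown are tied to the device lifetime */
	mcde = devm_drm_dev_alloc(&pdev->dev, &mcde_drm_driver,
				  struct mcde, drm);
	if (IS_ERR(mcde))
		return PTR_ERR(mcde);

	ret = drmm_mode_config_init(&mcde->drm);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, &mcde->drm);
	return 0;	/* error paths are plain returns, no unwind labels */
}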
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 7af5ebb0c436..f303369305a3 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1020,7 +1020,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
struct mcde_dsi *d = dev_get_drvdata(dev);
struct device_node *child;
struct drm_panel *panel = NULL;
@@ -1073,10 +1073,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
panel = NULL;
bridge = of_drm_find_bridge(child);
- if (IS_ERR(bridge)) {
- dev_err(dev, "failed to find bridge (%ld)\n",
- PTR_ERR(bridge));
- return PTR_ERR(bridge);
+ if (!bridge) {
+ dev_err(dev, "failed to find bridge\n");
+ return -EINVAL;
}
}
}
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index fa5ffc4fe823..c420f5a3d33b 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -11,6 +11,7 @@ config DRM_MEDIATEK
select DRM_MIPI_DSI
select DRM_PANEL
select MEMORY
+ select MTK_MMSYS
select MTK_SMI
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6fb0d6983a4a..3ae9c810845b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -119,7 +119,10 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_color_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
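
This -EPROBE_DEFER silencing recurs throughout the MediaTek probe paths below (ovl, rdma, dpi, dsi, ddp, hdmi). On kernels that provide dev_err_probe() (v5.9 and later) the whole pattern collapses into one call that logs only when the error is not -EPROBE_DEFER and returns the code unchanged; a sketch of the equivalent:

	ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
				&mtk_disp_color_funcs);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to initialize component\n");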
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 891d80c73e04..28651bc579bc 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -386,7 +386,10 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_ovl_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 0cb848d64206..e04319fedf46 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -294,7 +294,10 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_rdma_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 4f0ce4cd5b8c..d4f0fb7ad312 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -10,7 +10,9 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/types.h>
@@ -20,6 +22,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mtk_dpi_regs.h"
#include "mtk_drm_ddp_comp.h"
@@ -74,6 +77,9 @@ struct mtk_dpi {
enum mtk_dpi_out_yc_map yc_map;
enum mtk_dpi_out_bit_num bit_num;
enum mtk_dpi_out_channel_swap channel_swap;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_gpio;
+ struct pinctrl_state *pins_dpi;
int refcount;
};
@@ -379,6 +385,9 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
if (--dpi->refcount != 0)
return;
+ if (dpi->pinctrl && dpi->pins_gpio)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk);
clk_disable_unprepare(dpi->engine_clk);
@@ -403,6 +412,9 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
goto err_pixel;
}
+ if (dpi->pinctrl && dpi->pins_dpi)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
+
mtk_dpi_enable(dpi);
return 0;
@@ -509,15 +521,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
return 0;
}
-static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
- .destroy = mtk_dpi_encoder_destroy,
-};
-
static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -596,8 +599,8 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_encoder_init(drm_dev, &dpi->encoder, &mtk_dpi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
		dev_err(dev, "Failed to initialize encoder: %d\n", ret);
goto err_unregister;
@@ -705,6 +708,26 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
+ dpi->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(dpi->pinctrl)) {
+ dpi->pinctrl = NULL;
+ dev_dbg(&pdev->dev, "Cannot find pinctrl!\n");
+ }
+ if (dpi->pinctrl) {
+ dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep");
+ if (IS_ERR(dpi->pins_gpio)) {
+ dpi->pins_gpio = NULL;
+ dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n");
+ }
+ if (dpi->pins_gpio)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
+ dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default");
+ if (IS_ERR(dpi->pins_dpi)) {
+ dpi->pins_dpi = NULL;
+ dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n");
+ }
+ }
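
Both the pinctrl handle and the two states are optional here, which is why lookup failures only log at debug level and the driver keeps explicit state handles. Where the "default" and "sleep" states are bound by the device core (dev->pins), the generic PM helpers express the same switching; a sketch of that alternative, not what this patch does:

#include <linux/pinctrl/consumer.h>

/* Sketch: equivalent switching via the pinctrl PM helpers, assuming
 * the core bound "default" and "sleep" states at probe. The patch
 * keeps explicit handles instead so missing states stay non-fatal.
 */
static void mtk_dpi_pins_set(struct device *dev, bool active)
{
	if (active)
		pinctrl_pm_select_default_state(dev);
	else
		pinctrl_pm_select_sleep_state(dev);
}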
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dpi->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(dpi->regs)) {
@@ -716,21 +739,27 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dpi->engine_clk)) {
ret = PTR_ERR(dpi->engine_clk);
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
+
return ret;
}
dpi->pixel_clk = devm_clk_get(dev, "pixel");
if (IS_ERR(dpi->pixel_clk)) {
ret = PTR_ERR(dpi->pixel_clk);
- dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+
return ret;
}
dpi->tvd_clk = devm_clk_get(dev, "pll");
if (IS_ERR(dpi->tvd_clk)) {
ret = PTR_ERR(dpi->tvd_clk);
- dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index fe85e487e477..fe46c4bac64d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -28,7 +29,7 @@
* @enabled: records whether crtc_enable succeeded
* @planes: array of 4 drm_plane structures, one for each overlay plane
* @pending_planes: whether any plane has pending changes to be applied
- * @config_regs: memory mapped mmsys configuration register space
+ * @mmsys_dev: pointer to the mmsys device for configuration registers
* @mutex: handle to one of the ten disp_mutex streams
* @ddp_comp_nr: number of components in ddp_comp
* @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc
@@ -50,7 +51,7 @@ struct mtk_drm_crtc {
u32 cmdq_event;
#endif
- void __iomem *config_regs;
+ struct device *mmsys_dev;
struct mtk_disp_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
@@ -300,9 +301,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
- mtk_ddp_add_comp_to_path(mtk_crtc->config_regs,
- mtk_crtc->ddp_comp[i]->id,
- mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
mtk_disp_mutex_add_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
@@ -360,9 +361,9 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
mtk_crtc->ddp_comp[i]->id);
mtk_disp_mutex_disable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
- mtk_ddp_remove_comp_from_path(mtk_crtc->config_regs,
- mtk_crtc->ddp_comp[i]->id,
- mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
@@ -766,7 +767,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (!mtk_crtc)
return -ENOMEM;
- mtk_crtc->config_regs = priv->config_regs;
+ mtk_crtc->mmsys_dev = priv->mmsys_dev;
mtk_crtc->ddp_comp_nr = path_len;
mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
sizeof(*mtk_crtc->ddp_comp),
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 13035c906035..014c1bbe1df2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -13,26 +13,6 @@
#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
-#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
-#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
-#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
-#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
-#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
-#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
-#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
-#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
-#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
-#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
-#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
-#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
-#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
-#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
-
-#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
-#define DISP_REG_CONFIG_OUT_SEL 0x04c
-#define DISP_REG_CONFIG_DSI_SEL 0x050
-#define DISP_REG_CONFIG_DPI_SEL 0x064
-
#define MT2701_DISP_MUTEX0_MOD0 0x2c
#define MT2701_DISP_MUTEX0_SOF0 0x30
@@ -94,48 +74,6 @@
#define MUTEX_SOF_DSI2 5
#define MUTEX_SOF_DSI3 6
-#define OVL0_MOUT_EN_COLOR0 0x1
-#define OD_MOUT_EN_RDMA0 0x1
-#define OD1_MOUT_EN_RDMA1 BIT(16)
-#define UFOE_MOUT_EN_DSI0 0x1
-#define COLOR0_SEL_IN_OVL0 0x1
-#define OVL1_MOUT_EN_COLOR1 0x1
-#define GAMMA_MOUT_EN_RDMA1 0x1
-#define RDMA0_SOUT_DPI0 0x2
-#define RDMA0_SOUT_DPI1 0x3
-#define RDMA0_SOUT_DSI1 0x1
-#define RDMA0_SOUT_DSI2 0x4
-#define RDMA0_SOUT_DSI3 0x5
-#define RDMA1_SOUT_DPI0 0x2
-#define RDMA1_SOUT_DPI1 0x3
-#define RDMA1_SOUT_DSI1 0x1
-#define RDMA1_SOUT_DSI2 0x4
-#define RDMA1_SOUT_DSI3 0x5
-#define RDMA2_SOUT_DPI0 0x2
-#define RDMA2_SOUT_DPI1 0x3
-#define RDMA2_SOUT_DSI1 0x1
-#define RDMA2_SOUT_DSI2 0x4
-#define RDMA2_SOUT_DSI3 0x5
-#define DPI0_SEL_IN_RDMA1 0x1
-#define DPI0_SEL_IN_RDMA2 0x3
-#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
-#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
-#define DSI0_SEL_IN_RDMA1 0x1
-#define DSI0_SEL_IN_RDMA2 0x4
-#define DSI1_SEL_IN_RDMA1 0x1
-#define DSI1_SEL_IN_RDMA2 0x4
-#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
-#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
-#define COLOR1_SEL_IN_OVL1 0x1
-
-#define OVL_MOUT_EN_RDMA 0x1
-#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
-#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
-#define DSI_SEL_IN_BLS 0x0
-#define DPI_SEL_IN_BLS 0x0
-#define DSI_SEL_IN_RDMA 0x1
struct mtk_disp_mutex {
int id;
@@ -246,200 +184,6 @@ static const struct mtk_ddp_data mt8173_ddp_driver_data = {
.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
};
-static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
- value = OVL0_MOUT_EN_COLOR0;
- } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
- value = OVL_MOUT_EN_RDMA;
- } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD_MOUT_EN_RDMA0;
- } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
- value = UFOE_MOUT_EN_DSI0;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
- value = OVL1_MOUT_EN_COLOR1;
- } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
- value = GAMMA_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD1_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI3;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
- value = COLOR0_SEL_IN_OVL0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI3_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI3_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
- value = COLOR1_SEL_IN_OVL1;
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSI_SEL;
- value = DSI_SEL_IN_BLS;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static void mtk_ddp_sout_sel(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
- writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- writel_relaxed(DSI_SEL_IN_RDMA,
- config_regs + DISP_REG_CONFIG_DSI_SEL);
- writel_relaxed(DPI_SEL_IN_BLS,
- config_regs + DISP_REG_CONFIG_DPI_SEL);
- }
-}
-
-void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- unsigned int addr, value, reg;
-
- value = mtk_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- mtk_ddp_sout_sel(config_regs, cur, next);
-
- value = mtk_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
-}
-
-void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- unsigned int addr, value, reg;
-
- value = mtk_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- value = mtk_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
-}
-
struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id)
{
struct mtk_ddp *ddp = dev_get_drvdata(dev);
@@ -628,7 +372,8 @@ static int mtk_ddp_probe(struct platform_device *pdev)
if (!ddp->data->no_clk) {
ddp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ddp->clk)) {
- dev_err(dev, "Failed to get clock\n");
+ if (PTR_ERR(ddp->clk) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clock\n");
return PTR_ERR(ddp->clk);
}
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
index 827be424a148..6b691a57be4a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -12,13 +12,6 @@ struct regmap;
struct device;
struct mtk_disp_mutex;
-void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next);
-void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next);
-
struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id);
int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex);
void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 0563c6813333..6bd369434d9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -10,6 +10,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/dma-mapping.h>
#include <drm/drm_atomic.h>
@@ -162,7 +163,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
private->mutex_dev = &pdev->dev;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
@@ -179,7 +182,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = component_bind_all(drm->dev, drm);
if (ret)
- goto err_config_cleanup;
+ return ret;
/*
* We currently support two fixed data streams, each optional,
@@ -255,8 +258,6 @@ err_unset_dma_parms:
dma_dev->dma_parms = NULL;
err_component_unbind:
component_unbind_all(drm->dev, drm);
-err_config_cleanup:
- drm_mode_config_cleanup(drm);
return ret;
}
@@ -272,7 +273,6 @@ static void mtk_drm_kms_deinit(struct drm_device *drm)
private->dma_dev->dma_parms = NULL;
component_unbind_all(drm->dev, drm);
- drm_mode_config_cleanup(drm);
}
static const struct file_operations mtk_drm_fops = {
@@ -348,9 +348,7 @@ static int mtk_drm_bind(struct device *dev)
if (ret < 0)
goto err_deinit;
- ret = drm_fbdev_generic_setup(drm, 32);
- if (ret)
- DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+ drm_fbdev_generic_setup(drm, 32);
return 0;
@@ -421,11 +419,22 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
{ }
};
+static const struct of_device_id mtk_drm_of_ids[] = {
+ { .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data},
+ { .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data},
+ { }
+};
+
static int mtk_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *phandle = dev->parent->of_node;
+ const struct of_device_id *of_id;
struct mtk_drm_private *private;
- struct resource *mem;
struct device_node *node;
struct component_match *match = NULL;
int ret;
@@ -436,18 +445,20 @@ static int mtk_drm_probe(struct platform_device *pdev)
return -ENOMEM;
private->data = of_device_get_match_data(dev);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- private->config_regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(private->config_regs)) {
- ret = PTR_ERR(private->config_regs);
- dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
- ret);
- return ret;
+ private->mmsys_dev = dev->parent;
+ if (!private->mmsys_dev) {
+ dev_err(dev, "Failed to get MMSYS device\n");
+ return -ENODEV;
}
+ of_id = of_match_node(mtk_drm_of_ids, phandle);
+ if (!of_id)
+ return -ENODEV;
+
+ private->data = of_id->data;
+
/* Iterate over sibling DISP function blocks */
- for_each_child_of_node(dev->of_node->parent, node) {
+ for_each_child_of_node(phandle->parent, node) {
const struct of_device_id *of_id;
enum mtk_ddp_comp_type comp_type;
int comp_id;
@@ -581,22 +592,11 @@ static int mtk_drm_sys_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
mtk_drm_sys_resume);
-static const struct of_device_id mtk_drm_of_ids[] = {
- { .compatible = "mediatek,mt2701-mmsys",
- .data = &mt2701_mmsys_driver_data},
- { .compatible = "mediatek,mt2712-mmsys",
- .data = &mt2712_mmsys_driver_data},
- { .compatible = "mediatek,mt8173-mmsys",
- .data = &mt8173_mmsys_driver_data},
- { }
-};
-
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
.remove = mtk_drm_remove,
.driver = {
.name = "mediatek-drm",
- .of_match_table = mtk_drm_of_ids,
.pm = &mtk_drm_pm_ops,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 17bc99b9f5d4..b5be63e53176 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -39,7 +39,7 @@ struct mtk_drm_private {
struct device_node *mutex_node;
struct device *mutex_dev;
- void __iomem *config_regs;
+ struct device *mmsys_dev;
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
const struct mtk_mmsys_driver_data *data;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index b04a3c2b111e..f8fd8b98c30e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -224,6 +224,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
expected = sg_dma_address(sg->sgl);
for_each_sg(sg->sgl, s, sg->nents, i) {
+ if (!sg_dma_len(s))
+ break;
+
if (sg_dma_address(s) != expected) {
DRM_ERROR("sg_table is not contiguous");
ret = -EINVAL;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 0ede69830a9d..270bf22c98fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -22,6 +22,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mtk_drm_ddp_comp.h"
@@ -787,15 +788,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
dsi->enabled = false;
}
-static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
- .destroy = mtk_dsi_encoder_destroy,
-};
-
static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -888,8 +880,8 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
{
int ret;
- ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
if (ret) {
		DRM_ERROR("Failed to initialize encoder\n");
return ret;
@@ -1194,14 +1186,18 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
- dev_err(dev, "Failed to get digital clock: %d\n", ret);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get digital clock: %d\n", ret);
goto err_unregister_host;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index ff43a3d80410..5feb760617cb 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -311,14 +311,10 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
u8 checksum;
int ctrl_frame_en = 0;
- frame_type = *buffer;
- buffer += 1;
- frame_ver = *buffer;
- buffer += 1;
- frame_len = *buffer;
- buffer += 1;
- checksum = *buffer;
- buffer += 1;
+ frame_type = *buffer++;
+ frame_ver = *buffer++;
+ frame_len = *buffer++;
+ checksum = *buffer++;
frame_data = buffer;
dev_dbg(hdmi->dev,
@@ -982,7 +978,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
- u8 buffer[17];
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
ssize_t err;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
@@ -1008,7 +1004,7 @@ static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
const char *product)
{
struct hdmi_spd_infoframe frame;
- u8 buffer[29];
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE];
ssize_t err;
err = hdmi_spd_infoframe_init(&frame, vendor, product);
@@ -1031,7 +1027,7 @@ static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
{
struct hdmi_audio_infoframe frame;
- u8 buffer[14];
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
ssize_t err;
err = hdmi_audio_infoframe_init(&frame);
@@ -1474,7 +1470,9 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
ret = mtk_hdmi_get_all_clk(hdmi, np);
if (ret) {
- dev_err(dev, "Failed to get clocks: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clocks: %d\n", ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index e4d34484ecc8..8cee2591e728 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -88,6 +88,44 @@ static const struct phy_ops mtk_mipi_tx_ops = {
.owner = THIS_MODULE,
};
+static void mtk_mipi_tx_get_calibration_datal(struct mtk_mipi_tx *mipi_tx)
+{
+ struct nvmem_cell *cell;
+ size_t len;
+ u32 *buf;
+
+ cell = nvmem_cell_get(mipi_tx->dev, "calibration-data");
+ if (IS_ERR(cell)) {
+		dev_info(mipi_tx->dev, "can't get calibration data cell, ignore it\n");
+ return;
+ }
+ buf = (u32 *)nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(buf)) {
+		dev_info(mipi_tx->dev, "can't read calibration data, ignore it\n");
+ return;
+ }
+
+ if (len < 3 * sizeof(u32)) {
+ dev_info(mipi_tx->dev, "invalid calibration data\n");
+ kfree(buf);
+ return;
+ }
+
+ mipi_tx->rt_code[0] = ((buf[0] >> 6 & 0x1f) << 5) |
+ (buf[0] >> 11 & 0x1f);
+ mipi_tx->rt_code[1] = ((buf[1] >> 27 & 0x1f) << 5) |
+ (buf[0] >> 1 & 0x1f);
+ mipi_tx->rt_code[2] = ((buf[1] >> 17 & 0x1f) << 5) |
+ (buf[1] >> 22 & 0x1f);
+ mipi_tx->rt_code[3] = ((buf[1] >> 7 & 0x1f) << 5) |
+ (buf[1] >> 12 & 0x1f);
+ mipi_tx->rt_code[4] = ((buf[2] >> 27 & 0x1f) << 5) |
+ (buf[1] >> 2 & 0x1f);
+ kfree(buf);
+}
+
static int mtk_mipi_tx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -125,6 +163,20 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return ret;
}
+ ret = of_property_read_u32(dev->of_node, "drive-strength-microamp",
+ &mipi_tx->mipitx_drive);
+	/* If "drive-strength-microamp" is absent, default to 4600 uA (code 0x8) */
+ if (ret < 0)
+ mipi_tx->mipitx_drive = 4600;
+
+	/* clamp mipitx_drive into the valid 3000..6000 uA range */
+	if (mipi_tx->mipitx_drive > 6000 || mipi_tx->mipitx_drive < 3000) {
+		dev_warn(dev, "invalid drive-strength-microamp %d, clamping to 3000..6000\n",
+			 mipi_tx->mipitx_drive);
+		mipi_tx->mipitx_drive = clamp_val(mipi_tx->mipitx_drive, 3000,
+						  6000);
+ }
+
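
The accepted range maps linearly onto the RG_DSI_HSTX_LDO_REF_SEL field programmed later in mtk_mt8183_mipi_tx.c: code = (drive - 3000) / 200, so the 4600 uA default yields 0x8 and 3000..6000 uA spans codes 0x0..0xf. As a worked check (the helper name is illustrative only):

/* (4600 - 3000) / 200 == 8 (the default); (6000 - 3000) / 200 == 0xf */
static u32 mipitx_drive_to_code(u32 drive_ua)
{
	return (drive_ua - 3000) / 200;
}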
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
@@ -160,6 +212,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
mipi_tx->dev = dev;
+ mtk_mipi_tx_get_calibration_datal(mipi_tx);
+
return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
mipi_tx->pll);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
index 413f35d86219..c76f07c3fdeb 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
@@ -12,9 +12,11 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/slab.h>
struct mtk_mipitx_data {
const u32 mppll_preserve;
@@ -27,6 +29,8 @@ struct mtk_mipi_tx {
struct device *dev;
void __iomem *regs;
u32 data_rate;
+ u32 mipitx_drive;
+ u32 rt_code[5];
const struct mtk_mipitx_data *driver_data;
struct clk_hw pll_hw;
struct clk *pll;
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
index 91f08a351fd0..9f3e55aeebb2 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
@@ -17,6 +17,9 @@
#define RG_DSI_BG_CORE_EN BIT(7)
#define RG_DSI_PAD_TIEL_SEL BIT(8)
+#define MIPITX_VOLTAGE_SEL 0x0010
+#define RG_DSI_HSTX_LDO_REF_SEL (0xf << 6)
+
#define MIPITX_PLL_PWR 0x0028
#define MIPITX_PLL_CON0 0x002c
#define MIPITX_PLL_CON1 0x0030
@@ -25,6 +28,7 @@
#define MIPITX_PLL_CON4 0x003c
#define RG_DSI_PLL_IBIAS (3 << 10)
+#define MIPITX_D2P_RTCODE 0x0100
#define MIPITX_D2_SW_CTL_EN 0x0144
#define MIPITX_D0_SW_CTL_EN 0x0244
#define MIPITX_CK_CKMODE_EN 0x0328
@@ -105,6 +109,24 @@ static const struct clk_ops mtk_mipi_tx_pll_ops = {
.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
};
+static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx)
+{
+ int i, j;
+
+ for (i = 0; i < 5; i++) {
+ if ((mipi_tx->rt_code[i] & 0x1f) == 0)
+ mipi_tx->rt_code[i] |= 0x10;
+
+ if ((mipi_tx->rt_code[i] >> 5 & 0x1f) == 0)
+ mipi_tx->rt_code[i] |= 0x10 << 5;
+
+ for (j = 0; j < 10; j++)
+ mtk_mipi_tx_update_bits(mipi_tx,
+ MIPITX_D2P_RTCODE * (i + 1) + j * 4,
+ 1, mipi_tx->rt_code[i] >> j & 1);
+ }
+}
+
static void mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -123,6 +145,12 @@ static void mtk_mipi_tx_power_on_signal(struct phy *phy)
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_VOLTAGE_SEL,
+ RG_DSI_HSTX_LDO_REF_SEL,
+ (mipi_tx->mipitx_drive - 3000) / 200 << 6);
+
+ mtk_mipi_tx_config_calibration_data(mipi_tx);
+
mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
}
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index b5f5eb7b4bb9..4c5aafcec799 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -11,6 +11,7 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/sys_soc.h>
#include <linux/platform_device.h>
#include <linux/soc/amlogic/meson-canvas.h>
@@ -183,6 +184,24 @@ static void meson_remove_framebuffers(void)
kfree(ap);
}
+struct meson_drm_soc_attr {
+ struct meson_drm_soc_limits limits;
+ const struct soc_device_attribute *attrs;
+};
+
+static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
+	/* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1.65GHz */
+ {
+ .limits = {
+ .max_hdmi_phy_freq = 1650000,
+ },
+ .attrs = (const struct soc_device_attribute []) {
+ { .soc_id = "GXL (S805*)", },
+ { /* sentinel */ },
+ }
+ },
+};
+
static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -191,7 +210,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
struct drm_device *drm;
struct resource *res;
void __iomem *regs;
- int ret;
+ int ret, i;
/* Checks if an output connector is available */
if (!meson_vpu_has_available_connectors(dev)) {
@@ -281,10 +300,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
+ /* Assign limits per soc revision/package */
+	for (i = 0; i < ARRAY_SIZE(meson_drm_soc_attrs); ++i) {
+ if (soc_device_match(meson_drm_soc_attrs[i].attrs)) {
+ priv->limits = &meson_drm_soc_attrs[i].limits;
+ break;
+ }
+ }
+
/* Remove early framebuffers (ie. simplefb) */
meson_remove_framebuffers();
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ goto free_drm;
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
drm->mode_config.funcs = &meson_mode_config_funcs;
@@ -379,7 +408,6 @@ static void meson_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_irq_uninstall(drm);
drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
drm_dev_put(drm);
}
@@ -412,9 +440,7 @@ static int __maybe_unused meson_drv_pm_resume(struct device *dev)
if (priv->afbcd.ops)
priv->afbcd.ops->init(priv);
- drm_mode_config_helper_resume(priv->drm);
-
- return 0;
+ return drm_mode_config_helper_resume(priv->drm);
}
static int compare_of(struct device *dev, void *data)
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 04fdf3826643..5b23704a80d6 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -30,6 +30,10 @@ struct meson_drm_match_data {
struct meson_afbcd_ops *afbcd_ops;
};
+struct meson_drm_soc_limits {
+ unsigned int max_hdmi_phy_freq;
+};
+
struct meson_drm {
struct device *dev;
enum vpu_compatible compat;
@@ -48,6 +52,8 @@ struct meson_drm {
struct drm_plane *primary_plane;
struct drm_plane *overlay_plane;
+ const struct meson_drm_soc_limits *limits;
+
/* Components Data */
struct {
bool osd1_enabled;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 64cb6ba4bc42..24a12c453095 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -695,7 +695,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
- return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
+ return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
}
/* Encoder */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index d5cbc47835bf..35338ed18209 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -223,7 +223,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
OSD_COLOR_MATRIX_16_RGB565;
break;
- };
+ }
}
switch (fb->format->format) {
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index fdf26dac9fa8..0eb86943a358 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -725,6 +725,13 @@ meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
/* In DMT mode, path after PLL is always /10 */
freq *= 10;
+ /* Check against soc revision/package limits */
+ if (priv->limits) {
+ if (priv->limits->max_hdmi_phy_freq &&
+ freq > priv->limits->max_hdmi_phy_freq)
+ return MODE_CLOCK_HIGH;
+ }
+
if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
return MODE_OK;
@@ -762,7 +769,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq,
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
unsigned int vclk_freq)
{
int i;
@@ -770,6 +777,13 @@ meson_vclk_vic_supported_freq(unsigned int phy_freq,
DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
phy_freq, vclk_freq);
+ /* Check against soc revision/package limits */
+ if (priv->limits) {
+ if (priv->limits->max_hdmi_phy_freq &&
+ phy_freq > priv->limits->max_hdmi_phy_freq)
+ return MODE_CLOCK_HIGH;
+ }
+
for (i = 0 ; params[i].pixel_freq ; ++i) {
DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
i, params[i].pixel_freq,
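A worked example of the limit check above, with frequencies in kHz as used throughout meson_vclk.c (mode timings assumed from CEA-861):

    /* max_hdmi_phy_freq = 1650000 kHz on S805X/S805Y:
     * 1080p60: pixel 148500 -> phy 1485000 <= 1650000 -> mode accepted
     * 2160p30: pixel 297000 -> phy 2970000 >  1650000 -> MODE_CLOCK_HIGH
     */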
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index aed0ab2efa71..60617aaf18dd 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -25,7 +25,8 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+ unsigned int vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int phy_freq, unsigned int vclk_freq,
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index d491edd317ff..aebc9ce43d55 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -260,7 +260,7 @@ int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
struct drm_gem_object *obj;
struct drm_gem_vram_object *gbo = NULL;
int ret;
@@ -307,7 +307,7 @@ err_drm_gem_object_put_unlocked:
int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
- struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
/* Our origin is at (64,64) */
x += 64;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 7a5bad2f57d7..c2f0e4b40b05 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -77,6 +77,8 @@ static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mgag200_driver_unload;
+ drm_fbdev_generic_setup(dev, 0);
+
return 0;
err_mgag200_driver_unload:
@@ -118,7 +120,7 @@ int mgag200_driver_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
unsigned long pg_align;
if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 9691252d6233..d9b7e96b214f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -104,11 +104,6 @@ struct mga_crtc {
bool enabled;
};
-struct mga_mode_info {
- bool mode_config_initialized;
- struct mga_crtc *crtc;
-};
-
struct mga_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -160,17 +155,14 @@ struct mga_device {
void __iomem *rmmio;
struct mga_mc mc;
- struct mga_mode_info mode_info;
struct mga_cursor cursor;
size_t vram_fb_available;
bool suspended;
- int num_crtc;
enum mga_type type;
int has_sdram;
- struct drm_display_mode mode;
int bpp_shifts[4];
@@ -179,9 +171,15 @@ struct mga_device {
/* SE model number stored in reg 0x1e24 */
u32 unique_rev_id;
+ struct mga_connector connector;
struct drm_encoder encoder;
};
+static inline struct mga_device *to_mga_device(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
static inline enum mga_type
mgag200_type_from_driver_data(kernel_ulong_t driver_data)
{
@@ -196,7 +194,6 @@ mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
/* mgag200_mode.c */
int mgag200_modeset_init(struct mga_device *mdev);
-void mgag200_modeset_fini(struct mga_device *mdev);
/* mgag200_main.c */
int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
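With to_mga_device() above, every dev->dev_private cast in the driver funnels through one helper, so a later change of representation only touches a single function. For instance, if struct mga_device ever embedded its drm_device (hypothetical here, not part of this patch), only the helper would change:

    /* Hypothetical follow-up, assuming a "struct drm_device base" member: */
    static inline struct mga_device *to_mga_device(struct drm_device *dev)
    {
            return container_of(dev, struct mga_device, base);
    }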
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 9f4635916d32..09731e614e46 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -61,34 +61,34 @@ static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
static void mga_gpio_setsda(void *data, int state)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
mga_i2c_set(mdev, i2c->data, state);
}
static void mga_gpio_setscl(void *data, int state)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
mga_i2c_set(mdev, i2c->clock, state);
}
static int mga_gpio_getsda(void *data)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
}
static int mga_gpio_getscl(void *data)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
}
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
{
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
struct mga_i2c_chan *i2c;
int ret;
int data, clock;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index e278b6a547bd..86df799fd38c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -10,15 +10,8 @@
#include <linux/pci.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
#include "mgag200_drv.h"
-static const struct drm_mode_config_funcs mga_mode_funcs = {
- .fb_create = drm_gem_fb_create
-};
-
static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
{
int offset;
@@ -66,51 +59,54 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
/* Map the framebuffer from the card and configure the core */
static int mga_vram_init(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
void __iomem *mem;
/* BAR 0 is VRAM */
- mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
- mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+ mdev->mc.vram_base = pci_resource_start(dev->pdev, 0);
+ mdev->mc.vram_window = pci_resource_len(dev->pdev, 0);
- if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
- "mgadrmfb_vram")) {
+ if (!devm_request_mem_region(dev->dev, mdev->mc.vram_base,
+ mdev->mc.vram_window, "mgadrmfb_vram")) {
DRM_ERROR("can't reserve VRAM\n");
return -ENXIO;
}
- mem = pci_iomap(mdev->dev->pdev, 0, 0);
+ mem = pci_iomap(dev->pdev, 0, 0);
if (!mem)
return -ENOMEM;
mdev->mc.vram_size = mga_probe_vram(mdev, mem);
- pci_iounmap(mdev->dev->pdev, mem);
+ pci_iounmap(dev->pdev, mem);
return 0;
}
-static int mgag200_device_init(struct drm_device *dev,
- uint32_t flags)
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
{
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev;
int ret, option;
+ mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
+ if (mdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)mdev;
+ mdev->dev = dev;
+
mdev->flags = mgag200_flags_from_driver_data(flags);
mdev->type = mgag200_type_from_driver_data(flags);
- /* Hardcode the number of CRTCs to 1 */
- mdev->num_crtc = 1;
-
pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
mdev->has_sdram = !(option & (1 << 14));
/* BAR 0 is the framebuffer, BAR 1 contains registers */
- mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
- mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+ mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
+ mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
- if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
- "mgadrmfb_mmio")) {
- DRM_ERROR("can't reserve mmio registers\n");
+ if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
+ mdev->rmmio_size, "mgadrmfb_mmio")) {
+ drm_err(dev, "can't reserve mmio registers\n");
return -ENOMEM;
}
@@ -121,90 +117,43 @@ static int mgag200_device_init(struct drm_device *dev,
/* stash G200 SE model number for later use */
if (IS_G200_SE(mdev)) {
mdev->unique_rev_id = RREG32(0x1e24);
- DRM_DEBUG("G200 SE unique revision id is 0x%x\n",
- mdev->unique_rev_id);
+ drm_dbg(dev, "G200 SE unique revision id is 0x%x\n",
+ mdev->unique_rev_id);
}
ret = mga_vram_init(mdev);
if (ret)
return ret;
- mdev->bpp_shifts[0] = 0;
- mdev->bpp_shifts[1] = 1;
- mdev->bpp_shifts[2] = 0;
- mdev->bpp_shifts[3] = 2;
- return 0;
-}
-
-/*
- * Functions here will be called by the core once it's bound the driver to
- * a PCI device
- */
-
-
-int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct mga_device *mdev;
- int r;
-
- mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
- if (mdev == NULL)
- return -ENOMEM;
- dev->dev_private = (void *)mdev;
- mdev->dev = dev;
-
- r = mgag200_device_init(dev, flags);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
- return r;
- }
- r = mgag200_mm_init(mdev);
- if (r)
+ ret = mgag200_mm_init(mdev);
+ if (ret)
goto err_mm;
- drm_mode_config_init(dev);
- dev->mode_config.funcs = (void *)&mga_mode_funcs;
- if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
- dev->mode_config.preferred_depth = 16;
- else
- dev->mode_config.preferred_depth = 32;
- dev->mode_config.prefer_shadow = 1;
-
- r = mgag200_modeset_init(mdev);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
- goto err_modeset;
+ ret = mgag200_modeset_init(mdev);
+ if (ret) {
+ drm_err(dev, "Fatal error during modeset init: %d\n", ret);
+ goto err_mgag200_mm_fini;
}
- r = mgag200_cursor_init(mdev);
- if (r)
- dev_warn(&dev->pdev->dev,
- "Could not initialize cursors. Not doing hardware cursors.\n");
-
- r = drm_fbdev_generic_setup(mdev->dev, 0);
- if (r)
- goto err_modeset;
+ ret = mgag200_cursor_init(mdev);
+ if (ret)
+ drm_err(dev, "Could not initialize cursors. Not doing hardware cursors.\n");
return 0;
-err_modeset:
- drm_mode_config_cleanup(dev);
- mgag200_cursor_fini(mdev);
+err_mgag200_mm_fini:
mgag200_mm_fini(mdev);
err_mm:
dev->dev_private = NULL;
-
- return r;
+ return ret;
}
void mgag200_driver_unload(struct drm_device *dev)
{
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
if (mdev == NULL)
return;
- mgag200_modeset_fini(mdev);
- drm_mode_config_cleanup(dev);
mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
dev->dev_private = NULL;
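The rewritten mgag200_driver_load() above pairs devm_kzalloc() with goto-based unwinding: device-managed allocations are released automatically on detach, so the error labels only undo the steps devm cannot track, in reverse order. A compact sketch of the idiom with hypothetical example_* helpers:

    static int example_load(struct drm_device *dev)
    {
            struct example *p;
            int ret;

            p = devm_kzalloc(dev->dev, sizeof(*p), GFP_KERNEL);
            if (!p)
                    return -ENOMEM;     /* nothing to unwind yet */
            dev->dev_private = p;

            ret = example_mm_init(p);
            if (ret)
                    goto err_clear;

            ret = example_modeset_init(p);
            if (ret)
                    goto err_mm_fini;   /* undo mm init, then fall through */

            return 0;

    err_mm_fini:
            example_mm_fini(p);
    err_clear:
            dev->dev_private = NULL;
            return ret;
    }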
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d90e83959fca..5f4ac36a9776 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -28,7 +29,7 @@
static void mga_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
struct drm_framebuffer *fb = crtc->primary->fb;
u16 *r_ptr, *g_ptr, *b_ptr;
int i;
@@ -728,7 +729,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
static void mga_g200wb_prepare(struct drm_crtc *crtc)
{
- struct mga_device *mdev = crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
u8 tmp;
int iter_max;
@@ -783,7 +784,7 @@ static void mga_g200wb_prepare(struct drm_crtc *crtc)
static void mga_g200wb_commit(struct drm_crtc *crtc)
{
u8 tmp;
- struct mga_device *mdev = crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
/* 1- The first step is to ensure that the vrsten and hrsten are set */
WREG8(MGAREG_CRTCEXT_INDEX, 1);
@@ -833,7 +834,7 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
*/
static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
- struct mga_device *mdev = crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
u32 addr;
int count;
u8 crtcext0;
@@ -902,7 +903,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
const struct drm_framebuffer *fb = crtc->primary->fb;
int hdisplay, hsyncstart, hsyncend, htotal;
int vdisplay, vsyncstart, vsyncend, vtotal;
@@ -1135,9 +1136,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
WREG8(MGA_MISC_OUT, misc);
- if (adjusted_mode)
- memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
-
mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
/* reset tagfifo */
@@ -1263,7 +1261,7 @@ static int mga_resume(struct drm_crtc *crtc)
static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
u8 seq1 = 0, crtcext1 = 0;
switch (mode) {
@@ -1317,7 +1315,7 @@ static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
static void mga_crtc_prepare(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
u8 tmp;
/* mga_resume(crtc);*/
@@ -1353,7 +1351,7 @@ static void mga_crtc_prepare(struct drm_crtc *crtc)
static void mga_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
u8 tmp;
@@ -1433,6 +1431,7 @@ static const struct drm_crtc_helper_funcs mga_helper_funcs = {
/* CRTC setup */
static void mga_crtc_init(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
struct mga_crtc *mga_crtc;
mga_crtc = kzalloc(sizeof(struct mga_crtc) +
@@ -1442,14 +1441,17 @@ static void mga_crtc_init(struct mga_device *mdev)
if (mga_crtc == NULL)
return;
- drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
+ drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
- mdev->mode_info.crtc = mga_crtc;
drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
}
+/*
+ * Connector
+ */
+
static int mga_vga_get_modes(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1495,7 +1497,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
int bpp = 32;
if (IS_G200_SE(mdev)) {
@@ -1574,7 +1576,6 @@ static void mga_connector_destroy(struct drm_connector *connector)
struct mga_connector *mga_connector = to_mga_connector(connector);
mgag200_i2c_destroy(mga_connector->i2c);
drm_connector_cleanup(connector);
- kfree(connector);
}
static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
@@ -1588,70 +1589,96 @@ static const struct drm_connector_funcs mga_vga_connector_funcs = {
.destroy = mga_connector_destroy,
};
-static struct drm_connector *mga_vga_init(struct drm_device *dev)
+static int mgag200_vga_connector_init(struct mga_device *mdev)
{
- struct drm_connector *connector;
- struct mga_connector *mga_connector;
-
- mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
- if (!mga_connector)
- return NULL;
-
- connector = &mga_connector->base;
- mga_connector->i2c = mgag200_i2c_create(dev);
- if (!mga_connector->i2c)
- DRM_ERROR("failed to add ddc bus\n");
+ struct drm_device *dev = mdev->dev;
+ struct mga_connector *mconnector = &mdev->connector;
+ struct drm_connector *connector = &mconnector->base;
+ struct mga_i2c_chan *i2c;
+ int ret;
- drm_connector_init_with_ddc(dev, connector,
- &mga_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &mga_connector->i2c->adapter);
+ i2c = mgag200_i2c_create(dev);
+ if (!i2c)
+ drm_warn(dev, "failed to add DDC bus\n");
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mga_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret)
+ goto err_mgag200_i2c_destroy;
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
- drm_connector_register(connector);
+ mconnector->i2c = i2c;
- return connector;
+ return 0;
+
+err_mgag200_i2c_destroy:
+ mgag200_i2c_destroy(i2c);
+ return ret;
}
+static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create
+};
+
+static unsigned int mgag200_preferred_depth(struct mga_device *mdev)
+{
+ if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
+ return 16;
+ else
+ return 32;
+}
int mgag200_modeset_init(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
struct drm_encoder *encoder = &mdev->encoder;
- struct drm_connector *connector;
+ struct drm_connector *connector = &mdev->connector.base;
int ret;
- mdev->mode_info.mode_config_initialized = true;
+ mdev->bpp_shifts[0] = 0;
+ mdev->bpp_shifts[1] = 1;
+ mdev->bpp_shifts[2] = 0;
+ mdev->bpp_shifts[3] = 2;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret) {
+ drm_err(dev, "drmm_mode_config_init() failed, error %d\n",
+ ret);
+ return ret;
+ }
+
+ dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+ dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
- mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
- mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+ dev->mode_config.preferred_depth = mgag200_preferred_depth(mdev);
+ dev->mode_config.prefer_shadow = 1;
- mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+ dev->mode_config.fb_base = mdev->mc.vram_base;
+
+ dev->mode_config.funcs = &mgag200_mode_config_funcs;
mga_crtc_init(mdev);
- ret = drm_simple_encoder_init(mdev->dev, encoder,
- DRM_MODE_ENCODER_DAC);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
if (ret) {
- drm_err(mdev->dev,
+ drm_err(dev,
"drm_simple_encoder_init() failed, error %d\n",
ret);
return ret;
}
encoder->possible_crtcs = 0x1;
- connector = mga_vga_init(mdev->dev);
- if (!connector) {
- DRM_ERROR("mga_vga_init failed\n");
- return -1;
+ ret = mgag200_vga_connector_init(mdev);
+ if (ret) {
+ drm_err(dev,
+ "mgag200_vga_connector_init() failed, error %d\n",
+ ret);
+ return ret;
}
drm_connector_attach_encoder(connector, encoder);
return 0;
}
-
-void mgag200_modeset_fini(struct mga_device *mdev)
-{
-
-}
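A worked example of mgag200_preferred_depth() from the hunk above: a G200 SE reporting under 2 MiB of usable framebuffer memory advertises a preferred depth of 16 bpp, halving the footprint of whatever mode is ultimately chosen; every other chip and configuration keeps the 32 bpp default.

    /* 1024 x 768 at 32 bpp needs ~3 MiB, but only ~1.5 MiB at 16 bpp,
     * which fits the small-VRAM G200 SE parts this check targets
     * (resolution figures are illustrative).
     */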
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 075ecce4b5e0..8cae2ca4af6b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -148,27 +148,19 @@ reset_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
{
struct drm_device *dev;
- int ret;
if (!minor)
- return 0;
+ return;
dev = minor->dev;
- ret = drm_debugfs_create_files(a5xx_debugfs_list,
- ARRAY_SIZE(a5xx_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(a5xx_debugfs_list,
+ ARRAY_SIZE(a5xx_debugfs_list),
+ minor->debugfs_root, minor);
debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
&reset_fops);
-
- return 0;
}
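The a5xx conversion above follows a tree-wide change in this series: drm_debugfs_create_files() no longer returns an error, on the rationale that a debugfs setup failure should never fail driver init. The resulting caller shape, sketched with a hypothetical file list:

    /* Sketch: debugfs registration is fire-and-forget after this series. */
    void example_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
    {
            if (!minor)
                    return;

            drm_debugfs_create_files(example_debugfs_list,
                                     ARRAY_SIZE(example_debugfs_list),
                                     minor->debugfs_root, minor);
    }

The mdp5 and msm_debugfs hunks below apply the same transformation.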
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 833468ce6b6d..54868d4e3958 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -41,7 +41,7 @@ struct a5xx_gpu {
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
#ifdef CONFIG_DEBUG_FS
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
/*
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 211f5de99a44..9aba2910d83a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -170,7 +170,7 @@ struct dpu_global_state
*
* Main debugfs documentation is located at,
*
- * Documentation/filesystems/debugfs.txt
+ * Documentation/filesystems/debugfs.rst
*
* @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
* @dpu_debugfs_create_regset32: Create 32-bit register dump file
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 47b989834af1..c902c6503675 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -259,17 +259,9 @@ static struct drm_info_list mdp5_debugfs_list[] = {
static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(mdp5_debugfs_list,
- ARRAY_SIZE(mdp5_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list),
+ minor->debugfs_root, minor);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1c74381a4fc9..ee2e270f464c 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -214,31 +214,20 @@ int msm_debugfs_late_init(struct drm_device *dev)
return ret;
}
-int msm_debugfs_init(struct drm_minor *minor)
+void msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct msm_drm_private *priv = dev->dev_private;
- int ret;
-
- ret = drm_debugfs_create_files(msm_debugfs_list,
- ARRAY_SIZE(msm_debugfs_list),
- minor->debugfs_root, minor);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms && priv->kms->funcs->debugfs_init) {
- ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
- if (ret)
- return ret;
- }
-
- return ret;
+ if (priv->kms && priv->kms->funcs->debugfs_init)
+ priv->kms->funcs->debugfs_init(priv->kms, minor);
}
#endif
diff --git a/drivers/gpu/drm/msm/msm_debugfs.h b/drivers/gpu/drm/msm/msm_debugfs.h
index 2b91f8c178ad..ef58f66abbb3 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.h
+++ b/drivers/gpu/drm/msm/msm_debugfs.h
@@ -8,7 +8,7 @@
#define __MSM_DEBUGFS_H__
#ifdef CONFIG_DEBUG_FS
-int msm_debugfs_init(struct drm_minor *minor);
+void msm_debugfs_init(struct drm_minor *minor);
#endif
#endif /* __MSM_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index be5bc2e8425c..6ccae4ba905c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -57,7 +57,7 @@ struct msm_gpu_funcs {
void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p);
/* for generation specific debugfs: */
- int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
+ void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
unsigned long (*gpu_busy)(struct msm_gpu *gpu);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 7a62fa04272d..49e57fba4925 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,8 +1,10 @@
+NOUVEAU_PATH ?= $(srctree)
+
# SPDX-License-Identifier: MIT
-ccflags-y += -I $(srctree)/$(src)/include
-ccflags-y += -I $(srctree)/$(src)/include/nvkm
-ccflags-y += -I $(srctree)/$(src)/nvkm
-ccflags-y += -I $(srctree)/$(src)
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)
# NVKM - HW resource manager
#- code also used by various userspace tools/tests
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 1f08de4241e0..2de589caf508 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -605,15 +605,16 @@ static int
nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
struct nv04_display *disp = nv04_display(crtc->dev);
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+ nouveau_bo_ref(nvbo, &disp->image[nv_crtc->index]);
}
return ret;
@@ -822,8 +823,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+ struct nouveau_bo *nvbo;
struct drm_framebuffer *drm_fb;
- struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -839,13 +840,12 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
*/
if (atomic) {
drm_fb = passed_fb;
- fb = nouveau_framebuffer(passed_fb);
} else {
drm_fb = crtc->primary->fb;
- fb = nouveau_framebuffer(crtc->primary->fb);
}
- nv_crtc->fb.offset = fb->nvbo->bo.offset;
+ nvbo = nouveau_gem_object(drm_fb->obj[0]);
+ nv_crtc->fb.offset = nvbo->bo.offset;
if (nv_crtc->lut.depth != drm_fb->format->depth) {
nv_crtc->lut.depth = drm_fb->format->depth;
@@ -1143,8 +1143,9 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
- struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct nouveau_bo *old_bo = nouveau_gem_object(old_fb->obj[0]);
+ struct nouveau_bo *new_bo = nouveau_gem_object(fb->obj[0]);
struct nv04_page_flip_state *s;
struct nouveau_channel *chan;
struct nouveau_cli *cli;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 44ee82d0c9b6..0f4ebefed1fd 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -30,6 +30,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_bo.h"
+#include "nouveau_gem.h"
#include <nvif/if0004.h>
@@ -52,13 +53,13 @@ nv04_display_fini(struct drm_device *dev, bool suspend)
/* Un-pin FB and cursors so they'll be evicted to system memory. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_framebuffer *nouveau_fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct nouveau_bo *nvbo;
- nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
- if (!nouveau_fb || !nouveau_fb->nvbo)
+ if (!fb || !fb->obj[0])
continue;
-
- nouveau_bo_unpin(nouveau_fb->nvbo);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ nouveau_bo_unpin(nvbo);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -104,13 +105,13 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
/* Re-pin FB/cursors. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_framebuffer *nouveau_fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct nouveau_bo *nvbo;
- nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
- if (!nouveau_fb || !nouveau_fb->nvbo)
+ if (!fb || !fb->obj[0])
continue;
-
- ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index a3a0a73ae8ab..6248fd1dbc6d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -31,6 +31,7 @@
#include "nouveau_bo.h"
#include "nouveau_connector.h"
#include "nouveau_display.h"
+#include "nouveau_gem.h"
#include "nvreg.h"
#include "disp.h"
@@ -120,9 +121,9 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nvif_object *dev = &drm->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cur = nv_plane->cur;
+ struct nouveau_bo *nvbo;
bool flip = nv_plane->flip;
int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
@@ -140,17 +141,18 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (ret)
return ret;
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
- nv_plane->cur = nv_fb->nvbo;
+ nv_plane->cur = nvbo;
nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0);
- nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+ nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nvbo->bo.offset);
nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
@@ -172,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (format & NV_PVIDEO_FORMAT_PLANAR) {
nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
- nv_fb->nvbo->bo.offset + fb->offsets[1]);
+ nvbo->bo.offset + fb->offsets[1]);
}
nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format | fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_STOP, 0);
@@ -368,8 +370,8 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_bo *cur = nv_plane->cur;
+ struct nouveau_bo *nvbo;
uint32_t overlay = 1;
int brightness = (nv_plane->brightness - 512) * 62 / 512;
int ret, i;
@@ -384,11 +386,12 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (ret)
return ret;
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
- nv_plane->cur = nv_fb->nvbo;
+ nv_plane->cur = nvbo;
nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
@@ -396,7 +399,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
for (i = 0; i < 2; i++) {
nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
- nv_fb->nvbo->bo.offset);
+ nvbo->bo.offset);
nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i,
fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index ee782151d332..511258bfbcbc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -263,7 +263,8 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
struct nv50_disp_base_channel_dma_v0 args = {
.head = head,
};
- struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nouveau_display *disp = nouveau_display(drm->dev);
+ struct nv50_disp *disp50 = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
@@ -273,9 +274,9 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
&oclass, head, &args, sizeof(args),
- disp->sync->bo.offset, &wndw->wndw);
+ disp50->sync->bo.offset, &wndw->wndw);
if (ret) {
NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index ff94f3f6f264..99157dc94d23 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -2,6 +2,7 @@
#define __NV50_KMS_CORE_H__
#include "disp.h"
#include "atom.h"
+#include <nouveau_encoder.h>
struct nv50_core {
const struct nv50_core_func *func;
@@ -15,6 +16,7 @@ void nv50_core_del(struct nv50_core **);
struct nv50_core_func {
void (*init)(struct nv50_core *);
void (*ntfy_init)(struct nouveau_bo *, u32 offset);
+ int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
@@ -27,6 +29,9 @@ struct nv50_core_func {
const struct nv50_outp_func {
void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
struct nv50_head_atom *);
+ /* XXX: Only used by SORs and PIORs for now */
+ void (*get_caps)(struct nv50_disp *,
+ struct nouveau_encoder *, int or);
} *dac, *pior, *sor;
};
@@ -35,6 +40,7 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
struct nv50_core **);
void core507d_init(struct nv50_core *);
void core507d_ntfy_init(struct nouveau_bo *, u32);
+int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
void core507d_update(struct nv50_core *, u32 *, bool);
@@ -51,6 +57,7 @@ extern const struct nv50_outp_func sor907d;
int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
void corec37d_update(struct nv50_core *, u32 *, bool);
void corec37d_wndw_owner(struct nv50_core *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index c5152c39c684..e341f572c269 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -62,6 +62,20 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
nouveau_bo_wr32(bo, offset / 4, 0x00000000);
}
+int
+core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ u32 *push = evo_wait(&disp->core->chan, 2);
+
+ if (push) {
+ evo_mthd(push, 0x008c, 1);
+ evo_data(push, 0x0);
+ evo_kick(push, &disp->core->chan);
+ }
+
+ return 0;
+}
+
void
core507d_init(struct nv50_core *core)
{
@@ -77,6 +91,7 @@ static const struct nv50_core_func
core507d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head507d,
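core507d_caps_init() above uses the EVO push-buffer idiom that recurs throughout dispnv50: reserve space in the DMA channel, emit a method header followed by its data words, then kick the buffer to hardware. Annotated with the values from the hunk (the 0x008c method latches capabilities into the core notifier, per this patch):

    u32 *push = evo_wait(&disp->core->chan, 2);     /* reserve 2 dwords */

    if (push) {
            evo_mthd(push, 0x008c, 1);     /* method 0x008c, 1 data word */
            evo_data(push, 0x0);
            evo_kick(push, &disp->core->chan);     /* submit to hardware */
    }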
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core827d.c b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
index 6123a068f836..2e0c1c536afe 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
core827d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head827d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
index ef822f813435..271629832629 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
core907d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head907d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core917d.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
index 392338df5bfd..5cc072d4c30f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
core917d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head917d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index c03cb987856b..e0c8811fb8e4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -22,6 +22,7 @@
#include "core.h"
#include "head.h"
+#include <nvif/class.h>
#include <nouveau_bo.h>
#include <nvif/timer.h>
@@ -87,6 +88,30 @@ corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
}
+int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ int ret;
+
+ ret = nvif_object_init(&disp->disp->object, 0, GV100_DISP_CAPS,
+ NULL, 0, &disp->caps);
+ if (ret) {
+ NV_ERROR(drm,
+ "Failed to init notifier caps region: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = nvif_object_map(&disp->caps, NULL, 0);
+ if (ret) {
+ NV_ERROR(drm,
+ "Failed to map notifier caps region: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void
corec37d_init(struct nv50_core *core)
{
@@ -111,6 +136,7 @@ static const struct nv50_core_func
corec37d = {
.init = corec37d_init,
.ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
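On GV100 and later, the capabilities live in a mappable object rather than in the core notifier, so corec37d_caps_init() above allocates and maps a GV100_DISP_CAPS object that sorc37d_get_caps() later reads with nvif_rd32(). Its lifecycle, sketched to show the pairing with the teardown added in nv50_display_destroy() further down:

    /* Create and map at display-create time ... */
    ret = nvif_object_init(&disp->disp->object, 0, GV100_DISP_CAPS,
                           NULL, 0, &disp->caps);
    if (ret == 0)
            ret = nvif_object_map(&disp->caps, NULL, 0);

    /* ... unmap, then fini, at destroy time. */
    nvif_object_unmap(&disp->caps);
    nvif_object_fini(&disp->caps);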
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 147adcd60937..10ba9e9e4ae6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -46,6 +46,7 @@ static const struct nv50_core_func
corec57d = {
.init = corec57d_init,
.ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 8c5cf096f69b..658a200ab616 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -32,7 +32,7 @@
bool
curs507a_space(struct nv50_wndw *wndw)
{
- nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2,
+ nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
return true;
);
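For context on the 2 -> 100 change above: nvif_msec() re-evaluates its statement body until the body returns or the millisecond budget expires, so this bumps the time curs507a_space() will poll the cursor channel's free-slot count before giving up. The surrounding function, reconstructed as a sketch (only the polling lines appear in the hunk; the trailing WARN/return fallback is an assumption):

    static bool example_curs_space(struct nv50_wndw *wndw)
    {
            /* Poll up to 100 ms for >= 4 free slots; "return true"
             * exits the enclosing function, not just the macro body.
             */
            nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
                    if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
                            return true;
            );
            WARN_ON(1);
            return false;
    }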
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6be9df1820c5..7622490d8602 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -482,15 +482,16 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
* audio component binding for ELD notification
*/
static void
-nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
+ int dev_id)
{
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
- port, -1);
+ port, dev_id);
}
static int
-nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
+nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
bool *enabled, unsigned char *buf, int max_bytes)
{
struct drm_device *drm_dev = dev_get_drvdata(kdev);
@@ -506,7 +507,8 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
nv_encoder = nouveau_encoder(encoder);
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_crtc = nouveau_crtc(encoder->crtc);
- if (!nv_connector || !nv_crtc || nv_crtc->index != port)
+ if (!nv_connector || !nv_crtc || nv_encoder->or != port ||
+ nv_crtc->index != dev_id)
continue;
*enabled = drm_detect_monitor_audio(nv_connector->edid);
if (*enabled) {
@@ -600,7 +602,8 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
- nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+ nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+ nv_crtc->index);
}
static void
@@ -634,7 +637,8 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
- nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+ nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+ nv_crtc->index);
}
/******************************************************************************
@@ -904,15 +908,9 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
if (!state->duplicated) {
const int clock = crtc_state->adjusted_mode.clock;
- /*
- * XXX: Since we don't use HDR in userspace quite yet, limit
- * the bpc to 8 to save bandwidth on the topology. In the
- * future, we'll want to properly fix this by dynamically
- * selecting the highest possible bpc that would fit in the
- * topology
- */
- asyh->or.bpc = min(connector->display_info.bpc, 8U);
- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
+ asyh->or.bpc = connector->display_info.bpc;
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
+ false);
}
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
@@ -1058,7 +1056,14 @@ static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- return MODE_OK;
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct nouveau_encoder *outp = mstc->mstm->outp;
+
+ /* TODO: calculate the PBN from the dotclock and validate against the
+ * MSTB's max possible PBN
+ */
+
+ return nv50_dp_mode_valid(connector, outp, mode, NULL);
}
static int
@@ -1072,8 +1077,17 @@ nv50_mstc_get_modes(struct drm_connector *connector)
if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
- if (!mstc->connector.display_info.bpc)
- mstc->connector.display_info.bpc = 8;
+ /*
+ * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
+ * to 8 to save bandwidth on the topology. In the future, we'll want
+ * to properly fix this by dynamically selecting the highest possible
+ * bpc that would fit in the topology
+ */
+ if (connector->display_info.bpc)
+ connector->display_info.bpc =
+ clamp(connector->display_info.bpc, 6U, 8U);
+ else
+ connector->display_info.bpc = 8;
if (mstc->native)
drm_mode_destroy(mstc->connector.dev, mstc->native);
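With the bpc clamped to the 6..8 range above, the PBN computed in nv50_msto_atomic_check() earlier in this patch stays bounded. A worked example of that calculation (figures approximate, using the 64/54 channel-coding ratio plus the 0.6% margin that drm_dp_calc_pbn_mode() applies):

    /* 1080p60 at the clamped maximum of 8 bpc:
     * clock = 148500 kHz, bpp = 8 * 3 = 24
     * drm_dp_calc_pbn_mode(148500, 24, false)
     *   = ceil(148500 * 24 * 64 * 1.006 / (8 * 54 * 1000))
     *   ~= 532 PBN
     */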
@@ -1123,8 +1137,10 @@ nv50_mstc_detect(struct drm_connector *connector,
return connector_status_disconnected;
ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
mstc->port);
@@ -1659,6 +1675,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ struct nv50_disp *disp = nv50_disp(connector->dev);
int type, ret;
switch (dcbe->type) {
@@ -1685,10 +1702,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_connector_attach_encoder(connector, encoder);
+ disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
if (dcbe->type == DCB_OUTPUT_DP) {
- struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+
if (aux) {
if (disp->disp->object.oclass < GF110_DISP) {
/* HW has no support for address-only
@@ -1801,7 +1820,9 @@ nv50_pior_func = {
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
@@ -1840,6 +1861,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_connector_attach_encoder(connector, encoder);
+
+ disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
return 0;
}
@@ -2369,7 +2393,8 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
struct drm_encoder *encoder;
struct drm_plane *plane;
- core->func->init(core);
+ if (resume || runtime)
+ core->func->init(core);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
@@ -2396,6 +2421,8 @@ nv50_display_destroy(struct drm_device *dev)
nv50_audio_component_fini(nouveau_drm(dev));
+ nvif_object_unmap(&disp->caps);
+ nvif_object_fini(&disp->caps);
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
@@ -2456,6 +2483,22 @@ nv50_display_create(struct drm_device *dev)
if (ret)
goto out;
+ disp->core->func->init(disp->core);
+ if (disp->core->func->caps_init) {
+ ret = disp->core->func->caps_init(drm, disp);
+ if (ret)
+ goto out;
+ }
+
+ /* Assign the correct format modifiers */
+ if (disp->disp->object.oclass >= TU102_DISP)
+ nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
+ else
+ if (disp->disp->object.oclass >= GF110_DISP)
+ nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
+ else
+ nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
@@ -2551,3 +2594,53 @@ out:
nv50_display_destroy(dev);
return ret;
}
+
+/******************************************************************************
+ * Format modifiers
+ *****************************************************************************/
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp50xx_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp90xx_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index d54fe00ac3a3..696e70a6b98b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -9,6 +9,7 @@ struct nv50_msto;
struct nv50_disp {
struct nvif_disp *disp;
struct nv50_core *core;
+ struct nvif_object caps;
#define NV50_DISP_SYNC(c, o) ((c) * 0x040 + (o))
#define NV50_DISP_CORE_NTFY NV50_DISP_SYNC(0 , 0x00)
@@ -78,6 +79,10 @@ void nv50_dmac_destroy(struct nv50_dmac *);
u32 *evo_wait(struct nv50_dmac *, int nr);
void evo_kick(u32 *, struct nv50_dmac *);
+extern const u64 disp50xx_modifiers[];
+extern const u64 disp90xx_modifiers[];
+extern const u64 wndwc57e_modifiers[];
+
#define evo_mthd(p, m, s) do { \
const u32 _m = (m), _s = (s); \
if (drm_debug_enabled(DRM_UT_KMS)) \
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index 00011ce109a6..4a9a32b89f74 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -168,14 +168,15 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
struct nv50_head_mode *m = &asyh->mode;
u32 *push;
- if ((push = evo_wait(core, 12))) {
+ if ((push = evo_wait(core, 13))) {
evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
evo_data(push, (m->v.active << 16) | m->h.active );
evo_data(push, (m->v.synce << 16) | m->h.synce );
evo_data(push, (m->v.blanke << 16) | m->h.blanke );
evo_data(push, (m->v.blanks << 16) | m->h.blanks );
evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
- evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+ evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+ evo_data(push, m->interlace);
evo_data(push, m->clock * 1000);
evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
evo_data(push, m->clock * 1000);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 938d910a1b1e..859131a8bc3c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -173,14 +173,15 @@ headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
struct nv50_head_mode *m = &asyh->mode;
u32 *push;
- if ((push = evo_wait(core, 12))) {
+ if ((push = evo_wait(core, 13))) {
evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
evo_data(push, (m->v.active << 16) | m->h.active );
evo_data(push, (m->v.synce << 16) | m->h.synce );
evo_data(push, (m->v.blanke << 16) | m->h.blanke );
evo_data(push, (m->v.blanks << 16) | m->h.blanks );
evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
- evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+ evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+ evo_data(push, m->interlace);
evo_data(push, m->clock * 1000);
evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
evo_data(push, m->clock * 1000);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
index d2bac6a341dc..45d8ce7d2c28 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -38,7 +38,15 @@ pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+pior507d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp,
+ int or)
+{
+ outp->caps.dp_interlace = true;
+}
+
const struct nv50_outp_func
pior507d = {
.ctrl = pior507d_ctrl,
+ .get_caps = pior507d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
index 5222fe6a9b21..9a59fa7da00d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -38,7 +38,14 @@ sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+sor507d_get_caps(struct nv50_disp *core, struct nouveau_encoder *outp, int or)
+{
+ outp->caps.dp_interlace = true;
+}
+
const struct nv50_outp_func
sor507d = {
.ctrl = sor507d_ctrl,
+ .get_caps = sor507d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
index b0314ec11fb3..9577ccf1c809 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -21,6 +21,7 @@
*/
#include "core.h"
+#include <nouveau_bo.h>
#include <nvif/class.h>
static void
@@ -35,7 +36,17 @@ sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+sor907d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+ const int off = or * 2;
+ u32 tmp = nouveau_bo_rd32(disp->sync, 0x000014 + off);
+
+ outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
const struct nv50_outp_func
sor907d = {
.ctrl = sor907d_ctrl,
+ .get_caps = sor907d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
index dff059241c5d..c86ca955fdcd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -33,7 +33,16 @@ sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+sorc37d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+ u32 tmp = nvif_rd32(&disp->caps, 0x000144 + (or * 8));
+
+ outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
const struct nv50_outp_func
sorc37d = {
.ctrl = sorc37d_ctrl,
+ .get_caps = sorc37d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index bb737f9281e6..e25ead56052c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -29,6 +29,7 @@
#include <drm/drm_fourcc.h>
#include "nouveau_bo.h"
+#include "nouveau_gem.h"
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
@@ -39,12 +40,13 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
}
static struct nv50_wndw_ctxdma *
-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
{
- struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+ struct nouveau_drm *drm = nouveau_drm(fb->dev);
struct nv50_wndw_ctxdma *ctxdma;
- const u8 kind = fb->nvbo->kind;
- const u32 handle = 0xfb000000 | kind;
+ u32 handle;
+ u32 unused;
+ u8 kind;
struct {
struct nv_dma_v0 base;
union {
@@ -56,6 +58,9 @@ nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
u32 argc = sizeof(args.base);
int ret;
+ nouveau_framebuffer_get_layout(fb, &unused, &kind);
+ handle = 0xfb000000 | kind;
+
list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
if (ctxdma->object.handle == handle)
return ctxdma;
@@ -234,16 +239,20 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+ struct drm_framebuffer *fb = asyw->state.fb;
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ uint8_t kind;
+ uint32_t tile_mode;
int ret;
NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
- if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
- asyw->image.w = fb->base.width;
- asyw->image.h = fb->base.height;
- asyw->image.kind = fb->nvbo->kind;
+ if (fb != armw->state.fb || !armw->visible || modeset) {
+ nouveau_framebuffer_get_layout(fb, &tile_mode, &kind);
+
+ asyw->image.w = fb->width;
+ asyw->image.h = fb->height;
+ asyw->image.kind = kind;
ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
if (ret) {
@@ -255,16 +264,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
if (asyw->image.kind) {
asyw->image.layout = 0;
if (drm->client.device.info.chipset >= 0xc0)
- asyw->image.blockh = fb->nvbo->mode >> 4;
+ asyw->image.blockh = tile_mode >> 4;
else
- asyw->image.blockh = fb->nvbo->mode;
- asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+ asyw->image.blockh = tile_mode;
+ asyw->image.blocks[0] = fb->pitches[0] / 64;
asyw->image.pitch[0] = 0;
} else {
asyw->image.layout = 1;
asyw->image.blockh = 0;
asyw->image.blocks[0] = 0;
- asyw->image.pitch[0] = fb->base.pitches[0];
+ asyw->image.pitch[0] = fb->pitches[0];
}
if (!asyh->state.async_flip)
@@ -471,47 +480,50 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nouveau_bo *nvbo;
NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
if (!old_state->fb)
return;
- nouveau_bo_unpin(fb->nvbo);
+ nvbo = nouveau_gem_object(old_state->fb->obj[0]);
+ nouveau_bo_unpin(nvbo);
}
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+ struct drm_framebuffer *fb = state->fb;
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nv50_wndw *wndw = nv50_wndw(plane);
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nouveau_bo *nvbo;
struct nv50_head_atom *asyh;
struct nv50_wndw_ctxdma *ctxdma;
int ret;
- NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+ NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
if (!asyw->state.fb)
return 0;
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
if (ret)
return ret;
if (wndw->ctxdma.parent) {
ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
if (IS_ERR(ctxdma)) {
- nouveau_bo_unpin(fb->nvbo);
+ nouveau_bo_unpin(nvbo);
return PTR_ERR(ctxdma);
}
asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
- asyw->image.offset[0] = fb->nvbo->bo.offset;
+ asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+ asyw->image.offset[0] = nvbo->bo.offset;
if (wndw->func->prepare) {
asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
@@ -603,6 +615,29 @@ nv50_wndw_destroy(struct drm_plane *plane)
kfree(wndw);
}
+/* This function assumes the format has already been validated against the plane
+ * and the modifier was validated against the device-wide modifier list at FB
+ * creation time.
+ */
+static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ uint8_t i;
+
+ if (drm->client.device.info.chipset < 0xc0) {
+ const struct drm_format_info *info = drm_format_info(format);
+ const uint8_t kind = (modifier >> 12) & 0xff;
+
+ if (!info) return false;
+
+ for (i = 0; i < info->num_planes; i++)
+ if ((info->cpp[i] != 4) && kind != 0x70) return false;
+ }
+
+ return true;
+}
+
const struct drm_plane_funcs
nv50_wndw = {
.update_plane = drm_atomic_helper_update_plane,
@@ -611,6 +646,7 @@ nv50_wndw = {
.reset = nv50_wndw_reset,
.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+ .format_mod_supported = nv50_plane_format_mod_supported,
};
static int
@@ -658,7 +694,8 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
for (nformat = 0; format[nformat]; nformat++);
ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
- format, nformat, NULL,
+ format, nformat,
+ nouveau_display(dev)->format_modifiers,
type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
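
The nv50_plane_format_mod_supported() hook added above only has to re-check the page kind, since the modifier list itself is validated at FB creation. A standalone sketch of the pre-Fermi gate in plain C; the 0xc0 cutoff, the 0x70 kind, and the 4-byte-per-pixel requirement are taken from the patch rather than from independent documentation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Standalone sketch of the pre-Fermi gate above (not kernel code). */
    static bool format_mod_supported(uint16_t chipset, uint32_t cpp,
                                     uint64_t modifier)
    {
        if (chipset < 0xc0) {
            const uint8_t kind = (modifier >> 12) & 0xff;

            /* Block-linear kinds other than 0x70 need 4 bytes per pixel. */
            if (cpp != 4 && kind != 0x70)
                return false;
        }
        return true;
    }

    int main(void)
    {
        /* 2-byte-per-pixel format with a block-linear kind on NV50: rejected. */
        printf("%d\n", format_mod_supported(0x50, 2, 0x20ULL << 12));
        return 0;
    }
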
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 35c9c52fab26..1d64741595ba 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -173,6 +173,23 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
return true;
}
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 wndwc57e_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static const struct nv50_wndw_func
wndwc57e = {
.acquire = wndwc37e_acquire,
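
The annotated table above packs log2(block height) into the low nibble of the modifier and the page kind into bits 19:12, which is exactly what nouveau_decode_mod() later in this series pulls back out. A minimal decode of one such value (vendor, generation, sector-layout and compression fields omitted for brevity):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* h = 4 (log2 block height in GOBs), kind = 0x06. */
        uint64_t modifier = 4 | (0x06ULL << 12);
        uint32_t tile_mode = modifier & 0xf;
        uint8_t kind = (modifier >> 12) & 0xff;

        /* On chipsets >= 0xc0, nouveau shifts the value into the high nibble. */
        printf("kind=0x%02x tile_mode=0x%02x\n", kind, tile_mode << 4);
        return 0;
    }
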
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 30659747ffe8..2c79beb41126 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -89,6 +89,8 @@
#define GV100_DISP /* cl5070.h */ 0x0000c370
#define TU102_DISP /* cl5070.h */ 0x0000c570
+#define GV100_DISP_CAPS 0x0000c373
+
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 1218f28c14ba..76288c682e9e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -24,6 +24,8 @@ struct nvkm_subdev_func {
};
extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
+int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *,
+ int index, struct nvkm_subdev **);
void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
int index, struct nvkm_subdev *);
void nvkm_subdev_del(struct nvkm_subdev **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index fe3a10255c36..69a84d0197d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -49,7 +49,6 @@ static struct nouveau_dsm_priv {
bool optimus_flags_detected;
bool optimus_skip_dsm;
acpi_handle dhandle;
- acpi_handle rom_handle;
} nouveau_dsm_priv;
bool nouveau_is_optimus(void) {
@@ -212,37 +211,6 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = {
.get_client_id = nouveau_dsm_get_client_id,
};
-/*
- * Firmware supporting Windows 8 or later do not use _DSM to put the device into
- * D3cold, they instead rely on disabling power resources on the parent.
- */
-static bool nouveau_pr3_present(struct pci_dev *pdev)
-{
- struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
- struct acpi_device *parent_adev;
-
- if (!parent_pdev)
- return false;
-
- if (!parent_pdev->bridge_d3) {
- /*
- * Parent PCI bridge is currently not power managed.
- * Since userspace can change these afterwards to be on
- * the safe side we stick with _DSM and prevent usage of
- * _PR3 from the bridge.
- */
- pci_d3cold_disable(pdev);
- return false;
- }
-
- parent_adev = ACPI_COMPANION(&parent_pdev->dev);
- if (!parent_adev)
- return false;
-
- return parent_adev->power.flags.power_resources &&
- acpi_has_method(parent_adev->handle, "_PR3");
-}
-
static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
bool *has_mux, bool *has_opt,
bool *has_opt_flags, bool *has_pr3)
@@ -250,6 +218,16 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
acpi_handle dhandle;
bool supports_mux;
int optimus_funcs;
+ struct pci_dev *parent_pdev;
+
+ *has_pr3 = false;
+ parent_pdev = pci_upstream_bridge(pdev);
+ if (parent_pdev) {
+ if (parent_pdev->bridge_d3)
+ *has_pr3 = pci_pr3_present(parent_pdev);
+ else
+ pci_d3cold_disable(pdev);
+ }
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
@@ -270,7 +248,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
*has_mux = supports_mux;
*has_opt = !!optimus_funcs;
*has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS);
- *has_pr3 = false;
if (optimus_funcs) {
uint32_t result;
@@ -280,8 +257,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
(result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
(result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
(result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
-
- *has_pr3 = nouveau_pr3_present(pdev);
}
}
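
The probe-time logic above folds the old nouveau_pr3_present() helper into nouveau_dsm_pci_probe(): _PR3 on the upstream bridge is trusted only when the bridge itself is allowed to enter D3, and D3cold is disabled outright otherwise. A toy model of that decision in plain C (not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool use_pr3(bool has_bridge, bool bridge_d3, bool bridge_has_pr3)
    {
        if (!has_bridge)
            return false;          /* no upstream bridge: stay on _DSM */
        if (!bridge_d3)
            return false;          /* bridge not PM-capable: D3cold disabled */
        return bridge_has_pr3;     /* trust _PR3 on the bridge */
    }

    int main(void)
    {
        printf("%d\n", use_pr3(true, true, true));   /* 1: power off via _PR3 */
        printf("%d\n", use_pr3(true, false, true));  /* 0: fall back to _DSM */
        return 0;
    }
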
@@ -385,59 +360,6 @@ void nouveau_unregister_dsm_handler(void) {}
void nouveau_switcheroo_optimus_dsm(void) {}
#endif
-/* retrieve the ROM in 4k blocks */
-static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
- int offset, int len)
-{
- acpi_status status;
- union acpi_object rom_arg_elements[2], *obj;
- struct acpi_object_list rom_arg;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
- rom_arg.count = 2;
- rom_arg.pointer = &rom_arg_elements[0];
-
- rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
- rom_arg_elements[0].integer.value = offset;
-
- rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
- rom_arg_elements[1].integer.value = len;
-
- status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
- if (ACPI_FAILURE(status)) {
- pr_info("failed to evaluate ROM got %s\n",
- acpi_format_exception(status));
- return -ENODEV;
- }
- obj = (union acpi_object *)buffer.pointer;
- len = min(len, (int)obj->buffer.length);
- memcpy(bios+offset, obj->buffer.pointer, len);
- kfree(buffer.pointer);
- return len;
-}
-
-bool nouveau_acpi_rom_supported(struct device *dev)
-{
- acpi_status status;
- acpi_handle dhandle, rom_handle;
-
- dhandle = ACPI_HANDLE(dev);
- if (!dhandle)
- return false;
-
- status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
- if (ACPI_FAILURE(status))
- return false;
-
- nouveau_dsm_priv.rom_handle = rom_handle;
- return true;
-}
-
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
- return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
-}
-
void *
nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 1e6e8a8c0455..330f9b837066 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -10,8 +10,6 @@ bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct device *);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline bool nouveau_is_optimus(void) { return false; };
@@ -19,8 +17,6 @@ static inline bool nouveau_is_v1_dsm(void) { return false; };
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
-static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 9a9a7f5003d3..1b383ae0248f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -38,6 +38,7 @@
#include "nouveau_reg.h"
#include "nouveau_drv.h"
#include "dispnv04/hw.h"
+#include "dispnv50/disp.h"
#include "nouveau_acpi.h"
#include "nouveau_display.h"
@@ -509,7 +510,11 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
nv_connector->detected_encoder = nv_encoder;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- connector->interlace_allowed = true;
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ connector->interlace_allowed =
+ nv_encoder->caps.dp_interlace;
+ else
+ connector->interlace_allowed = true;
connector->doublescan_allowed = true;
} else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
@@ -1029,6 +1034,29 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
return 112000 * duallink_scale;
}
+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *mode,
+ const unsigned min_clock,
+ const unsigned max_clock,
+ unsigned int *clock_out)
+{
+ unsigned int clock = mode->clock;
+
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
+ DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+ if (clock < min_clock)
+ return MODE_CLOCK_LOW;
+ if (clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ if (clock_out)
+ *clock_out = clock;
+
+ return MODE_OK;
+}
+
static enum drm_mode_status
nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -1037,7 +1065,6 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
unsigned min_clock = 25000, max_clock = min_clock;
- unsigned clock = mode->clock;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_LVDS:
@@ -1060,25 +1087,14 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
case DCB_OUTPUT_TV:
return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case DCB_OUTPUT_DP:
- max_clock = nv_encoder->dp.link_nr;
- max_clock *= nv_encoder->dp.link_bw;
- clock = clock * (connector->display_info.bpc * 3) / 10;
- break;
+ return nv50_dp_mode_valid(connector, nv_encoder, mode, NULL);
default:
BUG();
return MODE_BAD;
}
- if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
- clock *= 2;
-
- if (clock < min_clock)
- return MODE_CLOCK_LOW;
-
- if (clock > max_clock)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
+ return nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+ NULL);
}
static struct drm_encoder *
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index de84fb4708c7..9e062c7adec8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -195,6 +195,11 @@ int nouveau_conn_atomic_get_property(struct drm_connector *,
const struct drm_connector_state *,
struct drm_property *, u64 *);
struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *,
+ const unsigned min_clock,
+ const unsigned max_clock,
+ unsigned *clock);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int nouveau_backlight_init(struct drm_connector *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 15a3d40edf02..63b5c8cf9ae4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -181,8 +181,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
}
ret = pm_runtime_get_sync(drm->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(drm->dev);
return ret;
+ }
+
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
pm_runtime_put_autosuspend(drm->dev);
if (ret < 0)
@@ -217,7 +220,7 @@ static const struct nouveau_debugfs_files {
{"pstate", &nouveau_pstate_fops},
};
-int
+void
nouveau_drm_debugfs_init(struct drm_minor *minor)
{
struct nouveau_drm *drm = nouveau_drm(minor->dev);
@@ -240,12 +243,10 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
*/
dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
if (!dentry)
- return 0;
+ return;
d_inode(dentry)->i_size = drm->vbios.length;
dput(dentry);
-
- return 0;
}
int
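
The error-path change above exists because pm_runtime_get_sync() raises the device usage count even when it fails, so a bare return would leak a reference; the same fix appears in nouveau_gem.c later in this series. A userspace toy model of the pattern, with get_sync()/put_autosuspend() as stand-ins for the runtime-PM calls:

    #include <stdio.h>

    static int usage;                /* stands in for the device usage count */

    static int get_sync(int resume_ok)
    {
        usage++;                     /* counted even when resume fails */
        return resume_ok ? 0 : -5;
    }

    static void put_autosuspend(void)
    {
        usage--;
    }

    static int do_op(int resume_ok)
    {
        int ret = get_sync(resume_ok);

        if (ret < 0) {
            put_autosuspend();       /* drop the reference taken above */
            return ret;
        }
        /* ... issue the method/ioctl here ... */
        put_autosuspend();
        return 0;
    }

    int main(void)
    {
        do_op(0);
        printf("usage after failed op: %d\n", usage);   /* 0: nothing leaked */
        return 0;
    }
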
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index 8909c010e8ea..77f0323b38ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -18,15 +18,13 @@ nouveau_debugfs(struct drm_device *dev)
return nouveau_drm(dev)->debugfs;
}
-extern int nouveau_drm_debugfs_init(struct drm_minor *);
+extern void nouveau_drm_debugfs_init(struct drm_minor *);
extern int nouveau_debugfs_init(struct nouveau_drm *);
extern void nouveau_debugfs_fini(struct nouveau_drm *);
#else
-static inline int
+static inline void
nouveau_drm_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
+{}
static inline int
nouveau_debugfs_init(struct nouveau_drm *drm)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 700817dc4fa0..496c4621cc78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -31,6 +31,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -179,41 +180,164 @@ nouveau_display_vblank_init(struct drm_device *dev)
return 0;
}
+static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
static void
-nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
+nouveau_decode_mod(struct nouveau_drm *drm,
+ uint64_t modifier,
+ uint32_t *tile_mode,
+ uint8_t *kind)
+{
+ BUG_ON(!tile_mode || !kind);
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ /* tile_mode will not be used in this case */
+ *tile_mode = 0;
+ *kind = 0;
+ } else {
+ /*
+ * Extract the block height and kind from the corresponding
+ * modifier fields. See drm_fourcc.h for details.
+ */
+ *tile_mode = (uint32_t)(modifier & 0xF);
+ *kind = (uint8_t)((modifier >> 12) & 0xFF);
+
+ if (drm->client.device.info.chipset >= 0xc0)
+ *tile_mode <<= 4;
+ }
+}
+
+void
+nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
+ uint32_t *tile_mode,
+ uint8_t *kind)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ if (fb->flags & DRM_MODE_FB_MODIFIERS) {
+ struct nouveau_drm *drm = nouveau_drm(fb->dev);
- if (fb->nvbo)
- drm_gem_object_put_unlocked(&fb->nvbo->bo.base);
+ nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
+ } else {
+ const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
- drm_framebuffer_cleanup(drm_fb);
- kfree(fb);
+ *tile_mode = nvbo->mode;
+ *kind = nvbo->kind;
+ }
}
static int
-nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
- struct drm_file *file_priv,
- unsigned int *handle)
+nouveau_validate_decode_mod(struct nouveau_drm *drm,
+ uint64_t modifier,
+ uint32_t *tile_mode,
+ uint8_t *kind)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct nouveau_display *disp = nouveau_display(drm->dev);
+ int mod;
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ return -EINVAL;
+ }
- return drm_gem_handle_create(file_priv, &fb->nvbo->bo.base, handle);
+ BUG_ON(!disp->format_modifiers);
+
+ for (mod = 0;
+ (disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
+ (disp->format_modifiers[mod] != modifier);
+ mod++);
+
+ if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
+ return -EINVAL;
+
+ nouveau_decode_mod(drm, modifier, tile_mode, kind);
+
+ return 0;
}
-static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
- .destroy = nouveau_user_framebuffer_destroy,
- .create_handle = nouveau_user_framebuffer_create_handle,
-};
+static inline uint32_t
+nouveau_get_width_in_blocks(uint32_t stride)
+{
+ /* A block is always a single GOB wide in the x direction, and GOBs are
+ * 64 bytes wide.
+ */
+ static const uint32_t log_block_width = 6;
+
+ return (stride + (1 << log_block_width) - 1) >> log_block_width;
+}
+
+static inline uint32_t
+nouveau_get_height_in_blocks(struct nouveau_drm *drm,
+ uint32_t height,
+ uint32_t log_block_height_in_gobs)
+{
+ uint32_t log_gob_height;
+ uint32_t log_block_height;
+
+ BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ log_gob_height = 2;
+ else
+ log_gob_height = 3;
+
+ log_block_height = log_block_height_in_gobs + log_gob_height;
+
+ return (height + (1 << log_block_height) - 1) >> log_block_height;
+}
+
+static int
+nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
+ uint32_t offset, uint32_t stride, uint32_t h,
+ uint32_t tile_mode)
+{
+ uint32_t gob_size, bw, bh;
+ uint64_t bl_size;
+
+ BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+ if (drm->client.device.info.chipset >= 0xc0) {
+ if (tile_mode & 0xF)
+ return -EINVAL;
+ tile_mode >>= 4;
+ }
+
+ if (tile_mode & 0xFFFFFFF0)
+ return -EINVAL;
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ gob_size = 256;
+ else
+ gob_size = 512;
+
+ bw = nouveau_get_width_in_blocks(stride);
+ bh = nouveau_get_height_in_blocks(drm, h, tile_mode);
+
+ bl_size = bw * bh * (1 << tile_mode) * gob_size;
+
+ DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+ offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
+ nvbo->bo.mem.size);
+
+ if (bl_size + offset > nvbo->bo.mem.size)
+ return -ERANGE;
+
+ return 0;
+}
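
nouveau_check_bl_size() above bounds a block-linear plane by rounding the stride up to whole 64-byte GOB columns and the height up to whole blocks of (1 << tile_mode) GOBs. A standalone sketch of the Fermi+ arithmetic with a worked 1920x1080 XRGB8888 example; the figures are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Fermi+ assumptions: GOBs are 64 bytes wide and 8 rows tall (512 bytes),
     * and tile_mode here is already the log2 block height in GOBs, i.e. the
     * value left after the >>= 4 in nouveau_check_bl_size(). */
    static uint64_t bl_size(uint32_t stride, uint32_t h, uint32_t tile_mode)
    {
        uint32_t bw = (stride + 63) >> 6;                /* block columns */
        uint32_t bh = (h + (8u << tile_mode) - 1) >>
                      (3 + tile_mode);                   /* block rows */

        return (uint64_t)bw * bh * (1u << tile_mode) * 512;
    }

    int main(void)
    {
        /* 1920x1080 XRGB8888, 16-GOB-tall blocks (tile_mode = 4):
         * 120 * 9 * 16 * 512 = 8847360 bytes, vs 8294400 linear. */
        printf("%llu\n", (unsigned long long)bl_size(7680, 1080, 4));
        return 0;
    }
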
int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct nouveau_bo *nvbo,
- struct nouveau_framebuffer **pfb)
+ struct drm_gem_object *gem,
+ struct drm_framebuffer **pfb)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_framebuffer *fb;
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct drm_framebuffer *fb;
+ const struct drm_format_info *info;
+ unsigned int width, height, i;
+ uint32_t tile_mode;
+ uint8_t kind;
int ret;
/* YUV overlays have special requirements pre-NV50 */
@@ -236,13 +360,50 @@ nouveau_framebuffer_new(struct drm_device *dev,
return -EINVAL;
}
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
+ &tile_mode, &kind)) {
+ DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
+ mode_cmd->modifier[0]);
+ return -EINVAL;
+ }
+ } else {
+ tile_mode = nvbo->mode;
+ kind = nvbo->kind;
+ }
+
+ info = drm_get_format_info(dev, mode_cmd);
+
+ for (i = 0; i < info->num_planes; i++) {
+ width = drm_format_info_plane_width(info,
+ mode_cmd->width,
+ i);
+ height = drm_format_info_plane_height(info,
+ mode_cmd->height,
+ i);
+
+ if (kind) {
+ ret = nouveau_check_bl_size(drm, nvbo,
+ mode_cmd->offsets[i],
+ mode_cmd->pitches[i],
+ height, tile_mode);
+ if (ret)
+ return ret;
+ } else {
+ uint32_t size = mode_cmd->pitches[i] * height;
+
+ if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+ return -ERANGE;
+ }
+ }
+
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
- fb->nvbo = nvbo;
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ fb->obj[0] = gem;
- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret)
kfree(fb);
return ret;
@@ -253,19 +414,17 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct nouveau_framebuffer *fb;
- struct nouveau_bo *nvbo;
+ struct drm_framebuffer *fb;
struct drm_gem_object *gem;
int ret;
gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
- nvbo = nouveau_gem_object(gem);
- ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
+ ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
if (ret == 0)
- return &fb->base;
+ return fb;
drm_gem_object_put_unlocked(gem);
return ERR_PTR(ret);
@@ -517,6 +676,7 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
+ dev->mode_config.allow_fb_modifiers = true;
if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index de004018ab5c..6e0d900441d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -8,26 +8,11 @@
#include <drm/drm_framebuffer.h>
-struct nouveau_framebuffer {
- struct drm_framebuffer base;
- struct nouveau_bo *nvbo;
- struct nouveau_vma *vma;
- u32 r_handle;
- u32 r_format;
- u32 r_pitch;
- struct nvif_object h_base[4];
- struct nvif_object h_core;
-};
-
-static inline struct nouveau_framebuffer *
-nouveau_framebuffer(struct drm_framebuffer *fb)
-{
- return container_of(fb, struct nouveau_framebuffer, base);
-}
-
-int nouveau_framebuffer_new(struct drm_device *,
- const struct drm_mode_fb_cmd2 *,
- struct nouveau_bo *, struct nouveau_framebuffer **);
+int
+nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *gem,
+ struct drm_framebuffer **pfb);
struct nouveau_display {
void *priv;
@@ -47,6 +32,8 @@ struct nouveau_display {
struct drm_property *color_vibrance_property;
struct drm_atomic_state *suspend;
+
+ const u64 *format_modifiers;
};
static inline struct nouveau_display *
@@ -75,6 +62,10 @@ int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+void
+nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, uint32_t *tile_mode,
+ uint8_t *kind);
+
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
const struct drm_mode_fb_cmd2 *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ad89e09a0be3..e5c230d9ae24 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -25,12 +25,14 @@
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
+#include "nouveau_svm.h"
#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
@@ -54,66 +56,69 @@ enum nouveau_aper {
typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper, u64 dst_addr,
enum nouveau_aper, u64 src_addr);
+typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
+ enum nouveau_aper, u64 dst_addr);
struct nouveau_dmem_chunk {
struct list_head list;
struct nouveau_bo *bo;
struct nouveau_drm *drm;
- unsigned long pfn_first;
unsigned long callocated;
- unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
- spinlock_t lock;
+ struct dev_pagemap pagemap;
};
struct nouveau_dmem_migrate {
nouveau_migrate_copy_t copy_func;
+ nouveau_clear_page_t clear_func;
struct nouveau_channel *chan;
};
struct nouveau_dmem {
struct nouveau_drm *drm;
- struct dev_pagemap pagemap;
struct nouveau_dmem_migrate migrate;
- struct list_head chunk_free;
- struct list_head chunk_full;
- struct list_head chunk_empty;
+ struct list_head chunks;
struct mutex mutex;
+ struct page *free_pages;
+ spinlock_t lock;
};
-static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
- return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+ return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
+}
+
+static struct nouveau_drm *page_to_drm(struct page *page)
+{
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+
+ return chunk->drm;
}
-static unsigned long nouveau_dmem_page_addr(struct page *page)
+unsigned long nouveau_dmem_page_addr(struct page *page)
{
- struct nouveau_dmem_chunk *chunk = page->zone_device_data;
- unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+ unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
+ chunk->pagemap.res.start;
- return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
+ return chunk->bo->bo.offset + off;
}
static void nouveau_dmem_page_free(struct page *page)
{
- struct nouveau_dmem_chunk *chunk = page->zone_device_data;
- unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+ struct nouveau_dmem *dmem = chunk->drm->dmem;
+
+ spin_lock(&dmem->lock);
+ page->zone_device_data = dmem->free_pages;
+ dmem->free_pages = page;
- /*
- * FIXME:
- *
- * This is really a bad example, we need to overhaul nouveau memory
- * management to be more page focus and allow lighter locking scheme
- * to be use in the process.
- */
- spin_lock(&chunk->lock);
- clear_bit(idx, chunk->bitmap);
WARN_ON(!chunk->callocated);
chunk->callocated--;
/*
 * FIXME when chunk->callocated reaches 0 we should add the chunk to
* a reclaim list so that it can be freed in case of memory pressure.
*/
- spin_unlock(&chunk->lock);
+ spin_unlock(&dmem->lock);
}
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
@@ -165,8 +170,8 @@ error_free_page:
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
- struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
- struct nouveau_drm *drm = dmem->drm;
+ struct nouveau_drm *drm = page_to_drm(vmf->page);
+ struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
unsigned long src = 0, dst = 0;
dma_addr_t dma_addr = 0;
@@ -209,131 +214,105 @@ static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
};
static int
-nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
struct nouveau_dmem_chunk *chunk;
+ struct resource *res;
+ struct page *page;
+ void *ptr;
+ unsigned long i, pfn_first;
int ret;
- if (drm->dmem == NULL)
- return -EINVAL;
-
- mutex_lock(&drm->dmem->mutex);
- chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
- struct nouveau_dmem_chunk,
- list);
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
- mutex_unlock(&drm->dmem->mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- list_del(&chunk->list);
- mutex_unlock(&drm->dmem->mutex);
+ /* Allocate unused physical address space for device private pages. */
+ res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
+ "nouveau_dmem");
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ goto out_free;
+ }
+
+ chunk->drm = drm;
+ chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ chunk->pagemap.res = *res;
+ chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
+ chunk->pagemap.owner = drm->dev;
ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
&chunk->bo);
if (ret)
- goto out;
+ goto out_release;
ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
- if (ret) {
- nouveau_bo_ref(NULL, &chunk->bo);
- goto out;
- }
+ if (ret)
+ goto out_bo_free;
- bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
- spin_lock_init(&chunk->lock);
+ ptr = memremap_pages(&chunk->pagemap, numa_node_id());
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ goto out_bo_unpin;
+ }
-out:
mutex_lock(&drm->dmem->mutex);
- if (chunk->bo)
- list_add(&chunk->list, &drm->dmem->chunk_empty);
- else
- list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+ list_add(&chunk->list, &drm->dmem->chunks);
mutex_unlock(&drm->dmem->mutex);
- return ret;
-}
-
-static struct nouveau_dmem_chunk *
-nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
-{
- struct nouveau_dmem_chunk *chunk;
-
- chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
- struct nouveau_dmem_chunk,
- list);
- if (chunk)
- return chunk;
-
- chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
- struct nouveau_dmem_chunk,
- list);
- if (chunk->bo)
- return chunk;
-
- return NULL;
-}
-
-static int
-nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
- unsigned long npages,
- unsigned long *pages)
-{
- struct nouveau_dmem_chunk *chunk;
- unsigned long c;
- int ret;
-
- memset(pages, 0xff, npages * sizeof(*pages));
-
- mutex_lock(&drm->dmem->mutex);
- for (c = 0; c < npages;) {
- unsigned long i;
-
- chunk = nouveau_dmem_chunk_first_free_locked(drm);
- if (chunk == NULL) {
- mutex_unlock(&drm->dmem->mutex);
- ret = nouveau_dmem_chunk_alloc(drm);
- if (ret) {
- if (c)
- return 0;
- return ret;
- }
- mutex_lock(&drm->dmem->mutex);
- continue;
- }
-
- spin_lock(&chunk->lock);
- i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
- while (i < DMEM_CHUNK_NPAGES && c < npages) {
- pages[c] = chunk->pfn_first + i;
- set_bit(i, chunk->bitmap);
- chunk->callocated++;
- c++;
-
- i = find_next_zero_bit(chunk->bitmap,
- DMEM_CHUNK_NPAGES, i);
- }
- spin_unlock(&chunk->lock);
+ pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
+ page = pfn_to_page(pfn_first);
+ spin_lock(&drm->dmem->lock);
+ for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
+ page->zone_device_data = drm->dmem->free_pages;
+ drm->dmem->free_pages = page;
}
- mutex_unlock(&drm->dmem->mutex);
+ *ppage = page;
+ chunk->callocated++;
+ spin_unlock(&drm->dmem->lock);
+
+ NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
+ DMEM_CHUNK_SIZE >> 20);
return 0;
+
+out_bo_unpin:
+ nouveau_bo_unpin(chunk->bo);
+out_bo_free:
+ nouveau_bo_ref(NULL, &chunk->bo);
+out_release:
+ release_mem_region(chunk->pagemap.res.start,
+ resource_size(&chunk->pagemap.res));
+out_free:
+ kfree(chunk);
+out:
+ return ret;
}
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
- unsigned long pfns[1];
- struct page *page;
+ struct nouveau_dmem_chunk *chunk;
+ struct page *page = NULL;
int ret;
- /* FIXME stop all the miss-match API ... */
- ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
- if (ret)
- return NULL;
+ spin_lock(&drm->dmem->lock);
+ if (drm->dmem->free_pages) {
+ page = drm->dmem->free_pages;
+ drm->dmem->free_pages = page->zone_device_data;
+ chunk = nouveau_page_to_chunk(page);
+ chunk->callocated++;
+ spin_unlock(&drm->dmem->lock);
+ } else {
+ spin_unlock(&drm->dmem->lock);
+ ret = nouveau_dmem_chunk_alloc(drm, &page);
+ if (ret)
+ return NULL;
+ }
- page = pfn_to_page(pfns[0]);
get_page(page);
lock_page(page);
return page;
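
The allocator rework above drops the per-chunk bitmap in favour of one LIFO free list threaded through page->zone_device_data, growing by a whole chunk only when the list runs dry. A toy model of the list discipline (the kernel version holds dmem->lock around both operations):

    #include <stdio.h>

    /* Toy model: free pages form a singly linked list threaded through a
     * per-page pointer (zone_device_data in the kernel). */
    struct page { struct page *next_free; };

    static struct page *free_pages;

    static void page_free(struct page *p)
    {
        p->next_free = free_pages;      /* push */
        free_pages = p;
    }

    static struct page *page_alloc(void)
    {
        struct page *p = free_pages;

        if (p)
            free_pages = p->next_free;  /* pop; else the caller grows a chunk */
        return p;
    }

    int main(void)
    {
        struct page pages[4];

        for (int i = 0; i < 4; i++)
            page_free(&pages[i]);
        printf("%p\n", (void *)page_alloc());
        return 0;
    }
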
@@ -356,12 +335,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
return;
mutex_lock(&drm->dmem->mutex);
- list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
- /* FIXME handle pin failure */
- WARN_ON(ret);
- }
- list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ list_for_each_entry(chunk, &drm->dmem->chunks, list) {
ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
/* FIXME handle pin failure */
WARN_ON(ret);
@@ -378,12 +352,8 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
return;
mutex_lock(&drm->dmem->mutex);
- list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
- nouveau_bo_unpin(chunk->bo);
- }
- list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ list_for_each_entry(chunk, &drm->dmem->chunks, list)
nouveau_bo_unpin(chunk->bo);
- }
mutex_unlock(&drm->dmem->mutex);
}
@@ -397,15 +367,13 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
mutex_lock(&drm->dmem->mutex);
- WARN_ON(!list_empty(&drm->dmem->chunk_free));
- WARN_ON(!list_empty(&drm->dmem->chunk_full));
-
- list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
- if (chunk->bo) {
- nouveau_bo_unpin(chunk->bo);
- nouveau_bo_ref(NULL, &chunk->bo);
- }
+ list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+ nouveau_bo_unpin(chunk->bo);
+ nouveau_bo_ref(NULL, &chunk->bo);
list_del(&chunk->list);
+ memunmap_pages(&chunk->pagemap);
+ release_mem_region(chunk->pagemap.res.start,
+ resource_size(&chunk->pagemap.res));
kfree(chunk);
}
@@ -472,6 +440,52 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
}
static int
+nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
+ enum nouveau_aper dst_aper, u64 dst_addr)
+{
+ struct nouveau_channel *chan = drm->dmem->migrate.chan;
+ u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
+ (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
+ (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
+ (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
+ (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
+ u32 remap = (4 << 0) /* DST_X_CONST_A */ |
+ (5 << 4) /* DST_Y_CONST_B */ |
+ (3 << 16) /* COMPONENT_SIZE_FOUR */ |
+ (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
+ int ret;
+
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ switch (dst_aper) {
+ case NOUVEAU_APER_VRAM:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
+ break;
+ case NOUVEAU_APER_HOST:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
+
+ BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, remap);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
+ OUT_RING(chan, upper_32_bits(dst_addr));
+ OUT_RING(chan, lower_32_bits(dst_addr));
+ BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
+ OUT_RING(chan, length >> 3);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING(chan, launch_dma);
+ return 0;
+}
+
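
nvc0b5_migrate_clear() programs the copy engine's remap stage with two 4-byte constant components per element, so each element covers 8 bytes; that appears to be why the length is written as length >> 3 above. A quick check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t length = 1u << 16;       /* e.g. clearing a 64 KiB span */
        uint32_t component_size = 4;      /* COMPONENT_SIZE_FOUR */
        uint32_t dst_components = 2;      /* NUM_DST_COMPONENTS_TWO */

        /* 8 bytes per element: length / 8 matches length >> 3. */
        printf("%u == %u\n", length / (component_size * dst_components),
               length >> 3);
        return 0;
    }
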
+static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
switch (drm->ttm.copy.oclass) {
@@ -480,6 +494,7 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
case VOLTA_DMA_COPY_A:
case TURING_DMA_COPY_A:
drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
+ drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
drm->dmem->migrate.chan = drm->ttm.chan;
return 0;
default:
@@ -491,9 +506,6 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
- struct device *device = drm->dev->dev;
- struct resource *res;
- unsigned long i, size, pfn_first;
int ret;
/* This only makes sense on PASCAL or newer */
@@ -505,84 +517,53 @@ nouveau_dmem_init(struct nouveau_drm *drm)
drm->dmem->drm = drm;
mutex_init(&drm->dmem->mutex);
- INIT_LIST_HEAD(&drm->dmem->chunk_free);
- INIT_LIST_HEAD(&drm->dmem->chunk_full);
- INIT_LIST_HEAD(&drm->dmem->chunk_empty);
-
- size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
+ INIT_LIST_HEAD(&drm->dmem->chunks);
+ spin_lock_init(&drm->dmem->lock);
/* Initialize migration dma helpers before registering memory */
ret = nouveau_dmem_migrate_init(drm);
- if (ret)
- goto out_free;
-
- /*
- * FIXME we need some kind of policy to decide how much VRAM we
- * want to register with HMM. For now just register everything
- * and latter if we want to do thing like over commit then we
- * could revisit this.
- */
- res = devm_request_free_mem_region(device, &iomem_resource, size);
- if (IS_ERR(res))
- goto out_free;
- drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
- drm->dmem->pagemap.res = *res;
- drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
- drm->dmem->pagemap.owner = drm->dev;
- if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
- goto out_free;
-
- pfn_first = res->start >> PAGE_SHIFT;
- for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
- struct nouveau_dmem_chunk *chunk;
- struct page *page;
- unsigned long j;
-
- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
- if (chunk == NULL) {
- nouveau_dmem_fini(drm);
- return;
- }
-
- chunk->drm = drm;
- chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
- list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
-
- page = pfn_to_page(chunk->pfn_first);
- for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
- page->zone_device_data = chunk;
+ if (ret) {
+ kfree(drm->dmem);
+ drm->dmem = NULL;
}
-
- NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
- return;
-out_free:
- kfree(drm->dmem);
- drm->dmem = NULL;
}
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
- unsigned long src, dma_addr_t *dma_addr)
+ unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
{
struct device *dev = drm->dev->dev;
struct page *dpage, *spage;
+ unsigned long paddr;
spage = migrate_pfn_to_page(src);
- if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+ if (!(src & MIGRATE_PFN_MIGRATE))
goto out;
dpage = nouveau_dmem_page_alloc_locked(drm);
if (!dpage)
- return 0;
-
- *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, *dma_addr))
- goto out_free_page;
+ goto out;
- if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
- nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
- *dma_addr))
- goto out_dma_unmap;
+ paddr = nouveau_dmem_page_addr(dpage);
+ if (spage) {
+ *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, *dma_addr))
+ goto out_free_page;
+ if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+ NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
+ goto out_dma_unmap;
+ } else {
+ *dma_addr = DMA_MAPPING_ERROR;
+ if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
+ NOUVEAU_APER_VRAM, paddr))
+ goto out_free_page;
+ }
+ *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
+ ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
+ if (src & MIGRATE_PFN_WRITE)
+ *pfn |= NVIF_VMM_PFNMAP_V0_W;
return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
out_dma_unmap:
@@ -590,19 +571,21 @@ out_dma_unmap:
out_free_page:
nouveau_dmem_page_free_locked(drm, dpage);
out:
+ *pfn = NVIF_VMM_PFNMAP_V0_NONE;
return 0;
}
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
- struct migrate_vma *args, dma_addr_t *dma_addrs)
+ struct nouveau_svmm *svmm, struct migrate_vma *args,
+ dma_addr_t *dma_addrs, u64 *pfns)
{
struct nouveau_fence *fence;
unsigned long addr = args->start, nr_dma = 0, i;
for (i = 0; addr < args->end; i++) {
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
- dma_addrs + nr_dma);
- if (args->dst[i])
+ dma_addrs + nr_dma, pfns + i);
+ if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
nr_dma++;
addr += PAGE_SIZE;
}
@@ -610,20 +593,18 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
+ nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
while (nr_dma--) {
dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
- /*
- * FIXME optimization: update GPU page table to point to newly migrated
- * memory.
- */
migrate_vma_finalize(args);
}
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct nouveau_svmm *svmm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
@@ -635,9 +616,13 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
.vma = vma,
.start = start,
};
- unsigned long c, i;
+ unsigned long i;
+ u64 *pfns;
int ret = -ENOMEM;
+ if (drm->dmem == NULL)
+ return -ENODEV;
+
args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
if (!args.src)
goto out;
@@ -649,19 +634,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
if (!dma_addrs)
goto out_free_dst;
- for (i = 0; i < npages; i += c) {
- c = min(SG_MAX_SINGLE_ALLOC, npages);
- args.end = start + (c << PAGE_SHIFT);
+ pfns = nouveau_pfns_alloc(max);
+ if (!pfns)
+ goto out_free_dma;
+
+ for (i = 0; i < npages; i += max) {
+ args.end = start + (max << PAGE_SHIFT);
ret = migrate_vma_setup(&args);
if (ret)
- goto out_free_dma;
+ goto out_free_pfns;
if (args.cpages)
- nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
+ nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
+ pfns);
args.start = args.end;
}
ret = 0;
+out_free_pfns:
+ nouveau_pfns_free(pfns);
out_free_dma:
kfree(dma_addrs);
out_free_dst:
@@ -671,28 +662,3 @@ out_free_src:
out:
return ret;
}
-
-void
-nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
- struct hmm_range *range)
-{
- unsigned long i, npages;
-
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0; i < npages; ++i) {
- struct page *page;
- uint64_t addr;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- if (!is_device_private_page(page))
- continue;
-
- addr = nouveau_dmem_page_addr(page);
- range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
- range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
- range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
index 92394be5d649..64da5d3635c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -25,6 +25,7 @@
struct drm_device;
struct drm_file;
struct nouveau_drm;
+struct nouveau_svmm;
struct hmm_range;
#if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
@@ -34,12 +35,12 @@ void nouveau_dmem_suspend(struct nouveau_drm *);
void nouveau_dmem_resume(struct nouveau_drm *);
int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct nouveau_svmm *svmm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
+unsigned long nouveau_dmem_page_addr(struct page *page);
-void nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
- struct hmm_range *range);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_dmem_init(struct nouveau_drm *drm) {}
static inline void nouveau_dmem_fini(struct nouveau_drm *drm) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 2674f1587457..8a0f7994e1ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -98,3 +98,34 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
return NOUVEAU_DP_SST;
return ret;
}
+
+/* TODO:
+ * - Use the minimum possible BPC here, once we add support for the max bpc
+ * property.
+ * - Validate the mode against downstream port caps (see
+ * drm_dp_downstream_max_clock())
+ * - Validate against the DP caps advertised by the GPU (we don't check these
+ * yet)
+ */
+enum drm_mode_status
+nv50_dp_mode_valid(struct drm_connector *connector,
+ struct nouveau_encoder *outp,
+ const struct drm_display_mode *mode,
+ unsigned *out_clock)
+{
+ const unsigned min_clock = 25000;
+ unsigned max_clock, clock;
+ enum drm_mode_status ret;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
+ return MODE_NO_INTERLACE;
+
+ max_clock = outp->dp.link_nr * outp->dp.link_bw;
+ clock = mode->clock * (connector->display_info.bpc * 3) / 10;
+
+ ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+ &clock);
+ if (out_clock)
+ *out_clock = clock;
+ return ret;
+}
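
nv50_dp_mode_valid() compares symbol demand against link capacity. nouveau stores dp.link_bw as 27000 times the DPCD link-rate code, i.e. in units of 10 kbit/s per lane, so dividing the pixel bit rate by 10 (the 8b/10b overhead) puts both sides of the comparison in the same units. Illustrative numbers for 1080p60 at 8 bpc over four HBR lanes:

    #include <stdio.h>

    int main(void)
    {
        unsigned link_nr = 4, link_bw = 270000; /* 27000 * 10: HBR, 2.7 Gbit/s */
        unsigned mode_clock = 148500, bpc = 8;  /* 1080p60 pixel clock in kHz */

        unsigned max_clock = link_nr * link_bw;        /* 1080000 */
        unsigned clock = mode_clock * (bpc * 3) / 10;  /* 356400 */

        printf("%s\n", clock <= max_clock ? "MODE_OK" : "MODE_CLOCK_HIGH");
        return 0;
    }
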
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ca4087f5a15b..ac93d12201dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -681,8 +681,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
{
struct nvkm_device *device;
struct drm_device *drm_dev;
- struct apertures_struct *aper;
- bool boot = false;
int ret;
if (vga_switcheroo_client_probe_defer(pdev))
@@ -699,32 +697,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
nvkm_device_del(&device);
/* Remove conflicting drivers (vesafb, efifb etc). */
- aper = alloc_apertures(3);
- if (!aper)
- return -ENOMEM;
-
- aper->ranges[0].base = pci_resource_start(pdev, 1);
- aper->ranges[0].size = pci_resource_len(pdev, 1);
- aper->count = 1;
-
- if (pci_resource_len(pdev, 2)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
- aper->count++;
- }
-
- if (pci_resource_len(pdev, 3)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
- aper->count++;
- }
-
-#ifdef CONFIG_X86
- boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- if (nouveau_modeset != 2)
- drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
- kfree(aper);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
+ if (ret)
+ return ret;
ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
true, true, ~0ULL, &device);
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 3517f920bf89..de51733b0476 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -66,6 +66,10 @@ struct nouveau_encoder {
} dp;
};
+ struct {
+ bool dp_interlace : 1;
+ } caps;
+
void (*enc_save)(struct drm_encoder *encoder);
void (*enc_restore)(struct drm_encoder *encoder);
void (*update)(struct nouveau_encoder *, u8 head,
@@ -100,6 +104,10 @@ enum nouveau_dp_status {
};
int nouveau_dp_detect(struct nouveau_encoder *);
+enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
+ struct nouveau_encoder *,
+ const struct drm_display_mode *,
+ unsigned *clock);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 24d543a01f43..3d11b84d4cf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -312,7 +312,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct fb_info *info;
- struct nouveau_framebuffer *fb;
+ struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
@@ -335,7 +335,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
goto out;
}
- ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
+ ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
if (ret)
goto out_unref;
@@ -353,7 +353,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_vma_new(nvbo, chan->vmm, &fb->vma);
+ ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -367,7 +367,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
}
/* setup helper */
- fbcon->helper.fb = &fb->base;
+ fbcon->helper.fb = fb;
if (!chan)
info->flags = FBINFO_HWACCEL_DISABLED;
@@ -376,12 +376,12 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
- fb->nvbo->bo.mem.bus.offset;
- info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->fix.smem_start = nvbo->bo.mem.bus.base +
+ nvbo->bo.mem.bus.offset;
+ info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
- info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
+ info->screen_size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
@@ -393,19 +393,19 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* To allow resizing without swapping buffers */
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
- fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
+ fb->width, fb->height, nvbo->bo.offset, nvbo);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unlock:
if (chan)
- nouveau_vma_del(&fb->vma);
- nouveau_bo_unmap(fb->nvbo);
+ nouveau_vma_del(&fbcon->vma);
+ nouveau_bo_unmap(nvbo);
out_unpin:
- nouveau_bo_unpin(fb->nvbo);
+ nouveau_bo_unpin(nvbo);
out_unref:
- nouveau_bo_ref(NULL, &fb->nvbo);
+ nouveau_bo_ref(NULL, &nvbo);
out:
return ret;
}
@@ -413,16 +413,18 @@ out:
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fbcon->helper.fb);
+ struct drm_framebuffer *fb = fbcon->helper.fb;
+ struct nouveau_bo *nvbo;
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_fini(&fbcon->helper);
- if (nouveau_fb && nouveau_fb->nvbo) {
- nouveau_vma_del(&nouveau_fb->vma);
- nouveau_bo_unmap(nouveau_fb->nvbo);
- nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_framebuffer_put(&nouveau_fb->base);
+ if (fb && fb->obj[0]) {
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ nouveau_vma_del(&fbcon->vma);
+ nouveau_bo_unmap(nvbo);
+ nouveau_bo_unpin(nvbo);
+ drm_framebuffer_put(fb);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 73a7eeba3973..1796d8824580 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -31,6 +31,8 @@
#include "nouveau_display.h"
+struct nouveau_vma;
+
struct nouveau_fbdev {
struct drm_fb_helper helper; /* must be first */
unsigned int saved_flags;
@@ -41,6 +43,7 @@ struct nouveau_fbdev {
struct nvif_object gdi;
struct nvif_object blit;
struct nvif_object twod;
+ struct nouveau_vma *vma;
struct mutex hotplug_lock;
bool hotplug_waiting;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f5ece1f94973..4c3f131ad31d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -76,8 +76,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
return ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev);
goto out;
+ }
ret = nouveau_vma_new(nvbo, vmm, &vma);
pm_runtime_mark_last_busy(dev);
@@ -157,8 +159,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma);
pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
}
+ pm_runtime_put_autosuspend(dev);
}
}
ttm_bo_unreserve(&nvbo->bo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 039e23548e08..23cd43a7fd19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -95,14 +95,3 @@ struct platform_driver nouveau_platform_driver = {
.probe = nouveau_platform_probe,
.remove = nouveau_platform_remove,
};
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
-MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 645fedd77e21..22f054f7ee3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -70,6 +70,12 @@ struct nouveau_svm {
#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
+struct nouveau_pfnmap_args {
+ struct nvif_ioctl_v0 i;
+ struct nvif_ioctl_mthd_v0 m;
+ struct nvif_vmm_pfnmap_v0 p;
+};
+
struct nouveau_ivmm {
struct nouveau_svmm *svmm;
u64 inst;
@@ -187,7 +193,8 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
addr = max(addr, vma->vm_start);
next = min(vma->vm_end, end);
/* This is a best effort so we ignore errors */
- nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
+ nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
+ next);
addr = next;
}
@@ -369,19 +376,6 @@ out_free:
return ret;
}
-static const u64
-nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
- [HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
- [HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
-};
-
-static const u64
-nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
- [HMM_PFN_ERROR ] = ~NVIF_VMM_PFNMAP_V0_V,
- [HMM_PFN_NONE ] = NVIF_VMM_PFNMAP_V0_NONE,
- [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
-};
-
/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
@@ -519,9 +513,45 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
.invalidate = nouveau_svm_range_invalidate,
};
+static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
+ struct hmm_range *range, u64 *ioctl_addr)
+{
+ unsigned long i, npages;
+
+ /*
+ * The ioctl_addr prepared here is passed through nvif_object_ioctl()
+ * to an eventual DMA map in something like gp100_vmm_pgt_pfn().
+ *
+ * This is all just encoding the internal HMM representation into a
+ * different nouveau-internal representation.
+ */
+ npages = (range->end - range->start) >> PAGE_SHIFT;
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
+ ioctl_addr[i] = 0;
+ continue;
+ }
+
+ page = hmm_pfn_to_page(range->hmm_pfns[i]);
+ if (is_device_private_page(page))
+ ioctl_addr[i] = nouveau_dmem_page_addr(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_VRAM;
+ else
+ ioctl_addr[i] = page_to_phys(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_HOST;
+ if (range->hmm_pfns[i] & HMM_PFN_WRITE)
+ ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
+ }
+}
+
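
nouveau_hmm_convert_pfn() translates each hmm_pfns[] entry, a page plus flag bits produced by hmm_range_fault(), into the u64 encoding the nouveau ioctl path expects: the page's physical address ORed with valid/write and VRAM-vs-host bits, with device-private (VRAM) pages resolved through nouveau_dmem_page_addr(). A generic sketch of this kind of address-plus-flags packing; the bit names below are illustrative, nouveau's real layout is the NVIF_VMM_PFNMAP_V0_* macros used above:

    #include <stdint.h>

    #define ENTRY_VALID    (1ULL << 0)   /* illustrative bit positions */
    #define ENTRY_WRITABLE (1ULL << 1)

    static uint64_t pack_entry(uint64_t phys_addr, int writable)
    {
            /* keep the page-aligned address, put flags in the low bits */
            uint64_t e = (phys_addr & ~0xfffULL) | ENTRY_VALID;

            if (writable)
                    e |= ENTRY_WRITABLE;
            return e;
    }
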
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm, void *data, u32 size,
- u64 *pfns, struct svm_notifier *notifier)
+ unsigned long hmm_pfns[], u64 *ioctl_addr,
+ struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -530,26 +560,27 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.notifier = &notifier->notifier,
.start = notifier->notifier.interval_tree.start,
.end = notifier->notifier.interval_tree.last + 1,
- .pfns = pfns,
- .flags = nouveau_svm_pfn_flags,
- .values = nouveau_svm_pfn_values,
- .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
+ .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
+ .hmm_pfns = hmm_pfns,
};
struct mm_struct *mm = notifier->notifier.mm;
- long ret;
+ int ret;
while (true) {
if (time_after(jiffies, timeout))
return -EBUSY;
range.notifier_seq = mmu_interval_read_begin(range.notifier);
- range.default_flags = 0;
- range.pfn_flags_mask = -1UL;
down_read(&mm->mmap_sem);
ret = hmm_range_fault(&range);
up_read(&mm->mmap_sem);
- if (ret <= 0) {
- if (ret == 0 || ret == -EBUSY)
+ if (ret) {
+ /*
+ * FIXME: the input PFN_REQ flags are destroyed on
+ * -EBUSY; we need to regenerate them, also for the
+ * other continue below.
+ */
+ if (ret == -EBUSY)
continue;
return ret;
}
@@ -563,7 +594,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
break;
}
- nouveau_dmem_convert_pfn(drm, &range);
+ nouveau_hmm_convert_pfn(drm, &range, ioctl_addr);
svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
@@ -590,6 +621,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
} i;
u64 phys[16];
} args;
+ unsigned long hmm_pfns[ARRAY_SIZE(args.phys)];
struct vm_area_struct *vma;
u64 inst, start, limit;
int fi, fn, pi, fill;
@@ -705,12 +737,17 @@ nouveau_svm_fault(struct nvif_notify *notify)
* access flags.
*XXX: atomic?
*/
- if (buffer->fault[fn]->access != 0 /* READ. */ &&
- buffer->fault[fn]->access != 3 /* PREFETCH. */) {
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
- NVIF_VMM_PFNMAP_V0_W;
- } else {
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
+ switch (buffer->fault[fn]->access) {
+ case 0: /* READ. */
+ hmm_pfns[pi++] = HMM_PFN_REQ_FAULT;
+ break;
+ case 3: /* PREFETCH. */
+ hmm_pfns[pi++] = 0;
+ break;
+ default:
+ hmm_pfns[pi++] = HMM_PFN_REQ_FAULT |
+ HMM_PFN_REQ_WRITE;
+ break;
}
args.i.p.size = pi << PAGE_SHIFT;
@@ -738,7 +775,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
fill = (buffer->fault[fn ]->addr -
buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
while (--fill)
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
+ hmm_pfns[pi++] = 0;
}
SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
@@ -754,7 +791,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
ret = nouveau_range_fault(
svmm, svm->drm, &args,
sizeof(args.i) + pi * sizeof(args.phys[0]),
- args.phys, &notifier);
+ hmm_pfns, args.phys, &notifier);
mmu_interval_notifier_remove(&notifier.notifier);
}
mmput(mm);
@@ -784,6 +821,56 @@ nouveau_svm_fault(struct nvif_notify *notify)
return NVIF_NOTIFY_KEEP;
}
+static struct nouveau_pfnmap_args *
+nouveau_pfns_to_args(void *pfns)
+{
+ return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
+}
+
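
nouveau_pfns_to_args() is the classic container_of() trick: callers only ever hold the p.phys array handed out by nouveau_pfns_alloc(), and the enclosing nouveau_pfnmap_args is recovered by subtracting the member's offset from that pointer. A standalone sketch with illustrative types (the kernel's container_of adds type checking on top of this arithmetic):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct wrapper {
            int header;
            unsigned long long payload[4];
    };

    static struct wrapper *wrapper_from_payload(unsigned long long *p)
    {
            /* p must point at w->payload inside some struct wrapper w */
            return container_of(p, struct wrapper, payload);
    }
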
+u64 *
+nouveau_pfns_alloc(unsigned long npages)
+{
+ struct nouveau_pfnmap_args *args;
+
+ args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
+ if (!args)
+ return NULL;
+
+ args->i.type = NVIF_IOCTL_V0_MTHD;
+ args->m.method = NVIF_VMM_V0_PFNMAP;
+ args->p.page = PAGE_SHIFT;
+
+ return args->p.phys;
+}
+
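
nouveau_pfns_alloc() sizes its allocation with struct_size(), which computes sizeof(*args) plus npages trailing p.phys elements and saturates instead of wrapping if the multiplication overflows, the preferred replacement for an open-coded sizeof-plus-multiply. A hedged sketch with an illustrative flexible-array struct:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct pfn_batch {
            unsigned int count;
            u64 phys[];                     /* flexible array member */
    };

    static struct pfn_batch *batch_alloc(unsigned long npages)
    {
            struct pfn_batch *b;

            /* struct_size() saturates on overflow rather than wrapping */
            b = kzalloc(struct_size(b, phys, npages), GFP_KERNEL);
            if (b)
                    b->count = npages;
            return b;
    }
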
+void
+nouveau_pfns_free(u64 *pfns)
+{
+ struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+
+ kfree(args);
+}
+
+void
+nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
+ unsigned long addr, u64 *pfns, unsigned long npages)
+{
+ struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+ int ret;
+
+ args->p.addr = addr;
+ args->p.size = npages << PAGE_SHIFT;
+
+ mutex_lock(&svmm->mutex);
+
+ svmm->vmm->vmm.object.client->super = true;
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
+ npages * sizeof(args->p.phys[0]), NULL);
+ svmm->vmm->vmm.object.client->super = false;
+
+ mutex_unlock(&svmm->mutex);
+}
+
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
index e839d8189461..f0fcd1b72e8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
+
+u64 *nouveau_pfns_alloc(unsigned long npages);
+void nouveau_pfns_free(u64 *pfns);
+void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
+ unsigned long addr, u64 *pfns, unsigned long npages);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index facd18564e0d..47428f79ede8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -149,7 +149,6 @@ int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
@@ -240,8 +239,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma->addr));
- OUT_RING(chan, lower_32_bits(fb->vma->addr));
+ OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -249,8 +248,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma->addr));
- OUT_RING(chan, lower_32_bits(fb->vma->addr));
+ OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
FIRE_RING(chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index c0deef4fe727..cb56163ed608 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -150,7 +150,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->helper.dev;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
@@ -240,8 +239,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma->addr));
- OUT_RING (chan, lower_32_bits(fb->vma->addr));
+ OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
@@ -251,8 +250,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma->addr));
- OUT_RING (chan, lower_32_bits(fb->vma->addr));
+ OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
FIRE_RING (chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index 4cc186262d34..38130ef272d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -140,7 +140,7 @@ nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
{
struct nvkm_instmem *imem = device->imem;
struct nvkm_memory *memory;
- int ret = -ENOSYS;
+ int ret;
if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
return -ENOSYS;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 79a8f9d305c5..49d468b45d3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -221,3 +221,14 @@ nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
subdev->debug = nvkm_dbgopt(device->dbgopt, name);
}
+
+int
+nvkm_subdev_new_(const struct nvkm_subdev_func *func,
+ struct nvkm_device *device, int index,
+ struct nvkm_subdev **psubdev)
+{
+ if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(func, device, index, *psubdev);
+ return 0;
+}
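
nvkm_subdev_new_() lifts the allocate-then-construct boilerplate that every bare subdev constructor used to repeat; the six ibus variants further down (gf100 through gp10b) each collapse from five lines to a single tail call. Sketched for a hypothetical subdev, where example_funcs would be that subdev's static const struct nvkm_subdev_func:

    int
    example_subdev_new(struct nvkm_device *device, int index,
                       struct nvkm_subdev **psubdev)
    {
            return nvkm_subdev_new_(&example_funcs, device, index, psubdev);
    }
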
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 8ebbe1656008..5b90c2a1bf3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2924,6 +2924,20 @@ nvkm_device_del(struct nvkm_device **pdevice)
}
}
+static inline bool
+nvkm_device_endianness(struct nvkm_device *device)
+{
+ u32 boot1 = nvkm_rd32(device, 0x000004) & 0x01000001;
+#ifdef __BIG_ENDIAN
+ if (!boot1)
+ return false;
+#else
+ if (boot1)
+ return false;
+#endif
+ return true;
+}
+
int
nvkm_device_ctor(const struct nvkm_device_func *func,
const struct nvkm_device_quirk *quirk,
@@ -2934,8 +2948,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
{
struct nvkm_subdev *subdev;
u64 mmio_base, mmio_size;
- u32 boot0, strap;
- void __iomem *map;
+ u32 boot0, boot1, strap;
int ret = -EEXIST, i;
unsigned chipset;
@@ -2961,26 +2974,30 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);
- /* identify the chipset, and determine classes of subdev/engines */
- if (detect) {
- map = ioremap(mmio_base, 0x102000);
- if (ret = -ENOMEM, map == NULL)
+ if (detect || mmio) {
+ device->pri = ioremap(mmio_base, mmio_size);
+ if (device->pri == NULL) {
+ nvdev_error(device, "unable to map PRI\n");
+ ret = -ENOMEM;
goto done;
+ }
+ }
+ /* identify the chipset, and determine classes of subdev/engines */
+ if (detect) {
/* switch mmio to cpu's native endianness */
-#ifndef __BIG_ENDIAN
- if (ioread32_native(map + 0x000004) != 0x00000000) {
-#else
- if (ioread32_native(map + 0x000004) == 0x00000000) {
-#endif
- iowrite32_native(0x01000001, map + 0x000004);
- ioread32_native(map);
+ if (!nvkm_device_endianness(device)) {
+ nvkm_wr32(device, 0x000004, 0x01000001);
+ nvkm_rd32(device, 0x000000);
+ if (!nvkm_device_endianness(device)) {
+ nvdev_error(device,
+ "GPU not supported on big-endian\n");
+ ret = -ENOSYS;
+ goto done;
+ }
}
- /* read boot0 and strapping information */
- boot0 = ioread32_native(map + 0x000000);
- strap = ioread32_native(map + 0x101000);
- iounmap(map);
+ boot0 = nvkm_rd32(device, 0x000000);
/* chipset can be overridden for devel/testing purposes */
chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
@@ -3138,6 +3155,17 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
nvdev_info(device, "NVIDIA %s (%08x)\n",
device->chip->name, boot0);
+ /* vGPU detection */
+ boot1 = nvkm_rd32(device, 0x0000004);
+ if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
+ nvdev_info(device, "vGPUs are not supported\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ /* read strapping information */
+ strap = nvkm_rd32(device, 0x101000);
+
/* determine frequency of timing crystal */
if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
(device->chipset >= 0x20 && device->chipset < 0x25))
@@ -3158,15 +3186,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
if (!device->name)
device->name = device->chip->name;
- if (mmio) {
- device->pri = ioremap(mmio_base, mmio_size);
- if (!device->pri) {
- nvdev_error(device, "unable to map PRI\n");
- ret = -ENOMEM;
- goto done;
- }
- }
-
mutex_init(&device->mutex);
for (i = 0; i < NVKM_SUBDEV_NR; i++) {
@@ -3254,6 +3273,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
ret = 0;
done:
+ if (device->pri && (!mmio || ret)) {
+ iounmap(device->pri);
+ device->pri = NULL;
+ }
mutex_unlock(&nv_devices_mutex);
return ret;
}
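
The restructuring above changes the PRI mapping's lifecycle: the register window is now ioremap()ed once, up front, whenever detection or a persistent mapping (mmio) is requested, and the done: label unmaps it again if the caller only needed it for detection or if construction failed. That replaces the old throwaway 0x102000-byte map with a single map/unmap pair. The shape of the pattern, with a hypothetical detect_hardware() standing in for the chipset probe:

    #include <linux/io.h>

    static int detect_hardware(void __iomem *regs) { (void)regs; return 0; }

    static int probe_regs(resource_size_t base, resource_size_t size,
                          bool keep, void __iomem **out)
    {
            void __iomem *regs = ioremap(base, size);
            int ret;

            if (!regs)
                    return -ENOMEM;

            ret = detect_hardware(regs);

            /* keep the mapping only on success, and only when asked to */
            if (!keep || ret) {
                    iounmap(regs);
                    regs = NULL;
            }
            *out = regs;
            return ret;
    }
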
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 0d584d0da59c..571687ba85b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -47,6 +47,7 @@ nvkm-y += nvkm/engine/disp/dp.o
nvkm-y += nvkm/engine/disp/hdagt215.o
nvkm-y += nvkm/engine/disp/hdagf119.o
+nvkm-y += nvkm/engine/disp/hdagv100.o
nvkm-y += nvkm/engine/disp/hdmi.o
nvkm-y += nvkm/engine/disp/hdmig84.o
@@ -74,6 +75,8 @@ nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
nvkm-y += nvkm/engine/disp/roottu102.o
+nvkm-y += nvkm/engine/disp/capsgv100.o
+
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
nvkm-y += nvkm/engine/disp/changv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c
new file mode 100644
index 000000000000..5026e530f4bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define gv100_disp_caps(p) container_of((p), struct gv100_disp_caps, object)
+#include "rootnv50.h"
+
+struct gv100_disp_caps {
+ struct nvkm_object object;
+ struct nv50_disp *disp;
+};
+
+static int
+gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct gv100_disp_caps *caps = gv100_disp_caps(object);
+ struct nvkm_device *device = caps->disp->base.engine.subdev.device;
+ *type = NVKM_OBJECT_MAP_IO;
+ *addr = 0x640000 + device->func->resource_addr(device, 0);
+ *size = 0x1000;
+ return 0;
+}
+
+static const struct nvkm_object_func
+gv100_disp_caps = {
+ .map = gv100_disp_caps_map,
+};
+
+int
+gv100_disp_caps_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+ struct gv100_disp_caps *caps;
+
+ if (!(caps = kzalloc(sizeof(*caps), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &caps->object;
+
+ nvkm_object_ctor(&gv100_disp_caps, oclass, &caps->object);
+ caps->disp = disp;
+ return 0;
+}
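
The new caps object does no register copying of its own: its .map hook simply reports a one-page physical window at BAR0 + 0x640000 so userspace can mmap the display capability registers directly, and the constructor just ties the object to its disp. The same object is wired into both the gv100 and tu102 root classes below as GV100_DISP_CAPS. Reduced to the essentials, with the nvkm object plumbing elided:

    #include <linux/types.h>

    /* Sketch: report a fixed MMIO window for the caller to map. */
    static int example_caps_map(u64 bar0_base, u64 *addr, u64 *size)
    {
            *addr = bar0_base + 0x640000;   /* caps registers */
            *size = 0x1000;                 /* one page */
            return 0;
    }
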
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
index 0fa0ec0a1de0..19d2d58344e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
@@ -24,10 +24,18 @@
#include "ior.h"
void
-gf119_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
+gf119_hda_device_entry(struct nvkm_ior *ior, int head)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
- const u32 soff = 0x030 * ior->id;
+ const u32 hoff = 0x800 * head;
+ nvkm_mask(device, 0x616548 + hoff, 0x00000070, head << 4);
+}
+
+void
+gf119_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
+{
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 soff = 0x030 * ior->id + (head * 0x04);
int i;
for (i = 0; i < size; i++)
@@ -41,14 +49,14 @@ void
gf119_hda_hpd(struct nvkm_ior *ior, int head, bool present)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
- const u32 hoff = 0x800 * head;
+ const u32 soff = 0x030 * ior->id + (head * 0x04);
u32 data = 0x80000000;
u32 mask = 0x80000001;
if (present) {
- nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
+ ior->func->hda.device_entry(ior, head);
data |= 0x00000001;
} else {
mask |= 0x00000002;
}
- nvkm_mask(device, 0x10ec10 + ior->id * 0x030, mask, data);
+ nvkm_mask(device, 0x10ec10 + soff, mask, data);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 4509d2ba880e..0d1b81fe1093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -24,7 +24,7 @@
#include "ior.h"
void
-gt215_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
+gt215_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 soff = ior->id * 0x800;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c
index 723af0b2dda0..57d374ecfeef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2020 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,26 +18,13 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
*/
+#include "ior.h"
-#include "dml_common_defs.h"
-#include "dcn_calc_math.h"
-
-#include "dml_inline_defs.h"
-
-double dml_round(double a)
+void
+gv100_hda_device_entry(struct nvkm_ior *ior, int head)
{
- double round_pt = 0.5;
- double ceil = dml_ceil(a, 1);
- double floor = dml_floor(a, 1);
-
- if (a - floor >= round_pt)
- return ceil;
- else
- return floor;
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 hoff = 0x800 * head;
+ nvkm_mask(device, 0x616528 + hoff, 0x00000070, head << 4);
}
-
-
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 009d3a8b7a50..c1d7a36e4d3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -87,7 +87,8 @@ struct nvkm_ior_func {
struct {
void (*hpd)(struct nvkm_ior *, int head, bool present);
- void (*eld)(struct nvkm_ior *, u8 *data, u8 size);
+ void (*eld)(struct nvkm_ior *, int head, u8 *data, u8 size);
+ void (*device_entry)(struct nvkm_ior *, int head);
} hda;
};
@@ -158,10 +159,13 @@ void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gm200_hdmi_scdc(struct nvkm_ior *, int, u8);
void gt215_hda_hpd(struct nvkm_ior *, int, bool);
-void gt215_hda_eld(struct nvkm_ior *, u8 *, u8);
+void gt215_hda_eld(struct nvkm_ior *, int, u8 *, u8);
void gf119_hda_hpd(struct nvkm_ior *, int, bool);
-void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
+void gf119_hda_eld(struct nvkm_ior *, int, u8 *, u8);
+void gf119_hda_device_entry(struct nvkm_ior *, int);
+
+void gv100_hda_device_entry(struct nvkm_ior *, int);
#define IOR_MSG(i,l,f,a...) do { \
struct nvkm_ior *_ior = (i); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
index 9c658d632d37..47efb48d769a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_root_func
gv100_disp_root = {
.user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{0,0,GV100_DISP_CURSOR }, gv100_disp_curs_new },
{{0,0,GV100_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
{{0,0,GV100_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 5f758948d6e1..a7672ef17d3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -155,7 +155,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp.audio(ior, hidx, true);
ior->func->hda.hpd(ior, hidx, true);
- ior->func->hda.eld(ior, data, size);
+ ior->func->hda.eld(ior, hidx, data, size);
} else {
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp.audio(ior, hidx, false);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index a1f942793f98..7070f5408d92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -24,6 +24,9 @@ int nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
+int gv100_disp_caps_new(const struct nvkm_oclass *, void *, u32,
+ struct nv50_disp *, struct nvkm_object **);
+
extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
extern const struct nvkm_disp_oclass g84_disp_root_oclass;
extern const struct nvkm_disp_oclass g94_disp_root_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
index 579a5d02308a..d8719d38b98a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_root_func
tu102_disp_root = {
.user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{0,0,TU102_DISP_CURSOR }, gv100_disp_curs_new },
{{0,0,TU102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
{{0,0,TU102_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 456a5a143522..3b3643fb1019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -177,6 +177,7 @@ gf119_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
index b94090edaebf..0c0925680790 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
@@ -43,6 +43,7 @@ gk104_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index e6965dec09c9..38045c92197f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -57,6 +57,7 @@ gm107_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 384f82652bec..cf2075db742a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -115,6 +115,7 @@ gm200_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
index b0597ff9a714..d11a0dff10c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -103,6 +103,7 @@ gv100_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index 4d5f3791ea7b..fa6d74251237 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -88,6 +88,7 @@ tu102_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 4209b24a46d7..ec330d791d15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -319,6 +319,17 @@ gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
return 0;
}
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
+MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
+#endif
+
static int
gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
index 8eb2a930a9b5..e4866a02e457 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -250,6 +250,11 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
list_add_tail(&lsf->head, &acr->lsf);
}
+ /* Ensure the falcon that'll provide ACR functions is booted first. */
+ lsf = nvkm_acr_falcon(device);
+ if (lsf)
+ list_move(&lsf->head, &acr->lsf);
+
if (!acr->wpr_fw || acr->wpr_comp)
wpr_size = acr->func->wpr_layout(acr);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
index aecce2dac558..667fa016496e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
@@ -100,25 +100,21 @@ nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
hsfw->data_size = lhdr->data_size;
hsfw->sig.prod.size = fwhdr->sig_prod_size;
- hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
+ hsfw->sig.prod.data = kmemdup(fw->data + fwhdr->sig_prod_offset + sig,
+ hsfw->sig.prod.size, GFP_KERNEL);
if (!hsfw->sig.prod.data) {
ret = -ENOMEM;
goto done;
}
- memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
- hsfw->sig.prod.size);
-
hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
- hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
+ hsfw->sig.dbg.data = kmemdup(fw->data + fwhdr->sig_dbg_offset + sig,
+ hsfw->sig.dbg.size, GFP_KERNEL);
if (!hsfw->sig.dbg.data) {
ret = -ENOMEM;
goto done;
}
- memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
- hsfw->sig.dbg.size);
-
hsfw->sig.patch_loc = loc;
done:
nvkm_firmware_put(fw);
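
Both signature buffers now use kmemdup(), which allocates and copies in one call. It is behaviorally identical to the removed kmalloc()+memcpy() pairs but shorter, and it cannot let the size used for the copy drift from the size used for the allocation. Minimal sketch:

    #include <linux/slab.h>

    /* Duplicate a blob; returns NULL on allocation failure. */
    static void *dup_blob(const void *src, size_t len)
    {
            return kmemdup(src, len, GFP_KERNEL);
    }
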
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index 06572f8ce914..f9c427559538 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -22,22 +22,39 @@
*/
#include "priv.h"
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct device *);
-#else
-static inline bool
-nouveau_acpi_rom_supported(struct device *dev)
+static int
+acpi_read_bios(acpi_handle rom_handle, u8 *bios, u32 offset, u32 length)
{
- return false;
-}
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_status status;
+ union acpi_object rom_arg_elements[2], *obj;
+ struct acpi_object_list rom_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-static inline int
-nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
+ rom_arg.count = 2;
+ rom_arg.pointer = &rom_arg_elements[0];
+
+ rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[0].integer.value = offset;
+
+ rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[1].integer.value = length;
+
+ status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_info("failed to evaluate ROM got %s\n",
+ acpi_format_exception(status));
+ return -ENODEV;
+ }
+ obj = (union acpi_object *)buffer.pointer;
+ length = min(length, obj->buffer.length);
+ memcpy(bios+offset, obj->buffer.pointer, length);
+ kfree(buffer.pointer);
+ return length;
+#else
return -EINVAL;
-}
#endif
+}
/* This version of the shadow function disobeys the ACPI spec and tries
* to fetch in units of more than 4KiB at a time. This is a LOT faster
@@ -51,7 +68,7 @@ acpi_read_fast(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
u32 fetch = limit - start;
if (nvbios_extend(bios, limit) >= 0) {
- int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
+ int ret = acpi_read_bios(data, bios->data, start, fetch);
if (ret == fetch)
return fetch;
}
@@ -73,9 +90,8 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
if (nvbios_extend(bios, limit) >= 0) {
while (start + fetch < limit) {
- int ret = nouveau_acpi_get_bios_chunk(bios->data,
- start + fetch,
- 0x1000);
+ int ret = acpi_read_bios(data, bios->data,
+ start + fetch, 0x1000);
if (ret != 0x1000)
break;
fetch += 0x1000;
@@ -88,9 +104,22 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
static void *
acpi_init(struct nvkm_bios *bios, const char *name)
{
- if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_status status;
+ acpi_handle dhandle, rom_handle;
+
+ dhandle = ACPI_HANDLE(bios->subdev.device->dev);
+ if (!dhandle)
return ERR_PTR(-ENODEV);
- return NULL;
+
+ status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENODEV);
+
+ return rom_handle;
+#else
+ return ERR_PTR(-ENODEV);
+#endif
}
const struct nvbios_source
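
acpi_init() now resolves everything itself: it takes the device's ACPI companion, looks up the _ROM child method, and returns that handle as the source's private cookie, which acpi_read_fast()/acpi_read_slow() later pass to acpi_read_bios() as data. _ROM takes two integer arguments (offset and length) and returns a buffer object, which is why the evaluation above builds a two-element acpi_object_list. A condensed sketch of that call shape (the caller frees out->pointer with kfree(), as the code above does):

    #include <linux/acpi.h>

    static acpi_status eval_two_ints(acpi_handle h, u64 a0, u64 a1,
                                     struct acpi_buffer *out)
    {
            union acpi_object args[2] = {
                    { .integer = { .type = ACPI_TYPE_INTEGER, .value = a0 } },
                    { .integer = { .type = ACPI_TYPE_INTEGER, .value = a1 } },
            };
            struct acpi_object_list list = { .count = 2, .pointer = args };

            out->length = ACPI_ALLOCATE_BUFFER;     /* ACPICA allocates */
            out->pointer = NULL;
            return acpi_evaluate_object(h, NULL, &list, out);
    }
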
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index d80dbc8f09b2..2340040942c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -114,9 +114,5 @@ int
gf100_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gf100_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gf100_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
index 3905a80da811..1124dadac145 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
@@ -43,9 +43,5 @@ int
gf117_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gf117_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gf117_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 9025ed1bd2a9..f3915f85838e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -117,9 +117,5 @@ int
gk104_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gk104_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gk104_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 1a4ab825852c..187d544378b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -81,9 +81,5 @@ int
gk20a_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gk20a_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gk20a_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
index c63328152bfa..0f1f0ad6377e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
@@ -32,9 +32,5 @@ int
gm200_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gm200_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gm200_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
index 39db90aa2c80..0347b367cefe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
@@ -51,9 +51,5 @@ int
gp10b_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gp10b_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gp10b_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 41640e0584ac..199f94e15c5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -580,7 +580,7 @@ nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
it.pte[it.lvl]++;
}
}
- };
+ }
nvkm_vmm_flush(&it);
return ~0ULL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 5e55ecbd8005..d3f8f916d0db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -304,7 +304,7 @@ int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
PTEI += _ptes; \
PTEN -= _ptes; \
- }; \
+ } \
nvkm_done((PT)->memory); \
} while(0)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 03b355dabab3..abf3eda683f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -36,8 +36,8 @@ probe_monitoring_device(struct nvkm_i2c_bus *bus,
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
- client = i2c_new_device(&bus->i2c, info);
- if (!client)
+ client = i2c_new_client_device(&bus->i2c, info);
+ if (IS_ERR(client))
return false;
if (!client->dev.driver ||
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index dbb90f2d2ccd..6639ee9b05d3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -3137,33 +3137,12 @@ static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
dispc_write_reg(dispc, DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(dispc, DISPC_TIMING_V(channel), timing_v);
- if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
- vs = false;
- else
- vs = true;
-
- if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
- hs = false;
- else
- hs = true;
-
- if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
- de = false;
- else
- de = true;
-
- if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
- ipc = false;
- else
- ipc = true;
-
- /* always use the 'rf' setting */
- onoff = true;
-
- if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
- rf = true;
- else
- rf = false;
+ vs = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ hs = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ de = !!(vm->flags & DISPLAY_FLAGS_DE_LOW);
+ ipc = !!(vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE);
+ onoff = true; /* always use the 'rf' setting */
+ rf = !!(vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE);
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
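
The rewrite above collapses six if/else ladders into direct flag tests and flips the polarity, testing the *_LOW / *_NEGEDGE flags rather than negating the *_HIGH ones. The double negation normalizes a nonzero bitwise-AND result to exactly 1, which matters when the destination is a plain integer rather than a bool:

    #include <stdio.h>

    #define FLAG_VSYNC_LOW (1u << 5)

    int main(void)
    {
            unsigned int flags = FLAG_VSYNC_LOW;

            unsigned int raw  = flags & FLAG_VSYNC_LOW;     /* 32 */
            unsigned int norm = !!(flags & FLAG_VSYNC_LOW); /* 1  */

            printf("%u %u\n", raw, norm);   /* prints "32 1" */
            return 0;
    }
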
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 766553bb2f87..9701843ccf09 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -208,49 +208,6 @@ static const struct venc_config venc_config_ntsc_trm = {
.gen_ctrl = 0x00F90000,
};
-static const struct venc_config venc_config_pal_bdghi = {
- .f_control = 0,
- .vidout_ctrl = 0,
- .sync_ctrl = 0,
- .hfltr_ctrl = 0,
- .x_color = 0,
- .line21 = 0,
- .ln_sel = 21,
- .htrigger_vtrigger = 0,
- .tvdetgp_int_start_stop_x = 0x00140001,
- .tvdetgp_int_start_stop_y = 0x00010001,
- .gen_ctrl = 0x00FB0000,
-
- .llen = 864-1,
- .flens = 625-1,
- .cc_carr_wss_carr = 0x2F7625ED,
- .c_phase = 0xDF,
- .gain_u = 0x111,
- .gain_v = 0x181,
- .gain_y = 0x140,
- .black_level = 0x3e,
- .blank_level = 0x3e,
- .m_control = 0<<2 | 1<<1,
- .bstamp_wss_data = 0x42,
- .s_carr = 0x2a098acb,
- .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0,
- .savid__eavid = 0x06A70108,
- .flen__fal = 23<<16 | 624<<0,
- .lal__phase_reset = 2<<17 | 310<<0,
- .hs_int_start_stop_x = 0x00920358,
- .hs_ext_start_stop_x = 0x000F035F,
- .vs_int_start_x = 0x1a7<<16,
- .vs_int_stop_x__vs_int_start_y = 0x000601A7,
- .vs_int_stop_y__vs_ext_start_x = 0x01AF0036,
- .vs_ext_stop_x__vs_ext_start_y = 0x27101af,
- .vs_ext_stop_y = 0x05,
- .avid_start_stop_x = 0x03530082,
- .avid_start_stop_y = 0x0270002E,
- .fid_int_start_x__fid_int_start_y = 0x0005008A,
- .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
- .fid_ext_start_y__fid_ext_offset_y = 0x01380005,
-};
-
enum venc_videomode {
VENC_MODE_UNKNOWN,
VENC_MODE_PAL,
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 34dfb33145b4..b57fbe8a0ac2 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -80,31 +80,16 @@ static struct drm_info_list omap_dmm_debugfs_list[] = {
{"tiler_map", tiler_map_show, 0},
};
-int omap_debugfs_init(struct drm_minor *minor)
+void omap_debugfs_init(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(omap_debugfs_list,
- ARRAY_SIZE(omap_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install omap_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list),
+ minor->debugfs_root, minor);
if (dmm_is_available())
- ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
- ARRAY_SIZE(omap_dmm_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
- return ret;
- }
-
- return ret;
+ drm_debugfs_create_files(omap_dmm_debugfs_list,
+ ARRAY_SIZE(omap_dmm_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 7c4b66efcaa7..8a1fac680138 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -82,6 +82,6 @@ struct omap_drm_private {
};
-int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_init(struct drm_minor *minor);
#endif /* __OMAPDRM_DRV_H__ */
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index a1723c1b5fbf..39055c1f0e2f 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,6 +18,16 @@ config DRM_PANEL_ARM_VERSATILE
reference designs. The panel is detected using special registers
in the Versatile family syscon registers.
+config DRM_PANEL_ASUS_Z00T_TM5P5_NT35596
+ tristate "ASUS Z00T TM5P5 NT35596 panel"
+ depends on GPIOLIB && OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the ASUS TM5P5
+ NT35596 1080x1920 video mode panel as found in some Asus
+ Zenfone 2 Laser Z00T devices.
+
config DRM_PANEL_BOE_HIMAX8279D
tristate "Boe Himax8279d panel"
depends on OF
@@ -137,6 +147,17 @@ config DRM_PANEL_KINGDISPLAY_KD097D04
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_LEADTEK_LTK050H3146W
+ tristate "Leadtek LTK050H3146W panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Leadtek LTK050H3146W
+ TFT-LCD modules. The panel has a 720x1280 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
config DRM_PANEL_LEADTEK_LTK500HD1829
tristate "Leadtek LTK500HD1829 panel"
depends on OF
@@ -433,6 +454,14 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+config DRM_PANEL_VISIONOX_RM69299
+ tristate "Visionox RM69299"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for Visionox
+ RM69299 DSI Video Mode panel.
+
config DRM_PANEL_XINPENG_XPP055C272
tristate "Xinpeng XPP055C272 panel driver"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 96a883cd6630..de74f282c433 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
+obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
@@ -46,4 +48,5 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
new file mode 100644
index 000000000000..39e0f0373f3c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct tm5p5_nt35596 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ bool prepared;
+};
+
+static inline struct tm5p5_nt35596 *to_tm5p5_nt35596(struct drm_panel *panel)
+{
+ return container_of(panel, struct tm5p5_nt35596, panel);
+}
+
+#define dsi_generic_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
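
These two macros wrap the recurring init-sequence pattern in DSI panel drivers: each step sends a short static byte array and bails out of the calling function on error. Because the body contains a return statement, the macros are only usable inside functions returning int, which tm5p5_nt35596_on()/..._off() below satisfy. Roughly, dsi_generic_write_seq(dsi, 0xff, 0x05) expands to:

    do {
            static const u8 d[] = { 0xff, 0x05 };
            int ret;

            ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d));
            if (ret < 0)
                    return ret;     /* early-exits the enclosing function */
    } while (0);
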
+static void tm5p5_nt35596_reset(struct tm5p5_nt35596 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(15000, 16000);
+}
+
+static int tm5p5_nt35596_on(struct tm5p5_nt35596 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+
+ dsi_generic_write_seq(dsi, 0xff, 0x05);
+ dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ dsi_generic_write_seq(dsi, 0xc5, 0x31);
+ dsi_generic_write_seq(dsi, 0xff, 0x04);
+ dsi_generic_write_seq(dsi, 0x01, 0x84);
+ dsi_generic_write_seq(dsi, 0x05, 0x25);
+ dsi_generic_write_seq(dsi, 0x06, 0x01);
+ dsi_generic_write_seq(dsi, 0x07, 0x20);
+ dsi_generic_write_seq(dsi, 0x08, 0x06);
+ dsi_generic_write_seq(dsi, 0x09, 0x08);
+ dsi_generic_write_seq(dsi, 0x0a, 0x10);
+ dsi_generic_write_seq(dsi, 0x0b, 0x10);
+ dsi_generic_write_seq(dsi, 0x0c, 0x10);
+ dsi_generic_write_seq(dsi, 0x0d, 0x14);
+ dsi_generic_write_seq(dsi, 0x0e, 0x14);
+ dsi_generic_write_seq(dsi, 0x0f, 0x14);
+ dsi_generic_write_seq(dsi, 0x10, 0x14);
+ dsi_generic_write_seq(dsi, 0x11, 0x14);
+ dsi_generic_write_seq(dsi, 0x12, 0x14);
+ dsi_generic_write_seq(dsi, 0x17, 0xf3);
+ dsi_generic_write_seq(dsi, 0x18, 0xc0);
+ dsi_generic_write_seq(dsi, 0x19, 0xc0);
+ dsi_generic_write_seq(dsi, 0x1a, 0xc0);
+ dsi_generic_write_seq(dsi, 0x1b, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1c, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1d, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1e, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1f, 0xb3);
+ dsi_generic_write_seq(dsi, 0x20, 0xb3);
+ dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ dsi_generic_write_seq(dsi, 0xff, 0x00);
+ dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ dsi_generic_write_seq(dsi, 0x35, 0x01);
+ dsi_generic_write_seq(dsi, 0xd3, 0x06);
+ dsi_generic_write_seq(dsi, 0xd4, 0x04);
+ dsi_generic_write_seq(dsi, 0x5e, 0x0d);
+ dsi_generic_write_seq(dsi, 0x11, 0x00);
+ msleep(100);
+ dsi_generic_write_seq(dsi, 0x29, 0x00);
+ dsi_generic_write_seq(dsi, 0x53, 0x24);
+
+ return 0;
+}
+
+static int tm5p5_nt35596_off(struct tm5p5_nt35596 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(60);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ dsi_dcs_write_seq(dsi, 0x4f, 0x01);
+
+ return 0;
+}
+
+static int tm5p5_nt35596_prepare(struct drm_panel *panel)
+{
+ struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ tm5p5_nt35596_reset(ctx);
+
+ ret = tm5p5_nt35596_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int tm5p5_nt35596_unprepare(struct drm_panel *panel)
+{
+ struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = tm5p5_nt35596_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode tm5p5_nt35596_mode = {
+ .clock = (1080 + 100 + 8 + 16) * (1920 + 4 + 2 + 4) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 100,
+ .hsync_end = 1080 + 100 + 8,
+ .htotal = 1080 + 100 + 8 + 16,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 4,
+ .vsync_end = 1920 + 4 + 2,
+ .vtotal = 1920 + 4 + 2 + 4,
+ .vrefresh = 60,
+ .width_mm = 68,
+ .height_mm = 121,
+};
+
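
The .clock initializer is derived from the timings themselves: total pixels per line times total lines times the refresh rate, divided by 1000 because drm_display_mode.clock is in kHz. For this panel that is 1204 x 1930 x 60 / 1000 = 139423 kHz, i.e. a ~139.4 MHz pixel clock:

    #include <stdio.h>

    int main(void)
    {
            int htotal = 1080 + 100 + 8 + 16;       /* 1204 */
            int vtotal = 1920 + 4 + 2 + 4;          /* 1930 */
            int clock_khz = htotal * vtotal * 60 / 1000;

            printf("%d kHz\n", clock_khz);          /* 139423 */
            return 0;
    }
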
+static int tm5p5_nt35596_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &tm5p5_nt35596_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs tm5p5_nt35596_panel_funcs = {
+ .prepare = tm5p5_nt35596_prepare,
+ .unprepare = tm5p5_nt35596_unprepare,
+ .get_modes = tm5p5_nt35596_get_modes,
+};
+
+static int tm5p5_nt35596_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = bl->props.brightness;
+ int ret;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int tm5p5_nt35596_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = bl->props.brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness & 0xff;
+}
+
+static const struct backlight_ops tm5p5_nt35596_bl_ops = {
+ .update_status = tm5p5_nt35596_bl_update_status,
+ .get_brightness = tm5p5_nt35596_bl_get_brightness,
+};
+
+static struct backlight_device *
+tm5p5_nt35596_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &tm5p5_nt35596_bl_ops, &props);
+}
+
+static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct tm5p5_nt35596 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vdd";
+ ctx->supplies[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+ return ret;
+ }
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight)) {
+ ret = PTR_ERR(ctx->panel.backlight);
+ dev_err(dev, "Failed to create backlight: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add panel: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
+{
+ struct tm5p5_nt35596 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev,
+ "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id tm5p5_nt35596_of_match[] = {
+ { .compatible = "asus,z00t-tm5p5-n35596" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tm5p5_nt35596_of_match);
+
+static struct mipi_dsi_driver tm5p5_nt35596_driver = {
+ .probe = tm5p5_nt35596_probe,
+ .remove = tm5p5_nt35596_remove,
+ .driver = {
+ .name = "panel-tm5p5-nt35596",
+ .of_match_table = tm5p5_nt35596_of_match,
+ },
+};
+module_mipi_dsi_driver(tm5p5_nt35596_driver);
+
+MODULE_AUTHOR("Konrad Dybcio <konradybcio@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for tm5p5 nt35596 1080p video mode dsi panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 48a164257d18..46fe1805c588 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -696,6 +696,34 @@ static const struct panel_desc auo_b101uan08_3_desc = {
.init_cmds = auo_b101uan08_3_init_cmd,
};
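+
+/*
+ * 1364 x 1954 @ 60 Hz: 1364 * 1954 * 60 = 159915360 Hz, i.e. roughly
+ * the 159916 kHz pixel clock below.
+ */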
+static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
+ .clock = 159916,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 60,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 20,
+ .vsync_end = 1920 + 20 + 4,
+ .vtotal = 1920 + 20 + 4 + 10,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv105wum_nw0_desc = {
+ .modes = &boe_tv105wum_nw0_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 141,
+ .height_mm = 226,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+};
+
static int boe_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -834,6 +862,9 @@ static const struct of_device_id boe_of_match[] = {
{ .compatible = "auo,b101uan08.3",
.data = &auo_b101uan08_3_desc
},
+ { .compatible = "boe,tv105wum-nw0",
+ .data = &boe_tv105wum_nw0_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_of_match);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 09935520e606..873b1c7059bd 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -379,7 +379,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
"can't set up VCOM amplitude (%d)\n", ret);
return ret;
}
- };
+ }
if (ili->vcom_high != U8_MAX) {
ret = regmap_write(ili->regmap, ILI9322_VCOM_HIGH,
@@ -388,7 +388,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
dev_err(ili->dev, "can't set up VCOM high (%d)\n", ret);
return ret;
}
- };
+ }
/* Set up gamma correction */
for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
new file mode 100644
index 000000000000..5a7a31c8513e
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct ltk050h3146w_cmd {
+ char cmd;
+ char data;
+};
+
+struct ltk050h3146w;
+struct ltk050h3146w_desc {
+ const struct drm_display_mode *mode;
+ int (*init)(struct ltk050h3146w *ctx);
+};
+
+struct ltk050h3146w {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vci;
+ struct regulator *iovcc;
+ const struct ltk050h3146w_desc *panel_desc;
+ bool prepared;
+};
+
+static const struct ltk050h3146w_cmd page1_cmds[] = {
+ { 0x22, 0x0A }, /* BGR SS GS */
+ { 0x31, 0x00 }, /* column inversion */
+ { 0x53, 0xA2 }, /* VCOM1 */
+ { 0x55, 0xA2 }, /* VCOM2 */
+ { 0x50, 0x81 }, /* VREG1OUT=5V */
+ { 0x51, 0x85 }, /* VREG2OUT=-5V */
+ { 0x62, 0x0D }, /* EQT Time setting */
+/*
+ * The vendor init selected page 1 here _again_.
+ * Is this supposed to be page 2?
+ */
+ { 0xA0, 0x00 },
+ { 0xA1, 0x1A },
+ { 0xA2, 0x28 },
+ { 0xA3, 0x13 },
+ { 0xA4, 0x16 },
+ { 0xA5, 0x29 },
+ { 0xA6, 0x1D },
+ { 0xA7, 0x1E },
+ { 0xA8, 0x84 },
+ { 0xA9, 0x1C },
+ { 0xAA, 0x28 },
+ { 0xAB, 0x75 },
+ { 0xAC, 0x1A },
+ { 0xAD, 0x19 },
+ { 0xAE, 0x4D },
+ { 0xAF, 0x22 },
+ { 0xB0, 0x28 },
+ { 0xB1, 0x54 },
+ { 0xB2, 0x66 },
+ { 0xB3, 0x39 },
+ { 0xC0, 0x00 },
+ { 0xC1, 0x1A },
+ { 0xC2, 0x28 },
+ { 0xC3, 0x13 },
+ { 0xC4, 0x16 },
+ { 0xC5, 0x29 },
+ { 0xC6, 0x1D },
+ { 0xC7, 0x1E },
+ { 0xC8, 0x84 },
+ { 0xC9, 0x1C },
+ { 0xCA, 0x28 },
+ { 0xCB, 0x75 },
+ { 0xCC, 0x1A },
+ { 0xCD, 0x19 },
+ { 0xCE, 0x4D },
+ { 0xCF, 0x22 },
+ { 0xD0, 0x28 },
+ { 0xD1, 0x54 },
+ { 0xD2, 0x66 },
+ { 0xD3, 0x39 },
+};
+
+static const struct ltk050h3146w_cmd page3_cmds[] = {
+ { 0x01, 0x00 },
+ { 0x02, 0x00 },
+ { 0x03, 0x73 },
+ { 0x04, 0x00 },
+ { 0x05, 0x00 },
+ { 0x06, 0x0a },
+ { 0x07, 0x00 },
+ { 0x08, 0x00 },
+ { 0x09, 0x01 },
+ { 0x0a, 0x00 },
+ { 0x0b, 0x00 },
+ { 0x0c, 0x01 },
+ { 0x0d, 0x00 },
+ { 0x0e, 0x00 },
+ { 0x0f, 0x1d },
+ { 0x10, 0x1d },
+ { 0x11, 0x00 },
+ { 0x12, 0x00 },
+ { 0x13, 0x00 },
+ { 0x14, 0x00 },
+ { 0x15, 0x00 },
+ { 0x16, 0x00 },
+ { 0x17, 0x00 },
+ { 0x18, 0x00 },
+ { 0x19, 0x00 },
+ { 0x1a, 0x00 },
+ { 0x1b, 0x00 },
+ { 0x1c, 0x00 },
+ { 0x1d, 0x00 },
+ { 0x1e, 0x40 },
+ { 0x1f, 0x80 },
+ { 0x20, 0x06 },
+ { 0x21, 0x02 },
+ { 0x22, 0x00 },
+ { 0x23, 0x00 },
+ { 0x24, 0x00 },
+ { 0x25, 0x00 },
+ { 0x26, 0x00 },
+ { 0x27, 0x00 },
+ { 0x28, 0x33 },
+ { 0x29, 0x03 },
+ { 0x2a, 0x00 },
+ { 0x2b, 0x00 },
+ { 0x2c, 0x00 },
+ { 0x2d, 0x00 },
+ { 0x2e, 0x00 },
+ { 0x2f, 0x00 },
+ { 0x30, 0x00 },
+ { 0x31, 0x00 },
+ { 0x32, 0x00 },
+ { 0x33, 0x00 },
+ { 0x34, 0x04 },
+ { 0x35, 0x00 },
+ { 0x36, 0x00 },
+ { 0x37, 0x00 },
+ { 0x38, 0x3C },
+ { 0x39, 0x35 },
+ { 0x3A, 0x01 },
+ { 0x3B, 0x40 },
+ { 0x3C, 0x00 },
+ { 0x3D, 0x01 },
+ { 0x3E, 0x00 },
+ { 0x3F, 0x00 },
+ { 0x40, 0x00 },
+ { 0x41, 0x88 },
+ { 0x42, 0x00 },
+ { 0x43, 0x00 },
+ { 0x44, 0x1F },
+ { 0x50, 0x01 },
+ { 0x51, 0x23 },
+ { 0x52, 0x45 },
+ { 0x53, 0x67 },
+ { 0x54, 0x89 },
+ { 0x55, 0xab },
+ { 0x56, 0x01 },
+ { 0x57, 0x23 },
+ { 0x58, 0x45 },
+ { 0x59, 0x67 },
+ { 0x5a, 0x89 },
+ { 0x5b, 0xab },
+ { 0x5c, 0xcd },
+ { 0x5d, 0xef },
+ { 0x5e, 0x11 },
+ { 0x5f, 0x01 },
+ { 0x60, 0x00 },
+ { 0x61, 0x15 },
+ { 0x62, 0x14 },
+ { 0x63, 0x0E },
+ { 0x64, 0x0F },
+ { 0x65, 0x0C },
+ { 0x66, 0x0D },
+ { 0x67, 0x06 },
+ { 0x68, 0x02 },
+ { 0x69, 0x07 },
+ { 0x6a, 0x02 },
+ { 0x6b, 0x02 },
+ { 0x6c, 0x02 },
+ { 0x6d, 0x02 },
+ { 0x6e, 0x02 },
+ { 0x6f, 0x02 },
+ { 0x70, 0x02 },
+ { 0x71, 0x02 },
+ { 0x72, 0x02 },
+ { 0x73, 0x02 },
+ { 0x74, 0x02 },
+ { 0x75, 0x01 },
+ { 0x76, 0x00 },
+ { 0x77, 0x14 },
+ { 0x78, 0x15 },
+ { 0x79, 0x0E },
+ { 0x7a, 0x0F },
+ { 0x7b, 0x0C },
+ { 0x7c, 0x0D },
+ { 0x7d, 0x06 },
+ { 0x7e, 0x02 },
+ { 0x7f, 0x07 },
+ { 0x80, 0x02 },
+ { 0x81, 0x02 },
+ { 0x82, 0x02 },
+ { 0x83, 0x02 },
+ { 0x84, 0x02 },
+ { 0x85, 0x02 },
+ { 0x86, 0x02 },
+ { 0x87, 0x02 },
+ { 0x88, 0x02 },
+ { 0x89, 0x02 },
+ { 0x8A, 0x02 },
+};
+
+static const struct ltk050h3146w_cmd page4_cmds[] = {
+ { 0x70, 0x00 },
+ { 0x71, 0x00 },
+ { 0x82, 0x0F }, /* VGH_MOD clamp level=15v */
+ { 0x84, 0x0F }, /* VGH clamp level 15V */
+ { 0x85, 0x0D }, /* VGL clamp level (-10V) */
+ { 0x32, 0xAC },
+ { 0x8C, 0x80 },
+ { 0x3C, 0xF5 },
+ { 0xB5, 0x07 }, /* GAMMA OP */
+ { 0x31, 0x45 }, /* SOURCE OP */
+ { 0x3A, 0x24 }, /* PS_EN OFF */
+ { 0x88, 0x33 }, /* LVD */
+};
+
+static inline
+struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel)
+{
+ return container_of(panel, struct ltk050h3146w, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
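+
+/*
+ * Each invocation expands to its own block with a static const payload;
+ * e.g. dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5) becomes roughly:
+ *
+ *	do {
+ *		static const u8 d[] = { 0x00, 0xb5 };
+ *		int ret;
+ *
+ *		ret = mipi_dsi_dcs_write(dsi, 0xb2, d, ARRAY_SIZE(d));
+ *		if (ret < 0)
+ *			return ret;
+ *	} while (0);
+ *
+ * The early return means the macro may only be used in functions
+ * returning int, and the inner ret shadows any ret in the caller.
+ */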
+
+static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor without much
+ * documentation.
+ */
+ dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
+ dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
+ 0x01);
+ dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
+ dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
+ dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
+
+ dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
+ dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
+ 0x28, 0x04, 0xcc, 0xcc, 0xcc);
+ dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
+ dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
+ dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
+ dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
+ dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
+ 0x80);
+ dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
+ 0x16, 0x00, 0x00);
+ dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
+ 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
+ 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
+ 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
+ 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
+ dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
+ 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
+ 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
+ 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
+ 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
+ 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
+ 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
+ dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
+ 0x21, 0x00, 0x60);
+ dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
+ dsi_dcs_write_seq(dsi, 0xde, 0x02);
+ dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
+ dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
+ dsi_dcs_write_seq(dsi, 0xc1, 0x11);
+ dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
+ dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
+ dsi_dcs_write_seq(dsi, 0xde, 0x00);
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VHBLANK);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+ ret);
+ return ret;
+ }
+
+ msleep(60);
+
+ return 0;
+}
+
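+/*
+ * 812 x 1314 @ 60 Hz: pixel clock = htotal * vtotal * vrefresh
+ * = 812 * 1314 * 60 = 64018080 Hz, i.e. the 64018 kHz below.
+ */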
+static const struct drm_display_mode ltk050h3146w_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 42,
+ .hsync_end = 720 + 42 + 8,
+ .htotal = 720 + 42 + 8 + 42,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 12,
+ .vsync_end = 1280 + 12 + 4,
+ .vtotal = 1280 + 12 + 4 + 18,
+ .clock = 64018,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_data = {
+ .mode = &ltk050h3146w_mode,
+ .init = ltk050h3146w_init_sequence,
+};
+
+static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
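+ /* DCS command 0xff with the 0x98/0x81 magic selects the vendor register page */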
+ u8 d[3] = { 0x98, 0x81, page };
+
+ return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d));
+}
+
+static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page,
+ const struct ltk050h3146w_cmd *cmds,
+ int num)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int i, ret;
+
+ ret = ltk050h3146w_a2_select_page(ctx, page);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to select page %d: %d\n",
+ page, ret);
+ return ret;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = mipi_dsi_generic_write(dsi, &cmds[i],
+ sizeof(struct ltk050h3146w_cmd));
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to write page %d init cmds: %d\n",
+ page, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor without much
+ * documentation.
+ */
+ ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds,
+ ARRAY_SIZE(page3_cmds));
+ if (ret < 0)
+ return ret;
+
+ ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds,
+ ARRAY_SIZE(page4_cmds));
+ if (ret < 0)
+ return ret;
+
+ ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds,
+ ARRAY_SIZE(page1_cmds));
+ if (ret < 0)
+ return ret;
+
+ ret = ltk050h3146w_a2_select_page(ctx, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to select page 0: %d\n", ret);
+ return ret;
+ }
+
+ /* The vendor init called this without the required tear mode argument */
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+ ret);
+ return ret;
+ }
+
+ msleep(60);
+
+ return 0;
+}
+
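+/*
+ * 832 x 1314 @ 60 Hz: 832 * 1314 * 60 = 65594880 Hz, i.e. the
+ * 65595 kHz below.
+ */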
+static const struct drm_display_mode ltk050h3146w_a2_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 42,
+ .hsync_end = 720 + 42 + 10,
+ .htotal = 720 + 42 + 10 + 60,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 18,
+ .vsync_end = 1280 + 18 + 4,
+ .vtotal = 1280 + 18 + 4 + 12,
+ .clock = 65595,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_a2_data = {
+ .mode = &ltk050h3146w_a2_mode,
+ .init = ltk050h3146w_a2_init_sequence,
+};
+
+static int ltk050h3146w_unprepare(struct drm_panel *panel)
+{
+ struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ return ret;
+ }
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vci);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int ltk050h3146w_prepare(struct drm_panel *panel)
+{
+ struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vci);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vci supply: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vci;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(20);
+
+ ret = ctx->panel_desc->init(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ /* T9: 120ms */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(50);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vci:
+ regulator_disable(ctx->vci);
+ return ret;
+}
+
+static int ltk050h3146w_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ltk050h3146w_funcs = {
+ .unprepare = ltk050h3146w_unprepare,
+ .prepare = ltk050h3146w_prepare,
+ .get_modes = ltk050h3146w_get_modes,
+};
+
+static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct ltk050h3146w *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->panel_desc = of_device_get_match_data(dev);
+ if (!ctx->panel_desc)
+ return -EINVAL;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vci = devm_regulator_get(dev, "vci");
+ if (IS_ERR(ctx->vci)) {
+ ret = PTR_ERR(ctx->vci);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vci regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
+{
+ struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ltk050h3146w_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id ltk050h3146w_of_match[] = {
+ {
+ .compatible = "leadtek,ltk050h3146w",
+ .data = &ltk050h3146w_data,
+ },
+ {
+ .compatible = "leadtek,ltk050h3146w-a2",
+ .data = &ltk050h3146w_a2_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltk050h3146w_of_match);
+
+static struct mipi_dsi_driver ltk050h3146w_driver = {
+ .driver = {
+ .name = "panel-leadtek-ltk050h3146w",
+ .of_match_table = ltk050h3146w_of_match,
+ },
+ .probe = ltk050h3146w_probe,
+ .remove = ltk050h3146w_remove,
+ .shutdown = ltk050h3146w_shutdown,
+};
+module_mipi_dsi_driver(ltk050h3146w_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Leadtek LTK050H3146W MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 76ecf2de9c44..113ab9c0396b 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -377,7 +377,7 @@ static const struct drm_display_mode default_mode = {
.vsync_end = 1280 + 30 + 4,
.vtotal = 1280 + 30 + 4 + 12,
.vrefresh = 60,
- .clock = 41600,
+ .clock = 69217,
.width_mm = 62,
.height_mm = 110,
};
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index a470810f7dbe..05cae8d62d56 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -49,7 +49,8 @@ enum nt39016_regs {
#define NT39016_SYSTEM_STANDBY BIT(1)
struct nt39016_panel_info {
- struct drm_display_mode display_mode;
+ const struct drm_display_mode *display_modes;
+ unsigned int num_modes;
u16 width_mm, height_mm;
u32 bus_format, bus_flags;
};
@@ -212,15 +213,22 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
struct nt39016 *panel = to_nt39016(drm_panel);
const struct nt39016_panel_info *panel_info = panel->panel_info;
struct drm_display_mode *mode;
+ unsigned int i;
- mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode);
- if (!mode)
- return -ENOMEM;
+ for (i = 0; i < panel_info->num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel_info->display_modes[i]);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
- drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER;
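+ /* Only mark a mode as preferred when there is no alternative */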
+ if (panel_info->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
+ drm_mode_probed_add(connector, mode);
+ }
connector->display_info.bpc = 8;
connector->display_info.width_mm = panel_info->width_mm;
@@ -230,7 +238,7 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
&panel_info->bus_format, 1);
connector->display_info.bus_flags = panel_info->bus_flags;
- return 1;
+ return panel_info->num_modes;
}
static const struct drm_panel_funcs nt39016_funcs = {
@@ -316,8 +324,8 @@ static int nt39016_remove(struct spi_device *spi)
return 0;
}
-static const struct nt39016_panel_info kd035g6_info = {
- .display_mode = {
+static const struct drm_display_mode kd035g6_display_modes[] = {
+ { /* 60 Hz */
.clock = 6000,
.hdisplay = 320,
.hsync_start = 320 + 10,
@@ -330,6 +338,24 @@ static const struct nt39016_panel_info kd035g6_info = {
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
},
+ { /* 50 Hz */
+ .clock = 5400,
+ .hdisplay = 320,
+ .hsync_start = 320 + 42,
+ .hsync_end = 320 + 42 + 50,
+ .htotal = 320 + 42 + 50 + 20,
+ .vdisplay = 240,
+ .vsync_start = 240 + 5,
+ .vsync_end = 240 + 5 + 1,
+ .vtotal = 240 + 5 + 1 + 4,
+ .vrefresh = 50,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct nt39016_panel_info kd035g6_info = {
+ .display_modes = kd035g6_display_modes,
+ .num_modes = ARRAY_SIZE(kd035g6_display_modes),
.width_mm = 71,
.height_mm = 53,
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3ad828eaefe1..b6ecd1552132 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -108,6 +109,7 @@ struct panel_simple {
struct i2c_adapter *ddc;
struct gpio_desc *enable_gpio;
+ struct gpio_desc *hpd_gpio;
struct drm_display_mode override_mode;
};
@@ -259,11 +261,37 @@ static int panel_simple_unprepare(struct drm_panel *panel)
return 0;
}
+static int panel_simple_get_hpd_gpio(struct device *dev,
+ struct panel_simple *p, bool from_probe)
+{
+ int err;
+
+ p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(p->hpd_gpio)) {
+ err = PTR_ERR(p->hpd_gpio);
+
+ /*
+ * If we're called from probe we won't consider '-EPROBE_DEFER'
+ * to be an error--we'll leave the error code in "hpd_gpio".
+ * When we try to use it we'll try again. This allows for
+ * circular dependencies where the component providing the
+ * hpd gpio needs the panel to init before probing.
+ */
+ if (err != -EPROBE_DEFER || !from_probe) {
+ dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int panel_simple_prepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
unsigned int delay;
int err;
+ int hpd_asserted;
if (p->prepared)
return 0;
@@ -282,6 +310,26 @@ static int panel_simple_prepare(struct drm_panel *panel)
if (delay)
msleep(delay);
+ if (p->hpd_gpio) {
+ if (IS_ERR(p->hpd_gpio)) {
+ err = panel_simple_get_hpd_gpio(panel->dev, p, false);
+ if (err)
+ return err;
+ }
+
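+ /*
+ * Poll the HPD GPIO roughly every millisecond for up to two
+ * seconds until it reads as asserted. A negative reading (a
+ * GPIO error) also ends the poll and is converted to the
+ * error code just below.
+ */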
+ err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
+ hpd_asserted, hpd_asserted,
+ 1000, 2000000);
+ if (hpd_asserted < 0)
+ err = hpd_asserted;
+
+ if (err) {
+ dev_err(panel->dev,
+ "error waiting for hpd GPIO: %d\n", err);
+ return err;
+ }
+ }
+
p->prepared = true;
return 0;
@@ -462,6 +510,11 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->desc = desc;
panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+ if (!panel->no_hpd) {
+ err = panel_simple_get_hpd_gpio(dev, panel, true);
+ if (err)
+ return err;
+ }
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
@@ -836,7 +889,8 @@ static const struct panel_desc auo_g101evn010 = {
.width = 216,
.height = 135,
},
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode auo_g104sn02_mode = {
@@ -862,6 +916,31 @@ static const struct panel_desc auo_g104sn02 = {
},
};
+static const struct drm_display_mode auo_g121ean01_mode = {
+ .clock = 66700,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 58,
+ .hsync_end = 1280 + 58 + 8,
+ .htotal = 1280 + 58 + 8 + 70,
+ .vdisplay = 800,
+ .vsync_start = 800 + 6,
+ .vsync_end = 800 + 6 + 4,
+ .vtotal = 800 + 6 + 4 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g121ean01 = {
+ .modes = &auo_g121ean01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 261,
+ .height = 163,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_g133han01_timings = {
.pixelclock = { 134000000, 141200000, 149000000 },
.hactive = { 1920, 1920, 1920 },
@@ -892,6 +971,31 @@ static const struct panel_desc auo_g133han01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct drm_display_mode auo_g156xtn01_mode = {
+ .clock = 76000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 33,
+ .hsync_end = 1366 + 33 + 67,
+ .htotal = 1366 + 33 + 67 + 94,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 4,
+ .vtotal = 768 + 4 + 4 + 30,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g156xtn01 = {
+ .modes = &auo_g156xtn01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 344,
+ .height = 194,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_g185han01_timings = {
.pixelclock = { 120000000, 144000000, 175000000 },
.hactive = { 1920, 1920, 1920 },
@@ -922,6 +1026,36 @@ static const struct panel_desc auo_g185han01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing auo_g190ean01_timings = {
+ .pixelclock = { 90000000, 108000000, 135000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 126, 184, 1266 },
+ .hback_porch = { 84, 122, 844 },
+ .hsync_len = { 70, 102, 704 },
+ .vactive = { 1024, 1024, 1024 },
+ .vfront_porch = { 4, 26, 76 },
+ .vback_porch = { 2, 8, 25 },
+ .vsync_len = { 2, 8, 25 },
+};
+
+static const struct panel_desc auo_g190ean01 = {
+ .timings = &auo_g190ean01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 376,
+ .height = 301,
+ },
+ .delay = {
+ .prepare = 50,
+ .enable = 200,
+ .disable = 110,
+ .unprepare = 1000,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_p320hvn03_timings = {
.pixelclock = { 106000000, 148500000, 164000000 },
.hactive = { 1920, 1920, 1920 },
@@ -1092,6 +1226,38 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+/* Also used for boe_nv133fhm_n62 */
+static const struct drm_display_mode boe_nv133fhm_n61_modes = {
+ .clock = 147840,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 6,
+ .vtotal = 1080 + 3 + 6 + 31,
+ .vrefresh = 60,
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct panel_desc boe_nv133fhm_n61 = {
+ .modes = &boe_nv133fhm_n61_modes,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
{
.clock = 148500,
@@ -1980,6 +2146,37 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
+static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
+ .clock = 138778,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 24,
+ .hsync_end = 1920 + 24 + 48,
+ .htotal = 1920 + 24 + 48 + 88,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 12,
+ .vtotal = 1080 + 3 + 12 + 17,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ivo_m133nwf4_r0 = {
+ .modes = &ivo_m133nwf4_r0_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct display_timing koe_tx14d24vm1bpa_timing = {
.pixelclock = { 5580000, 5850000, 6200000 },
.hactive = { 320, 320, 320 },
@@ -2168,6 +2365,7 @@ static const struct panel_desc lg_lp120up1 = {
.width = 267,
.height = 183,
},
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
};
static const struct drm_display_mode lg_lp129qe_mode = {
@@ -3065,6 +3263,32 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode starry_kr070pe2t_mode = {
+ .clock = 33000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 209,
+ .hsync_end = 800 + 209 + 1,
+ .htotal = 800 + 209 + 1 + 45,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 1,
+ .vtotal = 480 + 22 + 1 + 22,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc starry_kr070pe2t = {
+ .modes = &starry_kr070pe2t_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode starry_kr122ea0sra_mode = {
.clock = 147000,
.hdisplay = 1920,
@@ -3455,12 +3679,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
+ .compatible = "auo,g121ean01",
+ .data = &auo_g121ean01,
+ }, {
.compatible = "auo,g133han01",
.data = &auo_g133han01,
}, {
+ .compatible = "auo,g156xtn01",
+ .data = &auo_g156xtn01,
+ }, {
.compatible = "auo,g185han01",
.data = &auo_g185han01,
}, {
+ .compatible = "auo,g190ean01",
+ .data = &auo_g190ean01,
+ }, {
.compatible = "auo,p320hvn03",
.data = &auo_p320hvn03,
}, {
@@ -3479,6 +3712,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "boe,nv133fhm-n61",
+ .data = &boe_nv133fhm_n61,
+ }, {
+ .compatible = "boe,nv133fhm-n62",
+ .data = &boe_nv133fhm_n61,
+ }, {
.compatible = "boe,nv140fhmn49",
.data = &boe_nv140fhmn49,
}, {
@@ -3587,6 +3826,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
+ .compatible = "ivo,m133nwf4-r0",
+ .data = &ivo_m133nwf4_r0,
+ }, {
.compatible = "koe,tx14d24vm1bpa",
.data = &koe_tx14d24vm1bpa,
}, {
@@ -3716,6 +3958,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ .compatible = "starry,kr070pe2t",
+ .data = &starry_kr070pe2t,
+ }, {
.compatible = "starry,kr122ea0sra",
.data = &starry_kr122ea0sra,
}, {
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 012ca62bf30e..f0ad6081570f 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -490,9 +490,7 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
{
struct device *dev = ctx->dev;
int ret, i;
- const struct nt35597_config *config;
- config = ctx->config;
for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
ctx->supplies[i].supply = regulator_names[i];
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
new file mode 100644
index 000000000000..42f299ad3804
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct visionox_rm69299 {
+ struct drm_panel panel;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ struct mipi_dsi_device *dsi;
+ bool prepared;
+ bool enabled;
+};
+
+static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_rm69299, panel);
+}
+
+static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The reset sequence of the Visionox panel requires it to be out
+ * of reset for 10ms, then held in reset for 10ms, and then
+ * released again.
+ */
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
+{
+ gpiod_set_value(ctx->reset_gpio, 0);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int visionox_rm69299_unprepare(struct drm_panel *panel)
+{
+ struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ ctx->dsi->mode_flags = 0;
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "set_display_off cmd failed ret = %d\n", ret);
+
+ /* 120ms delay required here as per DCS spec */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "enter_sleep cmd failed ret = %d\n", ret);
+ }
+
+ ret = visionox_rm69299_power_off(ctx);
+
+ ctx->prepared = false;
+ return ret;
+}
+
+static int visionox_rm69299_prepare(struct drm_panel *panel)
+{
+ struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = visionox_rm69299_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 0 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 1 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 2 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 3 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "exit_sleep_mode cmd failed ret = %d\n", ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending exit sleep DCS command */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "set_display_on cmd failed ret = %d\n", ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending set_display_on DCS command */
+ msleep(120);
+
+ ctx->prepared = true;
+
+ return 0;
+
+power_off:
+ visionox_rm69299_power_off(ctx);
+ return ret;
+}
+
+static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
+ .name = "1080x2248",
+ .clock = 158695,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 2,
+ .htotal = 1080 + 26 + 2 + 36,
+ .vdisplay = 2248,
+ .vsync_start = 2248 + 56,
+ .vsync_end = 2248 + 56 + 4,
+ .vtotal = 2248 + 56 + 4 + 4,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static int visionox_rm69299_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "failed to create a new display mode\n");
+ return 0;
+ }
+
+ connector->display_info.width_mm = 74;
+ connector->display_info.height_mm = 131;
+ drm_mode_copy(mode, &visionox_rm69299_1080x2248_60hz);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_rm69299_drm_funcs = {
+ .unprepare = visionox_rm69299_unprepare,
+ .prepare = visionox_rm69299_prepare,
+ .get_modes = visionox_rm69299_get_modes,
+};
+
+static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_rm69299 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->panel.dev = dev;
+ ctx->dsi = dsi;
+
+ ctx->supplies[0].supply = "vdda";
+ ctx->supplies[1].supply = "vdd3p3";
+
+ ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(ctx->panel.dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_panel_add(&ctx->panel);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "dsi attach failed ret = %d\n", ret);
+ goto err_dsi_attach;
+ }
+
+ ret = regulator_set_load(ctx->supplies[0].consumer, 32000);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "regulator set load failed for vdda supply ret = %d\n",
+ ret);
+ goto err_set_load;
+ }
+
+ ret = regulator_set_load(ctx->supplies[1].consumer, 13200);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "regulator set load failed for vdd3p3 supply ret = %d\n",
+ ret);
+ goto err_set_load;
+ }
+
+ return 0;
+
+err_set_load:
+ mipi_dsi_detach(dsi);
+err_dsi_attach:
+ drm_panel_remove(&ctx->panel);
+ return ret;
+}
+
+static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(ctx->dsi);
+ mipi_dsi_device_unregister(ctx->dsi);
+
+ drm_panel_remove(&ctx->panel);
+ return 0;
+}
+
+static const struct of_device_id visionox_rm69299_of_match[] = {
+ { .compatible = "visionox,rm69299-1080p-display", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);
+
+static struct mipi_dsi_driver visionox_rm69299_driver = {
+ .driver = {
+ .name = "panel-visionox-rm69299",
+ .of_match_table = visionox_rm69299_of_match,
+ },
+ .probe = visionox_rm69299_probe,
+ .remove = visionox_rm69299_remove,
+};
+module_mipi_dsi_driver(visionox_rm69299_driver);
+
+MODULE_DESCRIPTION("Visionox RM69299 DSI Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index 0c70f0e91d21..67d430d433e0 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -3,7 +3,6 @@ pl111_drm-y += pl111_display.o \
pl111_versatile.o \
pl111_drv.o
-pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
pl111_drm-$(CONFIG_ARCH_NOMADIK) += pl111_nomadik.o
pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c
index 3c8e82016854..26ca8cdf3e60 100644
--- a/drivers/gpu/drm/pl111/pl111_debugfs.c
+++ b/drivers/gpu/drm/pl111/pl111_debugfs.c
@@ -51,10 +51,10 @@ static const struct drm_info_list pl111_debugfs_list[] = {
{"regs", pl111_debugfs_regs, 0},
};
-int
+void
pl111_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(pl111_debugfs_list,
- ARRAY_SIZE(pl111_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(pl111_debugfs_list,
+ ARRAY_SIZE(pl111_debugfs_list),
+ minor->debugfs_root, minor);
}
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 77d2da9a8a7c..ba399bcb792f 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -84,6 +84,6 @@ struct pl111_drm_dev_private {
int pl111_display_init(struct drm_device *dev);
irqreturn_t pl111_irq(int irq, void *data);
-int pl111_debugfs_init(struct drm_minor *minor);
+void pl111_debugfs_init(struct drm_minor *minor);
#endif /* _PL111_DRM_H_ */
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index aa8aa8d9e405..da0c39dae874 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -90,10 +90,13 @@ static int pl111_modeset_init(struct drm_device *dev)
struct drm_panel *panel = NULL;
struct drm_bridge *bridge = NULL;
bool defer = false;
- int ret = 0;
+ int ret;
int i;
- drm_mode_config_init(dev);
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
mode_config = &dev->mode_config;
mode_config->funcs = &mode_config_funcs;
mode_config->min_width = 1;
@@ -154,7 +157,7 @@ static int pl111_modeset_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
- goto out_config;
+ goto finish;
}
} else if (bridge) {
dev_info(dev->dev, "Using non-panel bridge\n");
@@ -197,8 +200,6 @@ static int pl111_modeset_init(struct drm_device *dev)
out_bridge:
if (panel)
drm_panel_bridge_remove(bridge);
-out_config:
- drm_mode_config_cleanup(dev);
finish:
return ret;
}
@@ -343,7 +344,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
drm_dev_unregister(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
- drm_mode_config_cleanup(drm);
drm_dev_put(drm);
of_reserved_mem_device_release(dev);
@@ -444,6 +444,7 @@ static const struct amba_id pl111_id_table[] = {
},
{0, 0},
};
+MODULE_DEVICE_TABLE(amba, pl111_id_table);
static struct amba_driver pl111_amba_driver __maybe_unused = {
.drv = {
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 4f325c410b5d..64f01a4e6767 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -8,9 +8,9 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
+#include <linux/vexpress.h>
#include "pl111_versatile.h"
-#include "pl111_vexpress.h"
#include "pl111_drm.h"
static struct regmap *versatile_syscon_map;
@@ -361,13 +361,110 @@ static const struct pl111_variant_data pl111_vexpress = {
.broken_clockdivider = true,
};
+#define VEXPRESS_FPGAMUX_MOTHERBOARD 0x00
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1 0x01
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2 0x02
+
+static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np,
+ struct pl111_drm_dev_private *priv)
+{
+ struct platform_device *pdev;
+ struct device_node *root;
+ struct device_node *child;
+ struct device_node *ct_clcd = NULL;
+ struct regmap *map;
+ bool has_coretile_clcd = false;
+ bool has_coretile_hdlcd = false;
+ bool mux_motherboard = true;
+ u32 val;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_VEXPRESS_CONFIG))
+ return -ENODEV;
+
+ /*
+ * Check if we have a CLCD or HDLCD on the core tile by checking if a
+ * CLCD or HDLCD is available in the root of the device tree.
+ */
+ root = of_find_node_by_path("/");
+ if (!root)
+ return -EINVAL;
+
+ for_each_available_child_of_node(root, child) {
+ if (of_device_is_compatible(child, "arm,pl111")) {
+ has_coretile_clcd = true;
+ ct_clcd = child;
+ break;
+ }
+ if (of_device_is_compatible(child, "arm,hdlcd")) {
+ has_coretile_hdlcd = true;
+ of_node_put(child);
+ break;
+ }
+ }
+
+ of_node_put(root);
+
+ /*
+ * If there is a coretile HDLCD and it has a driver,
+ * do not mux the CLCD on the motherboard to the DVI.
+ */
+ if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
+ mux_motherboard = false;
+
+ /*
+ * On the Vexpress CA9 we let the CLCD on the coretile
+ * take precedence, so also in this case do not mux the
+ * motherboard to the DVI.
+ */
+ if (has_coretile_clcd)
+ mux_motherboard = false;
+
+ if (mux_motherboard) {
+ dev_info(dev, "DVI muxed to motherboard CLCD\n");
+ val = VEXPRESS_FPGAMUX_MOTHERBOARD;
+ } else if (ct_clcd == dev->of_node) {
+ dev_info(dev,
+ "DVI muxed to daughterboard 1 (core tile) CLCD\n");
+ val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
+ } else {
+ dev_info(dev, "core tile graphics present\n");
+ dev_info(dev, "this device will be deactivated\n");
+ return -ENODEV;
+ }
+
+ /* Call into deep Vexpress configuration API */
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "can't find the sysreg device, deferring\n");
+ return -EPROBE_DEFER;
+ }
+
+ map = devm_regmap_init_vexpress_config(&pdev->dev);
+ if (IS_ERR(map)) {
+ platform_device_put(pdev);
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_write(map, 0, val);
+ platform_device_put(pdev);
+ if (ret) {
+ dev_err(dev, "error setting DVI muxmode\n");
+ return -ENODEV;
+ }
+
+ priv->variant = &pl111_vexpress;
+ dev_info(dev, "initializing Versatile Express PL111\n");
+
+ return 0;
+}
+
int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
{
const struct of_device_id *clcd_id;
enum versatile_clcd versatile_clcd_type;
struct device_node *np;
struct regmap *map;
- int ret;
np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
&clcd_id);
@@ -378,6 +475,15 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ /* Versatile Express special handling */
+ if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
+ int ret = pl111_vexpress_clcd_init(dev, np, priv);
+ of_node_put(np);
+ if (ret)
+ dev_err(dev, "Versatile Express init failed - %d", ret);
+ return ret;
+ }
+
/*
* On the Integrator, check if we should use the IM-PD1 instead,
* if we find it, it will take precedence. This is on the Integrator/AP
@@ -390,37 +496,8 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
}
- /* Versatile Express special handling */
- if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
- struct platform_device *pdev;
-
- /* Registers a driver for the muxfpga */
- ret = vexpress_muxfpga_init();
- if (ret) {
- dev_err(dev, "unable to initialize muxfpga driver\n");
- of_node_put(np);
- return ret;
- }
-
- /* Call into deep Vexpress configuration API */
- pdev = of_find_device_by_node(np);
- if (!pdev) {
- dev_err(dev, "can't find the sysreg device, deferring\n");
- of_node_put(np);
- return -EPROBE_DEFER;
- }
- map = dev_get_drvdata(&pdev->dev);
- if (!map) {
- dev_err(dev, "sysreg has not yet probed\n");
- platform_device_put(pdev);
- of_node_put(np);
- return -EPROBE_DEFER;
- }
- } else {
- map = syscon_node_to_regmap(np);
- }
+ map = syscon_node_to_regmap(np);
of_node_put(np);
-
if (IS_ERR(map)) {
dev_err(dev, "no Versatile syscon regmap\n");
return PTR_ERR(map);
@@ -466,13 +543,6 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
priv->variant_display_disable = pl111_realview_clcd_disable;
dev_info(dev, "set up callbacks for RealView PL111\n");
break;
- case VEXPRESS_CLCD_V2M:
- priv->variant = &pl111_vexpress;
- dev_info(dev, "initializing Versatile Express PL111\n");
- ret = pl111_vexpress_clcd_init(dev, priv, map);
- if (ret)
- return ret;
- break;
default:
dev_info(dev, "unknown Versatile system controller\n");
break;
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
deleted file mode 100644
index 350570fe06b5..000000000000
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ /dev/null
@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Versatile Express PL111 handling
- * Copyright (C) 2018 Linus Walleij
- *
- * This module binds to the "arm,vexpress-muxfpga" device on the
- * Versatile Express configuration bus and sets up which CLCD instance
- * gets muxed out on the DVI bridge.
- */
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/vexpress.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include "pl111_drm.h"
-#include "pl111_vexpress.h"
-
-#define VEXPRESS_FPGAMUX_MOTHERBOARD 0x00
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1 0x01
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2 0x02
-
-int pl111_vexpress_clcd_init(struct device *dev,
- struct pl111_drm_dev_private *priv,
- struct regmap *map)
-{
- struct device_node *root;
- struct device_node *child;
- struct device_node *ct_clcd = NULL;
- bool has_coretile_clcd = false;
- bool has_coretile_hdlcd = false;
- bool mux_motherboard = true;
- u32 val;
- int ret;
-
- /*
- * Check if we have a CLCD or HDLCD on the core tile by checking if a
- * CLCD or HDLCD is available in the root of the device tree.
- */
- root = of_find_node_by_path("/");
- if (!root)
- return -EINVAL;
-
- for_each_available_child_of_node(root, child) {
- if (of_device_is_compatible(child, "arm,pl111")) {
- has_coretile_clcd = true;
- ct_clcd = child;
- break;
- }
- if (of_device_is_compatible(child, "arm,hdlcd")) {
- has_coretile_hdlcd = true;
- of_node_put(child);
- break;
- }
- }
-
- of_node_put(root);
-
- /*
- * If there is a coretile HDLCD and it has a driver,
- * do not mux the CLCD on the motherboard to the DVI.
- */
- if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
- mux_motherboard = false;
-
- /*
- * On the Vexpress CA9 we let the CLCD on the coretile
- * take precedence, so also in this case do not mux the
- * motherboard to the DVI.
- */
- if (has_coretile_clcd)
- mux_motherboard = false;
-
- if (mux_motherboard) {
- dev_info(dev, "DVI muxed to motherboard CLCD\n");
- val = VEXPRESS_FPGAMUX_MOTHERBOARD;
- } else if (ct_clcd == dev->of_node) {
- dev_info(dev,
- "DVI muxed to daughterboard 1 (core tile) CLCD\n");
- val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
- } else {
- dev_info(dev, "core tile graphics present\n");
- dev_info(dev, "this device will be deactivated\n");
- return -ENODEV;
- }
-
- ret = regmap_write(map, 0, val);
- if (ret) {
- dev_err(dev, "error setting DVI muxmode\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-/*
- * This sets up the regmap pointer that will then be retrieved by
- * the detection code in pl111_versatile.c and passed in to the
- * pl111_vexpress_clcd_init() function above.
- */
-static int vexpress_muxfpga_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *map;
-
- map = devm_regmap_init_vexpress_config(&pdev->dev);
- if (IS_ERR(map))
- return PTR_ERR(map);
- dev_set_drvdata(dev, map);
-
- return 0;
-}
-
-static const struct of_device_id vexpress_muxfpga_match[] = {
- { .compatible = "arm,vexpress-muxfpga", },
- {}
-};
-
-static struct platform_driver vexpress_muxfpga_driver = {
- .driver = {
- .name = "vexpress-muxfpga",
- .of_match_table = of_match_ptr(vexpress_muxfpga_match),
- },
- .probe = vexpress_muxfpga_probe,
-};
-
-int vexpress_muxfpga_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&vexpress_muxfpga_driver);
- /* -EBUSY just means this driver is already registered */
- if (ret == -EBUSY)
- ret = 0;
- return ret;
-}
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.h b/drivers/gpu/drm/pl111/pl111_vexpress.h
deleted file mode 100644
index 5d3681bb4c00..000000000000
--- a/drivers/gpu/drm/pl111/pl111_vexpress.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-struct device;
-struct pl111_drm_dev_private;
-struct regmap;
-
-#ifdef CONFIG_ARCH_VEXPRESS
-
-int pl111_vexpress_clcd_init(struct device *dev,
- struct pl111_drm_dev_private *priv,
- struct regmap *map);
-
-int vexpress_muxfpga_init(void);
-
-#else
-
-static inline int pl111_vexpress_clcd_init(struct device *dev,
- struct pl111_drm_dev_private *priv,
- struct regmap *map)
-{
- return -ENODEV;
-}
-
-static inline int vexpress_muxfpga_init(void)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index a4f4175bbdbe..524d35b648d8 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -39,7 +39,7 @@ static int
qxl_debugfs_irq_received(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct qxl_device *qdev = node->minor->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(node->minor->dev);
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
@@ -53,7 +53,7 @@ static int
qxl_debugfs_buffers_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct qxl_device *qdev = node->minor->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(node->minor->dev);
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
@@ -79,36 +79,29 @@ static struct drm_info_list qxl_debugfs_list[] = {
#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
#endif
-int
+void
qxl_debugfs_init(struct drm_minor *minor)
{
#if defined(CONFIG_DEBUG_FS)
- int r;
- struct qxl_device *dev =
- (struct qxl_device *) minor->dev->dev_private;
+ struct qxl_device *dev = to_qxl(minor->dev);
drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
- r = qxl_ttm_debugfs_init(dev);
- if (r) {
- DRM_ERROR("Failed to init TTM debugfs\n");
- return r;
- }
+ qxl_ttm_debugfs_init(dev);
#endif
- return 0;
}
-int qxl_debugfs_add_files(struct qxl_device *qdev,
- struct drm_info_list *files,
- unsigned int nfiles)
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+ struct drm_info_list *files,
+ unsigned int nfiles)
{
unsigned int i;
for (i = 0; i < qdev->debugfs_count; i++) {
if (qdev->debugfs[i].files == files) {
/* Already registered */
- return 0;
+ return;
}
}
@@ -116,7 +109,7 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
+ return;
}
qdev->debugfs[qdev->debugfs_count].files = files;
qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
@@ -126,5 +119,4 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
qdev->ddev.primary->debugfs_root,
qdev->ddev.primary);
#endif
- return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 91f398d51cfa..9d45d5a4278f 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -221,7 +221,7 @@ static int qxl_add_mode(struct drm_connector *connector,
bool preferred)
{
struct drm_device *dev = connector->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_display_mode *mode = NULL;
int rc;
@@ -242,7 +242,7 @@ static int qxl_add_mode(struct drm_connector *connector,
static int qxl_add_monitors_config_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *output = drm_connector_to_qxl_output(connector);
int h = output->index;
struct qxl_head *head;
@@ -310,7 +310,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
const char *reason)
{
struct drm_device *dev = crtc->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
struct qxl_head head;
int oldcount, i = qcrtc->index;
@@ -400,7 +400,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
unsigned int num_clips)
{
/* TODO: vmwgfx where this was cribbed from had locking. Why? */
- struct qxl_device *qdev = fb->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(fb->dev);
struct drm_clip_rect norect;
struct qxl_bo *qobj;
bool is_primary;
@@ -462,7 +462,7 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
static int qxl_primary_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo;
if (!state->crtc || !state->fb)
@@ -476,7 +476,7 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
static int qxl_primary_apply_cursor(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_framebuffer *fb = plane->state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
struct qxl_cursor_cmd *cmd;
@@ -523,7 +523,7 @@ out_free_release:
static void qxl_primary_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
struct qxl_bo *primary;
struct drm_clip_rect norect = {
@@ -554,7 +554,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
static void qxl_primary_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
if (old_state->fb) {
struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
@@ -570,7 +570,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_framebuffer *fb = plane->state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
struct qxl_release *release;
@@ -679,7 +679,7 @@ out_free_release:
static void qxl_cursor_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_release *release;
struct qxl_cursor_cmd *cmd;
int ret;
@@ -762,7 +762,7 @@ static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
struct qxl_surface surf;
@@ -923,7 +923,7 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
{
struct qxl_crtc *qxl_crtc;
struct drm_plane *primary, *cursor;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
int r;
qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
@@ -965,7 +965,7 @@ free_mem:
static int qxl_conn_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *output = drm_connector_to_qxl_output(connector);
unsigned int pwidth = 1024;
unsigned int pheight = 768;
@@ -991,7 +991,7 @@ static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *ddev = connector->dev;
- struct qxl_device *qdev = ddev->dev_private;
+ struct qxl_device *qdev = to_qxl(ddev);
if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0)
return MODE_BAD;
@@ -1021,7 +1021,7 @@ static enum drm_connector_status qxl_conn_detect(
struct qxl_output *output =
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
- struct qxl_device *qdev = ddev->dev_private;
+ struct qxl_device *qdev = to_qxl(ddev);
bool connected = false;
/* The first monitor is always connected */
@@ -1071,7 +1071,7 @@ static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
static int qdev_output_init(struct drm_device *dev, int num_output)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *qxl_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 4fda3f9b29f4..13872b882775 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -81,13 +81,16 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -EINVAL; /* TODO: ENODEV ? */
}
- qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
- if (!qdev)
+ qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver,
+ struct qxl_device, ddev);
+ if (IS_ERR(qdev)) {
+ pr_err("Unable to init drm dev");
return -ENOMEM;
+ }
ret = pci_enable_device(pdev);
if (ret)
- goto free_dev;
+ return ret;
ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
if (ret)
@@ -101,7 +104,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- ret = qxl_device_init(qdev, &qxl_driver, pdev);
+ ret = qxl_device_init(qdev, pdev);
if (ret)
goto put_vga;
@@ -128,14 +131,13 @@ put_vga:
vga_put(pdev, VGA_RSRC_LEGACY_IO);
disable_pci:
pci_disable_device(pdev);
-free_dev:
- kfree(qdev);
+
return ret;
}
static void qxl_drm_release(struct drm_device *dev)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
/*
* TODO: qxl_device_fini() call should be in qxl_pci_remove(),
@@ -144,8 +146,6 @@ static void qxl_drm_release(struct drm_device *dev)
*/
qxl_modeset_fini(qdev);
qxl_device_fini(qdev);
- dev->dev_private = NULL;
- kfree(qdev);
}
static void
@@ -157,7 +157,6 @@ qxl_pci_remove(struct pci_dev *pdev)
drm_atomic_helper_shutdown(dev);
if (is_vga(pdev))
vga_put(pdev, VGA_RSRC_LEGACY_IO);
- drm_dev_put(dev);
}
DEFINE_DRM_GEM_FOPS(qxl_fops);
@@ -165,7 +164,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
struct pci_dev *pdev = dev->pdev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
int ret;
ret = drm_mode_config_helper_suspend(dev);
@@ -187,7 +186,7 @@ static int qxl_drm_freeze(struct drm_device *dev)
static int qxl_drm_resume(struct drm_device *dev, bool thaw)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (!thaw) {
@@ -246,7 +245,7 @@ static int qxl_pm_restore(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct qxl_device *qdev = drm_dev->dev_private;
+ struct qxl_device *qdev = to_qxl(drm_dev);
qxl_io_reset(qdev);
return qxl_drm_resume(drm_dev, false);
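
qxl switches to devm_drm_dev_alloc(), which ties the device structure's lifetime to the drm_device, so the kfree()/free_dev unwinding and the final drm_dev_put() in remove disappear. A sketch of the pattern with placeholder foo_* names; note that on IS_ERR() the allocator encodes the real error, so PTR_ERR() would arguably be a more faithful return than the fixed -ENOMEM used in the hunk above:

    static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
        struct foo_device *foo;

        foo = devm_drm_dev_alloc(&pdev->dev, &foo_driver,
                                 struct foo_device, drm);
        if (IS_ERR(foo))
            return PTR_ERR(foo);
        /*
         * No kfree() or drm_dev_put() on later error paths: the structure
         * is released when the last drm_device reference is dropped.
         */
        return 0;
    }
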
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 27e45a2d6b52..31e35f787df2 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -190,13 +190,8 @@ struct qxl_debugfs {
unsigned int num_files;
};
-int qxl_debugfs_add_files(struct qxl_device *rdev,
- struct drm_info_list *files,
- unsigned int nfiles);
int qxl_debugfs_fence_init(struct qxl_device *rdev);
-struct qxl_device;
-
struct qxl_device {
struct drm_device ddev;
@@ -276,11 +271,12 @@ struct qxl_device {
int monitors_config_height;
};
+#define to_qxl(dev) container_of(dev, struct qxl_device, ddev)
+
extern const struct drm_ioctl_desc qxl_ioctls[];
extern int qxl_max_ioctl;
-int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv,
- struct pci_dev *pdev);
+int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
void qxl_device_fini(struct qxl_device *qdev);
int qxl_modeset_init(struct qxl_device *qdev);
@@ -442,8 +438,8 @@ int qxl_garbage_collect(struct qxl_device *qdev);
/* debugfs */
-int qxl_debugfs_init(struct drm_minor *minor);
-int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+void qxl_debugfs_init(struct drm_minor *minor);
+void qxl_ttm_debugfs_init(struct qxl_device *qdev);
/* qxl_prime.c */
int qxl_gem_prime_pin(struct drm_gem_object *obj);
@@ -461,9 +457,9 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
int qxl_irq_init(struct qxl_device *qdev);
irqreturn_t qxl_irq_handler(int irq, void *arg);
-int qxl_debugfs_add_files(struct qxl_device *qdev,
- struct drm_info_list *files,
- unsigned int nfiles);
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+ struct drm_info_list *files,
+ unsigned int nfiles);
int qxl_surface_id_alloc(struct qxl_device *qdev,
struct qxl_bo *surf);
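
All the dev->dev_private dereferences above become to_qxl(), a container_of() upcast that works because struct qxl_device embeds its drm_device as ddev. A self-contained illustration with stand-in names (compiles as plain C):

    #include <stddef.h>

    #define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base_dev { int id; };

    struct full_dev {
        struct base_dev ddev;   /* embedded, not a pointer */
        int irq_received;
    };

    /* recovers the enclosing object by address arithmetic alone, which is
     * why the separate dev_private back-pointer can be deleted */
    #define to_full_dev(d) my_container_of(d, struct full_dev, ddev)
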
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 272d19b677d8..24e903383aa1 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -32,7 +32,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_bo *qobj;
uint32_t handle;
int r;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 69f37db1027a..5ff6fa9b799c 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -34,7 +34,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
struct qxl_device *qdev;
struct ttm_buffer_object *tbo;
- qdev = (struct qxl_device *)gobj->dev->dev_private;
+ qdev = to_qxl(gobj->dev);
qxl_surface_evict(qdev, qobj, false);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 72f3f1bbb40c..13bd1d11c703 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -36,7 +36,7 @@
static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc *qxl_alloc = data;
int ret;
struct qxl_bo *qobj;
@@ -64,7 +64,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
static int qxl_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_map *qxl_map = data;
return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
@@ -276,7 +276,7 @@ out_free_reloc:
static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_execbuffer *execbuffer = data;
struct drm_qxl_command user_cmd;
int cmd_num;
@@ -301,7 +301,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_update_area *update_area = data;
struct qxl_rect area = {.left = update_area->left,
.top = update_area->top,
@@ -351,7 +351,7 @@ out:
static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_getparam *param = data;
switch (param->param) {
@@ -370,7 +370,7 @@ static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_clientcap *param = data;
int byte, idx;
@@ -391,7 +391,7 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc_surf *param = data;
struct qxl_bo *qobj;
int handle;
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 8435af108632..1ba5a702d763 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -32,7 +32,7 @@
irqreturn_t qxl_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
uint32_t pending;
pending = xchg(&qdev->ram_header->int_pending, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 70b20ee4741a..a6d873052cd4 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "qxl_drv.h"
@@ -107,20 +108,12 @@ static void qxl_gc_work(struct work_struct *work)
}
int qxl_device_init(struct qxl_device *qdev,
- struct drm_driver *drv,
struct pci_dev *pdev)
{
int r, sb;
- r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
- if (r) {
- pr_err("Unable to init drm dev");
- goto error;
- }
-
qdev->ddev.pdev = pdev;
pci_set_drvdata(pdev, &qdev->ddev);
- qdev->ddev.dev_private = qdev;
mutex_init(&qdev->gem.mutex);
mutex_init(&qdev->update_area_mutex);
@@ -136,8 +129,7 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
if (!qdev->vram_mapping) {
pr_err("Unable to create vram_mapping");
- r = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
if (pci_resource_len(pdev, 4) > 0) {
@@ -218,7 +210,7 @@ int qxl_device_init(struct qxl_device *qdev,
&(qdev->ram_header->cursor_ring_hdr),
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
- qdev->io_base + QXL_IO_NOTIFY_CMD,
+ qdev->io_base + QXL_IO_NOTIFY_CURSOR,
false,
&qdev->cursor_event);
@@ -291,7 +283,6 @@ surface_mapping_free:
io_mapping_free(qdev->surface_mapping);
vram_mapping_free:
io_mapping_free(qdev->vram_mapping);
-error:
return r;
}
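
The one-constant fix above (QXL_IO_NOTIFY_CMD to QXL_IO_NOTIFY_CURSOR) makes the cursor ring kick its own doorbell instead of the command ring's. A hypothetical sketch of the mechanism, assuming the device's notify ports are byte-wide I/O writes as elsewhere in this driver; io_base and port are placeholders:

    #include <linux/io.h>

    /* the bug fixed above was exactly a wrong `port` for the cursor ring */
    static void ring_notify(unsigned long io_base, unsigned int port)
    {
        outb(0, io_base + port);
    }
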
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index ab72dc3476e9..edc8a9916872 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -33,7 +33,7 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
struct qxl_device *qdev;
bo = to_qxl_bo(tbo);
- qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
+ qdev = to_qxl(bo->tbo.base.dev);
qxl_surface_evict(qdev, bo, false);
WARN_ON_ONCE(bo->map_count > 0);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 2feca734c7b1..4fae3e393da1 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -243,7 +243,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
/* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
+ ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
if (ret)
return ret;
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 62a5e424971b..f09a712b1ed2 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -243,7 +243,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
if (!qxl_ttm_bo_is_qxl_bo(bo))
return;
qbo = to_qxl_bo(bo);
- qdev = qbo->tbo.base.dev->dev_private;
+ qdev = to_qxl(qbo->tbo.base.dev);
if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
@@ -322,7 +322,7 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
}
#endif
-int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+void qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
@@ -343,8 +343,6 @@ int qxl_ttm_debugfs_init(struct qxl_device *qdev)
qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
}
- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
-#else
- return 0;
+ qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
#endif
}
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index 9b4072f97215..3e76ae5a17ee 100644
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -32,9 +32,10 @@
*/
#include <linux/export.h>
+#include <linux/pci.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
+#include <drm/drm_legacy.h>
#include <drm/drm_print.h>
#include "ati_pcigart.h"
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index c693b2ca0329..11c97edde54d 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -3,42 +3,13 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Idrivers/gpu/drm/amd/include
-
hostprogs := mkregtable
-clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
+targets := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
-quiet_cmd_mkregtable = MKREGTABLE $@
+quiet_cmd_mkregtable = MKREG $@
cmd_mkregtable = $(obj)/mkregtable $< > $@
-$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+$(obj)/%_reg_safe.h: $(src)/reg_srcs/% $(obj)/mkregtable FORCE
$(call if_changed,mkregtable)
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 2c27627b6659..f15b20da5315 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1211,8 +1211,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
SDEBUG("<<\n");
free:
- if (ws)
- kfree(ectx.ws);
+ kfree(ectx.ws);
return ret;
}
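
kfree(NULL) is a documented no-op, and ectx.ws is only non-NULL when a workspace was allocated, so the `if (ws)` guard removed above was redundant; dropping it is a pure simplification. Sketch (the struct name is a placeholder):

    #include <linux/slab.h>

    struct exec_ctx { void *ws; };

    static void exec_ctx_free(struct exec_ctx *c)
    {
        kfree(c->ws);   /* safe even when ws was never allocated */
    }
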
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index a9257bed3484..134aa2b01f90 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -65,13 +65,6 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
-static const struct ci_pt_defaults defaults_bonaire_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
- { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
- { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
-};
-
static const struct ci_pt_defaults defaults_saturn_xt =
{
1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
@@ -79,13 +72,6 @@ static const struct ci_pt_defaults defaults_saturn_xt =
{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
-static const struct ci_pt_defaults defaults_saturn_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
- { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
- { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
-};
-
static const struct ci_pt_config_reg didt_config_ci[] =
{
{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 848ef68d9086..5d2591725189 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2111,7 +2111,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
ucOverdriveThermalController];
info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
num_modes = power_info->info.ucNumOfPowerModeEntries;
@@ -2351,7 +2351,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
} else {
DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c3e49c973812..d3c04df7e75d 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2704,7 +2704,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
const char *name = thermal_controller_names[thermal_controller];
info.addr = i2c_addr >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
} else {
@@ -2721,7 +2721,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
const char *name = "f75375";
info.addr = 0x28;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
name, info.addr);
}
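
The i2c_new_device() to i2c_new_client_device() conversions track the I2C core's API: the old call returned NULL on failure and is being retired, while the new one returns an ERR_PTR. The radeon call sites ignore the result either way; a caller that checks would look like this sketch (add_thermal_client is a placeholder):

    #include <linux/err.h>
    #include <linux/i2c.h>

    static int add_thermal_client(struct i2c_adapter *adap,
                                  struct i2c_board_info *info)
    {
        struct i2c_client *client = i2c_new_client_device(adap, info);

        /* ERR_PTR-based contract: IS_ERR()/PTR_ERR(), never a NULL test */
        return PTR_ERR_OR_ZERO(client);
    }
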
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 59f8186a2415..bbb0883e8ce6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,6 +36,7 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <linux/pci.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc_helper.h>
@@ -44,7 +45,6 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 58176db85952..c5d1dc9618a4 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -158,7 +158,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (radeon_is_px(dev)) {
- dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
@@ -828,7 +828,7 @@ int radeon_enable_vblank_kms(struct drm_crtc *crtc)
unsigned long irqflags;
int r;
- if (pipe < 0 || pipe >= rdev->num_crtc) {
+ if (pipe >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
@@ -854,7 +854,7 @@ void radeon_disable_vblank_kms(struct drm_crtc *crtc)
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
- if (pipe < 0 || pipe >= rdev->num_crtc) {
+ if (pipe >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return;
}
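
Both vblank hunks drop `pipe < 0` because the crtc index is unsigned: the comparison is always false and provokes -Wtype-limits style warnings, leaving the upper bound as the only meaningful check.

    #include <linux/types.h>

    /* placeholder helper showing the reduced range check */
    static bool crtc_index_valid(unsigned int pipe, unsigned int num_crtc)
    {
        return pipe < num_crtc;
    }
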
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2cb85dbe728f..a167e1c36d24 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -252,24 +252,6 @@ static const struct si_dte_data dte_data_tahiti =
false
};
-static const struct si_dte_data dte_data_tahiti_le =
-{
- { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
- { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
- 0x5,
- 0xAFC8,
- 0x64,
- 0x32,
- 1,
- 0,
- 0x10,
- { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
- { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
- { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
- 85,
- true
-};
-
static const struct si_dte_data dte_data_tahiti_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 654e2dd08146..3e67cf70f040 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -530,7 +530,6 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drm_kms_helper_poll_fini(ddev);
- drm_mode_config_cleanup(ddev);
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index c07c6a88aff0..b0335da0c161 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -23,13 +24,6 @@
* Encoder
*/
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
{
struct device_node *ports;
@@ -110,13 +104,11 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
}
}
- ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(rcdu->ddev, encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret < 0)
goto done;
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
/*
* Attach the bridge to the encoder. The bridge will create the
* connector.
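
This is the first of several conversions in this series to drm_simple_encoder_init(), which supplies a drm_encoder_funcs whose .destroy is drm_encoder_cleanup(), i.e. everything the deleted per-driver boilerplate did. Sketch with placeholder names:

    #include <drm/drm_simple_kms_helper.h>

    static int foo_encoder_init(struct drm_device *drm,
                                struct drm_encoder *encoder)
    {
        /* no private encoder_funcs or empty helper_funcs needed */
        return drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
    }
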
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index fcfd916227d1..482329102f19 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -712,7 +712,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
unsigned int i;
int ret;
- drm_mode_config_init(dev);
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index c6430027169f..a0021fc25b27 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -785,13 +785,15 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
drm_plane_create_alpha_property(&plane->plane);
- if (type == DRM_PLANE_TYPE_PRIMARY)
- continue;
-
- drm_object_attach_property(&plane->plane.base,
- rcdu->props.colorkey,
- RCAR_DU_COLORKEY_NONE);
- drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ drm_plane_create_zpos_immutable_property(&plane->plane,
+ 0);
+ } else {
+ drm_object_attach_property(&plane->plane.base,
+ rcdu->props.colorkey,
+ RCAR_DU_COLORKEY_NONE);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 5e4faf258c31..f1a81c9b184d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
drm_plane_helper_add(&plane->plane,
&rcar_du_vsp_plane_helper_funcs);
- if (type == DRM_PLANE_TYPE_PRIMARY)
- continue;
-
- drm_plane_create_alpha_property(&plane->plane);
- drm_plane_create_zpos_property(&plane->plane, 1, 1,
- vsp->num_planes - 1);
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ drm_plane_create_zpos_immutable_property(&plane->plane,
+ 0);
+ } else {
+ drm_plane_create_alpha_property(&plane->plane);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1,
+ vsp->num_planes - 1);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index ce98c08aa8b4..ade2327a10e2 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -26,6 +26,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -258,10 +259,6 @@ static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
.atomic_check = rockchip_dp_drm_encoder_atomic_check,
};
-static struct drm_encoder_funcs rockchip_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
{
struct device *dev = dp->dev;
@@ -309,8 +306,8 @@ static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
dev->of_node);
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
- ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
DRM_ERROR("failed to initialize encoder with drm\n");
return ret;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index eed594bd38d3..c634b95b50f7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -20,6 +20,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
@@ -689,10 +690,6 @@ static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
.atomic_check = cdn_dp_encoder_atomic_check,
};
-static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
struct device *dev = dp->dev;
@@ -1030,8 +1027,8 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
dev->of_node);
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
- ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
DRM_ERROR("failed to initialize encoder with drm\n");
return ret;
@@ -1109,7 +1106,7 @@ static const struct component_ops cdn_dp_component_ops = {
.unbind = cdn_dp_unbind,
};
-int cdn_dp_suspend(struct device *dev)
+static int cdn_dp_suspend(struct device *dev)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
int ret = 0;
@@ -1123,7 +1120,7 @@ int cdn_dp_suspend(struct device *dev)
return ret;
}
-int cdn_dp_resume(struct device *dev)
+static int cdn_dp_resume(struct device *dev)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 7361c07cb4a7..9d2163ef4d6e 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -601,7 +601,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
case YCBCR_4_2_0:
val[0] = 5;
break;
- };
+ }
switch (video->color_depth) {
case 6:
@@ -619,7 +619,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
case 16:
val[1] = 4;
break;
- };
+ }
msa_misc = 2 * val[0] + 32 * val[1] +
((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
@@ -700,7 +700,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
case 16:
val = BCS_16;
break;
- };
+ }
val += video->color_fmt << 8;
ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
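
The three cdn-dp-reg.c hunks remove a `;` after the closing brace of a switch; that semicolon is an empty statement, harmless but flagged by clang's -Wextra-semi-stmt. Standalone illustration:

    static int bcs_value(int color_depth)
    {
        int val;

        switch (color_depth) {
        case 6:
            val = 0;
            break;
        default:
            val = 1;
            break;
        }   /* no trailing ';' after the brace */

        return val;
    }
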
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 6e1270e45f97..3feff0c45b3f 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -21,6 +21,7 @@
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -789,10 +790,6 @@ dw_mipi_dsi_encoder_helper_funcs = {
.disable = dw_mipi_dsi_encoder_disable,
};
-static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
struct drm_device *drm_dev)
{
@@ -802,8 +799,7 @@ static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
dsi->dev->of_node);
- ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("Failed to initialize encoder with drm\n");
return ret;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 7f56d8c3491d..121aa8a63a76 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -14,6 +14,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -237,10 +238,6 @@ dw_hdmi_rockchip_mode_valid(struct drm_connector *connector,
return (valid) ? MODE_OK : MODE_BAD;
}
-static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
{
}
@@ -546,8 +543,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
platform_set_drvdata(pdev, hdmi);
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index e5864e823020..7afdc54eb3ec 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -19,6 +19,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -532,10 +533,6 @@ static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
.atomic_check = inno_hdmi_encoder_atomic_check,
};
-static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_connector_status
inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -617,8 +614,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
return -EPROBE_DEFER;
drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index fe203d38664e..1c546c3a8998 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -6,6 +6,7 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
@@ -451,10 +452,6 @@ struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
.atomic_check = rk3066_hdmi_encoder_atomic_check,
};
-static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_connector_status
rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -557,8 +554,7 @@ rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
return -EPROBE_DEFER;
drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 20ecb1508a22..0f3eb392fe39 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -135,14 +135,16 @@ static int rockchip_drm_bind(struct device *dev)
if (ret)
goto err_free;
- drm_mode_config_init(drm_dev);
+ ret = drmm_mode_config_init(drm_dev);
+ if (ret)
+ goto err_iommu_cleanup;
rockchip_drm_mode_config_init(drm_dev);
/* Try to bind all sub drivers. */
ret = component_bind_all(dev, drm_dev);
if (ret)
- goto err_mode_config_cleanup;
+ goto err_iommu_cleanup;
ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
if (ret)
@@ -173,12 +175,9 @@ err_kms_helper_poll_fini:
rockchip_drm_fbdev_fini(drm_dev);
err_unbind_all:
component_unbind_all(dev, drm_dev);
-err_mode_config_cleanup:
- drm_mode_config_cleanup(drm_dev);
+err_iommu_cleanup:
rockchip_iommu_cleanup(drm_dev);
err_free:
- drm_dev->dev_private = NULL;
- dev_set_drvdata(dev, NULL);
drm_dev_put(drm_dev);
return ret;
}
@@ -194,11 +193,8 @@ static void rockchip_drm_unbind(struct device *dev)
drm_atomic_helper_shutdown(drm_dev);
component_unbind_all(dev, drm_dev);
- drm_mode_config_cleanup(drm_dev);
rockchip_iommu_cleanup(drm_dev);
- drm_dev->dev_private = NULL;
- dev_set_drvdata(dev, NULL);
drm_dev_put(drm_dev);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index c5b06048124e..e33c2dcd0d4b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -30,6 +30,7 @@ struct rockchip_crtc_state {
int output_mode;
int output_bpc;
int output_flags;
+ bool enable_afbc;
};
#define to_rockchip_crtc_state(s) \
container_of(s, struct rockchip_crtc_state, base)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 221e72e71432..9b13c784b347 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -57,8 +57,49 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers =
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
+static struct drm_framebuffer *
+rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_afbc_framebuffer *afbc_fb;
+ const struct drm_format_info *info;
+ int ret;
+
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
+ if (!afbc_fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
+ &rockchip_drm_fb_funcs);
+ if (ret) {
+ kfree(afbc_fb);
+ return ERR_PTR(ret);
+ }
+
+ if (drm_is_afbc(mode_cmd->modifier[0])) {
+ int ret, i;
+
+ ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
+ if (ret) {
+ struct drm_gem_object **obj = afbc_fb->base.obj;
+
+ for (i = 0; i < info->num_planes; ++i)
+ drm_gem_object_put_unlocked(obj[i]);
+
+ kfree(afbc_fb);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return &afbc_fb->base;
+}
+
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
+ .fb_create = rockchip_fb_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
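
rockchip replaces the stock drm_gem_fb_create_with_dirty() with a custom .fb_create so AFBC layouts can be validated at framebuffer creation time. Allocating struct drm_afbc_framebuffer unconditionally keeps a single code path: its extra fields simply go unused for linear buffers, and the modifier is the only branch point. A reduced sketch, assuming (as the hunk suggests) that the AFBC helper only validates and fills layout fields:

    #include <drm/drm_fourcc.h>

    static bool needs_afbc_setup(const struct drm_mode_fb_cmd2 *cmd)
    {
        /* modifier[0] governs the whole framebuffer */
        return drm_is_afbc(cmd->modifier[0]);
    }
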
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index cecb2cc781f5..33463b79a37b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -91,9 +91,22 @@
#define VOP_WIN_TO_INDEX(vop_win) \
((vop_win) - (vop_win)->vop->win)
+#define VOP_AFBC_SET(vop, name, v) \
+ do { \
+ if ((vop)->data->afbc) \
+ vop_reg_set((vop), &(vop)->data->afbc->name, \
+ 0, ~0, v, #name); \
+ } while (0)
+
#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
+#define AFBC_FMT_RGB565 0x0
+#define AFBC_FMT_U8U8U8U8 0x5
+#define AFBC_FMT_U8U8U8 0x4
+
+#define AFBC_TILE_16x16 BIT(4)
+
/*
* The coefficients of the following matrix are all fixed points.
* The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets.
@@ -274,6 +287,29 @@ static enum vop_data_format vop_convert_format(uint32_t format)
}
}
+static int vop_convert_afbc_format(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return AFBC_FMT_U8U8U8U8;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ return AFBC_FMT_U8U8U8;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ return AFBC_FMT_RGB565;
+ /* neither the default case nor the return below should be reachable */
+ default:
+ DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format);
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
uint32_t dst, bool is_horizontal,
int vsu_mode, int *vskiplines)
@@ -598,6 +634,17 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
vop_win_disable(vop, vop_win);
}
}
+
+ if (vop->data->afbc) {
+ struct rockchip_crtc_state *s;
+ /*
+ * Disable AFBC and forget there was a vop window with AFBC
+ */
+ VOP_AFBC_SET(vop, enable, 0);
+ s = to_rockchip_crtc_state(crtc->state);
+ s->enable_afbc = false;
+ }
+
spin_unlock(&vop->reg_lock);
vop_cfg_done(vop);
@@ -710,6 +757,26 @@ static void vop_plane_destroy(struct drm_plane *plane)
drm_plane_cleanup(plane);
}
+static inline bool rockchip_afbc(u64 modifier)
+{
+ return modifier == ROCKCHIP_AFBC_MOD;
+}
+
+static bool rockchip_mod_supported(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (!rockchip_afbc(modifier)) {
+ DRM_DEBUG_KMS("Unsupported format modifier 0x%llx\n", modifier);
+
+ return false;
+ }
+
+ return vop_convert_afbc_format(format) >= 0;
+}
+
static int vop_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -758,6 +825,30 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (rockchip_afbc(fb->modifier)) {
+ struct vop *vop = to_vop(crtc);
+
+ if (!vop->data->afbc) {
+ DRM_ERROR("vop does not support AFBC\n");
+ return -EINVAL;
+ }
+
+ ret = vop_convert_afbc_format(fb->format->format);
+ if (ret < 0)
+ return ret;
+
+ if (state->src.x1 || state->src.y1) {
+ DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]);
+ return -EINVAL;
+ }
+
+ if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) {
+ DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
+ state->rotation);
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -846,6 +937,16 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
+ if (rockchip_afbc(fb->modifier)) {
+ int afbc_format = vop_convert_afbc_format(fb->format->format);
+
+ VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16);
+ VOP_AFBC_SET(vop, hreg_block_split, 0);
+ VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win));
+ VOP_AFBC_SET(vop, hdr_ptr, dma_addr);
+ VOP_AFBC_SET(vop, pic_size, act_info);
+ }
+
VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
@@ -1001,6 +1102,7 @@ static const struct drm_plane_funcs vop_plane_funcs = {
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = rockchip_mod_supported,
};
static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -1310,6 +1412,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct vop *vop = to_vop(crtc);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct rockchip_crtc_state *s;
+ int afbc_planes = 0;
if (vop->lut_regs && crtc_state->color_mgmt_changed &&
crtc_state->gamma_lut) {
@@ -1323,6 +1429,27 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
}
}
+ drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
+ plane_state =
+ drm_atomic_get_plane_state(crtc_state->state, plane);
+ if (IS_ERR(plane_state)) {
+ DRM_DEBUG_KMS("Cannot get plane state for plane %s\n",
+ plane->name);
+ return PTR_ERR(plane_state);
+ }
+
+ if (drm_is_afbc(plane_state->fb->modifier))
+ ++afbc_planes;
+ }
+
+ if (afbc_planes > 1) {
+ DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes);
+ return -EINVAL;
+ }
+
+ s = to_rockchip_crtc_state(crtc_state);
+ s->enable_afbc = afbc_planes > 0;
+
return 0;
}
@@ -1333,6 +1460,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_plane_state *old_plane_state, *new_plane_state;
struct vop *vop = to_vop(crtc);
struct drm_plane *plane;
+ struct rockchip_crtc_state *s;
int i;
if (WARN_ON(!vop->is_enabled))
@@ -1340,6 +1468,9 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock(&vop->reg_lock);
+ /* Enable AFBC if there is some AFBC window, disable otherwise. */
+ s = to_rockchip_crtc_state(crtc->state);
+ VOP_AFBC_SET(vop, enable, s->enable_afbc);
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
@@ -1634,7 +1765,8 @@ static int vop_create_crtc(struct vop *vop)
0, &vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- NULL, win_data->type, NULL);
+ win_data->phy->format_modifiers,
+ win_data->type, NULL);
if (ret) {
DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
ret);
@@ -1678,7 +1810,8 @@ static int vop_create_crtc(struct vop *vop)
&vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- NULL, win_data->type, NULL);
+ win_data->phy->format_modifiers,
+ win_data->type, NULL);
if (ret) {
DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
ret);
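
VOP_AFBC_SET above is the guarded multi-statement register macro in its usual kernel form: the do { } while (0) wrapper makes the expansion a single statement, so it stays correct under an un-braced if/else, and the vop->data->afbc test makes it a no-op on VOP variants without an AFBC block. Generic sketch with placeholder names:

    #define HW_SET(dev, field, v)                                   \
        do {                                                        \
            if ((dev)->caps)                                        \
                reg_write((dev), &(dev)->caps->field, (v));         \
        } while (0)

    /* safe even where a bare multi-statement expansion would mis-parse:
     *   if (enable) HW_SET(dev, ctrl, 1); else HW_SET(dev, ctrl, 0); */
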
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index cc672620d6e0..d03bdb531ef2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -17,6 +17,11 @@
#define NUM_YUV2YUV_COEFFICIENTS 12
+#define ROCKCHIP_AFBC_MOD \
+ DRM_FORMAT_MOD_ARM_AFBC( \
+ AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
+ )
+
enum vop_data_format {
VOP_FMT_ARGB8888 = 0,
VOP_FMT_RGB888,
@@ -34,6 +39,16 @@ struct vop_reg {
bool relaxed;
};
+struct vop_afbc {
+ struct vop_reg enable;
+ struct vop_reg win_sel;
+ struct vop_reg format;
+ struct vop_reg hreg_block_split;
+ struct vop_reg pic_size;
+ struct vop_reg hdr_ptr;
+ struct vop_reg rstn;
+};
+
struct vop_modeset {
struct vop_reg htotal_pw;
struct vop_reg hact_st_end;
@@ -134,6 +149,7 @@ struct vop_win_phy {
const struct vop_scl_regs *scl;
const uint32_t *data_formats;
uint32_t nformats;
+ const uint64_t *format_modifiers;
struct vop_reg enable;
struct vop_reg gate;
@@ -173,6 +189,7 @@ struct vop_data {
const struct vop_misc *misc;
const struct vop_modeset *modeset;
const struct vop_output *output;
+ const struct vop_afbc *afbc;
const struct vop_win_yuv2yuv_data *win_yuv2yuv;
const struct vop_win_data *win;
unsigned int win_size;
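
The new format_modifiers pointer in vop_win_phy feeds drm_universal_plane_init(), whose modifier arrays (added in rockchip_vop_reg.c below) are ordered by preference and terminated with DRM_FORMAT_MOD_INVALID. A simplified sketch of how such a table is consumed:

    #include <linux/types.h>
    #include <drm/drm_fourcc.h>

    static unsigned int count_modifiers(const u64 *mods)
    {
        unsigned int n = 0;

        /* scan to the sentinel; earlier entries are the preferred ones */
        while (mods[n] != DRM_FORMAT_MOD_INVALID)
            n++;
        return n;
    }
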
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 449a62908d21..63f967902c2d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -16,13 +16,14 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -435,10 +436,6 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
.atomic_check = rockchip_lvds_encoder_atomic_check,
};
-static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int rk3288_lvds_probe(struct platform_device *pdev,
struct rockchip_lvds *lvds)
{
@@ -607,8 +604,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
dev->of_node);
- ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to initialize encoder: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 90784781e515..9a771af5d0c9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -14,6 +14,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -67,10 +68,6 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
.atomic_check = rockchip_rgb_encoder_atomic_check,
};
-static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
struct drm_device *drm_dev)
@@ -126,8 +123,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
encoder = &rgb->encoder;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to initialize encoder: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 7a9d979c8d5d..2413deded22c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -50,6 +50,17 @@ static const uint32_t formats_win_full[] = {
DRM_FORMAT_NV24,
};
+static const uint64_t format_modifiers_win_full[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_win_full_afbc[] = {
+ ROCKCHIP_AFBC_MOD,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
static const uint32_t formats_win_lite[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
@@ -61,6 +72,11 @@ static const uint32_t formats_win_lite[] = {
DRM_FORMAT_BGR565,
};
+static const uint64_t format_modifiers_win_lite[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
static const struct vop_scl_regs rk3036_win_scl = {
.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
@@ -72,6 +88,7 @@ static const struct vop_win_phy rk3036_win0_data = {
.scl = &rk3036_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
@@ -87,6 +104,7 @@ static const struct vop_win_phy rk3036_win0_data = {
static const struct vop_win_phy rk3036_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -153,6 +171,7 @@ static const struct vop_data rk3036_vop = {
static const struct vop_win_phy rk3126_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -234,6 +253,7 @@ static const struct vop_win_phy px30_win0_data = {
.scl = &px30_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12),
@@ -249,6 +269,7 @@ static const struct vop_win_phy px30_win0_data = {
static const struct vop_win_phy px30_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0),
.format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4),
.rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12),
@@ -261,6 +282,7 @@ static const struct vop_win_phy px30_win1_data = {
static const struct vop_win_phy px30_win2_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4),
.enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5),
@@ -316,6 +338,7 @@ static const struct vop_win_phy rk3066_win0_data = {
.scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
@@ -332,6 +355,7 @@ static const struct vop_win_phy rk3066_win1_data = {
.scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
@@ -347,6 +371,7 @@ static const struct vop_win_phy rk3066_win1_data = {
static const struct vop_win_phy rk3066_win2_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
@@ -426,6 +451,7 @@ static const struct vop_win_phy rk3188_win0_data = {
.scl = &rk3188_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0),
.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3),
.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15),
@@ -440,6 +466,7 @@ static const struct vop_win_phy rk3188_win0_data = {
static const struct vop_win_phy rk3188_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19),
@@ -545,6 +572,7 @@ static const struct vop_win_phy rk3288_win01_data = {
.scl = &rk3288_win_full_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
@@ -563,6 +591,7 @@ static const struct vop_win_phy rk3288_win01_data = {
static const struct vop_win_phy rk3288_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4),
.gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
@@ -677,6 +706,7 @@ static const struct vop_win_phy rk3368_win01_data = {
.scl = &rk3288_win_full_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12),
@@ -697,6 +727,7 @@ static const struct vop_win_phy rk3368_win01_data = {
static const struct vop_win_phy rk3368_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
@@ -817,6 +848,53 @@ static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = {
.y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) },
{ .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data },
{ .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data },
+
+};
+
+static const struct vop_win_phy rk3399_win01_data = {
+ .scl = &rk3288_win_full_scl,
+ .data_formats = formats_win_full,
+ .nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full_afbc,
+ .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+ .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+/*
+ * rk3399 vop big windows register layout is the same as on rk3288, but we
+ * have a separate rk3399 win data array here so that we can advertise
+ * AFBC on the primary plane.
+ */
+static const struct vop_win_data rk3399_vop_win_data[] = {
+ { .base = 0x00, .phy = &rk3399_win01_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x40, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x00, .phy = &rk3288_win23_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &rk3288_win23_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_afbc rk3399_vop_afbc = {
+ .rstn = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 3),
+ .enable = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 0),
+ .win_sel = VOP_REG(RK3399_AFBCD0_CTRL, 0x3, 1),
+ .format = VOP_REG(RK3399_AFBCD0_CTRL, 0x1f, 16),
+ .hreg_block_split = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 21),
+ .hdr_ptr = VOP_REG(RK3399_AFBCD0_HDR_PTR, 0xffffffff, 0),
+ .pic_size = VOP_REG(RK3399_AFBCD0_PIC_SIZE, 0xffffffff, 0),
};
static const struct vop_data rk3399_vop_big = {
@@ -826,9 +904,10 @@ static const struct vop_data rk3399_vop_big = {
.common = &rk3288_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
+ .afbc = &rk3399_vop_afbc,
.misc = &rk3368_misc,
- .win = rk3368_vop_win_data,
- .win_size = ARRAY_SIZE(rk3368_vop_win_data),
+ .win = rk3399_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3399_vop_win_data),
.win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
};
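Note: the format_modifiers_win_full, format_modifiers_win_lite and format_modifiers_win_full_afbc arrays referenced by the hunks above are defined earlier in this patch and fall outside this excerpt. As a rough sketch of what such modifier lists look like — the ROCKCHIP_AFBC_MOD name and the exact AFBC mode bits here are illustrative, not necessarily the patch's own:

#include <drm/drm_fourcc.h>

/* Illustrative AFBC modifier: 16x16 superblocks, sparse layout, YTR. */
#define ROCKCHIP_AFBC_MOD \
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | \
				AFBC_FORMAT_MOD_SPARSE | \
				AFBC_FORMAT_MOD_YTR)

/* Windows without AFBC support advertise linear buffers only. */
static const uint64_t format_modifiers_win_full[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,		/* sentinel terminating the list */
};

/* The AFBC-capable window advertises the AFBC modifier as well. */
static const uint64_t format_modifiers_win_full_afbc[] = {
	ROCKCHIP_AFBC_MOD,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};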
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 75a752d59ef1..03556dbfcafb 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -17,6 +17,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include "shmob_drm_backlight.h"
@@ -558,15 +559,6 @@ static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
.mode_set = shmob_drm_encoder_mode_set,
};
-static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = shmob_drm_encoder_destroy,
-};
-
int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
{
struct drm_encoder *encoder = &sdev->encoder.encoder;
@@ -576,8 +568,8 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
encoder->possible_crtcs = 1;
- ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(sdev->ddev, encoder,
+ DRM_MODE_ENCODER_LVDS);
if (ret < 0)
return ret;
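drm_simple_encoder_init() subsumes exactly the boilerplate deleted above: it registers the encoder with a stock funcs table whose only callback is drm_encoder_cleanup(). A minimal sketch of the open-coded equivalent (my_encoder_init is a hypothetical name), assuming the driver needs no private destroy work:

#include <drm/drm_encoder.h>

/* What drm_simple_encoder_init() amounts to internally (sketch). */
static const struct drm_encoder_funcs simple_cleanup_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int my_encoder_init(struct drm_device *dev,
			   struct drm_encoder *encoder, int encoder_type)
{
	return drm_encoder_init(dev, encoder, &simple_cleanup_funcs,
				encoder_type, NULL);
}

The same substitution repeats for the sun4i and tegra encoders later in this diff.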
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index b8c0930959c7..ae9d6b8d3ca8 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -192,7 +192,6 @@ static int shmob_drm_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drm_kms_helper_poll_fini(ddev);
- drm_mode_config_cleanup(ddev);
drm_irq_uninstall(ddev);
drm_dev_put(ddev);
@@ -288,7 +287,6 @@ err_irq_uninstall:
drm_irq_uninstall(ddev);
err_modeset_cleanup:
drm_kms_helper_poll_fini(ddev);
- drm_mode_config_cleanup(ddev);
err_free_drm_dev:
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index c51197b6fd85..7a866d6ce6bb 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -126,7 +126,11 @@ static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
{
- drm_mode_config_init(sdev->ddev);
+ int ret;
+
+ ret = drmm_mode_config_init(sdev->ddev);
+ if (ret)
+ return ret;
shmob_drm_crtc_create(sdev);
shmob_drm_encoder_create(sdev);
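drmm_mode_config_init() ties mode-config teardown to the lifetime of the drm_device, which is why the explicit drm_mode_config_cleanup() calls vanish from the unload and error paths in the previous file. The usual probe-side shape, sketched for a hypothetical driver (my_modeset_init and my_mode_config_funcs are illustrative names):

#include <drm/drm_mode_config.h>

static int my_modeset_init(struct drm_device *ddev)
{
	int ret;

	/* Cleanup becomes a DRM-managed (drmm_) release action that
	 * runs on the final drm_dev_put(); no manual cleanup call. */
	ret = drmm_mode_config_init(ddev);
	if (ret)
		return ret;

	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.funcs = &my_mode_config_funcs;	/* hypothetical */

	return 0;
}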
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index c7652584255d..319962a2c17b 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -42,8 +42,8 @@ static const struct sti_compositor_data stih407_compositor_data = {
},
};
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
- struct drm_minor *minor)
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+ struct drm_minor *minor)
{
unsigned int i;
@@ -54,8 +54,6 @@ int sti_compositor_debugfs_init(struct sti_compositor *compo,
for (i = 0; i < STI_MAX_MIXER; i++)
if (compo->mixer[i])
sti_mixer_debugfs_init(compo->mixer[i], minor);
-
- return 0;
}
static int sti_compositor_bind(struct device *dev,
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index ac4bb3834810..25bb01bdd013 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -79,7 +79,7 @@ struct sti_compositor {
struct notifier_block vtg_vblank_nb[STI_MAX_MIXER];
};
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
- struct drm_minor *minor);
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+ struct drm_minor *minor);
#endif
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 49e6cb8f5836..6f37c104c46f 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -319,7 +319,7 @@ static int sti_crtc_late_register(struct drm_crtc *crtc)
struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
if (drm_crtc_index(crtc) == 0)
- return sti_compositor_debugfs_init(compo, crtc->dev->primary);
+ sti_compositor_debugfs_init(compo, crtc->dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index ea64c1dcaf63..a98057431023 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -131,17 +131,17 @@ static struct drm_info_list cursor_debugfs_files[] = {
{ "cursor", cursor_dbg_show, 0, NULL },
};
-static int cursor_debugfs_init(struct sti_cursor *cursor,
- struct drm_minor *minor)
+static void cursor_debugfs_init(struct sti_cursor *cursor,
+ struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
cursor_debugfs_files[i].data = cursor;
- return drm_debugfs_create_files(cursor_debugfs_files,
- ARRAY_SIZE(cursor_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(cursor_debugfs_files,
+ ARRAY_SIZE(cursor_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
@@ -342,7 +342,9 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
- return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+ cursor_debugfs_init(cursor, drm_plane->dev->primary);
+
+ return 0;
}
static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 50870d8cbb76..3f9db3e3f397 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -92,24 +92,16 @@ static struct drm_info_list sti_drm_dbg_list[] = {
{"fps_get", sti_drm_fps_dbg_show, 0},
};
-static int sti_drm_dbg_init(struct drm_minor *minor)
+static void sti_drm_dbg_init(struct drm_minor *minor)
{
- int ret;
-
- ret = drm_debugfs_create_files(sti_drm_dbg_list,
- ARRAY_SIZE(sti_drm_dbg_list),
- minor->debugfs_root, minor);
- if (ret)
- goto err;
+ drm_debugfs_create_files(sti_drm_dbg_list,
+ ARRAY_SIZE(sti_drm_dbg_list),
+ minor->debugfs_root, minor);
debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, minor->debugfs_root,
minor->dev, &sti_drm_fps_fops);
DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
- return 0;
-err:
- DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
- return ret;
}
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
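The conversion in sti_drv.c above repeats through the sti files that follow: debugfs creation failures are not supposed to abort driver initialization, so drm_debugfs_create_files() was changed to return void and each caller sheds its error plumbing. The resulting prototype, for reference:

void drm_debugfs_create_files(const struct drm_info_list *files, int count,
			      struct dentry *root, struct drm_minor *minor);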
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 3d04bfca21a0..de4af7735c46 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -196,16 +196,16 @@ static struct drm_info_list dvo_debugfs_files[] = {
{ "dvo", dvo_dbg_show, 0, NULL },
};
-static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
+static void dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
dvo_debugfs_files[i].data = dvo;
- return drm_debugfs_create_files(dvo_debugfs_files,
- ARRAY_SIZE(dvo_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(dvo_debugfs_files,
+ ARRAY_SIZE(dvo_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_dvo_disable(struct drm_bridge *bridge)
@@ -405,10 +405,7 @@ static int sti_dvo_late_register(struct drm_connector *connector)
= to_sti_dvo_connector(connector);
struct sti_dvo *dvo = dvo_connector->dvo;
- if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
- DRM_ERROR("DVO debugfs setup failed\n");
- return -EINVAL;
- }
+ dvo_debugfs_init(dvo, dvo->drm_dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 11595c748844..2d5a2b5b78b8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -343,9 +343,10 @@ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
for (i = 0; i < nb_files; i++)
gdp_debugfs_files[i].data = gdp;
- return drm_debugfs_create_files(gdp_debugfs_files,
- nb_files,
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(gdp_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
+ return 0;
}
static int sti_gdp_fourcc2format(int fourcc)
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f3f28d79b0e4..a1ec891eaf3a 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -367,16 +367,16 @@ static struct drm_info_list hda_debugfs_files[] = {
{ "hda", hda_dbg_show, 0, NULL },
};
-static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
+static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
hda_debugfs_files[i].data = hda;
- return drm_debugfs_create_files(hda_debugfs_files,
- ARRAY_SIZE(hda_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(hda_debugfs_files,
+ ARRAY_SIZE(hda_debugfs_files),
+ minor->debugfs_root, minor);
}
/**
@@ -643,10 +643,7 @@ static int sti_hda_late_register(struct drm_connector *connector)
= to_sti_hda_connector(connector);
struct sti_hda *hda = hda_connector->hda;
- if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
- DRM_ERROR("HDA debugfs setup failed\n");
- return -EINVAL;
- }
+ hda_debugfs_init(hda, hda->drm_dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 18eaf786ffa4..5b15c4974e6b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -727,16 +727,16 @@ static struct drm_info_list hdmi_debugfs_files[] = {
{ "hdmi", hdmi_dbg_show, 0, NULL },
};
-static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
+static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
hdmi_debugfs_files[i].data = hdmi;
- return drm_debugfs_create_files(hdmi_debugfs_files,
- ARRAY_SIZE(hdmi_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(hdmi_debugfs_files,
+ ARRAY_SIZE(hdmi_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_hdmi_disable(struct drm_bridge *bridge)
@@ -1113,10 +1113,7 @@ static int sti_hdmi_late_register(struct drm_connector *connector)
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
- if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
- DRM_ERROR("HDMI debugfs setup failed\n");
- return -EINVAL;
- }
+ hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 1015abe0ce08..5a4e12194a77 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -639,16 +639,16 @@ static struct drm_info_list hqvdp_debugfs_files[] = {
{ "hqvdp", hqvdp_dbg_show, 0, NULL },
};
-static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
hqvdp_debugfs_files[i].data = hqvdp;
- return drm_debugfs_create_files(hqvdp_debugfs_files,
- ARRAY_SIZE(hqvdp_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(hqvdp_debugfs_files,
+ ARRAY_SIZE(hqvdp_debugfs_files),
+ minor->debugfs_root, minor);
}
/**
@@ -1274,7 +1274,9 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
- return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+ hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+
+ return 0;
}
static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index c3a3e1e5fc8a..7e5f14646625 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -178,7 +178,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
{ "mixer_aux", mixer_dbg_show, 0, NULL },
};
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
{
unsigned int i;
struct drm_info_list *mixer_debugfs_files;
@@ -194,15 +194,15 @@ int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
nb_files = ARRAY_SIZE(mixer1_debugfs_files);
break;
default:
- return -EINVAL;
+ return;
}
for (i = 0; i < nb_files; i++)
mixer_debugfs_files[i].data = mixer;
- return drm_debugfs_create_files(mixer_debugfs_files,
- nb_files,
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(mixer_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
}
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index d9544246913a..ab06beb7b258 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -58,7 +58,7 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
/* depth in Cross-bar control = z order */
#define GAM_MIXER_NB_DEPTH_LEVEL 6
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index c36a8da373cb..df3817f0fd30 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -570,16 +570,16 @@ static struct drm_info_list tvout_debugfs_files[] = {
{ "tvout", tvout_dbg_show, 0, NULL },
};
-static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
+static void tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
tvout_debugfs_files[i].data = tvout;
- return drm_debugfs_create_files(tvout_debugfs_files,
- ARRAY_SIZE(tvout_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(tvout_debugfs_files,
+ ARRAY_SIZE(tvout_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
@@ -603,14 +603,11 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
static int sti_tvout_late_register(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- int ret;
if (tvout->debugfs_registered)
return 0;
- ret = tvout_debugfs_init(tvout, encoder->dev->primary);
- if (ret)
- return ret;
+ tvout_debugfs_init(tvout, encoder->dev->primary);
tvout->debugfs_registered = true;
return 0;
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 2d4230410464..2d818397918d 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -124,16 +124,16 @@ static struct drm_info_list vid_debugfs_files[] = {
{ "vid", vid_dbg_show, 0, NULL },
};
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
vid_debugfs_files[i].data = vid;
- return drm_debugfs_create_files(vid_debugfs_files,
- ARRAY_SIZE(vid_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(vid_debugfs_files,
+ ARRAY_SIZE(vid_debugfs_files),
+ minor->debugfs_root, minor);
}
void sti_vid_commit(struct sti_vid *vid,
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 9dbd78461de1..991849ba50b5 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -26,6 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
int id, void __iomem *baseaddr);
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
#endif
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index ea9fcbdc68b3..0f85dd86cafa 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -88,7 +88,9 @@ static int drv_load(struct drm_device *ddev)
ddev->dev_private = (void *)ldev;
- drm_mode_config_init(ddev);
+ ret = drmm_mode_config_init(ddev);
+ if (ret)
+ return ret;
/*
* set max width and height as default values.
@@ -103,7 +105,7 @@ static int drv_load(struct drm_device *ddev)
ret = ltdc_load(ddev);
if (ret)
- goto err;
+ return ret;
drm_mode_config_reset(ddev);
drm_kms_helper_poll_init(ddev);
@@ -111,9 +113,6 @@ static int drv_load(struct drm_device *ddev)
platform_set_drvdata(pdev, ddev);
return 0;
-err:
- drm_mode_config_cleanup(ddev);
- return ret;
}
static void drv_unload(struct drm_device *ddev)
@@ -122,7 +121,6 @@ static void drv_unload(struct drm_device *ddev)
drm_kms_helper_poll_fini(ddev);
ltdc_unload(ddev);
- drm_mode_config_cleanup(ddev);
}
static __maybe_unused int drv_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index df585fe64f61..f894968d6e45 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -42,8 +42,6 @@
#define MAX_IRQ 4
-#define MAX_ENDPOINTS 2
-
#define HWVER_10200 0x010200
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
@@ -1201,36 +1199,20 @@ int ltdc_load(struct drm_device *ddev)
struct ltdc_device *ldev = ddev->dev_private;
struct device *dev = ddev->dev;
struct device_node *np = dev->of_node;
- struct drm_bridge *bridge[MAX_ENDPOINTS] = {NULL};
- struct drm_panel *panel[MAX_ENDPOINTS] = {NULL};
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
struct drm_crtc *crtc;
struct reset_control *rstc;
struct resource *res;
- int irq, ret, i, endpoint_not_ready = -ENODEV;
+ int irq, i, nb_endpoints;
+ int ret = -ENODEV;
DRM_DEBUG_DRIVER("\n");
- /* Get endpoints if any */
- for (i = 0; i < MAX_ENDPOINTS; i++) {
- ret = drm_of_find_panel_or_bridge(np, 0, i, &panel[i],
- &bridge[i]);
-
- /*
- * If at least one endpoint is -EPROBE_DEFER, defer probing,
- * else if at least one endpoint is ready, continue probing.
- */
- if (ret == -EPROBE_DEFER)
- return ret;
- else if (!ret)
- endpoint_not_ready = 0;
- }
-
- if (endpoint_not_ready)
- return endpoint_not_ready;
-
- rstc = devm_reset_control_get_exclusive(dev, NULL);
-
- mutex_init(&ldev->err_lock);
+ /* Get number of endpoints */
+ nb_endpoints = of_graph_get_endpoint_count(np);
+ if (!nb_endpoints)
+ return -ENODEV;
ldev->pixel_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(ldev->pixel_clk)) {
@@ -1244,6 +1226,43 @@ int ltdc_load(struct drm_device *ddev)
return -ENODEV;
}
+ /* Get endpoints if any */
+ for (i = 0; i < nb_endpoints; i++) {
+ ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
+
+ /*
+ * If an endpoint is missing (-ENODEV), skip it and keep probing;
+ * if any endpoint returns another error (i.e. -EPROBE_DEFER),
+ * stop probing.
+ */
+ if (ret == -ENODEV)
+ continue;
+ else if (ret)
+ goto err;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ DRM_ERROR("panel-bridge endpoint %d\n", i);
+ ret = PTR_ERR(bridge);
+ goto err;
+ }
+ }
+
+ if (bridge) {
+ ret = ltdc_encoder_init(ddev, bridge);
+ if (ret) {
+ DRM_ERROR("init encoder endpoint %d\n", i);
+ goto err;
+ }
+ }
+ }
+
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+
+ mutex_init(&ldev->err_lock);
+
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
usleep_range(10, 20);
@@ -1285,27 +1304,7 @@ int ltdc_load(struct drm_device *ddev)
DRM_ERROR("Failed to register LTDC interrupt\n");
goto err;
}
- }
- /* Add endpoints panels or bridges if any */
- for (i = 0; i < MAX_ENDPOINTS; i++) {
- if (panel[i]) {
- bridge[i] = drm_panel_bridge_add_typed(panel[i],
- DRM_MODE_CONNECTOR_DPI);
- if (IS_ERR(bridge[i])) {
- DRM_ERROR("panel-bridge endpoint %d\n", i);
- ret = PTR_ERR(bridge[i]);
- goto err;
- }
- }
-
- if (bridge[i]) {
- ret = ltdc_encoder_init(ddev, bridge[i]);
- if (ret) {
- DRM_ERROR("init encoder endpoint %d\n", i);
- goto err;
- }
- }
}
crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
@@ -1340,8 +1339,8 @@ int ltdc_load(struct drm_device *ddev)
return 0;
err:
- for (i = 0; i < MAX_ENDPOINTS; i++)
- drm_panel_bridge_remove(bridge[i]);
+ for (i = 0; i < nb_endpoints; i++)
+ drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
clk_disable_unprepare(ldev->pixel_clk);
@@ -1350,11 +1349,14 @@ err:
void ltdc_unload(struct drm_device *ddev)
{
- int i;
+ struct device *dev = ddev->dev;
+ int nb_endpoints, i;
DRM_DEBUG_DRIVER("\n");
- for (i = 0; i < MAX_ENDPOINTS; i++)
+ nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
+
+ for (i = 0; i < nb_endpoints; i++)
drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
pm_runtime_disable(ddev->dev);
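With MAX_ENDPOINTS gone, ltdc sizes its endpoint loop from the device tree itself. A minimal sketch of the idiom, under the assumption of a standard OF graph on the device node (scan_endpoints is a hypothetical name):

#include <linux/of_graph.h>
#include <drm/drm_of.h>

static int scan_endpoints(struct device_node *np)
{
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int i, nb_endpoints, ret;

	nb_endpoints = of_graph_get_endpoint_count(np);
	if (!nb_endpoints)		/* no graph: nothing to drive */
		return -ENODEV;

	for (i = 0; i < nb_endpoints; i++) {
		ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
		if (ret == -ENODEV)
			continue;	/* this endpoint is simply absent */
		if (ret)
			return ret;	/* e.g. -EPROBE_DEFER stops the probe */
		/* bind the panel or bridge here */
	}

	return 0;
}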
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 68d4644ac2dc..ce07ddc3e058 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -22,6 +22,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
@@ -204,10 +205,6 @@ static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
.mode_valid = sun4i_hdmi_mode_valid,
};
-static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
@@ -282,7 +279,7 @@ static const struct drm_connector_funcs sun4i_hdmi_connector_funcs = {
};
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
-static bool sun4i_hdmi_cec_pin_read(struct cec_adapter *adap)
+static int sun4i_hdmi_cec_pin_read(struct cec_adapter *adap)
{
struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
@@ -611,11 +608,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(&hdmi->encoder,
&sun4i_hdmi_helper_funcs);
- ret = drm_encoder_init(drm,
- &hdmi->encoder,
- &sun4i_hdmi_funcs,
- DRM_MODE_ENCODER_TMDS,
- NULL);
+ ret = drm_simple_encoder_init(drm, &hdmi->encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
goto err_put_ddc_i2c;
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 26e5c7ceb8ff..ffda3184aa12 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -12,6 +12,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
@@ -96,10 +97,6 @@ static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
.enable = sun4i_lvds_encoder_enable,
};
-static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
@@ -121,11 +118,8 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_encoder_helper_add(&lvds->encoder,
&sun4i_lvds_enc_helper_funcs);
- ret = drm_encoder_init(drm,
- &lvds->encoder,
- &sun4i_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS,
- NULL);
+ ret = drm_simple_encoder_init(drm, &lvds->encoder,
+ DRM_MODE_ENCODER_LVDS);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the lvds encoder\n");
goto err_out;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 3b23d5be3cf3..5a7d43939ae6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -14,6 +14,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
@@ -188,15 +189,6 @@ static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
.mode_valid = sun4i_rgb_mode_valid,
};
-static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
- .destroy = sun4i_rgb_enc_destroy,
-};
-
int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
@@ -218,11 +210,8 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_encoder_helper_add(&rgb->encoder,
&sun4i_rgb_enc_helper_funcs);
- ret = drm_encoder_init(drm,
- &rgb->encoder,
- &sun4i_rgb_enc_funcs,
- DRM_MODE_ENCODER_NONE,
- NULL);
+ ret = drm_simple_encoder_init(drm, &rgb->encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the rgb encoder\n");
goto err_out;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 624437b27cdc..359b56e43b83 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -812,10 +812,8 @@ static int sun4i_tcon_init_irq(struct device *dev,
int irq, ret;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Couldn't retrieve the TCON interrupt\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0,
dev_name(dev), tcon);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 39c15282e448..63f4428ac3bf 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -19,6 +19,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -473,15 +474,6 @@ static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
.mode_set = sun4i_tv_mode_set,
};
-static void sun4i_tv_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_tv_funcs = {
- .destroy = sun4i_tv_destroy,
-};
-
static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
{
int i;
@@ -592,11 +584,8 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(&tv->encoder,
&sun4i_tv_helper_funcs);
- ret = drm_encoder_init(drm,
- &tv->encoder,
- &sun4i_tv_funcs,
- DRM_MODE_ENCODER_TVDAC,
- NULL);
+ ret = drm_simple_encoder_init(drm, &tv->encoder,
+ DRM_MODE_ENCODER_TVDAC);
if (ret) {
dev_err(dev, "Couldn't initialise the TV encoder\n");
goto err_disable_clk;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 3eb89f1eb0e1..aa67cb037e9d 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -24,6 +24,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
@@ -846,10 +847,6 @@ static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = {
.enable = sun6i_dsi_encoder_enable,
};
-static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
const struct mipi_dsi_msg *msg)
{
@@ -1062,11 +1059,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(&dsi->encoder,
&sun6i_dsi_enc_helper_funcs);
- ret = drm_encoder_init(drm,
- &dsi->encoder,
- &sun6i_dsi_enc_funcs,
- DRM_MODE_ENCODER_DSI,
- NULL);
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
if (ret) {
dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
return ret;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index e8a317d5ba19..972682bb8000 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -10,6 +10,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun8i_dw_hdmi.h"
#include "sun8i_tcon_top.h"
@@ -29,10 +30,6 @@ sun8i_dw_hdmi_encoder_helper_funcs = {
.mode_set = sun8i_dw_hdmi_encoder_mode_set,
};
-static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_mode_status
sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector,
const struct drm_display_mode *mode)
@@ -220,8 +217,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &sun8i_dw_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
sun8i_hdmi_phy_init(hdmi->phy);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 4a64f7ae437a..56cc037fd312 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -27,314 +27,225 @@
#include "sun8i_vi_layer.h"
#include "sunxi_engine.h"
+struct de2_fmt_info {
+ u32 drm_fmt;
+ u32 de2_fmt;
+};
+
static const struct de2_fmt_info de2_formats[] = {
{
.drm_fmt = DRM_FORMAT_ARGB8888,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR8888,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA8888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA8888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_XRGB8888,
.de2_fmt = SUN8I_MIXER_FBFMT_XRGB8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_XBGR8888,
.de2_fmt = SUN8I_MIXER_FBFMT_XBGR8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBX8888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBX8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRX8888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRX8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGB888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGR888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGR888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGB565,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB565,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGR565,
.de2_fmt = SUN8I_MIXER_FBFMT_BGR565,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_UYVY,
.de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_VYUY,
.de2_fmt = SUN8I_MIXER_FBFMT_VYUY,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUYV,
.de2_fmt = SUN8I_MIXER_FBFMT_YUYV,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVYU,
.de2_fmt = SUN8I_MIXER_FBFMT_YVYU,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV16,
.de2_fmt = SUN8I_MIXER_FBFMT_NV16,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV61,
.de2_fmt = SUN8I_MIXER_FBFMT_NV61,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV12,
.de2_fmt = SUN8I_MIXER_FBFMT_NV12,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV21,
.de2_fmt = SUN8I_MIXER_FBFMT_NV21,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV420,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV411,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU420,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU411,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_P010,
.de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_P210,
.de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
};
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(de2_formats); ++i)
- if (de2_formats[i].drm_fmt == format)
- return &de2_formats[i];
+ if (de2_formats[i].drm_fmt == format) {
+ *hw_format = de2_formats[i].de2_fmt;
+ return 0;
+ }
- return NULL;
+ return -EINVAL;
}
static void sun8i_mixer_commit(struct sunxi_engine *engine)
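sun8i_mixer_format_info() used to hand back a descriptor carrying the hardware code, an rgb flag and a CSC mode; the replacement returns only the hardware format code through an out-parameter, leaving the RGB and CSC decisions to the framebuffer's drm_format_info (see the ui/vi layer hunks below). A typical call site, sketched as a small helper (layer_pick_hw_format is a hypothetical name, assuming the driver's usual headers):

static int layer_pick_hw_format(struct drm_plane_state *state, u32 *hw_fmt)
{
	const struct drm_format_info *fmt = state->fb->format;

	/* Returns -EINVAL when the fourcc has no DE2 equivalent. */
	return sun8i_mixer_drm_format_to_hw(fmt->format, hw_fmt);
}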
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 345b28b0a80a..7576b523fdbb 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -10,7 +10,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#include "sun8i_csc.h"
#include "sunxi_engine.h"
#define SUN8I_MIXER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
@@ -144,13 +143,6 @@
#define SUN50I_MIXER_CDC0_EN 0xd0000
#define SUN50I_MIXER_CDC1_EN 0xd8000
-struct de2_fmt_info {
- u32 drm_fmt;
- u32 de2_fmt;
- bool rgb;
- enum sun8i_csc_mode csc;
-};
-
/**
* struct sun8i_mixer_cfg - mixer HW configuration
* @vi_num: number of VI channels
@@ -210,5 +202,5 @@ sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
return DE2_CH_BASE + channel * DE2_CH_SIZE;
}
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format);
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format);
#endif /* _SUN8I_MIXER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index c87fd842918e..54f937a7d5e7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -19,8 +19,8 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include "sun8i_ui_layer.h"
#include "sun8i_mixer.h"
+#include "sun8i_ui_layer.h"
#include "sun8i_ui_scaler.h"
static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -174,18 +174,20 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
- const struct de2_fmt_info *fmt_info;
- u32 val, ch_base;
+ const struct drm_format_info *fmt;
+ u32 val, ch_base, hw_fmt;
+ int ret;
ch_base = sun8i_channel_base(mixer, channel);
- fmt_info = sun8i_mixer_format_info(state->fb->format->format);
- if (!fmt_info || !fmt_info->rgb) {
+ fmt = state->fb->format;
+ ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+ if (ret || fmt->is_yuv) {
DRM_DEBUG_DRIVER("Invalid format\n");
return -EINVAL;
}
- val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
+ val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index b8398ca18b0f..22c8c5375d0d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -12,8 +12,9 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include "sun8i_vi_layer.h"
+#include "sun8i_csc.h"
#include "sun8i_mixer.h"
+#include "sun8i_vi_layer.h"
#include "sun8i_vi_scaler.h"
static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -210,28 +211,47 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
return 0;
}
+static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
+{
+ if (!format->is_yuv)
+ return SUN8I_CSC_MODE_OFF;
+
+ switch (format->format) {
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ return SUN8I_CSC_MODE_YVU2RGB;
+ default:
+ return SUN8I_CSC_MODE_YUV2RGB;
+ }
+}
+
static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
- const struct de2_fmt_info *fmt_info;
- u32 val, ch_base;
+ u32 val, ch_base, csc_mode, hw_fmt;
+ const struct drm_format_info *fmt;
+ int ret;
ch_base = sun8i_channel_base(mixer, channel);
- fmt_info = sun8i_mixer_format_info(state->fb->format->format);
- if (!fmt_info) {
+ fmt = state->fb->format;
+ ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+ if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
- return -EINVAL;
+ return ret;
}
- val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
+ val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
- if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
- sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc,
+ csc_mode = sun8i_vi_layer_get_csc_mode(fmt);
+ if (csc_mode != SUN8I_CSC_MODE_OFF) {
+ sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode,
state->color_encoding,
state->color_range);
sun8i_csc_enable_ccsc(mixer, channel, true);
@@ -239,7 +259,7 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
sun8i_csc_enable_ccsc(mixer, channel, false);
}
- if (fmt_info->rgb)
+ if (!fmt->is_yuv)
val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE;
else
val = 0;
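The per-entry rgb flag dropped from de2_fmt_info was redundant with core DRM metadata: struct drm_format_info already records whether a fourcc is YUV, and sun8i_vi_layer_get_csc_mode() above derives the CSC matrix from the fourcc alone (only the YVU orderings need the swapped-chroma matrix). A small sketch of the core lookup this relies on (format_needs_csc is a hypothetical name):

#include <drm/drm_fourcc.h>

static bool format_needs_csc(u32 fourcc)
{
	const struct drm_format_info *info = drm_format_info(fourcc);

	/* true for e.g. DRM_FORMAT_NV12, false for DRM_FORMAT_XRGB8888 */
	return info && info->is_yuv;
}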
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 1a7b08f35776..83f31c6e891c 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1496,7 +1496,6 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
struct drm_minor *minor = crtc->dev->primary;
struct dentry *root;
struct tegra_dc *dc = to_tegra_dc(crtc);
- int err;
#ifdef CONFIG_DEBUG_FS
root = crtc->debugfs_entry;
@@ -1512,17 +1511,9 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
for (i = 0; i < count; i++)
dc->debugfs_files[i].data = dc;
- err = drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(dc->debugfs_files);
- dc->debugfs_files = NULL;
-
- return err;
}
static void tegra_dc_early_unregister(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 7dfb50f65067..105fb9cdbb3b 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -5,12 +5,10 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index bd268028fb3d..211906347f3f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -839,11 +839,11 @@ static struct drm_info_list tegra_debugfs_list[] = {
{ "iova", tegra_debugfs_iova, 0 },
};
-static int tegra_debugfs_init(struct drm_minor *minor)
+static void tegra_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
@@ -1039,6 +1039,7 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
+ struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
struct iommu_domain *domain;
/*
@@ -1076,7 +1077,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
* sufficient and whether or not the host1x is attached to an IOMMU
* doesn't matter.
*/
- if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
+ if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
return true;
return domain != NULL;
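host1x_get_dma_mask() was introduced alongside this change; it reports the DMA mask the host1x device actually ended up with, which may be narrower than the raw mask of dev->dev.parent that the old check consulted. Its prototype, for reference:

u64 host1x_get_dma_mask(struct host1x *host1x);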
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index ed99b67deb29..b25443255be6 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -9,7 +9,7 @@
#include <linux/host1x.h>
#include <linux/iova.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>
@@ -152,8 +152,6 @@ enum drm_connector_status
tegra_output_connector_detect(struct drm_connector *connector, bool force);
void tegra_output_connector_destroy(struct drm_connector *connector);
-void tegra_output_encoder_destroy(struct drm_encoder *encoder);
-
/* from dpaux.c */
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 88b9d64c77bf..38beab9ab4f8 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -22,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
#include "drm.h"
@@ -234,7 +235,6 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_dsi *dsi = to_dsi(output);
- int err;
dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
@@ -244,17 +244,9 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
for (i = 0; i < count; i++)
dsi->debugfs_files[i].data = dsi;
- err = drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(dsi->debugfs_files);
- dsi->debugfs_files = NULL;
-
- return err;
}
static void tegra_dsi_early_unregister(struct drm_connector *connector)
@@ -824,10 +816,6 @@ static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs
.mode_valid = tegra_dsi_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
{
int err;
@@ -1058,9 +1046,8 @@ static int tegra_dsi_init(struct host1x_client *client)
&tegra_dsi_connector_helper_funcs);
dsi->output.connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &dsi->output.encoder,
- &tegra_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ drm_simple_encoder_init(drm, &dsi->output.encoder,
+ DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(&dsi->output.encoder,
&tegra_dsi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index b8a328f53862..2b0666ac681b 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -4,7 +4,7 @@
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*
* Based on the KMS/FB CMA helpers
- * Copyright (C) 2012 Analog Device Inc.
+ * Copyright (C) 2012 Analog Devices Inc.
*/
#include <linux/console.h>
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 38252c0f068d..d09a24931c87 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/hdmi.h>
#include <linux/math64.h>
#include <linux/module.h>
@@ -22,6 +21,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "hda.h"
#include "hdmi.h"
@@ -1064,7 +1064,6 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_hdmi *hdmi = to_hdmi(output);
- int err;
hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
@@ -1074,17 +1073,9 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
for (i = 0; i < count; i++)
hdmi->debugfs_files[i].data = hdmi;
- err = drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(hdmi->debugfs_files);
- hdmi->debugfs_files = NULL;
-
- return err;
}
static void tegra_hdmi_early_unregister(struct drm_connector *connector)
@@ -1136,10 +1127,6 @@ tegra_hdmi_connector_helper_funcs = {
.mode_valid = tegra_hdmi_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
@@ -1445,8 +1432,8 @@ static int tegra_hdmi_init(struct host1x_client *client)
&tegra_hdmi_connector_helper_funcs);
hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, &hdmi->output.encoder,
+ DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&hdmi->output.encoder,
&tegra_hdmi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index a264259b97a2..e36e5e7c2f69 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "drm.h"
#include "dc.h"
@@ -79,11 +80,6 @@ void tegra_output_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-void tegra_output_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static irqreturn_t hpd_irq(int irq, void *data)
{
struct tegra_output *output = data;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 4be4dfd4a68a..0562a7eb793f 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "drm.h"
#include "dc.h"
@@ -110,10 +111,6 @@ static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs
.mode_valid = tegra_rgb_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
@@ -281,8 +278,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
&tegra_rgb_connector_helper_funcs);
output->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(&output->encoder,
&tegra_rgb_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 81226a4953c1..7cbcf9617f5e 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -6,7 +6,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
-#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -23,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
#include <drm/drm_scdc_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
#include "dp.h"
@@ -1687,7 +1687,6 @@ static int tegra_sor_late_register(struct drm_connector *connector)
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_sor *sor = to_sor(output);
- int err;
sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
@@ -1697,17 +1696,9 @@ static int tegra_sor_late_register(struct drm_connector *connector)
for (i = 0; i < count; i++)
sor->debugfs_files[i].data = sor;
- err = drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(sor->debugfs_files);
- sor->debugfs_files = NULL;
-
- return err;
}
static void tegra_sor_early_unregister(struct drm_connector *connector)
@@ -1805,10 +1796,6 @@ static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs
.mode_valid = tegra_sor_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -3102,8 +3089,7 @@ static int tegra_sor_init(struct host1x_client *client)
&tegra_sor_connector_helper_funcs);
sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
- encoder, NULL);
+ drm_simple_encoder_init(drm, &sor->output.encoder, encoder);
drm_encoder_helper_add(&sor->output.encoder, helpers);
drm_connector_attach_encoder(&sor->output.connector,
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 3221a707e073..89a226912de8 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -24,7 +24,7 @@
static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
{
struct drm_device *ddev = tcrtc->crtc.dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct drm_pending_vblank_event *event;
unsigned long flags;
bool busy;
@@ -88,7 +88,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct dispc_device *dispc = tidss->dispc;
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
@@ -165,7 +165,7 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
dev_dbg(ddev->dev,
@@ -216,7 +216,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
unsigned long flags;
int r;
@@ -259,7 +259,7 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
@@ -295,7 +295,7 @@ enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
}
@@ -314,7 +314,7 @@ static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -328,7 +328,7 @@ static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
dev_dbg(ddev->dev, "%s\n", __func__);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 29f42768e294..629dd06393f6 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -181,10 +181,6 @@ const struct dispc_features dispc_am65x_feats = {
.vid_name = { "vid", "vidl1" },
.vid_lite = { false, true, },
.vid_order = { 1, 0 },
-
- .errata = {
- .i2000 = true,
- },
};
static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
@@ -2674,12 +2670,9 @@ int dispc_init(struct tidss_device *tidss)
return -ENOMEM;
num_fourccs = 0;
- for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
- if (feat->errata.i2000 &&
- dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
- continue;
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i)
dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
- }
+
dispc->num_fourccs = num_fourccs;
dispc->tidss = tidss;
dispc->dev = dev;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index a4a68249e44b..902e612ff7ac 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -46,10 +46,6 @@ struct dispc_features_scaling {
u32 xinc_max;
};
-struct dispc_errata {
- bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
-};
-
enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
DISPC_VP_OLDI, /* OLDI (LVDS) output */
@@ -83,8 +79,6 @@ struct dispc_features {
const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
bool vid_lite[TIDSS_MAX_PLANES];
u32 vid_order[TIDSS_MAX_PLANES];
-
- struct dispc_errata errata;
};
extern const struct dispc_features dispc_k2g_feats;
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index d95e4be2c7b9..99edc66ebdef 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "tidss_dispc.h"
@@ -102,15 +103,7 @@ static const struct dev_pm_ops tidss_pm_ops = {
static void tidss_release(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
-
drm_kms_helper_poll_fini(ddev);
-
- tidss_modeset_cleanup(tidss);
-
- drm_dev_fini(ddev);
-
- kfree(tidss);
}
DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
@@ -142,26 +135,18 @@ static int tidss_probe(struct platform_device *pdev)
dev_dbg(dev, "%s\n", __func__);
- /* Can't use devm_* since drm_device's lifetime may exceed dev's */
- tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
- if (!tidss)
- return -ENOMEM;
+ tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver,
+ struct tidss_device, ddev);
+ if (IS_ERR(tidss))
+ return PTR_ERR(tidss);
ddev = &tidss->ddev;
- ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
- if (ret) {
- kfree(ddev);
- return ret;
- }
-
tidss->dev = dev;
tidss->feat = of_device_get_match_data(dev);
platform_set_drvdata(pdev, tidss);
- ddev->dev_private = tidss;
-
ret = dispc_init(tidss);
if (ret) {
dev_err(dev, "failed to initialize dispc: %d\n", ret);
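
The probe rework above swaps the kzalloc() plus devm_drm_dev_init() pair for devm_drm_dev_alloc(), which allocates the driver structure around an embedded drm_device and frees it from the final drm_dev_put(). A minimal sketch, assuming a hypothetical struct foo_device and foo_driver:

        struct foo_device {
                struct drm_device ddev;         /* embedded, not a pointer */
                /* driver-private state follows */
        };

        static struct drm_driver foo_driver;    /* hypothetical, filled in elsewhere */

        static int foo_probe(struct platform_device *pdev)
        {
                struct foo_device *foo;

                /* One call allocates foo, initializes foo->ddev and ties the
                 * allocation's lifetime to the drm_device's refcount. */
                foo = devm_drm_dev_alloc(&pdev->dev, &foo_driver,
                                         struct foo_device, ddev);
                if (IS_ERR(foo))
                        return PTR_ERR(foo);

                platform_set_drvdata(pdev, foo);
                return 0;
        }
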
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index e2aa6436ad18..3b0a3d87b7c4 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -29,10 +29,10 @@ struct tidss_device {
spinlock_t wait_lock; /* protects the irq masks */
dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */
-
- struct drm_atomic_state *saved_state;
};
+#define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)
+
int tidss_runtime_get(struct tidss_device *tidss);
void tidss_runtime_put(struct tidss_device *tidss);
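
With the drm_device embedded in struct tidss_device, dev_private becomes redundant: any struct drm_device pointer can be converted back to the enclosing driver structure by subtracting the member offset, which is exactly what container_of() does and what the new to_tidss() macro wraps. In brief:

        /* container_of(ptr, type, member) yields the structure enclosing ptr. */
        struct tidss_device *tidss = to_tidss(crtc->dev);       /* no dev_private */
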
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 612c046738e5..1b80f2d62e0a 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -23,7 +23,7 @@ static void tidss_irq_update(struct tidss_device *tidss)
void tidss_irq_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
@@ -38,7 +38,7 @@ void tidss_irq_enable_vblank(struct drm_crtc *crtc)
void tidss_irq_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
@@ -53,7 +53,7 @@ void tidss_irq_disable_vblank(struct drm_crtc *crtc)
irqreturn_t tidss_irq_handler(int irq, void *arg)
{
struct drm_device *ddev = (struct drm_device *)arg;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned int id;
dispc_irq_t irqstatus;
@@ -95,7 +95,7 @@ void tidss_irq_resume(struct tidss_device *tidss)
void tidss_irq_preinstall(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
spin_lock_init(&tidss->wait_lock);
@@ -109,7 +109,7 @@ void tidss_irq_preinstall(struct drm_device *ddev)
int tidss_irq_postinstall(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
unsigned int i;
@@ -138,7 +138,7 @@ int tidss_irq_postinstall(struct drm_device *ddev)
void tidss_irq_uninstall(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
tidss_runtime_get(tidss);
dispc_set_irqenable(tidss->dispc, 0);
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 7d419960b030..4b99e9fa84a5 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -25,7 +25,7 @@
static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *ddev = old_state->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -258,7 +258,9 @@ int tidss_modeset_init(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s\n", __func__);
- drm_mode_config_init(ddev);
+ ret = drmm_mode_config_init(ddev);
+ if (ret)
+ return ret;
ddev->mode_config.min_width = 8;
ddev->mode_config.min_height = 8;
@@ -270,11 +272,11 @@ int tidss_modeset_init(struct tidss_device *tidss)
ret = tidss_dispc_modeset_init(tidss);
if (ret)
- goto err_mode_config_cleanup;
+ return ret;
ret = drm_vblank_init(ddev, tidss->num_crtcs);
if (ret)
- goto err_mode_config_cleanup;
+ return ret;
/* Start with vertical blanking interrupt reporting disabled. */
for (i = 0; i < tidss->num_crtcs; ++i)
@@ -285,15 +287,4 @@ int tidss_modeset_init(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s done\n", __func__);
return 0;
-
-err_mode_config_cleanup:
- drm_mode_config_cleanup(ddev);
- return ret;
-}
-
-void tidss_modeset_cleanup(struct tidss_device *tidss)
-{
- struct drm_device *ddev = &tidss->ddev;
-
- drm_mode_config_cleanup(ddev);
}
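
drmm_mode_config_init() registers drm_mode_config_cleanup() as a managed release action on the drm_device, which is why every error path above can now simply return and why tidss_modeset_cleanup() disappears entirely. The resulting error handling, reduced to a sketch:

        ret = drmm_mode_config_init(ddev);
        if (ret)
                return ret;

        ret = drm_vblank_init(ddev, num_crtcs);
        if (ret)
                return ret;     /* mode-config cleanup still runs, automatically,
                                 * when the drm_device is released */
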
diff --git a/drivers/gpu/drm/tidss/tidss_kms.h b/drivers/gpu/drm/tidss/tidss_kms.h
index dda5625d0128..99aaff099f22 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.h
+++ b/drivers/gpu/drm/tidss/tidss_kms.h
@@ -10,6 +10,5 @@
struct tidss_device;
int tidss_modeset_init(struct tidss_device *tidss);
-void tidss_modeset_cleanup(struct tidss_device *tidss);
#endif
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 798488948fc5..0a563eabcbb9 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -22,7 +22,7 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_device *ddev = plane->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
const struct drm_format_info *finfo;
struct drm_crtc_state *crtc_state;
@@ -101,7 +101,7 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *ddev = plane->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
struct drm_plane_state *state = plane->state;
u32 hw_videoport;
@@ -133,7 +133,7 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *ddev = plane->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
dev_dbg(ddev->dev, "%s\n", __func__);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 0791a0200cc3..a5e9ee4c7fbf 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -390,10 +390,9 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
ret = drm_dev_register(ddev, 0);
if (ret)
goto init_failed;
+ priv->is_registered = true;
drm_fbdev_generic_setup(ddev, bpp);
-
- priv->is_registered = true;
return 0;
init_failed:
@@ -478,26 +477,17 @@ static struct drm_info_list tilcdc_debugfs_list[] = {
{ "mm", tilcdc_mm_show, 0 },
};
-static int tilcdc_debugfs_init(struct drm_minor *minor)
+static void tilcdc_debugfs_init(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
struct tilcdc_module *mod;
- int ret;
- ret = drm_debugfs_create_files(tilcdc_debugfs_list,
- ARRAY_SIZE(tilcdc_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(tilcdc_debugfs_list,
+ ARRAY_SIZE(tilcdc_debugfs_list),
+ minor->debugfs_root, minor);
list_for_each_entry(mod, &module_list, list)
if (mod->funcs->debugfs_init)
mod->funcs->debugfs_init(mod, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
- return ret;
- }
-
- return ret;
}
#endif
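
drm_debugfs_create_files() no longer returns an error (debugfs creation is deliberately best-effort), so the debugfs_init hooks here and in the v3d hunk below are converted from int to void and lose their error labels. The resulting shape, sketched:

        static void foo_debugfs_init(struct drm_minor *minor)
        {
                /* Failures are intentionally ignored: debugfs is optional. */
                drm_debugfs_create_files(foo_debugfs_list,
                                         ARRAY_SIZE(foo_debugfs_list),
                                         minor->debugfs_root, minor);
        }
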
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 28b7f703236e..b177525588c1 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
@@ -83,10 +84,6 @@ int tilcdc_add_component_encoder(struct drm_device *ddev)
return 0;
}
-static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static
int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
{
@@ -131,9 +128,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
if (!priv->external_encoder)
return -ENOMEM;
- ret = drm_encoder_init(ddev, priv->external_encoder,
- &tilcdc_external_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(ddev, priv->external_encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret) {
dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 5584e656b857..12823d60c4e8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -16,6 +16,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_panel.h"
@@ -74,10 +75,6 @@ static void panel_encoder_mode_set(struct drm_encoder *encoder,
/* nothing needed */
}
-static const struct drm_encoder_funcs panel_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
.dpms = panel_encoder_dpms,
.prepare = panel_encoder_prepare,
@@ -102,8 +99,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
encoder = &panel_encoder->base;
encoder->possible_crtcs = 1;
- ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
if (ret < 0)
goto fail;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 4160e74e4751..2b6414f0fa75 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -1,5 +1,24 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_CIRRUS_QEMU
+ tristate "Cirrus driver for QEMU emulated device"
+ depends on DRM && PCI && MMU
+ select DRM_KMS_HELPER
+ select DRM_GEM_SHMEM_HELPER
+ help
+ This is a KMS driver for the emulated cirrus device in QEMU.
+ It is *NOT* intended for real cirrus devices. This requires
+ the modesetting userspace X.org driver.
+
+ Cirrus is obsolete; the hardware was designed in the '90s
+ and can't keep up with today's needs. More background:
+ https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+ Better alternatives are:
+ - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+ - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+ - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
+
config DRM_GM12U320
tristate "GM12U320 driver for USB projectors"
depends on DRM && USB
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index c96ceee71453..6ae4e9e5a35f 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index d2ff63ce8eaf..744a8e337e41 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -35,6 +35,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -58,6 +59,8 @@ struct cirrus_device {
void __iomem *mmio;
};
+#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
+
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
@@ -310,7 +313,7 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
struct drm_rect *rect)
{
- struct cirrus_device *cirrus = fb->dev->dev_private;
+ struct cirrus_device *cirrus = to_cirrus(fb->dev);
void *vmap;
int idx, ret;
@@ -435,7 +438,7 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+ struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
cirrus_fb_blit_fullscreen(plane_state->fb);
@@ -444,7 +447,7 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
- struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+ struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
struct drm_plane_state *state = pipe->plane.state;
struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
@@ -509,11 +512,15 @@ static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static void cirrus_mode_config_init(struct cirrus_device *cirrus)
+static int cirrus_mode_config_init(struct cirrus_device *cirrus)
{
struct drm_device *dev = &cirrus->dev;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
- drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
@@ -521,18 +528,12 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
dev->mode_config.preferred_depth = 16;
dev->mode_config.prefer_shadow = 0;
dev->mode_config.funcs = &cirrus_mode_config_funcs;
+
+ return 0;
}
/* ------------------------------------------------------------------ */
-static void cirrus_release(struct drm_device *dev)
-{
- struct cirrus_device *cirrus = dev->dev_private;
-
- drm_mode_config_cleanup(dev);
- kfree(cirrus);
-}
-
DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
@@ -546,7 +547,6 @@ static struct drm_driver cirrus_driver = {
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .release = cirrus_release,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -560,7 +560,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
@@ -569,36 +569,34 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
return ret;
ret = -ENOMEM;
- cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL);
- if (cirrus == NULL)
- goto err_pci_release;
+ cirrus = devm_drm_dev_alloc(&pdev->dev, &cirrus_driver,
+ struct cirrus_device, dev);
+ if (IS_ERR(cirrus))
+ return PTR_ERR(cirrus);
dev = &cirrus->dev;
- ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev);
- if (ret)
- goto err_free_cirrus;
- dev->dev_private = cirrus;
- ret = -ENOMEM;
- cirrus->vram = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ cirrus->vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
if (cirrus->vram == NULL)
- goto err_dev_put;
+ return -ENOMEM;
- cirrus->mmio = ioremap(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
+ cirrus->mmio = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
if (cirrus->mmio == NULL)
- goto err_unmap_vram;
+ return -ENOMEM;
- cirrus_mode_config_init(cirrus);
+ ret = cirrus_mode_config_init(cirrus);
+ if (ret)
+ return ret;
ret = cirrus_conn_init(cirrus);
if (ret < 0)
- goto err_cleanup;
+ return ret;
ret = cirrus_pipe_init(cirrus);
if (ret < 0)
- goto err_cleanup;
+ return ret;
drm_mode_config_reset(dev);
@@ -606,36 +604,18 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_cleanup;
+ return ret;
drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
return 0;
-
-err_cleanup:
- drm_mode_config_cleanup(dev);
- iounmap(cirrus->mmio);
-err_unmap_vram:
- iounmap(cirrus->vram);
-err_dev_put:
- drm_dev_put(dev);
-err_free_cirrus:
- kfree(cirrus);
-err_pci_release:
- pci_release_regions(pdev);
- return ret;
}
static void cirrus_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct cirrus_device *cirrus = dev->dev_private;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- iounmap(cirrus->mmio);
- iounmap(cirrus->vram);
- drm_dev_put(dev);
- pci_release_regions(pdev);
}
static const struct pci_device_id pciidlist[] = {
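
The cirrus probe now relies entirely on managed helpers: pcim_enable_device() arranges automatic disable and region release on detach, devm_ioremap() unmaps on the same path, and devm_drm_dev_alloc() handles the allocation, so the whole err_* unwind ladder and most of cirrus_pci_remove() vanish. The before/after for a single resource, as a sketch:

        /* Before: every later failure had to unwind this by hand. */
        cirrus->vram = ioremap(pci_resource_start(pdev, 0),
                               pci_resource_len(pdev, 0));
        if (!cirrus->vram)
                goto err_dev_put;       /* plus iounmap() in remove() */

        /* After: the unmap is tied to the underlying struct device. */
        cirrus->vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
                                    pci_resource_len(pdev, 0));
        if (!cirrus->vram)
                return -ENOMEM;
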
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index a48173441ae0..cc397671f689 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -19,6 +19,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -87,18 +88,18 @@ struct gm12u320_device {
struct usb_device *udev;
unsigned char *cmd_buf;
unsigned char *data_buf[GM12U320_BLOCK_COUNT];
- bool pipe_enabled;
struct {
- bool run;
- struct workqueue_struct *workq;
- struct work_struct work;
- wait_queue_head_t waitq;
+ struct delayed_work work;
struct mutex lock;
struct drm_framebuffer *fb;
struct drm_rect rect;
+ int frame;
+ int draw_status_timeout;
} fb_update;
};
+#define to_gm12u320(__dev) container_of(__dev, struct gm12u320_device, dev)
+
static const char cmd_data[CMD_SIZE] = {
0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
0x68, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x10, 0xff,
@@ -159,7 +160,7 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
int i, block_size;
const char *hdr;
- gm12u320->cmd_buf = kmalloc(CMD_SIZE, GFP_KERNEL);
+ gm12u320->cmd_buf = drmm_kmalloc(&gm12u320->dev, CMD_SIZE, GFP_KERNEL);
if (!gm12u320->cmd_buf)
return -ENOMEM;
@@ -172,7 +173,8 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
hdr = data_block_header;
}
- gm12u320->data_buf[i] = kzalloc(block_size, GFP_KERNEL);
+ gm12u320->data_buf[i] = drmm_kzalloc(&gm12u320->dev,
+ block_size, GFP_KERNEL);
if (!gm12u320->data_buf[i])
return -ENOMEM;
@@ -182,26 +184,9 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
data_block_footer, DATA_BLOCK_FOOTER_SIZE);
}
- gm12u320->fb_update.workq = create_singlethread_workqueue(DRIVER_NAME);
- if (!gm12u320->fb_update.workq)
- return -ENOMEM;
-
return 0;
}
-static void gm12u320_usb_free(struct gm12u320_device *gm12u320)
-{
- int i;
-
- if (gm12u320->fb_update.workq)
- destroy_workqueue(gm12u320->fb_update.workq);
-
- for (i = 0; i < GM12U320_BLOCK_COUNT; i++)
- kfree(gm12u320->data_buf[i]);
-
- kfree(gm12u320->cmd_buf);
-}
-
static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
u8 req_a, u8 req_b,
u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d)
@@ -344,80 +329,77 @@ unlock:
static void gm12u320_fb_update_work(struct work_struct *work)
{
struct gm12u320_device *gm12u320 =
- container_of(work, struct gm12u320_device, fb_update.work);
- int draw_status_timeout = FIRST_FRAME_TIMEOUT;
+ container_of(to_delayed_work(work), struct gm12u320_device,
+ fb_update.work);
int block, block_size, len;
- int frame = 0;
int ret = 0;
- while (gm12u320->fb_update.run) {
- gm12u320_copy_fb_to_blocks(gm12u320);
-
- for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
- if (block == GM12U320_BLOCK_COUNT - 1)
- block_size = DATA_LAST_BLOCK_SIZE;
- else
- block_size = DATA_BLOCK_SIZE;
-
- /* Send data command to device */
- memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
- gm12u320->cmd_buf[8] = block_size & 0xff;
- gm12u320->cmd_buf[9] = block_size >> 8;
- gm12u320->cmd_buf[20] = 0xfc - block * 4;
- gm12u320->cmd_buf[21] = block | (frame << 7);
-
- ret = usb_bulk_msg(gm12u320->udev,
- usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
- gm12u320->cmd_buf, CMD_SIZE, &len,
- CMD_TIMEOUT);
- if (ret || len != CMD_SIZE)
- goto err;
-
- /* Send data block to device */
- ret = usb_bulk_msg(gm12u320->udev,
- usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
- gm12u320->data_buf[block], block_size,
- &len, DATA_TIMEOUT);
- if (ret || len != block_size)
- goto err;
-
- /* Read status */
- ret = usb_bulk_msg(gm12u320->udev,
- usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
- gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
- CMD_TIMEOUT);
- if (ret || len != READ_STATUS_SIZE)
- goto err;
- }
+ gm12u320_copy_fb_to_blocks(gm12u320);
+
+ for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
+ if (block == GM12U320_BLOCK_COUNT - 1)
+ block_size = DATA_LAST_BLOCK_SIZE;
+ else
+ block_size = DATA_BLOCK_SIZE;
+
+ /* Send data command to device */
+ memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
+ gm12u320->cmd_buf[8] = block_size & 0xff;
+ gm12u320->cmd_buf[9] = block_size >> 8;
+ gm12u320->cmd_buf[20] = 0xfc - block * 4;
+ gm12u320->cmd_buf[21] =
+ block | (gm12u320->fb_update.frame << 7);
- /* Send draw command to device */
- memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
ret = usb_bulk_msg(gm12u320->udev,
usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
- gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ gm12u320->cmd_buf, CMD_SIZE, &len,
+ CMD_TIMEOUT);
if (ret || len != CMD_SIZE)
goto err;
+ /* Send data block to device */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->data_buf[block], block_size,
+ &len, DATA_TIMEOUT);
+ if (ret || len != block_size)
+ goto err;
+
/* Read status */
ret = usb_bulk_msg(gm12u320->udev,
usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
- draw_status_timeout);
+ CMD_TIMEOUT);
if (ret || len != READ_STATUS_SIZE)
goto err;
-
- draw_status_timeout = CMD_TIMEOUT;
- frame = !frame;
-
- /*
- * We must draw a frame every 2s otherwise the projector
- * switches back to showing its logo.
- */
- wait_event_timeout(gm12u320->fb_update.waitq,
- !gm12u320->fb_update.run ||
- gm12u320->fb_update.fb != NULL,
- IDLE_TIMEOUT);
}
+
+ /* Send draw command to device */
+ memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ if (ret || len != CMD_SIZE)
+ goto err;
+
+ /* Read status */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
+ gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+ gm12u320->fb_update.draw_status_timeout);
+ if (ret || len != READ_STATUS_SIZE)
+ goto err;
+
+ gm12u320->fb_update.draw_status_timeout = CMD_TIMEOUT;
+ gm12u320->fb_update.frame = !gm12u320->fb_update.frame;
+
+ /*
+ * We must draw a frame every 2s otherwise the projector
+ * switches back to showing its logo.
+ */
+ queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
+ IDLE_TIMEOUT);
+
return;
err:
/* Do not log errors caused by module unload or device unplug */
@@ -428,7 +410,7 @@ err:
static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
struct drm_rect *dirty)
{
- struct gm12u320_device *gm12u320 = fb->dev->dev_private;
+ struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev);
struct drm_framebuffer *old_fb = NULL;
bool wakeup = false;
@@ -452,36 +434,24 @@ static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
mutex_unlock(&gm12u320->fb_update.lock);
if (wakeup)
- wake_up(&gm12u320->fb_update.waitq);
+ mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0);
if (old_fb)
drm_framebuffer_put(old_fb);
}
-static void gm12u320_start_fb_update(struct gm12u320_device *gm12u320)
-{
- mutex_lock(&gm12u320->fb_update.lock);
- gm12u320->fb_update.run = true;
- mutex_unlock(&gm12u320->fb_update.lock);
-
- queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work);
-}
-
static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320)
{
- mutex_lock(&gm12u320->fb_update.lock);
- gm12u320->fb_update.run = false;
- mutex_unlock(&gm12u320->fb_update.lock);
+ struct drm_framebuffer *old_fb;
- wake_up(&gm12u320->fb_update.waitq);
- cancel_work_sync(&gm12u320->fb_update.work);
+ cancel_delayed_work_sync(&gm12u320->fb_update.work);
mutex_lock(&gm12u320->fb_update.lock);
- if (gm12u320->fb_update.fb) {
- drm_framebuffer_put(gm12u320->fb_update.fb);
- gm12u320->fb_update.fb = NULL;
- }
+ old_fb = gm12u320->fb_update.fb;
+ gm12u320->fb_update.fb = NULL;
mutex_unlock(&gm12u320->fb_update.lock);
+
+ drm_framebuffer_put(old_fb);
}
static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
@@ -589,20 +559,18 @@ static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT };
+ struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
+ gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT;
gm12u320_fb_mark_dirty(plane_state->fb, &rect);
- gm12u320_start_fb_update(gm12u320);
- gm12u320->pipe_enabled = true;
}
static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
+ struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
gm12u320_stop_fb_update(gm12u320);
- gm12u320->pipe_enabled = false;
}
static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -630,16 +598,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-static void gm12u320_driver_release(struct drm_device *dev)
-{
- struct gm12u320_device *gm12u320 = dev->dev_private;
-
- gm12u320_usb_free(gm12u320);
- drm_mode_config_cleanup(dev);
- drm_dev_fini(dev);
- kfree(gm12u320);
-}
-
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static struct drm_driver gm12u320_drm_driver = {
@@ -651,7 +609,6 @@ static struct drm_driver gm12u320_drm_driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
- .release = gm12u320_driver_release,
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
};
@@ -676,24 +633,21 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
if (interface->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
- gm12u320 = kzalloc(sizeof(*gm12u320), GFP_KERNEL);
- if (gm12u320 == NULL)
- return -ENOMEM;
+ gm12u320 = devm_drm_dev_alloc(&interface->dev, &gm12u320_drm_driver,
+ struct gm12u320_device, dev);
+ if (IS_ERR(gm12u320))
+ return PTR_ERR(gm12u320);
gm12u320->udev = interface_to_usbdev(interface);
- INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
+ INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
- init_waitqueue_head(&gm12u320->fb_update.waitq);
dev = &gm12u320->dev;
- ret = drm_dev_init(dev, &gm12u320_drm_driver, &interface->dev);
- if (ret) {
- kfree(gm12u320);
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
return ret;
- }
- dev->dev_private = gm12u320;
- drm_mode_config_init(dev);
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
dev->mode_config.min_height = GM12U320_HEIGHT;
@@ -702,15 +656,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
- goto err_put;
+ return ret;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
- goto err_put;
+ return ret;
ret = gm12u320_conn_init(gm12u320);
if (ret)
- goto err_put;
+ return ret;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -720,56 +674,44 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
- goto err_put;
+ return ret;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_put;
+ return ret;
drm_fbdev_generic_setup(dev, 0);
return 0;
-
-err_put:
- drm_dev_put(dev);
- return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = dev->dev_private;
- gm12u320_stop_fb_update(gm12u320);
drm_dev_unplug(dev);
- drm_dev_put(dev);
+ drm_atomic_helper_shutdown(dev);
}
static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = dev->dev_private;
- if (gm12u320->pipe_enabled)
- gm12u320_stop_fb_update(gm12u320);
-
- return 0;
+ return drm_mode_config_helper_suspend(dev);
}
static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = dev->dev_private;
+ struct gm12u320_device *gm12u320 = to_gm12u320(dev);
gm12u320_set_ecomode(gm12u320);
- if (gm12u320->pipe_enabled)
- gm12u320_start_fb_update(gm12u320);
- return 0;
+ return drm_mode_config_helper_resume(dev);
}
static const struct usb_device_id id_table[] = {
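
The gm12u320 rework replaces a dedicated workqueue thread, which looped and slept on a waitqueue, with a self-rearming delayed work item: the handler sends one frame and re-queues itself with IDLE_TIMEOUT, so a frame still goes out every ~2s when nothing is dirty, while damage pulls the work forward immediately via mod_delayed_work(). The skeleton of that pattern, with hypothetical foo helpers:

        static void foo_update_work(struct work_struct *work)
        {
                struct foo *foo = container_of(to_delayed_work(work),
                                               struct foo, update_work);

                foo_send_one_frame(foo);        /* hypothetical transfer */

                /* Re-arm: run again once the idle timeout elapses. */
                queue_delayed_work(system_long_wq, &foo->update_work,
                                   IDLE_TIMEOUT);
        }

        /* On damage: run now (delay 0), superseding any pending timer. */
        mod_delayed_work(system_long_wq, &foo->update_work, 0);

        /* On disable/disconnect: cancel and wait; the CANCELING state also
         * blocks the handler's own re-queue, so this terminates the cycle. */
        cancel_delayed_work_sync(&foo->update_work);
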
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 9af8ff84974f..b4bc358a3269 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -21,6 +21,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
@@ -195,7 +196,6 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
static struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
@@ -226,18 +226,12 @@ static int hx8357d_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &hx8357d_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc)) {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 802fb8dde1b6..d1a5ab6747d5 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -24,6 +24,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -345,7 +346,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
static struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
@@ -376,19 +376,13 @@ static int ili9225_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &ili9225_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 33b51dc7faa8..bb819f45a5d3 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -20,6 +20,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
@@ -151,7 +152,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
@@ -183,19 +183,13 @@ static int ili9341_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &ili9341_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 532560aebb1e..2702ea557d29 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -19,6 +19,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -164,7 +165,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
static struct drm_driver ili9486_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9486_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
@@ -197,19 +197,13 @@ static int ili9486_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &ili9486_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index e2cfd9a17143..08ac549ab0f7 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
@@ -155,7 +156,6 @@ DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
static struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
@@ -187,19 +187,13 @@ static int mi0283qt_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &mi0283qt_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index f5ebcaf7ee3a..1c0e7169545b 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -31,6 +31,7 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
#include <drm/drm_probe_helper.h>
@@ -908,17 +909,6 @@ static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static void repaper_release(struct drm_device *drm)
-{
- struct repaper_epd *epd = drm_to_epd(drm);
-
- DRM_DEBUG_DRIVER("\n");
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(epd);
-}
-
static const uint32_t repaper_formats[] = {
DRM_FORMAT_XRGB8888,
};
@@ -956,7 +946,6 @@ DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
static struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
- .release = repaper_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
@@ -1013,19 +1002,16 @@ static int repaper_probe(struct spi_device *spi)
}
}
- epd = kzalloc(sizeof(*epd), GFP_KERNEL);
- if (!epd)
- return -ENOMEM;
+ epd = devm_drm_dev_alloc(dev, &repaper_driver,
+ struct repaper_epd, drm);
+ if (IS_ERR(epd))
+ return PTR_ERR(epd);
drm = &epd->drm;
- ret = devm_drm_dev_init(dev, drm, &repaper_driver);
- if (ret) {
- kfree(epd);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
return ret;
- }
-
- drm_mode_config_init(drm);
drm->mode_config.funcs = &repaper_mode_config_funcs;
epd->spi = spi;
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 9ef559dd3191..2a1fae422f7a 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -21,6 +21,7 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -284,7 +285,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
static struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
@@ -317,19 +317,13 @@ static int st7586_probe(struct spi_device *spi)
size_t bufsize;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &st7586_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &st7586_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 3cd9b8d9888d..0af1b15efdf8 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -21,6 +21,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#define ST7735R_FRMCTR1 0xb1
@@ -156,7 +157,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
static struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
@@ -195,22 +195,16 @@ static int st7735r_probe(struct spi_device *spi)
if (!cfg)
cfg = (void *)spi_get_device_id(spi)->driver_data;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_dev_alloc(dev, &st7735r_driver,
+ struct st7735r_priv, dbidev.drm);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dbidev = &priv->dbidev;
priv->cfg = cfg;
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9e07c3f75156..f73b81c2576e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -588,7 +588,8 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_unlock(man);
}
- if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+ if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+ !dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
bo->deleted = true;
@@ -621,6 +622,7 @@ static void ttm_bo_release(struct kref *kref)
spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
+ dma_resv_unlock(bo->base.resv);
BUG_ON(bo->mem.mm_node != NULL);
atomic_dec(&ttm_bo_glob.bo_count);
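
The ttm_bo_release() change gates immediate teardown not only on all fences having signaled but also on taking the reservation lock without blocking: dma_resv_trylock() either pins the object for synchronous cleanup or fails, pushing the BO onto the existing delayed-destroy path. The control flow, reduced to a sketch with stand-in helpers:

        if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
            !dma_resv_trylock(bo->base.resv)) {
                /* Busy or contended: resurrect for delayed destroy. */
                defer_destroy(bo);              /* stand-in for the TTM path */
                return;
        }
        /* Idle and locked: tear down now, then drop the lock. */
        release_resources(bo);                  /* stand-in */
        dma_resv_unlock(bo->base.resv);
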
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 52d2b71f1588..f09b096ba4fd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -257,54 +257,6 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
-
-
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- return kmap_atomic(page);
- else
- return __ttm_kmap_atomic_prot(page, prot);
-}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
-
-/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
- *
- * @addr: The virtual address from the map.
- * @prot: The page protection.
- */
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- kunmap_atomic(addr);
- else
- __ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
@@ -316,13 +268,13 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
- dst = ttm_kmap_atomic_prot(d, prot);
+ dst = kmap_atomic_prot(d, prot);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(dst, prot);
+ kunmap_atomic(dst);
return 0;
}
@@ -338,13 +290,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
- src = ttm_kmap_atomic_prot(s, prot);
+ src = kmap_atomic_prot(s, prot);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(src, prot);
+ kunmap_atomic(src);
return 0;
}
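
Now that kmap_atomic_prot() is provided generically rather than only on x86, TTM's ttm_kmap_atomic_prot()/ttm_kunmap_atomic_prot() wrappers add nothing and the copy helpers call the core API directly. The essential step, as it appears in the converted helper:

        /* Map one page with the caching attributes the BO requires
         * (e.g. write-combined), copy, then unmap in reverse order. */
        dst = kmap_atomic_prot(d, prot);
        if (!dst)
                return -ENOMEM;
        memcpy_fromio(dst, src, PAGE_SIZE);
        kunmap_atomic(dst);
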
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 0afdfb0d1fe1..cdc1c42e1669 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -59,7 +59,7 @@ static int udl_get_modes(struct drm_connector *connector)
static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct udl_device *udl = connector->dev->dev_private;
+ struct udl_device *udl = to_udl(connector->dev);
if (!udl->sku_pixel_limit)
return 0;
@@ -72,7 +72,7 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- struct udl_device *udl = connector->dev->dev_private;
+ struct udl_device *udl = to_udl(connector->dev);
struct udl_drm_connector *udl_connector =
container_of(connector,
struct udl_drm_connector,
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index e6c1cd77d4d4..d1aa50fd6d65 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -10,6 +10,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
@@ -33,17 +34,8 @@ static int udl_usb_resume(struct usb_interface *interface)
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
-static void udl_driver_release(struct drm_device *dev)
-{
- udl_fini(dev);
- udl_modeset_cleanup(dev);
- drm_dev_fini(dev);
- kfree(dev);
-}
-
static struct drm_driver driver = {
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
- .release = udl_driver_release,
/* gem hooks */
.gem_create_object = udl_driver_gem_create_object,
@@ -65,27 +57,19 @@ static struct udl_device *udl_driver_create(struct usb_interface *interface)
struct udl_device *udl;
int r;
- udl = kzalloc(sizeof(*udl), GFP_KERNEL);
- if (!udl)
- return ERR_PTR(-ENOMEM);
-
- r = drm_dev_init(&udl->drm, &driver, &interface->dev);
- if (r) {
- kfree(udl);
- return ERR_PTR(r);
- }
+ udl = devm_drm_dev_alloc(&interface->dev, &driver,
+ struct udl_device, drm);
+ if (IS_ERR(udl))
+ return udl;
udl->udev = udev;
- udl->drm.dev_private = udl;
r = udl_init(udl);
- if (r) {
- drm_dev_fini(&udl->drm);
- kfree(udl);
+ if (r)
return ERR_PTR(r);
- }
usb_set_intfdata(interface, udl);
+
return udl;
}
@@ -101,31 +85,22 @@ static int udl_usb_probe(struct usb_interface *interface,
r = drm_dev_register(&udl->drm, 0);
if (r)
- goto err_free;
+ return r;
DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
- r = drm_fbdev_generic_setup(&udl->drm, 0);
- if (r)
- goto err_drm_dev_unregister;
+ drm_fbdev_generic_setup(&udl->drm, 0);
return 0;
-
-err_drm_dev_unregister:
- drm_dev_unregister(&udl->drm);
-err_free:
- drm_dev_put(&udl->drm);
- return r;
}
static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- drm_kms_helper_poll_disable(dev);
+ drm_kms_helper_poll_fini(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
- drm_dev_put(dev);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index e67227c44cc4..2642f94a63fc 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -68,7 +68,6 @@ struct udl_device {
/* modeset */
int udl_modeset_init(struct drm_device *dev);
-void udl_modeset_cleanup(struct drm_device *dev);
struct drm_connector *udl_connector_init(struct drm_device *dev);
struct urb *udl_get_urb(struct drm_device *dev);
@@ -77,7 +76,6 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
-void udl_fini(struct drm_device *dev);
int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 538718919916..f5d27f2a5654 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -351,13 +351,3 @@ int udl_drop_usb(struct drm_device *dev)
udl_free_urb_list(dev);
return 0;
}
-
-void udl_fini(struct drm_device *dev)
-{
- struct udl_device *udl = to_udl(dev);
-
- drm_kms_helper_poll_fini(dev);
-
- if (udl->urbs.count)
- udl_free_urb_list(dev);
-}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index d59ebac70b15..fef43f4e3bac 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -215,7 +215,7 @@ static char *udl_dummy_render(char *wrptr)
static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
char *buf;
int retval;
@@ -266,8 +266,8 @@ static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
return 0;
}
-int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
- int width, int height)
+static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
+ int width, int height)
{
struct drm_device *dev = fb->dev;
struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
@@ -369,7 +369,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
struct drm_framebuffer *fb = plane_state->fb;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct drm_display_mode *mode = &crtc_state->mode;
char *buf;
char *wrptr;
@@ -464,11 +464,13 @@ static const struct drm_mode_config_funcs udl_mode_funcs = {
int udl_modeset_init(struct drm_device *dev)
{
size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct drm_connector *connector;
int ret;
- drm_mode_config_init(dev);
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
dev->mode_config.min_width = 640;
dev->mode_config.min_height = 480;
@@ -482,10 +484,8 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &udl_mode_funcs;
connector = udl_connector_init(dev);
- if (IS_ERR(connector)) {
- ret = PTR_ERR(connector);
- goto err_drm_mode_config_cleanup;
- }
+ if (IS_ERR(connector))
+ return PTR_ERR(connector);
format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
@@ -494,18 +494,9 @@ int udl_modeset_init(struct drm_device *dev)
udl_simple_display_pipe_formats,
format_count, NULL, connector);
if (ret)
- goto err_drm_mode_config_cleanup;
+ return ret;
drm_mode_config_reset(dev);
return 0;
-
-err_drm_mode_config_cleanup:
- drm_mode_config_cleanup(dev);
- return ret;
-}
-
-void udl_modeset_cleanup(struct drm_device *dev)
-{
- drm_mode_config_cleanup(dev);
}
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 9e953ce64ef7..e76b24bb8828 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -132,7 +132,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
u32 ident0, ident1, ident2, ident3, cores;
int ret, core;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
@@ -187,8 +187,8 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_mark_last_busy(v3d->drm.dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -219,7 +219,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int measure_ms = 1000;
int ret;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
@@ -245,8 +245,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
cycles / (measure_ms * 1000),
(cycles / (measure_ms * 100)) % 10);
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_mark_last_busy(v3d->drm.dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -258,10 +258,10 @@ static const struct drm_info_list v3d_debugfs_list[] = {
{"bo_stats", v3d_debugfs_bo_stats, 0},
};
-int
+void
v3d_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(v3d_debugfs_list,
- ARRAY_SIZE(v3d_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(v3d_debugfs_list,
+ ARRAY_SIZE(v3d_debugfs_list),
+ minor->debugfs_root, minor);
}
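
The v3d->dev to v3d->drm.dev rewrites above (and throughout the v3d changes below) keep the driver's existing runtime-PM pairing intact; only the device pointer changes. For reference, the pattern is, as a sketch mirroring the driver's own error handling (<linux/pm_runtime.h>):

    ret = pm_runtime_get_sync(dev);         /* wake the device */
    if (ret < 0)
            return ret;

    /* ... access hardware registers ... */

    pm_runtime_mark_last_busy(dev);         /* restart the autosuspend timer */
    pm_runtime_put_autosuspend(dev);        /* drop usage count, suspend later */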
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index eaa8e9682373..82a7dfdd14c2 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <uapi/drm/v3d_drm.h>
#include "v3d_drv.h"
@@ -104,7 +105,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
if (args->value != 0)
return -EINVAL;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
@@ -113,8 +114,8 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
} else {
args->value = V3D_READ(offset);
}
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_mark_last_busy(v3d->drm.dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -234,9 +235,9 @@ static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
struct resource *res =
- platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name);
+ platform_get_resource_byname(v3d_to_pdev(v3d), IORESOURCE_MEM, name);
- *regs = devm_ioremap_resource(v3d->dev, res);
+ *regs = devm_ioremap_resource(v3d->drm.dev, res);
return PTR_ERR_OR_ZERO(*regs);
}
@@ -250,20 +251,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
u32 ident1;
- v3d = kzalloc(sizeof(*v3d), GFP_KERNEL);
- if (!v3d)
- return -ENOMEM;
- v3d->dev = dev;
- v3d->pdev = pdev;
+ v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
+ if (IS_ERR(v3d))
+ return PTR_ERR(v3d);
+
drm = &v3d->drm;
+ platform_set_drvdata(pdev, drm);
+
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
- goto dev_free;
+ return ret;
ret = map_regs(v3d, &v3d->core_regs[0], "core0");
if (ret)
- goto dev_free;
+ return ret;
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
dev->coherent_dma_mask =
@@ -281,45 +283,37 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ret = PTR_ERR(v3d->reset);
if (ret == -EPROBE_DEFER)
- goto dev_free;
+ return ret;
v3d->reset = NULL;
ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
if (ret) {
dev_err(dev,
"Failed to get reset control or bridge regs\n");
- goto dev_free;
+ return ret;
}
}
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
- goto dev_free;
+ return ret;
}
v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->mmu_scratch) {
dev_err(dev, "Failed to allocate MMU scratch page\n");
- ret = -ENOMEM;
- goto dev_free;
+ return -ENOMEM;
}
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_enable(dev);
- ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev);
- if (ret)
- goto dma_free;
-
- platform_set_drvdata(pdev, drm);
- drm->dev_private = v3d;
-
ret = v3d_gem_init(drm);
if (ret)
- goto dev_destroy;
+ goto dma_free;
ret = v3d_irq_init(v3d);
if (ret)
@@ -335,12 +329,8 @@ irq_disable:
v3d_irq_disable(v3d);
gem_destroy:
v3d_gem_destroy(drm);
-dev_destroy:
- drm_dev_put(drm);
dma_free:
dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
-dev_free:
- kfree(v3d);
return ret;
}
@@ -353,9 +343,8 @@ static int v3d_platform_drm_remove(struct platform_device *pdev)
v3d_gem_destroy(drm);
- drm_dev_put(drm);
-
- dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+ dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
+ v3d->mmu_scratch_paddr);
return 0;
}
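
The probe conversion follows the devm_drm_dev_alloc() pattern: the driver structure embeds the drm_device and is freed automatically on the final reference drop, so the kfree()/drm_dev_put() error paths disappear. A minimal sketch (struct and driver names are illustrative, not from this patch):

    #include <drm/drm_drv.h>
    #include <drm/drm_managed.h>

    struct example_dev {
            struct drm_device drm;
            void __iomem *regs;
    };

    static int example_probe(struct platform_device *pdev)
    {
            struct example_dev *edev;

            /* Allocates example_dev; kfree() happens on final drm_dev_put(). */
            edev = devm_drm_dev_alloc(&pdev->dev, &example_driver,
                                      struct example_dev, drm);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            platform_set_drvdata(pdev, &edev->drm);

            /* Failures before registration can now simply return. */
            return drm_dev_register(&edev->drm, 0);
    }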
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index ac2603334587..8a390738d65b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -14,7 +14,6 @@
#include "uapi/drm/v3d_drm.h"
struct clk;
-struct device;
struct platform_device;
struct reset_control;
@@ -47,8 +46,6 @@ struct v3d_dev {
int ver;
bool single_irq_line;
- struct device *dev;
- struct platform_device *pdev;
void __iomem *hub_regs;
void __iomem *core_regs[3];
void __iomem *bridge_regs;
@@ -121,7 +118,7 @@ struct v3d_dev {
static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
- return (struct v3d_dev *)dev->dev_private;
+ return container_of(dev, struct v3d_dev, drm);
}
static inline bool
@@ -130,6 +127,8 @@ v3d_has_csd(struct v3d_dev *v3d)
return v3d->ver >= 41;
}
+#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
+
/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
struct v3d_dev *v3d;
@@ -316,7 +315,7 @@ struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
/* v3d_debugfs.c */
-int v3d_debugfs_init(struct drm_minor *minor);
+void v3d_debugfs_init(struct drm_minor *minor);
/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
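
to_v3d_dev() can switch from dev->dev_private to container_of() because the drm_device is now embedded in struct v3d_dev: the upcast becomes constant pointer arithmetic and no dev_private assignment is needed. The same idiom in isolation (illustrative names):

    struct example_dev {
            struct drm_device drm;  /* embedded, so container_of() works */
    };

    static inline struct example_dev *to_example_dev(struct drm_device *dev)
    {
            return container_of(dev, struct example_dev, drm);
    }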
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 549dde83408b..09a7639cf161 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -370,8 +370,8 @@ v3d_job_free(struct kref *ref)
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
- pm_runtime_mark_last_busy(job->v3d->dev);
- pm_runtime_put_autosuspend(job->v3d->dev);
+ pm_runtime_mark_last_busy(job->v3d->drm.dev);
+ pm_runtime_put_autosuspend(job->v3d->drm.dev);
kfree(job);
}
@@ -439,7 +439,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->v3d = v3d;
job->free = free;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
@@ -458,7 +458,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
return 0;
fail:
xa_destroy(&job->deps);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return ret;
}
@@ -886,12 +886,12 @@ v3d_gem_init(struct drm_device *dev)
*/
drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
- v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
+ v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
&v3d->pt_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
- dev_err(v3d->dev,
+ dev_err(v3d->drm.dev,
"Failed to allocate page tables. "
"Please ensure you have CMA enabled.\n");
return -ENOMEM;
@@ -903,7 +903,7 @@ v3d_gem_init(struct drm_device *dev)
ret = v3d_sched_init(v3d);
if (ret) {
drm_mm_takedown(&v3d->mm);
- dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
+ dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
v3d->pt_paddr);
}
@@ -925,5 +925,6 @@ v3d_gem_destroy(struct drm_device *dev)
drm_mm_takedown(&v3d->mm);
- dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
+ dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
+ v3d->pt_paddr);
}
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 662e67279a7b..51b65263c657 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -128,7 +128,7 @@ v3d_irq(int irq, void *arg)
* always-allowed mode.
*/
if (intsts & V3D_INT_GMPV)
- dev_err(v3d->dev, "GMP violation\n");
+ dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we
* didn't see the common one then check hub for MMU IRQs.
@@ -189,7 +189,7 @@ v3d_hub_irq(int irq, void *arg)
client = v3d41_axi_ids[axi_id];
}
- dev_err(v3d->dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+ dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
client, axi_id, (long long)vio_addr,
((intsts & V3D_HUB_INT_MMU_WRV) ?
", write violation" : ""),
@@ -217,16 +217,17 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- irq1 = platform_get_irq(v3d->pdev, 1);
+ irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
return irq1;
if (irq1 > 0) {
- ret = devm_request_irq(v3d->dev, irq1,
+ ret = devm_request_irq(v3d->drm.dev, irq1,
v3d_irq, IRQF_SHARED,
"v3d_core0", v3d);
if (ret)
goto fail;
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ ret = devm_request_irq(v3d->drm.dev,
+ platform_get_irq(v3d_to_pdev(v3d), 0),
v3d_hub_irq, IRQF_SHARED,
"v3d_hub", v3d);
if (ret)
@@ -234,7 +235,8 @@ v3d_irq_init(struct v3d_dev *v3d)
} else {
v3d->single_irq_line = true;
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ ret = devm_request_irq(v3d->drm.dev,
+ platform_get_irq(v3d_to_pdev(v3d), 0),
v3d_irq, IRQF_SHARED,
"v3d", v3d);
if (ret)
@@ -246,7 +248,7 @@ v3d_irq_init(struct v3d_dev *v3d)
fail:
if (ret != -EPROBE_DEFER)
- dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index 395e81d97163..3b81ea28c0bb 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -40,7 +40,7 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
V3D_MMU_CTL_TLB_CLEARING), 100);
if (ret)
- dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n");
+ dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
V3D_MMU_CTL_TLB_CLEAR);
@@ -52,14 +52,14 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
V3D_MMU_CTL_TLB_CLEARING), 100);
if (ret) {
- dev_err(v3d->dev, "TLB clear wait idle failed\n");
+ dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
return ret;
}
ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
V3D_MMUC_CONTROL_FLUSHING), 100);
if (ret)
- dev_err(v3d->dev, "MMUC flush wait idle failed\n");
+ dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
return ret;
}
@@ -109,7 +109,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
if (v3d_mmu_flush_all(v3d))
- dev_err(v3d->dev, "MMU flush timeout\n");
+ dev_err(v3d->drm.dev, "MMU flush timeout\n");
}
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
@@ -122,5 +122,5 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo)
v3d->pt[page] = 0;
if (v3d_mmu_flush_all(v3d))
- dev_err(v3d->dev, "MMU flush timeout\n");
+ dev_err(v3d->drm.dev, "MMU flush timeout\n");
}
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 8c2df6d95283..0747614a78f0 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -403,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_bin");
if (ret) {
- dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret);
+ dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
return ret;
}
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_render");
if (ret) {
- dev_err(v3d->dev, "Failed to create render scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_tfu");
if (ret) {
- dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_csd");
if (ret) {
- dev_err(v3d->dev, "Failed to create CSD scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
@@ -450,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_cache_clean");
if (ret) {
- dev_err(v3d->dev, "Failed to create CACHE_CLEAN scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index ac8f75db2ecd..cf2e3e6a2388 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include "vbox_drv.h"
@@ -45,28 +46,22 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
- if (!vbox)
- return -ENOMEM;
-
- ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev);
- if (ret) {
- kfree(vbox);
- return ret;
- }
+ vbox = devm_drm_dev_alloc(&pdev->dev, &driver,
+ struct vbox_private, ddev);
+ if (IS_ERR(vbox))
+ return PTR_ERR(vbox);
vbox->ddev.pdev = pdev;
- vbox->ddev.dev_private = vbox;
pci_set_drvdata(pdev, vbox);
mutex_init(&vbox->hw_mutex);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
- goto err_dev_put;
+ return ret;
ret = vbox_hw_init(vbox);
if (ret)
- goto err_pci_disable;
+ return ret;
ret = vbox_mm_init(vbox);
if (ret)
@@ -80,14 +75,12 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mode_fini;
- ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
- if (ret)
- goto err_irq_fini;
-
ret = drm_dev_register(&vbox->ddev, 0);
if (ret)
goto err_irq_fini;
+ drm_fbdev_generic_setup(&vbox->ddev, 32);
+
return 0;
err_irq_fini:
@@ -98,10 +91,6 @@ err_mm_fini:
vbox_mm_fini(vbox);
err_hw_fini:
vbox_hw_fini(vbox);
-err_pci_disable:
- pci_disable_device(pdev);
-err_dev_put:
- drm_dev_put(&vbox->ddev);
return ret;
}
@@ -114,7 +103,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
vbox_mode_fini(vbox);
vbox_mm_fini(vbox);
vbox_hw_fini(vbox);
- drm_dev_put(&vbox->ddev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index 87421903816c..ac7c2effc46f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -127,6 +127,7 @@ struct vbox_encoder {
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_dev(x) container_of(x, struct vbox_private, ddev)
bool vbox_check_supported(u16 id);
int vbox_hw_init(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 16a1e29f5292..631657fa554f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -34,7 +34,7 @@ void vbox_report_hotplug(struct vbox_private *vbox)
irqreturn_t vbox_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
- struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(dev);
u32 host_flags = vbox_get_flags(vbox);
if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 9dcab115a261..d68d9bad7674 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -71,8 +71,6 @@ static void vbox_accel_fini(struct vbox_private *vbox)
for (i = 0; i < vbox->num_crtcs; ++i)
vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
-
- pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
}
/* Do we support the 4.3 plus mode hint reporting interface? */
@@ -123,21 +121,22 @@ int vbox_hw_init(struct vbox_private *vbox)
return -ENOMEM;
/* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
- vbox->guest_pool = gen_pool_create(4, -1);
+ vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1,
+ "vboxvideo-accel");
if (!vbox->guest_pool)
- goto err_unmap_guest_heap;
+ return -ENOMEM;
ret = gen_pool_add_virt(vbox->guest_pool,
(unsigned long)vbox->guest_heap,
GUEST_HEAP_OFFSET(vbox),
GUEST_HEAP_USABLE_SIZE, -1);
if (ret)
- goto err_destroy_guest_pool;
+ return ret;
ret = hgsmi_test_query_conf(vbox->guest_pool);
if (ret) {
DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
- goto err_destroy_guest_pool;
+ return ret;
}
/* Reduce available VRAM size to reflect the guest heap. */
@@ -149,33 +148,23 @@ int vbox_hw_init(struct vbox_private *vbox)
if (!have_hgsmi_mode_hints(vbox)) {
ret = -ENOTSUPP;
- goto err_destroy_guest_pool;
+ return ret;
}
vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
sizeof(struct vbva_modehint),
GFP_KERNEL);
- if (!vbox->last_mode_hints) {
- ret = -ENOMEM;
- goto err_destroy_guest_pool;
- }
+ if (!vbox->last_mode_hints)
+ return -ENOMEM;
ret = vbox_accel_init(vbox);
if (ret)
- goto err_destroy_guest_pool;
+ return ret;
return 0;
-
-err_destroy_guest_pool:
- gen_pool_destroy(vbox->guest_pool);
-err_unmap_guest_heap:
- pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
- return ret;
}
void vbox_hw_fini(struct vbox_private *vbox)
{
vbox_accel_fini(vbox);
- gen_pool_destroy(vbox->guest_pool);
- pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}
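
devm_gen_pool_create() ties the pool's lifetime to the device, which is what allows every gen_pool_destroy() unwind path above to be dropped. A sketch of the managed pattern (names and sizes are illustrative), using <linux/genalloc.h>:

    static int example_pool_init(struct device *dev, void *vaddr,
                                 phys_addr_t phys, size_t size)
    {
            struct gen_pool *pool;

            /* 2^4 = 16-byte chunks; pool is destroyed on driver unbind. */
            pool = devm_gen_pool_create(dev, 4, -1, "example-pool");
            if (!pool)
                    return -ENOMEM;

            /* Failures can return directly: no manual pool teardown. */
            return gen_pool_add_virt(pool, (unsigned long)vaddr, phys,
                                     size, -1);
    }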
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 0883a435e62b..d9a5af62af89 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -36,7 +36,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
u16 flags;
s32 x_offset, y_offset;
- vbox = crtc->dev->dev_private;
+ vbox = to_vbox_dev(crtc->dev);
width = vbox_crtc->width ? vbox_crtc->width : 640;
height = vbox_crtc->height ? vbox_crtc->height : 480;
bpp = fb ? fb->format->cpp[0] * 8 : 32;
@@ -77,7 +77,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
static int vbox_set_view(struct drm_crtc *crtc)
{
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
- struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(crtc->dev);
struct vbva_infoview *p;
/*
@@ -174,7 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
int x, int y)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
- struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(crtc->dev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
@@ -272,7 +272,7 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
{
struct drm_crtc *crtc = plane->state->crtc;
struct drm_framebuffer *fb = plane->state->fb;
- struct vbox_private *vbox = fb->dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(fb->dev);
struct drm_mode_rect *clips;
uint32_t num_clips, i;
@@ -704,7 +704,7 @@ static int vbox_get_modes(struct drm_connector *connector)
int preferred_width, preferred_height;
vbox_connector = to_vbox_connector(connector);
- vbox = connector->dev->dev_private;
+ vbox = to_vbox_dev(connector->dev);
hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
HOST_FLAGS_OFFSET);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index 976423d0c3cc..f5a06675da43 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -24,25 +24,13 @@ int vbox_mm_init(struct vbox_private *vbox)
return ret;
}
-#ifdef DRM_MTRR_WC
- vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0),
- DRM_MTRR_WC);
-#else
vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
-#endif
return 0;
}
void vbox_mm_fini(struct vbox_private *vbox)
{
-#ifdef DRM_MTRR_WC
- drm_mtrr_del(vbox->fb_mtrr,
- pci_resource_start(vbox->ddev.pdev, 0),
- pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC);
-#else
arch_phys_wc_del(vbox->fb_mtrr);
-#endif
drm_vram_helper_release_mm(&vbox->ddev);
}
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index b61b2d3407b5..4fbbf980a299 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -20,7 +20,7 @@ struct vc4_debugfs_info_entry {
* Called at drm_dev_register() time on each of the minors registered
* by the DRM device, to attach the debugfs files.
*/
-int
+void
vc4_debugfs_init(struct drm_minor *minor)
{
struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
@@ -30,14 +30,9 @@ vc4_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, &vc4->load_tracker_enabled);
list_for_each_entry(entry, &vc4->debugfs_list, link) {
- int ret = drm_debugfs_create_files(&entry->info, 1,
- minor->debugfs_root, minor);
-
- if (ret)
- return ret;
+ drm_debugfs_create_files(&entry->info, 1,
+ minor->debugfs_root, minor);
}
-
- return 0;
}
static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 6dfede03396e..a90f2545baee 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -17,6 +17,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_graph.h>
@@ -114,10 +115,6 @@ static const struct debugfs_reg32 dpi_regs[] = {
VC4_REG32(DPI_ID),
};
-static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
{
struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
@@ -309,8 +306,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
if (ret)
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
- drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs,
- DRM_MODE_ENCODER_DPI, NULL);
+ drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI);
drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
ret = vc4_dpi_init_bridge(dpi);
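
This is the first of several identical conversions in the series (vc4 dsi/hdmi/vec, virtio, vkms and zte below follow the same shape): drm_simple_encoder_init() replaces a drm_encoder_funcs whose only callback is drm_encoder_cleanup(). Side by side, with an illustrative encoder type:

    /* before: boilerplate funcs struct that exists only for cleanup */
    static const struct drm_encoder_funcs example_encoder_funcs = {
            .destroy = drm_encoder_cleanup,
    };
    drm_encoder_init(drm, encoder, &example_encoder_funcs,
                     DRM_MODE_ENCODER_DPI, NULL);

    /* after: the helper supplies the cleanup-only funcs internally */
    drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DPI);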
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 139d25a8328e..3b1f02efefbe 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -759,7 +759,7 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
unsigned int *top, unsigned int *bottom);
/* vc4_debugfs.c */
-int vc4_debugfs_init(struct drm_minor *minor);
+void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_file(struct drm_device *drm,
const char *filename,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index d99b1d526651..eaf276978ee7 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -37,6 +37,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -652,15 +653,6 @@ static const struct debugfs_reg32 dsi1_regs[] = {
VC4_REG32(DSI1_ID),
};
-static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
- .destroy = vc4_dsi_encoder_destroy,
-};
-
static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch)
{
u32 afec0 = DSI_PORT_READ(PHY_AFEC0);
@@ -1615,8 +1607,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (dsi->port == 1)
vc4->dsi1 = dsi;
- drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
@@ -1656,7 +1647,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
* normally.
*/
list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
- vc4_dsi_encoder_destroy(dsi->encoder);
+ drm_encoder_cleanup(dsi->encoder);
if (dsi->port == 1)
vc4->dsi1 = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 340719238753..625bfcf52dc4 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/i2c.h>
@@ -306,15 +307,6 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
return connector;
}
-static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
- .destroy = vc4_hdmi_encoder_destroy,
-};
-
static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
enum hdmi_infoframe_type type)
{
@@ -1406,8 +1398,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
}
pm_runtime_enable(dev);
- drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
hdmi->connector =
@@ -1465,7 +1456,7 @@ err_destroy_conn:
vc4_hdmi_connector_destroy(hdmi->connector);
#endif
err_destroy_encoder:
- vc4_hdmi_encoder_destroy(hdmi->encoder);
+ drm_encoder_cleanup(hdmi->encoder);
err_unprepare_hsm:
clk_disable_unprepare(hdmi->hsm_clock);
pm_runtime_disable(dev);
@@ -1484,7 +1475,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
cec_unregister_adapter(hdmi->cec_adap);
vc4_hdmi_connector_destroy(hdmi->connector);
- vc4_hdmi_encoder_destroy(hdmi->encoder);
+ drm_encoder_cleanup(hdmi->encoder);
clk_disable_unprepare(hdmi->hsm_clock);
pm_runtime_disable(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 7402bc768664..bd5b8eb58b18 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -17,6 +17,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_graph.h>
@@ -374,10 +375,6 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
return connector;
}
-static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
{
struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
@@ -566,8 +563,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
pm_runtime_enable(dev);
- drm_encoder_init(drm, vec->encoder, &vc4_vec_encoder_funcs,
- DRM_MODE_ENCODER_TVDAC, NULL);
+ drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC);
drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
vec->connector = vc4_vec_connector_init(drm, vec);
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 909eba43664a..ec1a8ebb6f1b 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -39,6 +39,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include "vgem_drv.h"
@@ -431,9 +432,6 @@ static void vgem_release(struct drm_device *dev)
struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
platform_device_unregister(vgem->platform);
- drm_dev_fini(&vgem->drm);
-
- kfree(vgem);
}
static struct drm_driver vgem_driver = {
@@ -489,16 +487,19 @@ static int __init vgem_init(void)
&vgem_device->platform->dev);
if (ret)
goto out_unregister;
+ drmm_add_final_kfree(&vgem_device->drm, vgem_device);
/* Final step: expose the device/driver to userspace */
- ret = drm_dev_register(&vgem_device->drm, 0);
+ ret = drm_dev_register(&vgem_device->drm, 0);
if (ret)
- goto out_fini;
+ goto out_put;
return 0;
-out_fini:
- drm_dev_fini(&vgem_device->drm);
+out_put:
+ drm_dev_put(&vgem_device->drm);
+ return ret;
+
out_unregister:
platform_device_unregister(vgem_device->platform);
out_free:
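
drmm_add_final_kfree() hands ownership of the containing allocation to the DRM core: it is kfree()d when the last drm_dev_put() drops the device, which is why the out_fini/drm_dev_fini() path above becomes a plain drm_dev_put(). Sketch of the handover, with illustrative names:

    static int example_create(struct device *parent)
    {
            struct example_device *edev;
            int ret;

            edev = kzalloc(sizeof(*edev), GFP_KERNEL);
            if (!edev)
                    return -ENOMEM;

            ret = drm_dev_init(&edev->drm, &example_driver, parent);
            if (ret) {
                    kfree(edev);    /* core does not own it yet */
                    return ret;
            }

            drmm_add_final_kfree(&edev->drm, edev);

            ret = drm_dev_register(&edev->drm, 0);
            if (ret)
                    drm_dev_put(&edev->drm);        /* frees edev via drmm */

            return ret;
    }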
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index e27120d512b0..3221520f61f0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -72,11 +72,10 @@ static struct drm_info_list virtio_gpu_debugfs_list[] = {
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
-int
+void
virtio_gpu_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(virtio_gpu_debugfs_list,
VIRTIO_GPU_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
- return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 2b7e6ae65546..cc7fd957a307 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,6 +30,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "virtgpu_drv.h"
@@ -240,10 +241,6 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
{
struct drm_device *dev = vgdev->ddev;
@@ -276,8 +273,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
if (vgdev->has_edid)
drm_connector_attach_edid_property(connector);
- drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
encoder->possible_crtcs = 1 << index;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7879ff58236f..9ff9f4ac0522 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -218,27 +218,19 @@ struct virtio_gpu_fpriv {
struct mutex context_lock;
};
-/* virtio_ioctl.c */
+/* virtgpu_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
-/* virtio_kms.c */
+/* virtgpu_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
-/* virtio_gem.c */
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
-int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_gem_create(struct drm_file *file,
- struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct drm_gem_object **obj_p,
- uint32_t *handle_p);
+/* virtgpu_gem.c */
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
@@ -264,7 +256,7 @@ void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_work(struct work_struct *work);
-/* virtio vg */
+/* virtgpu_vq.c */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
@@ -288,10 +280,10 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t scanout_id, uint32_t resource_id,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y);
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj,
- struct virtio_gpu_mem_entry *ents,
- unsigned int nents);
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -344,17 +336,17 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
-/* virtio_gpu_display.c */
+/* virtgpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
-/* virtio_gpu_plane.c */
+/* virtgpu_plane.c */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
enum drm_plane_type type,
int index);
-/* virtio_gpu_fence.c */
+/* virtgpu_fence.c */
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -363,7 +355,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
-/* virtio_gpu_object */
+/* virtgpu_object.c */
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size);
@@ -379,7 +371,7 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
-/* virgl debugfs */
-int virtio_gpu_debugfs_init(struct drm_minor *minor);
+/* virtgpu_debugfs.c */
+void virtio_gpu_debugfs_init(struct drm_minor *minor);
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index f0d5a8974677..d6cb350ae52a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -28,11 +28,11 @@
#include "virtgpu_drv.h"
-int virtio_gpu_gem_create(struct drm_file *file,
- struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct drm_gem_object **obj_p,
- uint32_t *handle_p)
+static int virtio_gpu_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct virtio_gpu_object_params *params,
+ struct drm_gem_object **obj_p,
+ uint32_t *handle_p)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
@@ -117,7 +117,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
- return 0;
+ goto out_notify;
objs = virtio_gpu_array_alloc(1);
if (!objs)
@@ -126,6 +126,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
objs);
+out_notify:
virtio_gpu_notify(vgdev);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 512daff92038..5df722072ba0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -47,7 +47,6 @@ void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
get_task_comm(dbgname, current);
virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
strlen(dbgname), dbgname);
- virtio_gpu_notify(vgdev);
vfpriv->context_created = true;
out_unlock:
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index d9039bb7c5e3..6ccbd01cd888 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -235,13 +235,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
return ret;
}
- ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
- if (ret != 0) {
- virtio_gpu_free_object(&shmem_obj->base);
- return ret;
- }
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
- virtio_gpu_notify(vgdev);
*bo_ptr = bo;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 73854915ec34..9e663a5d9952 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1087,14 +1087,13 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj,
- struct virtio_gpu_mem_entry *ents,
- unsigned int nents)
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents)
{
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents, NULL);
- return 0;
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 860de052e820..1e8b2169d834 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -21,6 +21,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -34,7 +35,7 @@
static struct vkms_device *vkms_device;
-bool enable_cursor;
+bool enable_cursor = true;
module_param_named(enable_cursor, enable_cursor, bool, 0444);
MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
@@ -63,7 +64,6 @@ static void vkms_release(struct drm_device *dev)
platform_device_unregister(vkms->platform);
drm_atomic_helper_shutdown(&vkms->drm);
drm_mode_config_cleanup(&vkms->drm);
- drm_dev_fini(&vkms->drm);
destroy_workqueue(vkms->output.composer_workq);
}
@@ -158,13 +158,14 @@ static int __init vkms_init(void)
&vkms_device->platform->dev);
if (ret)
goto out_unregister;
+ drmm_add_final_kfree(&vkms_device->drm, vkms_device);
ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
DMA_BIT_MASK(64));
if (ret) {
DRM_ERROR("Could not initialize DMA support\n");
- goto out_fini;
+ goto out_put;
}
vkms_device->drm.irq_enabled = true;
@@ -172,25 +173,25 @@ static int __init vkms_init(void)
ret = drm_vblank_init(&vkms_device->drm, 1);
if (ret) {
DRM_ERROR("Failed to vblank\n");
- goto out_fini;
+ goto out_put;
}
ret = vkms_modeset_init(vkms_device);
if (ret)
- goto out_fini;
+ goto out_put;
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
- goto out_fini;
+ goto out_put;
return 0;
-out_fini:
- drm_dev_fini(&vkms_device->drm);
+out_put:
+ drm_dev_put(&vkms_device->drm);
+ return ret;
out_unregister:
platform_device_unregister(vkms_device->platform);
-
out_free:
kfree(vkms_device);
return ret;
@@ -205,8 +206,6 @@ static void __exit vkms_exit(void)
drm_dev_unregister(&vkms_device->drm);
drm_dev_put(&vkms_device->drm);
-
- kfree(vkms_device);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index eda04ffba7b1..f4036bb0b9a8 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -117,11 +117,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
enum drm_plane_type type, int index);
/* Gem stuff */
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size);
-
vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 2e01186fb943..c541fec57566 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -97,10 +97,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
return ret;
}
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size)
+static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size)
{
struct vkms_gem_object *obj;
int ret;
@@ -113,7 +113,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->gem, handle);
- drm_gem_object_put_unlocked(&obj->gem);
if (ret)
return ERR_PTR(ret);
@@ -142,6 +141,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_obj->size;
args->pitch = pitch;
+ drm_gem_object_put_unlocked(gem_obj);
+
DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
return 0;
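
The drm_gem_object_put_unlocked() move matters for reference counting: drm_gem_handle_create() installs the handle's own reference, and userspace may close that handle at any moment. The creator must keep its local reference until it has finished reading the object, roughly (fragment, illustrative):

    ret = drm_gem_handle_create(file, gem_obj, &handle);
    if (ret)
            return ret;

    /* Still safe to dereference: we hold the creation reference even
     * if userspace closes the handle concurrently.
     */
    args->size = gem_obj->size;

    drm_gem_object_put_unlocked(gem_obj);   /* handle keeps it alive */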
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index fb1941a6522c..85afb77e97f0 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -3,6 +3,7 @@
#include "vkms_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
static void vkms_connector_destroy(struct drm_connector *connector)
{
@@ -17,10 +18,6 @@ static const struct drm_connector_funcs vkms_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static const struct drm_encoder_funcs vkms_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vkms_conn_get_modes(struct drm_connector *connector)
{
int count;
@@ -70,8 +67,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
- ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
if (ret) {
DRM_ERROR("Failed to init encoder\n");
goto err_encoder;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index bb46ca0c458f..1629427d5734 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -27,6 +27,7 @@
**************************************************************************/
#include "vmwgfx_drv.h"
+#include <linux/highmem.h>
/*
* Template that implements find_first_diff() for a generic
@@ -374,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
if (unmap_src) {
- ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
+ kunmap_atomic(d->src_addr);
d->src_addr = NULL;
}
if (unmap_dst) {
- ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
+ kunmap_atomic(d->dst_addr);
d->dst_addr = NULL;
}
@@ -388,8 +389,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->dst_addr =
- ttm_kmap_atomic_prot(d->dst_pages[dst_page],
- d->dst_prot);
+ kmap_atomic_prot(d->dst_pages[dst_page],
+ d->dst_prot);
if (!d->dst_addr)
return -ENOMEM;
@@ -401,8 +402,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->src_addr =
- ttm_kmap_atomic_prot(d->src_pages[src_page],
- d->src_prot);
+ kmap_atomic_prot(d->src_pages[src_page],
+ d->src_prot);
if (!d->src_addr)
return -ENOMEM;
@@ -499,9 +500,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
}
out:
if (d.src_addr)
- ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
+ kunmap_atomic(d.src_addr);
if (d.dst_addr)
- ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);
+ kunmap_atomic(d.dst_addr);
return ret;
}
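
With the ttm wrappers gone, the blit code uses the generic <linux/highmem.h> API directly: kmap_atomic_prot() maps a page with an explicit pgprot (preserving, for example, write-combining attributes) and is paired with plain kunmap_atomic(). A minimal sketch with illustrative names:

    #include <linux/highmem.h>

    static void example_copy_to_page(struct page *page, unsigned int offset,
                                     const void *src, size_t len, pgprot_t prot)
    {
            void *addr = kmap_atomic_prot(page, prot);

            /* Atomic context: no sleeping between map and unmap. */
            memcpy(addr + offset, src, len);
            kunmap_atomic(addr);
    }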
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cdcd6e5f9e1..3596f3923ea3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -850,7 +850,7 @@ extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
- bool interuptable,
+ bool interruptible,
void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 178a6cd1a06f..0f8d29397157 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -515,7 +515,7 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
struct vmw_fence_manager *fman = fman_from_fence(fence);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
- return 1;
+ return true;
vmw_fences_update(fman);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7ef51fa84b01..126f93c0b0b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1651,7 +1651,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct vmw_surface_metadata *metadata;
struct ttm_base_object *base;
uint32_t backup_handle;
- int ret = -EINVAL;
+ int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req->handle_type, &base);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 374142018171..1fd458e877ca 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -460,9 +460,6 @@ static void xen_drm_drv_release(struct drm_device *dev)
drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
- drm_dev_fini(dev);
- kfree(dev);
-
if (front_info->cfg.be_alloc)
xenbus_switch_state(front_info->xb_dev,
XenbusStateInitialising);
@@ -561,6 +558,7 @@ fail_register:
fail_modeset:
drm_kms_helper_poll_fini(drm_dev);
drm_mode_config_cleanup(drm_dev);
+ drm_dev_put(drm_dev);
fail:
kfree(drm_info);
return ret;
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index b98a1420dcd3..76a16d997a23 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -20,6 +20,7 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
#include <sound/hdmi-codec.h>
@@ -254,10 +255,6 @@ static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
.mode_set = zx_hdmi_encoder_mode_set,
};
-static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct zx_hdmi *hdmi = to_zx_hdmi(connector);
@@ -313,8 +310,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
encoder->possible_crtcs = VOU_CRTC_MASK;
- drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
index c598b7daf1f1..d8a89ba383bc 100644
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "zx_drm_drv.h"
#include "zx_tvenc_regs.h"
@@ -218,10 +219,6 @@ static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
.mode_set = zx_tvenc_encoder_mode_set,
};
-static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
{
struct zx_tvenc *tvenc = to_zx_tvenc(connector);
@@ -285,8 +282,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
*/
encoder->possible_crtcs = BIT(1);
- drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs,
- DRM_MODE_ENCODER_TVDAC, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC);
drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
connector->interlace_allowed = true;
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index c4fa3bbaba78..a7ed7f5ca837 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "zx_drm_drv.h"
#include "zx_vga_regs.h"
@@ -72,10 +73,6 @@ static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = {
.disable = zx_vga_encoder_disable,
};
-static const struct drm_encoder_funcs zx_vga_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int zx_vga_connector_get_modes(struct drm_connector *connector)
{
struct zx_vga *vga = to_zx_vga(connector);
@@ -154,8 +151,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
encoder->possible_crtcs = VOU_CRTC_MASK;
- ret = drm_encoder_init(drm, encoder, &zx_vga_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC);
if (ret) {
DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret);
return ret;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 388bcc2889aa..d24344e91922 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -192,17 +192,55 @@ static void host1x_setup_sid_table(struct host1x *host)
}
}
+static bool host1x_wants_iommu(struct host1x *host1x)
+{
+ /*
+ * If we support addressing a maximum of 32 bits of physical memory
+ * and if the host1x firewall is enabled, there's no need to enable
+ * IOMMU support. This can happen for example on Tegra20, Tegra30
+ * and Tegra114.
+ *
+ * Tegra124 and later can address up to 34 bits of physical memory and
+ * many platforms come equipped with more than 2 GiB of system memory,
+ * which requires crossing the 4 GiB boundary. But there's a catch: on
+ * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
+ * only address up to 32 bits of memory in GATHER opcodes, which means
+ * that command buffers need to either be in the first 2 GiB of system
+ * memory (which could quickly lead to memory exhaustion), or command
+ * buffers need to be treated differently from other buffers (which is
+ * not possible with the current ABI).
+ *
+ * A third option is to use the IOMMU in these cases to make sure all
+ * buffers will be mapped into a 32-bit IOVA space that host1x can
+ * address. This allows all of the system memory to be used and works
+ * within the limitations of the host1x on these SoCs.
+ *
+ * In summary, default to enable IOMMU on Tegra124 and later. For any
+ * of the earlier SoCs, only use the IOMMU for additional safety when
+ * the host1x firewall is disabled.
+ */
+ if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ return false;
+ }
+
+ return true;
+}
+
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
int err;
/*
- * If the host1x firewall is enabled, there's no need to enable IOMMU
- * support. Similarly, if host1x is already attached to an IOMMU (via
- * the DMA API), don't try to attach again.
+ * We may not always want to enable IOMMU support (for example if the
+ * host1x firewall is already enabled and we don't support addressing
+ * more than 32 bits of physical memory), so check for that first.
+ *
+ * Similarly, if host1x is already attached to an IOMMU (via the DMA
+ * API), don't try to attach again.
*/
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
+ if (!host1x_wants_iommu(host) || domain)
return domain;
host->group = iommu_group_get(host->dev);
@@ -502,6 +540,19 @@ static void __exit tegra_host1x_exit(void)
}
module_exit(tegra_host1x_exit);
+/**
+ * host1x_get_dma_mask() - query the supported DMA mask for host1x
+ * @host1x: host1x instance
+ *
+ * Note that this returns the supported DMA mask for host1x, which can be
+ * different from the applicable DMA mask under certain circumstances.
+ */
+u64 host1x_get_dma_mask(struct host1x *host1x)
+{
+ return host1x->info->dma_mask;
+}
+EXPORT_SYMBOL(host1x_get_dma_mask);
+
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
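
A consumer of the new export might look like the fragment below; only host1x_get_dma_mask() itself comes from this patch, and as the kernel-doc warns, the supported mask may still need clamping to the client's own addressing limits before use:

    u64 mask = host1x_get_dma_mask(host1x);
    int err;

    err = dma_coerce_mask_and_coherent(dev, mask);
    if (err)
            dev_err(dev, "failed to set DMA mask: %d\n", err);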
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
index b84fcaf8b105..aeea082f1418 100644
--- a/drivers/greybus/Kconfig
+++ b/drivers/greybus/Kconfig
@@ -3,7 +3,7 @@ menuconfig GREYBUS
tristate "Greybus support"
depends on SYSFS
---help---
- This option enables the Greybus driver core. Greybus is an
+ This option enables the Greybus driver core. Greybus is a
hardware protocol that was designed to provide Unipro with a
sane application layer. It was originally designed for the
ARA project, a modular phone system, but has shown up in other
@@ -12,7 +12,7 @@ menuconfig GREYBUS
Say Y here to enable support for these types of drivers.
- To compile this code as a module, chose M here: the module
+ To compile this code as a module, choose M here: the module
will be called greybus.ko
if GREYBUS
@@ -25,7 +25,7 @@ config GREYBUS_ES2
acts as a Greybus "host controller". This device is a bridge
from a USB device to a Unipro network.
- To compile this code as a module, chose M here: the module
+ To compile this code as a module, choose M here: the module
will be called gb-es2.ko
endif # GREYBUS
diff --git a/drivers/greybus/arpc.h b/drivers/greybus/arpc.h
index c8b83c5cfa79..b9ea81b55b29 100644
--- a/drivers/greybus/arpc.h
+++ b/drivers/greybus/arpc.h
@@ -21,7 +21,7 @@ struct arpc_request_message {
__le16 id; /* RPC unique id */
__le16 size; /* Size in bytes of header + payload */
__u8 type; /* RPC type */
- __u8 data[0]; /* ARPC data */
+ __u8 data[]; /* ARPC data */
} __packed;
struct arpc_response_message {
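
The [0] to [] change is the kernel-wide conversion from GCC zero-length arrays to C99 flexible array members; allocation and use are unchanged. For illustration (payload_size and payload are hypothetical):

    struct arpc_request_message *req;

    /* sizeof(*req) excludes the flexible member; add the payload length.
     * struct_size(req, data, payload_size) is the overflow-safe form.
     */
    req = kzalloc(sizeof(*req) + payload_size, GFP_KERNEL);
    if (!req)
            return -ENOMEM;

    memcpy(req->data, payload, payload_size);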
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 34f07371716d..443c5cbbde04 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -42,7 +42,7 @@ config HIDRAW
---help---
Say Y here if you want to support HID devices (from the USB
specification standpoint) that aren't strictly user interface
- devices, like monitor controls and Uninterruptable Power Supplies.
+ devices, like monitor controls and Uninterruptible Power Supplies.
This module supports these devices separately using a separate
event interface on /dev/hidraw.
@@ -149,6 +149,7 @@ config HID_APPLEIR
config HID_ASUS
tristate "Asus"
+ depends on USB_HID
depends on LEDS_CLASS
depends on ASUS_WMI || ASUS_WMI=n
select POWER_SUPPLY
@@ -538,14 +539,14 @@ config HID_LOGITECH
Support for Logitech devices that are not fully compliant with HID standard.
config HID_LOGITECH_DJ
- tristate "Logitech Unifying receivers full support"
+ tristate "Logitech receivers full support"
depends on USB_HID
depends on HIDRAW
depends on HID_LOGITECH
select HID_LOGITECH_HIDPP
---help---
- Say Y if you want support for Logitech Unifying receivers and devices.
- Unifying receivers are capable of pairing up to 6 Logitech compliant
+ Say Y if you want support for Logitech receivers and devices.
+ Logitech receivers are capable of pairing multiple Logitech compliant
devices to the same receiver. Without this driver it will be handled by
the generic USB_HID driver and all incoming events will be multiplexed
into a single mouse and a single keyboard device.
@@ -1140,7 +1141,7 @@ config HID_SENSOR_CUSTOM_SENSOR
to decide how to interpret these special sensor ids and process in
the user space. Currently some manufacturers are using these ids for
sensor calibration and debugging other sensors. Manufacturers
- should't use these special custom sensor ids to export any of the
+ shouldn't use these special custom sensor ids to export any of the
standard sensors.
Select this config option for custom/generic sensor support.
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index d732d1d10caf..359bdfbe3701 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -51,6 +51,12 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
"(For people who want to keep Windows PC keyboard muscle memory. "
"[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+static unsigned int swap_fn_leftctrl;
+module_param(swap_fn_leftctrl, uint, 0644);
+MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
+ "(For people who want to keep PC keyboard muscle memory. "
+ "[0] = as-is, Mac layout, 1 = swapped, PC layout)");
+
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
@@ -162,6 +168,11 @@ static const struct apple_key_translation swapped_option_cmd_keys[] = {
{ }
};
+static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
+ { KEY_FN, KEY_LEFTCTRL },
+ { }
+};
+
static const struct apple_key_translation *apple_find_translation(
const struct apple_key_translation *table, u16 from)
{
@@ -183,9 +194,11 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
bool do_translate;
u16 code = 0;
- if (usage->code == KEY_FN) {
+ u16 fn_keycode = (swap_fn_leftctrl) ? (KEY_LEFTCTRL) : (KEY_FN);
+
+ if (usage->code == fn_keycode) {
asc->fn_on = !!value;
- input_event(input, usage->type, usage->code, value);
+ input_event(input, usage->type, KEY_FN, value);
return 1;
}
@@ -270,6 +283,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
+ if (swap_fn_leftctrl) {
+ trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
+ }
+
return 0;
}
@@ -333,6 +354,11 @@ static void apple_setup_input(struct input_dev *input)
for (trans = apple_iso_keyboard; trans->from; trans++)
set_bit(trans->to, input->keybit);
+
+ if (swap_fn_leftctrl) {
+ for (trans = swapped_fn_leftctrl_keys; trans->from; trans++)
+ set_bit(trans->to, input->keybit);
+ }
}
static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
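The swap_fn_leftctrl support above reuses the driver's table-driven remap pattern: a zero-terminated array of { from, to } pairs, scanned linearly by apple_find_translation(). A self-contained user-space sketch of the same pattern (not the driver code itself):

#include <stdio.h>
#include <stdint.h>
#include <linux/input-event-codes.h>

struct key_translation {
	uint16_t from;
	uint16_t to;
};

/* Zero-terminated table, scanned linearly -- the same shape as
 * swapped_fn_leftctrl_keys above; from == 0 ends the scan.
 */
static const struct key_translation swap_table[] = {
	{ KEY_FN, KEY_LEFTCTRL },
	{ }	/* terminator */
};

static const struct key_translation *find_translation(
		const struct key_translation *table, uint16_t from)
{
	for (; table->from; table++)
		if (table->from == from)
			return table;
	return NULL;
}

int main(void)
{
	const struct key_translation *t = find_translation(swap_table, KEY_FN);

	printf("KEY_FN (%u) -> %u\n", KEY_FN, t ? t->to : 0);
	return 0;
}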
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index e6e4c841fb06..c183caf89d49 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -40,7 +40,9 @@ MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define T100_TPAD_INTF 2
+#define MEDION_E1239T_TPAD_INTF 1
+#define E1239T_TP_TOGGLE_REPORT_ID 0x05
#define T100CHI_MOUSE_REPORT_ID 0x06
#define FEATURE_REPORT_ID 0x0d
#define INPUT_REPORT_ID 0x5d
@@ -77,6 +79,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_G752_KEYBOARD BIT(8)
#define QUIRK_T101HA_DOCK BIT(9)
#define QUIRK_T90CHI BIT(10)
+#define QUIRK_MEDION_E1239T BIT(11)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -102,12 +105,14 @@ struct asus_touchpad_info {
int res_y;
int contact_size;
int max_contacts;
+ int report_size;
};
struct asus_drvdata {
unsigned long quirks;
struct hid_device *hdev;
struct input_dev *input;
+ struct input_dev *tp_kbd_input;
struct asus_kbd_leds *kbd_backlight;
const struct asus_touchpad_info *tp;
bool enable_backlight;
@@ -126,6 +131,7 @@ static const struct asus_touchpad_info asus_i2c_tp = {
.max_y = 1758,
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100ta_tp = {
@@ -135,6 +141,7 @@ static const struct asus_touchpad_info asus_t100ta_tp = {
.res_y = 27, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100ha_tp = {
@@ -144,6 +151,7 @@ static const struct asus_touchpad_info asus_t100ha_tp = {
.res_y = 29, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t200ta_tp = {
@@ -153,6 +161,7 @@ static const struct asus_touchpad_info asus_t200ta_tp = {
.res_y = 28, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100chi_tp = {
@@ -162,6 +171,17 @@ static const struct asus_touchpad_info asus_t100chi_tp = {
.res_y = 29, /* units/mm */
.contact_size = 3,
.max_contacts = 4,
+ .report_size = 15 /* 2 byte header + 3 * 4 + 1 byte footer */,
+};
+
+static const struct asus_touchpad_info medion_e1239t_tp = {
+ .max_x = 2640,
+ .max_y = 1380,
+ .res_x = 29, /* units/mm */
+ .res_y = 28, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+ .report_size = 32 /* 2 byte header + 5 * 5 + 5 byte footer */,
};
static void asus_report_contact_down(struct asus_drvdata *drvdat,
@@ -229,7 +249,7 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
int i, toolType = MT_TOOL_FINGER;
u8 *contactData = data + 2;
- if (size != 3 + drvdat->tp->contact_size * drvdat->tp->max_contacts)
+ if (size != drvdat->tp->report_size)
return 0;
for (i = 0; i < drvdat->tp->max_contacts; i++) {
@@ -257,6 +277,34 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
return 1;
}
+static int asus_e1239t_event(struct asus_drvdata *drvdat, u8 *data, int size)
+{
+ if (size != 3)
+ return 0;
+
+ /* Handle broken mute key which only sends press events */
+ if (!drvdat->tp &&
+ data[0] == 0x02 && data[1] == 0xe2 && data[2] == 0x00) {
+ input_report_key(drvdat->input, KEY_MUTE, 1);
+ input_sync(drvdat->input);
+ input_report_key(drvdat->input, KEY_MUTE, 0);
+ input_sync(drvdat->input);
+ return 1;
+ }
+
+ /* Handle custom touchpad toggle key which only sends press events */
+ if (drvdat->tp_kbd_input &&
+ data[0] == 0x05 && data[1] == 0x02 && data[2] == 0x28) {
+ input_report_key(drvdat->tp_kbd_input, KEY_F21, 1);
+ input_sync(drvdat->tp_kbd_input);
+ input_report_key(drvdat->tp_kbd_input, KEY_F21, 0);
+ input_sync(drvdat->tp_kbd_input);
+ return 1;
+ }
+
+ return 0;
+}
+
static int asus_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -281,6 +329,9 @@ static int asus_raw_event(struct hid_device *hdev,
if (drvdata->tp && data[0] == INPUT_REPORT_ID)
return asus_report_input(drvdata, data, size);
+ if (drvdata->quirks & QUIRK_MEDION_E1239T)
+ return asus_e1239t_event(drvdata, data, size);
+
return 0;
}
@@ -615,6 +666,21 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
hi->report->id != T100CHI_MOUSE_REPORT_ID)
return 0;
+ /* Handle MULTI_INPUT on E1239T mouse/touchpad USB interface */
+ if (drvdata->tp && (drvdata->quirks & QUIRK_MEDION_E1239T)) {
+ switch (hi->report->id) {
+ case E1239T_TP_TOGGLE_REPORT_ID:
+ input_set_capability(input, EV_KEY, KEY_F21);
+ input->name = "Asus Touchpad Keys";
+ drvdata->tp_kbd_input = input;
+ return 0;
+ case INPUT_REPORT_ID:
+ break; /* Touchpad report, handled below */
+ default:
+ return 0; /* Ignore other reports */
+ }
+ }
+
if (drvdata->tp) {
int ret;
@@ -677,24 +743,16 @@ static int asus_input_mapping(struct hid_device *hdev,
* This avoids a bunch of non-functional hid_input devices getting
* created because of the T100CHI using HID_QUIRK_MULTI_INPUT.
*/
- if (drvdata->quirks & (QUIRK_T100CHI | QUIRK_T90CHI)) {
- if (field->application == (HID_UP_GENDESK | 0x0080) ||
- usage->hid == (HID_UP_GENDEVCTRLS | 0x0024) ||
- usage->hid == (HID_UP_GENDEVCTRLS | 0x0025) ||
- usage->hid == (HID_UP_GENDEVCTRLS | 0x0026))
- return -1;
- /*
- * We use the hid_input for the mouse report for the touchpad,
- * keep the left button, to avoid the core removing it.
- */
- if (field->application == HID_GD_MOUSE &&
- usage->hid != (HID_UP_BUTTON | 1))
- return -1;
- }
+ if ((drvdata->quirks & (QUIRK_T100CHI | QUIRK_T90CHI)) &&
+ (field->application == (HID_UP_GENDESK | 0x0080) ||
+ field->application == HID_GD_MOUSE ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0024) ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0025) ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0026)))
+ return -1;
/* ASUS-specific keyboard hotkeys */
if ((usage->hid & HID_USAGE_PAGE) == 0xff310000) {
- set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break;
case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break;
@@ -737,11 +795,11 @@ static int asus_input_mapping(struct hid_device *hdev,
if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT)
drvdata->enable_backlight = true;
+ set_bit(EV_REP, hi->input->evbit);
return 1;
}
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) {
- set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0xff01: asus_map_key_clear(BTN_1); break;
case 0xff02: asus_map_key_clear(BTN_2); break;
@@ -764,6 +822,7 @@ static int asus_input_mapping(struct hid_device *hdev,
return 0;
}
+ set_bit(EV_REP, hi->input->evbit);
return 1;
}
@@ -782,6 +841,16 @@ static int asus_input_mapping(struct hid_device *hdev,
}
}
+ /*
+ * The mute button is broken and only sends press events, we
+ * deal with this in our raw_event handler, so do not map it.
+ */
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
+ usage->hid == (HID_UP_CONSUMER | 0xe2)) {
+ input_set_capability(hi->input, EV_KEY, KEY_MUTE);
+ return -1;
+ }
+
return 0;
}
@@ -849,7 +918,8 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
drvdata->tp = &asus_i2c_tp;
- if (drvdata->quirks & QUIRK_T100_KEYBOARD) {
+ if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
+ hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@@ -877,6 +947,19 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
drvdata->tp = &asus_t100chi_tp;
}
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
+ hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ struct usb_host_interface *alt =
+ to_usb_interface(hdev->dev.parent)->altsetting;
+
+ if (alt->desc.bInterfaceNumber == MEDION_E1239T_TPAD_INTF) {
+ /* For separate input-devs for tp and tp toggle key */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ drvdata->quirks |= QUIRK_SKIP_INPUT_MAPPING;
+ drvdata->tp = &medion_e1239t_tp;
+ }
+ }
+
if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
@@ -1056,7 +1139,8 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
-
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
+ QUIRK_MEDION_E1239T },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
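The new .report_size field replaces the old computed check (3 + contact_size * max_contacts) because the footer length is no longer uniform: most of the pads above append 1 byte, while the Medion E1239T appends 5. The stored values follow directly from the layout comments; a small sanity-check sketch of the arithmetic:

#include <stdio.h>

/* Sketch of the arithmetic captured by asus_touchpad_info.report_size:
 * 2-byte header + contact_size * max_contacts + footer. The old check
 * hard-coded a 1-byte footer ("3 +" == 2 + 1), which the E1239T's
 * 5-byte footer breaks.
 */
static int tp_report_size(int contact_size, int max_contacts, int footer)
{
	return 2 + contact_size * max_contacts + footer;
}

int main(void)
{
	printf("%d\n", tp_report_size(5, 5, 1));	/* 28: asus_i2c_tp, t100ta, ... */
	printf("%d\n", tp_report_size(3, 4, 1));	/* 15: asus_t100chi_tp */
	printf("%d\n", tp_report_size(5, 5, 5));	/* 32: medion_e1239t_tp */
	return 0;
}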
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 1c71a1aa76b2..874fc3791f3b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -76,12 +76,9 @@
#define USB_VENDOR_ID_ALPS_JP 0x044E
#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
-#define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
-#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
#define HID_DEVICE_ID_ALPS_U1 0x1215
#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E
#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
-#define HID_DEVICE_ID_ALPS_1222 0x1222
#define USB_VENDOR_ID_AMI 0x046b
#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
@@ -281,9 +278,6 @@
#define USB_VENDOR_ID_CIDC 0x1677
-#define I2C_VENDOR_ID_CIRQUE 0x0488
-#define I2C_PRODUCT_ID_CIRQUE_121F 0x121F
-
#define USB_VENDOR_ID_CJTOUCH 0x24b8
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
@@ -640,6 +634,7 @@
#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
#define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
#define USB_DEVICE_ID_ITE8595 0x8595
+#define USB_DEVICE_ID_ITE_MEDION_E1239T 0xce50
#define USB_VENDOR_ID_JABRA 0x0b0e
#define USB_DEVICE_ID_JABRA_SPEAK_410 0x0412
@@ -730,8 +725,6 @@
#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
-#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
-#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_VENDOR_ID_LG 0x1fd2
@@ -1157,6 +1150,9 @@
#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882
#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883
+#define USB_VENDOR_ID_TRUST 0x145f
+#define USB_DEVICE_ID_TRUST_PANORA_TABLET 0x0212
+
#define USB_VENDOR_ID_TURBOX 0x062a
#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
#define USB_DEVICE_ID_ASUS_MD_5110 0x5110
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index ed9b1c1f460d..48dff5d6b605 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * HID driver for Logitech Unifying receivers
+ * HID driver for Logitech receivers
*
* Copyright (c) 2011 Logitech
*/
@@ -701,7 +701,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
type_str, dj_hiddev->product);
} else {
snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
- "Logitech Unifying Device. Wireless PID:%04x",
+ "Logitech Wireless Device PID:%04x",
dj_hiddev->product);
}
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 094f4f1b6555..1e1cf8eae649 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * HIDPP protocol for Logitech Unifying receivers
+ * HIDPP protocol for Logitech receivers
*
* Copyright (c) 2011 Logitech (c)
* Copyright (c) 2012-2013 Google (c)
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index d958475f8c81..e1b93ce32e01 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -15,6 +15,7 @@
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <linux/i2c.h>
+#include <linux/gpio/driver.h>
#include "hid-ids.h"
/* Commands codes in a raw output report */
@@ -27,6 +28,8 @@ enum {
MCP2221_I2C_PARAM_OR_STATUS = 0x10,
MCP2221_I2C_SET_SPEED = 0x20,
MCP2221_I2C_CANCEL = 0x10,
+ MCP2221_GPIO_SET = 0x50,
+ MCP2221_GPIO_GET = 0x51,
};
/* Response codes in a raw input report */
@@ -42,6 +45,8 @@ enum {
MCP2221_I2C_WRADDRL_SEND = 0x21,
MCP2221_I2C_ADDR_NACK = 0x25,
MCP2221_I2C_READ_COMPL = 0x55,
+ MCP2221_ALT_F_NOT_GPIOV = 0xEE,
+ MCP2221_ALT_F_NOT_GPIOD = 0xEF,
};
/*
@@ -59,6 +64,9 @@ struct mcp2221 {
int rxbuf_idx;
int status;
u8 cur_i2c_clk_div;
+ struct gpio_chip *gc;
+ u8 gp_idx;
+ u8 gpio_dir;
};
/*
@@ -526,6 +534,110 @@ static const struct i2c_algorithm mcp_i2c_algo = {
.functionality = mcp_i2c_func,
};
+static int mcp_gpio_get(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mcp->txbuf[0] = MCP2221_GPIO_GET;
+
+ mcp->gp_idx = (offset + 1) * 2;
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ mutex_unlock(&mcp->lock);
+
+ return ret;
+}
+
+static void mcp_gpio_set(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ memset(mcp->txbuf, 0, 18);
+ mcp->txbuf[0] = MCP2221_GPIO_SET;
+
+ mcp->gp_idx = ((offset + 1) * 4) - 1;
+
+ mcp->txbuf[mcp->gp_idx - 1] = 1;
+ mcp->txbuf[mcp->gp_idx] = !!value;
+
+ mutex_lock(&mcp->lock);
+ mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+ mutex_unlock(&mcp->lock);
+}
+
+static int mcp_gpio_dir_set(struct mcp2221 *mcp,
+ unsigned int offset, u8 val)
+{
+ memset(mcp->txbuf, 0, 18);
+ mcp->txbuf[0] = MCP2221_GPIO_SET;
+
+ mcp->gp_idx = (offset + 1) * 5;
+
+ mcp->txbuf[mcp->gp_idx - 1] = 1;
+ mcp->txbuf[mcp->gp_idx] = val;
+
+ return mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+}
+
+static int mcp_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_gpio_dir_set(mcp, offset, 0);
+ mutex_unlock(&mcp->lock);
+
+ return ret;
+}
+
+static int mcp_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_gpio_dir_set(mcp, offset, 1);
+ mutex_unlock(&mcp->lock);
+
+ /* Can't configure as output, bail out early */
+ if (ret)
+ return ret;
+
+ mcp_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int mcp_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mcp->txbuf[0] = MCP2221_GPIO_GET;
+
+ mcp->gp_idx = (offset + 1) * 2;
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ mutex_unlock(&mcp->lock);
+
+ if (ret)
+ return ret;
+
+ if (mcp->gpio_dir)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
/* Gives current state of i2c engine inside mcp2221 */
static int mcp_get_i2c_eng_state(struct mcp2221 *mcp,
u8 *data, u8 idx)
@@ -638,6 +750,39 @@ static int mcp2221_raw_event(struct hid_device *hdev,
complete(&mcp->wait_in_report);
break;
+ case MCP2221_GPIO_GET:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if ((data[mcp->gp_idx] == MCP2221_ALT_F_NOT_GPIOV) ||
+ (data[mcp->gp_idx + 1] == MCP2221_ALT_F_NOT_GPIOD)) {
+ mcp->status = -ENOENT;
+ } else {
+ mcp->status = !!data[mcp->gp_idx];
+ mcp->gpio_dir = !!data[mcp->gp_idx + 1];
+ }
+ break;
+ default:
+ mcp->status = -EAGAIN;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
+ case MCP2221_GPIO_SET:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if ((data[mcp->gp_idx] == MCP2221_ALT_F_NOT_GPIOV) ||
+ (data[mcp->gp_idx - 1] == MCP2221_ALT_F_NOT_GPIOV)) {
+ mcp->status = -ENOENT;
+ } else {
+ mcp->status = 0;
+ }
+ break;
+ default:
+ mcp->status = -EAGAIN;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
default:
mcp->status = -EIO;
complete(&mcp->wait_in_report);
@@ -702,8 +847,32 @@ static int mcp2221_probe(struct hid_device *hdev,
}
i2c_set_adapdata(&mcp->adapter, mcp);
+ /* Setup GPIO chip */
+ mcp->gc = devm_kzalloc(&hdev->dev, sizeof(*mcp->gc), GFP_KERNEL);
+ if (!mcp->gc) {
+ ret = -ENOMEM;
+ goto err_gc;
+ }
+
+ mcp->gc->label = "mcp2221_gpio";
+ mcp->gc->direction_input = mcp_gpio_direction_input;
+ mcp->gc->direction_output = mcp_gpio_direction_output;
+ mcp->gc->get_direction = mcp_gpio_get_direction;
+ mcp->gc->set = mcp_gpio_set;
+ mcp->gc->get = mcp_gpio_get;
+ mcp->gc->ngpio = 4;
+ mcp->gc->base = -1;
+ mcp->gc->can_sleep = 1;
+ mcp->gc->parent = &hdev->dev;
+
+ ret = devm_gpiochip_add_data(&hdev->dev, mcp->gc, mcp);
+ if (ret)
+ goto err_gc;
+
return 0;
+err_gc:
+ i2c_del_adapter(&mcp->adapter);
err_i2c:
hid_hw_close(mcp->hdev);
err_hstop:
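The gp_idx arithmetic in the GPIO callbacks above encodes where each pin's fields live in the MCP2221's fixed-layout GPIO reports; the formulas differ because the get response packs value/direction pairs, while the set request reserves an "alter" flag slot before each field. A worked example (user-space sketch; indices are those implied by the driver code, not quoted from the datasheet):

#include <stdio.h>

/* Worked example of the buffer-index arithmetic in mcp_gpio_get(),
 * mcp_gpio_set() and mcp_gpio_dir_set(): each of the four GPIO pins
 * occupies a fixed slot derived from its offset.
 */
int main(void)
{
	unsigned int offset;

	for (offset = 0; offset < 4; offset++)
		printf("GP%u: get value idx %u, set value idx %u, dir idx %u\n",
		       offset,
		       (offset + 1) * 2,	/* mcp_gpio_get(): value, dir at +1 */
		       (offset + 1) * 4 - 1,	/* mcp_gpio_set(): flag at -1 */
		       (offset + 1) * 5);	/* mcp_gpio_dir_set(): flag at -1 */
	return 0;
}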
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 03c720b47306..35c8c174a0ce 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -69,6 +69,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_ASUS_CUSTOM_UP BIT(17)
#define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18)
#define MT_QUIRK_SEPARATE_APP_REPORT BIT(19)
+#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -188,7 +189,8 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
/* reserved 0x0011 */
#define MT_CLS_WIN_8 0x0012
#define MT_CLS_EXPORT_ALL_INPUTS 0x0013
-#define MT_CLS_WIN_8_DUAL 0x0014
+/* reserved 0x0014 */
+#define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -272,12 +274,14 @@ static const struct mt_class mt_classes[] = {
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_CONTACT_CNT_ACCURATE,
.export_all_inputs = true },
- { .name = MT_CLS_WIN_8_DUAL,
+ { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_IGNORE_DUPLICATES |
MT_QUIRK_HOVERING |
MT_QUIRK_CONTACT_CNT_ACCURATE |
- MT_QUIRK_WIN8_PTP_BUTTONS,
+ MT_QUIRK_STICKY_FINGERS |
+ MT_QUIRK_WIN8_PTP_BUTTONS |
+ MT_QUIRK_FORCE_MULTI_INPUT,
.export_all_inputs = true },
/*
@@ -754,8 +758,7 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
- if ((cls->name == MT_CLS_WIN_8 ||
- cls->name == MT_CLS_WIN_8_DUAL) &&
+ if (cls->name == MT_CLS_WIN_8 &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1714,6 +1717,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ if (mtclass->quirks & MT_QUIRK_FORCE_MULTI_INPUT) {
+ hdev->quirks &= ~HID_QUIRK_INPUT_PER_APP;
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ }
+
timer_setup(&td->release_timer, mt_expired_timeout, 0);
ret = hid_parse(hdev);
@@ -1786,32 +1794,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_3M,
USB_DEVICE_ID_3M3266) },
- /* Alps devices */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_ALPS_JP,
- HID_DEVICE_ID_ALPS_U1_DUAL_PTP) },
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_ALPS_JP,
- HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_ALPS_JP,
- HID_DEVICE_ID_ALPS_1222) },
-
- /* Lenovo X1 TAB Gen 2 */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_LENOVO,
- USB_DEVICE_ID_LENOVO_X1_TAB) },
-
- /* Lenovo X1 TAB Gen 3 */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_LENOVO,
- USB_DEVICE_ID_LENOVO_X1_TAB3) },
-
/* Anton devices */
{ .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
@@ -1846,12 +1828,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
- /* Cirque devices */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- I2C_VENDOR_ID_CIRQUE,
- I2C_PRODUCT_ID_CIRQUE_121F) },
-
/* CJTouch panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
@@ -1926,6 +1902,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
+ /* Elan devices */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ELAN, 0x313a) },
+
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
@@ -2056,6 +2037,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
+ /* Synaptics devices */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+
/* TopSeed panels */
{ .driver_data = MT_CLS_TOPSEED,
MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index e4cb543de0cd..ca8b5c261c7c 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -168,6 +168,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET), HID_QUIRK_MULTI_INPUT | HID_QUIRK_HIDINPUT_FORCE },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 4c6ed6ef31f1..2f073f536070 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -867,6 +867,23 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
if (sc->quirks & PS3REMOTE)
return ps3remote_fixup(hdev, rdesc, rsize);
+ /*
+ * Some knock-off USB dongles incorrectly report their button count
+ * as 13 instead of 16, causing three non-functional buttons.
+ */
+ if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 &&
+ /* Report Count (13) */
+ rdesc[23] == 0x95 && rdesc[24] == 0x0D &&
+ /* Usage Maximum (13) */
+ rdesc[37] == 0x29 && rdesc[38] == 0x0D &&
+ /* Report Count (3) */
+ rdesc[43] == 0x95 && rdesc[44] == 0x03) {
+ hid_info(hdev, "Fixing up USB dongle report descriptor\n");
+ rdesc[24] = 0x10;
+ rdesc[38] = 0x10;
+ rdesc[44] = 0x00;
+ }
+
return rdesc;
}
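The fixed-offset compare-and-patch above works because HID report descriptors are built from short items: a prefix byte whose upper bits identify the item and whose low two bits give the payload size, so 0x95 0x0D is "Report Count, 13" and 0x29 0x0D is "Usage Maximum, 13". A small user-space decoder sketch of that encoding:

#include <stdio.h>
#include <stdint.h>

/* Illustrative decoder for HID short items: low two bits of the prefix
 * give the data size (encoding 3 means 4 bytes); here we print only the
 * first data byte, which is all these one-byte items carry.
 */
static void decode_short_item(const uint8_t *p)
{
	unsigned int size = p[0] & 0x03;

	if (size == 3)
		size = 4;
	printf("prefix 0x%02x, %u data byte(s), first value %u\n",
	       p[0], size, size ? p[1] : 0u);
}

int main(void)
{
	static const uint8_t report_count_13[] = { 0x95, 0x0D };
	static const uint8_t usage_max_13[]    = { 0x29, 0x0D };

	decode_short_item(report_count_13);	/* Report Count (13) */
	decode_short_item(usage_max_13);	/* Usage Maximum (13) */
	return 0;
}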
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index a66f08041a1a..ec142bc8c1da 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -389,6 +389,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
+ {
+ .ident = "Schneider SCL142ALM",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
{ } /* Terminate list */
};
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index aa2dbed30fc3..6cf59fd26ad7 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -480,6 +480,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
sizeof(ldr_xfer_query_resp));
if (rv < 0) {
client_data->flag_retry = true;
+ *fw_info = (struct shim_fw_info){};
return rv;
}
@@ -489,6 +490,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
"data size %d is not equal to size of loader_xfer_query_response %zu\n",
rv, sizeof(struct loader_xfer_query_response));
client_data->flag_retry = true;
+ *fw_info = (struct shim_fw_info){};
return -EMSGSIZE;
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 23f358cb7f49..90070b337c10 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -290,6 +290,34 @@ int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
/*
+ * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
+ *
+ * CHANNELMSG_MODIFYCHANNEL messages are asynchronous. Also, Hyper-V does not
+ * ACK such messages. IOW we can't know when the host will stop interrupting
+ * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
+ *
+ * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
+ * VERSION_WIN10_V4_1.
+ */
+int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
+{
+ struct vmbus_channel_modifychannel conn_msg;
+ int ret;
+
+ memset(&conn_msg, 0, sizeof(conn_msg));
+ conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+ conn_msg.child_relid = child_relid;
+ conn_msg.target_vp = target_vp;
+
+ ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+
+ trace_vmbus_send_modifychannel(&conn_msg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
+
+/*
* create_gpadl_header - Creates a gpadl for the specified buffer
*/
static int create_gpadl_header(void *kbuffer, u32 size,
@@ -594,35 +622,31 @@ post_msg_err:
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
-static void reset_channel_cb(void *arg)
-{
- struct vmbus_channel *channel = arg;
-
- channel->onchannel_callback = NULL;
-}
-
void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
+ unsigned long flags;
+
/*
* vmbus_on_event(), running in the per-channel tasklet, can race
* with vmbus_close_internal() in the case of SMP guest, e.g., when
* the former is accessing channel->inbound.ring_buffer, the latter
* could be freeing the ring_buffer pages, so here we must stop it
* first.
+ *
+ * vmbus_chan_sched() might call the netvsc driver callback function
+ * that ends up scheduling NAPI work that accesses the ring buffer.
+ * At this point, we have to ensure that any such work is completed
+ * and that the channel ring buffer is no longer being accessed, cf.
+ * the calls to napi_disable() in netvsc_device_remove().
*/
tasklet_disable(&channel->callback_event);
- channel->sc_creation_callback = NULL;
+ /* See the inline comments in vmbus_chan_sched(). */
+ spin_lock_irqsave(&channel->sched_lock, flags);
+ channel->onchannel_callback = NULL;
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
- /* Stop the callback asap */
- if (channel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(channel->target_cpu, reset_channel_cb,
- channel, true);
- } else {
- reset_channel_cb(channel);
- put_cpu();
- }
+ channel->sc_creation_callback = NULL;
/* Re-enable tasklet for use on re-open */
tasklet_enable(&channel->callback_event);
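Since the new vmbus_send_modifychannel() export earlier in this file is fire-and-forget, a caller might look like the following sketch (hypothetical consumer, assuming an already-open channel; the driver itself adds no such caller here):

#include <linux/hyperv.h>
#include <asm/mshyperv.h>

/* Hypothetical usage sketch: retarget an open channel's interrupt to the
 * vCPU backing new_cpu. The request is asynchronous and never ACKed by
 * Hyper-V, so callers must tolerate interrupts on the old vCPU for some
 * time after this returns.
 */
static int move_channel_interrupt(struct vmbus_channel *chan, u32 new_cpu)
{
	return vmbus_send_modifychannel(chan->offermsg.child_relid,
					hv_cpu_number_to_vp_number(new_cpu));
}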
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 501c43c5851d..417a95e5094d 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -18,14 +18,15 @@
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/cpu.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
-static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
+static void init_vp_index(struct vmbus_channel *channel);
-static const struct vmbus_device vmbus_devs[] = {
+const struct vmbus_device vmbus_devs[] = {
/* IDE */
{ .dev_type = HV_IDE,
HV_IDE_GUID,
@@ -315,11 +316,11 @@ static struct vmbus_channel *alloc_channel(void)
if (!channel)
return NULL;
+ spin_lock_init(&channel->sched_lock);
spin_lock_init(&channel->lock);
init_completion(&channel->rescind_event);
INIT_LIST_HEAD(&channel->sc_list);
- INIT_LIST_HEAD(&channel->percpu_list);
tasklet_init(&channel->callback_event,
vmbus_on_event, (unsigned long)channel);
@@ -340,23 +341,49 @@ static void free_channel(struct vmbus_channel *channel)
kobject_put(&channel->kobj);
}
-static void percpu_channel_enq(void *arg)
+void vmbus_channel_map_relid(struct vmbus_channel *channel)
{
- struct vmbus_channel *channel = arg;
- struct hv_per_cpu_context *hv_cpu
- = this_cpu_ptr(hv_context.cpu_context);
-
- list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
+ if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
+ return;
+ /*
+ * The mapping of the channel's relid is visible from the CPUs that
+ * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
+ * execute:
+ *
+ * (a) In the "normal (i.e., not resuming from hibernation)" path,
+ * the full barrier in smp_store_mb() guarantees that the store
+ * is propagated to all CPUs before the add_channel_work work
+ * is queued. In turn, add_channel_work is queued before the
+ * channel's ring buffer is allocated/initialized and the
+ * OPENCHANNEL message for the channel is sent in vmbus_open().
+ * Hyper-V won't start sending the interrupts for the channel
+ * before the OPENCHANNEL message is acked. The memory barrier
+ * in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
+ * that vmbus_chan_sched() must find the channel's relid in
+ * recv_int_page before retrieving the channel pointer from the
+ * array of channels.
+ *
+ * (b) In the "resuming from hibernation" path, the smp_store_mb()
+ * guarantees that the store is propagated to all CPUs before
+ * the VMBus connection is marked as ready for the resume event
+ * (cf. check_ready_for_resume_event()). The interrupt handler
+ * of the VMBus driver and vmbus_chan_sched() can not run before
+ * vmbus_bus_resume() has completed execution (cf. resume_noirq).
+ */
+ smp_store_mb(
+ vmbus_connection.channels[channel->offermsg.child_relid],
+ channel);
}
-static void percpu_channel_deq(void *arg)
+void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
{
- struct vmbus_channel *channel = arg;
-
- list_del_rcu(&channel->percpu_list);
+ if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
+ return;
+ WRITE_ONCE(
+ vmbus_connection.channels[channel->offermsg.child_relid],
+ NULL);
}
-
static void vmbus_release_relid(u32 relid)
{
struct vmbus_channel_relid_released msg;
@@ -373,39 +400,43 @@ static void vmbus_release_relid(u32 relid)
void hv_process_channel_removal(struct vmbus_channel *channel)
{
- struct vmbus_channel *primary_channel;
unsigned long flags;
- BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
BUG_ON(!channel->rescind);
- if (channel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(channel->target_cpu,
- percpu_channel_deq, channel, true);
- } else {
- percpu_channel_deq(channel);
- put_cpu();
- }
+ /*
+ * hv_process_channel_removal() could find INVALID_RELID only for
+ * hv_sock channels. See the inline comments in vmbus_onoffer().
+ */
+ WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
+ !is_hvsock_channel(channel));
+
+ /*
+ * Upon suspend, an in-use hv_sock channel is removed from the array of
+ * channels and the relid is invalidated. After hibernation, when the
+ * user-space appplication destroys the channel, it's unnecessary and
+ * unsafe to remove the channel from the array of channels. See also
+ * the inline comments before the call of vmbus_release_relid() below.
+ */
+ if (channel->offermsg.child_relid != INVALID_RELID)
+ vmbus_channel_unmap_relid(channel);
if (channel->primary_channel == NULL) {
list_del(&channel->listentry);
-
- primary_channel = channel;
} else {
- primary_channel = channel->primary_channel;
+ struct vmbus_channel *primary_channel = channel->primary_channel;
spin_lock_irqsave(&primary_channel->lock, flags);
list_del(&channel->sc_list);
spin_unlock_irqrestore(&primary_channel->lock, flags);
}
/*
- * We need to free the bit for init_vp_index() to work in the case
- * of sub-channel, when we reload drivers like hv_netvsc.
+ * If this is a "perf" channel, update the hv_numa_map[] masks so that
+ * init_vp_index() can (re-)use the CPU.
*/
- if (channel->affinity_policy == HV_LOCALIZED)
- cpumask_clear_cpu(channel->target_cpu,
- &primary_channel->alloced_cpus_in_node);
+ if (hv_is_perf_channel(channel))
+ hv_clear_alloced_cpu(channel->target_cpu);
/*
* Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
@@ -440,23 +471,8 @@ static void vmbus_add_channel_work(struct work_struct *work)
container_of(work, struct vmbus_channel, add_channel_work);
struct vmbus_channel *primary_channel = newchannel->primary_channel;
unsigned long flags;
- u16 dev_type;
int ret;
- dev_type = hv_get_dev_type(newchannel);
-
- init_vp_index(newchannel, dev_type);
-
- if (newchannel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(newchannel->target_cpu,
- percpu_channel_enq,
- newchannel, true);
- } else {
- percpu_channel_enq(newchannel);
- put_cpu();
- }
-
/*
* This state is used to indicate a successful open
* so that when we do close the channel normally, we
@@ -488,7 +504,7 @@ static void vmbus_add_channel_work(struct work_struct *work)
if (!newchannel->device_obj)
goto err_deq_chan;
- newchannel->device_obj->device_id = dev_type;
+ newchannel->device_obj->device_id = newchannel->device_id;
/*
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
@@ -523,17 +539,10 @@ err_deq_chan:
spin_unlock_irqrestore(&primary_channel->lock, flags);
}
- mutex_unlock(&vmbus_connection.channel_mutex);
+ /* vmbus_process_offer() has mapped the channel. */
+ vmbus_channel_unmap_relid(newchannel);
- if (newchannel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(newchannel->target_cpu,
- percpu_channel_deq,
- newchannel, true);
- } else {
- percpu_channel_deq(newchannel);
- put_cpu();
- }
+ mutex_unlock(&vmbus_connection.channel_mutex);
vmbus_release_relid(newchannel->offermsg.child_relid);
@@ -551,8 +560,35 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
unsigned long flags;
bool fnew = true;
+ /*
+ * Synchronize vmbus_process_offer() and CPU hotplugging:
+ *
+ * CPU1 CPU2
+ *
+ * [vmbus_process_offer()] [Hot removal of the CPU]
+ *
+ * CPUS_READ_LOCK CPUS_WRITE_LOCK
+ * LOAD cpu_online_mask SEARCH chn_list
+ * STORE target_cpu LOAD target_cpu
+ * INSERT chn_list STORE cpu_online_mask
+ * CPUS_READ_UNLOCK CPUS_WRITE_UNLOCK
+ *
+ * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
+ * CPU2's SEARCH from *not* seeing CPU1's INSERT
+ *
+ * Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
+ * CPU2's LOAD from *not* seing CPU1's STORE
+ */
+ cpus_read_lock();
+
+ /*
+ * Serializes the modifications of the chn_list list as well as
+ * the accesses to next_numa_node_id in init_vp_index().
+ */
mutex_lock(&vmbus_connection.channel_mutex);
+ init_vp_index(newchannel);
+
/* Remember the channels that should be cleaned up upon suspend. */
if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
@@ -599,7 +635,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
spin_unlock_irqrestore(&channel->lock, flags);
}
+ vmbus_channel_map_relid(newchannel);
+
mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
/*
* vmbus_process_offer() mustn't call channel->sc_creation_callback()
@@ -632,73 +671,61 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
* We use this state to statically distribute the channel interrupt load.
*/
static int next_numa_node_id;
-/*
- * init_vp_index() accesses global variables like next_numa_node_id, and
- * it can run concurrently for primary channels and sub-channels: see
- * vmbus_process_offer(), so we need the lock to protect the global
- * variables.
- */
-static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
/*
* Starting with Win8, we can statically distribute the incoming
* channel interrupt load by binding a channel to VCPU.
- * We distribute the interrupt loads to one or more NUMA nodes based on
- * the channel's affinity_policy.
*
* For pre-win8 hosts or non-performance critical channels we assign the
- * first CPU in the first NUMA node.
+ * VMBUS_CONNECT_CPU.
+ *
+ * Starting with win8, performance critical channels will be distributed
+ * evenly among all the available NUMA nodes. Once the node is assigned,
+ * we will assign the CPU based on a simple round robin scheme.
*/
-static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
+static void init_vp_index(struct vmbus_channel *channel)
{
- u32 cur_cpu;
- bool perf_chn = vmbus_devs[dev_type].perf_device;
- struct vmbus_channel *primary = channel->primary_channel;
- int next_node;
+ bool perf_chn = hv_is_perf_channel(channel);
cpumask_var_t available_mask;
struct cpumask *alloced_mask;
+ u32 target_cpu;
+ int numa_node;
if ((vmbus_proto_version == VERSION_WS2008) ||
(vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
!alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
/*
* Prior to win8, all channel interrupts are
- * delivered on cpu 0.
+ * delivered on VMBUS_CONNECT_CPU.
* Also if the channel is not a performance critical
- * channel, bind it to cpu 0.
- * In case alloc_cpumask_var() fails, bind it to cpu 0.
+ * channel, bind it to VMBUS_CONNECT_CPU.
+ * In case alloc_cpumask_var() fails, bind it to
+ * VMBUS_CONNECT_CPU.
*/
- channel->numa_node = 0;
- channel->target_cpu = 0;
- channel->target_vp = hv_cpu_number_to_vp_number(0);
+ channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU);
+ channel->target_cpu = VMBUS_CONNECT_CPU;
+ channel->target_vp =
+ hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
+ if (perf_chn)
+ hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
return;
}
- spin_lock(&bind_channel_to_cpu_lock);
-
- /*
- * Based on the channel affinity policy, we will assign the NUMA
- * nodes.
- */
-
- if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
- while (true) {
- next_node = next_numa_node_id++;
- if (next_node == nr_node_ids) {
- next_node = next_numa_node_id = 0;
- continue;
- }
- if (cpumask_empty(cpumask_of_node(next_node)))
- continue;
- break;
+ while (true) {
+ numa_node = next_numa_node_id++;
+ if (numa_node == nr_node_ids) {
+ next_numa_node_id = 0;
+ continue;
}
- channel->numa_node = next_node;
- primary = channel;
+ if (cpumask_empty(cpumask_of_node(numa_node)))
+ continue;
+ break;
}
- alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
+ channel->numa_node = numa_node;
+ alloced_mask = &hv_context.hv_numa_map[numa_node];
if (cpumask_weight(alloced_mask) ==
- cpumask_weight(cpumask_of_node(primary->numa_node))) {
+ cpumask_weight(cpumask_of_node(numa_node))) {
/*
* We have cycled through all the CPUs in the node;
* reset the alloced map.
@@ -706,59 +733,13 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
cpumask_clear(alloced_mask);
}
- cpumask_xor(available_mask, alloced_mask,
- cpumask_of_node(primary->numa_node));
-
- cur_cpu = -1;
-
- if (primary->affinity_policy == HV_LOCALIZED) {
- /*
- * Normally Hyper-V host doesn't create more subchannels
- * than there are VCPUs on the node but it is possible when not
- * all present VCPUs on the node are initialized by guest.
- * Clear the alloced_cpus_in_node to start over.
- */
- if (cpumask_equal(&primary->alloced_cpus_in_node,
- cpumask_of_node(primary->numa_node)))
- cpumask_clear(&primary->alloced_cpus_in_node);
- }
-
- while (true) {
- cur_cpu = cpumask_next(cur_cpu, available_mask);
- if (cur_cpu >= nr_cpu_ids) {
- cur_cpu = -1;
- cpumask_copy(available_mask,
- cpumask_of_node(primary->numa_node));
- continue;
- }
-
- if (primary->affinity_policy == HV_LOCALIZED) {
- /*
- * NOTE: in the case of sub-channel, we clear the
- * sub-channel related bit(s) in
- * primary->alloced_cpus_in_node in
- * hv_process_channel_removal(), so when we
- * reload drivers like hv_netvsc in SMP guest, here
- * we're able to re-allocate
- * bit from primary->alloced_cpus_in_node.
- */
- if (!cpumask_test_cpu(cur_cpu,
- &primary->alloced_cpus_in_node)) {
- cpumask_set_cpu(cur_cpu,
- &primary->alloced_cpus_in_node);
- cpumask_set_cpu(cur_cpu, alloced_mask);
- break;
- }
- } else {
- cpumask_set_cpu(cur_cpu, alloced_mask);
- break;
- }
- }
+ cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
- channel->target_cpu = cur_cpu;
- channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
+ target_cpu = cpumask_first(available_mask);
+ cpumask_set_cpu(target_cpu, alloced_mask);
- spin_unlock(&bind_channel_to_cpu_lock);
+ channel->target_cpu = target_cpu;
+ channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
free_cpumask_var(available_mask);
}
@@ -890,6 +871,7 @@ static void vmbus_setup_channel_state(struct vmbus_channel *channel,
sizeof(struct vmbus_channel_offer_channel));
channel->monitor_grp = (u8)offer->monitorid / 32;
channel->monitor_bit = (u8)offer->monitorid % 32;
+ channel->device_id = hv_get_dev_type(channel);
}
/*
@@ -940,8 +922,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
oldchannel = find_primary_channel_by_offer(offer);
if (oldchannel != NULL) {
- atomic_dec(&vmbus_connection.offer_in_progress);
-
/*
* We're resuming from hibernation: all the sub-channel and
* hv_sock channels we had before the hibernation should have
@@ -949,36 +929,65 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
* primary channel that we had before the hibernation.
*/
+ /*
+ * { Initially: channel relid = INVALID_RELID,
+ * channels[valid_relid] = NULL }
+ *
+ * CPU1 CPU2
+ *
+ * [vmbus_onoffer()] [vmbus_device_release()]
+ *
+ * LOCK channel_mutex LOCK channel_mutex
+ * STORE channel relid = valid_relid LOAD r1 = channel relid
+ * MAP_RELID channel if (r1 != INVALID_RELID)
+ * UNLOCK channel_mutex UNMAP_RELID channel
+ * UNLOCK channel_mutex
+ *
+ * Forbids: r1 == valid_relid &&
+ * channels[valid_relid] == channel
+ *
+ * Note. r1 can be INVALID_RELID only for an hv_sock channel.
+ * None of the hv_sock channels which were present before the
+ * suspend are re-offered upon the resume. See the WARN_ON()
+ * in hv_process_channel_removal().
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ atomic_dec(&vmbus_connection.offer_in_progress);
+
WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
/* Fix up the relid. */
oldchannel->offermsg.child_relid = offer->child_relid;
offer_sz = sizeof(*offer);
- if (memcmp(offer, &oldchannel->offermsg, offer_sz) == 0) {
- check_ready_for_resume_event();
- return;
+ if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
+ /*
+ * This is not an error, since the host can also change
+ * the other field(s) of the offer, e.g. on WS RS5
+ * (Build 17763), the offer->connection_id of the
+ * Mellanox VF vmbus device can change when the host
+ * reoffers the device upon resume.
+ */
+ pr_debug("vmbus offer changed: relid=%d\n",
+ offer->child_relid);
+
+ print_hex_dump_debug("Old vmbus offer: ",
+ DUMP_PREFIX_OFFSET, 16, 4,
+ &oldchannel->offermsg, offer_sz,
+ false);
+ print_hex_dump_debug("New vmbus offer: ",
+ DUMP_PREFIX_OFFSET, 16, 4,
+ offer, offer_sz, false);
+
+ /* Fix up the old channel. */
+ vmbus_setup_channel_state(oldchannel, offer);
}
- /*
- * This is not an error, since the host can also change the
- * other field(s) of the offer, e.g. on WS RS5 (Build 17763),
- * the offer->connection_id of the Mellanox VF vmbus device
- * can change when the host reoffers the device upon resume.
- */
- pr_debug("vmbus offer changed: relid=%d\n",
- offer->child_relid);
-
- print_hex_dump_debug("Old vmbus offer: ", DUMP_PREFIX_OFFSET,
- 16, 4, &oldchannel->offermsg, offer_sz,
- false);
- print_hex_dump_debug("New vmbus offer: ", DUMP_PREFIX_OFFSET,
- 16, 4, offer, offer_sz, false);
-
- /* Fix up the old channel. */
- vmbus_setup_channel_state(oldchannel, offer);
-
+ /* Add the channel back to the array of channels. */
+ vmbus_channel_map_relid(oldchannel);
check_ready_for_resume_event();
+ mutex_unlock(&vmbus_connection.channel_mutex);
return;
}
@@ -1028,11 +1037,22 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* offer comes in first and then the rescind.
* Since we process these events in work elements,
* and with preemption, we may end up processing
- * the events out of order. Given that we handle these
- * work elements on the same CPU, this is possible only
- * in the case of preemption. In any case wait here
- * until the offer processing has moved beyond the
- * point where the channel is discoverable.
+ * the events out of order. We rely on the synchronization
+ * provided by offer_in_progress and by channel_mutex for
+ * ordering these events:
+ *
+ * { Initially: offer_in_progress = 1 }
+ *
+ * CPU1 CPU2
+ *
+ * [vmbus_onoffer()] [vmbus_onoffer_rescind()]
+ *
+ * LOCK channel_mutex WAIT_ON offer_in_progress == 0
+ * DECREMENT offer_in_progress LOCK channel_mutex
+ * STORE channels[] LOAD channels[]
+ * UNLOCK channel_mutex UNLOCK channel_mutex
+ *
+ * Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
*/
while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
@@ -1332,30 +1352,36 @@ static void vmbus_onversion_response(
/* Channel message dispatch table */
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
- { CHANNELMSG_INVALID, 0, NULL },
- { CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer },
- { CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind },
- { CHANNELMSG_REQUESTOFFERS, 0, NULL },
- { CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered },
- { CHANNELMSG_OPENCHANNEL, 0, NULL },
- { CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result },
- { CHANNELMSG_CLOSECHANNEL, 0, NULL },
- { CHANNELMSG_GPADL_HEADER, 0, NULL },
- { CHANNELMSG_GPADL_BODY, 0, NULL },
- { CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created },
- { CHANNELMSG_GPADL_TEARDOWN, 0, NULL },
- { CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown },
- { CHANNELMSG_RELID_RELEASED, 0, NULL },
- { CHANNELMSG_INITIATE_CONTACT, 0, NULL },
- { CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response },
- { CHANNELMSG_UNLOAD, 0, NULL },
- { CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response },
- { CHANNELMSG_18, 0, NULL },
- { CHANNELMSG_19, 0, NULL },
- { CHANNELMSG_20, 0, NULL },
- { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL },
- { CHANNELMSG_22, 0, NULL },
- { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL },
+ { CHANNELMSG_INVALID, 0, NULL, 0},
+ { CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer,
+ sizeof(struct vmbus_channel_offer_channel)},
+ { CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind,
+ sizeof(struct vmbus_channel_rescind_offer) },
+ { CHANNELMSG_REQUESTOFFERS, 0, NULL, 0},
+ { CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered, 0},
+ { CHANNELMSG_OPENCHANNEL, 0, NULL, 0},
+ { CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result,
+ sizeof(struct vmbus_channel_open_result)},
+ { CHANNELMSG_CLOSECHANNEL, 0, NULL, 0},
+ { CHANNELMSG_GPADL_HEADER, 0, NULL, 0},
+ { CHANNELMSG_GPADL_BODY, 0, NULL, 0},
+ { CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created,
+ sizeof(struct vmbus_channel_gpadl_created)},
+ { CHANNELMSG_GPADL_TEARDOWN, 0, NULL, 0},
+ { CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown,
+ sizeof(struct vmbus_channel_gpadl_torndown) },
+ { CHANNELMSG_RELID_RELEASED, 0, NULL, 0},
+ { CHANNELMSG_INITIATE_CONTACT, 0, NULL, 0},
+ { CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response,
+ sizeof(struct vmbus_channel_version_response)},
+ { CHANNELMSG_UNLOAD, 0, NULL, 0},
+ { CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response, 0},
+ { CHANNELMSG_18, 0, NULL, 0},
+ { CHANNELMSG_19, 0, NULL, 0},
+ { CHANNELMSG_20, 0, NULL, 0},
+ { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL, 0},
+ { CHANNELMSG_MODIFYCHANNEL, 0, NULL, 0},
+ { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL, 0},
};
/*
@@ -1363,13 +1389,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
*
* This is invoked in the vmbus worker thread context.
*/
-void vmbus_onmessage(void *context)
+void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
{
- struct hv_message *msg = context;
- struct vmbus_channel_message_header *hdr;
-
- hdr = (struct vmbus_channel_message_header *)msg->u.payload;
-
trace_vmbus_on_message(hdr);
/*
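The rewritten init_vp_index() above reduces to a round-robin over NUMA nodes with non-empty CPU masks, followed by a first-fit pick from the node's not-yet-allocated CPUs. The node-selection loop, distilled into a standalone sketch (hypothetical helper; the driver serializes the cursor with channel_mutex):

#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/topology.h>

/* Hypothetical distillation of the node-selection loop in init_vp_index():
 * advance a shared cursor, wrap at nr_node_ids, skip nodes with no online
 * CPUs. The caller must serialize access to *cursor.
 */
static int pick_next_numa_node(int *cursor)
{
	int numa_node;

	for (;;) {
		numa_node = (*cursor)++;
		if (numa_node == nr_node_ids) {
			*cursor = 0;
			continue;
		}
		if (cpumask_empty(cpumask_of_node(numa_node)))
			continue;
		return numa_node;
	}
}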
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 74e77de89b4f..11170d9a2e1a 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -69,7 +69,6 @@ MODULE_PARM_DESC(max_version,
int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
{
int ret = 0;
- unsigned int cur_cpu;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
@@ -102,24 +101,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
- /*
- * We want all channel messages to be delivered on CPU 0.
- * This has been the behavior pre-win8. This is not
- * perf issue and having all channel messages delivered on CPU 0
- * would be ok.
- * For post win8 hosts, we support receiving channel messagges on
- * all the CPUs. This is needed for kexec to work correctly where
- * the CPU attempting to connect may not be CPU 0.
- */
- if (version >= VERSION_WIN8_1) {
- cur_cpu = get_cpu();
- msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
- vmbus_connection.connect_cpu = cur_cpu;
- put_cpu();
- } else {
- msg->target_vcpu = 0;
- vmbus_connection.connect_cpu = 0;
- }
+ msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
/*
* Add to list before we send the request since we may
@@ -266,6 +248,14 @@ int vmbus_connect(void)
pr_info("Vmbus version:%d.%d\n",
version >> 16, version & 0xFFFF);
+ vmbus_connection.channels = kcalloc(MAX_CHANNEL_RELIDS,
+ sizeof(struct vmbus_channel *),
+ GFP_KERNEL);
+ if (vmbus_connection.channels == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
kfree(msginfo);
return 0;
@@ -313,33 +303,9 @@ void vmbus_disconnect(void)
*/
struct vmbus_channel *relid2channel(u32 relid)
{
- struct vmbus_channel *channel;
- struct vmbus_channel *found_channel = NULL;
- struct list_head *cur, *tmp;
- struct vmbus_channel *cur_sc;
-
- BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
-
- list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
- if (channel->offermsg.child_relid == relid) {
- found_channel = channel;
- break;
- } else if (!list_empty(&channel->sc_list)) {
- /*
- * Deal with sub-channels.
- */
- list_for_each_safe(cur, tmp, &channel->sc_list) {
- cur_sc = list_entry(cur, struct vmbus_channel,
- sc_list);
- if (cur_sc->offermsg.child_relid == relid) {
- found_channel = cur_sc;
- break;
- }
- }
- }
- }
-
- return found_channel;
+ if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
+ return NULL;
+ return READ_ONCE(vmbus_connection.channels[relid]);
}
/*
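relid2channel() is now an O(1) array lookup paired with the smp_store_mb() publish in vmbus_channel_map_relid(): the writer's full barrier makes the slot visible before the interrupt path can observe the relid, and the reader needs only a single atomic load. The pattern, reduced to its essentials (a sketch, not the driver code):

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

#define MAX_RELIDS 2048	/* stand-in for MAX_CHANNEL_RELIDS */

struct chan;
static struct chan *chan_array[MAX_RELIDS];

/* Writer: publish the pointer with a store + full memory barrier. */
static void map_relid(u32 relid, struct chan *c)
{
	if (WARN_ON(relid >= MAX_RELIDS))
		return;
	smp_store_mb(chan_array[relid], c);
}

/* Reader: one bounds check plus one atomic load, no list walk. */
static struct chan *lookup_relid(u32 relid)
{
	if (WARN_ON(relid >= MAX_RELIDS))
		return NULL;
	return READ_ONCE(chan_array[relid]);
}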
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 533c8b82b344..857290dcfd95 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -117,8 +117,6 @@ int hv_synic_alloc(void)
pr_err("Unable to allocate post msg page\n");
goto err;
}
-
- INIT_LIST_HEAD(&hv_cpu->chan_list);
}
return 0;
@@ -246,10 +244,18 @@ int hv_synic_cleanup(unsigned int cpu)
unsigned long flags;
/*
+ * Hyper-V does not provide a way to change the connect CPU once
+ * it is set; we must prevent the connect CPU from going offline.
+ */
+ if (cpu == VMBUS_CONNECT_CPU)
+ return -EBUSY;
+
+ /*
* Search for channels which are bound to the CPU we're about to
- * cleanup. In case we find one and vmbus is still connected we need to
- * fail, this will effectively prevent CPU offlining. There is no way
- * we can re-bind channels to different CPUs for now.
+ * cleanup. In case we find one and vmbus is still connected, we
+ * fail; this will effectively prevent CPU offlining.
+ *
+ * TODO: Re-bind the channels to different CPUs.
*/
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index bb9ba3f7c794..5040d7e0cd9e 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -71,7 +71,7 @@ static void fcopy_poll_wrapper(void *channel)
{
/* Transaction is finished, reset the state here to avoid races. */
fcopy_transaction.state = HVUTIL_READY;
- hv_fcopy_onchannelcallback(channel);
+ tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}
static void fcopy_timeout_func(struct work_struct *dummy)
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 1c75b38f0d6d..783779e4cc1a 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -80,7 +80,7 @@ static void vss_poll_wrapper(void *channel)
{
/* Transaction is finished, reset the state here to avoid races. */
vss_transaction.state = HVUTIL_READY;
- hv_vss_onchannelcallback(channel);
+ tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}
/*
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index f9d14db980cb..6063bb21bb13 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -44,10 +44,8 @@ TRACE_EVENT(vmbus_onoffer,
__entry->monitorid = offer->monitorid;
__entry->is_ddc_int = offer->is_dedicated_interrupt;
__entry->connection_id = offer->connection_id;
- memcpy(__entry->if_type,
- &offer->offer.if_type.b, 16);
- memcpy(__entry->if_instance,
- &offer->offer.if_instance.b, 16);
+ export_guid(__entry->if_type, &offer->offer.if_type);
+ export_guid(__entry->if_instance, &offer->offer.if_instance);
__entry->chn_flags = offer->offer.chn_flags;
__entry->mmio_mb = offer->offer.mmio_megabytes;
__entry->sub_idx = offer->offer.sub_channel_index;
@@ -296,6 +294,25 @@ TRACE_EVENT(vmbus_send_tl_connect_request,
)
);
+TRACE_EVENT(vmbus_send_modifychannel,
+ TP_PROTO(const struct vmbus_channel_modifychannel *msg,
+ int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, target_vp)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->target_vp = msg->target_vp;
+ __entry->ret = ret;
+ ),
+ TP_printk("binding child_relid 0x%x to target_vp 0x%x, ret %d",
+ __entry->child_relid, __entry->target_vp, __entry->ret
+ )
+ );
+
DECLARE_EVENT_CLASS(vmbus_channel,
TP_PROTO(const struct vmbus_channel *channel),
TP_ARGS(channel),
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 70b30e223a57..40e2b9f91163 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -132,12 +132,6 @@ struct hv_per_cpu_context {
* basis.
*/
struct tasklet_struct msg_dpc;
-
- /*
- * To optimize the mapping of relid to channel, maintain
- * per-cpu list of the channels based on their CPU affinity.
- */
- struct list_head chan_list;
};
struct hv_context {
@@ -202,6 +196,8 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
/* TODO: Need to make this configurable */
#define MAX_NUM_CHANNELS_SUPPORTED 256
+#define MAX_CHANNEL_RELIDS \
+ max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)
enum vmbus_connect_state {
DISCONNECTED,
@@ -212,12 +208,13 @@ enum vmbus_connect_state {
#define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT
-struct vmbus_connection {
- /*
- * CPU on which the initial host contact was made.
- */
- int connect_cpu;
+/*
+ * The CPU that Hyper-V will interrupt for VMBUS messages, such as
+ * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
+ */
+#define VMBUS_CONNECT_CPU 0
+struct vmbus_connection {
u32 msg_conn_id;
atomic_t offer_in_progress;
@@ -250,6 +247,9 @@ struct vmbus_connection {
struct list_head chn_list;
struct mutex channel_mutex;
+ /* Array of channels */
+ struct vmbus_channel **channels;
+
/*
* An offer message is handled first on the work_queue, and then
* is further handled on handle_primary_chan_wq or
@@ -317,6 +317,7 @@ struct vmbus_channel_message_table_entry {
enum vmbus_channel_message_type message_type;
enum vmbus_message_handler_type handler_type;
void (*message_handler)(struct vmbus_channel_message_header *msg);
+ u32 min_payload_len;
};
extern const struct vmbus_channel_message_table_entry
@@ -336,6 +337,9 @@ int vmbus_add_channel_kobj(struct hv_device *device_obj,
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);
+void vmbus_channel_map_relid(struct vmbus_channel *channel);
+void vmbus_channel_unmap_relid(struct vmbus_channel *channel);
+
struct vmbus_channel *relid2channel(u32 relid);
void vmbus_free_channels(void);
@@ -374,12 +378,7 @@ static inline void hv_poll_channel(struct vmbus_channel *channel,
{
if (!channel)
return;
-
- if (in_interrupt() && (channel->target_cpu == smp_processor_id())) {
- cb(channel);
- return;
- }
- smp_call_function_single(channel->target_cpu, cb, channel, true);
+ cb(channel);
}
enum hvutil_device_state {
@@ -396,6 +395,54 @@ enum delay {
MESSAGE_DELAY = 1,
};
+extern const struct vmbus_device vmbus_devs[];
+
+static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
+{
+ return vmbus_devs[channel->device_id].perf_device;
+}
+
+static inline bool hv_is_alloced_cpu(unsigned int cpu)
+{
+ struct vmbus_channel *channel, *sc;
+
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+ /*
+ * List additions/deletions as well as updates of the target CPUs are
+ * protected by channel_mutex.
+ */
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (!hv_is_perf_channel(channel))
+ continue;
+ if (channel->target_cpu == cpu)
+ return true;
+ list_for_each_entry(sc, &channel->sc_list, sc_list) {
+ if (sc->target_cpu == cpu)
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline void hv_set_alloced_cpu(unsigned int cpu)
+{
+ cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
+}
+
+static inline void hv_clear_alloced_cpu(unsigned int cpu)
+{
+ if (hv_is_alloced_cpu(cpu))
+ return;
+ cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
+}
+
+static inline void hv_update_alloced_cpus(unsigned int old_cpu,
+ unsigned int new_cpu)
+{
+ hv_set_alloced_cpu(new_cpu);
+ hv_clear_alloced_cpu(old_cpu);
+}
+
#ifdef CONFIG_HYPERV_TESTING
int hv_debug_add_dev_dir(struct hv_device *dev);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index e06c6b9555cf..9147ee9d5f7d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -117,14 +117,6 @@ static int vmbus_exists(void)
return 0;
}
-#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
-static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
-{
- int i;
- for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
- sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
-}
-
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid / 32;
@@ -201,7 +193,7 @@ static ssize_t class_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "{%pUl}\n",
- hv_dev->channel->offermsg.offer.if_type.b);
+ &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);
@@ -213,7 +205,7 @@ static ssize_t device_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "{%pUl}\n",
- hv_dev->channel->offermsg.offer.if_instance.b);
+ &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);
@@ -221,10 +213,8 @@ static ssize_t modalias_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- char alias_name[VMBUS_ALIAS_LEN + 1];
- print_alias_name(hv_dev, alias_name);
- return sprintf(buf, "vmbus:%s\n", alias_name);
+ return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
@@ -693,12 +683,9 @@ __ATTRIBUTE_GROUPS(vmbus_dev);
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
struct hv_device *dev = device_to_hv_device(device);
- int ret;
- char alias_name[VMBUS_ALIAS_LEN + 1];
+ const char *format = "MODALIAS=vmbus:%*phN";
- print_alias_name(dev, alias_name);
- ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
- return ret;
+ return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
static const struct hv_vmbus_device_id *
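The modalias and uevent hunks above replace the hand-rolled GUID hex loop with the kernel's %*phN printf extension, which prints a buffer as bare hex digits, the field width giving the byte count. A small sketch of the equivalence, assuming a 16-byte guid and illustrative names:

	static void demo_modalias(const u8 guid[16], char *out)
	{
		int i, n;

		/* New form: 32 hex characters in one call. */
		n = sprintf(out, "vmbus:%*phN", 16, guid);

		/* Old form: the removed per-byte loop. */
		n = sprintf(out, "vmbus:");
		for (i = 0; i < 16; i++)
			n += sprintf(out + n, "%02x", guid[i]);
	}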
@@ -1033,7 +1020,10 @@ static struct bus_type hv_bus = {
struct onmessage_work_context {
struct work_struct work;
- struct hv_message msg;
+ struct {
+ struct hv_message_header header;
+ u8 payload[];
+ } msg;
};
static void vmbus_onmessage_work(struct work_struct *work)
@@ -1046,7 +1036,8 @@ static void vmbus_onmessage_work(struct work_struct *work)
ctx = container_of(work, struct onmessage_work_context,
work);
- vmbus_onmessage(&ctx->msg);
+ vmbus_onmessage((struct vmbus_channel_message_header *)
+ &ctx->msg.payload);
kfree(ctx);
}
@@ -1061,6 +1052,13 @@ void vmbus_on_msg_dpc(unsigned long data)
struct onmessage_work_context *ctx;
u32 message_type = msg->header.message_type;
+ /*
+ * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
+ * it is used in the 'struct vmbus_channel_message_header' definition,
+ * which is supposed to match the hypervisor ABI.
+ */
+ BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
+
if (message_type == HVMSG_NONE)
/* no msg */
return;
@@ -1074,41 +1072,88 @@ void vmbus_on_msg_dpc(unsigned long data)
goto msg_handled;
}
+ if (msg->header.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
+ WARN_ONCE(1, "payload size is too large (%d)\n",
+ msg->header.payload_size);
+ goto msg_handled;
+ }
+
entry = &channel_message_table[hdr->msgtype];
if (!entry->message_handler)
goto msg_handled;
+ if (msg->header.payload_size < entry->min_payload_len) {
+ WARN_ONCE(1, "message too short: msgtype=%d len=%d\n",
+ hdr->msgtype, msg->header.payload_size);
+ goto msg_handled;
+ }
+
if (entry->handler_type == VMHT_BLOCKING) {
- ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ ctx = kmalloc(sizeof(*ctx) + msg->header.payload_size,
+ GFP_ATOMIC);
if (ctx == NULL)
return;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, msg, sizeof(*msg));
+ memcpy(&ctx->msg, msg, sizeof(msg->header) +
+ msg->header.payload_size);
/*
* The host can generate a rescind message while we
* may still be handling the original offer. We deal with
- * this condition by ensuring the processing is done on the
- * same CPU.
+ * this condition by relying on the synchronization provided
+ * by offer_in_progress and by channel_mutex. See also the
+ * inline comments in vmbus_onoffer_rescind().
*/
switch (hdr->msgtype) {
case CHANNELMSG_RESCIND_CHANNELOFFER:
/*
* If we are handling the rescind message;
* schedule the work on the global work queue.
+ *
+ * The OFFER message and the RESCIND message should
+ * not be handled by the same serialized work queue,
+ * because the OFFER handler may call vmbus_open(),
+ * which tries to open the channel by sending an
+ * OPEN_CHANNEL message to the host and waits for
+ * the host's response; however, if the host has
+ * rescinded the channel before it receives the
+ * OPEN_CHANNEL message, the host just silently
+ * ignores the OPEN_CHANNEL message; as a result,
+ * the guest's OFFER handler hangs forever, if we
+ * handle the RESCIND message in the same serialized
+ * work queue: the RESCIND handler cannot start to
+ * run before the OFFER handler finishes.
*/
- schedule_work_on(vmbus_connection.connect_cpu,
- &ctx->work);
+ schedule_work(&ctx->work);
break;
case CHANNELMSG_OFFERCHANNEL:
+ /*
+ * The host sends the offer message of a given channel
+ * before sending the rescind message of the same
+ * channel. These messages are sent to the guest's
+ * connect CPU; the guest then starts processing them
+ * in the tasklet handler on this CPU:
+ *
+ * VMBUS_CONNECT_CPU
+ *
+ * [vmbus_on_msg_dpc()]
+ * atomic_inc() // CHANNELMSG_OFFERCHANNEL
+ * queue_work()
+ * ...
+ * [vmbus_on_msg_dpc()]
+ * schedule_work() // CHANNELMSG_RESCIND_CHANNELOFFER
+ *
+ * We rely on the memory-ordering properties of the
+ * queue_work() and schedule_work() primitives, which
+ * guarantee that the atomic increment will be visible
+ * to the CPUs which will execute the offer & rescind
+ * works by the time these works will start execution.
+ */
atomic_inc(&vmbus_connection.offer_in_progress);
- queue_work_on(vmbus_connection.connect_cpu,
- vmbus_connection.work_queue,
- &ctx->work);
- break;
+ fallthrough;
default:
queue_work(vmbus_connection.work_queue, &ctx->work);
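The onmessage_work_context now ends in a flexible array member, letting vmbus_on_msg_dpc() size the allocation to the actual payload instead of a full worst-case struct hv_message. A minimal kernel-side sketch of that pattern (demo_* names are illustrative):

	struct demo_ctx {
		struct work_struct work;
		struct {
			struct hv_message_header header;
			u8 payload[];
		} msg;
	};

	static struct demo_ctx *demo_alloc(const struct hv_message *src)
	{
		struct demo_ctx *ctx;

		/* Size the tail to the payload actually received. */
		ctx = kmalloc(sizeof(*ctx) + src->header.payload_size,
			      GFP_ATOMIC);
		if (!ctx)
			return NULL;
		memcpy(&ctx->msg, src,
		       sizeof(src->header) + src->header.payload_size);
		return ctx;
	}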
@@ -1133,10 +1178,11 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
WARN_ON(!is_hvsock_channel(channel));
/*
- * sizeof(*ctx) is small and the allocation should really not fail,
+ * Allocation size is small and the allocation should really not fail,
* otherwise the state of the hv_sock connections ends up in limbo.
*/
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
+ ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
+ GFP_KERNEL | __GFP_NOFAIL);
/*
* So far, these are not really used by Linux. Just set them to the
@@ -1146,31 +1192,17 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
ctx->msg.header.payload_size = sizeof(*rescind);
/* These values are actually used by Linux. */
- rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.u.payload;
+ rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
rescind->child_relid = channel->offermsg.child_relid;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
- queue_work_on(vmbus_connection.connect_cpu,
- vmbus_connection.work_queue,
- &ctx->work);
+ queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */
/*
- * Direct callback for channels using other deferred processing
- */
-static void vmbus_channel_isr(struct vmbus_channel *channel)
-{
- void (*callback_fn)(void *);
-
- callback_fn = READ_ONCE(channel->onchannel_callback);
- if (likely(callback_fn != NULL))
- (*callback_fn)(channel->channel_callback_context);
-}
-
-/*
* Schedule all channels with events pending
*/
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
@@ -1200,6 +1232,7 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
return;
for_each_set_bit(relid, recv_int_page, maxbits) {
+ void (*callback_fn)(void *context);
struct vmbus_channel *channel;
if (!sync_test_and_clear_bit(relid, recv_int_page))
@@ -1209,33 +1242,54 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (relid == 0)
continue;
+ /*
+ * Pairs with the kfree_rcu() in vmbus_chan_release().
+ * Guarantees that the channel data structure doesn't
+ * get freed while the channel pointer below is being
+ * dereferenced.
+ */
rcu_read_lock();
/* Find channel based on relid */
- list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
- if (channel->offermsg.child_relid != relid)
- continue;
+ channel = relid2channel(relid);
+ if (channel == NULL)
+ goto sched_unlock_rcu;
- if (channel->rescind)
- continue;
+ if (channel->rescind)
+ goto sched_unlock_rcu;
- trace_vmbus_chan_sched(channel);
+ /*
+ * Make sure that the ring buffer data structure doesn't get
+ * freed while we dereference the ring buffer pointer. Test
+ * for the channel's onchannel_callback being NULL within a
+ * sched_lock critical section. See also the inline comments
+ * in vmbus_reset_channel_cb().
+ */
+ spin_lock(&channel->sched_lock);
- ++channel->interrupts;
+ callback_fn = channel->onchannel_callback;
+ if (unlikely(callback_fn == NULL))
+ goto sched_unlock;
- switch (channel->callback_mode) {
- case HV_CALL_ISR:
- vmbus_channel_isr(channel);
- break;
+ trace_vmbus_chan_sched(channel);
- case HV_CALL_BATCHED:
- hv_begin_read(&channel->inbound);
- /* fallthrough */
- case HV_CALL_DIRECT:
- tasklet_schedule(&channel->callback_event);
- }
+ ++channel->interrupts;
+
+ switch (channel->callback_mode) {
+ case HV_CALL_ISR:
+ (*callback_fn)(channel->channel_callback_context);
+ break;
+
+ case HV_CALL_BATCHED:
+ hv_begin_read(&channel->inbound);
+ fallthrough;
+ case HV_CALL_DIRECT:
+ tasklet_schedule(&channel->callback_event);
}
+sched_unlock:
+ spin_unlock(&channel->sched_lock);
+sched_unlock_rcu:
rcu_read_unlock();
}
}
@@ -1364,7 +1418,6 @@ static int vmbus_bus_init(void)
{
int ret;
- /* Hypervisor initialization...setup hypercall page..etc */
ret = hv_init();
if (ret != 0) {
pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
@@ -1553,8 +1606,24 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
return attribute->show(chan, buf);
}
+static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf,
+ size_t count)
+{
+ const struct vmbus_chan_attribute *attribute
+ = container_of(attr, struct vmbus_chan_attribute, attr);
+ struct vmbus_channel *chan
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(chan, buf, count);
+}
+
static const struct sysfs_ops vmbus_chan_sysfs_ops = {
.show = vmbus_chan_attr_show,
+ .store = vmbus_chan_attr_store,
};
static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
@@ -1625,11 +1694,110 @@ static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
}
static VMBUS_CHAN_ATTR_RO(write_avail);
-static ssize_t show_target_cpu(struct vmbus_channel *channel, char *buf)
+static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
-static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
+static ssize_t target_cpu_store(struct vmbus_channel *channel,
+ const char *buf, size_t count)
+{
+ u32 target_cpu, origin_cpu;
+ ssize_t ret = count;
+
+ if (vmbus_proto_version < VERSION_WIN10_V4_1)
+ return -EIO;
+
+ if (sscanf(buf, "%uu", &target_cpu) != 1)
+ return -EIO;
+
+ /* Validate target_cpu for the cpumask_test_cpu() operation below. */
+ if (target_cpu >= nr_cpumask_bits)
+ return -EINVAL;
+
+ /* No CPUs should come up or down during this. */
+ cpus_read_lock();
+
+ if (!cpumask_test_cpu(target_cpu, cpu_online_mask)) {
+ cpus_read_unlock();
+ return -EINVAL;
+ }
+
+ /*
+ * Synchronizes target_cpu_store() and channel closure:
+ *
+ * { Initially: state = CHANNEL_OPENED }
+ *
+ * CPU1 CPU2
+ *
+ * [target_cpu_store()] [vmbus_disconnect_ring()]
+ *
+ * LOCK channel_mutex LOCK channel_mutex
+ * LOAD r1 = state LOAD r2 = state
+ * IF (r1 == CHANNEL_OPENED) IF (r2 == CHANNEL_OPENED)
+ * SEND MODIFYCHANNEL STORE state = CHANNEL_OPEN
+ * [...] SEND CLOSECHANNEL
+ * UNLOCK channel_mutex UNLOCK channel_mutex
+ *
+ * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
+ * CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
+ *
+ * Note. The host processes the channel messages "sequentially", in
+ * the order in which they are received on a per-partition basis.
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ /*
+ * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
+ * avoid sending the message and fail here for such channels.
+ */
+ if (channel->state != CHANNEL_OPENED_STATE) {
+ ret = -EIO;
+ goto cpu_store_unlock;
+ }
+
+ origin_cpu = channel->target_cpu;
+ if (target_cpu == origin_cpu)
+ goto cpu_store_unlock;
+
+ if (vmbus_send_modifychannel(channel->offermsg.child_relid,
+ hv_cpu_number_to_vp_number(target_cpu))) {
+ ret = -EIO;
+ goto cpu_store_unlock;
+ }
+
+ /*
+ * Warning. At this point, there is *no* guarantee that the host will
+ * have successfully processed the vmbus_send_modifychannel() request.
+ * See the header comment of vmbus_send_modifychannel() for more info.
+ *
+ * Lags in the processing of the above vmbus_send_modifychannel() can
+ * result in missed interrupts if the "old" target CPU is taken offline
+ * before Hyper-V starts sending interrupts to the "new" target CPU.
+ * But apart from this offlining scenario, the code tolerates such
+ * lags. It will function correctly even if a channel interrupt comes
+ * in on a CPU that is different from the channel target_cpu value.
+ */
+
+ channel->target_cpu = target_cpu;
+ channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
+ channel->numa_node = cpu_to_node(target_cpu);
+
+ /* See init_vp_index(). */
+ if (hv_is_perf_channel(channel))
+ hv_update_alloced_cpus(origin_cpu, target_cpu);
+
+ /* Currently set only for storvsc channels. */
+ if (channel->change_target_cpu_callback) {
+ (*channel->change_target_cpu_callback)(channel,
+ origin_cpu, target_cpu);
+ }
+
+cpu_store_unlock:
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
+ return ret;
+}
+static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
static ssize_t channel_pending_show(struct vmbus_channel *channel,
char *buf)
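The writable 'cpu' attribute defined above lets an administrator retarget a channel's interrupts to another online CPU. A hypothetical userspace sketch; the sysfs path layout and the DEVICE/RELID placeholders are assumptions, not from the patch:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Substitute a real device GUID and channel relid. */
		int fd = open("/sys/bus/vmbus/devices/DEVICE/channels/RELID/cpu",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "2", 1) != 1)	/* retarget to CPU 2 */
			return 1;
		close(fd);
		return 0;
	}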
@@ -1830,7 +1998,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
int ret;
dev_set_name(&child_device_obj->device, "%pUl",
- child_device_obj->channel->offermsg.offer.if_instance.b);
+ &child_device_obj->channel->offermsg.offer.if_instance);
child_device_obj->device.bus = &hv_bus;
child_device_obj->device.parent = &hv_acpi_dev->dev;
@@ -2221,9 +2389,12 @@ static int vmbus_bus_suspend(struct device *dev)
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
/*
- * Invalidate the field. Upon resume, vmbus_onoffer() will fix
- * up the field, and the other fields (if necessary).
+ * Remove the channel from the array of channels and invalidate
+ * the channel's relid. Upon resume, vmbus_onoffer() will fix
+ * up the relid (and other fields, if necessary) and add the
+ * channel back to the array.
*/
+ vmbus_channel_unmap_relid(channel);
channel->offermsg.child_relid = INVALID_RELID;
if (is_hvsock_channel(channel)) {
@@ -2470,6 +2641,7 @@ static void __exit vmbus_exit(void)
hv_debug_rm_all_dir();
vmbus_free_channels();
+ kfree(vmbus_connection.channels);
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
kmsg_dump_unregister(&hv_kmsg_dumper);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4c62f900bf7e..288ae9f63588 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,6 +324,16 @@ config SENSORS_FAM15H_POWER
This driver can also be built as a module. If so, the module
will be called fam15h_power.
+config SENSORS_AMD_ENERGY
+ tristate "AMD RAPL MSR based Energy driver"
+ depends on X86
+ help
+ If you say yes here you get support for core and package energy
+ sensors, based on RAPL MSR for AMD family 17h and above CPUs.
+
+ This driver can also be built as a module. If so, the module
+	  will be called amd_energy.
+
config SENSORS_APPLESMC
tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
depends on INPUT && X86
@@ -404,6 +414,31 @@ config SENSORS_ATXP1
This driver can also be built as a module. If so, the module
will be called atxp1.
+config SENSORS_BT1_PVT
+ tristate "Baikal-T1 Process, Voltage, Temperature sensor driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ help
+	  If you say yes here you get support for the Baikal-T1 PVT sensor
+ embedded into the SoC.
+
+ This driver can also be built as a module. If so, the module will be
+ called bt1-pvt.
+
+config SENSORS_BT1_PVT_ALARMS
+ bool "Enable Baikal-T1 PVT sensor alarms"
+ depends on SENSORS_BT1_PVT
+ help
+	  The Baikal-T1 PVT IP-block provides threshold registers for each
+	  supported sensor, but the corresponding interrupts can be
+	  generated by the thresholds comparator only in synchronization with
+	  a data conversion. Additionally, only one sensor's data can be
+	  converted at a time. All of this makes it impossible to implement
+	  the hwmon alarms without periodically switching between the PVT
+	  sensors. By default the data conversion is performed on demand
+	  from user-space. If this config is enabled, the data conversion is
+	  performed periodically and the results are saved in the internal
+	  driver cache.
+
config SENSORS_DRIVETEMP
tristate "Hard disk drives with temperature sensors"
depends on SCSI && ATA
@@ -523,6 +558,15 @@ config SENSORS_F75375S
This driver can also be built as a module. If so, the module
will be called f75375s.
+config SENSORS_GSC
+ tristate "Gateworks System Controller ADC"
+ depends on MFD_GATEWORKS_GSC
+ help
+ Support for the Gateworks System Controller A/D converters.
+
+ To compile this driver as a module, choose M here:
+ the module will be called gsc-hwmon.
+
config SENSORS_MC13783_ADC
tristate "Freescale MC13783/MC13892 ADC"
depends on MFD_MC13XXX
@@ -1198,10 +1242,11 @@ config SENSORS_LM90
help
If you say yes here you get support for National Semiconductor LM90,
LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
- Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
- MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
- Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, GMT G781, and
- Texas Instruments TMP451 sensor chips.
+ Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6654, MAX6657, MAX6658,
+ MAX6659, MAX6680, MAX6681, MAX6692, MAX6695, MAX6696,
+ ON Semiconductor NCT1008, Winbond/Nuvoton W83L771W/G/AWG/ASG,
+ Philips SA56004, GMT G781, and Texas Instruments TMP451
+ sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
@@ -1340,10 +1385,12 @@ config SENSORS_NCT7802
config SENSORS_NCT7904
tristate "Nuvoton NCT7904"
- depends on I2C
+ depends on I2C && WATCHDOG
+ select WATCHDOG_CORE
help
If you say yes here you get support for the Nuvoton NCT7904
- hardware monitoring chip, including manual fan speed control.
+ hardware monitoring chip, including manual fan speed control
+ and support for the integrated watchdog.
This driver can also be built as a module. If so, the module
will be called nct7904.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b0b9c8e57176..3e32c21f5efe 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
+obj-$(CONFIG_SENSORS_AMD_ENERGY) += amd_energy.o
obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o
obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o
@@ -53,6 +54,7 @@ obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o
+obj-$(CONFIG_SENSORS_BT1_PVT) += bt1-pvt.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
@@ -74,6 +76,7 @@ obj-$(CONFIG_SENSORS_G760A) += g760a.o
obj-$(CONFIG_SENSORS_G762) += g762.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
+obj-$(CONFIG_SENSORS_GSC) += gsc-hwmon.o
obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index c7010b91bc13..5a839cc2ed1c 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -716,7 +716,6 @@ static struct i2c_driver adt7411_driver = {
module_i2c_driver(adt7411_driver);
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de> and "
- "Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Sascha Hauer, Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("ADT7411 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
new file mode 100644
index 000000000000..e95b7426106e
--- /dev/null
+++ b/drivers/hwmon/amd_energy.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ */
+#include <asm/cpu_device_id.h>
+
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/processor.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+
+#define DRVNAME "amd_energy"
+
+#define ENERGY_PWR_UNIT_MSR 0xC0010299
+#define ENERGY_CORE_MSR 0xC001029A
+#define ENERGY_PKG_MSR 0xC001029B
+
+#define AMD_ENERGY_UNIT_MASK 0x01F00
+#define AMD_ENERGY_MASK 0xFFFFFFFF
+
+struct sensor_accumulator {
+ u64 energy_ctr;
+ u64 prev_value;
+ char label[10];
+};
+
+struct amd_energy_data {
+ struct hwmon_channel_info energy_info;
+ const struct hwmon_channel_info *info[2];
+ struct hwmon_chip_info chip;
+ struct task_struct *wrap_accumulate;
+ /* Lock around the accumulator */
+ struct mutex lock;
+ /* An accumulator for each core and socket */
+ struct sensor_accumulator *accums;
+ /* Energy Status Units */
+ u64 energy_units;
+ int nr_cpus;
+ int nr_socks;
+ int core_id;
+};
+
+static int amd_energy_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel,
+ const char **str)
+{
+ struct amd_energy_data *data = dev_get_drvdata(dev);
+
+ *str = data->accums[channel].label;
+ return 0;
+}
+
+static void get_energy_units(struct amd_energy_data *data)
+{
+ u64 rapl_units;
+
+ rdmsrl_safe(ENERGY_PWR_UNIT_MSR, &rapl_units);
+ data->energy_units = (rapl_units & AMD_ENERGY_UNIT_MASK) >> 8;
+}
+
+static void accumulate_socket_delta(struct amd_energy_data *data,
+ int sock, int cpu)
+{
+ struct sensor_accumulator *s_accum;
+ u64 input;
+
+ mutex_lock(&data->lock);
+ rdmsrl_safe_on_cpu(cpu, ENERGY_PKG_MSR, &input);
+ input &= AMD_ENERGY_MASK;
+
+ s_accum = &data->accums[data->nr_cpus + sock];
+ if (input >= s_accum->prev_value)
+ s_accum->energy_ctr +=
+ input - s_accum->prev_value;
+ else
+ s_accum->energy_ctr += UINT_MAX -
+ s_accum->prev_value + input;
+
+ s_accum->prev_value = input;
+ mutex_unlock(&data->lock);
+}
+
+static void accumulate_core_delta(struct amd_energy_data *data)
+{
+ struct sensor_accumulator *c_accum;
+ u64 input;
+ int cpu;
+
+ mutex_lock(&data->lock);
+ if (data->core_id >= data->nr_cpus)
+ data->core_id = 0;
+
+ cpu = data->core_id;
+
+ if (!cpu_online(cpu))
+ goto out;
+
+ rdmsrl_safe_on_cpu(cpu, ENERGY_CORE_MSR, &input);
+ input &= AMD_ENERGY_MASK;
+
+ c_accum = &data->accums[cpu];
+
+ if (input >= c_accum->prev_value)
+ c_accum->energy_ctr +=
+ input - c_accum->prev_value;
+ else
+ c_accum->energy_ctr += UINT_MAX -
+ c_accum->prev_value + input;
+
+ c_accum->prev_value = input;
+
+out:
+ data->core_id++;
+ mutex_unlock(&data->lock);
+}
+
+static void read_accumulate(struct amd_energy_data *data)
+{
+ int sock;
+
+ for (sock = 0; sock < data->nr_socks; sock++) {
+ int cpu;
+
+ cpu = cpumask_first_and(cpu_online_mask,
+ cpumask_of_node(sock));
+
+ accumulate_socket_delta(data, sock, cpu);
+ }
+
+ accumulate_core_delta(data);
+}
+
+static void amd_add_delta(struct amd_energy_data *data, int ch,
+ int cpu, long *val, bool is_core)
+{
+ struct sensor_accumulator *s_accum, *c_accum;
+ u64 input;
+
+ mutex_lock(&data->lock);
+ if (!is_core) {
+ rdmsrl_safe_on_cpu(cpu, ENERGY_PKG_MSR, &input);
+ input &= AMD_ENERGY_MASK;
+
+ s_accum = &data->accums[ch];
+ if (input >= s_accum->prev_value)
+ input += s_accum->energy_ctr -
+ s_accum->prev_value;
+ else
+ input += UINT_MAX - s_accum->prev_value +
+ s_accum->energy_ctr;
+ } else {
+ rdmsrl_safe_on_cpu(cpu, ENERGY_CORE_MSR, &input);
+ input &= AMD_ENERGY_MASK;
+
+ c_accum = &data->accums[ch];
+ if (input >= c_accum->prev_value)
+ input += c_accum->energy_ctr -
+ c_accum->prev_value;
+ else
+ input += UINT_MAX - c_accum->prev_value +
+ c_accum->energy_ctr;
+ }
+
+ /* Energy consumed = (1/(2^ESU) * RAW * 1000000UL) μJoules */
+ *val = div64_ul(input * 1000000UL, BIT(data->energy_units));
+
+ mutex_unlock(&data->lock);
+}
+
+static int amd_energy_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct amd_energy_data *data = dev_get_drvdata(dev);
+ int cpu;
+
+ if (channel >= data->nr_cpus) {
+ cpu = cpumask_first_and(cpu_online_mask,
+ cpumask_of_node
+ (channel - data->nr_cpus));
+ amd_add_delta(data, channel, cpu, val, false);
+ } else {
+ cpu = channel;
+ if (!cpu_online(cpu))
+ return -ENODEV;
+
+ amd_add_delta(data, channel, cpu, val, true);
+ }
+
+ return 0;
+}
+
+static umode_t amd_energy_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int energy_accumulator(void *p)
+{
+ struct amd_energy_data *data = (struct amd_energy_data *)p;
+
+ while (!kthread_should_stop()) {
+ /*
+		 * Ignore conditions such as the CPU being
+		 * offline or an rdmsr failure.
+ */
+ read_accumulate(data);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+
+ /*
+		 * On a 240W system, with the default resolution, the
+		 * Socket Energy status register may wrap around in
+		 * 2^32 * 15.3e-6 / 240 = 273.8 secs (~4.5 mins),
+		 *
+		 * so let us accumulate every 100 secs.
+ */
+ schedule_timeout(msecs_to_jiffies(100000));
+ }
+ return 0;
+}
+
+static const struct hwmon_ops amd_energy_ops = {
+ .is_visible = amd_energy_is_visible,
+ .read = amd_energy_read,
+ .read_string = amd_energy_read_labels,
+};
+
+static int amd_create_sensor(struct device *dev,
+ struct amd_energy_data *data,
+ u8 type, u32 config)
+{
+ struct hwmon_channel_info *info = &data->energy_info;
+ struct sensor_accumulator *accums;
+ int i, num_siblings, cpus, sockets;
+ u32 *s_config;
+
+ /* Identify the number of siblings per core */
+ num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+
+ sockets = num_possible_nodes();
+
+ /*
+ * Energy counter register is accessed at core level.
+ * Hence, filterout the siblings.
+ */
+ cpus = num_present_cpus() / num_siblings;
+
+ s_config = devm_kcalloc(dev, cpus + sockets,
+ sizeof(u32), GFP_KERNEL);
+ if (!s_config)
+ return -ENOMEM;
+
+ accums = devm_kcalloc(dev, cpus + sockets,
+ sizeof(struct sensor_accumulator),
+ GFP_KERNEL);
+ if (!accums)
+ return -ENOMEM;
+
+ info->type = type;
+ info->config = s_config;
+
+ data->nr_cpus = cpus;
+ data->nr_socks = sockets;
+ data->accums = accums;
+
+ for (i = 0; i < cpus + sockets; i++) {
+ s_config[i] = config;
+ if (i < cpus)
+ scnprintf(accums[i].label, 10,
+ "Ecore%03u", i);
+ else
+ scnprintf(accums[i].label, 10,
+ "Esocket%u", (i - cpus));
+ }
+
+ return 0;
+}
+
+static int amd_energy_probe(struct platform_device *pdev)
+{
+ struct device *hwmon_dev;
+ struct amd_energy_data *data;
+ struct device *dev = &pdev->dev;
+
+ data = devm_kzalloc(dev,
+ sizeof(struct amd_energy_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->chip.ops = &amd_energy_ops;
+ data->chip.info = data->info;
+
+ dev_set_drvdata(dev, data);
+ /* Populate per-core energy reporting */
+ data->info[0] = &data->energy_info;
+ amd_create_sensor(dev, data, hwmon_energy,
+ HWMON_E_INPUT | HWMON_E_LABEL);
+
+ mutex_init(&data->lock);
+ get_energy_units(data);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, DRVNAME,
+ data,
+ &data->chip,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ data->wrap_accumulate = kthread_run(energy_accumulator, data,
+ "%s", dev_name(hwmon_dev));
+ if (IS_ERR(data->wrap_accumulate))
+ return PTR_ERR(data->wrap_accumulate);
+
+ return PTR_ERR_OR_ZERO(data->wrap_accumulate);
+}
+
+static int amd_energy_remove(struct platform_device *pdev)
+{
+ struct amd_energy_data *data = dev_get_drvdata(&pdev->dev);
+
+ if (data && data->wrap_accumulate)
+ kthread_stop(data->wrap_accumulate);
+
+ return 0;
+}
+
+static const struct platform_device_id amd_energy_ids[] = {
+ { .name = DRVNAME, },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, amd_energy_ids);
+
+static struct platform_driver amd_energy_driver = {
+ .probe = amd_energy_probe,
+ .remove = amd_energy_remove,
+ .id_table = amd_energy_ids,
+ .driver = {
+ .name = DRVNAME,
+ },
+};
+
+static struct platform_device *amd_energy_platdev;
+
+static const struct x86_cpu_id cpu_ids[] __initconst = {
+ X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, cpu_ids);
+
+static int __init amd_energy_init(void)
+{
+ int ret;
+
+ if (!x86_match_cpu(cpu_ids))
+ return -ENODEV;
+
+ ret = platform_driver_register(&amd_energy_driver);
+ if (ret)
+ return ret;
+
+ amd_energy_platdev = platform_device_alloc(DRVNAME, 0);
+ if (!amd_energy_platdev) {
+ platform_driver_unregister(&amd_energy_driver);
+ return -ENOMEM;
+ }
+
+ ret = platform_device_add(amd_energy_platdev);
+ if (ret) {
+ platform_device_put(amd_energy_platdev);
+ platform_driver_unregister(&amd_energy_driver);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit amd_energy_exit(void)
+{
+ platform_device_unregister(amd_energy_platdev);
+ platform_driver_unregister(&amd_energy_driver);
+}
+
+module_init(amd_energy_init);
+module_exit(amd_energy_exit);
+
+MODULE_DESCRIPTION("Driver for AMD Energy reporting from RAPL MSR via HWMON interface");
+MODULE_AUTHOR("Naveen Krishna Chatradhi <nchatrad@amd.com>");
+MODULE_LICENSE("GPL");
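To make the accumulator's arithmetic concrete: with the default ESU of 16, one counter LSB is 1/2^16 J ≈ 15.3 μJ, which is where energy_accumulator()'s 273.8 s wrap estimate comes from. A standalone sketch of the wrap-safe delta accumulation and the microjoule conversion, mirroring the driver's UINT_MAX wrap handling (values assumed):

	#include <stdint.h>

	#define ENERGY_MASK 0xFFFFFFFFull	/* 32-bit counter */

	static uint64_t accumulate(uint64_t prev, uint64_t raw, uint64_t acc)
	{
		raw &= ENERGY_MASK;
		if (raw >= prev)
			acc += raw - prev;
		else				/* counter wrapped */
			acc += ENERGY_MASK - prev + raw;
		return acc;
	}

	static uint64_t to_microjoules(uint64_t acc, unsigned int esu)
	{
		/* uJ = raw * 10^6 / 2^esu; esu = 16 gives ~15.26 uJ/LSB */
		return (acc * 1000000ull) >> esu;
	}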
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index ec93b8d673f5..316618409315 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -156,14 +156,19 @@ static struct workqueue_struct *applesmc_led_wq;
*/
static int wait_read(void)
{
+ unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
u8 status;
int us;
+
for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
- udelay(us);
+ usleep_range(us, us * 16);
status = inb(APPLESMC_CMD_PORT);
/* read: wait for smc to settle */
if (status & 0x01)
return 0;
+ /* timeout: give up */
+ if (time_after(jiffies, end))
+ break;
}
pr_warn("wait_read() fail: 0x%02x\n", status);
@@ -178,10 +183,11 @@ static int send_byte(u8 cmd, u16 port)
{
u8 status;
int us;
+ unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
outb(cmd, port);
for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
- udelay(us);
+ usleep_range(us, us * 16);
status = inb(APPLESMC_CMD_PORT);
/* write: wait for smc to settle */
if (status & 0x02)
@@ -190,7 +196,7 @@ static int send_byte(u8 cmd, u16 port)
if (status & 0x04)
return 0;
/* timeout: give up */
- if (us << 1 == APPLESMC_MAX_WAIT)
+ if (time_after(jiffies, end))
break;
/* busy: long wait and resend */
udelay(APPLESMC_RETRY_WAIT);
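The hunks above bound the exponential backoff with a jiffies deadline and sleep with usleep_range() instead of busy-waiting in udelay(). The generic shape of that poll loop, with illustrative names:

	static int demo_wait_ready(void)
	{
		unsigned long end = jiffies + usecs_to_jiffies(MAX_WAIT_US);
		int us;

		for (us = MIN_WAIT_US; us < MAX_WAIT_US; us <<= 1) {
			usleep_range(us, us * 16);	/* sleep, don't spin */
			if (device_ready())
				return 0;
			if (time_after(jiffies, end))
				break;			/* budget exhausted */
		}
		return -ETIMEDOUT;
	}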
diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
new file mode 100644
index 000000000000..1a9772fb1f73
--- /dev/null
+++ b/drivers/hwmon/bt1-pvt.c
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 Process, Voltage, Temperature sensor driver
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seqlock.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include "bt1-pvt.h"
+
+/*
+ * For the sake of code simplification we created the sensors info table
+ * with the sensor names, activation modes, threshold registers base address
+ * and the threshold bit fields.
+ */
+static const struct pvt_sensor_info pvt_info[] = {
+ PVT_SENSOR_INFO(0, "CPU Core Temperature", hwmon_temp, TEMP, TTHRES),
+ PVT_SENSOR_INFO(0, "CPU Core Voltage", hwmon_in, VOLT, VTHRES),
+ PVT_SENSOR_INFO(1, "CPU Core Low-Vt", hwmon_in, LVT, LTHRES),
+ PVT_SENSOR_INFO(2, "CPU Core High-Vt", hwmon_in, HVT, HTHRES),
+ PVT_SENSOR_INFO(3, "CPU Core Standard-Vt", hwmon_in, SVT, STHRES),
+};
+
+/*
+ * The original translation formulae of the temperature (in degrees of Celsius)
+ * to PVT data and vice-versa are following:
+ * N = 1.8322e-8*(T^4) + 2.343e-5*(T^3) + 8.7018e-3*(T^2) + 3.9269*(T^1) +
+ * 1.7204e2,
+ * T = -1.6743e-11*(N^4) + 8.1542e-8*(N^3) + -1.8201e-4*(N^2) +
+ * 3.1020e-1*(N^1) - 4.838e1,
+ * where T = [-48.380, 147.438]C and N = [0, 1023].
+ * They must be altered accordingly to be suitable for integer arithmetic.
+ * The technique is called 'factor redistribution', which just makes sure the
+ * multiplications and divisions are ordered so that the intermediate results
+ * stay within the integer limits. In addition we need to translate the
+ * formulae to accept millidegrees of Celsius. Here is what they look like
+ * after the alterations:
+ * N = (18322e-20*(T^4) + 2343e-13*(T^3) + 87018e-9*(T^2) + 39269e-3*T +
+ * 17204e2) / 1e4,
+ * T = -16743e-12*(D^4) + 81542e-9*(D^3) - 182010e-6*(D^2) + 310200e-3*D -
+ * 48380,
+ * where T = [-48380, 147438] mC and N = [0, 1023].
+ */
+static const struct pvt_poly poly_temp_to_N = {
+ .total_divider = 10000,
+ .terms = {
+ {4, 18322, 10000, 10000},
+ {3, 2343, 10000, 10},
+ {2, 87018, 10000, 10},
+ {1, 39269, 1000, 1},
+ {0, 1720400, 1, 1}
+ }
+};
+
+static const struct pvt_poly poly_N_to_temp = {
+ .total_divider = 1,
+ .terms = {
+ {4, -16743, 1000, 1},
+ {3, 81542, 1000, 1},
+ {2, -182010, 1000, 1},
+ {1, 310200, 1000, 1},
+ {0, -48380, 1, 1}
+ }
+};
+
+/*
+ * Similar alterations are performed for the voltage conversion equations.
+ * The original formulae are:
+ * N = 1.8658e3*V - 1.1572e3,
+ * V = (N + 1.1572e3) / 1.8658e3,
+ * where V = [0.620, 1.168] V and N = [0, 1023].
+ * After the optimization they look as follows:
+ * N = (18658e-3*V - 11572) / 10,
+ * V = N * 10^5 / 18658 + 11572 * 10^4 / 18658.
+ */
+static const struct pvt_poly poly_volt_to_N = {
+ .total_divider = 10,
+ .terms = {
+ {1, 18658, 1000, 1},
+ {0, -11572, 1, 1}
+ }
+};
+
+static const struct pvt_poly poly_N_to_volt = {
+ .total_divider = 10,
+ .terms = {
+ {1, 100000, 18658, 1},
+ {0, 115720000, 1, 18658}
+ }
+};
+
+/*
+ * Here is the polynomial calculation function, which performs the
+ * redistributed terms calculations. It's pretty straightforward. We walk
+ * over each degree term up to the free one, and perform the redistributed
+ * multiplication of the term coefficient, its divider (as for the rational
+ * fraction representation), data power and the rational fraction divider
+ * leftover. Then all of this is collected in a total sum variable, whose
+ * value is normalized by the total divider before being returned.
+ */
+static long pvt_calc_poly(const struct pvt_poly *poly, long data)
+{
+ const struct pvt_poly_term *term = poly->terms;
+ long tmp, ret = 0;
+ int deg;
+
+ do {
+ tmp = term->coef;
+ for (deg = 0; deg < term->deg; ++deg)
+ tmp = mult_frac(tmp, data, term->divider);
+ ret += tmp / term->divider_leftover;
+ } while ((term++)->deg);
+
+ return ret / poly->total_divider;
+}
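+/*
+ * For instance, evaluating poly_temp_to_N at T = 25000 mC (25 C) term by
+ * term gives 28 + 3660 + 54386 + 981725 + 1720400 = 2760199; dividing by
+ * the total divider of 10000 yields N = 276, matching the original
+ * floating-point formula, which gives ~276.0. mult_frac() is what keeps
+ * the intermediate products within the 'long' range at each step.
+ */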
+
+static inline u32 pvt_update(void __iomem *reg, u32 mask, u32 data)
+{
+ u32 old;
+
+ old = readl_relaxed(reg);
+ writel((old & ~mask) | (data & mask), reg);
+
+ return old & mask;
+}
+
+/*
+ * Baikal-T1 PVT mode can be updated only when the controller is disabled.
+ * So first we disable it, then set the new mode together with the controller
+ * getting back enabled. The same applies to the temperature trim and
+ * measurement timeout. If necessary, the interface mutex is supposed
+ * to be locked while the operations are performed.
+ */
+static inline void pvt_set_mode(struct pvt_hwmon *pvt, u32 mode)
+{
+ u32 old;
+
+ mode = FIELD_PREP(PVT_CTRL_MODE_MASK, mode);
+
+ old = pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_MODE_MASK | PVT_CTRL_EN,
+ mode | old);
+}
+
+static inline u32 pvt_calc_trim(long temp)
+{
+ temp = clamp_val(temp, 0, PVT_TRIM_TEMP);
+
+ return DIV_ROUND_UP(temp, PVT_TRIM_STEP);
+}
+
+static inline void pvt_set_trim(struct pvt_hwmon *pvt, u32 trim)
+{
+ u32 old;
+
+ trim = FIELD_PREP(PVT_CTRL_TRIM_MASK, trim);
+
+ old = pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_TRIM_MASK | PVT_CTRL_EN,
+ trim | old);
+}
+
+static inline void pvt_set_tout(struct pvt_hwmon *pvt, u32 tout)
+{
+ u32 old;
+
+ old = pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ writel(tout, pvt->regs + PVT_TTIMEOUT);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, old);
+}
+
+/*
+ * This driver can optionally provide the hwmon alarms for each sensor the PVT
+ * controller supports. The alarms functionality is made compile-time
+ * configurable due to the hardware interface implementation peculiarity
+ * described further in this comment. So if alarms are unnecessary in your
+ * system design it's recommended to keep them disabled to prevent the PVT
+ * IRQs from being periodically raised to keep the data cache/alarm status
+ * up to date.
+ *
+ * Baikal-T1 PVT embedded controller is based on the Analog Bits PVT sensor,
+ * but is equipped with a dedicated control wrapper. It exposes the PVT
+ * sub-block registers space via the APB3 bus. In addition the wrapper provides
+ * a common interrupt vector of the sensors conversion completion events and
+ * threshold value alarms. Alas the wrapper interface hasn't been fully thought
+ * through. There is only one sensor can be activated at a time, for which the
+ * thresholds comparator is enabled right after the data conversion is
+ * completed. Due to this if alarms need to be implemented for all available
+ * sensors we can't just set the thresholds and enable the interrupts. We need
+ * to enable the sensors one after another and let the controller to detect
+ * the alarms by itself at each conversion. This also makes pointless to handle
+ * the alarms interrupts, since in occasion they happen synchronously with
+ * data conversion completion. The best driver design would be to have the
+ * completion interrupts enabled only and keep the converted value in the
+ * driver data cache. This solution is implemented if hwmon alarms are enabled
+ * in this driver. In case if the alarms are disabled, the conversion is
+ * performed on demand at the time a sensors input file is read.
+ */
+
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+
+#define pvt_hard_isr NULL
+
+static irqreturn_t pvt_soft_isr(int irq, void *data)
+{
+ const struct pvt_sensor_info *info;
+ struct pvt_hwmon *pvt = data;
+ struct pvt_cache *cache;
+ u32 val, thres_sts, old;
+
+ /*
+ * DVALID bit will be cleared by reading the data. We need to save the
+ * status before the next conversion happens. Threshold events will be
+ * handled a bit later.
+ */
+ thres_sts = readl(pvt->regs + PVT_RAW_INTR_STAT);
+
+ /*
+	 * Then let's recharge the PVT interface with the next sampling mode.
+ * Lock the interface mutex to serialize trim, timeouts and alarm
+ * thresholds settings.
+ */
+ cache = &pvt->cache[pvt->sensor];
+ info = &pvt_info[pvt->sensor];
+ pvt->sensor = (pvt->sensor == PVT_SENSOR_LAST) ?
+ PVT_SENSOR_FIRST : (pvt->sensor + 1);
+
+ /*
+ * For some reason we have to mask the interrupt before changing the
+ * mode, otherwise sometimes the temperature mode doesn't get
+ * activated even though the actual mode in the ctrl register
+ * corresponds to one. Then we read the data. By doing so we also
+ * recharge the data conversion. After this the mode corresponding
+	 * to the next sensor in the row is set. Finally we re-enable
+	 * the interrupts.
+ */
+ mutex_lock(&pvt->iface_mtx);
+
+ old = pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
+ PVT_INTR_DVALID);
+
+ val = readl(pvt->regs + PVT_DATA);
+
+ pvt_set_mode(pvt, pvt_info[pvt->sensor].mode);
+
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, old);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ /*
+ * We can now update the data cache with data just retrieved from the
+	 * sensor. Lock the write-seqlock to make sure the reader sees
+	 * coherent data.
+ */
+ write_seqlock(&cache->data_seqlock);
+
+ cache->data = FIELD_GET(PVT_DATA_DATA_MASK, val);
+
+ write_sequnlock(&cache->data_seqlock);
+
+ /*
+ * While PVT core is doing the next mode data conversion, we'll check
+ * whether the alarms were triggered for the current sensor. Note that
+ * according to the documentation only one threshold IRQ status can be
+	 * set at a time, which is why an if-else statement is used.
+ */
+ if ((thres_sts & info->thres_sts_lo) ^ cache->thres_sts_lo) {
+ WRITE_ONCE(cache->thres_sts_lo, thres_sts & info->thres_sts_lo);
+ hwmon_notify_event(pvt->hwmon, info->type, info->attr_min_alarm,
+ info->channel);
+ } else if ((thres_sts & info->thres_sts_hi) ^ cache->thres_sts_hi) {
+ WRITE_ONCE(cache->thres_sts_hi, thres_sts & info->thres_sts_hi);
+ hwmon_notify_event(pvt->hwmon, info->type, info->attr_max_alarm,
+ info->channel);
+ }
+
+ return IRQ_HANDLED;
+}
+
+inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+{
+ return 0644;
+}
+
+inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+{
+ return 0444;
+}
+
+static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ long *val)
+{
+ struct pvt_cache *cache = &pvt->cache[type];
+ unsigned int seq;
+ u32 data;
+
+ do {
+ seq = read_seqbegin(&cache->data_seqlock);
+ data = cache->data;
+ } while (read_seqretry(&cache->data_seqlock, seq));
+
+ if (type == PVT_TEMP)
+ *val = pvt_calc_poly(&poly_N_to_temp, data);
+ else
+ *val = pvt_calc_poly(&poly_N_to_volt, data);
+
+ return 0;
+}
+
+static int pvt_read_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long *val)
+{
+ u32 data;
+
+ /* No need in serialization, since it is just read from MMIO. */
+ data = readl(pvt->regs + pvt_info[type].thres_base);
+
+ if (is_low)
+ data = FIELD_GET(PVT_THRES_LO_MASK, data);
+ else
+ data = FIELD_GET(PVT_THRES_HI_MASK, data);
+
+ if (type == PVT_TEMP)
+ *val = pvt_calc_poly(&poly_N_to_temp, data);
+ else
+ *val = pvt_calc_poly(&poly_N_to_volt, data);
+
+ return 0;
+}
+
+static int pvt_write_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long val)
+{
+ u32 data, limit, mask;
+ int ret;
+
+ if (type == PVT_TEMP) {
+ val = clamp(val, PVT_TEMP_MIN, PVT_TEMP_MAX);
+ data = pvt_calc_poly(&poly_temp_to_N, val);
+ } else {
+ val = clamp(val, PVT_VOLT_MIN, PVT_VOLT_MAX);
+ data = pvt_calc_poly(&poly_volt_to_N, val);
+ }
+
+ /* Serialize limit update, since a part of the register is changed. */
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
+ if (ret)
+ return ret;
+
+ /* Make sure the upper and lower ranges don't intersect. */
+ limit = readl(pvt->regs + pvt_info[type].thres_base);
+ if (is_low) {
+ limit = FIELD_GET(PVT_THRES_HI_MASK, limit);
+ data = clamp_val(data, PVT_DATA_MIN, limit);
+ data = FIELD_PREP(PVT_THRES_LO_MASK, data);
+ mask = PVT_THRES_LO_MASK;
+ } else {
+ limit = FIELD_GET(PVT_THRES_LO_MASK, limit);
+ data = clamp_val(data, limit, PVT_DATA_MAX);
+ data = FIELD_PREP(PVT_THRES_HI_MASK, data);
+ mask = PVT_THRES_HI_MASK;
+ }
+
+ pvt_update(pvt->regs + pvt_info[type].thres_base, mask, data);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ return 0;
+}
+
+static int pvt_read_alarm(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long *val)
+{
+ if (is_low)
+ *val = !!READ_ONCE(pvt->cache[type].thres_sts_lo);
+ else
+ *val = !!READ_ONCE(pvt->cache[type].thres_sts_hi);
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *pvt_channel_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_TYPE | HWMON_T_LABEL |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_OFFSET),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL |
+ HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX | HWMON_I_MAX_ALARM,
+ HWMON_I_INPUT | HWMON_I_LABEL |
+ HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX | HWMON_I_MAX_ALARM,
+ HWMON_I_INPUT | HWMON_I_LABEL |
+ HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX | HWMON_I_MAX_ALARM,
+ HWMON_I_INPUT | HWMON_I_LABEL |
+ HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX | HWMON_I_MAX_ALARM),
+ NULL
+};
+
+#else /* !CONFIG_SENSORS_BT1_PVT_ALARMS */
+
+static irqreturn_t pvt_hard_isr(int irq, void *data)
+{
+ struct pvt_hwmon *pvt = data;
+ struct pvt_cache *cache;
+ u32 val;
+
+ /*
+	 * Mask the DVALID interrupt so that a repeated conversion won't
+	 * happen after exiting the handler.
+ */
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
+ PVT_INTR_DVALID);
+
+ /*
+ * Nothing special for alarm-less driver. Just read the data, update
+ * the cache and notify a waiter of this event.
+ */
+ val = readl(pvt->regs + PVT_DATA);
+ if (!(val & PVT_DATA_VALID)) {
+ dev_err(pvt->dev, "Got IRQ when data isn't valid\n");
+ return IRQ_HANDLED;
+ }
+
+ cache = &pvt->cache[pvt->sensor];
+
+ WRITE_ONCE(cache->data, FIELD_GET(PVT_DATA_DATA_MASK, val));
+
+ complete(&cache->conversion);
+
+ return IRQ_HANDLED;
+}
+
+#define pvt_soft_isr NULL
+
+inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+{
+ return 0;
+}
+
+inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+{
+ return 0;
+}
+
+static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ long *val)
+{
+ struct pvt_cache *cache = &pvt->cache[type];
+ u32 data;
+ int ret;
+
+ /*
+	 * Lock the PVT conversion interface until the data cache is updated.
+	 * The data read procedure is as follows: set the requested PVT sensor
+ * mode, enable IRQ and conversion, wait until conversion is finished,
+ * then disable conversion and IRQ, and read the cached data.
+ */
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
+ if (ret)
+ return ret;
+
+ pvt->sensor = type;
+ pvt_set_mode(pvt, pvt_info[type].mode);
+
+ /*
+	 * Unmask the DVALID interrupt and enable the sensor conversions.
+ * Do the reverse procedure when conversion is done.
+ */
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
+
+ wait_for_completion(&cache->conversion);
+
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
+ PVT_INTR_DVALID);
+
+ data = READ_ONCE(cache->data);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ if (type == PVT_TEMP)
+ *val = pvt_calc_poly(&poly_N_to_temp, data);
+ else
+ *val = pvt_calc_poly(&poly_N_to_volt, data);
+
+ return 0;
+}
+
+static int pvt_read_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static int pvt_write_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long val)
+{
+ return -EOPNOTSUPP;
+}
+
+static int pvt_read_alarm(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_channel_info *pvt_channel_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_TYPE | HWMON_T_LABEL |
+ HWMON_T_OFFSET),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ NULL
+};
+
+#endif /* !CONFIG_SENSORS_BT1_PVT_ALARMS */
+
+static inline bool pvt_hwmon_channel_is_valid(enum hwmon_sensor_types type,
+ int ch)
+{
+ switch (type) {
+ case hwmon_temp:
+ if (ch < 0 || ch >= PVT_TEMP_CHS)
+ return false;
+ break;
+ case hwmon_in:
+ if (ch < 0 || ch >= PVT_VOLT_CHS)
+ return false;
+ break;
+ default:
+ break;
+ }
+
+	/* The rest of the types are independent of the channel number. */
+ return true;
+}
+
+static umode_t pvt_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int ch)
+{
+ if (!pvt_hwmon_channel_is_valid(type, ch))
+ return 0;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_type:
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ return pvt_limit_is_visible(ch);
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ return pvt_alarm_is_visible(ch);
+ case hwmon_temp_offset:
+ return 0644;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_label:
+ return 0444;
+ case hwmon_in_min:
+ case hwmon_in_max:
+ return pvt_limit_is_visible(PVT_VOLT + ch);
+ case hwmon_in_min_alarm:
+ case hwmon_in_max_alarm:
+ return pvt_alarm_is_visible(PVT_VOLT + ch);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int pvt_read_trim(struct pvt_hwmon *pvt, long *val)
+{
+ u32 data;
+
+ data = readl(pvt->regs + PVT_CTRL);
+ *val = FIELD_GET(PVT_CTRL_TRIM_MASK, data) * PVT_TRIM_STEP;
+
+ return 0;
+}
+
+static int pvt_write_trim(struct pvt_hwmon *pvt, long val)
+{
+ u32 trim;
+ int ret;
+
+ /*
+ * Serialize trim update, since a part of the register is changed and
+ * the controller is supposed to be disabled during this operation.
+ */
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
+ if (ret)
+ return ret;
+
+ trim = pvt_calc_trim(val);
+ pvt_set_trim(pvt, trim);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ return 0;
+}
+
+static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
+{
+ unsigned long rate;
+ ktime_t kt;
+ u32 data;
+
+ rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
+ if (!rate)
+ return -ENODEV;
+
+ /*
+ * Don't bother with mutex here, since we just read data from MMIO.
+	 * We also have to scale the ticks timeout up to compensate for the
+	 * ms-ns-data translations.
+ */
+ data = readl(pvt->regs + PVT_TTIMEOUT) + 1;
+
+ /*
+ * Calculate ref-clock based delay (Ttotal) between two consecutive
+ * data samples of the same sensor. So we first must calculate the
+ * delay introduced by the internal ref-clock timer (Tref * Fclk).
+	 * Then add the constant timeout caused by each conversion latency
+	 * (Tmin). The basic formula for each conversion is:
+	 * Ttotal = Tref * Fclk + Tmin
+	 * Note if alarms are enabled the sensors are polled one after
+	 * another, so in order to have the delay applicable to each
+	 * sensor the requested value must be equally redistributed.
+ */
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ kt = ktime_set(PVT_SENSORS_NUM * (u64)data, 0);
+ kt = ktime_divns(kt, rate);
+ kt = ktime_add_ns(kt, PVT_SENSORS_NUM * PVT_TOUT_MIN);
+#else
+ kt = ktime_set(data, 0);
+ kt = ktime_divns(kt, rate);
+ kt = ktime_add_ns(kt, PVT_TOUT_MIN);
+#endif
+
+ /* Return the result in msec as hwmon sysfs interface requires. */
+ *val = ktime_to_ms(kt);
+
+ return 0;
+}
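+/*
+ * For instance (illustrative numbers): with a 25 MHz ref clock and
+ * PVT_TTIMEOUT programmed to 24999, data = 25000 ticks, so Tref * Fclk =
+ * 25000 / 25e6 = 1 ms per conversion. With alarms enabled the five
+ * sensors are polled in turn, so the reported interval becomes
+ * 5 * (1 ms + Tmin).
+ */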
+
+static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
+{
+ unsigned long rate;
+ ktime_t kt;
+ u32 data;
+ int ret;
+
+ rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
+ if (!rate)
+ return -ENODEV;
+
+ /*
+ * If alarms are enabled, the requested timeout must be divided
+ * between all available sensors to have the requested delay
+ * applicable to each individual sensor.
+ */
+ kt = ms_to_ktime(val);
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ kt = ktime_divns(kt, PVT_SENSORS_NUM);
+#endif
+
+ /*
+ * Subtract a constant lag, which always persists due to the limited
+ * PVT sampling rate. Make sure the timeout is not negative.
+ */
+ kt = ktime_sub_ns(kt, PVT_TOUT_MIN);
+ if (ktime_to_ns(kt) < 0)
+ kt = ktime_set(0, 0);
+
+ /*
+ * Finally recalculate the timeout in terms of the reference clock
+ * period.
+ */
+ data = ktime_divns(kt * rate, NSEC_PER_SEC);
+
+ /*
+ * Update the measurements delay, but lock the interface first, since
+ * we have to disable PVT in order to have the new delay actually
+ * updated.
+ */
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
+ if (ret)
+ return ret;
+
+ pvt_set_tout(pvt, data);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ return 0;
+}
+
+static int pvt_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int ch, long *val)
+{
+ struct pvt_hwmon *pvt = dev_get_drvdata(dev);
+
+ if (!pvt_hwmon_channel_is_valid(type, ch))
+ return -EINVAL;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return pvt_read_timeout(pvt, val);
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return pvt_read_data(pvt, ch, val);
+ case hwmon_temp_type:
+ *val = 1;
+ return 0;
+ case hwmon_temp_min:
+ return pvt_read_limit(pvt, ch, true, val);
+ case hwmon_temp_max:
+ return pvt_read_limit(pvt, ch, false, val);
+ case hwmon_temp_min_alarm:
+ return pvt_read_alarm(pvt, ch, true, val);
+ case hwmon_temp_max_alarm:
+ return pvt_read_alarm(pvt, ch, false, val);
+ case hwmon_temp_offset:
+ return pvt_read_trim(pvt, val);
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return pvt_read_data(pvt, PVT_VOLT + ch, val);
+ case hwmon_in_min:
+ return pvt_read_limit(pvt, PVT_VOLT + ch, true, val);
+ case hwmon_in_max:
+ return pvt_read_limit(pvt, PVT_VOLT + ch, false, val);
+ case hwmon_in_min_alarm:
+ return pvt_read_alarm(pvt, PVT_VOLT + ch, true, val);
+ case hwmon_in_max_alarm:
+ return pvt_read_alarm(pvt, PVT_VOLT + ch, false, val);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int pvt_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int ch, const char **str)
+{
+ if (!pvt_hwmon_channel_is_valid(type, ch))
+ return -EINVAL;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = pvt_info[ch].label;
+ return 0;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = pvt_info[PVT_VOLT + ch].label;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int pvt_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int ch, long val)
+{
+ struct pvt_hwmon *pvt = dev_get_drvdata(dev);
+
+ if (!pvt_hwmon_channel_is_valid(type, ch))
+ return -EINVAL;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return pvt_write_timeout(pvt, val);
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_min:
+ return pvt_write_limit(pvt, ch, true, val);
+ case hwmon_temp_max:
+ return pvt_write_limit(pvt, ch, false, val);
+ case hwmon_temp_offset:
+ return pvt_write_trim(pvt, val);
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_min:
+ return pvt_write_limit(pvt, PVT_VOLT + ch, true, val);
+ case hwmon_in_max:
+ return pvt_write_limit(pvt, PVT_VOLT + ch, false, val);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops pvt_hwmon_ops = {
+ .is_visible = pvt_hwmon_is_visible,
+ .read = pvt_hwmon_read,
+ .read_string = pvt_hwmon_read_string,
+ .write = pvt_hwmon_write
+};
+
+static const struct hwmon_chip_info pvt_hwmon_info = {
+ .ops = &pvt_hwmon_ops,
+ .info = pvt_channel_info
+};
+
+static void pvt_clear_data(void *data)
+{
+ struct pvt_hwmon *pvt = data;
+#if !defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ int idx;
+
+ for (idx = 0; idx < PVT_SENSORS_NUM; ++idx)
+ complete_all(&pvt->cache[idx].conversion);
+#endif
+
+ mutex_destroy(&pvt->iface_mtx);
+}
+
+static struct pvt_hwmon *pvt_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pvt_hwmon *pvt;
+ int ret, idx;
+
+ pvt = devm_kzalloc(dev, sizeof(*pvt), GFP_KERNEL);
+ if (!pvt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, pvt_clear_data, pvt);
+ if (ret) {
+ dev_err(dev, "Can't add PVT data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ pvt->dev = dev;
+ pvt->sensor = PVT_SENSOR_FIRST;
+ mutex_init(&pvt->iface_mtx);
+
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ for (idx = 0; idx < PVT_SENSORS_NUM; ++idx)
+ seqlock_init(&pvt->cache[idx].data_seqlock);
+#else
+ for (idx = 0; idx < PVT_SENSORS_NUM; ++idx)
+ init_completion(&pvt->cache[idx].conversion);
+#endif
+
+ return pvt;
+}
+
+static int pvt_request_regs(struct pvt_hwmon *pvt)
+{
+ struct platform_device *pdev = to_platform_device(pvt->dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(pvt->dev, "Couldn't find PVT memresource\n");
+ return -EINVAL;
+ }
+
+ pvt->regs = devm_ioremap_resource(pvt->dev, res);
+ if (IS_ERR(pvt->regs)) {
+ dev_err(pvt->dev, "Couldn't map PVT registers\n");
+ return PTR_ERR(pvt->regs);
+ }
+
+ return 0;
+}
+
+static void pvt_disable_clks(void *data)
+{
+ struct pvt_hwmon *pvt = data;
+
+ clk_bulk_disable_unprepare(PVT_CLOCK_NUM, pvt->clks);
+}
+
+static int pvt_request_clks(struct pvt_hwmon *pvt)
+{
+ int ret;
+
+ pvt->clks[PVT_CLOCK_APB].id = "pclk";
+ pvt->clks[PVT_CLOCK_REF].id = "ref";
+
+ ret = devm_clk_bulk_get(pvt->dev, PVT_CLOCK_NUM, pvt->clks);
+ if (ret) {
+ dev_err(pvt->dev, "Couldn't get PVT clocks descriptors\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(PVT_CLOCK_NUM, pvt->clks);
+ if (ret) {
+ dev_err(pvt->dev, "Couldn't enable the PVT clocks\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(pvt->dev, pvt_disable_clks, pvt);
+ if (ret) {
+ dev_err(pvt->dev, "Can't add PVT clocks disable action\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pvt_init_iface(struct pvt_hwmon *pvt)
+{
+ u32 trim, temp;
+
+ /*
+ * Make sure all interrupts and the controller are disabled so the
+ * ISR isn't accidentally executed before the driver data is fully
+ * initialized. Clear the IRQ status as well.
+ */
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_ALL, PVT_INTR_ALL);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ readl(pvt->regs + PVT_CLR_INTR);
+ readl(pvt->regs + PVT_DATA);
+
+ /* Setup default sensor mode, timeout and temperature trim. */
+ pvt_set_mode(pvt, pvt_info[pvt->sensor].mode);
+ pvt_set_tout(pvt, PVT_TOUT_DEF);
+
+ trim = PVT_TRIM_DEF;
+ if (!of_property_read_u32(pvt->dev->of_node,
+ "baikal,pvt-temp-offset-millicelsius", &temp))
+ trim = pvt_calc_trim(temp);
+
+ pvt_set_trim(pvt, trim);
+}
+
+static int pvt_request_irq(struct pvt_hwmon *pvt)
+{
+ struct platform_device *pdev = to_platform_device(pvt->dev);
+ int ret;
+
+ pvt->irq = platform_get_irq(pdev, 0);
+ if (pvt->irq < 0)
+ return pvt->irq;
+
+ ret = devm_request_threaded_irq(pvt->dev, pvt->irq,
+ pvt_hard_isr, pvt_soft_isr,
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ IRQF_SHARED | IRQF_TRIGGER_HIGH |
+ IRQF_ONESHOT,
+#else
+ IRQF_SHARED | IRQF_TRIGGER_HIGH,
+#endif
+ "pvt", pvt);
+ if (ret) {
+ dev_err(pvt->dev, "Couldn't request PVT IRQ\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pvt_create_hwmon(struct pvt_hwmon *pvt)
+{
+ pvt->hwmon = devm_hwmon_device_register_with_info(pvt->dev, "pvt", pvt,
+ &pvt_hwmon_info, NULL);
+ if (IS_ERR(pvt->hwmon)) {
+ dev_err(pvt->dev, "Couldn't create hwmon device\n");
+ return PTR_ERR(pvt->hwmon);
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+
+static void pvt_disable_iface(void *data)
+{
+ struct pvt_hwmon *pvt = data;
+
+ mutex_lock(&pvt->iface_mtx);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
+ PVT_INTR_DVALID);
+ mutex_unlock(&pvt->iface_mtx);
+}
+
+static int pvt_enable_iface(struct pvt_hwmon *pvt)
+{
+ int ret;
+
+ ret = devm_add_action(pvt->dev, pvt_disable_iface, pvt);
+ if (ret) {
+ dev_err(pvt->dev, "Can't add PVT disable interface action\n");
+ return ret;
+ }
+
+ /*
+ * Enable the sensor data conversion and the IRQ. We need to lock the
+ * interface mutex since hwmon has just been created and the
+ * corresponding sysfs files are accessible from user-space,
+ * which theoretically may cause races.
+ */
+ mutex_lock(&pvt->iface_mtx);
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
+ mutex_unlock(&pvt->iface_mtx);
+
+ return 0;
+}
+
+#else /* !CONFIG_SENSORS_BT1_PVT_ALARMS */
+
+static int pvt_enable_iface(struct pvt_hwmon *pvt)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_SENSORS_BT1_PVT_ALARMS */
+
+static int pvt_probe(struct platform_device *pdev)
+{
+ struct pvt_hwmon *pvt;
+ int ret;
+
+ pvt = pvt_create_data(pdev);
+ if (IS_ERR(pvt))
+ return PTR_ERR(pvt);
+
+ ret = pvt_request_regs(pvt);
+ if (ret)
+ return ret;
+
+ ret = pvt_request_clks(pvt);
+ if (ret)
+ return ret;
+
+ pvt_init_iface(pvt);
+
+ ret = pvt_request_irq(pvt);
+ if (ret)
+ return ret;
+
+ ret = pvt_create_hwmon(pvt);
+ if (ret)
+ return ret;
+
+ ret = pvt_enable_iface(pvt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id pvt_of_match[] = {
+ { .compatible = "baikal,bt1-pvt" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pvt_of_match);
+
+static struct platform_driver pvt_driver = {
+ .probe = pvt_probe,
+ .driver = {
+ .name = "bt1-pvt",
+ .of_match_table = pvt_of_match
+ }
+};
+module_platform_driver(pvt_driver);
+
+MODULE_AUTHOR("Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 PVT driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/bt1-pvt.h b/drivers/hwmon/bt1-pvt.h
new file mode 100644
index 000000000000..5eac73e94885
--- /dev/null
+++ b/drivers/hwmon/bt1-pvt.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 Process, Voltage, Temperature sensor driver
+ */
+#ifndef __HWMON_BT1_PVT_H__
+#define __HWMON_BT1_PVT_H__
+
+#include <linux/completion.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/seqlock.h>
+
+/* Baikal-T1 PVT registers and their bitfields */
+#define PVT_CTRL 0x00
+#define PVT_CTRL_EN BIT(0)
+#define PVT_CTRL_MODE_FLD 1
+#define PVT_CTRL_MODE_MASK GENMASK(3, PVT_CTRL_MODE_FLD)
+#define PVT_CTRL_MODE_TEMP 0x0
+#define PVT_CTRL_MODE_VOLT 0x1
+#define PVT_CTRL_MODE_LVT 0x2
+#define PVT_CTRL_MODE_HVT 0x4
+#define PVT_CTRL_MODE_SVT 0x6
+#define PVT_CTRL_TRIM_FLD 4
+#define PVT_CTRL_TRIM_MASK GENMASK(8, PVT_CTRL_TRIM_FLD)
+#define PVT_DATA 0x04
+#define PVT_DATA_VALID BIT(10)
+#define PVT_DATA_DATA_FLD 0
+#define PVT_DATA_DATA_MASK GENMASK(9, PVT_DATA_DATA_FLD)
+#define PVT_TTHRES 0x08
+#define PVT_VTHRES 0x0C
+#define PVT_LTHRES 0x10
+#define PVT_HTHRES 0x14
+#define PVT_STHRES 0x18
+#define PVT_THRES_LO_FLD 0
+#define PVT_THRES_LO_MASK GENMASK(9, PVT_THRES_LO_FLD)
+#define PVT_THRES_HI_FLD 10
+#define PVT_THRES_HI_MASK GENMASK(19, PVT_THRES_HI_FLD)
+#define PVT_TTIMEOUT 0x1C
+#define PVT_INTR_STAT 0x20
+#define PVT_INTR_MASK 0x24
+#define PVT_RAW_INTR_STAT 0x28
+#define PVT_INTR_DVALID BIT(0)
+#define PVT_INTR_TTHRES_LO BIT(1)
+#define PVT_INTR_TTHRES_HI BIT(2)
+#define PVT_INTR_VTHRES_LO BIT(3)
+#define PVT_INTR_VTHRES_HI BIT(4)
+#define PVT_INTR_LTHRES_LO BIT(5)
+#define PVT_INTR_LTHRES_HI BIT(6)
+#define PVT_INTR_HTHRES_LO BIT(7)
+#define PVT_INTR_HTHRES_HI BIT(8)
+#define PVT_INTR_STHRES_LO BIT(9)
+#define PVT_INTR_STHRES_HI BIT(10)
+#define PVT_INTR_ALL GENMASK(10, 0)
+#define PVT_CLR_INTR 0x2C
+
+/*
+ * PVT sensors-related limits and default values
+ * @PVT_TEMP_MIN: Minimum temperature in millidegrees Celsius.
+ * @PVT_TEMP_MAX: Maximum temperature in millidegrees Celsius.
+ * @PVT_TEMP_CHS: Number of temperature hwmon channels.
+ * @PVT_VOLT_MIN: Minimum voltage in mV.
+ * @PVT_VOLT_MAX: Maximum voltage in mV.
+ * @PVT_VOLT_CHS: Number of voltage hwmon channels.
+ * @PVT_DATA_MIN: Minimum PVT raw data value.
+ * @PVT_DATA_MAX: Maximum PVT raw data value.
+ * @PVT_TRIM_MIN: Minimum temperature sensor trim value.
+ * @PVT_TRIM_MAX: Maximum temperature sensor trim value.
+ * @PVT_TRIM_DEF: Default temperature sensor trim value (to be replaced
+ *                with a proper value once one is determined for the
+ *                Baikal-T1 SoC).
+ * @PVT_TRIM_TEMP: Maximum temperature encoded by the trim factor.
+ * @PVT_TRIM_STEP: Temperature stride corresponding to the trim value.
+ * @PVT_TOUT_MIN: Minimum timeout between samples in nanoseconds.
+ * @PVT_TOUT_DEF: Default data measurements timeout. If alarms are
+ *                activated, the PVT IRQ is raised after each conversion
+ *                so that the thresholds are checked and the converted
+ *                value is cached. Too frequent conversions may overload
+ *                the system CPU, so let's set a 50 ms delay between
+ *                them by default to prevent this.
+ */
+#define PVT_TEMP_MIN -48380L
+#define PVT_TEMP_MAX 147438L
+#define PVT_TEMP_CHS 1
+#define PVT_VOLT_MIN 620L
+#define PVT_VOLT_MAX 1168L
+#define PVT_VOLT_CHS 4
+#define PVT_DATA_MIN 0
+#define PVT_DATA_MAX (PVT_DATA_DATA_MASK >> PVT_DATA_DATA_FLD)
+#define PVT_TRIM_MIN 0
+#define PVT_TRIM_MAX (PVT_CTRL_TRIM_MASK >> PVT_CTRL_TRIM_FLD)
+#define PVT_TRIM_TEMP 7130
+#define PVT_TRIM_STEP (PVT_TRIM_TEMP / PVT_TRIM_MAX)
+#define PVT_TRIM_DEF 0
+#define PVT_TOUT_MIN (NSEC_PER_SEC / 3000)
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+# define PVT_TOUT_DEF 60000
+#else
+# define PVT_TOUT_DEF 0
+#endif
+
+/*
+ * enum pvt_sensor_type - Baikal-T1 PVT sensor types (correspond to each PVT
+ * sampling mode)
+ * @PVT_SENSOR*: helpers to traverse the sensors in loops.
+ * @PVT_TEMP: PVT Temperature sensor.
+ * @PVT_VOLT: PVT Voltage sensor.
+ * @PVT_LVT: PVT Low-Voltage threshold sensor.
+ * @PVT_HVT: PVT High-Voltage threshold sensor.
+ * @PVT_SVT: PVT Standard-Voltage threshold sensor.
+ */
+enum pvt_sensor_type {
+ PVT_SENSOR_FIRST,
+ PVT_TEMP = PVT_SENSOR_FIRST,
+ PVT_VOLT,
+ PVT_LVT,
+ PVT_HVT,
+ PVT_SVT,
+ PVT_SENSOR_LAST = PVT_SVT,
+ PVT_SENSORS_NUM
+};
+
+/*
+ * enum pvt_clock_type - Baikal-T1 PVT clocks.
+ * @PVT_CLOCK_APB: APB clock.
+ * @PVT_CLOCK_REF: PVT reference clock.
+ */
+enum pvt_clock_type {
+ PVT_CLOCK_APB,
+ PVT_CLOCK_REF,
+ PVT_CLOCK_NUM
+};
+
+/*
+ * struct pvt_sensor_info - Baikal-T1 PVT sensor informational structure
+ * @channel: Sensor channel ID.
+ * @label: hwmon sensor label.
+ * @mode: PVT mode corresponding to the channel.
+ * @thres_base: offset of the sensor's upper/lower thresholds register.
+ * @thres_sts_lo: low threshold status bitfield.
+ * @thres_sts_hi: high threshold status bitfield.
+ * @type: Sensor type.
+ * @attr_min_alarm: Min alarm attribute ID.
+ * @attr_max_alarm: Max alarm attribute ID.
+ */
+struct pvt_sensor_info {
+ int channel;
+ const char *label;
+ u32 mode;
+ unsigned long thres_base;
+ u32 thres_sts_lo;
+ u32 thres_sts_hi;
+ enum hwmon_sensor_types type;
+ u32 attr_min_alarm;
+ u32 attr_max_alarm;
+};
+
+#define PVT_SENSOR_INFO(_ch, _label, _type, _mode, _thres) \
+ { \
+ .channel = _ch, \
+ .label = _label, \
+ .mode = PVT_CTRL_MODE_ ##_mode, \
+ .thres_base = PVT_ ##_thres, \
+ .thres_sts_lo = PVT_INTR_ ##_thres## _LO, \
+ .thres_sts_hi = PVT_INTR_ ##_thres## _HI, \
+ .type = _type, \
+ .attr_min_alarm = _type## _min, \
+ .attr_max_alarm = _type## _max, \
+ }
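
As an illustration of the token pasting above, a hypothetical instantiation (the driver's real pvt_info[] table lives in the .c file and is not shown in this hunk; the label text is made up):

/* Hypothetical example entry, editorial sketch only. */
static const struct pvt_sensor_info pvt_info_example[] = {
	/*
	 * Expands to .mode = PVT_CTRL_MODE_TEMP, .thres_base = PVT_TTHRES,
	 * .thres_sts_lo/hi = PVT_INTR_TTHRES_LO/_HI, and
	 * .attr_min/max_alarm = hwmon_temp_min/_max.
	 */
	PVT_SENSOR_INFO(0, "CPU Temperature", hwmon_temp, TEMP, TTHRES),
};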
+
+/*
+ * struct pvt_cache - PVT sensors data cache
+ * @data: data cache in raw format.
+ * @thres_sts_lo: low threshold status saved on the previous data conversion.
+ * @thres_sts_hi: high threshold status saved on the previous data conversion.
+ * @data_seqlock: cached data seq-lock.
+ * @conversion: data conversion completion.
+ */
+struct pvt_cache {
+ u32 data;
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ seqlock_t data_seqlock;
+ u32 thres_sts_lo;
+ u32 thres_sts_hi;
+#else
+ struct completion conversion;
+#endif
+};
+
+/*
+ * struct pvt_hwmon - Baikal-T1 PVT private data
+ * @dev: device structure of the PVT platform device.
+ * @hwmon: hwmon device structure.
+ * @regs: pointer to the Baikal-T1 PVT registers region.
+ * @irq: PVT events IRQ number.
+ * @clks: Array of the PVT clock descriptors (APB/ref clocks).
+ * @iface_mtx: Generic interface mutex (used to lock the alarm registers
+ *             when alarms are enabled, or the data conversion interface
+ *             when they are disabled).
+ * @sensor: current PVT sensor the data conversion is being performed for.
+ * @cache: data cache descriptor.
+ */
+struct pvt_hwmon {
+ struct device *dev;
+ struct device *hwmon;
+
+ void __iomem *regs;
+ int irq;
+
+ struct clk_bulk_data clks[PVT_CLOCK_NUM];
+
+ struct mutex iface_mtx;
+ enum pvt_sensor_type sensor;
+ struct pvt_cache cache[PVT_SENSORS_NUM];
+};
+
+/*
+ * struct pvt_poly_term - a term descriptor of the PVT data translation
+ * polynomial
+ * @deg: degree of the term.
+ * @coef: multiplication factor of the term.
+ * @divider: divider distributed over each degree of the term.
+ * @divider_leftover: divider leftover, which couldn't be redistributed.
+ */
+struct pvt_poly_term {
+ unsigned int deg;
+ long coef;
+ long divider;
+ long divider_leftover;
+};
+
+/*
+ * struct pvt_poly - PVT data translation polynomial descriptor
+ * @total_divider: total data divider.
+ * @terms: polynomial terms, ending with the free (degree 0) term.
+ */
+struct pvt_poly {
+ long total_divider;
+ struct pvt_poly_term terms[];
+};
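
For orientation, a minimal sketch of how such a term list might be evaluated (hypothetical helper; the driver's actual polynomial code is in the .c file, outside this hunk, and the sketch assumes divider and divider_leftover are at least 1):

/*
 * Hypothetical helper, editorial sketch only. Assumes the terms array
 * ends with the free (degree 0) term, as documented above.
 */
static long pvt_poly_eval(const struct pvt_poly *poly, long x)
{
	const struct pvt_poly_term *t = poly->terms;
	long tmp, ret = 0;
	unsigned int deg;

	do {
		/* coef * x^deg, dividing per degree to limit overflow */
		tmp = t->coef;
		for (deg = 0; deg < t->deg; deg++)
			tmp = tmp * x / t->divider;
		ret += tmp / t->divider_leftover;
	} while ((t++)->deg);

	return ret / poly->total_divider;
}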
+
+#endif /* __HWMON_BT1_PVT_H__ */
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 53b517dbe7e6..4af2fc309c28 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev,
int channel = to_sensor_dev_attr(devattr)->index;
int ret;
- mutex_lock(&hwmon->hwmon_lock);
+ mutex_lock(&hwmon->da9052->auxadc_lock);
ret = __da9052_read_tsi(dev, channel);
- mutex_unlock(&hwmon->hwmon_lock);
+ mutex_unlock(&hwmon->da9052->auxadc_lock);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index ab719d372b0d..16be012a95ed 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1073,13 +1073,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
- .ident = "Dell XPS421",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
- },
- },
- {
.ident = "Dell Studio",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1088,14 +1081,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
.driver_data = (void *)&i8k_config_data[DELL_STUDIO],
},
{
- .ident = "Dell XPS 13",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS13"),
- },
- .driver_data = (void *)&i8k_config_data[DELL_XPS],
- },
- {
.ident = "Dell XPS M140",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1104,17 +1089,10 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
.driver_data = (void *)&i8k_config_data[DELL_XPS],
},
{
- .ident = "Dell XPS 15 9560",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9560"),
- },
- },
- {
- .ident = "Dell XPS 15 9570",
+ .ident = "Dell XPS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS"),
},
},
{ }
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 9179460c2d9d..0d4f3d97ffc6 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -346,7 +346,7 @@ static int drivetemp_identify_sata(struct drivetemp_data *st)
st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
if (!have_sct_data_table)
- goto skip_sct;
+ goto skip_sct_data;
/* Request and read temperature history table */
memset(buf, '\0', sizeof(st->smartdata));
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
new file mode 100644
index 000000000000..2137bc65829d
--- /dev/null
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Gateworks System Controller Hardware Monitor module
+ *
+ * Copyright (C) 2020 Gateworks Corporation
+ */
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mfd/gsc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <linux/platform_data/gsc_hwmon.h>
+
+#define GSC_HWMON_MAX_TEMP_CH 16
+#define GSC_HWMON_MAX_IN_CH 16
+
+#define GSC_HWMON_RESOLUTION 12
+#define GSC_HWMON_VREF 2500
+
+struct gsc_hwmon_data {
+ struct gsc_dev *gsc;
+ struct gsc_hwmon_platform_data *pdata;
+ struct regmap *regmap;
+ const struct gsc_hwmon_channel *temp_ch[GSC_HWMON_MAX_TEMP_CH];
+ const struct gsc_hwmon_channel *in_ch[GSC_HWMON_MAX_IN_CH];
+ u32 temp_config[GSC_HWMON_MAX_TEMP_CH + 1];
+ u32 in_config[GSC_HWMON_MAX_IN_CH + 1];
+ struct hwmon_channel_info temp_info;
+ struct hwmon_channel_info in_info;
+ const struct hwmon_channel_info *info[3];
+ struct hwmon_chip_info chip;
+};
+
+static struct regmap_bus gsc_hwmon_regmap_bus = {
+ .reg_read = gsc_read,
+ .reg_write = gsc_write,
+};
+
+static const struct regmap_config gsc_hwmon_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+};
+
+static ssize_t pwm_auto_point_temp_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ u8 reg = hwmon->pdata->fan_base + (2 * attr->index);
+ u8 regs[2];
+ int ret;
+
+ ret = regmap_bulk_read(hwmon->regmap, reg, regs, 2);
+ if (ret)
+ return ret;
+
+ ret = regs[0] | regs[1] << 8;
+ return sprintf(buf, "%d\n", ret * 10);
+}
+
+static ssize_t pwm_auto_point_temp_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ u8 reg = hwmon->pdata->fan_base + (2 * attr->index);
+ u8 regs[2];
+ long temp;
+ int err;
+
+ if (kstrtol(buf, 10, &temp))
+ return -EINVAL;
+
+ temp = clamp_val(temp, 0, 10000);
+ temp = DIV_ROUND_CLOSEST(temp, 10);
+
+ regs[0] = temp & 0xff;
+ regs[1] = (temp >> 8) & 0xff;
+ err = regmap_bulk_write(hwmon->regmap, reg, regs, 2);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static ssize_t pwm_auto_point_pwm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)) / 100);
+}
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm_auto_point_pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point1_temp, pwm_auto_point_temp, 0);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point2_pwm, pwm_auto_point_pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point2_temp, pwm_auto_point_temp, 1);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point3_pwm, pwm_auto_point_pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point3_temp, pwm_auto_point_temp, 2);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point4_pwm, pwm_auto_point_pwm, 3);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point4_temp, pwm_auto_point_temp, 3);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point5_pwm, pwm_auto_point_pwm, 4);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point5_temp, pwm_auto_point_temp, 4);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point6_pwm, pwm_auto_point_pwm, 5);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point6_temp, pwm_auto_point_temp, 5);
+
+static struct attribute *gsc_hwmon_attributes[] = {
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group gsc_hwmon_group = {
+ .attrs = gsc_hwmon_attributes,
+};
+__ATTRIBUTE_GROUPS(gsc_hwmon);
+
+static int
+gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ const struct gsc_hwmon_channel *ch;
+ int sz, ret;
+ long tmp;
+ u8 buf[3];
+
+ switch (type) {
+ case hwmon_in:
+ ch = hwmon->in_ch[channel];
+ break;
+ case hwmon_temp:
+ ch = hwmon->temp_ch[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ sz = (ch->mode == mode_voltage) ? 3 : 2;
+ ret = regmap_bulk_read(hwmon->regmap, ch->reg, buf, sz);
+ if (ret)
+ return ret;
+
+ tmp = 0;
+ while (sz-- > 0)
+ tmp |= (buf[sz] << (8 * sz));
+
+ switch (ch->mode) {
+ case mode_temperature:
+ if (tmp > 0x8000)
+ tmp -= 0xffff;
+ break;
+ case mode_voltage_raw:
+ tmp = clamp_val(tmp, 0, BIT(GSC_HWMON_RESOLUTION));
+ /* scale based on ref voltage and ADC resolution */
+ tmp *= GSC_HWMON_VREF;
+ tmp >>= GSC_HWMON_RESOLUTION;
+ /* scale based on optional voltage divider */
+ if (ch->vdiv[0] && ch->vdiv[1]) {
+ tmp *= (ch->vdiv[0] + ch->vdiv[1]);
+ tmp /= ch->vdiv[1];
+ }
+ /* adjust by mV offset (parsed from the microvolt DT property) */
+ tmp += ch->mvoffset;
+ break;
+ case mode_voltage:
+ /* no adjustment needed */
+ break;
+ }
+
+ *val = tmp;
+
+ return 0;
+}
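
A worked instance of the mode_voltage_raw branch above, with made-up numbers:

/*
 * Hypothetical example: raw reading 2048, 10k/10k divider, no offset:
 *
 *   tmp = 2048 * 2500 >> 12              = 1250  (mV at the ADC pin)
 *   tmp = 1250 * (10000 + 10000) / 10000 = 2500  (mV at the rail)
 */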
+
+static int
+gsc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **buf)
+{
+ struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_in:
+ *buf = hwmon->in_ch[channel]->name;
+ break;
+ case hwmon_temp:
+ *buf = hwmon->temp_ch[channel]->name;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t
+gsc_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
+ int ch)
+{
+ return 0444;
+}
+
+static const struct hwmon_ops gsc_hwmon_ops = {
+ .is_visible = gsc_hwmon_is_visible,
+ .read = gsc_hwmon_read,
+ .read_string = gsc_hwmon_read_string,
+};
+
+static struct gsc_hwmon_platform_data *
+gsc_hwmon_get_devtree_pdata(struct device *dev)
+{
+ struct gsc_hwmon_platform_data *pdata;
+ struct gsc_hwmon_channel *ch;
+ struct fwnode_handle *child;
+ struct device_node *fan;
+ int nchannels;
+
+ nchannels = device_get_child_node_count(dev);
+ if (nchannels == 0)
+ return ERR_PTR(-ENODEV);
+
+ pdata = devm_kzalloc(dev,
+ sizeof(*pdata) + nchannels * sizeof(*ch),
+ GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+ ch = (struct gsc_hwmon_channel *)(pdata + 1);
+ pdata->channels = ch;
+ pdata->nchannels = nchannels;
+
+ /* fan controller base address */
+ fan = of_find_compatible_node(dev->parent->of_node, NULL, "gw,gsc-fan");
+ if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) {
+ dev_err(dev, "fan node without base\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* allocate structures for channels and count instances of each type */
+ device_for_each_child_node(dev, child) {
+ if (fwnode_property_read_string(child, "label", &ch->name)) {
+ dev_err(dev, "channel without label\n");
+ fwnode_handle_put(child);
+ return ERR_PTR(-EINVAL);
+ }
+ if (fwnode_property_read_u32(child, "reg", &ch->reg)) {
+ dev_err(dev, "channel without reg\n");
+ fwnode_handle_put(child);
+ return ERR_PTR(-EINVAL);
+ }
+ if (fwnode_property_read_u32(child, "gw,mode", &ch->mode)) {
+ dev_err(dev, "channel without mode\n");
+ fwnode_handle_put(child);
+ return ERR_PTR(-EINVAL);
+ }
+ if (ch->mode > mode_max) {
+ dev_err(dev, "invalid channel mode\n");
+ fwnode_handle_put(child);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!fwnode_property_read_u32(child,
+ "gw,voltage-offset-microvolt",
+ &ch->mvoffset))
+ ch->mvoffset /= 1000;
+ fwnode_property_read_u32_array(child,
+ "gw,voltage-divider-ohms",
+ ch->vdiv, ARRAY_SIZE(ch->vdiv));
+ ch++;
+ }
+
+ return pdata;
+}
+
+static int gsc_hwmon_probe(struct platform_device *pdev)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct device *hwmon_dev;
+ struct gsc_hwmon_platform_data *pdata = dev_get_platdata(dev);
+ struct gsc_hwmon_data *hwmon;
+ const struct attribute_group **groups;
+ int i, i_in, i_temp;
+
+ if (!pdata) {
+ pdata = gsc_hwmon_get_devtree_pdata(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ }
+
+ hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+ hwmon->gsc = gsc;
+ hwmon->pdata = pdata;
+
+ hwmon->regmap = devm_regmap_init(dev, &gsc_hwmon_regmap_bus,
+ gsc->i2c_hwmon,
+ &gsc_hwmon_regmap_config);
+ if (IS_ERR(hwmon->regmap))
+ return PTR_ERR(hwmon->regmap);
+
+ for (i = 0, i_in = 0, i_temp = 0; i < hwmon->pdata->nchannels; i++) {
+ const struct gsc_hwmon_channel *ch = &pdata->channels[i];
+
+ switch (ch->mode) {
+ case mode_temperature:
+ if (i_temp == GSC_HWMON_MAX_TEMP_CH) {
+ dev_err(gsc->dev, "too many temp channels\n");
+ return -EINVAL;
+ }
+ hwmon->temp_ch[i_temp] = ch;
+ hwmon->temp_config[i_temp] = HWMON_T_INPUT |
+ HWMON_T_LABEL;
+ i_temp++;
+ break;
+ case mode_voltage:
+ case mode_voltage_raw:
+ if (i_in == GSC_HWMON_MAX_IN_CH) {
+ dev_err(gsc->dev, "too many input channels\n");
+ return -EINVAL;
+ }
+ hwmon->in_ch[i_in] = ch;
+ hwmon->in_config[i_in] =
+ HWMON_I_INPUT | HWMON_I_LABEL;
+ i_in++;
+ break;
+ default:
+ dev_err(gsc->dev, "invalid mode: %d\n", ch->mode);
+ return -EINVAL;
+ }
+ }
+
+ /* setup config structures */
+ hwmon->chip.ops = &gsc_hwmon_ops;
+ hwmon->chip.info = hwmon->info;
+ hwmon->info[0] = &hwmon->temp_info;
+ hwmon->info[1] = &hwmon->in_info;
+ hwmon->temp_info.type = hwmon_temp;
+ hwmon->temp_info.config = hwmon->temp_config;
+ hwmon->in_info.type = hwmon_in;
+ hwmon->in_info.config = hwmon->in_config;
+
+ groups = pdata->fan_base ? gsc_hwmon_groups : NULL;
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ KBUILD_MODNAME, hwmon,
+ &hwmon->chip, groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id gsc_hwmon_of_match[] = {
+ { .compatible = "gw,gsc-adc", },
+ {}
+};
+
+static struct platform_driver gsc_hwmon_driver = {
+ .driver = {
+ .name = "gsc-hwmon",
+ .of_match_table = gsc_hwmon_of_match,
+ },
+ .probe = gsc_hwmon_probe,
+};
+
+module_platform_driver(gsc_hwmon_driver);
+
+MODULE_AUTHOR("Tim Harvey <tharvey@gateworks.com>");
+MODULE_DESCRIPTION("GSC hardware monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 6a30fb453f7a..3f596a5328da 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -15,6 +15,7 @@
#include <linux/gfp.h>
#include <linux/hwmon.h>
#include <linux/idr.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -31,7 +32,7 @@ struct hwmon_device {
const char *name;
struct device dev;
const struct hwmon_chip_info *chip;
-
+ struct list_head tzdata;
struct attribute_group group;
const struct attribute_group **groups;
};
@@ -55,12 +56,12 @@ struct hwmon_device_attribute {
/*
* Thermal zone information
- * In addition to the reference to the hwmon device,
- * also provides the sensor index.
*/
struct hwmon_thermal_data {
+ struct list_head node; /* hwmon tzdata list entry */
struct device *dev; /* Reference to hwmon device */
int index; /* sensor index */
+ struct thermal_zone_device *tzd;/* thermal zone device */
};
static ssize_t
@@ -156,10 +157,17 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
};
+static void hwmon_thermal_remove_sensor(void *data)
+{
+ list_del(data);
+}
+
static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
struct hwmon_thermal_data *tdata;
struct thermal_zone_device *tzd;
+ int err;
tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
if (!tdata)
@@ -177,13 +185,68 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
return PTR_ERR(tzd);
+ err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
+ if (err)
+ return err;
+
+ tdata->tzd = tzd;
+ list_add(&tdata->node, &hwdev->tzdata);
+
return 0;
}
+
+static int hwmon_thermal_register_sensors(struct device *dev)
+{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+ const struct hwmon_chip_info *chip = hwdev->chip;
+ const struct hwmon_channel_info **info = chip->info;
+ void *drvdata = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 1; info[i]; i++) {
+ int j;
+
+ if (info[i]->type != hwmon_temp)
+ continue;
+
+ for (j = 0; info[i]->config[j]; j++) {
+ int err;
+
+ if (!(info[i]->config[j] & HWMON_T_INPUT) ||
+ !chip->ops->is_visible(drvdata, hwmon_temp,
+ hwmon_temp_input, j))
+ continue;
+
+ err = hwmon_thermal_add_sensor(dev, j);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void hwmon_thermal_notify(struct device *dev, int index)
+{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+ struct hwmon_thermal_data *tzdata;
+
+ list_for_each_entry(tzdata, &hwdev->tzdata, node) {
+ if (tzdata->index == index) {
+ thermal_zone_device_update(tzdata->tzd,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+ }
+}
+
#else
-static int hwmon_thermal_add_sensor(struct device *dev, int index)
+static int hwmon_thermal_register_sensors(struct device *dev)
{
return 0;
}
+
+static void hwmon_thermal_notify(struct device *dev, int index) { }
+
#endif /* IS_REACHABLE(CONFIG_THERMAL) && ... */
static int hwmon_attr_base(enum hwmon_sensor_types type)
@@ -511,6 +574,35 @@ static const int __templates_size[] = {
[hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates),
};
+int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ char sattr[MAX_SYSFS_ATTR_NAME_LENGTH];
+ const char * const *templates;
+ const char *template;
+ int base;
+
+ if (type >= ARRAY_SIZE(__templates))
+ return -EINVAL;
+ if (attr >= __templates_size[type])
+ return -EINVAL;
+
+ templates = __templates[type];
+ template = templates[attr];
+
+ base = hwmon_attr_base(type);
+
+ scnprintf(sattr, MAX_SYSFS_ATTR_NAME_LENGTH, template, base + channel);
+ sysfs_notify(&dev->kobj, NULL, sattr);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
+ if (type == hwmon_temp)
+ hwmon_thermal_notify(dev, channel);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hwmon_notify_event);
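
A minimal caller-side sketch of the new helper (editorial example; the foo_* names are made up, and the call signature is the one exported above):

#include <linux/hwmon.h>
#include <linux/interrupt.h>

struct foo_hwmon {
	struct device *hwmon_dev;	/* returned by hwmon registration */
};

/*
 * Forward a temp1 max-alarm raised in a threaded IRQ handler to
 * user-space pollers and to any bound thermal zone.
 */
static irqreturn_t foo_alarm_isr(int irq, void *data)
{
	struct foo_hwmon *foo = data;

	hwmon_notify_event(foo->hwmon_dev, hwmon_temp,
			   hwmon_temp_max_alarm, 0);

	return IRQ_HANDLED;
}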
+
static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
{
int i, n;
@@ -596,7 +688,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
{
struct hwmon_device *hwdev;
struct device *hdev;
- int i, j, err, id;
+ int i, err, id;
/* Complain about invalid characters in hwmon name attribute */
if (name && (!strlen(name) || strpbrk(name, "-* \t\n")))
@@ -661,33 +753,19 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (err)
goto free_hwmon;
+ INIT_LIST_HEAD(&hwdev->tzdata);
+
if (dev && dev->of_node && chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
- const struct hwmon_channel_info **info = chip->info;
-
- for (i = 1; info[i]; i++) {
- if (info[i]->type != hwmon_temp)
- continue;
-
- for (j = 0; info[i]->config[j]; j++) {
- if (!chip->ops->is_visible(drvdata, hwmon_temp,
- hwmon_temp_input, j))
- continue;
- if (info[i]->config[j] & HWMON_T_INPUT) {
- err = hwmon_thermal_add_sensor(hdev, j);
- if (err) {
- device_unregister(hdev);
- /*
- * Don't worry about hwdev;
- * hwmon_dev_release(), called
- * from device_unregister(),
- * will free it.
- */
- goto ida_remove;
- }
- }
- }
+ err = hwmon_thermal_register_sensors(hdev);
+ if (err) {
+ device_unregister(hdev);
+ /*
+ * Don't worry about hwdev; hwmon_dev_release(), called
+ * from device_unregister(), will free it.
+ */
+ goto ida_remove;
}
}
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e9e78c0b7212..55d474ec7c35 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -74,6 +74,17 @@
#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9)
#define INA226_SHIFT_AVG(val) ((val) << 9)
+/* bit number of alert functions in Mask/Enable Register */
+#define INA226_SHUNT_OVER_VOLTAGE_BIT 15
+#define INA226_SHUNT_UNDER_VOLTAGE_BIT 14
+#define INA226_BUS_OVER_VOLTAGE_BIT 13
+#define INA226_BUS_UNDER_VOLTAGE_BIT 12
+#define INA226_POWER_OVER_LIMIT_BIT 11
+
+/* bit mask for alert config bits of Mask/Enable Register */
+#define INA226_ALERT_CONFIG_MASK 0xFC00
+#define INA226_ALERT_FUNCTION_FLAG BIT(4)
+
/* common attrs, ina226 attrs and NULL */
#define INA2XX_MAX_ATTRIBUTE_GROUPS 3
@@ -303,6 +314,145 @@ static ssize_t ina2xx_value_show(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
+static int ina226_reg_to_alert(struct ina2xx_data *data, u8 bit, u16 regval)
+{
+ int reg;
+
+ switch (bit) {
+ case INA226_SHUNT_OVER_VOLTAGE_BIT:
+ case INA226_SHUNT_UNDER_VOLTAGE_BIT:
+ reg = INA2XX_SHUNT_VOLTAGE;
+ break;
+ case INA226_BUS_OVER_VOLTAGE_BIT:
+ case INA226_BUS_UNDER_VOLTAGE_BIT:
+ reg = INA2XX_BUS_VOLTAGE;
+ break;
+ case INA226_POWER_OVER_LIMIT_BIT:
+ reg = INA2XX_POWER;
+ break;
+ default:
+ /* programmer goofed */
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ return ina2xx_get_value(data, reg, regval);
+}
+
+/*
+ * Turns alert limit values into register values.
+ * Opposite of the formula in ina2xx_get_value().
+ */
+static s16 ina226_alert_to_reg(struct ina2xx_data *data, u8 bit, int val)
+{
+ switch (bit) {
+ case INA226_SHUNT_OVER_VOLTAGE_BIT:
+ case INA226_SHUNT_UNDER_VOLTAGE_BIT:
+ val *= data->config->shunt_div;
+ return clamp_val(val, SHRT_MIN, SHRT_MAX);
+ case INA226_BUS_OVER_VOLTAGE_BIT:
+ case INA226_BUS_UNDER_VOLTAGE_BIT:
+ val = (val * 1000) << data->config->bus_voltage_shift;
+ val = DIV_ROUND_CLOSEST(val, data->config->bus_voltage_lsb);
+ return clamp_val(val, 0, SHRT_MAX);
+ case INA226_POWER_OVER_LIMIT_BIT:
+ val = DIV_ROUND_CLOSEST(val, data->power_lsb_uW);
+ return clamp_val(val, 0, USHRT_MAX);
+ default:
+ /* programmer goofed */
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
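
For orientation, a worked instance of the bus-voltage branch (assuming this driver's INA226 configuration of bus_voltage_lsb = 1250 and bus_voltage_shift = 0; those tables sit outside this hunk):

/*
 * Hypothetical example: writing 12000 (mV) to in1_crit gives
 *
 *   reg = DIV_ROUND_CLOSEST(12000 * 1000 << 0, 1250) = 9600
 *
 * which is then clamped to [0, SHRT_MAX].
 */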
+
+static ssize_t ina226_alert_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ int regval;
+ int val = 0;
+ int ret;
+
+ mutex_lock(&data->config_lock);
+ ret = regmap_read(data->regmap, INA226_MASK_ENABLE, &regval);
+ if (ret)
+ goto abort;
+
+ if (regval & BIT(attr->index)) {
+ ret = regmap_read(data->regmap, INA226_ALERT_LIMIT, &regval);
+ if (ret)
+ goto abort;
+ val = ina226_reg_to_alert(data, attr->index, regval);
+ }
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", val);
+abort:
+ mutex_unlock(&data->config_lock);
+ return ret;
+}
+
+static ssize_t ina226_alert_store(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Clear all alerts first to avoid accidentally triggering ALERT pin
+ * due to register write sequence. Then, only enable the alert
+ * if the value is non-zero.
+ */
+ mutex_lock(&data->config_lock);
+ ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE,
+ INA226_ALERT_CONFIG_MASK, 0);
+ if (ret < 0)
+ goto abort;
+
+ ret = regmap_write(data->regmap, INA226_ALERT_LIMIT,
+ ina226_alert_to_reg(data, attr->index, val));
+ if (ret < 0)
+ goto abort;
+
+ if (val != 0) {
+ ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE,
+ INA226_ALERT_CONFIG_MASK,
+ BIT(attr->index));
+ if (ret < 0)
+ goto abort;
+ }
+
+ ret = count;
+abort:
+ mutex_unlock(&data->config_lock);
+ return ret;
+}
+
+static ssize_t ina226_alarm_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ int regval;
+ int alarm = 0;
+ int ret;
+
+ ret = regmap_read(data->regmap, INA226_MASK_ENABLE, &regval);
+ if (ret)
+ return ret;
+
+ alarm = (regval & BIT(attr->index)) &&
+ (regval & INA226_ALERT_FUNCTION_FLAG);
+ return snprintf(buf, PAGE_SIZE, "%d\n", alarm);
+}
+
/*
* In order to keep calibration register value fixed, the product
* of current_lsb and shunt_resistor should also be fixed and equal
@@ -392,15 +542,38 @@ static ssize_t ina226_interval_show(struct device *dev,
/* shunt voltage */
static SENSOR_DEVICE_ATTR_RO(in0_input, ina2xx_value, INA2XX_SHUNT_VOLTAGE);
+/* shunt voltage over/under voltage alert setting and alarm */
+static SENSOR_DEVICE_ATTR_RW(in0_crit, ina226_alert,
+ INA226_SHUNT_OVER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RW(in0_lcrit, ina226_alert,
+ INA226_SHUNT_UNDER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RO(in0_crit_alarm, ina226_alarm,
+ INA226_SHUNT_OVER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RO(in0_lcrit_alarm, ina226_alarm,
+ INA226_SHUNT_UNDER_VOLTAGE_BIT);
/* bus voltage */
static SENSOR_DEVICE_ATTR_RO(in1_input, ina2xx_value, INA2XX_BUS_VOLTAGE);
+/* bus voltage over/under voltage alert setting and alarm */
+static SENSOR_DEVICE_ATTR_RW(in1_crit, ina226_alert,
+ INA226_BUS_OVER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RW(in1_lcrit, ina226_alert,
+ INA226_BUS_UNDER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RO(in1_crit_alarm, ina226_alarm,
+ INA226_BUS_OVER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RO(in1_lcrit_alarm, ina226_alarm,
+ INA226_BUS_UNDER_VOLTAGE_BIT);
/* calculated current */
static SENSOR_DEVICE_ATTR_RO(curr1_input, ina2xx_value, INA2XX_CURRENT);
/* calculated power */
static SENSOR_DEVICE_ATTR_RO(power1_input, ina2xx_value, INA2XX_POWER);
+/* over-limit power alert setting and alarm */
+static SENSOR_DEVICE_ATTR_RW(power1_crit, ina226_alert,
+ INA226_POWER_OVER_LIMIT_BIT);
+static SENSOR_DEVICE_ATTR_RO(power1_crit_alarm, ina226_alarm,
+ INA226_POWER_OVER_LIMIT_BIT);
/* shunt resistance */
static SENSOR_DEVICE_ATTR_RW(shunt_resistor, ina2xx_shunt, INA2XX_CALIBRATION);
@@ -423,6 +596,16 @@ static const struct attribute_group ina2xx_group = {
};
static struct attribute *ina226_attrs[] = {
+ &sensor_dev_attr_in0_crit.dev_attr.attr,
+ &sensor_dev_attr_in0_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in0_lcrit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit.dev_attr.attr,
+ &sensor_dev_attr_in1_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_lcrit_alarm.dev_attr.attr,
+ &sensor_dev_attr_power1_crit.dev_attr.attr,
+ &sensor_dev_attr_power1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_update_interval.dev_attr.attr,
NULL,
};
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 4122e59f0bb4..ae2b84263a44 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -25,7 +25,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/of_device.h>
-
+#include <linux/acpi.h>
#define DRVNAME "lm70"
@@ -148,18 +148,50 @@ static const struct of_device_id lm70_of_ids[] = {
MODULE_DEVICE_TABLE(of, lm70_of_ids);
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id lm70_acpi_ids[] = {
+ {
+ .id = "LM000070",
+ .driver_data = LM70_CHIP_LM70,
+ },
+ {
+ .id = "TMP00121",
+ .driver_data = LM70_CHIP_TMP121,
+ },
+ {
+ .id = "LM000071",
+ .driver_data = LM70_CHIP_LM71,
+ },
+ {
+ .id = "LM000074",
+ .driver_data = LM70_CHIP_LM74,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids);
+#endif
+
static int lm70_probe(struct spi_device *spi)
{
- const struct of_device_id *match;
+ const struct of_device_id *of_match;
struct device *hwmon_dev;
struct lm70 *p_lm70;
int chip;
- match = of_match_device(lm70_of_ids, &spi->dev);
- if (match)
- chip = (int)(uintptr_t)match->data;
- else
- chip = spi_get_device_id(spi)->driver_data;
+ of_match = of_match_device(lm70_of_ids, &spi->dev);
+ if (of_match)
+ chip = (int)(uintptr_t)of_match->data;
+ else {
+#ifdef CONFIG_ACPI
+ const struct acpi_device_id *acpi_match;
+
+ acpi_match = acpi_match_device(lm70_acpi_ids, &spi->dev);
+ if (acpi_match)
+ chip = (int)(uintptr_t)acpi_match->driver_data;
+ else
+#endif
+ chip = spi_get_device_id(spi)->driver_data;
+ }
/* signaling is SPI_MODE_0 */
if (spi->mode & (SPI_CPOL | SPI_CPHA))
@@ -195,6 +227,7 @@ static struct spi_driver lm70_driver = {
.driver = {
.name = "lm70",
.of_match_table = of_match_ptr(lm70_of_ids),
+ .acpi_match_table = ACPI_PTR(lm70_acpi_ids),
},
.id_table = lm70_ids,
.probe = lm70_probe,
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 5e6392294c03..ba0be48aeadd 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -797,8 +797,10 @@ static int lm75_detect(struct i2c_client *new_client,
/* First check for LM75A */
if (i2c_smbus_read_byte_data(new_client, 7) == LM75A_ID) {
- /* LM75A returns 0xff on unused registers so
- just to be sure we check for that too. */
+ /*
+ * LM75A returns 0xff on unused registers so
+ * just to be sure we check for that too.
+ */
if (i2c_smbus_read_byte_data(new_client, 4) != 0xff
|| i2c_smbus_read_byte_data(new_client, 5) != 0xff
|| i2c_smbus_read_byte_data(new_client, 6) != 0xff)
@@ -849,6 +851,7 @@ static int lm75_suspend(struct device *dev)
{
int status;
struct i2c_client *client = to_i2c_client(dev);
+
status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
@@ -863,6 +866,7 @@ static int lm75_resume(struct device *dev)
{
int status;
struct i2c_client *client = to_i2c_client(dev);
+
status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index b614e6328566..a398171162a8 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -1,17 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- lm75.h - Part of lm_sensors, Linux kernel modules for hardware
- monitoring
- Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>
-
-*/
+ * lm75.h - Part of lm_sensors, Linux kernel modules for hardware monitoring
+ * Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>
+ */
/*
- This file contains common code for encoding/decoding LM75 type
- temperature readings, which are emulated by many of the chips
- we support. As the user is unlikely to load more than one driver
- which contains this code, we don't worry about the wasted space.
-*/
+ * This file contains common code for encoding/decoding LM75 type
+ * temperature readings, which are emulated by many of the chips
+ * we support. As the user is unlikely to load more than one driver
+ * which contains this code, we don't worry about the wasted space.
+ */
#include <linux/kernel.h>
@@ -20,18 +18,23 @@
#define LM75_TEMP_MAX 125000
#define LM75_SHUTDOWN 0x01
-/* TEMP: 0.001C/bit (-55C to +125C)
- REG: (0.5C/bit, two's complement) << 7 */
+/*
+ * TEMP: 0.001C/bit (-55C to +125C)
+ * REG: (0.5C/bit, two's complement) << 7
+ */
static inline u16 LM75_TEMP_TO_REG(long temp)
{
int ntemp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
+
ntemp += (ntemp < 0 ? -250 : 250);
return (u16)((ntemp / 500) << 7);
}
static inline int LM75_TEMP_FROM_REG(u16 reg)
{
- /* use integer division instead of equivalent right shift to
- guarantee arithmetic shift and preserve the sign */
+ /*
+ * use integer division instead of equivalent right shift to
+ * guarantee arithmetic shift and preserve the sign
+ */
return ((s16)reg / 128) * 500;
}
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 9b3c9f390ef8..7bdc664af55b 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -35,6 +35,14 @@
* explicitly as max6659, or if its address is not 0x4c.
* These chips lack the remote temperature offset feature.
*
+ * This driver also supports the MAX6654 chip made by Maxim. This chip can
+ * be at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is
+ * otherwise similar to MAX6657/MAX6658/MAX6659. Extended range is available
+ * by setting the configuration register accordingly, and is done during
+ * initialization. Extended precision is only available at conversion rates
+ * of 1 Hz and slower. Note that extended precision is not enabled by
+ * default, as this driver initializes all chips to 2 Hz by design.
+ *
* This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
* MAX6692 chips made by Maxim. These are again similar to the LM86,
* but they use unsigned temperature values and can report temperatures
@@ -94,8 +102,8 @@
* have address 0x4d.
* MAX6647 has address 0x4e.
* MAX6659 can have address 0x4c, 0x4d or 0x4e.
- * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
- * 0x4c, 0x4d or 0x4e.
+ * MAX6654, MAX6680, and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29,
+ * 0x2a, 0x2b, 0x4c, 0x4d or 0x4e.
* SA56004 can have address 0x48 through 0x4F.
*/
@@ -104,7 +112,7 @@ static const unsigned short normal_i2c[] = {
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
- max6646, w83l771, max6696, sa56004, g781, tmp451 };
+ max6646, w83l771, max6696, sa56004, g781, tmp451, max6654 };
/*
* The LM90 registers
@@ -145,7 +153,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_REG_R_TCRIT_HYST 0x21
#define LM90_REG_W_TCRIT_HYST 0x21
-/* MAX6646/6647/6649/6657/6658/6659/6695/6696 registers */
+/* MAX6646/6647/6649/6654/6657/6658/6659/6695/6696 registers */
#define MAX6657_REG_R_LOCAL_TEMPL 0x11
#define MAX6696_REG_R_STATUS2 0x12
@@ -209,6 +217,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "max6646", max6646 },
{ "max6647", max6646 },
{ "max6649", max6646 },
+ { "max6654", max6654 },
{ "max6657", max6657 },
{ "max6658", max6657 },
{ "max6659", max6659 },
@@ -270,6 +279,10 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = {
.data = (void *)max6646
},
{
+ .compatible = "dallas,max6654",
+ .data = (void *)max6654
+ },
+ {
.compatible = "dallas,max6657",
.data = (void *)max6657
},
@@ -367,6 +380,11 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
+ [max6654] = {
+ .alert_alarms = 0x7c,
+ .max_convrate = 7,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ },
[max6657] = {
.flags = LM90_PAUSE_FOR_CONFIG,
.alert_alarms = 0x7c,
@@ -1557,6 +1575,16 @@ static int lm90_detect(struct i2c_client *client,
&& (config1 & 0x3f) == 0x00
&& convrate <= 0x07) {
name = "max6646";
+ } else
+ /*
+ * The chip_id of the MAX6654 holds the revision of the chip.
+ * The lowest 3 bits of the config1 register are unused and
+ * should return zero when read.
+ */
+ if (chip_id == 0x08
+ && (config1 & 0x07) == 0x00
+ && convrate <= 0x07) {
+ name = "max6654";
}
} else
if (address == 0x4C
@@ -1661,6 +1689,15 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
config |= 0x18;
/*
+ * Put the MAX6654 into extended range (0x20), extending the minimum
+ * range from 0 degrees to -64 degrees. Note that extended resolution is not
+ * possible on the MAX6654 unless conversion rate is set to 1 Hz or
+ * slower, which is intentionally not done by default.
+ */
+ if (data->kind == max6654)
+ config |= 0x20;
+
+ /*
* Select external channel 0 for max6695/96
*/
if (data->kind == max6696)
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 7efa6bfef060..e7e1ddc1d631 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -2047,7 +2047,7 @@ store_temp_beep(struct device *dev, struct device_attribute *attr,
static umode_t nct6775_in_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int in = index / 5; /* voltage index */
@@ -2253,7 +2253,7 @@ store_fan_pulses(struct device *dev, struct device_attribute *attr,
static umode_t nct6775_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int fan = index / 6; /* fan index */
int nr = index % 6; /* attribute index */
@@ -2440,7 +2440,7 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
static umode_t nct6775_temp_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int temp = index / 10; /* temp index */
int nr = index % 10; /* attribute index */
@@ -3257,7 +3257,7 @@ store_auto_temp(struct device *dev, struct device_attribute *attr,
static umode_t nct6775_pwm_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int pwm = index / 36; /* pwm index */
int nr = index % 36; /* attribute index */
@@ -3459,7 +3459,7 @@ static SENSOR_DEVICE_ATTR(beep_enable, S_IWUSR | S_IRUGO, show_beep,
static umode_t nct6775_other_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
if (index == 0 && !data->have_vid)
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 2e97e56c72c7..570df8eb5272 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -679,7 +679,7 @@ static struct attribute *nct7802_temp_attrs[] = {
static umode_t nct7802_temp_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct7802_data *data = dev_get_drvdata(dev);
unsigned int reg;
int err;
@@ -778,7 +778,7 @@ static struct attribute *nct7802_in_attrs[] = {
static umode_t nct7802_in_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct7802_data *data = dev_get_drvdata(dev);
unsigned int reg;
int err;
@@ -853,7 +853,7 @@ static struct attribute *nct7802_fan_attrs[] = {
static umode_t nct7802_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct7802_data *data = dev_get_drvdata(dev);
int fan = index / 4; /* 4 attributes per fan */
unsigned int reg;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 1f5743d68984..b0425694f702 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -8,6 +8,9 @@
* Copyright (c) 2019 Advantech
* Author: Amy.Shih <amy.shih@advantech.com.tw>
*
+ * Copyright (c) 2020 Advantech
+ * Author: Yuechao Zhao <yuechao.zhao@advantech.com.cn>
+ *
* Supports the following chips:
*
* Chip #vin #fan #pwm #temp #dts chip ID
@@ -20,6 +23,7 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/hwmon.h>
+#include <linux/watchdog.h>
#define VENDOR_ID_REG 0x7A /* Any bank */
#define NUVOTON_ID 0x50
@@ -41,6 +45,7 @@
#define FANCTL_MAX 4 /* Counted from 1 */
#define TCPU_MAX 8 /* Counted from 1 */
#define TEMP_MAX 4 /* Counted from 1 */
+#define SMI_STS_MAX 10 /* Counted from 1 */
#define VT_ADC_CTRL0_REG 0x20 /* Bank 0 */
#define VT_ADC_CTRL1_REG 0x21 /* Bank 0 */
@@ -87,18 +92,42 @@
#define FANCTL1_FMR_REG 0x00 /* Bank 3; 1 reg per channel */
#define FANCTL1_OUT_REG 0x10 /* Bank 3; 1 reg per channel */
+#define WDT_LOCK_REG 0xE0 /* W/O Lock Watchdog Register */
+#define WDT_EN_REG 0xE1 /* R/O Watchdog Enable Register */
+#define WDT_STS_REG 0xE2 /* R/O Watchdog Status Register */
+#define WDT_TIMER_REG 0xE3 /* R/W Watchdog Timer Register */
+#define WDT_SOFT_EN 0x55 /* Enable soft watchdog timer */
+#define WDT_SOFT_DIS 0xAA /* Disable soft watchdog timer */
+
#define VOLT_MONITOR_MODE 0x0
#define THERMAL_DIODE_MODE 0x1
#define THERMISTOR_MODE 0x3
#define ENABLE_TSI BIT(1)
+#define WATCHDOG_TIMEOUT 1 /* 1 minute default timeout */
+
+/* The timeout range is 1-255 minutes */
+#define MIN_TIMEOUT (1 * 60)
+#define MAX_TIMEOUT (255 * 60)
+
+static int timeout;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in minutes. 1 <= timeout <= 255, default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
static const unsigned short normal_i2c[] = {
0x2d, 0x2e, I2C_CLIENT_END
};
struct nct7904_data {
struct i2c_client *client;
+ struct watchdog_device wdt;
struct mutex bank_lock;
int bank_sel;
u32 fanin_mask;
@@ -361,6 +390,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret, temp;
unsigned int reg1, reg2, reg3;
+ s8 temps;
switch (attr) {
case hwmon_temp_input:
@@ -466,7 +496,8 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
if (ret < 0)
return ret;
- *val = ret * 1000;
+ temps = ret;
+ *val = temps * 1000;
return 0;
}
@@ -889,6 +920,95 @@ static const struct hwmon_chip_info nct7904_chip_info = {
.info = nct7904_info,
};
+/*
+ * Watchdog Function
+ */
+static int nct7904_wdt_start(struct watchdog_device *wdt)
+{
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+
+ /* Enable soft watchdog timer */
+ return nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_EN);
+}
+
+static int nct7904_wdt_stop(struct watchdog_device *wdt)
+{
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+
+ return nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_DIS);
+}
+
+static int nct7904_wdt_set_timeout(struct watchdog_device *wdt,
+ unsigned int timeout)
+{
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+ /*
+ * The watchdog timer on the NCT7904 has a resolution of one minute,
+ * and wdt->timeout must reflect the timeout actually programmed into
+ * the chip, so round the requested value down to a full minute:
+ * wdt->timeout = timeout / 60 * 60.
+ * For example, a requested timeout of 119 seconds is rounded down to
+ * an actual timeout of 60 seconds, so wdt->timeout is set to 60.
+ */
+ wdt->timeout = timeout / 60 * 60;
+
+ return nct7904_write_reg(data, BANK_0, WDT_TIMER_REG,
+ wdt->timeout / 60);
+}
+
+static int nct7904_wdt_ping(struct watchdog_device *wdt)
+{
+ /*
+ * Note:
+ * The NCT7904 does not support rewriting WDT_TIMER_REG while the
+ * watchdog is running, so the watchdog must be disabled before the
+ * timer is reloaded and then enabled again.
+ */
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+ int ret;
+
+ /* Disable soft watchdog timer */
+ ret = nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_DIS);
+ if (ret < 0)
+ return ret;
+
+ /* feed watchdog */
+ ret = nct7904_write_reg(data, BANK_0, WDT_TIMER_REG, wdt->timeout / 60);
+ if (ret < 0)
+ return ret;
+
+ /* Enable soft watchdog timer */
+ return nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_EN);
+}
+
+static unsigned int nct7904_wdt_get_timeleft(struct watchdog_device *wdt)
+{
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+ int ret;
+
+ ret = nct7904_read_reg(data, BANK_0, WDT_TIMER_REG);
+ if (ret < 0)
+ return 0;
+
+ return ret * 60;
+}
+
+static const struct watchdog_info nct7904_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .identity = "nct7904 watchdog",
+};
+
+static const struct watchdog_ops nct7904_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = nct7904_wdt_start,
+ .stop = nct7904_wdt_stop,
+ .ping = nct7904_wdt_ping,
+ .set_timeout = nct7904_wdt_set_timeout,
+ .get_timeleft = nct7904_wdt_get_timeleft,
+};
+
static int nct7904_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1009,10 +1129,36 @@ static int nct7904_probe(struct i2c_client *client,
data->fan_mode[i] = ret;
}
+ /* Read all of the SMI status registers to clear alarms */
+ for (i = 0; i < SMI_STS_MAX; i++) {
+ ret = nct7904_read_reg(data, BANK_0, SMI_STS1_REG + i);
+ if (ret < 0)
+ return ret;
+ }
+
hwmon_dev =
devm_hwmon_device_register_with_info(dev, client->name, data,
&nct7904_chip_info, NULL);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ ret = PTR_ERR_OR_ZERO(hwmon_dev);
+ if (ret)
+ return ret;
+
+ /* Watchdog initialization */
+ data->wdt.ops = &nct7904_wdt_ops;
+ data->wdt.info = &nct7904_wdt_info;
+
+ data->wdt.timeout = WATCHDOG_TIMEOUT * 60; /* Set default timeout */
+ data->wdt.min_timeout = MIN_TIMEOUT;
+ data->wdt.max_timeout = MAX_TIMEOUT;
+ data->wdt.parent = &client->dev;
+
+ watchdog_init_timeout(&data->wdt, timeout * 60, &client->dev);
+ watchdog_set_nowayout(&data->wdt, nowayout);
+ watchdog_set_drvdata(&data->wdt, data);
+
+ watchdog_stop_on_unregister(&data->wdt);
+
+ return devm_watchdog_register_device(dev, &data->wdt);
}
static const struct i2c_device_id nct7904_id[] = {
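A minimal user-space sketch of exercising the new watchdog through the standard /dev/watchdog interface; the device path and timeout value are illustrative assumptions, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 119;	/* seconds; the driver rounds down to 60 */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* nct7904_wdt_set_timeout() */
	printf("effective timeout: %d s\n", timeout);	/* prints 60 */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* nct7904_wdt_ping() */
	write(fd, "V", 1);	/* magic close; honoured unless nowayout */
	close(fd);		/* nct7904_wdt_stop() */
	return 0;
}

Note that because the ping path disables the watchdog, reloads WDT_TIMER_REG and re-enables it, the timer is briefly disarmed on every keepalive; that is a limitation of the chip noted in the driver.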
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index de12a565006d..a337195b1c39 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -146,6 +146,15 @@ config SENSORS_MAX16064
This driver can also be built as a module. If so, the module will
be called max16064.
+config SENSORS_MAX16601
+ tristate "Maxim MAX16601"
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX16601.
+
+ This driver can also be built as a module. If so, the module will
+ be called max16601.
+
config SENSORS_MAX20730
tristate "Maxim MAX20730, MAX20734, MAX20743"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 5feb45806123..c4b15db996ad 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX16601) += max16601.o
obj-$(CONFIG_SENSORS_MAX20730) += max20730.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
diff --git a/drivers/hwmon/pmbus/max16601.c b/drivers/hwmon/pmbus/max16601.c
new file mode 100644
index 000000000000..51cdfaf9023c
--- /dev/null
+++ b/drivers/hwmon/pmbus/max16601.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware monitoring driver for Maxim MAX16601
+ *
+ * Implementation notes:
+ *
+ * This chip supports two rails, VCORE and VSA. Telemetry information for the
+ * two rails is reported in two subsequent I2C addresses. The driver
+ * instantiates a dummy I2C client at the second I2C address to report
+ * information for the VSA rail in a single instance of the driver.
+ * Telemetry for the VSA rail is reported to the PMBus core in PMBus page 2.
+ *
+ * The chip reports input current using two separate methods. The input current
+ * reported with the standard READ_IIN command is derived from the output
+ * current; it is reported to the PMBus core with PMBus page 0. The input
+ * current measured by a dedicated sensor element is reported with PMBus
+ * page 1.
+ *
+ * The chip supports reading per-phase temperatures and per-phase input/output
+ * currents for VCORE. Telemetry is reported in vendor specific registers.
+ * The driver translates the vendor specific register values to PMBus standard
+ * register values and reports per-phase information in PMBus page 0.
+ *
+ * Copyright 2019, 2020 Google LLC.
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define REG_SETPT_DVID 0xd1
+#define DAC_10MV_MODE BIT(4)
+#define REG_IOUT_AVG_PK 0xee
+#define REG_IIN_SENSOR 0xf1
+#define REG_TOTAL_INPUT_POWER 0xf2
+#define REG_PHASE_ID 0xf3
+#define CORE_RAIL_INDICATOR BIT(7)
+#define REG_PHASE_REPORTING 0xf4
+
+struct max16601_data {
+ struct pmbus_driver_info info;
+ struct i2c_client *vsa;
+ int iout_avg_pkg;
+};
+
+#define to_max16601_data(x) container_of(x, struct max16601_data, info)
+
+static int max16601_read_byte(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct max16601_data *data = to_max16601_data(info);
+
+ if (page > 0) {
+ if (page == 2) /* VSA */
+ return i2c_smbus_read_byte_data(data->vsa, reg);
+ return -EOPNOTSUPP;
+ }
+ return -ENODATA;
+}
+
+static int max16601_read_word(struct i2c_client *client, int page, int phase,
+ int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct max16601_data *data = to_max16601_data(info);
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ int ret;
+
+ switch (page) {
+ case 0: /* VCORE */
+ if (phase == 0xff)
+ return -ENODATA;
+ switch (reg) {
+ case PMBUS_READ_IIN:
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ ret = i2c_smbus_write_byte_data(client, REG_PHASE_ID,
+ phase);
+ if (ret)
+ return ret;
+ ret = i2c_smbus_read_block_data(client,
+ REG_PHASE_REPORTING,
+ buf);
+ if (ret < 0)
+ return ret;
+ if (ret < 6)
+ return -EIO;
+ switch (reg) {
+ case PMBUS_READ_TEMPERATURE_1:
+ return buf[1] << 8 | buf[0];
+ case PMBUS_READ_IOUT:
+ return buf[3] << 8 | buf[2];
+ case PMBUS_READ_IIN:
+ return buf[5] << 8 | buf[4];
+ default:
+ break;
+ }
+ }
+ return -EOPNOTSUPP;
+ case 1: /* VCORE, read IIN/PIN from sensor element */
+ switch (reg) {
+ case PMBUS_READ_IIN:
+ return i2c_smbus_read_word_data(client, REG_IIN_SENSOR);
+ case PMBUS_READ_PIN:
+ return i2c_smbus_read_word_data(client,
+ REG_TOTAL_INPUT_POWER);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+ case 2: /* VSA */
+ switch (reg) {
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = i2c_smbus_read_word_data(data->vsa,
+ REG_IOUT_AVG_PK);
+ if (ret < 0)
+ return ret;
+ if (sign_extend32(ret, 10) >
+ sign_extend32(data->iout_avg_pkg, 10))
+ data->iout_avg_pkg = ret;
+ return data->iout_avg_pkg;
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ return 0;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_READ_IIN:
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ case PMBUS_STATUS_WORD:
+ return i2c_smbus_read_word_data(data->vsa, reg);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int max16601_write_byte(struct i2c_client *client, int page, u8 reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct max16601_data *data = to_max16601_data(info);
+
+ if (page == 2) {
+ if (reg == PMBUS_CLEAR_FAULTS)
+ return i2c_smbus_write_byte(data->vsa, reg);
+ return -EOPNOTSUPP;
+ }
+ return -ENODATA;
+}
+
+static int max16601_write_word(struct i2c_client *client, int page, int reg,
+ u16 value)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct max16601_data *data = to_max16601_data(info);
+
+ switch (page) {
+ case 0: /* VCORE */
+ return -ENODATA;
+ case 1: /* VCORE IIN/PIN from sensor element */
+ default:
+ return -EOPNOTSUPP;
+ case 2: /* VSA */
+ switch (reg) {
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ data->iout_avg_pkg = 0xfc00;
+ return 0;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ return i2c_smbus_write_word_data(data->vsa, reg, value);
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+}
+
+static int max16601_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(client, REG_SETPT_DVID);
+ if (reg < 0)
+ return reg;
+ if (reg & DAC_10MV_MODE)
+ info->vrm_version[0] = vr13;
+ else
+ info->vrm_version[0] = vr12;
+
+ return 0;
+}
+
+static struct pmbus_driver_info max16601_info = {
+ .pages = 3,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = vid,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_PAGE_VIRTUAL | PMBUS_PHASE_VIRTUAL,
+ .func[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_PAGE_VIRTUAL,
+ .func[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_PAGE_VIRTUAL,
+ .phases[0] = 8,
+ .pfunc[0] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
+ .pfunc[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
+ .pfunc[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
+ .pfunc[3] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
+ .pfunc[4] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
+ .pfunc[5] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
+ .pfunc[6] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
+ .pfunc[7] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
+ .identify = max16601_identify,
+ .read_byte_data = max16601_read_byte,
+ .read_word_data = max16601_read_word,
+ .write_byte = max16601_write_byte,
+ .write_word_data = max16601_write_word,
+};
+
+static void max16601_remove(void *_data)
+{
+ struct max16601_data *data = _data;
+
+ i2c_unregister_device(data->vsa);
+}
+
+static int max16601_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ struct max16601_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf);
+ if (ret < 0)
+ return -ENODEV;
+
+ /* PMBUS_IC_DEVICE_ID is expected to return "MAX16601y.xx" */
+ if (ret < 11 || strncmp(buf, "MAX16601", 8)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported chip '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, REG_PHASE_ID);
+ if (ret < 0)
+ return ret;
+ if (!(ret & CORE_RAIL_INDICATOR)) {
+ dev_err(dev,
+ "Driver must be instantiated on CORE rail I2C address\n");
+ return -ENODEV;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->iout_avg_pkg = 0xfc00;
+ data->vsa = i2c_new_dummy_device(client->adapter, client->addr + 1);
+ if (IS_ERR(data->vsa)) {
+ dev_err(dev, "Failed to register VSA client\n");
+ return PTR_ERR(data->vsa);
+ }
+ ret = devm_add_action_or_reset(dev, max16601_remove, data);
+ if (ret)
+ return ret;
+
+ data->info = max16601_info;
+
+ return pmbus_do_probe(client, id, &data->info);
+}
+
+static const struct i2c_device_id max16601_id[] = {
+ {"max16601", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max16601_id);
+
+static struct i2c_driver max16601_driver = {
+ .driver = {
+ .name = "max16601",
+ },
+ .probe = max16601_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max16601_id,
+};
+
+module_i2c_driver(max16601_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX16601");
+MODULE_LICENSE("GPL v2");
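For reference, sign_extend32() as defined in <linux/bitops.h>, where "index" names the sign bit. The driver treats REG_IOUT_AVG_PK as an 11-bit two's-complement value, hence sign_extend32(ret, 10) in the peak comparison above, and 0xfc00 (the most negative such value, -1024) as the reset value for the peak history:

static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;

	return (__s32)(value << shift) >> shift;
}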
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 8d321bf7d15b..a420877ba533 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -109,8 +109,8 @@ struct pmbus_data {
bool has_status_word; /* device uses STATUS_WORD register */
int (*read_status)(struct i2c_client *client, int page);
- u8 currpage;
- u8 currphase; /* current phase, 0xff for all */
+ s16 currpage; /* current page, -1 for unknown/unset */
+ s16 currphase; /* current phase, 0xff for all, -1 for unknown/unset */
};
struct pmbus_debugfs_entry {
@@ -2529,8 +2529,8 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
if (pdata)
data->flags = pdata->flags;
data->info = info;
- data->currpage = 0xff;
- data->currphase = 0xfe;
+ data->currpage = -1;
+ data->currphase = -1;
ret = pmbus_init_common(client, data, info);
if (ret < 0)
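A sketch of the page-caching logic this change affects, paraphrasing pmbus_set_page() rather than quoting it: with currpage widened to s16, the initial -1 can never compare equal to a real page number, so the first access after probe always issues a PAGE write instead of potentially aliasing with page 0xff:

static int set_page_cached(struct i2c_client *client,
			   struct pmbus_data *data, int page)
{
	int ret;

	if (data->currpage == page)	/* never true while currpage == -1 */
		return 0;

	ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
	if (ret < 0)
		return ret;

	data->currpage = page;
	return 0;
}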
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 83e841be1081..02dbb5ca3bcf 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -107,7 +107,7 @@ config CORESIGHT_CPU_DEBUG
can quickly get to know program counter (PC), secure state,
exception level, etc. Before use debugging functionality, platform
needs to ensure the clock domain and power domain are enabled
- properly, please refer Documentation/trace/coresight-cpu-debug.rst
+ properly, please refer Documentation/trace/coresight/coresight-cpu-debug.rst
for detailed description and the example for usage.
config CORESIGHT_CTI
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 0e3e72f0f510..19497d1d92bf 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -2,7 +2,8 @@
#
# Makefile for CoreSight drivers.
#
-obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o coresight-platform.o
+obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o \
+ coresight-platform.o coresight-sysfs.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
coresight-tmc-etf.o \
coresight-tmc-etr.o
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
index b44d83142b62..98f830c6ed50 100644
--- a/drivers/hwtracing/coresight/coresight-cti-platform.c
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -2,11 +2,17 @@
/*
* Copyright (c) 2019, The Linaro Limited. All rights reserved.
*/
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/slab.h>
#include <dt-bindings/arm/coresight-cti-dt.h>
-#include <linux/of.h>
#include "coresight-cti.h"
+#include "coresight-priv.h"
/* Number of CTI signals in the v8 architecturally defined connection */
#define NR_V8PE_IN_SIGS 2
@@ -120,7 +126,7 @@ static int cti_plat_create_v8_etm_connection(struct device *dev,
/* Can optionally have an etm node - return if not */
cs_fwnode = fwnode_find_reference(root_fwnode, CTI_DT_CSDEV_ASSOC, 0);
- if (IS_ERR_OR_NULL(cs_fwnode))
+ if (IS_ERR(cs_fwnode))
return 0;
/* allocate memory */
@@ -393,7 +399,7 @@ static int cti_plat_create_connection(struct device *dev,
/* associated device ? */
cs_fwnode = fwnode_find_reference(fwnode,
CTI_DT_CSDEV_ASSOC, 0);
- if (!IS_ERR_OR_NULL(cs_fwnode)) {
+ if (!IS_ERR(cs_fwnode)) {
assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode,
&csdev);
fwnode_handle_put(cs_fwnode);
@@ -429,8 +435,7 @@ static int cti_plat_create_impdef_connections(struct device *dev,
}
/* get the hardware configuration & connection data. */
-int cti_plat_get_hw_data(struct device *dev,
- struct cti_drvdata *drvdata)
+static int cti_plat_get_hw_data(struct device *dev, struct cti_drvdata *drvdata)
{
int rc = 0;
struct cti_device *cti_dev = &drvdata->ctidev;
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index 1f8fb7c15e80..392757f3a019 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -4,7 +4,13 @@
* Author: Mike Leach <mike.leach@linaro.org>
*/
+#include <linux/atomic.h>
#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
#include "coresight-cti.h"
@@ -1036,8 +1042,8 @@ static int cti_create_con_sysfs_attr(struct device *dev,
enum cti_conn_attr_type attr_type,
int attr_idx)
{
- struct dev_ext_attribute *eattr = 0;
- char *name = 0;
+ struct dev_ext_attribute *eattr;
+ char *name;
eattr = devm_kzalloc(dev, sizeof(struct dev_ext_attribute),
GFP_KERNEL);
@@ -1139,7 +1145,7 @@ static int cti_create_con_attr_set(struct device *dev, int con_idx,
}
/* create the array of group pointers for the CTI sysfs groups */
-int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
+static int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
{
int nr_groups;
@@ -1156,8 +1162,8 @@ int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
int cti_create_cons_sysfs(struct device *dev, struct cti_drvdata *drvdata)
{
struct cti_device *ctidev = &drvdata->ctidev;
- int err = 0, con_idx = 0, i;
- struct cti_trig_con *tc = NULL;
+ int err, con_idx = 0, i;
+ struct cti_trig_con *tc;
err = cti_create_cons_groups(dev, ctidev);
if (err)
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index aa6e0249bd70..40387d58c8e7 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -4,7 +4,22 @@
* Author: Mike Leach <mike.leach@linaro.org>
*/
+#include <linux/amba/bus.h>
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/coresight.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpuhotplug.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/spinlock.h>
+
+#include "coresight-priv.h"
#include "coresight-cti.h"
/**
@@ -19,7 +34,7 @@
*/
/* net of CTI devices connected via CTM */
-LIST_HEAD(ect_net);
+static LIST_HEAD(ect_net);
/* protect the list */
static DEFINE_MUTEX(ect_mutex);
@@ -27,6 +42,12 @@ static DEFINE_MUTEX(ect_mutex);
#define csdev_to_cti_drvdata(csdev) \
dev_get_drvdata(csdev->dev.parent)
+/* power management handling */
+static int nr_cti_cpu;
+
+/* quick lookup list for CPU bound CTIs when power handling */
+static struct cti_drvdata *cti_cpu_drvdata[NR_CPUS];
+
/*
* CTI naming. CTI bound to cores will have the name cti_cpu<N> where
* N is the CPU ID. System CTIs will have the name cti_sys<I> where I
@@ -116,6 +137,35 @@ cti_err_not_enabled:
return rc;
}
+/* re-enable CTI on CPU when using CPU hotplug */
+static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ struct device *dev = &drvdata->csdev->dev;
+
+ pm_runtime_get_sync(dev->parent);
+ spin_lock(&drvdata->spinlock);
+ config->hw_powered = true;
+
+ /* no need to do anything if no enable request */
+ if (!atomic_read(&drvdata->config.enable_req_count))
+ goto cti_hp_not_enabled;
+
+ /* try to claim the device */
+ if (coresight_claim_device(drvdata->base))
+ goto cti_hp_not_enabled;
+
+ cti_write_all_hw_regs(drvdata);
+ config->hw_enabled = true;
+ spin_unlock(&drvdata->spinlock);
+ return;
+
+ /* did not re-enable due to no claim / no request */
+cti_hp_not_enabled:
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put(dev->parent);
+}
+
/* disable hardware */
static int cti_disable_hw(struct cti_drvdata *drvdata)
{
@@ -442,6 +492,34 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
return err;
}
+static bool cti_add_sysfs_link(struct cti_drvdata *drvdata,
+ struct cti_trig_con *tc)
+{
+ struct coresight_sysfs_link link_info;
+ int link_err = 0;
+
+ link_info.orig = drvdata->csdev;
+ link_info.orig_name = tc->con_dev_name;
+ link_info.target = tc->con_dev;
+ link_info.target_name = dev_name(&drvdata->csdev->dev);
+
+ link_err = coresight_add_sysfs_link(&link_info);
+ if (link_err)
+ dev_warn(&drvdata->csdev->dev,
+ "Failed to set CTI sysfs link %s<=>%s\n",
+ link_info.orig_name, link_info.target_name);
+ return !link_err;
+}
+
+static void cti_remove_sysfs_link(struct cti_trig_con *tc)
+{
+ struct coresight_sysfs_link link_info;
+
+ link_info.orig_name = tc->con_dev_name;
+ link_info.target = tc->con_dev;
+ coresight_remove_sysfs_link(&link_info);
+}
+
/*
* Look for a matching connection device name in the list of connections.
* If found then swap in the csdev name, set trig con association pointer
@@ -452,6 +530,8 @@ cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
struct coresight_device *csdev)
{
struct cti_trig_con *tc;
+ struct cti_drvdata *drvdata = container_of(ctidev, struct cti_drvdata,
+ ctidev);
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev_name) {
@@ -459,7 +539,12 @@ cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
/* match: so swap in csdev name & dev */
tc->con_dev_name = dev_name(&csdev->dev);
tc->con_dev = csdev;
- return true;
+ /* try to set sysfs link */
+ if (cti_add_sysfs_link(drvdata, tc))
+ return true;
+ /* link failed - remove CTI reference */
+ tc->con_dev = NULL;
+ break;
}
}
}
@@ -522,6 +607,7 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
ctidev = &ctidrv->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev == csdev->ect_dev) {
+ cti_remove_sysfs_link(tc);
tc->con_dev = NULL;
break;
}
@@ -543,10 +629,16 @@ static void cti_update_conn_xrefs(struct cti_drvdata *drvdata)
struct cti_device *ctidev = &drvdata->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
- if (tc->con_dev)
- /* set tc->con_dev->ect_dev */
- coresight_set_assoc_ectdev_mutex(tc->con_dev,
+ if (tc->con_dev) {
+ /* if we can set the sysfs link */
+ if (cti_add_sysfs_link(drvdata, tc))
+ /* set the CTI/csdev association */
+ coresight_set_assoc_ectdev_mutex(tc->con_dev,
drvdata->csdev);
+ else
+ /* otherwise remove reference from CTI */
+ tc->con_dev = NULL;
+ }
}
}
@@ -559,7 +651,113 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
if (tc->con_dev) {
coresight_set_assoc_ectdev_mutex(tc->con_dev,
NULL);
+ cti_remove_sysfs_link(tc);
+ tc->con_dev = NULL;
+ }
+ }
+}
+
+/* CTI PM callbacks */
+static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct cti_drvdata *drvdata;
+ unsigned int cpu = smp_processor_id();
+ int notify_res = NOTIFY_OK;
+
+ if (!cti_cpu_drvdata[cpu])
+ return NOTIFY_OK;
+
+ drvdata = cti_cpu_drvdata[cpu];
+
+ if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
+ return NOTIFY_BAD;
+
+ spin_lock(&drvdata->spinlock);
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* CTI regs all static - we have a copy & nothing to save */
+ drvdata->config.hw_powered = false;
+ if (drvdata->config.hw_enabled)
+ coresight_disclaim_device(drvdata->base);
+ break;
+
+ case CPU_PM_ENTER_FAILED:
+ drvdata->config.hw_powered = true;
+ if (drvdata->config.hw_enabled) {
+ if (coresight_claim_device(drvdata->base))
+ drvdata->config.hw_enabled = false;
+ }
+ break;
+
+ case CPU_PM_EXIT:
+ /* write hardware registers to re-enable. */
+ drvdata->config.hw_powered = true;
+ drvdata->config.hw_enabled = false;
+
+ /* check enable reference count to enable HW */
+ if (atomic_read(&drvdata->config.enable_req_count)) {
+ /* check we can claim the device as we re-power */
+ if (coresight_claim_device(drvdata->base))
+ goto cti_notify_exit;
+
+ drvdata->config.hw_enabled = true;
+ cti_write_all_hw_regs(drvdata);
+ }
+ break;
+
+ default:
+ notify_res = NOTIFY_DONE;
+ break;
+ }
+
+cti_notify_exit:
+ spin_unlock(&drvdata->spinlock);
+ return notify_res;
+}
+
+static struct notifier_block cti_cpu_pm_nb = {
+ .notifier_call = cti_cpu_pm_notify,
+};
+
+/* CPU HP handlers */
+static int cti_starting_cpu(unsigned int cpu)
+{
+ struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
+
+ if (!drvdata)
+ return 0;
+
+ cti_cpuhp_enable_hw(drvdata);
+ return 0;
+}
+
+static int cti_dying_cpu(unsigned int cpu)
+{
+ struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
+
+ if (!drvdata)
+ return 0;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->config.hw_powered = false;
+ coresight_disclaim_device(drvdata->base);
+ spin_unlock(&drvdata->spinlock);
+ return 0;
+}
+
+/* release PM registrations */
+static void cti_pm_release(struct cti_drvdata *drvdata)
+{
+ if (drvdata->ctidev.cpu >= 0) {
+ if (--nr_cti_cpu == 0) {
+ cpu_pm_unregister_notifier(&cti_cpu_pm_nb);
+
+ cpuhp_remove_state_nocalls(
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
}
+ cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL;
}
}
@@ -578,12 +776,12 @@ int cti_disable(struct coresight_device *csdev)
return cti_disable_hw(drvdata);
}
-const struct coresight_ops_ect cti_ops_ect = {
+static const struct coresight_ops_ect cti_ops_ect = {
.enable = cti_enable,
.disable = cti_disable,
};
-const struct coresight_ops cti_ops = {
+static const struct coresight_ops cti_ops = {
.ect_ops = &cti_ops_ect,
};
@@ -598,6 +796,7 @@ static void cti_device_release(struct device *dev)
mutex_lock(&ect_mutex);
cti_remove_conn_xrefs(drvdata);
+ cti_pm_release(drvdata);
/* remove from the list */
list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, node) {
@@ -673,6 +872,24 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
goto err_out;
}
+ /* setup CPU power management handling for CPU bound CTI devices. */
+ if (drvdata->ctidev.cpu >= 0) {
+ cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata;
+ if (!nr_cti_cpu++) {
+ cpus_read_lock();
+ ret = cpuhp_setup_state_nocalls_cpuslocked(
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ "arm/coresight_cti:starting",
+ cti_starting_cpu, cti_dying_cpu);
+
+ if (!ret)
+ ret = cpu_pm_register_notifier(&cti_cpu_pm_nb);
+ cpus_read_unlock();
+ if (ret)
+ goto err_out;
+ }
+ }
+
/* create dynamic attributes for connections */
ret = cti_create_cons_sysfs(dev, drvdata);
if (ret) {
@@ -711,6 +928,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_out:
+ cti_pm_release(drvdata);
return ret;
}
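The CPU PM handling added above follows the standard cpu_pm notifier pattern; a minimal sketch with illustrative names, not taken from the patch. The callback runs on the CPU that is entering or leaving the low-power state, with interrupts disabled:

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int my_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:		/* context about to be lost */
	case CPU_PM_ENTER_FAILED:	/* entry aborted, context intact */
	case CPU_PM_EXIT:		/* re-program powered-up hardware */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block my_cpu_pm_nb = {
	.notifier_call = my_cpu_pm_notify,
};

/* paired with cpu_pm_unregister_notifier(&my_cpu_pm_nb) on teardown */
static int __init my_init(void)
{
	return cpu_pm_register_notifier(&my_cpu_pm_nb);
}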
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
index 004df3ab9dd0..acf7b545e6b9 100644
--- a/drivers/hwtracing/coresight/coresight-cti.h
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -7,8 +7,14 @@
#ifndef _CORESIGHT_CORESIGHT_CTI_H
#define _CORESIGHT_CORESIGHT_CTI_H
-#include <asm/local.h>
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
#include "coresight-priv.h"
/*
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 3810290e6d07..03e3f2590191 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -717,7 +717,7 @@ static const struct attribute_group coresight_etb_mgmt_group = {
.name = "mgmt",
};
-const struct attribute_group *coresight_etb_groups[] = {
+static const struct attribute_group *coresight_etb_groups[] = {
&coresight_etb_group,
&coresight_etb_mgmt_group,
NULL,
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index e2cb6873c3f2..bf22dcfd3327 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -504,7 +504,7 @@ static int etm_enable_perf(struct coresight_device *csdev,
static int etm_enable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct etm_enable_arg arg = { 0 };
+ struct etm_enable_arg arg = { };
int ret;
spin_lock(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index ce41482431f9..b673e738bc9a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -205,7 +205,7 @@ static ssize_t reset_store(struct device *dev,
* started state. ARM recommends start-stop logic is set before
* each trace run.
*/
- config->vinst_ctrl |= BIT(0);
+ config->vinst_ctrl = BIT(0);
if (drvdata->nr_addr_cmp == true) {
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* SSSTATUS, bit[9] */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a90d757f7043..747afc875f91 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -412,7 +412,7 @@ out:
static int etm4_enable_sysfs(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct etm4_enable_arg arg = { 0 };
+ struct etm4_enable_arg arg = { };
int ret;
spin_lock(&drvdata->spinlock);
@@ -791,7 +791,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
config->ts_ctrl = 0x0;
/* TRCVICTLR::EVENT = 0x01, select the always on logic */
- config->vinst_ctrl |= BIT(0);
+ config->vinst_ctrl = BIT(0);
}
static u64 etm4_get_ns_access_type(struct etmv4_config *config)
@@ -894,17 +894,8 @@ static void etm4_set_start_stop_filter(struct etmv4_config *config,
static void etm4_set_default_filter(struct etmv4_config *config)
{
- u64 start, stop;
-
- /*
- * Configure address range comparator '0' to encompass all
- * possible addresses.
- */
- start = 0x0;
- stop = ~0x0;
-
- etm4_set_comparator_filter(config, start, stop,
- ETM_DEFAULT_ADDR_COMP);
+ /* The "trace everything" default filter is achieved by not filtering */
+ config->viiectlr = 0x0;
/*
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
@@ -925,11 +916,9 @@ static void etm4_set_default(struct etmv4_config *config)
/*
* Make default initialisation trace everything
*
- * Select the "always true" resource selector on the
- * "Enablign Event" line and configure address range comparator
- * '0' to trace all the possible address range. From there
- * configure the "include/exclude" engine to include address
- * range comparator '0'.
+ * This is done with a minimal default configuration sufficient to
+ * enable full instruction trace, with the "trace all" default filter
+ * achieved by applying no filtering.
*/
etm4_set_default_config(config);
etm4_set_default_filter(config);
@@ -1527,6 +1516,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_arch_supported:
+ etmdrvdata[drvdata->cpu] = NULL;
if (--etm4_count == 0) {
etm4_cpu_pm_unregister();
@@ -1552,10 +1542,13 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
+ CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
- CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
- CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
+ CS_AMBA_UCI_ID(0x000bb803, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
+ CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 43418a2126ff..e4912abda3aa 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -87,6 +87,7 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
int *nr_inport, int *nr_outport)
{
struct device_node *ep = NULL;
+ struct of_endpoint endpoint;
int in = 0, out = 0;
do {
@@ -94,10 +95,16 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
if (!ep)
break;
- if (of_coresight_legacy_ep_is_input(ep))
- in++;
- else
- out++;
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ continue;
+
+ if (of_coresight_legacy_ep_is_input(ep)) {
+ in = (endpoint.port + 1 > in) ?
+ endpoint.port + 1 : in;
+ } else {
+ out = (endpoint.port + 1) > out ?
+ endpoint.port + 1 : out;
+ }
} while (ep);
@@ -137,9 +144,16 @@ of_coresight_count_ports(struct device_node *port_parent)
{
int i = 0;
struct device_node *ep = NULL;
+ struct of_endpoint endpoint;
+
+ while ((ep = of_graph_get_next_endpoint(port_parent, ep))) {
+ /* Defer error handling to parsing */
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ continue;
+ if (endpoint.port + 1 > i)
+ i = endpoint.port + 1;
+ }
- while ((ep = of_graph_get_next_endpoint(port_parent, ep)))
- i++;
return i;
}
@@ -191,14 +205,12 @@ static int of_coresight_get_cpu(struct device *dev)
* Parses the local port, remote device name and the remote port.
*
* Returns :
- * 1 - If the parsing is successful and a connection record
- * was created for an output connection.
* 0 - If the parsing completed without any fatal errors.
* -Errno - Fatal error, abort the scanning.
*/
static int of_coresight_parse_endpoint(struct device *dev,
struct device_node *ep,
- struct coresight_connection *conn)
+ struct coresight_platform_data *pdata)
{
int ret = 0;
struct of_endpoint endpoint, rendpoint;
@@ -206,6 +218,7 @@ static int of_coresight_parse_endpoint(struct device *dev,
struct device_node *rep = NULL;
struct device *rdev = NULL;
struct fwnode_handle *rdev_fwnode;
+ struct coresight_connection *conn;
do {
/* Parse the local port details */
@@ -232,6 +245,13 @@ static int of_coresight_parse_endpoint(struct device *dev,
break;
}
+ conn = &pdata->conns[endpoint.port];
+ if (conn->child_fwnode) {
+ dev_warn(dev, "Duplicate output port %d\n",
+ endpoint.port);
+ ret = -EINVAL;
+ break;
+ }
conn->outport = endpoint.port;
/*
* Hold the refcount to the target device. This could be
@@ -244,7 +264,6 @@ static int of_coresight_parse_endpoint(struct device *dev,
conn->child_fwnode = fwnode_handle_get(rdev_fwnode);
conn->child_port = rendpoint.port;
/* Connection record updated */
- ret = 1;
} while (0);
of_node_put(rparent);
@@ -258,7 +277,6 @@ static int of_get_coresight_platform_data(struct device *dev,
struct coresight_platform_data *pdata)
{
int ret = 0;
- struct coresight_connection *conn;
struct device_node *ep = NULL;
const struct device_node *parent = NULL;
bool legacy_binding = false;
@@ -287,8 +305,6 @@ static int of_get_coresight_platform_data(struct device *dev,
dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
}
- conn = pdata->conns;
-
/* Iterate through each output port to discover topology */
while ((ep = of_graph_get_next_endpoint(parent, ep))) {
/*
@@ -300,15 +316,9 @@ static int of_get_coresight_platform_data(struct device *dev,
if (legacy_binding && of_coresight_legacy_ep_is_input(ep))
continue;
- ret = of_coresight_parse_endpoint(dev, ep, conn);
- switch (ret) {
- case 1:
- conn++; /* Fall through */
- case 0:
- break;
- default:
+ ret = of_coresight_parse_endpoint(dev, ep, pdata);
+ if (ret)
return ret;
- }
}
return 0;
@@ -501,7 +511,7 @@ static inline bool acpi_validate_dsd_graph(const union acpi_object *graph)
}
/* acpi_get_dsd_graph - Find the _DSD Graph property for the given device. */
-const union acpi_object *
+static const union acpi_object *
acpi_get_dsd_graph(struct acpi_device *adev)
{
int i;
@@ -564,7 +574,7 @@ acpi_validate_coresight_graph(const union acpi_object *cs_graph)
* Returns the pointer to the CoreSight Graph Package when found. Otherwise
* returns NULL.
*/
-const union acpi_object *
+static const union acpi_object *
acpi_get_coresight_graph(struct acpi_device *adev)
{
const union acpi_object *graph_list, *graph;
@@ -647,6 +657,16 @@ static int acpi_coresight_parse_link(struct acpi_device *adev,
* coresight_remove_match().
*/
conn->child_fwnode = fwnode_handle_get(&r_adev->fwnode);
+ } else if (dir == ACPI_CORESIGHT_LINK_SLAVE) {
+ /*
+ * We are only interested in the port number
+ * for the input ports at this component.
+ * Store the port number in child_port.
+ */
+ conn->child_port = fields[0].integer.value;
+ } else {
+ /* Invalid direction */
+ return -EINVAL;
}
return dir;
@@ -692,10 +712,20 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
return dir;
if (dir == ACPI_CORESIGHT_LINK_MASTER) {
- pdata->nr_outport++;
+ if (ptr->outport > pdata->nr_outport)
+ pdata->nr_outport = ptr->outport;
ptr++;
} else {
- pdata->nr_inport++;
+ WARN_ON(pdata->nr_inport == ptr->child_port);
+ /*
+ * We do not track input port connections for a device.
+ * However, we need the highest input port number described,
+ * which can be recorded now, and this connection record can
+ * then be reused for an output connection. Hence, do not
+ * advance ptr for input connections.
+ */
+ if (ptr->child_port > pdata->nr_inport)
+ pdata->nr_inport = ptr->child_port;
}
}
@@ -704,8 +734,13 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
return rc;
/* Copy the connection information to the final location */
- for (i = 0; i < pdata->nr_outport; i++)
- pdata->conns[i] = conns[i];
+ for (i = 0; conns + i < ptr; i++) {
+ int port = conns[i].outport;
+
+ /* Duplicate output port */
+ WARN_ON(pdata->conns[port].child_fwnode);
+ pdata->conns[port] = conns[i];
+ }
devm_kfree(&adev->dev, conns);
return 0;
@@ -822,7 +857,7 @@ coresight_get_platform_data(struct device *dev)
error:
if (!IS_ERR_OR_NULL(pdata))
/* Cleanup the connection information */
- coresight_release_platform_data(pdata);
+ coresight_release_platform_data(NULL, pdata);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_get_platform_data);
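The counting hunks above switch from counting endpoints to taking the highest port number seen plus one, since firmware may describe ports sparsely or list several endpoints per port. The rule, as a standalone illustrative helper that is not part of the patch:

static int nr_ports(const int *port_of_endpoint, int nr_endpoints)
{
	int i, nr = 0;

	for (i = 0; i < nr_endpoints; i++)
		if (port_of_endpoint[i] + 1 > nr)
			nr = port_of_endpoint[i] + 1;

	return nr;
}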
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 890f9a5c97c6..36c943ae94d5 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -153,6 +153,15 @@ struct coresight_device *coresight_get_sink_by_id(u32 id);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
+int coresight_add_sysfs_link(struct coresight_sysfs_link *info);
+void coresight_remove_sysfs_link(struct coresight_sysfs_link *info);
+int coresight_create_conns_sysfs_group(struct coresight_device *csdev);
+void coresight_remove_conns_sysfs_group(struct coresight_device *csdev);
+int coresight_make_links(struct coresight_device *orig,
+ struct coresight_connection *conn,
+ struct coresight_device *target);
+void coresight_remove_links(struct coresight_device *orig,
+ struct coresight_connection *conn);
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
extern int etm_readl_cp14(u32 off, unsigned int *val);
@@ -206,12 +215,16 @@ cti_remove_assoc_from_csdev(struct coresight_device *csdev) {}
/* extract the data value from a UCI structure given amba_id pointer. */
static inline void *coresight_get_uci_data(const struct amba_id *id)
{
- if (id->data)
- return ((struct amba_cs_uci_id *)(id->data))->data;
- return 0;
+ struct amba_cs_uci_id *uci_id = id->data;
+
+ if (!uci_id)
+ return NULL;
+
+ return uci_id->data;
}
-void coresight_release_platform_data(struct coresight_platform_data *pdata);
+void coresight_release_platform_data(struct coresight_device *csdev,
+ struct coresight_platform_data *pdata);
struct coresight_device *
coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
new file mode 100644
index 000000000000..82afeaf2ccc4
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include "coresight-priv.h"
+
+/*
+ * Connections group - links attribute.
+ * Count of created links between coresight components in the group.
+ */
+static ssize_t nr_links_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return sprintf(buf, "%d\n", csdev->nr_links);
+}
+static DEVICE_ATTR_RO(nr_links);
+
+static struct attribute *coresight_conns_attrs[] = {
+ &dev_attr_nr_links.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_conns_group = {
+ .attrs = coresight_conns_attrs,
+ .name = "connections",
+};
+
+/*
+ * Create connections group for CoreSight devices.
+ * This group will then be used to collate the sysfs links between
+ * devices.
+ */
+int coresight_create_conns_sysfs_group(struct coresight_device *csdev)
+{
+ int ret = 0;
+
+ if (!csdev)
+ return -EINVAL;
+
+ ret = sysfs_create_group(&csdev->dev.kobj, &coresight_conns_group);
+ if (ret)
+ return ret;
+
+ csdev->has_conns_grp = true;
+ return ret;
+}
+
+void coresight_remove_conns_sysfs_group(struct coresight_device *csdev)
+{
+ if (!csdev)
+ return;
+
+ if (csdev->has_conns_grp) {
+ sysfs_remove_group(&csdev->dev.kobj, &coresight_conns_group);
+ csdev->has_conns_grp = false;
+ }
+}
+
+int coresight_add_sysfs_link(struct coresight_sysfs_link *info)
+{
+ int ret = 0;
+
+ if (!info)
+ return -EINVAL;
+ if (!info->orig || !info->target ||
+ !info->orig_name || !info->target_name)
+ return -EINVAL;
+ if (!info->orig->has_conns_grp || !info->target->has_conns_grp)
+ return -EINVAL;
+
+ /* first link orig->target */
+ ret = sysfs_add_link_to_group(&info->orig->dev.kobj,
+ coresight_conns_group.name,
+ &info->target->dev.kobj,
+ info->orig_name);
+ if (ret)
+ return ret;
+
+ /* second link target->orig */
+ ret = sysfs_add_link_to_group(&info->target->dev.kobj,
+ coresight_conns_group.name,
+ &info->orig->dev.kobj,
+ info->target_name);
+
+ /* error in second link - remove first - otherwise inc counts */
+ if (ret) {
+ sysfs_remove_link_from_group(&info->orig->dev.kobj,
+ coresight_conns_group.name,
+ info->orig_name);
+ } else {
+ info->orig->nr_links++;
+ info->target->nr_links++;
+ }
+
+ return ret;
+}
+
+void coresight_remove_sysfs_link(struct coresight_sysfs_link *info)
+{
+ if (!info)
+ return;
+ if (!info->orig || !info->target ||
+ !info->orig_name || !info->target_name)
+ return;
+
+ sysfs_remove_link_from_group(&info->orig->dev.kobj,
+ coresight_conns_group.name,
+ info->orig_name);
+
+ sysfs_remove_link_from_group(&info->target->dev.kobj,
+ coresight_conns_group.name,
+ info->target_name);
+
+ info->orig->nr_links--;
+ info->target->nr_links--;
+}
+
+/*
+ * coresight_make_links: Make a link for a connection from a @orig
+ * device to @target, represented by @conn.
+ *
+ * e.g., devOrig[output_X] -> devTarget[input_Y] is represented
+ * as two symbolic links:
+ *
+ * /sys/.../devOrig/out:X -> /sys/.../devTarget/
+ * /sys/.../devTarget/in:Y -> /sys/.../devOrig/
+ *
+ * The link names are allocated from the device on which they appear,
+ * i.e. the "out" link on the master and the "in" link on the slave
+ * device. The link info is stored in the connection record to avoid
+ * reconstructing the names on removal.
+ */
+int coresight_make_links(struct coresight_device *orig,
+ struct coresight_connection *conn,
+ struct coresight_device *target)
+{
+ int ret = -ENOMEM;
+ char *outs = NULL, *ins = NULL;
+ struct coresight_sysfs_link *link = NULL;
+
+ do {
+ outs = devm_kasprintf(&orig->dev, GFP_KERNEL,
+ "out:%d", conn->outport);
+ if (!outs)
+ break;
+ ins = devm_kasprintf(&target->dev, GFP_KERNEL,
+ "in:%d", conn->child_port);
+ if (!ins)
+ break;
+ link = devm_kzalloc(&orig->dev,
+ sizeof(struct coresight_sysfs_link),
+ GFP_KERNEL);
+ if (!link)
+ break;
+
+ link->orig = orig;
+ link->target = target;
+ link->orig_name = outs;
+ link->target_name = ins;
+
+ ret = coresight_add_sysfs_link(link);
+ if (ret)
+ break;
+
+ conn->link = link;
+
+ /*
+ * Install the device connection. This also indicates that
+ * the links are operational on both ends.
+ */
+ conn->child_dev = target;
+ return 0;
+ } while (0);
+
+ return ret;
+}
+
+/*
+ * coresight_remove_links: Remove the sysfs links for a given connection @conn,
+ * from @orig device to @target device. See coresight_make_links() for more
+ * details.
+ */
+void coresight_remove_links(struct coresight_device *orig,
+ struct coresight_connection *conn)
+{
+ if (!orig || !conn->link)
+ return;
+
+ coresight_remove_sysfs_link(conn->link);
+
+ devm_kfree(&conn->child_dev->dev, conn->link->target_name);
+ devm_kfree(&orig->dev, conn->link->orig_name);
+ devm_kfree(&orig->dev, conn->link);
+ conn->link = NULL;
+ conn->child_dev = NULL;
+}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d0cc3985b72a..36cce2bfb744 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -596,13 +596,6 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
goto out;
}
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- ret = -EINVAL;
- goto out;
- }
-
/* Don't interfere if operated from Perf */
if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
@@ -616,8 +609,15 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ ret = -EINVAL;
+ goto out;
+ }
__tmc_etb_disable_hw(drvdata);
+ }
drvdata->reading = true;
out:
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 1cf82fa58289..39fba1d16e6e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -361,7 +361,7 @@ static const struct attribute_group coresight_tmc_mgmt_group = {
.name = "mgmt",
};
-const struct attribute_group *coresight_tmc_groups[] = {
+static const struct attribute_group *coresight_tmc_groups[] = {
&coresight_tmc_group,
&coresight_tmc_mgmt_group,
NULL,
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index c71553c09f8e..f3efbb3b2b4d 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1031,7 +1031,7 @@ static void coresight_device_release(struct device *dev)
static int coresight_orphan_match(struct device *dev, void *data)
{
- int i;
+ int i, ret = 0;
bool still_orphan = false;
struct coresight_device *csdev, *i_csdev;
struct coresight_connection *conn;
@@ -1053,49 +1053,62 @@ static int coresight_orphan_match(struct device *dev, void *data)
for (i = 0; i < i_csdev->pdata->nr_outport; i++) {
conn = &i_csdev->pdata->conns[i];
+ /* Skip the port if FW doesn't describe it */
+ if (!conn->child_fwnode)
+ continue;
/* We have found at least one orphan connection */
if (conn->child_dev == NULL) {
/* Does it match this newly added device? */
- if (conn->child_fwnode == csdev->dev.fwnode)
- conn->child_dev = csdev;
- else
+ if (conn->child_fwnode == csdev->dev.fwnode) {
+ ret = coresight_make_links(i_csdev,
+ conn, csdev);
+ if (ret)
+ return ret;
+ } else {
/* This component still has an orphan */
still_orphan = true;
+ }
}
}
i_csdev->orphan = still_orphan;
/*
- * Returning '0' ensures that all known component on the
- * bus will be checked.
+ * Returning '0' when no error was encountered ensures that all
+ * known components on the bus will be checked.
*/
return 0;
}
-static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
+static int coresight_fixup_orphan_conns(struct coresight_device *csdev)
{
- /*
- * No need to check for a return value as orphan connection(s)
- * are hooked-up with each newly added component.
- */
- bus_for_each_dev(&coresight_bustype, NULL,
+ return bus_for_each_dev(&coresight_bustype, NULL,
csdev, coresight_orphan_match);
}
-static void coresight_fixup_device_conns(struct coresight_device *csdev)
+static int coresight_fixup_device_conns(struct coresight_device *csdev)
{
- int i;
+ int i, ret = 0;
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_connection *conn = &csdev->pdata->conns[i];
+ if (!conn->child_fwnode)
+ continue;
conn->child_dev =
coresight_find_csdev_by_fwnode(conn->child_fwnode);
- if (!conn->child_dev)
+ if (conn->child_dev) {
+ ret = coresight_make_links(csdev, conn,
+ conn->child_dev);
+ if (ret)
+ break;
+ } else {
csdev->orphan = true;
+ }
}
+
+ return 0;
}
static int coresight_remove_match(struct device *dev, void *data)
@@ -1118,12 +1131,12 @@ static int coresight_remove_match(struct device *dev, void *data)
for (i = 0; i < iterator->pdata->nr_outport; i++) {
conn = &iterator->pdata->conns[i];
- if (conn->child_dev == NULL)
+ if (conn->child_dev == NULL || conn->child_fwnode == NULL)
continue;
if (csdev->dev.fwnode == conn->child_fwnode) {
iterator->orphan = true;
- conn->child_dev = NULL;
+ coresight_remove_links(iterator, conn);
/*
* Drop the reference to the handle for the remote
* device acquired in parsing the connections from
@@ -1213,16 +1226,27 @@ postcore_initcall(coresight_init);
* coresight_release_platform_data: Release references to the devices connected
* to the output port of this device.
*/
-void coresight_release_platform_data(struct coresight_platform_data *pdata)
+void coresight_release_platform_data(struct coresight_device *csdev,
+ struct coresight_platform_data *pdata)
{
int i;
+ struct coresight_connection *conns = pdata->conns;
for (i = 0; i < pdata->nr_outport; i++) {
- if (pdata->conns[i].child_fwnode) {
- fwnode_handle_put(pdata->conns[i].child_fwnode);
+ /* If we have made the links, remove them now */
+ if (csdev && conns[i].child_dev)
+ coresight_remove_links(csdev, &conns[i]);
+ /*
+ * Drop the refcount and clear the handle as this device
+ * is going away
+ */
+ if (conns[i].child_fwnode) {
+ fwnode_handle_put(conns[i].child_fwnode);
pdata->conns[i].child_fwnode = NULL;
}
}
+ if (csdev)
+ coresight_remove_conns_sysfs_group(csdev);
}
struct coresight_device *coresight_register(struct coresight_desc *desc)
@@ -1304,11 +1328,19 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
mutex_lock(&coresight_mutex);
- coresight_fixup_device_conns(csdev);
- coresight_fixup_orphan_conns(csdev);
- cti_add_assoc_to_csdev(csdev);
+ ret = coresight_create_conns_sysfs_group(csdev);
+ if (!ret)
+ ret = coresight_fixup_device_conns(csdev);
+ if (!ret)
+ ret = coresight_fixup_orphan_conns(csdev);
+ if (!ret)
+ cti_add_assoc_to_csdev(csdev);
mutex_unlock(&coresight_mutex);
+ if (ret) {
+ coresight_unregister(csdev);
+ return ERR_PTR(ret);
+ }
return csdev;
@@ -1316,7 +1348,7 @@ err_free_csdev:
kfree(csdev);
err_out:
/* Cleanup the connection information */
- coresight_release_platform_data(desc->pdata);
+ coresight_release_platform_data(NULL, desc->pdata);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_register);
@@ -1326,7 +1358,7 @@ void coresight_unregister(struct coresight_device *csdev)
etm_perf_del_symlink_sink(csdev);
/* Remove references of that device in the topology */
coresight_remove_conns(csdev);
- coresight_release_platform_data(csdev->pdata);
+ coresight_release_platform_data(csdev, csdev->pdata);
device_unregister(&csdev->dev);
}
EXPORT_SYMBOL_GPL(coresight_unregister);
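The reworked fixup paths lean on bus_for_each_dev() semantics: iteration stops as soon as the callback returns non-zero and that value is propagated to the caller, which is why coresight_orphan_match() now returns the error from coresight_make_links() instead of swallowing it. A minimal sketch of the pattern, with illustrative names:

static int visit(struct device *dev, void *data)
{
	dev_info(dev, "visited\n");
	return 0;	/* non-zero aborts the walk and is returned */
}

static int walk(struct bus_type *bus)
{
	return bus_for_each_dev(bus, NULL, NULL, visit);
}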
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index dff4e178c732..7f10312d1b88 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -542,7 +542,7 @@ int i2c_pca_add_numbered_bus(struct i2c_adapter *adap)
EXPORT_SYMBOL(i2c_pca_add_numbered_bus);
MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, "
- "Wolfram Sang <w.sang@pengutronix.de>");
+ "Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("I2C-Bus PCA9564/PCA9665 algorithm");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index f5c00f903df3..16ddc26c00e6 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -70,6 +70,7 @@
* @isr_mask: cached copy of local ISR enables.
* @isr_status: cached copy of local ISR status.
* @lock: spinlock for IRQ synchronization.
+ * @isr_mutex: mutex for IRQ thread.
*/
struct altr_i2c_dev {
void __iomem *base;
@@ -86,6 +87,7 @@ struct altr_i2c_dev {
u32 isr_mask;
u32 isr_status;
spinlock_t lock; /* IRQ synchronization */
+ struct mutex isr_mutex;
};
static void
@@ -245,10 +247,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
struct altr_i2c_dev *idev = _dev;
u32 status = idev->isr_status;
+ mutex_lock(&idev->isr_mutex);
if (!idev->msg) {
dev_warn(idev->dev, "unexpected interrupt\n");
altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ);
- return IRQ_HANDLED;
+ goto out;
}
read = (idev->msg->flags & I2C_M_RD) != 0;
@@ -301,6 +304,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
complete(&idev->msg_complete);
dev_dbg(idev->dev, "Message Complete\n");
}
+out:
+ mutex_unlock(&idev->isr_mutex);
return IRQ_HANDLED;
}
@@ -312,6 +317,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
u32 value;
u8 addr = i2c_8bit_addr_from_msg(msg);
+ mutex_lock(&idev->isr_mutex);
idev->msg = msg;
idev->msg_len = msg->len;
idev->buf = msg->buf;
@@ -336,6 +342,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
altr_i2c_int_enable(idev, imask, true);
altr_i2c_fill_tx_fifo(idev);
}
+ mutex_unlock(&idev->isr_mutex);
time_left = wait_for_completion_timeout(&idev->msg_complete,
ALTR_I2C_XFER_TIMEOUT);
@@ -409,6 +416,7 @@ static int altr_i2c_probe(struct platform_device *pdev)
idev->dev = &pdev->dev;
init_completion(&idev->msg_complete);
spin_lock_init(&idev->lock);
+ mutex_init(&idev->isr_mutex);
ret = device_property_read_u32(idev->dev, "fifo-size",
&idev->fifo_size);
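The new isr_mutex serialises the threaded interrupt handler against transfer setup: without it, the handler could observe idev->msg and the related bookkeeping half-initialised. A reduced, self-contained sketch of the pattern (types and names are illustrative):

#include <linux/i2c.h>
#include <linux/mutex.h>

struct xfer_ctx {
	struct mutex isr_mutex;
	struct i2c_msg *msg;
	u16 msg_len;
};

/* transfer setup side, cf. altr_i2c_xfer_msg() */
static void start_xfer(struct xfer_ctx *ctx, struct i2c_msg *msg)
{
	mutex_lock(&ctx->isr_mutex);
	ctx->msg = msg;			/* the threaded handler must not */
	ctx->msg_len = msg->len;	/* run between these two stores */
	mutex_unlock(&ctx->isr_mutex);
}

/* threaded handler side, cf. altr_i2c_isr() */
static bool handler_sees_msg(struct xfer_ctx *ctx)
{
	bool valid;

	mutex_lock(&ctx->isr_mutex);
	valid = ctx->msg != NULL;	/* consistent view under the lock */
	mutex_unlock(&ctx->isr_mutex);
	return valid;
}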
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index 0aba51a7df32..37b96ac9dfae 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -845,6 +845,18 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev,
PINCTRL_STATE_DEFAULT);
dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl,
"gpio");
+ if (IS_ERR(dev->pinctrl_pins_default) ||
+ IS_ERR(dev->pinctrl_pins_gpio)) {
+ dev_info(&pdev->dev, "pinctrl states incomplete for recovery\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The pins will be used as GPIOs, so inform pinctrl of this and
+ * switch to the GPIO state.
+ */
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
+
rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -855,9 +867,7 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev,
return -EPROBE_DEFER;
if (IS_ERR(rinfo->sda_gpiod) ||
- IS_ERR(rinfo->scl_gpiod) ||
- IS_ERR(dev->pinctrl_pins_default) ||
- IS_ERR(dev->pinctrl_pins_gpio)) {
+ IS_ERR(rinfo->scl_gpiod)) {
dev_info(&pdev->dev, "recovery information incomplete\n");
if (!IS_ERR(rinfo->sda_gpiod)) {
gpiod_put(rinfo->sda_gpiod);
@@ -867,9 +877,13 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev,
gpiod_put(rinfo->scl_gpiod);
rinfo->scl_gpiod = NULL;
}
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
return -EINVAL;
}
+ /* change the state of the pins back to their default state */
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
+
dev_info(&pdev->dev, "using scl, sda for recovery\n");
rinfo->prepare_recovery = at91_prepare_twi_recovery;
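For context, a sketch of the recovery hooks this state shuffling pairs with, assuming the same two pinctrl states; demo_* names are illustrative, not the driver's own:

struct demo_dev {
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default, *pins_gpio;
};

static void demo_prepare_recovery(struct i2c_adapter *adap)
{
	struct demo_dev *d = i2c_get_adapdata(adap);

	/* hand the pads to the GPIO function for bit-banged recovery */
	pinctrl_select_state(d->pinctrl, d->pins_gpio);
}

static void demo_unprepare_recovery(struct i2c_adapter *adap)
{
	struct demo_dev *d = i2c_get_adapdata(adap);

	/* give the pads back to the I2C controller */
	pinctrl_select_state(d->pinctrl, d->pins_default);
}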
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 5536673060cc..c429d664f655 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -357,12 +357,12 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
dev_pm_set_driver_flags(&pdev->dev,
DPM_FLAG_SMART_PREPARE |
- DPM_FLAG_LEAVE_SUSPENDED);
+ DPM_FLAG_MAY_SKIP_RESUME);
} else {
dev_pm_set_driver_flags(&pdev->dev,
DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED);
+ DPM_FLAG_MAY_SKIP_RESUME);
}
/* The code below assumes runtime PM to be disabled. */
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index a9c03f5c3482..4f333889489c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1439,9 +1439,9 @@ static int i801_add_mux(struct i801_priv *priv)
return -ENOMEM;
lookup->dev_id = "i2c-mux-gpio";
for (i = 0; i < mux_config->n_gpios; i++) {
- lookup->table[i].chip_label = mux_config->gpio_chip;
- lookup->table[i].chip_hwnum = mux_config->gpios[i];
- lookup->table[i].con_id = "mux";
+ lookup->table[i] = (struct gpiod_lookup)
+ GPIO_LOOKUP(mux_config->gpio_chip,
+ mux_config->gpios[i], "mux", 0);
}
gpiod_add_lookup_table(lookup);
priv->lookup = lookup;
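GPIO_LOOKUP() is shorthand for GPIO_LOOKUP_IDX(..., 0) and fills the same chip_label/chip_hwnum/con_id fields by name. For comparison, a minimal static table using the macro (the chip label here is illustrative):

/* needs <linux/gpio/machine.h> */
static struct gpiod_lookup_table demo_mux_gpios = {
	.dev_id = "i2c-mux-gpio",
	.table = {
		GPIO_LOOKUP("demo-gpiochip", 3, "mux", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("demo-gpiochip", 4, "mux", GPIO_ACTIVE_HIGH),
		{ }	/* terminator */
	},
};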
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index a66912782064..1f1442dfcad7 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -7,7 +7,7 @@
* Mux support by Rodolfo Giometti <giometti@enneenne.com> and
* Michael Lawnick <michael.lawnick.ext@nsn.com>
*
- * Copyright (C) 2013-2017 Wolfram Sang <wsa@the-dreams.de>
+ * Copyright (C) 2013-2017 Wolfram Sang <wsa@kernel.org>
*/
#define pr_fmt(fmt) "i2c-core: " fmt
@@ -338,8 +338,10 @@ static int i2c_device_probe(struct device *dev)
} else if (ACPI_COMPANION(dev)) {
irq = i2c_acpi_get_irq(client);
}
- if (irq == -EPROBE_DEFER)
- return irq;
+ if (irq == -EPROBE_DEFER) {
+ status = irq;
+ goto put_sync_adapter;
+ }
if (irq < 0)
irq = 0;
@@ -353,15 +355,19 @@ static int i2c_device_probe(struct device *dev)
*/
if (!driver->id_table &&
!i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
- !i2c_of_match_device(dev->driver->of_match_table, client))
- return -ENODEV;
+ !i2c_of_match_device(dev->driver->of_match_table, client)) {
+ status = -ENODEV;
+ goto put_sync_adapter;
+ }
if (client->flags & I2C_CLIENT_WAKE) {
int wakeirq;
wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
- if (wakeirq == -EPROBE_DEFER)
- return wakeirq;
+ if (wakeirq == -EPROBE_DEFER) {
+ status = wakeirq;
+ goto put_sync_adapter;
+ }
device_init_wakeup(&client->dev, true);
@@ -408,6 +414,10 @@ err_detach_pm_domain:
err_clear_wakeup_irq:
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
+put_sync_adapter:
+ if (client->flags & I2C_CLIENT_HOST_NOTIFY)
+ pm_runtime_put_sync(&client->adapter->dev);
+
return status;
}
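The shape of the fix: i2c_device_probe() takes a runtime-PM reference on the adapter for I2C_CLIENT_HOST_NOTIFY clients, so every early exit after that point must release it. A self-contained sketch of the balanced pattern, with demo_hw_init() as a hypothetical stand-in for the steps that can fail:

static int demo_probe(struct i2c_client *client)
{
	int status;

	if (client->flags & I2C_CLIENT_HOST_NOTIFY)
		pm_runtime_get_sync(&client->adapter->dev);

	status = demo_hw_init(client);	/* hypothetical init step */
	if (status)
		goto put_sync_adapter;

	return 0;

put_sync_adapter:
	/* drop the reference taken above on every failure path */
	if (client->flags & I2C_CLIENT_HOST_NOTIFY)
		pm_runtime_put_sync(&client->adapter->dev);
	return status;
}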
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 6787c1f71483..3ed74aa4b44b 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -5,7 +5,7 @@
* Copyright (C) 2008 Jochen Friedrich <jochen@scram.de>
* based on a previous patch from Jon Smirl <jonsmirl@gmail.com>
*
- * Copyright (C) 2013, 2018 Wolfram Sang <wsa@the-dreams.de>
+ * Copyright (C) 2013, 2018 Wolfram Sang <wsa@kernel.org>
*/
#include <dt-bindings/i2c/i2c.h>
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 0e16490eb3a1..5365199a31f4 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -272,6 +272,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
err_rollback_available:
device_remove_file(&pdev->dev, &dev_attr_available_masters);
err_rollback:
+ i2c_demux_deactivate_master(priv);
for (j = 0; j < i; j++) {
of_node_put(priv->chan[j].parent_np);
of_changeset_destroy(&priv->chan[j].chgset);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index d79cd6d54b3a..97f2e29265da 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1008,7 +1008,6 @@ static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
struct i3c_ccc_cmd_dest dest;
- unsigned int expected_len;
struct i3c_ccc_mrl *mrl;
struct i3c_ccc_cmd cmd;
int ret;
@@ -1024,22 +1023,23 @@ static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
dest.payload.len -= 1;
- expected_len = dest.payload.len;
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
if (ret)
goto out;
- if (dest.payload.len != expected_len) {
+ switch (dest.payload.len) {
+ case 3:
+ info->max_ibi_len = mrl->ibi_len;
+ fallthrough;
+ case 2:
+ info->max_read_len = be16_to_cpu(mrl->read_len);
+ break;
+ default:
ret = -EIO;
goto out;
}
- info->max_read_len = be16_to_cpu(mrl->read_len);
-
- if (info->bcr & I3C_BCR_IBI_PAYLOAD)
- info->max_ibi_len = mrl->ibi_len;
-
out:
i3c_ccc_cmd_dest_cleanup(&dest);
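The switch encodes the two legal GETMRL reply lengths: two bytes of maximum read length, plus an optional third byte of maximum IBI payload. A standalone sketch of the same parse, with hypothetical names:

#include <linux/types.h>
#include <asm/unaligned.h>

static int demo_parse_mrl(const u8 *buf, unsigned int len,
			  u16 *max_read_len, u8 *max_ibi_len)
{
	switch (len) {
	case 3:
		*max_ibi_len = buf[2];	/* only sent with IBI payload support */
		fallthrough;
	case 2:
		*max_read_len = get_unaligned_be16(buf);
		return 0;
	default:
		return -EIO;		/* any other length is a protocol error */
	}
}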
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index dcf8b51b47fd..7f17f8303988 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1034,8 +1034,8 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
return 0;
}
-static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
- int format, char *buf, int buflen)
+static int ide_cdrom_read_tocentry(ide_drive_t *drive, int trackno,
+ int msf_flag, int format, char *buf, int buflen)
{
unsigned char cmd[BLK_MAX_CDB];
@@ -1104,7 +1104,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
sectors_per_frame << SECTOR_SHIFT);
/* first read just the header, so we know how long the TOC is */
- stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
+ stat = ide_cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
sizeof(struct atapi_toc_header));
if (stat)
return stat;
@@ -1121,7 +1121,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
ntracks = MAX_TRACKS;
/* now read the whole schmeer */
- stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
+ stat = ide_cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
@@ -1141,7 +1141,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
* Heiko Eißfeldt.
*/
ntracks = 0;
- stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
+ stat = ide_cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
@@ -1181,7 +1181,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
if (toc->hdr.first_track != CDROM_LEADOUT) {
/* read the multisession information */
- stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
+ stat = ide_cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
sizeof(ms_tmp));
if (stat)
return stat;
@@ -1195,7 +1195,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
/* re-read multisession information using MSF format */
- stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
+ stat = ide_cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
sizeof(ms_tmp));
if (stat)
return stat;
@@ -1305,8 +1305,7 @@ static int ide_cdrom_register(ide_drive_t *drive, int nslots)
if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT)
devinfo->mask |= CDC_SELECT_SPEED;
- devinfo->disk = info->disk;
- return register_cdrom(devinfo);
+ return register_cdrom(info->disk, devinfo);
}
static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index b137f27a34d5..c31f1d2b3b07 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -233,10 +233,13 @@ static ide_startstop_t do_special(ide_drive_t *drive)
void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
- struct scatterlist *sg = hwif->sg_table;
+ struct scatterlist *sg = hwif->sg_table, *last_sg = NULL;
struct request *rq = cmd->rq;
- cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+ cmd->sg_nents = __blk_rq_map_sg(drive->queue, rq, sg, &last_sg);
+ if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & rq->q->dma_pad_mask))
+ last_sg->length +=
+ (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
}
EXPORT_SYMBOL_GPL(ide_map_sg);
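The padding arithmetic rounds the request length up to the next (dma_pad_mask + 1) boundary by growing the last scatterlist entry. A worked sketch of just the computation, assuming a pad mask of 3 (4-byte alignment):

static unsigned int demo_padded_len(unsigned int bytes, unsigned int pad_mask)
{
	/* e.g. bytes = 1029, pad_mask = 3:
	 *   1029 & 3        = 1  -> padding needed
	 *   (3 & ~1029) + 1 = 3  -> bytes to add
	 *   1029 + 3        = 1032, a multiple of 4
	 */
	if (bytes & pad_mask)
		bytes += (pad_mask & ~bytes) + 1;
	return bytes;
}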
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 5d91a6dda894..1080637ca40e 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -89,13 +89,13 @@ config ADXL372_I2C
module will be called adxl372_i2c.
config BMA180
- tristate "Bosch BMA180/BMA25x 3-Axis Accelerometer Driver"
- depends on I2C
+ tristate "Bosch BMA023/BMA1x0/BMA25x 3-Axis Accelerometer Driver"
+ depends on I2C && INPUT_BMA150=n
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say Y here if you want to build a driver for the Bosch BMA180 or
- BMA25x triaxial acceleration sensor.
+ Say Y here if you want to build a driver for the Bosch BMA023, BMA150,
+ BMA180, SMB380, or BMA25x triaxial acceleration sensor.
To compile this driver as a module, choose M here: the
module will be called bma180.
@@ -238,7 +238,7 @@ config IIO_ST_ACCEL_3AXIS
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
- LNG2DM, LIS3DE, LIS2DE12
+ LNG2DM, LIS3DE, LIS2DE12, LIS2HH12
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index fcd91d5f05fd..265722e4b13f 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -7,6 +7,7 @@
* Support for BMA250 (c) Peter Meerwald <pmeerw@pmeerw.net>
*
* SPI is not supported by driver
+ * BMA023/BMA150/SMB380: 7-bit I2C slave address 0x38
* BMA180: 7-bit I2C slave address 0x40 or 0x41
* BMA250: 7-bit I2C slave address 0x18 or 0x19
* BMA254: 7-bit I2C slave address 0x18 or 0x19
@@ -33,6 +34,8 @@
#define BMA180_IRQ_NAME "bma180_event"
enum chip_ids {
+ BMA023,
+ BMA150,
BMA180,
BMA250,
BMA254,
@@ -48,7 +51,7 @@ struct bma180_part_info {
unsigned int num_scales;
const int *bw_table;
unsigned int num_bw;
- int center_temp;
+ int temp_offset;
u8 int_reset_reg, int_reset_mask;
u8 sleep_reg, sleep_mask;
@@ -57,13 +60,25 @@ struct bma180_part_info {
u8 power_reg, power_mask, lowpower_val;
u8 int_enable_reg, int_enable_mask;
u8 int_map_reg, int_enable_dataready_int1_mask;
- u8 softreset_reg;
+ u8 softreset_reg, softreset_val;
int (*chip_config)(struct bma180_data *data);
void (*chip_disable)(struct bma180_data *data);
};
/* Register set */
+#define BMA023_CTRL_REG0 0x0a
+#define BMA023_CTRL_REG1 0x0b
+#define BMA023_CTRL_REG2 0x14
+#define BMA023_CTRL_REG3 0x15
+
+#define BMA023_RANGE_MASK GENMASK(4, 3) /* Range of accel values */
+#define BMA023_BW_MASK GENMASK(2, 0) /* Accel bandwidth */
+#define BMA023_SLEEP BIT(0)
+#define BMA023_INT_RESET_MASK BIT(6)
+#define BMA023_NEW_DATA_INT BIT(5) /* Interrupt when new accel data is ready */
+#define BMA023_RESET_VAL BIT(1)
+
#define BMA180_CHIP_ID 0x00 /* Need to distinguish BMA180 from other */
#define BMA180_ACC_X_LSB 0x02 /* First of 6 registers of accel data */
#define BMA180_TEMP 0x08
@@ -94,6 +109,7 @@ struct bma180_part_info {
/* We have to write this value in reset register to do soft reset */
#define BMA180_RESET_VAL 0xb6
+#define BMA023_ID_REG_VAL 0x02
#define BMA180_ID_REG_VAL 0x03
#define BMA250_ID_REG_VAL 0x03
#define BMA254_ID_REG_VAL 0xfa /* 250 decimal */
@@ -156,6 +172,9 @@ enum bma180_chan {
TEMP
};
+static int bma023_bw_table[] = { 25, 50, 100, 190, 375, 750, 1500 }; /* Hz */
+static int bma023_scale_table[] = { 2452, 4903, 9709, };
+
static int bma180_bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
static int bma180_scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
@@ -319,7 +338,8 @@ static int bma180_set_pmode(struct bma180_data *data, bool mode)
static int bma180_soft_reset(struct bma180_data *data)
{
int ret = i2c_smbus_write_byte_data(data->client,
- data->part_info->softreset_reg, BMA180_RESET_VAL);
+ data->part_info->softreset_reg,
+ data->part_info->softreset_val);
if (ret)
dev_err(&data->client->dev, "failed to reset the chip\n");
@@ -349,11 +369,28 @@ static int bma180_chip_init(struct bma180_data *data)
*/
msleep(20);
- ret = bma180_set_new_data_intr_state(data, false);
+ return bma180_set_new_data_intr_state(data, false);
+}
+
+static int bma023_chip_config(struct bma180_data *data)
+{
+ int ret = bma180_chip_init(data);
+
if (ret)
- return ret;
+ goto err;
+
+ ret = bma180_set_bw(data, 50); /* 50 Hz */
+ if (ret)
+ goto err;
+ ret = bma180_set_scale(data, 2452); /* 2 G */
+ if (ret)
+ goto err;
- return bma180_set_pmode(data, false);
+ return 0;
+
+err:
+ dev_err(&data->client->dev, "failed to config the chip\n");
+ return ret;
}
static int bma180_chip_config(struct bma180_data *data)
@@ -362,6 +399,9 @@ static int bma180_chip_config(struct bma180_data *data)
if (ret)
goto err;
+ ret = bma180_set_pmode(data, false);
+ if (ret)
+ goto err;
ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_DIS_WAKE_UP, 1);
if (ret)
goto err;
@@ -391,6 +431,9 @@ static int bma25x_chip_config(struct bma180_data *data)
if (ret)
goto err;
+ ret = bma180_set_pmode(data, false);
+ if (ret)
+ goto err;
ret = bma180_set_bw(data, 16); /* 16 Hz */
if (ret)
goto err;
@@ -413,6 +456,17 @@ err:
return ret;
}
+static void bma023_chip_disable(struct bma180_data *data)
+{
+ if (bma180_set_sleep_state(data, true))
+ goto err;
+
+ return;
+
+err:
+ dev_err(&data->client->dev, "failed to disable the chip\n");
+}
+
static void bma180_chip_disable(struct bma180_data *data)
{
if (bma180_set_new_data_intr_state(data, false))
@@ -512,8 +566,12 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
- *val = sign_extend32(ret >> chan->scan_type.shift,
- chan->scan_type.realbits - 1);
+ if (chan->scan_type.sign == 's') {
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ } else {
+ *val = ret;
+ }
return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
*val = data->bw;
@@ -531,7 +589,7 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_OFFSET:
- *val = data->part_info->center_temp;
+ *val = data->part_info->temp_offset;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -609,6 +667,11 @@ static const struct iio_enum bma180_power_mode_enum = {
.set = bma180_set_power_mode,
};
+static const struct iio_chan_spec_ext_info bma023_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma180_accel_get_mount_matrix),
+ { }
+};
+
static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
IIO_ENUM("power_mode", true, &bma180_power_mode_enum),
IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
@@ -616,6 +679,35 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
{ }
};
+#define BMA023_ACC_CHANNEL(_axis, _bits) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .scan_index = AXIS_##_axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = _bits, \
+ .storagebits = 16, \
+ .shift = 16 - _bits, \
+ }, \
+ .ext_info = bma023_ext_info, \
+}
+
+#define BMA150_TEMP_CHANNEL { \
+ .type = IIO_TEMP, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_index = TEMP, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 16, \
+ }, \
+}
+
#define BMA180_ACC_CHANNEL(_axis, _bits) { \
.type = IIO_ACCEL, \
.modified = 1, \
@@ -645,6 +737,21 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
}, \
}
+static const struct iio_chan_spec bma023_channels[] = {
+ BMA023_ACC_CHANNEL(X, 10),
+ BMA023_ACC_CHANNEL(Y, 10),
+ BMA023_ACC_CHANNEL(Z, 10),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct iio_chan_spec bma150_channels[] = {
+ BMA023_ACC_CHANNEL(X, 10),
+ BMA023_ACC_CHANNEL(Y, 10),
+ BMA023_ACC_CHANNEL(Z, 10),
+ BMA150_TEMP_CHANNEL,
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
static const struct iio_chan_spec bma180_channels[] = {
BMA180_ACC_CHANNEL(X, 14),
BMA180_ACC_CHANNEL(Y, 14),
@@ -670,6 +777,63 @@ static const struct iio_chan_spec bma254_channels[] = {
};
static const struct bma180_part_info bma180_part_info[] = {
+ [BMA023] = {
+ .chip_id = BMA023_ID_REG_VAL,
+ .channels = bma023_channels,
+ .num_channels = ARRAY_SIZE(bma023_channels),
+ .scale_table = bma023_scale_table,
+ .num_scales = ARRAY_SIZE(bma023_scale_table),
+ .bw_table = bma023_bw_table,
+ .num_bw = ARRAY_SIZE(bma023_bw_table),
+ /* No temperature channel */
+ .temp_offset = 0,
+ .int_reset_reg = BMA023_CTRL_REG0,
+ .int_reset_mask = BMA023_INT_RESET_MASK,
+ .sleep_reg = BMA023_CTRL_REG0,
+ .sleep_mask = BMA023_SLEEP,
+ .bw_reg = BMA023_CTRL_REG2,
+ .bw_mask = BMA023_BW_MASK,
+ .scale_reg = BMA023_CTRL_REG2,
+ .scale_mask = BMA023_RANGE_MASK,
+ /* No power mode on bma023 */
+ .power_reg = 0,
+ .power_mask = 0,
+ .lowpower_val = 0,
+ .int_enable_reg = BMA023_CTRL_REG3,
+ .int_enable_mask = BMA023_NEW_DATA_INT,
+ .softreset_reg = BMA023_CTRL_REG0,
+ .softreset_val = BMA023_RESET_VAL,
+ .chip_config = bma023_chip_config,
+ .chip_disable = bma023_chip_disable,
+ },
+ [BMA150] = {
+ .chip_id = BMA023_ID_REG_VAL,
+ .channels = bma150_channels,
+ .num_channels = ARRAY_SIZE(bma150_channels),
+ .scale_table = bma023_scale_table,
+ .num_scales = ARRAY_SIZE(bma023_scale_table),
+ .bw_table = bma023_bw_table,
+ .num_bw = ARRAY_SIZE(bma023_bw_table),
+ .temp_offset = -60, /* 0 LSB @ -30 degree C */
+ .int_reset_reg = BMA023_CTRL_REG0,
+ .int_reset_mask = BMA023_INT_RESET_MASK,
+ .sleep_reg = BMA023_CTRL_REG0,
+ .sleep_mask = BMA023_SLEEP,
+ .bw_reg = BMA023_CTRL_REG2,
+ .bw_mask = BMA023_BW_MASK,
+ .scale_reg = BMA023_CTRL_REG2,
+ .scale_mask = BMA023_RANGE_MASK,
+ /* No power mode on bma150 */
+ .power_reg = 0,
+ .power_mask = 0,
+ .lowpower_val = 0,
+ .int_enable_reg = BMA023_CTRL_REG3,
+ .int_enable_mask = BMA023_NEW_DATA_INT,
+ .softreset_reg = BMA023_CTRL_REG0,
+ .softreset_val = BMA023_RESET_VAL,
+ .chip_config = bma023_chip_config,
+ .chip_disable = bma023_chip_disable,
+ },
[BMA180] = {
.chip_id = BMA180_ID_REG_VAL,
.channels = bma180_channels,
@@ -678,7 +842,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.num_scales = ARRAY_SIZE(bma180_scale_table),
.bw_table = bma180_bw_table,
.num_bw = ARRAY_SIZE(bma180_bw_table),
- .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .temp_offset = 48, /* 0 LSB @ 24 degree C */
.int_reset_reg = BMA180_CTRL_REG0,
.int_reset_mask = BMA180_RESET_INT,
.sleep_reg = BMA180_CTRL_REG0,
@@ -693,6 +857,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.int_enable_reg = BMA180_CTRL_REG3,
.int_enable_mask = BMA180_NEW_DATA_INT,
.softreset_reg = BMA180_RESET,
+ .softreset_val = BMA180_RESET_VAL,
.chip_config = bma180_chip_config,
.chip_disable = bma180_chip_disable,
},
@@ -704,7 +869,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.num_scales = ARRAY_SIZE(bma25x_scale_table),
.bw_table = bma25x_bw_table,
.num_bw = ARRAY_SIZE(bma25x_bw_table),
- .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .temp_offset = 48, /* 0 LSB @ 24 degree C */
.int_reset_reg = BMA250_INT_RESET_REG,
.int_reset_mask = BMA250_INT_RESET_MASK,
.sleep_reg = BMA250_POWER_REG,
@@ -721,6 +886,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.int_map_reg = BMA250_INT_MAP_REG,
.int_enable_dataready_int1_mask = BMA250_INT1_DATA_MASK,
.softreset_reg = BMA250_RESET_REG,
+ .softreset_val = BMA180_RESET_VAL,
.chip_config = bma25x_chip_config,
.chip_disable = bma25x_chip_disable,
},
@@ -732,7 +898,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.num_scales = ARRAY_SIZE(bma25x_scale_table),
.bw_table = bma25x_bw_table,
.num_bw = ARRAY_SIZE(bma25x_bw_table),
- .center_temp = 46, /* 0 LSB @ 23 degree C */
+ .temp_offset = 46, /* 0 LSB @ 23 degree C */
.int_reset_reg = BMA254_INT_RESET_REG,
.int_reset_mask = BMA254_INT_RESET_MASK,
.sleep_reg = BMA254_POWER_REG,
@@ -749,6 +915,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.int_map_reg = BMA254_INT_MAP_REG,
.int_enable_dataready_int1_mask = BMA254_INT1_DATA_MASK,
.softreset_reg = BMA254_RESET_REG,
+ .softreset_val = BMA180_RESET_VAL,
.chip_config = bma25x_chip_config,
.chip_disable = bma25x_chip_disable,
},
@@ -990,9 +1157,12 @@ static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
#endif
static const struct i2c_device_id bma180_ids[] = {
+ { "bma023", BMA023 },
+ { "bma150", BMA150 },
{ "bma180", BMA180 },
{ "bma250", BMA250 },
{ "bma254", BMA254 },
+ { "smb380", BMA150 },
{ }
};
@@ -1000,6 +1170,14 @@ MODULE_DEVICE_TABLE(i2c, bma180_ids);
static const struct of_device_id bma180_of_match[] = {
{
+ .compatible = "bosch,bma023",
+ .data = (void *)BMA023
+ },
+ {
+ .compatible = "bosch,bma150",
+ .data = (void *)BMA150
+ },
+ {
.compatible = "bosch,bma180",
.data = (void *)BMA180
},
@@ -1011,6 +1189,10 @@ static const struct of_device_id bma180_of_match[] = {
.compatible = "bosch,bma254",
.data = (void *)BMA254
},
+ {
+ .compatible = "bosch,smb380",
+ .data = (void *)BMA150
+ },
{ }
};
MODULE_DEVICE_TABLE(of, bma180_of_match);
@@ -1030,5 +1212,5 @@ module_i2c_driver(bma180_driver);
MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("Bosch BMA180/BMA25x triaxial acceleration sensor");
+MODULE_DESCRIPTION("Bosch BMA023/BMA1x0/BMA25x triaxial acceleration sensor");
MODULE_LICENSE("GPL");
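Why the new sign check matters: sign_extend32() would corrupt the BMA150's unsigned 8-bit temperature samples by smearing their top bit, while the signed accelerometer channels keep the old path. A worked example for a 10-bit sample left-aligned in 16 bits (shift = 6), with an illustrative helper name:

#include <linux/bitops.h>

/* 0xffc0 is -1 in 10-bit two's complement, left-aligned in 16 bits */
static int demo_accel_sample(u16 raw)
{
	return sign_extend32(raw >> 6, 10 - 1);	/* bit 9 is the sign -> -1 */
}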
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
index 2bf210fa4ba6..ef89bded7390 100644
--- a/drivers/iio/accel/dmard06.c
+++ b/drivers/iio/accel/dmard06.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -226,7 +227,7 @@ static struct i2c_driver dmard06_driver = {
.id_table = dmard06_id,
.driver = {
.name = DMARD06_DRV_NAME,
- .of_match_table = of_match_ptr(dmard06_of_match),
+ .of_match_table = dmard06_of_match,
.pm = DMARD06_PM_OPS,
},
};
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 0d9e2def2b25..0ec0533448bc 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum accel_3d_channel {
@@ -391,18 +389,13 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&accel_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&accel_state->common_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -426,9 +419,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&accel_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &accel_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -443,8 +434,7 @@ static int hid_accel_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&accel_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &accel_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index 38411e1c155b..b580d605f848 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -2,6 +2,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
@@ -21,8 +22,8 @@ static int kxsd9_i2c_probe(struct i2c_client *i2c,
regmap = devm_regmap_init_i2c(i2c, &config);
if (IS_ERR(regmap)) {
- dev_err(&i2c->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&i2c->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
@@ -36,15 +37,11 @@ static int kxsd9_i2c_remove(struct i2c_client *client)
return kxsd9_common_remove(&client->dev);
}
-#ifdef CONFIG_OF
static const struct of_device_id kxsd9_of_match[] = {
{ .compatible = "kionix,kxsd9", },
{ },
};
MODULE_DEVICE_TABLE(of, kxsd9_of_match);
-#else
-#define kxsd9_of_match NULL
-#endif
static const struct i2c_device_id kxsd9_i2c_id[] = {
{"kxsd9", 0},
@@ -55,7 +52,7 @@ MODULE_DEVICE_TABLE(i2c, kxsd9_i2c_id);
static struct i2c_driver kxsd9_i2c_driver = {
.driver = {
.name = "kxsd9",
- .of_match_table = of_match_ptr(kxsd9_of_match),
+ .of_match_table = kxsd9_of_match,
.pm = &kxsd9_dev_pm_ops,
},
.probe = kxsd9_i2c_probe,
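The %pe format specifier prints an ERR_PTR() symbolically, which removes the (int)PTR_ERR() cast dance. A minimal illustration:

void *regmap = ERR_PTR(-ENOMEM);	/* hypothetical failure */

pr_err("Failed to register i2c regmap: %pe\n", regmap);
/* logs: "Failed to register i2c regmap: -ENOMEM" */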
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 3d5bea651923..9d07642c0de1 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -135,7 +135,7 @@ static int mxc4005_read_xyz(struct mxc4005_data *data)
int ret;
ret = regmap_bulk_read(data->regmap, MXC4005_REG_XOUT_UPPER,
- (u8 *) data->buffer, sizeof(data->buffer));
+ data->buffer, sizeof(data->buffer));
if (ret < 0) {
dev_err(data->dev, "failed to read axes\n");
return ret;
@@ -150,7 +150,7 @@ static int mxc4005_read_axis(struct mxc4005_data *data,
__be16 reg;
int ret;
- ret = regmap_bulk_read(data->regmap, addr, (u8 *) &reg, sizeof(reg));
+ ret = regmap_bulk_read(data->regmap, addr, &reg, sizeof(reg));
if (ret < 0) {
dev_err(data->dev, "failed to read reg %02x\n", addr);
return ret;
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 66d768d971e1..6e429072e44a 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -980,7 +980,7 @@ static int sca3000_read_data(struct sca3000_state *st,
st->tx[0] = SCA3000_READ_REG(reg_address_high);
ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
if (ret) {
- dev_err(get_device(&st->us->dev), "problem reading register");
+ dev_err(&st->us->dev, "problem reading register\n");
return ret;
}
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 5b13e293cade..5d356288e001 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -35,6 +35,7 @@ enum st_accel_type {
LIS2DW12,
LIS3DHH,
LIS2DE12,
+ LIS2HH12,
ST_ACCEL_MAX,
};
@@ -59,6 +60,7 @@ enum st_accel_type {
#define LIS3DHH_ACCEL_DEV_NAME "lis3dhh"
#define LIS3DE_ACCEL_DEV_NAME "lis3de"
#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
+#define LIS2HH12_ACCEL_DEV_NAME "lis2hh12"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
index 9f2b40474b8e..b5c814ef1637 100644
--- a/drivers/iio/accel/st_accel_buffer.c
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -37,8 +37,7 @@ static int st_accel_buffer_postenable(struct iio_dev *indio_dev)
if (err < 0)
return err;
- err = st_sensors_set_axis_enable(indio_dev,
- (u8)indio_dev->active_scan_mask[0]);
+ err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]);
if (err < 0)
goto st_accel_buffer_predisable;
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 7320275c7e56..43c50167d220 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -904,6 +904,83 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = true,
.bootime = 2,
},
+ {
+ .wai = 0x41,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS2HH12_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0x70,
+ .odr_avl = {
+ { .hz = 10, .value = 0x01, },
+ { .hz = 50, .value = 0x02, },
+ { .hz = 100, .value = 0x03, },
+ { .hz = 200, .value = 0x04, },
+ { .hz = 400, .value = 0x05, },
+ { .hz = 800, .value = 0x06, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0x70,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(61),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(122),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(244),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x20,
+ .mask = 0x08,
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x01,
+ },
+ .int2 = {
+ .addr = 0x25,
+ .mask = 0x01,
+ },
+ .addr_ihl = 0x24,
+ .mask_ihl = 0x02,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -1170,8 +1247,7 @@ EXPORT_SYMBOL(st_accel_get_settings);
int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata =
- (struct st_sensors_platform_data *)adata->dev->platform_data;
+ struct st_sensors_platform_data *pdata = dev_get_platdata(adata->dev);
struct iio_chan_spec *channels;
size_t channels_size;
int err;
@@ -1204,8 +1280,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
"failed to apply ACPI orientation data: %d\n", err);
indio_dev->channels = channels;
- adata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &adata->sensor_settings->fs.fs_avl[0];
+ adata->current_fullscale = &adata->sensor_settings->fs.fs_avl[0];
adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
if (!pdata)
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 6b283be26ebc..360e16f2cadb 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -104,6 +104,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis2de12",
.data = LIS2DE12_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis2hh12",
+ .data = LIS2HH12_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -138,6 +142,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS2DW12_ACCEL_DEV_NAME },
{ LIS3DE_ACCEL_DEV_NAME },
{ LIS2DE12_ACCEL_DEV_NAME },
+ { LIS2HH12_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 12bb8b7ca1ff..ff3569635ce0 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -246,6 +246,41 @@ config AD799X
To compile this driver as a module, choose M here: the module will be
called ad799x.
+config AD9467
+ tristate "Analog Devices AD9467 High Speed ADC driver"
+ depends on SPI
+ select ADI_AXI_ADC
+ help
+ Say yes here to build support for Analog Devices:
+ * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
+
+ The driver requires the assistance of the AXI ADC IP core to operate,
+ since SPI is used for configuration only, while data has to be
+ streamed into memory via DMA.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad9467.
+
+config ADI_AXI_ADC
+ tristate "Analog Devices Generic AXI ADC IP core driver"
+ select IIO_BUFFER
+ select IIO_BUFFER_HW_CONSUMER
+ select IIO_BUFFER_DMAENGINE
+ help
+ Say yes here to build support for Analog Devices Generic
+ AXI ADC IP core. The IP core is used for interfacing with
+ analog-to-digital (ADC) converters that require either a high-speed
+ serial interface (JESD204B/C) or a source synchronous parallel
+ interface (LVDS/CMOS).
+ Typically (for such devices) SPI will be used for configuration only,
+ while this IP core handles the streaming of data into memory via DMA.
+
+ Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called adi-axi-adc.
+
config ASPEED_ADC
tristate "Aspeed ADC"
depends on ARCH_ASPEED || COMPILE_TEST
@@ -595,6 +630,16 @@ config MAX1118
To compile this driver as a module, choose M here: the module will be
called max1118.
+config MAX1241
+ tristate "Maxim max1241 ADC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Maxim max1241 12-bit, single-channel
+ ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max1241.
+
config MAX1363
tristate "Maxim max1363 ADC driver"
depends on I2C
@@ -692,6 +737,16 @@ config MESON_SARADC
To compile this driver as a module, choose M here: the
module will be called meson_saradc.
+config MP2629_ADC
+ tristate "Monolithic MP2629 ADC driver"
+ depends on MFD_MP2629
+ help
+ Say yes here to have support for the ADC of the MP2629 battery
+ charger IC, accessed over I2C.
+
+ This driver provides ADC readings of the system, the input power
+ supply, and battery voltage and current.
+
config NAU7802
tristate "Nuvoton NAU7802 ADC driver"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 637807861112..90f94ada7b30 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
obj-$(CONFIG_AD7949) += ad7949.o
obj-$(CONFIG_AD799X) += ad799x.o
+obj-$(CONFIG_AD9467) += ad9467.o
+obj-$(CONFIG_ADI_AXI_ADC) += adi-axi-adc.o
obj-$(CONFIG_ASPEED_ADC) += aspeed_adc.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
@@ -57,6 +59,7 @@ obj-$(CONFIG_LTC2497) += ltc2497.o ltc2497-core.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX11100) += max11100.o
obj-$(CONFIG_MAX1118) += max1118.o
+obj-$(CONFIG_MAX1241) += max1241.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MAX9611) += max9611.o
obj-$(CONFIG_MCP320X) += mcp320x.o
@@ -65,6 +68,7 @@ obj-$(CONFIG_MCP3911) += mcp3911.o
obj-$(CONFIG_MEDIATEK_MT6577_AUXADC) += mt6577_auxadc.o
obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_MESON_SARADC) += meson_saradc.o
+obj-$(CONFIG_MP2629_ADC) += mp2629_adc.o
obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 76747488044b..4e816d714ad2 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -12,9 +12,11 @@
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -27,6 +29,8 @@ struct ad7476_state;
struct ad7476_chip_info {
unsigned int int_vref_uv;
struct iio_chan_spec channel[2];
+ /* channels used when convst gpio is defined */
+ struct iio_chan_spec convst_channel[2];
void (*reset)(struct ad7476_state *);
};
@@ -34,6 +38,7 @@ struct ad7476_state {
struct spi_device *spi;
const struct ad7476_chip_info *chip_info;
struct regulator *reg;
+ struct gpio_desc *convst_gpio;
struct spi_transfer xfer;
struct spi_message msg;
/*
@@ -64,6 +69,17 @@ enum ad7476_supported_device_ids {
ID_ADS7868,
};
+static void ad7091_convst(struct ad7476_state *st)
+{
+ if (!st->convst_gpio)
+ return;
+
+ gpiod_set_value(st->convst_gpio, 0);
+ udelay(1); /* CONVST pulse width: 10 ns min */
+ gpiod_set_value(st->convst_gpio, 1);
+ udelay(1); /* Conversion time: 650 ns max */
+}
+
static irqreturn_t ad7476_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -71,6 +87,8 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
struct ad7476_state *st = iio_priv(indio_dev);
int b_sent;
+ ad7091_convst(st);
+
b_sent = spi_sync(st->spi, &st->msg);
if (b_sent < 0)
goto done;
@@ -93,6 +111,8 @@ static int ad7476_scan_direct(struct ad7476_state *st)
{
int ret;
+ ad7091_convst(st);
+
ret = spi_sync(st->spi, &st->msg);
if (ret)
return ret;
@@ -160,6 +180,8 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
#define AD7940_CHAN(bits) _AD7476_CHAN((bits), 15 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
#define AD7091R_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), 0)
+#define AD7091R_CONVST_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), \
+ BIT(IIO_CHAN_INFO_RAW))
#define ADS786X_CHAN(bits) _AD7476_CHAN((bits), 12 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
@@ -167,6 +189,8 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
[ID_AD7091R] = {
.channel[0] = AD7091R_CHAN(12),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .convst_channel[0] = AD7091R_CONVST_CHAN(12),
+ .convst_channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
.reset = ad7091_reset,
},
[ID_AD7276] = {
@@ -232,6 +256,13 @@ static const struct iio_info ad7476_info = {
.read_raw = &ad7476_read_raw,
};
+static void ad7476_reg_disable(void *data)
+{
+ struct ad7476_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
static int ad7476_probe(struct spi_device *spi)
{
struct ad7476_state *st;
@@ -254,6 +285,17 @@ static int ad7476_probe(struct spi_device *spi)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, ad7476_reg_disable,
+ st);
+ if (ret)
+ return ret;
+
+ st->convst_gpio = devm_gpiod_get_optional(&spi->dev,
+ "adi,conversion-start",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->convst_gpio))
+ return PTR_ERR(st->convst_gpio);
+
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
@@ -266,6 +308,9 @@ static int ad7476_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channel;
indio_dev->num_channels = 2;
indio_dev->info = &ad7476_info;
+
+ if (st->convst_gpio)
+ indio_dev->channels = st->chip_info->convst_channel;
/* Setup default message */
st->xfer.rx_buf = &st->data;
@@ -295,19 +340,8 @@ error_disable_reg:
return ret;
}
-static int ad7476_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7476_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- regulator_disable(st->reg);
-
- return 0;
-}
-
static const struct spi_device_id ad7476_id[] = {
+ {"ad7091", ID_AD7091R},
{"ad7091r", ID_AD7091R},
{"ad7273", ID_AD7277},
{"ad7274", ID_AD7276},
@@ -343,7 +377,6 @@ static struct spi_driver ad7476_driver = {
.name = "ad7476",
},
.probe = ad7476_probe,
- .remove = ad7476_remove,
.id_table = ad7476_id,
};
module_spi_driver(ad7476_driver);
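This is what lets ad7476_remove() disappear: devm_add_action_or_reset() queues the teardown with the device, so it runs on any later probe failure and at removal, in reverse registration order. A sketch of the pairing, assuming a regulator already obtained and the names below being illustrative:

static void demo_reg_disable(void *reg)
{
	regulator_disable(reg);
}

/* in probe: */
ret = regulator_enable(st->reg);
if (ret)
	return ret;

/* from here on, the disable runs automatically on error or removal */
ret = devm_add_action_or_reset(&spi->dev, demo_reg_disable, st->reg);
if (ret)
	return ret;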
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index 291c1a898129..f47606ebbbbe 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -206,10 +206,29 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
.irq_flags = IRQF_TRIGGER_LOW,
};
-#define AD7780_CHANNEL(bits, wordsize) \
- AD_SD_CHANNEL(1, 0, 0, bits, 32, (wordsize) - (bits))
-#define AD7170_CHANNEL(bits, wordsize) \
- AD_SD_CHANNEL_NO_SAMP_FREQ(1, 0, 0, bits, 32, (wordsize) - (bits))
+#define _AD7780_CHANNEL(_bits, _wordsize, _mask_all) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = _mask_all, \
+ .scan_index = 1, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 32, \
+ .shift = (_wordsize) - (_bits), \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define AD7780_CHANNEL(_bits, _wordsize) \
+ _AD7780_CHANNEL(_bits, _wordsize, BIT(IIO_CHAN_INFO_SAMP_FREQ))
+#define AD7170_CHANNEL(_bits, _wordsize) \
+ _AD7780_CHANNEL(_bits, _wordsize, 0)
static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
[ID_AD7170] = {
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index abb239392631..48432b6f6002 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -64,25 +64,73 @@
#define AD7791_MODE_SEL_MASK (0x3 << 6)
#define AD7791_MODE_SEL(x) ((x) << 6)
+#define __AD7991_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, _extend_name, _type, _mask_all) \
+ { \
+ .type = (_type), \
+ .differential = (_channel2 == -1 ? 0 : 1), \
+ .indexed = 1, \
+ .channel = (_channel1), \
+ .channel2 = (_channel2), \
+ .address = (_address), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = _mask_all, \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storagebits), \
+ .shift = (_shift), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD7991_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7991_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7991_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7991_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7991_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7991_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7991_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
+ _shift) \
+ __AD7991_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, "supply", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
#define DECLARE_AD7787_CHANNELS(name, bits, storagebits) \
const struct iio_chan_spec name[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
+ AD7991_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_CHANNEL(1, 1, AD7791_CH_AIN2, (bits), (storagebits), 0), \
- AD_SD_SHORTED_CHANNEL(2, 0, AD7791_CH_AIN1N_AIN1N, \
+ AD7991_CHANNEL(1, 1, AD7791_CH_AIN2, (bits), (storagebits), 0), \
+ AD7991_SHORTED_CHANNEL(2, 0, AD7791_CH_AIN1N_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_SUPPLY_CHANNEL(3, 2, AD7791_CH_AVDD_MONITOR, \
+ AD7991_SUPPLY_CHANNEL(3, 2, AD7791_CH_AVDD_MONITOR, \
(bits), (storagebits), 0), \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
#define DECLARE_AD7791_CHANNELS(name, bits, storagebits) \
const struct iio_chan_spec name[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
+ AD7991_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_SHORTED_CHANNEL(1, 0, AD7791_CH_AIN1N_AIN1N, \
+ AD7991_SHORTED_CHANNEL(1, 0, AD7791_CH_AIN1N_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_SUPPLY_CHANNEL(2, 1, AD7791_CH_AVDD_MONITOR, \
+ AD7991_SUPPLY_CHANNEL(2, 1, AD7791_CH_AVDD_MONITOR, \
(bits), (storagebits), 0), \
IIO_CHAN_SOFT_TIMESTAMP(3), \
}
@@ -444,5 +492,5 @@ static struct spi_driver ad7791_driver = {
module_spi_driver(ad7791_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("Analog Device AD7787/AD7788/AD7789/AD7790/AD7791 ADC driver");
+MODULE_DESCRIPTION("Analog Devices AD7787/AD7788/AD7789/AD7790/AD7791 ADC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index e5691e330323..808485f42415 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -354,29 +354,28 @@ static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
static IIO_CONST_ATTR_NAMED(sampling_frequency_available_ad7797,
sampling_frequency_available, "123 62 50 33 17 16 12 10 8 6 4");
-static ssize_t ad7793_show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int ad7793_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7793_state *st = iio_priv(indio_dev);
- int i, len = 0;
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0],
- st->scale_avail[i][1]);
-
- len += sprintf(buf + len, "\n");
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)st->scale_avail;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(st->scale_avail) * 2;
- return len;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
}
-static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
- in_voltage-voltage_scale_available, S_IRUGO,
- ad7793_show_scale_available, NULL, 0);
-
static struct attribute *ad7793_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
NULL
};
@@ -534,6 +533,7 @@ static const struct iio_info ad7793_info = {
.read_raw = &ad7793_read_raw,
.write_raw = &ad7793_write_raw,
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
+ .read_avail = ad7793_read_avail,
.attrs = &ad7793_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
};
@@ -546,47 +546,113 @@ static const struct iio_info ad7797_info = {
.validate_trigger = ad_sd_validate_trigger,
};
+#define __AD7793_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, _extend_name, _type, _mask_type_av, _mask_all) \
+ { \
+ .type = (_type), \
+ .differential = (_channel2 == -1 ? 0 : 1), \
+ .indexed = 1, \
+ .channel = (_channel1), \
+ .channel2 = (_channel2), \
+ .address = (_address), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = (_mask_type_av), \
+ .info_mask_shared_by_all = _mask_all, \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storagebits), \
+ .shift = (_shift), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD7793_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7793_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7793_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, 0, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_TEMP, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7793_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
+ _shift) \
+ __AD7793_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, "supply", IIO_VOLTAGE, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7797_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7797_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
#define DECLARE_AD7793_CHANNELS(_name, _b, _sb, _s) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), (_s)), \
- AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), (_s)), \
- AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), (_s)), \
- AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), (_s)), \
- AD_SD_TEMP_CHANNEL(4, AD7793_CH_TEMP, (_b), (_sb), (_s)), \
- AD_SD_SUPPLY_CHANNEL(5, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), (_s)), \
+ AD7793_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), (_s)), \
+ AD7793_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), (_s)), \
+ AD7793_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), (_s)), \
+ AD7793_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), (_s)), \
+ AD7793_TEMP_CHANNEL(4, AD7793_CH_TEMP, (_b), (_sb), (_s)), \
+ AD7793_SUPPLY_CHANNEL(5, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), (_s)), \
IIO_CHAN_SOFT_TIMESTAMP(6), \
}
#define DECLARE_AD7795_CHANNELS(_name, _b, _sb) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(3, 3, 3, AD7795_CH_AIN4P_AIN4M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(4, 4, 4, AD7795_CH_AIN5P_AIN5M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(5, 5, 5, AD7795_CH_AIN6P_AIN6M, (_b), (_sb), 0), \
- AD_SD_SHORTED_CHANNEL(6, 0, AD7795_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
- AD_SD_TEMP_CHANNEL(7, AD7793_CH_TEMP, (_b), (_sb), 0), \
- AD_SD_SUPPLY_CHANNEL(8, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(3, 3, 3, AD7795_CH_AIN4P_AIN4M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(4, 4, 4, AD7795_CH_AIN5P_AIN5M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(5, 5, 5, AD7795_CH_AIN6P_AIN6M, (_b), (_sb), 0), \
+ AD7793_SHORTED_CHANNEL(6, 0, AD7795_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD7793_TEMP_CHANNEL(7, AD7793_CH_TEMP, (_b), (_sb), 0), \
+ AD7793_SUPPLY_CHANNEL(8, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
IIO_CHAN_SOFT_TIMESTAMP(9), \
}
#define DECLARE_AD7797_CHANNELS(_name, _b, _sb) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
- AD_SD_SHORTED_CHANNEL(1, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
- AD_SD_TEMP_CHANNEL(2, AD7793_CH_TEMP, (_b), (_sb), 0), \
- AD_SD_SUPPLY_CHANNEL(3, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ AD7797_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD7797_SHORTED_CHANNEL(1, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD7793_TEMP_CHANNEL(2, AD7793_CH_TEMP, (_b), (_sb), 0), \
+ AD7793_SUPPLY_CHANNEL(3, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
#define DECLARE_AD7799_CHANNELS(_name, _b, _sb) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
- AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
- AD_SD_SUPPLY_CHANNEL(4, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
+ AD7793_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD7793_SUPPLY_CHANNEL(4, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
IIO_CHAN_SOFT_TIMESTAMP(5), \
}
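With read_avail(), the IIO core generates the *_available attribute itself: for IIO_AVAIL_LIST with IIO_VAL_INT_PLUS_NANO it consumes vals pairwise, so an [n][2] array prints as n space-separated "int.nano" values. A sketch with illustrative numbers:

#include <linux/iio/iio.h>

static const int demo_scale_avail[4][2] = {
	{ 0, 38147 }, { 0, 76294 }, { 0, 152588 }, { 0, 305176 },
};

static int demo_read_avail(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   const int **vals, int *type, int *length,
			   long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SCALE:
		*vals = (const int *)demo_scale_avail;
		*type = IIO_VAL_INT_PLUS_NANO;
		*length = ARRAY_SIZE(demo_scale_avail) * 2;	/* flattened pairs */
		return IIO_AVAIL_LIST;
	default:
		return -EINVAL;
	}
}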
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
new file mode 100644
index 000000000000..1e8fd83b9bc2
--- /dev/null
+++ b/drivers/iio/adc/ad9467.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD9467 SPI ADC driver
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+
+
+#include <linux/iio/sysfs.h>
+
+#include <linux/clk.h>
+
+#include <linux/iio/adc/adi-axi-adc.h>
+
+/*
+ * ADI High-Speed ADC common SPI interface registers
+ * See Application-Note AN-877:
+ * https://www.analog.com/media/en/technical-documentation/application-notes/AN-877.pdf
+ */
+
+#define AN877_ADC_REG_CHIP_PORT_CONF 0x00
+#define AN877_ADC_REG_CHIP_ID 0x01
+#define AN877_ADC_REG_CHIP_GRADE 0x02
+#define AN877_ADC_REG_CHAN_INDEX 0x05
+#define AN877_ADC_REG_TRANSFER 0xFF
+#define AN877_ADC_REG_MODES 0x08
+#define AN877_ADC_REG_TEST_IO 0x0D
+#define AN877_ADC_REG_ADC_INPUT 0x0F
+#define AN877_ADC_REG_OFFSET 0x10
+#define AN877_ADC_REG_OUTPUT_MODE 0x14
+#define AN877_ADC_REG_OUTPUT_ADJUST 0x15
+#define AN877_ADC_REG_OUTPUT_PHASE 0x16
+#define AN877_ADC_REG_OUTPUT_DELAY 0x17
+#define AN877_ADC_REG_VREF 0x18
+#define AN877_ADC_REG_ANALOG_INPUT 0x2C
+
+/* AN877_ADC_REG_TEST_IO */
+#define AN877_ADC_TESTMODE_OFF 0x0
+#define AN877_ADC_TESTMODE_MIDSCALE_SHORT 0x1
+#define AN877_ADC_TESTMODE_POS_FULLSCALE 0x2
+#define AN877_ADC_TESTMODE_NEG_FULLSCALE 0x3
+#define AN877_ADC_TESTMODE_ALT_CHECKERBOARD 0x4
+#define AN877_ADC_TESTMODE_PN23_SEQ 0x5
+#define AN877_ADC_TESTMODE_PN9_SEQ 0x6
+#define AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE 0x7
+#define AN877_ADC_TESTMODE_USER 0x8
+#define AN877_ADC_TESTMODE_BIT_TOGGLE 0x9
+#define AN877_ADC_TESTMODE_SYNC 0xA
+#define AN877_ADC_TESTMODE_ONE_BIT_HIGH 0xB
+#define AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY 0xC
+#define AN877_ADC_TESTMODE_RAMP 0xF
+
+/* AN877_ADC_REG_TRANSFER */
+#define AN877_ADC_TRANSFER_SYNC 0x1
+
+/* AN877_ADC_REG_OUTPUT_MODE */
+#define AN877_ADC_OUTPUT_MODE_OFFSET_BINARY 0x0
+#define AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT 0x1
+#define AN877_ADC_OUTPUT_MODE_GRAY_CODE 0x2
+
+/* AN877_ADC_REG_OUTPUT_PHASE */
+#define AN877_ADC_OUTPUT_EVEN_ODD_MODE_EN 0x20
+#define AN877_ADC_INVERT_DCO_CLK 0x80
+
+/* AN877_ADC_REG_OUTPUT_DELAY */
+#define AN877_ADC_DCO_DELAY_ENABLE 0x80
+
+/*
+ * Analog Devices AD9467 16-Bit, 200/250 MSPS ADC
+ */
+
+#define CHIPID_AD9467 0x50
+#define AD9467_DEF_OUTPUT_MODE 0x08
+#define AD9467_REG_VREF_MASK 0x0F
+
+enum {
+ ID_AD9467,
+};
+
+struct ad9467_state {
+ struct spi_device *spi;
+ struct clk *clk;
+ unsigned int output_mode;
+
+ struct gpio_desc *pwrdown_gpio;
+ struct gpio_desc *reset_gpio;
+};
+
+static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
+{
+ unsigned char tbuf[2], rbuf[1];
+ int ret;
+
+ tbuf[0] = 0x80 | (reg >> 8);
+ tbuf[1] = reg & 0xFF;
+
+ ret = spi_write_then_read(spi,
+ tbuf, ARRAY_SIZE(tbuf),
+ rbuf, ARRAY_SIZE(rbuf));
+
+ if (ret < 0)
+ return ret;
+
+ return rbuf[0];
+}
+
+static int ad9467_spi_write(struct spi_device *spi, unsigned int reg,
+ unsigned int val)
+{
+ unsigned char buf[3];
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xFF;
+ buf[2] = val;
+
+ return spi_write(spi, buf, ARRAY_SIZE(buf));
+}
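+
+/*
+ * AN-877 framing note (illustrative): bit 15 of the two-byte instruction
+ * is R/!W and the low 13 bits are the register address, so the
+ * 0x80 | (reg >> 8) in ad9467_spi_read() above marks a read. Reading
+ * AN877_ADC_REG_CHIP_ID (0x01), for instance, clocks out {0x80, 0x01}
+ * and then clocks one data byte back.
+ */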
+
+static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct spi_device *spi = st->spi;
+ int ret;
+
+ if (readval == NULL) {
+ ret = ad9467_spi_write(spi, reg, writeval);
+ ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+ return ret;
+ }
+
+ ret = ad9467_spi_read(spi, reg);
+ if (ret < 0)
+ return ret;
+ *readval = ret;
+
+ return 0;
+}
+
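+/*
+ * Each entry pairs the full-scale input span in mV (peak-to-peak) with the
+ * matching AN877_ADC_REG_VREF register code for the AD9467.
+ */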
+static const unsigned int ad9467_scale_table[][2] = {
+ {2000, 0}, {2100, 6}, {2200, 7},
+ {2300, 8}, {2400, 9}, {2500, 10},
+};
+
+static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+ unsigned int *val, unsigned int *val2)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ const struct iio_chan_spec *chan = &info->channels[0];
+ unsigned int tmp;
+
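+ /*
+ * Scale is the full-scale span in mV divided by 2^realbits; the
+ * 1000000 factor keeps six fractional digits for the
+ * IIO_VAL_INT_PLUS_MICRO split below.
+ */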
+ tmp = (info->scale_table[index][0] * 1000000ULL) >>
+ chan->scan_type.realbits;
+ *val = tmp / 1000000;
+ *val2 = tmp % 1000000;
+}
+
+#define AD9467_CHAN(_chan, _si, _bits, _sign) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _chan, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = _sign, \
+ .realbits = _bits, \
+ .storagebits = 16, \
+ }, \
+}
+
+static const struct iio_chan_spec ad9467_channels[] = {
+ AD9467_CHAN(0, 0, 16, 'S'),
+};
+
+static const struct adi_axi_adc_chip_info ad9467_chip_tbl[] = {
+ [ID_AD9467] = {
+ .id = CHIPID_AD9467,
+ .max_rate = 250000000UL,
+ .scale_table = ad9467_scale_table,
+ .num_scales = ARRAY_SIZE(ad9467_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
+ },
+};
+
+static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ unsigned int i, vref_val, vref_mask;
+
+ vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
+
+ switch (info->id) {
+ case CHIPID_AD9467:
+ vref_mask = AD9467_REG_VREF_MASK;
+ break;
+ default:
+ vref_mask = 0xFFFF;
+ break;
+ }
+
+ vref_val &= vref_mask;
+
+ for (i = 0; i < info->num_scales; i++) {
+ if (vref_val == info->scale_table[i][1])
+ break;
+ }
+
+ if (i == info->num_scales)
+ return -ERANGE;
+
+ __ad9467_get_scale(conv, i, val, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ unsigned int scale_val[2];
+ unsigned int i;
+
+ if (val != 0)
+ return -EINVAL;
+
+ for (i = 0; i < info->num_scales; i++) {
+ __ad9467_get_scale(conv, i, &scale_val[0], &scale_val[1]);
+ if (scale_val[0] != val || scale_val[1] != val2)
+ continue;
+
+ ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
+ info->scale_table[i][1]);
+ ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ad9467_read_raw(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+{
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ return ad9467_get_scale(conv, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(st->clk);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ long r_clk;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return ad9467_set_scale(conv, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ r_clk = clk_round_rate(st->clk, val);
+ if (r_clk < 0 || r_clk > info->max_rate) {
+ dev_warn(&st->spi->dev,
+ "Error setting ADC sample rate %ld", r_clk);
+ return -EINVAL;
+ }
+
+ return clk_set_rate(st->clk, r_clk);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+{
+ int ret;
+
+ ret = ad9467_spi_write(spi, AN877_ADC_REG_OUTPUT_MODE, mode);
+ if (ret < 0)
+ return ret;
+
+ return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+}
+
+static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
+{
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+
+ return ad9467_outputmode_set(st->spi, st->output_mode);
+}
+
+static int ad9467_setup(struct ad9467_state *st, unsigned int chip_id)
+{
+ switch (chip_id) {
+ case CHIPID_AD9467:
+ st->output_mode = AD9467_DEF_OUTPUT_MODE |
+ AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ad9467_clk_disable(void *data)
+{
+ struct ad9467_state *st = data;
+
+ clk_disable_unprepare(st->clk);
+}
+
+static int ad9467_probe(struct spi_device *spi)
+{
+ const struct adi_axi_adc_chip_info *info;
+ struct adi_axi_adc_conv *conv;
+ struct ad9467_state *st;
+ unsigned int id;
+ int ret;
+
+ info = of_device_get_match_data(&spi->dev);
+ if (!info)
+ return -ENODEV;
+
+ conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
+ if (IS_ERR(conv))
+ return PTR_ERR(conv);
+
+ st = adi_axi_adc_conv_priv(conv);
+ st->spi = spi;
+
+ st->clk = devm_clk_get(&spi->dev, "adc-clk");
+ if (IS_ERR(st->clk))
+ return PTR_ERR(st->clk);
+
+ ret = clk_prepare_enable(st->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad9467_clk_disable, st);
+ if (ret)
+ return ret;
+
+ st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->pwrdown_gpio))
+ return PTR_ERR(st->pwrdown_gpio);
+
+ st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->reset_gpio))
+ return PTR_ERR(st->reset_gpio);
+
+ if (st->reset_gpio) {
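+ /* Pulse the reset line, then give the ADC time to come back up. */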
+ udelay(1);
+ ret = gpiod_direction_output(st->reset_gpio, 1);
+ if (ret)
+ return ret;
+ mdelay(10);
+ }
+
+ spi_set_drvdata(spi, st);
+
+ conv->chip_info = info;
+
+ id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
+ if (id != conv->chip_info->id) {
+ dev_err(&spi->dev, "Unrecognized CHIP_ID 0x%X\n", id);
+ return -ENODEV;
+ }
+
+ conv->reg_access = ad9467_reg_access;
+ conv->write_raw = ad9467_write_raw;
+ conv->read_raw = ad9467_read_raw;
+ conv->preenable_setup = ad9467_preenable_setup;
+
+ return ad9467_setup(st, id);
+}
+
+static const struct of_device_id ad9467_of_match[] = {
+ { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl[ID_AD9467], },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ad9467_of_match);
+
+static struct spi_driver ad9467_driver = {
+ .driver = {
+ .name = "ad9467",
+ .of_match_table = ad9467_of_match,
+ },
+ .probe = ad9467_probe,
+};
+module_spi_driver(ad9467_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD9467 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 8115b6de1d6c..dd3d54b3bc8b 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -70,9 +70,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
switch (size) {
case 3:
- data[1] = val >> 16;
- data[2] = val >> 8;
- data[3] = val;
+ put_unaligned_be24(val, &data[1]);
break;
case 2:
put_unaligned_be16(val, &data[1]);
@@ -157,9 +155,7 @@ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta,
*val = get_unaligned_be32(sigma_delta->data);
break;
case 3:
- *val = (sigma_delta->data[0] << 16) |
- (sigma_delta->data[1] << 8) |
- sigma_delta->data[2];
+ *val = get_unaligned_be24(&sigma_delta->data[0]);
break;
case 2:
*val = get_unaligned_be16(sigma_delta->data);
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
new file mode 100644
index 000000000000..c24c8da99eb4
--- /dev/null
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices Generic AXI ADC IP core
+ * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/buffer-dmaengine.h>
+
+#include <linux/fpga/adi-axi-common.h>
+#include <linux/iio/adc/adi-axi-adc.h>
+
+/*
+ * Register definitions:
+ * https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
+ */
+
+/* ADC controls */
+
+#define ADI_AXI_REG_RSTN 0x0040
+#define ADI_AXI_REG_RSTN_CE_N BIT(2)
+#define ADI_AXI_REG_RSTN_MMCM_RSTN BIT(1)
+#define ADI_AXI_REG_RSTN_RSTN BIT(0)
+
+/* ADC Channel controls */
+
+#define ADI_AXI_REG_CHAN_CTRL(c) (0x0400 + (c) * 0x40)
+#define ADI_AXI_REG_CHAN_CTRL_LB_OWR BIT(11)
+#define ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR BIT(10)
+#define ADI_AXI_REG_CHAN_CTRL_IQCOR_EN BIT(9)
+#define ADI_AXI_REG_CHAN_CTRL_DCFILT_EN BIT(8)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT BIT(6)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_TYPE BIT(5)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_EN BIT(4)
+#define ADI_AXI_REG_CHAN_CTRL_PN_TYPE_OWR BIT(1)
+#define ADI_AXI_REG_CHAN_CTRL_ENABLE BIT(0)
+
+#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS \
+ (ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT | \
+ ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
+ ADI_AXI_REG_CHAN_CTRL_ENABLE)
+
+struct adi_axi_adc_core_info {
+ unsigned int version;
+};
+
+struct adi_axi_adc_state {
+ struct mutex lock;
+
+ struct adi_axi_adc_client *client;
+ void __iomem *regs;
+};
+
+struct adi_axi_adc_client {
+ struct list_head entry;
+ struct adi_axi_adc_conv conv;
+ struct adi_axi_adc_state *state;
+ struct device *dev;
+ const struct adi_axi_adc_core_info *info;
+};
+
+static LIST_HEAD(registered_clients);
+static DEFINE_MUTEX(registered_clients_lock);
+
+static struct adi_axi_adc_client *conv_to_client(struct adi_axi_adc_conv *conv)
+{
+ return container_of(conv, struct adi_axi_adc_client, conv);
+}
+
+void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
+{
+ struct adi_axi_adc_client *cl = conv_to_client(conv);
+
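+ /*
+ * The converter's private data lives right after the client struct,
+ * at an IIO_ALIGN boundary, mirroring the allocation layout in
+ * adi_axi_adc_conv_register().
+ */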
+ return (char *)cl + ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
+}
+EXPORT_SYMBOL_GPL(adi_axi_adc_conv_priv);
+
+static void adi_axi_adc_write(struct adi_axi_adc_state *st,
+ unsigned int reg,
+ unsigned int val)
+{
+ iowrite32(val, st->regs + reg);
+}
+
+static unsigned int adi_axi_adc_read(struct adi_axi_adc_state *st,
+ unsigned int reg)
+{
+ return ioread32(st->regs + reg);
+}
+
+static int adi_axi_adc_config_dma_buffer(struct device *dev,
+ struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer;
+ const char *dma_name;
+
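+ /* DMA is optional; without a "dmas" property we stay in direct mode. */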
+ if (!device_property_present(dev, "dmas"))
+ return 0;
+
+ if (device_property_read_string(dev, "dma-names", &dma_name))
+ dma_name = "rx";
+
+ buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
+ dma_name);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+ iio_device_attach_buffer(indio_dev, buffer);
+
+ return 0;
+}
+
+static int adi_axi_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ if (!conv->read_raw)
+ return -EOPNOTSUPP;
+
+ return conv->read_raw(conv, chan, val, val2, mask);
+}
+
+static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ if (!conv->write_raw)
+ return -EOPNOTSUPP;
+
+ return conv->write_raw(conv, chan, val, val2, mask);
+}
+
+static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+ unsigned int i, ctrl;
+
+ for (i = 0; i < conv->chip_info->num_channels; i++) {
+ ctrl = adi_axi_adc_read(st, ADI_AXI_REG_CHAN_CTRL(i));
+
+ if (test_bit(i, scan_mask))
+ ctrl |= ADI_AXI_REG_CHAN_CTRL_ENABLE;
+ else
+ ctrl &= ~ADI_AXI_REG_CHAN_CTRL_ENABLE;
+
+ adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i), ctrl);
+ }
+
+ return 0;
+}
+
+static struct adi_axi_adc_conv *adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct adi_axi_adc_client *cl;
+ size_t alloc_size;
+
+ alloc_size = ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
+ if (sizeof_priv)
+ alloc_size += ALIGN(sizeof_priv, IIO_ALIGN);
+
+ cl = kzalloc(alloc_size, GFP_KERNEL);
+ if (!cl)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&registered_clients_lock);
+
+ cl->dev = get_device(dev);
+
+ list_add_tail(&cl->entry, &registered_clients);
+
+ mutex_unlock(&registered_clients_lock);
+
+ return &cl->conv;
+}
+
+static void adi_axi_adc_conv_unregister(struct adi_axi_adc_conv *conv)
+{
+ struct adi_axi_adc_client *cl = conv_to_client(conv);
+
+ mutex_lock(&registered_clients_lock);
+
+ list_del(&cl->entry);
+ put_device(cl->dev);
+
+ mutex_unlock(&registered_clients_lock);
+
+ kfree(cl);
+}
+
+static void devm_adi_axi_adc_conv_release(struct device *dev, void *res)
+{
+ adi_axi_adc_conv_unregister(*(struct adi_axi_adc_conv **)res);
+}
+
+struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct adi_axi_adc_conv **ptr, *conv;
+
+ ptr = devres_alloc(devm_adi_axi_adc_conv_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ conv = adi_axi_adc_conv_register(dev, sizeof_priv);
+ if (IS_ERR(conv)) {
+ devres_free(ptr);
+ return ERR_CAST(conv);
+ }
+
+ *ptr = conv;
+ devres_add(dev, ptr);
+
+ return conv;
+}
+EXPORT_SYMBOL_GPL(devm_adi_axi_adc_conv_register);
+
+static ssize_t in_voltage_scale_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < conv->chip_info->num_scales; i++) {
+ const unsigned int *s = conv->chip_info->scale_table[i];
+
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%u.%06u ", s[0], s[1]);
+ }
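+ /* Replace the trailing space; is_visible hides this file when empty. */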
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
+
+enum {
+ ADI_AXI_ATTR_SCALE_AVAIL,
+};
+
+#define ADI_AXI_ATTR(_en_, _file_) \
+ [ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr
+
+static struct attribute *adi_axi_adc_attributes[] = {
+ ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available),
+ NULL
+};
+
+static umode_t axi_adc_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ switch (n) {
+ case ADI_AXI_ATTR_SCALE_AVAIL:
+ if (!conv->chip_info->num_scales)
+ return 0;
+ return attr->mode;
+ default:
+ return attr->mode;
+ }
+}
+
+static const struct attribute_group adi_axi_adc_attribute_group = {
+ .attrs = adi_axi_adc_attributes,
+ .is_visible = axi_adc_attr_is_visible,
+};
+
+static const struct iio_info adi_axi_adc_info = {
+ .read_raw = &adi_axi_adc_read_raw,
+ .write_raw = &adi_axi_adc_write_raw,
+ .attrs = &adi_axi_adc_attribute_group,
+ .update_scan_mode = &adi_axi_adc_update_scan_mode,
+};
+
+static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
+ .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+};
+
+static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
+{
+ const struct adi_axi_adc_core_info *info;
+ struct adi_axi_adc_client *cl;
+ struct device_node *cln;
+
+ info = of_device_get_match_data(dev);
+ if (!info)
+ return ERR_PTR(-ENODEV);
+
+ cln = of_parse_phandle(dev->of_node, "adi,adc-dev", 0);
+ if (!cln) {
+ dev_err(dev, "No 'adi,adc-dev' node defined\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&registered_clients_lock);
+
+ list_for_each_entry(cl, &registered_clients, entry) {
+ if (!cl->dev)
+ continue;
+
+ if (cl->dev->of_node != cln)
+ continue;
+
+ if (!try_module_get(dev->driver->owner)) {
+ mutex_unlock(&registered_clients_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ get_device(dev);
+ cl->info = info;
+ mutex_unlock(&registered_clients_lock);
+ return cl;
+ }
+
+ mutex_unlock(&registered_clients_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int adi_axi_adc_setup_channels(struct device *dev,
+ struct adi_axi_adc_state *st)
+{
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+ int i, ret;
+
+ if (conv->preenable_setup) {
+ ret = conv->preenable_setup(conv);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < conv->chip_info->num_channels; i++) {
+ adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i),
+ ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
+ }
+
+ return 0;
+}
+
+static void axi_adc_reset(struct adi_axi_adc_state *st)
+{
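+ /*
+ * Hold everything in reset, release the clock manager (MMCM) first,
+ * then release the rest of the core.
+ */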
+ adi_axi_adc_write(st, ADI_AXI_REG_RSTN, 0);
+ mdelay(10);
+ adi_axi_adc_write(st, ADI_AXI_REG_RSTN, ADI_AXI_REG_RSTN_MMCM_RSTN);
+ mdelay(10);
+ adi_axi_adc_write(st, ADI_AXI_REG_RSTN,
+ ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
+}
+
+static void adi_axi_adc_cleanup(void *data)
+{
+ struct adi_axi_adc_client *cl = data;
+
+ put_device(cl->dev);
+ module_put(cl->dev->driver->owner);
+}
+
+static int adi_axi_adc_probe(struct platform_device *pdev)
+{
+ struct adi_axi_adc_conv *conv;
+ struct iio_dev *indio_dev;
+ struct adi_axi_adc_client *cl;
+ struct adi_axi_adc_state *st;
+ unsigned int ver;
+ int ret;
+
+ cl = adi_axi_adc_attach_client(&pdev->dev);
+ if (IS_ERR(cl))
+ return PTR_ERR(cl);
+
+ ret = devm_add_action_or_reset(&pdev->dev, adi_axi_adc_cleanup, cl);
+ if (ret)
+ return ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->client = cl;
+ cl->state = st;
+ mutex_init(&st->lock);
+
+ st->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(st->regs))
+ return PTR_ERR(st->regs);
+
+ conv = &st->client->conv;
+
+ axi_adc_reset(st);
+
+ ver = adi_axi_adc_read(st, ADI_AXI_REG_VERSION);
+
+ if (cl->info->version > ver) {
+ dev_err(&pdev->dev,
+ "IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
+ ADI_AXI_PCORE_VER_MAJOR(cl->info->version),
+ ADI_AXI_PCORE_VER_MINOR(cl->info->version),
+ ADI_AXI_PCORE_VER_PATCH(cl->info->version),
+ ADI_AXI_PCORE_VER_MAJOR(ver),
+ ADI_AXI_PCORE_VER_MINOR(ver),
+ ADI_AXI_PCORE_VER_PATCH(ver));
+ return -ENODEV;
+ }
+
+ indio_dev->info = &adi_axi_adc_info;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = "adi-axi-adc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = conv->chip_info->num_channels;
+ indio_dev->channels = conv->chip_info->channels;
+
+ ret = adi_axi_adc_config_dma_buffer(&pdev->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = adi_axi_adc_setup_channels(&pdev->dev, st);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
+ ADI_AXI_PCORE_VER_MAJOR(ver),
+ ADI_AXI_PCORE_VER_MINOR(ver),
+ ADI_AXI_PCORE_VER_PATCH(ver));
+
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id adi_axi_adc_of_match[] = {
+ { .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
+ { /* end of list */ }
+};
+MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);
+
+static struct platform_driver adi_axi_adc_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = adi_axi_adc_of_match,
+ },
+ .probe = adi_axi_adc_probe,
+};
+module_platform_driver(adi_axi_adc_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 9d96f7d08b95..9abbbdcc7420 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -100,6 +101,8 @@
#define AT91_SAMA5D2_IER_YRDY BIT(21)
/* Interrupt Enable Register - TS pressure measurement ready */
#define AT91_SAMA5D2_IER_PRDY BIT(22)
+/* Interrupt Enable Register - Data ready */
+#define AT91_SAMA5D2_IER_DRDY BIT(24)
/* Interrupt Enable Register - general overrun error */
#define AT91_SAMA5D2_IER_GOVRE BIT(25)
/* Interrupt Enable Register - Pen detect */
@@ -486,6 +489,21 @@ static inline int at91_adc_of_xlate(struct iio_dev *indio_dev,
return at91_adc_chan_xlate(indio_dev, iiospec->args[0]);
}
+static unsigned int at91_adc_active_scan_mask_to_reg(struct iio_dev *indio_dev)
+{
+ u32 mask = 0;
+ u8 bit;
+
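+ /* Build the EOC bitmask covering every enabled conversion channel. */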
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->num_channels) {
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+ mask |= BIT(chan->channel);
+ }
+
+ return mask & GENMASK(11, 0);
+}
+
static void at91_adc_config_emr(struct at91_adc_state *st)
{
/* configure the extended mode register */
@@ -710,7 +728,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
struct iio_dev *indio = iio_trigger_get_drvdata(trig);
struct at91_adc_state *st = iio_priv(indio);
u32 status = at91_adc_readl(st, AT91_SAMA5D2_TRGR);
- u8 bit;
/* clear TRGMOD */
status &= ~AT91_SAMA5D2_TRGR_TRGMOD_MASK;
@@ -721,50 +738,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
/* set/unset hw trigger */
at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
- for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
- struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
- u32 cor;
-
- if (!chan)
- continue;
- /* these channel types cannot be handled by this trigger */
- if (chan->type == IIO_POSITIONRELATIVE ||
- chan->type == IIO_PRESSURE)
- continue;
-
- if (state) {
- cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
-
- if (chan->differential)
- cor |= (BIT(chan->channel) |
- BIT(chan->channel2)) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET;
- else
- cor &= ~(BIT(chan->channel) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET);
-
- at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
- }
-
- if (state) {
- at91_adc_writel(st, AT91_SAMA5D2_CHER,
- BIT(chan->channel));
- /* enable irq only if not using DMA */
- if (!st->dma_st.dma_chan) {
- at91_adc_writel(st, AT91_SAMA5D2_IER,
- BIT(chan->channel));
- }
- } else {
- /* disable irq only if not using DMA */
- if (!st->dma_st.dma_chan) {
- at91_adc_writel(st, AT91_SAMA5D2_IDR,
- BIT(chan->channel));
- }
- at91_adc_writel(st, AT91_SAMA5D2_CHDR,
- BIT(chan->channel));
- }
- }
-
return 0;
}
@@ -781,6 +754,7 @@ static int at91_adc_reenable_trigger(struct iio_trigger *trig)
/* Needed to ACK the DRDY interruption */
at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+
return 0;
}
@@ -888,18 +862,37 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
return 0;
}
-static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
+static bool at91_adc_buffer_check_use_irq(struct iio_dev *indio,
+ struct at91_adc_state *st)
+{
+ /* if using DMA, we do not use our own IRQ (the DMA controller has its own) */
+ if (st->dma_st.dma_chan)
+ return false;
+ /* if the trigger is not ours, then it has its own IRQ */
+ if (iio_trigger_validate_own_device(indio->trig, indio))
+ return false;
+ return true;
+}
+
+static bool at91_adc_current_chan_is_touch(struct iio_dev *indio_dev)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ return !!bitmap_subset(indio_dev->active_scan_mask,
+ &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1);
+}
+
+static int at91_adc_buffer_preenable(struct iio_dev *indio_dev)
{
int ret;
+ u8 bit;
struct at91_adc_state *st = iio_priv(indio_dev);
/* check if we are enabling triggered buffer or the touchscreen */
- if (bitmap_subset(indio_dev->active_scan_mask,
- &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
- /* touchscreen enabling */
+ if (at91_adc_current_chan_is_touch(indio_dev))
return at91_adc_configure_touch(st, true);
- }
+
/* if we are not in triggered mode, we cannot enable the buffer. */
if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
@@ -911,41 +904,65 @@ static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
return ret;
}
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->num_channels) {
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+ u32 cor;
+
+ if (!chan)
+ continue;
+ /* these channel types cannot be handled by this trigger */
+ if (chan->type == IIO_POSITIONRELATIVE ||
+ chan->type == IIO_PRESSURE)
+ continue;
+
+ cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
+
+ if (chan->differential)
+ cor |= (BIT(chan->channel) | BIT(chan->channel2)) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET;
+ else
+ cor &= ~(BIT(chan->channel) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET);
+
+ at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+
+ at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
+ }
+
+ if (at91_adc_buffer_check_use_irq(indio_dev, st))
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_DRDY);
+
+ return 0;
+}
+
+static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
+{
+ if (at91_adc_current_chan_is_touch(indio_dev))
+ return 0;
+
return iio_triggered_buffer_postenable(indio_dev);
}
-static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
+static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
{
struct at91_adc_state *st = iio_priv(indio_dev);
- int ret;
u8 bit;
/* check if we are disabling triggered buffer or the touchscreen */
- if (bitmap_subset(indio_dev->active_scan_mask,
- &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
- /* touchscreen disable */
+ if (at91_adc_current_chan_is_touch(indio_dev))
return at91_adc_configure_touch(st, false);
- }
+
/* if we are not in triggered mode, nothing to do here */
if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
- /* continue with the triggered buffer */
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret < 0)
- dev_err(&indio_dev->dev, "buffer predisable failed\n");
-
- if (!st->dma_st.dma_chan)
- return ret;
-
- /* if we are using DMA we must clear registers and end DMA */
- dmaengine_terminate_sync(st->dma_st.dma_chan);
-
/*
- * For each enabled channel we must read the last converted value
+ * For each enabled channel we must disable it in hardware.
+ * In the case of DMA, we must read the last converted value
* to clear EOC status and not get a possible interrupt later.
- * This value is being read by DMA from LCDR anyway
+ * This value is being read by DMA from LCDR anyway, so it's not lost.
*/
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
@@ -958,16 +975,37 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
if (chan->type == IIO_POSITIONRELATIVE ||
chan->type == IIO_PRESSURE)
continue;
+
+ at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+
if (st->dma_st.dma_chan)
at91_adc_readl(st, chan->address);
}
+ if (at91_adc_buffer_check_use_irq(indio_dev, st))
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_DRDY);
+
/* read overflow register to clear possible overflow status */
at91_adc_readl(st, AT91_SAMA5D2_OVER);
- return ret;
+
+ /* if we are using DMA we must clear registers and end DMA */
+ if (st->dma_st.dma_chan)
+ dmaengine_terminate_sync(st->dma_st.dma_chan);
+
+ return 0;
+}
+
+static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
+{
+ if (at91_adc_current_chan_is_touch(indio_dev))
+ return 0;
+
+ return iio_triggered_buffer_predisable(indio_dev);
}
static const struct iio_buffer_setup_ops at91_buffer_setup_ops = {
+ .preenable = &at91_adc_buffer_preenable,
+ .postdisable = &at91_adc_buffer_postdisable,
.postenable = &at91_adc_buffer_postenable,
.predisable = &at91_adc_buffer_predisable,
};
@@ -1015,6 +1053,22 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
int i = 0;
int val;
u8 bit;
+ u32 mask = at91_adc_active_scan_mask_to_reg(indio_dev);
+ unsigned int timeout = 50;
+
+ /*
+ * Check if the conversion is ready. If not, wait a little bit, and
+ * in case of timeout give up without pushing data to the buffer.
+ */
+ while ((at91_adc_readl(st, AT91_SAMA5D2_ISR) & mask) != mask &&
+ timeout) {
+ usleep_range(50, 100);
+ timeout--;
+ }
+
+ /* Cannot read data, not ready. Continue without reporting data */
+ if (!timeout)
+ return;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
@@ -1102,6 +1156,13 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct at91_adc_state *st = iio_priv(indio_dev);
+ /*
+ * If it's not our trigger, start a conversion now, as we are
+ * actually polling the trigger now.
+ */
+ if (iio_trigger_validate_own_device(indio_dev->trig, indio_dev))
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+
if (st->dma_st.dma_chan)
at91_adc_trigger_handler_dma(indio_dev);
else
@@ -1114,20 +1175,9 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
static int at91_adc_buffer_init(struct iio_dev *indio)
{
- struct at91_adc_state *st = iio_priv(indio);
-
- if (st->selected_trig->hw_trig) {
- return devm_iio_triggered_buffer_setup(&indio->dev, indio,
- &iio_pollfunc_store_time,
- &at91_adc_trigger_handler, &at91_buffer_setup_ops);
- }
- /*
- * we need to prepare the buffer ops in case we will get
- * another buffer attached (like a callback buffer for the touchscreen)
- */
- indio->setup_ops = &at91_buffer_setup_ops;
-
- return 0;
+ return devm_iio_triggered_buffer_setup(&indio->dev, indio,
+ &iio_pollfunc_store_time,
+ &at91_adc_trigger_handler, &at91_buffer_setup_ops);
}
static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -1281,7 +1331,8 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
status = at91_adc_readl(st, AT91_SAMA5D2_XPOSR);
status = at91_adc_readl(st, AT91_SAMA5D2_YPOSR);
status = at91_adc_readl(st, AT91_SAMA5D2_PRESSR);
- } else if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
+ } else if (iio_buffer_enabled(indio) &&
+ (status & AT91_SAMA5D2_IER_DRDY)) {
/* triggered buffer without DMA */
disable_irq_nosync(irq);
iio_trigger_poll(indio->trig);
@@ -1901,14 +1952,10 @@ static __maybe_unused int at91_adc_resume(struct device *dev)
return 0;
/* check if we are enabling triggered buffer or the touchscreen */
- if (bitmap_subset(indio_dev->active_scan_mask,
- &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
- /* touchscreen enabling */
+ if (at91_adc_current_chan_is_touch(indio_dev))
return at91_adc_configure_touch(st, true);
- } else {
+ else
return at91_adc_configure_trigger(st->trig, true);
- }
/* not needed but more explicit */
return 0;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index abe99856c823..0368b6dc6d60 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -1152,7 +1152,6 @@ static int at91_adc_probe(struct platform_device *pdev)
int ret;
struct iio_dev *idev;
struct at91_adc_state *st;
- struct resource *res;
u32 reg;
idev = devm_iio_device_alloc(&pdev->dev, sizeof(struct at91_adc_state));
@@ -1182,9 +1181,7 @@ static int at91_adc_probe(struct platform_device *pdev)
if (st->irq < 0)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- st->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ st->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(st->reg_base))
return PTR_ERR(st->reg_base);
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 22131a677445..6bda4f4d89fe 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -449,9 +449,6 @@ static void exynos_adc_exynos7_init_hw(struct exynos_adc *info)
{
u32 con1, con2;
- if (info->data->needs_adc_phy)
- regmap_write(info->pmu_map, info->data->phy_offset, 1);
-
con1 = ADC_V2_CON1_SOFT_RESET;
writel(con1, ADC_V2_CON1(info->regs));
@@ -531,8 +528,19 @@ static int exynos_read_raw(struct iio_dev *indio_dev,
unsigned long timeout;
int ret;
- if (mask != IIO_CHAN_INFO_RAW)
+ if (mask == IIO_CHAN_INFO_SCALE) {
+ ret = regulator_get_voltage(info->vdd);
+ if (ret < 0)
+ return ret;
+
+ /* Regulator voltage is in uV, but we need mV */
+ *val = ret / 1000;
+ *val2 = info->data->mask;
+
+ return IIO_VAL_FRACTIONAL;
+ } else if (mask != IIO_CHAN_INFO_RAW) {
return -EINVAL;
+ }
mutex_lock(&indio_dev->mlock);
reinit_completion(&info->completion);
@@ -683,6 +691,7 @@ static const struct iio_info exynos_adc_iio_info = {
.channel = _index, \
.address = _index, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
.datasheet_name = _id, \
}
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index fa71489195c6..b0a4dc88ba9b 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -294,7 +294,6 @@ static int mx25_gcq_probe(struct platform_device *pdev)
struct mx25_gcq_priv *priv;
struct mx25_tsadc *tsadc = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *mem;
int ret;
int i;
@@ -305,8 +304,7 @@ static int mx25_gcq_probe(struct platform_device *pdev)
priv = iio_priv(indio_dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem = devm_ioremap_resource(dev, res);
+ mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
index c35a1beb817c..a6d2e1f27e76 100644
--- a/drivers/iio/adc/intel_mrfld_adc.c
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -75,7 +75,7 @@ static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
struct regmap *regmap = adc->regmap;
unsigned int req;
long timeout;
- u8 buf[2];
+ __be16 value;
int ret;
reinit_completion(&adc->completion);
@@ -105,11 +105,11 @@ static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
goto done;
}
- ret = regmap_bulk_read(regmap, chan->address, buf, 2);
+ ret = regmap_bulk_read(regmap, chan->address, &value, sizeof(value));
if (ret)
goto done;
- *result = get_unaligned_be16(buf);
+ *result = be16_to_cpu(value);
ret = IIO_VAL_INT;
done:
diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c
new file mode 100644
index 000000000000..541939c7abca
--- /dev/null
+++ b/drivers/iio/adc/max1241.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MAX1241 low-power, 12-bit serial ADC
+ *
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX1240-MAX1241.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#define MAX1241_VAL_MASK GENMASK(11, 0)
+#define MAX1241_SHUTDOWN_DELAY_USEC 4
+
+enum max1241_id {
+ max1241,
+};
+
+struct max1241 {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct regulator *vdd;
+ struct regulator *vref;
+ struct gpio_desc *shutdown;
+
+ __be16 data ____cacheline_aligned;
+};
+
+static const struct iio_chan_spec max1241_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int max1241_read(struct max1241 *adc)
+{
+ struct spi_transfer xfers[] = {
+ /*
+ * Begin conversion by bringing /CS low for at least
+ * tconv us.
+ */
+ {
+ .len = 0,
+ .delay.value = 8,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
+ },
+ /*
+ * Then read two bytes of data into our RX buffer.
+ */
+ {
+ .rx_buf = &adc->data,
+ .len = 2,
+ },
+ };
+
+ return spi_sync_transfer(adc->spi, xfers, ARRAY_SIZE(xfers));
+}
+
+static int max1241_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret, vref_uV;
+ struct max1241 *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+
+ if (adc->shutdown) {
+ gpiod_set_value(adc->shutdown, 0);
+ udelay(MAX1241_SHUTDOWN_DELAY_USEC);
+ ret = max1241_read(adc);
+ gpiod_set_value(adc->shutdown, 1);
+ } else {
+ ret = max1241_read(adc);
+ }
+
+ if (ret) {
+ mutex_unlock(&adc->lock);
+ return ret;
+ }
+
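+ /*
+ * Per the datasheet, the 16-bit frame holds a leading
+ * end-of-conversion bit, 12 data bits and three trailing fill
+ * bits; drop the fill bits and mask to 12 bits.
+ */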
+ *val = (be16_to_cpu(adc->data) >> 3) & MAX1241_VAL_MASK;
+
+ mutex_unlock(&adc->lock);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ vref_uV = regulator_get_voltage(adc->vref);
+
+ if (vref_uV < 0)
+ return vref_uV;
+
+ *val = vref_uV / 1000;
+ *val2 = 12;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info max1241_info = {
+ .read_raw = max1241_read_raw,
+};
+
+static void max1241_disable_vdd_action(void *data)
+{
+ struct max1241 *adc = data;
+ struct device *dev = &adc->spi->dev;
+ int err;
+
+ err = regulator_disable(adc->vdd);
+ if (err)
+ dev_err(dev, "could not disable vdd regulator.\n");
+}
+
+static void max1241_disable_vref_action(void *data)
+{
+ struct max1241 *adc = data;
+ struct device *dev = &adc->spi->dev;
+ int err;
+
+ err = regulator_disable(adc->vref);
+ if (err)
+ dev_err(dev, "could not disable vref regulator.\n");
+}
+
+static int max1241_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct max1241 *adc;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->spi = spi;
+ mutex_init(&adc->lock);
+
+ spi_set_drvdata(spi, indio_dev);
+
+ adc->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(adc->vdd)) {
+ dev_err(dev, "failed to get vdd regulator\n");
+ return PTR_ERR(adc->vdd);
+ }
+
+ ret = regulator_enable(adc->vdd);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, max1241_disable_vdd_action, adc);
+ if (ret) {
+ dev_err(dev, "could not set up vdd regulator cleanup action\n");
+ return ret;
+ }
+
+ adc->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(adc->vref)) {
+ dev_err(dev, "failed to get vref regulator\n");
+ return PTR_ERR(adc->vref);
+ }
+
+ ret = regulator_enable(adc->vref);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, max1241_disable_vref_action, adc);
+ if (ret) {
+ dev_err(dev, "could not set up vref regulator cleanup action\n");
+ return ret;
+ }
+
+ adc->shutdown = devm_gpiod_get_optional(dev, "shutdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(adc->shutdown))
+ return PTR_ERR(adc->shutdown);
+
+ if (adc->shutdown)
+ dev_dbg(dev, "shutdown pin passed, low-power mode enabled");
+ else
+ dev_dbg(dev, "no shutdown pin passed, low-power mode disabled");
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &max1241_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max1241_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max1241_channels);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id max1241_id[] = {
+ { "max1241", max1241 },
+ {}
+};
+
+static const struct of_device_id max1241_dt_ids[] = {
+ { .compatible = "maxim,max1241" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max1241_dt_ids);
+
+static struct spi_driver max1241_spi_driver = {
+ .driver = {
+ .name = "max1241",
+ .of_match_table = max1241_dt_ids,
+ },
+ .probe = max1241_probe,
+ .id_table = max1241_id,
+};
+module_spi_driver(max1241_spi_driver);
+
+MODULE_AUTHOR("Alexandru Lazar <alazar@startmail.com>");
+MODULE_DESCRIPTION("MAX1241 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 5c2cc61b666e..9d92017c79b2 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -150,6 +150,7 @@ struct max1363_chip_info {
* @current_mode: the scan mode of this chip
* @requestedmask: a valid requested set of channels
* @reg: supply regulator
+ * @lock: lock to ensure state is consistent
* @monitor_on: whether monitor mode is enabled
* @monitor_speed: parameter corresponding to device monitor speed setting
* @mask_high: bitmask for enabled high thresholds
@@ -169,6 +170,7 @@ struct max1363_state {
const struct max1363_mode *current_mode;
u32 requestedmask;
struct regulator *reg;
+ struct mutex lock;
/* Using monitor modes and buffer at the same time is
currently not supported */
@@ -364,7 +366,11 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
struct max1363_state *st = iio_priv(indio_dev);
struct i2c_client *client = st->client;
- mutex_lock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+
/*
* If monitor mode is enabled, the method for reading a single
* channel will have to be rather different and has not yet
@@ -372,7 +378,7 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
*
* Also, cannot read directly if buffered capture enabled.
*/
- if (st->monitor_on || iio_buffer_enabled(indio_dev)) {
+ if (st->monitor_on) {
ret = -EBUSY;
goto error_ret;
}
@@ -404,8 +410,10 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
data = rxbuf[0];
}
*val = data;
+
error_ret:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -705,9 +713,9 @@ static ssize_t max1363_monitor_store_freq(struct device *dev,
if (!found)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->monitor_speed = i;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
}
@@ -810,12 +818,12 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
int val;
int number = chan->channel;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (dir == IIO_EV_DIR_FALLING)
val = (1 << number) & st->mask_low;
else
val = (1 << number) & st->mask_high;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return val;
}
@@ -962,7 +970,11 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
u16 unifiedmask;
int number = chan->channel;
- mutex_lock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+
unifiedmask = st->mask_low | st->mask_high;
if (dir == IIO_EV_DIR_FALLING) {
@@ -989,7 +1001,8 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
error_ret:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -1587,6 +1600,7 @@ static int max1363_probe(struct i2c_client *client,
st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
st->reg = devm_regulator_get(&client->dev, "vcc");
if (IS_ERR(st->reg)) {
ret = PTR_ERR(st->reg);
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index ea24d7c58b12..d86c0b5d80a3 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/of.h>
+#include <asm/unaligned.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -117,11 +118,11 @@ static int mcp3422_read(struct mcp3422 *adc, int *value, u8 *config)
if (sample_rate == MCP3422_SRATE_3) {
ret = i2c_master_recv(adc->i2c, buf, 4);
- temp = buf[0] << 16 | buf[1] << 8 | buf[2];
+ temp = get_unaligned_be24(&buf[0]);
*config = buf[3];
} else {
ret = i2c_master_recv(adc->i2c, buf, 3);
- temp = buf[0] << 8 | buf[1];
+ temp = get_unaligned_be16(&buf[0]);
*config = buf[2];
}
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
new file mode 100644
index 000000000000..331a9a728217
--- /dev/null
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MP2629 ADC driver
+ *
+ * Copyright 2020 Monolithic Power Systems, Inc
+ *
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/iio/driver.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/mfd/mp2629.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MP2629_REG_ADC_CTRL 0x03
+#define MP2629_REG_BATT_VOLT 0x0e
+#define MP2629_REG_SYSTEM_VOLT 0x0f
+#define MP2629_REG_INPUT_VOLT 0x11
+#define MP2629_REG_BATT_CURRENT 0x12
+#define MP2629_REG_INPUT_CURRENT 0x13
+
+#define MP2629_ADC_START BIT(7)
+#define MP2629_ADC_CONTINUOUS BIT(6)
+
+#define MP2629_MAP(_mp, _mpc) IIO_MAP(#_mp, "mp2629_charger", "mp2629-"_mpc)
+
+#define MP2629_ADC_CHAN(_ch, _type) { \
+ .type = _type, \
+ .indexed = 1, \
+ .datasheet_name = #_ch, \
+ .channel = MP2629_ ## _ch, \
+ .address = MP2629_REG_ ## _ch, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+struct mp2629_adc {
+ struct regmap *regmap;
+ struct device *dev;
+};
+
+static struct iio_chan_spec mp2629_channels[] = {
+ MP2629_ADC_CHAN(BATT_VOLT, IIO_VOLTAGE),
+ MP2629_ADC_CHAN(SYSTEM_VOLT, IIO_VOLTAGE),
+ MP2629_ADC_CHAN(INPUT_VOLT, IIO_VOLTAGE),
+ MP2629_ADC_CHAN(BATT_CURRENT, IIO_CURRENT),
+ MP2629_ADC_CHAN(INPUT_CURRENT, IIO_CURRENT)
+};
+
+static struct iio_map mp2629_adc_maps[] = {
+ MP2629_MAP(BATT_VOLT, "batt-volt"),
+ MP2629_MAP(SYSTEM_VOLT, "system-volt"),
+ MP2629_MAP(INPUT_VOLT, "input-volt"),
+ MP2629_MAP(BATT_CURRENT, "batt-current"),
+ MP2629_MAP(INPUT_CURRENT, "input-current")
+};
+
+static int mp2629_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mp2629_adc *info = iio_priv(indio_dev);
+ unsigned int rval;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(info->regmap, chan->address, &rval);
+ if (ret)
+ return ret;
+
+ if (chan->address == MP2629_INPUT_VOLT)
+ rval &= GENMASK(6, 0);
+ *val = rval;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
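+ /* Raw-to-mV/mA LSB step size for each channel, per the datasheet. */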
+ switch (chan->channel) {
+ case MP2629_BATT_VOLT:
+ case MP2629_SYSTEM_VOLT:
+ *val = 20;
+ return IIO_VAL_INT;
+
+ case MP2629_INPUT_VOLT:
+ *val = 60;
+ return IIO_VAL_INT;
+
+ case MP2629_BATT_CURRENT:
+ *val = 175;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL;
+
+ case MP2629_INPUT_CURRENT:
+ *val = 133;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mp2629_adc_info = {
+ .read_raw = &mp2629_read_raw,
+};
+
+static int mp2629_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mp2629_data *ddata = dev_get_drvdata(dev->parent);
+ struct mp2629_adc *info;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ info = iio_priv(indio_dev);
+ info->regmap = ddata->regmap;
+ info->dev = dev;
+ platform_set_drvdata(pdev, indio_dev);
+
+ ret = regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START | MP2629_ADC_CONTINUOUS,
+ MP2629_ADC_START | MP2629_ADC_CONTINUOUS);
+ if (ret) {
+ dev_err(dev, "adc enable fail: %d\n", ret);
+ return ret;
+ }
+
+ ret = iio_map_array_register(indio_dev, mp2629_adc_maps);
+ if (ret) {
+ dev_err(dev, "IIO maps register fail: %d\n", ret);
+ goto fail_disable;
+ }
+
+ indio_dev->name = "mp2629-adc";
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = mp2629_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mp2629_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &mp2629_adc_info;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "IIO device register fail: %d\n", ret);
+ goto fail_map_unregister;
+ }
+
+ return 0;
+
+fail_map_unregister:
+ iio_map_array_unregister(indio_dev);
+
+fail_disable:
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_CONTINUOUS, 0);
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START, 0);
+
+ return ret;
+}
+
+static int mp2629_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct mp2629_adc *info = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ iio_map_array_unregister(indio_dev);
+
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_CONTINUOUS, 0);
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START, 0);
+
+ return 0;
+}
+
+static const struct of_device_id mp2629_adc_of_match[] = {
+ { .compatible = "mps,mp2629_adc"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2629_adc_of_match);
+
+static struct platform_driver mp2629_adc_driver = {
+ .driver = {
+ .name = "mp2629_adc",
+ .of_match_table = mp2629_adc_of_match,
+ },
+ .probe = mp2629_adc_probe,
+ .remove = mp2629_adc_remove,
+};
+module_platform_driver(mp2629_adc_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MP2629 ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 2df88d2b880a..0e2068ec068b 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -65,12 +65,14 @@ struct stm32_adc_priv;
* @clk_sel: clock selection routine
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
* @has_syscfg: SYSCFG capability flags
+ * @num_irqs: number of interrupt lines
*/
struct stm32_adc_priv_cfg {
const struct stm32_adc_common_regs *regs;
int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *);
u32 max_clk_rate_hz;
unsigned int has_syscfg;
+ unsigned int num_irqs;
};
/**
@@ -375,21 +377,15 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
unsigned int i;
- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
+ /*
+ * Interrupt(s) must be provided, depending on the compatible:
+ * - stm32f4/h7 share a common interrupt line.
+ * - stm32mp1 has one line per ADC.
+ */
+ for (i = 0; i < priv->cfg->num_irqs; i++) {
priv->irq[i] = platform_get_irq(pdev, i);
- if (priv->irq[i] < 0) {
- /*
- * At least one interrupt must be provided, make others
- * optional:
- * - stm32f4/h7 shares a common interrupt.
- * - stm32mp1, has one line per ADC (either for ADC1,
- * ADC2 or both).
- */
- if (i && priv->irq[i] == -ENXIO)
- continue;
-
+ if (priv->irq[i] < 0)
return priv->irq[i];
- }
}
priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
@@ -400,9 +396,7 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
return -ENOMEM;
}
- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
- if (priv->irq[i] < 0)
- continue;
+ for (i = 0; i < priv->cfg->num_irqs; i++) {
irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler);
irq_set_handler_data(priv->irq[i], priv);
}
@@ -420,11 +414,8 @@ static void stm32_adc_irq_remove(struct platform_device *pdev,
irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
irq_domain_remove(priv->domain);
- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
- if (priv->irq[i] < 0)
- continue;
+ for (i = 0; i < priv->cfg->num_irqs; i++)
irq_set_chained_handler(priv->irq[i], NULL);
- }
}
static int stm32_adc_core_switches_supply_en(struct stm32_adc_priv *priv,
@@ -817,6 +808,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
.regs = &stm32f4_adc_common_regs,
.clk_sel = stm32f4_adc_clk_sel,
.max_clk_rate_hz = 36000000,
+ .num_irqs = 1,
};
static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
@@ -824,6 +816,7 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 36000000,
.has_syscfg = HAS_VBOOSTER,
+ .num_irqs = 1,
};
static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
@@ -831,6 +824,7 @@ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 40000000,
.has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
+ .num_irqs = 2,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index ae622ee6d08c..dfc3a306c667 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1812,18 +1812,18 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
return 0;
}
-static int stm32_adc_dma_request(struct iio_dev *indio_dev)
+static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev)
{
struct stm32_adc *adc = iio_priv(indio_dev);
struct dma_slave_config config;
int ret;
- adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+ adc->dma_chan = dma_request_chan(dev, "rx");
if (IS_ERR(adc->dma_chan)) {
ret = PTR_ERR(adc->dma_chan);
if (ret != -ENODEV) {
if (ret != -EPROBE_DEFER)
- dev_err(&indio_dev->dev,
+ dev_err(dev,
"DMA channel request failed with %d\n",
ret);
return ret;
@@ -1930,7 +1930,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = stm32_adc_dma_request(indio_dev);
+ ret = stm32_adc_dma_request(dev, indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 76a60d93fe23..506bf519f64c 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -62,7 +62,7 @@ enum sd_converter_type {
struct stm32_dfsdm_dev_data {
int type;
- int (*init)(struct iio_dev *indio_dev);
+ int (*init)(struct device *dev, struct iio_dev *indio_dev);
unsigned int num_channels;
const struct regmap_config *regmap_cfg;
};
@@ -1365,11 +1365,12 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
}
}
-static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+static int stm32_dfsdm_dma_request(struct device *dev,
+ struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+ adc->dma_chan = dma_request_chan(dev, "rx");
if (IS_ERR(adc->dma_chan)) {
int ret = PTR_ERR(adc->dma_chan);
@@ -1425,7 +1426,7 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
&adc->dfsdm->ch_list[ch->channel]);
}
-static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
@@ -1452,10 +1453,10 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
indio_dev->num_channels = 1;
indio_dev->channels = ch;
- return stm32_dfsdm_dma_request(indio_dev);
+ return stm32_dfsdm_dma_request(dev, indio_dev);
}
-static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
@@ -1499,17 +1500,17 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
init_completion(&adc->completion);
/* Optionally request DMA */
- ret = stm32_dfsdm_dma_request(indio_dev);
+ ret = stm32_dfsdm_dma_request(dev, indio_dev);
if (ret) {
if (ret != -ENODEV) {
if (ret != -EPROBE_DEFER)
- dev_err(&indio_dev->dev,
+ dev_err(dev,
"DMA channel request failed with %d\n",
ret);
return ret;
}
- dev_dbg(&indio_dev->dev, "No DMA support\n");
+ dev_dbg(dev, "No DMA support\n");
return 0;
}
@@ -1622,7 +1623,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
adc->dev_data = dev_data;
- ret = dev_data->init(iio);
+ ret = dev_data->init(dev, iio);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 176e1cb4abb1..0f2c1738a90d 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -496,7 +496,6 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
struct iio_dev *indio_dev)
{
struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
- struct resource *mem;
void __iomem *base;
int ret;
@@ -508,8 +507,7 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
indio_dev->num_channels = ARRAY_SIZE(sun8i_a33_gpadc_channels);
indio_dev->channels = sun8i_a33_gpadc_channels;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, mem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 552c2be8d87a..f1ee3b1e2827 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -22,6 +22,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
/* Commands */
#define ADS124S08_CMD_NOP 0x00
#define ADS124S08_CMD_WAKEUP 0x02
@@ -188,7 +190,6 @@ static int ads124s_read(struct iio_dev *indio_dev, unsigned int chan)
{
struct ads124s_private *priv = iio_priv(indio_dev);
int ret;
- u32 tmp;
struct spi_transfer t[] = {
{
.tx_buf = &priv->data[0],
@@ -208,9 +209,7 @@ static int ads124s_read(struct iio_dev *indio_dev, unsigned int chan)
if (ret < 0)
return ret;
- tmp = priv->data[2] << 16 | priv->data[3] << 8 | priv->data[4];
-
- return tmp;
+ return get_unaligned_be24(&priv->data[2]);
}
static int ads124s_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c
index abe4b56c847c..8a8792010c20 100644
--- a/drivers/iio/adc/ti-ads8344.c
+++ b/drivers/iio/adc/ti-ads8344.c
@@ -32,16 +32,17 @@ struct ads8344 {
u8 rx_buf[3];
};
-#define ADS8344_VOLTAGE_CHANNEL(chan, si) \
+#define ADS8344_VOLTAGE_CHANNEL(chan, addr) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = addr, \
}
-#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \
+#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, addr) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -50,6 +51,7 @@ struct ads8344 {
.differential = 1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = addr, \
}
static const struct iio_chan_spec ads8344_channels[] = {
@@ -105,7 +107,7 @@ static int ads8344_read_raw(struct iio_dev *iio,
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&adc->lock);
- *value = ads8344_adc_conversion(adc, channel->scan_index,
+ *value = ads8344_adc_conversion(adc, channel->address,
channel->differential);
mutex_unlock(&adc->lock);
if (*value < 0)
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 6fd06e4eff73..d7fecab9252e 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -3,7 +3,7 @@
* Xilinx XADC driver
*
* Copyright 2013-2014 Analog Devices Inc.
- * Author: Lars-Peter Clauen <lars@metafoo.de>
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Documentation for the parts can be found at:
* - XADC hardmacro: Xilinx UG480
@@ -663,7 +663,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
mutex_lock(&xadc->mutex);
if (state) {
- /* Only one of the two triggers can be active at the a time. */
+ /* Only one of the two triggers can be active at a time. */
if (xadc->trigger != NULL) {
ret = -EBUSY;
goto err_out;
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
index dbfd5da290a4..2357f585720a 100644
--- a/drivers/iio/adc/xilinx-xadc-events.c
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -3,7 +3,7 @@
* Xilinx XADC driver
*
* Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clauen <lars@metafoo.de>
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
*/
#include <linux/iio/events.h>
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 4017f18b0a4f..25abed9c0285 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -3,7 +3,7 @@
* Xilinx XADC driver
*
* Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clauen <lars@metafoo.de>
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
*/
#ifndef __IIO_XILINX_XADC__
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index a74bd9c0587c..d348af8b9705 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -12,7 +12,6 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index b129693af0fd..6dedf12b69a4 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -134,7 +134,7 @@ static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(indio_dev->buffer);
- return sprintf(buf, "%u\n", dmaengine_buffer->align);
+ return sprintf(buf, "%zu\n", dmaengine_buffer->align);
}
static IIO_DEVICE_ATTR(length_align_bytes, 0444,
@@ -229,6 +229,45 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
+static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
+{
+ iio_dmaengine_buffer_free(*(struct iio_buffer **)res);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
+ * @dev: Parent device for the buffer
+ * @channel: DMA channel name, typically "rx".
+ *
+ * This allocates a new IIO buffer which internally uses the DMAengine framework
+ * to perform its transfers. The parent device will be used to request the DMA
+ * channel.
+ *
+ * The buffer will be automatically de-allocated once the device gets destroyed.
+ */
+struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
+ const char *channel)
+{
+ struct iio_buffer **bufferp, *buffer;
+
+ bufferp = devres_alloc(__devm_iio_dmaengine_buffer_free,
+ sizeof(*bufferp), GFP_KERNEL);
+ if (!bufferp)
+ return ERR_PTR(-ENOMEM);
+
+ buffer = iio_dmaengine_buffer_alloc(dev, channel);
+ if (IS_ERR(buffer)) {
+ devres_free(bufferp);
+ return buffer;
+ }
+
+ *bufferp = buffer;
+ devres_add(dev, bufferp);
+
+ return buffer;
+}
+EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_alloc);
+
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
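[Editor's note: a minimal sketch, not part of this patch, of how a consumer driver might use the new devres-managed allocator; the foo_* names and the "rx" channel name are illustrative assumptions.]

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dmaengine.h>

static int foo_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct iio_buffer *buffer;

	indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
	if (!indio_dev)
		return -ENOMEM;

	/* Freed automatically when the parent device is unbound */
	buffer = devm_iio_dmaengine_buffer_alloc(&pdev->dev, "rx");
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	iio_device_attach_buffer(indio_dev, buffer);
	indio_dev->modes = INDIO_BUFFER_HARDWARE;

	return devm_iio_device_register(&pdev->dev, indio_dev);
}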
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
index 95165697d8ae..f2d27788f666 100644
--- a/drivers/iio/buffer/industrialio-hw-consumer.c
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -142,17 +142,6 @@ static void devm_iio_hw_consumer_release(struct device *dev, void *res)
iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
}
-static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
-{
- struct iio_hw_consumer **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
- return *r == data;
-}
-
/**
* devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
* @dev: Pointer to consumer device.
@@ -160,9 +149,6 @@ static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
* Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function
* is automatically freed on driver detach.
*
- * If an iio_hw_consumer allocated with this function needs to be freed
- * separately, devm_iio_hw_consumer_free() must be used.
- *
* returns pointer to allocated iio_hw_consumer on success, NULL on failure.
*/
struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
@@ -187,23 +173,6 @@ struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
/**
- * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
- * @dev: Pointer to consumer device.
- * @hwc: iio_hw_consumer to free.
- *
- * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
- */
-void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_hw_consumer_release,
- devm_iio_hw_consumer_match, hwc);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
-
-/**
* iio_hw_consumer_enable() - Enable IIO hardware consumer
* @hwc: iio_hw_consumer to enable.
*
diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
index cb322b2f09cd..e8046c1ecd6b 100644
--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
+++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
@@ -126,17 +126,6 @@ int devm_iio_triggered_buffer_setup(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_setup);
-void devm_iio_triggered_buffer_cleanup(struct device *dev,
- struct iio_dev *indio_dev)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_triggered_buffer_clean,
- devm_iio_device_match, indio_dev);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_cleanup);
-
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 3150f8ab984b..1359abed3b31 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -179,16 +179,6 @@ static void devm_iio_kfifo_release(struct device *dev, void *res)
iio_kfifo_free(*(struct iio_buffer **)res);
}
-static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
-{
- struct iio_buffer **r = res;
-
- if (WARN_ON(!r || !*r))
- return 0;
-
- return *r == data;
-}
-
/**
* devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
* @dev: Device to allocate kfifo buffer for
@@ -216,16 +206,4 @@ struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
}
EXPORT_SYMBOL(devm_iio_kfifo_allocate);
-/**
- * devm_iio_fifo_free - Resource-managed iio_kfifo_free()
- * @dev: Device the buffer belongs to
- * @r: The buffer associated with the device
- */
-void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
-{
- WARN_ON(devres_release(dev, devm_iio_kfifo_release,
- devm_iio_kfifo_match, r));
-}
-EXPORT_SYMBOL(devm_iio_kfifo_free);
-
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index a7e65a59bf42..7f21afd73b1c 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -22,6 +22,17 @@ config ATLAS_PH_SENSOR
To compile this driver as module, choose M here: the
module will be called atlas-ph-sensor.
+config ATLAS_EZO_SENSOR
+ tristate "Atlas Scientific EZO sensors"
+ depends on I2C
+ help
+ Say Y here to build I2C interface support for the following
+ Atlas Scientific EZO sensors:
+ * CO2 EZO Sensor
+
+ To compile this driver as a module, choose M here: the
+ module will be called atlas-ezo-sensor.
+
config BME680
tristate "Bosch Sensortec BME680 sensor driver"
depends on (I2C || SPI)
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index 33d3a595dda9..aba4167db745 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-sensor.o
+obj-$(CONFIG_ATLAS_EZO_SENSOR) += atlas-ezo-sensor.o
obj-$(CONFIG_BME680) += bme680_core.o
obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
obj-$(CONFIG_BME680_SPI) += bme680_spi.o
diff --git a/drivers/iio/chemical/atlas-ezo-sensor.c b/drivers/iio/chemical/atlas-ezo-sensor.c
new file mode 100644
index 000000000000..f5a6d8ec6d4d
--- /dev/null
+++ b/drivers/iio/chemical/atlas-ezo-sensor.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * atlas-ezo-sensor.c - Support for Atlas Scientific EZO sensors
+ *
+ * Copyright (C) 2020 Konsulko Group
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/iio/iio.h>
+
+#define ATLAS_EZO_DRV_NAME "atlas-ezo-sensor"
+#define ATLAS_CO2_INT_TIME_IN_MS 950
+
+enum {
+ ATLAS_CO2_EZO,
+};
+
+struct atlas_ezo_device {
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ int delay;
+};
+
+struct atlas_ezo_data {
+ struct i2c_client *client;
+ struct atlas_ezo_device *chip;
+
+ /* lock to avoid multiple concurrent read calls */
+ struct mutex lock;
+
+ u8 buffer[8];
+};
+
+static const struct iio_chan_spec atlas_co2_ezo_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .modified = 1,
+ .channel2 = IIO_MOD_CO2,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+};
+
+static struct atlas_ezo_device atlas_ezo_devices[] = {
+ [ATLAS_CO2_EZO] = {
+ .channels = atlas_co2_ezo_channels,
+ .num_channels = 1,
+ .delay = ATLAS_CO2_INT_TIME_IN_MS,
+ },
+};
+
+static int atlas_ezo_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct atlas_ezo_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int ret = 0;
+
+ if (chan->type != IIO_CONCENTRATION)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ long tmp;
+
+ mutex_lock(&data->lock);
+
+ tmp = i2c_smbus_write_byte(client, 'R');
+
+ if (tmp < 0) {
+ mutex_unlock(&data->lock);
+ return tmp;
+ }
+
+ msleep(data->chip->delay);
+
+ tmp = i2c_master_recv(client, data->buffer, sizeof(data->buffer));
+
+ if (tmp < 0 || data->buffer[0] != 1) {
+ mutex_unlock(&data->lock);
+ return -EBUSY;
+ }
+
+ ret = kstrtol(data->buffer + 1, 10, &tmp);
+
+ *val = tmp;
+
+ mutex_unlock(&data->lock);
+
+ return ret ? ret : IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 100; /* 0.0001 */
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return ret;
+}
+
+static const struct iio_info atlas_info = {
+ .read_raw = atlas_ezo_read_raw,
+};
+
+static const struct i2c_device_id atlas_ezo_id[] = {
+ { "atlas-co2-ezo", ATLAS_CO2_EZO },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_ezo_id);
+
+static const struct of_device_id atlas_ezo_dt_ids[] = {
+ { .compatible = "atlas,co2-ezo", .data = (void *)ATLAS_CO2_EZO, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, atlas_ezo_dt_ids);
+
+static int atlas_ezo_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atlas_ezo_data *data;
+ struct atlas_ezo_device *chip;
+ const struct of_device_id *of_id;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ of_id = of_match_device(atlas_ezo_dt_ids, &client->dev);
+ if (!of_id)
+ chip = &atlas_ezo_devices[id->driver_data];
+ else
+ chip = &atlas_ezo_devices[(unsigned long)of_id->data];
+
+ indio_dev->info = &atlas_info;
+ indio_dev->name = ATLAS_EZO_DRV_NAME;
+ indio_dev->channels = chip->channels;
+ indio_dev->num_channels = chip->num_channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &client->dev;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->chip = chip;
+ mutex_init(&data->lock);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static struct i2c_driver atlas_ezo_driver = {
+ .driver = {
+ .name = ATLAS_EZO_DRV_NAME,
+ .of_match_table = atlas_ezo_dt_ids,
+ },
+ .probe = atlas_ezo_probe,
+ .id_table = atlas_ezo_id,
+};
+module_i2c_driver(atlas_ezo_driver);
+
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
+MODULE_DESCRIPTION("Atlas Scientific EZO sensors");
+MODULE_LICENSE("GPL");
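[Editor's note: per the IIO ABI, IIO_CONCENTRATION values are in percent after scaling, so the 0.000100 scale above converts the sensor's ppm reading to percent (10000 ppm = 1%). A hedged user-space sketch, assuming the usual sysfs attribute naming for this channel:]

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/iio/devices/iio:device0/in_concentration_co2_raw";
	FILE *f = fopen(path, "r");
	long raw;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &raw) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* raw is in ppm; the 0.000100 scale yields percent concentration */
	printf("CO2: %.4f%%\n", raw * 0.0001);
	return 0;
}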
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 82d470561ad3..78a27e36bf32 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -53,6 +53,8 @@
#define ATLAS_REG_DO_CALIB_STATUS_PRESSURE BIT(0)
#define ATLAS_REG_DO_CALIB_STATUS_DO BIT(1)
+#define ATLAS_REG_RTD_DATA 0x0e
+
#define ATLAS_REG_PH_TEMP_DATA 0x0e
#define ATLAS_REG_PH_DATA 0x16
@@ -72,12 +74,14 @@
#define ATLAS_EC_INT_TIME_IN_MS 650
#define ATLAS_ORP_INT_TIME_IN_MS 450
#define ATLAS_DO_INT_TIME_IN_MS 450
+#define ATLAS_RTD_INT_TIME_IN_MS 450
enum {
ATLAS_PH_SM,
ATLAS_EC_SM,
ATLAS_ORP_SM,
ATLAS_DO_SM,
+ ATLAS_RTD_SM,
};
struct atlas_data {
@@ -194,7 +198,19 @@ static const struct iio_chan_spec atlas_orp_channels[] = {
};
static const struct iio_chan_spec atlas_do_channels[] = {
- ATLAS_CONCENTRATION_CHANNEL(0, ATLAS_REG_DO_DATA),
+ {
+ .type = IIO_CONCENTRATION,
+ .address = ATLAS_REG_DO_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
IIO_CHAN_SOFT_TIMESTAMP(1),
{
.type = IIO_TEMP,
@@ -206,6 +222,22 @@ static const struct iio_chan_spec atlas_do_channels[] = {
},
};
+static const struct iio_chan_spec atlas_rtd_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .address = ATLAS_REG_RTD_DATA,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
static int atlas_check_ph_calibration(struct atlas_data *data)
{
struct device *dev = &data->client->dev;
@@ -350,6 +382,12 @@ static struct atlas_device atlas_devices[] = {
.calibration = &atlas_check_do_calibration,
.delay = ATLAS_DO_INT_TIME_IN_MS,
},
+ [ATLAS_RTD_SM] = {
+ .channels = atlas_rtd_channels,
+ .num_channels = 2,
+ .data_reg = ATLAS_REG_RTD_DATA,
+ .delay = ATLAS_RTD_INT_TIME_IN_MS,
+ },
};
static int atlas_set_powermode(struct atlas_data *data, int on)
@@ -426,8 +464,7 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private)
int ret;
ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
- (u8 *) &data->buffer,
- sizeof(__be32) * channels);
+ &data->buffer, sizeof(__be32) * channels);
if (!ret)
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
@@ -463,7 +500,7 @@ static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
if (suspended)
msleep(data->chip->delay);
- ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val));
+ ret = regmap_bulk_read(data->regmap, reg, val, sizeof(*val));
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -478,6 +515,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
struct atlas_data *data = iio_priv(indio_dev);
switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
case IIO_CHAN_INFO_RAW: {
int ret;
__be32 reg;
@@ -485,7 +523,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_TEMP:
ret = regmap_bulk_read(data->regmap, chan->address,
- (u8 *) &reg, sizeof(reg));
+ &reg, sizeof(reg));
break;
case IIO_PH:
case IIO_CONCENTRATION:
@@ -566,6 +604,7 @@ static const struct i2c_device_id atlas_id[] = {
{ "atlas-ec-sm", ATLAS_EC_SM},
{ "atlas-orp-sm", ATLAS_ORP_SM},
{ "atlas-do-sm", ATLAS_DO_SM},
+ { "atlas-rtd-sm", ATLAS_RTD_SM},
{}
};
MODULE_DEVICE_TABLE(i2c, atlas_id);
@@ -575,6 +614,7 @@ static const struct of_device_id atlas_dt_ids[] = {
{ .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
{ .compatible = "atlas,orp-sm", .data = (void *)ATLAS_ORP_SM, },
{ .compatible = "atlas,do-sm", .data = (void *)ATLAS_DO_SM, },
+ { .compatible = "atlas,rtd-sm", .data = (void *)ATLAS_RTD_SM, },
{ }
};
MODULE_DEVICE_TABLE(of, atlas_dt_ids);
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index ccde4c65ff93..13773e01699b 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -114,14 +114,16 @@ static int bme680_read_calib(struct bme680_data *data,
__le16 buf;
/* Temperature related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_T1_LSB_REG\n");
return ret;
}
calib->par_t1 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_T2_LSB_REG\n");
return ret;
@@ -136,14 +138,16 @@ static int bme680_read_calib(struct bme680_data *data,
calib->par_t3 = tmp;
/* Pressure related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P1_LSB_REG\n");
return ret;
}
calib->par_p1 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P2_LSB_REG\n");
return ret;
@@ -157,14 +161,16 @@ static int bme680_read_calib(struct bme680_data *data,
}
calib->par_p3 = tmp;
- ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P4_LSB_REG\n");
return ret;
}
calib->par_p4 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P5_LSB_REG\n");
return ret;
@@ -185,14 +191,16 @@ static int bme680_read_calib(struct bme680_data *data,
}
calib->par_p7 = tmp;
- ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P8_LSB_REG\n");
return ret;
}
calib->par_p8 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P9_LSB_REG\n");
return ret;
@@ -276,8 +284,8 @@ static int bme680_read_calib(struct bme680_data *data,
}
calib->par_gh1 = tmp;
- ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG, (u8 *) &buf,
- 2);
+ ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_GH2_LSB_REG\n");
return ret;
@@ -615,7 +623,7 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
return ret;
ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
- (u8 *) &tmp, 3);
+ &tmp, 3);
if (ret < 0) {
dev_err(dev, "failed to read temperature\n");
return ret;
@@ -656,7 +664,7 @@ static int bme680_read_press(struct bme680_data *data,
return ret;
ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB,
- (u8 *) &tmp, 3);
+ &tmp, 3);
if (ret < 0) {
dev_err(dev, "failed to read pressure\n");
return ret;
@@ -689,7 +697,7 @@ static int bme680_read_humid(struct bme680_data *data,
return ret;
ret = regmap_bulk_read(data->regmap, BM6880_REG_HUMIDITY_MSB,
- (u8 *) &tmp, 2);
+ &tmp, sizeof(tmp));
if (ret < 0) {
dev_err(dev, "failed to read humidity\n");
return ret;
@@ -754,7 +762,7 @@ static int bme680_read_gas(struct bme680_data *data,
}
ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB,
- (u8 *) &tmp, 2);
+ &tmp, sizeof(tmp));
if (ret < 0) {
dev_err(dev, "failed to read gas resistance\n");
return ret;
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 2ebdfc35bcda..3ecd633f9ed3 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -16,6 +16,7 @@
*/
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -36,6 +37,7 @@
#define CCS811_ERR 0xE0
/* Used to transition from boot to application mode */
#define CCS811_APP_START 0xF4
+#define CCS811_SW_RESET 0xFF
/* Status register flags */
#define CCS811_STATUS_ERROR BIT(0)
@@ -74,6 +76,7 @@ struct ccs811_data {
struct mutex lock; /* Protect readings */
struct ccs811_reading buffer;
struct iio_trigger *drdy_trig;
+ struct gpio_desc *wakeup_gpio;
bool drdy_trig_on;
};
@@ -166,10 +169,25 @@ static int ccs811_setup(struct i2c_client *client)
CCS811_MODE_IAQ_1SEC);
}
+static void ccs811_set_wakeup(struct ccs811_data *data, bool enable)
+{
+ if (!data->wakeup_gpio)
+ return;
+
+ gpiod_set_value(data->wakeup_gpio, enable);
+
+ if (enable)
+ usleep_range(50, 60);
+ else
+ usleep_range(20, 30);
+}
+
static int ccs811_get_measurement(struct ccs811_data *data)
{
int ret, tries = 11;
+ ccs811_set_wakeup(data, true);
+
/* Maximum waiting time: 1s, as measurements are made every second */
while (tries-- > 0) {
ret = i2c_smbus_read_byte_data(data->client, CCS811_STATUS);
@@ -183,9 +201,12 @@ static int ccs811_get_measurement(struct ccs811_data *data)
if (!(ret & CCS811_STATUS_DATA_READY))
return -EIO;
- return i2c_smbus_read_i2c_block_data(data->client,
+ ret = i2c_smbus_read_i2c_block_data(data->client,
CCS811_ALG_RESULT_DATA, 8,
(char *)&data->buffer);
+ ccs811_set_wakeup(data, false);
+
+ return ret;
}
static int ccs811_read_raw(struct iio_dev *indio_dev,
@@ -336,6 +357,45 @@ static irqreturn_t ccs811_data_rdy_trigger_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static int ccs811_reset(struct i2c_client *client)
+{
+ struct gpio_desc *reset_gpio;
+ int ret;
+
+ reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio))
+ return PTR_ERR(reset_gpio);
+
+ /* Try to reset using the nRESET pin if available, else do a SW reset */
+ if (reset_gpio) {
+ gpiod_set_value(reset_gpio, 1);
+ usleep_range(20, 30);
+ gpiod_set_value(reset_gpio, 0);
+ } else {
+ /*
+ * As per the datasheet, this sequence of values needs to be
+ * written to the SW_RESET register for triggering the soft
+ * reset in the device and placing it in boot mode.
+ */
+ static const u8 reset_seq[] = {
+ 0x11, 0xE5, 0x72, 0x8A,
+ };
+
+ ret = i2c_smbus_write_i2c_block_data(client, CCS811_SW_RESET,
+ sizeof(reset_seq), reset_seq);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to reset sensor\n");
+ return ret;
+ }
+ }
+
+ /* tSTART delay required after reset */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
static int ccs811_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -348,36 +408,59 @@ static int ccs811_probe(struct i2c_client *client,
| I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -EOPNOTSUPP;
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ data->wakeup_gpio = devm_gpiod_get_optional(&client->dev, "wakeup",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(data->wakeup_gpio))
+ return PTR_ERR(data->wakeup_gpio);
+
+ ccs811_set_wakeup(data, true);
+
+ ret = ccs811_reset(client);
+ if (ret) {
+ ccs811_set_wakeup(data, false);
+ return ret;
+ }
+
/* Check hardware id (should be 0x81 for this family of devices) */
ret = i2c_smbus_read_byte_data(client, CCS811_HW_ID);
- if (ret < 0)
+ if (ret < 0) {
+ ccs811_set_wakeup(data, false);
return ret;
+ }
if (ret != CCS811_HW_ID_VALUE) {
dev_err(&client->dev, "hardware id doesn't match CCS81x\n");
+ ccs811_set_wakeup(data, false);
return -ENODEV;
}
ret = i2c_smbus_read_byte_data(client, CCS811_HW_VERSION);
- if (ret < 0)
+ if (ret < 0) {
+ ccs811_set_wakeup(data, false);
return ret;
+ }
if ((ret & CCS811_HW_VERSION_MASK) != CCS811_HW_VERSION_VALUE) {
dev_err(&client->dev, "no CCS811 sensor\n");
+ ccs811_set_wakeup(data, false);
return -ENODEV;
}
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev)
- return -ENOMEM;
-
ret = ccs811_setup(client);
- if (ret < 0)
+ if (ret < 0) {
+ ccs811_set_wakeup(data, false);
return ret;
+ }
- data = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
- data->client = client;
+ ccs811_set_wakeup(data, false);
mutex_init(&data->lock);
@@ -466,9 +549,16 @@ static const struct i2c_device_id ccs811_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ccs811_id);
+static const struct of_device_id ccs811_dt_ids[] = {
+ { .compatible = "ams,ccs811" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ccs811_dt_ids);
+
static struct i2c_driver ccs811_driver = {
.driver = {
.name = "ccs811",
+ .of_match_table = ccs811_dt_ids,
},
.probe = ccs811_probe,
.remove = ccs811_remove,
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index 23c9ab252470..07bb90d72434 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -73,6 +73,11 @@ struct pms7003_state {
struct pms7003_frame frame;
struct completion frame_ready;
struct mutex lock; /* must be held whenever state gets touched */
+ /* Used to construct scan to push to the IIO buffer */
+ struct {
+ u16 data[3]; /* PM1, PM2P5, PM10 */
+ s64 ts;
+ } scan;
};
static int pms7003_do_cmd(struct pms7003_state *state, enum pms7003_cmd cmd)
@@ -104,7 +109,6 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct pms7003_state *state = iio_priv(indio_dev);
struct pms7003_frame *frame = &state->frame;
- u16 data[3 + 1 + 4]; /* PM1, PM2P5, PM10, padding, timestamp */
int ret;
mutex_lock(&state->lock);
@@ -114,12 +118,15 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p)
goto err;
}
- data[PM1] = pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET);
- data[PM2P5] = pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET);
- data[PM10] = pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET);
+ state->scan.data[PM1] =
+ pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET);
+ state->scan.data[PM2P5] =
+ pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET);
+ state->scan.data[PM10] =
+ pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET);
mutex_unlock(&state->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &state->scan,
iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
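[Editor's note: a hedged sketch of the pattern this hunk (and the sps30 one below) introduces; the foo_* names are illustrative. Wrapping the channel data and timestamp in one struct lets the compiler pad data[] so the s64 timestamp consumed by iio_push_to_buffers_with_timestamp() lands at a naturally aligned offset, instead of relying on hand-counted padding slots in a bare array.]

#include <linux/types.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>

struct foo_scan {
	u16 data[3];	/* three 16-bit channels: 6 bytes of payload... */
	s64 ts;		/* ...padded so the timestamp starts at offset 8 */
};

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct foo_scan scan = { };

	/* fill scan.data[] from the hardware here */

	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
					   iio_get_time_ns(indio_dev));
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}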
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index acb9f8ecbb3d..a88c1fb875a0 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -230,15 +230,18 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct sps30_state *state = iio_priv(indio_dev);
int ret;
- s32 data[4 + 2]; /* PM1, PM2P5, PM4, PM10, timestamp */
+ struct {
+ s32 data[4]; /* PM1, PM2P5, PM4, PM10 */
+ s64 ts;
+ } scan;
mutex_lock(&state->lock);
- ret = sps30_do_meas(state, data, 4);
+ ret = sps30_do_meas(state, scan.data, ARRAY_SIZE(scan.data));
mutex_unlock(&state->lock);
if (ret)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 906d87780419..ff375790b7e8 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -13,6 +13,8 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/sysfs.h>
#include "hid-sensor-trigger.h"
@@ -222,7 +224,8 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
return hid_sensor_power_state(iio_trigger_get_drvdata(trig), state);
}
-void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
+void hid_sensor_remove_trigger(struct iio_dev *indio_dev,
+ struct hid_sensor_common *attrb)
{
if (atomic_read(&attrb->runtime_pm_enable))
pm_runtime_disable(&attrb->pdev->dev);
@@ -233,6 +236,7 @@ void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
cancel_work_sync(&attrb->work);
iio_trigger_unregister(attrb->trigger);
iio_trigger_free(attrb->trigger);
+ iio_triggered_buffer_cleanup(indio_dev);
}
EXPORT_SYMBOL(hid_sensor_remove_trigger);
@@ -246,11 +250,18 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
int ret;
struct iio_trigger *trig;
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Triggered Buffer Setup Failed\n");
+ return ret;
+ }
+
trig = iio_trigger_alloc("%s-dev%d", name, indio_dev->id);
if (trig == NULL) {
dev_err(&indio_dev->dev, "Trigger Allocate Failed\n");
ret = -ENOMEM;
- goto error_ret;
+ goto error_triggered_buffer_cleanup;
}
trig->dev.parent = indio_dev->dev.parent;
@@ -284,7 +295,8 @@ error_unreg_trigger:
iio_trigger_unregister(trig);
error_free_trig:
iio_trigger_free(trig);
-error_ret:
+error_triggered_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
return ret;
}
EXPORT_SYMBOL(hid_sensor_setup_trigger);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index f47b940ff170..bb45cc89e551 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -13,7 +13,8 @@ extern const struct dev_pm_ops hid_sensor_pm_ops;
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
struct hid_sensor_common *attrb);
-void hid_sensor_remove_trigger(struct hid_sensor_common *attrb);
+void hid_sensor_remove_trigger(struct iio_dev *indio_dev,
+ struct hid_sensor_common *attrb);
int hid_sensor_power_state(struct hid_sensor_common *st, bool state);
#endif
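[Editor's note: with this change hid_sensor_setup_trigger() also sets up the triggered buffer, and hid_sensor_remove_trigger() tears it down, so callers drop their own iio_triggered_buffer_setup()/cleanup() calls. A hedged sketch of the updated pairing; the foo_* wrapper is illustrative.]

#include <linux/iio/iio.h>
#include <linux/hid-sensor-hub.h>
#include "hid-sensor-trigger.h"

static int foo_hid_register(struct iio_dev *indio_dev,
			    struct hid_sensor_common *common)
{
	int ret;

	/* Sets up both the triggered buffer and the trigger in one call */
	ret = hid_sensor_setup_trigger(indio_dev, indio_dev->name, common);
	if (ret)
		return ret;

	ret = iio_device_register(indio_dev);
	if (ret) {
		/* Also unwinds the triggered buffer set up above */
		hid_sensor_remove_trigger(indio_dev, common);
		return ret;
	}

	return 0;
}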
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 13bdfbbf5f71..7a69c1be7393 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -20,11 +20,6 @@
#include "st_sensors_core.h"
-static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
-{
- return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
-}
-
int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
u8 reg_addr, u8 mask, u8 data)
{
@@ -150,8 +145,7 @@ static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
if (err < 0)
goto st_accel_set_fullscale_error;
- sdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &sdata->sensor_settings->fs.fs_avl[i];
+ sdata->current_fullscale = &sdata->sensor_settings->fs.fs_avl[i];
return err;
st_accel_set_fullscale_error:
@@ -278,8 +272,7 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
!sdata->sensor_settings->drdy_irq.int2.addr) {
if (pdata->drdy_int_pin)
dev_info(&indio_dev->dev,
- "DRDY on pin INT%d specified, but sensor "
- "does not support interrupts\n",
+ "DRDY on pin INT%d specified, but sensor does not support interrupts\n",
pdata->drdy_int_pin);
return 0;
}
@@ -545,7 +538,7 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
else if (byte_for_channel == 2)
*data = (s16)get_unaligned_le16(outdata);
else if (byte_for_channel == 3)
- *data = (s32)st_sensors_get_unaligned_le24(outdata);
+ *data = (s32)sign_extend32(get_unaligned_le24(outdata), 23);
st_sensors_free_memory:
kfree(outdata);
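[Editor's note: a hedged illustration, not part of the patch, of why the two forms are equivalent: both turn three little-endian bytes into a signed 32-bit value with bit 23 as the sign bit.]

#include <linux/types.h>
#include <linux/bitops.h>
#include <asm/unaligned.h>

static s32 le24_open_coded(const u8 *p)
{
	/* Shift the 24-bit value to the top, arithmetic-shift it back down */
	return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
}

static s32 le24_helpers(const u8 *p)
{
	/* e.g. {0xff, 0xff, 0xff} -> 0xffffff -> -1 after sign extension */
	return sign_extend32(get_unaligned_le24(p), 23);
}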
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index 286830fb5d35..b400560bac93 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -49,8 +49,8 @@ int st_sensors_i2c_configure(struct iio_dev *indio_dev,
sdata->regmap = devm_regmap_init_i2c(client, config);
if (IS_ERR(sdata->regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap (%d)\n",
- (int)PTR_ERR(sdata->regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap (%ld)\n",
+ PTR_ERR(sdata->regmap));
return PTR_ERR(sdata->regmap);
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 1275fb0eda31..ee70515bb89f 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -44,7 +44,7 @@ static bool st_sensors_is_spi_3_wire(struct spi_device *spi)
if (device_property_read_bool(dev, "spi-3wire"))
return true;
- pdata = (struct st_sensors_platform_data *)dev->platform_data;
+ pdata = dev_get_platdata(dev);
if (pdata && pdata->spi_3wire)
return true;
@@ -101,8 +101,8 @@ int st_sensors_spi_configure(struct iio_dev *indio_dev,
sdata->regmap = devm_regmap_init_spi(spi, config);
if (IS_ERR(sdata->regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap (%d)\n",
- (int)PTR_ERR(sdata->regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap (%ld)\n",
+ PTR_ERR(sdata->regmap));
return PTR_ERR(sdata->regmap);
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index e817537cdfb5..0507283bd4c1 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -44,8 +44,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
- dev_err(sdata->dev,
- "error checking samples available\n");
+ dev_err(sdata->dev, "error checking samples available\n");
return ret;
}
@@ -148,9 +147,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
case IRQF_TRIGGER_LOW:
if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
dev_err(&indio_dev->dev,
- "falling/low specified for IRQ "
- "but hardware supports only rising/high: "
- "will request rising/high\n");
+ "falling/low specified for IRQ but hardware supports only rising/high: will request rising/high\n");
if (irq_trig == IRQF_TRIGGER_FALLING)
irq_trig = IRQF_TRIGGER_RISING;
if (irq_trig == IRQF_TRIGGER_LOW)
@@ -163,8 +160,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
if (err < 0)
goto iio_trigger_free;
dev_info(&indio_dev->dev,
- "interrupts on the falling edge or "
- "active low level\n");
+ "interrupts on the falling edge or active low level\n");
}
break;
case IRQF_TRIGGER_RISING:
@@ -178,8 +174,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
default:
/* This is the most preferred mode, if possible */
dev_err(&indio_dev->dev,
- "unsupported IRQ trigger specified (%lx), enforce "
- "rising edge\n", irq_trig);
+ "unsupported IRQ trigger specified (%lx), enforce rising edge\n", irq_trig);
irq_trig = IRQF_TRIGGER_RISING;
}
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 93744011b63f..3728f6325501 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -279,12 +279,12 @@ config LTC1660
module will be called ltc1660.
config LTC2632
- tristate "Linear Technology LTC2632-12/10/8 and LTC2636-12/10/8 DAC spi driver"
+ tristate "Linear Technology LTC2632-12/10/8 and similar DAC spi driver"
depends on SPI
help
Say yes here to build support for Linear Technology
- LTC2632-12, LTC2632-10, LTC2632-8, LTC2636-12, LTC2636-10 and
- LTC2636-8 converters (DAC).
+ LTC2632, LTC2634 and LTC2636 DAC converters with 12/10/8 bit
+ resolution, in both the low (0-2.5V) and high (0-4.096V)
+ range variants.
To compile this driver as a module, choose M here: the
module will be called ltc2632.
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 2ac428b957e3..3e0c9e84e8da 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -67,6 +67,7 @@ struct ad5360_chip_info {
* @chip_info: chip model specific constants, available modes etc
* @vref_reg: vref supply regulators
* @ctrl: control register cache
* @lock: lock to protect the data buffer during SPI ops
* @data: spi transfer buffers
*/
@@ -75,6 +76,7 @@ struct ad5360_state {
const struct ad5360_chip_info *chip_info;
struct regulator_bulk_data vref_reg[3];
unsigned int ctrl;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -205,10 +207,11 @@ static int ad5360_write(struct iio_dev *indio_dev, unsigned int cmd,
unsigned int addr, unsigned int val, unsigned int shift)
{
int ret;
+ struct ad5360_state *st = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5360_write_unlocked(indio_dev, cmd, addr, val, shift);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -229,7 +232,7 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32(AD5360_CMD(AD5360_CMD_SPECIAL_FUNCTION) |
AD5360_ADDR(AD5360_REG_SF_READBACK) |
@@ -240,7 +243,7 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -261,7 +264,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
struct ad5360_state *st = iio_priv(indio_dev);
unsigned int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->ctrl |= set;
st->ctrl &= ~clr;
@@ -269,7 +272,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
ret = ad5360_write_unlocked(indio_dev, AD5360_CMD_SPECIAL_FUNCTION,
AD5360_REG_SF_CTRL, st->ctrl, 0);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -479,6 +482,8 @@ static int ad5360_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
ret = ad5360_alloc_channels(indio_dev);
if (ret) {
dev_err(&spi->dev, "Failed to allocate channel spec: %d\n", ret);
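[Editor's note: the remaining DAC hunks below all apply the same conversion: indio_dev->mlock is meant for the IIO core's own mode-transition locking, so a driver serializing access to its transfer buffer should hold a private mutex instead. A hedged sketch of the pattern, with illustrative foo_* names:]

#include <linux/mutex.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>

struct foo_state {
	struct spi_device *spi;
	struct mutex lock;	/* protects the spi transfer buffer */
	__be32 buf;
};

static int foo_write(struct iio_dev *indio_dev, u32 val)
{
	struct foo_state *st = iio_priv(indio_dev);
	int ret;

	mutex_lock(&st->lock);		/* was: mutex_lock(&indio_dev->mlock) */
	st->buf = cpu_to_be32(val);
	ret = spi_write(st->spi, &st->buf, sizeof(st->buf));
	mutex_unlock(&st->lock);

	return ret;
}

/* and in probe(), before registering: mutex_init(&st->lock); */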
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 2ebe08326048..b37e5675f716 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -51,6 +51,7 @@ struct ad5380_chip_info {
* @vref_reg: vref supply regulator
* @vref: actual reference voltage used in uA
* @pwr_down: whether the chip is currently in power down mode
* @lock: lock to protect the data buffer during regmap ops
*/
struct ad5380_state {
@@ -59,6 +60,7 @@ struct ad5380_state {
struct regulator *vref_reg;
int vref;
bool pwr_down;
+ struct mutex lock;
};
enum ad5380_type {
@@ -98,7 +100,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (pwr_down)
ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_DOWN, 0);
@@ -107,7 +109,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
st->pwr_down = pwr_down;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -390,6 +392,8 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
ret = ad5380_alloc_channels(indio_dev);
if (ret) {
dev_err(dev, "Failed to allocate channel spec: %d\n", ret);
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 63063e85cd0a..fec27764cea8 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -62,12 +62,14 @@
* @current_range: current range which the device is configured for
* @data: spi transfer buffers
* @fault_mask: software masking of events
* @lock: lock to protect the data buffer during SPI ops
*/
struct ad5421_state {
struct spi_device *spi;
unsigned int ctrl;
enum ad5421_current_range current_range;
unsigned int fault_mask;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -142,11 +144,12 @@ static int ad5421_write_unlocked(struct iio_dev *indio_dev,
static int ad5421_write(struct iio_dev *indio_dev, unsigned int reg,
unsigned int val)
{
+ struct ad5421_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5421_write_unlocked(indio_dev, reg, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -166,7 +169,7 @@ static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
@@ -174,7 +177,7 @@ static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -185,14 +188,14 @@ static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
struct ad5421_state *st = iio_priv(indio_dev);
unsigned int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->ctrl &= ~clr;
st->ctrl |= set;
ret = ad5421_write_unlocked(indio_dev, AD5421_REG_CTRL, st->ctrl);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -400,12 +403,12 @@ static int ad5421_write_event_config(struct iio_dev *indio_dev,
return -EINVAL;
}
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (state)
st->fault_mask |= mask;
else
st->fault_mask &= ~mask;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
}
@@ -491,6 +494,8 @@ static int ad5421_probe(struct spi_device *spi)
indio_dev->channels = ad5421_channels;
indio_dev->num_channels = ARRAY_SIZE(ad5421_channels);
+ mutex_init(&st->lock);
+
st->ctrl = AD5421_CTRL_WATCHDOG_DISABLE |
AD5421_CTRL_AUTO_FAULT_READBACK;
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 61c670f7fc5f..8f8afc8999bc 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -21,6 +21,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#define MODE_PWRDWN_1k 0x1
#define MODE_PWRDWN_100k 0x2
#define MODE_PWRDWN_TRISTATE 0x3
@@ -31,6 +33,7 @@
* @chip_info: chip model specific constants, available modes etc
* @reg: supply regulator
* @vref_mv: actual reference voltage used
* @lock: lock to protect the data buffer during write ops
*/
struct ad5446_state {
@@ -41,6 +44,7 @@ struct ad5446_state {
unsigned cached_val;
unsigned pwr_down_mode;
unsigned pwr_down;
+ struct mutex lock;
};
/**
@@ -110,7 +114,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->pwr_down = powerdown;
if (st->pwr_down) {
@@ -121,7 +125,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
}
ret = st->chip_info->write(st, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -195,11 +199,11 @@ static int ad5446_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
val <<= chan->scan_type.shift;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->cached_val = val;
if (!st->pwr_down)
ret = st->chip_info->write(st, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -254,6 +258,8 @@ static int ad5446_probe(struct device *dev, const char *name,
indio_dev->channels = &st->chip_info->channel;
indio_dev->num_channels = 1;
+ mutex_init(&st->lock);
+
st->pwr_down_mode = MODE_PWRDWN_1k;
if (st->chip_info->int_vref_mv)
@@ -302,9 +308,7 @@ static int ad5660_write(struct ad5446_state *st, unsigned val)
struct spi_device *spi = to_spi_device(st->dev);
uint8_t data[3];
- data[0] = (val >> 16) & 0xFF;
- data[1] = (val >> 8) & 0xFF;
- data[2] = val & 0xFF;
+ put_unaligned_be24(val, &data[0]);
return spi_write(spi, data, sizeof(data));
}
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index fed3ebaccac4..d739b10e5236 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -56,11 +56,13 @@ struct ad5449_chip_info {
* @has_sdo: whether the SDO line is connected
* @dac_cache: Cache for the DAC values
* @data: spi transfer buffers
* @lock: lock to protect the data buffer during SPI ops
*/
struct ad5449 {
struct spi_device *spi;
const struct ad5449_chip_info *chip_info;
struct regulator_bulk_data vref_reg[AD5449_MAX_VREFS];
+ struct mutex lock;
bool has_sdo;
uint16_t dac_cache[AD5449_MAX_CHANNELS];
@@ -87,10 +89,10 @@ static int ad5449_write(struct iio_dev *indio_dev, unsigned int addr,
struct ad5449 *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0] = cpu_to_be16((addr << 12) | val);
ret = spi_write(st->spi, st->data, 2);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -112,7 +114,7 @@ static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0] = cpu_to_be16(addr << 12);
st->data[1] = cpu_to_be16(AD5449_CMD_NOOP);
@@ -123,7 +125,7 @@ static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
*val = be16_to_cpu(st->data[1]);
out_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -302,6 +304,8 @@ static int ad5449_spi_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
if (st->chip_info->has_ctrl) {
unsigned int ctrl = 0x00;
if (pdata) {
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index e2110113e884..410e90e5f75f 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -156,7 +156,6 @@ static void ad5592r_gpio_cleanup(struct ad5592r_state *st)
static int ad5592r_reset(struct ad5592r_state *st)
{
struct gpio_desc *gpio;
- struct iio_dev *iio_dev = iio_priv_to_dev(st);
gpio = devm_gpiod_get_optional(st->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
@@ -166,10 +165,10 @@ static int ad5592r_reset(struct ad5592r_state *st)
udelay(1);
gpiod_set_value(gpio, 1);
} else {
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
/* Writing this magic value resets the device */
st->ops->reg_write(st, AD5592R_REG_RESET, 0xdac);
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
}
udelay(250);
@@ -197,7 +196,6 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
const struct ad5592r_rw_ops *ops = st->ops;
int ret;
unsigned i;
- struct iio_dev *iio_dev = iio_priv_to_dev(st);
u8 pulldown = 0, tristate = 0, dac = 0, adc = 0;
u16 read_back;
@@ -247,7 +245,7 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
}
}
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
/* Pull down unused pins to GND */
ret = ops->reg_write(st, AD5592R_REG_PULLDOWN, pulldown);
@@ -285,7 +283,7 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
ret = -EIO;
err_unlock:
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -314,11 +312,11 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
if (!chan->output)
return -EINVAL;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->ops->write_dac(st, chan->channel, val);
if (!ret)
st->cached_dac[chan->channel] = val;
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_VOLTAGE) {
@@ -333,12 +331,12 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
else
return -EINVAL;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->ops->reg_read(st, AD5592R_REG_CTRL,
&st->cached_gp_ctrl);
if (ret < 0) {
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -360,7 +358,7 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
ret = st->ops->reg_write(st, AD5592R_REG_CTRL,
st->cached_gp_ctrl);
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -382,7 +380,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
if (!chan->output) {
ret = st->ops->read_adc(st, chan->channel, &read_val);
@@ -419,7 +417,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
} else {
int mult;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
if (chan->output)
mult = !!(st->cached_gp_ctrl &
@@ -437,7 +435,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_OFFSET:
ret = ad5592r_get_vref(st);
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
if (st->cached_gp_ctrl & AD5592R_REG_CTRL_ADC_RANGE)
*val = (-34365 * 25) / ret;
@@ -450,7 +448,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
}
unlock:
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -625,6 +623,8 @@ int ad5592r_probe(struct device *dev, const char *name,
iio_dev->info = &ad5592r_info;
iio_dev->modes = INDIO_DIRECT_MODE;
+ mutex_init(&st->lock);
+
ad5592r_init_scales(st, ad5592r_get_vref(st));
ret = ad5592r_reset(st);
diff --git a/drivers/iio/dac/ad5592r-base.h b/drivers/iio/dac/ad5592r-base.h
index 4774e4cd9c11..23dac2f1ff8a 100644
--- a/drivers/iio/dac/ad5592r-base.h
+++ b/drivers/iio/dac/ad5592r-base.h
@@ -52,6 +52,7 @@ struct ad5592r_state {
struct regulator *reg;
struct gpio_chip gpiochip;
struct mutex gpio_lock; /* Protect cached gpio_out, gpio_val, etc. */
+ struct mutex lock;
unsigned int num_channels;
const struct ad5592r_rw_ops *ops;
int scale_avail[2][2];
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 34ba059a77da..49308ad13c4b 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -98,7 +98,7 @@ static int ad5592r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
return 0;
}
-static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
+static int ad5592r_gpio_read(struct ad5592r_state *st, u8 *value)
{
int ret;
@@ -121,7 +121,7 @@ static const struct ad5592r_rw_ops ad5592r_rw_ops = {
.read_adc = ad5592r_read_adc,
.reg_write = ad5592r_reg_write,
.reg_read = ad5592r_reg_read,
- .gpio_read = ad5593r_gpio_read,
+ .gpio_read = ad5592r_gpio_read,
};
static int ad5592r_spi_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 44ea3b8117d0..1fbe9c019c7f 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -134,5 +134,5 @@ static struct i2c_driver ad5593r_driver = {
module_i2c_driver(ad5593r_driver);
MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
-MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
+MODULE_DESCRIPTION("Analog Devices AD5593R multi-channel converters");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index e6c022e1dc1c..2015a5df840c 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -18,6 +18,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#include "ad5624r.h"
static int ad5624r_spi_write(struct spi_device *spi,
@@ -35,11 +37,9 @@ static int ad5624r_spi_write(struct spi_device *spi,
* for the AD5664R, AD5644R, and AD5624R, respectively.
*/
data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
- msg[0] = data >> 16;
- msg[1] = data >> 8;
- msg[2] = data;
+ put_unaligned_be24(data, &msg[0]);
- return spi_write(spi, msg, 3);
+ return spi_write(spi, msg, sizeof(msg));
}
static int ad5624r_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index e06b29c565b9..8dd67da0a7da 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -127,9 +127,9 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->read(st, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
if (ret < 0)
return ret;
*val = (ret >> chan->scan_type.shift) &
@@ -157,12 +157,12 @@ static int ad5686_write_raw(struct iio_dev *indio_dev,
if (val > (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->write(st,
AD5686_CMD_WRITE_INPUT_N_UPDATE_N,
chan->address,
val << chan->scan_type.shift);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -468,6 +468,8 @@ int ad5686_probe(struct device *dev,
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
switch (st->chip_info->regmap_type) {
case AD5310_REGMAP:
cmd = AD5686_CMD_CONTROL_REG;
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index 70a779939ddb..52009b5eef88 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -117,6 +117,7 @@ struct ad5686_chip_info {
* @pwr_down_mask: power down mask
* @pwr_down_mode: current power down mode
* @use_internal_vref: set to true if the internal reference voltage is used
* @lock: lock to protect the data buffer during regmap ops
* @data: spi transfer buffers
*/
@@ -130,6 +131,7 @@ struct ad5686_state {
ad5686_write_func write;
ad5686_read_func read;
bool use_internal_vref;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 388ddd14bfd0..7723bd313fc6 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -82,6 +82,7 @@ struct ad5755_chip_info {
* @pwr_down: bitmask which contains whether a channel is powered down or not
* @ctrl: software shadow of the channel ctrl registers
* @channels: iio channel spec for the device
* @lock: lock to protect the data buffer during SPI ops
* @data: spi transfer buffers
*/
struct ad5755_state {
@@ -90,6 +91,7 @@ struct ad5755_state {
unsigned int pwr_down;
unsigned int ctrl[AD5755_NUM_CHANNELS];
struct iio_chan_spec channels[AD5755_NUM_CHANNELS];
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -174,11 +176,12 @@ static int ad5755_write_ctrl_unlocked(struct iio_dev *indio_dev,
static int ad5755_write(struct iio_dev *indio_dev, unsigned int reg,
unsigned int val)
{
+ struct ad5755_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5755_write_unlocked(indio_dev, reg, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -186,11 +189,12 @@ static int ad5755_write(struct iio_dev *indio_dev, unsigned int reg,
static int ad5755_write_ctrl(struct iio_dev *indio_dev, unsigned int channel,
unsigned int reg, unsigned int val)
{
+ struct ad5755_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5755_write_ctrl_unlocked(indio_dev, channel, reg, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -211,7 +215,7 @@ static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32(AD5755_READ_FLAG | (addr << 16));
st->data[1].d32 = cpu_to_be32(AD5755_NOOP);
@@ -220,7 +224,7 @@ static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -246,7 +250,7 @@ static int ad5755_set_channel_pwr_down(struct iio_dev *indio_dev,
struct ad5755_state *st = iio_priv(indio_dev);
unsigned int mask = BIT(channel);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if ((bool)(st->pwr_down & mask) == pwr_down)
goto out_unlock;
@@ -266,7 +270,7 @@ static int ad5755_set_channel_pwr_down(struct iio_dev *indio_dev,
}
out_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
}
@@ -746,6 +750,8 @@ static int ad5755_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = AD5755_NUM_CHANNELS;
+ mutex_init(&st->lock);
+
if (spi->dev.of_node)
pdata = ad5755_parse_dt(&spi->dev);
else
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
index 4fb42b743f0f..67c4fa75c6f1 100644
--- a/drivers/iio/dac/ad5761.c
+++ b/drivers/iio/dac/ad5761.c
@@ -3,7 +3,7 @@
* AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
*
* Copyright 2016 Qtechnology A/S
- * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ * 2016 Ricardo Ribalda <ribalda@kernel.org>
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -57,11 +57,13 @@ enum ad5761_supported_device_ids {
* @use_intref: true when the internal voltage reference is used
* @vref: actual voltage reference in mVolts
* @range: output range mode used
+ * @lock: lock to protect the data buffer during SPI ops
* @data: cache aligned spi buffer
*/
struct ad5761_state {
struct spi_device *spi;
struct regulator *vref_reg;
+ struct mutex lock;
bool use_intref;
int vref;
@@ -124,9 +126,9 @@ static int ad5761_spi_write(struct iio_dev *indio_dev, u8 addr, u16 val)
struct ad5761_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = _ad5761_spi_write(st, addr, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -163,9 +165,9 @@ static int ad5761_spi_read(struct iio_dev *indio_dev, u8 addr, u16 *val)
struct ad5761_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = _ad5761_spi_read(st, addr, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -368,6 +370,8 @@ static int ad5761_probe(struct spi_device *spi)
if (pdata)
voltage_range = pdata->voltage_range;
+ mutex_init(&st->lock);
+
ret = ad5761_spi_set_range(st, voltage_range);
if (ret)
goto disable_regulator_err;
@@ -423,6 +427,6 @@ static struct spi_driver ad5761_driver = {
};
module_spi_driver(ad5761_driver);
-MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
+MODULE_AUTHOR("Ricardo Ribalda <ribalda@kernel.org>");
MODULE_DESCRIPTION("Analog Devices AD5721, AD5721R, AD5761, AD5761R driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index f7ab211604a1..5b0f0fe354f6 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -46,6 +46,7 @@ struct ad5764_chip_info {
* @spi: spi_device
* @chip_info: chip info
* @vref_reg: vref supply regulators
+ * @lock: lock to protect the data buffer during SPI ops
* @data: spi transfer buffers
*/
@@ -53,6 +54,7 @@ struct ad5764_state {
struct spi_device *spi;
const struct ad5764_chip_info *chip_info;
struct regulator_bulk_data vref_reg[2];
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -126,11 +128,11 @@ static int ad5764_write(struct iio_dev *indio_dev, unsigned int reg,
struct ad5764_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32((reg << 16) | val);
ret = spi_write(st->spi, &st->data[0].d8[1], 3);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -151,7 +153,7 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
@@ -159,7 +161,7 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
if (ret >= 0)
*val = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -295,6 +297,8 @@ static int ad5764_probe(struct spi_device *spi)
indio_dev->num_channels = AD5764_NUM_CHANNELS;
indio_dev->channels = st->chip_info->channels;
+ mutex_init(&st->lock);
+
if (st->chip_info->int_vref == 0) {
st->vref_reg[0].supply = "vrefAB";
st->vref_reg[1].supply = "vrefCD";
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index 7adc91056aa1..f891311f05cf 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -12,6 +12,8 @@
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
#define LTC2632_CMD_WRITE_INPUT_N 0x0
#define LTC2632_CMD_UPDATE_DAC_N 0x1
#define LTC2632_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
@@ -24,6 +26,7 @@
/**
* struct ltc2632_chip_info - chip specific information
* @channels: channel spec for the DAC
+ * @num_channels: DAC channel count of the chip
* @vref_mv: internal reference voltage
*/
struct ltc2632_chip_info {
@@ -53,6 +56,12 @@ enum ltc2632_supported_device_ids {
ID_LTC2632H12,
ID_LTC2632H10,
ID_LTC2632H8,
+ ID_LTC2634L12,
+ ID_LTC2634L10,
+ ID_LTC2634L8,
+ ID_LTC2634H12,
+ ID_LTC2634H10,
+ ID_LTC2634H8,
ID_LTC2636L12,
ID_LTC2636L10,
ID_LTC2636L8,
@@ -75,9 +84,7 @@ static int ltc2632_spi_write(struct spi_device *spi,
* 10-, 8-bit input code followed by 4, 6, or 8 don't care bits.
*/
data = (cmd << 20) | (addr << 16) | (val << shift);
- msg[0] = data >> 16;
- msg[1] = data >> 8;
- msg[2] = data;
+ put_unaligned_be24(data, &msg[0]);
return spi_write(spi, msg, sizeof(msg));
}
@@ -235,6 +242,36 @@ static const struct ltc2632_chip_info ltc2632_chip_info_tbl[] = {
.num_channels = 2,
.vref_mv = 4096,
},
+ [ID_LTC2634L12] = {
+ .channels = ltc2632x12_channels,
+ .num_channels = 4,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2634L10] = {
+ .channels = ltc2632x10_channels,
+ .num_channels = 4,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2634L8] = {
+ .channels = ltc2632x8_channels,
+ .num_channels = 4,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2634H12] = {
+ .channels = ltc2632x12_channels,
+ .num_channels = 4,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2634H10] = {
+ .channels = ltc2632x10_channels,
+ .num_channels = 4,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2634H8] = {
+ .channels = ltc2632x8_channels,
+ .num_channels = 4,
+ .vref_mv = 4096,
+ },
[ID_LTC2636L12] = {
.channels = ltc2632x12_channels,
.num_channels = 8,
@@ -356,6 +393,12 @@ static const struct spi_device_id ltc2632_id[] = {
{ "ltc2632-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H12] },
{ "ltc2632-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H10] },
{ "ltc2632-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H8] },
+ { "ltc2634-l12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634L12] },
+ { "ltc2634-l10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634L10] },
+ { "ltc2634-l8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634L8] },
+ { "ltc2634-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634H12] },
+ { "ltc2634-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634H10] },
+ { "ltc2634-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634H8] },
{ "ltc2636-l12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L12] },
{ "ltc2636-l10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L10] },
{ "ltc2636-l8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L8] },
@@ -386,6 +429,24 @@ static const struct of_device_id ltc2632_of_match[] = {
.compatible = "lltc,ltc2632-h8",
.data = &ltc2632_chip_info_tbl[ID_LTC2632H8]
}, {
+ .compatible = "lltc,ltc2634-l12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634L12]
+ }, {
+ .compatible = "lltc,ltc2634-l10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634L10]
+ }, {
+ .compatible = "lltc,ltc2634-l8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634L8]
+ }, {
+ .compatible = "lltc,ltc2634-h12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634H12]
+ }, {
+ .compatible = "lltc,ltc2634-h10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634H10]
+ }, {
+ .compatible = "lltc,ltc2634-h8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634H8]
+ }, {
.compatible = "lltc,ltc2636-l12",
.data = &ltc2632_chip_info_tbl[ID_LTC2636L12]
}, {
diff --git a/drivers/iio/dac/ti-dac7612.c b/drivers/iio/dac/ti-dac7612.c
index c46805144dd4..de0c6573cd97 100644
--- a/drivers/iio/dac/ti-dac7612.c
+++ b/drivers/iio/dac/ti-dac7612.c
@@ -3,7 +3,7 @@
* DAC7612 Dual, 12-Bit Serial input Digital-to-Analog Converter
*
* Copyright 2019 Qtechnology A/S
- * 2019 Ricardo Ribalda <ricardo@ribalda.com>
+ * 2019 Ricardo Ribalda <ribalda@kernel.org>
*
* Licensed under the GPL-2.
*/
@@ -179,6 +179,6 @@ static struct spi_driver dac7612_driver = {
};
module_spi_driver(dac7612_driver);
-MODULE_AUTHOR("Ricardo Ribalda <ricardo@ribalda.com>");
+MODULE_AUTHOR("Ricardo Ribalda <ribalda@kernel.org>");
MODULE_DESCRIPTION("Texas Instruments DAC7612 DAC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 71f8a5c471c4..9417a4a3e22a 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -36,6 +36,7 @@ struct vf610_dac {
struct device *dev;
enum vf610_conversion_mode_sel conv_mode;
void __iomem *regs;
+ struct mutex lock;
};
static void vf610_dac_init(struct vf610_dac *info)
@@ -64,7 +65,7 @@ static int vf610_set_conversion_mode(struct iio_dev *indio_dev,
struct vf610_dac *info = iio_priv(indio_dev);
int val;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&info->lock);
info->conv_mode = mode;
val = readl(info->regs + VF610_DACx_STATCTRL);
if (mode)
@@ -72,7 +73,7 @@ static int vf610_set_conversion_mode(struct iio_dev *indio_dev,
else
val &= ~VF610_DAC_LPEN;
writel(val, info->regs + VF610_DACx_STATCTRL);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&info->lock);
return 0;
}
@@ -147,9 +148,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&info->lock);
writel(VF610_DAC_DAT0(val), info->regs);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&info->lock);
return 0;
default:
@@ -205,6 +206,8 @@ static int vf610_dac_probe(struct platform_device *pdev)
indio_dev->channels = vf610_dac_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(vf610_dac_iio_channels);
+ mutex_init(&info->lock);
+
ret = clk_prepare_enable(info->clk);
if (ret) {
dev_err(&pdev->dev,
@@ -223,6 +226,7 @@ static int vf610_dac_probe(struct platform_device *pdev)
return 0;
error_iio_device_register:
+ vf610_dac_exit(info);
clk_disable_unprepare(info->clk);
return ret;
diff --git a/drivers/iio/dummy/iio_dummy_evgen.c b/drivers/iio/dummy/iio_dummy_evgen.c
index a6edf30567aa..ee85d596e528 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.c
+++ b/drivers/iio/dummy/iio_dummy_evgen.c
@@ -37,8 +37,7 @@ struct iio_dummy_eventgen {
struct iio_dummy_regs regs[IIO_EVENTGEN_NO];
struct mutex lock;
bool inuse[IIO_EVENTGEN_NO];
- struct irq_sim irq_sim;
- int base;
+ struct irq_domain *irq_sim_domain;
};
/* We can only ever have one instance of this 'device' */
@@ -52,13 +51,14 @@ static int iio_dummy_evgen_create(void)
if (!iio_evgen)
return -ENOMEM;
- ret = irq_sim_init(&iio_evgen->irq_sim, IIO_EVENTGEN_NO);
- if (ret < 0) {
+ iio_evgen->irq_sim_domain = irq_domain_create_sim(NULL,
+ IIO_EVENTGEN_NO);
+ if (IS_ERR(iio_evgen->irq_sim_domain)) {
+ ret = PTR_ERR(iio_evgen->irq_sim_domain);
kfree(iio_evgen);
return ret;
}
- iio_evgen->base = irq_sim_irqnum(&iio_evgen->irq_sim, 0);
mutex_init(&iio_evgen->lock);
return 0;
@@ -80,7 +80,7 @@ int iio_dummy_evgen_get_irq(void)
mutex_lock(&iio_evgen->lock);
for (i = 0; i < IIO_EVENTGEN_NO; i++) {
if (!iio_evgen->inuse[i]) {
- ret = irq_sim_irqnum(&iio_evgen->irq_sim, i);
+ ret = irq_create_mapping(iio_evgen->irq_sim_domain, i);
iio_evgen->inuse[i] = true;
break;
}
@@ -101,21 +101,27 @@ EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_irq);
*/
void iio_dummy_evgen_release_irq(int irq)
{
+ struct irq_data *irqd = irq_get_irq_data(irq);
+
mutex_lock(&iio_evgen->lock);
- iio_evgen->inuse[irq - iio_evgen->base] = false;
+ iio_evgen->inuse[irqd_to_hwirq(irqd)] = false;
+ irq_dispose_mapping(irq);
mutex_unlock(&iio_evgen->lock);
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_release_irq);
struct iio_dummy_regs *iio_dummy_evgen_get_regs(int irq)
{
- return &iio_evgen->regs[irq - iio_evgen->base];
+ struct irq_data *irqd = irq_get_irq_data(irq);
+
+ return &iio_evgen->regs[irqd_to_hwirq(irqd)];
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_regs);
static void iio_dummy_evgen_free(void)
{
- irq_sim_fini(&iio_evgen->irq_sim);
+ irq_domain_remove_sim(iio_evgen->irq_sim_domain);
kfree(iio_evgen);
}
@@ -131,7 +137,7 @@ static ssize_t iio_evgen_poke(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned long event;
- int ret;
+ int ret, irq;
ret = kstrtoul(buf, 10, &event);
if (ret)
@@ -140,7 +146,10 @@ static ssize_t iio_evgen_poke(struct device *dev,
iio_evgen->regs[this_attr->address].reg_id = this_attr->address;
iio_evgen->regs[this_attr->address].reg_data = event;
- irq_sim_fire(&iio_evgen->irq_sim, this_attr->address);
+ irq = irq_find_mapping(iio_evgen->irq_sim_domain, this_attr->address);
+ ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
+ if (ret)
+ return ret;
return len;
}
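A condensed view of the simulated-IRQ-domain lifecycle this file now uses; every call below appears in the hunks above (error handling trimmed, hwirq 0 illustrative):

    struct irq_domain *d = irq_domain_create_sim(NULL, IIO_EVENTGEN_NO);
    if (IS_ERR(d))
        return PTR_ERR(d);

    int irq = irq_create_mapping(d, 0);   /* hwirq 0 -> Linux irq number */
    irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);  /* fire it */
    irq_dispose_mapping(irq);             /* release the mapping */
    irq_domain_remove_sim(d);             /* tear down the domain */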
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 7eaf77707b0b..6daeddf37f60 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -61,7 +61,7 @@ config BMG160
help
Say yes here to build support for BOSCH BMG160 Tri-axis Gyro Sensor
driver connected via I2C or SPI. This driver also supports BMI055
- gyroscope.
+ and BMI088 gyroscopes.
This driver can also be built as a module. If so, the module
will be called bmg160_i2c or bmg160_spi.
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index 79e63c8a2ea8..2a9ec08ec561 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -12,6 +12,8 @@
#include <linux/iio/iio.h>
+#include <asm/unaligned.h>
+
#define ADIS16130_CON 0x0
#define ADIS16130_CON_RD (1 << 6)
#define ADIS16130_IOP 0x1
@@ -59,7 +61,7 @@ static int adis16130_spi_read(struct iio_dev *indio_dev, u8 reg_addr, u32 *val)
ret = spi_sync_transfer(st->us, &xfer, 1);
if (ret == 0)
- *val = (st->buf[1] << 16) | (st->buf[2] << 8) | st->buf[3];
+ *val = get_unaligned_be24(&st->buf[1]);
mutex_unlock(&st->buf_lock);
return ret;
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index a4c967a5fc5c..afdc57af475d 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -148,16 +148,14 @@ DEFINE_DEBUGFS_ATTRIBUTE(adis16136_flash_count_fops,
static int adis16136_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
debugfs_create_file_unsafe("serial_number", 0400,
- indio_dev->debugfs_dentry, adis16136,
- &adis16136_serial_fops);
+ d, adis16136, &adis16136_serial_fops);
debugfs_create_file_unsafe("product_id", 0400,
- indio_dev->debugfs_dentry,
- adis16136, &adis16136_product_id_fops);
+ d, adis16136, &adis16136_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
- indio_dev->debugfs_dentry,
- adis16136, &adis16136_flash_count_fops);
+ d, adis16136, &adis16136_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 4fc9c6a3321f..b3fa46bd02cb 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -21,8 +21,8 @@ static int bmg160_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &bmg160_regmap_i2c_conf);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
@@ -42,6 +42,7 @@ static int bmg160_i2c_remove(struct i2c_client *client)
static const struct acpi_device_id bmg160_acpi_match[] = {
{"BMG0160", 0},
{"BMI055B", 0},
+ {"BMI088B", 0},
{},
};
@@ -50,6 +51,7 @@ MODULE_DEVICE_TABLE(acpi, bmg160_acpi_match);
static const struct i2c_device_id bmg160_i2c_id[] = {
{"bmg160", 0},
{"bmi055_gyro", 0},
+ {"bmi088_gyro", 0},
{}
};
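%pe is the printk format for error pointers: it prints the symbolic name (e.g. "-ENOMEM") when the kernel is built with symbolic errname support and the numeric value otherwise, so the (int)PTR_ERR() cast becomes unnecessary. Sketch of the resulting pattern:

    if (IS_ERR(regmap)) {
        dev_err(dev, "Failed to register regmap: %pe\n", regmap);
        return PTR_ERR(regmap);
    }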
diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c
index 182a59c42507..745962e1e423 100644
--- a/drivers/iio/gyro/bmg160_spi.c
+++ b/drivers/iio/gyro/bmg160_spi.c
@@ -19,8 +19,8 @@ static int bmg160_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &bmg160_regmap_spi_conf);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
@@ -37,6 +37,7 @@ static int bmg160_spi_remove(struct spi_device *spi)
static const struct spi_device_id bmg160_spi_id[] = {
{"bmg160", 0},
{"bmi055_gyro", 0},
+ {"bmi088_gyro", 0},
{}
};
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 08cacbbf31e6..7f382aae1dfd 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum gyro_3d_channel {
@@ -326,18 +324,13 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&gyro_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&gyro_state->common_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -361,9 +354,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&gyro_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &gyro_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -378,8 +369,7 @@ static int hid_gyro_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&gyro_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &gyro_state->common_attributes);
kfree(indio_dev->channels);
return 0;
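These HID-sensor hunks track an API change in which hid_sensor_setup_trigger() takes over triggered-buffer setup (and hid_sensor_remove_trigger() the teardown), which is why the explicit iio_triggered_buffer_setup()/cleanup() calls and their includes disappear. The resulting flow, sketched with a generic "state" standing in for the driver's private data:

    ret = hid_sensor_setup_trigger(indio_dev, name, &state->common_attributes);
    if (ret < 0)
        goto error_free_dev_mem;   /* no separate buffer cleanup step anymore */

    /* and on remove: */
    hid_sensor_remove_trigger(indio_dev, &state->common_attributes);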
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index afa8018b9238..ef5bcbc4b45b 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -51,8 +51,8 @@ static int mpu3050_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &mpu3050_i2c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
index 7465ad62391c..9c92ff7a82be 100644
--- a/drivers/iio/gyro/st_gyro_buffer.c
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -37,8 +37,7 @@ static int st_gyro_buffer_postenable(struct iio_dev *indio_dev)
if (err < 0)
return err;
- err = st_sensors_set_axis_enable(indio_dev,
- (u8)indio_dev->active_scan_mask[0]);
+ err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]);
if (err < 0)
goto st_gyro_buffer_predisable;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 26c50b24bc08..c8aa051995d3 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -460,6 +460,7 @@ EXPORT_SYMBOL(st_gyro_get_settings);
int st_gyro_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *gdata = iio_priv(indio_dev);
+ struct st_sensors_platform_data *pdata;
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -477,12 +478,12 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = gdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
- gdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &gdata->sensor_settings->fs.fs_avl[0];
+ gdata->current_fullscale = &gdata->sensor_settings->fs.fs_avl[0];
gdata->odr = gdata->sensor_settings->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev,
- (struct st_sensors_platform_data *)&gyro_pdata);
+ pdata = (struct st_sensors_platform_data *)&gyro_pdata;
+
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
goto st_gyro_power_off;
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index dc22dc363a99..e9f87e42ff4f 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -23,6 +23,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
+#include <asm/unaligned.h>
+
#include "afe440x.h"
#define AFE4403_DRIVER_NAME "afe4403"
@@ -220,13 +222,11 @@ static int afe4403_read(struct afe4403_data *afe, unsigned int reg, u32 *val)
if (ret)
return ret;
- ret = spi_write_then_read(afe->spi, &reg, 1, rx, 3);
+ ret = spi_write_then_read(afe->spi, &reg, 1, rx, sizeof(rx));
if (ret)
return ret;
- *val = (rx[0] << 16) |
- (rx[1] << 8) |
- (rx[2]);
+ *val = get_unaligned_be24(&rx[0]);
/* Disable reading from the device */
tx[3] = AFE440X_CONTROL0_WRITE;
@@ -322,13 +322,11 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
indio_dev->masklength) {
ret = spi_write_then_read(afe->spi,
&afe4403_channel_values[bit], 1,
- rx, 3);
+ rx, sizeof(rx));
if (ret)
goto err;
- buffer[i++] = (rx[0] << 16) |
- (rx[1] << 8) |
- (rx[2]);
+ buffer[i++] = get_unaligned_be24(&rx[0]);
}
/* Disable reading from the device */
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 84010501762d..546fc37ad75d 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -16,7 +16,7 @@
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -267,11 +267,10 @@ static int max30100_get_current_idx(unsigned int val, int *reg)
static int max30100_led_init(struct max30100_data *data)
{
struct device *dev = &data->client->dev;
- struct device_node *np = dev->of_node;
unsigned int val[2];
int reg, ret;
- ret = of_property_read_u32_array(np, "maxim,led-current-microamp",
+ ret = device_property_read_u32_array(dev, "maxim,led-current-microamp",
(unsigned int *) &val, 2);
if (ret) {
/* Default to 24 mA RED LED, 50 mA IR LED */
@@ -502,7 +501,7 @@ MODULE_DEVICE_TABLE(of, max30100_dt_ids);
static struct i2c_driver max30100_driver = {
.driver = {
.name = MAX30100_DRV_NAME,
- .of_match_table = of_match_ptr(max30100_dt_ids),
+ .of_match_table = max30100_dt_ids,
},
.probe = max30100_probe,
.remove = max30100_remove,
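device_property_read_u32_array() reads from whatever firmware node backs the device (DT or ACPI alike), which is also what allows dropping the of_match_ptr() wrapper. A minimal sketch:

    u32 val[2];
    int ret;

    ret = device_property_read_u32_array(dev, "maxim,led-current-microamp",
                                         val, ARRAY_SIZE(val));
    if (ret) {
        /* property absent: fall back to driver defaults */
    }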
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index c99b54b0568d..d2318c4aab0f 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -7,8 +7,6 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/trigger_consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -233,12 +231,8 @@ static int hid_humidity_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
- &iio_pollfunc_store_time, NULL, NULL);
- if (ret)
- return ret;
-
atomic_set(&humid_st->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&humid_st->common_attributes);
if (ret)
@@ -261,7 +255,7 @@ static int hid_humidity_probe(struct platform_device *pdev)
error_remove_callback:
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY);
error_remove_trigger:
- hid_sensor_remove_trigger(&humid_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &humid_st->common_attributes);
return ret;
}
@@ -274,7 +268,7 @@ static int hid_humidity_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY);
- hid_sensor_remove_trigger(&humid_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &humid_st->common_attributes);
return 0;
}
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 81d50a861c22..9fb3f33614d4 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -74,10 +74,9 @@ static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
int hts221_allocate_trigger(struct hts221_hw *hw)
{
+ struct st_sensors_platform_data *pdata = dev_get_platdata(hw->dev);
struct iio_dev *iio_dev = iio_priv_to_dev(hw);
bool irq_active_low = false, open_drain = false;
- struct device_node *np = hw->dev->of_node;
- struct st_sensors_platform_data *pdata;
unsigned long irq_type;
int err;
@@ -106,8 +105,7 @@ int hts221_allocate_trigger(struct hts221_hw *hw)
if (err < 0)
return err;
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ if (device_property_read_bool(hw->dev, "drive-open-drain") ||
(pdata && pdata->open_drain)) {
irq_type |= IRQF_SHARED;
open_drain = true;
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index 4272b7030c44..cab39c4756f8 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -32,8 +32,8 @@ static int hts221_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &hts221_i2c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -63,7 +63,7 @@ static struct i2c_driver hts221_driver = {
.driver = {
.name = "hts221_i2c",
.pm = &hts221_pm_ops,
- .of_match_table = of_match_ptr(hts221_i2c_of_match),
+ .of_match_table = hts221_i2c_of_match,
.acpi_match_table = ACPI_PTR(hts221_acpi_match),
},
.probe = hts221_i2c_probe,
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
index 055dba8897d2..729e86e433b1 100644
--- a/drivers/iio/humidity/hts221_spi.c
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -31,8 +31,8 @@ static int hts221_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &hts221_spi_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -56,7 +56,7 @@ static struct spi_driver hts221_driver = {
.driver = {
.name = "hts221_spi",
.pm = &hts221_pm_ops,
- .of_match_table = of_match_ptr(hts221_spi_of_match),
+ .of_match_table = hts221_spi_of_match,
},
.probe = hts221_spi_probe,
.id_table = hts221_spi_id_table,
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 60bb1029e759..fc4123d518bc 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -29,6 +29,19 @@ config ADIS16460
To compile this driver as a module, choose M here: the module will be
called adis16460.
+config ADIS16475
+ tristate "Analog Devices ADIS16475 and similar IMU driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADIS16470, ADIS16475,
+ ADIS16477, ADIS16465, ADIS16467, ADIS16500, ADIS16505, ADIS16507 inertial
+ sensors.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adis16475.
+
config ADIS16480
tristate "Analog Devices ADIS16480 and similar IMU driver"
depends on SPI
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 5237fd4bc384..88b2c4555230 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADIS16400) += adis16400.o
obj-$(CONFIG_ADIS16460) += adis16460.o
+obj-$(CONFIG_ADIS16475) += adis16475.o
obj-$(CONFIG_ADIS16480) += adis16480.o
adis_lib-y += adis.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index a8afd01de4f3..c539dfa3b8d3 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -223,6 +223,31 @@ int __adis_read_reg(struct adis *adis, unsigned int reg,
return ret;
}
EXPORT_SYMBOL_GPL(__adis_read_reg);
+/**
+ * __adis_update_bits_base() - ADIS Update bits function - Unlocked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @size: Size of the register to update
+ *
+ * Updates the desired bits of @reg in accordance with @mask and @val.
+ */
+int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
+ const u32 val, u8 size)
+{
+ int ret;
+ u32 __val;
+
+ ret = __adis_read_reg(adis, reg, &__val, size);
+ if (ret)
+ return ret;
+
+ __val = (__val & ~mask) | (val & mask);
+
+ return __adis_write_reg(adis, reg, __val, size);
+}
+EXPORT_SYMBOL_GPL(__adis_update_bits_base);
#ifdef CONFIG_DEBUG_FS
@@ -419,7 +444,7 @@ int __adis_initial_startup(struct adis *adis)
if (prod_id != adis->data->prod_id)
dev_warn(&adis->spi->dev,
- "Device ID(%u) and product ID(%u) do not match.",
+ "Device ID(%u) and product ID(%u) do not match.\n",
adis->data->prod_id, prod_id);
return 0;
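A usage sketch for the new helper (the register address and field values below are illustrative, not taken from a datasheet); per the __ prefix convention this is the unlocked variant, so the caller is expected to hold the device lock:

    int ret;

    /* Set bit 0 of a 16-bit register, leaving all other bits untouched. */
    ret = __adis_update_bits_base(&st->adis, 0x60, BIT(0), 1, 2);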
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 05e70c1c4835..229f2ff98469 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -258,7 +258,7 @@ static int adis16400_show_product_id(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16400_product_id_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16400_product_id_fops,
adis16400_show_product_id, NULL, "%lld\n");
static int adis16400_show_flash_count(void *arg, u64 *val)
@@ -275,23 +275,22 @@ static int adis16400_show_flash_count(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16400_flash_count_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16400_flash_count_fops,
adis16400_show_flash_count, NULL, "%lld\n");
static int adis16400_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16400_state *st = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
if (st->variant->flags & ADIS16400_HAS_SERIAL_NUMBER)
- debugfs_create_file("serial_number", 0400,
- indio_dev->debugfs_dentry, st,
- &adis16400_serial_number_fops);
+ debugfs_create_file_unsafe("serial_number", 0400,
+ d, st, &adis16400_serial_number_fops);
if (st->variant->flags & ADIS16400_HAS_PROD_ID)
- debugfs_create_file("product_id", 0400,
- indio_dev->debugfs_dentry, st,
- &adis16400_product_id_fops);
- debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
- st, &adis16400_flash_count_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ d, st, &adis16400_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ d, st, &adis16400_flash_count_fops);
return 0;
}
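The debugfs changes here (and in adis16460 below) pair DEFINE_DEBUGFS_ATTRIBUTE() with debugfs_create_file_unsafe(): such attributes guard their handlers with debugfs_file_get()/debugfs_file_put() internally, so the heavier full-proxy wrapping done by debugfs_create_file() can be skipped. The pattern, with a hypothetical getter:

    DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, NULL, "%lld\n");
    debugfs_create_file_unsafe("example", 0400, parent, priv, &example_fops);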
@@ -1194,7 +1193,7 @@ static int adis16400_probe(struct spi_device *spi)
indio_dev->available_scan_masks = st->avail_scan_mask;
st->adis.burst = &adis16400_burst;
if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
- st->adis.burst->extra_len = sizeof(u16);
+ st->adis.burst_extra_len = sizeof(u16);
}
adis16400_data = &st->variant->adis_data;
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 0027683d0256..ad20c488a3ba 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -87,8 +87,8 @@ static int adis16460_show_serial_number(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16460_serial_number_fops,
- adis16460_show_serial_number, NULL, "0x%.4llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(adis16460_serial_number_fops,
+ adis16460_show_serial_number, NULL, "0x%.4llx\n");
static int adis16460_show_product_id(void *arg, u64 *val)
{
@@ -105,8 +105,8 @@ static int adis16460_show_product_id(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16460_product_id_fops,
- adis16460_show_product_id, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(adis16460_product_id_fops,
+ adis16460_show_product_id, NULL, "%llu\n");
static int adis16460_show_flash_count(void *arg, u64 *val)
{
@@ -123,19 +123,20 @@ static int adis16460_show_flash_count(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16460_flash_count_fops,
- adis16460_show_flash_count, NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(adis16460_flash_count_fops,
+ adis16460_show_flash_count, NULL, "%lld\n");
static int adis16460_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16460 *adis16460 = iio_priv(indio_dev);
-
- debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
- adis16460, &adis16460_serial_number_fops);
- debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
- adis16460, &adis16460_product_id_fops);
- debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
- adis16460, &adis16460_flash_count_fops);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+
+ debugfs_create_file_unsafe("serial_number", 0400,
+ d, adis16460, &adis16460_serial_number_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ d, adis16460, &adis16460_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ d, adis16460, &adis16460_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
new file mode 100644
index 000000000000..c6dac4fc67a1
--- /dev/null
+++ b/drivers/iio/imu/adis16475.c
@@ -0,0 +1,1338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADIS16475 IMU driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/imu/adis.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#define ADIS16475_REG_DIAG_STAT 0x02
+#define ADIS16475_REG_X_GYRO_L 0x04
+#define ADIS16475_REG_Y_GYRO_L 0x08
+#define ADIS16475_REG_Z_GYRO_L 0x0C
+#define ADIS16475_REG_X_ACCEL_L 0x10
+#define ADIS16475_REG_Y_ACCEL_L 0x14
+#define ADIS16475_REG_Z_ACCEL_L 0x18
+#define ADIS16475_REG_TEMP_OUT 0x1c
+#define ADIS16475_REG_X_GYRO_BIAS_L 0x40
+#define ADIS16475_REG_Y_GYRO_BIAS_L 0x44
+#define ADIS16475_REG_Z_GYRO_BIAS_L 0x48
+#define ADIS16475_REG_X_ACCEL_BIAS_L 0x4c
+#define ADIS16475_REG_Y_ACCEL_BIAS_L 0x50
+#define ADIS16475_REG_Z_ACCEL_BIAS_L 0x54
+#define ADIS16475_REG_FILT_CTRL 0x5c
+#define ADIS16475_FILT_CTRL_MASK GENMASK(2, 0)
+#define ADIS16475_FILT_CTRL(x) FIELD_PREP(ADIS16475_FILT_CTRL_MASK, x)
+#define ADIS16475_REG_MSG_CTRL 0x60
+#define ADIS16475_MSG_CTRL_DR_POL_MASK BIT(0)
+#define ADIS16475_MSG_CTRL_DR_POL(x) \
+ FIELD_PREP(ADIS16475_MSG_CTRL_DR_POL_MASK, x)
+#define ADIS16475_SYNC_MODE_MASK GENMASK(4, 2)
+#define ADIS16475_SYNC_MODE(x) FIELD_PREP(ADIS16475_SYNC_MODE_MASK, x)
+#define ADIS16475_REG_UP_SCALE 0x62
+#define ADIS16475_REG_DEC_RATE 0x64
+#define ADIS16475_REG_GLOB_CMD 0x68
+#define ADIS16475_REG_FIRM_REV 0x6c
+#define ADIS16475_REG_FIRM_DM 0x6e
+#define ADIS16475_REG_FIRM_Y 0x70
+#define ADIS16475_REG_PROD_ID 0x72
+#define ADIS16475_REG_SERIAL_NUM 0x74
+#define ADIS16475_REG_FLASH_CNT 0x7c
+#define ADIS16500_BURST32_MASK BIT(9)
+#define ADIS16500_BURST32(x) FIELD_PREP(ADIS16500_BURST32_MASK, x)
+/* number of data elements in burst mode */
+#define ADIS16475_BURST32_MAX_DATA 32
+#define ADIS16475_BURST_MAX_DATA 20
+#define ADIS16475_MAX_SCAN_DATA 20
+/* SPI max speed in burst mode */
+#define ADIS16475_BURST_MAX_SPEED 1000000
+#define ADIS16475_LSB_DEC_MASK BIT(0)
+#define ADIS16475_LSB_FIR_MASK BIT(1)
+
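FIELD_PREP() shifts a value into the field described by its mask, so the helpers above expand as in this illustrative check:

    /* Place sync mode 3 into bits [4:2]. */
    u16 v = ADIS16475_SYNC_MODE(3);   /* FIELD_PREP(GENMASK(4, 2), 3) == 3 << 2 == 0xc */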
+enum {
+ ADIS16475_SYNC_DIRECT = 1,
+ ADIS16475_SYNC_SCALED,
+ ADIS16475_SYNC_OUTPUT,
+ ADIS16475_SYNC_PULSE = 5,
+};
+
+struct adis16475_sync {
+ u16 sync_mode;
+ u16 min_rate;
+ u16 max_rate;
+};
+
+struct adis16475_chip_info {
+ const struct iio_chan_spec *channels;
+ const struct adis16475_sync *sync;
+ const struct adis_data adis_data;
+ const char *name;
+ u32 num_channels;
+ u32 gyro_max_val;
+ u32 gyro_max_scale;
+ u32 accel_max_val;
+ u32 accel_max_scale;
+ u32 temp_scale;
+ u32 int_clk;
+ u16 max_dec;
+ u8 num_sync;
+ bool has_burst32;
+};
+
+struct adis16475 {
+ const struct adis16475_chip_info *info;
+ struct adis adis;
+ u32 clk_freq;
+ bool burst32;
+ unsigned long lsb_flag;
+ /* Alignment needed for the timestamp */
+ __be16 data[ADIS16475_MAX_SCAN_DATA] __aligned(8);
+};
+
+enum {
+ ADIS16475_SCAN_GYRO_X,
+ ADIS16475_SCAN_GYRO_Y,
+ ADIS16475_SCAN_GYRO_Z,
+ ADIS16475_SCAN_ACCEL_X,
+ ADIS16475_SCAN_ACCEL_Y,
+ ADIS16475_SCAN_ACCEL_Z,
+ ADIS16475_SCAN_TEMP,
+ ADIS16475_SCAN_DIAG_S_FLAGS,
+ ADIS16475_SCAN_CRC_FAILURE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t adis16475_show_firmware_revision(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16475 *st = file->private_data;
+ char buf[7];
+ size_t len;
+ u16 rev;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIRM_REV, &rev);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16475_firmware_revision_fops = {
+ .open = simple_open,
+ .read = adis16475_show_firmware_revision,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t adis16475_show_firmware_date(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16475 *st = file->private_data;
+ u16 md, year;
+ char buf[12];
+ size_t len;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIRM_Y, &year);
+ if (ret)
+ return ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIRM_DM, &md);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n", md >> 8, md & 0xff,
+ year);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16475_firmware_date_fops = {
+ .open = simple_open,
+ .read = adis16475_show_firmware_date,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int adis16475_show_serial_number(void *arg, u64 *val)
+{
+ struct adis16475 *st = arg;
+ u16 serial;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_SERIAL_NUM, &serial);
+ if (ret)
+ return ret;
+
+ *val = serial;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16475_serial_number_fops,
+ adis16475_show_serial_number, NULL, "0x%.4llx\n");
+
+static int adis16475_show_product_id(void *arg, u64 *val)
+{
+ struct adis16475 *st = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_PROD_ID, &prod_id);
+ if (ret)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16475_product_id_fops,
+ adis16475_show_product_id, NULL, "%llu\n");
+
+static int adis16475_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16475 *st = arg;
+ u32 flash_count;
+ int ret;
+
+ ret = adis_read_reg_32(&st->adis, ADIS16475_REG_FLASH_CNT,
+ &flash_count);
+ if (ret)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16475_flash_count_fops,
+ adis16475_show_flash_count, NULL, "%lld\n");
+
+static void adis16475_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+
+ debugfs_create_file_unsafe("serial_number", 0400,
+ d, st, &adis16475_serial_number_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ d, st, &adis16475_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ d, st, &adis16475_flash_count_fops);
+ debugfs_create_file("firmware_revision", 0400,
+ d, st, &adis16475_firmware_revision_fops);
+ debugfs_create_file("firmware_date", 0400, d,
+ st, &adis16475_firmware_date_fops);
+}
+#else
+static void adis16475_debugfs_init(struct iio_dev *indio_dev)
+{
+}
+#endif
+
+static int adis16475_get_freq(struct adis16475 *st, u32 *freq)
+{
+ int ret;
+ u16 dec;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, &dec);
+ if (ret)
+ return ret;
+
+ *freq = DIV_ROUND_CLOSEST(st->clk_freq, dec + 1);
+
+ return 0;
+}
+
+static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
+{
+ u16 dec;
+ int ret;
+
+ if (!freq)
+ return -EINVAL;
+
+ dec = DIV_ROUND_CLOSEST(st->clk_freq, freq);
+
+ if (dec)
+ dec--;
+
+ if (dec > st->info->max_dec)
+ dec = st->info->max_dec;
+
+ ret = adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
+ if (ret)
+ return ret;
+
+ /*
+ * If decimation is used, then gyro and accel data will have meaningful
+ * bits on the LSB registers. This info is used on the trigger handler.
+ */
+ assign_bit(ADIS16475_LSB_DEC_MASK, &st->lsb_flag, dec);
+
+ return 0;
+}
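A quick numeric check of the decimation math above (values illustrative):

    u32 clk_freq = 2000, freq = 400;
    u16 dec = DIV_ROUND_CLOSEST(clk_freq, freq);   /* 5 */
    if (dec)
        dec--;                                     /* 4 */
    /* effective sample rate = clk_freq / (dec + 1) = 400 */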
+
+/* The values are approximate. */
+static const u32 adis16475_3db_freqs[] = {
+ [0] = 720, /* Filter disabled, full BW (~720Hz) */
+ [1] = 360,
+ [2] = 164,
+ [3] = 80,
+ [4] = 40,
+ [5] = 20,
+ [6] = 10,
+};
+
+static int adis16475_get_filter(struct adis16475 *st, u32 *filter)
+{
+ u16 filter_sz;
+ int ret;
+ const int mask = ADIS16475_FILT_CTRL_MASK;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FILT_CTRL, &filter_sz);
+ if (ret)
+ return ret;
+
+ *filter = adis16475_3db_freqs[filter_sz & mask];
+
+ return 0;
+}
+
+static int adis16475_set_filter(struct adis16475 *st, const u32 filter)
+{
+ int i = ARRAY_SIZE(adis16475_3db_freqs);
+ int ret;
+
+ while (--i) {
+ if (adis16475_3db_freqs[i] >= filter)
+ break;
+ }
+
+ ret = adis_write_reg_16(&st->adis, ADIS16475_REG_FILT_CTRL,
+ ADIS16475_FILT_CTRL(i));
+ if (ret)
+ return ret;
+
+ /*
+ * If FIR is used, then gyro and accel data will have meaningful
+ * bits on the LSB registers. This info is used on the trigger handler.
+ */
+ assign_bit(ADIS16475_LSB_FIR_MASK, &st->lsb_flag, i);
+
+ return 0;
+}
+
+static const u32 adis16475_calib_regs[] = {
+ [ADIS16475_SCAN_GYRO_X] = ADIS16475_REG_X_GYRO_BIAS_L,
+ [ADIS16475_SCAN_GYRO_Y] = ADIS16475_REG_Y_GYRO_BIAS_L,
+ [ADIS16475_SCAN_GYRO_Z] = ADIS16475_REG_Z_GYRO_BIAS_L,
+ [ADIS16475_SCAN_ACCEL_X] = ADIS16475_REG_X_ACCEL_BIAS_L,
+ [ADIS16475_SCAN_ACCEL_Y] = ADIS16475_REG_Y_ACCEL_BIAS_L,
+ [ADIS16475_SCAN_ACCEL_Z] = ADIS16475_REG_Z_ACCEL_BIAS_L,
+};
+
+static int adis16475_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ int ret;
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = st->info->gyro_max_val;
+ *val2 = st->info->gyro_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_ACCEL:
+ *val = st->info->accel_max_val;
+ *val2 = st->info->accel_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ *val = st->info->temp_scale;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adis_read_reg_32(&st->adis,
+ adis16475_calib_regs[chan->scan_index],
+ val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = adis16475_get_filter(st, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = adis16475_get_freq(st, &tmp);
+ if (ret)
+ return ret;
+
+ *val = tmp / 1000;
+ *val2 = (tmp % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16475_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long info)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ tmp = val * 1000 + val2 / 1000;
+ return adis16475_set_freq(st, tmp);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return adis16475_set_filter(st, val);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adis_write_reg_32(&st->adis,
+ adis16475_calib_regs[chan->scan_index],
+ val);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ADIS16475_MOD_CHAN(_type, _mod, _address, _si, _r_bits, _s_bits) \
+ { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .address = (_address), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (_r_bits), \
+ .storagebits = (_s_bits), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16475_GYRO_CHANNEL(_mod) \
+ ADIS16475_MOD_CHAN(IIO_ANGL_VEL, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _GYRO_L, \
+ ADIS16475_SCAN_GYRO_ ## _mod, 32, 32)
+
+#define ADIS16475_ACCEL_CHANNEL(_mod) \
+ ADIS16475_MOD_CHAN(IIO_ACCEL, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _ACCEL_L, \
+ ADIS16475_SCAN_ACCEL_ ## _mod, 32, 32)
+
+#define ADIS16475_TEMP_CHANNEL() { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .address = ADIS16475_REG_TEMP_OUT, \
+ .scan_index = ADIS16475_SCAN_TEMP, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec adis16475_channels[] = {
+ ADIS16475_GYRO_CHANNEL(X),
+ ADIS16475_GYRO_CHANNEL(Y),
+ ADIS16475_GYRO_CHANNEL(Z),
+ ADIS16475_ACCEL_CHANNEL(X),
+ ADIS16475_ACCEL_CHANNEL(Y),
+ ADIS16475_ACCEL_CHANNEL(Z),
+ ADIS16475_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(7)
+};
+
+enum adis16475_variant {
+ ADIS16470,
+ ADIS16475_1,
+ ADIS16475_2,
+ ADIS16475_3,
+ ADIS16477_1,
+ ADIS16477_2,
+ ADIS16477_3,
+ ADIS16465_1,
+ ADIS16465_2,
+ ADIS16465_3,
+ ADIS16467_1,
+ ADIS16467_2,
+ ADIS16467_3,
+ ADIS16500,
+ ADIS16505_1,
+ ADIS16505_2,
+ ADIS16505_3,
+ ADIS16507_1,
+ ADIS16507_2,
+ ADIS16507_3,
+};
+
+enum {
+ ADIS16475_DIAG_STAT_DATA_PATH = 1,
+ ADIS16475_DIAG_STAT_FLASH_MEM,
+ ADIS16475_DIAG_STAT_SPI,
+ ADIS16475_DIAG_STAT_STANDBY,
+ ADIS16475_DIAG_STAT_SENSOR,
+ ADIS16475_DIAG_STAT_MEMORY,
+ ADIS16475_DIAG_STAT_CLK,
+};
+
+static const char * const adis16475_status_error_msgs[] = {
+ [ADIS16475_DIAG_STAT_DATA_PATH] = "Data Path Overrun",
+ [ADIS16475_DIAG_STAT_FLASH_MEM] = "Flash memory update failure",
+ [ADIS16475_DIAG_STAT_SPI] = "SPI communication error",
+ [ADIS16475_DIAG_STAT_STANDBY] = "Standby mode",
+ [ADIS16475_DIAG_STAT_SENSOR] = "Sensor failure",
+ [ADIS16475_DIAG_STAT_MEMORY] = "Memory failure",
+ [ADIS16475_DIAG_STAT_CLK] = "Clock error",
+};
+
+static int adis16475_enable_irq(struct adis *adis, bool enable)
+{
+ /*
+ * There is no way to gate the data-ready signal inside the
+ * ADIS16475; we can only control its polarity.
+ */
+ if (enable)
+ enable_irq(adis->spi->irq);
+ else
+ disable_irq(adis->spi->irq);
+
+ return 0;
+}
+
+#define ADIS16475_DATA(_prod_id, _timeouts) \
+{ \
+ .msc_ctrl_reg = ADIS16475_REG_MSG_CTRL, \
+ .glob_cmd_reg = ADIS16475_REG_GLOB_CMD, \
+ .diag_stat_reg = ADIS16475_REG_DIAG_STAT, \
+ .prod_id_reg = ADIS16475_REG_PROD_ID, \
+ .prod_id = (_prod_id), \
+ .self_test_mask = BIT(2), \
+ .self_test_reg = ADIS16475_REG_GLOB_CMD, \
+ .cs_change_delay = 16, \
+ .read_delay = 5, \
+ .write_delay = 5, \
+ .status_error_msgs = adis16475_status_error_msgs, \
+ .status_error_mask = BIT(ADIS16475_DIAG_STAT_DATA_PATH) | \
+ BIT(ADIS16475_DIAG_STAT_FLASH_MEM) | \
+ BIT(ADIS16475_DIAG_STAT_SPI) | \
+ BIT(ADIS16475_DIAG_STAT_STANDBY) | \
+ BIT(ADIS16475_DIAG_STAT_SENSOR) | \
+ BIT(ADIS16475_DIAG_STAT_MEMORY) | \
+ BIT(ADIS16475_DIAG_STAT_CLK), \
+ .enable_irq = adis16475_enable_irq, \
+ .timeouts = (_timeouts), \
+}
+
+static const struct adis16475_sync adis16475_sync_mode[] = {
+ { ADIS16475_SYNC_OUTPUT },
+ { ADIS16475_SYNC_DIRECT, 1900, 2100 },
+ { ADIS16475_SYNC_SCALED, 1, 128 },
+ { ADIS16475_SYNC_PULSE, 1000, 2100 },
+};
+
+static const struct adis_timeout adis16475_timeouts = {
+ .reset_ms = 200,
+ .sw_reset_ms = 200,
+ .self_test_ms = 20,
+};
+
+static const struct adis_timeout adis1650x_timeouts = {
+ .reset_ms = 260,
+ .sw_reset_ms = 260,
+ .self_test_ms = 30,
+};
+
+static const struct adis16475_chip_info adis16475_chip_info[] = {
+ [ADIS16470] = {
+ .name = "adis16470",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16470, &adis16475_timeouts),
+ },
+ [ADIS16475_1] = {
+ .name = "adis16475-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ },
+ [ADIS16475_2] = {
+ .name = "adis16475-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ },
+ [ADIS16475_3] = {
+ .name = "adis16475-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ },
+ [ADIS16477_1] = {
+ .name = "adis16477-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ },
+ [ADIS16477_2] = {
+ .name = "adis16477-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ },
+ [ADIS16477_3] = {
+ .name = "adis16477-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ },
+ [ADIS16465_1] = {
+ .name = "adis16465-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ },
+ [ADIS16465_2] = {
+ .name = "adis16465-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ },
+ [ADIS16465_3] = {
+ .name = "adis16465-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ },
+ [ADIS16467_1] = {
+ .name = "adis16467-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ },
+ [ADIS16467_2] = {
+ .name = "adis16467-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ },
+ [ADIS16467_3] = {
+ .name = "adis16467-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ },
+ [ADIS16500] = {
+ .name = "adis16500",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16500, &adis1650x_timeouts),
+ },
+ [ADIS16505_1] = {
+ .name = "adis16505-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 78,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ },
+ [ADIS16505_2] = {
+ .name = "adis16505-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 78,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ },
+ [ADIS16505_3] = {
+ .name = "adis16505-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 78,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ },
+ [ADIS16507_1] = {
+ .name = "adis16507-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ },
+ [ADIS16507_2] = {
+ .name = "adis16507-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ },
+ [ADIS16507_3] = {
+ .name = "adis16507-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ },
+};
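A note on the scale encoding used throughout the table above: read_raw(), earlier in this patch, reports the (gyro_max_val, gyro_max_scale) pair as an IIO_VAL_FRACTIONAL scale, so the 160/40/10 factors inside IIO_RAD_TO_DEGREE(x << 16) are the 16-bit gyroscope sensitivities in LSB per degree/s, with the << 16 accounting for the extra low word available in 32-bit mode. A worked example for the +-2000 degree/s parts (a sketch, using IIO_RAD_TO_DEGREE(x) ~= x * 57.2958):

	scale = gyro_max_val / gyro_max_scale
	      = 1 / IIO_RAD_TO_DEGREE(10 << 16)
	      ~= 1 / (10 * 65536 * 57.2958)
	      ~= 2.66e-8 rad/s per 32-bit LSB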
+
+static const struct iio_info adis16475_info = {
+ .read_raw = &adis16475_read_raw,
+ .write_raw = &adis16475_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+static struct adis_burst adis16475_burst = {
+ .en = true,
+ .reg_cmd = ADIS16475_REG_GLOB_CMD,
+	/*
+	 * adis_update_scan_mode_burst() sets the burst length based on the
+	 * number of channels and allocates 16 bits for each. However,
+	 * adis1647x devices also need space for DIAG_STAT, for DATA_CNTR or
+	 * TIME_STAMP (which of the two depends on the clock mode, although
+	 * the driver does not use these bytes) and for the CRC.
+	 */
+ .extra_len = 3 * sizeof(u16),
+ .burst_max_len = ADIS16475_BURST32_MAX_DATA,
+};
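To make the comment above concrete, a sketch of the sizing arithmetic, assuming the channel list declared earlier in this patch (3 gyro + 3 accel + temp + timestamp = 8 entries) and the two ADIS16475_BURST*_MAX_DATA defines:

	burst_len   = (8 - 1) * sizeof(u16)	/* 14 bytes of channel data */
		    + 3 * sizeof(u16);		/* DIAG_STAT, DATA_CNTR, CRC */
						/* = 20, ADIS16475_BURST_MAX_DATA */
	burst32_len = burst_len + 6 * sizeof(u16);
						/* = 32, ADIS16475_BURST32_MAX_DATA */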
+
+static bool adis16475_validate_crc(const u8 *buffer, u16 crc,
+ const bool burst32)
+{
+ int i;
+	/* burst32 needs 6 extra elements for the gyro and accel low words */
+ const u16 sz = burst32 ? ADIS16475_BURST32_MAX_DATA :
+ ADIS16475_BURST_MAX_DATA;
+
+ for (i = 0; i < sz - 2; i++)
+ crc -= buffer[i];
+
+ return crc == 0;
+}
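The "CRC" checked here is really a plain byte-sum checksum, validated by subtraction rather than recomputation. The two are equivalent; a standalone sketch with a hypothetical helper name:

	static u16 adis16475_compute_checksum(const u8 *buffer, u16 sz)
	{
		u16 crc = 0;
		int i;

		/* sum every payload byte except the 2 checksum bytes at the end */
		for (i = 0; i < sz - 2; i++)
			crc += buffer[i];

		/* the burst is valid iff this equals the received checksum word */
		return crc;
	}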
+
+static void adis16475_burst32_check(struct adis16475 *st)
+{
+ int ret;
+ struct adis *adis = &st->adis;
+
+ if (!st->info->has_burst32)
+ return;
+
+ if (st->lsb_flag && !st->burst32) {
+ const u16 en = ADIS16500_BURST32(1);
+
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16500_BURST32_MASK, en);
+ if (ret)
+ return;
+
+ st->burst32 = true;
+
+		/*
+		 * In 32-bit mode we need 2 extra bytes for each gyro
+		 * and accel channel.
+		 */
+ adis->burst_extra_len = 6 * sizeof(u16);
+ adis->xfer[1].len += 6 * sizeof(u16);
+		dev_dbg(&adis->spi->dev, "Enable burst32 mode, xfer:%d\n",
+ adis->xfer[1].len);
+
+ } else if (!st->lsb_flag && st->burst32) {
+ const u16 en = ADIS16500_BURST32(0);
+
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16500_BURST32_MASK, en);
+ if (ret)
+ return;
+
+ st->burst32 = false;
+
+		/* Remove the extra bytes */
+ adis->burst_extra_len = 0;
+ adis->xfer[1].len -= 6 * sizeof(u16);
+ dev_dbg(&adis->spi->dev, "Disable burst32 mode, xfer:%d\n",
+ adis->xfer[1].len);
+ }
+}
+
+static irqreturn_t adis16475_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ int ret, bit, i = 0;
+ __be16 *buffer;
+ u16 crc;
+ bool valid;
+	/* word offset of the first element after the gyro and accel data */
+ const u8 offset = st->burst32 ? 13 : 7;
+ const u32 cached_spi_speed_hz = adis->spi->max_speed_hz;
+
+ adis->spi->max_speed_hz = ADIS16475_BURST_MAX_SPEED;
+
+	ret = spi_sync(adis->spi, &adis->msg);
+	adis->spi->max_speed_hz = cached_spi_speed_hz;
+	/* an IRQ handler must not return a negative errno, so just bail out */
+	if (ret)
+		goto check_burst32;
+
+ buffer = adis->buffer;
+
+ crc = be16_to_cpu(buffer[offset + 2]);
+ valid = adis16475_validate_crc(adis->buffer, crc, st->burst32);
+ if (!valid) {
+ dev_err(&adis->spi->dev, "Invalid crc\n");
+ goto check_burst32;
+ }
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ /*
+		 * When burst mode is used, the system flags (DIAG_STAT) word
+		 * comes first in the sequence, so the data words are shifted
+		 * relative to their scan indexes.
+ */
+ switch (bit) {
+ case ADIS16475_SCAN_TEMP:
+ st->data[i++] = buffer[offset];
+ break;
+ case ADIS16475_SCAN_GYRO_X ... ADIS16475_SCAN_ACCEL_Z:
+ /*
+			 * The first 2 bytes of the received data are the
+			 * DIAG_STAT reg, hence the +1 offset here.
+ */
+ if (st->burst32) {
+ /* upper 16 */
+ st->data[i++] = buffer[bit * 2 + 2];
+ /* lower 16 */
+ st->data[i++] = buffer[bit * 2 + 1];
+ } else {
+ st->data[i++] = buffer[bit + 1];
+ /*
+				 * Don't bother doing the manual read if the
+				 * device supports burst32; burst32 will be
+				 * enabled on the next call to
+				 * adis16475_burst32_check().
+ */
+ if (st->lsb_flag && !st->info->has_burst32) {
+ u16 val = 0;
+ const u32 reg = ADIS16475_REG_X_GYRO_L +
+ bit * 4;
+
+ adis_read_reg_16(adis, reg, &val);
+ st->data[i++] = cpu_to_be16(val);
+ } else {
+ /* lower not used */
+ st->data[i++] = 0;
+ }
+ }
+ break;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data, pf->timestamp);
+check_burst32:
+ /*
+ * We only check the burst mode at the end of the current capture since
+ * it takes a full data ready cycle for the device to update the burst
+ * array.
+ */
+ adis16475_burst32_check(st);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
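The indexing above implies the following 16-bit word layout for the burst payload (derived from the offsets used in the handler, not restated from the datasheet):

	burst16: [0] DIAG_STAT, [1..6] gyro/accel, [7] TEMP,
		 [8] DATA_CNTR/TIME_STAMP, [9] checksum
	burst32: [0] DIAG_STAT, [1..12] gyro/accel as low/high word pairs,
		 [13] TEMP, [14] DATA_CNTR/TIME_STAMP, [15] checksum

Hence offset is 7 or 13 and the checksum sits at buffer[offset + 2].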
+
+static void adis16475_disable_clk(void *data)
+{
+ clk_disable_unprepare((struct clk *)data);
+}
+
+static int adis16475_config_sync_mode(struct adis16475 *st)
+{
+ int ret;
+ struct device *dev = &st->adis.spi->dev;
+ const struct adis16475_sync *sync;
+ u32 sync_mode;
+
+ /* default to internal clk */
+ st->clk_freq = st->info->int_clk * 1000;
+
+ ret = device_property_read_u32(dev, "adi,sync-mode", &sync_mode);
+ if (ret)
+ return 0;
+
+ if (sync_mode >= st->info->num_sync) {
+ dev_err(dev, "Invalid sync mode: %u for %s\n", sync_mode,
+ st->info->name);
+ return -EINVAL;
+ }
+
+ sync = &st->info->sync[sync_mode];
+
+	/* All the other modes require an external input signal */
+ if (sync->sync_mode != ADIS16475_SYNC_OUTPUT) {
+ struct clk *clk = devm_clk_get(dev, NULL);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adis16475_disable_clk, clk);
+ if (ret)
+ return ret;
+
+ st->clk_freq = clk_get_rate(clk);
+ if (st->clk_freq < sync->min_rate ||
+ st->clk_freq > sync->max_rate) {
+ dev_err(dev,
+				"Clk rate: %u not in a valid range: [%u %u]\n",
+ st->clk_freq, sync->min_rate, sync->max_rate);
+ return -EINVAL;
+ }
+
+ if (sync->sync_mode == ADIS16475_SYNC_SCALED) {
+ u16 up_scale;
+ u32 scaled_out_freq = 0;
+			/*
+			 * In scaled mode an up-scale factor is mandatory:
+			 * the allowable input clock range is 1 Hz to 128 Hz
+			 * while the allowable output range is 1900 to
+			 * 2100 Hz, so the input must be scaled up to reach
+			 * a valid output rate.
+			 */
+ ret = device_property_read_u32(dev,
+ "adi,scaled-output-hz",
+ &scaled_out_freq);
+ if (ret) {
+				dev_err(dev, "adi,scaled-output-hz must be given when in scaled sync mode\n");
+ return -EINVAL;
+ } else if (scaled_out_freq < 1900 ||
+ scaled_out_freq > 2100) {
+				dev_err(dev, "Invalid value: %u for adi,scaled-output-hz\n",
+ scaled_out_freq);
+ return -EINVAL;
+ }
+
+ up_scale = DIV_ROUND_CLOSEST(scaled_out_freq,
+ st->clk_freq);
+
+ ret = __adis_write_reg_16(&st->adis,
+ ADIS16475_REG_UP_SCALE,
+ up_scale);
+ if (ret)
+ return ret;
+
+ st->clk_freq = scaled_out_freq;
+ }
+
+ st->clk_freq *= 1000;
+ }
+	/*
+	 * Keep in mind that the clock-mode mask in adis1650* chips is
+	 * different (1100 instead of 11100). However, BIT(4) is never
+	 * configured in these chips and its default value is 0, so the
+	 * operation below is safe. This is kept as is for simplicity,
+	 * to avoid extra variables in chip_info.
+	 */
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16475_SYNC_MODE_MASK, sync->sync_mode);
+ if (ret)
+ return ret;
+
+ usleep_range(250, 260);
+
+ return 0;
+}
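As a worked example of the scaled path: with a 100 Hz external sync clock and adi,scaled-output-hz = 2000, up_scale = DIV_ROUND_CLOSEST(2000, 100) = 20 is written to UP_SCALE, the device then samples at 2000 Hz, and st->clk_freq ends up as 2000000 since the driver keeps clock rates in mHz.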
+
+static int adis16475_config_irq_pin(struct adis16475 *st)
+{
+ int ret;
+ struct irq_data *desc;
+ u32 irq_type;
+ u16 val = 0;
+ u8 polarity;
+ struct spi_device *spi = st->adis.spi;
+
+ desc = irq_get_irq_data(spi->irq);
+ if (!desc) {
+ dev_err(&spi->dev, "Could not find IRQ %d\n", spi->irq);
+ return -EINVAL;
+ }
+	/*
+	 * The data-ready polarity is configurable. The adis struct also
+	 * needs to be updated if data ready is wanted as active low.
+	 */
+ irq_type = irqd_get_trigger_type(desc);
+ if (irq_type == IRQ_TYPE_EDGE_RISING) {
+ polarity = 1;
+ st->adis.irq_flag = IRQF_TRIGGER_RISING;
+ } else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
+ polarity = 0;
+ st->adis.irq_flag = IRQF_TRIGGER_FALLING;
+ } else {
+ dev_err(&spi->dev, "Invalid interrupt type 0x%x specified\n",
+ irq_type);
+ return -EINVAL;
+ }
+
+ val = ADIS16475_MSG_CTRL_DR_POL(polarity);
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16475_MSG_CTRL_DR_POL_MASK, val);
+ if (ret)
+ return ret;
+	/*
+	 * Writes to the MSC_CTRL register need some time to take effect.
+	 * That time should not exceed 200us, so 250us is more than enough!
+	 */
+ usleep_range(250, 260);
+
+ return 0;
+}
+
+static const struct of_device_id adis16475_of_match[] = {
+ { .compatible = "adi,adis16470",
+ .data = &adis16475_chip_info[ADIS16470] },
+ { .compatible = "adi,adis16475-1",
+ .data = &adis16475_chip_info[ADIS16475_1] },
+ { .compatible = "adi,adis16475-2",
+ .data = &adis16475_chip_info[ADIS16475_2] },
+ { .compatible = "adi,adis16475-3",
+ .data = &adis16475_chip_info[ADIS16475_3] },
+ { .compatible = "adi,adis16477-1",
+ .data = &adis16475_chip_info[ADIS16477_1] },
+ { .compatible = "adi,adis16477-2",
+ .data = &adis16475_chip_info[ADIS16477_2] },
+ { .compatible = "adi,adis16477-3",
+ .data = &adis16475_chip_info[ADIS16477_3] },
+ { .compatible = "adi,adis16465-1",
+ .data = &adis16475_chip_info[ADIS16465_1] },
+ { .compatible = "adi,adis16465-2",
+ .data = &adis16475_chip_info[ADIS16465_2] },
+ { .compatible = "adi,adis16465-3",
+ .data = &adis16475_chip_info[ADIS16465_3] },
+ { .compatible = "adi,adis16467-1",
+ .data = &adis16475_chip_info[ADIS16467_1] },
+ { .compatible = "adi,adis16467-2",
+ .data = &adis16475_chip_info[ADIS16467_2] },
+ { .compatible = "adi,adis16467-3",
+ .data = &adis16475_chip_info[ADIS16467_3] },
+ { .compatible = "adi,adis16500",
+ .data = &adis16475_chip_info[ADIS16500] },
+ { .compatible = "adi,adis16505-1",
+ .data = &adis16475_chip_info[ADIS16505_1] },
+ { .compatible = "adi,adis16505-2",
+ .data = &adis16475_chip_info[ADIS16505_2] },
+ { .compatible = "adi,adis16505-3",
+ .data = &adis16475_chip_info[ADIS16505_3] },
+ { .compatible = "adi,adis16507-1",
+ .data = &adis16475_chip_info[ADIS16507_1] },
+ { .compatible = "adi,adis16507-2",
+ .data = &adis16475_chip_info[ADIS16507_2] },
+ { .compatible = "adi,adis16507-3",
+ .data = &adis16475_chip_info[ADIS16507_3] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adis16475_of_match);
+
+static int adis16475_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adis16475 *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ st->adis.burst = &adis16475_burst;
+
+ st->info = device_get_match_data(&spi->dev);
+ if (!st->info)
+ return -EINVAL;
+
+ ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = st->info->name;
+ indio_dev->channels = st->info->channels;
+ indio_dev->num_channels = st->info->num_channels;
+ indio_dev->info = &adis16475_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = __adis_initial_startup(&st->adis);
+ if (ret)
+ return ret;
+
+ ret = adis16475_config_irq_pin(st);
+ if (ret)
+ return ret;
+
+ ret = adis16475_config_sync_mode(st);
+ if (ret)
+ return ret;
+
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+ adis16475_trigger_handler);
+ if (ret)
+ return ret;
+
+ adis16475_enable_irq(&st->adis, false);
+
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ adis16475_debugfs_init(indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver adis16475_driver = {
+ .driver = {
+ .name = "adis16475",
+ .of_match_table = adis16475_of_match,
+ },
+ .probe = adis16475_probe,
+};
+module_spi_driver(adis16475_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16475 IMU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index cfae0e4476e7..6a471eee110e 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -284,22 +284,18 @@ DEFINE_DEBUGFS_ATTRIBUTE(adis16480_flash_count_fops,
static int adis16480_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16480 *adis16480 = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
debugfs_create_file_unsafe("firmware_revision", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_firmware_revision_fops);
+ d, adis16480, &adis16480_firmware_revision_fops);
debugfs_create_file_unsafe("firmware_date", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_firmware_date_fops);
+ d, adis16480, &adis16480_firmware_date_fops);
debugfs_create_file_unsafe("serial_number", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_serial_number_fops);
+ d, adis16480, &adis16480_serial_number_fops);
debugfs_create_file_unsafe("product_id", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_product_id_fops);
+ d, adis16480, &adis16480_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_flash_count_fops);
+ d, adis16480, &adis16480_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 04e5e2a0fd6b..5b4225ee09b9 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -23,25 +23,30 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct adis *adis = iio_device_get_drvdata(indio_dev);
- unsigned int burst_length;
+ unsigned int burst_length, burst_max_length;
u8 *tx;
/* All but the timestamp channel */
burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
- burst_length += adis->burst->extra_len;
+ burst_length += adis->burst->extra_len + adis->burst_extra_len;
+
+ if (adis->burst->burst_max_len)
+ burst_max_length = adis->burst->burst_max_len;
+ else
+ burst_max_length = burst_length;
adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
if (!adis->xfer)
return -ENOMEM;
- adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
+ adis->buffer = kzalloc(burst_max_length + sizeof(u16), GFP_KERNEL);
if (!adis->buffer) {
kfree(adis->xfer);
adis->xfer = NULL;
return -ENOMEM;
}
- tx = adis->buffer + burst_length;
+ tx = adis->buffer + burst_max_length;
tx[0] = ADIS_READ_REG(adis->burst->reg_cmd);
tx[1] = 0;
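The point of burst_max_len is to size the buffer once for the worst case: for the adis16475 parts with burst32 support, 32 bytes of burst data plus 2 bytes for the tx command are allocated even when the device starts out in 16-bit mode, so toggling burst32 at runtime only has to adjust xfer[1].len and never reallocates.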
@@ -156,6 +161,14 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
return IRQ_HANDLED;
}
+static void adis_buffer_cleanup(void *arg)
+{
+ struct adis *adis = arg;
+
+ kfree(adis->buffer);
+ kfree(adis->xfer);
+}
+
/**
* adis_setup_buffer_and_trigger() - Sets up buffer and trigger for the adis device
* @adis: The adis device.
@@ -199,6 +212,43 @@ error_buffer_cleanup:
EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger);
/**
+ * devm_adis_setup_buffer_and_trigger() - Sets up buffer and trigger for
+ * the managed adis device
+ * @adis: The adis device
+ * @indio_dev: The IIO device
+ * @trigger_handler: Optional trigger handler, may be NULL.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function performs exactly the same steps as adis_setup_buffer_and_trigger().
+ */
+int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler)
+{
+ int ret;
+
+ if (!trigger_handler)
+ trigger_handler = adis_trigger_handler;
+
+ ret = devm_iio_triggered_buffer_setup(&adis->spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ if (adis->spi->irq) {
+ ret = devm_adis_probe_trigger(adis, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ return devm_add_action_or_reset(&adis->spi->dev, adis_buffer_cleanup,
+ adis);
+}
+EXPORT_SYMBOL_GPL(devm_adis_setup_buffer_and_trigger);
+
+/**
* adis_cleanup_buffer_and_trigger() - Free buffer and trigger resources
* @adis: The adis device.
* @indio_dev: The IIO device.
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index 8b9cd02c0f9f..8afe71947c00 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -27,6 +27,34 @@ static const struct iio_trigger_ops adis_trigger_ops = {
.set_trigger_state = &adis_data_rdy_trigger_set_state,
};
+static void adis_trigger_setup(struct adis *adis)
+{
+ adis->trig->dev.parent = &adis->spi->dev;
+ adis->trig->ops = &adis_trigger_ops;
+ iio_trigger_set_drvdata(adis->trig, adis);
+}
+
+static int adis_validate_irq_flag(struct adis *adis)
+{
+ /*
+	 * Typically these devices have data ready either on the rising or on
+	 * the falling edge of the data-ready pin. This check enforces that
+	 * one of those is set in the drivers. It defaults to
+	 * IRQF_TRIGGER_RISING for backward compatibility with devices that
+	 * don't support changing the pin polarity.
+ */
+ if (!adis->irq_flag) {
+ adis->irq_flag = IRQF_TRIGGER_RISING;
+ return 0;
+ } else if (adis->irq_flag != IRQF_TRIGGER_RISING &&
+ adis->irq_flag != IRQF_TRIGGER_FALLING) {
+ dev_err(&adis->spi->dev, "Invalid IRQ mask: %08lx\n",
+ adis->irq_flag);
+ return -EINVAL;
+ }
+
+ return 0;
+}
/**
* adis_probe_trigger() - Sets up trigger for a adis device
* @adis: The adis device
@@ -45,13 +73,15 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (adis->trig == NULL)
return -ENOMEM;
- adis->trig->dev.parent = &adis->spi->dev;
- adis->trig->ops = &adis_trigger_ops;
- iio_trigger_set_drvdata(adis->trig, adis);
+ adis_trigger_setup(adis);
+
+ ret = adis_validate_irq_flag(adis);
+ if (ret)
+ return ret;
ret = request_irq(adis->spi->irq,
&iio_trigger_generic_data_rdy_poll,
- IRQF_TRIGGER_RISING,
+ adis->irq_flag,
indio_dev->name,
adis->trig);
if (ret)
@@ -74,6 +104,40 @@ error_free_trig:
EXPORT_SYMBOL_GPL(adis_probe_trigger);
/**
+ * devm_adis_probe_trigger() - Sets up trigger for a managed adis device
+ * @adis: The adis device
+ * @indio_dev: The IIO device
+ *
+ * Returns 0 on success or a negative error code
+ */
+int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+{
+ int ret;
+
+ adis->trig = devm_iio_trigger_alloc(&adis->spi->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!adis->trig)
+ return -ENOMEM;
+
+ adis_trigger_setup(adis);
+
+ ret = adis_validate_irq_flag(adis);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&adis->spi->dev, adis->spi->irq,
+ &iio_trigger_generic_data_rdy_poll,
+ adis->irq_flag,
+ indio_dev->name,
+ adis->trig);
+ if (ret)
+ return ret;
+
+ return devm_iio_trigger_register(&adis->spi->dev, adis->trig);
+}
+EXPORT_SYMBOL_GPL(devm_adis_probe_trigger);
+
+/**
* adis_remove_trigger() - Remove trigger for a adis devices
* @adis: The adis device
*
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index e36f5e82d400..26398614eddf 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -24,8 +24,8 @@ static int bmi160_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
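The %pe specifier prints the symbolic name of an error pointer, avoiding the (int)PTR_ERR() cast. A minimal sketch (with CONFIG_SYMBOLIC_ERRNAME=n it falls back to the decimal errno):

	void *p = ERR_PTR(-ENOMEM);

	pr_err("Failed: %pe\n", p);	/* prints "Failed: -ENOMEM" */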
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
index c19e3df35559..61389b41c6d9 100644
--- a/drivers/iio/imu/bmi160/bmi160_spi.c
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -20,8 +20,8 @@ static int bmi160_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
return bmi160_core_probe(&spi->dev, regmap, id->name, true);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 2f8560ba4572..c27d06035c8b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -135,6 +135,7 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
st->mux_client = NULL;
if (ACPI_HANDLE(&client->dev)) {
struct i2c_board_info info;
+ struct i2c_client *mux_client;
struct acpi_device *adev;
int ret = -1;
@@ -172,9 +173,10 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
} else
return 0; /* no secondary addr, which is OK */
}
- st->mux_client = i2c_new_device(st->muxc->adapter[0], &info);
- if (!st->mux_client)
- return -ENODEV;
+ mux_client = i2c_new_client_device(st->muxc->adapter[0], &info);
+ if (IS_ERR(mux_client))
+ return PTR_ERR(mux_client);
+ st->mux_client = mux_client;
}
return 0;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 0b8d2f7a0165..4d604fe842e5 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -526,7 +526,7 @@ static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg,
__be16 d = cpu_to_be16(val);
ind = (axis - IIO_MOD_X) * 2;
- result = regmap_bulk_write(st->map, reg + ind, (u8 *)&d, 2);
+ result = regmap_bulk_write(st->map, reg + ind, &d, sizeof(d));
if (result)
return -EINVAL;
@@ -540,7 +540,7 @@ static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
__be16 d;
ind = (axis - IIO_MOD_X) * 2;
- result = regmap_bulk_read(st->map, reg + ind, (u8 *)&d, 2);
+ result = regmap_bulk_read(st->map, reg + ind, &d, sizeof(d));
if (result)
return -EINVAL;
*val = (short)be16_to_cpup(&d);
@@ -1248,12 +1248,31 @@ static const struct attribute_group inv_attribute_group = {
.attrs = inv_attributes
};
+static int inv_mpu6050_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ if (readval)
+ ret = regmap_read(st->map, reg, readval);
+ else
+ ret = regmap_write(st->map, reg, writeval);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
static const struct iio_info mpu_info = {
.read_raw = &inv_mpu6050_read_raw,
.write_raw = &inv_mpu6050_write_raw,
.write_raw_get_fmt = &inv_write_raw_get_fmt,
.attrs = &inv_attribute_group,
.validate_trigger = inv_mpu6050_validate_trigger,
+ .debugfs_reg_access = &inv_mpu6050_reg_access,
};
/**
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 6993d3b87bb0..28cfae1e61cf 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -122,8 +122,8 @@ static int inv_mpu_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 673b198e6368..6f968ce687e1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -53,8 +53,8 @@ static int inv_mpu_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 41cb20cb3809..b56df409ed0f 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -111,7 +111,7 @@ struct st_lsm6dsx_odr {
u8 val;
};
-#define ST_LSM6DSX_ODR_LIST_SIZE 6
+#define ST_LSM6DSX_ODR_LIST_SIZE 8
struct st_lsm6dsx_odr_table_entry {
struct st_lsm6dsx_reg reg;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 4426524b59f2..0b776cb91928 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -27,7 +27,8 @@
* - FIFO size: 4KB
*
* - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX:
- * - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
+ * - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416,
+ * 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 3KB
@@ -791,7 +792,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -804,7 +806,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
},
.fs_table = {
@@ -994,7 +997,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -1007,7 +1011,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
},
.fs_table = {
@@ -1171,7 +1176,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -1184,7 +1190,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
},
.fs_table = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index 64ef07a30726..c1f83fe0d8da 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -88,6 +88,69 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
.len = 6,
},
},
+ /* LIS3MDL */
+ {
+ .i2c_addr = { 0x1e },
+ .wai = {
+ .addr = 0x0f,
+ .val = 0x3d,
+ },
+ .id = ST_LSM6DSX_ID_MAGN,
+ .odr_table = {
+ .reg = {
+ .addr = 0x20,
+ .mask = GENMASK(4, 2),
+ },
+ .odr_avl[0] = { 1000, 0x0 },
+ .odr_avl[1] = { 2000, 0x1 },
+ .odr_avl[2] = { 3000, 0x2 },
+ .odr_avl[3] = { 5000, 0x3 },
+ .odr_avl[4] = { 10000, 0x4 },
+ .odr_avl[5] = { 20000, 0x5 },
+ .odr_avl[6] = { 40000, 0x6 },
+ .odr_avl[7] = { 80000, 0x7 },
+ .odr_len = 8,
+ },
+ .fs_table = {
+ .reg = {
+ .addr = 0x21,
+ .mask = GENMASK(6, 5),
+ },
+ .fs_avl[0] = {
+ .gain = 146,
+ .val = 0x00,
+ }, /* 4000 uG/LSB */
+ .fs_avl[1] = {
+ .gain = 292,
+ .val = 0x01,
+ }, /* 8000 uG/LSB */
+ .fs_avl[2] = {
+ .gain = 438,
+ .val = 0x02,
+ }, /* 12000 uG/LSB */
+ .fs_avl[3] = {
+ .gain = 584,
+ .val = 0x03,
+ }, /* 16000 uG/LSB */
+ .fs_len = 4,
+ },
+ .pwr_table = {
+ .reg = {
+ .addr = 0x22,
+ .mask = GENMASK(1, 0),
+ },
+ .off_val = 0x2,
+ .on_val = 0x0,
+ },
+ .bdu = {
+ .addr = 0x24,
+ .mask = BIT(6),
+ },
+ .out = {
+ .addr = 0x28,
+ .len = 6,
+ },
+ },
};
static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
@@ -519,6 +582,36 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev,
}
static int
+st_lsm6dsx_shub_set_full_scale(struct st_lsm6dsx_sensor *sensor,
+ u32 gain)
+{
+ const struct st_lsm6dsx_fs_table_entry *fs_table;
+ int i, err;
+
+ fs_table = &sensor->ext_info.settings->fs_table;
+ if (!fs_table->reg.addr)
+ return -ENOTSUPP;
+
+ for (i = 0; i < fs_table->fs_len; i++) {
+ if (fs_table->fs_avl[i].gain == gain)
+ break;
+ }
+
+ if (i == fs_table->fs_len)
+ return -EINVAL;
+
+ err = st_lsm6dsx_shub_write_with_mask(sensor, fs_table->reg.addr,
+ fs_table->reg.mask,
+ fs_table->fs_avl[i].val);
+ if (err < 0)
+ return err;
+
+ sensor->gain = gain;
+
+ return 0;
+}
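For example, a request for gain 292 would match fs_avl[1] of the LIS3MDL table above and program 0x01 into bits [6:5] of register 0x21 before caching the new gain; as the write_raw() path below shows, the gain arrives in val2 of the userspace scale fraction.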
+
+static int
st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -544,19 +637,25 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
ref_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
odr = st_lsm6dsx_check_odr(ref_sensor, val, &odr_val);
- if (odr < 0)
- return odr;
+ if (odr < 0) {
+ err = odr;
+ goto release;
+ }
sensor->ext_info.slv_odr = val;
sensor->odr = odr;
}
break;
}
+ case IIO_CHAN_INFO_SCALE:
+ err = st_lsm6dsx_shub_set_full_scale(sensor, val2);
+ break;
default:
err = -EINVAL;
break;
}
+release:
iio_device_release_direct_mode(iio_dev);
return err;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 4ada5592aa2b..9fa238c0a7d4 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -189,10 +189,12 @@ __poll_t iio_buffer_poll(struct file *filp,
*/
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
- if (!indio_dev->buffer)
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (!buffer)
return;
- wake_up(&indio_dev->buffer->pollq);
+ wake_up(&buffer->pollq);
}
void iio_buffer_init(struct iio_buffer *buffer)
@@ -262,10 +264,11 @@ static ssize_t iio_scan_el_show(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
/* Ensure ret is 0 or 1. */
ret = !!test_bit(to_iio_dev_attr(attr)->address,
- indio_dev->buffer->scan_mask);
+ buffer->scan_mask);
return sprintf(buf, "%d\n", ret);
}
@@ -316,8 +319,7 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
const unsigned long *mask;
unsigned long *trialmask;
- trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
- sizeof(*trialmask), GFP_KERNEL);
+ trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
@@ -382,7 +384,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
@@ -411,7 +413,9 @@ static ssize_t iio_scan_el_ts_show(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ return sprintf(buf, "%d\n", buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
@@ -421,6 +425,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
bool state;
ret = strtobool(buf, &state);
@@ -428,11 +433,11 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
- indio_dev->buffer->scan_timestamp = state;
+ buffer->scan_timestamp = state;
error_ret:
mutex_unlock(&indio_dev->mlock);
@@ -440,10 +445,10 @@ error_ret:
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer,
const struct iio_chan_spec *chan)
{
int ret, attrcount = 0;
- struct iio_buffer *buffer = indio_dev->buffer;
ret = __iio_add_chan_devattr("index",
chan,
@@ -519,7 +524,7 @@ static ssize_t iio_buffer_write_length(struct device *dev,
return len;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
} else {
buffer->access->set_length(buffer, val);
@@ -540,7 +545,9 @@ static ssize_t iio_buffer_show_enable(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
}
static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
@@ -687,6 +694,13 @@ static int iio_verify_update(struct iio_dev *indio_dev,
bool scan_timestamp;
unsigned int modes;
+ if (insert_buffer &&
+ bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
+ dev_dbg(&indio_dev->dev,
+ "At least one scan element must be enabled first\n");
+ return -EINVAL;
+ }
+
memset(config, 0, sizeof(*config));
config->watermark = ~0;
@@ -913,6 +927,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
indio_dev->active_scan_mask = config->scan_mask;
indio_dev->scan_timestamp = config->scan_timestamp;
indio_dev->scan_bytes = config->scan_bytes;
+ indio_dev->currentmode = config->mode;
iio_update_demux(indio_dev);
@@ -948,8 +963,6 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
goto err_disable_buffers;
}
- indio_dev->currentmode = config->mode;
-
if (indio_dev->setup_ops->postenable) {
ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
@@ -966,10 +979,10 @@ err_disable_buffers:
buffer_list)
iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
- indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
indio_dev->active_scan_mask = NULL;
return ret;
@@ -1004,8 +1017,6 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
ret = ret2;
}
- indio_dev->currentmode = INDIO_DIRECT_MODE;
-
if (indio_dev->setup_ops->postdisable) {
ret2 = indio_dev->setup_ops->postdisable(indio_dev);
if (ret2 && !ret)
@@ -1014,6 +1025,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
indio_dev->active_scan_mask = NULL;
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
return ret;
}
@@ -1123,6 +1135,7 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
int ret;
bool requested_state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
bool inlist;
ret = strtobool(buf, &requested_state);
@@ -1132,17 +1145,15 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
mutex_lock(&indio_dev->mlock);
/* Find out if it is in the list */
- inlist = iio_buffer_is_active(indio_dev->buffer);
+ inlist = iio_buffer_is_active(buffer);
/* Already in desired state */
if (inlist == requested_state)
goto done;
if (requested_state)
- ret = __iio_update_buffers(indio_dev,
- indio_dev->buffer, NULL);
+ ret = __iio_update_buffers(indio_dev, buffer, NULL);
else
- ret = __iio_update_buffers(indio_dev,
- NULL, indio_dev->buffer);
+ ret = __iio_update_buffers(indio_dev, NULL, buffer);
done:
mutex_unlock(&indio_dev->mlock);
@@ -1184,7 +1195,7 @@ static ssize_t iio_buffer_store_watermark(struct device *dev,
goto out;
}
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto out;
}
@@ -1201,11 +1212,9 @@ static ssize_t iio_dma_show_data_available(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- size_t bytes;
-
- bytes = iio_buffer_data_available(indio_dev->buffer);
+ struct iio_buffer *buffer = indio_dev->buffer;
- return sprintf(buf, "%zu\n", bytes);
+ return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
}
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
@@ -1233,7 +1242,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
struct iio_dev_attr *p;
struct attribute **attr;
struct iio_buffer *buffer = indio_dev->buffer;
- int ret, i, attrn, attrcount, attrcount_orig = 0;
+ int ret, i, attrn, attrcount;
const struct iio_chan_spec *channels;
channels = indio_dev->channels;
@@ -1277,12 +1286,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
- if (buffer->scan_el_attrs != NULL) {
- attr = buffer->scan_el_attrs->attrs;
- while (*attr++ != NULL)
- attrcount_orig++;
- }
- attrcount = attrcount_orig;
+ attrcount = 0;
INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
channels = indio_dev->channels;
if (channels) {
@@ -1291,7 +1295,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
if (channels[i].scan_index < 0)
continue;
- ret = iio_buffer_add_channel_sysfs(indio_dev,
+ ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
&channels[i]);
if (ret < 0)
goto error_cleanup_dynamic;
@@ -1319,10 +1323,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_free_scan_mask;
}
- if (buffer->scan_el_attrs)
- memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
- sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
- attrn = attrcount_orig;
+ attrn = 0;
list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
@@ -1334,20 +1335,22 @@ error_free_scan_mask:
bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
- kfree(indio_dev->buffer->buffer_group.attrs);
+ kfree(buffer->buffer_group.attrs);
return ret;
}
void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
- if (!indio_dev->buffer)
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (!buffer)
return;
- bitmap_free(indio_dev->buffer->scan_mask);
- kfree(indio_dev->buffer->buffer_group.attrs);
- kfree(indio_dev->buffer->scan_el_group.attrs);
- iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
+ bitmap_free(buffer->scan_mask);
+ kfree(buffer->buffer_group.attrs);
+ kfree(buffer->scan_el_group.attrs);
+ iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
}
/**
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 24f7bbff4938..1527f01a44f1 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -572,46 +572,46 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
switch (type) {
case IIO_VAL_INT:
- return snprintf(buf, len, "%d", vals[0]);
+ return scnprintf(buf, len, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
/* fall through */
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
- return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
+ return scnprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
-vals[1], scale_db ? " dB" : "");
else
- return snprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
+ return scnprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
scale_db ? " dB" : "");
case IIO_VAL_INT_PLUS_NANO:
if (vals[1] < 0)
- return snprintf(buf, len, "-%d.%09u", abs(vals[0]),
+ return scnprintf(buf, len, "-%d.%09u", abs(vals[0]),
-vals[1]);
else
- return snprintf(buf, len, "%d.%09u", vals[0], vals[1]);
+ return scnprintf(buf, len, "%d.%09u", vals[0], vals[1]);
case IIO_VAL_FRACTIONAL:
tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
tmp1 = vals[1];
tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
- return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
+ return scnprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_FRACTIONAL_LOG2:
tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
- return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
+ return scnprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_INT_MULTIPLE:
{
int i;
int l = 0;
for (i = 0; i < size; ++i) {
- l += snprintf(&buf[l], len - l, "%d ", vals[i]);
+ l += scnprintf(&buf[l], len - l, "%d ", vals[i]);
if (l >= len)
break;
}
return l;
}
case IIO_VAL_CHAR:
- return snprintf(buf, len, "%c", (char)vals[0]);
+ return scnprintf(buf, len, "%c", (char)vals[0]);
default:
return 0;
}
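The snprintf() -> scnprintf() swap matters for the accumulating callers further down: snprintf() returns the length that would have been written had the buffer been large enough, while scnprintf() returns what was actually written, so only the latter keeps len a safe offset into the PAGE_SIZE sysfs buffer. A minimal sketch:

	char buf[8];
	int len;

	len = snprintf(buf, sizeof(buf), "0123456789");	/* len == 10 */
	len = scnprintf(buf, sizeof(buf), "0123456789");	/* len == 7 */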
@@ -682,10 +682,10 @@ static ssize_t iio_format_avail_list(char *buf, const int *vals,
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < length - 1)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -698,10 +698,10 @@ static ssize_t iio_format_avail_list(char *buf, const int *vals,
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < length / 2 - 1)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -725,10 +725,10 @@ static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < 2)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"]\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -741,10 +741,10 @@ static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < 2)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"]\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -1507,27 +1507,27 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
alloc_size += IIO_ALIGN - 1;
dev = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dev)
+ return NULL;
- if (dev) {
- dev->dev.groups = dev->groups;
- dev->dev.type = &iio_device_type;
- dev->dev.bus = &iio_bus_type;
- device_initialize(&dev->dev);
- dev_set_drvdata(&dev->dev, (void *)dev);
- mutex_init(&dev->mlock);
- mutex_init(&dev->info_exist_lock);
- INIT_LIST_HEAD(&dev->channel_attr_list);
-
- dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
- if (dev->id < 0) {
- /* cannot use a dev_err as the name isn't available */
- pr_err("failed to get device id\n");
- kfree(dev);
- return NULL;
- }
- dev_set_name(&dev->dev, "iio:device%d", dev->id);
- INIT_LIST_HEAD(&dev->buffer_list);
+ dev->dev.groups = dev->groups;
+ dev->dev.type = &iio_device_type;
+ dev->dev.bus = &iio_bus_type;
+ device_initialize(&dev->dev);
+ dev_set_drvdata(&dev->dev, (void *)dev);
+ mutex_init(&dev->mlock);
+ mutex_init(&dev->info_exist_lock);
+ INIT_LIST_HEAD(&dev->channel_attr_list);
+
+ dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
+ if (dev->id < 0) {
+ /* cannot use a dev_err as the name isn't available */
+ pr_err("failed to get device id\n");
+ kfree(dev);
+ return NULL;
}
+ dev_set_name(&dev->dev, "iio:device%d", dev->id);
+ INIT_LIST_HEAD(&dev->buffer_list);
return dev;
}
@@ -1549,17 +1549,6 @@ static void devm_iio_device_release(struct device *dev, void *res)
iio_device_free(*(struct iio_dev **)res);
}
-int devm_iio_device_match(struct device *dev, void *res, void *data)
-{
- struct iio_dev **r = res;
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
- return *r == data;
-}
-EXPORT_SYMBOL_GPL(devm_iio_device_match);
-
/**
* devm_iio_device_alloc - Resource-managed iio_device_alloc()
* @dev: Device to allocate iio_dev for
@@ -1568,9 +1557,6 @@ EXPORT_SYMBOL_GPL(devm_iio_device_match);
* Managed iio_device_alloc. iio_dev allocated with this function is
* automatically freed on driver detach.
*
- * If an iio_dev allocated with this function needs to be freed separately,
- * devm_iio_device_free() must be used.
- *
* RETURNS:
* Pointer to allocated iio_dev on success, NULL on failure.
*/
@@ -1596,23 +1582,6 @@ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
/**
- * devm_iio_device_free - Resource-managed iio_device_free()
- * @dev: Device this iio_dev belongs to
- * @iio_dev: the iio_dev associated with the device
- *
- * Free iio_dev allocated with devm_iio_device_alloc().
- */
-void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_device_release,
- devm_iio_device_match, iio_dev);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_device_free);
-
-/**
* iio_chrdev_open() - chrdev file open for buffer access and ioctls
* @inode: Inode structure for identifying the device in the file system
* @filp: File structure for iio device used to keep and later access
@@ -1714,6 +1683,9 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
int ret;
+ if (!indio_dev->info)
+ return -EINVAL;
+
indio_dev->driver_module = this_mod;
/* If the calling driver did not initialize of_node, do it here */
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
@@ -1726,9 +1698,6 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
if (ret < 0)
return ret;
- if (!indio_dev->info)
- return -EINVAL;
-
/* configure elements for the chrdev */
indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
@@ -1834,23 +1803,6 @@ int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
EXPORT_SYMBOL_GPL(__devm_iio_device_register);
/**
- * devm_iio_device_unregister - Resource-managed iio_device_unregister()
- * @dev: Device this iio_dev belongs to
- * @indio_dev: the iio_dev associated with the device
- *
- * Unregister iio_dev registered with devm_iio_device_register().
- */
-void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_device_unreg,
- devm_iio_device_match, indio_dev);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
-
-/**
* iio_device_claim_direct_mode - Keep device in direct mode
* @indio_dev: the iio_dev associated with the device
*
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 3908a9a90035..53d1931f6be8 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -585,18 +585,6 @@ static void devm_iio_trigger_release(struct device *dev, void *res)
iio_trigger_free(*(struct iio_trigger **)res);
}
-static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
-{
- struct iio_trigger **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
-
- return *r == data;
-}
-
/**
* devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
* @dev: Device to allocate iio_trigger for
@@ -608,9 +596,6 @@ static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
* Managed iio_trigger_alloc. iio_trigger allocated with this function is
* automatically freed on driver detach.
*
- * If an iio_trigger allocated with this function needs to be freed separately,
- * devm_iio_trigger_free() must be used.
- *
* RETURNS:
* Pointer to allocated iio_trigger on success, NULL on failure.
*/
@@ -640,23 +625,6 @@ struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
-/**
- * devm_iio_trigger_free - Resource-managed iio_trigger_free()
- * @dev: Device this iio_dev belongs to
- * @iio_trig: the iio_trigger associated with the device
- *
- * Free iio_trigger allocated with devm_iio_trigger_alloc().
- */
-void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_trigger_release,
- devm_iio_trigger_match, iio_trig);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_trigger_free);
-
static void devm_iio_trigger_unreg(struct device *dev, void *res)
{
iio_trigger_unregister(*(struct iio_trigger **)res);
@@ -673,9 +641,6 @@ static void devm_iio_trigger_unreg(struct device *dev, void *res)
* calls iio_trigger_register() internally. Refer to that function for more
* information.
*
- * If an iio_trigger registered with this function needs to be unregistered
- * separately, devm_iio_trigger_unregister() must be used.
- *
* RETURNS:
* 0 on success, negative error number on failure.
*/
@@ -701,24 +666,6 @@ int __devm_iio_trigger_register(struct device *dev,
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);
-/**
- * devm_iio_trigger_unregister - Resource-managed iio_trigger_unregister()
- * @dev: device this iio_trigger belongs to
- * @trig_info: the trigger associated with the device
- *
- * Unregister trigger registered with devm_iio_trigger_register().
- */
-void devm_iio_trigger_unregister(struct device *dev,
- struct iio_trigger *trig_info)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_trigger_unreg, devm_iio_trigger_match,
- trig_info);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_trigger_unregister);
-
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
return indio_dev->trig->attached_own_device;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 5a8351c9a426..ede99e0d5371 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -360,18 +360,6 @@ static void devm_iio_channel_free(struct device *dev, void *res)
iio_channel_release(channel);
}
-static int devm_iio_channel_match(struct device *dev, void *res, void *data)
-{
- struct iio_channel **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
-
- return *r == data;
-}
-
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *channel_name)
{
@@ -394,13 +382,6 @@ struct iio_channel *devm_iio_channel_get(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
-void devm_iio_channel_release(struct device *dev, struct iio_channel *channel)
-{
- WARN_ON(devres_release(dev, devm_iio_channel_free,
- devm_iio_channel_match, channel));
-}
-EXPORT_SYMBOL_GPL(devm_iio_channel_release);
-
struct iio_channel *iio_channel_get_all(struct device *dev)
{
const char *name;
@@ -514,14 +495,6 @@ struct iio_channel *devm_iio_channel_get_all(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
-void devm_iio_channel_release_all(struct device *dev,
- struct iio_channel *channels)
-{
- WARN_ON(devres_release(dev, devm_iio_channel_free_all,
- devm_iio_channel_match, channels));
-}
-EXPORT_SYMBOL_GPL(devm_iio_channel_release_all);
-
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum info)
{
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index b27719cefcf9..182bd18c4bb2 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -516,6 +516,8 @@ config US5182D
config VCNL4000
tristate "VCNL4000/4010/4020/4200 combined ALS and proximity sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VCNL4000,
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index a8361006dcd9..03f2d8d123c4 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -273,13 +273,11 @@ static const struct i2c_device_id bh1780_id[] = {
MODULE_DEVICE_TABLE(i2c, bh1780_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_bh1780_match[] = {
{ .compatible = "rohm,bh1780gli", },
{},
};
MODULE_DEVICE_TABLE(of, of_bh1780_match);
-#endif
static struct i2c_driver bh1780_driver = {
.probe = bh1780_probe,
@@ -288,7 +286,7 @@ static struct i2c_driver bh1780_driver = {
.driver = {
.name = "bh1780",
.pm = &bh1780_dev_pm_ops,
- .of_match_table = of_match_ptr(of_bh1780_match),
+ .of_match_table = of_bh1780_match,
},
};
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 5f4fb5674fa0..160eb3f99795 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -4,11 +4,13 @@
* Author: Kevin Tsai <ktsai@capellamicro.com>
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
@@ -18,17 +20,24 @@
/* Registers Address */
#define CM32181_REG_ADDR_CMD 0x00
+#define CM32181_REG_ADDR_WH 0x01
+#define CM32181_REG_ADDR_WL 0x02
+#define CM32181_REG_ADDR_TEST 0x03
#define CM32181_REG_ADDR_ALS 0x04
#define CM32181_REG_ADDR_STATUS 0x06
#define CM32181_REG_ADDR_ID 0x07
/* Number of Configurable Registers */
-#define CM32181_CONF_REG_NUM 0x01
+#define CM32181_CONF_REG_NUM 4
/* CMD register */
-#define CM32181_CMD_ALS_ENABLE 0x00
-#define CM32181_CMD_ALS_DISABLE 0x01
-#define CM32181_CMD_ALS_INT_EN 0x02
+#define CM32181_CMD_ALS_DISABLE BIT(0)
+#define CM32181_CMD_ALS_INT_EN BIT(1)
+#define CM32181_CMD_ALS_THRES_WINDOW BIT(2)
+
+#define CM32181_CMD_ALS_PERS_SHIFT 4
+#define CM32181_CMD_ALS_PERS_MASK (0x03 << CM32181_CMD_ALS_PERS_SHIFT)
+#define CM32181_CMD_ALS_PERS_DEFAULT (0x01 << CM32181_CMD_ALS_PERS_SHIFT)
#define CM32181_CMD_ALS_IT_SHIFT 6
#define CM32181_CMD_ALS_IT_MASK (0x0F << CM32181_CMD_ALS_IT_SHIFT)
@@ -38,27 +47,133 @@
#define CM32181_CMD_ALS_SM_MASK (0x03 << CM32181_CMD_ALS_SM_SHIFT)
#define CM32181_CMD_ALS_SM_DEFAULT (0x01 << CM32181_CMD_ALS_SM_SHIFT)
-#define CM32181_MLUX_PER_BIT 5 /* ALS_SM=01 IT=800ms */
-#define CM32181_MLUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */
-#define CM32181_CALIBSCALE_DEFAULT 1000
-#define CM32181_CALIBSCALE_RESOLUTION 1000
-#define MLUX_PER_LUX 1000
+#define CM32181_LUX_PER_BIT 500 /* ALS_SM=01 IT=800ms */
+#define CM32181_LUX_PER_BIT_RESOLUTION 100000
+#define CM32181_LUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */
+#define CM32181_CALIBSCALE_DEFAULT 100000
+#define CM32181_CALIBSCALE_RESOLUTION 100000
-static const u8 cm32181_reg[CM32181_CONF_REG_NUM] = {
- CM32181_REG_ADDR_CMD,
-};
+#define SMBUS_ALERT_RESPONSE_ADDRESS 0x0c
+
+/* CPM0 Index 0: device-id (3218 or 32181), 1: Unknown, 2: init_regs_bitmap */
+#define CPM0_REGS_BITMAP 2
+#define CPM0_HEADER_SIZE 3
-static const int als_it_bits[] = {12, 8, 0, 1, 2, 3};
-static const int als_it_value[] = {25000, 50000, 100000, 200000, 400000,
- 800000};
+/* CPM1 Index 0: lux_per_bit, 1: calibscale, 2: resolution (100000) */
+#define CPM1_LUX_PER_BIT 0
+#define CPM1_CALIBSCALE 1
+#define CPM1_SIZE 3
+
+/* CM3218 Family */
+static const int cm3218_als_it_bits[] = { 0, 1, 2, 3 };
+static const int cm3218_als_it_values[] = { 100000, 200000, 400000, 800000 };
+
+/* CM32181 Family */
+static const int cm32181_als_it_bits[] = { 12, 8, 0, 1, 2, 3 };
+static const int cm32181_als_it_values[] = {
+ 25000, 50000, 100000, 200000, 400000, 800000
+};
struct cm32181_chip {
struct i2c_client *client;
+ struct device *dev;
struct mutex lock;
u16 conf_regs[CM32181_CONF_REG_NUM];
+ unsigned long init_regs_bitmap;
int calibscale;
+ int lux_per_bit;
+ int lux_per_bit_base_it;
+ int num_als_it;
+ const int *als_it_bits;
+ const int *als_it_values;
};
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2);
+
+#ifdef CONFIG_ACPI
+/**
+ * cm32181_acpi_get_cpm() - Get CPM object from ACPI
+ * @dev: pointer to the sensor's struct device.
+ * @obj_name: name of the ACPI CPM object to evaluate.
+ * @values: array in which the table elements are returned.
+ * @count: maximum number of elements to return.
+ *
+ * Convert an ACPI CPM table to an array.
+ *
+ * Return: -ENODEV on failure, otherwise the number of elements.
+ */
+static int cm32181_acpi_get_cpm(struct device *dev, char *obj_name,
+ u64 *values, int count)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *cpm, *elem;
+ acpi_handle handle;
+ acpi_status status;
+ int i;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -ENODEV;
+
+ status = acpi_evaluate_object(handle, obj_name, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "object %s not found\n", obj_name);
+ return -ENODEV;
+ }
+
+ cpm = buffer.pointer;
+ if (cpm->package.count > count)
+ dev_warn(dev, "%s table contains %u values, only using first %d values\n",
+ obj_name, cpm->package.count, count);
+
+ count = min_t(int, cpm->package.count, count);
+ for (i = 0; i < count; i++) {
+ elem = &(cpm->package.elements[i]);
+ values[i] = elem->integer.value;
+ }
+
+ kfree(buffer.pointer);
+
+ return count;
+}
+
+static void cm32181_acpi_parse_cpm_tables(struct cm32181_chip *cm32181)
+{
+ u64 vals[CPM0_HEADER_SIZE + CM32181_CONF_REG_NUM];
+ struct device *dev = cm32181->dev;
+ int i, count;
+
+ count = cm32181_acpi_get_cpm(dev, "CPM0", vals, ARRAY_SIZE(vals));
+ if (count <= CPM0_HEADER_SIZE)
+ return;
+
+ count -= CPM0_HEADER_SIZE;
+
+ cm32181->init_regs_bitmap = vals[CPM0_REGS_BITMAP];
+ cm32181->init_regs_bitmap &= GENMASK(count - 1, 0);
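+ /* Each set bit selects a conf register seeded from the CPM0 payload. */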
+ for_each_set_bit(i, &cm32181->init_regs_bitmap, count)
+ cm32181->conf_regs[i] = vals[CPM0_HEADER_SIZE + i];
+
+ count = cm32181_acpi_get_cpm(dev, "CPM1", vals, ARRAY_SIZE(vals));
+ if (count != CPM1_SIZE)
+ return;
+
+ cm32181->lux_per_bit = vals[CPM1_LUX_PER_BIT];
+
+ /* Check for uncalibrated devices */
+ if (vals[CPM1_CALIBSCALE] == CM32181_CALIBSCALE_DEFAULT)
+ return;
+
+ cm32181->calibscale = vals[CPM1_CALIBSCALE];
+ /* CPM1 lux_per_bit is for the current integration-time (IT) value */
+ cm32181_read_als_it(cm32181, &cm32181->lux_per_bit_base_it);
+}
+#else
+static void cm32181_acpi_parse_cpm_tables(struct cm32181_chip *cm32181)
+{
+}
+#endif /* CONFIG_ACPI */
+
/**
* cm32181_reg_init() - Initialize CM32181 registers
* @cm32181: pointer of struct cm32181.
@@ -78,18 +193,37 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
return ret;
/* check device ID */
- if ((ret & 0xFF) != 0x81)
+ switch (ret & 0xFF) {
+ case 0x18: /* CM3218 */
+ cm32181->num_als_it = ARRAY_SIZE(cm3218_als_it_bits);
+ cm32181->als_it_bits = cm3218_als_it_bits;
+ cm32181->als_it_values = cm3218_als_it_values;
+ break;
+ case 0x81: /* CM32181 */
+ case 0x82: /* CM32182, fully compat. with CM32181 */
+ cm32181->num_als_it = ARRAY_SIZE(cm32181_als_it_bits);
+ cm32181->als_it_bits = cm32181_als_it_bits;
+ cm32181->als_it_values = cm32181_als_it_values;
+ break;
+ default:
return -ENODEV;
+ }
/* Default Values */
- cm32181->conf_regs[CM32181_REG_ADDR_CMD] = CM32181_CMD_ALS_ENABLE |
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] =
CM32181_CMD_ALS_IT_DEFAULT | CM32181_CMD_ALS_SM_DEFAULT;
+ cm32181->init_regs_bitmap = BIT(CM32181_REG_ADDR_CMD);
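+ /* By default only the CMD register is written; ACPI CPM0 may override this bitmap. */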
cm32181->calibscale = CM32181_CALIBSCALE_DEFAULT;
+ cm32181->lux_per_bit = CM32181_LUX_PER_BIT;
+ cm32181->lux_per_bit_base_it = CM32181_LUX_PER_BIT_BASE_IT;
+
+ if (ACPI_HANDLE(cm32181->dev))
+ cm32181_acpi_parse_cpm_tables(cm32181);
/* Initialize registers*/
- for (i = 0; i < CM32181_CONF_REG_NUM; i++) {
- ret = i2c_smbus_write_word_data(client, cm32181_reg[i],
- cm32181->conf_regs[i]);
+ for_each_set_bit(i, &cm32181->init_regs_bitmap, CM32181_CONF_REG_NUM) {
+ ret = i2c_smbus_write_word_data(client, i,
+ cm32181->conf_regs[i]);
if (ret < 0)
return ret;
}
@@ -102,7 +236,7 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
* @cm32181: pointer of struct cm32181
* @val2: pointer of int to load the als_it value.
*
- * Report the current integartion time by millisecond.
+ * Report the current integration time in milliseconds.
*
* Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
*/
@@ -114,9 +248,9 @@ static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2)
als_it = cm32181->conf_regs[CM32181_REG_ADDR_CMD];
als_it &= CM32181_CMD_ALS_IT_MASK;
als_it >>= CM32181_CMD_ALS_IT_SHIFT;
- for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
- if (als_it == als_it_bits[i]) {
- *val2 = als_it_value[i];
+ for (i = 0; i < cm32181->num_als_it; i++) {
+ if (als_it == cm32181->als_it_bits[i]) {
+ *val2 = cm32181->als_it_values[i];
return IIO_VAL_INT_PLUS_MICRO;
}
}
@@ -139,14 +273,14 @@ static int cm32181_write_als_it(struct cm32181_chip *cm32181, int val)
u16 als_it;
int ret, i, n;
- n = ARRAY_SIZE(als_it_value);
+ n = cm32181->num_als_it;
for (i = 0; i < n; i++)
- if (val <= als_it_value[i])
+ if (val <= cm32181->als_it_values[i])
break;
if (i >= n)
i = n - 1;
- als_it = als_it_bits[i];
+ als_it = cm32181->als_it_bits[i];
als_it <<= CM32181_CMD_ALS_IT_SHIFT;
mutex_lock(&cm32181->lock);
@@ -175,15 +309,15 @@ static int cm32181_get_lux(struct cm32181_chip *cm32181)
struct i2c_client *client = cm32181->client;
int ret;
int als_it;
- unsigned long lux;
+ u64 lux;
ret = cm32181_read_als_it(cm32181, &als_it);
if (ret < 0)
return -EINVAL;
- lux = CM32181_MLUX_PER_BIT;
- lux *= CM32181_MLUX_PER_BIT_BASE_IT;
- lux /= als_it;
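+ /* Scale lux-per-bit from the base integration time to the active one. */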
+ lux = cm32181->lux_per_bit;
+ lux *= cm32181->lux_per_bit_base_it;
+ lux = div_u64(lux, als_it);
ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ALS);
if (ret < 0)
@@ -191,8 +325,8 @@ static int cm32181_get_lux(struct cm32181_chip *cm32181)
lux *= ret;
lux *= cm32181->calibscale;
- lux /= CM32181_CALIBSCALE_RESOLUTION;
- lux /= MLUX_PER_LUX;
+ lux = div_u64(lux, CM32181_CALIBSCALE_RESOLUTION);
+ lux = div_u64(lux, CM32181_LUX_PER_BIT_RESOLUTION);
if (lux > 0xFFFF)
lux = 0xFFFF;
@@ -258,11 +392,12 @@ static int cm32181_write_raw(struct iio_dev *indio_dev,
static ssize_t cm32181_get_it_available(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct cm32181_chip *cm32181 = iio_priv(dev_to_iio_dev(dev));
int i, n, len;
- n = ARRAY_SIZE(als_it_value);
+ n = cm32181->num_als_it;
for (i = 0, len = 0; i < n; i++)
- len += sprintf(buf + len, "0.%06u ", als_it_value[i]);
+ len += sprintf(buf + len, "0.%06u ", cm32181->als_it_values[i]);
return len + sprintf(buf + len, "\n");
}
@@ -294,70 +429,86 @@ static const struct iio_info cm32181_info = {
.attrs = &cm32181_attribute_group,
};
-static int cm32181_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int cm32181_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct cm32181_chip *cm32181;
struct iio_dev *indio_dev;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*cm32181));
- if (!indio_dev) {
- dev_err(&client->dev, "devm_iio_device_alloc failed\n");
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*cm32181));
+ if (!indio_dev)
return -ENOMEM;
+
+ /*
+ * Some ACPI systems list two I2C resources for the CM3218 sensor:
+ * the SMBus Alert Response Address (ARA, 0x0c) and the actual I2C
+ * address. Detect this and take the following steps to deal with it:
+ * 1. When an SMBus Alert capable sensor has an Alert asserted, it will
+ * not respond on its actual I2C address. Read a byte from the ARA
+ * to clear any pending Alerts.
+ * 2. Create a "dummy" client for the actual I2C address and
+ * use that client to communicate with the sensor.
+ */
+ if (ACPI_HANDLE(dev) && client->addr == SMBUS_ALERT_RESPONSE_ADDRESS) {
+ struct i2c_board_info board_info = { .type = "dummy" };
+
+ i2c_smbus_read_byte(client);
+
+ client = i2c_acpi_new_device(dev, 1, &board_info);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
}
cm32181 = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
cm32181->client = client;
+ cm32181->dev = dev;
mutex_init(&cm32181->lock);
- indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.parent = dev;
indio_dev->channels = cm32181_channels;
indio_dev->num_channels = ARRAY_SIZE(cm32181_channels);
indio_dev->info = &cm32181_info;
- indio_dev->name = id->name;
+ indio_dev->name = dev_name(dev);
indio_dev->modes = INDIO_DIRECT_MODE;
ret = cm32181_reg_init(cm32181);
if (ret) {
- dev_err(&client->dev,
- "%s: register init failed\n",
- __func__);
+ dev_err(dev, "%s: register init failed\n", __func__);
return ret;
}
- ret = devm_iio_device_register(&client->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(&client->dev,
- "%s: regist device failed\n",
- __func__);
+ dev_err(dev, "%s: regist device failed\n", __func__);
return ret;
}
return 0;
}
-static const struct i2c_device_id cm32181_id[] = {
- { "cm32181", 0 },
- { }
-};
-
-MODULE_DEVICE_TABLE(i2c, cm32181_id);
-
static const struct of_device_id cm32181_of_match[] = {
+ { .compatible = "capella,cm3218" },
{ .compatible = "capella,cm32181" },
{ }
};
MODULE_DEVICE_TABLE(of, cm32181_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cm32181_acpi_match[] = {
+ { "CPLM3218", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cm32181_acpi_match);
+#endif
+
static struct i2c_driver cm32181_driver = {
.driver = {
.name = "cm32181",
- .of_match_table = of_match_ptr(cm32181_of_match),
+ .acpi_match_table = ACPI_PTR(cm32181_acpi_match),
+ .of_match_table = cm32181_of_match,
},
- .id_table = cm32181_id,
- .probe = cm32181_probe,
+ .probe_new = cm32181_probe,
};
module_i2c_driver(cm32181_driver);
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index cd3cfb7d02bd..867200825686 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/init.h>
@@ -418,7 +419,7 @@ MODULE_DEVICE_TABLE(of, cm3232_of_match);
static struct i2c_driver cm3232_driver = {
.driver = {
.name = "cm3232",
- .of_match_table = of_match_ptr(cm3232_of_match),
+ .of_match_table = cm3232_of_match,
#ifdef CONFIG_PM_SLEEP
.pm = &cm3232_pm_ops,
#endif
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index b7ef16b28280..7a2679bdc987 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -158,6 +158,9 @@ static irqreturn_t gp2ap002_prox_irq(int irq, void *d)
int val;
int ret;
+ if (!gp2ap002->enabled)
+ goto err_retrig;
+
ret = regmap_read(gp2ap002->map, GP2AP002_PROX, &val);
if (ret) {
dev_err(gp2ap002->dev, "error reading proximity\n");
@@ -247,6 +250,8 @@ static int gp2ap002_read_raw(struct iio_dev *indio_dev,
struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
int ret;
+ pm_runtime_get_sync(gp2ap002->dev);
+
switch (mask) {
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
@@ -255,13 +260,21 @@ static int gp2ap002_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
*val = ret;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ goto out;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+out:
+ pm_runtime_mark_last_busy(gp2ap002->dev);
+ pm_runtime_put_autosuspend(gp2ap002->dev);
+
+ return ret;
}
static int gp2ap002_init(struct gp2ap002 *gp2ap002)
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 7fbbce0d4bc7..070d4cd0cf54 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -38,8 +38,8 @@
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -1617,18 +1617,16 @@ static const struct i2c_device_id gp2ap020a00f_id[] = {
MODULE_DEVICE_TABLE(i2c, gp2ap020a00f_id);
-#ifdef CONFIG_OF
static const struct of_device_id gp2ap020a00f_of_match[] = {
{ .compatible = "sharp,gp2ap020a00f" },
{ }
};
MODULE_DEVICE_TABLE(of, gp2ap020a00f_of_match);
-#endif
static struct i2c_driver gp2ap020a00f_driver = {
.driver = {
.name = GP2A_I2C_NAME,
- .of_match_table = of_match_ptr(gp2ap020a00f_of_match),
+ .of_match_table = gp2ap020a00f_of_match,
},
.probe = gp2ap020a00f_probe,
.remove = gp2ap020a00f_remove,
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index b6cd299517d1..81fa2a422797 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum {
@@ -308,18 +306,13 @@ static int hid_als_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&als_state->common_attributes.data_ready, 0);
+
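+ /* hid_sensor_setup_trigger() now also sets up the triggered buffer. */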
ret = hid_sensor_setup_trigger(indio_dev, name,
&als_state->common_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -343,9 +336,7 @@ static int hid_als_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&als_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &als_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -360,8 +351,7 @@ static int hid_als_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&als_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &als_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 7e1030af9ba3..e9c04df07344 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
#define CHANNEL_SCAN_INDEX_PRESENCE 0
@@ -286,18 +284,13 @@ static int hid_prox_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&prox_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&prox_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -321,9 +314,7 @@ static int hid_prox_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&prox_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &prox_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -338,8 +329,7 @@ static int hid_prox_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PROX);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&prox_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &prox_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index e37894f0ae0b..95611f5eff01 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -213,13 +213,24 @@ static const struct iio_info isl29125_info = {
.attrs = &isl29125_attribute_group,
};
-static int isl29125_buffer_preenable(struct iio_dev *indio_dev)
+static int isl29125_buffer_postenable(struct iio_dev *indio_dev)
{
struct isl29125_data *data = iio_priv(indio_dev);
+ int err;
+
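+ /* Attach the poll function (core postenable) before enabling RGB mode. */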
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err)
+ return err;
data->conf1 |= ISL29125_MODE_RGB;
- return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+ err = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
data->conf1);
+ if (err) {
+ iio_triggered_buffer_predisable(indio_dev);
+ return err;
+ }
+
+ return 0;
}
static int isl29125_buffer_predisable(struct iio_dev *indio_dev)
@@ -227,19 +238,18 @@ static int isl29125_buffer_predisable(struct iio_dev *indio_dev)
struct isl29125_data *data = iio_priv(indio_dev);
int ret;
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret < 0)
- return ret;
-
data->conf1 &= ~ISL29125_MODE_MASK;
data->conf1 |= ISL29125_MODE_PD;
- return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+ ret = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
data->conf1);
+
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
}
static const struct iio_buffer_setup_ops isl29125_buffer_setup_ops = {
- .preenable = isl29125_buffer_preenable,
- .postenable = &iio_triggered_buffer_postenable,
+ .postenable = isl29125_buffer_postenable,
.predisable = isl29125_buffer_predisable,
};
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 71f99d2a22c1..5a3fcb127cd2 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -101,12 +101,12 @@ struct ltr501_gain {
int uscale;
};
-static struct ltr501_gain ltr501_als_gain_tbl[] = {
+static const struct ltr501_gain ltr501_als_gain_tbl[] = {
{1, 0},
{0, 5000},
};
-static struct ltr501_gain ltr559_als_gain_tbl[] = {
+static const struct ltr501_gain ltr559_als_gain_tbl[] = {
{1, 0},
{0, 500000},
{0, 250000},
@@ -117,14 +117,14 @@ static struct ltr501_gain ltr559_als_gain_tbl[] = {
{0, 10000},
};
-static struct ltr501_gain ltr501_ps_gain_tbl[] = {
+static const struct ltr501_gain ltr501_ps_gain_tbl[] = {
{1, 0},
{0, 250000},
{0, 125000},
{0, 62500},
};
-static struct ltr501_gain ltr559_ps_gain_tbl[] = {
+static const struct ltr501_gain ltr559_ps_gain_tbl[] = {
{0, 62500}, /* x16 gain */
{0, 31250}, /* x32 gain */
{0, 15625}, /* bits X1 are for x64 gain */
@@ -133,9 +133,9 @@ static struct ltr501_gain ltr559_ps_gain_tbl[] = {
struct ltr501_chip_info {
u8 partid;
- struct ltr501_gain *als_gain;
+ const struct ltr501_gain *als_gain;
int als_gain_tbl_size;
- struct ltr501_gain *ps_gain;
+ const struct ltr501_gain *ps_gain;
int ps_gain_tbl_size;
u8 als_mode_active;
u8 als_gain_mask;
@@ -192,7 +192,7 @@ static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
return -EINVAL;
}
-static int ltr501_als_read_samp_freq(struct ltr501_data *data,
+static int ltr501_als_read_samp_freq(const struct ltr501_data *data,
int *val, int *val2)
{
int ret, i;
@@ -210,7 +210,7 @@ static int ltr501_als_read_samp_freq(struct ltr501_data *data,
return IIO_VAL_INT_PLUS_MICRO;
}
-static int ltr501_ps_read_samp_freq(struct ltr501_data *data,
+static int ltr501_ps_read_samp_freq(const struct ltr501_data *data,
int *val, int *val2)
{
int ret, i;
@@ -266,7 +266,7 @@ static int ltr501_ps_write_samp_freq(struct ltr501_data *data,
return ret;
}
-static int ltr501_als_read_samp_period(struct ltr501_data *data, int *val)
+static int ltr501_als_read_samp_period(const struct ltr501_data *data, int *val)
{
int ret, i;
@@ -282,7 +282,7 @@ static int ltr501_als_read_samp_period(struct ltr501_data *data, int *val)
return IIO_VAL_INT;
}
-static int ltr501_ps_read_samp_period(struct ltr501_data *data, int *val)
+static int ltr501_ps_read_samp_period(const struct ltr501_data *data, int *val)
{
int ret, i;
@@ -321,7 +321,7 @@ static unsigned long ltr501_calculate_lux(u16 vis_data, u16 ir_data)
return lux / 1000;
}
-static int ltr501_drdy(struct ltr501_data *data, u8 drdy_mask)
+static int ltr501_drdy(const struct ltr501_data *data, u8 drdy_mask)
{
int tries = 100;
int ret, status;
@@ -373,7 +373,8 @@ static int ltr501_set_it_time(struct ltr501_data *data, int it)
}
/* read int time in micro seconds */
-static int ltr501_read_it_time(struct ltr501_data *data, int *val, int *val2)
+static int ltr501_read_it_time(const struct ltr501_data *data,
+ int *val, int *val2)
{
int ret, index;
@@ -391,7 +392,7 @@ static int ltr501_read_it_time(struct ltr501_data *data, int *val, int *val2)
return IIO_VAL_INT_PLUS_MICRO;
}
-static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2])
+static int ltr501_read_als(const struct ltr501_data *data, __le16 buf[2])
{
int ret;
@@ -403,7 +404,7 @@ static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2])
buf, 2 * sizeof(__le16));
}
-static int ltr501_read_ps(struct ltr501_data *data)
+static int ltr501_read_ps(const struct ltr501_data *data)
{
int ret, status;
@@ -419,7 +420,7 @@ static int ltr501_read_ps(struct ltr501_data *data)
return status;
}
-static int ltr501_read_intr_prst(struct ltr501_data *data,
+static int ltr501_read_intr_prst(const struct ltr501_data *data,
enum iio_chan_type type,
int *val2)
{
@@ -716,7 +717,7 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ltr501_get_gain_index(struct ltr501_gain *gain, int size,
+static int ltr501_get_gain_index(const struct ltr501_gain *gain, int size,
int val, int val2)
{
int i;
@@ -848,14 +849,14 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int ltr501_read_thresh(struct iio_dev *indio_dev,
+static int ltr501_read_thresh(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info,
int *val, int *val2)
{
- struct ltr501_data *data = iio_priv(indio_dev);
+ const struct ltr501_data *data = iio_priv(indio_dev);
int ret, thresh_data;
switch (chan->type) {
@@ -1263,7 +1264,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
if (mask & LTR501_STATUS_ALS_RDY) {
ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
- (u8 *)als_buf, sizeof(als_buf));
+ als_buf, sizeof(als_buf));
if (ret < 0)
return ret;
if (test_bit(0, indio_dev->active_scan_mask))
@@ -1359,7 +1360,7 @@ static bool ltr501_is_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config ltr501_regmap_config = {
+static const struct regmap_config ltr501_regmap_config = {
.name = LTR501_REGMAP_NAME,
.reg_bits = 8,
.val_bits = 8,
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 92004a2563ea..82abfa57b59c 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -16,6 +16,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -844,7 +845,7 @@ static struct i2c_driver opt3001_driver = {
.driver = {
.name = "opt3001",
- .of_match_table = of_match_ptr(opt3001_of_match),
+ .of_match_table = opt3001_of_match,
},
};
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
index 9174ab928880..c1adab2a50fd 100644
--- a/drivers/iio/light/si1133.c
+++ b/drivers/iio/light/si1133.c
@@ -17,6 +17,8 @@
#include <linux/util_macros.h>
+#include <asm/unaligned.h>
+
#define SI1133_REG_PART_ID 0x00
#define SI1133_REG_REV_ID 0x01
#define SI1133_REG_MFR_ID 0x02
@@ -104,8 +106,6 @@
#define SI1133_LUX_BUFFER_SIZE 9
#define SI1133_MEASURE_BUFFER_SIZE 3
-#define SI1133_SIGN_BIT_INDEX 23
-
static const int si1133_scale_available[] = {
1, 2, 4, 8, 16, 32, 64, 128};
@@ -633,8 +633,7 @@ static int si1133_measure(struct si1133_data *data,
if (err)
return err;
- *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
- SI1133_SIGN_BIT_INDEX);
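+ /* The sample is a 24-bit big-endian value; bit 23 is the sign bit. */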
+ *val = sign_extend32(get_unaligned_be24(&buffer[0]), 23);
return err;
}
@@ -723,16 +722,11 @@ static int si1133_get_lux(struct si1133_data *data, int *val)
if (err)
return err;
- high_vis =
- sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
- SI1133_SIGN_BIT_INDEX);
+ high_vis = sign_extend32(get_unaligned_be24(&buffer[0]), 23);
- low_vis =
- sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5],
- SI1133_SIGN_BIT_INDEX);
+ low_vis = sign_extend32(get_unaligned_be24(&buffer[3]), 23);
- ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8],
- SI1133_SIGN_BIT_INDEX);
+ ir = sign_extend32(get_unaligned_be24(&buffer[6]), 23);
if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD)
lux = si1133_calc_polynomial(high_vis, ir,
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index 4889bbeb0c73..98cd49eefe45 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -31,8 +32,8 @@ static int st_uvis25_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &st_uvis25_i2c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -55,7 +56,7 @@ static struct i2c_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_i2c",
.pm = &st_uvis25_pm_ops,
- .of_match_table = of_match_ptr(st_uvis25_i2c_of_match),
+ .of_match_table = st_uvis25_i2c_of_match,
},
.probe = st_uvis25_i2c_probe,
.id_table = st_uvis25_i2c_id_table,
diff --git a/drivers/iio/light/st_uvis25_spi.c b/drivers/iio/light/st_uvis25_spi.c
index a9ceae4f58b3..af9d94d12787 100644
--- a/drivers/iio/light/st_uvis25_spi.c
+++ b/drivers/iio/light/st_uvis25_spi.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -31,8 +32,8 @@ static int st_uvis25_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &st_uvis25_spi_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -55,7 +56,7 @@ static struct spi_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_spi",
.pm = &st_uvis25_pm_ops,
- .of_match_table = of_match_ptr(st_uvis25_spi_of_match),
+ .of_match_table = st_uvis25_spi_of_match,
},
.probe = st_uvis25_spi_probe,
.id_table = st_uvis25_spi_id_table,
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index d8c40a83097d..27a5c28aac7f 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -69,7 +69,7 @@
#define TSL2563_TIMING_GAIN16 0x10
#define TSL2563_TIMING_GAIN1 0x00
-#define TSL2563_INT_DISBLED 0x00
+#define TSL2563_INT_DISABLED 0x00
#define TSL2563_INT_LEVEL 0x10
#define TSL2563_INT_PERSIST(n) ((n) & 0x0F)
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index be37fcbd4654..9fbde9b71b63 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -932,7 +932,7 @@ static ssize_t in_illuminance0_target_input_show(struct device *dev,
{
struct tsl2772_chip *chip = iio_priv(dev_to_iio_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
}
static ssize_t in_illuminance0_target_input_store(struct device *dev,
@@ -986,7 +986,7 @@ static ssize_t in_illuminance0_lux_table_show(struct device *dev,
int offset = 0;
while (i < TSL2772_MAX_LUX_TABLE_SIZE) {
- offset += snprintf(buf + offset, PAGE_SIZE, "%u,%u,",
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%u,%u,",
chip->tsl2772_device_lux[i].ch0,
chip->tsl2772_device_lux[i].ch1);
if (chip->tsl2772_device_lux[i].ch0 == 0) {
@@ -1000,7 +1000,7 @@ static ssize_t in_illuminance0_lux_table_show(struct device *dev,
i++;
}
- offset += snprintf(buf + offset, PAGE_SIZE, "\n");
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n");
return offset;
}
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index ec803c1e81df..2a4b3d331055 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -5,6 +5,7 @@
*
* Copyright 2012 Peter Meerwald <pmeerw@pmeerw.net>
* Copyright 2019 Pursim SPC
+ * Copyright 2020 Mathieu Othacehe <m.othacehe@gmail.com>
*
* IIO driver for:
* VCNL4000/10/20 (7-bit I2C slave address 0x13)
@@ -13,9 +14,7 @@
*
* TODO:
* allow to adjust IR current
- * proximity threshold and event handling
- * periodic ALS/proximity measurement (VCNL4010/20)
- * interrupts (VCNL4010/20/40, VCNL4200)
+ * interrupts (VCNL4040, VCNL4200)
*/
#include <linux/module.h>
@@ -23,9 +22,15 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#define VCNL4000_DRV_NAME "vcnl4000"
#define VCNL4000_PROD_ID 0x01
@@ -35,14 +40,22 @@
#define VCNL4000_COMMAND 0x80 /* Command register */
#define VCNL4000_PROD_REV 0x81 /* Product ID and Revision ID */
+#define VCNL4010_PROX_RATE 0x82 /* Proximity rate */
#define VCNL4000_LED_CURRENT 0x83 /* IR LED current for proximity mode */
#define VCNL4000_AL_PARAM 0x84 /* Ambient light parameter register */
+#define VCNL4010_ALS_PARAM 0x84 /* ALS rate */
#define VCNL4000_AL_RESULT_HI 0x85 /* Ambient light result register, MSB */
#define VCNL4000_AL_RESULT_LO 0x86 /* Ambient light result register, LSB */
#define VCNL4000_PS_RESULT_HI 0x87 /* Proximity result register, MSB */
#define VCNL4000_PS_RESULT_LO 0x88 /* Proximity result register, LSB */
#define VCNL4000_PS_MEAS_FREQ 0x89 /* Proximity test signal frequency */
+#define VCNL4010_INT_CTRL 0x89 /* Interrupt control */
#define VCNL4000_PS_MOD_ADJ 0x8a /* Proximity modulator timing adjustment */
+#define VCNL4010_LOW_THR_HI 0x8a /* Low threshold, MSB */
+#define VCNL4010_LOW_THR_LO 0x8b /* Low threshold, LSB */
+#define VCNL4010_HIGH_THR_HI 0x8c /* High threshold, MSB */
+#define VCNL4010_HIGH_THR_LO 0x8d /* High threshold, LSB */
+#define VCNL4010_ISR 0x8e /* Interrupt status */
#define VCNL4200_AL_CONF 0x00 /* Ambient light configuration */
#define VCNL4200_PS_CONF1 0x03 /* Proximity configuration */
@@ -57,6 +70,36 @@
#define VCNL4000_PS_RDY BIT(5) /* proximity data ready? */
#define VCNL4000_AL_OD BIT(4) /* start on-demand ALS measurement */
#define VCNL4000_PS_OD BIT(3) /* start on-demand proximity measurement */
+#define VCNL4000_ALS_EN BIT(2) /* start ALS measurement */
+#define VCNL4000_PROX_EN BIT(1) /* start proximity measurement */
+#define VCNL4000_SELF_TIMED_EN BIT(0) /* start self-timed measurement */
+
+/* Bit masks for interrupt registers. */
+#define VCNL4010_INT_THR_SEL BIT(0) /* Select threshold interrupt source */
+#define VCNL4010_INT_THR_EN BIT(1) /* Threshold interrupt type */
+#define VCNL4010_INT_ALS_EN BIT(2) /* Enable on ALS data ready */
+#define VCNL4010_INT_PROX_EN BIT(3) /* Enable on proximity data ready */
+
+#define VCNL4010_INT_THR_HIGH 0 /* High threshold exceeded */
+#define VCNL4010_INT_THR_LOW 1 /* Low threshold exceeded */
+#define VCNL4010_INT_ALS 2 /* ALS data ready */
+#define VCNL4010_INT_PROXIMITY 3 /* Proximity data ready */
+
+#define VCNL4010_INT_THR \
+ (BIT(VCNL4010_INT_THR_LOW) | BIT(VCNL4010_INT_THR_HIGH))
+#define VCNL4010_INT_DRDY \
+ (BIT(VCNL4010_INT_PROXIMITY) | BIT(VCNL4010_INT_ALS))
+
+static const int vcnl4010_prox_sampling_frequency[][2] = {
+ {1, 950000},
+ {3, 906250},
+ {7, 812500},
+ {16, 625000},
+ {31, 250000},
+ {62, 500000},
+ {125, 0},
+ {250, 0},
+};
#define VCNL4000_SLEEP_DELAY_MS 2000 /* before we enter pm_runtime_suspend */
@@ -83,10 +126,15 @@ struct vcnl4000_data {
struct mutex vcnl4000_lock;
struct vcnl4200_channel vcnl4200_al;
struct vcnl4200_channel vcnl4200_ps;
+ uint32_t near_level;
};
struct vcnl4000_chip_spec {
const char *prod;
+ struct iio_chan_spec const *channels;
+ const int num_channels;
+ const struct iio_info *info;
+ bool irq_support;
int (*init)(struct vcnl4000_data *data);
int (*measure_light)(struct vcnl4000_data *data, int *val);
int (*measure_proximity)(struct vcnl4000_data *data, int *val);
@@ -215,11 +263,31 @@ static int vcnl4200_init(struct vcnl4000_data *data)
return 0;
};
+static int vcnl4000_read_data(struct vcnl4000_data *data, u8 data_reg, int *val)
+{
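+ /* The result registers are MSB-first, so a swapped word read yields host order. */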
+ s32 ret;
+
+ ret = i2c_smbus_read_word_swapped(data->client, data_reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+
+static int vcnl4000_write_data(struct vcnl4000_data *data, u8 data_reg, int val)
+{
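+ /* The threshold registers are 16 bits wide; larger values would be truncated. */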
+ if (val > U16_MAX)
+ return -ERANGE;
+
+ return i2c_smbus_write_word_swapped(data->client, data_reg, val);
+}
+
static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
u8 rdy_mask, u8 data_reg, int *val)
{
int tries = 20;
- __be16 buf;
int ret;
mutex_lock(&data->vcnl4000_lock);
@@ -246,13 +314,11 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
goto fail;
}
- ret = i2c_smbus_read_i2c_block_data(data->client,
- data_reg, sizeof(buf), (u8 *) &buf);
+ ret = vcnl4000_read_data(data, data_reg, val);
if (ret < 0)
goto fail;
mutex_unlock(&data->vcnl4000_lock);
- *val = be16_to_cpu(buf);
return 0;
@@ -312,47 +378,34 @@ static int vcnl4200_measure_proximity(struct vcnl4000_data *data, int *val)
return vcnl4200_measure(data, &data->vcnl4200_ps, val);
}
-static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
- [VCNL4000] = {
- .prod = "VCNL4000",
- .init = vcnl4000_init,
- .measure_light = vcnl4000_measure_light,
- .measure_proximity = vcnl4000_measure_proximity,
- .set_power_state = vcnl4000_set_power_state,
- },
- [VCNL4010] = {
- .prod = "VCNL4010/4020",
- .init = vcnl4000_init,
- .measure_light = vcnl4000_measure_light,
- .measure_proximity = vcnl4000_measure_proximity,
- .set_power_state = vcnl4000_set_power_state,
- },
- [VCNL4040] = {
- .prod = "VCNL4040",
- .init = vcnl4200_init,
- .measure_light = vcnl4200_measure_light,
- .measure_proximity = vcnl4200_measure_proximity,
- .set_power_state = vcnl4200_set_power_state,
- },
- [VCNL4200] = {
- .prod = "VCNL4200",
- .init = vcnl4200_init,
- .measure_light = vcnl4200_measure_light,
- .measure_proximity = vcnl4200_measure_proximity,
- .set_power_state = vcnl4200_set_power_state,
- },
-};
+static int vcnl4010_read_proxy_samp_freq(struct vcnl4000_data *data, int *val,
+ int *val2)
+{
+ int ret;
-static const struct iio_chan_spec vcnl4000_channels[] = {
- {
- .type = IIO_LIGHT,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- }, {
- .type = IIO_PROXIMITY,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }
-};
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_PROX_RATE);
+ if (ret < 0)
+ return ret;
+
+ if (ret >= ARRAY_SIZE(vcnl4010_prox_sampling_frequency))
+ return -EINVAL;
+
+ *val = vcnl4010_prox_sampling_frequency[ret][0];
+ *val2 = vcnl4010_prox_sampling_frequency[ret][1];
+
+ return 0;
+}
+
+static bool vcnl4010_is_in_periodic_mode(struct vcnl4000_data *data)
+{
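+ /* The self-timed enable bit stays set while periodic (event) measurement runs. */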
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4000_COMMAND);
+ if (ret < 0)
+ return false;
+
+ return !!(ret & VCNL4000_SELF_TIMED_EN);
+}
static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on)
{
@@ -412,10 +465,571 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
}
}
+static int vcnl4010_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_SCALE:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Protect against event capture. */
+ if (vcnl4010_is_in_periodic_mode(data)) {
+ ret = -EBUSY;
+ } else {
+ ret = vcnl4000_read_raw(indio_dev, chan, val, val2,
+ mask);
+ }
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = vcnl4010_read_proxy_samp_freq(data, val, val2);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (int *)vcnl4010_prox_sampling_frequency;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = 2 * ARRAY_SIZE(vcnl4010_prox_sampling_frequency);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_write_proxy_samp_freq(struct vcnl4000_data *data, int val,
+ int val2)
+{
+ unsigned int i;
+ int index = -1;
+
+ for (i = 0; i < ARRAY_SIZE(vcnl4010_prox_sampling_frequency); i++) {
+ if (val == vcnl4010_prox_sampling_frequency[i][0] &&
+ val2 == vcnl4010_prox_sampling_frequency[i][1]) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index < 0)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(data->client, VCNL4010_PROX_RATE,
+ index);
+}
+
+static int vcnl4010_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Protect against event capture. */
+ if (vcnl4010_is_in_periodic_mode(data)) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = vcnl4010_write_proxy_samp_freq(data, val, val2);
+ goto end;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+
+end:
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+}
+
+static int vcnl4010_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = vcnl4000_read_data(data, VCNL4010_HIGH_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = vcnl4000_read_data(data, VCNL4010_LOW_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = vcnl4000_write_data(data, VCNL4010_HIGH_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = vcnl4000_write_data(data, VCNL4010_LOW_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool vcnl4010_is_thr_enabled(struct vcnl4000_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_INT_CTRL);
+ if (ret < 0)
+ return false;
+
+ return !!(ret & VCNL4010_INT_THR_EN);
+}
+
+static int vcnl4010_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return vcnl4010_is_thr_enabled(data);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_config_threshold(struct iio_dev *indio_dev, bool state)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
+ int icr;
+ int command;
+
+ if (state) {
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Enable periodic measurement of proximity data. */
+ command = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN;
+
+ /*
+ * Enable threshold interrupts; with the source select bit
+ * cleared they apply to proximity data by default.
+ */
+ icr = VCNL4010_INT_THR_EN;
+ } else {
+ if (!vcnl4010_is_thr_enabled(data))
+ return 0;
+
+ command = 0;
+ icr = 0;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
+ command);
+ if (ret < 0)
+ goto end;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, icr);
+
+end:
+ if (state)
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static int vcnl4010_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return vcnl4010_config_threshold(indio_dev, state);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t vcnl4000_read_near_level(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", data->near_level);
+}
+
+static const struct iio_chan_spec_ext_info vcnl4000_ext_info[] = {
+ {
+ .name = "nearlevel",
+ .shared = IIO_SEPARATE,
+ .read = vcnl4000_read_near_level,
+ },
+ { /* sentinel */ }
+};
+
+static const struct iio_event_spec vcnl4000_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ }
+};
+
+static const struct iio_chan_spec vcnl4000_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }, {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = vcnl4000_ext_info,
+ }
+};
+
+static const struct iio_chan_spec vcnl4010_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .scan_index = -1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }, {
+ .type = IIO_PROXIMITY,
+ .scan_index = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .event_spec = vcnl4000_event_spec,
+ .num_event_specs = ARRAY_SIZE(vcnl4000_event_spec),
+ .ext_info = vcnl4000_ext_info,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
static const struct iio_info vcnl4000_info = {
.read_raw = vcnl4000_read_raw,
};
+static const struct iio_info vcnl4010_info = {
+ .read_raw = vcnl4010_read_raw,
+ .read_avail = vcnl4010_read_avail,
+ .write_raw = vcnl4010_write_raw,
+ .read_event_value = vcnl4010_read_event,
+ .write_event_value = vcnl4010_write_event,
+ .read_event_config = vcnl4010_read_event_config,
+ .write_event_config = vcnl4010_write_event_config,
+};
+
+static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
+ [VCNL4000] = {
+ .prod = "VCNL4000",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ .set_power_state = vcnl4000_set_power_state,
+ .channels = vcnl4000_channels,
+ .num_channels = ARRAY_SIZE(vcnl4000_channels),
+ .info = &vcnl4000_info,
+ .irq_support = false,
+ },
+ [VCNL4010] = {
+ .prod = "VCNL4010/4020",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ .set_power_state = vcnl4000_set_power_state,
+ .channels = vcnl4010_channels,
+ .num_channels = ARRAY_SIZE(vcnl4010_channels),
+ .info = &vcnl4010_info,
+ .irq_support = true,
+ },
+ [VCNL4040] = {
+ .prod = "VCNL4040",
+ .init = vcnl4200_init,
+ .measure_light = vcnl4200_measure_light,
+ .measure_proximity = vcnl4200_measure_proximity,
+ .set_power_state = vcnl4200_set_power_state,
+ .channels = vcnl4000_channels,
+ .num_channels = ARRAY_SIZE(vcnl4000_channels),
+ .info = &vcnl4000_info,
+ .irq_support = false,
+ },
+ [VCNL4200] = {
+ .prod = "VCNL4200",
+ .init = vcnl4200_init,
+ .measure_light = vcnl4200_measure_light,
+ .measure_proximity = vcnl4200_measure_proximity,
+ .set_power_state = vcnl4200_set_power_state,
+ .channels = vcnl4000_channels,
+ .num_channels = ARRAY_SIZE(vcnl4000_channels),
+ .info = &vcnl4000_info,
+ .irq_support = false,
+ },
+};
+
+static irqreturn_t vcnl4010_irq_thread(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ unsigned long isr;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
+ if (ret < 0)
+ goto end;
+
+ isr = ret;
+
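+ /* Threshold interrupts are acked here; data-ready bits are acked by the trigger handler. */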
+ if (isr & VCNL4010_INT_THR) {
+ if (test_bit(VCNL4010_INT_THR_LOW, &isr)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if (test_bit(VCNL4010_INT_THR_HIGH, &isr)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
+ isr & VCNL4010_INT_THR);
+ }
+
+ if (isr & VCNL4010_INT_DRDY && iio_buffer_enabled(indio_dev))
+ iio_trigger_poll_chained(indio_dev->trig);
+
+end:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
+ u16 buffer[8] = {0}; /* 1x 16-bit sample + padding + 64-bit timestamp */
+ bool data_read = false;
+ unsigned long isr;
+ int val = 0;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
+ if (ret < 0)
+ goto end;
+
+ isr = ret;
+
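+ /* Scan index 0 is the proximity channel; push it only when its data is ready. */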
+ if (test_bit(0, active_scan_mask)) {
+ if (test_bit(VCNL4010_INT_PROXIMITY, &isr)) {
+ ret = vcnl4000_read_data(data,
+ VCNL4000_PS_RESULT_HI,
+ &val);
+ if (ret < 0)
+ goto end;
+
+ buffer[0] = val;
+ data_read = true;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
+ isr & VCNL4010_INT_DRDY);
+ if (ret < 0)
+ goto end;
+
+ if (!data_read)
+ goto end;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_get_time_ns(indio_dev));
+
+end:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int vcnl4010_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
+ int cmd;
+
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Do not enable the buffer if we are already capturing events. */
+ if (vcnl4010_is_in_periodic_mode(data)) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL,
+ VCNL4010_INT_PROX_EN);
+ if (ret < 0)
+ goto end;
+
+ cmd = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN;
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd);
+ if (ret < 0)
+ goto end;
+
+ return 0;
+end:
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
+}
+
+static int vcnl4010_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret, ret_disable;
+
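+ /* Quiesce the device first; report the first error but always run the core predisable. */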
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 0);
+ if (ret < 0)
+ goto end;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0);
+
+end:
+ ret_disable = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret_disable;
+
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops vcnl4010_buffer_ops = {
+ .postenable = &vcnl4010_buffer_postenable,
+ .predisable = &vcnl4010_buffer_predisable,
+};
+
+static const struct iio_trigger_ops vcnl4010_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static int vcnl4010_probe_trigger(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ struct iio_trigger *trigger;
+
+ trigger = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!trigger)
+ return -ENOMEM;
+
+ trigger->dev.parent = &client->dev;
+ trigger->ops = &vcnl4010_trigger_ops;
+ iio_trigger_set_drvdata(trigger, indio_dev);
+
+ return devm_iio_trigger_register(&client->dev, trigger);
+}
+
static int vcnl4000_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -440,13 +1054,44 @@ static int vcnl4000_probe(struct i2c_client *client,
dev_dbg(&client->dev, "%s Ambient light/proximity sensor, Rev: %02x\n",
data->chip_spec->prod, data->rev);
+ if (device_property_read_u32(&client->dev, "proximity-near-level",
+ &data->near_level))
+ data->near_level = 0;
+
indio_dev->dev.parent = &client->dev;
- indio_dev->info = &vcnl4000_info;
- indio_dev->channels = vcnl4000_channels;
- indio_dev->num_channels = ARRAY_SIZE(vcnl4000_channels);
+ indio_dev->info = data->chip_spec->info;
+ indio_dev->channels = data->chip_spec->channels;
+ indio_dev->num_channels = data->chip_spec->num_channels;
indio_dev->name = VCNL4000_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
+ if (client->irq && data->chip_spec->irq_support) {
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ NULL,
+ vcnl4010_trigger_handler,
+ &vcnl4010_buffer_ops);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "unable to setup iio triggered buffer\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, vcnl4010_irq_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "vcnl4010_irq",
+ indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "irq request failed\n");
+ return ret;
+ }
+
+ ret = vcnl4010_probe_trigger(indio_dev);
+ if (ret < 0)
+ return ret;
+ }
+
ret = pm_runtime_set_active(&client->dev);
if (ret < 0)
goto fail_poweroff;
@@ -540,5 +1185,6 @@ static struct i2c_driver vcnl4000_driver = {
module_i2c_driver(vcnl4000_driver);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
MODULE_DESCRIPTION("Vishay VCNL4000 proximity/ambient light sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index d9533a76b8f6..ed7b02765b97 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -16,6 +16,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/err.h>
@@ -537,7 +538,7 @@ MODULE_DEVICE_TABLE(i2c, vl6180_id);
static struct i2c_driver vl6180_driver = {
.driver = {
.name = VL6180_DRV_NAME,
- .of_match_table = of_match_ptr(vl6180_of_match),
+ .of_match_table = vl6180_of_match,
},
.probe = vl6180_probe,
.id_table = vl6180_id,
diff --git a/drivers/iio/light/zopt2201.c b/drivers/iio/light/zopt2201.c
index 5f54f39e7a4c..80ae530720cd 100644
--- a/drivers/iio/light/zopt2201.c
+++ b/drivers/iio/light/zopt2201.c
@@ -19,6 +19,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#define ZOPT2201_DRV_NAME "zopt2201"
/* Registers */
@@ -219,7 +221,7 @@ static int zopt2201_read(struct zopt2201_data *data, u8 reg)
goto fail;
mutex_unlock(&data->lock);
- return (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ return get_unaligned_le24(&buf[0]);
fail:
mutex_unlock(&data->lock);
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index d32996702110..810fdfd37c88 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -49,6 +49,7 @@
#define AK8974_WHOAMI_VALUE_AMI306 0x46
#define AK8974_WHOAMI_VALUE_AMI305 0x47
#define AK8974_WHOAMI_VALUE_AK8974 0x48
+#define AK8974_WHOAMI_VALUE_HSCDTD008A 0x49
#define AK8974_DATA_X 0x10
#define AK8974_DATA_Y 0x12
@@ -140,6 +141,12 @@
#define AK8974_INT_CTRL_PULSE BIT(1) /* 0 = latched; 1 = pulse (50 usec) */
#define AK8974_INT_CTRL_RESDEF (AK8974_INT_CTRL_XYZEN | AK8974_INT_CTRL_POL)
+/* HSCDTD008A-specific control register */
+#define HSCDTD008A_CTRL4 0x1E
+#define HSCDTD008A_CTRL4_MMD BIT(7) /* must be set to 1 */
+#define HSCDTD008A_CTRL4_RANGE BIT(4) /* 0 = 14-bit output; 1 = 15-bit output */
+#define HSCDTD008A_CTRL4_RESDEF (HSCDTD008A_CTRL4_MMD | HSCDTD008A_CTRL4_RANGE)
+
/* The AMI305 has elaborate FW version and serial number registers */
#define AMI305_VER 0xE8
#define AMI305_SN 0xEA
@@ -241,10 +248,17 @@ static int ak8974_reset(struct ak8974 *ak8974)
ret = regmap_write(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_RESDEF);
if (ret)
return ret;
- ret = regmap_write(ak8974->map, AK8974_INT_CTRL,
- AK8974_INT_CTRL_RESDEF);
- if (ret)
- return ret;
+ if (ak8974->variant != AK8974_WHOAMI_VALUE_HSCDTD008A) {
+ ret = regmap_write(ak8974->map, AK8974_INT_CTRL,
+ AK8974_INT_CTRL_RESDEF);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_write(ak8974->map, HSCDTD008A_CTRL4,
+ HSCDTD008A_CTRL4_RESDEF);
+ if (ret)
+ return ret;
+ }
/* After reset, power off is default state */
return ak8974_set_power(ak8974, AK8974_PWR_OFF);
@@ -267,6 +281,8 @@ static int ak8974_configure(struct ak8974 *ak8974)
if (ret)
return ret;
}
+ if (ak8974->variant == AK8974_WHOAMI_VALUE_HSCDTD008A)
+ return 0;
ret = regmap_write(ak8974->map, AK8974_INT_CTRL, AK8974_INT_CTRL_POL);
if (ret)
return ret;
@@ -495,6 +511,10 @@ static int ak8974_detect(struct ak8974 *ak8974)
name = "ak8974";
dev_info(&ak8974->i2c->dev, "detected AK8974\n");
break;
+ case AK8974_WHOAMI_VALUE_HSCDTD008A:
+ name = "hscdtd008a";
+ dev_info(&ak8974->i2c->dev, "detected hscdtd008a\n");
+ break;
default:
dev_err(&ak8974->i2c->dev, "unsupported device (%02x) ",
whoami);
@@ -534,47 +554,103 @@ static int ak8974_detect(struct ak8974 *ak8974)
return 0;
}
+static int ak8974_measure_channel(struct ak8974 *ak8974, unsigned long address,
+ int *val)
+{
+ __le16 hw_values[3];
+ int ret;
+
+ pm_runtime_get_sync(&ak8974->i2c->dev);
+ mutex_lock(&ak8974->lock);
+
+ /*
+ * We read all axes and discard all but one; for optimized
+ * reading, use the triggered buffer.
+ */
+ ret = ak8974_trigmeas(ak8974);
+ if (ret)
+ goto out_unlock;
+ ret = ak8974_getresult(ak8974, hw_values);
+ if (ret)
+ goto out_unlock;
+ /*
+ * This explicit cast to (s16) is necessary as the measurement
+ * is done in 2's complement with positive and negative values.
+ * The following assignment to *val will then convert the signed
+ * s16 value to a signed int value.
+ */
+ *val = (s16)le16_to_cpu(hw_values[address]);
+out_unlock:
+ mutex_unlock(&ak8974->lock);
+ pm_runtime_mark_last_busy(&ak8974->i2c->dev);
+ pm_runtime_put_autosuspend(&ak8974->i2c->dev);
+
+ return ret;
+}
+
static int ak8974_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
long mask)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
- __le16 hw_values[3];
- int ret = -EINVAL;
-
- pm_runtime_get_sync(&ak8974->i2c->dev);
- mutex_lock(&ak8974->lock);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (chan->address > 2) {
dev_err(&ak8974->i2c->dev, "faulty channel address\n");
- ret = -EIO;
- goto out_unlock;
+ return -EIO;
}
- ret = ak8974_trigmeas(ak8974);
- if (ret)
- goto out_unlock;
- ret = ak8974_getresult(ak8974, hw_values);
+ ret = ak8974_measure_channel(ak8974, chan->address, val);
if (ret)
- goto out_unlock;
-
- /*
- * We read all axes and discard all but one, for optimized
- * reading, use the triggered buffer.
- */
- *val = (s16)le16_to_cpu(hw_values[chan->address]);
-
- ret = IIO_VAL_INT;
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (ak8974->variant) {
+ case AK8974_WHOAMI_VALUE_AMI306:
+ case AK8974_WHOAMI_VALUE_AMI305:
+ /*
+ * The datasheet for AMI305 and AMI306, page 6
+ * specifies the range of the sensor to be
+ * +/- 12 Gauss.
+ */
+ *val = 12;
+ /*
+ * 12 bits are used, +/- 2^11
+ * [ -2048 .. 2047 ] (manual page 20)
+ * [ 0xf800 .. 0x07ff ]
+ */
+ *val2 = 11;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case AK8974_WHOAMI_VALUE_HSCDTD008A:
+ /*
+ * The datasheet for HSCDTD008A, page 3 specifies the
+ * range of the sensor as +/- 2.4 mT per axis, which
+ * corresponds to +/- 2400 uT = +/- 24 Gauss.
+ */
+ *val = 24;
+ /*
+ * 15 bits are used (set up in CTRL4), +/- 2^14
+ * [ -16384 .. 16383 ] (manual page 24)
+ * [ 0xc000 .. 0x3fff ]
+ */
+ *val2 = 14;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ /* GUESSING +/- 12 Gauss */
+ *val = 12;
+ /* GUESSING 12 bits ADC +/- 2^11 */
+ *val2 = 11;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ break;
+ default:
+ /* Unknown request */
+ break;
}
- out_unlock:
- mutex_unlock(&ak8974->lock);
- pm_runtime_mark_last_busy(&ak8974->i2c->dev);
- pm_runtime_put_autosuspend(&ak8974->i2c->dev);
-
- return ret;
+ return -EINVAL;
}
static void ak8974_fill_buffer(struct iio_dev *indio_dev)
@@ -631,27 +707,44 @@ static const struct iio_chan_spec_ext_info ak8974_ext_info[] = {
{ },
};
-#define AK8974_AXIS_CHANNEL(axis, index) \
+#define AK8974_AXIS_CHANNEL(axis, index, bits) \
{ \
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
.ext_info = ak8974_ext_info, \
.address = index, \
.scan_index = index, \
.scan_type = { \
.sign = 's', \
- .realbits = 16, \
+ .realbits = bits, \
.storagebits = 16, \
.endianness = IIO_LE \
}, \
}
-static const struct iio_chan_spec ak8974_channels[] = {
- AK8974_AXIS_CHANNEL(X, 0),
- AK8974_AXIS_CHANNEL(Y, 1),
- AK8974_AXIS_CHANNEL(Z, 2),
+/*
+ * We have no datasheet for the AK8974 but we guess that its
+ * ADC is 12 bits. The AMI305 and AMI306 certainly have 12-bit
+ * ADCs.
+ */
+static const struct iio_chan_spec ak8974_12_bits_channels[] = {
+ AK8974_AXIS_CHANNEL(X, 0, 12),
+ AK8974_AXIS_CHANNEL(Y, 1, 12),
+ AK8974_AXIS_CHANNEL(Z, 2, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+/*
+ * The HSCDTD008A has 15 bits of resolution the way we set it up
+ * in CTRL4.
+ */
+static const struct iio_chan_spec ak8974_15_bits_channels[] = {
+ AK8974_AXIS_CHANNEL(X, 0, 15),
+ AK8974_AXIS_CHANNEL(Y, 1, 15),
+ AK8974_AXIS_CHANNEL(Z, 2, 15),
IIO_CHAN_SOFT_TIMESTAMP(3),
};
@@ -674,18 +767,18 @@ static bool ak8974_writeable_reg(struct device *dev, unsigned int reg)
case AK8974_INT_CTRL:
case AK8974_INT_THRES:
case AK8974_INT_THRES + 1:
+ return true;
case AK8974_PRESET:
case AK8974_PRESET + 1:
- return true;
+ return ak8974->variant != AK8974_WHOAMI_VALUE_HSCDTD008A;
case AK8974_OFFSET_X:
case AK8974_OFFSET_X + 1:
case AK8974_OFFSET_Y:
case AK8974_OFFSET_Y + 1:
case AK8974_OFFSET_Z:
case AK8974_OFFSET_Z + 1:
- if (ak8974->variant == AK8974_WHOAMI_VALUE_AK8974)
- return true;
- return false;
+ return ak8974->variant == AK8974_WHOAMI_VALUE_AK8974 ||
+ ak8974->variant == AK8974_WHOAMI_VALUE_HSCDTD008A;
case AMI305_OFFSET_X:
case AMI305_OFFSET_X + 1:
case AMI305_OFFSET_Y:
@@ -746,7 +839,12 @@ static int ak8974_probe(struct i2c_client *i2c,
ARRAY_SIZE(ak8974->regs),
ak8974->regs);
if (ret < 0) {
- dev_err(&i2c->dev, "cannot get regulators\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&i2c->dev, "cannot get regulators: %d\n", ret);
+ else
+ dev_dbg(&i2c->dev,
+ "regulators unavailable, deferring probe\n");
+
return ret;
}
@@ -795,8 +893,21 @@ static int ak8974_probe(struct i2c_client *i2c,
pm_runtime_put(&i2c->dev);
indio_dev->dev.parent = &i2c->dev;
- indio_dev->channels = ak8974_channels;
- indio_dev->num_channels = ARRAY_SIZE(ak8974_channels);
+ switch (ak8974->variant) {
+ case AK8974_WHOAMI_VALUE_AMI306:
+ case AK8974_WHOAMI_VALUE_AMI305:
+ indio_dev->channels = ak8974_12_bits_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_12_bits_channels);
+ break;
+ case AK8974_WHOAMI_VALUE_HSCDTD008A:
+ indio_dev->channels = ak8974_15_bits_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_15_bits_channels);
+ break;
+ default:
+ indio_dev->channels = ak8974_12_bits_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_12_bits_channels);
+ break;
+ }
indio_dev->info = &ak8974_info;
indio_dev->available_scan_masks = ak8974_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -926,12 +1037,14 @@ static const struct i2c_device_id ak8974_id[] = {
{"ami305", 0 },
{"ami306", 0 },
{"ak8974", 0 },
+ {"hscdtd008a", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, ak8974_id);
static const struct of_device_id ak8974_of_match[] = {
{ .compatible = "asahi-kasei,ak8974", },
+ { .compatible = "alps,hscdtd008a", },
{}
};
MODULE_DEVICE_TABLE(of, ak8974_of_match);
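For reference, IIO_VAL_FRACTIONAL_LOG2 makes the core report val / 2^val2 to user space, so the scales above work out as (worked arithmetic, not part of the patch):

/* AMI305/AMI306: 12 / 2^11 = 0.005859375   Gauss per LSB */
/* HSCDTD008A:    24 / 2^14 = 0.00146484375 Gauss per LSB */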
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index ed9be0490d77..c6ed3ea8460a 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -22,8 +22,8 @@ static int bmc150_magn_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &bmc150_magn_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name);
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 25e60b233e08..0c09daf87794 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum magn_3d_channel {
@@ -519,18 +517,13 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- return ret;
- }
atomic_set(&magn_state->magn_flux_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&magn_state->magn_flux_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ return ret;
}
ret = iio_device_register(indio_dev);
@@ -554,9 +547,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &magn_state->magn_flux_attributes);
return ret;
}
@@ -569,8 +560,7 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &magn_state->magn_flux_attributes);
return 0;
}
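With buffer handling folded into the common HID sensor trigger code, callers across this series reduce to the shape below (a sketch; attrs stands for the driver's hid_sensor_common state):

ret = hid_sensor_setup_trigger(indio_dev, name, attrs); /* also sets up the buffer */
if (ret < 0)
	return ret;
/* ...and a single call tears both down on the error/remove paths: */
hid_sensor_remove_trigger(indio_dev, attrs);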
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 425cdd07b4e5..1787d656d009 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -239,7 +239,7 @@ static int mmc35240_init(struct mmc35240_data *data)
return ret;
ret = regmap_bulk_read(data->regmap, MMC35240_OTP_START_ADDR,
- (u8 *)otp_data, sizeof(otp_data));
+ otp_data, sizeof(otp_data));
if (ret < 0)
return ret;
@@ -295,7 +295,7 @@ static int mmc35240_read_measurement(struct mmc35240_data *data, __le16 buf[3])
if (ret < 0)
return ret;
- return regmap_bulk_read(data->regmap, MMC35240_REG_XOUT_L, (u8 *)buf,
+ return regmap_bulk_read(data->regmap, MMC35240_REG_XOUT_L, buf,
3 * sizeof(__le16));
}
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 7c20918d8108..43a2e420c9c4 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -22,6 +22,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
+#include <asm/unaligned.h>
+
#include "rm3100.h"
/* Cycle Count Registers. */
@@ -223,8 +225,7 @@ static int rm3100_read_mag(struct rm3100_data *data, int idx, int *val)
goto unlock_return;
mutex_unlock(&data->lock);
- *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
- 23);
+ *val = sign_extend32(get_unaligned_be24(&buffer[0]), 23);
return IIO_VAL_INT;
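sign_extend32() takes bit 23 as the sign bit of the 24-bit sample; a small sketch of the combined conversion (illustrative):

u32 raw = get_unaligned_be24(buffer);	/* 0x000000 .. 0xffffff */
int val = sign_extend32(raw, 23);	/* -8388608 .. 8388607 */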
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e68184a93a6d..79de721e6015 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -506,8 +506,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = mdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
- mdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &mdata->sensor_settings->fs.fs_avl[0];
+ mdata->current_fullscale = &mdata->sensor_settings->fs.fs_avl[0];
mdata->odr = mdata->sensor_settings->odr.odr_avl[0].hz;
err = st_sensors_init_sensor(indio_dev, NULL);
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index 00af68764cda..6aac8bea233a 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -15,8 +15,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum incl_3d_channel {
@@ -346,18 +344,13 @@ static int hid_incl_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&incl_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&incl_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -382,9 +375,7 @@ static int hid_incl_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&incl_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &incl_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -399,8 +390,7 @@ static int hid_incl_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&incl_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &incl_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index 64ae7d04a200..b99f41240e3e 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
struct dev_rot_state {
@@ -288,18 +286,13 @@ static int hid_dev_rot_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- return ret;
- }
atomic_set(&rot_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&rot_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ return ret;
}
ret = iio_device_register(indio_dev);
@@ -323,9 +316,7 @@ static int hid_dev_rot_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&rot_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &rot_state->common_attributes);
return ret;
}
@@ -338,8 +329,7 @@ static int hid_dev_rot_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&rot_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &rot_state->common_attributes);
return 0;
}
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 29c209cc1108..126a56d31b6e 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -271,6 +271,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+ (s32)2097152) * calib->H2 + 8192) >> 14);
var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)calib->H1) >> 4;
+ var = clamp_val(var, 0, 419430400);
+
return var >> 12;
};
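The added clamp pins the compensated value to full scale before the final shift; a worked check (assuming the BME280's Q22.10 humidity output format):

/*
 * Upper bound: 419430400 == 100 << 22, and (100 << 22) >> 12 == 102400,
 * i.e. exactly 100 %RH in units of 1/1024 %RH, so arithmetic overflow in
 * the compensation can no longer produce readings past full scale.
 */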
@@ -337,8 +339,7 @@ static int bmp280_read_temp(struct bmp280_data *data,
__be32 tmp = 0;
s32 adc_temp, comp_temp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
- (u8 *) &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB, &tmp, 3);
if (ret < 0) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
@@ -377,8 +378,7 @@ static int bmp280_read_press(struct bmp280_data *data,
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
- (u8 *) &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB, &tmp, 3);
if (ret < 0) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
@@ -400,8 +400,8 @@ static int bmp280_read_press(struct bmp280_data *data,
static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
{
+ __be16 tmp;
int ret;
- __be16 tmp = 0;
s32 adc_humidity;
u32 comp_humidity;
@@ -410,8 +410,7 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
- (u8 *) &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB, &tmp, 2);
if (ret < 0) {
dev_err(data->dev, "failed to read humidity\n");
return ret;
@@ -575,57 +574,38 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static ssize_t bmp280_show_avail(char *buf, const int *vals, const int n)
+static int bmp280_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
{
- size_t len = 0;
- int i;
-
- for (i = 0; i < n; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", vals[i]);
-
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static ssize_t bmp280_show_temp_oversampling_avail(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
-
- return bmp280_show_avail(buf, data->chip_info->oversampling_temp_avail,
- data->chip_info->num_oversampling_temp_avail);
-}
-
-static ssize_t bmp280_show_press_oversampling_avail(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
+ struct bmp280_data *data = iio_priv(indio_dev);
- return bmp280_show_avail(buf, data->chip_info->oversampling_press_avail,
- data->chip_info->num_oversampling_press_avail);
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *vals = data->chip_info->oversampling_press_avail;
+ *length = data->chip_info->num_oversampling_press_avail;
+ break;
+ case IIO_TEMP:
+ *vals = data->chip_info->oversampling_temp_avail;
+ *length = data->chip_info->num_oversampling_temp_avail;
+ break;
+ default:
+ return -EINVAL;
+ }
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
}
-static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available,
- S_IRUGO, bmp280_show_temp_oversampling_avail, NULL, 0);
-
-static IIO_DEVICE_ATTR(in_pressure_oversampling_ratio_available,
- S_IRUGO, bmp280_show_press_oversampling_avail, NULL, 0);
-
-static struct attribute *bmp280_attributes[] = {
- &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
- &iio_dev_attr_in_pressure_oversampling_ratio_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group bmp280_attrs_group = {
- .attrs = bmp280_attributes,
-};
-
static const struct iio_info bmp280_info = {
.read_raw = &bmp280_read_raw,
+ .read_avail = &bmp280_read_avail,
.write_raw = &bmp280_write_raw,
- .attrs = &bmp280_attrs_group,
};
static int bmp280_chip_config(struct bmp280_data *data)
@@ -713,7 +693,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
unsigned int ctrl;
if (data->use_eoc)
- init_completion(&data->done);
+ reinit_completion(&data->done);
ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
if (ret)
@@ -752,14 +732,14 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
{
+ __be16 tmp;
int ret;
- __be16 tmp = 0;
ret = bmp180_measure(data, BMP180_MEAS_TEMP);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 2);
if (ret)
return ret;
@@ -856,7 +836,7 @@ static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 3);
if (ret)
return ret;
@@ -965,10 +945,12 @@ static int bmp085_fetch_eoc_irq(struct device *dev,
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
if (irq_trig != IRQF_TRIGGER_RISING) {
- dev_err(dev, "non-rising trigger given for EOC interrupt, "
- "trying to enforce it\n");
+ dev_err(dev, "non-rising trigger given for EOC interrupt, trying to enforce it\n");
irq_trig = IRQF_TRIGGER_RISING;
}
+
+ init_completion(&data->done);
+
ret = devm_request_threaded_irq(dev,
irq,
bmp085_eoc_irq,
@@ -1082,9 +1064,9 @@ int bmp280_common_probe(struct device *dev,
usleep_range(data->start_up_time, data->start_up_time + 100);
/* Bring chip out of reset if there is an assigned GPIO line */
- gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
/* Deassert the signal */
- if (!IS_ERR(gpiod)) {
+ if (gpiod) {
dev_info(dev, "release reset\n");
gpiod_set_value(gpiod, 0);
}
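Two robustness fixes land together here: the completion is now initialised once, where the EOC interrupt is requested, and merely re-armed per measurement; and the reset GPIO is requested with the _optional variant, which returns NULL (rather than an error) when no line is described. A minimal sketch of the completion pattern (foo_* names illustrative):

struct foo {
	struct completion done;
};

static void foo_setup(struct foo *f)
{
	init_completion(&f->done);	/* once, before the IRQ can fire */
}

static int foo_measure(struct foo *f)
{
	reinit_completion(&f->done);	/* re-arm for this conversion */
	/* start the conversion here, then wait for the EOC IRQ: */
	if (!wait_for_completion_timeout(&f->done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}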
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 953235052155..5e6663f757ae 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
#define CHANNEL_SCAN_INDEX_PRESSURE 0
@@ -290,18 +288,13 @@ static int hid_press_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&press_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&press_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -325,9 +318,7 @@ static int hid_press_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&press_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &press_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -342,8 +333,7 @@ static int hid_press_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PRESSURE);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&press_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &press_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index 3ac3632e7242..1f931f5b7a65 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -18,6 +18,8 @@
#include <linux/util_macros.h>
#include <linux/acpi.h>
+#include <asm/unaligned.h>
+
/* I2C commands: */
#define HP206C_CMD_SOFT_RST 0x06
@@ -93,12 +95,12 @@ static int hp206c_read_20bit(struct i2c_client *client, u8 cmd)
int ret;
u8 values[3];
- ret = i2c_smbus_read_i2c_block_data(client, cmd, 3, values);
+ ret = i2c_smbus_read_i2c_block_data(client, cmd, sizeof(values), values);
if (ret < 0)
return ret;
- if (ret != 3)
+ if (ret != sizeof(values))
return -EIO;
- return ((values[0] & 0xF) << 16) | (values[1] << 8) | (values[2]);
+ return get_unaligned_be24(&values[0]) & GENMASK(19, 0);
}
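The helper-based read is equivalent to the old nibble-and-shift arithmetic; a sketch of the 20-bit extraction (illustrative):

u32 raw = get_unaligned_be24(values);	/* values[0] high byte, values[2] low */
u32 val = raw & GENMASK(19, 0);		/* keep the 20 significant bits */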
/* Spin for max 160ms until DEV_RDY is 1, or return error. */
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 8089c59adce5..072c106dd66d 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/of_device.h>
+#include <asm/unaligned.h>
+
#include "ms5611.h"
static int ms5611_i2c_reset(struct device *dev)
@@ -50,7 +52,7 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val)
if (ret < 0)
return ret;
- *val = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+ *val = get_unaligned_be24(&buf[0]);
return 0;
}
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index b463eaa799ab..4799aa57135e 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -11,6 +11,8 @@
#include <linux/spi/spi.h>
#include <linux/of_device.h>
+#include <asm/unaligned.h>
+
#include "ms5611.h"
static int ms5611_spi_reset(struct device *dev)
@@ -45,7 +47,7 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
if (ret < 0)
return ret;
- *val = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+ *val = get_unaligned_be24(&buf[0]);
return 0;
}
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index bd972cec4830..789a2928504a 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -683,8 +683,7 @@ EXPORT_SYMBOL(st_press_get_settings);
int st_press_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *press_data = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata =
- (struct st_sensors_platform_data *)press_data->dev->platform_data;
+ struct st_sensors_platform_data *pdata = dev_get_platdata(press_data->dev);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -708,9 +707,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = press_data->sensor_settings->ch;
indio_dev->num_channels = press_data->sensor_settings->num_ch;
- press_data->current_fullscale =
- (struct st_sensor_fullscale_avl *)
- &press_data->sensor_settings->fs.fs_avl[0];
+ press_data->current_fullscale = &press_data->sensor_settings->fs.fs_avl[0];
press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 99dfe33ee402..37fe851f89af 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -64,6 +64,7 @@
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <asm/unaligned.h>
#include "zpa2326.h"
/* 200 ms should be enough for the longest conversion time in one-shot mode. */
@@ -1005,22 +1006,20 @@ static int zpa2326_fetch_raw_sample(const struct iio_dev *indio_dev,
struct regmap *regs = ((struct zpa2326_private *)
iio_priv(indio_dev))->regmap;
int err;
+ u8 v[3];
switch (type) {
case IIO_PRESSURE:
zpa2326_dbg(indio_dev, "fetching raw pressure sample");
- err = regmap_bulk_read(regs, ZPA2326_PRESS_OUT_XL_REG, value,
- 3);
+ err = regmap_bulk_read(regs, ZPA2326_PRESS_OUT_XL_REG, v, sizeof(v));
if (err) {
zpa2326_warn(indio_dev, "failed to fetch pressure (%d)",
err);
return err;
}
- /* Pressure is a 24 bits wide little-endian unsigned int. */
- *value = (((u8 *)value)[2] << 16) | (((u8 *)value)[1] << 8) |
- ((u8 *)value)[0];
+ *value = get_unaligned_le24(&v[0]);
return IIO_VAL_INT;
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index 37606d400805..12672a0e89ed 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -101,6 +101,19 @@ config SRF04
To compile this driver as a module, choose M here: the
module will be called srf04.
+config SX9310
+ tristate "SX9310/SX9311 Semtech proximity sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here to build a driver for Semtech's SX9310/SX9311 capacitive
+ proximity/button sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sx9310.
+
config SX9500
tristate "SX9500 Semtech proximity sensor"
select IIO_BUFFER
@@ -127,6 +140,17 @@ config SRF08
To compile this driver as a module, choose M here: the
module will be called srf08.
+config VCNL3020
+ tristate "VCNL3020 proximity sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VCNL3020
+ proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vcnl3020.
+
config VL53L0X_I2C
tristate "STMicroelectronics VL53L0X ToF ranger sensor (I2C)"
depends on I2C
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index c591b019304e..9c1aca1a8b79 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -12,6 +12,8 @@ obj-$(CONFIG_PING) += ping.o
obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
obj-$(CONFIG_SRF08) += srf08.o
+obj-$(CONFIG_SX9310) += sx9310.o
obj-$(CONFIG_SX9500) += sx9500.o
+obj-$(CONFIG_VCNL3020) += vcnl3020.o
obj-$(CONFIG_VL53L0X_I2C) += vl53l0x-i2c.o
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
index 12b893c5b0ee..2e99eeb27f2e 100644
--- a/drivers/iio/proximity/ping.c
+++ b/drivers/iio/proximity/ping.c
@@ -89,14 +89,14 @@ static irqreturn_t ping_handle_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ping_read(struct ping_data *data)
+static int ping_read(struct iio_dev *indio_dev)
{
+ struct ping_data *data = iio_priv(indio_dev);
int ret;
ktime_t ktime_dt;
s64 dt_ns;
u32 time_ns, distance_mm;
struct platform_device *pdev = to_platform_device(data->dev);
- struct iio_dev *indio_dev = iio_priv_to_dev(data);
/*
* just one read-echo-cycle can take place at a time
@@ -228,7 +228,6 @@ static int ping_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
int *val2, long info)
{
- struct ping_data *data = iio_priv(indio_dev);
int ret;
if (channel->type != IIO_DISTANCE)
@@ -236,7 +235,7 @@ static int ping_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = ping_read(data);
+ ret = ping_read(indio_dev);
if (ret < 0)
return ret;
*val = ret;
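iio_priv_to_dev() is being phased out; the preferred shape is to pass struct iio_dev around and derive the private state from it, never the reverse (sketch; foo_* names illustrative):

static int foo_read(struct iio_dev *indio_dev)
{
	struct foo_data *data = iio_priv(indio_dev);	/* forward mapping only */

	return foo_do_read(data);
}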
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
new file mode 100644
index 000000000000..d161f3061e35
--- /dev/null
+++ b/drivers/iio/proximity/sx9310.c
@@ -0,0 +1,1069 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC.
+ *
+ * Driver for Semtech's SX9310/SX9311 capacitive proximity/button solution.
+ * Based on SX9500 driver and Semtech driver using the input framework
+ * <https://my.syncplicity.com/share/teouwsim8niiaud/
+ * linux-driver-SX9310_NoSmartHSensing>.
+ * Reworked April 2019 by Evan Green <evgreen@chromium.org>
+ * and January 2020 by Daniel Campello <campello@chromium.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+/* Register definitions. */
+#define SX9310_REG_IRQ_SRC 0x00
+#define SX9310_REG_STAT0 0x01
+#define SX9310_REG_STAT1 0x02
+#define SX9310_REG_IRQ_MSK 0x03
+#define SX9310_CONVDONE_IRQ BIT(3)
+#define SX9310_FAR_IRQ BIT(5)
+#define SX9310_CLOSE_IRQ BIT(6)
+#define SX9310_EVENT_IRQ (SX9310_FAR_IRQ | \
+ SX9310_CLOSE_IRQ)
+#define SX9310_REG_IRQ_FUNC 0x04
+
+#define SX9310_REG_PROX_CTRL0 0x10
+#define SX9310_REG_PROX_CTRL0_PROXSTAT2 0x10
+#define SX9310_REG_PROX_CTRL0_EN_MASK 0x0F
+#define SX9310_REG_PROX_CTRL1 0x11
+#define SX9310_REG_PROX_CTRL2 0x12
+#define SX9310_REG_PROX_CTRL2_COMBMODE_ALL 0x80
+#define SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC 0x04
+#define SX9310_REG_PROX_CTRL3 0x13
+#define SX9310_REG_PROX_CTRL3_GAIN0_X8 0x0c
+#define SX9310_REG_PROX_CTRL3_GAIN12_X4 0x02
+#define SX9310_REG_PROX_CTRL4 0x14
+#define SX9310_REG_PROX_CTRL4_RESOLUTION_FINEST 0x07
+#define SX9310_REG_PROX_CTRL5 0x15
+#define SX9310_REG_PROX_CTRL5_RANGE_SMALL 0xc0
+#define SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 0x04
+#define SX9310_REG_PROX_CTRL5_RAWFILT_1P25 0x02
+#define SX9310_REG_PROX_CTRL6 0x16
+#define SX9310_REG_PROX_CTRL6_COMP_COMMON 0x20
+#define SX9310_REG_PROX_CTRL7 0x17
+#define SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 0x08
+#define SX9310_REG_PROX_CTRL7_AVGPOSFILT_512 0x05
+#define SX9310_REG_PROX_CTRL8 0x18
+#define SX9310_REG_PROX_CTRL9 0x19
+#define SX9310_REG_PROX_CTRL8_9_PTHRESH12_28 0x40
+#define SX9310_REG_PROX_CTRL8_9_PTHRESH_96 0x88
+#define SX9310_REG_PROX_CTRL8_9_BODYTHRESH_900 0x03
+#define SX9310_REG_PROX_CTRL8_9_BODYTHRESH_1500 0x05
+#define SX9310_REG_PROX_CTRL10 0x1a
+#define SX9310_REG_PROX_CTRL10_HYST_6PCT 0x10
+#define SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_8 0x12
+#define SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_8 0x03
+#define SX9310_REG_PROX_CTRL11 0x1b
+#define SX9310_REG_PROX_CTRL12 0x1c
+#define SX9310_REG_PROX_CTRL13 0x1d
+#define SX9310_REG_PROX_CTRL14 0x1e
+#define SX9310_REG_PROX_CTRL15 0x1f
+#define SX9310_REG_PROX_CTRL16 0x20
+#define SX9310_REG_PROX_CTRL17 0x21
+#define SX9310_REG_PROX_CTRL18 0x22
+#define SX9310_REG_PROX_CTRL19 0x23
+#define SX9310_REG_SAR_CTRL0 0x2a
+#define SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES 0x40
+#define SX9310_REG_SAR_CTRL0_SARHYST_8 0x10
+#define SX9310_REG_SAR_CTRL1 0x2b
+/* Each increment of the slope register is 0.0078125. */
+#define SX9310_REG_SAR_CTRL1_SLOPE(_hnslope) (_hnslope / 78125)
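+/*
+ * Example (illustrative): SLOPE(10781250) == 10781250 / 78125 == 138
+ * register steps, i.e. a slope of 138 * 0.0078125 = 1.078125.
+ */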
+#define SX9310_REG_SAR_CTRL2 0x2c
+#define SX9310_REG_SAR_CTRL2_SAROFFSET_DEFAULT 0x3c
+
+#define SX9310_REG_SENSOR_SEL 0x30
+
+#define SX9310_REG_USE_MSB 0x31
+#define SX9310_REG_USE_LSB 0x32
+
+#define SX9310_REG_AVG_MSB 0x33
+#define SX9310_REG_AVG_LSB 0x34
+
+#define SX9310_REG_DIFF_MSB 0x35
+#define SX9310_REG_DIFF_LSB 0x36
+
+#define SX9310_REG_OFFSET_MSB 0x37
+#define SX9310_REG_OFFSET_LSB 0x38
+
+#define SX9310_REG_SAR_MSB 0x39
+#define SX9310_REG_SAR_LSB 0x3a
+
+#define SX9310_REG_I2CADDR 0x40
+#define SX9310_REG_PAUSE 0x41
+#define SX9310_REG_WHOAMI 0x42
+#define SX9310_WHOAMI_VALUE 0x01
+#define SX9311_WHOAMI_VALUE 0x02
+
+#define SX9310_REG_RESET 0x7f
+#define SX9310_SOFT_RESET 0xde
+
+#define SX9310_SCAN_PERIOD_MASK GENMASK(7, 4)
+#define SX9310_SCAN_PERIOD_SHIFT 4
+
+#define SX9310_COMPSTAT_MASK GENMASK(3, 0)
+
+/* 4 hardware channels, as defined in STAT0: COMB, CS2, CS1 and CS0. */
+#define SX9310_NUM_CHANNELS 4
+#define SX9310_CHAN_ENABLED_MASK GENMASK(3, 0)
+
+struct sx9310_data {
+ /* Serialize access to registers and channel configuration */
+ struct mutex mutex;
+ struct i2c_client *client;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ /*
+ * Last reading of the proximity status for each channel.
+ * We only send an event to user space when this changes.
+ */
+ bool prox_stat[SX9310_NUM_CHANNELS];
+ bool trigger_enabled;
+ __be16 buffer[SX9310_NUM_CHANNELS +
+ 4]; /* 64-bit data + 64-bit timestamp */
+ /* Remember enabled channels and sample rate during suspend. */
+ unsigned int suspend_ctrl0;
+ struct completion completion;
+ unsigned int chan_read, chan_event;
+ int channel_users[SX9310_NUM_CHANNELS];
+ int whoami;
+};
+
+static const struct iio_event_spec sx9310_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define SX9310_NAMED_CHANNEL(idx, name) \
+ { \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .indexed = 1, \
+ .channel = idx, \
+ .extend_name = name, \
+ .address = SX9310_REG_DIFF_MSB, \
+ .event_spec = sx9310_events, \
+ .num_event_specs = ARRAY_SIZE(sx9310_events), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+#define SX9310_CHANNEL(idx) SX9310_NAMED_CHANNEL(idx, NULL)
+
+static const struct iio_chan_spec sx9310_channels[] = {
+ SX9310_CHANNEL(0), /* CS0 */
+ SX9310_CHANNEL(1), /* CS1 */
+ SX9310_CHANNEL(2), /* CS2 */
+ SX9310_NAMED_CHANNEL(3, "comb"), /* COMB */
+
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/*
+ * Each entry contains the integer part (val) and the fractional part
+ * (val2), in microhertz. It conforms to the IIO_VAL_INT_PLUS_MICRO
+ * output format.
+ */
+static const struct {
+ int val;
+ int val2;
+} sx9310_samp_freq_table[] = {
+ { 500, 0 }, /* 0000: Min (no idle time) */
+ { 66, 666666 }, /* 0001: 15 ms */
+ { 33, 333333 }, /* 0010: 30 ms (Typ.) */
+ { 22, 222222 }, /* 0011: 45 ms */
+ { 16, 666666 }, /* 0100: 60 ms */
+ { 11, 111111 }, /* 0101: 90 ms */
+ { 8, 333333 }, /* 0110: 120 ms */
+ { 5, 0 }, /* 0111: 200 ms */
+ { 2, 500000 }, /* 1000: 400 ms */
+ { 1, 666666 }, /* 1001: 600 ms */
+ { 1, 250000 }, /* 1010: 800 ms */
+ { 1, 0 }, /* 1011: 1 s */
+ { 0, 500000 }, /* 1100: 2 s */
+ { 0, 333333 }, /* 1101: 3 s */
+ { 0, 250000 }, /* 1110: 4 s */
+ { 0, 200000 }, /* 1111: 5 s */
+};
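+/*
+ * Example (illustrative): register value 0001 selects a 15 ms scan
+ * period, i.e. 1 / 0.015 s = 66.666666 Hz, reported to user space as
+ * val = 66, val2 = 666666.
+ */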
+static const unsigned int sx9310_scan_period_table[] = {
+ 2, 15, 30, 45, 60, 90, 120, 200,
+ 400, 600, 800, 1000, 2000, 3000, 4000, 5000,
+};
+
+static ssize_t sx9310_show_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sx9310_samp_freq_table); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%d ",
+ sx9310_samp_freq_table[i].val,
+ sx9310_samp_freq_table[i].val2);
+ buf[len - 1] = '\n';
+ return len;
+}
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sx9310_show_samp_freq_avail);
+
+static const struct regmap_range sx9310_writable_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_MSK, SX9310_REG_IRQ_FUNC),
+ regmap_reg_range(SX9310_REG_PROX_CTRL0, SX9310_REG_PROX_CTRL19),
+ regmap_reg_range(SX9310_REG_SAR_CTRL0, SX9310_REG_SAR_CTRL2),
+ regmap_reg_range(SX9310_REG_SENSOR_SEL, SX9310_REG_SENSOR_SEL),
+ regmap_reg_range(SX9310_REG_OFFSET_MSB, SX9310_REG_OFFSET_LSB),
+ regmap_reg_range(SX9310_REG_PAUSE, SX9310_REG_PAUSE),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_writeable_regs = {
+ .yes_ranges = sx9310_writable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_writable_reg_ranges),
+};
+
+static const struct regmap_range sx9310_readable_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_SRC, SX9310_REG_IRQ_FUNC),
+ regmap_reg_range(SX9310_REG_PROX_CTRL0, SX9310_REG_PROX_CTRL19),
+ regmap_reg_range(SX9310_REG_SAR_CTRL0, SX9310_REG_SAR_CTRL2),
+ regmap_reg_range(SX9310_REG_SENSOR_SEL, SX9310_REG_SAR_LSB),
+ regmap_reg_range(SX9310_REG_I2CADDR, SX9310_REG_WHOAMI),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_readable_regs = {
+ .yes_ranges = sx9310_readable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_readable_reg_ranges),
+};
+
+static const struct regmap_range sx9310_volatile_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_SRC, SX9310_REG_STAT1),
+ regmap_reg_range(SX9310_REG_USE_MSB, SX9310_REG_DIFF_LSB),
+ regmap_reg_range(SX9310_REG_SAR_MSB, SX9310_REG_SAR_LSB),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_volatile_regs = {
+ .yes_ranges = sx9310_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_volatile_reg_ranges),
+};
+
+static const struct regmap_config sx9310_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = SX9310_REG_RESET,
+ .cache_type = REGCACHE_RBTREE,
+
+ .wr_table = &sx9310_writeable_regs,
+ .rd_table = &sx9310_readable_regs,
+ .volatile_table = &sx9310_volatile_regs,
+};
+
+static int sx9310_update_chan_en(struct sx9310_data *data,
+ unsigned int chan_read,
+ unsigned int chan_event)
+{
+ int ret;
+
+ if ((data->chan_read | data->chan_event) != (chan_read | chan_event)) {
+ ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL0,
+ SX9310_CHAN_ENABLED_MASK,
+ chan_read | chan_event);
+ if (ret)
+ return ret;
+ }
+ data->chan_read = chan_read;
+ data->chan_event = chan_event;
+ return 0;
+}
+
+static int sx9310_get_read_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read | BIT(channel),
+ data->chan_event);
+}
+
+static int sx9310_put_read_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read & ~BIT(channel),
+ data->chan_event);
+}
+
+static int sx9310_get_event_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read,
+ data->chan_event | BIT(channel));
+}
+
+static int sx9310_put_event_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read,
+ data->chan_event & ~BIT(channel));
+}
+
+static int sx9310_enable_irq(struct sx9310_data *data, unsigned int irq)
+{
+ return regmap_update_bits(data->regmap, SX9310_REG_IRQ_MSK, irq, irq);
+}
+
+static int sx9310_disable_irq(struct sx9310_data *data, unsigned int irq)
+{
+ return regmap_update_bits(data->regmap, SX9310_REG_IRQ_MSK, irq, 0);
+}
+
+static int sx9310_read_prox_data(struct sx9310_data *data,
+ const struct iio_chan_spec *chan, __be16 *val)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, SX9310_REG_SENSOR_SEL, chan->channel);
+ if (ret < 0)
+ return ret;
+
+ return regmap_bulk_read(data->regmap, chan->address, val, 2);
+}
+
+/*
+ * If we have no interrupt support, we have to wait for a scan period
+ * after enabling a channel to get a result.
+ */
+static int sx9310_wait_for_sample(struct sx9310_data *data)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &val);
+ if (ret < 0)
+ return ret;
+
+ val = (val & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+
+ msleep(sx9310_scan_period_table[val]);
+
+ return 0;
+}
+
+static int sx9310_read_proximity(struct sx9310_data *data,
+ const struct iio_chan_spec *chan, int *val)
+{
+ int ret = 0;
+ __be16 rawval;
+
+ mutex_lock(&data->mutex);
+
+ ret = sx9310_get_read_channel(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ ret = sx9310_enable_irq(data, SX9310_CONVDONE_IRQ);
+ if (ret < 0)
+ goto out_put_channel;
+
+ mutex_unlock(&data->mutex);
+
+ if (data->client->irq > 0) {
+ ret = wait_for_completion_interruptible(&data->completion);
+ reinit_completion(&data->completion);
+ } else {
+ ret = sx9310_wait_for_sample(data);
+ }
+
+ mutex_lock(&data->mutex);
+
+ if (ret < 0)
+ goto out_disable_irq;
+
+ ret = sx9310_read_prox_data(data, chan, &rawval);
+ if (ret < 0)
+ goto out_disable_irq;
+
+ *val = sign_extend32(be16_to_cpu(rawval),
+ (chan->address == SX9310_REG_DIFF_MSB ? 11 : 15));
+
+ ret = sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
+ if (ret < 0)
+ goto out_put_channel;
+
+ ret = sx9310_put_read_channel(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ mutex_unlock(&data->mutex);
+
+ return IIO_VAL_INT;
+
+out_disable_irq:
+ sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
+out_put_channel:
+ sx9310_put_read_channel(data, chan->channel);
+out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx9310_read_samp_freq(struct sx9310_data *data, int *val, int *val2)
+{
+ unsigned int regval;
+ int ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &regval);
+
+ if (ret < 0)
+ return ret;
+
+ regval = (regval & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+ *val = sx9310_samp_freq_table[regval].val;
+ *val2 = sx9310_samp_freq_table[regval].val2;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int sx9310_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val,
+ int *val2, long mask)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = sx9310_read_proximity(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return sx9310_read_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sx9310_set_samp_freq(struct sx9310_data *data, int val, int val2)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(sx9310_samp_freq_table); i++)
+ if (val == sx9310_samp_freq_table[i].val &&
+ val2 == sx9310_samp_freq_table[i].val2)
+ break;
+
+ if (i == ARRAY_SIZE(sx9310_samp_freq_table))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL0,
+ SX9310_SCAN_PERIOD_MASK,
+ i << SX9310_SCAN_PERIOD_SHIFT);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx9310_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2,
+ long mask)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ return sx9310_set_samp_freq(data, val, val2);
+}
+
+static irqreturn_t sx9310_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ if (data->trigger_enabled)
+ iio_trigger_poll(data->trig);
+
+ /*
+ * Even if no event is enabled, we need to wake the thread to
+ * clear the interrupt state by reading SX9310_REG_IRQ_SRC. It
+ * is not possible to do that here because regmap_read takes a
+ * mutex.
+ */
+ return IRQ_WAKE_THREAD;
+}
+
+static void sx9310_push_events(struct iio_dev *indio_dev)
+{
+ int ret;
+ unsigned int val, chan;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns(indio_dev);
+
+ /* Read proximity state on all channels */
+ ret = regmap_read(data->regmap, SX9310_REG_STAT0, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ return;
+ }
+
+ for (chan = 0; chan < SX9310_NUM_CHANNELS; chan++) {
+ int dir;
+ u64 ev;
+ bool new_prox = val & BIT(chan);
+
+ if (!(data->chan_event & BIT(chan)))
+ continue;
+ if (new_prox == data->prox_stat[chan])
+ /* No change on this channel. */
+ continue;
+
+ dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
+ ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
+ IIO_EV_TYPE_THRESH, dir);
+
+ iio_push_event(indio_dev, ev, timestamp);
+ data->prox_stat[chan] = new_prox;
+ }
+}
+
+static irqreturn_t sx9310_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int val;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ goto out;
+ }
+
+ if (val & SX9310_EVENT_IRQ)
+ sx9310_push_events(indio_dev);
+
+ if (val & SX9310_CONVDONE_IRQ)
+ complete(&data->completion);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int sx9310_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ return !!(data->chan_event & BIT(chan->channel));
+}
+
+static int sx9310_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* If the state hasn't changed, there's nothing to do. */
+ if (!!(data->chan_event & BIT(chan->channel)) == state)
+ return 0;
+
+ mutex_lock(&data->mutex);
+ if (state) {
+ ret = sx9310_get_event_channel(data, chan->channel);
+ if (ret < 0)
+ goto out_unlock;
+ if (!(data->chan_event & ~BIT(chan->channel))) {
+ ret = sx9310_enable_irq(data, SX9310_EVENT_IRQ);
+ if (ret < 0)
+ sx9310_put_event_channel(data, chan->channel);
+ }
+ } else {
+ ret = sx9310_put_event_channel(data, chan->channel);
+ if (ret < 0)
+ goto out_unlock;
+ if (!data->chan_event) {
+ ret = sx9310_disable_irq(data, SX9310_EVENT_IRQ);
+ if (ret < 0)
+ sx9310_get_event_channel(data, chan->channel);
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static struct attribute *sx9310_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sx9310_attribute_group = {
+ .attrs = sx9310_attributes,
+};
+
+static const struct iio_info sx9310_info = {
+ .attrs = &sx9310_attribute_group,
+ .read_raw = sx9310_read_raw,
+ .write_raw = sx9310_write_raw,
+ .read_event_config = sx9310_read_event_config,
+ .write_event_config = sx9310_write_event_config,
+};
+
+static int sx9310_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret = 0;
+
+ mutex_lock(&data->mutex);
+
+ if (state)
+ ret = sx9310_enable_irq(data, SX9310_CONVDONE_IRQ);
+ else if (!data->chan_read)
+ ret = sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
+ if (ret < 0)
+ goto out;
+
+ data->trigger_enabled = state;
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops sx9310_trigger_ops = {
+ .set_trigger_state = sx9310_set_trigger_state,
+};
+
+static irqreturn_t sx9310_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ __be16 val;
+ int bit, ret, i = 0;
+
+ mutex_lock(&data->mutex);
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = sx9310_read_prox_data(data, &indio_dev->channels[bit],
+ &val);
+ if (ret < 0)
+ goto out;
+
+ data->buffer[i++] = val;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ pf->timestamp);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int sx9310_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ unsigned int channels = 0;
+ int bit, ret;
+
+ mutex_lock(&data->mutex);
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength)
+ channels |= BIT(indio_dev->channels[bit].channel);
+
+ ret = sx9310_update_chan_en(data, channels, data->chan_event);
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int sx9310_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = sx9310_update_chan_en(data, 0, data->chan_event);
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops sx9310_buffer_setup_ops = {
+ .preenable = sx9310_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = sx9310_buffer_postdisable,
+};
+
+struct sx9310_reg_default {
+ u8 reg;
+ u8 def;
+};
+
+#define SX_INIT(_reg, _def) \
+ { \
+ .reg = SX9310_REG_##_reg, \
+ .def = _def, \
+ }
+
+static const struct sx9310_reg_default sx9310_default_regs[] = {
+ SX_INIT(IRQ_MSK, 0x00),
+ SX_INIT(IRQ_FUNC, 0x00),
+ /*
+ * The lower 4 bits should not be set, as they enable sensor measurements.
+ * Turning the detection on before the configuration values are set to
+ * good values can cause the device to return erroneous readings.
+ */
+ SX_INIT(PROX_CTRL0, SX9310_REG_PROX_CTRL0_PROXSTAT2),
+ SX_INIT(PROX_CTRL1, 0x00),
+ SX_INIT(PROX_CTRL2, SX9310_REG_PROX_CTRL2_COMBMODE_ALL |
+ SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC),
+ SX_INIT(PROX_CTRL3, SX9310_REG_PROX_CTRL3_GAIN0_X8 |
+ SX9310_REG_PROX_CTRL3_GAIN12_X4),
+ SX_INIT(PROX_CTRL4, SX9310_REG_PROX_CTRL4_RESOLUTION_FINEST),
+ SX_INIT(PROX_CTRL5, SX9310_REG_PROX_CTRL5_RANGE_SMALL |
+ SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 |
+ SX9310_REG_PROX_CTRL5_RAWFILT_1P25),
+ SX_INIT(PROX_CTRL6, SX9310_REG_PROX_CTRL6_COMP_COMMON),
+ SX_INIT(PROX_CTRL7, SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 |
+ SX9310_REG_PROX_CTRL7_AVGPOSFILT_512),
+ SX_INIT(PROX_CTRL8, SX9310_REG_PROX_CTRL8_9_PTHRESH_96 |
+ SX9310_REG_PROX_CTRL8_9_BODYTHRESH_1500),
+ SX_INIT(PROX_CTRL9, SX9310_REG_PROX_CTRL8_9_PTHRESH12_28 |
+ SX9310_REG_PROX_CTRL8_9_BODYTHRESH_900),
+ SX_INIT(PROX_CTRL10, SX9310_REG_PROX_CTRL10_HYST_6PCT |
+ SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_8 |
+ SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_8),
+ SX_INIT(PROX_CTRL11, 0x00),
+ SX_INIT(PROX_CTRL12, 0x00),
+ SX_INIT(PROX_CTRL13, 0x00),
+ SX_INIT(PROX_CTRL14, 0x00),
+ SX_INIT(PROX_CTRL15, 0x00),
+ SX_INIT(PROX_CTRL16, 0x00),
+ SX_INIT(PROX_CTRL17, 0x00),
+ SX_INIT(PROX_CTRL18, 0x00),
+ SX_INIT(PROX_CTRL19, 0x00),
+ SX_INIT(SAR_CTRL0, SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES |
+ SX9310_REG_SAR_CTRL0_SARHYST_8),
+ SX_INIT(SAR_CTRL1, SX9310_REG_SAR_CTRL1_SLOPE(10781250)),
+ SX_INIT(SAR_CTRL2, SX9310_REG_SAR_CTRL2_SAROFFSET_DEFAULT),
+};
+
+/* Activate all channels and perform an initial compensation. */
+static int sx9310_init_compensation(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int i, ret;
+ unsigned int val;
+ unsigned int ctrl0;
+
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &ctrl0);
+ if (ret < 0)
+ return ret;
+
+ /* run the compensation phase on all channels */
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
+ ctrl0 | SX9310_REG_PROX_CTRL0_EN_MASK);
+ if (ret < 0)
+ return ret;
+
+ for (i = 100; i >= 0; i--) {
+ msleep(20);
+ ret = regmap_read(data->regmap, SX9310_REG_STAT1, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & SX9310_COMPSTAT_MASK))
+ break;
+ }
+
+ if (i < 0) {
+ dev_err(&data->client->dev,
+ "initial compensation timed out: 0x%02x", val);
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
+ return ret;
+}
+
+static int sx9310_init_device(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ const struct sx9310_reg_default *initval;
+ int ret;
+ unsigned int i, val;
+
+ ret = regmap_write(data->regmap, SX9310_REG_RESET, SX9310_SOFT_RESET);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000); /* power-up time is ~1ms. */
+
+ /* Clear reset interrupt state by reading SX9310_REG_IRQ_SRC. */
+ ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Program some sane defaults. */
+ for (i = 0; i < ARRAY_SIZE(sx9310_default_regs); i++) {
+ initval = &sx9310_default_regs[i];
+ ret = regmap_write(data->regmap, initval->reg, initval->def);
+ if (ret < 0)
+ return ret;
+ }
+
+ return sx9310_init_compensation(indio_dev);
+}
+
+static int sx9310_set_indio_dev_name(struct device *dev,
+ struct iio_dev *indio_dev,
+ const struct i2c_device_id *id, int whoami)
+{
+ const struct acpi_device_id *acpi_id;
+
+ /* id will be NULL when enumerated via ACPI */
+ if (id) {
+ if (id->driver_data != whoami)
+ dev_err(dev, "WHOAMI does not match i2c_device_id: %s",
+ id->name);
+ } else if (ACPI_HANDLE(dev)) {
+ acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!acpi_id)
+ return -ENODEV;
+ if (acpi_id->driver_data != whoami)
+ dev_err(dev, "WHOAMI does not match acpi_device_id: %s",
+ acpi_id->id);
+ } else
+ return -ENODEV;
+
+ switch (whoami) {
+ case SX9310_WHOAMI_VALUE:
+ indio_dev->name = "sx9310";
+ break;
+ case SX9311_WHOAMI_VALUE:
+ indio_dev->name = "sx9311";
+ break;
+ default:
+ dev_err(dev, "unexpected WHOAMI response: %u", whoami);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int sx9310_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct sx9310_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->mutex);
+ init_completion(&data->completion);
+
+ data->regmap = devm_regmap_init_i2c(client, &sx9310_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ ret = regmap_read(data->regmap, SX9310_REG_WHOAMI, &data->whoami);
+ if (ret < 0) {
+ dev_err(&client->dev, "error in reading WHOAMI register: %d",
+ ret);
+ return ret;
+ }
+
+ ret = sx9310_set_indio_dev_name(&client->dev, indio_dev, id,
+ data->whoami);
+ if (ret < 0)
+ return ret;
+
+ ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(&client->dev));
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = sx9310_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sx9310_channels);
+ indio_dev->info = &sx9310_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = sx9310_init_device(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ sx9310_irq_handler,
+ sx9310_irq_thread_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "sx9310_event", indio_dev);
+ if (ret < 0)
+ return ret;
+
+ data->trig =
+ devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->dev.parent = &client->dev;
+ data->trig->ops = &sx9310_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = devm_iio_trigger_register(&client->dev, data->trig);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ iio_pollfunc_store_time,
+ sx9310_trigger_handler,
+ &sx9310_buffer_setup_ops);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int __maybe_unused sx9310_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx9310_data *data = iio_priv(indio_dev);
+ u8 ctrl0;
+ int ret;
+
+ disable_irq_nosync(data->client->irq);
+
+ mutex_lock(&data->mutex);
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0,
+ &data->suspend_ctrl0);
+
+ if (ret)
+ goto out;
+
+ ctrl0 = data->suspend_ctrl0 & ~SX9310_REG_PROX_CTRL0_EN_MASK;
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 0);
+
+out:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int __maybe_unused sx9310_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 1);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
+ data->suspend_ctrl0);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ enable_irq(data->client->irq);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sx9310_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sx9310_suspend, sx9310_resume)
+};
+
+static const struct acpi_device_id sx9310_acpi_match[] = {
+ { "STH9310", SX9310_WHOAMI_VALUE },
+ { "STH9311", SX9311_WHOAMI_VALUE },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, sx9310_acpi_match);
+
+static const struct of_device_id sx9310_of_match[] = {
+ { .compatible = "semtech,sx9310" },
+ { .compatible = "semtech,sx9311" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sx9310_of_match);
+
+static const struct i2c_device_id sx9310_id[] = {
+ { "sx9310", SX9310_WHOAMI_VALUE },
+ { "sx9311", SX9311_WHOAMI_VALUE },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, sx9310_id);
+
+static struct i2c_driver sx9310_driver = {
+ .driver = {
+ .name = "sx9310",
+ .acpi_match_table = ACPI_PTR(sx9310_acpi_match),
+ .of_match_table = of_match_ptr(sx9310_of_match),
+ .pm = &sx9310_pm_ops,
+ },
+ .probe = sx9310_probe,
+ .id_table = sx9310_id,
+};
+module_i2c_driver(sx9310_driver);
+
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_AUTHOR("Daniel Campello <campello@chromium.org>");
+MODULE_DESCRIPTION("Driver for Semtech SX9310/SX9311 proximity sensor");
+MODULE_LICENSE("GPL v2");
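The open-coded poll loop in sx9310_init_compensation() above can also be expressed with the regmap_read_poll_timeout() helper (the vcnl3020 driver below relies on it). A minimal sketch, reusing the driver's SX9310_REG_STAT1 and SX9310_COMPSTAT_MASK definitions; the 20 ms poll interval and 2 s timeout are illustrative assumptions, not values taken from the driver:

	/* Sketch: the same wait as the loop above, via the regmap helper. */
	static int sx9310_wait_compensation(struct sx9310_data *data)
	{
		unsigned int val;

		/* Poll STAT1 until the compensation-busy bits clear. */
		return regmap_read_poll_timeout(data->regmap, SX9310_REG_STAT1,
						val,
						!(val & SX9310_COMPSTAT_MASK),
						20000, 2000000);
	}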
diff --git a/drivers/iio/proximity/vcnl3020.c b/drivers/iio/proximity/vcnl3020.c
new file mode 100644
index 000000000000..9ff1a164c2e6
--- /dev/null
+++ b/drivers/iio/proximity/vcnl3020.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support for Vishay VCNL3020 proximity sensor on i2c bus.
+ * Based on Vishay VCNL4000 driver code.
+ *
+ * TODO: interrupts.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define VCNL3020_PROD_ID 0x21
+
+#define VCNL_COMMAND 0x80 /* Command register */
+#define VCNL_PROD_REV 0x81 /* Product ID and Revision ID */
+#define VCNL_PROXIMITY_RATE 0x82 /* Rate of Proximity Measurement */
+#define VCNL_LED_CURRENT 0x83 /* IR LED current for proximity mode */
+#define VCNL_PS_RESULT_HI 0x87 /* Proximity result register, MSB */
+#define VCNL_PS_RESULT_LO 0x88 /* Proximity result register, LSB */
+#define VCNL_PS_ICR 0x89 /* Interrupt Control Register */
+#define VCNL_PS_LO_THR_HI 0x8a /* High byte of low threshold value */
+#define VCNL_PS_LO_THR_LO 0x8b /* Low byte of low threshold value */
+#define VCNL_PS_HI_THR_HI 0x8c /* High byte of high threshold value */
+#define VCNL_PS_HI_THR_LO 0x8d /* Low byte of high threshold value */
+#define VCNL_ISR 0x8e /* Interrupt Status Register */
+#define VCNL_PS_MOD_ADJ 0x8f /* Proximity Modulator Timing Adjustment */
+
+/* Bit masks for COMMAND register */
+#define VCNL_PS_RDY BIT(5) /* proximity data ready? */
+#define VCNL_PS_OD BIT(3) /* start on-demand proximity
+ * measurement
+ */
+
+#define VCNL_ON_DEMAND_TIMEOUT_US 100000
+#define VCNL_POLL_US 20000
+
+/**
+ * struct vcnl3020_data - vcnl3020 specific data.
+ * @regmap: device register map.
+ * @dev: vcnl3020 device.
+ * @rev: revision id.
+ * @lock: lock for protecting access to device hardware registers.
+ */
+struct vcnl3020_data {
+ struct regmap *regmap;
+ struct device *dev;
+ u8 rev;
+ struct mutex lock;
+};
+
+/**
+ * struct vcnl3020_property - vcnl3020 property.
+ * @name: property name.
+ * @reg: i2c register offset.
+ * @conversion_func: conversion function.
+ */
+struct vcnl3020_property {
+ const char *name;
+ u32 reg;
+ u32 (*conversion_func)(u32 *val);
+};
+
+static u32 microamp_to_reg(u32 *val)
+{
+ /*
+ * Conversion from uA to the register value, in 10 mA steps:
+ * e.g. 200000 uA == 200 mA -> register value 20.
+ */
+ return *val /= 10000;
+}
+
+static struct vcnl3020_property vcnl3020_led_current_property = {
+ .name = "vishay,led-current-microamp",
+ .reg = VCNL_LED_CURRENT,
+ .conversion_func = microamp_to_reg,
+};
+
+static int vcnl3020_get_and_apply_property(struct vcnl3020_data *data,
+ struct vcnl3020_property prop)
+{
+ int rc;
+ u32 val;
+
+ rc = device_property_read_u32(data->dev, prop.name, &val);
+ if (rc)
+ return 0;
+
+ if (prop.conversion_func)
+ prop.conversion_func(&val);
+
+ rc = regmap_write(data->regmap, prop.reg, val);
+ if (rc) {
+ dev_err(data->dev, "Error (%d) setting property (%s)\n",
+ rc, prop.name);
+ }
+
+ return rc;
+}
+
+static int vcnl3020_init(struct vcnl3020_data *data)
+{
+ int rc;
+ unsigned int reg;
+
+ rc = regmap_read(data->regmap, VCNL_PROD_REV, &reg);
+ if (rc) {
+ dev_err(data->dev,
+ "Error (%d) reading product revision\n", rc);
+ return rc;
+ }
+
+ if (reg != VCNL3020_PROD_ID) {
+ dev_err(data->dev,
+ "Product id (%x) did not match vcnl3020 (%x)\n", reg,
+ VCNL3020_PROD_ID);
+ return -ENODEV;
+ }
+
+ data->rev = reg;
+ mutex_init(&data->lock);
+
+ return vcnl3020_get_and_apply_property(data,
+ vcnl3020_led_current_property);
+}
+
+static int vcnl3020_measure_proximity(struct vcnl3020_data *data, int *val)
+{
+ int rc;
+ unsigned int reg;
+ __be16 res;
+
+ mutex_lock(&data->lock);
+
+ rc = regmap_write(data->regmap, VCNL_COMMAND, VCNL_PS_OD);
+ if (rc)
+ goto err_unlock;
+
+ /* wait for data to become ready */
+ rc = regmap_read_poll_timeout(data->regmap, VCNL_COMMAND, reg,
+ reg & VCNL_PS_RDY, VCNL_POLL_US,
+ VCNL_ON_DEMAND_TIMEOUT_US);
+ if (rc) {
+ dev_err(data->dev,
+ "Error (%d) reading vcnl3020 command register\n", rc);
+ goto err_unlock;
+ }
+
+ /* high & low result bytes read */
+ rc = regmap_bulk_read(data->regmap, VCNL_PS_RESULT_HI, &res,
+ sizeof(res));
+ if (rc)
+ goto err_unlock;
+
+ *val = be16_to_cpu(res);
+
+err_unlock:
+ mutex_unlock(&data->lock);
+
+ return rc;
+}
+
+static const struct iio_chan_spec vcnl3020_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+};
+
+static int vcnl3020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int rc;
+ struct vcnl3020_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ rc = vcnl3020_measure_proximity(data, val);
+ if (rc)
+ return rc;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info vcnl3020_info = {
+ .read_raw = vcnl3020_read_raw,
+};
+
+static const struct regmap_config vcnl3020_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = VCNL_PS_MOD_ADJ,
+};
+
+static int vcnl3020_probe(struct i2c_client *client)
+{
+ struct vcnl3020_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int rc;
+
+ regmap = devm_regmap_init_i2c(client, &vcnl3020_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "regmap_init failed\n");
+ return PTR_ERR(regmap);
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->regmap = regmap;
+ data->dev = &client->dev;
+
+ rc = vcnl3020_init(data);
+ if (rc)
+ return rc;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &vcnl3020_info;
+ indio_dev->channels = vcnl3020_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vcnl3020_channels);
+ indio_dev->name = "vcnl3020";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct of_device_id vcnl3020_of_match[] = {
+ {
+ .compatible = "vishay,vcnl3020",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vcnl3020_of_match);
+
+static struct i2c_driver vcnl3020_driver = {
+ .driver = {
+ .name = "vcnl3020",
+ .of_match_table = vcnl3020_of_match,
+ },
+ .probe_new = vcnl3020_probe,
+};
+module_i2c_driver(vcnl3020_driver);
+
+MODULE_AUTHOR("Ivan Mikhaylov <i.mikhaylov@yadro.com>");
+MODULE_DESCRIPTION("Vishay VCNL3020 proximity sensor driver");
+MODULE_LICENSE("GPL");
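The vcnl3020_property table above turns register-backed firmware properties into data rather than code. A sketch of how a second entry could plug into the same helper; the "vishay,proximity-rate" property name is a hypothetical illustration, not a binding this driver defines:

	/* Hypothetical extra entry; property name is assumed, not a real binding. */
	static struct vcnl3020_property vcnl3020_prox_rate_property = {
		.name = "vishay,proximity-rate",
		.reg = VCNL_PROXIMITY_RATE,
		.conversion_func = NULL,	/* write the value as-is */
	};

Applied from vcnl3020_init() exactly like the LED-current entry:

	rc = vcnl3020_get_and_apply_property(data, vcnl3020_prox_rate_property);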
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index eda55b9c1e9b..8d1f434f109d 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -7,8 +7,6 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/trigger_consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -230,12 +228,8 @@ static int hid_temperature_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
- &iio_pollfunc_store_time, NULL, NULL);
- if (ret)
- return ret;
-
atomic_set(&temp_st->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&temp_st->common_attributes);
if (ret)
@@ -258,7 +252,7 @@ static int hid_temperature_probe(struct platform_device *pdev)
error_remove_callback:
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE);
error_remove_trigger:
- hid_sensor_remove_trigger(&temp_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &temp_st->common_attributes);
return ret;
}
@@ -270,7 +264,7 @@ static int hid_temperature_remove(struct platform_device *pdev)
struct temperature_state *temp_st = iio_priv(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE);
- hid_sensor_remove_trigger(&temp_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &temp_st->common_attributes);
return 0;
}
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index d39c0d6b77f1..8976e8d59826 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -390,8 +390,8 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
* For custom steinhart, the full u32 is taken. For all the others
* the MSB is discarded.
*/
- const u8 n_size = (is_steinhart == true) ? 4 : 3;
- const u8 e_size = (is_steinhart == true) ? sizeof(u32) : sizeof(u64);
+ const u8 n_size = is_steinhart ? 4 : 3;
+ const u8 e_size = is_steinhart ? sizeof(u32) : sizeof(u64);
n_entries = of_property_count_elems_of_size(np, propname, e_size);
/* n_entries must be an even number */
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index b4cb21ab2e85..b4c49a5d3685 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -14,6 +14,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/util_macros.h>
+#include <asm/unaligned.h>
#include <dt-bindings/iio/temperature/thermocouple.h>
/*
* The MSB of the register value determines whether the following byte will
@@ -168,7 +169,7 @@ static int max31856_thermocouple_read(struct max31856_data *data,
if (ret)
return ret;
/* Skip last 5 dead bits of LTCBL */
- *val = (reg_val[0] << 16 | reg_val[1] << 8 | reg_val[2]) >> 5;
+ *val = get_unaligned_be24(&reg_val[0]) >> 5;
/* Check 7th bit of LTCBH reg. value for sign*/
if (reg_val[0] & 0x80)
*val -= 0x80000;
@@ -185,7 +186,7 @@ static int max31856_thermocouple_read(struct max31856_data *data,
/* Get Cold Junction Temp. offset register value */
offset_cjto = reg_val[0];
/* Get CJTH and CJTL value and skip last 2 dead bits of CJTL */
- *val = (reg_val[1] << 8 | reg_val[2]) >> 2;
+ *val = get_unaligned_be16(&reg_val[1]) >> 2;
/* As per datasheet add offset into CJTH and CJTL */
*val += offset_cjto;
/* Check 7th bit of CJTH reg. value for sign */
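The get_unaligned_be24() conversion above still pairs with the manual "*val -= 0x80000" sign fixup. Since the 24-bit register value is shifted right by 5, bit 18 of the result is the sign bit, so the same decode can be written with sign_extend32(); a sketch with a hypothetical helper name:

	#include <linux/bitops.h>	/* sign_extend32() */
	#include <asm/unaligned.h>	/* get_unaligned_be24() */

	/* Equivalent decode of the 19-bit linearized thermocouple value. */
	static int max31856_decode_ltc(const u8 reg_val[3])
	{
		u32 raw = get_unaligned_be24(reg_val) >> 5; /* drop 5 dead bits */

		/* Bit 18 is the sign bit; replaces the -0x80000 fixup. */
		return sign_extend32(raw, 18);
	}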
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index a5e670726717..f59bf8d58586 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -4,7 +4,7 @@
*
* Copyright (C) Intuitive Aerial AB
* Written by Marten Svanfeldt, marten@intuitiveaerial.com
- * Copyright (C) 2012, Analog Device Inc.
+ * Copyright (C) 2012, Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
* Copyright (C) 2015, Intel Corporation
*/
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index ade86388434f..477418b37786 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -107,6 +107,7 @@ source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
+source "drivers/infiniband/ulp/rtrs/Kconfig"
source "drivers/infiniband/ulp/opa_vnic/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index d1b14887960e..24cb71a16a28 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -8,11 +8,11 @@ obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
- device.o fmr_pool.o cache.o netlink.o \
+ device.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
nldev.o restrack.o counters.o ib_core_uverbs.o \
- trace.o
+ trace.o lag.o
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
@@ -36,6 +36,9 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
uverbs_std_types_mr.o uverbs_std_types_counters.o \
uverbs_uapi.o uverbs_std_types_device.o \
- uverbs_std_types_async_fd.o
+ uverbs_std_types_async_fd.o \
+ uverbs_std_types_srq.o \
+ uverbs_std_types_wq.o \
+ uverbs_std_types_qp.o
ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 1753a9801b70..3a98439bba83 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -371,6 +371,8 @@ static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
(const void *)&dst_in6->sin6_addr;
sa_family_t family = dst_in->sa_family;
+ might_sleep();
+
/* If we have a gateway in IB mode then it must be an IB network */
if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
@@ -727,6 +729,8 @@ int roce_resolve_route_from_path(struct sa_path_rec *rec,
struct rdma_dev_addr dev_addr = {};
int ret;
+ might_sleep();
+
if (rec->roce.route_resolved)
return 0;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 717b798cddad..a670209bbce6 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1553,8 +1553,11 @@ int ib_cache_setup_one(struct ib_device *device)
if (err)
return err;
- rdma_for_each_port (device, p)
- ib_cache_update(device, p, true);
+ rdma_for_each_port (device, p) {
+ err = ib_cache_update(device, p, true);
+ if (err)
+ return err;
+ }
return 0;
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 17f14e0eafe4..9ce787e37e22 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -66,6 +66,8 @@ static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
+ [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
+ "vendor option is not supported",
};
const char *__attribute_const__ ibcm_reject_msg(int reason)
@@ -81,8 +83,11 @@ const char *__attribute_const__ ibcm_reject_msg(int reason)
EXPORT_SYMBOL(ibcm_reject_msg);
struct cm_id_private;
-static void cm_add_one(struct ib_device *device);
+struct cm_work;
+static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
+static void cm_process_work(struct cm_id_private *cm_id_priv,
+ struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
@@ -287,6 +292,8 @@ struct cm_id_private {
struct list_head work_list;
atomic_t work_count;
+
+ struct rdma_ucm_ece ece;
};
static void cm_work_handler(struct work_struct *work);
@@ -474,24 +481,19 @@ static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
-static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
- struct cm_av *av,
- struct cm_port *port)
+static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
+ struct cm_av *av, struct cm_port *port)
{
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(&cm.lock, flags);
-
if (&cm_id_priv->av == av)
list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
else if (&cm_id_priv->alt_av == av)
list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
else
- ret = -EINVAL;
-
+ WARN_ON(true);
spin_unlock_irqrestore(&cm.lock, flags);
- return ret;
}
static struct cm_port *
@@ -572,12 +574,7 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
return ret;
av->timeout = path->packet_life_time + 1;
-
- ret = add_cm_id_to_port_list(cm_id_priv, av, port);
- if (ret) {
- rdma_destroy_ah_attr(&new_ah_attr);
- return ret;
- }
+ add_cm_id_to_port_list(cm_id_priv, av, port);
rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
return 0;
}
@@ -587,11 +584,6 @@ static u32 cm_local_id(__be32 local_id)
return (__force u32) (local_id ^ cm.random_id_operand);
}
-static void cm_free_id(__be32 local_id)
-{
- xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
-}
-
static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
@@ -698,9 +690,10 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
if ((cm_id_priv->id.service_mask & service_id) ==
cm_id_priv->id.service_id &&
- (cm_id_priv->id.device == device))
+ (cm_id_priv->id.device == device)) {
+ refcount_inc(&cm_id_priv->refcount);
return cm_id_priv;
-
+ }
if (device < cm_id_priv->id.device)
node = node->rb_left;
else if (device > cm_id_priv->id.device)
@@ -745,12 +738,14 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
return NULL;
}
-static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
- __be32 remote_id)
+static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
+ __be32 remote_id)
{
struct rb_node *node = cm.remote_id_table.rb_node;
struct cm_timewait_info *timewait_info;
+ struct cm_id_private *res = NULL;
+ spin_lock_irq(&cm.lock);
while (node) {
timewait_info = rb_entry(node, struct cm_timewait_info,
remote_id_node);
@@ -762,10 +757,14 @@ static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
node = node->rb_left;
else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
node = node->rb_right;
- else
- return timewait_info;
+ else {
+ res = cm_acquire_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+ break;
+ }
}
- return NULL;
+ spin_unlock_irq(&cm.lock);
+ return res;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
@@ -917,6 +916,35 @@ static void cm_free_work(struct cm_work *work)
kfree(work);
}
+static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
+ struct cm_work *work)
+{
+ bool immediate;
+
+ /*
+ * To deliver the event to the user callback we have to drop the
+ * spinlock; however, we need to ensure that the user callback is
+ * single threaded and receives events in temporal order. If events
+ * are already being processed, thread new events onto a list and
+ * the thread currently processing will pick them up.
+ */
+ immediate = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!immediate) {
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ /*
+ * This routine always consumes the incoming reference. Once the
+ * work is queued on the work_list, a reference is held by the
+ * thread currently running cm_process_work() and this one is no
+ * longer needed.
+ */
+ cm_deref_id(cm_id_priv);
+ }
+ spin_unlock_irq(&cm_id_priv->lock);
+
+ if (immediate)
+ cm_process_work(cm_id_priv, work);
+}
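cm_queue_work_unlock() relies on the atomic_inc_and_test() idiom: assuming the work counter is initialized to -1 (that initialization is outside this hunk), only the increment that lands on zero elects the caller to run cm_process_work() directly; every other caller queues. A minimal sketch of the claim step:

	/* True only for the -1 -> 0 transition, i.e. no event in flight. */
	static bool demo_claim_event_processing(atomic_t *work_count)
	{
		return atomic_inc_and_test(work_count);
	}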
+
static inline int cm_convert_to_ms(int iba_time)
{
/* approximate conversion to ms from 4.096us x 2^iba_time */
@@ -942,8 +970,10 @@ static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
return min(31, ack_timeout);
}
-static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
+static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
+ struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
+
if (timewait_info->inserted_remote_id) {
rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
timewait_info->inserted_remote_id = 0;
@@ -982,7 +1012,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
return;
spin_lock_irqsave(&cm.lock, flags);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
spin_unlock_irqrestore(&cm.lock, flags);
@@ -1001,6 +1031,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
msecs_to_jiffies(wait_time));
spin_unlock_irqrestore(&cm.lock, flags);
+ /*
+ * The timewait_info is converted into a work and gets freed during
+ * cm_free_work() in cm_timewait_handler().
+ */
+ BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
cm_id_priv->timewait_info = NULL;
}
@@ -1013,7 +1048,7 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
cm_id_priv->id.state = IB_CM_IDLE;
if (cm_id_priv->timewait_info) {
spin_lock_irqsave(&cm.lock, flags);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
@@ -1076,7 +1111,9 @@ retest:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* Fall through */
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, NULL, 0);
+ goto retest;
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
@@ -1101,7 +1138,7 @@ retest:
case IB_CM_TIMEWAIT:
/*
* The cm_acquire_id in cm_timewait_handler will stop working
- * once we do cm_free_id() below, so just move to idle here for
+ * once we do xa_erase below, so just move to idle here for
* consistency.
*/
cm_id->state = IB_CM_IDLE;
@@ -1114,7 +1151,7 @@ retest:
spin_lock(&cm.lock);
/* Required for cleanup paths related cm_req_handler() */
if (cm_id_priv->timewait_info) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
}
@@ -1131,7 +1168,7 @@ retest:
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_id(cm_id->local_id);
+ xa_erase_irq(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
@@ -1287,6 +1324,13 @@ static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
hdr->tid = tid;
}
+static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
+ __be64 tid, u32 attr_mod)
+{
+ cm_format_mad_hdr(hdr, attr_id, tid);
+ hdr->attr_mod = cpu_to_be32(attr_mod);
+}
+
static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
@@ -1299,8 +1343,8 @@ static void cm_format_req(struct cm_req_msg *req_msg,
pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
pri_path->opa.slid);
- cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
- cm_form_tid(cm_id_priv));
+ cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
+ cm_form_tid(cm_id_priv), param->ece.attr_mod);
IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
be32_to_cpu(cm_id_priv->id.local_id));
@@ -1423,6 +1467,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
alt_path->packet_life_time));
}
+ IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
if (param->private_data && param->private_data_len)
IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
@@ -1779,6 +1824,9 @@ static void cm_format_req_event(struct cm_work *work,
param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
+ param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
+ param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
+
work->cm_event.private_data =
IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}
@@ -1927,7 +1975,6 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
struct cm_req_msg *req_msg;
- struct ib_cm_id *cm_id;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1948,7 +1995,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
/* Check for stale connections. */
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
@@ -1957,8 +2004,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
if (cur_cm_id_priv) {
- cm_id = &cur_cm_id_priv->id;
- ib_send_cm_dreq(cm_id, NULL, 0);
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
cm_deref_id(cur_cm_id_priv);
}
return NULL;
@@ -1969,14 +2015,13 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
cm_id_priv->id.device,
cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
if (!listen_cm_id_priv) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
NULL, 0);
return NULL;
}
- refcount_inc(&listen_cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
return listen_cm_id_priv;
}
@@ -2153,9 +2198,7 @@ static int cm_req_handler(struct cm_work *work)
/* Refcount belongs to the event, pairs with cm_process_work() */
refcount_inc(&cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->work_count);
- spin_unlock_irq(&cm_id_priv->lock);
- cm_process_work(cm_id_priv, work);
+ cm_queue_work_unlock(cm_id_priv, work);
/*
* Since this ID was just created and was not made visible to other MAD
* handlers until the cm_finalize_id() above we know that the
@@ -2176,7 +2219,8 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_rep_param *param)
{
- cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
+ cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
+ param->ece.attr_mod);
IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
be32_to_cpu(cm_id_priv->id.local_id));
IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
@@ -2203,6 +2247,10 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
}
+ IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
+ IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
+ IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
+
if (param->private_data && param->private_data_len)
IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
param->private_data_len);
@@ -2350,6 +2398,11 @@ static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
+ param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
+ param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
+
work->cm_event.private_data =
IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
}
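The REP vendor ID is a 24-bit quantity split across three 8-bit IBA fields by the IBA_SET() calls earlier and reassembled by the IBA_GET() calls above. A worked example for vendor_id 0x123456:

	/*
	 *	VENDOR_ID_H = 0x123456 >> 16         = 0x12
	 *	VENDOR_ID_M = (0x123456 >> 8) & 0xff = 0x34
	 *	VENDOR_ID_L = 0x123456 & 0xff        = 0x56
	 *
	 *	vendor_id = (0x12 << 16) | (0x34 << 8) | 0x56 = 0x123456
	 */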
@@ -2404,7 +2457,6 @@ static int cm_rep_handler(struct cm_work *work)
struct cm_rep_msg *rep_msg;
int ret;
struct cm_id_private *cur_cm_id_priv;
- struct ib_cm_id *cm_id;
struct cm_timewait_info *timewait_info;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2454,9 +2506,7 @@ static int cm_rep_handler(struct cm_work *work)
/* Check for a stale connection. */
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
- rb_erase(&cm_id_priv->timewait_info->remote_id_node,
- &cm.remote_id_table);
- cm_id_priv->timewait_info->inserted_remote_id = 0;
+ cm_remove_remote(cm_id_priv);
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
@@ -2472,8 +2522,7 @@ static int cm_rep_handler(struct cm_work *work)
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
if (cur_cm_id_priv) {
- cm_id = &cur_cm_id_priv->id;
- ib_send_cm_dreq(cm_id, NULL, 0);
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
cm_deref_id(cur_cm_id_priv);
}
@@ -2501,15 +2550,7 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv->alt_av.timeout - 1);
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
error:
@@ -2520,7 +2561,6 @@ error:
static int cm_establish_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
- int ret;
/* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
@@ -2534,15 +2574,7 @@ static int cm_establish_handler(struct cm_work *work)
}
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2553,7 +2585,6 @@ static int cm_rtu_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rtu_msg *rtu_msg;
- int ret;
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(
@@ -2576,15 +2607,7 @@ static int cm_rtu_handler(struct cm_work *work)
cm_id_priv->id.state = IB_CM_ESTABLISHED;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2777,7 +2800,6 @@ static int cm_dreq_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_dreq_msg *dreq_msg;
struct ib_mad_send_buf *msg = NULL;
- int ret;
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(
@@ -2842,15 +2864,7 @@ static int cm_dreq_handler(struct cm_work *work)
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
cm_id_priv->tid = dreq_msg->hdr.tid;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
@@ -2862,7 +2876,6 @@ static int cm_drep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
- int ret;
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(
@@ -2883,15 +2896,7 @@ static int cm_drep_handler(struct cm_work *work)
cm_enter_timewait(cm_id_priv);
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2987,24 +2992,15 @@ static void cm_format_rej_event(struct cm_work *work)
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
- struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
__be32 remote_id;
remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
- spin_lock_irq(&cm.lock);
- timewait_info = cm_find_remote_id(
+ cm_id_priv = cm_find_remote_id(
*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
remote_id);
- if (!timewait_info) {
- spin_unlock_irq(&cm.lock);
- return NULL;
- }
- cm_id_priv =
- cm_acquire_id(timewait_info->work.local_id, remote_id);
- spin_unlock_irq(&cm.lock);
} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
CM_MSG_RESPONSE_REQ)
cm_id_priv = cm_acquire_id(
@@ -3022,7 +3018,6 @@ static int cm_rej_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rej_msg *rej_msg;
- int ret;
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_rejected_id(rej_msg);
@@ -3068,19 +3063,10 @@ static int cm_rej_handler(struct cm_work *work)
__func__, be32_to_cpu(cm_id_priv->id.local_id),
cm_id_priv->id.state);
spin_unlock_irq(&cm_id_priv->lock);
- ret = -EINVAL;
goto out;
}
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -3190,7 +3176,7 @@ static int cm_mra_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_mra_msg *mra_msg;
- int timeout, ret;
+ int timeout;
mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_mraed_id(mra_msg);
@@ -3250,15 +3236,7 @@ static int cm_mra_handler(struct cm_work *work)
cm_id_priv->msg->context[1] = (void *) (unsigned long)
cm_id_priv->id.state;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
spin_unlock_irq(&cm_id_priv->lock);
@@ -3393,15 +3371,7 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
@@ -3413,7 +3383,6 @@ static int cm_apr_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_apr_msg *apr_msg;
- int ret;
/* Currently, alternate path messages are not supported for the
* RoCE link layer.
@@ -3448,16 +3417,7 @@ static int cm_apr_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
cm_id_priv->msg = NULL;
-
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -3468,7 +3428,6 @@ static int cm_timewait_handler(struct cm_work *work)
{
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
- int ret;
timewait_info = container_of(work, struct cm_timewait_info, work);
spin_lock_irq(&cm.lock);
@@ -3487,15 +3446,7 @@ static int cm_timewait_handler(struct cm_work *work)
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -3642,7 +3593,6 @@ static int cm_sidr_req_handler(struct cm_work *work)
.status = IB_SIDR_UNSUPPORTED });
goto out; /* No match. */
}
- refcount_inc(&listen_cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -3674,8 +3624,8 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param)
{
- cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
- cm_id_priv->tid);
+ cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
+ cm_id_priv->tid, param->ece.attr_mod);
IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
@@ -3683,6 +3633,10 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
be64_to_cpu(cm_id_priv->id.service_id));
IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
+ param->ece.vendor_id & 0xFF);
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
+ (param->ece.vendor_id >> 8) & 0xFF);
if (param->info && param->info_length)
IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
@@ -4384,7 +4338,7 @@ static void cm_remove_port_fs(struct cm_port *port)
}
-static void cm_add_one(struct ib_device *ib_device)
+static int cm_add_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
@@ -4403,7 +4357,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
GFP_KERNEL);
if (!cm_dev)
- return;
+ return -ENOMEM;
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
@@ -4415,8 +4369,10 @@ static void cm_add_one(struct ib_device *ib_device)
continue;
port = kzalloc(sizeof *port, GFP_KERNEL);
- if (!port)
+ if (!port) {
+ ret = -ENOMEM;
goto error1;
+ }
cm_dev->port[i-1] = port;
port->cm_dev = cm_dev;
@@ -4437,8 +4393,10 @@ static void cm_add_one(struct ib_device *ib_device)
cm_recv_handler,
port,
0);
- if (IS_ERR(port->mad_agent))
+ if (IS_ERR(port->mad_agent)) {
+ ret = PTR_ERR(port->mad_agent);
goto error2;
+ }
ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
@@ -4447,15 +4405,17 @@ static void cm_add_one(struct ib_device *ib_device)
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(ib_device, &cm_client, cm_dev);
write_lock_irqsave(&cm.device_lock, flags);
list_add_tail(&cm_dev->list, &cm.device_list);
write_unlock_irqrestore(&cm.device_lock, flags);
- return;
+ return 0;
error3:
ib_unregister_mad_agent(port->mad_agent);
@@ -4477,6 +4437,7 @@ error1:
}
free:
kfree(cm_dev);
+ return ret;
}
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
@@ -4491,9 +4452,6 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
unsigned long flags;
int i;
- if (!cm_dev)
- return;
-
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 26e6f7df247b..3d7cc9f0f3d4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -91,7 +91,13 @@ const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
}
EXPORT_SYMBOL(rdma_reject_msg);
-bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
+/**
+ * rdma_is_consumer_reject - return true if the consumer rejected the connect
+ * request.
+ * @id: Communication identifier that received the REJECT event.
+ * @reason: Value returned in the REJECT event status field.
+ */
+static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
if (rdma_ib_or_roce(id->device, id->port_num))
return reason == IB_CM_REJ_CONSUMER_DEFINED;
@@ -102,7 +108,6 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
WARN_ON_ONCE(1);
return false;
}
-EXPORT_SYMBOL(rdma_is_consumer_reject);
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len)
@@ -148,7 +153,7 @@ struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
}
EXPORT_SYMBOL(rdma_res_to_id);
-static void cma_add_one(struct ib_device *device);
+static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
static struct ib_client cma_client = {
@@ -479,6 +484,7 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
rdma_restrack_kadd(&id_priv->res);
else
rdma_restrack_uadd(&id_priv->res);
+ trace_cm_id_attach(id_priv, cma_dev->device);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
@@ -883,7 +889,6 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
id_priv->id.route.addr.dev_addr.net = get_net(net);
id_priv->seq_num &= 0x00ffffff;
- trace_cm_id_create(id_priv);
return &id_priv->id;
}
EXPORT_SYMBOL(__rdma_create_id);
@@ -1906,6 +1911,9 @@ static void cma_set_rep_event_data(struct rdma_cm_event *event,
event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
event->param.conn.srq = rep_data->srq;
event->param.conn.qp_num = rep_data->remote_qpn;
+
+ event->ece.vendor_id = rep_data->ece.vendor_id;
+ event->ece.attr_mod = rep_data->ece.attr_mod;
}
static int cma_cm_event_handler(struct rdma_id_private *id_priv,
@@ -2124,6 +2132,9 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
event->param.conn.srq = req_data->srq;
event->param.conn.qp_num = req_data->remote_qpn;
+
+ event->ece.vendor_id = req_data->ece.vendor_id;
+ event->ece.attr_mod = req_data->ece.attr_mod;
}
static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
@@ -2904,6 +2915,24 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
+static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
+{
+ struct sockaddr_in6 *addr6;
+ u16 dport, sport;
+ u32 hash, fl;
+
+ addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
+ fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
+ if ((cma_family(id_priv) != AF_INET6) || !fl) {
+ dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
+ sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
+ hash = (u32)sport * 31 + dport;
+ fl = hash & IB_GRH_FLOWLABEL_MASK;
+ }
+
+ return cpu_to_be32(fl);
+}
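When the source address carries no IPv6 flow label, cma_get_roce_udp_flow_label() derives one from the port pair. A worked example, assuming IB_GRH_FLOWLABEL_MASK covers the 20-bit GRH flow-label field:

	/*
	 *	sport = 32768, dport = 4791 (the RoCEv2 UDP port)
	 *	hash  = 32768 * 31 + 4791 = 1020599 = 0xf92b7
	 *	fl    = 0xf92b7 & 0xfffff = 0xf92b7
	 */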
+
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2970,6 +2999,11 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
+ if (rdma_protocol_roce_udp_encap(id_priv->id.device,
+ id_priv->id.port_num))
+ route->path_rec->flow_label =
+ cma_get_roce_udp_flow_label(id_priv);
+
cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
@@ -3919,6 +3953,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req.max_cm_retries = CMA_MAX_CM_RETRIES;
req.srq = id_priv->srq ? 1 : 0;
+ req.ece.vendor_id = id_priv->ece.vendor_id;
+ req.ece.attr_mod = id_priv->ece.attr_mod;
trace_cm_send_req(id_priv);
ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
@@ -4008,6 +4044,27 @@ err:
}
EXPORT_SYMBOL(rdma_connect);
+/**
+ * rdma_connect_ece - Initiate an active connection request with ECE data.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ * @ece: ECE parameters
+ *
+ * See rdma_connect() explanation.
+ */
+int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ id_priv->ece.vendor_id = ece->vendor_id;
+ id_priv->ece.attr_mod = ece->attr_mod;
+
+ return rdma_connect(id, conn_param);
+}
+EXPORT_SYMBOL(rdma_connect_ece);
+
static int cma_accept_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
@@ -4033,6 +4090,8 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.flow_control = conn_param->flow_control;
rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
rep.srq = id_priv->srq ? 1 : 0;
+ rep.ece.vendor_id = id_priv->ece.vendor_id;
+ rep.ece.attr_mod = id_priv->ece.attr_mod;
trace_cm_send_rep(id_priv);
ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
@@ -4080,7 +4139,11 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
return ret;
rep.qp_num = id_priv->qp_num;
rep.qkey = id_priv->qkey;
+
+ rep.ece.vendor_id = id_priv->ece.vendor_id;
+ rep.ece.attr_mod = id_priv->ece.attr_mod;
}
+
rep.private_data = private_data;
rep.private_data_len = private_data_len;
@@ -4133,11 +4196,24 @@ int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
return 0;
reject:
cma_modify_qp_err(id_priv);
- rdma_reject(id, NULL, 0);
+ rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
EXPORT_SYMBOL(__rdma_accept);
+int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ const char *caller, struct rdma_ucm_ece *ece)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ id_priv->ece.vendor_id = ece->vendor_id;
+ id_priv->ece.attr_mod = ece->attr_mod;
+
+ return __rdma_accept(id, conn_param, caller);
+}
+EXPORT_SYMBOL(__rdma_accept_ece);
+
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
struct rdma_id_private *id_priv;
@@ -4160,7 +4236,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
- u8 private_data_len)
+ u8 private_data_len, u8 reason)
{
struct rdma_id_private *id_priv;
int ret;
@@ -4175,9 +4251,8 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
private_data, private_data_len);
} else {
trace_cm_send_rej(id_priv);
- ret = ib_send_cm_rej(id_priv->cm_id.ib,
- IB_CM_REJ_CONSUMER_DEFINED, NULL,
- 0, private_data, private_data_len);
+ ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
+ private_data, private_data_len);
}
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -4633,29 +4708,34 @@ static struct notifier_block cma_nb = {
.notifier_call = cma_netdev_callback
};
-static void cma_add_one(struct ib_device *device)
+static int cma_add_one(struct ib_device *device)
{
struct cma_device *cma_dev;
struct rdma_id_private *id_priv;
unsigned int i;
unsigned long supported_gids = 0;
+ int ret;
cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
if (!cma_dev)
- return;
+ return -ENOMEM;
cma_dev->device = device;
cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
sizeof(*cma_dev->default_gid_type),
GFP_KERNEL);
- if (!cma_dev->default_gid_type)
+ if (!cma_dev->default_gid_type) {
+ ret = -ENOMEM;
goto free_cma_dev;
+ }
cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
sizeof(*cma_dev->default_roce_tos),
GFP_KERNEL);
- if (!cma_dev->default_roce_tos)
+ if (!cma_dev->default_roce_tos) {
+ ret = -ENOMEM;
goto free_gid_type;
+ }
rdma_for_each_port (device, i) {
supported_gids = roce_gid_type_mask_support(device, i);
@@ -4681,15 +4761,14 @@ static void cma_add_one(struct ib_device *device)
mutex_unlock(&lock);
trace_cm_add_one(device);
- return;
+ return 0;
free_gid_type:
kfree(cma_dev->default_gid_type);
free_cma_dev:
kfree(cma_dev);
-
- return;
+ return ret;
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
@@ -4751,9 +4830,6 @@ static void cma_remove_one(struct ib_device *device, void *client_data)
trace_cm_remove_one(device);
- if (!cma_dev)
- return;
-
mutex_lock(&lock);
list_del(&cma_dev->list);
mutex_unlock(&lock);
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index c672a4978bfd..3c1e2ca564fe 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -322,8 +322,21 @@ fail:
return ERR_PTR(err);
}
+static void drop_cma_dev(struct config_group *cgroup, struct config_item *item)
+{
+ struct config_group *group =
+ container_of(item, struct config_group, cg_item);
+ struct cma_dev_group *cma_dev_group =
+ container_of(group, struct cma_dev_group, device_group);
+
+ configfs_remove_default_groups(&cma_dev_group->ports_group);
+ configfs_remove_default_groups(&cma_dev_group->device_group);
+ config_item_put(item);
+}
+
static struct configfs_group_operations cma_subsys_group_ops = {
.make_group = make_cma_dev,
+ .drop_item = drop_cma_dev,
};
static const struct config_item_type cma_subsys_type = {
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index 5edcf44a9307..caece96ebcf5 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -95,6 +95,7 @@ struct rdma_id_private {
* Internal to RDMA/core, don't use in the drivers
*/
struct rdma_restrack_entry res;
+ struct rdma_ucm_ece ece;
};
#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
index 81e36bf13159..e6e20c36c538 100644
--- a/drivers/infiniband/core/cma_trace.h
+++ b/drivers/infiniband/core/cma_trace.h
@@ -103,23 +103,33 @@ DEFINE_CMA_FSM_EVENT(sent_drep);
DEFINE_CMA_FSM_EVENT(sent_dreq);
DEFINE_CMA_FSM_EVENT(id_destroy);
-TRACE_EVENT(cm_id_create,
+TRACE_EVENT(cm_id_attach,
TP_PROTO(
- const struct rdma_id_private *id_priv
+ const struct rdma_id_private *id_priv,
+ const struct ib_device *device
),
- TP_ARGS(id_priv),
+ TP_ARGS(id_priv, device),
TP_STRUCT__entry(
__field(u32, cm_id)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ __string(devname, device->name)
),
TP_fast_assign(
__entry->cm_id = id_priv->res.id;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ __assign_str(devname, device->name);
),
- TP_printk("cm.id=%u",
- __entry->cm_id
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc device=%s",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr,
+ __get_str(devname)
)
);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index cf42acca4a3a..a1e6a67b2c4a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -414,4 +414,7 @@ void rdma_umap_priv_init(struct rdma_umap_priv *priv,
struct vm_area_struct *vma,
struct rdma_user_mmap_entry *entry);
+void ib_cq_pool_init(struct ib_device *dev);
+void ib_cq_pool_destroy(struct ib_device *dev);
+
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 4f25b2400694..655795bfa0ee 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -7,7 +7,11 @@
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
+#include "core_priv.h"
+
#include <trace/events/rdma_core.h>
+/* Max size for shared CQ, may require tuning */
+#define IB_MAX_SHARED_CQ_SZ 4096U
/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH 16
@@ -218,6 +222,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
cq->cq_context = private;
cq->poll_ctx = poll_ctx;
atomic_set(&cq->usecnt, 0);
+ cq->comp_vector = comp_vector;
cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
if (!cq->wc)
@@ -309,6 +314,8 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
return;
+ if (WARN_ON_ONCE(cq->cqe_used))
+ return;
switch (cq->poll_ctx) {
case IB_POLL_DIRECT:
@@ -334,3 +341,169 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
kfree(cq);
}
EXPORT_SYMBOL(ib_free_cq_user);
+
+void ib_cq_pool_init(struct ib_device *dev)
+{
+ unsigned int i;
+
+ spin_lock_init(&dev->cq_pools_lock);
+ for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++)
+ INIT_LIST_HEAD(&dev->cq_pools[i]);
+}
+
+void ib_cq_pool_destroy(struct ib_device *dev)
+{
+ struct ib_cq *cq, *n;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) {
+ list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
+ pool_entry) {
+ WARN_ON(cq->cqe_used);
+ cq->shared = false;
+ ib_free_cq(cq);
+ }
+ }
+}
+
+static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
+ enum ib_poll_context poll_ctx)
+{
+ LIST_HEAD(tmp_list);
+ unsigned int nr_cqs, i;
+ struct ib_cq *cq;
+ int ret;
+
+ if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
+ WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate at least as many CQEs as requested, but no fewer than a
+ * reasonable batch size, so that multiple users can share one CQ
+ * instead of each allocating a CQ of their own.
+ */
+ nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
+ max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
+ nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ for (i = 0; i < nr_cqs; i++) {
+ cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto out_free_cqs;
+ }
+ cq->shared = true;
+ list_add_tail(&cq->pool_entry, &tmp_list);
+ }
+
+ spin_lock_irq(&dev->cq_pools_lock);
+ list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ return 0;
+
+out_free_cqs:
+ list_for_each_entry(cq, &tmp_list, pool_entry) {
+ cq->shared = false;
+ ib_free_cq(cq);
+ }
+ return ret;
+}
+
+/**
+ * ib_cq_pool_get() - Find the least used completion queue that matches
+ * a given cpu hint (or the least used overall for wildcard affinity) and
+ * fits nr_cqe.
+ * @dev: rdma device
+ * @nr_cqe: number of needed cqe entries
+ * @comp_vector_hint: completion vector hint, or -1 to have a completion
+ * vector assigned from an internal round-robin counter
+ * @poll_ctx: cq polling context
+ *
+ * Finds a CQ that satisfies the @comp_vector_hint and @nr_cqe requirements
+ * and claims entries in it for us. If no CQ is available, a new CQ with the
+ * requirements is allocated and added to the device pool.
+ * IB_POLL_DIRECT cannot be used for shared CQs, so it is not a valid value
+ * for @poll_ctx.
+ */
+struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
+ int comp_vector_hint,
+ enum ib_poll_context poll_ctx)
+{
+ static unsigned int default_comp_vector;
+ unsigned int vector, num_comp_vectors;
+ struct ib_cq *cq, *found = NULL;
+ int ret;
+
+ if (WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE))
+ return ERR_PTR(-EINVAL);
+
+ num_comp_vectors =
+ min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ /* Project the affinity onto the device's completion vector range */
+ if (comp_vector_hint < 0) {
+ comp_vector_hint =
+ (READ_ONCE(default_comp_vector) + 1) % num_comp_vectors;
+ WRITE_ONCE(default_comp_vector, comp_vector_hint);
+ }
+ vector = comp_vector_hint % num_comp_vectors;
+
+ /*
+ * Find a CQ with the requested affinity that has
+ * enough free CQ entries
+ */
+ while (!found) {
+ spin_lock_irq(&dev->cq_pools_lock);
+ list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
+ pool_entry) {
+ /*
+ * Check to see if we have found a CQ with the
+ * correct completion vector
+ */
+ if (vector != cq->comp_vector)
+ continue;
+ if (cq->cqe_used + nr_cqe > cq->cqe)
+ continue;
+ found = cq;
+ break;
+ }
+
+ if (found) {
+ found->cqe_used += nr_cqe;
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ return found;
+ }
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ /*
+ * Didn't find a match, or the device pool has no CQs
+ * yet; allocate a new batch of CQs and retry.
+ */
+ ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return found;
+}
+EXPORT_SYMBOL(ib_cq_pool_get);
+
+/**
+ * ib_cq_pool_put() - Return a CQ taken from a shared pool.
+ * @cq: The CQ to return.
+ * @nr_cqe: The number of cqes that the caller claimed via ib_cq_pool_get().
+ */
+void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
+{
+ if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
+ return;
+
+ spin_lock_irq(&cq->device->cq_pools_lock);
+ cq->cqe_used -= nr_cqe;
+ spin_unlock_irq(&cq->device->cq_pools_lock);
+}
+EXPORT_SYMBOL(ib_cq_pool_put);
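For context, a minimal ULP-side sketch of how this pair is intended to be used; the surrounding variables and the choice of IB_POLL_SOFTIRQ are illustrative, not taken from this patch:

/* Claim nr_cqe entries from the device's shared CQ pool, drive a QP
 * with the returned CQ, then release exactly what was claimed. */
struct ib_cq *cq;

cq = ib_cq_pool_get(dev, nr_cqe, -1 /* any vector */, IB_POLL_SOFTIRQ);
if (IS_ERR(cq))
	return PTR_ERR(cq);

/* ... use cq as the send/recv CQ of one or more QPs ... */

ib_cq_pool_put(cq, nr_cqe);	/* must match the nr_cqe passed to get */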
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d0b3d35ad3e4..905a2beaf885 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -677,8 +677,20 @@ static int add_client_context(struct ib_device *device,
if (ret)
goto out;
downgrade_write(&device->client_data_rwsem);
- if (client->add)
- client->add(device);
+ if (client->add) {
+ if (client->add(device)) {
+ /*
+ * If a client's add() callback fails, the error code is
+ * ignored, but no further ops will be called on this
+ * client.
+ */
+ xa_erase(&device->client_data, client->client_id);
+ up_read(&device->client_data_rwsem);
+ ib_device_put(device);
+ ib_client_put(client);
+ return 0;
+ }
+ }
/* Readers shall not see a client until add has been completed */
xa_set_mark(&device->client_data, client->client_id,
@@ -1381,6 +1393,7 @@ int ib_register_device(struct ib_device *device, const char *name)
goto dev_cleanup;
}
+ ib_cq_pool_init(device);
ret = enable_device_and_get(device);
dev_set_uevent_suppress(&device->dev, false);
/* Mark for userspace that device is ready */
@@ -1435,6 +1448,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
goto out;
disable_device(ib_dev);
+ ib_cq_pool_destroy(ib_dev);
/* Expedite removing unregistered pointers from the hash table */
free_netdevs(ib_dev);
@@ -2557,7 +2571,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, add_gid);
SET_DEVICE_OP(dev_ops, advise_mr);
SET_DEVICE_OP(dev_ops, alloc_dm);
- SET_DEVICE_OP(dev_ops, alloc_fmr);
SET_DEVICE_OP(dev_ops, alloc_hw_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
@@ -2584,7 +2597,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_wq);
SET_DEVICE_OP(dev_ops, dealloc_dm);
SET_DEVICE_OP(dev_ops, dealloc_driver);
- SET_DEVICE_OP(dev_ops, dealloc_fmr);
SET_DEVICE_OP(dev_ops, dealloc_mw);
SET_DEVICE_OP(dev_ops, dealloc_pd);
SET_DEVICE_OP(dev_ops, dealloc_ucontext);
@@ -2628,7 +2640,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, iw_rem_ref);
SET_DEVICE_OP(dev_ops, map_mr_sg);
SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
- SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
SET_DEVICE_OP(dev_ops, mmap_free);
SET_DEVICE_OP(dev_ops, modify_ah);
@@ -2662,7 +2673,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, resize_cq);
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
- SET_DEVICE_OP(dev_ops, unmap_fmr);
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_cq);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
deleted file mode 100644
index e08aec427027..000000000000
--- a/drivers/infiniband/core/fmr_pool.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/jhash.h>
-#include <linux/kthread.h>
-
-#include <rdma/ib_fmr_pool.h>
-
-#include "core_priv.h"
-
-#define PFX "fmr_pool: "
-
-enum {
- IB_FMR_MAX_REMAPS = 32,
-
- IB_FMR_HASH_BITS = 8,
- IB_FMR_HASH_SIZE = 1 << IB_FMR_HASH_BITS,
- IB_FMR_HASH_MASK = IB_FMR_HASH_SIZE - 1
-};
-
-/*
- * If an FMR is not in use, then the list member will point to either
- * its pool's free_list (if the FMR can be mapped again; that is,
- * remap_count < pool->max_remaps) or its pool's dirty_list (if the
- * FMR needs to be unmapped before being remapped). In either of
- * these cases it is a bug if the ref_count is not 0. In other words,
- * if ref_count is > 0, then the list member must not be linked into
- * either free_list or dirty_list.
- *
- * The cache_node member is used to link the FMR into a cache bucket
- * (if caching is enabled). This is independent of the reference
- * count of the FMR. When a valid FMR is released, its ref_count is
- * decremented, and if ref_count reaches 0, the FMR is placed in
- * either free_list or dirty_list as appropriate. However, it is not
- * removed from the cache and may be "revived" if a call to
- * ib_fmr_register_physical() occurs before the FMR is remapped. In
- * this case we just increment the ref_count and remove the FMR from
- * free_list/dirty_list.
- *
- * Before we remap an FMR from free_list, we remove it from the cache
- * (to prevent another user from obtaining a stale FMR). When an FMR
- * is released, we add it to the tail of the free list, so that our
- * cache eviction policy is "least recently used."
- *
- * All manipulation of ref_count, list and cache_node is protected by
- * pool_lock to maintain consistency.
- */
-
-struct ib_fmr_pool {
- spinlock_t pool_lock;
-
- int pool_size;
- int max_pages;
- int max_remaps;
- int dirty_watermark;
- int dirty_len;
- struct list_head free_list;
- struct list_head dirty_list;
- struct hlist_head *cache_bucket;
-
- void (*flush_function)(struct ib_fmr_pool *pool,
- void * arg);
- void *flush_arg;
-
- struct kthread_worker *worker;
- struct kthread_work work;
-
- atomic_t req_ser;
- atomic_t flush_ser;
-
- wait_queue_head_t force_wait;
-};
-
-static inline u32 ib_fmr_hash(u64 first_page)
-{
- return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
- (IB_FMR_HASH_SIZE - 1);
-}
-
-/* Caller must hold pool_lock */
-static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
- u64 *page_list,
- int page_list_len,
- u64 io_virtual_address)
-{
- struct hlist_head *bucket;
- struct ib_pool_fmr *fmr;
-
- if (!pool->cache_bucket)
- return NULL;
-
- bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
-
- hlist_for_each_entry(fmr, bucket, cache_node)
- if (io_virtual_address == fmr->io_virtual_address &&
- page_list_len == fmr->page_list_len &&
- !memcmp(page_list, fmr->page_list,
- page_list_len * sizeof *page_list))
- return fmr;
-
- return NULL;
-}
-
-static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
-{
- int ret;
- struct ib_pool_fmr *fmr;
- LIST_HEAD(unmap_list);
- LIST_HEAD(fmr_list);
-
- spin_lock_irq(&pool->pool_lock);
-
- list_for_each_entry(fmr, &pool->dirty_list, list) {
- hlist_del_init(&fmr->cache_node);
- fmr->remap_count = 0;
- list_add_tail(&fmr->fmr->list, &fmr_list);
- }
-
- list_splice_init(&pool->dirty_list, &unmap_list);
- pool->dirty_len = 0;
-
- spin_unlock_irq(&pool->pool_lock);
-
- if (list_empty(&unmap_list)) {
- return;
- }
-
- ret = ib_unmap_fmr(&fmr_list);
- if (ret)
- pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);
-
- spin_lock_irq(&pool->pool_lock);
- list_splice(&unmap_list, &pool->free_list);
- spin_unlock_irq(&pool->pool_lock);
-}
-
-static void ib_fmr_cleanup_func(struct kthread_work *work)
-{
- struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);
-
- ib_fmr_batch_release(pool);
- atomic_inc(&pool->flush_ser);
- wake_up_interruptible(&pool->force_wait);
-
- if (pool->flush_function)
- pool->flush_function(pool, pool->flush_arg);
-
- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
- kthread_queue_work(pool->worker, &pool->work);
-}
-
-/**
- * ib_create_fmr_pool - Create an FMR pool
- * @pd:Protection domain for FMRs
- * @params:FMR pool parameters
- *
- * Create a pool of FMRs. Return value is pointer to new pool or
- * error code if creation failed.
- */
-struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
- struct ib_fmr_pool_param *params)
-{
- struct ib_device *device;
- struct ib_fmr_pool *pool;
- int i;
- int ret;
- int max_remaps;
-
- if (!params)
- return ERR_PTR(-EINVAL);
-
- device = pd->device;
- if (!device->ops.alloc_fmr || !device->ops.dealloc_fmr ||
- !device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
- dev_info(&device->dev, "Device does not support FMRs\n");
- return ERR_PTR(-ENOSYS);
- }
-
- if (!device->attrs.max_map_per_fmr)
- max_remaps = IB_FMR_MAX_REMAPS;
- else
- max_remaps = device->attrs.max_map_per_fmr;
-
- pool = kmalloc(sizeof *pool, GFP_KERNEL);
- if (!pool)
- return ERR_PTR(-ENOMEM);
-
- pool->cache_bucket = NULL;
- pool->flush_function = params->flush_function;
- pool->flush_arg = params->flush_arg;
-
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->dirty_list);
-
- if (params->cache) {
- pool->cache_bucket =
- kmalloc_array(IB_FMR_HASH_SIZE,
- sizeof(*pool->cache_bucket),
- GFP_KERNEL);
- if (!pool->cache_bucket) {
- ret = -ENOMEM;
- goto out_free_pool;
- }
-
- for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
- INIT_HLIST_HEAD(pool->cache_bucket + i);
- }
-
- pool->pool_size = 0;
- pool->max_pages = params->max_pages_per_fmr;
- pool->max_remaps = max_remaps;
- pool->dirty_watermark = params->dirty_watermark;
- pool->dirty_len = 0;
- spin_lock_init(&pool->pool_lock);
- atomic_set(&pool->req_ser, 0);
- atomic_set(&pool->flush_ser, 0);
- init_waitqueue_head(&pool->force_wait);
-
- pool->worker =
- kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
- if (IS_ERR(pool->worker)) {
- pr_warn(PFX "couldn't start cleanup kthread worker\n");
- ret = PTR_ERR(pool->worker);
- goto out_free_pool;
- }
- kthread_init_work(&pool->work, ib_fmr_cleanup_func);
-
- {
- struct ib_pool_fmr *fmr;
- struct ib_fmr_attr fmr_attr = {
- .max_pages = params->max_pages_per_fmr,
- .max_maps = pool->max_remaps,
- .page_shift = params->page_shift
- };
- int bytes_per_fmr = sizeof *fmr;
-
- if (pool->cache_bucket)
- bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);
-
- for (i = 0; i < params->pool_size; ++i) {
- fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
- if (!fmr)
- goto out_fail;
-
- fmr->pool = pool;
- fmr->remap_count = 0;
- fmr->ref_count = 0;
- INIT_HLIST_NODE(&fmr->cache_node);
-
- fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
- if (IS_ERR(fmr->fmr)) {
- pr_warn(PFX "fmr_create failed for FMR %d\n",
- i);
- kfree(fmr);
- goto out_fail;
- }
-
- list_add_tail(&fmr->list, &pool->free_list);
- ++pool->pool_size;
- }
- }
-
- return pool;
-
- out_free_pool:
- kfree(pool->cache_bucket);
- kfree(pool);
-
- return ERR_PTR(ret);
-
- out_fail:
- ib_destroy_fmr_pool(pool);
-
- return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL(ib_create_fmr_pool);
-
-/**
- * ib_destroy_fmr_pool - Free FMR pool
- * @pool:FMR pool to free
- *
- * Destroy an FMR pool and free all associated resources.
- */
-void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
-{
- struct ib_pool_fmr *fmr;
- struct ib_pool_fmr *tmp;
- LIST_HEAD(fmr_list);
- int i;
-
- kthread_destroy_worker(pool->worker);
- ib_fmr_batch_release(pool);
-
- i = 0;
- list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
- if (fmr->remap_count) {
- INIT_LIST_HEAD(&fmr_list);
- list_add_tail(&fmr->fmr->list, &fmr_list);
- ib_unmap_fmr(&fmr_list);
- }
- ib_dealloc_fmr(fmr->fmr);
- list_del(&fmr->list);
- kfree(fmr);
- ++i;
- }
-
- if (i < pool->pool_size)
- pr_warn(PFX "pool still has %d regions registered\n",
- pool->pool_size - i);
-
- kfree(pool->cache_bucket);
- kfree(pool);
-}
-EXPORT_SYMBOL(ib_destroy_fmr_pool);
-
-/**
- * ib_flush_fmr_pool - Invalidate all unmapped FMRs
- * @pool:FMR pool to flush
- *
- * Ensure that all unmapped FMRs are fully invalidated.
- */
-int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
-{
- int serial;
- struct ib_pool_fmr *fmr, *next;
-
- /*
- * The free_list holds FMRs that may have been used
- * but have not been remapped enough times to be dirty.
- * Put them on the dirty list now so that the cleanup
- * thread will reap them too.
- */
- spin_lock_irq(&pool->pool_lock);
- list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
- if (fmr->remap_count > 0)
- list_move(&fmr->list, &pool->dirty_list);
- }
- spin_unlock_irq(&pool->pool_lock);
-
- serial = atomic_inc_return(&pool->req_ser);
- kthread_queue_work(pool->worker, &pool->work);
-
- if (wait_event_interruptible(pool->force_wait,
- atomic_read(&pool->flush_ser) - serial >= 0))
- return -EINTR;
-
- return 0;
-}
-EXPORT_SYMBOL(ib_flush_fmr_pool);
-
-/**
- * ib_fmr_pool_map_phys - Map an FMR from an FMR pool.
- * @pool_handle: FMR pool to allocate FMR from
- * @page_list: List of pages to map
- * @list_len: Number of pages in @page_list
- * @io_virtual_address: I/O virtual address for new FMR
- */
-struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
- u64 *page_list,
- int list_len,
- u64 io_virtual_address)
-{
- struct ib_fmr_pool *pool = pool_handle;
- struct ib_pool_fmr *fmr;
- unsigned long flags;
- int result;
-
- if (list_len < 1 || list_len > pool->max_pages)
- return ERR_PTR(-EINVAL);
-
- spin_lock_irqsave(&pool->pool_lock, flags);
- fmr = ib_fmr_cache_lookup(pool,
- page_list,
- list_len,
- io_virtual_address);
- if (fmr) {
- /* found in cache */
- ++fmr->ref_count;
- if (fmr->ref_count == 1) {
- list_del(&fmr->list);
- }
-
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- return fmr;
- }
-
- if (list_empty(&pool->free_list)) {
- spin_unlock_irqrestore(&pool->pool_lock, flags);
- return ERR_PTR(-EAGAIN);
- }
-
- fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
- list_del(&fmr->list);
- hlist_del_init(&fmr->cache_node);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
- io_virtual_address);
-
- if (result) {
- spin_lock_irqsave(&pool->pool_lock, flags);
- list_add(&fmr->list, &pool->free_list);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- pr_warn(PFX "fmr_map returns %d\n", result);
-
- return ERR_PTR(result);
- }
-
- ++fmr->remap_count;
- fmr->ref_count = 1;
-
- if (pool->cache_bucket) {
- fmr->io_virtual_address = io_virtual_address;
- fmr->page_list_len = list_len;
- memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));
-
- spin_lock_irqsave(&pool->pool_lock, flags);
- hlist_add_head(&fmr->cache_node,
- pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
- spin_unlock_irqrestore(&pool->pool_lock, flags);
- }
-
- return fmr;
-}
-EXPORT_SYMBOL(ib_fmr_pool_map_phys);
-
-/**
- * ib_fmr_pool_unmap - Unmap FMR
- * @fmr:FMR to unmap
- *
- * Unmap an FMR. The FMR mapping may remain valid until the FMR is
- * reused (or until ib_flush_fmr_pool() is called).
- */
-void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
-{
- struct ib_fmr_pool *pool;
- unsigned long flags;
-
- pool = fmr->pool;
-
- spin_lock_irqsave(&pool->pool_lock, flags);
-
- --fmr->ref_count;
- if (!fmr->ref_count) {
- if (fmr->remap_count < pool->max_remaps) {
- list_add_tail(&fmr->list, &pool->free_list);
- } else {
- list_add_tail(&fmr->list, &pool->dirty_list);
- if (++pool->dirty_len >= pool->dirty_watermark) {
- atomic_inc(&pool->req_ser);
- kthread_queue_work(pool->worker, &pool->work);
- }
- }
- }
-
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-}
-EXPORT_SYMBOL(ib_fmr_pool_unmap);
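fmr_pool.c is removed together with the alloc_fmr/dealloc_fmr/map_phys_fmr/unmap_fmr device ops dropped from device.c above; fast registration (FRWR) is the replacement. A rough sketch of the equivalent FRWR flow, with illustrative variable names and error handling:

/* Allocate a fast-registration MR, map an SG list into it, and
 * register it by posting an IB_WR_REG_MR work request. */
struct ib_mr *mr;
int n;

mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_pages);
if (IS_ERR(mr))
	return PTR_ERR(mr);

n = ib_map_mr_sg(mr, sgl, sg_cnt, NULL, PAGE_SIZE);
if (n < sg_cnt)
	goto err;	/* illustrative error path */

/* Post an IB_WR_REG_MR WR to register the MR, and an IB_WR_LOCAL_INV
 * WR (or rely on a remote IB_WR_SEND_WITH_INV) before reusing it. */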
diff --git a/drivers/infiniband/core/lag.c b/drivers/infiniband/core/lag.c
new file mode 100644
index 000000000000..7063e41eaf26
--- /dev/null
+++ b/drivers/infiniband/core/lag.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
+#include <rdma/lag.h>
+
+static struct sk_buff *rdma_build_skb(struct ib_device *device,
+ struct net_device *netdev,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ u8 smac[ETH_ALEN];
+ bool is_ipv4;
+ int hdr_len;
+
+ is_ipv4 = ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw);
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) + LL_RESERVED_SPACE(netdev);
+ hdr_len += is_ipv4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr);
+
+ skb = alloc_skb(hdr_len, flags);
+ if (!skb)
+ return NULL;
+
+ skb->dev = netdev;
+ skb_reserve(skb, hdr_len);
+ skb_push(skb, sizeof(struct udphdr));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+ uh->source =
+ htons(rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+ uh->dest = htons(ROCE_V2_UDP_DPORT);
+ uh->len = htons(sizeof(struct udphdr));
+
+ if (is_ipv4) {
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+ iph->frag_off = 0;
+ iph->version = 4;
+ iph->protocol = IPPROTO_UDP;
+ iph->ihl = 0x5;
+ iph->tot_len = htons(sizeof(struct udphdr) +
+ sizeof(struct iphdr));
+ memcpy(&iph->saddr, ah_attr->grh.sgid_attr->gid.raw + 12,
+ sizeof(struct in_addr));
+ memcpy(&iph->daddr, ah_attr->grh.dgid.raw + 12,
+ sizeof(struct in_addr));
+ } else {
+ skb_push(skb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6h->version = 6;
+ ip6h->nexthdr = IPPROTO_UDP;
+ memcpy(&ip6h->flow_lbl, &ah_attr->grh.flow_label,
+ sizeof(*ip6h->flow_lbl));
+ memcpy(&ip6h->saddr, ah_attr->grh.sgid_attr->gid.raw,
+ sizeof(struct in6_addr));
+ memcpy(&ip6h->daddr, ah_attr->grh.dgid.raw,
+ sizeof(struct in6_addr));
+ }
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = eth->h_proto = htons(is_ipv4 ? ETH_P_IP : ETH_P_IPV6);
+ rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, NULL, smac);
+ memcpy(eth->h_source, smac, ETH_ALEN);
+ memcpy(eth->h_dest, ah_attr->roce.dmac, ETH_ALEN);
+
+ return skb;
+}
+
+static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
+ struct net_device *master,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct net_device *slave;
+ struct sk_buff *skb;
+
+ skb = rdma_build_skb(device, master, ah_attr, flags);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ rcu_read_lock();
+ slave = netdev_get_xmit_slave(master, skb,
+ !!(device->lag_flags &
+ RDMA_LAG_FLAGS_HASH_ALL_SLAVES));
+ if (slave)
+ dev_hold(slave);
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return slave;
+}
+
+void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave)
+{
+ if (xmit_slave)
+ dev_put(xmit_slave);
+}
+
+struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct net_device *slave = NULL;
+ struct net_device *master;
+
+ if (!(ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE &&
+ ah_attr->grh.sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
+ ah_attr->grh.flow_label))
+ return NULL;
+
+ rcu_read_lock();
+ master = rdma_read_gid_attr_ndev_rcu(ah_attr->grh.sgid_attr);
+ if (IS_ERR(master)) {
+ rcu_read_unlock();
+ return master;
+ }
+ dev_hold(master);
+ rcu_read_unlock();
+
+ if (!netif_is_bond_master(master))
+ goto put;
+
+ slave = rdma_get_xmit_slave_udp(device, master, ah_attr, flags);
+put:
+ dev_put(master);
+ return slave;
+}
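A hedged sketch of how a provider driver is expected to consume these helpers when creating a RoCE address handle over a bond; the surrounding driver code is hypothetical:

/* Resolve the bond's transmit slave for this flow and record it in
 * the hardware AH; the netdev reference must be dropped afterwards. */
struct net_device *slave;

slave = rdma_lag_get_ah_roce_slave(ibdev, ah_attr, GFP_KERNEL);
if (IS_ERR(slave))
	return PTR_ERR(slave);
if (slave) {
	/* e.g. program the slave's port affinity into the HW AH */
}
rdma_lag_put_ah_roce_slave(slave);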
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index c54db13fa9b0..186e0d652e8b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -85,7 +85,6 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
-/* Client ID 0 is used for snoop-only clients */
static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;
@@ -483,141 +482,12 @@ error1:
}
EXPORT_SYMBOL(ib_register_mad_agent);
-static inline int is_snooping_sends(int mad_snoop_flags)
-{
- return (mad_snoop_flags &
- (/*IB_MAD_SNOOP_POSTED_SENDS |
- IB_MAD_SNOOP_RMPP_SENDS |*/
- IB_MAD_SNOOP_SEND_COMPLETIONS /*|
- IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
-}
-
-static inline int is_snooping_recvs(int mad_snoop_flags)
-{
- return (mad_snoop_flags &
- (IB_MAD_SNOOP_RECVS /*|
- IB_MAD_SNOOP_RMPP_RECVS*/));
-}
-
-static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
- struct ib_mad_snoop_private *mad_snoop_priv)
-{
- struct ib_mad_snoop_private **new_snoop_table;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- /* Check for empty slot in array. */
- for (i = 0; i < qp_info->snoop_table_size; i++)
- if (!qp_info->snoop_table[i])
- break;
-
- if (i == qp_info->snoop_table_size) {
- /* Grow table. */
- new_snoop_table = krealloc(qp_info->snoop_table,
- sizeof mad_snoop_priv *
- (qp_info->snoop_table_size + 1),
- GFP_ATOMIC);
- if (!new_snoop_table) {
- i = -ENOMEM;
- goto out;
- }
-
- qp_info->snoop_table = new_snoop_table;
- qp_info->snoop_table_size++;
- }
- qp_info->snoop_table[i] = mad_snoop_priv;
- atomic_inc(&qp_info->snoop_count);
-out:
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- return i;
-}
-
-struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
- u8 port_num,
- enum ib_qp_type qp_type,
- int mad_snoop_flags,
- ib_mad_snoop_handler snoop_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
-{
- struct ib_mad_port_private *port_priv;
- struct ib_mad_agent *ret;
- struct ib_mad_snoop_private *mad_snoop_priv;
- int qpn;
- int err;
-
- /* Validate parameters */
- if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
- (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- qpn = get_spl_qp_index(qp_type);
- if (qpn == -1) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- port_priv = ib_get_mad_port(device, port_num);
- if (!port_priv) {
- ret = ERR_PTR(-ENODEV);
- goto error1;
- }
- /* Allocate structures */
- mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
- if (!mad_snoop_priv) {
- ret = ERR_PTR(-ENOMEM);
- goto error1;
- }
-
- /* Now, fill in the various structures */
- mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
- mad_snoop_priv->agent.device = device;
- mad_snoop_priv->agent.recv_handler = recv_handler;
- mad_snoop_priv->agent.snoop_handler = snoop_handler;
- mad_snoop_priv->agent.context = context;
- mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
- mad_snoop_priv->agent.port_num = port_num;
- mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
- init_completion(&mad_snoop_priv->comp);
-
- err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
- if (err) {
- ret = ERR_PTR(err);
- goto error2;
- }
-
- mad_snoop_priv->snoop_index = register_snoop_agent(
- &port_priv->qp_info[qpn],
- mad_snoop_priv);
- if (mad_snoop_priv->snoop_index < 0) {
- ret = ERR_PTR(mad_snoop_priv->snoop_index);
- goto error3;
- }
-
- atomic_set(&mad_snoop_priv->refcount, 1);
- return &mad_snoop_priv->agent;
-error3:
- ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-error2:
- kfree(mad_snoop_priv);
-error1:
- return ret;
-}
-EXPORT_SYMBOL(ib_register_mad_snoop);
-
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
if (atomic_dec_and_test(&mad_agent_priv->refcount))
complete(&mad_agent_priv->comp);
}
-static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
-{
- if (atomic_dec_and_test(&mad_snoop_priv->refcount))
- complete(&mad_snoop_priv->comp);
-}
-
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
struct ib_mad_port_private *port_priv;
@@ -650,25 +520,6 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
kfree_rcu(mad_agent_priv, rcu);
}
-static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
-{
- struct ib_mad_qp_info *qp_info;
- unsigned long flags;
-
- qp_info = mad_snoop_priv->qp_info;
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
- atomic_dec(&qp_info->snoop_count);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-
- deref_snoop_agent(mad_snoop_priv);
- wait_for_completion(&mad_snoop_priv->comp);
-
- ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-
- kfree(mad_snoop_priv);
-}
-
/*
* ib_unregister_mad_agent - Unregisters a client from using MAD services
*
@@ -677,20 +528,11 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_snoop_private *mad_snoop_priv;
-
- /* If the TID is zero, the agent can only snoop. */
- if (mad_agent->hi_tid) {
- mad_agent_priv = container_of(mad_agent,
- struct ib_mad_agent_private,
- agent);
- unregister_mad_agent(mad_agent_priv);
- } else {
- mad_snoop_priv = container_of(mad_agent,
- struct ib_mad_snoop_private,
- agent);
- unregister_mad_snoop(mad_snoop_priv);
- }
+
+ mad_agent_priv = container_of(mad_agent,
+ struct ib_mad_agent_private,
+ agent);
+ unregister_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
@@ -706,57 +548,6 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
spin_unlock_irqrestore(&mad_queue->lock, flags);
}
-static void snoop_send(struct ib_mad_qp_info *qp_info,
- struct ib_mad_send_buf *send_buf,
- struct ib_mad_send_wc *mad_send_wc,
- int mad_snoop_flags)
-{
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
-
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
- send_buf, mad_send_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
-static void snoop_recv(struct ib_mad_qp_info *qp_info,
- struct ib_mad_recv_wc *mad_recv_wc,
- int mad_snoop_flags)
-{
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
-
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
- mad_recv_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
@@ -2289,9 +2080,6 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
recv->header.recv_wc.recv_buf.grh = &recv->grh;
- if (atomic_read(&qp_info->snoop_count))
- snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
-
/* Validate MAD */
if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
goto out;
@@ -2538,9 +2326,6 @@ retry:
mad_send_wc.send_buf = &mad_send_wr->send_buf;
mad_send_wc.status = wc->status;
mad_send_wc.vendor_err = wc->vendor_err;
- if (atomic_read(&qp_info->snoop_count))
- snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
- IB_MAD_SNOOP_SEND_COMPLETIONS);
ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
if (queued_send_wr) {
@@ -2782,10 +2567,6 @@ static void local_completions(struct work_struct *work)
local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
local->mad_priv->header.recv_wc.recv_buf.mad =
(struct ib_mad *)local->mad_priv->mad;
- if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
- snoop_recv(recv_mad_agent->qp_info,
- &local->mad_priv->header.recv_wc,
- IB_MAD_SNOOP_RECVS);
recv_mad_agent->agent.recv_handler(
&recv_mad_agent->agent,
&local->mad_send_wr->send_buf,
@@ -2800,10 +2581,6 @@ local_send_completion:
mad_send_wc.status = IB_WC_SUCCESS;
mad_send_wc.vendor_err = 0;
mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
- if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
- snoop_send(mad_agent_priv->qp_info,
- &local->mad_send_wr->send_buf,
- &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
&mad_send_wc);
@@ -3119,10 +2896,6 @@ static void init_mad_qp(struct ib_mad_port_private *port_priv,
init_mad_queue(qp_info, &qp_info->send_queue);
init_mad_queue(qp_info, &qp_info->recv_queue);
INIT_LIST_HEAD(&qp_info->overflow_list);
- spin_lock_init(&qp_info->snoop_lock);
- qp_info->snoop_table = NULL;
- qp_info->snoop_table_size = 0;
- atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
@@ -3166,7 +2939,6 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
return;
ib_destroy_qp(qp_info->qp);
- kfree(qp_info->snoop_table);
}
/*
@@ -3304,9 +3076,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
return 0;
}
-static void ib_mad_init_device(struct ib_device *device)
+static int ib_mad_init_device(struct ib_device *device)
{
int start, i;
+ unsigned int count = 0;
+ int ret;
start = rdma_start_port(device);
@@ -3314,17 +3088,23 @@ static void ib_mad_init_device(struct ib_device *device)
if (!rdma_cap_ib_mad(device, i))
continue;
- if (ib_mad_port_open(device, i)) {
+ ret = ib_mad_port_open(device, i);
+ if (ret) {
dev_err(&device->dev, "Couldn't open port %d\n", i);
goto error;
}
- if (ib_agent_port_open(device, i)) {
+ ret = ib_agent_port_open(device, i);
+ if (ret) {
dev_err(&device->dev,
"Couldn't open port %d for agents\n", i);
goto error_agent;
}
+ count++;
}
- return;
+ if (!count)
+ return -EOPNOTSUPP;
+
+ return 0;
error_agent:
if (ib_mad_port_close(device, i))
@@ -3341,6 +3121,7 @@ error:
if (ib_mad_port_close(device, i))
dev_err(&device->dev, "Couldn't close port %d\n", i);
}
+ return ret;
}
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
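The ib_mad_init_device() conversion (and the mcast/sa/umad equivalents below) follows from the add_client_context() change earlier in this series: client add() callbacks now return an int, and a failed client is skipped rather than left half-registered, which is also why the NULL client_data checks disappear from the remove() callbacks. A minimal sketch of a client under the new contract (all demo_* names are hypothetical):

static struct ib_client demo_client;

static int demo_add_one(struct ib_device *device)
{
	struct demo_dev *ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);

	if (!ddev)
		return -ENOMEM;	/* core unwinds and skips this client */
	ib_set_client_data(device, &demo_client, ddev);
	return 0;
}

static void demo_remove_one(struct ib_device *device, void *client_data)
{
	kfree(client_data);	/* never called unless add() succeeded */
}

static struct ib_client demo_client = {
	.name   = "demo",
	.add    = demo_add_one,
	.remove = demo_remove_one,
};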
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 9c2d8b7f1af9..740f03ecc05d 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -42,7 +42,7 @@
#include <rdma/ib_cache.h>
#include "sa.h"
-static void mcast_add_one(struct ib_device *device);
+static int mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);
static struct ib_client mcast_client = {
@@ -815,7 +815,7 @@ static void mcast_event_handler(struct ib_event_handler *handler,
}
}
-static void mcast_add_one(struct ib_device *device)
+static int mcast_add_one(struct ib_device *device)
{
struct mcast_device *dev;
struct mcast_port *port;
@@ -825,7 +825,7 @@ static void mcast_add_one(struct ib_device *device)
dev = kmalloc(struct_size(dev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!dev)
- return;
+ return -ENOMEM;
dev->start_port = rdma_start_port(device);
dev->end_port = rdma_end_port(device);
@@ -845,7 +845,7 @@ static void mcast_add_one(struct ib_device *device)
if (!count) {
kfree(dev);
- return;
+ return -EOPNOTSUPP;
}
dev->device = device;
@@ -853,6 +853,7 @@ static void mcast_add_one(struct ib_device *device)
INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
ib_register_event_handler(&dev->event_handler);
+ return 0;
}
static void mcast_remove_one(struct ib_device *device, void *client_data)
@@ -861,9 +862,6 @@ static void mcast_remove_one(struct ib_device *device, void *client_data)
struct mcast_port *port;
int i;
- if (!dev)
- return;
-
ib_unregister_event_handler(&dev->event_handler);
flush_workqueue(mcast_wq);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 9eec26d10d7b..e16105be2eb2 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1292,11 +1292,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
ret = fill_func(msg, has_cap_net_admin, res, port);
-
- rdma_restrack_put(res);
if (ret)
goto err_free;
+ rdma_restrack_put(res);
nlmsg_end(msg, nlh);
ib_device_put(device);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 177333d8bcda..38de4942c682 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -130,6 +130,17 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
lockdep_assert_held(&ufile->hw_destroy_rwsem);
assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
+ if (reason == RDMA_REMOVE_ABORT_HWOBJ) {
+ reason = RDMA_REMOVE_ABORT;
+ ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
+ attrs);
+ /*
+ * Drivers are not permitted to ignore RDMA_REMOVE_ABORT; see
+ * ib_is_destroy_retryable() (cleanup_retryable == false here).
+ */
+ WARN_ON(ret);
+ }
+
if (reason == RDMA_REMOVE_ABORT) {
WARN_ON(!list_empty(&uobj->list));
WARN_ON(!uobj->context);
@@ -153,9 +164,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
uobj->context = NULL;
/*
- * For DESTROY the usecnt is held write locked, the caller is expected
- * to put it unlock and put the object when done with it. Only DESTROY
- * can remove the IDR handle.
+ * For DESTROY the usecnt is not changed, the caller is expected to
+ * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
+ * handle.
*/
if (reason != RDMA_REMOVE_DESTROY)
atomic_set(&uobj->usecnt, 0);
@@ -187,7 +198,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
/*
* This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
* sequence. It should only be used from command callbacks. On success the
- * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
+ * caller must pair this with uobj_put_destroy(). This
* version requires the caller to have already obtained an
* LOOKUP_DESTROY uobject kref.
*/
@@ -198,6 +209,13 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
down_read(&ufile->hw_destroy_rwsem);
+ /*
+ * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
+ * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
+ * This is because any other concurrent thread can still see the object
+ * in the xarray due to RCU. Leaving it locked ensures nothing else will
+ * touch it.
+ */
ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
if (ret)
goto out_unlock;
@@ -216,7 +234,7 @@ out_unlock:
/*
* uobj_get_destroy destroys the HW object and returns a handle to the uobj
* with a NULL object pointer. The caller must pair this with
- * uverbs_put_destroy.
+ * uobj_put_destroy().
*/
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
u32 id, struct uverbs_attr_bundle *attrs)
@@ -250,8 +268,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
uobj = __uobj_get_destroy(obj, id, attrs);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
-
- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ uobj_put_destroy(uobj);
return 0;
}
@@ -459,7 +476,8 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
struct ib_uobject *uobj;
struct file *filp;
- if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release))
+ if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
+ fd_type->fops->release != &uverbs_async_event_release))
return ERR_PTR(-EINVAL);
new_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -646,11 +664,15 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
* object and anything else connected to uobj before calling this.
*/
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs)
+ struct uverbs_attr_bundle *attrs,
+ bool hw_obj_valid)
{
struct ib_uverbs_file *ufile = uobj->ufile;
- uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
+ uverbs_destroy_uobject(uobj,
+ hw_obj_valid ? RDMA_REMOVE_ABORT_HWOBJ :
+ RDMA_REMOVE_ABORT,
+ attrs);
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
@@ -920,8 +942,8 @@ uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
}
void uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access, bool commit,
- struct uverbs_attr_bundle *attrs)
+ enum uverbs_obj_access access, bool hw_obj_valid,
+ bool commit, struct uverbs_attr_bundle *attrs)
{
/*
* refcounts should be handled at the object level and not at the
@@ -944,7 +966,7 @@ void uverbs_finalize_object(struct ib_uobject *uobj,
if (commit)
rdma_alloc_commit_uobject(uobj, attrs);
else
- rdma_alloc_abort_uobject(uobj, attrs);
+ rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid);
break;
default:
WARN_ON(true);
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 33978e0f1262..33706dad6c0f 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -64,8 +64,8 @@ uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
s64 id, struct uverbs_attr_bundle *attrs);
void uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access, bool commit,
- struct uverbs_attr_bundle *attrs);
+ enum uverbs_obj_access access, bool hw_obj_valid,
+ bool commit, struct uverbs_attr_bundle *attrs);
int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
@@ -159,6 +159,9 @@ extern const struct uapi_definition uverbs_def_obj_dm[];
extern const struct uapi_definition uverbs_def_obj_flow_action[];
extern const struct uapi_definition uverbs_def_obj_intf[];
extern const struct uapi_definition uverbs_def_obj_mr[];
+extern const struct uapi_definition uverbs_def_obj_qp[];
+extern const struct uapi_definition uverbs_def_obj_srq[];
+extern const struct uapi_definition uverbs_def_obj_wq[];
extern const struct uapi_definition uverbs_def_write_intf[];
static inline const struct uverbs_api_write_method *
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 557efbf29197..614cff89fc71 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -129,7 +129,7 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
qp->integrity_en);
int i, j, ret = 0, count = 0;
- ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
+ ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
if (!ctx->reg) {
ret = -ENOMEM;
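DIV_ROUND_UP() is the kernel's self-documenting spelling of the same ceiling division; for instance:

/* Number of MRs needed to cover the SG list: for sg_cnt = 10 and
 * pages_per_mr = 4, both forms yield 3. */
static_assert(DIV_ROUND_UP(10, 4) == (10 + 4 - 1) / 4);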
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 74e0058fcf9e..a2ed09a3c714 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -174,7 +174,7 @@ static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
};
-static void ib_sa_add_one(struct ib_device *device);
+static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);
static struct ib_client sa_client = {
@@ -190,7 +190,7 @@ static u32 tid;
#define PATH_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct sa_path_rec, field), \
- .struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \
+ .struct_size_bytes = sizeof_field(struct sa_path_rec, field), \
.field_name = "sa_path_rec:" #field
static const struct ib_field path_rec_table[] = {
@@ -292,7 +292,7 @@ static const struct ib_field path_rec_table[] = {
.struct_offset_bytes = \
offsetof(struct sa_path_rec, field), \
.struct_size_bytes = \
- sizeof((struct sa_path_rec *)0)->field, \
+ sizeof_field(struct sa_path_rec, field), \
.field_name = "sa_path_rec:" #field
static const struct ib_field opa_path_rec_table[] = {
@@ -420,7 +420,7 @@ static const struct ib_field opa_path_rec_table[] = {
#define MCMEMBER_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
- .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_mcmember_rec, field), \
.field_name = "sa_mcmember_rec:" #field
static const struct ib_field mcmember_rec_table[] = {
@@ -504,7 +504,7 @@ static const struct ib_field mcmember_rec_table[] = {
#define SERVICE_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
- .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_service_rec, field), \
.field_name = "sa_service_rec:" #field
static const struct ib_field service_rec_table[] = {
@@ -552,7 +552,7 @@ static const struct ib_field service_rec_table[] = {
#define CLASSPORTINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
- .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_class_port_info, field), \
.field_name = "ib_class_port_info:" #field
static const struct ib_field ib_classport_info_rec_table[] = {
@@ -630,7 +630,7 @@ static const struct ib_field ib_classport_info_rec_table[] = {
.struct_offset_bytes =\
offsetof(struct opa_class_port_info, field), \
.struct_size_bytes = \
- sizeof((struct opa_class_port_info *)0)->field, \
+ sizeof_field(struct opa_class_port_info, field), \
.field_name = "opa_class_port_info:" #field
static const struct ib_field opa_classport_info_rec_table[] = {
@@ -710,7 +710,7 @@ static const struct ib_field opa_classport_info_rec_table[] = {
#define GUIDINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
- .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_guidinfo_rec, field), \
.field_name = "sa_guidinfo_rec:" #field
static const struct ib_field guidinfo_rec_table[] = {
@@ -1412,17 +1412,13 @@ void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
EXPORT_SYMBOL(ib_sa_pack_path);
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
- struct ib_device *device,
+ struct ib_sa_device *sa_dev,
u8 port_num)
{
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
struct ib_sa_port *port;
unsigned long flags;
bool ret = false;
- if (!sa_dev)
- return ret;
-
port = &sa_dev->port[port_num - sa_dev->start_port];
spin_lock_irqsave(&port->classport_lock, flags);
if (!port->classport_info.valid)
@@ -1450,8 +1446,8 @@ enum opa_pr_supported {
* query is possible.
*/
static int opa_pr_query_possible(struct ib_sa_client *client,
- struct ib_device *device,
- u8 port_num,
+ struct ib_sa_device *sa_dev,
+ struct ib_device *device, u8 port_num,
struct sa_path_rec *rec)
{
struct ib_port_attr port_attr;
@@ -1459,7 +1455,7 @@ static int opa_pr_query_possible(struct ib_sa_client *client,
if (ib_query_port(device, port_num, &port_attr))
return PR_NOT_SUPPORTED;
- if (ib_sa_opa_pathrecord_support(client, device, port_num))
+ if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
return PR_OPA_SUPPORTED;
if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
@@ -1574,7 +1570,8 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
query->sa_query.port = port;
if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
- status = opa_pr_query_possible(client, device, port_num, rec);
+ status = opa_pr_query_possible(client, sa_dev, device, port_num,
+ rec);
if (status == PR_NOT_SUPPORTED) {
ret = -EINVAL;
goto err1;
@@ -2325,18 +2322,19 @@ static void ib_sa_event(struct ib_event_handler *handler,
}
}
-static void ib_sa_add_one(struct ib_device *device)
+static int ib_sa_add_one(struct ib_device *device)
{
struct ib_sa_device *sa_dev;
int s, e, i;
int count = 0;
+ int ret;
s = rdma_start_port(device);
e = rdma_end_port(device);
sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
if (!sa_dev)
- return;
+ return -ENOMEM;
sa_dev->start_port = s;
sa_dev->end_port = e;
@@ -2356,8 +2354,10 @@ static void ib_sa_add_one(struct ib_device *device)
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
NULL, 0, send_handler,
recv_handler, sa_dev, 0);
- if (IS_ERR(sa_dev->port[i].agent))
+ if (IS_ERR(sa_dev->port[i].agent)) {
+ ret = PTR_ERR(sa_dev->port[i].agent);
goto err;
+ }
INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
@@ -2366,8 +2366,10 @@ static void ib_sa_add_one(struct ib_device *device)
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(device, &sa_client, sa_dev);
@@ -2386,7 +2388,7 @@ static void ib_sa_add_one(struct ib_device *device)
update_sm_ah(&sa_dev->port[i].update_task);
}
- return;
+ return 0;
err:
while (--i >= 0) {
@@ -2395,7 +2397,7 @@ err:
}
free:
kfree(sa_dev);
- return;
+ return ret;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
@@ -2403,9 +2405,6 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data)
struct ib_sa_device *sa_dev = client_data;
int i;
- if (!sa_dev)
- return;
-
ib_unregister_event_handler(&sa_dev->event_handler);
flush_workqueue(ib_wq);
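The sizeof_field() conversions above are mechanical: the macro expands to the size of the member without open-coding a fake null-pointer dereference at every use site. A sketch of the equivalence:

/* sizeof_field(TYPE, MEMBER) matches the old open-coded idiom. */
static_assert(sizeof_field(struct sa_path_rec, dgid) ==
	      sizeof(((struct sa_path_rec *)0)->dgid));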
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 087682e6969e..defe9cd4c5ee 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1058,8 +1058,7 @@ static int add_port(struct ib_core_device *coredev, int port_num)
coredev->ports_kobj,
"%d", port_num);
if (ret) {
- kfree(p);
- return ret;
+ goto err_put;
}
p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
@@ -1072,8 +1071,7 @@ static int add_port(struct ib_core_device *coredev, int port_num)
ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
&p->kobj, "gid_attrs");
if (ret) {
- kfree(p->gid_attr_group);
- goto err_put;
+ goto err_put_gid_attrs;
}
if (device->ops.process_mad && is_full_dev) {
@@ -1404,8 +1402,10 @@ int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
ret = kobject_init_and_add(kobj, ktype, &port->kobj, "%s",
name);
- if (ret)
+ if (ret) {
+ kobject_put(kobj);
return ret;
+ }
}
return 0;
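All three sysfs hunks apply the same rule: once kobject_init_and_add() has been called, the error path must unwind through kobject_put() so the ktype's release callback frees the object; a bare kfree() skips the release and trips refcount debugging. In pattern form (a sketch, not code from this file):

ret = kobject_init_and_add(kobj, ktype, parent, "%s", name);
if (ret) {
	kobject_put(kobj);	/* drops the ref; ktype->release frees */
	return ret;
}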
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 16b6cf57fa85..5b87eee8ccc8 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -52,6 +52,7 @@
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
+#include <rdma/ib_cm.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"
@@ -360,6 +361,9 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
ucma_copy_conn_event(&uevent->resp.param.conn,
&event->param.conn);
+ uevent->resp.ece.vendor_id = event->ece.vendor_id;
+ uevent->resp.ece.attr_mod = event->ece.attr_mod;
+
if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
if (!ctx->backlog) {
ret = -ENOMEM;
@@ -404,7 +408,8 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
* Old 32 bit user space does not send the 4 byte padding in the
* reserved field. We don't care; allow it to keep working.
*/
- if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved))
+ if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) -
+ sizeof(uevent->resp.ece))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
@@ -845,7 +850,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
struct sockaddr *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
@@ -869,6 +874,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
goto out;
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
+ resp.ibdev_index = ctx->cm_id->device->index;
resp.port_num = ctx->cm_id->port_num;
if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
@@ -880,8 +886,8 @@ static ssize_t ucma_query_route(struct ucma_file *file,
out:
mutex_unlock(&ctx->mutex);
- if (copy_to_user(u64_to_user_ptr(cmd.response),
- &resp, sizeof(resp)))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
+ min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
ucma_put_ctx(ctx);
@@ -895,6 +901,7 @@ static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
return;
resp->node_guid = (__force __u64) cm_id->device->node_guid;
+ resp->ibdev_index = cm_id->device->index;
resp->port_num = cm_id->port_num;
resp->pkey = (__force __u16) cpu_to_be16(
ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
@@ -907,7 +914,7 @@ static ssize_t ucma_query_addr(struct ucma_context *ctx,
struct sockaddr *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
return -ENOSPC;
memset(&resp, 0, sizeof resp);
@@ -922,7 +929,7 @@ static ssize_t ucma_query_addr(struct ucma_context *ctx,
ucma_query_device_addr(ctx->cm_id, &resp);
- if (copy_to_user(response, &resp, sizeof(resp)))
+ if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
return ret;
@@ -974,7 +981,7 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
struct sockaddr_ib *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
return -ENOSPC;
memset(&resp, 0, sizeof resp);
@@ -1007,7 +1014,7 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
&ctx->cm_id->route.addr.dst_addr);
}
- if (copy_to_user(response, &resp, sizeof(resp)))
+ if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
return ret;
@@ -1070,12 +1077,15 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
- struct rdma_ucm_connect cmd;
struct rdma_conn_param conn_param;
+ struct rdma_ucm_ece ece = {};
+ struct rdma_ucm_connect cmd;
struct ucma_context *ctx;
+ size_t in_size;
int ret;
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ in_size = min_t(size_t, in_len, sizeof(cmd));
+ if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
if (!cmd.conn_param.valid)
@@ -1086,8 +1096,13 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
return PTR_ERR(ctx);
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+ if (offsetofend(typeof(cmd), ece) <= in_size) {
+ ece.vendor_id = cmd.ece.vendor_id;
+ ece.attr_mod = cmd.ece.attr_mod;
+ }
+
mutex_lock(&ctx->mutex);
- ret = rdma_connect(ctx->cm_id, &conn_param);
+ ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@@ -1121,28 +1136,36 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
{
struct rdma_ucm_accept cmd;
struct rdma_conn_param conn_param;
+ struct rdma_ucm_ece ece = {};
struct ucma_context *ctx;
+ size_t in_size;
int ret;
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ in_size = min_t(size_t, in_len, sizeof(cmd));
+ if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (offsetofend(typeof(cmd), ece) <= in_size) {
+ ece.vendor_id = cmd.ece.vendor_id;
+ ece.attr_mod = cmd.ece.attr_mod;
+ }
+
if (cmd.conn_param.valid) {
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
mutex_lock(&file->mut);
mutex_lock(&ctx->mutex);
- ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
+ ret = __rdma_accept_ece(ctx->cm_id, &conn_param, NULL, &ece);
mutex_unlock(&ctx->mutex);
if (!ret)
ctx->uid = cmd.uid;
mutex_unlock(&file->mut);
} else {
mutex_lock(&ctx->mutex);
- ret = __rdma_accept(ctx->cm_id, NULL, NULL);
+ ret = __rdma_accept_ece(ctx->cm_id, NULL, NULL, &ece);
mutex_unlock(&ctx->mutex);
}
ucma_put_ctx(ctx);
@@ -1159,12 +1182,24 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!cmd.reason)
+ cmd.reason = IB_CM_REJ_CONSUMER_DEFINED;
+
+ switch (cmd.reason) {
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
- ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+ ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len,
+ cmd.reason);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
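The ECE plumbing above relies on a standard ABI-extension pattern: copy in only min(in_len, sizeof(cmd)) bytes, consume a new trailing field only when offsetofend() proves the caller actually sent it, and on the response side copy out at most out_len bytes (as the query hunks above do with min_t()). A condensed sketch, with illustrative surrounding code:

struct rdma_ucm_connect cmd = {};
size_t in_size = min_t(size_t, in_len, sizeof(cmd));

if (copy_from_user(&cmd, inbuf, in_size))
	return -EFAULT;
if (offsetofend(typeof(cmd), ece) <= in_size) {
	/* new userspace: cmd.ece is valid and may be consumed */
}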
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 29a45d2f8898..d65d541b9a25 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -41,7 +41,7 @@
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
- .struct_size_bytes = sizeof ((struct ib_unpacked_ ## header *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_unpacked_ ## header, field), \
.field_name = #header ":" #field
static const struct ib_field lrh_table[] = {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index da229eab5903..b0d0b522cc76 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -142,7 +142,7 @@ static dev_t dynamic_issm_dev;
static DEFINE_IDA(umad_ida);
-static void ib_umad_add_one(struct ib_device *device);
+static int ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);
static void ib_umad_dev_free(struct kref *kref)
@@ -1352,37 +1352,41 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
put_device(&port->dev);
}
-static void ib_umad_add_one(struct ib_device *device)
+static int ib_umad_add_one(struct ib_device *device)
{
struct ib_umad_device *umad_dev;
int s, e, i;
int count = 0;
+ int ret;
s = rdma_start_port(device);
e = rdma_end_port(device);
umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
if (!umad_dev)
- return;
+ return -ENOMEM;
kref_init(&umad_dev->kref);
for (i = s; i <= e; ++i) {
if (!rdma_cap_ib_mad(device, i))
continue;
- if (ib_umad_init_port(device, i, umad_dev,
- &umad_dev->ports[i - s]))
+ ret = ib_umad_init_port(device, i, umad_dev,
+ &umad_dev->ports[i - s]);
+ if (ret)
goto err;
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(device, &umad_client, umad_dev);
- return;
+ return 0;
err:
while (--i >= s) {
@@ -1394,6 +1398,7 @@ err:
free:
/* balances kref_init */
ib_umad_dev_put(umad_dev);
+ return ret;
}
static void ib_umad_remove_one(struct ib_device *device, void *client_data)
@@ -1401,9 +1406,6 @@ static void ib_umad_remove_one(struct ib_device *device, void *client_data)
struct ib_umad_device *umad_dev = client_data;
unsigned int i;
- if (!umad_dev)
- return;
-
rdma_for_each_port (device, i) {
if (rdma_cap_ib_mad(device, i))
ib_umad_kill_port(
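
With add_one() now returning int, an ib_client can report registration failure instead of silently leaving client_data unset, and the core can skip calling remove() for devices where add() failed — which is why the NULL client_data check in ib_umad_remove_one() can be dropped. A hedged sketch of a client written against the new contract (the demo_* names are invented):

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct demo_data {		/* hypothetical per-device state */
	int placeholder;
};

static struct ib_client demo_client;	/* tentative definition, filled below */

static int demo_add_one(struct ib_device *device)
{
	struct demo_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;	/* core will not invoke demo_remove_one() */

	ib_set_client_data(device, &demo_client, data);
	return 0;
}

static void demo_remove_one(struct ib_device *device, void *client_data)
{
	/* Only reached when demo_add_one() returned 0, so no NULL check. */
	kfree(client_data);
}

static struct ib_client demo_client = {
	.name	= "demo",
	.add	= demo_add_one,
	.remove	= demo_remove_one,
};
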
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 7df71983212d..53a10479958b 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -142,7 +142,7 @@ struct ib_uverbs_file {
* ucontext_lock held
*/
struct ib_ucontext *ucontext;
- struct ib_uverbs_async_event_file *async_file;
+ struct ib_uverbs_async_event_file *default_async_file;
struct list_head list;
/*
@@ -180,6 +180,7 @@ struct ib_uverbs_mcast_entry {
struct ib_uevent_object {
struct ib_uobject uobject;
+ struct ib_uverbs_async_event_file *event_file;
/* List member for ib_uverbs_async_event_file list */
struct list_head event_list;
u32 events_reported;
@@ -219,6 +220,7 @@ void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file);
void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
+int uverbs_async_event_release(struct inode *inode, struct file *filp);
int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
@@ -227,6 +229,9 @@ void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
struct ib_ucq_object *uobj);
void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
void ib_uverbs_release_file(struct kref *ref);
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+ __u64 element, __u64 event,
+ struct list_head *obj_list, u32 *counter);
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
@@ -292,6 +297,24 @@ static inline u32 make_port_cap_flags(const struct ib_port_attr *attr)
return res;
}
+static inline struct ib_uverbs_async_event_file *
+ib_uverbs_get_async_event(struct uverbs_attr_bundle *attrs,
+ u16 id)
+{
+ struct ib_uobject *async_ev_file_uobj;
+ struct ib_uverbs_async_event_file *async_ev_file;
+
+ async_ev_file_uobj = uverbs_attr_get_uobject(attrs, id);
+ if (IS_ERR(async_ev_file_uobj))
+ async_ev_file = READ_ONCE(attrs->ufile->default_async_file);
+ else
+ async_ev_file = container_of(async_ev_file_uobj,
+ struct ib_uverbs_async_event_file,
+ uobj);
+ if (async_ev_file)
+ uverbs_uobject_get(&async_ev_file->uobj);
+ return async_ev_file;
+}
void copy_port_attr_to_resp(struct ib_port_attr *attr,
struct ib_uverbs_query_port_resp *resp,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 060b4ebbd2ba..b48b3f6e632d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -311,7 +311,7 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
return 0;
err_uobj:
- rdma_alloc_abort_uobject(uobj, attrs);
+ rdma_alloc_abort_uobject(uobj, attrs, false);
err_ucontext:
kfree(attrs->context);
attrs->context = NULL;
@@ -356,8 +356,6 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext,
resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
resp->max_ah = attr->max_ah;
- resp->max_fmr = attr->max_fmr;
- resp->max_map_per_fmr = attr->max_map_per_fmr;
resp->max_srq = attr->max_srq;
resp->max_srq_wr = attr->max_srq_wr;
resp->max_srq_sge = attr->max_srq_sge;
@@ -1051,6 +1049,10 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
goto err_free;
obj->uevent.uobject.object = cq;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
+
memset(&resp, 0, sizeof resp);
resp.base.cq_handle = obj->uevent.uobject.id;
resp.base.cqe = cq->cqe;
@@ -1067,6 +1069,8 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
return obj;
err_cb:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
cq = NULL;
err_free:
@@ -1460,6 +1464,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
}
obj->uevent.uobject.object = qp;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
memset(&resp, 0, sizeof resp);
resp.base.qpn = qp->qp_num;
@@ -1473,7 +1480,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
ret = uverbs_response(attrs, &resp, sizeof(resp));
if (ret)
- goto err_cb;
+ goto err_uevent;
if (xrcd) {
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
@@ -1498,6 +1505,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
return 0;
+err_uevent:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
err_cb:
ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
@@ -2954,11 +2964,11 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
wq_init_attr.cq = cq;
wq_init_attr.max_sge = cmd.max_sge;
wq_init_attr.max_wr = cmd.max_wr;
- wq_init_attr.wq_context = attrs->ufile;
wq_init_attr.wq_type = cmd.wq_type;
wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
wq_init_attr.create_flags = cmd.create_flags;
INIT_LIST_HEAD(&obj->uevent.event_list);
+ obj->uevent.uobject.user_handle = cmd.user_handle;
wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
if (IS_ERR(wq)) {
@@ -2972,12 +2982,12 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
wq->cq = cq;
wq->pd = pd;
wq->device = pd->device;
- wq->wq_context = wq_init_attr.wq_context;
atomic_set(&wq->usecnt, 0);
atomic_inc(&pd->usecnt);
atomic_inc(&cq->usecnt);
- wq->uobject = obj;
- obj->uevent.uobject.object = wq;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
memset(&resp, 0, sizeof(resp));
resp.wq_handle = obj->uevent.uobject.id;
@@ -2996,6 +3006,8 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
return 0;
err_copy:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
err_put_cq:
rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
@@ -3441,46 +3453,25 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
}
attr.event_handler = ib_uverbs_srq_event_handler;
- attr.srq_context = attrs->ufile;
attr.srq_type = cmd->srq_type;
attr.attr.max_wr = cmd->max_wr;
attr.attr.max_sge = cmd->max_sge;
attr.attr.srq_limit = cmd->srq_limit;
INIT_LIST_HEAD(&obj->uevent.event_list);
+ obj->uevent.uobject.user_handle = cmd->user_handle;
- srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
- if (!srq) {
- ret = -ENOMEM;
- goto err_put;
- }
-
- srq->device = pd->device;
- srq->pd = pd;
- srq->srq_type = cmd->srq_type;
- srq->uobject = obj;
- srq->event_handler = attr.event_handler;
- srq->srq_context = attr.srq_context;
-
- ret = pd->device->ops.create_srq(srq, &attr, udata);
- if (ret)
- goto err_free;
-
- if (ib_srq_has_cq(cmd->srq_type)) {
- srq->ext.cq = attr.ext.cq;
- atomic_inc(&attr.ext.cq->usecnt);
- }
-
- if (cmd->srq_type == IB_SRQT_XRC) {
- srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
- atomic_inc(&attr.ext.xrc.xrcd->usecnt);
+ srq = ib_create_srq_user(pd, &attr, obj, udata);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto err_put_pd;
}
- atomic_inc(&pd->usecnt);
- atomic_set(&srq->usecnt, 0);
-
obj->uevent.uobject.object = srq;
obj->uevent.uobject.user_handle = cmd->user_handle;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
memset(&resp, 0, sizeof resp);
resp.srq_handle = obj->uevent.uobject.id;
@@ -3505,14 +3496,11 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
return 0;
err_copy:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
- /* It was released in ib_destroy_srq_user */
- srq = NULL;
-err_free:
- kfree(srq);
-err_put:
+err_put_pd:
uobj_put_obj_read(pd);
-
err_put_cq:
if (ib_srq_has_cq(cmd->srq_type))
rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
@@ -3751,7 +3739,7 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
#define UAPI_DEF_WRITE_IO(req, resp) \
.write.has_resp = 1 + \
BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) + \
- BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) != \
+ BUILD_BUG_ON_ZERO(sizeof_field(req, response) != \
sizeof(u64)), \
.write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 538affbc517e..2d882c02387c 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -58,6 +58,7 @@ struct bundle_priv {
DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
+ DECLARE_BITMAP(uobj_hw_obj_valid, UVERBS_API_ATTR_BKEY_LEN);
/*
* Must be last. bundle ends in a flex array which overlaps
@@ -136,7 +137,7 @@ EXPORT_SYMBOL(_uverbs_alloc);
static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
u16 len)
{
- if (uattr->len > sizeof(((struct ib_uverbs_attr *)0)->data))
+ if (uattr->len > sizeof_field(struct ib_uverbs_attr, data))
return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len,
uattr->len - len);
@@ -230,7 +231,8 @@ static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
for (i = 0; i != attr->len; i++)
uverbs_finalize_object(attr->uobjects[i],
- spec->u2.objs_arr.access, commit, attrs);
+ spec->u2.objs_arr.access, false, commit,
+ attrs);
}
static int uverbs_process_attr(struct bundle_priv *pbundle,
@@ -502,7 +504,9 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
uverbs_finalize_object(
attr->obj_attr.uobject,
- attr->obj_attr.attr_elm->spec.u.obj.access, commit,
+ attr->obj_attr.attr_elm->spec.u.obj.access,
+ test_bit(i, pbundle->uobj_hw_obj_valid),
+ commit,
&pbundle->bundle);
}
@@ -590,6 +594,8 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
sizeof(pbundle->bundle.attr_present));
memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
+ memset(pbundle->uobj_hw_obj_valid, 0,
+ sizeof(pbundle->uobj_hw_obj_valid));
ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
bundle_destroy(pbundle, ret == 0);
@@ -784,3 +790,15 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
}
return uverbs_copy_to(bundle, idx, from, size);
}
+
+/* Once called, an abort will call through to the type's destroy_hw() */
+void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle,
+ u16 idx)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+
+ __set_bit(uapi_bkey_attr(uapi_key_attr(idx)),
+ pbundle->uobj_hw_obj_valid);
+}
+EXPORT_SYMBOL(uverbs_finalize_uobj_create);
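
uverbs_finalize_uobj_create() marks the point after which the method no longer owns cleanup of the HW object: before the call, a failing handler must destroy whatever it made; after the call, returning an error lets the abort path invoke the type's destroy_hw() instead. The resulting handler shape, sketched with hypothetical DEMO_* attribute ids and demo_* helpers:

#include <rdma/uverbs_ioctl.h>

struct demo_obj {		/* hypothetical driver object */
	u32 id;
};

static int demo_create_handler(struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj =
		uverbs_attr_get_uobject(attrs, DEMO_ATTR_CREATE_HANDLE);
	struct demo_obj *obj;

	obj = demo_hw_create(attrs);		/* hypothetical HW allocation */
	if (IS_ERR(obj))
		return PTR_ERR(obj);		/* nothing to unwind yet */

	uobj->object = obj;
	/* From here, abort goes through the type's destroy_hw(). */
	uverbs_finalize_uobj_create(attrs, DEMO_ATTR_CREATE_HANDLE);

	/* A failed response copy can now simply return the error. */
	return uverbs_copy_to(attrs, DEMO_ATTR_CREATE_RESP, &obj->id,
			      sizeof(obj->id));
}
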
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 17fc25db0311..47794c85e9af 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -75,7 +75,7 @@ static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
static DEFINE_IDA(uverbs_ida);
-static void ib_uverbs_add_one(struct ib_device *device);
+static int ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
/*
@@ -146,8 +146,7 @@ void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
{
- struct ib_uverbs_async_event_file *async_file =
- READ_ONCE(uobj->uobject.ufile->async_file);
+ struct ib_uverbs_async_event_file *async_file = uobj->event_file;
struct ib_uverbs_event *evt, *tmp;
if (!async_file)
@@ -159,6 +158,7 @@ void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
kfree(evt);
}
spin_unlock_irq(&async_file->ev_queue.lock);
+ uverbs_uobject_put(&async_file->uobj);
}
void ib_uverbs_detach_umcast(struct ib_qp *qp,
@@ -197,8 +197,8 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
- if (file->async_file)
- uverbs_uobject_put(&file->async_file->uobj);
+ if (file->default_async_file)
+ uverbs_uobject_put(&file->default_async_file->uobj);
put_device(&file->device->dev);
if (file->disassociate_page)
@@ -296,6 +296,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
spin_lock_irq(&ev_queue->lock);
if (!list_empty(&ev_queue->event_list))
pollflags = EPOLLIN | EPOLLRDNORM;
+ else if (ev_queue->is_closed)
+ pollflags = EPOLLERR;
spin_unlock_irq(&ev_queue->lock);
return pollflags;
@@ -346,7 +348,7 @@ const struct file_operations uverbs_async_event_fops = {
.owner = THIS_MODULE,
.read = ib_uverbs_async_event_read,
.poll = ib_uverbs_async_event_poll,
- .release = uverbs_uobject_fd_release,
+ .release = uverbs_async_event_release,
.fasync = ib_uverbs_async_event_fasync,
.llseek = no_llseek,
};
@@ -386,10 +388,9 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
-static void
-ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
- __u64 element, __u64 event, struct list_head *obj_list,
- u32 *counter)
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+ __u64 element, __u64 event,
+ struct list_head *obj_list, u32 *counter)
{
struct ib_uverbs_event *entry;
unsigned long flags;
@@ -426,7 +427,7 @@ ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
static void uverbs_uobj_event(struct ib_uevent_object *eobj,
struct ib_event *event)
{
- ib_uverbs_async_handler(READ_ONCE(eobj->uobject.ufile->async_file),
+ ib_uverbs_async_handler(eobj->event_file,
eobj->uobject.user_handle, event->event,
&eobj->event_list, &eobj->events_reported);
}
@@ -483,10 +484,10 @@ void ib_uverbs_init_async_event_file(
/* The first async_event_file becomes the default one for the file. */
mutex_lock(&uverbs_file->ucontext_lock);
- if (!uverbs_file->async_file) {
+ if (!uverbs_file->default_async_file) {
/* Pairs with the put in ib_uverbs_release_file */
uverbs_uobject_get(&async_file->uobj);
- smp_store_release(&uverbs_file->async_file, async_file);
+ smp_store_release(&uverbs_file->default_async_file, async_file);
}
mutex_unlock(&uverbs_file->ucontext_lock);
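
The default_async_file handoff is a publish pattern: ucontext_lock serializes the "first allocation wins" decision, smp_store_release() orders the object's initialization before the pointer becomes visible, and lock-free readers use READ_ONCE() (dependent loads through the pointer are ordered after it, the same guarantee rcu_dereference() relies on). In miniature, with hypothetical demo_* names:

#include <linux/compiler.h>
#include <linux/mutex.h>

struct demo_file;
void demo_file_get(struct demo_file *f);	/* assumed refcount helper */

static struct demo_file *demo_default;		/* published pointer */
static DEFINE_MUTEX(demo_publish_lock);

static void demo_publish_first(struct demo_file *f)
{
	mutex_lock(&demo_publish_lock);
	if (!demo_default) {
		demo_file_get(f);	/* reference held by the pointer */
		/* Order f's initialization before making it visible. */
		smp_store_release(&demo_default, f);
	}
	mutex_unlock(&demo_publish_lock);
}

static struct demo_file *demo_current(void)
{
	/* Lock-free read; pairs with the release store above. */
	return READ_ONCE(demo_default);
}
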
@@ -1093,7 +1094,7 @@ static int ib_uverbs_create_uapi(struct ib_device *device,
return 0;
}
-static void ib_uverbs_add_one(struct ib_device *device)
+static int ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
dev_t base;
@@ -1101,16 +1102,16 @@ static void ib_uverbs_add_one(struct ib_device *device)
int ret;
if (!device->ops.alloc_ucontext)
- return;
+ return -EOPNOTSUPP;
uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
if (!uverbs_dev)
- return;
+ return -ENOMEM;
ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
if (ret) {
kfree(uverbs_dev);
- return;
+ return -ENOMEM;
}
device_initialize(&uverbs_dev->dev);
@@ -1130,15 +1131,18 @@ static void ib_uverbs_add_one(struct ib_device *device)
devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
GFP_KERNEL);
- if (devnum < 0)
+ if (devnum < 0) {
+ ret = -ENOMEM;
goto err;
+ }
uverbs_dev->devnum = devnum;
if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
else
base = IB_UVERBS_BASE_DEV + devnum;
- if (ib_uverbs_create_uapi(device, uverbs_dev))
+ ret = ib_uverbs_create_uapi(device, uverbs_dev);
+ if (ret)
goto err_uapi;
uverbs_dev->dev.devt = base;
@@ -1153,7 +1157,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
goto err_uapi;
ib_set_client_data(device, &uverbs_client, uverbs_dev);
- return;
+ return 0;
err_uapi:
ida_free(&uverbs_ida, devnum);
@@ -1162,7 +1166,7 @@ err:
ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
put_device(&uverbs_dev->dev);
- return;
+ return ret;
}
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
@@ -1187,9 +1191,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
*/
mutex_unlock(&uverbs_dev->lists_mutex);
- ib_uverbs_async_handler(READ_ONCE(file->async_file), 0,
- IB_EVENT_DEVICE_FATAL, NULL, NULL);
-
uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
kref_put(&file->ref, ib_uverbs_release_file);
@@ -1205,9 +1206,6 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
struct ib_uverbs_device *uverbs_dev = client_data;
int wait_clients = 1;
- if (!uverbs_dev)
- return;
-
cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
ida_free(&uverbs_ida, uverbs_dev->devnum);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 3abfc63225cb..08c39cfb1bd9 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -75,40 +75,6 @@ static int uverbs_free_mw(struct ib_uobject *uobject,
return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
}
-static int uverbs_free_qp(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_qp *qp = uobject->object;
- struct ib_uqp_object *uqp =
- container_of(uobject, struct ib_uqp_object, uevent.uobject);
- int ret;
-
- /*
- * If this is a user triggered destroy then do not allow destruction
- * until the user cleans up all the mcast bindings. Unlike in other
- * places we forcibly clean up the mcast attachments for !DESTROY
- * because the mcast attaches are not ubojects and will not be
- * destroyed by anything else during cleanup processing.
- */
- if (why == RDMA_REMOVE_DESTROY) {
- if (!list_empty(&uqp->mcast_list))
- return -EBUSY;
- } else if (qp == qp->real_qp) {
- ib_uverbs_detach_umcast(qp, uqp);
- }
-
- ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
- return ret;
-
- if (uqp->uxrcd)
- atomic_dec(&uqp->uxrcd->refcnt);
-
- ib_uverbs_release_uevent(&uqp->uevent);
- return ret;
-}
-
static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
@@ -125,48 +91,6 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
return ret;
}
-static int uverbs_free_wq(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_wq *wq = uobject->object;
- struct ib_uwq_object *uwq =
- container_of(uobject, struct ib_uwq_object, uevent.uobject);
- int ret;
-
- ret = ib_destroy_wq(wq, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
- return ret;
-
- ib_uverbs_release_uevent(&uwq->uevent);
- return ret;
-}
-
-static int uverbs_free_srq(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_srq *srq = uobject->object;
- struct ib_uevent_object *uevent =
- container_of(uobject, struct ib_uevent_object, uobject);
- enum ib_srq_type srq_type = srq->srq_type;
- int ret;
-
- ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
- return ret;
-
- if (srq_type == IB_SRQT_XRC) {
- struct ib_usrq_object *us =
- container_of(uevent, struct ib_usrq_object, uevent);
-
- atomic_dec(&us->uxrcd->refcnt);
- }
-
- ib_uverbs_release_uevent(uevent);
- return ret;
-}
-
static int uverbs_free_xrcd(struct ib_uobject *uobject,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
@@ -252,10 +176,6 @@ DECLARE_UVERBS_NAMED_OBJECT(
"[infinibandevent]",
O_RDONLY));
-DECLARE_UVERBS_NAMED_OBJECT(
- UVERBS_OBJECT_QP,
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_MW_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MW_HANDLE,
@@ -267,11 +187,6 @@ DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw),
&UVERBS_METHOD(UVERBS_METHOD_MW_DESTROY));
-DECLARE_UVERBS_NAMED_OBJECT(
- UVERBS_OBJECT_SRQ,
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
- uverbs_free_srq));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_AH_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_AH_HANDLE,
@@ -296,10 +211,6 @@ DECLARE_UVERBS_NAMED_OBJECT(
uverbs_free_flow),
&UVERBS_METHOD(UVERBS_METHOD_FLOW_DESTROY));
-DECLARE_UVERBS_NAMED_OBJECT(
- UVERBS_OBJECT_WQ,
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_RWQ_IND_TBL_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE,
@@ -340,18 +251,12 @@ const struct uapi_definition uverbs_def_obj_intf[] = {
UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COMP_CHANNEL,
UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_AH,
UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MW,
UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW,
UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
UVERBS_OBJECT_RWQ_IND_TBL,
UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)),
diff --git a/drivers/infiniband/core/uverbs_std_types_async_fd.c b/drivers/infiniband/core/uverbs_std_types_async_fd.c
index 82ec0806b34b..61899eaf1f91 100644
--- a/drivers/infiniband/core/uverbs_std_types_async_fd.c
+++ b/drivers/infiniband/core/uverbs_std_types_async_fd.c
@@ -26,10 +26,38 @@ static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
container_of(uobj, struct ib_uverbs_async_event_file, uobj);
ib_unregister_event_handler(&event_file->event_handler);
- ib_uverbs_free_event_queue(&event_file->ev_queue);
+
+ if (why == RDMA_REMOVE_DRIVER_REMOVE)
+ ib_uverbs_async_handler(event_file, 0, IB_EVENT_DEVICE_FATAL,
+ NULL, NULL);
return 0;
}
+int uverbs_async_event_release(struct inode *inode, struct file *filp)
+{
+ struct ib_uverbs_async_event_file *event_file;
+ struct ib_uobject *uobj = filp->private_data;
+ int ret;
+
+ if (!uobj)
+ return uverbs_uobject_fd_release(inode, filp);
+
+ event_file =
+ container_of(uobj, struct ib_uverbs_async_event_file, uobj);
+
+ /*
+ * The async event FD has to deliver IB_EVENT_DEVICE_FATAL even after
+ * disassociation, so cleaning the event list must only happen after
+ * release. The user knows it has reached the end of the event stream
+ * when it sees IB_EVENT_DEVICE_FATAL.
+ */
+ uverbs_uobject_get(uobj);
+ ret = uverbs_uobject_fd_release(inode, filp);
+ ib_uverbs_free_event_queue(&event_file->ev_queue);
+ uverbs_uobject_put(uobj);
+ return ret;
+}
+
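
The release ordering above generalizes: when the final reference on an object can be dropped inside its own file release, take a temporary reference across the call so post-release cleanup still operates on a live object. Schematically, with invented demo_* names:

#include <linux/fs.h>

struct demo_uobj;
void demo_uobj_get(struct demo_uobj *uobj);
void demo_uobj_put(struct demo_uobj *uobj);
void demo_drain_queue(struct demo_uobj *uobj);
int demo_generic_fd_release(struct inode *inode, struct file *filp);

static int demo_fd_release(struct inode *inode, struct file *filp)
{
	struct demo_uobj *uobj = filp->private_data;
	int ret;

	demo_uobj_get(uobj);	/* guard against the final put in release */
	ret = demo_generic_fd_release(inode, filp);
	demo_drain_queue(uobj);	/* safe: our reference keeps uobj alive */
	demo_uobj_put(uobj);
	return ret;
}
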
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_ASYNC_EVENT_ALLOC,
UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index da4110a0eea2..5dce2c7cc323 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -100,6 +100,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
uverbs_uobject_get(ev_file_uobj);
}
+ obj->uevent.event_file = ib_uverbs_get_async_event(
+ attrs, UVERBS_ATTR_CREATE_CQ_EVENT_FD);
+
if (attr.comp_vector >= attrs->ufile->device->num_comp_vectors) {
ret = -EINVAL;
goto err_event_file;
@@ -129,19 +132,17 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
obj->uevent.uobject.object = cq;
obj->uevent.uobject.user_handle = user_handle;
rdma_restrack_uadd(&cq->res);
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE);
ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
sizeof(cq->cqe));
- if (ret)
- goto err_cq;
+ return ret;
- return 0;
-err_cq:
- ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
- cq = NULL;
err_free:
kfree(cq);
err_event_file:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
if (ev_file)
uverbs_uobject_put(ev_file_uobj);
return ret;
@@ -171,6 +172,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE,
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
UVERBS_ATTR_UHW());
static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index c1286a52dc84..a2722ef8496e 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -136,21 +136,15 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
uobj->object = mr;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);
+
ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_LKEY, &mr->lkey,
sizeof(mr->lkey));
if (ret)
- goto err_dereg;
+ return ret;
ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
&mr->rkey, sizeof(mr->rkey));
- if (ret)
- goto err_dereg;
-
- return 0;
-
-err_dereg:
- ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
-
return ret;
}
diff --git a/drivers/infiniband/core/uverbs_std_types_qp.c b/drivers/infiniband/core/uverbs_std_types_qp.c
new file mode 100644
index 000000000000..3bf8dcdfe7eb
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_qp.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+#include "core_priv.h"
+
+static int uverbs_free_qp(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_qp *qp = uobject->object;
+ struct ib_uqp_object *uqp =
+ container_of(uobject, struct ib_uqp_object, uevent.uobject);
+ int ret;
+
+ /*
+ * If this is a user-triggered destroy then do not allow destruction
+ * until the user cleans up all the mcast bindings. Unlike in other
+ * places we forcibly clean up the mcast attachments for !DESTROY
+ * because the mcast attaches are not uobjects and will not be
+ * destroyed by anything else during cleanup processing.
+ */
+ if (why == RDMA_REMOVE_DESTROY) {
+ if (!list_empty(&uqp->mcast_list))
+ return -EBUSY;
+ } else if (qp == qp->real_qp) {
+ ib_uverbs_detach_umcast(qp, uqp);
+ }
+
+ ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ if (uqp->uxrcd)
+ atomic_dec(&uqp->uxrcd->refcnt);
+
+ ib_uverbs_release_uevent(&uqp->uevent);
+ return ret;
+}
+
+static int check_creation_flags(enum ib_qp_type qp_type,
+ u32 create_flags)
+{
+ create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;
+
+ if (!create_flags || qp_type == IB_QPT_DRIVER)
+ return 0;
+
+ if (qp_type != IB_QPT_RAW_PACKET && qp_type != IB_QPT_UD)
+ return -EINVAL;
+
+ if ((create_flags & IB_UVERBS_QP_CREATE_SCATTER_FCS ||
+ create_flags & IB_UVERBS_QP_CREATE_CVLAN_STRIPPING) &&
+ qp_type != IB_QPT_RAW_PACKET)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void set_caps(struct ib_qp_init_attr *attr,
+ struct ib_uverbs_qp_cap *cap, bool req)
+{
+ if (req) {
+ attr->cap.max_send_wr = cap->max_send_wr;
+ attr->cap.max_recv_wr = cap->max_recv_wr;
+ attr->cap.max_send_sge = cap->max_send_sge;
+ attr->cap.max_recv_sge = cap->max_recv_sge;
+ attr->cap.max_inline_data = cap->max_inline_data;
+ } else {
+ cap->max_send_wr = attr->cap.max_send_wr;
+ cap->max_recv_wr = attr->cap.max_recv_wr;
+ cap->max_send_sge = attr->cap.max_send_sge;
+ cap->max_recv_sge = attr->cap.max_recv_sge;
+ cap->max_inline_data = attr->cap.max_inline_data;
+ }
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uqp_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_QP_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_qp_init_attr attr = {};
+ struct ib_uverbs_qp_cap cap = {};
+ struct ib_rwq_ind_table *rwq_ind_tbl = NULL;
+ struct ib_qp *qp;
+ struct ib_pd *pd = NULL;
+ struct ib_srq *srq = NULL;
+ struct ib_cq *recv_cq = NULL;
+ struct ib_cq *send_cq = NULL;
+ struct ib_xrcd *xrcd = NULL;
+ struct ib_uobject *xrcd_uobj = NULL;
+ struct ib_device *device;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_copy_from_or_zero(&cap, attrs,
+ UVERBS_ATTR_CREATE_QP_CAP);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_QP_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&attr.qp_type, attrs,
+ UVERBS_ATTR_CREATE_QP_TYPE);
+ if (ret)
+ return ret;
+
+ switch (attr.qp_type) {
+ case IB_QPT_XRC_TGT:
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE))
+ return -EINVAL;
+
+ xrcd_uobj = uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE);
+ if (IS_ERR(xrcd_uobj))
+ return PTR_ERR(xrcd_uobj);
+
+ xrcd = (struct ib_xrcd *)xrcd_uobj->object;
+ if (!xrcd)
+ return -EINVAL;
+ device = xrcd->device;
+ break;
+ case IB_UVERBS_QPT_RAW_PACKET:
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+ fallthrough;
+ case IB_UVERBS_QPT_RC:
+ case IB_UVERBS_QPT_UC:
+ case IB_UVERBS_QPT_UD:
+ case IB_UVERBS_QPT_XRC_INI:
+ case IB_UVERBS_QPT_DRIVER:
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE) ||
+ (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE) &&
+ attr.qp_type == IB_QPT_XRC_INI))
+ return -EINVAL;
+
+ pd = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE);
+ if (IS_ERR(pd))
+ return PTR_ERR(pd);
+
+ rwq_ind_tbl = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE);
+ if (!IS_ERR(rwq_ind_tbl)) {
+ if (cap.max_recv_wr || cap.max_recv_sge ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE))
+ return -EINVAL;
+
+ /* send_cq is optional */
+ if (cap.max_send_wr) {
+ send_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
+ if (IS_ERR(send_cq))
+ return PTR_ERR(send_cq);
+ }
+ attr.rwq_ind_tbl = rwq_ind_tbl;
+ } else {
+ send_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
+ if (IS_ERR(send_cq))
+ return PTR_ERR(send_cq);
+
+ if (attr.qp_type != IB_QPT_XRC_INI) {
+ recv_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE);
+ if (IS_ERR(recv_cq))
+ return PTR_ERR(recv_cq);
+ }
+ }
+
+ device = pd->device;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = uverbs_get_flags32(&attr.create_flags, attrs,
+ UVERBS_ATTR_CREATE_QP_FLAGS,
+ IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
+ IB_UVERBS_QP_CREATE_SCATTER_FCS |
+ IB_UVERBS_QP_CREATE_CVLAN_STRIPPING |
+ IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING |
+ IB_UVERBS_QP_CREATE_SQ_SIG_ALL);
+ if (ret)
+ return ret;
+
+ ret = check_creation_flags(attr.qp_type, attr.create_flags);
+ if (ret)
+ return ret;
+
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN)) {
+ ret = uverbs_copy_from(&attr.source_qpn, attrs,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN);
+ if (ret)
+ return ret;
+ attr.create_flags |= IB_QP_CREATE_SOURCE_QPN;
+ }
+
+ srq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE);
+ if (!IS_ERR(srq)) {
+ if ((srq->srq_type == IB_SRQT_XRC &&
+ attr.qp_type != IB_QPT_XRC_TGT) ||
+ (srq->srq_type != IB_SRQT_XRC &&
+ attr.qp_type == IB_QPT_XRC_TGT))
+ return -EINVAL;
+ attr.srq = srq;
+ }
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_QP_EVENT_FD);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ INIT_LIST_HEAD(&obj->mcast_list);
+ obj->uevent.uobject.user_handle = user_handle;
+ attr.event_handler = ib_uverbs_qp_event_handler;
+ attr.send_cq = send_cq;
+ attr.recv_cq = recv_cq;
+ attr.xrcd = xrcd;
+ if (attr.create_flags & IB_UVERBS_QP_CREATE_SQ_SIG_ALL) {
+ /* This create flag is uverbs-only; mask it out before
+ * calling the drivers. It exists to avoid requiring an extra
+ * user attribute just for this when using ioctl.
+ */
+ attr.create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;
+ attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ } else {
+ attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ }
+
+ set_caps(&attr, &cap, true);
+ mutex_init(&obj->mcast_lock);
+
+ if (attr.qp_type == IB_QPT_XRC_TGT)
+ qp = ib_create_qp(pd, &attr);
+ else
+ qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
+ obj);
+
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto err_put;
+ }
+
+ if (attr.qp_type != IB_QPT_XRC_TGT) {
+ atomic_inc(&pd->usecnt);
+ if (attr.send_cq)
+ atomic_inc(&attr.send_cq->usecnt);
+ if (attr.recv_cq)
+ atomic_inc(&attr.recv_cq->usecnt);
+ if (attr.srq)
+ atomic_inc(&attr.srq->usecnt);
+ if (attr.rwq_ind_tbl)
+ atomic_inc(&attr.rwq_ind_tbl->usecnt);
+ } else {
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+ uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
+ /* It is done in _ib_create_qp for other QP types */
+ qp->uobject = obj;
+ }
+
+ obj->uevent.uobject.object = qp;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_QP_HANDLE);
+
+ if (attr.qp_type != IB_QPT_XRC_TGT) {
+ ret = ib_create_qp_security(qp, device);
+ if (ret)
+ return ret;
+ }
+
+ set_caps(&attr, &cap, false);
+ ret = uverbs_copy_to_struct_or_zero(attrs,
+ UVERBS_ATTR_CREATE_QP_RESP_CAP, &cap,
+ sizeof(cap));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+ &qp->qp_num,
+ sizeof(qp->qp_num));
+
+ return ret;
+err_put:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ return ret;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QP_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_HANDLE,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_XRCD_HANDLE,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_CAP,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
+ max_inline_data),
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_QP_TYPE,
+ enum ib_uverbs_qp_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_QP_FLAGS,
+ enum ib_uverbs_qp_create_flags,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_SOURCE_QPN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_QP_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_CAP,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
+ max_inline_data),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QP_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_QP_HANDLE);
+ struct ib_uqp_object *obj =
+ container_of(uobj, struct ib_uqp_object, uevent.uobject);
+ struct ib_uverbs_destroy_qp_resp resp = {
+ .events_reported = obj->uevent.events_reported
+ };
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_QP_RESP, &resp,
+ sizeof(resp));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QP_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_QP_HANDLE,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_QP_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_qp_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_QP,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp),
+ &UVERBS_METHOD(UVERBS_METHOD_QP_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_QP_DESTROY));
+
+const struct uapi_definition uverbs_def_obj_qp[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_srq.c b/drivers/infiniband/core/uverbs_std_types_srq.c
new file mode 100644
index 000000000000..c0ecbba26bf4
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_srq.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_free_srq(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_srq *srq = uobject->object;
+ struct ib_uevent_object *uevent =
+ container_of(uobject, struct ib_uevent_object, uobject);
+ enum ib_srq_type srq_type = srq->srq_type;
+ int ret;
+
+ ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ if (srq_type == IB_SRQT_XRC) {
+ struct ib_usrq_object *us =
+ container_of(uobject, struct ib_usrq_object,
+ uevent.uobject);
+
+ atomic_dec(&us->uxrcd->refcnt);
+ }
+
+ ib_uverbs_release_uevent(uevent);
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_usrq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_SRQ_PD_HANDLE);
+ struct ib_srq_init_attr attr = {};
+ struct ib_uobject *xrcd_uobj;
+ struct ib_srq *srq;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_copy_from(&attr.attr.max_sge, attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_SGE);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.attr.max_wr, attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_WR);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.attr.srq_limit, attrs,
+ UVERBS_ATTR_CREATE_SRQ_LIMIT);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_SRQ_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&attr.srq_type, attrs,
+ UVERBS_ATTR_CREATE_SRQ_TYPE);
+ if (ret)
+ return ret;
+
+ if (ib_srq_has_cq(attr.srq_type)) {
+ attr.ext.cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE);
+ if (IS_ERR(attr.ext.cq))
+ return PTR_ERR(attr.ext.cq);
+ }
+
+ switch (attr.srq_type) {
+ case IB_UVERBS_SRQT_XRC:
+ xrcd_uobj = uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE);
+ if (IS_ERR(xrcd_uobj))
+ return PTR_ERR(xrcd_uobj);
+
+ attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
+ if (!attr.ext.xrc.xrcd)
+ return -EINVAL;
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+ uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
+ break;
+ case IB_UVERBS_SRQT_TM:
+ ret = uverbs_copy_from(&attr.ext.tag_matching.max_num_tags,
+ attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS);
+ if (ret)
+ return ret;
+ break;
+ case IB_UVERBS_SRQT_BASIC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_SRQ_EVENT_FD);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ attr.event_handler = ib_uverbs_srq_event_handler;
+ obj->uevent.uobject.user_handle = user_handle;
+
+ srq = ib_create_srq_user(pd, &attr, obj, &attrs->driver_udata);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto err;
+ }
+
+ obj->uevent.uobject.object = srq;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ &attr.attr.max_wr,
+ sizeof(attr.attr.max_wr));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ &attr.attr.max_sge,
+ sizeof(attr.attr.max_sge));
+ if (ret)
+ return ret;
+
+ if (attr.srq_type == IB_SRQT_XRC) {
+ ret = uverbs_copy_to(attrs,
+ UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+ &srq->ext.xrc.srq_num,
+ sizeof(srq->ext.xrc.srq_num));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+err:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ if (attr.srq_type == IB_SRQT_XRC)
+ atomic_dec(&obj->uxrcd->refcnt);
+ return ret;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_SRQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_SRQ_TYPE,
+ enum ib_uverbs_srq_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_LIMIT,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_SRQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_SRQ_HANDLE);
+ struct ib_usrq_object *obj =
+ container_of(uobj, struct ib_usrq_object, uevent.uobject);
+ struct ib_uverbs_destroy_srq_resp resp = {
+ .events_reported = obj->uevent.events_reported
+ };
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_SRQ_RESP, &resp,
+ sizeof(resp));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_SRQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_SRQ_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_srq_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_SRQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
+ uverbs_free_srq),
+ &UVERBS_METHOD(UVERBS_METHOD_SRQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_SRQ_DESTROY)
+);
+
+const struct uapi_definition uverbs_def_obj_srq[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_wq.c b/drivers/infiniband/core/uverbs_std_types_wq.c
new file mode 100644
index 000000000000..cad842ede077
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_wq.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_free_wq(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_wq *wq = uobject->object;
+ struct ib_uwq_object *uwq =
+ container_of(uobject, struct ib_uwq_object, uevent.uobject);
+ int ret;
+
+ ret = ib_destroy_wq(wq, &attrs->driver_udata);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ ib_uverbs_release_uevent(&uwq->uevent);
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_WQ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uwq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_PD_HANDLE);
+ struct ib_cq *cq =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_CQ_HANDLE);
+ struct ib_wq_init_attr wq_init_attr = {};
+ struct ib_wq *wq;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_get_flags32(&wq_init_attr.create_flags, attrs,
+ UVERBS_ATTR_CREATE_WQ_FLAGS,
+ IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING |
+ IB_UVERBS_WQ_FLAGS_SCATTER_FCS |
+ IB_UVERBS_WQ_FLAGS_DELAY_DROP |
+ IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING);
+ if (!ret)
+ ret = uverbs_copy_from(&wq_init_attr.max_sge, attrs,
+ UVERBS_ATTR_CREATE_WQ_MAX_SGE);
+ if (!ret)
+ ret = uverbs_copy_from(&wq_init_attr.max_wr, attrs,
+ UVERBS_ATTR_CREATE_WQ_MAX_WR);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_WQ_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&wq_init_attr.wq_type, attrs,
+ UVERBS_ATTR_CREATE_WQ_TYPE);
+ if (ret)
+ return ret;
+
+ if (wq_init_attr.wq_type != IB_WQT_RQ)
+ return -EINVAL;
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_WQ_EVENT_FD);
+ obj->uevent.uobject.user_handle = user_handle;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
+ wq_init_attr.wq_context = attrs->ufile;
+ wq_init_attr.cq = cq;
+
+ wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
+ if (IS_ERR(wq)) {
+ ret = PTR_ERR(wq);
+ goto err;
+ }
+
+ obj->uevent.uobject.object = wq;
+ wq->wq_type = wq_init_attr.wq_type;
+ wq->cq = cq;
+ wq->pd = pd;
+ wq->device = pd->device;
+ wq->wq_context = wq_init_attr.wq_context;
+ atomic_set(&wq->usecnt, 0);
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&cq->usecnt);
+ wq->uobject = obj;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ &wq_init_attr.max_wr,
+ sizeof(wq_init_attr.max_wr));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ &wq_init_attr.max_sge,
+ sizeof(wq_init_attr.max_sge));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+ &wq->wq_num,
+ sizeof(wq->wq_num));
+ return ret;
+
+err:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ return ret;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_WQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_HANDLE,
+ UVERBS_OBJECT_WQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_WQ_TYPE,
+ enum ib_wq_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_WQ_FLAGS,
+ enum ib_uverbs_wq_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_WQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_WQ_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_WQ_HANDLE);
+ struct ib_uwq_object *obj =
+ container_of(uobj, struct ib_uwq_object, uevent.uobject);
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_WQ_RESP,
+ &obj->uevent.events_reported,
+ sizeof(obj->uevent.events_reported));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_WQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_WQ_HANDLE,
+ UVERBS_OBJECT_WQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_WQ_RESP,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_WQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq),
+ &UVERBS_METHOD(UVERBS_METHOD_WQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_WQ_DESTROY)
+);
+
+const struct uapi_definition uverbs_def_obj_wq[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 3f121ac31e0a..5addc8fae3f3 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -634,6 +634,9 @@ static const struct uapi_definition uverbs_core_api[] = {
UAPI_DEF_CHAIN(uverbs_def_obj_flow_action),
UAPI_DEF_CHAIN(uverbs_def_obj_intf),
UAPI_DEF_CHAIN(uverbs_def_obj_mr),
+ UAPI_DEF_CHAIN(uverbs_def_obj_qp),
+ UAPI_DEF_CHAIN(uverbs_def_obj_srq),
+ UAPI_DEF_CHAIN(uverbs_def_obj_wq),
UAPI_DEF_CHAIN(uverbs_def_write_intf),
{},
};
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 56a71337112c..53d6505c0c7b 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -50,6 +50,7 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>
+#include <rdma/lag.h>
#include "core_priv.h"
#include <trace/events/rdma_core.h>
@@ -500,8 +501,10 @@ rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
u32 flags,
- struct ib_udata *udata)
+ struct ib_udata *udata,
+ struct net_device *xmit_slave)
{
+ struct rdma_ah_init_attr init_attr = {};
struct ib_device *device = pd->device;
struct ib_ah *ah;
int ret;
@@ -521,8 +524,11 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
ah->pd = pd;
ah->type = ah_attr->type;
ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
+ init_attr.ah_attr = ah_attr;
+ init_attr.flags = flags;
+ init_attr.xmit_slave = xmit_slave;
- ret = device->ops.create_ah(ah, ah_attr, flags, udata);
+ ret = device->ops.create_ah(ah, &init_attr, udata);
if (ret) {
kfree(ah);
return ERR_PTR(ret);
@@ -547,15 +553,22 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
u32 flags)
{
const struct ib_gid_attr *old_sgid_attr;
+ struct net_device *slave;
struct ib_ah *ah;
int ret;
ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
if (ret)
return ERR_PTR(ret);
-
- ah = _rdma_create_ah(pd, ah_attr, flags, NULL);
-
+ slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
+ (flags & RDMA_CREATE_AH_SLEEPABLE) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (IS_ERR(slave)) {
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return (void *)slave;
+ }
+ ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
+ rdma_lag_put_ah_roce_slave(slave);
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
return ah;
}
@@ -594,7 +607,8 @@ struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
}
}
- ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata);
+ ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
+ udata, NULL);
out:
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
@@ -967,15 +981,29 @@ EXPORT_SYMBOL(rdma_destroy_ah_user);
/* Shared receive queues */
-struct ib_srq *ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr)
+/**
+ * ib_create_srq_user - Creates a SRQ associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the
+ * SRQ. If SRQ creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created SRQ.
+ * @uobject: uobject pointer if this is not a kernel SRQ
+ * @udata: udata pointer if this is not a kernel SRQ
+ *
+ * srq_init_attr->attr.max_wr and srq_init_attr->attr.max_sge are read to
+ * determine the requested size of the SRQ, and set to the actual values
+ * allocated on return. If ib_create_srq_user() succeeds, then max_wr and
+ * max_sge will always be at least as large as the requested values.
+ */
+struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_usrq_object *uobject,
+ struct ib_udata *udata)
{
struct ib_srq *srq;
int ret;
- if (!pd->device->ops.create_srq)
- return ERR_PTR(-EOPNOTSUPP);
-
srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
if (!srq)
return ERR_PTR(-ENOMEM);
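
Kernel ULPs keep the old calling convention; the expectation (an assumption about the header-side change, which is not shown in this diff) is that ib_create_srq() survives as a thin inline over ib_create_srq_user() with NULL uobject/udata:

/* Presumed kernel-facing wrapper in <rdma/ib_verbs.h> (assumption). */
static inline struct ib_srq *ib_create_srq(struct ib_pd *pd,
					   struct ib_srq_init_attr *init_attr)
{
	return ib_create_srq_user(pd, init_attr, NULL, NULL);
}

/* A kernel consumer then creates a basic SRQ exactly as before: */
static struct ib_srq *demo_make_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init = {
		.srq_type	= IB_SRQT_BASIC,
		.attr = {
			.max_wr  = 128,	/* may be rounded up by the driver */
			.max_sge = 2,
		},
	};

	return ib_create_srq(pd, &init);
}
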
@@ -985,6 +1013,7 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
srq->event_handler = srq_init_attr->event_handler;
srq->srq_context = srq_init_attr->srq_context;
srq->srq_type = srq_init_attr->srq_type;
+ srq->uobject = uobject;
if (ib_srq_has_cq(srq->srq_type)) {
srq->ext.cq = srq_init_attr->ext.cq;
@@ -996,7 +1025,7 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
}
atomic_inc(&pd->usecnt);
- ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
+ ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
if (ret) {
atomic_dec(&srq->pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC)
@@ -1009,7 +1038,7 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
return srq;
}
-EXPORT_SYMBOL(ib_create_srq);
+EXPORT_SYMBOL(ib_create_srq_user);
int ib_modify_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr,
@@ -1633,11 +1662,35 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
const struct ib_gid_attr *old_sgid_attr_alt_av;
int ret;
+ attr->xmit_slave = NULL;
if (attr_mask & IB_QP_AV) {
ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
&old_sgid_attr_av);
if (ret)
return ret;
+
+ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
+ struct net_device *slave;
+
+ /*
+ * If the user provided the qp_attr then we have to
+ * resolve it. Kernel users have to provide already
+ * resolved rdma_ah_attr's.
+ */
+ if (udata) {
+ ret = ib_resolve_eth_dmac(qp->device,
+ &attr->ah_attr);
+ if (ret)
+ goto out_av;
+ }
+ slave = rdma_lag_get_ah_roce_slave(qp->device,
+ &attr->ah_attr,
+ GFP_KERNEL);
+ if (IS_ERR(slave)) {
+ ret = PTR_ERR(slave);
+ goto out_av;
+ }
+ attr->xmit_slave = slave;
+ }
}
if (attr_mask & IB_QP_ALT_PATH) {
/*
@@ -1664,18 +1717,6 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
}
}
- /*
- * If the user provided the qp_attr then we have to resolve it. Kernel
- * users have to provide already resolved rdma_ah_attr's
- */
- if (udata && (attr_mask & IB_QP_AV) &&
- attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
- is_qp_type_connected(qp)) {
- ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
- if (ret)
- goto out;
- }
-
if (rdma_ib_or_roce(qp->device, port)) {
if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
dev_warn(&qp->device->dev,
@@ -1717,8 +1758,10 @@ out:
if (attr_mask & IB_QP_ALT_PATH)
rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
out_av:
- if (attr_mask & IB_QP_AV)
+ if (attr_mask & IB_QP_AV) {
+ rdma_lag_put_ah_roce_slave(attr->xmit_slave);
rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
+ }
return ret;
}
@@ -1962,6 +2005,9 @@ EXPORT_SYMBOL(__ib_create_cq);
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
+ if (cq->shared)
+ return -EOPNOTSUPP;
+
return cq->device->ops.modify_cq ?
cq->device->ops.modify_cq(cq, cq_count,
cq_period) : -EOPNOTSUPP;
@@ -1970,6 +2016,9 @@ EXPORT_SYMBOL(rdma_set_cq_moderation);
int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
+ if (WARN_ON_ONCE(cq->shared))
+ return -EOPNOTSUPP;
+
if (atomic_read(&cq->usecnt))
return -EBUSY;
@@ -1982,6 +2031,9 @@ EXPORT_SYMBOL(ib_destroy_cq_user);
int ib_resize_cq(struct ib_cq *cq, int cqe)
{
+ if (cq->shared)
+ return -EOPNOTSUPP;
+
return cq->device->ops.resize_cq ?
cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
}
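The cq->shared guards in the three helpers above fence off CQs owned by the new shared CQ pool, whose moderation and teardown belong to the core. A consumer-side sketch, assuming the pool API added in the same series (ibdev and nr_cqe are placeholders):

	struct ib_cq *cq;

	cq = ib_cq_pool_get(ibdev, nr_cqe, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);
	/* post work completing on cq, then hand it back to the pool */
	ib_cq_pool_put(cq, nr_cqe);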
@@ -2160,54 +2212,6 @@ out:
}
EXPORT_SYMBOL(ib_alloc_mr_integrity);
-/* "Fast" memory regions */
-
-struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct ib_fmr *fmr;
-
- if (!pd->device->ops.alloc_fmr)
- return ERR_PTR(-EOPNOTSUPP);
-
- fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
- if (!IS_ERR(fmr)) {
- fmr->device = pd->device;
- fmr->pd = pd;
- atomic_inc(&pd->usecnt);
- }
-
- return fmr;
-}
-EXPORT_SYMBOL(ib_alloc_fmr);
-
-int ib_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *fmr;
-
- if (list_empty(fmr_list))
- return 0;
-
- fmr = list_entry(fmr_list->next, struct ib_fmr, list);
- return fmr->device->ops.unmap_fmr(fmr_list);
-}
-EXPORT_SYMBOL(ib_unmap_fmr);
-
-int ib_dealloc_fmr(struct ib_fmr *fmr)
-{
- struct ib_pd *pd;
- int ret;
-
- pd = fmr->pd;
- ret = fmr->device->ops.dealloc_fmr(fmr);
- if (!ret)
- atomic_dec(&pd->usecnt);
-
- return ret;
-}
-EXPORT_SYMBOL(ib_dealloc_fmr);
-
/* Multicast groups */
static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
@@ -2574,6 +2578,7 @@ EXPORT_SYMBOL(ib_map_mr_sg_pi);
* @page_size: page vector desired page size
*
* Constraints:
+ *
* - The first sg element is allowed to have an offset.
* - Each sg element must either be aligned to page_size or virtually
* contiguous to the previous element. In case an sg element has a
@@ -2607,10 +2612,12 @@ EXPORT_SYMBOL(ib_map_mr_sg);
* @mr: memory region
* @sgl: dma mapped scatterlist
* @sg_nents: number of entries in sg
- * @sg_offset_p: IN: start offset in bytes into sg
- * OUT: offset in bytes for element n of the sg of the first
+ * @sg_offset_p: ==== =======================================================
+ * IN start offset in bytes into sg
+ * OUT offset in bytes for element n of the sg of the first
* byte that has not been processed where n is the return
* value of this function.
+ * ==== =======================================================
* @set_page: driver page assignment function pointer
*
* Core service helper for drivers to convert the largest
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 95f6d493d1b9..8b6ad5cddfce 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -177,9 +177,6 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_total_mcast_qp_attach = 0;
ib_attr->max_ah = dev_attr->max_ah;
- ib_attr->max_fmr = 0;
- ib_attr->max_map_per_fmr = 0;
-
ib_attr->max_srq = dev_attr->max_srq;
ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
ib_attr->max_srq_sge = dev_attr->max_srq_sges;
@@ -631,11 +628,12 @@ static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
return nw_type;
}
-int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct ib_pd *ib_pd = ib_ah->pd;
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
struct bnxt_re_dev *rdev = pd->rdev;
const struct ib_gid_attr *sgid_attr;
@@ -673,7 +671,8 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
- !(flags & RDMA_CREATE_AH_SLEEPABLE));
+ !(init_attr->flags &
+ RDMA_CREATE_AH_SLEEPABLE));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
return rc;
@@ -856,7 +855,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return -EFAULT;
- bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
+ bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
/* Consider mapping PSN search memory only for RC QPs. */
if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
@@ -879,7 +878,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
qplib_qp->qp_handle = ureq.qp_handle;
if (!qp->qplib_qp.srq) {
- bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
IB_ACCESS_LOCAL_WRITE);
@@ -976,6 +975,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.sig_type = true;
/* Shadow QP SQ depth should be same as QP1 RQ depth */
+ qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size();
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
@@ -986,6 +986,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.scq = qp1_qp->scq;
qp->qplib_qp.rcq = qp1_qp->rcq;
+ qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size();
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
@@ -1021,10 +1022,12 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *rq;
int entries;
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
+ rq = &qplqp->rq;
dev_attr = &rdev->dev_attr;
if (init_attr->srq) {
@@ -1036,23 +1039,21 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
return -EINVAL;
}
qplqp->srq = &srq->qplib_srq;
- qplqp->rq.max_wqe = 0;
+ rq->max_wqe = 0;
} else {
+ rq->wqe_size = bnxt_re_get_rwqe_size();
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty.
*/
entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
- qplqp->rq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
-
- qplqp->rq.q_full_delta = qplqp->rq.max_wqe -
- init_attr->cap.max_recv_wr;
- qplqp->rq.max_sge = init_attr->cap.max_recv_sge;
- if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
- qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->q_full_delta = rq->max_wqe - init_attr->cap.max_recv_wr;
+ rq->max_sge = init_attr->cap.max_recv_sge;
+ if (rq->max_sge > dev_attr->max_qp_sges)
+ rq->max_sge = dev_attr->max_qp_sges;
}
- qplqp->rq.sg_info.pgsize = PAGE_SIZE;
- qplqp->rq.sg_info.pgshft = PAGE_SHIFT;
+ rq->sg_info.pgsize = PAGE_SIZE;
+ rq->sg_info.pgshft = PAGE_SHIFT;
return 0;
}
@@ -1080,15 +1081,18 @@ static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *sq;
int entries;
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
+ sq = &qplqp->sq;
dev_attr = &rdev->dev_attr;
- qplqp->sq.max_sge = init_attr->cap.max_send_sge;
- if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
- qplqp->sq.max_sge = dev_attr->max_qp_sges;
+ sq->wqe_size = bnxt_re_get_swqe_size();
+ sq->max_sge = init_attr->cap.max_send_sge;
+ if (sq->max_sge > dev_attr->max_qp_sges)
+ sq->max_sge = dev_attr->max_qp_sges;
/*
* Change the SQ depth if user has requested minimum using
* configfs. Only supported for kernel consumers
@@ -1096,9 +1100,9 @@ static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
entries = init_attr->cap.max_send_wr;
/* Allocate 128 + 1 more than what's provided */
entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ sq->q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
/*
* Reserving one slot for Phantom WQE. Application can
* post one extra entry in this case. But allowing this to avoid
@@ -1511,7 +1515,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return -EFAULT;
- bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
IB_ACCESS_LOCAL_WRITE);
@@ -1534,15 +1538,20 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
{
- struct ib_pd *ib_pd = ib_srq->pd;
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_srq *srq =
- container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+ struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_nq *nq = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ struct bnxt_re_pd *pd;
+ struct ib_pd *ib_pd;
int rc, entries;
+ ib_pd = ib_srq->pd;
+ pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ rdev = pd->rdev;
+ dev_attr = &rdev->dev_attr;
+ srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded");
rc = -EINVAL;
@@ -1563,8 +1572,9 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
if (entries > dev_attr->max_srq_wqes + 1)
entries = dev_attr->max_srq_wqes + 1;
-
srq->qplib_srq.max_wqe = entries;
+
+ srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size();
srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 23d972da5652..e5fbbeba6d28 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -122,12 +122,6 @@ struct bnxt_re_frpl {
u64 *page_list;
};
-struct bnxt_re_fmr {
- struct bnxt_re_dev *rdev;
- struct ib_fmr ib_fmr;
- struct bnxt_qplib_mrw qplib_fmr;
-};
-
struct bnxt_re_mw {
struct bnxt_re_dev *rdev;
struct ib_mw ib_mw;
@@ -142,6 +136,16 @@ struct bnxt_re_ucontext {
spinlock_t sh_lock; /* protect shpg */
};
+static inline u16 bnxt_re_get_swqe_size(void)
+{
+ return sizeof(struct sq_send);
+}
+
+static inline u16 bnxt_re_get_rwqe_size(void)
+{
+ return sizeof(struct rq_wqe);
+}
+
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
@@ -160,7 +164,7 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 899a5d2c100e..c5e29577cd43 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -300,12 +300,12 @@ static void bnxt_qplib_service_nq(unsigned long data)
{
struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
struct bnxt_qplib_hwq *hwq = &nq->hwq;
- struct nq_base *nqe, **nq_ptr;
- struct bnxt_qplib_cq *cq;
- int num_cqne_processed = 0;
int num_srqne_processed = 0;
+ int num_cqne_processed = 0;
+ struct bnxt_qplib_cq *cq;
int budget = nq->budget;
u32 sw_cons, raw_cons;
+ struct nq_base *nqe;
uintptr_t q_handle;
u16 type;
@@ -314,8 +314,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
raw_cons = hwq->cons;
while (budget--) {
sw_cons = HWQ_CMP(raw_cons, hwq);
- nq_ptr = (struct nq_base **)hwq->pbl_ptr;
- nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
+ nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
break;
@@ -392,13 +391,11 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_nq *nq = dev_instance;
struct bnxt_qplib_hwq *hwq = &nq->hwq;
- struct nq_base **nq_ptr;
u32 sw_cons;
/* Prefetch the NQ element */
sw_cons = HWQ_CMP(hwq->cons, hwq);
- nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
- prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
+ prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
/* Fan out to CPU affinitized kthreads? */
tasklet_schedule(&nq->nq_tasklet);
@@ -612,12 +609,13 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct cmdq_create_srq req;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
+ u16 pg_sz_lvl;
int rc, idx;
hwq_attr.res = res;
hwq_attr.sginfo = &srq->sg_info;
hwq_attr.depth = srq->max_wqe;
- hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.stride = srq->wqe_size;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
@@ -638,22 +636,11 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
pbl = &srq->hwq.pbl[PBL_LVL_0];
- req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
- CMDQ_CREATE_SRQ_LVL_MASK) <<
- CMDQ_CREATE_SRQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
+ pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
+ CMDQ_CREATE_SRQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
+ CMDQ_CREATE_SRQ_LVL_SFT;
+ req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
req.pd_id = cpu_to_le32(srq->pd->id);
req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
@@ -740,7 +727,7 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_swqe *wqe)
{
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- struct rq_wqe *srqe, **srqe_ptr;
+ struct rq_wqe *srqe;
struct sq_sge *hw_sge;
u32 sw_prod, sw_cons, count = 0;
int i, rc = 0, next;
@@ -758,9 +745,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
spin_unlock(&srq_hwq->lock);
sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
- srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
- srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
- memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
+ memset(srqe, 0, srq->wqe_size);
/* Calculate wqe_size16 and data_len */
for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
i < wqe->num_sge; i++, hw_sge++) {
@@ -809,6 +795,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
u32 qp_flags = 0;
+ u8 pg_sz_lvl;
int rc;
RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
@@ -822,7 +809,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.depth = sq->max_wqe;
- hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.stride = sq->wqe_size;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
@@ -835,33 +822,18 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.sq_pg_size_sq_lvl =
- ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
- << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
if (qp->scq)
req.scq_cid = cpu_to_le32(qp->scq->id);
-
- qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
-
/* RQ */
if (rq->max_wqe) {
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
- hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.stride = rq->wqe_size;
hwq_attr.depth = qp->rq.max_wqe;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
@@ -876,32 +848,20 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
pbl = &rq->hwq.pbl[PBL_LVL_0];
req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.rq_pg_size_rq_lvl =
- ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
- CMDQ_CREATE_QP1_RQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
if (qp->rcq)
req.rcq_cid = cpu_to_le32(qp->rcq->id);
}
-
/* Header buffer - allow hdr_buf pass in */
rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
if (rc) {
rc = -ENOMEM;
goto fail;
}
+ qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
req.qp_flags = cpu_to_le32(qp_flags);
req.sq_size = cpu_to_le32(sq->hwq.max_elements);
req.rq_size = cpu_to_le32(rq->hwq.max_elements);
@@ -948,23 +908,47 @@ exit:
return rc;
}
+static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
+{
+ struct bnxt_qplib_hwq *hwq;
+ struct bnxt_qplib_q *sq;
+ u64 fpsne, psne, psn_pg;
+ u16 indx_pad = 0, indx;
+ u16 pg_num, pg_indx;
+ u64 *page;
+
+ sq = &qp->sq;
+ hwq = &sq->hwq;
+
+ fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->max_elements, &psn_pg);
+ if (!IS_ALIGNED(fpsne, PAGE_SIZE))
+ indx_pad = (fpsne & ~PAGE_MASK) / size;
+
+ page = (u64 *)psn_pg;
+ for (indx = 0; indx < hwq->max_elements; indx++) {
+ pg_num = (indx + indx_pad) / (PAGE_SIZE / size);
+ pg_indx = (indx + indx_pad) % (PAGE_SIZE / size);
+ psne = page[pg_num] + pg_indx * size;
+ sq->swq[indx].psn_ext = (struct sq_psn_search_ext *)psne;
+ sq->swq[indx].psn_search = (struct sq_psn_search *)psne;
+ }
+}
+
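As a worked example of the padding above, assuming 4K pages and the 8-byte legacy struct sq_psn_search: a PSN area starting 64 bytes into its first page gives indx_pad = 64 / 8 = 8, so entry 0 resolves to page[0] + 8 * 8 (the unaligned start itself) and each page holds PAGE_SIZE / size = 512 search entries.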
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
- unsigned long int psn_search, poff = 0;
struct bnxt_qplib_sg_info sginfo = {};
- struct sq_psn_search **psn_search_ptr;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
- int i, rc, req_size, psn_sz = 0;
- struct sq_send **hw_sq_send_ptr;
struct creq_create_qp_resp resp;
+ int rc, req_size, psn_sz = 0;
struct bnxt_qplib_hwq *xrrq;
u16 cmd_flags = 0, max_ssge;
- struct cmdq_create_qp req;
struct bnxt_qplib_pbl *pbl;
+ struct cmdq_create_qp req;
u32 qp_flags = 0;
+ u8 pg_sz_lvl;
u16 max_rsge;
RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -983,7 +967,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
- hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.stride = sq->wqe_size;
hwq_attr.depth = sq->max_wqe;
hwq_attr.aux_stride = psn_sz;
hwq_attr.aux_depth = hwq_attr.depth;
@@ -997,64 +981,25 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rc = -ENOMEM;
goto fail_sq;
}
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- if (psn_sz) {
- psn_search_ptr = (struct sq_psn_search **)
- &hw_sq_send_ptr[get_sqe_pg
- (sq->hwq.max_elements)];
- psn_search = (unsigned long int)
- &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
- [get_sqe_idx(sq->hwq.max_elements)];
- if (psn_search & ~PAGE_MASK) {
- /* If the psn_search does not start on a page boundary,
- * then calculate the offset
- */
- poff = (psn_search & ~PAGE_MASK) /
- BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
- }
- for (i = 0; i < sq->hwq.max_elements; i++) {
- sq->swq[i].psn_search =
- &psn_search_ptr[get_psne_pg(i + poff)]
- [get_psne_idx(i + poff)];
- /*psns_ext will be used only for P5 chips. */
- sq->swq[i].psn_ext =
- (struct sq_psn_search_ext *)
- &psn_search_ptr[get_psne_pg(i + poff)]
- [get_psne_idx(i + poff)];
- }
- }
+
+ if (psn_sz)
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
+
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.sq_pg_size_sq_lvl =
- ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
- << CMDQ_CREATE_QP_SQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
if (qp->scq)
req.scq_cid = cpu_to_le32(qp->scq->id);
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
- if (qp->sig_type)
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
-
/* RQ */
if (rq->max_wqe) {
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
- hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.stride = rq->wqe_size;
hwq_attr.depth = rq->max_wqe;
hwq_attr.aux_stride = 0;
hwq_attr.aux_depth = 0;
@@ -1071,22 +1016,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
pbl = &rq->hwq.pbl[PBL_LVL_0];
req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.rq_pg_size_rq_lvl =
- ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
- CMDQ_CREATE_QP_RQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
} else {
/* SRQ */
if (qp->srq) {
@@ -1097,7 +1030,13 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (qp->rcq)
req.rcq_cid = cpu_to_le32(qp->rcq->id);
+
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
+ if (qp->sig_type)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
req.qp_flags = cpu_to_le32(qp_flags);
+
req.sq_size = cpu_to_le32(sq->hwq.max_elements);
req.rq_size = cpu_to_le32(rq->hwq.max_elements);
qp->sq_hdr_buf = NULL;
@@ -1483,12 +1422,11 @@ bail:
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
- struct cq_base *hw_cqe, **hw_cqe_ptr;
+ struct cq_base *hw_cqe;
int i;
for (i = 0; i < cq_hwq->max_elements; i++) {
- hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
+ hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
continue;
/*
@@ -1615,6 +1553,34 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
return NULL;
}
+static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+{
+ struct sq_psn_search_ext *psns_ext;
+ struct sq_psn_search *psns;
+ u32 flg_npsn;
+ u32 op_spsn;
+
+ psns = swq->psn_search;
+ psns_ext = swq->psn_ext;
+
+ op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
+ SQ_PSN_SEARCH_START_PSN_MASK);
+ op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
+ SQ_PSN_SEARCH_OPCODE_MASK);
+ flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_PSN_SEARCH_NEXT_PSN_MASK);
+
+ if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
+ psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
+ } else {
+ psns->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns->flags_next_psn = cpu_to_le32(flg_npsn);
+ }
+}
+
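Since both PSN fields are 24 bits wide, the values naturally wrap at BTH_PSN_MASK; a worked example with illustrative numbers:

	/* a 3-packet SEND starting at the top of the PSN space */
	swq->start_psn = 0xfffffe;
	swq->next_psn = (0xfffffe + 3) & BTH_PSN_MASK;	/* wraps to 0x000001 */

bnxt_qplib_fill_psn_search() then shifts these into opcode_start_psn and flags_next_psn, writing the extended psns_ext layout only on gen-P5 parts.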
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *sq = &qp->sq;
@@ -1625,16 +1591,16 @@ void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe)
{
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ int i, rc = 0, data_len = 0, pkt_num = 0;
struct bnxt_qplib_q *sq = &qp->sq;
+ struct sq_send *hw_sq_send_hdr;
struct bnxt_qplib_swq *swq;
- struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
- struct sq_sge *hw_sge;
- struct bnxt_qplib_nq_work *nq_work = NULL;
bool sch_handler = false;
- u32 sw_prod;
+ struct sq_sge *hw_sge;
u8 wqe_size16;
- int i, rc = 0, data_len = 0, pkt_num = 0;
__le32 temp32;
+ u32 sw_prod;
if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
@@ -1663,11 +1629,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
swq->start_psn = sq->psn & BTH_PSN_MASK;
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
- [get_sqe_idx(sw_prod)];
-
- memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
+ hw_sq_send_hdr = bnxt_qplib_get_qe(&sq->hwq, sw_prod, NULL);
+ memset(hw_sq_send_hdr, 0, sq->wqe_size);
if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
/* Copy the inline data */
@@ -1854,28 +1817,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
goto done;
}
swq->next_psn = sq->psn & BTH_PSN_MASK;
- if (swq->psn_search) {
- u32 opcd_spsn;
- u32 flg_npsn;
-
- opcd_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
- SQ_PSN_SEARCH_START_PSN_MASK);
- opcd_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
- SQ_PSN_SEARCH_OPCODE_MASK);
- flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
- SQ_PSN_SEARCH_NEXT_PSN_MASK);
- if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
- swq->psn_ext->opcode_start_psn =
- cpu_to_le32(opcd_spsn);
- swq->psn_ext->flags_next_psn =
- cpu_to_le32(flg_npsn);
- } else {
- swq->psn_search->opcode_start_psn =
- cpu_to_le32(opcd_spsn);
- swq->psn_search->flags_next_psn =
- cpu_to_le32(flg_npsn);
- }
- }
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RC)
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
queue_err:
if (sch_handler) {
/* Store the ULP info in the software structures */
@@ -1918,13 +1861,13 @@ void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe)
{
- struct bnxt_qplib_q *rq = &qp->rq;
- struct rq_wqe *rqe, **rqe_ptr;
- struct sq_sge *hw_sge;
struct bnxt_qplib_nq_work *nq_work = NULL;
+ struct bnxt_qplib_q *rq = &qp->rq;
bool sch_handler = false;
- u32 sw_prod;
+ struct sq_sge *hw_sge;
+ struct rq_wqe *rqe;
int i, rc = 0;
+ u32 sw_prod;
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
sch_handler = true;
@@ -1941,10 +1884,8 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
rq->swq[sw_prod].wr_id = wqe->wr_id;
- rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
- rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
-
- memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ rqe = bnxt_qplib_get_qe(&rq->hwq, sw_prod, NULL);
+ memset(rqe, 0, rq->wqe_size);
/* Calculate wqe_size16 and data_len */
for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
@@ -1997,9 +1938,10 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct creq_create_cq_resp resp;
- struct cmdq_create_cq req;
struct bnxt_qplib_pbl *pbl;
+ struct cmdq_create_cq req;
u16 cmd_flags = 0;
+ u32 pg_sz_lvl;
int rc;
hwq_attr.res = res;
@@ -2020,22 +1962,13 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
}
req.dpi = cpu_to_le32(cq->dpi->dpi);
req.cq_handle = cpu_to_le64(cq->cq_handle);
-
req.cq_size = cpu_to_le32(cq->hwq.max_elements);
pbl = &cq->hwq.pbl[PBL_LVL_0];
- req.pg_size_lvl = cpu_to_le32(
- ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
- CMDQ_CREATE_CQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
-
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
+ CMDQ_CREATE_CQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
+ req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
-
req.cq_fco_cnq_id = cpu_to_le32(
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
CMDQ_CREATE_CQ_CNQ_ID_SFT);
@@ -2194,13 +2127,13 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
{
- struct bnxt_qplib_q *sq = &qp->sq;
- struct bnxt_qplib_swq *swq;
u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
- struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
+ struct bnxt_qplib_q *sq = &qp->sq;
struct cq_req *peek_req_hwcqe;
struct bnxt_qplib_qp *peek_qp;
struct bnxt_qplib_q *peek_sq;
+ struct bnxt_qplib_swq *swq;
+ struct cq_base *peek_hwcqe;
int i, rc = 0;
/* Normal mode */
@@ -2230,9 +2163,8 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
i = cq->hwq.max_elements;
while (i--) {
peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
- peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
- [CQE_IDX(peek_sw_cq_cons)];
+ peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
+ peek_sw_cq_cons, NULL);
/* If the next hwcqe is VALID */
if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
cq->hwq.max_elements)) {
@@ -2294,11 +2226,11 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe **pcqe, int *budget,
u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *sq;
- struct bnxt_qplib_cqe *cqe;
u32 sw_sq_cons, cqe_sq_cons;
struct bnxt_qplib_swq *swq;
+ struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *sq;
int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -2408,10 +2340,10 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe **pcqe,
int *budget)
{
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *rq;
struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
u32 wr_id_idx;
int rc = 0;
@@ -2483,10 +2415,10 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe **pcqe,
int *budget)
{
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *rq;
struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
u32 wr_id_idx;
int rc = 0;
@@ -2561,15 +2493,13 @@ done:
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
- struct cq_base *hw_cqe, **hw_cqe_ptr;
+ struct cq_base *hw_cqe;
u32 sw_cons, raw_cons;
bool rc = true;
raw_cons = cq->hwq.cons;
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
-
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
/* Check for Valid bit. If the CQE is valid, return false */
rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
return rc;
@@ -2813,7 +2743,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
- struct cq_base *hw_cqe, **hw_cqe_ptr;
+ struct cq_base *hw_cqe;
u32 sw_cons, raw_cons;
int budget, rc = 0;
@@ -2822,8 +2752,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
while (budget) {
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
/* Check for Valid bit */
if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 7edb70b6bb16..568ca390322c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -45,6 +45,7 @@ struct bnxt_qplib_srq {
struct bnxt_qplib_db_info dbinfo;
u64 srq_handle;
u32 id;
+ u16 wqe_size;
u32 max_wqe;
u32 max_sge;
u32 threshold;
@@ -65,38 +66,7 @@ struct bnxt_qplib_sge {
u32 size;
};
-#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE sizeof(struct sq_send)
-
-#define SQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
-#define SQE_MAX_IDX_PER_PG (SQE_CNT_PER_PG - 1)
-
-static inline u32 get_sqe_pg(u32 val)
-{
- return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
-}
-
-static inline u32 get_sqe_idx(u32 val)
-{
- return (val & SQE_MAX_IDX_PER_PG);
-}
-
-#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE sizeof(struct sq_psn_search)
-
-#define PSNE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
-#define PSNE_MAX_IDX_PER_PG (PSNE_CNT_PER_PG - 1)
-
-static inline u32 get_psne_pg(u32 val)
-{
- return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
-}
-
-static inline u32 get_psne_idx(u32 val)
-{
- return (val & PSNE_MAX_IDX_PER_PG);
-}
-
#define BNXT_QPLIB_QP_MAX_SGL 6
-
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
@@ -226,19 +196,13 @@ struct bnxt_qplib_swqe {
};
};
-#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE sizeof(struct rq_wqe)
-
-#define RQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
-#define RQE_MAX_IDX_PER_PG (RQE_CNT_PER_PG - 1)
-#define RQE_PG(x) (((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
-#define RQE_IDX(x) ((x) & RQE_MAX_IDX_PER_PG)
-
struct bnxt_qplib_q {
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq;
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
+ u16 wqe_size;
u16 q_full_delta;
u16 max_sge;
u32 psn;
@@ -256,7 +220,7 @@ struct bnxt_qplib_qp {
struct bnxt_qplib_dpi *dpi;
struct bnxt_qplib_chip_ctx *cctx;
u64 qp_handle;
-#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
+#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
u32 id;
u8 type;
u8 sig_type;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index f01e864bb611..4e211162acee 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -89,10 +89,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
struct creq_base *resp, void *sb, u8 is_block)
{
struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
- struct bnxt_qplib_cmdqe *cmdqe, **hwq_ptr;
struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
struct bnxt_qplib_crsqe *crsqe;
- u32 cmdq_depth = rcfw->cmdq_depth;
+ struct bnxt_qplib_cmdqe *cmdqe;
u32 sw_prod, cmdq_prod;
struct pci_dev *pdev;
unsigned long flags;
@@ -163,13 +162,11 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
BNXT_QPLIB_CMDQE_UNITS;
}
- hwq_ptr = (struct bnxt_qplib_cmdqe **)hwq->pbl_ptr;
preq = (u8 *)req;
do {
/* Locate the next cmdq slot */
sw_prod = HWQ_CMP(hwq->prod, hwq);
- cmdqe = &hwq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
- [get_cmdq_idx(sw_prod, cmdq_depth)];
+ cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
if (!cmdqe) {
dev_err(&pdev->dev,
"RCFW request failed with no cmdqe!\n");
@@ -378,7 +375,7 @@ static void bnxt_qplib_service_creq(unsigned long data)
struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
struct bnxt_qplib_hwq *hwq = &creq->hwq;
- struct creq_base *creqe, **hwq_ptr;
+ struct creq_base *creqe;
u32 sw_cons, raw_cons;
unsigned long flags;
@@ -387,8 +384,7 @@ static void bnxt_qplib_service_creq(unsigned long data)
raw_cons = hwq->cons;
while (budget > 0) {
sw_cons = HWQ_CMP(raw_cons, hwq);
- hwq_ptr = (struct creq_base **)hwq->pbl_ptr;
- creqe = &hwq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
+ creqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
break;
/* The valid test of the entry must be done first before
@@ -434,7 +430,6 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_rcfw *rcfw = dev_instance;
struct bnxt_qplib_creq_ctx *creq;
- struct creq_base **creq_ptr;
struct bnxt_qplib_hwq *hwq;
u32 sw_cons;
@@ -442,8 +437,7 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
hwq = &creq->hwq;
/* Prefetch the CREQ element */
sw_cons = HWQ_CMP(hwq->cons, hwq);
- creq_ptr = (struct creq_base **)creq->hwq.pbl_ptr;
- prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
+ prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
tasklet_schedule(&creq->creq_tasklet);
@@ -468,29 +462,13 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
return 0;
}
-static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
-{
- return (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
-}
-
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
- struct cmdq_initialize_fw req;
struct creq_initialize_fw_resp resp;
- u16 cmd_flags = 0, level;
+ struct cmdq_initialize_fw req;
+ u16 cmd_flags = 0;
+ u8 pgsz, lvl;
int rc;
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
@@ -511,32 +489,30 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
goto config_vf_res;
- level = ctx->qpc_tbl.level;
- req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
- level = ctx->mrw_tbl.level;
- req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
- level = ctx->srqc_tbl.level;
- req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
- level = ctx->cq_tbl.level;
- req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
- level = ctx->srqc_tbl.level;
- req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
- level = ctx->cq_tbl.level;
- req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
- level = ctx->tim_tbl.level;
- req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
- level = ctx->tqm_ctx.pde.level;
- req.tqm_pg_size_tqm_lvl =
- (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->tqm_ctx.pde.pbl[level]);
-
+ lvl = ctx->qpc_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl);
+ req.qpc_pg_size_qpc_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->mrw_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->mrw_tbl);
+ req.mrw_pg_size_mrw_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->srqc_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->srqc_tbl);
+ req.srq_pg_size_srq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->cq_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->cq_tbl);
+ req.cq_pg_size_cq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->tim_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->tim_tbl);
+ req.tim_pg_size_tim_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->tqm_ctx.pde.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->tqm_ctx.pde);
+ req.tqm_pg_size_tqm_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
req.qpc_page_dir =
cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.mrw_page_dir =
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 411fce3493b6..157387636d00 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -87,12 +87,6 @@ static inline u32 bnxt_qplib_cmdqe_page_size(u32 depth)
return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
}
-static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
-{
- return (bnxt_qplib_cmdqe_page_size(depth) /
- BNXT_QPLIB_CMDQE_UNITS);
-}
-
/* Set the cmd_size to a factor of CMDQE unit */
static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
{
@@ -100,30 +94,12 @@ static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
BNXT_QPLIB_CMDQE_UNITS;
}
-#define MAX_CMDQ_IDX(depth) ((depth) - 1)
-
-static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
-{
- return (bnxt_qplib_cmdqe_cnt_per_pg(depth) - 1);
-}
-
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000
#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
#define HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK 0x1000900020011ULL
-static inline u32 get_cmdq_pg(u32 val, u32 depth)
-{
- return (val & ~(bnxt_qplib_max_cmdq_idx_per_pg(depth))) /
- (bnxt_qplib_cmdqe_cnt_per_pg(depth));
-}
-
-static inline u32 get_cmdq_idx(u32 val, u32 depth)
-{
- return val & (bnxt_qplib_max_cmdq_idx_per_pg(depth));
-}
-
/* Crsq buf is 1024-Byte */
struct bnxt_qplib_crsbe {
u8 data[1024];
@@ -133,76 +109,9 @@ struct bnxt_qplib_crsbe {
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
-#define BNXT_QPLIB_CREQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CREQE_UNITS)
-
-#define MAX_CREQ_IDX (BNXT_QPLIB_CREQE_MAX_CNT - 1)
-#define MAX_CREQ_IDX_PER_PG (BNXT_QPLIB_CREQE_CNT_PER_PG - 1)
-
-static inline u32 get_creq_pg(u32 val)
-{
- return (val & ~MAX_CREQ_IDX_PER_PG) / BNXT_QPLIB_CREQE_CNT_PER_PG;
-}
-
-static inline u32 get_creq_idx(u32 val)
-{
- return val & MAX_CREQ_IDX_PER_PG;
-}
-
-#define BNXT_QPLIB_CREQE_PER_PG (PAGE_SIZE / sizeof(struct creq_base))
-
#define CREQ_CMP_VALID(hdr, raw_cons, cp_bit) \
(!!((hdr)->v & CREQ_BASE_V) == \
!((raw_cons) & (cp_bit)))
-
-#define CREQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
-#define CREQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
-#define CREQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
-#define CREQ_DB_CP_FLAGS_REARM (CREQ_DB_KEY_CP | \
- CREQ_DB_IDX_VALID)
-#define CREQ_DB_CP_FLAGS (CREQ_DB_KEY_CP | \
- CREQ_DB_IDX_VALID | \
- CREQ_DB_IRQ_DIS)
-
-static inline void bnxt_qplib_ring_creq_db64(void __iomem *db, u32 index,
- u32 xid, bool arm)
-{
- u64 val = 0;
-
- val = xid & DBC_DBC_XID_MASK;
- val |= DBC_DBC_PATH_ROCE;
- val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
- val <<= 32;
- val |= index & DBC_DBC_INDEX_MASK;
-
- writeq(val, db);
-}
-
-static inline void bnxt_qplib_ring_creq_db_rearm(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_creq_db64(db, index, xid, true);
- else
- writel(CREQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK),
- db);
-}
-
-static inline void bnxt_qplib_ring_creq_db(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_creq_db64(db, index, xid, true);
- else
- writel(CREQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK),
- db);
-}
-
#define CREQ_ENTRY_POLL_BUDGET 0x100
/* HWQ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index cab1adf1fed9..7efa6e5dce62 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -347,6 +347,7 @@ done:
hwq->depth = hwq_attr->depth;
hwq->max_elements = depth;
hwq->element_size = stride;
+ hwq->qe_ppg = pg_size / stride;
/* For direct access to the elements */
lvl = hwq->level;
if (hwq_attr->sginfo->nopte && hwq->level)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 95b645dbbc2d..c29cbd3a2d7b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -80,6 +80,15 @@ enum bnxt_qplib_pbl_lvl {
#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
+enum bnxt_qplib_hwrm_pg_size {
+ BNXT_QPLIB_HWRM_PG_SIZE_4K = 0,
+ BNXT_QPLIB_HWRM_PG_SIZE_8K = 1,
+ BNXT_QPLIB_HWRM_PG_SIZE_64K = 2,
+ BNXT_QPLIB_HWRM_PG_SIZE_2M = 3,
+ BNXT_QPLIB_HWRM_PG_SIZE_8M = 4,
+ BNXT_QPLIB_HWRM_PG_SIZE_1G = 5,
+};
+
struct bnxt_qplib_reg_desc {
u8 bar_id;
resource_size_t bar_base;
@@ -126,6 +135,7 @@ struct bnxt_qplib_hwq {
u32 max_elements;
u32 depth;
u16 element_size; /* Size of each entry */
+ u16 qe_ppg; /* queue entries per page */
u32 prod; /* raw */
u32 cons; /* raw */
@@ -263,6 +273,49 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
}
+static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)
+{
+ u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ struct bnxt_qplib_pbl *pbl;
+
+ pbl = &hwq->pbl[PBL_LVL_0];
+ switch (pbl->pg_size) {
+ case ROCE_PG_SIZE_4K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ break;
+ case ROCE_PG_SIZE_8K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
+ break;
+ case ROCE_PG_SIZE_64K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
+ break;
+ case ROCE_PG_SIZE_2M:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
+ break;
+ case ROCE_PG_SIZE_8M:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
+ break;
+ case ROCE_PG_SIZE_1G:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
+ break;
+ default:
+ break;
+ }
+
+ return pg_size;
+}
+
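A worked packing example using the enum above: a level-1 PBL whose leaf pages are 2M maps to BNXT_QPLIB_HWRM_PG_SIZE_2M, so a CREATE_SRQ request would be built as

	pg_sz_lvl = BNXT_QPLIB_HWRM_PG_SIZE_2M << CMDQ_CREATE_SRQ_PG_SIZE_SFT;
	pg_sz_lvl |= srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);

the one-shift-one-or form that replaces the nested ?: chains deleted throughout this patch.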
+static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
+ u32 indx, u64 *pg)
+{
+ u32 pg_num, pg_idx;
+
+ pg_num = (indx / hwq->qe_ppg);
+ pg_idx = (indx % hwq->qe_ppg);
+ if (pg)
+ *pg = (u64)&hwq->pbl_ptr[pg_num];
+ return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
+}
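Worked index arithmetic for the helper above, assuming 128-byte queue elements on 4K pages: qe_ppg is 32, so index 70 yields pg_num = 2 and pg_idx = 6, i.e.

	/* element 70 lives 6 * 128 = 768 bytes into the third page */
	qe = bnxt_qplib_get_qe(hwq, 70, NULL);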
#define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 66954ff6a2f2..4cd475ea97a2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -132,9 +132,6 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
attr->max_ah = le32_to_cpu(sb->max_ah);
- attr->max_fmr = le32_to_cpu(sb->max_fmr);
- attr->max_map_per_fmr = sb->max_map_per_fmr;
-
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 13d9432d5ce2..6404f0da1051 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -64,8 +64,6 @@ struct bnxt_qplib_dev_attr {
u32 max_mw;
u32 max_raw_ethy_qp;
u32 max_ah;
- u32 max_fmr;
- u32 max_map_per_fmr;
u32 max_srq;
u32 max_srq_wqes;
u32 max_srq_sges;
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index e4b09e7c2175..6f00f07420b7 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -210,6 +210,20 @@ struct sq_send {
__le32 data[24];
};
+/* sq_send_hdr (size:256b/32B) */
+struct sq_send_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8_1;
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 q_key;
+ __le32 dst_qp;
+ __le32 avid;
+ __le64 reserved64;
+};
+
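The *_hdr mirrors added in this file describe only the 32-byte header portion of each WQE, leaving the data area to be filled separately. A hedged sketch of the producer pattern, assuming a slot fetched via bnxt_qplib_get_qe() (field values are illustrative):

	struct sq_send_hdr *hdr = bnxt_qplib_get_qe(&sq->hwq, sw_prod, NULL);

	memset(hdr, 0, sq->wqe_size);
	hdr->wqe_type = SQ_BASE_WQE_TYPE_SEND;	/* type constant per this header */
	hdr->length = cpu_to_le32(data_len);
	/* SGEs or inline payload follow the header within the same slot */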
/* Send Raw Ethernet and QP1 SQ WQE (40 bytes) */
struct sq_send_raweth_qp1 {
u8 wqe_type;
@@ -265,6 +279,21 @@ struct sq_send_raweth_qp1 {
__le32 data[24];
};
+/* sq_send_raweth_qp1_hdr (size:256b/32B) */
+struct sq_send_raweth_qp1_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le16 lflags;
+ __le16 cfa_action;
+ __le32 length;
+ __le32 reserved32_1;
+ __le32 cfa_meta;
+ __le32 reserved32_2;
+ __le64 reserved64;
+};
+
/* RDMA SQ WQE (40 bytes) */
struct sq_rdma {
u8 wqe_type;
@@ -288,6 +317,20 @@ struct sq_rdma {
__le32 data[24];
};
+/* sq_rdma_hdr (size:256b/32B) */
+struct sq_rdma_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 imm_data;
+ __le32 length;
+ __le32 reserved32_1;
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 reserved32_2;
+};
+
/* Atomic SQ WQE (40 bytes) */
struct sq_atomic {
u8 wqe_type;
@@ -307,6 +350,17 @@ struct sq_atomic {
__le32 data[24];
};
+/* sq_atomic_hdr (size:256b/32B) */
+struct sq_atomic_hdr {
+ u8 wqe_type;
+ u8 flags;
+ __le16 reserved16;
+ __le32 remote_key;
+ __le64 remote_va;
+ __le64 swap_data;
+ __le64 cmp_data;
+};
+
/* Local Invalidate SQ WQE (40 bytes) */
struct sq_localinvalidate {
u8 wqe_type;
@@ -324,6 +378,16 @@ struct sq_localinvalidate {
__le32 data[24];
};
+/* sq_localinvalidate_hdr (size:256b/32B) */
+struct sq_localinvalidate_hdr {
+ u8 wqe_type;
+ u8 flags;
+ __le16 reserved16;
+ __le32 inv_l_key;
+ __le64 reserved64;
+ u8 reserved128[16];
+};
+
/* FR-PMR SQ WQE (40 bytes) */
struct sq_fr_pmr {
u8 wqe_type;
@@ -380,6 +444,21 @@ struct sq_fr_pmr {
__le32 data[24];
};
+/* sq_fr_pmr_hdr (size:256b/32B) */
+struct sq_fr_pmr_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 access_cntl;
+ u8 zero_based_page_size_log;
+ __le32 l_key;
+ u8 length[5];
+ u8 reserved8_1;
+ u8 reserved8_2;
+ u8 numlevels_pbl_page_size_log;
+ __le64 pblptr;
+ __le64 va;
+};
+
/* Bind SQ WQE (40 bytes) */
struct sq_bind {
u8 wqe_type;
@@ -417,6 +496,22 @@ struct sq_bind {
#define SQ_BIND_DATA_SFT 0
};
+/* sq_bind_hdr (size:256b/32B) */
+struct sq_bind_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 access_cntl;
+ u8 reserved8_1;
+ u8 mw_type_zero_based;
+ u8 reserved8_2;
+ __le16 reserved16;
+ __le32 parent_l_key;
+ __le32 l_key;
+ __le64 va;
+ u8 length[5];
+ u8 reserved24[3];
+};
+
/* RQ/SRQ WQE Structures */
/* RQ/SRQ WQE (40 bytes) */
struct rq_wqe {
@@ -435,6 +530,17 @@ struct rq_wqe {
__le32 data[24];
};
+/* rq_wqe_hdr (size:256b/32B) */
+struct rq_wqe_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 reserved32;
+ __le32 wr_id[2];
+ u8 reserved128[16];
+};
+
/* CQ CQE Structures */
/* Base CQE (32 bytes) */
struct cq_base {
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d69dece3b1d5..30e08bcc9afb 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
srqidx = ABORT_RSS_SRQIDX_G(
be32_to_cpu(req->srqidx_status));
if (srqidx) {
- complete_cached_srq_buffers(ep,
- req->srqidx_status);
+ complete_cached_srq_buffers(ep, srqidx);
} else {
/* Hold ep ref until finish_peer_abort() */
c4iw_get_ep(&ep->com);
@@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
- ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
- TCB_RQ_START_S);
+ ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
+ TCB_RQ_START_S);
cleanup:
pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 599340c1f0b8..541dbcf22d0e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -953,6 +953,7 @@ void c4iw_dealloc(struct uld_ctx *ctx)
static void c4iw_remove(struct uld_ctx *ctx)
{
pr_debug("c4iw_dev %p\n", ctx->dev);
+ debugfs_remove_recursive(ctx->dev->debugfs_root);
c4iw_unregister_device(ctx->dev);
c4iw_dealloc(ctx);
}
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index aa7396a1588a..1889dd172a25 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_H_
@@ -40,6 +40,7 @@ struct efa_sw_stats {
atomic64_t reg_mr_err;
atomic64_t alloc_ucontext_err;
atomic64_t create_ah_err;
+ atomic64_t mmap_err;
};
/* Don't use anything other than atomic64 */
@@ -153,8 +154,7 @@ int efa_mmap(struct ib_ucontext *ibucontext,
struct vm_area_struct *vma);
void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int efa_create_ah(struct ib_ah *ibah,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
+ struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void efa_destroy_ah(struct ib_ah *ibah, u32 flags);
int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 96b104ab5415..bef2bd291054 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -37,7 +37,7 @@ enum efa_admin_aq_feature_id {
EFA_ADMIN_NETWORK_ATTR = 3,
EFA_ADMIN_QUEUE_ATTR = 4,
EFA_ADMIN_HW_HINTS = 5,
- EFA_ADMIN_FEATURES_OPCODE_NUM = 8,
+ EFA_ADMIN_HOST_INFO = 6,
};
/* QP transport type */
@@ -799,6 +799,54 @@ struct efa_admin_mmio_req_read_less_resp {
u32 reg_val;
};
+enum efa_admin_os_type {
+ EFA_ADMIN_OS_LINUX = 0,
+};
+
+struct efa_admin_host_info {
+ /* OS distribution string format */
+ u8 os_dist_str[128];
+
+ /* Defined in enum efa_admin_os_type */
+ u32 os_type;
+
+ /* Kernel version string format */
+ u8 kernel_ver_str[32];
+
+ /* Kernel version numeric format */
+ u32 kernel_ver;
+
+ /*
+ * 7:0 : driver_module_type
+ * 15:8 : driver_sub_minor
+ * 23:16 : driver_minor
+ * 31:24 : driver_major
+ */
+ u32 driver_ver;
+
+ /*
+ * Device's Bus, Device and Function
+ * 2:0 : function
+ * 7:3 : device
+ * 15:8 : bus
+ */
+ u16 bdf;
+
+ /*
+ * Spec version
+ * 7:0 : spec_minor
+ * 15:8 : spec_major
+ */
+ u16 spec_ver;
+
+ /*
+ * 0 : intree - Intree driver
+ * 1 : gdr - GPUDirect RDMA supported
+ * 31:2 : reserved2
+ */
+ u32 flags;
+};
+
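A hedged sketch of populating this structure, assuming the EFA_SET() field helper used elsewhere in this patch (buf and pdev are placeholders for the coherent buffer and PCI device):

	struct efa_admin_host_info *hinf = buf;

	hinf->os_type = EFA_ADMIN_OS_LINUX;
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 1);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 11);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, pdev->bus->number);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE, PCI_SLOT(pdev->devfn));
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION, PCI_FUNC(pdev->devfn));
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);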
/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
@@ -820,4 +868,17 @@ struct efa_admin_mmio_req_read_less_resp {
/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
+/* host_info */
+#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK GENMASK(7, 0)
+#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK GENMASK(23, 16)
+#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK GENMASK(31, 24)
+#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
+#define EFA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
+#define EFA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK GENMASK(7, 0)
+#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_INTREE_MASK BIT(0)
+#define EFA_ADMIN_HOST_INFO_GDR_MASK BIT(1)
+
#endif /* _EFA_ADMIN_CMDS_H_ */
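The host_info masks above follow the usual GENMASK() convention and are written through the driver's EFA_SET() helper. A hedged sketch, assuming EFA_SET() matches the semantics of FIELD_PREP() from <linux/bitfield.h>:

#include <linux/bitfield.h>

/* Pack driver_ver the way efa_set_host_info() does (illustrative). */
static u32 pack_driver_ver(u8 major, u8 minor, u8 sub_minor, u8 type)
{
	return FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK, major) |
	       FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK, minor) |
	       FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK, sub_minor) |
	       FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK, type);
}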
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 7fce69f5568f..336bc2c57bb1 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -631,17 +631,20 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
up(&aq->avail_cmds);
+ atomic64_inc(&aq->stats.cmd_err);
return PTR_ERR(comp_ctx);
}
err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
- if (err)
+ if (err) {
ibdev_err_ratelimited(
aq->efa_dev,
"Failed to process command %s (opcode %u) comp_status %d err %d\n",
efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
cmd->aq_common_descriptor.opcode, comp_ctx->comp_status,
err);
+ atomic64_inc(&aq->stats.cmd_err);
+ }
up(&aq->avail_cmds);
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
index c67dd8109d1c..5e4c88877ddb 100644
--- a/drivers/infiniband/hw/efa/efa_com.h
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
@@ -47,6 +47,7 @@ struct efa_com_admin_sq {
struct efa_com_stats_admin {
atomic64_t submitted_cmd;
atomic64_t completed_cmd;
+ atomic64_t cmd_err;
atomic64_t no_completion;
};
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index eea5574a62e8..fabd8df2e78f 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -351,7 +351,7 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
return 0;
}
-static bool
+bool
efa_com_check_supported_feature_id(struct efa_com_dev *edev,
enum efa_admin_aq_feature_id feature_id)
{
@@ -388,7 +388,7 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
if (control_buff_size)
EFA_SET(&get_cmd.aq_common_descriptor.flags,
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&get_cmd.control_buffer.address.mem_addr_high,
@@ -517,12 +517,12 @@ int efa_com_get_hw_hints(struct efa_com_dev *edev,
return 0;
}
-static int efa_com_set_feature_ex(struct efa_com_dev *edev,
- struct efa_admin_set_feature_resp *set_resp,
- struct efa_admin_set_feature_cmd *set_cmd,
- enum efa_admin_aq_feature_id feature_id,
- dma_addr_t control_buf_dma_addr,
- u32 control_buff_size)
+int efa_com_set_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
{
struct efa_com_admin_queue *aq;
int err;
@@ -540,7 +540,7 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
if (control_buff_size) {
set_cmd->aq_common_descriptor.flags = 0;
EFA_SET(&set_cmd->aq_common_descriptor.flags,
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&set_cmd->control_buffer.address.mem_addr_high,
&set_cmd->control_buffer.address.mem_addr_low);
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 31db5a0cbd5b..41ce4a476ee6 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -270,6 +270,15 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result);
int efa_com_get_hw_hints(struct efa_com_dev *edev,
struct efa_com_get_hw_hints_result *result);
+bool
+efa_com_check_supported_feature_id(struct efa_com_dev *edev,
+ enum efa_admin_aq_feature_id feature_id);
+int efa_com_set_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size);
int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups);
int efa_com_alloc_pd(struct efa_com_dev *edev,
struct efa_com_alloc_pd_result *result);
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index faf3ff1bca2a..82145574c928 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
#include <rdma/ib_user_verbs.h>
@@ -187,6 +189,52 @@ static void efa_stats_init(struct efa_dev *dev)
atomic64_set(s, 0);
}
+static void efa_set_host_info(struct efa_dev *dev)
+{
+ struct efa_admin_set_feature_resp resp = {};
+ struct efa_admin_set_feature_cmd cmd = {};
+ struct efa_admin_host_info *hinf;
+ u32 bufsz = sizeof(*hinf);
+ dma_addr_t hinf_dma;
+
+ if (!efa_com_check_supported_feature_id(&dev->edev,
+ EFA_ADMIN_HOST_INFO))
+ return;
+
+	/* Failures in setting the host info shall not disturb probe */
+ hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
+ GFP_KERNEL);
+ if (!hinf)
+ return;
+
+ strlcpy(hinf->os_dist_str, utsname()->release,
+ min(sizeof(hinf->os_dist_str), sizeof(utsname()->release)));
+ hinf->os_type = EFA_ADMIN_OS_LINUX;
+ strlcpy(hinf->kernel_ver_str, utsname()->version,
+ min(sizeof(hinf->kernel_ver_str), sizeof(utsname()->version)));
+ hinf->kernel_ver = LINUX_VERSION_CODE;
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
+ PCI_SLOT(dev->pdev->devfn));
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
+ PCI_FUNC(dev->pdev->devfn));
+ EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
+ EFA_COMMON_SPEC_VERSION_MAJOR);
+ EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
+ EFA_COMMON_SPEC_VERSION_MINOR);
+ EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
+ EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);
+
+ efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
+ hinf_dma, bufsz);
+
+ dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
+}
+
static const struct ib_device_ops efa_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_EFA,
@@ -251,6 +299,8 @@ static int efa_ib_device_add(struct efa_dev *dev)
if (err)
goto err_release_doorbell_bar;
+ efa_set_host_info(dev);
+
dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = 1;
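efa_set_host_info() above is deliberately fire-and-forget: the feature is probed first, the command result is ignored, and an allocation failure simply skips the report, so probe is never disturbed. The underlying DMA-coherent buffer pattern, as a minimal sketch with assumed names:

static void send_optional_info(struct device *dmadev, size_t bufsz)
{
	dma_addr_t dma;
	void *buf = dma_alloc_coherent(dmadev, bufsz, &dma, GFP_KERNEL);

	if (!buf)
		return;		/* optional feature: probe continues */

	/* ... fill buf and issue the admin command, ignoring its result ... */

	dma_free_coherent(dmadev, bufsz, buf, dma);
}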
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 5c57098a4aee..08313f7c73bc 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -37,13 +37,16 @@ struct efa_user_mmap_entry {
op(EFA_RX_DROPS, "rx_drops") \
op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
op(EFA_COMPLETED_CMDS, "completed_cmds") \
+ op(EFA_CMDS_ERR, "cmds_err") \
op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
op(EFA_CREATE_QP_ERR, "create_qp_err") \
+ op(EFA_CREATE_CQ_ERR, "create_cq_err") \
op(EFA_REG_MR_ERR, "reg_mr_err") \
op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
- op(EFA_CREATE_AH_ERR, "create_ah_err")
+ op(EFA_CREATE_AH_ERR, "create_ah_err") \
+ op(EFA_MMAP_ERR, "mmap_err")
#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,
@@ -1568,6 +1571,7 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
ibdev_dbg(&dev->ibdev,
"pgoff[%#lx] does not have valid entry\n",
vma->vm_pgoff);
+ atomic64_inc(&dev->stats.sw_stats.mmap_err);
return -EINVAL;
}
entry = to_emmap(rdma_entry);
@@ -1603,12 +1607,14 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
err = -EINVAL;
}
- if (err)
+ if (err) {
ibdev_dbg(
&dev->ibdev,
"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
entry->address, rdma_entry->npages * PAGE_SIZE,
entry->mmap_flag, err);
+ atomic64_inc(&dev->stats.sw_stats.mmap_err);
+ }
rdma_user_mmap_entry_put(rdma_entry);
return err;
@@ -1639,10 +1645,10 @@ static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
}
int efa_create_ah(struct ib_ah *ibah,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
+ struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
struct efa_dev *dev = to_edev(ibah->device);
struct efa_com_create_ah_params params = {};
struct efa_ibv_create_ah_resp resp = {};
@@ -1650,7 +1656,7 @@ int efa_create_ah(struct ib_ah *ibah,
struct efa_ah *ah = to_eah(ibah);
int err;
- if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) {
+ if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
ibdev_dbg(&dev->ibdev,
"Create address handle is not supported in atomic context\n");
err = -EOPNOTSUPP;
@@ -1747,15 +1753,18 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
as = &dev->edev.aq.stats;
stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
+ stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
s = &dev->stats;
stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
+ stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->sw_stats.create_cq_err);
stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
+ stats->value[EFA_MMAP_ERR] = atomic64_read(&s->sw_stats.mmap_err);
return ARRAY_SIZE(efa_stats_names);
}
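The new counters slot into an X-macro table: the single op() list above expands once into the stats enum and once into the name array, so adding cmds_err or mmap_err costs exactly one line. A minimal sketch of the idiom:

#define DEFINE_STATS(op) \
	op(STAT_FOO, "foo") \
	op(STAT_BAR, "bar")

#define STATS_ENUM(ename, name) ename,
#define STATS_STR(ename, name) [ename] = name,

enum { DEFINE_STATS(STATS_ENUM) NUM_STATS };
static const char * const stat_names[] = { DEFINE_STATS(STATS_STR) };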
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 0405d26d0833..2e89ec10efed 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -22,9 +22,13 @@ hfi1-y := \
init.o \
intr.o \
iowait.o \
+ ipoib_main.o \
+ ipoib_rx.o \
+ ipoib_tx.o \
mad.o \
mmu_rb.o \
msix.o \
+ netdev_rx.o \
opfn.o \
pcie.o \
pio.o \
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 1aeea5d65c01..2a91b8d95e12 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -64,6 +64,7 @@ struct hfi1_affinity_node_list node_affinity = {
static const char * const irq_type_names[] = {
"SDMA",
"RCVCTXT",
+ "NETDEVCTXT",
"GENERAL",
"OTHER",
};
@@ -915,6 +916,11 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
set = &entry->rcv_intr;
scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
break;
+ case IRQ_NETDEVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ set = &entry->def_intr;
+ scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
+ break;
default:
dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
return -EINVAL;
@@ -987,6 +993,10 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
if (rcd->ctxt != HFI1_CTRL_CTXT)
set = &entry->rcv_intr;
break;
+ case IRQ_NETDEVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ set = &entry->def_intr;
+ break;
default:
mutex_unlock(&node_affinity.lock);
return;
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 6a7e6ea4e426..f94ed5d7c7a3 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -52,6 +52,7 @@
enum irq_type {
IRQ_SDMA,
IRQ_RCVCTXT,
+ IRQ_NETDEVCTXT,
IRQ_GENERAL,
IRQ_OTHER
};
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e0b1238d31df..15f9c635f292 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -66,10 +66,7 @@
#include "affinity.h"
#include "debugfs.h"
#include "fault.h"
-
-uint kdeth_qp;
-module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
-MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
+#include "netdev.h"
uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
@@ -128,13 +125,15 @@ struct flag_table {
/*
* RSM instance allocation
- * 0 - Verbs
- * 1 - User Fecn Handling
- * 2 - Vnic
+ * 0 - User Fecn Handling
+ * 1 - Vnic
+ * 2 - AIP
+ * 3 - Verbs
*/
-#define RSM_INS_VERBS 0
-#define RSM_INS_FECN 1
-#define RSM_INS_VNIC 2
+#define RSM_INS_FECN 0
+#define RSM_INS_VNIC 1
+#define RSM_INS_AIP 2
+#define RSM_INS_VERBS 3
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39
@@ -175,6 +174,25 @@ struct flag_table {
/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
+/* RSM fields for AIP */
+/* LRH.BTH above is reused for this rule */
+
+/* BTH.DESTQP: QW 1, OFFSET 16 for match */
+#define BTH_DESTQP_QW 1ull
+#define BTH_DESTQP_BIT_OFFSET 16ull
+#define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off))
+#define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
+#define BTH_DESTQP_MASK 0xFFull
+#define BTH_DESTQP_VALUE 0x81ull
+
+/* DETH.SQPN: QW 3, Offset 56 for select */
+/* We use the 8 most significant source QPN bits as entropy for AIP */
+#define DETH_AIP_SQPN_QW 3ull
+#define DETH_AIP_SQPN_BIT_OFFSET 56ull
+#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
+#define DETH_AIP_SQPN_SELECT_OFFSET \
+ DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)
+
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
@@ -8463,6 +8481,49 @@ static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
local_irq_restore(flags);
}
+/**
+ * hfi1_netdev_rx_napi - NAPI poll function that issues the EOI inline
+ * @napi: pointer to the napi object
+ * @budget: netdev polling budget
+ */
+int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct hfi1_netdev_rxq *rxq = container_of(napi,
+ struct hfi1_netdev_rxq, napi);
+ struct hfi1_ctxtdata *rcd = rxq->rcd;
+ int work_done = 0;
+
+ work_done = rcd->do_interrupt(rcd, budget);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ hfi1_rcd_eoi_intr(rcd);
+ }
+
+ return work_done;
+}
+
+/* Receive packet NAPI handler for the VNIC and AIP netdevs */
+irqreturn_t receive_context_interrupt_napi(int irq, void *data)
+{
+ struct hfi1_ctxtdata *rcd = data;
+
+ receive_interrupt_common(rcd);
+
+ if (likely(rcd->napi)) {
+ if (likely(napi_schedule_prep(rcd->napi)))
+ __napi_schedule_irqoff(rcd->napi);
+ else
+ __hfi1_rcd_eoi_intr(rcd);
+ } else {
+ WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n",
+ rcd->ctxt);
+ __hfi1_rcd_eoi_intr(rcd);
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
* Receive packet IRQ handler. This routine expects to be on its own IRQ.
* This routine will try to handle packets immediately (latency), but if
@@ -13330,13 +13391,12 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
* in array of contexts
* freectxts - number of free user contexts
* num_send_contexts - number of PIO send contexts being used
- * num_vnic_contexts - number of contexts reserved for VNIC
+ * num_netdev_contexts - number of contexts reserved for netdev
*/
static int set_up_context_variables(struct hfi1_devdata *dd)
{
unsigned long num_kernel_contexts;
- u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
- int total_contexts;
+ u16 num_netdev_contexts;
int ret;
unsigned ngroups;
int rmt_count;
@@ -13373,13 +13433,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
num_kernel_contexts = send_contexts - num_vls - 1;
}
- /* Accommodate VNIC contexts if possible */
- if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
- dd_dev_err(dd, "No receive contexts available for VNIC\n");
- num_vnic_contexts = 0;
- }
- total_contexts = num_kernel_contexts + num_vnic_contexts;
-
/*
* User contexts:
* - default to 1 user context per real (non-HT) CPU core if
@@ -13392,28 +13445,32 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
/*
* Adjust the counts given a global max.
*/
- if (total_contexts + n_usr_ctxts > rcv_contexts) {
+ if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) {
dd_dev_err(dd,
- "Reducing # user receive contexts to: %d, from %u\n",
- rcv_contexts - total_contexts,
+ "Reducing # user receive contexts to: %u, from %u\n",
+ (u32)(rcv_contexts - num_kernel_contexts),
n_usr_ctxts);
/* recalculate */
- n_usr_ctxts = rcv_contexts - total_contexts;
+ n_usr_ctxts = rcv_contexts - num_kernel_contexts;
}
+ num_netdev_contexts =
+ hfi1_num_netdev_contexts(dd, rcv_contexts -
+ (num_kernel_contexts + n_usr_ctxts),
+ &node_affinity.real_cpu_mask);
/*
* The RMT entries are currently allocated as shown below:
* 1. QOS (0 to 128 entries);
* 2. FECN (num_kernel_context - 1 + num_user_contexts +
- * num_vnic_contexts);
- * 3. VNIC (num_vnic_contexts).
- * It should be noted that FECN oversubscribe num_vnic_contexts
- * entries of RMT because both VNIC and PSM could allocate any receive
+ * num_netdev_contexts);
+ * 3. netdev (num_netdev_contexts).
+	 * It should be noted that FECN oversubscribes num_netdev_contexts
+	 * entries of RMT because both netdev and PSM could allocate any receive
	 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
* and PSM FECN must reserve an RMT entry for each possible PSM receive
* context.
*/
- rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
if (HFI1_CAP_IS_KSET(TID_RDMA))
rmt_count += num_kernel_contexts - 1;
if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
@@ -13426,21 +13483,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
n_usr_ctxts = user_rmt_reduced;
}
- total_contexts += n_usr_ctxts;
-
- /* the first N are kernel contexts, the rest are user/vnic contexts */
- dd->num_rcv_contexts = total_contexts;
+ /* the first N are kernel contexts, the rest are user/netdev contexts */
+ dd->num_rcv_contexts =
+ num_kernel_contexts + n_usr_ctxts + num_netdev_contexts;
dd->n_krcv_queues = num_kernel_contexts;
dd->first_dyn_alloc_ctxt = num_kernel_contexts;
- dd->num_vnic_contexts = num_vnic_contexts;
+ dd->num_netdev_contexts = num_netdev_contexts;
dd->num_user_contexts = n_usr_ctxts;
dd->freectxts = n_usr_ctxts;
dd_dev_info(dd,
- "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
+ "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n",
rcv_contexts,
(int)dd->num_rcv_contexts,
(int)dd->n_krcv_queues,
- dd->num_vnic_contexts,
+ dd->num_netdev_contexts,
dd->num_user_contexts);
/*
@@ -14119,21 +14175,12 @@ static void init_early_variables(struct hfi1_devdata *dd)
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
- /* user changed the KDETH_QP */
- if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
- /* out of range or illegal value */
- dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
- kdeth_qp = 0;
- }
- if (kdeth_qp == 0) /* not set, or failed range check */
- kdeth_qp = DEFAULT_KDETH_QP;
-
write_csr(dd, SEND_BTH_QP,
- (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
+ (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
SEND_BTH_QP_KDETH_QP_SHIFT);
write_csr(dd, RCV_BTH_QP,
- (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
+ (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
RCV_BTH_QP_KDETH_QP_SHIFT);
}
@@ -14249,6 +14296,12 @@ static void complete_rsm_map_table(struct hfi1_devdata *dd,
}
}
+/* Return true if the given receive side mapping (RSM) rule is programmed */
+static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
+{
+ return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0;
+}
+
/*
* Add a receive side mapping rule.
*/
@@ -14485,77 +14538,138 @@ static void init_fecn_handling(struct hfi1_devdata *dd,
rmt->used += total_cnt;
}
-/* Initialize RSM for VNIC */
-void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
+static inline bool hfi1_is_rmt_full(int start, int spare)
+{
+ return (start + spare) > NUM_MAP_ENTRIES;
+}
+
+static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd)
{
u8 i, j;
u8 ctx_id = 0;
u64 reg;
u32 regoff;
- struct rsm_rule_data rrd;
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ int ctxt_count = hfi1_netdev_ctxt_count(dd);
- if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
- dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
- dd->vnic.rmt_start);
- return;
+ /* We already have contexts mapped in RMT */
+ if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) {
+ dd_dev_info(dd, "Contexts are already mapped in RMT\n");
+ return true;
+ }
+
+ if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
+		dd_dev_err(dd, "Not enough RMT entries, used = %d\n",
+			   rmt_start);
+ return false;
}
- dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
- dd->vnic.rmt_start,
- dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
+ dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n",
+ rmt_start,
+ rmt_start + NUM_NETDEV_MAP_ENTRIES);
/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
- regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
+ regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8;
reg = read_csr(dd, regoff);
- for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
- /* Update map register with vnic context */
- j = (dd->vnic.rmt_start + i) % 8;
+ for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
+ /* Update map register with netdev context */
+ j = (rmt_start + i) % 8;
reg &= ~(0xffllu << (j * 8));
- reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
- /* Wrap up vnic ctx index */
- ctx_id %= dd->vnic.num_ctxt;
+ reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
+ /* Wrap up netdev ctx index */
+ ctx_id %= ctxt_count;
/* Write back map register */
- if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
+ if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
dev_dbg(&(dd)->pcidev->dev,
- "Vnic rsm map reg[%d] =0x%llx\n",
+ "RMT[%d] =0x%llx\n",
regoff - RCV_RSM_MAP_TABLE, reg);
write_csr(dd, regoff, reg);
regoff += 8;
- if (i < (NUM_VNIC_MAP_ENTRIES - 1))
+ if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
reg = read_csr(dd, regoff);
}
}
- /* Add rule for vnic */
- rrd.offset = dd->vnic.rmt_start;
- rrd.pkt_type = 4;
- /* Match 16B packets */
- rrd.field1_off = L2_TYPE_MATCH_OFFSET;
- rrd.mask1 = L2_TYPE_MASK;
- rrd.value1 = L2_16B_VALUE;
- /* Match ETH L4 packets */
- rrd.field2_off = L4_TYPE_MATCH_OFFSET;
- rrd.mask2 = L4_16B_TYPE_MASK;
- rrd.value2 = L4_16B_ETH_VALUE;
- /* Calc context from veswid and entropy */
- rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
- rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
- rrd.index2_off = L2_16B_ENTROPY_OFFSET;
- rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
- add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
-
- /* Enable RSM if not already enabled */
+ return true;
+}
+
+static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd,
+ int rule, struct rsm_rule_data *rrd)
+{
+ if (!hfi1_netdev_update_rmt(dd)) {
+ dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule);
+ return;
+ }
+
+ add_rsm_rule(dd, rule, rrd);
add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
+void hfi1_init_aip_rsm(struct hfi1_devdata *dd)
+{
+	/*
+	 * Go through with the initialization only if this rule does not
+	 * exist yet; the first caller installs it for all users.
+	 */
+ if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ struct rsm_rule_data rrd = {
+ .offset = rmt_start,
+ .pkt_type = IB_PACKET_TYPE,
+ .field1_off = LRH_BTH_MATCH_OFFSET,
+ .mask1 = LRH_BTH_MASK,
+ .value1 = LRH_BTH_VALUE,
+ .field2_off = BTH_DESTQP_MATCH_OFFSET,
+ .mask2 = BTH_DESTQP_MASK,
+ .value2 = BTH_DESTQP_VALUE,
+ .index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
+ ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index2_off = DETH_AIP_SQPN_SELECT_OFFSET,
+ .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
+ };
+
+ hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
+ }
+}
+
+/* Initialize RSM for VNIC */
+void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
+{
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ struct rsm_rule_data rrd = {
+ /* Add rule for vnic */
+ .offset = rmt_start,
+ .pkt_type = 4,
+ /* Match 16B packets */
+ .field1_off = L2_TYPE_MATCH_OFFSET,
+ .mask1 = L2_TYPE_MASK,
+ .value1 = L2_16B_VALUE,
+ /* Match ETH L4 packets */
+ .field2_off = L4_TYPE_MATCH_OFFSET,
+ .mask2 = L4_16B_TYPE_MASK,
+ .value2 = L4_16B_ETH_VALUE,
+ /* Calc context from veswid and entropy */
+ .index1_off = L4_16B_HDR_VESWID_OFFSET,
+ .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index2_off = L2_16B_ENTROPY_OFFSET,
+ .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
+ };
+
+ hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd);
+}
+
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
{
clear_rsm_rule(dd, RSM_INS_VNIC);
+}
- /* Disable RSM if used only by vnic */
- if (dd->vnic.rmt_start == 0)
- clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
+void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd)
+{
+	/* Only clear the rule when the last user asks to do so */
+ if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
+ clear_rsm_rule(dd, RSM_INS_AIP);
}
static int init_rxe(struct hfi1_devdata *dd)
@@ -14574,8 +14688,8 @@ static int init_rxe(struct hfi1_devdata *dd)
init_qos(dd, rmt);
init_fecn_handling(dd, rmt);
complete_rsm_map_table(dd, rmt);
- /* record number of used rsm map entries for vnic */
- dd->vnic.rmt_start = rmt->used;
+ /* record number of used rsm map entries for netdev */
+ hfi1_netdev_set_free_rmt_idx(dd, rmt->used);
kfree(rmt);
/*
@@ -15129,6 +15243,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
(dd->revision >> CCE_REVISION_SW_SHIFT)
& CCE_REVISION_SW_MASK);
+ /* alloc netdev data */
+ if (hfi1_netdev_alloc(dd))
+ goto bail_cleanup;
+
ret = set_up_context_variables(dd);
if (ret)
goto bail_cleanup;
@@ -15229,6 +15347,7 @@ bail_clear_intr:
hfi1_comp_vectors_clean_up(dd);
msix_clean_up_interrupts(dd);
bail_cleanup:
+ hfi1_netdev_free(dd);
hfi1_pcie_ddcleanup(dd);
bail_free:
hfi1_free_devdata(dd);
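hfi1_init_aip_rsm() and hfi1_deinit_aip_rsm() above share one RSM rule among all IPoIB users of a device with a first-user-installs, last-user-removes refcount; atomic_fetch_add_unless() keeps a stray deinit from pushing the counter below zero. The idiom in isolation (install_rule() and remove_rule() are placeholders):

static void install_rule(void);
static void remove_rule(void);
static atomic_t rule_users = ATOMIC_INIT(0);

static void rule_get(void)
{
	if (atomic_fetch_inc(&rule_users) == 0)
		install_rule();		/* first user installs the rule */
}

static void rule_put(void)
{
	/* no-op if the counter is already 0 (rule never installed) */
	if (atomic_fetch_add_unless(&rule_users, -1, 0) == 1)
		remove_rule();		/* last user removes the rule */
}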
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 725509261016..2c6f2de74d4d 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -1,7 +1,7 @@
#ifndef _CHIP_H
#define _CHIP_H
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -1447,6 +1447,7 @@ irqreturn_t general_interrupt(int irq, void *data);
irqreturn_t sdma_interrupt(int irq, void *data);
irqreturn_t receive_context_interrupt(int irq, void *data);
irqreturn_t receive_context_thread(int irq, void *data);
+irqreturn_t receive_context_interrupt_napi(int irq, void *data);
int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set);
void init_qsfp_int(struct hfi1_devdata *dd);
@@ -1455,6 +1456,8 @@ void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
void reset_interrupts(struct hfi1_devdata *dd);
u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
+void hfi1_init_aip_rsm(struct hfi1_devdata *dd);
+void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd);
/*
* Interrupt source table.
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index 40a1ff0c8a8e..ff423e546b80 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -72,13 +72,6 @@
* compilation unit
*/
-/*
- * If a packet's QP[23:16] bits match this value, then it is
- * a PSM packet and the hardware will expect a KDETH header
- * following the BTH.
- */
-#define DEFAULT_KDETH_QP 0x80
-
/* driver/hw feature set bitmask */
#define HFI1_CAP_USER_SHIFT 24
#define HFI1_CAP_MASK ((1UL << HFI1_CAP_USER_SHIFT) - 1)
@@ -149,7 +142,8 @@
HFI1_CAP_NO_INTEGRITY | \
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_TID_RDMA | \
- HFI1_CAP_OPFN) << \
+ HFI1_CAP_OPFN | \
+ HFI1_CAP_AIP) << \
HFI1_CAP_USER_SHIFT)
/*
* Set of capabilities that need to be enabled for kernel context in
@@ -166,6 +160,7 @@
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_MULTI_PKT_EGR | \
HFI1_CAP_EXTENDED_PSN | \
+ HFI1_CAP_AIP | \
((HFI1_CAP_HDRSUPP | \
HFI1_CAP_MULTI_PKT_EGR | \
HFI1_CAP_STATIC_RATE_CTRL | \
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 049d15befe58..a40701a6e1b6 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2018 Intel Corporation.
+ * Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -54,6 +54,7 @@
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
+#include <linux/etherdevice.h>
#include "hfi.h"
#include "trace.h"
@@ -63,6 +64,9 @@
#include "vnic.h"
#include "fault.h"
+#include "ipoib.h"
+#include "netdev.h"
+
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -748,6 +752,39 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
return ret;
}
+static void process_rcv_packet_napi(struct hfi1_packet *packet)
+{
+ packet->etype = rhf_rcv_type(packet->rhf);
+
+ /* total length */
+ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+ /* retrieve eager buffer details */
+ packet->etail = rhf_egr_index(packet->rhf);
+ packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
+ &packet->updegr);
+ /*
+ * Prefetch the contents of the eager buffer. It is
+ * OK to send a negative length to prefetch_range().
+ * The +2 is the size of the RHF.
+ */
+ prefetch_range(packet->ebuf,
+ packet->tlen - ((packet->rcd->rcvhdrqentsize -
+ (rhf_hdrq_offset(packet->rhf)
+ + 2)) * 4));
+
+ packet->rcd->rhf_rcv_function_map[packet->etype](packet);
+ packet->numpkt++;
+
+ /* Set up for the next packet */
+ packet->rhqoff += packet->rsize;
+ if (packet->rhqoff >= packet->maxcnt)
+ packet->rhqoff = 0;
+
+ packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
+ packet->rcd->rhf_offset;
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+}
+
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
int ret;
@@ -827,6 +864,36 @@ static inline void finish_packet(struct hfi1_packet *packet)
}
/*
+ * handle_receive_interrupt_napi_fp - receive a packet
+ * @rcd: the context
+ * @budget: polling budget
+ *
+ * Called from the receive interrupt handler. This is the fast-path
+ * receive function used when executing in the NAPI softirq context.
+ */
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget)
+{
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
+ goto bail;
+
+ while (packet.numpkt < budget) {
+ process_rcv_packet_napi(&packet);
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ break;
+
+ process_rcv_update(0, &packet);
+ }
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+bail:
+ finish_packet(&packet);
+ return packet.numpkt;
+}
+
+/*
* Handle receive interrupts when using the no dma rtail option.
*/
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
@@ -1074,6 +1141,63 @@ bail:
}
/*
+ * handle_receive_interrupt_napi_sp - receive a packet
+ * @rcd: the context
+ * @budget: polling budget
+ *
+ * Called from the interrupt handler for errors or receive interrupts.
+ * This is the slow-path receive function used when executing in the
+ * NAPI softirq context.
+ */
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ int last = RCV_PKT_OK;
+ bool needset = true;
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
+ goto bail;
+
+ while (last != RCV_PKT_DONE && packet.numpkt < budget) {
+ if (hfi1_need_drop(dd)) {
+ /* On to the next packet */
+ packet.rhqoff += packet.rsize;
+ packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
+ packet.rhqoff +
+ rcd->rhf_offset;
+ packet.rhf = rhf_to_cpu(packet.rhf_addr);
+
+ } else {
+ if (set_armed_to_active(&packet))
+ goto bail;
+ process_rcv_packet_napi(&packet);
+ }
+
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ last = RCV_PKT_DONE;
+
+ if (needset) {
+ needset = false;
+ set_all_fastpath(dd, rcd);
+ }
+
+ process_rcv_update(last, &packet);
+ }
+
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+
+bail:
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ finish_packet(&packet);
+ return packet.numpkt;
+}
+
+/*
* We may discover in the interrupt that the hardware link state has
* changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
* and we need to update the driver's notion of the link state. We cannot
@@ -1550,6 +1674,82 @@ void handle_eflags(struct hfi1_packet *packet)
show_eflags_errs(packet);
}
+static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp;
+ struct net_device *netdev;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct napi_struct *napi = rcd->napi;
+ struct sk_buff *skb;
+ struct hfi1_netdev_rxq *rxq = container_of(napi,
+ struct hfi1_netdev_rxq, napi);
+ u32 extra_bytes;
+ u32 tlen, qpnum;
+ bool do_work, do_cnp;
+ struct hfi1_ipoib_dev_priv *priv;
+
+ trace_hfi1_rcvhdr(packet);
+
+ hfi1_setup_ib_header(packet);
+
+ packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
+ packet->grh = NULL;
+
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ handle_eflags(packet);
+ return;
+ }
+
+ qpnum = ib_bth_get_qpn(packet->ohdr);
+ netdev = hfi1_netdev_get_data(rcd->dd, qpnum);
+ if (!netdev)
+ goto drop_no_nd;
+
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+ trace_ctxt_rsm_hist(rcd->ctxt);
+
+ /* handle congestion notifications */
+ do_work = hfi1_may_ecn(packet);
+ if (unlikely(do_work)) {
+ do_cnp = (packet->opcode != IB_OPCODE_CNP);
+ (void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp,
+ packet, do_cnp);
+ }
+
+ /*
+	 * The header/payload split point is after the last byte of the
+	 * DETH, so strip the padding, the CRC and the ICRC. tlen is the
+	 * whole packet length, so subtract the header size as well.
+ */
+ tlen = packet->tlen;
+ extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
+ packet->hlen;
+ if (unlikely(tlen < extra_bytes))
+ goto drop;
+
+ tlen -= extra_bytes;
+
+ skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
+ if (unlikely(!skb))
+ goto drop;
+
+ priv = hfi1_ipoib_priv(netdev);
+ hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);
+
+ skb->dev = netdev;
+ skb->pkt_type = PACKET_HOST;
+ netif_receive_skb(skb);
+
+ return;
+
+drop:
+ ++netdev->stats.rx_dropped;
+drop_no_nd:
+ ibp = rcd_to_iport(packet->rcd);
+ ++ibp->rvp.n_pkt_drops;
+}
+
/*
* The following functions are called by the interrupt handler. They are type
* specific handlers for each packet type.
@@ -1572,28 +1772,10 @@ static void process_receive_ib(struct hfi1_packet *packet)
hfi1_ib_rcv(packet);
}
-static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
-{
- /* Packet received in VNIC context via RSM */
- if (packet->rcd->is_vnic)
- return true;
-
- if ((hfi1_16B_get_l2(packet->ebuf) == OPA_16B_L2_TYPE) &&
- (hfi1_16B_get_l4(packet->ebuf) == OPA_16B_L4_ETHR))
- return true;
-
- return false;
-}
-
static void process_receive_bypass(struct hfi1_packet *packet)
{
struct hfi1_devdata *dd = packet->rcd->dd;
- if (hfi1_is_vnic_packet(packet)) {
- hfi1_vnic_bypass_rcv(packet);
- return;
- }
-
if (hfi1_setup_bypass_packet(packet))
return;
@@ -1757,3 +1939,14 @@ const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};
+
+const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = {
+ [RHF_RCV_TYPE_EXPECTED] = process_receive_invalid,
+ [RHF_RCV_TYPE_EAGER] = process_receive_invalid,
+ [RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv,
+ [RHF_RCV_TYPE_ERROR] = process_receive_error,
+ [RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv,
+ [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
+};
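This table replaces the removed per-packet hfi1_is_vnic_packet() test: a netdev receive context is handed a dispatch table whose IB and bypass slots point straight at the IPoIB and VNIC handlers, so the hot path stays branch-free. Roughly (types abbreviated):

static void dispatch_one(struct hfi1_packet *packet)
{
	/* table chosen once, when the receive context is set up */
	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
}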
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index e7fdd70c6e78..8ca51e43cf53 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -1264,7 +1264,7 @@ static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
memset(&binfo, 0, sizeof(binfo));
binfo.hw_version = dd->revision;
binfo.sw_version = HFI1_KERN_SWVERSION;
- binfo.bthqp = kdeth_qp;
+ binfo.bthqp = RVT_KDETH_QP_PREFIX;
binfo.jkey = uctxt->jkey;
/*
* If more than 64 contexts are enabled the allocated credit
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index b06c2594105a..b4c6bff60a4e 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
- * Copyright(c) 2015-2018 Intel Corporation.
+ * Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -233,6 +233,8 @@ struct hfi1_ctxtdata {
intr_handler fast_handler;
/** slow handler */
intr_handler slow_handler;
+	/* napi pointer associated with the netdev */
+ struct napi_struct *napi;
/* verbs rx_stats per rcd */
struct hfi1_opcode_stats_perctx *opstats;
/* clear interrupt mask */
@@ -383,11 +385,11 @@ struct hfi1_packet {
u32 rhqoff;
u32 dlid;
u32 slid;
+ int numpkt;
u16 tlen;
s16 etail;
u16 pkey;
u8 hlen;
- u8 numpkt;
u8 rsize;
u8 updegr;
u8 etype;
@@ -985,7 +987,7 @@ typedef void (*hfi1_make_req)(struct rvt_qp *qp,
struct hfi1_pkt_state *ps,
struct rvt_swqe *wqe);
extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
-
+extern const rhf_rcv_function_ptr netdev_rhf_rcv_functions[];
/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE 0 /* keep going */
@@ -1045,23 +1047,10 @@ struct hfi1_asic_data {
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32
-/*
- * Number of VNIC contexts used. Ensure it is less than or equal to
- * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
- */
-#define HFI1_NUM_VNIC_CTXT 8
-
-/* Number of VNIC RSM entries */
-#define NUM_VNIC_MAP_ENTRIES 8
-
/* Virtual NIC information */
struct hfi1_vnic_data {
- struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
struct kmem_cache *txreq_cache;
- struct xarray vesws;
u8 num_vports;
- u8 rmt_start;
- u8 num_ctxt;
};
struct hfi1_vnic_vport_info;
@@ -1167,8 +1156,8 @@ struct hfi1_devdata {
u64 z_send_schedule;
u64 __percpu *send_schedule;
- /* number of reserved contexts for VNIC usage */
- u16 num_vnic_contexts;
+ /* number of reserved contexts for netdev usage */
+ u16 num_netdev_contexts;
/* number of receive contexts in use by the driver */
u32 num_rcv_contexts;
/* number of pio send contexts in use by the driver */
@@ -1417,12 +1406,12 @@ struct hfi1_devdata {
struct hfi1_vnic_data vnic;
/* Lock to protect IRQ SRC register access */
spinlock_t irq_src_lock;
-};
+ int vnic_num_vports;
+ struct net_device *dummy_netdev;
-static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
-{
- return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
-}
+ /* Keeps track of IPoIB RSM rule users */
+ atomic_t ipoib_rsm_usr_num;
+};
/* 8051 firmware version helper */
#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
@@ -1500,6 +1489,8 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget);
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget);
void set_all_slowpath(struct hfi1_devdata *dd);
extern const struct pci_device_id hfi1_pci_tbl[];
@@ -2250,7 +2241,6 @@ extern int num_user_contexts;
extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
-extern uint kdeth_qp;
extern uint loopback;
extern uint quick_linkup;
extern uint rcv_intr_timeout;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 3759d9233a1c..5eed4360695f 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -69,6 +69,7 @@
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"
+#include "netdev.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -374,6 +375,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
+ rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
mutex_init(&rcd->exp_mutex);
spin_lock_init(&rcd->exp_lock);
@@ -1316,6 +1318,7 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}
+ atomic_set(&dd->ipoib_rsm_usr_num, 0);
return dd;
bail:
@@ -1663,9 +1666,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* do the generic initialization */
initfail = hfi1_init(dd, 0);
- /* setup vnic */
- hfi1_vnic_setup(dd);
-
ret = hfi1_register_ib_device(dd);
/*
@@ -1704,7 +1704,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hfi1_device_remove(dd);
if (!ret)
hfi1_unregister_ib_device(dd);
- hfi1_vnic_cleanup(dd);
postinit_cleanup(dd);
if (initfail)
ret = initfail;
@@ -1749,8 +1748,8 @@ static void remove_one(struct pci_dev *pdev)
/* unregister from IB core */
hfi1_unregister_ib_device(dd);
- /* cleanup vnic */
- hfi1_vnic_cleanup(dd);
+ /* free netdev data */
+ hfi1_netdev_free(dd);
/*
* Disable the IB link, disable interrupts on the device,
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
new file mode 100644
index 000000000000..185c9b02c974
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for IPOIB functionality
+ */
+
+#ifndef HFI1_IPOIB_H
+#define HFI1_IPOIB_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/if_infiniband.h>
+
+#include "hfi.h"
+#include "iowait.h"
+#include "netdev.h"
+
+#include <rdma/ib_verbs.h>
+
+#define HFI1_IPOIB_ENTROPY_SHIFT 24
+
+#define HFI1_IPOIB_TXREQ_NAME_LEN 32
+
+#define HFI1_IPOIB_PSEUDO_LEN 20
+#define HFI1_IPOIB_ENCAP_LEN 4
+
+struct hfi1_ipoib_dev_priv;
+
+union hfi1_ipoib_flow {
+ u16 as_int;
+ struct {
+ u8 tx_queue;
+ u8 sc5;
+ } __attribute__((__packed__));
+};
+
+/**
+ * struct hfi1_ipoib_circ_buf - List of items to be processed
+ * @items: ring of items
+ * @head: ring head
+ * @tail: ring tail
+ * @max_items: one more than the maximum number of items the ring can hold
+ * @producer_lock: producer sync lock
+ * @consumer_lock: consumer sync lock
+ */
+struct hfi1_ipoib_circ_buf {
+ void **items;
+ unsigned long head;
+ unsigned long tail;
+ unsigned long max_items;
+ spinlock_t producer_lock; /* head sync lock */
+ spinlock_t consumer_lock; /* tail sync lock */
+};
+
+/**
+ * struct hfi1_ipoib_txq - IPOIB per Tx queue information
+ * @priv: private pointer
+ * @sde: sdma engine
+ * @tx_list: tx request list
+ * @sent_txreqs: count of txreqs posted to sdma
+ * @flow: tracks when list needs to be flushed for a flow change
+ * @q_idx: ipoib Tx queue index
+ * @pkts_sent: indicator that packets have been sent from this queue
+ * @wait: iowait structure
+ * @complete_txreqs: count of txreqs completed by sdma
+ * @napi: pointer to tx napi interface
+ * @tx_ring: ring of ipoib txreqs to be reaped by napi callback
+ */
+struct hfi1_ipoib_txq {
+ struct hfi1_ipoib_dev_priv *priv;
+ struct sdma_engine *sde;
+ struct list_head tx_list;
+ u64 sent_txreqs;
+ union hfi1_ipoib_flow flow;
+ u8 q_idx;
+ bool pkts_sent;
+ struct iowait wait;
+
+ atomic64_t ____cacheline_aligned_in_smp complete_txreqs;
+ struct napi_struct *napi;
+ struct hfi1_ipoib_circ_buf tx_ring;
+};
+
+struct hfi1_ipoib_dev_priv {
+ struct hfi1_devdata *dd;
+ struct net_device *netdev;
+ struct ib_device *device;
+ struct hfi1_ipoib_txq *txqs;
+ struct kmem_cache *txreq_cache;
+ struct napi_struct *tx_napis;
+ u16 pkey;
+ u16 pkey_index;
+ u32 qkey;
+ u8 port_num;
+
+ const struct net_device_ops *netdev_ops;
+ struct rvt_qp *qp;
+ struct pcpu_sw_netstats __percpu *netstats;
+};
+
+/* hfi1 ipoib rdma netdev's private data structure */
+struct hfi1_ipoib_rdma_netdev {
+ struct rdma_netdev rn; /* keep this first */
+ /* followed by device private data */
+ struct hfi1_ipoib_dev_priv dev_priv;
+};
+
+static inline struct hfi1_ipoib_dev_priv *
+hfi1_ipoib_priv(const struct net_device *dev)
+{
+ return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
+}
+
+static inline void
+hfi1_ipoib_update_rx_netstats(struct hfi1_ipoib_dev_priv *priv,
+ u64 packets,
+ u64 bytes)
+{
+ struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
+
+ u64_stats_update_begin(&netstats->syncp);
+ netstats->rx_packets += packets;
+ netstats->rx_bytes += bytes;
+ u64_stats_update_end(&netstats->syncp);
+}
+
+static inline void
+hfi1_ipoib_update_tx_netstats(struct hfi1_ipoib_dev_priv *priv,
+ u64 packets,
+ u64 bytes)
+{
+ struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
+
+ u64_stats_update_begin(&netstats->syncp);
+ netstats->tx_packets += packets;
+ netstats->tx_bytes += bytes;
+ u64_stats_update_end(&netstats->syncp);
+}
+
+int hfi1_ipoib_send_dma(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn);
+
+int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
+void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);
+
+int hfi1_ipoib_rxq_init(struct net_device *dev);
+void hfi1_ipoib_rxq_deinit(struct net_device *dev);
+
+void hfi1_ipoib_napi_tx_enable(struct net_device *dev);
+void hfi1_ipoib_napi_tx_disable(struct net_device *dev);
+
+struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
+ int size, void *data);
+
+int hfi1_ipoib_rn_get_params(struct ib_device *device,
+ u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params);
+
+#endif /* HFI1_IPOIB_H */
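union hfi1_ipoib_flow packs the tx queue and sc5 into 16 bits so the transmit path can detect a flow change with a single integer compare. A hedged usage sketch (flow_changed() is not part of the header):

static bool flow_changed(struct hfi1_ipoib_txq *txq, u8 tx_queue, u8 sc5)
{
	union hfi1_ipoib_flow now = { .tx_queue = tx_queue, .sc5 = sc5 };

	return now.as_int != txq->flow.as_int;
}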
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
new file mode 100644
index 000000000000..014351ebbefa
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for ipoib functionality
+ */
+
+#include "ipoib.h"
+#include "hfi.h"
+
+static u32 qpn_from_mac(u8 *mac_arr)
+{
+ return (u32)mac_arr[1] << 16 | mac_arr[2] << 8 | mac_arr[3];
+}
+
+static int hfi1_ipoib_dev_init(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int ret;
+
+ priv->netstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+
+ ret = priv->netdev_ops->ndo_init(dev);
+ if (ret)
+ return ret;
+
+ ret = hfi1_netdev_add_data(priv->dd,
+ qpn_from_mac(priv->netdev->dev_addr),
+ dev);
+ if (ret < 0) {
+ priv->netdev_ops->ndo_uninit(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hfi1_ipoib_dev_uninit(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
+
+ priv->netdev_ops->ndo_uninit(dev);
+}
+
+static int hfi1_ipoib_dev_open(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int ret;
+
+ ret = priv->netdev_ops->ndo_open(dev);
+ if (!ret) {
+ struct hfi1_ibport *ibp = to_iport(priv->device,
+ priv->port_num);
+ struct rvt_qp *qp;
+ u32 qpn = qpn_from_mac(priv->netdev->dev_addr);
+
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (!qp) {
+ rcu_read_unlock();
+ priv->netdev_ops->ndo_stop(dev);
+ return -EINVAL;
+ }
+ rvt_get_qp(qp);
+ priv->qp = qp;
+ rcu_read_unlock();
+
+ hfi1_netdev_enable_queues(priv->dd);
+ hfi1_ipoib_napi_tx_enable(dev);
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_dev_stop(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ if (!priv->qp)
+ return 0;
+
+ hfi1_ipoib_napi_tx_disable(dev);
+ hfi1_netdev_disable_queues(priv->dd);
+
+ rvt_put_qp(priv->qp);
+ priv->qp = NULL;
+
+ return priv->netdev_ops->ndo_stop(dev);
+}
+
+static void hfi1_ipoib_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u64 rx_packets = 0ull;
+ u64 rx_bytes = 0ull;
+ u64 tx_packets = 0ull;
+ u64 tx_bytes = 0ull;
+ int i;
+
+ netdev_stats_to_stats64(storage, &dev->stats);
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_sw_netstats *stats;
+ unsigned int start;
+ u64 trx_packets;
+ u64 trx_bytes;
+ u64 ttx_packets;
+ u64 ttx_bytes;
+
+ stats = per_cpu_ptr(priv->netstats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ trx_packets = stats->rx_packets;
+ trx_bytes = stats->rx_bytes;
+ ttx_packets = stats->tx_packets;
+ ttx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ rx_packets += trx_packets;
+ rx_bytes += trx_bytes;
+ tx_packets += ttx_packets;
+ tx_bytes += ttx_bytes;
+ }
+
+ storage->rx_packets += rx_packets;
+ storage->rx_bytes += rx_bytes;
+ storage->tx_packets += tx_packets;
+ storage->tx_bytes += tx_bytes;
+}
+
+static const struct net_device_ops hfi1_ipoib_netdev_ops = {
+ .ndo_init = hfi1_ipoib_dev_init,
+ .ndo_uninit = hfi1_ipoib_dev_uninit,
+ .ndo_open = hfi1_ipoib_dev_open,
+ .ndo_stop = hfi1_ipoib_dev_stop,
+ .ndo_get_stats64 = hfi1_ipoib_dev_get_stats64,
+};
+
+static int hfi1_ipoib_send(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn)
+{
+ return hfi1_ipoib_send_dma(dev, skb, address, dqpn);
+}
+
+static int hfi1_ipoib_mcast_attach(struct net_device *dev,
+ struct ib_device *device,
+ union ib_gid *mgid,
+ u16 mlid,
+ int set_qkey,
+ u32 qkey)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
+ struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
+ struct rvt_qp *qp;
+ int ret = -EINVAL;
+
+ rcu_read_lock();
+
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (qp) {
+ rvt_get_qp(qp);
+ rcu_read_unlock();
+ if (set_qkey)
+ priv->qkey = qkey;
+
+ /* attach QP to multicast group */
+ ret = ib_attach_mcast(&qp->ibqp, mgid, mlid);
+ rvt_put_qp(qp);
+ } else {
+ rcu_read_unlock();
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_mcast_detach(struct net_device *dev,
+ struct ib_device *device,
+ union ib_gid *mgid,
+ u16 mlid)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
+ struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
+ struct rvt_qp *qp;
+ int ret = -EINVAL;
+
+ rcu_read_lock();
+
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (qp) {
+ rvt_get_qp(qp);
+ rcu_read_unlock();
+ ret = ib_detach_mcast(&qp->ibqp, mgid, mlid);
+ rvt_put_qp(qp);
+ } else {
+ rcu_read_unlock();
+ }
+ return ret;
+}
+
+static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ hfi1_ipoib_txreq_deinit(priv);
+ hfi1_ipoib_rxq_deinit(priv->netdev);
+
+ free_percpu(priv->netstats);
+}
+
+static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
+{
+ hfi1_ipoib_netdev_dtor(dev);
+ free_netdev(dev);
+}
+
+static void hfi1_ipoib_set_id(struct net_device *dev, int id)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ priv->pkey_index = (u16)id;
+ ib_query_pkey(priv->device,
+ priv->port_num,
+ priv->pkey_index,
+ &priv->pkey);
+}
+
+static int hfi1_ipoib_setup_rn(struct ib_device *device,
+ u8 port_num,
+ struct net_device *netdev,
+ void *param)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+ struct rdma_netdev *rn = netdev_priv(netdev);
+ struct hfi1_ipoib_dev_priv *priv;
+ int rc;
+
+ rn->send = hfi1_ipoib_send;
+ rn->attach_mcast = hfi1_ipoib_mcast_attach;
+ rn->detach_mcast = hfi1_ipoib_mcast_detach;
+ rn->set_id = hfi1_ipoib_set_id;
+ rn->hca = device;
+ rn->port_num = port_num;
+ rn->mtu = netdev->mtu;
+
+ priv = hfi1_ipoib_priv(netdev);
+ priv->dd = dd;
+ priv->netdev = netdev;
+ priv->device = device;
+ priv->port_num = port_num;
+ priv->netdev_ops = netdev->netdev_ops;
+
+ netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
+
+ ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
+
+ rc = hfi1_ipoib_txreq_init(priv);
+ if (rc) {
+ dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
+ hfi1_ipoib_free_rdma_netdev(netdev);
+ return rc;
+ }
+
+ rc = hfi1_ipoib_rxq_init(netdev);
+ if (rc) {
+ dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
+ hfi1_ipoib_free_rdma_netdev(netdev);
+ return rc;
+ }
+
+ netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
+ netdev->needs_free_netdev = true;
+
+ return 0;
+}
+
+int hfi1_ipoib_rn_get_params(struct ib_device *device,
+ u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+
+ if (type != RDMA_NETDEV_IPOIB)
+ return -EOPNOTSUPP;
+
+ if (!HFI1_CAP_IS_KSET(AIP) || !dd->num_netdev_contexts)
+ return -EOPNOTSUPP;
+
+ if (!port_num || port_num > dd->num_pports)
+ return -EINVAL;
+
+ params->sizeof_priv = sizeof(struct hfi1_ipoib_rdma_netdev);
+ params->txqs = dd->num_sdma;
+ params->rxqs = dd->num_netdev_contexts;
+ params->param = NULL;
+ params->initialize_rdma_netdev = hfi1_ipoib_setup_rn;
+
+ return 0;
+}
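hfi1_ipoib_setup_rn() wraps rather than replaces the core net_device_ops: the original pointer is stashed in priv->netdev_ops and every override delegates to it around the hfi1-specific work, as hfi1_ipoib_dev_open() shows. The shape of the pattern:

static int wrapped_open(struct net_device *dev)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	int ret = priv->netdev_ops->ndo_open(dev);	/* original first */

	if (!ret)
		hfi1_netdev_enable_queues(priv->dd);	/* then driver work */
	return ret;
}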
diff --git a/drivers/infiniband/hw/hfi1/ipoib_rx.c b/drivers/infiniband/hw/hfi1/ipoib_rx.c
new file mode 100644
index 000000000000..3afa7545242c
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_rx.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+#include "netdev.h"
+#include "ipoib.h"
+
+#define HFI1_IPOIB_SKB_PAD ((NET_SKB_PAD) + (NET_IP_ALIGN))
+
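+/*
+ * Copy the received payload into the skb: record the protocol from the
+ * IPoIB encapsulation header, mark a pseudo MAC header, and pull the
+ * encapsulation bytes so the skb data starts at the network header.
+ */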
+static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
+{
+ void *dst_data;
+
+ skb_checksum_none_assert(skb);
+ skb->protocol = *((__be16 *)data);
+
+ dst_data = skb_put(skb, size);
+ memcpy(dst_data, data, size);
+ skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
+ skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
+}
+
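+/*
+ * Build an skb around a page fragment sized for the payload plus skb
+ * shared info; if the fragment allocator fails, fall back to the NAPI
+ * skb cache.
+ */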
+static struct sk_buff *prepare_frag_skb(struct napi_struct *napi, int size)
+{
+ struct sk_buff *skb;
+ int skb_size = SKB_DATA_ALIGN(size + HFI1_IPOIB_SKB_PAD);
+ void *frag;
+
+ skb_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb_size = SKB_DATA_ALIGN(skb_size);
+ frag = napi_alloc_frag(skb_size);
+
+ if (unlikely(!frag))
+ return napi_alloc_skb(napi, size);
+
+ skb = build_skb(frag, skb_size);
+
+ if (unlikely(!skb)) {
+ skb_free_frag(frag);
+ return NULL;
+ }
+
+ skb_reserve(skb, HFI1_IPOIB_SKB_PAD);
+ return skb;
+}
+
+struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
+ int size, void *data)
+{
+ struct napi_struct *napi = &rxq->napi;
+ int skb_size = size + HFI1_IPOIB_ENCAP_LEN;
+ struct sk_buff *skb;
+
+	/*
+	 * For smaller (4K + skb overhead) allocations use the napi skb
+	 * cache. Otherwise fall back to the napi frag cache.
+	 */
+ if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE))
+ skb = napi_alloc_skb(napi, skb_size);
+ else
+ skb = prepare_frag_skb(napi, skb_size);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ copy_ipoib_buf(skb, data, size);
+
+ return skb;
+}
+
+int hfi1_ipoib_rxq_init(struct net_device *netdev)
+{
+ struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
+ struct hfi1_devdata *dd = ipoib_priv->dd;
+ int ret;
+
+ ret = hfi1_netdev_rx_init(dd);
+ if (ret)
+ return ret;
+
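+	/*
+	 * Program the RSM rule that steers accelerated IPoIB (AIP)
+	 * packets to the netdev receive contexts.
+	 */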
+ hfi1_init_aip_rsm(dd);
+
+ return ret;
+}
+
+void hfi1_ipoib_rxq_deinit(struct net_device *netdev)
+{
+ struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
+ struct hfi1_devdata *dd = ipoib_priv->dd;
+
+ hfi1_deinit_aip_rsm(dd);
+ hfi1_netdev_rx_destroy(dd);
+}
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
new file mode 100644
index 000000000000..883cb9d48022
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for IPoIB SDMA functionality
+ */
+
+#include <linux/log2.h>
+#include <linux/circ_buf.h>
+
+#include "sdma.h"
+#include "verbs.h"
+#include "trace_ibhdrs.h"
+#include "ipoib.h"
+
+/* Convenience helpers for circular buffer index arithmetic (power-of-2 size) */
+#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
+#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
+#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
+
+/**
+ * struct ipoib_txreq - IPoIB transmit descriptor
+ * @txreq: sdma transmit request
+ * @sdma_hdr: 9B IB headers
+ * @sdma_status: status returned by sdma engine
+ * @priv: ipoib netdev private data
+ * @txq: txq on which skb was output
+ * @skb: skb to send
+ */
+struct ipoib_txreq {
+ struct sdma_txreq txreq;
+ struct hfi1_sdma_header sdma_hdr;
+ int sdma_status;
+ struct hfi1_ipoib_dev_priv *priv;
+ struct hfi1_ipoib_txq *txq;
+ struct sk_buff *skb;
+};
+
+struct ipoib_txparms {
+ struct hfi1_devdata *dd;
+ struct rdma_ah_attr *ah_attr;
+ struct hfi1_ibport *ibp;
+ struct hfi1_ipoib_txq *txq;
+ union hfi1_ipoib_flow flow;
+ u32 dqpn;
+ u8 hdr_dwords;
+ u8 entropy;
+};
+
+static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
+{
+ return sent - completed;
+}
+
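+/*
+ * Stop the subqueue once outstanding txreqs reach the lesser of the
+ * device tx_queue_len and the ring capacity (max_items - 1).
+ */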
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+ if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs)) >=
+ min_t(unsigned int, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items - 1)))
+ netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
+{
+ struct net_device *dev = txq->priv->netdev;
+
+ /* If the queue is already running just return */
+ if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
+ return;
+
+ /* If shutting down just return as queue state is irrelevant */
+ if (unlikely(dev->reg_state != NETREG_REGISTERED))
+ return;
+
+ /*
+ * When the queue has been drained to less than half full it will be
+ * restarted.
+ * The size of the txreq ring is fixed at initialization.
+ * The tx queue len can be adjusted upward while the interface is
+ * running.
+ * The tx queue len can be large enough to overflow the txreq_ring.
+ * Use the minimum of the current tx_queue_len or the rings max txreqs
+ * to protect against ring overflow.
+ */
+ if (hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs))
+ < min_t(unsigned int, dev->tx_queue_len,
+ txq->tx_ring.max_items) >> 1)
+ netif_wake_subqueue(dev, txq->q_idx);
+}
+
+static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
+{
+ struct hfi1_ipoib_dev_priv *priv = tx->priv;
+
+ if (likely(!tx->sdma_status)) {
+ hfi1_ipoib_update_tx_netstats(priv, 1, tx->skb->len);
+ } else {
+ ++priv->netdev->stats.tx_errors;
+ dd_dev_warn(priv->dd,
+ "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
+ __func__, tx->sdma_status,
+ le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
+ tx->txq->sde->this_idx);
+ }
+
+ napi_consume_skb(tx->skb, budget);
+ sdma_txclean(priv->dd, &tx->txreq);
+ kmem_cache_free(priv->txreq_cache, tx);
+}
+
+static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget)
+{
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ unsigned long head;
+ unsigned long tail;
+ unsigned int max_tx;
+ int work_done;
+ int tx_count;
+
+ spin_lock_bh(&tx_ring->consumer_lock);
+
+ /* Read index before reading contents at that index. */
+ head = smp_load_acquire(&tx_ring->head);
+ tail = tx_ring->tail;
+ max_tx = tx_ring->max_items;
+
+ work_done = min_t(int, CIRC_CNT(head, tail, max_tx), budget);
+
+ for (tx_count = work_done; tx_count; tx_count--) {
+ hfi1_ipoib_free_tx(tx_ring->items[tail], budget);
+ tail = CIRC_NEXT(tail, max_tx);
+ }
+
+ atomic64_add(work_done, &txq->complete_txreqs);
+
+ /* Finished freeing tx items so store the tail value. */
+ smp_store_release(&tx_ring->tail, tail);
+
+ spin_unlock_bh(&tx_ring->consumer_lock);
+
+ hfi1_ipoib_check_queue_stopped(txq);
+
+ return work_done;
+}
+
+static int hfi1_ipoib_process_tx_ring(struct napi_struct *napi, int budget)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
+ struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];
+
+ int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);
+
+ if (work_done < budget)
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
+{
+ struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring;
+ unsigned long head;
+ unsigned long tail;
+ size_t max_tx;
+
+ spin_lock(&tx_ring->producer_lock);
+
+ head = tx_ring->head;
+ tail = READ_ONCE(tx_ring->tail);
+ max_tx = tx_ring->max_items;
+
+ if (likely(CIRC_SPACE(head, tail, max_tx))) {
+ tx_ring->items[head] = tx;
+
+ /* Finish storing txreq before incrementing head. */
+ smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
+ napi_schedule(tx->txq->napi);
+ } else {
+ struct hfi1_ipoib_txq *txq = tx->txq;
+ struct hfi1_ipoib_dev_priv *priv = tx->priv;
+
+ /* Ring was full */
+ hfi1_ipoib_free_tx(tx, 0);
+ atomic64_inc(&txq->complete_txreqs);
+ dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);
+ }
+
+ spin_unlock(&tx_ring->producer_lock);
+}
+
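+/*
+ * SDMA completion callback: record the status and queue the txreq on
+ * the tx ring so the skb is freed in NAPI context, not here.
+ */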
+static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
+{
+ struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);
+
+ tx->sdma_status = status;
+
+ hfi1_ipoib_add_tx(tx);
+}
+
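+/* Map the skb linear data and each page fragment into SDMA descriptors. */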
+static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_devdata *dd = txp->dd;
+ struct sdma_txreq *txreq = &tx->txreq;
+ struct sk_buff *skb = tx->skb;
+ int ret = 0;
+ int i;
+
+ if (skb_headlen(skb)) {
+ ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));
+ if (unlikely(ret))
+ return ret;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ ret = sdma_txadd_page(dd,
+ txreq,
+ skb_frag_page(frag),
+ frag->bv_offset,
+ skb_frag_size(frag));
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_devdata *dd = txp->dd;
+ struct sdma_txreq *txreq = &tx->txreq;
+ struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ u16 pkt_bytes =
+ sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
+ int ret;
+
+ ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);
+ if (unlikely(ret))
+ return ret;
+
+ /* add pbc + headers */
+ ret = sdma_txadd_kvaddr(dd,
+ txreq,
+ sdma_hdr,
+ sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2));
+ if (unlikely(ret))
+ return ret;
+
+ /* add the ulp payload */
+ return hfi1_ipoib_build_ulp_payload(tx, txp);
+}
+
+static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = tx->priv;
+ struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct sk_buff *skb = tx->skb;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
+ struct rdma_ah_attr *ah_attr = txp->ah_attr;
+ struct ib_other_headers *ohdr;
+ struct ib_grh *grh;
+ u16 dwords;
+ u16 slid;
+ u16 dlid;
+ u16 lrh0;
+ u32 bth0;
+ u32 sqpn = (u32)(priv->netdev->dev_addr[1] << 16 |
+ priv->netdev->dev_addr[2] << 8 |
+ priv->netdev->dev_addr[3]);
+ u16 payload_dwords;
+ u8 pad_cnt;
+
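+	/* Bytes needed to pad the payload up to a 4-byte boundary */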
+ pad_cnt = -skb->len & 3;
+
+ /* Includes ICRC */
+ payload_dwords = ((skb->len + pad_cnt) >> 2) + SIZE_OF_CRC;
+
+ /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
+ txp->hdr_dwords = 7;
+
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ grh = &sdma_hdr->hdr.ibh.u.l.grh;
+ txp->hdr_dwords +=
+ hfi1_make_grh(txp->ibp,
+ grh,
+ rdma_ah_read_grh(ah_attr),
+ txp->hdr_dwords - LRH_9B_DWORDS,
+ payload_dwords);
+ lrh0 = HFI1_LRH_GRH;
+ ohdr = &sdma_hdr->hdr.ibh.u.l.oth;
+ } else {
+ lrh0 = HFI1_LRH_BTH;
+ ohdr = &sdma_hdr->hdr.ibh.u.oth;
+ }
+
+ lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
+ lrh0 |= (txp->flow.sc5 & 0xf) << 12;
+
+ dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
+ if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ } else {
+ u16 lid = (u16)ppd->lid;
+
+ if (lid) {
+ lid |= rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1);
+ slid = lid;
+ } else {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ }
+ }
+
+ /* Includes ICRC */
+ dwords = txp->hdr_dwords + payload_dwords;
+
+ /* Build the lrh */
+ sdma_hdr->hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ hfi1_make_ib_hdr(&sdma_hdr->hdr.ibh, lrh0, dwords, dlid, slid);
+
+ /* Build the bth */
+ bth0 = (IB_OPCODE_UD_SEND_ONLY << 24) | (pad_cnt << 20) | priv->pkey;
+
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(txp->dqpn);
+ ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));
+
+ /* Build the deth */
+ ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32((txp->entropy <<
+ HFI1_IPOIB_ENTROPY_SHIFT) | sqpn);
+
+ /* Construct the pbc. */
+ sdma_hdr->pbc =
+ cpu_to_le64(create_pbc(ppd,
+ ib_is_sc5(txp->flow.sc5) <<
+ PBC_DC_INFO_SHIFT,
+ 0,
+ sc_to_vlt(priv->dd, txp->flow.sc5),
+ dwords - SIZE_OF_CRC +
+ (sizeof(sdma_hdr->pbc) >> 2)));
+}
+
+static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct ipoib_txreq *tx;
+ int ret;
+
+ tx = kmem_cache_alloc_node(priv->txreq_cache,
+ GFP_ATOMIC,
+ priv->dd->node);
+ if (unlikely(!tx))
+ return ERR_PTR(-ENOMEM);
+
+	/* so that we can test if the sdma descriptors are there */
+ tx->txreq.num_desc = 0;
+ tx->priv = priv;
+ tx->txq = txp->txq;
+ tx->skb = skb;
+
+ hfi1_ipoib_build_ib_tx_headers(tx, txp);
+
+ ret = hfi1_ipoib_build_tx_desc(tx, txp);
+ if (likely(!ret)) {
+ if (txp->txq->flow.as_int != txp->flow.as_int) {
+ txp->txq->flow.tx_queue = txp->flow.tx_queue;
+ txp->txq->flow.sc5 = txp->flow.sc5;
+ txp->txq->sde =
+ sdma_select_engine_sc(priv->dd,
+ txp->flow.tx_queue,
+ txp->flow.sc5);
+ }
+
+ return tx;
+ }
+
+ sdma_txclean(priv->dd, &tx->txreq);
+ kmem_cache_free(priv->txreq_cache, tx);
+
+ return ERR_PTR(ret);
+}
+
+static int hfi1_ipoib_submit_tx_list(struct net_device *dev,
+ struct hfi1_ipoib_txq *txq)
+{
+ int ret;
+ u16 count_out;
+
+ ret = sdma_send_txlist(txq->sde,
+ iowait_get_ib_work(&txq->wait),
+ &txq->tx_list,
+ &count_out);
+ if (likely(!ret) || ret == -EBUSY || ret == -ECOMM)
+ return ret;
+
+ dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);
+
+ return ret;
+}
+
+static int hfi1_ipoib_flush_tx_list(struct net_device *dev,
+ struct hfi1_ipoib_txq *txq)
+{
+ int ret = 0;
+
+ if (!list_empty(&txq->tx_list)) {
+ /* Flush the current list */
+ ret = hfi1_ipoib_submit_tx_list(dev, txq);
+
+ if (unlikely(ret))
+ if (ret != -EBUSY)
+ ++dev->stats.tx_carrier_errors;
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
+ struct ipoib_txreq *tx)
+{
+ int ret;
+
+ ret = sdma_send_txreq(txq->sde,
+ iowait_get_ib_work(&txq->wait),
+ &tx->txreq,
+ txq->pkts_sent);
+ if (likely(!ret)) {
+ txq->pkts_sent = true;
+ iowait_starve_clear(txq->pkts_sent, &txq->wait);
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_send_dma_single(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct hfi1_ipoib_txq *txq = txp->txq;
+ struct ipoib_txreq *tx;
+ int ret;
+
+ tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
+ if (IS_ERR(tx)) {
+ int ret = PTR_ERR(tx);
+
+ dev_kfree_skb_any(skb);
+
+ if (ret == -ENOMEM)
+ ++dev->stats.tx_errors;
+ else
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+ }
+
+ ret = hfi1_ipoib_submit_tx(txq, tx);
+ if (likely(!ret)) {
+ trace_sdma_output_ibhdr(tx->priv->dd,
+ &tx->sdma_hdr.hdr,
+ ib_is_sc5(txp->flow.sc5));
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ txq->pkts_sent = false;
+
+ if (ret == -EBUSY) {
+ list_add_tail(&tx->txreq.list, &txq->tx_list);
+
+ trace_sdma_output_ibhdr(tx->priv->dd,
+ &tx->sdma_hdr.hdr,
+ ib_is_sc5(txp->flow.sc5));
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ if (ret == -ECOMM) {
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ sdma_txclean(priv->dd, &tx->txreq);
+ dev_kfree_skb_any(skb);
+ kmem_cache_free(priv->txreq_cache, tx);
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+}
+
+static int hfi1_ipoib_send_dma_list(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_txq *txq = txp->txq;
+ struct ipoib_txreq *tx;
+
+	/* Has the flow changed? */
+ if (txq->flow.as_int != txp->flow.as_int)
+ (void)hfi1_ipoib_flush_tx_list(dev, txq);
+
+ tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
+ if (IS_ERR(tx)) {
+ int ret = PTR_ERR(tx);
+
+ dev_kfree_skb_any(skb);
+
+ if (ret == -ENOMEM)
+ ++dev->stats.tx_errors;
+ else
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+ }
+
+ list_add_tail(&tx->txreq.list, &txq->tx_list);
+
+ hfi1_ipoib_check_queue_depth(txq);
+
+ trace_sdma_output_ibhdr(tx->priv->dd,
+ &tx->sdma_hdr.hdr,
+ ib_is_sc5(txp->flow.sc5));
+
+ if (!netdev_xmit_more())
+ (void)hfi1_ipoib_flush_tx_list(dev, txq);
+
+ return NETDEV_TX_OK;
+}
+
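+/*
+ * Flow entropy: XOR of the first four transport header bytes (the port
+ * numbers for TCP/UDP) when present, otherwise the skb queue mapping.
+ * The value is carried in the DETH (see hfi1_ipoib_build_ib_tx_headers).
+ */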
+static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
+{
+ if (skb_transport_header_was_set(skb)) {
+ u8 *hdr = (u8 *)skb_transport_header(skb);
+
+ return (hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3]);
+ }
+
+ return (u8)skb_get_queue_mapping(skb);
+}
+
+int hfi1_ipoib_send_dma(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct ipoib_txparms txp;
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ if (unlikely(skb->len > rn->mtu + HFI1_IPOIB_ENCAP_LEN)) {
+ dd_dev_warn(priv->dd, "packet len %d (> %d) too long to send, dropping\n",
+ skb->len,
+ rn->mtu + HFI1_IPOIB_ENCAP_LEN);
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ txp.dd = priv->dd;
+ txp.ah_attr = &ibah_to_rvtah(address)->attr;
+ txp.ibp = to_iport(priv->device, priv->port_num);
+ txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
+ txp.dqpn = dqpn;
+ txp.flow.sc5 = txp.ibp->sl_to_sc[rdma_ah_get_sl(txp.ah_attr)];
+ txp.flow.tx_queue = (u8)skb_get_queue_mapping(skb);
+ txp.entropy = hfi1_ipoib_calc_entropy(skb);
+
+ if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
+ return hfi1_ipoib_send_dma_list(dev, skb, &txp);
+
+ return hfi1_ipoib_send_dma_single(dev, skb, &txp);
+}
+
+/*
+ * hfi1_ipoib_sdma_sleep - ipoib sdma sleep function
+ *
+ * This function gets called from sdma_send_txreq() when there are not enough
+ * sdma descriptors available to send the packet. It adds the Tx queue's wait
+ * structure to the sdma engine's dmawait list so it is woken up when descriptors
+ * become available.
+ */
+static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *txreq,
+ uint seq,
+ bool pkts_sent)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait->iow, struct hfi1_ipoib_txq, wait);
+
+ write_seqlock(&sde->waitlock);
+
+ if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
+ if (sdma_progress(sde, seq, txreq)) {
+ write_sequnlock(&sde->waitlock);
+ return -EAGAIN;
+ }
+
+ netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
+
+ if (list_empty(&txq->wait.list))
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+
+ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+ }
+
+ write_sequnlock(&sde->waitlock);
+ return -EINVAL;
+}
+
+/*
+ * hfi1_ipoib_sdma_wakeup - ipoib sdma wakeup function
+ *
+ * This function gets called when SDMA descriptors become available and the Tx
+ * queue's wait structure was previously added to the sdma engine's dmawait list.
+ */
+static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait, struct hfi1_ipoib_txq, wait);
+
+ if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
+ iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
+}
+
+static void hfi1_ipoib_flush_txq(struct work_struct *work)
+{
+ struct iowait_work *ioww =
+ container_of(work, struct iowait_work, iowork);
+ struct iowait *wait = iowait_ioww_to_iow(ioww);
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait, struct hfi1_ipoib_txq, wait);
+ struct net_device *dev = txq->priv->netdev;
+
+ if (likely(dev->reg_state == NETREG_REGISTERED) &&
+ likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
+ likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
+ netif_wake_subqueue(dev, txq->q_idx);
+}
+
+int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+{
+ struct net_device *dev = priv->netdev;
+ char buf[HFI1_IPOIB_TXREQ_NAME_LEN];
+ unsigned long tx_ring_size;
+ int i;
+
+ /*
+	 * The ring holds one entry less than tx_ring_size.
+	 * Round up to the next power of 2 to hold at least tx_queue_len.
+ */
+ tx_ring_size = roundup_pow_of_two((unsigned long)dev->tx_queue_len + 1);
+
+ snprintf(buf, sizeof(buf), "hfi1_%u_ipoib_txreq_cache", priv->dd->unit);
+ priv->txreq_cache = kmem_cache_create(buf,
+ sizeof(struct ipoib_txreq),
+ 0,
+ 0,
+ NULL);
+ if (!priv->txreq_cache)
+ return -ENOMEM;
+
+ priv->tx_napis = kcalloc_node(dev->num_tx_queues,
+ sizeof(struct napi_struct),
+ GFP_ATOMIC,
+ priv->dd->node);
+ if (!priv->tx_napis)
+ goto free_txreq_cache;
+
+ priv->txqs = kcalloc_node(dev->num_tx_queues,
+ sizeof(struct hfi1_ipoib_txq),
+ GFP_ATOMIC,
+ priv->dd->node);
+ if (!priv->txqs)
+ goto free_tx_napis;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ iowait_init(&txq->wait,
+ 0,
+ hfi1_ipoib_flush_txq,
+ NULL,
+ hfi1_ipoib_sdma_sleep,
+ hfi1_ipoib_sdma_wakeup,
+ NULL,
+ NULL);
+ txq->priv = priv;
+ txq->sde = NULL;
+ INIT_LIST_HEAD(&txq->tx_list);
+ atomic64_set(&txq->complete_txreqs, 0);
+ txq->q_idx = i;
+ txq->flow.tx_queue = 0xff;
+ txq->flow.sc5 = 0xff;
+ txq->pkts_sent = false;
+
+ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
+ priv->dd->node);
+
+ txq->tx_ring.items =
+ vzalloc_node(array_size(tx_ring_size,
+ sizeof(struct ipoib_txreq)),
+ priv->dd->node);
+ if (!txq->tx_ring.items)
+ goto free_txqs;
+
+ spin_lock_init(&txq->tx_ring.producer_lock);
+ spin_lock_init(&txq->tx_ring.consumer_lock);
+ txq->tx_ring.max_items = tx_ring_size;
+
+ txq->napi = &priv->tx_napis[i];
+ netif_tx_napi_add(dev, txq->napi,
+ hfi1_ipoib_process_tx_ring,
+ NAPI_POLL_WEIGHT);
+ }
+
+ return 0;
+
+free_txqs:
+ for (i--; i >= 0; i--) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ netif_napi_del(txq->napi);
+ vfree(txq->tx_ring.items);
+ }
+
+ kfree(priv->txqs);
+ priv->txqs = NULL;
+
+free_tx_napis:
+ kfree(priv->tx_napis);
+ priv->tx_napis = NULL;
+
+free_txreq_cache:
+ kmem_cache_destroy(priv->txreq_cache);
+ priv->txreq_cache = NULL;
+ return -ENOMEM;
+}
+
+static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
+{
+ struct sdma_txreq *txreq;
+ struct sdma_txreq *txreq_tmp;
+ atomic64_t *complete_txreqs = &txq->complete_txreqs;
+
+ list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
+ struct ipoib_txreq *tx =
+ container_of(txreq, struct ipoib_txreq, txreq);
+
+ list_del(&txreq->list);
+ sdma_txclean(txq->priv->dd, &tx->txreq);
+ dev_kfree_skb_any(tx->skb);
+ kmem_cache_free(txq->priv->txreq_cache, tx);
+ atomic64_inc(complete_txreqs);
+ }
+
+ if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+ dd_dev_warn(txq->priv->dd,
+ "txq %d not empty found %llu requests\n",
+ txq->q_idx,
+ hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(complete_txreqs)));
+}
+
+void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->netdev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ iowait_cancel_work(&txq->wait);
+ iowait_sdma_drain(&txq->wait);
+ hfi1_ipoib_drain_tx_list(txq);
+ netif_napi_del(txq->napi);
+ (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ vfree(txq->tx_ring.items);
+ }
+
+ kfree(priv->txqs);
+ priv->txqs = NULL;
+
+ kfree(priv->tx_napis);
+ priv->tx_napis = NULL;
+
+ kmem_cache_destroy(priv->txreq_cache);
+ priv->txreq_cache = NULL;
+}
+
+void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ napi_enable(txq->napi);
+ }
+}
+
+void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ napi_disable(txq->napi);
+ (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ }
+}
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index db82db497b2c..d61ee853d215 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * Copyright(c) 2018 Intel Corporation.
+ * Copyright(c) 2018 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -49,6 +49,7 @@
#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
+#include "netdev.h"
/**
* msix_initialize() - Calculate, request and configure MSIx IRQs
@@ -69,7 +70,7 @@ int msix_initialize(struct hfi1_devdata *dd)
* one for each VNIC context
* ...any new IRQs should be added here.
*/
- total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
+ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;
if (total >= CCE_NUM_MSIX_VECTORS)
return -EINVAL;
@@ -140,7 +141,7 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
if (ret) {
dd_dev_err(dd,
- "%s: request for IRQ %d failed, MSIx %lu, err %d\n",
+ "%s: request for IRQ %d failed, MSIx %lx, err %d\n",
name, irq, nr, ret);
spin_lock(&dd->msix_info.msix_lock);
__clear_bit(nr, dd->msix_info.in_use_msix);
@@ -160,7 +161,7 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
/* This is a request, so a failure is not fatal */
ret = hfi1_get_irq_affinity(dd, me);
if (ret)
- dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
+ dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret);
return nr;
}
@@ -171,7 +172,8 @@ static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
const char *name)
{
int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
- IRQ_RCVCTXT, name);
+ rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT,
+ name);
if (nr < 0)
return nr;
@@ -204,6 +206,21 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
}
/**
+ * msix_netdev_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * for netdev contexts
+ * @rcd: valid netdev context
+ */
+int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+ char name[MAX_NAME_SIZE];
+
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d nd kctxt%d",
+ rcd->dd->unit, rcd->ctxt);
+ return msix_request_rcd_irq_common(rcd, receive_context_interrupt_napi,
+ NULL, name);
+}
+
+/**
* msix_request_smda_ira() - Helper for getting SDMA IRQ resources
* @sde: valid sdma engine
*
@@ -355,15 +372,16 @@ void msix_clean_up_interrupts(struct hfi1_devdata *dd)
}
/**
- * msix_vnic_syncrhonize_irq() - Vnic IRQ synchronize
+ * msix_netdev_synchronize_irq() - netdev IRQ synchronize
* @dd: valid devdata
*/
-void msix_vnic_synchronize_irq(struct hfi1_devdata *dd)
+void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
{
int i;
+ int ctxt_count = hfi1_netdev_ctxt_count(dd);
- for (i = 0; i < dd->vnic.num_ctxt; i++) {
- struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
+ for (i = 0; i < ctxt_count; i++) {
+ struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);
struct hfi1_msix_entry *me;
me = &dd->msix_info.msix_entries[rcd->msix_intr];
diff --git a/drivers/infiniband/hw/hfi1/msix.h b/drivers/infiniband/hw/hfi1/msix.h
index 1a02ab7971c8..e63e944bf0fc 100644
--- a/drivers/infiniband/hw/hfi1/msix.h
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
- * Copyright(c) 2018 Intel Corporation.
+ * Copyright(c) 2018 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -59,7 +59,8 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
int msix_request_sdma_irq(struct sdma_engine *sde);
void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
-/* VNIC interface */
-void msix_vnic_synchronize_irq(struct hfi1_devdata *dd);
+/* Netdev interface */
+void msix_netdev_synchronize_irq(struct hfi1_devdata *dd);
+int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd);
#endif
diff --git a/drivers/infiniband/hw/hfi1/netdev.h b/drivers/infiniband/hw/hfi1/netdev.h
new file mode 100644
index 000000000000..947543a3e0c4
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/netdev.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+#ifndef HFI1_NETDEV_H
+#define HFI1_NETDEV_H
+
+#include "hfi.h"
+
+#include <linux/netdevice.h>
+#include <linux/xarray.h>
+
+/**
+ * struct hfi1_netdev_rxq - Receive Queue for HFI
+ * dummy netdev. Both the IPoIB and VNIC netdevices operate on top of
+ * this device.
+ * @napi: napi object
+ * @priv: ptr to netdev_priv
+ * @rcd: ptr to receive context data
+ */
+struct hfi1_netdev_rxq {
+ struct napi_struct napi;
+ struct hfi1_netdev_priv *priv;
+ struct hfi1_ctxtdata *rcd;
+};
+
+/*
+ * Number of netdev contexts used. Ensure it is less than or equal to
+ * the maximum number of queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
+ */
+#define HFI1_MAX_NETDEV_CTXTS 8
+
+/* Number of NETDEV RSM entries */
+#define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS
+
+/**
+ * struct hfi1_netdev_priv: data required to set up and run the HFI netdev.
+ * @dd: hfi1_devdata
+ * @rxq: pointer to dummy netdev receive queues.
+ * @num_rx_q: number of receive queues
+ * @rmt_start: first free index in the RMT array
+ * @dev_tbl: netdev table of unique identifiers for VNIC and IPoIB VLANs.
+ * @enabled: atomic counter of netdevs enabling receive queues.
+ * When 0 NAPI will be disabled.
+ * @netdevs: atomic counter of netdevs using dummy netdev.
+ * When 0 receive queues will be freed.
+ */
+struct hfi1_netdev_priv {
+ struct hfi1_devdata *dd;
+ struct hfi1_netdev_rxq *rxq;
+ int num_rx_q;
+ int rmt_start;
+ struct xarray dev_tbl;
+ /* count of enabled napi polls */
+ atomic_t enabled;
+ /* count of netdevs on top */
+ atomic_t netdevs;
+};
+
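+/* The private area is laid out immediately after the dummy net_device. */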
+static inline
+struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
+{
+ return (struct hfi1_netdev_priv *)&dev[1];
+}
+
+static inline
+int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return priv->num_rx_q;
+}
+
+static inline
+struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return priv->rxq[ctxt].rcd;
+}
+
+static inline
+int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return priv->rmt_start;
+}
+
+static inline
+void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ priv->rmt_start = rmt_idx;
+}
+
+u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
+ struct cpumask *cpu_mask);
+
+void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
+void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
+int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
+int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
+int hfi1_netdev_alloc(struct hfi1_devdata *dd);
+void hfi1_netdev_free(struct hfi1_devdata *dd);
+int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
+void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
+void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
+void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id);
+
+/* chip.c */
+int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget);
+
+#endif /* HFI1_NETDEV_H */
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
new file mode 100644
index 000000000000..63688e85e8da
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for netdev RX functionality
+ */
+
+#include "sdma.h"
+#include "verbs.h"
+#include "netdev.h"
+#include "hfi.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <rdma/ib_verbs.h>
+
+static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
+ struct hfi1_ctxtdata *uctxt)
+{
+ unsigned int rcvctrl_ops;
+ struct hfi1_devdata *dd = priv->dd;
+ int ret;
+
+ uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
+ uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;
+
+ /* Now allocate the RcvHdr queue and eager buffers. */
+ ret = hfi1_create_rcvhdrq(dd, uctxt);
+ if (ret)
+ goto done;
+
+ ret = hfi1_setup_eagerbufs(uctxt);
+ if (ret)
+ goto done;
+
+ clear_rcvhdrtail(uctxt);
+
+ rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS;
+ rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS;
+
+ if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
+ rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
+ rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
+
+ hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
+done:
+ return ret;
+}
+
+static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata **ctxt)
+{
+ struct hfi1_ctxtdata *uctxt;
+ int ret;
+
+ if (dd->flags & HFI1_FROZEN)
+ return -EIO;
+
+ ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
+ if (ret < 0) {
+ dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
+ return -ENOMEM;
+ }
+
+ uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
+ HFI1_CAP_KGET(NODROP_RHQ_FULL) |
+ HFI1_CAP_KGET(NODROP_EGR_FULL) |
+ HFI1_CAP_KGET(DMA_RTAIL);
+ /* Netdev contexts are always NO_RDMA_RTAIL */
+ uctxt->fast_handler = handle_receive_interrupt_napi_fp;
+ uctxt->slow_handler = handle_receive_interrupt_napi_sp;
+ hfi1_set_seq_cnt(uctxt, 1);
+ uctxt->is_vnic = true;
+
+ hfi1_stats.sps_ctxts++;
+
+ dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
+ *ctxt = uctxt;
+
+ return 0;
+}
+
+static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata *uctxt)
+{
+ flush_wc();
+
+ /*
+ * Disable receive context and interrupt available, reset all
+ * RcvCtxtCtrl bits to default values.
+ */
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_TIDFLOW_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
+ HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+
+ if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
+ msix_free_irq(dd, uctxt->msix_intr);
+
+ uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
+ uctxt->event_flags = 0;
+
+ hfi1_clear_tids(uctxt);
+ hfi1_clear_ctxt_pkey(dd, uctxt);
+
+ hfi1_stats.sps_ctxts--;
+
+ hfi1_free_ctxt(uctxt);
+}
+
+static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
+ struct hfi1_ctxtdata **ctxt)
+{
+ int rc;
+ struct hfi1_devdata *dd = priv->dd;
+
+ rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
+ if (rc) {
+ dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
+ return rc;
+ }
+
+ rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
+ if (rc) {
+ dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
+ hfi1_netdev_deallocate_ctxt(dd, *ctxt);
+ *ctxt = NULL;
+ }
+
+ return rc;
+}
+
+/**
+ * hfi1_num_netdev_contexts - Count of netdev recv contexts to use.
+ * @dd: device on which to allocate netdev contexts
+ * @available_contexts: count of available receive contexts
+ * @cpu_mask: mask of possible cpus to include for contexts
+ *
+ * Return: count of physical cores on a node or the remaining available recv
+ * contexts for netdev recv context usage up to the maximum of
+ * HFI1_MAX_NETDEV_CTXTS.
+ * A value of 0 can be returned when acceleration is explicitly turned off,
+ * when a memory allocation error occurs, or when no contexts are available.
+ *
+ */
+u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
+ struct cpumask *cpu_mask)
+{
+ cpumask_var_t node_cpu_mask;
+ unsigned int available_cpus;
+
+ if (!HFI1_CAP_IS_KSET(AIP))
+ return 0;
+
+ /* Always give user contexts priority over netdev contexts */
+ if (available_contexts == 0) {
+ dd_dev_info(dd, "No receive contexts available for netdevs.\n");
+ return 0;
+ }
+
+ if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
+ dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
+ return 0;
+ }
+
+ cpumask_and(node_cpu_mask, cpu_mask,
+ cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+
+ available_cpus = cpumask_weight(node_cpu_mask);
+
+ free_cpumask_var(node_cpu_mask);
+
+ return min3(available_cpus, available_contexts,
+ (u32)HFI1_MAX_NETDEV_CTXTS);
+}
+
+static int hfi1_netdev_rxq_init(struct net_device *dev)
+{
+ int i;
+ int rc;
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
+ struct hfi1_devdata *dd = priv->dd;
+
+ priv->num_rx_q = dd->num_netdev_contexts;
+ priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
+ GFP_KERNEL, dd->node);
+
+ if (!priv->rxq) {
+ dd_dev_err(dd, "Unable to allocate netdev queue data\n");
+ return (-ENOMEM);
+ }
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
+ if (rc)
+ goto bail_context_irq_failure;
+
+ hfi1_rcd_get(rxq->rcd);
+ rxq->priv = priv;
+ rxq->rcd->napi = &rxq->napi;
+ dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
+ i, rxq->rcd->ctxt);
+ /*
+ * Disable BUSY_POLL on this NAPI as this is not supported
+ * right now.
+ */
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
+ netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
+ rc = msix_netdev_request_rcd_irq(rxq->rcd);
+ if (rc)
+ goto bail_context_irq_failure;
+ }
+
+ return 0;
+
+bail_context_irq_failure:
+ dd_dev_err(dd, "Unable to allot receive context\n");
+ for (; i >= 0; i--) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ if (rxq->rcd) {
+ hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
+ hfi1_rcd_put(rxq->rcd);
+ rxq->rcd = NULL;
+ }
+ }
+ kfree(priv->rxq);
+ priv->rxq = NULL;
+
+ return rc;
+}
+
+static void hfi1_netdev_rxq_deinit(struct net_device *dev)
+{
+ int i;
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
+ struct hfi1_devdata *dd = priv->dd;
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ netif_napi_del(&rxq->napi);
+ hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
+ hfi1_rcd_put(rxq->rcd);
+ rxq->rcd = NULL;
+ }
+
+ kfree(priv->rxq);
+ priv->rxq = NULL;
+ priv->num_rx_q = 0;
+}
+
+static void enable_queues(struct hfi1_netdev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
+ rxq->rcd->ctxt);
+ napi_enable(&rxq->napi);
+ hfi1_rcvctrl(priv->dd,
+ HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
+ rxq->rcd);
+ }
+}
+
+static void disable_queues(struct hfi1_netdev_priv *priv)
+{
+ int i;
+
+ msix_netdev_synchronize_irq(priv->dd);
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
+ rxq->rcd->ctxt);
+
+ /* wait for napi if it was scheduled */
+ hfi1_rcvctrl(priv->dd,
+ HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
+ rxq->rcd);
+ napi_synchronize(&rxq->napi);
+ napi_disable(&rxq->napi);
+ }
+}
+
+/**
+ * hfi1_netdev_rx_init - Increments the netdevs counter. When called the
+ *		first time, it allocates the receive queue data and calls
+ *		netif_napi_add for each queue.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ int res;
+
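+	/* Only the first netdev coming up sets up the receive queues. */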
+ if (atomic_fetch_inc(&priv->netdevs))
+ return 0;
+
+ mutex_lock(&hfi1_mutex);
+ init_dummy_netdev(dd->dummy_netdev);
+ res = hfi1_netdev_rxq_init(dd->dummy_netdev);
+ mutex_unlock(&hfi1_mutex);
+ return res;
+}
+
+/**
+ * hfi1_netdev_rx_destroy - Decrements the netdevs counter; when it reaches 0,
+ * napi is deleted and the receive queue memory is freed.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ /* destroy the RX queues only if it is the last netdev going away */
+ if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
+ mutex_lock(&hfi1_mutex);
+ hfi1_netdev_rxq_deinit(dd->dummy_netdev);
+ mutex_unlock(&hfi1_mutex);
+ }
+
+ return 0;
+}
+
+/**
+ * hfi1_netdev_alloc - Allocates the dummy netdev and private data. It is
+ *		required because the RMT index and MSI-X interrupts can be
+ *		set only during driver initialization.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_alloc(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv;
+ const int netdev_size = sizeof(*dd->dummy_netdev) +
+ sizeof(struct hfi1_netdev_priv);
+
+ dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
+ dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);
+
+ if (!dd->dummy_netdev)
+ return -ENOMEM;
+
+ priv = hfi1_netdev_priv(dd->dummy_netdev);
+ priv->dd = dd;
+ xa_init(&priv->dev_tbl);
+ atomic_set(&priv->enabled, 0);
+ atomic_set(&priv->netdevs, 0);
+
+ return 0;
+}
+
+void hfi1_netdev_free(struct hfi1_devdata *dd)
+{
+ if (dd->dummy_netdev) {
+ dd_dev_info(dd, "hfi1 netdev freed\n");
+ free_netdev(dd->dummy_netdev);
+ dd->dummy_netdev = NULL;
+ }
+}
+
+/**
+ * hfi1_netdev_enable_queues - Enables the napi objects associated with
+ * the receive queues. Each enabling netdev increments an atomic counter;
+ * the disable function decrements it and, when it reaches 0, calls
+ * napi_disable for every queue.
+ *
+ * @dd: hfi1 dev data
+ */
+void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv;
+
+ if (!dd->dummy_netdev)
+ return;
+
+ priv = hfi1_netdev_priv(dd->dummy_netdev);
+ if (atomic_fetch_inc(&priv->enabled))
+ return;
+
+ mutex_lock(&hfi1_mutex);
+ enable_queues(priv);
+ mutex_unlock(&hfi1_mutex);
+}
+
+void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv;
+
+ if (!dd->dummy_netdev)
+ return;
+
+ priv = hfi1_netdev_priv(dd->dummy_netdev);
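+	/* Only the last netdev going down actually quiesces the queues. */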
+ if (atomic_dec_if_positive(&priv->enabled))
+ return;
+
+ mutex_lock(&hfi1_mutex);
+ disable_queues(priv);
+ mutex_unlock(&hfi1_mutex);
+}
+
+/**
+ * hfi1_netdev_add_data - Registers data with a unique identifier to be
+ * requested later; this is needed for the VNIC and IPoIB VLAN
+ * implementations.
+ * The backing xarray provides its own locking.
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ * @data: data to be associated with index
+ */
+int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
+}
+
+/**
+ * hfi1_netdev_remove_data - Removes data with previously given id.
+ * Returns a reference to the removed entry.
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ */
+void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return xa_erase(&priv->dev_tbl, id);
+}
+
+/**
+ * hfi1_netdev_get_data - Gets data with given id
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ */
+void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return xa_load(&priv->dev_tbl, id);
+}
+
+/**
+ * hfi1_netdev_get_first_data - Gets the first entry with an id greater
+ * than or equal to *start_id.
+ *
+ * @dd: hfi1 dev data
+ * @start_id: requested integer id up to INT_MAX; updated to the id found
+ */
+void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ unsigned long index = *start_id;
+ void *ret;
+
+ ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
+ *start_id = (int)index;
+ return ret;
+}
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index f8e733aa3bb8..0c2ae9f7b3e8 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2019 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -186,15 +186,6 @@ static void flush_iowait(struct rvt_qp *qp)
write_sequnlock_irqrestore(lock, flags);
}
-static inline int opa_mtu_enum_to_int(int mtu)
-{
- switch (mtu) {
- case OPA_MTU_8192: return 8192;
- case OPA_MTU_10240: return 10240;
- default: return -1;
- }
-}
-
/**
* This function is what we would push to the core layer if we wanted to be a
* "first class citizen". Instead we hide this here and rely on Verbs ULPs
@@ -202,15 +193,10 @@ static inline int opa_mtu_enum_to_int(int mtu)
*/
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
- int val;
-
/* Constraining 10KB packets to 8KB packets */
if (mtu == (enum ib_mtu)OPA_MTU_10240)
mtu = OPA_MTU_8192;
- val = opa_mtu_enum_to_int((int)mtu);
- if (val > 0)
- return val;
- return ib_mtu_enum_to_int(mtu);
+ return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 8a2e0d9351e9..243b4ba0b6f6 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * Copyright(c) 2018 Intel Corporation.
+ * Copyright(c) 2018 - 2020 Intel Corporation.
*
*/
@@ -194,7 +194,7 @@ void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
{
struct hfi1_qp_priv *priv = qp->priv;
- p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
+ p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt;
p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
p->jkey = priv->rcd->jkey;
p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 9a3d236bcc88..b219ea90fd6f 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -47,6 +47,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "exp_rcv.h"
+#include "ipoib.h"
static u8 __get_ib_hdr_len(struct ib_header *hdr)
{
@@ -126,6 +127,7 @@ const char *hfi1_trace_get_packet_l2_str(u8 l2)
#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
#define DETH_PRN "deth qkey:0x%.8x sqpn:0x%.6x"
+#define DETH_ENTROPY_PRN "deth qkey:0x%.8x sqpn:0x%.6x entropy:0x%.2x"
#define IETH_PRN "ieth rkey:0x%.8x"
#define ATOMICACKETH_PRN "origdata:%llx"
#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
@@ -444,6 +446,12 @@ const char *parse_everbs_hdrs(
break;
/* deth */
case OP(UD, SEND_ONLY):
+ trace_seq_printf(p, DETH_ENTROPY_PRN,
+ be32_to_cpu(eh->ud.deth[0]),
+ be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK,
+ be32_to_cpu(eh->ud.deth[1]) >>
+ HFI1_IPOIB_ENTROPY_SHIFT);
+ break;
case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, DETH_PRN,
be32_to_cpu(eh->ud.deth[0]),
@@ -512,6 +520,38 @@ u16 hfi1_trace_get_tid_idx(u32 ent)
return EXP_TID_GET(ent, IDX);
}
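+/*
+ * Ad-hoc histogram of packets seen per receive context; printed via
+ * hfi1_trace_print_rsm_hist() for the ctxt_rsm_hist tracepoint.
+ */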
+struct hfi1_ctxt_hist {
+ atomic_t count;
+ atomic_t data[255];
+};
+
+struct hfi1_ctxt_hist hist = {
+ .count = ATOMIC_INIT(0)
+};
+
+const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt)
+{
+ int i, len = ARRAY_SIZE(hist.data);
+ const char *ret = trace_seq_buffer_ptr(p);
+ unsigned long packet_count = atomic_fetch_inc(&hist.count);
+
+ trace_seq_printf(p, "packet[%lu]", packet_count);
+ for (i = 0; i < len; ++i) {
+ unsigned long val;
+ atomic_t *count = &hist.data[i];
+
+ if (ctxt == i)
+ val = atomic_fetch_inc(count);
+ else
+ val = atomic_read(count);
+
+ if (val)
+ trace_seq_printf(p, "(%d:%lu)", i, val);
+ }
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
__hfi1_trace_fn(AFFINITY);
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
index b5fc5c6cd52f..d8c168dc3ea8 100644
--- a/drivers/infiniband/hw/hfi1/trace_ctxts.h
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -1,5 +1,5 @@
/*
-* Copyright(c) 2015, 2016 Intel Corporation.
+* Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -138,6 +138,15 @@ TRACE_EVENT(hfi1_ctxt_info,
)
);
+const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
+TRACE_EVENT(ctxt_rsm_hist,
+ TP_PROTO(unsigned int ctxt),
+ TP_ARGS(ctxt),
+ TP_STRUCT__entry(__field(unsigned int, ctxt)),
+ TP_fast_assign(__entry->ctxt = ctxt;),
+ TP_printk("%s", hfi1_trace_print_rsm_hist(p, __entry->ctxt))
+);
+
#endif /* __HFI1_TRACE_CTXTS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 4da03f823474..f81ca20f4b69 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -206,13 +206,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
return -EINVAL;
}
- /* Verify that access is OK for the user buffer */
- if (!access_ok((void __user *)vaddr,
- npages * PAGE_SIZE)) {
- dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
- (void *)vaddr, npages);
- return -EFAULT;
- }
/* Allocate the array of struct page pointers needed for pinning */
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
if (!pages)
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 13e4203497b3..a92346e88628 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
pq->state = SDMA_PKT_Q_ACTIVE;
- /* Send the first N packets in the request to buy us some time */
- ret = user_sdma_send_pkts(req, pcount);
- if (unlikely(ret < 0 && ret != -EBUSY))
- goto free_req;
/*
* This is a somewhat blocking send implementation.
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 2f6323ad9c59..30865635b449 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -66,6 +66,7 @@
#include "vnic.h"
#include "fault.h"
#include "affinity.h"
+#include "ipoib.h"
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
@@ -1342,7 +1343,7 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
IB_DEVICE_MEM_MGT_EXTENSIONS |
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC;
+ IB_DEVICE_RDMA_NETDEV_OPA;
rdi->dparms.props.page_size_cap = PAGE_SIZE;
rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
rdi->dparms.props.vendor_part_id = dd->pcidev->device;
@@ -1360,7 +1361,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
rdi->dparms.props.max_cq = hfi1_max_cqs;
rdi->dparms.props.max_ah = hfi1_max_ahs;
rdi->dparms.props.max_cqe = hfi1_max_cqes;
- rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_pd = hfi1_max_pds;
rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
rdi->dparms.props.max_qp_init_rd_atom = 255;
@@ -1439,6 +1439,8 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
+ props->phys_mtu = HFI1_CAP_IS_KSET(AIP) ? hfi1_max_mtu :
+ ib_mtu_enum_to_int(props->max_mtu);
return 0;
}
@@ -1793,6 +1795,7 @@ static const struct ib_device_ops hfi1_dev_ops = {
.modify_device = modify_device,
/* keep process mad in the driver */
.process_mad = hfi1_process_mad,
+ .rdma_netdev_get_params = hfi1_ipoib_rn_get_params,
};
/**
@@ -1863,9 +1866,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.qpn_start = 0;
dd->verbs_dev.rdi.dparms.qpn_inc = 1;
dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
- dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
- dd->verbs_dev.rdi.dparms.qpn_res_end =
- dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
+ dd->verbs_dev.rdi.dparms.qpn_res_start = RVT_KDETH_QP_BASE;
+ dd->verbs_dev.rdi.dparms.qpn_res_end = RVT_AIP_QP_MAX;
dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h
index 5ae781514e32..66150a13f374 100644
--- a/drivers/infiniband/hw/hfi1/vnic.h
+++ b/drivers/infiniband/hw/hfi1/vnic.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_VNIC_H
#define _HFI1_VNIC_H
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -69,6 +69,7 @@
#define HFI1_VNIC_SC_SHIFT 4
#define HFI1_VNIC_MAX_QUEUE 16
+#define HFI1_NUM_VNIC_CTXT 8
/**
* struct hfi1_vnic_sdma - VNIC per Tx ring SDMA information
@@ -104,7 +105,6 @@ struct hfi1_vnic_rx_queue {
struct hfi1_vnic_vport_info *vinfo;
struct net_device *netdev;
struct napi_struct napi;
- struct sk_buff_head skbq;
};
/**
@@ -146,7 +146,6 @@ struct hfi1_vnic_vport_info {
/* vnic hfi1 internal functions */
void hfi1_vnic_setup(struct hfi1_devdata *dd);
-void hfi1_vnic_cleanup(struct hfi1_devdata *dd);
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd);
void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 6b14581b9965..a90824de0f57 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2017 - 2018 Intel Corporation.
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -53,6 +53,7 @@
#include <linux/if_vlan.h>
#include "vnic.h"
+#include "netdev.h"
#define HFI_TX_TIMEOUT_MS 1000
@@ -62,114 +63,6 @@
static DEFINE_SPINLOCK(vport_cntr_lock);
-static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
-{
- unsigned int rcvctrl_ops = 0;
- int ret;
-
- uctxt->do_interrupt = &handle_receive_interrupt;
-
- /* Now allocate the RcvHdr queue and eager buffers. */
- ret = hfi1_create_rcvhdrq(dd, uctxt);
- if (ret)
- goto done;
-
- ret = hfi1_setup_eagerbufs(uctxt);
- if (ret)
- goto done;
-
- if (hfi1_rcvhdrtail_kvaddr(uctxt))
- clear_rcvhdrtail(uctxt);
-
- rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
- rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_ENB;
-
- if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
- rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
- rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
- rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
- rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
-
- hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
-done:
- return ret;
-}
-
-static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
- struct hfi1_ctxtdata **vnic_ctxt)
-{
- struct hfi1_ctxtdata *uctxt;
- int ret;
-
- if (dd->flags & HFI1_FROZEN)
- return -EIO;
-
- ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
- if (ret < 0) {
- dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
- return -ENOMEM;
- }
-
- uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
- HFI1_CAP_KGET(NODROP_RHQ_FULL) |
- HFI1_CAP_KGET(NODROP_EGR_FULL) |
- HFI1_CAP_KGET(DMA_RTAIL);
- uctxt->seq_cnt = 1;
- uctxt->is_vnic = true;
-
- msix_request_rcd_irq(uctxt);
-
- hfi1_stats.sps_ctxts++;
- dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
- *vnic_ctxt = uctxt;
-
- return 0;
-}
-
-static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
- struct hfi1_ctxtdata *uctxt)
-{
- dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
- flush_wc();
-
- /*
- * Disable receive context and interrupt available, reset all
- * RcvCtxtCtrl bits to default values.
- */
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
- HFI1_RCVCTRL_TIDFLOW_DIS |
- HFI1_RCVCTRL_INTRAVAIL_DIS |
- HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
- HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
- HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
-
- /* msix_intr will always be > 0, only clean up if this is true */
- if (uctxt->msix_intr)
- msix_free_irq(dd, uctxt->msix_intr);
-
- uctxt->event_flags = 0;
-
- hfi1_clear_tids(uctxt);
- hfi1_clear_ctxt_pkey(dd, uctxt);
-
- hfi1_stats.sps_ctxts--;
-
- hfi1_free_ctxt(uctxt);
-}
-
-void hfi1_vnic_setup(struct hfi1_devdata *dd)
-{
- xa_init(&dd->vnic.vesws);
-}
-
-void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
-{
- WARN_ON(!xa_empty(&dd->vnic.vesws));
-}
-
#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \
u64 *src64, *dst64; \
for (src64 = &qstats->x_grp.unicast, \
@@ -179,6 +72,9 @@ void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
} \
} while (0)
+#define VNIC_MASK (0xFF)
+#define VNIC_ID(val) ((1ull << 24) | ((val) & VNIC_MASK))
+
/* hfi1_vnic_update_stats - update statistics */
static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo,
struct opa_vnic_stats *stats)
@@ -454,71 +350,25 @@ static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
return rc;
}
-static inline struct sk_buff *hfi1_vnic_get_skb(struct hfi1_vnic_rx_queue *rxq)
+static struct hfi1_vnic_vport_info *get_vnic_port(struct hfi1_devdata *dd,
+ int vesw_id)
{
- unsigned char *pad_info;
- struct sk_buff *skb;
+ int vnic_id = VNIC_ID(vesw_id);
- skb = skb_dequeue(&rxq->skbq);
- if (unlikely(!skb))
- return NULL;
-
- /* remove tail padding and icrc */
- pad_info = skb->data + skb->len - 1;
- skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
- ((*pad_info) & 0x7)));
-
- return skb;
+ return hfi1_netdev_get_data(dd, vnic_id);
}
-/* hfi1_vnic_handle_rx - handle skb receive */
-static void hfi1_vnic_handle_rx(struct hfi1_vnic_rx_queue *rxq,
- int *work_done, int work_to_do)
+static struct hfi1_vnic_vport_info *get_first_vnic_port(struct hfi1_devdata *dd)
{
- struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
- struct sk_buff *skb;
- int rc;
-
- while (1) {
- if (*work_done >= work_to_do)
- break;
-
- skb = hfi1_vnic_get_skb(rxq);
- if (unlikely(!skb))
- break;
-
- rc = hfi1_vnic_decap_skb(rxq, skb);
- /* update rx counters */
- hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
- if (unlikely(rc)) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- skb_checksum_none_assert(skb);
- skb->protocol = eth_type_trans(skb, rxq->netdev);
-
- napi_gro_receive(&rxq->napi, skb);
- (*work_done)++;
- }
-}
-
-/* hfi1_vnic_napi - napi receive polling callback function */
-static int hfi1_vnic_napi(struct napi_struct *napi, int budget)
-{
- struct hfi1_vnic_rx_queue *rxq = container_of(napi,
- struct hfi1_vnic_rx_queue, napi);
- struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
- int work_done = 0;
+ struct hfi1_vnic_vport_info *vinfo;
+ int next_id = VNIC_ID(0);
- v_dbg("napi %d budget %d\n", rxq->idx, budget);
- hfi1_vnic_handle_rx(rxq, &work_done, budget);
+ vinfo = hfi1_netdev_get_first_data(dd, &next_id);
- v_dbg("napi %d work_done %d\n", rxq->idx, work_done);
- if (work_done < budget)
- napi_complete(napi);
+ if (next_id > VNIC_ID(VNIC_MASK))
+ return NULL;
- return work_done;
+ return vinfo;
}
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
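With the per-device xarray gone, vESW ports now live in the shared hfi1 netdev table, keyed through VNIC_ID() above: bit 24 tags the key as a VNIC entry and the low eight bits carry the vESW id, which is why get_first_vnic_port() treats any key above VNIC_ID(VNIC_MASK) as "no VNIC entry". A minimal userspace sketch of that key arithmetic (macros copied from the hunk; the printed values are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define VNIC_MASK    (0xFF)
/* Bit 24 marks the key as a VNIC entry; low 8 bits hold the vESW id. */
#define VNIC_ID(val) ((1ull << 24) | ((val) & VNIC_MASK))

int main(void)
{
	uint64_t first = VNIC_ID(0);         /* 0x1000000: start of range */
	uint64_t last  = VNIC_ID(VNIC_MASK); /* 0x10000FF: end of range   */

	printf("vesw_id 5 -> key 0x%llx\n", (unsigned long long)VNIC_ID(5));
	printf("VNIC key range: 0x%llx .. 0x%llx\n",
	       (unsigned long long)first, (unsigned long long)last);
	return 0;
}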
@@ -527,13 +377,14 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
struct hfi1_vnic_vport_info *vinfo = NULL;
struct hfi1_vnic_rx_queue *rxq;
struct sk_buff *skb;
- int l4_type, vesw_id = -1;
+ int l4_type, vesw_id = -1, rc;
u8 q_idx;
+ unsigned char *pad_info;
l4_type = hfi1_16B_get_l4(packet->ebuf);
if (likely(l4_type == OPA_16B_L4_ETHR)) {
vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
- vinfo = xa_load(&dd->vnic.vesws, vesw_id);
+ vinfo = get_vnic_port(dd, vesw_id);
/*
* In case of invalid vesw id, count the error on
@@ -541,10 +392,8 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
*/
if (unlikely(!vinfo)) {
struct hfi1_vnic_vport_info *vinfo_tmp;
- unsigned long index = 0;
- vinfo_tmp = xa_find(&dd->vnic.vesws, &index, ULONG_MAX,
- XA_PRESENT);
+ vinfo_tmp = get_first_vnic_port(dd);
if (vinfo_tmp) {
spin_lock(&vport_cntr_lock);
vinfo_tmp->stats[0].netstats.rx_nohandler++;
@@ -563,12 +412,6 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
rxq = &vinfo->rxq[q_idx];
if (unlikely(!netif_oper_up(vinfo->netdev))) {
vinfo->stats[q_idx].rx_drop_state++;
- skb_queue_purge(&rxq->skbq);
- return;
- }
-
- if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
- vinfo->stats[q_idx].netstats.rx_fifo_errors++;
return;
}
@@ -580,62 +423,65 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
memcpy(skb->data, packet->ebuf, packet->tlen);
skb_put(skb, packet->tlen);
- skb_queue_tail(&rxq->skbq, skb);
- if (napi_schedule_prep(&rxq->napi)) {
- v_dbg("napi %d scheduling\n", q_idx);
- __napi_schedule(&rxq->napi);
+ pad_info = skb->data + skb->len - 1;
+ skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
+ ((*pad_info) & 0x7)));
+
+ rc = hfi1_vnic_decap_skb(rxq, skb);
+
+ /* update rx counters */
+ hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
+ if (unlikely(rc)) {
+ dev_kfree_skb_any(skb);
+ return;
}
+
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, rxq->netdev);
+
+ napi_gro_receive(&rxq->napi, skb);
}
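The receive path above now trims the tail padding and ICRC inline instead of deferring to a skb queue helper: the last payload byte's low three bits encode the pad length. A standalone sketch of that arithmetic, assuming OPA_VNIC_ICRC_TAIL_LEN is 5 (a 4-byte ICRC plus 1 tail byte, as in the opa_vnic headers); the packet length here is hypothetical:

#include <stdio.h>

/* Assumed value: 4-byte ICRC plus 1 tail byte (opa_vnic headers). */
#define OPA_VNIC_ICRC_TAIL_LEN 5

int main(void)
{
	unsigned int len = 110;        /* hypothetical skb->len            */
	unsigned char pad_info = 0x03; /* last byte; low 3 bits = pad len  */
	unsigned int trimmed;

	/* Same arithmetic as the skb_trim() call above. */
	trimmed = len - OPA_VNIC_ICRC_TAIL_LEN - (pad_info & 0x7);
	printf("trim %u -> %u bytes\n", len, trimmed);
	return 0;
}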
static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
struct net_device *netdev = vinfo->netdev;
- int i, rc;
+ int rc;
/* ensure virtual eth switch id is valid */
if (!vinfo->vesw_id)
return -EINVAL;
- rc = xa_insert(&dd->vnic.vesws, vinfo->vesw_id, vinfo, GFP_KERNEL);
+ rc = hfi1_netdev_add_data(dd, VNIC_ID(vinfo->vesw_id), vinfo);
if (rc < 0)
return rc;
- for (i = 0; i < vinfo->num_rx_q; i++) {
- struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
-
- skb_queue_head_init(&rxq->skbq);
- napi_enable(&rxq->napi);
- }
+ rc = hfi1_netdev_rx_init(dd);
+ if (rc)
+ goto err_remove;
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
set_bit(HFI1_VNIC_UP, &vinfo->flags);
return 0;
+
+err_remove:
+ hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
+ return rc;
}
static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
- u8 i;
clear_bit(HFI1_VNIC_UP, &vinfo->flags);
netif_carrier_off(vinfo->netdev);
netif_tx_disable(vinfo->netdev);
- xa_erase(&dd->vnic.vesws, vinfo->vesw_id);
-
- /* ensure irqs see the change */
- msix_vnic_synchronize_irq(dd);
+ hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
- /* remove unread skbs */
- for (i = 0; i < vinfo->num_rx_q; i++) {
- struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
-
- napi_disable(&rxq->napi);
- skb_queue_purge(&rxq->skbq);
- }
+ hfi1_netdev_rx_destroy(dd);
}
static int hfi1_netdev_open(struct net_device *netdev)
@@ -660,70 +506,31 @@ static int hfi1_netdev_close(struct net_device *netdev)
return 0;
}
-static int hfi1_vnic_allot_ctxt(struct hfi1_devdata *dd,
- struct hfi1_ctxtdata **vnic_ctxt)
-{
- int rc;
-
- rc = allocate_vnic_ctxt(dd, vnic_ctxt);
- if (rc) {
- dd_dev_err(dd, "vnic ctxt alloc failed %d\n", rc);
- return rc;
- }
-
- rc = setup_vnic_ctxt(dd, *vnic_ctxt);
- if (rc) {
- dd_dev_err(dd, "vnic ctxt setup failed %d\n", rc);
- deallocate_vnic_ctxt(dd, *vnic_ctxt);
- *vnic_ctxt = NULL;
- }
-
- return rc;
-}
-
static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
- int i, rc = 0;
+ int rc = 0;
mutex_lock(&hfi1_mutex);
- if (!dd->vnic.num_vports) {
+ if (!dd->vnic_num_vports) {
rc = hfi1_vnic_txreq_init(dd);
if (rc)
goto txreq_fail;
}
- for (i = dd->vnic.num_ctxt; i < vinfo->num_rx_q; i++) {
- rc = hfi1_vnic_allot_ctxt(dd, &dd->vnic.ctxt[i]);
- if (rc)
- break;
- hfi1_rcd_get(dd->vnic.ctxt[i]);
- dd->vnic.ctxt[i]->vnic_q_idx = i;
- }
-
- if (i < vinfo->num_rx_q) {
- /*
- * If required amount of contexts is not
- * allocated successfully then remaining contexts
- * are released.
- */
- while (i-- > dd->vnic.num_ctxt) {
- deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
- hfi1_rcd_put(dd->vnic.ctxt[i]);
- dd->vnic.ctxt[i] = NULL;
- }
+ rc = hfi1_netdev_rx_init(dd);
+ if (rc) {
+ dd_dev_err(dd, "Unable to initialize netdev contexts\n");
goto alloc_fail;
}
- if (dd->vnic.num_ctxt != i) {
- dd->vnic.num_ctxt = i;
- hfi1_init_vnic_rsm(dd);
- }
+ hfi1_init_vnic_rsm(dd);
- dd->vnic.num_vports++;
+ dd->vnic_num_vports++;
hfi1_vnic_sdma_init(vinfo);
+
alloc_fail:
- if (!dd->vnic.num_vports)
+ if (!dd->vnic_num_vports)
hfi1_vnic_txreq_deinit(dd);
txreq_fail:
mutex_unlock(&hfi1_mutex);
@@ -733,20 +540,14 @@ txreq_fail:
static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
- int i;
mutex_lock(&hfi1_mutex);
- if (--dd->vnic.num_vports == 0) {
- for (i = 0; i < dd->vnic.num_ctxt; i++) {
- deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
- hfi1_rcd_put(dd->vnic.ctxt[i]);
- dd->vnic.ctxt[i] = NULL;
- }
+ if (--dd->vnic_num_vports == 0) {
hfi1_deinit_vnic_rsm(dd);
- dd->vnic.num_ctxt = 0;
hfi1_vnic_txreq_deinit(dd);
}
mutex_unlock(&hfi1_mutex);
+ hfi1_netdev_rx_destroy(dd);
}
static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id)
@@ -804,7 +605,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
struct rdma_netdev *rn;
int i, size, rc;
- if (!dd->num_vnic_contexts)
+ if (!dd->num_netdev_contexts)
return ERR_PTR(-ENOMEM);
if (!port_num || (port_num > dd->num_pports))
@@ -815,15 +616,16 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
- dd->num_sdma, dd->num_vnic_contexts);
+ chip_sdma_engines(dd),
+ dd->num_netdev_contexts);
if (!netdev)
return ERR_PTR(-ENOMEM);
rn = netdev_priv(netdev);
vinfo = opa_vnic_dev_priv(netdev);
vinfo->dd = dd;
- vinfo->num_tx_q = dd->num_sdma;
- vinfo->num_rx_q = dd->num_vnic_contexts;
+ vinfo->num_tx_q = chip_sdma_engines(dd);
+ vinfo->num_rx_q = dd->num_netdev_contexts;
vinfo->netdev = netdev;
rn->free_rdma_netdev = hfi1_vnic_free_rn;
rn->set_id = hfi1_vnic_set_vesw_id;
@@ -841,7 +643,6 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
rxq->idx = i;
rxq->vinfo = vinfo;
rxq->netdev = netdev;
- netif_napi_add(netdev, &rxq->napi, hfi1_vnic_napi, 64);
}
rc = hfi1_vnic_init(vinfo);
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 8a522e14ef62..5b2f9314edd3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -39,13 +39,14 @@
#define HNS_ROCE_VLAN_SL_BIT_MASK 7
#define HNS_ROCE_VLAN_SL_SHIFT 13
-int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
struct hns_roce_ah *ah = to_hr_ah(ibah);
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
u16 vlan_id = 0xffff;
bool vlan_en = false;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index da574c26e063..a522cb2d29ea 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -157,84 +157,78 @@ void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
kfree(bitmap->table);
}
-void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
- struct hns_roce_buf *buf)
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
- int i;
struct device *dev = hr_dev->dev;
+ u32 size = buf->size;
+ int i;
+
+ if (size == 0)
+ return;
- if (buf->nbufs == 1) {
+ buf->size = 0;
+
+ if (hns_roce_buf_is_direct(buf)) {
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
- for (i = 0; i < buf->nbufs; ++i)
+ for (i = 0; i < buf->npages; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(dev, 1 << buf->page_shift,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
+ buf->page_list = NULL;
}
}
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
struct hns_roce_buf *buf, u32 page_shift)
{
- int i = 0;
- dma_addr_t t;
+ struct hns_roce_buf_list *buf_list;
struct device *dev = hr_dev->dev;
- u32 page_size = 1 << page_shift;
- u32 order;
+ u32 page_size;
+ int i;
- /* SQ/RQ buf lease than one page, SQ + RQ = 8K */
+ /* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
+ buf->page_shift = max_t(int, HNS_HW_PAGE_SHIFT, page_shift);
+
+ page_size = 1 << buf->page_shift;
+ buf->npages = DIV_ROUND_UP(size, page_size);
+
+ /* required size is not bigger than one trunk size */
if (size <= max_direct) {
- buf->nbufs = 1;
- /* Npages calculated by page_size */
- order = get_order(size);
- if (order <= page_shift - PAGE_SHIFT)
- order = 0;
- else
- order -= page_shift - PAGE_SHIFT;
- buf->npages = 1 << order;
- buf->page_shift = page_shift;
- /* MTT PA must be recorded in 4k alignment, t is 4k aligned */
- buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+ buf->page_list = NULL;
+ buf->direct.buf = dma_alloc_coherent(dev, size,
+ &buf->direct.map,
GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
} else {
- buf->nbufs = (size + page_size - 1) / page_size;
- buf->npages = buf->nbufs;
- buf->page_shift = page_shift;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
-
- if (!buf->page_list)
+ buf_list = kcalloc(buf->npages, sizeof(*buf_list), GFP_KERNEL);
+ if (!buf_list)
return -ENOMEM;
- for (i = 0; i < buf->nbufs; ++i) {
- buf->page_list[i].buf = dma_alloc_coherent(dev,
- page_size,
- &t,
- GFP_KERNEL);
-
- if (!buf->page_list[i].buf)
- goto err_free;
+ for (i = 0; i < buf->npages; i++) {
+ buf_list[i].buf = dma_alloc_coherent(dev, page_size,
+ &buf_list[i].map,
+ GFP_KERNEL);
+ if (!buf_list[i].buf)
+ break;
+ }
- buf->page_list[i].map = t;
+ if (i != buf->npages && i > 0) {
+ while (i-- > 0)
+ dma_free_coherent(dev, page_size,
+ buf_list[i].buf,
+ buf_list[i].map);
+ kfree(buf_list);
+ return -ENOMEM;
}
+ buf->page_list = buf_list;
}
+ buf->size = size;
return 0;
-
-err_free:
- hns_roce_buf_free(hr_dev, size, buf);
- return -ENOMEM;
}
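The rewritten hns_roce_buf_alloc() clamps the page shift to the 4K hardware minimum, derives the page count up front, and only falls back to a per-page list when the request exceeds max_direct. A userspace sketch of just that sizing decision (HNS_HW_PAGE_SHIFT mirrored from the header hunk; the sizes passed in are examples):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define HNS_HW_PAGE_SHIFT  12 /* 4K hardware minimum, per the header hunk */

/* Mirrors the sizing logic at the top of hns_roce_buf_alloc(). */
static void size_buf(unsigned int size, unsigned int max_direct,
		     unsigned int page_shift)
{
	unsigned int shift = page_shift > HNS_HW_PAGE_SHIFT ?
			     page_shift : HNS_HW_PAGE_SHIFT;
	unsigned int page_size = 1u << shift;
	unsigned int npages = DIV_ROUND_UP(size, page_size);

	printf("size %u -> %u page(s) of %u bytes, %s\n",
	       size, npages, page_size,
	       size <= max_direct ? "direct (coherent) buffer"
				  : "per-page list");
}

int main(void)
{
	size_buf(4096, 8192, 12);  /* small buffer: one direct allocation */
	size_buf(65536, 8192, 12); /* large buffer: 16-entry page list    */
	return 0;
}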
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
@@ -246,33 +240,30 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
end = start + buf_cnt;
if (end > buf->npages) {
dev_err(hr_dev->dev,
- "invalid kmem region,offset %d,buf_cnt %d,total %d!\n",
+ "Failed to check kmem bufs, end %d + %d total %d!\n",
start, buf_cnt, buf->npages);
return -EINVAL;
}
total = 0;
for (i = start; i < end; i++)
- if (buf->nbufs == 1)
- bufs[total++] = buf->direct.map +
- ((dma_addr_t)i << buf->page_shift);
- else
- bufs[total++] = buf->page_list[i].map;
+ bufs[total++] = hns_roce_buf_page(buf, i);
return total;
}
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct ib_umem *umem,
- int page_shift)
+ unsigned int page_shift)
{
struct ib_block_iter biter;
int total = 0;
int idx = 0;
u64 addr;
- if (page_shift < PAGE_SHIFT) {
- dev_err(hr_dev->dev, "invalid page shift %d!\n", page_shift);
+ if (page_shift < HNS_HW_PAGE_SHIFT) {
+ dev_err(hr_dev->dev, "Failed to check umem page shift %d!\n",
+ page_shift);
return -EINVAL;
}
@@ -292,49 +283,6 @@ done:
return total;
}
-void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
- int offset, int buf_cnt)
-{
- if (hopnum == HNS_ROCE_HOP_NUM_0)
- region->hopnum = 0;
- else
- region->hopnum = hopnum;
-
- region->offset = offset;
- region->count = buf_cnt;
-}
-
-void hns_roce_free_buf_list(dma_addr_t **bufs, int region_cnt)
-{
- int i;
-
- for (i = 0; i < region_cnt; i++) {
- kfree(bufs[i]);
- bufs[i] = NULL;
- }
-}
-
-int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
- dma_addr_t **bufs, int region_cnt)
-{
- struct hns_roce_buf_region *r;
- int i;
-
- for (i = 0; i < region_cnt; i++) {
- r = &regions[i];
- bufs[i] = kcalloc(r->count, sizeof(dma_addr_t), GFP_KERNEL);
- if (!bufs[i])
- goto err_alloc;
- }
-
- return 0;
-
-err_alloc:
- hns_roce_free_buf_list(bufs, i);
-
- return -ENOMEM;
-}
-
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 8e95a1aa1b4f..f5669ff8cfeb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -33,10 +33,6 @@
#ifndef _HNS_ROCE_COMMON_H
#define _HNS_ROCE_COMMON_H
-#ifndef assert
-#define assert(cond)
-#endif
-
#define roce_write(dev, reg, val) writel((val), (dev)->reg_base + (reg))
#define roce_read(dev, reg) readl((dev)->reg_base + (reg))
#define roce_raw_write(value, addr) \
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 5bfb52ffd590..e87d616f7988 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,51 +39,40 @@
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
-static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
+static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cmd_mailbox *mailbox;
- struct hns_roce_hem_table *mtt_table;
struct hns_roce_cq_table *cq_table;
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 mtts[MTT_MIN_COUNT] = { 0 };
dma_addr_t dma_handle;
- u64 *mtts;
int ret;
- cq_table = &hr_dev->cq_table;
-
- /* Get the physical address of cq buf */
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- mtt_table = &hr_dev->mr_table.mtt_cqe_table;
- else
- mtt_table = &hr_dev->mr_table.mtt_table;
-
- mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
- &dma_handle);
-
- if (!mtts) {
- dev_err(dev, "Failed to find mtt for CQ buf.\n");
+ ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
+ &dma_handle);
+ if (ret < 1) {
+ ibdev_err(ibdev, "Failed to find CQ mtr\n");
return -EINVAL;
}
+ cq_table = &hr_dev->cq_table;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
if (ret) {
- dev_err(dev, "Num of CQ out of range.\n");
+ ibdev_err(ibdev, "Failed to alloc CQ bitmap, err %d\n", ret);
return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
- dev_err(dev,
- "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
- ret, hr_cq->cqn);
+ ibdev_err(ibdev, "Failed to get CQ(0x%lx) context, err %d\n",
+ hr_cq->cqn, ret);
goto err_out;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- dev_err(dev, "Failed to xa_store CQ.\n");
+ ibdev_err(ibdev, "Failed to xa_store CQ\n");
goto err_put;
}
@@ -101,9 +90,9 @@ static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(dev,
- "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
- ret, hr_cq->cqn);
+ ibdev_err(ibdev,
+ "Failed to send create cmd for CQ(0x%lx), err %d\n",
+ hr_cq->cqn, ret);
goto err_xa;
}
@@ -126,7 +115,7 @@ err_out:
return ret;
}
-void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
@@ -153,190 +142,86 @@ void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
-static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
- struct hns_roce_ib_create_cq ucmd,
- struct ib_udata *udata)
-{
- struct hns_roce_buf *buf = &hr_cq->buf;
- struct hns_roce_mtt *mtt = &hr_cq->mtt;
- struct ib_umem **umem = &hr_cq->umem;
- u32 npages;
- int ret;
-
- *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(*umem))
- return PTR_ERR(*umem);
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- mtt->mtt_type = MTT_TYPE_CQE;
- else
- mtt->mtt_type = MTT_TYPE_WQE;
-
- npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
- 1 << hr_dev->caps.cqe_buf_pg_sz);
- ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
- if (ret)
- goto err_buf;
-
- ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
- if (ret)
- goto err_mtt;
-
- return 0;
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, mtt);
-
-err_buf:
- ib_umem_release(*umem);
- return ret;
-}
-
-static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata, unsigned long addr)
{
- struct hns_roce_buf *buf = &hr_cq->buf;
- struct hns_roce_mtt *mtt = &hr_cq->mtt;
- int ret;
-
- ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
- buf, buf->page_shift);
- if (ret)
- goto out;
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- mtt->mtt_type = MTT_TYPE_CQE;
- else
- mtt->mtt_type = MTT_TYPE_WQE;
-
- ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
- if (ret)
- goto err_buf;
-
- ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
- if (ret)
- goto err_mtt;
-
- return 0;
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, mtt);
-
-err_buf:
- hns_roce_buf_free(hr_dev, buf->size, buf);
-
-out:
- return ret;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+ buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+ err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+ hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, addr);
+ if (err)
+ ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);
+
+ return err;
}
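alloc_cq_buf() now describes the CQE buffer declaratively through hns_roce_buf_attr and lets the mtr layer do the allocation and mapping. A small sketch of how those attribute values are derived, using hypothetical capability numbers for illustration:

#include <stdio.h>

#define HNS_HW_PAGE_SHIFT 12

int main(void)
{
	/* Hypothetical capability values, for illustration only. */
	unsigned int cqe_buf_pg_sz = 0; /* extra page order from caps    */
	unsigned int cq_entry_sz = 32;  /* bytes per CQE                 */
	unsigned int cq_depth = 1024;   /* entries requested by the user */

	/* Same shape as the buf_attr setup in alloc_cq_buf() above. */
	unsigned int page_shift = cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	unsigned int region_size = cq_depth * cq_entry_sz;

	printf("CQ region: %u bytes in %u-byte pages\n",
	       region_size, 1u << page_shift);
	return 0;
}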
static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
- hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
+ hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}
-static int create_user_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq,
- struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp)
+static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata, unsigned long addr,
+ struct hns_roce_ib_create_cq_resp *resp)
{
- struct hns_roce_ib_create_cq ucmd;
- struct device *dev = hr_dev->dev;
- int ret;
- struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
-
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- dev_err(dev, "Failed to copy_from_udata.\n");
- return -EFAULT;
- }
+ bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
+ struct hns_roce_ucontext *uctx;
+ int err;
- /* Get user space address, write it into mtt table */
- ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
- if (ret) {
- dev_err(dev, "Failed to get_cq_umem.\n");
- return ret;
- }
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
- udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
- ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
- &hr_cq->db);
- if (ret) {
- dev_err(dev, "cq record doorbell map failed!\n");
- goto err_mtt;
+ if (udata) {
+ if (has_db &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ err = hns_roce_db_map_user(uctx, udata, addr,
+ &hr_cq->db);
+ if (err)
+ return err;
+ hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
}
- hr_cq->db_en = 1;
- resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
- }
-
- return 0;
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- ib_umem_release(hr_cq->umem);
-
- return ret;
-}
-
-static int create_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
-{
- struct device *dev = hr_dev->dev;
- int ret;
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
- ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
- if (ret)
- return ret;
-
- hr_cq->set_ci_db = hr_cq->db.db_record;
- *hr_cq->set_ci_db = 0;
- hr_cq->db_en = 1;
- }
-
- /* Init mtt table and write buff address to mtt table */
- ret = alloc_cq_buf(hr_dev, hr_cq);
- if (ret) {
- dev_err(dev, "Failed to alloc_cq_buf.\n");
- goto err_db;
+ } else {
+ if (has_db) {
+ err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+ if (err)
+ return err;
+ hr_cq->set_ci_db = hr_cq->db.db_record;
+ *hr_cq->set_ci_db = 0;
+ hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+ }
+ hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
}
- hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
-
return 0;
-
-err_db:
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
- hns_roce_free_db(hr_dev, &hr_cq->db);
-
- return ret;
}
-static void destroy_user_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq,
- struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp)
+static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata)
{
- struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
+ struct hns_roce_ucontext *uctx;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
- udata->outlen >= offsetofend(typeof(*resp), cap_flags))
- hns_roce_db_unmap_user(context, &hr_cq->db);
-
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- ib_umem_release(hr_cq->umem);
-}
-
-static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
-{
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- free_cq_buf(hr_dev, hr_cq);
+ if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
+ return;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+ hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
+ if (udata) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext,
+ ibucontext);
+ hns_roce_db_unmap_user(uctx, &hr_cq->db);
+ } else {
hns_roce_free_db(hr_dev, &hr_cq->db);
+ }
}
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
@@ -345,20 +230,21 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_cq ucmd = {};
int vector = attr->comp_vector;
u32 cq_entries = attr->cqe;
int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
- cq_entries, hr_dev->caps.max_cqes);
+ ibdev_err(ibdev, "Failed to check CQ count %d max=%d\n",
+ cq_entries, hr_dev->caps.max_cqes);
return -EINVAL;
}
if (vector >= hr_dev->caps.num_comp_vectors) {
- dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
- vector, hr_dev->caps.num_comp_vectors);
+ ibdev_err(ibdev, "Failed to check CQ vector=%d max=%d\n",
+ vector, hr_dev->caps.num_comp_vectors);
return -EINVAL;
}
@@ -367,30 +253,35 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
hr_cq->cq_depth = cq_entries;
hr_cq->vector = vector;
- hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
- hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
spin_lock_init(&hr_cq->lock);
INIT_LIST_HEAD(&hr_cq->sq_list);
INIT_LIST_HEAD(&hr_cq->rq_list);
if (udata) {
- ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
- if (ret) {
- dev_err(dev, "Create cq failed in user mode!\n");
- goto err_cq;
- }
- } else {
- ret = create_kernel_cq(hr_dev, hr_cq);
+ ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (ret) {
- dev_err(dev, "Create cq failed in kernel mode!\n");
- goto err_cq;
+ ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n",
+ ret);
+ return ret;
}
}
- ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
+ ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
+ return ret;
+ }
+
+ ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
if (ret) {
- dev_err(dev, "Alloc CQ failed(%d).\n", ret);
- goto err_dbmap;
+ ibdev_err(ibdev, "Failed to alloc CQ db, err %d\n", ret);
+ goto err_cq_buf;
+ }
+
+ ret = alloc_cqc(hr_dev, hr_cq);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc CQ context, err %d\n", ret);
+ goto err_cq_db;
}
/*
@@ -412,15 +303,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
return 0;
err_cqc:
- hns_roce_free_cqc(hr_dev, hr_cq);
-
-err_dbmap:
- if (udata)
- destroy_user_cq(hr_dev, hr_cq, udata, &resp);
- else
- destroy_kernel_cq(hr_dev, hr_cq);
-
-err_cq:
+ free_cqc(hr_dev, hr_cq);
+err_cq_db:
+ free_cq_db(hr_dev, hr_cq, udata);
+err_cq_buf:
+ free_cq_buf(hr_dev, hr_cq);
return ret;
}
@@ -429,28 +316,12 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- if (hr_dev->hw->destroy_cq) {
+ if (hr_dev->hw->destroy_cq)
hr_dev->hw->destroy_cq(ib_cq, udata);
- return;
- }
-
- hns_roce_free_cqc(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- ib_umem_release(hr_cq->umem);
- if (udata) {
- if (hr_cq->db_en == 1)
- hns_roce_db_unmap_user(rdma_udata_to_drv_context(
- udata,
- struct hns_roce_ucontext,
- ibucontext),
- &hr_cq->db);
- } else {
- /* Free the buff of stored cq */
- free_cq_buf(hr_dev, hr_cq);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
- hns_roce_free_db(hr_dev, &hr_cq->db);
- }
+ free_cq_buf(hr_dev, hr_cq);
+ free_cq_db(hr_dev, hr_cq, udata);
+ free_cqc(hr_dev, hr_cq);
}
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index f6b3cf6b95d6..a77fa6730b2d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -66,6 +66,8 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
+#define HNS_ROCE_RESERVED_SGE 1
+
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
@@ -131,12 +133,12 @@ enum {
};
enum {
- HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
- HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
+ HNS_ROCE_QP_CAP_RQ_RECORD_DB = BIT(0),
+ HNS_ROCE_QP_CAP_SQ_RECORD_DB = BIT(1),
};
-enum {
- HNS_ROCE_SUPPORT_CQ_RECORD_DB = 1 << 0,
+enum hns_roce_cq_flags {
+ HNS_ROCE_CQ_FLAG_RECORD_DB = BIT(0),
};
enum hns_roce_qp_state {
@@ -209,6 +211,8 @@ enum {
HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
};
+#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
+
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
@@ -222,13 +226,6 @@ enum {
HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
};
-enum hns_roce_mtt_type {
- MTT_TYPE_WQE,
- MTT_TYPE_CQE,
- MTT_TYPE_SRQWQE,
- MTT_TYPE_IDX
-};
-
#define HNS_ROCE_DB_TYPE_COUNT 2
#define HNS_ROCE_DB_UNIT_SIZE 4
@@ -267,9 +264,12 @@ enum {
#define HNS_ROCE_PORT_DOWN 0
#define HNS_ROCE_PORT_UP 1
-#define HNS_ROCE_MTT_ENTRY_PER_SEG 8
+/* The minimum page size is 4K for hardware */
+#define HNS_HW_PAGE_SHIFT 12
+#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
-#define PAGE_ADDR_SHIFT 12
+/* The minimum page count for hardware to access a page directly. */
+#define HNS_HW_DIRECT_PAGE_COUNT 2
struct hns_roce_uar {
u64 pfn;
@@ -300,22 +300,6 @@ struct hns_roce_bitmap {
unsigned long *table;
};
-/* Order bitmap length -- bit num compute formula: 1 << (max_order - order) */
-/* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
-/* Every bit repesent to a partner free/used status in bitmap */
-/*
- * Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
- * Bit = 1 represent to idle and available; bit = 0: not available
- */
-struct hns_roce_buddy {
- /* Members point to every order level bitmap */
- unsigned long **bits;
- /* Represent to avail bits of the order level bitmap */
- u32 *num_free;
- int max_order;
- spinlock_t lock;
-};
-
/* For Hardware Entry Memory */
struct hns_roce_hem_table {
/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
@@ -336,13 +320,6 @@ struct hns_roce_hem_table {
dma_addr_t *bt_l0_dma_addr;
};
-struct hns_roce_mtt {
- unsigned long first_seg;
- int order;
- int page_shift;
- enum hns_roce_mtt_type mtt_type;
-};
-
struct hns_roce_buf_region {
int offset; /* page offset */
u32 count; /* page count */
@@ -357,13 +334,34 @@ struct hns_roce_hem_list {
struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
struct list_head btm_bt; /* link all bottom bt in @mid_bt */
dma_addr_t root_ba; /* pointer to the root ba table */
- int bt_pg_shift;
+};
+
+struct hns_roce_buf_attr {
+ struct {
+ size_t size; /* region size */
+ int hopnum; /* multi-hop addressing hop num */
+ } region[HNS_ROCE_MAX_BT_REGION];
+ int region_count; /* valid region count */
+ unsigned int page_shift; /* buffer page shift */
+ bool fixed_page; /* whether the page shift is a fixed size or a maximum */
+ int user_access; /* umem access flag */
+ bool mtt_only; /* only alloc buffer-required MTT memory */
};
/* memory translate region */
struct hns_roce_mtr {
- struct hns_roce_hem_list hem_list;
- int buf_pg_shift;
+ struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
+ struct ib_umem *umem; /* user space buffer */
+ struct hns_roce_buf *kmem; /* kernel space buffer */
+ struct {
+ dma_addr_t root_ba; /* root BA table's address */
+ bool is_direct; /* addressing without BA table */
+ unsigned int ba_pg_shift; /* BA table page shift */
+ unsigned int buf_pg_shift; /* buffer page shift */
+ int buf_pg_count; /* buffer page count */
+ struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
+ unsigned int region_count;
+ } hem_cfg; /* config for hardware addressing */
};
struct hns_roce_mw {
@@ -381,43 +379,22 @@ struct hns_roce_mw {
struct hns_roce_mr {
struct ib_mr ibmr;
- struct ib_umem *umem;
u64 iova; /* MR's virtual original addr */
u64 size; /* Address range of MR */
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
u32 access; /* Access permission of MR */
- u32 npages;
int enabled; /* MR's active status */
int type; /* MR's register type */
- u64 *pbl_buf; /* MR's PBL space */
- dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
- u32 pbl_size; /* PA number in the PBL */
- u64 pbl_ba; /* page table address */
- u32 l0_chunk_last_num; /* L0 last number */
- u32 l1_chunk_last_num; /* L1 last number */
- u64 **pbl_bt_l2; /* PBL BT L2 */
- u64 **pbl_bt_l1; /* PBL BT L1 */
- u64 *pbl_bt_l0; /* PBL BT L0 */
- dma_addr_t *pbl_l2_dma_addr; /* PBL BT L2 dma addr */
- dma_addr_t *pbl_l1_dma_addr; /* PBL BT L1 dma addr */
- dma_addr_t pbl_l0_dma_addr; /* PBL BT L0 dma addr */
- u32 pbl_ba_pg_sz; /* BT chunk page size */
- u32 pbl_buf_pg_sz; /* buf chunk page size */
u32 pbl_hop_num; /* multi-hop number */
+ struct hns_roce_mtr pbl_mtr;
+ u32 npages;
+ dma_addr_t *page_list;
};
struct hns_roce_mr_table {
struct hns_roce_bitmap mtpt_bitmap;
- struct hns_roce_buddy mtt_buddy;
- struct hns_roce_hem_table mtt_table;
struct hns_roce_hem_table mtpt_table;
- struct hns_roce_buddy mtt_cqe_buddy;
- struct hns_roce_hem_table mtt_cqe_table;
- struct hns_roce_buddy mtt_srqwqe_buddy;
- struct hns_roce_hem_table mtt_srqwqe_table;
- struct hns_roce_buddy mtt_idx_buddy;
- struct hns_roce_hem_table mtt_idx_table;
};
struct hns_roce_wq {
@@ -433,7 +410,7 @@ struct hns_roce_wq {
};
struct hns_roce_sge {
- int sge_cnt; /* SGE num */
+ unsigned int sge_cnt; /* SGE num */
int offset;
int sge_shift; /* SGE size */
};
@@ -446,10 +423,9 @@ struct hns_roce_buf_list {
struct hns_roce_buf {
struct hns_roce_buf_list direct;
struct hns_roce_buf_list *page_list;
- int nbufs;
u32 npages;
u32 size;
- int page_shift;
+ unsigned int page_shift;
};
struct hns_roce_db_pgdir {
@@ -482,12 +458,10 @@ struct hns_roce_db {
struct hns_roce_cq {
struct ib_cq ib_cq;
- struct hns_roce_buf buf;
- struct hns_roce_mtt mtt;
+ struct hns_roce_mtr mtr;
struct hns_roce_db db;
- u8 db_en;
+ u32 flags;
spinlock_t lock;
- struct ib_umem *umem;
u32 cq_depth;
u32 cons_index;
u32 *set_ci_db;
@@ -505,11 +479,8 @@ struct hns_roce_cq {
};
struct hns_roce_idx_que {
- struct hns_roce_buf idx_buf;
- int entry_sz;
- u32 buf_size;
- struct ib_umem *umem;
- struct hns_roce_mtt mtt;
+ struct hns_roce_mtr mtr;
+ int entry_shift;
unsigned long *bitmap;
};
@@ -524,10 +495,9 @@ struct hns_roce_srq {
atomic_t refcount;
struct completion free;
- struct hns_roce_buf buf;
+ struct hns_roce_mtr buf_mtr;
+
u64 *wrid;
- struct ib_umem *umem;
- struct hns_roce_mtt mtt;
struct hns_roce_idx_que idx_que;
spinlock_t lock;
int head;
@@ -656,20 +626,15 @@ struct hns_roce_work {
struct hns_roce_qp {
struct ib_qp ibqp;
- struct hns_roce_buf hr_buf;
struct hns_roce_wq rq;
struct hns_roce_db rdb;
struct hns_roce_db sdb;
- u8 rdb_en;
- u8 sdb_en;
+ unsigned long en_flags;
u32 doorbell_qpn;
u32 sq_signal_bits;
struct hns_roce_wq sq;
- struct ib_umem *umem;
- struct hns_roce_mtt mtt;
struct hns_roce_mtr mtr;
- int wqe_bt_pg_shift;
u32 buff_size;
struct mutex mutex;
@@ -769,17 +734,11 @@ struct hns_roce_eq {
int over_ignore;
int coalesce;
int arm_st;
- u64 eqe_ba;
- int eqe_ba_pg_sz;
- int eqe_buf_pg_sz;
int hop_num;
struct hns_roce_mtr mtr;
- struct hns_roce_buf buf;
- int eq_max_cnt;
+ u16 eq_max_cnt;
int eq_period;
int shift;
- dma_addr_t cur_eqe_ba;
- dma_addr_t nxt_eqe_ba;
int event_type;
int sub_type;
};
@@ -1102,15 +1061,67 @@ static inline struct hns_roce_qp
return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
}
+static inline bool hns_roce_buf_is_direct(struct hns_roce_buf *buf)
+{
+ if (buf->page_list)
+ return false;
+
+ return true;
+}
+
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
- u32 page_size = 1 << buf->page_shift;
+ if (hns_roce_buf_is_direct(buf))
+ return (char *)(buf->direct.buf) + (offset & (buf->size - 1));
- if (buf->nbufs == 1)
- return (char *)(buf->direct.buf) + offset;
+ return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
+ (offset & ((1 << buf->page_shift) - 1));
+}
+
+static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, int idx)
+{
+ if (hns_roce_buf_is_direct(buf))
+ return buf->direct.map + ((dma_addr_t)idx << buf->page_shift);
else
- return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
- (offset & (page_size - 1));
+ return buf->page_list[idx].map;
+}
+
+#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
+
+static inline u64 to_hr_hw_page_addr(u64 addr)
+{
+ return addr >> HNS_HW_PAGE_SHIFT;
+}
+
+static inline u32 to_hr_hw_page_shift(u32 page_shift)
+{
+ return page_shift - HNS_HW_PAGE_SHIFT;
+}
+
+static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
+{
+ if (count > 0)
+ return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;
+
+ return 0;
+}
+
+static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
+{
+ return hr_hw_page_align(count << buf_shift);
+}
+
+static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
+{
+ return hr_hw_page_align(count << buf_shift) >> buf_shift;
+}
+
+static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
+{
+ if (!count)
+ return 0;
+
+ return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
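The inline helpers above translate between driver byte offsets or entry counts and the 4K hardware page units. A compact userspace sketch of the same arithmetic (HNS_HW_PAGE_SHIFT mirrored from the hunk; the ALIGN() definition is the usual power-of-two round-up, assumed here, and the offsets are examples):

#include <stdio.h>

#define HNS_HW_PAGE_SHIFT 12
#define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))
#define hr_hw_page_align(x) ALIGN(x, 1u << HNS_HW_PAGE_SHIFT)

int main(void)
{
	unsigned int count = 100, buf_shift = 5; /* 100 entries of 32 B */
	unsigned int size, entries;
	unsigned int offset = 9300; /* hypothetical byte offset */

	/* to_hr_hem_entries_size(): pad the byte size to a 4K boundary. */
	size = hr_hw_page_align(count << buf_shift);
	/* to_hr_hem_entries_count(): entries that fit the padded size. */
	entries = size >> buf_shift;

	/* hns_roce_buf_offset(), paged case: split offset into page + byte. */
	printf("%u x %u B -> %u B, %u entries; offset %u -> page %u + %u\n",
	       count, 1u << buf_shift, size, entries,
	       offset, offset >> HNS_HW_PAGE_SHIFT,
	       offset & ((1u << HNS_HW_PAGE_SHIFT) - 1));
	return 0;
}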
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
@@ -1125,25 +1136,18 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
-int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
- struct hns_roce_mtt *mtt);
-void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt);
-int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);
-
-void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift,
- int buf_pg_shift);
-int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t **bufs, struct hns_roce_buf_region *regions,
- int region_cnt);
-void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtr *mtr);
-
/* HNS RoCE HW needs the current and the next block address from the MTT */
#define MTT_MIN_COUNT 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
+int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int page_shift, struct ib_udata *udata,
+ unsigned long user_addr);
+void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr);
+int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, int page_cnt);
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
@@ -1171,8 +1175,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
unsigned long obj, int cnt,
int rr);
-int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
+int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
@@ -1200,25 +1204,15 @@ struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);
-void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
- struct hns_roce_buf *buf);
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
struct hns_roce_buf *buf, u32 page_shift);
-int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct ib_umem *umem);
-
-void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
- int offset, int buf_cnt);
-int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
- dma_addr_t **bufs, int count);
-void hns_roce_free_buf_list(dma_addr_t **bufs, int count);
-
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct hns_roce_buf *buf);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct ib_umem *umem,
- int page_shift);
+ unsigned int page_shift);
int hns_roce_create_srq(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
@@ -1254,8 +1248,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
-
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 263338b90d7a..c8db6f8ae018 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -75,18 +75,6 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
case HEM_TYPE_CQC_TIMER:
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
- case HEM_TYPE_CQE:
- hop_num = hr_dev->caps.cqe_hop_num;
- break;
- case HEM_TYPE_MTT:
- hop_num = hr_dev->caps.mtt_hop_num;
- break;
- case HEM_TYPE_SRQWQE:
- hop_num = hr_dev->caps.srqwqe_hop_num;
- break;
- case HEM_TYPE_IDX:
- hop_num = hr_dev->caps.idx_hop_num;
- break;
default:
return false;
}
@@ -195,38 +183,6 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
mhop->hop_num = hr_dev->caps.srqc_hop_num;
break;
- case HEM_TYPE_MTT:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.mtt_hop_num;
- break;
- case HEM_TYPE_CQE:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.cqe_hop_num;
- break;
- case HEM_TYPE_SRQWQE:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.srqwqe_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
- break;
- case HEM_TYPE_IDX:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.idx_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.idx_hop_num;
- break;
default:
dev_err(dev, "Table %d not support multi-hop addressing!\n",
type);
@@ -899,57 +855,6 @@ out:
return addr;
}
-int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end)
-{
- struct hns_roce_hem_mhop mhop;
- unsigned long inc = table->table_chunk_size / table->obj_size;
- unsigned long i = 0;
- int ret;
-
- if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
- ret = hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
- if (ret)
- goto fail;
- inc = mhop.bt_chunk_size / table->obj_size;
- }
-
- /* Allocate MTT entry memory according to chunk(128K) */
- for (i = start; i <= end; i += inc) {
- ret = hns_roce_table_get(hr_dev, table, i);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- while (i > start) {
- i -= inc;
- hns_roce_table_put(hr_dev, table, i);
- }
- return ret;
-}
-
-void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end)
-{
- struct hns_roce_hem_mhop mhop;
- unsigned long inc = table->table_chunk_size / table->obj_size;
- unsigned long i;
-
- if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
- if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
- return;
- inc = mhop.bt_chunk_size / table->obj_size;
- }
-
- for (i = start; i <= end; i += inc)
- hns_roce_table_put(hr_dev, table, i);
-}
-
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
unsigned long obj_size, unsigned long nobj,
@@ -1112,12 +1017,6 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
- if ((hr_dev->caps.num_idx_segs))
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_idx_table);
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table);
if (hr_dev->caps.srqc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->srq_table.table);
@@ -1137,10 +1036,6 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table);
- hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
}
struct roce_hem_item {
@@ -1505,7 +1400,7 @@ err_exit:
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
const struct hns_roce_buf_region *regions,
- int region_cnt)
+ int region_cnt, unsigned int bt_pg_shift)
{
const struct hns_roce_buf_region *r;
int ofs, end;
@@ -1519,7 +1414,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
- unit = (1 << hem_list->bt_pg_shift) / BA_BYTE_LEN;
+ unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
for (i = 0; i < region_cnt; i++) {
r = &regions[i];
if (!r->count)
@@ -1566,8 +1461,7 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
hem_list->root_ba = 0;
}
-void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
- int bt_page_order)
+void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
{
int i, j;
@@ -1576,8 +1470,6 @@ void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
-
- hem_list->bt_pg_shift = bt_page_order;
}
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 3bb8f78fb7b0..b34c940077bb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -115,12 +115,6 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle);
-int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end);
-void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
unsigned long obj_size, unsigned long nobj,
@@ -133,14 +127,13 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_mhop *mhop);
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
-void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
- int bt_page_order);
+void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list);
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
int region_cnt, int unit);
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
const struct hns_roce_buf_region *regions,
- int region_cnt);
+ int region_cnt, unsigned int bt_pg_shift);
void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5ff028d77be3..d02207cd30df 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -503,16 +503,13 @@ static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
u32 ext_sdb_alful)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
dma_addr_t sdb_dma_addr;
__le32 tmp;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
/* Configure extend SDB threshold */
roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
@@ -545,16 +542,13 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
u32 ext_odb_alful)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
dma_addr_t odb_dma_addr;
__le32 tmp;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
/* Configure extend ODB threshold */
roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
@@ -583,16 +577,13 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
u32 odb_ext_mod)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
dma_addr_t sdb_dma_addr;
dma_addr_t odb_dma_addr;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
if (!db->ext_db)
return -ENOMEM;
@@ -692,14 +683,14 @@ static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct hns_roce_caps *caps = &hr_dev->caps;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct device *dev = &hr_dev->pdev->dev;
struct ib_cq_init_attr cq_init_attr;
- struct hns_roce_free_mr *free_mr;
struct ib_qp_attr attr = { 0 };
- struct hns_roce_v1_priv *priv;
struct hns_roce_qp *hr_qp;
- struct ib_device *ibdev;
struct ib_cq *cq;
struct ib_pd *pd;
union ib_gid dgid;
@@ -712,14 +703,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
u8 port = 0;
u8 sl;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
/* Reserved cq for loop qp */
cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
cq_init_attr.comp_vector = 0;
- ibdev = &hr_dev->ib_dev;
cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
if (!cq)
return -ENOMEM;
@@ -868,16 +855,13 @@ alloc_cq_failed:
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
struct hns_roce_qp *hr_qp;
int ret;
int i;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
if (!hr_qp)
@@ -897,18 +881,15 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
u32 sdb_ext_mod;
u32 odb_ext_mod;
u32 sdb_evt_mod;
u32 odb_evt_mod;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
memset(db, 0, sizeof(*db));
/* Default DB mode */
@@ -954,15 +935,12 @@ static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
- struct device *dev = &hr_dev->pdev->dev;
+ long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct hns_roce_recreate_lp_qp_work *lp_qp_work;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
+ struct device *dev = &hr_dev->pdev->dev;
struct completion comp;
- long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
GFP_KERNEL);
@@ -1021,29 +999,21 @@ static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
- struct hns_roce_mr_free_work *mr_work;
- struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_cq *mr_free_cq;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_dev *hr_dev;
- struct hns_roce_mr *hr_mr;
- struct hns_roce_qp *hr_qp;
- struct device *dev;
unsigned long end =
msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
- int i;
- int ret;
+ struct hns_roce_mr_free_work *mr_work =
+ container_of(work, struct hns_roce_mr_free_work, work);
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
+ struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
+ struct hns_roce_mr *hr_mr = mr_work->mr;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
+ struct hns_roce_qp *hr_qp;
int ne = 0;
-
- mr_work = container_of(work, struct hns_roce_mr_free_work, work);
- hr_mr = (struct hns_roce_mr *)mr_work->mr;
- hr_dev = to_hr_dev(mr_work->ib_dev);
- dev = &hr_dev->pdev->dev;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
- mr_free_cq = free_mr->mr_free_cq;
+ int ret;
+ int i;
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
@@ -1092,19 +1062,15 @@ free_work:
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, struct ib_udata *udata)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
+ long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_mr_free_work *mr_work;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
- struct completion comp;
- long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
unsigned long start = jiffies;
- int npages;
+ struct completion comp;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
if (mr->enabled) {
if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
key_to_hw_index(mr->key) &
@@ -1146,17 +1112,9 @@ free_mr:
dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
- dma_free_coherent(dev, npages * 8, mr->pbl_buf,
- mr->pbl_dma_addr);
- }
-
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
key_to_hw_index(mr->key), 0);
-
- ib_umem_release(mr->umem);
-
+ hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
kfree(mr);
return ret;
@@ -1164,12 +1122,9 @@ free_mr:
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
if (db->sdb_ext_mod) {
dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
@@ -1190,17 +1145,14 @@ static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
- int ret;
- u32 val;
- __le32 tmp;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_raq_table *raq = &priv->raq_table;
+ struct device *dev = &hr_dev->pdev->dev;
int raq_shift = 0;
dma_addr_t addr;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_raq_table *raq;
- struct device *dev = &hr_dev->pdev->dev;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- raq = &priv->raq_table;
+ __le32 tmp;
+ u32 val;
+ int ret;
raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
if (!raq->e_raq_buf)
@@ -1280,12 +1232,9 @@ err_dma_alloc_raq:
static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_raq_table *raq = &priv->raq_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_raq_table *raq;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- raq = &priv->raq_table;
dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
raq->e_raq_buf->map);
@@ -1319,12 +1268,10 @@ static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
int ret;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-
priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
GFP_KERNEL);
@@ -1362,10 +1309,8 @@ err_failed_alloc_mtpt_buf:
static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
@@ -1379,12 +1324,9 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_buf_list *tptr_buf;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- tptr_buf = &priv->tptr_table.tptr_buf;
/*
* This buffer will be used for CQ's tptr(tail pointer), also
@@ -1405,12 +1347,9 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_buf_list *tptr_buf;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- tptr_buf = &priv->tptr_table.tptr_buf;
dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
tptr_buf->buf, tptr_buf->map);
@@ -1418,14 +1357,11 @@ static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
if (!free_mr->free_mr_wq) {
dev_err(dev, "Create free mr workqueue failed!\n");
@@ -1444,11 +1380,8 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
flush_workqueue(free_mr->free_mr_wq);
destroy_workqueue(free_mr->free_mr_wq);
@@ -1826,9 +1759,12 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
+ u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v1_mpt_entry *mpt_entry;
- struct sg_dma_page_iter sg_iter;
- u64 *pages;
+ dma_addr_t pbl_ba;
+ int count;
int i;
/* MPT filled into mailbox buf */
@@ -1878,22 +1814,15 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
if (mr->type == MR_TYPE_DMA)
return 0;
- pages = (u64 *) __get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- i = 0;
- for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
- pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;
-
- /* Directly record to MTPT table firstly 7 entry */
- if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
- break;
- i++;
+ count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+ ARRAY_SIZE(pages), &pbl_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n", count);
+ return -ENOBUFS;
}
/* Register user mr */
- for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
+ for (i = 0; i < count; i++) {
switch (i) {
case 0:
mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
@@ -1959,20 +1888,17 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
}
}
- free_page((unsigned long) pages);
-
- mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
-
+ mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba);
roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
- MPT_BYTE_12_PBL_ADDR_H_S,
- ((u32)(mr->pbl_dma_addr >> 32)));
+ MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba));
return 0;
}
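This hunk (and several later ones) replaces open-coded (u32) casts and >> 32 shifts with lower_32_bits()/upper_32_bits() when programming a 64-bit PBL base address into two 32-bit context words. A minimal userspace sketch of that split, with the two kernel macros re-defined locally for illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define lower_32_bits(n) ((uint32_t)(n))
	#define upper_32_bits(n) ((uint32_t)((n) >> 32))

	int main(void)
	{
		uint64_t pbl_ba = 0x0000004512345678ULL;

		printf("low  = 0x%08x\n", lower_32_bits(pbl_ba)); /* 0x12345678 */
		printf("high = 0x%08x\n", upper_32_bits(pbl_ba)); /* 0x00000045 */
		return 0;
	}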
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(hr_cq->mtr.kmem,
+ n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -2066,16 +1992,12 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
u64 *mtts, dma_addr_t dma_handle)
{
- struct hns_roce_cq_context *cq_context = NULL;
- struct hns_roce_buf_list *tptr_buf;
- struct hns_roce_v1_priv *priv;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
+ struct hns_roce_cq_context *cq_context = mb_buf;
dma_addr_t tptr_dma_addr;
int offset;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- tptr_buf = &priv->tptr_table.tptr_buf;
-
- cq_context = mb_buf;
memset(cq_context, 0, sizeof(*cq_context));
/* Get the tptr for this CQ. */
@@ -2416,16 +2338,14 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
int step_idx)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- unsigned long flags = 0;
long end = HW_SYNC_TIMEOUT_MSECS;
__le32 bt_cmd_val[2] = {0};
+ unsigned long flags = 0;
void __iomem *bt_cmd;
u64 bt_ba = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-
switch (table->type) {
case HEM_TYPE_QPC:
bt_ba = priv->bt_table.qpc_buf.map >> 12;
@@ -2479,7 +2399,6 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt,
enum hns_roce_qp_state cur_state,
enum hns_roce_qp_state new_state,
struct hns_roce_qp_context *context,
@@ -2560,6 +2479,29 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
return ret;
}
+static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int rq_pa_start;
+ int count;
+
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "Failed to find SQ ba\n");
+ return -ENOBUFS;
+ }
+ rq_pa_start = hr_qp->rq.offset >> hr_qp->mtr.hem_cfg.buf_pg_shift;
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, rq_pa_start, rq_ba, 1,
+ NULL);
+ if (!count) {
+ ibdev_err(ibdev, "Failed to find RQ ba\n");
+ return -ENOBUFS;
+ }
+
+ return 0;
+}
+
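find_wqe_mtt() converts the RQ's byte offset inside the WQE buffer into a buffer-page index before querying the MTR. A minimal userspace sketch of that page-index math, with illustrative values rather than the driver's:

	#include <stdio.h>

	static unsigned long byte_offset_to_page_index(unsigned long offset,
						       unsigned int page_shift)
	{
		return offset >> page_shift;	/* offset / (1 << page_shift) */
	}

	int main(void)
	{
		unsigned int buf_pg_shift = 12;		/* 4 KiB buffer pages */
		unsigned long rq_offset = 0x6000;	/* RQ starts 24 KiB in */

		printf("RQ starts at buffer page %lu\n",
		       byte_offset_to_page_index(rq_offset, buf_pg_shift));
		return 0;
	}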
static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
@@ -2567,25 +2509,20 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_sqp_context *context;
- struct device *dev = &hr_dev->pdev->dev;
dma_addr_t dma_handle = 0;
u32 __iomem *addr;
- int rq_pa_start;
+ u64 sq_ba = 0;
+ u64 rq_ba = 0;
__le32 tmp;
u32 reg_val;
- u64 *mtts;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
/* Search QP buf's MTTs */
- mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
- hr_qp->mtt.first_seg, &dma_handle);
- if (!mtts) {
- dev_err(dev, "qp buf pa find failed\n");
+ if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
goto out;
- }
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
roce_set_field(context->qp1c_bytes_4,
@@ -2599,11 +2536,11 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
- context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
+ context->sq_rq_bt_l = cpu_to_le32(dma_handle);
roce_set_field(context->qp1c_bytes_12,
QP1C_BYTES_12_SQ_RQ_BT_H_M,
QP1C_BYTES_12_SQ_RQ_BT_H_S,
- ((u32)(dma_handle >> 32)));
+ upper_32_bits(dma_handle));
roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
@@ -2624,14 +2561,12 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
- rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l =
- cpu_to_le32((u32)(mtts[rq_pa_start]));
+ context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
roce_set_field(context->qp1c_bytes_28,
QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
- (mtts[rq_pa_start]) >> 32);
+ upper_32_bits(rq_ba));
roce_set_field(context->qp1c_bytes_28,
QP1C_BYTES_28_RQ_CUR_IDX_M,
QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
@@ -2645,12 +2580,12 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_32_TX_CQ_NUM_S,
to_hr_cq(ibqp->send_cq)->cqn);
- context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);
+ context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
roce_set_field(context->qp1c_bytes_40,
QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
- (mtts[0]) >> 32);
+ upper_32_bits(sq_ba));
roce_set_field(context->qp1c_bytes_40,
QP1C_BYTES_40_SQ_CUR_IDX_M,
QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
@@ -2704,6 +2639,28 @@ out:
return -EINVAL;
}
+static bool check_qp_state(enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ static const bool sm[][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true },
+ [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true,
+ [IB_QPS_RTR] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
+ [IB_QPS_SQD] = {},
+ [IB_QPS_SQE] = {},
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ };
+
+ return sm[cur_state][new_state];
+}
+
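check_qp_state() replaces the long chain of state comparisons removed further down with a two-dimensional lookup table indexed by (current, next) state. A small standalone sketch of the same table-driven validation, using stand-in states rather than the IB_QPS_* values:

	#include <stdbool.h>
	#include <stdio.h>

	enum qps { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_ERR, QPS_NUM };

	static bool transition_ok(enum qps cur, enum qps new)
	{
		/* entries not listed default to false */
		static const bool sm[QPS_NUM][QPS_NUM] = {
			[QPS_RESET] = { [QPS_RESET] = true, [QPS_INIT] = true },
			[QPS_INIT]  = { [QPS_RESET] = true, [QPS_INIT] = true,
					[QPS_RTR] = true,   [QPS_ERR] = true },
			[QPS_RTR]   = { [QPS_RESET] = true, [QPS_RTS] = true,
					[QPS_ERR] = true },
			[QPS_RTS]   = { [QPS_RESET] = true, [QPS_ERR] = true },
			[QPS_ERR]   = { [QPS_RESET] = true, [QPS_ERR] = true },
		};

		return sm[cur][new];
	}

	int main(void)
	{
		printf("INIT->RTR: %d\n", transition_ok(QPS_INIT, QPS_RTR)); /* 1 */
		printf("RTS->RTR:  %d\n", transition_ok(QPS_RTS, QPS_RTR));  /* 0 */
		return 0;
	}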
static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
@@ -2716,26 +2673,29 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
dma_addr_t dma_handle_2 = 0;
dma_addr_t dma_handle = 0;
__le32 doorbell[2] = {0};
- int rq_pa_start = 0;
u64 *mtts_2 = NULL;
int ret = -EINVAL;
- u64 *mtts = NULL;
+ u64 sq_ba = 0;
+ u64 rq_ba = 0;
int port;
u8 port_num;
u8 *dmac;
u8 *smac;
+ if (!check_qp_state(cur_state, new_state)) {
+ ibdev_err(ibqp->device,
+ "not support QP(%u) status from %d to %d\n",
+ ibqp->qp_num, cur_state, new_state);
+ return -EINVAL;
+ }
+
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
/* Search qp buf's mtts */
- mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
- hr_qp->mtt.first_seg, &dma_handle);
- if (mtts == NULL) {
- dev_err(dev, "qp buf pa find failed\n");
+ if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
goto out;
- }
/* Search IRRL's mtts */
mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
@@ -2890,11 +2850,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
dmac = (u8 *)attr->ah_attr.roce.dmac;
- context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
+ context->sq_rq_bt_l = cpu_to_le32(dma_handle);
roce_set_field(context->qpc_bytes_24,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
- ((u32)(dma_handle >> 32)));
+ upper_32_bits(dma_handle));
roce_set_bit(context->qpc_bytes_24,
QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
1);
@@ -2993,14 +2953,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
- rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l =
- cpu_to_le32((u32)(mtts[rq_pa_start]));
+ context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
roce_set_field(context->qpc_bytes_76,
QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
- mtts[rq_pa_start] >> 32);
+ upper_32_bits(rq_ba));
roce_set_field(context->qpc_bytes_76,
QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
@@ -3062,8 +3020,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_156_SL_S,
rdma_ah_get_sl(&attr->ah_attr));
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- } else if (cur_state == IB_QPS_RTR &&
- new_state == IB_QPS_RTS) {
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
/* If exist optional param, return error */
if ((attr_mask & IB_QP_ALT_PATH) ||
(attr_mask & IB_QP_ACCESS_FLAGS) ||
@@ -3075,12 +3032,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
goto out;
}
- context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
+ context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
roce_set_field(context->qpc_bytes_120,
QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
- (mtts[0]) >> 32);
+ upper_32_bits(sq_ba));
roce_set_field(context->qpc_bytes_124,
QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
@@ -3223,28 +3180,18 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
- context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
+ context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
roce_set_field(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
- (mtts[0]) >> 32);
+ upper_32_bits(sq_ba));
roce_set_bit(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
roce_set_field(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
0);
- } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
- dev_err(dev, "not support this status migration\n");
- goto out;
}
/* Every status migrate must change state */
@@ -3253,8 +3200,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
/* SW pass context to HW */
- ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
- to_hns_roce_state(cur_state),
+ ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
to_hns_roce_state(new_state), context,
hr_qp);
if (ret) {
@@ -3636,8 +3582,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
u32 cqe_cnt_cur;
int wait_time = 0;
- hns_roce_free_cqc(hr_dev, hr_cq);
-
/*
* Before freeing cq buffer, we need to ensure that the outstanding CQE
* have been written by checking the CQE counter.
@@ -3660,14 +3604,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
}
wait_time++;
}
-
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-
- ib_umem_release(hr_cq->umem);
- if (!udata) {
- /* Free the buff of stored cq */
- hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
- }
}
static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c3316672b70e..c597d7281629 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -95,6 +95,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
{
struct hns_roce_mr *mr = to_hr_mr(wr->mr);
struct hns_roce_wqe_frmr_seg *fseg = wqe;
+ u64 pbl_ba;
/* use ib_access_flags */
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
@@ -109,26 +110,27 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
/* Data structure reuse may lead to confusion */
- rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
- rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
+ pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
+ rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
+ rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
rc_sq_wqe->rkey = cpu_to_le32(wr->key);
rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
- fseg->pbl_size = cpu_to_le32(mr->pbl_size);
+ fseg->pbl_size = cpu_to_le32(mr->npages);
roce_set_field(fseg->mode_buf_pg_sz,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
roce_set_bit(fseg->mode_buf_pg_sz,
V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
- int valid_num_sge)
+ unsigned int valid_num_sge)
{
struct hns_roce_wqe_atomic_seg *aseg;
@@ -149,56 +151,33 @@ static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
}
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
- unsigned int *sge_ind, int valid_num_sge)
+ unsigned int *sge_ind, unsigned int valid_num_sge)
{
struct hns_roce_v2_wqe_data_seg *dseg;
- struct ib_sge *sg;
- int num_in_wqe = 0;
- int extend_sge_num;
- int fi_sge_num;
- int se_sge_num;
- int shift;
- int i;
+ unsigned int cnt = valid_num_sge;
+ struct ib_sge *sge = wr->sg_list;
+ unsigned int idx = *sge_ind;
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
- num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
- extend_sge_num = valid_num_sge - num_in_wqe;
- sg = wr->sg_list + num_in_wqe;
- shift = qp->hr_buf.page_shift;
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ cnt -= HNS_ROCE_SGE_IN_WQE;
+ sge += HNS_ROCE_SGE_IN_WQE;
+ }
- /*
- * Check whether wr->num_sge sges are in the same page. If not, we
- * should calculate how many sges in the first page and the second
- * page.
- */
- dseg = hns_roce_get_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
- fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
- (uintptr_t)dseg) /
- sizeof(struct hns_roce_v2_wqe_data_seg);
- if (extend_sge_num > fi_sge_num) {
- se_sge_num = extend_sge_num - fi_sge_num;
- for (i = 0; i < fi_sge_num; i++) {
- set_data_seg_v2(dseg++, sg + i);
- (*sge_ind)++;
- }
- dseg = hns_roce_get_extend_sge(qp,
- (*sge_ind) & (qp->sge.sge_cnt - 1));
- for (i = 0; i < se_sge_num; i++) {
- set_data_seg_v2(dseg++, sg + fi_sge_num + i);
- (*sge_ind)++;
- }
- } else {
- for (i = 0; i < extend_sge_num; i++) {
- set_data_seg_v2(dseg++, sg + i);
- (*sge_ind)++;
- }
+ while (cnt > 0) {
+ dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
+ set_data_seg_v2(dseg, sge);
+ idx++;
+ sge++;
+ cnt--;
}
+
+ *sge_ind = idx;
}
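The rewritten set_extend_sge() drops the first-page/second-page split and simply masks the producer index into the extended-SGE ring on every iteration, relying on the ring size being a power of two. A minimal sketch of that wrap-around indexing, with illustrative sizes:

	#include <stdio.h>

	#define RING_CNT 8	/* must be a power of two, like qp->sge.sge_cnt */

	int main(void)
	{
		int ring[RING_CNT] = { 0 };
		unsigned int idx = 6;	/* producer index may exceed RING_CNT */

		for (int val = 100; val < 105; val++) {
			ring[idx & (RING_CNT - 1)] = val;	/* wraps at 8 */
			idx++;
		}

		for (int i = 0; i < RING_CNT; i++)
			printf("slot %d = %d\n", i, ring[i]);
		return 0;
	}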
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
- int valid_num_sge)
+ unsigned int valid_num_sge)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg = wqe;
@@ -208,15 +187,15 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int i;
if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
- if (le32_to_cpu(rc_sq_wqe->msg_len) >
- hr_dev->caps.max_sq_inline) {
+ if (unlikely(le32_to_cpu(rc_sq_wqe->msg_len) >
+ hr_dev->caps.max_sq_inline)) {
ibdev_err(ibdev, "inline len(1-%d)=%d, illegal",
rc_sq_wqe->msg_len,
hr_dev->caps.max_sq_inline);
return -EINVAL;
}
- if (wr->opcode == IB_WR_RDMA_READ) {
+ if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
ibdev_err(ibdev, "Not support inline data!\n");
return -EINVAL;
}
@@ -230,7 +209,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
1);
} else {
- if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
+ if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
for (i = 0; i < wr->num_sge; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
@@ -243,8 +222,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
(*sge_ind) & (qp->sge.sge_cnt - 1));
- for (i = 0; i < wr->num_sge &&
- j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
+ for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE;
+ i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
@@ -290,10 +269,11 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
return 0;
}
-static inline int calc_wr_sge_num(const struct ib_send_wr *wr, u32 *sge_len)
+static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
+ unsigned int *sge_len)
{
- int valid_num = 0;
- u32 len = 0;
+ unsigned int valid_num = 0;
+ unsigned int len = 0;
int i;
for (i = 0; i < wr->num_sge; i++) {
@@ -424,7 +404,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
{
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
unsigned int curr_idx = *sge_idx;
- int valid_num_sge;
+ unsigned int valid_num_sge;
u32 msg_len = 0;
int ret = 0;
@@ -521,8 +501,7 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
- V2_DB_PARAMETER_IDX_S,
- qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
+ V2_DB_PARAMETER_IDX_S, qp->sq.head);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
V2_DB_PARAMETER_SL_S, qp->sl);
@@ -548,7 +527,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
spin_lock_irqsave(&qp->sq.lock, flags);
ret = check_send_valid(hr_dev, qp);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
@@ -584,7 +563,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
else if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
goto out;
}
@@ -634,15 +613,15 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
spin_lock_irqsave(&hr_qp->rq.lock, flags);
ret = check_recv_valid(hr_dev, hr_qp);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
}
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
- hr_qp->ibqp.recv_cq)) {
+ if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq))) {
ret = -ENOMEM;
*bad_wr = wr;
goto out;
@@ -650,7 +629,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
- if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+ if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
@@ -667,13 +646,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
dseg++;
}
- if (i < hr_qp->rq.max_gs) {
+ if (wr->num_sge < hr_qp->rq.max_gs) {
dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg->addr = 0;
+ dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
}
/* rq support inline data */
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ if (hr_qp->rq_inl_buf.wqe_cnt) {
sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
(u32)wr->num_sge;
@@ -715,6 +695,129 @@ out:
return ret;
}
+static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
+{
+ return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
+}
+
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n)
+{
+ return hns_roce_buf_offset(idx_que->mtr.kmem,
+ n << idx_que->entry_shift);
+}
+
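get_srq_wqe() and get_idx_buf() locate entry n by shifting it with the per-entry size shift, i.e. byte offset n << shift. A standalone sketch of the addressing, with made-up sizes:

	#include <stdio.h>
	#include <stdint.h>

	static void *entry_at(void *base, int n, unsigned int shift)
	{
		return (uint8_t *)base + ((size_t)n << shift);
	}

	int main(void)
	{
		uint8_t buf[1024];
		unsigned int wqe_shift = 6;	/* 64-byte WQEs */

		printf("entry 3 offset = %td\n",
		       (uint8_t *)entry_at(buf, 3, wqe_shift) - buf); /* 192 */
		return 0;
	}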
+static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+{
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+ bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
+ srq->tail++;
+
+ spin_unlock(&srq->lock);
+}
+
+static int find_empty_entry(struct hns_roce_idx_que *idx_que,
+ unsigned long size)
+{
+ int wqe_idx;
+
+ if (unlikely(bitmap_full(idx_que->bitmap, size)))
+ return -ENOSPC;
+
+ wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
+
+ bitmap_set(idx_que->bitmap, wqe_idx, 1);
+
+ return wqe_idx;
+}
+
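find_empty_entry() implements a bitmap index allocator: find the first clear bit, set it, and return the index; hns_roce_free_srq_wqe() clears the bit again on free. A userspace sketch of the same allocator, with a plain unsigned long standing in for the kernel bitmap API:

	#include <stdio.h>

	static int alloc_idx(unsigned long *bitmap, unsigned long size)
	{
		for (unsigned long i = 0; i < size; i++) {
			if (!(*bitmap & (1UL << i))) {
				*bitmap |= 1UL << i;	/* bitmap_set(bitmap, i, 1) */
				return (int)i;
			}
		}
		return -1;	/* full: the driver returns -ENOSPC here */
	}

	static void free_idx(unsigned long *bitmap, int i)
	{
		*bitmap &= ~(1UL << i);	/* bitmap_clear(bitmap, i, 1) */
	}

	int main(void)
	{
		unsigned long bitmap = 0;
		int a = alloc_idx(&bitmap, 8);	/* 0 */
		int b = alloc_idx(&bitmap, 8);	/* 1 */

		free_idx(&bitmap, a);
		printf("%d %d %d\n", a, b, alloc_idx(&bitmap, 8)); /* 0 1 0 */
		return 0;
	}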
+static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_v2_db srq_db;
+ unsigned long flags;
+ __le32 *srq_idx;
+ int ret = 0;
+ int wqe_idx;
+ void *wqe;
+ int nreq;
+ int ind;
+ int i;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ ind = srq->head & (srq->wqe_cnt - 1);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (unlikely(wr->num_sge >= srq->max_gs)) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (unlikely(srq->head == srq->tail)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
+ if (unlikely(wqe_idx < 0)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe = get_srq_wqe(srq, wqe_idx);
+ dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
+ dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
+ dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
+ }
+
+ if (wr->num_sge < srq->max_gs) {
+ dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
+ dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg[i].addr = 0;
+ }
+
+ srq_idx = get_idx_buf(&srq->idx_que, ind);
+ *srq_idx = cpu_to_le32(wqe_idx);
+
+ srq->wrid[wqe_idx] = wr->wr_id;
+ ind = (ind + 1) & (srq->wqe_cnt - 1);
+ }
+
+ if (likely(nreq)) {
+ srq->head += nreq;
+
+ /*
+ * Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ srq_db.byte_4 =
+ cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
+ (srq->srqn & V2_DB_BYTE_4_TAG_M));
+ srq_db.parameter =
+ cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);
+
+ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+ }
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return ret;
+}
+
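The wmb() before the doorbell write guarantees that every WQE store is visible to the device before the head update that publishes it. A portable C11 sketch of the same publish/consume ordering, modelling the barrier-plus-store as a release store and the hardware's read as an acquire load (an approximation for illustration, not the driver's actual mechanism):

	#include <stdatomic.h>
	#include <stdio.h>

	static int wqe[4];
	static atomic_uint head;

	static void post(int n, int payload)
	{
		wqe[n] = payload;		/* fill the descriptor */
		/* release = wmb() + store: descriptor writes ordered before it */
		atomic_store_explicit(&head, n + 1, memory_order_release);
	}

	static void consume(void)
	{
		unsigned int h = atomic_load_explicit(&head, memory_order_acquire);

		for (unsigned int i = 0; i < h; i++)
			printf("wqe[%u] = %d\n", i, wqe[i]);
	}

	int main(void)
	{
		post(0, 42);
		consume();
		return 0;
	}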
static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
@@ -742,7 +845,7 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
@@ -768,7 +871,7 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
@@ -785,7 +888,7 @@ static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage; /* the current instance stage */
@@ -865,7 +968,7 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
&priv->cmq.csq : &priv->cmq.crq;
@@ -878,7 +981,7 @@ static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
&priv->cmq.csq : &priv->cmq.crq;
dma_addr_t dma = ring->desc_dma_addr;
@@ -904,7 +1007,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
int ret;
/* Setup the queue entries for command queue */
@@ -948,7 +1051,7 @@ err_crq:
static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
@@ -970,15 +1073,15 @@ static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
return head == priv->cmq.csq.next_to_use;
}
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
struct hns_roce_cmq_desc *desc;
u16 ntc = csq->next_to_clean;
@@ -1003,7 +1106,7 @@ static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
struct hns_roce_cmq_desc *desc_to_use;
bool complete = false;
@@ -1131,7 +1234,7 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long reset_cnt;
@@ -1151,7 +1254,7 @@ static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
int flag)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage;
@@ -1349,34 +1452,26 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
struct hns_roce_pf_timer_res_a *req_a;
- struct hns_roce_cmq_desc desc[2];
- int ret, i;
+ struct hns_roce_cmq_desc desc;
+ int ret;
- for (i = 0; i < 2; i++) {
- hns_roce_cmq_setup_basic_desc(&desc[i],
- HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
- true);
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
+ true);
- if (i == 0)
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- }
-
- ret = hns_roce_cmq_send(hr_dev, desc, 2);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
- req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
+ req_a = (struct hns_roce_pf_timer_res_a *)desc.data;
hr_dev->caps.qpc_timer_bt_num =
- roce_get_field(req_a->qpc_timer_bt_idx_num,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
+ roce_get_field(req_a->qpc_timer_bt_idx_num,
+ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
+ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
hr_dev->caps.cqc_timer_bt_num =
- roce_get_field(req_a->cqc_timer_bt_idx_num,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
+ roce_get_field(req_a->cqc_timer_bt_idx_num,
+ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
+ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
return 0;
}
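The timer-resource query above pulls its fields out of the response words with roce_get_field(), i.e. a mask-and-shift. A minimal sketch of that extraction, with hypothetical mask/shift values in place of the PF_RES_DATA_* definitions:

	#include <stdio.h>
	#include <stdint.h>

	#define FIELD_M	0x00FF0000u	/* hypothetical bit-mask of the field */
	#define FIELD_S	16		/* hypothetical shift of the field   */

	static uint32_t get_field(uint32_t word, uint32_t mask, unsigned int shift)
	{
		return (word & mask) >> shift;
	}

	int main(void)
	{
		uint32_t word = 0x00AB0000;

		printf("field = 0x%x\n", get_field(word, FIELD_M, FIELD_S)); /* 0xab */
		return 0;
	}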
@@ -1786,6 +1881,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
+ caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
+ HNS_ROCE_CAP_FLAGS_EX_SHIFT;
+
caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
V2_QUERY_PF_CAPS_C_NUM_CQS_M,
V2_QUERY_PF_CAPS_C_NUM_CQS_S);
@@ -1978,11 +2076,6 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
hr_dev->vendor_part_id = hr_dev->pci_dev->device;
hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
- caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
- caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
- caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
- caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
-
caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
caps->pbl_buf_pg_sz = 0;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
@@ -2040,8 +2133,6 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
page_num = link_tbl->npages;
entry = link_tbl->table.buf;
- memset(req_a, 0, sizeof(*req_a));
- memset(req_b, 0, sizeof(*req_b));
for (i = 0; i < 2; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
@@ -2050,39 +2141,30 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
-
- if (i == 0) {
- req_a->base_addr_l =
- cpu_to_le32(link_tbl->table.map & 0xffffffff);
- req_a->base_addr_h =
- cpu_to_le32(link_tbl->table.map >> 32);
- roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S,
- link_tbl->npages);
- roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S,
- link_tbl->pg_sz);
- req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
- req_a->head_ba_h_nxtptr =
- cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
- roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M,
- CFG_LLM_HEAD_PTR_S, 0);
- } else {
- req_b->tail_ba_l =
- cpu_to_le32(entry[page_num - 1].blk_ba0);
- roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
- CFG_LLM_TAIL_BA_H_S,
- entry[page_num - 1].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_BA1_M);
- roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
- CFG_LLM_TAIL_PTR_S,
- (entry[page_num - 2].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
- HNS_ROCE_LINK_TABLE_NXT_PTR_S);
- }
}
+
+ req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
+ req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
+ roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
+ CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
+ roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
+ CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
CFG_LLM_INIT_EN_S, 1);
+ req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
+ req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
+ roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
+ 0);
+
+ req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
+ roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
+ CFG_LLM_TAIL_BA_H_S,
+ entry[page_num - 1].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_BA1_M);
+ roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_S,
+ (entry[page_num - 2].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
+ HNS_ROCE_LINK_TABLE_NXT_PTR_S);
return hns_roce_cmq_send(hr_dev, desc, 2);
}
@@ -2438,12 +2520,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
reg_smac_l = *(u32 *)(&addr[0]);
reg_smac_h = *(u16 *)(&addr[4]);
- memset(smac_tb, 0, sizeof(*smac_tb));
- roce_set_field(smac_tb->tb_idx_rsv,
- CFG_SMAC_TB_IDX_M,
+ roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
CFG_SMAC_TB_IDX_S, phy_port);
- roce_set_field(smac_tb->vf_smac_h_rsv,
- CFG_SMAC_TB_VF_SMAC_H_M,
+ roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
@@ -2453,32 +2532,30 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
struct hns_roce_mr *mr)
{
- struct sg_dma_page_iter sg_iter;
- u64 page_addr;
- u64 *pages;
- int i;
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
+ u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t pbl_ba;
+ int i, count;
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
- mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
- roce_set_field(mpt_entry->byte_48_mode_ba,
- V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
- upper_32_bits(mr->pbl_ba >> 3));
+ count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+ ARRAY_SIZE(pages), &pbl_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
+ count);
+ return -ENOBUFS;
+ }
- pages = (u64 *)__get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ /* Aligned to the hardware address access unit */
+ for (i = 0; i < count; i++)
+ pages[i] >>= 6;
- i = 0;
- for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
- page_addr = sg_page_iter_dma_address(&sg_iter);
- pages[i] = page_addr >> 6;
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
+ mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
+ roce_set_field(mpt_entry->byte_48_mode_ba,
+ V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(pbl_ba >> 3));
- /* Record the first 2 entry directly to MTPT table */
- if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
- goto found;
- i++;
- }
-found:
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
@@ -2489,9 +2566,7 @@ found:
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
-
- free_page((unsigned long)pages);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
return 0;
}
@@ -2513,7 +2588,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
- mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
@@ -2599,11 +2674,19 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_mpt_entry *mpt_entry;
+ dma_addr_t pbl_ba = 0;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
+ if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
+ ibdev_err(ibdev, "failed to find frmr mtr.\n");
+ return -ENOBUFS;
+ }
+
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
@@ -2611,7 +2694,7 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
- mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
@@ -2624,17 +2707,17 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
- mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
V2_MPT_BYTE_48_PBL_BA_H_S,
- upper_32_bits(mr->pbl_ba >> 3));
+ upper_32_bits(pbl_ba >> 3));
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
return 0;
}
@@ -2680,7 +2763,8 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(hr_cq->mtr.kmem,
+ n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2692,30 +2776,9 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
!!(n & hr_cq->cq_depth)) ? cqe : NULL;
}
-static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
+static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
{
- return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
-}
-
-static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
-{
- return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
-}
-
-static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
-{
- /* always called with interrupts disabled. */
- spin_lock(&srq->lock);
-
- bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
- srq->tail++;
-
- spin_unlock(&srq->lock);
-}
-
-static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
-{
- *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
+ *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
}
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -2801,39 +2864,39 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
- cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
+ cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
- mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_HOP_NUM_M,
V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
- cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
+ cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
- mtts[1] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
- hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
- hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
- if (hr_cq->db_en)
- roce_set_bit(cq_context->byte_44_db_record,
- V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
+ roce_set_bit(cq_context->byte_44_db_record,
+ V2_CQC_BYTE_44_DB_RECORD_EN_S,
+ (hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB) ? 1 : 0);
roce_set_field(cq_context->byte_44_db_record,
V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
@@ -2873,8 +2936,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
HNS_ROCE_V2_CQ_DB_NTR);
roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
- V2_CQ_DB_PARAMETER_CONS_IDX_S,
- hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
+ V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
@@ -2911,7 +2973,7 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
wqe_buf += size;
}
- if (data_len) {
+ if (unlikely(data_len)) {
wc->status = IB_WC_LOC_LEN_ERR;
return -EAGAIN;
}
@@ -2968,6 +3030,62 @@ out:
return npolled;
}
+static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
+{
+ static const struct {
+ u32 cqe_status;
+ enum ib_wc_status wc_status;
+ } map[] = {
+ { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
+ { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
+ { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
+ { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
+ { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
+ { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
+ IB_WC_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
+ };
+
+ u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
+ V2_CQE_BYTE_4_STATUS_S);
+ int i;
+
+ wc->status = IB_WC_GENERAL_ERR;
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (cqe_status == map[i].cqe_status) {
+ wc->status = map[i].wc_status;
+ break;
+ }
+
+ if (likely(wc->status == IB_WC_SUCCESS ||
+ wc->status == IB_WC_WR_FLUSH_ERR))
+ return;
+
+ ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ sizeof(*cqe), false);
+
+ /*
+ * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
+ * into errored mode. Hence, as a workaround to this hardware
+ * limitation, the driver needs to assist in flushing. But the flushing
+ * operation uses a mailbox to convey the QP state to the hardware,
+ * which can sleep due to the mutex protection around the mailbox
+ * calls. Hence, use the deferred flush for now; once a WC error is
+ * detected, the flushing operation is scheduled.
+ */
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+}
+
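get_cqe_status() turns the fourteen-case switch removed later in this file into a scan over an array of {hardware status, ib_wc status} pairs, defaulting to IB_WC_GENERAL_ERR. A standalone sketch of the same mapping-table pattern, with illustrative codes rather than the HNS_ROCE_CQE_V2_* values:

	#include <stdio.h>

	enum wc { WC_SUCCESS, WC_FLUSH_ERR, WC_GENERAL_ERR };

	static enum wc map_status(unsigned int hw_status)
	{
		static const struct {
			unsigned int hw;
			enum wc sw;
		} map[] = {
			{ 0x00, WC_SUCCESS },
			{ 0x05, WC_FLUSH_ERR },
		};

		for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
			if (hw_status == map[i].hw)
				return map[i].sw;

		return WC_GENERAL_ERR;	/* unknown codes fall through, as in the patch */
	}

	int main(void)
	{
		printf("%d %d %d\n", map_status(0), map_status(5), map_status(9));
		return 0;
	}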
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
@@ -2979,12 +3097,11 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
int is_send;
u16 wqe_ctr;
u32 opcode;
- u32 status;
int qpn;
int ret;
/* Find cqe according to consumer index */
- cqe = next_cqe_sw_v2(hr_cq);
+ cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
if (!cqe)
return -EAGAIN;
@@ -3009,7 +3126,6 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
*cur_qp = hr_qp;
}
- hr_qp = *cur_qp;
wc->qp = &(*cur_qp)->ibqp;
wc->vendor_err = 0;
@@ -3044,77 +3160,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
++wq->tail;
}
- status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
- V2_CQE_BYTE_4_STATUS_S);
- switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
- case HNS_ROCE_CQE_V2_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
- wc->status = IB_WC_LOC_QP_OP_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- case HNS_ROCE_CQE_V2_MW_BIND_ERR:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
- wc->status = IB_WC_BAD_RESP_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
- wc->status = IB_WC_REM_INV_REQ_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
- wc->status = IB_WC_REM_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
- wc->status = IB_WC_REM_OP_ERR;
- break;
- case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
- wc->status = IB_WC_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
- wc->status = IB_WC_RNR_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
- wc->status = IB_WC_REM_ABORT_ERR;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- /*
- * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
- * into errored mode. Hence, as a workaround to this hardware
- * limitation, driver needs to assist in flushing. But the flushing
- * operation uses mailbox to convey the QP state to the hardware and
- * which can sleep due to the mutex protection around the mailbox calls.
- * Hence, use the deferred flush for now. Once wc error detected, the
- * flushing operation is needed.
- */
- if (wc->status != IB_WC_SUCCESS &&
- wc->status != IB_WC_WR_FLUSH_ERR) {
- ibdev_err(&hr_dev->ib_dev, "error cqe status is: 0x%x\n",
- status & HNS_ROCE_V2_CQE_STATUS_MASK);
-
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
- init_flush_work(hr_dev, hr_qp);
-
- return 0;
- }
-
- if (wc->status == IB_WC_WR_FLUSH_ERR)
+ get_cqe_status(hr_dev, *cur_qp, cqe, wc);
+ if (unlikely(wc->status != IB_WC_SUCCESS))
return 0;
if (is_send) {
@@ -3213,7 +3260,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
(roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
- if (ret)
+ if (unlikely(ret))
return -EAGAIN;
}
@@ -3514,29 +3561,18 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sge.sge_cnt));
- else
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- hr_qp->sq.max_gs >
- HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_S,
+ to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
+ hr_qp->sge.sge_shift));
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ ilog2(hr_qp->sq.wqe_cnt));
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
- (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
- hr_qp->ibqp.srq) ? 0 :
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ ilog2(hr_qp->rq.wqe_cnt));
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
@@ -3572,7 +3608,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
- if (hr_qp->rdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
roce_set_bit(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
@@ -3734,30 +3770,19 @@ static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
return true;
}
-static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
- struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
{
- const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_qp *ibqp = &hr_qp->ibqp;
u64 mtts[MTT_MIN_COUNT] = { 0 };
- dma_addr_t dma_handle_3;
- dma_addr_t dma_handle_2;
u64 wqe_sge_ba;
u32 page_size;
- u8 port_num;
- u64 *mtts_3;
- u64 *mtts_2;
int count;
- u8 *dmac;
- u8 *smac;
- int port;
/* Search qp buf's mtts */
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ page_size = 1 << hr_qp->mtr.hem_cfg.buf_pg_shift;
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
hr_qp->rq.offset / page_size, mtts,
MTT_MIN_COUNT, &wqe_sge_ba);
@@ -3765,29 +3790,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
return -EINVAL;
- /* Search IRRL's mtts */
- mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
- hr_qp->qpn, &dma_handle_2);
- if (!mtts_2) {
- ibdev_err(ibdev, "failed to find QP irrl_table\n");
- return -EINVAL;
- }
-
- /* Search TRRL's mtts */
- mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
- hr_qp->qpn, &dma_handle_3);
- if (!mtts_3) {
- ibdev_err(ibdev, "failed to find QP trrl_table\n");
- return -EINVAL;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error\n",
- attr_mask);
- return -EINVAL;
- }
-
- dmac = (u8 *)attr->ah_attr.roce.dmac;
context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
qpc_mask->wqe_sge_ba = 0;
@@ -3804,17 +3806,16 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
V2_QPC_BYTE_12_SQ_HOP_NUM_S,
- hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
- 0 : hr_dev->caps.wqe_sq_hop_num);
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
+ hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S,
- ((ibqp->qp_type == IB_QPT_GSI) ||
- hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- hr_dev->caps.wqe_sge_hop_num : 0);
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
+ hr_qp->sge.sge_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
@@ -3822,8 +3823,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_HOP_NUM_M,
V2_QPC_BYTE_20_RQ_HOP_NUM_S,
- hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
- 0 : hr_dev->caps.wqe_rq_hop_num);
+ to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
+ hr_qp->rq.wqe_cnt));
+
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_HOP_NUM_M,
V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
@@ -3831,7 +3833,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
- hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
@@ -3839,50 +3841,181 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
- hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
- context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
+ context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
qpc_mask->rq_cur_blk_addr = 0;
roce_set_field(context->byte_92_srq_info,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
- mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
roce_set_field(qpc_mask->byte_92_srq_info,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
- context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
+ context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
qpc_mask->rq_nxt_blk_addr = 0;
roce_set_field(context->byte_104_rq_sge,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
- mtts[1] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
roce_set_field(qpc_mask->byte_104_rq_sge,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
+
+ return 0;
+}
+
+static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 sge_cur_blk = 0;
+ u64 sq_cur_blk = 0;
+ u32 page_size;
+ int count;
+
+ /* search qp buf's mtts */
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
+ hr_qp->qpn);
+ return -EINVAL;
+ }
+ if (hr_qp->sge.sge_cnt > 0) {
+ page_size = 1 << hr_qp->mtr.hem_cfg.buf_pg_shift;
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+ hr_qp->sge.offset / page_size,
+ &sge_cur_blk, 1, NULL);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
+ hr_qp->qpn);
+ return -EINVAL;
+ }
+ }
+
+	/*
+	 * In the v2 engine, software passes both the context and the context
+	 * mask to hardware when modifying a QP. To modify some fields of the
+	 * context, software clears all bits of those fields in the context
+	 * mask to 0; fields that must stay unchanged keep their mask bits
+	 * set to 1.
+	 */
+ context->sq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ qpc_mask->sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
+
+ context->sq_cur_sge_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(sge_cur_blk));
+ roce_set_field(context->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
+ qpc_mask->sq_cur_sge_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
+
+ context->rx_sq_cur_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
+ roce_set_field(context->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ qpc_mask->rx_sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+
+ return 0;
+}
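
The context/mask convention used by config_qp_rq_buf() and config_qp_sq_buf() can be seen in a standalone form below. The combine rule shown is an illustrative assumption (a mask bit of 1 keeps the old QPC bit, a mask bit of 0 takes the new value from the context), not the hardware specification; all names are demo-only.

	#include <stdint.h>
	#include <stdio.h>

	/* assumed semantics: keep old bits where mask is 1, take ctx bits
	 * where mask is 0 */
	static uint32_t apply_masked(uint32_t old, uint32_t ctx, uint32_t mask)
	{
		return (old & mask) | (ctx & ~mask);
	}

	int main(void)
	{
		uint32_t old = 0xdeadbeef;
		uint32_t ctx = 0x00001234;	/* new value for bits [15:0] */
		uint32_t mask = 0xffff0000;	/* bits [15:0] cleared => modify */

		printf("0x%08x\n", apply_masked(old, ctx, mask)); /* 0xdead1234 */
		return 0;
	}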
+
+static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t trrl_ba;
+ dma_addr_t irrl_ba;
+ u8 port_num;
+ u64 *mtts;
+ u8 *dmac;
+ u8 *smac;
+ int port;
+ int ret;
+
+ ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
+ if (ret) {
+ ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Search IRRL's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+ hr_qp->qpn, &irrl_ba);
+ if (!mtts) {
+ ibdev_err(ibdev, "failed to find qp irrl_table.\n");
+ return -EINVAL;
+ }
+
+ /* Search TRRL's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
+ hr_qp->qpn, &trrl_ba);
+ if (!mtts) {
+ ibdev_err(ibdev, "failed to find qp trrl_table.\n");
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
+ attr_mask);
+ return -EINVAL;
+ }
+
roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
- V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
+ V2_QPC_BYTE_132_TRRL_BA_S, trrl_ba >> 4);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
V2_QPC_BYTE_132_TRRL_BA_S, 0);
- context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
+ context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
qpc_mask->trrl_ba = 0;
roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
V2_QPC_BYTE_140_TRRL_BA_S,
- (u32)(dma_handle_3 >> (32 + 16 + 4)));
+ (u32)(trrl_ba >> (32 + 16 + 4)));
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
V2_QPC_BYTE_140_TRRL_BA_S, 0);
- context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
+ context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
qpc_mask->irrl_ba = 0;
roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
V2_QPC_BYTE_208_IRRL_BA_S,
- dma_handle_2 >> (32 + 6));
+ irrl_ba >> (32 + 6));
roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
V2_QPC_BYTE_208_IRRL_BA_S, 0);
@@ -3897,6 +4030,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
smac = (u8 *)hr_dev->dev_addr[port];
+ dmac = (u8 *)attr->ah_attr.roce.dmac;
/* when dmac equals smac or loop_idc is 1, it should loopback */
if (ether_addr_equal_unaligned(dmac, smac) ||
hr_dev->loop_idc == 0x1) {
@@ -3919,6 +4053,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
grh->sgid_index));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
+
memcpy(&(context->dmac), dmac, sizeof(u32));
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
@@ -3928,7 +4063,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	/* mtu * (2^LP_PKTN_INI) should not be bigger than 1 message length (64KB) */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
- V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+ V2_QPC_BYTE_56_LP_PKTN_INI_S,
+ ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
@@ -3942,16 +4078,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
- roce_set_field(context->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
-
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
@@ -3987,30 +4113,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
- u64 sge_cur_blk = 0;
- u64 sq_cur_blk = 0;
- u32 page_size;
- int count;
-
- /* Search qp buf's mtts */
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find buf pa of QP(0x%lx)\n",
- hr_qp->qpn);
- return -EINVAL;
- }
-
- if (hr_qp->sge.offset) {
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
- hr_qp->sge.offset / page_size,
- &sge_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find sge pa of QP(0x%lx)\n",
- hr_qp->qpn);
- return -EINVAL;
- }
- }
+ int ret;
/* Not support alternate path and path migration */
if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
@@ -4018,48 +4121,11 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return -EINVAL;
}
- /*
- * In v2 engine, software pass context and context mask to hardware
- * when modifying qp. If software need modify some fields in context,
- * we should set all bits of the relevant fields in context mask to
- * 0 at the same time, else set them to 0x1.
- */
- context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
- sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
- qpc_mask->sq_cur_blk_addr = 0;
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
-
- context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
- hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- cpu_to_le32(sge_cur_blk >>
- PAGE_ADDR_SHIFT) : 0;
- roce_set_field(context->byte_184_irrl_idx,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
- ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
- HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- (sge_cur_blk >>
- (32 + PAGE_ADDR_SHIFT)) : 0);
- qpc_mask->sq_cur_sge_blk_addr = 0;
- roce_set_field(qpc_mask->byte_184_irrl_idx,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
-
- context->rx_sq_cur_blk_addr =
- cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
- roce_set_field(context->byte_232_irrl_sge,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
- sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
- qpc_mask->rx_sq_cur_blk_addr = 0;
- roce_set_field(qpc_mask->byte_232_irrl_sge,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+ ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
+ if (ret) {
+		ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
+ return ret;
+ }
/*
* Set some fields in context to zero, Because the default values
@@ -4108,21 +4174,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return 0;
}
-static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
-
- if ((cur_state != IB_QPS_RESET &&
- (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
- ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
- (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
- (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
- return true;
-
- return false;
-
-}
-
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4226,6 +4277,28 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
return 0;
}
+static bool check_qp_state(enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ static const bool sm[][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true },
+ [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true,
+ [IB_QPS_RTR] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
+ [IB_QPS_SQD] = {},
+ [IB_QPS_SQE] = {},
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ };
+
+ return sm[cur_state][new_state];
+}
+
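+
check_qp_state() replaces the old predicate chain with a transition matrix indexed by [current][next] state. A minimal standalone sketch of the same table-driven technique, using illustrative states rather than the IB_QPS_* values:

	#include <stdbool.h>
	#include <stdio.h>

	enum demo_state { ST_RESET, ST_INIT, ST_READY, ST_ERR, ST_MAX };

	/* allowed transitions as a static bool matrix, mirroring the
	 * designated-initializer layout of check_qp_state() */
	static bool demo_allowed(enum demo_state cur, enum demo_state next)
	{
		static const bool sm[ST_MAX][ST_MAX] = {
			[ST_RESET] = { [ST_RESET] = true, [ST_INIT] = true },
			[ST_INIT]  = { [ST_RESET] = true, [ST_READY] = true,
				       [ST_ERR] = true },
			[ST_READY] = { [ST_RESET] = true, [ST_ERR] = true },
			[ST_ERR]   = { [ST_RESET] = true, [ST_ERR] = true },
		};

		return sm[cur][next];
	}

	int main(void)
	{
		printf("INIT->READY: %d\n", demo_allowed(ST_INIT, ST_READY)); /* 1 */
		printf("READY->INIT: %d\n", demo_allowed(ST_READY, ST_INIT)); /* 0 */
		return 0;
	}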
static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4237,6 +4310,11 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
int ret = 0;
+ if (!check_qp_state(cur_state, new_state)) {
+ ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
+ return -EINVAL;
+ }
+
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, sizeof(*qpc_mask));
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
@@ -4247,23 +4325,11 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
- if (ret)
- goto out;
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
- if (ret)
- goto out;
- } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
- /* Nothing */
- ;
- } else {
- ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
- ret = -EINVAL;
- goto out;
}
-out:
return ret;
}
@@ -4554,19 +4620,20 @@ out:
return ret;
}
-static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
+static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
- switch (state) {
- case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
- case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
- case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
- case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
- case HNS_ROCE_QP_ST_SQ_DRAINING:
- case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
- case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
- case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
- default: return -1;
- }
+ static const enum ib_qp_state map[] = {
+ [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
+ [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
+ [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
+ [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
+ [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
+ [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
+ [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
+ [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
+ };
+
+ return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}
static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
@@ -4639,7 +4706,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->path_mig_state = IB_MIG_ARMED;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
- qp_attr->qkey = V2_QKEY_VAL;
+ qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
@@ -4838,6 +4905,184 @@ out:
return ret;
}
+static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
+ u32 cqn, void *mb_buf, u64 *mtts_wqe,
+ u64 *mtts_idx, dma_addr_t dma_handle_wqe,
+ dma_addr_t dma_handle_idx)
+{
+ struct hns_roce_srq_context *srq_context;
+
+ srq_context = mb_buf;
+ memset(srq_context, 0, sizeof(*srq_context));
+
+ roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
+ SRQC_BYTE_4_SRQ_ST_S, 1);
+
+ roce_set_field(srq_context->byte_4_srqn_srqst,
+ SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
+ SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
+ to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
+ srq->wqe_cnt));
+ roce_set_field(srq_context->byte_4_srqn_srqst,
+ SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
+ ilog2(srq->wqe_cnt));
+
+ roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
+ SRQC_BYTE_4_SRQN_S, srq->srqn);
+
+ roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
+
+ roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
+ SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
+
+ srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
+
+ roce_set_field(srq_context->byte_24_wqe_bt_ba,
+ SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
+ SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
+ dma_handle_wqe >> 35);
+
+ roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
+ SRQC_BYTE_28_PD_S, pdn);
+ roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
+ SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
+ fls(srq->max_gs - 1));
+
+ srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
+ roce_set_field(srq_context->rsv_idx_bt_ba,
+ SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
+ SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
+ dma_handle_idx >> 35);
+
+ srq_context->idx_cur_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
+ SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
+ SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
+ to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
+ srq->wqe_cnt));
+
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
+ SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
+ to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
+ SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
+ to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));
+
+ srq_context->idx_nxt_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
+ roce_set_field(srq_context->rsv_idxnxtblkaddr,
+ SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
+ SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
+ roce_set_field(srq_context->byte_56_xrc_cqn,
+ SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
+ cqn);
+ roce_set_field(srq_context->byte_56_xrc_cqn,
+ SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
+ SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
+ roce_set_field(srq_context->byte_56_xrc_cqn,
+ SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
+ SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
+
+ roce_set_bit(srq_context->db_record_addr_record_en,
+ SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
+}
+
+static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
+ struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_srq_context *srq_context;
+ struct hns_roce_srq_context *srqc_mask;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ if (srq_attr_mask & IB_SRQ_LIMIT) {
+ if (srq_attr->srq_limit >= srq->wqe_cnt)
+ return -EINVAL;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+ srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
+
+ memset(srqc_mask, 0xff, sizeof(*srqc_mask));
+
+ roce_set_field(srq_context->byte_8_limit_wl,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
+ roce_set_field(srqc_mask->byte_8_limit_wl,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
+ HNS_ROCE_CMD_MODIFY_SRQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+				  "failed to process cmd of modifying SRQ, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
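
hns_roce_v2_modify_srq() packs two contexts into one mailbox buffer: the new values first, then a mask initialised to all-ones so that only explicitly cleared fields are changed. A standalone sketch of that layout, with an assumed demo structure standing in for hns_roce_srq_context:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	struct demo_ctx { uint32_t limit_wl; uint32_t other; };

	int main(void)
	{
		struct demo_ctx bufs[2];	/* mailbox->buf analogue: ctx, then mask */
		struct demo_ctx *ctx = &bufs[0];
		struct demo_ctx *mask = ctx + 1;	/* second context in the buffer */

		memset(ctx, 0, sizeof(*ctx));
		memset(mask, 0xff, sizeof(*mask));	/* default: keep everything */

		ctx->limit_wl = 16;	/* new watermark value */
		mask->limit_wl = 0;	/* cleared => hardware takes the ctx value */

		printf("limit_wl=%u mask=0x%x\n", ctx->limit_wl, mask->limit_wl);
		return 0;
	}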
+
+static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_srq_context *srq_context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int limit_wl;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
+ HNS_ROCE_CMD_QUERY_SRQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd of querying SRQ, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S);
+
+ attr->srq_limit = limit_wl;
+ attr->max_wr = srq->wqe_cnt - 1;
+ attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE;
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
@@ -4989,24 +5234,14 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
hns_roce_write64(hr_dev, doorbell, eq->doorbell);
}
-static inline void *get_eqe_buf(struct hns_roce_eq *eq, unsigned long offset)
-{
- u32 buf_chk_sz;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
- if (eq->buf.nbufs == 1)
- return eq->buf.direct.buf + offset % buf_chk_sz;
- else
- return eq->buf.page_list[offset / buf_chk_sz].buf +
- offset % buf_chk_sz;
-}
-
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_aeqe *aeqe;
- aeqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
- HNS_ROCE_AEQ_ENTRY_SIZE);
+ aeqe = hns_roce_buf_offset(eq->mtr.kmem,
+ (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE);
+
return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
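
next_aeqe_sw_v2() decides whether an entry is new by XORing its owner bit with the consumer's lap parity, (cons_index & entries), which flips each time the power-of-two ring wraps. A standalone sketch of the same owner-bit test, with an illustrative ring and names:

	#include <stdbool.h>
	#include <stdio.h>

	#define ENTRIES 8	/* must be a power of two */

	struct demo_eqe { unsigned int owner : 1; };

	static struct demo_eqe ring[ENTRIES];

	/* an entry is treated as new when its owner bit differs from the lap
	 * parity, mirroring owner ^ !!(cons_index & entries) in the driver */
	static bool eqe_valid(unsigned int cons_index)
	{
		unsigned int parity = !!(cons_index & ENTRIES);

		return ring[cons_index & (ENTRIES - 1)].owner ^ parity;
	}

	int main(void)
	{
		ring[0].owner = 1;	/* producer marks slot 0 during lap 0 */
		printf("lap0 slot0: %d\n", eqe_valid(0));	/* 1: new entry */
		printf("lap1 slot0: %d\n", eqe_valid(ENTRIES));	/* 0: stale */
		return 0;
	}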
@@ -5103,8 +5338,9 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_ceqe *ceqe;
- ceqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
- HNS_ROCE_CEQ_ENTRY_SIZE);
+ ceqe = hns_roce_buf_offset(eq->mtr.kmem,
+ (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE);
return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
@@ -5263,17 +5499,15 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
- if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0)
- hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
- hns_roce_buf_free(hr_dev, eq->buf.size, &eq->buf);
+ hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}
-static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq,
- void *mb_buf)
+static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
+ void *mb_buf)
{
+ u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
struct hns_roce_eq_context *eqc;
- u64 ba[MTT_MIN_COUNT] = { 0 };
+ u64 bt_ba = 0;
int count;
eqc = mb_buf;
@@ -5281,31 +5515,18 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* init eqc */
eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
- eq->hop_num = hr_dev->caps.eqe_hop_num;
eq->cons_index = 0;
eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
- eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
- eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
eq->shift = ilog2((unsigned int)eq->entries);
- /* if not muti-hop, eqe buffer only use one trunk */
- if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0) {
- eq->eqe_ba = eq->buf.direct.map;
- eq->cur_eqe_ba = eq->eqe_ba;
- if (eq->buf.npages > 1)
- eq->nxt_eqe_ba = eq->eqe_ba + (1 << eq->eqe_buf_pg_sz);
- else
- eq->nxt_eqe_ba = eq->eqe_ba;
- } else {
- count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, ba,
- MTT_MIN_COUNT, &eq->eqe_ba);
- eq->cur_eqe_ba = ba[0];
- if (count > 1)
- eq->nxt_eqe_ba = ba[1];
- else
- eq->nxt_eqe_ba = ba[0];
+	/* if not multi-hop, the EQE buffer uses only one trunk */
+ count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
+ &bt_ba);
+ if (count < 1) {
+ dev_err(hr_dev->dev, "failed to find EQE mtr\n");
+ return -ENOBUFS;
}
/* set eqc state */
@@ -5339,12 +5560,12 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set eqe_ba_pg_sz */
roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
HNS_ROCE_EQC_BA_PG_SZ_S,
- eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
/* set eqe_buf_pg_sz */
roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
HNS_ROCE_EQC_BUF_PG_SZ_S,
- eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
/* set eq_producer_idx */
roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
@@ -5363,13 +5584,13 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
HNS_ROCE_EQC_REPORT_TIMER_S,
HNS_ROCE_EQ_INIT_REPORT_TIMER);
- /* set eqe_ba [34:3] */
+ /* set bt_ba [34:3] */
roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
- HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
+ HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);
- /* set eqe_ba [64:35] */
+ /* set bt_ba [64:35] */
roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
- HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
+ HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);
/* set eq shift */
roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
@@ -5381,15 +5602,15 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set cur_eqe_ba [27:12] */
roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
- HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
+ HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);
/* set cur_eqe_ba [59:28] */
roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
- HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
+ HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);
/* set cur_eqe_ba [63:60] */
roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
- HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
+ HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);
/* set eq consumer idx */
roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
@@ -5397,97 +5618,38 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set nex_eqe_ba[43:12] */
roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
- HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
+ HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
/* set nex_eqe_ba[63:44] */
roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
- HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
-}
+ HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
-static int map_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
- u32 page_shift)
-{
- struct hns_roce_buf_region region = {};
- dma_addr_t *buf_list = NULL;
- int ba_num;
- int ret;
-
- ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
- 1 << page_shift);
- hns_roce_init_buf_region(&region, hr_dev->caps.eqe_hop_num, 0, ba_num);
-
- /* alloc a tmp list for storing eq buf address */
- ret = hns_roce_alloc_buf_list(&region, &buf_list, 1);
- if (ret) {
- dev_err(hr_dev->dev, "alloc eq buf_list error\n");
- return ret;
- }
-
- ba_num = hns_roce_get_kmem_bufs(hr_dev, buf_list, region.count,
- region.offset, &eq->buf);
- if (ba_num != region.count) {
- dev_err(hr_dev->dev, "get eqe buf err,expect %d,ret %d.\n",
- region.count, ba_num);
- ret = -ENOBUFS;
- goto done;
- }
-
- hns_roce_mtr_init(&eq->mtr, PAGE_SHIFT + hr_dev->caps.eqe_ba_pg_sz,
- page_shift);
- ret = hns_roce_mtr_attach(hr_dev, &eq->mtr, &buf_list, &region, 1);
- if (ret)
- dev_err(hr_dev->dev, "mtr attach error for eqe\n");
-
- goto done;
-
- hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
-done:
- hns_roce_free_buf_list(&buf_list, 1);
-
- return ret;
+ return 0;
}
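
config_eqc() splits each 64-bit DMA address across narrow register fields, e.g. bt_ba >> 3 for bits [34:3] and bt_ba >> 35 for the remaining high bits. A standalone round-trip of that split, assuming an 8-byte-aligned address (the field widths below follow the shifts in the code; the sample address is arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t bt_ba = 0x0000004321fedcb8ULL;	/* sample 8-byte-aligned DMA addr */
		uint32_t lo = (uint32_t)(bt_ba >> 3);	/* bits [34:3] */
		uint32_t hi = (uint32_t)(bt_ba >> 35);	/* bits [63:35] */
		uint64_t back = ((uint64_t)hi << 35) | ((uint64_t)lo << 3);

		/* rebuilt address matches as long as bits [2:0] were zero */
		printf("orig=%#llx rebuilt=%#llx\n",
		       (unsigned long long)bt_ba, (unsigned long long)back);
		return 0;
	}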
static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
- struct hns_roce_buf *buf = &eq->buf;
- bool is_mhop = false;
- u32 page_shift;
- u32 mhop_num;
- u32 max_size;
- int ret;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
- page_shift = PAGE_SHIFT + hr_dev->caps.eqe_buf_pg_sz;
- mhop_num = hr_dev->caps.eqe_hop_num;
- if (!mhop_num) {
- max_size = 1 << page_shift;
- buf->size = max_size;
- } else if (mhop_num == HNS_ROCE_HOP_NUM_0) {
- max_size = eq->entries * eq->eqe_size;
- buf->size = max_size;
- } else {
- max_size = 1 << page_shift;
- buf->size = PAGE_ALIGN(eq->entries * eq->eqe_size);
- is_mhop = true;
- }
+ if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
+ eq->hop_num = 0;
+ else
+ eq->hop_num = hr_dev->caps.eqe_hop_num;
- ret = hns_roce_buf_alloc(hr_dev, buf->size, max_size, buf, page_shift);
- if (ret) {
- dev_err(hr_dev->dev, "alloc eq buf error\n");
- return ret;
- }
+ buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = eq->entries * eq->eqe_size;
+ buf_attr.region[0].hopnum = eq->hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
- if (is_mhop) {
- ret = map_eq_buf(hr_dev, eq, page_shift);
- if (ret) {
- dev_err(hr_dev->dev, "map roce buf error\n");
- goto err_alloc;
- }
- }
+ err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
+ hr_dev->caps.eqe_ba_pg_sz +
+ HNS_HW_PAGE_SHIFT, NULL, 0);
+ if (err)
+		dev_err(hr_dev->dev, "failed to alloc EQE mtr, err = %d.\n", err);
- return 0;
-err_alloc:
- hns_roce_buf_free(hr_dev, buf->size, buf);
- return ret;
+ return err;
}
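
alloc_eq_buf() now describes the EQE buffer as a single mtr region instead of branching on the hop number. A standalone sketch of filling such a single-region buffer attribute; the demo structure below is illustrative, not the driver's hns_roce_buf_attr, and the 4K hardware page shift is an assumption:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	#define DEMO_HW_PAGE_SHIFT 12	/* assumed 4K hardware page */

	struct demo_buf_region { size_t size; int hopnum; };

	struct demo_buf_attr {
		unsigned int page_shift;
		struct demo_buf_region region[4];
		int region_count;
		bool fixed_page;
	};

	int main(void)
	{
		unsigned int entries = 4096, eqe_size = 16, eqe_buf_pg_sz = 0;
		struct demo_buf_attr attr = {
			/* one region covering the whole EQE array */
			.page_shift = eqe_buf_pg_sz + DEMO_HW_PAGE_SHIFT,
			.region = { { .size = (size_t)entries * eqe_size,
				      .hopnum = 1 } },
			.region_count = 1,
			.fixed_page = true,
		};

		printf("region0: %zu bytes, hop %d, page shift %u\n",
		       attr.region[0].size, attr.region[0].hopnum,
		       attr.page_shift);
		return 0;
	}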
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
@@ -5499,15 +5661,16 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
+ if (IS_ERR_OR_NULL(mailbox))
+ return -ENOMEM;
ret = alloc_eq_buf(hr_dev, eq);
- if (ret) {
- ret = -ENOMEM;
+ if (ret)
goto free_cmd_mbox;
- }
- hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
+
+ ret = config_eqc(hr_dev, eq, mailbox->buf);
+ if (ret)
+ goto err_cmd_mbox;
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
@@ -5731,294 +5894,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
destroy_workqueue(hr_dev->irq_workq);
}
-static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
- u32 cqn, void *mb_buf, u64 *mtts_wqe,
- u64 *mtts_idx, dma_addr_t dma_handle_wqe,
- dma_addr_t dma_handle_idx)
-{
- struct hns_roce_srq_context *srq_context;
-
- srq_context = mb_buf;
- memset(srq_context, 0, sizeof(*srq_context));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
- SRQC_BYTE_4_SRQ_ST_S, 1);
-
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
- (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
- hr_dev->caps.srqwqe_hop_num));
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
- ilog2(srq->wqe_cnt));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
- SRQC_BYTE_4_SRQN_S, srq->srqn);
-
- roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
-
- roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
- SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
-
- srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
-
- roce_set_field(srq_context->byte_24_wqe_bt_ba,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
- dma_handle_wqe >> 35);
-
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
- SRQC_BYTE_28_PD_S, pdn);
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
- SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
- fls(srq->max_gs - 1));
-
- srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
- roce_set_field(srq_context->rsv_idx_bt_ba,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
- dma_handle_idx >> 35);
-
- srq_context->idx_cur_blk_addr =
- cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
- mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
- hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
- hr_dev->caps.idx_hop_num);
-
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
- hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
- hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
-
- srq_context->idx_nxt_blk_addr =
- cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
- roce_set_field(srq_context->rsv_idxnxtblkaddr,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
- mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
- cqn);
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
- hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
- hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
-
- roce_set_bit(srq_context->db_record_addr_record_en,
- SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
-}
-
-static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
- struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask,
- struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
- struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_srq_context *srq_context;
- struct hns_roce_srq_context *srqc_mask;
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
-
- if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit >= srq->wqe_cnt)
- return -EINVAL;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- srq_context = mailbox->buf;
- srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
-
- memset(srqc_mask, 0xff, sizeof(*srqc_mask));
-
- roce_set_field(srq_context->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
- roce_set_field(srqc_mask->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
-
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
- HNS_ROCE_CMD_MODIFY_SRQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to process cmd when modifying SRQ, ret = %d\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
- struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_srq_context *srq_context;
- struct hns_roce_cmd_mailbox *mailbox;
- int limit_wl;
- int ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- srq_context = mailbox->buf;
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
- HNS_ROCE_CMD_QUERY_SRQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
- if (ret) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to process cmd when querying SRQ, ret = %d\n",
- ret);
- goto out;
- }
-
- limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S);
-
- attr->srq_limit = limit_wl;
- attr->max_wr = srq->wqe_cnt - 1;
- attr->max_sge = srq->max_gs;
-
- memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
-
-out:
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- return ret;
-}
-
-static int find_empty_entry(struct hns_roce_idx_que *idx_que,
- unsigned long size)
-{
- int wqe_idx;
-
- if (unlikely(bitmap_full(idx_que->bitmap, size)))
- return -ENOSPC;
-
- wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
-
- bitmap_set(idx_que->bitmap, wqe_idx, 1);
-
- return wqe_idx;
-}
-
-static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
- int cur_idx, int wqe_idx)
-{
- unsigned int *addr;
-
- addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
- cur_idx * idx_que->entry_sz);
- *addr = wqe_idx;
-}
-
-static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
- const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
- struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_v2_wqe_data_seg *dseg;
- struct hns_roce_v2_db srq_db;
- unsigned long flags;
- int ret = 0;
- int wqe_idx;
- void *wqe;
- int nreq;
- int ind;
- int i;
-
- spin_lock_irqsave(&srq->lock, flags);
-
- ind = srq->head & (srq->wqe_cnt - 1);
-
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (unlikely(wr->num_sge > srq->max_gs)) {
- ret = -EINVAL;
- *bad_wr = wr;
- break;
- }
-
- if (unlikely(srq->head == srq->tail)) {
- ret = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
- wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
- if (wqe_idx < 0) {
- ret = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
- fill_idx_queue(&srq->idx_que, ind, wqe_idx);
- wqe = get_srq_wqe(srq, wqe_idx);
- dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
-
- for (i = 0; i < wr->num_sge; ++i) {
- dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
- dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
- dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
- }
-
- if (i < srq->max_gs) {
- dseg[i].len = 0;
- dseg[i].lkey = cpu_to_le32(0x100);
- dseg[i].addr = 0;
- }
-
- srq->wrid[wqe_idx] = wr->wr_id;
- ind = (ind + 1) & (srq->wqe_cnt - 1);
- }
-
- if (likely(nreq)) {
- srq->head += nreq;
-
- /*
- * Make sure that descriptors are written before
- * doorbell record.
- */
- wmb();
-
- srq_db.byte_4 =
- cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
- (srq->srqn & V2_DB_BYTE_4_TAG_M));
- srq_db.parameter = cpu_to_le32(srq->head);
-
- hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
-
- }
-
- spin_unlock_irqrestore(&srq->lock, flags);
-
- return ret;
-}
-
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
.query_cqc_info = hns_roce_v2_query_cqc_info,
};
@@ -6161,7 +6036,7 @@ error_failed_kzalloc:
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
bool reset)
{
- struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct hns_roce_dev *hr_dev = handle->priv;
if (!hr_dev)
return;
@@ -6241,7 +6116,7 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
- hr_dev = (struct hns_roce_dev *)handle->priv;
+ hr_dev = handle->priv;
if (!hr_dev)
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 82dd9f6f4845..e176b0aaa4ac 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -92,7 +92,9 @@
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
-#define HNS_ROCE_INVALID_LKEY 0x100
+#define HNS_ROCE_INVALID_LKEY 0x0
+#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
+
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
@@ -1241,10 +1243,9 @@ struct hns_roce_func_clear {
};
#define FUNC_CLEAR_RST_FUN_DONE_S 0
-/* Each physical function manages up to 248 virtual functions;
- * it takes up to 100ms for each function to execute clear;
- * if an abnormal reset occurs, it is executed twice at most;
- * so it takes up to 249 * 2 * 100ms.
+/* Each physical function manages up to 248 virtual functions, it takes up to
+ * 100ms for each function to execute clear. If an abnormal reset occurs, it is
+ * executed twice at most, so it takes up to 249 * 2 * 100ms.
*/
#define HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS (249 * 2 * 100)
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL 40
@@ -1648,7 +1649,7 @@ struct hns_roce_query_pf_caps_c {
struct hns_roce_query_pf_caps_d {
__le32 wq_hop_num_max_srqs;
__le16 srq_depth;
- __le16 rsv;
+ __le16 cap_flags_ex;
__le32 num_ceqs_ceq_depth;
__le32 arm_st_aeq_depth;
__le32 num_uars_rsv_pds;
@@ -1978,7 +1979,7 @@ int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
void __iomem *dest)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d0031d559213..50763cf4fa3d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -233,7 +233,6 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
enum ib_mtu mtu;
u8 port;
- assert(port_num > 0);
port = port_num - 1;
/* props being zeroed by the caller, avoid zeroing it here */
@@ -579,33 +578,12 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
int ret;
struct device *dev = hr_dev->dev;
- ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
- HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_mtt_segs, 1);
- if (ret) {
- dev_err(dev, "Failed to init MTT context memory, aborting.\n");
- return ret;
- }
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table,
- HEM_TYPE_CQE,
- hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_cqe_segs, 1);
- if (ret) {
- dev_err(dev,
- "Failed to init CQE context memory, aborting.\n");
- goto err_unmap_cqe;
- }
- }
-
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
hr_dev->caps.num_mtpts, 1);
if (ret) {
dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
- goto err_unmap_mtt;
+ return ret;
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
@@ -660,32 +638,6 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
}
}
- if (hr_dev->caps.num_srqwqe_segs) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table,
- HEM_TYPE_SRQWQE,
- hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_srqwqe_segs, 1);
- if (ret) {
- dev_err(dev,
- "Failed to init MTT srqwqe memory, aborting.\n");
- goto err_unmap_srq;
- }
- }
-
- if (hr_dev->caps.num_idx_segs) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_idx_table,
- HEM_TYPE_IDX,
- hr_dev->caps.idx_entry_sz,
- hr_dev->caps.num_idx_segs, 1);
- if (ret) {
- dev_err(dev,
- "Failed to init MTT idx memory, aborting.\n");
- goto err_unmap_srqwqe;
- }
- }
-
if (hr_dev->caps.sccc_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table,
@@ -695,7 +647,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
if (ret) {
dev_err(dev,
"Failed to init SCC context memory, aborting.\n");
- goto err_unmap_idx;
+ goto err_unmap_srq;
}
}
@@ -733,17 +685,6 @@ err_unmap_ctx:
if (hr_dev->caps.sccc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table);
-
-err_unmap_idx:
- if (hr_dev->caps.num_idx_segs)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_idx_table);
-
-err_unmap_srqwqe:
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table);
-
err_unmap_srq:
if (hr_dev->caps.srqc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
@@ -765,14 +706,6 @@ err_unmap_qp:
err_unmap_dmpt:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
-err_unmap_mtt:
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table);
-
-err_unmap_cqe:
- hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
-
return ret;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 176f34692f88..4c0bbb12770d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -66,645 +66,89 @@ int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
- unsigned long *seg)
+static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ u32 pd, u64 iova, u64 size, u32 access)
{
- int o;
- u32 m;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned long obj = 0;
+ int err;
- spin_lock(&buddy->lock);
-
- for (o = order; o <= buddy->max_order; ++o) {
- if (buddy->num_free[o]) {
- m = 1 << (buddy->max_order - o);
- *seg = find_first_bit(buddy->bits[o], m);
- if (*seg < m)
- goto found;
- }
- }
- spin_unlock(&buddy->lock);
- return -EINVAL;
-
- found:
- clear_bit(*seg, buddy->bits[o]);
- --buddy->num_free[o];
-
- while (o > order) {
- --o;
- *seg <<= 1;
- set_bit(*seg ^ 1, buddy->bits[o]);
- ++buddy->num_free[o];
- }
-
- spin_unlock(&buddy->lock);
-
- *seg <<= order;
- return 0;
-}
-
-static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
- int order)
-{
- seg >>= order;
-
- spin_lock(&buddy->lock);
-
- while (test_bit(seg ^ 1, buddy->bits[order])) {
- clear_bit(seg ^ 1, buddy->bits[order]);
- --buddy->num_free[order];
- seg >>= 1;
- ++order;
- }
-
- set_bit(seg, buddy->bits[order]);
- ++buddy->num_free[order];
-
- spin_unlock(&buddy->lock);
-}
-
-static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
-{
- int i, s;
-
- buddy->max_order = max_order;
- spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1,
- sizeof(*buddy->bits),
- GFP_KERNEL);
- buddy->num_free = kcalloc(buddy->max_order + 1,
- sizeof(*buddy->num_free),
- GFP_KERNEL);
- if (!buddy->bits || !buddy->num_free)
- goto err_out;
-
- for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
- __GFP_NOWARN);
- if (!buddy->bits[i]) {
- buddy->bits[i] = vzalloc(array_size(s, sizeof(long)));
- if (!buddy->bits[i])
- goto err_out_free;
- }
- }
-
- set_bit(0, buddy->bits[buddy->max_order]);
- buddy->num_free[buddy->max_order] = 1;
-
- return 0;
-
-err_out_free:
- for (i = 0; i <= buddy->max_order; ++i)
- kvfree(buddy->bits[i]);
-
-err_out:
- kfree(buddy->bits);
- kfree(buddy->num_free);
- return -ENOMEM;
-}
-
-static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
-{
- int i;
-
- for (i = 0; i <= buddy->max_order; ++i)
- kvfree(buddy->bits[i]);
-
- kfree(buddy->bits);
- kfree(buddy->num_free);
-}
-
-static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
- unsigned long *seg, u32 mtt_type)
-{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- struct hns_roce_hem_table *table;
- struct hns_roce_buddy *buddy;
- int ret;
-
- switch (mtt_type) {
- case MTT_TYPE_WQE:
- buddy = &mr_table->mtt_buddy;
- table = &mr_table->mtt_table;
- break;
- case MTT_TYPE_CQE:
- buddy = &mr_table->mtt_cqe_buddy;
- table = &mr_table->mtt_cqe_table;
- break;
- case MTT_TYPE_SRQWQE:
- buddy = &mr_table->mtt_srqwqe_buddy;
- table = &mr_table->mtt_srqwqe_table;
- break;
- case MTT_TYPE_IDX:
- buddy = &mr_table->mtt_idx_buddy;
- table = &mr_table->mtt_idx_table;
- break;
- default:
- dev_err(hr_dev->dev, "Unsupport MTT table type: %d\n",
- mtt_type);
- return -EINVAL;
- }
-
- ret = hns_roce_buddy_alloc(buddy, order, seg);
- if (ret)
- return ret;
-
- ret = hns_roce_table_get_range(hr_dev, table, *seg,
- *seg + (1 << order) - 1);
- if (ret) {
- hns_roce_buddy_free(buddy, *seg, order);
- return ret;
- }
-
- return 0;
-}
-
-int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
- struct hns_roce_mtt *mtt)
-{
- int ret;
- int i;
-
- /* Page num is zero, correspond to DMA memory register */
- if (!npages) {
- mtt->order = -1;
- mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
- return 0;
- }
-
- /* Note: if page_shift is zero, FAST memory register */
- mtt->page_shift = page_shift;
-
- /* Compute MTT entry necessary */
- for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
- i <<= 1)
- ++mtt->order;
-
- /* Allocate MTT entry */
- ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
- mtt->mtt_type);
- if (ret)
- return -ENOMEM;
-
- return 0;
-}
-
-void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
-{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-
- if (mtt->order < 0)
- return;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- case MTT_TYPE_CQE:
- hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- case MTT_TYPE_SRQWQE:
- hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- case MTT_TYPE_IDX:
- hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- default:
- dev_err(hr_dev->dev,
- "Unsupport mtt type %d, clean mtt failed\n",
- mtt->mtt_type);
- break;
- }
-}
-
-static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr, int err_loop_index,
- int loop_i, int loop_j)
-{
- struct device *dev = hr_dev->dev;
- u32 mhop_num;
- u32 pbl_bt_sz;
- u64 bt_idx;
- int i, j;
-
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- mhop_num = hr_dev->caps.pbl_hop_num;
-
- i = loop_i;
- if (mhop_num == 3 && err_loop_index == 2) {
- for (; i >= 0; i--) {
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
- if (i == loop_i && j >= loop_j)
- break;
-
- bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, pbl_bt_sz,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
- }
- }
- } else if (mhop_num == 3 && err_loop_index == 1) {
- for (i -= 1; i >= 0; i--) {
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
- bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, pbl_bt_sz,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
- }
- }
- } else if (mhop_num == 2 && err_loop_index == 1) {
- for (i -= 1; i >= 0; i--)
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
- } else {
- dev_warn(dev, "not support: mhop_num=%d, err_loop_index=%d.",
- mhop_num, err_loop_index);
- return;
- }
-
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
- mr->pbl_bt_l0 = NULL;
- mr->pbl_l0_dma_addr = 0;
-}
-static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr, u32 pbl_bt_sz)
-{
- struct device *dev = hr_dev->dev;
-
- if (npages > pbl_bt_sz / 8) {
- dev_err(dev, "npages %d is larger than buf_pg_sz!",
- npages);
- return -EINVAL;
- }
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf)
+ /* Allocate a key for mr from mr_table */
+ err = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &obj);
+ if (err) {
+ ibdev_err(ibdev,
+ "failed to alloc bitmap for MR key, ret = %d.\n",
+ err);
return -ENOMEM;
-
- mr->pbl_size = npages;
- mr->pbl_ba = mr->pbl_dma_addr;
- mr->pbl_hop_num = 1;
- mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
- mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
- return 0;
-
-}
-
-
-static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr, u32 pbl_bt_sz)
-{
- struct device *dev = hr_dev->dev;
- int npages_allocated;
- u64 pbl_last_bt_num;
- u64 pbl_bt_cnt = 0;
- u64 size;
- int i;
-
- pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
-
- /* alloc L1 BT */
- for (i = 0; i < pbl_bt_sz / 8; i++) {
- if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
- size = pbl_bt_sz;
- } else {
- npages_allocated = i * (pbl_bt_sz / 8);
- size = (npages - npages_allocated) * 8;
- }
- mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
- &(mr->pbl_l1_dma_addr[i]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1[i]) {
- hns_roce_loop_free(hr_dev, mr, 1, i, 0);
- return -ENOMEM;
- }
-
- *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
-
- pbl_bt_cnt++;
- if (pbl_bt_cnt >= pbl_last_bt_num)
- break;
}
- mr->l0_chunk_last_num = i + 1;
-
- return 0;
-}
-
-static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr, u32 pbl_bt_sz)
-{
- struct device *dev = hr_dev->dev;
- int mr_alloc_done = 0;
- int npages_allocated;
- u64 pbl_last_bt_num;
- u64 pbl_bt_cnt = 0;
- u64 bt_idx;
- u64 size;
- int i;
- int j = 0;
-
- pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
-
- mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
- sizeof(*mr->pbl_l2_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_l2_dma_addr)
- return -ENOMEM;
-
- mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
- sizeof(*mr->pbl_bt_l2),
- GFP_KERNEL);
- if (!mr->pbl_bt_l2)
- goto err_kcalloc_bt_l2;
-
- /* alloc L1, L2 BT */
- for (i = 0; i < pbl_bt_sz / 8; i++) {
- mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
- &(mr->pbl_l1_dma_addr[i]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1[i]) {
- hns_roce_loop_free(hr_dev, mr, 1, i, 0);
- goto err_dma_alloc_l0;
- }
-
- *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
-
- for (j = 0; j < pbl_bt_sz / 8; j++) {
- bt_idx = i * pbl_bt_sz / 8 + j;
-
- if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
- size = pbl_bt_sz;
- } else {
- npages_allocated = bt_idx *
- (pbl_bt_sz / 8);
- size = (npages - npages_allocated) * 8;
- }
- mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
- dev, size,
- &(mr->pbl_l2_dma_addr[bt_idx]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l2[bt_idx]) {
- hns_roce_loop_free(hr_dev, mr, 2, i, j);
- goto err_dma_alloc_l0;
- }
-
- *(mr->pbl_bt_l1[i] + j) =
- mr->pbl_l2_dma_addr[bt_idx];
-
- pbl_bt_cnt++;
- if (pbl_bt_cnt >= pbl_last_bt_num) {
- mr_alloc_done = 1;
- break;
- }
- }
+ mr->iova = iova; /* MR va starting addr */
+ mr->size = size; /* MR addr range */
+ mr->pd = pd; /* MR num */
+ mr->access = access; /* MR access permit */
+ mr->enabled = 0; /* MR active status */
+ mr->key = hw_index_to_key(obj); /* MR key */
- if (mr_alloc_done)
- break;
+ err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
+ if (err) {
+ ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
+ goto err_free_bitmap;
}
- mr->l0_chunk_last_num = i + 1;
- mr->l1_chunk_last_num = j + 1;
-
-
return 0;
-
-err_dma_alloc_l0:
- kfree(mr->pbl_bt_l2);
- mr->pbl_bt_l2 = NULL;
-
-err_kcalloc_bt_l2:
- kfree(mr->pbl_l2_dma_addr);
- mr->pbl_l2_dma_addr = NULL;
-
- return -ENOMEM;
+err_free_bitmap:
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
+ return err;
}
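For reference, the key/index conversion used above and in free_mr_key() below is a byte rotation; the exact rotate amounts mirror the driver's hw_index_to_key()/key_to_hw_index() helpers and should be treated as an assumption of this stand-alone sketch (valid for indices below 2^24):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* rotate the low 24 bits of the index into the high bytes of the key */
static uint32_t hw_index_to_key(unsigned long ind)
{
	return (uint32_t)(ind >> 24) | ((uint32_t)ind << 8);
}

static unsigned long key_to_hw_index(uint32_t key)
{
	return (key << 24) | (key >> 8);
}

int main(void)
{
	unsigned long obj = 0x123456;	/* bitmap index (below 2^24) */
	uint32_t key = hw_index_to_key(obj);

	assert(key_to_hw_index(key) == obj);	/* lossless round trip */
	printf("obj=0x%lx -> key=0x%x\n", obj, (unsigned int)key);
	return 0;
}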
-
-/* PBL multi hop addressing */
-static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr)
+static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
- struct device *dev = hr_dev->dev;
- u32 pbl_bt_sz;
- u32 mhop_num;
-
- mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0)
- return 0;
-
- if (mhop_num == 1)
- return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);
-
- mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
- sizeof(*mr->pbl_l1_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_l1_dma_addr)
- return -ENOMEM;
-
- mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1)
- goto err_kcalloc_bt_l1;
-
- /* alloc L0 BT */
- mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
- &(mr->pbl_l0_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_bt_l0)
- goto err_kcalloc_l2_dma;
-
- if (mhop_num == 2) {
- if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
- goto err_kcalloc_l2_dma;
- }
-
- if (mhop_num == 3) {
- if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
- goto err_kcalloc_l2_dma;
- }
-
+ unsigned long obj = key_to_hw_index(mr->key);
- mr->pbl_size = npages;
- mr->pbl_ba = mr->pbl_l0_dma_addr;
- mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
- mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
- mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
-
- return 0;
-
-err_kcalloc_l2_dma:
- kfree(mr->pbl_bt_l1);
- mr->pbl_bt_l1 = NULL;
-
-err_kcalloc_bt_l1:
- kfree(mr->pbl_l1_dma_addr);
- mr->pbl_l1_dma_addr = NULL;
-
- return -ENOMEM;
+ hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
}
-static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
- u64 size, u32 access, int npages,
- struct hns_roce_mr *mr)
+static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ size_t length, struct ib_udata *udata, u64 start,
+ int access)
{
- struct device *dev = hr_dev->dev;
- unsigned long index = 0;
- int ret;
-
- /* Allocate a key for mr from mr_table */
- ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
- if (ret)
- return -ENOMEM;
-
- mr->iova = iova; /* MR va starting addr */
- mr->size = size; /* MR addr range */
- mr->pd = pd; /* MR num */
- mr->access = access; /* MR access permit */
- mr->enabled = 0; /* MR active status */
- mr->key = hw_index_to_key(index); /* MR key */
-
- if (size == ~0ull) {
- mr->pbl_buf = NULL;
- mr->pbl_dma_addr = 0;
- /* PBL multi-hop addressing parameters */
- mr->pbl_bt_l2 = NULL;
- mr->pbl_bt_l1 = NULL;
- mr->pbl_bt_l0 = NULL;
- mr->pbl_l2_dma_addr = NULL;
- mr->pbl_l1_dma_addr = NULL;
- mr->pbl_l0_dma_addr = 0;
- } else {
- if (!hr_dev->caps.pbl_hop_num) {
- mr->pbl_buf = dma_alloc_coherent(dev,
- npages * BA_BYTE_LEN,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf)
- return -ENOMEM;
- } else {
- ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
- }
- }
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ bool is_fast = mr->type == MR_TYPE_FRMR;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
+ buf_attr.page_shift = is_fast ? PAGE_SHIFT :
+ hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = length;
+ buf_attr.region[0].hopnum = mr->pbl_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+ buf_attr.user_access = access;
+ /* fast MR's buffer is allocated before mapping, not at creation */
+ buf_attr.mtt_only = is_fast;
+
+ err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
+ hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, start);
+ if (err)
+ ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
+ else
+ mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
- return ret;
+ return err;
}
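One consequence of the attribute-driven scheme above: the PBL page size, not hand-built L0/L1/L2 tables, decides how many entries an MR needs. A quick user-space check of that arithmetic (HNS_HW_PAGE_SHIFT assumed to be 12, matching the driver's 4 KiB hardware page):

#include <stdio.h>

#define HNS_HW_PAGE_SHIFT 12	/* assumed 4 KiB hardware page, per the driver */

static unsigned long pbl_page_count(unsigned long length,
				    unsigned int pbl_buf_pg_sz)
{
	unsigned long page_size = 1UL << (HNS_HW_PAGE_SHIFT + pbl_buf_pg_sz);

	return (length + page_size - 1) / page_size;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* a 1 MiB MR: 256 entries with 4 KiB pages, 16 with 64 KiB pages */
	printf("%lu %lu\n", pbl_page_count(1UL << 20, 0),
	       pbl_page_count(1UL << 20, 4));
	return 0;
}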
-static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr)
+static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
- struct device *dev = hr_dev->dev;
- int npages_allocated;
- int npages;
- int i, j;
- u32 pbl_bt_sz;
- u32 mhop_num;
- u64 bt_idx;
-
- npages = mr->pbl_size;
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0)
- return;
-
- if (mhop_num == 1) {
- dma_free_coherent(dev, (unsigned int)(npages * BA_BYTE_LEN),
- mr->pbl_buf, mr->pbl_dma_addr);
- return;
- }
-
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
- mr->pbl_l0_dma_addr);
-
- if (mhop_num == 2) {
- for (i = 0; i < mr->l0_chunk_last_num; i++) {
- if (i == mr->l0_chunk_last_num - 1) {
- npages_allocated =
- i * (pbl_bt_sz / BA_BYTE_LEN);
-
- dma_free_coherent(dev,
- (npages - npages_allocated) * BA_BYTE_LEN,
- mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- break;
- }
-
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
- }
- } else if (mhop_num == 3) {
- for (i = 0; i < mr->l0_chunk_last_num; i++) {
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
- bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j;
-
- if ((i == mr->l0_chunk_last_num - 1)
- && j == mr->l1_chunk_last_num - 1) {
- npages_allocated = bt_idx *
- (pbl_bt_sz / BA_BYTE_LEN);
-
- dma_free_coherent(dev,
- (npages - npages_allocated) *
- BA_BYTE_LEN,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
-
- break;
- }
-
- dma_free_coherent(dev, pbl_bt_sz,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
- }
- }
- }
-
- kfree(mr->pbl_bt_l1);
- kfree(mr->pbl_l1_dma_addr);
- mr->pbl_bt_l1 = NULL;
- mr->pbl_l1_dma_addr = NULL;
- if (mhop_num == 3) {
- kfree(mr->pbl_bt_l2);
- kfree(mr->pbl_l2_dma_addr);
- mr->pbl_bt_l2 = NULL;
- mr->pbl_l2_dma_addr = NULL;
- }
+ hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
- struct device *dev = hr_dev->dev;
- int npages = 0;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
if (mr->enabled) {
@@ -712,27 +156,12 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
key_to_hw_index(mr->key) &
(hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
+ ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
+ ret);
}
- if (mr->size != ~0ULL) {
- if (mr->type == MR_TYPE_MR)
- npages = ib_umem_page_count(mr->umem);
-
- if (!hr_dev->caps.pbl_hop_num)
- dma_free_coherent(dev,
- (unsigned int)(npages * BA_BYTE_LEN),
- mr->pbl_buf, mr->pbl_dma_addr);
- else
- hns_roce_mhop_free(hr_dev, mr);
- }
-
- if (mr->enabled)
- hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
- key_to_hw_index(mr->key));
-
- hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
- key_to_hw_index(mr->key), BITMAP_NO_RR);
+ free_mr_pbl(hr_dev, mr);
+ free_mr_key(hr_dev, mr);
}
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
@@ -742,18 +171,12 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
unsigned long mtpt_idx = key_to_hw_index(mr->key);
struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-
- /* Prepare HEM entry memory */
- ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
- if (ret)
- return ret;
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
- goto err_table;
+ return ret;
}
if (mr->type != MR_TYPE_FRMR)
@@ -780,137 +203,6 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
err_page:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-err_table:
- hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
- return ret;
-}
-
-static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, u32 start_index,
- u32 npages, u64 *page_list)
-{
- struct hns_roce_hem_table *table;
- dma_addr_t dma_handle;
- __le64 *mtts;
- u32 bt_page_size;
- u32 i;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- table = &hr_dev->mr_table.mtt_table;
- bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_CQE:
- table = &hr_dev->mr_table.mtt_cqe_table;
- bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_SRQWQE:
- table = &hr_dev->mr_table.mtt_srqwqe_table;
- bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_IDX:
- table = &hr_dev->mr_table.mtt_idx_table;
- bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
- break;
- default:
- return -EINVAL;
- }
-
- /* All MTTs must fit in the same page */
- if (start_index / (bt_page_size / sizeof(u64)) !=
- (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
- return -EINVAL;
-
- if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
- return -EINVAL;
-
- mtts = hns_roce_table_find(hr_dev, table,
- mtt->first_seg +
- start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
- &dma_handle);
- if (!mtts)
- return -ENOMEM;
-
- /* Save page addr, low 12 bits : 0 */
- for (i = 0; i < npages; ++i) {
- if (!hr_dev->caps.mtt_hop_num)
- mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
- else
- mtts[i] = cpu_to_le64(page_list[i]);
- }
-
- return 0;
-}
-
-static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, u32 start_index,
- u32 npages, u64 *page_list)
-{
- int chunk;
- int ret;
- u32 bt_page_size;
-
- if (mtt->order < 0)
- return -EINVAL;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_CQE:
- bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_SRQWQE:
- bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_IDX:
- bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
- break;
- default:
- dev_err(hr_dev->dev,
- "Unsupport mtt type %d, write mtt failed\n",
- mtt->mtt_type);
- return -EINVAL;
- }
-
- while (npages > 0) {
- chunk = min_t(int, bt_page_size / sizeof(u64), npages);
-
- ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
- page_list);
- if (ret)
- return ret;
-
- npages -= chunk;
- start_index += chunk;
- page_list += chunk;
- }
-
- return 0;
-}
-
-int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
-{
- u64 *page_list;
- int ret;
- u32 i;
-
- page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
- if (!page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->npages; ++i) {
- if (buf->nbufs == 1)
- page_list[i] = buf->direct.map + (i << buf->page_shift);
- else
- page_list[i] = buf->page_list[i].map;
-
- }
- ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);
-
- kfree(page_list);
-
return ret;
}
@@ -923,50 +215,6 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
hr_dev->caps.num_mtpts,
hr_dev->caps.num_mtpts - 1,
hr_dev->caps.reserved_mrws, 0);
- if (ret)
- return ret;
-
- ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
- ilog2(hr_dev->caps.num_mtt_segs));
- if (ret)
- goto err_buddy;
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
- ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
- ilog2(hr_dev->caps.num_cqe_segs));
- if (ret)
- goto err_buddy_cqe;
- }
-
- if (hr_dev->caps.num_srqwqe_segs) {
- ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy,
- ilog2(hr_dev->caps.num_srqwqe_segs));
- if (ret)
- goto err_buddy_srqwqe;
- }
-
- if (hr_dev->caps.num_idx_segs) {
- ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy,
- ilog2(hr_dev->caps.num_idx_segs));
- if (ret)
- goto err_buddy_idx;
- }
-
- return 0;
-
-err_buddy_idx:
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
-
-err_buddy_srqwqe:
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
-
-err_buddy_cqe:
- hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
-
-err_buddy:
- hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
return ret;
}
@@ -974,30 +222,24 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- if (hr_dev->caps.num_idx_segs)
- hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy);
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
- hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_mr *mr;
int ret;
- mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (mr == NULL)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
/* Allocate memory region key */
- ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
- ~0ULL, acc, 0, mr);
+ hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
+ ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, 0, acc);
if (ret)
goto err_free;
@@ -1006,203 +248,52 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->umem = NULL;
return &mr->ibmr;
-
err_mr:
- hns_roce_mr_free(to_hr_dev(pd->device), mr);
+ free_mr_key(hr_dev, mr);
err_free:
kfree(mr);
return ERR_PTR(ret);
}
-int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct ib_umem *umem)
-{
- struct device *dev = hr_dev->dev;
- struct sg_dma_page_iter sg_iter;
- unsigned int order;
- int npage = 0;
- int ret = 0;
- int i;
- u64 page_addr;
- u64 *pages;
- u32 bt_page_size;
- u32 n;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- order = hr_dev->caps.mtt_ba_pg_sz;
- break;
- case MTT_TYPE_CQE:
- order = hr_dev->caps.cqe_ba_pg_sz;
- break;
- case MTT_TYPE_SRQWQE:
- order = hr_dev->caps.srqwqe_ba_pg_sz;
- break;
- case MTT_TYPE_IDX:
- order = hr_dev->caps.idx_ba_pg_sz;
- break;
- default:
- dev_err(dev, "Unsupport mtt type %d, write mtt failed\n",
- mtt->mtt_type);
- return -EINVAL;
- }
-
- bt_page_size = 1 << (order + PAGE_SHIFT);
-
- pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
- if (!pages)
- return -ENOMEM;
-
- i = n = 0;
-
- for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- page_addr = sg_page_iter_dma_address(&sg_iter);
- if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
- if (page_addr & ((1 << mtt->page_shift) - 1)) {
- dev_err(dev,
- "page_addr is not page_shift %d alignment!\n",
- mtt->page_shift);
- ret = -EINVAL;
- goto out;
- }
- pages[i++] = page_addr;
- }
- npage++;
- if (i == bt_page_size / sizeof(u64)) {
- ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
- if (ret)
- goto out;
- n += i;
- i = 0;
- }
- }
-
- if (i)
- ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
-
-out:
- free_pages((unsigned long) pages, order);
- return ret;
-}
-
-static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr,
- struct ib_umem *umem)
-{
- struct sg_dma_page_iter sg_iter;
- int i = 0, j = 0;
- u64 page_addr;
- u32 pbl_bt_sz;
-
- if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
- return 0;
-
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- page_addr = sg_page_iter_dma_address(&sg_iter);
- if (!hr_dev->caps.pbl_hop_num) {
- /* for hip06, page addr is aligned to 4K */
- mr->pbl_buf[i++] = page_addr >> 12;
- } else if (hr_dev->caps.pbl_hop_num == 1) {
- mr->pbl_buf[i++] = page_addr;
- } else {
- if (hr_dev->caps.pbl_hop_num == 2)
- mr->pbl_bt_l1[i][j] = page_addr;
- else if (hr_dev->caps.pbl_hop_num == 3)
- mr->pbl_bt_l2[i][j] = page_addr;
-
- j++;
- if (j >= (pbl_bt_sz / BA_BYTE_LEN)) {
- i++;
- j = 0;
- }
- }
- }
-
- /* Memory barrier */
- mb();
-
- return 0;
-}
-
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
- int bt_size;
int ret;
- int n;
- int i;
- mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(pd->device, start, length, access_flags);
- if (IS_ERR(mr->umem)) {
- ret = PTR_ERR(mr->umem);
- goto err_free;
- }
-
- n = ib_umem_page_count(mr->umem);
-
- if (!hr_dev->caps.pbl_hop_num) {
- if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
- dev_err(dev,
- " MR len %lld err. MR is limited to 4G at most!\n",
- length);
- ret = -EINVAL;
- goto err_umem;
- }
- } else {
- u64 pbl_size = 1;
-
- bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) /
- BA_BYTE_LEN;
- for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
- pbl_size *= bt_size;
- if (n > pbl_size) {
- dev_err(dev,
- " MR len %lld err. MR page num is limited to %lld!\n",
- length, pbl_size);
- ret = -EINVAL;
- goto err_umem;
- }
- }
-
mr->type = MR_TYPE_MR;
-
- ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
- access_flags, n, mr);
+ ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, virt_addr, length,
+ access_flags);
if (ret)
- goto err_umem;
+ goto err_alloc_mr;
- ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
+ ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, access_flags);
if (ret)
- goto err_mr;
+ goto err_alloc_key;
ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
- goto err_mr;
+ goto err_alloc_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+ mr->ibmr.length = length;
return &mr->ibmr;
-err_mr:
- hns_roce_mr_free(hr_dev, mr);
-
-err_umem:
- ib_umem_release(mr->umem);
-
-err_free:
+err_alloc_pbl:
+ free_mr_pbl(hr_dev, mr);
+err_alloc_key:
+ free_mr_key(hr_dev, mr);
+err_alloc_mr:
kfree(mr);
return ERR_PTR(ret);
}
@@ -1214,84 +305,36 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
u32 pdn, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
- struct device *dev = hr_dev->dev;
- int npages;
int ret;
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num)
- hns_roce_mhop_free(hr_dev, mr);
- else
- dma_free_coherent(dev, npages * 8,
- mr->pbl_buf, mr->pbl_dma_addr);
- }
- ib_umem_release(mr->umem);
-
- mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags);
- if (IS_ERR(mr->umem)) {
- ret = PTR_ERR(mr->umem);
- mr->umem = NULL;
- return -ENOMEM;
- }
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num) {
- ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
- if (ret)
- goto release_umem;
- } else {
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf) {
- ret = -ENOMEM;
- goto release_umem;
- }
+ free_mr_pbl(hr_dev, mr);
+ ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, mr_access_flags);
+ if (ret) {
+ ibdev_err(ibdev, "failed to create mr PBL, ret = %d.\n", ret);
+ return ret;
}
ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr,
length, mailbox->buf);
- if (ret)
- goto release_umem;
-
-
- ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret) {
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num)
- hns_roce_mhop_free(hr_dev, mr);
- else
- dma_free_coherent(dev, npages * 8,
- mr->pbl_buf,
- mr->pbl_dma_addr);
- }
-
- goto release_umem;
+ ibdev_err(ibdev, "failed to write mtpt, ret = %d.\n", ret);
+ free_mr_pbl(hr_dev, mr);
}
- return 0;
-
-release_umem:
- ib_umem_release(mr->umem);
return ret;
-
}
-
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ib_dev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_cmd_mailbox *mailbox;
- struct device *dev = hr_dev->dev;
unsigned long mtpt_idx;
u32 pdn = 0;
int ret;
@@ -1312,7 +355,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
- dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
+ ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);
mr->enabled = 0;
@@ -1336,8 +379,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
- dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
- ib_umem_release(mr->umem);
+ ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
goto free_cmd_mbox;
}
@@ -1365,8 +407,6 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
} else {
hns_roce_mr_free(hr_dev, mr);
-
- ib_umem_release(mr->umem);
kfree(mr);
}
@@ -1380,12 +420,8 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
u64 length;
- u32 page_size;
int ret;
- page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
- length = max_num_sg * page_size;
-
if (mr_type != IB_MR_TYPE_MEM_REG)
return ERR_PTR(-EINVAL);
@@ -1402,23 +438,28 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
mr->type = MR_TYPE_FRMR;
/* Allocate memory region key */
- ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
- 0, max_num_sg, mr);
+ length = max_num_sg * (1 << PAGE_SHIFT);
+ ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, length, 0);
if (ret)
goto err_free;
+ ret = alloc_mr_pbl(hr_dev, mr, length, NULL, 0, 0);
+ if (ret)
+ goto err_key;
+
ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
- goto err_mr;
+ goto err_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->umem = NULL;
+ mr->ibmr.length = length;
return &mr->ibmr;
-err_mr:
- hns_roce_mr_free(to_hr_dev(pd->device), mr);
-
+err_key:
+ free_mr_key(hr_dev, mr);
+err_pbl:
+ free_mr_pbl(hr_dev, mr);
err_free:
kfree(mr);
return ERR_PTR(ret);
@@ -1428,19 +469,54 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
struct hns_roce_mr *mr = to_hr_mr(ibmr);
- mr->pbl_buf[mr->npages++] = addr;
+ if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
+ mr->page_list[mr->npages++] = addr;
+ return 0;
+ }
- return 0;
+ return -ENOBUFS;
}
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+ int ret = 0;
mr->npages = 0;
+ mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!mr->page_list)
+ return ret;
+
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ if (ret < 1) {
+ ibdev_err(ibdev, "failed to store sg pages %d %d, cnt = %d.\n",
+ mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+ goto err_page_list;
+ }
+
+ mtr->hem_cfg.region[0].offset = 0;
+ mtr->hem_cfg.region[0].count = mr->npages;
+ mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
+ mtr->hem_cfg.region_count = 1;
+ ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
+ if (ret) {
+ ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
+ ret = 0;
+ } else {
+ mr->pbl_mtr.hem_cfg.buf_pg_shift = ilog2(ibmr->page_size);
+ ret = mr->npages;
+ }
+
+err_page_list:
+ kvfree(mr->page_list);
+ mr->page_list = NULL;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ return ret;
}
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
@@ -1564,32 +640,23 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
return 0;
}
-void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift,
- int buf_pg_shift)
-{
- hns_roce_hem_list_init(&mtr->hem_list, bt_pg_shift);
- mtr->buf_pg_shift = buf_pg_shift;
-}
-
-void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtr *mtr)
-{
- hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
-}
-
-static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtr *mtr, dma_addr_t *bufs,
- struct hns_roce_buf_region *r)
+static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, struct hns_roce_buf_region *region)
{
+ __le64 *mtts;
int offset;
int count;
int npage;
- u64 *mtts;
+ u64 addr;
int end;
int i;
- offset = r->offset;
- end = offset + r->count;
+ /* if hopnum is 0, the buffer cannot store BAs, so skip writing the mtt */
+ if (!region->hopnum)
+ return 0;
+
+ offset = region->offset;
+ end = offset + region->count;
npage = 0;
while (offset < end) {
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
@@ -1597,13 +664,13 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
if (!mtts)
return -ENOBUFS;
- /* Save page addr, low 12 bits : 0 */
for (i = 0; i < count; i++) {
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- mtts[i] = bufs[npage] >> PAGE_ADDR_SHIFT;
+ addr = to_hr_hw_page_addr(pages[npage]);
else
- mtts[i] = bufs[npage];
+ addr = pages[npage];
+ mtts[i] = cpu_to_le64(addr);
npage++;
}
offset += count;
@@ -1612,69 +679,416 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
return 0;
}
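to_hr_hw_page_addr() used above is, per the driver's definitions (an assumption of this sketch), a plain right shift that turns a DMA address into a hardware page frame number for HW_VER1:

#include <stdio.h>

#define HNS_HW_PAGE_SHIFT 12	/* assumption: 4 KiB hardware pages */
#define to_hr_hw_page_addr(addr) ((addr) >> HNS_HW_PAGE_SHIFT)

int main(void)
{
	unsigned long long addr = 0x45678000ULL;

	/* HW_VER1 stores page frame numbers: 0x45678000 -> 0x45678 */
	printf("0x%llx\n", to_hr_hw_page_addr(addr));
	return 0;
}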
-int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t **bufs, struct hns_roce_buf_region *regions,
- int region_cnt)
+static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
- struct hns_roce_buf_region *r;
- int ret;
int i;
- ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, regions,
- region_cnt);
- if (ret)
- return ret;
+ for (i = 0; i < attr->region_count; i++)
+ if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
+ attr->region[i].hopnum > 0)
+ return true;
- for (i = 0; i < region_cnt; i++) {
- r = &regions[i];
- ret = hns_roce_write_mtr(hr_dev, mtr, bufs[i], r);
+ /* because the mtr has only one root base address, a hopnum of 0 means
+ * the root base address equals the first buffer address, so all
+ * allocated memory must lie in one continuous space accessed in
+ * direct mode.
+ */
+ return false;
+}
+
+static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < attr->region_count; i++)
+ size += attr->region[i].size;
+
+ return size;
+}
+
+static inline int mtr_umem_page_count(struct ib_umem *umem,
+ unsigned int page_shift)
+{
+ int count = ib_umem_page_count(umem);
+
+ if (page_shift >= PAGE_SHIFT)
+ count >>= page_shift - PAGE_SHIFT;
+ else
+ count <<= PAGE_SHIFT - page_shift;
+
+ return count;
+}
+
+static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
+ unsigned int page_shift)
+{
+ if (is_direct)
+ return ALIGN(alloc_size, 1 << page_shift);
+ else
+ return HNS_HW_DIRECT_PAGE_COUNT << page_shift;
+}
+
+/*
+ * Check that the given pages lie in a continuous address space.
+ * Returns 0 on success, or the index of the first non-contiguous page.
+ */
+static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
+ unsigned int page_shift)
+{
+ size_t page_size = 1 << page_shift;
+ int i;
+
+ for (i = 1; i < page_count; i++)
+ if (pages[i] - pages[i - 1] != page_size)
+ return i;
+
+ return 0;
+}
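A user-space rendering of this contiguity check; the addresses are made up, and the second list deliberately breaks at index 2:

#include <stdio.h>

typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

static int check_direct_pages(const dma_addr_t *pages, int page_count,
			      unsigned int page_shift)
{
	dma_addr_t page_size = 1ULL << page_shift;
	int i;

	for (i = 1; i < page_count; i++)
		if (pages[i] - pages[i - 1] != page_size)
			return i;	/* first non-contiguous page */

	return 0;
}

int main(void)
{
	dma_addr_t ok[]  = { 0x10000, 0x11000, 0x12000 };
	dma_addr_t bad[] = { 0x10000, 0x11000, 0x20000 };

	printf("%d\n", check_direct_pages(ok, 3, 12));	/* 0: contiguous */
	printf("%d\n", check_direct_pages(bad, 3, 12));	/* 2: break at index 2 */
	return 0;
}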
+
+static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ /* release user buffers */
+ if (mtr->umem) {
+ ib_umem_release(mtr->umem);
+ mtr->umem = NULL;
+ }
+
+ /* release kernel buffers */
+ if (mtr->kmem) {
+ hns_roce_buf_free(hr_dev, mtr->kmem);
+ kfree(mtr->kmem);
+ mtr->kmem = NULL;
+ }
+}
+
+static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr, bool is_direct,
+ struct ib_udata *udata, unsigned long user_addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned int max_pg_shift = buf_attr->page_shift;
+ unsigned int best_pg_shift = 0;
+ int all_pg_count = 0;
+ size_t direct_size;
+ size_t total_size;
+ unsigned long tmp;
+ int ret = 0;
+
+ total_size = mtr_bufs_size(buf_attr);
+ if (total_size < 1) {
+ ibdev_err(ibdev, "Failed to check mtr size\n");
+ return -EINVAL;
+ }
+
+ if (udata) {
+ mtr->kmem = NULL;
+ mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
+ buf_attr->user_access);
+ if (IS_ERR_OR_NULL(mtr->umem)) {
+ ibdev_err(ibdev, "Failed to get umem, ret %ld\n",
+ PTR_ERR(mtr->umem));
+ return -ENOMEM;
+ }
+ if (buf_attr->fixed_page) {
+ best_pg_shift = max_pg_shift;
+ } else {
+ tmp = GENMASK(max_pg_shift, 0);
+ ret = ib_umem_find_best_pgsz(mtr->umem, tmp, user_addr);
+ best_pg_shift = (ret <= PAGE_SIZE) ?
+ PAGE_SHIFT : ilog2(ret);
+ }
+ all_pg_count = mtr_umem_page_count(mtr->umem, best_pg_shift);
+ ret = 0;
+ } else {
+ mtr->umem = NULL;
+ mtr->kmem = kzalloc(sizeof(*mtr->kmem), GFP_KERNEL);
+ if (!mtr->kmem) {
+ ibdev_err(ibdev, "Failed to alloc kmem\n");
+ return -ENOMEM;
+ }
+ direct_size = mtr_kmem_direct_size(is_direct, total_size,
+ max_pg_shift);
+ ret = hns_roce_buf_alloc(hr_dev, total_size, direct_size,
+ mtr->kmem, max_pg_shift);
if (ret) {
- dev_err(hr_dev->dev,
- "write mtr[%d/%d] err %d,offset=%d.\n",
- i, region_cnt, ret, r->offset);
- goto err_write;
+ ibdev_err(ibdev, "Failed to alloc kmem, ret %d\n", ret);
+ goto err_alloc_mem;
+ } else {
+ best_pg_shift = max_pg_shift;
+ all_pg_count = mtr->kmem->npages;
}
}
- return 0;
+ /* must be bigger than the minimum hardware page shift */
+ if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
+ ret = -EINVAL;
+ ibdev_err(ibdev, "Failed to check mtr page shift %d count %d\n",
+ best_pg_shift, all_pg_count);
+ goto err_alloc_mem;
+ }
-err_write:
- hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+ mtr->hem_cfg.buf_pg_shift = best_pg_shift;
+ mtr->hem_cfg.buf_pg_count = all_pg_count;
+ return 0;
+err_alloc_mem:
+ mtr_free_bufs(hr_dev, mtr);
return ret;
}
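mtr_umem_page_count() above rescales a count of kernel pages to the chosen page shift; the same arithmetic stand-alone, with PAGE_SHIFT assumed to be 12:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB kernel pages */

static int rescale_page_count(int count, unsigned int page_shift)
{
	if (page_shift >= PAGE_SHIFT)
		return count >> (page_shift - PAGE_SHIFT); /* bigger pages, fewer */
	return count << (PAGE_SHIFT - page_shift);	   /* smaller pages, more */
}

int main(void)
{
	printf("%d\n", rescale_page_count(256, 16));	/* 256 4K pages -> 16 */
	printf("%d\n", rescale_page_count(256, 12));	/* same shift -> 256 */
	return 0;
}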
+static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, int count, unsigned int page_shift)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int npage;
+ int err;
+
+ if (mtr->umem)
+ npage = hns_roce_get_umem_bufs(hr_dev, pages, count, 0,
+ mtr->umem, page_shift);
+ else
+ npage = hns_roce_get_kmem_bufs(hr_dev, pages, count, 0,
+ mtr->kmem);
+
+ if (mtr->hem_cfg.is_direct && npage > 1) {
+ err = mtr_check_direct_pages(pages, npage, page_shift);
+ if (err) {
+ ibdev_err(ibdev, "Failed to check %s direct page-%d\n",
+ mtr->umem ? "user" : "kernel", err);
+ npage = err;
+ }
+ }
+
+ return npage;
+}
+
+int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, int page_cnt)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_region *r;
+ int err;
+ int i;
+
+ for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ r = &mtr->hem_cfg.region[i];
+ if (r->offset + r->count > page_cnt) {
+ err = -EINVAL;
+ ibdev_err(ibdev,
+ "Failed to check mtr%d end %d + %d, max %d\n",
+ i, r->offset, r->count, page_cnt);
+ return err;
+ }
+
+ err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
+ if (err) {
+ ibdev_err(ibdev,
+ "Failed to map mtr%d offset %d, err %d\n",
+ i, r->offset, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
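A hypothetical caller of hns_roce_mtr_map(), modelled on hns_roce_map_mr_sg() earlier in this patch; hr_dev, mtr, pages, npages and hopnum are assumed to be set up as shown there:

	/* one region covering all collected pages */
	mtr->hem_cfg.region[0].offset = 0;
	mtr->hem_cfg.region[0].count = npages;
	mtr->hem_cfg.region[0].hopnum = hopnum;
	mtr->hem_cfg.region_count = 1;

	ret = hns_roce_mtr_map(hr_dev, mtr, pages, npages);
	if (ret)
		return ret;	/* region bounds or BA table lookup failed */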
+
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
- u64 *mtts = mtt_buf;
int mtt_count;
int total = 0;
- u64 *addr;
+ __le64 *mtts;
int npage;
+ u64 addr;
int left;
- if (mtts == NULL || mtt_max < 1)
+ if (!mtt_buf || mtt_max < 1)
goto done;
+ /* no mtt memory in direct mode, so just return the buffer address */
+ if (mtr->hem_cfg.is_direct) {
+ npage = offset;
+ for (total = 0; total < mtt_max; total++, npage++) {
+ addr = mtr->hem_cfg.root_ba +
+ (npage << mtr->hem_cfg.buf_pg_shift);
+
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ mtt_buf[total] = to_hr_hw_page_addr(addr);
+ else
+ mtt_buf[total] = addr;
+ }
+
+ goto done;
+ }
+
left = mtt_max;
while (left > 0) {
mtt_count = 0;
- addr = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
+ mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
offset + total,
&mtt_count, NULL);
- if (!addr || !mtt_count)
+ if (!mtts || !mtt_count)
goto done;
npage = min(mtt_count, left);
- memcpy(&mtts[total], addr, BA_BYTE_LEN * npage);
left -= npage;
- total += npage;
+ for (mtt_count = 0; mtt_count < npage; mtt_count++)
+ mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
}
done:
if (base_addr)
- *base_addr = mtr->hem_list.root_ba;
+ *base_addr = mtr->hem_cfg.root_ba;
return total;
}
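Reading translations back is the mirror operation; a hedged sketch of a caller (hr_dev, mtr and offset assumed in scope; the hw-specific files are expected to follow this shape):

	u64 mtts[2] = {};
	u64 root_ba = 0;
	int found;

	/* fetch up to two BAs starting at page index 'offset' */
	found = hns_roce_mtr_find(hr_dev, mtr, offset, mtts,
				  ARRAY_SIZE(mtts), &root_ba);
	if (found < 1)
		return -ENOBUFS;	/* no translation at this offset */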
+
+/* convert buffer size to page index and page count */
+static unsigned int mtr_init_region(struct hns_roce_buf_attr *attr,
+ int page_cnt,
+ struct hns_roce_buf_region *regions,
+ int region_cnt, unsigned int page_shift)
+{
+ unsigned int page_size = 1 << page_shift;
+ int max_region = attr->region_count;
+ struct hns_roce_buf_region *r;
+ unsigned int i = 0;
+ int page_idx = 0;
+
+ for (; i < region_cnt && i < max_region && page_idx < page_cnt; i++) {
+ r = &regions[i];
+ r->hopnum = attr->region[i].hopnum == HNS_ROCE_HOP_NUM_0 ?
+ 0 : attr->region[i].hopnum;
+ r->offset = page_idx;
+ r->count = DIV_ROUND_UP(attr->region[i].size, page_size);
+ page_idx += r->count;
+ }
+
+ return i;
+}
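The same size-to-pages slicing in user space, with two made-up regions and 4 KiB pages:

#include <stdio.h>

struct region { int offset; int count; };

int main(void)
{
	unsigned int page_size = 1U << 12;
	unsigned long sizes[] = { 6000, 10000 };	/* region byte sizes */
	struct region r[2];
	int page_idx = 0;
	int i;

	for (i = 0; i < 2; i++) {
		r[i].offset = page_idx;
		r[i].count = (sizes[i] + page_size - 1) / page_size;
		page_idx += r[i].count;
		printf("region %d: offset %d, count %d\n",
		       i, r[i].offset, r[i].count);
	}
	/* region 0: offset 0, count 2; region 1: offset 2, count 3 */
	return 0;
}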
+
+/**
+ * hns_roce_mtr_create - Create hns memory translate region.
+ *
+ * @hr_dev: RoCE device struct pointer
+ * @mtr: memory translate region
+ * @buf_attr: buffer attribute for creating mtr
+ * @page_shift: page shift for multi-hop base address table
+ * @udata: user space context, if it's NULL, means kernel space
+ * @user_addr: userspace virtual address to start at
+ */
+int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int page_shift, struct ib_udata *udata,
+ unsigned long user_addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t *pages = NULL;
+ int region_cnt = 0;
+ int all_pg_cnt;
+ int get_pg_cnt;
+ bool has_mtt;
+ int err = 0;
+
+ has_mtt = mtr_has_mtt(buf_attr);
+ /* if the buffer only needs mtt, just init the hem cfg */
+ if (buf_attr->mtt_only) {
+ mtr->hem_cfg.buf_pg_shift = buf_attr->page_shift;
+ mtr->hem_cfg.buf_pg_count = mtr_bufs_size(buf_attr) >>
+ buf_attr->page_shift;
+ mtr->umem = NULL;
+ mtr->kmem = NULL;
+ } else {
+ err = mtr_alloc_bufs(hr_dev, mtr, buf_attr, !has_mtt, udata,
+ user_addr);
+ if (err) {
+ ibdev_err(ibdev, "Failed to alloc mtr bufs, err %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* alloc mtt memory */
+ all_pg_cnt = mtr->hem_cfg.buf_pg_count;
+ hns_roce_hem_list_init(&mtr->hem_list);
+ mtr->hem_cfg.is_direct = !has_mtt;
+ mtr->hem_cfg.ba_pg_shift = page_shift;
+ mtr->hem_cfg.region_count = 0;
+ region_cnt = mtr_init_region(buf_attr, all_pg_cnt,
+ mtr->hem_cfg.region,
+ ARRAY_SIZE(mtr->hem_cfg.region),
+ mtr->hem_cfg.buf_pg_shift);
+ if (region_cnt < 1) {
+ err = -ENOBUFS;
+ ibdev_err(ibdev, "failed to init mtr region %d\n", region_cnt);
+ goto err_alloc_bufs;
+ }
+
+ mtr->hem_cfg.region_count = region_cnt;
+
+ if (has_mtt) {
+ err = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
+ mtr->hem_cfg.region, region_cnt,
+ page_shift);
+ if (err) {
+ ibdev_err(ibdev, "Failed to request mtr hem, err %d\n",
+ err);
+ goto err_alloc_bufs;
+ }
+ mtr->hem_cfg.root_ba = mtr->hem_list.root_ba;
+ }
+
+ /* no buffer to map */
+ if (buf_attr->mtt_only)
+ return 0;
+
+ /* alloc a tmp array to store buffer's dma address */
+ pages = kvcalloc(all_pg_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!pages) {
+ err = -ENOMEM;
+ ibdev_err(ibdev, "Failed to alloc mtr page list %d\n",
+ all_pg_cnt);
+ goto err_alloc_hem_list;
+ }
+
+ get_pg_cnt = mtr_get_pages(hr_dev, mtr, pages, all_pg_cnt,
+ mtr->hem_cfg.buf_pg_shift);
+ if (get_pg_cnt != all_pg_cnt) {
+ ibdev_err(ibdev, "Failed to get mtr page %d != %d\n",
+ get_pg_cnt, all_pg_cnt);
+ err = -ENOBUFS;
+ goto err_alloc_page_list;
+ }
+
+ if (!has_mtt) {
+ mtr->hem_cfg.root_ba = pages[0];
+ } else {
+ /* write buffer's dma address to BA table */
+ err = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
+ if (err) {
+ ibdev_err(ibdev, "Failed to map mtr pages, err %d\n",
+ err);
+ goto err_alloc_page_list;
+ }
+ }
+
+ /* drop tmp array */
+ kvfree(pages);
+ return 0;
+err_alloc_page_list:
+ kvfree(pages);
+err_alloc_hem_list:
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+err_alloc_bufs:
+ mtr_free_bufs(hr_dev, mtr);
+ return err;
+}
+
+void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ /* release multi-hop addressing resource */
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+
+ /* free buffers */
+ mtr_free_bufs(hr_dev, mtr);
+}
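Putting the new interface together, a hypothetical kernel-side create/use/destroy sequence (error handling trimmed; length and hr_dev assumed in scope, hopnum chosen arbitrarily):

	struct hns_roce_buf_attr attr = {};
	struct hns_roce_mtr mtr = {};
	int ret;

	attr.page_shift = HNS_HW_PAGE_SHIFT;	/* 4 KiB buffer pages */
	attr.region[0].size = length;
	attr.region[0].hopnum = 2;		/* two-level BA table */
	attr.region_count = 1;
	attr.fixed_page = true;

	ret = hns_roce_mtr_create(hr_dev, &mtr, &attr, HNS_HW_PAGE_SHIFT,
				  NULL, 0);	/* NULL udata: kernel buffer */
	if (!ret) {
		/* ... hns_roce_mtr_find() to read BAs back ... */
		hns_roce_mtr_destroy(hr_dev, &mtr);
	}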
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 6317901c4b4f..a0a47bd66975 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -355,16 +355,16 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}
-static int set_rq_size(struct hns_roce_dev *hr_dev,
- struct ib_qp_cap *cap, bool is_user, int has_rq,
- struct hns_roce_qp *hr_qp)
+static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ struct hns_roce_qp *hr_qp, int has_rq)
{
- u32 max_cnt;
+ u32 cnt;
/* If srq exist, set zero for relative number of rq */
if (!has_rq) {
hr_qp->rq.wqe_cnt = 0;
hr_qp->rq.max_gs = 0;
+ hr_qp->rq_inl_buf.wqe_cnt = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
@@ -379,17 +379,15 @@ static int set_rq_size(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
- max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
-
- hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
- if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
+ cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
+ if (cnt > hr_dev->caps.max_wqes) {
ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
cap->max_recv_wr);
return -EINVAL;
}
- max_cnt = max(1U, cap->max_recv_sge);
- hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
+ HNS_ROCE_RESERVED_SGE);
if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
@@ -397,8 +395,57 @@ static int set_rq_size(struct hns_roce_dev *hr_dev,
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
hr_qp->rq.max_gs);
- cap->max_recv_wr = hr_qp->rq.wqe_cnt;
- cap->max_recv_sge = hr_qp->rq.max_gs;
+ hr_qp->rq.wqe_cnt = cnt;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ hr_qp->rq_inl_buf.wqe_cnt = cnt;
+ else
+ hr_qp->rq_inl_buf.wqe_cnt = 0;
+
+ cap->max_recv_wr = cnt;
+ cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;
+
+ return 0;
+}
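The queue-depth and SGE math above in user-space form; min_wqes and HNS_ROCE_RESERVED_SGE are placeholders mirroring the driver's constants, so treat their values as assumptions:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int v = 1;

	while (v < n)
		v <<= 1;
	return v;
}

int main(void)
{
	unsigned int min_wqes = 64;	/* placeholder for hr_dev->caps.min_wqes */
	unsigned int reserved_sge = 1;	/* placeholder for HNS_ROCE_RESERVED_SGE */
	unsigned int max_recv_wr = 100, max_recv_sge = 3;
	unsigned int cnt, max_gs;

	cnt = roundup_pow_of_two(max_recv_wr > min_wqes ? max_recv_wr : min_wqes);
	max_gs = roundup_pow_of_two(max_recv_sge + reserved_sge);

	/* 100 WRs round up to 128; 3 + 1 reserved SGEs stay at 4 */
	printf("wqe_cnt=%u rq.max_gs=%u\n", cnt, max_gs);
	/* userspace is told max_gs minus the reserved entry: 3 */
	printf("cap->max_recv_sge=%u\n", max_gs - reserved_sge);
	return 0;
}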
+
+static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_cap *cap)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt;
+
+ cnt = max(1U, cap->max_send_sge);
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
+ hr_qp->sge.sge_cnt = 0;
+
+ return 0;
+ }
+
+ hr_qp->sq.max_gs = cnt;
+
+ /* UD sqwqe's sges use the extended sge space */
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
+ hr_qp->ibqp.qp_type == IB_QPT_UD) {
+ cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
+ } else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
+ cnt = roundup_pow_of_two(sq_wqe_cnt *
+ (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
+ if (cnt > hr_dev->caps.max_extend_sg) {
+ ibdev_err(ibdev,
+ "failed to check exSGE num, exSGE num = %d.\n",
+ cnt);
+ return -EINVAL;
+ }
+ }
+ } else {
+ cnt = 0;
+ }
+
+ hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
+ hr_qp->sge.sge_cnt = cnt;
return 0;
}
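The extended-SGE sizing in isolation; the value 2 for SGE_IN_WQE matches the literal "max_gs - 2" in the code removed below:

#include <stdio.h>

#define SGE_IN_WQE 2	/* SGEs carried in the WQE itself */

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int v = 1;

	while (v < n)
		v <<= 1;
	return v;
}

int main(void)
{
	unsigned int sq_wqe_cnt = 128, max_gs = 5;
	unsigned int sge_cnt;

	/* each of the 128 WQEs spills (5 - 2) SGEs into the extended area */
	sge_cnt = roundup_pow_of_two(sq_wqe_cnt * (max_gs - SGE_IN_WQE));
	printf("sge.sge_cnt=%u\n", sge_cnt);	/* 384 rounds up to 512 */
	return 0;
}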
@@ -430,174 +477,79 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
- u32 ex_sge_num;
- u32 page_size;
- u32 max_cnt;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt = 0;
int ret;
- if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
- hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+ cnt > hr_dev->caps.max_wqes)
return -EINVAL;
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
- ibdev_err(&hr_dev->ib_dev, "Failed to check user SQ size limit\n");
+ ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
+ ret);
return ret;
}
- hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
-
- max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
- else
- hr_qp->sq.max_gs = max_cnt;
-
- if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- (hr_qp->sq.max_gs - 2));
-
- if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE &&
- hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
- if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
- ibdev_err(&hr_dev->ib_dev,
- "Failed to check extended SGE size limit %d\n",
- hr_qp->sge.sge_cnt);
- return -EINVAL;
- }
- }
-
- hr_qp->sge.sge_shift = 4;
- ex_sge_num = hr_qp->sge.sge_cnt;
+ ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
+ if (ret)
+ return ret;
- /* Get buf size, SQ and RQ are aligned to page_szie */
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
- hr_qp->rq.wqe_shift), PAGE_SIZE) +
- round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
-
- hr_qp->sq.offset = 0;
- hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
- } else {
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- hr_qp->sge.sge_cnt = ex_sge_num ?
- max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
- hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
- hr_qp->rq.wqe_shift), page_size) +
- round_up((hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift), page_size) +
- round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), page_size);
-
- hr_qp->sq.offset = 0;
- if (ex_sge_num) {
- hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
- hr_qp->rq.offset = hr_qp->sge.offset +
- round_up((hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift),
- page_size);
- } else {
- hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
- }
- }
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ hr_qp->sq.wqe_cnt = cnt;
return 0;
}
-static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_buf_region *regions,
- int region_max, int page_shift)
+static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_buf_attr *buf_attr)
{
- int page_size = 1 << page_shift;
- bool is_extend_sge;
- int region_cnt = 0;
int buf_size;
- int buf_cnt;
+ int idx = 0;
- if (hr_qp->buff_size < 1 || region_max < 1)
- return region_cnt;
+ hr_qp->buff_size = 0;
- if (hr_qp->sge.sge_cnt > 0)
- is_extend_sge = true;
- else
- is_extend_sge = false;
-
- /* sq region */
- if (is_extend_sge)
- buf_size = hr_qp->sge.offset - hr_qp->sq.offset;
- else
- buf_size = hr_qp->rq.offset - hr_qp->sq.offset;
-
- if (buf_size > 0 && region_cnt < region_max) {
- buf_cnt = DIV_ROUND_UP(buf_size, page_size);
- hns_roce_init_buf_region(&regions[region_cnt],
- hr_dev->caps.wqe_sq_hop_num,
- hr_qp->sq.offset / page_size,
- buf_cnt);
- region_cnt++;
- }
-
- /* sge region */
- if (is_extend_sge) {
- buf_size = hr_qp->rq.offset - hr_qp->sge.offset;
- if (buf_size > 0 && region_cnt < region_max) {
- buf_cnt = DIV_ROUND_UP(buf_size, page_size);
- hns_roce_init_buf_region(&regions[region_cnt],
- hr_dev->caps.wqe_sge_hop_num,
- hr_qp->sge.offset / page_size,
- buf_cnt);
- region_cnt++;
- }
- }
-
- /* rq region */
- buf_size = hr_qp->buff_size - hr_qp->rq.offset;
- if (buf_size > 0) {
- buf_cnt = DIV_ROUND_UP(buf_size, page_size);
- hns_roce_init_buf_region(&regions[region_cnt],
- hr_dev->caps.wqe_rq_hop_num,
- hr_qp->rq.offset / page_size,
- buf_cnt);
- region_cnt++;
- }
-
- return region_cnt;
-}
-
-static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp)
-{
- struct device *dev = hr_dev->dev;
-
- if (hr_qp->sq.max_gs > 2) {
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- (hr_qp->sq.max_gs - 2));
- hr_qp->sge.sge_shift = 4;
- }
-
- /* ud sqwqe's sge use extend sge */
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
- hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- hr_qp->sq.max_gs);
- hr_qp->sge.sge_shift = 4;
- }
+ /* SQ WQE */
+ hr_qp->sq.offset = 0;
+ buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
+ hr_qp->sq.wqe_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ /* extend SGE WQE in SQ */
+ hr_qp->sge.offset = hr_qp->buff_size;
+ buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
+ hr_qp->sge.sge_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ /* RQ WQE */
+ hr_qp->rq.offset = hr_qp->buff_size;
+ buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
+ hr_qp->rq.wqe_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ if (hr_qp->buff_size < 1)
+ return -EINVAL;
- if (hr_qp->sq.max_gs > 2 &&
- hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
- if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
- dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
- hr_qp->sge.sge_cnt);
- return -EINVAL;
- }
- }
+ buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ buf_attr->fixed_page = true;
+ buf_attr->region_count = idx;
return 0;
}
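The resulting WQE buffer layout, computed in user space for a made-up QP; entry counts and shifts are illustrative, and to_hr_hem_entries_size() is taken to be count << shift rounded up to a hardware page, which is an assumption about that helper:

#include <stdio.h>

#define HW_PAGE 4096UL	/* assumed hardware page size */

/* count << shift, hw-page aligned (assumed to_hr_hem_entries_size()) */
static unsigned long entries_size(unsigned long cnt, unsigned int shift)
{
	return ((cnt << shift) + HW_PAGE - 1) & ~(HW_PAGE - 1);
}

int main(void)
{
	unsigned long off = 0;
	unsigned long sq = entries_size(128, 6);	/* 128 SQ WQEs of 64 B */
	unsigned long sge = entries_size(512, 4);	/* 512 ext SGEs of 16 B */
	unsigned long rq = entries_size(128, 7);	/* 128 RQ WQEs of 128 B */

	printf("sq.offset=%lu\n", off); off += sq;
	printf("sge.offset=%lu\n", off); off += sge;
	printf("rq.offset=%lu\n", off); off += rq;
	printf("buff_size=%lu\n", off);	/* 0, 8192, 16384; total 32768 */
	return 0;
}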
@@ -605,62 +557,35 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
- u32 page_size;
- u32 max_cnt;
- int size;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt;
int ret;
if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
- ibdev_err(&hr_dev->ib_dev,
- "SQ WR or sge or inline data error!\n");
+ ibdev_err(ibdev,
+ "failed to check SQ WR, SGE or inline num, ret = %d.\n",
+ -EINVAL);
return -EINVAL;
}
- hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
-
- max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
-
- hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
- if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
- ibdev_err(&hr_dev->ib_dev,
- "while setting kernel sq size, sq.wqe_cnt too large\n");
+ cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
+ if (cnt > hr_dev->caps.max_wqes) {
+ ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n",
+ cnt);
return -EINVAL;
}
- /* Get data_seg numbers */
- max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
- else
- hr_qp->sq.max_gs = max_cnt;
+ hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
+ hr_qp->sq.wqe_cnt = cnt;
- ret = set_extend_sge_param(hr_dev, hr_qp);
- if (ret) {
- ibdev_err(&hr_dev->ib_dev, "set extend sge parameters fail\n");
+ ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
+ if (ret)
return ret;
- }
- /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- hr_qp->sq.offset = 0;
- size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
-
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && hr_qp->sge.sge_cnt) {
- hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
- (u32)hr_qp->sge.sge_cnt);
- hr_qp->sge.offset = size;
- size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
- page_size);
- }
-
- hr_qp->rq.offset = size;
- size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
- hr_qp->buff_size = size;
-
- /* Get wr and sge number which send */
- cap->max_send_wr = hr_qp->sq.wqe_cnt;
+ /* sync the parameters of kernel QP to user's configuration */
+ cap->max_send_wr = cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
/* We don't support inline sends for kernel QPs (yet) */
@@ -691,8 +616,8 @@ static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr)
{
u32 max_recv_sge = init_attr->cap.max_recv_sge;
+ u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
struct hns_roce_rinl_wqe *wqe_list;
- u32 wqe_cnt = hr_qp->rq.wqe_cnt;
int i;
/* allocate recv inline buf */
@@ -714,7 +639,6 @@ static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];
hr_qp->rq_inl_buf.wqe_list = wqe_list;
- hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt;
return 0;
@@ -727,140 +651,55 @@ err:
static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
- kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+ if (hr_qp->rq_inl_buf.wqe_list)
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
kfree(hr_qp->rq_inl_buf.wqe_list);
}
-static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
- u32 page_shift, bool is_user)
-{
-/* WQE buffer include 3 parts: SQ, extend SGE and RQ. */
-#define HNS_ROCE_WQE_REGION_MAX 3
- struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX] = {};
- dma_addr_t *buf_list[HNS_ROCE_WQE_REGION_MAX] = {};
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_buf_region *r;
- int region_count;
- int buf_count;
- int ret;
- int i;
-
- region_count = split_wqe_buf_region(hr_dev, hr_qp, regions,
- ARRAY_SIZE(regions), page_shift);
-
- /* alloc a tmp list to store WQE buffers address */
- ret = hns_roce_alloc_buf_list(regions, buf_list, region_count);
- if (ret) {
- ibdev_err(ibdev, "Failed to alloc WQE buffer list\n");
- return ret;
- }
-
- for (i = 0; i < region_count; i++) {
- r = &regions[i];
- if (is_user)
- buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i],
- r->count, r->offset, hr_qp->umem,
- page_shift);
- else
- buf_count = hns_roce_get_kmem_bufs(hr_dev, buf_list[i],
- r->count, r->offset, &hr_qp->hr_buf);
-
- if (buf_count != r->count) {
- ibdev_err(ibdev, "Failed to get %s WQE buf, expect %d = %d.\n",
- is_user ? "user" : "kernel",
- r->count, buf_count);
- ret = -ENOBUFS;
- goto done;
- }
- }
-
- hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz;
- hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
- page_shift);
- ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, regions,
- region_count);
- if (ret)
- ibdev_err(ibdev, "Failed to attach WQE's mtr\n");
-
- goto done;
-
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
-done:
- hns_roce_free_buf_list(buf_list, region_count);
-
- return ret;
-}
-
static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, unsigned long addr)
{
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
struct ib_device *ibdev = &hr_dev->ib_dev;
- bool is_rq_buf_inline;
+ struct hns_roce_buf_attr buf_attr = {};
int ret;
- is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hns_roce_qp_has_rq(init_attr);
- if (is_rq_buf_inline) {
+ if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
ret = alloc_rq_inline_buf(hr_qp, init_attr);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc inline RQ buffer\n");
+ ibdev_err(ibdev,
+ "failed to alloc inline buf, ret = %d.\n",
+ ret);
return ret;
}
- }
-
- if (udata) {
- hr_qp->umem = ib_umem_get(ibdev, addr, hr_qp->buff_size, 0);
- if (IS_ERR(hr_qp->umem)) {
- ret = PTR_ERR(hr_qp->umem);
- goto err_inline;
- }
} else {
- ret = hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
- (1 << page_shift) * 2,
- &hr_qp->hr_buf, page_shift);
- if (ret)
- goto err_inline;
+ hr_qp->rq_inl_buf.wqe_list = NULL;
}
- ret = map_wqe_buf(hr_dev, hr_qp, page_shift, udata);
- if (ret)
- goto err_alloc;
+ ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
+ goto err_inline;
+ }
+ ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
+ HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
+ udata, addr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
+ goto err_inline;
+ }
return 0;
-
err_inline:
- if (is_rq_buf_inline)
- free_rq_inline_buf(hr_qp);
-
-err_alloc:
- if (udata) {
- ib_umem_release(hr_qp->umem);
- hr_qp->umem = NULL;
- } else {
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
- }
-
- ibdev_err(ibdev, "Failed to alloc WQE buffer, ret %d.\n", ret);
+ free_rq_inline_buf(hr_qp);
return ret;
}
static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
- if (hr_qp->umem) {
- ib_umem_release(hr_qp->umem);
- hr_qp->umem = NULL;
- }
-
- if (hr_qp->hr_buf.nbufs > 0)
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
-
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hr_qp->rq.wqe_cnt)
- free_rq_inline_buf(hr_qp);
+ hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
+ free_rq_inline_buf(hr_qp);
}
static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
@@ -912,8 +751,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
"Failed to map user SQ doorbell\n");
goto err_out;
}
- hr_qp->sdb_en = 1;
- resp->cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
}
if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
@@ -924,8 +763,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
"Failed to map user RQ doorbell\n");
goto err_sdb;
}
- hr_qp->rdb_en = 1;
- resp->cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
}
} else {
/* QP doorbell register address */
@@ -942,13 +781,13 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
goto err_out;
}
*hr_qp->rdb.db_record = 0;
- hr_qp->rdb_en = 1;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
}
}
return 0;
err_sdb:
- if (udata && hr_qp->sdb_en)
+ if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
return ret;
@@ -961,12 +800,12 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
udata, struct hns_roce_ucontext, ibucontext);
if (udata) {
- if (hr_qp->rdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
- if (hr_qp->sdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
} else {
- if (hr_qp->rdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_qp->rdb);
}
}
@@ -1003,8 +842,7 @@ err_sq:
return ret;
}
-static void free_kernel_wrid(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp)
+static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
kfree(hr_qp->rq.wrid);
kfree(hr_qp->sq.wrid);
@@ -1025,10 +863,11 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
else
hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
- ret = set_rq_size(hr_dev, &init_attr->cap, udata,
- hns_roce_qp_has_rq(init_attr), hr_qp);
+ ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
+ hns_roce_qp_has_rq(init_attr));
if (ret) {
- ibdev_err(ibdev, "Failed to set user RQ size\n");
+ ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
+ ret);
return ret;
}
@@ -1156,7 +995,7 @@ err_buf:
err_db:
free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
- free_kernel_wrid(hr_dev, hr_qp);
+ free_kernel_wrid(hr_qp);
return ret;
}
@@ -1170,7 +1009,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
free_qpc(hr_dev, hr_qp);
free_qpn(hr_dev, hr_qp);
free_qp_buf(hr_dev, hr_qp);
- free_kernel_wrid(hr_dev, hr_qp);
+ free_kernel_wrid(hr_qp);
free_qp_db(hr_dev, hr_qp, udata);
kfree(hr_qp);
@@ -1339,10 +1178,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (ibqp->uobject &&
(attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
- if (hr_qp->sdb_en == 1) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
- if (hr_qp->rdb_en == 1)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
ibdev_warn(&hr_dev->ib_dev,
@@ -1431,10 +1270,9 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
}
}
-static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
-
- return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
+ return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
@@ -1449,8 +1287,7 @@ void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
- return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
- (n << hr_qp->sge.sge_shift));
+ return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
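
[note] The sdb_en/rdb_en integer pair above becomes one en_flags bitmask that doubles as the cap_flags value reported to userspace. A small sketch of the pattern; the flag names come from the hunk, but the bit positions here are illustrative:

#include <stdio.h>

#define HNS_ROCE_QP_CAP_SQ_RECORD_DB	(1U << 0)	/* illustrative bits */
#define HNS_ROCE_QP_CAP_RQ_RECORD_DB	(1U << 1)

struct demo_qp {
	unsigned int en_flags;
};

int main(void)
{
	struct demo_qp qp = { .en_flags = 0 };

	/* one mask replaces the old sdb_en/rdb_en integer pair */
	qp.en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
	qp.en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;

	if (qp.en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		printf("SQ record doorbell enabled\n");
	if (qp.en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
		printf("RQ record doorbell enabled\n");
	return 0;
}
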
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 5b3dd1a337d4..f40a000e94ee 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -77,56 +77,56 @@ static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
- u16 xrcd, struct hns_roce_mtt *hr_mtt,
- u64 db_rec_addr, struct hns_roce_srq *srq)
+static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cmd_mailbox *mailbox;
- dma_addr_t dma_handle_wqe;
- dma_addr_t dma_handle_idx;
- u64 *mtts_wqe;
- u64 *mtts_idx;
+ u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
+ u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
+ dma_addr_t dma_handle_wqe = 0;
+ dma_addr_t dma_handle_idx = 0;
int ret;
/* Get the physical address of srq buf */
- mtts_wqe = hns_roce_table_find(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table,
- srq->mtt.first_seg,
- &dma_handle_wqe);
- if (!mtts_wqe) {
- dev_err(hr_dev->dev, "Failed to find mtt for srq buf.\n");
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
+ ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
+ if (ret < 1) {
+ ibdev_err(ibdev, "Failed to find mtr for SRQ WQE\n");
+ return -ENOBUFS;
}
/* Get physical address of idx que buf */
- mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
- srq->idx_que.mtt.first_seg,
- &dma_handle_idx);
- if (!mtts_idx) {
- dev_err(hr_dev->dev,
- "Failed to find mtt for srq idx queue buf.\n");
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
+ ARRAY_SIZE(mtts_idx), &dma_handle_idx);
+ if (ret < 1) {
+ ibdev_err(ibdev, "Failed to find mtr for SRQ idx\n");
+ return -ENOBUFS;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
if (ret) {
- dev_err(hr_dev->dev,
- "Failed to alloc a bit from srq bitmap.\n");
+ ibdev_err(ibdev, "Failed to alloc SRQ number, err %d\n", ret);
return -ENOMEM;
}
ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
- if (ret)
+ if (ret) {
+ ibdev_err(ibdev, "Failed to get SRQC table, err %d\n", ret);
goto err_out;
+ }
ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
- if (ret)
+ if (ret) {
+ ibdev_err(ibdev, "Failed to store SRQC, err %d\n", ret);
goto err_put;
+ }
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox)) {
- ret = PTR_ERR(mailbox);
+ if (IS_ERR_OR_NULL(mailbox)) {
+ ret = -ENOMEM;
+ ibdev_err(ibdev, "Failed to alloc mailbox for SRQC\n");
goto err_xa;
}
@@ -136,8 +136,10 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret)
+ if (ret) {
+ ibdev_err(ibdev, "Failed to config SRQC, err %d\n", ret);
goto err_xa;
+ }
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
@@ -154,8 +156,7 @@ err_out:
return ret;
}
-static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq)
+static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
@@ -175,187 +176,104 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
-static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
- int srq_buf_size)
+static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- struct hns_roce_ib_create_srq ucmd;
- struct hns_roce_buf *buf;
- int ret;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+ HNS_ROCE_SGE_SIZE *
+ srq->max_gs)));
+
+ buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->wqe_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+ err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
+ hr_dev->caps.srqwqe_ba_pg_sz +
+ HNS_HW_PAGE_SHIFT, udata, addr);
+ if (err)
+ ibdev_err(ibdev, "Failed to alloc SRQ buf mtr, err %d\n", err);
+
+ return err;
+}
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
- return -EFAULT;
-
- srq->umem =
- ib_umem_get(srq->ibsrq.device, ucmd.buf_addr, srq_buf_size, 0);
- if (IS_ERR(srq->umem))
- return PTR_ERR(srq->umem);
-
- buf = &srq->buf;
- buf->npages = (ib_umem_page_count(srq->umem) +
- (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.srqwqe_buf_pg_sz);
- buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
- &srq->mtt);
- if (ret)
- goto err_user_buf;
+static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
+}
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
- if (ret)
- goto err_user_srq_mtt;
-
- /* config index queue BA */
- srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr,
- srq->idx_que.buf_size, 0);
- if (IS_ERR(srq->idx_que.umem)) {
- dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
- ret = PTR_ERR(srq->idx_que.umem);
- goto err_user_srq_mtt;
+static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
+
+ buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->idx_que.entry_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+ err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
+ hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, addr);
+ if (err) {
+ ibdev_err(ibdev, "Failed to alloc SRQ idx mtr, err %d\n", err);
+ return err;
}
- buf = &srq->idx_que.idx_buf;
- buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
- 1 << hr_dev->caps.idx_buf_pg_sz);
- buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
- &srq->idx_que.mtt);
- if (ret) {
- dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
- goto err_user_idx_mtt;
- }
+ if (!udata) {
+ idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
+ if (!idx_que->bitmap) {
+ ibdev_err(ibdev, "Failed to alloc SRQ idx bitmap\n");
+ err = -ENOMEM;
+ goto err_idx_mtr;
+ }
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
- srq->idx_que.umem);
- if (ret) {
- dev_err(hr_dev->dev,
- "hns_roce_ib_umem_write_mtt error for idx que\n");
- goto err_user_idx_buf;
}
return 0;
+err_idx_mtr:
+ hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
-err_user_idx_buf:
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
-
-err_user_idx_mtt:
- ib_umem_release(srq->idx_que.umem);
-
-err_user_srq_mtt:
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
-err_user_buf:
- ib_umem_release(srq->umem);
-
- return ret;
+ return err;
}
-static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
- u32 page_shift)
+static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
- idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
- if (!idx_que->bitmap)
- return -ENOMEM;
-
- idx_que->buf_size = srq->idx_que.buf_size;
-
- if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
- &idx_que->idx_buf, page_shift)) {
- bitmap_free(idx_que->bitmap);
- return -ENOMEM;
- }
-
- return 0;
+ bitmap_free(idx_que->bitmap);
+ idx_que->bitmap = NULL;
+ hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}
-static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
+static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- int ret;
-
- if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
- &srq->buf, page_shift))
- return -ENOMEM;
-
srq->head = 0;
srq->tail = srq->wqe_cnt - 1;
-
- ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
- &srq->mtt);
- if (ret)
- goto err_kernel_buf;
-
- ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
- if (ret)
- goto err_kernel_srq_mtt;
-
- page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
- ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
- if (ret) {
- dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
- goto err_kernel_srq_mtt;
- }
-
- /* Init mtt table for idx_que */
- ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
- srq->idx_que.idx_buf.page_shift,
- &srq->idx_que.mtt);
- if (ret)
- goto err_kernel_create_idx;
-
- /* Write buffer address into the mtt table */
- ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
- &srq->idx_que.idx_buf);
- if (ret)
- goto err_kernel_idx_buf;
-
srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
- if (!srq->wrid) {
- ret = -ENOMEM;
- goto err_kernel_idx_buf;
- }
+ if (!srq->wrid)
+ return -ENOMEM;
return 0;
-
-err_kernel_idx_buf:
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
-
-err_kernel_create_idx:
- hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
- &srq->idx_que.idx_buf);
- kfree(srq->idx_que.bitmap);
-
-err_kernel_srq_mtt:
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
-err_kernel_buf:
- hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
-
- return ret;
-}
-
-static void destroy_user_srq(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq)
-{
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
- ib_umem_release(srq->idx_que.umem);
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
- ib_umem_release(srq->umem);
}
-static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, int srq_buf_size)
+static void free_srq_wrid(struct hns_roce_srq *srq)
{
- kvfree(srq->wrid);
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
- hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
- kfree(srq->idx_que.bitmap);
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
- hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+ kfree(srq->wrid);
+ srq->wrid = NULL;
}
int hns_roce_create_srq(struct ib_srq *ib_srq,
@@ -365,8 +283,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
struct hns_roce_ib_create_srq_resp resp = {};
struct hns_roce_srq *srq = to_hr_srq(ib_srq);
- int srq_desc_size;
- int srq_buf_size;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_srq ucmd = {};
int ret = 0;
u32 cqn;
@@ -379,43 +297,47 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
spin_lock_init(&srq->lock);
srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
- srq->max_gs = init_attr->attr.max_sge;
-
- srq_desc_size = roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
- HNS_ROCE_SGE_SIZE * srq->max_gs));
-
- srq->wqe_shift = ilog2(srq_desc_size);
-
- srq_buf_size = srq->wqe_cnt * srq_desc_size;
-
- srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
- srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
- srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
- srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
+ srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;
if (udata) {
- ret = create_user_srq(srq, udata, srq_buf_size);
+ ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (ret) {
- dev_err(hr_dev->dev, "Create user srq failed\n");
- goto err_srq;
+ ibdev_err(ibdev, "Failed to copy SRQ udata, err %d\n",
+ ret);
+ return ret;
}
- } else {
- ret = create_kernel_srq(srq, srq_buf_size);
+ }
+
+ ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc SRQ buffer, err %d\n", ret);
+ return ret;
+ }
+
+ ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc SRQ idx, err %d\n", ret);
+ goto err_buf_alloc;
+ }
+
+ if (!udata) {
+ ret = alloc_srq_wrid(hr_dev, srq);
if (ret) {
- dev_err(hr_dev->dev, "Create kernel srq failed\n");
- goto err_srq;
+ ibdev_err(ibdev, "Failed to alloc SRQ wrid, err %d\n",
+ ret);
+ goto err_idx_alloc;
}
}
cqn = ib_srq_has_cq(init_attr->srq_type) ?
to_hr_cq(init_attr->ext.cq)->cqn : 0;
-
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
- ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
- &srq->mtt, 0, srq);
- if (ret)
- goto err_wrid;
+ ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc SRQ context, err %d\n", ret);
+ goto err_wrid_alloc;
+ }
srq->event = hns_roce_ib_srq_event;
resp.srqn = srq->srqn;
@@ -431,15 +353,13 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
return 0;
err_srqc_alloc:
- hns_roce_srq_free(hr_dev, srq);
-
-err_wrid:
- if (udata)
- destroy_user_srq(hr_dev, srq);
- else
- destroy_kernel_srq(hr_dev, srq, srq_buf_size);
-
-err_srq:
+ free_srqc(hr_dev, srq);
+err_wrid_alloc:
+ free_srq_wrid(srq);
+err_idx_alloc:
+ free_srq_idx(hr_dev, srq);
+err_buf_alloc:
+ free_srq_buf(hr_dev, srq);
return ret;
}
@@ -448,18 +368,10 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- hns_roce_srq_free(hr_dev, srq);
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
- if (udata) {
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
- } else {
- kvfree(srq->wrid);
- hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
- &srq->buf);
- }
- ib_umem_release(srq->idx_que.umem);
- ib_umem_release(srq->umem);
+ free_srqc(hr_dev, srq);
+ free_srq_idx(hr_dev, srq);
+ free_srq_wrid(srq);
+ free_srq_buf(hr_dev, srq);
}
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
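
[note] hns_roce_create_srq() now composes four alloc_* steps and unwinds them in reverse order through its err_* labels. A compilable sketch of that goto-unwind shape, using plain malloc()/free() as stand-ins for the driver's helpers:

#include <errno.h>
#include <stdlib.h>

static int alloc_step(void **p)
{
	*p = malloc(16);
	return *p ? 0 : -ENOMEM;
}

static int create_srq_like(void)
{
	void *buf, *idx, *wrid;
	int ret;

	ret = alloc_step(&buf);		/* alloc_srq_buf() */
	if (ret)
		return ret;
	ret = alloc_step(&idx);		/* alloc_srq_idx() */
	if (ret)
		goto err_buf;
	ret = alloc_step(&wrid);	/* alloc_srq_wrid() */
	if (ret)
		goto err_idx;
	/* alloc_srqc() would follow here, jumping to a wrid label on error */
	free(wrid);			/* demo-only cleanup on success */
	free(idx);
	free(buf);
	return 0;

err_idx:
	free(idx);	/* free_srq_idx() */
err_buf:
	free(buf);	/* free_srq_buf() */
	return ret;
}

int main(void)
{
	return create_srq_like();
}

The ordering matters: each label frees only what was successfully allocated before the failing step.
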
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 3c62c9327a9c..49d92638e0db 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -382,15 +382,6 @@ static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)
}
/**
- * to_iwmr_from_ibfmr - get device memory region
- * @ibfmr: ib fmr
- **/
-static inline struct i40iw_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct i40iw_mr, ibfmr);
-}
-
-/**
* to_iwmw - get device memory window
* @ibmw: ib memory window
**/
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index bb78d3280acc..fa7a5ff498c7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1987,7 +1987,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
struct rtable *rt;
struct neighbour *neigh;
int rc = arpindex;
- struct net_device *netdev = iwdev->netdev;
__be32 dst_ipaddr = htonl(dst_ip);
__be32 src_ipaddr = htonl(src_ip);
@@ -1997,9 +1996,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
return rc;
}
- if (netif_is_bond_slave(netdev))
- netdev = netdev_master_upper_dev_get(netdev);
-
neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rcu_read_lock();
@@ -2065,7 +2061,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
{
struct neighbour *neigh;
int rc = arpindex;
- struct net_device *netdev = iwdev->netdev;
struct dst_entry *dst;
struct sockaddr_in6 dst_addr;
struct sockaddr_in6 src_addr;
@@ -2086,9 +2081,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
return rc;
}
- if (netif_is_bond_slave(netdev))
- netdev = netdev_master_upper_dev_get(netdev);
-
neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
rcu_read_lock();
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 55a1fbf0e670..ae8b97c30665 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
int arp_index;
arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
- if (arp_index == -1)
+ if (arp_index < 0)
return;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
if (!cqp_request)
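
[note] The "arp_index < 0" change above guards against any negative errno, not just the literal -1. A tiny sketch, with lookup_arp_index() standing in for i40iw_arp_table():

#include <errno.h>

/* stand-in: may fail with any negative errno, not only -1 */
static int lookup_arp_index(int fail)
{
	return fail ? -ENOMEM : 3;
}

int main(void)
{
	int arp_index = lookup_arp_index(1);

	if (arp_index < 0)	/* "== -1" would miss -ENOMEM and friends */
		return 0;	/* failure detected as intended */
	return 1;
}
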
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 1b6fb1380961..19af29a48c55 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -83,7 +83,6 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->atomic_cap = IB_ATOMIC_NONE;
- props->max_map_per_fmr = 1;
props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
return 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 3a413752ccc3..331bc21cbcc7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -89,7 +89,6 @@ struct i40iw_mr {
union {
struct ib_mr ibmr;
struct ib_mw ibmw;
- struct ib_fmr ibfmr;
};
struct ib_umem *region;
u16 type;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 02a169f8027b..5f8f8d5c0ce0 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -141,10 +141,11 @@ static int create_iboe_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
return 0;
}
-int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
-
+int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
return -EINVAL;
@@ -167,12 +168,14 @@ int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
int slave_sgid_index, u8 *s_mac, u16 vlan_tag)
{
struct rdma_ah_attr slave_attr = *ah_attr;
+ struct rdma_ah_init_attr init_attr = {};
struct mlx4_ib_ah *mah = to_mah(ah);
int ret;
slave_attr.grh.sgid_attr = NULL;
slave_attr.grh.sgid_index = slave_sgid_index;
- ret = mlx4_ib_create_ah(ah, &slave_attr, 0, NULL);
+ init_attr.ah_attr = &slave_attr;
+ ret = mlx4_ib_create_ah(ah, &init_attr, NULL);
if (ret)
return ret;
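
[note] mlx4_ib_create_ah() now takes a struct rdma_ah_init_attr wrapper instead of separate attr and flags arguments, so new inputs (such as xmit_slave in the mlx5 hunks below) can be added without touching every caller. A sketch of the idea with demo_* stand-in types:

#include <errno.h>

struct demo_ah_attr { int sl; };

/* extension point: new members can be added without changing callers */
struct demo_ah_init_attr {
	struct demo_ah_attr *ah_attr;
	void *xmit_slave;
};

static int demo_create_ah(struct demo_ah_init_attr *init_attr)
{
	struct demo_ah_attr *ah_attr = init_attr->ah_attr;

	return ah_attr->sl >= 0 ? 0 : -EINVAL;
}

int main(void)
{
	struct demo_ah_attr attr = { .sl = 0 };
	struct demo_ah_init_attr init_attr = { .ah_attr = &attr };

	return demo_create_ah(&init_attr);
}
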
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 275722cec8c6..816d28854a8e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -558,7 +558,6 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
props->max_ah = INT_MAX;
@@ -2600,13 +2599,6 @@ static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
.modify_wq = mlx4_ib_modify_wq,
};
-static const struct ib_device_ops mlx4_ib_dev_fmr_ops = {
- .alloc_fmr = mlx4_ib_fmr_alloc,
- .dealloc_fmr = mlx4_ib_fmr_dealloc,
- .map_phys_fmr = mlx4_ib_map_phys_fmr,
- .unmap_fmr = mlx4_ib_unmap_fmr,
-};
-
static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
.alloc_mw = mlx4_ib_alloc_mw,
.dealloc_mw = mlx4_ib_dealloc_mw,
@@ -2724,9 +2716,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
}
- if (!mlx4_is_slave(ibdev->dev))
- ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fmr_ops);
-
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
ibdev->ib_dev.uverbs_cmd_mask |=
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index d188573187fa..6f4ea1067095 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -146,11 +146,6 @@ struct mlx4_ib_mw {
struct mlx4_mw mmw;
};
-struct mlx4_ib_fmr {
- struct ib_fmr ibfmr;
- struct mlx4_fmr mfmr;
-};
-
#define MAX_REGS_PER_FLOW 2
struct mlx4_flow_reg_id {
@@ -679,11 +674,6 @@ static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}
-static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
-}
-
static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
return container_of(ibflow, struct mlx4_ib_flow, ibflow);
@@ -752,7 +742,7 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
-int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
@@ -794,12 +784,6 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
- u64 iova);
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
-int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index b0121c90c561..e2fb71b23c80 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -698,99 +698,6 @@ err_free:
return ERR_PTR(err);
}
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
- struct ib_fmr_attr *fmr_attr)
-{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_fmr *fmr;
- int err = -ENOMEM;
-
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
- fmr_attr->max_pages, fmr_attr->max_maps,
- fmr_attr->page_shift, &fmr->mfmr);
- if (err)
- goto err_free;
-
- err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
- if (err)
- goto err_mr;
-
- fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
-
- return &fmr->ibfmr;
-
-err_mr:
- (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
-
-err_free:
- kfree(fmr);
-
- return ERR_PTR(err);
-}
-
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int npages, u64 iova)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);
-
- return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
- &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
-}
-
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *ibfmr;
- int err;
- struct mlx4_dev *mdev = NULL;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- if (mdev && to_mdev(ibfmr->device)->dev != mdev)
- return -EINVAL;
- mdev = to_mdev(ibfmr->device)->dev;
- }
-
- if (!mdev)
- return 0;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-
- mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
- }
-
- /*
- * Make sure all MPT status updates are visible before issuing
- * SYNC_TPT firmware command.
- */
- wmb();
-
- err = mlx4_SYNC_TPT(mdev);
- if (err)
- pr_warn("SYNC_TPT error %d when "
- "unmapping FMRs\n", err);
-
- return 0;
-}
-
-int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
- int err;
-
- err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
-
- if (!err)
- kfree(ifmr);
-
- return err;
-}
-
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2f9f78912267..cf51e3cbd969 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
int send_size;
int header_size;
int spc;
+ int err;
int i;
if (wr->wr.opcode != IB_WR_SEND)
@@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
sqp->ud_header.lrh.virtual_lane = 0;
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
- ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ if (err)
+ return err;
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
@@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
}
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
- ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+ &pkey);
else
- ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+ &pkey);
+ if (err)
+ return err;
+
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
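
[note] Both header builders above now propagate the return value of ib_get_cached_pkey() instead of silently encoding whatever was left in pkey. A minimal sketch of the pattern, with query_pkey() as a stand-in:

#include <errno.h>

/* stand-in: 0 on success, negative errno on error */
static int query_pkey(int index, unsigned short *pkey)
{
	if (index < 0)
		return -EINVAL;
	*pkey = 0xffff;
	return 0;
}

static int build_header(int pkey_index)
{
	unsigned short pkey;
	int err;

	err = query_pkey(pkey_index, &pkey);
	if (err)	/* propagate instead of using a stale pkey */
		return err;
	/* ...fill bth.pkey from the looked-up value here... */
	return 0;
}

int main(void)
{
	return build_header(0);
}
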
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 2a334800f109..8cca61c671f8 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,11 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
+obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
+
+mlx5_ib-y := ah.o \
+ cmd.o \
+ cong.o \
+ cq.o \
+ doorbell.o \
+ gsi.o \
+ ib_virt.o \
+ mad.o \
+ main.o \
+ mem.o \
+ mr.o \
+ qp.o \
+ qpc.o \
+ restrack.o \
+ srq.o \
+ srq_cmd.o \
+ wr.o
-mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
- srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
- cong.o restrack.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
-mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
-mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o
-mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += qos.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \
+ flow.o \
+ qos.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 80642dd359bc..59e5ec39b447 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -32,9 +32,28 @@
#include "mlx5_ib.h"
+static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
+ const struct rdma_ah_attr *ah_attr)
+{
+ enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type;
+ __be16 sport;
+
+ if ((gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
+ (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK))
+ sport = cpu_to_be16(
+ rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+ else
+ sport = mlx5_get_roce_udp_sport_min(dev,
+ ah_attr->grh.sgid_attr);
+
+ return sport;
+}
+
static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
- struct rdma_ah_attr *ah_attr)
+ struct rdma_ah_init_attr *init_attr)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
enum ib_gid_type gid_type;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
@@ -51,12 +70,15 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ if (init_attr->xmit_slave)
+ ah->xmit_port =
+ mlx5_lag_get_slave_port(dev->mdev,
+ init_attr->xmit_slave);
gid_type = ah_attr->grh.sgid_attr->gid_type;
memcpy(ah->av.rmac, ah_attr->roce.dmac,
sizeof(ah_attr->roce.dmac));
- ah->av.udp_sport =
- mlx5_get_roce_udp_sport(dev, ah_attr->grh.sgid_attr);
+ ah->av.udp_sport = mlx5_ah_get_udp_sport(dev, ah_attr);
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#define MLX5_ECN_ENABLED BIT(1)
@@ -68,10 +90,11 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
}
}
-int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
struct mlx5_ib_ah *ah = to_mah(ibah);
struct mlx5_ib_dev *dev = to_mdev(ibah->device);
enum rdma_ah_attr_type ah_type = ah_attr->type;
@@ -97,7 +120,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
return err;
}
- create_ib_ah(dev, ah, ah_attr);
+ create_ib_ah(dev, ah, init_attr);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 4c26492ab8a3..cc24c711e92a 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -1,46 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
*/
#include "cmd.h"
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
{
- u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
int err;
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
if (!err)
*mkey = MLX5_GET(query_special_contexts_out, out,
dump_fill_mkey);
@@ -50,12 +23,12 @@ int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
int err;
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
if (!err)
*null_mkey = MLX5_GET(query_special_contexts_out, out,
null_mkey);
@@ -63,23 +36,15 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
}
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
- void *out, int out_size)
+ void *out)
{
- u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };
+ u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};
MLX5_SET(query_cong_params_in, in, opcode,
MLX5_CMD_OP_QUERY_CONG_PARAMS);
MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
-int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
- void *in, int in_size)
-{
- u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };
-
- return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
+ return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
}
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
@@ -133,7 +98,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
MLX5_SET64(alloc_memic_in, in, range_start_addr,
hw_start_addr + (page_idx * PAGE_SIZE));
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
if (ret) {
spin_lock(&dm->lock);
bitmap_clear(dm->memic_alloc_pages,
@@ -162,8 +127,7 @@ void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
struct mlx5_core_dev *dev = dm->dev;
u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
- u32 out[MLX5_ST_SZ_DW(dealloc_memic_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
u64 start_page_idx;
int err;
@@ -174,7 +138,7 @@ void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
MLX5_SET(dealloc_memic_in, in, memic_size, length);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
if (err)
return;
@@ -198,49 +162,46 @@ int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
MLX5_SET(destroy_tir_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tir, in);
}
void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
MLX5_SET(destroy_tis_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tis, in);
}
void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
MLX5_SET(destroy_rqt_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_rqt, in);
}
int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
int err;
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
MLX5_SET(alloc_transport_domain_in, in, uid, uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@@ -251,32 +212,29 @@ int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}
void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
MLX5_SET(dealloc_pd_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_pd, in);
}
int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
u32 qpn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {};
- u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
void *gid;
MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
@@ -284,14 +242,13 @@ int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
MLX5_SET(attach_to_mcg_in, in, uid, uid);
gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}
int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
u32 qpn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {};
- u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
void *gid;
MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
@@ -299,18 +256,18 @@ int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
MLX5_SET(detach_from_mcg_in, in, uid, uid);
gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}
int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
{
u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
int err;
MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
MLX5_SET(alloc_xrcd_in, in, uid, uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
if (!err)
*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
@@ -318,30 +275,12 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
MLX5_SET(dealloc_xrcd_in, in, uid, uid);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
- u16 uid)
-{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
- int err;
-
- MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- MLX5_SET(alloc_q_counter_in, in, uid, uid);
-
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *counter_id = MLX5_GET(alloc_q_counter_out, out,
- counter_set_id);
- return err;
+ return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
}
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
@@ -367,7 +306,7 @@ int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_exec_inout(dev, mad_ifc, in, out);
if (err)
goto out;
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 945ebce73613..f4d8558db434 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -40,10 +40,8 @@
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
- void *out, int out_size);
+ void *out);
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out);
-int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
- void *in, int in_size);
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
u64 length, u32 alignment);
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
@@ -61,8 +59,6 @@ int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
u32 qpn, u16 uid);
int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
-int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
- u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
#endif /* MLX5_IB_CMD_H */
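
[note] Throughout cmd.c, mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)) calls collapse into mlx5_cmd_exec_in()/mlx5_cmd_exec_inout(), and the write-only out arrays disappear at the call sites. A rough sketch of how such size-inferring wrappers can be built; the real macros derive lengths from the firmware command layouts, not from the caller's arrays as done here:

#include <stddef.h>
#include <stdio.h>

static int exec_raw(const void *in, size_t inlen, void *out, size_t outlen)
{
	(void)in;
	(void)out;
	printf("in=%zu bytes, out=%zu bytes\n", inlen, outlen);
	return 0;
}

/* infer both lengths from the array arguments so a caller can no
 * longer pass a mismatched sizeof() */
#define exec_in(cmd_in)		exec_raw(cmd_in, sizeof(cmd_in), NULL, 0)
#define exec_inout(cmd_in, cmd_out) \
	exec_raw(cmd_in, sizeof(cmd_in), cmd_out, sizeof(cmd_out))

int main(void)
{
	unsigned int in[4] = { 0 };
	unsigned int out[8] = { 0 };

	exec_in(in);		/* commands that need no output buffer */
	return exec_inout(in, out);
}
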
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index de4da92b81a6..b9291e482428 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -290,7 +290,7 @@ static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
node = mlx5_ib_param_to_node(offset);
- err = mlx5_cmd_query_cong_params(mdev, node, out, outlen);
+ err = mlx5_cmd_query_cong_params(mdev, node, out);
if (err)
goto free;
@@ -339,7 +339,7 @@ static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
attr_mask);
- err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(dev->mdev, modify_cong_params, in);
kvfree(in);
alloc_err:
mlx5_ib_put_native_port_mdev(dev, port_num + 1);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 146ba2966744..0c18cb6a2f14 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -36,6 +36,7 @@
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
+#include "qp.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
@@ -201,7 +202,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_WR_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
@@ -213,12 +214,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
break;
}
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
@@ -226,7 +227,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
if (unlikely(is_qp1(qp->ibqp.qp_type))) {
- u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
+ u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
&wc->pkey_index);
@@ -484,7 +485,7 @@ repoll:
* because CQs will be locked while QPs are removed
* from the table.
*/
- mqp = __mlx5_qp_lookup(dev->mdev, qpn);
+ mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
*cur_qp = to_mibqp(mqp);
}
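
[note] The cq.c rename splits the old catch-all imm_inval_pkey field into immediate, inval_rkey, and pkey, which can alias the same CQE word through an anonymous union. An illustrative layout only; the real struct mlx5_cqe64 is larger and byte-ordered for hardware:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct demo_cqe {
	union {
		uint32_t immediate;	/* RESP_WR_IMM / RESP_SEND_IMM */
		uint32_t inval_rkey;	/* RESP_SEND_INV */
		uint32_t pkey;		/* QP1 completions */
	};
};

int main(void)
{
	struct demo_cqe cqe = { .immediate = 0x1234 };

	/* one storage word, three intent-revealing accessor names */
	printf("0x%" PRIx32 " 0x%" PRIx32 "\n", cqe.inval_rkey, cqe.pkey);
	return 0;
}
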
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 46e1ab771f10..9454a66c12cc 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -14,6 +14,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
+#include "qp.h"
#include <linux/xarray.h>
#define UVERBS_MODULE_NAME mlx5_ib
@@ -494,6 +495,10 @@ static u64 devx_get_obj_id(const void *in)
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
MLX5_GET(rst2init_qp_in, in, qpn));
break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2init_qp_in, in, qpn));
+ break;
case MLX5_CMD_OP_INIT2RTR_QP:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
MLX5_GET(init2rtr_qp_in, in, qpn));
@@ -614,7 +619,7 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
enum ib_qp_type qp_type = qp->ibqp.qp_type;
if (qp_type == IB_QPT_RAW_PACKET ||
- (qp->flags & MLX5_IB_QP_UNDERLAY)) {
+ (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
struct mlx5_ib_raw_packet_qp *raw_packet_qp =
&qp->raw_packet_qp;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
@@ -819,6 +824,7 @@ static bool devx_is_obj_modify_cmd(const void *in)
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
case MLX5_CMD_OP_RST2INIT_QP:
case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
@@ -1356,7 +1362,7 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
}
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
- ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
+ ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
else
@@ -1450,9 +1456,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (opcode == MLX5_CMD_OP_CREATE_DCT) {
obj->flags |= DEVX_OBJ_FLAGS_DCT;
- err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
- cmd_in, cmd_in_len,
- cmd_out, cmd_out_len);
+ err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
+ cmd_in_len, cmd_out, cmd_out_len);
} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
obj->flags |= DEVX_OBJ_FLAGS_CQ;
obj->core_cq.comp = devx_cq_comp;
@@ -1499,7 +1504,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
obj_destroy:
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
- mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
+ mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
else
@@ -2217,14 +2222,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
obj->mdev = dev->mdev;
uobj->object = obj;
devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
- err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
- if (err)
- goto err_umem_destroy;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
- return 0;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
+ sizeof(obj_id));
+ return err;
-err_umem_destroy:
- mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
ib_umem_release(obj->umem);
err_obj_free:
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 862b7bf3e646..216a1108ad34 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -67,46 +67,41 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
},
};
-#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
-static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
- struct uverbs_attr_bundle *attrs)
+static int get_dests(struct uverbs_attr_bundle *attrs,
+ struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
+ int *dest_type, struct ib_qp **qp, u32 *flags)
{
- struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
- struct mlx5_ib_flow_handler *flow_handler;
- struct mlx5_ib_flow_matcher *fs_matcher;
- struct ib_uobject **arr_flow_actions;
- struct ib_uflow_resources *uflow_res;
- struct mlx5_flow_act flow_act = {};
- void *devx_obj;
- int dest_id, dest_type;
- void *cmd_in;
- int inlen;
bool dest_devx, dest_qp;
- struct ib_qp *qp = NULL;
- struct ib_uobject *uobj =
- uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
- struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
- int len, ret, i;
- u32 counter_id = 0;
- u32 *offset_attr;
- u32 offset = 0;
-
- if (!capable(CAP_NET_RAW))
- return -EPERM;
+ void *devx_obj;
+ int err;
- dest_devx =
- uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ dest_devx = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
dest_qp = uverbs_attr_is_valid(attrs,
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
- fs_matcher = uverbs_attr_get_obj(attrs,
- MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS &&
- ((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
+ *flags = 0;
+ err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
+ if (err)
+ return err;
+
+ /* The two flags are mutually exclusive */
+ if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
+ *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
return -EINVAL;
- /* Allow only DEVX object as dest when inserting to FDB */
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx)
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
+ if (dest_devx && (dest_qp || *flags))
+ return -EINVAL;
+ else if (dest_qp && *flags)
+ return -EINVAL;
+ }
+
+ /* For FDB, allow only a DEVX object or the drop flag as dest */
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx ||
+ (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
return -EINVAL;
/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
@@ -114,43 +109,86 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
return -EINVAL;
+ *qp = NULL;
if (dest_devx) {
- devx_obj = uverbs_attr_get_obj(
- attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
- if (IS_ERR(devx_obj))
- return PTR_ERR(devx_obj);
+ devx_obj =
+ uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
/* Verify that the given DEVX object is a flow
* steering destination.
*/
- if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
+ if (!mlx5_ib_devx_is_flow_dest(devx_obj, dest_id, dest_type))
return -EINVAL;
/* Allow only flow table as dest when inserting to FDB or RDMA_RX */
if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
- dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
return -EINVAL;
} else if (dest_qp) {
struct mlx5_ib_qp *mqp;
- qp = uverbs_attr_get_obj(attrs,
- MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
+ *qp = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+ if (IS_ERR(*qp))
+ return PTR_ERR(*qp);
- if (qp->qp_type != IB_QPT_RAW_PACKET)
+ if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
return -EINVAL;
- mqp = to_mqp(qp);
- if (mqp->flags & MLX5_IB_QP_RSS)
- dest_id = mqp->rss_qp.tirn;
+ mqp = to_mqp(*qp);
+ if (mqp->is_rss)
+ *dest_id = mqp->rss_qp.tirn;
else
- dest_id = mqp->raw_packet_qp.rq.tirn;
- dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- } else {
- dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ *dest_id = mqp->raw_packet_qp.rq.tirn;
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
}
+ if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
+static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_flow_context flow_context = {.flow_tag =
+ MLX5_FS_DEFAULT_FLOW_TAG};
+ u32 *offset_attr, offset = 0, counter_id = 0;
+ int dest_id, dest_type, inlen, len, ret, i;
+ struct mlx5_ib_flow_handler *flow_handler;
+ struct mlx5_ib_flow_matcher *fs_matcher;
+ struct ib_uobject **arr_flow_actions;
+ struct ib_uflow_resources *uflow_res;
+ struct mlx5_flow_act flow_act = {};
+ struct ib_qp *qp = NULL;
+ void *devx_obj, *cmd_in;
+ struct ib_uobject *uobj;
+ struct mlx5_ib_dev *dev;
+ u32 flags;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
+ dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+
+ if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
+ return -EINVAL;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
if (len) {
@@ -180,10 +218,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
- if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
- fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
- return -EINVAL;
-
cmd_in = uverbs_attr_get_alloced_ptr(
attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
inlen = uverbs_attr_get_len(attrs,
@@ -404,7 +438,10 @@ static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
{
return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
max_modify_header_actions) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions);
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ max_modify_header_actions) ||
+ MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ max_modify_header_actions);
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
@@ -427,7 +464,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
num_actions = uverbs_attr_ptr_get_array_size(
attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
- MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto));
+ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
if (num_actions < 0)
return num_actions;
@@ -626,7 +663,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
UA_OPTIONAL,
- UA_ALLOC_AND_COPY));
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ enum mlx5_ib_create_flow_flags,
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
@@ -648,7 +688,7 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
- set_action_in_add_action_in_auto)),
+ set_add_copy_action_in_auto)),
UA_MANDATORY,
UA_ALLOC_AND_COPY),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 1ae6fd95acaa..40d418153891 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -119,17 +119,15 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
struct mlx5_ib_gsi_qp *gsi;
struct ib_qp_init_attr hw_init_attr = *init_attr;
const u8 port_num = init_attr->port_num;
- const int num_pkeys = pd->device->attrs.max_pkeys;
- const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
+ int num_qps = 0;
int ret;
- mlx5_ib_dbg(dev, "creating GSI QP\n");
-
- if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
- mlx5_ib_warn(dev,
- "invalid port number %d during GSI QP creation\n",
- port_num);
- return ERR_PTR(-EINVAL);
+ if (mlx5_ib_deth_sqpn_cap(dev)) {
+ if (MLX5_CAP_GEN(dev->mdev,
+ port_type) == MLX5_CAP_PORT_TYPE_IB)
+ num_qps = pd->device->attrs.max_pkeys;
+ else if (dev->lag_active)
+ num_qps = MLX5_MAX_PORTS;
}
gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
@@ -270,7 +268,7 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
}
static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
- u16 qp_index)
+ u16 pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct ib_qp_attr attr;
@@ -279,7 +277,7 @@ static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
attr.qp_state = IB_QPS_INIT;
- attr.pkey_index = qp_index;
+ attr.pkey_index = pkey_index;
attr.qkey = IB_QP1_QKEY;
attr.port_num = gsi->port_num;
ret = ib_modify_qp(qp, &attr, mask);
@@ -313,12 +311,17 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
{
struct ib_device *device = gsi->rx_qp->device;
struct mlx5_ib_dev *dev = to_mdev(device);
+ int pkey_index = qp_index;
+ struct mlx5_ib_qp *mqp;
struct ib_qp *qp;
unsigned long flags;
u16 pkey;
int ret;
- ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ pkey_index = 0;
+
+ ret = ib_query_pkey(device, gsi->port_num, pkey_index, &pkey);
if (ret) {
mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
gsi->port_num, qp_index);
@@ -347,7 +350,10 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
return;
}
- ret = modify_to_rts(gsi, qp, qp_index);
+ mqp = to_mqp(qp);
+ if (dev->lag_active)
+ mqp->gsi_lag_port = qp_index + 1;
+ ret = modify_to_rts(gsi, qp, pkey_index);
if (ret)
goto err_destroy_qp;
@@ -466,11 +472,15 @@ static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
{
struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ struct mlx5_ib_ah *ah = to_mah(wr->ah);
int qp_index = wr->pkey_index;
- if (!mlx5_ib_deth_sqpn_cap(dev))
+ if (!gsi->num_qps)
return gsi->rx_qp;
+ if (dev->lag_active && ah->xmit_port)
+ qp_index = ah->xmit_port - 1;
+
if (qp_index >= gsi->num_qps)
return NULL;
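
The GSI rework above sizes the TX QP array from the port type (one QP per P_Key on IB ports, one per port when LAG is active) and then picks a TX QP per work request. A hedged sketch of that selection rule, with hypothetical pared-down types:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the GSI structures. */
struct gsi { int num_qps; void *rx_qp; void **tx_qps; };
struct wr { int pkey_index; int ah_xmit_port; /* 0 = unset */ };

/* Mirrors get_tx_qp() above: with no dedicated TX QPs fall back to the
 * RX QP; on a LAG-active device index by the AH's transmit port,
 * otherwise by P_Key index. */
static void *pick_tx_qp(struct gsi *gsi, const struct wr *wr, bool lag_active)
{
	int idx = wr->pkey_index;

	if (!gsi->num_qps)
		return gsi->rx_qp;
	if (lag_active && wr->ah_xmit_port)
		idx = wr->ah_xmit_port - 1;
	if (idx >= gsi->num_qps)
		return NULL;
	return gsi->tx_qps[idx];
}

int main(void)
{
	void *qps[2] = { (void *)1, (void *)2 };
	struct gsi g = { .num_qps = 2, .rx_qp = NULL, .tx_qps = qps };
	struct wr w = { .pkey_index = 0, .ah_xmit_port = 2 };

	return pick_tx_qp(&g, &w, true) == qps[1] ? 0 : 1;
}
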
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 3b6750cba796..5b30d3fa8f8d 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -9,9 +9,9 @@
#include <linux/mlx5/eswitch.h>
#include "mlx5_ib.h"
-#ifdef CONFIG_MLX5_ESWITCH
extern const struct mlx5_ib_profile raw_eth_profile;
+#ifdef CONFIG_MLX5_ESWITCH
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
u16 vport_num);
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index b61165359954..46b2d370fb3f 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -134,7 +134,7 @@ int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
if (!out)
return -ENOMEM;
- err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz);
+ err = mlx5_core_query_vport_counter(mdev, true, vf, port, out);
if (err)
goto ex;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 14e0c17de6a9..454ce5de2de7 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
@@ -188,8 +187,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
goto done;
}
- err = mlx5_core_query_vport_counter(mdev, 0, 0,
- mdev_port_num, out_cnt, sz);
+ err = mlx5_core_query_vport_counter(mdev, 0, 0, mdev_port_num,
+ out_cnt);
if (!err)
pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
} else {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6679756506e6..343a8b8361e7 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -53,12 +53,15 @@
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
+#include <rdma/lag.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
+#include "qp.h"
+#include "wr.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
@@ -69,17 +72,10 @@
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
-#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "5.0-0"
-
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
MODULE_LICENSE("Dual BSD/GPL");
-static char mlx5_version[] =
- DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
- DRIVER_VERSION "\n";
-
struct mlx5_ib_event_work {
struct work_struct work;
union {
@@ -627,8 +623,8 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
attr->index, NULL, NULL);
}
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
- const struct ib_gid_attr *attr)
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr)
{
if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
return 0;
@@ -1003,7 +999,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
props->max_ah = INT_MAX;
props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
@@ -1963,6 +1958,9 @@ uar_done:
resp.response_length += sizeof(resp.dump_fill_mkey);
}
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp.comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto out_mdev;
@@ -1973,7 +1971,7 @@ uar_done:
context->lib_caps = req.lib_caps;
print_lib_caps(dev, context->lib_caps);
- if (dev->lag_active) {
+ if (mlx5_ib_lag_should_assign_affinity(dev)) {
u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity,
@@ -2443,7 +2441,7 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
act_size = roundup_pow_of_two(act_size);
dm->size = act_size;
- err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
+ err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
to_mucontext(ctx)->devx_uid, &dm->dev_addr,
&dm->icm_dm.obj_id);
if (err)
@@ -2560,7 +2558,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct mlx5_ib_alloc_pd_resp resp;
int err;
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
u16 uid = 0;
struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
@@ -2568,8 +2566,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
uid = context ? context->devx_uid : 0;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
- err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
- out, sizeof(out));
+ err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
if (err)
return err;
@@ -3697,12 +3694,13 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!dest_num)
rule_dst = NULL;
} else {
+ if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)
+ flow_act.action |=
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
if (is_egress)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- else
- flow_act.action |=
- dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
- MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ else if (dest_num)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
@@ -3746,30 +3744,6 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}
-static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
- struct mlx5_ib_flow_prio *ft_prio,
- struct ib_flow_attr *flow_attr,
- struct mlx5_flow_destination *dst)
-{
- struct mlx5_ib_flow_handler *handler_dst = NULL;
- struct mlx5_ib_flow_handler *handler = NULL;
-
- handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
- if (!IS_ERR(handler)) {
- handler_dst = create_flow_rule(dev, ft_prio,
- flow_attr, dst);
- if (IS_ERR(handler_dst)) {
- mlx5_del_flow_rules(handler->rule);
- ft_prio->refcount--;
- kfree(handler);
- handler = handler_dst;
- } else {
- list_add(&handler_dst->list, &handler->list);
- }
- }
-
- return handler;
-}
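
With create_dont_trap_rule() gone, a DONT_TRAP rule is expressed by composing action bits on a single rule instead of installing two rules. A sketch of that composition under stand-in action flags (the real bits are the MLX5_FLOW_CONTEXT_ACTION_* values):

#include <stdio.h>

enum { ACT_FWD_DEST = 1 << 0, ACT_FWD_NEXT_PRIO = 1 << 1, ACT_ALLOW = 1 << 2 };

/* One rule now carries both the forward-to-destination and the
 * forward-to-next-priority bits when DONT_TRAP is requested. */
static unsigned int build_action(int dont_trap, int is_egress, int dest_num)
{
	unsigned int act = 0;

	if (dont_trap)
		act |= ACT_FWD_NEXT_PRIO;
	if (is_egress)
		act |= ACT_ALLOW;
	else if (dest_num)
		act |= ACT_FWD_DEST;
	return act;
}

int main(void)
{
	printf("%#x\n", build_action(1, 0, 1)); /* FWD_DEST | FWD_NEXT_PRIO */
	return 0;
}
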
enum {
LEFTOVERS_MC,
LEFTOVERS_UC,
@@ -3966,22 +3940,18 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
} else {
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- if (mqp->flags & MLX5_IB_QP_RSS)
+ if (mqp->is_rss)
dst->tir_num = mqp->rss_qp.tirn;
else
dst->tir_num = mqp->raw_packet_qp.rq.tirn;
}
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
- handler = create_dont_trap_rule(dev, ft_prio,
- flow_attr, dst);
- } else {
- underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
- mqp->underlay_qpn : 0;
- handler = _create_flow_rule(dev, ft_prio, flow_attr,
- dst, underlay_qpn, ucmd);
- }
+ underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
+ mqp->underlay_qpn :
+ 0;
+ handler = _create_flow_rule(dev, ft_prio, flow_attr, dst,
+ underlay_qpn, ucmd);
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
handler = create_leftovers_rule(dev, ft_prio, flow_attr,
@@ -4225,18 +4195,17 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
dst[dst_num].type = dest_type;
- dst[dst_num].tir_num = dest_id;
+ dst[dst_num++].tir_num = dest_id;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
- dst[dst_num].ft_num = dest_id;
+ dst[dst_num++].ft_num = dest_id;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- } else {
- dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
+ dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
}
- dst_num++;
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -4446,7 +4415,7 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
uid = ibqp->pd ?
to_mpd(ibqp->pd)->uid : 0;
- if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
+ if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
return -EOPNOTSUPP;
}
@@ -4632,8 +4601,7 @@ static void delay_drop_handler(struct work_struct *work)
atomic_inc(&delay_drop->events_cnt);
mutex_lock(&delay_drop->lock);
- err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
- delay_drop->timeout);
+ err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
if (err) {
mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
delay_drop->timeout);
@@ -5439,15 +5407,21 @@ static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
int num_cnt_ports;
int i;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+
for (i = 0; i < num_cnt_ports; i++) {
- if (dev->port[i].cnts.set_id_valid)
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
+ if (dev->port[i].cnts.set_id) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ dev->port[i].cnts.set_id);
+ mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+ }
kfree(dev->port[i].cnts.names);
kfree(dev->port[i].cnts.offsets);
}
@@ -5556,11 +5530,14 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
int num_cnt_ports;
int err = 0;
int i;
bool is_shared;
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
@@ -5572,17 +5549,19 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
dev->port[i].cnts.offsets);
- err = mlx5_cmd_alloc_q_counter(dev->mdev,
- &dev->port[i].cnts.set_id,
- is_shared ?
- MLX5_SHARED_RESOURCE_UID : 0);
+ MLX5_SET(alloc_q_counter_in, in, uid,
+ is_shared ? MLX5_SHARED_RESOURCE_UID : 0);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
if (err) {
mlx5_ib_warn(dev,
"couldn't allocate queue counter for port %d, err %d\n",
i + 1, err);
goto err_alloc;
}
- dev->port[i].cnts.set_id_valid = true;
+
+ dev->port[i].cnts.set_id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
return 0;
@@ -5638,27 +5617,23 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
struct rdma_hw_stats *stats,
u16 set_id)
{
- int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
- void *out;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
__be32 val;
int ret, i;
- out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen);
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, set_id);
+ ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
if (ret)
- goto free;
+ return ret;
for (i = 0; i < cnts->num_q_counters; i++) {
- val = *(__be32 *)(out + cnts->offsets[i]);
+ val = *(__be32 *)((void *)out + cnts->offsets[i]);
stats->value[i] = (u64)be32_to_cpu(val);
}
-free:
- kvfree(out);
- return ret;
+ return 0;
}
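
The counter conversions in this file all follow the same shape: build a fixed-size inbox on the stack with MLX5_SET(), execute with mlx5_cmd_exec_in()/mlx5_cmd_exec_inout(), and pull results out with MLX5_GET(). The real accessors are generated from the PRM layouts in mlx5_ifc.h; the following is only a toy illustration of the calling pattern:

#include <stdint.h>
#include <stdio.h>

/* Toy layout: word 0 = opcode, word 1 = counter_set_id.  The real
 * MLX5_SET/MLX5_GET are generated big-endian field accessors; this
 * only shows the inbox/outbox calling convention. */
enum { F_OPCODE = 0, F_COUNTER_SET_ID = 1, CMD_WORDS = 4 };

static void toy_set(uint32_t *buf, int field, uint32_t val) { buf[field] = val; }
static uint32_t toy_get(const uint32_t *buf, int field) { return buf[field]; }

/* Pretend "firmware": echoes the counter id back in the outbox. */
static int toy_exec_inout(const uint32_t *in, uint32_t *out)
{
	toy_set(out, F_COUNTER_SET_ID, toy_get(in, F_COUNTER_SET_ID));
	return 0;
}

int main(void)
{
	uint32_t in[CMD_WORDS] = {0}, out[CMD_WORDS] = {0};

	toy_set(in, F_OPCODE, 0x771);	/* e.g. QUERY_Q_COUNTER */
	toy_set(in, F_COUNTER_SET_ID, 5);
	if (!toy_exec_inout(in, out))
		printf("counter_set_id = %u\n", toy_get(out, F_COUNTER_SET_ID));
	return 0;
}
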
static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
@@ -5765,20 +5740,38 @@ static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
counter->stats, counter->id);
}
+static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+
+ if (!counter->id)
+ return 0;
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+}
+
static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
- u16 cnt_set_id = 0;
int err;
if (!counter->id) {
- err = mlx5_cmd_alloc_q_counter(dev->mdev,
- &cnt_set_id,
- MLX5_SHARED_RESOURCE_UID);
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
+
+ MLX5_SET(alloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ MLX5_SET(alloc_q_counter_in, in, uid, MLX5_SHARED_RESOURCE_UID);
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
if (err)
return err;
- counter->id = cnt_set_id;
+ counter->id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
err = mlx5_ib_qp_set_counter(qp, counter);
@@ -5788,7 +5781,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return 0;
fail_set_counter:
- mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id);
+ mlx5_ib_counter_dealloc(counter);
counter->id = 0;
return err;
@@ -5799,13 +5792,6 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
return mlx5_ib_qp_set_counter(qp, NULL);
}
-static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
-{
- struct mlx5_ib_dev *dev = to_mdev(counter->device);
-
- return mlx5_core_dealloc_q_counter(dev->mdev, counter->id);
-}
-
static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params)
@@ -6203,26 +6189,20 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
mmap_offset = mlx5_entry_to_mmap_offset(entry);
length = entry->rdma_entry.npages * PAGE_SIZE;
uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
&mmap_offset, sizeof(mmap_offset));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
&entry->page_idx, sizeof(entry->page_idx));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
&length, sizeof(length));
- if (err)
- goto err;
-
- return 0;
-
-err:
- rdma_user_mmap_entry_remove(&entry->rdma_entry);
return err;
}
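
The same reshuffle is applied to the UAR handler below: once uverbs_finalize_uobj_create() has run, a failed uverbs_copy_to() no longer needs a manual unwind, because the core destroys the finalized uobject when the handler returns an error. A hedged sketch of the resulting shape, with hypothetical stand-in types:

#include <stdio.h>

/* Hypothetical stand-ins; the real types are uverbs internals. */
struct ctx { int unused; };
struct obj { unsigned long offset, len; };
enum { OFFSET_ATTR, LEN_ATTR };

static void finalize_create(struct ctx *attrs) { (void)attrs; }
static int copy_out(struct ctx *attrs, int attr, const void *val)
{
	(void)attrs; (void)attr; (void)val;
	return 0;
}

/* After finalize_create(), a failed copy simply returns the error;
 * the core tears the finalized object down, which is why the err:
 * unwind labels above could be removed. */
static int handler(struct ctx *attrs, struct obj *entry)
{
	int err;

	finalize_create(attrs);
	err = copy_out(attrs, OFFSET_ATTR, &entry->offset);
	if (err)
		return err;
	return copy_out(attrs, LEN_ATTR, &entry->len);
}

int main(void)
{
	struct ctx c = {0};
	struct obj o = { .offset = 4096, .len = 4096 };

	printf("%d\n", handler(&c, &o)); /* 0 */
	return 0;
}
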
@@ -6336,26 +6316,20 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
mmap_offset = mlx5_entry_to_mmap_offset(entry);
length = entry->rdma_entry.npages * PAGE_SIZE;
uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
&mmap_offset, sizeof(mmap_offset));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
&entry->page_idx, sizeof(entry->page_idx));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
&length, sizeof(length));
- if (err)
- goto err;
-
- return 0;
-
-err:
- rdma_user_mmap_entry_remove(&entry->rdma_entry);
return err;
}
@@ -6549,6 +6523,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
dev->ib_dev.dev.parent = mdev->device;
+ dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
@@ -6638,8 +6613,8 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.modify_qp = mlx5_ib_modify_qp,
.modify_srq = mlx5_ib_modify_srq,
.poll_cq = mlx5_ib_poll_cq,
- .post_recv = mlx5_ib_post_recv,
- .post_send = mlx5_ib_post_send,
+ .post_recv = mlx5_ib_post_recv_nodrain,
+ .post_send = mlx5_ib_post_send_nodrain,
.post_srq_recv = mlx5_ib_post_srq_recv,
.process_mad = mlx5_ib_process_mad,
.query_ah = mlx5_ib_query_ah,
@@ -7140,6 +7115,8 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
int err;
int i;
+ dev->profile = profile;
+
for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
if (profile->stage[i].init) {
err = profile->stage[i].init(dev);
@@ -7148,7 +7125,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
}
}
- dev->profile = profile;
dev->ib_active = true;
return dev;
@@ -7175,6 +7151,9 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_roce_init,
mlx5_ib_stage_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
mlx5_init_srq_table,
mlx5_cleanup_srq_table),
@@ -7232,6 +7211,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_raw_eth_roce_init,
mlx5_ib_stage_raw_eth_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
mlx5_init_srq_table,
mlx5_cleanup_srq_table),
@@ -7316,8 +7298,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
int port_type_cap;
int num_ports;
- printk_once(KERN_INFO "%s", mlx5_version);
-
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
if (!mlx5_core_mp_enabled(mdev))
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a4e522385de0..5dbe3eb0d9cb 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -337,7 +337,6 @@ struct mlx5_ib_rwq {
struct ib_umem *umem;
size_t buf_size;
unsigned int page_shift;
- int create_type;
struct mlx5_db db;
u32 user_index;
u32 wqe_count;
@@ -346,17 +345,6 @@ struct mlx5_ib_rwq {
u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};
-enum {
- MLX5_QP_USER,
- MLX5_QP_KERNEL,
- MLX5_QP_EMPTY
-};
-
-enum {
- MLX5_WQ_USER,
- MLX5_WQ_KERNEL
-};
-
struct mlx5_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_rwq_ind_tbl;
u32 rqtn;
@@ -443,34 +431,37 @@ struct mlx5_ib_qp {
/* serialize qp state modifications
*/
struct mutex mutex;
+ /* cached variant of create_flags from struct ib_qp_init_attr */
u32 flags;
u8 port;
u8 state;
- int wq_sig;
- int scat_cqe;
int max_inline_data;
struct mlx5_bf bf;
- int has_rq;
+ u8 has_rq:1;
+ u8 is_rss:1;
/* only for user space QPs. For kernel
* we have it from the bf object
*/
int bfregn;
- int create_type;
-
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
struct mlx5_rate_limit rl;
u32 underlay_qpn;
u32 flags_en;
- /* storage for qp sub type when core qp type is IB_QPT_DRIVER */
- enum ib_qp_type qp_sub_type;
+ /*
+ * IB/core doesn't store low-level QP types, so
+ * store both MLX and IBTA types in the field below.
+	 * IB_QPT_DRIVER is broken down into the DCI/DCT subtypes.
+ */
+ enum ib_qp_type type;
 	/* A flag to indicate that a new counter has been configured
 	 * but has not yet taken effect
 	 */
u32 counter_pending;
+ u16 gsi_lag_port;
};
struct mlx5_ib_cq_buf {
@@ -481,24 +472,6 @@ struct mlx5_ib_cq_buf {
int nent;
};
-enum mlx5_ib_qp_flags {
- MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
- MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
- MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
- MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
- MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
- MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
- /* QP uses 1 as its source QP number */
- MLX5_IB_QP_SQPN_QP1 = 1 << 6,
- MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
- MLX5_IB_QP_RSS = 1 << 8,
- MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
- MLX5_IB_QP_UNDERLAY = 1 << 10,
- MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
- MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
- MLX5_IB_QP_PACKET_BASED_CREDIT = 1 << 13,
-};
-
struct mlx5_umr_wr {
struct ib_send_wr wr;
u64 virt_addr;
@@ -702,12 +675,6 @@ struct umr_common {
struct semaphore sem;
};
-enum {
- MLX5_FMR_INVALID,
- MLX5_FMR_VALID,
- MLX5_FMR_BUSY,
-};
-
struct mlx5_cache_ent {
struct list_head head;
 	/* sync access to the cache entry
@@ -780,7 +747,6 @@ struct mlx5_ib_counters {
u32 num_cong_counters;
u32 num_ext_ppcnt_counters;
u16 set_id;
- bool set_id_valid;
};
struct mlx5_ib_multiport_info;
@@ -870,6 +836,7 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_CAPS,
MLX5_IB_STAGE_NON_DEFAULT_CB,
MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_QP,
MLX5_IB_STAGE_SRQ,
MLX5_IB_STAGE_DEVICE_RESOURCES,
MLX5_IB_STAGE_DEVICE_NOTIFIER,
@@ -1065,6 +1032,7 @@ struct mlx5_ib_dev {
struct mlx5_dm dm;
u16 devx_whitelist_uid;
struct mlx5_srq_table srq_table;
+ struct mlx5_qp_table qp_table;
struct mlx5_async_ctx async_ctx;
struct mlx5_devx_event_table devx_event_table;
struct mlx5_var_table var_table;
@@ -1180,7 +1148,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
-int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
@@ -1204,10 +1172,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
@@ -1283,8 +1247,6 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
-int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
unsigned long max_page_shift,
int *count, int *shift,
@@ -1382,8 +1344,8 @@ int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
- const struct ib_gid_attr *attr);
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr);
void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
@@ -1580,4 +1542,11 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
int mlx5_ib_enable_driver(struct ib_device *dev);
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
+
+static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
+{
+ return dev->lag_active ||
+ (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
+ MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
+}
#endif /* MLX5_IB_H */
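
mlx5_ib_lag_should_assign_affinity() widens affinity assignment beyond lag_active devices to any device exposing more than one LAG port together with lag_tx_port_affinity; contexts then hand out TX port affinity round-robin from an atomic counter, as in the alloc_ucontext hunk above. A small sketch of that distribution using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_LAG_PORTS 2

/* Per-device counter handing out 1-based TX port affinity round-robin
 * across the LAG ports, in the spirit of tx_port_affinity. */
static atomic_uint tx_port_affinity;

static unsigned int next_affinity(void)
{
	return (atomic_fetch_add(&tx_port_affinity, 1) % NUM_LAG_PORTS) + 1;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("port %u\n", next_affinity()); /* 1 2 1 2 1 */
	return 0;
}
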
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a401931189b7..44683073be0c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1439,6 +1439,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (is_odp_mr(mr)) {
to_ib_umem_odp(mr->umem)->private = mr;
+ init_waitqueue_head(&mr->q_deferred_work);
atomic_set(&mr->num_deferred_work, 0);
err = xa_err(xa_store(&dev->odp_mkeys,
mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3de7606d4a1a..7d2ec9ee5097 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -36,6 +36,7 @@
#include "mlx5_ib.h"
#include "cmd.h"
+#include "qp.h"
#include <linux/mlx5/eq.h>
@@ -446,8 +447,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
{
int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
pfault->wqe.wq_num : pfault->token;
- u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
- u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = { };
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
int err;
MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
@@ -456,7 +456,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
MLX5_SET(page_fault_resume_in, in, error, !!error);
- err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
if (err)
mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
wq_num, err);
@@ -1135,8 +1135,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
*wqe += sizeof(struct mlx5_wqe_xrc_seg);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->qp_sub_type == MLX5_IB_QPT_DCI) {
+ if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
av = *wqe;
if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av);
@@ -1189,7 +1188,7 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_wq *wq = &qp->rq;
int wqe_size = 1 << wq->wqe_shift;
- if (qp->wq_sig) {
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
return -EFAULT;
}
@@ -1219,7 +1218,7 @@ static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
case MLX5_WQE_PF_TYPE_RESP:
case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
- common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
+ common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
break;
default:
break;
diff --git a/drivers/infiniband/hw/mlx5/qos.c b/drivers/infiniband/hw/mlx5/qos.c
index cac878a70edb..dce92554142a 100644
--- a/drivers/infiniband/hw/mlx5/qos.c
+++ b/drivers/infiniband/hw/mlx5/qos.c
@@ -69,17 +69,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
if (err)
goto err;
- err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
- &pp_entry->index, sizeof(pp_entry->index));
- if (err)
- goto clean;
-
pp_entry->mdev = dev->mdev;
uobj->object = pp_entry;
- return 0;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ &pp_entry->index, sizeof(pp_entry->index));
+ return err;
-clean:
- mlx5_rl_remove_rate_raw(dev->mdev, pp_entry->index);
err:
kfree(pp_entry);
return err;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2210759843ba..81bf6b975e0e 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -39,9 +39,8 @@
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
-
-/* not supported currently */
-static int wq_signature;
+#include "qp.h"
+#include "wr.h"
enum {
MLX5_IB_ACK_REQ_FREQ = 8,
@@ -54,32 +53,6 @@ enum {
MLX5_IB_LINK_TYPE_ETH = 1
};
-enum {
- MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
-};
-
-static const u32 mlx5_ib_opcode[] = {
- [IB_WR_SEND] = MLX5_OPCODE_SEND,
- [IB_WR_LSO] = MLX5_OPCODE_LSO,
- [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
- [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
- [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
- [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
- [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
- [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
- [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
- [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
- [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
-};
-
-struct mlx5_wqe_eth_pad {
- u8 rsvd0[16];
-};
-
enum raw_qp_set_mask_map {
MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
@@ -391,17 +364,26 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
} else {
+ int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);
+
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
- if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+ if ((1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) <
+ wq_sig)
return -EINVAL;
- qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
- wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
+ wqe_size =
+ wq_sig ? sizeof(struct mlx5_wqe_signature_seg) :
+ 0;
wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
wqe_size = roundup_pow_of_two(wqe_size);
wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
@@ -415,7 +397,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
return -EINVAL;
}
qp->rq.wqe_shift = ilog2(wqe_size);
- qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
}
}
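
A worked example of the RQ sizing above: struct mlx5_wqe_data_seg is 16 bytes, so a 64-byte receive WQE (wqe_shift = 6) with an in-use signature segment leaves room for three scatter entries:

#include <stdio.h>

int main(void)
{
	int wqe_shift = 6, data_seg_sz = 16, wq_sig = 1;
	int max_gs = (1 << wqe_shift) / data_seg_sz - wq_sig;

	printf("max_gs = %d\n", max_gs); /* 64/16 - 1 = 3 */
	return 0;
}
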
@@ -595,7 +580,7 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
}
if (attr->qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
} else {
@@ -750,10 +735,7 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI;
- case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
- case IB_QPT_RAW_PACKET:
- case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
- case IB_QPT_MAX:
+ case IB_QPT_RAW_PACKET: return MLX5_QP_ST_RAW_ETHERTYPE;
default: return -EINVAL;
}
}
@@ -890,7 +872,6 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
- rwq->create_type = MLX5_WQ_USER;
return 0;
err_umem:
@@ -905,15 +886,14 @@ static int adjust_bfregn(struct mlx5_ib_dev *dev,
bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}
-static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_qp *qp, struct ib_udata *udata,
- struct ib_qp_init_attr *attr,
- u32 **in,
- struct mlx5_ib_create_qp_resp *resp, int *inlen,
- struct mlx5_ib_qp_base *base)
+static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp, struct ib_udata *udata,
+ struct ib_qp_init_attr *attr, u32 **in,
+ struct mlx5_ib_create_qp_resp *resp, int *inlen,
+ struct mlx5_ib_qp_base *base,
+ struct mlx5_ib_create_qp *ucmd)
{
struct mlx5_ib_ucontext *context;
- struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
int page_shift = 0;
int uar_index = 0;
@@ -927,30 +907,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u16 uid;
u32 uar_flags;
- err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return err;
- }
-
context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
ibucontext);
- uar_flags = ucmd.flags & (MLX5_QP_FLAG_UAR_PAGE_INDEX |
- MLX5_QP_FLAG_BFREG_INDEX);
+ uar_flags = qp->flags_en &
+ (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX);
switch (uar_flags) {
case MLX5_QP_FLAG_UAR_PAGE_INDEX:
- uar_index = ucmd.bfreg_index;
+ uar_index = ucmd->bfreg_index;
bfregn = MLX5_IB_INVALID_BFREG;
break;
case MLX5_QP_FLAG_BFREG_INDEX:
uar_index = bfregn_to_uar_index(dev, &context->bfregi,
- ucmd.bfreg_index, true);
+ ucmd->bfreg_index, true);
if (uar_index < 0)
return uar_index;
bfregn = MLX5_IB_INVALID_BFREG;
break;
case 0:
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
return -EINVAL;
bfregn = alloc_bfreg(dev, &context->bfregi);
if (bfregn < 0)
@@ -969,12 +943,12 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
- err = set_user_buf_size(dev, qp, &ucmd, base, attr);
+ err = set_user_buf_size(dev, qp, ucmd, base, attr);
if (err)
goto err_bfreg;
- if (ucmd.buf_addr && ubuffer->buf_size) {
- ubuffer->buf_addr = ucmd.buf_addr;
+ if (ucmd->buf_addr && ubuffer->buf_size) {
+ ubuffer->buf_addr = ucmd->buf_addr;
err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
ubuffer->buf_size, &ubuffer->umem,
&npages, &page_shift, &ncont, &offset);
@@ -992,8 +966,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
- uid = (attr->qp_type != IB_QPT_XRC_TGT &&
- attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
+ uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
MLX5_SET(create_qp_in, *in, uid, uid);
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
@@ -1011,24 +984,14 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
resp->bfreg_index = MLX5_IB_INVALID_BFREG;
qp->bfregn = bfregn;
- err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &qp->db);
+ err = mlx5_ib_db_map_user(context, udata, ucmd->db_addr, &qp->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_free;
}
- err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- goto err_unmap;
- }
- qp->create_type = MLX5_QP_USER;
-
return 0;
-err_unmap:
- mlx5_ib_db_unmap_user(context, &qp->db);
-
err_free:
kvfree(*in);
@@ -1041,72 +1004,50 @@ err_bfreg:
return err;
}
-static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
- struct ib_udata *udata)
+static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_ib_qp_base *base, struct ib_udata *udata)
{
- struct mlx5_ib_ucontext *context =
- rdma_udata_to_drv_context(
- udata,
- struct mlx5_ib_ucontext,
- ibucontext);
-
- mlx5_ib_db_unmap_user(context, &qp->db);
- ib_umem_release(base->ubuffer.umem);
-
- /*
- * Free only the BFREGs which are handled by the kernel.
- * BFREGs of UARs allocated dynamically are handled by user.
- */
- if (qp->bfregn != MLX5_IB_INVALID_BFREG)
- mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
-}
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
-/* get_sq_edge - Get the next nearby edge.
- *
- * An 'edge' is defined as the first following address after the end
- * of the fragment or the SQ. Accordingly, during the WQE construction
- * which repetitively increases the pointer to write the next data, it
- * simply should check if it gets to an edge.
- *
- * @sq - SQ buffer.
- * @idx - Stride index in the SQ buffer.
- *
- * Return:
- * The new edge.
- */
-static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
-{
- void *fragment_end;
+ if (udata) {
+ /* User QP */
+ mlx5_ib_db_unmap_user(context, &qp->db);
+ ib_umem_release(base->ubuffer.umem);
- fragment_end = mlx5_frag_buf_get_wqe
- (&sq->fbc,
- mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
+ /*
+ * Free only the BFREGs which are handled by the kernel.
+ * BFREGs of UARs allocated dynamically are handled by user.
+ */
+ if (qp->bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
+ return;
+ }
- return fragment_end + MLX5_SEND_WQE_BB;
+ /* Kernel QP */
+ kvfree(qp->sq.wqe_head);
+ kvfree(qp->sq.w_list);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->sq.wr_data);
+ kvfree(qp->rq.wrid);
+ if (qp->db.db)
+ mlx5_db_free(dev->mdev, &qp->db);
+ if (qp->buf.frags)
+ mlx5_frag_buf_free(dev->mdev, &qp->buf);
}
-static int create_kernel_qp(struct mlx5_ib_dev *dev,
- struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_qp *qp,
- u32 **in, int *inlen,
- struct mlx5_ib_qp_base *base)
+static int _create_kernel_qp(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_qp *qp, u32 **in, int *inlen,
+ struct mlx5_ib_qp_base *base)
{
int uar_index;
void *qpc;
int err;
- if (init_attr->create_flags & ~(IB_QP_CREATE_INTEGRITY_EN |
- IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
- IB_QP_CREATE_IPOIB_UD_LSO |
- IB_QP_CREATE_NETIF_QP |
- MLX5_IB_QP_CREATE_SQPN_QP1 |
- MLX5_IB_QP_CREATE_WC_TEST))
- return -EINVAL;
-
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
qp->bf.bfreg = &dev->fp_bfreg;
- else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
+ else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
qp->bf.bfreg = &dev->wc_bfreg;
else
qp->bf.bfreg = &dev->bfreg;
@@ -1166,10 +1107,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
MLX5_SET(qpc, qpc, fre, 1);
MLX5_SET(qpc, qpc, rlky, 1);
- if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
MLX5_SET(qpc, qpc, deth_sqpn, 1);
- qp->flags |= MLX5_IB_QP_SQPN_QP1;
- }
mlx5_fill_page_frag_array(&qp->buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in,
@@ -1197,7 +1136,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
err = -ENOMEM;
goto err_wrid;
}
- qp->create_type = MLX5_QP_KERNEL;
return 0;
@@ -1217,36 +1155,15 @@ err_buf:
return err;
}
-static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
-{
- kvfree(qp->sq.wqe_head);
- kvfree(qp->sq.w_list);
- kvfree(qp->sq.wrid);
- kvfree(qp->sq.wr_data);
- kvfree(qp->rq.wrid);
- mlx5_db_free(dev->mdev, &qp->db);
- mlx5_frag_buf_free(dev->mdev, &qp->buf);
-}
-
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
- if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
- (attr->qp_type == MLX5_IB_QPT_DCI) ||
- (attr->qp_type == IB_QPT_XRC_INI))
+ if (attr->srq || (qp->type == IB_QPT_XRC_TGT) ||
+ (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI))
return MLX5_SRQ_RQ;
else if (!qp->has_rq)
return MLX5_ZERO_LEN_RQ;
- else
- return MLX5_NON_ZERO_RQ;
-}
-
-static int is_connected(enum ib_qp_type qp_type)
-{
- if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
- qp_type == MLX5_IB_QPT_DCI)
- return 1;
- return 0;
+ return MLX5_NON_ZERO_RQ;
}
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
@@ -1254,15 +1171,15 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq, u32 tdn,
struct ib_pd *pd)
{
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(tisc, tisc, transport_domain, tdn);
- if (qp->flags & MLX5_IB_QP_UNDERLAY)
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
- return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
+ return mlx5_core_create_tis(dev->mdev, in, &sq->tisn);
}
static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
@@ -1336,7 +1253,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
- err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
+ err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);
kvfree(in);
@@ -1356,7 +1273,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{
destroy_flow_rule_vport_sq(sq);
- mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
+ mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp);
ib_umem_release(sq->ubuffer.umem);
}
@@ -1408,7 +1325,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
- if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
+ if (mqp->flags & IB_QP_CREATE_SCATTER_FCS)
MLX5_SET(rqc, rqc, scatter_fcs, 1);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
@@ -1426,7 +1343,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
memcpy(pas, qp_pas, rq_pas_size);
- err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);
kvfree(in);
@@ -1436,14 +1353,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq)
{
- mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
-}
-
-static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
-{
- return (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
- MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
- MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
+ mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
}
static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
@@ -1459,9 +1369,8 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, u32 tdn,
- u32 *qp_flags_en,
- struct ib_pd *pd,
- u32 *out, int outlen)
+ u32 *qp_flags_en, struct ib_pd *pd,
+ u32 *out)
{
u8 lb_flag = 0;
u32 *in;
@@ -1494,9 +1403,8 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
-
- err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
-
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
rq->tirn = MLX5_GET(create_tir_out, out, tirn);
if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
err = mlx5_ib_enable_lb(dev, false, true);
@@ -1525,6 +1433,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u16 uid = to_mpd(pd)->uid;
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
+ if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
+ return -EINVAL;
if (qp->sq.wqe_cnt) {
err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
if (err)
@@ -1548,17 +1458,16 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->rq.wqe_cnt) {
rq->base.container_mibqp = qp;
- if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
+ if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING)
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
- if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
if (err)
goto err_destroy_sq;
- err = create_raw_packet_qp_tir(
- dev, rq, tdn, &qp->flags_en, pd, out,
- MLX5_ST_SZ_BYTES(create_tir_out));
+ err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
+ out);
if (err)
goto err_destroy_rq;
@@ -1586,14 +1495,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
rq->base.mqp.qpn;
- err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
- if (err)
- goto err_destroy_tir;
-
return 0;
-err_destroy_tir:
- destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
err_destroy_rq:
destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
@@ -1645,14 +1548,27 @@ static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *q
to_mpd(qp->ibqp.pd)->uid);
}
-static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+struct mlx5_create_qp_params {
+ struct ib_udata *udata;
+ size_t inlen;
+ size_t outlen;
+ size_t ucmd_size;
+ void *ucmd;
+ u8 is_rss_raw : 1;
+ struct ib_qp_init_attr *attr;
+ u32 uidx;
+ struct mlx5_ib_create_qp_resp resp;
+};
+
+static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp_rss *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_ib_create_qp_resp resp = {};
int inlen;
int outlen;
int err;
@@ -1662,79 +1578,28 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
void *hfso;
u32 selected_fields = 0;
u32 outer_l4;
- size_t min_resp_len;
u32 tdn = mucontext->tdn;
- struct mlx5_ib_create_qp_rss ucmd = {};
- size_t required_cmd_sz;
u8 lb_flag = 0;
- if (init_attr->qp_type != IB_QPT_RAW_PACKET)
- return -EOPNOTSUPP;
-
- if (init_attr->create_flags || init_attr->send_cq)
- return -EINVAL;
-
- min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
- if (udata->outlen < min_resp_len)
- return -EINVAL;
-
- required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
- if (udata->inlen < required_cmd_sz) {
- mlx5_ib_dbg(dev, "invalid inlen\n");
- return -EINVAL;
- }
-
- if (udata->inlen > sizeof(ucmd) &&
- !ib_is_udata_cleared(udata, sizeof(ucmd),
- udata->inlen - sizeof(ucmd))) {
- mlx5_ib_dbg(dev, "inlen is not supported\n");
- return -EOPNOTSUPP;
- }
-
- if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EFAULT;
- }
-
- if (ucmd.comp_mask) {
+ if (ucmd->comp_mask) {
mlx5_ib_dbg(dev, "invalid comp mask\n");
return -EOPNOTSUPP;
}
- if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
- mlx5_ib_dbg(dev, "invalid flags\n");
- return -EOPNOTSUPP;
- }
-
- if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
- !tunnel_offload_supported(dev->mdev)) {
- mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
- return -EOPNOTSUPP;
- }
-
- if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
- !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
+ !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
return -EOPNOTSUPP;
}
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
- lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ if (dev->is_rep)
qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
- }
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
- lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
- qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
- }
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
- err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EINVAL;
- }
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
outlen = MLX5_ST_SZ_BYTES(create_tir_out);
@@ -1753,29 +1618,29 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
- if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER)
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
else
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- switch (ucmd.rx_hash_function) {
+ switch (ucmd->rx_hash_function) {
case MLX5_RX_HASH_FUNC_TOEPLITZ:
{
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
- if (len != ucmd.rx_key_len) {
+ if (len != ucmd->rx_key_len) {
err = -EINVAL;
goto err;
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
- memcpy(rss_key, ucmd.rx_hash_key, len);
+ memcpy(rss_key, ucmd->rx_hash_key, len);
break;
}
default:
@@ -1783,7 +1648,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
goto err;
}
- if (!ucmd.rx_hash_fields_mask) {
+ if (!ucmd->rx_hash_fields_mask) {
 		/* special case when this TIR serves as a steering entry without hashing */
if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
goto create_tir;
@@ -1791,29 +1656,31 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
goto err;
}
- if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
- ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
+ if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
err = -EINVAL;
goto err;
}
/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
- else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
- outer_l4 = ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) << 0 |
- ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) << 1 |
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
+ outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ << 0 |
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ << 1 |
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
/* Check that only one l4 protocol is set */
if (outer_l4 & (outer_l4 - 1)) {
@@ -1822,38 +1689,39 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
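
The outer_l4 test above is the usual x & (x - 1) trick: it clears the lowest set bit, so it is nonzero exactly when more than one L4 protocol bit was selected:

#include <stdio.h>

static int more_than_one_bit(unsigned int x)
{
	return (x & (x - 1)) != 0;
}

int main(void)
{
	printf("%d %d %d\n", more_than_one_bit(0),  /* 0 */
			     more_than_one_bit(2),  /* 0 */
			     more_than_one_bit(3)); /* 1 */
	return 0;
}
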
/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
- else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
- if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir:
- err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
@@ -1868,73 +1736,43 @@ create_tir:
goto err;
if (mucontext->devx_uid) {
- resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
- resp.tirn = qp->rss_qp.tirn;
+ params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ params->resp.tirn = qp->rss_qp.tirn;
if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
- resp.tir_icm_addr =
+ params->resp.tir_icm_addr =
MLX5_GET(create_tir_out, out, icm_address_31_0);
- resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
- icm_address_39_32)
- << 32;
- resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
- icm_address_63_40)
- << 40;
- resp.comp_mask |=
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_39_32)
+ << 32;
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_63_40)
+ << 40;
+ params->resp.comp_mask |=
MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
}
}
- err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
- if (err)
- goto err_copy;
-
kvfree(in);
/* qpn is reserved for that QP */
qp->trans_qp.base.mqp.qpn = 0;
- qp->flags |= MLX5_IB_QP_RSS;
+ qp->is_rss = true;
return 0;
-err_copy:
- mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
err:
kvfree(in);
return err;
}
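The `outer_l4 & (outer_l4 - 1)` test in create_rss_raw_qp_tir() above is a standard bit trick: subtracting 1 clears the lowest set bit, so the AND is nonzero exactly when more than one bit is set. A minimal standalone illustration (hypothetical flag values, not the driver's encoding):

#include <assert.h>

/* Nonzero iff more than one bit of x is set: x - 1 clears the lowest
 * set bit and borrows into the bits below it. */
static int more_than_one_bit_set(unsigned int x)
{
	return (x & (x - 1)) != 0;
}

int main(void)
{
	assert(!more_than_one_bit_set(0x0)); /* no L4 protocol requested */
	assert(!more_than_one_bit_set(0x2)); /* exactly one, e.g. UDP    */
	assert(more_than_one_bit_set(0x3));  /* TCP and UDP together     */
	return 0;
}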
-static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
- void *qpc)
-{
- int rcqe_sz;
-
- if (init_attr->qp_type == MLX5_IB_QPT_DCI)
- return;
-
- rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
-
- if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
- if (rcqe_sz == 128)
- MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
-
- return;
- }
-
- MLX5_SET(qpc, qpc, cs_res,
- rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
- MLX5_RES_SCAT_DATA32_CQE);
-}
-
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx5_ib_create_qp *ucmd,
void *qpc)
{
- enum ib_qp_type qpt = init_attr->qp_type;
int scqe_sz;
bool allow_scat_cqe = false;
- if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
- return;
-
if (ucmd)
allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
@@ -1999,269 +1837,182 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
return atomic_mode;
}
-static inline bool check_flags_mask(uint64_t input, uint64_t supported)
-{
- return (input & ~supported) == 0;
-}
-
-static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, struct mlx5_ib_qp *qp)
+static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
{
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_create_qp_resp resp = {};
- struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_ib_cq *send_cq;
- struct mlx5_ib_cq *recv_cq;
- unsigned long flags;
- u32 uidx = MLX5_IB_DEFAULT_UIDX;
- struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_qp_base *base;
- int mlx5_st;
+ unsigned long flags;
void *qpc;
u32 *in;
int err;
mutex_init(&qp->mutex);
- spin_lock_init(&qp->sq.lock);
- spin_lock_init(&qp->rq.lock);
- mlx5_st = to_mlx5_st(init_attr->qp_type);
- if (mlx5_st < 0)
- return -EINVAL;
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
- if (init_attr->rwq_ind_tbl) {
- if (!udata)
- return -ENOSYS;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
- err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
- return err;
- }
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
- mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
- return -EINVAL;
- } else {
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
- }
- }
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn);
- if (init_attr->create_flags &
- (IB_QP_CREATE_CROSS_CHANNEL |
- IB_QP_CREATE_MANAGED_SEND |
- IB_QP_CREATE_MANAGED_RECV)) {
- if (!MLX5_CAP_GEN(mdev, cd)) {
- mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
- return -EINVAL;
- }
- if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
- qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
- if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
- qp->flags |= MLX5_IB_QP_MANAGED_SEND;
- if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
- qp->flags |= MLX5_IB_QP_MANAGED_RECV;
- }
-
- if (init_attr->qp_type == IB_QPT_UD &&
- (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
- if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
- mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
- return -EOPNOTSUPP;
- }
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
- if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
- return -EOPNOTSUPP;
- }
- if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
- !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
- mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
- }
+ MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
- qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
- if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
- if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
- MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
- (init_attr->qp_type != IB_QPT_RAW_PACKET))
- return -EOPNOTSUPP;
- qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clear the flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
}
- if (udata) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EFAULT;
- }
+ base = &qp->trans_qp.base;
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+ kvfree(in);
+ if (err)
+ return err;
- if (!check_flags_mask(ucmd.flags,
- MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
- MLX5_QP_FLAG_BFREG_INDEX |
- MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
- MLX5_QP_FLAG_SCATTER_CQE |
- MLX5_QP_FLAG_SIGNATURE |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
- MLX5_QP_FLAG_TUNNEL_OFFLOADS |
- MLX5_QP_FLAG_UAR_PAGE_INDEX |
- MLX5_QP_FLAG_TYPE_DCI |
- MLX5_QP_FLAG_TYPE_DCT))
- return -EINVAL;
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
- err = get_qp_user_index(ucontext, &ucmd, udata->inlen, &uidx);
- if (err)
- return err;
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
- if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
- qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
- if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
- !tunnel_offload_supported(mdev)) {
- mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
- }
+ qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
+ return 0;
+}
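The MLX5_SET()/MLX5_GET() calls throughout this patch are read-modify-write accessors over big-endian dwords, with field offsets and widths generated from the firmware interface description in mlx5_ifc.h. A simplified model of the setter (illustrative only; the real macros derive dword, shift, and mask from the generated layouts):

#include <stdint.h>
#include <arpa/inet.h> /* htonl/ntohl */

/* Set a 'bits'-wide field starting 'bit_off' bits into a big-endian
 * dword stream; fields are assumed not to straddle a dword boundary. */
static void set_field(uint32_t *buf, unsigned int bit_off,
		      unsigned int bits, uint32_t val)
{
	unsigned int dw = bit_off / 32;
	unsigned int shift = 32 - (bit_off % 32) - bits;
	uint32_t mask = (bits == 32) ? 0xffffffffu
				     : ((1u << bits) - 1) << shift;
	uint32_t host = ntohl(buf[dw]);

	host = (host & ~mask) | ((val << shift) & mask);
	buf[dw] = htonl(host);
}

Under this model, MLX5_SET(qpc, qpc, no_sq, 1) amounts to set_field() with the generated offset and width of no_sq inside the QP context.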
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
- }
+static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct ib_udata *udata = params->udata;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
- }
+ mutex_init(&qp->mutex);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
- if (ucmd.flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
- if (init_attr->qp_type != IB_QPT_RC ||
- !MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
- mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags |= MLX5_IB_QP_PACKET_BASED_CREDIT;
- }
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
- if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
- if (init_attr->qp_type != IB_QPT_UD ||
- (MLX5_CAP_GEN(dev->mdev, port_type) !=
- MLX5_CAP_PORT_TYPE_IB) ||
- !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
- mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
- return -EOPNOTSUPP;
- }
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
- qp->flags |= MLX5_IB_QP_UNDERLAY;
- qp->underlay_qpn = init_attr->source_qpn;
- }
- } else {
- qp->wq_sig = !!wq_signature;
- }
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ qp->underlay_qpn = init_attr->source_qpn;
base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) ?
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
qp->has_rq = qp_has_rq(init_attr);
- err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
- qp, udata ? &ucmd : NULL);
+ err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
return err;
}
- if (pd) {
- if (udata) {
- __u32 max_wqes =
- 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
- if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
- ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
- mlx5_ib_dbg(dev, "invalid rq params\n");
- return -EINVAL;
- }
- if (ucmd.sq_wqe_count > max_wqes) {
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
- ucmd.sq_wqe_count, max_wqes);
- return -EINVAL;
- }
- if (init_attr->create_flags &
- MLX5_IB_QP_CREATE_SQPN_QP1) {
- mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
- return -EINVAL;
- }
- err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
- &resp, &inlen, base);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
- } else {
- err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
- base);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
- }
+ if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+ return -EINVAL;
- if (err)
- return err;
- } else {
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+ return -EINVAL;
- qp->create_type = MLX5_QP_EMPTY;
- }
+ err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+ &inlen, base, ucmd);
+ if (err)
+ return err;
if (is_sqp(init_attr->qp_type))
qp->port = init_attr->port_num;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, mlx5_st);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
- if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
- MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
- else
- MLX5_SET(qpc, qpc, latency_sensitive, 1);
-
-
- if (qp->wq_sig)
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
MLX5_SET(qpc, qpc, wq_signature, 1);
- if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
MLX5_SET(qpc, qpc, block_lb_mc, 1);
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
MLX5_SET(qpc, qpc, cd_master, 1);
- if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
MLX5_SET(qpc, qpc, cd_slave_send, 1);
- if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
MLX5_SET(qpc, qpc, cd_slave_receive, 1);
- if (qp->flags & MLX5_IB_QP_PACKET_BASED_CREDIT)
+ if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)
MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
- if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
- configure_responder_scat_cqe(init_attr, qpc);
- configure_requester_scat_cqe(dev, init_attr,
- udata ? &ucmd : NULL,
- qpc);
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (init_attr->qp_type == IB_QPT_RC ||
+ init_attr->qp_type == IB_QPT_UC)) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+ MLX5_SET(qpc, qpc, cs_res,
+ rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
+ MLX5_RES_SCAT_DATA32_CQE);
}
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
+ configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2282,12 +2033,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
/* Set default resources */
switch (init_attr->qp_type) {
- case IB_QPT_XRC_TGT:
- MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
- MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
- MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
- MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
- break;
case IB_QPT_XRC_INI:
MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
@@ -2315,52 +2060,163 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
MLX5_SET(qpc, qpc, user_index, uidx);
- /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
- if (init_attr->qp_type == IB_QPT_UD &&
- (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
- MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
- qp->flags |= MLX5_IB_QP_LSO;
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING &&
+ init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clear the flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
}
- if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
- if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
- mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
- err = -EOPNOTSUPP;
- goto err;
- } else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- MLX5_SET(qpc, qpc, end_padding_mode,
- MLX5_WQ_END_PAD_MODE_ALIGN);
- } else {
- qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
- }
+ if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
+ raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
+ &params->resp);
+ } else
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+
+ kvfree(in);
+ if (err)
+ goto err_create;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device to QPs access, needed for further handling via reset
+ * flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling via reset flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+
+err_create:
+ destroy_qp(dev, qp, base, udata);
+ return err;
+}
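mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs(), used above around the reset-flow list updates, avoid deadlock the usual way: the two CQ locks are always taken in one global order (the driver orders by CQ number). A generic sketch of the pattern, ordering by address instead:

#include <pthread.h>
#include <stdint.h>

/* Lock two mutexes in a single global order so that concurrent callers
 * passing the same pair in opposite roles cannot deadlock. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b || !b) {
		pthread_mutex_lock(a);
	} else if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}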
+
+static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ mutex_init(&qp->mutex);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ base = &qp->trans_qp.base;
+
+ qp->has_rq = qp_has_rq(attr);
+ err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
}
- if (inlen < 0) {
- err = -EINVAL;
- goto err;
+ err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
+ if (err)
+ return err;
+
+ if (is_sqp(attr->qp_type))
+ qp->port = attr->port_num;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+
+ if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
+ else
+ MLX5_SET(qpc, qpc, latency_sensitive, 1);
+
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+
+ if (qp->rq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
}
- if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
- qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
- raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
- err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
- &resp);
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
+
+ if (qp->sq.wqe_cnt)
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+ else
+ MLX5_SET(qpc, qpc, no_sq, 1);
+
+ if (attr->srq) {
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(attr->srq)->msrq.srqn);
} else {
- err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(devr->s1)->msrq.srqn);
}
- if (err) {
- mlx5_ib_dbg(dev, "create qp failed\n");
- goto err_create;
- }
+ if (attr->send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);
+
+ if (attr->recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an ipoib qp */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
+ MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
+
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
kvfree(in);
+ if (err)
+ goto err_create;
base->container_mibqp = qp;
base->mqp.event = mlx5_ib_qp_event;
- get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
+ get_cqs(qp->type, attr->send_cq, attr->recv_cq,
&send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2380,13 +2236,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return 0;
err_create:
- if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, pd, qp, base, udata);
- else if (qp->create_type == MLX5_QP_KERNEL)
- destroy_qp_kernel(dev, qp);
-
-err:
- kvfree(in);
+ destroy_qp(dev, qp, base, NULL);
return err;
}
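log_rq_size and log_sq_size above store ilog2() of the WQE count, which is why ring sizes are rounded to powers of two elsewhere in the driver. A minimal equivalent for the power-of-two case:

#include <assert.h>

/* log2 of n, assuming n is a nonzero power of two. */
static unsigned int ilog2_pow2(unsigned int n)
{
	unsigned int log = 0;

	assert(n && !(n & (n - 1)));
	while (n >>= 1)
		log++;
	return log;
}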
@@ -2448,11 +2298,6 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
}
}
-static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
-{
- return to_mpd(qp->ibqp.pd);
-}
-
static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
@@ -2473,14 +2318,10 @@ static void get_cqs(enum ib_qp_type qp_type,
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
- case IB_QPT_RAW_IPV6:
- case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_RAW_PACKET:
*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
break;
-
- case IB_QPT_MAX:
default:
*send_cq = NULL;
*recv_cq = NULL;
@@ -2506,16 +2347,15 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) ?
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
- !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
- err = mlx5_core_qp_modify(dev->mdev,
- MLX5_CMD_OP_2RST_QP, 0,
- NULL, &base->mqp);
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
+ err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
+ NULL, &base->mqp, NULL);
} else {
struct mlx5_modify_raw_qp_param raw_qp_param = {
.operation = MLX5_CMD_OP_2RST_QP
@@ -2541,7 +2381,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (recv_cq)
list_del(&qp->cq_recv_list);
- if (qp->create_type == MLX5_QP_KERNEL) {
+ if (!udata) {
__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
@@ -2552,263 +2392,463 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
destroy_raw_packet_qp(dev, qp);
} else {
- err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
+ err = mlx5_core_destroy_qp(dev, &base->mqp);
if (err)
mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
base->mqp.qpn);
}
- if (qp->create_type == MLX5_QP_KERNEL)
- destroy_qp_kernel(dev, qp);
- else if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
+ destroy_qp(dev, qp, base, udata);
}
-static const char *ib_qp_type_str(enum ib_qp_type type)
+static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
{
- switch (type) {
- case IB_QPT_SMI:
- return "IB_QPT_SMI";
- case IB_QPT_GSI:
- return "IB_QPT_GSI";
+ struct ib_qp_init_attr *attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 uidx = params->uidx;
+ void *dctc;
+
+ qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
+ if (!qp->dct.in)
+ return -ENOMEM;
+
+ MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
+ MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
+ MLX5_SET(dctc, dctc, user_index, uidx);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
+
+ if (rcqe_sz == 128)
+ MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ }
+
+ qp->state = IB_QPS_RESET;
+
+ return 0;
+}
+
+static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ enum ib_qp_type *type)
+{
+ if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
+ goto out;
+
+ switch (attr->qp_type) {
+ case IB_QPT_XRC_TGT:
+ case IB_QPT_XRC_INI:
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ goto out;
+ fallthrough;
case IB_QPT_RC:
- return "IB_QPT_RC";
case IB_QPT_UC:
- return "IB_QPT_UC";
- case IB_QPT_UD:
- return "IB_QPT_UD";
- case IB_QPT_RAW_IPV6:
- return "IB_QPT_RAW_IPV6";
- case IB_QPT_RAW_ETHERTYPE:
- return "IB_QPT_RAW_ETHERTYPE";
- case IB_QPT_XRC_INI:
- return "IB_QPT_XRC_INI";
- case IB_QPT_XRC_TGT:
- return "IB_QPT_XRC_TGT";
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ case IB_QPT_DRIVER:
+ case IB_QPT_GSI:
+ if (dev->profile == &raw_eth_profile)
+ goto out;
case IB_QPT_RAW_PACKET:
- return "IB_QPT_RAW_PACKET";
+ case IB_QPT_UD:
case MLX5_IB_QPT_REG_UMR:
- return "MLX5_IB_QPT_REG_UMR";
- case IB_QPT_DRIVER:
- return "IB_QPT_DRIVER";
- case IB_QPT_MAX:
+ break;
default:
- return "Invalid QP type";
+ goto out;
}
+
+ *type = attr->qp_type;
+ return 0;
+
+out:
+ mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type);
+ return -EOPNOTSUPP;
}
-static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
- struct ib_qp_init_attr *attr,
- struct mlx5_ib_create_qp *ucmd,
- struct ib_udata *udata)
+static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
{
struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_ib_qp *qp;
- int err = 0;
- u32 uidx = MLX5_IB_DEFAULT_UIDX;
- void *dctc;
- if (!attr->srq || !attr->recv_cq)
- return ERR_PTR(-EINVAL);
+ if (!udata) {
+ /* Kernel create_qp callers */
+ if (attr->rwq_ind_tbl)
+ return -EOPNOTSUPP;
- err = get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &uidx);
- if (err)
- return ERR_PTR(err);
+ switch (attr->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ case IB_QPT_DRIVER:
+ return -EOPNOTSUPP;
+ default:
+ return 0;
+ }
+ }
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ /* Userspace create_qp callers */
+ if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) {
+ mlx5_ib_dbg(dev,
+ "Raw Packet QP is only supported for CQE version > 0\n");
+ return -EINVAL;
+ }
- qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
- if (!qp->dct.in) {
- err = -ENOMEM;
- goto err_free;
+ if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) {
+ mlx5_ib_dbg(dev,
+ "Wrong QP type %d for the RWQ indirect table\n",
+ attr->qp_type);
+ return -EINVAL;
}
- MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
- dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
- qp->qp_sub_type = MLX5_IB_QPT_DCT;
- MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
- MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
- MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
- MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
- MLX5_SET(dctc, dctc, user_index, uidx);
+ switch (attr->qp_type) {
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ case MLX5_IB_QPT_REG_UMR:
+ case IB_QPT_GSI:
+ mlx5_ib_dbg(dev, "Kernel doesn't support QP type %d\n",
+ attr->qp_type);
+ return -EINVAL;
+ default:
+ break;
+ }
- if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
- configure_responder_scat_cqe(attr, dctc);
+ /*
+ * We don't need to see this warning; it means that kernel code
+ * is missing an ib_pd. Placed here to catch developers' mistakes.
+ */
+ WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT,
+ "There is a missing PD pointer assignment\n");
+ return 0;
+}
- qp->state = IB_QPS_RESET;
+static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
- return &qp->ibqp;
-err_free:
- kfree(qp);
- return ERR_PTR(err);
+ if (cond) {
+ qp->flags_en |= flag;
+ *flags &= ~flag;
+ return;
+ }
+
+ if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+ /*
+ * We don't return an error if this flag was provided
+ * but mlx5 doesn't have the right capability.
+ */
+ *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+ return;
+ }
+ mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}
-static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
- struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_create_qp *ucmd,
- struct ib_udata *udata)
+static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ void *ucmd, struct ib_qp_init_attr *attr)
{
- enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
- int err;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ bool cond;
+ int flags;
- if (!udata)
+ if (attr->rwq_ind_tbl)
+ flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
+ else
+ flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;
+
+ switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
+ case MLX5_QP_FLAG_TYPE_DCI:
+ qp->type = MLX5_IB_QPT_DCI;
+ break;
+ case MLX5_QP_FLAG_TYPE_DCT:
+ qp->type = MLX5_IB_QPT_DCT;
+ break;
+ default:
+ if (qp->type != IB_QPT_DRIVER)
+ break;
+ /*
+ * It is IB_QPT_DRIVER and either no subtype or a
+ * wrong subtype was provided.
+ */
return -EINVAL;
+ }
- if (udata->inlen < sizeof(*ucmd)) {
- mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+
+ if (qp->type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
+ cond, qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
+ qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
+ qp);
+ }
+
+ if (qp->type == IB_QPT_RC)
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
+ MLX5_CAP_GEN(mdev, qp_packet_based), qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp);
+
+ cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC);
+ if (attr->rwq_ind_tbl && cond) {
+ mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n",
+ cond);
return -EINVAL;
}
- err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
- if (err)
- return err;
- if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
- init_attr->qp_type = MLX5_IB_QPT_DCI;
- } else {
- if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
- init_attr->qp_type = MLX5_IB_QPT_DCT;
- } else {
- mlx5_ib_dbg(dev, "Invalid QP flags\n");
- return -EINVAL;
- }
+ if (flags)
+ mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);
+
+ return (flags) ? -EINVAL : 0;
}
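process_vendor_flag() implements a consume-as-you-go pattern: every flag the device supports is cleared from the mask, and any bit still set when process_vendor_flags() finishes is unsupported and turns into -EINVAL. A compact sketch of the same rule with hypothetical flag values:

enum { FLAG_A = 0x1, FLAG_B = 0x2 }; /* hypothetical vendor flags */

/* Clear the bit only when the flag is both requested and supported;
 * unsupported bits survive so the caller can reject them. */
static void consume_flag(int *flags, int flag, int supported)
{
	if ((*flags & flag) && supported)
		*flags &= ~flag;
}

static int process_flags(int flags)
{
	consume_flag(&flags, FLAG_A, 1);
	consume_flag(&flags, FLAG_B, 0); /* not supported on this device */
	return flags ? -22 /* -EINVAL */ : 0;
}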
- if (!MLX5_CAP_GEN(dev->mdev, dct)) {
- mlx5_ib_dbg(dev, "DC transport is not supported\n");
- return -EOPNOTSUPP;
+static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
+
+ if (cond) {
+ qp->flags |= flag;
+ *flags &= ~flag;
+ return;
}
- return 0;
+ if (flag == MLX5_IB_QP_CREATE_WC_TEST) {
+ /*
+ * Special case: if the condition isn't met, it isn't an
+ * error, just a different in-kernel flow.
+ */
+ *flags &= ~MLX5_IB_QP_CREATE_WC_TEST;
+ return;
+ }
+ mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag);
}
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *verbs_init_attr,
- struct ib_udata *udata)
+static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
{
- struct mlx5_ib_dev *dev;
- struct mlx5_ib_qp *qp;
- u16 xrcdn = 0;
- int err;
- struct ib_qp_init_attr mlx_init_attr;
- struct ib_qp_init_attr *init_attr = verbs_init_attr;
- struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct mlx5_ib_ucontext, ibucontext);
+ enum ib_qp_type qp_type = qp->type;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int create_flags = attr->create_flags;
+ bool cond;
- if (pd) {
- dev = to_mdev(pd->device);
+ if (qp->type == IB_QPT_UD && dev->profile == &raw_eth_profile)
+ if (create_flags & ~MLX5_IB_QP_CREATE_WC_TEST)
+ return -EINVAL;
- if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
- if (!ucontext) {
- mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
- return ERR_PTR(-EINVAL);
- } else if (!ucontext->cqe_version) {
- mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
- return ERR_PTR(-EINVAL);
- }
- }
- } else {
- /* being cautious here */
- if (init_attr->qp_type != IB_QPT_XRC_TGT &&
- init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
- pr_warn("%s: no PD for transport %s\n", __func__,
- ib_qp_type_str(init_attr->qp_type));
- return ERR_PTR(-EINVAL);
- }
- dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return (create_flags) ? -EINVAL : 0;
+
+ if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
+ return (create_flags) ? -EINVAL : 0;
+
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+ MLX5_CAP_GEN(mdev, block_lb_mc), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV,
+ MLX5_CAP_GEN(mdev, cd), qp);
+
+ if (qp_type == IB_QPT_UD) {
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_IPOIB_UD_LSO,
+ MLX5_CAP_GEN(mdev, ipoib_basic_offloads),
+ qp);
+ cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN,
+ cond, qp);
+ }
+
+ if (qp_type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, scatter_fcs);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_SCATTER_FCS, cond, qp);
+
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, vlan_cap);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_CVLAN_STRIPPING, cond, qp);
+ }
+
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING,
+ MLX5_CAP_GEN(mdev, end_pad), qp);
+
+ process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST,
+ qp_type != MLX5_IB_QPT_REG_UMR, qp);
+ process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
+ true, qp);
+
+ if (create_flags)
+ mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
+ create_flags);
+
+ return (create_flags) ? -EINVAL : 0;
+}
+
+static int process_udata_size(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ size_t ucmd = sizeof(struct mlx5_ib_create_qp);
+ struct ib_udata *udata = params->udata;
+ size_t outlen = udata->outlen;
+ size_t inlen = udata->inlen;
+
+ params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp));
+ params->ucmd_size = ucmd;
+ if (!params->is_rss_raw) {
+ /* User has old rdma-core, which doesn't support ECE */
+ size_t min_inlen =
+ offsetof(struct mlx5_ib_create_qp, ece_options);
+
+ /*
+ * We will check in check_ucmd_data() that the user
+ * cleared everything beyond the command layout we know.
+ */
+ params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd);
+ goto out;
}
- if (init_attr->qp_type == IB_QPT_DRIVER) {
- struct mlx5_ib_create_qp ucmd;
+ /* RSS RAW QP */
+ if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags))
+ return -EINVAL;
- init_attr = &mlx_init_attr;
- memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
- err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
- if (err)
- return ERR_PTR(err);
+ if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index))
+ return -EINVAL;
- if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
- if (init_attr->cap.max_recv_wr ||
- init_attr->cap.max_recv_sge) {
- mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
- return ERR_PTR(-EINVAL);
- }
- } else {
- return mlx5_ib_create_dct(pd, init_attr, &ucmd, udata);
- }
+ ucmd = sizeof(struct mlx5_ib_create_qp_rss);
+ params->ucmd_size = ucmd;
+ if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
+ return -EINVAL;
+
+ params->inlen = min(ucmd, inlen);
+out:
+ if (!params->inlen)
+ mlx5_ib_dbg(dev, "udata is too small\n");
+
+ return (params->inlen) ? 0 : -EINVAL;
+}
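process_udata_size() is the usual uverbs ABI dance: accept a shorter command from old userspace by copying only min(inlen, sizeof(cmd)), and accept a longer one from newer userspace only if the tail beyond the known struct is zeroed. A userspace-style sketch of the same rule, over a hypothetical command struct:

#include <stddef.h>
#include <string.h>

struct cmd { unsigned int a; unsigned int ece; }; /* hypothetical ABI */

static int parse_cmd(const void *user, size_t inlen, struct cmd *out)
{
	const unsigned char *p = user;
	size_t known = sizeof(*out);
	size_t i;

	memset(out, 0, known); /* short input: new fields read as zero */
	memcpy(out, user, inlen < known ? inlen : known);
	for (i = known; i < inlen; i++) /* long input: tail must be clear */
		if (p[i])
			return -1;
	return 0;
}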
+
+static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ int err;
+
+ if (params->is_rss_raw) {
+ err = create_rss_raw_qp_tir(dev, pd, qp, params);
+ goto out;
}
- switch (init_attr->qp_type) {
- case IB_QPT_XRC_TGT:
- case IB_QPT_XRC_INI:
- if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
- mlx5_ib_dbg(dev, "XRC not supported\n");
- return ERR_PTR(-ENOSYS);
- }
- init_attr->recv_cq = NULL;
- if (init_attr->qp_type == IB_QPT_XRC_TGT) {
- xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
- init_attr->send_cq = NULL;
- }
+ if (qp->type == MLX5_IB_QPT_DCT) {
+ err = create_dct(dev, pd, qp, params);
+ goto out;
+ }
- /* fall through */
- case IB_QPT_RAW_PACKET:
- case IB_QPT_RC:
- case IB_QPT_UC:
- case IB_QPT_UD:
- case IB_QPT_SMI:
- case MLX5_IB_QPT_HW_GSI:
- case MLX5_IB_QPT_REG_UMR:
- case MLX5_IB_QPT_DCI:
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ if (qp->type == IB_QPT_XRC_TGT) {
+ err = create_xrc_tgt_qp(dev, qp, params);
+ goto out;
+ }
- err = create_qp_common(dev, pd, init_attr, udata, qp);
- if (err) {
- mlx5_ib_dbg(dev, "create_qp_common failed\n");
- kfree(qp);
- return ERR_PTR(err);
- }
+ if (params->udata)
+ err = create_user_qp(dev, pd, qp, params);
+ else
+ err = create_kernel_qp(dev, pd, qp, params);
- if (is_qp0(init_attr->qp_type))
- qp->ibqp.qp_num = 0;
- else if (is_qp1(init_attr->qp_type))
- qp->ibqp.qp_num = 1;
- else
- qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
+out:
+ if (err) {
+ mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
+ return err;
+ }
- mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
- qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
- init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
- init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
+ if (is_qp0(qp->type))
+ qp->ibqp.qp_num = 0;
+ else if (is_qp1(qp->type))
+ qp->ibqp.qp_num = 1;
+ else
+ qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
- qp->trans_qp.xrcdn = xrcdn;
+ mlx5_ib_dbg(dev,
+ "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n",
+ qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
+ params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn :
+ -1,
+ params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn :
+ -1,
+ params->resp.ece_options);
- break;
+ return 0;
+}
- case IB_QPT_GSI:
- return mlx5_ib_gsi_create_qp(pd, init_attr);
+static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
+{
+ int ret = 0;
- case IB_QPT_RAW_IPV6:
- case IB_QPT_RAW_ETHERTYPE:
- case IB_QPT_MAX:
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
+ ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
+ break;
+ case MLX5_IB_QPT_DCI:
+ ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
+ -EINVAL :
+ 0;
+ break;
+ case IB_QPT_RAW_PACKET:
+ ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
+ break;
default:
- mlx5_ib_dbg(dev, "unsupported qp type %d\n",
- init_attr->qp_type);
- /* Don't support raw QPs */
- return ERR_PTR(-EOPNOTSUPP);
+ break;
}
- if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
- qp->qp_sub_type = init_attr->qp_type;
+ if (ret)
+ mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);
- return &qp->ibqp;
+ return ret;
+}
+
+static int get_qp_uidx(struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ if (params->is_rss_raw)
+ return 0;
+
+ return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
}
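rdma_udata_to_drv_context(), used in get_qp_uidx() and several functions above, is essentially a container_of() lookup: from a pointer to the embedded core object it recovers the enclosing driver structure. A minimal model with hypothetical types:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_core_obj { int refcount; };      /* stand-in core object */

struct drv_ctx {
	int cqe_version;
	struct ib_core_obj core;           /* embedded in the driver ctx */
};

static struct drv_ctx *to_drv_ctx(struct ib_core_obj *core)
{
	return container_of(core, struct drv_ctx, core);
}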
static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
@@ -2818,7 +2858,7 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
if (mqp->state == IB_QPS_RTR) {
int err;
- err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
+ err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
if (err) {
mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
return err;
@@ -2830,6 +2870,150 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
return 0;
}
+static int check_ucmd_data(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ struct ib_udata *udata = params->udata;
+ size_t size, last;
+ int ret;
+
+ if (params->is_rss_raw)
+ /*
+ * These QPs don't have a "reserved" field in their
+ * create_qp input struct, so their data is always valid.
+ */
+ last = sizeof(struct mlx5_ib_create_qp_rss);
+ else
+ /* IB_QPT_RAW_PACKET doesn't have ECE data */
+ switch (attr->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ last = offsetof(struct mlx5_ib_create_qp, ece_options);
+ break;
+ default:
+ last = offsetof(struct mlx5_ib_create_qp, reserved);
+ }
+
+ if (udata->inlen <= last)
+ return 0;
+
+ /*
+ * The user provides different create_qp structures based on the
+ * flow, and we need to know whether the memory after our
+ * struct create_qp ends was cleared.
+ */
+ size = udata->inlen - last;
+ ret = ib_is_udata_cleared(params->udata, last, size);
+ if (!ret)
+ mlx5_ib_dbg(
+ dev,
+ "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+ udata->inlen, params->ucmd_size, last, size);
+ return ret ? 0 : -EINVAL;
+}
+
+struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_create_qp_params params = {};
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_qp *qp;
+ enum ib_qp_type type;
+ int err;
+
+ dev = pd ? to_mdev(pd->device) :
+ to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);
+
+ err = check_qp_type(dev, attr, &type);
+ if (err)
+ return ERR_PTR(err);
+
+ err = check_valid_flow(dev, pd, attr, udata);
+ if (err)
+ return ERR_PTR(err);
+
+ if (attr->qp_type == IB_QPT_GSI)
+ return mlx5_ib_gsi_create_qp(pd, attr);
+
+ params.udata = udata;
+ params.uidx = MLX5_IB_DEFAULT_UIDX;
+ params.attr = attr;
+ params.is_rss_raw = !!attr->rwq_ind_tbl;
+
+ if (udata) {
+ err = process_udata_size(dev, &params);
+ if (err)
+ return ERR_PTR(err);
+
+ err = check_ucmd_data(dev, &params);
+ if (err)
+ return ERR_PTR(err);
+
+ params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
+ if (!params.ucmd)
+ return ERR_PTR(-ENOMEM);
+
+ err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
+ if (err)
+ goto free_ucmd;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
+
+ qp->type = type;
+ if (udata) {
+ err = process_vendor_flags(dev, qp, params.ucmd, attr);
+ if (err)
+ goto free_qp;
+
+ err = get_qp_uidx(qp, &params);
+ if (err)
+ goto free_qp;
+ }
+ err = process_create_flags(dev, qp, attr);
+ if (err)
+ goto free_qp;
+
+ err = check_qp_attr(dev, qp, attr);
+ if (err)
+ goto free_qp;
+
+ err = create_qp(dev, pd, qp, &params);
+ if (err)
+ goto free_qp;
+
+ kfree(params.ucmd);
+ params.ucmd = NULL;
+
+ if (udata)
+ /*
+ * It is safe to copy the response for all user create QP flows,
+ * including MLX5_IB_QPT_DCT, which doesn't need it.
+ * In that case, resp will be filled with zeros.
+ */
+ err = ib_copy_to_udata(udata, &params.resp, params.outlen);
+ if (err)
+ goto destroy_qp;
+
+ return &qp->ibqp;
+
+destroy_qp:
+ if (qp->type == MLX5_IB_QPT_DCT)
+ mlx5_ib_destroy_dct(qp);
+ else
+ destroy_qp_common(dev, qp, udata);
+ qp = NULL;
+free_qp:
+ kfree(qp);
+free_ucmd:
+ kfree(params.ucmd);
+ return ERR_PTR(err);
+}
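The ERR_PTR()/IS_ERR() returns in mlx5_ib_create_qp() follow the kernel convention of encoding a negative errno in the last MAX_ERRNO bytes of the address space, so a single pointer return carries either an object or an error. Simplified model:

#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err)   /* err is a negative errno */
{
	return (void *)err;
}

static inline long ptr_err(const void *ptr)
{
	return (long)ptr;
}

static inline int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}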
+
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
@@ -2838,7 +3022,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
if (unlikely(qp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_destroy_qp(qp);
- if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
+ if (mqp->type == MLX5_IB_QPT_DCT)
return mlx5_ib_destroy_dct(mqp);
destroy_qp_common(dev, mqp, udata);
@@ -2848,14 +3032,13 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
return 0;
}
-static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
- const struct ib_qp_attr *attr,
- int attr_mask, __be32 *hw_access_flags_be)
+static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ void *qpc)
{
- u8 dest_rd_atomic;
- u32 access_flags, hw_access_flags = 0;
-
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+ u8 dest_rd_atomic;
+ u32 access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
dest_rd_atomic = attr->max_dest_rd_atomic;
@@ -2870,8 +3053,8 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
if (!dest_rd_atomic)
access_flags &= IB_ACCESS_REMOTE_WRITE;
- if (access_flags & IB_ACCESS_REMOTE_READ)
- hw_access_flags |= MLX5_QP_BIT_RRE;
+ MLX5_SET(qpc, qpc, rre, !!(access_flags & IB_ACCESS_REMOTE_READ));
+
if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
int atomic_mode;
@@ -2879,15 +3062,11 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
if (atomic_mode < 0)
return -EOPNOTSUPP;
- hw_access_flags |= MLX5_QP_BIT_RAE;
- hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
+ MLX5_SET(qpc, qpc, rae, 1);
+ MLX5_SET(qpc, qpc, atomic_mode, atomic_mode);
}
- if (access_flags & IB_ACCESS_REMOTE_WRITE)
- hw_access_flags |= MLX5_QP_BIT_RWE;
-
- *hw_access_flags_be = cpu_to_be32(hw_access_flags);
-
+ MLX5_SET(qpc, qpc, rwe, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
return 0;
}
@@ -2933,7 +3112,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
- err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
kvfree(in);
@@ -2960,18 +3139,29 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
- err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
kvfree(in);
return err;
}
+static void mlx5_set_path_udp_sport(void *path, const struct rdma_ah_attr *ah,
+ u32 lqpn, u32 rqpn)
+{
+ u32 fl = ah->grh.flow_label;
+
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ MLX5_SET(ads, path, udp_sport, rdma_flow_label_to_udp_sport(fl));
+}
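mlx5_set_path_udp_sport() makes RoCEv2 traffic ECMP-friendly: if the caller supplied no flow label, one is derived from the QP pair, then folded into the UDP source port within the range RoCEv2 reserves for it, so switches hashing on the 5-tuple spread connections across paths. A sketch of the folding step (close to, but not guaranteed to match, rdma_flow_label_to_udp_sport()):

#include <stdint.h>

#define ROCE_UDP_SPORT_MIN 0xC000 /* assumed low end of the valid range */

/* Fold a 20-bit IPv6 flow label into 14 bits and force it into the
 * reserved source-port range. */
static uint16_t flow_label_to_udp_sport(uint32_t fl)
{
	uint32_t low = fl & 0x3fff;        /* bits 0..13  */
	uint32_t high = (fl >> 14) & 0x3f; /* bits 14..19 */

	return (uint16_t)((low ^ high) | ROCE_UDP_SPORT_MIN);
}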
+
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- const struct rdma_ah_attr *ah,
- struct mlx5_qp_path *path, u8 port, int attr_mask,
- u32 path_flags, const struct ib_qp_attr *attr,
- bool alt)
+ const struct rdma_ah_attr *ah, void *path, u8 port,
+ int attr_mask, u32 path_flags,
+ const struct ib_qp_attr *attr, bool alt)
{
const struct ib_global_route *grh = rdma_ah_read_grh(ah);
int err;
@@ -2980,8 +3170,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u8 sl = rdma_ah_get_sl(ah);
if (attr_mask & IB_QP_PKEY_INDEX)
- path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
- attr->pkey_index);
+ MLX5_SET(ads, path, pkey_index,
+ alt ? attr->alt_pkey_index : attr->pkey_index);
if (ah_flags & IB_AH_GRH) {
if (grh->sgid_index >=
@@ -2997,45 +3187,49 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!(ah_flags & IB_AH_GRH))
return -EINVAL;
- memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
- if (qp->ibqp.qp_type == IB_QPT_RC ||
- qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- qp->ibqp.qp_type == IB_QPT_XRC_TGT)
- path->udp_sport =
- mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
- path->dci_cfi_prio_sl = (sl & 0x7) << 4;
+ ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
+ ah->roce.dmac);
+ if ((qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
+ (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (attr_mask & IB_QP_DEST_QPN))
+ mlx5_set_path_udp_sport(path, ah,
+ qp->ibqp.qp_num,
+ attr->dest_qp_num);
+ MLX5_SET(ads, path, eth_prio, sl & 0x7);
gid_type = ah->grh.sgid_attr->gid_type;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
- path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
+ MLX5_SET(ads, path, dscp, grh->traffic_class >> 2);
} else {
- path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
- path->fl_free_ar |=
- (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
- path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
- path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
- if (ah_flags & IB_AH_GRH)
- path->grh_mlid |= 1 << 7;
- path->dci_cfi_prio_sl = sl & 0xf;
+ MLX5_SET(ads, path, fl, !!(path_flags & MLX5_PATH_FLAG_FL));
+ MLX5_SET(ads, path, free_ar,
+ !!(path_flags & MLX5_PATH_FLAG_FREE_AR));
+ MLX5_SET(ads, path, rlid, rdma_ah_get_dlid(ah));
+ MLX5_SET(ads, path, mlid, rdma_ah_get_path_bits(ah));
+ MLX5_SET(ads, path, grh, !!(ah_flags & IB_AH_GRH));
+ MLX5_SET(ads, path, sl, sl);
}
if (ah_flags & IB_AH_GRH) {
- path->mgid_index = grh->sgid_index;
- path->hop_limit = grh->hop_limit;
- path->tclass_flowlabel =
- cpu_to_be32((grh->traffic_class << 20) |
- (grh->flow_label));
- memcpy(path->rgid, grh->dgid.raw, 16);
+ MLX5_SET(ads, path, src_addr_index, grh->sgid_index);
+ MLX5_SET(ads, path, hop_limit, grh->hop_limit);
+ MLX5_SET(ads, path, tclass, grh->traffic_class);
+ MLX5_SET(ads, path, flow_label, grh->flow_label);
+ memcpy(MLX5_ADDR_OF(ads, path, rgid_rip), grh->dgid.raw,
+ sizeof(grh->dgid.raw));
}
err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
- path->static_rate = err;
- path->port = port;
+ MLX5_SET(ads, path, stat_rate, err);
+ MLX5_SET(ads, path, vhca_port_num, port);
if (attr_mask & IB_QP_TIMEOUT)
- path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
+ MLX5_SET(ads, path, ack_timeout,
+ alt ? attr->alt_timeout : attr->timeout);
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
@@ -3052,10 +3246,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_PRI_PORT,
@@ -3063,17 +3259,20 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
@@ -3082,7 +3281,8 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
},
},
[MLX5_QP_STATE_RTR] = {
@@ -3240,7 +3440,7 @@ static int modify_raw_packet_qp_rq(
"RAW PACKET QP counters are not supported on current FW\n");
}
- err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
+ err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
if (err)
goto out;
@@ -3303,7 +3503,7 @@ static int modify_raw_packet_qp_sq(
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
}
- err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
+ err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in);
if (err) {
/* Remove new rate from table if failed */
if (new_rate_added)
@@ -3416,43 +3616,80 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return 0;
}
-static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
- struct mlx5_ib_pd *pd,
- struct mlx5_ib_qp_base *qp_base,
- u8 port_num, struct ib_udata *udata)
+static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
+ struct ib_udata *udata)
{
struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- unsigned int tx_port_affinity;
+ u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ atomic_t *tx_port_affinity;
+
+ if (ucontext)
+ tx_port_affinity = &ucontext->tx_port_affinity;
+ else
+ tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
+
+ return (unsigned int)atomic_add_return(1, tx_port_affinity) %
+ MLX5_MAX_PORTS + 1;
+}
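get_tx_affinity_rr() spreads QPs across LAG ports with a shared atomic counter, one per user context (or per native port for kernel consumers), mapped onto ports 1..MLX5_MAX_PORTS; get_tx_affinity() returning 0 means firmware picks. The counting itself, expressed in C11 atomics:

#include <stdatomic.h>

#define NUM_PORTS 2 /* stands in for MLX5_MAX_PORTS */

static atomic_uint tx_port_affinity; /* one per context in the driver */

/* Post-increment value modulo the port count, shifted to 1-based port
 * numbering, mirroring atomic_add_return(1, ...) % NUM_PORTS + 1. */
static unsigned int next_tx_port(void)
{
	return (atomic_fetch_add(&tx_port_affinity, 1) + 1) % NUM_PORTS + 1;
}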
+
+static bool qp_supports_affinity(struct ib_qp *qp)
+{
+ if ((qp->qp_type == IB_QPT_RC) ||
+ (qp->qp_type == IB_QPT_UD) ||
+ (qp->qp_type == IB_QPT_UC) ||
+ (qp->qp_type == IB_QPT_RAW_PACKET) ||
+ (qp->qp_type == IB_QPT_XRC_INI) ||
+ (qp->qp_type == IB_QPT_XRC_TGT))
+ return true;
+ return false;
+}
+
+static unsigned int get_tx_affinity(struct ib_qp *qp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, u8 init,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_qp_base *qp_base;
+ unsigned int tx_affinity;
+
+ if (!(mlx5_ib_lag_should_assign_affinity(dev) &&
+ qp_supports_affinity(qp)))
+ return 0;
+
+ if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ tx_affinity = mqp->gsi_lag_port;
+ else if (init)
+ tx_affinity = get_tx_affinity_rr(dev, udata);
+ else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
+ tx_affinity =
+ mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave);
+ else
+ return 0;
- if (ucontext) {
- tx_port_affinity = (unsigned int)atomic_add_return(
- 1, &ucontext->tx_port_affinity) %
- MLX5_MAX_PORTS +
- 1;
+ qp_base = &mqp->trans_qp.base;
+ if (ucontext)
mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
- tx_port_affinity, qp_base->mqp.qpn, ucontext);
- } else {
- tx_port_affinity =
- (unsigned int)atomic_add_return(
- 1, &dev->port[port_num].roce.tx_port_affinity) %
- MLX5_MAX_PORTS +
- 1;
+ tx_affinity, qp_base->mqp.qpn, ucontext);
+ else
mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
- tx_port_affinity, qp_base->mqp.qpn);
- }
-
- return tx_port_affinity;
+ tx_affinity, qp_base->mqp.qpn);
+ return tx_affinity;
}
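/*
 * Summary of the precedence just implemented (an annotation, assuming the
 * flags behave as their names suggest):
 *   1. QP1/GSI QPs reuse the LAG port recorded at creation (gsi_lag_port).
 *   2. A RESET->INIT transition draws a fresh round-robin port.
 *   3. Otherwise an explicit bond slave passed via IB_QP_AV wins.
 *   4. Anything else returns 0, leaving the affinity unset.
 */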
static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
struct rdma_counter *counter)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ u32 in[MLX5_ST_SZ_DW(rts2rts_qp_in)] = {};
struct mlx5_ib_qp *mqp = to_mqp(qp);
- struct mlx5_qp_context context = {};
struct mlx5_ib_qp_base *base;
u32 set_id;
+ u32 *qpc;
if (counter)
set_id = counter->id;
@@ -3460,12 +3697,15 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
base = &mqp->trans_qp.base;
- context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
- context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
- return mlx5_core_qp_modify(dev->mdev,
- MLX5_CMD_OP_RTS2RTS_QP,
- MLX5_QP_OPTPAR_COUNTER_SET_ID,
- &context, &base->mqp);
+ MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+ MLX5_SET(rts2rts_qp_in, in, uid, base->mqp.uid);
+ MLX5_SET(rts2rts_qp_in, in, opt_param_mask,
+ MLX5_QP_OPTPAR_COUNTER_SET_ID);
+
+ qpc = MLX5_ADDR_OF(rts2rts_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
+ return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in);
}
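/*
 * Aside, not patch text: the MLX5_SET() idiom replaces hand-rolled
 * big-endian packing throughout this conversion. The two forms below are
 * equivalent, given that counter_set_id occupies the top byte of that
 * dword as the old shift implies:
 *
 *	// old: manual pack into a __be32 struct field
 *	context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
 *	context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
 *
 *	// new: name-based accessor into the command inbox
 *	MLX5_SET(qpc, qpc, counter_set_id, set_id);
 */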
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
@@ -3473,6 +3713,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
enum ib_qp_state cur_state,
enum ib_qp_state new_state,
const struct mlx5_ib_modify_qp *ucmd,
+ struct mlx5_ib_modify_qp_resp *resp,
struct ib_udata *udata)
{
static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
@@ -3516,67 +3757,60 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_ib_cq *send_cq, *recv_cq;
- struct mlx5_qp_context *context;
struct mlx5_ib_pd *pd;
enum mlx5_qp_state mlx5_cur, mlx5_new;
- enum mlx5_qp_optpar optpar;
+ void *qpc, *pri_path, *alt_path;
+ enum mlx5_qp_optpar optpar = 0;
u32 set_id = 0;
int mlx5_st;
int err;
u16 op;
u8 tx_affinity = 0;
- mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
- qp->qp_sub_type : ibqp->qp_type);
+ mlx5_st = to_mlx5_st(qp->type);
if (mlx5_st < 0)
return -EINVAL;
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc)
return -ENOMEM;
- pd = get_pd(qp);
- context->flags = cpu_to_be32(mlx5_st << 16);
+ pd = to_mpd(qp->ibqp.pd);
+ MLX5_SET(qpc, qpc, st, mlx5_st);
if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
- context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
} else {
switch (attr->path_mig_state) {
case IB_MIG_MIGRATED:
- context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
break;
case IB_MIG_REARM:
- context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_REARM);
break;
case IB_MIG_ARMED:
- context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_ARMED);
break;
}
}
- if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
- if ((ibqp->qp_type == IB_QPT_RC) ||
- (ibqp->qp_type == IB_QPT_UD &&
- !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
- (ibqp->qp_type == IB_QPT_UC) ||
- (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
- (ibqp->qp_type == IB_QPT_XRC_INI) ||
- (ibqp->qp_type == IB_QPT_XRC_TGT)) {
- if (dev->lag_active) {
- u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
- tx_affinity = get_tx_affinity(dev, pd, base, p,
- udata);
- context->flags |= cpu_to_be32(tx_affinity << 24);
- }
- }
- }
+ tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT, udata);
+
+ MLX5_SET(qpc, qpc, lag_tx_port_affinity, tx_affinity);
+ if (tx_affinity && new_state == IB_QPS_RTR &&
+ MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
+ optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF;
if (is_sqp(ibqp->qp_type)) {
- context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_256);
+ MLX5_SET(qpc, qpc, log_msg_max, 8);
} else if ((ibqp->qp_type == IB_QPT_UD &&
- !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
+ MLX5_SET(qpc, qpc, log_msg_max, 12);
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > IB_MTU_4096) {
@@ -3584,40 +3818,45 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = -EINVAL;
goto out;
}
- context->mtu_msgmax = (attr->path_mtu << 5) |
- (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
+ MLX5_SET(qpc, qpc, mtu, attr->path_mtu);
+ MLX5_SET(qpc, qpc, log_msg_max,
+ MLX5_CAP_GEN(dev->mdev, log_max_msg));
}
if (attr_mask & IB_QP_DEST_QPN)
- context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
+ MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num);
+
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
if (attr_mask & IB_QP_PKEY_INDEX)
- context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
+ MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index);
/* todo implement counter_index functionality */
if (is_sqp(ibqp->qp_type))
- context->pri_path.port = qp->port;
+ MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
if (attr_mask & IB_QP_PORT)
- context->pri_path.port = attr->port_num;
+ MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num);
if (attr_mask & IB_QP_AV) {
- err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
- attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
+ err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path,
+ attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port,
attr_mask, 0, attr, false);
if (err)
goto out;
}
if (attr_mask & IB_QP_TIMEOUT)
- context->pri_path.ackto_lt |= attr->timeout << 3;
+ MLX5_SET(ads, pri_path, ack_timeout, attr->timeout);
if (attr_mask & IB_QP_ALT_PATH) {
- err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
- &context->alt_path,
+ err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path,
attr->alt_port_num,
- attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+ attr_mask | IB_QP_PKEY_INDEX |
+ IB_QP_TIMEOUT,
0, attr, true);
if (err)
goto out;
@@ -3626,75 +3865,68 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
- context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
- context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
- context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
- context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
+ MLX5_SET(qpc, qpc, pd, pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
+ if (send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn);
+ if (recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn);
+
+ MLX5_SET(qpc, qpc, log_ack_req_freq, MLX5_IB_ACK_REQ_FREQ);
if (attr_mask & IB_QP_RNR_RETRY)
- context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
+ MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
if (attr_mask & IB_QP_RETRY_CNT)
- context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
+ MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
- if (attr->max_rd_atomic)
- context->params1 |=
- cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
- }
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
+ MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic));
if (attr_mask & IB_QP_SQ_PSN)
- context->next_send_psn = cpu_to_be32(attr->sq_psn);
+ MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- if (attr->max_dest_rd_atomic)
- context->params2 |=
- cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
- }
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
+ MLX5_SET(qpc, qpc, log_rra_max,
+ ilog2(attr->max_dest_rd_atomic));
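/*
 * Worked example for the log conversion above, an observation rather than
 * patch text: the old fls(x - 1) computes ceil(log2(x)) while ilog2(x) is
 * floor(log2(x)). For the power-of-two depths this path expects they
 * coincide (x = 8: fls(7) = 3 = ilog2(8)); for x = 5 they would not
 * (fls(4) = 3 vs ilog2(5) = 2), so the new form relies on callers passing
 * powers of two.
 */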
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
- __be32 access_flags;
-
- err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
+ err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
if (err)
goto out;
-
- context->params2 |= access_flags;
}
if (attr_mask & IB_QP_MIN_RNR_TIMER)
- context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
+ MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer);
if (attr_mask & IB_QP_RQ_PSN)
- context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
+ MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn);
if (attr_mask & IB_QP_QKEY)
- context->qkey = cpu_to_be32(attr->qkey);
+ MLX5_SET(qpc, qpc, q_key, attr->qkey);
if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- context->db_rec_addr = cpu_to_be64(qp->db.dma);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
/* Underlay port should be used - index 0 function per port */
- if (qp->flags & MLX5_IB_QP_UNDERLAY)
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
port_num = 0;
if (ibqp->counter)
set_id = ibqp->counter->id;
else
set_id = mlx5_ib_get_counters_id(dev, port_num);
- context->qp_counter_set_usr_page |=
- cpu_to_be32(set_id << 24);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
}
if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- context->sq_crq_size |= cpu_to_be16(1 << 4);
+ MLX5_SET(qpc, qpc, rlky, 1);
- if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- context->deth_sqpn = cpu_to_be32(1);
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ MLX5_SET(qpc, qpc, deth_sqpn, 1);
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
@@ -3706,11 +3938,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
op = optab[mlx5_cur][mlx5_new];
- optpar = ib_mask_to_mlx5_opt(attr_mask);
+ optpar |= ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
struct mlx5_modify_raw_qp_param raw_qp_param = {};
raw_qp_param.operation = op;
@@ -3752,8 +3984,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
} else {
- err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
- &base->mqp);
+ if (udata) {
+ /* For the kernel flows, the resp will stay zero */
+ resp->ece_options =
+ MLX5_CAP_GEN(dev->mdev, ece_support) ?
+ ucmd->ece_options : 0;
+ resp->response_length = sizeof(*resp);
+ }
+ err = mlx5_core_qp_modify(dev, op, optpar, qpc, &base->mqp,
+ &resp->ece_options);
}
if (err)
@@ -3800,7 +4039,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
out:
- kfree(context);
+ kfree(qpc);
return err;
}
@@ -3858,7 +4097,8 @@ static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new
* Other transitions and attributes are illegal
*/
static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
+ int attr_mask, struct mlx5_ib_modify_qp *ucmd,
+ struct ib_udata *udata)
{
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -3874,6 +4114,15 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr->qp_state;
dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
+ /*
+ * DCT doesn't initialize QP till modify command is executed,
+ * so we need to overwrite previously set ECE field if user
+ * provided any value except zero, which means not set/not
+ * valid.
+ */
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u16 set_id;
@@ -3906,17 +4155,23 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
-
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
- u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
- u32 min_resp_len = offsetof(typeof(resp), dctn) +
- sizeof(resp.dctn);
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
+ u32 min_resp_len = offsetofend(typeof(resp), dctn);
if (udata->outlen < min_resp_len)
return -EINVAL;
- resp.response_length = min_resp_len;
+ /*
+ * If we don't have enough space for the ECE options,
+ * simply indicate it with resp.response_length.
+ */
+ resp.response_length = (udata->outlen < sizeof(resp)) ?
+ min_resp_len :
+ sizeof(resp);
+
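/*
 * Example of the negotiation above, assuming offsetofend(resp, dctn) is
 * smaller than sizeof(resp): an old userspace whose outlen only reaches
 * dctn still succeeds but never sees the ECE word, while a new userspace
 * with outlen >= sizeof(resp) gets response_length == sizeof(resp) and
 * can trust resp.ece_options.
 */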
required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
if (!is_valid_mask(attr_mask, required, 0))
return -EINVAL;
@@ -3927,15 +4182,17 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
- err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
+ err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
MLX5_ST_SZ_BYTES(create_dct_in), out,
sizeof(out));
if (err)
return err;
resp.dctn = qp->dct.mdct.mqp.qpn;
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp.ece_options = MLX5_GET(create_dct_out, out, ece);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err) {
- mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
+ mlx5_core_destroy_dct(dev, &qp->dct.mdct);
return err;
}
} else {
@@ -3953,11 +4210,11 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_modify_qp_resp resp = {};
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_modify_qp ucmd = {};
enum ib_qp_type qp_type;
enum ib_qp_state cur_state, new_state;
- size_t required_cmd_sz;
int err = -EINVAL;
int port;
@@ -3965,9 +4222,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -ENOSYS;
if (udata && udata->inlen) {
- required_cmd_sz = offsetof(typeof(ucmd), reserved) +
- sizeof(ucmd.reserved);
- if (udata->inlen < required_cmd_sz)
+ if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
return -EINVAL;
if (udata->inlen > sizeof(ucmd) &&
@@ -3980,23 +4235,20 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -EFAULT;
if (ucmd.comp_mask ||
- memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) ||
memchr_inv(&ucmd.burst_info.reserved, 0,
sizeof(ucmd.burst_info.reserved)))
return -EOPNOTSUPP;
+
}
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
- if (ibqp->qp_type == IB_QPT_DRIVER)
- qp_type = qp->qp_sub_type;
- else
- qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
- IB_QPT_GSI : ibqp->qp_type;
+ qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? IB_QPT_GSI :
+ qp->type;
if (qp_type == MLX5_IB_QPT_DCT)
- return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);
+ return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
mutex_lock(&qp->mutex);
@@ -4007,7 +4259,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
}
- if (qp->flags & MLX5_IB_QP_UNDERLAY) {
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
attr_mask);
@@ -4067,1439 +4319,19 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
- new_state, &ucmd, udata);
-
-out:
- mutex_unlock(&qp->mutex);
- return err;
-}
-
-static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
- u32 wqe_sz, void **cur_edge)
-{
- u32 idx;
-
- idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
- *cur_edge = get_sq_edge(sq, idx);
-
- *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
-}
-
-/* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
- * next nearby edge and get new address translation for current WQE position.
- * @sq - SQ buffer.
- * @seg: Current WQE position (16B aligned).
- * @wqe_sz: Total current WQE size [16B].
- * @cur_edge: Updated current edge.
- */
-static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
- u32 wqe_sz, void **cur_edge)
-{
- if (likely(*seg != *cur_edge))
- return;
-
- _handle_post_send_edge(sq, seg, wqe_sz, cur_edge);
-}
-
-/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
- * pointers. At the end @seg is aligned to 16B regardless the copied size.
- * @sq - SQ buffer.
- * @cur_edge: Updated current edge.
- * @seg: Current WQE position (16B aligned).
- * @wqe_sz: Total current WQE size [16B].
- * @src: Pointer to copy from.
- * @n: Number of bytes to copy.
- */
-static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
- void **seg, u32 *wqe_sz, const void *src,
- size_t n)
-{
- while (likely(n)) {
- size_t leftlen = *cur_edge - *seg;
- size_t copysz = min_t(size_t, leftlen, n);
- size_t stride;
-
- memcpy(*seg, src, copysz);
-
- n -= copysz;
- src += copysz;
- stride = !n ? ALIGN(copysz, 16) : copysz;
- *seg += stride;
- *wqe_sz += stride >> 4;
- handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
- }
-}
-
-static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
-{
- struct mlx5_ib_cq *cq;
- unsigned cur;
-
- cur = wq->head - wq->tail;
- if (likely(cur + nreq < wq->max_post))
- return 0;
-
- cq = to_mcq(ib_cq);
- spin_lock(&cq->lock);
- cur = wq->head - wq->tail;
- spin_unlock(&cq->lock);
-
- return cur + nreq >= wq->max_post;
-}
-
-static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
- u64 remote_addr, u32 rkey)
-{
- rseg->raddr = cpu_to_be64(remote_addr);
- rseg->rkey = cpu_to_be32(rkey);
- rseg->reserved = 0;
-}
-
-static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
- void **seg, int *size, void **cur_edge)
-{
- struct mlx5_wqe_eth_seg *eseg = *seg;
-
- memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
-
- if (wr->send_flags & IB_SEND_IP_CSUM)
- eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
-
- if (wr->opcode == IB_WR_LSO) {
- struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
- size_t left, copysz;
- void *pdata = ud_wr->header;
- size_t stride;
-
- left = ud_wr->hlen;
- eseg->mss = cpu_to_be16(ud_wr->mss);
- eseg->inline_hdr.sz = cpu_to_be16(left);
-
- /* memcpy_send_wqe should get a 16B align address. Hence, we
- * first copy up to the current edge and then, if needed,
- * fall-through to memcpy_send_wqe.
- */
- copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
- left);
- memcpy(eseg->inline_hdr.start, pdata, copysz);
- stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
- sizeof(eseg->inline_hdr.start) + copysz, 16);
- *size += stride / 16;
- *seg += stride;
-
- if (copysz < left) {
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
- left -= copysz;
- pdata += copysz;
- memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
- left);
- }
-
- return;
- }
-
- *seg += sizeof(struct mlx5_wqe_eth_seg);
- *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
-}
-
-static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
- const struct ib_send_wr *wr)
-{
- memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
- dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
- dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
-}
-
-static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
-{
- dseg->byte_count = cpu_to_be32(sg->length);
- dseg->lkey = cpu_to_be32(sg->lkey);
- dseg->addr = cpu_to_be64(sg->addr);
-}
-
-static u64 get_xlt_octo(u64 bytes)
-{
- return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
- MLX5_IB_UMR_OCTOWORD;
-}
-
-static __be64 frwr_mkey_mask(bool atomic)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_EN_RINVAL |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_SMALL_FENCE |
- MLX5_MKEY_MASK_FREE;
-
- if (atomic)
- result |= MLX5_MKEY_MASK_A;
-
- return cpu_to_be64(result);
-}
-
-static __be64 sig_mkey_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_EN_SIGERR |
- MLX5_MKEY_MASK_EN_RINVAL |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_SMALL_FENCE |
- MLX5_MKEY_MASK_FREE |
- MLX5_MKEY_MASK_BSF_EN;
-
- return cpu_to_be64(result);
-}
-
-static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr, u8 flags, bool atomic)
-{
- int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
-
- memset(umr, 0, sizeof(*umr));
-
- umr->flags = flags;
- umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
- umr->mkey_mask = frwr_mkey_mask(atomic);
-}
-
-static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
-{
- memset(umr, 0, sizeof(*umr));
- umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
- umr->flags = MLX5_UMR_INLINE;
-}
-
-static __be64 get_umr_enable_mr_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_FREE;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_disable_mr_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_FREE;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_translation_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_access_mask(int atomic)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW;
-
- if (atomic)
- result |= MLX5_MKEY_MASK_A;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_pd_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_PD;
-
- return cpu_to_be64(result);
-}
-
-static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
-{
- if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
- MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
- (mask & MLX5_MKEY_MASK_A &&
- MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
- return -EPERM;
- return 0;
-}
-
-static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
- struct mlx5_wqe_umr_ctrl_seg *umr,
- const struct ib_send_wr *wr, int atomic)
-{
- const struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- memset(umr, 0, sizeof(*umr));
-
- if (!umrwr->ignore_free_state) {
- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
- /* fail if free */
- umr->flags = MLX5_UMR_CHECK_FREE;
- else
- /* fail if not free */
- umr->flags = MLX5_UMR_CHECK_NOT_FREE;
- }
-
- umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
- u64 offset = get_xlt_octo(umrwr->offset);
-
- umr->xlt_offset = cpu_to_be16(offset & 0xffff);
- umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
- umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
- }
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
- umr->mkey_mask |= get_umr_update_translation_mask();
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
- umr->mkey_mask |= get_umr_update_access_mask(atomic);
- umr->mkey_mask |= get_umr_update_pd_mask();
- }
- if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
- umr->mkey_mask |= get_umr_enable_mr_mask();
- if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
- umr->mkey_mask |= get_umr_disable_mr_mask();
-
- if (!wr->num_sge)
- umr->flags |= MLX5_UMR_INLINE;
-
- return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
-}
-
-static u8 get_umr_flags(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
- (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
- MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
-}
-
-static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
- struct mlx5_ib_mr *mr,
- u32 key, int access)
-{
- int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
-
- memset(seg, 0, sizeof(*seg));
-
- if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
- seg->log2_page_size = ilog2(mr->ibmr.page_size);
- else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
- /* KLMs take twice the size of MTTs */
- ndescs *= 2;
-
- seg->flags = get_umr_flags(access) | mr->access_mode;
- seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
- seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
- seg->start_addr = cpu_to_be64(mr->ibmr.iova);
- seg->len = cpu_to_be64(mr->ibmr.length);
- seg->xlt_oct_size = cpu_to_be32(ndescs);
-}
-
-static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
-{
- memset(seg, 0, sizeof(*seg));
- seg->status = MLX5_MKEY_STATUS_FREE;
-}
-
-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
- const struct ib_send_wr *wr)
-{
- const struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- memset(seg, 0, sizeof(*seg));
- if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
- seg->status = MLX5_MKEY_STATUS_FREE;
-
- seg->flags = convert_access(umrwr->access_flags);
- if (umrwr->pd)
- seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
- !umrwr->length)
- seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
-
- seg->start_addr = cpu_to_be64(umrwr->virt_addr);
- seg->len = cpu_to_be64(umrwr->length);
- seg->log2_page_size = umrwr->page_shift;
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
- mlx5_mkey_variant(umrwr->mkey));
-}
-
-static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
- struct mlx5_ib_mr *mr,
- struct mlx5_ib_pd *pd)
-{
- int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
-
- dseg->addr = cpu_to_be64(mr->desc_map);
- dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
- dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
-}
-
-static __be32 send_ieth(const struct ib_send_wr *wr)
-{
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- return wr->ex.imm_data;
-
- case IB_WR_SEND_WITH_INV:
- return cpu_to_be32(wr->ex.invalidate_rkey);
-
- default:
- return 0;
- }
-}
-
-static u8 calc_sig(void *wqe, int size)
-{
- u8 *p = wqe;
- u8 res = 0;
- int i;
-
- for (i = 0; i < size; i++)
- res ^= p[i];
-
- return ~res;
-}
-
-static u8 wq_sig(void *wqe)
-{
- return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
-}
-
-static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
- void **wqe, int *wqe_sz, void **cur_edge)
-{
- struct mlx5_wqe_inline_seg *seg;
- size_t offset;
- int inl = 0;
- int i;
-
- seg = *wqe;
- *wqe += sizeof(*seg);
- offset = sizeof(*seg);
-
- for (i = 0; i < wr->num_sge; i++) {
- size_t len = wr->sg_list[i].length;
- void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
-
- inl += len;
-
- if (unlikely(inl > qp->max_inline_data))
- return -ENOMEM;
-
- while (likely(len)) {
- size_t leftlen;
- size_t copysz;
+ new_state, &ucmd, &resp, udata);
- handle_post_send_edge(&qp->sq, wqe,
- *wqe_sz + (offset >> 4),
- cur_edge);
-
- leftlen = *cur_edge - *wqe;
- copysz = min_t(size_t, leftlen, len);
-
- memcpy(*wqe, addr, copysz);
- len -= copysz;
- addr += copysz;
- *wqe += copysz;
- offset += copysz;
- }
- }
-
- seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
-
- *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
-
- return 0;
-}
-
-static u16 prot_field_size(enum ib_signature_type type)
-{
- switch (type) {
- case IB_SIG_TYPE_T10_DIF:
- return MLX5_DIF_SIZE;
- default:
- return 0;
- }
-}
-
-static u8 bs_selector(int block_size)
-{
- switch (block_size) {
- case 512: return 0x1;
- case 520: return 0x2;
- case 4096: return 0x3;
- case 4160: return 0x4;
- case 1073741824: return 0x5;
- default: return 0;
- }
-}
-
-static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
- struct mlx5_bsf_inl *inl)
-{
- /* Valid inline section and allow BSF refresh */
- inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
- MLX5_BSF_REFRESH_DIF);
- inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
- inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
- /* repeating block */
- inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
- inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
- MLX5_DIF_CRC : MLX5_DIF_IPCS;
-
- if (domain->sig.dif.ref_remap)
- inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
-
- if (domain->sig.dif.app_escape) {
- if (domain->sig.dif.ref_escape)
- inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
- else
- inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
- }
-
- inl->dif_app_bitmask_check =
- cpu_to_be16(domain->sig.dif.apptag_check_mask);
-}
-
-static int mlx5_set_bsf(struct ib_mr *sig_mr,
- struct ib_sig_attrs *sig_attrs,
- struct mlx5_bsf *bsf, u32 data_size)
-{
- struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
- struct mlx5_bsf_basic *basic = &bsf->basic;
- struct ib_sig_domain *mem = &sig_attrs->mem;
- struct ib_sig_domain *wire = &sig_attrs->wire;
-
- memset(bsf, 0, sizeof(*bsf));
-
- /* Basic + Extended + Inline */
- basic->bsf_size_sbs = 1 << 7;
- /* Input domain check byte mask */
- basic->check_byte_mask = sig_attrs->check_mask;
- basic->raw_data_size = cpu_to_be32(data_size);
-
- /* Memory domain */
- switch (sig_attrs->mem.sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
- basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
- mlx5_fill_inl_bsf(mem, &bsf->m_inl);
- break;
- default:
- return -EINVAL;
- }
-
- /* Wire domain */
- switch (sig_attrs->wire.sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
- mem->sig_type == wire->sig_type) {
- /* Same block structure */
- basic->bsf_size_sbs |= 1 << 4;
- if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
- basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
- if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
- basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
- if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
- basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
- } else
- basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
-
- basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
- mlx5_fill_inl_bsf(wire, &bsf->w_inl);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int set_sig_data_segment(const struct ib_send_wr *send_wr,
- struct ib_mr *sig_mr,
- struct ib_sig_attrs *sig_attrs,
- struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
-{
- struct mlx5_bsf *bsf;
- u32 data_len;
- u32 data_key;
- u64 data_va;
- u32 prot_len = 0;
- u32 prot_key = 0;
- u64 prot_va = 0;
- bool prot = false;
- int ret;
- int wqe_size;
- struct mlx5_ib_mr *mr = to_mmr(sig_mr);
- struct mlx5_ib_mr *pi_mr = mr->pi_mr;
-
- data_len = pi_mr->data_length;
- data_key = pi_mr->ibmr.lkey;
- data_va = pi_mr->data_iova;
- if (pi_mr->meta_ndescs) {
- prot_len = pi_mr->meta_length;
- prot_key = pi_mr->ibmr.lkey;
- prot_va = pi_mr->pi_iova;
- prot = true;
- }
-
- if (!prot || (data_key == prot_key && data_va == prot_va &&
- data_len == prot_len)) {
- /**
- * Source domain doesn't contain signature information
- * or data and protection are interleaved in memory.
- * So need construct:
- * ------------------
- * | data_klm |
- * ------------------
- * | BSF |
- * ------------------
- **/
- struct mlx5_klm *data_klm = *seg;
-
- data_klm->bcount = cpu_to_be32(data_len);
- data_klm->key = cpu_to_be32(data_key);
- data_klm->va = cpu_to_be64(data_va);
- wqe_size = ALIGN(sizeof(*data_klm), 64);
- } else {
- /**
- * Source domain contains signature information
- * So need construct a strided block format:
- * ---------------------------
- * | stride_block_ctrl |
- * ---------------------------
- * | data_klm |
- * ---------------------------
- * | prot_klm |
- * ---------------------------
- * | BSF |
- * ---------------------------
- **/
- struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
- struct mlx5_stride_block_entry *data_sentry;
- struct mlx5_stride_block_entry *prot_sentry;
- u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
- int prot_size;
-
- sblock_ctrl = *seg;
- data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
- prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
-
- prot_size = prot_field_size(sig_attrs->mem.sig_type);
- if (!prot_size) {
- pr_err("Bad block size given: %u\n", block_size);
- return -EINVAL;
- }
- sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
- prot_size);
- sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
- sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
- sblock_ctrl->num_entries = cpu_to_be16(2);
-
- data_sentry->bcount = cpu_to_be16(block_size);
- data_sentry->key = cpu_to_be32(data_key);
- data_sentry->va = cpu_to_be64(data_va);
- data_sentry->stride = cpu_to_be16(block_size);
-
- prot_sentry->bcount = cpu_to_be16(prot_size);
- prot_sentry->key = cpu_to_be32(prot_key);
- prot_sentry->va = cpu_to_be64(prot_va);
- prot_sentry->stride = cpu_to_be16(prot_size);
-
- wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
- sizeof(*prot_sentry), 64);
- }
-
- *seg += wqe_size;
- *size += wqe_size / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- bsf = *seg;
- ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
- if (ret)
- return -EINVAL;
-
- *seg += sizeof(*bsf);
- *size += sizeof(*bsf) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- return 0;
-}
-
-static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- struct ib_mr *sig_mr, int access_flags,
- u32 size, u32 length, u32 pdn)
-{
- u32 sig_key = sig_mr->rkey;
- u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
-
- memset(seg, 0, sizeof(*seg));
-
- seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
- seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
- seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
- MLX5_MKEY_BSF_EN | pdn);
- seg->len = cpu_to_be64(length);
- seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
- seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
-}
-
-static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- u32 size)
-{
- memset(umr, 0, sizeof(*umr));
-
- umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
- umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
- umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
- umr->mkey_mask = sig_mkey_mask();
-}
-
-static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
- struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
-{
- const struct ib_reg_wr *wr = reg_wr(send_wr);
- struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
- struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
- struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
- u32 pdn = get_pd(qp)->pdn;
- u32 xlt_size;
- int region_len, ret;
-
- if (unlikely(send_wr->num_sge != 0) ||
- unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
- unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
- unlikely(!sig_mr->sig->sig_status_checked))
- return -EINVAL;
-
- /* length of the protected region, data + protection */
- region_len = pi_mr->ibmr.length;
-
- /**
- * KLM octoword size - if protection was provided
- * then we use strided block format (3 octowords),
- * else we use single KLM (1 octoword)
- **/
- if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
- xlt_size = 0x30;
- else
- xlt_size = sizeof(struct mlx5_klm);
-
- set_sig_umr_segment(*seg, xlt_size);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
- pdn);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
- cur_edge);
- if (ret)
- return ret;
-
- sig_mr->sig->sig_status_checked = false;
- return 0;
-}
-
-static int set_psv_wr(struct ib_sig_domain *domain,
- u32 psv_idx, void **seg, int *size)
-{
- struct mlx5_seg_set_psv *psv_seg = *seg;
-
- memset(psv_seg, 0, sizeof(*psv_seg));
- psv_seg->psv_num = cpu_to_be32(psv_idx);
- switch (domain->sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
- domain->sig.dif.app_tag);
- psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
- break;
- default:
- pr_err("Bad signature type (%d) is given.\n",
- domain->sig_type);
- return -EINVAL;
- }
-
- *seg += sizeof(*psv_seg);
- *size += sizeof(*psv_seg) / 16;
-
- return 0;
-}
-
-static int set_reg_wr(struct mlx5_ib_qp *qp,
- const struct ib_reg_wr *wr,
- void **seg, int *size, void **cur_edge,
- bool check_not_free)
-{
- struct mlx5_ib_mr *mr = to_mmr(wr->mr);
- struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
- struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
- int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
- bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
- bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
- u8 flags = 0;
-
- if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
- mlx5_ib_warn(to_mdev(qp->ibqp.device),
- "Fast update of %s for MR is disabled\n",
- (MLX5_CAP_GEN(dev->mdev,
- umr_modify_entity_size_disabled)) ?
- "entity size" :
- "atomic access");
- return -EINVAL;
- }
-
- if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
- mlx5_ib_warn(to_mdev(qp->ibqp.device),
- "Invalid IB_SEND_INLINE send flag\n");
- return -EINVAL;
- }
-
- if (check_not_free)
- flags |= MLX5_UMR_CHECK_NOT_FREE;
- if (umr_inline)
- flags |= MLX5_UMR_INLINE;
-
- set_reg_umr_seg(*seg, mr, flags, atomic);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- if (umr_inline) {
- memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
- mr_list_size);
- *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
- } else {
- set_reg_data_seg(*seg, mr, pd);
- *seg += sizeof(struct mlx5_wqe_data_seg);
- *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
- }
- return 0;
-}
-
-static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
-{
- set_linv_umr_seg(*seg);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
- set_linv_mkey_seg(*seg);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-}
-
-static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
-{
- __be32 *p = NULL;
- int i, j;
-
- pr_debug("dump WQE index %u:\n", idx);
- for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
- if ((i & 0xf) == 0) {
- p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
- pr_debug("WQBB at %p:\n", (void *)p);
- j = 0;
- idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
- }
- pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
- be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
- be32_to_cpu(p[j + 3]));
- }
-}
-
-static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
- struct mlx5_wqe_ctrl_seg **ctrl,
- const struct ib_send_wr *wr, unsigned int *idx,
- int *size, void **cur_edge, int nreq,
- bool send_signaled, bool solicited)
-{
- if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
- return -ENOMEM;
-
- *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
- *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
- *ctrl = *seg;
- *(uint32_t *)(*seg + 8) = 0;
- (*ctrl)->imm = send_ieth(wr);
- (*ctrl)->fm_ce_se = qp->sq_signal_bits |
- (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
- (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
-
- *seg += sizeof(**ctrl);
- *size = sizeof(**ctrl) / 16;
- *cur_edge = qp->sq.cur_edge;
-
- return 0;
-}
-
-static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
- struct mlx5_wqe_ctrl_seg **ctrl,
- const struct ib_send_wr *wr, unsigned *idx,
- int *size, void **cur_edge, int nreq)
-{
- return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
- wr->send_flags & IB_SEND_SIGNALED,
- wr->send_flags & IB_SEND_SOLICITED);
-}
-
-static void finish_wqe(struct mlx5_ib_qp *qp,
- struct mlx5_wqe_ctrl_seg *ctrl,
- void *seg, u8 size, void *cur_edge,
- unsigned int idx, u64 wr_id, int nreq, u8 fence,
- u32 mlx5_opcode)
-{
- u8 opmod = 0;
-
- ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
- mlx5_opcode | ((u32)opmod << 24));
- ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
- ctrl->fm_ce_se |= fence;
- if (unlikely(qp->wq_sig))
- ctrl->signature = wq_sig(ctrl);
-
- qp->sq.wrid[idx] = wr_id;
- qp->sq.w_list[idx].opcode = mlx5_opcode;
- qp->sq.wqe_head[idx] = qp->sq.head + nreq;
- qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
- qp->sq.w_list[idx].next = qp->sq.cur_post;
-
- /* We save the edge which was possibly updated during the WQE
- * construction, into SQ's cache.
- */
- seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
- qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
- get_sq_edge(&qp->sq, qp->sq.cur_post &
- (qp->sq.wqe_cnt - 1)) :
- cur_edge;
-}
-
-static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr, bool drain)
-{
- struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
- struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_core_dev *mdev = dev->mdev;
- struct ib_reg_wr reg_pi_wr;
- struct mlx5_ib_qp *qp;
- struct mlx5_ib_mr *mr;
- struct mlx5_ib_mr *pi_mr;
- struct mlx5_ib_mr pa_pi_mr;
- struct ib_sig_attrs *sig_attrs;
- struct mlx5_wqe_xrc_seg *xrc;
- struct mlx5_bf *bf;
- void *cur_edge;
- int uninitialized_var(size);
- unsigned long flags;
- unsigned idx;
- int err = 0;
- int num_sge;
- void *seg;
- int nreq;
- int i;
- u8 next_fence = 0;
- u8 fence;
-
- if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
- !drain)) {
- *bad_wr = wr;
- return -EIO;
- }
-
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
- return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
-
- qp = to_mqp(ibqp);
- bf = &qp->bf;
-
- spin_lock_irqsave(&qp->sq.lock, flags);
-
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
- mlx5_ib_warn(dev, "\n");
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- num_sge = wr->num_sge;
- if (unlikely(num_sge > qp->sq.max_gs)) {
- mlx5_ib_warn(dev, "\n");
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
- nreq);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- if (wr->opcode == IB_WR_REG_MR ||
- wr->opcode == IB_WR_REG_MR_INTEGRITY) {
- fence = dev->umr_fence;
- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
- } else {
- if (wr->send_flags & IB_SEND_FENCE) {
- if (qp->next_fence)
- fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
- else
- fence = MLX5_FENCE_MODE_FENCE;
- } else {
- fence = qp->next_fence;
- }
- }
-
- switch (ibqp->qp_type) {
- case IB_QPT_XRC_INI:
- xrc = seg;
- seg += sizeof(*xrc);
- size += sizeof(*xrc) / 16;
- /* fall through */
- case IB_QPT_RC:
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
- err = -ENOSYS;
- *bad_wr = wr;
- goto out;
-
- case IB_WR_LOCAL_INV:
- qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
- ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
- set_linv_wr(qp, &seg, &size, &cur_edge);
- num_sge = 0;
- break;
-
- case IB_WR_REG_MR:
- qp->sq.wr_data[idx] = IB_WR_REG_MR;
- ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
- err = set_reg_wr(qp, reg_wr(wr), &seg, &size,
- &cur_edge, true);
- if (err) {
- *bad_wr = wr;
- goto out;
- }
- num_sge = 0;
- break;
-
- case IB_WR_REG_MR_INTEGRITY:
- qp->sq.wr_data[idx] = IB_WR_REG_MR_INTEGRITY;
-
- mr = to_mmr(reg_wr(wr)->mr);
- pi_mr = mr->pi_mr;
-
- if (pi_mr) {
- memset(&reg_pi_wr, 0,
- sizeof(struct ib_reg_wr));
-
- reg_pi_wr.mr = &pi_mr->ibmr;
- reg_pi_wr.access = reg_wr(wr)->access;
- reg_pi_wr.key = pi_mr->ibmr.rkey;
-
- ctrl->imm = cpu_to_be32(reg_pi_wr.key);
- /* UMR for data + prot registration */
- err = set_reg_wr(qp, &reg_pi_wr, &seg,
- &size, &cur_edge,
- false);
- if (err) {
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size,
- cur_edge, idx, wr->wr_id,
- nreq, fence,
- MLX5_OPCODE_UMR);
-
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, &cur_edge,
- nreq);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
- } else {
- memset(&pa_pi_mr, 0,
- sizeof(struct mlx5_ib_mr));
- /* No UMR, use local_dma_lkey */
- pa_pi_mr.ibmr.lkey =
- mr->ibmr.pd->local_dma_lkey;
-
- pa_pi_mr.ndescs = mr->ndescs;
- pa_pi_mr.data_length = mr->data_length;
- pa_pi_mr.data_iova = mr->data_iova;
- if (mr->meta_ndescs) {
- pa_pi_mr.meta_ndescs =
- mr->meta_ndescs;
- pa_pi_mr.meta_length =
- mr->meta_length;
- pa_pi_mr.pi_iova = mr->pi_iova;
- }
-
- pa_pi_mr.ibmr.length = mr->ibmr.length;
- mr->pi_mr = &pa_pi_mr;
- }
- ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
- /* UMR for sig MR */
- err = set_pi_umr_wr(wr, qp, &seg, &size,
- &cur_edge);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, fence,
- MLX5_OPCODE_UMR);
-
- /*
- * SET_PSV WQEs are not signaled and solicited
- * on error
- */
- sig_attrs = mr->ibmr.sig_attrs;
- err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
- &size, &cur_edge, nreq, false,
- true);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
- err = set_psv_wr(&sig_attrs->mem,
- mr->sig->psv_memory.psv_idx,
- &seg, &size);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, next_fence,
- MLX5_OPCODE_SET_PSV);
-
- err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
- &size, &cur_edge, nreq, false,
- true);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
- err = set_psv_wr(&sig_attrs->wire,
- mr->sig->psv_wire.psv_idx,
- &seg, &size);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, next_fence,
- MLX5_OPCODE_SET_PSV);
-
- qp->next_fence =
- MLX5_FENCE_MODE_INITIATOR_SMALL;
- num_sge = 0;
- goto skip_psv;
-
- default:
- break;
- }
- break;
-
- case IB_QPT_UC:
- switch (wr->opcode) {
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
- break;
-
- default:
- break;
- }
- break;
-
- case IB_QPT_SMI:
- if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
- mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
- err = -EPERM;
- *bad_wr = wr;
- goto out;
- }
- /* fall through */
- case MLX5_IB_QPT_HW_GSI:
- set_datagram_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_datagram_seg);
- size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
-
- break;
- case IB_QPT_UD:
- set_datagram_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_datagram_seg);
- size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
-
- /* handle qp that supports ud offload */
- if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
- struct mlx5_wqe_eth_pad *pad;
-
- pad = seg;
- memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
- seg += sizeof(struct mlx5_wqe_eth_pad);
- size += sizeof(struct mlx5_wqe_eth_pad) / 16;
- set_eth_seg(wr, qp, &seg, &size, &cur_edge);
- handle_post_send_edge(&qp->sq, &seg, size,
- &cur_edge);
- }
- break;
- case MLX5_IB_QPT_REG_UMR:
- if (wr->opcode != MLX5_IB_WR_UMR) {
- err = -EINVAL;
- mlx5_ib_warn(dev, "bad opcode\n");
- goto out;
- }
- qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
- ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
- err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
- if (unlikely(err))
- goto out;
- seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
- set_reg_mkey_segment(seg, wr);
- seg += sizeof(struct mlx5_mkey_seg);
- size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
- break;
-
- default:
- break;
- }
-
- if (wr->send_flags & IB_SEND_INLINE && num_sge) {
- err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
- if (unlikely(err)) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- } else {
- for (i = 0; i < num_sge; i++) {
- handle_post_send_edge(&qp->sq, &seg, size,
- &cur_edge);
- if (likely(wr->sg_list[i].length)) {
- set_data_ptr_seg
- ((struct mlx5_wqe_data_seg *)seg,
- wr->sg_list + i);
- size += sizeof(struct mlx5_wqe_data_seg) / 16;
- seg += sizeof(struct mlx5_wqe_data_seg);
- }
- }
- }
-
- qp->next_fence = next_fence;
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
- fence, mlx5_ib_opcode[wr->opcode]);
-skip_psv:
- if (0)
- dump_wqe(qp, idx, size);
- }
-
-out:
- if (likely(nreq)) {
- qp->sq.head += nreq;
-
- /* Make sure that descriptors are written before
- * updating doorbell record and ringing the doorbell
- */
- wmb();
-
- qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
-
- /* Make sure doorbell record is visible to the HCA before
- * we hit doorbell */
- wmb();
-
- mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
- /* Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order.
- */
- bf->offset ^= bf->buf_size;
- }
-
- spin_unlock_irqrestore(&qp->sq.lock, flags);
-
- return err;
-}
-
-int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
-}
-
-static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
-{
- sig->signature = calc_sig(sig, size);
-}
-
-static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr, bool drain)
-{
- struct mlx5_ib_qp *qp = to_mqp(ibqp);
- struct mlx5_wqe_data_seg *scat;
- struct mlx5_rwqe_sig *sig;
- struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_core_dev *mdev = dev->mdev;
- unsigned long flags;
- int err = 0;
- int nreq;
- int ind;
- int i;
-
- if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
- !drain)) {
- *bad_wr = wr;
- return -EIO;
- }
-
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
- return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
-
- spin_lock_irqsave(&qp->rq.lock, flags);
-
- ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
-
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- if (unlikely(wr->num_sge > qp->rq.max_gs)) {
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
- if (qp->wq_sig)
- scat++;
-
- for (i = 0; i < wr->num_sge; i++)
- set_data_ptr_seg(scat + i, wr->sg_list + i);
-
- if (i < qp->rq.max_gs) {
- scat[i].byte_count = 0;
- scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
- scat[i].addr = 0;
- }
-
- if (qp->wq_sig) {
- sig = (struct mlx5_rwqe_sig *)scat;
- set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
- }
-
- qp->rq.wrid[ind] = wr->wr_id;
-
- ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
- }
+ /* resp.response_length is set in ECE supported flows only */
+ if (!err && resp.response_length &&
+ udata->outlen >= resp.response_length)
+ /* Return -EFAULT to the user and expect him to destroy QP. */
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
out:
- if (likely(nreq)) {
- qp->rq.head += nreq;
-
- /* Make sure that descriptors are written before
- * doorbell record.
- */
- wmb();
-
- *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
- }
-
- spin_unlock_irqrestore(&qp->rq.lock, flags);
-
+ mutex_unlock(&qp->mutex);
return err;
}
-int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
-}
-
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
switch (mlx5_state) {
@@ -5525,50 +4357,35 @@ static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
}
}
-static int to_ib_qp_access_flags(int mlx5_flags)
-{
- int ib_flags = 0;
-
- if (mlx5_flags & MLX5_QP_BIT_RRE)
- ib_flags |= IB_ACCESS_REMOTE_READ;
- if (mlx5_flags & MLX5_QP_BIT_RWE)
- ib_flags |= IB_ACCESS_REMOTE_WRITE;
- if (mlx5_flags & MLX5_QP_BIT_RAE)
- ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
-
- return ib_flags;
-}
-
static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
- struct rdma_ah_attr *ah_attr,
- struct mlx5_qp_path *path)
+ struct rdma_ah_attr *ah_attr, void *path)
{
+ int port = MLX5_GET(ads, path, vhca_port_num);
+ int static_rate;
memset(ah_attr, 0, sizeof(*ah_attr));
- if (!path->port || path->port > ibdev->num_ports)
+ if (!port || port > ibdev->num_ports)
return;
- ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port);
- rdma_ah_set_port_num(ah_attr, path->port);
- rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
+ rdma_ah_set_port_num(ah_attr, port);
+ rdma_ah_set_sl(ah_attr, MLX5_GET(ads, path, sl));
- rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
- rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
- rdma_ah_set_static_rate(ah_attr,
- path->static_rate ? path->static_rate - 5 : 0);
+ rdma_ah_set_dlid(ah_attr, MLX5_GET(ads, path, rlid));
+ rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
- if (path->grh_mlid & (1 << 7) ||
+ static_rate = MLX5_GET(ads, path, stat_rate);
+ rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
+ if (MLX5_GET(ads, path, grh) ||
ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
- u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
-
- rdma_ah_set_grh(ah_attr, NULL,
- tc_fl & 0xfffff,
- path->mgid_index,
- path->hop_limit,
- (tc_fl >> 20) & 0xff);
- rdma_ah_set_dgid_raw(ah_attr, path->rgid);
+ rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
+ MLX5_GET(ads, path, src_addr_index),
+ MLX5_GET(ads, path, hop_limit),
+ MLX5_GET(ads, path, tclass));
+ rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
}
}
@@ -5690,61 +4507,58 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct ib_qp_attr *qp_attr)
{
int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
- struct mlx5_qp_context *context;
- int mlx5_state;
+ void *qpc, *pri_path, *alt_path;
u32 *outb;
- int err = 0;
+ int err;
outb = kzalloc(outlen, GFP_KERNEL);
if (!outb)
return -ENOMEM;
- err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
- outlen);
+ err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen);
if (err)
goto out;
- /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
- context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
+ qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);
- mlx5_state = be32_to_cpu(context->flags) >> 28;
+ qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
+ if (MLX5_GET(qpc, qpc, state) == MLX5_QP_STATE_SQ_DRAINING)
+ qp_attr->sq_draining = 1;
- qp->state = to_ib_qp_state(mlx5_state);
- qp_attr->path_mtu = context->mtu_msgmax >> 5;
- qp_attr->path_mig_state =
- to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
- qp_attr->qkey = be32_to_cpu(context->qkey);
- qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
- qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
- qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
- qp_attr->qp_access_flags =
- to_ib_qp_access_flags(be32_to_cpu(context->params2));
+ qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu);
+ qp_attr->path_mig_state = to_ib_mig_state(MLX5_GET(qpc, qpc, pm_state));
+ qp_attr->qkey = MLX5_GET(qpc, qpc, q_key);
+ qp_attr->rq_psn = MLX5_GET(qpc, qpc, next_rcv_psn);
+ qp_attr->sq_psn = MLX5_GET(qpc, qpc, next_send_psn);
+ qp_attr->dest_qp_num = MLX5_GET(qpc, qpc, remote_qpn);
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
- to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
- to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
- qp_attr->alt_pkey_index =
- be16_to_cpu(context->alt_path.pkey_index);
- qp_attr->alt_port_num =
- rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
- }
+ if (MLX5_GET(qpc, qpc, rre))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(qpc, qpc, rwe))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(qpc, qpc, rae))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_ATOMIC;
- qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
- qp_attr->port_num = context->pri_path.port;
+ qp_attr->max_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_sra_max);
+ qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max);
+ qp_attr->min_rnr_timer = MLX5_GET(qpc, qpc, min_rnr_nak);
+ qp_attr->retry_cnt = MLX5_GET(qpc, qpc, retry_count);
+ qp_attr->rnr_retry = MLX5_GET(qpc, qpc, rnr_retry);
- /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
- qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
- qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
+ to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
+ qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
+ qp_attr->alt_port_num = MLX5_GET(ads, alt_path, vhca_port_num);
+ }
- qp_attr->max_dest_rd_atomic =
- 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
- qp_attr->min_rnr_timer =
- (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
- qp_attr->timeout = context->pri_path.ackto_lt >> 3;
- qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
- qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
- qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
+ qp_attr->pkey_index = MLX5_GET(ads, pri_path, pkey_index);
+ qp_attr->port_num = MLX5_GET(ads, pri_path, vhca_port_num);
+ qp_attr->timeout = MLX5_GET(ads, pri_path, ack_timeout);
+ qp_attr->alt_timeout = MLX5_GET(ads, alt_path, ack_timeout);
out:
kfree(outb);
@@ -5778,7 +4592,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
if (!out)
return -ENOMEM;
- err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
+ err = mlx5_core_dct_query(dev, dct, out, outlen);
if (err)
goto out;
@@ -5835,14 +4649,14 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
memset(qp_attr, 0, sizeof(*qp_attr));
- if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
+ if (unlikely(qp->type == MLX5_IB_QPT_DCT))
return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
qp_attr_mask, qp_init_attr);
mutex_lock(&qp->mutex);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
if (err)
goto out;
@@ -5876,18 +4690,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
- qp_init_attr->create_flags = 0;
- if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
- qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
-
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
- qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
- if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
- qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
- if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
- qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
- if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- qp_init_attr->create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;
+ qp_init_attr->create_flags = qp->flags;
qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
@@ -5964,7 +4767,7 @@ static int set_delay_drop(struct mlx5_ib_dev *dev)
if (dev->delay_drop.activate)
goto out;
- err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
+ err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
if (err)
goto out;
@@ -6070,13 +4873,13 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
}
rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
- err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
err = set_delay_drop(dev);
if (err) {
mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
err);
- mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
} else {
rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
}
@@ -6258,7 +5061,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
return &rwq->ibwq;
err_copy:
- mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
err_user_rq:
destroy_user_rq(dev, pd, rwq, udata);
err:
@@ -6271,7 +5074,7 @@ void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
struct mlx5_ib_dev *dev = to_mdev(wq->device);
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
- mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
destroy_user_rq(dev, wq->pd, rwq, udata);
kfree(rwq);
}
@@ -6449,7 +5252,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
"Receive WQ counters are not supported on current FW\n");
}
- err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
+ err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
if (!err)
rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
@@ -6548,7 +5351,7 @@ void mlx5_ib_drain_sq(struct ib_qp *qp)
sdrain.cqe.done = mlx5_ib_drain_qp_done;
init_completion(&sdrain.done);
- ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
+ ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return;
@@ -6578,7 +5381,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
rdrain.cqe.done = mlx5_ib_drain_qp_done;
init_completion(&rdrain.done);
- ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
+ ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
new file mode 100644
index 000000000000..82ea2b94dfa6
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_QP_H
+#define _MLX5_IB_QP_H
+
+#include "mlx5_ib.h"
+
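+/* QP/DCT/SQ/RQ/XRCD command helpers scoped to the IB driver: all of them
+ * take struct mlx5_ib_dev instead of struct mlx5_core_dev.
+ */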
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev);
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev);
+
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *qp,
+ u32 *in, int inlen, u32 *out, int outlen);
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out);
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece);
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct);
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
+
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
+
+void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *sq);
+
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq);
+
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type);
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
+
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn);
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
+#endif /* _MLX5_IB_QP_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/infiniband/hw/mlx5/qpc.c
index c3aea4cc2fff..c19d91d6dce8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -1,46 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/gfp.h>
-#include <linux/export.h>
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/transobj.h>
+#include "mlx5_ib.h"
+#include "qp.h"
-#include "mlx5_core.h"
-#include "lib/eq.h"
-
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct);
static struct mlx5_core_rsc_common *
@@ -124,11 +93,9 @@ static int rsc_event_notifier(struct notifier_block *nb,
{
struct mlx5_core_rsc_common *common;
struct mlx5_qp_table *table;
- struct mlx5_core_dev *dev;
struct mlx5_core_dct *dct;
u8 event_type = (u8)type;
struct mlx5_core_qp *qp;
- struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
u32 rsn;
@@ -155,22 +122,12 @@ static int rsc_event_notifier(struct notifier_block *nb,
}
table = container_of(nb, struct mlx5_qp_table, nb);
- priv = container_of(table, struct mlx5_priv, qp_table);
- dev = container_of(priv, struct mlx5_core_dev, priv);
-
- mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);
-
common = mlx5_get_rsc(table, rsn);
- if (!common) {
- mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
+ if (!common)
return NOTIFY_OK;
- }
- if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
- mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
- event_type, rsn);
+ if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
goto out;
- }
switch (common->res) {
case MLX5_RES_QP:
@@ -185,7 +142,7 @@ static int rsc_event_notifier(struct notifier_block *nb,
complete(&dct->drained);
break;
default:
- mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
+ break;
}
out:
mlx5_core_put_rsc(common);
@@ -193,11 +150,10 @@ out:
return NOTIFY_OK;
}
-static int create_resource_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- int rsc_type)
+static int create_resource_common(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp, int rsc_type)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
int err;
qp->common.res = rsc_type;
@@ -216,10 +172,10 @@ static int create_resource_common(struct mlx5_core_dev *dev,
return 0;
}
-static void destroy_resource_common(struct mlx5_core_dev *dev,
+static void destroy_resource_common(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *qp)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
unsigned long flags;
spin_lock_irqsave(&table->lock, flags);
@@ -230,24 +186,19 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
wait_for_completion(&qp->common.free);
}
-static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct, bool need_cleanup)
{
- u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
struct mlx5_core_qp *qp = &dct->mqp;
int err;
err = mlx5_core_drain_dct(dev, dct);
if (err) {
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto destroy;
- } else {
- mlx5_core_warn(
- dev, "failed drain DCT 0x%x with error 0x%x\n",
- qp->qpn, err);
- return err;
- }
+
+ return err;
}
wait_for_completion(&dct->drained);
destroy:
@@ -256,15 +207,12 @@ destroy:
MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
MLX5_SET(destroy_dct_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
return err;
}
-int mlx5_core_create_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *dct,
- u32 *in, int inlen,
- u32 *out, int outlen)
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *in, int inlen, u32 *out, int outlen)
{
struct mlx5_core_qp *qp = &dct->mqp;
int err;
@@ -272,11 +220,9 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
init_completion(&dct->drained);
MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
- if (err) {
- mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+ if (err)
return err;
- }
qp->qpn = MLX5_GET(create_dct_out, out, dctn);
qp->uid = MLX5_GET(create_dct_in, in, uid);
@@ -289,108 +235,83 @@ err_cmd:
_mlx5_core_destroy_dct(dev, dct, false);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- u32 *in, int inlen)
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out)
{
- u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
- u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
- u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
+ u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
int err;
MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
+ MLX5_ST_SZ_BYTES(create_qp_out));
if (err)
return err;
qp->uid = MLX5_GET(create_qp_in, in, uid);
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
- mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
err = create_resource_common(dev, qp, MLX5_RES_QP);
if (err)
goto err_cmd;
- err = mlx5_debug_qp_add(dev, qp);
- if (err)
- mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
- qp->qpn);
-
- atomic_inc(&dev->num_qps);
+ mlx5_debug_qp_add(dev->mdev, qp);
return 0;
err_cmd:
- memset(din, 0, sizeof(din));
- memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
MLX5_SET(destroy_qp_in, din, uid, qp->uid);
- mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+ mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct)
{
- u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
struct mlx5_core_qp *qp = &dct->mqp;
MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
MLX5_SET(drain_dct_in, in, uid, qp->uid);
- return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
}
-int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct)
{
return _mlx5_core_destroy_dct(dev, dct, true);
}
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp)
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
{
- u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
- int err;
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
- mlx5_debug_qp_remove(dev, qp);
+ mlx5_debug_qp_remove(dev->mdev, qp);
destroy_resource_common(dev, qp);
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
MLX5_SET(destroy_qp_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- atomic_dec(&dev->num_qps);
+ mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
u32 timeout_usec)
{
- u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};
MLX5_SET(set_delay_drop_params_in, in, opcode,
MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
timeout_usec / 100);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
}
-EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
struct mbox_info {
u32 *in;
@@ -420,9 +341,30 @@ static void mbox_free(struct mbox_info *mbox)
kfree(mbox->out);
}
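+/* ECE (Enhanced Connection Establishment) is returned only by the INIT2RTR,
+ * RTR2RTS and RTS2RTS commands; other opcodes yield 0.
+ */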
+static int get_ece_from_mbox(void *out, u16 opcode)
+{
+ int ece = 0;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ ece = MLX5_GET(init2rtr_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ ece = MLX5_GET(rtr2rts_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ ece = MLX5_GET(rts2rts_qp_out, out, ece);
+ break;
+ default:
+ break;
+ }
+
+ return ece;
+}
+
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
u32 opt_param_mask, void *qpc,
- struct mbox_info *mbox, u16 uid)
+ struct mbox_info *mbox, u16 uid, u32 ece)
{
mbox->out = NULL;
mbox->in = NULL;
@@ -470,18 +412,21 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_RTR2RTS_QP:
if (MBOX_ALLOC(mbox, rtr2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_RTS2RTS_QP:
if (MBOX_ALLOC(mbox, rts2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_SQERR2RTS_QP:
if (MBOX_ALLOC(mbox, sqerr2rts_qp))
@@ -496,120 +441,116 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
opt_param_mask, qpc, uid);
break;
default:
- mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
- opcode, qpn);
return -EINVAL;
}
return 0;
}
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
- u32 opt_param_mask, void *qpc,
- struct mlx5_core_qp *qp)
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece)
{
struct mbox_info mbox;
int err;
- err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
- opt_param_mask, qpc, &mbox, qp->uid);
+ err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
+ qpc, &mbox, qp->uid, (ece) ? *ece : 0);
if (err)
return err;
- err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
+ err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
+ mbox.outlen);
+
+ if (ece)
+ *ece = get_ece_from_mbox(mbox.out, opcode);
+
mbox_free(&mbox);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
-void mlx5_init_qp_table(struct mlx5_core_dev *dev)
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
- memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
- mlx5_qp_debugfs_init(dev);
+ mlx5_qp_debugfs_init(dev->mdev);
table->nb.notifier_call = rsc_event_notifier;
- mlx5_notifier_register(dev, &table->nb);
+ mlx5_notifier_register(dev->mdev, &table->nb);
+
+ return 0;
}
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
- mlx5_notifier_unregister(dev, &table->nb);
- mlx5_qp_debugfs_cleanup(dev);
+ mlx5_notifier_unregister(dev->mdev, &table->nb);
+ mlx5_qp_debugfs_cleanup(dev->mdev);
}
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
MLX5_SET(query_qp_in, in, qpn, qp->qpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
-int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
struct mlx5_core_qp *qp = &dct->mqp;
MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
MLX5_SET(query_dct_in, in, dctn, qp->qpn);
- return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)out, outlen);
+ return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
+ outlen);
}
-EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
{
- u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
int err;
MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
if (!err)
*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
-static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
+static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
MLX5_SET(destroy_rq_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
}
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq)
{
int err;
u32 rqn;
- err = mlx5_core_create_rq(dev, in, inlen, &rqn);
+ err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
if (err)
return err;
@@ -626,39 +567,37 @@ err_destroy_rq:
return err;
}
-EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *rq)
{
destroy_resource_common(dev, rq);
destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
-EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
-static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
+static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
MLX5_SET(destroy_sq_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
}
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq)
{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
int err;
- u32 sqn;
- err = mlx5_core_create_sq(dev, in, inlen, &sqn);
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
if (err)
return err;
+ sq->qpn = MLX5_GET(create_sq_out, out, sqn);
sq->uid = MLX5_GET(create_sq_in, in, uid);
- sq->qpn = sqn;
err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err)
goto err_destroy_sq;
@@ -670,68 +609,25 @@ err_destroy_sq:
return err;
}
-EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *sq)
{
destroy_resource_common(dev, sq);
destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
-EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
-
-int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
-{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
- int err;
-
- MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *counter_id = MLX5_GET(alloc_q_counter_out, out,
- counter_set_id);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
-
-int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
-{
- u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
-
- MLX5_SET(dealloc_q_counter_in, in, opcode,
- MLX5_CMD_OP_DEALLOC_Q_COUNTER);
- MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
-
-int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
- int reset, void *out, int out_size)
-{
- u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
-
- MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
- MLX5_SET(query_q_counter_in, in, clear, reset);
- MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
-struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
int res_num,
enum mlx5_res_type res_type)
{
u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
return mlx5_get_rsc(table, rsn);
}
-EXPORT_SYMBOL_GPL(mlx5_core_res_hold);
void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
mlx5_core_put_rsc(res);
}
-EXPORT_SYMBOL_GPL(mlx5_core_res_put);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index b1a8a9175040..6d1ff13d2283 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -310,12 +310,18 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
srq->msrq.event = mlx5_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
- if (udata)
- if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
+ if (udata) {
+ struct mlx5_ib_create_srq_resp resp = {
+ .srqn = srq->msrq.srqn,
+ };
+
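+		/* Copy at most udata->outlen bytes so that older userspace
+		 * providing a shorter response struct is not overrun.
+		 */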
+ if (ib_copy_to_udata(udata, &resp, min(udata->outlen,
+ sizeof(resp)))) {
mlx5_ib_dbg(dev, "copy to user failed\n");
err = -EFAULT;
goto err_core;
}
+ }
init_attr->attr.max_wr = srq->msrq.max - 1;
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 8fc3630a9d4c..6f5eadc4d183 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -5,9 +5,9 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_ib.h"
#include "srq.h"
+#include "qp.h"
static int get_pas_size(struct mlx5_srq_attr *in)
{
@@ -132,38 +132,33 @@ static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};
- MLX5_SET(destroy_srq_in, srq_in, opcode,
- MLX5_CMD_OP_DESTROY_SRQ);
- MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
+ MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
+ MLX5_SET(destroy_srq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
- sizeof(srq_out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
}
static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
- MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
- MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
- MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
- MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
- MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+ MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
- sizeof(srq_out));
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}
static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
u32 *srq_out;
void *srqc;
int err;
@@ -172,11 +167,9 @@ static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
if (!srq_out)
return -ENOMEM;
- MLX5_SET(query_srq_in, srq_in, opcode,
- MLX5_CMD_OP_QUERY_SRQ);
- MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
- MLX5_ST_SZ_BYTES(query_srq_out));
+ MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
+ MLX5_SET(query_srq_in, in, srqn, srq->srqn);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
if (err)
goto out;
@@ -234,39 +227,35 @@ out:
static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
- u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
- MLX5_CMD_OP_DESTROY_XRC_SRQ);
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
}
static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
u16 lwm)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
- u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
}
static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
u32 *xrcsrq_out;
void *xrc_srqc;
int err;
@@ -274,14 +263,11 @@ static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
if (!xrcsrq_out)
return -ENOMEM;
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
- MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
- MLX5_CMD_OP_QUERY_XRC_SRQ);
- MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
if (err)
goto out;
@@ -341,13 +327,12 @@ out:
static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
}
static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
@@ -384,7 +369,7 @@ static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
- err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+ err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);
out:
kvfree(in);
@@ -414,7 +399,7 @@ static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
if (err)
goto out;
@@ -477,36 +462,34 @@ static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};
MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
- MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
+ MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
}
static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq,
u16 lwm)
{
- u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
- MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
- MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
- MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
MLX5_SET(arm_rq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}
static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
u32 *xrq_out;
int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
void *xrqc;
@@ -519,7 +502,7 @@ static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
if (err)
goto out;
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
new file mode 100644
index 000000000000..2c6df1c43b55
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -0,0 +1,1504 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/driver.h>
+#include "wr.h"
+
+static const u32 mlx5_ib_opcode[] = {
+ [IB_WR_SEND] = MLX5_OPCODE_SEND,
+ [IB_WR_LSO] = MLX5_OPCODE_LSO,
+ [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
+ [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
+ [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
+ [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
+ [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
+ [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
+ [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
+ [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
+ [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
+};
+
+/* handle_post_send_edge - Check if we have reached the SQ edge. If so, move
+ * to the next edge and refresh the address translation for the current WQE
+ * position.
+ * @sq: SQ buffer.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @cur_edge: Updated current edge.
+ */
+static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
+ u32 wqe_sz, void **cur_edge)
+{
+ u32 idx;
+
+ if (likely(*seg != *cur_edge))
+ return;
+
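+	/* wqe_sz is in 16B units; >> 2 converts it to 64B WQE basic blocks,
+	 * which is what cur_post and wqe_cnt count. wqe_cnt is a power of
+	 * two, so wrapping is done with the mask.
+	 */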
+ idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
+ *cur_edge = get_sq_edge(sq, idx);
+
+ *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
+}
+
+/* memcpy_send_wqe - copy data from src to the WQE and update the relevant
+ * WQ's pointers. At the end @seg is aligned to 16B regardless of the copied
+ * size.
+ * @sq: SQ buffer.
+ * @cur_edge: Updated current edge.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @src: Pointer to copy from.
+ * @n: Number of bytes to copy.
+ */
+static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
+ void **seg, u32 *wqe_sz, const void *src,
+ size_t n)
+{
+ while (likely(n)) {
+ size_t leftlen = *cur_edge - *seg;
+ size_t copysz = min_t(size_t, leftlen, n);
+ size_t stride;
+
+ memcpy(*seg, src, copysz);
+
+ n -= copysz;
+ src += copysz;
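+		/* Pad only the final chunk to 16B so *seg ends up 16B-aligned. */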
+ stride = !n ? ALIGN(copysz, 16) : copysz;
+ *seg += stride;
+ *wqe_sz += stride >> 4;
+ handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
+ }
+}
+
+static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq,
+ struct ib_cq *ib_cq)
+{
+ struct mlx5_ib_cq *cq;
+ unsigned int cur;
+
+ cur = wq->head - wq->tail;
+ if (likely(cur + nreq < wq->max_post))
+ return 0;
+
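+	/* Possibly full: recheck under the CQ lock to synchronize with
+	 * completion processing that advances wq->tail.
+	 */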
+ cq = to_mcq(ib_cq);
+ spin_lock(&cq->lock);
+ cur = wq->head - wq->tail;
+ spin_unlock(&cq->lock);
+
+ return cur + nreq >= wq->max_post;
+}
+
+static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
+ u64 remote_addr, u32 rkey)
+{
+ rseg->raddr = cpu_to_be64(remote_addr);
+ rseg->rkey = cpu_to_be32(rkey);
+ rseg->reserved = 0;
+}
+
+static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+ void **seg, int *size, void **cur_edge)
+{
+ struct mlx5_wqe_eth_seg *eseg = *seg;
+
+ memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
+
+ if (wr->send_flags & IB_SEND_IP_CSUM)
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+
+ if (wr->opcode == IB_WR_LSO) {
+ struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
+ size_t left, copysz;
+ void *pdata = ud_wr->header;
+ size_t stride;
+
+ left = ud_wr->hlen;
+ eseg->mss = cpu_to_be16(ud_wr->mss);
+ eseg->inline_hdr.sz = cpu_to_be16(left);
+
+		/* memcpy_send_wqe should get a 16B-aligned address. Hence, we
+		 * first copy up to the current edge and then, if needed,
+		 * continue with memcpy_send_wqe.
+		 */
+ copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
+ left);
+ memcpy(eseg->inline_hdr.start, pdata, copysz);
+ stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
+ sizeof(eseg->inline_hdr.start) + copysz, 16);
+ *size += stride / 16;
+ *seg += stride;
+
+ if (copysz < left) {
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ left -= copysz;
+ pdata += copysz;
+ memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
+ left);
+ }
+
+ return;
+ }
+
+ *seg += sizeof(struct mlx5_wqe_eth_seg);
+ *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
+}
+
+static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
+ const struct ib_send_wr *wr)
+{
+ memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
+ dseg->av.dqp_dct =
+ cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
+ dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
+}
+
+static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
+{
+ dseg->byte_count = cpu_to_be32(sg->length);
+ dseg->lkey = cpu_to_be32(sg->lkey);
+ dseg->addr = cpu_to_be64(sg->addr);
+}
+
+static u64 get_xlt_octo(u64 bytes)
+{
+ return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
+ MLX5_IB_UMR_OCTOWORD;
+}
+
+static __be64 frwr_mkey_mask(bool atomic)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE;
+
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 sig_mkey_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_SIGERR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE |
+ MLX5_MKEY_MASK_BSF_EN;
+
+ return cpu_to_be64(result);
+}
+
+static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
+ struct mlx5_ib_mr *mr, u8 flags, bool atomic)
+{
+ int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = flags;
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
+ umr->mkey_mask = frwr_mkey_mask(atomic);
+}
+
+static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
+{
+ memset(umr, 0, sizeof(*umr));
+ umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr->flags = MLX5_UMR_INLINE;
+}
+
+static __be64 get_umr_enable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_disable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_translation_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_access_mask(int atomic)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW;
+
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_pd_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_PD;
+
+ return cpu_to_be64(result);
+}
+
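+/* Refuse mkey mask bits that the HCA caps forbid modifying via UMR
+ * (entity size, atomic).
+ */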
+static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
+{
+ if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
+ (mask & MLX5_MKEY_MASK_A &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
+ return -EPERM;
+ return 0;
+}
+
+static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
+ struct mlx5_wqe_umr_ctrl_seg *umr,
+ const struct ib_send_wr *wr, int atomic)
+{
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
+
+ memset(umr, 0, sizeof(*umr));
+
+ if (!umrwr->ignore_free_state) {
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ /* fail if free */
+ umr->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ }
+
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
+ u64 offset = get_xlt_octo(umrwr->offset);
+
+ umr->xlt_offset = cpu_to_be16(offset & 0xffff);
+ umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
+ umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+ }
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
+ umr->mkey_mask |= get_umr_update_translation_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
+ umr->mkey_mask |= get_umr_update_access_mask(atomic);
+ umr->mkey_mask |= get_umr_update_pd_mask();
+ }
+ if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
+ umr->mkey_mask |= get_umr_enable_mr_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
+ umr->mkey_mask |= get_umr_disable_mr_mask();
+
+ if (!wr->num_sge)
+ umr->flags |= MLX5_UMR_INLINE;
+
+ return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
+}
+
+static u8 get_umr_flags(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
+ MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
+}
+
+static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
+ struct mlx5_ib_mr *mr,
+ u32 key, int access)
+{
+ int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ seg->log2_page_size = ilog2(mr->ibmr.page_size);
+ else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
+ /* KLMs take twice the size of MTTs */
+ ndescs *= 2;
+
+ seg->flags = get_umr_flags(access) | mr->access_mode;
+ seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+ seg->start_addr = cpu_to_be64(mr->ibmr.iova);
+ seg->len = cpu_to_be64(mr->ibmr.length);
+ seg->xlt_oct_size = cpu_to_be32(ndescs);
+}
+
+static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
+{
+ memset(seg, 0, sizeof(*seg));
+ seg->status = MLX5_MKEY_STATUS_FREE;
+}
+
+static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
+ const struct ib_send_wr *wr)
+{
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
+
+ memset(seg, 0, sizeof(*seg));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
+ seg->status = MLX5_MKEY_STATUS_FREE;
+
+ seg->flags = convert_access(umrwr->access_flags);
+ if (umrwr->pd)
+ seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
+ !umrwr->length)
+ seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
+
+ seg->start_addr = cpu_to_be64(umrwr->virt_addr);
+ seg->len = cpu_to_be64(umrwr->length);
+ seg->log2_page_size = umrwr->page_shift;
+ seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
+ mlx5_mkey_variant(umrwr->mkey));
+}
+
+static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
+ struct mlx5_ib_mr *mr,
+ struct mlx5_ib_pd *pd)
+{
+ int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
+
+ dseg->addr = cpu_to_be64(mr->desc_map);
+ dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
+ dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
+}
+
+static __be32 send_ieth(const struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return wr->ex.imm_data;
+
+ case IB_WR_SEND_WITH_INV:
+ return cpu_to_be32(wr->ex.invalidate_rkey);
+
+ default:
+ return 0;
+ }
+}
+
+static u8 calc_sig(void *wqe, int size)
+{
+ u8 *p = wqe;
+ u8 res = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ res ^= p[i];
+
+ return ~res;
+}
+
+static u8 wq_sig(void *wqe)
+{
+ return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
+}
+
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **wqe, int *wqe_sz, void **cur_edge)
+{
+ struct mlx5_wqe_inline_seg *seg;
+ size_t offset;
+ int inl = 0;
+ int i;
+
+ seg = *wqe;
+ *wqe += sizeof(*seg);
+ offset = sizeof(*seg);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ size_t len = wr->sg_list[i].length;
+ void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
+
+ inl += len;
+
+ if (unlikely(inl > qp->max_inline_data))
+ return -ENOMEM;
+
+ while (likely(len)) {
+ size_t leftlen;
+ size_t copysz;
+
+ handle_post_send_edge(&qp->sq, wqe,
+ *wqe_sz + (offset >> 4),
+ cur_edge);
+
+ leftlen = *cur_edge - *wqe;
+ copysz = min_t(size_t, leftlen, len);
+
+ memcpy(*wqe, addr, copysz);
+ len -= copysz;
+ addr += copysz;
+ *wqe += copysz;
+ offset += copysz;
+ }
+ }
+
+ seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+
+ *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
+
+ return 0;
+}
+
+static u16 prot_field_size(enum ib_signature_type type)
+{
+ switch (type) {
+ case IB_SIG_TYPE_T10_DIF:
+ return MLX5_DIF_SIZE;
+ default:
+ return 0;
+ }
+}
+
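+/* Device encoding of a signature block size; unrecognized sizes map to 0. */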
+static u8 bs_selector(int block_size)
+{
+ switch (block_size) {
+ case 512: return 0x1;
+ case 520: return 0x2;
+ case 4096: return 0x3;
+ case 4160: return 0x4;
+ case 1073741824: return 0x5;
+ default: return 0;
+ }
+}
+
+static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
+ struct mlx5_bsf_inl *inl)
+{
+ /* Valid inline section and allow BSF refresh */
+ inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
+ MLX5_BSF_REFRESH_DIF);
+ inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
+ inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
+ /* repeating block */
+ inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
+ inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
+ MLX5_DIF_CRC : MLX5_DIF_IPCS;
+
+ if (domain->sig.dif.ref_remap)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
+
+ if (domain->sig.dif.app_escape) {
+ if (domain->sig.dif.ref_escape)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
+ else
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
+ }
+
+ inl->dif_app_bitmask_check =
+ cpu_to_be16(domain->sig.dif.apptag_check_mask);
+}
+
+static int mlx5_set_bsf(struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_bsf *bsf, u32 data_size)
+{
+ struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
+ struct mlx5_bsf_basic *basic = &bsf->basic;
+ struct ib_sig_domain *mem = &sig_attrs->mem;
+ struct ib_sig_domain *wire = &sig_attrs->wire;
+
+ memset(bsf, 0, sizeof(*bsf));
+
+ /* Basic + Extended + Inline */
+ basic->bsf_size_sbs = 1 << 7;
+ /* Input domain check byte mask */
+ basic->check_byte_mask = sig_attrs->check_mask;
+ basic->raw_data_size = cpu_to_be32(data_size);
+
+ /* Memory domain */
+ switch (sig_attrs->mem.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+ basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
+ mlx5_fill_inl_bsf(mem, &bsf->m_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Wire domain */
+ switch (sig_attrs->wire.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
+ mem->sig_type == wire->sig_type) {
+ /* Same block structure */
+ basic->bsf_size_sbs |= 1 << 4;
+ if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
+ basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
+ if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
+ if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
+ } else
+ basic->wire.bs_selector =
+ bs_selector(wire->sig.dif.pi_interval);
+
+ basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
+ mlx5_fill_inl_bsf(wire, &bsf->w_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_sig_data_segment(const struct ib_send_wr *send_wr,
+ struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ struct mlx5_bsf *bsf;
+ u32 data_len;
+ u32 data_key;
+ u64 data_va;
+ u32 prot_len = 0;
+ u32 prot_key = 0;
+ u64 prot_va = 0;
+ bool prot = false;
+ int ret;
+ int wqe_size;
+ struct mlx5_ib_mr *mr = to_mmr(sig_mr);
+ struct mlx5_ib_mr *pi_mr = mr->pi_mr;
+
+ data_len = pi_mr->data_length;
+ data_key = pi_mr->ibmr.lkey;
+ data_va = pi_mr->data_iova;
+ if (pi_mr->meta_ndescs) {
+ prot_len = pi_mr->meta_length;
+ prot_key = pi_mr->ibmr.lkey;
+ prot_va = pi_mr->pi_iova;
+ prot = true;
+ }
+
+ if (!prot || (data_key == prot_key && data_va == prot_va &&
+ data_len == prot_len)) {
+ /*
+ * The source domain carries no signature information, or data and
+ * protection are interleaved in memory, so a single KLM followed
+ * by the BSF is enough:
+ * ------------------
+ * | data_klm |
+ * ------------------
+ * | BSF |
+ * ------------------
+ */
+ struct mlx5_klm *data_klm = *seg;
+
+ data_klm->bcount = cpu_to_be32(data_len);
+ data_klm->key = cpu_to_be32(data_key);
+ data_klm->va = cpu_to_be64(data_va);
+ wqe_size = ALIGN(sizeof(*data_klm), 64);
+ } else {
+ /*
+ * The source domain carries signature information, so a strided
+ * block format must be constructed:
+ * ---------------------------
+ * | stride_block_ctrl |
+ * ---------------------------
+ * | data_klm |
+ * ---------------------------
+ * | prot_klm |
+ * ---------------------------
+ * | BSF |
+ * ---------------------------
+ */
+ struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
+ struct mlx5_stride_block_entry *data_sentry;
+ struct mlx5_stride_block_entry *prot_sentry;
+ u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
+ int prot_size;
+
+ sblock_ctrl = *seg;
+ data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
+ prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
+
+ prot_size = prot_field_size(sig_attrs->mem.sig_type);
+ if (!prot_size) {
+ pr_err("Bad block size given: %u\n", block_size);
+ return -EINVAL;
+ }
+ sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
+ prot_size);
+ sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
+ sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
+ sblock_ctrl->num_entries = cpu_to_be16(2);
+
+ data_sentry->bcount = cpu_to_be16(block_size);
+ data_sentry->key = cpu_to_be32(data_key);
+ data_sentry->va = cpu_to_be64(data_va);
+ data_sentry->stride = cpu_to_be16(block_size);
+
+ prot_sentry->bcount = cpu_to_be16(prot_size);
+ prot_sentry->key = cpu_to_be32(prot_key);
+ prot_sentry->va = cpu_to_be64(prot_va);
+ prot_sentry->stride = cpu_to_be16(prot_size);
+
+ wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
+ sizeof(*prot_sentry), 64);
+ }
+
+ *seg += wqe_size;
+ *size += wqe_size / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ bsf = *seg;
+ ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
+ if (ret)
+ return -EINVAL;
+
+ *seg += sizeof(*bsf);
+ *size += sizeof(*bsf) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ return 0;
+}
+
+static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
+ struct ib_mr *sig_mr, int access_flags,
+ u32 size, u32 length, u32 pdn)
+{
+ u32 sig_key = sig_mr->rkey;
+ u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
+ seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
+ MLX5_MKEY_BSF_EN | pdn);
+ seg->len = cpu_to_be64(length);
+ seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
+ seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+}
+
+static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
+ u32 size)
+{
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
+ umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
+ umr->mkey_mask = sig_mkey_mask();
+}
+
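+/*
+ * Build the UMR that binds the PI (protection information) MR to the
+ * signature MR: a UMR control segment, an mkey segment, then the data
+ * layout and BSF written by set_sig_data_segment().
+ */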
+static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ const struct ib_reg_wr *wr = reg_wr(send_wr);
+ struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
+ struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
+ struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
+ u32 pdn = to_mpd(qp->ibqp.pd)->pdn;
+ u32 xlt_size;
+ int region_len, ret;
+
+ if (unlikely(send_wr->num_sge != 0) ||
+ unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
+ unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
+ unlikely(!sig_mr->sig->sig_status_checked))
+ return -EINVAL;
+
+ /* length of the protected region, data + protection */
+ region_len = pi_mr->ibmr.length;
+
+ /*
+ * KLM octoword size: if protection was provided we use the strided
+ * block format (3 octowords), otherwise a single KLM (1 octoword).
+ */
+ if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
+ xlt_size = 0x30;
+ else
+ xlt_size = sizeof(struct mlx5_klm);
+
+ set_sig_umr_segment(*seg, xlt_size);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
+ pdn);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
+ cur_edge);
+ if (ret)
+ return ret;
+
+ sig_mr->sig->sig_status_checked = false;
+ return 0;
+}
+
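+/*
+ * Build a SET_PSV segment that loads a PSV with the domain's initial
+ * block-guard, application-tag and reference-tag values.
+ */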
+static int set_psv_wr(struct ib_sig_domain *domain,
+ u32 psv_idx, void **seg, int *size)
+{
+ struct mlx5_seg_set_psv *psv_seg = *seg;
+
+ memset(psv_seg, 0, sizeof(*psv_seg));
+ psv_seg->psv_num = cpu_to_be32(psv_idx);
+ switch (domain->sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
+ domain->sig.dif.app_tag);
+ psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
+ break;
+ default:
+ pr_err("Bad signature type (%d) is given.\n",
+ domain->sig_type);
+ return -EINVAL;
+ }
+
+ *seg += sizeof(*psv_seg);
+ *size += sizeof(*psv_seg) / 16;
+
+ return 0;
+}
+
+static int set_reg_wr(struct mlx5_ib_qp *qp,
+ const struct ib_reg_wr *wr,
+ void **seg, int *size, void **cur_edge,
+ bool check_not_free)
+{
+ struct mlx5_ib_mr *mr = to_mmr(wr->mr);
+ struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
+ int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+ bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+ bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
+ u8 flags = 0;
+
+ if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Fast update of %s for MR is disabled\n",
+ (MLX5_CAP_GEN(dev->mdev,
+ umr_modify_entity_size_disabled)) ?
+ "entity size" :
+ "atomic access");
+ return -EINVAL;
+ }
+
+ if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Invalid IB_SEND_INLINE send flag\n");
+ return -EINVAL;
+ }
+
+ if (check_not_free)
+ flags |= MLX5_UMR_CHECK_NOT_FREE;
+ if (umr_inline)
+ flags |= MLX5_UMR_INLINE;
+
+ set_reg_umr_seg(*seg, mr, flags, atomic);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ if (umr_inline) {
+ memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
+ mr_list_size);
+ *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
+ } else {
+ set_reg_data_seg(*seg, mr, pd);
+ *seg += sizeof(struct mlx5_wqe_data_seg);
+ *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+ }
+ return 0;
+}
+
+static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ set_linv_umr_seg(*seg);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ set_linv_mkey_seg(*seg);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
+static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
+{
+ __be32 *p = NULL;
+ int i, j;
+
+ pr_debug("dump WQE index %u:\n", idx);
+ for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
+ if ((i & 0xf) == 0) {
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+ pr_debug("WQBB at %p:\n", (void *)p);
+ j = 0;
+ idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
+ }
+ pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
+ be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
+ be32_to_cpu(p[j + 3]));
+ }
+}
+
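+/*
+ * Reserve the next WQE slot and initialize its control segment.
+ * Returns -ENOMEM when the SQ has no room left for this request.
+ */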
+static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned int *idx,
+ int *size, void **cur_edge, int nreq,
+ bool send_signaled, bool solicited)
+{
+ if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+ return -ENOMEM;
+
+ *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
+ *ctrl = *seg;
+ *(uint32_t *)(*seg + 8) = 0;
+ (*ctrl)->imm = send_ieth(wr);
+ (*ctrl)->fm_ce_se = qp->sq_signal_bits |
+ (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
+
+ *seg += sizeof(**ctrl);
+ *size = sizeof(**ctrl) / 16;
+ *cur_edge = qp->sq.cur_edge;
+
+ return 0;
+}
+
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned int *idx, int *size,
+ void **cur_edge, int nreq)
+{
+ return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
+ wr->send_flags & IB_SEND_SIGNALED,
+ wr->send_flags & IB_SEND_SOLICITED);
+}
+
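+/* Finalize the control segment and advance the SQ producer state. */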
+static void finish_wqe(struct mlx5_ib_qp *qp,
+ struct mlx5_wqe_ctrl_seg *ctrl,
+ void *seg, u8 size, void *cur_edge,
+ unsigned int idx, u64 wr_id, int nreq, u8 fence,
+ u32 mlx5_opcode)
+{
+ u8 opmod = 0;
+
+ ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
+ mlx5_opcode | ((u32)opmod << 24));
+ ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
+ ctrl->fm_ce_se |= fence;
+ if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE))
+ ctrl->signature = wq_sig(ctrl);
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = mlx5_opcode;
+ qp->sq.wqe_head[idx] = qp->sq.head + nreq;
+ qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+
+ /* We save the edge which was possibly updated during the WQE
+ * construction, into SQ's cache.
+ */
+ seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
+ qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
+ get_sq_edge(&qp->sq, qp->sq.cur_post &
+ (qp->sq.wqe_cnt - 1)) :
+ cur_edge;
+}
+
+static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey);
+ *seg += sizeof(struct mlx5_wqe_raddr_seg);
+ *size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
+}
+
+static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
+ (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey);
+ set_linv_wr(qp, seg, size, cur_edge);
+}
+
+static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_REG_MR;
+ (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key);
+ return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
+}
+
+static int handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq,
+ struct ib_sig_domain *domain, u32 psv_index,
+ u8 next_fence)
+{
+ int err;
+
+ /*
+ * SET_PSV WQEs are posted unsignaled (a completion is generated
+ * only on error) and with the solicited bit set.
+ */
+ err = __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
+ false, true);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ err = set_psv_wr(domain, psv_index, seg, size);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
+ next_fence, MLX5_OPCODE_SET_PSV);
+
+out:
+ return err;
+}
+
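+/*
+ * IB_WR_REG_MR_INTEGRITY expands into up to four WQEs: an optional UMR
+ * registering the data + protection MR, the signature-MR UMR, and two
+ * SET_PSV WQEs (one per signature domain).
+ */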
+static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge,
+ unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence)
+{
+ struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr pa_pi_mr;
+ struct ib_sig_attrs *sig_attrs;
+ struct ib_reg_wr reg_pi_wr;
+ int err;
+
+ qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY;
+
+ mr = to_mmr(reg_wr(wr)->mr);
+ pi_mr = mr->pi_mr;
+
+ if (pi_mr) {
+ memset(&reg_pi_wr, 0,
+ sizeof(struct ib_reg_wr));
+
+ reg_pi_wr.mr = &pi_mr->ibmr;
+ reg_pi_wr.access = reg_wr(wr)->access;
+ reg_pi_wr.key = pi_mr->ibmr.rkey;
+
+ (*ctrl)->imm = cpu_to_be32(reg_pi_wr.key);
+ /* UMR for data + prot registration */
+ err = set_reg_wr(qp, &reg_pi_wr, seg, size, cur_edge, false);
+ if (unlikely(err))
+ goto out;
+
+ finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
+ nreq, fence, MLX5_OPCODE_UMR);
+
+ err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ } else {
+ memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
+ /* No UMR, use local_dma_lkey */
+ pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
+ pa_pi_mr.ndescs = mr->ndescs;
+ pa_pi_mr.data_length = mr->data_length;
+ pa_pi_mr.data_iova = mr->data_iova;
+ if (mr->meta_ndescs) {
+ pa_pi_mr.meta_ndescs = mr->meta_ndescs;
+ pa_pi_mr.meta_length = mr->meta_length;
+ pa_pi_mr.pi_iova = mr->pi_iova;
+ }
+
+ pa_pi_mr.ibmr.length = mr->ibmr.length;
+ mr->pi_mr = &pa_pi_mr;
+ }
+ (*ctrl)->imm = cpu_to_be32(mr->ibmr.rkey);
+ /* UMR for sig MR */
+ err = set_pi_umr_wr(wr, qp, seg, size, cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
+ fence, MLX5_OPCODE_UMR);
+
+ sig_attrs = mr->ibmr.sig_attrs;
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->mem, mr->sig->psv_memory.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->wire, mr->sig->psv_wire.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+
+out:
+ return err;
+}
+
+static int handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence, int *num_sge)
+{
+ int err = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+ err = -EOPNOTSUPP;
+ goto out;
+
+ case IB_WR_LOCAL_INV:
+ handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR:
+ err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR_INTEGRITY:
+ err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
+ cur_edge, idx, nreq, fence,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ return err;
+}
+
+static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+ default:
+ break;
+ }
+}
+
+static void handle_qpt_hw_gsi(struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr, void **seg,
+ int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
+static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **seg, int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ /* handle QPs that support IPoIB UD LSO offload */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ struct mlx5_wqe_eth_pad *pad;
+
+ pad = *seg;
+ memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
+ *seg += sizeof(struct mlx5_wqe_eth_pad);
+ *size += sizeof(struct mlx5_wqe_eth_pad) / 16;
+ set_eth_seg(wr, qp, seg, size, cur_edge);
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ }
+}
+
+static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge, unsigned int idx)
+{
+ int err = 0;
+
+ if (unlikely(wr->opcode != MLX5_IB_WR_UMR)) {
+ err = -EINVAL;
+ mlx5_ib_warn(dev, "bad opcode %d\n", wr->opcode);
+ goto out;
+ }
+
+ qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
+ (*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey);
+ err = set_reg_umr_segment(dev, *seg, wr,
+ !!(MLX5_CAP_GEN(dev->mdev, atomic)));
+ if (unlikely(err))
+ goto out;
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ set_reg_mkey_segment(*seg, wr);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+out:
+ return err;
+}
+
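+/*
+ * Post a chain of send work requests. WQE segments are written in
+ * 16-byte octowords ("size" counts octowords); the doorbell is rung
+ * once, after the whole chain has been written.
+ */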
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* silence a compiler warning */
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp *qp;
+ struct mlx5_wqe_xrc_seg *xrc;
+ struct mlx5_bf *bf;
+ void *cur_edge;
+ int uninitialized_var(size);
+ unsigned long flags;
+ unsigned int idx;
+ int err = 0;
+ int num_sge;
+ void *seg;
+ int nreq;
+ int i;
+ u8 next_fence = 0;
+ u8 fence;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
+
+ qp = to_mqp(ibqp);
+ bf = &qp->bf;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ num_sge = wr->num_sge;
+ if (unlikely(num_sge > qp->sq.max_gs)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
+ nreq);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (wr->opcode == IB_WR_REG_MR ||
+ wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ fence = dev->umr_fence;
+ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ } else {
+ if (wr->send_flags & IB_SEND_FENCE) {
+ if (qp->next_fence)
+ fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+ else
+ fence = MLX5_FENCE_MODE_FENCE;
+ } else {
+ fence = qp->next_fence;
+ }
+ }
+
+ switch (ibqp->qp_type) {
+ case IB_QPT_XRC_INI:
+ xrc = seg;
+ seg += sizeof(*xrc);
+ size += sizeof(*xrc) / 16;
+ fallthrough;
+ case IB_QPT_RC:
+ err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size,
+ &cur_edge, &idx, nreq, fence,
+ next_fence, &num_sge);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ goto out;
+ } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ goto skip_psv;
+ }
+ break;
+
+ case IB_QPT_UC:
+ handle_qpt_uc(wr, &seg, &size);
+ break;
+ case IB_QPT_SMI:
+ if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
+ mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
+ err = -EPERM;
+ *bad_wr = wr;
+ goto out;
+ }
+ fallthrough;
+ case MLX5_IB_QPT_HW_GSI:
+ handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge);
+ break;
+ case IB_QPT_UD:
+ handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
+ break;
+ case MLX5_IB_QPT_REG_UMR:
+ err = handle_qpt_reg_umr(dev, qp, wr, &ctrl, &seg,
+ &size, &cur_edge, idx);
+ if (unlikely(err))
+ goto out;
+ break;
+
+ default:
+ break;
+ }
+
+ if (wr->send_flags & IB_SEND_INLINE && num_sge) {
+ err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ } else {
+ for (i = 0; i < num_sge; i++) {
+ handle_post_send_edge(&qp->sq, &seg, size,
+ &cur_edge);
+ if (unlikely(!wr->sg_list[i].length))
+ continue;
+
+ set_data_ptr_seg(
+ (struct mlx5_wqe_data_seg *)seg,
+ wr->sg_list + i);
+ size += sizeof(struct mlx5_wqe_data_seg) / 16;
+ seg += sizeof(struct mlx5_wqe_data_seg);
+ }
+ }
+
+ qp->next_fence = next_fence;
+ finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
+ fence, mlx5_ib_opcode[wr->opcode]);
+skip_psv:
+ if (0)
+ dump_wqe(qp, idx, size);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell.
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
+ /* Make sure doorbells don't leak out of SQ spinlock
+ * and reach the HCA out of order.
+ */
+ bf->offset ^= bf->buf_size;
+ }
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return err;
+}
+
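+/* Compute the inline signature byte over a receive WQE. */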
+static void set_sig_seg(struct mlx5_rwqe_sig *sig, int max_gs)
+{
+ sig->signature = calc_sig(sig, (max_gs + 1) << 2);
+}
+
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_data_seg *scat;
+ struct mlx5_rwqe_sig *sig;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int ind;
+ int i;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->rq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ scat++;
+
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_ptr_seg(scat + i, wr->sg_list + i);
+
+ if (i < qp->rq.max_gs) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].addr = 0;
+ }
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
+ sig = (struct mlx5_rwqe_sig *)scat;
+ set_sig_seg(sig, qp->rq.max_gs);
+ }
+
+ qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->rq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/wr.h b/drivers/infiniband/hw/mlx5/wr.h
new file mode 100644
index 000000000000..4f0057516402
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_WR_H
+#define _MLX5_IB_WR_H
+
+#include "mlx5_ib.h"
+
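+/*
+ * MR descriptor lists up to this many bytes are copied inline into
+ * the UMR WQE instead of being referenced via a data segment.
+ */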
+enum {
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
+};
+
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
+
+/* get_sq_edge - Get the next nearby edge.
+ *
+ * An 'edge' is defined as the first address following the end of the
+ * fragment or the SQ. During WQE construction, code that repeatedly
+ * advances the write pointer therefore only needs to check whether it
+ * has reached an edge.
+ *
+ * @sq - SQ buffer.
+ * @idx - Stride index in the SQ buffer.
+ *
+ * Return:
+ * The new edge.
+ */
+static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
+{
+ void *fragment_end;
+
+ fragment_end = mlx5_frag_buf_get_wqe
+ (&sq->fbc,
+ mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
+
+ return fragment_end + MLX5_SEND_WQE_BB;
+}
+
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain);
+
+static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
+}
+
+static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
+}
+#endif /* _MLX5_IB_WR_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 599794c5a78f..7550e9d03dec 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -478,16 +478,6 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_mr *mr);
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr);
-int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
- u32 access, struct mthca_fmr *fmr);
-int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
-int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
-int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr);
-
int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt);
void mthca_unmap_eq_icm(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 4250b2c18c64..ce0e0867e488 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -541,7 +541,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
return err;
}
-/* Free mr or fmr */
+/* Free mr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
mthca_table_put(dev, dev->mr_table.mpt_table,
@@ -564,266 +564,6 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
mthca_free_mtt(dev, mr->mtt);
}
-int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
- u32 access, struct mthca_fmr *mr)
-{
- struct mthca_mpt_entry *mpt_entry;
- struct mthca_mailbox *mailbox;
- u64 mtt_seg;
- u32 key, idx;
- int list_len = mr->attr.max_pages;
- int err = -ENOMEM;
- int i;
-
- if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
- return -EINVAL;
-
- /* For Arbel, all MTTs must fit in the same page. */
- if (mthca_is_memfree(dev) &&
- mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
- return -EINVAL;
-
- mr->maps = 0;
-
- key = mthca_alloc(&dev->mr_table.mpt_alloc);
- if (key == -1)
- return -ENOMEM;
- key = adjust_key(dev, key);
-
- idx = key & (dev->limits.num_mpts - 1);
- mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
-
- if (mthca_is_memfree(dev)) {
- err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
- if (err)
- goto err_out_mpt_free;
-
- mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
- BUG_ON(!mr->mem.arbel.mpt);
- } else
- mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
- sizeof *(mr->mem.tavor.mpt) * idx;
-
- mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
- if (IS_ERR(mr->mtt)) {
- err = PTR_ERR(mr->mtt);
- goto err_out_table;
- }
-
- mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
-
- if (mthca_is_memfree(dev)) {
- mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
- mr->mtt->first_seg,
- &mr->mem.arbel.dma_handle);
- BUG_ON(!mr->mem.arbel.mtts);
- } else
- mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
-
- mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox)) {
- err = PTR_ERR(mailbox);
- goto err_out_free_mtt;
- }
-
- mpt_entry = mailbox->buf;
-
- mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
- MTHCA_MPT_FLAG_MIO |
- MTHCA_MPT_FLAG_REGION |
- access);
-
- mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
- mpt_entry->key = cpu_to_be32(key);
- mpt_entry->pd = cpu_to_be32(pd);
- memset(&mpt_entry->start, 0,
- sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
- mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);
-
- if (0) {
- mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
- for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
- if (i % 4 == 0)
- printk("[%02x] ", i * 4);
- printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
- if ((i + 1) % 4 == 0)
- printk("\n");
- }
- }
-
- err = mthca_SW2HW_MPT(dev, mailbox,
- key & (dev->limits.num_mpts - 1));
- if (err) {
- mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
- goto err_out_mailbox_free;
- }
-
- mthca_free_mailbox(dev, mailbox);
- return 0;
-
-err_out_mailbox_free:
- mthca_free_mailbox(dev, mailbox);
-
-err_out_free_mtt:
- mthca_free_mtt(dev, mr->mtt);
-
-err_out_table:
- mthca_table_put(dev, dev->mr_table.mpt_table, key);
-
-err_out_mpt_free:
- mthca_free(&dev->mr_table.mpt_alloc, key);
- return err;
-}
-
-int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (fmr->maps)
- return -EBUSY;
-
- mthca_free_region(dev, fmr->ibmr.lkey);
- mthca_free_mtt(dev, fmr->mtt);
-
- return 0;
-}
-
-static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
- int list_len, u64 iova)
-{
- int i, page_mask;
-
- if (list_len > fmr->attr.max_pages)
- return -EINVAL;
-
- page_mask = (1 << fmr->attr.page_shift) - 1;
-
- /* We are getting page lists, so va must be page aligned. */
- if (iova & page_mask)
- return -EINVAL;
-
- /* Trust the user not to pass misaligned data in page_list */
- if (0)
- for (i = 0; i < list_len; ++i) {
- if (page_list[i] & ~page_mask)
- return -EINVAL;
- }
-
- if (fmr->maps >= fmr->attr.max_maps)
- return -EINVAL;
-
- return 0;
-}
-
-
-int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct mthca_fmr *fmr = to_mfmr(ibfmr);
- struct mthca_dev *dev = to_mdev(ibfmr->device);
- struct mthca_mpt_entry mpt_entry;
- u32 key;
- int i, err;
-
- err = mthca_check_fmr(fmr, page_list, list_len, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = tavor_key_to_hw_index(fmr->ibmr.lkey);
- key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
-
- writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
-
- for (i = 0; i < list_len; ++i) {
- __be64 mtt_entry = cpu_to_be64(page_list[i] |
- MTHCA_MTT_FLAG_PRESENT);
- mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
- }
-
- mpt_entry.lkey = cpu_to_be32(key);
- mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
- mpt_entry.start = cpu_to_be64(iova);
-
- __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
- memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
- offsetof(struct mthca_mpt_entry, window_count) -
- offsetof(struct mthca_mpt_entry, start));
-
- writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);
-
- return 0;
-}
-
-int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct mthca_fmr *fmr = to_mfmr(ibfmr);
- struct mthca_dev *dev = to_mdev(ibfmr->device);
- u32 key;
- int i, err;
-
- err = mthca_check_fmr(fmr, page_list, list_len, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = arbel_key_to_hw_index(fmr->ibmr.lkey);
- if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
- key += SINAI_FMR_KEY_INC;
- else
- key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
-
- wmb();
-
- dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
-
- for (i = 0; i < list_len; ++i)
- fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
- MTHCA_MTT_FLAG_PRESENT);
-
- dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
-
- fmr->mem.arbel.mpt->key = cpu_to_be32(key);
- fmr->mem.arbel.mpt->lkey = cpu_to_be32(key);
- fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
- fmr->mem.arbel.mpt->start = cpu_to_be64(iova);
-
- wmb();
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;
-
- wmb();
-
- return 0;
-}
-
-void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (!fmr->maps)
- return;
-
- fmr->maps = 0;
-
- writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
-}
-
-void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (!fmr->maps)
- return;
-
- fmr->maps = 0;
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
-}
-
int mthca_init_mr_table(struct mthca_dev *dev)
{
phys_addr_t addr;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 69a3e4f62fb1..9fa2f9164a47 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -118,16 +118,6 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- /*
- * If Sinai memory key optimization is being used, then only
- * the 8-bit key portion will change. For other HCAs, the
- * unused index bits will also be used for FMR remapping.
- */
- if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
- props->max_map_per_fmr = 255;
- else
- props->max_map_per_fmr =
- (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
err = 0;
out:
@@ -388,14 +378,15 @@ static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
}
-static int mthca_ah_create(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+static int mthca_ah_create(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct mthca_ah *ah = to_mah(ibah);
- return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd), ah_attr,
- ah);
+ return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd),
+ init_attr->ah_attr, ah);
}
static void mthca_ah_destroy(struct ib_ah *ah, u32 flags)
@@ -957,69 +948,6 @@ static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
return 0;
}
-static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct mthca_fmr *fmr;
- int err;
-
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
- err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
- convert_access(mr_access_flags), fmr);
-
- if (err) {
- kfree(fmr);
- return ERR_PTR(err);
- }
-
- return &fmr->ibmr;
-}
-
-static int mthca_dealloc_fmr(struct ib_fmr *fmr)
-{
- struct mthca_fmr *mfmr = to_mfmr(fmr);
- int err;
-
- err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
- if (err)
- return err;
-
- kfree(mfmr);
- return 0;
-}
-
-static int mthca_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *fmr;
- int err;
- struct mthca_dev *mdev = NULL;
-
- list_for_each_entry(fmr, fmr_list, list) {
- if (mdev && to_mdev(fmr->device) != mdev)
- return -EINVAL;
- mdev = to_mdev(fmr->device);
- }
-
- if (!mdev)
- return 0;
-
- if (mthca_is_memfree(mdev)) {
- list_for_each_entry(fmr, fmr_list, list)
- mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
-
- wmb();
- } else
- list_for_each_entry(fmr, fmr_list, list)
- mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
-
- err = mthca_SYNC_TPT(mdev);
- return err;
-}
-
static ssize_t hw_rev_show(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -1203,20 +1131,6 @@ static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};
-static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
- .alloc_fmr = mthca_alloc_fmr,
- .dealloc_fmr = mthca_dealloc_fmr,
- .map_phys_fmr = mthca_arbel_map_phys_fmr,
- .unmap_fmr = mthca_unmap_fmr,
-};
-
-static const struct ib_device_ops mthca_dev_tavor_fmr_ops = {
- .alloc_fmr = mthca_alloc_fmr,
- .dealloc_fmr = mthca_dealloc_fmr,
- .map_phys_fmr = mthca_tavor_map_phys_fmr,
- .unmap_fmr = mthca_unmap_fmr,
-};
-
static const struct ib_device_ops mthca_dev_arbel_ops = {
.post_recv = mthca_arbel_post_receive,
.post_send = mthca_arbel_post_send,
@@ -1275,15 +1189,6 @@ int mthca_register_device(struct mthca_dev *dev)
&mthca_dev_tavor_srq_ops);
}
- if (dev->mthca_flags & MTHCA_FLAG_FMR) {
- if (mthca_is_memfree(dev))
- ib_set_device_ops(&dev->ib_dev,
- &mthca_dev_arbel_fmr_ops);
- else
- ib_set_device_ops(&dev->ib_dev,
- &mthca_dev_tavor_fmr_ops);
- }
-
ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);
if (mthca_is_memfree(dev))
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 596acc45569b..84c64bff0d92 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -76,24 +76,6 @@ struct mthca_mr {
struct mthca_mtt *mtt;
};
-struct mthca_fmr {
- struct ib_fmr ibmr;
- struct ib_fmr_attr attr;
- struct mthca_mtt *mtt;
- int maps;
- union {
- struct {
- struct mthca_mpt_entry __iomem *mpt;
- u64 __iomem *mtts;
- } tavor;
- struct {
- struct mthca_mpt_entry *mpt;
- __be64 *mtts;
- dma_addr_t dma_handle;
- } arbel;
- } mem;
-};
-
struct mthca_pd {
struct ib_pd ibpd;
u32 pd_num;
@@ -301,11 +283,6 @@ static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext
return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}
-static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
-{
- return container_of(ibmr, struct mthca_fmr, ibmr);
-}
-
static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct mthca_mr, ibmr);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 7baedc74e39d..fcfe0e82197a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -98,7 +98,6 @@ struct ocrdma_dev_attr {
u64 max_mr_size;
u32 max_num_mr_pbl;
int max_mw;
- int max_fmr;
int max_map_per_fmr;
int max_pages_per_frmr;
u16 max_ord_per_qp;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 2b7f00ac41b0..6eea02b18968 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -155,7 +155,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
return status;
}
-int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
u32 *ahid_addr;
@@ -165,6 +165,7 @@ int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
u16 vlan_tag = 0xffff;
const struct ib_gid_attr *sgid_attr;
struct ocrdma_pd *pd = get_ocrdma_pd(ibah->pd);
+ struct rdma_ah_attr *attr = init_attr->ah_attr;
struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 9780afcde780..8b73b3489f3a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -51,7 +51,7 @@ enum {
OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */
};
-int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index d82d3ec3649e..e07bf0b2209a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1190,7 +1190,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_mr = rsp->max_mr;
attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
rsp->max_mr_size_lo;
- attr->max_fmr = 0;
attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
attr->max_cqe = rsp->max_cq_cqes_per_cq &
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 10e343894595..d11c74390a12 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -99,8 +99,6 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_mw = dev->attr.max_mw;
attr->max_pd = dev->attr.max_pd;
attr->atomic_cap = 0;
- attr->max_fmr = 0;
- attr->max_map_per_fmr = 0;
attr->max_qp_rd_atom =
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index dcdc85a1ab25..ccaedfd53e49 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -632,7 +632,6 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
attr->max_mr_size = qed_attr->max_mr_size;
attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
attr->max_mw = qed_attr->max_mw;
- attr->max_fmr = qed_attr->max_fmr;
attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
attr->max_pd = qed_attr->max_pd;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 5488dbd59d3c..fdf90ecb2699 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -103,7 +103,6 @@ struct qedr_device_attr {
u64 max_mr_size;
u32 max_cqe;
u32 max_mw;
- u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a5bd3adaf90a..9b9e80266367 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -145,8 +145,6 @@ int qedr_query_device(struct ib_device *ibdev,
attr->max_mw = qattr->max_mw;
attr->max_pd = qattr->max_pd;
attr->atomic_cap = dev->atomic_cap;
- attr->max_fmr = qattr->max_fmr;
- attr->max_map_per_fmr = 16;
attr->max_qp_init_rd_atom =
1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
attr->max_qp_rd_atom =
@@ -2750,12 +2748,12 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
return 0;
}
-int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
struct qedr_ah *ah = get_qedr_ah(ibah);
- rdma_copy_ah_attr(&ah->attr, attr);
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
return 0;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 18027844eb87..5e02387e068d 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -70,7 +70,7 @@ int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_recv_wr);
-int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 91d64dd71a8a..8bcbc884e5b6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2375,7 +2375,6 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
struct qib_devdata *dd = ppd->dd;
u64 val, guid, ibc;
unsigned long flags;
- int ret = 0;
/*
* SerDes model not in Pd, but still need to
@@ -2510,7 +2509,7 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
val | ERR_MASK_N(IBStatusChanged));
/* Always zero until we start messing with SerDes for real */
- return ret;
+ return 0;
}
/**
@@ -6875,7 +6874,7 @@ static int init_sdma_7322_regs(struct qib_pportdata *ppd)
struct qib_devdata *dd = ppd->dd;
unsigned lastbuf, erstbuf;
u64 senddmabufmask[3] = { 0 };
- int n, ret = 0;
+ int n;
qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
qib_sdma_7322_setlengen(ppd);
@@ -6904,7 +6903,7 @@ static int init_sdma_7322_regs(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
- return ret;
+ return 0;
}
/* sdma_lock must be held */
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 568b21eb6ea1..021df0654ba7 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -760,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping linkcontrol sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail;
+ goto bail_link;
}
kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
@@ -770,7 +770,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping sl2vl sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_link;
+ goto bail_sl;
}
kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
@@ -780,7 +780,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping diag_counters sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sl;
+ goto bail_diagc;
}
kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
@@ -793,7 +793,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_diagc;
+ goto bail_cc;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -854,6 +854,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
&cc_table_bin_attr);
kobject_put(&ppd->pport_cc_kobj);
}
+ kobject_put(&ppd->diagc_kobj);
kobject_put(&ppd->sl2vl_kobj);
kobject_put(&ppd->pport_kobj);
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7508abb6a0fa..7acf9ba5358a 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1460,7 +1460,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_cq = ib_qib_max_cqs;
rdi->dparms.props.max_cqe = ib_qib_max_cqes;
rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
rdi->dparms.props.max_qp_init_rd_atom = 255;
rdi->dparms.props.max_srq = ib_qib_max_srqs;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 71f82339446c..b8a77ce11590 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -322,7 +322,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
props->max_mcast_grp = 0;
props->max_mcast_qp_attach = 0;
props->max_total_mcast_qp_attach = 0;
- props->max_map_per_fmr = 0;
/* Owned by Userspace
* max_qp_wr, max_sge, max_sge_rd, max_cqe */
mutex_unlock(&us_ibdev->usdev_lock);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index e580ae9cc55a..780fd2dfc07e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -829,7 +829,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
ret = -ENOMEM;
- goto err_free_device;
+ goto err_disable_pdev;
}
ret = pci_request_regions(pdev, DRV_NAME);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index faf7ecd7b3fa..ccbded2d26ce 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -509,9 +509,10 @@ void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
*
* @return: 0 on success, otherwise errno.
*/
-int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
struct pvrdma_dev *dev = to_vdev(ibah->device);
struct pvrdma_ah *ah = to_vah(ibah);
const struct ib_global_route *grh;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index e4a48f5c0c85..267702226f10 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -414,7 +414,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
-int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index ee02c6176007..40480add7dd3 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -98,14 +98,14 @@ EXPORT_SYMBOL(rvt_check_ah);
*
* Return: 0 on success
*/
-int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 create_flags, struct ib_udata *udata)
+int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct rvt_ah *ah = ibah_to_rvtah(ibah);
struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
unsigned long flags;
- if (rvt_check_ah(ibah->device, ah_attr))
+ if (rvt_check_ah(ibah->device, init_attr->ah_attr))
return -EINVAL;
spin_lock_irqsave(&dev->n_ahs_lock, flags);
@@ -117,10 +117,11 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
dev->n_ahs_allocated++;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- rdma_copy_ah_attr(&ah->attr, ah_attr);
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
if (dev->driver_f.notify_new_ah)
- dev->driver_f.notify_new_ah(ibah->device, ah_attr, ah);
+ dev->driver_f.notify_new_ah(ibah->device,
+ init_attr->ah_attr, ah);
return 0;
}
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index bbb4d3bdec4e..40b7123fec76 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -50,8 +50,8 @@
#include <rdma/rdma_vt.h>
-int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 create_flags, struct ib_udata *udata);
+int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 72f6534fbb52..60864e5ca7cb 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -97,7 +97,6 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
return 0;
}
@@ -714,160 +713,6 @@ bail:
EXPORT_SYMBOL(rvt_invalidate_rkey);
/**
- * rvt_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Return: the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct rvt_fmr *fmr;
- int m;
- struct ib_fmr *ret;
- int rval = -ENOMEM;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
- fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
- if (!fmr)
- goto bail;
-
- rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
- PERCPU_REF_INIT_ATOMIC);
- if (rval)
- goto bail;
-
- /*
- * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
- * rkey.
- */
- rval = rvt_alloc_lkey(&fmr->mr, 0);
- if (rval)
- goto bail_mregion;
- fmr->ibfmr.rkey = fmr->mr.lkey;
- fmr->ibfmr.lkey = fmr->mr.lkey;
- /*
- * Resources are allocated but no valid mapping (RKEY can't be
- * used).
- */
- fmr->mr.access_flags = mr_access_flags;
- fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->mr.page_shift = fmr_attr->page_shift;
-
- ret = &fmr->ibfmr;
-done:
- return ret;
-
-bail_mregion:
- rvt_deinit_mregion(&fmr->mr);
-bail:
- kfree(fmr);
- ret = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * rvt_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- *
- * Return: 0 on success
- */
-
-int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct rvt_fmr *fmr = to_ifmr(ibfmr);
- struct rvt_lkey_table *rkt;
- unsigned long flags;
- int m, n;
- unsigned long i;
- u32 ps;
- struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
-
- i = atomic_long_read(&fmr->mr.refcount.count);
- if (i > 2)
- return -EBUSY;
-
- if (list_len > fmr->mr.max_segs)
- return -EINVAL;
-
- rkt = &rdi->lkey_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = iova;
- fmr->mr.iova = iova;
- ps = 1 << fmr->mr.page_shift;
- fmr->mr.length = list_len * ps;
- m = 0;
- n = 0;
- for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
- fmr->mr.map[m]->segs[n].length = ps;
- trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
- if (++n == RVT_SEGSZ) {
- m++;
- n = 0;
- }
- }
- spin_unlock_irqrestore(&rkt->lock, flags);
- return 0;
-}
-
-/**
- * rvt_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Return: 0 on success.
- */
-int rvt_unmap_fmr(struct list_head *fmr_list)
-{
- struct rvt_fmr *fmr;
- struct rvt_lkey_table *rkt;
- unsigned long flags;
- struct rvt_dev_info *rdi;
-
- list_for_each_entry(fmr, fmr_list, ibfmr.list) {
- rdi = ib_to_rvt(fmr->ibfmr.device);
- rkt = &rdi->lkey_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- spin_unlock_irqrestore(&rkt->lock, flags);
- }
- return 0;
-}
-
-/**
- * rvt_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Return: 0 on success.
- */
-int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
-{
- struct rvt_fmr *fmr = to_ifmr(ibfmr);
- int ret = 0;
-
- rvt_free_lkey(&fmr->mr);
- rvt_put_mr(&fmr->mr); /* will set completion if last */
- ret = rvt_check_refs(&fmr->mr, __func__);
- if (ret)
- goto out;
- rvt_deinit_mregion(&fmr->mr);
- kfree(fmr);
-out:
- return ret;
-}
-
-/**
* rvt_sge_adjacent - is isge compressible
* @last_sge: last outgoing SGE written
* @sge: SGE to check
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
index 2c8d0752e8e3..780fc63af98b 100644
--- a/drivers/infiniband/sw/rdmavt/mr.h
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -49,10 +49,6 @@
*/
#include <rdma/rdma_vt.h>
-struct rvt_fmr {
- struct ib_fmr ibfmr;
- struct rvt_mregion mr; /* must be last */
-};
struct rvt_mr {
struct ib_mr ibmr;
@@ -60,11 +56,6 @@ struct rvt_mr {
struct rvt_mregion mr; /* must be last */
};
-static inline struct rvt_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct rvt_fmr, ibfmr);
-}
-
static inline struct rvt_mr *to_imr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct rvt_mr, ibmr);
@@ -83,11 +74,5 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-int rvt_unmap_fmr(struct list_head *fmr_list);
-int rvt_dealloc_fmr(struct ib_fmr *ibfmr);
#endif /* DEF_RVTMR_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 500a7ee04c44..511b72809e14 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 - 2019 Intel Corporation.
+ * Copyright(c) 2016 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -525,15 +525,18 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* @rdi: rvt device info structure
* @qpt: queue pair number table pointer
* @port_num: IB port number, 1 based, comes from core
+ * @exclude_prefix: prefix of special queue pair number being allocated
*
* Return: The queue pair number
*/
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num)
+ enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
u32 ret;
+ u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
+ RVT_AIP_QPN_MAX : RVT_QPN_MAX;
if (rdi->driver_f.alloc_qpn)
return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
@@ -553,7 +556,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
}
qpn = qpt->last + qpt->incr;
- if (qpn >= RVT_QPN_MAX)
+ if (qpn >= max_qpn)
qpn = qpt->incr | ((qpt->last & 1) ^ 1);
/* offset carries bit 0 */
offset = qpn & RVT_BITS_PER_PAGE_MASK;
@@ -987,6 +990,9 @@ static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
struct rvt_qpn_map *map;
+ if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
+ qpn &= RVT_AIP_QP_SUFFIX;
+
map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
if (map->page)
clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
@@ -1074,13 +1080,15 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
size_t sqsize;
+ u8 exclude_prefix = 0;
if (!rdi)
return ERR_PTR(-EINVAL);
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
- init_attr->create_flags)
+ (init_attr->create_flags &&
+ init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
return ERR_PTR(-EINVAL);
/* Check receive queue parameters if no SRQ is specified. */
@@ -1199,14 +1207,20 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_driver_priv;
}
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ exclude_prefix = RVT_AIP_QP_PREFIX;
+
err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
- init_attr->port_num);
+ init_attr->port_num,
+ exclude_prefix);
if (err < 0) {
ret = ERR_PTR(err);
goto bail_rq_wq;
}
qp->ibqp.qp_num = err;
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
qp->port_num = init_attr->port_num;
rvt_init_qp(rdi, qp, init_attr->qp_type);
if (rdi->driver_f.qp_priv_init) {
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 72b031ab7092..f904bb34477a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -378,7 +378,6 @@ enum {
static const struct ib_device_ops rvt_dev_ops = {
.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION,
- .alloc_fmr = rvt_alloc_fmr,
.alloc_mr = rvt_alloc_mr,
.alloc_pd = rvt_alloc_pd,
.alloc_ucontext = rvt_alloc_ucontext,
@@ -387,7 +386,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.create_cq = rvt_create_cq,
.create_qp = rvt_create_qp,
.create_srq = rvt_create_srq,
- .dealloc_fmr = rvt_dealloc_fmr,
.dealloc_pd = rvt_dealloc_pd,
.dealloc_ucontext = rvt_dealloc_ucontext,
.dereg_mr = rvt_dereg_mr,
@@ -399,7 +397,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.get_dma_mr = rvt_get_dma_mr,
.get_port_immutable = rvt_get_port_immutable,
.map_mr_sg = rvt_map_mr_sg,
- .map_phys_fmr = rvt_map_phys_fmr,
.mmap = rvt_mmap,
.modify_ah = rvt_modify_ah,
.modify_device = rvt_modify_device,
@@ -420,7 +417,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.reg_user_mr = rvt_reg_user_mr,
.req_notify_cq = rvt_req_notify_cq,
.resize_cq = rvt_resize_cq,
- .unmap_fmr = rvt_unmap_fmr,
INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
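With the FMR verbs removed from rvt_dev_ops above, kernel consumers are left
with the fast registration (FRWR) interface. Below is a hedged sketch of that
flow; the function and variable names are invented, but ib_alloc_mr(),
ib_map_mr_sg() and IB_WR_REG_MR are the in-tree fast-registration API:

	/* Illustrative FRWR registration, not code from this patch. */
	static int example_fast_reg(struct ib_pd *pd, struct ib_qp *qp,
				    struct scatterlist *sg, int sg_nents)
	{
		struct ib_reg_wr wr = {};
		struct ib_mr *mr;
		int n;

		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
		if (IS_ERR(mr))
			return PTR_ERR(mr);

		/* map the SG list into the MR's page list */
		n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
		if (n < sg_nents) {
			ib_dereg_mr(mr);
			return n < 0 ? n : -EINVAL;
		}

		/* post the registration work request on the send queue */
		wr.wr.opcode = IB_WR_REG_MR;
		wr.mr = mr;
		wr.key = mr->rkey;
		wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
		return ib_post_send(qp, &wr.wr, NULL);
	}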
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 4afdd2e20883..5642eefb4ba1 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,6 +77,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
{
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
+ rxe->attr.vendor_id = RXE_VENDOR_ID;
rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
rxe->attr.max_qp = RXE_MAX_QP;
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 48f48122ddcb..6a413d73b95d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip)
- return NULL;
+ return ERR_PTR(-ENOMEM);
size = PAGE_ALIGN(size);
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index f59616b02477..99e9d8ba9767 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -127,6 +127,9 @@ enum rxe_device_param {
/* Delay before calling arbiter timer */
RXE_NSEC_ARB_TIMER_DELAY = 200,
+
+ /* IBTA v1.4 A3.3.1 VENDOR INFORMATION section */
+ RXE_VENDOR_ID = 0XFFFFFF,
};
/* default/initial rxe port parameters */
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index ff92704de32f..245040c3a35d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
if (outbuf) {
ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
- if (!ip)
+ if (IS_ERR(ip)) {
+ err = PTR_ERR(ip);
goto err1;
+ }
- err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
- if (err)
+ if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
+ err = -EFAULT;
goto err2;
+ }
spin_lock_bh(&rxe->pending_lock);
list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
@@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
err2:
kfree(ip);
err1:
- return -EINVAL;
+ return err;
}
inline void rxe_queue_reset(struct rxe_queue *q)
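The hunk above stops do_mmap_info() from collapsing every failure into
-EINVAL and instead propagates the errno that rxe_create_mmap_info() now
encodes via ERR_PTR(). A minimal sketch of the idiom, with a hypothetical
helper name:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct page_map { int dummy; };	/* hypothetical */

	static struct page_map *page_map_create(void)
	{
		struct page_map *pm = kmalloc(sizeof(*pm), GFP_KERNEL);

		if (!pm)
			return ERR_PTR(-ENOMEM); /* encode errno in the pointer */
		return pm;
	}

	/* caller side */
	pm = page_map_create();
	if (IS_ERR(pm))
		return PTR_ERR(pm); /* propagate the real errno, not -EINVAL */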
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9dd4bd7aea92..b8a22af724e8 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -195,15 +195,16 @@ static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
rxe_drop_ref(pd);
}
-static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
- u32 flags, struct ib_udata *udata)
+static int rxe_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
int err;
struct rxe_dev *rxe = to_rdev(ibah->device);
struct rxe_ah *ah = to_rah(ibah);
- err = rxe_av_chk_attr(rxe, attr);
+ err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
if (err)
return err;
@@ -211,7 +212,7 @@ static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
if (err)
return err;
- rxe_init_av(attr, &ah->av);
+ rxe_init_av(init_attr->ah_attr, &ah->av);
return 0;
}
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index af5e9f8c0fcd..e9753831ac3f 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -30,7 +30,6 @@
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
-#define SIW_MAX_FMR SIW_MAX_MR
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD
@@ -59,7 +58,6 @@ struct siw_dev_cap {
int max_mr;
int max_pd;
int max_mw;
- int max_fmr;
int max_srq;
int max_srq_wr;
int max_srq_sge;
@@ -139,7 +137,7 @@ struct siw_pble {
struct siw_pbl {
unsigned int num_buf;
unsigned int max_buf;
- struct siw_pble pbe[1];
+ struct siw_pble pbe[];
};
/*
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 559e5fd3bad8..1662216be66d 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -947,16 +947,8 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_get(new_cep);
new_s->sk->sk_user_data = new_cep;
- if (siw_tcp_nagle == false) {
- int val = 1;
-
- rv = kernel_setsockopt(new_s, SOL_TCP, TCP_NODELAY,
- (char *)&val, sizeof(val));
- if (rv) {
- siw_dbg_cep(cep, "setsockopt NODELAY error: %d\n", rv);
- goto error;
- }
- }
+ if (siw_tcp_nagle == false)
+ tcp_sock_set_nodelay(new_s->sk);
new_cep->state = SIW_EPSTATE_AWAIT_MPAREQ;
rv = siw_cm_queue_work(new_cep, SIW_CM_WORK_MPATIMEOUT);
@@ -1312,17 +1304,14 @@ static void siw_cm_llp_state_change(struct sock *sk)
static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
struct sockaddr *raddr)
{
- int rv, flags = 0, s_val = 1;
+ int rv, flags = 0;
size_t size = laddr->sa_family == AF_INET ?
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
/*
* Make address available again asap.
*/
- rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
- sizeof(s_val));
- if (rv < 0)
- return rv;
+ sock_set_reuseaddr(s->sk);
rv = s->ops->bind(s, laddr, size);
if (rv < 0)
@@ -1389,16 +1378,8 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
goto error;
}
- if (siw_tcp_nagle == false) {
- int val = 1;
-
- rv = kernel_setsockopt(s, SOL_TCP, TCP_NODELAY, (char *)&val,
- sizeof(val));
- if (rv) {
- siw_dbg_qp(qp, "setsockopt NODELAY error: %d\n", rv);
- goto error;
- }
- }
+ if (siw_tcp_nagle == false)
+ tcp_sock_set_nodelay(s->sk);
cep = siw_cep_alloc(sdev);
if (!cep) {
rv = -ENOMEM;
@@ -1781,7 +1762,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct siw_cep *cep = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
int addr_family = id->local_addr.ss_family;
- int rv = 0, s_val;
+ int rv = 0;
if (addr_family != AF_INET && addr_family != AF_INET6)
return -EAFNOSUPPORT;
@@ -1793,13 +1774,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
/*
* Allow binding local port when still in TIME_WAIT from last close.
*/
- s_val = 1;
- rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
- sizeof(s_val));
- if (rv) {
- siw_dbg(id->device, "setsockopt error: %d\n", rv);
- goto error;
- }
+ sock_set_reuseaddr(s->sk);
+
if (addr_family == AF_INET) {
struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
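The conversions above rely on the dedicated socket option helpers added
alongside this series; they operate on the struct sock directly and cannot
fail, which is why the error handling around the old kernel_setsockopt()
calls simply disappears:

	void sock_set_reuseaddr(struct sock *sk);	/* net/core/sock.c */
	void tcp_sock_set_nodelay(struct sock *sk);	/* net/ipv4/tcp.c  */

For a kernel socket s, the call sites therefore reduce to
sock_set_reuseaddr(s->sk) and tcp_sock_set_nodelay(s->sk).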
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 5cd40fb9e20c..a0b8cc643c5c 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -413,7 +413,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->attrs.max_mr = SIW_MAX_MR;
sdev->attrs.max_pd = SIW_MAX_PD;
sdev->attrs.max_mw = SIW_MAX_MW;
- sdev->attrs.max_fmr = SIW_MAX_FMR;
sdev->attrs.max_srq = SIW_MAX_SRQ;
sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
sdev->attrs.max_srq_sge = SIW_MAX_SGE;
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index e2061dc0b043..87117781d637 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -349,14 +349,11 @@ dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
struct siw_pbl *siw_pbl_alloc(u32 num_buf)
{
struct siw_pbl *pbl;
- int buf_size = sizeof(*pbl);
if (num_buf == 0)
return ERR_PTR(-EINVAL);
- buf_size += ((num_buf - 1) * sizeof(struct siw_pble));
-
- pbl = kzalloc(buf_size, GFP_KERNEL);
+ pbl = kzalloc(struct_size(pbl, pbe, num_buf), GFP_KERNEL);
if (!pbl)
return ERR_PTR(-ENOMEM);
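Together with the siw.h change from pbe[1] to a C99 flexible array member,
the allocation above now uses struct_size() from <linux/overflow.h>, which
computes sizeof(*pbl) + num_buf * sizeof(pbl->pbe[0]) with overflow checking.
A minimal illustration mirroring (not copying) the siw_pbl layout:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct pbl_example {		/* illustrative mirror of siw_pbl */
		unsigned int num_buf;
		struct siw_pble pbe[];	/* flexible array member, no [1] hack */
	};

	static struct pbl_example *pbl_example_alloc(unsigned int num_buf)
	{
		struct pbl_example *p;

		/* sizeof(*p) + num_buf * sizeof(p->pbe[0]), overflow-checked */
		p = kzalloc(struct_size(p, pbe, num_buf), GFP_KERNEL);
		if (!p)
			return NULL;
		p->num_buf = num_buf;
		return p;
	}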
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index aeb842bc7a1e..987e2ba05dbc 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -136,7 +136,6 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
attr->max_cq = sdev->attrs.max_cq;
attr->max_cqe = sdev->attrs.max_cqe;
attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
- attr->max_fmr = sdev->attrs.max_fmr;
attr->max_mr = sdev->attrs.max_mr;
attr->max_mw = sdev->attrs.max_mw;
attr->max_mr_size = ~0ull;
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile
index 437813c7b481..4d0004b58377 100644
--- a/drivers/infiniband/ulp/Makefile
+++ b/drivers/infiniband/ulp/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_INFINIBAND_SRPT) += srpt/
obj-$(CONFIG_INFINIBAND_ISER) += iser/
obj-$(CONFIG_INFINIBAND_ISERT) += isert/
obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic/
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs/
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index e188a95984b5..9a3379c49541 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -377,8 +377,12 @@ struct ipoib_dev_priv {
struct ipoib_rx_buf *rx_ring;
struct ipoib_tx_buf *tx_ring;
+ /* cyclic ring variables for managing tx_ring, for UD only */
unsigned int tx_head;
unsigned int tx_tail;
+ /* cyclic ring variables for counting overall outstanding send WRs */
+ unsigned int global_tx_head;
+ unsigned int global_tx_tail;
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
struct ib_ud_wr tx_wr;
struct ib_wc send_wc[MAX_SEND_CQE];
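The new global_tx_head/global_tx_tail pair counts outstanding send WRs across
both the UD ring and the connected-mode TX rings. Like the existing counters,
they are free-running unsigned values, so the occupancy computation stays
correct across wraparound. A small sketch of the invariant the
queue-stop/wake logic in the following hunks relies on:

	/* well-defined modulo 2^32 while the ring holds < UINT_MAX entries */
	static inline unsigned int tx_outstanding(unsigned int head,
						  unsigned int tail)
	{
		return head - tail;
	}

	/*
	 * Stop the netdev queue when tx_outstanding() reaches
	 * ipoib_sendq_size - 1; wake it once it drains to
	 * ipoib_sendq_size >> 1 or below.
	 */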
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c59e00a0881f..9bf0fa30df28 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
return;
}
- if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
netif_stop_queue(dev);
@@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
} else {
netif_trans_update(dev);
++tx->tx_head;
- ++priv->tx_head;
+ ++priv->global_tx_head;
}
}
@@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
netif_tx_lock(dev);
++tx->tx_tail;
- ++priv->tx_tail;
+ ++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
- (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
@@ -1232,8 +1234,9 @@ timeout:
dev_kfree_skb_any(tx_req->skb);
netif_tx_lock_bh(p->dev);
++p->tx_tail;
- ++priv->tx_tail;
- if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
+ ++priv->global_tx_tail;
+ if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index c332b4761816..da3c5315bbb5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -407,9 +407,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
+ ++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
- ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
@@ -634,7 +636,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
else
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
/* increase the tx_head after send success, but use it for queue state */
- if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
netif_stop_queue(dev);
}
@@ -662,6 +665,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
rc = priv->tx_head;
++priv->tx_head;
+ ++priv->global_tx_head;
}
return rc;
}
@@ -807,6 +811,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
+ ++priv->global_tx_tail;
}
for (i = 0; i < ipoib_recvq_size; ++i) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 81b8227214f1..3cfb682b91b0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -86,7 +86,7 @@ struct workqueue_struct *ipoib_workqueue;
struct ib_sa_client ipoib_sa_client;
-static void ipoib_add_one(struct ib_device *device);
+static int ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
@@ -479,9 +479,6 @@ static struct net_device *ipoib_get_net_dev_by_params(
if (ret)
return NULL;
- if (!dev_list)
- return NULL;
-
/* See if we can find a unique device matching the L2 parameters */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, NULL, &net_dev);
@@ -529,6 +526,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
"will cause multicast packet drops\n");
netdev_update_features(dev);
dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_real_num_tx_queues(dev, 1);
rtnl_unlock();
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -540,6 +538,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
netdev_update_features(dev);
dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
rtnl_unlock();
ipoib_flush_paths(dev);
return (!rtnl_trylock()) ? -EBUSY : 0;
@@ -1184,9 +1183,11 @@ static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
jiffies_to_msecs(jiffies - dev_trans_start(dev)));
- ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
- netif_queue_stopped(dev),
- priv->tx_head, priv->tx_tail);
+ ipoib_warn(priv,
+ "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
+ netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
+ priv->global_tx_head, priv->global_tx_tail);
+
/* XXX reset QP, etc. */
}
@@ -1701,7 +1702,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
goto out_rx_ring_cleanup;
}
- /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
+ /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
if (ipoib_transport_dev_init(dev, priv->ca)) {
pr_warn("%s: ipoib_transport_dev_init failed\n",
@@ -1858,7 +1859,7 @@ static int ipoib_parent_init(struct net_device *ndev)
priv->port);
return result;
}
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+ priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
if (result) {
@@ -1899,6 +1900,7 @@ static int ipoib_ndo_init(struct net_device *ndev)
{
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
int rc;
+ struct rdma_netdev *rn = netdev_priv(ndev);
if (priv->parent) {
ipoib_child_init(ndev);
@@ -1911,6 +1913,7 @@ static int ipoib_ndo_init(struct net_device *ndev)
/* MTU will be reset when mcast join happens */
ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
+ rn->mtu = priv->mcast_mtu;
ndev->max_mtu = IPOIB_CM_MTU;
ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
@@ -2072,9 +2075,17 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_do_ioctl = ipoib_ioctl,
};
+static const struct net_device_ops ipoib_netdev_default_pf = {
+ .ndo_init = ipoib_dev_init_default,
+ .ndo_uninit = ipoib_dev_uninit_default,
+ .ndo_open = ipoib_ib_dev_open_default,
+ .ndo_stop = ipoib_ib_dev_stop_default,
+};
+
void ipoib_setup_common(struct net_device *dev)
{
dev->header_ops = &ipoib_header_ops;
+ dev->netdev_ops = &ipoib_netdev_default_pf;
ipoib_set_ethtool_ops(dev);
@@ -2124,13 +2135,6 @@ static void ipoib_build_priv(struct net_device *dev)
INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
-static const struct net_device_ops ipoib_netdev_default_pf = {
- .ndo_init = ipoib_dev_init_default,
- .ndo_uninit = ipoib_dev_uninit_default,
- .ndo_open = ipoib_ib_dev_open_default,
- .ndo_stop = ipoib_ib_dev_stop_default,
-};
-
static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
const char *name)
{
@@ -2168,7 +2172,6 @@ int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
if (rc != -EOPNOTSUPP)
goto out;
- dev->netdev_ops = &ipoib_netdev_default_pf;
rn->send = ipoib_send;
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
@@ -2514,7 +2517,7 @@ sysfs_failed:
return ERR_PTR(-ENOMEM);
}
-static void ipoib_add_one(struct ib_device *device)
+static int ipoib_add_one(struct ib_device *device)
{
struct list_head *dev_list;
struct net_device *dev;
@@ -2524,7 +2527,7 @@ static void ipoib_add_one(struct ib_device *device)
dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
if (!dev_list)
- return;
+ return -ENOMEM;
INIT_LIST_HEAD(dev_list);
@@ -2541,10 +2544,11 @@ static void ipoib_add_one(struct ib_device *device)
if (!count) {
kfree(dev_list);
- return;
+ return -EOPNOTSUPP;
}
ib_set_client_data(device, &ipoib_client, dev_list);
+ return 0;
}
static void ipoib_remove_one(struct ib_device *device, void *client_data)
@@ -2552,9 +2556,6 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
struct list_head *dev_list = client_data;
- if (!dev_list)
- return;
-
list_for_each_entry_safe(priv, tmp, dev_list, list) {
LIST_HEAD(head);
ipoib_parent_unregister_pre(priv->dev);
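ipoib_add_one() changing from void to int follows the reworked ib_client
interface in this series: the ->add() callback now returns an errno, so the
core can skip devices a client does not support instead of the client
stashing NULL client data and re-checking it in ->remove() (hence the deleted
!dev_list guards). A hedged sketch of the resulting callback shape, with an
invented client:

	static struct ib_client example_client;	/* illustrative */

	static int example_add_one(struct ib_device *device)
	{
		struct list_head *dev_list;

		dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
		if (!dev_list)
			return -ENOMEM;	/* core will not call ->remove() */

		INIT_LIST_HEAD(dev_list);
		ib_set_client_data(device, &example_client, dev_list);
		return 0;
	}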
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b9e9562f5034..9bfa514473d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -135,12 +135,11 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
kfree(mcast);
}
-static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
- int can_sleep)
+static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
{
struct ipoib_mcast *mcast;
- mcast = kzalloc(sizeof(*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
if (!mcast)
return NULL;
@@ -218,6 +217,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
struct rdma_ah_attr av;
int ret;
int set_qkey = 0;
+ int mtu;
mcast->mcmember = *mcmember;
@@ -240,13 +240,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
priv->broadcast->mcmember.flow_label = mcmember->flow_label;
priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
/* assume if the admin and the mcast are the same both can be changed */
+ mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
+ priv->broadcast->mcmember.mtu);
if (priv->mcast_mtu == priv->admin_mtu)
- priv->admin_mtu =
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
- else
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ priv->admin_mtu = IPOIB_UD_MTU(mtu);
+ priv->mcast_mtu = IPOIB_UD_MTU(mtu);
+ rn->mtu = priv->mcast_mtu;
priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
spin_unlock_irq(&priv->lock);
@@ -599,7 +598,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!priv->broadcast) {
struct ipoib_mcast *broadcast;
- broadcast = ipoib_mcast_alloc(dev, 0);
+ broadcast = ipoib_mcast_alloc(dev);
if (!broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
/*
@@ -782,7 +781,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
mgid);
- mcast = ipoib_mcast_alloc(dev, 0);
+ mcast = ipoib_mcast_alloc(dev);
if (!mcast) {
ipoib_warn(priv, "unable to allocate memory "
"for multicast structure\n");
@@ -936,7 +935,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
mgid.raw);
- nmcast = ipoib_mcast_alloc(dev, 0);
+ nmcast = ipoib_mcast_alloc(dev);
if (!nmcast) {
ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
continue;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b69304d28f06..587252fd6f57 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -206,6 +206,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
+ if (priv->hca_caps & IB_DEVICE_RDMA_NETDEV_OPA)
+ init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
+
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
pr_warn("%s: failed to create QP\n", ca->name);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 8ac8e18fbe0c..30865605e098 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -97,6 +97,7 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
{
struct net_device *ndev = priv->dev;
int result;
+ struct rdma_netdev *rn = netdev_priv(ndev);
ASSERT_RTNL();
@@ -117,6 +118,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
goto out_early;
}
+ rn->mtu = priv->mcast_mtu;
+
priv->parent = ppriv->dev;
priv->pkey = pkey;
priv->child_type = type;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 029c00163442..1d77c7f42e38 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -65,7 +65,6 @@
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
#define DRV_NAME "iser"
@@ -313,33 +312,6 @@ struct iser_comp {
};
/**
- * struct iser_reg_ops - Memory registration operations
- * per-device registration schemes
- *
- * @alloc_reg_res: Allocate registration resources
- * @free_reg_res: Free registration resources
- * @reg_mem: Register memory buffers
- * @unreg_mem: Un-register memory buffers
- * @reg_desc_get: Get a registration descriptor for pool
- * @reg_desc_put: Get a registration descriptor to pool
- */
-struct iser_reg_ops {
- int (*alloc_reg_res)(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
- void (*free_reg_res)(struct ib_conn *ib_conn);
- int (*reg_mem)(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg);
- void (*unreg_mem)(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
- struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
- void (*reg_desc_put)(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-};
-
-/**
* struct iser_device - iSER device handle
*
* @ib_device: RDMA device
@@ -351,8 +323,6 @@ struct iser_reg_ops {
* @comps_used: Number of completion contexts used, Min between online
* cpus and device max completion vectors
 * @comps: Dynamically allocated array of completion handlers
- * @reg_ops: Registration ops
- * @remote_inv_sup: Remote invalidate is supported on this device
*/
struct iser_device {
struct ib_device *ib_device;
@@ -362,26 +332,18 @@ struct iser_device {
int refcount;
int comps_used;
struct iser_comp *comps;
- const struct iser_reg_ops *reg_ops;
- bool remote_inv_sup;
};
/**
* struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
- * @fmr_pool: pool of fmrs
* @sig_mr: signature memory region
- * @page_vec: fast reg page list used by fmr pool
* @mr_valid: is mr valid indicator
*/
struct iser_reg_resources {
- union {
- struct ib_mr *mr;
- struct ib_fmr_pool *fmr_pool;
- };
+ struct ib_mr *mr;
struct ib_mr *sig_mr;
- struct iser_page_vec *page_vec;
u8 mr_valid:1;
};
@@ -403,7 +365,7 @@ struct iser_fr_desc {
* struct iser_fr_pool - connection fast registration pool
*
* @list: list of fastreg descriptors
- * @lock: protects fmr/fastreg pool
+ * @lock: protects fastreg pool
* @size: size of the pool
*/
struct iser_fr_pool {
@@ -518,12 +480,6 @@ struct iscsi_iser_task {
struct iser_data_buf prot[ISER_DIRS_NUM];
};
-struct iser_page_vec {
- u64 *pages;
- int npages;
- struct ib_mr fake_mr;
-};
-
/**
* struct iser_global - iSER global context
*
@@ -548,8 +504,6 @@ extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;
-int iser_assign_reg_ops(struct iser_device *device);
-
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task);
@@ -591,22 +545,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir,
- bool all_imm);
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir);
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm);
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir);
int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking);
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-
int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
@@ -625,26 +574,12 @@ int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session);
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
-void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
-struct iser_fr_desc *
-iser_reg_desc_get_fr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 4a7045bb0831..27a6f75a9912 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -72,7 +72,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
return err;
}
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
@@ -126,8 +126,8 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return err;
}
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT,
- buf_out->data_len == imm_sz);
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
+ buf_out->data_len == imm_sz);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
@@ -250,8 +250,8 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
- if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
- iser_conn->pages_per_mr))
+ if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
+ iser_conn->pages_per_mr))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
@@ -293,7 +293,7 @@ rx_desc_dma_map_failed:
rx_desc_alloc_fail:
iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
@@ -306,8 +306,7 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
- if (device->reg_ops->free_reg_res)
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
@@ -768,7 +767,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
int prot_count = scsi_prot_sg_count(iser_task->sc);
if (iser_task->dir[ISER_DIR_IN]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_IN],
DMA_FROM_DEVICE);
@@ -779,7 +778,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
}
if (iser_task->dir[ISER_DIR_OUT]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_OUT],
DMA_TO_DEVICE);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 999ef7cdd05e..d4e057fac219 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -38,62 +38,13 @@
#include <linux/scatterlist.h>
#include "iscsi_iser.h"
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-static
-int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-
-static const struct iser_reg_ops fastreg_ops = {
- .alloc_reg_res = iser_alloc_fastreg_pool,
- .free_reg_res = iser_free_fastreg_pool,
- .reg_mem = iser_fast_reg_mr,
- .unreg_mem = iser_unreg_mem_fastreg,
- .reg_desc_get = iser_reg_desc_get_fr,
- .reg_desc_put = iser_reg_desc_put_fr,
-};
-
-static const struct iser_reg_ops fmr_ops = {
- .alloc_reg_res = iser_alloc_fmr_pool,
- .free_reg_res = iser_free_fmr_pool,
- .reg_mem = iser_fast_reg_fmr,
- .unreg_mem = iser_unreg_mem_fmr,
- .reg_desc_get = iser_reg_desc_get_fmr,
- .reg_desc_put = iser_reg_desc_put_fmr,
-};
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
iser_err_comp(wc, "memreg");
}
-int iser_assign_reg_ops(struct iser_device *device)
-{
- struct ib_device *ib_dev = device->ib_device;
-
- /* Assign function handles - based on FMR support */
- if (ib_dev->ops.alloc_fmr && ib_dev->ops.dealloc_fmr &&
- ib_dev->ops.map_phys_fmr && ib_dev->ops.unmap_fmr) {
- iser_info("FMR supported, using FMR for registration\n");
- device->reg_ops = &fmr_ops;
- } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- iser_info("FastReg supported, using FastReg for registration\n");
- device->reg_ops = &fastreg_ops;
- device->remote_inv_sup = iser_always_reg;
- } else {
- iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
- return -1;
- }
-
- return 0;
-}
-
-struct iser_fr_desc *
+static struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
@@ -109,7 +60,7 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn)
return desc;
}
-void
+static void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
struct iser_fr_desc *desc)
{
@@ -121,44 +72,6 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn,
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-
- return list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
-}
-
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
-{
-}
-
-static void iser_data_buf_dump(struct iser_data_buf *data,
- struct ib_device *ibdev)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(data->sg, sg, data->dma_nents, i)
- iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
- "off:0x%x sz:0x%x dma_len:0x%x\n",
- i, (unsigned long)sg_dma_address(sg),
- sg_page(sg), sg->offset, sg->length, sg_dma_len(sg));
-}
-
-static void iser_dump_page_vec(struct iser_page_vec *page_vec)
-{
- int i;
-
- iser_err("page vec npages %d data length %lld\n",
- page_vec->npages, page_vec->fake_mr.length);
- for (i = 0; i < page_vec->npages; i++)
- iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
-}
-
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
@@ -213,84 +126,9 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
return 0;
}
-static int iser_set_page(struct ib_mr *mr, u64 addr)
-{
- struct iser_page_vec *page_vec =
- container_of(mr, struct iser_page_vec, fake_mr);
-
- page_vec->pages[page_vec->npages++] = addr;
-
- return 0;
-}
-
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg)
-{
- struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
- struct iser_page_vec *page_vec = rsc->page_vec;
- struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
- struct ib_pool_fmr *fmr;
- int ret, plen;
-
- page_vec->npages = 0;
- page_vec->fake_mr.page_size = SZ_4K;
- plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
- mem->dma_nents, NULL, iser_set_page);
- if (unlikely(plen < mem->dma_nents)) {
- iser_err("page vec too short to hold this SG\n");
- iser_data_buf_dump(mem, device->ib_device);
- iser_dump_page_vec(page_vec);
- return -EINVAL;
- }
-
- fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages,
- page_vec->npages, page_vec->pages[0]);
- if (IS_ERR(fmr)) {
- ret = PTR_ERR(fmr);
- iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
- return ret;
- }
-
- reg->sge.lkey = fmr->fmr->lkey;
- reg->rkey = fmr->fmr->rkey;
- reg->sge.addr = page_vec->fake_mr.iova;
- reg->sge.length = page_vec->fake_mr.length;
- reg->mem_h = fmr;
-
- iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
- " length=0x%x\n", reg->sge.lkey, reg->rkey,
- reg->sge.addr, reg->sge.length);
-
- return 0;
-}
-
-/**
- * Unregister (previosuly registered using FMR) memory.
- * If memory is non-FMR does nothing.
- */
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
-
- if (!reg->mem_h)
- return;
-
- iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
-
- ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
-
- reg->mem_h = NULL;
-}
-
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
- struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
struct iser_fr_desc *desc;
struct ib_mr_status mr_status;
@@ -312,7 +150,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
&mr_status);
}
- device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc);
+ iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->mem_h);
reg->mem_h = NULL;
}
@@ -509,15 +347,14 @@ iser_reg_data_sg(struct iscsi_iser_task *task,
if (use_dma_key)
return iser_reg_dma(device, mem, reg);
- return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
+ return iser_fast_reg_mr(task, mem, &desc->rsc, reg);
}
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir,
- bool all_imm)
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm)
{
struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
struct iser_data_buf *mem = &task->data[dir];
struct iser_mem_reg *reg = &task->rdma_reg[dir];
struct iser_fr_desc *desc = NULL;
@@ -528,7 +365,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
if (!use_dma_key) {
- desc = device->reg_ops->reg_desc_get(ib_conn);
+ desc = iser_reg_desc_get_fr(ib_conn);
reg->mem_h = desc;
}
@@ -549,15 +386,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
err_reg:
if (desc)
- device->reg_ops->reg_desc_put(ib_conn, desc);
+ iser_reg_desc_put_fr(ib_conn, desc);
return err;
}
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir)
-{
- struct iser_device *device = task->iser_conn->ib_conn.device;
-
- device->reg_ops->unreg_mem(task, dir);
-}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 127887c6c03f..c1f44c41f501 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -68,11 +68,12 @@ static void iser_event_handler(struct ib_event_handler *handler,
static int iser_create_device_ib_res(struct iser_device *device)
{
struct ib_device *ib_dev = device->ib_device;
- int ret, i, max_cqe;
+ int i, max_cqe;
- ret = iser_assign_reg_ops(device);
- if (ret)
- return ret;
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ iser_err("IB device does not support memory registrations\n");
+ return -1;
+ }
device->comps_used = min_t(int, num_online_cpus(),
ib_dev->num_comp_vectors);
@@ -147,96 +148,6 @@ static void iser_free_device_ib_res(struct iser_device *device)
device->pd = NULL;
}
-/**
- * iser_alloc_fmr_pool - Creates FMR pool and page_vector
- * @ib_conn: connection RDMA resources
- * @cmds_max: max number of SCSI commands for this connection
- * @size: max number of pages per map request
- *
- * Return: 0 on success, or errno code on failure
- */
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size)
-{
- struct iser_device *device = ib_conn->device;
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_page_vec *page_vec;
- struct iser_fr_desc *desc;
- struct ib_fmr_pool *fmr_pool;
- struct ib_fmr_pool_param params;
- int ret;
-
- INIT_LIST_HEAD(&fr_pool->list);
- spin_lock_init(&fr_pool->lock);
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
- GFP_KERNEL);
- if (!page_vec) {
- ret = -ENOMEM;
- goto err_frpl;
- }
-
- page_vec->pages = (u64 *)(page_vec + 1);
-
- params.page_shift = ilog2(SZ_4K);
- params.max_pages_per_fmr = size;
- /* make the pool size twice the max number of SCSI commands *
- * the ML is expected to queue, watermark for unmap at 50% */
- params.pool_size = cmds_max * 2;
- params.dirty_watermark = cmds_max;
- params.cache = 0;
- params.flush_function = NULL;
- params.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- fmr_pool = ib_create_fmr_pool(device->pd, &params);
- if (IS_ERR(fmr_pool)) {
- ret = PTR_ERR(fmr_pool);
- iser_err("FMR allocation failed, err %d\n", ret);
- goto err_fmr;
- }
-
- desc->rsc.page_vec = page_vec;
- desc->rsc.fmr_pool = fmr_pool;
- list_add(&desc->list, &fr_pool->list);
-
- return 0;
-
-err_fmr:
- kfree(page_vec);
-err_frpl:
- kfree(desc);
-
- return ret;
-}
-
-/**
- * iser_free_fmr_pool - releases the FMR pool and page vec
- * @ib_conn: connection RDMA resources
- */
-void iser_free_fmr_pool(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_fr_desc *desc;
-
- desc = list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
- list_del(&desc->list);
-
- iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, desc->rsc.fmr_pool);
-
- ib_destroy_fmr_pool(desc->rsc.fmr_pool);
- kfree(desc->rsc.page_vec);
- kfree(desc);
-}
-
static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
struct ib_pd *pd,
@@ -667,13 +578,12 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
u32 max_num_sg;
/*
- * FRs without SG_GAPS or FMRs can only map up to a (device) page per
- * entry, but if the first entry is misaligned we'll end up using two
- * entries (head and tail) for a single page worth data, so one
- * additional entry is required.
+ * FRs without SG_GAPS can only map up to a (device) page per entry,
+ * but if the first entry is misaligned we'll end up using two entries
+ * (head and tail) for a single page worth data, so one additional
+ * entry is required.
*/
- if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) &&
- (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ if (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
reserved_mr_pages = 0;
else
reserved_mr_pages = 1;
@@ -684,14 +594,8 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
max_num_sg = attr->max_fast_reg_page_list_len;
sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
- if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
- sup_sg_tablesize =
- min_t(
- uint, ISCSI_ISER_MAX_SG_TABLESIZE,
- max_num_sg - reserved_mr_pages);
- else
- sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
-
+ sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+ max_num_sg - reserved_mr_pages);
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
iser_conn->pages_per_mr =
iser_conn->scsi_sg_tablesize + reserved_mr_pages;
@@ -755,7 +659,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
struct iser_cm_hdr req_hdr;
struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
+ struct ib_device *ib_dev = ib_conn->device->ib_device;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
@@ -766,14 +670,14 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
goto failure;
memset(&conn_param, 0, sizeof conn_param);
- conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
+ conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
conn_param.initiator_depth = 1;
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 6;
memset(&req_hdr, 0, sizeof(req_hdr));
req_hdr.flags = ISER_ZBVA_NOT_SUP;
- if (!device->remote_inv_sup)
+ if (!iser_always_reg)
req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
conn_param.private_data = (void *)&req_hdr;
conn_param.private_data_len = sizeof(struct iser_cm_hdr);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a1a035270cab..b7df38ee8ae0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -15,6 +15,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -502,7 +503,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
if (!np->enabled) {
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("iscsi_np is not enabled, reject connect request\n");
- return rdma_reject(cma_id, NULL, 0);
+ return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
}
spin_unlock_bh(&np->np_thread_lock);
@@ -553,7 +554,7 @@ out_rsp_dma_map:
isert_free_login_buf(isert_conn);
out:
kfree(isert_conn);
- rdma_reject(cma_id, NULL, 0);
+ rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
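rdma_reject() gaining a fourth argument lets the ULP choose the CM reject
reason instead of the core hardcoding one; isert preserves its historical
behaviour by passing IB_CM_REJ_CONSUMER_DEFINED from <rdma/ib_cm.h>, which is
what the new include is for. The updated signature:

	int rdma_reject(struct rdma_cm_id *id, const void *private_data,
			u8 private_data_len, u8 reason);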
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 6e8d650c17c7..874a8eb7638c 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -113,7 +113,7 @@ struct opa_vnic_vema_port {
struct mutex lock;
};
-static void opa_vnic_vema_add_one(struct ib_device *device);
+static int opa_vnic_vema_add_one(struct ib_device *device);
static void opa_vnic_vema_rem_one(struct ib_device *device,
void *client_data);
@@ -989,18 +989,18 @@ static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
*
* Allocate the vnic control port and initialize it.
*/
-static void opa_vnic_vema_add_one(struct ib_device *device)
+static int opa_vnic_vema_add_one(struct ib_device *device)
{
struct opa_vnic_ctrl_port *cport;
int rc, size = sizeof(*cport);
if (!rdma_cap_opa_vnic(device))
- return;
+ return -EOPNOTSUPP;
size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port);
cport = kzalloc(size, GFP_KERNEL);
if (!cport)
- return;
+ return -ENOMEM;
cport->num_ports = device->phys_port_cnt;
cport->ibdev = device;
@@ -1012,6 +1012,7 @@ static void opa_vnic_vema_add_one(struct ib_device *device)
ib_set_client_data(device, &opa_vnic_client, cport);
opa_vnic_ctrl_config_dev(cport, true);
+ return 0;
}
/**
@@ -1026,9 +1027,6 @@ static void opa_vnic_vema_rem_one(struct ib_device *device,
{
struct opa_vnic_ctrl_port *cport = client_data;
- if (!cport)
- return;
-
c_info("removing VNIC client\n");
opa_vnic_ctrl_config_dev(cport, false);
vema_unregister(cport);
diff --git a/drivers/infiniband/ulp/rtrs/Kconfig b/drivers/infiniband/ulp/rtrs/Kconfig
new file mode 100644
index 000000000000..9092b62e6dc8
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config INFINIBAND_RTRS
+ tristate
+ depends on INFINIBAND_ADDR_TRANS
+
+config INFINIBAND_RTRS_CLIENT
+ tristate "RTRS client module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport client module.
+
+ The RDMA Transport (RTRS) client implements a reliable transport layer
+ as well as multipathing functionality; it is intended to be the base
+ layer for a block storage initiator over RDMA.
+
+config INFINIBAND_RTRS_SERVER
+ tristate "RTRS server module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport server module.
+
+ The RDMA Transport (RTRS) server module processes connection and IO
+ requests received from the RTRS client module and passes the IO
+ requests on to its user, e.g. RNBD_server.
diff --git a/drivers/infiniband/ulp/rtrs/Makefile b/drivers/infiniband/ulp/rtrs/Makefile
new file mode 100644
index 000000000000..3898509be270
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+rtrs-client-y := rtrs-clt.o \
+ rtrs-clt-stats.o \
+ rtrs-clt-sysfs.o
+
+rtrs-server-y := rtrs-srv.o \
+ rtrs-srv-stats.o \
+ rtrs-srv-sysfs.o
+
+rtrs-core-y := rtrs.o
+
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs-core.o
+obj-$(CONFIG_INFINIBAND_RTRS_CLIENT) += rtrs-client.o
+obj-$(CONFIG_INFINIBAND_RTRS_SERVER) += rtrs-server.o
diff --git a/drivers/infiniband/ulp/rtrs/README b/drivers/infiniband/ulp/rtrs/README
new file mode 100644
index 000000000000..5d9ea142e5dd
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/README
@@ -0,0 +1,213 @@
+****************************
+RDMA Transport (RTRS)
+****************************
+
+RTRS (RDMA Transport) is a reliable high-speed transport library
+which provides support for establishing an optimal number of connections
+between client and server machines using RDMA (InfiniBand, RoCE, iWarp)
+transport. It is optimized to transfer (read/write) IO blocks.
+
+In its core interface it follows the BIO semantics of providing the
+possibility to either write data from an sg list to the remote side
+or to request ("read") data transfer from the remote side into a given
+sg list.
+
+RTRS provides I/O fail-over and load-balancing capabilities by using
+multipath I/O (see "add_path" and "mp_policy" configuration entries in
+Documentation/ABI/testing/sysfs-class-rtrs-client).
+
+RTRS is used by the RNBD (RDMA Network Block Device) modules.
+
+==================
+Transport protocol
+==================
+
+Overview
+--------
+An established connection between a client and a server is called an rtrs
+session. A session is associated with a set of memory chunks reserved on the
+server side for a given client for rdma transfer. A session
+consists of multiple paths, each representing a separate physical link
+between client and server. Those are used for load balancing and failover.
+Each path consists of as many connections (QPs) as there are cpus on
+the client.
+
+When processing an incoming write or read request, the rtrs client uses
+memory chunks reserved for it on the server side. Their number, size and
+addresses need to be exchanged between client and server during the
+connection establishment phase. Apart from the memory-related information,
+the client needs to inform the server about the session name and to identify
+each path and connection individually.
+
+On an established session the client sends write or read messages to the
+server. The server uses the immediate field to tell the client which request
+is being acknowledged and to carry the errno. The client uses the immediate
+field to tell the server which of the memory chunks has been accessed and at
+which offset the message can be found.
+
+The module parameter always_invalidate was introduced for the security
+problem discussed at the LPC RDMA MC 2019. When always_invalidate=Y, the
+server side invalidates each rdma buffer before handing it over to the RNBD
+server, which then passes it to the block layer. A new rkey is generated and
+registered for the buffer after it returns from the block layer and the RNBD
+server. The new rkey is sent back to the client along with the IO result.
+This procedure is the default behaviour of the driver, and the invalidation
+and registration on each IO cause a performance drop of up to 20%. A user of
+the driver may choose to load the modules with this mechanism switched off
+(always_invalidate=N) if they understand and can accept the risk of a
+malicious client being able to corrupt the memory of a server it is connected
+to. This might be a reasonable option in a scenario where all the clients and
+all the servers are located within a secure datacenter.
+
+
+Connection establishment
+------------------------
+
+1. The client starts establishing the connections belonging to a path of a
+session one by one by attaching RTRS_MSG_CON_REQ messages to the rdma_connect
+requests (an illustrative sketch of this message follows the diagram below).
+Those include the uuid of the session and the uuid of the path to be
+established; they are used by the server to find a persisting session/path or
+to create a new one when necessary. The message also contains the protocol
+version and magic for compatibility, the total number of connections per
+session (as many as there are cpus on the client), the id of the current
+connection and the reconnect counter, which is used to resolve situations
+where the client is trying to reconnect a path while the server is still
+destroying the old one.
+
+2. The server accepts the connection requests one by one and attaches
+RTRS_MSG_CON_RSP messages to the rdma_accept. Apart from the magic
+and the protocol version, the messages include the error code, the
+queue depth supported by the server (the number of memory chunks
+which are going to be allocated for that session) and the maximum
+size of one IO. The RTRS_MSG_NEW_RKEY_F flag is set when
+always_invalidate=Y.
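+
+The information carried by these two messages can be sketched as
+follows. The field names and types are illustrative; the
+authoritative wire format lives in rtrs-pri.h:
+
+    struct conn_req_sketch {
+            __le16 magic;
+            __le16 version;
+            __le16 cid;        /* id of this connection */
+            __le16 cid_num;    /* total connections per path */
+            __le16 recon_cnt;  /* reconnect counter */
+            uuid_t sess_uuid;
+            uuid_t paths_uuid;
+    };
+
+    struct conn_rsp_sketch {
+            __le16 magic;
+            __le16 version;
+            __le16 errno;
+            __le16 queue_depth;  /* memory chunks per session */
+            __le32 max_io_size;
+            __le32 flags;        /* e.g. RTRS_MSG_NEW_RKEY_F */
+    };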
+
+3. After all connections of a path are established, the client sends
+the RTRS_MSG_INFO_REQ message to the server, containing the name of
+the session. This message requests the address information from the
+server.
+
+4. The server replies to the session info request message with
+RTRS_MSG_INFO_RSP, which contains the addresses and keys of the RDMA
+buffers allocated for that session.
+
+5. The session becomes connected after all paths to be established
+are connected (i.e. steps 1-4 have finished for all paths requested
+for a session).
+
+6. The server and client periodically exchange heartbeat messages
+(empty rdma messages with an immediate field) which are used to
+detect a crash of the remote side or a network outage in the absence
+of IO.
+
+7. On any RDMA-related error or in the case of a heartbeat timeout,
+the corresponding path is disconnected, all inflight IO is failed
+over to a healthy path, if any, and the reconnect mechanism is
+triggered.
+
+CLT SRV
+*for each connection belonging to a path and for each path:
+RTRS_MSG_CON_REQ ------------------->
+ <------------------- RTRS_MSG_CON_RSP
+...
+*after all connections are established:
+RTRS_MSG_INFO_REQ ------------------->
+ <------------------- RTRS_MSG_INFO_RSP
+*heartbeat is started from both sides:
+ -------------------> [RTRS_HB_MSG_IMM]
+[RTRS_HB_MSG_ACK] <-------------------
+[RTRS_HB_MSG_IMM] <-------------------
+ -------------------> [RTRS_HB_MSG_ACK]
+
+IO path
+-------
+
+* Write (always_invalidate=N) *
+
+1. When processing a write request, the client selects one of the
+memory chunks on the server side and rdma-writes the user data, the
+user header and the RTRS_MSG_RDMA_WRITE message into it. Apart from
+the type (write), the message only contains the size of the user
+header. The client tells the server which chunk has been accessed
+and at what offset the RTRS_MSG_RDMA_WRITE can be found by using the
+IMM field.
+
+2. When confirming a write request, the server sends an "empty" rdma
+message with an immediate field. The 32-bit field is used to identify
+the outstanding inflight IO and to carry the error code.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+* Write (always_invalidate=Y) *
+
+1. When processing a write request, the client selects one of the
+memory chunks on the server side and rdma-writes the user data, the
+user header and the RTRS_MSG_RDMA_WRITE message into it. Apart from
+the type (write), the message only contains the size of the user
+header. The client tells the server which chunk has been accessed
+and at what offset the RTRS_MSG_RDMA_WRITE can be found by using the
+IMM field. The server first invalidates the rkey associated with the
+memory chunk; once that completes, it passes the IO to the RNBD
+server module.
+
+2. When confirming a write request, the server sends an "empty" rdma
+message with an immediate field. The 32-bit field is used to identify
+the outstanding inflight IO and to carry the error code. The new rkey
+is sent back using a SEND_WITH_IMM WR. When the client receives the
+new rkey message, it validates the message, updates the rkey for the
+rbuffer, completes the IO, and then posts the receive buffer back for
+later use.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+
+* Read (always_invalidate=N) *
+
+1. When processing a read request, the client selects one of the
+memory chunks on the server side and rdma-writes the user header and
+the RTRS_MSG_RDMA_READ message into it. This message contains the
+type (read), the size of the user header, flags (specifying whether
+memory invalidation is necessary) and the list of addresses along
+with keys for the data to be read into.
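+
+The payload of RTRS_MSG_RDMA_READ can be sketched as follows. The
+field names and types are illustrative; the authoritative wire
+format lives in rtrs-pri.h:
+
+    struct sg_desc_sketch {
+            __le64 addr;
+            __le32 key;
+            __le32 len;
+    };
+
+    struct rdma_read_sketch {
+            __le16 type;     /* read */
+            __le16 usr_len;  /* size of the user header */
+            __le16 flags;    /* e.g. RTRS_MSG_NEED_INVAL_F */
+            __le16 sg_cnt;
+            struct sg_desc_sketch desc[];
+    };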
+
+2. When confirming a read request, the server transfers the requested
+data first, attaches an invalidation message if requested, and
+finally sends an "empty" rdma message with an immediate field. The
+32-bit field is used to identify the outstanding inflight IO and to
+carry the error code.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+
+* Read (always_invalidate=Y) *
+
+1. When processing a read request, the client selects one of the
+memory chunks on the server side and rdma-writes the user header and
+the RTRS_MSG_RDMA_READ message into it. This message contains the
+type (read), the size of the user header, flags (specifying whether
+memory invalidation is necessary) and the list of addresses along
+with keys for the data to be read into. The server first invalidates
+the rkey associated with the memory chunk; once that completes, it
+passes the IO to the RNBD server module.
+
+2. When confirming a read request, the server transfers the requested
+data first, attaches an invalidation message if requested, and
+finally sends an "empty" rdma message with an immediate field. The
+32-bit field is used to identify the outstanding inflight IO and to
+carry the error code. The new rkey is sent back using a SEND_WITH_IMM
+WR. When the client receives the new rkey message, it validates the
+message, updates the rkey for the rbuffer, completes the IO, and then
+posts the receive buffer back for later use.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+
+=========================================
+Contributors List (in alphabetical order)
+=========================================
+Danil Kipnis <danil.kipnis@profitbricks.com>
+Fabian Holler <mail@fholler.de>
+Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
+Jack Wang <jinpu.wang@profitbricks.com>
+Kleber Souza <kleber.souza@profitbricks.com>
+Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
+Milind Dumbare <Milind.dumbare@gmail.com>
+Roman Penyaev <roman.penyaev@profitbricks.com>
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
new file mode 100644
index 000000000000..26bbe5d6dff5
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-clt.h"
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ cpu = raw_smp_processor_id();
+ s = this_cpu_ptr(stats->pcpu_stats);
+ if (unlikely(con->cpu != cpu)) {
+ s->cpu_migr.to++;
+
+ /* Careful here, override s pointer */
+ s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
+ atomic_inc(&s->cpu_migr.from);
+ }
+}
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ s = this_cpu_ptr(stats->pcpu_stats);
+ s->rdma.failover_cnt++;
+}
+
+int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats,
+ char *buf, size_t len)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ size_t used;
+ int cpu;
+
+ used = scnprintf(buf, len, " ");
+ for_each_possible_cpu(cpu)
+ used += scnprintf(buf + used, len - used, " CPU%u", cpu);
+
+ used += scnprintf(buf + used, len - used, "\nfrom:");
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += scnprintf(buf + used, len - used, " %d",
+ atomic_read(&s->cpu_migr.from));
+ }
+
+ used += scnprintf(buf + used, len - used, "\nto :");
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += scnprintf(buf + used, len - used, " %d",
+ s->cpu_migr.to);
+ }
+ used += scnprintf(buf + used, len - used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len)
+{
+ return scnprintf(buf, len, "%d %d\n",
+ stats->reconnects.successful_cnt,
+ stats->reconnects.fail_cnt);
+}
+
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len)
+{
+ struct rtrs_clt_stats_rdma sum;
+ struct rtrs_clt_stats_rdma *r;
+ int cpu;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ sum.failover_cnt += r->failover_cnt;
+ }
+
+ return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
+ atomic_read(&stats->inflight), sum.failover_cnt);
+}
+
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s,
+ char *page, size_t len)
+{
+ return scnprintf(page, len, "echo 1 to reset all statistics\n");
+}
+
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->rdma, 0, sizeof(s->rdma));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
+{
+ if (!enable)
+ return -EINVAL;
+
+ memset(&stats->reconnects, 0, sizeof(stats->reconnects));
+
+ return 0;
+}
+
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
+{
+ if (enable) {
+ rtrs_clt_reset_rdma_stats(s, enable);
+ rtrs_clt_reset_cpu_migr_stats(s, enable);
+ rtrs_clt_reset_reconnects_stat(s, enable);
+ atomic_set(&s->inflight, 0);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
+ size_t size, int d)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ s = this_cpu_ptr(stats->pcpu_stats);
+ s->rdma.dir[d].cnt++;
+ s->rdma.dir[d].size_total += size;
+}
+
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_stats *stats = sess->stats;
+ unsigned int len;
+
+ len = req->usr_len + req->data_len;
+ rtrs_clt_update_rdma_stats(stats, len, dir);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_inc(&stats->inflight);
+}
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
+{
+ stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
+ if (!stats->pcpu_stats)
+ return -ENOMEM;
+
+ /*
+ * successful_cnt will be set to 0 after session
+ * is established for the first time
+ */
+ stats->reconnects.successful_cnt = -1;
+
+ return 0;
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
new file mode 100644
index 000000000000..298b747d0330
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define MIN_MAX_RECONN_ATT -1
+#define MAX_MAX_RECONN_ATT 9999
+
+static void rtrs_clt_sess_release(struct kobject *kobj)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ free_sess(sess);
+}
+
+static struct kobj_type ktype_sess = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_sess_release
+};
+
+static void rtrs_clt_sess_stats_release(struct kobject *kobj)
+{
+ struct rtrs_clt_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_clt_stats, kobj_stats);
+
+ free_percpu(stats->pcpu_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_sess_stats_release,
+};
+
+static ssize_t max_reconnect_attempts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ return sprintf(page, "%d\n", rtrs_clt_get_max_reconnect_attempts(clt));
+}
+
+static ssize_t max_reconnect_attempts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int value;
+ int ret;
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (ret) {
+ rtrs_err(clt, "%s: failed to convert string '%s' to int\n",
+ attr->attr.name, buf);
+ return ret;
+ }
+ if (value > MAX_MAX_RECONN_ATT ||
+ value < MIN_MAX_RECONN_ATT) {
+ rtrs_err(clt,
+ "%s: invalid range (provided: '%s', accepted: min: %d, max: %d)\n",
+ attr->attr.name, buf, MIN_MAX_RECONN_ATT,
+ MAX_MAX_RECONN_ATT);
+ return -EINVAL;
+ }
+ rtrs_clt_set_max_reconnect_attempts(clt, value);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(max_reconnect_attempts);
+
+static ssize_t mpath_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt *clt;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ switch (clt->mp_policy) {
+ case MP_POLICY_RR:
+ return sprintf(page, "round-robin (RR: %d)\n", clt->mp_policy);
+ case MP_POLICY_MIN_INFLIGHT:
+ return sprintf(page, "min-inflight (MI: %d)\n", clt->mp_policy);
+ default:
+ return sprintf(page, "Unknown (%d)\n", clt->mp_policy);
+ }
+}
+
+static ssize_t mpath_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rtrs_clt *clt;
+ int value;
+ int ret;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (!ret && (value == MP_POLICY_RR ||
+ value == MP_POLICY_MIN_INFLIGHT)) {
+ clt->mp_policy = value;
+ return count;
+ }
+
+ if (!strncasecmp(buf, "round-robin", 11) ||
+ !strncasecmp(buf, "rr", 2))
+ clt->mp_policy = MP_POLICY_RR;
+ else if (!strncasecmp(buf, "min-inflight", 12) ||
+ !strncasecmp(buf, "mi", 2))
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(mpath_policy);
+
+static ssize_t add_path_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
+}
+
+static ssize_t add_path_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sockaddr_storage srcaddr, dstaddr;
+ struct rtrs_addr addr = {
+ .src = &srcaddr,
+ .dst = &dstaddr
+ };
+ struct rtrs_clt *clt;
+ const char *nl;
+ size_t len;
+ int err;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ nl = strchr(buf, '\n');
+ if (nl)
+ len = nl - buf;
+ else
+ len = count;
+ err = rtrs_addr_to_sockaddr(buf, len, clt->port, &addr);
+ if (err)
+ return -EINVAL;
+
+ err = rtrs_clt_create_path_from_sysfs(clt, &addr);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(add_path);
+
+static ssize_t rtrs_clt_state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (sess->state == RTRS_CLT_CONNECTED)
+ return sprintf(page, "connected\n");
+
+ return sprintf(page, "disconnected\n");
+}
+
+static struct kobj_attribute rtrs_clt_state_attr =
+ __ATTR(state, 0444, rtrs_clt_state_show, NULL);
+
+static ssize_t rtrs_clt_reconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_reconnect_from_sysfs(sess);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_reconnect_attr =
+ __ATTR(reconnect, 0644, rtrs_clt_reconnect_show,
+ rtrs_clt_reconnect_store);
+
+static ssize_t rtrs_clt_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_disconnect_from_sysfs(sess);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_disconnect_attr =
+ __ATTR(disconnect, 0644, rtrs_clt_disconnect_show,
+ rtrs_clt_disconnect_store);
+
+static ssize_t rtrs_clt_remove_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_remove_path_attr =
+ __ATTR(remove_path, 0644, rtrs_clt_remove_path_show,
+ rtrs_clt_remove_path_store);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration,
+ rtrs_clt_stats_migration_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reconnects,
+ rtrs_clt_stats_reconnects_to_str,
+ rtrs_clt_reset_reconnects_stat);
+
+STAT_ATTR(struct rtrs_clt_stats, rdma,
+ rtrs_clt_stats_rdma_to_str,
+ rtrs_clt_reset_rdma_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reset_all,
+ rtrs_clt_reset_all_help,
+ rtrs_clt_reset_all_stats);
+
+static struct attribute *rtrs_clt_stats_attrs[] = {
+ &cpu_migration_attr.attr,
+ &reconnects_attr.attr,
+ &rdma_attr.attr,
+ &reset_all_attr.attr,
+ NULL
+};
+
+static struct attribute_group rtrs_clt_stats_attr_group = {
+ .attrs = rtrs_clt_stats_attrs,
+};
+
+static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, typeof(*sess), kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%u\n", sess->hca_port);
+}
+
+static struct kobj_attribute rtrs_clt_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_clt_hca_port_show, NULL);
+
+static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", sess->hca_name);
+}
+
+static struct kobj_attribute rtrs_clt_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_clt_hca_name_show, NULL);
+
+static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_clt_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_clt_src_addr_show, NULL);
+
+static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_clt_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
+
+static struct attribute *rtrs_clt_sess_attrs[] = {
+ &rtrs_clt_hca_name_attr.attr,
+ &rtrs_clt_hca_port_attr.attr,
+ &rtrs_clt_src_addr_attr.attr,
+ &rtrs_clt_dst_addr_attr.attr,
+ &rtrs_clt_state_attr.attr,
+ &rtrs_clt_reconnect_attr.attr,
+ &rtrs_clt_disconnect_attr.attr,
+ &rtrs_clt_remove_path_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_clt_sess_attr_group = {
+ .attrs = rtrs_clt_sess_attrs,
+};
+
+int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ char str[NAME_MAX];
+ int err, cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ str, sizeof(str));
+ cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ str + cnt, sizeof(str) - cnt);
+
+ err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
+ "%s", str);
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ return err;
+ }
+ err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ if (err) {
+ pr_err("sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
+ &sess->kobj, "stats");
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ goto remove_group;
+ }
+
+ err = sysfs_create_group(&sess->stats->kobj_stats,
+ &rtrs_clt_stats_attr_group);
+ if (err) {
+ pr_err("failed to create stats sysfs group, err: %d\n", err);
+ goto put_kobj_stats;
+ }
+
+ return 0;
+
+put_kobj_stats:
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+remove_group:
+ sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+put_kobj:
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+
+ return err;
+}
+
+void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self)
+{
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+ if (sysfs_self)
+ sysfs_remove_file_self(&sess->kobj, sysfs_self);
+ kobject_del(&sess->kobj);
+}
+
+static struct attribute *rtrs_clt_attrs[] = {
+ &dev_attr_max_reconnect_attempts.attr,
+ &dev_attr_mpath_policy.attr,
+ &dev_attr_add_path.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_clt_attr_group = {
+ .attrs = rtrs_clt_attrs,
+};
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
+{
+ return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
+
+void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt)
+{
+ if (clt->kobj_paths) {
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ }
+}
+
+void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt)
+{
+ sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
new file mode 100644
index 000000000000..564388a85603
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -0,0 +1,2992 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define RTRS_CONNECT_TIMEOUT_MS 30000
+/*
+ * Wait a bit before trying to reconnect after a failure
+ * in order to give the server time to finish its clean up,
+ * which otherwise leads to "false positive" failed reconnect attempts
+ */
+#define RTRS_RECONNECT_BACKOFF 1000
+
+MODULE_DESCRIPTION("RDMA Transport Client");
+MODULE_LICENSE("GPL");
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
+
+static struct workqueue_struct *rtrs_wq;
+static struct class *rtrs_clt_dev_class;
+
+static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
+{
+ struct rtrs_clt_sess *sess;
+ bool connected = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
+ connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
+ rcu_read_unlock();
+
+ return connected;
+}
+
+static struct rtrs_permit *
+__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
+{
+ size_t max_depth = clt->queue_depth;
+ struct rtrs_permit *permit;
+ int bit;
+
+ /*
+ * Adapted from null_blk get_tag(). Callers from different cpus may
+ * grab the same bit, since find_first_zero_bit is not atomic.
+ * But then the test_and_set_bit_lock will fail for all the
+ * callers but one, so that they will loop again.
+ * This way an explicit spinlock is not required.
+ */
+ do {
+ bit = find_first_zero_bit(clt->permits_map, max_depth);
+ if (unlikely(bit >= max_depth))
+ return NULL;
+ } while (unlikely(test_and_set_bit_lock(bit, clt->permits_map)));
+
+ permit = get_permit(clt, bit);
+ WARN_ON(permit->mem_id != bit);
+ permit->cpu_id = raw_smp_processor_id();
+ permit->con_type = con_type;
+
+ return permit;
+}
+
+static inline void __rtrs_put_permit(struct rtrs_clt *clt,
+ struct rtrs_permit *permit)
+{
+ clear_bit_unlock(permit->mem_id, clt->permits_map);
+}
+
+/**
+ * rtrs_clt_get_permit() - allocates permit for future RDMA operation
+ * @clt: Current session
+ * @con_type: Type of connection to use with the permit
+ * @can_wait: Wait type
+ *
+ * Description:
+ * Allocates permit for the following RDMA operation. Permit is used
+ * to preallocate all resources and to propagate memory pressure
+ * up earlier.
+ *
+ * Context:
+ * Can sleep if @can_wait == RTRS_PERMIT_WAIT
+ */
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
+ enum rtrs_clt_con_type con_type,
+ int can_wait)
+{
+ struct rtrs_permit *permit;
+ DEFINE_WAIT(wait);
+
+ permit = __rtrs_get_permit(clt, con_type);
+ if (likely(permit) || !can_wait)
+ return permit;
+
+ do {
+ prepare_to_wait(&clt->permits_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ permit = __rtrs_get_permit(clt, con_type);
+ if (likely(permit))
+ break;
+
+ io_schedule();
+ } while (1);
+
+ finish_wait(&clt->permits_wait, &wait);
+
+ return permit;
+}
+EXPORT_SYMBOL(rtrs_clt_get_permit);
+
+/**
+ * rtrs_clt_put_permit() - puts allocated permit
+ * @clt: Current session
+ * @permit: Permit to be freed
+ *
+ * Context:
+ * Does not matter
+ */
+void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
+{
+ if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
+ return;
+
+ __rtrs_put_permit(clt, permit);
+
+ /*
+ * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
+ * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
+ * it must have added itself to &clt->permits_wait before
+ * __rtrs_put_permit() finished.
+ * Hence it is safe to guard wake_up() with a waitqueue_active() test.
+ */
+ if (waitqueue_active(&clt->permits_wait))
+ wake_up(&clt->permits_wait);
+}
+EXPORT_SYMBOL(rtrs_clt_put_permit);
+
+void *rtrs_permit_to_pdu(struct rtrs_permit *permit)
+{
+ return permit + 1;
+}
+EXPORT_SYMBOL(rtrs_permit_to_pdu);
+
+/**
+ * rtrs_permit_to_clt_con() - return the RDMA connection for a permit
+ * @sess: client session pointer
+ * @permit: permit for the allocation of the RDMA buffer
+ * Note:
+ * IO connections start from 1.
+ * Connection 0 is reserved for user messages.
+ */
+static
+struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
+ struct rtrs_permit *permit)
+{
+ int id = 0;
+
+ if (likely(permit->con_type == RTRS_IO_CON))
+ id = (permit->cpu_id % (sess->s.con_num - 1)) + 1;
+
+ return to_clt_con(sess->s.con[id]);
+}
+
+/**
+ * __rtrs_clt_change_state() - change the session state through session state
+ * machine.
+ *
+ * @sess: client session to change the state of.
+ * @new_state: state to change to.
+ *
+ * returns true if successful, false if the requested state cannot be set.
+ *
+ * Locks:
+ * state_wq lock must be held.
+ */
+static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&sess->state_wq.lock);
+
+ old_state = sess->state;
+ switch (new_state) {
+ case RTRS_CLT_CONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_RECONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_RECONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTED:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTED:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTING_ERR:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_RECONNECTING:
+ case RTRS_CLT_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSED:
+ switch (old_state) {
+ case RTRS_CLT_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_DEAD:
+ switch (old_state) {
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed) {
+ sess->state = new_state;
+ wake_up_locked(&sess->state_wq);
+ }
+
+ return changed;
+}
+
+static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state old_state,
+ enum rtrs_clt_state new_state)
+{
+ bool changed = false;
+
+ spin_lock_irq(&sess->state_wq.lock);
+ if (sess->state == old_state)
+ changed = __rtrs_clt_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_wq.lock);
+
+ return changed;
+}
+
+static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ if (rtrs_clt_change_state_from_to(sess,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_RECONNECTING)) {
+ struct rtrs_clt *clt = sess->clt;
+ unsigned int delay_ms;
+
+ /*
+ * Normal scenario, reconnect if we were successfully connected
+ */
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ msecs_to_jiffies(delay_ms));
+ } else {
+ /*
+ * An error can happen only while establishing a new connection,
+ * so notify the waiter with an error state; the waiter is
+ * responsible for cleaning up the rest and reconnecting if needed.
+ */
+ rtrs_clt_change_state_from_to(sess,
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR);
+ }
+}
+
+static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static struct ib_cqe fast_reg_cqe = {
+ .done = rtrs_clt_fast_reg_done
+};
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait);
+
+static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_io_req *req =
+ container_of(wc->wr_cqe, typeof(*req), inv_cqe);
+ struct rtrs_clt_con *con = cq->cq_context;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ req->need_inv = false;
+ if (likely(req->need_inv_comp))
+ complete(&req->inv_comp);
+ else
+ /* Complete request from INV callback */
+ complete_rdma_req(req, req->inv_errno, true, false);
+}
+
+static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &req->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+ req->inv_cqe.done = rtrs_clt_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_sess *sess;
+ int err;
+
+ if (WARN_ON(!req->in_use))
+ return;
+ if (WARN_ON(!req->con))
+ return;
+ sess = to_clt_sess(con->c.sess);
+
+ if (req->sg_cnt) {
+ if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
+ /*
+ * We are here to invalidate read requests
+ * ourselves. In the normal scenario the server
+ * sends INV for all read requests, so if we got
+ * here one of two things has happened:
+ *
+ * 1. this is a failover, when errno != 0
+ * and can_wait == 1,
+ *
+ * 2. something went badly wrong and the
+ * server forgot to send the INV, so we
+ * have to do it ourselves.
+ */
+
+ if (likely(can_wait)) {
+ req->need_inv_comp = true;
+ } else {
+ /* This should be IO path, so always notify */
+ WARN_ON(!notify);
+ /* Save errno for INV callback */
+ req->inv_errno = errno;
+ }
+
+ err = rtrs_inv_rkey(req);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
+ req->mr->rkey, err);
+ } else if (likely(can_wait)) {
+ wait_for_completion(&req->inv_comp);
+ } else {
+ /*
+ * Something went wrong, so request will be
+ * completed from INV callback.
+ */
+ WARN_ON_ONCE(1);
+
+ return;
+ }
+ }
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+
+ req->in_use = false;
+ req->con = NULL;
+
+ if (notify)
+ req->conf(req->priv, errno);
+}
+
+static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, u32 off,
+ u32 imm, struct ib_send_wr *wr)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ enum ib_send_flags flags;
+ struct ib_sge sge;
+
+ if (unlikely(!req->sg_size)) {
+ rtrs_wrn(con->c.sess,
+ "Doing RDMA Write failed, no data supplied\n");
+ return -EINVAL;
+ }
+
+ /* user data and user message in the first list element */
+ sge.addr = req->iu->dma_addr;
+ sge.length = req->sg_size;
+ sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ req->sg_size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
+ rbuf->rkey, rbuf->addr + off,
+ imm, flags, wr);
+}
+
+static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
+ s16 errno, bool w_inval)
+{
+ struct rtrs_clt_io_req *req;
+
+ if (WARN_ON(msg_id >= sess->queue_depth))
+ return;
+
+ req = &sess->reqs[msg_id];
+ /* Drop need_inv if server responded with send with invalidation */
+ req->need_inv &= !w_inval;
+ complete_rdma_req(req, errno, true, false);
+}
+
+static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_iu *iu;
+ int err;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu,
+ cqe);
+ err = rtrs_iu_post_recv(&con->c, iu);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "post iu failed %d\n", err);
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_msg_rkey_rsp *msg;
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ struct rtrs_iu *iu;
+ u32 buf_id;
+ int err;
+
+ WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
+ rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ buf_id = le16_to_cpu(msg->buf_id);
+ if (WARN_ON(buf_id >= sess->queue_depth))
+ goto out;
+
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ if (WARN_ON(buf_id != msg_id))
+ goto out;
+ sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+ process_io_rsp(sess, msg_id, err, w_inval);
+ }
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ return rtrs_clt_recv_done(con, wc);
+out:
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_clt_rdma_done
+};
+
+/*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr_arr[2], *wr;
+ int i;
+
+ memset(wr_arr, 0, sizeof(wr_arr));
+ for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
+ wr = &wr_arr[i];
+ wr->wr_cqe = cqe;
+ if (i)
+ /* Chain backwards */
+ wr->next = &wr_arr[i - 1];
+ }
+
+ return ib_post_recv(con->qp, wr, NULL);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ int err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(sess->clt, "RDMA failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ return;
+ }
+ rtrs_clt_update_wc_stats(con);
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
+ return;
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ process_io_rsp(sess, msg_id, err, w_inval);
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&sess->s);
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ sess->s.hb_missed_cnt = 0;
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else {
+ rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
+ imm_type);
+ }
+ if (w_inval)
+ /*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+ err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
+ else
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
+ err);
+ rtrs_rdma_error_recovery(con);
+ break;
+ }
+ break;
+ case IB_WC_RECV:
+ /*
+ * Key invalidations from server side
+ */
+ WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
+ wc->wc_flags & IB_WC_WITH_IMM));
+ WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
+ return rtrs_clt_recv_done(con, wc);
+
+ return rtrs_clt_rkey_rsp_done(con, wc);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ break;
+
+ default:
+ rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
+{
+ int err, i;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ for (i = 0; i < q_size; i++) {
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ struct rtrs_iu *iu = &con->rsp_ius[i];
+
+ err = rtrs_iu_post_recv(&con->c, iu);
+ } else {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ }
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_sess(struct rtrs_clt_sess *sess)
+{
+ size_t q_size = 0;
+ int err, cid;
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = sess->queue_depth;
+
+ /*
+ * x2 for RDMA read responses + FR key invalidations,
+ * RDMA writes do not require any FR registrations.
+ */
+ q_size *= 2;
+
+ err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+struct path_it {
+ int i;
+ struct list_head skip_list;
+ struct rtrs_clt *clt;
+ struct rtrs_clt_sess *(*next_path)(struct path_it *it);
+};
+
+/**
+ * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
+ * @head: the head for the list.
+ * @ptr: the list head to take the next element from.
+ * @type: the type of the struct this is embedded in.
+ * @memb: the name of the list_head within the struct.
+ *
+ * The next element is returned in round-robin fashion, i.e. the head
+ * is skipped, but if the list is observed to be empty, NULL is returned.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_next_or_null_rr_rcu(head, ptr, type, memb) \
+({ \
+ list_next_or_null_rcu(head, ptr, type, memb) ?: \
+ list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
+ type, memb); \
+})
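+
+/*
+ * Example usage (must be called under rcu_read_lock()):
+ *
+ * next = list_next_or_null_rr_rcu(&clt->paths_list, &path->s.entry,
+ * typeof(*path), s.entry);
+ */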
+
+/**
+ * get_next_path_rr() - Returns path in round-robin fashion.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_RR
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
+{
+ struct rtrs_clt_sess __rcu **ppcpu_path;
+ struct rtrs_clt_sess *path;
+ struct rtrs_clt *clt;
+
+ clt = it->clt;
+
+ /*
+ * Here we use two RCU objects: @paths_list and @pcpu_path
+ * pointer. See rtrs_clt_remove_path_from_arr() for details
+ * how that is handled.
+ */
+
+ ppcpu_path = this_cpu_ptr(clt->pcpu_path);
+ path = rcu_dereference(*ppcpu_path);
+ if (unlikely(!path))
+ path = list_first_or_null_rcu(&clt->paths_list,
+ typeof(*path), s.entry);
+ else
+ path = list_next_or_null_rr_rcu(&clt->paths_list,
+ &path->s.entry,
+ typeof(*path),
+ s.entry);
+ rcu_assign_pointer(*ppcpu_path, path);
+
+ return path;
+}
+
+/**
+ * get_next_path_min_inflight() - Returns path with minimal inflight count.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_MIN_INFLIGHT
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
+{
+ struct rtrs_clt_sess *min_path = NULL;
+ struct rtrs_clt *clt = it->clt;
+ struct rtrs_clt_sess *sess;
+ int min_inflight = INT_MAX;
+ int inflight;
+
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
+ if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+ continue;
+
+ inflight = atomic_read(&sess->stats->inflight);
+
+ if (inflight < min_inflight) {
+ min_inflight = inflight;
+ min_path = sess;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
+static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
+{
+ INIT_LIST_HEAD(&it->skip_list);
+ it->clt = clt;
+ it->i = 0;
+
+ if (clt->mp_policy == MP_POLICY_RR)
+ it->next_path = get_next_path_rr;
+ else
+ it->next_path = get_next_path_min_inflight;
+}
+
+static inline void path_it_deinit(struct path_it *it)
+{
+ struct list_head *skip, *tmp;
+ /*
+ * The skip_list is used only for the MIN_INFLIGHT policy.
+ * We need to remove paths from it, so that next IO can insert
+ * paths (->mp_skip_entry) into a skip_list again.
+ */
+ list_for_each_safe(skip, tmp, &it->skip_list)
+ list_del_init(skip);
+}
+
+/**
+ * rtrs_clt_init_req() - initialize an rtrs_clt_io_req holding information
+ * about an inflight IO.
+ * The user buffer holding the user control message (not data) is copied
+ * into the corresponding buffer of rtrs_iu (req->iu->buf), which later on
+ * will also hold the rtrs control message.
+ * @req: an io request holding information about IO.
+ * @sess: client session
+ * @conf: confirmation callback function to notify the upper layer.
+ * @permit: permit for allocation of RDMA remote buffer
+ * @priv: private pointer
+ * @vec: kernel vector containing control message
+ * @usr_len: length of the user message
+ * @sg: scatter list for IO data
+ * @sg_cnt: number of scatter list entries
+ * @data_len: length of the IO data
+ * @dir: direction of the IO.
+ */
+static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
+ struct rtrs_clt_sess *sess,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct iov_iter iter;
+ size_t len;
+
+ req->permit = permit;
+ req->in_use = true;
+ req->usr_len = usr_len;
+ req->data_len = data_len;
+ req->sglist = sg;
+ req->sg_cnt = sg_cnt;
+ req->priv = priv;
+ req->dir = dir;
+ req->con = rtrs_permit_to_clt_con(sess, permit);
+ req->conf = conf;
+ req->need_inv = false;
+ req->need_inv_comp = false;
+ req->inv_errno = 0;
+
+ iov_iter_kvec(&iter, READ, vec, 1, usr_len);
+ len = _copy_from_iter(req->iu->buf, usr_len, &iter);
+ WARN_ON(len != usr_len);
+
+ reinit_completion(&req->inv_comp);
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_req(struct rtrs_clt_sess *sess,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct rtrs_clt_io_req *req;
+
+ req = &sess->reqs[permit->mem_id];
+ rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
+ sg, sg_cnt, data_len, dir);
+ return req;
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_io_req *req;
+ struct kvec vec = {
+ .iov_base = fail_req->iu->buf,
+ .iov_len = fail_req->usr_len
+ };
+
+ req = &alive_sess->reqs[fail_req->permit->mem_id];
+ rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
+ fail_req->priv, &vec, fail_req->usr_len,
+ fail_req->sglist, fail_req->sg_cnt,
+ fail_req->data_len, fail_req->dir);
+ return req;
+}
+
+static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf,
+ u32 size, u32 imm)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct ib_sge *sge = req->sge;
+ enum ib_send_flags flags;
+ struct scatterlist *sg;
+ size_t num_sge;
+ int i;
+
+ for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ }
+ sge[i].addr = req->iu->dma_addr;
+ sge[i].length = size;
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+
+ num_sge = 1 + req->sg_cnt;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
+ rbuf->rkey, rbuf->addr, imm,
+ flags, NULL);
+}
+
+static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_msg_rdma_write *msg;
+
+ struct rtrs_rbuf *rbuf;
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ if (unlikely(tsize > sess->chunk_size)) {
+ rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
+ tsize, sess->chunk_size);
+ return -EMSGSIZE;
+ }
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ if (unlikely(!count)) {
+ rtrs_wrn(s, "Write request failed, map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put rtrs msg after sg and user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_WRITE);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ /* rtrs message on server side will be after user data and message */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+ req->sg_size = tsize;
+ rbuf = &sess->rbufs[buf_id];
+
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, WRITE);
+
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
+ req->usr_len + sizeof(*msg),
+ imm);
+ if (unlikely(ret)) {
+ rtrs_err(s, "Write request failed: %d\n", ret);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
+{
+ int nr;
+
+ /* Align the MR to a 4K page size to match the block virt boundary */
+ nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
+ if (nr < 0)
+ return nr;
+ if (unlikely(nr < req->sg_cnt))
+ return -EINVAL;
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ return nr;
+}
+
+static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_msg_rdma_read *msg;
+ struct rtrs_ib_dev *dev;
+
+ struct ib_reg_wr rwr;
+ struct ib_send_wr *wr = NULL;
+
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ s = &sess->s;
+ dev = sess->s.dev;
+
+ if (unlikely(tsize > sess->chunk_size)) {
+ rtrs_wrn(s,
+ "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
+ tsize, sess->chunk_size);
+ return -EMSGSIZE;
+ }
+
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ if (unlikely(!count)) {
+ rtrs_wrn(s,
+ "Read request failed, dma map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put our message into req->buf after the user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_READ);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Read request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ return ret;
+ }
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
+ };
+ wr = &rwr.wr;
+
+ msg->sg_cnt = cpu_to_le16(1);
+ msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
+
+ msg->desc[0].addr = cpu_to_le64(req->mr->iova);
+ msg->desc[0].key = cpu_to_le32(req->mr->rkey);
+ msg->desc[0].len = cpu_to_le32(req->mr->length);
+
+ /* Further invalidation is required */
+ req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
+
+ } else {
+ msg->sg_cnt = 0;
+ msg->flags = 0;
+ }
+ /*
+ * rtrs message will be after the space reserved for disk data and
+ * user message
+ */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+
+ req->sg_size = sizeof(*msg);
+ req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
+ req->sg_size += req->usr_len;
+
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, READ);
+
+ ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
+ req->data_len, imm, wr);
+ if (unlikely(ret)) {
+ rtrs_err(s, "Read request failed: %d\n", ret);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+ req->need_inv = false;
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+/**
+ * rtrs_clt_failover_req() - try to find an active path for a failed request
+ * @clt: clt context
+ * @fail_req: a failed io request.
+ */
+static int rtrs_clt_failover_req(struct rtrs_clt *clt,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_sess *alive_sess;
+ struct rtrs_clt_io_req *req;
+ int err = -ECONNABORTED;
+ struct path_it it;
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
+ it.i++) {
+ if (unlikely(READ_ONCE(alive_sess->state) !=
+ RTRS_CLT_CONNECTED))
+ continue;
+ req = rtrs_clt_get_copy_req(alive_sess, fail_req);
+ if (req->dir == DMA_TO_DEVICE)
+ err = rtrs_clt_write_req(req);
+ else
+ err = rtrs_clt_read_req(req);
+ if (unlikely(err)) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ rtrs_clt_inc_failover_cnt(alive_sess->stats);
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+
+static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_io_req *req;
+ int i, err;
+
+ if (!sess->reqs)
+ return;
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ if (!req->in_use)
+ continue;
+
+ /*
+ * Safely (without notification) complete failed request.
+ * After completion this request is still usable and can
+ * be failed over to another path.
+ */
+ complete_rdma_req(req, -ECONNABORTED, false, true);
+
+ err = rtrs_clt_failover_req(clt, req);
+ if (unlikely(err))
+ /* Failover failed, notify anyway */
+ req->conf(req->priv, err);
+ }
+}
+
+static void free_sess_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_io_req *req;
+ int i;
+
+ if (!sess->reqs)
+ return;
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ if (req->mr)
+ ib_dereg_mr(req->mr);
+ kfree(req->sge);
+ rtrs_iu_free(req->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+ }
+ kfree(sess->reqs);
+ sess->reqs = NULL;
+}
+
+static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt *clt = sess->clt;
+ int i, err = -ENOMEM;
+
+ sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
+ GFP_KERNEL);
+ if (!sess->reqs)
+ return -ENOMEM;
+
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
+ sess->s.dev->ib_dev,
+ DMA_TO_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!req->iu)
+ goto out;
+
+ req->sge = kmalloc_array(clt->max_segments + 1,
+ sizeof(*req->sge), GFP_KERNEL);
+ if (!req->sge)
+ goto out;
+
+ req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ sess->max_pages_per_mr);
+ if (IS_ERR(req->mr)) {
+ err = PTR_ERR(req->mr);
+ req->mr = NULL;
+ pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
+ sess->max_pages_per_mr);
+ goto out;
+ }
+
+ init_completion(&req->inv_comp);
+ }
+
+ return 0;
+
+out:
+ free_sess_reqs(sess);
+
+ return err;
+}
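+
+/*
+ * Note on the sizing above (illustrative): each request gets
+ * clt->max_segments + 1 SGEs, the extra entry being reserved for the
+ * rtrs header that is sent in front of the user scatterlist, plus one
+ * MR covering up to max_pages_per_mr pages for fast registration.
+ */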
+
+static int alloc_permits(struct rtrs_clt *clt)
+{
+ unsigned int chunk_bits;
+ int err, i;
+
+ clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
+ sizeof(long), GFP_KERNEL);
+ if (!clt->permits_map) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
+ if (!clt->permits) {
+ err = -ENOMEM;
+ goto err_map;
+ }
+ chunk_bits = ilog2(clt->queue_depth - 1) + 1;
+ for (i = 0; i < clt->queue_depth; i++) {
+ struct rtrs_permit *permit;
+
+ permit = get_permit(clt, i);
+ permit->mem_id = i;
+ permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
+ }
+
+ return 0;
+
+err_map:
+ kfree(clt->permits_map);
+ clt->permits_map = NULL;
+out_err:
+ return err;
+}
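+
+/*
+ * Worked example for the layout above (illustrative, queue_depth = 512):
+ * chunk_bits = ilog2(511) + 1 = 9, so permit i gets
+ * mem_off = i << (28 - 9) = i << 19. In the immediate payload the top
+ * 9 of the 28 bits thus identify the chunk (mem_id) and the low 19 bits
+ * carry the byte offset inside that chunk.
+ */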
+
+static void free_permits(struct rtrs_clt *clt)
+{
+ kfree(clt->permits_map);
+ clt->permits_map = NULL;
+ kfree(clt->permits);
+ clt->permits = NULL;
+}
+
+static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
+{
+ struct ib_device *ib_dev;
+ u64 max_pages_per_mr;
+ int mr_page_shift;
+
+ ib_dev = sess->s.dev->ib_dev;
+
+ /*
+ * Use the smallest page size supported by the HCA, down to a
+ * minimum of 4096 bytes. We're unlikely to build large sglists
+ * out of smaller entries.
+ */
+ mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
+ max_pages_per_mr = ib_dev->attrs.max_mr_size;
+ do_div(max_pages_per_mr, (1ull << mr_page_shift));
+ sess->max_pages_per_mr =
+ min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
+ ib_dev->attrs.max_fast_reg_page_list_len);
+ sess->max_send_sge = ib_dev->attrs.max_send_sge;
+}
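+
+/*
+ * Worked example (illustrative): for an HCA whose smallest supported
+ * page size is 4K and which reports max_mr_size = 256M, mr_page_shift
+ * is 12 and max_pages_per_mr = 256M / 4K = 65536; the session value is
+ * additionally clamped by max_fast_reg_page_list_len.
+ */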
+
+static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state,
+ enum rtrs_clt_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&sess->state_wq.lock);
+ *old_state = sess->state;
+ changed = __rtrs_clt_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_wq.lock);
+
+ return changed;
+}
+
+static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+
+ return rtrs_clt_change_state_get_old(sess, new_state, &old_state);
+}
+
+static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
+{
+ struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
+
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_init_hb(&sess->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_clt_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_start_hb(&sess->s);
+}
+
+static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_stop_hb(&sess->s);
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work);
+static void rtrs_clt_close_work(struct work_struct *work);
+
+static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
+ const struct rtrs_addr *path,
+ size_t con_num, u16 max_segments,
+ size_t max_segment_size)
+{
+ struct rtrs_clt_sess *sess;
+ int err = -ENOMEM;
+ int cpu;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ goto err;
+
+ /* Extra connection for user messages */
+ con_num += 1;
+
+ sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
+ if (!sess->s.con)
+ goto err_free_sess;
+
+ sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
+ if (!sess->stats)
+ goto err_free_con;
+
+ mutex_init(&sess->init_mutex);
+ uuid_gen(&sess->s.uuid);
+ memcpy(&sess->s.dst_addr, path->dst,
+ rdma_addr_size((struct sockaddr *)path->dst));
+
+	/*
+	 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
+	 * checks that sa_family is non-zero. If the user passed src_addr=NULL,
+	 * sess->src_addr will contain only zeros, which is fine.
+	 */
+ if (path->src)
+ memcpy(&sess->s.src_addr, path->src,
+ rdma_addr_size((struct sockaddr *)path->src));
+ strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
+ sess->s.con_num = con_num;
+ sess->clt = clt;
+ sess->max_pages_per_mr = max_segments * max_segment_size >> 12;
+ init_waitqueue_head(&sess->state_wq);
+ sess->state = RTRS_CLT_CONNECTING;
+ atomic_set(&sess->connected_cnt, 0);
+ INIT_WORK(&sess->close_work, rtrs_clt_close_work);
+ INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
+ rtrs_clt_init_hb(sess);
+
+ sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
+ if (!sess->mp_skip_entry)
+ goto err_free_stats;
+
+ for_each_possible_cpu(cpu)
+ INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
+
+ err = rtrs_clt_init_stats(sess->stats);
+ if (err)
+ goto err_free_percpu;
+
+ return sess;
+
+err_free_percpu:
+ free_percpu(sess->mp_skip_entry);
+err_free_stats:
+ kfree(sess->stats);
+err_free_con:
+ kfree(sess->s.con);
+err_free_sess:
+ kfree(sess);
+err:
+ return ERR_PTR(err);
+}
+
+void free_sess(struct rtrs_clt_sess *sess)
+{
+ free_percpu(sess->mp_skip_entry);
+ mutex_destroy(&sess->init_mutex);
+ kfree(sess->s.con);
+ kfree(sess->rbufs);
+ kfree(sess);
+}
+
+static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
+{
+ struct rtrs_clt_con *con;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con)
+ return -ENOMEM;
+
+ /* Map first two connections to the first CPU */
+ con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
+ con->c.cid = cid;
+ con->c.sess = &sess->s;
+ atomic_set(&con->io_cnt, 0);
+
+ sess->s.con[cid] = &con->c;
+
+ return 0;
+}
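+
+/*
+ * Illustrative note on the cid -> cpu mapping above: cid 0 (the user
+ * connection) and cid 1 both land on CPU 0, cid 2 on CPU 1, and so on,
+ * wrapping modulo nr_cpu_ids. create_con_cq_qp() later derives the
+ * completion vector from con->cpu, so IO connections spread over the
+ * device's comp vectors.
+ */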
+
+static void destroy_con(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ sess->s.con[con->c.cid] = NULL;
+ kfree(con);
+}
+
+static int create_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ u16 wr_queue_size;
+ int err, cq_vector;
+ struct rtrs_msg_rkey_rsp *rsp;
+
+	/*
+	 * This function can fail, but destroy_con_cq_qp() should still
+	 * be called. This is because create_con_cq_qp() is called on the cm
+	 * event path, thus the caller/waiter never knows whether we failed
+	 * before create_con_cq_qp() or after. To solve this dilemma without
+	 * creating any additional flags, just allow destroy_con_cq_qp() to
+	 * be called many times.
+	 */
+
+ if (con->c.cid == 0) {
+ /*
+ * One completion for each receive and two for each send
+ * (send request + registration)
+ * + 2 for drain and heartbeat
+ * in case qp gets into error state
+ */
+ wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ /* We must be the first here */
+ if (WARN_ON(sess->s.dev))
+ return -EINVAL;
+
+		/*
+		 * The whole session uses the device from the user connection.
+		 * Be careful not to close the user connection before the ib
+		 * dev is gracefully put.
+		 */
+ sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
+ &dev_pd);
+ if (!sess->s.dev) {
+			rtrs_wrn(sess->clt,
+				 "rtrs_ib_dev_find_or_add(): no memory\n");
+ return -ENOMEM;
+ }
+ sess->s.dev_ref = 1;
+ query_fast_reg_mode(sess);
+ } else {
+ /*
+ * Here we assume that session members are correctly set.
+ * This is always true if user connection (cid == 0) is
+ * established first.
+ */
+ if (WARN_ON(!sess->s.dev))
+ return -EINVAL;
+ if (WARN_ON(!sess->queue_depth))
+ return -EINVAL;
+
+ /* Shared between connections */
+ sess->s.dev_ref++;
+ wr_queue_size =
+ min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+ sess->queue_depth * 3 + 1);
+ }
+	/* alloc iu to recv a new rkey reply when the server reports the flag set */
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!con->rsp_ius)
+ return -ENOMEM;
+ con->queue_size = wr_queue_size;
+ }
+ cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
+ err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
+ cq_vector, wr_queue_size, wr_queue_size,
+ IB_POLL_SOFTIRQ);
+ /*
+ * In case of error we do not bother to clean previous allocations,
+ * since destroy_con_cq_qp() must be called.
+ */
+ return err;
+}
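+
+/*
+ * Illustrative numbers for wr_queue_size above: the user connection
+ * (cid == 0) gets SERVICE_CON_QUEUE_DEPTH * 3 + 2 = 1538 WRs; an IO
+ * connection with, say, queue_depth = 512 gets min(max_qp_wr,
+ * 512 * 3 + 1 = 1537), i.e. request + response + FR registration or
+ * invalidation per IO, plus one drain WR.
+ */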
+
+static void destroy_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+	/*
+	 * Be careful here: destroy_con_cq_qp() can be called even if
+	 * create_con_cq_qp() failed, see comments there.
+	 */
+
+ rtrs_cq_qp_destroy(&con->c);
+ if (con->rsp_ius) {
+ rtrs_iu_free(con->rsp_ius, DMA_FROM_DEVICE,
+ sess->s.dev->ib_dev, con->queue_size);
+ con->rsp_ius = NULL;
+ con->queue_size = 0;
+ }
+ if (sess->s.dev_ref && !--sess->s.dev_ref) {
+ rtrs_ib_dev_put(sess->s.dev);
+ sess->s.dev = NULL;
+ }
+}
+
+static void stop_cm(struct rtrs_clt_con *con)
+{
+ rdma_disconnect(con->c.cm_id);
+ if (con->c.qp)
+ ib_drain_qp(con->c.qp);
+}
+
+static void destroy_cm(struct rtrs_clt_con *con)
+{
+ rdma_destroy_id(con->c.cm_id);
+ con->c.cm_id = NULL;
+}
+
+static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ int err;
+
+ err = create_con_cq_qp(con);
+ if (err) {
+ rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
+ return err;
+ }
+ err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Resolving route failed, err: %d\n", err);
+ destroy_con_cq_qp(con);
+ }
+
+ return err;
+}
+
+static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_msg_conn_req msg;
+ struct rdma_conn_param param;
+
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .retry_count = 7,
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_req) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .cid = cpu_to_le16(con->c.cid),
+ .cid_num = cpu_to_le16(sess->s.con_num),
+ .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
+ };
+ uuid_copy(&msg.sess_uuid, &sess->s.uuid);
+ uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
+
+ err = rdma_connect(con->c.cm_id, &param);
+ if (err)
+ rtrs_err(clt, "rdma_connect(): %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt *clt = sess->clt;
+ const struct rtrs_msg_conn_rsp *msg;
+ u16 version, queue_depth;
+ int errno;
+ u8 len;
+
+ msg = ev->param.conn.private_data;
+ len = ev->param.conn.private_data_len;
+ if (len < sizeof(*msg)) {
+ rtrs_err(clt, "Invalid RTRS connection response\n");
+ return -ECONNRESET;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ rtrs_err(clt, "Invalid RTRS magic\n");
+ return -ECONNRESET;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ return -ECONNRESET;
+ }
+ errno = le16_to_cpu(msg->errno);
+ if (errno) {
+ rtrs_err(clt, "Invalid RTRS message: errno %d\n",
+ errno);
+ return -ECONNRESET;
+ }
+ if (con->c.cid == 0) {
+ queue_depth = le16_to_cpu(msg->queue_depth);
+
+ if (queue_depth > MAX_SESS_QUEUE_DEPTH) {
+ rtrs_err(clt, "Invalid RTRS message: queue=%d\n",
+ queue_depth);
+ return -ECONNRESET;
+ }
+ if (!sess->rbufs || sess->queue_depth < queue_depth) {
+ kfree(sess->rbufs);
+ sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
+ GFP_KERNEL);
+ if (!sess->rbufs)
+ return -ENOMEM;
+ }
+ sess->queue_depth = queue_depth;
+ sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
+ sess->max_io_size = le32_to_cpu(msg->max_io_size);
+ sess->flags = le32_to_cpu(msg->flags);
+ sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
+
+		/*
+		 * Global queue depth and IO size are always a minimum.
+		 * If during a reconnection the server sends us slightly
+		 * higher values, the client does not care and keeps using
+		 * the cached minimum.
+		 *
+		 * Since several sessions (paths) can be reestablishing
+		 * connections in parallel, use the lock.
+		 */
+ mutex_lock(&clt->paths_mutex);
+ clt->queue_depth = min_not_zero(sess->queue_depth,
+ clt->queue_depth);
+ clt->max_io_size = min_not_zero(sess->max_io_size,
+ clt->max_io_size);
+ mutex_unlock(&clt->paths_mutex);
+
+ /*
+ * Cache the hca_port and hca_name for sysfs
+ */
+ sess->hca_port = con->c.cm_id->port_num;
+		scnprintf(sess->hca_name, sizeof(sess->hca_name), "%s",
+			  sess->s.dev->ib_dev->name);
+ sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ }
+
+ return 0;
+}
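+
+/*
+ * The version check above relies on RTRS_PROTO_VER being encoded as
+ * (major << 8) | minor (see rtrs-pri.h): e.g. version 2.0 is 0x0200,
+ * so version >> 8 recovers the major number and a minor-only bump
+ * remains compatible.
+ */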
+
+static inline void flag_success_on_conn(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ atomic_inc(&sess->connected_cnt);
+ con->cm_err = 1;
+}
+
+static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_sess *s = con->c.sess;
+ const struct rtrs_msg_conn_rsp *msg;
+ const char *rej_msg;
+ int status, errno;
+ u8 data_len;
+
+ status = ev->status;
+ rej_msg = rdma_reject_msg(con->c.cm_id, status);
+ msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
+
+ if (msg && data_len >= sizeof(*msg)) {
+ errno = (int16_t)le16_to_cpu(msg->errno);
+ if (errno == -EBUSY)
+			rtrs_err(s,
+				 "Previous session still exists on the server, please reconnect later\n");
+ else
+ rtrs_err(s,
+ "Connect rejected: status %d (%s), rtrs errno %d\n",
+ status, rej_msg, errno);
+ } else {
+ rtrs_err(s,
+ "Connect rejected but with malformed message: status %d (%s)\n",
+ status, rej_msg);
+ }
+
+ return -ECONNRESET;
+}
+
+static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
+{
+ if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING))
+ queue_work(rtrs_wq, &sess->close_work);
+ if (wait)
+ flush_work(&sess->close_work);
+}
+
+static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
+{
+ if (con->cm_err == 1) {
+ struct rtrs_clt_sess *sess;
+
+ sess = to_clt_sess(con->c.sess);
+		if (atomic_dec_and_test(&sess->connected_cnt))
+			wake_up(&sess->state_wq);
+ }
+ con->cm_err = cm_err;
+}
+
+static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_con *con = cm_id->context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ int cm_err = 0;
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_err = rtrs_rdma_addr_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_err = rtrs_rdma_route_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ con->cm_err = rtrs_rdma_conn_established(con, ev);
+ if (likely(!con->cm_err)) {
+ /*
+ * Report success and wake up. Here we abuse state_wq,
+ * i.e. wake up without state change, but we set cm_err.
+ */
+ flag_success_on_conn(con);
+ wake_up(&sess->state_wq);
+ return 0;
+ }
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_err = rtrs_rdma_conn_rejected(con, ev);
+ break;
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ rtrs_wrn(s, "CM error event %d\n", ev->event);
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ cm_err = -EHOSTUNREACH;
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /*
+ * Device removal is a special case. Queue close and return 0.
+ */
+ rtrs_clt_close_conns(sess, false);
+ return 0;
+ default:
+ rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
+ cm_err = -ECONNRESET;
+ break;
+ }
+
+ if (cm_err) {
+ /*
+ * cm error makes sense only on connection establishing,
+ * in other cases we rely on normal procedure of reconnecting.
+ */
+ flag_error_on_conn(con, cm_err);
+ rtrs_rdma_error_recovery(con);
+ }
+
+ return 0;
+}
+
+static int create_cm(struct rtrs_clt_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rdma_cm_id *cm_id;
+ int err;
+
+ cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
+ sess->s.dst_addr.ss_family == AF_IB ?
+ RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ err = PTR_ERR(cm_id);
+ rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
+
+ return err;
+ }
+ con->c.cm_id = cm_id;
+ con->cm_err = 0;
+ /* allow the port to be reused */
+ err = rdma_set_reuseaddr(cm_id, 1);
+ if (err != 0) {
+ rtrs_err(s, "Set address reuse failed, err: %d\n", err);
+ goto destroy_cm;
+ }
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
+ (struct sockaddr *)&sess->s.dst_addr,
+ RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Failed to resolve address, err: %d\n", err);
+ goto destroy_cm;
+ }
+	/*
+	 * Combine connection status and session events. This is needed
+	 * to wait for two possible cases: cm_err got something meaningful
+	 * or the session state was really changed to error by device
+	 * removal.
+	 */
+ err = wait_event_interruptible_timeout(
+ sess->state_wq,
+ con->cm_err || sess->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+ if (err == 0 || err == -ERESTARTSYS) {
+ if (err == 0)
+ err = -ETIMEDOUT;
+		/* Timed out or interrupted */
+ goto errr;
+ }
+ if (con->cm_err < 0) {
+ err = con->cm_err;
+ goto errr;
+ }
+ if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
+ /* Device removal */
+ err = -ECONNABORTED;
+ goto errr;
+ }
+
+ return 0;
+
+errr:
+ stop_cm(con);
+	/* It is safe to call destroy even if cq_qp was not initialized */
+ destroy_con_cq_qp(con);
+destroy_cm:
+ destroy_cm(con);
+
+ return err;
+}
+
+static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ int up;
+
+ /*
+	 * We can fire the RECONNECTED event only when all paths were
+	 * connected on rtrs_clt_open(), then each was disconnected
+	 * and the first one connected again. That's why this nasty
+	 * game with the counter value.
+ */
+
+ mutex_lock(&clt->paths_ev_mutex);
+ up = ++clt->paths_up;
+ /*
+ * Here it is safe to access paths num directly since up counter
+ * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
+ * in progress, thus paths removals are impossible.
+ */
+ if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
+ clt->paths_up = clt->paths_num;
+ else if (up == 1)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+
+ /* Mark session as established */
+ sess->established = true;
+ sess->reconnect_attempts = 0;
+ sess->stats->reconnects.successful_cnt++;
+}
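+
+/*
+ * Worked example of the counter game above (illustrative): with 2 paths,
+ * clt->paths_up starts at MAX_PATHS_NUM = 128 in alloc_clt(). The two
+ * initial ups raise it to 130 == MAX_PATHS_NUM + paths_num, which resets
+ * it to paths_num = 2. Only after all paths went down to 0 and the first
+ * one came back (up == 1) is RTRS_CLT_LINK_EV_RECONNECTED fired.
+ */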
+
+static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+
+ if (!sess->established)
+ return;
+
+ sess->established = false;
+ mutex_lock(&clt->paths_ev_mutex);
+ WARN_ON(!clt->paths_up);
+ if (--clt->paths_up == 0)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+}
+
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_con *con;
+ unsigned int cid;
+
+ WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
+
+ /*
+ * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
+ * exactly in between. Start destroying after it finishes.
+ */
+ mutex_lock(&sess->init_mutex);
+ mutex_unlock(&sess->init_mutex);
+
+ /*
+ * All IO paths must observe !CONNECTED state before we
+ * free everything.
+ */
+ synchronize_rcu();
+
+ rtrs_clt_stop_hb(sess);
+
+	/*
+	 * The order is utterly crucial: first disconnect and complete all
+	 * rdma requests with error (thus setting in_use=false for requests),
+	 * then fail outstanding requests checking in_use for each, and
+	 * eventually notify the upper layer about session disconnection.
+	 */
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (!sess->s.con[cid])
+ break;
+ con = to_clt_con(sess->s.con[cid]);
+ stop_cm(con);
+ }
+ fail_all_outstanding_reqs(sess);
+ free_sess_reqs(sess);
+ rtrs_clt_sess_down(sess);
+
+	/*
+	 * Wait for graceful shutdown, namely when the peer side invokes
+	 * rdma_disconnect(). 'connected_cnt' is decremented only on
+	 * CM events, thus if the other side has crashed and hb has detected
+	 * something is wrong, here we will be stuck for exactly timeout ms,
+	 * since CM does not fire anything. That is fine, we are not in
+	 * a hurry.
+	 */
+ wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (!sess->s.con[cid])
+ break;
+ con = to_clt_con(sess->s.con[cid]);
+ destroy_con_cq_qp(con);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+}
+
+static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
+ struct rtrs_clt_sess *sess,
+ struct rtrs_clt_sess *next)
+{
+ struct rtrs_clt_sess **ppcpu_path;
+
+ /* Call cmpxchg() without sparse warnings */
+ ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
+ return sess == cmpxchg(ppcpu_path, sess, next);
+}
+
+static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *next;
+ bool wait_for_grace = false;
+ int cpu;
+
+ mutex_lock(&clt->paths_mutex);
+ list_del_rcu(&sess->s.entry);
+
+ /* Make sure everybody observes path removal. */
+ synchronize_rcu();
+
+ /*
+ * At this point nobody sees @sess in the list, but still we have
+ * dangling pointer @pcpu_path which _can_ point to @sess. Since
+ * nobody can observe @sess in the list, we guarantee that IO path
+ * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
+ * to @sess, but can never again become @sess.
+ */
+
+	/*
+	 * Decrement paths number only after the grace period, because
+	 * the caller of do_each_path() must first observe the list
+	 * without the path and only then the decremented paths number.
+ *
+ * Otherwise there can be the following situation:
+ * o Two paths exist and IO is coming.
+ * o One path is removed:
+ * CPU#0 CPU#1
+ * do_each_path(): rtrs_clt_remove_path_from_arr():
+ * path = get_next_path()
+ * ^^^ list_del_rcu(path)
+ * [!CONNECTED path] clt->paths_num--
+ * ^^^^^^^^^
+ * load clt->paths_num from 2 to 1
+ * ^^^^^^^^^
+ * sees 1
+ *
+ * path is observed as !CONNECTED, but do_each_path() loop
+ * ends, because expression i < clt->paths_num is false.
+ */
+ clt->paths_num--;
+
+ /*
+ * Get @next connection from current @sess which is going to be
+ * removed. If @sess is the last element, then @next is NULL.
+ */
+ rcu_read_lock();
+ next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
+ typeof(*next), s.entry);
+ rcu_read_unlock();
+
+ /*
+ * @pcpu paths can still point to the path which is going to be
+ * removed, so change the pointer manually.
+ */
+ for_each_possible_cpu(cpu) {
+ struct rtrs_clt_sess __rcu **ppcpu_path;
+
+ ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
+ if (rcu_dereference_protected(*ppcpu_path,
+ lockdep_is_held(&clt->paths_mutex)) != sess)
+ /*
+ * synchronize_rcu() was called just after deleting
+ * entry from the list, thus IO code path cannot
+ * change pointer back to the pointer which is going
+ * to be removed, we are safe here.
+ */
+ continue;
+
+ /*
+ * We race with IO code path, which also changes pointer,
+ * thus we have to be careful not to overwrite it.
+ */
+ if (xchg_sessions(ppcpu_path, sess, next))
+			/*
+			 * @ppcpu_path was successfully replaced with @next,
+			 * which means that someone could also have picked up
+			 * @sess and be dereferencing it right now, so waiting
+			 * for a grace period is required.
+			 */
+ wait_for_grace = true;
+ }
+ if (wait_for_grace)
+ synchronize_rcu();
+
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt *clt = sess->clt;
+
+ mutex_lock(&clt->paths_mutex);
+ clt->paths_num++;
+
+ list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_close_work(struct work_struct *work)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(work, struct rtrs_clt_sess, close_work);
+
+ cancel_delayed_work_sync(&sess->reconnect_dwork);
+ rtrs_clt_stop_and_destroy_conns(sess);
+ rtrs_clt_change_state(sess, RTRS_CLT_CLOSED);
+}
+
+static int init_conns(struct rtrs_clt_sess *sess)
+{
+ unsigned int cid;
+ int err;
+
+	/*
+	 * On every new session connections increase the reconnect counter
+	 * to avoid clashes with previous sessions not yet closed on the
+	 * server side.
+	 */
+ sess->s.recon_cnt++;
+
+ /* Establish all RDMA connections */
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ err = create_con(sess, cid);
+ if (err)
+ goto destroy;
+
+ err = create_cm(to_clt_con(sess->s.con[cid]));
+ if (err) {
+ destroy_con(to_clt_con(sess->s.con[cid]));
+ goto destroy;
+ }
+ }
+ err = alloc_sess_reqs(sess);
+ if (err)
+ goto destroy;
+
+ rtrs_clt_start_hb(sess);
+
+ return 0;
+
+destroy:
+ while (cid--) {
+ struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
+
+ stop_cm(con);
+ destroy_con_cq_qp(con);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+	/*
+	 * If we've never taken the async path and got an error, say,
+	 * doing rdma_resolve_addr(), switch to CONNECTING_ERR state
+	 * manually to keep reconnecting.
+	 */
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+
+ return err;
+}
+
+static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(sess->clt, "Sess info request send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ return;
+ }
+
+ rtrs_clt_update_wc_stats(con);
+}
+
+static int process_info_rsp(struct rtrs_clt_sess *sess,
+ const struct rtrs_msg_info_rsp *msg)
+{
+ unsigned int sg_cnt, total_len;
+ int i, sgi;
+
+ sg_cnt = le16_to_cpu(msg->sg_cnt);
+ if (unlikely(!sg_cnt))
+ return -EINVAL;
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and
+ * the offset inside the memory chunk.
+ */
+ if (unlikely((ilog2(sg_cnt - 1) + 1) +
+ (ilog2(sess->chunk_size - 1) + 1) >
+ MAX_IMM_PAYL_BITS)) {
+ rtrs_err(sess->clt,
+ "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
+ MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
+ return -EINVAL;
+ }
+	if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
+		rtrs_err(sess->clt, "Incorrect sg_cnt %d, queue depth not a multiple of it\n",
+			 sg_cnt);
+ return -EINVAL;
+ }
+ total_len = 0;
+ for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
+ const struct rtrs_sg_desc *desc = &msg->desc[sgi];
+ u32 len, rkey;
+ u64 addr;
+
+ addr = le64_to_cpu(desc->addr);
+ rkey = le32_to_cpu(desc->key);
+ len = le32_to_cpu(desc->len);
+
+ total_len += len;
+
+ if (unlikely(!len || (len % sess->chunk_size))) {
+ rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
+ len);
+ return -EINVAL;
+ }
+ for ( ; len && i < sess->queue_depth; i++) {
+ sess->rbufs[i].addr = addr;
+ sess->rbufs[i].rkey = rkey;
+
+ len -= sess->chunk_size;
+ addr += sess->chunk_size;
+ }
+ }
+ /* Sanity check */
+ if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
+ rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
+ return -EINVAL;
+ }
+ if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
+ rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
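+
+/*
+ * Illustrative example for the unrolling above: with queue_depth = 512,
+ * chunk_size = 64K and sg_cnt = 2, each descriptor covers a 16M region,
+ * i.e. 256 chunks, which is expanded into 256 rbufs sharing the same
+ * rkey with addresses advancing by chunk_size. The sanity checks then
+ * verify that exactly queue_depth rbufs were produced.
+ */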
+
+static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_msg_info_rsp *msg;
+ enum rtrs_clt_state state;
+ struct rtrs_iu *iu;
+ size_t rx_sz;
+ int err;
+
+ state = RTRS_CLT_CONNECTING_ERR;
+
+ WARN_ON(con->c.cid);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto out;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
+ rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ rx_sz = sizeof(*msg);
+ rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
+ if (unlikely(wc->byte_len < rx_sz)) {
+ rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ err = process_info_rsp(sess, msg);
+ if (unlikely(err))
+ goto out;
+
+ err = post_recv_sess(sess);
+ if (unlikely(err))
+ goto out;
+
+ state = RTRS_CLT_CONNECTED;
+
+out:
+ rtrs_clt_update_wc_stats(con);
+ rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_clt_change_state(sess, state);
+}
+
+static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *tx_iu, *rx_iu;
+ size_t rx_sz;
+ int err;
+
+ rx_sz = sizeof(struct rtrs_msg_info_rsp);
+ rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH;
+
+ tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
+ sess->s.dev->ib_dev, DMA_TO_DEVICE,
+ rtrs_clt_info_req_done);
+ rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
+ if (unlikely(!tx_iu || !rx_iu)) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
+ goto out;
+ }
+ rx_iu = NULL;
+
+ msg = tx_iu->buf;
+ msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
+ memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info request */
+ err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
+ goto out;
+ }
+ tx_iu = NULL;
+
+ /* Wait for state change */
+ wait_event_interruptible_timeout(sess->state_wq,
+ sess->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(
+ RTRS_CONNECT_TIMEOUT_MS));
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
+ if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
+ err = -ECONNRESET;
+ else
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+out:
+ if (tx_iu)
+ rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ if (rx_iu)
+ rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ if (unlikely(err))
+		/* If we've never taken the async path because of malloc problems */
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+
+ return err;
+}
+
+/**
+ * init_sess() - establishes all session connections and does the handshake
+ * @sess: client session.
+ *
+ * In case of error a full close or reconnect procedure should be taken,
+ * because reconnect or close async work can already be started.
+ */
+static int init_sess(struct rtrs_clt_sess *sess)
+{
+ int err;
+
+ mutex_lock(&sess->init_mutex);
+ err = init_conns(sess);
+ if (err) {
+ rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
+ goto out;
+ }
+ err = rtrs_send_sess_info(sess);
+ if (err) {
+ rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
+ goto out;
+ }
+ rtrs_clt_sess_up(sess);
+out:
+ mutex_unlock(&sess->init_mutex);
+
+ return err;
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work)
+{
+ struct rtrs_clt_sess *sess;
+ struct rtrs_clt *clt;
+ unsigned int delay_ms;
+ int err;
+
+ sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
+ reconnect_dwork);
+ clt = sess->clt;
+
+ if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
+ return;
+
+ if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
+ /* Close a session completely if max attempts is reached */
+ rtrs_clt_close_conns(sess, false);
+ return;
+ }
+ sess->reconnect_attempts++;
+
+ /* Stop everything */
+ rtrs_clt_stop_and_destroy_conns(sess);
+ msleep(RTRS_RECONNECT_BACKOFF);
+ if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) {
+ err = init_sess(sess);
+ if (err)
+ goto reconnect_again;
+ }
+
+ return;
+
+reconnect_again:
+ if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) {
+ sess->stats->reconnects.fail_cnt++;
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ msecs_to_jiffies(delay_ms));
+ }
+}
+
+static void rtrs_clt_dev_release(struct device *dev)
+{
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ kfree(clt);
+}
+
+static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+ u16 port, size_t pdu_sz, void *priv,
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev),
+ unsigned int max_segments,
+ size_t max_segment_size,
+ unsigned int reconnect_delay_sec,
+ unsigned int max_reconnect_attempts)
+{
+ struct rtrs_clt *clt;
+ int err;
+
+ if (!paths_num || paths_num > MAX_PATHS_NUM)
+ return ERR_PTR(-EINVAL);
+
+ if (strlen(sessname) >= sizeof(clt->sessname))
+ return ERR_PTR(-EINVAL);
+
+ clt = kzalloc(sizeof(*clt), GFP_KERNEL);
+ if (!clt)
+ return ERR_PTR(-ENOMEM);
+
+ clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
+ if (!clt->pcpu_path) {
+ kfree(clt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ uuid_gen(&clt->paths_uuid);
+ INIT_LIST_HEAD_RCU(&clt->paths_list);
+ clt->paths_num = paths_num;
+ clt->paths_up = MAX_PATHS_NUM;
+ clt->port = port;
+ clt->pdu_sz = pdu_sz;
+ clt->max_segments = max_segments;
+ clt->max_segment_size = max_segment_size;
+ clt->reconnect_delay_sec = reconnect_delay_sec;
+ clt->max_reconnect_attempts = max_reconnect_attempts;
+ clt->priv = priv;
+ clt->link_ev = link_ev;
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ strlcpy(clt->sessname, sessname, sizeof(clt->sessname));
+ init_waitqueue_head(&clt->permits_wait);
+ mutex_init(&clt->paths_ev_mutex);
+ mutex_init(&clt->paths_mutex);
+
+ clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
+ err = dev_set_name(&clt->dev, "%s", sessname);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ kfree(clt);
+ return ERR_PTR(err);
+ }
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&clt->dev, true);
+ err = device_register(&clt->dev);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ put_device(&clt->dev);
+ return ERR_PTR(err);
+ }
+
+	clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
+	if (!clt->kobj_paths) {
+		free_percpu(clt->pcpu_path);
+		device_unregister(&clt->dev);
+		/* return ERR_PTR, the caller checks IS_ERR(), not NULL */
+		return ERR_PTR(-ENOMEM);
+	}
+ err = rtrs_clt_create_sysfs_root_files(clt);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ device_unregister(&clt->dev);
+ return ERR_PTR(err);
+ }
+ dev_set_uevent_suppress(&clt->dev, false);
+ kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
+
+ return clt;
+}
+
+static void wait_for_inflight_permits(struct rtrs_clt *clt)
+{
+ if (clt->permits_map) {
+ size_t sz = clt->queue_depth;
+
+ wait_event(clt->permits_wait,
+ find_first_bit(clt->permits_map, sz) >= sz);
+ }
+}
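+
+/*
+ * Note (illustrative): a set bit in clt->permits_map marks a permit in
+ * use, so find_first_bit() returning >= queue_depth means every permit
+ * has been returned; the permit release path earlier in this file is
+ * expected to clear the bit and wake up permits_wait.
+ */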
+
+static void free_clt(struct rtrs_clt *clt)
+{
+ wait_for_inflight_permits(clt);
+ free_permits(clt);
+ free_percpu(clt->pcpu_path);
+ mutex_destroy(&clt->paths_ev_mutex);
+ mutex_destroy(&clt->paths_mutex);
+ /* release callback will free clt in last put */
+ device_unregister(&clt->dev);
+}
+
+/**
+ * rtrs_clt_open() - Open a session to an RTRS server
+ * @ops: holds the link event callback and the private pointer.
+ * @sessname: name of the session
+ * @paths: Paths to be established defined by their src and dst addresses
+ * @paths_num: Number of elements in the @paths array
+ * @port: port to be used by the RTRS session
+ * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
+ * @reconnect_delay_sec: time between reconnect tries
+ * @max_segments: Max. number of segments per IO request
+ * @max_segment_size: Max. size of one segment
+ * @max_reconnect_attempts: Number of times to reconnect on error before giving
+ *			    up, 0 for disabled, -1 for forever
+ *
+ * Starts session establishment with the rtrs_server. The function can block
+ * up to ~2000ms before it returns.
+ *
+ * Return a valid pointer on success otherwise PTR_ERR.
+ */
+struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t paths_num, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ u16 max_segments,
+ size_t max_segment_size,
+ s16 max_reconnect_attempts)
+{
+ struct rtrs_clt_sess *sess, *tmp;
+ struct rtrs_clt *clt;
+ int err, i;
+
+ clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
+ ops->link_ev,
+ max_segments, max_segment_size, reconnect_delay_sec,
+ max_reconnect_attempts);
+ if (IS_ERR(clt)) {
+ err = PTR_ERR(clt);
+ goto out;
+ }
+ for (i = 0; i < paths_num; i++) {
+ struct rtrs_clt_sess *sess;
+
+ sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
+ max_segments, max_segment_size);
+ if (IS_ERR(sess)) {
+ err = PTR_ERR(sess);
+ goto close_all_sess;
+ }
+ list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+
+ err = init_sess(sess);
+ if (err) {
+ list_del_rcu(&sess->s.entry);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+ goto close_all_sess;
+ }
+
+ err = rtrs_clt_create_sess_files(sess);
+ if (err) {
+ list_del_rcu(&sess->s.entry);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+ goto close_all_sess;
+ }
+ }
+ err = alloc_permits(clt);
+ if (err)
+ goto close_all_sess;
+
+ return clt;
+
+close_all_sess:
+ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_sess_files(sess, NULL);
+ rtrs_clt_close_conns(sess, true);
+ kobject_put(&sess->kobj);
+ }
+ rtrs_clt_destroy_sysfs_root_files(clt);
+ rtrs_clt_destroy_sysfs_root_folders(clt);
+ free_clt(clt);
+
+out:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(rtrs_clt_open);
+
+/**
+ * rtrs_clt_close() - Close a session
+ * @clt: Session handle. Session is freed upon return.
+ */
+void rtrs_clt_close(struct rtrs_clt *clt)
+{
+ struct rtrs_clt_sess *sess, *tmp;
+
+ /* Firstly forbid sysfs access */
+ rtrs_clt_destroy_sysfs_root_files(clt);
+ rtrs_clt_destroy_sysfs_root_folders(clt);
+
+ /* Now it is safe to iterate over all paths without locks */
+ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_sess_files(sess, NULL);
+ rtrs_clt_close_conns(sess, true);
+ kobject_put(&sess->kobj);
+ }
+ free_clt(clt);
+}
+EXPORT_SYMBOL(rtrs_clt_close);
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
+{
+ enum rtrs_clt_state old_state;
+ int err = -EBUSY;
+ bool changed;
+
+ changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
+ &old_state);
+ if (changed) {
+ sess->reconnect_attempts = 0;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
+ }
+ if (changed || old_state == RTRS_CLT_RECONNECTING) {
+ /*
+ * flush_delayed_work() queues pending work for immediate
+ * execution, so do the flush if we have queued something
+ * right now or work is pending.
+ */
+ flush_delayed_work(&sess->reconnect_dwork);
+ err = (READ_ONCE(sess->state) ==
+ RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
+ }
+
+ return err;
+}
+
+int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
+{
+ rtrs_clt_close_conns(sess, true);
+
+ return 0;
+}
+
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self)
+{
+ enum rtrs_clt_state old_state;
+ bool changed;
+
+ /*
+ * Continue stopping path till state was changed to DEAD or
+ * state was observed as DEAD:
+ * 1. State was changed to DEAD - we were fast and nobody
+ * invoked rtrs_clt_reconnect(), which can again start
+ * reconnecting.
+ * 2. State was observed as DEAD - we have someone in parallel
+ * removing the path.
+ */
+ do {
+ rtrs_clt_close_conns(sess, true);
+ changed = rtrs_clt_change_state_get_old(sess,
+ RTRS_CLT_DEAD,
+ &old_state);
+ } while (!changed && old_state != RTRS_CLT_DEAD);
+
+ if (likely(changed)) {
+ rtrs_clt_destroy_sess_files(sess, sysfs_self);
+ rtrs_clt_remove_path_from_arr(sess);
+ kobject_put(&sess->kobj);
+ }
+
+ return 0;
+}
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
+{
+ clt->max_reconnect_attempts = (unsigned int)value;
+}
+
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
+{
+ return (int)clt->max_reconnect_attempts;
+}
+
+/**
+ * rtrs_clt_request() - Request data transfer to/from server via RDMA.
+ *
+ * @dir: READ/WRITE
+ * @ops: callback function to be called as confirmation, and the pointer.
+ * @clt: Session
+ * @permit: Preallocated permit
+ * @vec: Message that is sent to server together with the request.
+ * Sum of len of all @vec elements limited to <= IO_MSG_SIZE.
+ * Since the msg is copied internally it can be allocated on stack.
+ * @nr: Number of elements in @vec.
+ * @data_len: length of data sent to/from server
+ * @sg: Pages to be sent/received to/from server.
+ * @sg_cnt: Number of elements in the @sg
+ *
+ * Return:
+ * 0: Success
+ * <0: Error
+ *
+ * On dir=READ rtrs client will request a data transfer from server to client.
+ * The data that the server responds with will be stored in @sg when
+ * the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
+ * On dir=WRITE rtrs client will rdma-write the data in @sg to the server side.
+ */
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt *clt, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t data_len,
+ struct scatterlist *sg, unsigned int sg_cnt)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt_sess *sess;
+
+ enum dma_data_direction dma_dir;
+ int err = -ECONNABORTED, i;
+ size_t usr_len, hdr_len;
+ struct path_it it;
+
+ /* Get kvec length */
+ for (i = 0, usr_len = 0; i < nr; i++)
+ usr_len += vec[i].iov_len;
+
+ if (dir == READ) {
+ hdr_len = sizeof(struct rtrs_msg_rdma_read) +
+ sg_cnt * sizeof(struct rtrs_sg_desc);
+ dma_dir = DMA_FROM_DEVICE;
+ } else {
+ hdr_len = sizeof(struct rtrs_msg_rdma_write);
+ dma_dir = DMA_TO_DEVICE;
+ }
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ continue;
+
+ if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
+ rtrs_wrn_rl(sess->clt,
+ "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
+ dir == READ ? "Read" : "Write",
+ usr_len, hdr_len, sess->max_hdr_size);
+ err = -EMSGSIZE;
+ break;
+ }
+ req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
+ vec, usr_len, sg, sg_cnt, data_len,
+ dma_dir);
+ if (dir == READ)
+ err = rtrs_clt_read_req(req);
+ else
+ err = rtrs_clt_write_req(req);
+ if (unlikely(err)) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_clt_request);
+
+/**
+ * rtrs_clt_query() - queries RTRS session attributes
+ * @clt: session pointer
+ * @attr: query results for session attributes.
+ *
+ * Returns:
+ *    0 on success
+ *    -ECOMM, no connection to the server
+ */
+int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
+{
+ if (!rtrs_clt_is_connected(clt))
+ return -ECOMM;
+
+ attr->queue_depth = clt->queue_depth;
+ attr->max_io_size = clt->max_io_size;
+ attr->sess_kobj = &clt->dev.kobj;
+ strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));
+
+ return 0;
+}
+EXPORT_SYMBOL(rtrs_clt_query);
+
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt_sess *sess;
+ int err;
+
+ sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments,
+ clt->max_segment_size);
+ if (IS_ERR(sess))
+ return PTR_ERR(sess);
+
+	/*
+	 * It is totally safe to add a path in CONNECTING state: incoming
+	 * IO will never grab it. It is also very important to add the path
+	 * before init, since init fires the LINK_CONNECTED event.
+	 */
+ rtrs_clt_add_path_to_arr(sess, addr);
+
+ err = init_sess(sess);
+ if (err)
+ goto close_sess;
+
+ err = rtrs_clt_create_sess_files(sess);
+ if (err)
+ goto close_sess;
+
+ return 0;
+
+close_sess:
+ rtrs_clt_remove_path_from_arr(sess);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+
+ return err;
+}
+
+static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
+{
+ if (!(dev->ib_dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ pr_err("Memory registrations not supported.\n");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
+ .init = rtrs_clt_ib_dev_init
+};
+
+static int __init rtrs_client_init(void)
+{
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
+ if (IS_ERR(rtrs_clt_dev_class)) {
+ pr_err("Failed to create rtrs-client dev class\n");
+ return PTR_ERR(rtrs_clt_dev_class);
+ }
+ rtrs_wq = alloc_workqueue("rtrs_client_wq", WQ_MEM_RECLAIM, 0);
+ if (!rtrs_wq) {
+ class_destroy(rtrs_clt_dev_class);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit rtrs_client_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_clt_dev_class);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_client_init);
+module_exit(rtrs_client_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
new file mode 100644
index 000000000000..167acd3c90fc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_CLT_H
+#define RTRS_CLT_H
+
+#include <linux/device.h>
+#include "rtrs-pri.h"
+
+/**
+ * enum rtrs_clt_state - Client states.
+ */
+enum rtrs_clt_state {
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR,
+ RTRS_CLT_RECONNECTING,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_CLOSING,
+ RTRS_CLT_CLOSED,
+ RTRS_CLT_DEAD,
+};
+
+enum rtrs_mp_policy {
+ MP_POLICY_RR,
+ MP_POLICY_MIN_INFLIGHT,
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_reconnects {
+ int successful_cnt;
+ int fail_cnt;
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_cpu_migr {
+ atomic_t from;
+ int to;
+};
+
+/* stats for read and write operations.
+ * see Documentation/ABI/testing/sysfs-class-rtrs-client for details
+ */
+struct rtrs_clt_stats_rdma {
+ struct {
+ u64 cnt;
+ u64 size_total;
+ } dir[2];
+
+ u64 failover_cnt;
+};
+
+struct rtrs_clt_stats_pcpu {
+ struct rtrs_clt_stats_cpu_migr cpu_migr;
+ struct rtrs_clt_stats_rdma rdma;
+};
+
+struct rtrs_clt_stats {
+ struct kobject kobj_stats;
+ struct rtrs_clt_stats_pcpu __percpu *pcpu_stats;
+ struct rtrs_clt_stats_reconnects reconnects;
+ atomic_t inflight;
+};
+
+struct rtrs_clt_con {
+ struct rtrs_con c;
+ struct rtrs_iu *rsp_ius;
+ u32 queue_size;
+ unsigned int cpu;
+ atomic_t io_cnt;
+ int cm_err;
+};
+
+/**
+ * rtrs_permit - permits the memory allocation for future RDMA operation.
+ * Combine with irq pinning to keep IO on same CPU.
+ */
+struct rtrs_permit {
+ enum rtrs_clt_con_type con_type;
+ unsigned int cpu_id;
+ unsigned int mem_id;
+ unsigned int mem_off;
+};
+
+/**
+ * rtrs_clt_io_req - describes one inflight IO request
+ */
+struct rtrs_clt_io_req {
+ struct list_head list;
+ struct rtrs_iu *iu;
+ struct scatterlist *sglist; /* list holding user data */
+ unsigned int sg_cnt;
+ unsigned int sg_size;
+ unsigned int data_len;
+ unsigned int usr_len;
+ void *priv;
+ bool in_use;
+ struct rtrs_clt_con *con;
+ struct rtrs_sg_desc *desc;
+ struct ib_sge *sge;
+ struct rtrs_permit *permit;
+ enum dma_data_direction dir;
+ void (*conf)(void *priv, int errno);
+ unsigned long start_jiffies;
+
+ struct ib_mr *mr;
+ struct ib_cqe inv_cqe;
+ struct completion inv_comp;
+ int inv_errno;
+ bool need_inv_comp;
+ bool need_inv;
+};
+
+struct rtrs_rbuf {
+ u64 addr;
+ u32 rkey;
+};
+
+struct rtrs_clt_sess {
+ struct rtrs_sess s;
+ struct rtrs_clt *clt;
+ wait_queue_head_t state_wq;
+ enum rtrs_clt_state state;
+ atomic_t connected_cnt;
+ struct mutex init_mutex;
+ struct rtrs_clt_io_req *reqs;
+ struct delayed_work reconnect_dwork;
+ struct work_struct close_work;
+ unsigned int reconnect_attempts;
+ bool established;
+ struct rtrs_rbuf *rbufs;
+ size_t max_io_size;
+ u32 max_hdr_size;
+ u32 chunk_size;
+ size_t queue_depth;
+ u32 max_pages_per_mr;
+ int max_send_sge;
+ u32 flags;
+ struct kobject kobj;
+ struct rtrs_clt_stats *stats;
+ /* cache hca_port and hca_name to display in sysfs */
+ u8 hca_port;
+ char hca_name[IB_DEVICE_NAME_MAX];
+ struct list_head __percpu
+ *mp_skip_entry;
+};
+
+struct rtrs_clt {
+ struct list_head paths_list; /* rcu protected list */
+ size_t paths_num;
+ struct rtrs_clt_sess
+ __rcu * __percpu *pcpu_path;
+ uuid_t paths_uuid;
+ int paths_up;
+ struct mutex paths_mutex;
+ struct mutex paths_ev_mutex;
+ char sessname[NAME_MAX];
+ u16 port;
+ unsigned int max_reconnect_attempts;
+ unsigned int reconnect_delay_sec;
+ unsigned int max_segments;
+ size_t max_segment_size;
+ void *permits;
+ unsigned long *permits_map;
+ size_t queue_depth;
+ size_t max_io_size;
+ wait_queue_head_t permits_wait;
+ size_t pdu_sz;
+ void *priv;
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev);
+ struct device dev;
+ struct kobject *kobj_paths;
+ enum rtrs_mp_policy mp_policy;
+};
+
+static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_clt_con, c);
+}
+
+static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s)
+{
+ return container_of(s, struct rtrs_clt_sess, s);
+}
+
+static inline int permit_size(struct rtrs_clt *clt)
+{
+ return sizeof(struct rtrs_permit) + clt->pdu_sz;
+}
+
+static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
+{
+ return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
+}
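+
+/*
+ * Layout sketch (illustrative): clt->permits is one flat allocation of
+ * queue_depth fixed-size slots, each holding a struct rtrs_permit
+ * immediately followed by pdu_sz bytes of user payload, so get_permit()
+ * is plain pointer arithmetic and the user PDU lives directly behind
+ * its permit.
+ */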
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
+int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess);
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+ struct rtrs_addr *addr);
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self);
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value);
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt);
+void free_sess(struct rtrs_clt_sess *sess);
+
+/* rtrs-clt-stats.c */
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats);
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *s);
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con);
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir);
+
+int rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats,
+ bool enable);
+ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_wc_comp_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_wc_completion_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+
+/* rtrs-clt-sysfs.c */
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt);
+
+int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
+void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self);
+
+#endif /* RTRS_CLT_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-log.h b/drivers/infiniband/ulp/rtrs/rtrs-log.h
new file mode 100644
index 000000000000..53c785b992f2
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-log.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_LOG_H
+#define RTRS_LOG_H
+
+#define rtrs_log(fn, obj, fmt, ...) \
+ fn("<%s>: " fmt, obj->sessname, ##__VA_ARGS__)
+
+#define rtrs_err(obj, fmt, ...) \
+ rtrs_log(pr_err, obj, fmt, ##__VA_ARGS__)
+#define rtrs_err_rl(obj, fmt, ...) \
+ rtrs_log(pr_err_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn(obj, fmt, ...) \
+ rtrs_log(pr_warn, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn_rl(obj, fmt, ...) \
+ rtrs_log(pr_warn_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info(obj, fmt, ...) \
+ rtrs_log(pr_info, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info_rl(obj, fmt, ...) \
+ rtrs_log(pr_info_ratelimited, obj, fmt, ##__VA_ARGS__)
+
+#endif /* RTRS_LOG_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
new file mode 100644
index 000000000000..0a93c87ef92b
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_PRI_H
+#define RTRS_PRI_H
+
+#include <linux/uuid.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib.h>
+
+#include "rtrs.h"
+
+#define RTRS_PROTO_VER_MAJOR 2
+#define RTRS_PROTO_VER_MINOR 0
+
+#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
+ __stringify(RTRS_PROTO_VER_MINOR)
+
+enum rtrs_imm_const {
+ MAX_IMM_TYPE_BITS = 4,
+ MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
+ MAX_IMM_PAYL_BITS = 28,
+ MAX_IMM_PAYL_MASK = ((1 << MAX_IMM_PAYL_BITS) - 1),
+};
+
+enum rtrs_imm_type {
+ RTRS_IO_REQ_IMM = 0, /* client to server */
+ RTRS_IO_RSP_IMM = 1, /* server to client */
+ RTRS_IO_RSP_W_INV_IMM = 2, /* server to client */
+
+ RTRS_HB_MSG_IMM = 8, /* HB: HeartBeat */
+ RTRS_HB_ACK_IMM = 9,
+
+ RTRS_LAST_IMM,
+};
+
+enum {
+ SERVICE_CON_QUEUE_DEPTH = 512,
+
+ MAX_PATHS_NUM = 128,
+
+ /*
+ * With the size of struct rtrs_permit allocated on the client, 4K
+ * is the maximum number of rtrs_permits we can allocate. This number is
+ * also used on the client to allocate the IU for the user connection
+ * to receive the RDMA addresses from the server.
+ */
+ MAX_SESS_QUEUE_DEPTH = 4096,
+
+ RTRS_HB_INTERVAL_MS = 5000,
+ RTRS_HB_MISSED_MAX = 5,
+
+ RTRS_MAGIC = 0x1BBD,
+ RTRS_PROTO_VER = (RTRS_PROTO_VER_MAJOR << 8) | RTRS_PROTO_VER_MINOR,
+};
+
+struct rtrs_ib_dev;
+
+struct rtrs_rdma_dev_pd_ops {
+ struct rtrs_ib_dev *(*alloc)(void);
+ void (*free)(struct rtrs_ib_dev *dev);
+ int (*init)(struct rtrs_ib_dev *dev);
+ void (*deinit)(struct rtrs_ib_dev *dev);
+};
+
+struct rtrs_rdma_dev_pd {
+ struct mutex mutex;
+ struct list_head list;
+ enum ib_pd_flags pd_flags;
+ const struct rtrs_rdma_dev_pd_ops *ops;
+};
+
+struct rtrs_ib_dev {
+ struct ib_device *ib_dev;
+ struct ib_pd *ib_pd;
+ struct kref ref;
+ struct list_head entry;
+ struct rtrs_rdma_dev_pd *pool;
+};
+
+struct rtrs_con {
+ struct rtrs_sess *sess;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct rdma_cm_id *cm_id;
+ unsigned int cid;
+};
+
+struct rtrs_sess {
+ struct list_head entry;
+ struct sockaddr_storage dst_addr;
+ struct sockaddr_storage src_addr;
+ char sessname[NAME_MAX];
+ uuid_t uuid;
+ struct rtrs_con **con;
+ unsigned int con_num;
+ unsigned int recon_cnt;
+ struct rtrs_ib_dev *dev;
+ int dev_ref;
+ struct ib_cqe *hb_cqe;
+ void (*hb_err_handler)(struct rtrs_con *con);
+ struct workqueue_struct *hb_wq;
+ struct delayed_work hb_dwork;
+ unsigned int hb_interval_ms;
+ unsigned int hb_missed_cnt;
+ unsigned int hb_missed_max;
+};
+
+/* rtrs information unit */
+struct rtrs_iu {
+ struct list_head list;
+ struct ib_cqe cqe;
+ dma_addr_t dma_addr;
+ void *buf;
+ size_t size;
+ enum dma_data_direction direction;
+};
+
+/**
+ * enum rtrs_msg_types - RTRS message types, see also rtrs/README
+ * @RTRS_MSG_INFO_REQ: Client additional info request to the server
+ * @RTRS_MSG_INFO_RSP: Server additional info response to the client
+ * @RTRS_MSG_WRITE:	Client writes data to the server via RDMA
+ * @RTRS_MSG_READ: Client requests data transfer from server
+ * @RTRS_MSG_RKEY_RSP: Server refreshed rkey for rbuf
+ */
+enum rtrs_msg_types {
+ RTRS_MSG_INFO_REQ,
+ RTRS_MSG_INFO_RSP,
+ RTRS_MSG_WRITE,
+ RTRS_MSG_READ,
+ RTRS_MSG_RKEY_RSP,
+};
+
+/**
+ * enum rtrs_msg_flags - RTRS message flags.
+ * @RTRS_MSG_NEED_INVAL_F:	Send invalidation in response.
+ * @RTRS_MSG_NEW_RKEY_F: Send refreshed rkey in response.
+ */
+enum rtrs_msg_flags {
+ RTRS_MSG_NEED_INVAL_F = 1 << 0,
+ RTRS_MSG_NEW_RKEY_F = 1 << 1,
+};
+
+/**
+ * struct rtrs_sg_desc - RDMA-Buffer entry description
+ * @addr: Address of RDMA destination buffer
+ * @key: Authorization rkey to write to the buffer
+ * @len: Size of the buffer
+ */
+struct rtrs_sg_desc {
+ __le64 addr;
+ __le32 key;
+ __le32 len;
+};
+
+/**
+ * struct rtrs_msg_conn_req - Client connection request to the server
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @cid: Current connection id
+ * @cid_num: Number of connections per session
+ * @recon_cnt: Reconnections counter
+ * @sess_uuid: UUID of a session (path)
+ * @paths_uuid: UUID of a group of sessions (paths)
+ *
+ * NOTE: max size 56 bytes, see man rdma_connect().
+ */
+struct rtrs_msg_conn_req {
+ /* Is set to 0 by cma.c in case of AF_IB, do not touch that.
+ * see https://www.spinics.net/lists/linux-rdma/msg22397.html
+ */
+ u8 __cma_version;
+	/* On the sender side this should be set to 0, or cma_save_ip_info()
+	 * extracts garbage and fails.
+ */
+ u8 __ip_version;
+ __le16 magic;
+ __le16 version;
+ __le16 cid;
+ __le16 cid_num;
+ __le16 recon_cnt;
+ uuid_t sess_uuid;
+ uuid_t paths_uuid;
+ u8 reserved[12];
+};
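+
+/*
+ * Informational size check (added note): 1 + 1 + 2 + 2 + 2 + 2 + 2 +
+ * 16 + 16 + 12 = 56 bytes, matching the 56-byte private_data limit
+ * mentioned in the NOTE above.
+ */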
+
+/**
+ * struct rtrs_msg_conn_rsp - Server connection response to the client
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @errno: 0 if the connection was accepted with rdma_accept(),
+ *	    otherwise the error passed to rdma_reject()
+ * @queue_depth: max inflight messages (queue-depth) in this session
+ * @max_io_size: max io size server supports
+ * @max_hdr_size: max msg header size server supports
+ *
+ * NOTE: size is 56 bytes, max possible is 136 bytes, see man rdma_accept().
+ */
+struct rtrs_msg_conn_rsp {
+ __le16 magic;
+ __le16 version;
+ __le16 errno;
+ __le16 queue_depth;
+ __le32 max_io_size;
+ __le32 max_hdr_size;
+ __le32 flags;
+ u8 reserved[36];
+};
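+
+/*
+ * Informational size check (added note): 2 + 2 + 2 + 2 + 4 + 4 + 4 + 36
+ * = 56 bytes, leaving room to grow up to the 136-byte limit mentioned
+ * in the NOTE above.
+ */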
+
+/**
+ * struct rtrs_msg_info_req
+ * @type: @RTRS_MSG_INFO_REQ
+ * @sessname: Session name chosen by client
+ */
+struct rtrs_msg_info_req {
+ __le16 type;
+ u8 sessname[NAME_MAX];
+ u8 reserved[15];
+};
+
+/**
+ * struct rtrs_msg_info_rsp
+ * @type: @RTRS_MSG_INFO_RSP
+ * @sg_cnt: Number of @desc entries
+ * @desc: RDMA buffers where the client can write to server
+ */
+struct rtrs_msg_info_rsp {
+ __le16 type;
+ __le16 sg_cnt;
+ u8 reserved[4];
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rkey_rsp
+ * @type: @RTRS_MSG_RKEY_RSP
+ * @buf_id: RDMA buf_id of the new rkey
+ * @rkey: new remote key for RDMA buffers id from server
+ */
+struct rtrs_msg_rkey_rsp {
+ __le16 type;
+ __le16 buf_id;
+ __le32 rkey;
+};
+
+/**
+ * struct rtrs_msg_rdma_read - RDMA data transfer request from client
+ * @type: always @RTRS_MSG_READ
+ * @usr_len: length of user payload
+ * @sg_cnt: number of @desc entries
+ * @desc: RDMA buffers where the server can write the result to
+ */
+struct rtrs_msg_rdma_read {
+ __le16 type;
+ __le16 usr_len;
+ __le16 flags;
+ __le16 sg_cnt;
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rdma_write - Message transferred to server with RDMA-Write
+ * @type: always @RTRS_MSG_WRITE
+ * @usr_len: length of user payload
+ */
+struct rtrs_msg_rdma_write {
+ __le16 type;
+ __le16 usr_len;
+};
+
+/**
+ * struct rtrs_msg_rdma_hdr - header for a read or write request
+ * @type: @RTRS_MSG_WRITE | @RTRS_MSG_READ
+ */
+struct rtrs_msg_rdma_hdr {
+ __le16 type;
+};
+
+/* rtrs.c */
+
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t t,
+ struct ib_device *dev, enum dma_data_direction,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc));
+void rtrs_iu_free(struct rtrs_iu *iu, enum dma_data_direction dir,
+ struct ib_device *dev, u32 queue_size);
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head);
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
+int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ u32 imm_data, enum ib_send_flags flags,
+ struct ib_send_wr *head);
+
+int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, u16 cq_size,
+ u16 wr_queue_size, enum ib_poll_context poll_ctx);
+void rtrs_cq_qp_destroy(struct rtrs_con *con);
+
+void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq);
+void rtrs_start_hb(struct rtrs_sess *sess);
+void rtrs_stop_hb(struct rtrs_sess *sess);
+void rtrs_send_hb_ack(struct rtrs_sess *sess);
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool);
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool);
+
+struct rtrs_ib_dev *rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool);
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev);
+
+static inline u32 rtrs_to_imm(u32 type, u32 payload)
+{
+ BUILD_BUG_ON(MAX_IMM_PAYL_BITS + MAX_IMM_TYPE_BITS != 32);
+ BUILD_BUG_ON(RTRS_LAST_IMM > (1<<MAX_IMM_TYPE_BITS));
+ return ((type & MAX_IMM_TYPE_MASK) << MAX_IMM_PAYL_BITS) |
+ (payload & MAX_IMM_PAYL_MASK);
+}
+
+static inline void rtrs_from_imm(u32 imm, u32 *type, u32 *payload)
+{
+ *payload = imm & MAX_IMM_PAYL_MASK;
+ *type = imm >> MAX_IMM_PAYL_BITS;
+}
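+
+/*
+ * Worked example with illustrative values (added note):
+ * rtrs_to_imm(RTRS_IO_RSP_IMM, 0x123) places the type in the top
+ * MAX_IMM_TYPE_BITS (4) bits and the payload in the low
+ * MAX_IMM_PAYL_BITS (28) bits: (1 << 28) | 0x123 = 0x10000123.
+ * rtrs_from_imm(0x10000123, &type, &payload) then recovers
+ * type == 1 (RTRS_IO_RSP_IMM) and payload == 0x123.
+ */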
+
+static inline u32 rtrs_to_io_req_imm(u32 addr)
+{
+ return rtrs_to_imm(RTRS_IO_REQ_IMM, addr);
+}
+
+static inline u32 rtrs_to_io_rsp_imm(u32 msg_id, int errno, bool w_inval)
+{
+ enum rtrs_imm_type type;
+ u32 payload;
+
+ /* 9 bits for errno, 19 bits for msg_id */
+ payload = (abs(errno) & 0x1ff) << 19 | (msg_id & 0x7ffff);
+ type = w_inval ? RTRS_IO_RSP_W_INV_IMM : RTRS_IO_RSP_IMM;
+
+ return rtrs_to_imm(type, payload);
+}
+
+static inline void rtrs_from_io_rsp_imm(u32 payload, u32 *msg_id, int *errno)
+{
+ /* 9 bits for errno, 19 bits for msg_id */
+ *msg_id = payload & 0x7ffff;
+ *errno = -(int)((payload >> 19) & 0x1ff);
+}
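+
+/*
+ * Worked example with illustrative values (added note):
+ * rtrs_to_io_rsp_imm(5, -EIO, false) packs abs(-EIO) = 5 into the upper
+ * 9 bits of the payload and msg_id = 5 into the lower 19 bits:
+ * (5 << 19) | 5 = 0x280005, wrapped in an RTRS_IO_RSP_IMM immediate.
+ * rtrs_from_io_rsp_imm(0x280005, &id, &err) yields id == 5 and
+ * err == -5 (-EIO).
+ */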
+
+#define STAT_STORE_FUNC(type, set_value, reset) \
+static ssize_t set_value##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int ret = -EINVAL; \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ if (sysfs_streq(buf, "1")) \
+ ret = reset(stats, true); \
+ else if (sysfs_streq(buf, "0")) \
+ ret = reset(stats, false); \
+ if (ret) \
+ return ret; \
+ \
+ return count; \
+}
+
+#define STAT_SHOW_FUNC(type, get_value, print) \
+static ssize_t get_value##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ return print(stats, page, PAGE_SIZE); \
+}
+
+#define STAT_ATTR(type, stat, print, reset) \
+STAT_STORE_FUNC(type, stat, reset) \
+STAT_SHOW_FUNC(type, stat, print) \
+static struct kobj_attribute stat##_attr = __ATTR_RW(stat)
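+
+/*
+ * Usage sketch (added note, see the instantiation in rtrs-srv-sysfs.c):
+ * STAT_ATTR(struct rtrs_srv_stats, rdma, rtrs_srv_stats_rdma_to_str,
+ * rtrs_srv_reset_rdma_stats) generates rdma_show()/rdma_store() wrappers
+ * around the two callbacks plus a read-write "rdma" attribute named
+ * rdma_attr.
+ */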
+
+#endif /* RTRS_PRI_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
new file mode 100644
index 000000000000..e102b1368d0c
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-srv.h"
+
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
+{
+ if (enable) {
+ struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+
+ memset(r, 0, sizeof(*r));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
+ char *page, size_t len)
+{
+ struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ struct rtrs_srv_sess *sess = stats->sess;
+
+ return scnprintf(page, len, "%lld %lld %lld %lld %u\n",
+ (s64)atomic64_read(&r->dir[READ].cnt),
+ (s64)atomic64_read(&r->dir[READ].size_total),
+ (s64)atomic64_read(&r->dir[WRITE].cnt),
+ (s64)atomic64_read(&r->dir[WRITE].size_total),
+ atomic_read(&sess->ids_inflight));
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
new file mode 100644
index 000000000000..3d7877534bcc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+
+static void rtrs_srv_release(struct kobject *kobj)
+{
+ struct rtrs_srv_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ kfree(sess);
+}
+
+static struct kobj_type ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_release,
+};
+
+static ssize_t rtrs_srv_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_sess *s;
+ char str[MAXHOSTNAMELEN];
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ s = &sess->s;
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(s, "%s: invalid value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+
+ sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+
+ rtrs_info(s, "disconnect for path %s requested\n", str);
+ close_sess(sess);
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_srv_disconnect_attr =
+ __ATTR(disconnect, 0644,
+ rtrs_srv_disconnect_show, rtrs_srv_disconnect_store);
+
+static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_con *usr_con;
+
+ sess = container_of(kobj, typeof(*sess), kobj);
+ usr_con = sess->s.con[0];
+
+ return scnprintf(page, PAGE_SIZE, "%u\n",
+ usr_con->cm_id->port_num);
+}
+
+static struct kobj_attribute rtrs_srv_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_srv_hca_port_show, NULL);
+
+static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n",
+ sess->s.dev->ib_dev->name);
+}
+
+static struct kobj_attribute rtrs_srv_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_srv_hca_name_show, NULL);
+
+static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_srv_src_addr_show, NULL);
+
+static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
+
+static struct attribute *rtrs_srv_sess_attrs[] = {
+ &rtrs_srv_hca_name_attr.attr,
+ &rtrs_srv_hca_port_attr.attr,
+ &rtrs_srv_src_addr_attr.attr,
+ &rtrs_srv_dst_addr_attr.attr,
+ &rtrs_srv_disconnect_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_srv_sess_attr_group = {
+ .attrs = rtrs_srv_sess_attrs,
+};
+
+STAT_ATTR(struct rtrs_srv_stats, rdma,
+ rtrs_srv_stats_rdma_to_str,
+ rtrs_srv_reset_rdma_stats);
+
+static struct attribute *rtrs_srv_stats_attrs[] = {
+ &rdma_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_srv_stats_attr_group = {
+ .attrs = rtrs_srv_stats_attrs,
+};
+
+static void rtrs_srv_dev_release(struct device *dev)
+{
+ struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+
+ kfree(srv);
+}
+
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ int err = 0;
+
+ mutex_lock(&srv->paths_mutex);
+ if (srv->dev_ref++) {
+ /*
+ * Device needs to be registered only on the first session
+ */
+ goto unlock;
+ }
+ srv->dev.class = rtrs_dev_class;
+ srv->dev.release = rtrs_srv_dev_release;
+ err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
+ if (err)
+ goto unlock;
+
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&srv->dev, true);
+ err = device_register(&srv->dev);
+ if (err) {
+ pr_err("device_register(): %d\n", err);
+ goto put;
+ }
+ srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
+ if (!srv->kobj_paths) {
+ err = -ENOMEM;
+ pr_err("kobject_create_and_add(): %d\n", err);
+ device_unregister(&srv->dev);
+ goto unlock;
+ }
+ dev_set_uevent_suppress(&srv->dev, false);
+ kobject_uevent(&srv->dev.kobj, KOBJ_ADD);
+ goto unlock;
+
+put:
+ put_device(&srv->dev);
+unlock:
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static void
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+
+ mutex_lock(&srv->paths_mutex);
+ if (!--srv->dev_ref) {
+ kobject_del(srv->kobj_paths);
+ kobject_put(srv->kobj_paths);
+ mutex_unlock(&srv->paths_mutex);
+ device_unregister(&srv->dev);
+ } else {
+ mutex_unlock(&srv->paths_mutex);
+ }
+}
+
+static void rtrs_srv_sess_stats_release(struct kobject *kobj)
+{
+ struct rtrs_srv_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_sess_stats_release,
+};
+
+static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+{
+ int err;
+ struct rtrs_sess *s = &sess->s;
+
+ err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
+ &sess->kobj, "stats");
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ return err;
+ }
+ err = sysfs_create_group(&sess->stats->kobj_stats,
+ &rtrs_srv_stats_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+
+ return err;
+}
+
+int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ char str[NAME_MAX];
+ int err, cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ str, sizeof(str));
+ cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ str + cnt, sizeof(str) - cnt);
+
+ err = rtrs_srv_create_once_sysfs_root_folders(sess);
+ if (err)
+ return err;
+
+ err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths,
+ "%s", str);
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ goto destroy_root;
+ }
+ err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = rtrs_srv_create_stats_files(sess);
+ if (err)
+ goto remove_group;
+
+ return 0;
+
+remove_group:
+ sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+put_kobj:
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+destroy_root:
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
+
+ return err;
+}
+
+void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+{
+ if (sess->kobj.state_in_sysfs) {
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ }
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
new file mode 100644
index 000000000000..0d9241f5d9e6
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -0,0 +1,2178 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+#include <rdma/ib_cm.h>
+
+MODULE_DESCRIPTION("RDMA Transport Server");
+MODULE_LICENSE("GPL");
+
+/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
+#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
+#define DEFAULT_SESS_QUEUE_DEPTH 512
+#define MAX_HDR_SIZE PAGE_SIZE
+
+/* We guarantee to serve at least 10 paths */
+#define CHUNK_POOL_SZ 10
+
+static struct rtrs_rdma_dev_pd dev_pd;
+static mempool_t *chunk_pool;
+struct class *rtrs_dev_class;
+
+static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
+static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
+
+static bool always_invalidate = true;
+module_param(always_invalidate, bool, 0444);
+MODULE_PARM_DESC(always_invalidate,
+ "Invalidate memory registration for contiguous memory regions before accessing.");
+
+module_param_named(max_chunk_size, max_chunk_size, int, 0444);
+MODULE_PARM_DESC(max_chunk_size,
+ "Max size for each IO request, when change the unit is in byte (default: "
+ __stringify(DEFAULT_MAX_CHUNK_SIZE) "KB)");
+
+module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
+MODULE_PARM_DESC(sess_queue_depth,
+ "Number of buffers for pending I/O requests to allocate per session. Maximum: "
+ __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
+ __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
+
+static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
+
+static struct workqueue_struct *rtrs_wq;
+
+static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_srv_con, c);
+}
+
+static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s)
+{
+ return container_of(s, struct rtrs_srv_sess, s);
+}
+
+static bool __rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&sess->state_lock);
+ old_state = sess->state;
+ switch (new_state) {
+ case RTRS_SRV_CONNECTED:
+ switch (old_state) {
+ case RTRS_SRV_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_SRV_CLOSING:
+ switch (old_state) {
+ case RTRS_SRV_CONNECTING:
+ case RTRS_SRV_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_SRV_CLOSED:
+ switch (old_state) {
+ case RTRS_SRV_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed)
+ sess->state = new_state;
+
+ return changed;
+}
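+
+/*
+ * Summary of the transitions permitted above (informational):
+ *   RTRS_SRV_CONNECTING -> RTRS_SRV_CONNECTED
+ *   RTRS_SRV_CONNECTING -> RTRS_SRV_CLOSING
+ *   RTRS_SRV_CONNECTED  -> RTRS_SRV_CLOSING
+ *   RTRS_SRV_CLOSING    -> RTRS_SRV_CLOSED
+ */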
+
+static bool rtrs_srv_change_state_get_old(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state,
+ enum rtrs_srv_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&sess->state_lock);
+ *old_state = sess->state;
+ changed = __rtrs_srv_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_lock);
+
+ return changed;
+}
+
+static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+
+ return rtrs_srv_change_state_get_old(sess, new_state, &old_state);
+}
+
+static void free_id(struct rtrs_srv_op *id)
+{
+ if (!id)
+ return;
+ kfree(id);
+}
+
+static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ int i;
+
+ WARN_ON(atomic_read(&sess->ids_inflight));
+ if (sess->ops_ids) {
+ for (i = 0; i < srv->queue_depth; i++)
+ free_id(sess->ops_ids[i]);
+ kfree(sess->ops_ids);
+ sess->ops_ids = NULL;
+ }
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_srv_rdma_done
+};
+
+static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_op *id;
+ int i;
+
+ sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
+ GFP_KERNEL);
+ if (!sess->ops_ids)
+ goto err;
+
+ for (i = 0; i < srv->queue_depth; ++i) {
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ goto err;
+
+ sess->ops_ids[i] = id;
+ }
+ init_waitqueue_head(&sess->ids_waitq);
+ atomic_set(&sess->ids_inflight, 0);
+
+ return 0;
+
+err:
+ rtrs_srv_free_ops_ids(sess);
+ return -ENOMEM;
+}
+
+static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
+{
+ atomic_inc(&sess->ids_inflight);
+}
+
+static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
+{
+ if (atomic_dec_and_test(&sess->ids_inflight))
+ wake_up(&sess->ids_waitq);
+}
+
+static void rtrs_srv_wait_ops_ids(struct rtrs_srv_sess *sess)
+{
+ wait_event(sess->ids_waitq, !atomic_read(&sess->ids_inflight));
+}
+
+
+static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "REG MR failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ return;
+ }
+}
+
+static struct ib_cqe local_reg_cqe = {
+ .done = rtrs_srv_reg_mr_done
+};
+
+static int rdma_write_sg(struct rtrs_srv_op *id)
+{
+ struct rtrs_sess *s = id->con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
+ struct rtrs_srv_mr *srv_mr;
+ struct rtrs_srv *srv = sess->srv;
+ struct ib_send_wr inv_wr, imm_wr;
+ struct ib_rdma_wr *wr = NULL;
+ enum ib_send_flags flags;
+ size_t sg_cnt;
+ int err, offset;
+ bool need_inval;
+ u32 rkey = 0;
+ struct ib_reg_wr rwr;
+ struct ib_sge *plist;
+ struct ib_sge list;
+
+ sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
+ need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
+ if (unlikely(sg_cnt != 1))
+ return -EINVAL;
+
+ offset = 0;
+
+ wr = &id->tx_wr;
+ plist = &id->tx_sg;
+ plist->addr = dma_addr + offset;
+ plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
+
+	/* WR will fail with a length error if this is 0 */
+ if (unlikely(plist->length == 0)) {
+ rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
+ return -EINVAL;
+ }
+
+ plist->lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ offset += plist->length;
+
+ wr->wr.sg_list = plist;
+ wr->wr.num_sge = 1;
+ wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
+ wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
+ if (rkey == 0)
+ rkey = wr->rkey;
+ else
+ /* Only one key is actually used */
+ WARN_ON_ONCE(rkey != wr->rkey);
+
+ wr->wr.opcode = IB_WR_RDMA_WRITE;
+ wr->wr.ex.imm_data = 0;
+ wr->wr.send_flags = 0;
+
+ if (need_inval && always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else if (always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (need_inval) {
+ wr->wr.next = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else {
+ wr->wr.next = &imm_wr;
+ }
+ /*
+	 * From time to time we have to post signaled sends,
+	 * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = (atomic_inc_return(&id->con->wr_cnt) % srv->queue_depth) ?
+ 0 : IB_SEND_SIGNALED;
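+	/*
+	 * E.g. with the default queue depth of 512, every 512th work
+	 * request on this connection is posted signaled (added note).
+	 */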
+
+ if (need_inval) {
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.send_flags = 0;
+ inv_wr.ex.invalidate_rkey = rkey;
+ }
+
+ imm_wr.next = NULL;
+ if (always_invalidate) {
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.num_sge = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.wr.send_flags = 0;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.sg_list = &list;
+ imm_wr.num_sge = 1;
+ imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.sg_list = NULL;
+ imm_wr.num_sge = 0;
+ imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.send_flags = flags;
+ imm_wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
+ 0, need_inval));
+
+ imm_wr.wr_cqe = &io_comp_cqe;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
+ offset, DMA_BIDIRECTIONAL);
+
+ err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
+ if (unlikely(err))
+ rtrs_err(s,
+ "Posting RDMA-Write-Request to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
+ * requests or on successful WRITE request.
+ * @con: the connection to send back result
+ * @id: the id associated with the IO
+ * @errno: the error number of the IO.
+ *
+ * Return 0 on success, a negative errno otherwise.
+ */
+static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ int errno)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct ib_send_wr inv_wr, imm_wr, *wr = NULL;
+ struct ib_reg_wr rwr;
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_mr *srv_mr;
+ bool need_inval = false;
+ enum ib_send_flags flags;
+ u32 imm;
+ int err;
+
+ if (id->dir == READ) {
+ struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
+ size_t sg_cnt;
+
+ need_inval = le16_to_cpu(rd_msg->flags) &
+ RTRS_MSG_NEED_INVAL_F;
+ sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
+
+ if (need_inval) {
+ if (likely(sg_cnt)) {
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.send_flags = 0;
+ /* Only one key is actually used */
+ inv_wr.ex.invalidate_rkey =
+ le32_to_cpu(rd_msg->desc[0].key);
+ } else {
+ WARN_ON_ONCE(1);
+ need_inval = false;
+ }
+ }
+ }
+
+ if (need_inval && always_invalidate) {
+ wr = &inv_wr;
+ inv_wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (always_invalidate) {
+ wr = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (need_inval) {
+ wr = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else {
+ wr = &imm_wr;
+ }
+ /*
+	 * From time to time we have to post signaled sends,
+	 * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
+ 0 : IB_SEND_SIGNALED;
+ imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+ imm_wr.next = NULL;
+ if (always_invalidate) {
+ struct ib_sge list;
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+ rwr.wr.next = &imm_wr;
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.num_sge = 0;
+ rwr.wr.send_flags = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.sg_list = &list;
+ imm_wr.num_sge = 1;
+ imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.sg_list = NULL;
+ imm_wr.num_sge = 0;
+ imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.send_flags = flags;
+ imm_wr.wr_cqe = &io_comp_cqe;
+
+ imm_wr.ex.imm_data = cpu_to_be32(imm);
+
+ err = ib_post_send(id->con->c.qp, wr, NULL);
+ if (unlikely(err))
+ rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
+
+void close_sess(struct rtrs_srv_sess *sess)
+{
+ enum rtrs_srv_state old_state;
+
+ if (rtrs_srv_change_state_get_old(sess, RTRS_SRV_CLOSING,
+ &old_state))
+ queue_work(rtrs_wq, &sess->close_work);
+ WARN_ON(sess->state != RTRS_SRV_CLOSING);
+}
+
+static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
+{
+ switch (state) {
+ case RTRS_SRV_CONNECTING:
+ return "RTRS_SRV_CONNECTING";
+ case RTRS_SRV_CONNECTED:
+ return "RTRS_SRV_CONNECTED";
+ case RTRS_SRV_CLOSING:
+ return "RTRS_SRV_CLOSING";
+ case RTRS_SRV_CLOSED:
+ return "RTRS_SRV_CLOSED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/**
+ * rtrs_srv_resp_rdma() - Finish an RDMA request
+ *
+ * @id: Internal RTRS operation identifier
+ * @status: Response code sent to the other side for this operation.
+ *          0 = success, <0 error
+ * Context: any
+ *
+ * Finish an RDMA operation. A message is sent to the client and the
+ * corresponding memory areas will be released.
+ *
+ * Return: false if the send queue is full and the response was queued
+ * for a later retry, true otherwise.
+ */
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv_con *con;
+ struct rtrs_sess *s;
+ int err;
+
+ if (WARN_ON(!id))
+ return true;
+
+ con = id->con;
+ s = con->c.sess;
+ sess = to_srv_sess(s);
+
+ id->status = status;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Sending I/O response failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ goto out;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id];
+
+ ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
+ }
+ if (unlikely(atomic_sub_return(1,
+ &con->sq_wr_avail) < 0)) {
+ pr_err("IB send queue full\n");
+ atomic_add(1, &con->sq_wr_avail);
+ spin_lock(&con->rsp_wr_wait_lock);
+ list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
+ spin_unlock(&con->rsp_wr_wait_lock);
+ return false;
+ }
+
+ if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
+ err = send_io_resp_imm(con, id, status);
+ else
+ err = rdma_write_sg(id);
+
+ if (unlikely(err)) {
+ rtrs_err_rl(s, "IO response failed: %d\n", err);
+ close_sess(sess);
+ }
+out:
+ rtrs_srv_put_ops_ids(sess);
+ return true;
+}
+EXPORT_SYMBOL(rtrs_srv_resp_rdma);
+
+/**
+ * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
+ * @srv: Session pointer
+ * @priv: The private pointer that is associated with the session.
+ */
+void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv)
+{
+ srv->priv = priv;
+}
+EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
+
+static void unmap_cont_bufs(struct rtrs_srv_sess *sess)
+{
+ int i;
+
+ for (i = 0; i < sess->mrs_num; i++) {
+ struct rtrs_srv_mr *srv_mr;
+
+ srv_mr = &sess->mrs[i];
+ rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+ ib_dereg_mr(srv_mr->mr);
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
+ srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
+ sg_free_table(&srv_mr->sgt);
+ }
+ kfree(sess->mrs);
+}
+
+static int map_cont_bufs(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *ss = &sess->s;
+ int i, mri, err, mrs_num;
+ unsigned int chunk_bits;
+ int chunks_per_mr = 1;
+
+ /*
+	 * Here we map queue_depth chunks to MRs. First we have to
+	 * figure out how many chunks we can map per MR.
+ */
+ if (always_invalidate) {
+ /*
+		 * In order to invalidate each chunk of memory separately,
+		 * we need one memory region per chunk.
+ */
+ mrs_num = srv->queue_depth;
+ } else {
+ chunks_per_mr =
+ sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
+ mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
+ chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
+ }
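+	/*
+	 * E.g. (hypothetical device limit, added note): with
+	 * queue_depth = 512 and max_fast_reg_page_list_len = 300,
+	 * mrs_num = 2 and the second DIV_ROUND_UP re-balances
+	 * chunks_per_mr to 256 instead of splitting 300 + 212.
+	 */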
+
+ sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL);
+ if (!sess->mrs)
+ return -ENOMEM;
+
+ sess->mrs_num = mrs_num;
+
+ for (mri = 0; mri < mrs_num; mri++) {
+ struct rtrs_srv_mr *srv_mr = &sess->mrs[mri];
+ struct sg_table *sgt = &srv_mr->sgt;
+ struct scatterlist *s;
+ struct ib_mr *mr;
+ int nr, chunks;
+
+ chunks = chunks_per_mr * mri;
+ if (!always_invalidate)
+ chunks_per_mr = min_t(int, chunks_per_mr,
+ srv->queue_depth - chunks);
+
+ err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
+ if (err)
+ goto err;
+
+ for_each_sg(sgt->sgl, s, chunks_per_mr, i)
+ sg_set_page(s, srv->chunks[chunks + i],
+ max_chunk_size, 0);
+
+ nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+ if (nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+ goto free_sg;
+ }
+ mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ sgt->nents);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto unmap_sg;
+ }
+ nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+ NULL, max_chunk_size);
+ if (nr < 0 || nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+ goto dereg_mr;
+ }
+
+ if (always_invalidate) {
+ srv_mr->iu = rtrs_iu_alloc(1,
+ sizeof(struct rtrs_msg_rkey_rsp),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_rdma_done);
+ if (!srv_mr->iu) {
+ err = -ENOMEM;
+ rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
+ goto free_iu;
+ }
+ }
+		/* Eventually the DMA address of each chunk could be cached */
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+ sess->dma_addr[chunks + i] = sg_dma_address(s);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+ srv_mr->mr = mr;
+
+ continue;
+err:
+ while (mri--) {
+ srv_mr = &sess->mrs[mri];
+ sgt = &srv_mr->sgt;
+ mr = srv_mr->mr;
+free_iu:
+ rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+dereg_mr:
+ ib_dereg_mr(mr);
+unmap_sg:
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+free_sg:
+ sg_free_table(sgt);
+ }
+ kfree(sess->mrs);
+
+ return err;
+ }
+
+ chunk_bits = ilog2(srv->queue_depth - 1) + 1;
+ sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
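+	/*
+	 * E.g. with the default queue depth of 512 (added note):
+	 * chunk_bits = ilog2(511) + 1 = 9, so mem_bits = 28 - 9 = 19;
+	 * a request immediate then carries the msg_id in its top 9 bits
+	 * and the chunk offset in its low 19 bits.
+	 */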
+
+ return 0;
+}
+
+static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
+{
+ close_sess(to_srv_sess(c->sess));
+}
+
+static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_init_hb(&sess->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_srv_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_start_hb(&sess->s);
+}
+
+static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_stop_hb(&sess->s);
+}
+
+static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Sess info response send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ return;
+ }
+ WARN_ON(wc->opcode != IB_WC_SEND);
+}
+
+static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ int up;
+
+ mutex_lock(&srv->paths_ev_mutex);
+ up = ++srv->paths_up;
+ if (up == 1)
+ ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ mutex_unlock(&srv->paths_ev_mutex);
+
+ /* Mark session as established */
+ sess->established = true;
+}
+
+static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ if (!sess->established)
+ return;
+
+ sess->established = false;
+ mutex_lock(&srv->paths_ev_mutex);
+ WARN_ON(!srv->paths_up);
+ if (--srv->paths_up == 0)
+ ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
+ mutex_unlock(&srv->paths_ev_mutex);
+}
+
+static int post_recv_sess(struct rtrs_srv_sess *sess);
+
+static int process_info_req(struct rtrs_srv_con *con,
+ struct rtrs_msg_info_req *msg)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct ib_send_wr *reg_wr = NULL;
+ struct rtrs_msg_info_rsp *rsp;
+ struct rtrs_iu *tx_iu;
+ struct ib_reg_wr *rwr;
+ int mri, err;
+ size_t tx_sz;
+
+ err = post_recv_sess(sess);
+ if (unlikely(err)) {
+ rtrs_err(s, "post_recv_sess(), err: %d\n", err);
+ return err;
+ }
+ rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
+ if (unlikely(!rwr))
+ return -ENOMEM;
+ strlcpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
+
+ tx_sz = sizeof(*rsp);
+ tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
+ tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
+ if (unlikely(!tx_iu)) {
+ err = -ENOMEM;
+ goto rwr_free;
+ }
+
+ rsp = tx_iu->buf;
+ rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
+ rsp->sg_cnt = cpu_to_le16(sess->mrs_num);
+
+ for (mri = 0; mri < sess->mrs_num; mri++) {
+ struct ib_mr *mr = sess->mrs[mri].mr;
+
+ rsp->desc[mri].addr = cpu_to_le64(mr->iova);
+ rsp->desc[mri].key = cpu_to_le32(mr->rkey);
+ rsp->desc[mri].len = cpu_to_le32(mr->length);
+
+ /*
+ * Fill in reg MR request and chain them *backwards*
+ */
+ rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
+ rwr[mri].wr.opcode = IB_WR_REG_MR;
+ rwr[mri].wr.wr_cqe = &local_reg_cqe;
+ rwr[mri].wr.num_sge = 0;
+ rwr[mri].wr.send_flags = mri ? 0 : IB_SEND_SIGNALED;
+ rwr[mri].mr = mr;
+ rwr[mri].key = mr->rkey;
+ rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ reg_wr = &rwr[mri].wr;
+ }
+
+ err = rtrs_srv_create_sess_files(sess);
+ if (unlikely(err))
+ goto iu_free;
+ kobject_get(&sess->kobj);
+ get_device(&sess->srv->dev);
+ rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED);
+ rtrs_srv_start_hb(sess);
+
+ /*
+	 * We do not account for the number of established connections
+	 * at the moment; we rely on the client, which sends the info
+	 * request only once all connections have been successfully
+	 * established. Thus, simply notify the listener with the proper
+	 * event if we are the first path.
+ */
+ rtrs_srv_sess_up(sess);
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info response */
+ err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
+iu_free:
+ rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ }
+rwr_free:
+ kfree(rwr);
+
+ return err;
+}
+
+static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *iu;
+ int err;
+
+ WARN_ON(con->c.cid);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Sess info request receive failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto close;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(s, "Sess info request is malformed: size %d\n",
+ wc->byte_len);
+ goto close;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ)) {
+ rtrs_err(s, "Sess info request is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto close;
+ }
+ err = process_info_req(con, msg);
+ if (unlikely(err))
+ goto close;
+
+out:
+ rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ return;
+close:
+ close_sess(sess);
+ goto out;
+}
+
+static int post_recv_info_req(struct rtrs_srv_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_iu *rx_iu;
+ int err;
+
+ rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_srv_info_req_done);
+ if (unlikely(!rx_iu))
+ return -ENOMEM;
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&con->c, rx_iu);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
+ rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
+{
+ int i, err;
+
+ for (i = 0; i < q_size; i++) {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_sess(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ size_t q_size;
+ int err, cid;
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = srv->queue_depth;
+
+ err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size);
+ if (unlikely(err)) {
+ rtrs_err(s, "post_recv_io(), err: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void process_read(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_read *msg,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t usr_len, data_len;
+ void *data;
+ int ret;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ return;
+ }
+ if (unlikely(msg->sg_cnt != 1 && msg->sg_cnt != 0)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, invalid message\n");
+ return;
+ }
+ rtrs_srv_get_ops_ids(sess);
+ rtrs_srv_update_rdma_stats(sess->stats, off, READ);
+ id = sess->ops_ids[buf_id];
+ id->con = con;
+ id->dir = READ;
+ id->msg_id = buf_id;
+ id->rd_msg = msg;
+ usr_len = le16_to_cpu(msg->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv, srv->priv, id, READ, data, data_len,
+ data + data_len, usr_len);
+
+ if (unlikely(ret)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
+ buf_id, ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_sess(sess);
+ }
+ rtrs_srv_put_ops_ids(sess);
+}
+
+static void process_write(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_write *req,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t data_len, usr_len;
+ void *data;
+ int ret;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Processing write request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ return;
+ }
+ rtrs_srv_get_ops_ids(sess);
+ rtrs_srv_update_rdma_stats(sess->stats, off, WRITE);
+ id = sess->ops_ids[buf_id];
+ id->con = con;
+ id->dir = WRITE;
+ id->msg_id = buf_id;
+
+ usr_len = le16_to_cpu(req->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv, srv->priv, id, WRITE, data, data_len,
+ data + data_len, usr_len);
+ if (unlikely(ret)) {
+ rtrs_err_rl(s,
+ "Processing write request failed, user module callback reports err: %d\n",
+ ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_sess(sess);
+ }
+ rtrs_srv_put_ops_ids(sess);
+}
+
+static void process_io_req(struct rtrs_srv_con *con, void *msg,
+ u32 id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_msg_rdma_hdr *hdr;
+ unsigned int type;
+
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id],
+ max_chunk_size, DMA_BIDIRECTIONAL);
+ hdr = msg;
+ type = le16_to_cpu(hdr->type);
+
+ switch (type) {
+ case RTRS_MSG_WRITE:
+ process_write(con, msg, id, off);
+ break;
+ case RTRS_MSG_READ:
+ process_read(con, msg, id, off);
+ break;
+ default:
+ rtrs_err(s,
+ "Processing I/O request failed, unknown message type received: 0x%02x\n",
+ type);
+ goto err;
+ }
+
+ return;
+
+err:
+ close_sess(sess);
+}
+
+static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_mr *mr =
+ container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ u32 msg_id, off;
+ void *data;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ }
+ msg_id = mr->msg_id;
+ off = mr->msg_off;
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+}
+
+static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
+ struct rtrs_srv_mr *mr)
+{
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &mr->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = mr->mr->rkey,
+ };
+ mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
+{
+ spin_lock(&con->rsp_wr_wait_lock);
+ while (!list_empty(&con->rsp_wr_wait_list)) {
+ struct rtrs_srv_op *id;
+ int ret;
+
+ id = list_entry(con->rsp_wr_wait_list.next,
+ struct rtrs_srv_op, wait_list);
+ list_del(&id->wait_list);
+
+ spin_unlock(&con->rsp_wr_wait_lock);
+ ret = rtrs_srv_resp_rdma(id, id->status);
+ spin_lock(&con->rsp_wr_wait_lock);
+
+ if (!ret) {
+ list_add(&id->wait_list, &con->rsp_wr_wait_list);
+ break;
+ }
+ }
+ spin_unlock(&con->rsp_wr_wait_lock);
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ u32 imm_type, imm_payload;
+ int err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(s,
+ "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
+ ib_wc_status_msg(wc->status), wc->wr_cqe,
+ wc->opcode, wc->vendor_err, wc->byte_len);
+ close_sess(sess);
+ }
+ return;
+ }
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
+ return;
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
+ close_sess(sess);
+ break;
+ }
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_REQ_IMM)) {
+ u32 msg_id, off;
+ void *data;
+
+ msg_id = imm_payload >> sess->mem_bits;
+ off = imm_payload & ((1 << sess->mem_bits) - 1);
+ if (unlikely(msg_id >= srv->queue_depth ||
+ off >= max_chunk_size)) {
+ rtrs_err(s, "Wrong msg_id %u, off %u\n",
+ msg_id, off);
+ close_sess(sess);
+ return;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &sess->mrs[msg_id];
+
+ mr->msg_off = off;
+ mr->msg_id = msg_id;
+ err = rtrs_srv_inv_rkey(con, mr);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n",
+ err);
+ close_sess(sess);
+ break;
+ }
+ } else {
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+ }
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&sess->s);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ sess->s.hb_missed_cnt = 0;
+ } else {
+ rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ case IB_WC_SEND:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ atomic_add(srv->queue_depth, &con->sq_wr_avail);
+
+ if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list)))
+ rtrs_rdma_process_wr_wait_list(con);
+
+ break;
+ default:
+ rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+/**
+ * rtrs_srv_get_sess_name() - Get the session name of the first connected path.
+ * @srv: Session
+ * @sessname: Sessname buffer
+ * @len: Length of sessname buffer
+ */
+int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
+{
+ struct rtrs_srv_sess *sess;
+ int err = -ENOTCONN;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(sess, &srv->paths_list, s.entry) {
+ if (sess->state != RTRS_SRV_CONNECTED)
+ continue;
+ strlcpy(sessname, sess->s.sessname,
+ min_t(size_t, sizeof(sess->s.sessname), len));
+ err = 0;
+ break;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_srv_get_sess_name);
+
+/**
+ * rtrs_srv_get_queue_depth() - Get rtrs_srv queue depth.
+ * @srv: Session
+ */
+int rtrs_srv_get_queue_depth(struct rtrs_srv *srv)
+{
+ return srv->queue_depth;
+}
+EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
+
+static int find_next_bit_ring(struct rtrs_srv_sess *sess)
+{
+ struct ib_device *ib_dev = sess->s.dev->ib_dev;
+ int v;
+
+ v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask);
+ if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
+ v = cpumask_first(&cq_affinity_mask);
+ return v;
+}
+
+static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
+{
+ sess->cur_cq_vector = find_next_bit_ring(sess);
+
+ return sess->cur_cq_vector;
+}
+
+static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+ int i;
+
+ srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+ if (!srv)
+ return NULL;
+
+ refcount_set(&srv->refcount, 1);
+ INIT_LIST_HEAD(&srv->paths_list);
+ mutex_init(&srv->paths_mutex);
+ mutex_init(&srv->paths_ev_mutex);
+ uuid_copy(&srv->paths_uuid, paths_uuid);
+ srv->queue_depth = sess_queue_depth;
+ srv->ctx = ctx;
+
+ srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
+ GFP_KERNEL);
+ if (!srv->chunks)
+ goto err_free_srv;
+
+ for (i = 0; i < srv->queue_depth; i++) {
+ srv->chunks[i] = mempool_alloc(chunk_pool, GFP_KERNEL);
+ if (!srv->chunks[i])
+ goto err_free_chunks;
+ }
+ list_add(&srv->ctx_list, &ctx->srv_list);
+
+ return srv;
+
+err_free_chunks:
+ while (i--)
+ mempool_free(srv->chunks[i], chunk_pool);
+ kfree(srv->chunks);
+
+err_free_srv:
+ kfree(srv);
+
+ return NULL;
+}
+
+static void free_srv(struct rtrs_srv *srv)
+{
+ int i;
+
+ WARN_ON(refcount_read(&srv->refcount));
+ for (i = 0; i < srv->queue_depth; i++)
+ mempool_free(srv->chunks[i], chunk_pool);
+ kfree(srv->chunks);
+ mutex_destroy(&srv->paths_mutex);
+ mutex_destroy(&srv->paths_ev_mutex);
+ /* last put to release the srv structure */
+ put_device(&srv->dev);
+}
+
+static inline struct rtrs_srv *__find_srv_and_get(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
+ if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
+ refcount_inc_not_zero(&srv->refcount))
+ return srv;
+ }
+
+ return NULL;
+}
+
+static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+
+ mutex_lock(&ctx->srv_mutex);
+ srv = __find_srv_and_get(ctx, paths_uuid);
+ if (!srv)
+ srv = __alloc_srv(ctx, paths_uuid);
+ mutex_unlock(&ctx->srv_mutex);
+
+ return srv;
+}
+
+static void put_srv(struct rtrs_srv *srv)
+{
+ if (refcount_dec_and_test(&srv->refcount)) {
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ WARN_ON(srv->dev.kobj.state_in_sysfs);
+
+ mutex_lock(&ctx->srv_mutex);
+ list_del(&srv->ctx_list);
+ mutex_unlock(&ctx->srv_mutex);
+ free_srv(srv);
+ }
+}
+
+static void __add_path_to_srv(struct rtrs_srv *srv,
+ struct rtrs_srv_sess *sess)
+{
+ list_add_tail(&sess->s.entry, &srv->paths_list);
+ srv->paths_num++;
+ WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
+}
+
+static void del_path_from_srv(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+
+ if (WARN_ON(!srv))
+ return;
+
+ mutex_lock(&srv->paths_mutex);
+ list_del(&sess->s.entry);
+ WARN_ON(!srv->paths_num);
+ srv->paths_num--;
+ mutex_unlock(&srv->paths_mutex);
+}
+
+/* Return 0 if the addresses are equal (callers test !sockaddr_cmp()),
+ * non-zero otherwise; -ENOENT for an unknown address family.
+ */
+static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
+{
+ switch (a->sa_family) {
+ case AF_IB:
+ return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
+ &((struct sockaddr_ib *)b)->sib_addr,
+ sizeof(struct ib_addr)) &&
+ (b->sa_family == AF_IB);
+ case AF_INET:
+ return memcmp(&((struct sockaddr_in *)a)->sin_addr,
+ &((struct sockaddr_in *)b)->sin_addr,
+ sizeof(struct in_addr)) &&
+ (b->sa_family == AF_INET);
+ case AF_INET6:
+ return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
+ &((struct sockaddr_in6 *)b)->sin6_addr,
+ sizeof(struct in6_addr)) &&
+ (b->sa_family == AF_INET6);
+ default:
+ return -ENOENT;
+ }
+}
+
+static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
+ struct rdma_addr *addr)
+{
+ struct rtrs_srv_sess *sess;
+
+ list_for_each_entry(sess, &srv->paths_list, s.entry)
+ if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr,
+ (struct sockaddr *)&addr->dst_addr) &&
+ !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr,
+ (struct sockaddr *)&addr->src_addr))
+ return true;
+
+ return false;
+}
+
+static void free_sess(struct rtrs_srv_sess *sess)
+{
+ if (sess->kobj.state_in_sysfs)
+ kobject_put(&sess->kobj);
+ else
+ kfree(sess);
+}
+
+static void rtrs_srv_close_work(struct work_struct *work)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv_con *con;
+ int i;
+
+ sess = container_of(work, typeof(*sess), close_work);
+
+ rtrs_srv_destroy_sess_files(sess);
+ rtrs_srv_stop_hb(sess);
+
+ for (i = 0; i < sess->s.con_num; i++) {
+ if (!sess->s.con[i])
+ continue;
+ con = to_srv_con(sess->s.con[i]);
+ rdma_disconnect(con->c.cm_id);
+ ib_drain_qp(con->c.qp);
+ }
+ /* Wait for all inflights */
+ rtrs_srv_wait_ops_ids(sess);
+
+ /* Notify upper layer if we are the last path */
+ rtrs_srv_sess_down(sess);
+
+ unmap_cont_bufs(sess);
+ rtrs_srv_free_ops_ids(sess);
+
+ for (i = 0; i < sess->s.con_num; i++) {
+ if (!sess->s.con[i])
+ continue;
+ con = to_srv_con(sess->s.con[i]);
+ rtrs_cq_qp_destroy(&con->c);
+ rdma_destroy_id(con->c.cm_id);
+ kfree(con);
+ }
+ rtrs_ib_dev_put(sess->s.dev);
+
+ del_path_from_srv(sess);
+ put_srv(sess->srv);
+ sess->srv = NULL;
+ rtrs_srv_change_state(sess, RTRS_SRV_CLOSED);
+
+ kfree(sess->dma_addr);
+ kfree(sess->s.con);
+ free_sess(sess);
+}
+
+static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess,
+ struct rdma_cm_id *cm_id)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_msg_conn_rsp msg;
+ struct rdma_conn_param param;
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .queue_depth = cpu_to_le16(srv->queue_depth),
+ .max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
+ .max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
+ };
+
+ if (always_invalidate)
+ msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
+
+ err = rdma_accept(cm_id, &param);
+ if (err)
+ pr_err("rdma_accept(), err: %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
+{
+ struct rtrs_msg_conn_rsp msg;
+ int err;
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .errno = cpu_to_le16(errno),
+ };
+
+ err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
+ if (err)
+ pr_err("rdma_reject(), err: %d\n", err);
+
+ /* Bounce errno back */
+ return errno;
+}
+
+static struct rtrs_srv_sess *
+__find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid)
+{
+ struct rtrs_srv_sess *sess;
+
+ list_for_each_entry(sess, &srv->paths_list, s.entry) {
+ if (uuid_equal(&sess->s.uuid, sess_uuid))
+ return sess;
+ }
+
+ return NULL;
+}
+
+static int create_con(struct rtrs_srv_sess *sess,
+ struct rdma_cm_id *cm_id,
+ unsigned int cid)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_con *con;
+
+ u16 cq_size, wr_queue_size;
+ int err, cq_vector;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&con->rsp_wr_wait_lock);
+ INIT_LIST_HEAD(&con->rsp_wr_wait_list);
+ con->c.cm_id = cm_id;
+ con->c.sess = &sess->s;
+ con->c.cid = cid;
+ atomic_set(&con->wr_cnt, 0);
+
+ if (con->c.cid == 0) {
+ /*
+ * All receive WRs and all send WRs (each send requiring an
+ * invalidate) + 2 for drain and heartbeat
+ */
+ wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ cq_size = wr_queue_size;
+ } else {
+ /*
+ * Worst case: all receive requests are posted, all write
+ * requests are posted, each read request requires an
+ * invalidate request, plus one extra WR for the drain
+ * when the QP goes into the error state.
+ */
+ cq_size = srv->queue_depth * 3 + 1;
+ /*
+ * In theory we might have queue_depth * 32
+ * outstanding requests if an unsafe global key is used
+ * and we have queue_depth read requests each consisting
+ * of 32 different addresses. Divide by 3 to stay within mlx5 limits.
+ */
+ wr_queue_size = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
+ }
+ atomic_set(&con->sq_wr_avail, wr_queue_size);
+ cq_vector = rtrs_srv_get_next_cq_vector(sess);
+
+ /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
+ err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
+ wr_queue_size, IB_POLL_WORKQUEUE);
+ if (err) {
+ rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
+ goto free_con;
+ }
+ if (con->c.cid == 0) {
+ err = post_recv_info_req(con);
+ if (err)
+ goto free_cqqp;
+ }
+ WARN_ON(sess->s.con[cid]);
+ sess->s.con[cid] = &con->c;
+
+ /*
+ * Change context from server to current connection. The other
+ * way is to use cm_id->qp->qp_context, which does not work on OFED.
+ */
+ cm_id->context = &con->c;
+
+ return 0;
+
+free_cqqp:
+ rtrs_cq_qp_destroy(&con->c);
+free_con:
+ kfree(con);
+
+err:
+ return err;
+}
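As a worked example of the sizing above, assuming SERVICE_CON_QUEUE_DEPTH is 512 as defined elsewhere in this series: the user connection (cid == 0) gets wr_queue_size = cq_size = 512 * 3 + 2 = 1538, while an IO connection on a server with queue_depth = 512 gets cq_size = 512 * 3 + 1 = 1537 and, on a device reporting max_qp_wr = 32768, wr_queue_size = 32768 / 3 = 10922.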
+
+static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
+ struct rdma_cm_id *cm_id,
+ unsigned int con_num,
+ unsigned int recon_cnt,
+ const uuid_t *uuid)
+{
+ struct rtrs_srv_sess *sess;
+ int err = -ENOMEM;
+
+ if (srv->paths_num >= MAX_PATHS_NUM) {
+ err = -ECONNRESET;
+ goto err;
+ }
+ if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
+ err = -EEXIST;
+ pr_err("Path with same addr exists\n");
+ goto err;
+ }
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ goto err;
+
+ sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
+ if (!sess->stats)
+ goto err_free_sess;
+
+ sess->stats->sess = sess;
+
+ sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr),
+ GFP_KERNEL);
+ if (!sess->dma_addr)
+ goto err_free_stats;
+
+ sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
+ if (!sess->s.con)
+ goto err_free_dma_addr;
+
+ sess->state = RTRS_SRV_CONNECTING;
+ sess->srv = srv;
+ sess->cur_cq_vector = -1;
+ sess->s.dst_addr = cm_id->route.addr.dst_addr;
+ sess->s.src_addr = cm_id->route.addr.src_addr;
+ sess->s.con_num = con_num;
+ sess->s.recon_cnt = recon_cnt;
+ uuid_copy(&sess->s.uuid, uuid);
+ spin_lock_init(&sess->state_lock);
+ INIT_WORK(&sess->close_work, rtrs_srv_close_work);
+ rtrs_srv_init_hb(sess);
+
+ sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
+ if (!sess->s.dev) {
+ err = -ENOMEM;
+ goto err_free_con;
+ }
+ err = map_cont_bufs(sess);
+ if (err)
+ goto err_put_dev;
+
+ err = rtrs_srv_alloc_ops_ids(sess);
+ if (err)
+ goto err_unmap_bufs;
+
+ __add_path_to_srv(srv, sess);
+
+ return sess;
+
+err_unmap_bufs:
+ unmap_cont_bufs(sess);
+err_put_dev:
+ rtrs_ib_dev_put(sess->s.dev);
+err_free_con:
+ kfree(sess->s.con);
+err_free_dma_addr:
+ kfree(sess->dma_addr);
+err_free_stats:
+ kfree(sess->stats);
+err_free_sess:
+ kfree(sess);
+err:
+ return ERR_PTR(err);
+}
+
+static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
+ const struct rtrs_msg_conn_req *msg,
+ size_t len)
+{
+ struct rtrs_srv_ctx *ctx = cm_id->context;
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv *srv;
+
+ u16 version, con_num, cid;
+ u16 recon_cnt;
+ int err;
+
+ if (len < sizeof(*msg)) {
+ pr_err("Invalid RTRS connection request\n");
+ goto reject_w_econnreset;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ pr_err("Invalid RTRS magic\n");
+ goto reject_w_econnreset;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ pr_err("Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ goto reject_w_econnreset;
+ }
+ con_num = le16_to_cpu(msg->cid_num);
+ if (con_num > 4096) {
+ /* Sanity check */
+ pr_err("Too many connections requested: %d\n", con_num);
+ goto reject_w_econnreset;
+ }
+ cid = le16_to_cpu(msg->cid);
+ if (cid >= con_num) {
+ /* Sanity check */
+ pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
+ goto reject_w_econnreset;
+ }
+ recon_cnt = le16_to_cpu(msg->recon_cnt);
+ srv = get_or_create_srv(ctx, &msg->paths_uuid);
+ if (!srv) {
+ err = -ENOMEM;
+ goto reject_w_err;
+ }
+ mutex_lock(&srv->paths_mutex);
+ sess = __find_sess(srv, &msg->sess_uuid);
+ if (sess) {
+ struct rtrs_sess *s = &sess->s;
+
+ /* Session already holds a reference */
+ put_srv(srv);
+
+ if (sess->state != RTRS_SRV_CONNECTING) {
+ rtrs_err(s, "Session in wrong state: %s\n",
+ rtrs_srv_state_str(sess->state));
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ /*
+ * Sanity checks
+ */
+ if (con_num != s->con_num || cid >= s->con_num) {
+ rtrs_err(s, "Incorrect request: %d, %d\n",
+ cid, con_num);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ if (s->con[cid]) {
+ rtrs_err(s, "Connection already exists: %d\n",
+ cid);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ } else {
+ sess = __alloc_sess(srv, cm_id, con_num, recon_cnt,
+ &msg->sess_uuid);
+ if (IS_ERR(sess)) {
+ mutex_unlock(&srv->paths_mutex);
+ put_srv(srv);
+ err = PTR_ERR(sess);
+ goto reject_w_err;
+ }
+ }
+ err = create_con(sess, cm_id, cid);
+ if (err) {
+ (void)rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since the session has other connections we follow the normal
+ * way through the workqueue, but still return an error to tell
+ * cma.c to call rdma_destroy_id() for the current connection.
+ */
+ goto close_and_return_err;
+ }
+ err = rtrs_rdma_do_accept(sess, cm_id);
+ if (err) {
+ (void)rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since the current connection was successfully added to the
+ * session we follow the normal way through the workqueue to
+ * close the session, thus return 0 to tell cma.c that we call
+ * rdma_destroy_id() ourselves.
+ */
+ err = 0;
+ goto close_and_return_err;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return 0;
+
+reject_w_err:
+ return rtrs_rdma_do_reject(cm_id, err);
+
+reject_w_econnreset:
+ return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
+
+close_and_return_err:
+ close_sess(sess);
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_srv_sess *sess = NULL;
+ struct rtrs_sess *s = NULL;
+
+ if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
+ struct rtrs_con *c = cm_id->context;
+
+ s = c->sess;
+ sess = to_srv_sess(s);
+ }
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ /*
+ * In case of error cma.c will destroy cm_id,
+ * see cma_process_remove()
+ */
+ return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
+ ev->param.conn.private_data_len);
+ case RDMA_CM_EVENT_ESTABLISHED:
+ /* Nothing here */
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ close_sess(sess);
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ close_sess(sess);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ close_sess(sess);
+ break;
+ default:
+ pr_err("Ignoring unexpected CM event %s, err %d\n",
+ rdma_event_msg(ev->event), ev->status);
+ break;
+ }
+
+ return 0;
+}
+
+static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
+ struct sockaddr *addr,
+ enum rdma_ucm_port_space ps)
+{
+ struct rdma_cm_id *cm_id;
+ int ret;
+
+ cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
+ ctx, ps, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
+ pr_err("Creating id for RDMA connection failed, err: %d\n",
+ ret);
+ goto err_out;
+ }
+ ret = rdma_bind_addr(cm_id, addr);
+ if (ret) {
+ pr_err("Binding RDMA address failed, err: %d\n", ret);
+ goto err_cm;
+ }
+ ret = rdma_listen(cm_id, 64);
+ if (ret) {
+ pr_err("Listening on RDMA connection failed, err: %d\n",
+ ret);
+ goto err_cm;
+ }
+
+ return cm_id;
+
+err_cm:
+ rdma_destroy_id(cm_id);
+err_out:
+
+ return ERR_PTR(ret);
+}
+
+static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
+{
+ struct sockaddr_in6 sin = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_ANY_INIT,
+ .sin6_port = htons(port),
+ };
+ struct sockaddr_ib sib = {
+ .sib_family = AF_IB,
+ .sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
+ .sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
+ .sib_pkey = cpu_to_be16(0xffff),
+ };
+ struct rdma_cm_id *cm_ip, *cm_ib;
+ int ret;
+
+ /*
+ * We accept both IPoIB and IB connections, so we need to keep
+ * two CM IDs, one for each socket type and port space.
+ * If the CM initialization of one of the IDs fails, we abort
+ * everything.
+ */
+ cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
+ if (IS_ERR(cm_ip))
+ return PTR_ERR(cm_ip);
+
+ cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
+ if (IS_ERR(cm_ib)) {
+ ret = PTR_ERR(cm_ib);
+ goto free_cm_ip;
+ }
+
+ ctx->cm_id_ip = cm_ip;
+ ctx->cm_id_ib = cm_ib;
+
+ return 0;
+
+free_cm_ip:
+ rdma_destroy_id(cm_ip);
+
+ return ret;
+}
+
+static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
+{
+ struct rtrs_srv_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->ops = *ops;
+ mutex_init(&ctx->srv_mutex);
+ INIT_LIST_HEAD(&ctx->srv_list);
+
+ return ctx;
+}
+
+static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
+{
+ WARN_ON(!list_empty(&ctx->srv_list));
+ mutex_destroy(&ctx->srv_mutex);
+ kfree(ctx);
+}
+
+/**
+ * rtrs_srv_open() - open RTRS server context
+ * @ops: callback functions
+ * @port: port to listen on
+ *
+ * Creates server context with specified callbacks.
+ *
+ * Returns a valid pointer on success, otherwise an ERR_PTR.
+ */
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
+{
+ struct rtrs_srv_ctx *ctx;
+ int err;
+
+ ctx = alloc_srv_ctx(ops);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ err = rtrs_srv_rdma_init(ctx, port);
+ if (err) {
+ free_srv_ctx(ctx);
+ return ERR_PTR(err);
+ }
+
+ return ctx;
+}
+EXPORT_SYMBOL(rtrs_srv_open);
+
+static void close_sessions(struct rtrs_srv *srv)
+{
+ struct rtrs_srv_sess *sess;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(sess, &srv->paths_list, s.entry)
+ close_sess(sess);
+ mutex_unlock(&srv->paths_mutex);
+}
+
+static void close_ctx(struct rtrs_srv_ctx *ctx)
+{
+ struct rtrs_srv *srv;
+
+ mutex_lock(&ctx->srv_mutex);
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list)
+ close_sessions(srv);
+ mutex_unlock(&ctx->srv_mutex);
+ flush_workqueue(rtrs_wq);
+}
+
+/**
+ * rtrs_srv_close() - close RTRS server context
+ * @ctx: pointer to server context
+ *
+ * Closes RTRS server context with all client sessions.
+ */
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
+{
+ rdma_destroy_id(ctx->cm_id_ip);
+ rdma_destroy_id(ctx->cm_id_ib);
+ close_ctx(ctx);
+ free_srv_ctx(ctx);
+}
+EXPORT_SYMBOL(rtrs_srv_close);
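A minimal usage sketch for rtrs_srv_open()/rtrs_srv_close(); the callback bodies, the "hypothetical_" names and the port number are illustrative only, not part of the patch:

static int hypothetical_rdma_ev(struct rtrs_srv *sess, void *priv,
				struct rtrs_srv_op *id, int dir, void *data,
				size_t datalen, const void *usr, size_t usrlen)
{
	/* Process the IO here, then acknowledge it to the client. */
	return rtrs_srv_resp_rdma(id, 0) ? 0 : -ECONNRESET;
}

static int hypothetical_link_ev(struct rtrs_srv *sess,
				enum rtrs_srv_link_ev ev, void *priv)
{
	return 0;	/* accept every session */
}

static struct rtrs_srv_ops hypothetical_ops = {
	.rdma_ev = hypothetical_rdma_ev,
	.link_ev = hypothetical_link_ev,
};

static struct rtrs_srv_ctx *hypothetical_ctx;

static int __init hypothetical_init(void)
{
	hypothetical_ctx = rtrs_srv_open(&hypothetical_ops, 1234);
	return PTR_ERR_OR_ZERO(hypothetical_ctx);
}

static void __exit hypothetical_exit(void)
{
	rtrs_srv_close(hypothetical_ctx);
}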
+
+static int check_module_params(void)
+{
+ if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
+ pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
+ sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+ if (max_chunk_size < 4096 || !is_power_of_2(max_chunk_size)) {
+ pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
+ max_chunk_size, 4096);
+ return -EINVAL;
+ }
+
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and the
+ * offset inside the memory chunk
+ */
+ if ((ilog2(sess_queue_depth - 1) + 1) +
+ (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
+ pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
+ MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
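As a worked example of the immediate-payload check above, assuming MAX_IMM_PAYL_BITS is 28 as defined elsewhere in this series: with, say, sess_queue_depth = 512 and max_chunk_size = 128 KiB, encoding the buffer id takes ilog2(511) + 1 = 9 bits and the chunk offset ilog2(131071) + 1 = 17 bits, 26 bits in total, which fits.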
+
+static int __init rtrs_server_init(void)
+{
+ int err;
+
+ pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld) , sess_queue_depth: %d, always_invalidate: %d)\n",
+ KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
+ max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
+ sess_queue_depth, always_invalidate);
+
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ err = check_module_params();
+ if (err) {
+ pr_err("Failed to load module, invalid module parameters, err: %d\n",
+ err);
+ return err;
+ }
+ chunk_pool = mempool_create_page_pool(sess_queue_depth * CHUNK_POOL_SZ,
+ get_order(max_chunk_size));
+ if (!chunk_pool)
+ return -ENOMEM;
+ rtrs_dev_class = class_create(THIS_MODULE, "rtrs-server");
+ if (IS_ERR(rtrs_dev_class)) {
+ err = PTR_ERR(rtrs_dev_class);
+ goto out_chunk_pool;
+ }
+ rtrs_wq = alloc_workqueue("rtrs_server_wq", WQ_MEM_RECLAIM, 0);
+ if (!rtrs_wq) {
+ err = -ENOMEM;
+ goto out_dev_class;
+ }
+
+ return 0;
+
+out_dev_class:
+ class_destroy(rtrs_dev_class);
+out_chunk_pool:
+ mempool_destroy(chunk_pool);
+
+ return err;
+}
+
+static void __exit rtrs_server_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_dev_class);
+ mempool_destroy(chunk_pool);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_server_init);
+module_exit(rtrs_server_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
new file mode 100644
index 000000000000..dc95b0932f0d
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_SRV_H
+#define RTRS_SRV_H
+
+#include <linux/device.h>
+#include <linux/refcount.h>
+#include "rtrs-pri.h"
+
+/*
+ * enum rtrs_srv_state - Server states.
+ */
+enum rtrs_srv_state {
+ RTRS_SRV_CONNECTING,
+ RTRS_SRV_CONNECTED,
+ RTRS_SRV_CLOSING,
+ RTRS_SRV_CLOSED,
+};
+
+/* Stats for read and write operations.
+ * See Documentation/ABI/testing/sysfs-class-rtrs-server for details.
+ */
+struct rtrs_srv_stats_rdma_stats {
+ struct {
+ atomic64_t cnt;
+ atomic64_t size_total;
+ } dir[2];
+};
+
+struct rtrs_srv_stats {
+ struct kobject kobj_stats;
+ struct rtrs_srv_stats_rdma_stats rdma_stats;
+ struct rtrs_srv_sess *sess;
+};
+
+struct rtrs_srv_con {
+ struct rtrs_con c;
+ atomic_t wr_cnt;
+ atomic_t sq_wr_avail;
+ struct list_head rsp_wr_wait_list;
+ spinlock_t rsp_wr_wait_lock;
+};
+
+/* IO context in rtrs_srv; each IO has one */
+struct rtrs_srv_op {
+ struct rtrs_srv_con *con;
+ u32 msg_id;
+ u8 dir;
+ struct rtrs_msg_rdma_read *rd_msg;
+ struct ib_rdma_wr tx_wr;
+ struct ib_sge tx_sg;
+ struct list_head wait_list;
+ int status;
+};
+
+/*
+ * Server-side memory region context. When always_invalidate=Y we need
+ * queue_depth memory regions so that each one can be invalidated.
+ */
+struct rtrs_srv_mr {
+ struct ib_mr *mr;
+ struct sg_table sgt;
+ struct ib_cqe inv_cqe; /* only for always_invalidate=true */
+ u32 msg_id; /* only for always_invalidate=true */
+ u32 msg_off; /* only for always_invalidate=true */
+ struct rtrs_iu *iu; /* send buffer for new rkey msg */
+};
+
+struct rtrs_srv_sess {
+ struct rtrs_sess s;
+ struct rtrs_srv *srv;
+ struct work_struct close_work;
+ enum rtrs_srv_state state;
+ spinlock_t state_lock;
+ int cur_cq_vector;
+ struct rtrs_srv_op **ops_ids;
+ atomic_t ids_inflight;
+ wait_queue_head_t ids_waitq;
+ struct rtrs_srv_mr *mrs;
+ unsigned int mrs_num;
+ dma_addr_t *dma_addr;
+ bool established;
+ unsigned int mem_bits;
+ struct kobject kobj;
+ struct rtrs_srv_stats *stats;
+};
+
+struct rtrs_srv {
+ struct list_head paths_list;
+ int paths_up;
+ struct mutex paths_ev_mutex;
+ size_t paths_num;
+ struct mutex paths_mutex;
+ uuid_t paths_uuid;
+ refcount_t refcount;
+ struct rtrs_srv_ctx *ctx;
+ struct list_head ctx_list;
+ void *priv;
+ size_t queue_depth;
+ struct page **chunks;
+ struct device dev;
+ unsigned int dev_ref;
+ struct kobject *kobj_paths;
+};
+
+struct rtrs_srv_ctx {
+ struct rtrs_srv_ops ops;
+ struct rdma_cm_id *cm_id_ip;
+ struct rdma_cm_id *cm_id_ib;
+ struct mutex srv_mutex;
+ struct list_head srv_list;
+};
+
+extern struct class *rtrs_dev_class;
+
+void close_sess(struct rtrs_srv_sess *sess);
+
+static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
+ size_t size, int d)
+{
+ atomic64_inc(&s->rdma_stats.dir[d].cnt);
+ atomic64_add(size, &s->rdma_stats.dir[d].size_total);
+}
+
+/* functions which are implemented in rtrs-srv-stats.c */
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
+ char *page, size_t len);
+int rtrs_srv_reset_wc_completion_stats(struct rtrs_srv_stats *stats,
+ bool enable);
+int rtrs_srv_stats_wc_completion_to_str(struct rtrs_srv_stats *stats, char *buf,
+ size_t len);
+int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
+ char *page, size_t len);
+
+/* functions which are implemented in rtrs-srv-sysfs.c */
+int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess);
+void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess);
+
+#endif /* RTRS_SRV_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
new file mode 100644
index 000000000000..ff1093d6e4bc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/inet.h>
+
+#include "rtrs-pri.h"
+#include "rtrs-log.h"
+
+MODULE_DESCRIPTION("RDMA Transport Core");
+MODULE_LICENSE("GPL");
+
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
+ struct ib_device *dma_dev,
+ enum dma_data_direction dir,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc))
+{
+ struct rtrs_iu *ius, *iu;
+ int i;
+
+ ius = kcalloc(queue_size, sizeof(*ius), gfp_mask);
+ if (!ius)
+ return NULL;
+ for (i = 0; i < queue_size; i++) {
+ iu = &ius[i];
+ iu->buf = kzalloc(size, gfp_mask);
+ if (!iu->buf)
+ goto err;
+
+ iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
+ if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
+ kfree(iu->buf);
+ goto err;
+ }
+
+ iu->cqe.done = done;
+ iu->size = size;
+ iu->direction = dir;
+ }
+ return ius;
+err:
+ rtrs_iu_free(ius, dir, dma_dev, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
+
+void rtrs_iu_free(struct rtrs_iu *ius, enum dma_data_direction dir,
+ struct ib_device *ibdev, u32 queue_size)
+{
+ struct rtrs_iu *iu;
+ int i;
+
+ if (!ius)
+ return;
+
+ for (i = 0; i < queue_size; i++) {
+ iu = &ius[i];
+ ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, dir);
+ kfree(iu->buf);
+ }
+ kfree(ius);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_free);
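A minimal allocation/teardown sketch for the IU helpers above; the queue size, buffer size and "hypothetical_" names are illustrative:

static void hypothetical_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* Completion handling for the posted IU would go here. */
}

static int hypothetical_iu_setup(struct ib_device *ibdev)
{
	struct rtrs_iu *ius;

	ius = rtrs_iu_alloc(16, 4096, GFP_KERNEL, ibdev, DMA_TO_DEVICE,
			    hypothetical_done);
	if (!ius)
		return -ENOMEM;

	/* ... post ius[0..15] via rtrs_iu_post_send()/rtrs_iu_post_recv() ... */

	rtrs_iu_free(ius, DMA_TO_DEVICE, ibdev, 16);
	return 0;
}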
+
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
+{
+ struct rtrs_sess *sess = con->sess;
+ struct ib_recv_wr wr;
+ struct ib_sge list;
+
+ list.addr = iu->dma_addr;
+ list.length = iu->size;
+ list.lkey = sess->dev->ib_pd->local_dma_lkey;
+
+ if (list.length == 0) {
+ rtrs_wrn(con->sess,
+ "Posting receive work request failed, sg list is empty\n");
+ return -EINVAL;
+ }
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_recv);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr;
+
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = cqe,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);
+
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head)
+{
+ struct rtrs_sess *sess = con->sess;
+ struct ib_send_wr wr;
+ struct ib_sge list;
+
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
+ list.addr = iu->dma_addr;
+ list.length = size;
+ list.lkey = sess->dev->ib_pd->local_dma_lkey;
+
+ wr = (struct ib_send_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ .opcode = IB_WR_SEND,
+ .send_flags = IB_SEND_SIGNALED,
+ };
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr;
+ } else {
+ head = &wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_send);
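A sketch of the work-request chaining convention behind the @head parameter above: the IU send WR is appended to the caller's chain, so e.g. a memory-registration WR can be posted together with the send in one ib_post_send() call (the "hypothetical_" name is illustrative):

static int hypothetical_send_after_reg(struct rtrs_con *con,
				       struct rtrs_iu *iu, size_t size,
				       struct ib_reg_wr *reg_wr)
{
	reg_wr->wr.next = NULL;
	/* rtrs_iu_post_send() links its own WR in after reg_wr. */
	return rtrs_iu_post_send(con, iu, size, &reg_wr->wr);
}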
+
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head)
+{
+ struct ib_rdma_wr wr;
+ int i;
+
+ wr = (struct ib_rdma_wr) {
+ .wr.wr_cqe = &iu->cqe,
+ .wr.sg_list = sge,
+ .wr.num_sge = num_sge,
+ .rkey = rkey,
+ .remote_addr = rdma_addr,
+ .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .wr.ex.imm_data = cpu_to_be32(imm_data),
+ .wr.send_flags = flags,
+ };
+
+ /*
+ * If one of the sges has 0 size, the operation will fail with a
+ * length error
+ */
+ for (i = 0; i < num_sge; i++)
+ if (WARN_ON(sge[i].length == 0))
+ return -EINVAL;
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr.wr;
+ } else {
+ head = &wr.wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
+
+int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ u32 imm_data, enum ib_send_flags flags,
+ struct ib_send_wr *head)
+{
+ struct ib_send_wr wr;
+
+ wr = (struct ib_send_wr) {
+ .wr_cqe = cqe,
+ .send_flags = flags,
+ .opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .ex.imm_data = cpu_to_be32(imm_data),
+ };
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr;
+ } else {
+ head = &wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
+
+static void qp_event_handler(struct ib_event *ev, void *ctx)
+{
+ struct rtrs_con *con = ctx;
+
+ switch (ev->event) {
+ case IB_EVENT_COMM_EST:
+ rtrs_info(con->sess, "QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
+ break;
+ default:
+ rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ break;
+ }
+}
+
+static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
+ enum ib_poll_context poll_ctx)
+{
+ struct rdma_cm_id *cm_id = con->cm_id;
+ struct ib_cq *cq;
+
+ cq = ib_alloc_cq(cm_id->device, con, cq_size,
+ cq_vector, poll_ctx);
+ if (IS_ERR(cq)) {
+ rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
+ PTR_ERR(cq));
+ return PTR_ERR(cq);
+ }
+ con->cq = cq;
+
+ return 0;
+}
+
+static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+ u16 wr_queue_size, u32 max_sge)
+{
+ struct ib_qp_init_attr init_attr = {NULL};
+ struct rdma_cm_id *cm_id = con->cm_id;
+ int ret;
+
+ init_attr.cap.max_send_wr = wr_queue_size;
+ init_attr.cap.max_recv_wr = wr_queue_size;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.event_handler = qp_event_handler;
+ init_attr.qp_context = con;
+ init_attr.cap.max_send_sge = max_sge;
+
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.send_cq = con->cq;
+ init_attr.recv_cq = con->cq;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+
+ ret = rdma_create_qp(cm_id, pd, &init_attr);
+ if (ret) {
+ rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret);
+ return ret;
+ }
+ con->qp = cm_id->qp;
+
+ return ret;
+}
+
+int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, u16 cq_size,
+ u16 wr_queue_size, enum ib_poll_context poll_ctx)
+{
+ int err;
+
+ err = create_cq(con, cq_vector, cq_size, poll_ctx);
+ if (err)
+ return err;
+
+ err = create_qp(con, sess->dev->ib_pd, wr_queue_size, max_send_sge);
+ if (err) {
+ ib_free_cq(con->cq);
+ con->cq = NULL;
+ return err;
+ }
+ con->sess = sess;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_create);
+
+void rtrs_cq_qp_destroy(struct rtrs_con *con)
+{
+ if (con->qp) {
+ rdma_destroy_qp(con->cm_id);
+ con->qp = NULL;
+ }
+ if (con->cq) {
+ ib_free_cq(con->cq);
+ con->cq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
+
+static void schedule_hb(struct rtrs_sess *sess)
+{
+ queue_delayed_work(sess->hb_wq, &sess->hb_dwork,
+ msecs_to_jiffies(sess->hb_interval_ms));
+}
+
+void rtrs_send_hb_ack(struct rtrs_sess *sess)
+{
+ struct rtrs_con *usr_con = sess->con[0];
+ u32 imm;
+ int err;
+
+ imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ IB_SEND_SIGNALED, NULL);
+ if (err) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
+
+static void hb_work(struct work_struct *work)
+{
+ struct rtrs_con *usr_con;
+ struct rtrs_sess *sess;
+ u32 imm;
+ int err;
+
+ sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork);
+ usr_con = sess->con[0];
+
+ if (sess->hb_missed_cnt > sess->hb_missed_max) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+ if (sess->hb_missed_cnt++) {
+ /* Reschedule work without sending hb */
+ schedule_hb(sess);
+ return;
+ }
+ imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ IB_SEND_SIGNALED, NULL);
+ if (err) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+
+ schedule_hb(sess);
+}
+
+void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq)
+{
+ sess->hb_cqe = cqe;
+ sess->hb_interval_ms = interval_ms;
+ sess->hb_err_handler = err_handler;
+ sess->hb_wq = wq;
+ sess->hb_missed_max = missed_max;
+ sess->hb_missed_cnt = 0;
+ INIT_DELAYED_WORK(&sess->hb_dwork, hb_work);
+}
+EXPORT_SYMBOL_GPL(rtrs_init_hb);
+
+void rtrs_start_hb(struct rtrs_sess *sess)
+{
+ schedule_hb(sess);
+}
+EXPORT_SYMBOL_GPL(rtrs_start_hb);
+
+void rtrs_stop_hb(struct rtrs_sess *sess)
+{
+ cancel_delayed_work_sync(&sess->hb_dwork);
+ sess->hb_missed_cnt = 0;
+ sess->hb_missed_max = 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_stop_hb);
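The heartbeat helpers above are used in init/start/stop order; a compressed sketch, with an illustrative interval and miss budget:

static void hypothetical_hb_err(struct rtrs_con *con)
{
	/* The peer stopped answering: tear the session down here. */
}

static void hypothetical_hb_lifecycle(struct rtrs_sess *sess,
				      struct ib_cqe *cqe,
				      struct workqueue_struct *wq)
{
	/* Beat every 5000 ms, give up after 5 missed replies. */
	rtrs_init_hb(sess, cqe, 5000, 5, hypothetical_hb_err, wq);
	rtrs_start_hb(sess);
	/* ... the peer answers each beat with rtrs_send_hb_ack() ... */
	rtrs_stop_hb(sess);
}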
+
+static int rtrs_str_gid_to_sockaddr(const char *addr, size_t len,
+ short port, struct sockaddr_storage *dst)
+{
+ struct sockaddr_ib *dst_ib = (struct sockaddr_ib *)dst;
+ int ret;
+
+ /*
+ * We can use some of the IPv6 functions since GID is a valid
+ * IPv6 address format
+ */
+ ret = in6_pton(addr, len, dst_ib->sib_addr.sib_raw, '\0', NULL);
+ if (ret == 0)
+ return -EINVAL;
+
+ dst_ib->sib_family = AF_IB;
+ /*
+ * Use the same TCP server port number as the IB service ID
+ * on the IB port space range
+ */
+ dst_ib->sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port);
+ dst_ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
+ dst_ib->sib_pkey = cpu_to_be16(0xffff);
+
+ return 0;
+}
+
+/**
+ * rtrs_str_to_sockaddr() - Convert rtrs address string to sockaddr
+ * @addr: String representation of an addr (IPv4, IPv6 or IB GID):
+ * - "ip:192.168.1.1"
+ * - "ip:fe80::200:5aee:feaa:20a2"
+ * - "gid:fe80::200:5aee:feaa:20a2"
+ * @len: String address length
+ * @port: Destination port
+ * @dst: Destination sockaddr structure
+ *
+ * Returns 0 if the conversion is successful, non-zero on error.
+ */
+static int rtrs_str_to_sockaddr(const char *addr, size_t len,
+ u16 port, struct sockaddr_storage *dst)
+{
+ if (strncmp(addr, "gid:", 4) == 0) {
+ return rtrs_str_gid_to_sockaddr(addr + 4, len - 4, port, dst);
+ } else if (strncmp(addr, "ip:", 3) == 0) {
+ char port_str[8];
+ char *cpy;
+ int err;
+
+ snprintf(port_str, sizeof(port_str), "%u", port);
+ cpy = kstrndup(addr + 3, len - 3, GFP_KERNEL);
+ err = cpy ? inet_pton_with_scope(&init_net, AF_UNSPEC,
+ cpy, port_str, dst) : -ENOMEM;
+ kfree(cpy);
+
+ return err;
+ }
+ return -EPROTONOSUPPORT;
+}
+
+/**
+ * sockaddr_to_str() - convert sockaddr to a string.
+ * @addr: the sockaddr structure to be converted.
+ * @buf: destination buffer for the string.
+ * @len: size of the buffer.
+ *
+ * The return value is the number of characters written into buf, not
+ * including the trailing '\0'. If len == 0 the function returns 0.
+ */
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
+{
+ switch (addr->sa_family) {
+ case AF_IB:
+ return scnprintf(buf, len, "gid:%pI6",
+ &((struct sockaddr_ib *)addr)->sib_addr.sib_raw);
+ case AF_INET:
+ return scnprintf(buf, len, "ip:%pI4",
+ &((struct sockaddr_in *)addr)->sin_addr);
+ case AF_INET6:
+ return scnprintf(buf, len, "ip:%pI6c",
+ &((struct sockaddr_in6 *)addr)->sin6_addr);
+ }
+ return scnprintf(buf, len, "<invalid address family>");
+}
+EXPORT_SYMBOL(sockaddr_to_str);
+
+/**
+ * rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
+ * to sockaddr structures
+ * @str: string containing source and destination addresses of a path
+ * separated by ',' or '@', e.g. "ip:1.1.1.1,ip:1.1.1.2" or
+ * "ip:1.1.1.1@ip:1.1.1.2". If str contains only one address it is
+ * considered to be the destination.
+ * @len: string length
+ * @port: Destination port number.
+ * @addr: rtrs_addr structure to fill in; addr->src is set to NULL
+ * if str does not contain a source address.
+ *
+ * Returns zero if the conversion is successful, non-zero otherwise.
+ */
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr)
+{
+ const char *d;
+
+ d = strchr(str, ',');
+ if (!d)
+ d = strchr(str, '@');
+ if (d) {
+ if (rtrs_str_to_sockaddr(str, d - str, 0, addr->src))
+ return -EINVAL;
+ d += 1;
+ len -= d - str;
+ str = d;
+
+ } else {
+ addr->src = NULL;
+ }
+ return rtrs_str_to_sockaddr(str, len, port, addr->dst);
+}
+EXPORT_SYMBOL(rtrs_addr_to_sockaddr);
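A minimal parsing sketch for the helper above; the address literals, port and "hypothetical_" name are illustrative:

static int hypothetical_parse_path(void)
{
	static struct sockaddr_storage src, dst;
	struct rtrs_addr addr = { .src = &src, .dst = &dst };
	const char *path = "ip:192.168.1.1@ip:192.168.1.2";

	/* On success addr.dst is filled in; addr.src is NULL if absent. */
	return rtrs_addr_to_sockaddr(path, strlen(path), 1234, &addr);
}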
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool)
+{
+ WARN_ON(pool->ops && (!pool->ops->alloc ^ !pool->ops->free));
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->mutex);
+ pool->pd_flags = pd_flags;
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_init);
+
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool)
+{
+ mutex_destroy(&pool->mutex);
+ WARN_ON(!list_empty(&pool->list));
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_deinit);
+
+static void dev_free(struct kref *ref)
+{
+ struct rtrs_rdma_dev_pd *pool;
+ struct rtrs_ib_dev *dev;
+
+ dev = container_of(ref, typeof(*dev), ref);
+ pool = dev->pool;
+
+ mutex_lock(&pool->mutex);
+ list_del(&dev->entry);
+ mutex_unlock(&pool->mutex);
+
+ if (pool->ops && pool->ops->deinit)
+ pool->ops->deinit(dev);
+
+ ib_dealloc_pd(dev->ib_pd);
+
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+}
+
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev)
+{
+ return kref_put(&dev->ref, dev_free);
+}
+EXPORT_SYMBOL(rtrs_ib_dev_put);
+
+static int rtrs_ib_dev_get(struct rtrs_ib_dev *dev)
+{
+ return kref_get_unless_zero(&dev->ref);
+}
+
+struct rtrs_ib_dev *
+rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool)
+{
+ struct rtrs_ib_dev *dev;
+
+ mutex_lock(&pool->mutex);
+ list_for_each_entry(dev, &pool->list, entry) {
+ if (dev->ib_dev->node_guid == ib_dev->node_guid &&
+ rtrs_ib_dev_get(dev))
+ goto out_unlock;
+ }
+ mutex_unlock(&pool->mutex);
+ if (pool->ops && pool->ops->alloc)
+ dev = pool->ops->alloc();
+ else
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(dev))
+ goto out_err;
+
+ kref_init(&dev->ref);
+ dev->pool = pool;
+ dev->ib_dev = ib_dev;
+ dev->ib_pd = ib_alloc_pd(ib_dev, pool->pd_flags);
+ if (IS_ERR(dev->ib_pd))
+ goto out_free_dev;
+
+ if (pool->ops && pool->ops->init && pool->ops->init(dev))
+ goto out_free_pd;
+
+ mutex_lock(&pool->mutex);
+ list_add(&dev->entry, &pool->list);
+out_unlock:
+ mutex_unlock(&pool->mutex);
+ return dev;
+
+out_free_pd:
+ ib_dealloc_pd(dev->ib_pd);
+out_free_dev:
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+out_err:
+ return NULL;
+}
+EXPORT_SYMBOL(rtrs_ib_dev_find_or_add);
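A sketch of the device-pool get/put pairing provided by the helpers above; the pool flags and "hypothetical_" names are illustrative:

static struct rtrs_rdma_dev_pd hypothetical_pool;

static int hypothetical_use_dev(struct ib_device *ib_dev)
{
	struct rtrs_ib_dev *dev;

	rtrs_rdma_dev_pd_init(0, &hypothetical_pool);

	dev = rtrs_ib_dev_find_or_add(ib_dev, &hypothetical_pool);
	if (!dev)
		return -ENOMEM;

	/* ... issue verbs on dev->ib_dev using the shared dev->ib_pd ... */

	rtrs_ib_dev_put(dev);
	rtrs_rdma_dev_pd_deinit(&hypothetical_pool);
	return 0;
}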
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
new file mode 100644
index 000000000000..9af750f4d783
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_H
+#define RTRS_H
+
+#include <linux/socket.h>
+#include <linux/scatterlist.h>
+
+struct rtrs_permit;
+struct rtrs_clt;
+struct rtrs_srv_ctx;
+struct rtrs_srv;
+struct rtrs_srv_op;
+
+/*
+ * RDMA transport (RTRS) client API
+ */
+
+/**
+ * enum rtrs_clt_link_ev - Events about connectivity state of a client
+ * @RTRS_CLT_LINK_EV_RECONNECTED: Client was reconnected.
+ * @RTRS_CLT_LINK_EV_DISCONNECTED: Client was disconnected.
+ */
+enum rtrs_clt_link_ev {
+ RTRS_CLT_LINK_EV_RECONNECTED,
+ RTRS_CLT_LINK_EV_DISCONNECTED,
+};
+
+/**
+ * struct rtrs_addr - source and destination address of a path to be established
+ * @src: source address, or NULL
+ * @dst: destination address
+ */
+struct rtrs_addr {
+ struct sockaddr_storage *src;
+ struct sockaddr_storage *dst;
+};
+
+/**
+ * struct rtrs_clt_ops - holds the link event callback and a private pointer.
+ * @priv: User supplied private data.
+ * @link_ev: Event notification callback for connection state changes;
+ *           called with the @priv that was passed to rtrs_clt_open() and
+ *           the occurred event.
+ */
+struct rtrs_clt_ops {
+ void *priv;
+ void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
+};
+
+struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t path_cnt, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ u16 max_segments,
+ size_t max_segment_size,
+ s16 max_reconnect_attempts);
+
+void rtrs_clt_close(struct rtrs_clt *sess);
+
+/**
+ * rtrs_permit_to_pdu() - converts rtrs_permit to opaque pdu pointer
+ * @permit: RTRS permit pointer, it associates the memory allocation for future
+ * RDMA operation.
+ */
+void *rtrs_permit_to_pdu(struct rtrs_permit *permit);
+
+enum {
+ RTRS_PERMIT_NOWAIT = 0,
+ RTRS_PERMIT_WAIT = 1,
+};
+
+/**
+ * enum rtrs_clt_con_type - type of IB connection to use with a given
+ * rtrs_permit
+ * @RTRS_ADMIN_CON: use the connection reserved for "service" messages
+ * @RTRS_IO_CON: use a connection reserved for IO
+ */
+enum rtrs_clt_con_type {
+ RTRS_ADMIN_CON,
+ RTRS_IO_CON
+};
+
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait);
+
+void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit);
+
+/**
+ * struct rtrs_clt_req_ops - holds the request confirmation callback
+ * and a private pointer.
+ * @priv: User supplied private data.
+ * @conf_fn: Confirmation callback; called with the user provided @priv
+ *           and the error number of the completed request.
+ */
+struct rtrs_clt_req_ops {
+ void *priv;
+ void (*conf_fn)(void *priv, int errno);
+};
+
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt *sess, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t len,
+ struct scatterlist *sg, unsigned int sg_cnt);
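A compressed sketch of the client IO flow implied by the declarations above; the buffer handling and "hypothetical_" names are illustrative:

static void hypothetical_conf(void *priv, int errno)
{
	/* The server confirmed the IO; complete the request here. */
}

static int hypothetical_write(struct rtrs_clt *sess, void *buf, size_t len)
{
	struct rtrs_clt_req_ops req_ops = { .conf_fn = hypothetical_conf };
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct rtrs_permit *permit;
	struct scatterlist sg;
	int err;

	permit = rtrs_clt_get_permit(sess, RTRS_IO_CON, RTRS_PERMIT_WAIT);
	if (!permit)
		return -EBUSY;

	sg_init_one(&sg, buf, len);
	err = rtrs_clt_request(WRITE, &req_ops, sess, permit, &vec, 1, len,
			       &sg, 1);
	if (err)
		rtrs_clt_put_permit(sess, permit);
	return err;
}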
+
+/**
+ * struct rtrs_attrs - RTRS session attributes
+ */
+struct rtrs_attrs {
+ u32 queue_depth;
+ u32 max_io_size;
+ u8 sessname[NAME_MAX];
+ struct kobject *sess_kobj;
+};
+
+int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
+
+/*
+ * Here goes RTRS server API
+ */
+
+/**
+ * enum rtrs_srv_link_ev - Server link events
+ * @RTRS_SRV_LINK_EV_CONNECTED: Connection from client established
+ * @RTRS_SRV_LINK_EV_DISCONNECTED: Connection was disconnected, all
+ * connection RTRS resources were freed.
+ */
+enum rtrs_srv_link_ev {
+ RTRS_SRV_LINK_EV_CONNECTED,
+ RTRS_SRV_LINK_EV_DISCONNECTED,
+};
+
+struct rtrs_srv_ops {
+ /**
+ * rdma_ev(): Event notification for RDMA operations.
+ * If the callback returns a value != 0, an error
+ * message for the data transfer will be sent to
+ * the client.
+ *
+ * @sess: Session
+ * @priv: Private data set by rtrs_srv_set_sess_priv()
+ * @id: internal RTRS operation id
+ * @dir: READ/WRITE
+ * @data: Pointer to (bidirectional) rdma memory area:
+ * - in case of %RTRS_SRV_RDMA_EV_RECV contains
+ * data sent by the client
+ * - in case of %RTRS_SRV_RDMA_EV_WRITE_REQ points
+ * to the memory area where the response is to be
+ * written
+ * @datalen: Size of the memory area in @data
+ * @usr: The extra user message sent by the client (%vec)
+ * @usrlen: Size of the user message
+ */
+ int (*rdma_ev)(struct rtrs_srv *sess, void *priv,
+ struct rtrs_srv_op *id, int dir,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen);
+ /**
+ * link_ev(): Events about connectivity state changes
+ * If the callback returns != 0 for the event
+ * %RTRS_SRV_LINK_EV_CONNECTED, the corresponding
+ * session will be destroyed.
+ * @sess: Session
+ * @ev: event
+ * @priv: Private data from user if previously set with
+ * rtrs_srv_set_sess_priv()
+ */
+ int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev,
+ void *priv);
+};
+
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port);
+
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx);
+
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno);
+
+void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv);
+
+int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len);
+
+int rtrs_srv_get_queue_depth(struct rtrs_srv *sess);
+
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr);
+
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len);
+#endif
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index cd1181c39ed2..d8fcd21ab472 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -71,7 +71,6 @@ static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
-static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;
@@ -95,10 +94,6 @@ module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
-module_param(prefer_fr, bool, 0444);
-MODULE_PARM_DESC(prefer_fr,
-"Whether to use fast registration if both FMR and fast registration are supported");
-
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
"Use memory registration even for contiguous memory regions");
@@ -146,7 +141,7 @@ module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
"Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
-static void srp_add_one(struct ib_device *device);
+static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -388,24 +383,6 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
srp_new_ib_cm_id(ch);
}
-static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
-{
- struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_fmr_pool_param fmr_param;
-
- memset(&fmr_param, 0, sizeof(fmr_param));
- fmr_param.pool_size = target->mr_pool_size;
- fmr_param.dirty_watermark = fmr_param.pool_size / 4;
- fmr_param.cache = 1;
- fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
- fmr_param.page_shift = ilog2(dev->mr_page_size);
- fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- return ib_create_fmr_pool(dev->pd, &fmr_param);
-}
-
/**
* srp_destroy_fr_pool() - free the resources owned by a pool
* @pool: Fast registration pool to be destroyed.
@@ -556,7 +533,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
- struct ib_fmr_pool *fmr_pool = NULL;
struct srp_fr_pool *fr_pool = NULL;
const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
int ret;
@@ -619,14 +595,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
"FR pool allocation failed (%d)\n", ret);
goto err_qp;
}
- } else if (dev->use_fmr) {
- fmr_pool = srp_alloc_fmr_pool(target);
- if (IS_ERR(fmr_pool)) {
- ret = PTR_ERR(fmr_pool);
- shost_printk(KERN_WARNING, target->scsi_host, PFX
- "FMR pool allocation failed (%d)\n", ret);
- goto err_qp;
- }
}
if (ch->qp)
@@ -644,10 +612,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
ch->fr_pool = fr_pool;
- } else if (dev->use_fmr) {
- if (ch->fmr_pool)
- ib_destroy_fmr_pool(ch->fmr_pool);
- ch->fmr_pool = fmr_pool;
}
kfree(init_attr);
@@ -702,9 +666,6 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (dev->use_fast_reg) {
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
- } else if (dev->use_fmr) {
- if (ch->fmr_pool)
- ib_destroy_fmr_pool(ch->fmr_pool);
}
srp_destroy_qp(ch);
@@ -1017,12 +978,8 @@ static void srp_free_req_data(struct srp_target_port *target,
for (i = 0; i < target->req_ring_size; ++i) {
req = &ch->req_ring[i];
- if (dev->use_fast_reg) {
+ if (dev->use_fast_reg)
kfree(req->fr_list);
- } else {
- kfree(req->fmr_list);
- kfree(req->map_page);
- }
if (req->indirect_dma_addr) {
ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
target->indirect_size,
@@ -1056,16 +1013,8 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
GFP_KERNEL);
if (!mr_list)
goto out;
- if (srp_dev->use_fast_reg) {
+ if (srp_dev->use_fast_reg)
req->fr_list = mr_list;
- } else {
- req->fmr_list = mr_list;
- req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
- sizeof(void *),
- GFP_KERNEL);
- if (!req->map_page)
- goto out;
- }
req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
if (!req->indirect_desc)
goto out;
@@ -1272,11 +1221,6 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
if (req->nmdesc)
srp_fr_pool_put(ch->fr_pool, req->fr_list,
req->nmdesc);
- } else if (dev->use_fmr) {
- struct ib_pool_fmr **pfmr;
-
- for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
- ib_fmr_pool_unmap(*pfmr);
}
ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
@@ -1472,50 +1416,6 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
state->ndesc++;
}
-static int srp_map_finish_fmr(struct srp_map_state *state,
- struct srp_rdma_ch *ch)
-{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pool_fmr *fmr;
- u64 io_addr = 0;
-
- if (state->fmr.next >= state->fmr.end) {
- shost_printk(KERN_ERR, ch->target->scsi_host,
- PFX "Out of MRs (mr_per_cmd = %d)\n",
- ch->target->mr_per_cmd);
- return -ENOMEM;
- }
-
- WARN_ON_ONCE(!dev->use_fmr);
-
- if (state->npages == 0)
- return 0;
-
- if (state->npages == 1 && target->global_rkey) {
- srp_map_desc(state, state->base_dma_addr, state->dma_len,
- target->global_rkey);
- goto reset_state;
- }
-
- fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
- state->npages, io_addr);
- if (IS_ERR(fmr))
- return PTR_ERR(fmr);
-
- *state->fmr.next++ = fmr;
- state->nmdesc++;
-
- srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
- state->dma_len, fmr->fmr->rkey);
-
-reset_state:
- state->npages = 0;
- state->dma_len = 0;
-
- return 0;
-}
-
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
srp_handle_qp_err(cq, wc, "FAST REG");
@@ -1606,74 +1506,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
return n;
}
-static int srp_map_sg_entry(struct srp_map_state *state,
- struct srp_rdma_ch *ch,
- struct scatterlist *sg)
-{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
- dma_addr_t dma_addr = sg_dma_address(sg);
- unsigned int dma_len = sg_dma_len(sg);
- unsigned int len = 0;
- int ret;
-
- WARN_ON_ONCE(!dma_len);
-
- while (dma_len) {
- unsigned offset = dma_addr & ~dev->mr_page_mask;
-
- if (state->npages == dev->max_pages_per_mr ||
- (state->npages > 0 && offset != 0)) {
- ret = srp_map_finish_fmr(state, ch);
- if (ret)
- return ret;
- }
-
- len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
-
- if (!state->npages)
- state->base_dma_addr = dma_addr;
- state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
- state->dma_len += len;
- dma_addr += len;
- dma_len -= len;
- }
-
- /*
- * If the end of the MR is not on a page boundary then we need to
- * close it out and start a new one -- we can only merge at page
- * boundaries.
- */
- ret = 0;
- if ((dma_addr & ~dev->mr_page_mask) != 0)
- ret = srp_map_finish_fmr(state, ch);
- return ret;
-}
-
-static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
- struct srp_request *req, struct scatterlist *scat,
- int count)
-{
- struct scatterlist *sg;
- int i, ret;
-
- state->pages = req->map_page;
- state->fmr.next = req->fmr_list;
- state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
-
- for_each_sg(scat, sg, count, i) {
- ret = srp_map_sg_entry(state, ch, sg);
- if (ret)
- return ret;
- }
-
- ret = srp_map_finish_fmr(state, ch);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
@@ -1733,7 +1565,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
struct srp_device *dev = target->srp_host->srp_dev;
struct srp_map_state state;
struct srp_direct_buf idb_desc;
- u64 idb_pages[1];
struct scatterlist idb_sg[1];
int ret;
@@ -1756,14 +1587,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
if (ret < 0)
return ret;
WARN_ON_ONCE(ret < 1);
- } else if (dev->use_fmr) {
- state.pages = idb_pages;
- state.pages[0] = (req->indirect_dma_addr &
- dev->mr_page_mask);
- state.npages = 1;
- ret = srp_map_finish_fmr(&state, ch);
- if (ret < 0)
- return ret;
} else {
return -EINVAL;
}
@@ -1787,9 +1610,6 @@ static void srp_check_mapping(struct srp_map_state *state,
if (dev->use_fast_reg)
for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
mr_len += (*pfr)->mr->length;
- else if (dev->use_fmr)
- for (i = 0; i < state->nmdesc; i++)
- mr_len += be32_to_cpu(req->indirect_desc[i].len);
if (desc_len != scsi_bufflen(req->scmnd) ||
mr_len > scsi_bufflen(req->scmnd))
pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
@@ -1904,8 +1724,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
state.desc = req->indirect_desc;
if (dev->use_fast_reg)
ret = srp_map_sg_fr(&state, ch, req, scat, count);
- else if (dev->use_fmr)
- ret = srp_map_sg_fmr(&state, ch, req, scat, count);
else
ret = srp_map_sg_dma(&state, ch, req, scat, count);
req->nmdesc = state.nmdesc;
@@ -3424,6 +3242,7 @@ enum {
SRP_OPT_IP_DEST = 1 << 16,
SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
+ SRP_OPT_CH_COUNT = 1 << 19,
};
static unsigned int srp_opt_mandatory[] = {
@@ -3457,6 +3276,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
+ { SRP_OPT_CH_COUNT, "ch_count=%u", },
{ SRP_OPT_ERR, NULL }
};
@@ -3758,6 +3578,14 @@ static int srp_parse_options(struct net *net, const char *buf,
target->max_it_iu_size = token;
break;
+ case SRP_OPT_CH_COUNT:
+ if (match_int(args, &token) || token < 1) {
+ pr_warn("bad channel count %s\n", p);
+ goto out;
+ }
+ target->ch_count = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -3864,13 +3692,13 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
}
- if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
+ if (!srp_dev->has_fr && !target->allow_ext_sg &&
target->cmd_sg_cnt < target->sg_tablesize) {
pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
target->sg_tablesize = target->cmd_sg_cnt;
}
- if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
+ if (srp_dev->use_fast_reg) {
bool gaps_reg = (ibdev->attrs.device_cap_flags &
IB_DEVICE_SG_GAPS_REG);
@@ -3878,12 +3706,12 @@ static ssize_t srp_create_target(struct device *dev,
(ilog2(srp_dev->mr_page_size) - 9);
if (!gaps_reg) {
/*
- * FR and FMR can only map one HCA page per entry. If
- * the start address is not aligned on a HCA page
- * boundary two entries will be used for the head and
- * the tail although these two entries combined
- * contain at most one HCA page of data. Hence the "+
- * 1" in the calculation below.
+ * FR can only map one HCA page per entry. If the start
+ * address is not aligned on a HCA page boundary two
+ * entries will be used for the head and the tail
+ * although these two entries combined contain at most
+ * one HCA page of data. Hence the "+ 1" in the
+ * calculation below.
*
* The indirect data buffer descriptor is contiguous
* so the memory for that buffer will only be
@@ -3921,11 +3749,13 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
ret = -ENOMEM;
- target->ch_count = max_t(unsigned, num_online_nodes(),
- min(ch_count ? :
- min(4 * num_online_nodes(),
- ibdev->num_comp_vectors),
- num_online_cpus()));
+ if (target->ch_count == 0)
+ target->ch_count =
+ max_t(unsigned int, num_online_nodes(),
+ min(ch_count ?:
+ min(4 * num_online_nodes(),
+ ibdev->num_comp_vectors),
+ num_online_cpus()));
target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
GFP_KERNEL);
if (!target->ch)
@@ -4132,7 +3962,7 @@ static void srp_rename_dev(struct ib_device *device, void *client_data)
}
}
-static void srp_add_one(struct ib_device *device)
+static int srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct ib_device_attr *attr = &device->attrs;
@@ -4144,7 +3974,7 @@ static void srp_add_one(struct ib_device *device)
srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
- return;
+ return -ENOMEM;
/*
* Use the smallest page size supported by the HCA, down to a
@@ -4162,23 +3992,15 @@ static void srp_add_one(struct ib_device *device)
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
- srp_dev->has_fmr = (device->ops.alloc_fmr &&
- device->ops.dealloc_fmr &&
- device->ops.map_phys_fmr &&
- device->ops.unmap_fmr);
srp_dev->has_fr = (attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
- if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
- dev_warn(&device->dev, "neither FMR nor FR is supported\n");
- } else if (!never_register &&
- attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
- srp_dev->use_fast_reg = (srp_dev->has_fr &&
- (!srp_dev->has_fmr || prefer_fr));
- srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
- }
+ if (!never_register && !srp_dev->has_fr)
+ dev_warn(&device->dev, "FR is not supported\n");
+ else if (!never_register &&
+ attr->max_mr_size >= 2 * srp_dev->mr_page_size)
+ srp_dev->use_fast_reg = srp_dev->has_fr;
- if (never_register || !register_always ||
- (!srp_dev->has_fmr && !srp_dev->has_fr))
+ if (never_register || !register_always || !srp_dev->has_fr)
flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
if (srp_dev->use_fast_reg) {
@@ -4197,8 +4019,12 @@ static void srp_add_one(struct ib_device *device)
srp_dev->dev = device;
srp_dev->pd = ib_alloc_pd(device, flags);
- if (IS_ERR(srp_dev->pd))
- goto free_dev;
+ if (IS_ERR(srp_dev->pd)) {
+ int ret = PTR_ERR(srp_dev->pd);
+
+ kfree(srp_dev);
+ return ret;
+ }
if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
@@ -4212,10 +4038,7 @@ static void srp_add_one(struct ib_device *device)
}
ib_set_client_data(device, &srp_client, srp_dev);
- return;
-
-free_dev:
- kfree(srp_dev);
+ return 0;
}
static void srp_remove_one(struct ib_device *device, void *client_data)
@@ -4225,8 +4048,6 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
struct srp_target_port *target;
srp_dev = client_data;
- if (!srp_dev)
- return;
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
device_unregister(&host->dev);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 6fabcc2faf1f..6818cac0a3b7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -44,7 +44,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
-#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
enum {
@@ -95,8 +94,7 @@ enum srp_iu_type {
/*
* @mr_page_mask: HCA memory registration page mask.
* @mr_page_size: HCA memory registration page size.
- * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
- * request.
+ * @mr_max_size: Maximum size in bytes of a single FR registration request.
*/
struct srp_device {
struct list_head dev_list;
@@ -107,9 +105,7 @@ struct srp_device {
int mr_page_size;
int mr_max_size;
int max_pages_per_mr;
- bool has_fmr;
bool has_fr;
- bool use_fmr;
bool use_fast_reg;
};
@@ -127,11 +123,7 @@ struct srp_host {
struct srp_request {
struct scsi_cmnd *scmnd;
struct srp_iu *cmd;
- union {
- struct ib_pool_fmr **fmr_list;
- struct srp_fr_desc **fr_list;
- };
- u64 *map_page;
+ struct srp_fr_desc **fr_list;
struct srp_direct_buf *indirect_desc;
dma_addr_t indirect_dma_addr;
short nmdesc;
@@ -155,10 +147,7 @@ struct srp_rdma_ch {
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
struct ib_qp *qp;
- union {
- struct ib_fmr_pool *fmr_pool;
- struct srp_fr_pool *fr_pool;
- };
+ struct srp_fr_pool *fr_pool;
uint32_t max_it_iu_len;
uint32_t max_ti_iu_len;
u8 max_imm_sge;
@@ -319,20 +308,16 @@ struct srp_fr_pool {
* @pages: Array with DMA addresses of pages being considered for
* memory registration.
* @base_dma_addr: DMA address of the first page that has not yet been mapped.
- * @dma_len: Number of bytes that will be registered with the next
- * FMR or FR memory registration call.
+ * @dma_len: Number of bytes that will be registered with the next FR
+ * memory registration call.
* @total_len: Total number of bytes in the sg-list being mapped.
* @npages: Number of page addresses in the pages[] array.
- * @nmdesc: Number of FMR or FR memory descriptors used for mapping.
+ * @nmdesc: Number of FR memory descriptors used for mapping.
* @ndesc: Number of SRP buffer descriptors that have been filled in.
*/
struct srp_map_state {
union {
struct {
- struct ib_pool_fmr **next;
- struct ib_pool_fmr **end;
- } fmr;
- struct {
struct srp_fr_desc **next;
struct srp_fr_desc **end;
} fr;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 98552749d71c..ef7fcd3e8e15 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(srpt_srq_size,
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
- return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+ return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
0444);
@@ -135,14 +135,11 @@ static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
static void srpt_event_handler(struct ib_event_handler *handler,
struct ib_event *event)
{
- struct srpt_device *sdev;
+ struct srpt_device *sdev =
+ container_of(handler, struct srpt_device, event_handler);
struct srpt_port *sport;
u8 port_num;
- sdev = ib_get_client_data(event->device, &srpt_client);
- if (!sdev || sdev->device != event->device)
- return;
-
pr_debug("ASYNC event= %d on device= %s\n", event->event,
dev_name(&sdev->device->dev));
@@ -217,8 +214,9 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
- pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
- event->event, ch, ch->sess_name, ch->state);
+ pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+ event->event, ch, ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
switch (event->event) {
case IB_EVENT_COMM_EST:
@@ -610,6 +608,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
dev_name(&sport->sdev->device->dev), sport->port,
PTR_ERR(sport->mad_agent));
sport->mad_agent = NULL;
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ ib_modify_port(sport->sdev->device, sport->port, 0,
+ &port_modify);
}
}
@@ -633,9 +636,8 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
WARN_ON(sport->port != i);
- if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
- pr_err("disabling MAD processing failed.\n");
if (sport->mad_agent) {
+ ib_modify_port(sdev->device, i, 0, &port_modify);
ib_unregister_mad_agent(sport->mad_agent);
sport->mad_agent = NULL;
}
@@ -1814,18 +1816,13 @@ retry:
*/
qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
qp_init->cap.max_rdma_ctxs = sq_size / 2;
- qp_init->cap.max_send_sge = min(attrs->max_send_sge,
- SRPT_MAX_SG_PER_WQE);
- qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
- SRPT_MAX_SG_PER_WQE);
+ qp_init->cap.max_send_sge = attrs->max_send_sge;
+ qp_init->cap.max_recv_sge = 1;
qp_init->port_num = ch->sport->port;
- if (sdev->use_srq) {
+ if (sdev->use_srq)
qp_init->srq = sdev->srq;
- } else {
+ else
qp_init->cap.max_recv_wr = ch->rq_size;
- qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
- SRPT_MAX_SG_PER_WQE);
- }
if (ch->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
@@ -1984,8 +1981,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
if (srpt_disconnect_ch(ch) >= 0)
- pr_info("Closing channel %s because target %s_%d has been disabled\n",
- ch->sess_name,
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
dev_name(&sport->sdev->device->dev),
sport->port);
srpt_close_ch(ch);
@@ -2496,7 +2493,8 @@ reject:
SRP_BUF_FORMAT_INDIRECT);
if (rdma_cm_id)
- rdma_reject(rdma_cm_id, rej, sizeof(*rej));
+ rdma_reject(rdma_cm_id, rej, sizeof(*rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
else
ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
rej, sizeof(*rej));
@@ -3104,7 +3102,7 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
* srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
*/
-static void srpt_add_one(struct ib_device *device)
+static int srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
@@ -3115,14 +3113,16 @@ static void srpt_add_one(struct ib_device *device)
sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!sdev)
- goto err;
+ return -ENOMEM;
sdev->device = device;
mutex_init(&sdev->sdev_mutex);
sdev->pd = ib_alloc_pd(device, 0);
- if (IS_ERR(sdev->pd))
+ if (IS_ERR(sdev->pd)) {
+ ret = PTR_ERR(sdev->pd);
goto free_dev;
+ }
sdev->lkey = sdev->pd->local_dma_lkey;
@@ -3138,6 +3138,7 @@ static void srpt_add_one(struct ib_device *device)
if (IS_ERR(sdev->cm_id)) {
pr_info("ib_create_cm_id() failed: %ld\n",
PTR_ERR(sdev->cm_id));
+ ret = PTR_ERR(sdev->cm_id);
sdev->cm_id = NULL;
if (!rdma_cm_id)
goto err_ring;
@@ -3182,7 +3183,8 @@ static void srpt_add_one(struct ib_device *device)
mutex_init(&sport->port_gid_id.mutex);
INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
- if (srpt_refresh_port(sport)) {
+ ret = srpt_refresh_port(sport);
+ if (ret) {
pr_err("MAD registration failed for %s-%d.\n",
dev_name(&sdev->device->dev), i);
goto err_event;
@@ -3193,10 +3195,9 @@ static void srpt_add_one(struct ib_device *device)
list_add_tail(&sdev->list, &srpt_dev_list);
spin_unlock(&srpt_dev_lock);
-out:
ib_set_client_data(device, &srpt_client, sdev);
pr_debug("added %s.\n", dev_name(&device->dev));
- return;
+ return 0;
err_event:
ib_unregister_event_handler(&sdev->event_handler);
@@ -3208,10 +3209,8 @@ err_ring:
ib_dealloc_pd(sdev->pd);
free_dev:
kfree(sdev);
-err:
- sdev = NULL;
pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
- goto out;
+ return ret;
}
/**
@@ -3224,12 +3223,6 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
struct srpt_device *sdev = client_data;
int i;
- if (!sdev) {
- pr_info("%s(%s): nothing to do.\n", __func__,
- dev_name(&device->dev));
- return;
- }
-
srpt_unregister_mad_agent(sdev);
ib_unregister_event_handler(&sdev->event_handler);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 2e1a69840857..f31c349d07a1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -105,11 +105,6 @@ enum {
SRP_CMD_ACA = 0x4,
SRPT_DEF_SG_TABLESIZE = 128,
- /*
- * An experimentally determined value that avoids that QP creation
- * fails due to "swiotlb buffer is full" on systems using the swiotlb.
- */
- SRPT_MAX_SG_PER_WQE = 16,
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index cb6e3a5f509c..0d57e51b8ba1 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -326,20 +326,6 @@ static int evdev_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &client->fasync);
}
-static int evdev_flush(struct file *file, fl_owner_t id)
-{
- struct evdev_client *client = file->private_data;
- struct evdev *evdev = client->evdev;
-
- mutex_lock(&evdev->mutex);
-
- if (evdev->exist && !client->revoked)
- input_flush_device(&evdev->handle, file);
-
- mutex_unlock(&evdev->mutex);
- return 0;
-}
-
static void evdev_free(struct device *dev)
{
struct evdev *evdev = container_of(dev, struct evdev, dev);
@@ -453,6 +439,10 @@ static int evdev_release(struct inode *inode, struct file *file)
unsigned int i;
mutex_lock(&evdev->mutex);
+
+ if (evdev->exist && !client->revoked)
+ input_flush_device(&evdev->handle, file);
+
evdev_ungrab(evdev, client);
mutex_unlock(&evdev->mutex);
@@ -1310,7 +1300,6 @@ static const struct file_operations evdev_fops = {
.compat_ioctl = evdev_ioctl_compat,
#endif
.fasync = evdev_fasync,
- .flush = evdev_flush,
.llseek = no_llseek,
};
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6b40a1c68f9f..c77cdb3b62b5 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -459,6 +459,16 @@ static const u8 xboxone_fw2015_init[] = {
};
/*
+ * This packet is required for Xbox One S (0x045e:0x02ea)
+ * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to
+ * initialize the controller that was previously used in
+ * Bluetooth mode.
+ */
+static const u8 xboxone_s_init[] = {
+ 0x05, 0x20, 0x00, 0x0f, 0x06
+};
+
+/*
* This packet is required for the Titanfall 2 Xbox One pads
* (0x0e6f:0x0165) to finish initialization and for Hori pads
* (0x0f0d:0x0067) to make the analog sticks work.
@@ -516,6 +526,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
+ XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index d38398526965..14362ebab9a9 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -186,7 +186,7 @@ struct touchpad_protocol {
u8 number_of_fingers;
u8 clicked2;
u8 unknown3[16];
- struct tp_finger fingers[0];
+ struct tp_finger fingers[];
};
/**
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 2b71c5a51f90..fc1793ca2f17 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -347,18 +347,14 @@ static int cros_ec_keyb_info(struct cros_ec_device *ec_dev,
params->info_type = info_type;
params->event_type = event_type;
- ret = cros_ec_cmd_xfer(ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n",
- (int)info_type, (int)event_type, ret);
- } else if (msg->result == EC_RES_INVALID_VERSION) {
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret == -ENOTSUPP) {
/* With older ECs we just return 0 for everything */
memset(result, 0, result_size);
ret = 0;
- } else if (msg->result != EC_RES_SUCCESS) {
- dev_warn(ec_dev->dev, "Error getting info %d/%d: %d\n",
- (int)info_type, (int)event_type, msg->result);
- ret = -EPROTO;
+ } else if (ret < 0) {
+ dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n",
+ (int)info_type, (int)event_type, ret);
} else if (ret != result_size) {
dev_warn(ec_dev->dev, "Wrong size %d/%d: %d != %zu\n",
(int)info_type, (int)event_type,
diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c
index b0ead7199c40..a69dcc3bd30c 100644
--- a/drivers/input/keyboard/dlink-dir685-touchkeys.c
+++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c
@@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match);
static struct i2c_driver dir685_tk_i2c_driver = {
.driver = {
- .name = "dlin-dir685-touchkeys",
+ .name = "dlink-dir685-touchkeys",
.of_match_table = of_match_ptr(dir685_tk_of_match),
},
.probe = dir685_tk_probe,
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index c8f87df93a50..9c6386b2af33 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -205,8 +205,11 @@ ATTRIBUTE_GROUPS(axp20x);
static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
{
- struct input_dev *idev = pwr;
- struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
+ struct axp20x_pek *axp20x_pek = pwr;
+ struct input_dev *idev = axp20x_pek->input;
+
+ if (!idev)
+ return IRQ_HANDLED;
/*
* The power-button is connected to ground so a falling edge (dbf)
@@ -225,22 +228,9 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
struct platform_device *pdev)
{
- struct axp20x_dev *axp20x = axp20x_pek->axp20x;
struct input_dev *idev;
int error;
- axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
- if (axp20x_pek->irq_dbr < 0)
- return axp20x_pek->irq_dbr;
- axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc,
- axp20x_pek->irq_dbr);
-
- axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
- if (axp20x_pek->irq_dbf < 0)
- return axp20x_pek->irq_dbf;
- axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc,
- axp20x_pek->irq_dbf);
-
axp20x_pek->input = devm_input_allocate_device(&pdev->dev);
if (!axp20x_pek->input)
return -ENOMEM;
@@ -255,24 +245,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
input_set_drvdata(idev, axp20x_pek);
- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
- axp20x_pek_irq, 0,
- "axp20x-pek-dbr", idev);
- if (error < 0) {
- dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
- axp20x_pek->irq_dbr, error);
- return error;
- }
-
- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
- axp20x_pek_irq, 0,
- "axp20x-pek-dbf", idev);
- if (error < 0) {
- dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
- axp20x_pek->irq_dbf, error);
- return error;
- }
-
error = input_register_device(idev);
if (error) {
dev_err(&pdev->dev, "Can't register input device: %d\n",
@@ -280,8 +252,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
return error;
}
- device_init_wakeup(&pdev->dev, true);
-
return 0;
}
@@ -339,6 +309,18 @@ static int axp20x_pek_probe(struct platform_device *pdev)
axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
+ axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
+ if (axp20x_pek->irq_dbr < 0)
+ return axp20x_pek->irq_dbr;
+ axp20x_pek->irq_dbr = regmap_irq_get_virq(
+ axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr);
+
+ axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
+ if (axp20x_pek->irq_dbf < 0)
+ return axp20x_pek->irq_dbf;
+ axp20x_pek->irq_dbf = regmap_irq_get_virq(
+ axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf);
+
if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
if (error)
@@ -347,6 +329,26 @@ static int axp20x_pek_probe(struct platform_device *pdev)
axp20x_pek->info = (struct axp20x_info *)match->driver_data;
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbr", axp20x_pek);
+ if (error < 0) {
+ dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
+ axp20x_pek->irq_dbr, error);
+ return error;
+ }
+
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbf", axp20x_pek);
+ if (error < 0) {
+ dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
+ axp20x_pek->irq_dbf, error);
+ return error;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+
platform_set_drvdata(pdev, axp20x_pek);
return 0;
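
The reordering above illustrates a general IRQ rule: request the interrupt
only after everything the handler dereferences is initialized, and pass the
driver's private structure as the cookie rather than one optional member.
A hedged sketch of that shape (hypothetical driver, not the axp20x code):

	#include <linux/input.h>
	#include <linux/interrupt.h>

	struct sketch_pek {
		struct input_dev *input;	/* optional, may stay NULL */
	};

	static irqreturn_t sketch_pek_irq(int irq, void *data)
	{
		struct sketch_pek *pek = data;	/* cookie is the driver struct */

		if (!pek->input)	/* no input device registered */
			return IRQ_HANDLED;

		input_report_key(pek->input, KEY_POWER, 1);
		input_sync(pek->input);
		return IRQ_HANDLED;
	}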
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 4d2036209b45..758dae8d6500 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN005b", /* P50 */
"LEN005e", /* T560 */
"LEN006c", /* T470s */
+ "LEN007a", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 190b9974526b..258d5fe3d395 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -205,7 +205,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
if (count) {
kfree(attn_data.data);
- attn_data.data = NULL;
+ drvdata->attn_data.data = NULL;
}
if (!kfifo_is_empty(&drvdata->attn_fifo))
@@ -1210,7 +1210,8 @@ static int rmi_driver_probe(struct device *dev)
if (data->input) {
rmi_driver_set_input_name(rmi_dev, data->input);
if (!rmi_dev->xport->input) {
- if (input_register_device(data->input)) {
+ retval = input_register_device(data->input);
+ if (retval) {
dev_err(dev, "%s: Failed to register input device.\n",
__func__);
goto err_destroy_functions;
diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
deleted file mode 100644
index 391f94d9e47d..000000000000
--- a/drivers/input/serio/i8042-ppcio.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _I8042_PPCIO_H
-#define _I8042_PPCIO_H
-
-
-#if defined(CONFIG_WALNUT)
-
-#define I8042_KBD_IRQ 25
-#define I8042_AUX_IRQ 26
-
-#define I8042_KBD_PHYS_DESC "walnutps2/serio0"
-#define I8042_AUX_PHYS_DESC "walnutps2/serio1"
-#define I8042_MUX_PHYS_DESC "walnutps2/serio%d"
-
-extern void *kb_cs;
-extern void *kb_data;
-
-#define I8042_COMMAND_REG (*(int *)kb_cs)
-#define I8042_DATA_REG (*(int *)kb_data)
-
-static inline int i8042_read_data(void)
-{
- return readb(kb_data);
-}
-
-static inline int i8042_read_status(void)
-{
- return readb(kb_cs);
-}
-
-static inline void i8042_write_data(int val)
-{
- writeb(val, kb_data);
-}
-
-static inline void i8042_write_command(int val)
-{
- writeb(val, kb_cs);
-}
-
-static inline int i8042_platform_init(void)
-{
- i8042_reset = I8042_RESET_ALWAYS;
- return 0;
-}
-
-static inline void i8042_platform_exit(void)
-{
-}
-
-#else
-
-#include "i8042-io.h"
-
-#endif
-
-#endif /* _I8042_PPCIO_H */
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 08e919dbeb5d..7e048b557462 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -662,6 +662,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
},
+ {
+ /* Lenovo ThinkPad Twist S230u */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index 38dc27ad3c18..eb376700dfff 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -17,8 +17,6 @@
#include "i8042-ip22io.h"
#elif defined(CONFIG_SNI_RM)
#include "i8042-snirm.h"
-#elif defined(CONFIG_PPC)
-#include "i8042-ppcio.h"
#elif defined(CONFIG_SPARC)
#include "i8042-sparcio.h"
#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 14c577c16b16..2289f9638116 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -19,6 +19,7 @@
*/
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -73,6 +74,7 @@
#define FW_POS_STATE 1
#define FW_POS_TOTAL 2
#define FW_POS_XY 3
+#define FW_POS_TOOL_TYPE 33
#define FW_POS_CHECKSUM 34
#define FW_POS_WIDTH 35
#define FW_POS_PRESSURE 45
@@ -842,6 +844,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
{
struct input_dev *input = ts->input;
unsigned int n_fingers;
+ unsigned int tool_type;
u16 finger_state;
int i;
@@ -852,6 +855,10 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
dev_dbg(&ts->client->dev,
"n_fingers: %u, state: %04x\n", n_fingers, finger_state);
+ /* Note: all fingers have the same tool type */
+ tool_type = buf[FW_POS_TOOL_TYPE] & BIT(0) ?
+ MT_TOOL_FINGER : MT_TOOL_PALM;
+
for (i = 0; i < MAX_CONTACT_NUM && n_fingers; i++) {
if (finger_state & 1) {
unsigned int x, y, p, w;
@@ -867,7 +874,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
i, x, y, p, w);
input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_mt_report_slot_state(input, tool_type, true);
input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
@@ -1307,6 +1314,8 @@ static int elants_i2c_probe(struct i2c_client *client,
input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_PALM, 0, 0);
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
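
The palm reporting added above generalizes to any multitouch driver:
advertise the ABS_MT_TOOL_TYPE range once at probe time, then report the
per-contact tool in each slot. A minimal sketch with a hypothetical report
helper:

	#include <linux/input/mt.h>

	/* probe time: input_set_abs_params(input, ABS_MT_TOOL_TYPE,
	 *                                  0, MT_TOOL_PALM, 0, 0); */
	static void sketch_report_contact(struct input_dev *input, int slot,
					  int x, int y, bool is_palm)
	{
		input_mt_slot(input, slot);
		input_mt_report_slot_state(input,
					   is_palm ? MT_TOOL_PALM : MT_TOOL_FINGER,
					   true);
		input_report_abs(input, ABS_MT_POSITION_X, x);
		input_report_abs(input, ABS_MT_POSITION_Y, y);
	}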
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 69c6d559eeb0..2ef1adaed9af 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg,
if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL)
BUG();
- /* Write register: use repeated start */
+ /* Write register */
xfer[0].addr = client->addr;
- xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART;
+ xfer[0].flags = client->flags & I2C_M_TEN;
xfer[0].len = 1;
xfer[0].buf = &buf;
/* Read data */
xfer[1].addr = client->addr;
- xfer[1].flags = I2C_M_RD;
+ xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
xfer[1].len = len;
xfer[1].buf = val;
@@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client,
const void *match_data;
int error;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_PROTOCOL_MANGLING)) {
- dev_err(&client->dev,
- "Need i2c bus that supports protocol mangling\n");
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "Unsupported I2C adapter\n");
return -ENODEV;
}
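
The register read above now uses two plain I2C messages (a one-byte
register write followed by a read) instead of I2C_M_NOSTART protocol
mangling, so it works on any adapter that advertises I2C_FUNC_I2C. The
access pattern, as a minimal sketch with a hypothetical helper:

	#include <linux/i2c.h>

	static int sketch_read_reg(struct i2c_client *client, u8 reg,
				   u8 *val, u16 len)
	{
		struct i2c_msg xfer[2] = {
			{	/* write the register address */
				.addr  = client->addr,
				.flags = client->flags & I2C_M_TEN,
				.len   = 1,
				.buf   = &reg,
			}, {	/* read back the data */
				.addr  = client->addr,
				.flags = (client->flags & I2C_M_TEN) | I2C_M_RD,
				.len   = len,
				.buf   = val,
			},
		};
		int ret = i2c_transfer(client->adapter, xfer, 2);

		return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
	}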
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 16d70201de4a..397cb1d3f481 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -182,6 +182,7 @@ static const struct usb_device_id usbtouch_devices[] = {
#endif
#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
+ {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
index bfa4ca3ab7a9..5b7204ee2eb2 100644
--- a/drivers/interconnect/Kconfig
+++ b/drivers/interconnect/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig INTERCONNECT
- tristate "On-Chip Interconnect management support"
+ bool "On-Chip Interconnect management support"
help
Support for management of the on-chip interconnects.
@@ -11,6 +11,7 @@ menuconfig INTERCONNECT
if INTERCONNECT
+source "drivers/interconnect/imx/Kconfig"
source "drivers/interconnect/qcom/Kconfig"
endif
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 725029ae7a2c..4825c287ca13 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -4,4 +4,5 @@ CFLAGS_core.o := -I$(src)
icc-core-objs := core.o
obj-$(CONFIG_INTERCONNECT) += icc-core.o
+obj-$(CONFIG_INTERCONNECT_IMX) += imx/
obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 2c6515e3ecf1..ece2a579a9b0 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -158,6 +158,7 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
path->reqs[i].node = node;
path->reqs[i].dev = dev;
+ path->reqs[i].enabled = true;
/* reference to previous node was saved during path traversal */
node = node->reverse;
}
@@ -249,9 +250,12 @@ static int aggregate_requests(struct icc_node *node)
if (p->pre_aggregate)
p->pre_aggregate(node);
- hlist_for_each_entry(r, &node->req_list, req_node)
+ hlist_for_each_entry(r, &node->req_list, req_node) {
+ if (!r->enabled)
+ continue;
p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
&node->avg_bw, &node->peak_bw);
+ }
return 0;
}
@@ -350,10 +354,35 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
return node;
}
+static void devm_icc_release(struct device *dev, void *res)
+{
+ icc_put(*(struct icc_path **)res);
+}
+
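+/**
+ * devm_of_icc_get() - devres-managed variant of of_icc_get()
+ * @dev: device pointer for the consumer device
+ * @name: interconnect path name
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. The path is
+ * released automatically with icc_put() when @dev is unbound.
+ */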
+struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
+{
+ struct icc_path **ptr, *path;
+
+ ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ path = of_icc_get(dev, name);
+ if (!IS_ERR(path)) {
+ *ptr = path;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return path;
+}
+EXPORT_SYMBOL_GPL(devm_of_icc_get);
+
/**
- * of_icc_get() - get a path handle from a DT node based on name
+ * of_icc_get_by_index() - get a path handle from a DT node based on index
* @dev: device pointer for the consumer device
- * @name: interconnect path name
+ * @idx: interconnect path index
*
* This function will search for a path between two endpoints and return an
* icc_path handle on success. Use icc_put() to release constraints when they
@@ -365,13 +394,12 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
* Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
* when the API is disabled or the "interconnects" DT property is missing.
*/
-struct icc_path *of_icc_get(struct device *dev, const char *name)
+struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
- struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_path *path;
struct icc_node *src_node, *dst_node;
- struct device_node *np = NULL;
+ struct device_node *np;
struct of_phandle_args src_args, dst_args;
- int idx = 0;
int ret;
if (!dev || !dev->of_node)
@@ -391,12 +419,6 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
* lets support only global ids and extend this in the future if needed
* without breaking DT compatibility.
*/
- if (name) {
- idx = of_property_match_string(np, "interconnect-names", name);
- if (idx < 0)
- return ERR_PTR(idx);
- }
-
ret = of_parse_phandle_with_args(np, "interconnects",
"#interconnect-cells", idx * 2,
&src_args);
@@ -439,12 +461,8 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
return path;
}
- if (name)
- path->name = kstrdup_const(name, GFP_KERNEL);
- else
- path->name = kasprintf(GFP_KERNEL, "%s-%s",
- src_node->name, dst_node->name);
-
+ path->name = kasprintf(GFP_KERNEL, "%s-%s",
+ src_node->name, dst_node->name);
if (!path->name) {
kfree(path);
return ERR_PTR(-ENOMEM);
@@ -452,6 +470,53 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
return path;
}
+EXPORT_SYMBOL_GPL(of_icc_get_by_index);
+
+/**
+ * of_icc_get() - get a path handle from a DT node based on name
+ * @dev: device pointer for the consumer device
+ * @name: interconnect path name
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release constraints when they
+ * are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
+ * when the API is disabled or the "interconnects" DT property is missing.
+ */
+struct icc_path *of_icc_get(struct device *dev, const char *name)
+{
+ struct device_node *np;
+ int idx = 0;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ np = dev->of_node;
+
+ /*
+	 * When the consumer DT node does not have an "interconnects"
+	 * property, return a NULL path to skip setting constraints.
+ */
+ if (!of_find_property(np, "interconnects", NULL))
+ return NULL;
+
+ /*
+ * We use a combination of phandle and specifier for endpoint. For now
+ * lets support only global ids and extend this in the future if needed
+	 * let's support only global ids and extend this in the future if needed
+ */
+ if (name) {
+ idx = of_property_match_string(np, "interconnect-names", name);
+ if (idx < 0)
+ return ERR_PTR(idx);
+ }
+
+ return of_icc_get_by_index(dev, idx);
+}
EXPORT_SYMBOL_GPL(of_icc_get);
/**
@@ -546,6 +611,39 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
}
EXPORT_SYMBOL_GPL(icc_set_bw);
+static int __icc_enable(struct icc_path *path, bool enable)
+{
+ int i;
+
+ if (!path)
+ return 0;
+
+ if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ for (i = 0; i < path->num_nodes; i++)
+ path->reqs[i].enabled = enable;
+
+ mutex_unlock(&icc_lock);
+
+ return icc_set_bw(path, path->reqs[0].avg_bw,
+ path->reqs[0].peak_bw);
+}
+
+int icc_enable(struct icc_path *path)
+{
+ return __icc_enable(path, true);
+}
+EXPORT_SYMBOL_GPL(icc_enable);
+
+int icc_disable(struct icc_path *path)
+{
+ return __icc_enable(path, false);
+}
+EXPORT_SYMBOL_GPL(icc_disable);
+
/**
* icc_get() - return a handle for path between two endpoints
* @dev: the device requesting the path
@@ -908,12 +1006,7 @@ static int __init icc_init(void)
return 0;
}
-static void __exit icc_exit(void)
-{
- debugfs_remove_recursive(icc_debugfs_dir);
-}
-module_init(icc_init);
-module_exit(icc_exit);
+device_initcall(icc_init);
MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
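
Taken together, the core changes above (devm_of_icc_get(), the per-request
enabled flag, icc_enable()/icc_disable()) give consumers the following
pattern; a minimal sketch against a hypothetical device, not code from
this series:

	#include <linux/interconnect.h>

	static int sketch_probe(struct device *dev)
	{
		struct icc_path *path;
		int ret;

		path = devm_of_icc_get(dev, "dram");	/* auto icc_put() */
		if (IS_ERR(path))
			return PTR_ERR(path);

		/* vote for 1 GB/s average, 2 GB/s peak (values in kBps) */
		ret = icc_set_bw(path, 1000000, 2000000);
		if (ret)
			return ret;

		icc_disable(path);	/* drop the vote, e.g. for suspend */
		icc_enable(path);	/* reapply the stored avg/peak vote */
		return 0;
	}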
diff --git a/drivers/interconnect/imx/Kconfig b/drivers/interconnect/imx/Kconfig
new file mode 100644
index 000000000000..be2928362bb7
--- /dev/null
+++ b/drivers/interconnect/imx/Kconfig
@@ -0,0 +1,17 @@
+config INTERCONNECT_IMX
+ tristate "i.MX interconnect drivers"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+	  Generic interconnect drivers for i.MX SoCs
+
+config INTERCONNECT_IMX8MM
+ tristate "i.MX8MM interconnect driver"
+ depends on INTERCONNECT_IMX
+
+config INTERCONNECT_IMX8MN
+ tristate "i.MX8MN interconnect driver"
+ depends on INTERCONNECT_IMX
+
+config INTERCONNECT_IMX8MQ
+ tristate "i.MX8MQ interconnect driver"
+ depends on INTERCONNECT_IMX
diff --git a/drivers/interconnect/imx/Makefile b/drivers/interconnect/imx/Makefile
new file mode 100644
index 000000000000..21fd5233754f
--- /dev/null
+++ b/drivers/interconnect/imx/Makefile
@@ -0,0 +1,9 @@
+imx-interconnect-objs := imx.o
+imx8mm-interconnect-objs := imx8mm.o
+imx8mq-interconnect-objs := imx8mq.o
+imx8mn-interconnect-objs := imx8mn.o
+
+obj-$(CONFIG_INTERCONNECT_IMX) += imx-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MM) += imx8mm-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MQ) += imx8mq-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MN) += imx8mn-interconnect.o
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
new file mode 100644
index 000000000000..ac420f86008e
--- /dev/null
+++ b/drivers/interconnect/imx/imx.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ * Author: Leonard Crestez <leonard.crestez@nxp.com>
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+
+#include "imx.h"
+
+/* private icc_node data */
+struct imx_icc_node {
+ const struct imx_icc_node_desc *desc;
+ struct device *qos_dev;
+ struct dev_pm_qos_request qos_req;
+};
+
+static int imx_icc_node_set(struct icc_node *node)
+{
+ struct device *dev = node->provider->dev;
+ struct imx_icc_node *node_data = node->data;
+ u64 freq;
+
+ if (!node_data->qos_dev)
+ return 0;
+
+ freq = (node->avg_bw + node->peak_bw) * node_data->desc->adj->bw_mul;
+ do_div(freq, node_data->desc->adj->bw_div);
+ dev_dbg(dev, "node %s device %s avg_bw %ukBps peak_bw %ukBps min_freq %llukHz\n",
+ node->name, dev_name(node_data->qos_dev),
+ node->avg_bw, node->peak_bw, freq);
+
+ if (freq > S32_MAX) {
+ dev_err(dev, "%s can't request more than S32_MAX freq\n",
+ node->name);
+ return -ERANGE;
+ }
+
+ dev_pm_qos_update_request(&node_data->qos_req, freq);
+
+ return 0;
+}
+
+static int imx_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ return imx_icc_node_set(dst);
+}
+
+/* imx_icc_node_destroy() - Destroy an imx icc_node, including private data */
+static void imx_icc_node_destroy(struct icc_node *node)
+{
+ struct imx_icc_node *node_data = node->data;
+ int ret;
+
+ if (dev_pm_qos_request_active(&node_data->qos_req)) {
+ ret = dev_pm_qos_remove_request(&node_data->qos_req);
+ if (ret)
+ dev_warn(node->provider->dev,
+ "failed to remove qos request for %s\n",
+ dev_name(node_data->qos_dev));
+ }
+
+ put_device(node_data->qos_dev);
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+}
+
+static int imx_icc_node_init_qos(struct icc_provider *provider,
+ struct icc_node *node)
+{
+ struct imx_icc_node *node_data = node->data;
+ const struct imx_icc_node_adj_desc *adj = node_data->desc->adj;
+ struct device *dev = provider->dev;
+ struct device_node *dn = NULL;
+ struct platform_device *pdev;
+
+ if (adj->main_noc) {
+ node_data->qos_dev = dev;
+ dev_dbg(dev, "icc node %s[%d] is main noc itself\n",
+ node->name, node->id);
+ } else {
+ dn = of_parse_phandle(dev->of_node, adj->phandle_name, 0);
+ if (!dn) {
+ dev_warn(dev, "Failed to parse %s\n",
+ adj->phandle_name);
+ return -ENODEV;
+ }
+ /* Allow scaling to be disabled on a per-node basis */
+		if (!of_device_is_available(dn)) {
+			dev_warn(dev, "Missing property %s, skip scaling %s\n",
+				 adj->phandle_name, node->name);
+			of_node_put(dn);
+			return 0;
+		}
+
+ pdev = of_find_device_by_node(dn);
+ of_node_put(dn);
+ if (!pdev) {
+ dev_warn(dev, "node %s[%d] missing device for %pOF\n",
+ node->name, node->id, dn);
+ return -EPROBE_DEFER;
+ }
+ node_data->qos_dev = &pdev->dev;
+ dev_dbg(dev, "node %s[%d] has device node %pOF\n",
+ node->name, node->id, dn);
+ }
+
+ return dev_pm_qos_add_request(node_data->qos_dev,
+ &node_data->qos_req,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+}
+
+static struct icc_node *imx_icc_node_add(struct icc_provider *provider,
+ const struct imx_icc_node_desc *node_desc)
+{
+ struct device *dev = provider->dev;
+ struct imx_icc_node *node_data;
+ struct icc_node *node;
+ int ret;
+
+ node = icc_node_create(node_desc->id);
+ if (IS_ERR(node)) {
+ dev_err(dev, "failed to create node %d\n", node_desc->id);
+ return node;
+ }
+
+ if (node->data) {
+ dev_err(dev, "already created node %s id=%d\n",
+ node_desc->name, node_desc->id);
+ return ERR_PTR(-EEXIST);
+ }
+
+ node_data = devm_kzalloc(dev, sizeof(*node_data), GFP_KERNEL);
+ if (!node_data) {
+ icc_node_destroy(node->id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ node->name = node_desc->name;
+ node->data = node_data;
+ node_data->desc = node_desc;
+ icc_node_add(node, provider);
+
+ if (node_desc->adj) {
+ ret = imx_icc_node_init_qos(provider, node);
+ if (ret < 0) {
+ imx_icc_node_destroy(node);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return node;
+}
+
+static void imx_icc_unregister_nodes(struct icc_provider *provider)
+{
+ struct icc_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &provider->nodes, node_list)
+ imx_icc_node_destroy(node);
+}
+
+static int imx_icc_register_nodes(struct icc_provider *provider,
+ const struct imx_icc_node_desc *descs,
+ int count)
+{
+ struct icc_onecell_data *provider_data = provider->data;
+ int ret;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct icc_node *node;
+ const struct imx_icc_node_desc *node_desc = &descs[i];
+ size_t j;
+
+ node = imx_icc_node_add(provider, node_desc);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ if (ret != -EPROBE_DEFER)
+ dev_err(provider->dev, "failed to add %s: %d\n",
+ node_desc->name, ret);
+ goto err;
+ }
+ provider_data->nodes[node->id] = node;
+
+ for (j = 0; j < node_desc->num_links; j++) {
+ ret = icc_link_create(node, node_desc->links[j]);
+ if (ret) {
+ dev_err(provider->dev, "failed to link node %d to %d: %d\n",
+ node->id, node_desc->links[j], ret);
+ goto err;
+ }
+ }
+ }
+
+ return 0;
+
+err:
+ imx_icc_unregister_nodes(provider);
+
+ return ret;
+}
+
+static int get_max_node_id(struct imx_icc_node_desc *nodes, int nodes_count)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < nodes_count; ++i)
+ if (nodes[i].id > ret)
+ ret = nodes[i].id;
+
+ return ret;
+}
+
+int imx_icc_register(struct platform_device *pdev,
+ struct imx_icc_node_desc *nodes, int nodes_count)
+{
+ struct device *dev = &pdev->dev;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ int max_node_id;
+ int ret;
+
+	/* icc_onecell_data is indexed by node_id, so size for max id + 1 */
+	max_node_id = get_max_node_id(nodes, nodes_count) + 1;
+ data = devm_kzalloc(dev, struct_size(data, nodes, max_node_id),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->num_nodes = max_node_id;
+
+ provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
+ if (!provider)
+ return -ENOMEM;
+ provider->set = imx_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+ provider->dev = dev->parent;
+ platform_set_drvdata(pdev, provider);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ return ret;
+ }
+
+ ret = imx_icc_register_nodes(provider, nodes, nodes_count);
+ if (ret)
+ goto provider_del;
+
+ return 0;
+
+provider_del:
+ icc_provider_del(provider);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_icc_register);
+
+int imx_icc_unregister(struct platform_device *pdev)
+{
+ struct icc_provider *provider = platform_get_drvdata(pdev);
+ int ret;
+
+ imx_icc_unregister_nodes(provider);
+
+ ret = icc_provider_del(provider);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_icc_unregister);
+
+MODULE_LICENSE("GPL v2");
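
To make the scaling math in imx_icc_node_set() concrete: a bandwidth vote
is turned into a minimum-frequency request as freq_kHz = (avg_bw +
peak_bw) * bw_mul / bw_div, with bandwidths in kBps. A worked sketch using
the i.MX8MM DRAM adjustment values (bw_mul = 1, bw_div = 16, see
imx8mm_dram_adj below):

	#include <linux/types.h>
	#include <asm/div64.h>

	static u64 sketch_bw_to_min_freq(u32 avg_bw, u32 peak_bw)
	{
		u64 freq = ((u64)avg_bw + peak_bw) * 1;	/* bw_mul = 1 */

		do_div(freq, 16);			/* bw_div = 16 */
		return freq;	/* 1600000 kBps + 0 -> 100000 kHz */
	}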
diff --git a/drivers/interconnect/imx/imx.h b/drivers/interconnect/imx/imx.h
new file mode 100644
index 000000000000..75da51076c68
--- /dev/null
+++ b/drivers/interconnect/imx/imx.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ * Author: Leonard Crestez <leonard.crestez@nxp.com>
+ */
+#ifndef __DRIVERS_INTERCONNECT_IMX_H
+#define __DRIVERS_INTERCONNECT_IMX_H
+
+#include <linux/kernel.h>
+
+#define IMX_ICC_MAX_LINKS 4
+
+/*
+ * struct imx_icc_node_adj_desc - Describe a dynamically adjustable node
+ */
+struct imx_icc_node_adj_desc {
+ unsigned int bw_mul, bw_div;
+ const char *phandle_name;
+ bool main_noc;
+};
+
+/*
+ * struct imx_icc_node_desc - Describe an interconnect node
+ * @name: name of the node
+ * @id: a unique id identifying the node
+ * @links: an array of slave node ids
+ * @num_links: number of ids defined in links
+ */
+struct imx_icc_node_desc {
+ const char *name;
+ u16 id;
+ u16 links[IMX_ICC_MAX_LINKS];
+ u16 num_links;
+ const struct imx_icc_node_adj_desc *adj;
+};
+
+#define DEFINE_BUS_INTERCONNECT(_name, _id, _adj, ...) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .adj = _adj, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+#define DEFINE_BUS_MASTER(_name, _id, _dest_id) \
+ DEFINE_BUS_INTERCONNECT(_name, _id, NULL, _dest_id)
+
+#define DEFINE_BUS_SLAVE(_name, _id, _adj) \
+ DEFINE_BUS_INTERCONNECT(_name, _id, _adj)
+
+int imx_icc_register(struct platform_device *pdev,
+ struct imx_icc_node_desc *nodes,
+ int nodes_count);
+int imx_icc_unregister(struct platform_device *pdev);
+
+#endif /* __DRIVERS_INTERCONNECT_IMX_H */
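
The DEFINE_BUS_* helpers above use the ARRAY_SIZE-of-a-compound-literal
trick so that num_links always matches the trailing initializer list.
Expanding one entry by hand (hypothetical ids):

	#include <linux/kernel.h>	/* ARRAY_SIZE */
	#include "imx.h"

	static struct imx_icc_node_desc sketch_node =
		DEFINE_BUS_INTERCONNECT("PL301_FOO", 10, NULL, 20, 30);
	/* equivalent to:
	 *	.id = 10, .name = "PL301_FOO", .adj = NULL,
	 *	.num_links = ARRAY_SIZE(((int[]){ 20, 30 })), which is 2,
	 *	.links = { 20, 30 },
	 */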
diff --git a/drivers/interconnect/imx/imx8mm.c b/drivers/interconnect/imx/imx8mm.c
new file mode 100644
index 000000000000..1083490bb391
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mm.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MM SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ * Author: Leonard Crestez <leonard.crestez@nxp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/imx8mm.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mm_dram_adj = {
+ .bw_mul = 1,
+ .bw_div = 16,
+ .phandle_name = "fsl,ddrc",
+};
+
+static const struct imx_icc_node_adj_desc imx8mm_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 16,
+ .main_noc = true,
+};
+
+/*
+ * Describe bus masters, slaves and connections between them
+ *
+ * This is a simplified subset of the bus diagram; several other PL301
+ * NICs are skipped or merged into PL301_MAIN.
+ */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MM_ICN_NOC, &imx8mm_noc_adj,
+ IMX8MM_ICS_DRAM, IMX8MM_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("DRAM", IMX8MM_ICS_DRAM, &imx8mm_dram_adj),
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MM_ICS_OCRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MM_ICM_A53, IMX8MM_ICN_NOC),
+
+ /* VPUMIX */
+ DEFINE_BUS_MASTER("VPU H1", IMX8MM_ICM_VPU_H1, IMX8MM_ICN_VIDEO),
+ DEFINE_BUS_MASTER("VPU G1", IMX8MM_ICM_VPU_G1, IMX8MM_ICN_VIDEO),
+ DEFINE_BUS_MASTER("VPU G2", IMX8MM_ICM_VPU_G2, IMX8MM_ICN_VIDEO),
+ DEFINE_BUS_INTERCONNECT("PL301_VIDEO", IMX8MM_ICN_VIDEO, NULL, IMX8MM_ICN_NOC),
+
+ /* GPUMIX */
+ DEFINE_BUS_MASTER("GPU 2D", IMX8MM_ICM_GPU2D, IMX8MM_ICN_GPU),
+ DEFINE_BUS_MASTER("GPU 3D", IMX8MM_ICM_GPU3D, IMX8MM_ICN_GPU),
+ DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MM_ICN_GPU, NULL, IMX8MM_ICN_NOC),
+
+ /* DISPLAYMIX */
+ DEFINE_BUS_MASTER("CSI", IMX8MM_ICM_CSI, IMX8MM_ICN_MIPI),
+ DEFINE_BUS_MASTER("LCDIF", IMX8MM_ICM_LCDIF, IMX8MM_ICN_MIPI),
+ DEFINE_BUS_INTERCONNECT("PL301_MIPI", IMX8MM_ICN_MIPI, NULL, IMX8MM_ICN_NOC),
+
+ /* HSIO */
+ DEFINE_BUS_MASTER("USB1", IMX8MM_ICM_USB1, IMX8MM_ICN_HSIO),
+ DEFINE_BUS_MASTER("USB2", IMX8MM_ICM_USB2, IMX8MM_ICN_HSIO),
+ DEFINE_BUS_MASTER("PCIE", IMX8MM_ICM_PCIE, IMX8MM_ICN_HSIO),
+ DEFINE_BUS_INTERCONNECT("PL301_HSIO", IMX8MM_ICN_HSIO, NULL, IMX8MM_ICN_NOC),
+
+ /* Audio */
+ DEFINE_BUS_MASTER("SDMA2", IMX8MM_ICM_SDMA2, IMX8MM_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA3", IMX8MM_ICM_SDMA3, IMX8MM_ICN_AUDIO),
+ DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MM_ICN_AUDIO, NULL, IMX8MM_ICN_MAIN),
+
+ /* Ethernet */
+ DEFINE_BUS_MASTER("ENET", IMX8MM_ICM_ENET, IMX8MM_ICN_ENET),
+ DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MM_ICN_ENET, NULL, IMX8MM_ICN_MAIN),
+
+ /* Other */
+ DEFINE_BUS_MASTER("SDMA1", IMX8MM_ICM_SDMA1, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("NAND", IMX8MM_ICM_NAND, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC1", IMX8MM_ICM_USDHC1, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC2", IMX8MM_ICM_USDHC2, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC3", IMX8MM_ICM_USDHC3, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MM_ICN_MAIN, NULL,
+ IMX8MM_ICN_NOC, IMX8MM_ICS_OCRAM),
+};
+
+static int imx8mm_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+}
+
+static int imx8mm_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mm_icc_driver = {
+ .probe = imx8mm_icc_probe,
+ .remove = imx8mm_icc_remove,
+ .driver = {
+ .name = "imx8mm-interconnect",
+ },
+};
+
+module_platform_driver(imx8mm_icc_driver);
+MODULE_AUTHOR("Alexandre Bailon <abailon@baylibre.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx8mm-interconnect");
diff --git a/drivers/interconnect/imx/imx8mn.c b/drivers/interconnect/imx/imx8mn.c
new file mode 100644
index 000000000000..ad97e55fd4e5
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mn.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MN SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/imx8mn.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mn_dram_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .phandle_name = "fsl,ddrc",
+};
+
+static const struct imx_icc_node_adj_desc imx8mn_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .main_noc = true,
+};
+
+/*
+ * Describe bus masters, slaves and connections between them
+ *
+ * This is a simplified subset of the bus diagram; several other PL301
+ * NICs are skipped or merged into PL301_MAIN.
+ */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MN_ICN_NOC, &imx8mn_noc_adj,
+ IMX8MN_ICS_DRAM, IMX8MN_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("DRAM", IMX8MN_ICS_DRAM, &imx8mn_dram_adj),
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MN_ICS_OCRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MN_ICM_A53, IMX8MN_ICN_NOC),
+
+ /* GPUMIX */
+ DEFINE_BUS_MASTER("GPU", IMX8MN_ICM_GPU, IMX8MN_ICN_GPU),
+ DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MN_ICN_GPU, NULL, IMX8MN_ICN_NOC),
+
+ /* DISPLAYMIX */
+ DEFINE_BUS_MASTER("CSI1", IMX8MN_ICM_CSI1, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_MASTER("CSI2", IMX8MN_ICM_CSI2, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_MASTER("ISI", IMX8MN_ICM_ISI, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_MASTER("LCDIF", IMX8MN_ICM_LCDIF, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_INTERCONNECT("PL301_MIPI", IMX8MN_ICN_MIPI, NULL, IMX8MN_ICN_NOC),
+
+ /* USB goes straight to NOC */
+ DEFINE_BUS_MASTER("USB", IMX8MN_ICM_USB, IMX8MN_ICN_NOC),
+
+ /* Audio */
+ DEFINE_BUS_MASTER("SDMA2", IMX8MN_ICM_SDMA2, IMX8MN_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA3", IMX8MN_ICM_SDMA3, IMX8MN_ICN_AUDIO),
+ DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MN_ICN_AUDIO, NULL, IMX8MN_ICN_MAIN),
+
+ /* Ethernet */
+ DEFINE_BUS_MASTER("ENET", IMX8MN_ICM_ENET, IMX8MN_ICN_ENET),
+ DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MN_ICN_ENET, NULL, IMX8MN_ICN_MAIN),
+
+ /* Other */
+ DEFINE_BUS_MASTER("SDMA1", IMX8MN_ICM_SDMA1, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("NAND", IMX8MN_ICM_NAND, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC1", IMX8MN_ICM_USDHC1, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC2", IMX8MN_ICM_USDHC2, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC3", IMX8MN_ICM_USDHC3, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MN_ICN_MAIN, NULL,
+ IMX8MN_ICN_NOC, IMX8MN_ICS_OCRAM),
+};
+
+static int imx8mn_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+}
+
+static int imx8mn_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mn_icc_driver = {
+ .probe = imx8mn_icc_probe,
+ .remove = imx8mn_icc_remove,
+ .driver = {
+ .name = "imx8mn-interconnect",
+ },
+};
+
+module_platform_driver(imx8mn_icc_driver);
+MODULE_ALIAS("platform:imx8mn-interconnect");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
new file mode 100644
index 000000000000..ba43a15aefec
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MQ SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/imx8mq.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mq_dram_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .phandle_name = "fsl,ddrc",
+};
+
+static const struct imx_icc_node_adj_desc imx8mq_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .main_noc = true,
+};
+
+/*
+ * Describe bus masters, slaves and connections between them
+ *
+ * This is a simplified subset of the bus diagram; several other PL301
+ * NICs are skipped or merged into PL301_MAIN.
+ */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MQ_ICN_NOC, &imx8mq_noc_adj,
+ IMX8MQ_ICS_DRAM, IMX8MQ_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("DRAM", IMX8MQ_ICS_DRAM, &imx8mq_dram_adj),
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MQ_ICS_OCRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MQ_ICM_A53, IMX8MQ_ICN_NOC),
+
+ /* VPUMIX */
+ DEFINE_BUS_MASTER("VPU", IMX8MQ_ICM_VPU, IMX8MQ_ICN_VIDEO),
+ DEFINE_BUS_INTERCONNECT("PL301_VIDEO", IMX8MQ_ICN_VIDEO, NULL, IMX8MQ_ICN_NOC),
+
+ /* GPUMIX */
+ DEFINE_BUS_MASTER("GPU", IMX8MQ_ICM_GPU, IMX8MQ_ICN_GPU),
+ DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MQ_ICN_GPU, NULL, IMX8MQ_ICN_NOC),
+
+ /* DISPMIX (only for DCSS) */
+ DEFINE_BUS_MASTER("DC", IMX8MQ_ICM_DCSS, IMX8MQ_ICN_DCSS),
+ DEFINE_BUS_INTERCONNECT("PL301_DC", IMX8MQ_ICN_DCSS, NULL, IMX8MQ_ICN_NOC),
+
+ /* USBMIX */
+ DEFINE_BUS_MASTER("USB1", IMX8MQ_ICM_USB1, IMX8MQ_ICN_USB),
+ DEFINE_BUS_MASTER("USB2", IMX8MQ_ICM_USB2, IMX8MQ_ICN_USB),
+ DEFINE_BUS_INTERCONNECT("PL301_USB", IMX8MQ_ICN_USB, NULL, IMX8MQ_ICN_NOC),
+
+ /* PL301_DISPLAY (IPs other than DCSS, inside SUPERMIX) */
+ DEFINE_BUS_MASTER("CSI1", IMX8MQ_ICM_CSI1, IMX8MQ_ICN_DISPLAY),
+ DEFINE_BUS_MASTER("CSI2", IMX8MQ_ICM_CSI2, IMX8MQ_ICN_DISPLAY),
+ DEFINE_BUS_MASTER("LCDIF", IMX8MQ_ICM_LCDIF, IMX8MQ_ICN_DISPLAY),
+ DEFINE_BUS_INTERCONNECT("PL301_DISPLAY", IMX8MQ_ICN_DISPLAY, NULL, IMX8MQ_ICN_MAIN),
+
+ /* AUDIO */
+ DEFINE_BUS_MASTER("SDMA2", IMX8MQ_ICM_SDMA2, IMX8MQ_ICN_AUDIO),
+ DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MQ_ICN_AUDIO, NULL, IMX8MQ_ICN_DISPLAY),
+
+ /* ENET */
+ DEFINE_BUS_MASTER("ENET", IMX8MQ_ICM_ENET, IMX8MQ_ICN_ENET),
+ DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MQ_ICN_ENET, NULL, IMX8MQ_ICN_MAIN),
+
+ /* OTHER */
+ DEFINE_BUS_MASTER("SDMA1", IMX8MQ_ICM_SDMA1, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("NAND", IMX8MQ_ICM_NAND, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC1", IMX8MQ_ICM_USDHC1, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC2", IMX8MQ_ICM_USDHC2, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("PCIE1", IMX8MQ_ICM_PCIE1, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("PCIE2", IMX8MQ_ICM_PCIE2, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MQ_ICN_MAIN, NULL,
+ IMX8MQ_ICN_NOC, IMX8MQ_ICS_OCRAM),
+};
+
+static int imx8mq_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+}
+
+static int imx8mq_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mq_icc_driver = {
+ .probe = imx8mq_icc_probe,
+ .remove = imx8mq_icc_remove,
+ .driver = {
+ .name = "imx8mq-interconnect",
+ },
+};
+
+module_platform_driver(imx8mq_icc_driver);
+MODULE_ALIAS("platform:imx8mq-interconnect");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/internal.h b/drivers/interconnect/internal.h
index bf18cb7239df..f5f82a5c939e 100644
--- a/drivers/interconnect/internal.h
+++ b/drivers/interconnect/internal.h
@@ -14,6 +14,7 @@
* @req_node: entry in list of requests for the particular @node
* @node: the interconnect node to which this constraint applies
* @dev: reference to the device that sets the constraints
+ * @enabled: indicates whether the path with this request is enabled
* @tag: path tag (optional)
* @avg_bw: an integer describing the average bandwidth in kBps
* @peak_bw: an integer describing the peak bandwidth in kBps
@@ -22,6 +23,7 @@ struct icc_req {
struct hlist_node req_node;
struct icc_node *node;
struct device *dev;
+ bool enabled;
u32 tag;
u32 avg_bw;
u32 peak_bw;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 1dc3718560d0..2883ac389abb 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -127,7 +127,8 @@ static inline int get_acpihid_device_id(struct device *dev,
return -ENODEV;
list_for_each_entry(p, &acpihid_map, list) {
- if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) {
+ if (acpi_dev_hid_uid_match(adev, p->hid,
+ p->uid[0] ? p->uid : NULL)) {
if (entry)
*entry = p;
return p->devid;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 2b9a67ecc6ac..5b81fd16f5fa 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1329,8 +1329,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
}
case IVHD_DEV_ACPI_HID: {
u16 devid;
- u8 hid[ACPIHID_HID_LEN] = {0};
- u8 uid[ACPIHID_UID_LEN] = {0};
+ u8 hid[ACPIHID_HID_LEN];
+ u8 uid[ACPIHID_UID_LEN];
int ret;
if (h->type != 0x40) {
@@ -1347,6 +1347,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
+ uid[0] = '\0';
switch (e->uidf) {
case UID_NOT_PRESENT:
@@ -1361,8 +1362,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case UID_IS_CHARACTER:
- memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
- uid[ACPIHID_UID_LEN - 1] = '\0';
+ memcpy(uid, &e->uid, e->uidl);
+ uid[e->uidl] = '\0';
break;
default:
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ba128d1cdaee..4959f5df21bd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -952,7 +952,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
/* Non-coherent atomic allocation? Easy */
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- dma_free_from_pool(cpu_addr, alloc_size))
+ dma_free_from_pool(dev, cpu_addr, alloc_size))
return;
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
@@ -1035,7 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
- cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+ cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
+ gfp);
else
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
if (!cpu_addr)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 7b375421afba..03d6a26687bc 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -510,7 +510,7 @@ struct iommu_group *iommu_group_alloc(void)
NULL, "%d", group->id);
if (ret) {
ida_simple_remove(&iommu_group_ida, group->id);
- kfree(group);
+ kobject_put(&group->kobj);
return ERR_PTR(ret);
}
@@ -693,6 +693,15 @@ out:
return ret;
}
+static bool iommu_is_attach_deferred(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (domain->ops->is_attach_deferred)
+ return domain->ops->is_attach_deferred(domain, dev);
+
+ return false;
+}
+
/**
* iommu_group_add_device - add a device to an iommu group
* @group: the group into which to add the device (reference should be held)
@@ -747,7 +756,7 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
- if (group->domain)
+ if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
if (ret)
@@ -1653,9 +1662,6 @@ static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
int ret;
- if ((domain->ops->is_attach_deferred != NULL) &&
- domain->ops->is_attach_deferred(domain, dev))
- return 0;
if (unlikely(domain->ops->attach_dev == NULL))
return -ENODEV;
@@ -1727,8 +1733,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- if ((domain->ops->is_attach_deferred != NULL) &&
- domain->ops->is_attach_deferred(domain, dev))
+ if (iommu_is_attach_deferred(domain, dev))
return;
if (unlikely(domain->ops->detach_dev == NULL))
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index 23445ebfda5c..ec71063fff76 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -306,6 +306,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to map driver user space!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
+ res = -ENOMEM;
goto out_release_mem8_space;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index a85aada04a64..66b9a68f5e9f 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -532,4 +532,31 @@ config LOONGSON_HTPIC
help
Support for the Loongson-3 HyperTransport PIC Controller.
+config LOONGSON_HTVEC
+ bool "Loongson3 HyperTransport Interrupt Vector Controller"
+ depends on MACH_LOONGSON64
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for the Loongson3 HyperTransport Interrupt Vector Controller.
+
+config LOONGSON_PCH_PIC
+ bool "Loongson PCH PIC Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
+ help
+ Support for the Loongson PCH PIC Controller.
+
+config LOONGSON_PCH_MSI
+	bool "Loongson PCH MSI Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on PCI
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ select PCI_MSI
+ help
+ Support for the Loongson PCH MSI Controller.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 37bbe39bf909..3a4ce283189a 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -107,3 +107,6 @@ obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
+obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o
+obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o
+obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 124251b0ccba..cd685f521c77 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -174,6 +174,13 @@ static struct {
int next_victim;
} vpe_proxy;
+struct cpu_lpi_count {
+ atomic_t managed;
+ atomic_t unmanaged;
+};
+
+static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
+
static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
@@ -1510,42 +1517,159 @@ static void its_unmask_irq(struct irq_data *d)
lpi_update_config(d, 0, LPI_PROP_ENABLED);
}
+static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+
+ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_inc_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+ else
+ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_dec_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+ else
+ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
+ const struct cpumask *cpu_mask)
+{
+ unsigned int cpu = nr_cpu_ids, tmp;
+ int count = S32_MAX;
+
+ for_each_cpu(tmp, cpu_mask) {
+ int this_count = its_read_lpi_count(d, tmp);
+ if (this_count < count) {
+ cpu = tmp;
+ count = this_count;
+ }
+ }
+
+ return cpu;
+}
+
+/*
+ * As suggested by Thomas Gleixner in:
+ * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
+ */
+static int its_select_cpu(struct irq_data *d,
+ const struct cpumask *aff_mask)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ cpumask_var_t tmpmask;
+ int cpu, node;
+
+ if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
+ return -ENOMEM;
+
+ node = its_dev->its->numa_node;
+
+ if (!irqd_affinity_is_managed(d)) {
+ /* First try the NUMA node */
+ if (node != NUMA_NO_NODE) {
+ /*
+ * Try the intersection of the affinity mask and the
+ * node mask (and the online mask, just to be safe).
+ */
+ cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
+ cpumask_and(tmpmask, tmpmask, cpu_online_mask);
+
+ /*
+ * Ideally, we would check if the mask is empty, and
+ * try again on the full node here.
+ *
+ * But it turns out that the way ACPI describes the
+ * affinity for ITSs only deals with memory, and
+ * not target CPUs, so it cannot describe a single
+ * ITS placed next to two NUMA nodes.
+ *
+ * Instead, just fall back on the online mask. This
+ * diverges from Thomas' suggestion above.
+ */
+ cpu = cpumask_pick_least_loaded(d, tmpmask);
+ if (cpu < nr_cpu_ids)
+ goto out;
+
+ /* If we can't cross sockets, give up */
+ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
+ goto out;
+
+ /* If the above failed, expand the search */
+ }
+
+ /* Try the intersection of the affinity and online masks */
+ cpumask_and(tmpmask, aff_mask, cpu_online_mask);
+
+ /* If that doesn't fly, the online mask is the last resort */
+ if (cpumask_empty(tmpmask))
+ cpumask_copy(tmpmask, cpu_online_mask);
+
+ cpu = cpumask_pick_least_loaded(d, tmpmask);
+ } else {
+ cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask);
+
+ /* If we cannot cross sockets, limit the search to that node */
+ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
+ node != NUMA_NO_NODE)
+ cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
+
+ cpu = cpumask_pick_least_loaded(d, tmpmask);
+ }
+out:
+ free_cpumask_var(tmpmask);
+
+ pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
+ return cpu;
+}
+
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu;
- const struct cpumask *cpu_mask = cpu_online_mask;
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_collection *target_col;
u32 id = its_get_event_id(d);
+ int cpu, prev_cpu;
/* A forwarded interrupt should use irq_set_vcpu_affinity */
if (irqd_is_forwarded_to_vcpu(d))
return -EINVAL;
- /* lpi cannot be routed to a redistributor that is on a foreign node */
- if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
- if (its_dev->its->numa_node >= 0) {
- cpu_mask = cpumask_of_node(its_dev->its->numa_node);
- if (!cpumask_intersects(mask_val, cpu_mask))
- return -EINVAL;
- }
- }
+ prev_cpu = its_dev->event_map.col_map[id];
+ its_dec_lpi_count(d, prev_cpu);
- cpu = cpumask_any_and(mask_val, cpu_mask);
+ if (!force)
+ cpu = its_select_cpu(d, mask_val);
+ else
+ cpu = cpumask_pick_least_loaded(d, mask_val);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
+ if (cpu < 0 || cpu >= nr_cpu_ids)
+ goto err;
/* don't set the affinity when the target cpu is same as current one */
- if (cpu != its_dev->event_map.col_map[id]) {
+ if (cpu != prev_cpu) {
target_col = &its_dev->its->collections[cpu];
its_send_movi(its_dev, target_col, id);
its_dev->event_map.col_map[id] = cpu;
irq_data_update_effective_affinity(d, cpumask_of(cpu));
}
+ its_inc_lpi_count(d, cpu);
+
return IRQ_SET_MASK_OK_DONE;
+
+err:
+ its_inc_lpi_count(d, prev_cpu);
+ return -EINVAL;
}
static u64 its_irq_get_msi_base(struct its_device *its_dev)
@@ -3432,22 +3556,13 @@ static int its_irq_domain_activate(struct irq_domain *domain,
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- const struct cpumask *cpu_mask = cpu_online_mask;
int cpu;
- /* get the cpu_mask of local node */
- if (its_dev->its->numa_node >= 0)
- cpu_mask = cpumask_of_node(its_dev->its->numa_node);
-
- /* Bind the LPI to the first possible CPU */
- cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
- if (cpu >= nr_cpu_ids) {
- if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
- return -EINVAL;
-
- cpu = cpumask_first(cpu_online_mask);
- }
+ cpu = its_select_cpu(d, cpu_online_mask);
+ if (cpu < 0 || cpu >= nr_cpu_ids)
+ return -EINVAL;
+ its_inc_lpi_count(d, cpu);
its_dev->event_map.col_map[event] = cpu;
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -3462,6 +3577,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain,
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
/* Stop the delivery of interrupts */
its_send_discard(its_dev, event);
}
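
The rework above replaces "first matching CPU" placement with a small load balancer: each CPU carries two counters (managed vs. unmanaged LPIs), its_inc_lpi_count()/its_dec_lpi_count() maintain them across activate, deactivate and set_affinity, and cpumask_pick_least_loaded() routes a new LPI to the permitted CPU with the lowest relevant count. A runnable userspace model of just the selection step (plain ints stand in for the per-CPU atomics; illustrative only):

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 4

static int lpi_count[NR_CPUS];	/* stand-in for the per-CPU atomics */

/* Pick the allowed CPU carrying the fewest LPIs, as
 * cpumask_pick_least_loaded() does over tmpmask. */
static int pick_least_loaded(unsigned int allowed_mask)
{
	int best_cpu = -1, best_count = INT_MAX, cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(allowed_mask & (1u << cpu)))
			continue;
		if (lpi_count[cpu] < best_count) {
			best_cpu = cpu;
			best_count = lpi_count[cpu];
		}
	}
	return best_cpu;
}

int main(void)
{
	int i;

	/* Route 8 interrupts across CPUs 1-3 (mask 0b1110): each new
	 * one lands on the least-loaded CPU, so they spread evenly. */
	for (i = 0; i < 8; i++) {
		int cpu = pick_least_loaded(0xe);

		lpi_count[cpu]++;
		printf("IRQ %d -> CPU %d\n", i, cpu);
	}
	return 0;
}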
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d7006ef18a0d..cc46bc2d634b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1150,7 +1150,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
isb();
}
-static void gic_smp_init(void)
+static void __init gic_smp_init(void)
{
set_smp_cross_call(gic_raise_softirq);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
@@ -1282,7 +1282,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
break;
case SPI_RANGE:
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 30ab623343d3..00de05abd3c3 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -982,7 +982,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
} else {
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
new file mode 100644
index 000000000000..1ece9337c78d
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson HyperTransport Interrupt Vector support
+ */
+
+#define pr_fmt(fmt) "htvec: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Registers */
+#define HTVEC_EN_OFF 0x20
+#define HTVEC_MAX_PARENT_IRQ 4
+
+#define VEC_COUNT_PER_REG 32
+#define VEC_REG_COUNT 4
+#define VEC_COUNT (VEC_COUNT_PER_REG * VEC_REG_COUNT)
+#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG)
+#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG)
+
+struct htvec {
+ void __iomem *base;
+ struct irq_domain *htvec_domain;
+ raw_spinlock_t htvec_lock;
+};
+
+static void htvec_irq_dispatch(struct irq_desc *desc)
+{
+ int i;
+ u32 pending;
+ bool handled = false;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct htvec *priv = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < VEC_REG_COUNT; i++) {
+ pending = readl(priv->base + 4 * i);
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_irq(irq_linear_revmap(priv->htvec_domain, bit +
+ VEC_COUNT_PER_REG * i));
+ pending &= ~BIT(bit);
+ handled = true;
+ }
+ }
+
+ if (!handled)
+ spurious_interrupt();
+
+ chained_irq_exit(chip, desc);
+}
+
+static void htvec_ack_irq(struct irq_data *d)
+{
+ struct htvec *priv = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(VEC_REG_BIT(d->hwirq)),
+ priv->base + VEC_REG_IDX(d->hwirq) * 4);
+}
+
+static void htvec_mask_irq(struct irq_data *d)
+{
+ u32 reg;
+ void __iomem *addr;
+ struct htvec *priv = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&priv->htvec_lock);
+ addr = priv->base + HTVEC_EN_OFF;
+ addr += VEC_REG_IDX(d->hwirq) * 4;
+ reg = readl(addr);
+ reg &= ~BIT(VEC_REG_BIT(d->hwirq));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->htvec_lock);
+}
+
+static void htvec_unmask_irq(struct irq_data *d)
+{
+ u32 reg;
+ void __iomem *addr;
+ struct htvec *priv = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&priv->htvec_lock);
+ addr = priv->base + HTVEC_EN_OFF;
+ addr += VEC_REG_IDX(d->hwirq) * 4;
+ reg = readl(addr);
+ reg |= BIT(VEC_REG_BIT(d->hwirq));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->htvec_lock);
+}
+
+static struct irq_chip htvec_irq_chip = {
+ .name = "LOONGSON_HTVEC",
+ .irq_mask = htvec_mask_irq,
+ .irq_unmask = htvec_unmask_irq,
+ .irq_ack = htvec_ack_irq,
+};
+
+static int htvec_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ unsigned long hwirq;
+ unsigned int type, i;
+ struct htvec *priv = domain->host_data;
+
+ irq_domain_translate_onecell(domain, arg, &hwirq, &type);
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i, &htvec_irq_chip,
+ priv, handle_edge_irq, NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void htvec_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops htvec_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = htvec_domain_alloc,
+ .free = htvec_domain_free,
+};
+
+static void htvec_reset(struct htvec *priv)
+{
+ u32 idx;
+
+ /* Clear IRQ cause registers, mask all interrupts */
+ for (idx = 0; idx < VEC_REG_COUNT; idx++) {
+ writel_relaxed(0x0, priv->base + HTVEC_EN_OFF + 4 * idx);
+ writel_relaxed(0xFFFFFFFF, priv->base + 4 * idx);
+ }
+}
+
+static int htvec_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct htvec *priv;
+ int err, parent_irq[4], num_parents = 0, i;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&priv->htvec_lock);
+ priv->base = of_iomap(node, 0);
+ if (!priv->base) {
+ err = -ENOMEM;
+ goto free_priv;
+ }
+
+ /* Interrupts may come from any of the 4 interrupt lines */
+ for (i = 0; i < HTVEC_MAX_PARENT_IRQ; i++) {
+ parent_irq[i] = irq_of_parse_and_map(node, i);
+ if (parent_irq[i] <= 0)
+ break;
+
+ num_parents++;
+ }
+
+ if (!num_parents) {
+ pr_err("Failed to get parent irqs\n");
+ err = -ENODEV;
+ goto iounmap_base;
+ }
+
+ priv->htvec_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ VEC_COUNT,
+ &htvec_domain_ops,
+ priv);
+ if (!priv->htvec_domain) {
+ pr_err("Failed to create IRQ domain\n");
+ err = -ENOMEM;
+ goto iounmap_base;
+ }
+
+ htvec_reset(priv);
+
+ for (i = 0; i < num_parents; i++)
+ irq_set_chained_handler_and_data(parent_irq[i],
+ htvec_irq_dispatch, priv);
+
+ return 0;
+
+iounmap_base:
+ iounmap(priv->base);
+free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+IRQCHIP_DECLARE(htvec, "loongson,htvec-1.0", htvec_of_init);
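
The htvec dispatch loop above drains each 32-bit status register lowest-set-bit first and turns register index plus bit position into a hwirq for the linear domain. A runnable model of that decode (ffs() is the 1-based libc cousin of the kernel's 0-based __ffs()):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define VEC_COUNT_PER_REG 32

/* Drain one status register: handle the lowest pending bit, clear it,
 * repeat until empty, mapping (reg_index, bit) to a hwirq number. */
static void dispatch(unsigned int pending, int reg_index)
{
	while (pending) {
		int bit = ffs(pending) - 1;	/* __ffs() in the kernel */

		printf("handle hwirq %d\n",
		       reg_index * VEC_COUNT_PER_REG + bit);
		pending &= ~(1u << bit);
	}
}

int main(void)
{
	dispatch(0x80000005, 1);	/* bits 0, 2, 31 of register 1 */
	return 0;
}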
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
new file mode 100644
index 000000000000..50becd21008c
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson PCH MSI support
+ */
+
+#define pr_fmt(fmt) "pch-msi: " fmt
+
+#include <linux/irqchip.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+struct pch_msi_data {
+ struct mutex msi_map_lock;
+ phys_addr_t doorbell;
+ u32 irq_first; /* The vector number where MSIs start */
+ u32 num_irqs; /* The number of vectors for MSIs */
+ unsigned long *msi_map;
+};
+
+static void pch_msi_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void pch_msi_unmask_msi_irq(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ pci_msi_unmask_irq(d);
+}
+
+static struct irq_chip pch_msi_irq_chip = {
+ .name = "PCH PCI MSI",
+ .irq_mask = pch_msi_mask_msi_irq,
+ .irq_unmask = pch_msi_unmask_msi_irq,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int pch_msi_allocate_hwirq(struct pch_msi_data *priv, int num_req)
+{
+ int first;
+
+ mutex_lock(&priv->msi_map_lock);
+
+ first = bitmap_find_free_region(priv->msi_map, priv->num_irqs,
+ get_count_order(num_req));
+ if (first < 0) {
+ mutex_unlock(&priv->msi_map_lock);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&priv->msi_map_lock);
+
+ return priv->irq_first + first;
+}
+
+static void pch_msi_free_hwirq(struct pch_msi_data *priv,
+ int hwirq, int num_req)
+{
+ int first = hwirq - priv->irq_first;
+
+ mutex_lock(&priv->msi_map_lock);
+ bitmap_release_region(priv->msi_map, first, get_count_order(num_req));
+ mutex_unlock(&priv->msi_map_lock);
+}
+
+static void pch_msi_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct pch_msi_data *priv = irq_data_get_irq_chip_data(data);
+
+ msg->address_hi = upper_32_bits(priv->doorbell);
+ msg->address_lo = lower_32_bits(priv->doorbell);
+ msg->data = data->hwirq;
+}
+
+static struct msi_domain_info pch_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ .chip = &pch_msi_irq_chip,
+};
+
+static struct irq_chip middle_irq_chip = {
+ .name = "PCH MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = pch_msi_compose_msi_msg,
+};
+
+static int pch_msi_parent_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, int hwirq)
+{
+ struct irq_fwspec fwspec;
+ int ret;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct pch_msi_data *priv = domain->host_data;
+ int hwirq, err, i;
+
+ hwirq = pch_msi_allocate_hwirq(priv, nr_irqs);
+ if (hwirq < 0)
+ return hwirq;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = pch_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto err_hwirq;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &middle_irq_chip, priv);
+ }
+
+ return 0;
+
+err_hwirq:
+ pch_msi_free_hwirq(priv, hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, i);
+
+ return err;
+}
+
+static void pch_msi_middle_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct pch_msi_data *priv = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ pch_msi_free_hwirq(priv, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops pch_msi_middle_domain_ops = {
+ .alloc = pch_msi_middle_domain_alloc,
+ .free = pch_msi_middle_domain_free,
+};
+
+static int pch_msi_init_domains(struct pch_msi_data *priv,
+ struct device_node *node,
+ struct irq_domain *parent)
+{
+ struct irq_domain *middle_domain, *msi_domain;
+
+ middle_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ priv->num_irqs,
+ &pch_msi_middle_domain_ops,
+ priv);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSI middle domain\n");
+ return -ENOMEM;
+ }
+
+ middle_domain->parent = parent;
+ irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
+
+ msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ &pch_msi_domain_info,
+ middle_domain);
+ if (!msi_domain) {
+ pr_err("Failed to create PCI MSI domain\n");
+ irq_domain_remove(middle_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int pch_msi_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct pch_msi_data *priv;
+ struct irq_domain *parent_domain;
+ struct resource res;
+ int ret;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Failed to find the parent domain\n");
+ return -ENXIO;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->msi_map_lock);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("Failed to allocate resource\n");
+ goto err_priv;
+ }
+
+ priv->doorbell = res.start;
+
+ if (of_property_read_u32(node, "loongson,msi-base-vec",
+ &priv->irq_first)) {
+ pr_err("Unable to parse MSI vec base\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ if (of_property_read_u32(node, "loongson,msi-num-vecs",
+ &priv->num_irqs)) {
+ pr_err("Unable to parse MSI vec number\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ priv->msi_map = bitmap_alloc(priv->num_irqs, GFP_KERNEL);
+ if (!priv->msi_map) {
+ ret = -ENOMEM;
+ goto err_priv;
+ }
+
+ pr_debug("Registering %d MSIs, starting at %d\n",
+ priv->num_irqs, priv->irq_first);
+
+ ret = pch_msi_init_domains(priv, node, parent_domain);
+ if (ret)
+ goto err_map;
+
+ return 0;
+
+err_map:
+ kfree(priv->msi_map);
+err_priv:
+ kfree(priv);
+ return ret;
+}
+
+IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_init);
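
pch_msi_allocate_hwirq() reserves vectors with bitmap_find_free_region(), which hands out naturally aligned blocks of 2^order entries; get_count_order() rounds the request up to that order, matching PCI Multi-MSI's requirement that a function's vectors form an aligned power-of-two block. A runnable sketch of the rounding (count_order() is a stand-in for the kernel's get_count_order()):

#include <stdio.h>

/* Smallest 'order' with (1 << order) >= n, like get_count_order(). */
static int count_order(int n)
{
	int order = 0;

	while ((1 << order) < n)
		order++;
	return order;
}

int main(void)
{
	int req;

	/* A request is rounded up before bitmap_find_free_region()
	 * carves out an aligned 2^order range of vectors. */
	for (req = 1; req <= 8; req++)
		printf("num_req=%d -> order=%d -> %d vectors reserved\n",
		       req, count_order(req), 1 << count_order(req));
	return 0;
}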
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
new file mode 100644
index 000000000000..2a05b9305012
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson PCH PIC support
+ */
+
+#define pr_fmt(fmt) "pch-pic: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Registers */
+#define PCH_PIC_MASK 0x20
+#define PCH_PIC_HTMSI_EN 0x40
+#define PCH_PIC_EDGE 0x60
+#define PCH_PIC_CLR 0x80
+#define PCH_PIC_AUTO0 0xc0
+#define PCH_PIC_AUTO1 0xe0
+#define PCH_INT_ROUTE(irq) (0x100 + irq)
+#define PCH_INT_HTVEC(irq) (0x200 + irq)
+#define PCH_PIC_POL 0x3e0
+
+#define PIC_COUNT_PER_REG 32
+#define PIC_REG_COUNT 2
+#define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT)
+#define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG)
+#define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG)
+
+struct pch_pic {
+ void __iomem *base;
+ struct irq_domain *pic_domain;
+ u32 ht_vec_base;
+ raw_spinlock_t pic_lock;
+};
+
+static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
+{
+ u32 reg;
+ void __iomem *addr = priv->base + offset + PIC_REG_IDX(bit) * 4;
+
+ raw_spin_lock(&priv->pic_lock);
+ reg = readl(addr);
+ reg |= BIT(PIC_REG_BIT(bit));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->pic_lock);
+}
+
+static void pch_pic_bitclr(struct pch_pic *priv, int offset, int bit)
+{
+ u32 reg;
+ void __iomem *addr = priv->base + offset + PIC_REG_IDX(bit) * 4;
+
+ raw_spin_lock(&priv->pic_lock);
+ reg = readl(addr);
+ reg &= ~BIT(PIC_REG_BIT(bit));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->pic_lock);
+}
+
+static void pch_pic_eoi_irq(struct irq_data *d)
+{
+ u32 idx = PIC_REG_IDX(d->hwirq);
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(PIC_REG_BIT(d->hwirq)),
+ priv->base + PCH_PIC_CLR + idx * 4);
+}
+
+static void pch_pic_mask_irq(struct irq_data *d)
+{
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+ pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq);
+ irq_chip_mask_parent(d);
+}
+
+static void pch_pic_unmask_irq(struct irq_data *d)
+{
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+ irq_chip_unmask_parent(d);
+ pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq);
+}
+
+static int pch_pic_set_type(struct irq_data *d, unsigned int type)
+{
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int ret = 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct irq_chip pch_pic_irq_chip = {
+ .name = "PCH PIC",
+ .irq_mask = pch_pic_mask_irq,
+ .irq_unmask = pch_pic_unmask_irq,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = pch_pic_eoi_irq,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = pch_pic_set_type,
+};
+
+static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int err;
+ unsigned int type;
+ unsigned long hwirq;
+ struct irq_fwspec fwspec;
+ struct pch_pic *priv = domain->host_data;
+
+ irq_domain_translate_twocell(domain, arg, &hwirq, &type);
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + priv->ht_vec_base;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &pch_pic_irq_chip, priv,
+ handle_fasteoi_ack_irq, NULL, NULL);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops pch_pic_domain_ops = {
+ .translate = irq_domain_translate_twocell,
+ .alloc = pch_pic_alloc,
+ .free = irq_domain_free_irqs_parent,
+};
+
+static void pch_pic_reset(struct pch_pic *priv)
+{
+ int i;
+
+ for (i = 0; i < PIC_COUNT; i++) {
+ /* Write vector ID */
+ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
+ /* Hardcode route to HT0 Lo */
+ writeb(1, priv->base + PCH_INT_ROUTE(i));
+ }
+
+ for (i = 0; i < PIC_REG_COUNT; i++) {
+ /* Clear IRQ cause registers, mask all interrupts */
+ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_MASK + 4 * i);
+ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_CLR + 4 * i);
+ /* Clear auto bounce, we don't need that */
+ writel_relaxed(0, priv->base + PCH_PIC_AUTO0 + 4 * i);
+ writel_relaxed(0, priv->base + PCH_PIC_AUTO1 + 4 * i);
+ /* Enable HTMSI transformer */
+ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_HTMSI_EN + 4 * i);
+ }
+}
+
+static int pch_pic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct pch_pic *priv;
+ struct irq_domain *parent_domain;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&priv->pic_lock);
+ priv->base = of_iomap(node, 0);
+ if (!priv->base) {
+ err = -ENOMEM;
+ goto free_priv;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Failed to find the parent domain\n");
+ err = -ENXIO;
+ goto iounmap_base;
+ }
+
+ if (of_property_read_u32(node, "loongson,pic-base-vec",
+ &priv->ht_vec_base)) {
+ pr_err("Failed to determine pic-base-vec\n");
+ err = -EINVAL;
+ goto iounmap_base;
+ }
+
+ priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0,
+ PIC_COUNT,
+ of_node_to_fwnode(node),
+ &pch_pic_domain_ops,
+ priv);
+ if (!priv->pic_domain) {
+ pr_err("Failed to create IRQ domain\n");
+ err = -ENOMEM;
+ goto iounmap_base;
+ }
+
+ pch_pic_reset(priv);
+
+ return 0;
+
+iounmap_base:
+ iounmap(priv->base);
+free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
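
pch_pic_set_type() boils down to a two-bit truth table per interrupt line: the EDGE register bit selects edge vs. level trigger and the POL bit selects the active polarity. A small runnable rendering of the encoding used in the switch above (illustrative only):

#include <stdio.h>

struct trig { const char *type; int edge; int pol; };

int main(void)
{
	/* Mirrors pch_pic_set_type(): EDGE=1 edge, EDGE=0 level;
	 * POL=0 rising/high, POL=1 falling/low. */
	static const struct trig table[] = {
		{ "IRQ_TYPE_EDGE_RISING",  1, 0 },
		{ "IRQ_TYPE_EDGE_FALLING", 1, 1 },
		{ "IRQ_TYPE_LEVEL_HIGH",   0, 0 },
		{ "IRQ_TYPE_LEVEL_LOW",    0, 1 },
	};
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%-22s EDGE=%d POL=%d\n",
		       table[i].type, table[i].edge, table[i].pol);
	return 0;
}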
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index d0a71febdadc..d9c53f85a68e 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -76,6 +76,7 @@ struct plic_handler {
void __iomem *enable_base;
struct plic_priv *priv;
};
+static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static inline void plic_toggle(struct plic_handler *handler,
@@ -176,9 +177,12 @@ static struct irq_chip plic_chip = {
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
+ struct plic_priv *priv = d->host_data;
+
irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_noprobe(irq);
+ irq_set_affinity(irq, &priv->lmask);
return 0;
}
@@ -282,6 +286,7 @@ static int __init plic_init(struct device_node *node,
int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
struct plic_priv *priv;
+ struct plic_handler *handler;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -301,8 +306,6 @@ static int __init plic_init(struct device_node *node,
nr_contexts = of_irq_count(node);
if (WARN_ON(!nr_contexts))
goto out_iounmap;
- if (WARN_ON(nr_contexts < num_possible_cpus()))
- goto out_iounmap;
error = -ENOMEM;
priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
@@ -312,7 +315,6 @@ static int __init plic_init(struct device_node *node,
for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
- struct plic_handler *handler;
irq_hw_number_t hwirq;
int cpu, hartid;
@@ -366,11 +368,20 @@ done:
nr_handlers++;
}
- cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ /*
+ * We can have multiple PLIC instances, so set up the cpuhp state only
+ * when the context handler for the current/boot CPU is present.
+ */
+ handler = this_cpu_ptr(&plic_handlers);
+ if (handler->present && !plic_cpuhp_setup_done) {
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
- pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
- nr_irqs, nr_handlers, nr_contexts);
+ plic_cpuhp_setup_done = true;
+ }
+
+ pr_info("%pOFP: mapped %d interrupts with %d handlers for"
+ " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
set_handle_irq(plic_handle_irq);
return 0;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index c664d84e1667..ed943140e1fd 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -83,6 +83,17 @@ config LEDS_APU
To compile this driver as a module, choose M here: the
module will be called leds-apu.
+config LEDS_ARIEL
+ tristate "Dell Wyse 3020 status LED support"
+ depends on LEDS_CLASS
+ depends on (MACH_MMP3_DT && MFD_ENE_KB3930) || COMPILE_TEST
+ help
+ This driver adds support for controlling the front panel status
+ LEDs on the Dell Wyse 3020 (Ariel) board via the KB3930 Embedded
+ Controller.
+
+ Say Y if your machine is a Dell Wyse 3020 thin client.
+
config LEDS_AS3645A
tristate "AS3645A and LM3555 LED flash controllers support"
depends on I2C && LEDS_CLASS_FLASH
@@ -92,6 +103,16 @@ config LEDS_AS3645A
controller. V4L2 flash API is provided as well if
CONFIG_V4L2_FLASH_API is enabled.
+config LEDS_AW2013
+ tristate "LED support for Awinic AW2013"
+ depends on LEDS_CLASS && I2C && OF
+ help
+ This option enables support for the AW2013 3-channel
+ LED driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-aw2013.
+
config LEDS_BCM6328
tristate "LED Support for Broadcom BCM6328"
depends on LEDS_CLASS
@@ -857,6 +878,14 @@ config LEDS_IP30
To compile this driver as a module, choose M here: the module
will be called leds-ip30.
+config LEDS_SGM3140
+ tristate "LED support for the SGM3140"
+ depends on LEDS_CLASS_FLASH
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+ help
+ This option enables support for the SGM3140 500mA Buck/Boost Charge
+ Pump LED Driver.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 45235d5fb218..d6b8a792c936 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -12,8 +12,10 @@ obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
obj-$(CONFIG_LEDS_APU) += leds-apu.o
+obj-$(CONFIG_LEDS_ARIEL) += leds-ariel.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
+obj-$(CONFIG_LEDS_AW2013) += leds-aw2013.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
@@ -77,6 +79,7 @@ obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
+obj-$(CONFIG_LEDS_SGM3140) += leds-sgm3140.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
diff --git a/drivers/leds/leds-ariel.c b/drivers/leds/leds-ariel.c
new file mode 100644
index 000000000000..bb68ba23a7d4
--- /dev/null
+++ b/drivers/leds/leds-ariel.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-or-later
+/*
+ * Dell Wyse 3020 a.k.a. "Ariel" Embedded Controller LED Driver
+ *
+ * Copyright (C) 2020 Lubomir Rintel
+ */
+
+#include <linux/module.h>
+#include <linux/leds.h>
+#include <linux/regmap.h>
+#include <linux/of_platform.h>
+
+enum ec_index {
+ EC_BLUE_LED = 0x01,
+ EC_AMBER_LED = 0x02,
+ EC_GREEN_LED = 0x03,
+};
+
+enum {
+ EC_LED_OFF = 0x00,
+ EC_LED_STILL = 0x01,
+ EC_LED_FADE = 0x02,
+ EC_LED_BLINK = 0x03,
+};
+
+struct ariel_led {
+ struct regmap *ec_ram;
+ enum ec_index ec_index;
+ struct led_classdev led_cdev;
+};
+
+#define led_cdev_to_ariel_led(c) container_of(c, struct ariel_led, led_cdev)
+
+static enum led_brightness ariel_led_get(struct led_classdev *led_cdev)
+{
+ struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
+ unsigned int led_status = 0;
+
+ if (regmap_read(led->ec_ram, led->ec_index, &led_status))
+ return LED_OFF;
+
+ if (led_status == EC_LED_STILL)
+ return LED_FULL;
+ else
+ return LED_OFF;
+}
+
+static void ariel_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
+
+ if (brightness == LED_OFF)
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_OFF);
+ else
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_STILL);
+}
+
+static int ariel_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
+
+ if (*delay_on == 0 && *delay_off == 0)
+ return -EINVAL;
+
+ if (*delay_on == 0) {
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_OFF);
+ } else if (*delay_off == 0) {
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_STILL);
+ } else {
+ *delay_on = 500;
+ *delay_off = 500;
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_BLINK);
+ }
+
+ return 0;
+}
+
+#define NLEDS 3
+
+static int ariel_led_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ariel_led *leds;
+ struct regmap *ec_ram;
+ int ret;
+ int i;
+
+ ec_ram = dev_get_regmap(dev->parent, "ec_ram");
+ if (!ec_ram)
+ return -ENODEV;
+
+ leds = devm_kcalloc(dev, NLEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ leds[0].ec_index = EC_BLUE_LED;
+ leds[0].led_cdev.name = "blue:power";
+ leds[0].led_cdev.default_trigger = "default-on";
+
+ leds[1].ec_index = EC_AMBER_LED;
+ leds[1].led_cdev.name = "amber:status";
+
+ leds[2].ec_index = EC_GREEN_LED;
+ leds[2].led_cdev.name = "green:status";
+ leds[2].led_cdev.default_trigger = "default-on";
+
+ for (i = 0; i < NLEDS; i++) {
+ leds[i].ec_ram = ec_ram;
+ leds[i].led_cdev.brightness_get = ariel_led_get;
+ leds[i].led_cdev.brightness_set = ariel_led_set;
+ leds[i].led_cdev.blink_set = ariel_blink_set;
+
+ ret = devm_led_classdev_register(dev, &leds[i].led_cdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver ariel_led_driver = {
+ .probe = ariel_led_probe,
+ .driver = {
+ .name = "dell-wyse-ariel-led",
+ },
+};
+module_platform_driver(ariel_led_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Dell Wyse 3020 Status LEDs Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
new file mode 100644
index 000000000000..d709cc1f949e
--- /dev/null
+++ b/drivers/leds/leds-aw2013.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Driver for Awinic AW2013 3-channel LED driver
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define AW2013_MAX_LEDS 3
+
+/* Reset and ID register */
+#define AW2013_RSTR 0x00
+#define AW2013_RSTR_RESET 0x55
+#define AW2013_RSTR_CHIP_ID 0x33
+
+/* Global control register */
+#define AW2013_GCR 0x01
+#define AW2013_GCR_ENABLE BIT(0)
+
+/* LED channel enable register */
+#define AW2013_LCTR 0x30
+#define AW2013_LCTR_LE(x) BIT((x))
+
+/* LED channel control registers */
+#define AW2013_LCFG(x) (0x31 + (x))
+#define AW2013_LCFG_IMAX_MASK (BIT(0) | BIT(1)) // Should be 0-3
+#define AW2013_LCFG_MD BIT(4)
+#define AW2013_LCFG_FI BIT(5)
+#define AW2013_LCFG_FO BIT(6)
+
+/* LED channel PWM registers */
+#define AW2013_REG_PWM(x) (0x34 + (x))
+
+/* LED channel timing registers */
+#define AW2013_LEDT0(x) (0x37 + (x) * 3)
+#define AW2013_LEDT0_T1(x) ((x) << 4) // Should be 0-7
+#define AW2013_LEDT0_T2(x) (x) // Should be 0-5
+
+#define AW2013_LEDT1(x) (0x38 + (x) * 3)
+#define AW2013_LEDT1_T3(x) ((x) << 4) // Should be 0-7
+#define AW2013_LEDT1_T4(x) (x) // Should be 0-7
+
+#define AW2013_LEDT2(x) (0x39 + (x) * 3)
+#define AW2013_LEDT2_T0(x) ((x) << 4) // Should be 0-8
+#define AW2013_LEDT2_REPEAT(x) (x) // Should be 0-15
+
+#define AW2013_REG_MAX 0x77
+
+#define AW2013_TIME_STEP 130 /* ms */
+
+struct aw2013;
+
+struct aw2013_led {
+ struct aw2013 *chip;
+ struct led_classdev cdev;
+ u32 num;
+ unsigned int imax;
+};
+
+struct aw2013 {
+ struct mutex mutex; /* held when writing to registers */
+ struct regulator *vcc_regulator;
+ struct i2c_client *client;
+ struct aw2013_led leds[AW2013_MAX_LEDS];
+ struct regmap *regmap;
+ int num_leds;
+ bool enabled;
+};
+
+static int aw2013_chip_init(struct aw2013 *chip)
+{
+ int i, ret;
+
+ ret = regmap_write(chip->regmap, AW2013_GCR, AW2013_GCR_ENABLE);
+ if (ret) {
+ dev_err(&chip->client->dev, "Failed to enable the chip: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (i = 0; i < chip->num_leds; i++) {
+ ret = regmap_update_bits(chip->regmap,
+ AW2013_LCFG(chip->leds[i].num),
+ AW2013_LCFG_IMAX_MASK,
+ chip->leds[i].imax);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "Failed to set maximum current for led %d: %d\n",
+ chip->leds[i].num, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void aw2013_chip_disable(struct aw2013 *chip)
+{
+ int ret;
+
+ if (!chip->enabled)
+ return;
+
+ regmap_write(chip->regmap, AW2013_GCR, 0);
+
+ ret = regulator_disable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "Failed to disable regulator: %d\n", ret);
+ return;
+ }
+
+ chip->enabled = false;
+}
+
+static int aw2013_chip_enable(struct aw2013 *chip)
+{
+ int ret;
+
+ if (chip->enabled)
+ return 0;
+
+ ret = regulator_enable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "Failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ chip->enabled = true;
+
+ ret = aw2013_chip_init(chip);
+ if (ret)
+ aw2013_chip_disable(chip);
+
+ return ret;
+}
+
+static bool aw2013_chip_in_use(struct aw2013 *chip)
+{
+ int i;
+
+ for (i = 0; i < chip->num_leds; i++)
+ if (chip->leds[i].cdev.brightness)
+ return true;
+
+ return false;
+}
+
+static int aw2013_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct aw2013_led *led = container_of(cdev, struct aw2013_led, cdev);
+ int ret, num;
+
+ mutex_lock(&led->chip->mutex);
+
+ if (aw2013_chip_in_use(led->chip)) {
+ ret = aw2013_chip_enable(led->chip);
+ if (ret)
+ goto error;
+ }
+
+ num = led->num;
+
+ ret = regmap_write(led->chip->regmap, AW2013_REG_PWM(num), brightness);
+ if (ret)
+ goto error;
+
+ if (brightness) {
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCTR,
+ AW2013_LCTR_LE(num), 0xFF);
+ } else {
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCTR,
+ AW2013_LCTR_LE(num), 0);
+ if (ret)
+ goto error;
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCFG(num),
+ AW2013_LCFG_MD, 0);
+ }
+ if (ret)
+ goto error;
+
+ if (!aw2013_chip_in_use(led->chip))
+ aw2013_chip_disable(led->chip);
+
+error:
+ mutex_unlock(&led->chip->mutex);
+
+ return ret;
+}
+
+static int aw2013_blink_set(struct led_classdev *cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct aw2013_led *led = container_of(cdev, struct aw2013_led, cdev);
+ int ret, num = led->num;
+ unsigned long off = 0, on = 0;
+
+ /* If no blink specified, default to 1 Hz. */
+ if (!*delay_off && !*delay_on) {
+ *delay_off = 500;
+ *delay_on = 500;
+ }
+
+ if (!led->cdev.brightness) {
+ led->cdev.brightness = LED_FULL;
+ ret = aw2013_brightness_set(&led->cdev, led->cdev.brightness);
+ if (ret)
+ return ret;
+ }
+
+ /* Never on - just set to off */
+ if (!*delay_on) {
+ led->cdev.brightness = LED_OFF;
+ return aw2013_brightness_set(&led->cdev, LED_OFF);
+ }
+
+ mutex_lock(&led->chip->mutex);
+
+ /* Never off - brightness is already set, disable blinking */
+ if (!*delay_off) {
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCFG(num),
+ AW2013_LCFG_MD, 0);
+ goto out;
+ }
+
+ /* Convert into values the HW will understand. */
+ off = min(5, ilog2((*delay_off - 1) / AW2013_TIME_STEP) + 1);
+ on = min(7, ilog2((*delay_on - 1) / AW2013_TIME_STEP) + 1);
+
+ *delay_off = BIT(off) * AW2013_TIME_STEP;
+ *delay_on = BIT(on) * AW2013_TIME_STEP;
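+ /*
+ * Worked example: for *delay_off = 500 ms, (500 - 1) / 130 = 3 and
+ * ilog2(3) + 1 = 2, so off = 2 and the reported delay becomes
+ * BIT(2) * 130 = 520 ms -- the request rounded up to a power-of-two
+ * multiple of the 130 ms hardware time step.
+ */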
+
+ /* Set timings */
+ ret = regmap_write(led->chip->regmap,
+ AW2013_LEDT0(num), AW2013_LEDT0_T2(on));
+ if (ret)
+ goto out;
+ ret = regmap_write(led->chip->regmap,
+ AW2013_LEDT1(num), AW2013_LEDT1_T4(off));
+ if (ret)
+ goto out;
+
+ /* Finally, enable the LED */
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCFG(num),
+ AW2013_LCFG_MD, 0xFF);
+ if (ret)
+ goto out;
+
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCTR,
+ AW2013_LCTR_LE(num), 0xFF);
+
+out:
+ mutex_unlock(&led->chip->mutex);
+
+ return ret;
+}
+
+static int aw2013_probe_dt(struct aw2013 *chip)
+{
+ struct device_node *np = chip->client->dev.of_node, *child;
+ int count, ret = 0, i = 0;
+ struct aw2013_led *led;
+
+ count = of_get_child_count(np);
+ if (!count || count > AW2013_MAX_LEDS)
+ return -EINVAL;
+
+ regmap_write(chip->regmap, AW2013_RSTR, AW2013_RSTR_RESET);
+
+ for_each_available_child_of_node(np, child) {
+ struct led_init_data init_data = {};
+ u32 source;
+ u32 imax;
+
+ ret = of_property_read_u32(child, "reg", &source);
+ if (ret != 0 || source >= AW2013_MAX_LEDS) {
+ dev_err(&chip->client->dev,
+ "Couldn't read LED address: %d\n", ret);
+ count--;
+ continue;
+ }
+
+ led = &chip->leds[i];
+ led->num = source;
+ led->chip = chip;
+ init_data.fwnode = of_fwnode_handle(child);
+
+ if (!of_property_read_u32(child, "led-max-microamp", &imax)) {
+ led->imax = min_t(u32, imax / 5000, 3);
+ } else {
+ led->imax = 1; // 5mA
+ dev_info(&chip->client->dev,
+ "DT property led-max-microamp is missing\n");
+ }
+
+ of_property_read_string(child, "linux,default-trigger",
+ &led->cdev.default_trigger);
+
+ led->cdev.brightness_set_blocking = aw2013_brightness_set;
+ led->cdev.blink_set = aw2013_blink_set;
+
+ ret = devm_led_classdev_register_ext(&chip->client->dev,
+ &led->cdev, &init_data);
+ if (ret < 0)
+ return ret;
+
+ i++;
+ }
+
+ if (!count)
+ return -EINVAL;
+
+ chip->num_leds = i;
+
+ return 0;
+}
+
+static const struct regmap_config aw2013_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AW2013_REG_MAX,
+};
+
+static int aw2013_probe(struct i2c_client *client)
+{
+ struct aw2013 *chip;
+ int ret;
+ unsigned int chipid;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ mutex_init(&chip->mutex);
+ mutex_lock(&chip->mutex);
+
+ chip->client = client;
+ i2c_set_clientdata(client, chip);
+
+ chip->regmap = devm_regmap_init_i2c(client, &aw2013_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto error;
+ }
+
+ chip->vcc_regulator = devm_regulator_get(&client->dev, "vcc");
+ ret = PTR_ERR_OR_ZERO(chip->vcc_regulator);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to request regulator: %d\n", ret);
+ goto error;
+ }
+
+ ret = regulator_enable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to enable regulator: %d\n", ret);
+ goto error;
+ }
+
+ ret = regmap_read(chip->regmap, AW2013_RSTR, &chipid);
+ if (ret) {
+ dev_err(&client->dev, "Failed to read chip ID: %d\n",
+ ret);
+ goto error_reg;
+ }
+
+ if (chipid != AW2013_RSTR_CHIP_ID) {
+ dev_err(&client->dev, "Chip reported wrong ID: %x\n",
+ chipid);
+ ret = -ENODEV;
+ goto error_reg;
+ }
+
+ ret = aw2013_probe_dt(chip);
+ if (ret < 0)
+ goto error_reg;
+
+ ret = regulator_disable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to disable regulator: %d\n", ret);
+ goto error;
+ }
+
+ mutex_unlock(&chip->mutex);
+
+ return 0;
+
+error_reg:
+ regulator_disable(chip->vcc_regulator);
+
+error:
+ mutex_destroy(&chip->mutex);
+ return ret;
+}
+
+static int aw2013_remove(struct i2c_client *client)
+{
+ struct aw2013 *chip = i2c_get_clientdata(client);
+
+ aw2013_chip_disable(chip);
+
+ mutex_destroy(&chip->mutex);
+
+ return 0;
+}
+
+static const struct of_device_id aw2013_match_table[] = {
+ { .compatible = "awinic,aw2013", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, aw2013_match_table);
+
+static struct i2c_driver aw2013_driver = {
+ .driver = {
+ .name = "leds-aw2013",
+ .of_match_table = of_match_ptr(aw2013_match_table),
+ },
+ .probe_new = aw2013_probe,
+ .remove = aw2013_remove,
+};
+
+module_i2c_driver(aw2013_driver);
+
+MODULE_AUTHOR("Nikita Travkin <nikitos.tr@gmail.com>");
+MODULE_DESCRIPTION("AW2013 LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index a5abb499574b..11ce05249751 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index 4e4e542774cb..6ee9131fbf25 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -7,7 +7,7 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/kernel.h>
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index c94995f0daa2..9079850e6ea4 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -5,7 +5,6 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 14ef4ccdda3a..ceceeb6a0e96 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -12,16 +12,17 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/leds.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
struct netxbig_gpio_ext {
- unsigned int *addr;
+ struct gpio_desc **addr;
int num_addr;
- unsigned int *data;
+ struct gpio_desc **data;
int num_data;
- unsigned int enable;
+ struct gpio_desc *enable;
};
enum netxbig_led_mode {
@@ -69,7 +70,7 @@ static void gpio_ext_set_addr(struct netxbig_gpio_ext *gpio_ext, int addr)
int pin;
for (pin = 0; pin < gpio_ext->num_addr; pin++)
- gpio_set_value(gpio_ext->addr[pin], (addr >> pin) & 1);
+ gpiod_set_value(gpio_ext->addr[pin], (addr >> pin) & 1);
}
static void gpio_ext_set_data(struct netxbig_gpio_ext *gpio_ext, int data)
@@ -77,14 +78,14 @@ static void gpio_ext_set_data(struct netxbig_gpio_ext *gpio_ext, int data)
int pin;
for (pin = 0; pin < gpio_ext->num_data; pin++)
- gpio_set_value(gpio_ext->data[pin], (data >> pin) & 1);
+ gpiod_set_value(gpio_ext->data[pin], (data >> pin) & 1);
}
static void gpio_ext_enable_select(struct netxbig_gpio_ext *gpio_ext)
{
/* Enable select is done on the rising edge. */
- gpio_set_value(gpio_ext->enable, 0);
- gpio_set_value(gpio_ext->enable, 1);
+ gpiod_set_value(gpio_ext->enable, 0);
+ gpiod_set_value(gpio_ext->enable, 1);
}
static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
@@ -99,41 +100,6 @@ static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
spin_unlock_irqrestore(&gpio_ext_lock, flags);
}
-static int gpio_ext_init(struct platform_device *pdev,
- struct netxbig_gpio_ext *gpio_ext)
-{
- int err;
- int i;
-
- if (unlikely(!gpio_ext))
- return -EINVAL;
-
- /* Configure address GPIOs. */
- for (i = 0; i < gpio_ext->num_addr; i++) {
- err = devm_gpio_request_one(&pdev->dev, gpio_ext->addr[i],
- GPIOF_OUT_INIT_LOW,
- "GPIO extension addr");
- if (err)
- return err;
- }
- /* Configure data GPIOs. */
- for (i = 0; i < gpio_ext->num_data; i++) {
- err = devm_gpio_request_one(&pdev->dev, gpio_ext->data[i],
- GPIOF_OUT_INIT_LOW,
- "GPIO extension data");
- if (err)
- return err;
- }
- /* Configure "enable select" GPIO. */
- err = devm_gpio_request_one(&pdev->dev, gpio_ext->enable,
- GPIOF_OUT_INIT_LOW,
- "GPIO extension enable");
- if (err)
- return err;
-
- return 0;
-}
-
/*
* Class LED driver.
*/
@@ -347,15 +313,47 @@ static int create_netxbig_led(struct platform_device *pdev,
return devm_led_classdev_register(&pdev->dev, &led_dat->cdev);
}
-static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
- struct netxbig_gpio_ext *gpio_ext)
+/**
+ * netxbig_gpio_ext_remove() - Clean up GPIO extension data
+ * @data: managed resource data to clean up
+ *
+ * Since we pick GPIO descriptors from a device other than the one our
+ * driver is probing, we need to register a specific callback to free
+ * them up using managed resources.
+ */
+static void netxbig_gpio_ext_remove(void *data)
+{
+ struct netxbig_gpio_ext *gpio_ext = data;
+ int i;
+
+ for (i = 0; i < gpio_ext->num_addr; i++)
+ gpiod_put(gpio_ext->addr[i]);
+ for (i = 0; i < gpio_ext->num_data; i++)
+ gpiod_put(gpio_ext->data[i]);
+ gpiod_put(gpio_ext->enable);
+}
+
+/**
+ * netxbig_gpio_ext_get() - Obtain GPIO extension device data
+ * @dev: main LED device
+ * @gpio_ext_dev: the GPIO extension device
+ * @gpio_ext: the data structure holding the GPIO extension data
+ *
+ * This function walks the subdevice that only contains GPIO line
+ * handles in the device tree and obtains the GPIO descriptors from that
+ * device.
+ */
+static int netxbig_gpio_ext_get(struct device *dev,
+ struct device *gpio_ext_dev,
+ struct netxbig_gpio_ext *gpio_ext)
{
- int *addr, *data;
+ struct gpio_desc **addr, **data;
int num_addr, num_data;
+ struct gpio_desc *gpiod;
int ret;
int i;
- ret = of_gpio_named_count(np, "addr-gpios");
+ ret = gpiod_count(gpio_ext_dev, "addr");
if (ret < 0) {
dev_err(dev,
"Failed to count GPIOs in DT property addr-gpios\n");
@@ -366,16 +364,25 @@ static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
if (!addr)
return -ENOMEM;
+ /*
+ * We cannot use devm_ managed resources with these GPIO descriptors
+ * since they are associated with the "GPIO extension device", which
+ * is not bound to any driver. The device tree parser will however
+ * populate a platform device for it, so we can still obtain the
+ * GPIO descriptors from that device.
+ */
for (i = 0; i < num_addr; i++) {
- ret = of_get_named_gpio(np, "addr-gpios", i);
- if (ret < 0)
- return ret;
- addr[i] = ret;
+ gpiod = gpiod_get_index(gpio_ext_dev, "addr", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ gpiod_set_consumer_name(gpiod, "GPIO extension addr");
+ addr[i] = gpiod;
}
gpio_ext->addr = addr;
gpio_ext->num_addr = num_addr;
- ret = of_gpio_named_count(np, "data-gpios");
+ ret = gpiod_count(gpio_ext_dev, "data");
if (ret < 0) {
dev_err(dev,
"Failed to count GPIOs in DT property data-gpios\n");
@@ -387,23 +394,26 @@ static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
return -ENOMEM;
for (i = 0; i < num_data; i++) {
- ret = of_get_named_gpio(np, "data-gpios", i);
- if (ret < 0)
- return ret;
- data[i] = ret;
+ gpiod = gpiod_get_index(gpio_ext_dev, "data", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ gpiod_set_consumer_name(gpiod, "GPIO extension data");
+ data[i] = gpiod;
}
gpio_ext->data = data;
gpio_ext->num_data = num_data;
- ret = of_get_named_gpio(np, "enable-gpio", 0);
- if (ret < 0) {
+ gpiod = gpiod_get(gpio_ext_dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
dev_err(dev,
"Failed to get GPIO from DT property enable-gpio\n");
- return ret;
+ return PTR_ERR(gpiod);
}
- gpio_ext->enable = ret;
+ gpiod_set_consumer_name(gpiod, "GPIO extension enable");
+ gpio_ext->enable = gpiod;
- return 0;
+ return devm_add_action_or_reset(dev, netxbig_gpio_ext_remove, gpio_ext);
}
static int netxbig_leds_get_of_pdata(struct device *dev,
@@ -411,6 +421,8 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
{
struct device_node *np = dev->of_node;
struct device_node *gpio_ext_np;
+ struct platform_device *gpio_ext_pdev;
+ struct device *gpio_ext_dev;
struct device_node *child;
struct netxbig_gpio_ext *gpio_ext;
struct netxbig_led_timer *timers;
@@ -426,13 +438,19 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
dev_err(dev, "Failed to get DT handle gpio-ext\n");
return -EINVAL;
}
+ gpio_ext_pdev = of_find_device_by_node(gpio_ext_np);
+ if (!gpio_ext_pdev) {
+ dev_err(dev, "Failed to find platform device for gpio-ext\n");
+ return -ENODEV;
+ }
+ gpio_ext_dev = &gpio_ext_pdev->dev;
gpio_ext = devm_kzalloc(dev, sizeof(*gpio_ext), GFP_KERNEL);
if (!gpio_ext) {
of_node_put(gpio_ext_np);
return -ENOMEM;
}
- ret = gpio_ext_get_of_pdata(dev, gpio_ext_np, gpio_ext);
+ ret = netxbig_gpio_ext_get(dev, gpio_ext_dev, gpio_ext);
of_node_put(gpio_ext_np);
if (ret)
return ret;
@@ -585,10 +603,6 @@ static int netxbig_led_probe(struct platform_device *pdev)
if (!leds_data)
return -ENOMEM;
- ret = gpio_ext_init(pdev, pdata->gpio_ext);
- if (ret < 0)
- return ret;
-
for (i = 0; i < pdata->num_leds; i++) {
ret = create_netxbig_led(pdev, pdata,
&leds_data[i], &pdata->leds[i]);
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 66cdc003b8f4..d288acbc99c7 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -4,7 +4,7 @@
* Copyright 2013 Qtechnology/AS
*
* Author: Peter Meerwald <p.meerwald@bct-electronic.com>
- * Author: Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ * Author: Ricardo Ribalda <ribalda@kernel.org>
*
* Based on leds-pca955x.c
*
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 6c8a724aac51..ef7b91bd2064 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -91,15 +91,21 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
pwm_init_state(led_data->pwm, &led_data->pwmstate);
ret = devm_led_classdev_register(dev, &led_data->cdev);
- if (ret == 0) {
- priv->num_leds++;
- led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
- } else {
+ if (ret) {
dev_err(dev, "failed to register PWM led for %s: %d\n",
led->name, ret);
+ return ret;
}
- return ret;
+ ret = led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
+ if (ret) {
+ dev_err(dev, "failed to set led PWM value for %s: %d",
+ led->name, ret);
+ return ret;
+ }
+
+ priv->num_leds++;
+ return 0;
}
static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
diff --git a/drivers/leds/leds-sgm3140.c b/drivers/leds/leds-sgm3140.c
new file mode 100644
index 000000000000..c494b934ae09
--- /dev/null
+++ b/drivers/leds/leds-sgm3140.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Luca Weiss <luca@z3ntu.xyz>
+
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-flash-led-class.h>
+
+#define FLASH_TIMEOUT_DEFAULT 250000U /* 250ms */
+#define FLASH_MAX_TIMEOUT_DEFAULT 300000U /* 300ms */
+
+struct sgm3140 {
+ struct led_classdev_flash fled_cdev;
+ struct v4l2_flash *v4l2_flash;
+
+ struct timer_list powerdown_timer;
+
+ struct gpio_desc *flash_gpio;
+ struct gpio_desc *enable_gpio;
+ struct regulator *vin_regulator;
+
+ bool enabled;
+
+ /* current timeout in us */
+ u32 timeout;
+ /* maximum timeout in us */
+ u32 max_timeout;
+};
+
+static struct sgm3140 *flcdev_to_sgm3140(struct led_classdev_flash *flcdev)
+{
+ return container_of(flcdev, struct sgm3140, fled_cdev);
+}
+
+static int sgm3140_strobe_set(struct led_classdev_flash *fled_cdev, bool state)
+{
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+ int ret;
+
+ if (priv->enabled == state)
+ return 0;
+
+ if (state) {
+ ret = regulator_enable(priv->vin_regulator);
+ if (ret) {
+ dev_err(fled_cdev->led_cdev.dev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ gpiod_set_value_cansleep(priv->flash_gpio, 1);
+ gpiod_set_value_cansleep(priv->enable_gpio, 1);
+ mod_timer(&priv->powerdown_timer,
+ jiffies + usecs_to_jiffies(priv->timeout));
+ } else {
+ del_timer_sync(&priv->powerdown_timer);
+ gpiod_set_value_cansleep(priv->enable_gpio, 0);
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
+ ret = regulator_disable(priv->vin_regulator);
+ if (ret) {
+ dev_err(fled_cdev->led_cdev.dev,
+ "failed to disable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ priv->enabled = state;
+
+ return 0;
+}
+
+static int sgm3140_strobe_get(struct led_classdev_flash *fled_cdev, bool *state)
+{
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+
+ *state = timer_pending(&priv->powerdown_timer);
+
+ return 0;
+}
+
+static int sgm3140_timeout_set(struct led_classdev_flash *fled_cdev,
+ u32 timeout)
+{
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+
+ priv->timeout = timeout;
+
+ return 0;
+}
+
+static const struct led_flash_ops sgm3140_flash_ops = {
+ .strobe_set = sgm3140_strobe_set,
+ .strobe_get = sgm3140_strobe_get,
+ .timeout_set = sgm3140_timeout_set,
+};
+
+static int sgm3140_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+ bool enable = brightness == LED_ON;
+ int ret;
+
+ if (priv->enabled == enable)
+ return 0;
+
+ if (enable) {
+ ret = regulator_enable(priv->vin_regulator);
+ if (ret) {
+ dev_err(led_cdev->dev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ gpiod_set_value_cansleep(priv->enable_gpio, 1);
+ } else {
+ gpiod_set_value_cansleep(priv->enable_gpio, 0);
+ ret = regulator_disable(priv->vin_regulator);
+ if (ret) {
+ dev_err(led_cdev->dev,
+ "failed to disable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ priv->enabled = enable;
+
+ return 0;
+}
+
+static void sgm3140_powerdown_timer(struct timer_list *t)
+{
+ struct sgm3140 *priv = from_timer(priv, t, powerdown_timer);
+
+ gpiod_set_value(priv->enable_gpio, 0);
+ gpiod_set_value(priv->flash_gpio, 0);
+ regulator_disable(priv->vin_regulator);
+
+ priv->enabled = false;
+}
+
+static void sgm3140_init_flash_timeout(struct sgm3140 *priv)
+{
+ struct led_classdev_flash *fled_cdev = &priv->fled_cdev;
+ struct led_flash_setting *s;
+
+ /* Init flash timeout setting */
+ s = &fled_cdev->timeout;
+ s->min = 1;
+ s->max = priv->max_timeout;
+ s->step = 1;
+ s->val = FLASH_TIMEOUT_DEFAULT;
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+static void sgm3140_init_v4l2_flash_config(struct sgm3140 *priv,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+ struct led_classdev *led_cdev = &priv->fled_cdev.led_cdev;
+ struct led_flash_setting *s;
+
+ strscpy(v4l2_sd_cfg->dev_name, led_cdev->dev->kobj.name,
+ sizeof(v4l2_sd_cfg->dev_name));
+
+ /* Init flash intensity setting */
+ s = &v4l2_sd_cfg->intensity;
+ s->min = 0;
+ s->max = 1;
+ s->step = 1;
+ s->val = 1;
+}
+
+#else
+static void sgm3140_init_v4l2_flash_config(struct sgm3140 *priv,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+#endif
+
+static int sgm3140_probe(struct platform_device *pdev)
+{
+ struct sgm3140 *priv;
+ struct led_classdev *led_cdev;
+ struct led_classdev_flash *fled_cdev;
+ struct led_init_data init_data = {};
+ struct fwnode_handle *child_node;
+ struct v4l2_flash_config v4l2_sd_cfg = {};
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->flash_gpio = devm_gpiod_get(&pdev->dev, "flash", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(priv->flash_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request flash gpio: %d\n", ret);
+ return ret;
+ }
+
+ priv->enable_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(priv->enable_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request enable gpio: %d\n", ret);
+ return ret;
+ }
+
+ priv->vin_regulator = devm_regulator_get(&pdev->dev, "vin");
+ ret = PTR_ERR_OR_ZERO(priv->vin_regulator);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request regulator: %d\n", ret);
+ return ret;
+ }
+
+ child_node = fwnode_get_next_available_child_node(pdev->dev.fwnode,
+ NULL);
+ if (!child_node) {
+ dev_err(&pdev->dev,
+ "No fwnode child node found for connected LED.\n");
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32(child_node, "flash-max-timeout-us",
+ &priv->max_timeout);
+ if (ret) {
+ priv->max_timeout = FLASH_MAX_TIMEOUT_DEFAULT;
+ dev_warn(&pdev->dev,
+ "flash-max-timeout-us property missing\n");
+ }
+
+ /*
+ * Set default timeout to FLASH_TIMEOUT_DEFAULT except if max_timeout
+ * from DT is lower.
+ */
+ priv->timeout = min(priv->max_timeout, FLASH_TIMEOUT_DEFAULT);
+
+ timer_setup(&priv->powerdown_timer, sgm3140_powerdown_timer, 0);
+
+ fled_cdev = &priv->fled_cdev;
+ led_cdev = &fled_cdev->led_cdev;
+
+ fled_cdev->ops = &sgm3140_flash_ops;
+
+ led_cdev->brightness_set_blocking = sgm3140_brightness_set;
+ led_cdev->max_brightness = LED_ON;
+ led_cdev->flags |= LED_DEV_CAP_FLASH;
+
+ sgm3140_init_flash_timeout(priv);
+
+ init_data.fwnode = child_node;
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Register in the LED subsystem */
+ ret = devm_led_classdev_flash_register_ext(&pdev->dev,
+ fled_cdev, &init_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register flash device: %d\n",
+ ret);
+ goto err;
+ }
+
+ sgm3140_init_v4l2_flash_config(priv, &v4l2_sd_cfg);
+
+ /* Create V4L2 Flash subdev */
+ priv->v4l2_flash = v4l2_flash_init(&pdev->dev,
+ child_node,
+ fled_cdev, NULL,
+ &v4l2_sd_cfg);
+ if (IS_ERR(priv->v4l2_flash)) {
+ ret = PTR_ERR(priv->v4l2_flash);
+ goto err;
+ }
+
+ return ret;
+
+err:
+ fwnode_handle_put(child_node);
+ return ret;
+}
+
+static int sgm3140_remove(struct platform_device *pdev)
+{
+ struct sgm3140 *priv = platform_get_drvdata(pdev);
+
+ del_timer_sync(&priv->powerdown_timer);
+
+ v4l2_flash_release(priv->v4l2_flash);
+
+ return 0;
+}
+
+static const struct of_device_id sgm3140_dt_match[] = {
+ { .compatible = "sgmicro,sgm3140" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sgm3140_dt_match);
+
+static struct platform_driver sgm3140_driver = {
+ .probe = sgm3140_probe,
+ .remove = sgm3140_remove,
+ .driver = {
+ .name = "sgm3140",
+ .of_match_table = sgm3140_dt_match,
+ },
+};
+
+module_platform_driver(sgm3140_driver);
+
+MODULE_AUTHOR("Luca Weiss <luca@z3ntu.xyz>");
+MODULE_DESCRIPTION("SG Micro SGM3140 charge pump LED driver");
+MODULE_LICENSE("GPL v2");
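For context, a consumer holding the registered struct led_classdev_flash would drive this device through the generic flash-class helpers, which land in the sgm3140_flash_ops above. A hedged sketch; how the fled_cdev pointer is obtained is outside this driver:

#include <linux/led-class-flash.h>

/* Fire a single 200ms pulse; the value is clamped by the timeout
 * range initialized in sgm3140_init_flash_timeout().
 */
static int fire_flash(struct led_classdev_flash *fled_cdev)
{
	int ret;

	ret = led_set_flash_timeout(fled_cdev, 200000);
	if (ret)
		return ret;

	/* Raises FLASH/EN; the driver's powerdown timer ends the pulse. */
	return led_set_flash_strobe(fled_cdev, true);
}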
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 58be20cae183..1128ac75443c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -93,7 +93,7 @@
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/workqueue.h>
#include <linux/leds-tca6507.h>
#include <linux/of.h>
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index a8911ebd30e5..0929f1275814 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -214,8 +214,9 @@ tlc591xx_probe(struct i2c_client *client,
err = devm_led_classdev_register_ext(dev, &led->ldev,
&init_data);
if (err < 0) {
- dev_err(dev, "couldn't register LED %s\n",
- led->ldev.name);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "couldn't register LED %s\n",
+ led->ldev.name);
return err;
}
}
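Suppressing the message for -EPROBE_DEFER avoids log noise when the probe is simply retried later. Newer kernels add dev_err_probe(), which folds the check, the message, and the return value into one call; a sketch of the same hunk written that way, assuming a kernel that has the helper:

#include <linux/device.h>
#include <linux/leds.h>

static int register_led(struct device *dev, struct led_classdev *ldev,
			struct led_init_data *init_data)
{
	int err;

	err = devm_led_classdev_register_ext(dev, ldev, init_data);
	if (err < 0)
		/* Logs only when err != -EPROBE_DEFER, then returns err. */
		return dev_err_probe(dev, err, "couldn't register LED %s\n",
				     ldev->name);
	return 0;
}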
diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index 34a68604c46c..b4688d1d9d2b 100644
--- a/drivers/leds/trigger/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -28,7 +28,7 @@ static ssize_t led_delay_on_store(struct device *dev,
{
struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
ret = kstrtoul(buf, 10, &state);
if (ret)
@@ -53,7 +53,7 @@ static ssize_t led_delay_off_store(struct device *dev,
{
struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
ret = kstrtoul(buf, 10, &state);
if (ret)
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 5c1034c22197..f185f1a00008 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -21,16 +21,14 @@
void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
unsigned long flags)
{
- struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
sector_t lba = pblk_get_lba(bio);
- unsigned long start_time = jiffies;
+ unsigned long start_time;
unsigned int bpos, pos;
int nr_entries = pblk_get_secs(bio);
int i, ret;
- generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
- &pblk->disk->part0);
+ start_time = bio_start_io_acct(bio);
/* Update the write buffer head (mem) with the entries that we can
* write. The write in itself cannot fail, so there is no need to
@@ -79,7 +77,7 @@ retry:
pblk_rl_inserted(&pblk->rl, nr_entries);
out:
- generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
+ bio_end_io_acct(bio, start_time);
pblk_write_should_kick(pblk);
if (ret == NVM_IO_DONE)
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 9a967a2e83dd..6e677ff62cc9 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -145,9 +145,8 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
int ret = 0;
map_size = pblk_trans_map_size(pblk);
- pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN
- | __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM,
- PAGE_KERNEL);
+ pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
if (!pblk->trans_map) {
pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
map_size);
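The hunk above tracks the 5.8 interface change: __vmalloc() lost its pgprot_t argument because every caller passed PAGE_KERNEL, leaving only the size and the gfp flags. A one-function sketch of the new call:

#include <linux/vmalloc.h>

static void *alloc_trans_map(unsigned long map_size)
{
	/* Previously: __vmalloc(map_size, gfp, PAGE_KERNEL).
	 * The mapping is now always PAGE_KERNEL, so only size and
	 * allocation flags remain.
	 */
	return __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
			 __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
}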
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 8efd14e683dc..140927ebf41e 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -187,12 +187,11 @@ static void pblk_end_user_read(struct bio *bio, int error)
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
bool put_line)
{
- struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *int_bio = rqd->bio;
unsigned long start_time = r_ctx->start_time;
- generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);
+ bio_end_io_acct(int_bio, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
@@ -263,17 +262,15 @@ retry:
void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
bool from_cache;
struct pblk_g_ctx *r_ctx;
struct nvm_rq *rqd;
struct bio *int_bio, *split_bio;
+ unsigned long start_time;
- generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
- &pblk->disk->part0);
+ start_time = bio_start_io_acct(bio);
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
@@ -283,7 +280,7 @@ void pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
- r_ctx->start_time = jiffies;
+ r_ctx->start_time = start_time;
r_ctx->lba = blba;
if (pblk_alloc_rqd_meta(pblk, rqd)) {
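bio_start_io_acct()/bio_end_io_acct() replace the generic_start_io_acct()/generic_end_io_acct() pair in bio-based drivers: the start helper charges the statistics and returns a start time, and the completion path hands that value back, so the driver no longer reaches into the queue or part0. A hedged sketch of the pattern; the context struct and the queuing step are placeholders:

#include <linux/blkdev.h>
#include <linux/bio.h>

struct my_io_ctx {
	unsigned long start_time;	/* jiffies at submission */
};

static void my_submit(struct bio *bio, struct my_io_ctx *ctx)
{
	/* Charges the I/O statistics and records the start time. */
	ctx->start_time = bio_start_io_acct(bio);
	/* ... queue the bio for processing ... */
}

static void my_complete(struct bio *bio, struct my_io_ctx *ctx)
{
	/* Accounts the completion against the recorded start time. */
	bio_end_io_acct(bio, ctx->start_time);
	bio_endio(bio);
}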
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index cbd46c1c5bf7..fcb9d7bd5bd0 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -247,7 +247,6 @@ config PMAC_RACKMETER
config SENSORS_AMS
tristate "Apple Motion Sensor driver"
depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C)
- select INPUT_POLLDEV
help
Support for the motion sensor included in PowerBooks. Includes
implementations for PMU and I2C.
diff --git a/drivers/macintosh/ams/ams-input.c b/drivers/macintosh/ams/ams-input.c
index 06a96b3f11de..0da493d449b2 100644
--- a/drivers/macintosh/ams/ams-input.c
+++ b/drivers/macintosh/ams/ams-input.c
@@ -25,9 +25,8 @@ MODULE_PARM_DESC(invert, "Invert input data on X and Y axis");
static DEFINE_MUTEX(ams_input_mutex);
-static void ams_idev_poll(struct input_polled_dev *dev)
+static void ams_idev_poll(struct input_dev *idev)
{
- struct input_dev *idev = dev->input;
s8 x, y, z;
mutex_lock(&ams_info.lock);
@@ -59,14 +58,10 @@ static int ams_input_enable(void)
ams_info.ycalib = y;
ams_info.zcalib = z;
- ams_info.idev = input_allocate_polled_device();
- if (!ams_info.idev)
+ input = input_allocate_device();
+ if (!input)
return -ENOMEM;
- ams_info.idev->poll = ams_idev_poll;
- ams_info.idev->poll_interval = 25;
-
- input = ams_info.idev->input;
input->name = "Apple Motion Sensor";
input->id.bustype = ams_info.bustype;
input->id.vendor = 0;
@@ -75,28 +70,32 @@ static int ams_input_enable(void)
input_set_abs_params(input, ABS_X, -50, 50, 3, 0);
input_set_abs_params(input, ABS_Y, -50, 50, 3, 0);
input_set_abs_params(input, ABS_Z, -50, 50, 3, 0);
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
- set_bit(EV_ABS, input->evbit);
- set_bit(EV_KEY, input->evbit);
- set_bit(BTN_TOUCH, input->keybit);
+ error = input_setup_polling(input, ams_idev_poll);
+ if (error)
+ goto err_free_input;
- error = input_register_polled_device(ams_info.idev);
- if (error) {
- input_free_polled_device(ams_info.idev);
- ams_info.idev = NULL;
- return error;
- }
+ input_set_poll_interval(input, 25);
+ error = input_register_device(input);
+ if (error)
+ goto err_free_input;
+
+ ams_info.idev = input;
joystick = true;
return 0;
+
+err_free_input:
+ input_free_device(input);
+ return error;
}
static void ams_input_disable(void)
{
if (ams_info.idev) {
- input_unregister_polled_device(ams_info.idev);
- input_free_polled_device(ams_info.idev);
+ input_unregister_device(ams_info.idev);
ams_info.idev = NULL;
}
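The ams conversion follows the standard recipe for retiring input_polled_dev: allocate an ordinary input device, attach a poll handler with input_setup_polling(), choose an interval, then register it. A condensed sketch of the sequence; the poll body is device-specific:

#include <linux/input.h>

static void my_poll(struct input_dev *input)
{
	/* Read the hardware, then input_report_*() and input_sync(). */
}

static int my_register_polled(void)
{
	struct input_dev *input;
	int error;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	input->name = "example polled device";

	error = input_setup_polling(input, my_poll);
	if (error)
		goto err_free;

	input_set_poll_interval(input, 25);	/* milliseconds */

	error = input_register_device(input);
	if (error)
		goto err_free;

	return 0;

err_free:
	input_free_device(input);
	return error;
}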
diff --git a/drivers/macintosh/ams/ams.h b/drivers/macintosh/ams/ams.h
index fe8d596f9845..935bdd9cd9a6 100644
--- a/drivers/macintosh/ams/ams.h
+++ b/drivers/macintosh/ams/ams.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/i2c.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
@@ -51,7 +51,7 @@ struct ams {
#endif
/* Joystick emulation */
- struct input_polled_dev *idev;
+ struct input_dev *idev;
__u16 bustype;
/* calibrated null values */
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 7af0c536d568..28b8581b44dd 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -183,8 +183,7 @@ static void mac_hid_stop_emulation(void)
}
static int mac_hid_toggle_emumouse(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int old_val = *valp;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 83eb05bf85ff..8450d7c008d0 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2184,8 +2184,6 @@ pmu_read(struct file *file, char __user *buf,
if (count < 1 || !pp)
return -EINVAL;
- if (!access_ok(buf, count))
- return -EFAULT;
spin_lock_irqsave(&pp->lock, flags);
add_wait_queue(&pp->wait, &wait);
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index 4150301a89a5..e8377ce0a95a 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -132,14 +132,6 @@ static int create_cpu_loop(int cpu)
s32 tmax;
int fmin;
- /* Get PID params from the appropriate SAT */
- hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
- if (hdr == NULL) {
- printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
- return -EINVAL;
- }
- piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
-
/* Get FVT params to get Tmax; if not found, assume default */
hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL);
if (hdr) {
@@ -152,6 +144,16 @@ static int create_cpu_loop(int cpu)
if (tmax < cpu_all_tmax)
cpu_all_tmax = tmax;
+ kfree(hdr);
+
+ /* Get PID params from the appropriate SAT */
+ hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
+ if (hdr == NULL) {
+ printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
+ return -EINVAL;
+ }
+ piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
+
/*
* Darwin has a minimum fan speed of 1000 rpm for the 4-way and
* 515 for the 2-way. That appears to be overkill, so for now,
@@ -174,6 +176,9 @@ static int create_cpu_loop(int cpu)
pid.min = fmin;
wf_cpu_pid_init(&cpu_pid[cpu], &pid);
+
+ kfree(hdr);
+
return 0;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index d6d5ab23c088..6665b56865b7 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -269,6 +269,7 @@ config DM_UNSTRIPED
config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM
+ depends on (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n)
select CRYPTO
select CRYPTO_CBC
select CRYPTO_ESSIV
@@ -336,6 +337,14 @@ config DM_WRITECACHE
The writecache target doesn't cache reads because reads are supposed
to be cached in standard RAM.
+config DM_EBS
+ tristate "Emulated block size target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM
+ select DM_BUFIO
+ help
+ dm-ebs emulates a smaller logical block size on backing devices
+ with a larger one (e.g. 512-byte sectors on 4K-native disks).
+
config DM_ERA
tristate "Era target (EXPERIMENTAL)"
depends on BLK_DEV_DM
@@ -443,6 +452,17 @@ config DM_MULTIPATH_ST
If unsure, say N.
+config DM_MULTIPATH_HST
+ tristate "I/O Path Selector based on historical service time"
+ depends on DM_MULTIPATH
+ help
+ This path selector is a dynamic load balancer which selects
+ the path expected to complete the incoming I/O in the shortest
+ time by comparing estimated service time (based on historical
+ service time).
+
+ If unsure, say N.
+
config DM_DELAY
tristate "I/O delaying target"
depends on BLK_DEV_DM
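The historical-service-time selector added above keeps a per-path estimate of service time and routes each I/O to the path predicted to finish it soonest. A toy sketch of one way such an estimate can be maintained, using an exponentially weighted moving average; this illustrates the idea only and is not the dm-historical-service-time code:

#include <linux/types.h>

struct path_stats {
	u64 avg_ns;	/* smoothed service-time estimate */
};

/* EWMA with weight 1/8: avg += (sample - avg) / 8 */
static void hst_update(struct path_stats *ps, u64 sample_ns)
{
	ps->avg_ns += ((s64)sample_ns - (s64)ps->avg_ns) >> 3;
}

/* Pick the path whose history predicts the earliest completion. */
static int hst_select(const struct path_stats *paths, int n)
{
	int i, best = 0;

	for (i = 1; i < n; i++)
		if (paths[i].avg_ns < paths[best].avg_ns)
			best = i;
	return best;
}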
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d91a7edcd2ab..31840f95cd40 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -17,6 +17,7 @@ dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o \
dm-cache-background-tracker.o
dm-cache-smq-y += dm-cache-policy-smq.o
+dm-ebs-y += dm-ebs-target.o
dm-era-y += dm-era-target.o
dm-clone-y += dm-clone-target.o dm-clone-metadata.o
dm-verity-y += dm-verity-target.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
+obj-$(CONFIG_DM_MULTIPATH_HST) += dm-historical-service-time.o
obj-$(CONFIG_DM_SWITCH) += dm-switch.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_PERSISTENT_DATA) += persistent-data/
@@ -65,6 +67,7 @@ obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
+obj-$(CONFIG_DM_EBS) += dm-ebs.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_CLONE) += dm-clone.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 6dfa653d30db..bf7dd96db9b3 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -26,3 +26,12 @@ config BCACHE_CLOSURES_DEBUG
Keeps all active closures in a linked list and provides a debugfs
interface to list them, which makes it possible to see asynchronous
operations that get stuck.
+
+config BCACHE_ASYNC_REGISTRATION
+ bool "Asynchronous device registration (EXPERIMENTAL)"
+ depends on BCACHE
+ help
+ Add a sysfs file /sys/fs/bcache/register_async. Writing a device
+ path into this file returns immediately; the real registration
+ work is handled asynchronously by a kernel workqueue.
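The mechanism behind register_async (implemented in super.c further down) is ordinary workqueue deferral: the sysfs write handler packs its arguments into a heap structure, queues a work item on system_wq, and returns to user space immediately. A stripped-down sketch of that shape:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct reg_args {
	struct work_struct work;
	char *path;	/* what to register; ownership passes to the worker */
};

static void reg_worker(struct work_struct *work)
{
	struct reg_args *args = container_of(work, struct reg_args, work);

	/* ... the slow registration runs here, off the writer's path ... */

	kfree(args->path);
	kfree(args);
}

static int start_async_registration(char *path)
{
	struct reg_args *args = kzalloc(sizeof(*args), GFP_KERNEL);

	if (!args)
		return -ENOMEM;

	args->path = path;
	INIT_WORK(&args->work, reg_worker);
	queue_work(system_wq, &args->work);	/* returns at once */
	return 0;
}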
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 74a9849ea164..221e0191b687 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -176,7 +176,7 @@
* - updates to non leaf nodes just happen synchronously (see btree_split()).
*/
-#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
#include <linux/bcache.h>
#include <linux/bio.h>
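Dropping the "\n" from pr_fmt() is what enables the pr_cont() conversions in bset.c and extents.c below: with the newline baked into the prefix macro, every pr_err() closed the line, so a message could never be continued. A small sketch of the corrected pattern:

/* pr_fmt() must be defined before printk.h is pulled in; it now adds
 * only the prefix, and each call site supplies its own "\n".
 */
#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include <linux/printk.h>
#include <linux/types.h>

static void dump_key(u64 inode, u64 offset, const char *status)
{
	pr_err("%llu:%llu", inode, offset);	/* open the line */
	pr_cont(" %s\n", status);		/* continue and close it */
}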
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 4385303836d8..4995fcaefe29 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -6,7 +6,7 @@
* Copyright 2012 Google, Inc.
*/
-#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
#include "util.h"
#include "bset.h"
@@ -31,7 +31,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
if (b->ops->key_dump)
b->ops->key_dump(b, k);
else
- pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+ pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
if (next < bset_bkey_last(i) &&
bkey_cmp(k, b->ops->is_extents ?
@@ -1225,7 +1225,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
- pr_debug("sorted %i keys", out->keys);
+ pr_debug("sorted %i keys\n", out->keys);
}
static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 72856e5f23a3..39de94edd73a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -619,7 +619,7 @@ retry:
* and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
*/
if (btree_node_journal_flush(b)) {
- pr_debug("bnode %p is flushing by journal, retry", b);
+ pr_debug("bnode %p is flushing by journal, retry\n", b);
mutex_unlock(&b->write_lock);
udelay(1);
goto retry;
@@ -802,7 +802,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->shrink.batch = c->btree_pages * 2;
if (register_shrinker(&c->shrink))
- pr_warn("bcache: %s: could not register shrinker",
+ pr_warn("bcache: %s: could not register shrinker\n",
__func__);
return 0;
@@ -1054,7 +1054,7 @@ retry:
*/
if (btree_node_journal_flush(b)) {
mutex_unlock(&b->write_lock);
- pr_debug("bnode %p journal_flush set, retry", b);
+ pr_debug("bnode %p journal_flush set, retry\n", b);
udelay(1);
goto retry;
}
@@ -1798,7 +1798,7 @@ static void bch_btree_gc(struct cache_set *c)
schedule_timeout_interruptible(msecs_to_jiffies
(GC_SLEEP_MS));
else if (ret)
- pr_warn("gc failed!");
+ pr_warn("gc failed!\n");
} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
bch_btree_gc_finish(c);
@@ -1907,10 +1907,8 @@ static int bch_btree_check_thread(void *arg)
struct btree_iter iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
- int i, n;
k = p = NULL;
- i = n = 0;
cur_idx = prev_idx = 0;
ret = 0;
@@ -2045,7 +2043,7 @@ int bch_btree_check(struct cache_set *c)
&check_state->infos[i],
name);
if (IS_ERR(check_state->infos[i].thread)) {
- pr_err("fails to run thread bch_btrchk[%d]", i);
+ pr_err("failed to run thread bch_btrchk[%d]\n", i);
for (--i; i >= 0; i--)
kthread_stop(check_state->infos[i].thread);
ret = -ENOMEM;
@@ -2456,7 +2454,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
if (ret) {
struct bkey *k;
- pr_err("error %i", ret);
+ pr_err("error %i\n", ret);
while ((k = bch_keylist_pop(keys)))
bkey_put(c, k);
@@ -2744,7 +2742,7 @@ struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
break;
if (bkey_cmp(&buf->last_scanned, end) >= 0) {
- pr_debug("scan finished");
+ pr_debug("scan finished\n");
break;
}
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 886710043025..9162af5bb6ec 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -130,18 +130,18 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
char buf[80];
bch_extent_to_text(buf, sizeof(buf), k);
- pr_err(" %s", buf);
+ pr_cont(" %s", buf);
for (j = 0; j < KEY_PTRS(k); j++) {
size_t n = PTR_BUCKET_NR(b->c, k, j);
- pr_err(" bucket %zu", n);
+ pr_cont(" bucket %zu", n);
if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
- pr_err(" prio %i",
- PTR_BUCKET(b->c, k, j)->prio);
+ pr_cont(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
}
- pr_err(" %s\n", bch_ptr_status(b->c, k));
+ pr_cont(" %s\n", bch_ptr_status(b->c, k));
}
/* Btree ptrs */
@@ -553,7 +553,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
if (stale && KEY_DIRTY(k)) {
bch_extent_to_text(buf, sizeof(buf), k);
- pr_info("stale dirty pointer, stale %u, key: %s",
+ pr_info("stale dirty pointer, stale %u, key: %s\n",
stale, buf);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 4d93f07f63e5..b25ee33b0d0b 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -65,14 +65,14 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
* we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
*/
if (bio->bi_opf & REQ_RAHEAD) {
- pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
+ pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n",
dc->backing_dev_name);
return;
}
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
- pr_err("%s: IO error on backing device, unrecoverable",
+ pr_err("%s: IO error on backing device, unrecoverable\n",
dc->backing_dev_name);
else
bch_cached_dev_error(dc);
@@ -123,12 +123,12 @@ void bch_count_io_errors(struct cache *ca,
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s%s",
+ pr_err("%s: IO error on %s%s\n",
ca->cache_dev_name, m,
is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
- "%s: too many IO errors %s",
+ "%s: too many IO errors %s\n",
ca->cache_dev_name, m);
}
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 0e3ff9745ac7..90aac4e2333f 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -47,7 +47,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
closure_init_stack(&cl);
- pr_debug("reading %u", bucket_index);
+ pr_debug("reading %u\n", bucket_index);
while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset;
@@ -78,13 +78,13 @@ reread: left = ca->sb.bucket_size - offset;
size_t blocks, bytes = set_bytes(j);
if (j->magic != jset_magic(&ca->sb)) {
- pr_debug("%u: bad magic", bucket_index);
+ pr_debug("%u: bad magic\n", bucket_index);
return ret;
}
if (bytes > left << 9 ||
bytes > PAGE_SIZE << JSET_BITS) {
- pr_info("%u: too big, %zu bytes, offset %u",
+ pr_info("%u: too big, %zu bytes, offset %u\n",
bucket_index, bytes, offset);
return ret;
}
@@ -93,7 +93,7 @@ reread: left = ca->sb.bucket_size - offset;
goto reread;
if (j->csum != csum_set(j)) {
- pr_info("%u: bad csum, %zu bytes, offset %u",
+ pr_info("%u: bad csum, %zu bytes, offset %u\n",
bucket_index, bytes, offset);
return ret;
}
@@ -190,7 +190,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
uint64_t seq;
bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
- pr_debug("%u journal buckets", ca->sb.njournal_buckets);
+ pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
/*
* Read journal buckets ordered by golden ratio hash to quickly
@@ -215,7 +215,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
* If that fails, check all the buckets we haven't checked
* already
*/
- pr_debug("falling back to linear search");
+ pr_debug("falling back to linear search\n");
for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
l < ca->sb.njournal_buckets;
@@ -233,7 +233,7 @@ bsearch:
/* Binary search */
m = l;
r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
- pr_debug("starting binary search, l %u r %u", l, r);
+ pr_debug("starting binary search, l %u r %u\n", l, r);
while (l + 1 < r) {
seq = list_entry(list->prev, struct journal_replay,
@@ -253,7 +253,7 @@ bsearch:
* Read buckets in reverse order until we stop finding more
* journal entries
*/
- pr_debug("finishing up: m %u njournal_buckets %u",
+ pr_debug("finishing up: m %u njournal_buckets %u\n",
m, ca->sb.njournal_buckets);
l = m;
@@ -370,10 +370,10 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
if (n != i->j.seq) {
if (n == start && is_discard_enabled(s))
- pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
+ pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
n, i->j.seq - 1, start, end);
else {
- pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+ pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
n, i->j.seq - 1, start, end);
ret = -EIO;
goto err;
@@ -403,7 +403,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
entries++;
}
- pr_info("journal replay done, %i keys in %i entries, seq %llu",
+ pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
keys, entries, end);
err:
while (!list_empty(list)) {
@@ -481,7 +481,7 @@ static void btree_flush_write(struct cache_set *c)
break;
if (btree_node_journal_flush(b))
- pr_err("BUG: flush_write bit should not be set here!");
+ pr_err("BUG: flush_write bit should not be set here!\n");
mutex_lock(&b->write_lock);
@@ -534,13 +534,13 @@ static void btree_flush_write(struct cache_set *c)
for (i = 0; i < nr; i++) {
b = btree_nodes[i];
if (!b) {
- pr_err("BUG: btree_nodes[%d] is NULL", i);
+ pr_err("BUG: btree_nodes[%d] is NULL\n", i);
continue;
}
/* safe to check without holding b->write_lock */
if (!btree_node_journal_flush(b)) {
- pr_err("BUG: bnode %p: journal_flush bit cleaned", b);
+ pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
continue;
}
@@ -548,14 +548,14 @@ static void btree_flush_write(struct cache_set *c)
if (!btree_current_write(b)->journal) {
clear_bit(BTREE_NODE_journal_flush, &b->flags);
mutex_unlock(&b->write_lock);
- pr_debug("bnode %p: written by others", b);
+ pr_debug("bnode %p: written by others\n", b);
continue;
}
if (!btree_node_dirty(b)) {
clear_bit(BTREE_NODE_journal_flush, &b->flags);
mutex_unlock(&b->write_lock);
- pr_debug("bnode %p: dirty bit cleaned by others", b);
+ pr_debug("bnode %p: dirty bit cleaned by others\n", b);
continue;
}
@@ -716,7 +716,7 @@ void bch_journal_next(struct journal *j)
j->cur->data->keys = 0;
if (fifo_full(&j->pin))
- pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
+ pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
}
static void journal_write_endio(struct bio *bio)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 71a90fbec314..7acf024e99f3 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -110,7 +110,7 @@ static void bch_data_invalidate(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio;
- pr_debug("invalidating %i sectors from %llu",
+ pr_debug("invalidating %i sectors from %llu\n",
bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
@@ -396,7 +396,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
- pr_debug("skipping unaligned io");
+ pr_debug("skipping unaligned io\n");
goto skip;
}
@@ -650,7 +650,7 @@ static void backing_request_endio(struct bio *bio)
*/
if (unlikely(s->iop.writeback &&
bio->bi_opf & REQ_PREFLUSH)) {
- pr_err("Can't flush %s: returned bi_status %i",
+ pr_err("Can't flush %s: returned bi_status %i\n",
dc->backing_dev_name, bio->bi_status);
} else {
/* set to orig_bio->bi_status in bio_complete() */
@@ -668,9 +668,7 @@ static void backing_request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
- &s->d->disk->part0, s->start_time);
-
+ bio_end_io_acct(s->orig_bio, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
s->orig_bio->bi_status = s->iop.status;
bio_endio(s->orig_bio);
@@ -730,7 +728,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->recoverable = 1;
s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
- s->start_time = jiffies;
+ s->start_time = bio_start_io_acct(bio);
s->iop.c = d->c;
s->iop.bio = NULL;
@@ -1082,8 +1080,7 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io = ddip->bi_end_io;
bio->bi_private = ddip->bi_private;
- generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
- &ddip->d->disk->part0, ddip->start_time);
+ bio_end_io_acct(bio, ddip->start_time);
if (bio->bi_status) {
struct cached_dev *dc = container_of(ddip->d,
@@ -1108,7 +1105,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
*/
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
ddip->d = d;
- ddip->start_time = jiffies;
+ ddip->start_time = bio_start_io_acct(bio);
ddip->bi_end_io = bio->bi_end_io;
ddip->bi_private = bio->bi_private;
bio->bi_end_io = detached_dev_end_io;
@@ -1190,11 +1187,6 @@ blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
}
}
- generic_start_io_acct(q,
- bio_op(bio),
- bio_sectors(bio),
- &d->disk->part0);
-
bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
@@ -1311,8 +1303,6 @@ blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
- generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
-
s = search_alloc(bio, d);
cl = &s->cl;
bio = &s->bio.bio;
@@ -1372,7 +1362,6 @@ void bch_flash_dev_request_init(struct bcache_device *d)
{
struct gendisk *g = d->disk;
- g->queue->make_request_fn = flash_dev_make_request;
g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index d98354fa28e3..f9975c22bf7e 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -89,7 +89,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
sb->d[i] = le64_to_cpu(s->d[i]);
- pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
+ pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
sb->version, sb->flags, sb->seq, sb->keys);
err = "Not a bcache superblock (bad offset)";
@@ -234,7 +234,7 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
out->csum = csum_set(out);
- pr_debug("ver %llu, flags %llu, seq %llu",
+ pr_debug("ver %llu, flags %llu, seq %llu\n",
sb->version, sb->flags, sb->seq);
submit_bio(bio);
@@ -365,11 +365,11 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
}
bch_extent_to_text(buf, sizeof(buf), k);
- pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
+ pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
if (!bch_is_zero(u->uuid, 16))
- pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
+ pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
u - c->uuids, u->uuid, u->label,
u->first_reg, u->last_reg, u->invalidated);
@@ -534,7 +534,7 @@ int bch_prio_write(struct cache *ca, bool wait)
struct bucket *b;
struct closure cl;
- pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu",
+ pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
fifo_used(&ca->free[RESERVE_PRIO]),
fifo_used(&ca->free[RESERVE_NONE]),
fifo_used(&ca->free_inc));
@@ -629,12 +629,12 @@ static int prio_read(struct cache *ca, uint64_t bucket)
if (p->csum !=
bch_crc64(&p->magic, bucket_bytes(ca) - 8)) {
- pr_warn("bad csum reading priorities");
+ pr_warn("bad csum reading priorities\n");
goto out;
}
if (p->magic != pset_magic(&ca->sb)) {
- pr_warn("bad magic reading priorities");
+ pr_warn("bad magic reading priorities\n");
goto out;
}
@@ -728,11 +728,11 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
if (ret < 0)
- pr_err("Couldn't create device -> cache set symlink");
+ pr_err("Couldn't create device -> cache set symlink\n");
ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
if (ret < 0)
- pr_err("Couldn't create cache set -> device symlink");
+ pr_err("Couldn't create cache set -> device symlink\n");
clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}
@@ -789,15 +789,17 @@ static void bcache_device_free(struct bcache_device *d)
lockdep_assert_held(&bch_register_lock);
if (disk)
- pr_info("%s stopped", disk->disk_name);
+ pr_info("%s stopped\n", disk->disk_name);
else
- pr_err("bcache device (NULL gendisk) stopped");
+ pr_err("bcache device (NULL gendisk) stopped\n");
if (d->c)
bcache_device_detach(d);
if (disk) {
- if (disk->flags & GENHD_FL_UP)
+ bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
+
+ if (disk_added)
del_gendisk(disk);
if (disk->queue)
@@ -805,7 +807,8 @@ static void bcache_device_free(struct bcache_device *d)
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
- put_disk(disk);
+ if (disk_added)
+ put_disk(disk);
}
bioset_exit(&d->bio_split);
@@ -830,7 +833,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
if (!d->nr_stripes || d->nr_stripes > max_stripes) {
- pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+ pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)\n",
(unsigned int)d->nr_stripes);
return -ENOMEM;
}
@@ -928,11 +931,11 @@ static int cached_dev_status_update(void *arg)
dc->offline_seconds = 0;
if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
- pr_err("%s: device offline for %d seconds",
+ pr_err("%s: device offline for %d seconds\n",
dc->backing_dev_name,
BACKING_DEV_OFFLINE_TIMEOUT);
- pr_err("%s: disable I/O request due to backing "
- "device offline", dc->disk.name);
+ pr_err("%s: disable I/O request due to backing device offline\n",
+ dc->disk.name);
dc->io_disable = true;
/* let others know earlier that io_disable is true */
smp_mb();
@@ -959,7 +962,7 @@ int bch_cached_dev_run(struct cached_dev *dc)
};
if (dc->io_disable) {
- pr_err("I/O disabled on cached dev %s",
+ pr_err("I/O disabled on cached dev %s\n",
dc->backing_dev_name);
kfree(env[1]);
kfree(env[2]);
@@ -971,7 +974,7 @@ int bch_cached_dev_run(struct cached_dev *dc)
kfree(env[1]);
kfree(env[2]);
kfree(buf);
- pr_info("cached dev %s is running already",
+ pr_info("cached dev %s is running already\n",
dc->backing_dev_name);
return -EBUSY;
}
@@ -1001,16 +1004,14 @@ int bch_cached_dev_run(struct cached_dev *dc)
if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
sysfs_create_link(&disk_to_dev(d->disk)->kobj,
&d->kobj, "bcache")) {
- pr_err("Couldn't create bcache dev <-> disk sysfs symlinks");
+ pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
return -ENOMEM;
}
dc->status_update_thread = kthread_run(cached_dev_status_update,
dc, "bcache_status_update");
if (IS_ERR(dc->status_update_thread)) {
- pr_warn("failed to create bcache_status_update kthread, "
- "continue to run without monitoring backing "
- "device status");
+ pr_warn("failed to create bcache_status_update kthread, continuing to run without monitoring backing device status\n");
}
return 0;
@@ -1036,7 +1037,7 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
} while (time_out > 0);
if (time_out == 0)
+ pr_warn("giving up waiting for dc->writeback_write_update to quit\n");
+ pr_warn("give up waiting for dc->writeback_write_update to quit\n");
cancel_delayed_work_sync(&dc->writeback_rate_update);
}
@@ -1077,7 +1078,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_unlock(&bch_register_lock);
- pr_info("Caching disabled for %s", dc->backing_dev_name);
+ pr_info("Caching disabled for %s\n", dc->backing_dev_name);
/* Drop ref we took in cached_dev_detach() */
closure_put(&dc->disk.cl);
@@ -1117,20 +1118,20 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return -ENOENT;
if (dc->disk.c) {
- pr_err("Can't attach %s: already attached",
+ pr_err("Can't attach %s: already attached\n",
dc->backing_dev_name);
return -EINVAL;
}
if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
- pr_err("Can't attach %s: shutting down",
+ pr_err("Can't attach %s: shutting down\n",
dc->backing_dev_name);
return -EINVAL;
}
if (dc->sb.block_size < c->sb.block_size) {
/* Will die */
- pr_err("Couldn't attach %s: block size less than set's block size",
+ pr_err("Couldn't attach %s: block size less than set's block size\n",
dc->backing_dev_name);
return -EINVAL;
}
@@ -1138,7 +1139,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
/* Check whether already attached */
list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
- pr_err("Tried to attach %s but duplicate UUID already attached",
+ pr_err("Tried to attach %s but duplicate UUID already attached\n",
dc->backing_dev_name);
return -EINVAL;
@@ -1157,14 +1158,14 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
if (!u) {
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- pr_err("Couldn't find uuid for %s in set",
+ pr_err("Couldn't find uuid for %s in set\n",
dc->backing_dev_name);
return -ENOENT;
}
u = uuid_find_empty(c);
if (!u) {
- pr_err("Not caching %s, no room for UUID",
+ pr_err("Not caching %s, no room for UUID\n",
dc->backing_dev_name);
return -EINVAL;
}
@@ -1210,7 +1211,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
down_write(&dc->writeback_lock);
if (bch_cached_dev_writeback_start(dc)) {
up_write(&dc->writeback_lock);
- pr_err("Couldn't start writeback facilities for %s",
+ pr_err("Couldn't start writeback facilities for %s\n",
dc->disk.disk->disk_name);
return -ENOMEM;
}
@@ -1233,7 +1234,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
*/
kthread_stop(dc->writeback_thread);
cancel_writeback_rate_update_dwork(dc);
- pr_err("Couldn't run cached device %s",
+ pr_err("Couldn't run cached device %s\n",
dc->backing_dev_name);
return ret;
}
@@ -1244,7 +1245,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
- pr_info("Caching %s as %s on set %pU",
+ pr_info("Caching %s as %s on set %pU\n",
dc->backing_dev_name,
dc->disk.disk->disk_name,
dc->disk.c->sb.set_uuid);
@@ -1384,7 +1385,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
- pr_info("registered backing device %s", dc->backing_dev_name);
+ pr_info("registered backing device %s\n", dc->backing_dev_name);
list_add(&dc->list, &uncached_devices);
/* attach to a matched cache set if it exists */
@@ -1401,7 +1402,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
return 0;
err:
- pr_notice("error %s: %s", dc->backing_dev_name, err);
+ pr_notice("error %s: %s\n", dc->backing_dev_name, err);
bcache_device_stop(&dc->disk);
return ret;
}
@@ -1497,7 +1498,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
u = uuid_find_empty(c);
if (!u) {
- pr_err("Can't create volume, no room for UUID");
+ pr_err("Can't create volume, no room for UUID\n");
return -EINVAL;
}
@@ -1523,7 +1524,7 @@ bool bch_cached_dev_error(struct cached_dev *dc)
smp_mb();
pr_err("stop %s: too many IO errors on backing device %s\n",
- dc->disk.disk->disk_name, dc->backing_dev_name);
+ dc->disk.disk->disk_name, dc->backing_dev_name);
bcache_device_stop(&dc->disk);
return true;
@@ -1534,6 +1535,7 @@ bool bch_cached_dev_error(struct cached_dev *dc)
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (c->on_error != ON_ERROR_PANIC &&
@@ -1541,20 +1543,22 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
return false;
if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
- pr_info("CACHE_SET_IO_DISABLE already set");
+ pr_info("CACHE_SET_IO_DISABLE already set\n");
/*
* XXX: we can be called from atomic context
* acquire_console_sem();
*/
- pr_err("bcache: error on %pU: ", c->sb.set_uuid);
-
va_start(args, fmt);
- vprintk(fmt, args);
- va_end(args);
- pr_err(", disabling caching\n");
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_err("error on %pU: %pV, disabling caching\n",
+ c->sb.set_uuid, &vaf);
+
+ va_end(args);
if (c->on_error == ON_ERROR_PANIC)
panic("panic forced after error\n");
@@ -1606,7 +1610,7 @@ static void cache_set_free(struct closure *cl)
list_del(&c->list);
mutex_unlock(&bch_register_lock);
- pr_info("Cache set %pU unregistered", c->sb.set_uuid);
+ pr_info("Cache set %pU unregistered\n", c->sb.set_uuid);
wake_up(&unregister_wait);
closure_debug_destroy(&c->cl);
@@ -1677,7 +1681,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
struct cached_dev *dc)
{
if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
- pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.",
+ pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
d->disk->disk_name, c->sb.set_uuid);
bcache_device_stop(d);
} else if (atomic_read(&dc->has_dirty)) {
@@ -1685,7 +1689,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
* dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
* and dc->has_dirty == 1
*/
- pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
+ pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
d->disk->disk_name);
/*
* There might be a small time gap that cache set is
@@ -1707,7 +1711,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
* dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
* and dc->has_dirty == 0
*/
- pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.",
+ pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
d->disk->disk_name);
}
}
@@ -1874,7 +1878,7 @@ static int run_cache_set(struct cache_set *c)
if (bch_journal_read(c, &journal))
goto err;
- pr_debug("btree_journal_read() done");
+ pr_debug("btree_journal_read() done\n");
err = "no journal entries found";
if (list_empty(&journal))
@@ -1920,7 +1924,7 @@ static int run_cache_set(struct cache_set *c)
bch_journal_mark(c, &journal);
bch_initial_gc_finish(c);
- pr_debug("btree_check() done");
+ pr_debug("btree_check() done\n");
/*
* bcache_journal_next() can't happen sooner, or
@@ -1951,7 +1955,7 @@ static int run_cache_set(struct cache_set *c)
if (bch_journal_replay(c, &journal))
goto err;
} else {
- pr_notice("invalidating existing data");
+ pr_notice("invalidating existing data\n");
for_each_cache(ca, c, i) {
unsigned int j;
@@ -2085,7 +2089,7 @@ found:
memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
c->sb.flags = ca->sb.flags;
c->sb.seq = ca->sb.seq;
- pr_debug("set version = %llu", c->sb.version);
+ pr_debug("set version = %llu\n", c->sb.version);
}
kobject_get(&ca->kobj);
@@ -2247,7 +2251,7 @@ err_btree_alloc:
err_free:
module_put(THIS_MODULE);
if (err)
- pr_notice("error %s: %s", ca->cache_dev_name, err);
+ pr_notice("error %s: %s\n", ca->cache_dev_name, err);
return ret;
}
@@ -2301,14 +2305,14 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
- pr_info("registered cache device %s", ca->cache_dev_name);
+ pr_info("registered cache device %s\n", ca->cache_dev_name);
out:
kobject_put(&ca->kobj);
err:
if (err)
- pr_notice("error %s: %s", ca->cache_dev_name, err);
+ pr_notice("error %s: %s\n", ca->cache_dev_name, err);
return ret;
}
@@ -2323,6 +2327,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
+kobj_attribute_write(register_async, register_bcache);
kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
static bool bch_is_open_backing(struct block_device *bdev)
@@ -2358,6 +2363,83 @@ static bool bch_is_open(struct block_device *bdev)
return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}
+struct async_reg_args {
+ struct work_struct reg_work;
+ char *path;
+ struct cache_sb *sb;
+ struct cache_sb_disk *sb_disk;
+ struct block_device *bdev;
+};
+
+static void register_bdev_worker(struct work_struct *work)
+{
+ bool fail = false;
+ struct async_reg_args *args =
+ container_of(work, struct async_reg_args, reg_work);
+ struct cached_dev *dc;
+
+ dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ if (!dc) {
+ fail = true;
+ put_page(virt_to_page(args->sb_disk));
+ blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ goto out;
+ }
+
+ mutex_lock(&bch_register_lock);
+ if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0)
+ fail = true;
+ mutex_unlock(&bch_register_lock);
+
+out:
+ if (fail)
+ pr_info("error %s: failed to register backing device\n",
+ args->path);
+ kfree(args->sb);
+ kfree(args->path);
+ kfree(args);
+ module_put(THIS_MODULE);
+}
+
+static void register_cache_worker(struct work_struct *work)
+{
+ bool fail = false;
+ struct async_reg_args *args =
+ container_of(work, struct async_reg_args, reg_work);
+ struct cache *ca;
+
+ ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ if (!ca) {
+ fail = true;
+ put_page(virt_to_page(args->sb_disk));
+ blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ goto out;
+ }
+
+ /* blkdev_put() will be called in bch_cache_release() */
+ if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0)
+ fail = true;
+
+out:
+ if (fail)
+ pr_info("error %s: failed to register cache device\n",
+ args->path);
+ kfree(args->sb);
+ kfree(args->path);
+ kfree(args);
+ module_put(THIS_MODULE);
+}
+
+static void register_device_async(struct async_reg_args *args)
+{
+ if (SB_IS_BDEV(args->sb))
+ INIT_WORK(&args->reg_work, register_bdev_worker);
+ else
+ INIT_WORK(&args->reg_work, register_cache_worker);
+
+ queue_work(system_wq, &args->reg_work);
+}
+
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
@@ -2420,6 +2502,26 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
goto out_blkdev_put;
err = "failed to register device";
+ if (attr == &ksysfs_register_async) {
+ /* register in asynchronous way */
+ struct async_reg_args *args =
+ kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
+
+ if (!args) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
+ goto out_put_sb_page;
+ }
+
+ args->path = path;
+ args->sb = sb;
+ args->sb_disk = sb_disk;
+ args->bdev = bdev;
+ register_device_async(args);
+ /* Return to user space without waiting */
+ goto async_done;
+ }
+
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
@@ -2447,6 +2549,7 @@ done:
kfree(sb);
kfree(path);
module_put(THIS_MODULE);
+async_done:
return size;
out_put_sb_page:
@@ -2461,7 +2564,7 @@ out_free_path:
out_module_put:
module_put(THIS_MODULE);
out:
- pr_info("error %s: %s", path?path:"", err);
+ pr_info("error %s: %s\n", path ? path : "", err);
return ret;
}
@@ -2506,7 +2609,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
mutex_unlock(&bch_register_lock);
list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
- pr_info("delete pdev %p", pdev);
+ pr_info("delete pdev %p\n", pdev);
list_del(&pdev->list);
bcache_device_stop(&pdev->dc->disk);
kfree(pdev);
@@ -2549,7 +2652,7 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
mutex_unlock(&bch_register_lock);
- pr_info("Stopping all devices:");
+ pr_info("Stopping all devices:\n");
/*
* The reason bch_register_lock is not held to call
@@ -2599,9 +2702,9 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
finish_wait(&unregister_wait, &wait);
if (stopped)
- pr_info("All devices stopped");
+ pr_info("All devices stopped\n");
else
- pr_notice("Timeout waiting for devices to be closed");
+ pr_notice("Timeout waiting for devices to be closed\n");
out:
mutex_unlock(&bch_register_lock);
}
@@ -2637,7 +2740,7 @@ static void check_module_parameters(void)
if (bch_cutoff_writeback_sync == 0)
bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
- pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u",
+ pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
}
@@ -2645,13 +2748,13 @@ static void check_module_parameters(void)
if (bch_cutoff_writeback == 0)
bch_cutoff_writeback = CUTOFF_WRITEBACK;
else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
- pr_warn("set bch_cutoff_writeback (%u) to max value %u",
+ pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
}
if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
- pr_warn("set bch_cutoff_writeback (%u) to %u",
+ pr_warn("set bch_cutoff_writeback (%u) to %u\n",
bch_cutoff_writeback, bch_cutoff_writeback_sync);
bch_cutoff_writeback = bch_cutoff_writeback_sync;
}
@@ -2662,6 +2765,9 @@ static int __init bcache_init(void)
static const struct attribute *files[] = {
&ksysfs_register.attr,
&ksysfs_register_quiet.attr,
+#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
+ &ksysfs_register_async.attr,
+#endif
&ksysfs_pendings_cleanup.attr,
NULL
};
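The bch_cache_set_error() rewrite above uses the standard %pV idiom: rather than splitting one logical message across pr_err()/vprintk()/pr_err(), the caller's format and va_list are wrapped in a struct va_format and printed by a single pr_err(), so the message can no longer be interleaved with other console output. A minimal sketch:

#include <linux/kernel.h>
#include <linux/printk.h>

__printf(1, 2)
static void report_error(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	/* One atomic printk; %pV expands the caller's format in place. */
	pr_err("error: %pV, disabling caching\n", &vaf);

	va_end(args);
}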
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 323276994aab..0dadec5a78f6 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -421,7 +421,7 @@ STORE(__cached_dev)
return size;
}
if (v == -ENOENT)
- pr_err("Can't attach %s: cache set not found", buf);
+ pr_err("Can't attach %s: cache set not found\n", buf);
return v;
}
@@ -455,7 +455,7 @@ STORE(bch_cached_dev)
*/
if (dc->writeback_running) {
dc->writeback_running = false;
- pr_err("%s: failed to run non-existent writeback thread",
+ pr_err("%s: failed to run non-existent writeback thread\n",
dc->disk.disk->disk_name);
}
} else
@@ -872,11 +872,11 @@ STORE(__bch_cache_set)
if (v) {
if (test_and_set_bit(CACHE_SET_IO_DISABLE,
&c->flags))
- pr_warn("CACHE_SET_IO_DISABLE already set");
+ pr_warn("CACHE_SET_IO_DISABLE already set\n");
} else {
if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
&c->flags))
- pr_warn("CACHE_SET_IO_DISABLE already cleared");
+ pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
}
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 3f7641fb28d5..1cf1e5016cb9 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -809,7 +809,7 @@ static int bch_root_node_dirty_init(struct cache_set *c,
schedule_timeout_interruptible(
msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
else if (ret < 0) {
- pr_warn("sectors dirty init failed, ret=%d!", ret);
+ pr_warn("sectors dirty init failed, ret=%d!\n", ret);
break;
}
} while (ret == -EAGAIN);
@@ -917,7 +917,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
if (!state) {
- pr_warn("sectors dirty init failed: cannot allocate memory");
+ pr_warn("sectors dirty init failed: cannot allocate memory\n");
return;
}
@@ -945,7 +945,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
&state->infos[i],
name);
if (IS_ERR(state->infos[i].thread)) {
+ pr_err("failed to run thread bch_dirty_init[%d]\n", i);
+ pr_err("fails to run thread bch_dirty_init[%d]\n", i);
for (--i; i >= 0; i--)
kthread_stop(state->infos[i].thread);
goto out;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 2d519c223562..6d1565021d74 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -256,12 +256,35 @@ static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
if (b->block == block)
return b;
- n = (b->block < block) ? n->rb_left : n->rb_right;
+ n = block < b->block ? n->rb_left : n->rb_right;
}
return NULL;
}
+static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
+{
+ struct rb_node *n = c->buffer_tree.rb_node;
+ struct dm_buffer *b;
+ struct dm_buffer *best = NULL;
+
+ while (n) {
+ b = container_of(n, struct dm_buffer, node);
+
+ if (b->block == block)
+ return b;
+
+ if (block <= b->block) {
+ n = n->rb_left;
+ best = b;
+ } else {
+ n = n->rb_right;
+ }
+ }
+
+ return best;
+}
+
static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
@@ -276,8 +299,8 @@ static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
}
parent = *new;
- new = (found->block < b->block) ?
- &((*new)->rb_left) : &((*new)->rb_right);
+ new = b->block < found->block ?
+ &found->node.rb_left : &found->node.rb_right;
}
rb_link_node(&b->node, parent, new);
@@ -400,13 +423,13 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
*/
if (gfp_mask & __GFP_NORETRY) {
unsigned noio_flag = memalloc_noio_save();
- void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+ void *ptr = __vmalloc(c->block_size, gfp_mask);
memalloc_noio_restore(noio_flag);
return ptr;
}
- return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+ return __vmalloc(c->block_size, gfp_mask);
}
/*
@@ -631,6 +654,19 @@ dmio:
submit_bio(bio);
}
+static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
+{
+ sector_t sector;
+
+ if (likely(c->sectors_per_block_bits >= 0))
+ sector = block << c->sectors_per_block_bits;
+ else
+ sector = block * (c->block_size >> SECTOR_SHIFT);
+ sector += c->start;
+
+ return sector;
+}
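(A worked example with assumed numbers: for 4 KiB blocks on 512-byte sectors, sectors_per_block_bits is 3, so block 3 maps to sector 3 << 3 = 24 plus the client's start offset. For a non-power-of-two 3 KiB block size the shift is negative, and the multiply path gives 3 * (3072 >> 9) = 18 sectors instead.)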
+
static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned n_sectors;
@@ -639,11 +675,7 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
b->end_io = end_io;
- if (likely(b->c->sectors_per_block_bits >= 0))
- sector = b->block << b->c->sectors_per_block_bits;
- else
- sector = b->block * (b->c->block_size >> SECTOR_SHIFT);
- sector += b->c->start;
+ sector = block_to_sector(b->c, b->block);
if (rw != REQ_OP_WRITE) {
n_sectors = b->c->block_size >> SECTOR_SHIFT;
@@ -1326,6 +1358,30 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
/*
+ * Use dm-io to send a discard request to the device.
+ */
+int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
+{
+ struct dm_io_request io_req = {
+ .bi_op = REQ_OP_DISCARD,
+ .bi_op_flags = REQ_SYNC,
+ .mem.type = DM_IO_KMEM,
+ .mem.ptr.addr = NULL,
+ .client = c->dm_io,
+ };
+ struct dm_io_region io_reg = {
+ .bdev = c->bdev,
+ .sector = block_to_sector(c, block),
+ .count = block_to_sector(c, count),
+ };
+
+ BUG_ON(dm_bufio_in_request());
+
+ return dm_io(&io_req, 1, &io_reg, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
+
+/*
* We first delete any other buffer that may be at that new location.
*
* Then, we write the buffer to the original location if it was dirty.
@@ -1401,6 +1457,14 @@ retry:
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
+static void forget_buffer_locked(struct dm_buffer *b)
+{
+ if (likely(!b->hold_count) && likely(!b->state)) {
+ __unlink_buffer(b);
+ __free_buffer_wake(b);
+ }
+}
+
/*
* Free the given buffer.
*
@@ -1414,15 +1478,36 @@ void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
dm_bufio_lock(c);
b = __find(c, block);
- if (b && likely(!b->hold_count) && likely(!b->state)) {
- __unlink_buffer(b);
- __free_buffer_wake(b);
- }
+ if (b)
+ forget_buffer_locked(b);
dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);
+void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
+{
+ struct dm_buffer *b;
+ sector_t end_block = block + n_blocks;
+
+ while (block < end_block) {
+ dm_bufio_lock(c);
+
+ b = __find_next(c, block);
+ if (b) {
+ block = b->block + 1;
+ forget_buffer_locked(b);
+ }
+
+ dm_bufio_unlock(c);
+
+ if (!b)
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
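Taken together with dm_bufio_issue_discard() above, a caller discarding a block range through bufio would typically drop any cached copies first. A minimal sketch (a hypothetical helper, not part of this patch; it mirrors the ordering dm-ebs uses below for REQ_OP_DISCARD):

	static int discard_blocks(struct dm_bufio_client *c,
				  sector_t block, sector_t n_blocks)
	{
		/* Invalidate clean, unheld cached copies of the range. */
		dm_bufio_forget_buffers(c, block, n_blocks);

		/* Pass the discard down to the backing device via dm-io. */
		return dm_bufio_issue_discard(c, block, n_blocks);
	}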
+
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
c->minimum_buffers = n;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3df90daba89e..000ddfab5ba0 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -34,7 +34,9 @@
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
+#include <linux/key-type.h>
#include <keys/user-type.h>
+#include <keys/encrypted-type.h>
#include <linux/device-mapper.h>
@@ -212,7 +214,7 @@ struct crypt_config {
struct mutex bio_alloc_lock;
u8 *authenc_key; /* space for keys in authenc() format (if used) */
- u8 key[0];
+ u8 key[];
};
#define MIN_IOS 64
@@ -2215,12 +2217,47 @@ static bool contains_whitespace(const char *str)
return false;
}
+static int set_key_user(struct crypt_config *cc, struct key *key)
+{
+ const struct user_key_payload *ukp;
+
+ ukp = user_key_payload_locked(key);
+ if (!ukp)
+ return -EKEYREVOKED;
+
+ if (cc->key_size != ukp->datalen)
+ return -EINVAL;
+
+ memcpy(cc->key, ukp->data, cc->key_size);
+
+ return 0;
+}
+
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
+static int set_key_encrypted(struct crypt_config *cc, struct key *key)
+{
+ const struct encrypted_key_payload *ekp;
+
+ ekp = key->payload.data[0];
+ if (!ekp)
+ return -EKEYREVOKED;
+
+ if (cc->key_size != ekp->decrypted_datalen)
+ return -EINVAL;
+
+ memcpy(cc->key, ekp->decrypted_data, cc->key_size);
+
+ return 0;
+}
+#endif /* CONFIG_ENCRYPTED_KEYS */
+
static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
char *new_key_string, *key_desc;
int ret;
+ struct key_type *type;
struct key *key;
- const struct user_key_payload *ukp;
+ int (*set_key)(struct crypt_config *cc, struct key *key);
/*
* Reject key_string with whitespace. dm core currently lacks code for
@@ -2236,16 +2273,26 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
return -EINVAL;
- if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
- strncmp(key_string, "user:", key_desc - key_string + 1))
+ if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
+ type = &key_type_logon;
+ set_key = set_key_user;
+ } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
+ type = &key_type_user;
+ set_key = set_key_user;
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
+ } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
+ type = &key_type_encrypted;
+ set_key = set_key_encrypted;
+#endif
+ } else {
return -EINVAL;
+ }
new_key_string = kstrdup(key_string, GFP_KERNEL);
if (!new_key_string)
return -ENOMEM;
- key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
- key_desc + 1, NULL);
+ key = request_key(type, key_desc + 1, NULL);
if (IS_ERR(key)) {
kzfree(new_key_string);
return PTR_ERR(key);
@@ -2253,23 +2300,14 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
down_read(&key->sem);
- ukp = user_key_payload_locked(key);
- if (!ukp) {
- up_read(&key->sem);
- key_put(key);
- kzfree(new_key_string);
- return -EKEYREVOKED;
- }
-
- if (cc->key_size != ukp->datalen) {
+ ret = set_key(cc, key);
+ if (ret < 0) {
up_read(&key->sem);
key_put(key);
kzfree(new_key_string);
- return -EINVAL;
+ return ret;
}
- memcpy(cc->key, ukp->data, cc->key_size);
-
up_read(&key->sem);
key_put(key);
@@ -2323,7 +2361,7 @@ static int get_key_size(char **key_string)
return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
}
-#endif
+#endif /* CONFIG_KEYS */
static int crypt_set_key(struct crypt_config *cc, char *key)
{
@@ -3274,7 +3312,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
- max_t(unsigned short, limits->logical_block_size, cc->sector_size);
+ max_t(unsigned, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
@@ -3282,7 +3320,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 20, 0},
+ .version = {1, 21, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
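With the new key type accepted, a dm-crypt table can reference a kernel keyring key of the "encrypted" type through the existing :<key_size>:<key_type>:<key_description> key syntax. For illustration (device and key names hypothetical):

	0 2097152 crypt aes-xts-plain64 :64:encrypted:dmkey 0 /dev/sdb 0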
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
new file mode 100644
index 000000000000..44451276f128
--- /dev/null
+++ b/drivers/md/dm-ebs-target.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2020 Red Hat GmbH
+ *
+ * This file is released under the GPL.
+ *
+ * Device-mapper target to emulate smaller logical block
+ * size on backing devices exposing (natively) larger ones.
+ *
+ * E.g. 512 byte sector emulation on 4K native disks.
+ */
+
+#include "dm.h"
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/dm-bufio.h>
+
+#define DM_MSG_PREFIX "ebs"
+
+static void ebs_dtr(struct dm_target *ti);
+
+/* Emulated block size context. */
+struct ebs_c {
+ struct dm_dev *dev; /* Underlying device to emulate block size on. */
+ struct dm_bufio_client *bufio; /* Use dm-bufio for read and read-modify-write processing. */
+ struct workqueue_struct *wq; /* Workqueue for ^ processing of bios. */
+ struct work_struct ws; /* Work item used for ^. */
+ struct bio_list bios_in; /* Worker bios input list. */
+ spinlock_t lock; /* Guard bios input list above. */
+ sector_t start; /* <start> table line argument, see ebs_ctr below. */
+ unsigned int e_bs; /* Emulated block size in sectors exposed to upper layer. */
+ unsigned int u_bs; /* Underlying block size in sectors retrieved from/set on lower layer device. */
+ unsigned char block_shift; /* bitshift sectors -> blocks used in dm-bufio API. */
+ bool u_bs_set:1; /* Flag to indicate underlying block size is set on table line. */
+};
+
+static inline sector_t __sector_to_block(struct ebs_c *ec, sector_t sector)
+{
+ return sector >> ec->block_shift;
+}
+
+static inline sector_t __block_mod(sector_t sector, unsigned int bs)
+{
+ return sector & (bs - 1);
+}
+
+/* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
+static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
+{
+ sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
+
+ return __sector_to_block(ec, end_sector) + (__block_mod(end_sector, ec->u_bs) ? 1 : 0);
+}
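(Worked example with assumed numbers: for u_bs = 8 sectors and a bio starting at sector 3 spanning 10 sectors, end_sector = (3 & 7) + 10 = 13, giving (13 >> 3) + 1 = 2 underlying blocks; the bio straddles one block boundary.)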
+
+static inline bool __ebs_check_bs(unsigned int bs)
+{
+ return bs && is_power_of_2(bs);
+}
+
+/*
+ * READ/WRITE:
+ *
+ * copy data between bufio blocks and the bio vector's (partial/overlapping) pages.
+ */
+static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bvec_iter *iter)
+{
+ int r = 0;
+ unsigned char *ba, *pa;
+ unsigned int cur_len;
+ unsigned int bv_len = bv->bv_len;
+ unsigned int buf_off = to_bytes(__block_mod(iter->bi_sector, ec->u_bs));
+ sector_t block = __sector_to_block(ec, iter->bi_sector);
+ struct dm_buffer *b;
+
+ if (unlikely(!bv->bv_page || !bv_len))
+ return -EIO;
+
+ pa = page_address(bv->bv_page) + bv->bv_offset;
+
+ /* Handle overlapping page <-> blocks */
+ while (bv_len) {
+ cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);
+
+ /* Avoid reading for writes in case the bio vector's page completely overwrites the block. */
+ if (rw == READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
+ ba = dm_bufio_read(ec->bufio, block, &b);
+ else
+ ba = dm_bufio_new(ec->bufio, block, &b);
+
+ if (unlikely(IS_ERR(ba))) {
+ /*
+ * Carry on with next buffer, if any, to issue all possible
+ * data but return error.
+ */
+ r = PTR_ERR(ba);
+ } else {
+ /* Copy data between the bio and the buffer if read/new succeeded above. */
+ ba += buf_off;
+ if (rw == READ) {
+ memcpy(pa, ba, cur_len);
+ flush_dcache_page(bv->bv_page);
+ } else {
+ flush_dcache_page(bv->bv_page);
+ memcpy(ba, pa, cur_len);
+ dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len);
+ }
+
+ dm_bufio_release(b);
+ }
+
+ pa += cur_len;
+ bv_len -= cur_len;
+ buf_off = 0;
+ block++;
+ }
+
+ return r;
+}
+
+/* READ/WRITE: iterate the bio's vectors, copying between (partial) pages and bufio blocks. */
+static int __ebs_rw_bio(struct ebs_c *ec, int rw, struct bio *bio)
+{
+ int r = 0, rr;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+
+ bio_for_each_bvec(bv, bio, iter) {
+ rr = __ebs_rw_bvec(ec, rw, &bv, &iter);
+ if (rr)
+ r = rr;
+ }
+
+ return r;
+}
+
+/*
+ * Discard bio's blocks, i.e. pass discards down.
+ *
+ * Avoid discarding partial blocks at beginning and end;
+ * return 0 in case no blocks can be discarded as a result.
+ */
+static int __ebs_discard_bio(struct ebs_c *ec, struct bio *bio)
+{
+ sector_t block, blocks, sector = bio->bi_iter.bi_sector;
+
+ block = __sector_to_block(ec, sector);
+ blocks = __nr_blocks(ec, bio);
+
+ /*
+ * Partial first underlying block (__nr_blocks() may have
+ * resulted in one block).
+ */
+ if (__block_mod(sector, ec->u_bs)) {
+ block++;
+ blocks--;
+ }
+
+ /* Partial last underlying block if any. */
+ if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs))
+ blocks--;
+
+ return blocks ? dm_bufio_issue_discard(ec->bufio, block, blocks) : 0;
+}
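(Continuing the example above: discarding sectors 3..12 with u_bs = 8 starts from block = 0, blocks = 2; the partial first block advances to block = 1, blocks = 1, and since end sector 13 is not block-aligned the partial last block drops blocks to 0, so nothing is discarded. Only underlying blocks fully covered by the bio are ever passed down.)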
+
+/* Release the bio's blocks from the bufio cache. */
+static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
+{
+ sector_t blocks, sector = bio->bi_iter.bi_sector;
+
+ blocks = __nr_blocks(ec, bio);
+
+ dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
+}
+
+/* Worker function to process incoming bios. */
+static void __ebs_process_bios(struct work_struct *ws)
+{
+ int r;
+ bool write = false;
+ sector_t block1, block2;
+ struct ebs_c *ec = container_of(ws, struct ebs_c, ws);
+ struct bio *bio;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
+
+ spin_lock_irq(&ec->lock);
+ bios = ec->bios_in;
+ bio_list_init(&ec->bios_in);
+ spin_unlock_irq(&ec->lock);
+
+ /* Prefetch all read and any mis-aligned write buffers */
+ bio_list_for_each(bio, &bios) {
+ block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);
+ if (bio_op(bio) == REQ_OP_READ)
+ dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio));
+ else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
+ block2 = __sector_to_block(ec, bio_end_sector(bio));
+ if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
+ dm_bufio_prefetch(ec->bufio, block1, 1);
+ if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
+ dm_bufio_prefetch(ec->bufio, block2, 1);
+ }
+ }
+
+ bio_list_for_each(bio, &bios) {
+ r = -EIO;
+ if (bio_op(bio) == REQ_OP_READ)
+ r = __ebs_rw_bio(ec, READ, bio);
+ else if (bio_op(bio) == REQ_OP_WRITE) {
+ write = true;
+ r = __ebs_rw_bio(ec, WRITE, bio);
+ } else if (bio_op(bio) == REQ_OP_DISCARD) {
+ __ebs_forget_bio(ec, bio);
+ r = __ebs_discard_bio(ec, bio);
+ }
+
+ if (r < 0)
+ bio->bi_status = errno_to_blk_status(r);
+ }
+
+ /*
+ * We write dirty buffers after processing I/O on them
+ * but before we endio them, thus addressing REQ_FUA/REQ_SYNC.
+ */
+ r = write ? dm_bufio_write_dirty_buffers(ec->bufio) : 0;
+
+ while ((bio = bio_list_pop(&bios))) {
+ /* If the dirty-buffer flush failed, error the writes; endio any other request. */
+ if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
+ bio_io_error(bio);
+ else
+ bio_endio(bio);
+ }
+}
+
+/*
+ * Construct an emulated block size mapping: <dev_path> <offset> <ebs> [<ubs>]
+ *
+ * <dev_path>: path of the underlying device
+ * <offset>: offset in 512-byte sectors into <dev_path>
+ * <ebs>: emulated block size in units of 512 bytes exposed to the upper layer
+ * [<ubs>]: underlying block size in units of 512 bytes imposed on the lower layer;
+ * optional, if not supplied, retrieve logical block size from underlying device
+ */
+static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ int r;
+ unsigned short tmp1;
+ unsigned long long tmp;
+ char dummy;
+ struct ebs_c *ec;
+
+ if (argc < 3 || argc > 4) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ ec = ti->private = kzalloc(sizeof(*ec), GFP_KERNEL);
+ if (!ec) {
+ ti->error = "Cannot allocate ebs context";
+ return -ENOMEM;
+ }
+
+ r = -EINVAL;
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 ||
+ tmp != (sector_t)tmp ||
+ (sector_t)tmp >= ti->len) {
+ ti->error = "Invalid device offset sector";
+ goto bad;
+ }
+ ec->start = tmp;
+
+ if (sscanf(argv[2], "%hu%c", &tmp1, &dummy) != 1 ||
+ !__ebs_check_bs(tmp1) ||
+ to_bytes(tmp1) > PAGE_SIZE) {
+ ti->error = "Invalid emulated block size";
+ goto bad;
+ }
+ ec->e_bs = tmp1;
+
+ if (argc > 3) {
+ if (sscanf(argv[3], "%hu%c", &tmp1, &dummy) != 1 || !__ebs_check_bs(tmp1)) {
+ ti->error = "Invalid underlying block size";
+ goto bad;
+ }
+ ec->u_bs = tmp1;
+ ec->u_bs_set = true;
+ } else
+ ec->u_bs_set = false;
+
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
+ if (r) {
+ ti->error = "Device lookup failed";
+ ec->dev = NULL;
+ goto bad;
+ }
+
+ r = -EINVAL;
+ if (!ec->u_bs_set) {
+ ec->u_bs = to_sector(bdev_logical_block_size(ec->dev->bdev));
+ if (!__ebs_check_bs(ec->u_bs)) {
+ ti->error = "Invalid retrieved underlying block size";
+ goto bad;
+ }
+ }
+
+ if (!ec->u_bs_set && ec->e_bs == ec->u_bs)
+ DMINFO("Emulation superfluous: emulated equal to underlying block size");
+
+ if (__block_mod(ec->start, ec->u_bs)) {
+ ti->error = "Device offset must be multiple of underlying block size";
+ goto bad;
+ }
+
+ ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1, 0, NULL, NULL);
+ if (IS_ERR(ec->bufio)) {
+ ti->error = "Cannot create dm bufio client";
+ r = PTR_ERR(ec->bufio);
+ ec->bufio = NULL;
+ goto bad;
+ }
+
+ ec->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!ec->wq) {
+ ti->error = "Cannot create dm-" DM_MSG_PREFIX " workqueue";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ ec->block_shift = __ffs(ec->u_bs);
+ INIT_WORK(&ec->ws, &__ebs_process_bios);
+ bio_list_init(&ec->bios_in);
+ spin_lock_init(&ec->lock);
+
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
+ ti->num_secure_erase_bios = 0;
+ ti->num_write_same_bios = 0;
+ ti->num_write_zeroes_bios = 0;
+ return 0;
+bad:
+ ebs_dtr(ti);
+ return r;
+}
+
+static void ebs_dtr(struct dm_target *ti)
+{
+ struct ebs_c *ec = ti->private;
+
+ if (ec->wq)
+ destroy_workqueue(ec->wq);
+ if (ec->bufio)
+ dm_bufio_client_destroy(ec->bufio);
+ if (ec->dev)
+ dm_put_device(ti, ec->dev);
+ kfree(ec);
+}
+
+static int ebs_map(struct dm_target *ti, struct bio *bio)
+{
+ struct ebs_c *ec = ti->private;
+
+ bio_set_dev(bio, ec->dev->bdev);
+ bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
+
+ if (unlikely(bio->bi_opf & REQ_OP_FLUSH))
+ return DM_MAPIO_REMAPPED;
+ /*
+ * Only queue for bufio processing in case of partial or overlapping buffers
+ * -or-
+ * emulation with ebs == ubs, intended for testing dm-bufio overhead.
+ */
+ if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
+ __block_mod(bio_end_sector(bio), ec->u_bs) ||
+ ec->e_bs == ec->u_bs)) {
+ spin_lock_irq(&ec->lock);
+ bio_list_add(&ec->bios_in, bio);
+ spin_unlock_irq(&ec->lock);
+
+ queue_work(ec->wq, &ec->ws);
+
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /* Forget any buffer content relative to this direct backing device I/O. */
+ __ebs_forget_bio(ec, bio);
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static void ebs_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+{
+ struct ebs_c *ec = ti->private;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ *result = '\0';
+ break;
+ case STATUSTYPE_TABLE:
+ snprintf(result, maxlen, ec->u_bs_set ? "%s %llu %u %u" : "%s %llu %u",
+ ec->dev->name, (unsigned long long) ec->start, ec->e_bs, ec->u_bs);
+ break;
+ }
+}
+
+static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+{
+ struct ebs_c *ec = ti->private;
+ struct dm_dev *dev = ec->dev;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ *bdev = dev->bdev;
+ return !!(ec->start || ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT);
+}
+
+static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct ebs_c *ec = ti->private;
+
+ limits->logical_block_size = to_bytes(ec->e_bs);
+ limits->physical_block_size = to_bytes(ec->u_bs);
+ limits->alignment_offset = limits->physical_block_size;
+ blk_limits_io_min(limits, limits->logical_block_size);
+}
+
+static int ebs_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct ebs_c *ec = ti->private;
+
+ return fn(ti, ec->dev, ec->start, ti->len, data);
+}
+
+static struct target_type ebs_target = {
+ .name = "ebs",
+ .version = {1, 0, 1},
+ .features = DM_TARGET_PASSES_INTEGRITY,
+ .module = THIS_MODULE,
+ .ctr = ebs_ctr,
+ .dtr = ebs_dtr,
+ .map = ebs_map,
+ .status = ebs_status,
+ .io_hints = ebs_io_hints,
+ .prepare_ioctl = ebs_prepare_ioctl,
+ .iterate_devices = ebs_iterate_devices,
+};
+
+static int __init dm_ebs_init(void)
+{
+ int r = dm_register_target(&ebs_target);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void dm_ebs_exit(void)
+{
+ dm_unregister_target(&ebs_target);
+}
+
+module_init(dm_ebs_init);
+module_exit(dm_ebs_exit);
+
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_DESCRIPTION(DM_NAME " emulated block size target");
+MODULE_LICENSE("GPL");
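For illustration, a table line exposing 512-byte logical blocks on an assumed 4 KiB-native disk /dev/sdb (device name and size hypothetical) would be:

	0 <device_sectors> ebs /dev/sdb 0 1 8

i.e. offset 0, an emulated block size of 1 sector (512 bytes) and an underlying block size of 8 sectors (4 KiB); the trailing <ubs> may be omitted to pick up the device's logical block size automatically.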
diff --git a/drivers/md/dm-historical-service-time.c b/drivers/md/dm-historical-service-time.c
new file mode 100644
index 000000000000..186f91e2752c
--- /dev/null
+++ b/drivers/md/dm-historical-service-time.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Historical Service Time
+ *
+ * Keeps a time-weighted exponential moving average of the historical
+ * service time. Estimates future service time based on the historical
+ * service time and the number of outstanding requests.
+ *
+ * Marks paths stale if they have not finished within hst *
+ * num_paths. If a path is stale and unused, we will send a single
+ * request to probe in case the path has improved. This situation
+ * generally arises if the path is so much worse than others that it
+ * will never have the best estimated service time, or if the entire
+ * multipath device is unused. If a path is stale and in use, limit the
+ * number of requests it can receive with the assumption that the path
+ * has become degraded.
+ *
+ * To avoid repeatedly calculating exponents for time weighting, times
+ * are split into HST_WEIGHT_COUNT buckets each (1 << HST_BUCKET_SHIFT)
+ * ns, and the weighting is pre-calculated.
+ *
+ */
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+
+#define DM_MSG_PREFIX "multipath historical-service-time"
+#define HST_MIN_IO 1
+#define HST_VERSION "0.1.1"
+
+#define HST_FIXED_SHIFT 10 /* 10 bits of decimal precision */
+#define HST_FIXED_MAX (ULLONG_MAX >> HST_FIXED_SHIFT)
+#define HST_FIXED_1 (1 << HST_FIXED_SHIFT)
+#define HST_FIXED_95 972
+#define HST_MAX_INFLIGHT HST_FIXED_1
+#define HST_BUCKET_SHIFT 24 /* Buckets are ~ 16ms */
+#define HST_WEIGHT_COUNT 64ULL
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head failed_paths;
+ int valid_count;
+ spinlock_t lock;
+
+ unsigned int weights[HST_WEIGHT_COUNT];
+ unsigned int threshold_multiplier;
+};
+
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned int repeat_count;
+
+ spinlock_t lock;
+
+ u64 historical_service_time; /* Fixed point */
+
+ u64 stale_after;
+ u64 last_finish;
+
+ u64 outstanding;
+};
+
+/**
+ * fixed_power - compute: x^n, in O(log n) time
+ *
+ * @x: base of the power
+ * @frac_bits: fractional bits of @x
+ * @n: power to raise @x to.
+ *
+ * By exploiting the relation between the definition of the natural power
+ * function: x^n := x*x*...*x (x multiplied by itself for n times), and
+ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
+ * (where: n_i \elem {0, 1}, the binary vector representing n),
+ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
+ * of course trivially computable in O(log_2 n), the length of our binary
+ * vector.
+ *
+ * (see: kernel/sched/loadavg.c)
+ */
+static u64 fixed_power(u64 x, unsigned int frac_bits, unsigned int n)
+{
+ unsigned long result = 1UL << frac_bits;
+
+ if (n) {
+ for (;;) {
+ if (n & 1) {
+ result *= x;
+ result += 1UL << (frac_bits - 1);
+ result >>= frac_bits;
+ }
+ n >>= 1;
+ if (!n)
+ break;
+ x *= x;
+ x += 1UL << (frac_bits - 1);
+ x >>= frac_bits;
+ }
+ }
+
+ return result;
+}
+
+/*
+ * Calculate the next value of an exponential moving average
+ * a_1 = a_0 * e + a * (1 - e)
+ *
+ * @last: [0, ULLONG_MAX >> HST_FIXED_SHIFT]
+ * @next: [0, ULLONG_MAX >> HST_FIXED_SHIFT]
+ * @weight: [0, HST_FIXED_1]
+ *
+ * Note:
+ * To account for multiple periods in the same calculation,
+ * a_n = a_0 * e^n + a * (1 - e^n),
+ * so call fixed_ema(last, next, pow(weight, N))
+ */
+static u64 fixed_ema(u64 last, u64 next, u64 weight)
+{
+ last *= weight;
+ last += next * (HST_FIXED_1 - weight);
+ last += 1ULL << (HST_FIXED_SHIFT - 1);
+ return last >> HST_FIXED_SHIFT;
+}
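(Worked example: with HST_FIXED_SHIFT = 10 and weight = HST_FIXED_95 = 972, roughly 0.949 in fixed point, updating last = 2048 (2.0) with next = 1024 (1.0) yields (2048 * 972 + 1024 * (1024 - 972) + 512) >> 10 = 1996, i.e. about 1.949: the average moves roughly 5% of the way toward the new sample, as expected for a weight of ~0.95.)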
+
+static struct selector *alloc_selector(void)
+{
+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->failed_paths);
+ spin_lock_init(&s->lock);
+ s->valid_count = 0;
+ }
+
+ return s;
+}
+
+/*
+ * Get the weight for a given time span.
+ */
+static u64 hst_weight(struct path_selector *ps, u64 delta)
+{
+ struct selector *s = ps->context;
+ int bucket = clamp(delta >> HST_BUCKET_SHIFT, 0ULL,
+ HST_WEIGHT_COUNT - 1);
+
+ return s->weights[bucket];
+}
+
+/*
+ * Set up the weights array.
+ *
+ * weights[len-1] = 0
+ * weights[n] = base ^ (n + 1)
+ */
+static void hst_set_weights(struct path_selector *ps, unsigned int base)
+{
+ struct selector *s = ps->context;
+ int i;
+
+ if (base >= HST_FIXED_1)
+ return;
+
+ for (i = 0; i < HST_WEIGHT_COUNT - 1; i++)
+ s->weights[i] = fixed_power(base, HST_FIXED_SHIFT, i + 1);
+ s->weights[HST_WEIGHT_COUNT - 1] = 0;
+}
+
+static int hst_create(struct path_selector *ps, unsigned int argc, char **argv)
+{
+ struct selector *s;
+ unsigned int base_weight = HST_FIXED_95;
+ unsigned int threshold_multiplier = 0;
+ char dummy;
+
+ /*
+ * Arguments: [<base_weight> [<threshold_multiplier>]]
+ * <base_weight>: Base weight for ema [0, 1024) 10-bit fixed point. A
+ * value of 0 will completely ignore any history.
+ * If not given, default (HST_FIXED_95) is used.
+ * <threshold_multiplier>: Minimum threshold multiplier for paths to
+ * be considered different. That is, a path is
+ * considered different iff (p1 > N * p2) where p1
+ * is the path with higher service time. A threshold
+ * of 1 or 0 has no effect. Defaults to 0.
+ */
+ if (argc > 2)
+ return -EINVAL;
+
+ if (argc && (sscanf(argv[0], "%u%c", &base_weight, &dummy) != 1 ||
+ base_weight >= HST_FIXED_1)) {
+ return -EINVAL;
+ }
+
+ if (argc > 1 && (sscanf(argv[1], "%u%c",
+ &threshold_multiplier, &dummy) != 1)) {
+ return -EINVAL;
+ }
+
+ s = alloc_selector();
+ if (!s)
+ return -ENOMEM;
+
+ ps->context = s;
+
+ hst_set_weights(ps, base_weight);
+ s->threshold_multiplier = threshold_multiplier;
+ return 0;
+}
+
+static void free_paths(struct list_head *paths)
+{
+ struct path_info *pi, *next;
+
+ list_for_each_entry_safe(pi, next, paths, list) {
+ list_del(&pi->list);
+ kfree(pi);
+ }
+}
+
+static void hst_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+
+ free_paths(&s->valid_paths);
+ free_paths(&s->failed_paths);
+ kfree(s);
+ ps->context = NULL;
+}
+
+static int hst_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned int maxlen)
+{
+ unsigned int sz = 0;
+ struct path_info *pi;
+
+ if (!path) {
+ struct selector *s = ps->context;
+
+ DMEMIT("2 %u %u ", s->weights[0], s->threshold_multiplier);
+ } else {
+ pi = path->pscontext;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%llu %llu %llu ", pi->historical_service_time,
+ pi->outstanding, pi->stale_after);
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("0 ");
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static int hst_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ unsigned int repeat_count = HST_MIN_IO;
+ char dummy;
+ unsigned long flags;
+
+ /*
+ * Arguments: [<repeat_count>]
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (HST_MIN_IO) is used.
+ */
+ if (argc > 1) {
+ *error = "historical-service-time ps: incorrect number of arguments";
+ return -EINVAL;
+ }
+
+ if (argc && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
+ *error = "historical-service-time ps: invalid repeat count";
+ return -EINVAL;
+ }
+
+ /* allocate the path */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "historical-service-time ps: Error allocating path context";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ pi->repeat_count = repeat_count;
+
+ pi->historical_service_time = HST_FIXED_1;
+
+ spin_lock_init(&pi->lock);
+ pi->outstanding = 0;
+
+ pi->stale_after = 0;
+ pi->last_finish = 0;
+
+ path->pscontext = pi;
+
+ spin_lock_irqsave(&s->lock, flags);
+ list_add_tail(&pi->list, &s->valid_paths);
+ s->valid_count++;
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return 0;
+}
+
+static void hst_fail_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ list_move(&pi->list, &s->failed_paths);
+ s->valid_count--;
+ spin_unlock_irqrestore(&s->lock, flags);
+}
+
+static int hst_reinstate_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ list_move_tail(&pi->list, &s->valid_paths);
+ s->valid_count++;
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return 0;
+}
+
+static void hst_fill_compare(struct path_info *pi, u64 *hst,
+ u64 *out, u64 *stale)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pi->lock, flags);
+ *hst = pi->historical_service_time;
+ *out = pi->outstanding;
+ *stale = pi->stale_after;
+ spin_unlock_irqrestore(&pi->lock, flags);
+}
+
+/*
+ * Compare the estimated service time of 2 paths, pi1 and pi2,
+ * for the incoming I/O.
+ *
+ * Returns:
+ * < 0 : pi1 is better
+ * 0 : no difference between pi1 and pi2
+ * > 0 : pi2 is better
+ *
+ */
+static long long hst_compare(struct path_info *pi1, struct path_info *pi2,
+ u64 time_now, struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+ u64 hst1, hst2;
+ long long out1, out2, stale1, stale2;
+ int pi2_better, over_threshold;
+
+ hst_fill_compare(pi1, &hst1, &out1, &stale1);
+ hst_fill_compare(pi2, &hst2, &out2, &stale2);
+
+ /* Check whether the estimated latencies of the two paths are too
+ * similar. If so, skip the extra calculation and just compare
+ * outstanding requests; any unloaded path will then be preferred.
+ */
+ if (hst1 > hst2)
+ over_threshold = hst1 > (s->threshold_multiplier * hst2);
+ else
+ over_threshold = hst2 > (s->threshold_multiplier * hst1);
+
+ if (!over_threshold)
+ return out1 - out2;
+
+ /*
+ * If an unloaded path is stale, choose it. If both paths are unloaded,
+ * choose path that is the most stale.
+ * (If one path is loaded, choose the other)
+ */
+ if ((!out1 && stale1 < time_now) || (!out2 && stale2 < time_now) ||
+ (!out1 && !out2))
+ return (!out2 * stale1) - (!out1 * stale2);
+
+ /* Compare estimated service time. If outstanding is the same, we
+ * don't need to multiply
+ */
+ if (out1 == out2) {
+ pi2_better = hst1 > hst2;
+ } else {
+ /* Potential overflow with out >= 1024 */
+ if (unlikely(out1 >= HST_MAX_INFLIGHT ||
+ out2 >= HST_MAX_INFLIGHT)) {
+ /* If over 1023 in-flights, we may overflow if hst
+ * is at max. (With this shift we still overflow at
+ * 1048576 in-flights, which is high enough).
+ */
+ hst1 >>= HST_FIXED_SHIFT;
+ hst2 >>= HST_FIXED_SHIFT;
+ }
+ pi2_better = (1 + out1) * hst1 > (1 + out2) * hst2;
+ }
+
+ /* In the case that the 'winner' is stale, limit to equal usage. */
+ if (pi2_better) {
+ if (stale2 < time_now)
+ return out1 - out2;
+ return 1;
+ }
+ if (stale1 < time_now)
+ return out1 - out2;
+ return -1;
+}
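(For example, with threshold_multiplier = 4, hst1 = 100 and hst2 = 30: 100 > 4 * 30 is false, so the estimates are treated as comparable and the decision falls back to out1 - out2; only when one estimate exceeds the other by more than the multiplier does the full weighted comparison run.)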
+
+static struct dm_path *hst_select_path(struct path_selector *ps,
+ size_t nr_bytes)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL, *best = NULL;
+ u64 time_now = sched_clock();
+ struct dm_path *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ if (list_empty(&s->valid_paths))
+ goto out;
+
+ list_for_each_entry(pi, &s->valid_paths, list) {
+ if (!best || (hst_compare(pi, best, time_now, ps) < 0))
+ best = pi;
+ }
+
+ if (!best)
+ goto out;
+
+ /* Move last used path to end (least preferred in case of ties) */
+ list_move_tail(&best->list, &s->valid_paths);
+
+ ret = best->path;
+
+out:
+ spin_unlock_irqrestore(&s->lock, flags);
+ return ret;
+}
+
+static int hst_start_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pi->lock, flags);
+ pi->outstanding++;
+ spin_unlock_irqrestore(&pi->lock, flags);
+
+ return 0;
+}
+
+static u64 path_service_time(struct path_info *pi, u64 start_time)
+{
+ u64 sched_now = ktime_get_ns();
+
+ /* if a previous disk request has finished after this IO was
+ * sent to the hardware, pretend the submission happened
+ * serially.
+ */
+ if (time_after64(pi->last_finish, start_time))
+ start_time = pi->last_finish;
+
+ pi->last_finish = sched_now;
+ if (time_before64(sched_now, start_time))
+ return 0;
+
+ return sched_now - start_time;
+}
+
+static int hst_end_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes, u64 start_time)
+{
+ struct path_info *pi = path->pscontext;
+ struct selector *s = ps->context;
+ unsigned long flags;
+ u64 st;
+
+ spin_lock_irqsave(&pi->lock, flags);
+
+ st = path_service_time(pi, start_time);
+ pi->outstanding--;
+ pi->historical_service_time =
+ fixed_ema(pi->historical_service_time,
+ min(st * HST_FIXED_1, HST_FIXED_MAX),
+ hst_weight(ps, st));
+
+ /*
+ * On request end, mark path as fresh. If a path hasn't
+ * finished any requests within the fresh period, the estimated
+ * service time is considered too optimistic and we limit the
+ * maximum requests on that path.
+ */
+ pi->stale_after = pi->last_finish +
+ (s->valid_count * (pi->historical_service_time >> HST_FIXED_SHIFT));
+
+ spin_unlock_irqrestore(&pi->lock, flags);
+
+ return 0;
+}
+
+static struct path_selector_type hst_ps = {
+ .name = "historical-service-time",
+ .module = THIS_MODULE,
+ .table_args = 1,
+ .info_args = 3,
+ .create = hst_create,
+ .destroy = hst_destroy,
+ .status = hst_status,
+ .add_path = hst_add_path,
+ .fail_path = hst_fail_path,
+ .reinstate_path = hst_reinstate_path,
+ .select_path = hst_select_path,
+ .start_io = hst_start_io,
+ .end_io = hst_end_io,
+};
+
+static int __init dm_hst_init(void)
+{
+ int r = dm_register_path_selector(&hst_ps);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ DMINFO("version " HST_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit dm_hst_exit(void)
+{
+ int r = dm_unregister_path_selector(&hst_ps);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+}
+
+module_init(dm_hst_init);
+module_exit(dm_hst_exit);
+
+MODULE_DESCRIPTION(DM_NAME " measured service time oriented path selector");
+MODULE_AUTHOR("Khazhismel Kumykov <khazhy@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 4094c47eca7f..81dc5ff08909 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -92,7 +92,7 @@ struct journal_entry {
} s;
__u64 sector;
} u;
- commit_id_t last_bytes[0];
+ commit_id_t last_bytes[];
/* __u8 tag[0]; */
};
@@ -1553,8 +1553,6 @@ static void integrity_metadata(struct work_struct *w)
char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
sector_t sector;
unsigned sectors_to_process;
- sector_t save_metadata_block;
- unsigned save_metadata_offset;
if (unlikely(ic->mode == 'R'))
goto skip_io;
@@ -1605,8 +1603,6 @@ static void integrity_metadata(struct work_struct *w)
goto skip_io;
}
- save_metadata_block = dio->metadata_block;
- save_metadata_offset = dio->metadata_offset;
sector = dio->range.logical_sector;
sectors_to_process = dio->range.n_sectors;
@@ -2657,7 +2653,7 @@ static void bitmap_flush_work(struct work_struct *work)
dm_integrity_flush_buffers(ic);
if (ic->meta_dev)
- blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
+ blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
limit = ic->provided_data_sectors;
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 8ea20b56b4d6..e3d35c6c9f71 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -127,7 +127,7 @@ struct pending_block {
char *data;
u32 datalen;
struct list_head list;
- struct bio_vec vecs[0];
+ struct bio_vec vecs[];
};
struct per_bio_data {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3e500098132f..78cff42d987e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -439,7 +439,7 @@ failed:
}
/*
- * dm_report_EIO() is a macro instead of a function to make pr_debug()
+ * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
* report the function name and line number of the function from which
* it has been invoked.
*/
@@ -447,43 +447,25 @@ failed:
do { \
struct mapped_device *md = dm_table_get_md((m)->ti->table); \
\
- pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
- dm_device_name(md), \
- test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
- dm_noflush_suspending((m)->ti)); \
+ DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
+ dm_device_name(md), \
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
+ dm_noflush_suspending((m)->ti)); \
} while (0)
/*
* Check whether bios must be queued in the device-mapper core rather
* than here in the target.
- *
- * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
- * the same value then we are not between multipath_presuspend()
- * and multipath_resume() calls and we have no need to check
- * for the DMF_NOFLUSH_SUSPENDING flag.
*/
-static bool __must_push_back(struct multipath *m, unsigned long flags)
+static bool __must_push_back(struct multipath *m)
{
- return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
- dm_noflush_suspending(m->ti));
+ return dm_noflush_suspending(m->ti);
}
-/*
- * Following functions use READ_ONCE to get atomic access to
- * all m->flags to avoid taking spinlock
- */
static bool must_push_back_rq(struct multipath *m)
{
- unsigned long flags = READ_ONCE(m->flags);
- return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
-}
-
-static bool must_push_back_bio(struct multipath *m)
-{
- unsigned long flags = READ_ONCE(m->flags);
- return __must_push_back(m, flags);
+ return test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m);
}
/*
@@ -567,7 +549,8 @@ static void multipath_release_clone(struct request *clone,
if (pgpath && pgpath->pg->ps.type->end_io)
pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
&pgpath->path,
- mpio->nr_bytes);
+ mpio->nr_bytes,
+ clone->io_start_time_ns);
}
blk_put_request(clone);
@@ -619,7 +602,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
return DM_MAPIO_SUBMITTED;
if (!pgpath) {
- if (must_push_back_bio(m))
+ if (__must_push_back(m))
return DM_MAPIO_REQUEUE;
dm_report_EIO(m);
return DM_MAPIO_KILL;
@@ -709,15 +692,38 @@ static void process_queued_bios(struct work_struct *work)
* If we run out of usable paths, should we queue I/O or error it?
*/
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
- bool save_old_value)
+ bool save_old_value, const char *caller)
{
unsigned long flags;
+ bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
+ const char *dm_dev_name = dm_device_name(dm_table_get_md(m->ti->table));
+
+ DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
+ dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
spin_lock_irqsave(&m->lock, flags);
- assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
- (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
- (!save_old_value && queue_if_no_path));
+
+ queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+
+ if (save_old_value) {
+ if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
+ DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
+ dm_dev_name);
+ } else
+ assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
+ } else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
+ /* due to "fail_if_no_path" message, need to honor it. */
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ }
assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
+
+ DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
+ dm_dev_name, __func__,
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
+ dm_noflush_suspending(m->ti));
+
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
@@ -738,7 +744,7 @@ static void queue_if_no_path_timeout_work(struct timer_list *t)
struct mapped_device *md = dm_table_get_md(m->ti->table);
DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md));
- queue_if_no_path(m, false, false);
+ queue_if_no_path(m, false, false, __func__);
}
/*
@@ -1078,7 +1084,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
argc--;
if (!strcasecmp(arg_name, "queue_if_no_path")) {
- r = queue_if_no_path(m, true, false);
+ r = queue_if_no_path(m, true, false, __func__);
continue;
}
@@ -1279,7 +1285,9 @@ static int fail_path(struct pgpath *pgpath)
if (!pgpath->is_active)
goto out;
- DMWARN("Failing path %s.", pgpath->path.dev->name);
+ DMWARN("%s: Failing path %s.",
+ dm_device_name(dm_table_get_md(m->ti->table)),
+ pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
pgpath->is_active = false;
@@ -1318,7 +1326,9 @@ static int reinstate_path(struct pgpath *pgpath)
if (pgpath->is_active)
goto out;
- DMWARN("Reinstating path %s.", pgpath->path.dev->name);
+ DMWARN("%s: Reinstating path %s.",
+ dm_device_name(dm_table_get_md(m->ti->table)),
+ pgpath->path.dev->name);
r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
if (r)
@@ -1617,7 +1627,8 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
struct path_selector *ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
+ clone->io_start_time_ns);
}
return r;
@@ -1640,7 +1651,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
if (atomic_read(&m->nr_valid_paths) == 0 &&
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- if (must_push_back_bio(m)) {
+ if (__must_push_back(m)) {
r = DM_ENDIO_REQUEUE;
} else {
dm_report_EIO(m);
@@ -1661,23 +1672,27 @@ done:
struct path_selector *ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
+ dm_start_time_ns_from_clone(clone));
}
return r;
}
/*
- * Suspend can't complete until all the I/O is processed so if
- * the last path fails we must error any remaining I/O.
- * Note that if the freeze_bdev fails while suspending, the
- * queue_if_no_path state is lost - userspace should reset it.
+ * Suspend with flush can't complete until all the I/O is processed
+ * so if the last path fails we must error any remaining I/O.
+ * - Note that if the freeze_bdev fails while suspending, the
+ * queue_if_no_path state is lost - userspace should reset it.
+ * Otherwise, during noflush suspend, queue_if_no_path will not change.
*/
static void multipath_presuspend(struct dm_target *ti)
{
struct multipath *m = ti->private;
- queue_if_no_path(m, false, true);
+ /* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
+ if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
+ queue_if_no_path(m, false, true, __func__);
}
static void multipath_postsuspend(struct dm_target *ti)
@@ -1698,8 +1713,16 @@ static void multipath_resume(struct dm_target *ti)
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
+ if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
+ set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ }
+
+ DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
+ dm_device_name(dm_table_get_md(m->ti->table)), __func__,
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
+
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1859,13 +1882,13 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
- r = queue_if_no_path(m, true, false);
+ r = queue_if_no_path(m, true, false, __func__);
spin_lock_irqsave(&m->lock, flags);
enable_nopath_timeout(m);
spin_unlock_irqrestore(&m->lock, flags);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
- r = queue_if_no_path(m, false, false);
+ r = queue_if_no_path(m, false, false, __func__);
disable_nopath_timeout(m);
goto out;
}
@@ -1918,7 +1941,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
int r;
current_pgpath = READ_ONCE(m->current_pgpath);
- if (!current_pgpath)
+ if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
current_pgpath = choose_pgpath(m, 0);
if (current_pgpath) {
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index b6eb5365b1a4..c47bc0e20275 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -74,7 +74,7 @@ struct path_selector_type {
int (*start_io) (struct path_selector *ps, struct dm_path *path,
size_t nr_bytes);
int (*end_io) (struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes);
+ size_t nr_bytes, u64 start_time);
};
/* Register a path selector */
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 969c4f1a3633..5fd018d18418 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -227,7 +227,7 @@ static int ql_start_io(struct path_selector *ps, struct dm_path *path,
}
static int ql_end_io(struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes)
+ size_t nr_bytes, u64 start_time)
{
struct path_info *pi = path->pscontext;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9a18bef0a5ff..10e8b2fe787b 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -254,7 +254,7 @@ struct raid_set {
int mode;
} journal_dev;
- struct raid_dev dev[0];
+ struct raid_dev dev[];
};
static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 089aed57e083..2f655d9f4200 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -83,7 +83,7 @@ struct mirror_set {
struct work_struct trigger_event;
unsigned nr_mirrors;
- struct mirror mirror[0];
+ struct mirror mirror[];
};
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 3f8577e2c13b..f60c02512121 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -547,7 +547,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
md->tag_set->ops = &dm_mq_ops;
md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
md->tag_set->numa_node = md->numa_node_id;
- md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+ md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
md->tag_set->driver_data = md;
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index f006a9005593..9cfda665e9eb 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -309,7 +309,7 @@ static int st_start_io(struct path_selector *ps, struct dm_path *path,
}
static int st_end_io(struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes)
+ size_t nr_bytes, u64 start_time)
{
struct path_info *pi = path->pscontext;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 71417048256a..35d368c418d0 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -56,7 +56,7 @@ struct dm_stat {
size_t percpu_alloc_size;
size_t histogram_alloc_size;
struct dm_stat_percpu *stat_percpu[NR_CPUS];
- struct dm_stat_shared stat_shared[0];
+ struct dm_stat_shared stat_shared[];
};
#define STAT_PRECISE_TIMESTAMPS 1
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index fa813c0f993d..151d022b032d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -41,7 +41,7 @@ struct stripe_c {
/* Work struct used for triggering events*/
struct work_struct trigger_event;
- struct stripe stripe[0];
+ struct stripe stripe[];
};
/*
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 8a0f057b8122..bff4c7fa1cd2 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -53,7 +53,7 @@ struct switch_ctx {
/*
* Array of dm devices to switch between.
*/
- struct switch_path path_list[0];
+ struct switch_path path_list[];
};
static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0a2cc197f62b..8277b959e00b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -279,7 +279,6 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q;
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
sector_t dev_size =
@@ -288,22 +287,6 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
- /*
- * Some devices exist without request functions,
- * such as loop devices not yet bound to backing files.
- * Forbid the use of such devices.
- */
- q = bdev_get_queue(bdev);
- if (!q || !q->make_request_fn) {
- DMWARN("%s: %s is not yet initialised: "
- "start=%llu, len=%llu, dev_size=%llu",
- dm_device_name(ti->table->md), bdevname(bdev, b),
- (unsigned long long)start,
- (unsigned long long)len,
- (unsigned long long)dev_size);
- return 1;
- }
-
if (!dev_size)
return 0;
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 613c171b1b6d..74f3c506f084 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -234,10 +234,6 @@ static int persistent_memory_claim(struct dm_writecache *wc)
wc->memory_vmapped = false;
- if (!wc->ssd_dev->dax_dev) {
- r = -EOPNOTSUPP;
- goto err1;
- }
s = wc->memory_map_size;
p = s >> PAGE_SHIFT;
if (!p) {
@@ -1143,6 +1139,42 @@ static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
return r;
}
+static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
+{
+ /*
+ * clflushopt performs better with block size 1024, 2048, 4096
+ * non-temporal stores perform better with block size 512
+ *
+ * block size 512 1024 2048 4096
+ * movnti 496 MB/s 642 MB/s 725 MB/s 744 MB/s
+ * clflushopt 373 MB/s 688 MB/s 1.1 GB/s 1.2 GB/s
+ *
+ * We see that movnti performs better for 512-byte blocks, and
+ * clflushopt performs better for 1024-byte and larger blocks. So, we
+ * prefer clflushopt for sizes >= 768.
+ *
+ * NOTE: this happens to be the case now (with dm-writecache's single
+ * threaded model) but re-evaluate this once memcpy_flushcache() is
+ * enabled to use movdir64b which might invalidate this performance
+ * advantage seen with cache-allocating-writes plus flushing.
+ */
+#ifdef CONFIG_X86
+ if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
+ likely(boot_cpu_data.x86_clflush_size == 64) &&
+ likely(size >= 768)) {
+ do {
+ memcpy((void *)dest, (void *)source, 64);
+ clflushopt((void *)dest);
+ dest += 64;
+ source += 64;
+ size -= 64;
+ } while (size >= 64);
+ return;
+ }
+#endif
+ memcpy_flushcache(dest, source, size);
+}
+
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
void *buf;
@@ -1168,7 +1200,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
}
} else {
flush_dcache_page(bio_page(bio));
- memcpy_flushcache(data, buf, size);
+ memcpy_flushcache_optimized(data, buf, size);
}
bvec_kunmap_irq(buf, &flags);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 369de15c4e80..130b5a6d9f12 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -16,7 +16,7 @@
/*
* Metadata version.
*/
-#define DMZ_META_VER 1
+#define DMZ_META_VER 2
/*
* On-disk super block magic.
@@ -69,8 +69,17 @@ struct dmz_super {
/* Checksum */
__le32 crc; /* 48 */
+ /* DM-Zoned label */
+ u8 dmz_label[32]; /* 80 */
+
+ /* DM-Zoned UUID */
+ u8 dmz_uuid[16]; /* 96 */
+
+ /* Device UUID */
+ u8 dev_uuid[16]; /* 112 */
+
/* Padding to full 512B sector */
- u8 reserved[464]; /* 512 */
+ u8 reserved[400]; /* 512 */
};
/*
@@ -122,8 +131,10 @@ enum {
*/
struct dmz_sb {
sector_t block;
+ struct dmz_dev *dev;
struct dmz_mblock *mblk;
struct dmz_super *sb;
+ struct dm_zone *zone;
};
/*
@@ -131,28 +142,41 @@ struct dmz_sb {
*/
struct dmz_metadata {
struct dmz_dev *dev;
+ unsigned int nr_devs;
+
+ char devname[BDEVNAME_SIZE];
+ char label[BDEVNAME_SIZE];
+ uuid_t uuid;
sector_t zone_bitmap_size;
unsigned int zone_nr_bitmap_blocks;
unsigned int zone_bits_per_mblk;
+ sector_t zone_nr_blocks;
+ sector_t zone_nr_blocks_shift;
+
+ sector_t zone_nr_sectors;
+ sector_t zone_nr_sectors_shift;
+
unsigned int nr_bitmap_blocks;
unsigned int nr_map_blocks;
+ unsigned int nr_zones;
unsigned int nr_useable_zones;
unsigned int nr_meta_blocks;
unsigned int nr_meta_zones;
unsigned int nr_data_zones;
+ unsigned int nr_cache_zones;
unsigned int nr_rnd_zones;
unsigned int nr_reserved_seq;
unsigned int nr_chunks;
/* Zone information array */
- struct dm_zone *zones;
+ struct xarray zones;
- struct dm_zone *sb_zone;
struct dmz_sb sb[2];
unsigned int mblk_primary;
+ unsigned int sb_version;
u64 sb_gen;
unsigned int min_nr_mblks;
unsigned int max_nr_mblks;
@@ -168,15 +192,11 @@ struct dmz_metadata {
/* Zone allocation management */
struct mutex map_lock;
struct dmz_mblock **map_mblk;
- unsigned int nr_rnd;
- atomic_t unmap_nr_rnd;
- struct list_head unmap_rnd_list;
- struct list_head map_rnd_list;
- unsigned int nr_seq;
- atomic_t unmap_nr_seq;
- struct list_head unmap_seq_list;
- struct list_head map_seq_list;
+ unsigned int nr_cache;
+ atomic_t unmap_nr_cache;
+ struct list_head unmap_cache_list;
+ struct list_head map_cache_list;
atomic_t nr_reserved_seq_zones;
struct list_head reserved_seq_zones_list;
@@ -184,22 +204,65 @@ struct dmz_metadata {
wait_queue_head_t free_wq;
};
+#define dmz_zmd_info(zmd, format, args...) \
+ DMINFO("(%s): " format, (zmd)->label, ## args)
+
+#define dmz_zmd_err(zmd, format, args...) \
+ DMERR("(%s): " format, (zmd)->label, ## args)
+
+#define dmz_zmd_warn(zmd, format, args...) \
+ DMWARN("(%s): " format, (zmd)->label, ## args)
+
+#define dmz_zmd_debug(zmd, format, args...) \
+ DMDEBUG("(%s): " format, (zmd)->label, ## args)
/*
* Various accessors
*/
-unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
+static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return ((unsigned int)(zone - zmd->zones));
+ if (WARN_ON(!zone))
+ return 0;
+
+ return zone->id - zone->dev->zone_offset;
}
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
+ unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
+
+ return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
}
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
+ unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
+
+ return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
+}
+
+unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_blocks;
+}
+
+unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_blocks_shift;
+}
+
+unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_sectors;
+}
+
+unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_sectors_shift;
+}
+
+unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
+{
+ return zmd->nr_zones;
}
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
@@ -207,14 +270,88 @@ unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
return zmd->nr_chunks;
}
-unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
+unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
{
- return zmd->nr_rnd;
+ return zmd->dev[idx].nr_rnd;
}
-unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
+unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
{
- return atomic_read(&zmd->unmap_nr_rnd);
+ return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
+}
+
+unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
+{
+ return zmd->nr_cache;
+}
+
+unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
+{
+ return atomic_read(&zmd->unmap_nr_cache);
+}
+
+unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
+{
+ return zmd->dev[idx].nr_seq;
+}
+
+unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
+{
+ return atomic_read(&zmd->dev[idx].unmap_nr_seq);
+}
+
+static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
+{
+ return xa_load(&zmd->zones, zone_id);
+}
+
+static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
+ unsigned int zone_id, struct dmz_dev *dev)
+{
+ struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);
+
+ if (!zone)
+ return ERR_PTR(-ENOMEM);
+
+ if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
+ kfree(zone);
+ return ERR_PTR(-EBUSY);
+ }
+
+ INIT_LIST_HEAD(&zone->link);
+ atomic_set(&zone->refcount, 0);
+ zone->id = zone_id;
+ zone->chunk = DMZ_MAP_UNMAPPED;
+ zone->dev = dev;
+
+ return zone;
+}
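dmz_insert() above swaps the old flat zone array for an xarray keyed by zone id, so the id space may be sparse (multi-device offsets, skipped runt zones) without wasting memory. A self-contained sketch of the same insert/lookup pattern, with struct dm_zone reduced to the fields used here:

    #include <linux/xarray.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    static DEFINE_XARRAY(zone_array);	/* zone id -> struct dm_zone * */

    static struct dm_zone *zone_create(unsigned int id)
    {
    	struct dm_zone *z = kzalloc(sizeof(*z), GFP_KERNEL);

    	if (!z)
    		return ERR_PTR(-ENOMEM);
    	z->id = id;
    	/* xa_insert() rejects an already-populated index with -EBUSY */
    	if (xa_insert(&zone_array, id, z, GFP_KERNEL)) {
    		kfree(z);
    		return ERR_PTR(-EBUSY);
    	}
    	return z;
    }

    static struct dm_zone *zone_lookup(unsigned int id)
    {
    	return xa_load(&zone_array, id);	/* NULL if id was never inserted */
    }

Because xa_load() yields NULL for absent ids, the callers changed later in this patch check the dmz_get() result instead of indexing blindly.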
+
+const char *dmz_metadata_label(struct dmz_metadata *zmd)
+{
+ return (const char *)zmd->label;
+}
+
+bool dmz_check_dev(struct dmz_metadata *zmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < zmd->nr_devs; i++) {
+ if (!dmz_check_bdev(&zmd->dev[i]))
+ return false;
+ }
+ return true;
+}
+
+bool dmz_dev_is_dying(struct dmz_metadata *zmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < zmd->nr_devs; i++) {
+ if (dmz_bdev_is_dying(&zmd->dev[i]))
+ return true;
+ }
+ return false;
}
/*
@@ -402,9 +539,10 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
{
struct dmz_mblock *mblk, *m;
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
+ struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
struct bio *bio;
- if (dmz_bdev_is_dying(zmd->dev))
+ if (dmz_bdev_is_dying(dev))
return ERR_PTR(-EIO);
/* Get a new block and a BIO to read it */
@@ -440,7 +578,7 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
/* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, zmd->dev->bdev);
+ bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
@@ -537,6 +675,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
sector_t mblk_no)
{
struct dmz_mblock *mblk;
+ struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
/* Check rbtree */
spin_lock(&zmd->mblk_lock);
@@ -555,7 +694,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
dmz_release_mblock(zmd, mblk);
- dmz_check_bdev(zmd->dev);
+ dmz_check_bdev(dev);
return ERR_PTR(-EIO);
}
@@ -579,10 +718,11 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
unsigned int set)
{
+ struct dmz_dev *dev = zmd->sb[set].dev;
sector_t block = zmd->sb[set].block + mblk->no;
struct bio *bio;
- if (dmz_bdev_is_dying(zmd->dev))
+ if (dmz_bdev_is_dying(dev))
return -EIO;
bio = bio_alloc(GFP_NOIO, 1);
@@ -594,7 +734,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, zmd->dev->bdev);
+ bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
@@ -607,13 +747,16 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
/*
* Read/write a metadata block.
*/
-static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
- struct page *page)
+static int dmz_rdwr_block(struct dmz_dev *dev, int op,
+ sector_t block, struct page *page)
{
struct bio *bio;
int ret;
- if (dmz_bdev_is_dying(zmd->dev))
+ if (WARN_ON(!dev))
+ return -EIO;
+
+ if (dmz_bdev_is_dying(dev))
return -EIO;
bio = bio_alloc(GFP_NOIO, 1);
@@ -621,14 +764,14 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
return -ENOMEM;
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, zmd->dev->bdev);
+ bio_set_dev(bio, dev->bdev);
bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
bio_put(bio);
if (ret)
- dmz_check_bdev(zmd->dev);
+ dmz_check_bdev(dev);
return ret;
}
@@ -637,18 +780,32 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
*/
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
- sector_t block = zmd->sb[set].block;
struct dmz_mblock *mblk = zmd->sb[set].mblk;
struct dmz_super *sb = zmd->sb[set].sb;
+ struct dmz_dev *dev = zmd->sb[set].dev;
+ sector_t sb_block;
u64 sb_gen = zmd->sb_gen + 1;
int ret;
sb->magic = cpu_to_le32(DMZ_MAGIC);
- sb->version = cpu_to_le32(DMZ_META_VER);
+
+ sb->version = cpu_to_le32(zmd->sb_version);
+ if (zmd->sb_version > 1) {
+ BUILD_BUG_ON(UUID_SIZE != 16);
+ export_uuid(sb->dmz_uuid, &zmd->uuid);
+ memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
+ export_uuid(sb->dev_uuid, &dev->uuid);
+ }
sb->gen = cpu_to_le64(sb_gen);
- sb->sb_block = cpu_to_le64(block);
+ /*
+ * The metadata always references the absolute block address,
+ * i.e. relative to the entire block range, not the per-device
+ * block address.
+ */
+ sb_block = (u64)zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
+ sb->sb_block = cpu_to_le64(sb_block);
sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);
@@ -659,9 +816,10 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
sb->crc = 0;
sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));
- ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
+ ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
+ mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
return ret;
}
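The CRC above is computed over the whole 4 KB block with the crc field itself zeroed and the generation number as the crc32_le() seed, so a verifier has to repeat the identical procedure. A sketch of the symmetric read-side check, matching dmz_check_sb() below:

    u32 stored_crc = le32_to_cpu(sb->crc);
    u64 gen = le64_to_cpu(sb->gen);

    sb->crc = 0;	/* zero the field exactly as the writer did before hashing */
    if (crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE) != stored_crc)
    	return -ENXIO;	/* corrupt or torn superblock */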
@@ -674,6 +832,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
unsigned int set)
{
struct dmz_mblock *mblk;
+ struct dmz_dev *dev = zmd->sb[set].dev;
struct blk_plug plug;
int ret = 0, nr_mblks_submitted = 0;
@@ -695,7 +854,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
clear_bit(DMZ_META_ERROR, &mblk->state);
- dmz_check_bdev(zmd->dev);
+ dmz_check_bdev(dev);
ret = -EIO;
}
nr_mblks_submitted--;
@@ -703,7 +862,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
return ret;
}
@@ -740,6 +899,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
{
struct dmz_mblock *mblk;
struct list_head write_list;
+ struct dmz_dev *dev;
int ret;
if (WARN_ON(!zmd))
@@ -753,6 +913,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
* from modifying metadata.
*/
down_write(&zmd->mblk_sem);
+ dev = zmd->sb[zmd->mblk_primary].dev;
/*
* This is called from the target flush work and reclaim work.
@@ -760,7 +921,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
dmz_lock_flush(zmd);
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_bdev_is_dying(dev)) {
ret = -EIO;
goto out;
}
@@ -772,7 +933,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
goto err;
}
@@ -821,7 +982,7 @@ err:
list_splice(&write_list, &zmd->mblk_dirty_list);
spin_unlock(&zmd->mblk_lock);
}
- if (!dmz_check_bdev(zmd->dev))
+ if (!dmz_check_bdev(dev))
ret = -EIO;
goto out;
}
@@ -829,12 +990,31 @@ err:
/*
* Check super block.
*/
-static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
+static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
+ bool tertiary)
{
+ struct dmz_super *sb = dsb->sb;
+ struct dmz_dev *dev = dsb->dev;
unsigned int nr_meta_zones, nr_data_zones;
- struct dmz_dev *dev = zmd->dev;
u32 crc, stored_crc;
- u64 gen;
+ u64 gen, sb_block;
+
+ if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
+ dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
+ DMZ_MAGIC, le32_to_cpu(sb->magic));
+ return -ENXIO;
+ }
+
+ zmd->sb_version = le32_to_cpu(sb->version);
+ if (zmd->sb_version > DMZ_META_VER) {
+ dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
+ DMZ_META_VER, zmd->sb_version);
+ return -EINVAL;
+ }
+ if (zmd->sb_version < 2 && tertiary) {
+ dmz_dev_err(dev, "Tertiary superblocks are not supported");
+ return -EINVAL;
+ }
gen = le64_to_cpu(sb->gen);
stored_crc = le32_to_cpu(sb->crc);
@@ -846,20 +1026,57 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
return -ENXIO;
}
- if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
- dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
- DMZ_MAGIC, le32_to_cpu(sb->magic));
- return -ENXIO;
- }
+ sb_block = le64_to_cpu(sb->sb_block);
+ if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
+ dmz_dev_err(dev, "Invalid superblock position "
+ "(is %llu expected %llu)",
+ sb_block,
+ (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
+ return -EINVAL;
+ }
+ if (zmd->sb_version > 1) {
+ uuid_t sb_uuid;
+
+ import_uuid(&sb_uuid, sb->dmz_uuid);
+ if (uuid_is_null(&sb_uuid)) {
+ dmz_dev_err(dev, "NULL DM-Zoned uuid");
+ return -ENXIO;
+ } else if (uuid_is_null(&zmd->uuid)) {
+ uuid_copy(&zmd->uuid, &sb_uuid);
+ } else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
+ dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
+ "is %pUl expected %pUl",
+ &sb_uuid, &zmd->uuid);
+ return -ENXIO;
+ }
+ if (!strlen(zmd->label))
+ memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
+ else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
+ dmz_dev_err(dev, "mismatching DM-Zoned label, "
+ "is %s expected %s",
+ sb->dmz_label, zmd->label);
+ return -ENXIO;
+ }
+ import_uuid(&dev->uuid, sb->dev_uuid);
+ if (uuid_is_null(&dev->uuid)) {
+ dmz_dev_err(dev, "NULL device uuid");
+ return -ENXIO;
+ }
- if (le32_to_cpu(sb->version) != DMZ_META_VER) {
- dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
- DMZ_META_VER, le32_to_cpu(sb->version));
- return -ENXIO;
+ if (tertiary) {
+ /*
+ * Generation number should be 0, but it doesn't
+ * really matter if it isn't.
+ */
+ if (gen != 0)
+ dmz_dev_warn(dev, "Invalid generation %llu",
+ gen);
+ return 0;
+ }
}
- nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
- >> dev->zone_nr_blocks_shift;
+ nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
+ >> zmd->zone_nr_blocks_shift;
if (!nr_meta_zones ||
nr_meta_zones >= zmd->nr_rnd_zones) {
dmz_dev_err(dev, "Invalid number of metadata blocks");
@@ -895,10 +1112,13 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
/*
* Read the first or second super block from disk.
*/
-static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
+static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
- return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
- zmd->sb[set].mblk->page);
+ dmz_zmd_debug(zmd, "read superblock set %d dev %s block %llu",
+ set, sb->dev->name, sb->block);
+
+ return dmz_rdwr_block(sb->dev, REQ_OP_READ,
+ sb->block, sb->mblk->page);
}
/*
@@ -908,8 +1128,9 @@ static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
*/
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
- unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
struct dmz_mblock *mblk;
+ unsigned int zone_id = zmd->sb[0].zone->id;
int i;
/* Allocate a block */
@@ -922,24 +1143,29 @@ static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
/* Bad first super block: search for the second one */
zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
- for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
- if (dmz_read_sb(zmd, 1) != 0)
+ zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
+ zmd->sb[1].dev = zmd->sb[0].dev;
+ for (i = 1; i < zmd->nr_rnd_zones; i++) {
+ if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
break;
if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
return 0;
zmd->sb[1].block += zone_nr_blocks;
+ zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
}
dmz_free_mblock(zmd, mblk);
zmd->sb[1].mblk = NULL;
+ zmd->sb[1].zone = NULL;
+ zmd->sb[1].dev = NULL;
return -EIO;
}
/*
- * Read the first or second super block from disk.
+ * Read a super block from disk.
*/
-static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
+static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
struct dmz_mblock *mblk;
int ret;
@@ -949,14 +1175,14 @@ static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
if (!mblk)
return -ENOMEM;
- zmd->sb[set].mblk = mblk;
- zmd->sb[set].sb = mblk->data;
+ sb->mblk = mblk;
+ sb->sb = mblk->data;
/* Read super block */
- ret = dmz_read_sb(zmd, set);
+ ret = dmz_read_sb(zmd, sb, set);
if (ret) {
dmz_free_mblock(zmd, mblk);
- zmd->sb[set].mblk = NULL;
+ sb->mblk = NULL;
return ret;
}
@@ -972,14 +1198,13 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
struct page *page;
int i, ret;
- dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);
+ dmz_dev_warn(zmd->sb[dst_set].dev,
+ "Metadata set %u invalid: recovering", dst_set);
if (dst_set == 0)
- zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
- else {
- zmd->sb[1].block = zmd->sb[0].block +
- (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
- }
+ zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
+ else
+ zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
page = alloc_page(GFP_NOIO);
if (!page)
@@ -987,11 +1212,11 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
/* Copy metadata blocks */
for (i = 1; i < zmd->nr_meta_blocks; i++) {
- ret = dmz_rdwr_block(zmd, REQ_OP_READ,
+ ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
zmd->sb[src_set].block + i, page);
if (ret)
goto out;
- ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
+ ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
zmd->sb[dst_set].block + i, page);
if (ret)
goto out;
@@ -1023,53 +1248,73 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
u64 sb_gen[2] = {0, 0};
int ret;
+ if (!zmd->sb[0].zone) {
+ dmz_zmd_err(zmd, "Primary super block zone not set");
+ return -ENXIO;
+ }
+
/* Read and check the primary super block */
- zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
- ret = dmz_get_sb(zmd, 0);
+ zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
+ zmd->sb[0].dev = zmd->sb[0].zone->dev;
+ ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
if (ret) {
- dmz_dev_err(zmd->dev, "Read primary super block failed");
+ dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
return ret;
}
- ret = dmz_check_sb(zmd, zmd->sb[0].sb);
+ ret = dmz_check_sb(zmd, &zmd->sb[0], false);
/* Read and check secondary super block */
if (ret == 0) {
sb_good[0] = true;
- zmd->sb[1].block = zmd->sb[0].block +
- (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
- ret = dmz_get_sb(zmd, 1);
+ if (!zmd->sb[1].zone) {
+ unsigned int zone_id =
+ zmd->sb[0].zone->id + zmd->nr_meta_zones;
+
+ zmd->sb[1].zone = dmz_get(zmd, zone_id);
+ }
+ zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
+ zmd->sb[1].dev = zmd->sb[0].dev;
+ ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
} else
ret = dmz_lookup_secondary_sb(zmd);
if (ret) {
- dmz_dev_err(zmd->dev, "Read secondary super block failed");
+ dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
return ret;
}
- ret = dmz_check_sb(zmd, zmd->sb[1].sb);
+ ret = dmz_check_sb(zmd, &zmd->sb[1], false);
if (ret == 0)
sb_good[1] = true;
/* Use highest generation sb first */
if (!sb_good[0] && !sb_good[1]) {
- dmz_dev_err(zmd->dev, "No valid super block found");
+ dmz_zmd_err(zmd, "No valid super block found");
return -EIO;
}
if (sb_good[0])
sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
- else
+ else {
ret = dmz_recover_mblocks(zmd, 0);
+ if (ret) {
+ dmz_dev_err(zmd->sb[0].dev,
+ "Recovery of superblock 0 failed");
+ return -EIO;
+ }
+ }
if (sb_good[1])
sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
- else
+ else {
ret = dmz_recover_mblocks(zmd, 1);
- if (ret) {
- dmz_dev_err(zmd->dev, "Recovery failed");
- return -EIO;
+ if (ret) {
+ dmz_dev_err(zmd->sb[1].dev,
+ "Recovery of superblock 1 failed");
+ return -EIO;
+ }
}
if (sb_gen[0] >= sb_gen[1]) {
@@ -1080,32 +1325,70 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
zmd->mblk_primary = 1;
}
- dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
+ dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
+ "Using super block %u (gen %llu)",
zmd->mblk_primary, zmd->sb_gen);
- return 0;
+ if (zmd->sb_version > 1) {
+ int i;
+ struct dmz_sb *sb;
+
+ sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
+ if (!sb)
+ return -ENOMEM;
+ for (i = 1; i < zmd->nr_devs; i++) {
+ sb->block = 0;
+ sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
+ sb->dev = &zmd->dev[i];
+ if (!dmz_is_meta(sb->zone)) {
+ dmz_dev_err(sb->dev,
+ "Tertiary super block zone %u not marked as metadata zone",
+ sb->zone->id);
+ ret = -EINVAL;
+ goto out_kfree;
+ }
+ ret = dmz_get_sb(zmd, sb, i + 1);
+ if (ret) {
+ dmz_dev_err(sb->dev,
+ "Read tertiary super block failed");
+ dmz_free_mblock(zmd, sb->mblk);
+ goto out_kfree;
+ }
+ ret = dmz_check_sb(zmd, sb, true);
+ dmz_free_mblock(zmd, sb->mblk);
+ if (ret == -EINVAL)
+ goto out_kfree;
+ }
+ out_kfree:
+ kfree(sb);
+ }
+ return ret;
}
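Worked example of the arbitration above: with sb_gen[0] = 5 and sb_gen[1] = 7, the >= comparison selects set 1 as mblk_primary; on equal generations it favours set 0, so two identical copies on a freshly formatted device start from the first one.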
/*
* Initialize a zone descriptor.
*/
-static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
+static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
{
- struct dmz_metadata *zmd = data;
- struct dm_zone *zone = &zmd->zones[idx];
- struct dmz_dev *dev = zmd->dev;
+ struct dmz_dev *dev = data;
+ struct dmz_metadata *zmd = dev->metadata;
+ int idx = num + dev->zone_offset;
+ struct dm_zone *zone;
+
+ zone = dmz_insert(zmd, idx, dev);
+ if (IS_ERR(zone))
+ return PTR_ERR(zone);
- /* Ignore the eventual last runt (smaller) zone */
- if (blkz->len != dev->zone_nr_sectors) {
- if (blkz->start + blkz->len == dev->capacity)
+ if (blkz->len != zmd->zone_nr_sectors) {
+ if (zmd->sb_version > 1) {
+ /* Ignore a possible runt (smaller) zone */
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ return 0;
+ } else if (blkz->start + blkz->len == dev->capacity)
return 0;
return -ENXIO;
}
- INIT_LIST_HEAD(&zone->link);
- atomic_set(&zone->refcount, 0);
- zone->chunk = DMZ_MAP_UNMAPPED;
-
switch (blkz->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);
@@ -1131,13 +1414,45 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
zmd->nr_useable_zones++;
if (dmz_is_rnd(zone)) {
zmd->nr_rnd_zones++;
- if (!zmd->sb_zone) {
- /* Super block zone */
- zmd->sb_zone = zone;
+ if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
+ /* Primary super block zone */
+ zmd->sb[0].zone = zone;
}
}
+ if (zmd->nr_devs > 1 && num == 0) {
+ /*
+ * Tertiary superblock zones are always at the
+ * start of the zoned devices, so mark them
+ * as metadata zones.
+ */
+ set_bit(DMZ_META, &zone->flags);
+ }
}
+ return 0;
+}
+
+static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
+{
+ int idx;
+ sector_t zone_offset = 0;
+
+ for (idx = 0; idx < dev->nr_zones; idx++) {
+ struct dm_zone *zone;
+
+ zone = dmz_insert(zmd, idx, dev);
+ if (IS_ERR(zone))
+ return PTR_ERR(zone);
+ set_bit(DMZ_CACHE, &zone->flags);
+ zone->wp_block = 0;
+ zmd->nr_cache_zones++;
+ zmd->nr_useable_zones++;
+ if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
+ /* Disable runt zone */
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ break;
+ }
+ zone_offset += zmd->zone_nr_sectors;
+ }
return 0;
}
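Worked example of the runt test in dmz_emulate_zones(): with 256 MiB zones (zone_nr_sectors = 524288) on a regular device of 2,000,000 sectors, indices 0-2 become full cache zones; at idx 3 only 2,000,000 - 3 * 524288 = 427,136 sectors remain, so that trailing zone is flagged DMZ_OFFLINE and the loop stops.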
@@ -1146,8 +1461,15 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
*/
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
- kfree(zmd->zones);
- zmd->zones = NULL;
+ int idx;
+
+ for (idx = 0; idx < zmd->nr_zones; idx++) {
+ struct dm_zone *zone = xa_load(&zmd->zones, idx);
+
+ kfree(zone);
+ xa_erase(&zmd->zones, idx);
+ }
+ xa_destroy(&zmd->zones);
}
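An equivalent teardown can lean on the iterator instead of probing every index; a sketch assuming, as here, that nothing else touches the array during destruction:

    unsigned long idx;
    struct dm_zone *zone;

    xa_for_each(&zmd->zones, idx, zone) {	/* skips absent indices itself */
    	xa_erase(&zmd->zones, idx);
    	kfree(zone);
    }
    xa_destroy(&zmd->zones);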
/*
@@ -1156,32 +1478,87 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
*/
static int dmz_init_zones(struct dmz_metadata *zmd)
{
- struct dmz_dev *dev = zmd->dev;
- int ret;
+ int i, ret;
+ struct dmz_dev *zoned_dev = &zmd->dev[0];
/* Init */
- zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
+ zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
+ zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
+ zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
+ zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
+ zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
zmd->zone_nr_bitmap_blocks =
max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
- zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
+ zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
DMZ_BLOCK_SIZE_BITS);
/* Allocate zone array */
- zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
- if (!zmd->zones)
- return -ENOMEM;
+ zmd->nr_zones = 0;
+ for (i = 0; i < zmd->nr_devs; i++) {
+ struct dmz_dev *dev = &zmd->dev[i];
+
+ dev->metadata = zmd;
+ zmd->nr_zones += dev->nr_zones;
+
+ atomic_set(&dev->unmap_nr_rnd, 0);
+ INIT_LIST_HEAD(&dev->unmap_rnd_list);
+ INIT_LIST_HEAD(&dev->map_rnd_list);
+
+ atomic_set(&dev->unmap_nr_seq, 0);
+ INIT_LIST_HEAD(&dev->unmap_seq_list);
+ INIT_LIST_HEAD(&dev->map_seq_list);
+ }
+
+ if (!zmd->nr_zones) {
+ DMERR("(%s): No zones found", zmd->devname);
+ return -ENXIO;
+ }
+ xa_init(&zmd->zones);
+
+ DMDEBUG("(%s): Using %zu B for zone information",
+ zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);
- dmz_dev_info(dev, "Using %zu B for zone information",
- sizeof(struct dm_zone) * dev->nr_zones);
+ if (zmd->nr_devs > 1) {
+ ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
+ if (ret < 0) {
+ DMDEBUG("(%s): Failed to emulate zones, error %d",
+ zmd->devname, ret);
+ dmz_drop_zones(zmd);
+ return ret;
+ }
+
+ /*
+ * Primary superblock zone is always at zone 0 when multiple
+ * drives are present.
+ */
+ zmd->sb[0].zone = dmz_get(zmd, 0);
+
+ for (i = 1; i < zmd->nr_devs; i++) {
+ zoned_dev = &zmd->dev[i];
+
+ ret = blkdev_report_zones(zoned_dev->bdev, 0,
+ BLK_ALL_ZONES,
+ dmz_init_zone, zoned_dev);
+ if (ret < 0) {
+ DMDEBUG("(%s): Failed to report zones, error %d",
+ zmd->devname, ret);
+ dmz_drop_zones(zmd);
+ return ret;
+ }
+ }
+ return 0;
+ }
/*
* Get zone information and initialize zone descriptors. At the same
* time, determine where the super block should be: first block of the
* first randomly writable zone.
*/
- ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
- zmd);
+ ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
+ dmz_init_zone, zoned_dev);
if (ret < 0) {
+ DMDEBUG("(%s): Failed to report zones, error %d",
+ zmd->devname, ret);
dmz_drop_zones(zmd);
return ret;
}
@@ -1213,9 +1590,13 @@ static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
*/
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
+ struct dmz_dev *dev = zone->dev;
unsigned int noio_flag;
int ret;
+ if (dev->flags & DMZ_BDEV_REGULAR)
+ return 0;
+
/*
* Get zone information from disk. Since blkdev_report_zones() uses
* GFP_KERNEL by default for memory allocations, set the per-task
@@ -1223,16 +1604,16 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
* GFP_NOIO was specified.
*/
noio_flag = memalloc_noio_save();
- ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1,
+ ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
dmz_update_zone_cb, zone);
memalloc_noio_restore(noio_flag);
if (ret == 0)
ret = -EIO;
if (ret < 0) {
- dmz_dev_err(zmd->dev, "Get zone %u report failed",
- dmz_id(zmd, zone));
- dmz_check_bdev(zmd->dev);
+ dmz_dev_err(dev, "Get zone %u report failed",
+ zone->id);
+ dmz_check_bdev(dev);
return ret;
}
@@ -1246,6 +1627,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
struct dm_zone *zone)
{
+ struct dmz_dev *dev = zone->dev;
unsigned int wp = 0;
int ret;
@@ -1254,8 +1636,8 @@ static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
if (ret)
return ret;
- dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
- dmz_id(zmd, zone), zone->wp_block, wp);
+ dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
+ zone->id, zone->wp_block, wp);
if (zone->wp_block < wp) {
dmz_invalidate_blocks(zmd, zone, zone->wp_block,
@@ -1265,11 +1647,6 @@ static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
return 0;
}
-static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
-{
- return &zmd->zones[zone_id];
-}
-
/*
* Reset a zone write pointer.
*/
@@ -1287,14 +1664,14 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
return 0;
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
- struct dmz_dev *dev = zmd->dev;
+ struct dmz_dev *dev = zone->dev;
ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_NOIO);
+ zmd->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
- dmz_id(zmd, zone), ret);
+ zone->id, ret);
return ret;
}
}
@@ -1313,7 +1690,6 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
*/
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
- struct dmz_dev *dev = zmd->dev;
struct dm_zone *dzone, *bzone;
struct dmz_mblock *dmap_mblk = NULL;
struct dmz_map *dmap;
@@ -1345,36 +1721,48 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
if (dzone_id == DMZ_MAP_UNMAPPED)
goto next;
- if (dzone_id >= dev->nr_zones) {
- dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
+ if (dzone_id >= zmd->nr_zones) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
chunk, dzone_id);
return -EIO;
}
dzone = dmz_get(zmd, dzone_id);
+ if (!dzone) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
+ chunk, dzone_id);
+ return -EIO;
+ }
set_bit(DMZ_DATA, &dzone->flags);
dzone->chunk = chunk;
dmz_get_zone_weight(zmd, dzone);
- if (dmz_is_rnd(dzone))
- list_add_tail(&dzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(dzone))
+ list_add_tail(&dzone->link, &zmd->map_cache_list);
+ else if (dmz_is_rnd(dzone))
+ list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
else
- list_add_tail(&dzone->link, &zmd->map_seq_list);
+ list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
/* Check buffer zone */
bzone_id = le32_to_cpu(dmap[e].bzone_id);
if (bzone_id == DMZ_MAP_UNMAPPED)
goto next;
- if (bzone_id >= dev->nr_zones) {
- dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
+ if (bzone_id >= zmd->nr_zones) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
chunk, bzone_id);
return -EIO;
}
bzone = dmz_get(zmd, bzone_id);
- if (!dmz_is_rnd(bzone)) {
- dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
+ if (!bzone) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
+ chunk, bzone_id);
+ return -EIO;
+ }
+ if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
chunk, bzone_id);
return -EIO;
}
@@ -1385,7 +1773,10 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
bzone->bzone = dzone;
dzone->bzone = bzone;
dmz_get_zone_weight(zmd, bzone);
- list_add_tail(&bzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(bzone))
+ list_add_tail(&bzone->link, &zmd->map_cache_list);
+ else
+ list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
next:
chunk++;
e++;
@@ -1398,15 +1789,21 @@ next:
* fully initialized. All remaining zones are unmapped data
* zones. Finish initializing those here.
*/
- for (i = 0; i < dev->nr_zones; i++) {
+ for (i = 0; i < zmd->nr_zones; i++) {
dzone = dmz_get(zmd, i);
+ if (!dzone)
+ continue;
if (dmz_is_meta(dzone))
continue;
+ if (dmz_is_offline(dzone))
+ continue;
- if (dmz_is_rnd(dzone))
- zmd->nr_rnd++;
+ if (dmz_is_cache(dzone))
+ zmd->nr_cache++;
+ else if (dmz_is_rnd(dzone))
+ dzone->dev->nr_rnd++;
else
- zmd->nr_seq++;
+ dzone->dev->nr_seq++;
if (dmz_is_data(dzone)) {
/* Already initialized */
@@ -1416,16 +1813,22 @@ next:
/* Unmapped data zone */
set_bit(DMZ_DATA, &dzone->flags);
dzone->chunk = DMZ_MAP_UNMAPPED;
- if (dmz_is_rnd(dzone)) {
- list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
- atomic_inc(&zmd->unmap_nr_rnd);
+ if (dmz_is_cache(dzone)) {
+ list_add_tail(&dzone->link, &zmd->unmap_cache_list);
+ atomic_inc(&zmd->unmap_nr_cache);
+ } else if (dmz_is_rnd(dzone)) {
+ list_add_tail(&dzone->link,
+ &dzone->dev->unmap_rnd_list);
+ atomic_inc(&dzone->dev->unmap_nr_rnd);
} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
+ set_bit(DMZ_RESERVED, &dzone->flags);
atomic_inc(&zmd->nr_reserved_seq_zones);
- zmd->nr_seq--;
+ dzone->dev->nr_seq--;
} else {
- list_add_tail(&dzone->link, &zmd->unmap_seq_list);
- atomic_inc(&zmd->unmap_nr_seq);
+ list_add_tail(&dzone->link,
+ &dzone->dev->unmap_seq_list);
+ atomic_inc(&dzone->dev->unmap_nr_seq);
}
}
@@ -1459,10 +1862,13 @@ static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
list_del_init(&zone->link);
if (dmz_is_seq(zone)) {
/* LRU rotate sequential zone */
- list_add_tail(&zone->link, &zmd->map_seq_list);
+ list_add_tail(&zone->link, &zone->dev->map_seq_list);
+ } else if (dmz_is_cache(zone)) {
+ /* LRU rotate cache zone */
+ list_add_tail(&zone->link, &zmd->map_cache_list);
} else {
/* LRU rotate random zone */
- list_add_tail(&zone->link, &zmd->map_rnd_list);
+ list_add_tail(&zone->link, &zone->dev->map_rnd_list);
}
}
@@ -1529,58 +1935,76 @@ static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
dmz_unlock_map(zmd);
dmz_unlock_metadata(zmd);
+ set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
+ clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
dmz_lock_metadata(zmd);
dmz_lock_map(zmd);
}
/*
- * Select a random write zone for reclaim.
+ * Select a cache or random write zone for reclaim.
*/
-static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
+static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int idx, bool idle)
{
struct dm_zone *dzone = NULL;
- struct dm_zone *zone;
-
- if (list_empty(&zmd->map_rnd_list))
- return ERR_PTR(-EBUSY);
+ struct dm_zone *zone, *last = NULL;
+ struct list_head *zone_list;
+
+ /* If we have cache zones, select from the cache zone list */
+ if (zmd->nr_cache) {
+ zone_list = &zmd->map_cache_list;
+ /* Try to reclaim random zones, too, when idle */
+ if (idle && list_empty(zone_list))
+ zone_list = &zmd->dev[idx].map_rnd_list;
+ } else
+ zone_list = &zmd->dev[idx].map_rnd_list;
- list_for_each_entry(zone, &zmd->map_rnd_list, link) {
- if (dmz_is_buf(zone))
+ list_for_each_entry(zone, zone_list, link) {
+ if (dmz_is_buf(zone)) {
dzone = zone->bzone;
- else
+ if (dzone->dev->dev_idx != idx)
+ continue;
+ if (!last) {
+ last = dzone;
+ continue;
+ }
+ if (last->weight < dzone->weight)
+ continue;
+ dzone = last;
+ } else
dzone = zone;
if (dmz_lock_zone_reclaim(dzone))
return dzone;
}
- return ERR_PTR(-EBUSY);
+ return NULL;
}
/*
* Select a buffered sequential zone for reclaim.
*/
-static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
+static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int idx)
{
struct dm_zone *zone;
- if (list_empty(&zmd->map_seq_list))
- return ERR_PTR(-EBUSY);
-
- list_for_each_entry(zone, &zmd->map_seq_list, link) {
+ list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
if (!zone->bzone)
continue;
if (dmz_lock_zone_reclaim(zone))
return zone;
}
- return ERR_PTR(-EBUSY);
+ return NULL;
}
/*
* Select a zone for reclaim.
*/
-struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
+struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int dev_idx, bool idle)
{
struct dm_zone *zone;
@@ -1594,9 +2018,9 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
*/
dmz_lock_map(zmd);
if (list_empty(&zmd->reserved_seq_zones_list))
- zone = dmz_get_seq_zone_for_reclaim(zmd);
+ zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
else
- zone = dmz_get_rnd_zone_for_reclaim(zmd);
+ zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
dmz_unlock_map(zmd);
return zone;
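Note the error-convention change rippling through these selectors: ERR_PTR(-EBUSY) becomes a plain NULL for "nothing eligible right now". At the call site the two styles look like this (the second form matches the updated dmz_do_reclaim() further down):

    /* old: the pointer encodes an errno */
    zone = dmz_get_zone_for_reclaim(zmd);
    if (IS_ERR(zone))
    	return PTR_ERR(zone);

    /* new: NULL simply means "retry later" */
    zone = dmz_get_zone_for_reclaim(zmd, dev_idx, idle);
    if (!zone)
    	return -EBUSY;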
@@ -1616,6 +2040,7 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chu
unsigned int dzone_id;
struct dm_zone *dzone = NULL;
int ret = 0;
+ int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
dmz_lock_map(zmd);
again:
@@ -1630,9 +2055,9 @@ again:
goto out;
/* Allocate a random zone */
- dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
if (!dzone) {
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_dev_is_dying(zmd)) {
dzone = ERR_PTR(-EIO);
goto out;
}
@@ -1645,6 +2070,10 @@ again:
} else {
/* The chunk is already mapped: get the mapping zone */
dzone = dmz_get(zmd, dzone_id);
+ if (!dzone) {
+ dzone = ERR_PTR(-EIO);
+ goto out;
+ }
if (dzone->chunk != chunk) {
dzone = ERR_PTR(-EIO);
goto out;
@@ -1723,6 +2152,7 @@ struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
struct dm_zone *dzone)
{
struct dm_zone *bzone;
+ int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
dmz_lock_map(zmd);
again:
@@ -1731,9 +2161,9 @@ again:
goto out;
/* Allocate a random zone */
- bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
if (!bzone) {
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_dev_is_dying(zmd)) {
bzone = ERR_PTR(-EIO);
goto out;
}
@@ -1742,14 +2172,16 @@ again:
}
/* Update the chunk mapping */
- dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
- dmz_id(zmd, bzone));
+ dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);
set_bit(DMZ_BUF, &bzone->flags);
bzone->chunk = dzone->chunk;
bzone->bzone = dzone;
dzone->bzone = bzone;
- list_add_tail(&bzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(bzone))
+ list_add_tail(&bzone->link, &zmd->map_cache_list);
+ else
+ list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
out:
dmz_unlock_map(zmd);
@@ -1760,46 +2192,68 @@ out:
* Get an unmapped (free) zone.
* This must be called with the mapping lock held.
*/
-struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
+ unsigned long flags)
{
struct list_head *list;
struct dm_zone *zone;
+ int i = 0;
- if (flags & DMZ_ALLOC_RND)
- list = &zmd->unmap_rnd_list;
- else
- list = &zmd->unmap_seq_list;
again:
+ if (flags & DMZ_ALLOC_CACHE)
+ list = &zmd->unmap_cache_list;
+ else if (flags & DMZ_ALLOC_RND)
+ list = &zmd->dev[dev_idx].unmap_rnd_list;
+ else
+ list = &zmd->dev[dev_idx].unmap_seq_list;
+
if (list_empty(list)) {
/*
- * No free zone: if this is for reclaim, allow using the
- * reserved sequential zones.
+ * No free zone: return NULL if this is not for reclaim.
*/
- if (!(flags & DMZ_ALLOC_RECLAIM) ||
- list_empty(&zmd->reserved_seq_zones_list))
+ if (!(flags & DMZ_ALLOC_RECLAIM))
return NULL;
+ /*
+ * Try to allocate from other devices
+ */
+ if (i < zmd->nr_devs) {
+ dev_idx = (dev_idx + 1) % zmd->nr_devs;
+ i++;
+ goto again;
+ }
- zone = list_first_entry(&zmd->reserved_seq_zones_list,
- struct dm_zone, link);
- list_del_init(&zone->link);
- atomic_dec(&zmd->nr_reserved_seq_zones);
+ /*
+ * Fallback to the reserved sequential zones
+ */
+ zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
+ struct dm_zone, link);
+ if (zone) {
+ list_del_init(&zone->link);
+ atomic_dec(&zmd->nr_reserved_seq_zones);
+ }
return zone;
}
zone = list_first_entry(list, struct dm_zone, link);
list_del_init(&zone->link);
- if (dmz_is_rnd(zone))
- atomic_dec(&zmd->unmap_nr_rnd);
+ if (dmz_is_cache(zone))
+ atomic_dec(&zmd->unmap_nr_cache);
+ else if (dmz_is_rnd(zone))
+ atomic_dec(&zone->dev->unmap_nr_rnd);
else
- atomic_dec(&zmd->unmap_nr_seq);
+ atomic_dec(&zone->dev->unmap_nr_seq);
if (dmz_is_offline(zone)) {
- dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
+ dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
+ zone = NULL;
+ goto again;
+ }
+ if (dmz_is_meta(zone)) {
+ dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
zone = NULL;
goto again;
}
-
return zone;
}
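The DMZ_ALLOC_RECLAIM path above walks every device once, rotating the index with modular arithmetic before giving up and dipping into the reserved sequential zones. The probe order in isolation, as a hypothetical helper (dev_has_free_zone() is invented for the sketch):

    /* With ndev = 3 and start_idx = 1, this probes devices 2, 0, 1. */
    static int next_dev_with_free_zone(int start_idx, int ndev)
    {
    	int i, idx = start_idx;

    	for (i = 0; i < ndev; i++) {
    		idx = (idx + 1) % ndev;
    		if (dev_has_free_zone(idx))	/* hypothetical predicate */
    			return idx;
    	}
    	return -1;	/* every device exhausted */
    }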
@@ -1814,16 +2268,18 @@ void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
dmz_reset_zone(zmd, zone);
/* Return the zone to its type unmap list */
- if (dmz_is_rnd(zone)) {
- list_add_tail(&zone->link, &zmd->unmap_rnd_list);
- atomic_inc(&zmd->unmap_nr_rnd);
- } else if (atomic_read(&zmd->nr_reserved_seq_zones) <
- zmd->nr_reserved_seq) {
+ if (dmz_is_cache(zone)) {
+ list_add_tail(&zone->link, &zmd->unmap_cache_list);
+ atomic_inc(&zmd->unmap_nr_cache);
+ } else if (dmz_is_rnd(zone)) {
+ list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
+ atomic_inc(&zone->dev->unmap_nr_rnd);
+ } else if (dmz_is_reserved(zone)) {
list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
atomic_inc(&zmd->nr_reserved_seq_zones);
} else {
- list_add_tail(&zone->link, &zmd->unmap_seq_list);
- atomic_inc(&zmd->unmap_nr_seq);
+ list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
+ atomic_inc(&zone->dev->unmap_nr_seq);
}
wake_up_all(&zmd->free_wq);
@@ -1837,13 +2293,15 @@ void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
unsigned int chunk)
{
/* Set the chunk mapping */
- dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
+ dmz_set_chunk_mapping(zmd, chunk, dzone->id,
DMZ_MAP_UNMAPPED);
dzone->chunk = chunk;
- if (dmz_is_rnd(dzone))
- list_add_tail(&dzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(dzone))
+ list_add_tail(&dzone->link, &zmd->map_cache_list);
+ else if (dmz_is_rnd(dzone))
+ list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
else
- list_add_tail(&dzone->link, &zmd->map_seq_list);
+ list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
}
/*
@@ -1865,7 +2323,7 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
* Unmapping the chunk buffer zone: clear only
* the chunk buffer mapping
*/
- dzone_id = dmz_id(zmd, zone->bzone);
+ dzone_id = zone->bzone->id;
zone->bzone->bzone = NULL;
zone->bzone = NULL;
@@ -1927,7 +2385,7 @@ static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
sector_t chunk_block)
{
sector_t bitmap_block = 1 + zmd->nr_map_blocks +
- (sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
+ (sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
(chunk_block >> DMZ_BLOCK_SHIFT_BITS);
return dmz_get_mblock(zmd, bitmap_block);
@@ -1943,7 +2401,7 @@ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
sector_t chunk_block = 0;
/* Get the zones bitmap blocks */
- while (chunk_block < zmd->dev->zone_nr_blocks) {
+ while (chunk_block < zmd->zone_nr_blocks) {
from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
if (IS_ERR(from_mblk))
return PTR_ERR(from_mblk);
@@ -1978,7 +2436,7 @@ int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
int ret;
/* Get the zones bitmap blocks */
- while (chunk_block < zmd->dev->zone_nr_blocks) {
+ while (chunk_block < zmd->zone_nr_blocks) {
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
if (ret <= 0)
@@ -2002,12 +2460,12 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
sector_t chunk_block, unsigned int nr_blocks)
{
unsigned int count, bit, nr_bits;
- unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
struct dmz_mblock *mblk;
unsigned int n = 0;
- dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
- dmz_id(zmd, zone), (unsigned long long)chunk_block,
+ dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
+ zone->id, (unsigned long long)chunk_block,
nr_blocks);
WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
@@ -2036,8 +2494,8 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
if (likely(zone->weight + n <= zone_nr_blocks))
zone->weight += n;
else {
- dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
- dmz_id(zmd, zone), zone->weight,
+ dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
+ zone->id, zone->weight,
zone_nr_blocks - n);
zone->weight = zone_nr_blocks;
}
@@ -2086,10 +2544,10 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
struct dmz_mblock *mblk;
unsigned int n = 0;
- dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
- dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);
+ dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
+ zone->id, (u64)chunk_block, nr_blocks);
- WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
while (nr_blocks) {
/* Get bitmap block */
@@ -2116,8 +2574,8 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
if (zone->weight >= n)
zone->weight -= n;
else {
- dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
- dmz_id(zmd, zone), zone->weight, n);
+ dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
+ zone->id, zone->weight, n);
zone->weight = 0;
}
@@ -2133,7 +2591,7 @@ static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
struct dmz_mblock *mblk;
int ret;
- WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block >= zmd->zone_nr_blocks);
/* Get bitmap block */
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
@@ -2163,7 +2621,7 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
unsigned long *bitmap;
int n = 0;
- WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
while (nr_blocks) {
/* Get bitmap block */
@@ -2207,7 +2665,7 @@ int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
/* The block is valid: get the number of valid blocks from block */
return dmz_to_next_set_block(zmd, zone, chunk_block,
- zmd->dev->zone_nr_blocks - chunk_block, 0);
+ zmd->zone_nr_blocks - chunk_block, 0);
}
/*
@@ -2223,7 +2681,7 @@ int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
int ret;
ret = dmz_to_next_set_block(zmd, zone, start_block,
- zmd->dev->zone_nr_blocks - start_block, 1);
+ zmd->zone_nr_blocks - start_block, 1);
if (ret < 0)
return ret;
@@ -2231,7 +2689,7 @@ int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
*chunk_block = start_block;
return dmz_to_next_set_block(zmd, zone, start_block,
- zmd->dev->zone_nr_blocks - start_block, 0);
+ zmd->zone_nr_blocks - start_block, 0);
}
/*
@@ -2270,7 +2728,7 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
struct dmz_mblock *mblk;
sector_t chunk_block = 0;
unsigned int bit, nr_bits;
- unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int nr_blocks = zmd->zone_nr_blocks;
void *bitmap;
int n = 0;
@@ -2326,7 +2784,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
while (!list_empty(&zmd->mblk_dirty_list)) {
mblk = list_first_entry(&zmd->mblk_dirty_list,
struct dmz_mblock, link);
- dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
+ dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
(u64)mblk->no, mblk->ref);
list_del_init(&mblk->link);
rb_erase(&mblk->node, &zmd->mblk_rbtree);
@@ -2344,7 +2802,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
/* Sanity checks: the mblock rbtree should now be empty */
root = &zmd->mblk_rbtree;
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
- dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
+ dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
(u64)mblk->no, mblk->ref);
mblk->ref = 0;
dmz_free_mblock(zmd, mblk);
@@ -2357,13 +2815,42 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
mutex_destroy(&zmd->map_lock);
}
+static void dmz_print_dev(struct dmz_metadata *zmd, int num)
+{
+ struct dmz_dev *dev = &zmd->dev[num];
+
+ if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
+ dmz_dev_info(dev, "Regular block device");
+ else
+ dmz_dev_info(dev, "Host-%s zoned block device",
+ bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
+ "aware" : "managed");
+ if (zmd->sb_version > 1) {
+ sector_t sector_offset =
+ dev->zone_offset << zmd->zone_nr_sectors_shift;
+
+ dmz_dev_info(dev, " %llu 512-byte logical sectors (offset %llu)",
+ (u64)dev->capacity, (u64)sector_offset);
+ dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors (offset %llu)",
+ dev->nr_zones, (u64)zmd->zone_nr_sectors,
+ (u64)dev->zone_offset);
+ } else {
+ dmz_dev_info(dev, " %llu 512-byte logical sectors",
+ (u64)dev->capacity);
+ dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
+ dev->nr_zones, (u64)zmd->zone_nr_sectors);
+ }
+}
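The sector_offset computed above is plain shift arithmetic: with 256 MiB zones, zone_nr_sectors = 524288 and zone_nr_sectors_shift = ilog2(524288) = 19, so a second device placed at zone_offset 16 reports a start of 16 << 19 = 8,388,608 512-byte sectors.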
+
/*
* Initialize the zoned metadata.
*/
-int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
+int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
+ struct dmz_metadata **metadata,
+ const char *devname)
{
struct dmz_metadata *zmd;
- unsigned int i, zid;
+ unsigned int i;
struct dm_zone *zone;
int ret;
@@ -2371,7 +2858,9 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
if (!zmd)
return -ENOMEM;
+ strcpy(zmd->devname, devname);
zmd->dev = dev;
+ zmd->nr_devs = num_dev;
zmd->mblk_rbtree = RB_ROOT;
init_rwsem(&zmd->mblk_sem);
mutex_init(&zmd->mblk_flush_lock);
@@ -2380,13 +2869,10 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
INIT_LIST_HEAD(&zmd->mblk_dirty_list);
mutex_init(&zmd->map_lock);
- atomic_set(&zmd->unmap_nr_rnd, 0);
- INIT_LIST_HEAD(&zmd->unmap_rnd_list);
- INIT_LIST_HEAD(&zmd->map_rnd_list);
- atomic_set(&zmd->unmap_nr_seq, 0);
- INIT_LIST_HEAD(&zmd->unmap_seq_list);
- INIT_LIST_HEAD(&zmd->map_seq_list);
+ atomic_set(&zmd->unmap_nr_cache, 0);
+ INIT_LIST_HEAD(&zmd->unmap_cache_list);
+ INIT_LIST_HEAD(&zmd->map_cache_list);
atomic_set(&zmd->nr_reserved_seq_zones, 0);
INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
@@ -2404,14 +2890,22 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
goto err;
/* Set metadata zones starting from sb_zone */
- zid = dmz_id(zmd, zmd->sb_zone);
for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
- zone = dmz_get(zmd, zid + i);
- if (!dmz_is_rnd(zone))
+ zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
+ if (!zone) {
+ dmz_zmd_err(zmd,
+ "metadata zone %u not present", i);
+ ret = -ENXIO;
+ goto err;
+ }
+ if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
+ dmz_zmd_err(zmd,
+ "metadata zone %d is not random", i);
+ ret = -ENXIO;
goto err;
+ }
set_bit(DMZ_META, &zone->flags);
}
-
/* Load mapping table */
ret = dmz_load_mapping(zmd);
if (ret)
@@ -2432,34 +2926,38 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
/* Metadata cache shrinker */
ret = register_shrinker(&zmd->mblk_shrinker);
if (ret) {
- dmz_dev_err(dev, "Register metadata cache shrinker failed");
+ dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
goto err;
}
- dmz_dev_info(dev, "Host-%s zoned block device",
- bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
- "aware" : "managed");
- dmz_dev_info(dev, " %llu 512-byte logical sectors",
- (u64)dev->capacity);
- dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
- dev->nr_zones, (u64)dev->zone_nr_sectors);
- dmz_dev_info(dev, " %u metadata zones",
- zmd->nr_meta_zones * 2);
- dmz_dev_info(dev, " %u data zones for %u chunks",
- zmd->nr_data_zones, zmd->nr_chunks);
- dmz_dev_info(dev, " %u random zones (%u unmapped)",
- zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
- dmz_dev_info(dev, " %u sequential zones (%u unmapped)",
- zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
- dmz_dev_info(dev, " %u reserved sequential data zones",
- zmd->nr_reserved_seq);
-
- dmz_dev_debug(dev, "Format:");
- dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
+ dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
+ for (i = 0; i < zmd->nr_devs; i++)
+ dmz_print_dev(zmd, i);
+
+ dmz_zmd_info(zmd, " %u zones of %llu 512-byte logical sectors",
+ zmd->nr_zones, (u64)zmd->zone_nr_sectors);
+ dmz_zmd_debug(zmd, " %u metadata zones",
+ zmd->nr_meta_zones * 2);
+ dmz_zmd_debug(zmd, " %u data zones for %u chunks",
+ zmd->nr_data_zones, zmd->nr_chunks);
+ dmz_zmd_debug(zmd, " %u cache zones (%u unmapped)",
+ zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
+ for (i = 0; i < zmd->nr_devs; i++) {
+ dmz_zmd_debug(zmd, " %u random zones (%u unmapped)",
+ dmz_nr_rnd_zones(zmd, i),
+ dmz_nr_unmap_rnd_zones(zmd, i));
+ dmz_zmd_debug(zmd, " %u sequential zones (%u unmapped)",
+ dmz_nr_seq_zones(zmd, i),
+ dmz_nr_unmap_seq_zones(zmd, i));
+ }
+ dmz_zmd_debug(zmd, " %u reserved sequential data zones",
+ zmd->nr_reserved_seq);
+ dmz_zmd_debug(zmd, "Format:");
+ dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
zmd->nr_meta_blocks, zmd->max_nr_mblks);
- dmz_dev_debug(dev, " %u data zone mapping blocks",
+ dmz_zmd_debug(zmd, " %u data zone mapping blocks",
zmd->nr_map_blocks);
- dmz_dev_debug(dev, " %u bitmap blocks",
+ dmz_zmd_debug(zmd, " %u bitmap blocks",
zmd->nr_bitmap_blocks);
*metadata = zmd;
@@ -2488,30 +2986,28 @@ void dmz_dtr_metadata(struct dmz_metadata *zmd)
*/
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
- struct dmz_dev *dev = zmd->dev;
struct dm_zone *zone;
sector_t wp_block;
unsigned int i;
int ret;
/* Check zones */
- for (i = 0; i < dev->nr_zones; i++) {
+ for (i = 0; i < zmd->nr_zones; i++) {
zone = dmz_get(zmd, i);
if (!zone) {
- dmz_dev_err(dev, "Unable to get zone %u", i);
+ dmz_zmd_err(zmd, "Unable to get zone %u", i);
return -EIO;
}
-
wp_block = zone->wp_block;
ret = dmz_update_zone(zmd, zone);
if (ret) {
- dmz_dev_err(dev, "Broken zone %u", i);
+ dmz_zmd_err(zmd, "Broken zone %u", i);
return ret;
}
if (dmz_is_offline(zone)) {
- dmz_dev_warn(dev, "Zone %u is offline", i);
+ dmz_zmd_warn(zmd, "Zone %u is offline", i);
continue;
}
@@ -2519,11 +3015,11 @@ int dmz_resume_metadata(struct dmz_metadata *zmd)
if (!dmz_is_seq(zone))
zone->wp_block = 0;
else if (zone->wp_block != wp_block) {
- dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
+ dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
i, (u64)zone->wp_block, (u64)wp_block);
zone->wp_block = wp_block;
dmz_invalidate_blocks(zmd, zone, zone->wp_block,
- dev->zone_nr_blocks - zone->wp_block);
+ zmd->zone_nr_blocks - zone->wp_block);
}
}
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index e7ace908a9b7..2261b4dd60b7 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -13,7 +13,6 @@
struct dmz_reclaim {
struct dmz_metadata *metadata;
- struct dmz_dev *dev;
struct delayed_work work;
struct workqueue_struct *wq;
@@ -22,6 +21,8 @@ struct dmz_reclaim {
struct dm_kcopyd_throttle kc_throttle;
int kc_err;
+ int dev_idx;
+
unsigned long flags;
/* Last target access time */
@@ -44,13 +45,13 @@ enum {
* Percentage of unmapped (free) random zones below which reclaim starts
* even if the target is busy.
*/
-#define DMZ_RECLAIM_LOW_UNMAP_RND 30
+#define DMZ_RECLAIM_LOW_UNMAP_ZONES 30
/*
* Percentage of unmapped (free) random zones above which reclaim will
* stop if the target is busy.
*/
-#define DMZ_RECLAIM_HIGH_UNMAP_RND 50
+#define DMZ_RECLAIM_HIGH_UNMAP_ZONES 50
/*
* Align a sequential zone write pointer to chunk_block.
@@ -59,6 +60,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
sector_t block)
{
struct dmz_metadata *zmd = zrc->metadata;
+ struct dmz_dev *dev = zone->dev;
sector_t wp_block = zone->wp_block;
unsigned int nr_blocks;
int ret;
@@ -74,15 +76,15 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
* pointer and the requested position.
*/
nr_blocks = block - wp_block;
- ret = blkdev_issue_zeroout(zrc->dev->bdev,
+ ret = blkdev_issue_zeroout(dev->bdev,
dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
if (ret) {
- dmz_dev_err(zrc->dev,
+ dmz_dev_err(dev,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
- dmz_id(zmd, zone), (unsigned long long)wp_block,
+ zone->id, (unsigned long long)wp_block,
(unsigned long long)block, nr_blocks, ret);
- dmz_check_bdev(zrc->dev);
+ dmz_check_bdev(dev);
return ret;
}
@@ -116,7 +118,6 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
struct dmz_metadata *zmd = zrc->metadata;
- struct dmz_dev *dev = zrc->dev;
struct dm_io_region src, dst;
sector_t block = 0, end_block;
sector_t nr_blocks;
@@ -128,7 +129,7 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
if (dmz_is_seq(src_zone))
end_block = src_zone->wp_block;
else
- end_block = dev->zone_nr_blocks;
+ end_block = dmz_zone_nr_blocks(zmd);
src_zone_block = dmz_start_block(zmd, src_zone);
dst_zone_block = dmz_start_block(zmd, dst_zone);
@@ -136,9 +137,14 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
while (block < end_block) {
- if (dev->flags & DMZ_BDEV_DYING)
+ if (src_zone->dev->flags & DMZ_BDEV_DYING)
+ return -EIO;
+ if (dst_zone->dev->flags & DMZ_BDEV_DYING)
return -EIO;
+ if (dmz_reclaim_should_terminate(src_zone))
+ return -EINTR;
+
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, src_zone, &block);
if (ret <= 0)
@@ -156,11 +162,11 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
return ret;
}
- src.bdev = dev->bdev;
+ src.bdev = src_zone->dev->bdev;
src.sector = dmz_blk2sect(src_zone_block + block);
src.count = dmz_blk2sect(nr_blocks);
- dst.bdev = dev->bdev;
+ dst.bdev = dst_zone->dev->bdev;
dst.sector = dmz_blk2sect(dst_zone_block + block);
dst.count = src.count;
@@ -194,10 +200,10 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
struct dmz_metadata *zmd = zrc->metadata;
int ret;
- dmz_dev_debug(zrc->dev,
- "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
- dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
- dmz_id(zmd, dzone), dmz_weight(dzone));
+ DMDEBUG("(%s/%u): Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ dzone->chunk, bzone->id, dmz_weight(bzone),
+ dzone->id, dmz_weight(dzone));
/* Flush data zone into the buffer zone */
ret = dmz_reclaim_copy(zrc, bzone, dzone);
@@ -210,7 +216,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
if (ret == 0) {
/* Free the buffer zone */
- dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unlock_zone_reclaim(dzone);
@@ -233,10 +239,10 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
struct dmz_metadata *zmd = zrc->metadata;
int ret = 0;
- dmz_dev_debug(zrc->dev,
- "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
- chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
- dmz_id(zmd, bzone), dmz_weight(bzone));
+ DMDEBUG("(%s/%u): Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ chunk, dzone->id, dmz_weight(dzone),
+ bzone->id, dmz_weight(bzone));
/* Flush data zone into the buffer zone */
ret = dmz_reclaim_copy(zrc, dzone, bzone);
@@ -252,7 +258,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
* Free the data zone and remap the chunk to
* the buffer zone.
*/
- dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unmap_zone(zmd, dzone);
@@ -277,18 +283,26 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
struct dm_zone *szone = NULL;
struct dmz_metadata *zmd = zrc->metadata;
int ret;
+ int alloc_flags = DMZ_ALLOC_SEQ;
- /* Get a free sequential zone */
+ /* Get a free random or sequential zone */
dmz_lock_map(zmd);
- szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
+again:
+ szone = dmz_alloc_zone(zmd, zrc->dev_idx,
+ alloc_flags | DMZ_ALLOC_RECLAIM);
+ if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {
+ alloc_flags = DMZ_ALLOC_RND;
+ goto again;
+ }
dmz_unlock_map(zmd);
if (!szone)
return -ENOSPC;
- dmz_dev_debug(zrc->dev,
- "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
- chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
- dmz_id(zmd, szone));
+ DMDEBUG("(%s/%u): Chunk %u, move %s zone %u (weight %u) to %s zone %u",
+ dmz_metadata_label(zmd), zrc->dev_idx, chunk,
+ dmz_is_cache(dzone) ? "cache" : "rnd",
+ dzone->id, dmz_weight(dzone),
+ dmz_is_rnd(szone) ? "rnd" : "seq", szone->id);
/* Flush the random data zone into the sequential zone */
ret = dmz_reclaim_copy(zrc, dzone, szone);
@@ -306,7 +320,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
dmz_unlock_map(zmd);
} else {
/* Free the data zone and remap the chunk */
- dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, dzone);
dmz_unlock_zone_reclaim(dzone);
@@ -337,6 +351,14 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
}
/*
+ * Test if the target device is idle.
+ */
+static inline int dmz_target_idle(struct dmz_reclaim *zrc)
+{
+ return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
+}
+
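The helper above treats the target as idle once DMZ_IDLE_PERIOD jiffies
have passed since the last recorded access; time_is_before_jiffies(a) is
the kernel's wrap-safe time_after(jiffies, a). A minimal userspace sketch
of the same signed-subtraction comparison (names and values here are
illustrative, not from the driver):

	#include <stdbool.h>
	#include <stdio.h>

	/* Wrap-safe "a is before b", as the kernel's jiffies macros do it. */
	static bool time_is_before(unsigned long a, unsigned long b)
	{
		return (long)(b - a) > 0;
	}

	int main(void)
	{
		unsigned long atime = 1000, idle_period = 100, now = 1150;

		/* Idle: last access plus the grace period is in the past. */
		printf("idle: %d\n", time_is_before(atime + idle_period, now));
		return 0;
	}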
+/*
* Find a candidate zone for reclaim and process it.
*/
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
@@ -348,13 +370,16 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
int ret;
/* Get a data zone */
- dzone = dmz_get_zone_for_reclaim(zmd);
- if (IS_ERR(dzone))
- return PTR_ERR(dzone);
+ dzone = dmz_get_zone_for_reclaim(zmd, zrc->dev_idx,
+ dmz_target_idle(zrc));
+ if (!dzone) {
+ DMDEBUG("(%s/%u): No zone found to reclaim",
+ dmz_metadata_label(zmd), zrc->dev_idx);
+ return -EBUSY;
+ }
start = jiffies;
-
- if (dmz_is_rnd(dzone)) {
+ if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
if (!dmz_weight(dzone)) {
/* Empty zone */
dmz_reclaim_empty(zrc, dzone);
@@ -395,54 +420,80 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
}
out:
if (ret) {
+ if (ret == -EINTR)
+ DMDEBUG("(%s/%u): reclaim zone %u interrupted",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ rzone->id);
+ else
+ DMDEBUG("(%s/%u): Failed to reclaim zone %u, err %d",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ rzone->id, ret);
dmz_unlock_zone_reclaim(dzone);
return ret;
}
ret = dmz_flush_metadata(zrc->metadata);
if (ret) {
- dmz_dev_debug(zrc->dev,
- "Metadata flush for zone %u failed, err %d\n",
- dmz_id(zmd, rzone), ret);
+ DMDEBUG("(%s/%u): Metadata flush for zone %u failed, err %d",
+ dmz_metadata_label(zmd), zrc->dev_idx, rzone->id, ret);
return ret;
}
- dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
- dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+ DMDEBUG("(%s/%u): Reclaimed zone %u in %u ms",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ rzone->id, jiffies_to_msecs(jiffies - start));
return 0;
}
-/*
- * Test if the target device is idle.
- */
-static inline int dmz_target_idle(struct dmz_reclaim *zrc)
+static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
{
- return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
+ struct dmz_metadata *zmd = zrc->metadata;
+ unsigned int nr_cache = dmz_nr_cache_zones(zmd);
+ unsigned int nr_unmap, nr_zones;
+
+ if (nr_cache) {
+ nr_zones = nr_cache;
+ nr_unmap = dmz_nr_unmap_cache_zones(zmd);
+ } else {
+ nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
+ nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
+ }
+ return nr_unmap * 100 / nr_zones;
}
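dmz_reclaim_percentage() above picks cache zones when any exist and falls
back to the per-device random zones otherwise, then returns an integer
percentage. A small sketch of that calculation (the driver guarantees a
nonzero zone count for a valid setup; values are illustrative):

	#include <stdio.h>

	/* Integer percentage of unmapped zones; truncating division. */
	static unsigned int unmap_percentage(unsigned int nr_unmap,
					     unsigned int nr_zones)
	{
		return nr_unmap * 100 / nr_zones;
	}

	int main(void)
	{
		printf("%u%%\n", unmap_percentage(7, 40));	/* prints 17% */
		return 0;
	}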
/*
* Test if reclaim is necessary.
*/
-static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
+static bool dmz_should_reclaim(struct dmz_reclaim *zrc, unsigned int p_unmap)
{
- struct dmz_metadata *zmd = zrc->metadata;
- unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
- unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
- unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
+ unsigned int nr_reclaim;
+
+ nr_reclaim = dmz_nr_rnd_zones(zrc->metadata, zrc->dev_idx);
+
+ if (dmz_nr_cache_zones(zrc->metadata)) {
+ /*
+ * The first device in a multi-device
+ * setup only contains cache zones, so
+ * never start reclaim there.
+ */
+ if (zrc->dev_idx == 0)
+ return false;
+ nr_reclaim += dmz_nr_cache_zones(zrc->metadata);
+ }
/* Reclaim when idle */
- if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
+ if (dmz_target_idle(zrc) && nr_reclaim)
return true;
- /* If there are still plenty of random zones, do not reclaim */
- if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
+ /* If there are still plenty of cache zones, do not reclaim */
+ if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
return false;
/*
- * If the percentage of unmapped random zones is low,
+ * If the percentage of unmapped cache zones is low,
* reclaim even if the target is busy.
*/
- return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
+ return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES;
}
/*
@@ -452,14 +503,14 @@ static void dmz_reclaim_work(struct work_struct *work)
{
struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
struct dmz_metadata *zmd = zrc->metadata;
- unsigned int nr_rnd, nr_unmap_rnd;
- unsigned int p_unmap_rnd;
+ unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
int ret;
- if (dmz_bdev_is_dying(zrc->dev))
+ if (dmz_dev_is_dying(zmd))
return;
- if (!dmz_should_reclaim(zrc)) {
+ p_unmap = dmz_reclaim_percentage(zrc);
+ if (!dmz_should_reclaim(zrc, p_unmap)) {
mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
return;
}
@@ -470,27 +521,29 @@ static void dmz_reclaim_work(struct work_struct *work)
* and slower if there are still some free random zones, to avoid
* negatively impacting the user workload as much as possible.
*/
- nr_rnd = dmz_nr_rnd_zones(zmd);
- nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
- p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
- if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
+ if (dmz_target_idle(zrc) || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2) {
/* Idle or very low percentage: go fast */
zrc->kc_throttle.throttle = 100;
} else {
/* Busy but we still have some random zone: throttle */
- zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
+ zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
}
- dmz_dev_debug(zrc->dev,
- "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
- zrc->kc_throttle.throttle,
- (dmz_target_idle(zrc) ? "Idle" : "Busy"),
- p_unmap_rnd, nr_unmap_rnd, nr_rnd);
+ nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
+ nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
+
+ DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ zrc->kc_throttle.throttle,
+ (dmz_target_idle(zrc) ? "Idle" : "Busy"),
+ p_unmap, dmz_nr_unmap_cache_zones(zmd),
+ dmz_nr_cache_zones(zmd),
+ dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx),
+ dmz_nr_rnd_zones(zmd, zrc->dev_idx));
ret = dmz_do_reclaim(zrc);
- if (ret) {
- dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
- if (!dmz_check_bdev(zrc->dev))
+ if (ret && ret != -EINTR) {
+ if (!dmz_check_dev(zmd))
return;
}
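The throttle heuristic above runs kcopyd at full speed when the target is
idle or nearly out of unmapped zones, and otherwise scales it down with
the free percentage, capped at 75%. A sketch of the arithmetic (the
threshold value is assumed; the real constant is
DMZ_RECLAIM_LOW_UNMAP_ZONES):

	#include <stdio.h>

	#define LOW_UNMAP_ZONES	30	/* assumed threshold, percent */

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	static unsigned int reclaim_throttle(int idle, unsigned int p_unmap)
	{
		if (idle || p_unmap < LOW_UNMAP_ZONES / 2)
			return 100;			/* go fast */
		return min_u(75U, 100U - p_unmap / 2);	/* throttle */
	}

	int main(void)
	{
		printf("%u\n", reclaim_throttle(0, 40));	/* 75 */
		printf("%u\n", reclaim_throttle(0, 80));	/* 60 */
		printf("%u\n", reclaim_throttle(1, 80));	/* 100 */
		return 0;
	}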
@@ -500,8 +553,8 @@ static void dmz_reclaim_work(struct work_struct *work)
/*
* Initialize reclaim.
*/
-int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
- struct dmz_reclaim **reclaim)
+int dmz_ctr_reclaim(struct dmz_metadata *zmd,
+ struct dmz_reclaim **reclaim, int idx)
{
struct dmz_reclaim *zrc;
int ret;
@@ -510,9 +563,9 @@ int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
if (!zrc)
return -ENOMEM;
- zrc->dev = dev;
zrc->metadata = zmd;
zrc->atime = jiffies;
+ zrc->dev_idx = idx;
/* Reclaim kcopyd client */
zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
@@ -524,8 +577,8 @@ int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
/* Reclaim work */
INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
- zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
- dev->name);
+ zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s_%d", WQ_MEM_RECLAIM,
+ dmz_metadata_label(zmd), idx);
if (!zrc->wq) {
ret = -ENOMEM;
goto err;
@@ -583,7 +636,8 @@ void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
*/
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
- if (dmz_should_reclaim(zrc))
+ unsigned int p_unmap = dmz_reclaim_percentage(zrc);
+
+ if (dmz_should_reclaim(zrc, p_unmap))
mod_delayed_work(zrc->wq, &zrc->work, 0);
}
-
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index f4f83d39b3dc..a907a9446c0b 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -17,7 +17,7 @@
* Zone BIO context.
*/
struct dmz_bioctx {
- struct dmz_target *target;
+ struct dmz_dev *dev;
struct dm_zone *zone;
struct bio *bio;
refcount_t ref;
@@ -38,9 +38,10 @@ struct dm_chunk_work {
* Target descriptor.
*/
struct dmz_target {
- struct dm_dev *ddev;
+ struct dm_dev **ddev;
+ unsigned int nr_ddevs;
- unsigned long flags;
+ unsigned int flags;
/* Zoned block device information */
struct dmz_dev *dev;
@@ -48,9 +49,6 @@ struct dmz_target {
/* For metadata handling */
struct dmz_metadata *metadata;
- /* For reclaim */
- struct dmz_reclaim *reclaim;
-
/* For chunk work */
struct radix_tree_root chunk_rxtree;
struct workqueue_struct *chunk_wq;
@@ -76,12 +74,13 @@ struct dmz_target {
*/
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_bioctx *bioctx =
+ dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
bio->bi_status = status;
- if (bio->bi_status != BLK_STS_OK)
- bioctx->target->dev->flags |= DMZ_CHECK_BDEV;
+ if (bioctx->dev && bio->bi_status != BLK_STS_OK)
+ bioctx->dev->flags |= DMZ_CHECK_BDEV;
if (refcount_dec_and_test(&bioctx->ref)) {
struct dm_zone *zone = bioctx->zone;
@@ -118,14 +117,20 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio, sector_t chunk_block,
unsigned int nr_blocks)
{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_bioctx *bioctx =
+ dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_dev *dev = zone->dev;
struct bio *clone;
+ if (dev->flags & DMZ_BDEV_DYING)
+ return -EIO;
+
clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
- bio_set_dev(clone, dmz->dev->bdev);
+ bio_set_dev(clone, dev->bdev);
+ bioctx->dev = dev;
clone->bi_iter.bi_sector =
dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
@@ -165,7 +170,8 @@ static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
- sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ struct dmz_metadata *zmd = dmz->metadata;
+ sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
unsigned int nr_blocks = dmz_bio_blocks(bio);
sector_t end_block = chunk_block + nr_blocks;
struct dm_zone *rzone, *bzone;
@@ -177,19 +183,22 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
return 0;
}
- dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (dmz_is_rnd(zone) ? "RND" : "SEQ"),
- dmz_id(dmz->metadata, zone),
- (unsigned long long)chunk_block, nr_blocks);
+ DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
+ dmz_metadata_label(zmd),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (dmz_is_rnd(zone) ? "RND" :
+ (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+ zone->id,
+ (unsigned long long)chunk_block, nr_blocks);
/* Check block validity to determine the read location */
bzone = zone->bzone;
while (chunk_block < end_block) {
nr_blocks = 0;
- if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block < zone->wp_block) {
/* Test block validity in the data zone */
- ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
+ ret = dmz_block_valid(zmd, zone, chunk_block);
if (ret < 0)
return ret;
if (ret > 0) {
@@ -204,7 +213,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
* Check the buffer zone, if there is one.
*/
if (!nr_blocks && bzone) {
- ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
+ ret = dmz_block_valid(zmd, bzone, chunk_block);
if (ret < 0)
return ret;
if (ret > 0) {
@@ -216,8 +225,10 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks) {
/* Valid blocks found: read them */
- nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
- ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+ nr_blocks = min_t(unsigned int, nr_blocks,
+ end_block - chunk_block);
+ ret = dmz_submit_bio(dmz, rzone, bio,
+ chunk_block, nr_blocks);
if (ret)
return ret;
chunk_block += nr_blocks;
@@ -308,25 +319,30 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
- sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ struct dmz_metadata *zmd = dmz->metadata;
+ sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
unsigned int nr_blocks = dmz_bio_blocks(bio);
if (!zone)
return -ENOSPC;
- dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (dmz_is_rnd(zone) ? "RND" : "SEQ"),
- dmz_id(dmz->metadata, zone),
- (unsigned long long)chunk_block, nr_blocks);
+ DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
+ dmz_metadata_label(zmd),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (dmz_is_rnd(zone) ? "RND" :
+ (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+ zone->id,
+ (unsigned long long)chunk_block, nr_blocks);
- if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block == zone->wp_block) {
/*
* zone is a random zone or it is a sequential zone
* and the BIO is aligned to the zone write pointer:
* direct write the zone.
*/
- return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
+ return dmz_handle_direct_write(dmz, zone, bio,
+ chunk_block, nr_blocks);
}
/*
@@ -345,7 +361,7 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
struct dmz_metadata *zmd = dmz->metadata;
sector_t block = dmz_bio_block(bio);
unsigned int nr_blocks = dmz_bio_blocks(bio);
- sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
+ sector_t chunk_block = dmz_chunk_block(zmd, block);
int ret = 0;
/* For unmapped chunks, there is nothing to do */
@@ -355,16 +371,18 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
if (dmz_is_readonly(zone))
return -EROFS;
- dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- dmz_id(zmd, zone),
- (unsigned long long)chunk_block, nr_blocks);
+ DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
+ dmz_metadata_label(dmz->metadata),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ zone->id,
+ (unsigned long long)chunk_block, nr_blocks);
/*
* Invalidate blocks in the data zone and its
* buffer zone if one is mapped.
*/
- if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block < zone->wp_block)
ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
if (ret == 0 && zone->bzone)
ret = dmz_invalidate_blocks(zmd, zone->bzone,
@@ -378,31 +396,28 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
struct bio *bio)
{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_bioctx *bioctx =
+ dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
struct dmz_metadata *zmd = dmz->metadata;
struct dm_zone *zone;
- int ret;
+ int i, ret;
/*
* Write may trigger a zone allocation. So make sure the
* allocation can succeed.
*/
if (bio_op(bio) == REQ_OP_WRITE)
- dmz_schedule_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_schedule_reclaim(dmz->dev[i].reclaim);
dmz_lock_metadata(zmd);
- if (dmz->dev->flags & DMZ_BDEV_DYING) {
- ret = -EIO;
- goto out;
- }
-
/*
* Get the data zone mapping the chunk. There may be no
* mapping for read and discard. If a mapping is obtained,
+ * the zone returned will be set to active state.
*/
- zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
+ zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
bio_op(bio));
if (IS_ERR(zone)) {
ret = PTR_ERR(zone);
@@ -413,6 +428,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
if (zone) {
dmz_activate_zone(zone);
bioctx->zone = zone;
+ dmz_reclaim_bio_acc(zone->dev->reclaim);
}
switch (bio_op(bio)) {
@@ -427,8 +443,8 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
ret = dmz_handle_discard(dmz, zone, bio);
break;
default:
- dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
- bio_op(bio));
+ DMERR("(%s): Unsupported BIO operation 0x%x",
+ dmz_metadata_label(dmz->metadata), bio_op(bio));
ret = -EIO;
}
@@ -502,7 +518,8 @@ static void dmz_flush_work(struct work_struct *work)
/* Flush dirty metadata blocks */
ret = dmz_flush_metadata(dmz->metadata);
if (ret)
- dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
+ DMDEBUG("(%s): Metadata flush failed, rc=%d",
+ dmz_metadata_label(dmz->metadata), ret);
/* Process queued flush requests */
while (1) {
@@ -525,7 +542,7 @@ static void dmz_flush_work(struct work_struct *work)
*/
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
- unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
+ unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
struct dm_chunk_work *cw;
int ret = 0;
@@ -558,7 +575,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
bio_list_add(&cw->bio_list, bio);
- dmz_reclaim_bio_acc(dmz->reclaim);
if (queue_work(dmz->chunk_wq, &cw->work))
dmz_get_chunk_work(cw);
out:
@@ -618,23 +634,22 @@ bool dmz_check_bdev(struct dmz_dev *dmz_dev)
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
struct dmz_target *dmz = ti->private;
- struct dmz_dev *dev = dmz->dev;
+ struct dmz_metadata *zmd = dmz->metadata;
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
sector_t sector = bio->bi_iter.bi_sector;
unsigned int nr_sectors = bio_sectors(bio);
sector_t chunk_sector;
int ret;
- if (dmz_bdev_is_dying(dmz->dev))
+ if (dmz_dev_is_dying(zmd))
return DM_MAPIO_KILL;
- dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
- bio_op(bio), (unsigned long long)sector, nr_sectors,
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
- (unsigned int)dmz_bio_blocks(bio));
-
- bio_set_dev(bio, dev->bdev);
+ DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
+ dmz_metadata_label(zmd),
+ bio_op(bio), (unsigned long long)sector, nr_sectors,
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
+ (unsigned int)dmz_bio_blocks(bio));
if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
return DM_MAPIO_REMAPPED;
@@ -644,7 +659,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_KILL;
/* Initialize the BIO context */
- bioctx->target = dmz;
+ bioctx->dev = NULL;
bioctx->zone = NULL;
bioctx->bio = bio;
refcount_set(&bioctx->ref, 1);
@@ -659,17 +674,17 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
}
/* Split zone BIOs to fit entirely into a zone */
- chunk_sector = sector & (dev->zone_nr_sectors - 1);
- if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
- dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
+ chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
+ if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
+ dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);
/* Now ready to handle this BIO */
ret = dmz_queue_chunk_work(dmz, bio);
if (ret) {
- dmz_dev_debug(dmz->dev,
- "BIO op %d, can't process chunk %llu, err %i\n",
- bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
- ret);
+ DMDEBUG("(%s): BIO op %d, can't process chunk %llu, err %i",
+ dmz_metadata_label(zmd),
+ bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
+ ret);
return DM_MAPIO_REQUEUE;
}
@@ -679,64 +694,65 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
/*
* Get zoned device information.
*/
-static int dmz_get_zoned_device(struct dm_target *ti, char *path)
+static int dmz_get_zoned_device(struct dm_target *ti, char *path,
+ int idx, int nr_devs)
{
struct dmz_target *dmz = ti->private;
- struct request_queue *q;
+ struct dm_dev *ddev;
struct dmz_dev *dev;
- sector_t aligned_capacity;
int ret;
+ struct block_device *bdev;
/* Get the target device */
- ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
+ ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &ddev);
if (ret) {
ti->error = "Get target device failed";
- dmz->ddev = NULL;
return ret;
}
- dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto err;
+ bdev = ddev->bdev;
+ if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
+ if (nr_devs == 1) {
+ ti->error = "Invalid regular device";
+ goto err;
+ }
+ if (idx != 0) {
+ ti->error = "First device must be a regular device";
+ goto err;
+ }
+ if (dmz->ddev[0]) {
+ ti->error = "Too many regular devices";
+ goto err;
+ }
+ dev = &dmz->dev[idx];
+ dev->flags = DMZ_BDEV_REGULAR;
+ } else {
+ if (dmz->ddev[idx]) {
+ ti->error = "Too many zoned devices";
+ goto err;
+ }
+ if (nr_devs > 1 && idx == 0) {
+ ti->error = "First device must be a regular device";
+ goto err;
+ }
+ dev = &dmz->dev[idx];
}
-
- dev->bdev = dmz->ddev->bdev;
+ dev->bdev = bdev;
+ dev->dev_idx = idx;
(void)bdevname(dev->bdev, dev->name);
- if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
- ti->error = "Not a zoned block device";
- ret = -EINVAL;
+ dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ if (ti->begin) {
+ ti->error = "Partial mapping is not supported";
goto err;
}
- q = bdev_get_queue(dev->bdev);
- dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- aligned_capacity = dev->capacity &
- ~((sector_t)blk_queue_zone_sectors(q) - 1);
- if (ti->begin ||
- ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
- ti->error = "Partial mapping not supported";
- ret = -EINVAL;
- goto err;
- }
-
- dev->zone_nr_sectors = blk_queue_zone_sectors(q);
- dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
-
- dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
- dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
-
- dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk);
-
- dmz->dev = dev;
+ dmz->ddev[idx] = ddev;
return 0;
err:
- dm_put_device(ti, dmz->ddev);
- kfree(dev);
-
- return ret;
+ dm_put_device(ti, ddev);
+ return -EINVAL;
}
/*
@@ -745,10 +761,78 @@ err:
static void dmz_put_zoned_device(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
- dm_put_device(ti, dmz->ddev);
- kfree(dmz->dev);
- dmz->dev = NULL;
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ if (dmz->ddev[i]) {
+ dm_put_device(ti, dmz->ddev[i]);
+ dmz->ddev[i] = NULL;
+ }
+ }
+}
+
+static int dmz_fixup_devices(struct dm_target *ti)
+{
+ struct dmz_target *dmz = ti->private;
+ struct dmz_dev *reg_dev, *zoned_dev;
+ struct request_queue *q;
+ sector_t zone_nr_sectors = 0;
+ int i;
+
+ /*
+ * When we have more than one device, the first one must be a
+ * regular block device and the others zoned block devices.
+ */
+ if (dmz->nr_ddevs > 1) {
+ reg_dev = &dmz->dev[0];
+ if (!(reg_dev->flags & DMZ_BDEV_REGULAR)) {
+ ti->error = "Primary disk is not a regular device";
+ return -EINVAL;
+ }
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ zoned_dev = &dmz->dev[i];
+ if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+ ti->error = "Secondary disk is not a zoned device";
+ return -EINVAL;
+ }
+ q = bdev_get_queue(zoned_dev->bdev);
+ if (zone_nr_sectors &&
+ zone_nr_sectors != blk_queue_zone_sectors(q)) {
+ ti->error = "Zone nr sectors mismatch";
+ return -EINVAL;
+ }
+ zone_nr_sectors = blk_queue_zone_sectors(q);
+ zoned_dev->zone_nr_sectors = zone_nr_sectors;
+ zoned_dev->nr_zones =
+ blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ }
+ } else {
+ reg_dev = NULL;
+ zoned_dev = &dmz->dev[0];
+ if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+ ti->error = "Disk is not a zoned device";
+ return -EINVAL;
+ }
+ q = bdev_get_queue(zoned_dev->bdev);
+ zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
+ zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ }
+
+ if (reg_dev) {
+ sector_t zone_offset;
+
+ reg_dev->zone_nr_sectors = zone_nr_sectors;
+ reg_dev->nr_zones =
+ DIV_ROUND_UP_SECTOR_T(reg_dev->capacity,
+ reg_dev->zone_nr_sectors);
+ reg_dev->zone_offset = 0;
+ zone_offset = reg_dev->nr_zones;
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ dmz->dev[i].zone_offset = zone_offset;
+ zone_offset += dmz->dev[i].nr_zones;
+ }
+ }
+ return 0;
}
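dmz_fixup_devices() lays all devices out in a single linear zone space:
the regular device occupies zones [0, nr_zones) and each following zoned
device starts at the running total. A standalone sketch of that offset
assignment, with hypothetical zone counts:

	#include <stdio.h>

	int main(void)
	{
		unsigned int nr_zones[] = { 128, 512, 512 };	/* per device */
		unsigned int zone_offset = 0;
		unsigned int i;

		for (i = 0; i < 3; i++) {
			printf("dev %u: zones [%u, %u)\n", i,
			       zone_offset, zone_offset + nr_zones[i]);
			zone_offset += nr_zones[i];
		}
		return 0;
	}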
/*
@@ -757,11 +841,10 @@ static void dmz_put_zoned_device(struct dm_target *ti)
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dmz_target *dmz;
- struct dmz_dev *dev;
- int ret;
+ int ret, i;
/* Check arguments */
- if (argc != 1) {
+ if (argc < 1) {
ti->error = "Invalid argument count";
return -EINVAL;
}
@@ -772,25 +855,42 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Unable to allocate the zoned target descriptor";
return -ENOMEM;
}
+ dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
+ if (!dmz->dev) {
+ ti->error = "Unable to allocate the zoned device descriptors";
+ kfree(dmz);
+ return -ENOMEM;
+ }
+ dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
+ if (!dmz->ddev) {
+ ti->error = "Unable to allocate the dm device descriptors";
+ ret = -ENOMEM;
+ goto err;
+ }
+ dmz->nr_ddevs = argc;
+
ti->private = dmz;
/* Get the target zoned block device */
- ret = dmz_get_zoned_device(ti, argv[0]);
- if (ret) {
- dmz->ddev = NULL;
- goto err;
+ for (i = 0; i < argc; i++) {
+ ret = dmz_get_zoned_device(ti, argv[i], i, argc);
+ if (ret)
+ goto err_dev;
}
+ ret = dmz_fixup_devices(ti);
+ if (ret)
+ goto err_dev;
/* Initialize metadata */
- dev = dmz->dev;
- ret = dmz_ctr_metadata(dev, &dmz->metadata);
+ ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
+ dm_table_device_name(ti->table));
if (ret) {
ti->error = "Metadata initialization failed";
goto err_dev;
}
/* Set target (no write same support) */
- ti->max_io_len = dev->zone_nr_sectors << 9;
+ ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9;
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_zeroes_bios = 1;
@@ -799,7 +899,8 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->discards_supported = true;
/* The exposed capacity is the number of chunks that can be mapped */
- ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
+ ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
+ dmz_zone_nr_sectors_shift(dmz->metadata);
/* Zone BIO */
ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
@@ -811,8 +912,9 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
- dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
- 0, dev->name);
+ dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ dmz_metadata_label(dmz->metadata));
if (!dmz->chunk_wq) {
ti->error = "Create chunk workqueue failed";
ret = -ENOMEM;
@@ -824,7 +926,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bio_list_init(&dmz->flush_list);
INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
- dev->name);
+ dmz_metadata_label(dmz->metadata));
if (!dmz->flush_wq) {
ti->error = "Create flush workqueue failed";
ret = -ENOMEM;
@@ -833,15 +935,18 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
/* Initialize reclaim */
- ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
- if (ret) {
- ti->error = "Zone reclaim initialization failed";
- goto err_fwq;
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
+ if (ret) {
+ ti->error = "Zone reclaim initialization failed";
+ goto err_fwq;
+ }
}
- dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
- (unsigned long long)ti->len,
- (unsigned long long)dmz_sect2blk(ti->len));
+ DMINFO("(%s): Target device: %llu 512-byte logical sectors (%llu blocks)",
+ dmz_metadata_label(dmz->metadata),
+ (unsigned long long)ti->len,
+ (unsigned long long)dmz_sect2blk(ti->len));
return 0;
err_fwq:
@@ -856,6 +961,7 @@ err_meta:
err_dev:
dmz_put_zoned_device(ti);
err:
+ kfree(dmz->dev);
kfree(dmz);
return ret;
@@ -867,11 +973,13 @@ err:
static void dmz_dtr(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
flush_workqueue(dmz->chunk_wq);
destroy_workqueue(dmz->chunk_wq);
- dmz_dtr_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_dtr_reclaim(dmz->dev[i].reclaim);
cancel_delayed_work_sync(&dmz->flush_work);
destroy_workqueue(dmz->flush_wq);
@@ -886,6 +994,7 @@ static void dmz_dtr(struct dm_target *ti)
mutex_destroy(&dmz->chunk_lock);
+ kfree(dmz->dev);
kfree(dmz);
}
@@ -895,7 +1004,7 @@ static void dmz_dtr(struct dm_target *ti)
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct dmz_target *dmz = ti->private;
- unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;
+ unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);
limits->logical_block_size = DMZ_BLOCK_SIZE;
limits->physical_block_size = DMZ_BLOCK_SIZE;
@@ -923,11 +1032,12 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct dmz_target *dmz = ti->private;
+ struct dmz_dev *dev = &dmz->dev[0];
- if (!dmz_check_bdev(dmz->dev))
+ if (!dmz_check_bdev(dev))
return -EIO;
- *bdev = dmz->dev->bdev;
+ *bdev = dev->bdev;
return 0;
}
@@ -938,9 +1048,11 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
static void dmz_suspend(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
flush_workqueue(dmz->chunk_wq);
- dmz_suspend_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_suspend_reclaim(dmz->dev[i].reclaim);
cancel_delayed_work_sync(&dmz->flush_work);
}
@@ -950,24 +1062,95 @@ static void dmz_suspend(struct dm_target *ti)
static void dmz_resume(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
- dmz_resume_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_resume_reclaim(dmz->dev[i].reclaim);
}
static int dmz_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dmz_target *dmz = ti->private;
- struct dmz_dev *dev = dmz->dev;
- sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
+ unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
+ sector_t capacity;
+ int i, r;
+
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
+ r = fn(ti, dmz->ddev[i], 0, capacity, data);
+ if (r)
+ break;
+ }
+ return r;
+}
+
+static void dmz_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
+{
+ struct dmz_target *dmz = ti->private;
+ ssize_t sz = 0;
+ char buf[BDEVNAME_SIZE];
+ struct dmz_dev *dev;
+ int i;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%u zones %u/%u cache",
+ dmz_nr_zones(dmz->metadata),
+ dmz_nr_unmap_cache_zones(dmz->metadata),
+ dmz_nr_cache_zones(dmz->metadata));
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ /*
+ * For a multi-device setup the first device
+ * contains only cache zones.
+ */
+ if ((i == 0) &&
+ (dmz_nr_cache_zones(dmz->metadata) > 0))
+ continue;
+ DMEMIT(" %u/%u random %u/%u sequential",
+ dmz_nr_unmap_rnd_zones(dmz->metadata, i),
+ dmz_nr_rnd_zones(dmz->metadata, i),
+ dmz_nr_unmap_seq_zones(dmz->metadata, i),
+ dmz_nr_seq_zones(dmz->metadata, i));
+ }
+ break;
+ case STATUSTYPE_TABLE:
+ dev = &dmz->dev[0];
+ format_dev_t(buf, dev->bdev->bd_dev);
+ DMEMIT("%s", buf);
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ dev = &dmz->dev[i];
+ format_dev_t(buf, dev->bdev->bd_dev);
+ DMEMIT(" %s", buf);
+ }
+ break;
+ }
+ return;
+}
+
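dmz_status() builds its output with DMEMIT(), which appends formatted
text to the caller's result buffer while advancing a running offset. A
userspace sketch of that accumulation pattern (assumed semantics, not the
device-mapper implementation; the counters are hypothetical):

	#include <stdarg.h>
	#include <stdio.h>

	static size_t emit(char *buf, size_t sz, size_t maxlen,
			   const char *fmt, ...)
	{
		va_list ap;
		int n;

		if (sz >= maxlen)
			return sz;	/* buffer already full */
		va_start(ap, fmt);
		n = vsnprintf(buf + sz, maxlen - sz, fmt, ap);
		va_end(ap);
		return n < 0 ? sz : sz + (size_t)n;
	}

	int main(void)
	{
		char result[256];
		size_t sz = 0;

		sz = emit(result, sz, sizeof(result),
			  "%u zones %u/%u cache", 1152u, 40u, 128u);
		sz = emit(result, sz, sizeof(result),
			  " %u/%u random %u/%u sequential",
			  100u, 256u, 200u, 768u);
		puts(result);
		return 0;
	}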
+static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
+{
+ struct dmz_target *dmz = ti->private;
+ int r = -EINVAL;
+
+ if (!strcasecmp(argv[0], "reclaim")) {
+ int i;
- return fn(ti, dmz->ddev, 0, capacity, data);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_schedule_reclaim(dmz->dev[i].reclaim);
+ r = 0;
+ } else
+ DMERR("unrecognized message %s", argv[0]);
+ return r;
}
static struct target_type dmz_type = {
.name = "zoned",
- .version = {1, 1, 0},
+ .version = {2, 0, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = dmz_ctr,
@@ -978,6 +1161,8 @@ static struct target_type dmz_type = {
.postsuspend = dmz_suspend,
.resume = dmz_resume,
.iterate_devices = dmz_iterate_devices,
+ .status = dmz_status,
+ .message = dmz_message,
};
static int __init dmz_init(void)
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index 5b5e493d479c..22f11440b423 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -45,34 +45,50 @@
#define dmz_bio_block(bio) dmz_sect2blk((bio)->bi_iter.bi_sector)
#define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))
+struct dmz_metadata;
+struct dmz_reclaim;
+
/*
* Zoned block device information.
*/
struct dmz_dev {
struct block_device *bdev;
+ struct dmz_metadata *metadata;
+ struct dmz_reclaim *reclaim;
char name[BDEVNAME_SIZE];
+ uuid_t uuid;
sector_t capacity;
+ unsigned int dev_idx;
+
unsigned int nr_zones;
+ unsigned int zone_offset;
unsigned int flags;
sector_t zone_nr_sectors;
- unsigned int zone_nr_sectors_shift;
- sector_t zone_nr_blocks;
- sector_t zone_nr_blocks_shift;
+ unsigned int nr_rnd;
+ atomic_t unmap_nr_rnd;
+ struct list_head unmap_rnd_list;
+ struct list_head map_rnd_list;
+
+ unsigned int nr_seq;
+ atomic_t unmap_nr_seq;
+ struct list_head unmap_seq_list;
+ struct list_head map_seq_list;
};
-#define dmz_bio_chunk(dev, bio) ((bio)->bi_iter.bi_sector >> \
- (dev)->zone_nr_sectors_shift)
-#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
+#define dmz_bio_chunk(zmd, bio) ((bio)->bi_iter.bi_sector >> \
+ dmz_zone_nr_sectors_shift(zmd))
+#define dmz_chunk_block(zmd, b) ((b) & (dmz_zone_nr_blocks(zmd) - 1))
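Both macros above depend on the zone size being a power of two, so the
chunk number is a right shift and the in-zone offset is a bitwise AND
with (size - 1). A quick sketch of that arithmetic with an illustrative
geometry of 65536 blocks per zone:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long zone_nr_blocks = 1ULL << 16;
		unsigned long long block = 200000;

		/* chunk = block / zone size; offset = block % zone size */
		unsigned long long chunk = block >> 16;
		unsigned long long offset = block & (zone_nr_blocks - 1);

		printf("block %llu -> chunk %llu, offset %llu\n",
		       block, chunk, offset);	/* chunk 3, offset 3392 */
		return 0;
	}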
/* Device flags. */
#define DMZ_BDEV_DYING (1 << 0)
#define DMZ_CHECK_BDEV (2 << 0)
+#define DMZ_BDEV_REGULAR (4 << 0)
/*
* Zone descriptor.
@@ -81,12 +97,18 @@ struct dm_zone {
/* For listing the zone depending on its state */
struct list_head link;
+ /* Device containing this zone */
+ struct dmz_dev *dev;
+
/* Zone type and state */
unsigned long flags;
/* Zone activation reference count */
atomic_t refcount;
+ /* Zone id */
+ unsigned int id;
+
/* Zone write pointer block (relative to the zone start block) */
unsigned int wp_block;
@@ -109,6 +131,7 @@ struct dm_zone {
*/
enum {
/* Zone write type */
+ DMZ_CACHE,
DMZ_RND,
DMZ_SEQ,
@@ -120,22 +143,28 @@ enum {
DMZ_META,
DMZ_DATA,
DMZ_BUF,
+ DMZ_RESERVED,
/* Zone internal state */
DMZ_RECLAIM,
DMZ_SEQ_WRITE_ERR,
+ DMZ_RECLAIM_TERMINATE,
};
/*
* Zone data accessors.
*/
+#define dmz_is_cache(z) test_bit(DMZ_CACHE, &(z)->flags)
#define dmz_is_rnd(z) test_bit(DMZ_RND, &(z)->flags)
#define dmz_is_seq(z) test_bit(DMZ_SEQ, &(z)->flags)
#define dmz_is_empty(z) ((z)->wp_block == 0)
#define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags)
#define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags)
#define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags)
+#define dmz_is_reserved(z) test_bit(DMZ_RESERVED, &(z)->flags)
#define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
+#define dmz_reclaim_should_terminate(z) \
+ test_bit(DMZ_RECLAIM_TERMINATE, &(z)->flags)
#define dmz_is_meta(z) test_bit(DMZ_META, &(z)->flags)
#define dmz_is_buf(z) test_bit(DMZ_BUF, &(z)->flags)
@@ -158,13 +187,11 @@ enum {
#define dmz_dev_debug(dev, format, args...) \
DMDEBUG("(%s): " format, (dev)->name, ## args)
-struct dmz_metadata;
-struct dmz_reclaim;
-
/*
* Functions defined in dm-zoned-metadata.c
*/
-int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **zmd);
+int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
+ struct dmz_metadata **zmd, const char *devname);
void dmz_dtr_metadata(struct dmz_metadata *zmd);
int dmz_resume_metadata(struct dmz_metadata *zmd);
@@ -175,23 +202,38 @@ void dmz_unlock_metadata(struct dmz_metadata *zmd);
void dmz_lock_flush(struct dmz_metadata *zmd);
void dmz_unlock_flush(struct dmz_metadata *zmd);
int dmz_flush_metadata(struct dmz_metadata *zmd);
+const char *dmz_metadata_label(struct dmz_metadata *zmd);
-unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone);
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd);
+bool dmz_check_dev(struct dmz_metadata *zmd);
+bool dmz_dev_is_dying(struct dmz_metadata *zmd);
+
#define DMZ_ALLOC_RND 0x01
-#define DMZ_ALLOC_RECLAIM 0x02
+#define DMZ_ALLOC_CACHE 0x02
+#define DMZ_ALLOC_SEQ 0x04
+#define DMZ_ALLOC_RECLAIM 0x10
-struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags);
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd,
+ unsigned int dev_idx, unsigned long flags);
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
unsigned int chunk);
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
-unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
-unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd);
/*
* Activate a zone (increment its reference count).
@@ -201,26 +243,10 @@ static inline void dmz_activate_zone(struct dm_zone *zone)
atomic_inc(&zone->refcount);
}
-/*
- * Deactivate a zone. This decrement the zone reference counter
- * indicating that all BIOs to the zone have completed when the count is 0.
- */
-static inline void dmz_deactivate_zone(struct dm_zone *zone)
-{
- atomic_dec(&zone->refcount);
-}
-
-/*
- * Test if a zone is active, that is, has a refcount > 0.
- */
-static inline bool dmz_is_active(struct dm_zone *zone)
-{
- return atomic_read(&zone->refcount);
-}
-
int dmz_lock_zone_reclaim(struct dm_zone *zone);
void dmz_unlock_zone_reclaim(struct dm_zone *zone);
-struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd);
+struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int dev_idx, bool idle);
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
unsigned int chunk, int op);
@@ -244,8 +270,7 @@ int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
/*
* Functions defined in dm-zoned-reclaim.c
*/
-int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
- struct dmz_reclaim **zrc);
+int dmz_ctr_reclaim(struct dmz_metadata *zmd, struct dmz_reclaim **zrc, int idx);
void dmz_dtr_reclaim(struct dmz_reclaim *zrc);
void dmz_suspend_reclaim(struct dmz_reclaim *zrc);
void dmz_resume_reclaim(struct dmz_reclaim *zrc);
@@ -258,4 +283,22 @@ void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
bool dmz_check_bdev(struct dmz_dev *dmz_dev);
+/*
+ * Deactivate a zone. This decrements the zone reference counter,
+ * indicating that all BIOs to the zone have completed when the count is 0.
+ */
+static inline void dmz_deactivate_zone(struct dm_zone *zone)
+{
+ dmz_reclaim_bio_acc(zone->dev->reclaim);
+ atomic_dec(&zone->refcount);
+}
+
+/*
+ * Test if a zone is active, that is, has a refcount > 0.
+ */
+static inline bool dmz_is_active(struct dm_zone *zone)
+{
+ return atomic_read(&zone->refcount);
+}
+
#endif /* DM_ZONED_H */
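The moved inline helpers implement a plain activation refcount; the only
behavioral change is that dmz_deactivate_zone() now also notifies the
owning device's reclaim instance. A minimal userspace sketch of the
refcount pattern, using C11 atomics in place of the kernel's atomic_t
(the reclaim hook is omitted):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct zone {
		atomic_int refcount;
	};

	static void zone_activate(struct zone *z)
	{
		atomic_fetch_add(&z->refcount, 1);
	}

	static void zone_deactivate(struct zone *z)
	{
		atomic_fetch_sub(&z->refcount, 1);
	}

	static bool zone_is_active(struct zone *z)
	{
		return atomic_load(&z->refcount) > 0;
	}

	int main(void)
	{
		struct zone z = { .refcount = 0 };

		zone_activate(&z);
		printf("active: %d\n", zone_is_active(&z));	/* 1 */
		zone_deactivate(&z);
		printf("active: %d\n", zone_is_active(&z));	/* 0 */
		return 0;
	}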
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index db9e46114653..109e81f33edb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -26,6 +26,7 @@
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
+#include <linux/blk-crypto.h>
#define DM_MSG_PREFIX "core"
@@ -675,16 +676,21 @@ static bool md_in_flight(struct mapped_device *md)
return md_in_flight_bios(md);
}
+u64 dm_start_time_ns_from_clone(struct bio *bio)
+{
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_io *io = tio->io;
+
+ return jiffies_to_nsecs(io->start_time);
+}
+EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
+
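dm_start_time_ns_from_clone() recovers the enclosing dm_target_io from
its embedded clone bio with container_of(), then follows it to the dm_io
that holds the start time. A standalone sketch of that pointer walk (the
toy struct layout is illustrative, not dm's):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct bio { int dummy; };

	struct target_io {
		unsigned long start_time;
		struct bio clone;	/* embedded member */
	};

	int main(void)
	{
		struct target_io tio = { .start_time = 42 };
		struct bio *clone = &tio.clone;

		/* Walk back from the embedded member to its container. */
		struct target_io *t =
			container_of(clone, struct target_io, clone);

		printf("start_time: %lu\n", t->start_time);	/* 42 */
		return 0;
	}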
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
- io->start_time = jiffies;
-
- generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
- &dm_disk(md)->part0);
-
+ io->start_time = bio_start_io_acct(bio);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
@@ -697,8 +703,7 @@ static void end_io_acct(struct dm_io *io)
struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
- generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
- io->start_time);
+ bio_end_io_acct(bio, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
@@ -1334,6 +1339,8 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
__bio_clone_fast(clone, bio);
+ bio_crypt_clone(clone, bio, GFP_NOIO);
+
if (bio_integrity(bio)) {
int r;
@@ -1788,6 +1795,18 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
int srcu_idx;
struct dm_table *map;
+ if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+ /*
+ * We are called with a live reference on q_usage_counter, but
+ * that one will be released as soon as we return. Grab an
+ * extra one as blk_mq_make_request expects to be able to
+ * consume a reference (which lives until the request is freed
+ * in case a request is allocated).
+ */
+ percpu_ref_get(&q->q_usage_counter);
+ return blk_mq_make_request(q, bio);
+ }
+
map = dm_get_live_table(md, &srcu_idx);
/* if we're suspended, we have to queue this io for later */
@@ -2600,7 +2619,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (noflush)
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
else
- pr_debug("%s: suspending with flush\n", dm_device_name(md));
+ DMDEBUG("%s: suspending with flush", dm_device_name(md));
/*
* This gets reverted if there's an error later and the targets
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index b952bd45bd6a..95a5f3757fa3 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -324,14 +324,6 @@ static void end_bitmap_write(struct buffer_head *bh, int uptodate)
wake_up(&bitmap->write_wait);
}
-/* copied from buffer.c */
-static void
-__clear_page_buffers(struct page *page)
-{
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
-}
static void free_buffers(struct page *page)
{
struct buffer_head *bh;
@@ -345,7 +337,7 @@ static void free_buffers(struct page *page)
free_buffer_head(bh);
bh = next;
}
- __clear_page_buffers(page);
+ detach_page_private(page);
put_page(page);
}
@@ -374,7 +366,7 @@ static int read_page(struct file *file, unsigned long index,
ret = -ENOMEM;
goto out;
}
- attach_page_buffers(page, bh);
+ attach_page_private(page, bh);
blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
while (bh) {
block = blk_cur;
diff --git a/drivers/md/md-linear.h b/drivers/md/md-linear.h
index 8381d651d4ed..24e97db50ebb 100644
--- a/drivers/md/md-linear.h
+++ b/drivers/md/md-linear.h
@@ -12,6 +12,6 @@ struct linear_conf
struct rcu_head rcu;
sector_t array_sectors;
int raid_disks; /* a copy of mddev->raid_disks */
- struct dev_info disks[0];
+ struct dev_info disks[];
};
#endif
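The disks[0] to disks[] change converts a zero-length array into a C99
flexible array member; the allocation pattern is unchanged, reserving
the header plus n trailing elements. A minimal sketch:

	#include <stdio.h>
	#include <stdlib.h>

	struct dev_info {
		long size;
	};

	struct linear_conf_sketch {
		int raid_disks;
		struct dev_info disks[];	/* flexible array member */
	};

	int main(void)
	{
		int n = 4;
		struct linear_conf_sketch *conf =
			malloc(sizeof(*conf) + n * sizeof(conf->disks[0]));

		if (!conf)
			return 1;
		conf->raid_disks = n;
		conf->disks[n - 1].size = 100;
		printf("disks %d, last size %ld\n",
		       conf->raid_disks, conf->disks[n - 1].size);
		free(conf);
		return 0;
	}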
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 271e8a587354..f567f536b529 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -89,6 +89,7 @@ static struct module *md_cluster_mod;
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
+static struct workqueue_struct *md_rdev_misc_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
@@ -227,13 +228,13 @@ void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
goto abort;
if (mddev->serial_info_pool == NULL) {
- unsigned int noio_flag;
-
- noio_flag = memalloc_noio_save();
+ /*
+ * already in memalloc noio context by
+ * mddev_suspend()
+ */
mddev->serial_info_pool =
mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
sizeof(struct serial_info));
- memalloc_noio_restore(noio_flag);
if (!mddev->serial_info_pool) {
rdevs_uninit_serial(mddev);
pr_err("can't alloc memory pool for serialization\n");
@@ -466,7 +467,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
const int sgrp = op_stat_group(bio_op(bio));
- struct mddev *mddev = q->queuedata;
+ struct mddev *mddev = bio->bi_disk->private_data;
unsigned int sectors;
if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
@@ -527,11 +528,15 @@ void mddev_suspend(struct mddev *mddev)
wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
del_timer_sync(&mddev->safemode_timer);
+ /* restrict memory reclaim I/O while the raid array is suspended */
+ mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
+ /* entered the memalloc noio scope from mddev_suspend() */
+ memalloc_noio_restore(mddev->noio_flag);
lockdep_assert_held(&mddev->reconfig_mutex);
if (--mddev->suspended)
return;
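The suspend/resume pair above opens a memalloc noio scope: every
allocation made while the array is suspended implicitly behaves as
GFP_NOIO, which is why the explicit save/restore around the
serial_info_pool allocation earlier in this patch could be dropped. A
hedged kernel-style sketch of the scope API (not the md code itself):

	#include <linux/sched/mm.h>
	#include <linux/slab.h>

	static unsigned int noio_flag;

	static void enter_no_reclaim_io(void)
	{
		/* Until restore, allocations behave as if GFP_NOIO was set. */
		noio_flag = memalloc_noio_save();
	}

	static void leave_no_reclaim_io(void)
	{
		memalloc_noio_restore(noio_flag);
	}

	static void *alloc_in_scope(size_t len)
	{
		/* GFP_KERNEL here cannot recurse into block I/O. */
		return kmalloc(len, GFP_KERNEL);
	}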
@@ -2454,7 +2459,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
return err;
}
-static void md_delayed_delete(struct work_struct *ws)
+static void rdev_delayed_delete(struct work_struct *ws)
{
struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
kobject_del(&rdev->kobj);
@@ -2479,9 +2484,9 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
* to delay it due to rcu usage.
*/
synchronize_rcu();
- INIT_WORK(&rdev->del_work, md_delayed_delete);
+ INIT_WORK(&rdev->del_work, rdev_delayed_delete);
kobject_get(&rdev->kobj);
- queue_work(md_misc_wq, &rdev->del_work);
+ queue_work(md_rdev_misc_wq, &rdev->del_work);
}
/*
@@ -3191,8 +3196,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
- err = rdev->mddev->pers->
- hot_add_disk(rdev->mddev, rdev);
+ err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
if (err) {
rdev->raid_disk = -1;
return err;
@@ -4514,6 +4518,20 @@ null_show(struct mddev *mddev, char *page)
return -EINVAL;
}
+/* need to ensure rdev_delayed_delete() has completed */
+static void flush_rdev_wq(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+ if (work_pending(&rdev->del_work)) {
+ flush_workqueue(md_rdev_misc_wq);
+ break;
+ }
+ rcu_read_unlock();
+}
+
static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
@@ -4541,8 +4559,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
minor != MINOR(dev))
return -EOVERFLOW;
- flush_workqueue(md_misc_wq);
-
+ flush_rdev_wq(mddev);
err = mddev_lock(mddev);
if (err)
return err;
@@ -4780,7 +4797,8 @@ action_store(struct mddev *mddev, const char *page, size_t len)
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
mddev_lock(mddev) == 0) {
- flush_workqueue(md_misc_wq);
+ if (work_pending(&mddev->del_work))
+ flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
@@ -5626,7 +5644,6 @@ static int md_alloc(dev_t dev, char *name)
mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE);
if (!mddev->queue)
goto abort;
- mddev->queue->queuedata = mddev;
blk_set_stacking_limits(&mddev->queue->limits);
@@ -6147,7 +6164,8 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- flush_workqueue(md_misc_wq);
+ if (work_pending(&mddev->del_work))
+ flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
@@ -6200,7 +6218,8 @@ static void __md_stop(struct mddev *mddev)
md_bitmap_destroy(mddev);
mddev_detach(mddev);
/* Ensure ->event_work is done */
- flush_workqueue(md_misc_wq);
+ if (mddev->event_work.func)
+ flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);
@@ -7495,9 +7514,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
- if (cmd == ADD_NEW_DISK)
- /* need to ensure md_delayed_delete() has completed */
- flush_workqueue(md_misc_wq);
+ if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
+ flush_rdev_wq(mddev);
if (cmd == HOT_REMOVE_DISK)
/* need to ensure recovery thread has run */
@@ -7752,7 +7770,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
- flush_workqueue(md_misc_wq);
+ if (work_pending(&mddev->del_work))
+ flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
return -ERESTARTSYS;
}
@@ -9040,8 +9059,7 @@ static int remove_and_add_spares(struct mddev *mddev,
rdev->recovery_offset = 0;
}
- if (mddev->pers->
- hot_add_disk(mddev, rdev) == 0) {
+ if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
if (!test_bit(Journal, &rdev->flags))
@@ -9469,6 +9487,10 @@ static int __init md_init(void)
if (!md_misc_wq)
goto err_misc_wq;
+ md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
+ if (!md_rdev_misc_wq)
+ goto err_rdev_misc_wq;
+
if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
goto err_md;
@@ -9490,6 +9512,8 @@ static int __init md_init(void)
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
+ destroy_workqueue(md_rdev_misc_wq);
+err_rdev_misc_wq:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
@@ -9776,6 +9800,7 @@ static __exit void md_exit(void)
* destroy_workqueue() below will wait for that to complete.
*/
}
+ destroy_workqueue(md_rdev_misc_wq);
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
}
@@ -9785,7 +9810,7 @@ module_exit(md_exit)
static int get_ro(char *buffer, const struct kernel_param *kp)
{
- return sprintf(buffer, "%d", start_readonly);
+ return sprintf(buffer, "%d\n", start_readonly);
}
static int set_ro(const char *val, const struct kernel_param *kp)
{
diff --git a/drivers/md/md.h b/drivers/md/md.h
index acd681939112..612814d07d35 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -497,6 +497,7 @@ struct mddev {
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
unsigned int good_device_nr; /* good device num within cluster raid */
+ unsigned int noio_flag; /* for memalloc scope API */
bool has_superblocks:1;
bool fail_last_dev:1;
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index a240990a7f33..564896659dd4 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -38,7 +38,7 @@ struct node_header {
struct btree_node {
struct node_header header;
- __le64 keys[0];
+ __le64 keys[];
} __packed;
@@ -68,7 +68,7 @@ struct ro_spine {
};
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
-int exit_ro_spine(struct ro_spine *s);
+void exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
void ro_pop(struct ro_spine *s);
struct btree_node *ro_node(struct ro_spine *s);
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index b27b8091a1ca..e03cb9e48773 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -132,15 +132,13 @@ void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info)
s->nodes[1] = NULL;
}
-int exit_ro_spine(struct ro_spine *s)
+void exit_ro_spine(struct ro_spine *s)
{
- int r = 0, i;
+ int i;
for (i = 0; i < s->count; i++) {
unlock_block(s->info, s->nodes[i]);
}
-
- return r;
}
int ro_step(struct ro_spine *s, dm_block_t new_child)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index cd810e195086..dcd27f3da84e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -296,22 +296,17 @@ static void reschedule_retry(struct r1bio *r1_bio)
static void call_bio_endio(struct r1bio *r1_bio)
{
struct bio *bio = r1_bio->master_bio;
- struct r1conf *conf = r1_bio->mddev->private;
if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- /*
- * Wake up any possible resync thread that waits for the device
- * to go idle.
- */
- allow_barrier(conf, r1_bio->sector);
}
static void raid_end_bio_io(struct r1bio *r1_bio)
{
struct bio *bio = r1_bio->master_bio;
+ struct r1conf *conf = r1_bio->mddev->private;
/* if nobody has done the final endio yet, do it now */
if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
@@ -322,6 +317,12 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
call_bio_endio(r1_bio);
}
+ /*
+ * Wake up any possible resync thread that waits for the device
+ * to go idle. All I/Os, even write-behind writes, are done.
+ */
+ allow_barrier(conf, r1_bio->sector);
+
free_r1bio(r1_bio);
}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index e7ccad898736..b7eb09e8c025 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -180,7 +180,7 @@ struct r1bio {
* if the IO is in WRITE direction, then multiple bios are used.
* We choose the number when they are allocated.
*/
- struct bio *bios[0];
+ struct bio *bios[];
/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
};
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index d3eaaf3eb1bc..79cd2b7d3128 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -153,7 +153,7 @@ struct r10bio {
};
sector_t addr;
int devnum;
- } devs[0];
+ } devs[];
};
/* bits for r10bio.state */
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index d50238d0a85d..a750f4bbb5d9 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1037,7 +1037,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
}
/* flush the disk cache after recovery if necessary */
- ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
out:
__free_page(page);
return ret;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ba00e9877f02..ab8067f9ce8c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2215,10 +2215,13 @@ static int grow_stripes(struct r5conf *conf, int num)
}
/**
- * scribble_len - return the required size of the scribble region
+ * scribble_alloc - allocate percpu scribble buffer for required size
+ * of the scribble region
+ * @percpu - per-CPU structure from for_each_present_cpu() in the caller
* @num - total number of disks in the array
+ * @cnt - number of scribble objects, sizing the scribble region
*
- * The size must be enough to contain:
+ * The scribble buffer size must be enough to contain:
* 1/ a struct page pointer for each device in the array +2
* 2/ room to convert each entry in (1) to its corresponding dma
* (dma_map_page()) or page (page_address()) address.
@@ -2228,14 +2231,19 @@ static int grow_stripes(struct r5conf *conf, int num)
* of the P and Q blocks.
*/
static int scribble_alloc(struct raid5_percpu *percpu,
- int num, int cnt, gfp_t flags)
+ int num, int cnt)
{
size_t obj_size =
sizeof(struct page *) * (num+2) +
sizeof(addr_conv_t) * (num+2);
void *scribble;
- scribble = kvmalloc_array(cnt, obj_size, flags);
+ /*
+ * If this is called in raid array suspend context, it also runs in
+ * memalloc noio context, so allocations with the GFP_KERNEL flag
+ * cannot trigger recursive memory-reclaim I/O.
+ */
+ scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL);
if (!scribble)
return -ENOMEM;
@@ -2267,8 +2275,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
percpu = per_cpu_ptr(conf->percpu, cpu);
err = scribble_alloc(percpu, new_disks,
- new_sectors / STRIPE_SECTORS,
- GFP_NOIO);
+ new_sectors / STRIPE_SECTORS);
if (err)
break;
}
@@ -6759,8 +6766,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
conf->previous_raid_disks),
max(conf->chunk_sectors,
conf->prev_chunk_sectors)
- / STRIPE_SECTORS,
- GFP_KERNEL)) {
+ / STRIPE_SECTORS)) {
free_scratch_buffer(conf, percpu);
return -ENOMEM;
}
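
The comment added above justifies switching these allocations from GFP_NOIO to GFP_KERNEL: while the raid array is suspended, the caller sits inside a memalloc-noio scope, and every allocation in such a scope implicitly behaves as NOIO, so GFP_KERNEL cannot recurse into block I/O there yet keeps full reclaim power elsewhere. A minimal sketch of that scoped pattern (the function is illustrative; memalloc_noio_save()/memalloc_noio_restore() are the helpers from <linux/sched/mm.h>):

	#include <linux/mm.h>
	#include <linux/sched/mm.h>

	static void noio_scope_sketch(void)
	{
		unsigned int flags = memalloc_noio_save();
		void *buf;

		/* Inside the scope, GFP_KERNEL is implicitly degraded to
		 * NOIO, so this cannot trigger recursive reclaim I/O. */
		buf = kvmalloc(PAGE_SIZE, GFP_KERNEL);
		kvfree(buf);

		memalloc_noio_restore(flags);
	}
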
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 9dfea5c4b6ab..a6d073f2e036 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -3,42 +3,81 @@
# Multimedia device configuration
#
-config CEC_CORE
- tristate
-
-config CEC_NOTIFIER
- bool
-
-config CEC_PIN
- bool
-
+#
+# NOTE: CEC and Remote Controller support should not depend on MEDIA_SUPPORT
+#
source "drivers/media/rc/Kconfig"
+source "drivers/media/cec/Kconfig"
menuconfig MEDIA_SUPPORT
tristate "Multimedia support"
depends on HAS_IOMEM
help
- If you want to use Webcams, Video grabber devices and/or TV devices
- enable this option and other options below.
+ If you want to use media devices, including webcams, video grabber
+ devices, TV devices, V4L2 codecs, etc., enable this option and the
+ other options below.
+
Additional info and docs are available on the web at
<https://linuxtv.org>
if MEDIA_SUPPORT
-comment "Multimedia core support"
+config MEDIA_SUPPORT_FILTER
+ bool "Filter media drivers"
+ depends on MEDIA_SUPPORT
+ default y if !EMBEDDED && !EXPERT
+ help
+ Configuring the media subsystem can be complex, as there are
+ hundreds of drivers and other config options.
+
+ This menu offers options that help the Kernel's config system
+ hide drivers that are outside the scope of the user's needs,
+ and disable core support for unused APIs.
+
+ If not selected, all non-optional media core functionality
+ needed to support media drivers will be enabled, and all
+ media device drivers will be shown.
+
+config MEDIA_SUBDRV_AUTOSELECT
+ bool "Autoselect ancillary drivers (tuners, sensors, i2c, spi, frontends)"
+ depends on HAS_IOMEM
+ select I2C
+ select I2C_MUX
+ default y if MEDIA_SUPPORT_FILTER
+ help
+ By default, a media driver auto-selects all possible ancillary
+ devices such as tuners, sensors, video encoders/decoders and
+ frontends, that are used by any of the supported devices.
+
+ This is generally the right thing to do, except when there
+ are strict constraints with regards to the kernel size,
+ like on embedded systems.
+
+ Use this option with care, as deselecting ancillary drivers which
+ are, in fact, necessary will result in the lack of the needed
+ functionality for your device (it may not tune or may not have
+ the needed demodulators).
+
+ If unsure say Y.
+
+menu "Media device types"
#
# Multimedia support - automatically enable V4L2 and DVB core
#
config MEDIA_CAMERA_SUPPORT
- bool "Cameras/video grabbers support"
+ bool
+ prompt "Cameras and video grabbers" if MEDIA_SUPPORT_FILTER
+ default y if !MEDIA_SUPPORT_FILTER
help
Enable support for webcams and video grabbers.
Say Y when you have a webcam or a video capture grabber board.
config MEDIA_ANALOG_TV_SUPPORT
- bool "Analog TV support"
+ bool
+ prompt "Analog TV" if MEDIA_SUPPORT_FILTER
+ default y if !MEDIA_SUPPORT_FILTER
help
Enable analog TV support.
@@ -50,7 +89,9 @@ config MEDIA_ANALOG_TV_SUPPORT
will disable support for them.
config MEDIA_DIGITAL_TV_SUPPORT
- bool "Digital TV support"
+ bool
+ prompt "Digital TV" if MEDIA_SUPPORT_FILTER
+ default y if !MEDIA_SUPPORT_FILTER
help
Enable digital TV support.
@@ -58,7 +99,9 @@ config MEDIA_DIGITAL_TV_SUPPORT
hybrid digital TV and analog TV.
config MEDIA_RADIO_SUPPORT
- bool "AM/FM radio receivers/transmitters support"
+ bool
+ prompt "AM/FM radio receivers/transmitters" if MEDIA_SUPPORT_FILTER
+ default y if !MEDIA_SUPPORT_FILTER
help
Enable AM/FM radio support.
@@ -72,47 +115,65 @@ config MEDIA_RADIO_SUPPORT
disable support for them.
config MEDIA_SDR_SUPPORT
- bool "Software defined radio support"
+ bool
+ prompt "Software defined radio" if MEDIA_SUPPORT_FILTER
+ default y if !MEDIA_SUPPORT_FILTER
help
Enable software defined radio support.
Say Y when you have a software defined radio device.
-config MEDIA_CEC_SUPPORT
- bool "HDMI CEC support"
+config MEDIA_PLATFORM_SUPPORT
+ bool
+ prompt "Platform-specific devices" if MEDIA_SUPPORT_FILTER
+ default y if !MEDIA_SUPPORT_FILTER
help
- Enable support for HDMI CEC (Consumer Electronics Control),
- which is an optional HDMI feature.
+ Enable support for complex cameras, codecs, and other hardware
+ that is integrated into the CPU, GPU or Image Signal Processor
+ and doesn't use the PCI, USB or FireWire buses.
- Say Y when you have an HDMI receiver, transmitter or a USB CEC
- adapter that supports HDMI CEC.
+ This is found on embedded hardware (SoCs), on V4L2 codecs and
+ on some GPU and newer CPU chipsets.
-source "drivers/media/cec/Kconfig"
+ Say Y when you want to be able to see such devices.
-source "drivers/media/mc/Kconfig"
+config MEDIA_TEST_SUPPORT
+ bool
+ prompt "Test drivers" if MEDIA_SUPPORT_FILTER
+ default y if !MEDIA_SUPPORT_FILTER
+ help
+ These drivers should not be used on production Kernels, but
+ can be useful on debug ones. This option enables several dummy
+ drivers that simulate real hardware. They are very useful for
+ testing userspace applications and for validating that the
+ subsystem core doesn't have regressions.
-#
-# Video4Linux support
-# Only enables if one of the V4L2 types (ATV, webcam, radio) is selected
-#
+ Say Y if you want to use some virtual test driver.
+
+ In case of doubts, say N.
+endmenu # media device types
+
+menu "Media core support"
+ visible if !MEDIA_SUPPORT_FILTER
config VIDEO_DEV
- tristate
- depends on MEDIA_SUPPORT
- depends on MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT
- default y
+ tristate "Video4Linux core"
+ default MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT || MEDIA_PLATFORM_SUPPORT || MEDIA_TEST_SUPPORT
+ help
+ Enables the V4L2 API, used by cameras, analog TV, video grabbers,
+ radio devices and by some input devices.
-config VIDEO_V4L2_SUBDEV_API
- bool "V4L2 sub-device userspace API"
- depends on VIDEO_DEV && MEDIA_CONTROLLER
+config MEDIA_CONTROLLER
+ bool "Media Controller API"
+ default MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_PLATFORM_SUPPORT
help
- Enables the V4L2 sub-device pad-level userspace API used to configure
- video format, size and frame rate between hardware blocks.
+ Enable the media controller API, used to query the internal
+ topology of media devices and to configure it dynamically.
This API is mostly used by camera interfaces in embedded platforms.
-source "drivers/media/v4l2-core/Kconfig"
-
#
# DVB Core
# Only enables if one of DTV is selected
@@ -120,98 +181,73 @@ source "drivers/media/v4l2-core/Kconfig"
config DVB_CORE
tristate
- depends on MEDIA_SUPPORT
depends on MEDIA_DIGITAL_TV_SUPPORT
depends on (I2C || I2C=n)
- default y
+ default MEDIA_DIGITAL_TV_SUPPORT
select CRC32
-
-config DVB_MMAP
- bool "Enable DVB memory-mapped API (EXPERIMENTAL)"
- depends on DVB_CORE
- depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE
- select VIDEOBUF2_VMALLOC
help
- This option enables DVB experimental memory-mapped API, which
- reduces the number of context switches to read DVB buffers, as
- the buffers can use mmap() syscalls.
+ Enables the DVB API, used by Digital TV devices. Supports several
+ standards, including DVB, ATSC, ISDB and CMMB.
- Support for it is experimental. Use with care. If unsure,
- say N.
+endmenu # Media core support
-config DVB_NET
- bool "DVB Network Support"
- default (NET && INET)
- depends on NET && INET && DVB_CORE
- help
- This option enables DVB Network Support which is a part of the DVB
- standard. It is used, for example, by automatic firmware updates used
- on Set-Top-Boxes. It can also be used to access the Internet via the
- DVB card, if the network provider supports it.
+#
+# Extra per-media API core functionality
- You may want to disable the network support on embedded devices. If
- unsure say Y.
+menu "Video4Linux options"
+ visible if VIDEO_DEV
-# This Kconfig option is used by both PCI and USB drivers
-config TTPCI_EEPROM
- tristate
- depends on I2C
+source "drivers/media/v4l2-core/Kconfig"
+endmenu
+
+menu "Media controller options"
+ visible if MEDIA_CONTROLLER
+
+source "drivers/media/mc/Kconfig"
+endmenu
+
+menu "Digital TV options"
+ visible if DVB_CORE
source "drivers/media/dvb-core/Kconfig"
+endmenu
-comment "Media drivers"
+menu "Media drivers"
-#
-# V4L platform/mem2mem drivers
-#
+comment "Drivers filtered as selected at 'Filter media drivers'"
+ depends on MEDIA_SUPPORT_FILTER
source "drivers/media/usb/Kconfig"
source "drivers/media/pci/Kconfig"
-source "drivers/media/platform/Kconfig"
-source "drivers/media/mmc/Kconfig"
source "drivers/media/radio/Kconfig"
-comment "Supported FireWire (IEEE 1394) Adapters"
- depends on DVB_CORE && FIREWIRE
-source "drivers/media/firewire/Kconfig"
-
# Common driver options
source "drivers/media/common/Kconfig"
-comment "Media ancillary drivers (tuners, sensors, i2c, spi, frontends)"
-
-#
-# Ancillary drivers (tuners, i2c, spi, frontends)
-#
+if MEDIA_PLATFORM_SUPPORT
+source "drivers/media/platform/Kconfig"
+source "drivers/media/mmc/Kconfig"
+endif
-config MEDIA_SUBDRV_AUTOSELECT
- bool "Autoselect ancillary drivers (tuners, sensors, i2c, spi, frontends)"
- depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_CAMERA_SUPPORT || MEDIA_SDR_SUPPORT
- depends on HAS_IOMEM
- select I2C
- select I2C_MUX
- default y if !EMBEDDED
- help
- By default, a media driver auto-selects all possible ancillary
- devices such as tuners, sensors, video encoders/decoders and
- frontends, that are used by any of the supported devices.
+if MEDIA_TEST_SUPPORT
+source "drivers/media/test-drivers/Kconfig"
+endif
- This is generally the right thing to do, except when there
- are strict constraints with regards to the kernel size,
- like on embedded systems.
+source "drivers/media/firewire/Kconfig"
- Use this option with care, as deselecting ancillary drivers which
- are, in fact, necessary will result in the lack of the needed
- functionality for your device (it may not tune or may not have
- the needed demodulators).
+endmenu
- If unsure say Y.
+#
+# Ancillary drivers (tuners, i2c, spi, frontends)
+#
config MEDIA_HIDE_ANCILLARY_SUBDRV
bool
depends on MEDIA_SUBDRV_AUTOSELECT && !COMPILE_TEST && !EXPERT
default y
+menu "Media ancillary drivers"
+
config MEDIA_ATTACH
bool
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
@@ -223,4 +259,6 @@ source "drivers/media/spi/Kconfig"
source "drivers/media/tuners/Kconfig"
source "drivers/media/dvb-frontends/Kconfig"
+endmenu
+
endif # MEDIA_SUPPORT
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index f215f0a89f9e..d18357bf1346 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -29,6 +29,6 @@ obj-$(CONFIG_CEC_CORE) += cec/
# Finally, merge the drivers that require the core
#
-obj-y += common/ platform/ pci/ usb/ mmc/ firewire/ spi/
+obj-y += common/ platform/ pci/ usb/ mmc/ firewire/ spi/ test-drivers/
obj-$(CONFIG_VIDEO_DEV) += radio/
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index c01919713ab9..eea74b7cfa8c 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -1,4 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
+config CEC_CORE
+ tristate
+
+config CEC_NOTIFIER
+ bool
+
+config CEC_PIN
+ bool
+
config MEDIA_CEC_RC
bool "HDMI CEC RC integration"
depends on CEC_CORE && RC_CORE
@@ -11,3 +20,19 @@ config CEC_PIN_ERROR_INJ
depends on CEC_PIN && DEBUG_FS
help
This option enables CEC error injection using debugfs.
+
+menuconfig MEDIA_CEC_SUPPORT
+ bool
+ prompt "HDMI CEC drivers"
+ default y if !MEDIA_SUPPORT_FILTER
+ help
+ Enable support for HDMI CEC (Consumer Electronics Control),
+ which is an optional HDMI feature.
+
+ Say Y when you have an HDMI receiver, transmitter or a USB CEC
+ adapter that supports HDMI CEC.
+
+if MEDIA_CEC_SUPPORT
+source "drivers/media/cec/platform/Kconfig"
+source "drivers/media/cec/usb/Kconfig"
+endif
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index ad8677d8c896..74e80e1b3571 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,16 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-cec-objs := cec-core.o cec-adap.o cec-api.o
-
-ifeq ($(CONFIG_CEC_NOTIFIER),y)
- cec-objs += cec-notifier.o
-endif
-
-ifeq ($(CONFIG_CEC_PIN),y)
- cec-objs += cec-pin.o
-endif
-
-ifeq ($(CONFIG_CEC_PIN_ERROR_INJ),y)
- cec-objs += cec-pin-error-inj.o
-endif
-
-obj-$(CONFIG_CEC_CORE) += cec.o
+obj-y += core/ platform/ usb/
diff --git a/drivers/media/cec/core/Makefile b/drivers/media/cec/core/Makefile
new file mode 100644
index 000000000000..ad8677d8c896
--- /dev/null
+++ b/drivers/media/cec/core/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+cec-objs := cec-core.o cec-adap.o cec-api.o
+
+ifeq ($(CONFIG_CEC_NOTIFIER),y)
+ cec-objs += cec-notifier.o
+endif
+
+ifeq ($(CONFIG_CEC_PIN),y)
+ cec-objs += cec-pin.o
+endif
+
+ifeq ($(CONFIG_CEC_PIN_ERROR_INJ),y)
+ cec-objs += cec-pin-error-inj.o
+endif
+
+obj-$(CONFIG_CEC_CORE) += cec.o
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 6c95dc471d4c..6a04d19a96b2 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1734,6 +1734,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
unsigned j;
log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
+ dprintk(1, "unknown logical address type\n");
+ return -EINVAL;
+ }
if (type_mask & (1 << log_addrs->log_addr_type[i])) {
dprintk(1, "duplicate logical address type\n");
return -EINVAL;
@@ -1754,10 +1758,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
dprintk(1, "invalid primary device type\n");
return -EINVAL;
}
- if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
- dprintk(1, "unknown logical address type\n");
- return -EINVAL;
- }
for (j = 0; j < feature_sz; j++) {
if ((features[j] & 0x80) == 0) {
if (op_is_dev_features)
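
The reorder above is a correctness fix: log_addr_type[i] is used as a shift count in the duplicate-type test (1 << log_addrs->log_addr_type[i]), so the range check must run before that use, otherwise an out-of-range value from userspace produces an undefined shift before it is ever rejected. A hedged sketch of the validate-before-use order (field names follow the hunk; the helper itself is illustrative):

	static int check_log_addr_type(u8 type, u16 type_mask)
	{
		/* Range-check first: 'type' comes from userspace and is
		 * about to be used as a shift count. */
		if (type > CEC_LOG_ADDR_TYPE_UNREGISTERED)
			return -EINVAL;
		if (type_mask & (1 << type))	/* now a well-defined shift */
			return -EINVAL;		/* duplicate type */
		return 0;
	}
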
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/core/cec-api.c
index 17d1cb2e5f97..17d1cb2e5f97 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/core/cec-core.c
index 0c52e1bb3910..0c52e1bb3910 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/core/cec-notifier.c
index e748cd54b45d..517e0035fc99 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/core/cec-notifier.c
@@ -2,7 +2,7 @@
/*
* cec-notifier.c - notify CEC drivers of physical address changes
*
- * Copyright 2016 Russell King <rmk+kernel@arm.linux.org.uk>
+ * Copyright 2016 Russell King.
* Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
diff --git a/drivers/media/cec/cec-pin-error-inj.c b/drivers/media/cec/core/cec-pin-error-inj.c
index c0088d3b8e3d..c0088d3b8e3d 100644
--- a/drivers/media/cec/cec-pin-error-inj.c
+++ b/drivers/media/cec/core/cec-pin-error-inj.c
diff --git a/drivers/media/cec/cec-pin-priv.h b/drivers/media/cec/core/cec-pin-priv.h
index f423db8855d9..f423db8855d9 100644
--- a/drivers/media/cec/cec-pin-priv.h
+++ b/drivers/media/cec/core/cec-pin-priv.h
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index 660fe111f540..660fe111f540 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
diff --git a/drivers/media/cec/cec-priv.h b/drivers/media/cec/core/cec-priv.h
index 9bbd05053d42..9bbd05053d42 100644
--- a/drivers/media/cec/cec-priv.h
+++ b/drivers/media/cec/core/cec-priv.h
diff --git a/drivers/media/cec/platform/Kconfig b/drivers/media/cec/platform/Kconfig
new file mode 100644
index 000000000000..350533cd8261
--- /dev/null
+++ b/drivers/media/cec/platform/Kconfig
@@ -0,0 +1,120 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Platform drivers
+
+config CEC_CROS_EC
+ tristate "ChromeOS EC CEC driver"
+ depends on CROS_EC
+ select CEC_CORE
+ select CEC_NOTIFIER
+ select CROS_EC_PROTO
+ help
+ If you say yes here you will get support for the
+ ChromeOS Embedded Controller's CEC.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_MESON_AO
+ tristate "Amlogic Meson AO CEC driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_MESON_G12A_AO
+ tristate "Amlogic Meson G12A AO CEC driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ select REGMAP
+ select REGMAP_MMIO
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for Amlogic Meson G12A SoCs AO CEC interface.
+ This driver is for the new AO-CEC module found in G12A SoCs,
+ usually named AO_CEC_B in documentation.
+ It uses the generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_GPIO
+ tristate "Generic GPIO-based CEC driver"
+ depends on PREEMPTION || COMPILE_TEST
+ select CEC_CORE
+ select CEC_PIN
+ select CEC_NOTIFIER
+ select GPIOLIB
+ help
+ This is a generic GPIO-based CEC driver.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_SAMSUNG_S5P
+ tristate "Samsung S5P CEC driver"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for Samsung S5P HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_STI
+ tristate "STMicroelectronics STiH4xx HDMI CEC driver"
+ depends on ARCH_STI || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for STIH4xx HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_STM32
+ tristate "STMicroelectronics STM32 HDMI CEC driver"
+ depends on ARCH_STM32 || COMPILE_TEST
+ select REGMAP
+ select REGMAP_MMIO
+ select CEC_CORE
+ help
+ This is a driver for the STM32 HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_TEGRA
+ tristate "Tegra HDMI CEC driver"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for the Tegra HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_SECO
+ tristate "SECO Boards HDMI CEC driver"
+ depends on (X86 || IA64) || COMPILE_TEST
+ depends on PCI && DMI
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for SECO Boards integrated CEC interface.
+ Selecting it will enable support for this device.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config CEC_SECO_RC
+ bool "SECO Boards IR RC5 support"
+ depends on CEC_SECO
+ depends on RC_CORE=y || RC_CORE=CEC_SECO
+ help
+ If you say yes here you will get support for the
+ SECO Boards Consumer-IR interface in the seco-cec driver.
+ The embedded controller supports the RC5 protocol only; the
+ default mapping is set to rc-hauppauge.
diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
new file mode 100644
index 000000000000..3a947159b25a
--- /dev/null
+++ b/drivers/media/cec/platform/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the CEC platform device drivers.
+#
+
+# Please keep it in alphabetic order
+obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
+obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+obj-$(CONFIG_CEC_MESON_AO) += meson/
+obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
+obj-$(CONFIG_CEC_SECO) += seco/
+obj-$(CONFIG_CEC_STI) += sti/
+obj-$(CONFIG_CEC_TEGRA) += tegra/
+
diff --git a/drivers/media/platform/cec-gpio/Makefile b/drivers/media/cec/platform/cec-gpio/Makefile
index a40c621dbd24..a40c621dbd24 100644
--- a/drivers/media/platform/cec-gpio/Makefile
+++ b/drivers/media/cec/platform/cec-gpio/Makefile
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
index 42d2c2cd9a78..c8c4efc83f5f 100644
--- a/drivers/media/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
@@ -31,12 +31,12 @@ struct cec_gpio {
ktime_t v5_ts;
};
-static bool cec_gpio_read(struct cec_adapter *adap)
+static int cec_gpio_read(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
if (cec->cec_is_low)
- return false;
+ return 0;
return gpiod_get_value(cec->cec_gpio);
}
@@ -71,9 +71,10 @@ static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv)
static irqreturn_t cec_5v_gpio_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
- bool is_high = gpiod_get_value(cec->v5_gpio);
+ int val = gpiod_get_value(cec->v5_gpio);
+ bool is_high = val > 0;
- if (is_high == cec->v5_is_high)
+ if (val < 0 || is_high == cec->v5_is_high)
return IRQ_HANDLED;
cec->v5_ts = ktime_get();
cec->v5_is_high = is_high;
@@ -91,9 +92,10 @@ static irqreturn_t cec_5v_gpio_irq_handler_thread(int irq, void *priv)
static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
- bool is_high = gpiod_get_value(cec->hpd_gpio);
+ int val = gpiod_get_value(cec->hpd_gpio);
+ bool is_high = val > 0;
- if (is_high == cec->hpd_is_high)
+ if (val < 0 || is_high == cec->hpd_is_high)
return IRQ_HANDLED;
cec->hpd_ts = ktime_get();
cec->hpd_is_high = is_high;
@@ -103,8 +105,10 @@ static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
static irqreturn_t cec_gpio_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
+ int val = gpiod_get_value(cec->cec_gpio);
- cec_pin_changed(cec->adap, gpiod_get_value(cec->cec_gpio));
+ if (val >= 0)
+ cec_pin_changed(cec->adap, val > 0);
return IRQ_HANDLED;
}
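
gpiod_get_value() returns an int that may be a negative errno, so its result must not be fed straight into a bool; the hunks above read the raw value first and skip the update when it is negative. A minimal sketch of that pattern (illustrative helper, not the driver code):

	#include <linux/gpio/consumer.h>

	static void update_cached_level(struct gpio_desc *gpio, bool *cached)
	{
		int val = gpiod_get_value(gpio);

		if (val < 0)		/* read failed: keep the cached state */
			return;
		*cached = val > 0;	/* only 0/positive are real line levels */
	}
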
diff --git a/drivers/media/cec/platform/cros-ec/Makefile b/drivers/media/cec/platform/cros-ec/Makefile
new file mode 100644
index 000000000000..d7e3511078ef
--- /dev/null
+++ b/drivers/media/cec/platform/cros-ec/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CEC_CROS_EC) += cros-ec-cec.o
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index 0e7e2772f08f..0e7e2772f08f 100644
--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
diff --git a/drivers/media/cec/platform/meson/Makefile b/drivers/media/cec/platform/meson/Makefile
new file mode 100644
index 000000000000..34fc5d444d0e
--- /dev/null
+++ b/drivers/media/cec/platform/meson/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CEC_MESON_AO) += ao-cec.o
+obj-$(CONFIG_CEC_MESON_G12A_AO) += ao-cec-g12a.o
diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/cec/platform/meson/ao-cec-g12a.c
index 891533060d49..891533060d49 100644
--- a/drivers/media/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/cec/platform/meson/ao-cec-g12a.c
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/cec/platform/meson/ao-cec.c
index 09aff82c3773..09aff82c3773 100644
--- a/drivers/media/platform/meson/ao-cec.c
+++ b/drivers/media/cec/platform/meson/ao-cec.c
diff --git a/drivers/media/platform/s5p-cec/Makefile b/drivers/media/cec/platform/s5p/Makefile
index bd0103b91bee..92bf7b8557c5 100644
--- a/drivers/media/platform/s5p-cec/Makefile
+++ b/drivers/media/cec/platform/s5p/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec.o
+obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p-cec.o
s5p-cec-y += s5p_cec.o exynos_hdmi_cecctrl.o
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cec.h b/drivers/media/cec/platform/s5p/exynos_hdmi_cec.h
index 325db8c432bd..325db8c432bd 100644
--- a/drivers/media/platform/s5p-cec/exynos_hdmi_cec.h
+++ b/drivers/media/cec/platform/s5p/exynos_hdmi_cec.h
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/cec/platform/s5p/exynos_hdmi_cecctrl.c
index eb981ebce362..eb981ebce362 100644
--- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+++ b/drivers/media/cec/platform/s5p/exynos_hdmi_cecctrl.c
diff --git a/drivers/media/platform/s5p-cec/regs-cec.h b/drivers/media/cec/platform/s5p/regs-cec.h
index 447f717028a2..447f717028a2 100644
--- a/drivers/media/platform/s5p-cec/regs-cec.h
+++ b/drivers/media/cec/platform/s5p/regs-cec.h
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
index 2a3e7ffefe0a..2a3e7ffefe0a 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/cec/platform/s5p/s5p_cec.c
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/cec/platform/s5p/s5p_cec.h
index 34d033b20f96..34d033b20f96 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.h
+++ b/drivers/media/cec/platform/s5p/s5p_cec.h
diff --git a/drivers/media/cec/platform/seco/Makefile b/drivers/media/cec/platform/seco/Makefile
new file mode 100644
index 000000000000..aa1ca8ccdb8b
--- /dev/null
+++ b/drivers/media/cec/platform/seco/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CEC_SECO) += seco-cec.o
diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/cec/platform/seco/seco-cec.c
index 2ff62a488b27..075dd79beb6f 100644
--- a/drivers/media/platform/seco-cec/seco-cec.c
+++ b/drivers/media/cec/platform/seco/seco-cec.c
@@ -343,7 +343,7 @@ static const struct cec_adap_ops secocec_cec_adap_ops = {
.adap_transmit = secocec_adap_transmit,
};
-#ifdef CONFIG_VIDEO_SECO_RC
+#ifdef CONFIG_CEC_SECO_RC
static int secocec_ir_probe(void *priv)
{
struct secocec_data *cec = priv;
diff --git a/drivers/media/platform/seco-cec/seco-cec.h b/drivers/media/cec/platform/seco/seco-cec.h
index 843de8c7dfd4..843de8c7dfd4 100644
--- a/drivers/media/platform/seco-cec/seco-cec.h
+++ b/drivers/media/cec/platform/seco/seco-cec.h
diff --git a/drivers/media/cec/platform/sti/Makefile b/drivers/media/cec/platform/sti/Makefile
new file mode 100644
index 000000000000..26ec5ba1c633
--- /dev/null
+++ b/drivers/media/cec/platform/sti/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CEC_STI) += stih-cec.o
diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/cec/platform/sti/stih-cec.c
index f0c73e64b586..f0c73e64b586 100644
--- a/drivers/media/platform/sti/cec/stih-cec.c
+++ b/drivers/media/cec/platform/sti/stih-cec.c
diff --git a/drivers/media/cec/platform/stm32/Makefile b/drivers/media/cec/platform/stm32/Makefile
new file mode 100644
index 000000000000..b7597a00befa
--- /dev/null
+++ b/drivers/media/cec/platform/stm32/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CEC_STM32) += stm32-cec.o
diff --git a/drivers/media/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
index ea4b1ebfca99..ea4b1ebfca99 100644
--- a/drivers/media/platform/stm32/stm32-cec.c
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
diff --git a/drivers/media/cec/platform/tegra/Makefile b/drivers/media/cec/platform/tegra/Makefile
new file mode 100644
index 000000000000..275d1c019d49
--- /dev/null
+++ b/drivers/media/cec/platform/tegra/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CEC_TEGRA) += tegra_cec.o
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/cec/platform/tegra/tegra_cec.c
index 1ac0c70a5981..1ac0c70a5981 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/cec/platform/tegra/tegra_cec.c
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.h b/drivers/media/cec/platform/tegra/tegra_cec.h
index 8c370be38e1e..8c370be38e1e 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.h
+++ b/drivers/media/cec/platform/tegra/tegra_cec.h
diff --git a/drivers/media/cec/usb/Kconfig b/drivers/media/cec/usb/Kconfig
new file mode 100644
index 000000000000..3f3a5c75287a
--- /dev/null
+++ b/drivers/media/cec/usb/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# USB drivers
+
+if USB_SUPPORT && TTY
+source "drivers/media/cec/usb/pulse8/Kconfig"
+source "drivers/media/cec/usb/rainshadow/Kconfig"
+endif
diff --git a/drivers/media/cec/usb/Makefile b/drivers/media/cec/usb/Makefile
new file mode 100644
index 000000000000..e4183d1bfa9a
--- /dev/null
+++ b/drivers/media/cec/usb/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the CEC USB device drivers.
+#
+obj-$(CONFIG_USB_PULSE8_CEC) += pulse8/
+obj-$(CONFIG_USB_RAINSHADOW_CEC) += rainshadow/
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/cec/usb/pulse8/Kconfig
index e802d30dbbee..a0224ef80e6c 100644
--- a/drivers/media/usb/pulse8-cec/Kconfig
+++ b/drivers/media/cec/usb/pulse8/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_PULSE8_CEC
tristate "Pulse Eight HDMI CEC"
- depends on USB_ACM
select CEC_CORE
+ select USB
+ select USB_ACM
select SERIO
select SERIO_SERPORT
help
diff --git a/drivers/media/usb/pulse8-cec/Makefile b/drivers/media/cec/usb/pulse8/Makefile
index 7816c68bf928..7816c68bf928 100644
--- a/drivers/media/usb/pulse8-cec/Makefile
+++ b/drivers/media/cec/usb/pulse8/Makefile
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
index 0655aa9ecf28..beae6aa12638 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
@@ -661,7 +661,6 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
u8 *data = pulse8->data + 1;
u8 cmd[2];
int err;
- struct tm tm;
time64_t date;
pulse8->vers = 0;
@@ -682,10 +681,7 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
if (err)
return err;
date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
- time64_to_tm(date, 0, &tm);
- dev_info(pulse8->dev, "Firmware build date %04ld.%02d.%02d %02d:%02d:%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
+ dev_info(pulse8->dev, "Firmware build date %ptT\n", &date);
dev_dbg(pulse8->dev, "Persistent config:\n");
cmd[0] = MSGCODE_GET_AUTO_ENABLED;
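
The open-coded time64_to_tm() conversion is replaced by the %ptT printk extension, which takes a pointer to a time64_t and renders it in human-readable form. A hedged sketch of the call (illustrative wrapper):

	#include <linux/device.h>
	#include <linux/time64.h>

	static void log_fw_build_date(struct device *dev, time64_t date)
	{
		/* %ptT consumes a pointer to the time64_t, not the value. */
		dev_info(dev, "Firmware build date %ptT\n", &date);
	}
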
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/cec/usb/rainshadow/Kconfig
index b481c5157d7e..c9ef2c192b17 100644
--- a/drivers/media/usb/rainshadow-cec/Kconfig
+++ b/drivers/media/cec/usb/rainshadow/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_RAINSHADOW_CEC
tristate "RainShadow Tech HDMI CEC"
- depends on USB_ACM
select CEC_CORE
+ select USB
+ select USB_ACM
select SERIO
select SERIO_SERPORT
help
diff --git a/drivers/media/usb/rainshadow-cec/Makefile b/drivers/media/cec/usb/rainshadow/Makefile
index 47b33c574c3e..47b33c574c3e 100644
--- a/drivers/media/usb/rainshadow-cec/Makefile
+++ b/drivers/media/cec/usb/rainshadow/Makefile
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
index ee870ea1a886..ee870ea1a886 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 1990b7f09454..4ea03b7899a8 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -14,7 +14,7 @@ config VIDEO_TVEEPROM
depends on I2C
config CYPRESS_FIRMWARE
- tristate "Cypress firmware helper routines"
+ tristate
depends on USB
source "drivers/media/common/videobuf2/Kconfig"
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 6db60e9d5183..92072a08af25 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -309,8 +309,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
if (buf->db_attach)
buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
else
- buf->vaddr = vm_map_ram(buf->pages,
- buf->num_pages, -1, PAGE_KERNEL);
+ buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
}
/* add offset in case userptr is not page-aligned */
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 1a4f0ca87c7c..c66fda4a65e4 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -107,8 +107,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
buf->vaddr = (__force void *)
ioremap(__pfn_to_phys(nums[0]), size + offset);
} else {
- buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
- PAGE_KERNEL);
+ buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
}
if (!buf->vaddr)
diff --git a/drivers/media/dvb-core/Kconfig b/drivers/media/dvb-core/Kconfig
index 90e038d5ffd9..6ffac618417b 100644
--- a/drivers/media/dvb-core/Kconfig
+++ b/drivers/media/dvb-core/Kconfig
@@ -3,6 +3,32 @@
# DVB device configuration
#
+config DVB_MMAP
+ bool "Enable DVB memory-mapped API (EXPERIMENTAL)"
+ depends on DVB_CORE
+ depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE
+ select VIDEOBUF2_VMALLOC
+ help
+ This option enables DVB experimental memory-mapped API, which
+ reduces the number of context switches to read DVB buffers, as
+ the buffers can use mmap() syscalls.
+
+ Support for it is experimental. Use with care. If unsure,
+ say N.
+
+config DVB_NET
+ bool "DVB Network Support"
+ default (NET && INET)
+ depends on NET && INET && DVB_CORE
+ help
+ This option enables DVB Network Support which is a part of the DVB
+ standard. It is used, for example, by automatic firmware updates used
+ on Set-Top-Boxes. It can also be used to access the Internet via the
+ DVB card, if the network provider supports it.
+
+ You may want to disable the network support on embedded devices. If
+ unsure say Y.
+
config DVB_MAX_ADAPTERS
int "maximum number of DVB/ATSC adapters"
depends on DVB_CORE
@@ -19,6 +45,7 @@ config DVB_MAX_ADAPTERS
config DVB_DYNAMIC_MINORS
bool "Dynamic DVB minor allocation"
depends on DVB_CORE
+ default y
help
If you say Y here, the DVB subsystem will use dynamic minor
allocation for any device that uses the DVB major number.
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 80b6a71aa33e..959fa2820259 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -707,9 +707,10 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
}
if (ntuner && ndemod) {
- pad_source = media_get_pad_index(tuner, true,
+ /* NOTE: first found tuner source pad presumed correct */
+ pad_source = media_get_pad_index(tuner, false,
PAD_SIGNAL_ANALOG);
- if (pad_source)
+ if (pad_source < 0)
return -EINVAL;
ret = media_create_pad_links(mdev,
MEDIA_ENT_F_TUNER,
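
Two fixes share this hunk: media_get_pad_index() returns a pad index (>= 0) or a negative errno, so the old `if (pad_source)` test wrongly rejected every valid non-zero index; and the lookup now asks for a source pad (is_sink == false) instead of a sink pad. A hedged sketch of the corrected error check (assuming the media_get_pad_index(entity, is_sink, sig_type) signature; the wrapper is illustrative):

	#include <media/media-entity.h>

	static int tuner_analog_source_pad(struct media_entity *tuner)
	{
		int pad = media_get_pad_index(tuner, false, PAD_SIGNAL_ANALOG);

		/* >= 0 is a valid pad index; only negatives are errors. */
		return pad < 0 ? -EINVAL : pad;
	}
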
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index a29e9ddf9c82..643b851a6b60 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -1,4 +1,8 @@
-comment "DVB Frontend drivers hidden by 'Autoselect ancillary drivers'"
+# SPDX-License-Identifier: GPL-2.0
+
+if MEDIA_DIGITAL_TV_SUPPORT
+
+comment "DVB Frontend drivers auto-selected by 'Autoselect ancillary drivers'"
depends on MEDIA_HIDE_ANCILLARY_SUBDRV
menu "Customise DVB Frontends"
@@ -943,9 +947,15 @@ config DVB_SP2
help
CIMaX SP2/SP2HF Common Interface module.
+endmenu # Customise DVB Frontends
+
+endif # MEDIA_DIGITAL_TV_SUPPORT
+
comment "Tools to develop new frontends"
+ depends on MEDIA_TEST_SUPPORT
config DVB_DUMMY_FE
tristate "Dummy frontend driver"
- depends on DVB_CORE
-endmenu
+ depends on MEDIA_TEST_SUPPORT && DVB_CORE
+ help
+ Dummy skeleton frontend driver.
diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
index f87e27481ea7..d5b1b3788e39 100644
--- a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
@@ -685,7 +685,7 @@ static int cxd2880_set_ber_per_period_t(struct dvb_frontend *fe)
int ret;
struct cxd2880_priv *priv;
struct cxd2880_dvbt_tpsinfo info;
- enum cxd2880_dtv_bandwidth bw = CXD2880_DTV_BW_1_7_MHZ;
+ enum cxd2880_dtv_bandwidth bw;
u32 pre_ber_rate = 0;
u32 post_ber_rate = 0;
u32 ucblock_rate = 0;
diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
index 9118a7a48ef2..3f129efa21de 100644
--- a/drivers/media/dvb-frontends/dib3000.h
+++ b/drivers/media/dvb-frontends/dib3000.h
@@ -14,7 +14,7 @@
* Amaury Demol from DiBcom for providing specs and driver
* sources, on which this driver (and the dvb-dibusb) are based.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef DIB3000_H
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 46ed0e20c8fa..0f0480d8576d 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -14,7 +14,7 @@
* Amaury Demol from DiBcom for providing specs and driver
* sources, on which this driver (and the dvb-dibusb) are based.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 0a4875b391d9..7e8e5c308d1c 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1093,7 +1093,7 @@ static int init_hi(struct drxk_state *state)
static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable)
{
- int status = -1;
+ int status;
u16 sio_pdr_mclk_cfg = 0;
u16 sio_pdr_mdx_cfg = 0;
u16 err_cfg = 0;
diff --git a/drivers/media/dvb-frontends/eds1547.h b/drivers/media/dvb-frontends/eds1547.h
index 907254b85708..bb85a6e27076 100644
--- a/drivers/media/dvb-frontends/eds1547.h
+++ b/drivers/media/dvb-frontends/eds1547.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Igor M. Liplianin (liplianin@me.by)
*
-* see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+* see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef EDS1547
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 6c4adec58174..d3c330e035c4 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -846,6 +846,7 @@ static int lgdt3306a_fe_sleep(struct dvb_frontend *fe)
static int lgdt3306a_init(struct dvb_frontend *fe)
{
struct lgdt3306a_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 val;
int ret;
@@ -997,6 +998,9 @@ static int lgdt3306a_init(struct dvb_frontend *fe)
ret = lgdt3306a_sleep(state);
lg_chkerr(ret);
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
fail:
return ret;
}
@@ -1597,6 +1601,7 @@ static int lgdt3306a_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
struct lgdt3306a_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u16 strength = 0;
int ret = 0;
@@ -1637,6 +1642,15 @@ static int lgdt3306a_read_status(struct dvb_frontend *fe,
default:
ret = -EINVAL;
}
+
+ if (*status & FE_HAS_SYNC) {
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = lgdt3306a_calculate_snr_x100(state) * 10;
+ } else {
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
}
return ret;
}
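
The lgdt3306a additions follow the standard DVBv5 statistics idiom: publish one CNR statistic, in 0.001 dB units while the frontend has lock, and mark it FE_SCALE_NOT_AVAILABLE otherwise (including right after init, before any lock). A hedged sketch of that idiom (the helper and the snr_x100 source are illustrative; the property-cache fields are the real DVBv5 ones):

	#include <media/dvb_frontend.h>

	static void report_cnr(struct dtv_frontend_properties *c,
			       bool locked, int snr_x100)
	{
		c->cnr.len = 1;
		if (locked) {
			c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
			/* svalue is in 0.001 dB, hence * 10 on an x100 SNR */
			c->cnr.stat[0].svalue = snr_x100 * 10;
		} else {
			c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
		}
	}
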
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index d2c28dcf6b42..f204e715bc59 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -980,6 +980,8 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
goto err;
ret = m88ds3103_update_bits(dev, 0xc9, 0x08, 0x08);
+ if (ret)
+ goto err;
}
dev_dbg(&client->dev, "carrier offset=%d\n",
@@ -1898,7 +1900,7 @@ static int m88ds3103_probe(struct i2c_client *client,
if (ret)
goto err_kfree;
dev->dt_addr = ((utmp & 0x80) == 0) ? 0x42 >> 1 : 0x40 >> 1;
- dev_err(&client->dev, "dt addr is 0x%02x", dev->dt_addr);
+ dev_dbg(&client->dev, "dt addr is 0x%02x\n", dev->dt_addr);
dev->dt_client = i2c_new_dummy_device(client->adapter,
dev->dt_addr);
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 7d93a1617e86..212312d20ff6 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -193,7 +193,7 @@ void stv0900_write_bits(struct stv0900_internal *intp, u32 label, u8 val)
u8 stv0900_get_bits(struct stv0900_internal *intp, u32 label)
{
- u8 val = 0xff;
+ u8 val;
u8 mask, pos;
extract_mask_pos(label, &mask, &pos);
diff --git a/drivers/media/dvb-frontends/z0194a.h b/drivers/media/dvb-frontends/z0194a.h
index 21442905d116..3446ccbf3c1c 100644
--- a/drivers/media/dvb-frontends/z0194a.h
+++ b/drivers/media/dvb-frontends/z0194a.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Igor M. Liplianin (liplianin@me.by)
*
-* see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+* see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef Z0194A
diff --git a/drivers/media/firewire/Kconfig b/drivers/media/firewire/Kconfig
index e7837da5905b..0c1f326f581f 100644
--- a/drivers/media/firewire/Kconfig
+++ b/drivers/media/firewire/Kconfig
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
+if DVB_CORE && FIREWIRE
+comment "FireWire (IEEE 1394) Adapters"
+
config DVB_FIREDTV
tristate "FireDTV and FloppyDTV"
- depends on DVB_CORE && FIREWIRE
help
Support for DVB receivers from Digital Everywhere
which are connected via IEEE 1394 (FireWire).
@@ -18,3 +20,4 @@ config DVB_FIREDTV_INPUT
def_bool INPUT = y || (INPUT = m && DVB_FIREDTV = m)
endif # DVB_FIREDTV
+endif # DVB_CORE && FIREWIRE
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 125d596c13dd..da11036ad804 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -5,6 +5,9 @@
if VIDEO_V4L2
+comment "IR I2C driver auto-selected by 'Autoselect ancillary drivers'"
+ depends on MEDIA_SUBDRV_AUTOSELECT && I2C && RC_CORE
+
config VIDEO_IR_I2C
tristate "I2C module for IR" if !MEDIA_SUBDRV_AUTOSELECT || EXPERT
depends on I2C && RC_CORE
@@ -19,17 +22,18 @@ config VIDEO_IR_I2C
In doubt, say Y.
#
-# Encoder / Decoder module configuration
+# V4L2 I2C drivers that aren't related with Camera support
#
-comment "I2C drivers hidden by 'Autoselect ancillary drivers'"
+comment "audio, video and radio I2C drivers auto-selected by 'Autoselect ancillary drivers'"
depends on MEDIA_HIDE_ANCILLARY_SUBDRV
+#
+# Encoder / Decoder module configuration
+#
-menu "I2C Encoders, decoders, sensors and other helper chips"
+menu "Audio decoders, processors and mixers"
visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
-comment "Audio decoders, processors and mixers"
-
config VIDEO_TVAUDIO
tristate "Simple audio decoder chips"
depends on VIDEO_V4L2 && I2C
@@ -62,11 +66,13 @@ config VIDEO_TDA9840
config VIDEO_TDA1997X
tristate "NXP TDA1997x HDMI receiver"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C
depends on SND_SOC
select HDMI
select SND_PCM
select V4L2_FWNODE
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
@@ -185,8 +191,10 @@ config VIDEO_SONY_BTF_MPX
To compile this driver as a module, choose M here: the
module will be called sony-btf-mpx.
+endmenu
-comment "RDS decoders"
+menu "RDS decoders"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
config VIDEO_SAA6588
tristate "SAA6588 Radio Chip RDS decoder support"
@@ -199,12 +207,16 @@ config VIDEO_SAA6588
To compile this driver as a module, choose M here: the
module will be called saa6588.
+endmenu
-comment "Video decoders"
+menu "Video decoders"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
config VIDEO_ADV7180
tristate "Analog Devices ADV7180 decoder"
- depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on GPIOLIB && VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
Support for the Analog Devices ADV7180 video decoder.
@@ -223,8 +235,10 @@ config VIDEO_ADV7183
config VIDEO_ADV748X
tristate "Analog Devices ADV748x decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C
depends on OF
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
select V4L2_FWNODE
help
@@ -236,8 +250,10 @@ config VIDEO_ADV748X
config VIDEO_ADV7604
tristate "Analog Devices ADV7604 decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C
depends on GPIOLIB || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
select HDMI
select V4L2_FWNODE
@@ -260,7 +276,9 @@ config VIDEO_ADV7604_CEC
config VIDEO_ADV7842
tristate "Analog Devices ADV7842 decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select HDMI
help
Support for the Analog Devices ADV7842 video decoder.
@@ -347,7 +365,9 @@ config VIDEO_SAA711X
config VIDEO_TC358743
tristate "Toshiba TC358743 decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select HDMI
select V4L2_FWNODE
help
@@ -457,7 +477,10 @@ config VIDEO_SAA717X
source "drivers/media/i2c/cx25840/Kconfig"
-comment "Video encoders"
+endmenu
+
+menu "Video encoders"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
config VIDEO_SAA7127
tristate "Philips SAA7127/9 digital video encoders"
@@ -515,8 +538,10 @@ config VIDEO_ADV7393
config VIDEO_ADV7511
tristate "Analog Devices ADV7511 encoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C
depends on DRM_I2C_ADV7511=n || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select HDMI
help
Support for the Analog Devices ADV7511 video encoder.
@@ -536,7 +561,10 @@ config VIDEO_ADV7511_CEC
config VIDEO_AD9389B
tristate "Analog Devices AD9389B encoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
Support for the Analog Devices AD9389B video encoder.
@@ -559,8 +587,124 @@ config VIDEO_THS8200
To compile this driver as a module, choose M here: the
module will be called ths8200.
+endmenu
+
+menu "Video improvement chips"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_UPD64031A
+ tristate "NEC Electronics uPD64031A Ghost Reduction"
+ depends on VIDEO_V4L2 && I2C
+ help
+ Support for the NEC Electronics uPD64031A Ghost Reduction
+ video chip. It is most often found in NTSC TV cards made for
+ Japan and is used to reduce the 'ghosting' effect that can
+ be present in analog TV broadcasts.
+
+ To compile this driver as a module, choose M here: the
+ module will be called upd64031a.
+
+config VIDEO_UPD64083
+ tristate "NEC Electronics uPD64083 3-Dimensional Y/C separation"
+ depends on VIDEO_V4L2 && I2C
+ help
+ Support for the NEC Electronics uPD64083 3-Dimensional Y/C
+ separation video chip. It is used to improve the quality of
+ the colors of a composite signal.
+
+ To compile this driver as a module, choose M here: the
+ module will be called upd64083.
+endmenu
+
+menu "Audio/Video compression chips"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_SAA6752HS
+ tristate "Philips SAA6752HS MPEG-2 Audio/Video Encoder"
+ depends on VIDEO_V4L2 && I2C
+ select CRC32
+ help
+ Support for the Philips SAA6752HS MPEG-2 video and MPEG-audio/AC-3
+ audio encoder with multiplexer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa6752hs.
+
+endmenu
+
+menu "SDR tuner chips"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config SDR_MAX2175
+ tristate "Maxim 2175 RF to Bits tuner"
+ depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
+ select REGMAP_I2C
+ help
+ Support for Maxim 2175 tuner. It is an advanced analog/digital
+ radio receiver with RF-to-Bits front-end designed for SDR solutions.
+
+ To compile this driver as a module, choose M here; the
+ module will be called max2175.
+
+endmenu
+
+menu "Miscellaneous helper chips"
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
+
+config VIDEO_THS7303
+ tristate "THS7303/53 Video Amplifier"
+ depends on VIDEO_V4L2 && I2C
+ help
+ Support for the TI THS7303/53 video amplifier.
-comment "Camera sensor devices"
+ To compile this driver as a module, choose M here: the
+ module will be called ths7303.
+
+config VIDEO_M52790
+ tristate "Mitsubishi M52790 A/V switch"
+ depends on VIDEO_V4L2 && I2C
+ help
+ Support for the Mitsubishi M52790 A/V switch.
+
+ To compile this driver as a module, choose M here: the
+ module will be called m52790.
+
+config VIDEO_I2C
+ tristate "I2C transport video support"
+ depends on VIDEO_V4L2 && I2C
+ select VIDEOBUF2_VMALLOC
+ imply HWMON
+ help
+ Enable the I2C transport video support which supports the
+ following:
+ * Panasonic AMG88xx Grid-Eye Sensors
+ * Melexis MLX90640 Thermal Cameras
+
+ To compile this driver as a module, choose M here: the
+ module will be called video-i2c.
+
+config VIDEO_ST_MIPID02
+ tristate "STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge"
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ Support for STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge.
+ It allows using a CSI-2 sensor with a PARALLEL port
+ controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called st-mipid02.
+endmenu
+
+#
+# V4L2 I2C drivers that are related with Camera support
+#
+
+menu "Camera sensor devices"
+ visible if MEDIA_CAMERA_SUPPORT
config VIDEO_APTINA_PLL
tristate
@@ -568,12 +712,11 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
-if MEDIA_CAMERA_SUPPORT
-
config VIDEO_HI556
tristate "Hynix Hi-556 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CONTROLLER
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Hynix
@@ -584,8 +727,10 @@ config VIDEO_HI556
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
- depends on GPIOLIB && I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on GPIOLIB && I2C && VIDEO_V4L2
depends on V4L2_FWNODE
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
help
This is a Video4Linux2 sensor driver for the Sony
@@ -596,7 +741,9 @@ config VIDEO_IMX214
config VIDEO_IMX219
tristate "Sony IMX219 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
@@ -607,7 +754,9 @@ config VIDEO_IMX219
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Sony
IMX258 camera.
@@ -617,7 +766,9 @@ config VIDEO_IMX258
config VIDEO_IMX274
tristate "Sony IMX274 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
help
This is a V4L2 sensor driver for the Sony IMX274
@@ -625,7 +776,9 @@ config VIDEO_IMX274
config VIDEO_IMX290
tristate "Sony IMX290 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
select V4L2_FWNODE
help
@@ -637,7 +790,9 @@ config VIDEO_IMX290
config VIDEO_IMX319
tristate "Sony IMX319 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Sony
IMX319 camera.
@@ -647,7 +802,9 @@ config VIDEO_IMX319
config VIDEO_IMX355
tristate "Sony IMX355 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Sony
IMX355 camera.
@@ -678,7 +835,8 @@ config VIDEO_OV2659
config VIDEO_OV2680
tristate "OmniVision OV2680 sensor support"
- depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -689,7 +847,8 @@ config VIDEO_OV2680
config VIDEO_OV2685
tristate "OmniVision OV2685 sensor support"
- depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -698,10 +857,25 @@ config VIDEO_OV2685
To compile this driver as a module, choose M here: the
module will be called ov2685.
+config VIDEO_OV2740
+ tristate "OmniVision OV2740 sensor support"
+ depends on VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV2740 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov2740.
+
config VIDEO_OV5640
tristate "OmniVision OV5640 sensor support"
depends on OF
- depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on GPIOLIB && VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Omnivision
@@ -710,7 +884,9 @@ config VIDEO_OV5640
config VIDEO_OV5645
tristate "OmniVision OV5645 sensor support"
depends on OF
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -721,7 +897,9 @@ config VIDEO_OV5645
config VIDEO_OV5647
tristate "OmniVision OV5647 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -742,8 +920,9 @@ config VIDEO_OV6650
config VIDEO_OV5670
tristate "OmniVision OV5670 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CONTROLLER
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -754,8 +933,9 @@ config VIDEO_OV5670
config VIDEO_OV5675
tristate "OmniVision OV5675 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CONTROLLER
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -777,7 +957,9 @@ config VIDEO_OV5695
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -826,7 +1008,9 @@ config VIDEO_OV7740
config VIDEO_OV8856
tristate "OmniVision OV8856 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -844,7 +1028,9 @@ config VIDEO_OV9640
config VIDEO_OV9650
tristate "OmniVision OV9650/OV9652 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select REGMAP_SCCB
help
This is a V4L2 sensor driver for the Omnivision
@@ -852,7 +1038,9 @@ config VIDEO_OV9650
config VIDEO_OV13858
tristate "OmniVision OV13858 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -870,14 +1058,18 @@ config VIDEO_VS6624
config VIDEO_MT9M001
tristate "mt9m001 support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This driver supports MT9M001 cameras from Micron, monochrome
and colour models.
config VIDEO_MT9M032
tristate "MT9M032 camera sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEO_APTINA_PLL
help
This driver supports MT9M032 camera sensors from Aptina, monochrome
@@ -893,7 +1085,9 @@ config VIDEO_MT9M111
config VIDEO_MT9P031
tristate "Aptina MT9P031 support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEO_APTINA_PLL
help
This is a Video4Linux2 sensor driver for the Aptina
@@ -901,7 +1095,9 @@ config VIDEO_MT9P031
config VIDEO_MT9T001
tristate "Aptina MT9T001 support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Aptina
	  (Micron) mt9t001 3 Mpixel camera.
@@ -926,7 +1122,9 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
select V4L2_FWNODE
help
@@ -951,7 +1149,9 @@ config VIDEO_SR030PC30
config VIDEO_NOON010PC30
tristate "Siliconfile NOON010PC30 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This driver supports NOON010PC30 CIF camera from Siliconfile
@@ -969,21 +1169,27 @@ config VIDEO_RJ54N1
config VIDEO_S5K6AA
tristate "Samsung S5K6AAFX sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
camera sensor with an embedded SoC image signal processor.
config VIDEO_S5K6A3
tristate "Samsung S5K6A3 sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6A3 raw
camera sensor.
config VIDEO_S5K4ECGX
tristate "Samsung S5K4ECGX sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select CRC32
help
This is a V4L2 sensor driver for Samsung S5K4ECGX 5M
@@ -991,7 +1197,9 @@ config VIDEO_S5K4ECGX
config VIDEO_S5K5BAF
tristate "Samsung S5K5BAF sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a V4L2 sensor driver for Samsung S5K5BAF 2M
@@ -1002,28 +1210,32 @@ source "drivers/media/i2c/et8ek8/Kconfig"
config VIDEO_S5C73M3
tristate "Samsung S5C73M3 sensor support"
- depends on I2C && SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && SPI && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
-endif
-comment "Lens drivers"
+endmenu
-if MEDIA_CAMERA_SUPPORT
+menu "Lens drivers"
+ visible if MEDIA_CAMERA_SUPPORT
config VIDEO_AD5820
tristate "AD5820 lens voice coil support"
- depends on GPIOLIB && I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on GPIOLIB && I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
help
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
config VIDEO_AK7375
tristate "AK7375 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a driver for the AK7375 camera lens voice coil.
AK7375 is a 12 bit DAC with 120mA output current sink
@@ -1032,8 +1244,9 @@ config VIDEO_AK7375
config VIDEO_DW9714
tristate "DW9714 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a driver for the DW9714 camera lens voice coil.
DW9714 is a 10 bit DAC with 120mA output current sink
@@ -1042,30 +1255,32 @@ config VIDEO_DW9714
config VIDEO_DW9807_VCM
tristate "DW9807 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This is a driver for the DW9807 camera lens voice coil.
DW9807 is a 10 bit DAC with 100mA output current sink
capability. This is designed for linear control of
voice coil motors, controlled via I2C serial interface.
-endif
-
-comment "Flash devices"
+endmenu
-if MEDIA_CAMERA_SUPPORT
+menu "Flash devices"
+ visible if MEDIA_CAMERA_SUPPORT
config VIDEO_ADP1653
tristate "ADP1653 flash support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
help
This is a driver for the ADP1653 flash controller. It is used for
example in Nokia N900.
config VIDEO_LM3560
tristate "LM3560 dual flash driver support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
select REGMAP_I2C
help
This is a driver for the lm3560 dual flash controllers. It controls
@@ -1073,112 +1288,12 @@ config VIDEO_LM3560
config VIDEO_LM3646
tristate "LM3646 dual flash driver support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
select REGMAP_I2C
help
This is a driver for the lm3646 dual flash controllers. It controls
flash, torch LEDs.
-
-endif
-
-comment "Video improvement chips"
-
-config VIDEO_UPD64031A
- tristate "NEC Electronics uPD64031A Ghost Reduction"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the NEC Electronics uPD64031A Ghost Reduction
- video chip. It is most often found in NTSC TV cards made for
- Japan and is used to reduce the 'ghosting' effect that can
- be present in analog TV broadcasts.
-
- To compile this driver as a module, choose M here: the
- module will be called upd64031a.
-
-config VIDEO_UPD64083
- tristate "NEC Electronics uPD64083 3-Dimensional Y/C separation"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the NEC Electronics uPD64083 3-Dimensional Y/C
- separation video chip. It is used to improve the quality of
- the colors of a composite signal.
-
- To compile this driver as a module, choose M here: the
- module will be called upd64083.
-
-comment "Audio/Video compression chips"
-
-config VIDEO_SAA6752HS
- tristate "Philips SAA6752HS MPEG-2 Audio/Video Encoder"
- depends on VIDEO_V4L2 && I2C
- select CRC32
- help
- Support for the Philips SAA6752HS MPEG-2 video and MPEG-audio/AC-3
- audio encoder with multiplexer.
-
- To compile this driver as a module, choose M here: the
- module will be called saa6752hs.
-
-comment "SDR tuner chips"
-
-config SDR_MAX2175
- tristate "Maxim 2175 RF to Bits tuner"
- depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
- select REGMAP_I2C
- help
- Support for Maxim 2175 tuner. It is an advanced analog/digital
- radio receiver with RF-to-Bits front-end designed for SDR solutions.
-
- To compile this driver as a module, choose M here; the
- module will be called max2175.
-
-comment "Miscellaneous helper chips"
-
-config VIDEO_THS7303
- tristate "THS7303/53 Video Amplifier"
- depends on VIDEO_V4L2 && I2C
- help
- Support for TI THS7303/53 video amplifier
-
- To compile this driver as a module, choose M here: the
- module will be called ths7303.
-
-config VIDEO_M52790
- tristate "Mitsubishi M52790 A/V switch"
- depends on VIDEO_V4L2 && I2C
- help
- Support for the Mitsubishi M52790 A/V switch.
-
- To compile this driver as a module, choose M here: the
- module will be called m52790.
-
-config VIDEO_I2C
- tristate "I2C transport video support"
- depends on VIDEO_V4L2 && I2C
- select VIDEOBUF2_VMALLOC
- imply HWMON
- help
- Enable the I2C transport video support which supports the
- following:
- * Panasonic AMG88xx Grid-Eye Sensors
- * Melexis MLX90640 Thermal Cameras
-
- To compile this driver as a module, choose M here: the
- module will be called video-i2c
-
-config VIDEO_ST_MIPID02
- tristate "STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
- select V4L2_FWNODE
- help
- Support for STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge.
- It is used to allow usage of CSI-2 sensor with PARALLEL port
- controller.
-
- To compile this driver as a module, choose M here: the
- module will be called st-mipid02.
-
endmenu
-endif
+endif # VIDEO_V4L2
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 77bf7d0b691f..993acab81b2c 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
obj-$(CONFIG_VIDEO_OV2685) += ov2685.o
+obj-$(CONFIG_VIDEO_OV2740) += ov2740.o
obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
obj-$(CONFIG_VIDEO_OV5645) += ov5645.o
obj-$(CONFIG_VIDEO_OV5647) += ov5647.o
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 0de946fe2109..e2e935f78986 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -997,14 +997,14 @@ static void cx23885_initialize(struct i2c_client *client)
*/
cx25840_write4(client, 0x404, 0x0010253e);
- /* CC on - Undocumented Register */
+ /* CC on - VBI_LINE_CTRL3, FLD_VBI_MD_LINE12 */
cx25840_write(client, state->vbi_regs_offset + 0x42f, 0x66);
/* HVR-1250 / HVR1850 DIF related */
/* Power everything up */
cx25840_write4(client, 0x130, 0x0);
- /* Undocumented */
+ /* SRC_COMB_CFG */
if (is_cx23888(state))
cx25840_write4(client, 0x454, 0x6628021F);
else
@@ -1486,24 +1486,24 @@ static int set_input(struct i2c_client *client,
cx25840_write4(client, 0x410, 0xffff0dbf);
cx25840_write4(client, 0x414, 0x00137d03);
- cx25840_write4(client, state->vbi_regs_offset + 0x42c,
- 0x42600000);
- cx25840_write4(client, state->vbi_regs_offset + 0x430,
- 0x0000039b);
- cx25840_write4(client, state->vbi_regs_offset + 0x438,
- 0x00000000);
-
- cx25840_write4(client, state->vbi_regs_offset + 0x440,
- 0xF8E3E824);
- cx25840_write4(client, state->vbi_regs_offset + 0x444,
- 0x401040dc);
- cx25840_write4(client, state->vbi_regs_offset + 0x448,
- 0xcd3f02a0);
- cx25840_write4(client, state->vbi_regs_offset + 0x44c,
- 0x161f1000);
- cx25840_write4(client, state->vbi_regs_offset + 0x450,
- 0x00000802);
-
+ if (is_cx23888(state)) {
+ /* 888 MISC_TIM_CTRL */
+ cx25840_write4(client, 0x42c, 0x42600000);
+ /* 888 FIELD_COUNT */
+ cx25840_write4(client, 0x430, 0x0000039b);
+ /* 888 VSCALE_CTRL */
+ cx25840_write4(client, 0x438, 0x00000000);
+ /* 888 DFE_CTRL1 */
+ cx25840_write4(client, 0x440, 0xF8E3E824);
+ /* 888 DFE_CTRL2 */
+ cx25840_write4(client, 0x444, 0x401040dc);
+ /* 888 DFE_CTRL3 */
+ cx25840_write4(client, 0x448, 0xcd3f02a0);
+ /* 888 PLL_CTRL */
+ cx25840_write4(client, 0x44c, 0x161f1000);
+ /* 888 HTL_CTRL */
+ cx25840_write4(client, 0x450, 0x00000802);
+ }
cx25840_write4(client, 0x91c, 0x01000000);
cx25840_write4(client, 0x8e0, 0x03063870);
cx25840_write4(client, 0x8d4, 0x7FFF0024);
diff --git a/drivers/media/i2c/et8ek8/Kconfig b/drivers/media/i2c/et8ek8/Kconfig
index 1c6909874d56..afcc4ea764f6 100644
--- a/drivers/media/i2c/et8ek8/Kconfig
+++ b/drivers/media/i2c/et8ek8/Kconfig
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_ET8EK8
tristate "ET8EK8 camera sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
This is a driver for the Toshiba ET8EK8 5 MP camera sensor.
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 4175d06ffd47..1ef5af9a8c8b 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -4,7 +4,7 @@
*
* Copyright 2018 Qtechnology A/S
*
- * Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ * Ricardo Ribalda <ribalda@kernel.org>
*/
#include <linux/clk.h>
#include <linux/delay.h>
@@ -1120,5 +1120,5 @@ static struct i2c_driver imx214_i2c_driver = {
module_i2c_driver(imx214_i2c_driver);
MODULE_DESCRIPTION("Sony IMX214 Camera driver");
-MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
+MODULE_AUTHOR("Ricardo Ribalda <ribalda@kernel.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index cb03bdec1f9c..f64c0ef7a897 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -15,8 +15,6 @@
*/
#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -112,6 +110,14 @@
#define IMX219_TESTP_BLUE_DEFAULT 0
#define IMX219_TESTP_GREENB_DEFAULT 0
+/* IMX219 native and active pixel array size. */
+#define IMX219_NATIVE_WIDTH 3296U
+#define IMX219_NATIVE_HEIGHT 2480U
+#define IMX219_PIXEL_ARRAY_LEFT 8U
+#define IMX219_PIXEL_ARRAY_TOP 8U
+#define IMX219_PIXEL_ARRAY_WIDTH 3280U
+#define IMX219_PIXEL_ARRAY_HEIGHT 2464U
+
struct imx219_reg {
u16 address;
u8 val;
@@ -129,6 +135,9 @@ struct imx219_mode {
/* Frame height */
unsigned int height;
+ /* Analog crop rectangle. */
+ struct v4l2_rect crop;
+
/* V-timing */
unsigned int vts_def;
@@ -463,6 +472,12 @@ static const struct imx219_mode supported_modes[] = {
/* 8MPix 15fps mode */
.width = 3280,
.height = 2464,
+ .crop = {
+ .left = 0,
+ .top = 0,
+ .width = 3280,
+ .height = 2464
+ },
.vts_def = IMX219_VTS_15FPS,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
@@ -473,6 +488,12 @@ static const struct imx219_mode supported_modes[] = {
/* 1080P 30fps cropped */
.width = 1920,
.height = 1080,
+ .crop = {
+ .left = 680,
+ .top = 692,
+ .width = 1920,
+ .height = 1080
+ },
.vts_def = IMX219_VTS_30FPS_1080P,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_1920_1080_regs),
@@ -483,6 +504,12 @@ static const struct imx219_mode supported_modes[] = {
/* 2x2 binned 30fps mode */
.width = 1640,
.height = 1232,
+ .crop = {
+ .left = 0,
+ .top = 0,
+ .width = 3280,
+ .height = 2464
+ },
.vts_def = IMX219_VTS_30FPS_BINNED,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_1640_1232_regs),
@@ -493,6 +520,12 @@ static const struct imx219_mode supported_modes[] = {
/* 640x480 30fps mode */
.width = 640,
.height = 480,
+ .crop = {
+ .left = 1000,
+ .top = 752,
+ .width = 1280,
+ .height = 960
+ },
.vts_def = IMX219_VTS_30FPS_640x480,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_640_480_regs),
@@ -654,6 +687,7 @@ static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct imx219 *imx219 = to_imx219(sd);
struct v4l2_mbus_framefmt *try_fmt =
v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_rect *try_crop;
mutex_lock(&imx219->mutex);
@@ -664,6 +698,13 @@ static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
MEDIA_BUS_FMT_SRGGB10_1X10);
try_fmt->field = V4L2_FIELD_NONE;
+ /* Initialize try_crop rectangle. */
+ try_crop = v4l2_subdev_get_try_crop(sd, fh->pad, 0);
+ try_crop->top = IMX219_PIXEL_ARRAY_TOP;
+ try_crop->left = IMX219_PIXEL_ARRAY_LEFT;
+ try_crop->width = IMX219_PIXEL_ARRAY_WIDTH;
+ try_crop->height = IMX219_PIXEL_ARRAY_HEIGHT;
+
mutex_unlock(&imx219->mutex);
return 0;
@@ -781,7 +822,7 @@ static int imx219_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index >= ARRAY_SIZE(supported_modes))
return -EINVAL;
- if (fse->code != imx219_get_format_code(imx219, imx219->fmt.code))
+ if (fse->code != imx219_get_format_code(imx219, fse->code))
return -EINVAL;
fse->min_width = supported_modes[fse->index].width;
@@ -928,6 +969,56 @@ static int imx219_set_framefmt(struct imx219 *imx219)
return -EINVAL;
}
+static const struct v4l2_rect *
+__imx219_get_pad_crop(struct imx219 *imx219, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(&imx219->sd, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &imx219->mode->crop;
+ }
+
+ return NULL;
+}
+
+static int imx219_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP: {
+ struct imx219 *imx219 = to_imx219(sd);
+
+ mutex_lock(&imx219->mutex);
+ sel->r = *__imx219_get_pad_crop(imx219, cfg, sel->pad,
+ sel->which);
+ mutex_unlock(&imx219->mutex);
+
+ return 0;
+ }
+
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = IMX219_NATIVE_WIDTH;
+ sel->r.height = IMX219_NATIVE_HEIGHT;
+
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = IMX219_PIXEL_ARRAY_TOP;
+ sel->r.left = IMX219_PIXEL_ARRAY_LEFT;
+ sel->r.width = IMX219_PIXEL_ARRAY_WIDTH;
+ sel->r.height = IMX219_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int imx219_start_streaming(struct imx219 *imx219)
{
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
@@ -1152,6 +1243,7 @@ static const struct v4l2_subdev_pad_ops imx219_pad_ops = {
.enum_mbus_code = imx219_enum_mbus_code,
.get_fmt = imx219_get_pad_format,
.set_fmt = imx219_set_pad_format,
+ .get_selection = imx219_get_selection,
.enum_frame_size = imx219_enum_frame_size,
};
@@ -1171,11 +1263,12 @@ static int imx219_init_controls(struct imx219 *imx219)
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
unsigned int height = imx219->mode->height;
+ struct v4l2_fwnode_device_properties props;
int exposure_max, exposure_def, hblank;
int i, ret;
ctrl_hdlr = &imx219->ctrl_handler;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 11);
if (ret)
return ret;
@@ -1254,6 +1347,15 @@ static int imx219_init_controls(struct imx219 *imx219)
goto error;
}
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx219_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
imx219->sd.ctrl_handler = ctrl_hdlr;
return 0;
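
The imx219 hunks above wire up crop reporting through the subdev selection API: V4L2_SEL_TGT_CROP returns the analog crop of the current mode, V4L2_SEL_TGT_CROP_DEFAULT the recommended 3280x2464 active array, and V4L2_SEL_TGT_NATIVE_SIZE the full 3296x2480 pixel array. A minimal userspace sketch that reads the active crop, assuming the sensor shows up as the hypothetical node /dev/v4l-subdev0:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/v4l2-subdev.h>

    int main(void)
    {
            struct v4l2_subdev_selection sel;
            int fd = open("/dev/v4l-subdev0", O_RDWR); /* hypothetical node */

            if (fd < 0)
                    return 1;

            memset(&sel, 0, sizeof(sel));
            sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
            sel.pad = 0;
            sel.target = V4L2_SEL_TGT_CROP;

            /* For the 1080p mode above this prints 1920x1080@(680,692). */
            if (ioctl(fd, VIDIOC_SUBDEV_G_SELECTION, &sel) == 0)
                    printf("crop: %ux%u@(%d,%d)\n", sel.r.width,
                           sel.r.height, sel.r.left, sel.r.top);
            return 0;
    }
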
diff --git a/drivers/media/i2c/m5mols/Kconfig b/drivers/media/i2c/m5mols/Kconfig
index e573482f269f..6f0ef33b7ee1 100644
--- a/drivers/media/i2c/m5mols/Kconfig
+++ b/drivers/media/i2c/m5mols/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_M5MOLS
tristate "Fujitsu M-5MOLS 8MP sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
This driver supports Fujitsu M-5MOLS camera sensor with ISP
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 506a30e69ced..03b4ed3a61b8 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1194,7 +1194,7 @@ static const struct v4l2_ctrl_ops max2175_ctrl_ops = {
/*
* I2S output enable/disable configuration. This is a private control.
- * Refer to Documentation/media/v4l-drivers/max2175.rst for more details.
+ * Refer to Documentation/userspace-api/media/drivers/max2175.rst for more details.
*/
static const struct v4l2_ctrl_config max2175_i2s_en = {
.ops = &max2175_ctrl_ops,
@@ -1210,7 +1210,7 @@ static const struct v4l2_ctrl_config max2175_i2s_en = {
/*
* HSLS value control LO freq adjacent location configuration.
- * Refer to Documentation/media/v4l-drivers/max2175.rst for more details.
+ * Refer to Documentation/userspace-api/media/drivers/max2175.rst for more details.
*/
static const struct v4l2_ctrl_config max2175_hsls = {
.ops = &max2175_ctrl_ops,
@@ -1226,7 +1226,7 @@ static const struct v4l2_ctrl_config max2175_hsls = {
/*
* Rx modes below are a set of preset configurations that decide the tuner's
* sck and sample rate of transmission. They are separate for EU & NA regions.
- * Refer to Documentation/media/v4l-drivers/max2175.rst for more details.
+ * Refer to Documentation/userspace-api/media/drivers/max2175.rst for more details.
*/
static const char * const max2175_ctrl_eu_rx_modes[] = {
[MAX2175_EU_FM_1_2] = "EU FM 1.2",
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index aac6f77afa0f..236ad2c816b7 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#define OV13858_REG_VALUE_08BIT 1
#define OV13858_REG_VALUE_16BIT 2
@@ -1589,6 +1590,7 @@ static const struct v4l2_subdev_internal_ops ov13858_internal_ops = {
static int ov13858_init_controls(struct ov13858 *ov13858)
{
struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
+ struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
s64 vblank_def;
@@ -1600,7 +1602,7 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
int ret;
ctrl_hdlr = &ov13858->ctrl_handler;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
if (ret)
return ret;
@@ -1666,6 +1668,15 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
goto error;
}
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov13858_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
ov13858->sd.ctrl_handler = ctrl_hdlr;
return 0;
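
The ov13858 hunks above follow one pattern, repeated for imx219 above and ov5670 below: grow the control handler by two slots, parse the device's firmware node, and register the parsed properties as read-only controls. A condensed sketch of that pattern; the helper name is invented for illustration and error unrolling is left to the caller:

    #include <linux/device.h>
    #include <media/v4l2-ctrls.h>
    #include <media/v4l2-fwnode.h>

    static int example_add_fwnode_ctrls(struct device *dev,
                                        struct v4l2_ctrl_handler *hdl,
                                        const struct v4l2_ctrl_ops *ops)
    {
            struct v4l2_fwnode_device_properties props;
            int ret;

            /* Reads the "rotation" and "orientation" firmware properties. */
            ret = v4l2_fwnode_device_parse(dev, &props);
            if (ret)
                    return ret;

            /*
             * Exposes them as the V4L2_CID_CAMERA_ORIENTATION and
             * V4L2_CID_CAMERA_SENSOR_ROTATION read-only controls.
             */
            return v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
    }
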
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
new file mode 100644
index 000000000000..2dd2609db873
--- /dev/null
+++ b/drivers/media/i2c/ov2740.c
@@ -0,0 +1,1016 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV2740_LINK_FREQ_360MHZ 360000000ULL
+#define OV2740_SCLK 72000000LL
+#define OV2740_MCLK 19200000
+#define OV2740_DATA_LANES 2
+#define OV2740_RGB_DEPTH 10
+
+#define OV2740_REG_CHIP_ID 0x300a
+#define OV2740_CHIP_ID 0x2740
+
+#define OV2740_REG_MODE_SELECT 0x0100
+#define OV2740_MODE_STANDBY 0x00
+#define OV2740_MODE_STREAMING 0x01
+
+/* vertical-timings from sensor */
+#define OV2740_REG_VTS 0x380e
+#define OV2740_VTS_DEF 0x088a
+#define OV2740_VTS_MIN 0x0460
+#define OV2740_VTS_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define OV2740_REG_HTS 0x380c
+
+/* Exposure controls from sensor */
+#define OV2740_REG_EXPOSURE 0x3500
+#define OV2740_EXPOSURE_MIN 8
+#define OV2740_EXPOSURE_MAX_MARGIN 8
+#define OV2740_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV2740_REG_ANALOG_GAIN 0x3508
+#define OV2740_ANAL_GAIN_MIN 128
+#define OV2740_ANAL_GAIN_MAX 1983
+#define OV2740_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define OV2740_REG_MWB_R_GAIN 0x500a
+#define OV2740_REG_MWB_G_GAIN 0x500c
+#define OV2740_REG_MWB_B_GAIN 0x500e
+#define OV2740_DGTL_GAIN_MIN 0
+#define OV2740_DGTL_GAIN_MAX 4095
+#define OV2740_DGTL_GAIN_STEP 1
+#define OV2740_DGTL_GAIN_DEFAULT 1024
+
+/* Test Pattern Control */
+#define OV2740_REG_TEST_PATTERN 0x5040
+#define OV2740_TEST_PATTERN_ENABLE BIT(7)
+#define OV2740_TEST_PATTERN_BAR_SHIFT 2
+
+enum {
+ OV2740_LINK_FREQ_360MHZ_INDEX,
+};
+
+struct ov2740_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov2740_reg_list {
+ u32 num_of_regs;
+ const struct ov2740_reg *regs;
+};
+
+struct ov2740_link_freq_config {
+ const struct ov2740_reg_list reg_list;
+};
+
+struct ov2740_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+	/* Horizontal timing size */
+ u32 hts;
+
+	/* Default vertical timing size */
+ u32 vts_def;
+
+	/* Min vertical timing size */
+ u32 vts_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct ov2740_reg_list reg_list;
+};
+
+static const struct ov2740_reg mipi_data_rate_720mbps[] = {
+ {0x0103, 0x01},
+ {0x0302, 0x4b},
+ {0x030d, 0x4b},
+ {0x030e, 0x02},
+ {0x030a, 0x01},
+ {0x0312, 0x11},
+};
+
+static const struct ov2740_reg mode_1932x1092_regs[] = {
+ {0x3000, 0x00},
+ {0x3018, 0x32},
+ {0x3031, 0x0a},
+ {0x3080, 0x08},
+ {0x3083, 0xB4},
+ {0x3103, 0x00},
+ {0x3104, 0x01},
+ {0x3106, 0x01},
+ {0x3500, 0x00},
+ {0x3501, 0x44},
+ {0x3502, 0x40},
+ {0x3503, 0x88},
+ {0x3507, 0x00},
+ {0x3508, 0x00},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x3510, 0x00},
+ {0x3511, 0x00},
+ {0x3512, 0x20},
+ {0x3632, 0x00},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3645, 0x13},
+ {0x3646, 0x81},
+ {0x3636, 0x10},
+ {0x3651, 0x0a},
+ {0x3656, 0x02},
+ {0x3659, 0x04},
+ {0x365a, 0xda},
+ {0x365b, 0xa2},
+ {0x365c, 0x04},
+ {0x365d, 0x1d},
+ {0x365e, 0x1a},
+ {0x3662, 0xd7},
+ {0x3667, 0x78},
+ {0x3669, 0x0a},
+ {0x366a, 0x92},
+ {0x3700, 0x54},
+ {0x3702, 0x10},
+ {0x3706, 0x42},
+ {0x3709, 0x30},
+ {0x370b, 0xc2},
+ {0x3714, 0x63},
+ {0x3715, 0x01},
+ {0x3716, 0x00},
+ {0x371a, 0x3e},
+ {0x3732, 0x0e},
+ {0x3733, 0x10},
+ {0x375f, 0x0e},
+ {0x3768, 0x30},
+ {0x3769, 0x44},
+ {0x376a, 0x22},
+ {0x377b, 0x20},
+ {0x377c, 0x00},
+ {0x377d, 0x0c},
+ {0x3798, 0x00},
+ {0x37a1, 0x55},
+ {0x37a8, 0x6d},
+ {0x37c2, 0x04},
+ {0x37c5, 0x00},
+ {0x37c8, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x07},
+ {0x3805, 0x8f},
+ {0x3806, 0x04},
+ {0x3807, 0x47},
+ {0x3808, 0x07},
+ {0x3809, 0x88},
+ {0x380a, 0x04},
+ {0x380b, 0x40},
+ {0x380c, 0x04},
+ {0x380d, 0x38},
+ {0x380e, 0x04},
+ {0x380f, 0x60},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x04},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3820, 0x80},
+ {0x3821, 0x46},
+ {0x3822, 0x84},
+ {0x3829, 0x00},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x04},
+ {0x3836, 0x01},
+ {0x3837, 0x08},
+ {0x3839, 0x01},
+ {0x383a, 0x00},
+ {0x383b, 0x08},
+ {0x383c, 0x00},
+ {0x3f0b, 0x00},
+ {0x4001, 0x20},
+ {0x4009, 0x07},
+ {0x4003, 0x10},
+ {0x4010, 0xe0},
+ {0x4016, 0x00},
+ {0x4017, 0x10},
+ {0x4044, 0x02},
+ {0x4304, 0x08},
+ {0x4307, 0x30},
+ {0x4320, 0x80},
+ {0x4322, 0x00},
+ {0x4323, 0x00},
+ {0x4324, 0x00},
+ {0x4325, 0x00},
+ {0x4326, 0x00},
+ {0x4327, 0x00},
+ {0x4328, 0x00},
+ {0x4329, 0x00},
+ {0x432c, 0x03},
+ {0x432d, 0x81},
+ {0x4501, 0x84},
+ {0x4502, 0x40},
+ {0x4503, 0x18},
+ {0x4504, 0x04},
+ {0x4508, 0x02},
+ {0x4601, 0x10},
+ {0x4800, 0x00},
+ {0x4816, 0x52},
+ {0x4837, 0x16},
+ {0x5000, 0x7f},
+ {0x5001, 0x00},
+ {0x5005, 0x38},
+ {0x501e, 0x0d},
+ {0x5040, 0x00},
+ {0x5901, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x07},
+ {0x3805, 0x8f},
+ {0x3806, 0x04},
+ {0x3807, 0x47},
+ {0x3808, 0x07},
+ {0x3809, 0x8c},
+ {0x380a, 0x04},
+ {0x380b, 0x44},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x01},
+};
+
+static const char * const ov2740_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Bottom-Top Darker Color Bar",
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV2740_LINK_FREQ_360MHZ,
+};
+
+static const struct ov2740_link_freq_config link_freq_configs[] = {
+ [OV2740_LINK_FREQ_360MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps),
+ .regs = mipi_data_rate_720mbps,
+ }
+ },
+};
+
+static const struct ov2740_mode supported_modes[] = {
+ {
+ .width = 1932,
+ .height = 1092,
+ .hts = 1080,
+ .vts_def = OV2740_VTS_DEF,
+ .vts_min = OV2740_VTS_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1932x1092_regs),
+ .regs = mode_1932x1092_regs,
+ },
+ .link_freq_index = OV2740_LINK_FREQ_360MHZ_INDEX,
+ },
+};
+
+struct ov2740 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov2740_mode *cur_mode;
+
+	/* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static inline struct ov2740 *to_ov2740(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ov2740, sd);
+}
+
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV2740_DATA_LANES;
+
+ do_div(pixel_rate, OV2740_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
+static u64 to_pixels_per_line(u32 hts, u32 f_index)
+{
+ u64 ppl = hts * to_pixel_rate(f_index);
+
+ do_div(ppl, OV2740_SCLK);
+
+ return ppl;
+}
+
+static int ov2740_read_reg(struct ov2740 *ov2740, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret = 0;
+
+ if (len > sizeof(data_buf))
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[sizeof(data_buf) - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return ret < 0 ? ret : -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int ov2740_write_reg(struct ov2740 *ov2740, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
+ u8 buf[6];
+ int ret = 0;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int ov2740_write_reg_list(struct ov2740 *ov2740,
+ const struct ov2740_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = ov2740_write_reg(ov2740, r_list->regs[i].address, 1,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "write reg 0x%4.4x return err = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov2740_update_digital_gain(struct ov2740 *ov2740, u32 d_gain)
+{
+ int ret = 0;
+
+ ret = ov2740_write_reg(ov2740, OV2740_REG_MWB_R_GAIN, 2, d_gain);
+ if (ret)
+ return ret;
+
+ ret = ov2740_write_reg(ov2740, OV2740_REG_MWB_G_GAIN, 2, d_gain);
+ if (ret)
+ return ret;
+
+ return ov2740_write_reg(ov2740, OV2740_REG_MWB_B_GAIN, 2, d_gain);
+}
+
+static int ov2740_test_pattern(struct ov2740 *ov2740, u32 pattern)
+{
+ if (pattern)
+ pattern = (pattern - 1) << OV2740_TEST_PATTERN_BAR_SHIFT |
+ OV2740_TEST_PATTERN_ENABLE;
+
+ return ov2740_write_reg(ov2740, OV2740_REG_TEST_PATTERN, 1, pattern);
+}
+
+static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2740 *ov2740 = container_of(ctrl->handler,
+ struct ov2740, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = ov2740->cur_mode->height + ctrl->val -
+ OV2740_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(ov2740->exposure,
+ ov2740->exposure->minimum,
+ exposure_max, ov2740->exposure->step,
+ exposure_max);
+ }
+
+	/* V4L2 control values are applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov2740_write_reg(ov2740, OV2740_REG_ANALOG_GAIN, 2,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov2740_update_digital_gain(ov2740, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ /* 4 least significant bits of expsoure are fractional part */
+ ret = ov2740_write_reg(ov2740, OV2740_REG_EXPOSURE, 3,
+ ctrl->val << 4);
+ break;
+
+ case V4L2_CID_VBLANK:
+ ret = ov2740_write_reg(ov2740, OV2740_REG_VTS, 2,
+ ov2740->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov2740_test_pattern(ov2740, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov2740_ctrl_ops = {
+ .s_ctrl = ov2740_set_ctrl,
+};
+
+static int ov2740_init_controls(struct ov2740 *ov2740)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ const struct ov2740_mode *cur_mode;
+ s64 exposure_max, h_blank, pixel_rate;
+ u32 vblank_min, vblank_max, vblank_default;
+ int size;
+ int ret = 0;
+
+ ctrl_hdlr = &ov2740->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &ov2740->mutex;
+ cur_mode = ov2740->cur_mode;
+ size = ARRAY_SIZE(link_freq_menu_items);
+
+ ov2740->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ size - 1, 0,
+ link_freq_menu_items);
+ if (ov2740->link_freq)
+ ov2740->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate = to_pixel_rate(OV2740_LINK_FREQ_360MHZ_INDEX);
+ ov2740->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ pixel_rate, 1, pixel_rate);
+
+ vblank_min = cur_mode->vts_min - cur_mode->height;
+ vblank_max = OV2740_VTS_MAX - cur_mode->height;
+ vblank_default = cur_mode->vts_def - cur_mode->height;
+ ov2740->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_default);
+
+ h_blank = to_pixels_per_line(cur_mode->hts, cur_mode->link_freq_index);
+ h_blank -= cur_mode->width;
+ ov2740->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (ov2740->hblank)
+ ov2740->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV2740_ANAL_GAIN_MIN, OV2740_ANAL_GAIN_MAX,
+ OV2740_ANAL_GAIN_STEP, OV2740_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV2740_DGTL_GAIN_MIN, OV2740_DGTL_GAIN_MAX,
+ OV2740_DGTL_GAIN_STEP, OV2740_DGTL_GAIN_DEFAULT);
+ exposure_max = cur_mode->vts_def - OV2740_EXPOSURE_MAX_MARGIN;
+ ov2740->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV2740_EXPOSURE_MIN, exposure_max,
+ OV2740_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov2740_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov2740_test_pattern_menu) - 1,
+ 0, 0, ov2740_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov2740->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void ov2740_update_pad_format(const struct ov2740_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int ov2740_start_streaming(struct ov2740 *ov2740)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
+ const struct ov2740_reg_list *reg_list;
+ int link_freq_index;
+ int ret = 0;
+
+ link_freq_index = ov2740->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov2740_write_reg_list(ov2740, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &ov2740->cur_mode->reg_list;
+ ret = ov2740_write_reg_list(ov2740, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov2740->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1,
+ OV2740_MODE_STREAMING);
+ if (ret)
+ dev_err(&client->dev, "failed to start streaming");
+
+ return ret;
+}
+
+static void ov2740_stop_streaming(struct ov2740 *ov2740)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
+
+ if (ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1,
+ OV2740_MODE_STANDBY))
+ dev_err(&client->dev, "failed to stop streaming");
+}
+
+static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov2740 *ov2740 = to_ov2740(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (ov2740->streaming == enable)
+ return 0;
+
+ mutex_lock(&ov2740->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&ov2740->mutex);
+ return ret;
+ }
+
+ ret = ov2740_start_streaming(ov2740);
+ if (ret) {
+ enable = 0;
+ ov2740_stop_streaming(ov2740);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ ov2740_stop_streaming(ov2740);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov2740->streaming = enable;
+ mutex_unlock(&ov2740->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov2740_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2740 *ov2740 = to_ov2740(sd);
+
+ mutex_lock(&ov2740->mutex);
+ if (ov2740->streaming)
+ ov2740_stop_streaming(ov2740);
+
+ mutex_unlock(&ov2740->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused ov2740_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2740 *ov2740 = to_ov2740(sd);
+ int ret = 0;
+
+ mutex_lock(&ov2740->mutex);
+ if (!ov2740->streaming)
+ goto exit;
+
+ ret = ov2740_start_streaming(ov2740);
+ if (ret) {
+ ov2740->streaming = false;
+ ov2740_stop_streaming(ov2740);
+ }
+
+exit:
+ mutex_unlock(&ov2740->mutex);
+ return ret;
+}
+
+static int ov2740_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov2740 *ov2740 = to_ov2740(sd);
+ const struct ov2740_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&ov2740->mutex);
+ ov2740_update_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ } else {
+ ov2740->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov2740->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(ov2740->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(ov2740->vblank,
+ mode->vts_min - mode->height,
+ OV2740_VTS_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov2740->vblank, vblank_def);
+ h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
+ mode->width;
+ __v4l2_ctrl_modify_range(ov2740->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+ mutex_unlock(&ov2740->mutex);
+
+ return 0;
+}
+
+static int ov2740_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov2740 *ov2740 = to_ov2740(sd);
+
+ mutex_lock(&ov2740->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&ov2740->sd, cfg,
+ fmt->pad);
+ else
+ ov2740_update_pad_format(ov2740->cur_mode, &fmt->format);
+
+ mutex_unlock(&ov2740->mutex);
+
+ return 0;
+}
+
+static int ov2740_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov2740_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov2740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ov2740 *ov2740 = to_ov2740(sd);
+
+ mutex_lock(&ov2740->mutex);
+ ov2740_update_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ mutex_unlock(&ov2740->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov2740_video_ops = {
+ .s_stream = ov2740_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov2740_pad_ops = {
+ .set_fmt = ov2740_set_format,
+ .get_fmt = ov2740_get_format,
+ .enum_mbus_code = ov2740_enum_mbus_code,
+ .enum_frame_size = ov2740_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov2740_subdev_ops = {
+ .video = &ov2740_video_ops,
+ .pad = &ov2740_pad_ops,
+};
+
+static const struct media_entity_operations ov2740_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov2740_internal_ops = {
+ .open = ov2740_open,
+};
+
+static int ov2740_identify_module(struct ov2740 *ov2740)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
+ int ret;
+ u32 val;
+
+ ret = ov2740_read_reg(ov2740, OV2740_REG_CHIP_ID, 3, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV2740_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OV2740_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ov2740_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 mclk;
+ int ret;
+ unsigned int i, j;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (ret)
+ return ret;
+
+ if (mclk != OV2740_MCLK) {
+ dev_err(dev, "external clock %d is not supported", mclk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV2740_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov2740_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2740 *ov2740 = to_ov2740(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&ov2740->mutex);
+
+ return 0;
+}
+
+static int ov2740_probe(struct i2c_client *client)
+{
+ struct ov2740 *ov2740;
+ int ret = 0;
+
+ ret = ov2740_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ ov2740 = devm_kzalloc(&client->dev, sizeof(*ov2740), GFP_KERNEL);
+ if (!ov2740)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
+ ret = ov2740_identify_module(ov2740);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&ov2740->mutex);
+ ov2740->cur_mode = &supported_modes[0];
+ ret = ov2740_init_controls(ov2740);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov2740->sd.internal_ops = &ov2740_internal_ops;
+ ov2740->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov2740->sd.entity.ops = &ov2740_subdev_entity_ops;
+ ov2740->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov2740->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov2740->sd.entity, 1, &ov2740->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&ov2740->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov2740->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler);
+ mutex_destroy(&ov2740->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops ov2740_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov2740_suspend, ov2740_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov2740_acpi_ids[] = {
+ {"INT3474"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, ov2740_acpi_ids);
+#endif
+
+static struct i2c_driver ov2740_i2c_driver = {
+ .driver = {
+ .name = "ov2740",
+ .pm = &ov2740_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov2740_acpi_ids),
+ },
+ .probe_new = ov2740_probe,
+ .remove = ov2740_remove,
+};
+
+module_i2c_driver(ov2740_i2c_driver);
+
+MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_DESCRIPTION("OmniVision OV2740 sensor driver");
+MODULE_LICENSE("GPL v2");
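
Two details of the new ov2740 driver are easy to misread: ov2740_write_reg() left-shifts the value so its len meaningful bytes land immediately after the big-endian register address, and to_pixel_rate() derives the pixel clock from the link frequency (double data rate, two lanes, ten bits per pixel). A standalone sketch of both, reusing the driver's constants but illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t reg = 0x380e;  /* OV2740_REG_VTS */
            uint32_t val = 0x088a;  /* OV2740_VTS_DEF */
            uint16_t len = 2;
            uint8_t buf[6];
            uint32_t shifted = val << 8 * (4 - len); /* left-align the value */

            /* put_unaligned_be16(reg, buf) */
            buf[0] = reg >> 8;
            buf[1] = reg & 0xff;
            /*
             * put_unaligned_be32(shifted, buf + 2); only len + 2 bytes go on
             * the wire, so buf[2..3] carry 0x08 0x8a and the rest is unsent.
             */
            buf[2] = shifted >> 24;
            buf[3] = shifted >> 16;
            buf[4] = shifted >> 8;
            buf[5] = shifted & 0xff;
            printf("i2c payload: %02x %02x %02x %02x\n",
                   buf[0], buf[1], buf[2], buf[3]);

            /* to_pixel_rate(): 360 MHz link * 2 (DDR) * 2 lanes / 10 bpp */
            printf("pixel rate: %llu\n",
                   (unsigned long long)(360000000ULL * 2 * 2 / 10));
            return 0;
    }
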
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 854031f0b64a..2fe4a7ac0592 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -3093,8 +3093,8 @@ static int ov5640_probe(struct i2c_client *client)
free_ctrls:
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
entity_cleanup:
- mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
+ mutex_destroy(&sensor->lock);
return ret;
}
@@ -3104,9 +3104,9 @@ static int ov5640_remove(struct i2c_client *client)
struct ov5640_dev *sensor = to_ov5640_dev(sd);
v4l2_async_unregister_subdev(&sensor->sd);
- mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ mutex_destroy(&sensor->lock);
return 0;
}
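
The ov5640 reordering above is not cosmetic: the driver points its control handler's lock at sensor->lock, and v4l2_ctrl_handler_free() acquires hdl->lock, so the mutex has to outlive the handler. A minimal sketch of the constraint; the helper is hypothetical, not driver code:

    #include <linux/mutex.h>
    #include <media/v4l2-ctrls.h>

    /* Teardown must free the handler before destroying the mutex it uses. */
    static void example_sensor_teardown(struct v4l2_ctrl_handler *hdl,
                                        struct mutex *lock)
    {
            v4l2_ctrl_handler_free(hdl);    /* may take *lock via hdl->lock */
            mutex_destroy(lock);            /* only safe once hdl is gone */
    }
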
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 041fcbb4eebd..f26252e35e08 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#define OV5670_REG_CHIP_ID 0x300a
#define OV5670_CHIP_ID 0x005670
@@ -2059,6 +2060,8 @@ static const struct v4l2_ctrl_ops ov5670_ctrl_ops = {
/* Initialize control handlers */
static int ov5670_init_controls(struct ov5670 *ov5670)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
+ struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 vblank_max;
s64 vblank_def;
@@ -2067,7 +2070,7 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
int ret;
ctrl_hdlr = &ov5670->ctrl_handler;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
if (ret)
return ret;
@@ -2129,6 +2132,15 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
goto error;
}
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov5670_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
ov5670->sd.ctrl_handler = ctrl_hdlr;
return 0;
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index 8655842af275..4ca27675cc5a 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -3,10 +3,13 @@
#include <asm/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -18,7 +21,7 @@
#define OV8856_LINK_FREQ_360MHZ 360000000ULL
#define OV8856_LINK_FREQ_180MHZ 180000000ULL
#define OV8856_SCLK 144000000ULL
-#define OV8856_MCLK 19200000
+#define OV8856_XVCLK_19_2 19200000
#define OV8856_DATA_LANES 4
#define OV8856_RGB_DEPTH 10
@@ -29,6 +32,19 @@
#define OV8856_MODE_STANDBY 0x00
#define OV8856_MODE_STREAMING 0x01
+/* module revisions */
+#define OV8856_2A_MODULE 0x01
+#define OV8856_1B_MODULE 0x02
+
+/* The OTP read-out buffer is at 0x7000; 0xf is the offset of the
+ * byte within the OTP that encodes the module revision.
+ */
+#define OV8856_MODULE_REVISION 0x700f
+#define OV8856_OTP_MODE_CTRL 0x3d84
+#define OV8856_OTP_LOAD_CTRL 0x3d81
+#define OV8856_OTP_MODE_AUTO 0x00
+#define OV8856_OTP_LOAD_CTRL_ENABLE BIT(0)
+
/* vertical-timings from sensor */
#define OV8856_REG_VTS 0x380e
#define OV8856_VTS_MAX 0x7fff
@@ -64,6 +80,12 @@
#define to_ov8856(_sd) container_of(_sd, struct ov8856, sd)
+static const char * const ov8856_supply_names[] = {
+ "dovdd", /* Digital I/O power */
+ "avdd", /* Analog power */
+ "dvdd", /* Digital core power */
+};
+
enum {
OV8856_LINK_FREQ_720MBPS,
OV8856_LINK_FREQ_360MBPS,
@@ -566,6 +588,10 @@ struct ov8856 {
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
+ struct clk *xvclk;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov8856_supply_names)];
+
/* V4L2 Controls */
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *pixel_rate;
@@ -908,6 +934,57 @@ static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
+static int __ov8856_power_on(struct ov8856 *ov8856)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+ int ret;
+
+ if (is_acpi_node(dev_fwnode(&client->dev)))
+ return 0;
+
+ ret = clk_prepare_enable(ov8856->xvclk);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to enable xvclk\n");
+ return ret;
+ }
+
+ if (ov8856->reset_gpio) {
+ gpiod_set_value_cansleep(ov8856->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov8856_supply_names),
+ ov8856->supplies);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to enable regulators\n");
+ goto disable_clk;
+ }
+
+ gpiod_set_value_cansleep(ov8856->reset_gpio, 0);
+ usleep_range(1500, 1800);
+
+ return 0;
+
+disable_clk:
+ gpiod_set_value_cansleep(ov8856->reset_gpio, 1);
+ clk_disable_unprepare(ov8856->xvclk);
+
+ return ret;
+}
+
+static void __ov8856_power_off(struct ov8856 *ov8856)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+
+ if (is_acpi_node(dev_fwnode(&client->dev)))
+ return;
+
+ gpiod_set_value_cansleep(ov8856->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ov8856_supply_names),
+ ov8856->supplies);
+ clk_disable_unprepare(ov8856->xvclk);
+}
+
static int __maybe_unused ov8856_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -918,6 +995,7 @@ static int __maybe_unused ov8856_suspend(struct device *dev)
if (ov8856->streaming)
ov8856_stop_streaming(ov8856);
+ __ov8856_power_off(ov8856);
mutex_unlock(&ov8856->mutex);
return 0;
@@ -931,6 +1009,8 @@ static int __maybe_unused ov8856_resume(struct device *dev)
int ret;
mutex_lock(&ov8856->mutex);
+
+ __ov8856_power_on(ov8856);
if (ov8856->streaming) {
ret = ov8856_start_streaming(ov8856);
if (ret) {
@@ -1089,32 +1169,96 @@ static int ov8856_identify_module(struct ov8856 *ov8856)
return -ENXIO;
}
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+ OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
+ if (ret)
+ return ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_OTP_MODE_CTRL,
+ OV8856_REG_VALUE_08BIT, OV8856_OTP_MODE_AUTO);
+ if (ret) {
+ dev_err(&client->dev, "failed to set otp mode");
+ return ret;
+ }
+
+ ret = ov8856_write_reg(ov8856, OV8856_OTP_LOAD_CTRL,
+ OV8856_REG_VALUE_08BIT,
+ OV8856_OTP_LOAD_CTRL_ENABLE);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable load control");
+ return ret;
+ }
+
+ ret = ov8856_read_reg(ov8856, OV8856_MODULE_REVISION,
+ OV8856_REG_VALUE_08BIT, &val);
+ if (ret) {
+ dev_err(&client->dev, "failed to read module revision");
+ return ret;
+ }
+
+ dev_info(&client->dev, "OV8856 revision %x (%s) at address 0x%02x\n",
+ val,
+ val == OV8856_2A_MODULE ? "2A" :
+ val == OV8856_1B_MODULE ? "1B" : "unknown revision",
+ client->addr);
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+ OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY);
+ if (ret) {
+ dev_err(&client->dev, "failed to exit streaming mode");
+ return ret;
+ }
+
return 0;
}
-static int ov8856_check_hwcfg(struct device *dev)
+static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
{
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
+ u32 xvclk_rate;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &xvclk_rate);
if (ret)
return ret;
- if (mclk != OV8856_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
+ if (!is_acpi_node(fwnode)) {
+ ov8856->xvclk = devm_clk_get(dev, "xvclk");
+ if (IS_ERR(ov8856->xvclk)) {
+ dev_err(dev, "could not get xvclk clock (%pe)\n",
+ ov8856->xvclk);
+ return PTR_ERR(ov8856->xvclk);
+ }
+
+ clk_set_rate(ov8856->xvclk, xvclk_rate);
+ xvclk_rate = clk_get_rate(ov8856->xvclk);
}
+ if (xvclk_rate != OV8856_XVCLK_19_2)
+ dev_warn(dev, "external clock rate %u is unsupported",
+ xvclk_rate);
+
+ ov8856->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ov8856->reset_gpio))
+ return PTR_ERR(ov8856->reset_gpio);
+
+ for (i = 0; i < ARRAY_SIZE(ov8856_supply_names); i++)
+ ov8856->supplies[i].supply = ov8856_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ov8856_supply_names),
+ ov8856->supplies);
+ if (ret)
+ return ret;
+
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -1169,6 +1313,8 @@ static int ov8856_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
mutex_destroy(&ov8856->mutex);
+ __ov8856_power_off(ov8856);
+
return 0;
}
@@ -1177,22 +1323,29 @@ static int ov8856_probe(struct i2c_client *client)
struct ov8856 *ov8856;
int ret;
- ret = ov8856_check_hwcfg(&client->dev);
+ ov8856 = devm_kzalloc(&client->dev, sizeof(*ov8856), GFP_KERNEL);
+ if (!ov8856)
+ return -ENOMEM;
+
+ ret = ov8856_get_hwcfg(ov8856, &client->dev);
if (ret) {
- dev_err(&client->dev, "failed to check HW configuration: %d",
+ dev_err(&client->dev, "failed to get HW configuration: %d",
ret);
return ret;
}
- ov8856 = devm_kzalloc(&client->dev, sizeof(*ov8856), GFP_KERNEL);
- if (!ov8856)
- return -ENOMEM;
-
v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
+
+ ret = __ov8856_power_on(ov8856);
+ if (ret) {
+ dev_err(&client->dev, "failed to power on\n");
+ return ret;
+ }
+
ret = ov8856_identify_module(ov8856);
if (ret) {
dev_err(&client->dev, "failed to find sensor: %d", ret);
- return ret;
+ goto probe_power_off;
}
mutex_init(&ov8856->mutex);
@@ -1238,6 +1391,9 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov8856->sd.ctrl_handler);
mutex_destroy(&ov8856->mutex);
+probe_power_off:
+ __ov8856_power_off(ov8856);
+
return ret;
}
@@ -1254,11 +1410,18 @@ static const struct acpi_device_id ov8856_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, ov8856_acpi_ids);
#endif
+static const struct of_device_id ov8856_of_match[] = {
+ { .compatible = "ovti,ov8856" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov8856_of_match);
+
static struct i2c_driver ov8856_i2c_driver = {
.driver = {
.name = "ov8856",
.pm = &ov8856_pm_ops,
.acpi_match_table = ACPI_PTR(ov8856_acpi_ids),
+ .of_match_table = ov8856_of_match,
},
.probe_new = ov8856_probe,
.remove = ov8856_remove,
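
The hunks above call __ov8856_power_on() from resume and probe, but the power-on routine itself falls outside this diff context. Below is a minimal sketch of what such a counterpart typically looks like, mirroring __ov8856_power_off() in reverse order; the delay value and error messages are assumptions, not taken verbatim from the driver.

/*
 * Sketch of the power-on counterpart to __ov8856_power_off() above.
 * Sequencing mirrors power-off in reverse: clock on, reset asserted,
 * supplies on, reset released. The usleep range is an assumption.
 */
static int __ov8856_power_on(struct ov8856 *ov8856)
{
        struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
        int ret;

        /* On ACPI platforms the firmware handles power sequencing. */
        if (is_acpi_node(dev_fwnode(&client->dev)))
                return 0;

        ret = clk_prepare_enable(ov8856->xvclk);
        if (ret < 0) {
                dev_err(&client->dev, "failed to enable xvclk\n");
                return ret;
        }

        gpiod_set_value_cansleep(ov8856->reset_gpio, 1);

        ret = regulator_bulk_enable(ARRAY_SIZE(ov8856_supply_names),
                                    ov8856->supplies);
        if (ret < 0) {
                dev_err(&client->dev, "failed to enable regulators\n");
                clk_disable_unprepare(ov8856->xvclk);
                return ret;
        }

        gpiod_set_value_cansleep(ov8856->reset_gpio, 0);
        usleep_range(1500, 1800);       /* reset release time, assumed */

        return 0;
}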
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index cdfe008ba39f..42584a088273 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -281,7 +281,7 @@ struct s5k5baf_fw {
u16 id;
u16 offset;
} seq[0];
- u16 data[0];
+ u16 data[];
};
struct s5k5baf {
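
The `data[0]` → `data[]` change above converts a GCC zero-length array into a C99 flexible array member, which lets the compiler and fortified allocators reason about the trailing array's bounds. A hedged sketch of the usual allocation pattern for such a struct (names here are hypothetical, not from s5k5baf.c):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_fw {
        u16 count;
        u16 data[];             /* flexible array member: must be last */
};

static struct example_fw *example_fw_alloc(u16 count)
{
        struct example_fw *fw;

        /* struct_size() computes sizeof(*fw) + count * sizeof(u16)
         * with overflow checking. */
        fw = kzalloc(struct_size(fw, data, count), GFP_KERNEL);
        if (!fw)
                return NULL;

        fw->count = count;
        return fw;
}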
diff --git a/drivers/media/i2c/smiapp/Kconfig b/drivers/media/i2c/smiapp/Kconfig
index fcaa7f9494a8..6893b532824f 100644
--- a/drivers/media/i2c/smiapp/Kconfig
+++ b/drivers/media/i2c/smiapp/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_SMIAPP
tristate "SMIA++/SMIA sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
- depends on MEDIA_CAMERA_SUPPORT
+ depends on I2C && VIDEO_V4L2 && HAVE_CLK
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEO_SMIAPP_PLL
select V4L2_FWNODE
help
diff --git a/drivers/media/mc/Kconfig b/drivers/media/mc/Kconfig
index 3b9795cfcb36..4815b9dde9af 100644
--- a/drivers/media/mc/Kconfig
+++ b/drivers/media/mc/Kconfig
@@ -1,17 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
#
# Media controller
-# Selectable only for webcam/grabbers, as other drivers don't use it
#
-config MEDIA_CONTROLLER
- bool "Media Controller API"
- depends on MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT
- help
- Enable the media controller API used to query media devices internal
- topology and configure it dynamically.
-
- This API is mostly used by camera interfaces in embedded platforms.
-
config MEDIA_CONTROLLER_DVB
bool "Enable Media controller for DVB (EXPERIMENTAL)"
depends on MEDIA_CONTROLLER && DVB_CORE
@@ -21,8 +13,8 @@ config MEDIA_CONTROLLER_DVB
This is currently experimental.
config MEDIA_CONTROLLER_REQUEST_API
- bool "Enable Media controller Request API (EXPERIMENTAL)"
- depends on MEDIA_CONTROLLER && STAGING_MEDIA
+ bool
+ depends on MEDIA_CONTROLLER
help
DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
@@ -31,3 +23,6 @@ config MEDIA_CONTROLLER_REQUEST_API
There is currently no intention to provide API or ABI stability for
this new API.
+
+comment "Please notice that the enabled Media controller Request API is EXPERIMENTAL"
+ depends on MEDIA_CONTROLLER_REQUEST_API
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 211279c5fd77..12b45e669bcc 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -386,7 +386,7 @@ int media_entity_get_fwnode_pad(struct media_entity *entity,
if (ret)
return ret;
- ret = entity->ops->get_fwnode_pad(&endpoint);
+ ret = entity->ops->get_fwnode_pad(entity, &endpoint);
if (ret < 0)
return ret;
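
The fix passes the entity through to the op, matching the two-argument get_fwnode_pad() prototype. A hypothetical implementation of the callback under that signature might look like the sketch below; the port-to-pad mapping is purely illustrative.

static int my_entity_get_fwnode_pad(struct media_entity *entity,
                                    struct fwnode_endpoint *endpoint)
{
        /* Illustrative: map the fwnode port number directly to a pad. */
        if (endpoint->port >= entity->num_pads)
                return -EINVAL;

        return endpoint->port;
}

static const struct media_entity_operations my_entity_ops = {
        .get_fwnode_pad = my_entity_get_fwnode_pad,
};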
diff --git a/drivers/media/mmc/Kconfig b/drivers/media/mmc/Kconfig
index de0528c6994a..75aa6de08d53 100644
--- a/drivers/media/mmc/Kconfig
+++ b/drivers/media/mmc/Kconfig
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-comment "Supported MMC/SDIO adapters"
source "drivers/media/mmc/siano/Kconfig"
diff --git a/drivers/media/mmc/siano/Kconfig b/drivers/media/mmc/siano/Kconfig
index 1919f6fea8b1..570696019a9e 100644
--- a/drivers/media/mmc/siano/Kconfig
+++ b/drivers/media/mmc/siano/Kconfig
@@ -2,6 +2,8 @@
#
# Siano Mobile Silicon Digital TV device configuration
#
+comment "MMC/SDIO DVB adapters"
+ depends on DVB_CORE && HAS_DMA && MMC
config SMS_SDIO_DRV
tristate "Siano SMS1xxx based MDTV via SDIO interface"
diff --git a/drivers/media/mmc/siano/smssdio.c b/drivers/media/mmc/siano/smssdio.c
index def5e93849d2..065b572e0272 100644
--- a/drivers/media/mmc/siano/smssdio.c
+++ b/drivers/media/mmc/siano/smssdio.c
@@ -58,15 +58,15 @@ static const struct sdio_device_id smssdio_ids[] = {
.driver_data = SMS1XXX_BOARD_SIANO_VEGA},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VENICE),
.driver_data = SMS1XXX_BOARD_SIANO_VEGA},
- {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, 0x302),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_MING),
.driver_data = SMS1XXX_BOARD_SIANO_MING},
- {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, 0x500),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_PELE),
.driver_data = SMS1XXX_BOARD_SIANO_PELE},
- {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, 0x600),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_RIO),
.driver_data = SMS1XXX_BOARD_SIANO_RIO},
- {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, 0x700),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_DENVER_2160),
.driver_data = SMS1XXX_BOARD_SIANO_DENVER_2160},
- {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, 0x800),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_DENVER_1530),
.driver_data = SMS1XXX_BOARD_SIANO_DENVER_1530},
{ /* end: all zeroes */ },
};
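
For context on the table above: the SDIO core matches an inserted function against this table and hands the winning entry to probe, where driver_data selects the board variant. A sketch of that consumer side follows; the probe name and message are illustrative, not taken from smssdio.c.

#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

static int example_sdio_probe(struct sdio_func *func,
                              const struct sdio_device_id *id)
{
        /* driver_data carries the SMS1XXX_BOARD_* value from the table. */
        unsigned long board_id = id->driver_data;

        dev_info(&func->dev, "Siano board variant %lu\n", board_id);
        return 0;
}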
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index dcb3719f440e..2cd8e328dda9 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-if PCI && MEDIA_SUPPORT
+
+if PCI
menuconfig MEDIA_PCI_SUPPORT
bool "Media PCI Adapters"
@@ -56,5 +57,16 @@ endif
source "drivers/media/pci/intel/ipu3/Kconfig"
+config VIDEO_PCI_SKELETON
+ tristate "Skeleton PCI V4L2 driver"
+ depends on SAMPLES
+ depends on MEDIA_TEST_SUPPORT
+ depends on PCI && VIDEO_V4L2
+ select VIDEOBUF2_MEMOPS
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Enable build of the skeleton PCI driver, used as a reference
+ when developing new drivers.
+
endif #MEDIA_PCI_SUPPORT
endif #PCI
diff --git a/drivers/media/pci/bt8xx/Kconfig b/drivers/media/pci/bt8xx/Kconfig
index 75d172a6f54c..3f56decbb681 100644
--- a/drivers/media/pci/bt8xx/Kconfig
+++ b/drivers/media/pci/bt8xx/Kconfig
@@ -17,7 +17,7 @@ config VIDEO_BT848
help
Support for BT848 based frame grabber/overlay boards. This includes
the Miro, Hauppauge and STB boards. Please read the material in
- <file:Documentation/media/v4l-drivers/bttv.rst> for more information.
+ <file:Documentation/admin-guide/media/bttv.rst> for more information.
To compile this driver as a module, choose M here: the
module will be called bttv.
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index e0e7df460a92..d8d9ea6b09bc 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -1,11 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_COBALT
tristate "Cisco Cobalt support"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C
depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
depends on (GPIOLIB && DRM_I2C_ADV7511=n) || COMPILE_TEST
depends on SND
depends on MTD
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select I2C_ALGOBIT
select SND_PCM
select VIDEO_ADV7604
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index fa57e12f2ac8..4864def20676 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -466,14 +466,24 @@ static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
static const struct v4l2_fmtdesc formats[] = {
- { 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
- "HM12 (YUV 4:1:1)", V4L2_PIX_FMT_HM12, { 0, 0, 0, 0 }
+ {
+ .index = 0,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "HM12 (YUV 4:1:1)",
+ .pixelformat = V4L2_PIX_FMT_HM12,
},
- { 1, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FMT_FLAG_COMPRESSED,
- "MPEG", V4L2_PIX_FMT_MPEG, { 0, 0, 0, 0 }
+ {
+ .index = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .description = "MPEG",
+ .pixelformat = V4L2_PIX_FMT_MPEG,
},
- { 2, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
- "UYVY 4:2:2", V4L2_PIX_FMT_UYVY, { 0, 0, 0, 0 }
+ {
+ .index = 2,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "UYVY 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
},
};
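
Worth noting about the designated-initializer conversion above: any member not named, including the reserved array that the old `{ 0, 0, 0, 0 }` filler covered, is implicitly zero-initialized, so a minimal entry reduces to:

/* .flags and .reserved[] are implicitly zeroed; member order is free. */
static const struct v4l2_fmtdesc example = {
        .index       = 0,
        .type        = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .description = "UYVY 4:2:2",
        .pixelformat = V4L2_PIX_FMT_UYVY,
};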
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 3178df3c4922..c41bae118415 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -845,7 +845,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
/*
* Audio related reset according to
- * Documentation/media/v4l-drivers/cx2341x.rst
+ * Documentation/driver-api/media/drivers/cx2341x-devel.rst
*/
if (atomic_read(&cx->ana_capturing) == 0)
cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2,
@@ -853,7 +853,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
/*
* Number of lines for Field 1 & Field 2 according to
- * Documentation/media/v4l-drivers/cx2341x.rst
+ * Documentation/driver-api/media/drivers/cx2341x-devel.rst
* Field 1 is 312 for 625 line systems in BT.656
* Field 2 is 313 for 625 line systems in BT.656
*/
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 8e5a2c580821..570a4a09c387 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -703,8 +703,19 @@ struct cx23885_board cx23885_boards[] = {
},
[CX23885_BOARD_HAUPPAUGE_HVR5525] = {
.name = "Hauppauge WinTV-HVR5525",
+ .porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ } },
},
[CX23885_BOARD_VIEWCAST_260E] = {
.name = "ViewCast 260e",
@@ -757,32 +768,61 @@ struct cx23885_board cx23885_boards[] = {
} },
},
[CX23885_BOARD_HAUPPAUGE_QUADHD_DVB] = {
- .name = "Hauppauge WinTV-QuadHD-DVB",
+ .name = "Hauppauge WinTV-QuadHD-DVB",
+ .porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ } },
},
[CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885] = {
- .name = "Hauppauge WinTV-QuadHD-DVB(885)",
+ .name = "Hauppauge WinTV-QuadHD-DVB(885)",
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
},
[CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC] = {
- .name = "Hauppauge WinTV-QuadHD-ATSC",
+ .name = "Hauppauge WinTV-QuadHD-ATSC",
+ .porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ } },
},
[CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885] = {
- .name = "Hauppauge WinTV-QuadHD-ATSC(885)",
+ .name = "Hauppauge WinTV-QuadHD-ATSC(885)",
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
},
[CX23885_BOARD_HAUPPAUGE_HVR1265_K4] = {
.name = "Hauppauge WinTV-HVR-1265(161111)",
.porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
.tuner_type = TUNER_ABSENT,
- .force_bff = 1,
.input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN4_CH2 |
@@ -2350,6 +2390,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 494751a067a3..45c2f4afceb8 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -2314,6 +2314,12 @@ static int dvb_register(struct cx23885_tsport *port)
goto frontend_detach;
}
port->i2c_client_tuner = client_tuner;
+
+ dev->ts1.analog_fe.tuner_priv = client_tuner;
+ memcpy(&dev->ts1.analog_fe.ops.tuner_ops,
+ &fe0->dvb.frontend->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
break;
}
break;
@@ -2367,6 +2373,16 @@ static int dvb_register(struct cx23885_tsport *port)
goto frontend_detach;
}
port->i2c_client_tuner = client_tuner;
+
+ /* we only attach the tuner for analog on the 888 version */
+ if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB) {
+ pr_info("%s(): QUADHD_DVB analog setup\n",
+ __func__);
+ dev->ts1.analog_fe.tuner_priv = client_tuner;
+ memcpy(&dev->ts1.analog_fe.ops.tuner_ops,
+ &fe0->dvb.frontend->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ }
break;
/* port c - terrestrial/cable */
@@ -2456,6 +2472,16 @@ static int dvb_register(struct cx23885_tsport *port)
goto frontend_detach;
}
port->i2c_client_tuner = client_tuner;
+
+ /* we only attach the tuner for analog on the 888 version */
+ if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC) {
+ pr_info("%s(): QUADHD_ATSC analog setup\n",
+ __func__);
+ dev->ts1.analog_fe.tuner_priv = client_tuner;
+ memcpy(&dev->ts1.analog_fe.ops.tuner_ops,
+ &fe0->dvb.frontend->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ }
break;
/* port c - terrestrial/cable */
@@ -2527,6 +2553,11 @@ static int dvb_register(struct cx23885_tsport *port)
goto frontend_detach;
}
port->i2c_client_tuner = client_tuner;
+
+ dev->ts1.analog_fe.tuner_priv = client_tuner;
+ memcpy(&dev->ts1.analog_fe.ops.tuner_ops,
+ &fe0->dvb.frontend->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
break;
}
break;
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 000c108b94fd..440d108b7ddd 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -253,7 +253,10 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1265_K4) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR5525) ||
(dev->board == CX23885_BOARD_MYGICA_X8507) ||
(dev->board == CX23885_BOARD_AVERMEDIA_HC81R) ||
(dev->board == CX23885_BOARD_VIEWCAST_260E) ||
@@ -636,8 +639,18 @@ static int vidioc_querycap(struct file *file, void *priv,
V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE |
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE |
V4L2_CAP_DEVICE_CAPS;
- if (dev->tuner_type != TUNER_ABSENT)
+ switch (dev->board) { /* i2c device tuners */
+ case CX23885_BOARD_HAUPPAUGE_HVR1265_K4:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
cap->capabilities |= V4L2_CAP_TUNER;
+ break;
+ default:
+ if (dev->tuner_type != TUNER_ABSENT)
+ cap->capabilities |= V4L2_CAP_TUNER;
+ break;
+ }
return 0;
}
@@ -883,8 +896,17 @@ static int vidioc_g_tuner(struct file *file, void *priv,
{
struct cx23885_dev *dev = video_drvdata(file);
- if (dev->tuner_type == TUNER_ABSENT)
- return -EINVAL;
+ switch (dev->board) { /* i2c device tuners */
+ case CX23885_BOARD_HAUPPAUGE_HVR1265_K4:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
+ break;
+ default:
+ if (dev->tuner_type == TUNER_ABSENT)
+ return -EINVAL;
+ break;
+ }
if (0 != t->index)
return -EINVAL;
@@ -899,8 +921,17 @@ static int vidioc_s_tuner(struct file *file, void *priv,
{
struct cx23885_dev *dev = video_drvdata(file);
- if (dev->tuner_type == TUNER_ABSENT)
- return -EINVAL;
+ switch (dev->board) { /* i2c device tuners */
+ case CX23885_BOARD_HAUPPAUGE_HVR1265_K4:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
+ break;
+ default:
+ if (dev->tuner_type == TUNER_ABSENT)
+ return -EINVAL;
+ break;
+ }
if (0 != t->index)
return -EINVAL;
/* Update the A/V core */
@@ -914,9 +945,17 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct cx23885_dev *dev = video_drvdata(file);
- if (dev->tuner_type == TUNER_ABSENT)
- return -EINVAL;
-
+ switch (dev->board) { /* i2c device tuners */
+ case CX23885_BOARD_HAUPPAUGE_HVR1265_K4:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
+ break;
+ default:
+ if (dev->tuner_type == TUNER_ABSENT)
+ return -EINVAL;
+ break;
+ }
f->type = V4L2_TUNER_ANALOG_TV;
f->frequency = dev->freq;
@@ -930,8 +969,17 @@ static int cx23885_set_freq(struct cx23885_dev *dev, const struct v4l2_frequency
struct v4l2_ctrl *mute;
int old_mute_val = 1;
- if (dev->tuner_type == TUNER_ABSENT)
- return -EINVAL;
+ switch (dev->board) { /* i2c device tuners */
+ case CX23885_BOARD_HAUPPAUGE_HVR1265_K4:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
+ break;
+ default:
+ if (dev->tuner_type == TUNER_ABSENT)
+ return -EINVAL;
+ break;
+ }
if (unlikely(f->tuner != 0))
return -EINVAL;
@@ -996,7 +1044,10 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
- (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1265_K4))
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1265_K4) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR5525) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC))
fe = &dev->ts1.analog_fe;
if (fe && fe->ops.tuner_ops.set_analog_params) {
@@ -1027,6 +1078,9 @@ int cx23885_set_frequency(struct file *file, void *priv,
case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1265_K4:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
ret = cx23885_set_freq_via_ops(dev, f);
break;
default:
@@ -1302,8 +1356,18 @@ int cx23885_video_register(struct cx23885_dev *dev)
dev->video_dev->queue = &dev->vb2_vidq;
dev->video_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE;
- if (dev->tuner_type != TUNER_ABSENT)
+ switch (dev->board) { /* i2c device tuners */
+ case CX23885_BOARD_HAUPPAUGE_HVR1265_K4:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
dev->video_dev->device_caps |= V4L2_CAP_TUNER;
+ break;
+ default:
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->video_dev->device_caps |= V4L2_CAP_TUNER;
+ }
+
err = video_register_device(dev->video_dev, VFL_TYPE_VIDEO,
video_nr[dev->nr]);
if (err < 0) {
@@ -1320,8 +1384,17 @@ int cx23885_video_register(struct cx23885_dev *dev)
dev->vbi_dev->queue = &dev->vb2_vbiq;
dev->vbi_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE;
- if (dev->tuner_type != TUNER_ABSENT)
+ switch (dev->board) { /* i2c device tuners */
+ case CX23885_BOARD_HAUPPAUGE_HVR1265_K4:
+ case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
dev->vbi_dev->device_caps |= V4L2_CAP_TUNER;
+ break;
+ default:
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->vbi_dev->device_caps |= V4L2_CAP_TUNER;
+ }
err = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0) {
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index dcadf78657d6..48c8a3429542 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -1070,8 +1070,7 @@ void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
mutex_lock(&devlist);
cx88_ir_fini(core);
if (core->i2c_rc == 0) {
- if (core->i2c_rtc)
- i2c_unregister_device(core->i2c_rtc);
+ i2c_unregister_device(core->i2c_rtc);
i2c_del_adapter(&core->i2c_adap);
}
list_del(&core->devlist);
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index c7c2acd55266..7e0fed9cd200 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -638,7 +638,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
I2C_SMBUS_READ, 0,
I2C_SMBUS_QUICK, NULL) >= 0) {
info.addr = *addrp;
- i2c_new_device(&core->i2c_adap, &info);
+ i2c_new_client_device(&core->i2c_adap, &info);
break;
}
}
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 6aabc45aa93c..ba0e9660a047 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1385,7 +1385,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
};
request_module("rtc-isl1208");
- core->i2c_rtc = i2c_new_device(&core->i2c_adap, &rtc_info);
+ core->i2c_rtc = i2c_new_client_device(&core->i2c_adap, &rtc_info);
}
/* fall-through */
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
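
Both cx88 hunks rely on the semantics of the newer API: i2c_new_client_device() returns an ERR_PTR() instead of NULL on failure, and i2c_unregister_device() ignores NULL and error pointers, which is why the explicit check in cx88_core_put() could be dropped. A caller that cares about the outcome would check it roughly like this sketch, which reuses the surrounding cx88 names:

struct i2c_client *client;

client = i2c_new_client_device(&core->i2c_adap, &rtc_info);
if (IS_ERR(client))
        dev_warn(&pci_dev->dev, "RTC not instantiated: %ld\n",
                 PTR_ERR(client));
else
        core->i2c_rtc = client;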
diff --git a/drivers/media/pci/ddbridge/Kconfig b/drivers/media/pci/ddbridge/Kconfig
index dab34fb85c09..169efd558e45 100644
--- a/drivers/media/pci/ddbridge/Kconfig
+++ b/drivers/media/pci/ddbridge/Kconfig
@@ -15,7 +15,6 @@ config DVB_DDBRIDGE
select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
select DVB_MXL5XX if MEDIA_SUBDRV_AUTOSELECT
select DVB_CXD2099 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_DUMMY_FE if MEDIA_SUBDRV_AUTOSELECT
help
Support for cards with the Digital Devices PCI express bridge:
- Octopus PCIe Bridge
diff --git a/drivers/media/pci/ddbridge/Makefile b/drivers/media/pci/ddbridge/Makefile
index 2b77c8d0eb2e..5e7eab81173b 100644
--- a/drivers/media/pci/ddbridge/Makefile
+++ b/drivers/media/pci/ddbridge/Makefile
@@ -7,7 +7,7 @@ ddbridge-objs := ddbridge-main.o ddbridge-core.o ddbridge-ci.o \
ddbridge-hw.o ddbridge-i2c.o ddbridge-max.o ddbridge-mci.o \
ddbridge-sx8.o
-obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o
+obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o ddbridge-dummy-fe.o
ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
ccflags-y += -I $(srctree)/drivers/media/tuners/
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 7a2d19682fe3..7cabb9e9ffe2 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -50,7 +50,7 @@
#include "stv6111.h"
#include "lnbh25.h"
#include "cxd2099.h"
-#include "dvb_dummy_fe.h"
+#include "ddbridge-dummy-fe.h"
/****************************************************************************/
@@ -1265,7 +1265,7 @@ static int demod_attach_dummy(struct ddb_input *input)
struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
struct device *dev = input->port->dev->dev;
- dvb->fe = dvb_attach(dvb_dummy_fe_qam_attach);
+ dvb->fe = dvb_attach(ddbridge_dummy_fe_qam_attach);
if (!dvb->fe) {
dev_err(dev, "QAM dummy attach failed!\n");
return -ENODEV;
diff --git a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
new file mode 100644
index 000000000000..6868a0c4fc82
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Dummy Frontend
+ *
+ * Written by Emard <emard@softhome.net>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <media/dvb_frontend.h>
+#include "ddbridge-dummy-fe.h"
+
+struct ddbridge_dummy_fe_state {
+ struct dvb_frontend frontend;
+};
+
+static int ddbridge_dummy_fe_read_status(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ *status = FE_HAS_SIGNAL
+ | FE_HAS_CARRIER
+ | FE_HAS_VITERBI
+ | FE_HAS_SYNC
+ | FE_HAS_LOCK;
+
+ return 0;
+}
+
+static int ddbridge_dummy_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ *ber = 0;
+ return 0;
+}
+
+static int ddbridge_dummy_fe_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ *strength = 0;
+ return 0;
+}
+
+static int ddbridge_dummy_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ *snr = 0;
+ return 0;
+}
+
+static int ddbridge_dummy_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ *ucblocks = 0;
+ return 0;
+}
+
+/*
+ * Should only be implemented if it actually reads something from the hardware.
+ * Also, it should check for the locks, in order to avoid reporting wrong data
+ * to userspace.
+ */
+static int ddbridge_dummy_fe_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
+{
+ return 0;
+}
+
+static int ddbridge_dummy_fe_set_frontend(struct dvb_frontend *fe)
+{
+ if (fe->ops.tuner_ops.set_params) {
+ fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ return 0;
+}
+
+static int ddbridge_dummy_fe_sleep(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int ddbridge_dummy_fe_init(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static void ddbridge_dummy_fe_release(struct dvb_frontend *fe)
+{
+ struct ddbridge_dummy_fe_state *state = fe->demodulator_priv;
+
+ kfree(state);
+}
+
+static const struct dvb_frontend_ops ddbridge_dummy_fe_qam_ops;
+
+struct dvb_frontend *ddbridge_dummy_fe_qam_attach(void)
+{
+ struct ddbridge_dummy_fe_state *state = NULL;
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct ddbridge_dummy_fe_state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops,
+ &ddbridge_dummy_fe_qam_ops,
+ sizeof(struct dvb_frontend_ops));
+
+ state->frontend.demodulator_priv = state;
+ return &state->frontend;
+}
+EXPORT_SYMBOL(ddbridge_dummy_fe_qam_attach);
+
+static const struct dvb_frontend_ops ddbridge_dummy_fe_qam_ops = {
+ .delsys = { SYS_DVBC_ANNEX_A },
+ .info = {
+ .name = "ddbridge dummy DVB-C",
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
+ /* symbol_rate_min: SACLK/64 == (XIN/2)/64 */
+ .symbol_rate_min = (57840000 / 2) / 64,
+ .symbol_rate_max = (57840000 / 2) / 4, /* SACLK/4 */
+ .caps = FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_INVERSION_AUTO
+ },
+
+ .release = ddbridge_dummy_fe_release,
+
+ .init = ddbridge_dummy_fe_init,
+ .sleep = ddbridge_dummy_fe_sleep,
+
+ .set_frontend = ddbridge_dummy_fe_set_frontend,
+ .get_frontend = ddbridge_dummy_fe_get_frontend,
+
+ .read_status = ddbridge_dummy_fe_read_status,
+ .read_ber = ddbridge_dummy_fe_read_ber,
+ .read_signal_strength = ddbridge_dummy_fe_read_signal_strength,
+ .read_snr = ddbridge_dummy_fe_read_snr,
+ .read_ucblocks = ddbridge_dummy_fe_read_ucblocks,
+};
+
+MODULE_DESCRIPTION("ddbridge dummy Frontend");
+MODULE_AUTHOR("Emard");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.h b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.h
new file mode 100644
index 000000000000..ddf189c09524
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Dummy Frontend
+ *
+ * Written by Emard <emard@softhome.net>
+ */
+
+#ifndef DDBRIDGE_DUMMY_FE_H
+#define DDBRIDGE_DUMMY_FE_H
+
+#include <linux/dvb/frontend.h>
+#include <media/dvb_frontend.h>
+
+struct dvb_frontend *ddbridge_dummy_fe_qam_attach(void);
+
+#endif // DDBRIDGE_DUMMY_FE_H
diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
index f35bba16b60e..82d7f17e6a02 100644
--- a/drivers/media/pci/intel/ipu3/Kconfig
+++ b/drivers/media/pci/intel/ipu3/Kconfig
@@ -2,9 +2,9 @@
config VIDEO_IPU3_CIO2
tristate "Intel ipu3-cio2 driver"
depends on VIDEO_V4L2 && PCI
- depends on VIDEO_V4L2_SUBDEV_API
depends on (X86 && ACPI) || COMPILE_TEST
- depends on MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
select VIDEOBUF2_DMA_SG
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 137853944e46..35dccb31174c 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -920,14 +920,15 @@ static int ivtv_g_selection(struct file *file, void *fh,
static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
static const struct v4l2_fmtdesc hm12 = {
- 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
- "HM12 (YUV 4:2:0)", V4L2_PIX_FMT_HM12,
- { 0, 0, 0, 0 }
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "HM12 (YUV 4:2:0)",
+ .pixelformat = V4L2_PIX_FMT_HM12,
};
static const struct v4l2_fmtdesc mpeg = {
- 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FMT_FLAG_COMPRESSED,
- "MPEG", V4L2_PIX_FMT_MPEG,
- { 0, 0, 0, 0 }
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .description = "MPEG",
+ .pixelformat = V4L2_PIX_FMT_MPEG,
};
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
@@ -946,14 +947,15 @@ static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
static const struct v4l2_fmtdesc hm12 = {
- 0, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0,
- "HM12 (YUV 4:2:0)", V4L2_PIX_FMT_HM12,
- { 0, 0, 0, 0 }
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ .description = "HM12 (YUV 4:2:0)",
+ .pixelformat = V4L2_PIX_FMT_HM12,
};
static const struct v4l2_fmtdesc mpeg = {
- 0, V4L2_BUF_TYPE_VIDEO_OUTPUT, V4L2_FMT_FLAG_COMPRESSED,
- "MPEG", V4L2_PIX_FMT_MPEG,
- { 0, 0, 0, 0 }
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .description = "MPEG",
+ .pixelformat = V4L2_PIX_FMT_MPEG,
};
struct ivtv *itv = fh2id(fh)->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 5f8883031c9c..0d8372cc364a 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -92,7 +92,7 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
{
struct ivtv_dma_page_info user_dma;
struct ivtv_user_dma *dma = &itv->udma;
- int i, err;
+ int err;
IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);
@@ -111,16 +111,15 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
return -EINVAL;
}
- /* Get user pages for DMA Xfer */
- err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
+ /* Pin user pages for DMA Xfer */
+ err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
dma->map, FOLL_FORCE);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
err, user_dma.page_count);
if (err >= 0) {
- for (i = 0; i < err; i++)
- put_page(dma->map[i]);
+ unpin_user_pages(dma->map, err);
return -EINVAL;
}
return err;
@@ -130,9 +129,7 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
/* Fill SG List with new values */
if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
- for (i = 0; i < dma->page_count; i++) {
- put_page(dma->map[i]);
- }
+ unpin_user_pages(dma->map, dma->page_count);
dma->page_count = 0;
return -ENOMEM;
}
@@ -153,7 +150,6 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
void ivtv_udma_unmap(struct ivtv *itv)
{
struct ivtv_user_dma *dma = &itv->udma;
- int i;
IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");
@@ -169,10 +165,7 @@ void ivtv_udma_unmap(struct ivtv *itv)
/* sync DMA */
ivtv_udma_sync_for_cpu(itv);
- /* Release User Pages */
- for (i = 0; i < dma->page_count; i++) {
- put_page(dma->map[i]);
- }
+ unpin_user_pages(dma->map, dma->page_count);
dma->page_count = 0;
}
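
The ivtv changes follow the tree-wide migration from get_user_pages()/put_page() to pin_user_pages()/unpin_user_pages() for pages used as DMA targets. A condensed sketch of the pattern, with a hypothetical helper name:

/* Pages pinned with pin_user_pages_unlocked() must be released with
 * unpin_user_pages(), never with put_page(). */
static long example_pin_for_dma(unsigned long uaddr, unsigned long nr_pages,
                                struct page **pages)
{
        long pinned;

        pinned = pin_user_pages_unlocked(uaddr, nr_pages, pages, FOLL_FORCE);
        if (pinned != (long)nr_pages) {
                /* Partial pin: release whatever was pinned and bail out. */
                if (pinned > 0)
                        unpin_user_pages(pages, pinned);
                return pinned < 0 ? pinned : -EINVAL;
        }

        /* ... map, run and sync the DMA transfer here ... */

        unpin_user_pages(pages, nr_pages);
        return 0;
}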
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index cd2fe2d444c0..5f7dc9771f8d 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -30,7 +30,6 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
struct yuv_playback_info *yi = &itv->yuv_info;
u8 frame = yi->draw_frame;
struct yuv_frame_info *f = &yi->new_frame_info[frame];
- int i;
int y_pages, uv_pages;
unsigned long y_buffer_offset, uv_buffer_offset;
int y_decode_height, uv_decode_height, y_size;
@@ -62,12 +61,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
ivtv_udma_get_page_info (&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
- /* Get user pages for DMA Xfer */
- y_pages = get_user_pages_unlocked(y_dma.uaddr,
+ /* Pin user pages for DMA Xfer */
+ y_pages = pin_user_pages_unlocked(y_dma.uaddr,
y_dma.page_count, &dma->map[0], FOLL_FORCE);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
- uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
+ uv_pages = pin_user_pages_unlocked(uv_dma.uaddr,
uv_dma.page_count, &dma->map[y_pages],
FOLL_FORCE);
}
@@ -81,8 +80,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
uv_pages, uv_dma.page_count);
if (uv_pages >= 0) {
- for (i = 0; i < uv_pages; i++)
- put_page(dma->map[y_pages + i]);
+ unpin_user_pages(&dma->map[y_pages], uv_pages);
rc = -EFAULT;
} else {
rc = uv_pages;
@@ -93,8 +91,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
y_pages, y_dma.page_count);
}
if (y_pages >= 0) {
- for (i = 0; i < y_pages; i++)
- put_page(dma->map[i]);
+ unpin_user_pages(dma->map, y_pages);
/*
* Inherit the -EFAULT from rc's
* initialization, but allow it to be
@@ -112,9 +109,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
/* Fill & map SG List */
if (ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0)) < 0) {
IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");
- for (i = 0; i < dma->page_count; i++) {
- put_page(dma->map[i]);
- }
+ unpin_user_pages(dma->map, dma->page_count);
dma->page_count = 0;
return -ENOMEM;
}
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 0c2859844081..e2d56dca5be4 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -281,10 +281,10 @@ static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv,
/* Map User DMA */
if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) {
mutex_unlock(&itv->udma.lock);
- IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with get_user_pages: %d bytes, %d pages returned\n",
+ IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with pin_user_pages: %d bytes, %d pages returned\n",
size_in_bytes, itv->udma.page_count);
- /* get_user_pages must have failed completely */
+ /* pin_user_pages must have failed completely */
return -EIO;
}
diff --git a/drivers/media/pci/mantis/mantis_dvb.c b/drivers/media/pci/mantis/mantis_dvb.c
index e78ca1f26e68..2da94be5b373 100644
--- a/drivers/media/pci/mantis/mantis_dvb.c
+++ b/drivers/media/pci/mantis/mantis_dvb.c
@@ -135,7 +135,7 @@ static int mantis_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
int mantis_dvb_init(struct mantis_pci *mantis)
{
struct mantis_hwconfig *config = mantis->hwconfig;
- int result = -1;
+ int result;
dprintk(MANTIS_DEBUG, 1, "dvb_register_adapter");
diff --git a/drivers/media/pci/meye/Kconfig b/drivers/media/pci/meye/Kconfig
index b37da612dd0c..fed1f4a01817 100644
--- a/drivers/media/pci/meye/Kconfig
+++ b/drivers/media/pci/meye/Kconfig
@@ -7,7 +7,7 @@ config VIDEO_MEYE
help
This is the video4linux driver for the Motion Eye camera found
in the Vaio Picturebook laptops. Please read the material in
- <file:Documentation/media/v4l-drivers/meye.rst> for more information.
+ <file:Documentation/admin-guide/media/meye.rst> for more information.
If you say Y or M here, you need to say Y or M to "Sony Laptop
Extras" in the misc device section.
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 9aea7c30380b..8610eb473b39 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -982,7 +982,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
if (dev->init_data.name)
info.platform_data = &dev->init_data;
- i2c_new_device(&dev->i2c_adap, &info);
+ i2c_new_client_device(&dev->i2c_adap, &info);
}
static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
index 011b766f0bff..4dd98f94a91e 100644
--- a/drivers/media/pci/sta2x11/Kconfig
+++ b/drivers/media/pci/sta2x11/Kconfig
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config STA2X11_VIP
tristate "STA2X11 VIP Video For Linux"
+ depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS && I2C
depends on STA2X11 || COMPILE_TEST
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
select VIDEOBUF2_DMA_CONTIG
- depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS
- depends on VIDEO_V4L2_SUBDEV_API
- depends on I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
Say Y for support for STA2X11 VIP (Video Input Port) capture
device.
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index e01bbb9dd1c1..c57ee78fa99d 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -5,7 +5,6 @@
menuconfig V4L_PLATFORM_DRIVERS
bool "V4L platform devices"
- depends on MEDIA_CAMERA_SUPPORT
help
Say Y here to enable support for platform-specific V4L drivers.
@@ -15,7 +14,7 @@ source "drivers/media/platform/marvell-ccic/Kconfig"
config VIDEO_VIA_CAMERA
tristate "VIAFB camera controller support"
- depends on FB_VIA
+ depends on FB_VIA && VIDEO_V4L2
select VIDEOBUF2_DMA_SG
select VIDEO_OV7670
help
@@ -43,7 +42,6 @@ config VIDEO_ASPEED
config VIDEO_SH_VOU
tristate "SuperH VOU video output driver"
- depends on MEDIA_CAMERA_SUPPORT
depends on VIDEO_DEV && I2C
depends on ARCH_SHMOBILE || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
@@ -65,7 +63,9 @@ config VIDEO_VIU
config VIDEO_MUX
tristate "Video Multiplexer"
select MULTIPLEXER
- depends on VIDEO_V4L2 && OF && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2 && OF
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select REGMAP
select V4L2_FWNODE
help
@@ -73,10 +73,12 @@ config VIDEO_MUX
config VIDEO_OMAP3
tristate "OMAP 3 Camera support"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && I2C
depends on (ARCH_OMAP3 && OMAP_IOMMU) || COMPILE_TEST
depends on COMMON_CLK && OF
select ARM_DMA_USE_IOMMU if OMAP_IOMMU
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select MFD_SYSCON
select V4L2_FWNODE
@@ -101,16 +103,19 @@ config VIDEO_PXA27x
config VIDEO_QCOM_CAMSS
tristate "Qualcomm V4L2 Camera Subsystem driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_SG
select V4L2_FWNODE
config VIDEO_S3C_CAMIF
tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- depends on PM
+ depends on VIDEO_V4L2 && I2C && PM
depends on ARCH_S3C64XX || PLAT_S3C24XX || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
help
This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
@@ -121,9 +126,10 @@ config VIDEO_S3C_CAMIF
config VIDEO_STM32_DCMI
tristate "STM32 Digital Camera Memory Interface (DCMI) support"
- depends on VIDEO_V4L2 && OF && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2 && OF
depends on ARCH_STM32 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
+ select MEDIA_CONTROLLER
select V4L2_FWNODE
help
This module makes the STM32 Digital Camera Memory Interface (DCMI)
@@ -150,7 +156,9 @@ source "drivers/media/platform/sunxi/Kconfig"
config VIDEO_TI_CAL
tristate "TI CAL (Camera Adaptation Layer) driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
depends on SOC_DRA7XX || ARCH_K3 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
@@ -165,7 +173,6 @@ endif # V4L_PLATFORM_DRIVERS
menuconfig V4L_MEM2MEM_DRIVERS
bool "Memory-to-memory multimedia devices"
depends on VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
Say Y here to enable selecting drivers for V4L devices that
use system memory for both source and destination buffers, as opposed
@@ -180,6 +187,7 @@ config VIDEO_CODA
select SRAM
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
+ select V4L2_JPEG_HELPER
select V4L2_MEM2MEM_DEV
select GENERIC_ALLOCATOR
help
@@ -385,15 +393,6 @@ config VIDEO_STI_DELTA_DRIVER
endif # VIDEO_STI_DELTA
-config VIDEO_SH_VEU
- tristate "SuperH VEU mem2mem video processing driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- help
- Support for the Video Engine Unit (VEU) on SuperH and
- SH-Mobile SoCs.
-
config VIDEO_RENESAS_FDP1
tristate "Renesas Fine Display Processor"
depends on VIDEO_DEV && VIDEO_V4L2
@@ -435,9 +434,11 @@ config VIDEO_RENESAS_FCP
config VIDEO_RENESAS_VSP1
tristate "Renesas VSP1 Video Processing Engine"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2
depends on ARCH_RENESAS || COMPILE_TEST
depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
help
@@ -532,29 +533,6 @@ config VIDEO_TI_SC
config VIDEO_TI_CSC
tristate
-menuconfig V4L_TEST_DRIVERS
- bool "Media test drivers"
- depends on MEDIA_CAMERA_SUPPORT
-
-if V4L_TEST_DRIVERS
-
-source "drivers/media/platform/vimc/Kconfig"
-
-source "drivers/media/platform/vivid/Kconfig"
-
-config VIDEO_VIM2M
- tristate "Virtual Memory-to-Memory Driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- select VIDEOBUF2_VMALLOC
- select V4L2_MEM2MEM_DEV
- help
- This is a virtual test device for the memory-to-memory driver
- framework.
-
-source "drivers/media/platform/vicodec/Kconfig"
-
-endif #V4L_TEST_DRIVERS
-
menuconfig DVB_PLATFORM_DRIVERS
bool "DVB platform devices"
depends on MEDIA_DIGITAL_TV_SUPPORT
@@ -565,131 +543,6 @@ if DVB_PLATFORM_DRIVERS
source "drivers/media/platform/sti/c8sectpfe/Kconfig"
endif #DVB_PLATFORM_DRIVERS
-menuconfig CEC_PLATFORM_DRIVERS
- bool "CEC platform devices"
- depends on MEDIA_CEC_SUPPORT
-
-if CEC_PLATFORM_DRIVERS
-
-config VIDEO_CROS_EC_CEC
- tristate "ChromeOS EC CEC driver"
- depends on CROS_EC
- select CEC_CORE
- select CEC_NOTIFIER
- select CROS_EC_PROTO
- help
- If you say yes here you will get support for the
- ChromeOS Embedded Controller's CEC.
- The CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
-config VIDEO_MESON_AO_CEC
- tristate "Amlogic Meson AO CEC driver"
- depends on ARCH_MESON || COMPILE_TEST
- select CEC_CORE
- select CEC_NOTIFIER
- help
- This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the
- generic CEC framework interface.
- CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
-config VIDEO_MESON_G12A_AO_CEC
- tristate "Amlogic Meson G12A AO CEC driver"
- depends on ARCH_MESON || COMPILE_TEST
- depends on COMMON_CLK && OF
- select REGMAP
- select REGMAP_MMIO
- select CEC_CORE
- select CEC_NOTIFIER
- ---help---
- This is a driver for Amlogic Meson G12A SoCs AO CEC interface.
- This driver is for the new AO-CEC module found in G12A SoCs,
- usually named AO_CEC_B in documentation.
- It uses the generic CEC framework interface.
- CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
-config CEC_GPIO
- tristate "Generic GPIO-based CEC driver"
- depends on PREEMPTION || COMPILE_TEST
- select CEC_CORE
- select CEC_PIN
- select CEC_NOTIFIER
- select GPIOLIB
- help
- This is a generic GPIO-based CEC driver.
- The CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
-config VIDEO_SAMSUNG_S5P_CEC
- tristate "Samsung S5P CEC driver"
- depends on ARCH_EXYNOS || COMPILE_TEST
- select CEC_CORE
- select CEC_NOTIFIER
- help
- This is a driver for Samsung S5P HDMI CEC interface. It uses the
- generic CEC framework interface.
- CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
-config VIDEO_STI_HDMI_CEC
- tristate "STMicroelectronics STiH4xx HDMI CEC driver"
- depends on ARCH_STI || COMPILE_TEST
- select CEC_CORE
- select CEC_NOTIFIER
- help
- This is a driver for STIH4xx HDMI CEC interface. It uses the
- generic CEC framework interface.
- CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
-config VIDEO_STM32_HDMI_CEC
- tristate "STMicroelectronics STM32 HDMI CEC driver"
- depends on ARCH_STM32 || COMPILE_TEST
- select REGMAP
- select REGMAP_MMIO
- select CEC_CORE
- help
- This is a driver for the STM32 HDMI CEC interface. It uses the
- generic CEC framework interface.
- CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
-config VIDEO_TEGRA_HDMI_CEC
- tristate "Tegra HDMI CEC driver"
- depends on ARCH_TEGRA || COMPILE_TEST
- select CEC_CORE
- select CEC_NOTIFIER
- help
- This is a driver for the Tegra HDMI CEC interface. It uses the
- generic CEC framework interface.
- The CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
-config VIDEO_SECO_CEC
- tristate "SECO Boards HDMI CEC driver"
- depends on (X86 || IA64) || COMPILE_TEST
- depends on PCI && DMI
- select CEC_CORE
- select CEC_NOTIFIER
- help
- This is a driver for SECO Boards integrated CEC interface.
- Selecting it will enable support for this device.
- CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
-config VIDEO_SECO_RC
- bool "SECO Boards IR RC5 support"
- depends on VIDEO_SECO_CEC
- depends on RC_CORE=y || RC_CORE = VIDEO_SECO_CEC
- help
- If you say yes here you will get support for the
- SECO Boards Consumer-IR in seco-cec driver.
- The embedded controller supports RC5 protocol only, default mapping
- is set to rc-hauppauge.
-
-endif #CEC_PLATFORM_DRIVERS
-
menuconfig SDR_PLATFORM_DRIVERS
bool "SDR platform devices"
depends on MEDIA_SDR_SUPPORT
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index d13db96e3015..62b6cdc8c730 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -14,11 +14,6 @@ obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
-obj-$(CONFIG_VIDEO_VIMC) += vimc/
-obj-$(CONFIG_VIDEO_VIVID) += vivid/
-obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
-obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
-
obj-y += ti-vpe/
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
@@ -26,10 +21,6 @@ obj-$(CONFIG_VIDEO_CODA) += coda/
obj-$(CONFIG_VIDEO_IMX_PXP) += imx-pxp.o
-obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
-
-obj-$(CONFIG_CEC_GPIO) += cec-gpio/
-
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
obj-$(CONFIG_VIDEO_MUX) += video-mux.o
@@ -40,22 +31,16 @@ obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d/
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec/
obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC) += exynos-gsc/
obj-$(CONFIG_VIDEO_STI_BDISP) += sti/bdisp/
obj-$(CONFIG_VIDEO_STI_HVA) += sti/hva/
obj-$(CONFIG_DVB_C8SECTPFE) += sti/c8sectpfe/
-obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += sti/cec/
obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
-obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra-cec/
-
obj-y += stm32/
-obj-$(CONFIG_VIDEO_SECO_CEC) += seco-cec/
-
obj-y += davinci/
obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
@@ -94,8 +79,4 @@ obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss/
obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
-obj-y += meson/
-
-obj-y += cros-ec-cec/
-
obj-y += sunxi/
diff --git a/drivers/media/platform/am437x/Kconfig b/drivers/media/platform/am437x/Kconfig
index d6f2e3d0cbef..9ef898f512de 100644
--- a/drivers/media/platform/am437x/Kconfig
+++ b/drivers/media/platform/am437x/Kconfig
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_AM437X_VPFE
tristate "TI AM437x VPFE video capture driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2
depends on SOC_AM43XX || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
help
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
index 5ae3f60b81b1..1850fe7f9360 100644
--- a/drivers/media/platform/atmel/Kconfig
+++ b/drivers/media/platform/atmel/Kconfig
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_ATMEL_ISC
tristate "ATMEL Image Sensor Controller (ISC) support"
- depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && COMMON_CLK
depends on ARCH_AT91 || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select REGMAP_MMIO
select V4L2_FWNODE
diff --git a/drivers/media/platform/cadence/Kconfig b/drivers/media/platform/cadence/Kconfig
index c154e368d701..80cf601323ce 100644
--- a/drivers/media/platform/cadence/Kconfig
+++ b/drivers/media/platform/cadence/Kconfig
@@ -13,8 +13,8 @@ if VIDEO_CADENCE
config VIDEO_CADENCE_CSI2RX
tristate "Cadence MIPI-CSI2 RX Controller"
depends on VIDEO_V4L2
- depends on MEDIA_CONTROLLER
- depends on VIDEO_V4L2_SUBDEV_API
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
Support for the Cadence MIPI CSI2 Receiver controller.
@@ -25,8 +25,8 @@ config VIDEO_CADENCE_CSI2RX
config VIDEO_CADENCE_CSI2TX
tristate "Cadence MIPI-CSI2 TX Controller"
depends on VIDEO_V4L2
- depends on MEDIA_CONTROLLER
- depends on VIDEO_V4L2_SUBDEV_API
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
Support for the Cadence MIPI CSI2 Transceiver controller.
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 3443396ba5f3..b021604eceaa 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1215,7 +1215,8 @@ static int coda_start_encoding(struct coda_ctx *ctx)
coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
}
- if (ctx->params.bitrate) {
+ if (ctx->params.bitrate && (ctx->params.frame_rc_enable ||
+ ctx->params.mb_rc_enable)) {
ctx->params.bitrate_changed = false;
ctx->params.h264_intra_qp_changed = false;
@@ -1276,7 +1277,11 @@ static int coda_start_encoding(struct coda_ctx *ctx)
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
- coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_INTERVAL_MODE);
+ if (ctx->params.frame_rc_enable && !ctx->params.mb_rc_enable)
+ value = 1;
+ else
+ value = 0;
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_INTERVAL_MODE);
coda_setup_iram(ctx);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index d0d093dd8f7c..6f41f74d492c 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -159,6 +159,7 @@ static const struct coda_codec coda9_codecs[] = {
CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA9_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_DECODE_MJPG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420, 8192, 8192),
};
struct coda_video_device {
@@ -252,6 +253,22 @@ static const struct coda_video_device coda9_jpeg_encoder = {
},
};
+static const struct coda_video_device coda9_jpeg_decoder = {
+ .name = "coda-jpeg-decoder",
+ .type = CODA_INST_DECODER,
+ .ops = &coda9_jpeg_decode_ops,
+ .direct = true,
+ .src_formats = {
+ V4L2_PIX_FMT_JPEG,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_YUV422P,
+ },
+};
+
static const struct coda_video_device *codadx6_video_devices[] = {
&coda_bit_encoder,
};
@@ -270,6 +287,7 @@ static const struct coda_video_device *coda7_video_devices[] = {
static const struct coda_video_device *coda9_video_devices[] = {
&coda9_jpeg_encoder,
+ &coda9_jpeg_decoder,
&coda_bit_encoder,
&coda_bit_decoder,
};
@@ -411,6 +429,12 @@ static int coda_querycap(struct file *file, void *priv,
return 0;
}
+static const u32 coda_formats_420[CODA_MAX_FORMATS] = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+};
+
static int coda_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -421,10 +445,33 @@ static int coda_enum_fmt(struct file *file, void *priv,
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
formats = cvd->src_formats;
- else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ struct coda_q_data *q_data_src;
+ struct vb2_queue *src_vq;
+
formats = cvd->dst_formats;
- else
+
+ /*
+ * If the source format is already fixed, only allow the same
+ * chroma subsampling.
+ */
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
+ vb2_is_streaming(src_vq)) {
+ if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420) {
+ formats = coda_formats_420;
+ } else if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422) {
+ f->pixelformat = V4L2_PIX_FMT_YUV422P;
+ return f->index ? -EINVAL : 0;
+ }
+ }
+ } else {
return -EINVAL;
+ }
if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0)
return -EINVAL;
@@ -614,12 +661,23 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
/*
* If the source format is already fixed, only allow the same output
- * resolution
+ * resolution. When decoding JPEG images, we also have to make sure to
+ * use the same chroma subsampling.
*/
src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (vb2_is_streaming(src_vq)) {
f->fmt.pix.width = q_data_src->width;
f->fmt.pix.height = q_data_src->height;
+
+ if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG) {
+ if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420 &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P)
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
+ else if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422)
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
+ }
}
f->fmt.pix.colorspace = ctx->colorspace;
@@ -637,12 +695,18 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
if (ret < 0)
return ret;
- /* The h.264 decoder only returns complete 16x16 macroblocks */
- if (codec && codec->src_fourcc == V4L2_PIX_FMT_H264) {
- f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
+ /* The decoders always write complete macroblocks or MCUs */
+ if (ctx->inst_type == CODA_INST_DECODER) {
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
- f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
- f->fmt.pix.height * 3 / 2;
+ f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
+ if (codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P) {
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 2;
+ } else {
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 3 / 2;
+ }
ret = coda_try_fmt_vdoa(ctx, f, &use_vdoa);
if (ret < 0)
@@ -747,6 +811,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
/* else fall through */
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
break;
default:
@@ -1088,6 +1153,51 @@ static int coda_try_decoder_cmd(struct file *file, void *fh,
return v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
}
+static bool coda_mark_last_meta(struct coda_ctx *ctx)
+{
+ struct coda_buffer_meta *meta;
+
+ coda_dbg(1, ctx, "marking last meta\n");
+
+ spin_lock(&ctx->buffer_meta_lock);
+ if (list_empty(&ctx->buffer_meta_list)) {
+ spin_unlock(&ctx->buffer_meta_lock);
+ return false;
+ }
+
+ meta = list_last_entry(&ctx->buffer_meta_list, struct coda_buffer_meta,
+ list);
+ meta->last = true;
+
+ spin_unlock(&ctx->buffer_meta_lock);
+ return true;
+}
+
+static bool coda_mark_last_dst_buf(struct coda_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *buf;
+ struct vb2_buffer *dst_vb;
+ struct vb2_queue *dst_vq;
+ unsigned long flags;
+
+ coda_dbg(1, ctx, "marking last capture buffer\n");
+
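+ /*
+ * The direct (non-bitstream) JPEG decoder has no buffer meta list,
+ * so flag the most recently completed capture buffer instead.
+ */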
+ dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ spin_lock_irqsave(&dst_vq->done_lock, flags);
+ if (list_empty(&dst_vq->done_list)) {
+ spin_unlock_irqrestore(&dst_vq->done_lock, flags);
+ return false;
+ }
+
+ dst_vb = list_last_entry(&dst_vq->done_list, struct vb2_buffer,
+ done_entry);
+ buf = to_vb2_v4l2_buffer(dst_vb);
+ buf->flags |= V4L2_BUF_FLAG_LAST;
+
+ spin_unlock_irqrestore(&dst_vq->done_lock, flags);
+ return true;
+}
+
static int coda_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
@@ -1120,6 +1230,8 @@ static int coda_decoder_cmd(struct file *file, void *fh,
stream_end = false;
wakeup = false;
+ mutex_lock(&ctx->wakeup_mutex);
+
buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
if (buf) {
coda_dbg(1, ctx, "marking last pending buffer\n");
@@ -1132,22 +1244,14 @@ static int coda_decoder_cmd(struct file *file, void *fh,
stream_end = true;
}
} else {
- coda_dbg(1, ctx, "marking last meta\n");
-
- /* Mark last meta */
- spin_lock(&ctx->buffer_meta_lock);
- if (!list_empty(&ctx->buffer_meta_list)) {
- struct coda_buffer_meta *meta;
-
- meta = list_last_entry(&ctx->buffer_meta_list,
- struct coda_buffer_meta,
- list);
- meta->last = true;
- stream_end = true;
- } else {
- wakeup = true;
- }
- spin_unlock(&ctx->buffer_meta_lock);
+ if (ctx->use_bit) {
+ if (coda_mark_last_meta(ctx))
+ stream_end = true;
+ else
+ wakeup = true;
+ } else {
+ if (!coda_mark_last_dst_buf(ctx))
+ wakeup = true;
+ }
}
if (stream_end) {
@@ -1164,6 +1268,7 @@ static int coda_decoder_cmd(struct file *file, void *fh,
coda_wake_up_capture_queue(ctx);
}
+ mutex_unlock(&ctx->wakeup_mutex);
break;
default:
return -EINVAL;
@@ -1894,6 +1999,42 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
}
}
+ /*
+ * Check the first input JPEG buffer to determine chroma
+ * subsampling.
+ */
+ if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG) {
+ buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ ret = coda_jpeg_decode_header(ctx, &buf->vb2_buf);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev,
+ "failed to decode JPEG header: %d\n",
+ ret);
+ goto err;
+ }
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ q_data_dst->width = round_up(q_data_src->width, 16);
+ q_data_dst->height = round_up(q_data_src->height, 16);
+ q_data_dst->bytesperline = q_data_dst->width;
+ if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420) {
+ q_data_dst->sizeimage =
+ q_data_dst->bytesperline *
+ q_data_dst->height * 3 / 2;
+ if (q_data_dst->fourcc != V4L2_PIX_FMT_YUV420)
+ q_data_dst->fourcc = V4L2_PIX_FMT_NV12;
+ } else {
+ q_data_dst->sizeimage =
+ q_data_dst->bytesperline *
+ q_data_dst->height * 2;
+ q_data_dst->fourcc = V4L2_PIX_FMT_YUV422P;
+ }
+ q_data_dst->rect.left = 0;
+ q_data_dst->rect.top = 0;
+ q_data_dst->rect.width = q_data_src->width;
+ q_data_dst->rect.height = q_data_src->height;
+ }
ctx->streamon_out = 1;
} else {
ctx->streamon_cap = 1;
@@ -2082,6 +2223,12 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
ctx->params.h264_constrained_intra_pred_flag = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ ctx->params.frame_rc_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ ctx->params.mb_rc_enable = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET:
ctx->params.h264_chroma_qp_index_offset = ctrl->val;
break;
@@ -2181,6 +2328,10 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION, 0, 1, 1,
0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
index 92234fd1f4fd..00d19859db50 100644
--- a/drivers/media/platform/coda/coda-jpeg.c
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -15,6 +15,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-fh.h>
+#include <media/v4l2-jpeg.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
@@ -23,10 +24,12 @@
#include "trace.h"
#define SOI_MARKER 0xffd8
+#define APP9_MARKER 0xffe9
#define DRI_MARKER 0xffdd
#define DQT_MARKER 0xffdb
#define DHT_MARKER 0xffc4
#define SOF_MARKER 0xffc0
+#define SOS_MARKER 0xffda
#define EOI_MARKER 0xffd9
enum {
@@ -37,6 +40,18 @@ enum {
CODA9_JPEG_FORMAT_400,
};
+struct coda_huff_tab {
+ u8 luma_dc[16 + 12];
+ u8 chroma_dc[16 + 12];
+ u8 luma_ac[16 + 162];
+ u8 chroma_ac[16 + 162];
+
+ /* DC Luma, DC Chroma, AC Luma, AC Chroma */
+ s16 min[4 * 16];
+ s16 max[4 * 16];
+ s8 ptr[4 * 16];
+};
+
#define CODA9_JPEG_ENC_HUFF_DATA_SIZE (256 + 256 + 16 + 16)
/*
@@ -247,6 +262,291 @@ bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb)
return false;
}
+static int coda9_jpeg_gen_dec_huff_tab(struct coda_ctx *ctx, int tab_num);
+
+int coda_jpeg_decode_header(struct coda_ctx *ctx, struct vb2_buffer *vb)
+{
+ struct coda_dev *dev = ctx->dev;
+ u8 *buf = vb2_plane_vaddr(vb, 0);
+ size_t len = vb2_get_plane_payload(vb, 0);
+ struct v4l2_jpeg_scan_header scan_header;
+ struct v4l2_jpeg_reference quantization_tables[4] = { };
+ struct v4l2_jpeg_reference huffman_tables[4] = { };
+ struct v4l2_jpeg_header header = {
+ .scan = &scan_header,
+ .quantization_tables = quantization_tables,
+ .huffman_tables = huffman_tables,
+ };
+ struct coda_q_data *q_data_src;
+ struct coda_huff_tab *huff_tab;
+ int i, j, ret;
+
+ ret = v4l2_jpeg_parse_header(buf, len, &header);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to parse header\n");
+ return ret;
+ }
+
+ ctx->params.jpeg_restart_interval = header.restart_interval;
+
+ /* check frame header */
+ if (header.frame.height > ctx->codec->max_h ||
+ header.frame.width > ctx->codec->max_w) {
+ v4l2_err(&dev->v4l2_dev, "invalid dimensions: %dx%d\n",
+ header.frame.width, header.frame.height);
+ return -EINVAL;
+ }
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (header.frame.height != q_data_src->height ||
+ header.frame.width != q_data_src->width) {
+ v4l2_err(&dev->v4l2_dev,
+ "dimensions don't match format: %dx%d\n",
+ header.frame.width, header.frame.height);
+ return -EINVAL;
+ }
+
+ if (header.frame.num_components != 3) {
+ v4l2_err(&dev->v4l2_dev,
+ "unsupported number of components: %d\n",
+ header.frame.num_components);
+ return -EINVAL;
+ }
+
+ /* install quantization tables */
+ if (quantization_tables[3].start) {
+ v4l2_err(&dev->v4l2_dev,
+ "only 3 quantization tables supported\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < 3; i++) {
+ if (!quantization_tables[i].start)
+ continue;
+ if (quantization_tables[i].length != 64) {
+ v4l2_err(&dev->v4l2_dev,
+ "only 8-bit quantization tables supported\n");
+ continue;
+ }
+ if (!ctx->params.jpeg_qmat_tab[i]) {
+ ctx->params.jpeg_qmat_tab[i] = kmalloc(64, GFP_KERNEL);
+ if (!ctx->params.jpeg_qmat_tab[i])
+ return -ENOMEM;
+ }
+ memcpy(ctx->params.jpeg_qmat_tab[i],
+ quantization_tables[i].start, 64);
+ }
+
+ /* install Huffman tables */
+ for (i = 0; i < 4; i++) {
+ if (!huffman_tables[i].start) {
+ v4l2_err(&dev->v4l2_dev, "missing Huffman table\n");
+ return -EINVAL;
+ }
+ /* AC tables should be 17 to 178 bytes long, DC tables 17 to 28 bytes */
+ if (huffman_tables[i].length < 17 ||
+ huffman_tables[i].length > 178 ||
+ ((i & 2) == 0 && huffman_tables[i].length > 28)) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid Huffman table %d length: %zu\n",
+ i, huffman_tables[i].length);
+ return -EINVAL;
+ }
+ }
+ huff_tab = ctx->params.jpeg_huff_tab;
+ if (!huff_tab) {
+ huff_tab = kzalloc(sizeof(struct coda_huff_tab), GFP_KERNEL);
+ if (!huff_tab)
+ return -ENOMEM;
+ ctx->params.jpeg_huff_tab = huff_tab;
+ }
+
+ memset(huff_tab, 0, sizeof(*huff_tab));
+ memcpy(huff_tab->luma_dc, huffman_tables[0].start, huffman_tables[0].length);
+ memcpy(huff_tab->chroma_dc, huffman_tables[1].start, huffman_tables[1].length);
+ memcpy(huff_tab->luma_ac, huffman_tables[2].start, huffman_tables[2].length);
+ memcpy(huff_tab->chroma_ac, huffman_tables[3].start, huffman_tables[3].length);
+
+ /* check scan header and record per-component Huffman table selectors */
+ for (i = 0; i < scan_header.num_components; i++) {
+ struct v4l2_jpeg_scan_component_spec *scan_component;
+
+ scan_component = &scan_header.component[i];
+ for (j = 0; j < header.frame.num_components; j++) {
+ if (header.frame.component[j].component_identifier ==
+ scan_component->component_selector)
+ break;
+ }
+ if (j == header.frame.num_components)
+ continue;
+
+ ctx->params.jpeg_huff_dc_index[j] =
+ scan_component->dc_entropy_coding_table_selector;
+ ctx->params.jpeg_huff_ac_index[j] =
+ scan_component->ac_entropy_coding_table_selector;
+ }
+
+ /* Generate Huffman table information */
+ for (i = 0; i < 4; i++)
+ coda9_jpeg_gen_dec_huff_tab(ctx, i);
+
+ /* start of entropy coded segment */
+ ctx->jpeg_ecs_offset = header.ecs_offset;
+
+ switch (header.frame.subsampling) {
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+ ctx->params.jpeg_chroma_subsampling = header.frame.subsampling;
+ break;
+ default:
+ v4l2_err(&dev->v4l2_dev, "chroma subsampling not supported: %d",
+ header.frame.subsampling);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void coda9_jpeg_write_huff_values(struct coda_dev *dev, u8 *bits,
+ int num_values)
+{
+ s8 *values = (s8 *)(bits + 16);
+ int huff_length, i;
+
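+ /*
+ * The first 16 bytes (BITS) give the number of codes per code
+ * length; entries beyond their sum are unused and padded with -1
+ * so the full hardware table is initialized.
+ */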
+ for (huff_length = 0, i = 0; i < 16; i++)
+ huff_length += bits[i];
+ for (i = huff_length; i < num_values; i++)
+ values[i] = -1;
+ for (i = 0; i < num_values; i++)
+ coda_write(dev, (s32)values[i], CODA9_REG_JPEG_HUFF_DATA);
+}
+
+static int coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx)
+{
+ struct coda_huff_tab *huff_tab = ctx->params.jpeg_huff_tab;
+ struct coda_dev *dev = ctx->dev;
+ s16 *huff_min = huff_tab->min;
+ s16 *huff_max = huff_tab->max;
+ s8 *huff_ptr = huff_tab->ptr;
+ int i;
+
+ /* MIN Tables */
+ coda_write(dev, 0x003, CODA9_REG_JPEG_HUFF_CTRL);
+ coda_write(dev, 0x000, CODA9_REG_JPEG_HUFF_ADDR);
+ for (i = 0; i < 4 * 16; i++)
+ coda_write(dev, (s32)huff_min[i], CODA9_REG_JPEG_HUFF_DATA);
+
+ /* MAX Tables */
+ coda_write(dev, 0x403, CODA9_REG_JPEG_HUFF_CTRL);
+ coda_write(dev, 0x440, CODA9_REG_JPEG_HUFF_ADDR);
+ for (i = 0; i < 4 * 16; i++)
+ coda_write(dev, (s32)huff_max[i], CODA9_REG_JPEG_HUFF_DATA);
+
+ /* PTR Tables */
+ coda_write(dev, 0x803, CODA9_REG_JPEG_HUFF_CTRL);
+ coda_write(dev, 0x880, CODA9_REG_JPEG_HUFF_ADDR);
+ for (i = 0; i < 4 * 16; i++)
+ coda_write(dev, (s32)huff_ptr[i], CODA9_REG_JPEG_HUFF_DATA);
+
+ /* VAL Tables: DC Luma, DC Chroma, AC Luma, AC Chroma */
+ coda_write(dev, 0xc03, CODA9_REG_JPEG_HUFF_CTRL);
+ coda9_jpeg_write_huff_values(dev, huff_tab->luma_dc, 12);
+ coda9_jpeg_write_huff_values(dev, huff_tab->chroma_dc, 12);
+ coda9_jpeg_write_huff_values(dev, huff_tab->luma_ac, 162);
+ coda9_jpeg_write_huff_values(dev, huff_tab->chroma_ac, 162);
+ coda_write(dev, 0x000, CODA9_REG_JPEG_HUFF_CTRL);
+ return 0;
+}
+
+static inline void coda9_jpeg_write_qmat_tab(struct coda_dev *dev,
+ u8 *qmat, int index)
+{
+ int i;
+
+ coda_write(dev, index | 0x3, CODA9_REG_JPEG_QMAT_CTRL);
+ for (i = 0; i < 64; i++)
+ coda_write(dev, qmat[i], CODA9_REG_JPEG_QMAT_DATA);
+ coda_write(dev, 0, CODA9_REG_JPEG_QMAT_CTRL);
+}
+
+static void coda9_jpeg_qmat_setup(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ int *qmat_index = ctx->params.jpeg_qmat_index;
+ u8 **qmat_tab = ctx->params.jpeg_qmat_tab;
+
+ coda9_jpeg_write_qmat_tab(dev, qmat_tab[qmat_index[0]], 0x00);
+ coda9_jpeg_write_qmat_tab(dev, qmat_tab[qmat_index[1]], 0x40);
+ coda9_jpeg_write_qmat_tab(dev, qmat_tab[qmat_index[2]], 0x80);
+}
+
+static void coda9_jpeg_dec_bbc_gbu_setup(struct coda_ctx *ctx,
+ struct vb2_buffer *buf, u32 ecs_offset)
+{
+ struct coda_dev *dev = ctx->dev;
+ int page_ptr, word_ptr, bit_ptr;
+ u32 bbc_base_addr, end_addr;
+ int bbc_cur_pos;
+ int ret, val;
+
+ bbc_base_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
+ end_addr = bbc_base_addr + vb2_get_plane_payload(buf, 0);
+
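+ /*
+ * Decompose the entropy coded segment offset into a 256-byte page,
+ * a word position, and a bit position for the BBC/GBU units below;
+ * odd pages and odd words are folded into the word and bit offsets
+ * (addressing scheme assumed from the register writes that follow).
+ */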
+ page_ptr = ecs_offset / 256;
+ word_ptr = (ecs_offset % 256) / 4;
+ if (page_ptr & 1)
+ word_ptr += 64;
+ bit_ptr = (ecs_offset % 4) * 8;
+ if (word_ptr & 1)
+ bit_ptr += 32;
+ word_ptr &= ~0x1;
+
+ coda_write(dev, end_addr, CODA9_REG_JPEG_BBC_WR_PTR);
+ coda_write(dev, bbc_base_addr, CODA9_REG_JPEG_BBC_BAS_ADDR);
+
+ /* Leave a margin of three 256-byte pages to avoid a BBC interrupt */
+ coda_write(dev, end_addr + 256 * 3 + 256, CODA9_REG_JPEG_BBC_END_ADDR);
+ val = DIV_ROUND_UP(vb2_plane_size(buf, 0), 256) + 3;
+ coda_write(dev, BIT(31) | val, CODA9_REG_JPEG_BBC_STRM_CTRL);
+
+ bbc_cur_pos = page_ptr;
+ coda_write(dev, bbc_cur_pos, CODA9_REG_JPEG_BBC_CUR_POS);
+ coda_write(dev, bbc_base_addr + (bbc_cur_pos << 8),
+ CODA9_REG_JPEG_BBC_EXT_ADDR);
+ coda_write(dev, (bbc_cur_pos & 1) << 6, CODA9_REG_JPEG_BBC_INT_ADDR);
+ coda_write(dev, 64, CODA9_REG_JPEG_BBC_DATA_CNT);
+ coda_write(dev, 0, CODA9_REG_JPEG_BBC_COMMAND);
+ do {
+ ret = coda_read(dev, CODA9_REG_JPEG_BBC_BUSY);
+ } while (ret == 1);
+
+ bbc_cur_pos++;
+ coda_write(dev, bbc_cur_pos, CODA9_REG_JPEG_BBC_CUR_POS);
+ coda_write(dev, bbc_base_addr + (bbc_cur_pos << 8),
+ CODA9_REG_JPEG_BBC_EXT_ADDR);
+ coda_write(dev, (bbc_cur_pos & 1) << 6, CODA9_REG_JPEG_BBC_INT_ADDR);
+ coda_write(dev, 64, CODA9_REG_JPEG_BBC_DATA_CNT);
+ coda_write(dev, 0, CODA9_REG_JPEG_BBC_COMMAND);
+ do {
+ ret = coda_read(dev, CODA9_REG_JPEG_BBC_BUSY);
+ } while (ret == 1);
+
+ bbc_cur_pos++;
+ coda_write(dev, bbc_cur_pos, CODA9_REG_JPEG_BBC_CUR_POS);
+ coda_write(dev, 1, CODA9_REG_JPEG_BBC_CTRL);
+
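+ /*
+ * Point the getbit unit at the half of its internal buffer that
+ * corresponds to an odd or even starting page (the two 64-word
+ * halves are assumed to mirror consecutive BBC pages).
+ */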
+ coda_write(dev, 0, CODA9_REG_JPEG_GBU_TT_CNT);
+ coda_write(dev, word_ptr, CODA9_REG_JPEG_GBU_WD_PTR);
+ coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBSR);
+ coda_write(dev, 127, CODA9_REG_JPEG_GBU_BBER);
+ if (page_ptr & 1) {
+ coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBIR);
+ coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBHR);
+ } else {
+ coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBIR);
+ coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBHR);
+ }
+ coda_write(dev, 4, CODA9_REG_JPEG_GBU_CTRL);
+ coda_write(dev, bit_ptr, CODA9_REG_JPEG_GBU_FF_RPTR);
+ coda_write(dev, 3, CODA9_REG_JPEG_GBU_CTRL);
+}
+
static const int bus_req_num[] = {
[CODA9_JPEG_FORMAT_420] = 2,
[CODA9_JPEG_FORMAT_422] = 3,
@@ -345,6 +645,71 @@ out:
#define DC_TABLE_INDEX1 2
#define AC_TABLE_INDEX1 3
+static u8 *coda9_jpeg_get_huff_bits(struct coda_ctx *ctx, int tab_num)
+{
+ struct coda_huff_tab *huff_tab = ctx->params.jpeg_huff_tab;
+
+ if (!huff_tab)
+ return NULL;
+
+ switch (tab_num) {
+ case DC_TABLE_INDEX0: return huff_tab->luma_dc;
+ case AC_TABLE_INDEX0: return huff_tab->luma_ac;
+ case DC_TABLE_INDEX1: return huff_tab->chroma_dc;
+ case AC_TABLE_INDEX1: return huff_tab->chroma_ac;
+ }
+
+ return NULL;
+}
+
+static int coda9_jpeg_gen_dec_huff_tab(struct coda_ctx *ctx, int tab_num)
+{
+ int ptr_cnt = 0, huff_code = 0, zero_flag = 0, data_flag = 0;
+ u8 *huff_bits;
+ s16 *huff_max;
+ s16 *huff_min;
+ s8 *huff_ptr;
+ int ofs;
+ int i;
+
+ huff_bits = coda9_jpeg_get_huff_bits(ctx, tab_num);
+ if (!huff_bits)
+ return -EINVAL;
+
+ /* DC/AC Luma, DC/AC Chroma -> DC Luma/Chroma, AC Luma/Chroma */
+ ofs = ((tab_num & 1) << 1) | ((tab_num >> 1) & 1);
+ ofs *= 16;
+
+ huff_ptr = ctx->params.jpeg_huff_tab->ptr + ofs;
+ huff_max = ctx->params.jpeg_huff_tab->max + ofs;
+ huff_min = ctx->params.jpeg_huff_tab->min + ofs;
+
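+ /*
+ * Build canonical Huffman decode tables: for each code length, min
+ * and max hold the first and last code of that length and ptr the
+ * index of its first symbol; lengths without codes are set to -1.
+ */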
+ for (i = 0; i < 16; i++) {
+ if (huff_bits[i]) {
+ huff_ptr[i] = ptr_cnt;
+ ptr_cnt += huff_bits[i];
+ huff_min[i] = huff_code;
+ huff_max[i] = huff_code + (huff_bits[i] - 1);
+ data_flag = 1;
+ zero_flag = 0;
+ } else {
+ huff_ptr[i] = -1;
+ huff_min[i] = -1;
+ huff_max[i] = -1;
+ zero_flag = 1;
+ }
+
+ if (data_flag == 1) {
+ if (zero_flag == 1)
+ huff_code <<= 1;
+ else
+ huff_code = (huff_max[i] + 1) << 1;
+ }
+ }
+
+ return 0;
+}
+
static int coda9_jpeg_load_huff_tab(struct coda_ctx *ctx)
{
struct {
@@ -880,6 +1245,13 @@ static void coda9_jpeg_finish_encode(struct coda_ctx *ctx)
coda_dbg(1, ctx, "job finished: encoded frame (%u)%s\n",
dst_buf->sequence,
(dst_buf->flags & V4L2_BUF_FLAG_LAST) ? " (last)" : "");
+
+ /*
+ * Reset JPEG processing unit after each encode run to work
+ * around hangups when switching context between encoder and
+ * decoder.
+ */
+ coda_hw_reset(ctx);
}
static void coda9_jpeg_release(struct coda_ctx *ctx)
@@ -893,6 +1265,7 @@ static void coda9_jpeg_release(struct coda_ctx *ctx)
for (i = 0; i < 3; i++)
kfree(ctx->params.jpeg_qmat_tab[i]);
kfree(ctx->params.jpeg_huff_data);
+ kfree(ctx->params.jpeg_huff_tab);
}
const struct coda_context_ops coda9_jpeg_encode_ops = {
@@ -903,6 +1276,210 @@ const struct coda_context_ops coda9_jpeg_encode_ops = {
.release = coda9_jpeg_release,
};
+/*
+ * Decoder context operations
+ */
+
+static int coda9_jpeg_start_decoding(struct coda_ctx *ctx)
+{
+ ctx->params.jpeg_qmat_index[0] = 0;
+ ctx->params.jpeg_qmat_index[1] = 1;
+ ctx->params.jpeg_qmat_index[2] = 1;
+ ctx->params.jpeg_qmat_tab[0] = luma_q;
+ ctx->params.jpeg_qmat_tab[1] = chroma_q;
+ /* nothing more to do here */
+
+ /*
+ * TODO: we could already scan the first header to get the chroma
+ * format.
+ */
+
+ return 0;
+}
+
+static int coda9_jpeg_prepare_decode(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ int aligned_width, aligned_height;
+ int chroma_format;
+ int ret;
+ u32 val, dst_fourcc;
+ struct coda_q_data *q_data_src, *q_data_dst;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ int chroma_interleave;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_fourcc = q_data_dst->fourcc;
+
+ if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == 0)
+ vb2_set_plane_payload(&src_buf->vb2_buf, 0,
+ vb2_plane_size(&src_buf->vb2_buf, 0));
+
+ chroma_format = coda9_jpeg_chroma_format(q_data_dst->fourcc);
+ if (chroma_format < 0) {
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ return chroma_format;
+ }
+
+ ret = coda_jpeg_decode_header(ctx, &src_buf->vb2_buf);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to decode JPEG header: %d\n",
+ ret);
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ return ret;
+ }
+
+ /* Round image dimensions to multiple of MCU size */
+ aligned_width = round_up(q_data_src->width, width_align[chroma_format]);
+ aligned_height = round_up(q_data_src->height, height_align[chroma_format]);
+ if (aligned_width != q_data_dst->bytesperline) {
+ v4l2_err(&dev->v4l2_dev, "stride mismatch: %d != %d\n",
+ aligned_width, q_data_dst->bytesperline);
+ }
+
+ coda_set_gdi_regs(ctx);
+
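+ /* per-component AC/DC Huffman table selectors, one bit each */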
+ val = ctx->params.jpeg_huff_ac_index[0] << 12 |
+ ctx->params.jpeg_huff_ac_index[1] << 11 |
+ ctx->params.jpeg_huff_ac_index[2] << 10 |
+ ctx->params.jpeg_huff_dc_index[0] << 9 |
+ ctx->params.jpeg_huff_dc_index[1] << 8 |
+ ctx->params.jpeg_huff_dc_index[2] << 7;
+ if (ctx->params.jpeg_huff_tab)
+ val |= CODA9_JPEG_PIC_CTRL_USER_HUFFMAN_EN;
+ coda_write(dev, val, CODA9_REG_JPEG_PIC_CTRL);
+
+ coda_write(dev, aligned_width << 16 | aligned_height,
+ CODA9_REG_JPEG_PIC_SIZE);
+
+ chroma_interleave = (dst_fourcc == V4L2_PIX_FMT_NV12);
+ coda_write(dev, 0, CODA9_REG_JPEG_ROT_INFO);
+ coda_write(dev, bus_req_num[chroma_format], CODA9_REG_JPEG_OP_INFO);
+ coda_write(dev, mcu_info[chroma_format], CODA9_REG_JPEG_MCU_INFO);
+ coda_write(dev, 0, CODA9_REG_JPEG_SCL_INFO);
+ coda_write(dev, chroma_interleave, CODA9_REG_JPEG_DPB_CONFIG);
+ coda_write(dev, ctx->params.jpeg_restart_interval,
+ CODA9_REG_JPEG_RST_INTVAL);
+
+ if (ctx->params.jpeg_huff_tab) {
+ ret = coda9_jpeg_dec_huff_setup(ctx);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to set up Huffman tables: %d\n", ret);
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ return ret;
+ }
+ }
+
+ coda9_jpeg_qmat_setup(ctx);
+
+ coda9_jpeg_dec_bbc_gbu_setup(ctx, &src_buf->vb2_buf,
+ ctx->jpeg_ecs_offset);
+
+ coda_write(dev, 0, CODA9_REG_JPEG_RST_INDEX);
+ coda_write(dev, 0, CODA9_REG_JPEG_RST_COUNT);
+
+ coda_write(dev, 0, CODA9_REG_JPEG_DPCM_DIFF_Y);
+ coda_write(dev, 0, CODA9_REG_JPEG_DPCM_DIFF_CB);
+ coda_write(dev, 0, CODA9_REG_JPEG_DPCM_DIFF_CR);
+
+ coda_write(dev, 0, CODA9_REG_JPEG_ROT_INFO);
+
+ coda_write(dev, 1, CODA9_GDI_CONTROL);
+ do {
+ ret = coda_read(dev, CODA9_GDI_STATUS);
+ } while (!ret);
+
+ val = (chroma_format << 17) | (chroma_interleave << 16) |
+ q_data_dst->bytesperline;
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+ val |= 3 << 20;
+ coda_write(dev, val, CODA9_GDI_INFO_CONTROL);
+
+ coda_write(dev, aligned_width << 16 | aligned_height,
+ CODA9_GDI_INFO_PIC_SIZE);
+
+ coda_write_base(ctx, q_data_dst, dst_buf, CODA9_GDI_INFO_BASE_Y);
+
+ coda_write(dev, 0, CODA9_REG_JPEG_DPB_BASE00);
+ coda_write(dev, 0, CODA9_GDI_CONTROL);
+ coda_write(dev, 1, CODA9_GDI_PIC_INIT_HOST);
+
+ trace_coda_jpeg_run(ctx, src_buf);
+
+ coda_write(dev, 1, CODA9_REG_JPEG_PIC_START);
+
+ return 0;
+}
+
+static void coda9_jpeg_finish_decode(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *dst_buf, *src_buf;
+ struct coda_q_data *q_data_dst;
+ u32 err_mb;
+
+ err_mb = coda_read(dev, CODA9_REG_JPEG_PIC_ERRMB);
+ if (err_mb)
+ v4l2_err(&dev->v4l2_dev, "ERRMB: 0x%x\n", err_mb);
+
+ coda_write(dev, 0, CODA9_REG_JPEG_BBC_FLUSH_CMD);
+
+ /*
+ * Lock to make sure that a decoder stop command running in parallel
+ * will either already have marked src_buf as last, or it will wake up
+ * the capture queue after the buffers are returned.
+ */
+ mutex_lock(&ctx->wakeup_mutex);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf->sequence = ctx->osequence++;
+
+ trace_coda_jpeg_done(ctx, dst_buf);
+
+ dst_buf->flags &= ~(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_LAST);
+ dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_LAST;
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, q_data_dst->sizeimage);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ coda_m2m_buf_done(ctx, dst_buf, err_mb ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
+
+ mutex_unlock(&ctx->wakeup_mutex);
+
+ coda_dbg(1, ctx, "job finished: decoded frame (%u)%s\n",
+ dst_buf->sequence,
+ (dst_buf->flags & V4L2_BUF_FLAG_LAST) ? " (last)" : "");
+
+ /*
+ * Reset JPEG processing unit after each decode run to work
+ * around hangups when switching context between encoder and
+ * decoder.
+ */
+ coda_hw_reset(ctx);
+}
+
+const struct coda_context_ops coda9_jpeg_decode_ops = {
+ .queue_init = coda_encoder_queue_init, /* non-bitstream operation */
+ .start_streaming = coda9_jpeg_start_decoding,
+ .prepare_run = coda9_jpeg_prepare_decode,
+ .finish_run = coda9_jpeg_finish_decode,
+ .release = coda9_jpeg_release,
+};
+
irqreturn_t coda9_jpeg_irq_handler(int irq, void *data)
{
struct coda_dev *dev = data;
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 43bda175f517..b81f3aca9209 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -69,7 +69,7 @@ struct coda_aux_buf {
struct coda_dev {
struct v4l2_device v4l2_dev;
- struct video_device vfd[5];
+ struct video_device vfd[6];
struct device *dev;
const struct coda_devtype *devtype;
int firmware;
@@ -123,10 +123,15 @@ struct coda_params {
u8 mpeg4_inter_qp;
u8 gop_size;
int intra_refresh;
+ enum v4l2_jpeg_chroma_subsampling jpeg_chroma_subsampling;
u8 jpeg_quality;
u8 jpeg_restart_interval;
u8 *jpeg_qmat_tab[3];
+ int jpeg_qmat_index[3];
+ int jpeg_huff_dc_index[3];
+ int jpeg_huff_ac_index[3];
u32 *jpeg_huff_data;
+ struct coda_huff_tab *jpeg_huff_tab;
int codec_mode;
int codec_mode_aux;
enum v4l2_mpeg_video_multi_slice_mode slice_mode;
@@ -143,6 +148,8 @@ struct coda_params {
bool h264_intra_qp_changed;
bool intra_refresh_changed;
bool slice_mode_changed;
+ bool frame_rc_enable;
+ bool mb_rc_enable;
};
struct coda_buffer_meta {
@@ -238,6 +245,7 @@ struct coda_ctx {
struct v4l2_fh fh;
int gopcounter;
int runcounter;
+ int jpeg_ecs_offset;
char vpu_header[3][64];
int vpu_header_size[3];
struct kfifo bitstream_fifo;
@@ -362,12 +370,14 @@ void coda_update_profile_level_ctrls(struct coda_ctx *ctx, u8 profile_idc,
u8 level_idc);
bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
+int coda_jpeg_decode_header(struct coda_ctx *ctx, struct vb2_buffer *vb);
int coda_jpeg_write_tables(struct coda_ctx *ctx);
void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
extern const struct coda_context_ops coda_bit_encode_ops;
extern const struct coda_context_ops coda_bit_decode_ops;
extern const struct coda_context_ops coda9_jpeg_encode_ops;
+extern const struct coda_context_ops coda9_jpeg_decode_ops;
irqreturn_t coda_irq_handler(int irq, void *data);
irqreturn_t coda9_jpeg_irq_handler(int irq, void *data);
diff --git a/drivers/media/platform/cros-ec-cec/Makefile b/drivers/media/platform/cros-ec-cec/Makefile
deleted file mode 100644
index 2615cdc6e227..000000000000
--- a/drivers/media/platform/cros-ec-cec/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_CROS_EC_CEC) += cros-ec-cec.o
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index ead14c49d4f5..7d55fd45240e 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1225,7 +1225,6 @@ static int vpif_probe_complete(void)
probe_out:
for (k = 0; k < j; k++) {
ch = vpif_obj.dev[k];
- common = &ch->common[k];
video_unregister_device(&ch->video_dev);
}
return err;
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index be4effcbfe7b..136d3b2a0fbb 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -2,9 +2,10 @@
config VIDEO_SAMSUNG_EXYNOS4_IS
tristate "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && OF && COMMON_CLK
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
- depends on OF && COMMON_CLK
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
help
Say Y here to enable camera host interface devices for
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index 37fdcc53a1c4..9a09a10a3631 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -556,7 +556,7 @@ static int cafe_pci_probe(struct pci_dev *pdev,
clkdev_create(mcam->mclk, "xclk", "%d-%04x",
i2c_adapter_id(cam->i2c_adapter), ov7670_info.addr);
- if (i2c_new_device(cam->i2c_adapter, &ov7670_info)) {
+ if (!IS_ERR(i2c_new_client_device(cam->i2c_adapter, &ov7670_info))) {
cam->registered = 1;
return 0;
}
diff --git a/drivers/media/platform/meson/Makefile b/drivers/media/platform/meson/Makefile
deleted file mode 100644
index 6bf728addbf8..000000000000
--- a/drivers/media/platform/meson/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_MESON_AO_CEC) += ao-cec.o
-obj-$(CONFIG_VIDEO_MESON_G12A_AO_CEC) += ao-cec-g12a.o
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
index 14991685adb7..58abfbdfb82d 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
@@ -15,10 +15,10 @@
static const char * const mtk_mdp_comp_stem[MTK_MDP_COMP_TYPE_MAX] = {
- "mdp_rdma",
- "mdp_rsz",
- "mdp_wdma",
- "mdp_wrot",
+ "mdp-rdma",
+ "mdp-rsz",
+ "mdp-wdma",
+ "mdp-wrot",
};
struct mtk_mdp_comp_match {
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 70c85a2a10f5..3c5fe737d36f 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -1016,7 +1016,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
* - a videobuffer is queued on the pcdev->capture list
*
* Please check the "DMA hot chaining timeslice issue" in
- * Documentation/media/v4l-drivers/pxa_camera.rst
+ * Documentation/driver-api/media/drivers/pxa_camera.rst
*
* Context: should only be called within the dma irq handler
*/
@@ -1438,7 +1438,7 @@ static void pxac_vb2_queue(struct vb2_buffer *vb)
/*
* Please check the DMA prepared buffer structure in :
- * Documentation/media/v4l-drivers/pxa_camera.rst
+ * Documentation/driver-api/media/drivers/pxa_camera.rst
* Please check also in pxa_camera_check_link_miss() to understand why DMA chain
* modification while DMA chain is running will work anyway.
*/
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 194b10b98767..203c6538044f 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -210,6 +210,8 @@ static int venus_probe(struct platform_device *pdev)
if (!core->res)
return -ENODEV;
+ mutex_init(&core->pm_lock);
+
core->pm_ops = venus_pm_get(core->res->hfi_version);
if (!core->pm_ops)
return -ENODEV;
@@ -242,10 +244,6 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = icc_set_bw(core->cpucfg_path, 0, kbps_to_icc(1000));
- if (ret)
- return ret;
-
ret = hfi_create(core, &venus_core_ops);
if (ret)
return ret;
@@ -320,7 +318,6 @@ static int venus_remove(struct platform_device *pdev)
ret = hfi_core_deinit(core, true);
WARN_ON(ret);
- hfi_destroy(core);
venus_shutdown(core);
of_platform_depopulate(dev);
@@ -332,10 +329,14 @@ static int venus_remove(struct platform_device *pdev)
if (pm_ops->core_put)
pm_ops->core_put(dev);
+ hfi_destroy(core);
+
icc_put(core->video_path);
icc_put(core->cpucfg_path);
v4l2_device_unregister(&core->v4l2_dev);
+ mutex_destroy(&core->pm_lock);
+ mutex_destroy(&core->lock);
return ret;
}
@@ -350,6 +351,10 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
if (ret)
return ret;
+ ret = icc_set_bw(core->cpucfg_path, 0, 0);
+ if (ret)
+ return ret;
+
if (pm_ops->core_power)
ret = pm_ops->core_power(dev, POWER_OFF);
@@ -368,6 +373,10 @@ static __maybe_unused int venus_runtime_resume(struct device *dev)
return ret;
}
+ ret = icc_set_bw(core->cpucfg_path, 0, kbps_to_icc(1000));
+ if (ret)
+ return ret;
+
return hfi_core_resume(core, false);
}
@@ -447,7 +456,7 @@ static const struct freq_tbl sdm845_freq_table[] = {
{ 244800, 100000000 }, /* 1920x1080@30 */
};
-static struct codec_freq_data sdm845_codec_freq_data[] = {
+static const struct codec_freq_data sdm845_codec_freq_data[] = {
{ V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10 },
{ V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10 },
{ V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10 },
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index bd3ac6a4501f..7118612673c9 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -128,6 +128,7 @@ struct venus_caps {
* @error: an error returned during last HFI sync operations
* @sys_error: an error flag that signal system error event
* @core_ops: the core operations
+ * @pm_lock: a lock for PM operations
* @enc_codecs: encoders supported by this core
* @dec_codecs: decoders supported by this core
* @max_sessions_supported: holds the maximum number of sessions
@@ -168,6 +169,7 @@ struct venus_core {
bool sys_error;
const struct hfi_core_ops *core_ops;
const struct venus_pm_ops *pm_ops;
+ struct mutex pm_lock;
unsigned long enc_codecs;
unsigned long dec_codecs;
unsigned int max_sessions_supported;
@@ -259,7 +261,8 @@ enum venus_dec_state {
VENUS_DEC_STATE_SEEK = 4,
VENUS_DEC_STATE_DRAIN = 5,
VENUS_DEC_STATE_DECODING = 6,
- VENUS_DEC_STATE_DRC = 7
+ VENUS_DEC_STATE_DRC = 7,
+ VENUS_DEC_STATE_DRC_FLUSH_DONE = 8,
};
struct venus_ts_metadata {
@@ -324,6 +327,7 @@ struct venus_ts_metadata {
* @priv: a private for HFI operations callbacks
* @session_type: the type of the session (decoder or encoder)
* @hprop: a union used as a holder by get property
+ * @last_buf: last capture buffer for dynamic resolution change
*/
struct venus_inst {
struct list_head list;
@@ -385,6 +389,7 @@ struct venus_inst {
union hfi_get_property hprop;
unsigned int core_acquired: 1;
unsigned int bit_depth;
+ struct vb2_buffer *last_buf;
};
#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index bcc603804041..0143af7822b2 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -1129,15 +1129,18 @@ unlock:
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);
-void venus_helper_buffers_done(struct venus_inst *inst,
+void venus_helper_buffers_done(struct venus_inst *inst, unsigned int type,
enum vb2_buffer_state state)
{
struct vb2_v4l2_buffer *buf;
- while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
- v4l2_m2m_buf_done(buf, state);
- while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
- v4l2_m2m_buf_done(buf, state);
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+ } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+ }
}
EXPORT_SYMBOL_GPL(venus_helper_buffers_done);
@@ -1168,7 +1171,10 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
INIT_LIST_HEAD(&inst->registeredbufs);
}
- venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
+ venus_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ VB2_BUF_STATE_ERROR);
+ venus_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ VB2_BUF_STATE_ERROR);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->streamon_out = 0;
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index b64875564064..8fbbda12a4fe 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -14,7 +14,7 @@ struct venus_core;
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt);
struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst,
unsigned int type, u32 idx);
-void venus_helper_buffers_done(struct venus_inst *inst,
+void venus_helper_buffers_done(struct venus_inst *inst, unsigned int type,
enum vb2_buffer_state state);
int venus_helper_vb2_buf_init(struct vb2_buffer *vb);
int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb);
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index 3d8b1284d1f3..a211eb93e0f9 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -382,7 +382,7 @@ int hfi_session_unload_res(struct venus_inst *inst)
}
EXPORT_SYMBOL_GPL(hfi_session_unload_res);
-int hfi_session_flush(struct venus_inst *inst, u32 type)
+int hfi_session_flush(struct venus_inst *inst, u32 type, bool block)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
@@ -393,9 +393,11 @@ int hfi_session_flush(struct venus_inst *inst, u32 type)
if (ret)
return ret;
- ret = wait_session_msg(inst);
- if (ret)
- return ret;
+ if (block) {
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h
index 855822c9f39b..62c315291484 100644
--- a/drivers/media/platform/qcom/venus/hfi.h
+++ b/drivers/media/platform/qcom/venus/hfi.h
@@ -102,6 +102,7 @@ struct hfi_inst_ops {
u32 hfi_flags, u64 timestamp_us);
void (*event_notify)(struct venus_inst *inst, u32 event,
struct hfi_event_data *data);
+ void (*flush_done)(struct venus_inst *inst);
};
struct hfi_ops {
@@ -161,7 +162,7 @@ int hfi_session_continue(struct venus_inst *inst);
int hfi_session_abort(struct venus_inst *inst);
int hfi_session_load_res(struct venus_inst *inst);
int hfi_session_unload_res(struct venus_inst *inst);
-int hfi_session_flush(struct venus_inst *inst, u32 type);
+int hfi_session_flush(struct venus_inst *inst, u32 type, bool block);
int hfi_session_set_buffers(struct venus_inst *inst,
struct hfi_buffer_desc *bd);
int hfi_session_unset_buffers(struct venus_inst *inst,
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h
index cae9d5d61c0c..83705e237f1c 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.h
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.h
@@ -107,7 +107,7 @@ struct hfi_session_abort_pkt {
struct hfi_session_set_property_pkt {
struct hfi_session_hdr_pkt shdr;
u32 num_properties;
- u32 data[0];
+ u32 data[];
};
struct hfi_session_set_buffers_pkt {
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 04ef2286efc6..279a9d6fe737 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -439,6 +439,8 @@ static void hfi_session_flush_done(struct venus_core *core,
inst->error = pkt->error_type;
complete(&inst->done);
+ if (inst->ops->flush_done)
+ inst->ops->flush_done(inst);
}
static void hfi_session_etb_done(struct venus_core *core,
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.h b/drivers/media/platform/qcom/venus/hfi_msgs.h
index 7694b1d25d9d..526d9f5b487b 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.h
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.h
@@ -155,7 +155,7 @@ struct hfi_msg_session_empty_buffer_done_pkt {
u32 input_tag;
u32 packet_buffer;
u32 extradata_buffer;
- u32 data[0];
+ u32 data[];
};
struct hfi_msg_session_fbd_compressed_pkt {
@@ -175,7 +175,7 @@ struct hfi_msg_session_fbd_compressed_pkt {
u32 picture_type;
u32 packet_buffer;
u32 extradata_buffer;
- u32 data[0];
+ u32 data[];
};
struct hfi_msg_session_fbd_uncompressed_plane0_pkt {
@@ -202,7 +202,7 @@ struct hfi_msg_session_fbd_uncompressed_plane0_pkt {
u32 picture_type;
u32 packet_buffer;
u32 extradata_buffer;
- u32 data[0];
+ u32 data[];
};
struct hfi_msg_session_fbd_uncompressed_plane1_pkt {
@@ -211,7 +211,7 @@ struct hfi_msg_session_fbd_uncompressed_plane1_pkt {
u32 filled_len;
u32 offset;
u32 packet_buffer2;
- u32 data[0];
+ u32 data[];
};
struct hfi_msg_session_fbd_uncompressed_plane2_pkt {
@@ -220,7 +220,7 @@ struct hfi_msg_session_fbd_uncompressed_plane2_pkt {
u32 filled_len;
u32 offset;
u32 packet_buffer3;
- u32 data[0];
+ u32 data[];
};
struct hfi_msg_session_parse_sequence_header_done_pkt {
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 4ed2628585a1..7c4c483d5438 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -276,6 +276,14 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
const struct venus_format *fmt;
struct v4l2_format format;
u32 pixfmt_out = 0, pixfmt_cap = 0;
+ struct vb2_queue *q;
+
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
+ if (!q)
+ return -EINVAL;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
orig_pixmp = *pixmp;
@@ -545,6 +553,64 @@ static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
.vidioc_decoder_cmd = vdec_decoder_cmd,
};
+static int vdec_pm_get(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_dec;
+ int ret;
+
+ mutex_lock(&core->pm_lock);
+ ret = pm_runtime_get_sync(dev);
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int vdec_pm_put(struct venus_inst *inst, bool autosuspend)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_dec;
+ int ret;
+
+ mutex_lock(&core->pm_lock);
+
+ if (autosuspend)
+ ret = pm_runtime_put_autosuspend(dev);
+ else
+ ret = pm_runtime_put_sync(dev);
+
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int vdec_pm_get_put(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_dec;
+ int ret = 0;
+
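+ /*
+ * If the device is runtime suspended, wake it briefly and put it
+ * back with autosuspend so queueing activity re-arms the
+ * autosuspend timer.
+ */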
+ mutex_lock(&core->pm_lock);
+
+ if (pm_runtime_suspended(dev)) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto error;
+
+ ret = pm_runtime_put_autosuspend(dev);
+ }
+
+error:
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static void vdec_pm_touch(struct venus_inst *inst)
+{
+ pm_runtime_mark_last_busy(inst->core->dev_dec);
+}
+
static int vdec_set_properties(struct venus_inst *inst)
{
struct vdec_controls *ctr = &inst->controls.dec;
@@ -746,12 +812,20 @@ static int vdec_queue_setup(struct vb2_queue *q,
return 0;
}
- ret = vdec_session_init(inst);
+ ret = vdec_pm_get(inst);
if (ret)
return ret;
+ ret = vdec_session_init(inst);
+ if (ret)
+ goto put_power;
+
ret = vdec_num_buffers(inst, &in_num, &out_num);
if (ret)
+ goto put_power;
+
+ ret = vdec_pm_put(inst, false);
+ if (ret)
return ret;
switch (q->type) {
@@ -786,6 +860,10 @@ static int vdec_queue_setup(struct vb2_queue *q,
}
return ret;
+
+put_power:
+ vdec_pm_put(inst, false);
+ return ret;
}
static int vdec_verify_conf(struct venus_inst *inst)
@@ -836,7 +914,7 @@ static int vdec_start_capture(struct venus_inst *inst)
return 0;
reconfigure:
- ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT);
+ ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, true);
if (ret)
return ret;
@@ -947,14 +1025,23 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
mutex_lock(&inst->lock);
- ret = venus_pm_acquire_core(inst);
- if (ret)
- goto error;
-
- if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
ret = vdec_start_capture(inst);
- else
+ } else {
+ ret = vdec_pm_get(inst);
+ if (ret)
+ goto error;
+
+ ret = venus_pm_acquire_core(inst);
+ if (ret)
+ goto put_power;
+
+ ret = vdec_pm_put(inst, true);
+ if (ret)
+ goto error;
+
ret = vdec_start_output(inst);
+ }
if (ret)
goto error;
@@ -962,8 +1049,10 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
mutex_unlock(&inst->lock);
return 0;
+put_power:
+ vdec_pm_put(inst, false);
error:
- venus_helper_buffers_done(inst, VB2_BUF_STATE_QUEUED);
+ venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
mutex_unlock(&inst->lock);
return ret;
}
@@ -982,23 +1071,25 @@ static int vdec_stop_capture(struct venus_inst *inst)
switch (inst->codec_state) {
case VENUS_DEC_STATE_DECODING:
- ret = hfi_session_flush(inst, HFI_FLUSH_ALL);
+ ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
/* fallthrough */
case VENUS_DEC_STATE_DRAIN:
vdec_cancel_dst_buffers(inst);
inst->codec_state = VENUS_DEC_STATE_STOPPED;
break;
case VENUS_DEC_STATE_DRC:
- ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT);
- vdec_cancel_dst_buffers(inst);
+ WARN_ON(1);
+ fallthrough;
+ case VENUS_DEC_STATE_DRC_FLUSH_DONE:
inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
- INIT_LIST_HEAD(&inst->registeredbufs);
venus_helper_free_dpb_bufs(inst);
break;
default:
- return 0;
+ break;
}
+ INIT_LIST_HEAD(&inst->registeredbufs);
+
return ret;
}
@@ -1010,12 +1101,12 @@ static int vdec_stop_output(struct venus_inst *inst)
case VENUS_DEC_STATE_DECODING:
case VENUS_DEC_STATE_DRAIN:
case VENUS_DEC_STATE_STOPPED:
- ret = hfi_session_flush(inst, HFI_FLUSH_ALL);
+ ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
inst->codec_state = VENUS_DEC_STATE_SEEK;
break;
case VENUS_DEC_STATE_INIT:
case VENUS_DEC_STATE_CAPTURE_SETUP:
- ret = hfi_session_flush(inst, HFI_FLUSH_INPUT);
+ ret = hfi_session_flush(inst, HFI_FLUSH_INPUT, true);
break;
default:
break;
@@ -1036,7 +1127,7 @@ static void vdec_stop_streaming(struct vb2_queue *q)
else
ret = vdec_stop_output(inst);
- venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
+ venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_ERROR);
if (ret)
goto unlock;
@@ -1055,8 +1146,9 @@ static void vdec_session_release(struct venus_inst *inst)
struct venus_core *core = inst->core;
int ret, abort = 0;
- mutex_lock(&inst->lock);
+ vdec_pm_get(inst);
+ mutex_lock(&inst->lock);
inst->codec_state = VENUS_DEC_STATE_DEINIT;
ret = hfi_session_stop(inst);
@@ -1078,10 +1170,11 @@ static void vdec_session_release(struct venus_inst *inst)
venus_helper_free_dpb_bufs(inst);
venus_pm_load_scale(inst);
- venus_pm_release_core(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
-
mutex_unlock(&inst->lock);
+
+ venus_pm_release_core(inst);
+ vdec_pm_put(inst, false);
}
static int vdec_buf_init(struct vb2_buffer *vb)
@@ -1102,6 +1195,15 @@ static void vdec_buf_cleanup(struct vb2_buffer *vb)
vdec_session_release(inst);
}
+static void vdec_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+
+ vdec_pm_get_put(inst);
+
+ venus_helper_vb2_buf_queue(vb);
+}
+
static const struct vb2_ops vdec_vb2_ops = {
.queue_setup = vdec_queue_setup,
.buf_init = vdec_buf_init,
@@ -1109,7 +1211,7 @@ static const struct vb2_ops vdec_vb2_ops = {
.buf_prepare = venus_helper_vb2_buf_prepare,
.start_streaming = vdec_start_streaming,
.stop_streaming = vdec_stop_streaming,
- .buf_queue = venus_helper_vb2_buf_queue,
+ .buf_queue = vdec_vb2_buf_queue,
};
static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
@@ -1121,6 +1223,8 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
struct vb2_buffer *vb;
unsigned int type;
+ vdec_pm_touch(inst);
+
if (buf_type == HFI_BUFFER_INPUT)
type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
else
@@ -1140,6 +1244,13 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
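+ /*
+ * If this is the capture buffer remembered at the dynamic
+ * resolution change, return it empty and flagged LAST so
+ * userspace knows the old resolution is drained.
+ */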
+ if (inst->last_buf == vb) {
+ inst->last_buf = NULL;
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ vb2_set_plane_payload(vb, 0, 0);
+ vb->timestamp = 0;
+ }
+
if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
@@ -1148,6 +1259,9 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
if (inst->codec_state == VENUS_DEC_STATE_DRAIN)
inst->codec_state = VENUS_DEC_STATE_STOPPED;
}
+
+ if (!bytesused)
+ state = VB2_BUF_STATE_ERROR;
} else {
vbuf->sequence = inst->sequence_out++;
}
@@ -1214,6 +1328,25 @@ static void vdec_event_change(struct venus_inst *inst,
}
}
+ /*
+ * The assumption is that the firmware has to return the last buffer
+ * before this event is received by the v4l2 driver. Also, the firmware
+ * itself doesn't mark the last decoder output buffer with the HFI EOS flag.
+ */
+
+ if (!sufficient && inst->codec_state == VENUS_DEC_STATE_DRC) {
+ struct vb2_v4l2_buffer *last;
+ int ret;
+
+ last = v4l2_m2m_last_dst_buf(inst->m2m_ctx);
+ if (last)
+ inst->last_buf = &last->vb2_buf;
+
+ ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, false);
+ if (ret)
+ dev_dbg(dev, "flush output error %d\n", ret);
+ }
+
inst->reconfig = true;
v4l2_event_queue_fh(&inst->fh, &ev);
wake_up(&inst->reconf_wait);
@@ -1227,6 +1360,8 @@ static void vdec_event_notify(struct venus_inst *inst, u32 event,
struct venus_core *core = inst->core;
struct device *dev = core->dev_dec;
+ vdec_pm_touch(inst);
+
switch (event) {
case EVT_SESSION_ERROR:
inst->session_error = true;
@@ -1252,9 +1387,16 @@ static void vdec_event_notify(struct venus_inst *inst, u32 event,
}
}
+static void vdec_flush_done(struct venus_inst *inst)
+{
+ if (inst->codec_state == VENUS_DEC_STATE_DRC)
+ inst->codec_state = VENUS_DEC_STATE_DRC_FLUSH_DONE;
+}
+
static const struct hfi_inst_ops vdec_hfi_ops = {
.buf_done = vdec_buf_done,
.event_notify = vdec_event_notify,
+ .flush_done = vdec_flush_done,
};
static void vdec_inst_init(struct venus_inst *inst)
@@ -1347,13 +1489,9 @@ static int vdec_open(struct file *file)
init_waitqueue_head(&inst->reconf_wait);
venus_helper_init_instance(inst);
- ret = pm_runtime_get_sync(core->dev_dec);
- if (ret < 0)
- goto err_free_inst;
-
ret = vdec_ctrl_init(inst);
if (ret)
- goto err_put_sync;
+ goto err_free;
ret = hfi_session_create(inst, &vdec_hfi_ops);
if (ret)
@@ -1392,9 +1530,7 @@ err_session_destroy:
hfi_session_destroy(inst);
err_ctrl_deinit:
vdec_ctrl_deinit(inst);
-err_put_sync:
- pm_runtime_put_sync(core->dev_dec);
-err_free_inst:
+err_free:
kfree(inst);
return ret;
}
@@ -1403,6 +1539,8 @@ static int vdec_close(struct file *file)
{
struct venus_inst *inst = to_inst(file);
+ vdec_pm_get(inst);
+
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
vdec_ctrl_deinit(inst);
@@ -1411,7 +1549,7 @@ static int vdec_close(struct file *file)
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
- pm_runtime_put_sync(inst->core->dev_dec);
+ vdec_pm_put(inst, false);
kfree(inst);
return 0;
@@ -1468,6 +1606,8 @@ static int vdec_probe(struct platform_device *pdev)
core->dev_dec = dev;
video_set_drvdata(vdev, core);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
+ pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 9981a2a27c90..feed648550d1 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -357,6 +357,14 @@ static int venc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
const struct venus_format *fmt;
struct v4l2_format format;
u32 pixfmt_out = 0, pixfmt_cap = 0;
+ struct vb2_queue *q;
+
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
+ if (!q)
+ return -EINVAL;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
orig_pixmp = *pixmp;
@@ -1018,7 +1026,7 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
deinit_sess:
hfi_session_deinit(inst);
bufs_done:
- venus_helper_buffers_done(inst, VB2_BUF_STATE_QUEUED);
+ venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->streamon_out = 0;
else
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index 43c78620c9d8..5c6b00737fe7 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -8,6 +8,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -21,6 +22,7 @@
struct rcar_fcp_device {
struct list_head list;
struct device *dev;
+ struct device_dma_parameters dma_parms;
};
static LIST_HEAD(fcp_devices);
@@ -136,6 +138,9 @@ static int rcar_fcp_probe(struct platform_device *pdev)
fcp->dev = &pdev->dev;
+ fcp->dev->dma_parms = &fcp->dma_parms;
+ dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32));
+
pm_runtime_enable(&pdev->dev);
mutex_lock(&fcp_lock);
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
index 240ac3f3c941..ca0d906dce2f 100644
--- a/drivers/media/platform/rcar-vin/Kconfig
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_RCAR_CSI2
tristate "R-Car MIPI CSI-2 Receiver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF
+ depends on VIDEO_V4L2 && OF
depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select RESET_CONTROLLER
select V4L2_FWNODE
help
@@ -14,8 +16,10 @@ config VIDEO_RCAR_CSI2
config VIDEO_RCAR_VIN
tristate "R-Car Video Input (VIN) Driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2 && OF
depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
help
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index faa9fb23a2e9..151e6a90c5fb 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -52,8 +52,8 @@ struct rcar_csi2;
/*
* Channel Data Type Select
- * VCDT[0-15]: Channel 1 VCDT[16-31]: Channel 2
- * VCDT2[0-15]: Channel 3 VCDT2[16-31]: Channel 4
+ * VCDT[0-15]: Channel 0 VCDT[16-31]: Channel 1
+ * VCDT2[0-15]: Channel 2 VCDT2[16-31]: Channel 3
*/
#define VCDT_REG 0x10
#define VCDT2_REG 0x14
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 5151a3cd8a6e..f421e2584875 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -343,6 +343,29 @@ static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
unsigned int i;
int matched;
+ /*
+ * If mbus_code is set, only enumerate the supported pixel formats for
+ * that bus code. Converting from YCbCr to RGB and RGB to YCbCr is
+ * possible with VIN, so all supported YCbCr and RGB media bus codes
+ * can produce all of the related pixel formats. If mbus_code is not
+ * set, enumerate all possible pixel formats.
+ *
+ * TODO: Once raw capture formats are added to the driver this needs
+ * to be extended so raw media bus codes only result in raw pixel
+ * formats.
+ */
+ switch (f->mbus_code) {
+ case 0:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ break;
+ default:
+ return -EINVAL;
+ }
+
matched = -1;
for (i = 0; i < ARRAY_SIZE(rvin_formats); i++) {
if (rvin_format_from_pixel(vin, rvin_formats[i].fourcc))
@@ -767,18 +790,6 @@ static int rvin_mc_s_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int rvin_mc_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- if (i->index != 0)
- return -EINVAL;
-
- i->type = V4L2_INPUT_TYPE_CAMERA;
- strscpy(i->name, "Camera", sizeof(i->name));
-
- return 0;
-}
-
static const struct v4l2_ioctl_ops rvin_mc_ioctl_ops = {
.vidioc_querycap = rvin_querycap,
.vidioc_try_fmt_vid_cap = rvin_mc_try_fmt_vid_cap,
@@ -786,10 +797,6 @@ static const struct v4l2_ioctl_ops rvin_mc_ioctl_ops = {
.vidioc_s_fmt_vid_cap = rvin_mc_s_fmt_vid_cap,
.vidioc_enum_fmt_vid_cap = rvin_enum_fmt_vid_cap,
- .vidioc_enum_input = rvin_mc_enum_input,
- .vidioc_g_input = rvin_g_input,
- .vidioc_s_input = rvin_s_input,
-
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
@@ -961,6 +968,7 @@ int rvin_v4l2_register(struct rvin_dev *vin)
vin->format.colorspace = RVIN_DEFAULT_COLORSPACE;
if (vin->info->use_mc) {
+ vdev->device_caps |= V4L2_CAP_IO_MC;
vdev->ioctl_ops = &rvin_mc_ioctl_ops;
} else {
vdev->ioctl_ops = &rvin_ioctl_ops;
diff --git a/drivers/media/platform/seco-cec/Makefile b/drivers/media/platform/seco-cec/Makefile
deleted file mode 100644
index 79fde6947ff2..000000000000
--- a/drivers/media/platform/seco-cec/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_SECO_CEC) += seco-cec.o
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
deleted file mode 100644
index f08b8fc192d8..000000000000
--- a/drivers/media/platform/sh_veu.c
+++ /dev/null
@@ -1,1203 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * sh-mobile VEU mem2mem driver
- *
- * Copyright (C) 2012 Renesas Electronics Corporation
- * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
- * Copyright (C) 2008 Magnus Damm
- */
-
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/videodev2.h>
-
-#include <media/v4l2-dev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-mem2mem.h>
-#include <media/v4l2-image-sizes.h>
-#include <media/videobuf2-dma-contig.h>
-
-#define VEU_STR 0x00 /* start register */
-#define VEU_SWR 0x10 /* src: line length */
-#define VEU_SSR 0x14 /* src: image size */
-#define VEU_SAYR 0x18 /* src: y/rgb plane address */
-#define VEU_SACR 0x1c /* src: c plane address */
-#define VEU_BSSR 0x20 /* bundle mode register */
-#define VEU_EDWR 0x30 /* dst: line length */
-#define VEU_DAYR 0x34 /* dst: y/rgb plane address */
-#define VEU_DACR 0x38 /* dst: c plane address */
-#define VEU_TRCR 0x50 /* transform control */
-#define VEU_RFCR 0x54 /* resize scale */
-#define VEU_RFSR 0x58 /* resize clip */
-#define VEU_ENHR 0x5c /* enhance */
-#define VEU_FMCR 0x70 /* filter mode */
-#define VEU_VTCR 0x74 /* lowpass vertical */
-#define VEU_HTCR 0x78 /* lowpass horizontal */
-#define VEU_APCR 0x80 /* color match */
-#define VEU_ECCR 0x84 /* color replace */
-#define VEU_AFXR 0x90 /* fixed mode */
-#define VEU_SWPR 0x94 /* swap */
-#define VEU_EIER 0xa0 /* interrupt mask */
-#define VEU_EVTR 0xa4 /* interrupt event */
-#define VEU_STAR 0xb0 /* status */
-#define VEU_BSRR 0xb4 /* reset */
-
-#define VEU_MCR00 0x200 /* color conversion matrix coefficient 00 */
-#define VEU_MCR01 0x204 /* color conversion matrix coefficient 01 */
-#define VEU_MCR02 0x208 /* color conversion matrix coefficient 02 */
-#define VEU_MCR10 0x20c /* color conversion matrix coefficient 10 */
-#define VEU_MCR11 0x210 /* color conversion matrix coefficient 11 */
-#define VEU_MCR12 0x214 /* color conversion matrix coefficient 12 */
-#define VEU_MCR20 0x218 /* color conversion matrix coefficient 20 */
-#define VEU_MCR21 0x21c /* color conversion matrix coefficient 21 */
-#define VEU_MCR22 0x220 /* color conversion matrix coefficient 22 */
-#define VEU_COFFR 0x224 /* color conversion offset */
-#define VEU_CBR 0x228 /* color conversion clip */
-
-/*
- * 4092x4092 max size is the normal case. In some cases it can be reduced to
- * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188.
- */
-#define MAX_W 4092
-#define MAX_H 4092
-#define MIN_W 8
-#define MIN_H 8
-#define ALIGN_W 4
-
-/* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */
-#define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024)
-
-#define MEM2MEM_DEF_TRANSLEN 1
-
-struct sh_veu_dev;
-
-struct sh_veu_file {
- struct v4l2_fh fh;
- struct sh_veu_dev *veu_dev;
- bool cfg_needed;
-};
-
-struct sh_veu_format {
- u32 fourcc;
- unsigned int depth;
- unsigned int ydepth;
-};
-
-/* video data format */
-struct sh_veu_vfmt {
- /* Replace with v4l2_rect */
- struct v4l2_rect frame;
- unsigned int bytesperline;
- unsigned int offset_y;
- unsigned int offset_c;
- const struct sh_veu_format *fmt;
-};
-
-struct sh_veu_dev {
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct v4l2_m2m_dev *m2m_dev;
- struct device *dev;
- struct v4l2_m2m_ctx *m2m_ctx;
- struct sh_veu_vfmt vfmt_out;
- struct sh_veu_vfmt vfmt_in;
- /* Only single user per direction so far */
- struct sh_veu_file *capture;
- struct sh_veu_file *output;
- struct mutex fop_lock;
- void __iomem *base;
- spinlock_t lock;
- bool is_2h;
- unsigned int xaction;
- bool aborting;
-};
-
-enum sh_veu_fmt_idx {
- SH_VEU_FMT_NV12,
- SH_VEU_FMT_NV16,
- SH_VEU_FMT_NV24,
- SH_VEU_FMT_RGB332,
- SH_VEU_FMT_RGB444,
- SH_VEU_FMT_RGB565,
- SH_VEU_FMT_RGB666,
- SH_VEU_FMT_RGB24,
-};
-
-#define DEFAULT_IN_WIDTH VGA_WIDTH
-#define DEFAULT_IN_HEIGHT VGA_HEIGHT
-#define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12
-#define DEFAULT_OUT_WIDTH VGA_WIDTH
-#define DEFAULT_OUT_HEIGHT VGA_HEIGHT
-#define DEFAULT_OUT_FMTIDX SH_VEU_FMT_RGB565
-
-/*
- * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte
- * aligned for NV24.
- */
-static const struct sh_veu_format sh_veu_fmt[] = {
- [SH_VEU_FMT_NV12] = { .ydepth = 8, .depth = 12, .fourcc = V4L2_PIX_FMT_NV12 },
- [SH_VEU_FMT_NV16] = { .ydepth = 8, .depth = 16, .fourcc = V4L2_PIX_FMT_NV16 },
- [SH_VEU_FMT_NV24] = { .ydepth = 8, .depth = 24, .fourcc = V4L2_PIX_FMT_NV24 },
- [SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .fourcc = V4L2_PIX_FMT_RGB332 },
- [SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .fourcc = V4L2_PIX_FMT_RGB444 },
- [SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .fourcc = V4L2_PIX_FMT_RGB565 },
- [SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .fourcc = V4L2_PIX_FMT_BGR666 },
- [SH_VEU_FMT_RGB24] = { .ydepth = 24, .depth = 24, .fourcc = V4L2_PIX_FMT_RGB24 },
-};
-
-#define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \
- .frame = { \
- .width = VGA_WIDTH, \
- .height = VGA_HEIGHT, \
- }, \
- .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3, \
- .fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX], \
-}
-
-#define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){ \
- .frame = { \
- .width = VGA_WIDTH, \
- .height = VGA_HEIGHT, \
- }, \
- .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3, \
- .fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX], \
-}
-
-/*
- * TODO: add support for further output formats:
- * SH_VEU_FMT_NV12,
- * SH_VEU_FMT_NV16,
- * SH_VEU_FMT_NV24,
- * SH_VEU_FMT_RGB332,
- * SH_VEU_FMT_RGB444,
- * SH_VEU_FMT_RGB666,
- * SH_VEU_FMT_RGB24,
- */
-
-static const int sh_veu_fmt_out[] = {
- SH_VEU_FMT_RGB565,
-};
-
-/*
- * TODO: add support for further input formats:
- * SH_VEU_FMT_NV16,
- * SH_VEU_FMT_NV24,
- * SH_VEU_FMT_RGB565,
- * SH_VEU_FMT_RGB666,
- * SH_VEU_FMT_RGB24,
- */
-static const int sh_veu_fmt_in[] = {
- SH_VEU_FMT_NV12,
-};
-
-static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
-{
- switch (fourcc) {
- default:
- BUG();
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV24:
- return V4L2_COLORSPACE_SMPTE170M;
- case V4L2_PIX_FMT_RGB332:
- case V4L2_PIX_FMT_RGB444:
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_BGR666:
- case V4L2_PIX_FMT_RGB24:
- return V4L2_COLORSPACE_SRGB;
- }
-}
-
-static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg)
-{
- return ioread32(veu->base + reg);
-}
-
-static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg,
- u32 value)
-{
- iowrite32(value, veu->base + reg);
-}
-
- /* ========== mem2mem callbacks ========== */
-
-static void sh_veu_job_abort(void *priv)
-{
- struct sh_veu_dev *veu = priv;
-
- /* Will cancel the transaction in the next interrupt handler */
- veu->aborting = true;
-}
-
-static void sh_veu_process(struct sh_veu_dev *veu,
- struct vb2_buffer *src_buf,
- struct vb2_buffer *dst_buf)
-{
- dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
-
- sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y);
- sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ?
- addr + veu->vfmt_out.offset_c : 0);
- dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__,
- (unsigned long)addr,
- veu->vfmt_out.offset_y, veu->vfmt_out.offset_c);
-
- addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y);
- sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ?
- addr + veu->vfmt_in.offset_c : 0);
- dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__,
- (unsigned long)addr,
- veu->vfmt_in.offset_y, veu->vfmt_in.offset_c);
-
- sh_veu_reg_write(veu, VEU_STR, 1);
-
- sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
-}
-
-/*
- * sh_veu_device_run() - prepares and starts the device
- *
- * This will be called by the framework when it decides to schedule a particular
- * instance.
- */
-static void sh_veu_device_run(void *priv)
-{
- struct sh_veu_dev *veu = priv;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
-
- src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
-
- if (src_buf && dst_buf)
- sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
-}
-
- /* ========== video ioctls ========== */
-
-static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
- enum v4l2_buf_type type)
-{
- return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- veu_file == veu->capture) ||
- (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- veu_file == veu->output);
-}
-
-static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
- struct vb2_queue *dst_vq);
-
-/*
- * It is not unusual to have video nodes open()ed multiple times. While some
- * V4L2 operations are non-intrusive, like querying formats and various
- * parameters, others, like setting formats, starting and stopping streaming,
- * queuing and dequeuing buffers, directly affect hardware configuration and /
- * or execution. This function verifies availability of the requested interface
- * and, if available, reserves it for the requesting user.
- */
-static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
- enum v4l2_buf_type type)
-{
- struct sh_veu_file **stream;
-
- switch (type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- stream = &veu->capture;
- break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- stream = &veu->output;
- break;
- default:
- return -EINVAL;
- }
-
- if (*stream == veu_file)
- return 0;
-
- if (*stream)
- return -EBUSY;
-
- *stream = veu_file;
-
- return 0;
-}
-
-static int sh_veu_context_init(struct sh_veu_dev *veu)
-{
- if (veu->m2m_ctx)
- return 0;
-
- veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
- sh_veu_queue_init);
-
- return PTR_ERR_OR_ZERO(veu->m2m_ctx);
-}
-
-static int sh_veu_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- strscpy(cap->driver, "sh-veu", sizeof(cap->driver));
- strscpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
- strscpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
- return 0;
-}
-
-static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
-{
- if (f->index >= fmt_num)
- return -EINVAL;
-
- f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
- return 0;
-}
-
-static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out));
-}
-
-static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in));
-}
-
-static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu,
- enum v4l2_buf_type type)
-{
- switch (type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return &veu->vfmt_out;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return &veu->vfmt_in;
- default:
- return NULL;
- }
-}
-
-static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
-{
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct sh_veu_dev *veu = veu_file->veu_dev;
- struct sh_veu_vfmt *vfmt;
-
- vfmt = sh_veu_get_vfmt(veu, f->type);
-
- pix->width = vfmt->frame.width;
- pix->height = vfmt->frame.height;
- pix->field = V4L2_FIELD_NONE;
- pix->pixelformat = vfmt->fmt->fourcc;
- pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
- pix->bytesperline = vfmt->bytesperline;
- pix->sizeimage = vfmt->bytesperline * pix->height *
- vfmt->fmt->depth / vfmt->fmt->ydepth;
- dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__,
- f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat);
-
- return 0;
-}
-
-static int sh_veu_g_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- return sh_veu_g_fmt(priv, f);
-}
-
-static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- return sh_veu_g_fmt(priv, f);
-}
-
-static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt)
-{
- struct v4l2_pix_format *pix = &f->fmt.pix;
- unsigned int y_bytes_used;
-
- /*
- * V4L2 specification suggests, that the driver should correct the
- * format struct if any of the dimensions is unsupported
- */
- switch (pix->field) {
- default:
- case V4L2_FIELD_ANY:
- pix->field = V4L2_FIELD_NONE;
- /* fall through: continue handling V4L2_FIELD_NONE */
- case V4L2_FIELD_NONE:
- break;
- }
-
- v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W,
- &pix->height, MIN_H, MAX_H, 0, 0);
-
- y_bytes_used = (pix->width * fmt->ydepth) >> 3;
-
- if (pix->bytesperline < y_bytes_used)
- pix->bytesperline = y_bytes_used;
- pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth;
-
- pix->pixelformat = fmt->fourcc;
- pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
-
- pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage);
-
- return 0;
-}
-
-static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f)
-{
- const int *fmt;
- int i, n, dflt;
-
- pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field);
-
- switch (f->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- fmt = sh_veu_fmt_out;
- n = ARRAY_SIZE(sh_veu_fmt_out);
- dflt = DEFAULT_OUT_FMTIDX;
- break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- default:
- fmt = sh_veu_fmt_in;
- n = ARRAY_SIZE(sh_veu_fmt_in);
- dflt = DEFAULT_IN_FMTIDX;
- break;
- }
-
- for (i = 0; i < n; i++)
- if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat)
- return &sh_veu_fmt[fmt[i]];
-
- return &sh_veu_fmt[dflt];
-}
-
-static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- const struct sh_veu_format *fmt;
-
- fmt = sh_veu_find_fmt(f);
-
- return sh_veu_try_fmt(f, fmt);
-}
-
-static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- const struct sh_veu_format *fmt;
-
- fmt = sh_veu_find_fmt(f);
-
- return sh_veu_try_fmt(f, fmt);
-}
-
-static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt)
-{
- /* dst_left and dst_top validity will be verified in CROP / COMPOSE */
- unsigned int left = vfmt->frame.left & ~0x03;
- unsigned int top = vfmt->frame.top;
- dma_addr_t offset = (dma_addr_t)top * veu->vfmt_out.bytesperline +
- (((dma_addr_t)left * veu->vfmt_out.fmt->depth) >> 3);
- unsigned int y_line;
-
- vfmt->offset_y = offset;
-
- switch (vfmt->fmt->fourcc) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV24:
- y_line = ALIGN(vfmt->frame.width, 16);
- vfmt->offset_c = offset + y_line * vfmt->frame.height;
- break;
- case V4L2_PIX_FMT_RGB332:
- case V4L2_PIX_FMT_RGB444:
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_BGR666:
- case V4L2_PIX_FMT_RGB24:
- vfmt->offset_c = 0;
- break;
- default:
- BUG();
- }
-}
-
-static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
-{
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct sh_veu_dev *veu = veu_file->veu_dev;
- struct sh_veu_vfmt *vfmt;
- struct vb2_queue *vq;
- int ret = sh_veu_context_init(veu);
- if (ret < 0)
- return ret;
-
- vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type);
- if (!vq)
- return -EINVAL;
-
- if (vb2_is_busy(vq)) {
- v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", __func__);
- return -EBUSY;
- }
-
- vfmt = sh_veu_get_vfmt(veu, f->type);
- /* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */
-
- vfmt->fmt = sh_veu_find_fmt(f);
- /* vfmt->fmt != NULL following the same argument as above */
- vfmt->frame.width = pix->width;
- vfmt->frame.height = pix->height;
- vfmt->bytesperline = pix->bytesperline;
-
- sh_veu_colour_offset(veu, vfmt);
-
- /*
- * We could also verify and require configuration only if any parameters
- * actually have changed, but it is unlikely, that the user requests the
- * same configuration several times without closing the device.
- */
- veu_file->cfg_needed = true;
-
- dev_dbg(veu->dev,
- "Setting format for type %d, wxh: %dx%d, fmt: %x\n",
- f->type, pix->width, pix->height, vfmt->fmt->fourcc);
-
- return 0;
-}
-
-static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- int ret = sh_veu_try_fmt_vid_cap(file, priv, f);
- if (ret)
- return ret;
-
- return sh_veu_s_fmt(priv, f);
-}
-
-static int sh_veu_s_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- int ret = sh_veu_try_fmt_vid_out(file, priv, f);
- if (ret)
- return ret;
-
- return sh_veu_s_fmt(priv, f);
-}
-
-static int sh_veu_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct sh_veu_file *veu_file = priv;
- struct sh_veu_dev *veu = veu_file->veu_dev;
- int ret = sh_veu_context_init(veu);
- if (ret < 0)
- return ret;
-
- ret = sh_veu_stream_init(veu, veu_file, reqbufs->type);
- if (ret < 0)
- return ret;
-
- return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs);
-}
-
-static int sh_veu_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct sh_veu_file *veu_file = priv;
-
- if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
- return -EBUSY;
-
- return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf);
-}
-
-static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct sh_veu_file *veu_file = priv;
-
- dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
- if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
- return -EBUSY;
-
- return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf);
-}
-
-static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct sh_veu_file *veu_file = priv;
-
- dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
- if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
- return -EBUSY;
-
- return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf);
-}
-
-static void sh_veu_calc_scale(struct sh_veu_dev *veu,
- int size_in, int size_out, int crop_out,
- u32 *mant, u32 *frac, u32 *rep)
-{
- u32 fixpoint;
-
- /* calculate FRAC and MANT */
- *rep = *mant = *frac = 0;
-
- if (size_in == size_out) {
- if (crop_out != size_out)
- *mant = 1; /* needed for cropping */
- return;
- }
-
- /* VEU2H special upscale */
- if (veu->is_2h && size_out > size_in) {
- u32 fixpoint = (4096 * size_in) / size_out;
- *mant = fixpoint / 4096;
- *frac = (fixpoint - (*mant * 4096)) & ~0x07;
-
- switch (*frac) {
- case 0x800:
- *rep = 1;
- break;
- case 0x400:
- *rep = 3;
- break;
- case 0x200:
- *rep = 7;
- break;
- }
- if (*rep)
- return;
- }
-
- fixpoint = (4096 * (size_in - 1)) / (size_out + 1);
- *mant = fixpoint / 4096;
- *frac = fixpoint - (*mant * 4096);
-
- if (*frac & 0x07) {
- /*
- * FIXME: do we really have to round down twice in the
- * up-scaling case?
- */
- *frac &= ~0x07;
- if (size_out > size_in)
- *frac -= 8; /* round down if scaling up */
- else
- *frac += 8; /* round up if scaling down */
- }
-}
-
-static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu,
- int size_in, int size_out, int crop_out)
-{
- u32 mant, frac, value, rep;
-
- sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
-
- /* set scale */
- value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) |
- (((mant << 12) | frac) << 16);
-
- sh_veu_reg_write(veu, VEU_RFCR, value);
-
- /* set clip */
- value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) |
- (((rep << 12) | crop_out) << 16);
-
- sh_veu_reg_write(veu, VEU_RFSR, value);
-
- return ALIGN((size_in * crop_out) / size_out, 4);
-}
-
-static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu,
- int size_in, int size_out, int crop_out)
-{
- u32 mant, frac, value, rep;
-
- sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);
-
- /* set scale */
- value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) |
- (mant << 12) | frac;
-
- sh_veu_reg_write(veu, VEU_RFCR, value);
-
- /* set clip */
- value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) |
- (rep << 12) | crop_out;
-
- sh_veu_reg_write(veu, VEU_RFSR, value);
-
- return ALIGN((size_in * crop_out) / size_out, 4);
-}
-
-static void sh_veu_configure(struct sh_veu_dev *veu)
-{
- u32 src_width, src_stride, src_height;
- u32 dst_width, dst_stride, dst_height;
- u32 real_w, real_h;
-
- /* reset VEU */
- sh_veu_reg_write(veu, VEU_BSRR, 0x100);
-
- src_width = veu->vfmt_in.frame.width;
- src_height = veu->vfmt_in.frame.height;
- src_stride = ALIGN(veu->vfmt_in.frame.width, 16);
-
- dst_width = real_w = veu->vfmt_out.frame.width;
- dst_height = real_h = veu->vfmt_out.frame.height;
- /* Datasheet is unclear - whether it's always number of bytes or not */
- dst_stride = veu->vfmt_out.bytesperline;
-
- /*
- * So far real_w == dst_width && real_h == dst_height, but it wasn't
- * necessarily the case in the original vidix driver, so, it may change
- * here in the future too.
- */
- src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width);
- src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height);
-
- sh_veu_reg_write(veu, VEU_SWR, src_stride);
- sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16));
- sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */
-
- sh_veu_reg_write(veu, VEU_EDWR, dst_stride);
- sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */
-
- sh_veu_reg_write(veu, VEU_SWPR, 0x67);
- sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4);
-
- if (veu->is_2h) {
- sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5);
- sh_veu_reg_write(veu, VEU_MCR01, 0x0950);
- sh_veu_reg_write(veu, VEU_MCR02, 0x0000);
-
- sh_veu_reg_write(veu, VEU_MCR10, 0x397f);
- sh_veu_reg_write(veu, VEU_MCR11, 0x0950);
- sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd);
-
- sh_veu_reg_write(veu, VEU_MCR20, 0x0000);
- sh_veu_reg_write(veu, VEU_MCR21, 0x0950);
- sh_veu_reg_write(veu, VEU_MCR22, 0x1023);
-
- sh_veu_reg_write(veu, VEU_COFFR, 0x00800010);
- }
-}
-
-static int sh_veu_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct sh_veu_file *veu_file = priv;
-
- if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
- return -EBUSY;
-
- if (veu_file->cfg_needed) {
- struct sh_veu_dev *veu = veu_file->veu_dev;
- veu_file->cfg_needed = false;
- sh_veu_configure(veu_file->veu_dev);
- veu->xaction = 0;
- veu->aborting = false;
- }
-
- return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type);
-}
-
-static int sh_veu_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct sh_veu_file *veu_file = priv;
-
- if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
- return -EBUSY;
-
- return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type);
-}
-
-static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
- .vidioc_querycap = sh_veu_querycap,
-
- .vidioc_enum_fmt_vid_cap = sh_veu_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = sh_veu_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = sh_veu_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = sh_veu_s_fmt_vid_cap,
-
- .vidioc_enum_fmt_vid_out = sh_veu_enum_fmt_vid_out,
- .vidioc_g_fmt_vid_out = sh_veu_g_fmt_vid_out,
- .vidioc_try_fmt_vid_out = sh_veu_try_fmt_vid_out,
- .vidioc_s_fmt_vid_out = sh_veu_s_fmt_vid_out,
-
- .vidioc_reqbufs = sh_veu_reqbufs,
- .vidioc_querybuf = sh_veu_querybuf,
-
- .vidioc_qbuf = sh_veu_qbuf,
- .vidioc_dqbuf = sh_veu_dqbuf,
-
- .vidioc_streamon = sh_veu_streamon,
- .vidioc_streamoff = sh_veu_streamoff,
-};
-
- /* ========== Queue operations ========== */
-
-static int sh_veu_queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], struct device *alloc_devs[])
-{
- struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
- struct sh_veu_vfmt *vfmt = sh_veu_get_vfmt(veu, vq->type);
- unsigned int count = *nbuffers;
- unsigned int size = vfmt->bytesperline * vfmt->frame.height *
- vfmt->fmt->depth / vfmt->fmt->ydepth;
-
- if (count < 2)
- *nbuffers = count = 2;
-
- if (size * count > VIDEO_MEM_LIMIT) {
- count = VIDEO_MEM_LIMIT / size;
- *nbuffers = count;
- }
-
- if (*nplanes)
- return sizes[0] < size ? -EINVAL : 0;
-
- *nplanes = 1;
- sizes[0] = size;
-
- dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);
-
- return 0;
-}
-
-static int sh_veu_buf_prepare(struct vb2_buffer *vb)
-{
- struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
- struct sh_veu_vfmt *vfmt;
- unsigned int sizeimage;
-
- vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type);
- sizeimage = vfmt->bytesperline * vfmt->frame.height *
- vfmt->fmt->depth / vfmt->fmt->ydepth;
-
- if (vb2_plane_size(vb, 0) < sizeimage) {
- dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n",
- __func__, vb2_plane_size(vb, 0), sizeimage);
- return -EINVAL;
- }
-
- vb2_set_plane_payload(vb, 0, sizeimage);
-
- return 0;
-}
-
-static void sh_veu_buf_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
- dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->type);
- v4l2_m2m_buf_queue(veu->m2m_ctx, vbuf);
-}
-
-static const struct vb2_ops sh_veu_qops = {
- .queue_setup = sh_veu_queue_setup,
- .buf_prepare = sh_veu_buf_prepare,
- .buf_queue = sh_veu_buf_queue,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
-
-static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
- struct vb2_queue *dst_vq)
-{
- struct sh_veu_dev *veu = priv;
- int ret;
-
- memset(src_vq, 0, sizeof(*src_vq));
- src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- src_vq->drv_priv = veu;
- src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->ops = &sh_veu_qops;
- src_vq->mem_ops = &vb2_dma_contig_memops;
- src_vq->lock = &veu->fop_lock;
- src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- src_vq->dev = veu->v4l2_dev.dev;
-
- ret = vb2_queue_init(src_vq);
- if (ret < 0)
- return ret;
-
- memset(dst_vq, 0, sizeof(*dst_vq));
- dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- dst_vq->drv_priv = veu;
- dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->ops = &sh_veu_qops;
- dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->lock = &veu->fop_lock;
- dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- dst_vq->dev = veu->v4l2_dev.dev;
-
- return vb2_queue_init(dst_vq);
-}
-
- /* ========== File operations ========== */
-
-static int sh_veu_open(struct file *file)
-{
- struct sh_veu_dev *veu = video_drvdata(file);
- struct sh_veu_file *veu_file;
-
- veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL);
- if (!veu_file)
- return -ENOMEM;
-
- v4l2_fh_init(&veu_file->fh, video_devdata(file));
- veu_file->veu_dev = veu;
- veu_file->cfg_needed = true;
-
- file->private_data = veu_file;
-
- pm_runtime_get_sync(veu->dev);
- v4l2_fh_add(&veu_file->fh);
-
- dev_dbg(veu->dev, "Created instance %p\n", veu_file);
-
- return 0;
-}
-
-static int sh_veu_release(struct file *file)
-{
- struct sh_veu_dev *veu = video_drvdata(file);
- struct sh_veu_file *veu_file = file->private_data;
-
- dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
-
- if (veu_file == veu->capture) {
- veu->capture = NULL;
- vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
- }
-
- if (veu_file == veu->output) {
- veu->output = NULL;
- vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT));
- }
-
- if (!veu->output && !veu->capture && veu->m2m_ctx) {
- v4l2_m2m_ctx_release(veu->m2m_ctx);
- veu->m2m_ctx = NULL;
- }
-
- pm_runtime_put(veu->dev);
- v4l2_fh_del(&veu_file->fh);
- v4l2_fh_exit(&veu_file->fh);
-
- kfree(veu_file);
-
- return 0;
-}
-
-static __poll_t sh_veu_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct sh_veu_file *veu_file = file->private_data;
-
- return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait);
-}
-
-static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct sh_veu_file *veu_file = file->private_data;
-
- return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
-}
-
-static const struct v4l2_file_operations sh_veu_fops = {
- .owner = THIS_MODULE,
- .open = sh_veu_open,
- .release = sh_veu_release,
- .poll = sh_veu_poll,
- .unlocked_ioctl = video_ioctl2,
- .mmap = sh_veu_mmap,
-};
-
-static const struct video_device sh_veu_videodev = {
- .name = "sh-veu",
- .fops = &sh_veu_fops,
- .ioctl_ops = &sh_veu_ioctl_ops,
- .minor = -1,
- .release = video_device_release_empty,
- .vfl_dir = VFL_DIR_M2M,
- .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
-};
-
-static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
- .device_run = sh_veu_device_run,
- .job_abort = sh_veu_job_abort,
-};
-
-static irqreturn_t sh_veu_bh(int irq, void *dev_id)
-{
- struct sh_veu_dev *veu = dev_id;
-
- if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) {
- v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx);
- veu->xaction = 0;
- } else {
- sh_veu_device_run(veu);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t sh_veu_isr(int irq, void *dev_id)
-{
- struct sh_veu_dev *veu = dev_id;
- struct vb2_v4l2_buffer *dst;
- struct vb2_v4l2_buffer *src;
- u32 status = sh_veu_reg_read(veu, VEU_EVTR);
-
- /* bundle read mode not used */
- if (!(status & 1))
- return IRQ_NONE;
-
- /* disable interrupt in VEU */
- sh_veu_reg_write(veu, VEU_EIER, 0);
- /* halt operation */
- sh_veu_reg_write(veu, VEU_STR, 0);
- /* ack int, write 0 to clear bits */
- sh_veu_reg_write(veu, VEU_EVTR, status & ~1);
-
- /* conversion completed */
- dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx);
- src = v4l2_m2m_src_buf_remove(veu->m2m_ctx);
- if (!src || !dst)
- return IRQ_NONE;
-
- dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
- dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst->flags |=
- src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst->timecode = src->timecode;
-
- spin_lock(&veu->lock);
- v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
- v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
- spin_unlock(&veu->lock);
-
- veu->xaction++;
-
- return IRQ_WAKE_THREAD;
-}
-
-static int sh_veu_probe(struct platform_device *pdev)
-{
- struct sh_veu_dev *veu;
- struct resource *reg_res;
- struct video_device *vdev;
- int irq, ret;
-
- reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
-
- if (!reg_res || irq <= 0) {
- dev_err(&pdev->dev, "Insufficient VEU platform information.\n");
- return -ENODEV;
- }
-
- veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL);
- if (!veu)
- return -ENOMEM;
-
- veu->is_2h = resource_size(reg_res) == 0x22c;
-
- veu->base = devm_ioremap_resource(&pdev->dev, reg_res);
- if (IS_ERR(veu->base))
- return PTR_ERR(veu->base);
-
- ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh,
- 0, "veu", veu);
- if (ret < 0)
- return ret;
-
- ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "Error registering v4l2 device\n");
- return ret;
- }
-
- vdev = &veu->vdev;
-
- *vdev = sh_veu_videodev;
- vdev->v4l2_dev = &veu->v4l2_dev;
- spin_lock_init(&veu->lock);
- mutex_init(&veu->fop_lock);
- vdev->lock = &veu->fop_lock;
-
- video_set_drvdata(vdev, veu);
-
- veu->dev = &pdev->dev;
- veu->vfmt_out = DEFAULT_OUT_VFMT;
- veu->vfmt_in = DEFAULT_IN_VFMT;
-
- veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops);
- if (IS_ERR(veu->m2m_dev)) {
- ret = PTR_ERR(veu->m2m_dev);
- v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret);
- goto em2minit;
- }
-
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
-
- ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
- pm_runtime_suspend(&pdev->dev);
- if (ret < 0)
- goto evidreg;
-
- return ret;
-
-evidreg:
- pm_runtime_disable(&pdev->dev);
- v4l2_m2m_release(veu->m2m_dev);
-em2minit:
- v4l2_device_unregister(&veu->v4l2_dev);
- return ret;
-}
-
-static int sh_veu_remove(struct platform_device *pdev)
-{
- struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
- struct sh_veu_dev *veu = container_of(v4l2_dev,
- struct sh_veu_dev, v4l2_dev);
-
- video_unregister_device(&veu->vdev);
- pm_runtime_disable(&pdev->dev);
- v4l2_m2m_release(veu->m2m_dev);
- v4l2_device_unregister(&veu->v4l2_dev);
-
- return 0;
-}
-
-static struct platform_driver __refdata sh_veu_pdrv = {
- .remove = sh_veu_remove,
- .driver = {
- .name = "sh_veu",
- },
-};
-
-module_platform_driver_probe(sh_veu_pdrv, sh_veu_probe);
-
-MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver");
-MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>");
-MODULE_LICENSE("GPL v2");
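The removed sh_veu_calc_scale() packs its scale factor as 12.12 fixed point, mantissa in the upper and fraction in the lower 12 bits, then forces the fraction to a multiple of 8 with direction-dependent rounding. A standalone rework of the common (non-VEU2H, no-crop) path for checking the arithmetic; the sizes in main() are examples only:

	#include <stdio.h>
	#include <stdint.h>

	/* Same math as the removed sh_veu_calc_scale(), common path only. */
	static void calc_scale(int size_in, int size_out,
			       uint32_t *mant, uint32_t *frac)
	{
		uint32_t fixpoint = (4096 * (size_in - 1)) / (size_out + 1);

		*mant = fixpoint / 4096;
		*frac = fixpoint - *mant * 4096;

		if (*frac & 0x07) {
			*frac &= ~0x07;
			if (size_out > size_in)
				*frac -= 8;	/* round down when scaling up */
			else
				*frac += 8;	/* round up when scaling down */
		}
	}

	int main(void)
	{
		uint32_t mant, frac;

		calc_scale(640, 480, &mant, &frac);	/* example sizes */
		printf("640 -> 480: mant=%u frac=%u (factor ~ %.4f)\n",
		       mant, frac, mant + frac / 4096.0);
		return 0;
	}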
diff --git a/drivers/media/platform/sti/cec/Makefile b/drivers/media/platform/sti/cec/Makefile
deleted file mode 100644
index d0c6b4ae94d6..000000000000
--- a/drivers/media/platform/sti/cec/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += stih-cec.o
diff --git a/drivers/media/platform/stm32/Makefile b/drivers/media/platform/stm32/Makefile
index 5ed73599ca44..48b36db2c2e2 100644
--- a/drivers/media/platform/stm32/Makefile
+++ b/drivers/media/platform/stm32/Makefile
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32-dcmi.o
-obj-$(CONFIG_VIDEO_STM32_HDMI_CEC) += stm32-cec.o
diff --git a/drivers/media/platform/sunxi/Kconfig b/drivers/media/platform/sunxi/Kconfig
index 71808e93ac2e..7151cc249afa 100644
--- a/drivers/media/platform/sunxi/Kconfig
+++ b/drivers/media/platform/sunxi/Kconfig
@@ -1,2 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
source "drivers/media/platform/sunxi/sun4i-csi/Kconfig"
source "drivers/media/platform/sunxi/sun6i-csi/Kconfig"
diff --git a/drivers/media/platform/sunxi/Makefile b/drivers/media/platform/sunxi/Makefile
index ff0993f70dc3..fc537c9f5ca9 100644
--- a/drivers/media/platform/sunxi/Makefile
+++ b/drivers/media/platform/sunxi/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
obj-y += sun4i-csi/
obj-y += sun6i-csi/
obj-y += sun8i-di/
diff --git a/drivers/media/platform/sunxi/sun4i-csi/Kconfig b/drivers/media/platform/sunxi/sun4i-csi/Kconfig
index e86e29b6a603..903c6152f6e8 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/Kconfig
+++ b/drivers/media/platform/sunxi/sun4i-csi/Kconfig
@@ -1,7 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
config VIDEO_SUN4I_CSI
tristate "Allwinner A10 CMOS Sensor Interface Support"
- depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API && HAS_DMA
+ depends on VIDEO_V4L2 && COMMON_CLK && HAS_DMA
depends on ARCH_SUNXI || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
help
diff --git a/drivers/media/platform/sunxi/sun4i-csi/Makefile b/drivers/media/platform/sunxi/sun4i-csi/Makefile
index 7c790a57f5ee..5062b006d63e 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/Makefile
+++ b/drivers/media/platform/sunxi/sun4i-csi/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
sun4i-csi-y += sun4i_csi.o
sun4i-csi-y += sun4i_dma.o
sun4i-csi-y += sun4i_v4l2.o
diff --git a/drivers/media/platform/sunxi/sun6i-csi/Kconfig b/drivers/media/platform/sunxi/sun6i-csi/Kconfig
index 269b3ebf4f52..586e3fb3a80d 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/Kconfig
+++ b/drivers/media/platform/sunxi/sun6i-csi/Kconfig
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_SUN6I_CSI
tristate "Allwinner V3s Camera Sensor Interface driver"
- depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API && HAS_DMA
+ depends on VIDEO_V4L2 && COMMON_CLK && HAS_DMA
depends on ARCH_SUNXI || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select REGMAP_MMIO
select V4L2_FWNODE
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index d78f6593ddd1..ba5d07886607 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -941,7 +941,7 @@ static int deinterlace_runtime_resume(struct device *device)
if (ret) {
dev_err(dev->dev, "Failed to enable bus clock\n");
- goto err_exlusive_rate;
+ goto err_exclusive_rate;
}
ret = clk_prepare_enable(dev->mod_clk);
@@ -969,14 +969,14 @@ static int deinterlace_runtime_resume(struct device *device)
return 0;
-err_exlusive_rate:
- clk_rate_exclusive_put(dev->mod_clk);
err_ram_clk:
clk_disable_unprepare(dev->ram_clk);
err_mod_clk:
clk_disable_unprepare(dev->mod_clk);
err_bus_clk:
clk_disable_unprepare(dev->bus_clk);
+err_exclusive_rate:
+ clk_rate_exclusive_put(dev->mod_clk);
return ret;
}
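The relocated err_exclusive_rate label restores the usual unwind ordering: error labels release resources in the reverse order of acquisition, so each failure jumps to a label that undoes only what has already succeeded. A generic sketch of the shape, with illustrative stubs standing in for the driver's clock calls:

	#include <stdio.h>

	/* Stubs standing in for the driver's clock calls; acquire_c fails. */
	static int acquire_a(void) { return 0; }	/* clk_rate_exclusive_get() */
	static int acquire_b(void) { return 0; }	/* clk_prepare_enable(bus_clk) */
	static int acquire_c(void) { return -1; }	/* clk_prepare_enable(mod_clk) */
	static void release_a(void) { puts("put exclusive rate"); }
	static void release_b(void) { puts("disable bus clock"); }

	static int example_resume(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;

		ret = acquire_b();
		if (ret)
			goto err_a;

		ret = acquire_c();
		if (ret)
			goto err_b;	/* releases b then a; c never succeeded */

		return 0;

	err_b:
		release_b();
	err_a:
		release_a();
		return ret;
	}

	int main(void)
	{
		return example_resume() ? 1 : 0;
	}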
diff --git a/drivers/media/platform/tegra-cec/Makefile b/drivers/media/platform/tegra-cec/Makefile
deleted file mode 100644
index 97e57c7493c0..000000000000
--- a/drivers/media/platform/tegra-cec/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra_cec.o
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 6c8f3702eac0..9b18db7af6c3 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -6,6 +6,7 @@
* Benoit Parrot, <bparrot@ti.com>
*/
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioctl.h>
@@ -340,6 +341,7 @@ static const struct cal_data am654_cal_data = {
* all instances.
*/
struct cal_dev {
+ struct clk *fclk;
int irq;
void __iomem *base;
struct resource *res;
@@ -412,6 +414,8 @@ struct cal_ctx {
struct cal_buffer *cur_frm;
/* Pointer pointing to next v4l2_buffer */
struct cal_buffer *next_frm;
+
+ bool dma_act;
};
static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
@@ -643,36 +647,12 @@ static void i913_errata(struct cal_dev *dev, unsigned int port)
{
u32 reg10 = reg_read(dev->cc[port], CAL_CSI2_PHY_REG10);
- set_field(&reg10, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
- CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK);
+ set_field(&reg10, 1, CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK);
cal_dbg(1, dev, "CSI2_%d_REG10 = 0x%08x\n", port, reg10);
reg_write(dev->cc[port], CAL_CSI2_PHY_REG10, reg10);
}
-static int cal_runtime_get(struct cal_dev *dev)
-{
- int r;
-
- r = pm_runtime_get_sync(&dev->pdev->dev);
-
- if (dev->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) {
- /*
- * Apply errata on both port eveytime we (re-)enable
- * the clock
- */
- i913_errata(dev, 0);
- i913_errata(dev, 1);
- }
-
- return r;
-}
-
-static inline void cal_runtime_put(struct cal_dev *dev)
-{
- pm_runtime_put_sync(&dev->pdev->dev);
-}
-
static void cal_quickdump_regs(struct cal_dev *dev)
{
cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
@@ -704,16 +684,31 @@ static void cal_quickdump_regs(struct cal_dev *dev)
*/
static void enable_irqs(struct cal_ctx *ctx)
{
+ u32 val;
+
+ const u32 cio_err_mask =
+ CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK |
+ CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK |
+ CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK |
+ CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK;
+
+ /* Enable CIO error irqs */
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_SET(1),
+ CAL_HL_IRQ_CIO_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_IRQENABLE(ctx->csi2_port),
+ cio_err_mask);
+
+ /* Always enable OCPO error */
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_SET(1), CAL_HL_IRQ_OCPO_ERR_MASK);
+
/* Enable IRQ_WDMA_END 0/1 */
- reg_write_field(ctx->dev,
- CAL_HL_IRQENABLE_SET(2),
- CAL_HL_IRQ_ENABLE,
- CAL_HL_IRQ_MASK(ctx->csi2_port));
+ val = 0;
+ set_field(&val, 1, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_SET(2), val);
/* Enable IRQ_WDMA_START 0/1 */
- reg_write_field(ctx->dev,
- CAL_HL_IRQENABLE_SET(3),
- CAL_HL_IRQ_ENABLE,
- CAL_HL_IRQ_MASK(ctx->csi2_port));
+ val = 0;
+ set_field(&val, 1, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_SET(3), val);
/* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
}
@@ -722,24 +717,59 @@ static void disable_irqs(struct cal_ctx *ctx)
{
u32 val;
+ /* Disable CIO error irqs */
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(1),
+ CAL_HL_IRQ_CIO_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_IRQENABLE(ctx->csi2_port),
+ 0);
+
/* Disable IRQ_WDMA_END 0/1 */
val = 0;
- set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ set_field(&val, 1, CAL_HL_IRQ_MASK(ctx->csi2_port));
reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val);
/* Disable IRQ_WDMA_START 0/1 */
val = 0;
- set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ set_field(&val, 1, CAL_HL_IRQ_MASK(ctx->csi2_port));
reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val);
/* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
}
+static void csi2_cio_power(struct cal_ctx *ctx, bool enable)
+{
+ u32 target_state;
+ unsigned int i;
+
+ target_state = enable ? CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON :
+ CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF;
+
+ reg_write_field(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ target_state, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
+
+ for (i = 0; i < 10; i++) {
+ u32 current_state;
+
+ current_state = reg_read_field(ctx->dev,
+ CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK);
+
+ if (current_state == target_state)
+ break;
+
+ usleep_range(1000, 1100);
+ }
+
+ if (i == 10)
+ ctx_err(ctx, "Failed to power %s complexio\n",
+ enable ? "up" : "down");
+}
+
static void csi2_phy_config(struct cal_ctx *ctx);
static void csi2_phy_init(struct cal_ctx *ctx)
{
- int i;
u32 val;
+ u32 sscounter;
/* Steps
* 1. Configure D-PHY mode and enable required lanes
@@ -762,66 +792,90 @@ static void csi2_phy_init(struct cal_ctx *ctx)
camerarx_phy_enable(ctx);
/* 2. Reset complex IO - Do not wait for reset completion */
- val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
- set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
- CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
- reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+ reg_write_field(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
+ CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x De-assert Complex IO Reset\n",
ctx->csi2_port,
reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
- /* Dummy read to allow SCP to complete */
- val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+ /* Dummy read to allow SCP reset to complete */
+ reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
/* 3.A. Program Phy Timing Parameters */
csi2_phy_config(ctx);
/* 3.B. Program Stop States */
+ /*
+ * The stop-state-counter is based on fclk cycles, and we always use
+ * the x16 and x4 settings, so stop-state-timeout =
+ * fclk-cycle * 16 * 4 * counter.
+ *
+ * Stop-state-timeout must be more than 100us as per CSI2 spec, so we
+ * calculate a timeout that's 100us (rounding up).
+ */
+ sscounter = DIV_ROUND_UP(clk_get_rate(ctx->dev->fclk), 10000 * 16 * 4);
+
val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
- set_field(&val, CAL_GEN_ENABLE,
- CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
- set_field(&val, CAL_GEN_DISABLE,
- CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
- set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
+ set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
+ set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
+ set_field(&val, sscounter, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x Stop States\n",
ctx->csi2_port,
reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
/* 4. Force FORCERXMODE */
- val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
- set_field(&val, CAL_GEN_ENABLE,
- CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
- reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
+ reg_write_field(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port),
+ 1, CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x Force RXMODE\n",
ctx->csi2_port,
reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
/* E. Power up the PHY using the complex IO */
- val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
- set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
- CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
- reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+ csi2_cio_power(ctx, true);
+}
- /* F. Wait for power up completion */
- for (i = 0; i < 10; i++) {
+static void csi2_wait_complexio_reset(struct cal_ctx *ctx)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(750);
+ while (time_before(jiffies, timeout)) {
if (reg_read_field(ctx->dev,
CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
- CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
- CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
+ CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) ==
+ CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED)
break;
- usleep_range(1000, 1100);
+ usleep_range(500, 5000);
}
- ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Powered UP %s\n",
- ctx->csi2_port,
- reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)),
- (i >= 10) ? "(timeout)" : "");
+
+ if (reg_read_field(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) !=
+ CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED)
+ ctx_err(ctx, "Timeout waiting for Complex IO reset done\n");
}
-static void csi2_wait_for_phy(struct cal_ctx *ctx)
+static void csi2_wait_stop_state(struct cal_ctx *ctx)
{
- int i;
+ unsigned long timeout;
+ timeout = jiffies + msecs_to_jiffies(750);
+ while (time_before(jiffies, timeout)) {
+ if (reg_read_field(ctx->dev,
+ CAL_CSI2_TIMING(ctx->csi2_port),
+ CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) == 0)
+ break;
+ usleep_range(500, 5000);
+ }
+
+ if (reg_read_field(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port),
+ CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) != 0)
+ ctx_err(ctx, "Timeout waiting for stop state\n");
+}
+
+static void csi2_wait_for_phy(struct cal_ctx *ctx)
+{
/* Steps
* 2. Wait for completion of reset
* Note if the external sensor is not sending byte clock,
@@ -832,32 +886,10 @@ static void csi2_wait_for_phy(struct cal_ctx *ctx)
*/
/* 2. Wait for reset completion */
- for (i = 0; i < 250; i++) {
- if (reg_read_field(ctx->dev,
- CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
- CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) ==
- CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED)
- break;
- usleep_range(1000, 1100);
- }
- ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO Reset Done (%d) %s\n",
- ctx->csi2_port,
- reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)), i,
- (i >= 250) ? "(timeout)" : "");
+ csi2_wait_complexio_reset(ctx);
/* 4. G. Wait for all enabled lane to reach stop state */
- for (i = 0; i < 10; i++) {
- if (reg_read_field(ctx->dev,
- CAL_CSI2_TIMING(ctx->csi2_port),
- CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) ==
- CAL_GEN_DISABLE)
- break;
- usleep_range(1000, 1100);
- }
- ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x Stop State Reached %s\n",
- ctx->csi2_port,
- reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)),
- (i >= 10) ? "(timeout)" : "");
+ csi2_wait_stop_state(ctx);
ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x (Bit(31,28) should be set!)\n",
(ctx->csi2_port - 1), reg_read(ctx->cc, CAL_CSI2_PHY_REG1));
@@ -866,33 +898,13 @@ static void csi2_wait_for_phy(struct cal_ctx *ctx)
static void csi2_phy_deinit(struct cal_ctx *ctx)
{
int i;
- u32 val;
- /* Power down the PHY using the complex IO */
- val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
- set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF,
- CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
- reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
-
- /* Wait for power down completion */
- for (i = 0; i < 10; i++) {
- if (reg_read_field(ctx->dev,
- CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
- CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
- CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF)
- break;
- usleep_range(1000, 1100);
- }
- ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Powered Down %s\n",
- ctx->csi2_port,
- reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)),
- (i >= 10) ? "(timeout)" : "");
+ csi2_cio_power(ctx, false);
/* Assert Complex IO Reset */
- val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
- set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL,
- CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
- reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+ reg_write_field(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL,
+ CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
/* Wait for power down completion */
for (i = 0; i < 10; i++) {
@@ -942,14 +954,15 @@ static void csi2_lane_config(struct cal_ctx *ctx)
static void csi2_ppi_enable(struct cal_ctx *ctx)
{
+ reg_write(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port), BIT(3));
reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
- CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+ 1, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
}
static void csi2_ppi_disable(struct cal_ctx *ctx)
{
reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
- CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+ 0, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
}
static void csi2_ctx_config(struct cal_ctx *ctx)
@@ -969,8 +982,7 @@ static void csi2_ctx_config(struct cal_ctx *ctx)
set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
/* Virtual Channel from the CSI2 sensor usually 0! */
set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
- /* NUM_LINES_PER_FRAME => 0 means auto detect */
- set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
+ set_field(&val, ctx->v_fmt.fmt.pix.height, CAL_CSI2_CTX_LINES_MASK);
set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
CAL_CSI2_CTX_PACK_MODE_MASK);
@@ -1024,7 +1036,7 @@ static void pix_proc_config(struct cal_ctx *ctx)
set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
set_field(&val, pack, CAL_PIX_PROC_PACK_MASK);
set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
- set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
+ set_field(&val, 1, CAL_PIX_PROC_EN_MASK);
reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
@@ -1044,7 +1056,7 @@ static void cal_wr_dma_config(struct cal_ctx *ctx,
CAL_WR_DMA_CTRL_MODE_MASK);
set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
CAL_WR_DMA_CTRL_PATTERN_MASK);
- set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
+ set_field(&val, 1, CAL_WR_DMA_CTRL_STALL_RD_MASK);
reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));
@@ -1192,57 +1204,74 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
struct cal_dev *dev = (struct cal_dev *)data;
struct cal_ctx *ctx;
struct cal_dmaqueue *dma_q;
- u32 irqst2, irqst3;
+ u32 irqst1, irqst2, irqst3;
+
+ irqst1 = reg_read(dev, CAL_HL_IRQSTATUS(1));
+ if (irqst1) {
+ int i;
+
+ reg_write(dev, CAL_HL_IRQSTATUS(1), irqst1);
+
+ if (irqst1 & CAL_HL_IRQ_OCPO_ERR_MASK)
+ dev_err_ratelimited(&dev->pdev->dev, "OCPO ERROR\n");
+
+ for (i = 1; i <= 2; ++i) {
+ if (irqst1 & CAL_HL_IRQ_CIO_MASK(i)) {
+ u32 cio_stat = reg_read(dev,
+ CAL_CSI2_COMPLEXIO_IRQSTATUS(i));
+
+ dev_err_ratelimited(&dev->pdev->dev,
+ "CIO%d error: %#08x\n", i, cio_stat);
+
+ reg_write(dev, CAL_CSI2_COMPLEXIO_IRQSTATUS(i),
+ cio_stat);
+ }
+ }
+ }
/* Check which DMA just finished */
irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
if (irqst2) {
+ int i;
+
/* Clear Interrupt status */
reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);
- /* Need to check both port */
- if (isportirqset(irqst2, 1)) {
- ctx = dev->ctx[0];
+ for (i = 1; i <= 2; ++i) {
+ if (isportirqset(irqst2, i)) {
+ ctx = dev->ctx[i - 1];
- if (ctx->cur_frm != ctx->next_frm)
- cal_process_buffer_complete(ctx);
- }
+ spin_lock(&ctx->slock);
+ ctx->dma_act = false;
- if (isportirqset(irqst2, 2)) {
- ctx = dev->ctx[1];
+ if (ctx->cur_frm != ctx->next_frm)
+ cal_process_buffer_complete(ctx);
- if (ctx->cur_frm != ctx->next_frm)
- cal_process_buffer_complete(ctx);
+ spin_unlock(&ctx->slock);
+ }
}
}
/* Check which DMA just started */
irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
if (irqst3) {
+ int i;
+
/* Clear Interrupt status */
reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);
- /* Need to check both port */
- if (isportirqset(irqst3, 1)) {
- ctx = dev->ctx[0];
- dma_q = &ctx->vidq;
-
- spin_lock(&ctx->slock);
- if (!list_empty(&dma_q->active) &&
- ctx->cur_frm == ctx->next_frm)
- cal_schedule_next_buffer(ctx);
- spin_unlock(&ctx->slock);
- }
-
- if (isportirqset(irqst3, 2)) {
- ctx = dev->ctx[1];
- dma_q = &ctx->vidq;
-
- spin_lock(&ctx->slock);
- if (!list_empty(&dma_q->active) &&
- ctx->cur_frm == ctx->next_frm)
- cal_schedule_next_buffer(ctx);
- spin_unlock(&ctx->slock);
+ for (i = 1; i <= 2; ++i) {
+ if (isportirqset(irqst3, i)) {
+ ctx = dev->ctx[i - 1];
+ dma_q = &ctx->vidq;
+
+ spin_lock(&ctx->slock);
+ ctx->dma_act = true;
+ if (!list_empty(&dma_q->active) &&
+ ctx->cur_frm == ctx->next_frm)
+ cal_schedule_next_buffer(ctx);
+ spin_unlock(&ctx->slock);
+ }
}
}
@@ -1665,7 +1694,7 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
goto err;
}
- cal_runtime_get(ctx->dev);
+ pm_runtime_get_sync(&ctx->dev->pdev->dev);
csi2_ctx_config(ctx);
pix_proc_config(ctx);
@@ -1680,7 +1709,7 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret) {
v4l2_subdev_call(ctx->sensor, core, s_power, 0);
ctx_err(ctx, "stream on failed in subdev\n");
- cal_runtime_put(ctx->dev);
+ pm_runtime_put_sync(&ctx->dev->pdev->dev);
goto err;
}
@@ -1711,10 +1740,27 @@ static void cal_stop_streaming(struct vb2_queue *vq)
struct cal_ctx *ctx = vb2_get_drv_priv(vq);
struct cal_dmaqueue *dma_q = &ctx->vidq;
struct cal_buffer *buf, *tmp;
+ unsigned long timeout;
unsigned long flags;
int ret;
+ bool dma_act;
csi2_ppi_disable(ctx);
+
+ /* wait for stream and dma to finish */
+ dma_act = true;
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (dma_act && time_before(jiffies, timeout)) {
+ msleep(50);
+
+ spin_lock_irqsave(&ctx->slock, flags);
+ dma_act = ctx->dma_act;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ }
+
+ if (dma_act)
+ ctx_err(ctx, "failed to disable dma cleanly\n");
+
disable_irqs(ctx);
csi2_phy_deinit(ctx);
@@ -1743,7 +1789,7 @@ static void cal_stop_streaming(struct vb2_queue *vq)
ctx->next_frm = NULL;
spin_unlock_irqrestore(&ctx->slock, flags);
- cal_runtime_put(ctx->dev);
+ pm_runtime_put_sync(&ctx->dev->pdev->dev);
}
static const struct vb2_ops cal_video_qops = {
@@ -2191,7 +2237,26 @@ err_exit:
return NULL;
}
-static const struct of_device_id cal_of_match[];
+static const struct of_device_id cal_of_match[] = {
+ {
+ .compatible = "ti,dra72-cal",
+ .data = (void *)&dra72x_cal_data,
+ },
+ {
+ .compatible = "ti,dra72-pre-es2-cal",
+ .data = (void *)&dra72x_es1_cal_data,
+ },
+ {
+ .compatible = "ti,dra76-cal",
+ .data = (void *)&dra76x_cal_data,
+ },
+ {
+ .compatible = "ti,am654-cal",
+ .data = (void *)&am654_cal_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cal_of_match);
static int cal_probe(struct platform_device *pdev)
{
@@ -2223,6 +2288,12 @@ static int cal_probe(struct platform_device *pdev)
/* save pdev pointer */
dev->pdev = pdev;
+ dev->fclk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(dev->fclk)) {
+ dev_err(&pdev->dev, "cannot get CAL fclk\n");
+ return PTR_ERR(dev->fclk);
+ }
+
syscon_camerrx = syscon_regmap_lookup_by_phandle(parent,
"ti,camerrx-control");
ret = of_property_read_u32_index(parent, "ti,camerrx-control", 1,
@@ -2296,20 +2367,24 @@ static int cal_probe(struct platform_device *pdev)
return -ENODEV;
}
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
pm_runtime_enable(&pdev->dev);
- ret = cal_runtime_get(dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
if (ret)
goto runtime_disable;
/* Just check we can actually access the module */
cal_get_hwinfo(dev);
- cal_runtime_put(dev);
+ pm_runtime_put_sync(&pdev->dev);
return 0;
runtime_disable:
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
for (i = 0; i < CAL_NUM_CONTEXT; i++) {
ctx = dev->ctx[i];
@@ -2333,7 +2408,7 @@ static int cal_remove(struct platform_device *pdev)
cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
- cal_runtime_get(dev);
+ pm_runtime_get_sync(&pdev->dev);
for (i = 0; i < CAL_NUM_CONTEXT; i++) {
ctx = dev->ctx[i];
@@ -2349,41 +2424,41 @@ static int cal_remove(struct platform_device *pdev)
}
}
- cal_runtime_put(dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+
return 0;
}
-#if defined(CONFIG_OF)
-static const struct of_device_id cal_of_match[] = {
- {
- .compatible = "ti,dra72-cal",
- .data = (void *)&dra72x_cal_data,
- },
- {
- .compatible = "ti,dra72-pre-es2-cal",
- .data = (void *)&dra72x_es1_cal_data,
- },
- {
- .compatible = "ti,dra76-cal",
- .data = (void *)&dra76x_cal_data,
- },
- {
- .compatible = "ti,am654-cal",
- .data = (void *)&am654_cal_data,
- },
- {},
+static int cal_runtime_resume(struct device *dev)
+{
+ struct cal_dev *caldev = dev_get_drvdata(dev);
+
+ if (caldev->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) {
+ /*
+ * Apply the errata on both ports every time we (re-)enable
+ * the clock
+ */
+ i913_errata(caldev, 0);
+ i913_errata(caldev, 1);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops cal_pm_ops = {
+ .runtime_resume = cal_runtime_resume,
};
-MODULE_DEVICE_TABLE(of, cal_of_match);
-#endif
static struct platform_driver cal_pdrv = {
.probe = cal_probe,
.remove = cal_remove,
.driver = {
.name = CAL_MODULE_NAME,
- .of_match_table = of_match_ptr(cal_of_match),
+ .pm = &cal_pm_ops,
+ .of_match_table = cal_of_match,
},
};
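The driver-private cal_runtime_get()/cal_runtime_put() wrappers can go away
because the errata hook now lives in a .runtime_resume callback, which the PM
core runs on every 0->1 usage-count transition. A reduced sketch of the
pairing (driver names assumed, not copied from the patch):

    #include <linux/pm_runtime.h>

    static int example_runtime_resume(struct device *dev)
    {
        /* Re-apply whatever state is lost while the clock is gated. */
        return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
        .runtime_resume = example_runtime_resume,
    };

    /*
     * Callers then bracket register access with:
     *     pm_runtime_get_sync(dev);   ... touch hardware ...
     *     pm_runtime_put_sync(dev);
     */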
diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti-vpe/cal_regs.h
index 0b76d1186074..ac54a2fe7bb6 100644
--- a/drivers/media/platform/ti-vpe/cal_regs.h
+++ b/drivers/media/platform/ti-vpe/cal_regs.h
@@ -101,15 +101,6 @@
#define CM_CTRL_CORE_CAMERRX_CONTROL 0x000
/*********************************************************************
-* Generic value used in various field below
-*********************************************************************/
-
-#define CAL_GEN_DISABLE 0
-#define CAL_GEN_ENABLE 1
-#define CAL_GEN_FALSE 0
-#define CAL_GEN_TRUE 1
-
-/*********************************************************************
* Field Definition Macros
*********************************************************************/
@@ -151,12 +142,11 @@
#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0 0
#define CAL_HL_IRQ_MASK(m) BIT((m) - 1)
-#define CAL_HL_IRQ_NOACTION 0x0
-#define CAL_HL_IRQ_ENABLE 0x1
-#define CAL_HL_IRQ_CLEAR 0x1
-#define CAL_HL_IRQ_DISABLED 0x0
-#define CAL_HL_IRQ_ENABLED 0x1
-#define CAL_HL_IRQ_PENDING 0x1
+
+#define CAL_HL_IRQ_OCPO_ERR_MASK BIT(6)
+
+#define CAL_HL_IRQ_CIO_MASK(i) BIT(16 + ((i) - 1) * 8)
+#define CAL_HL_IRQ_VC_MASK(i) BIT(17 + ((i) - 1) * 8)
#define CAL_PIX_PROC_EN_MASK BIT(0)
#define CAL_PIX_PROC_EXTRACT_MASK GENMASK(4, 1)
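The replacement macros encode the IRQ register layout directly: the per-port
CIO and VC status bits sit 8 bits apart, starting at bit 16. A worked
expansion of the definitions above:

    #include <linux/bits.h>

    #define CAL_HL_IRQ_CIO_MASK(i)  BIT(16 + ((i) - 1) * 8)
    #define CAL_HL_IRQ_VC_MASK(i)   BIT(17 + ((i) - 1) * 8)

    /*
     * For the two CSI-2 port instances:
     *   CAL_HL_IRQ_CIO_MASK(1) == BIT(16) == 0x00010000
     *   CAL_HL_IRQ_VC_MASK(1)  == BIT(17) == 0x00020000
     *   CAL_HL_IRQ_CIO_MASK(2) == BIT(24) == 0x01000000
     *   CAL_HL_IRQ_VC_MASK(2)  == BIT(25) == 0x02000000
     * and GENMASK(19, 0) == 0x000fffff covers all 20 lane-error bits.
     */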
@@ -414,6 +404,7 @@
#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK BIT(17)
#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK BIT(18)
#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK BIT(19)
+#define CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK GENMASK(19, 0)
#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK BIT(20)
#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK BIT(21)
#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK BIT(22)
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index ddd0e338f9e4..53570250a25d 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -17,10 +17,12 @@
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
struct video_mux {
struct v4l2_subdev subdev;
+ struct v4l2_async_notifier notifier;
struct media_pad *pads;
struct v4l2_mbus_framefmt *format_mbus;
struct mux_control *mux;
@@ -35,6 +37,12 @@ static const struct v4l2_mbus_framefmt video_mux_format_mbus_default = {
.field = V4L2_FIELD_NONE,
};
+static inline struct video_mux *
+notifier_to_video_mux(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct video_mux, notifier);
+}
+
static inline struct video_mux *v4l2_subdev_to_video_mux(struct v4l2_subdev *sd)
{
return container_of(sd, struct video_mux, subdev);
@@ -96,6 +104,7 @@ out:
static const struct media_entity_operations video_mux_ops = {
.link_setup = video_mux_link_setup,
.link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
};
static int video_mux_s_stream(struct v4l2_subdev *sd, int enable)
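Both notifier_to_video_mux() and v4l2_subdev_to_video_mux() are instances of
the container_of() idiom: recover the enclosing structure from a pointer to
one of its embedded members. Stripped to its essentials (example types, not
from the driver):

    #include <linux/kernel.h>   /* container_of() */

    struct outer {
        int x;
        struct inner {
            int y;
        } member;
    };

    static struct outer *outer_from_inner(struct inner *p)
    {
        /* Subtracts offsetof(struct outer, member) from p. */
        return container_of(p, struct outer, member);
    }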
@@ -330,36 +339,64 @@ static const struct v4l2_subdev_ops video_mux_subdev_ops = {
.video = &video_mux_subdev_video_ops,
};
-static int video_mux_parse_endpoint(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd)
+static int video_mux_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
{
- /*
- * it's not an error if remote is missing on a video-mux
- * input port, return -ENOTCONN to skip this endpoint with
- * no error.
- */
- return fwnode_device_is_available(asd->match.fwnode) ? 0 : -ENOTCONN;
+ struct video_mux *vmux = notifier_to_video_mux(notifier);
+
+ return v4l2_create_fwnode_links(sd, &vmux->subdev);
}
+static const struct v4l2_async_notifier_operations video_mux_notify_ops = {
+ .bound = video_mux_notify_bound,
+};
+
static int video_mux_async_register(struct video_mux *vmux,
unsigned int num_input_pads)
{
- unsigned int i, *ports;
+ unsigned int i;
int ret;
- ports = kcalloc(num_input_pads, sizeof(*ports), GFP_KERNEL);
- if (!ports)
- return -ENOMEM;
- for (i = 0; i < num_input_pads; i++)
- ports[i] = i;
+ v4l2_async_notifier_init(&vmux->notifier);
- ret = v4l2_async_register_fwnode_subdev(
- &vmux->subdev, sizeof(struct v4l2_async_subdev),
- ports, num_input_pads, video_mux_parse_endpoint);
+ for (i = 0; i < num_input_pads; i++) {
+ struct v4l2_async_subdev *asd;
+ struct fwnode_handle *ep;
- kfree(ports);
- return ret;
+ ep = fwnode_graph_get_endpoint_by_id(
+ dev_fwnode(vmux->subdev.dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ continue;
+
+ asd = kzalloc(sizeof(*asd), GFP_KERNEL);
+ if (!asd) {
+ fwnode_handle_put(ep);
+ return -ENOMEM;
+ }
+
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &vmux->notifier, ep, asd);
+
+ fwnode_handle_put(ep);
+
+ if (ret) {
+ kfree(asd);
+ /* OK if asd already exists */
+ if (ret != -EEXIST)
+ return ret;
+ }
+ }
+
+ vmux->notifier.ops = &video_mux_notify_ops;
+
+ ret = v4l2_async_subdev_notifier_register(&vmux->subdev,
+ &vmux->notifier);
+ if (ret)
+ return ret;
+
+ return v4l2_async_register_subdev(&vmux->subdev);
}
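One detail worth calling out in the loop above: fwnode_graph_get_endpoint_by_id()
returns a counted reference, so every path out of the iteration must drop it
with fwnode_handle_put(), and -EEXIST from the add helper is tolerated because
two local endpoints may point at the same remote subdevice. The get/use/put
discipline in a minimal sketch:

    #include <linux/property.h>

    static void walk_input_endpoints(struct fwnode_handle *fwnode,
                                     unsigned int num_ports)
    {
        unsigned int port;

        for (port = 0; port < num_ports; port++) {
            struct fwnode_handle *ep =
                fwnode_graph_get_endpoint_by_id(fwnode, port, 0,
                                        FWNODE_GRAPH_ENDPOINT_NEXT);
            if (!ep)
                continue;           /* unconnected port, not an error */

            /* ... inspect or register the endpoint ... */

            fwnode_handle_put(ep);  /* always drop the reference */
        }
    }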
static int video_mux_probe(struct platform_device *pdev)
@@ -434,7 +471,13 @@ static int video_mux_probe(struct platform_device *pdev)
vmux->subdev.entity.ops = &video_mux_ops;
- return video_mux_async_register(vmux, num_pads - 1);
+ ret = video_mux_async_register(vmux, num_pads - 1);
+ if (ret) {
+ v4l2_async_notifier_unregister(&vmux->notifier);
+ v4l2_async_notifier_cleanup(&vmux->notifier);
+ }
+
+ return ret;
}
static int video_mux_remove(struct platform_device *pdev)
@@ -442,6 +485,8 @@ static int video_mux_remove(struct platform_device *pdev)
struct video_mux *vmux = platform_get_drvdata(pdev);
struct v4l2_subdev *sd = &vmux->subdev;
+ v4l2_async_notifier_unregister(&vmux->notifier);
+ v4l2_async_notifier_cleanup(&vmux->notifier);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
index a2773ad7c185..01c96fb66414 100644
--- a/drivers/media/platform/xilinx/Kconfig
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -2,7 +2,9 @@
config VIDEO_XILINX
tristate "Xilinx Video IP (EXPERIMENTAL)"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
+ depends on VIDEO_V4L2 && OF && HAS_DMA
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
help
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index eb79d99787bd..d29e29645e04 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -36,7 +36,7 @@ config RADIO_SI476X
In order to control your radio card, you will need to use programs
that are compatible with the Video For Linux 2 API. Information on
this API and pointers to "v4l2" programs may be found at
- <file:Documentation/media/media_uapi.rst>.
+ <file:Documentation/userspace-api/media/index.rst>.
To compile this driver as a module, choose M here: the
module will be called radio-si476x.
@@ -76,7 +76,7 @@ config RADIO_MAXIRADIO
In order to control your radio card, you will need to use programs
that are compatible with the Video For Linux API. Information on
this API and pointers to "v4l" programs may be found at
- <file:Documentation/media/media_uapi.rst>.
+ <file:Documentation/userspace-api/media/index.rst>.
To compile this driver as a module, choose M here: the
module will be called radio-maxiradio.
@@ -94,7 +94,7 @@ config RADIO_SHARK
In order to control your radio card, you will need to use programs
that are compatible with the Video For Linux API. Information on
this API and pointers to "v4l" programs may be found at
- <file:Documentation/media/media_uapi.rst>.
+ <file:Documentation/userspace-api/media/index.rst>.
To compile this driver as a module, choose M here: the
module will be called radio-shark.
@@ -111,7 +111,7 @@ config RADIO_SHARK2
In order to control your radio card, you will need to use programs
that are compatible with the Video For Linux API. Information on
this API and pointers to "v4l" programs may be found at
- <file:Documentation/media/media_uapi.rst>.
+ <file:Documentation/userspace-api/media/index.rst>.
To compile this driver as a module, choose M here: the
module will be called radio-shark2.
@@ -218,7 +218,7 @@ config RADIO_WL1273
In order to control your radio card, you will need to use programs
that are compatible with the Video For Linux 2 API. Information on
this API and pointers to "v4l2" programs may be found at
- <file:Documentation/media/media_uapi.rst>.
+ <file:Documentation/userspace-api/media/index.rst>.
To compile this driver as a module, choose M here: the
module will be called radio-wl1273.
@@ -272,7 +272,7 @@ config RADIO_RTRACK
been reported to be used by these cards.
More information is contained in the file
- <file:Documentation/media/v4l-drivers/radiotrack.rst>.
+ <file:Documentation/driver-api/media/drivers/radiotrack.rst>.
To compile this driver as a module, choose M here: the
module will be called radio-aimslab.
diff --git a/drivers/media/radio/si470x/Kconfig b/drivers/media/radio/si470x/Kconfig
index a1ba8bc54b62..7161bd6cd13c 100644
--- a/drivers/media/radio/si470x/Kconfig
+++ b/drivers/media/radio/si470x/Kconfig
@@ -30,7 +30,7 @@ config USB_SI470X
Please have a look at the documentation, especially on how
to redirect the audio stream from the radio to your sound device:
- Documentation/media/v4l-drivers/si470x.rst
+ Documentation/admin-guide/media/si470x.rst
Say Y here if you want to connect this type of radio to your
computer's USB port.
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index 1dee7277004b..d5ae3388d3db 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -2,7 +2,6 @@
#
# TI's wl128x FM driver based on TI's ST driver.
#
-menu "Texas Instruments WL128x FM driver (ST based)"
config RADIO_WL128X
tristate "Texas Instruments WL128x FM Radio"
depends on VIDEO_V4L2 && RFKILL && TTY && TI_ST
@@ -13,6 +12,4 @@ config RADIO_WL128X
In order to control your radio card, you will need to use programs
that are compatible with the Video For Linux 2 API. Information on
this API and pointers to "v4l2" programs may be found at
- <file:Documentation/media/media_uapi.rst>.
-
-endmenu
+ <file:Documentation/userspace-api/media/index.rst>.
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 0f3417d161b8..5bb144435c16 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -103,12 +103,14 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_map_peek_elem_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
+ case BPF_FUNC_ktime_get_boot_ns:
+ return &bpf_ktime_get_boot_ns_proto;
case BPF_FUNC_tail_call:
return &bpf_tail_call_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_trace_printk:
- if (capable(CAP_SYS_ADMIN))
+ if (perfmon_capable())
return bpf_get_trace_printk_proto();
/* fall through */
default:
diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
index 18ca12d78314..f33b443bfa47 100644
--- a/drivers/media/rc/gpio-ir-tx.c
+++ b/drivers/media/rc/gpio-ir-tx.c
@@ -42,7 +42,7 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier)
{
struct gpio_ir *gpio_ir = dev->priv;
- if (!carrier)
+ if (carrier > 500000)
return -EINVAL;
gpio_ir->carrier = carrier;
@@ -50,10 +50,35 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier)
return 0;
}
-static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
- unsigned int count)
+static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
+ uint count)
+{
+ unsigned long flags;
+ ktime_t edge;
+ s32 delta;
+ int i;
+
+ spin_lock_irqsave(&gpio_ir->lock, flags);
+
+ edge = ktime_get();
+
+ for (i = 0; i < count; i++) {
+ gpiod_set_value(gpio_ir->gpio, !(i % 2));
+
+ edge = ktime_add_us(edge, txbuf[i]);
+ delta = ktime_us_delta(edge, ktime_get());
+ if (delta > 0)
+ udelay(delta);
+ }
+
+ gpiod_set_value(gpio_ir->gpio, 0);
+
+ spin_unlock_irqrestore(&gpio_ir->lock, flags);
+}
+
+static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf,
+ uint count)
{
- struct gpio_ir *gpio_ir = dev->priv;
unsigned long flags;
ktime_t edge;
/*
@@ -79,13 +104,8 @@ static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
// space
edge = ktime_add_us(edge, txbuf[i]);
delta = ktime_us_delta(edge, ktime_get());
- if (delta > 10) {
- spin_unlock_irqrestore(&gpio_ir->lock, flags);
- usleep_range(delta, delta + 10);
- spin_lock_irqsave(&gpio_ir->lock, flags);
- } else if (delta > 0) {
+ if (delta > 0)
udelay(delta);
- }
} else {
// pulse
ktime_t last = ktime_add_us(edge, txbuf[i]);
@@ -110,6 +130,17 @@ static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
}
spin_unlock_irqrestore(&gpio_ir->lock, flags);
+}
+
+static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+ unsigned int count)
+{
+ struct gpio_ir *gpio_ir = dev->priv;
+
+ if (gpio_ir->carrier)
+ gpio_ir_tx_modulated(gpio_ir, txbuf, count);
+ else
+ gpio_ir_tx_unmodulated(gpio_ir, txbuf, count);
return count;
}
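Both TX paths keep an absolute "edge" timestamp and delay only for the time
remaining until it, so loop overhead and GPIO latency do not accumulate as
drift across a long transmission. The timing core, reduced to a sketch:

    #include <linux/delay.h>
    #include <linux/ktime.h>

    static void play_durations(const unsigned int *usecs, unsigned int count,
                               void (*set_level)(int on))
    {
        ktime_t edge = ktime_get();
        unsigned int i;
        s32 delta;

        for (i = 0; i < count; i++) {
            set_level(!(i % 2));        /* alternate mark and space */

            edge = ktime_add_us(edge, usecs[i]);    /* absolute target */
            delta = ktime_us_delta(edge, ktime_get());
            if (delta > 0)
                udelay(delta);          /* only the remaining time */
        }

        set_level(0);
    }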
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 3c8bd13d029a..566c2816d5be 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -14,7 +14,6 @@
#include <linux/completion.h>
#include <media/rc-core.h>
-#define DRIVER_NAME "iguanair"
#define BUF_SIZE 152
struct iguanair {
@@ -27,8 +26,6 @@ struct iguanair {
uint8_t bufsize;
uint8_t cycle_overhead;
- struct mutex lock;
-
/* receiver support */
bool receiver_on;
dma_addr_t dma_in, dma_out;
@@ -284,8 +281,6 @@ static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
if (carrier < 25000 || carrier > 150000)
return -EINVAL;
- mutex_lock(&ir->lock);
-
if (carrier != ir->carrier) {
uint32_t cycles, fours, sevens;
@@ -314,8 +309,6 @@ static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
ir->packet->busy4 = 110 - fours;
}
- mutex_unlock(&ir->lock);
-
return 0;
}
@@ -326,9 +319,7 @@ static int iguanair_set_tx_mask(struct rc_dev *dev, uint32_t mask)
if (mask > 15)
return 4;
- mutex_lock(&ir->lock);
ir->packet->channels = mask << 4;
- mutex_unlock(&ir->lock);
return 0;
}
@@ -339,8 +330,6 @@ static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
unsigned int i, size, p, periods;
int rc;
- mutex_lock(&ir->lock);
-
/* convert from us to carrier periods */
for (i = size = 0; i < count; i++) {
periods = DIV_ROUND_CLOSEST(txbuf[i] * ir->carrier, 1000000);
@@ -368,8 +357,6 @@ static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
rc = -EOVERFLOW;
out:
- mutex_unlock(&ir->lock);
-
return rc ? rc : count;
}
@@ -378,14 +365,10 @@ static int iguanair_open(struct rc_dev *rdev)
struct iguanair *ir = rdev->priv;
int rc;
- mutex_lock(&ir->lock);
-
rc = iguanair_receiver(ir, true);
if (rc == 0)
ir->receiver_on = true;
- mutex_unlock(&ir->lock);
-
return rc;
}
@@ -394,14 +377,10 @@ static void iguanair_close(struct rc_dev *rdev)
struct iguanair *ir = rdev->priv;
int rc;
- mutex_lock(&ir->lock);
-
rc = iguanair_receiver(ir, false);
ir->receiver_on = false;
if (rc && rc != -ENODEV)
dev_warn(ir->dev, "failed to disable receiver: %d\n", rc);
-
- mutex_unlock(&ir->lock);
}
static int iguanair_probe(struct usb_interface *intf,
@@ -441,7 +420,6 @@ static int iguanair_probe(struct usb_interface *intf,
ir->rc = rc;
ir->dev = &intf->dev;
ir->udev = udev;
- mutex_init(&ir->lock);
init_completion(&ir->completion);
pipeout = usb_sndintpipe(udev,
@@ -483,7 +461,7 @@ static int iguanair_probe(struct usb_interface *intf,
rc->s_tx_mask = iguanair_set_tx_mask;
rc->s_tx_carrier = iguanair_set_tx_carrier;
rc->tx_ir = iguanair_tx;
- rc->driver_name = DRIVER_NAME;
+ rc->driver_name = KBUILD_MODNAME;
rc->map_name = RC_MAP_RC6_MCE;
rc->min_timeout = 1;
rc->timeout = IR_DEFAULT_TIMEOUT;
@@ -538,8 +516,6 @@ static int iguanair_suspend(struct usb_interface *intf, pm_message_t message)
struct iguanair *ir = usb_get_intfdata(intf);
int rc = 0;
- mutex_lock(&ir->lock);
-
if (ir->receiver_on) {
rc = iguanair_receiver(ir, false);
if (rc)
@@ -549,17 +525,13 @@ static int iguanair_suspend(struct usb_interface *intf, pm_message_t message)
usb_kill_urb(ir->urb_in);
usb_kill_urb(ir->urb_out);
- mutex_unlock(&ir->lock);
-
return rc;
}
static int iguanair_resume(struct usb_interface *intf)
{
struct iguanair *ir = usb_get_intfdata(intf);
- int rc = 0;
-
- mutex_lock(&ir->lock);
+ int rc;
rc = usb_submit_urb(ir->urb_in, GFP_KERNEL);
if (rc)
@@ -571,8 +543,6 @@ static int iguanair_resume(struct usb_interface *intf)
dev_warn(ir->dev, "failed to enable receiver after resume\n");
}
- mutex_unlock(&ir->lock);
-
return rc;
}
@@ -582,7 +552,7 @@ static const struct usb_device_id iguanair_table[] = {
};
static struct usb_driver iguanair_driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.probe = iguanair_probe,
.disconnect = iguanair_disconnect,
.suspend = iguanair_suspend,
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 8574eda45102..a0d9c02a7588 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -241,7 +241,7 @@ static int ir_rx51_probe(struct platform_device *dev)
}
/* Use default, in case userspace does not set the carrier */
- ir_rx51.freq = DIV_ROUND_CLOSEST(pwm_get_period(pwm), NSEC_PER_SEC);
+ ir_rx51.freq = DIV_ROUND_CLOSEST_ULL(pwm_get_period(pwm), NSEC_PER_SEC);
pwm_put(pwm);
hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
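pwm_get_period() now returns a u64 period in nanoseconds, and the plain
32-bit DIV_ROUND_CLOSEST() on a u64 numerator does not link on 32-bit
builds; DIV_ROUND_CLOSEST_ULL() goes through the div64 helpers instead. A
usage sketch (the values are illustrative; the divisor should stay 32-bit,
as the macro divides via do_div()):

    #include <linux/math64.h>
    #include <linux/time64.h>      /* NSEC_PER_USEC */

    static u32 period_us_from_ns(u64 period_ns)
    {
        /* e.g. period_ns == 26316 (a ~38 kHz carrier) -> 26 us */
        return DIV_ROUND_CLOSEST_ULL(period_ns, NSEC_PER_USEC);
    }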
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 5f36244cc34f..1eeab277a08e 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -64,6 +64,7 @@ struct ir_raw_event_ctrl {
u32 bpf_sample;
struct bpf_prog_array __rcu *progs;
#endif
+#if IS_ENABLED(CONFIG_IR_NEC_DECODER)
struct nec_dec {
int state;
unsigned count;
@@ -71,12 +72,16 @@ struct ir_raw_event_ctrl {
bool is_nec_x;
bool necx_repeat;
} nec;
+#endif
+#if IS_ENABLED(CONFIG_IR_RC5_DECODER)
struct rc5_dec {
int state;
u32 bits;
unsigned count;
bool is_rc5x;
} rc5;
+#endif
+#if IS_ENABLED(CONFIG_IR_RC6_DECODER)
struct rc6_dec {
int state;
u8 header;
@@ -85,11 +90,15 @@ struct ir_raw_event_ctrl {
unsigned count;
unsigned wanted_bits;
} rc6;
+#endif
+#if IS_ENABLED(CONFIG_IR_SONY_DECODER)
struct sony_dec {
int state;
u32 bits;
unsigned count;
} sony;
+#endif
+#if IS_ENABLED(CONFIG_IR_JVC_DECODER)
struct jvc_dec {
int state;
u16 bits;
@@ -98,17 +107,23 @@ struct ir_raw_event_ctrl {
bool first;
bool toggle;
} jvc;
+#endif
+#if IS_ENABLED(CONFIG_IR_SANYO_DECODER)
struct sanyo_dec {
int state;
unsigned count;
u64 bits;
} sanyo;
+#endif
+#if IS_ENABLED(CONFIG_IR_SHARP_DECODER)
struct sharp_dec {
int state;
unsigned count;
u32 bits;
unsigned int pulse_len;
} sharp;
+#endif
+#if IS_ENABLED(CONFIG_IR_MCE_KBD_DECODER)
struct mce_kbd_dec {
/* locks key up timer */
spinlock_t keylock;
@@ -119,11 +134,15 @@ struct ir_raw_event_ctrl {
unsigned count;
unsigned wanted_bits;
} mce_kbd;
+#endif
+#if IS_ENABLED(CONFIG_IR_XMP_DECODER)
struct xmp_dec {
int state;
unsigned count;
u32 durations[16];
} xmp;
+#endif
+#if IS_ENABLED(CONFIG_IR_IMON_DECODER)
struct imon_dec {
int state;
int count;
@@ -131,11 +150,14 @@ struct ir_raw_event_ctrl {
unsigned int bits;
bool stick_keyboard;
} imon;
+#endif
+#if IS_ENABLED(CONFIG_IR_RCMM_DECODER)
struct rcmm_dec {
int state;
unsigned int count;
u32 bits;
} rcmm;
+#endif
};
/* Mutex for locking raw IR processing and handler change */
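Wrapping each decoder's state in IS_ENABLED() removes the field entirely when
the corresponding protocol decoder is compiled out, while still covering both
built-in and modular configurations (=y and =m). The idiom on its own:

    #include <linux/kconfig.h>

    struct example_ctrl {
        int always_present;
    #if IS_ENABLED(CONFIG_IR_NEC_DECODER)   /* true for =y and =m */
        struct {
            int state;
        } nec;              /* exists only when the decoder does */
    #endif
    };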
diff --git a/drivers/media/spi/Kconfig b/drivers/media/spi/Kconfig
index bcc49cb47de6..857ef4ace6e9 100644
--- a/drivers/media/spi/Kconfig
+++ b/drivers/media/spi/Kconfig
@@ -1,15 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
if VIDEO_V4L2
-comment "SPI drivers hidden by 'Autoselect ancillary drivers'"
- depends on MEDIA_HIDE_ANCILLARY_SUBDRV
+comment "SPI I2C drivers auto-selected by 'Autoselect ancillary drivers'"
+ depends on MEDIA_HIDE_ANCILLARY_SUBDRV && SPI
menu "SPI helper chips"
visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
config VIDEO_GS1662
tristate "Gennum Serializers video"
- depends on SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on SPI && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
help
Enable the GS1662 driver which serializes video streams.
diff --git a/drivers/media/test-drivers/Kconfig b/drivers/media/test-drivers/Kconfig
new file mode 100644
index 000000000000..188381c85593
--- /dev/null
+++ b/drivers/media/test-drivers/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menuconfig V4L_TEST_DRIVERS
+ bool "V4L test drivers"
+ depends on VIDEO_DEV
+
+if V4L_TEST_DRIVERS
+
+source "drivers/media/test-drivers/vimc/Kconfig"
+
+source "drivers/media/test-drivers/vivid/Kconfig"
+
+config VIDEO_VIM2M
+ tristate "Virtual Memory-to-Memory Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
+ help
+ This is a virtual test device for the memory-to-memory driver
+ framework.
+
+source "drivers/media/test-drivers/vicodec/Kconfig"
+
+endif #V4L_TEST_DRIVERS
diff --git a/drivers/media/test-drivers/Makefile b/drivers/media/test-drivers/Makefile
new file mode 100644
index 000000000000..74410d3a9f2d
--- /dev/null
+++ b/drivers/media/test-drivers/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the test drivers.
+#
+
+obj-$(CONFIG_VIDEO_VIMC) += vimc/
+obj-$(CONFIG_VIDEO_VIVID) += vivid/
+obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
+obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
diff --git a/drivers/media/platform/vicodec/Kconfig b/drivers/media/test-drivers/vicodec/Kconfig
index 89456665cb16..d77c67810c73 100644
--- a/drivers/media/platform/vicodec/Kconfig
+++ b/drivers/media/test-drivers/vicodec/Kconfig
@@ -4,6 +4,8 @@ config VIDEO_VICODEC
depends on VIDEO_DEV && VIDEO_V4L2
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
help
Driver for a Virtual Codec
diff --git a/drivers/media/platform/vicodec/Makefile b/drivers/media/test-drivers/vicodec/Makefile
index 01bf7e9308a6..01bf7e9308a6 100644
--- a/drivers/media/platform/vicodec/Makefile
+++ b/drivers/media/test-drivers/vicodec/Makefile
diff --git a/drivers/media/platform/vicodec/codec-fwht.c b/drivers/media/test-drivers/vicodec/codec-fwht.c
index 31faf319e508..31faf319e508 100644
--- a/drivers/media/platform/vicodec/codec-fwht.c
+++ b/drivers/media/test-drivers/vicodec/codec-fwht.c
diff --git a/drivers/media/platform/vicodec/codec-fwht.h b/drivers/media/test-drivers/vicodec/codec-fwht.h
index b6fec2b1cbca..b6fec2b1cbca 100644
--- a/drivers/media/platform/vicodec/codec-fwht.h
+++ b/drivers/media/test-drivers/vicodec/codec-fwht.h
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
index b6e39fbd8ad5..b6e39fbd8ad5 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+++ b/drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.h b/drivers/media/test-drivers/vicodec/codec-v4l2-fwht.h
index 1a0d2a9f931a..1a0d2a9f931a 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.h
+++ b/drivers/media/test-drivers/vicodec/codec-v4l2-fwht.h
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index 30ced1c21387..e879290727ef 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -2114,16 +2114,19 @@ static int vicodec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- if (register_instance(dev, &dev->stateful_enc,
- "stateful-encoder", true))
+ ret = register_instance(dev, &dev->stateful_enc, "stateful-encoder",
+ true);
+ if (ret)
goto unreg_dev;
- if (register_instance(dev, &dev->stateful_dec,
- "stateful-decoder", false))
+ ret = register_instance(dev, &dev->stateful_dec, "stateful-decoder",
+ false);
+ if (ret)
goto unreg_sf_enc;
- if (register_instance(dev, &dev->stateless_dec,
- "stateless-decoder", false))
+ ret = register_instance(dev, &dev->stateless_dec, "stateless-decoder",
+ false);
+ if (ret)
goto unreg_sf_dec;
#ifdef CONFIG_MEDIA_CONTROLLER
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/test-drivers/vim2m.c
index ac6717fbb764..a776bb8e0e09 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -216,7 +216,6 @@ struct vim2m_ctx {
struct mutex vb_mutex;
struct delayed_work work_run;
- spinlock_t irqlock;
/* Abort requested by m2m */
int aborting;
@@ -622,7 +621,6 @@ static void device_work(struct work_struct *w)
struct vim2m_ctx *curr_ctx;
struct vim2m_dev *vim2m_dev;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
- unsigned long flags;
curr_ctx = container_of(w, struct vim2m_ctx, work_run.work);
@@ -638,10 +636,8 @@ static void device_work(struct work_struct *w)
curr_ctx->num_processed++;
- spin_lock_irqsave(&curr_ctx->irqlock, flags);
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
- spin_unlock_irqrestore(&curr_ctx->irqlock, flags);
if (curr_ctx->num_processed == curr_ctx->translen
|| curr_ctx->aborting) {
@@ -1084,7 +1080,6 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
struct vb2_v4l2_buffer *vbuf;
- unsigned long flags;
cancel_delayed_work_sync(&ctx->work_run);
@@ -1097,9 +1092,7 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
return;
v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
&ctx->hdl);
- spin_lock_irqsave(&ctx->irqlock, flags);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
- spin_unlock_irqrestore(&ctx->irqlock, flags);
}
}
@@ -1226,7 +1219,6 @@ static int vim2m_open(struct file *file)
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
mutex_init(&ctx->vb_mutex);
- spin_lock_init(&ctx->irqlock);
INIT_DELAYED_WORK(&ctx->work_run, device_work);
if (IS_ERR(ctx->fh.m2m_ctx)) {
diff --git a/drivers/media/platform/vimc/Kconfig b/drivers/media/test-drivers/vimc/Kconfig
index bd221d3e1a4a..4068a67585f9 100644
--- a/drivers/media/platform/vimc/Kconfig
+++ b/drivers/media/test-drivers/vimc/Kconfig
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_VIMC
tristate "Virtual Media Controller Driver (VIMC)"
- depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_VMALLOC
select VIDEO_V4L2_TPG
help
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/test-drivers/vimc/Makefile
index a53b2b532e9f..a53b2b532e9f 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/test-drivers/vimc/Makefile
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c
index 23e740c1c5c0..c63496b17b9a 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/test-drivers/vimc/vimc-capture.c
@@ -37,7 +37,7 @@ static const struct v4l2_pix_format fmt_default = {
.height = 480,
.pixelformat = V4L2_PIX_FMT_RGB24,
.field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_DEFAULT,
+ .colorspace = V4L2_COLORSPACE_SRGB,
};
struct vimc_cap_buffer {
@@ -107,6 +107,9 @@ static int vimc_cap_try_fmt_vid_cap(struct file *file, void *priv,
vimc_colorimetry_clamp(format);
+ if (format->colorspace == V4L2_COLORSPACE_DEFAULT)
+ format->colorspace = fmt_default.colorspace;
+
return 0;
}
@@ -146,7 +149,16 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
static int vimc_cap_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- const struct vimc_pix_map *vpix = vimc_pix_map_by_index(f->index);
+ const struct vimc_pix_map *vpix;
+
+ if (f->mbus_code) {
+ if (f->index > 0)
+ return -EINVAL;
+
+ vpix = vimc_pix_map_by_code(f->mbus_code);
+ } else {
+ vpix = vimc_pix_map_by_index(f->index);
+ }
if (!vpix)
return -EINVAL;
@@ -325,7 +337,7 @@ static const struct media_entity_operations vimc_cap_mops = {
.link_validate = vimc_vdev_link_validate,
};
-void vimc_cap_release(struct vimc_ent_device *ved)
+static void vimc_cap_release(struct vimc_ent_device *ved)
{
struct vimc_cap_device *vcap =
container_of(ved, struct vimc_cap_device, ved);
@@ -334,7 +346,7 @@ void vimc_cap_release(struct vimc_ent_device *ved)
kfree(vcap);
}
-void vimc_cap_unregister(struct vimc_ent_device *ved)
+static void vimc_cap_unregister(struct vimc_ent_device *ved)
{
struct vimc_cap_device *vcap =
container_of(ved, struct vimc_cap_device, ved);
@@ -382,8 +394,8 @@ static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
return NULL;
}
-struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
- const char *vcfg_name)
+static struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
const struct vimc_pix_map *vpix;
@@ -395,7 +407,7 @@ struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
/* Allocate the vimc_cap_device struct */
vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
if (!vcap)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Initialize the media entity */
vcap->vdev.entity.name = vcfg_name;
@@ -447,7 +459,8 @@ struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
/* Initialize the video_device struct */
vdev = &vcap->vdev;
- vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
+ | V4L2_CAP_IO_MC;
vdev->entity.ops = &vimc_cap_mops;
vdev->release = video_device_release_empty;
vdev->fops = &vimc_cap_fops;
@@ -476,5 +489,11 @@ err_clean_m_ent:
err_free_vcap:
kfree(vcap);
- return NULL;
+ return ERR_PTR(ret);
}
+
+struct vimc_ent_type vimc_cap_type = {
+ .add = vimc_cap_add,
+ .unregister = vimc_cap_unregister,
+ .release = vimc_cap_release
+};
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/test-drivers/vimc/vimc-common.c
index c95c17c048f2..7b27153c0728 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/test-drivers/vimc/vimc-common.c
@@ -19,19 +19,31 @@ static const struct vimc_pix_map vimc_pix_map_list[] = {
/* RGB formats */
{
- .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .code = {
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_BGR888_3X8
+ },
.pixelformat = V4L2_PIX_FMT_BGR24,
.bpp = 3,
.bayer = false,
},
{
- .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .code = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_3X8,
+ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ MEDIA_BUS_FMT_GBR888_1X24
+ },
.pixelformat = V4L2_PIX_FMT_RGB24,
.bpp = 3,
.bayer = false,
},
{
- .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .code = { MEDIA_BUS_FMT_ARGB8888_1X32 },
.pixelformat = V4L2_PIX_FMT_ARGB32,
.bpp = 4,
.bayer = false,
@@ -39,49 +51,49 @@ static const struct vimc_pix_map vimc_pix_map_list[] = {
/* Bayer formats */
{
- .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .code = { MEDIA_BUS_FMT_SBGGR8_1X8 },
.pixelformat = V4L2_PIX_FMT_SBGGR8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .code = { MEDIA_BUS_FMT_SGBRG8_1X8 },
.pixelformat = V4L2_PIX_FMT_SGBRG8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .code = { MEDIA_BUS_FMT_SGRBG8_1X8 },
.pixelformat = V4L2_PIX_FMT_SGRBG8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .code = { MEDIA_BUS_FMT_SRGGB8_1X8 },
.pixelformat = V4L2_PIX_FMT_SRGGB8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .code = { MEDIA_BUS_FMT_SBGGR10_1X10 },
.pixelformat = V4L2_PIX_FMT_SBGGR10,
.bpp = 2,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .code = { MEDIA_BUS_FMT_SGBRG10_1X10 },
.pixelformat = V4L2_PIX_FMT_SGBRG10,
.bpp = 2,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .code = { MEDIA_BUS_FMT_SGRBG10_1X10 },
.pixelformat = V4L2_PIX_FMT_SGRBG10,
.bpp = 2,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .code = { MEDIA_BUS_FMT_SRGGB10_1X10 },
.pixelformat = V4L2_PIX_FMT_SRGGB10,
.bpp = 2,
.bayer = true,
@@ -89,25 +101,25 @@ static const struct vimc_pix_map vimc_pix_map_list[] = {
/* 10bit raw bayer a-law compressed to 8 bits */
{
- .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .code = { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8 },
.pixelformat = V4L2_PIX_FMT_SBGGR10ALAW8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .code = { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8 },
.pixelformat = V4L2_PIX_FMT_SGBRG10ALAW8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .code = { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 },
.pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .code = { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8 },
.pixelformat = V4L2_PIX_FMT_SRGGB10ALAW8,
.bpp = 1,
.bayer = true,
@@ -115,49 +127,49 @@ static const struct vimc_pix_map vimc_pix_map_list[] = {
/* 10bit raw bayer DPCM compressed to 8 bits */
{
- .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .code = { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 },
.pixelformat = V4L2_PIX_FMT_SBGGR10DPCM8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .code = { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 },
.pixelformat = V4L2_PIX_FMT_SGBRG10DPCM8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .code = { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 },
.pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .code = { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 },
.pixelformat = V4L2_PIX_FMT_SRGGB10DPCM8,
.bpp = 1,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .code = { MEDIA_BUS_FMT_SBGGR12_1X12 },
.pixelformat = V4L2_PIX_FMT_SBGGR12,
.bpp = 2,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .code = { MEDIA_BUS_FMT_SGBRG12_1X12 },
.pixelformat = V4L2_PIX_FMT_SGBRG12,
.bpp = 2,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .code = { MEDIA_BUS_FMT_SGRBG12_1X12 },
.pixelformat = V4L2_PIX_FMT_SGRBG12,
.bpp = 2,
.bayer = true,
},
{
- .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .code = { MEDIA_BUS_FMT_SRGGB12_1X12 },
.pixelformat = V4L2_PIX_FMT_SRGGB12,
.bpp = 2,
.bayer = true,
@@ -182,13 +194,32 @@ const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
return &vimc_pix_map_list[i];
}
+u32 vimc_mbus_code_by_index(unsigned int index)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
+ for (j = 0; j < ARRAY_SIZE(vimc_pix_map_list[i].code); j++) {
+ if (!vimc_pix_map_list[i].code[j])
+ break;
+
+ if (!index)
+ return vimc_pix_map_list[i].code[j];
+ index--;
+ }
+ }
+ return 0;
+}
+
const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
{
- unsigned int i;
+ unsigned int i, j;
for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
- if (vimc_pix_map_list[i].code == code)
- return &vimc_pix_map_list[i];
+ for (j = 0; j < ARRAY_SIZE(vimc_pix_map_list[i].code); j++) {
+ if (vimc_pix_map_list[i].code[j] == code)
+ return &vimc_pix_map_list[i];
+ }
}
return NULL;
}
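vimc_mbus_code_by_index() flattens the now two-dimensional code table into a
single running index and returns 0 once it runs past the end, which maps
directly onto the -EINVAL termination contract of the enum_mbus_code pad op.
A hedged sketch of a caller (the subdev plumbing is omitted):

    static int example_enum_mbus_code(unsigned int index, u32 *code)
    {
        u32 mbus_code = vimc_mbus_code_by_index(index);

        if (!mbus_code)
            return -EINVAL;     /* enumeration exhausted */

        *code = mbus_code;
        return 0;
    }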
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/test-drivers/vimc/vimc-common.h
index 616d5a6b0754..ae163dec2459 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/test-drivers/vimc/vimc-common.h
@@ -32,8 +32,10 @@
#define VIMC_IS_SRC(pad) (pad)
#define VIMC_IS_SINK(pad) (!(pad))
+#define VIMC_PIX_FMT_MAX_CODES 8
+
/**
- * struct vimc_colorimetry_clamp - Adjust colorimetry parameters
+ * vimc_colorimetry_clamp - Adjust colorimetry parameters
*
* @fmt: the pointer to struct v4l2_pix_format or
* struct v4l2_mbus_framefmt
@@ -62,14 +64,15 @@ do { \
* struct vimc_pix_map - maps media bus code with v4l2 pixel format
*
* @code: media bus format code defined by MEDIA_BUS_FMT_* macros
- * @bbp: number of bytes each pixel occupies
- * @pixelformat: pixel format devined by V4L2_PIX_FMT_* macros
+ * @bpp: number of bytes each pixel occupies
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
+ * @bayer: true if this is a bayer format
*
* Struct which matches the MEDIA_BUS_FMT_* codes with the corresponding
* V4L2_PIX_FMT_* fourcc pixelformat and its bytes per pixel (bpp)
*/
struct vimc_pix_map {
- unsigned int code;
+ unsigned int code[VIMC_PIX_FMT_MAX_CODES];
unsigned int bpp;
u32 pixelformat;
bool bayer;
@@ -90,7 +93,7 @@ struct vimc_pix_map {
* the node it will be of an instance of v4l2_subdev or video_device struct
* where both contains a struct media_entity.
* Those structures should embedded the vimc_ent_device struct through
- * v4l2_set_subdevdata() and video_set_drvdata() respectivaly, allowing the
+ * v4l2_set_subdevdata() and video_set_drvdata() respectively, allowing the
* vimc_ent_device struct to be retrieved from the corresponding struct
* media_entity
*/
@@ -106,10 +109,10 @@ struct vimc_ent_device {
/**
* struct vimc_device - main device for vimc driver
*
- * @pipe_cfg pointer to the vimc pipeline configuration structure
- * @ent_devs array of vimc_ent_device pointers
- * @mdev the associated media_device parent
- * @v4l2_dev Internal v4l2 parent device
+ * @pipe_cfg: pointer to the vimc pipeline configuration structure
+ * @ent_devs: array of vimc_ent_device pointers
+ * @mdev: the associated media_device parent
+ * @v4l2_dev: Internal v4l2 parent device
*/
struct vimc_device {
const struct vimc_pipeline_config *pipe_cfg;
@@ -119,20 +122,16 @@ struct vimc_device {
};
/**
- * struct vimc_ent_config Structure which describes individual
- * configuration for each entity
+ * struct vimc_ent_type - Structure for the callbacks of the entity types
*
- * @name entity name
- * @ved pointer to vimc_ent_device (a node in the
- * topology)
- * @add initializes and registers
- * vim entity - called from vimc-core
- * @unregister unregisters vimc entity - called from vimc-core
- * @release releases vimc entity - called from the v4l2_dev
- * release callback
+ *
+ * @add: initializes and registers
+ * vimc entity - called from vimc-core
+ * @unregister: unregisters vimc entity - called from vimc-core
+ * @release: releases vimc entity - called from the v4l2_dev
+ * release callback
*/
-struct vimc_ent_config {
- const char *name;
+struct vimc_ent_type {
struct vimc_ent_device *(*add)(struct vimc_device *vimc,
const char *vcfg_name);
void (*unregister)(struct vimc_ent_device *ved);
@@ -140,6 +139,19 @@ struct vimc_ent_config {
};
/**
+ * struct vimc_ent_config - Structure which describes individual
+ * configuration for each entity
+ *
+ * @name: entity name
+ * @type: contains the callbacks of this entity type
+ *
+ */
+struct vimc_ent_config {
+ const char *name;
+ struct vimc_ent_type *type;
+};
+
+/**
* vimc_is_source - returns true if the entity has only source pads
*
* @ent: pointer to &struct media_entity
@@ -147,23 +159,10 @@ struct vimc_ent_config {
*/
bool vimc_is_source(struct media_entity *ent);
-/* prototypes for vimc_ent_config hooks */
-struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
- const char *vcfg_name);
-void vimc_cap_unregister(struct vimc_ent_device *ved);
-void vimc_cap_release(struct vimc_ent_device *ved);
-
-struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
- const char *vcfg_name);
-void vimc_deb_release(struct vimc_ent_device *ved);
-
-struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
- const char *vcfg_name);
-void vimc_sca_release(struct vimc_ent_device *ved);
-
-struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
- const char *vcfg_name);
-void vimc_sen_release(struct vimc_ent_device *ved);
+extern struct vimc_ent_type vimc_sen_type;
+extern struct vimc_ent_type vimc_deb_type;
+extern struct vimc_ent_type vimc_sca_type;
+extern struct vimc_ent_type vimc_cap_type;
/**
* vimc_pix_map_by_index - get vimc_pix_map struct by its index
@@ -173,6 +172,15 @@ void vimc_sen_release(struct vimc_ent_device *ved);
const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i);
/**
+ * vimc_mbus_code_by_index - get mbus code by its index
+ *
+ * @index: index of the mbus code in vimc_pix_map_list
+ *
+ * Returns 0 if no mbus code is found for the given index.
+ */
+u32 vimc_mbus_code_by_index(unsigned int index);
+
+/**
* vimc_pix_map_by_code - get vimc_pix_map struct by media bus code
*
* @code: media bus format code defined by MEDIA_BUS_FMT_* macros
@@ -182,7 +190,7 @@ const struct vimc_pix_map *vimc_pix_map_by_code(u32 code);
/**
* vimc_pix_map_by_pixelformat - get vimc_pix_map struct by v4l2 pixel format
*
- * @pixelformat: pixel format devined by V4L2_PIX_FMT_* macros
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
*/
const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
@@ -197,7 +205,7 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
* @function: media entity function defined by MEDIA_ENT_F_* macros
* @num_pads: number of pads to initialize
* @pads: the array of pads of the entity, the caller should set the
- flags of the pads
+ * flags of the pads
* @sd_ops: pointer to &struct v4l2_subdev_ops.
*
* Helper function initialize and register the struct vimc_ent_device and struct
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c
index 339126e565dc..11210aaa2551 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/test-drivers/vimc/vimc-core.c
@@ -47,52 +47,40 @@ struct vimc_pipeline_config {
static struct vimc_ent_config ent_config[] = {
{
.name = "Sensor A",
- .add = vimc_sen_add,
- .release = vimc_sen_release,
+ .type = &vimc_sen_type
},
{
.name = "Sensor B",
- .add = vimc_sen_add,
- .release = vimc_sen_release,
+ .type = &vimc_sen_type
},
{
.name = "Debayer A",
- .add = vimc_deb_add,
- .release = vimc_deb_release,
+ .type = &vimc_deb_type
},
{
.name = "Debayer B",
- .add = vimc_deb_add,
- .release = vimc_deb_release,
+ .type = &vimc_deb_type
},
{
.name = "Raw Capture 0",
- .add = vimc_cap_add,
- .unregister = vimc_cap_unregister,
- .release = vimc_cap_release,
+ .type = &vimc_cap_type
},
{
.name = "Raw Capture 1",
- .add = vimc_cap_add,
- .unregister = vimc_cap_unregister,
- .release = vimc_cap_release,
+ .type = &vimc_cap_type
},
{
/* TODO: change this to vimc-input when it is implemented */
.name = "RGB/YUV Input",
- .add = vimc_sen_add,
- .release = vimc_sen_release,
+ .type = &vimc_sen_type
},
{
.name = "Scaler",
- .add = vimc_sca_add,
- .release = vimc_sca_release,
+ .type = &vimc_sca_type
},
{
.name = "RGB/YUV Capture",
- .add = vimc_cap_add,
- .unregister = vimc_cap_unregister,
- .release = vimc_cap_release,
+ .type = &vimc_cap_type
},
};
@@ -160,40 +148,45 @@ err_rm_links:
return ret;
}
-static int vimc_add_subdevs(struct vimc_device *vimc)
+static void vimc_release_subdevs(struct vimc_device *vimc)
{
unsigned int i;
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
- dev_dbg(vimc->mdev.dev, "new entity for %s\n",
- vimc->pipe_cfg->ents[i].name);
- vimc->ent_devs[i] = vimc->pipe_cfg->ents[i].add(vimc,
- vimc->pipe_cfg->ents[i].name);
- if (!vimc->ent_devs[i]) {
- dev_err(vimc->mdev.dev, "add new entity for %s\n",
- vimc->pipe_cfg->ents[i].name);
- return -EINVAL;
- }
- }
- return 0;
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ if (vimc->ent_devs[i])
+ vimc->pipe_cfg->ents[i].type->release(vimc->ent_devs[i]);
}
-static void vimc_release_subdevs(struct vimc_device *vimc)
+static void vimc_unregister_subdevs(struct vimc_device *vimc)
{
unsigned int i;
for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
- if (vimc->ent_devs[i])
- vimc->pipe_cfg->ents[i].release(vimc->ent_devs[i]);
+ if (vimc->ent_devs[i] && vimc->pipe_cfg->ents[i].type->unregister)
+ vimc->pipe_cfg->ents[i].type->unregister(vimc->ent_devs[i]);
}
-static void vimc_unregister_subdevs(struct vimc_device *vimc)
+static int vimc_add_subdevs(struct vimc_device *vimc)
{
unsigned int i;
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
- if (vimc->ent_devs[i] && vimc->pipe_cfg->ents[i].unregister)
- vimc->pipe_cfg->ents[i].unregister(vimc->ent_devs[i]);
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
+ dev_dbg(vimc->mdev.dev, "new entity for %s\n",
+ vimc->pipe_cfg->ents[i].name);
+ vimc->ent_devs[i] = vimc->pipe_cfg->ents[i].type->add(vimc,
+ vimc->pipe_cfg->ents[i].name);
+ if (IS_ERR(vimc->ent_devs[i])) {
+ int err = PTR_ERR(vimc->ent_devs[i]);
+
+ dev_err(vimc->mdev.dev, "adding entity %s failed (%d)\n",
+ vimc->pipe_cfg->ents[i].name, err);
+ vimc->ent_devs[i] = NULL;
+ vimc_unregister_subdevs(vimc);
+ vimc_release_subdevs(vimc);
+ return err;
+ }
+ }
+ return 0;
}
static void vimc_v4l2_dev_release(struct v4l2_device *v4l2_dev)
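The add() hooks now report why creation failed by encoding the errno in the
returned pointer rather than collapsing every failure to NULL. The
ERR_PTR()/IS_ERR()/PTR_ERR() triple in isolation (example types):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct widget {
        int id;
    };

    static struct widget *widget_create(void)
    {
        struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
            return ERR_PTR(-ENOMEM);   /* errno travels in the pointer */
        return w;
    }

    /*
     * Caller side:
     *     w = widget_create();
     *     if (IS_ERR(w))
     *         return PTR_ERR(w);
     */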
@@ -229,8 +222,7 @@ static int vimc_register_devices(struct vimc_device *vimc)
/* Invoke entity config hooks to initialize and register subdevs */
ret = vimc_add_subdevs(vimc);
if (ret)
- /* remove sundevs that got added */
- goto err_rm_subdevs;
+ goto err_free_ent_devs;
/* Initialize links */
ret = vimc_create_links(vimc);
@@ -261,6 +253,7 @@ err_mdev_unregister:
err_rm_subdevs:
vimc_unregister_subdevs(vimc);
vimc_release_subdevs(vimc);
+err_free_ent_devs:
kfree(vimc->ent_devs);
err_v4l2_unregister:
v4l2_device_unregister(&vimc->v4l2_dev);
@@ -268,13 +261,6 @@ err_v4l2_unregister:
return ret;
}
-static void vimc_unregister(struct vimc_device *vimc)
-{
- vimc_unregister_subdevs(vimc);
- media_device_unregister(&vimc->mdev);
- v4l2_device_unregister(&vimc->v4l2_dev);
-}
-
static int vimc_probe(struct platform_device *pdev)
{
struct vimc_device *vimc;
@@ -321,7 +307,9 @@ static int vimc_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "remove");
- vimc_unregister(vimc);
+ vimc_unregister_subdevs(vimc);
+ media_device_unregister(&vimc->mdev);
+ v4l2_device_unregister(&vimc->v4l2_dev);
v4l2_device_put(&vimc->v4l2_dev);
return 0;
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/test-drivers/vimc/vimc-debayer.c
index baf6bf9f65b5..c3f6fef34f68 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/test-drivers/vimc/vimc-debayer.c
@@ -48,7 +48,20 @@ static const struct v4l2_mbus_framefmt sink_fmt_default = {
.height = 480,
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
.field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_DEFAULT,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+};
+
+static const u32 vimc_deb_src_mbus_codes[] = {
+ MEDIA_BUS_FMT_GBR888_1X24,
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_BGR888_3X8,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_3X8,
+ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI,
};
static const struct vimc_deb_pix_map vimc_deb_pix_map_list[] = {
@@ -125,6 +138,17 @@ static const struct vimc_deb_pix_map *vimc_deb_pix_map_by_code(u32 code)
return NULL;
}
+static bool vimc_deb_src_code_is_valid(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_deb_src_mbus_codes); i++)
+ if (vimc_deb_src_mbus_codes[i] == code)
+ return true;
+
+ return false;
+}
+
static int vimc_deb_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg)
{
@@ -148,14 +172,11 @@ static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
- /* We only support one format for source pads */
if (VIMC_IS_SRC(code->pad)) {
- struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
-
- if (code->index)
+ if (code->index >= ARRAY_SIZE(vimc_deb_src_mbus_codes))
return -EINVAL;
- code->code = vdeb->src_code;
+ code->code = vimc_deb_src_mbus_codes[code->index];
} else {
if (code->index >= ARRAY_SIZE(vimc_deb_pix_map_list))
return -EINVAL;
@@ -170,8 +191,6 @@ static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
-
if (fse->index)
return -EINVAL;
@@ -181,7 +200,7 @@ static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
if (!vpix)
return -EINVAL;
- } else if (fse->code != vdeb->src_code) {
+ } else if (!vimc_deb_src_code_is_valid(fse->code)) {
return -EINVAL;
}
@@ -237,6 +256,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
{
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
+ u32 *src_code;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
@@ -244,8 +264,10 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
return -EBUSY;
sink_fmt = &vdeb->sink_fmt;
+ src_code = &vdeb->src_code;
} else {
sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ src_code = &v4l2_subdev_get_try_format(sd, cfg, 1)->code;
}
/*
@@ -253,9 +275,14 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
* it is propagated from the sink
*/
if (VIMC_IS_SRC(fmt->pad)) {
+ u32 code = fmt->format.code;
+
fmt->format = *sink_fmt;
- /* TODO: Add support for other formats */
- fmt->format.code = vdeb->src_code;
+
+ if (vimc_deb_src_code_is_valid(code))
+ *src_code = code;
+
+ fmt->format.code = *src_code;
} else {
/* Set the new format in the sink pad */
vimc_deb_adjust_sink_fmt(&fmt->format);
@@ -286,16 +313,26 @@ static const struct v4l2_subdev_pad_ops vimc_deb_pad_ops = {
.set_fmt = vimc_deb_set_fmt,
};
-static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
- unsigned int lin,
- unsigned int col,
- unsigned int rgb[3])
+static void vimc_deb_process_rgb_frame(struct vimc_deb_device *vdeb,
+ unsigned int lin,
+ unsigned int col,
+ unsigned int rgb[3])
{
+ const struct vimc_pix_map *vpix;
unsigned int i, index;
+ vpix = vimc_pix_map_by_code(vdeb->src_code);
index = VIMC_FRAME_INDEX(lin, col, vdeb->sink_fmt.width, 3);
- for (i = 0; i < 3; i++)
- vdeb->src_frame[index + i] = rgb[i];
+ for (i = 0; i < 3; i++) {
+ switch (vpix->pixelformat) {
+ case V4L2_PIX_FMT_RGB24:
+ vdeb->src_frame[index + i] = rgb[i];
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ vdeb->src_frame[index + i] = rgb[2 - i];
+ break;
+ }
+ }
}
static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
@@ -494,7 +531,7 @@ static const struct v4l2_ctrl_ops vimc_deb_ctrl_ops = {
.s_ctrl = vimc_deb_s_ctrl,
};
-void vimc_deb_release(struct vimc_ent_device *ved)
+static void vimc_deb_release(struct vimc_ent_device *ved)
{
struct vimc_deb_device *vdeb =
container_of(ved, struct vimc_deb_device, ved);
@@ -522,8 +559,8 @@ static const struct v4l2_ctrl_config vimc_deb_ctrl_mean_win_size = {
.def = 3,
};
-struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
- const char *vcfg_name)
+static struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_deb_device *vdeb;
@@ -532,7 +569,7 @@ struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
/* Allocate the vdeb struct */
vdeb = kzalloc(sizeof(*vdeb), GFP_KERNEL);
if (!vdeb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Create controls: */
v4l2_ctrl_handler_init(&vdeb->hdl, 2);
@@ -568,7 +605,7 @@ struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
* for the code
*/
vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
- vdeb->set_rgb_src = vimc_deb_set_rgb_mbus_fmt_rgb888_1x24;
+ vdeb->set_rgb_src = vimc_deb_process_rgb_frame;
return &vdeb->ved;
@@ -577,5 +614,10 @@ err_free_hdl:
err_free_vdeb:
kfree(vdeb);
- return NULL;
+ return ERR_PTR(ret);
}
+
+struct vimc_ent_type vimc_deb_type = {
+ .add = vimc_deb_add,
+ .release = vimc_deb_release
+};
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/test-drivers/vimc/vimc-scaler.c
index 7521439747c5..121fa7d62a2e 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/test-drivers/vimc/vimc-scaler.c
@@ -42,7 +42,7 @@ static const struct v4l2_mbus_framefmt sink_fmt_default = {
.height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
.code = MEDIA_BUS_FMT_RGB888_1X24,
.field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_DEFAULT,
+ .colorspace = V4L2_COLORSPACE_SRGB,
};
static const struct v4l2_rect crop_rect_default = {
@@ -110,13 +110,19 @@ static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
- const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
+ u32 mbus_code = vimc_mbus_code_by_index(code->index);
+ const struct vimc_pix_map *vpix;
+
+ if (!mbus_code)
+ return -EINVAL;
+
+ vpix = vimc_pix_map_by_code(mbus_code);
/* We don't support bayer format */
if (!vpix || vpix->bayer)
return -EINVAL;
- code->code = vpix->code;
+ code->code = mbus_code;
return 0;
}
@@ -464,7 +470,7 @@ static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
return vsca->src_frame;
};
-void vimc_sca_release(struct vimc_ent_device *ved)
+static void vimc_sca_release(struct vimc_ent_device *ved)
{
struct vimc_sca_device *vsca =
container_of(ved, struct vimc_sca_device, ved);
@@ -473,8 +479,8 @@ void vimc_sca_release(struct vimc_ent_device *ved)
kfree(vsca);
}
-struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
- const char *vcfg_name)
+static struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sca_device *vsca;
@@ -483,7 +489,7 @@ struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
/* Allocate the vsca struct */
vsca = kzalloc(sizeof(*vsca), GFP_KERNEL);
if (!vsca)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Initialize ved and sd */
vsca->pads[0].flags = MEDIA_PAD_FL_SINK;
@@ -495,7 +501,7 @@ struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
vsca->pads, &vimc_sca_ops);
if (ret) {
kfree(vsca);
- return NULL;
+ return ERR_PTR(ret);
}
vsca->ved.process_frame = vimc_sca_process_frame;
@@ -509,3 +515,8 @@ struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
return &vsca->ved;
}
+
+struct vimc_ent_type vimc_sca_type = {
+ .add = vimc_sca_add,
+ .release = vimc_sca_release
+};
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/test-drivers/vimc/vimc-sensor.c
index 92daee58209e..a2f09ac9a360 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/test-drivers/vimc/vimc-sensor.c
@@ -30,7 +30,7 @@ static const struct v4l2_mbus_framefmt fmt_default = {
.height = 480,
.code = MEDIA_BUS_FMT_RGB888_1X24,
.field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_DEFAULT,
+ .colorspace = V4L2_COLORSPACE_SRGB,
};
static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
@@ -52,12 +52,12 @@ static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
- const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
+ u32 mbus_code = vimc_mbus_code_by_index(code->index);
- if (!vpix)
+ if (!mbus_code)
return -EINVAL;
- code->code = vpix->code;
+ code->code = mbus_code;
return 0;
}
@@ -279,7 +279,7 @@ static const struct v4l2_ctrl_ops vimc_sen_ctrl_ops = {
.s_ctrl = vimc_sen_s_ctrl,
};
-void vimc_sen_release(struct vimc_ent_device *ved)
+static void vimc_sen_release(struct vimc_ent_device *ved)
{
struct vimc_sen_device *vsen =
container_of(ved, struct vimc_sen_device, ved);
@@ -307,8 +307,8 @@ static const struct v4l2_ctrl_config vimc_sen_ctrl_test_pattern = {
.qmenu = tpg_pattern_strings,
};
-struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
- const char *vcfg_name)
+static struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sen_device *vsen;
@@ -317,7 +317,7 @@ struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
/* Allocate the vsen struct */
vsen = kzalloc(sizeof(*vsen), GFP_KERNEL);
if (!vsen)
- return NULL;
+ return ERR_PTR(-ENOMEM);
v4l2_ctrl_handler_init(&vsen->hdl, 4);
@@ -372,5 +372,10 @@ err_free_hdl:
err_free_vsen:
kfree(vsen);
- return NULL;
+ return ERR_PTR(ret);
}
+
+struct vimc_ent_type vimc_sen_type = {
+ .add = vimc_sen_add,
+ .release = vimc_sen_release
+};
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/test-drivers/vimc/vimc-streamer.c
index 65feb3c596db..65feb3c596db 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/test-drivers/vimc/vimc-streamer.c
diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/test-drivers/vimc/vimc-streamer.h
index fe3c51f15fad..3bb6731b8d4d 100644
--- a/drivers/media/platform/vimc/vimc-streamer.h
+++ b/drivers/media/test-drivers/vimc/vimc-streamer.h
@@ -20,9 +20,10 @@
*
* @pipe: the media pipeline object associated with this stream
* @ved_pipeline: array containing all the entities participating in the
- * stream. The order is from a video device (usually a capture device) where
- * stream_on was called, to the entity generating the first base image to be
- * processed in the pipeline.
+ * stream. The order is from a video device (usually a
+ * capture device) where stream_on was called, to the
+ * entity generating the first base image to be
+ * processed in the pipeline.
* @pipe_size: size of @ved_pipeline
* @kthread: thread that generates the frames of the stream.
*
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/test-drivers/vivid/Kconfig
index e2ff06edfa93..c3abde2986b2 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/test-drivers/vivid/Kconfig
@@ -11,6 +11,8 @@ config VIDEO_VIVID
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEO_V4L2_TPG
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
help
Enables a virtual video driver. This driver emulates a webcam,
TV, S-Video and HDMI capture hardware, including VBI support for
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/test-drivers/vivid/Makefile
index b12ad0152a3e..b12ad0152a3e 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/test-drivers/vivid/Makefile
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c
index 4d2413e87730..4d2413e87730 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/test-drivers/vivid/vivid-cec.c
diff --git a/drivers/media/platform/vivid/vivid-cec.h b/drivers/media/test-drivers/vivid/vivid-cec.h
index 7524ed48a914..7524ed48a914 100644
--- a/drivers/media/platform/vivid/vivid-cec.h
+++ b/drivers/media/test-drivers/vivid/vivid-cec.h
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index 6c740e3e6999..6c740e3e6999 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index 99e69b8f770f..99e69b8f770f 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
index 334130568dcb..334130568dcb 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
diff --git a/drivers/media/platform/vivid/vivid-ctrls.h b/drivers/media/test-drivers/vivid/vivid-ctrls.h
index 6fad5f5d0054..6fad5f5d0054 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.h
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.h
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
index 01a9d671b947..01a9d671b947 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.h b/drivers/media/test-drivers/vivid/vivid-kthread-cap.h
index 0f43015306d6..0f43015306d6 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.h
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
index 6780687978f9..6780687978f9 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.h b/drivers/media/test-drivers/vivid/vivid-kthread-out.h
index d5bcf44bbaca..d5bcf44bbaca 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.h
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.h
diff --git a/drivers/media/platform/vivid/vivid-kthread-touch.c b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
index 674507b5ccb5..674507b5ccb5 100644
--- a/drivers/media/platform/vivid/vivid-kthread-touch.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
diff --git a/drivers/media/platform/vivid/vivid-kthread-touch.h b/drivers/media/test-drivers/vivid/vivid-kthread-touch.h
index ecf79b46209e..ecf79b46209e 100644
--- a/drivers/media/platform/vivid/vivid-kthread-touch.h
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.h
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.c b/drivers/media/test-drivers/vivid/vivid-meta-cap.c
index 780f96860a6d..780f96860a6d 100644
--- a/drivers/media/platform/vivid/vivid-meta-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-meta-cap.c
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.h b/drivers/media/test-drivers/vivid/vivid-meta-cap.h
index 4670d00d1576..4670d00d1576 100644
--- a/drivers/media/platform/vivid/vivid-meta-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-meta-cap.h
diff --git a/drivers/media/platform/vivid/vivid-meta-out.c b/drivers/media/test-drivers/vivid/vivid-meta-out.c
index ff8a039aba72..ff8a039aba72 100644
--- a/drivers/media/platform/vivid/vivid-meta-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-meta-out.c
diff --git a/drivers/media/platform/vivid/vivid-meta-out.h b/drivers/media/test-drivers/vivid/vivid-meta-out.h
index 0c639b7c2842..0c639b7c2842 100644
--- a/drivers/media/platform/vivid/vivid-meta-out.h
+++ b/drivers/media/test-drivers/vivid/vivid-meta-out.h
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/test-drivers/vivid/vivid-osd.c
index fbaec8acc161..fbaec8acc161 100644
--- a/drivers/media/platform/vivid/vivid-osd.c
+++ b/drivers/media/test-drivers/vivid/vivid-osd.c
diff --git a/drivers/media/platform/vivid/vivid-osd.h b/drivers/media/test-drivers/vivid/vivid-osd.h
index f9ac1af25dd3..f9ac1af25dd3 100644
--- a/drivers/media/platform/vivid/vivid-osd.h
+++ b/drivers/media/test-drivers/vivid/vivid-osd.h
diff --git a/drivers/media/platform/vivid/vivid-radio-common.c b/drivers/media/test-drivers/vivid/vivid-radio-common.c
index 138c7bce68b1..138c7bce68b1 100644
--- a/drivers/media/platform/vivid/vivid-radio-common.c
+++ b/drivers/media/test-drivers/vivid/vivid-radio-common.c
diff --git a/drivers/media/platform/vivid/vivid-radio-common.h b/drivers/media/test-drivers/vivid/vivid-radio-common.h
index 30a9900e5b2b..30a9900e5b2b 100644
--- a/drivers/media/platform/vivid/vivid-radio-common.h
+++ b/drivers/media/test-drivers/vivid/vivid-radio-common.h
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
index 232cab508f48..232cab508f48 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.c
+++ b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.h b/drivers/media/test-drivers/vivid/vivid-radio-rx.h
index c9c7849f6f99..c9c7849f6f99 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.h
+++ b/drivers/media/test-drivers/vivid/vivid-radio-rx.h
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/test-drivers/vivid/vivid-radio-tx.c
index 049d40b948bb..049d40b948bb 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.c
+++ b/drivers/media/test-drivers/vivid/vivid-radio-tx.c
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.h b/drivers/media/test-drivers/vivid/vivid-radio-tx.h
index c2bf1e7e634a..c2bf1e7e634a 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.h
+++ b/drivers/media/test-drivers/vivid/vivid-radio-tx.h
diff --git a/drivers/media/platform/vivid/vivid-rds-gen.c b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
index b5b104ee64c9..b5b104ee64c9 100644
--- a/drivers/media/platform/vivid/vivid-rds-gen.c
+++ b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
diff --git a/drivers/media/platform/vivid/vivid-rds-gen.h b/drivers/media/test-drivers/vivid/vivid-rds-gen.h
index 35ac5742302b..35ac5742302b 100644
--- a/drivers/media/platform/vivid/vivid-rds-gen.h
+++ b/drivers/media/test-drivers/vivid/vivid-rds-gen.h
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
index 2b7522e16efc..2b7522e16efc 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.h b/drivers/media/test-drivers/vivid/vivid-sdr-cap.h
index 813c9248e5a7..813c9248e5a7 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.h
diff --git a/drivers/media/platform/vivid/vivid-touch-cap.c b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
index ebb00b128030..ebb00b128030 100644
--- a/drivers/media/platform/vivid/vivid-touch-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
diff --git a/drivers/media/platform/vivid/vivid-touch-cap.h b/drivers/media/test-drivers/vivid/vivid-touch-cap.h
index 07e514046ae8..07e514046ae8 100644
--- a/drivers/media/platform/vivid/vivid-touch-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-touch-cap.h
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
index 1a9348eea781..1a9348eea781 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.h b/drivers/media/test-drivers/vivid/vivid-vbi-cap.h
index 91d2de01381c..91d2de01381c 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-cap.h
diff --git a/drivers/media/platform/vivid/vivid-vbi-gen.c b/drivers/media/test-drivers/vivid/vivid-vbi-gen.c
index acc98445a1fa..acc98445a1fa 100644
--- a/drivers/media/platform/vivid/vivid-vbi-gen.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-gen.c
diff --git a/drivers/media/platform/vivid/vivid-vbi-gen.h b/drivers/media/test-drivers/vivid/vivid-vbi-gen.h
index 2657a7f5571c..2657a7f5571c 100644
--- a/drivers/media/platform/vivid/vivid-vbi-gen.h
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-gen.h
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/test-drivers/vivid/vivid-vbi-out.c
index cd56476902a2..cd56476902a2 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-out.c
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.h b/drivers/media/test-drivers/vivid/vivid-vbi-out.h
index 76584940cdaf..76584940cdaf 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.h
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-out.h
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index e94beef008c8..e94beef008c8 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.h b/drivers/media/test-drivers/vivid/vivid-vid-cap.h
index 1e422a59eeab..1e422a59eeab 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.h
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/test-drivers/vivid/vivid-vid-common.c
index 76b0be670ebb..76b0be670ebb 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-common.c
diff --git a/drivers/media/platform/vivid/vivid-vid-common.h b/drivers/media/test-drivers/vivid/vivid-vid-common.h
index d908d9725283..d908d9725283 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.h
+++ b/drivers/media/test-drivers/vivid/vivid-vid-common.h
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
index ee3446e3217c..ee3446e3217c 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
diff --git a/drivers/media/platform/vivid/vivid-vid-out.h b/drivers/media/test-drivers/vivid/vivid-vid-out.h
index 8d56314f4ea1..8d56314f4ea1 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.h
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.h
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index e104bb7766e1..4605bb377574 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -15,7 +15,7 @@ config MEDIA_TUNER
select MEDIA_TUNER_TDA9887 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MC44S803 if MEDIA_SUBDRV_AUTOSELECT
-comment "Tuner drivers hidden by 'Autoselect ancillary drivers'"
+comment "Tuner drivers auto-selected by 'Autoselect ancillary drivers'"
depends on MEDIA_HIDE_ANCILLARY_SUBDRV
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT
@@ -223,7 +223,7 @@ config MEDIA_TUNER_TDA18212
config MEDIA_TUNER_E4000
tristate "Elonics E4000 silicon tuner"
- depends on MEDIA_SUPPORT && I2C
+ depends on MEDIA_SUPPORT && I2C && VIDEO_V4L2
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -231,7 +231,7 @@ config MEDIA_TUNER_E4000
config MEDIA_TUNER_FC2580
tristate "FCI FC2580 silicon tuner"
- depends on MEDIA_SUPPORT && I2C
+ depends on MEDIA_SUPPORT && I2C && VIDEO_V4L2
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 898e0f9f8b70..fefb2625f655 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -9,6 +9,10 @@
static const struct dvb_tuner_ops si2157_ops;
+static int tuner_lock_debug;
+module_param(tuner_lock_debug, int, 0644);
+MODULE_PARM_DESC(tuner_lock_debug, "if set, signal lock is briefly waited on after setting params");
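+/* illustrative usage: "modprobe si2157 tuner_lock_debug=1"; with perms 0644 it is
+ * also writable at runtime via /sys/module/si2157/parameters/tuner_lock_debug */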
+
/* execute firmware command */
static int si2157_cmd_execute(struct i2c_client *client, struct si2157_cmd *cmd)
{
@@ -47,14 +51,20 @@ static int si2157_cmd_execute(struct i2c_client *client, struct si2157_cmd *cmd)
break;
}
- dev_dbg(&client->dev, "cmd execution took %d ms\n",
- jiffies_to_msecs(jiffies) -
- (jiffies_to_msecs(timeout) - TIMEOUT));
+ dev_dbg(&client->dev, "cmd execution took %d ms, status=%x\n",
+ jiffies_to_msecs(jiffies) -
+ (jiffies_to_msecs(timeout) - TIMEOUT),
+ cmd->args[0]);
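+	/* args[0] is the chip status byte: bit 7 = command complete (CTS),
+	 * bit 6 = error -- bit names assumed from the Si21xx convention */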
if (!((cmd->args[0] >> 7) & 0x01)) {
ret = -ETIMEDOUT;
goto err_mutex_unlock;
}
+ /* check error status bit */
+ if (cmd->args[0] & 0x40) {
+ ret = -EAGAIN;
+ goto err_mutex_unlock;
+ }
}
mutex_unlock(&dev->i2c_mutex);
@@ -75,24 +85,23 @@ static int si2157_init(struct dvb_frontend *fe)
struct si2157_cmd cmd;
const struct firmware *fw;
const char *fw_name;
- unsigned int uitmp, chip_id;
+ unsigned int chip_id, xtal_trim;
dev_dbg(&client->dev, "\n");
- /* Returned IF frequency is garbage when firmware is not running */
- memcpy(cmd.args, "\x15\x00\x06\x07", 4);
+	/* Try to get the Xtal trim property to verify the tuner is still running */
+ memcpy(cmd.args, "\x15\x00\x04\x02", 4);
cmd.wlen = 4;
cmd.rlen = 4;
ret = si2157_cmd_execute(client, &cmd);
- if (ret)
- goto err;
- uitmp = cmd.args[2] << 0 | cmd.args[3] << 8;
- dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp);
+ xtal_trim = cmd.args[2] | (cmd.args[3] << 8);
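+	/* valid trim codes are assumed to be 0-15; anything else means the
+	 * firmware is not running and the readback is garbage */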
- if (uitmp == dev->if_frequency / 1000)
+ if (ret == 0 && xtal_trim < 16)
goto warm;
+ dev->if_frequency = 0; /* we no longer know current tuner state */
+
/* power up */
if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
@@ -106,7 +115,7 @@ static int si2157_init(struct dvb_frontend *fe)
}
cmd.rlen = 1;
ret = si2157_cmd_execute(client, &cmd);
- if (ret)
+ if (ret && (dev->chiptype != SI2157_CHIPTYPE_SI2141 || ret != -EAGAIN))
goto err;
/* Si2141 needs a second command before it answers the revision query */
@@ -230,6 +239,28 @@ skip_fw_download:
dev_info(&client->dev, "firmware version: %c.%c.%d\n",
cmd.args[6], cmd.args[7], cmd.args[8]);
+
+ /* enable tuner status flags */
+ memcpy(cmd.args, "\x14\x00\x01\x05\x01\x00", 6);
+ cmd.wlen = 6;
+ cmd.rlen = 1;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ memcpy(cmd.args, "\x14\x00\x01\x06\x01\x00", 6);
+ cmd.wlen = 6;
+ cmd.rlen = 1;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ memcpy(cmd.args, "\x14\x00\x01\x07\x01\x00", 6);
+ cmd.wlen = 6;
+ cmd.rlen = 1;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
warm:
/* init statistics in order signal app which are supported */
c->strength.len = 1;
@@ -274,6 +305,93 @@ err:
return ret;
}
+static int si2157_tune_wait(struct i2c_client *client, u8 is_digital)
+{
+#define TUN_TIMEOUT 40
+#define DIG_TIMEOUT 30
+#define ANALOG_TIMEOUT 150
+ struct si2157_dev *dev = i2c_get_clientdata(client);
+ int ret;
+ unsigned long timeout;
+ unsigned long start_time;
+ u8 wait_status;
+ u8 tune_lock_mask;
+
+ if (is_digital)
+ tune_lock_mask = 0x04;
+ else
+ tune_lock_mask = 0x02;
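+	/* status bits assumed per Si21xx: 0x01 tune complete (TUNINT),
+	 * 0x02 analog lock (ATVINT), 0x04 digital lock (DTVINT) */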
+
+ mutex_lock(&dev->i2c_mutex);
+
+ /* wait tuner command complete */
+ start_time = jiffies;
+ timeout = start_time + msecs_to_jiffies(TUN_TIMEOUT);
+ while (1) {
+ ret = i2c_master_recv(client, &wait_status,
+ sizeof(wait_status));
+ if (ret < 0) {
+ goto err_mutex_unlock;
+ } else if (ret != sizeof(wait_status)) {
+ ret = -EREMOTEIO;
+ goto err_mutex_unlock;
+ }
+
+ if (time_after(jiffies, timeout))
+ break;
+
+ /* tuner done? */
+ if ((wait_status & 0x81) == 0x81)
+ break;
+ usleep_range(5000, 10000);
+ }
+
+ dev_dbg(&client->dev, "tuning took %d ms, status=0x%x\n",
+ jiffies_to_msecs(jiffies) - jiffies_to_msecs(start_time),
+ wait_status);
+
+ /* if we tuned ok, wait a bit for tuner lock */
+ if (tuner_lock_debug && (wait_status & 0x81) == 0x81) {
+ if (is_digital)
+ timeout = jiffies + msecs_to_jiffies(DIG_TIMEOUT);
+ else
+ timeout = jiffies + msecs_to_jiffies(ANALOG_TIMEOUT);
+
+ while (!time_after(jiffies, timeout)) {
+ ret = i2c_master_recv(client, &wait_status,
+ sizeof(wait_status));
+ if (ret < 0) {
+ goto err_mutex_unlock;
+ } else if (ret != sizeof(wait_status)) {
+ ret = -EREMOTEIO;
+ goto err_mutex_unlock;
+ }
+
+ /* tuner locked? */
+ if (wait_status & tune_lock_mask)
+ break;
+ usleep_range(5000, 10000);
+ }
+
+ dev_dbg(&client->dev, "tuning+lock took %d ms, status=0x%x\n",
+ jiffies_to_msecs(jiffies) - jiffies_to_msecs(start_time),
+ wait_status);
+ }
+
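+	/* require CTS (0x80) set and ERR (0x40) clear in the final status byte */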
+ if ((wait_status & 0xc0) != 0x80) {
+ ret = -ETIMEDOUT;
+ goto err_mutex_unlock;
+ }
+
+ mutex_unlock(&dev->i2c_mutex);
+ return 0;
+
+err_mutex_unlock:
+ mutex_unlock(&dev->i2c_mutex);
+ dev_err(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
static int si2157_set_params(struct dvb_frontend *fe)
{
struct i2c_client *client = fe->tuner_priv;
@@ -344,7 +462,7 @@ static int si2157_set_params(struct dvb_frontend *fe)
if (ret)
goto err;
- /* set if frequency if needed */
+	/* set the digital IF frequency if needed */
if (if_frequency != dev->if_frequency) {
memcpy(cmd.args, "\x14\x00\x06\x07", 4);
cmd.args[4] = (if_frequency / 1000) & 0xff;
@@ -358,7 +476,7 @@ static int si2157_set_params(struct dvb_frontend *fe)
dev->if_frequency = if_frequency;
}
- /* set frequency */
+ /* set digital frequency */
memcpy(cmd.args, "\x41\x00\x00\x00\x00\x00\x00\x00", 8);
cmd.args[4] = (c->frequency >> 0) & 0xff;
cmd.args[5] = (c->frequency >> 8) & 0xff;
@@ -370,21 +488,283 @@ static int si2157_set_params(struct dvb_frontend *fe)
if (ret)
goto err;
+ dev->bandwidth = bandwidth;
+ dev->frequency = c->frequency;
+
+ si2157_tune_wait(client, 1); /* wait to complete, ignore any errors */
+
return 0;
err:
+ dev->bandwidth = 0;
+ dev->frequency = 0;
+ dev->if_frequency = 0;
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
+static int si2157_set_analog_params(struct dvb_frontend *fe,
+ struct analog_parameters *params)
+{
+ struct i2c_client *client = fe->tuner_priv;
+ struct si2157_dev *dev = i2c_get_clientdata(client);
+ char *std; /* for debugging */
+ int ret;
+ struct si2157_cmd cmd;
+ u32 bandwidth = 0;
+ u32 if_frequency = 0;
+ u32 freq = 0;
+ u64 tmp_lval = 0;
+ u8 system = 0;
+ u8 color = 0; /* 0=NTSC/PAL, 0x10=SECAM */
+ u8 invert_analog = 1; /* analog tuner spectrum; 0=normal, 1=inverted */
+
+ if (dev->chiptype != SI2157_CHIPTYPE_SI2157) {
+ dev_info(&client->dev, "Analog tuning not supported for chiptype=%u\n",
+ dev->chiptype);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (!dev->active)
+ si2157_init(fe);
+
+ if (!dev->active) {
+ ret = -EAGAIN;
+ goto err;
+ }
+ if (params->mode == V4L2_TUNER_RADIO) {
+ /*
+ * std = "fm";
+ * bandwidth = 1700000; //best can do for FM, AGC will be a mess though
+ * if_frequency = 1250000; //HVR-225x(saa7164), HVR-12xx(cx23885)
+ * if_frequency = 6600000; //HVR-9xx(cx231xx)
+ * if_frequency = 5500000; //HVR-19xx(pvrusb2)
+ */
+ dev_err(&client->dev, "si2157 does not currently support FM radio\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ tmp_lval = params->frequency * 625LL;
+	do_div(tmp_lval, 10); /* multiply by 62.5 to convert V4L2 tuner units to Hz */
+ freq = (u32)tmp_lval;
+
+	if (freq < 1000000) /* is freq in kHz? */
+ freq = freq * 1000;
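+	/* e.g. a PAL channel passed as 3124 (62.5 kHz units) becomes
+	 * 195250, which is < 1000000 and so scaled to 195250000 Hz */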
+ dev->frequency = freq;
+
+	/* if_frequency values based on tda18271C2 */
+ if (params->std & (V4L2_STD_B | V4L2_STD_GH)) {
+ if (freq >= 470000000) {
+ std = "palGH";
+ bandwidth = 8000000;
+ if_frequency = 6000000;
+ system = 1;
+ if (params->std &
+ (V4L2_STD_SECAM_G | V4L2_STD_SECAM_H)) {
+ std = "secamGH";
+ color = 0x10;
+ }
+ } else {
+ std = "palB";
+ bandwidth = 7000000;
+ if_frequency = 6000000;
+ system = 0;
+ if (params->std & V4L2_STD_SECAM_B) {
+ std = "secamB";
+ color = 0x10;
+ }
+ }
+ } else if (params->std & V4L2_STD_MN) {
+ std = "MN";
+ bandwidth = 6000000;
+ if_frequency = 5400000;
+ system = 2;
+ } else if (params->std & V4L2_STD_PAL_I) {
+ std = "palI";
+ bandwidth = 8000000;
+ if_frequency = 7250000; /* TODO: does not work yet */
+ system = 4;
+ } else if (params->std & V4L2_STD_DK) {
+ std = "palDK";
+ bandwidth = 8000000;
+ if_frequency = 6900000; /* TODO: does not work yet */
+ system = 5;
+ if (params->std & V4L2_STD_SECAM_DK) {
+ std = "secamDK";
+ color = 0x10;
+ }
+ } else if (params->std & V4L2_STD_SECAM_L) {
+ std = "secamL";
+ bandwidth = 8000000;
+ if_frequency = 6750000; /* TODO: untested */
+ system = 6;
+ color = 0x10;
+ } else if (params->std & V4L2_STD_SECAM_LC) {
+ std = "secamL'";
+ bandwidth = 7000000;
+ if_frequency = 1250000; /* TODO: untested */
+ system = 7;
+ color = 0x10;
+ } else {
+ std = "unknown";
+ }
+ /* calc channel center freq */
+ freq = freq - 1250000 + (bandwidth / 2);
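+	/* the vision carrier is assumed to sit 1.25 MHz above the lower
+	 * channel edge, so carrier - 1.25 MHz + bw/2 yields the centre */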
+
+ dev_dbg(&client->dev,
+ "mode=%d system=%u std='%s' params->frequency=%u center freq=%u if=%u bandwidth=%u\n",
+ params->mode, system, std, params->frequency,
+ freq, if_frequency, bandwidth);
+
+ /* set analog IF port */
+ memcpy(cmd.args, "\x14\x00\x03\x06\x08\x02", 6);
+	/*
+	 * In using dev->if_port, we assume analog and digital IFs are
+	 * always on different ports, and that if_port is 0 or 1 for the
+	 * digital output.
+	 */
+ cmd.args[4] = (dev->if_port == 1) ? 8 : 10;
+ /* Analog AGC assumed external */
+ cmd.args[5] = (dev->if_port == 1) ? 2 : 1;
+ cmd.wlen = 6;
+ cmd.rlen = 4;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ /* set analog IF output config */
+ memcpy(cmd.args, "\x14\x00\x0d\x06\x94\x64", 6);
+ cmd.wlen = 6;
+ cmd.rlen = 4;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ /* make this distinct from a digital IF */
+ dev->if_frequency = if_frequency | 1;
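+	/* digital IFs are programmed in whole kHz, so bit 0 is never set
+	 * for them; si2157_get_if_frequency() masks it off again */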
+
+ /* calc and set tuner analog if center frequency */
+ if_frequency = if_frequency + 1250000 - (bandwidth / 2);
+	dev_dbg(&client->dev, "IF Ctr freq=%u\n", if_frequency);
+
+ memcpy(cmd.args, "\x14\x00\x0C\x06", 4);
+ cmd.args[4] = (if_frequency / 1000) & 0xff;
+ cmd.args[5] = ((if_frequency / 1000) >> 8) & 0xff;
+ cmd.wlen = 6;
+ cmd.rlen = 4;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ /* set analog AGC config */
+ memcpy(cmd.args, "\x14\x00\x07\x06\x32\xc8", 6);
+ cmd.wlen = 6;
+ cmd.rlen = 4;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ /* set analog video mode */
+ memcpy(cmd.args, "\x14\x00\x04\x06\x00\x00", 6);
+ cmd.args[4] = system | color;
+ /* can use dev->inversion if assumed applies to both digital/analog */
+ if (invert_analog)
+ cmd.args[5] |= 0x02;
+ cmd.wlen = 6;
+ cmd.rlen = 1;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ /* set analog frequency */
+ memcpy(cmd.args, "\x41\x01\x00\x00\x00\x00\x00\x00", 8);
+ cmd.args[4] = (freq >> 0) & 0xff;
+ cmd.args[5] = (freq >> 8) & 0xff;
+ cmd.args[6] = (freq >> 16) & 0xff;
+ cmd.args[7] = (freq >> 24) & 0xff;
+ cmd.wlen = 8;
+ cmd.rlen = 1;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ dev->bandwidth = bandwidth;
+
+ si2157_tune_wait(client, 0); /* wait to complete, ignore any errors */
+
+ return 0;
+err:
+ dev->bandwidth = 0;
+ dev->frequency = 0;
+ dev->if_frequency = 0;
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int si2157_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct i2c_client *client = fe->tuner_priv;
+ struct si2157_dev *dev = i2c_get_clientdata(client);
+
+ *frequency = dev->frequency;
+ dev_dbg(&client->dev, "freq=%u\n", dev->frequency);
+ return 0;
+}
+
+static int si2157_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct i2c_client *client = fe->tuner_priv;
+ struct si2157_dev *dev = i2c_get_clientdata(client);
+
+ *bandwidth = dev->bandwidth;
+ dev_dbg(&client->dev, "bandwidth=%u\n", dev->bandwidth);
+ return 0;
+}
+
static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct i2c_client *client = fe->tuner_priv;
struct si2157_dev *dev = i2c_get_clientdata(client);
- *frequency = dev->if_frequency;
+ *frequency = dev->if_frequency & ~1; /* strip analog IF indicator bit */
+ dev_dbg(&client->dev, "if_frequency=%u\n", *frequency);
return 0;
}
+static int si2157_get_rf_strength(struct dvb_frontend *fe, u16 *rssi)
+{
+ struct i2c_client *client = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct si2157_cmd cmd;
+ int ret;
+ int strength;
+
+ dev_dbg(&client->dev, "\n");
+
+ memcpy(cmd.args, "\x42\x00", 2);
+ cmd.wlen = 2;
+ cmd.rlen = 12;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ c->strength.stat[0].svalue = (s8)cmd.args[3] * 1000;
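+	/* FE_SCALE_DECIBEL reports in 0.001 dB units, hence the multiply by 1000 */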
+
+	/*
+	 * normalize values based on Silicon Labs reference:
+	 * add 100, then anything > 80 is 100% signal
+	 */
+ strength = (s8)cmd.args[3] + 100;
+ strength = clamp_val(strength, 0, 80);
+ *rssi = (u16)(strength * 0xffff / 80);
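+	/* e.g. a -30 dBm reading becomes 70, i.e. roughly 87% of 0xffff */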
+
+ dev_dbg(&client->dev, "strength=%d rssi=%u\n",
+ (s8)cmd.args[3], *rssi);
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
static const struct dvb_tuner_ops si2157_ops = {
.info = {
.name = "Silicon Labs Si2141/Si2146/2147/2148/2157/2158",
@@ -395,7 +775,12 @@ static const struct dvb_tuner_ops si2157_ops = {
.init = si2157_init,
.sleep = si2157_sleep,
.set_params = si2157_set_params,
- .get_if_frequency = si2157_get_if_frequency,
+ .set_analog_params = si2157_set_analog_params,
+ .get_frequency = si2157_get_frequency,
+ .get_bandwidth = si2157_get_bandwidth,
+ .get_if_frequency = si2157_get_if_frequency,
+
+ .get_rf_strength = si2157_get_rf_strength,
};
static void si2157_stat_work(struct work_struct *work)
@@ -456,7 +841,7 @@ static int si2157_probe(struct i2c_client *client,
cmd.wlen = 0;
cmd.rlen = 1;
ret = si2157_cmd_execute(client, &cmd);
- if (ret)
+ if (ret && ret != -EAGAIN)
goto err_kfree;
memcpy(&fe->ops.tuner_ops, &si2157_ops, sizeof(struct dvb_tuner_ops));
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 778f81b39996..ef4796098931 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -29,6 +29,8 @@ struct si2157_dev {
u8 chiptype;
u8 if_port;
u32 if_frequency;
+ u32 bandwidth;
+ u32 frequency;
struct delayed_work stat_work;
#if defined(CONFIG_MEDIA_CONTROLLER)
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index e678d3d11467..00feadb217d8 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -1,4 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+# This Kconfig option is also used by the legacy av7110 driver
+config TTPCI_EEPROM
+ tristate
+ depends on I2C
+
if USB && MEDIA_SUPPORT
menuconfig MEDIA_USB_SUPPORT
@@ -60,11 +66,5 @@ source "drivers/media/usb/hackrf/Kconfig"
source "drivers/media/usb/msi2500/Kconfig"
endif
-if MEDIA_CEC_SUPPORT
- comment "USB HDMI CEC adapters"
-source "drivers/media/usb/pulse8-cec/Kconfig"
-source "drivers/media/usb/rainshadow-cec/Kconfig"
-endif
-
endif #MEDIA_USB_SUPPORT
endif #USB
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 169aa07c97bd..3eaff3149ef4 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -24,5 +24,3 @@ obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_USBTV) += usbtv/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_DVB_AS102) += as102/
-obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/
-obj-$(CONFIG_USB_RAINSHADOW_CEC) += rainshadow-cec/
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 0974965e848f..3d3c881c8e58 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -587,14 +587,27 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
return status;
}
}
- if (dev->tuner_type == TUNER_NXP_TDA18271)
+ switch (dev->model) { /* i2c device tuners */
+ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
+ case CX231XX_BOARD_HAUPPAUGE_935C:
+ case CX231XX_BOARD_HAUPPAUGE_955Q:
+ case CX231XX_BOARD_HAUPPAUGE_975:
+ case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD:
status = cx231xx_set_decoder_video_input(dev,
CX231XX_VMUX_TELEVISION,
INPUT(input)->vmux);
- else
- status = cx231xx_set_decoder_video_input(dev,
+ break;
+ default:
+ if (dev->tuner_type == TUNER_NXP_TDA18271)
+ status = cx231xx_set_decoder_video_input(dev,
+ CX231XX_VMUX_TELEVISION,
+ INPUT(input)->vmux);
+ else
+ status = cx231xx_set_decoder_video_input(dev,
CX231XX_VMUX_COMPOSITE1,
INPUT(input)->vmux);
+ break;
+ }
break;
default:
@@ -1193,12 +1206,22 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
cx231xx_set_field(FLD_SIF_EN, 0));
break;
default:
+ switch (dev->model) { /* i2c device tuners */
+ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
+ case CX231XX_BOARD_HAUPPAUGE_935C:
+ case CX231XX_BOARD_HAUPPAUGE_955Q:
+ case CX231XX_BOARD_HAUPPAUGE_975:
+ case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD:
+ /* TODO: Normal mode: SIF passthrough at 14.32 MHz?? */
+ break;
+ default:
/* This is just a casual suggestion to people adding
new boards in case they use a tuner type we don't
currently know about */
- dev_info(dev->dev,
- "Unknown tuner type configuring SIF");
- break;
+ dev_info(dev->dev,
+ "Unknown tuner type configuring SIF");
+ break;
+ }
}
break;
diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c
index 9f88c640ec2b..8149702bcf89 100644
--- a/drivers/media/usb/cx231xx/cx231xx-input.c
+++ b/drivers/media/usb/cx231xx/cx231xx-input.c
@@ -88,7 +88,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
ir_i2c_bus = cx231xx_boards[dev->model].ir_i2c_master;
dev_dbg(dev->dev, "Trying to bind ir at bus %d, addr 0x%02x\n",
ir_i2c_bus, info.addr);
- dev->ir_i2c_client = i2c_new_device(
+ dev->ir_i2c_client = i2c_new_client_device(
cx231xx_get_i2c_adap(dev, ir_i2c_bus), &info);
return 0;
@@ -96,7 +96,6 @@ int cx231xx_ir_init(struct cx231xx *dev)
void cx231xx_ir_exit(struct cx231xx *dev)
{
- if (dev->ir_i2c_client)
- i2c_unregister_device(dev->ir_i2c_client);
+ i2c_unregister_device(dev->ir_i2c_client);
dev->ir_i2c_client = NULL;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 8bff7d8a0310..d9f953f2d088 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1129,7 +1129,7 @@ int cx231xx_s_frequency(struct file *file, void *priv,
{
struct cx231xx *dev = video_drvdata(file);
struct v4l2_frequency new_freq = *f;
- int rc;
+ int rc, need_if_freq = 0;
u32 if_frequency = 5400000;
dev_dbg(dev->dev,
@@ -1142,14 +1142,30 @@ int cx231xx_s_frequency(struct file *file, void *priv,
/* set pre channel change settings in DIF first */
rc = cx231xx_tuner_pre_channel_change(dev);
- call_all(dev, tuner, s_frequency, f);
- call_all(dev, tuner, g_frequency, &new_freq);
- dev->ctl_freq = new_freq.frequency;
+ switch (dev->model) { /* i2c device tuners */
+ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
+ case CX231XX_BOARD_HAUPPAUGE_935C:
+ case CX231XX_BOARD_HAUPPAUGE_955Q:
+ case CX231XX_BOARD_HAUPPAUGE_975:
+ case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD:
+ if (dev->cx231xx_set_analog_freq)
+ dev->cx231xx_set_analog_freq(dev, f->frequency);
+ dev->ctl_freq = f->frequency;
+ need_if_freq = 1;
+ break;
+ default:
+ call_all(dev, tuner, s_frequency, f);
+ call_all(dev, tuner, g_frequency, &new_freq);
+ dev->ctl_freq = new_freq.frequency;
+ break;
+ }
+
+ pr_debug("%s() %u : %u\n", __func__, f->frequency, dev->ctl_freq);
/* set post channel change settings in DIF first */
rc = cx231xx_tuner_post_channel_change(dev);
- if (dev->tuner_type == TUNER_NXP_TDA18271) {
+ if (need_if_freq || dev->tuner_type == TUNER_NXP_TDA18271) {
if (dev->norm & (V4L2_STD_MN | V4L2_STD_NTSC_443))
if_frequency = 5400000; /*5.4MHz */
else if (dev->norm & V4L2_STD_B)
@@ -1362,9 +1378,20 @@ int cx231xx_querycap(struct file *file, void *priv,
V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
if (video_is_registered(&dev->radio_dev))
cap->capabilities |= V4L2_CAP_RADIO;
- if (dev->tuner_type != TUNER_ABSENT)
- cap->capabilities |= V4L2_CAP_TUNER;
+ switch (dev->model) {
+ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
+ case CX231XX_BOARD_HAUPPAUGE_935C:
+ case CX231XX_BOARD_HAUPPAUGE_955Q:
+ case CX231XX_BOARD_HAUPPAUGE_975:
+ case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD:
+ cap->capabilities |= V4L2_CAP_TUNER;
+ break;
+ default:
+ if (dev->tuner_type != TUNER_ABSENT)
+ cap->capabilities |= V4L2_CAP_TUNER;
+ break;
+ }
return 0;
}
@@ -1708,10 +1735,20 @@ static void cx231xx_vdev_init(struct cx231xx *dev,
video_set_drvdata(vfd, dev);
if (dev->tuner_type == TUNER_ABSENT) {
- v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY);
- v4l2_disable_ioctl(vfd, VIDIOC_S_FREQUENCY);
- v4l2_disable_ioctl(vfd, VIDIOC_G_TUNER);
- v4l2_disable_ioctl(vfd, VIDIOC_S_TUNER);
+ switch (dev->model) {
+ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
+ case CX231XX_BOARD_HAUPPAUGE_935C:
+ case CX231XX_BOARD_HAUPPAUGE_955Q:
+ case CX231XX_BOARD_HAUPPAUGE_975:
+ case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD:
+ break;
+ default:
+ v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(vfd, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(vfd, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(vfd, VIDIOC_S_TUNER);
+ break;
+ }
}
}
@@ -1781,8 +1818,20 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev->vdev.queue = q;
dev->vdev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_CAPTURE;
- if (dev->tuner_type != TUNER_ABSENT)
+
+ switch (dev->model) { /* i2c device tuners */
+ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
+ case CX231XX_BOARD_HAUPPAUGE_935C:
+ case CX231XX_BOARD_HAUPPAUGE_955Q:
+ case CX231XX_BOARD_HAUPPAUGE_975:
+ case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD:
dev->vdev.device_caps |= V4L2_CAP_TUNER;
+ break;
+ default:
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->vdev.device_caps |= V4L2_CAP_TUNER;
+ break;
+ }
/* register v4l2 video video_device */
ret = video_register_device(&dev->vdev, VFL_TYPE_VIDEO,
@@ -1829,8 +1878,18 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev->vbi_dev.queue = q;
dev->vbi_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VBI_CAPTURE;
- if (dev->tuner_type != TUNER_ABSENT)
+ switch (dev->model) { /* i2c device tuners */
+ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
+ case CX231XX_BOARD_HAUPPAUGE_935C:
+ case CX231XX_BOARD_HAUPPAUGE_955Q:
+ case CX231XX_BOARD_HAUPPAUGE_975:
+ case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD:
dev->vbi_dev.device_caps |= V4L2_CAP_TUNER;
+ break;
+ default:
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->vbi_dev.device_caps |= V4L2_CAP_TUNER;
+ }
/* register v4l2 vbi video_device */
ret = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI,
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index b21a4d413872..5c75303fba9d 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -7,7 +7,7 @@ config DVB_USB_V2
USB1.1 and USB2.0 DVB devices.
Almost every USB device needs a firmware, please look into
- <file:Documentation/media/dvb-drivers/dvb-usb.rst>.
+ <file:Documentation/driver-api/media/drivers/dvb-usb.rst>.
For a complete list of supported USB devices see the LinuxTV DVB Wiki:
<https://linuxtv.org/wiki/index.php/DVB_USB>
@@ -38,7 +38,7 @@ config DVB_USB_AF9035
select MEDIA_TUNER_FC0011 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MXL5007T if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18218 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_FC2580 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_FC2580 if (MEDIA_SUBDRV_AUTOSELECT && VIDEO_V4L2)
select MEDIA_TUNER_IT913X if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the Afatech AF9035 based DVB USB receiver.
@@ -137,12 +137,12 @@ config DVB_USB_RTL28XXU
select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
select DVB_RTL2830
select DVB_RTL2832
- select DVB_RTL2832_SDR if (MEDIA_SUBDRV_AUTOSELECT && MEDIA_SDR_SUPPORT)
+ select DVB_RTL2832_SDR if (MEDIA_SUBDRV_AUTOSELECT && MEDIA_SDR_SUPPORT && VIDEO_V4L2)
select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_E4000 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_E4000 if (MEDIA_SUBDRV_AUTOSELECT && VIDEO_V4L2)
select MEDIA_TUNER_FC0012 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_FC0013 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_FC2580 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_FC2580 if (MEDIA_SUBDRV_AUTOSELECT && VIDEO_V4L2)
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MXL5005S if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index e30305876840..7ed0ab9e429b 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -332,22 +332,17 @@ static const struct dvb_usb_device_properties ec168_props = {
},
};
-static const struct dvb_usb_driver_info ec168_driver_info = {
- .name = "E3C EC168 reference design",
- .props = &ec168_props,
-};
-
static const struct usb_device_id ec168_id[] = {
- { USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168),
- .driver_info = (kernel_ulong_t) &ec168_driver_info },
- { USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168_2),
- .driver_info = (kernel_ulong_t) &ec168_driver_info },
- { USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168_3),
- .driver_info = (kernel_ulong_t) &ec168_driver_info },
- { USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168_4),
- .driver_info = (kernel_ulong_t) &ec168_driver_info },
- { USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168_5),
- .driver_info = (kernel_ulong_t) &ec168_driver_info },
+ { DVB_USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168,
+ &ec168_props, "E3C EC168 reference design", NULL)},
+ { DVB_USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168_2,
+ &ec168_props, "E3C EC168 reference design", NULL)},
+ { DVB_USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168_3,
+ &ec168_props, "E3C EC168 reference design", NULL)},
+ { DVB_USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168_4,
+ &ec168_props, "E3C EC168 reference design", NULL)},
+ { DVB_USB_DEVICE(USB_VID_E3C, USB_PID_E3C_EC168_5,
+ &ec168_props, "E3C EC168 reference design", NULL)},
{}
};
MODULE_DEVICE_TABLE(usb, ec168_id);
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index 19217dcf20f1..b7ca236174f3 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* DVB USB compliant linux driver for GL861 USB2.0 devices.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include <linux/string.h>
@@ -550,7 +550,7 @@ static struct dvb_usb_device_properties friio_props = {
static const struct usb_device_id gl861_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580_55801,
&gl861_props, "MSI Mega Sky 55801 DVB-T USB2.0", NULL) },
- { DVB_USB_DEVICE(USB_VID_ALINK, USB_VID_ALINK_DTU,
+ { DVB_USB_DEVICE(USB_VID_ALINK, USB_PID_ALINK_DTU,
&gl861_props, "A-LINK DTU DVB-T USB2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_774, USB_PID_FRIIO_WHITE,
&friio_props, "774 Friio White ISDB-T USB2.0", NULL) },
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index fd8b42bb9a84..8a3c0eeed959 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -22,7 +22,7 @@
*
* LME2510C + M88RS2000
*
- * For firmware see Documentation/media/dvb-drivers/lmedm04.rst
+ * For firmware see Documentation/admin-guide/media/lmedm04.rst
*
* I2C addresses:
* 0xd0 - STV0288 - Demodulator
@@ -39,7 +39,7 @@
* Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com)
* LME2510(C)(C) Leaguerme (Shenzhen) MicroElectronics Co., Ltd.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*
* Known Issues :
* LME2510: Non Intel USB chipsets fail to maintain High Speed on
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.h b/drivers/media/usb/dvb-usb-v2/lmedm04.h
index 766a8348624d..4335b6ebcc1c 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.h
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.h
@@ -14,7 +14,7 @@
* MVB0001F (LME2510C+LGTDQT-P001F)
*
* *
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef _DVB_USB_LME2510_H_
#define _DVB_USB_LME2510_H_
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 55b4ae7037a4..7865fa0a8295 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include <linux/vmalloc.h>
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 70bd2a2a8ec1..e57e5d2353b4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef _DVB_USB_MXL111SF_H_
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index 1a3e5f965ae4..15d29c91662f 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -2,12 +2,13 @@
config DVB_USB
tristate "Support for various USB DVB devices"
depends on DVB_CORE && USB && I2C && RC_CORE
+ select CYPRESS_FIRMWARE
help
By enabling this you will be able to choose the various supported
USB1.1 and USB2.0 DVB devices.
Almost every USB device needs a firmware, please look into
- <file:Documentation/media/dvb-drivers/dvb-usb.rst>.
+ <file:Documentation/driver-api/media/drivers/dvb-usb.rst>.
For a complete list of supported USB devices see the LinuxTV DVB Wiki:
<https://linuxtv.org/wiki/index.php/DVB_USB>
diff --git a/drivers/media/usb/dvb-usb/a800.c b/drivers/media/usb/dvb-usb/a800.c
index 666213f5d5d8..36b5b6227412 100644
--- a/drivers/media/usb/dvb-usb/a800.c
+++ b/drivers/media/usb/dvb-usb/a800.c
@@ -8,7 +8,7 @@
* - AVerMedia who kindly provided information and
* - Glen Harris who suffered from my mistakes during development.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dibusb.h"
@@ -27,8 +27,10 @@ static int a800_power_ctrl(struct dvb_usb_device *d, int onoff)
}
/* ensure cold is 0 when iManufacturer == 1 */
-static int a800_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc, int *cold)
+static int a800_identify_state(struct usb_device *udev,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
+ int *cold)
{
*cold = udev->descriptor.iManufacturer != 1;
return 0;
diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c
index 6c960f723457..9d6fa0556d7b 100644
--- a/drivers/media/usb/dvb-usb/af9005-fe.c
+++ b/drivers/media/usb/dvb-usb/af9005-fe.c
@@ -6,7 +6,7 @@
*
* Thanks to Afatech who kindly provided information.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "af9005.h"
#include "af9005-script.h"
diff --git a/drivers/media/usb/dvb-usb/af9005-remote.c b/drivers/media/usb/dvb-usb/af9005-remote.c
index c664353f3911..41d48b3c8d05 100644
--- a/drivers/media/usb/dvb-usb/af9005-remote.c
+++ b/drivers/media/usb/dvb-usb/af9005-remote.c
@@ -8,7 +8,7 @@
*
* Thanks to Afatech who kindly provided information.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "af9005.h"
/* debug */
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 89b4b5d84cdf..b6a2436d16e9 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -6,7 +6,7 @@
*
* Thanks to Afatech who kindly provided information.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "af9005.h"
@@ -955,8 +955,8 @@ static int af9005_pid_filter(struct dvb_usb_adapter *adap, int index,
}
static int af9005_identify_state(struct usb_device *udev,
- struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
int *cold)
{
int ret;
diff --git a/drivers/media/usb/dvb-usb/af9005.h b/drivers/media/usb/dvb-usb/af9005.h
index 3179a7c71e8f..11d74dc26d83 100644
--- a/drivers/media/usb/dvb-usb/af9005.h
+++ b/drivers/media/usb/dvb-usb/af9005.h
@@ -6,7 +6,7 @@
*
* Thanks to Afatech who kindly provided information.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef _DVB_USB_AF9005_H_
#define _DVB_USB_AF9005_H_
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 8de18da0c4bd..1c39b61cde29 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2009 Adams.Xu <adams.xu@azwave.com.cn>
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "az6027.h"
@@ -1051,8 +1051,8 @@ static struct i2c_algorithm az6027_i2c_algo = {
};
static int az6027_identify_state(struct usb_device *udev,
- struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
int *cold)
{
u8 *b;
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index c421b603be44..761992ad05e2 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -18,7 +18,7 @@
* Copyright (C) 2006, 2007 Chris Pascoe (c.pascoe@itee.uq.edu.au)
* Copyright (C) 2011, 2017 Maciej S. Szmigiero (mail@maciej.szmigiero.name)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include <media/tuner.h>
#include <linux/delay.h>
@@ -1358,8 +1358,8 @@ static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap)
* not, and forget a match if it turns out we selected the wrong device.
*/
static int bluebird_fx2_identify_state(struct usb_device *udev,
- struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
int *cold)
{
int wascold = *cold;
diff --git a/drivers/media/usb/dvb-usb/dib0700.h b/drivers/media/usb/dvb-usb/dib0700.h
index ca4d3d2da969..2defbd8b6fc1 100644
--- a/drivers/media/usb/dvb-usb/dib0700.h
+++ b/drivers/media/usb/dvb-usb/dib0700.h
@@ -52,20 +52,25 @@ struct dib0700_state {
struct i2c_client *i2c_client_tuner;
};
-extern int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
- u32 *romversion, u32 *ramversion, u32 *fwtype);
-extern int dib0700_set_gpio(struct dvb_usb_device *, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val);
-extern int dib0700_ctrl_clock(struct dvb_usb_device *d, u32 clk_MHz, u8 clock_out_gp3);
-extern int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u8 rxlen);
-extern int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw);
-extern int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf);
-extern int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff);
-extern struct i2c_algorithm dib0700_i2c_algo;
-extern int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc, int *cold);
-extern int dib0700_change_protocol(struct rc_dev *dev, u64 *rc_proto);
-extern int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz);
+int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
+ u32 *romversion, u32 *ramversion, u32 *fwtype);
+int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio,
+ u8 gpio_dir, u8 gpio_val);
+int dib0700_ctrl_clock(struct dvb_usb_device *d, u32 clk_MHz, u8 clock_out_gp3);
+int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx,
+ u8 rxlen);
+int dib0700_download_firmware(struct usb_device *d,
+ const struct firmware *fw);
+int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf);
+int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff);
+int dib0700_identify_state(struct usb_device *d,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
+ int *cold);
+int dib0700_change_protocol(struct rc_dev *dev, u64 *rc_proto);
+int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz);
+extern struct i2c_algorithm dib0700_i2c_algo;
extern int dib0700_device_count;
extern int dvb_usb_dib0700_ir_proto;
extern struct dvb_usb_device_properties dib0700_devices[];
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index ef62dd6c5ae4..70219b3e8566 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -372,8 +372,10 @@ struct i2c_algorithm dib0700_i2c_algo = {
.functionality = dib0700_i2c_func,
};
-int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc, int *cold)
+int dib0700_identify_state(struct usb_device *udev,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
+ int *cold)
{
s16 ret;
u8 *b;
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 59ce2dec11e9..02b51d1a1b67 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dibusb.h"
diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c
index d4ea72bf09c5..e9dc27f73970 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mb.c
@@ -7,7 +7,7 @@
* based on GPL code from DiBcom, which has
* Copyright (C) 2004 Amaury Demol for DiBcom
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dibusb.h"
@@ -81,7 +81,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap)
if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) {
err("tuner i2c write failed.");
- ret = -EREMOTEIO;
+ return -EREMOTEIO;
}
if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl)
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
index 967027e29c17..b8cde4cded33 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dibusb.h"
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc.c b/drivers/media/usb/dvb-usb/dibusb-mc.c
index ada3bee296c2..e2689977c8c8 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc.c
@@ -7,7 +7,7 @@
* based on GPL code from DiBcom, which has
* Copyright (C) 2004 Amaury Demol for DiBcom
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dibusb.h"
diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h
index a83326c36ca7..f61de0744821 100644
--- a/drivers/media/usb/dvb-usb/dibusb.h
+++ b/drivers/media/usb/dvb-usb/dibusb.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef _DVB_USB_DIBUSB_H_
#define _DVB_USB_DIBUSB_H_
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 99a39339d45d..4e3b3c064bcf 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -6,7 +6,7 @@
*
* partly based on the SDK published by Nebula Electronics
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "digitv.h"
@@ -90,9 +90,10 @@ static struct i2c_algorithm digitv_i2c_algo = {
};
/* Callbacks for DVB USB */
-static int digitv_identify_state (struct usb_device *udev, struct
- dvb_usb_device_properties *props, struct dvb_usb_device_description **desc,
- int *cold)
+static int digitv_identify_state(struct usb_device *udev,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
+ int *cold)
{
*cold = udev->descriptor.iManufacturer == 0 && udev->descriptor.iProduct == 0;
return 0;
@@ -230,14 +231,15 @@ static struct rc_map_table rc_map_digitv_table[] = {
static int digitv_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
+ struct rc_map_table *entry;
int ret, i;
- u8 key[5];
+ u8 key[4];
u8 b[4] = { 0 };
*event = 0;
*state = REMOTE_NO_KEY_PRESSED;
- ret = digitv_ctrl_msg(d, USB_READ_REMOTE, 0, NULL, 0, &key[1], 4);
+ ret = digitv_ctrl_msg(d, USB_READ_REMOTE, 0, NULL, 0, key, 4);
if (ret)
return ret;
@@ -248,20 +250,21 @@ static int digitv_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return ret;
/* if something is inside the buffer, simulate key press */
- if (key[1] != 0)
- {
- for (i = 0; i < d->props.rc.legacy.rc_map_size; i++) {
- if (rc5_custom(&d->props.rc.legacy.rc_map_table[i]) == key[1] &&
- rc5_data(&d->props.rc.legacy.rc_map_table[i]) == key[2]) {
- *event = d->props.rc.legacy.rc_map_table[i].keycode;
+ if (key[0] != 0) {
+ for (i = 0; i < d->props.rc.legacy.rc_map_size; i++) {
+ entry = &d->props.rc.legacy.rc_map_table[i];
+
+ if (rc5_custom(entry) == key[0] &&
+ rc5_data(entry) == key[1]) {
+ *event = entry->keycode;
*state = REMOTE_KEY_PRESSED;
return 0;
}
}
+
+ deb_rc("key: %*ph\n", 4, key);
}
- if (key[0] != 0)
- deb_rc("key: %*ph\n", 5, key);
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index 00ce723c7bf0..9f83560ba63d 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dtt200u.h"
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index 1e7296b2e5b2..24efa023d827 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -6,7 +6,7 @@
*
* Thanks to Steve Chang from WideView for providing support for the WT-220U.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dtt200u.h"
diff --git a/drivers/media/usb/dvb-usb/dtt200u.h b/drivers/media/usb/dvb-usb/dtt200u.h
index 832f355114e4..696c2c1f3af3 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.h
+++ b/drivers/media/usb/dvb-usb/dtt200u.h
@@ -4,7 +4,7 @@
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef _DVB_USB_DTT200U_H_
#define _DVB_USB_DTT200U_H_
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-common.h b/drivers/media/usb/dvb-usb/dvb-usb-common.h
index 8c51ac4493dd..70f4eedd7c48 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-common.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb-common.h
@@ -26,7 +26,8 @@ extern int dvb_usb_disable_rc_polling;
#define deb_uxfer(args...) dprintk(dvb_usb_debug,0x100,args)
/* commonly used methods */
-extern int dvb_usb_download_firmware(struct usb_device *, struct dvb_usb_device_properties *);
+int dvb_usb_download_firmware(struct usb_device *udev,
+ const struct dvb_usb_device_properties *props);
extern int dvb_usb_device_power_ctrl(struct dvb_usb_device *d, int onoff);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index 42c207aacbb1..0fb3fa6100e4 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -84,7 +84,8 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
}
EXPORT_SYMBOL(usb_cypress_load_firmware);
-int dvb_usb_download_firmware(struct usb_device *udev, struct dvb_usb_device_properties *props)
+int dvb_usb_download_firmware(struct usb_device *udev,
+ const struct dvb_usb_device_properties *props)
{
int ret;
const struct firmware *fw = NULL;
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 16a0b4a359ea..c1a7634e27b4 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -6,7 +6,7 @@
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dvb-usb-common.h"
@@ -184,10 +184,10 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
}
/* determine the name and the state of the just found USB device */
-static struct dvb_usb_device_description *dvb_usb_find_device(struct usb_device *udev, struct dvb_usb_device_properties *props, int *cold)
+static const struct dvb_usb_device_description *dvb_usb_find_device(struct usb_device *udev, const struct dvb_usb_device_properties *props, int *cold)
{
int i, j;
- struct dvb_usb_device_description *desc = NULL;
+ const struct dvb_usb_device_description *desc = NULL;
*cold = -1;
@@ -242,13 +242,13 @@ int dvb_usb_device_power_ctrl(struct dvb_usb_device *d, int onoff)
* USB
*/
int dvb_usb_device_init(struct usb_interface *intf,
- struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_properties *props,
struct module *owner, struct dvb_usb_device **du,
short *adapter_nums)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct dvb_usb_device *d = NULL;
- struct dvb_usb_device_description *desc = NULL;
+ const struct dvb_usb_device_description *desc = NULL;
int ret = -ENOMEM, cold = 0;
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 2eb0e24e8943..741be0e69447 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -291,8 +291,10 @@ struct dvb_usb_device_properties {
int (*power_ctrl) (struct dvb_usb_device *, int);
int (*read_mac_address) (struct dvb_usb_device *, u8 []);
- int (*identify_state) (struct usb_device *, struct dvb_usb_device_properties *,
- struct dvb_usb_device_description **, int *);
+ int (*identify_state)(struct usb_device *udev,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
+ int *cold);
struct {
enum dvb_usb_mode mode; /* Drivers shouldn't touch on it */
@@ -436,7 +438,7 @@ struct dvb_usb_adapter {
*/
struct dvb_usb_device {
struct dvb_usb_device_properties props;
- struct dvb_usb_device_description *desc;
+ const struct dvb_usb_device_description *desc;
struct usb_device *udev;
@@ -473,7 +475,7 @@ struct dvb_usb_device {
};
extern int dvb_usb_device_init(struct usb_interface *,
- struct dvb_usb_device_properties *,
+ const struct dvb_usb_device_properties *,
struct module *, struct dvb_usb_device **,
short *adapter_nums);
extern void dvb_usb_device_exit(struct usb_interface *);
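
The const-ified identify_state callback type above is the signature every driver implementation must now match. As an illustration, a minimal sketch of such a callback in the style of the digitv and ttusb2 updates elsewhere in this patch (the function name is hypothetical; requires "dvb-usb.h"):

static int example_identify_state(struct usb_device *udev,
                                  const struct dvb_usb_device_properties *props,
                                  const struct dvb_usb_device_description **desc,
                                  int *cold)
{
        /* Empty manufacturer/product descriptors mean no firmware yet. */
        *cold = udev->descriptor.iManufacturer == 0 &&
                udev->descriptor.iProduct == 0;
        return 0;
}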
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 1007366a295b..f96626fe2c0b 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -8,7 +8,7 @@
* Terratec Cinergy S2 cards
* Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include <media/dvb-usb-ids.h>
#include "dw2102.h"
@@ -955,8 +955,8 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
}
static int su3000_identify_state(struct usb_device *udev,
- struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
int *cold)
{
info("%s", __func__);
@@ -1779,6 +1779,8 @@ enum dw2102_table_entry {
TERRATEC_CINERGY_S2_R2,
TERRATEC_CINERGY_S2_R3,
TERRATEC_CINERGY_S2_R4,
+ TERRATEC_CINERGY_S2_1,
+ TERRATEC_CINERGY_S2_2,
GOTVIEW_SAT_HD,
GENIATECH_T220,
TECHNOTREND_S2_4600,
@@ -1806,9 +1808,16 @@ static struct usb_device_id dw2102_table[] = {
[X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)},
[TEVII_S421] = {USB_DEVICE(0x9022, USB_PID_TEVII_S421)},
[TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
- [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R2)},
- [TERRATEC_CINERGY_S2_R3] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R3)},
- [TERRATEC_CINERGY_S2_R4] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R4)},
+ [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_CINERGY_S2_R2)},
+ [TERRATEC_CINERGY_S2_R3] = {USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_CINERGY_S2_R3)},
+ [TERRATEC_CINERGY_S2_R4] = {USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_CINERGY_S2_R4)},
+ [TERRATEC_CINERGY_S2_1] = {USB_DEVICE(USB_VID_TERRATEC_2,
+ USB_PID_TERRATEC_CINERGY_S2_1)},
+ [TERRATEC_CINERGY_S2_2] = {USB_DEVICE(USB_VID_TERRATEC_2,
+ USB_PID_TERRATEC_CINERGY_S2_2)},
[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
[GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
[TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND,
@@ -2221,7 +2230,7 @@ static struct dvb_usb_device_properties su3000_properties = {
}},
}
},
- .num_device_descs = 6,
+ .num_device_descs = 8,
.devices = {
{ "SU3000HD DVB-S USB2.0",
{ &dw2102_table[GENIATECH_SU3000], NULL },
@@ -2243,6 +2252,14 @@ static struct dvb_usb_device_properties su3000_properties = {
{ &dw2102_table[TERRATEC_CINERGY_S2_R3], NULL },
{ NULL },
},
+ { "Terratec Cinergy S2 PCIe Dual Port 1",
+ { &dw2102_table[TERRATEC_CINERGY_S2_1], NULL },
+ { NULL },
+ },
+ { "Terratec Cinergy S2 PCIe Dual Port 2",
+ { &dw2102_table[TERRATEC_CINERGY_S2_2], NULL },
+ { NULL },
+ },
{ "GOTVIEW Satellite HD",
{ &dw2102_table[GOTVIEW_SAT_HD], NULL },
{ NULL },
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 1282f701f185..c07f46f5176e 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -9,7 +9,7 @@
*
* This module is based off the vp7045 and vp702x modules
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "gp8psk.h"
#include "gp8psk-fe.h"
diff --git a/drivers/media/usb/dvb-usb/gp8psk.h b/drivers/media/usb/dvb-usb/gp8psk.h
index 2f4c1368eabe..5293dfdd2609 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.h
+++ b/drivers/media/usb/dvb-usb/gp8psk.h
@@ -9,7 +9,7 @@
*
* This module is based off the vp7045 and vp702x modules
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef _DVB_USB_GP8PSK_H_
#define _DVB_USB_GP8PSK_H_
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index d866a1990a7d..4bb5b82599a7 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006 Aapo Tahkola (aet@rasterburn.org)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "m920x.h"
@@ -459,8 +459,8 @@ static int m920x_firmware_download(struct usb_device *udev, const struct firmwar
/* Callbacks for DVB USB */
static int m920x_identify_state(struct usb_device *udev,
- struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
int *cold)
{
struct usb_host_interface *alt;
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index e368935a5089..e7b290552b66 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dibusb.h"
diff --git a/drivers/media/usb/dvb-usb/opera1.c b/drivers/media/usb/dvb-usb/opera1.c
index 823b33ae828d..e8d784b9d119 100644
--- a/drivers/media/usb/dvb-usb/opera1.c
+++ b/drivers/media/usb/dvb-usb/opera1.c
@@ -4,7 +4,7 @@
* Copyright (C) 2006 Mario Hlawitschka (dh1pa@amsat.org)
* Copyright (C) 2006 Marco Gittler (g.marco@freenet.de)
*
-* see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+* see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#define DVB_USB_LOG_PREFIX "opera"
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 676d233d46d5..f172120db2aa 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -330,8 +330,8 @@ schedule:
/* method to find out whether the firmware has to be downloaded or not */
static int technisat_usb2_identify_state(struct usb_device *udev,
- struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc, int *cold)
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc, int *cold)
{
int ret;
u8 *version;
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index e12a5466b677..294274fd8f55 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -17,7 +17,7 @@
* Copyright (c) 2003 Felix Domke <tmbinc@elitedvb.net>
* Copyright (C) 2005-6 Patrick Boettcher <pb@linuxtv.org>
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#define DVB_USB_LOG_PREFIX "ttusb2"
#include "dvb-usb.h"
@@ -467,9 +467,10 @@ static int tt3650_rc_query(struct dvb_usb_device *d)
/* Callbacks for DVB USB */
-static int ttusb2_identify_state (struct usb_device *udev, struct
- dvb_usb_device_properties *props, struct dvb_usb_device_description **desc,
- int *cold)
+static int ttusb2_identify_state(struct usb_device *udev,
+ const struct dvb_usb_device_properties *props,
+ const struct dvb_usb_device_description **desc,
+ int *cold)
{
*cold = udev->descriptor.iManufacturer == 0 && udev->descriptor.iProduct == 0;
return 0;
diff --git a/drivers/media/usb/dvb-usb/ttusb2.h b/drivers/media/usb/dvb-usb/ttusb2.h
index 8a3853cd6a26..b34c469d83f9 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.h
+++ b/drivers/media/usb/dvb-usb/ttusb2.h
@@ -6,7 +6,7 @@
* Copyright (c) 2003 Felix Domke <tmbinc@elitedvb.net>
* Copyright (C) 2005-6 Patrick Boettcher <pb@linuxtv.de>
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef _DVB_USB_TTUSB2_H_
#define _DVB_USB_TTUSB2_H_
diff --git a/drivers/media/usb/dvb-usb/umt-010.c b/drivers/media/usb/dvb-usb/umt-010.c
index a2101bd43349..2181993771ae 100644
--- a/drivers/media/usb/dvb-usb/umt-010.c
+++ b/drivers/media/usb/dvb-usb/umt-010.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "dibusb.h"
diff --git a/drivers/media/usb/dvb-usb/vp702x-fe.c b/drivers/media/usb/dvb-usb/vp702x-fe.c
index 1c75a9c9dfca..c1e7931900ee 100644
--- a/drivers/media/usb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/usb/dvb-usb/vp702x-fe.c
@@ -12,7 +12,7 @@
* This file can be removed soon, after the DST-driver is rewritten to provide
* the frontend-controlling separately.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "vp702x.h"
diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
index 381b5c898a07..bf54747e2e01 100644
--- a/drivers/media/usb/dvb-usb/vp702x.c
+++ b/drivers/media/usb/dvb-usb/vp702x.c
@@ -9,7 +9,7 @@
*
* Thanks to Twinhan who kindly provided hardware and information.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "vp702x.h"
#include <linux/mutex.h>
diff --git a/drivers/media/usb/dvb-usb/vp7045-fe.c b/drivers/media/usb/dvb-usb/vp7045-fe.c
index d253307a35f8..e99740ec2650 100644
--- a/drivers/media/usb/dvb-usb/vp7045-fe.c
+++ b/drivers/media/usb/dvb-usb/vp7045-fe.c
@@ -6,7 +6,7 @@
*
* Thanks to Twinhan who kindly provided hardware and information.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "vp7045.h"
diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c
index 2baf57216d19..23e3a90af1f4 100644
--- a/drivers/media/usb/dvb-usb/vp7045.c
+++ b/drivers/media/usb/dvb-usb/vp7045.c
@@ -7,7 +7,7 @@
*
* Thanks to Twinhan who kindly provided hardware and information.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#include "vp7045.h"
diff --git a/drivers/media/usb/dvb-usb/vp7045.h b/drivers/media/usb/dvb-usb/vp7045.h
index 818366746c41..1c8438f22b97 100644
--- a/drivers/media/usb/dvb-usb/vp7045.h
+++ b/drivers/media/usb/dvb-usb/vp7045.h
@@ -6,7 +6,7 @@
*
* Thanks to Twinhan who kindly provided hardware and information.
*
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
+ * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
*/
#ifndef _DVB_USB_VP7045_H_
#define _DVB_USB_VP7045_H_
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index 77a360958239..0283e3b908e4 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -9,7 +9,7 @@ menuconfig USB_GSPCA
Say Y here if you want to enable selecting webcams based
on the GSPCA framework.
- See <file:Documentation/media/v4l-drivers/gspca-cardlist.rst> for more info.
+ See <file:Documentation/admin-guide/media/gspca-cardlist.rst> for more info.
This driver uses the Video For Linux API. You must say Y or M to
"Video For Linux" to use this driver.
diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c
index 502fc2eaffe0..464aa61cd914 100644
--- a/drivers/media/usb/gspca/mr97310a.c
+++ b/drivers/media/usb/gspca/mr97310a.c
@@ -287,7 +287,6 @@ static int zero_the_pointer(struct gspca_dev *gspca_dev)
return err_code;
err_code = cam_get_response16(gspca_dev, 0x21, 0);
- status = data[0];
tries++;
if (err_code < 0)
return err_code;
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index b75c18a012a7..52e05a69c46e 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -363,9 +363,9 @@ static int hdpvr_probe(struct usb_interface *interface,
}
client = hdpvr_register_ir_i2c(dev);
- if (!client) {
+ if (IS_ERR(client)) {
v4l2_err(&dev->v4l2_dev, "i2c IR device register failed\n");
- retval = -ENODEV;
+ retval = PTR_ERR(client);
goto reg_fail;
}
#endif
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 785c8508a46e..070559b01b01 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -44,7 +44,7 @@ struct i2c_client *hdpvr_register_ir_i2c(struct hdpvr_device *dev)
init_data->polling_interval = 405; /* ms, duplicated from Windows */
info.platform_data = init_data;
- return i2c_new_device(&dev->i2c_adapter, &info);
+ return i2c_new_client_device(&dev->i2c_adapter, &info);
}
static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
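
The hdpvr hunks above track the i2c_new_device() to i2c_new_client_device() migration: the old helper returned NULL on failure while the new one returns an ERR_PTR, so callers switch from NULL checks to IS_ERR()/PTR_ERR(). A minimal sketch of the new calling convention (adap and info stand in for a real adapter and board info):

        struct i2c_client *client;

        client = i2c_new_client_device(adap, &info);
        if (IS_ERR(client))                     /* was: if (!client) */
                return PTR_ERR(client);         /* was: return -ENODEV; */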
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 275394bafe7d..63db04fe12d3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -564,7 +564,7 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
strscpy(info.type, "ir_video", I2C_NAME_SIZE);
pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
info.type, info.addr);
- i2c_new_device(&hdw->i2c_adap, &info);
+ i2c_new_client_device(&hdw->i2c_adap, &info);
break;
case PVR2_IR_SCHEME_ZILOG: /* HVR-1950 style */
case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
@@ -579,7 +579,7 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
strscpy(info.type, "ir_z8f0811_haup", I2C_NAME_SIZE);
pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
info.type, info.addr);
- i2c_new_device(&hdw->i2c_adap, &info);
+ i2c_new_client_device(&hdw->i2c_adap, &info);
break;
default:
/* The device either doesn't support I2C-based IR or we
diff --git a/drivers/media/usb/pwc/pwc-ctrl.c b/drivers/media/usb/pwc/pwc-ctrl.c
index 315c55927f5c..cff64d872058 100644
--- a/drivers/media/usb/pwc/pwc-ctrl.c
+++ b/drivers/media/usb/pwc/pwc-ctrl.c
@@ -523,7 +523,7 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
#ifdef CONFIG_USB_PWC_DEBUG
int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
{
- int ret = -1, request;
+ int ret, request;
if (pdev->type < 675)
request = SENSOR_TYPE_FORMATTER1;
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index d57b8b786506..61869636ec61 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -71,37 +71,45 @@
/* hotplug device table support */
static const struct usb_device_id pwc_device_table [] = {
- { USB_DEVICE(0x0471, 0x0302) }, /* Philips models */
- { USB_DEVICE(0x0471, 0x0303) },
- { USB_DEVICE(0x0471, 0x0304) },
- { USB_DEVICE(0x0471, 0x0307) },
- { USB_DEVICE(0x0471, 0x0308) },
- { USB_DEVICE(0x0471, 0x030C) },
- { USB_DEVICE(0x0471, 0x0310) },
- { USB_DEVICE(0x0471, 0x0311) }, /* Philips ToUcam PRO II */
- { USB_DEVICE(0x0471, 0x0312) },
- { USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
- { USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
- { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
- { USB_DEVICE(0x069A, 0x0001) }, /* Askey */
- { USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
+ { USB_DEVICE(0x041E, 0x400C) }, /* Creative Webcam 5 */
+ { USB_DEVICE(0x041E, 0x4011) }, /* Creative Webcam Pro Ex */
+
+ { USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam 3000 Pro */
{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
- { USB_DEVICE(0x046D, 0x08B2) }, /* Logitech QuickCam Pro 4000 */
+ { USB_DEVICE(0x046D, 0x08B2) }, /* Logitech QuickCam 4000 Pro */
{ USB_DEVICE(0x046D, 0x08B3) }, /* Logitech QuickCam Zoom (old model) */
{ USB_DEVICE(0x046D, 0x08B4) }, /* Logitech QuickCam Zoom (new model) */
{ USB_DEVICE(0x046D, 0x08B5) }, /* Logitech QuickCam Orbit/Sphere */
- { USB_DEVICE(0x046D, 0x08B6) }, /* Cisco VT Camera */
+ { USB_DEVICE(0x046D, 0x08B6) }, /* Logitech/Cisco VT Camera */
{ USB_DEVICE(0x046D, 0x08B7) }, /* Logitech ViewPort AV 100 */
- { USB_DEVICE(0x046D, 0x08B8) }, /* Logitech (reserved) */
+ { USB_DEVICE(0x046D, 0x08B8) }, /* Logitech QuickCam */
+
+ { USB_DEVICE(0x0471, 0x0302) }, /* Philips PCA645VC */
+ { USB_DEVICE(0x0471, 0x0303) }, /* Philips PCA646VC */
+ { USB_DEVICE(0x0471, 0x0304) }, /* Askey VC010 type 2 */
+ { USB_DEVICE(0x0471, 0x0307) }, /* Philips PCVC675K (Vesta) */
+ { USB_DEVICE(0x0471, 0x0308) }, /* Philips PCVC680K (Vesta Pro) */
+ { USB_DEVICE(0x0471, 0x030C) }, /* Philips PCVC690K (Vesta Pro Scan) */
+ { USB_DEVICE(0x0471, 0x0310) }, /* Philips PCVC730K (ToUCam Fun)/PCVC830 (ToUCam II) */
+ { USB_DEVICE(0x0471, 0x0311) }, /* Philips PCVC740K (ToUCam Pro)/PCVC840 (ToUCam II) */
+ { USB_DEVICE(0x0471, 0x0312) }, /* Philips PCVC750K (ToUCam Pro Scan) */
+ { USB_DEVICE(0x0471, 0x0313) }, /* Philips PCVC720K/40 (ToUCam XS) */
+ { USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC webcam */
+ { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC webcam */
+
+ { USB_DEVICE(0x04CC, 0x8116) }, /* Sotec Afina Eye */
+
{ USB_DEVICE(0x055D, 0x9000) }, /* Samsung MPC-C10 */
{ USB_DEVICE(0x055D, 0x9001) }, /* Samsung MPC-C30 */
{ USB_DEVICE(0x055D, 0x9002) }, /* Samsung SNC-35E (Ver3.0) */
- { USB_DEVICE(0x041E, 0x400C) }, /* Creative Webcam 5 */
- { USB_DEVICE(0x041E, 0x4011) }, /* Creative Webcam Pro Ex */
- { USB_DEVICE(0x04CC, 0x8116) }, /* Afina Eye */
- { USB_DEVICE(0x06BE, 0x8116) }, /* new Afina Eye */
- { USB_DEVICE(0x0d81, 0x1910) }, /* Visionite */
- { USB_DEVICE(0x0d81, 0x1900) },
+
+ { USB_DEVICE(0x069A, 0x0001) }, /* Askey VC010 type 1 */
+
+ { USB_DEVICE(0x06BE, 0x8116) }, /* AME Co. Afina Eye */
+
+ { USB_DEVICE(0x0d81, 0x1900) }, /* Visionite VCS-UC300 */
+ { USB_DEVICE(0x0d81, 0x1910) }, /* Visionite VCS-UM100 */
+
{ }
};
MODULE_DEVICE_TABLE(usb, pwc_device_table);
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 3198f9624b7c..b8d39b2f777f 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -250,6 +250,7 @@ static void ttusb_dec_handle_irq( struct urb *urb)
struct ttusb_dec *dec = urb->context;
char *buffer = dec->irq_buffer;
int retval;
+ int index = buffer[4];
switch(urb->status) {
case 0: /*success*/
@@ -281,11 +282,11 @@ static void ttusb_dec_handle_irq( struct urb *urb)
* this should/could be added later ...
* for now lets report each signal as a key down and up
*/
- if (buffer[4] - 1 < ARRAY_SIZE(rc_keys)) {
- dprintk("%s:rc signal:%d\n", __func__, buffer[4]);
- input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 1);
+ if (index - 1 < ARRAY_SIZE(rc_keys)) {
+ dprintk("%s:rc signal:%d\n", __func__, index);
+ input_report_key(dec->rc_input_dev, rc_keys[index - 1], 1);
input_sync(dec->rc_input_dev);
- input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 0);
+ input_report_key(dec->rc_input_dev, rc_keys[index - 1], 0);
input_sync(dec->rc_input_dev);
}
}
diff --git a/drivers/media/usb/zr364xx/Kconfig b/drivers/media/usb/zr364xx/Kconfig
index 55b06c833667..49b4257487bb 100644
--- a/drivers/media/usb/zr364xx/Kconfig
+++ b/drivers/media/usb/zr364xx/Kconfig
@@ -7,7 +7,7 @@ config USB_ZR364XX
help
Say Y here if you want to connect this type of camera to your
computer's USB port.
- See <file:Documentation/media/v4l-drivers/zr364xx.rst> for more info
+ See <file:Documentation/admin-guide/media/zr364xx.rst> for more info
and list of supported cameras.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 39e3fb30ba0b..bf49f83cb86f 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -16,6 +16,15 @@ config VIDEO_V4L2_I2C
depends on I2C && VIDEO_V4L2
default y
+config VIDEO_V4L2_SUBDEV_API
+ bool "V4L2 sub-device userspace API"
+ depends on VIDEO_DEV && MEDIA_CONTROLLER
+ help
+ Enables the V4L2 sub-device pad-level userspace API used to configure
+ video format, size and frame rate between hardware blocks.
+
+ This API is mostly used by camera interfaces in embedded platforms.
+
config VIDEO_ADV_DEBUG
bool "Enable advanced debug functionality on V4L2 drivers"
help
@@ -31,20 +40,18 @@ config VIDEO_FIXED_MINOR_RANGES
When in doubt, say N.
-config VIDEO_PCI_SKELETON
- tristate "Skeleton PCI V4L2 driver"
- depends on PCI
- depends on SAMPLES
- depends on VIDEO_V4L2 && VIDEOBUF2_CORE
- depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
- help
- Enable build of the skeleton PCI driver, used as a reference
- when developing new drivers.
-
# Used by drivers that need tuner.ko
config VIDEO_TUNER
tristate
+# Used by drivers that need v4l2-jpeg.ko
+config V4L2_JPEG_HELPER
+ tristate
+
+# Used by drivers that need v4l2-h264.ko
+config V4L2_H264
+ tristate
+
# Used by drivers that need v4l2-mem2mem.ko
config V4L2_MEM2MEM_DEV
tristate
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 786bd1ec4d1b..2ef0c7c958a2 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -21,9 +21,12 @@ obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
+obj-$(CONFIG_V4L2_H264) += v4l2-h264.o
obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o
+obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o
+
obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index d0e5ebc736f9..9e8eb45a5b03 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -250,9 +250,9 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
/* YUV packed formats */
{ .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
@@ -274,6 +274,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
/* YUV planar formats, non contiguous variant */
{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
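
The entries above are consumed through v4l2_format_info(), whose descriptor lets a driver derive plane sizes from the bpp/hdiv/vdiv fields instead of hard-coding them. A hedged sketch, assuming width and height are provided by the caller:

        const struct v4l2_format_info *info = v4l2_format_info(V4L2_PIX_FMT_YUV420);
        unsigned int chroma_size = 0;

        /* Size of one chroma plane of a planar YUV format. */
        if (info && info->comp_planes > 1)
                chroma_size = (width / info->hdiv) * (height / info->vdiv) *
                              info->bpp[1];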
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 93d33d1db4e8..b188577db40f 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -9,14 +9,15 @@
#define pr_fmt(fmt) "v4l2-ctrls: " fmt
#include <linux/ctype.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/export.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-event.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
#define dprintk(vdev, fmt, arg...) do { \
if (!WARN_ON(!(vdev)) && ((vdev)->dev_debug & V4L2_DEV_DEBUG_CTRL)) \
@@ -336,6 +337,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"4.2",
"5",
"5.1",
+ "5.2",
+ "6.0",
+ "6.1",
+ "6.2",
NULL,
};
static const char * const h264_loop_filter[] = {
@@ -362,6 +367,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Scalable High Intra",
"Stereo High",
"Multiview High",
+ "Constrained High",
NULL,
};
static const char * const vui_sar_idc[] = {
@@ -578,6 +584,12 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Annex B Start Code",
NULL,
};
+ static const char * const camera_orientation[] = {
+ "Front",
+ "Back",
+ "External",
+ NULL,
+ };
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -703,6 +715,8 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return hevc_decode_mode;
case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
return hevc_start_code;
+ case V4L2_CID_CAMERA_ORIENTATION:
+ return camera_orientation;
default:
return NULL;
}
@@ -1015,6 +1029,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_PAN_SPEED: return "Pan, Speed";
case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
+ case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation";
+ case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation";
/* FM Radio Modulator controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1288,6 +1304,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
+ case V4L2_CID_CAMERA_ORIENTATION:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_LINK_FREQ:
@@ -1477,6 +1494,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
case V4L2_CID_RDS_RX_MUSIC_SPEECH:
+ case V4L2_CID_CAMERA_ORIENTATION:
+ case V4L2_CID_CAMERA_SENSOR_ROTATION:
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
case V4L2_CID_RF_TUNER_PLL_LOCK:
@@ -3794,7 +3813,8 @@ s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
struct v4l2_ext_control c;
/* It's a driver bug if this happens. */
- WARN_ON(!ctrl->is_int);
+ if (WARN_ON(!ctrl->is_int))
+ return 0;
c.value = 0;
get_ctrl(ctrl, &c);
return c.value;
@@ -3806,7 +3826,8 @@ s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl)
struct v4l2_ext_control c;
/* It's a driver bug if this happens. */
- WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
+ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
+ return 0;
c.value64 = 0;
get_ctrl(ctrl, &c);
return c.value64;
@@ -4215,7 +4236,8 @@ int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
lockdep_assert_held(ctrl->handler->lock);
/* It's a driver bug if this happens. */
- WARN_ON(!ctrl->is_int);
+ if (WARN_ON(!ctrl->is_int))
+ return -EINVAL;
ctrl->val = val;
return set_ctrl(NULL, ctrl, 0);
}
@@ -4226,7 +4248,8 @@ int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
lockdep_assert_held(ctrl->handler->lock);
/* It's a driver bug if this happens. */
- WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
+ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
+ return -EINVAL;
*ctrl->p_new.p_s64 = val;
return set_ctrl(NULL, ctrl, 0);
}
@@ -4237,23 +4260,25 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
lockdep_assert_held(ctrl->handler->lock);
/* It's a driver bug if this happens. */
- WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING);
+ if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING))
+ return -EINVAL;
strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
return set_ctrl(NULL, ctrl, 0);
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
-int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
- const struct v4l2_area *area)
+int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
+ enum v4l2_ctrl_type type, const void *p)
{
lockdep_assert_held(ctrl->handler->lock);
/* It's a driver bug if this happens. */
- WARN_ON(ctrl->type != V4L2_CTRL_TYPE_AREA);
- *ctrl->p_new.p_area = *area;
+ if (WARN_ON(ctrl->type != type))
+ return -EINVAL;
+ memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size);
return set_ctrl(NULL, ctrl, 0);
}
-EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_area);
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound);
void v4l2_ctrl_request_complete(struct media_request *req,
struct v4l2_ctrl_handler *main_hdl)
@@ -4597,3 +4622,42 @@ __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_poll);
+
+int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ctrl_ops,
+ const struct v4l2_fwnode_device_properties *p)
+{
+ if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) {
+ u32 orientation_ctrl;
+
+ switch (p->orientation) {
+ case V4L2_FWNODE_ORIENTATION_FRONT:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT;
+ break;
+ case V4L2_FWNODE_ORIENTATION_BACK:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK;
+ break;
+ case V4L2_FWNODE_ORIENTATION_EXTERNAL:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops,
+ V4L2_CID_CAMERA_ORIENTATION,
+ V4L2_CAMERA_ORIENTATION_EXTERNAL, 0,
+ orientation_ctrl))
+ return hdl->error;
+ }
+
+ if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) {
+ if (!v4l2_ctrl_new_std(hdl, ctrl_ops,
+ V4L2_CID_CAMERA_SENSOR_ROTATION,
+ p->rotation, p->rotation, 1,
+ p->rotation))
+ return hdl->error;
+ }
+
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties);
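
Two usage patterns follow from the v4l2-ctrls changes above. First, setting a v4l2_area control becomes a special case of the generic compound setter; since __v4l2_ctrl_s_ctrl_compound() asserts the handler lock, a sketch with the usual lock helpers looks like:

        v4l2_ctrl_lock(ctrl);
        ret = __v4l2_ctrl_s_ctrl_compound(ctrl, V4L2_CTRL_TYPE_AREA, &area);
        v4l2_ctrl_unlock(ctrl);

Second, v4l2_ctrl_new_fwnode_properties() pairs with v4l2_fwnode_device_parse() (added further down, in v4l2-fwnode.c) so a sensor driver can register the read-only orientation/rotation controls at probe time. A sketch, assuming hdl and ctrl_ops are the driver's control handler and ops:

        struct v4l2_fwnode_device_properties props;
        int ret;

        ret = v4l2_fwnode_device_parse(dev, &props);
        if (ret)
                return ret;

        ret = v4l2_ctrl_new_fwnode_properties(hdl, ctrl_ops, &props);
        if (ret)
                return ret;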
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 97b6a3af1361..a593ea0598b5 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -552,6 +552,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
(vdev->device_caps & meta_caps);
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
+ bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC;
bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);
@@ -725,9 +726,15 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std);
if (is_rx) {
SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
- SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
- SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
- SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ if (is_io_mc) {
+ set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls);
+ } else {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ }
SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio);
SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
@@ -735,9 +742,15 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
}
if (is_tx) {
- SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
- SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
- SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ if (is_io_mc) {
+ set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls);
+ } else {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
+ SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
+ SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ }
SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout);
SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
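
The effect of the branches above: a driver that sets V4L2_CAP_IO_MC gets the input/output ioctls marked valid without providing any vidioc_{enum,g,s}_{input,output} ops, and the core (see the v4l2-ioctl.c hunks below) emulates a single index-0 camera input or analog output. The driver side reduces to a capability flag, sketched here with illustrative companion caps:

        vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
                            V4L2_CAP_IO_MC;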
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index c69941214bb2..de4287251a89 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -186,7 +186,8 @@ static void v4l2_device_release_subdev_node(struct video_device *vdev)
kfree(vdev);
}
-int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
+int __v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev,
+ bool read_only)
{
struct video_device *vdev;
struct v4l2_subdev *sd;
@@ -215,6 +216,8 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
vdev->fops = &v4l2_subdev_fops;
vdev->release = v4l2_device_release_subdev_node;
vdev->ctrl_handler = sd->ctrl_handler;
+ if (read_only)
+ set_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
sd->owner);
if (err < 0) {
@@ -252,7 +255,7 @@ clean_up:
return err;
}
-EXPORT_SYMBOL_GPL(v4l2_device_register_subdev_nodes);
+EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev_nodes);
void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
{
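
With the extra read_only argument, the old v4l2_device_register_subdev_nodes() entry point presumably survives as a header-side inline wrapper rather than an export; a sketch of the assumed wrappers (not shown in this diff):

        static inline int
        v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
        {
                return __v4l2_device_register_subdev_nodes(v4l2_dev, false);
        }

        static inline int
        v4l2_device_register_ro_subdev_nodes(struct v4l2_device *v4l2_dev)
        {
                return __v4l2_device_register_subdev_nodes(v4l2_dev, true);
        }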
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 97f0f8b23b5d..a4c3c77c1894 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -756,6 +756,48 @@ err:
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_add_link);
+int v4l2_fwnode_device_parse(struct device *dev,
+ struct v4l2_fwnode_device_properties *props)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ u32 val;
+ int ret;
+
+ memset(props, 0, sizeof(*props));
+
+ props->orientation = V4L2_FWNODE_PROPERTY_UNSET;
+ ret = fwnode_property_read_u32(fwnode, "orientation", &val);
+ if (!ret) {
+ switch (val) {
+ case V4L2_FWNODE_ORIENTATION_FRONT:
+ case V4L2_FWNODE_ORIENTATION_BACK:
+ case V4L2_FWNODE_ORIENTATION_EXTERNAL:
+ break;
+ default:
+ dev_warn(dev, "Unsupported device orientation: %u\n", val);
+ return -EINVAL;
+ }
+
+ props->orientation = val;
+ dev_dbg(dev, "device orientation: %u\n", val);
+ }
+
+ props->rotation = V4L2_FWNODE_PROPERTY_UNSET;
+ ret = fwnode_property_read_u32(fwnode, "rotation", &val);
+ if (!ret) {
+ if (val >= 360) {
+ dev_warn(dev, "Unsupported device rotation: %u\n", val);
+ return -EINVAL;
+ }
+
+ props->rotation = val;
+ dev_dbg(dev, "device rotation: %u\n", val);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse);
+
static int
v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
struct v4l2_async_notifier *notifier,
@@ -980,7 +1022,7 @@ static int v4l2_fwnode_reference_parse(struct device *dev,
*
* THIS EXAMPLE EXISTS MERELY TO DOCUMENT THIS FUNCTION. DO NOT USE IT AS A
* REFERENCE IN HOW ACPI TABLES SHOULD BE WRITTEN!! See documentation under
- * Documentation/acpi/dsd instead and especially graph.txt,
+ * Documentation/firmware-guide/acpi/dsd/ instead and especially graph.txt,
* data-node-references.txt and leds.txt .
*
* Scope (\_SB.PCI0.I2C2)
@@ -1323,68 +1365,6 @@ out_cleanup:
}
EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor_common);
-int v4l2_async_register_fwnode_subdev(struct v4l2_subdev *sd,
- size_t asd_struct_size,
- unsigned int *ports,
- unsigned int num_ports,
- parse_endpoint_func parse_endpoint)
-{
- struct v4l2_async_notifier *notifier;
- struct device *dev = sd->dev;
- struct fwnode_handle *fwnode;
- int ret;
-
- if (WARN_ON(!dev))
- return -ENODEV;
-
- fwnode = dev_fwnode(dev);
- if (!fwnode_device_is_available(fwnode))
- return -ENODEV;
-
- notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
- if (!notifier)
- return -ENOMEM;
-
- v4l2_async_notifier_init(notifier);
-
- if (!ports) {
- ret = v4l2_async_notifier_parse_fwnode_endpoints(dev, notifier,
- asd_struct_size,
- parse_endpoint);
- if (ret < 0)
- goto out_cleanup;
- } else {
- unsigned int i;
-
- for (i = 0; i < num_ports; i++) {
- ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(dev, notifier, asd_struct_size, ports[i], parse_endpoint);
- if (ret < 0)
- goto out_cleanup;
- }
- }
-
- ret = v4l2_async_subdev_notifier_register(sd, notifier);
- if (ret < 0)
- goto out_cleanup;
-
- ret = v4l2_async_register_subdev(sd);
- if (ret < 0)
- goto out_unregister;
-
- sd->subdev_notifier = notifier;
-
- return 0;
-
-out_unregister:
- v4l2_async_notifier_unregister(notifier);
-out_cleanup:
- v4l2_async_notifier_cleanup(notifier);
- kfree(notifier);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(v4l2_async_register_fwnode_subdev);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
diff --git a/drivers/media/v4l2-core/v4l2-h264.c b/drivers/media/v4l2-core/v4l2-h264.c
new file mode 100644
index 000000000000..edf6225f0522
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-h264.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 H264 helpers.
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Author: Boris Brezillon <boris.brezillon@collabora.com>
+ */
+
+#include <linux/module.h>
+#include <linux/sort.h>
+
+#include <media/v4l2-h264.h>
+
+/**
+ * v4l2_h264_init_reflist_builder() - Initialize a P/B0/B1 reference list
+ * builder
+ *
+ * @b: the builder context to initialize
+ * @dec_params: decode parameters control
+ * @slice_params: first slice parameters control
+ * @sps: SPS control
+ * @dpb: DPB to use when creating the reference list
+ */
+void
+v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
+ const struct v4l2_ctrl_h264_decode_params *dec_params,
+ const struct v4l2_ctrl_h264_slice_params *slice_params,
+ const struct v4l2_ctrl_h264_sps *sps,
+ const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
+{
+ int cur_frame_num, max_frame_num;
+ unsigned int i;
+
+ max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+ cur_frame_num = slice_params->frame_num;
+
+ memset(b, 0, sizeof(*b));
+ if (!(slice_params->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC))
+ b->cur_pic_order_count = min(dec_params->bottom_field_order_cnt,
+ dec_params->top_field_order_cnt);
+ else if (slice_params->flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ b->cur_pic_order_count = dec_params->bottom_field_order_cnt;
+ else
+ b->cur_pic_order_count = dec_params->top_field_order_cnt;
+
+ for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
+ u32 pic_order_count;
+
+ if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ b->refs[i].pic_num = dpb[i].pic_num;
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ b->refs[i].longterm = true;
+
+ /*
+ * Handle frame_num wraparound as described in section
+ * '8.2.4.1 Decoding process for picture numbers' of the spec.
+ * TODO: This logic will have to be adjusted when we start
+ * supporting interlaced content.
+ */
+ if (dpb[i].frame_num > cur_frame_num)
+ b->refs[i].frame_num = (int)dpb[i].frame_num -
+ max_frame_num;
+ else
+ b->refs[i].frame_num = dpb[i].frame_num;
+
+ if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_FIELD))
+ pic_order_count = min(dpb[i].top_field_order_cnt,
+ dpb[i].bottom_field_order_cnt);
+ else if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_BOTTOM_FIELD)
+ pic_order_count = dpb[i].bottom_field_order_cnt;
+ else
+ pic_order_count = dpb[i].top_field_order_cnt;
+
+ b->refs[i].pic_order_count = pic_order_count;
+ b->unordered_reflist[b->num_valid] = i;
+ b->num_valid++;
+ }
+
+ for (i = b->num_valid; i < ARRAY_SIZE(b->unordered_reflist); i++)
+ b->unordered_reflist[i] = i;
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_init_reflist_builder);
+
+static int v4l2_h264_p_ref_list_cmp(const void *ptra, const void *ptrb,
+ const void *data)
+{
+ const struct v4l2_h264_reflist_builder *builder = data;
+ u8 idxa, idxb;
+
+ idxa = *((u8 *)ptra);
+ idxb = *((u8 *)ptrb);
+
+ if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
+ idxb >= V4L2_H264_NUM_DPB_ENTRIES))
+ return 1;
+
+ if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
+ /* Short term pics first. */
+ if (!builder->refs[idxa].longterm)
+ return -1;
+ else
+ return 1;
+ }
+
+ /*
+ * Short term pics in descending pic num order, long term ones in
+ * ascending order.
+ */
+ if (!builder->refs[idxa].longterm)
+ return builder->refs[idxb].frame_num <
+ builder->refs[idxa].frame_num ?
+ -1 : 1;
+
+ return builder->refs[idxa].pic_num < builder->refs[idxb].pic_num ?
+ -1 : 1;
+}
+
+static int v4l2_h264_b0_ref_list_cmp(const void *ptra, const void *ptrb,
+ const void *data)
+{
+ const struct v4l2_h264_reflist_builder *builder = data;
+ s32 poca, pocb;
+ u8 idxa, idxb;
+
+ idxa = *((u8 *)ptra);
+ idxb = *((u8 *)ptrb);
+
+ if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
+ idxb >= V4L2_H264_NUM_DPB_ENTRIES))
+ return 1;
+
+ if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
+ /* Short term pics first. */
+ if (!builder->refs[idxa].longterm)
+ return -1;
+ else
+ return 1;
+ }
+
+ /* Long term pics in ascending pic num order. */
+ if (builder->refs[idxa].longterm)
+ return builder->refs[idxa].pic_num <
+ builder->refs[idxb].pic_num ?
+ -1 : 1;
+
+ poca = builder->refs[idxa].pic_order_count;
+ pocb = builder->refs[idxb].pic_order_count;
+
+ /*
+ * Short term pics with POC < cur POC first in POC descending order
+ * followed by short term pics with POC > cur POC in POC ascending
+ * order.
+ */
+ if ((poca < builder->cur_pic_order_count) !=
+ (pocb < builder->cur_pic_order_count))
+ return poca < pocb ? -1 : 1;
+ else if (poca < builder->cur_pic_order_count)
+ return pocb < poca ? -1 : 1;
+
+ return poca < pocb ? -1 : 1;
+}
+
+static int v4l2_h264_b1_ref_list_cmp(const void *ptra, const void *ptrb,
+ const void *data)
+{
+ const struct v4l2_h264_reflist_builder *builder = data;
+ s32 poca, pocb;
+ u8 idxa, idxb;
+
+ idxa = *((u8 *)ptra);
+ idxb = *((u8 *)ptrb);
+
+ if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
+ idxb >= V4L2_H264_NUM_DPB_ENTRIES))
+ return 1;
+
+ if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
+ /* Short term pics first. */
+ if (!builder->refs[idxa].longterm)
+ return -1;
+ else
+ return 1;
+ }
+
+ /* Long term pics in ascending pic num order. */
+ if (builder->refs[idxa].longterm)
+ return builder->refs[idxa].pic_num <
+ builder->refs[idxb].pic_num ?
+ -1 : 1;
+
+ poca = builder->refs[idxa].pic_order_count;
+ pocb = builder->refs[idxb].pic_order_count;
+
+ /*
+ * Short term pics with POC > cur POC first in POC ascending order
+ * followed by short term pics with POC < cur POC in POC descending
+ * order.
+ */
+ if ((poca < builder->cur_pic_order_count) !=
+ (pocb < builder->cur_pic_order_count))
+ return pocb < poca ? -1 : 1;
+ else if (poca < builder->cur_pic_order_count)
+ return pocb < poca ? -1 : 1;
+
+ return poca < pocb ? -1 : 1;
+}
+
+/**
+ * v4l2_h264_build_p_ref_list() - Build the P reference list
+ *
+ * @builder: reference list builder context
+ * @reflist: 16-byte array used to store the P reference list. Each entry
+ * is an index in the DPB
+ *
+ * This function builds the P reference list. This procedure is described in
+ * section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass a P reference list to the hardware.
+ */
+void
+v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder,
+ u8 *reflist)
+{
+ memcpy(reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+ sort_r(reflist, builder->num_valid, sizeof(*reflist),
+ v4l2_h264_p_ref_list_cmp, NULL, builder);
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_build_p_ref_list);
+
+/**
+ * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists
+ *
+ * @builder: reference list builder context
+ * @b0_reflist: 16-byte array used to store the B0 reference list. Each entry
+ * is an index in the DPB
+ * @b1_reflist: 16-byte array used to store the B1 reference list. Each entry
+ * is an index in the DPB
+ *
+ * This function builds the B0/B1 reference lists. This procedure is described
+ * in section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass B0/B1 reference lists to the hardware.
+ */
+void
+v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder,
+ u8 *b0_reflist, u8 *b1_reflist)
+{
+ memcpy(b0_reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+ sort_r(b0_reflist, builder->num_valid, sizeof(*b0_reflist),
+ v4l2_h264_b0_ref_list_cmp, NULL, builder);
+
+ memcpy(b1_reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+ sort_r(b1_reflist, builder->num_valid, sizeof(*b1_reflist),
+ v4l2_h264_b1_ref_list_cmp, NULL, builder);
+
+ if (builder->num_valid > 1 &&
+ !memcmp(b1_reflist, b0_reflist, builder->num_valid))
+ swap(b1_reflist[0], b1_reflist[1]);
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_build_b_ref_lists);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("V4L2 H264 Helpers");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@collabora.com>");
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index b2ef8e60ea7d..2322f08a98be 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -264,12 +264,13 @@ static void v4l_print_fmtdesc(const void *arg, bool write_only)
{
const struct v4l2_fmtdesc *p = arg;
- pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, description='%.*s'\n",
+ pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, mbus_code=0x%04x, description='%.*s'\n",
p->index, prt_names(p->type, v4l2_type_names),
p->flags, (p->pixelformat & 0xff),
(p->pixelformat >> 8) & 0xff,
(p->pixelformat >> 16) & 0xff,
(p->pixelformat >> 24) & 0xff,
+ p->mbus_code,
(int)sizeof(p->description), p->description);
}
@@ -1085,6 +1086,32 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
return ret;
}
+static int v4l_g_input(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ *(int *)arg = 0;
+ return 0;
+ }
+
+ return ops->vidioc_g_input(file, fh, arg);
+}
+
+static int v4l_g_output(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ *(int *)arg = 0;
+ return 0;
+ }
+
+ return ops->vidioc_g_output(file, fh, arg);
+}
+
static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -1094,12 +1121,21 @@ static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
ret = v4l_enable_media_source(vfd);
if (ret)
return ret;
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC)
+ return *(int *)arg ? -EINVAL : 0;
+
return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
}
static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC)
+ return *(int *)arg ? -EINVAL : 0;
+
return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
}
@@ -1143,6 +1179,14 @@ static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
if (is_valid_ioctl(vfd, VIDIOC_S_STD))
p->capabilities |= V4L2_IN_CAP_STD;
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ if (p->index)
+ return -EINVAL;
+ strscpy(p->name, vfd->name, sizeof(p->name));
+ p->type = V4L2_INPUT_TYPE_CAMERA;
+ return 0;
+ }
+
return ops->vidioc_enum_input(file, fh, p);
}
@@ -1161,6 +1205,14 @@ static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
if (is_valid_ioctl(vfd, VIDIOC_S_STD))
p->capabilities |= V4L2_OUT_CAP_STD;
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ if (p->index)
+ return -EINVAL;
+ strscpy(p->name, vfd->name, sizeof(p->name));
+ p->type = V4L2_OUTPUT_TYPE_ANALOG;
+ return 0;
+ }
+
return ops->vidioc_enum_output(file, fh, p);
}
@@ -1421,12 +1473,20 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
struct video_device *vdev = video_devdata(file);
struct v4l2_fmtdesc *p = arg;
int ret = check_fmt(file, p->type);
+ u32 mbus_code;
u32 cap_mask;
if (ret)
return ret;
ret = -EINVAL;
+ if (!(vdev->device_caps & V4L2_CAP_IO_MC))
+ p->mbus_code = 0;
+
+ mbus_code = p->mbus_code;
+ CLEAR_AFTER_FIELD(p, type);
+ p->mbus_code = mbus_code;
+
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
@@ -2683,10 +2743,8 @@ DEFINE_V4L_STUB_FUNC(expbuf)
DEFINE_V4L_STUB_FUNC(g_std)
DEFINE_V4L_STUB_FUNC(g_audio)
DEFINE_V4L_STUB_FUNC(s_audio)
-DEFINE_V4L_STUB_FUNC(g_input)
DEFINE_V4L_STUB_FUNC(g_edid)
DEFINE_V4L_STUB_FUNC(s_edid)
-DEFINE_V4L_STUB_FUNC(g_output)
DEFINE_V4L_STUB_FUNC(g_audout)
DEFINE_V4L_STUB_FUNC(s_audout)
DEFINE_V4L_STUB_FUNC(g_jpegcomp)
@@ -2708,7 +2766,7 @@ DEFINE_V4L_STUB_FUNC(dv_timings_cap)
static const struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
- IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, INFO_FL_CLEAR(v4l2_fmtdesc, type)),
+ IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, 0),
IOCTL_INFO(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0),
IOCTL_INFO(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE),
@@ -2735,11 +2793,11 @@ static const struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO(VIDIOC_S_AUDIO, v4l_stub_s_audio, v4l_print_audio, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)),
IOCTL_INFO(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
- IOCTL_INFO(VIDIOC_G_INPUT, v4l_stub_g_input, v4l_print_u32, 0),
+ IOCTL_INFO(VIDIOC_G_INPUT, v4l_g_input, v4l_print_u32, 0),
IOCTL_INFO(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_EDID, v4l_stub_g_edid, v4l_print_edid, INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_S_EDID, v4l_stub_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_ALWAYS_COPY),
- IOCTL_INFO(VIDIOC_G_OUTPUT, v4l_stub_g_output, v4l_print_u32, 0),
+ IOCTL_INFO(VIDIOC_G_OUTPUT, v4l_g_output, v4l_print_u32, 0),
IOCTL_INFO(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
IOCTL_INFO(VIDIOC_G_AUDOUT, v4l_stub_g_audout, v4l_print_audioout, 0),
@@ -2805,13 +2863,11 @@ static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev,
{
if (_IOC_NR(cmd) >= V4L2_IOCTLS)
return vdev->lock;
-#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV)
if (vfh && vfh->m2m_ctx &&
(v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) {
if (vfh->m2m_ctx->q_lock)
return vfh->m2m_ctx->q_lock;
}
-#endif
if (vdev->queue && vdev->queue->lock &&
(v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE))
return vdev->queue->lock;
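
With the V4L2_CAP_IO_MC handling above, VIDIOC_ENUM_FMT can restrict the
enumeration to pixel formats produced from a given media bus code. A minimal
userspace sketch (hedged: it assumes a kernel with this series applied, so
struct v4l2_fmtdesc carries the new mbus_code field, and a capture node that
sets V4L2_CAP_IO_MC):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static void enum_fmts(int fd, __u32 mbus_code)
	{
		struct v4l2_fmtdesc fmt;
		unsigned int i;

		for (i = 0; ; i++) {
			memset(&fmt, 0, sizeof(fmt));
			fmt.index = i;
			fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			fmt.mbus_code = mbus_code; /* 0 disables the filter */
			if (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) < 0)
				break; /* EINVAL past the last matching format */
			printf("%u: %.4s (%s)\n", fmt.index,
			       (char *)&fmt.pixelformat, (char *)fmt.description);
		}
	}
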
diff --git a/drivers/media/v4l2-core/v4l2-jpeg.c b/drivers/media/v4l2-core/v4l2-jpeg.c
new file mode 100644
index 000000000000..8947fd95c6f1
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-jpeg.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * V4L2 JPEG header parser helpers.
+ *
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ *
+ * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1]
+ *
+ * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf
+ */
+
+#include <asm/unaligned.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <media/v4l2-jpeg.h>
+
+MODULE_DESCRIPTION("V4L2 JPEG header parser helpers");
+MODULE_AUTHOR("Philipp Zabel <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+
+/* Table B.1 - Marker code assignments */
+#define SOF0 0xffc0 /* start of frame */
+#define SOF1 0xffc1
+#define SOF2 0xffc2
+#define SOF3 0xffc3
+#define SOF5 0xffc5
+#define SOF7 0xffc7
+#define JPG 0xffc8 /* extensions */
+#define SOF9 0xffc9
+#define SOF11 0xffcb
+#define SOF13 0xffcd
+#define SOF15 0xffcf
+#define DHT 0xffc4 /* huffman table */
+#define DAC 0xffcc /* arithmetic coding conditioning */
+#define RST0 0xffd0 /* restart */
+#define RST7 0xffd7
+#define SOI 0xffd8 /* start of image */
+#define EOI 0xffd9 /* end of image */
+#define SOS 0xffda /* start of scan */
+#define DQT 0xffdb /* quantization table */
+#define DNL 0xffdc /* number of lines */
+#define DRI 0xffdd /* restart interval */
+#define DHP 0xffde /* hierarchical progression */
+#define EXP 0xffdf /* expand reference */
+#define APP0 0xffe0 /* application data */
+#define APP15 0xffef
+#define JPG0 0xfff0 /* extensions */
+#define JPG13 0xfffd
+#define COM 0xfffe /* comment */
+#define TEM 0xff01 /* temporary */
+
+/**
+ * struct jpeg_stream - JPEG byte stream
+ * @curr: current position in stream
+ * @end: end position, after last byte
+ */
+struct jpeg_stream {
+ u8 *curr;
+ u8 *end;
+};
+
+/* returns a value that fits into u8, or negative error */
+static int jpeg_get_byte(struct jpeg_stream *stream)
+{
+ if (stream->curr >= stream->end)
+ return -EINVAL;
+
+ return *stream->curr++;
+}
+
+/* returns a value that fits into u16, or negative error */
+static int jpeg_get_word_be(struct jpeg_stream *stream)
+{
+ u16 word;
+
+ if (stream->curr + sizeof(__be16) > stream->end)
+ return -EINVAL;
+
+ word = get_unaligned_be16(stream->curr);
+ stream->curr += sizeof(__be16);
+
+ return word;
+}
+
+static int jpeg_skip(struct jpeg_stream *stream, size_t len)
+{
+ if (stream->curr + len > stream->end)
+ return -EINVAL;
+
+ stream->curr += len;
+
+ return 0;
+}
+
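+/*
+ * Worked example for the marker scan below: the bytes ff ff d8 produce
+ * SOI - after the second ff the accumulator holds 0xffff, which the test
+ * rejects as a fill/reserved value, and the following d8 forms 0xffd8.
+ */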
+static int jpeg_next_marker(struct jpeg_stream *stream)
+{
+ int byte;
+ u16 marker = 0;
+
+ while ((byte = jpeg_get_byte(stream)) >= 0) {
+ marker = (marker << 8) | byte;
+ /* skip stuffing bytes and reserved (RES) markers */
+ if (marker == TEM || (marker > 0xffbf && marker < 0xffff))
+ return marker;
+ }
+
+ return byte;
+}
+
+/* this does not advance the current position in the stream */
+static int jpeg_reference_segment(struct jpeg_stream *stream,
+ struct v4l2_jpeg_reference *segment)
+{
+ u16 len;
+
+ if (stream->curr + sizeof(__be16) > stream->end)
+ return -EINVAL;
+
+ len = get_unaligned_be16(stream->curr);
+ if (stream->curr + len > stream->end)
+ return -EINVAL;
+
+ segment->start = stream->curr;
+ segment->length = len;
+
+ return 0;
+}
+
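+/*
+ * Worked example for the h_v nibble packing decoded below: the first
+ * component of a 4:2:0 frame carries H1=2, V1=2, i.e. h_v = 0x22, while
+ * 4:2:2 gives 0x21, 4:1:1 gives 0x41 and full 4:4:4 gives 0x11.
+ */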
+static int v4l2_jpeg_decode_subsampling(u8 nf, u8 h_v)
+{
+ if (nf == 1)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+
+ /* no chroma subsampling for 4-component images */
+ if (nf == 4 && h_v != 0x11)
+ return -EINVAL;
+
+ switch (h_v) {
+ case 0x11:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_444;
+ case 0x21:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_422;
+ case 0x22:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+ case 0x41:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_411;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int jpeg_parse_frame_header(struct jpeg_stream *stream, u16 sof_marker,
+ struct v4l2_jpeg_frame_header *frame_header)
+{
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Lf = 8 + 3 * Nf, Nf >= 1 */
+ if (len < 8 + 3)
+ return -EINVAL;
+
+ if (frame_header) {
+ /* Table B.2 - Frame header parameter sizes and values */
+ int p, y, x, nf;
+ int i;
+
+ p = jpeg_get_byte(stream);
+ if (p < 0)
+ return p;
+ /*
+ * Baseline DCT only supports 8-bit precision.
+ * Extended sequential DCT also supports 12-bit precision.
+ */
+ if (p != 8 && (p != 12 || sof_marker != SOF1))
+ return -EINVAL;
+
+ y = jpeg_get_word_be(stream);
+ if (y < 0)
+ return y;
+ if (y == 0)
+ return -EINVAL;
+
+ x = jpeg_get_word_be(stream);
+ if (x < 0)
+ return x;
+ if (x == 0)
+ return -EINVAL;
+
+ nf = jpeg_get_byte(stream);
+ if (nf < 0)
+ return nf;
+ /*
+ * The spec allows 1 <= Nf <= 255, but we only support up to 4
+ * components.
+ */
+ if (nf < 1 || nf > V4L2_JPEG_MAX_COMPONENTS)
+ return -EINVAL;
+ if (len != 8 + 3 * nf)
+ return -EINVAL;
+
+ frame_header->precision = p;
+ frame_header->height = y;
+ frame_header->width = x;
+ frame_header->num_components = nf;
+
+ for (i = 0; i < nf; i++) {
+ struct v4l2_jpeg_frame_component_spec *component;
+ int c, h_v, tq;
+
+ c = jpeg_get_byte(stream);
+ if (c < 0)
+ return c;
+
+ h_v = jpeg_get_byte(stream);
+ if (h_v < 0)
+ return h_v;
+ if (i == 0) {
+ int subs;
+
+ subs = v4l2_jpeg_decode_subsampling(nf, h_v);
+ if (subs < 0)
+ return subs;
+ frame_header->subsampling = subs;
+ } else if (h_v != 0x11) {
+ /* all chroma sampling factors must be 1 */
+ return -EINVAL;
+ }
+
+ tq = jpeg_get_byte(stream);
+ if (tq < 0)
+ return tq;
+
+ component = &frame_header->component[i];
+ component->component_identifier = c;
+ component->horizontal_sampling_factor =
+ (h_v >> 4) & 0xf;
+ component->vertical_sampling_factor = h_v & 0xf;
+ component->quantization_table_selector = tq;
+ }
+ } else {
+ return jpeg_skip(stream, len - 2);
+ }
+
+ return 0;
+}
+
+static int jpeg_parse_scan_header(struct jpeg_stream *stream,
+ struct v4l2_jpeg_scan_header *scan_header)
+{
+ size_t skip;
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Ls = 6 + 2 * Ns, Ns >= 1 */
+ if (len < 6 + 2)
+ return -EINVAL;
+
+ if (scan_header) {
+ int ns;
+ int i;
+
+ ns = jpeg_get_byte(stream);
+ if (ns < 0)
+ return ns;
+ if (ns < 1 || ns > 4 || len != 6 + 2 * ns)
+ return -EINVAL;
+
+ scan_header->num_components = ns;
+
+ for (i = 0; i < ns; i++) {
+ struct v4l2_jpeg_scan_component_spec *component;
+ int cs, td_ta;
+
+ cs = jpeg_get_byte(stream);
+ if (cs < 0)
+ return cs;
+
+ td_ta = jpeg_get_byte(stream);
+ if (td_ta < 0)
+ return td_ta;
+
+ component = &scan_header->component[i];
+ component->component_selector = cs;
+ component->dc_entropy_coding_table_selector =
+ (td_ta >> 4) & 0xf;
+ component->ac_entropy_coding_table_selector =
+ td_ta & 0xf;
+ }
+
+ skip = 3; /* skip Ss, Se, Ah, and Al */
+ } else {
+ skip = len - 2;
+ }
+
+ return jpeg_skip(stream, skip);
+}
+
+/* B.2.4.1 Quantization table-specification syntax */
+static int jpeg_parse_quantization_tables(struct jpeg_stream *stream,
+ u8 precision,
+ struct v4l2_jpeg_reference *tables)
+{
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Lq = 2 + n * 65 (for baseline DCT), n >= 1 */
+ if (len < 2 + 65)
+ return -EINVAL;
+
+ len -= 2;
+ while (len >= 65) {
+ u8 pq, tq, *qk;
+ int ret;
+ int pq_tq = jpeg_get_byte(stream);
+
+ if (pq_tq < 0)
+ return pq_tq;
+
+ /* quantization table element precision */
+ pq = (pq_tq >> 4) & 0xf;
+ /*
+ * Only 8-bit Qk values for 8-bit sample precision. Extended
+ * sequential DCT with 12-bit sample precision also supports
+ * 16-bit Qk values.
+ */
+ if (pq != 0 && (pq != 1 || precision != 12))
+ return -EINVAL;
+
+ /* quantization table destination identifier */
+ tq = pq_tq & 0xf;
+ if (tq > 3)
+ return -EINVAL;
+
+ /* quantization table element */
+ qk = stream->curr;
+ ret = jpeg_skip(stream, pq ? 128 : 64);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (tables) {
+ tables[tq].start = qk;
+ tables[tq].length = pq ? 128 : 64;
+ }
+
+ len -= pq ? 129 : 65;
+ }
+
+ return 0;
+}
+
+/* B.2.4.2 Huffman table-specification syntax */
+static int jpeg_parse_huffman_tables(struct jpeg_stream *stream,
+ struct v4l2_jpeg_reference *tables)
+{
+ int mt;
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Table B.5 - Huffman table specification parameter sizes and values */
+ if (len < 2 + 17)
+ return -EINVAL;
+
+ for (len -= 2; len >= 17; len -= 17 + mt) {
+ u8 tc, th, *table;
+ int tc_th = jpeg_get_byte(stream);
+ int i, ret;
+
+ if (tc_th < 0)
+ return tc_th;
+
+ /* table class - 0 = DC, 1 = AC */
+ tc = (tc_th >> 4) & 0xf;
+ if (tc > 1)
+ return -EINVAL;
+
+ /* huffman table destination identifier */
+ th = tc_th & 0xf;
+ /* only two Huffman tables for baseline DCT */
+ if (th > 1)
+ return -EINVAL;
+
+ /* BITS - number of Huffman codes with length i */
+ table = stream->curr;
+ mt = 0;
+ for (i = 0; i < 16; i++) {
+ int li;
+
+ li = jpeg_get_byte(stream);
+ if (li < 0)
+ return li;
+
+ mt += li;
+ }
+ /* HUFFVAL - values associated with each Huffman code */
+ ret = jpeg_skip(stream, mt);
+ if (ret < 0)
+ return ret;
+
+ if (tables) {
+ tables[(tc << 1) | th].start = table;
+ tables[(tc << 1) | th].length = stream->curr - table;
+ }
+ }
+
+ return jpeg_skip(stream, len - 2);
+}
+
+/* B.2.4.4 Restart interval definition syntax */
+static int jpeg_parse_restart_interval(struct jpeg_stream *stream,
+ u16 *restart_interval)
+{
+ int len = jpeg_get_word_be(stream);
+ int ri;
+
+ if (len < 0)
+ return len;
+ if (len != 4)
+ return -EINVAL;
+
+ ri = jpeg_get_word_be(stream);
+ if (ri < 0)
+ return ri;
+
+ *restart_interval = ri;
+
+ return 0;
+}
+
+static int jpeg_skip_segment(struct jpeg_stream *stream)
+{
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ if (len < 2)
+ return -EINVAL;
+
+ return jpeg_skip(stream, len - 2);
+}
+
+/**
+ * v4l2_jpeg_parse_header - locate marker segments and optionally parse headers
+ * @buf: address of the JPEG buffer, should start with a SOI marker
+ * @len: length of the JPEG buffer
+ * @out: returns marker segment positions and optionally parsed headers
+ *
+ * The out->scan pointer must be initialized to NULL or point to a valid
+ * v4l2_jpeg_scan_header structure. The out->huffman_tables and
+ * out->quantization_tables pointers must be initialized to NULL or point to a
+ * valid array of 4 v4l2_jpeg_reference structures each.
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out)
+{
+ struct jpeg_stream stream;
+ int marker;
+ int ret = 0;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+
+ out->num_dht = 0;
+ out->num_dqt = 0;
+
+ /* the first marker must be SOI */
+ marker = jpeg_next_marker(&stream);
+ if (marker < 0)
+ return marker;
+ if (marker != SOI)
+ return -EINVAL;
+
+ /* loop through marker segments */
+ while ((marker = jpeg_next_marker(&stream)) >= 0) {
+ switch (marker) {
+ /* baseline DCT, extended sequential DCT */
+ case SOF0 ... SOF1:
+ ret = jpeg_reference_segment(&stream, &out->sof);
+ if (ret < 0)
+ return ret;
+ ret = jpeg_parse_frame_header(&stream, marker,
+ &out->frame);
+ break;
+ /* progressive, lossless */
+ case SOF2 ... SOF3:
+ /* differential coding */
+ case SOF5 ... SOF7:
+ /* arithmetic coding */
+ case SOF9 ... SOF11:
+ case SOF13 ... SOF15:
+ case DAC:
+ case TEM:
+ return -EINVAL;
+
+ case DHT:
+ ret = jpeg_reference_segment(&stream,
+ &out->dht[out->num_dht++ % 4]);
+ if (ret < 0)
+ return ret;
+ ret = jpeg_parse_huffman_tables(&stream,
+ out->huffman_tables);
+ break;
+ case DQT:
+ ret = jpeg_reference_segment(&stream,
+ &out->dqt[out->num_dqt++ % 4]);
+ if (ret < 0)
+ return ret;
+ ret = jpeg_parse_quantization_tables(&stream,
+ out->frame.precision,
+ out->quantization_tables);
+ break;
+ case DRI:
+ ret = jpeg_parse_restart_interval(&stream,
+ &out->restart_interval);
+ break;
+
+ case SOS:
+ ret = jpeg_reference_segment(&stream, &out->sos);
+ if (ret < 0)
+ return ret;
+ ret = jpeg_parse_scan_header(&stream, out->scan);
+ /*
+ * stop parsing, the scan header marks the beginning of
+ * the entropy coded segment
+ */
+ out->ecs_offset = stream.curr - (u8 *)buf;
+ return ret;
+
+ /* markers without parameters */
+ case RST0 ... RST7: /* restart */
+ case SOI: /* start of image */
+ case EOI: /* end of image */
+ break;
+
+ /* skip unknown or unsupported marker segments */
+ default:
+ ret = jpeg_skip_segment(&stream);
+ break;
+ }
+ if (ret < 0)
+ return ret;
+ }
+
+ return marker;
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_header);
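+
+/*
+ * Sketch of a typical caller (hedged; my_dev_program is a hypothetical
+ * helper) - zeroing the header leaves the optional out-pointers NULL, so
+ * only the frame header and segment positions are filled in:
+ *
+ *	struct v4l2_jpeg_header hdr;
+ *	int ret;
+ *
+ *	memset(&hdr, 0, sizeof(hdr));
+ *	ret = v4l2_jpeg_parse_header(vaddr, size, &hdr);
+ *	if (ret < 0)
+ *		return ret;
+ *	my_dev_program(dev, hdr.frame.width, hdr.frame.height,
+ *		       (u8 *)vaddr + hdr.ecs_offset);
+ */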
+
+/**
+ * v4l2_jpeg_parse_frame_header - parse frame header
+ * @buf: address of the frame header, after the SOF0 marker
+ * @len: length of the frame header
+ * @frame_header: returns the parsed frame header
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_frame_header(void *buf, size_t len,
+ struct v4l2_jpeg_frame_header *frame_header)
+{
+ struct jpeg_stream stream;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+ return jpeg_parse_frame_header(&stream, SOF0, frame_header);
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_frame_header);
+
+/**
+ * v4l2_jpeg_parse_scan_header - parse scan header
+ * @buf: address of the scan header, after the SOS marker
+ * @len: length of the scan header
+ * @scan_header: returns the parsed scan header
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_scan_header(void *buf, size_t len,
+ struct v4l2_jpeg_scan_header *scan_header)
+{
+ struct jpeg_stream stream;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+ return jpeg_parse_scan_header(&stream, scan_header);
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_scan_header);
+
+/**
+ * v4l2_jpeg_parse_quantization_tables - parse quantization tables segment
+ * @buf: address of the quantization table segment, after the DQT marker
+ * @len: length of the quantization table segment
+ * @precision: sample precision (P) in bits per component
+ * @q_tables: returns four references into the buffer for the
+ * four possible quantization table destinations
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision,
+ struct v4l2_jpeg_reference *q_tables)
+{
+ struct jpeg_stream stream;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+ return jpeg_parse_quantization_tables(&stream, precision, q_tables);
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_quantization_tables);
+
+/**
+ * v4l2_jpeg_parse_huffman_tables - parse huffman tables segment
+ * @buf: address of the Huffman table segment, after the DHT marker
+ * @len: length of the Huffman table segment
+ * @huffman_tables: returns four references into the buffer for the
+ * four possible Huffman table destinations, in
+ * the order DC0, DC1, AC0, AC1
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len,
+ struct v4l2_jpeg_reference *huffman_tables)
+{
+ struct jpeg_stream stream;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+ return jpeg_parse_huffman_tables(&stream, huffman_tables);
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_huffman_tables);
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 0fffdd3ce6a4..ba2f2b8dcc8c 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -309,6 +309,101 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source);
+int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
+ struct media_pad *sink)
+{
+ struct fwnode_handle *endpoint;
+ struct v4l2_subdev *sink_sd;
+
+ if (!(sink->flags & MEDIA_PAD_FL_SINK) ||
+ !is_media_entity_v4l2_subdev(sink->entity))
+ return -EINVAL;
+
+ sink_sd = media_entity_to_v4l2_subdev(sink->entity);
+
+ fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
+ struct fwnode_handle *remote_ep;
+ int src_idx, sink_idx, ret;
+ struct media_pad *src;
+
+ src_idx = media_entity_get_fwnode_pad(&src_sd->entity,
+ endpoint,
+ MEDIA_PAD_FL_SOURCE);
+ if (src_idx < 0)
+ continue;
+
+ remote_ep = fwnode_graph_get_remote_endpoint(endpoint);
+ if (!remote_ep)
+ continue;
+
+ /*
+ * ask the sink to verify it owns the remote endpoint,
+ * and translate to a sink pad.
+ */
+ sink_idx = media_entity_get_fwnode_pad(&sink_sd->entity,
+ remote_ep,
+ MEDIA_PAD_FL_SINK);
+ fwnode_handle_put(remote_ep);
+
+ if (sink_idx < 0 || sink_idx != sink->index)
+ continue;
+
+ /*
+ * the source endpoint corresponds to one of its source pads,
+ * the source endpoint connects to an endpoint at the sink
+ * entity, and the sink endpoint corresponds to the sink
+ * pad requested, so we have found an endpoint connection
+ * that works, create the media link for it.
+ */
+
+ src = &src_sd->entity.pads[src_idx];
+
+ /* skip if link already exists */
+ if (media_entity_find_link(src, sink))
+ continue;
+
+ dev_dbg(sink_sd->dev, "creating link %s:%d -> %s:%d\n",
+ src_sd->entity.name, src_idx,
+ sink_sd->entity.name, sink_idx);
+
+ ret = media_create_pad_link(&src_sd->entity, src_idx,
+ &sink_sd->entity, sink_idx, 0);
+ if (ret) {
+ dev_err(sink_sd->dev,
+ "link %s:%d -> %s:%d failed with %d\n",
+ src_sd->entity.name, src_idx,
+ sink_sd->entity.name, sink_idx, ret);
+
+ fwnode_handle_put(endpoint);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links_to_pad);
+
+int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd,
+ struct v4l2_subdev *sink_sd)
+{
+ unsigned int i;
+
+ for (i = 0; i < sink_sd->entity.num_pads; i++) {
+ struct media_pad *pad = &sink_sd->entity.pads[i];
+ int ret;
+
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ ret = v4l2_create_fwnode_links_to_pad(src_sd, pad);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links);
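+
+/*
+ * Typical use (a hedged sketch - the driver and notifier names are made
+ * up): call this from a v4l2-async "bound" callback once the remote
+ * subdev appears, e.g.
+ *
+ *	static int my_notify_bound(struct v4l2_async_notifier *notifier,
+ *				   struct v4l2_subdev *sd,
+ *				   struct v4l2_async_subdev *asd)
+ *	{
+ *		struct my_csi *csi = notifier_to_my_csi(notifier);
+ *
+ *		return v4l2_create_fwnode_links(sd, &csi->sd);
+ *	}
+ */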
+
/* -----------------------------------------------------------------------------
* Pipeline power management
*
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 8986c31176e9..62ac9424c92a 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -504,12 +504,21 @@ void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
if (WARN_ON(!src_buf || !dst_buf))
goto unlock;
- v4l2_m2m_buf_done(src_buf, state);
dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
if (!dst_buf->is_held) {
v4l2_m2m_dst_buf_remove(m2m_ctx);
v4l2_m2m_buf_done(dst_buf, state);
}
+ /*
+ * If the request API is being used, returning the OUTPUT
+ * (src) buffer will wake-up any process waiting on the
+ * request file descriptor.
+ *
+ * Therefore, return the CAPTURE (dst) buffer first,
+ * to avoid signalling the request file descriptor
+ * before the CAPTURE buffer is done.
+ */
+ v4l2_m2m_buf_done(src_buf, state);
schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index a376b351135f..6b989fe5a0a9 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/videodev2.h>
#include <linux/export.h>
+#include <linux/version.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -22,24 +23,22 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
if (sd->entity.num_pads) {
fh->pad = v4l2_subdev_alloc_pad_config(sd);
if (fh->pad == NULL)
return -ENOMEM;
}
-#endif
+
return 0;
}
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
v4l2_subdev_free_pad_config(fh->pad);
fh->pad = NULL;
-#endif
}
static int subdev_open(struct file *file)
@@ -111,6 +110,17 @@ static int subdev_close(struct file *file)
return 0;
}
+#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+static int subdev_open(struct file *file)
+{
+ return -ENODEV;
+}
+
+static int subdev_close(struct file *file)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static inline int check_which(u32 which)
{
@@ -324,17 +334,27 @@ const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
struct v4l2_fh *vfh = file->private_data;
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
-#endif
+ bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
int rval;
switch (cmd) {
+ case VIDIOC_SUBDEV_QUERYCAP: {
+ struct v4l2_subdev_capability *cap = arg;
+
+ memset(cap->reserved, 0, sizeof(cap->reserved));
+ cap->version = LINUX_VERSION_CODE;
+ cap->capabilities = ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0;
+
+ return 0;
+ }
+
case VIDIOC_QUERYCTRL:
/*
* TODO: this really should be folded into v4l2_queryctrl (this
@@ -465,7 +485,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return ret;
}
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
case VIDIOC_SUBDEV_G_FMT: {
struct v4l2_subdev_format *format = arg;
@@ -477,6 +496,9 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_FMT: {
struct v4l2_subdev_format *format = arg;
+ if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
memset(format->reserved, 0, sizeof(format->reserved));
memset(format->format.reserved, 0, sizeof(format->format.reserved));
return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh->pad, format);
@@ -504,6 +526,9 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_subdev_crop *crop = arg;
struct v4l2_subdev_selection sel;
+ if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
memset(crop->reserved, 0, sizeof(crop->reserved));
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
@@ -545,6 +570,9 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval *fi = arg;
+ if (ro_subdev)
+ return -EPERM;
+
memset(fi->reserved, 0, sizeof(fi->reserved));
return v4l2_subdev_call(sd, video, s_frame_interval, arg);
}
@@ -568,6 +596,9 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_SELECTION: {
struct v4l2_subdev_selection *sel = arg;
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
sd, pad, set_selection, subdev_fh->pad, sel);
@@ -604,6 +635,9 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return v4l2_subdev_call(sd, video, g_dv_timings, arg);
case VIDIOC_SUBDEV_S_DV_TIMINGS:
+ if (ro_subdev)
+ return -EPERM;
+
return v4l2_subdev_call(sd, video, s_dv_timings, arg);
case VIDIOC_SUBDEV_G_STD:
@@ -612,6 +646,9 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_STD: {
v4l2_std_id *std = arg;
+ if (ro_subdev)
+ return -EPERM;
+
return v4l2_subdev_call(sd, video, s_std, *std);
}
@@ -627,7 +664,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_QUERYSTD:
return v4l2_subdev_call(sd, video, querystd, arg);
-#endif
+
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
}
@@ -667,6 +704,22 @@ static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
}
#endif
+#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+static long subdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENODEV;
+}
+
+#ifdef CONFIG_COMPAT
+static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENODEV;
+}
+#endif
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
@@ -696,6 +749,28 @@ const struct v4l2_file_operations v4l2_subdev_fops = {
};
#ifdef CONFIG_MEDIA_CONTROLLER
+
+int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
+ struct fwnode_endpoint *endpoint)
+{
+ struct fwnode_handle *fwnode;
+ struct v4l2_subdev *sd;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ return -EINVAL;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
+ fwnode_handle_put(fwnode);
+
+ if (dev_fwnode(sd->dev) == fwnode)
+ return endpoint->port;
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);
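+
+/*
+ * A subdev whose port numbers map 1:1 to pad indexes can simply plug the
+ * helper above into its entity operations (a sketch; the ops name is
+ * hypothetical):
+ *
+ *	static const struct media_entity_operations my_entity_ops = {
+ *		.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
+ *		.link_validate = v4l2_subdev_link_validate,
+ *	};
+ */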
+
int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
struct media_link *link,
struct v4l2_subdev_format *source_fmt,
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 9bddca292330..04368ee2a809 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -46,6 +46,17 @@ config ATMEL_EBI
tree is used. This bus supports NANDs, external ethernet controller,
SRAMs, ATA devices, etc.
+config BT1_L2_CTL
+ bool "Baikal-T1 CM2 L2-RAM Cache Control Block"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ The Baikal-T1 CPU is based on the MIPS P5600 Warrior IP-core. The CPU
+ includes a Coherency Manager v2 with an embedded 1MB L2-cache. It's
+ possible to tune the L2 cache performance by setting the data, tag and
+ way-select latencies of RAM access. This driver provides a DT
+ property-based and sysfs interface for it.
+
config TI_AEMIF
tristate "Texas Instruments AEMIF driver"
depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 27b493435e61..6d7e3e64ba62 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
obj-$(CONFIG_ARCH_BRCMSTB) += brcmstb_dpfe.o
+obj-$(CONFIG_BT1_L2_CTL) += bt1-l2-ctl.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o
diff --git a/drivers/memory/bt1-l2-ctl.c b/drivers/memory/bt1-l2-ctl.c
new file mode 100644
index 000000000000..633fea6a4edf
--- /dev/null
+++ b/drivers/memory/bt1-l2-ctl.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 CM2 L2-cache Control Block driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/sysfs.h>
+#include <linux/of.h>
+
+#define L2_CTL_REG 0x028
+#define L2_CTL_DATA_STALL_FLD 0
+#define L2_CTL_DATA_STALL_MASK GENMASK(1, L2_CTL_DATA_STALL_FLD)
+#define L2_CTL_TAG_STALL_FLD 2
+#define L2_CTL_TAG_STALL_MASK GENMASK(3, L2_CTL_TAG_STALL_FLD)
+#define L2_CTL_WS_STALL_FLD 4
+#define L2_CTL_WS_STALL_MASK GENMASK(5, L2_CTL_WS_STALL_FLD)
+#define L2_CTL_SET_CLKRATIO BIT(13)
+#define L2_CTL_CLKRATIO_LOCK BIT(31)
+
+#define L2_CTL_STALL_MIN 0
+#define L2_CTL_STALL_MAX 3
+#define L2_CTL_STALL_SET_DELAY_US 1
+#define L2_CTL_STALL_SET_TOUT_US 1000
+
+/*
+ * struct l2_ctl - Baikal-T1 L2 Control block private data.
+ * @dev: Pointer to the device structure.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ */
+struct l2_ctl {
+ struct device *dev;
+
+ struct regmap *sys_regs;
+};
+
+/*
+ * enum l2_ctl_stall - Baikal-T1 L2-cache-RAM stall identifier.
+ * @L2_WS_STALL: Way-select latency.
+ * @L2_TAG_STALL: Tag latency.
+ * @L2_DATA_STALL: Data latency.
+ */
+enum l2_ctl_stall {
+ L2_WS_STALL,
+ L2_TAG_STALL,
+ L2_DATA_STALL
+};
+
+/*
+ * struct l2_ctl_device_attribute - Baikal-T1 L2-cache device attribute.
+ * @dev_attr: Actual sysfs device attribute.
+ * @id: L2-cache stall field identifier.
+ */
+struct l2_ctl_device_attribute {
+ struct device_attribute dev_attr;
+ enum l2_ctl_stall id;
+};
+#define to_l2_ctl_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct l2_ctl_device_attribute, dev_attr)
+
+#define L2_CTL_ATTR_RW(_name, _prefix, _id) \
+ struct l2_ctl_device_attribute l2_ctl_attr_##_name = \
+ { __ATTR(_name, 0644, _prefix##_show, _prefix##_store), _id }
+
+static int l2_ctl_get_latency(struct l2_ctl *l2, enum l2_ctl_stall id, u32 *val)
+{
+ u32 data = 0;
+ int ret;
+
+ ret = regmap_read(l2->sys_regs, L2_CTL_REG, &data);
+ if (ret)
+ return ret;
+
+ switch (id) {
+ case L2_WS_STALL:
+ *val = FIELD_GET(L2_CTL_WS_STALL_MASK, data);
+ break;
+ case L2_TAG_STALL:
+ *val = FIELD_GET(L2_CTL_TAG_STALL_MASK, data);
+ break;
+ case L2_DATA_STALL:
+ *val = FIELD_GET(L2_CTL_DATA_STALL_MASK, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int l2_ctl_set_latency(struct l2_ctl *l2, enum l2_ctl_stall id, u32 val)
+{
+ u32 mask = 0, data = 0;
+ int ret;
+
+ val = clamp_val(val, L2_CTL_STALL_MIN, L2_CTL_STALL_MAX);
+
+ switch (id) {
+ case L2_WS_STALL:
+ data = FIELD_PREP(L2_CTL_WS_STALL_MASK, val);
+ mask = L2_CTL_WS_STALL_MASK;
+ break;
+ case L2_TAG_STALL:
+ data = FIELD_PREP(L2_CTL_TAG_STALL_MASK, val);
+ mask = L2_CTL_TAG_STALL_MASK;
+ break;
+ case L2_DATA_STALL:
+ data = FIELD_PREP(L2_CTL_DATA_STALL_MASK, val);
+ mask = L2_CTL_DATA_STALL_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data |= L2_CTL_SET_CLKRATIO;
+ mask |= L2_CTL_SET_CLKRATIO;
+
+ ret = regmap_update_bits(l2->sys_regs, L2_CTL_REG, mask, data);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(l2->sys_regs, L2_CTL_REG, data,
+ data & L2_CTL_CLKRATIO_LOCK,
+ L2_CTL_STALL_SET_DELAY_US,
+ L2_CTL_STALL_SET_TOUT_US);
+}
+
+static void l2_ctl_clear_data(void *data)
+{
+ struct l2_ctl *l2 = data;
+ struct platform_device *pdev = to_platform_device(l2->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct l2_ctl *l2_ctl_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct l2_ctl *l2;
+ int ret;
+
+ l2 = devm_kzalloc(dev, sizeof(*l2), GFP_KERNEL);
+ if (!l2)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, l2_ctl_clear_data, l2);
+ if (ret) {
+ dev_err(dev, "Can't add L2 CTL data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ l2->dev = dev;
+ platform_set_drvdata(pdev, l2);
+
+ return l2;
+}
+
+static int l2_ctl_find_sys_regs(struct l2_ctl *l2)
+{
+ l2->sys_regs = syscon_node_to_regmap(l2->dev->of_node->parent);
+ if (IS_ERR(l2->sys_regs)) {
+ dev_err(l2->dev, "Couldn't get L2 CTL register map\n");
+ return PTR_ERR(l2->sys_regs);
+ }
+
+ return 0;
+}
+
+static int l2_ctl_of_parse_property(struct l2_ctl *l2, enum l2_ctl_stall id,
+ const char *propname)
+{
+ int ret = 0;
+ u32 data;
+
+ if (!of_property_read_u32(l2->dev->of_node, propname, &data)) {
+ ret = l2_ctl_set_latency(l2, id, data);
+ if (ret)
+ dev_err(l2->dev, "Invalid value of '%s'\n", propname);
+ }
+
+ return ret;
+}
+
+static int l2_ctl_of_parse(struct l2_ctl *l2)
+{
+ int ret;
+
+ ret = l2_ctl_of_parse_property(l2, L2_WS_STALL, "baikal,l2-ws-latency");
+ if (ret)
+ return ret;
+
+ ret = l2_ctl_of_parse_property(l2, L2_TAG_STALL, "baikal,l2-tag-latency");
+ if (ret)
+ return ret;
+
+ return l2_ctl_of_parse_property(l2, L2_DATA_STALL,
+ "baikal,l2-data-latency");
+}
+
+static ssize_t l2_ctl_latency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct l2_ctl_device_attribute *devattr = to_l2_ctl_dev_attr(attr);
+ struct l2_ctl *l2 = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = l2_ctl_get_latency(l2, devattr->id, &data);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", data);
+}
+
+static ssize_t l2_ctl_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct l2_ctl_device_attribute *devattr = to_l2_ctl_dev_attr(attr);
+ struct l2_ctl *l2 = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ if (kstrtouint(buf, 0, &data) < 0)
+ return -EINVAL;
+
+ ret = l2_ctl_set_latency(l2, devattr->id, data);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static L2_CTL_ATTR_RW(l2_ws_latency, l2_ctl_latency, L2_WS_STALL);
+static L2_CTL_ATTR_RW(l2_tag_latency, l2_ctl_latency, L2_TAG_STALL);
+static L2_CTL_ATTR_RW(l2_data_latency, l2_ctl_latency, L2_DATA_STALL);
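+
+/*
+ * The three attributes above appear as l2_ws_latency, l2_tag_latency and
+ * l2_data_latency sysfs files under the platform device (the exact path
+ * is board-specific). Reads return the current stall value and writes
+ * are clamped to the 0..3 range by l2_ctl_set_latency().
+ */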
+
+static struct attribute *l2_ctl_sysfs_attrs[] = {
+ &l2_ctl_attr_l2_ws_latency.dev_attr.attr,
+ &l2_ctl_attr_l2_tag_latency.dev_attr.attr,
+ &l2_ctl_attr_l2_data_latency.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(l2_ctl_sysfs);
+
+static void l2_ctl_remove_sysfs(void *data)
+{
+ struct l2_ctl *l2 = data;
+
+ device_remove_groups(l2->dev, l2_ctl_sysfs_groups);
+}
+
+static int l2_ctl_init_sysfs(struct l2_ctl *l2)
+{
+ int ret;
+
+ ret = device_add_groups(l2->dev, l2_ctl_sysfs_groups);
+ if (ret) {
+ dev_err(l2->dev, "Failed to create L2 CTL sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(l2->dev, l2_ctl_remove_sysfs, l2);
+ if (ret)
+ dev_err(l2->dev, "Can't add L2 CTL sysfs remove action\n");
+
+ return ret;
+}
+
+static int l2_ctl_probe(struct platform_device *pdev)
+{
+ struct l2_ctl *l2;
+ int ret;
+
+ l2 = l2_ctl_create_data(pdev);
+ if (IS_ERR(l2))
+ return PTR_ERR(l2);
+
+ ret = l2_ctl_find_sys_regs(l2);
+ if (ret)
+ return ret;
+
+ ret = l2_ctl_of_parse(l2);
+ if (ret)
+ return ret;
+
+ ret = l2_ctl_init_sysfs(l2);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id l2_ctl_of_match[] = {
+ { .compatible = "baikal,bt1-l2-ctl" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, l2_ctl_of_match);
+
+static struct platform_driver l2_ctl_driver = {
+ .probe = l2_ctl_probe,
+ .driver = {
+ .name = "bt1-l2-ctl",
+ .of_match_table = l2_ctl_of_match
+ }
+};
+module_platform_driver(l2_ctl_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 L2-cache driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 81a1b1d01683..25196d6268e2 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1091,7 +1091,7 @@ static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row,
/* power related timings */
val = dmc->timings->tFAW / clk_period_ps;
val += dmc->timings->tFAW % clk_period_ps ? 1 : 0;
- val = max(val, dmc->min_tck->tXP);
+ val = max(val, dmc->min_tck->tFAW);
reg = &timing_power[0];
*reg_timing_power |= TIMING_VAL2REG(reg, val);
@@ -1346,15 +1346,13 @@ static irqreturn_t dmc_irq_thread(int irq, void *priv)
struct exynos5_dmc *dmc = priv;
mutex_lock(&dmc->df->lock);
-
exynos5_dmc_perf_events_check(dmc);
-
res = update_devfreq(dmc->df);
+ mutex_unlock(&dmc->df->lock);
+
if (res)
dev_warn(dmc->dev, "devfreq failed with %d\n", res);
- mutex_unlock(&dmc->df->lock);
-
return IRQ_HANDLED;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index c2dd322691d1..68aea22f2b89 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5052,9 +5052,11 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
* @ioc: Pointer to MPT_ADAPTER structure
* @persist_opcode: see below
*
- * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
- * devices not currently present.
- * MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear al persist TargetID mappings
+ * =============================== ======================================
+ * MPI_SAS_OP_CLEAR_NOT_PRESENT Free all persist TargetID mappings for
+ * devices not currently present.
+ * MPI_SAS_OP_CLEAR_ALL_PERSISTENT Clear all persist TargetID mappings
+ * =============================== ======================================
*
* NOTE: Don't use this function during interrupt time.
*
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0a59249198d3..4f8b73d92df3 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -407,6 +407,21 @@ config MFD_EXYNOS_LPASS
Select this option to enable support for Samsung Exynos Low Power
Audio Subsystem.
+config MFD_GATEWORKS_GSC
+ tristate "Gateworks System Controller"
+ depends on (I2C && OF)
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Enable support for the Gateworks System Controller (GSC) found
+ on Gateworks Single Board Computers supporting system functions
+ such as push-button monitoring, multiple ADCs for voltage and
+ temperature monitoring, a fan controller and a watchdog monitor.
+ This driver provides common support for accessing the device.
+ Additional drivers must be enabled in order to use the
+ functionality of the device.
+
config MFD_MC13XXX
tristate
depends on (SPI_MASTER || I2C)
@@ -434,6 +449,15 @@ config MFD_MC13XXX_I2C
help
Select this if your MC13xxx is connected via an I2C bus.
+config MFD_MP2629
+ tristate "Monolithic Power Systems MP2629 ADC and Battery charger"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Select this option to enable support for Monolithic Power Systems
+ battery charger. This provides ADC, thermal and battery charger power
+ management functions.
+
config MFD_MXS_LRADC
tristate "Freescale i.MX23/i.MX28 LRADC"
depends on ARCH_MXS || COMPILE_TEST
@@ -551,7 +575,7 @@ config INTEL_SOC_PMIC
config INTEL_SOC_PMIC_BXTWC
tristate "Support for Intel Broxton Whiskey Cove PMIC"
- depends on INTEL_PMC_IPC
+ depends on MFD_INTEL_PMC_BXT
select MFD_CORE
select REGMAP_IRQ
help
@@ -593,7 +617,7 @@ config INTEL_SOC_PMIC_MRFLD
tristate "Support for Intel Merrifield Basin Cove PMIC"
depends on GPIOLIB
depends on ACPI
- depends on INTEL_SCU_IPC
+ depends on INTEL_SCU
select MFD_CORE
select REGMAP_IRQ
help
@@ -625,13 +649,27 @@ config MFD_INTEL_LPSS_PCI
config MFD_INTEL_MSIC
bool "Intel MSIC"
- depends on INTEL_SCU_IPC
+ depends on INTEL_SCU
select MFD_CORE
help
Select this option to enable access to Intel MSIC (Avatele
Passage) chip. This chip embeds audio, battery, GPIO, etc.
devices used in Intel Medfield platforms.
+config MFD_INTEL_PMC_BXT
+ tristate "Intel PMC Driver for Broxton"
+ depends on X86
+ depends on X86_PLATFORM_DEVICES
+ depends on ACPI
+ select INTEL_SCU_IPC
+ select MFD_CORE
+ help
+ This driver provides support for the PMC (Power Management
+ Controller) on Intel Broxton and Apollo Lake. The PMC is a
+ multi-function device that exposes IPC, General Control
+ Register and P-unit access. In addition this creates devices
+ for iTCO watchdog and telemetry that are part of the PMC.
+
config MFD_IPAQ_MICRO
bool "Atmel Micro ASIC (iPAQ h3100/h3600/h3700) Support"
depends on SA1100_H3100 || SA1100_H3600
@@ -870,6 +908,18 @@ config MFD_MAX8998
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_MT6360
+ tristate "Mediatek MT6360 SubPMIC"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C
+ help
+ Say Y here to enable MT6360 PMU/PMIC/LDO functional support.
+ The PMU part includes a charger, a flashlight and RGB LEDs.
+ The PMIC part includes 2-channel BUCKs and 2-channel LDOs.
+ The LDO part includes 4-channel LDOs.
+
config MFD_MT6397
tristate "MediaTek MT6397 PMIC Support"
select MFD_CORE
@@ -2028,10 +2078,9 @@ config MCP_UCB1200_TS
endmenu
config MFD_VEXPRESS_SYSREG
- bool "Versatile Express System Registers"
- depends on VEXPRESS_CONFIG && GPIOLIB && !ARCH_USES_GETTIMEOFFSET
+ tristate "Versatile Express System Registers"
+ depends on VEXPRESS_CONFIG && GPIOLIB
default y
- select CLKSRC_MMIO
select GPIO_GENERIC_PLATFORM
select MFD_CORE
select MFD_SYSCON
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index f935d10cbf0f..9367a92f795a 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o
obj-$(CONFIG_MFD_CROS_EC_DEV) += cros_ec_dev.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
+obj-$(CONFIG_MFD_GATEWORKS_GSC) += gateworks-gsc.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
@@ -170,6 +171,8 @@ obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o
+obj-$(CONFIG_MFD_MP2629) += mp2629.o
+
pcf50633-objs := pcf50633-core.o pcf50633-irq.o
obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
@@ -212,6 +215,7 @@ obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o
obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o
obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
+obj-$(CONFIG_MFD_INTEL_PMC_BXT) += intel_pmc_bxt.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o
@@ -239,7 +243,8 @@ obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
-mt6397-objs := mt6397-core.o mt6397-irq.o
+obj-$(CONFIG_MFD_MT6360) += mt6360-core.o
+mt6397-objs := mt6397-core.o mt6397-irq.o mt6358-irq.o
obj-$(CONFIG_MFD_MT6397) += mt6397.o
obj-$(CONFIG_INTEL_SOC_PMIC_MRFLD) += intel_soc_pmic_mrfld.o
diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c
new file mode 100644
index 000000000000..576da62fbb0c
--- /dev/null
+++ b/drivers/mfd/gateworks-gsc.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The Gateworks System Controller (GSC) is a multi-function
+ * device designed for use in Gateworks Single Board Computers.
+ * The control interface is I2C, with an interrupt. The device supports
+ * system functions such as push-button monitoring, multiple ADCs for
+ * voltage and temperature monitoring, fan controller and watchdog monitor.
+ *
+ * Copyright (C) 2020 Gateworks Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/gsc.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <asm/unaligned.h>
+
+/*
+ * The GSC suffers from an erratum where, occasionally during ADC cycles,
+ * the chip can NAK I2C transactions. To ensure reliable register access
+ * we place retries around register accesses.
+ */
+#define I2C_RETRIES 3
+
+int gsc_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i2c_client *client = context;
+ int retry, ret;
+
+ for (retry = 0; retry < I2C_RETRIES; retry++) {
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ /*
+ * -EAGAIN returned when the i2c host controller is busy
+ * -EIO returned when i2c device is busy
+ */
+ if (ret != -EAGAIN && ret != -EIO)
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gsc_write);
+
+int gsc_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct i2c_client *client = context;
+ int retry, ret;
+
+ for (retry = 0; retry < I2C_RETRIES; retry++) {
+ ret = i2c_smbus_read_byte_data(client, reg);
+ /*
+ * -EAGAIN returned when the i2c host controller is busy
+ * -EIO returned when i2c device is busy
+ */
+ if (ret != -EAGAIN && ret != -EIO)
+ break;
+ }
+ *val = ret & 0xff;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gsc_read);
+
+/*
+ * gsc_powerdown - API to use GSC to power down board for a specific time
+ *
+ * secs - number of seconds to remain powered off
+ */
+static int gsc_powerdown(struct gsc_dev *gsc, unsigned long secs)
+{
+ int ret;
+ unsigned char regs[4];
+
+ dev_info(&gsc->i2c->dev, "GSC powerdown for %lu seconds\n",
+ secs);
+
+ put_unaligned_le32(secs, regs);
+ ret = regmap_bulk_write(gsc->regmap, GSC_TIME_ADD, regs, 4);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(gsc->regmap, GSC_CTRL_1,
+ BIT(GSC_CTRL_1_SLEEP_ADD),
+ BIT(GSC_CTRL_1_SLEEP_ADD));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(gsc->regmap, GSC_CTRL_1,
+ BIT(GSC_CTRL_1_SLEEP_ACTIVATE) |
+ BIT(GSC_CTRL_1_SLEEP_ENABLE),
+ BIT(GSC_CTRL_1_SLEEP_ACTIVATE) |
+ BIT(GSC_CTRL_1_SLEEP_ENABLE));
+
+
+ return ret;
+}
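+
+/*
+ * gsc_powerdown() is reached through the write-only "powerdown" sysfs
+ * attribute below: writing a number of seconds arms the sleep-add and
+ * sleep-activate bits so the GSC cuts board power and restores it once
+ * the interval elapses.
+ */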
+
+static ssize_t gsc_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(dev);
+ const char *name = attr->attr.name;
+ int rz = 0;
+
+ if (strcasecmp(name, "fw_version") == 0)
+ rz = sprintf(buf, "%d\n", gsc->fwver);
+ else if (strcasecmp(name, "fw_crc") == 0)
+ rz = sprintf(buf, "0x%04x\n", gsc->fwcrc);
+ else
+ dev_err(dev, "invalid command: '%s'\n", name);
+
+ return rz;
+}
+
+static ssize_t gsc_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(dev);
+ const char *name = attr->attr.name;
+ long value;
+
+ if (strcasecmp(name, "powerdown") == 0) {
+ if (kstrtol(buf, 0, &value) == 0)
+ gsc_powerdown(gsc, value);
+ } else {
+ dev_err(dev, "invalid command: '%s\n", name);
+ }
+
+ return count;
+}
+
+static struct device_attribute attr_fwver =
+ __ATTR(fw_version, 0440, gsc_show, NULL);
+static struct device_attribute attr_fwcrc =
+ __ATTR(fw_crc, 0440, gsc_show, NULL);
+static struct device_attribute attr_pwrdown =
+ __ATTR(powerdown, 0220, NULL, gsc_store);
+
+static struct attribute *gsc_attrs[] = {
+ &attr_fwver.attr,
+ &attr_fwcrc.attr,
+ &attr_pwrdown.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = gsc_attrs,
+};
+
+static const struct of_device_id gsc_of_match[] = {
+ { .compatible = "gw,gsc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gsc_of_match);
+
+static struct regmap_bus gsc_regmap_bus = {
+ .reg_read = gsc_read,
+ .reg_write = gsc_write,
+};
+
+static const struct regmap_config gsc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+ .max_register = GSC_WP,
+};
+
+static const struct regmap_irq gsc_irqs[] = {
+ REGMAP_IRQ_REG(GSC_IRQ_PB, 0, BIT(GSC_IRQ_PB)),
+ REGMAP_IRQ_REG(GSC_IRQ_KEY_ERASED, 0, BIT(GSC_IRQ_KEY_ERASED)),
+ REGMAP_IRQ_REG(GSC_IRQ_EEPROM_WP, 0, BIT(GSC_IRQ_EEPROM_WP)),
+ REGMAP_IRQ_REG(GSC_IRQ_RESV, 0, BIT(GSC_IRQ_RESV)),
+ REGMAP_IRQ_REG(GSC_IRQ_GPIO, 0, BIT(GSC_IRQ_GPIO)),
+ REGMAP_IRQ_REG(GSC_IRQ_TAMPER, 0, BIT(GSC_IRQ_TAMPER)),
+ REGMAP_IRQ_REG(GSC_IRQ_WDT_TIMEOUT, 0, BIT(GSC_IRQ_WDT_TIMEOUT)),
+ REGMAP_IRQ_REG(GSC_IRQ_SWITCH_HOLD, 0, BIT(GSC_IRQ_SWITCH_HOLD)),
+};
+
+static const struct regmap_irq_chip gsc_irq_chip = {
+ .name = "gateworks-gsc",
+ .irqs = gsc_irqs,
+ .num_irqs = ARRAY_SIZE(gsc_irqs),
+ .num_regs = 1,
+ .status_base = GSC_IRQ_STATUS,
+ .mask_base = GSC_IRQ_ENABLE,
+ .mask_invert = true,
+ .ack_base = GSC_IRQ_STATUS,
+ .ack_invert = true,
+};
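+
+/*
+ * Note on the inverted flags above: GSC_IRQ_ENABLE is a true enable
+ * register (a set bit unmasks the interrupt), and a latched event in
+ * GSC_IRQ_STATUS is acknowledged by writing the bit back as zero, hence
+ * mask_invert and ack_invert.
+ */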
+
+static int gsc_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct gsc_dev *gsc;
+ struct regmap_irq_chip_data *irq_data;
+ int ret;
+ unsigned int reg;
+
+ gsc = devm_kzalloc(dev, sizeof(*gsc), GFP_KERNEL);
+ if (!gsc)
+ return -ENOMEM;
+
+ gsc->dev = &client->dev;
+ gsc->i2c = client;
+ i2c_set_clientdata(client, gsc);
+
+ gsc->regmap = devm_regmap_init(dev, &gsc_regmap_bus, client,
+ &gsc_regmap_config);
+ if (IS_ERR(gsc->regmap))
+ return PTR_ERR(gsc->regmap);
+
+ if (regmap_read(gsc->regmap, GSC_FW_VER, &reg))
+ return -EIO;
+ gsc->fwver = reg;
+
+ regmap_read(gsc->regmap, GSC_FW_CRC, &reg);
+ gsc->fwcrc = reg;
+ regmap_read(gsc->regmap, GSC_FW_CRC + 1, &reg);
+ gsc->fwcrc |= reg << 8;
+
+ gsc->i2c_hwmon = devm_i2c_new_dummy_device(dev, client->adapter,
+ GSC_HWMON);
+ if (IS_ERR(gsc->i2c_hwmon)) {
+ dev_err(dev, "Failed to allocate I2C device for HWMON\n");
+ return PTR_ERR(gsc->i2c_hwmon);
+ }
+
+ ret = devm_regmap_add_irq_chip(dev, gsc->regmap, client->irq,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_FALLING, 0,
+ &gsc_irq_chip, &irq_data);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Gateworks System Controller v%d: fw 0x%04x\n",
+ gsc->fwver, gsc->fwcrc);
+
+ ret = sysfs_create_group(&dev->kobj, &attr_group);
+ if (ret)
+ dev_err(dev, "failed to create sysfs attrs\n");
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ sysfs_remove_group(&dev->kobj, &attr_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gsc_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &attr_group);
+
+ return 0;
+}
+
+static struct i2c_driver gsc_driver = {
+ .driver = {
+ .name = "gateworks-gsc",
+ .of_match_table = gsc_of_match,
+ },
+ .probe_new = gsc_probe,
+ .remove = gsc_remove,
+};
+module_i2c_driver(gsc_driver);
+
+MODULE_AUTHOR("Tim Harvey <tharvey@gateworks.com>");
+MODULE_DESCRIPTION("I2C Core interface for GSC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 8ad6768bd7a2..247f9849e54a 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -355,12 +355,12 @@ static int htcpld_register_chip_i2c(
info.platform_data = chip;
/* Add the I2C device. This calls the probe() function. */
- client = i2c_new_device(adapter, &info);
- if (!client) {
+ client = i2c_new_client_device(adapter, &info);
+ if (IS_ERR(client)) {
/* I2C device registration failed, continue with the next */
dev_warn(dev, "Unable to add I2C device for 0x%x\n",
plat_chip_data->addr);
- return -ENODEV;
+ return PTR_ERR(client);
}
i2c_set_clientdata(client, chip);
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 7fc0c5d4edff..046222684b8b 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -250,9 +250,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x4daf), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4dc5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dc6), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4dc7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4de8), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4de9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dea), (kernel_ulong_t)&bxt_i2c_info },
diff --git a/drivers/mfd/intel_pmc_bxt.c b/drivers/mfd/intel_pmc_bxt.c
new file mode 100644
index 000000000000..9f01d38acc7f
--- /dev/null
+++ b/drivers/mfd/intel_pmc_bxt.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Intel Broxton PMC
+ *
+ * (C) Copyright 2014 - 2020 Intel Corporation
+ *
+ * This driver is based on Intel SCU IPC driver (intel_scu_ipc.c) by
+ * Sreedhara DS <sreedhara.ds@intel.com>
+ *
+ * The PMC (Power Management Controller) running on the ARC processor
+ * communicates with another entity running in the IA (Intel Architecture)
+ * core through an IPC (Intel Processor Communications) mechanism which in
+ * turn sends messages between the IA and the PMC.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel_pmc_bxt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/itco_wdt.h>
+
+#include <asm/intel_scu_ipc.h>
+
+/* Convert residency counters running at a 19.2 MHz clock rate to usecs */
+#define S0IX_RESIDENCY_IN_USECS(d, s) \
+({ \
+ u64 result = 10ull * ((d) + (s)); \
+ do_div(result, 192); \
+ result; \
+})
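+
+/*
+ * Worked example: the counters tick at 19.2 MHz, so one tick is 1/19.2 us
+ * and residency_us = ticks * 10 / 192; 1920 deep ticks and 0 shallow
+ * ticks give 1920 * 10 / 192 = 100 us.
+ */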
+
+/* Resources exported from IFWI */
+#define PLAT_RESOURCE_IPC_INDEX 0
+#define PLAT_RESOURCE_IPC_SIZE 0x1000
+#define PLAT_RESOURCE_GCR_OFFSET 0x1000
+#define PLAT_RESOURCE_GCR_SIZE 0x1000
+#define PLAT_RESOURCE_BIOS_DATA_INDEX 1
+#define PLAT_RESOURCE_BIOS_IFACE_INDEX 2
+#define PLAT_RESOURCE_TELEM_SSRAM_INDEX 3
+#define PLAT_RESOURCE_ISP_DATA_INDEX 4
+#define PLAT_RESOURCE_ISP_IFACE_INDEX 5
+#define PLAT_RESOURCE_GTD_DATA_INDEX 6
+#define PLAT_RESOURCE_GTD_IFACE_INDEX 7
+#define PLAT_RESOURCE_ACPI_IO_INDEX 0
+
+/*
+ * BIOS does not create an ACPI device for each PMC function, but
+ * exports multiple resources from one ACPI device (IPC) for multiple
+ * functions. This driver is responsible for creating child devices and
+ * exporting resources for those functions.
+ */
+#define SMI_EN_OFFSET 0x0040
+#define SMI_EN_SIZE 4
+#define TCO_BASE_OFFSET 0x0060
+#define TCO_REGS_SIZE 16
+#define TELEM_SSRAM_SIZE 240
+#define TELEM_PMC_SSRAM_OFFSET 0x1b00
+#define TELEM_PUNIT_SSRAM_OFFSET 0x1a00
+
+/* Commands */
+#define PMC_NORTHPEAK_CTRL 0xed
+
+static inline bool is_gcr_valid(u32 offset)
+{
+ return offset < PLAT_RESOURCE_GCR_SIZE - 8;
+}
+
+/**
+ * intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register
+ * @pmc: PMC device pointer
+ * @offset: offset of GCR register from GCR address base
+ * @data: data pointer for storing the register output
+ *
+ * Reads the 64-bit PMC GCR register at given offset.
+ *
+ * Return: Negative value on error or 0 on success.
+ */
+int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset, u64 *data)
+{
+ if (!is_gcr_valid(offset))
+ return -EINVAL;
+
+ spin_lock(&pmc->gcr_lock);
+ *data = readq(pmc->gcr_mem_base + offset);
+ spin_unlock(&pmc->gcr_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64);
+
+/**
+ * intel_pmc_gcr_update() - Update PMC GCR register bits
+ * @pmc: PMC device pointer
+ * @offset: offset of GCR register from GCR address base
+ * @mask: bit mask for update operation
+ * @val: update value
+ *
+ * Updates the bits of given GCR register as specified by
+ * @mask and @val.
+ *
+ * Return: Negative value on error or 0 on success.
+ */
+int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset, u32 mask, u32 val)
+{
+ u32 new_val;
+
+ if (!is_gcr_valid(offset))
+ return -EINVAL;
+
+ spin_lock(&pmc->gcr_lock);
+ new_val = readl(pmc->gcr_mem_base + offset);
+
+ new_val = (new_val & ~mask) | (val & mask);
+ writel(new_val, pmc->gcr_mem_base + offset);
+
+ new_val = readl(pmc->gcr_mem_base + offset);
+ spin_unlock(&pmc->gcr_lock);
+
+ /* Check whether the bit update is successful */
+ return (new_val & mask) != (val & mask) ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
+
+/**
+ * intel_pmc_s0ix_counter_read() - Read S0ix residency
+ * @pmc: PMC device pointer
+ * @data: Out param that contains current S0ix residency count.
+ *
+ * Writes to @data how many usecs the system has been in low-power S0ix
+ * state.
+ *
+ * Return: An error code or 0 on success.
+ */
+int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data)
+{
+ u64 deep, shlw;
+
+ spin_lock(&pmc->gcr_lock);
+ deep = readq(pmc->gcr_mem_base + PMC_GCR_TELEM_DEEP_S0IX_REG);
+ shlw = readq(pmc->gcr_mem_base + PMC_GCR_TELEM_SHLW_S0IX_REG);
+ spin_unlock(&pmc->gcr_lock);
+
+ *data = S0IX_RESIDENCY_IN_USECS(deep, shlw);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_s0ix_counter_read);
+
+/**
+ * simplecmd_store() - Send a simple IPC command
+ * @dev: Device the attribute belongs to
+ * @attr: Attribute in question
+ * @buf: Buffer holding data to be stored to the attribute
+ * @count: Number of bytes in @buf
+ *
+ * Expects a string with two integers separated by a space. These two
+ * values hold the command and subcommand that are sent to the PMC.
+ *
+ * Return: Number of bytes written (@count) or a negative errno in case
+ * of error.
+ */
+static ssize_t simplecmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_pmc_dev *pmc = dev_get_drvdata(dev);
+ struct intel_scu_ipc_dev *scu = pmc->scu;
+ int subcmd;
+ int cmd;
+ int ret;
+
+ ret = sscanf(buf, "%d %d", &cmd, &subcmd);
+ if (ret != 2) {
+ dev_err(dev, "Invalid values, expected: cmd subcmd\n");
+ return -EINVAL;
+ }
+
+ ret = intel_scu_ipc_dev_simple_command(scu, cmd, subcmd);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_WO(simplecmd);
+
+/**
+ * northpeak_store() - Enable or disable Northpeak
+ * @dev: Device the attribute belongs to
+ * @attr: Attribute in question
+ * @buf: Buffer holding data to be stored to the attribute
+ * @count: Number of bytes in @buf
+ *
+ * Expects an unsigned integer. Non-zero enables Northpeak and zero
+ * disables it.
+ *
+ * Return: Number of bytes written (@count) or a negative errno in case
+ * of error.
+ */
+static ssize_t northpeak_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_pmc_dev *pmc = dev_get_drvdata(dev);
+ struct intel_scu_ipc_dev *scu = pmc->scu;
+ unsigned long val;
+ int subcmd;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ /* Northpeak is enabled if subcmd == 1 and disabled if it is 0 */
+ if (val)
+ subcmd = 1;
+ else
+ subcmd = 0;
+
+ ret = intel_scu_ipc_dev_simple_command(scu, PMC_NORTHPEAK_CTRL, subcmd);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_WO(northpeak);
+
+static struct attribute *intel_pmc_attrs[] = {
+ &dev_attr_northpeak.attr,
+ &dev_attr_simplecmd.attr,
+ NULL
+};
+
+static const struct attribute_group intel_pmc_group = {
+ .attrs = intel_pmc_attrs,
+};
+
+static const struct attribute_group *intel_pmc_groups[] = {
+ &intel_pmc_group,
+ NULL
+};
+
+static struct resource punit_res[6];
+
+static struct mfd_cell punit = {
+ .name = "intel_punit_ipc",
+ .resources = punit_res,
+};
+
+static struct itco_wdt_platform_data tco_pdata = {
+ .name = "Apollo Lake SoC",
+ .version = 5,
+ .no_reboot_use_pmc = true,
+};
+
+static struct resource tco_res[2];
+
+static const struct mfd_cell tco = {
+ .name = "iTCO_wdt",
+ .ignore_resource_conflicts = true,
+ .resources = tco_res,
+ .num_resources = ARRAY_SIZE(tco_res),
+ .platform_data = &tco_pdata,
+ .pdata_size = sizeof(tco_pdata),
+};
+
+static const struct resource telem_res[] = {
+ DEFINE_RES_MEM(TELEM_PUNIT_SSRAM_OFFSET, TELEM_SSRAM_SIZE),
+ DEFINE_RES_MEM(TELEM_PMC_SSRAM_OFFSET, TELEM_SSRAM_SIZE),
+};
+
+static const struct mfd_cell telem = {
+ .name = "intel_telemetry",
+ .resources = telem_res,
+ .num_resources = ARRAY_SIZE(telem_res),
+};
+
+static int intel_pmc_get_tco_resources(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ if (acpi_has_watchdog())
+ return 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO,
+ PLAT_RESOURCE_ACPI_IO_INDEX);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get IO resource\n");
+ return -EINVAL;
+ }
+
+ tco_res[0].flags = IORESOURCE_IO;
+ tco_res[0].start = res->start + TCO_BASE_OFFSET;
+ tco_res[0].end = tco_res[0].start + TCO_REGS_SIZE - 1;
+ tco_res[1].flags = IORESOURCE_IO;
+ tco_res[1].start = res->start + SMI_EN_OFFSET;
+ tco_res[1].end = tco_res[1].start + SMI_EN_SIZE - 1;
+
+ return 0;
+}
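
With a hypothetical ACPI IO base of 0x400, the carved windows come out to 0x460-0x46f for the TCO registers and 0x440-0x443 for SMI_EN; the same arithmetic as a sketch:

    #define ACPI_IO_BASE    0x400                                   /* hypothetical base */
    #define TCO_START       (ACPI_IO_BASE + TCO_BASE_OFFSET)        /* 0x460 */
    #define TCO_END         (TCO_START + TCO_REGS_SIZE - 1)         /* 0x46f */
    #define SMI_EN_START    (ACPI_IO_BASE + SMI_EN_OFFSET)          /* 0x440 */
    #define SMI_EN_END      (SMI_EN_START + SMI_EN_SIZE - 1)        /* 0x443 */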
+
+static int intel_pmc_get_resources(struct platform_device *pdev,
+ struct intel_pmc_dev *pmc,
+ struct intel_scu_ipc_data *scu_data)
+{
+ struct resource gcr_res;
+ size_t npunit_res = 0;
+ struct resource *res;
+ int ret;
+
+ scu_data->irq = platform_get_irq_optional(pdev, 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_IPC_INDEX);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get IPC resource\n");
+ return -EINVAL;
+ }
+
+ /* IPC registers */
+ scu_data->mem.flags = res->flags;
+ scu_data->mem.start = res->start;
+ scu_data->mem.end = res->start + PLAT_RESOURCE_IPC_SIZE - 1;
+
+ /* GCR registers */
+ gcr_res.flags = res->flags;
+ gcr_res.start = res->start + PLAT_RESOURCE_GCR_OFFSET;
+ gcr_res.end = gcr_res.start + PLAT_RESOURCE_GCR_SIZE - 1;
+
+ pmc->gcr_mem_base = devm_ioremap_resource(&pdev->dev, &gcr_res);
+ if (IS_ERR(pmc->gcr_mem_base))
+ return PTR_ERR(pmc->gcr_mem_base);
+
+ /* Only register iTCO watchdog if there is no WDAT ACPI table */
+ ret = intel_pmc_get_tco_resources(pdev);
+ if (ret)
+ return ret;
+
+ /* BIOS data register */
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_BIOS_DATA_INDEX);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get resource of P-unit BIOS data\n");
+ return -EINVAL;
+ }
+ punit_res[npunit_res++] = *res;
+
+ /* BIOS interface register */
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_BIOS_IFACE_INDEX);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get resource of P-unit BIOS interface\n");
+ return -EINVAL;
+ }
+ punit_res[npunit_res++] = *res;
+
+ /* ISP data register, optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_ISP_DATA_INDEX);
+ if (res)
+ punit_res[npunit_res++] = *res;
+
+ /* ISP interface register, optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_ISP_IFACE_INDEX);
+ if (res)
+ punit_res[npunit_res++] = *res;
+
+ /* GTD data register, optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_GTD_DATA_INDEX);
+ if (res)
+ punit_res[npunit_res++] = *res;
+
+ /* GTD interface register, optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_GTD_IFACE_INDEX);
+ if (res)
+ punit_res[npunit_res++] = *res;
+
+ punit.num_resources = npunit_res;
+
+ /* Telemetry SSRAM is optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ PLAT_RESOURCE_TELEM_SSRAM_INDEX);
+ if (res)
+ pmc->telem_base = res;
+
+ return 0;
+}
+
+static int intel_pmc_create_devices(struct intel_pmc_dev *pmc)
+{
+ int ret;
+
+ if (!acpi_has_watchdog()) {
+ ret = devm_mfd_add_devices(pmc->dev, PLATFORM_DEVID_AUTO, &tco,
+ 1, NULL, 0, NULL);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(pmc->dev, PLATFORM_DEVID_AUTO, &punit, 1,
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ if (pmc->telem_base) {
+ ret = devm_mfd_add_devices(pmc->dev, PLATFORM_DEVID_AUTO,
+ &telem, 1, pmc->telem_base, 0, NULL);
+ }
+
+ return ret;
+}
+
+static const struct acpi_device_id intel_pmc_acpi_ids[] = {
+ { "INT34D2" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, intel_pmc_acpi_ids);
+
+static int intel_pmc_probe(struct platform_device *pdev)
+{
+ struct intel_scu_ipc_data scu_data = {};
+ struct intel_pmc_dev *pmc;
+ int ret;
+
+ pmc = devm_kzalloc(&pdev->dev, sizeof(*pmc), GFP_KERNEL);
+ if (!pmc)
+ return -ENOMEM;
+
+ pmc->dev = &pdev->dev;
+ spin_lock_init(&pmc->gcr_lock);
+
+ ret = intel_pmc_get_resources(pdev, pmc, &scu_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request resources\n");
+ return ret;
+ }
+
+ pmc->scu = devm_intel_scu_ipc_register(&pdev->dev, &scu_data);
+ if (IS_ERR(pmc->scu))
+ return PTR_ERR(pmc->scu);
+
+ platform_set_drvdata(pdev, pmc);
+
+ ret = intel_pmc_create_devices(pmc);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to create PMC devices\n");
+
+ return ret;
+}
+
+static struct platform_driver intel_pmc_driver = {
+ .probe = intel_pmc_probe,
+ .driver = {
+ .name = "intel_pmc_bxt",
+ .acpi_match_table = intel_pmc_acpi_ids,
+ .dev_groups = intel_pmc_groups,
+ },
+};
+module_platform_driver(intel_pmc_driver);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_AUTHOR("Zha Qipeng <qipeng.zha@intel.com>");
+MODULE_DESCRIPTION("Intel Broxton PMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 41326b48da55..84ca7902e1df 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -216,7 +216,6 @@ static int intel_quark_gpio_setup(struct pci_dev *pdev, struct mfd_cell *cell)
pdata->properties->ngpio = INTEL_QUARK_MFD_NGPIO;
pdata->properties->gpio_base = INTEL_QUARK_MFD_GPIO_BASE;
pdata->properties->irq[0] = pdev->irq;
- pdata->properties->has_irq = true;
pdata->properties->irq_shared = true;
cell->platform_data = pdata;
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 739cfb5b69fe..eba89780dbe7 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -15,7 +15,7 @@
#include <linux/mfd/intel_soc_pmic_bxtwc.h>
#include <linux/module.h>
-#include <asm/intel_pmc_ipc.h>
+#include <asm/intel_scu_ipc.h>
/* PMIC device registers */
#define REG_ADDR_MASK 0xFF00
@@ -58,6 +58,10 @@
/* Whiskey Cove PMIC share same ACPI ID between different platforms */
#define BROXTON_PMIC_WC_HRV 4
+#define PMC_PMIC_ACCESS 0xFF
+#define PMC_PMIC_READ 0x0
+#define PMC_PMIC_WRITE 0x1
+
enum bxtwc_irqs {
BXTWC_PWRBTN_LVL1_IRQ = 0,
BXTWC_TMU_LVL1_IRQ,
@@ -288,13 +292,12 @@ static int regmap_ipc_byte_reg_read(void *context, unsigned int reg,
ipc_in[0] = reg;
ipc_in[1] = i2c_addr;
- ret = intel_pmc_ipc_command(PMC_IPC_PMIC_ACCESS,
- PMC_IPC_PMIC_ACCESS_READ,
- ipc_in, sizeof(ipc_in), (u32 *)ipc_out, 1);
- if (ret) {
- dev_err(pmic->dev, "Failed to read from PMIC\n");
+ ret = intel_scu_ipc_dev_command(pmic->scu, PMC_PMIC_ACCESS,
+ PMC_PMIC_READ, ipc_in, sizeof(ipc_in),
+ ipc_out, sizeof(ipc_out));
+ if (ret)
return ret;
- }
+
*val = ipc_out[0];
return 0;
@@ -303,7 +306,6 @@ static int regmap_ipc_byte_reg_read(void *context, unsigned int reg,
static int regmap_ipc_byte_reg_write(void *context, unsigned int reg,
unsigned int val)
{
- int ret;
int i2c_addr;
u8 ipc_in[3];
struct intel_soc_pmic *pmic = context;
@@ -321,15 +323,9 @@ static int regmap_ipc_byte_reg_write(void *context, unsigned int reg,
ipc_in[0] = reg;
ipc_in[1] = i2c_addr;
ipc_in[2] = val;
- ret = intel_pmc_ipc_command(PMC_IPC_PMIC_ACCESS,
- PMC_IPC_PMIC_ACCESS_WRITE,
- ipc_in, sizeof(ipc_in), NULL, 0);
- if (ret) {
- dev_err(pmic->dev, "Failed to write to PMIC\n");
- return ret;
- }
-
- return 0;
+ return intel_scu_ipc_dev_command(pmic->scu, PMC_PMIC_ACCESS,
+ PMC_PMIC_WRITE, ipc_in, sizeof(ipc_in),
+ NULL, 0);
}
/* sysfs interfaces to r/w PMIC registers, required by initial script */
@@ -457,6 +453,10 @@ static int bxtwc_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, pmic);
pmic->dev = &pdev->dev;
+ pmic->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ if (!pmic->scu)
+ return -EPROBE_DEFER;
+
pmic->regmap = devm_regmap_init(&pdev->dev, NULL, pmic,
&bxtwc_regmap_config);
if (IS_ERR(pmic->regmap)) {
diff --git a/drivers/mfd/intel_soc_pmic_mrfld.c b/drivers/mfd/intel_soc_pmic_mrfld.c
index 26a1551c5faf..bd94c989d232 100644
--- a/drivers/mfd/intel_soc_pmic_mrfld.c
+++ b/drivers/mfd/intel_soc_pmic_mrfld.c
@@ -74,10 +74,11 @@ static const struct mfd_cell bcove_dev[] = {
static int bcove_ipc_byte_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
+ struct intel_soc_pmic *pmic = context;
u8 ipc_out;
int ret;
- ret = intel_scu_ipc_ioread8(reg, &ipc_out);
+ ret = intel_scu_ipc_dev_ioread8(pmic->scu, reg, &ipc_out);
if (ret)
return ret;
@@ -88,10 +89,11 @@ static int bcove_ipc_byte_reg_read(void *context, unsigned int reg,
static int bcove_ipc_byte_reg_write(void *context, unsigned int reg,
unsigned int val)
{
+ struct intel_soc_pmic *pmic = context;
u8 ipc_in = val;
int ret;
- ret = intel_scu_ipc_iowrite8(reg, ipc_in);
+ ret = intel_scu_ipc_dev_iowrite8(pmic->scu, reg, ipc_in);
if (ret)
return ret;
@@ -117,6 +119,10 @@ static int bcove_probe(struct platform_device *pdev)
if (!pmic)
return -ENOMEM;
+ pmic->scu = devm_intel_scu_ipc_dev_get(dev);
+ if (!pmic->scu)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, pmic);
pmic->dev = &pdev->dev;
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index c7ed5c353553..fec2096474ad 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -177,6 +177,7 @@ static const struct regmap_config max77620_regmap_config = {
.rd_table = &max77620_readable_table,
.wr_table = &max77620_writable_table,
.volatile_table = &max77620_volatile_table,
+ .use_single_write = true,
};
static const struct regmap_config max20024_regmap_config = {
diff --git a/drivers/mfd/mp2629.c b/drivers/mfd/mp2629.c
new file mode 100644
index 000000000000..16840ec5fd1c
--- /dev/null
+++ b/drivers/mfd/mp2629.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MP2629 parent driver for ADC and battery charger
+ *
+ * Copyright 2020 Monolithic Power Systems, Inc
+ *
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mp2629.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static const struct mfd_cell mp2629_cell[] = {
+ {
+ .name = "mp2629_adc",
+ .of_compatible = "mps,mp2629_adc",
+ },
+ {
+ .name = "mp2629_charger",
+ .of_compatible = "mps,mp2629_charger",
+ }
+};
+
+static const struct regmap_config mp2629_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x17,
+};
+
+static int mp2629_probe(struct i2c_client *client)
+{
+ struct mp2629_data *ddata;
+ int ret;
+
+ ddata = devm_kzalloc(&client->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->dev = &client->dev;
+ i2c_set_clientdata(client, ddata);
+
+ ddata->regmap = devm_regmap_init_i2c(client, &mp2629_regmap_config);
+ if (IS_ERR(ddata->regmap)) {
+ dev_err(ddata->dev, "Failed to allocate regmap\n");
+ return PTR_ERR(ddata->regmap);
+ }
+
+ ret = devm_mfd_add_devices(ddata->dev, PLATFORM_DEVID_AUTO, mp2629_cell,
+ ARRAY_SIZE(mp2629_cell), NULL, 0, NULL);
+ if (ret)
+ dev_err(ddata->dev, "Failed to register sub-devices %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id mp2629_of_match[] = {
+ { .compatible = "mps,mp2629"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, mp2629_of_match);
+
+static struct i2c_driver mp2629_driver = {
+ .driver = {
+ .name = "mp2629",
+ .of_match_table = mp2629_of_match,
+ },
+ .probe_new = mp2629_probe,
+};
+module_i2c_driver(mp2629_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MP2629 Battery charger parent driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
new file mode 100644
index 000000000000..db734f2831ff
--- /dev/null
+++ b/drivers/mfd/mt6358-irq.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 MediaTek Inc.
+
+#include <linux/interrupt.h>
+#include <linux/mfd/mt6358/core.h>
+#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct irq_top_t mt6358_ints[] = {
+ MT6358_TOP_GEN(BUCK),
+ MT6358_TOP_GEN(LDO),
+ MT6358_TOP_GEN(PSC),
+ MT6358_TOP_GEN(SCK),
+ MT6358_TOP_GEN(BM),
+ MT6358_TOP_GEN(HK),
+ MT6358_TOP_GEN(AUD),
+ MT6358_TOP_GEN(MISC),
+};
+
+static void pmic_irq_enable(struct irq_data *data)
+{
+ unsigned int hwirq = irqd_to_hwirq(data);
+ struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pmic_irq_data *irqd = chip->irq_data;
+
+ irqd->enable_hwirq[hwirq] = true;
+}
+
+static void pmic_irq_disable(struct irq_data *data)
+{
+ unsigned int hwirq = irqd_to_hwirq(data);
+ struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pmic_irq_data *irqd = chip->irq_data;
+
+ irqd->enable_hwirq[hwirq] = false;
+}
+
+static void pmic_irq_lock(struct irq_data *data)
+{
+ struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&chip->irqlock);
+}
+
+static void pmic_irq_sync_unlock(struct irq_data *data)
+{
+ unsigned int i, top_gp, gp_offset, en_reg, int_regs, shift;
+ struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pmic_irq_data *irqd = chip->irq_data;
+
+ for (i = 0; i < irqd->num_pmic_irqs; i++) {
+ if (irqd->enable_hwirq[i] == irqd->cache_hwirq[i])
+ continue;
+
+ /* Find out the IRQ group */
+ top_gp = 0;
+ while ((top_gp + 1) < irqd->num_top &&
+ i >= mt6358_ints[top_gp + 1].hwirq_base)
+ top_gp++;
+
+ /* Find the IRQ registers */
+ gp_offset = i - mt6358_ints[top_gp].hwirq_base;
+ int_regs = gp_offset / MT6358_REG_WIDTH;
+ shift = gp_offset % MT6358_REG_WIDTH;
+ en_reg = mt6358_ints[top_gp].en_reg +
+ (mt6358_ints[top_gp].en_reg_shift * int_regs);
+
+ regmap_update_bits(chip->regmap, en_reg, BIT(shift),
+ irqd->enable_hwirq[i] << shift);
+
+ irqd->cache_hwirq[i] = irqd->enable_hwirq[i];
+ }
+ mutex_unlock(&chip->irqlock);
+}
+
+static struct irq_chip mt6358_irq_chip = {
+ .name = "mt6358-irq",
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ .irq_enable = pmic_irq_enable,
+ .irq_disable = pmic_irq_disable,
+ .irq_bus_lock = pmic_irq_lock,
+ .irq_bus_sync_unlock = pmic_irq_sync_unlock,
+};
+
+static void mt6358_irq_sp_handler(struct mt6397_chip *chip,
+ unsigned int top_gp)
+{
+ unsigned int irq_status, sta_reg, status;
+ unsigned int hwirq, virq;
+ int i, j, ret;
+
+ for (i = 0; i < mt6358_ints[top_gp].num_int_regs; i++) {
+ sta_reg = mt6358_ints[top_gp].sta_reg +
+ mt6358_ints[top_gp].sta_reg_shift * i;
+
+ ret = regmap_read(chip->regmap, sta_reg, &irq_status);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to read IRQ status, ret=%d\n", ret);
+ return;
+ }
+
+ if (!irq_status)
+ continue;
+
+ status = irq_status;
+ do {
+ j = __ffs(status);
+
+ hwirq = mt6358_ints[top_gp].hwirq_base +
+ MT6358_REG_WIDTH * i + j;
+
+ virq = irq_find_mapping(chip->irq_domain, hwirq);
+ if (virq)
+ handle_nested_irq(virq);
+
+ status &= ~BIT(j);
+ } while (status);
+
+ regmap_write(chip->regmap, sta_reg, irq_status);
+ }
+}
+
+static irqreturn_t mt6358_irq_handler(int irq, void *data)
+{
+ struct mt6397_chip *chip = data;
+ struct pmic_irq_data *mt6358_irq_data = chip->irq_data;
+ unsigned int bit, i, top_irq_status = 0;
+ int ret;
+
+ ret = regmap_read(chip->regmap,
+ mt6358_irq_data->top_int_status_reg,
+ &top_irq_status);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to read status from the device, ret=%d\n", ret);
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < mt6358_irq_data->num_top; i++) {
+ bit = BIT(mt6358_ints[i].top_offset);
+ if (top_irq_status & bit) {
+ mt6358_irq_sp_handler(chip, i);
+ top_irq_status &= ~bit;
+ if (!top_irq_status)
+ break;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int pmic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct mt6397_chip *mt6397 = d->host_data;
+
+ irq_set_chip_data(irq, mt6397);
+ irq_set_chip_and_handler(irq, &mt6358_irq_chip, handle_level_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mt6358_irq_domain_ops = {
+ .map = pmic_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+int mt6358_irq_init(struct mt6397_chip *chip)
+{
+ int i, j, ret;
+ struct pmic_irq_data *irqd;
+
+ irqd = devm_kzalloc(chip->dev, sizeof(*irqd), GFP_KERNEL);
+ if (!irqd)
+ return -ENOMEM;
+
+ chip->irq_data = irqd;
+
+ mutex_init(&chip->irqlock);
+ irqd->top_int_status_reg = MT6358_TOP_INT_STATUS0;
+ irqd->num_pmic_irqs = MT6358_IRQ_NR;
+ irqd->num_top = ARRAY_SIZE(mt6358_ints);
+
+ irqd->enable_hwirq = devm_kcalloc(chip->dev,
+ irqd->num_pmic_irqs,
+ sizeof(*irqd->enable_hwirq),
+ GFP_KERNEL);
+ if (!irqd->enable_hwirq)
+ return -ENOMEM;
+
+ irqd->cache_hwirq = devm_kcalloc(chip->dev,
+ irqd->num_pmic_irqs,
+ sizeof(*irqd->cache_hwirq),
+ GFP_KERNEL);
+ if (!irqd->cache_hwirq)
+ return -ENOMEM;
+
+ /* Disable all interrupts for initializing */
+ for (i = 0; i < irqd->num_top; i++) {
+ for (j = 0; j < mt6358_ints[i].num_int_regs; j++)
+ regmap_write(chip->regmap,
+ mt6358_ints[i].en_reg +
+ mt6358_ints[i].en_reg_shift * j, 0);
+ }
+
+ chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
+ irqd->num_pmic_irqs,
+ &mt6358_irq_domain_ops, chip);
+ if (!chip->irq_domain) {
+ dev_err(chip->dev, "Could not create IRQ domain\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL,
+ mt6358_irq_handler, IRQF_ONESHOT,
+ mt6358_irq_chip.name, chip);
+ if (ret) {
+ dev_err(chip->dev, "Failed to register IRQ=%d, ret=%d\n",
+ chip->irq, ret);
+ return ret;
+ }
+
+ enable_irq_wake(chip->irq);
+ return ret;
+}
diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
new file mode 100644
index 000000000000..db8cdf5272c1
--- /dev/null
+++ b/drivers/mfd/mt6360-core.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ *
+ * Author: Gene Chen <gene_chen@richtek.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/version.h>
+
+#include <linux/mfd/mt6360.h>
+
+/* reg 0 -> 0 ~ 7 */
+#define MT6360_CHG_TREG_EVT (4)
+#define MT6360_CHG_AICR_EVT (5)
+#define MT6360_CHG_MIVR_EVT (6)
+#define MT6360_PWR_RDY_EVT (7)
+/* REG 1 -> 8 ~ 15 */
+#define MT6360_CHG_BATSYSUV_EVT (9)
+#define MT6360_FLED_CHG_VINOVP_EVT (11)
+#define MT6360_CHG_VSYSUV_EVT (12)
+#define MT6360_CHG_VSYSOV_EVT (13)
+#define MT6360_CHG_VBATOV_EVT (14)
+#define MT6360_CHG_VBUSOV_EVT (15)
+/* REG 2 -> 16 ~ 23 */
+/* REG 3 -> 24 ~ 31 */
+#define MT6360_WD_PMU_DET (25)
+#define MT6360_WD_PMU_DONE (26)
+#define MT6360_CHG_TMRI (27)
+#define MT6360_CHG_ADPBADI (29)
+#define MT6360_CHG_RVPI (30)
+#define MT6360_OTPI (31)
+/* REG 4 -> 32 ~ 39 */
+#define MT6360_CHG_AICCMEASL (32)
+#define MT6360_CHGDET_DONEI (34)
+#define MT6360_WDTMRI (35)
+#define MT6360_SSFINISHI (36)
+#define MT6360_CHG_RECHGI (37)
+#define MT6360_CHG_TERMI (38)
+#define MT6360_CHG_IEOCI (39)
+/* REG 5 -> 40 ~ 47 */
+#define MT6360_PUMPX_DONEI (40)
+#define MT6360_BAT_OVP_ADC_EVT (41)
+#define MT6360_TYPEC_OTP_EVT (42)
+#define MT6360_ADC_WAKEUP_EVT (43)
+#define MT6360_ADC_DONEI (44)
+#define MT6360_BST_BATUVI (45)
+#define MT6360_BST_VBUSOVI (46)
+#define MT6360_BST_OLPI (47)
+/* REG 6 -> 48 ~ 55 */
+#define MT6360_ATTACH_I (48)
+#define MT6360_DETACH_I (49)
+#define MT6360_QC30_STPDONE (51)
+#define MT6360_QC_VBUSDET_DONE (52)
+#define MT6360_HVDCP_DET (53)
+#define MT6360_CHGDETI (54)
+#define MT6360_DCDTI (55)
+/* REG 7 -> 56 ~ 63 */
+#define MT6360_FOD_DONE_EVT (56)
+#define MT6360_FOD_OV_EVT (57)
+#define MT6360_CHRDET_UVP_EVT (58)
+#define MT6360_CHRDET_OVP_EVT (59)
+#define MT6360_CHRDET_EXT_EVT (60)
+#define MT6360_FOD_LR_EVT (61)
+#define MT6360_FOD_HR_EVT (62)
+#define MT6360_FOD_DISCHG_FAIL_EVT (63)
+/* REG 8 -> 64 ~ 71 */
+#define MT6360_USBID_EVT (64)
+#define MT6360_APWDTRST_EVT (65)
+#define MT6360_EN_EVT (66)
+#define MT6360_QONB_RST_EVT (67)
+#define MT6360_MRSTB_EVT (68)
+#define MT6360_OTP_EVT (69)
+#define MT6360_VDDAOV_EVT (70)
+#define MT6360_SYSUV_EVT (71)
+/* REG 9 -> 72 ~ 79 */
+#define MT6360_FLED_STRBPIN_EVT (72)
+#define MT6360_FLED_TORPIN_EVT (73)
+#define MT6360_FLED_TX_EVT (74)
+#define MT6360_FLED_LVF_EVT (75)
+#define MT6360_FLED2_SHORT_EVT (78)
+#define MT6360_FLED1_SHORT_EVT (79)
+/* REG 10 -> 80 ~ 87 */
+#define MT6360_FLED2_STRB_EVT (80)
+#define MT6360_FLED1_STRB_EVT (81)
+#define MT6360_FLED2_STRB_TO_EVT (82)
+#define MT6360_FLED1_STRB_TO_EVT (83)
+#define MT6360_FLED2_TOR_EVT (84)
+#define MT6360_FLED1_TOR_EVT (85)
+/* REG 11 -> 88 ~ 95 */
+/* REG 12 -> 96 ~ 103 */
+#define MT6360_BUCK1_PGB_EVT (96)
+#define MT6360_BUCK1_OC_EVT (100)
+#define MT6360_BUCK1_OV_EVT (101)
+#define MT6360_BUCK1_UV_EVT (102)
+/* REG 13 -> 104 ~ 111 */
+#define MT6360_BUCK2_PGB_EVT (104)
+#define MT6360_BUCK2_OC_EVT (108)
+#define MT6360_BUCK2_OV_EVT (109)
+#define MT6360_BUCK2_UV_EVT (110)
+/* REG 14 -> 112 ~ 119 */
+#define MT6360_LDO1_OC_EVT (113)
+#define MT6360_LDO2_OC_EVT (114)
+#define MT6360_LDO3_OC_EVT (115)
+#define MT6360_LDO5_OC_EVT (117)
+#define MT6360_LDO6_OC_EVT (118)
+#define MT6360_LDO7_OC_EVT (119)
+/* REG 15 -> 120 ~ 127 */
+#define MT6360_LDO1_PGB_EVT (121)
+#define MT6360_LDO2_PGB_EVT (122)
+#define MT6360_LDO3_PGB_EVT (123)
+#define MT6360_LDO5_PGB_EVT (125)
+#define MT6360_LDO6_PGB_EVT (126)
+#define MT6360_LDO7_PGB_EVT (127)
+
+static const struct regmap_irq mt6360_pmu_irqs[] = {
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_TREG_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_AICR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_MIVR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_PWR_RDY_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_BATSYSUV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_CHG_VINOVP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_VSYSUV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_VSYSOV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_VBATOV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_VBUSOV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_WD_PMU_DET, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_WD_PMU_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_TMRI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_ADPBADI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_RVPI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_OTPI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_AICCMEASL, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHGDET_DONEI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_WDTMRI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_SSFINISHI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_RECHGI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_TERMI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_IEOCI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_PUMPX_DONEI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BAT_OVP_ADC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_TYPEC_OTP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_ADC_WAKEUP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_ADC_DONEI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BST_BATUVI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BST_VBUSOVI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BST_OLPI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_ATTACH_I, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_DETACH_I, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_QC30_STPDONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_QC_VBUSDET_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_HVDCP_DET, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHGDETI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_DCDTI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_DONE_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_OV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHRDET_UVP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHRDET_OVP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHRDET_EXT_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_LR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_HR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_DISCHG_FAIL_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_USBID_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_APWDTRST_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_EN_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_QONB_RST_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_MRSTB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_OTP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_VDDAOV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_SYSUV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_STRBPIN_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_TORPIN_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_TX_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_LVF_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED2_SHORT_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED1_SHORT_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED2_STRB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED1_STRB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED2_STRB_TO_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED1_STRB_TO_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED2_TOR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED1_TOR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK1_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK1_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK1_OV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK1_UV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK2_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK2_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK2_OV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK2_UV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO1_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO2_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO3_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO5_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO6_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO7_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO1_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO2_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO3_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO5_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO6_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO7_PGB_EVT, 8),
+};
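
REGMAP_IRQ_REG_LINE(line, 8) resolves each event to status register line / 8 (relative to status_base) and bit line % 8; for example, MT6360_CHG_IEOCI (39) lands in the fifth status register at bit 7. A sketch of that mapping:

    /* How an event line maps onto the 8-bit status registers (sketch). */
    static inline unsigned int mt6360_evt_reg(unsigned int line)
    {
            return line / 8;                /* register index from status_base */
    }

    static inline unsigned int mt6360_evt_mask(unsigned int line)
    {
            return 1u << (line % 8);        /* bit within that register */
    }
    /* MT6360_CHG_IEOCI (39) -> register offset 4, mask 0x80 */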
+
+static int mt6360_pmu_handle_post_irq(void *irq_drv_data)
+{
+ struct mt6360_pmu_data *mpd = irq_drv_data;
+
+ return regmap_update_bits(mpd->regmap,
+ MT6360_PMU_IRQ_SET, MT6360_IRQ_RETRIG, MT6360_IRQ_RETRIG);
+}
+
+static struct regmap_irq_chip mt6360_pmu_irq_chip = {
+ .irqs = mt6360_pmu_irqs,
+ .num_irqs = ARRAY_SIZE(mt6360_pmu_irqs),
+ .num_regs = MT6360_PMU_IRQ_REGNUM,
+ .mask_base = MT6360_PMU_CHG_MASK1,
+ .status_base = MT6360_PMU_CHG_IRQ1,
+ .ack_base = MT6360_PMU_CHG_IRQ1,
+ .init_ack_masked = true,
+ .use_ack = true,
+ .handle_post_irq = mt6360_pmu_handle_post_irq,
+};
+
+static const struct regmap_config mt6360_pmu_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MT6360_PMU_MAXREG,
+};
+
+static const struct resource mt6360_adc_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_ADC_DONEI, "adc_donei"),
+};
+
+static const struct resource mt6360_chg_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_TREG_EVT, "chg_treg_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_PWR_RDY_EVT, "pwr_rdy_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_BATSYSUV_EVT, "chg_batsysuv_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_VSYSUV_EVT, "chg_vsysuv_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_VSYSOV_EVT, "chg_vsysov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_VBATOV_EVT, "chg_vbatov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_VBUSOV_EVT, "chg_vbusov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_AICCMEASL, "chg_aiccmeasl"),
+ DEFINE_RES_IRQ_NAMED(MT6360_WDTMRI, "wdtmri"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_RECHGI, "chg_rechgi"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_TERMI, "chg_termi"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_IEOCI, "chg_ieoci"),
+ DEFINE_RES_IRQ_NAMED(MT6360_PUMPX_DONEI, "pumpx_donei"),
+ DEFINE_RES_IRQ_NAMED(MT6360_ATTACH_I, "attach_i"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHRDET_EXT_EVT, "chrdet_ext_evt"),
+};
+
+static const struct resource mt6360_led_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED_CHG_VINOVP_EVT, "fled_chg_vinovp_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED_LVF_EVT, "fled_lvf_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED2_SHORT_EVT, "fled2_short_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED1_SHORT_EVT, "fled1_short_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED2_STRB_TO_EVT, "fled2_strb_to_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED1_STRB_TO_EVT, "fled1_strb_to_evt"),
+};
+
+static const struct resource mt6360_pmic_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_PGB_EVT, "buck1_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_OC_EVT, "buck1_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_OV_EVT, "buck1_ov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_UV_EVT, "buck1_uv_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_PGB_EVT, "buck2_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_OC_EVT, "buck2_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_OV_EVT, "buck2_ov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_UV_EVT, "buck2_uv_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO6_OC_EVT, "ldo6_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO7_OC_EVT, "ldo7_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO6_PGB_EVT, "ldo6_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO7_PGB_EVT, "ldo7_pgb_evt"),
+};
+
+static const struct resource mt6360_ldo_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO1_OC_EVT, "ldo1_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO2_OC_EVT, "ldo2_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO3_OC_EVT, "ldo3_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO5_OC_EVT, "ldo5_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO1_PGB_EVT, "ldo1_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO2_PGB_EVT, "ldo2_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO3_PGB_EVT, "ldo3_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO5_PGB_EVT, "ldo5_pgb_evt"),
+};
+
+static const struct mfd_cell mt6360_devs[] = {
+ OF_MFD_CELL("mt6360_adc", mt6360_adc_resources,
+ NULL, 0, 0, "mediatek,mt6360_adc"),
+ OF_MFD_CELL("mt6360_chg", mt6360_chg_resources,
+ NULL, 0, 0, "mediatek,mt6360_chg"),
+ OF_MFD_CELL("mt6360_led", mt6360_led_resources,
+ NULL, 0, 0, "mediatek,mt6360_led"),
+ OF_MFD_CELL("mt6360_pmic", mt6360_pmic_resources,
+ NULL, 0, 0, "mediatek,mt6360_pmic"),
+ OF_MFD_CELL("mt6360_ldo", mt6360_ldo_resources,
+ NULL, 0, 0, "mediatek,mt6360_ldo"),
+ OF_MFD_CELL("mt6360_tcpc", NULL,
+ NULL, 0, 0, "mediatek,mt6360_tcpc"),
+};
+
+static const unsigned short mt6360_slave_addr[MT6360_SLAVE_MAX] = {
+ MT6360_PMU_SLAVEID,
+ MT6360_PMIC_SLAVEID,
+ MT6360_LDO_SLAVEID,
+ MT6360_TCPC_SLAVEID,
+};
+
+static int mt6360_pmu_probe(struct i2c_client *client)
+{
+ struct mt6360_pmu_data *mpd;
+ unsigned int reg_data;
+ int i, ret;
+
+ mpd = devm_kzalloc(&client->dev, sizeof(*mpd), GFP_KERNEL);
+ if (!mpd)
+ return -ENOMEM;
+
+ mpd->dev = &client->dev;
+ i2c_set_clientdata(client, mpd);
+
+ mpd->regmap = devm_regmap_init_i2c(client, &mt6360_pmu_regmap_config);
+ if (IS_ERR(mpd->regmap)) {
+ dev_err(&client->dev, "Failed to register regmap\n");
+ return PTR_ERR(mpd->regmap);
+ }
+
+ ret = regmap_read(mpd->regmap, MT6360_PMU_DEV_INFO, &reg_data);
+ if (ret) {
+ dev_err(&client->dev, "Device not found\n");
+ return ret;
+ }
+
+ mpd->chip_rev = reg_data & CHIP_REV_MASK;
+ if (mpd->chip_rev != CHIP_VEN_MT6360) {
+ dev_err(&client->dev, "Device not supported\n");
+ return -ENODEV;
+ }
+
+ mt6360_pmu_irq_chip.irq_drv_data = mpd;
+ ret = devm_regmap_add_irq_chip(&client->dev, mpd->regmap, client->irq,
+ IRQF_TRIGGER_FALLING, 0,
+ &mt6360_pmu_irq_chip, &mpd->irq_data);
+ if (ret) {
+ dev_err(&client->dev, "Failed to add Regmap IRQ Chip\n");
+ return ret;
+ }
+
+ mpd->i2c[0] = client;
+ for (i = 1; i < MT6360_SLAVE_MAX; i++) {
+ mpd->i2c[i] = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ mt6360_slave_addr[i]);
+ if (IS_ERR(mpd->i2c[i])) {
+ dev_err(&client->dev,
+ "Failed to get new dummy I2C device for address 0x%x",
+ mt6360_slave_addr[i]);
+ return PTR_ERR(mpd->i2c[i]);
+ }
+ i2c_set_clientdata(mpd->i2c[i], mpd);
+ }
+
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO,
+ mt6360_devs, ARRAY_SIZE(mt6360_devs), NULL,
+ 0, regmap_irq_get_domain(mpd->irq_data));
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to register subordinate devices\n");
+ return ret;
+ }
+
+ return 0;
+}
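
The PMU answers on several slave addresses on the same I2C bus, so the probe keeps the primary client and claims the remaining addresses as dummy clients for the sub-devices to use. The idiom in isolation, with a hypothetical address:

    struct i2c_client *extra;

    /* Sketch: claim one extra slave address on the same adapter. */
    extra = devm_i2c_new_dummy_device(&client->dev, client->adapter,
                                      0x1a /* hypothetical */);
    if (IS_ERR(extra))
            return PTR_ERR(extra);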
+
+static int __maybe_unused mt6360_pmu_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(i2c->irq);
+
+ return 0;
+}
+
+static int __maybe_unused mt6360_pmu_resume(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(i2c->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mt6360_pmu_pm_ops,
+ mt6360_pmu_suspend, mt6360_pmu_resume);
+
+static const struct of_device_id __maybe_unused mt6360_pmu_of_id[] = {
+ { .compatible = "mediatek,mt6360_pmu", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt6360_pmu_of_id);
+
+static struct i2c_driver mt6360_pmu_driver = {
+ .driver = {
+ .pm = &mt6360_pmu_pm_ops,
+ .of_match_table = of_match_ptr(mt6360_pmu_of_id),
+ },
+ .probe_new = mt6360_pmu_probe,
+};
+module_i2c_driver(mt6360_pmu_driver);
+
+MODULE_AUTHOR("Gene Chen <gene_chen@richtek.com>");
+MODULE_DESCRIPTION("MT6360 PMU I2C Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 0437c858d115..f6cd8a660602 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -12,13 +12,18 @@
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
+#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6397/registers.h>
#define MT6323_RTC_BASE 0x8000
#define MT6323_RTC_SIZE 0x40
+#define MT6358_RTC_BASE 0x0588
+#define MT6358_RTC_SIZE 0x3c
+
#define MT6397_RTC_BASE 0xe000
#define MT6397_RTC_SIZE 0x3e
@@ -30,6 +35,11 @@ static const struct resource mt6323_rtc_resources[] = {
DEFINE_RES_IRQ(MT6323_IRQ_STATUS_RTC),
};
+static const struct resource mt6358_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6358_RTC_BASE, MT6358_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6358_IRQ_RTC),
+};
+
static const struct resource mt6397_rtc_resources[] = {
DEFINE_RES_MEM(MT6397_RTC_BASE, MT6397_RTC_SIZE),
DEFINE_RES_IRQ(MT6397_IRQ_RTC),
@@ -74,6 +84,21 @@ static const struct mfd_cell mt6323_devs[] = {
},
};
+static const struct mfd_cell mt6358_devs[] = {
+ {
+ .name = "mt6358-regulator",
+ .of_compatible = "mediatek,mt6358-regulator"
+ }, {
+ .name = "mt6358-rtc",
+ .num_resources = ARRAY_SIZE(mt6358_rtc_resources),
+ .resources = mt6358_rtc_resources,
+ .of_compatible = "mediatek,mt6358-rtc",
+ }, {
+ .name = "mt6358-sound",
+ .of_compatible = "mediatek,mt6358-sound"
+ },
+};
+
static const struct mfd_cell mt6397_devs[] = {
{
.name = "mt6397-rtc",
@@ -100,54 +125,42 @@ static const struct mfd_cell mt6397_devs[] = {
}
};
-#ifdef CONFIG_PM_SLEEP
-static int mt6397_irq_suspend(struct device *dev)
-{
- struct mt6397_chip *chip = dev_get_drvdata(dev);
-
- regmap_write(chip->regmap, chip->int_con[0], chip->wake_mask[0]);
- regmap_write(chip->regmap, chip->int_con[1], chip->wake_mask[1]);
-
- enable_irq_wake(chip->irq);
-
- return 0;
-}
-
-static int mt6397_irq_resume(struct device *dev)
-{
- struct mt6397_chip *chip = dev_get_drvdata(dev);
-
- regmap_write(chip->regmap, chip->int_con[0], chip->irq_masks_cur[0]);
- regmap_write(chip->regmap, chip->int_con[1], chip->irq_masks_cur[1]);
-
- disable_irq_wake(chip->irq);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
- mt6397_irq_resume);
-
struct chip_data {
u32 cid_addr;
u32 cid_shift;
+ const struct mfd_cell *cells;
+ int cell_size;
+ int (*irq_init)(struct mt6397_chip *chip);
};
static const struct chip_data mt6323_core = {
.cid_addr = MT6323_CID,
.cid_shift = 0,
+ .cells = mt6323_devs,
+ .cell_size = ARRAY_SIZE(mt6323_devs),
+ .irq_init = mt6397_irq_init,
+};
+
+static const struct chip_data mt6358_core = {
+ .cid_addr = MT6358_SWCID,
+ .cid_shift = 8,
+ .cells = mt6358_devs,
+ .cell_size = ARRAY_SIZE(mt6358_devs),
+ .irq_init = mt6358_irq_init,
};
static const struct chip_data mt6397_core = {
.cid_addr = MT6397_CID,
.cid_shift = 0,
+ .cells = mt6397_devs,
+ .cell_size = ARRAY_SIZE(mt6397_devs),
+ .irq_init = mt6397_irq_init,
};
static int mt6397_probe(struct platform_device *pdev)
{
int ret;
- unsigned int id;
+ unsigned int id = 0;
struct mt6397_chip *pmic;
const struct chip_data *pmic_core;
@@ -183,29 +196,13 @@ static int mt6397_probe(struct platform_device *pdev)
if (pmic->irq <= 0)
return pmic->irq;
- ret = mt6397_irq_init(pmic);
+ ret = pmic_core->irq_init(pmic);
if (ret)
return ret;
- switch (pmic->chip_id) {
- case MT6323_CHIP_ID:
- ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
- mt6323_devs, ARRAY_SIZE(mt6323_devs),
- NULL, 0, pmic->irq_domain);
- break;
-
- case MT6391_CHIP_ID:
- case MT6397_CHIP_ID:
- ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
- mt6397_devs, ARRAY_SIZE(mt6397_devs),
- NULL, 0, pmic->irq_domain);
- break;
-
- default:
- dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id);
- return -ENODEV;
- }
-
+ ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ pmic_core->cells, pmic_core->cell_size,
+ NULL, 0, pmic->irq_domain);
if (ret) {
irq_domain_remove(pmic->irq_domain);
dev_err(&pdev->dev, "failed to add child devices: %d\n", ret);
@@ -219,6 +216,9 @@ static const struct of_device_id mt6397_of_match[] = {
.compatible = "mediatek,mt6323",
.data = &mt6323_core,
}, {
+ .compatible = "mediatek,mt6358",
+ .data = &mt6358_core,
+ }, {
.compatible = "mediatek,mt6397",
.data = &mt6397_core,
}, {
@@ -238,7 +238,6 @@ static struct platform_driver mt6397_driver = {
.driver = {
.name = "mt6397",
.of_match_table = of_match_ptr(mt6397_of_match),
- .pm = &mt6397_pm_ops,
},
.id_table = mt6397_id,
};
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index b2d3ce1f3115..2924919da991 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -9,6 +9,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/suspend.h>
#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6323/registers.h>
#include <linux/mfd/mt6397/core.h>
@@ -81,7 +82,7 @@ static struct irq_chip mt6397_irq_chip = {
static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
int irqbase)
{
- unsigned int status;
+ unsigned int status = 0;
int i, irq, ret;
ret = regmap_read(mt6397->regmap, reg, &status);
@@ -128,6 +129,36 @@ static const struct irq_domain_ops mt6397_irq_domain_ops = {
.map = mt6397_irq_domain_map,
};
+static int mt6397_irq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ struct mt6397_chip *chip =
+ container_of(notifier, struct mt6397_chip, pm_nb);
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ regmap_write(chip->regmap,
+ chip->int_con[0], chip->wake_mask[0]);
+ regmap_write(chip->regmap,
+ chip->int_con[1], chip->wake_mask[1]);
+ enable_irq_wake(chip->irq);
+ break;
+
+ case PM_POST_SUSPEND:
+ regmap_write(chip->regmap,
+ chip->int_con[0], chip->irq_masks_cur[0]);
+ regmap_write(chip->regmap,
+ chip->int_con[1], chip->irq_masks_cur[1]);
+ disable_irq_wake(chip->irq);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
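
This notifier takes over the wake-mask save/restore that the removed mt6397_irq_suspend()/mt6397_irq_resume() pair used to do, keeping the suspend handling inside the interrupt code. The registration idiom in isolation (callback body is illustrative):

    #include <linux/suspend.h>

    static int demo_pm_notifier(struct notifier_block *nb,
                                unsigned long pm_event, void *unused)
    {
            /* React to PM_SUSPEND_PREPARE / PM_POST_SUSPEND as needed. */
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_pm_nb = {
            .notifier_call = demo_pm_notifier,
    };

    /* During init: register_pm_notifier(&demo_pm_nb); */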
+
int mt6397_irq_init(struct mt6397_chip *chip)
{
int ret;
@@ -159,6 +190,7 @@ int mt6397_irq_init(struct mt6397_chip *chip)
regmap_write(chip->regmap, chip->int_con[0], 0x0);
regmap_write(chip->regmap, chip->int_con[1], 0x0);
+ chip->pm_nb.notifier_call = mt6397_irq_pm_notifier;
chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
MT6397_IRQ_NR,
&mt6397_irq_domain_ops,
@@ -177,5 +209,6 @@ int mt6397_irq_init(struct mt6397_chip *chip)
return ret;
}
+ register_pm_notifier(&chip->pm_nb);
return 0;
}
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index e49787e6bb93..ccd62b963952 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1145,22 +1145,14 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
return -ENOMEM;
lookup->dev_id = "i2c-gpio";
- if (iic->pin_sda < 32)
- lookup->table[0].chip_label = "SM501-LOW";
- else
- lookup->table[0].chip_label = "SM501-HIGH";
- lookup->table[0].chip_hwnum = iic->pin_sda % 32;
- lookup->table[0].con_id = NULL;
- lookup->table[0].idx = 0;
- lookup->table[0].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN;
- if (iic->pin_scl < 32)
- lookup->table[1].chip_label = "SM501-LOW";
- else
- lookup->table[1].chip_label = "SM501-HIGH";
- lookup->table[1].chip_hwnum = iic->pin_scl % 32;
- lookup->table[1].con_id = NULL;
- lookup->table[1].idx = 1;
- lookup->table[1].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN;
+ lookup->table[0] = (struct gpiod_lookup)
+ GPIO_LOOKUP_IDX(iic->pin_sda < 32 ? "SM501-LOW" : "SM501-HIGH",
+ iic->pin_sda % 32, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN);
+ lookup->table[1] = (struct gpiod_lookup)
+ GPIO_LOOKUP_IDX(iic->pin_scl < 32 ? "SM501-LOW" : "SM501-HIGH",
+ iic->pin_scl % 32, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN);
gpiod_add_lookup_table(lookup);
icd = dev_get_platdata(&pdev->dev);
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index ebdf2f11ae28..33336cde4724 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -284,7 +284,6 @@ MODULE_DEVICE_TABLE(of, sprd_pmic_match);
static struct spi_driver sprd_pmic_driver = {
.driver = {
.name = "sc27xx-pmic",
- .bus = &spi_bus_type,
.of_match_table = sprd_pmic_match,
},
.probe = sprd_pmic_probe,
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index efcd4b980c94..add603359124 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -167,10 +167,11 @@ static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
regmap_write(ddata->regmap, TIM_ARR, 0x0);
}
-static void stm32_timers_dma_probe(struct device *dev,
+static int stm32_timers_dma_probe(struct device *dev,
struct stm32_timers *ddata)
{
int i;
+ int ret = 0;
char name[4];
init_completion(&ddata->dma.completion);
@@ -179,14 +180,23 @@ static void stm32_timers_dma_probe(struct device *dev,
/* Optional DMA support: get valid DMA channel(s) or NULL */
for (i = STM32_TIMERS_DMA_CH1; i <= STM32_TIMERS_DMA_CH4; i++) {
snprintf(name, ARRAY_SIZE(name), "ch%1d", i + 1);
- ddata->dma.chans[i] = dma_request_slave_channel(dev, name);
+ ddata->dma.chans[i] = dma_request_chan(dev, name);
}
- ddata->dma.chans[STM32_TIMERS_DMA_UP] =
- dma_request_slave_channel(dev, "up");
- ddata->dma.chans[STM32_TIMERS_DMA_TRIG] =
- dma_request_slave_channel(dev, "trig");
- ddata->dma.chans[STM32_TIMERS_DMA_COM] =
- dma_request_slave_channel(dev, "com");
+ ddata->dma.chans[STM32_TIMERS_DMA_UP] = dma_request_chan(dev, "up");
+ ddata->dma.chans[STM32_TIMERS_DMA_TRIG] = dma_request_chan(dev, "trig");
+ ddata->dma.chans[STM32_TIMERS_DMA_COM] = dma_request_chan(dev, "com");
+
+ for (i = STM32_TIMERS_DMA_CH1; i < STM32_TIMERS_MAX_DMAS; i++) {
+ if (IS_ERR(ddata->dma.chans[i])) {
+ /* Save the first error code to return */
+ if (PTR_ERR(ddata->dma.chans[i]) != -ENODEV && !ret)
+ ret = PTR_ERR(ddata->dma.chans[i]);
+
+ ddata->dma.chans[i] = NULL;
+ }
+ }
+
+ return ret;
}
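
dma_request_chan() returns an ERR_PTR rather than NULL, so the loop above distinguishes "no channel described" (-ENODEV, the channels stay optional) from a real failure such as -EPROBE_DEFER, keeping only the first real error. The filtering rule in isolation:

    /* Sketch: keep the first real error, drop optional-absent channels. */
    if (IS_ERR(chan)) {
            if (PTR_ERR(chan) != -ENODEV && !ret)
                    ret = PTR_ERR(chan);    /* e.g. -EPROBE_DEFER */
            chan = NULL;                    /* channel simply absent */
    }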
static void stm32_timers_dma_remove(struct device *dev,
@@ -230,7 +240,11 @@ static int stm32_timers_probe(struct platform_device *pdev)
stm32_timers_get_arr_size(ddata);
- stm32_timers_dma_probe(dev, ddata);
+ ret = stm32_timers_dma_probe(dev, ddata);
+ if (ret) {
+ stm32_timers_dma_remove(dev, ddata);
+ return ret;
+ }
platform_set_drvdata(pdev, ddata);
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index 857991cb3cbb..711979afd90a 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -287,14 +287,21 @@ static int stmfx_irq_init(struct i2c_client *client)
ret = regmap_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, irqoutpin);
if (ret)
- return ret;
+ goto irq_exit;
ret = devm_request_threaded_irq(stmfx->dev, client->irq,
NULL, stmfx_irq_handler,
irqtrigger | IRQF_ONESHOT,
"stmfx", stmfx);
if (ret)
- stmfx_irq_exit(client);
+ goto irq_exit;
+
+ stmfx->irq = client->irq;
+
+ return 0;
+
+irq_exit:
+ stmfx_irq_exit(client);
return ret;
}
@@ -481,6 +488,8 @@ static int stmfx_suspend(struct device *dev)
if (ret)
return ret;
+ disable_irq(stmfx->irq);
+
if (stmfx->vdd)
return regulator_disable(stmfx->vdd);
@@ -501,6 +510,13 @@ static int stmfx_resume(struct device *dev)
}
}
+ /* Reset STMFX - supply has been stopped during suspend */
+ ret = stmfx_chip_reset(stmfx);
+ if (ret) {
+ dev_err(stmfx->dev, "Failed to reset chip: %d\n", ret);
+ return ret;
+ }
+
ret = regmap_raw_write(stmfx->map, STMFX_REG_SYS_CTRL,
&stmfx->bkp_sysctrl, sizeof(stmfx->bkp_sysctrl));
if (ret)
@@ -517,6 +533,8 @@ static int stmfx_resume(struct device *dev)
if (ret)
return ret;
+ enable_irq(stmfx->irq);
+
return 0;
}
#endif
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index 7dfbe8906cb8..eb3da558c3fb 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -59,7 +59,7 @@ static const struct regmap_access_table stpmic1_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(stpmic1_volatile_ranges),
};
-const struct regmap_config stpmic1_regmap_config = {
+static const struct regmap_config stpmic1_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
index b9f48e588d95..ddddf08b6a4c 100644
--- a/drivers/mfd/tqmx86.c
+++ b/drivers/mfd/tqmx86.c
@@ -274,7 +274,7 @@ static int __init tqmx86_init(void)
module_init(tqmx86_init);
-MODULE_DESCRIPTION("TQx86 PLD Core Driver");
+MODULE_DESCRIPTION("TQMx86 PLD Core Driver");
MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:tqmx86");
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index c68ff56dbdb1..aaf24af287dd 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -8,13 +8,12 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/core.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_data/syscon.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/stat.h>
-#include <linux/vexpress.h>
#define SYS_ID 0x000
#define SYS_SW 0x004
@@ -37,35 +36,8 @@
#define SYS_CFGCTRL 0x0a4
#define SYS_CFGSTAT 0x0a8
-#define SYS_HBI_MASK 0xfff
-#define SYS_PROCIDx_HBI_SHIFT 0
-
-#define SYS_MISC_MASTERSITE (1 << 14)
-
-void vexpress_flags_set(u32 data)
-{
- static void __iomem *base;
-
- if (!base) {
- struct device_node *node = of_find_compatible_node(NULL, NULL,
- "arm,vexpress-sysreg");
-
- base = of_iomap(node, 0);
- }
-
- if (WARN_ON(!base))
- return;
-
- writel(~0, base + SYS_FLAGSCLR);
- writel(data, base + SYS_FLAGSSET);
-}
-
/* The sysreg block is just a random collection of various functions... */
-static struct syscon_platform_data vexpress_sysreg_sys_id_pdata = {
- .label = "sys_id",
-};
-
static struct bgpio_pdata vexpress_sysreg_sys_led_pdata = {
.label = "sys_led",
.base = -1,
@@ -84,24 +56,8 @@ static struct bgpio_pdata vexpress_sysreg_sys_flash_pdata = {
.ngpio = 1,
};
-static struct syscon_platform_data vexpress_sysreg_sys_misc_pdata = {
- .label = "sys_misc",
-};
-
-static struct syscon_platform_data vexpress_sysreg_sys_procid_pdata = {
- .label = "sys_procid",
-};
-
static struct mfd_cell vexpress_sysreg_cells[] = {
{
- .name = "syscon",
- .num_resources = 1,
- .resources = (struct resource []) {
- DEFINE_RES_MEM(SYS_ID, 0x4),
- },
- .platform_data = &vexpress_sysreg_sys_id_pdata,
- .pdata_size = sizeof(vexpress_sysreg_sys_id_pdata),
- }, {
.name = "basic-mmio-gpio",
.of_compatible = "arm,vexpress-sysreg,sys_led",
.num_resources = 1,
@@ -129,26 +85,10 @@ static struct mfd_cell vexpress_sysreg_cells[] = {
.platform_data = &vexpress_sysreg_sys_flash_pdata,
.pdata_size = sizeof(vexpress_sysreg_sys_flash_pdata),
}, {
- .name = "syscon",
- .num_resources = 1,
- .resources = (struct resource []) {
- DEFINE_RES_MEM(SYS_MISC, 0x4),
- },
- .platform_data = &vexpress_sysreg_sys_misc_pdata,
- .pdata_size = sizeof(vexpress_sysreg_sys_misc_pdata),
- }, {
- .name = "syscon",
- .num_resources = 1,
- .resources = (struct resource []) {
- DEFINE_RES_MEM(SYS_PROCID0, 0x8),
- },
- .platform_data = &vexpress_sysreg_sys_procid_pdata,
- .pdata_size = sizeof(vexpress_sysreg_sys_procid_pdata),
- }, {
.name = "vexpress-syscfg",
.num_resources = 1,
.resources = (struct resource []) {
- DEFINE_RES_MEM(SYS_CFGDATA, 0xc),
+ DEFINE_RES_MEM(SYS_MISC, 0x4c),
},
}
};
@@ -158,8 +98,6 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
struct resource *mem;
void __iomem *base;
struct gpio_chip *mmc_gpio_chip;
- int master;
- u32 dt_hbi;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
@@ -169,21 +107,6 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
if (!base)
return -ENOMEM;
- master = readl(base + SYS_MISC) & SYS_MISC_MASTERSITE ?
- VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
- vexpress_config_set_master(master);
-
- /* Confirm board type against DT property, if available */
- if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
- u32 id = readl(base + (master == VEXPRESS_SITE_DB1 ?
- SYS_PROCID0 : SYS_PROCID1));
- u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
-
- if (WARN_ON(dt_hbi != hbi))
- dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n",
- dt_hbi, hbi);
- }
-
/*
* Duplicated SYS_MCI pseudo-GPIO controller for compatibility with
* older trees using sysreg node for MMC control lines.
@@ -195,9 +118,9 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI,
NULL, NULL, NULL, NULL, 0);
mmc_gpio_chip->ngpio = 2;
- gpiochip_add_data(mmc_gpio_chip, NULL);
+ devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
- return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
vexpress_sysreg_cells,
ARRAY_SIZE(vexpress_sysreg_cells), mem, 0, NULL);
}
@@ -206,6 +129,7 @@ static const struct of_device_id vexpress_sysreg_match[] = {
{ .compatible = "arm,vexpress-sysreg", },
{},
};
+MODULE_DEVICE_TABLE(of, vexpress_sysreg_match);
static struct platform_driver vexpress_sysreg_driver = {
.driver = {
@@ -215,14 +139,5 @@ static struct platform_driver vexpress_sysreg_driver = {
.probe = vexpress_sysreg_probe,
};
-static int __init vexpress_sysreg_init(void)
-{
- struct device_node *node;
-
- /* Need the sysreg early, before any other device... */
- for_each_matching_node(node, vexpress_sysreg_match)
- of_platform_device_create(node, NULL, NULL);
-
- return platform_driver_register(&vexpress_sysreg_driver);
-}
-core_initcall(vexpress_sysreg_init);
+module_platform_driver(vexpress_sysreg_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
index 90341f3c6810..da910302d51a 100644
--- a/drivers/mfd/wcd934x.c
+++ b/drivers/mfd/wcd934x.c
@@ -280,7 +280,6 @@ static void wcd934x_slim_remove(struct slim_device *sdev)
regulator_bulk_disable(WCD934X_MAX_SUPPLY, ddata->supplies);
mfd_remove_devices(&sdev->dev);
- kfree(ddata);
}
static const struct slim_device_id wcd934x_slim_id[] = {
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 1e9fe7d92597..3b2b93c5bbcb 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -393,7 +393,9 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
- dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wm8994->dev, "Failed to get supplies: %d\n",
+ ret);
goto err;
}
@@ -584,6 +586,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err_irq;
}
+ pm_runtime_set_active(wm8994->dev);
pm_runtime_enable(wm8994->dev);
pm_runtime_idle(wm8994->dev);
@@ -603,7 +606,9 @@ err:
static void wm8994_device_exit(struct wm8994 *wm8994)
{
+ pm_runtime_get_sync(wm8994->dev);
pm_runtime_disable(wm8994->dev);
+ pm_runtime_put_noidle(wm8994->dev);
wm8994_irq_exit(wm8994);
regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies);
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
@@ -690,3 +695,4 @@ module_i2c_driver(wm8994_i2c_driver);
MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_SOFTDEP("pre: wm8994_regulator");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 99e151475d8f..edd5dd5ebfdc 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -423,15 +423,6 @@ config SRAM
config SRAM_EXEC
bool
-config VEXPRESS_SYSCFG
- bool "Versatile Express System Configuration driver"
- depends on VEXPRESS_CONFIG
- default y
- help
- ARM Ltd. Versatile Express uses specialised platform configuration
- bus. System Configuration interface is one of the possible means
- of generating transactions on this bus.
-
config PCI_ENDPOINT_TEST
depends on PCI
select CRC32
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9abf2923d831..c7bd01ac6291 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,7 +49,6 @@ obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
-obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 1a81cda948c1..6c6c9e95a29f 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -347,31 +347,6 @@ static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return rtsx_pci_send_cmd(pcr, 100);
}
-static void rts5249_set_aspm(struct rtsx_pcr *pcr, bool enable)
-{
- struct rtsx_cr_option *option = &pcr->option;
- u8 val = 0;
-
- if (pcr->aspm_enabled == enable)
- return;
-
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- if (enable)
- val = pcr->aspm_en;
- rtsx_pci_update_cfg_byte(pcr,
- pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
-
- if (!enable)
- val = FORCE_ASPM_CTL0;
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- }
-
- pcr->aspm_enabled = enable;
-}
-
static const struct pcr_ops rts5249_pcr_ops = {
.fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.extra_init_hw = rts5249_extra_init_hw,
@@ -384,7 +359,6 @@ static const struct pcr_ops rts5249_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
- .set_aspm = rts5249_set_aspm,
};
/* SD Pull Control Enable:
@@ -471,7 +445,6 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
- option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
option->ltr_l1off_snooze_sspwrgate =
@@ -612,7 +585,6 @@ static const struct pcr_ops rts524a_pcr_ops = {
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
- .set_aspm = rts5249_set_aspm,
};
void rts524a_init_params(struct rtsx_pcr *pcr)
@@ -728,7 +700,6 @@ static const struct pcr_ops rts525a_pcr_ops = {
.switch_output_voltage = rts525a_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
- .set_aspm = rts5249_set_aspm,
};
void rts525a_init_params(struct rtsx_pcr *pcr)
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 711054ebad74..7a9dbb778e84 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -570,30 +570,6 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
return 0;
}
-static void rts5260_set_aspm(struct rtsx_pcr *pcr, bool enable)
-{
- struct rtsx_cr_option *option = &pcr->option;
- u8 val = 0;
-
- if (pcr->aspm_enabled == enable)
- return;
-
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- if (enable)
- val = pcr->aspm_en;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
-
- if (!enable)
- val = FORCE_ASPM_CTL0;
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- }
-
- pcr->aspm_enabled = enable;
-}
-
static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
{
struct rtsx_cr_option *option = &pcr->option;
@@ -639,7 +615,6 @@ static const struct pcr_ops rts5260_pcr_ops = {
.switch_output_voltage = rts5260_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
.stop_cmd = rts5260_stop_cmd,
- .set_aspm = rts5260_set_aspm,
.set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
.enable_ocp = rts5260_enable_ocp,
.disable_ocp = rts5260_disable_ocp,
@@ -683,7 +658,6 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
- option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
option->ltr_l1off_snooze_sspwrgate =
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 78c3b1d424c3..195822ec858e 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -518,51 +518,22 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
static void rts5261_enable_aspm(struct rtsx_pcr *pcr, bool enable)
{
- struct rtsx_cr_option *option = &pcr->option;
- u8 val = 0;
-
if (pcr->aspm_enabled == enable)
return;
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- val = pcr->aspm_en;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
-
- val = FORCE_ASPM_CTL0;
- val |= (pcr->aspm_en & 0x02);
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- val = pcr->aspm_en;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- }
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
pcr->aspm_enabled = enable;
}
static void rts5261_disable_aspm(struct rtsx_pcr *pcr, bool enable)
{
- struct rtsx_cr_option *option = &pcr->option;
- u8 val = 0;
-
if (pcr->aspm_enabled == enable)
return;
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- val = 0;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
-
- val = 0;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- val = FORCE_ASPM_CTL0;
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- }
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0);
rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
udelay(10);
pcr->aspm_enabled = enable;
@@ -639,8 +610,13 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (initial_mode) {
/* We use around 250k here, in the initial stage */
- clk_divider = SD_CLK_DIVIDE_128;
- card_clock = 30000000;
+ if (is_version(pcr, PID_5261, IC_VER_D)) {
+ clk_divider = SD_CLK_DIVIDE_256;
+ card_clock = 60000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ }
} else {
clk_divider = SD_CLK_DIVIDE_0;
}
@@ -784,7 +760,6 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = 0x7F;
option->ltr_l1off_snooze_sspwrgate = 0x78;
- option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
option->ocp_en = 1;
hw_param->interrupt_en |= SD_OC_INT_EN;
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 06038b325b02..0d5928bc1b6d 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -55,16 +55,10 @@ static const struct pci_device_id rtsx_pci_ids[] = {
MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
-static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
-{
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- 0xFC, pcr->aspm_en);
-}
-
static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
{
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- 0xFC, 0);
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0);
}
static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
@@ -85,32 +79,17 @@ static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
- if (pcr->ops->set_ltr_latency)
- return pcr->ops->set_ltr_latency(pcr, latency);
- else
- return rtsx_comm_set_ltr_latency(pcr, latency);
+ return rtsx_comm_set_ltr_latency(pcr, latency);
}
static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
- struct rtsx_cr_option *option = &pcr->option;
-
if (pcr->aspm_enabled == enable)
return;
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- if (enable)
- rtsx_pci_enable_aspm(pcr);
- else
- rtsx_pci_disable_aspm(pcr);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK;
- u8 val = 0;
-
- if (enable)
- val = pcr->aspm_en;
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- }
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ enable ? pcr->aspm_en : 0);
pcr->aspm_enabled = enable;
}
@@ -142,6 +121,9 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
rtsx_disable_aspm(pcr);
+ /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
+ msleep(1);
+
if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
@@ -151,10 +133,7 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
- if (pcr->ops->full_on)
- pcr->ops->full_on(pcr);
- else
- rtsx_comm_pm_full_on(pcr);
+ rtsx_comm_pm_full_on(pcr);
}
void rtsx_pci_start_run(struct rtsx_pcr *pcr)
@@ -1108,10 +1087,7 @@ static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
- if (pcr->ops->power_saving)
- pcr->ops->power_saving(pcr);
- else
- rtsx_comm_pm_power_saving(pcr);
+ rtsx_comm_pm_power_saving(pcr);
}
static void rtsx_pci_idle_work(struct work_struct *work)
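All of the ASPM hunks in this series collapse per-chip config-byte writes into one core helper: pcie_capability_clear_and_set_word() does the read-modify-write on the PCIe Link Control register without the driver computing the pcie_cap offset by hand. A hedged sketch (my_set_aspm is illustrative, not a driver function):

#include <linux/pci.h>

static void my_set_aspm(struct pci_dev *pdev, u16 aspm_en, bool enable)
{
	/* Clear both ASPM control bits (L0s/L1), then set the requested
	 * state, in a single capability read-modify-write. */
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC,
					   enable ? aspm_en : 0);
}
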
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index ed391df52f4f..024cbd998b2a 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -29,7 +29,6 @@
#define LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF 0xAC
#define LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF 0xF8
#define CMD_TIMEOUT_DEF 100
-#define ASPM_MASK_NEG 0xFC
#define MASK_8_BIT_DEF 0xFF
#define SSC_CLOCK_STABLE_WAIT 130
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 39eec9031487..51aecafdcbdf 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -7,18 +7,10 @@ config CXL_BASE
bool
select PPC_COPRO_BASE
-config CXL_AFU_DRIVER_OPS
- bool
-
-config CXL_LIB
- bool
-
config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
- select CXL_AFU_DRIVER_OPS
- select CXL_LIB
default m
help
Select this option to enable driver support for IBM Coherent
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index e3e085e33d46..7939c55daceb 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -904,6 +904,7 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
struct fastrpc_channel_ctx *cctx;
struct fastrpc_user *fl = ctx->fl;
struct fastrpc_msg *msg = &ctx->msg;
+ int ret;
cctx = fl->cctx;
msg->pid = fl->tgid;
@@ -919,7 +920,13 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
fastrpc_context_get(ctx);
- return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
+ ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
+
+ if (ret)
+ fastrpc_context_put(ctx);
+
+ return ret;
}
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
@@ -1613,8 +1620,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
domains[domain_id]);
data->miscdev.fops = &fastrpc_fops;
err = misc_register(&data->miscdev);
- if (err)
+ if (err) {
+ kfree(data);
return err;
+ }
kref_init(&data->refcount);
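The fastrpc fix above restores get/put balance around a message send: a reference is taken for the in-flight message and must be dropped again if the transport rejects it, otherwise the context leaks. A sketch of the same pattern; transport_send() is a hypothetical stand-in for rpmsg_send():

#include <linux/kref.h>
#include <linux/slab.h>

struct my_ctx {
	struct kref refcount;
};

int transport_send(struct my_ctx *ctx);	/* hypothetical transport call */

static void my_ctx_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_ctx, refcount));
}

static int my_invoke_send(struct my_ctx *ctx)
{
	int ret;

	kref_get(&ctx->refcount);	/* ref held by the in-flight msg */
	ret = transport_send(ctx);
	if (ret)
		kref_put(&ctx->refcount, my_ctx_release); /* undo on error */
	return ret;
}
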
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 2e1c4d2905e8..60460a053b2d 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -515,30 +515,6 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
}
/**
- * genwqe_free_user_pages() - Give pinned pages back
- *
- * Documentation of get_user_pages is in mm/gup.c:
- *
- * If the page is written to, set_page_dirty (or set_page_dirty_lock,
- * as appropriate) must be called after the page is finished with, and
- * before put_page is called.
- */
-static int genwqe_free_user_pages(struct page **page_list,
- unsigned int nr_pages, int dirty)
-{
- unsigned int i;
-
- for (i = 0; i < nr_pages; i++) {
- if (page_list[i] != NULL) {
- if (dirty)
- set_page_dirty_lock(page_list[i]);
- put_page(page_list[i]);
- }
- }
- return 0;
-}
-
-/**
* genwqe_user_vmap() - Map user-space memory to virtual kernel memory
* @cd: pointer to genwqe device
* @m: mapping params
@@ -597,18 +573,18 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
/* pin user pages in memory */
- rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
+ rc = pin_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
m->write ? FOLL_WRITE : 0, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
- goto fail_get_user_pages;
+ goto fail_pin_user_pages;
- /* assumption: get_user_pages can be killed by signals. */
+ /* assumption: pin_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
- genwqe_free_user_pages(m->page_list, rc, m->write);
+ unpin_user_pages_dirty_lock(m->page_list, rc, m->write);
rc = -EFAULT;
- goto fail_get_user_pages;
+ goto fail_pin_user_pages;
}
rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
@@ -618,9 +594,9 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
return 0;
fail_free_user_pages:
- genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
+ unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write);
- fail_get_user_pages:
+ fail_pin_user_pages:
kfree(m->page_list);
m->page_list = NULL;
m->dma_list = NULL;
@@ -650,8 +626,8 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
- genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
-
+ unpin_user_pages_dirty_lock(m->page_list, m->nr_pages,
+ m->write);
kfree(m->page_list);
m->page_list = NULL;
m->dma_list = NULL;
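The genwqe conversion swaps get_user_pages_fast()/put_page() for the pin_user_pages API, whose pins must be released with unpin_user_pages_dirty_lock() (which also covers the set_page_dirty_lock() work the deleted helper did by hand). A minimal sketch, assuming a page-aligned user address:

#include <linux/errno.h>
#include <linux/mm.h>

static int my_pin_user_buf(unsigned long uaddr, int nr_pages,
			   bool write, struct page **pages)
{
	int rc;

	rc = pin_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
				 write ? FOLL_WRITE : 0, pages);
	if (rc < 0)
		return rc;

	if (rc < nr_pages) {
		/* Partial pin: release what we got and fail cleanly. */
		unpin_user_pages_dirty_lock(pages, rc, write);
		return -EFAULT;
	}
	return 0;
}
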
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index 482f6227dbba..421ebd903069 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -13,3 +13,6 @@ habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
include $(src)/goya/Makefile
habanalabs-y += $(HL_GOYA_FILES)
+
+include $(src)/gaudi/Makefile
+habanalabs-y += $(HL_GAUDI_FILES)
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
index 53fddbd8e693..02d13f71b1df 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -105,10 +105,9 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
goto out_err;
}
- if (cb_size > HL_MAX_CB_SIZE) {
- dev_err(hdev->dev,
- "CB size %d must be less then %d\n",
- cb_size, HL_MAX_CB_SIZE);
+ if (cb_size > SZ_2M) {
+ dev_err(hdev->dev, "CB size %d must be less than %d\n",
+ cb_size, SZ_2M);
rc = -EINVAL;
goto out_err;
}
@@ -211,7 +210,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cb_args *args = data;
struct hl_device *hdev = hpriv->hdev;
- u64 handle;
+ u64 handle = 0;
int rc;
if (hl_device_disabled_or_in_reset(hdev)) {
@@ -223,15 +222,26 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
switch (args->in.op) {
case HL_CB_OP_CREATE:
- rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
- &handle, hpriv->ctx->asid);
+ if (args->in.cb_size > HL_MAX_CB_SIZE) {
+ dev_err(hdev->dev,
+ "User requested CB size %d must be less than %d\n",
+ args->in.cb_size, HL_MAX_CB_SIZE);
+ rc = -EINVAL;
+ } else {
+ rc = hl_cb_create(hdev, &hpriv->cb_mgr,
+ args->in.cb_size, &handle,
+ hpriv->ctx->asid);
+ }
+
memset(args, 0, sizeof(*args));
args->out.cb_handle = handle;
break;
+
case HL_CB_OP_DESTROY:
rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
args->in.cb_handle);
break;
+
default:
rc = -ENOTTY;
break;
@@ -278,7 +288,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
if (!cb) {
dev_err(hdev->dev,
- "CB mmap failed, no match to handle %d\n", handle);
+ "CB mmap failed, no match to handle 0x%x\n", handle);
return -EINVAL;
}
@@ -347,7 +357,7 @@ struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
if (!cb) {
spin_unlock(&mgr->cb_lock);
dev_warn(hdev->dev,
- "CB get failed, no match to handle %d\n", handle);
+ "CB get failed, no match to handle 0x%x\n", handle);
return NULL;
}
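The ioctl hunk above moves the user-facing size check out of hl_cb_create() and zero-initializes handle, so the unconditional memset/copy-out path never publishes an uninitialized stack value. A compressed sketch of that boundary-validation shape (my_* names are illustrative):

#include <linux/errno.h>
#include <linux/types.h>

int my_create(u32 size, u64 *handle);	/* hypothetical internal helper */

static int my_ioctl_create(u32 user_size, u32 max_size, u64 *out)
{
	u64 handle = 0;	/* defined value even on the error path */
	int rc;

	if (user_size > max_size)
		rc = -EINVAL;	/* reject at the ioctl boundary */
	else
		rc = my_create(user_size, &handle);

	*out = handle;	/* never copies uninitialized stack out */
	return rc;
}
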
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 409276b6374d..f82974a916c3 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -11,11 +11,33 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
+#define HL_CS_FLAGS_SIG_WAIT (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT)
+
static void job_wq_completion(struct work_struct *work);
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
struct hl_ctx *ctx, u64 timeout_us, u64 seq);
static void cs_do_release(struct kref *ref);
+static void hl_sob_reset(struct kref *ref)
+{
+ struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
+ kref);
+ struct hl_device *hdev = hw_sob->hdev;
+
+ hdev->asic_funcs->reset_sob(hdev, hw_sob);
+}
+
+void hl_sob_reset_error(struct kref *ref)
+{
+ struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
+ kref);
+ struct hl_device *hdev = hw_sob->hdev;
+
+ dev_crit(hdev->dev,
+ "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
+ hw_sob->q_idx, hw_sob->sob_id);
+}
+
static const char *hl_fence_get_driver_name(struct dma_fence *fence)
{
return "HabanaLabs";
@@ -23,10 +45,10 @@ static const char *hl_fence_get_driver_name(struct dma_fence *fence)
static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
{
- struct hl_dma_fence *hl_fence =
- container_of(fence, struct hl_dma_fence, base_fence);
+ struct hl_cs_compl *hl_cs_compl =
+ container_of(fence, struct hl_cs_compl, base_fence);
- return dev_name(hl_fence->hdev->dev);
+ return dev_name(hl_cs_compl->hdev->dev);
}
static bool hl_fence_enable_signaling(struct dma_fence *fence)
@@ -36,17 +58,47 @@ static bool hl_fence_enable_signaling(struct dma_fence *fence)
static void hl_fence_release(struct dma_fence *fence)
{
- struct hl_dma_fence *hl_fence =
- container_of(fence, struct hl_dma_fence, base_fence);
+ struct hl_cs_compl *hl_cs_cmpl =
+ container_of(fence, struct hl_cs_compl, base_fence);
+ struct hl_device *hdev = hl_cs_cmpl->hdev;
+
+ if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
+ (hl_cs_cmpl->type == CS_TYPE_WAIT)) {
- kfree_rcu(hl_fence, base_fence.rcu);
+ dev_dbg(hdev->dev,
+ "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
+ hl_cs_cmpl->cs_seq,
+ hl_cs_cmpl->type,
+ hl_cs_cmpl->hw_sob->sob_id,
+ hl_cs_cmpl->sob_val);
+
+ /*
+ * A signal CS can get completion while the corresponding wait
+ * for signal CS is on its way to the PQ. The wait for signal CS
+ * will get stuck if the signal CS incremented the SOB to its
+ * max value and there are no pending (submitted) waits on this
+ * SOB.
+ * We do the following to avoid this situation:
+ * 1. The wait for signal CS must get a ref for the signal CS as
+ * soon as possible in cs_ioctl_signal_wait() and put it
+ * before being submitted to the PQ but after it incremented
+ * the SOB refcnt in init_signal_wait_cs().
+ * 2. Signal/Wait for signal CS will decrement the SOB refcnt
+ * here.
+ * These two measures guarantee that the wait for signal CS will
+ * reset the SOB upon completion rather than the signal CS and
+ * hence the above scenario is avoided.
+ */
+ kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
+ }
+
+ kfree_rcu(hl_cs_cmpl, base_fence.rcu);
}
static const struct dma_fence_ops hl_fence_ops = {
.get_driver_name = hl_fence_get_driver_name,
.get_timeline_name = hl_fence_get_timeline_name,
.enable_signaling = hl_fence_enable_signaling,
- .wait = dma_fence_default_wait,
.release = hl_fence_release
};
@@ -113,6 +165,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
if (!rc) {
job->patched_cb = parser.patched_cb;
job->job_cb_size = parser.patched_cb_size;
+ job->contains_dma_pkt = parser.contains_dma_pkt;
spin_lock(&job->patched_cb->lock);
job->patched_cb->cs_cnt++;
@@ -259,6 +312,12 @@ static void cs_do_release(struct kref *ref)
spin_unlock(&hdev->hw_queues_mirror_lock);
}
+ } else if (cs->type == CS_TYPE_WAIT) {
+ /*
+ * In case the wait for signal CS was submitted, the put occurs
+ * in init_signal_wait_cs() right before hanging on the PQ.
+ */
+ dma_fence_put(cs->signal_fence);
}
/*
@@ -312,9 +371,9 @@ static void cs_timedout(struct work_struct *work)
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
- struct hl_cs **cs_new)
+ enum hl_cs_type cs_type, struct hl_cs **cs_new)
{
- struct hl_dma_fence *fence;
+ struct hl_cs_compl *cs_cmpl;
struct dma_fence *other = NULL;
struct hl_cs *cs;
int rc;
@@ -326,25 +385,27 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->ctx = ctx;
cs->submitted = false;
cs->completed = false;
+ cs->type = cs_type;
INIT_LIST_HEAD(&cs->job_list);
INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
kref_init(&cs->refcount);
spin_lock_init(&cs->job_lock);
- fence = kmalloc(sizeof(*fence), GFP_ATOMIC);
- if (!fence) {
+ cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
+ if (!cs_cmpl) {
rc = -ENOMEM;
goto free_cs;
}
- fence->hdev = hdev;
- spin_lock_init(&fence->lock);
- cs->fence = &fence->base_fence;
+ cs_cmpl->hdev = hdev;
+ cs_cmpl->type = cs->type;
+ spin_lock_init(&cs_cmpl->lock);
+ cs->fence = &cs_cmpl->base_fence;
spin_lock(&ctx->cs_lock);
- fence->cs_seq = ctx->cs_sequence;
- other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
+ cs_cmpl->cs_seq = ctx->cs_sequence;
+ other = ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)];
if ((other) && (!dma_fence_is_signaled(other))) {
spin_unlock(&ctx->cs_lock);
dev_dbg(hdev->dev,
@@ -353,16 +414,16 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
goto free_fence;
}
- dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock,
+ dma_fence_init(&cs_cmpl->base_fence, &hl_fence_ops, &cs_cmpl->lock,
ctx->asid, ctx->cs_sequence);
- cs->sequence = fence->cs_seq;
+ cs->sequence = cs_cmpl->cs_seq;
- ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] =
- &fence->base_fence;
+ ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)] =
+ &cs_cmpl->base_fence;
ctx->cs_sequence++;
- dma_fence_get(&fence->base_fence);
+ dma_fence_get(&cs_cmpl->base_fence);
dma_fence_put(other);
@@ -373,7 +434,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
return 0;
free_fence:
- kfree(fence);
+ kfree(cs_cmpl);
free_cs:
kfree(cs);
return rc;
@@ -499,8 +560,8 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
return job;
}
-static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
- u32 num_chunks, u64 *cs_seq)
+static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
+ u32 num_chunks, u64 *cs_seq)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
@@ -538,7 +599,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/* increment refcnt for context */
hl_ctx_get(hdev, hpriv->ctx);
- rc = allocate_cs(hdev, hpriv->ctx, &cs);
+ rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
if (rc) {
hl_ctx_put(hpriv->ctx);
goto free_cs_chunk_array;
@@ -652,13 +713,230 @@ out:
return rc;
}
+static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
+ void __user *chunks, u32 num_chunks,
+ u64 *cs_seq)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_ctx *ctx = hpriv->ctx;
+ struct hl_cs_chunk *cs_chunk_array, *chunk;
+ struct hw_queue_properties *hw_queue_prop;
+ struct dma_fence *sig_fence = NULL;
+ struct hl_cs_job *job;
+ struct hl_cs *cs;
+ struct hl_cb *cb;
+ u64 *signal_seq_arr = NULL, signal_seq;
+ u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size;
+ int rc;
+
+ *cs_seq = ULLONG_MAX;
+
+ if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ dev_err(hdev->dev,
+ "Number of chunks can NOT be larger than %d\n",
+ HL_MAX_JOBS_PER_CS);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
+ GFP_ATOMIC);
+ if (!cs_chunk_array) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
+ if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
+ dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
+ rc = -EFAULT;
+ goto free_cs_chunk_array;
+ }
+
+ /* currently it is guaranteed to have only one chunk */
+ chunk = &cs_chunk_array[0];
+ q_idx = chunk->queue_index;
+ hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
+
+ if ((q_idx >= HL_MAX_QUEUES) ||
+ (hw_queue_prop->type != QUEUE_TYPE_EXT)) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
+ if (cs_type == CS_TYPE_WAIT) {
+ struct hl_cs_compl *sig_waitcs_cmpl;
+
+ signal_seq_arr_len = chunk->num_signal_seq_arr;
+
+ /* currently only one signal seq is supported */
+ if (signal_seq_arr_len != 1) {
+ dev_err(hdev->dev,
+ "Wait for signal CS supports only one signal CS seq\n");
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
+ signal_seq_arr = kmalloc_array(signal_seq_arr_len,
+ sizeof(*signal_seq_arr),
+ GFP_ATOMIC);
+ if (!signal_seq_arr) {
+ rc = -ENOMEM;
+ goto free_cs_chunk_array;
+ }
+
+ size_to_copy = chunk->num_signal_seq_arr *
+ sizeof(*signal_seq_arr);
+ if (copy_from_user(signal_seq_arr,
+ u64_to_user_ptr(chunk->signal_seq_arr),
+ size_to_copy)) {
+ dev_err(hdev->dev,
+ "Failed to copy signal seq array from user\n");
+ rc = -EFAULT;
+ goto free_signal_seq_array;
+ }
+
+ /* currently it is guaranteed to have only one signal seq */
+ signal_seq = signal_seq_arr[0];
+ sig_fence = hl_ctx_get_fence(ctx, signal_seq);
+ if (IS_ERR(sig_fence)) {
+ dev_err(hdev->dev,
+ "Failed to get signal CS with seq 0x%llx\n",
+ signal_seq);
+ rc = PTR_ERR(sig_fence);
+ goto free_signal_seq_array;
+ }
+
+ if (!sig_fence) {
+ /* signal CS already finished */
+ rc = 0;
+ goto free_signal_seq_array;
+ }
+
+ sig_waitcs_cmpl =
+ container_of(sig_fence, struct hl_cs_compl, base_fence);
+
+ if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ dev_err(hdev->dev,
+ "CS seq 0x%llx is not of a signal CS\n",
+ signal_seq);
+ dma_fence_put(sig_fence);
+ rc = -EINVAL;
+ goto free_signal_seq_array;
+ }
+
+ if (dma_fence_is_signaled(sig_fence)) {
+ /* signal CS already finished */
+ dma_fence_put(sig_fence);
+ rc = 0;
+ goto free_signal_seq_array;
+ }
+ }
+
+ /* increment refcnt for context */
+ hl_ctx_get(hdev, ctx);
+
+ rc = allocate_cs(hdev, ctx, cs_type, &cs);
+ if (rc) {
+ if (cs_type == CS_TYPE_WAIT)
+ dma_fence_put(sig_fence);
+ hl_ctx_put(ctx);
+ goto free_signal_seq_array;
+ }
+
+ /*
+ * Save the signal CS fence for later initialization right before
+ * hanging the wait CS on the queue.
+ */
+ if (cs->type == CS_TYPE_WAIT)
+ cs->signal_fence = sig_fence;
+
+ hl_debugfs_add_cs(cs);
+
+ *cs_seq = cs->sequence;
+
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto put_cs;
+ }
+
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (!cb) {
+ kfree(job);
+ rc = -EFAULT;
+ goto put_cs;
+ }
+
+ if (cs->type == CS_TYPE_WAIT)
+ cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
+ else
+ cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
+
+ job->id = 0;
+ job->cs = cs;
+ job->user_cb = cb;
+ job->user_cb->cs_cnt++;
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = q_idx;
+
+ /*
+ * No need for parsing; the user CB is the patched CB.
+ * We call hl_cb_destroy() for two reasons: the CB is no longer needed
+ * in the CB idr, and its refcount must be decremented as it was
+ * incremented inside hl_cb_kernel_create().
+ */
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = job->user_cb_size;
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+ list_add_tail(&job->cs_node, &cs->job_list);
+
+ /* increment refcount, since we get a completion for external queues */
+ cs_get(cs);
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = hl_hw_queue_schedule_cs(cs);
+ if (rc) {
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu to H/W queues, error %d\n",
+ ctx->asid, cs->sequence, rc);
+ goto free_cs_object;
+ }
+
+ rc = HL_CS_STATUS_SUCCESS;
+ goto put_cs;
+
+free_cs_object:
+ cs_rollback(hdev, cs);
+ *cs_seq = ULLONG_MAX;
+ /* The path below is both for good and erroneous exits */
+put_cs:
+ /* We finished with the CS in this function, so put the ref */
+ cs_put(cs);
+free_signal_seq_array:
+ if (cs_type == CS_TYPE_WAIT)
+ kfree(signal_seq_arr);
+free_cs_chunk_array:
+ kfree(cs_chunk_array);
+out:
+ return rc;
+}
+
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct hl_device *hdev = hpriv->hdev;
union hl_cs_args *args = data;
struct hl_ctx *ctx = hpriv->ctx;
void __user *chunks_execute, *chunks_restore;
- u32 num_chunks_execute, num_chunks_restore;
+ enum hl_cs_type cs_type;
+ u32 num_chunks_execute, num_chunks_restore, sig_wait_flags;
u64 cs_seq = ULONG_MAX;
int rc, do_ctx_switch;
bool need_soft_reset = false;
@@ -671,12 +949,44 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
goto out;
}
+ sig_wait_flags = args->in.cs_flags & HL_CS_FLAGS_SIG_WAIT;
+
+ if (unlikely(sig_wait_flags == HL_CS_FLAGS_SIG_WAIT)) {
+ dev_err(hdev->dev,
+ "Signal and wait CS flags are mutually exclusive, context %d\n",
+ ctx->asid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely((sig_wait_flags & HL_CS_FLAGS_SIG_WAIT) &&
+ (!hdev->supports_sync_stream))) {
+ dev_err(hdev->dev, "Sync stream CS is not supported\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (args->in.cs_flags & HL_CS_FLAGS_SIGNAL)
+ cs_type = CS_TYPE_SIGNAL;
+ else if (args->in.cs_flags & HL_CS_FLAGS_WAIT)
+ cs_type = CS_TYPE_WAIT;
+ else
+ cs_type = CS_TYPE_DEFAULT;
+
chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute;
num_chunks_execute = args->in.num_chunks_execute;
- if (!num_chunks_execute) {
+ if (cs_type == CS_TYPE_DEFAULT) {
+ if (!num_chunks_execute) {
+ dev_err(hdev->dev,
+ "Got execute CS with 0 chunks, context %d\n",
+ ctx->asid);
+ rc = -EINVAL;
+ goto out;
+ }
+ } else if (num_chunks_execute != 1) {
dev_err(hdev->dev,
- "Got execute CS with 0 chunks, context %d\n",
+ "Sync stream CS mandates one chunk only, context %d\n",
ctx->asid);
rc = -EINVAL;
goto out;
@@ -722,7 +1032,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
"Need to run restore phase but restore CS is empty\n");
rc = 0;
} else {
- rc = _hl_cs_ioctl(hpriv, chunks_restore,
+ rc = cs_ioctl_default(hpriv, chunks_restore,
num_chunks_restore, &cs_seq);
}
@@ -764,7 +1074,12 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
}
}
- rc = _hl_cs_ioctl(hpriv, chunks_execute, num_chunks_execute, &cs_seq);
+ if (cs_type == CS_TYPE_DEFAULT)
+ rc = cs_ioctl_default(hpriv, chunks_execute, num_chunks_execute,
+ &cs_seq);
+ else
+ rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks_execute,
+ num_chunks_execute, &cs_seq);
out:
if (rc != -EAGAIN) {
@@ -796,6 +1111,10 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
fence = hl_ctx_get_fence(ctx, seq);
if (IS_ERR(fence)) {
rc = PTR_ERR(fence);
+ if (rc == -EINVAL)
+ dev_notice_ratelimited(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu\n",
+ seq, ctx->cs_sequence);
} else if (fence) {
rc = dma_fence_wait_timeout(fence, true, timeout);
if (fence->error == -ETIMEDOUT)
@@ -803,8 +1122,12 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
else if (fence->error == -EIO)
rc = -EIO;
dma_fence_put(fence);
- } else
+ } else {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ seq, ctx->cs_sequence);
rc = 1;
+ }
hl_ctx_put(ctx);
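The long comment in hl_fence_release() above describes a refcount handoff on the hardware sync object (SOB). Stripped of the driver specifics, the mechanism is kref_put() with a release callback that resets the SOB once the last signal/wait user drops its reference. A sketch with illustrative my_* names:

#include <linux/kref.h>

struct my_hw_sob {
	struct kref kref;
	int sob_id;
};

void my_reset_sob_in_hw(struct my_hw_sob *sob);	/* hypothetical ASIC hook */

static void my_sob_reset(struct kref *ref)
{
	struct my_hw_sob *sob = container_of(ref, struct my_hw_sob, kref);

	/* Last signal/wait user is gone: safe to reset the SOB value. */
	my_reset_sob_in_hw(sob);
}

static void my_cs_complete(struct my_hw_sob *sob)
{
	/* Each completing signal/wait CS drops one SOB reference. */
	kref_put(&sob->kref, my_sob_reset);
}
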
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 2df6fb87e7ff..ec92b3506b1f 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -170,24 +170,16 @@ int hl_ctx_put(struct hl_ctx *ctx)
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
- struct hl_device *hdev = ctx->hdev;
struct dma_fence *fence;
spin_lock(&ctx->cs_lock);
if (seq >= ctx->cs_sequence) {
- dev_notice_ratelimited(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu\n",
- seq, ctx->cs_sequence);
spin_unlock(&ctx->cs_lock);
return ERR_PTR(-EINVAL);
}
-
if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
- dev_dbg(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
- seq, ctx->cs_sequence);
spin_unlock(&ctx->cs_lock);
return NULL;
}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 756d36ed5d95..3c8dcdfba20c 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -970,6 +970,98 @@ static ssize_t hl_device_write(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf, "%d\n", hdev->clock_gating);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't change clock gating during reset\n");
+ return 0;
+ }
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ if (value) {
+ hdev->clock_gating = 1;
+ if (hdev->asic_funcs->enable_clock_gating)
+ hdev->asic_funcs->enable_clock_gating(hdev);
+ } else {
+ if (hdev->asic_funcs->disable_clock_gating)
+ hdev->asic_funcs->disable_clock_gating(hdev);
+ hdev->clock_gating = 0;
+ }
+
+ return count;
+}
+
+static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't change stop on error during reset\n");
+ return 0;
+ }
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ hdev->stop_on_err = value ? 1 : 0;
+
+ hl_device_reset(hdev, false, false);
+
+ return count;
+}
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -1015,6 +1107,18 @@ static const struct file_operations hl_device_fops = {
.write = hl_device_write
};
+static const struct file_operations hl_clk_gate_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_clk_gate_read,
+ .write = hl_clk_gate_write
+};
+
+static const struct file_operations hl_stop_on_err_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_stop_on_err_read,
+ .write = hl_stop_on_err_write
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
@@ -1152,6 +1256,18 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_device_fops);
+ debugfs_create_file("clk_gate",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_clk_gate_fops);
+
+ debugfs_create_file("stop_on_err",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_stop_on_err_fops);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
ent = debugfs_create_file(hl_debugfs_list[i].name,
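The clk_gate and stop_on_err attributes follow the stock debugfs recipe: a file_operations pair bound with debugfs_create_file() under the device's debugfs root. A minimal read-only sketch (my_* names are illustrative):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

static ssize_t my_attr_read(struct file *f, char __user *buf,
			    size_t count, loff_t *ppos)
{
	char tmp[16];
	int len = scnprintf(tmp, sizeof(tmp), "%d\n", 1);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static const struct file_operations my_attr_fops = {
	.owner = THIS_MODULE,
	.read = my_attr_read,
};

/* at init time, with "root" the driver's debugfs directory: */
/* debugfs_create_file("my_attr", 0444, root, NULL, &my_attr_fops); */
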
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index aef4de36b7aa..2b38a119704c 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -256,6 +256,10 @@ static int device_early_init(struct hl_device *hdev)
goya_set_asic_funcs(hdev);
strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
break;
+ case ASIC_GAUDI:
+ gaudi_set_asic_funcs(hdev);
+ sprintf(hdev->asic_name, "GAUDI");
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -603,6 +607,9 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
hdev->in_debug = 0;
+ if (!hdev->hard_reset_pending)
+ hdev->asic_funcs->enable_clock_gating(hdev);
+
goto out;
}
@@ -613,6 +620,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
goto out;
}
+ hdev->asic_funcs->disable_clock_gating(hdev);
hdev->in_debug = 1;
out:
@@ -718,7 +726,7 @@ disable_device:
return rc;
}
-static void device_kill_open_processes(struct hl_device *hdev)
+static int device_kill_open_processes(struct hl_device *hdev)
{
u16 pending_total, pending_cnt;
struct hl_fpriv *hpriv;
@@ -771,9 +779,7 @@ static void device_kill_open_processes(struct hl_device *hdev)
ssleep(1);
}
- if (!list_empty(&hdev->fpriv_list))
- dev_crit(hdev->dev,
- "Going to hard reset with open user contexts\n");
+ return list_empty(&hdev->fpriv_list) ? 0 : -EBUSY;
}
static void device_hard_reset_pending(struct work_struct *work)
@@ -793,6 +799,7 @@ static void device_hard_reset_pending(struct work_struct *work)
* @hdev: pointer to habanalabs device structure
* @hard_reset: should we do hard reset to all engines or just reset the
* compute/dma engines
+ * @from_hard_reset_thread: is the caller the hard-reset thread
*
* Block future CS and wait for pending CS to be enqueued
* Call ASIC H/W fini
@@ -815,6 +822,11 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
return 0;
}
+ if ((!hard_reset) && (!hdev->supports_soft_reset)) {
+ dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
+ hard_reset = true;
+ }
+
/*
* Prevent concurrency in this function - only one reset should be
* done at any given time. Only need to perform this if we didn't
@@ -894,7 +906,12 @@ again:
* process can't really exit until all its CSs are done, which
* is what we do in cs rollback
*/
- device_kill_open_processes(hdev);
+ rc = device_kill_open_processes(hdev);
+ if (rc) {
+ dev_crit(hdev->dev,
+ "Failed to kill all open processes, stopping hard reset\n");
+ goto out_err;
+ }
/* Flush the Event queue workers to make sure no other thread is
* reading or writing to registers during the reset
@@ -1062,7 +1079,7 @@ out_err:
*/
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
- int i, rc, cq_ready_cnt;
+ int i, rc, cq_cnt, cq_ready_cnt;
char *name;
bool add_cdev_sysfs_on_err = false;
@@ -1120,14 +1137,16 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto sw_fini;
}
+ cq_cnt = hdev->asic_prop.completion_queues_count;
+
/*
* Initialize the completion queues. Must be done before hw_init,
* because there the addresses of the completion queues are being
* passed as arguments to request_irq
*/
- hdev->completion_queue =
- kcalloc(hdev->asic_prop.completion_queues_count,
- sizeof(*hdev->completion_queue), GFP_KERNEL);
+ hdev->completion_queue = kcalloc(cq_cnt,
+ sizeof(*hdev->completion_queue),
+ GFP_KERNEL);
if (!hdev->completion_queue) {
dev_err(hdev->dev, "failed to allocate completion queues\n");
@@ -1135,10 +1154,9 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto hw_queues_destroy;
}
- for (i = 0, cq_ready_cnt = 0;
- i < hdev->asic_prop.completion_queues_count;
- i++, cq_ready_cnt++) {
- rc = hl_cq_init(hdev, &hdev->completion_queue[i], i);
+ for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
+ rc = hl_cq_init(hdev, &hdev->completion_queue[i],
+ hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
if (rc) {
dev_err(hdev->dev,
"failed to initialize completion queue\n");
@@ -1325,11 +1343,12 @@ void hl_device_fini(struct hl_device *hdev)
* This function is competing with the reset function, so try to
* take the reset atomic and if we are already in middle of reset,
* wait until reset function is finished. Reset function is designed
- * to always finish (could take up to a few seconds in worst case).
+ * to always finish. However, in Gaudi, because of all the network
+ * ports, the hard reset could take between 10 and 30 seconds.
*/
timeout = ktime_add_us(ktime_get(),
- HL_PENDING_RESET_PER_SEC * 1000 * 1000 * 4);
+ HL_HARD_RESET_MAX_TIMEOUT * 1000 * 1000);
rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
while (rc) {
usleep_range(50, 200);
@@ -1375,7 +1394,9 @@ void hl_device_fini(struct hl_device *hdev)
* can't really exit until all its CSs are done, which is what we
* do in cs rollback
*/
- device_kill_open_processes(hdev);
+ rc = device_kill_open_processes(hdev);
+ if (rc)
+ dev_crit(hdev->dev, "Failed to kill all open processes\n");
hl_cb_pool_fini(hdev);
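device_kill_open_processes() now reports leftover user contexts instead of letting a hard reset race them, and the reset path aborts on -EBUSY. A sketch of the retry-then-report shape, with a hypothetical list of open users:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>

static int my_kill_open_users(struct list_head *users, unsigned int retries)
{
	while (!list_empty(users) && retries--) {
		/* ... send SIGKILL to each owner (elided) ... */
		ssleep(1);	/* give the processes time to tear down */
	}

	/* Let the caller abort the reset rather than race open users. */
	return list_empty(users) ? 0 : -EBUSY;
}
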
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index f5bd03171dac..baf790cf4b78 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -6,20 +6,22 @@
*/
#include "habanalabs.h"
+#include "include/hl_boot_if.h"
#include <linux/firmware.h>
#include <linux/genalloc.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/slab.h>
/**
- * hl_fw_push_fw_to_device() - Push FW code to device.
+ * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
* @hdev: pointer to hl_device structure.
*
* Copy fw code from firmware file to device memory.
*
* Return: 0 on success, non-zero for failure.
*/
-int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst)
{
const struct firmware *fw;
@@ -129,6 +131,68 @@ out:
return rc;
}
+int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(event_type);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
+
+ return rc;
+}
+
+int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
+ size_t irq_arr_size)
+{
+ struct armcp_unmask_irq_arr_packet *pkt;
+ size_t total_pkt_size;
+ long result;
+ int rc;
+
+ total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
+ irq_arr_size;
+
+ /* data should be aligned to 8 bytes in order for ArmCP to copy it */
+ total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
+
+ /* total_pkt_size is cast to u16 later on */
+ if (total_pkt_size > USHRT_MAX) {
+ dev_err(hdev->dev, "too many elements in IRQ array\n");
+ return -EINVAL;
+ }
+
+ pkt = kzalloc(total_pkt_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
+ memcpy(&pkt->irqs, irq_arr, irq_arr_size);
+
+ pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
+ total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to unmask IRQ array\n");
+
+ kfree(pkt);
+
+ return rc;
+}
+
int hl_fw_test_cpu_queue(struct hl_device *hdev)
{
struct armcp_packet test_pkt = {};
@@ -286,3 +350,232 @@ out:
return rc;
}
+
+static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
+{
+ u32 err_val;
+
+ /* Some of the firmware status codes are deprecated in newer f/w
+ * versions. In those versions, the errors are reported
+ * in different registers. Therefore, we need to check those
+ * registers and print the exact errors. Moreover, there
+ * may be multiple errors, so we need to report on each error
+ * separately. Some of the error codes might indicate a state
+ * that is not an error per se, but it is an error in a production
+ * environment.
+ */
+ err_val = RREG32(boot_err0_reg);
+ if (!(err_val & CPU_BOOT_ERR0_ENABLED))
+ return;
+
+ if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - DRAM initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
+ dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
+ if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - Thermal Sensor initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
+ dev_warn(hdev->dev,
+ "Device boot warning - Skipped DRAM initialization\n");
+ if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED)
+ dev_warn(hdev->dev,
+ "Device boot error - Skipped waiting for BMC\n");
+ if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
+ dev_err(hdev->dev,
+ "Device boot error - Serdes data from BMC not available\n");
+ if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - NIC F/W initialization failed\n");
+}
+
+int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
+ u32 boot_err0_reg, bool skip_bmc,
+ u32 cpu_timeout, u32 boot_fit_timeout)
+{
+ u32 status;
+ int rc;
+
+ dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
+ cpu_timeout / USEC_PER_SEC);
+
+ /* Wait for boot FIT request */
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_boot_status_reg,
+ status,
+ status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
+ 10000,
+ boot_fit_timeout);
+
+ if (rc) {
+ dev_dbg(hdev->dev,
+ "No boot fit request received, resuming boot\n");
+ } else {
+ rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
+ if (rc)
+ goto out;
+
+ /* Clear device CPU message status */
+ WREG32(cpu_msg_status_reg, CPU_MSG_CLR);
+
+ /* Signal device CPU that boot loader is ready */
+ WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
+
+ /* Poll for CPU device ack */
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_msg_status_reg,
+ status,
+ status == CPU_MSG_OK,
+ 10000,
+ boot_fit_timeout);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout waiting for boot fit load ack\n");
+ goto out;
+ }
+
+ /* Clear message */
+ WREG32(msg_to_cpu_reg, KMD_MSG_NA);
+ }
+
+ /* Make sure CPU boot-loader is running */
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_boot_status_reg,
+ status,
+ (status == CPU_BOOT_STATUS_DRAM_RDY) ||
+ (status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
+ (status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ 10000,
+ cpu_timeout);
+
+ /* Read U-Boot and preboot versions now in case we fail later */
+ hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
+ hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT);
+
+ /* Some of the status codes below are deprecated in newer f/w
+ * versions but we keep them here for backward compatibility
+ */
+ if (rc) {
+ switch (status) {
+ case CPU_BOOT_STATUS_NA:
+ dev_err(hdev->dev,
+ "Device boot error - BTL did NOT run\n");
+ break;
+ case CPU_BOOT_STATUS_IN_WFE:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck inside WFE loop\n");
+ break;
+ case CPU_BOOT_STATUS_IN_BTL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in BTL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_PREBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in Preboot\n");
+ break;
+ case CPU_BOOT_STATUS_IN_SPL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in SPL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_UBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in u-boot\n");
+ break;
+ case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - DRAM initialization failed\n");
+ break;
+ case CPU_BOOT_STATUS_UBOOT_NOT_READY:
+ dev_err(hdev->dev,
+ "Device boot error - u-boot stopped by user\n");
+ break;
+ case CPU_BOOT_STATUS_TS_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - Thermal Sensor initialization failed\n");
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Device boot error - Invalid status code %d\n",
+ status);
+ break;
+ }
+
+ rc = -EIO;
+ goto out;
+ }
+
+ if (!hdev->fw_loading) {
+ dev_info(hdev->dev, "Skip loading FW\n");
+ goto out;
+ }
+
+ if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
+ goto out;
+
+ dev_info(hdev->dev,
+ "Loading firmware to device, may take some time...\n");
+
+ rc = hdev->asic_funcs->load_firmware_to_device(hdev);
+ if (rc)
+ goto out;
+
+ if (skip_bmc) {
+ WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);
+
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_boot_status_reg,
+ status,
+ (status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
+ 10000,
+ cpu_timeout);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get ACK on skipping BMC, %d\n",
+ status);
+ WREG32(msg_to_cpu_reg, KMD_MSG_NA);
+ rc = -EIO;
+ goto out;
+ }
+ }
+
+ WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
+
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_boot_status_reg,
+ status,
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ 10000,
+ cpu_timeout);
+
+ /* Clear message */
+ WREG32(msg_to_cpu_reg, KMD_MSG_NA);
+
+ if (rc) {
+ if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
+ dev_err(hdev->dev,
+ "Device reports FIT image is corrupted\n");
+ else
+ dev_err(hdev->dev,
+ "Device failed to load, %d\n", status);
+
+ rc = -EIO;
+ goto out;
+ }
+
+ dev_info(hdev->dev, "Successfully loaded firmware to device\n");
+
+out:
+ fw_read_errors(hdev, boot_err0_reg);
+
+ return rc;
+}
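hl_fw_init_cpu() drives the whole boot handshake through hl_poll_timeout(), a register-polling macro internal to this driver. A generic sketch of the same poll-until-deadline idea, assuming a read_status() accessor in place of the driver's register plumbing:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>

static int my_poll_status(u32 (*read_status)(void), u32 expected,
			  unsigned int sleep_us, u64 timeout_us)
{
	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

	for (;;) {
		if (read_status() == expected)
			return 0;
		if (ktime_after(ktime_get(), deadline))
			return -ETIMEDOUT; /* caller decodes last status */
		usleep_range(sleep_us, sleep_us + sleep_us / 10);
	}
}
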
diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/misc/habanalabs/gaudi/Makefile
new file mode 100644
index 000000000000..f802cdc980ca
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)
+
+HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_hwmgr.o gaudi/gaudi_security.o \
+ gaudi/gaudi_coresight.o
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
new file mode 100644
index 000000000000..61f88e9884ce
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -0,0 +1,6748 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudiP.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+#include "include/hw_ip/mmu/mmu_v1_1.h"
+#include "include/gaudi/gaudi_masks.h"
+#include "include/gaudi/gaudi_fw_if.h"
+#include "include/gaudi/gaudi_reg_map.h"
+#include "include/gaudi/gaudi_async_ids_map_extended.h"
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/hwmon.h>
+#include <linux/genalloc.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
+/*
+ * Gaudi security scheme:
+ *
+ * 1. Host is protected by:
+ * - Range registers
+ * - MMU
+ *
+ * 2. DDR is protected by:
+ * - Range registers (protect the first 512MB)
+ *
+ * 3. Configuration is protected by:
+ * - Range registers
+ * - Protection bits
+ *
+ * MMU is always enabled.
+ *
+ * QMAN DMA channels 0,1,5 (PCI DMA):
+ *     - DMA is not secured.
+ *     - PQ and CQ are secured.
+ *     - CP is secured: The driver needs to parse CB but WREG should be allowed
+ *                      because of TDMA (tensor DMA). Hence, WREG is never
+ *                      secured.
+ *
+ * When the driver needs to use DMA it will check that Gaudi is idle, set DMA
+ * channel 0 to be secured, execute the DMA and change it back to not secured.
+ * Currently, the driver doesn't use the DMA while there are compute jobs
+ * running.
+ *
+ * The current use cases for the driver to use the DMA are:
+ * - Clear SRAM on context switch (happens on context switch when device is
+ * idle)
+ * - MMU page tables area clear (happens on init)
+ *
+ * QMAN DMA 2-4,6,7, TPC, MME, NIC:
+ * PQ is secured and is located on the Host (HBM CON TPC3 bug)
+ * CQ, CP and the engine are not secured
+ *
+ */
+
+#define GAUDI_BOOT_FIT_FILE "habanalabs/gaudi/gaudi-boot-fit.itb"
+#define GAUDI_LINUX_FW_FILE "habanalabs/gaudi/gaudi-fit.itb"
+#define GAUDI_TPC_FW_FILE "habanalabs/gaudi/gaudi_tpc.bin"
+
+#define GAUDI_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
+
+#define GAUDI_RESET_TIMEOUT_MSEC 1000 /* 1000ms */
+#define GAUDI_RESET_WAIT_MSEC 1 /* 1ms */
+#define GAUDI_CPU_RESET_WAIT_MSEC 200 /* 200ms */
+#define GAUDI_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
+
+#define GAUDI_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
+#define GAUDI_PLDM_HRESET_TIMEOUT_MSEC 20000 /* 20s */
+#define GAUDI_PLDM_SRESET_TIMEOUT_MSEC 14000 /* 14s */
+#define GAUDI_PLDM_TEST_QUEUE_WAIT_USEC 1000000 /* 1s */
+#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
+#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
+#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
+#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
+
+#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9
+
+#define GAUDI_MAX_STRING_LEN 20
+
+#define GAUDI_CB_POOL_CB_CNT 512
+#define GAUDI_CB_POOL_CB_SIZE 0x20000 /* 128KB */
+
+#define GAUDI_ALLOC_CPU_MEM_RETRY_CNT 3
+
+#define GAUDI_NUM_OF_TPC_INTR_CAUSE 20
+
+#define GAUDI_NUM_OF_QM_ERR_CAUSE 16
+
+#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3
+
+#define GAUDI_ARB_WDT_TIMEOUT 0x400000
+
+static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
+ "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
+ "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
+ "gaudi cq 5_0", "gaudi cq 5_1", "gaudi cq 5_2", "gaudi cq 5_3",
+ "gaudi cpu eq"
+};
+
+static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
+ [GAUDI_PCI_DMA_1] = 0,
+ [GAUDI_PCI_DMA_2] = 1,
+ [GAUDI_PCI_DMA_3] = 5,
+ [GAUDI_HBM_DMA_1] = 2,
+ [GAUDI_HBM_DMA_2] = 3,
+ [GAUDI_HBM_DMA_3] = 4,
+ [GAUDI_HBM_DMA_4] = 6,
+ [GAUDI_HBM_DMA_5] = 7
+};
+
+static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
+ [0] = GAUDI_QUEUE_ID_DMA_0_0,
+ [1] = GAUDI_QUEUE_ID_DMA_0_1,
+ [2] = GAUDI_QUEUE_ID_DMA_0_2,
+ [3] = GAUDI_QUEUE_ID_DMA_0_3,
+ [4] = GAUDI_QUEUE_ID_DMA_1_0,
+ [5] = GAUDI_QUEUE_ID_DMA_1_1,
+ [6] = GAUDI_QUEUE_ID_DMA_1_2,
+ [7] = GAUDI_QUEUE_ID_DMA_1_3,
+ [8] = GAUDI_QUEUE_ID_DMA_5_0,
+ [9] = GAUDI_QUEUE_ID_DMA_5_1,
+ [10] = GAUDI_QUEUE_ID_DMA_5_2,
+ [11] = GAUDI_QUEUE_ID_DMA_5_3
+};
+
+static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
+ [PACKET_WREG_32] = sizeof(struct packet_wreg32),
+ [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
+ [PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
+ [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
+ [PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
+ [PACKET_REPEAT] = sizeof(struct packet_repeat),
+ [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
+ [PACKET_FENCE] = sizeof(struct packet_fence),
+ [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
+ [PACKET_NOP] = sizeof(struct packet_nop),
+ [PACKET_STOP] = sizeof(struct packet_stop),
+ [PACKET_ARB_POINT] = sizeof(struct packet_arb_point),
+ [PACKET_WAIT] = sizeof(struct packet_wait),
+ [PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
+};
+
+static const char * const
+gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = {
+ "tpc_address_exceed_slm",
+ "tpc_div_by_0",
+ "tpc_spu_mac_overflow",
+ "tpc_spu_addsub_overflow",
+ "tpc_spu_abs_overflow",
+ "tpc_spu_fp_dst_nan_inf",
+ "tpc_spu_fp_dst_denorm",
+ "tpc_vpu_mac_overflow",
+ "tpc_vpu_addsub_overflow",
+ "tpc_vpu_abs_overflow",
+ "tpc_vpu_fp_dst_nan_inf",
+ "tpc_vpu_fp_dst_denorm",
+ "tpc_assertions",
+ "tpc_illegal_instruction",
+ "tpc_pc_wrap_around",
+ "tpc_qm_sw_err",
+ "tpc_hbw_rresp_err",
+ "tpc_hbw_bresp_err",
+ "tpc_lbw_rresp_err",
+ "tpc_lbw_bresp_err"
+};
+
+static const char * const
+gaudi_qman_error_cause[GAUDI_NUM_OF_QM_ERR_CAUSE] = {
+ "PQ AXI HBW error",
+ "CQ AXI HBW error",
+ "CP AXI HBW error",
+ "CP error due to undefined OPCODE",
+ "CP encountered STOP OPCODE",
+ "CP AXI LBW error",
+ "CP WRREG32 or WRBULK returned error",
+ "N/A",
+ "FENCE 0 inc over max value and clipped",
+ "FENCE 1 inc over max value and clipped",
+ "FENCE 2 inc over max value and clipped",
+ "FENCE 3 inc over max value and clipped",
+ "FENCE 0 dec under min value and clipped",
+ "FENCE 1 dec under min value and clipped",
+ "FENCE 2 dec under min value and clipped",
+ "FENCE 3 dec under min value and clipped"
+};
+
+static const char * const
+gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = {
+ "Choice push while full error",
+ "Choice Q watchdog error",
+ "MSG AXI LBW returned with error"
+};
+
+static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_2 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_3 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_0 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_1 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_2 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_3 */
+ QUEUE_TYPE_CPU, /* GAUDI_QUEUE_ID_CPU_PQ */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_3 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_0 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_1 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_2 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_3 */
+};
+
+static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
+ u64 phys_addr);
+static int gaudi_send_job_on_qman0(struct hl_device *hdev,
+ struct hl_cs_job *job);
+static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
+ u32 size, u64 val);
+static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
+ u32 tpc_id);
+static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
+static int gaudi_armcp_info_get(struct hl_device *hdev);
+static void gaudi_disable_clock_gating(struct hl_device *hdev);
+static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
+
+static int gaudi_get_fixed_properties(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int i;
+
+ if (GAUDI_QUEUE_ID_SIZE >= HL_MAX_QUEUES) {
+ dev_err(hdev->dev,
+ "Number of H/W queues must be smaller than %d\n",
+ HL_MAX_QUEUES);
+ return -EFAULT;
+ }
+
+ for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+ if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
+ prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 1;
+ } else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
+ prop->hw_queues_props[i].driver_only = 1;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
+ } else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
+ prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
+ } else if (gaudi_queue_type[i] == QUEUE_TYPE_NA) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
+ prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
+ }
+ }
+
+ for (; i < HL_MAX_QUEUES; i++)
+ prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
+
+ prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
+
+ prop->dram_base_address = DRAM_PHYS_BASE;
+ prop->dram_size = GAUDI_HBM_SIZE_32GB;
+ prop->dram_end_address = prop->dram_base_address +
+ prop->dram_size;
+ prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
+
+ prop->sram_base_address = SRAM_BASE_ADDR;
+ prop->sram_size = SRAM_SIZE;
+ prop->sram_end_address = prop->sram_base_address +
+ prop->sram_size;
+ prop->sram_user_base_address = prop->sram_base_address +
+ SRAM_USER_BASE_OFFSET;
+
+ prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
+ if (hdev->pldm)
+ prop->mmu_pgt_size = 0x800000; /* 8MB */
+ else
+ prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
+ prop->mmu_pte_size = HL_PTE_SIZE;
+ prop->mmu_hop_table_size = HOP_TABLE_SIZE;
+ prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
+ prop->dram_page_size = PAGE_SIZE_2MB;
+
+ prop->pmmu.hop0_shift = HOP0_SHIFT;
+ prop->pmmu.hop1_shift = HOP1_SHIFT;
+ prop->pmmu.hop2_shift = HOP2_SHIFT;
+ prop->pmmu.hop3_shift = HOP3_SHIFT;
+ prop->pmmu.hop4_shift = HOP4_SHIFT;
+ prop->pmmu.hop0_mask = HOP0_MASK;
+ prop->pmmu.hop1_mask = HOP1_MASK;
+ prop->pmmu.hop2_mask = HOP2_MASK;
+ prop->pmmu.hop3_mask = HOP3_MASK;
+ prop->pmmu.hop4_mask = HOP4_MASK;
+ prop->pmmu.start_addr = VA_HOST_SPACE_START;
+ prop->pmmu.end_addr =
+ (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
+ prop->pmmu.page_size = PAGE_SIZE_4KB;
+
+	/* PMMU and HPMMU are the same except for the page size */
+ memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
+ prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
+
+ /* shifts and masks are the same in PMMU and DMMU */
+ memcpy(&prop->dmmu, &prop->pmmu, sizeof(prop->pmmu));
+ prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2);
+ prop->dmmu.end_addr = VA_HOST_SPACE_END;
+ prop->dmmu.page_size = PAGE_SIZE_2MB;
+
+ prop->cfg_size = CFG_SIZE;
+ prop->max_asid = MAX_ASID;
+ prop->num_of_events = GAUDI_EVENT_SIZE;
+ prop->tpc_enabled_mask = TPC_ENABLED_MASK;
+
+ prop->max_power_default = MAX_POWER_DEFAULT;
+
+ prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
+ prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;
+
+ prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
+ prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
+
+ strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
+
+ return 0;
+}
+
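+/*
+ * Map the three PCI BARs (SRAM, CFG and HBM); only the HBM BAR is mapped
+ * write-combined. The register I/O base (rmmio) points into the CFG BAR,
+ * which starts at the SPI flash base, hence the offset below.
+ */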
+static int gaudi_pci_bars_map(struct hl_device *hdev)
+{
+ static const char * const name[] = {"SRAM", "CFG", "HBM"};
+ bool is_wc[3] = {false, false, true};
+ int rc;
+
+ rc = hl_pci_bars_map(hdev, name, is_wc);
+ if (rc)
+ return rc;
+
+ hdev->rmmio = hdev->pcie_bar[CFG_BAR_ID] +
+ (CFG_BASE - SPI_FLASH_BASE_ADDR);
+
+ return 0;
+}
+
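+/*
+ * Move the HBM BAR window so the given device address becomes accessible
+ * through it. Returns the previous window base on success (so the caller
+ * can restore it) or U64_MAX on failure.
+ */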
+static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 old_addr = addr;
+ int rc;
+
+	if (gaudi && (gaudi->hbm_bar_cur_addr == addr))
+ return old_addr;
+
+ /* Inbound Region 2 - Bar 4 - Point to HBM */
+ rc = hl_pci_set_dram_bar_base(hdev, 2, 4, addr);
+ if (rc)
+ return U64_MAX;
+
+ if (gaudi) {
+ old_addr = gaudi->hbm_bar_cur_addr;
+ gaudi->hbm_bar_cur_addr = addr;
+ }
+
+ return old_addr;
+}
+
+static int gaudi_init_iatu(struct hl_device *hdev)
+{
+ int rc = 0;
+
+ /* Inbound Region 1 - Bar 2 - Point to SPI FLASH */
+ rc = hl_pci_iatu_write(hdev, 0x314,
+ lower_32_bits(SPI_FLASH_BASE_ADDR));
+ rc |= hl_pci_iatu_write(hdev, 0x318,
+ upper_32_bits(SPI_FLASH_BASE_ADDR));
+ rc |= hl_pci_iatu_write(hdev, 0x300, 0);
+ /* Enable + Bar match + match enable */
+ rc |= hl_pci_iatu_write(hdev, 0x304, 0xC0080200);
+
+ if (rc)
+ return -EIO;
+
+ return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
+ HOST_PHYS_BASE, HOST_PHYS_SIZE);
+}
+
+static int gaudi_early_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pci_dev *pdev = hdev->pdev;
+ int rc;
+
+ rc = gaudi_get_fixed_properties(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get fixed properties\n");
+ return rc;
+ }
+
+ /* Check BAR sizes */
+ if (pci_resource_len(pdev, SRAM_BAR_ID) != SRAM_BAR_SIZE) {
+ dev_err(hdev->dev,
+ "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
+ SRAM_BAR_ID,
+ (unsigned long long) pci_resource_len(pdev,
+ SRAM_BAR_ID),
+ SRAM_BAR_SIZE);
+ return -ENODEV;
+ }
+
+ if (pci_resource_len(pdev, CFG_BAR_ID) != CFG_BAR_SIZE) {
+ dev_err(hdev->dev,
+ "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
+ CFG_BAR_ID,
+ (unsigned long long) pci_resource_len(pdev,
+ CFG_BAR_ID),
+ CFG_BAR_SIZE);
+ return -ENODEV;
+ }
+
+ prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
+
+ rc = hl_pci_init(hdev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int gaudi_early_fini(struct hl_device *hdev)
+{
+ hl_pci_fini(hdev);
+
+ return 0;
+}
+
+/**
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
+ *
+ * @hdev: pointer to hl_device structure
+ */
+static void gaudi_fetch_psoc_frequency(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
+ prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
+ prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
+ prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+}
+
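+/*
+ * _gaudi_init_tpc_mem() - Helper for TPC memory initialization.
+ * @hdev: Pointer to hl_device structure.
+ * @tpc_kernel_src_addr: DMA address of the TPC kernel image in host memory.
+ * @tpc_kernel_size: Size of the TPC kernel image in bytes.
+ *
+ * Build a single LIN_DMA packet that copies the TPC kernel from host memory
+ * to the user SRAM base, submit it on QMAN0 and then run the kernel on each
+ * of the TPC engines.
+ *
+ * Return: 0 for success, negative value for error.
+ */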
+static int _gaudi_init_tpc_mem(struct hl_device *hdev,
+ dma_addr_t tpc_kernel_src_addr, u32 tpc_kernel_size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct packet_lin_dma *init_tpc_mem_pkt;
+ struct hl_cs_job *job;
+ struct hl_cb *cb;
+ u64 dst_addr;
+ u32 cb_size, ctl;
+ u8 tpc_id;
+ int rc;
+
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (!cb)
+ return -EFAULT;
+
+ init_tpc_mem_pkt = (struct packet_lin_dma *) (uintptr_t)
+ cb->kernel_address;
+ cb_size = sizeof(*init_tpc_mem_pkt);
+ memset(init_tpc_mem_pkt, 0, cb_size);
+
+ init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size);
+
+ ctl = ((PACKET_LIN_DMA << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_LIN_DMA_CTL_LIN_SHIFT) |
+ (1 << GAUDI_PKT_CTL_RB_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT));
+
+ init_tpc_mem_pkt->ctl = cpu_to_le32(ctl);
+
+ init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr);
+ dst_addr = (prop->sram_user_base_address &
+ GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
+ GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
+ init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr);
+
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto release_cb;
+ }
+
+ job->id = 0;
+ job->user_cb = cb;
+ job->user_cb->cs_cnt++;
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = gaudi_send_job_on_qman0(hdev, job);
+
+ if (rc)
+ goto free_job;
+
+ for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
+ rc = gaudi_run_tpc_kernel(hdev, dst_addr, tpc_id);
+ if (rc)
+ break;
+ }
+
+free_job:
+ hl_userptr_delete_list(hdev, &job->userptr_list);
+ hl_debugfs_remove_job(hdev, job);
+ kfree(job);
+ cb->cs_cnt--;
+
+release_cb:
+ hl_cb_put(cb);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ return rc;
+}
+
+/**
+ * gaudi_init_tpc_mem() - Initialize TPC memories.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Copy TPC kernel fw from firmware file and run it to initialize TPC memories.
+ *
+ * Return: 0 for success, negative value for error.
+ */
+static int gaudi_init_tpc_mem(struct hl_device *hdev)
+{
+ const struct firmware *fw;
+ size_t fw_size;
+ void *cpu_addr;
+ dma_addr_t dma_handle;
+ int rc;
+
+ rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+ if (rc) {
+ dev_err(hdev->dev, "Firmware file %s is not found!\n",
+ GAUDI_TPC_FW_FILE);
+ goto out;
+ }
+
+ fw_size = fw->size;
+ cpu_addr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, fw_size,
+ &dma_handle, GFP_KERNEL | __GFP_ZERO);
+ if (!cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate %zu of dma memory for TPC kernel\n",
+ fw_size);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(cpu_addr, fw->data, fw_size);
+
+ rc = _gaudi_init_tpc_mem(hdev, dma_handle, fw_size);
+
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, fw->size, cpu_addr,
+ dma_handle);
+
+out:
+ release_firmware(fw);
+ return rc;
+}
+
+static int gaudi_late_init(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int rc;
+
+ rc = gaudi->armcp_info_get(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get armcp info\n");
+ return rc;
+ }
+
+ rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ return rc;
+ }
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
+
+ gaudi_fetch_psoc_frequency(hdev);
+
+ rc = gaudi_mmu_clear_pgt_range(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
+ goto disable_pci_access;
+ }
+
+ rc = gaudi_init_tpc_mem(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize TPC memories\n");
+ goto disable_pci_access;
+ }
+
+ return 0;
+
+disable_pci_access:
+ hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+
+ return rc;
+}
+
+static void gaudi_late_fini(struct hl_device *hdev)
+{
+ const struct hwmon_channel_info **channel_info_arr;
+ int i = 0;
+
+ if (!hdev->hl_chip_info->info)
+ return;
+
+ channel_info_arr = hdev->hl_chip_info->info;
+
+ while (channel_info_arr[i]) {
+ kfree(channel_info_arr[i]->config);
+ kfree(channel_info_arr[i]);
+ i++;
+ }
+
+ kfree(channel_info_arr);
+
+ hdev->hl_chip_info->info = NULL;
+}
+
+static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
+{
+ dma_addr_t dma_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {}, end_addr;
+ void *virt_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {};
+ int i, j, rc = 0;
+
+ /*
+	 * The device CPU works with 40-bit addresses, while bit 39 must be set
+	 * to '1' when accessing the host.
+	 * Bits 49:39 of the full host address are saved for a later
+	 * configuration of the HW to perform extension to 50 bits.
+	 * Because there is a single HW register that holds the extension bits,
+	 * these bits must be identical across the entire allocated range.
+ */
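+	/*
+	 * Illustrative example: if an allocation returns a range whose start
+	 * is just below a bit-39 boundary and whose end is above it, bits
+	 * 49:39 of the start and end addresses differ, so the range is
+	 * discarded and the allocation is retried.
+	 */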
+
+ for (i = 0 ; i < GAUDI_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
+ virt_addr_arr[i] =
+ hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
+ &dma_addr_arr[i],
+ GFP_KERNEL | __GFP_ZERO);
+ if (!virt_addr_arr[i]) {
+ rc = -ENOMEM;
+ goto free_dma_mem_arr;
+ }
+
+ end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1;
+ if (GAUDI_CPU_PCI_MSB_ADDR(dma_addr_arr[i]) ==
+ GAUDI_CPU_PCI_MSB_ADDR(end_addr))
+ break;
+ }
+
+ if (i == GAUDI_ALLOC_CPU_MEM_RETRY_CNT) {
+ dev_err(hdev->dev,
+ "MSB of CPU accessible DMA memory are not identical in all range\n");
+ rc = -EFAULT;
+ goto free_dma_mem_arr;
+ }
+
+ hdev->cpu_accessible_dma_mem = virt_addr_arr[i];
+ hdev->cpu_accessible_dma_address = dma_addr_arr[i];
+ hdev->cpu_pci_msb_addr =
+ GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address);
+
+ GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);
+
+free_dma_mem_arr:
+ for (j = 0 ; j < i ; j++)
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
+ virt_addr_arr[j],
+ dma_addr_arr[j]);
+
+ return rc;
+}
+
+static void gaudi_free_internal_qmans_pq_mem(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ u32 i;
+
+ for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+ q = &gaudi->internal_qmans[i];
+ if (!q->pq_kernel_addr)
+ continue;
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, q->pq_size,
+ q->pq_kernel_addr,
+ q->pq_dma_addr);
+ }
+}
+
+static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ int rc, i;
+
+ for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+ if (gaudi_queue_type[i] != QUEUE_TYPE_INT)
+ continue;
+
+ q = &gaudi->internal_qmans[i];
+
+ switch (i) {
+ case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_4_3:
+ case GAUDI_QUEUE_ID_DMA_6_0 ... GAUDI_QUEUE_ID_DMA_7_3:
+ q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES;
+ break;
+ case GAUDI_QUEUE_ID_MME_0_0 ... GAUDI_QUEUE_ID_MME_1_3:
+ q->pq_size = MME_QMAN_SIZE_IN_BYTES;
+ break;
+ case GAUDI_QUEUE_ID_TPC_0_0 ... GAUDI_QUEUE_ID_TPC_7_3:
+ q->pq_size = TPC_QMAN_SIZE_IN_BYTES;
+ break;
+ default:
+ dev_err(hdev->dev, "Bad internal queue index %d", i);
+ rc = -EINVAL;
+ goto free_internal_qmans_pq_mem;
+ }
+
+ q->pq_kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
+ hdev, q->pq_size,
+ &q->pq_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!q->pq_kernel_addr) {
+ rc = -ENOMEM;
+ goto free_internal_qmans_pq_mem;
+ }
+ }
+
+ return 0;
+
+free_internal_qmans_pq_mem:
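+	/*
+	 * Cache the sync manager addresses (east-north and west-south monitor
+	 * payload and sync object registers) that are programmed below as the
+	 * MSG_BASE targets of this QMAN's CPs.
+	 */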
+ gaudi_free_internal_qmans_pq_mem(hdev);
+ return rc;
+}
+
+static int gaudi_sw_init(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi;
+ u32 i, event_id = 0;
+ int rc;
+
+ /* Allocate device structure */
+ gaudi = kzalloc(sizeof(*gaudi), GFP_KERNEL);
+ if (!gaudi)
+ return -ENOMEM;
+
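+	/*
+	 * Build a dense events array from the sparse irq map table; only
+	 * valid entries are copied, each keeping its fc_id.
+	 */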
+ for (i = 0 ; i < ARRAY_SIZE(gaudi_irq_map_table) ; i++) {
+ if (gaudi_irq_map_table[i].valid) {
+ if (event_id == GAUDI_EVENT_SIZE) {
+ dev_err(hdev->dev,
+ "Event array exceeds the limit of %u events\n",
+ GAUDI_EVENT_SIZE);
+ rc = -EINVAL;
+ goto free_gaudi_device;
+ }
+
+ gaudi->events[event_id++] =
+ gaudi_irq_map_table[i].fc_id;
+ }
+ }
+
+ gaudi->armcp_info_get = gaudi_armcp_info_get;
+
+ gaudi->max_freq_value = GAUDI_MAX_CLK_FREQ;
+
+ hdev->asic_specific = gaudi;
+
+ /* Create DMA pool for small allocations */
+ hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
+ &hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
+ if (!hdev->dma_pool) {
+ dev_err(hdev->dev, "failed to create DMA pool\n");
+ rc = -ENOMEM;
+ goto free_gaudi_device;
+ }
+
+ rc = gaudi_alloc_cpu_accessible_dma_mem(hdev);
+ if (rc)
+ goto free_dma_pool;
+
+ hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
+ if (!hdev->cpu_accessible_dma_pool) {
+ dev_err(hdev->dev,
+ "Failed to create CPU accessible DMA pool\n");
+ rc = -ENOMEM;
+ goto free_cpu_dma_mem;
+ }
+
+ rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
+ (uintptr_t) hdev->cpu_accessible_dma_mem,
+ HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add memory to CPU accessible DMA pool\n");
+ rc = -EFAULT;
+ goto free_cpu_accessible_dma_pool;
+ }
+
+ rc = gaudi_alloc_internal_qmans_pq_mem(hdev);
+ if (rc)
+ goto free_cpu_accessible_dma_pool;
+
+ spin_lock_init(&gaudi->hw_queues_lock);
+ mutex_init(&gaudi->clk_gate_mutex);
+
+ hdev->supports_sync_stream = true;
+ hdev->supports_coresight = true;
+
+ return 0;
+
+free_cpu_accessible_dma_pool:
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+free_cpu_dma_mem:
+ GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
+ hdev->cpu_pci_msb_addr);
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+free_dma_pool:
+ dma_pool_destroy(hdev->dma_pool);
+free_gaudi_device:
+ kfree(gaudi);
+ return rc;
+}
+
+static int gaudi_sw_fini(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ gaudi_free_internal_qmans_pq_mem(hdev);
+
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+
+ GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
+ hdev->cpu_pci_msb_addr);
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+
+ dma_pool_destroy(hdev->dma_pool);
+
+ mutex_destroy(&gaudi->clk_gate_mutex);
+
+ kfree(gaudi);
+
+ return 0;
+}
+
+static irqreturn_t gaudi_irq_handler_single(int irq, void *arg)
+{
+ struct hl_device *hdev = arg;
+ int i;
+
+ if (hdev->disabled)
+ return IRQ_HANDLED;
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ hl_irq_handler_cq(irq, &hdev->completion_queue[i]);
+
+ hl_irq_handler_eq(irq, &hdev->event_queue);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * For backward compatibility, new MSI interrupts should be set after the
+ * existing CPU and NIC interrupts.
+ */
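+/*
+ * For example: vectors below GAUDI_EVENT_QUEUE_MSI_IDX map 1:1 to completion
+ * queues and the CPU EQ uses GAUDI_EVENT_QUEUE_MSI_IDX itself, while any
+ * newer index is pushed past the NIC engines and the CPU EQ, i.e. to
+ * nr + NIC_NUMBER_OF_ENGINES + 1.
+ */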
+static int gaudi_pci_irq_vector(struct hl_device *hdev, unsigned int nr,
+ bool cpu_eq)
+{
+ int msi_vec;
+
+ if ((nr != GAUDI_EVENT_QUEUE_MSI_IDX) && (cpu_eq))
+ dev_crit(hdev->dev, "CPU EQ must use IRQ %d\n",
+ GAUDI_EVENT_QUEUE_MSI_IDX);
+
+ msi_vec = ((nr < GAUDI_EVENT_QUEUE_MSI_IDX) || (cpu_eq)) ? nr :
+ (nr + NIC_NUMBER_OF_ENGINES + 1);
+
+ return pci_irq_vector(hdev->pdev, msi_vec);
+}
+
+static int gaudi_enable_msi_single(struct hl_device *hdev)
+{
+ int rc, irq;
+
+ dev_info(hdev->dev, "Working in single MSI IRQ mode\n");
+
+ irq = gaudi_pci_irq_vector(hdev, 0, false);
+ rc = request_irq(irq, gaudi_irq_handler_single, 0,
+ "gaudi single msi", hdev);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to request single MSI IRQ\n");
+
+ return rc;
+}
+
+static int gaudi_enable_msi_multi(struct hl_device *hdev)
+{
+ int cq_cnt = hdev->asic_prop.completion_queues_count;
+ int rc, i, irq_cnt_init, irq;
+
+ for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
+ irq = gaudi_pci_irq_vector(hdev, i, false);
+ rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi_irq_name[i],
+ &hdev->completion_queue[i]);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_irqs;
+ }
+ }
+
+ irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX, true);
+ rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi_irq_name[cq_cnt],
+ &hdev->event_queue);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_irqs;
+ }
+
+ return 0;
+
+free_irqs:
+ for (i = 0 ; i < irq_cnt_init ; i++)
+ free_irq(gaudi_pci_irq_vector(hdev, i, false),
+ &hdev->completion_queue[i]);
+ return rc;
+}
+
+static int gaudi_enable_msi(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int rc;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MSI)
+ return 0;
+
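+	/*
+	 * Request up to GAUDI_MSI_ENTRIES MSI vectors. If fewer than
+	 * NUMBER_OF_INTERRUPTS are granted, fall back to single-MSI mode in
+	 * which one vector serves all completion queues and the event queue.
+	 */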
+ rc = pci_alloc_irq_vectors(hdev->pdev, 1, GAUDI_MSI_ENTRIES,
+ PCI_IRQ_MSI);
+ if (rc < 0) {
+ dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc);
+ return rc;
+ }
+
+ if (rc < NUMBER_OF_INTERRUPTS) {
+ gaudi->multi_msi_mode = false;
+ rc = gaudi_enable_msi_single(hdev);
+ } else {
+ gaudi->multi_msi_mode = true;
+ rc = gaudi_enable_msi_multi(hdev);
+ }
+
+ if (rc)
+ goto free_pci_irq_vectors;
+
+ gaudi->hw_cap_initialized |= HW_CAP_MSI;
+
+ return 0;
+
+free_pci_irq_vectors:
+ pci_free_irq_vectors(hdev->pdev);
+ return rc;
+}
+
+static void gaudi_sync_irqs(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int i, cq_cnt = hdev->asic_prop.completion_queues_count;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
+ return;
+
+ /* Wait for all pending IRQs to be finished */
+ if (gaudi->multi_msi_mode) {
+ for (i = 0 ; i < cq_cnt ; i++)
+ synchronize_irq(gaudi_pci_irq_vector(hdev, i, false));
+
+ synchronize_irq(gaudi_pci_irq_vector(hdev,
+ GAUDI_EVENT_QUEUE_MSI_IDX,
+ true));
+ } else {
+ synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false));
+ }
+}
+
+static void gaudi_disable_msi(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int i, irq, cq_cnt = hdev->asic_prop.completion_queues_count;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
+ return;
+
+ gaudi_sync_irqs(hdev);
+
+ if (gaudi->multi_msi_mode) {
+ irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX,
+ true);
+ free_irq(irq, &hdev->event_queue);
+
+ for (i = 0 ; i < cq_cnt ; i++) {
+ irq = gaudi_pci_irq_vector(hdev, i, false);
+ free_irq(irq, &hdev->completion_queue[i]);
+ }
+ } else {
+ free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev);
+ }
+
+ pci_free_irq_vectors(hdev->pdev);
+
+ gaudi->hw_cap_initialized &= ~HW_CAP_MSI;
+}
+
+static void gaudi_init_scrambler_sram(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER)
+ return;
+
+ if (!hdev->sram_scrambler_enable)
+ return;
+
+ WREG32(mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_2_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_3_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_4_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_5_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_6_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_7_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_0_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_1_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_2_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_3_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_4_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_6_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_7_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+
+ gaudi->hw_cap_initialized |= HW_CAP_SRAM_SCRAMBLER;
+}
+
+static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER)
+ return;
+
+ if (!hdev->dram_scrambler_enable)
+ return;
+
+ WREG32(mmNIF_RTR_CTRL_0_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_1_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_2_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_3_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_4_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_5_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_6_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_7_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_0_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_1_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_2_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_3_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_4_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_5_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_6_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_7_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+
+ gaudi->hw_cap_initialized |= HW_CAP_HBM_SCRAMBLER;
+}
+
+static void gaudi_init_e2e(struct hl_device *hdev)
+{
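+	/*
+	 * Note: the HBM credit values appear to be byte counts that the H/W
+	 * expects in units of 8 bytes, hence the ">> 3" below. The PCI values
+	 * are written as-is.
+	 */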
+ WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3);
+ WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 785 >> 3);
+ WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 49);
+ WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 101);
+
+ WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
+ WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
+ WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);
+
+ WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
+ WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
+ WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
+ WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
+ WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
+ WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
+ WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);
+
+ WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 297 >> 3);
+ WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 908 >> 3);
+ WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 19);
+
+ WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 318 >> 3);
+ WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 956 >> 3);
+ WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 79);
+ WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 163);
+
+ WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
+ WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
+ WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);
+
+ WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
+ WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
+ WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
+ WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
+ WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
+ WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
+ WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);
+
+ WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 318 >> 3);
+ WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 956 >> 3);
+ WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 79);
+ WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 79);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
+
+ if (!hdev->dram_scrambler_enable) {
+ WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
+ }
+
+ WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+}
+
+static void gaudi_init_hbm_cred(struct hl_device *hdev)
+{
+	u32 hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
+
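+	/*
+	 * Credit values are presumably packed per sub-channel, one nibble
+	 * each (e.g. 0x33333333 grants 3 write credits to each of eight
+	 * sub-channels).
+	 */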
+ hbm0_wr = 0x33333333;
+ hbm1_wr = 0x33333333;
+ hbm0_rd = 0x77777777;
+ hbm1_rd = 0xDDDDDDDD;
+
+ WREG32(mmDMA_IF_E_N_HBM0_WR_CRED_CNT, hbm0_wr);
+ WREG32(mmDMA_IF_E_N_HBM1_WR_CRED_CNT, hbm1_wr);
+ WREG32(mmDMA_IF_E_N_HBM0_RD_CRED_CNT, hbm0_rd);
+ WREG32(mmDMA_IF_E_N_HBM1_RD_CRED_CNT, hbm1_rd);
+
+ WREG32(mmDMA_IF_E_S_HBM0_WR_CRED_CNT, hbm0_wr);
+ WREG32(mmDMA_IF_E_S_HBM1_WR_CRED_CNT, hbm1_wr);
+ WREG32(mmDMA_IF_E_S_HBM0_RD_CRED_CNT, hbm0_rd);
+ WREG32(mmDMA_IF_E_S_HBM1_RD_CRED_CNT, hbm1_rd);
+
+ WREG32(mmDMA_IF_W_N_HBM0_WR_CRED_CNT, hbm0_wr);
+ WREG32(mmDMA_IF_W_N_HBM1_WR_CRED_CNT, hbm1_wr);
+ WREG32(mmDMA_IF_W_N_HBM0_RD_CRED_CNT, hbm0_rd);
+ WREG32(mmDMA_IF_W_N_HBM1_RD_CRED_CNT, hbm1_rd);
+
+ WREG32(mmDMA_IF_W_S_HBM0_WR_CRED_CNT, hbm0_wr);
+ WREG32(mmDMA_IF_W_S_HBM1_WR_CRED_CNT, hbm1_wr);
+ WREG32(mmDMA_IF_W_S_HBM0_RD_CRED_CNT, hbm0_rd);
+ WREG32(mmDMA_IF_W_S_HBM1_RD_CRED_CNT, hbm1_rd);
+
+ WREG32(mmDMA_IF_E_N_HBM_CRED_EN_0,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_E_S_HBM_CRED_EN_0,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_W_N_HBM_CRED_EN_0,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_W_S_HBM_CRED_EN_0,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+
+ WREG32(mmDMA_IF_E_N_HBM_CRED_EN_1,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_E_S_HBM_CRED_EN_1,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_W_N_HBM_CRED_EN_1,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_W_S_HBM_CRED_EN_1,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+}
+
+static void gaudi_init_rate_limiter(struct hl_device *hdev)
+{
+ u32 nr, nf, od, sat, rst, timeout;
+ u64 freq;
+
+ nr = RREG32(mmPSOC_HBM_PLL_NR);
+ nf = RREG32(mmPSOC_HBM_PLL_NF);
+ od = RREG32(mmPSOC_HBM_PLL_OD);
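+	/*
+	 * Standard integer PLL relation, assuming a 50 MHz reference clock:
+	 * freq[MHz] = 50 * (NF + 1) / ((NR + 1) * (OD + 1))
+	 */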
+ freq = (50 * (nf + 1)) / ((nr + 1) * (od + 1));
+
+	dev_dbg(hdev->dev, "HBM frequency is %llu MHz\n", freq);
+
+ /* Configuration is for five (5) DDMA channels */
+ if (freq == 800) {
+ sat = 4;
+ rst = 11;
+ timeout = 15;
+ } else if (freq == 900) {
+ sat = 4;
+ rst = 15;
+ timeout = 16;
+ } else if (freq == 950) {
+ sat = 4;
+ rst = 15;
+ timeout = 15;
+ } else {
+ dev_warn(hdev->dev,
+ "unsupported HBM frequency %lluMHz, no rate-limiters\n",
+ freq);
+ return;
+ }
+
+ WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_0, 0x111);
+ WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_1, 0x111);
+ WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_0, 0x111);
+ WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_1, 0x111);
+ WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_0, 0x111);
+ WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_1, 0x111);
+ WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_0, 0x111);
+ WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_1, 0x111);
+
+ if (!hdev->rl_enable) {
+ dev_info(hdev->dev, "Rate limiters disabled\n");
+ return;
+ }
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_SAT, sat);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_RST, rst);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_EN, 1);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_SAT, sat);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RST, rst);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_EN, 1);
+}
+
+static void gaudi_init_golden_registers(struct hl_device *hdev)
+{
+ u32 tpc_offset;
+ int tpc_id, i;
+
+ gaudi_init_e2e(hdev);
+
+ gaudi_init_hbm_cred(hdev);
+
+ gaudi_init_rate_limiter(hdev);
+
+ gaudi_disable_clock_gating(hdev);
+
+ for (tpc_id = 0, tpc_offset = 0;
+ tpc_id < TPC_NUMBER_OF_ENGINES;
+ tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
+ /* Mask all arithmetic interrupts from TPC */
+ WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFF);
+ /* Set 16 cache lines */
+ WREG32_FIELD(TPC0_CFG_MSS_CONFIG, tpc_offset,
+ ICACHE_FETCH_LINE_NUM, 2);
+ }
+
+ /* Make sure 1st 128 bytes in SRAM are 0 for Tensor DMA */
+ for (i = 0 ; i < 128 ; i += 8)
+ writeq(0, hdev->pcie_bar[SRAM_BAR_ID] + i);
+
+ WREG32(mmMME0_CTRL_EUS_ROLLUP_CNT_ADD, 3);
+ WREG32(mmMME1_CTRL_EUS_ROLLUP_CNT_ADD, 3);
+ WREG32(mmMME2_CTRL_EUS_ROLLUP_CNT_ADD, 3);
+ WREG32(mmMME3_CTRL_EUS_ROLLUP_CNT_ADD, 3);
+
+ /* WA for H3-2081 */
+ WREG32(mmPCIE_WRAP_MAX_OUTSTAND, 0x10ff);
+}
+
+static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
+ int qman_id, dma_addr_t qman_pq_addr)
+{
+ u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
+ u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
+ u32 q_off, dma_qm_offset;
+ u32 dma_qm_err_cfg;
+
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+
+ mtr_base_en_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_en_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_en_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_en_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ mtr_base_ws_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_ws_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_ws_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_ws_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ q_off = dma_qm_offset + qman_id * 4;
+
+ WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_pq_addr));
+ WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_pq_addr));
+
+ WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HL_QUEUE_LENGTH));
+ WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
+ WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);
+
+ WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
+ WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
+ WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+
+ WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
+ WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
+ WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi);
+ WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
+
+ /* The following configuration is needed only once per QMAN */
+ if (qman_id == 0) {
+ /* Configure RAZWI IRQ */
+ dma_qm_err_cfg = PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+ if (hdev->stop_on_err) {
+ dma_qm_err_cfg |=
+ PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
+ }
+
+ WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);
+ WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
+ lower_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
+ upper_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
+ dma_id);
+
+ WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
+ QM_ARB_ERR_MSG_EN_MASK);
+
+ /* Increase ARB WDT to support streams architecture */
+ WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
+ GAUDI_ARB_WDT_TIMEOUT);
+
+ WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
+ QMAN_EXTERNAL_MAKE_TRUSTED);
+
+ WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
+ }
+}
+
+static void gaudi_init_dma_core(struct hl_device *hdev, int dma_id)
+{
+ u32 dma_offset = dma_id * DMA_CORE_OFFSET;
+ u32 dma_err_cfg = 1 << DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT;
+
+ /* Set to maximum possible according to physical size */
+ WREG32(mmDMA0_CORE_RD_MAX_OUTSTAND + dma_offset, 0);
+ WREG32(mmDMA0_CORE_RD_MAX_SIZE + dma_offset, 0);
+
+	/*
+	 * With STOP_ON set, an operation that hits a RAZWI is stopped
+	 * without receiving a completion.
+	 */
+ if (hdev->stop_on_err)
+ dma_err_cfg |= 1 << DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT;
+
+ WREG32(mmDMA0_CORE_ERR_CFG + dma_offset, dma_err_cfg);
+ WREG32(mmDMA0_CORE_ERRMSG_ADDR_LO + dma_offset,
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_CORE_ERRMSG_ADDR_HI + dma_offset,
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_CORE_ERRMSG_WDATA + dma_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_DMA0_CORE].cpu_id + dma_id);
+ WREG32(mmDMA0_CORE_PROT + dma_offset,
+ 1 << DMA0_CORE_PROT_ERR_VAL_SHIFT);
+ /* If the channel is secured, it should be in MMU bypass mode */
+ WREG32(mmDMA0_CORE_SECURE_PROPS + dma_offset,
+ 1 << DMA0_CORE_SECURE_PROPS_MMBP_SHIFT);
+ WREG32(mmDMA0_CORE_CFG_0 + dma_offset, 1 << DMA0_CORE_CFG_0_EN_SHIFT);
+}
+
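+/*
+ * Enable a DMA QMAN by writing the given mask (e.g. PCI_DMA_QMAN_ENABLE
+ * or HBM_DMA_QMAN_ENABLE) to its global configuration register.
+ */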
+static void gaudi_enable_qman(struct hl_device *hdev, int dma_id,
+ u32 enable_mask)
+{
+ u32 dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+
+ WREG32(mmDMA0_QM_GLBL_CFG0 + dma_qm_offset, enable_mask);
+}
+
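+/*
+ * Initialize all PCI DMA QMANs: assign a CQ and an MSI vector to every
+ * stream of every PCI DMA channel, then configure the DMA core and
+ * enable the QMAN of each channel.
+ */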
+static void gaudi_init_pci_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_hw_queue *q;
+ int i, j, dma_id, cpu_skip, nic_skip, cq_id = 0, q_idx, msi_vec = 0;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)
+ return;
+
+ for (i = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
+ dma_id = gaudi_dma_assignment[i];
+ /*
+ * For queues after the CPU Q, we need to add 1 to get the
+ * correct queue index. In addition, we need to account for
+ * the CPU EQ and the NIC IRQs in order to get the correct
+ * MSI vector.
+ */
+ if (dma_id > 1) {
+ cpu_skip = 1;
+ nic_skip = NIC_NUMBER_OF_ENGINES;
+ } else {
+ cpu_skip = 0;
+ nic_skip = 0;
+ }
+
+ for (j = 0 ; j < QMAN_STREAMS ; j++) {
+ q_idx = 4 * dma_id + j + cpu_skip;
+ q = &hdev->kernel_queues[q_idx];
+ q->cq_id = cq_id++;
+ q->msi_vec = nic_skip + cpu_skip + msi_vec++;
+ gaudi_init_pci_dma_qman(hdev, dma_id, j,
+ q->bus_address);
+ }
+
+ gaudi_init_dma_core(hdev, dma_id);
+
+ gaudi_enable_qman(hdev, dma_id, PCI_DMA_QMAN_ENABLE);
+ }
+
+ gaudi->hw_cap_initialized |= HW_CAP_PCI_DMA;
+}
+
+static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
+ int qman_id, u64 qman_base_addr)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 q_off, dma_qm_offset;
+ u32 dma_qm_err_cfg;
+
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ q_off = dma_qm_offset + qman_id * 4;
+
+ if (qman_id < 4) {
+ WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off,
+ lower_32_bits(qman_base_addr));
+ WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off,
+ upper_32_bits(qman_base_addr));
+
+ WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HBM_DMA_QMAN_LENGTH));
+ WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
+ WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);
+
+ WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x81BC);
+ WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x81B4);
+ WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ } else {
+ WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
+ WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
+ WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+
+ /* Configure RAZWI IRQ */
+ dma_qm_err_cfg = HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+ if (hdev->stop_on_err) {
+ dma_qm_err_cfg |=
+ HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
+ }
+ WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);
+
+ WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
+ lower_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
+ upper_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
+ dma_id);
+
+ WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
+ QM_ARB_ERR_MSG_EN_MASK);
+
+ /* Increase ARB WDT to support streams architecture */
+ WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
+ GAUDI_ARB_WDT_TIMEOUT);
+
+ WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
+ WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
+ QMAN_INTERNAL_MAKE_TRUSTED);
+ }
+
+ WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
+ WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
+}
+
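+/*
+ * Initialize all HBM DMA QMANs. Each of the four upper CPs gets its own
+ * PQ, while the lower CP (qman_id 4) is initialized without a PQ
+ * (qman_base_addr == 0).
+ */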
+static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ u64 qman_base_addr;
+ int i, j, dma_id, internal_q_index;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)
+ return;
+
+ for (i = 0 ; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1 + i];
+
+ for (j = 0 ; j < QMAN_STREAMS ; j++) {
+ /*
+ * Add 1 for the CPU queue in order to get the correct queue
+ * index, as all internal queues are placed after it
+ */
+ internal_q_index = dma_id * QMAN_STREAMS + j + 1;
+
+ q = &gaudi->internal_qmans[internal_q_index];
+ qman_base_addr = (u64) q->pq_dma_addr;
+ gaudi_init_hbm_dma_qman(hdev, dma_id, j,
+ qman_base_addr);
+ }
+
+ /* Initializing lower CP for HBM DMA QMAN */
+ gaudi_init_hbm_dma_qman(hdev, dma_id, 4, 0);
+
+ gaudi_init_dma_core(hdev, dma_id);
+
+ gaudi_enable_qman(hdev, dma_id, HBM_DMA_QMAN_ENABLE);
+ }
+
+ gaudi->hw_cap_initialized |= HW_CAP_HBM_DMA;
+}
+
+static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
+ int qman_id, u64 qman_base_addr)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 q_off, mme_id;
+ u32 mme_qm_err_cfg;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ q_off = mme_offset + qman_id * 4;
+
+ if (qman_id < 4) {
+ WREG32(mmMME0_QM_PQ_BASE_LO_0 + q_off,
+ lower_32_bits(qman_base_addr));
+ WREG32(mmMME0_QM_PQ_BASE_HI_0 + q_off,
+ upper_32_bits(qman_base_addr));
+
+ WREG32(mmMME0_QM_PQ_SIZE_0 + q_off, ilog2(MME_QMAN_LENGTH));
+ WREG32(mmMME0_QM_PQ_PI_0 + q_off, 0);
+ WREG32(mmMME0_QM_PQ_CI_0 + q_off, 0);
+
+ WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x81BC);
+ WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x81B4);
+ WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ } else {
+ WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
+ WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
+ WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+
+ /* Configure RAZWI IRQ */
+ mme_id = mme_offset /
+ (mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0);
+
+ mme_qm_err_cfg = MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+ if (hdev->stop_on_err) {
+ mme_qm_err_cfg |=
+ MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
+ }
+ WREG32(mmMME0_QM_GLBL_ERR_CFG + mme_offset, mme_qm_err_cfg);
+ WREG32(mmMME0_QM_GLBL_ERR_ADDR_LO + mme_offset,
+ lower_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmMME0_QM_GLBL_ERR_ADDR_HI + mme_offset,
+ upper_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmMME0_QM_GLBL_ERR_WDATA + mme_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_MME0_QM].cpu_id +
+ mme_id);
+
+ WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset,
+ QM_ARB_ERR_MSG_EN_MASK);
+
+ /* Increase ARB WDT to support streams architecture */
+ WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset,
+ GAUDI_ARB_WDT_TIMEOUT);
+
+ WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0);
+ WREG32(mmMME0_QM_GLBL_PROT + mme_offset,
+ QMAN_INTERNAL_MAKE_TRUSTED);
+ }
+
+ WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
+ WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
+ WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
+ WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
+}
+
+static void gaudi_init_mme_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ u64 qman_base_addr;
+ u32 mme_offset;
+ int i, internal_q_index;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MME)
+ return;
+
+ /*
+ * map GAUDI_QUEUE_ID_MME_0_X to the N_W_MME (mmMME2_QM_BASE)
+ * and GAUDI_QUEUE_ID_MME_1_X to the S_W_MME (mmMME0_QM_BASE)
+ */
+
+ mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;
+
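+ /* The first 4 streams go to the MME2 QMAN, the next 4 to the MME0 QMAN */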
+ for (i = 0 ; i < MME_NUMBER_OF_QMANS ; i++) {
+ internal_q_index = GAUDI_QUEUE_ID_MME_0_0 + i;
+ q = &gaudi->internal_qmans[internal_q_index];
+ qman_base_addr = (u64) q->pq_dma_addr;
+ gaudi_init_mme_qman(hdev, mme_offset, (i & 0x3),
+ qman_base_addr);
+ if (i == 3)
+ mme_offset = 0;
+ }
+
+ /* Initializing lower CP for MME QMANs */
+ mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;
+ gaudi_init_mme_qman(hdev, mme_offset, 4, 0);
+ gaudi_init_mme_qman(hdev, 0, 4, 0);
+
+ WREG32(mmMME2_QM_GLBL_CFG0, QMAN_MME_ENABLE);
+ WREG32(mmMME0_QM_GLBL_CFG0, QMAN_MME_ENABLE);
+
+ gaudi->hw_cap_initialized |= HW_CAP_MME;
+}
+
+static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
+ int qman_id, u64 qman_base_addr)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 q_off, tpc_id;
+ u32 tpc_qm_err_cfg;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ q_off = tpc_offset + qman_id * 4;
+
+ if (qman_id < 4) {
+ WREG32(mmTPC0_QM_PQ_BASE_LO_0 + q_off,
+ lower_32_bits(qman_base_addr));
+ WREG32(mmTPC0_QM_PQ_BASE_HI_0 + q_off,
+ upper_32_bits(qman_base_addr));
+
+ WREG32(mmTPC0_QM_PQ_SIZE_0 + q_off, ilog2(TPC_QMAN_LENGTH));
+ WREG32(mmTPC0_QM_PQ_PI_0 + q_off, 0);
+ WREG32(mmTPC0_QM_PQ_CI_0 + q_off, 0);
+
+ WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x81BC);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x81B4);
+ WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ } else {
+ WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
+ WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+
+ /* Configure RAZWI IRQ */
+ tpc_id = tpc_offset /
+ (mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0);
+
+ tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+ if (hdev->stop_on_err) {
+ tpc_qm_err_cfg |=
+ TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
+ }
+
+ WREG32(mmTPC0_QM_GLBL_ERR_CFG + tpc_offset, tpc_qm_err_cfg);
+ WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + tpc_offset,
+ lower_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + tpc_offset,
+ upper_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmTPC0_QM_GLBL_ERR_WDATA + tpc_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_TPC0_QM].cpu_id +
+ tpc_id);
+
+ WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset,
+ QM_ARB_ERR_MSG_EN_MASK);
+
+ /* Increase ARB WDT to support streams architecture */
+ WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset,
+ GAUDI_ARB_WDT_TIMEOUT);
+
+ WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0);
+ WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset,
+ QMAN_INTERNAL_MAKE_TRUSTED);
+ }
+
+ WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
+ WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
+ WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
+ WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
+}
+
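+/*
+ * Initialize the QMANs of all TPC engines: four upper-CP streams plus a
+ * lower CP per TPC, and point the high part of each TPC's SM base
+ * address at the east-north sync manager objects.
+ */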
+static void gaudi_init_tpc_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ u64 qman_base_addr;
+ u32 so_base_hi, tpc_offset = 0;
+ u32 tpc_delta = mmTPC1_CFG_SM_BASE_ADDRESS_HIGH -
+ mmTPC0_CFG_SM_BASE_ADDRESS_HIGH;
+ int i, tpc_id, internal_q_index;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)
+ return;
+
+ so_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
+ for (i = 0 ; i < QMAN_STREAMS ; i++) {
+ internal_q_index = GAUDI_QUEUE_ID_TPC_0_0 +
+ tpc_id * QMAN_STREAMS + i;
+ q = &gaudi->internal_qmans[internal_q_index];
+ qman_base_addr = (u64) q->pq_dma_addr;
+ gaudi_init_tpc_qman(hdev, tpc_offset, i,
+ qman_base_addr);
+
+ if (i == 3) {
+ /* Initializing lower CP for TPC QMAN */
+ gaudi_init_tpc_qman(hdev, tpc_offset, 4, 0);
+
+ /* Enable the QMAN and TPC channel */
+ WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset,
+ QMAN_TPC_ENABLE);
+ }
+ }
+
+ WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + tpc_id * tpc_delta,
+ so_base_hi);
+
+ tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
+
+ gaudi->hw_cap_initialized |= 1 << (HW_CAP_TPC_SHIFT + tpc_id);
+ }
+}
+
+static void gaudi_disable_pci_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
+ return;
+
+ WREG32(mmDMA0_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA1_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA5_QM_GLBL_CFG0, 0);
+}
+
+static void gaudi_disable_hbm_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
+ return;
+
+ WREG32(mmDMA2_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA3_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA4_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA6_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA7_QM_GLBL_CFG0, 0);
+}
+
+static void gaudi_disable_mme_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
+ return;
+
+ WREG32(mmMME2_QM_GLBL_CFG0, 0);
+ WREG32(mmMME0_QM_GLBL_CFG0, 0);
+}
+
+static void gaudi_disable_tpc_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 tpc_offset = 0;
+ int tpc_id;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
+ return;
+
+ for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
+ WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset, 0);
+ tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
+ }
+}
+
+static void gaudi_stop_pci_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
+ return;
+
+ /* Stop upper CPs of QMANs 0.0 to 1.3 and 5.0 to 5.3 */
+ WREG32(mmDMA0_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA1_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA5_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+}
+
+static void gaudi_stop_hbm_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
+ return;
+
+ /* Stop CPs of HBM DMA QMANs */
+
+ WREG32(mmDMA2_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA3_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA4_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA6_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA7_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+}
+
+static void gaudi_stop_mme_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
+ return;
+
+ /* Stop CPs of MME QMANs */
+ WREG32(mmMME2_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmMME0_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+}
+
+static void gaudi_stop_tpc_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
+ return;
+
+ WREG32(mmTPC0_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC1_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC2_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC3_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC4_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC5_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC6_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC7_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+}
+
+static void gaudi_pci_dma_stall(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
+ return;
+
+ WREG32(mmDMA0_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA1_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA5_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+}
+
+static void gaudi_hbm_dma_stall(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
+ return;
+
+ WREG32(mmDMA2_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA3_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA4_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA6_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA7_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+}
+
+static void gaudi_mme_stall(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
+ return;
+
+ /* WA for H3-1800 bug: do ACC and SBAB writes twice */
+ WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+}
+
+static void gaudi_tpc_stall(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
+ return;
+
+ WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+}
+
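+/*
+ * Enable clock gating for the DMA, MME and TPC QMANs, unless it is
+ * disabled by the user or a debug session is in progress.
+ */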
+static void gaudi_enable_clock_gating(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 qman_offset;
+ int i;
+
+ if (!hdev->clock_gating)
+ return;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE)
+ return;
+
+ /* In case a debug session is in progress, don't enable clock gating
+ * as it may interfere with it
+ */
+ if (hdev->in_debug)
+ return;
+
+ for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
+ qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
+ QMAN_UPPER_CP_CGM_PWR_GATE_EN);
+ }
+
+ for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
+ qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
+ QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ }
+
+ WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmMME0_QM_CGM_CFG,
+ QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmMME2_QM_CGM_CFG,
+ QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+
+ for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
+ QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
+ QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+
+ qman_offset += TPC_QMAN_OFFSET;
+ }
+
+ gaudi->hw_cap_initialized |= HW_CAP_CLK_GATE;
+}
+
+static void gaudi_disable_clock_gating(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 qman_offset;
+ int i;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+ return;
+
+ for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
+ WREG32(mmDMA0_QM_CGM_CFG + qman_offset, 0);
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, 0);
+
+ qman_offset += (mmDMA1_QM_CGM_CFG - mmDMA0_QM_CGM_CFG);
+ }
+
+ WREG32(mmMME0_QM_CGM_CFG, 0);
+ WREG32(mmMME0_QM_CGM_CFG1, 0);
+ WREG32(mmMME2_QM_CGM_CFG, 0);
+ WREG32(mmMME2_QM_CGM_CFG1, 0);
+
+ for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ WREG32(mmTPC0_QM_CGM_CFG + qman_offset, 0);
+ WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, 0);
+
+ qman_offset += (mmTPC1_QM_CGM_CFG - mmTPC0_QM_CGM_CFG);
+ }
+
+ gaudi->hw_cap_initialized &= ~(HW_CAP_CLK_GATE);
+}
+
+static void gaudi_enable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+
+ /* Zero the lower/upper parts of the 64-bit counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
+
+ /* Enable the counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
+}
+
+static void gaudi_disable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+}
+
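+/*
+ * Halt the compute engines in stages: stop the QMAN CPs, stall the
+ * DMA/TPC/MME engines and only then disable the QMANs, waiting between
+ * the stages so in-flight work can drain.
+ */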
+static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
+{
+ u32 wait_timeout_ms, cpu_timeout_ms;
+
+ dev_info(hdev->dev,
+ "Halting compute engines and disabling interrupts\n");
+
+ if (hdev->pldm) {
+ wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
+ cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
+ } else {
+ wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
+ cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
+ }
+
+ if (hard_reset) {
+ /*
+ * The state of the CPU is unknown at this point, so make sure it
+ * is stopped by any means necessary
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GAUDI_EVENT_HALT_MACHINE);
+ msleep(cpu_timeout_ms);
+ }
+
+ gaudi_stop_mme_qmans(hdev);
+ gaudi_stop_tpc_qmans(hdev);
+ gaudi_stop_hbm_dma_qmans(hdev);
+ gaudi_stop_pci_dma_qmans(hdev);
+
+ gaudi_disable_clock_gating(hdev);
+
+ msleep(wait_timeout_ms);
+
+ gaudi_pci_dma_stall(hdev);
+ gaudi_hbm_dma_stall(hdev);
+ gaudi_tpc_stall(hdev);
+ gaudi_mme_stall(hdev);
+
+ msleep(wait_timeout_ms);
+
+ gaudi_disable_mme_qmans(hdev);
+ gaudi_disable_tpc_qmans(hdev);
+ gaudi_disable_hbm_dma_qmans(hdev);
+ gaudi_disable_pci_dma_qmans(hdev);
+
+ gaudi_disable_timestamp(hdev);
+
+ if (hard_reset)
+ gaudi_disable_msi(hdev);
+ else
+ gaudi_sync_irqs(hdev);
+}
+
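+/*
+ * Initialize the device MMU: program the hop0 table address for every
+ * ASID, invalidate the MMU cache and enable the MMU with the relevant
+ * hop configuration.
+ */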
+static int gaudi_mmu_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hop0_addr;
+ int rc, i;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MMU)
+ return 0;
+
+ hdev->dram_supports_virtual_memory = false;
+
+ for (i = 0 ; i < prop->max_asid ; i++) {
+ hop0_addr = prop->mmu_pgt_addr +
+ (i * prop->mmu_hop_table_size);
+
+ rc = gaudi_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to set hop0 addr for asid %d\n", i);
+ goto err;
+ }
+ }
+
+ /* Init the MMU cache management page */
+ WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
+ WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
+ VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
+
+ WREG32(mmMMU_UP_MMU_ENABLE, 1);
+ WREG32(mmMMU_UP_SPI_MASK, 0xF);
+
+ WREG32(mmSTLB_HOP_CONFIGURATION,
+ hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);
+
+ gaudi->hw_cap_initialized |= HW_CAP_MMU;
+
+ return 0;
+
+err:
+ return rc;
+}
+
+static int gaudi_load_firmware_to_device(struct hl_device *hdev)
+{
+ void __iomem *dst;
+
+ /* HBM scrambler must be initialized before pushing F/W to HBM */
+ gaudi_init_scrambler_hbm(hdev);
+
+ dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET;
+
+ return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst);
+}
+
+static int gaudi_load_boot_fit_to_device(struct hl_device *hdev)
+{
+ void __iomem *dst;
+
+ dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
+
+ return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst);
+}
+
+static void gaudi_read_device_fw_version(struct hl_device *hdev,
+ enum hl_fw_component fwc)
+{
+ const char *name;
+ u32 ver_off;
+ char *dest;
+
+ switch (fwc) {
+ case FW_COMP_UBOOT:
+ ver_off = RREG32(mmUBOOT_VER_OFFSET);
+ dest = hdev->asic_prop.uboot_ver;
+ name = "U-Boot";
+ break;
+ case FW_COMP_PREBOOT:
+ ver_off = RREG32(mmPREBOOT_VER_OFFSET);
+ dest = hdev->asic_prop.preboot_ver;
+ name = "Preboot";
+ break;
+ default:
+ dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
+ return;
+ }
+
+ ver_off &= ~((u32)SRAM_BASE_ADDR);
+
+ if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
+ memcpy_fromio(dest, hdev->pcie_bar[SRAM_BAR_ID] + ver_off,
+ VERSION_MAX_LEN);
+ } else {
+ dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
+ name, ver_off);
+ strcpy(dest, "unavailable");
+ }
+}
+
+static int gaudi_init_cpu(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int rc;
+
+ if (!hdev->cpu_enable)
+ return 0;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_CPU)
+ return 0;
+
+ /*
+ * The device CPU works with 40-bit addresses.
+ * This register sets the extension to 50 bits.
+ */
+ WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr);
+
+ rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
+ mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU,
+ mmCPU_CMD_STATUS_TO_HOST,
+ mmCPU_BOOT_ERR0,
+ !hdev->bmc_enable, GAUDI_CPU_TIMEOUT_USEC,
+ GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
+
+ if (rc)
+ return rc;
+
+ gaudi->hw_cap_initialized |= HW_CAP_CPU;
+
+ return 0;
+}
+
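+/*
+ * Pass the addresses and sizes of the CPU PQ, EQ and CQ to the device
+ * CPU and poll until it reports PQ_INIT_STATUS_READY_FOR_HOST.
+ */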
+static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_eq *eq;
+ u32 status;
+ struct hl_hw_queue *cpu_pq =
+ &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
+ int err;
+
+ if (!hdev->cpu_queues_enable)
+ return 0;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
+ return 0;
+
+ eq = &hdev->event_queue;
+
+ WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
+ WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
+
+ WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
+ WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
+
+ WREG32(mmCPU_IF_CQ_BASE_ADDR_LOW,
+ lower_32_bits(hdev->cpu_accessible_dma_address));
+ WREG32(mmCPU_IF_CQ_BASE_ADDR_HIGH,
+ upper_32_bits(hdev->cpu_accessible_dma_address));
+
+ WREG32(mmCPU_IF_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
+ WREG32(mmCPU_IF_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
+ WREG32(mmCPU_IF_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
+
+ /* Used for EQ CI */
+ WREG32(mmCPU_IF_EQ_RD_OFFS, 0);
+
+ WREG32(mmCPU_IF_PF_PQ_PI, 0);
+
+ if (gaudi->multi_msi_mode)
+ WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP);
+ else
+ WREG32(mmCPU_IF_QUEUE_INIT,
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_PI_UPDATE);
+
+ err = hl_poll_timeout(
+ hdev,
+ mmCPU_IF_QUEUE_INIT,
+ status,
+ (status == PQ_INIT_STATUS_READY_FOR_HOST),
+ 1000,
+ cpu_timeout);
+
+ if (err) {
+ dev_err(hdev->dev,
+ "Failed to communicate with ARM CPU (ArmCP timeout)\n");
+ return -EIO;
+ }
+
+ gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
+ return 0;
+}
+
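+/*
+ * Early H/W configuration: mark the device state as dirty, secure the
+ * access through the PCI bars, configure the AXI drain and set up the
+ * reset registers.
+ */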
+static void gaudi_pre_hw_init(struct hl_device *hdev)
+{
+ /* Perform a read from the device to make sure the device is up */
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ /*
+ * Let's mark in the H/W that we have reached this point. We check
+ * this value in the reset_before_init function to understand whether
+ * we need to reset the chip before doing H/W init. This register is
+ * cleared by the H/W upon H/W reset
+ */
+ WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
+
+ /* Set the access through PCI bars (Linux driver only) as secured */
+ WREG32(mmPCIE_WRAP_LBW_PROT_OVR, (PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK |
+ PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK));
+
+ /* Perform read to flush the waiting writes to ensure configuration
+ * was set in the device
+ */
+ RREG32(mmPCIE_WRAP_LBW_PROT_OVR);
+
+ if (hdev->axi_drain) {
+ WREG32(mmPCIE_WRAP_LBW_DRAIN_CFG,
+ 1 << PCIE_WRAP_LBW_DRAIN_CFG_EN_SHIFT);
+ WREG32(mmPCIE_WRAP_HBW_DRAIN_CFG,
+ 1 << PCIE_WRAP_HBW_DRAIN_CFG_EN_SHIFT);
+
+ /* Perform read to flush the DRAIN cfg */
+ RREG32(mmPCIE_WRAP_HBW_DRAIN_CFG);
+ } else {
+ WREG32(mmPCIE_WRAP_LBW_DRAIN_CFG, 0);
+ WREG32(mmPCIE_WRAP_HBW_DRAIN_CFG, 0);
+
+ /* Perform read to flush the DRAIN cfg */
+ RREG32(mmPCIE_WRAP_HBW_DRAIN_CFG);
+ }
+
+ /* Configure the reset registers. Must be done as early as possible
+ * in case we fail during H/W initialization
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H,
+ (CFG_RST_H_DMA_MASK |
+ CFG_RST_H_MME_MASK |
+ CFG_RST_H_SM_MASK |
+ CFG_RST_H_TPC_MASK));
+
+ WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H,
+ (CFG_RST_H_HBM_MASK |
+ CFG_RST_H_TPC_MASK |
+ CFG_RST_H_NIC_MASK |
+ CFG_RST_H_SM_MASK |
+ CFG_RST_H_DMA_MASK |
+ CFG_RST_H_MME_MASK |
+ CFG_RST_H_CPU_MASK |
+ CFG_RST_H_MMU_MASK));
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L,
+ (CFG_RST_L_IF_MASK |
+ CFG_RST_L_PSOC_MASK |
+ CFG_RST_L_TPC_MASK));
+}
+
+static int gaudi_hw_init(struct hl_device *hdev)
+{
+ int rc;
+
+ dev_info(hdev->dev, "Starting initialization of H/W\n");
+
+ gaudi_pre_hw_init(hdev);
+
+ gaudi_init_pci_dma_qmans(hdev);
+
+ gaudi_init_hbm_dma_qmans(hdev);
+
+ /*
+ * Before pushing u-boot/linux to the device, we need to set the HBM
+ * BAR to the base address of the DRAM
+ */
+ if (gaudi_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
+ dev_err(hdev->dev,
+ "failed to map HBM bar to DRAM base address\n");
+ return -EIO;
+ }
+
+ rc = gaudi_init_cpu(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU\n");
+ return rc;
+ }
+
+ /* SRAM scrambler must be initialized after CPU is running from HBM */
+ gaudi_init_scrambler_sram(hdev);
+
+ /* This is here just in case we are working without CPU */
+ gaudi_init_scrambler_hbm(hdev);
+
+ gaudi_init_golden_registers(hdev);
+
+ rc = gaudi_mmu_init(hdev);
+ if (rc)
+ return rc;
+
+ gaudi_init_security(hdev);
+
+ gaudi_init_mme_qmans(hdev);
+
+ gaudi_init_tpc_qmans(hdev);
+
+ gaudi_enable_clock_gating(hdev);
+
+ gaudi_enable_timestamp(hdev);
+
+ /* MSI must be enabled before CPU queues are initialized */
+ rc = gaudi_enable_msi(hdev);
+ if (rc)
+ goto disable_queues;
+
+ /* must be called after MSI was enabled */
+ rc = gaudi_init_cpu_queues(hdev, GAUDI_CPU_TIMEOUT_USEC);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
+ rc);
+ goto disable_msi;
+ }
+
+ /* Perform read from the device to flush all configuration */
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ return 0;
+
+disable_msi:
+ gaudi_disable_msi(hdev);
+disable_queues:
+ gaudi_disable_mme_qmans(hdev);
+ gaudi_disable_pci_dma_qmans(hdev);
+
+ return rc;
+}
+
+static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 status, reset_timeout_ms, boot_strap = 0;
+
+ if (hdev->pldm) {
+ if (hard_reset)
+ reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
+ else
+ reset_timeout_ms = GAUDI_PLDM_SRESET_TIMEOUT_MSEC;
+ } else {
+ reset_timeout_ms = GAUDI_RESET_TIMEOUT_MSEC;
+ }
+
+ if (hard_reset) {
+ /* Tell ASIC not to re-initialize PCIe */
+ WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
+
+ boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
+ /* H/W bug WA:
+ * rdata[31:0] = strap_read_val;
+ * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
+ */
+ boot_strap = (((boot_strap & 0x7FE00000) << 1) |
+ (boot_strap & 0x001FFFFF));
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);
+
+ /* Restart BTL/BLR upon hard-reset */
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ /* Don't restart BTL/BLR upon soft-reset */
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 0);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST,
+ 1 << PSOC_GLOBAL_CONF_SOFT_RST_IND_SHIFT);
+ dev_info(hdev->dev,
+ "Issued SOFT reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ }
+
+ /*
+ * After hard reset, we can't poll the BTM_FSM register because the PSOC
+ * itself is in reset. We need to wait until the reset is deasserted
+ */
+ msleep(reset_timeout_ms);
+
+ status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
+ if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
+ dev_err(hdev->dev,
+ "Timeout while waiting for device to reset 0x%x\n",
+ status);
+
+ if (!hard_reset) {
+ gaudi->hw_cap_initialized &= ~(HW_CAP_PCI_DMA | HW_CAP_MME |
+ HW_CAP_TPC_MASK |
+ HW_CAP_HBM_DMA);
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GAUDI_EVENT_SOFT_RESET);
+ return;
+ }
+
+ /* We continue here only for hard-reset */
+
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap);
+
+ gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
+ HW_CAP_HBM | HW_CAP_PCI_DMA |
+ HW_CAP_MME | HW_CAP_TPC_MASK |
+ HW_CAP_HBM_DMA | HW_CAP_PLL |
+ HW_CAP_MMU |
+ HW_CAP_SRAM_SCRAMBLER |
+ HW_CAP_HBM_SCRAMBLER);
+ memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
+}
+
+static int gaudi_suspend(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ if (rc)
+ dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
+
+ return rc;
+}
+
+static int gaudi_resume(struct hl_device *hdev)
+{
+ return gaudi_init_iatu(hdev);
+}
+
+static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+ u64 kaddress, phys_addr_t paddress, u32 size)
+{
+ int rc;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE;
+
+ rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+ return rc;
+}
+
+static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 db_reg_offset, db_value, dma_qm_offset, q_off;
+ int dma_id;
+ bool invalid_queue = false;
+
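+ /* Resolve the PQ PI doorbell register that matches this queue */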
+ switch (hw_queue_id) {
+ case GAUDI_QUEUE_ID_DMA_0_0...GAUDI_QUEUE_ID_DMA_0_3:
+ dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_1_0...GAUDI_QUEUE_ID_DMA_1_3:
+ dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_2_0...GAUDI_QUEUE_ID_DMA_2_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_3_0...GAUDI_QUEUE_ID_DMA_3_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_2];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_4_0...GAUDI_QUEUE_ID_DMA_4_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_3];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3:
+ dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_3];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_CPU_PQ:
+ if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
+ db_reg_offset = mmCPU_IF_PF_PQ_PI;
+ else
+ invalid_queue = true;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_0_0:
+ db_reg_offset = mmMME2_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_0_1:
+ db_reg_offset = mmMME2_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_0_2:
+ db_reg_offset = mmMME2_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_0_3:
+ db_reg_offset = mmMME2_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_1_0:
+ db_reg_offset = mmMME0_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_1_1:
+ db_reg_offset = mmMME0_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_1_2:
+ db_reg_offset = mmMME0_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_1_3:
+ db_reg_offset = mmMME0_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_0_0:
+ db_reg_offset = mmTPC0_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_0_1:
+ db_reg_offset = mmTPC0_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_0_2:
+ db_reg_offset = mmTPC0_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_0_3:
+ db_reg_offset = mmTPC0_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_1_0:
+ db_reg_offset = mmTPC1_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_1_1:
+ db_reg_offset = mmTPC1_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_1_2:
+ db_reg_offset = mmTPC1_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_1_3:
+ db_reg_offset = mmTPC1_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_2_0:
+ db_reg_offset = mmTPC2_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_2_1:
+ db_reg_offset = mmTPC2_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_2_2:
+ db_reg_offset = mmTPC2_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_2_3:
+ db_reg_offset = mmTPC2_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_3_0:
+ db_reg_offset = mmTPC3_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_3_1:
+ db_reg_offset = mmTPC3_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_3_2:
+ db_reg_offset = mmTPC3_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_3_3:
+ db_reg_offset = mmTPC3_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_4_0:
+ db_reg_offset = mmTPC4_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_4_1:
+ db_reg_offset = mmTPC4_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_4_2:
+ db_reg_offset = mmTPC4_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_4_3:
+ db_reg_offset = mmTPC4_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_5_0:
+ db_reg_offset = mmTPC5_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_5_1:
+ db_reg_offset = mmTPC5_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_5_2:
+ db_reg_offset = mmTPC5_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_5_3:
+ db_reg_offset = mmTPC5_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_6_0:
+ db_reg_offset = mmTPC6_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_6_1:
+ db_reg_offset = mmTPC6_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_6_2:
+ db_reg_offset = mmTPC6_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_6_3:
+ db_reg_offset = mmTPC6_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_7_0:
+ db_reg_offset = mmTPC7_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_7_1:
+ db_reg_offset = mmTPC7_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_7_2:
+ db_reg_offset = mmTPC7_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_7_3:
+ db_reg_offset = mmTPC7_QM_PQ_PI_3;
+ break;
+
+ default:
+ invalid_queue = true;
+ }
+
+ if (invalid_queue) {
+ /* Should never get here */
+ dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
+ hw_queue_id);
+ return;
+ }
+
+ db_value = pi;
+
+ /* ring the doorbell */
+ WREG32(db_reg_offset, db_value);
+
+ if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ)
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GAUDI_EVENT_PI_UPDATE);
+}
+
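+/* Copy a 16-byte buffer descriptor (bd) into the PQ entry (pqe) */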
+static void gaudi_pqe_write(struct hl_device *hdev, __le64 *pqe,
+ struct hl_bd *bd)
+{
+ __le64 *pbd = (__le64 *) bd;
+
+ /* The QMANs are in host memory so a simple copy suffices */
+ pqe[0] = pbd[0];
+ pqe[1] = pbd[1];
+}
+
+static void *gaudi_dma_alloc_coherent(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
+{
+ void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
+ dma_handle, flags);
+
+ /* Shift to the device's base physical address of host memory */
+ if (kernel_addr)
+ *dma_handle += HOST_PHYS_BASE;
+
+ return kernel_addr;
+}
+
+static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ /* Cancel the device's base physical address of host memory */
+ dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
+
+ dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
+}
+
+static void *gaudi_get_int_queue_base(struct hl_device *hdev,
+ u32 queue_id, dma_addr_t *dma_handle,
+ u16 *queue_len)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+
+ if (queue_id >= GAUDI_QUEUE_ID_SIZE ||
+ gaudi_queue_type[queue_id] != QUEUE_TYPE_INT) {
+ dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
+ return NULL;
+ }
+
+ q = &gaudi->internal_qmans[queue_id];
+ *dma_handle = q->pq_dma_addr;
+ *queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE;
+
+ return q->pq_kernel_addr;
+}
+
+static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
+ u16 len, u32 timeout, long *result)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) {
+ if (result)
+ *result = 0;
+ return 0;
+ }
+
+ return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
+ timeout, result);
+}
+
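+/*
+ * Test an external queue by sending a fence (MSG_PROT) packet that
+ * writes a known value to host memory, then polling for that value.
+ */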
+static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
+{
+ struct packet_msg_prot *fence_pkt;
+ dma_addr_t pkt_dma_addr;
+ u32 fence_val, tmp, timeout_usec;
+ dma_addr_t fence_dma_addr;
+ u32 *fence_ptr;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI_PLDM_TEST_QUEUE_WAIT_USEC;
+ else
+ timeout_usec = GAUDI_TEST_QUEUE_WAIT_USEC;
+
+ fence_val = GAUDI_QMAN0_FENCE_VAL;
+
+ fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ &fence_dma_addr);
+ if (!fence_ptr) {
+ dev_err(hdev->dev,
+ "Failed to allocate memory for queue testing\n");
+ return -ENOMEM;
+ }
+
+ *fence_ptr = 0;
+
+ fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
+ sizeof(struct packet_msg_prot),
+ GFP_KERNEL, &pkt_dma_addr);
+ if (!fence_pkt) {
+ dev_err(hdev->dev,
+ "Failed to allocate packet for queue testing\n");
+ rc = -ENOMEM;
+ goto free_fence_ptr;
+ }
+
+ tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_CTL_EB_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT);
+ fence_pkt->ctl = cpu_to_le32(tmp);
+ fence_pkt->value = cpu_to_le32(fence_val);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr);
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
+ sizeof(struct packet_msg_prot),
+ pkt_dma_addr);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send fence packet\n");
+ goto free_pkt;
+ }
+
+ rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
+ 1000, timeout_usec, true);
+
+ hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
+
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev,
+ "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
+ hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
+ rc = -EIO;
+ }
+
+free_pkt:
+ hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
+ pkt_dma_addr);
+free_fence_ptr:
+ hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
+ fence_dma_addr);
+ return rc;
+}
+
+static int gaudi_test_cpu_queue(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ /*
+ * Check the capability here, as send_cpu_message() won't update the
+ * result value if the capability is not initialized
+ */
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_test_cpu_queue(hdev);
+}
+
+static int gaudi_test_queues(struct hl_device *hdev)
+{
+ int i, rc, ret_val = 0;
+
+ for (i = 0 ; i < HL_MAX_QUEUES ; i++) {
+ if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) {
+ rc = gaudi_test_queue(hdev, i);
+ if (rc)
+ ret_val = -EINVAL;
+ }
+ }
+
+ rc = gaudi_test_cpu_queue(hdev);
+ if (rc)
+ ret_val = -EINVAL;
+
+ return ret_val;
+}
+
+static void *gaudi_dma_pool_zalloc(struct hl_device *hdev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma_handle)
+{
+ void *kernel_addr;
+
+ if (size > GAUDI_DMA_POOL_BLK_SIZE)
+ return NULL;
+
+ kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
+
+ /* Shift to the device's base physical address of host memory */
+ if (kernel_addr)
+ *dma_handle += HOST_PHYS_BASE;
+
+ return kernel_addr;
+}
+
+static void gaudi_dma_pool_free(struct hl_device *hdev, void *vaddr,
+ dma_addr_t dma_addr)
+{
+ /* Cancel the device's base physical address of host memory */
+ dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
+
+ dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
+}
+
+static void *gaudi_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
+ size_t size, dma_addr_t *dma_handle)
+{
+ return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+}
+
+static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev,
+ size_t size, void *vaddr)
+{
+ hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
+}
+
+static int gaudi_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
+ return -ENOMEM;
+
+ /* Shift to the device's base physical address of host memory */
+ for_each_sg(sgl, sg, nents, i)
+ sg->dma_address += HOST_PHYS_BASE;
+
+ return 0;
+}
+
+static void gaudi_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /* Cancel the device's base physical address of host memory */
+ for_each_sg(sgl, sg, nents, i)
+ sg->dma_address -= HOST_PHYS_BASE;
+
+ dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
+}
+
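+/*
+ * Compute the size of the DMA descriptor list needed to cover an SG
+ * table, merging contiguous entries as long as the combined length
+ * doesn't exceed DMA_MAX_TRANSFER_SIZE.
+ */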
+static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
+ struct sg_table *sgt)
+{
+ struct scatterlist *sg, *sg_next_iter;
+ u32 count, dma_desc_cnt;
+ u64 len, len_next;
+ dma_addr_t addr, addr_next;
+
+ dma_desc_cnt = 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+
+ len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+
+ if (len == 0)
+ break;
+
+ while ((count + 1) < sgt->nents) {
+ sg_next_iter = sg_next(sg);
+ len_next = sg_dma_len(sg_next_iter);
+ addr_next = sg_dma_address(sg_next_iter);
+
+ if (len_next == 0)
+ break;
+
+ if ((addr + len == addr_next) &&
+ (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
+ len += len_next;
+ count++;
+ sg = sg_next_iter;
+ } else {
+ break;
+ }
+ }
+
+ dma_desc_cnt++;
+ }
+
+ return dma_desc_cnt * sizeof(struct packet_lin_dma);
+}
+
+static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ u64 addr, enum dma_data_direction dir)
+{
+ struct hl_userptr *userptr;
+ int rc;
+
+ if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
+ parser->job_userptr_list, &userptr))
+ goto already_pinned;
+
+ userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
+ if (!userptr)
+ return -ENOMEM;
+
+ rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
+ userptr);
+ if (rc)
+ goto free_userptr;
+
+ list_add_tail(&userptr->job_node, parser->job_userptr_list);
+
+ rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents, dir);
+ if (rc) {
+ dev_err(hdev->dev, "failed to map sgt with DMA region\n");
+ goto unpin_memory;
+ }
+
+ userptr->dma_mapped = true;
+ userptr->dir = dir;
+
+already_pinned:
+ parser->patched_cb_size +=
+ gaudi_get_dma_desc_list_size(hdev, userptr->sgt);
+
+ return 0;
+
+unpin_memory:
+ hl_unpin_host_memory(hdev, userptr);
+free_userptr:
+ kfree(userptr);
+ return rc;
+}
+
+static int gaudi_validate_dma_pkt_host(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ bool src_in_host)
+{
+ enum dma_data_direction dir;
+ bool skip_host_mem_pin = false, user_memset;
+ u64 addr;
+ int rc = 0;
+
+ user_memset = (le32_to_cpu(user_dma_pkt->ctl) &
+ GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+ GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+ if (src_in_host) {
+ if (user_memset)
+ skip_host_mem_pin = true;
+
+ dev_dbg(hdev->dev, "DMA direction is HOST --> DEVICE\n");
+ dir = DMA_TO_DEVICE;
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ } else {
+ dev_dbg(hdev->dev, "DMA direction is DEVICE --> HOST\n");
+ dir = DMA_FROM_DEVICE;
+ addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
+ GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
+ GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
+ }
+
+ if (skip_host_mem_pin)
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+ else
+ rc = gaudi_pin_memory_before_cs(hdev, parser, user_dma_pkt,
+ addr, dir);
+
+ return rc;
+}
+
+static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ bool src_in_host = false;
+ u64 dst_addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
+ GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
+ GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
+
+ dev_dbg(hdev->dev, "DMA packet details:\n");
+ dev_dbg(hdev->dev, "source == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->src_addr));
+ dev_dbg(hdev->dev, "destination == 0x%llx\n", dst_addr);
+ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
+
+ /*
+ * Special handling for DMA with size 0. Bypass all validations
+ * because no transactions will be done except for WR_COMP, which
+ * is not a security issue
+ */
+ if (!le32_to_cpu(user_dma_pkt->tsize)) {
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+ return 0;
+ }
+
+ if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
+ src_in_host = true;
+
+ return gaudi_validate_dma_pkt_host(hdev, parser, user_dma_pkt,
+ src_in_host);
+}
+
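+/*
+ * Walk the user CB packet by packet, reject packets that users are not
+ * allowed to submit and compute the size needed for the patched CB.
+ */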
+static int gaudi_validate_cb(struct hl_device *hdev,
+ struct hl_cs_parser *parser, bool is_mmu)
+{
+ u32 cb_parsed_length = 0;
+ int rc = 0;
+
+ parser->patched_cb_size = 0;
+
+ /* user_cb_size is more than 0 so the loop will always be executed */
+ while (cb_parsed_length < parser->user_cb_size) {
+ enum packet_id pkt_id;
+ u16 pkt_size;
+ struct gaudi_packet *user_pkt;
+
+ user_pkt = (struct gaudi_packet *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+ pkt_size = gaudi_packet_sizes[pkt_id];
+ cb_parsed_length += pkt_size;
+ if (cb_parsed_length > parser->user_cb_size) {
+ dev_err(hdev->dev,
+ "packet 0x%x is out of CB boundary\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ switch (pkt_id) {
+ case PACKET_MSG_PROT:
+ dev_err(hdev->dev,
+ "User not allowed to use MSG_PROT\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_CP_DMA:
+ dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_STOP:
+ dev_err(hdev->dev, "User not allowed to use STOP\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_LIN_DMA:
+ parser->contains_dma_pkt = true;
+ if (is_mmu)
+ parser->patched_cb_size += pkt_size;
+ else
+ rc = gaudi_validate_dma_pkt_no_mmu(hdev, parser,
+ (struct packet_lin_dma *) user_pkt);
+ break;
+
+ case PACKET_WREG_32:
+ case PACKET_WREG_BULK:
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_REPEAT:
+ case PACKET_FENCE:
+ case PACKET_NOP:
+ case PACKET_ARB_POINT:
+ case PACKET_LOAD_AND_EXE:
+ parser->patched_cb_size += pkt_size;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid packet header 0x%x\n",
+ pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ /*
+ * The new CB should have space at the end for two MSG_PROT packets:
+ * 1. A packet that will act as a completion packet
+ * 2. A packet that will generate an MSI-X interrupt
+ */
+ parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
+
+ return rc;
+}
+
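+/*
+ * Translate a user LIN_DMA packet into one packet per merged SG entry,
+ * using the DMA addresses of the pinned host memory. Only the last
+ * resulting packet keeps the user's WR_COMP setting.
+ */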
+static int gaudi_patch_dma_packet(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ struct packet_lin_dma *new_dma_pkt,
+ u32 *new_dma_pkt_size)
+{
+ struct hl_userptr *userptr;
+ struct scatterlist *sg, *sg_next_iter;
+ u32 count, dma_desc_cnt, user_wrcomp_en_mask, ctl;
+ u64 len, len_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ u64 device_memory_addr, addr;
+ enum dma_data_direction dir;
+ struct sg_table *sgt;
+ bool src_in_host = false;
+ bool skip_host_mem_pin = false;
+ bool user_memset;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+
+ if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
+ src_in_host = true;
+
+ user_memset = (ctl & GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+ GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+ if (src_in_host) {
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ dir = DMA_TO_DEVICE;
+ if (user_memset)
+ skip_host_mem_pin = true;
+ } else {
+ addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ dir = DMA_FROM_DEVICE;
+ }
+
+ if ((!skip_host_mem_pin) &&
+ (!hl_userptr_is_pinned(hdev, addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ parser->job_userptr_list, &userptr))) {
+ dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
+ addr, le32_to_cpu(user_dma_pkt->tsize));
+ return -EFAULT;
+ }
+
+ if ((user_memset) && (dir == DMA_TO_DEVICE)) {
+ memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
+ *new_dma_pkt_size = sizeof(*user_dma_pkt);
+ return 0;
+ }
+
+ user_wrcomp_en_mask = ctl & GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;
+
+ sgt = userptr->sgt;
+ dma_desc_cnt = 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ len = sg_dma_len(sg);
+ dma_addr = sg_dma_address(sg);
+
+ if (len == 0)
+ break;
+
+ while ((count + 1) < sgt->nents) {
+ sg_next_iter = sg_next(sg);
+ len_next = sg_dma_len(sg_next_iter);
+ dma_addr_next = sg_dma_address(sg_next_iter);
+
+ if (len_next == 0)
+ break;
+
+ if ((dma_addr + len == dma_addr_next) &&
+ (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
+ len += len_next;
+ count++;
+ sg = sg_next_iter;
+ } else {
+ break;
+ }
+ }
+
+ new_dma_pkt->ctl = user_dma_pkt->ctl;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+ if (likely(dma_desc_cnt))
+ ctl &= ~GAUDI_PKT_CTL_EB_MASK;
+ ctl &= ~GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;
+ new_dma_pkt->ctl = cpu_to_le32(ctl);
+ new_dma_pkt->tsize = cpu_to_le32(len);
+
+ if (dir == DMA_TO_DEVICE) {
+ new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
+ new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
+ } else {
+ new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
+ new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
+ }
+
+ if (!user_memset)
+ device_memory_addr += len;
+ dma_desc_cnt++;
+ new_dma_pkt++;
+ }
+
+ if (!dma_desc_cnt) {
+ dev_err(hdev->dev,
+ "Error of 0 SG entries when patching DMA packet\n");
+ return -EFAULT;
+ }
+
+ /* Fix the last DMA packet - wrcomp must be as the user set it */
+ new_dma_pkt--;
+ new_dma_pkt->ctl |= cpu_to_le32(user_wrcomp_en_mask);
+
+ *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
+
+ return 0;
+}
+
+static int gaudi_patch_cb(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u32 cb_parsed_length = 0;
+ u32 cb_patched_cur_length = 0;
+ int rc = 0;
+
+ /* user_cb_size is more than 0 so the loop will always be executed */
+ while (cb_parsed_length < parser->user_cb_size) {
+ enum packet_id pkt_id;
+ u16 pkt_size;
+ u32 new_pkt_size = 0;
+ struct gaudi_packet *user_pkt, *kernel_pkt;
+
+ user_pkt = (struct gaudi_packet *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+ kernel_pkt = (struct gaudi_packet *) (uintptr_t)
+ (parser->patched_cb->kernel_address +
+ cb_patched_cur_length);
+
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+ pkt_size = gaudi_packet_sizes[pkt_id];
+ cb_parsed_length += pkt_size;
+ if (cb_parsed_length > parser->user_cb_size) {
+ dev_err(hdev->dev,
+ "packet 0x%x is out of CB boundary\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ switch (pkt_id) {
+ case PACKET_LIN_DMA:
+ rc = gaudi_patch_dma_packet(hdev, parser,
+ (struct packet_lin_dma *) user_pkt,
+ (struct packet_lin_dma *) kernel_pkt,
+ &new_pkt_size);
+ cb_patched_cur_length += new_pkt_size;
+ break;
+
+ case PACKET_MSG_PROT:
+ dev_err(hdev->dev,
+ "User not allowed to use MSG_PROT\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_CP_DMA:
+ dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_STOP:
+ dev_err(hdev->dev, "User not allowed to use STOP\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_WREG_32:
+ case PACKET_WREG_BULK:
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_REPEAT:
+ case PACKET_FENCE:
+ case PACKET_NOP:
+ case PACKET_ARB_POINT:
+ case PACKET_LOAD_AND_EXE:
+ memcpy(kernel_pkt, user_pkt, pkt_size);
+ cb_patched_cur_length += pkt_size;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid packet header 0x%x\n",
+ pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int gaudi_parse_cb_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u64 patched_cb_handle;
+ u32 patched_cb_size;
+ struct hl_cb *user_cb;
+ int rc;
+
+	/*
+	 * The new CB should have space at the end for two MSG_PROT packets:
+	 * 1. A packet that will act as a completion packet
+	 * 2. A packet that will generate an MSI interrupt
+	 */
+ parser->patched_cb_size = parser->user_cb_size +
+ sizeof(struct packet_msg_prot) * 2;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
+ parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to allocate patched CB for DMA CS %d\n",
+ rc);
+ return rc;
+ }
+
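+	/* The CB handle is the idr id shifted left by PAGE_SHIFT (so it can
+	 * double as an mmap offset); shift it back to get the raw id
+	 */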
+ patched_cb_handle >>= PAGE_SHIFT;
+ parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
+ (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
+ if (!parser->patched_cb) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * The check that parser->user_cb_size <= parser->user_cb->size was done
+ * in validate_queue_index().
+ */
+ memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
+ (void *) (uintptr_t) parser->user_cb->kernel_address,
+ parser->user_cb_size);
+
+ patched_cb_size = parser->patched_cb_size;
+
+ /* Validate patched CB instead of user CB */
+ user_cb = parser->user_cb;
+ parser->user_cb = parser->patched_cb;
+ rc = gaudi_validate_cb(hdev, parser, true);
+ parser->user_cb = user_cb;
+
+ if (rc) {
+ hl_cb_put(parser->patched_cb);
+ goto out;
+ }
+
+ if (patched_cb_size != parser->patched_cb_size) {
+ dev_err(hdev->dev, "user CB size mismatch\n");
+ hl_cb_put(parser->patched_cb);
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+	/*
+	 * Always call cb destroy here because we still hold one reference
+	 * to it from the cb_get call earlier. After the job is completed,
+	 * cb_put will release it, but here we want to remove it from the
+	 * idr
+	 */
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+ patched_cb_handle << PAGE_SHIFT);
+
+ return rc;
+}
+
+static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u64 patched_cb_handle;
+ int rc;
+
+ rc = gaudi_validate_cb(hdev, parser, false);
+
+ if (rc)
+ goto free_userptr;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
+ parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to allocate patched CB for DMA CS %d\n", rc);
+ goto free_userptr;
+ }
+
+ patched_cb_handle >>= PAGE_SHIFT;
+ parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
+ (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
+ if (!parser->patched_cb) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = gaudi_patch_cb(hdev, parser);
+
+ if (rc)
+ hl_cb_put(parser->patched_cb);
+
+out:
+	/*
+	 * Always call cb destroy here because we still hold one reference
+	 * to it from the cb_get call earlier. After the job is completed,
+	 * cb_put will release it, but here we want to remove it from the
+	 * idr
+	 */
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+ patched_cb_handle << PAGE_SHIFT);
+
+free_userptr:
+ if (rc)
+ hl_userptr_delete_list(hdev, parser->job_userptr_list);
+ return rc;
+}
+
+static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
+
+ /* For internal queue jobs just check if CB address is valid */
+ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->sram_user_base_address,
+ asic_prop->sram_end_address))
+ return 0;
+
+ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->dram_user_base_address,
+ asic_prop->dram_end_address))
+ return 0;
+
+ /* PMMU and HPMMU addresses are equal, check only one of them */
+ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->pmmu.start_addr,
+ asic_prop->pmmu.end_addr))
+ return 0;
+
+ dev_err(hdev->dev,
+ "CB address 0x%px + 0x%x for internal QMAN is not valid\n",
+ parser->user_cb, parser->user_cb_size);
+
+ return -EFAULT;
+}
+
+static int gaudi_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (parser->queue_type == QUEUE_TYPE_INT)
+ return gaudi_parse_cb_no_ext_queue(hdev, parser);
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MMU)
+ return gaudi_parse_cb_mmu(hdev, parser);
+ else
+ return gaudi_parse_cb_no_mmu(hdev, parser);
+}
+
+static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
+ u64 kernel_address, u32 len,
+ u64 cq_addr, u32 cq_val, u32 msi_vec,
+ bool eb)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct packet_msg_prot *cq_pkt;
+ u32 tmp;
+
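+	/* The two MSG_PROT packets reserved at the end of the CB are built
+	 * here: the first writes the completion value to the CQ, the second
+	 * triggers the MSI by writing to the PCIe MSI interrupt register
+	 */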
+ cq_pkt = (struct packet_msg_prot *) (uintptr_t)
+ (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
+
+ tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT);
+
+ if (eb)
+ tmp |= (1 << GAUDI_PKT_CTL_EB_SHIFT);
+
+ cq_pkt->ctl = cpu_to_le32(tmp);
+ cq_pkt->value = cpu_to_le32(cq_val);
+ cq_pkt->addr = cpu_to_le64(cq_addr);
+
+ cq_pkt++;
+
+ tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT);
+ cq_pkt->ctl = cpu_to_le32(tmp);
+ cq_pkt->value = cpu_to_le32(1);
+
+ if (!gaudi->multi_msi_mode)
+ msi_vec = 0;
+
+ cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4);
+}
+
+static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
+{
+ WREG32(mmCPU_IF_EQ_RD_OFFS, val);
+}
+
+static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
+ u32 size, u64 val)
+{
+ struct packet_lin_dma *lin_dma_pkt;
+ struct hl_cs_job *job;
+ u32 cb_size, ctl;
+ struct hl_cb *cb;
+ int rc;
+
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (!cb)
+ return -EFAULT;
+
+ lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
+ memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
+ cb_size = sizeof(*lin_dma_pkt);
+
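+	/* Build a single LIN_DMA packet in memset mode; with the MEMSET bit
+	 * set, src_addr carries the 64-bit fill value instead of an address
+	 */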
+ ctl = ((PACKET_LIN_DMA << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
+ (1 << GAUDI_PKT_LIN_DMA_CTL_LIN_SHIFT) |
+ (1 << GAUDI_PKT_CTL_RB_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT));
+ lin_dma_pkt->ctl = cpu_to_le32(ctl);
+ lin_dma_pkt->src_addr = cpu_to_le64(val);
+ lin_dma_pkt->dst_addr |= cpu_to_le64(addr);
+ lin_dma_pkt->tsize = cpu_to_le32(size);
+
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto release_cb;
+ }
+
+ job->id = 0;
+ job->user_cb = cb;
+ job->user_cb->cs_cnt++;
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = gaudi_send_job_on_qman0(hdev, job);
+
+ hl_debugfs_remove_job(hdev, job);
+ kfree(job);
+ cb->cs_cnt--;
+
+release_cb:
+ hl_cb_put(cb);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ return rc;
+}
+
+static void gaudi_restore_sm_registers(struct hl_device *hdev)
+{
+ int i;
+
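+	/* Each sync object and monitor register is 4 bytes wide, hence the
+	 * stride of 4 and the NUM_OF_*_IN_BLOCK << 2 loop bounds
+	 */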
+ for (i = 0 ; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4) {
+ WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+ WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+ WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+ }
+
+ for (i = 0 ; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4) {
+ WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ }
+
+ i = GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4;
+
+ for (; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4)
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+
+ i = GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4;
+
+ for (; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4)
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+}
+
+static void gaudi_restore_dma_registers(struct hl_device *hdev)
+{
+ u32 sob_delta = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 -
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
+ int i;
+
+ for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
+ u64 sob_addr = CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ (i * sob_delta);
+ u32 dma_offset = i * DMA_CORE_OFFSET;
+
+ WREG32(mmDMA0_CORE_WR_COMP_ADDR_LO + dma_offset,
+ lower_32_bits(sob_addr));
+ WREG32(mmDMA0_CORE_WR_COMP_ADDR_HI + dma_offset,
+ upper_32_bits(sob_addr));
+ WREG32(mmDMA0_CORE_WR_COMP_WDATA + dma_offset, 0x80000001);
+
+ /* For DMAs 2-7, need to restore WR_AWUSER_31_11 as it can be
+ * modified by the user for SRAM reduction
+ */
+ if (i > 1)
+ WREG32(mmDMA0_CORE_WR_AWUSER_31_11 + dma_offset,
+ 0x00000001);
+ }
+}
+
+static void gaudi_restore_qm_registers(struct hl_device *hdev)
+{
+ u32 qman_offset;
+ int i;
+
+ for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
+ qman_offset = i * DMA_QMAN_OFFSET;
+ WREG32(mmDMA0_QM_ARB_CFG_0 + qman_offset, 0);
+ }
+
+ for (i = 0 ; i < MME_NUMBER_OF_MASTER_ENGINES ; i++) {
+ qman_offset = i * (mmMME2_QM_BASE - mmMME0_QM_BASE);
+ WREG32(mmMME0_QM_ARB_CFG_0 + qman_offset, 0);
+ }
+
+ for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ qman_offset = i * TPC_QMAN_OFFSET;
+ WREG32(mmTPC0_QM_ARB_CFG_0 + qman_offset, 0);
+ }
+}
+
+static void gaudi_restore_user_registers(struct hl_device *hdev)
+{
+ gaudi_restore_sm_registers(hdev);
+ gaudi_restore_dma_registers(hdev);
+ gaudi_restore_qm_registers(hdev);
+}
+
+static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 addr = prop->sram_user_base_address;
+ u32 size = hdev->pldm ? 0x10000 :
+ (prop->sram_size - SRAM_USER_BASE_OFFSET);
+ u64 val = 0x7777777777777777ull;
+ int rc;
+
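+	/* Scrub all user SRAM with a fixed pattern, presumably so stale data
+	 * from the previous context is never observable by the new one
+	 */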
+ rc = gaudi_memset_device_memory(hdev, addr, size, val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
+ return rc;
+ }
+
+ gaudi_mmu_prepare(hdev, asid);
+
+ gaudi_restore_user_registers(hdev);
+
+ return 0;
+}
+
+static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 addr = prop->mmu_pgt_addr;
+ u32 size = prop->mmu_pgt_size + MMU_CACHE_MNG_SIZE;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+ return 0;
+
+ return gaudi_memset_device_memory(hdev, addr, size, 0);
+}
+
+static void gaudi_restore_phase_topology(struct hl_device *hdev)
+{
+
+}
+
+static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hbm_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't read register - clock gating is enabled!\n");
+ rc = -EFAULT;
+ } else {
+ *val = RREG32(addr - CFG_BASE);
+ }
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
+ *val = readl(hdev->pcie_bar[SRAM_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
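+		/* The HBM BAR cannot cover all of DRAM at once, so slide it
+		 * over the target address, do the access and then restore it
+		 */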
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
+ if (hbm_bar_addr != U64_MAX) {
+ *val = readl(hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - bar_base_addr));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
+ hbm_bar_addr);
+ }
+ if (hbm_bar_addr == U64_MAX)
+ rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hbm_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't write register - clock gating is enabled!\n");
+ rc = -EFAULT;
+ } else {
+ WREG32(addr - CFG_BASE, val);
+ }
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
+ writel(val, hdev->pcie_bar[SRAM_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
+ if (hbm_bar_addr != U64_MAX) {
+ writel(val, hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - bar_base_addr));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
+ hbm_bar_addr);
+ }
+ if (hbm_bar_addr == U64_MAX)
+ rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hbm_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't read register - clock gating is enabled!\n");
+ rc = -EFAULT;
+ } else {
+ u32 val_l = RREG32(addr - CFG_BASE);
+ u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
+
+ *val = (((u64) val_h) << 32) | val_l;
+ }
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
+ *val = readq(hdev->pcie_bar[SRAM_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
+ if (hbm_bar_addr != U64_MAX) {
+ *val = readq(hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - bar_base_addr));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
+ hbm_bar_addr);
+ }
+ if (hbm_bar_addr == U64_MAX)
+ rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hbm_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't write register - clock gating is enabled!\n");
+ rc = -EFAULT;
+ } else {
+ WREG32(addr - CFG_BASE, lower_32_bits(val));
+ WREG32(addr + sizeof(u32) - CFG_BASE,
+ upper_32_bits(val));
+ }
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
+ writeq(val, hdev->pcie_bar[SRAM_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
+ if (hbm_bar_addr != U64_MAX) {
+ writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - bar_base_addr));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
+ hbm_bar_addr);
+ }
+ if (hbm_bar_addr == U64_MAX)
+ rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (hdev->hard_reset_pending)
+ return U64_MAX;
+
+ return readq(hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - gaudi->hbm_bar_cur_addr));
+}
+
+static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (hdev->hard_reset_pending)
+ return;
+
+ writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - gaudi->hbm_bar_cur_addr));
+}
+
+static void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
+{
+	/* clear the MMBP bit and ASID field (low 11 bits), then set the ASID */
+ WREG32_AND(reg, ~0x7FF);
+ WREG32_OR(reg, asid);
+}
+
+static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) {
+ WARN(1, "asid %u is too big\n", asid);
+ return;
+ }
+
+ mutex_lock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_CORE_NON_SECURE_PROPS, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_ACC_WBC, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME1_ACC_WBC, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
+
+ hdev->asic_funcs->enable_clock_gating(hdev);
+
+ mutex_unlock(&gaudi->clk_gate_mutex);
+}
+
+static int gaudi_send_job_on_qman0(struct hl_device *hdev,
+ struct hl_cs_job *job)
+{
+ struct packet_msg_prot *fence_pkt;
+ u32 *fence_ptr;
+ dma_addr_t fence_dma_addr;
+ struct hl_cb *cb;
+ u32 tmp, timeout, dma_offset;
+ int rc;
+
+ if (hdev->pldm)
+ timeout = GAUDI_PLDM_QMAN0_TIMEOUT_USEC;
+ else
+ timeout = HL_DEVICE_TIMEOUT_USEC;
+
+ if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't send driver job on QMAN0 because the device is not idle\n");
+ return -EBUSY;
+ }
+
+ fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ &fence_dma_addr);
+ if (!fence_ptr) {
+ dev_err(hdev->dev,
+ "Failed to allocate fence memory for QMAN0\n");
+ return -ENOMEM;
+ }
+
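+	/* The last packet of the patched CB is a MSG_PROT that writes a fence
+	 * value to host memory; completion is detected by polling that value
+	 */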
+ cb = job->patched_cb;
+
+ fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
+ job->job_cb_size - sizeof(struct packet_msg_prot));
+
+ tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_CTL_EB_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT);
+ fence_pkt->ctl = cpu_to_le32(tmp);
+ fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr);
+
+ dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET;
+
+ WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0,
+ job->job_cb_size, cb->bus_address);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
+ goto free_fence_ptr;
+ }
+
+ rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
+ (tmp == GAUDI_QMAN0_FENCE_VAL), 1000,
+ timeout, true);
+
+ hl_hw_queue_inc_ci_kernel(hdev, GAUDI_QUEUE_ID_DMA_0_0);
+
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
+ goto free_fence_ptr;
+ }
+
+free_fence_ptr:
+ WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
+ ~BIT(DMA0_CORE_PROT_VAL_SHIFT));
+
+ hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
+ fence_dma_addr);
+ return rc;
+}
+
+static void gaudi_get_event_desc(u16 event_type, char *desc, size_t size)
+{
+ if (event_type >= GAUDI_EVENT_SIZE)
+ goto event_not_supported;
+
+ if (!gaudi_irq_map_table[event_type].valid)
+ goto event_not_supported;
+
+	snprintf(desc, size, "%s", gaudi_irq_map_table[event_type].name);
+
+ return;
+
+event_not_supported:
+ snprintf(desc, size, "N/A");
+}
+
+static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev,
+ u32 x_y, bool is_write)
+{
+ u32 dma_id[2], dma_offset, err_cause[2], mask, i;
+
+ mask = is_write ? DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK :
+ DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK;
+
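+	/* Each DMA_IF port is shared by two DMA engines, so the initiator
+	 * coordinates alone are ambiguous; read both engines' ERR_CAUSE
+	 * registers to determine which one actually triggered the RAZWI
+	 */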
+ switch (x_y) {
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
+ dma_id[0] = 0;
+ dma_id[1] = 2;
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
+ dma_id[0] = 1;
+ dma_id[1] = 3;
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
+ dma_id[0] = 4;
+ dma_id[1] = 6;
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
+ dma_id[0] = 5;
+ dma_id[1] = 7;
+ break;
+ default:
+ goto unknown_initiator;
+ }
+
+ for (i = 0 ; i < 2 ; i++) {
+ dma_offset = dma_id[i] * DMA_CORE_OFFSET;
+ err_cause[i] = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
+ }
+
+ switch (x_y) {
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ return "DMA0";
+ else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ return "DMA2";
+ else
+ return "DMA0 or DMA2";
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ return "DMA1";
+ else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ return "DMA3";
+ else
+ return "DMA1 or DMA3";
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ return "DMA4";
+ else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ return "DMA6";
+ else
+ return "DMA4 or DMA6";
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ return "DMA5";
+ else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ return "DMA7";
+ else
+ return "DMA5 or DMA7";
+ }
+
+unknown_initiator:
+ return "unknown initiator";
+}
+
+static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
+ bool is_write)
+{
+ u32 val, x_y, axi_id;
+
+ val = is_write ? RREG32(mmMMU_UP_RAZWI_WRITE_ID) :
+ RREG32(mmMMU_UP_RAZWI_READ_ID);
+ x_y = val & ((RAZWI_INITIATOR_Y_MASK << RAZWI_INITIATOR_Y_SHIFT) |
+ (RAZWI_INITIATOR_X_MASK << RAZWI_INITIATOR_X_SHIFT));
+ axi_id = val & (RAZWI_INITIATOR_AXI_ID_MASK <<
+ RAZWI_INITIATOR_AXI_ID_SHIFT);
+
+ switch (x_y) {
+ case RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0:
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ return "TPC0";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ return "NIC0";
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_TPC1:
+ return "TPC1";
+ case RAZWI_INITIATOR_ID_X_Y_MME0_0:
+ case RAZWI_INITIATOR_ID_X_Y_MME0_1:
+ return "MME0";
+ case RAZWI_INITIATOR_ID_X_Y_MME1_0:
+ case RAZWI_INITIATOR_ID_X_Y_MME1_1:
+ return "MME1";
+ case RAZWI_INITIATOR_ID_X_Y_TPC2:
+ return "TPC2";
+ case RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC:
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ return "TPC3";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PCI))
+ return "PCI";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_CPU))
+ return "CPU";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PSOC))
+ return "PSOC";
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
+ return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write);
+ case RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2:
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ return "TPC4";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ return "NIC1";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
+ return "NIC2";
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_TPC5:
+ return "TPC5";
+ case RAZWI_INITIATOR_ID_X_Y_MME2_0:
+ case RAZWI_INITIATOR_ID_X_Y_MME2_1:
+ return "MME2";
+ case RAZWI_INITIATOR_ID_X_Y_MME3_0:
+ case RAZWI_INITIATOR_ID_X_Y_MME3_1:
+ return "MME3";
+ case RAZWI_INITIATOR_ID_X_Y_TPC6:
+ return "TPC6";
+ case RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5:
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ return "TPC7";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ return "NIC4";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
+ return "NIC5";
+ break;
+ default:
+ break;
+ }
+
+ dev_err(hdev->dev,
+ "Unknown RAZWI initiator ID 0x%x [Y=%d, X=%d, AXI_ID=%d]\n",
+ val,
+ (val >> RAZWI_INITIATOR_Y_SHIFT) & RAZWI_INITIATOR_Y_MASK,
+ (val >> RAZWI_INITIATOR_X_SHIFT) & RAZWI_INITIATOR_X_MASK,
+ (val >> RAZWI_INITIATOR_AXI_ID_SHIFT) &
+ RAZWI_INITIATOR_AXI_ID_MASK);
+
+ return "unknown initiator";
+}
+
+static void gaudi_print_razwi_info(struct hl_device *hdev)
+{
+ if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) {
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI event caused by illegal write of %s\n",
+ gaudi_get_razwi_initiator_name(hdev, true));
+ WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0);
+ }
+
+ if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) {
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI event caused by illegal read of %s\n",
+ gaudi_get_razwi_initiator_name(hdev, false));
+ WREG32(mmMMU_UP_RAZWI_READ_VLD, 0);
+ }
+}
+
+static void gaudi_print_mmu_error_info(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 addr;
+ u32 val;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ val = RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE);
+ if (val & MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
+ addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
+ addr <<= 32;
+ addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);
+
+ dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
+ addr);
+
+ WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0);
+ }
+
+ val = RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE);
+ if (val & MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK) {
+ addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK;
+ addr <<= 32;
+ addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);
+
+ dev_err_ratelimited(hdev->dev,
+ "MMU access error on va 0x%llx\n", addr);
+
+ WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0);
+ }
+}
+
+/*
+ * +-------------------+------------------------------------------------------+
+ * | Configuration Reg | Description |
+ * | Address | |
+ * +-------------------+------------------------------------------------------+
+ * | 0xF30 - 0xF3F |ECC single error indication (1 bit per memory wrapper)|
+ * | |0xF30 memory wrappers 31:0 (MSB to LSB) |
+ * | |0xF34 memory wrappers 63:32 |
+ * | |0xF38 memory wrappers 95:64 |
+ * | |0xF3C memory wrappers 127:96 |
+ * +-------------------+------------------------------------------------------+
+ * | 0xF40 - 0xF4F |ECC double error indication (1 bit per memory wrapper)|
+ * | |0xF40 memory wrappers 31:0 (MSB to LSB) |
+ * | |0xF44 memory wrappers 63:32 |
+ * | |0xF48 memory wrappers 95:64 |
+ * | |0xF4C memory wrappers 127:96 |
+ * +-------------------+------------------------------------------------------+
+ */
+static void gaudi_print_ecc_info_generic(struct hl_device *hdev,
+ const char *block_name,
+ u64 block_address, int num_memories,
+ bool derr, bool disable_clock_gating)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+	int num_mem_regs = DIV_ROUND_UP(num_memories, 32);
+
+ if (block_address >= CFG_BASE)
+ block_address -= CFG_BASE;
+
+ if (derr)
+ block_address += GAUDI_ECC_DERR0_OFFSET;
+ else
+ block_address += GAUDI_ECC_SERR0_OFFSET;
+
+ if (disable_clock_gating) {
+ mutex_lock(&gaudi->clk_gate_mutex);
+ hdev->asic_funcs->disable_clock_gating(hdev);
+ }
+
+ switch (num_mem_regs) {
+ case 1:
+ dev_err(hdev->dev,
+ "%s ECC indication: 0x%08x\n",
+ block_name, RREG32(block_address));
+ break;
+ case 2:
+ dev_err(hdev->dev,
+ "%s ECC indication: 0x%08x 0x%08x\n",
+ block_name,
+ RREG32(block_address), RREG32(block_address + 4));
+ break;
+ case 3:
+ dev_err(hdev->dev,
+ "%s ECC indication: 0x%08x 0x%08x 0x%08x\n",
+ block_name,
+ RREG32(block_address), RREG32(block_address + 4),
+ RREG32(block_address + 8));
+ break;
+ case 4:
+ dev_err(hdev->dev,
+ "%s ECC indication: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ block_name,
+ RREG32(block_address), RREG32(block_address + 4),
+ RREG32(block_address + 8), RREG32(block_address + 0xc));
+ break;
+ default:
+ break;
+ }
+
+ if (disable_clock_gating) {
+ hdev->asic_funcs->enable_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+ }
+}
+
+static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
+ const char *qm_name,
+ u64 glbl_sts_addr,
+ u64 arb_err_addr)
+{
+ u32 i, j, glbl_sts_val, arb_err_val, glbl_sts_clr_val;
+ char reg_desc[32];
+
+ /* Iterate through all stream GLBL_STS1 registers + Lower CP */
+ for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) {
+ glbl_sts_clr_val = 0;
+ glbl_sts_val = RREG32(glbl_sts_addr + 4 * i);
+
+ if (!glbl_sts_val)
+ continue;
+
+ if (i == QMAN_STREAMS)
+ snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerCP");
+ else
+ snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i);
+
+ for (j = 0 ; j < GAUDI_NUM_OF_QM_ERR_CAUSE ; j++) {
+ if (glbl_sts_val & BIT(j)) {
+ dev_err_ratelimited(hdev->dev,
+ "%s %s. err cause: %s\n",
+ qm_name, reg_desc,
+ gaudi_qman_error_cause[j]);
+ glbl_sts_clr_val |= BIT(j);
+ }
+ }
+
+		/* Write 1 to clear errors */
+ WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
+ }
+
+ arb_err_val = RREG32(arb_err_addr);
+
+ if (!arb_err_val)
+ return;
+
+ for (j = 0 ; j < GAUDI_NUM_OF_QM_ARB_ERR_CAUSE ; j++) {
+ if (arb_err_val & BIT(j)) {
+ dev_err_ratelimited(hdev->dev,
+ "%s ARB_ERR. err cause: %s\n",
+ qm_name,
+ gaudi_qman_arb_error_cause[j]);
+ }
+ }
+}
+
+static void gaudi_print_ecc_info(struct hl_device *hdev, u16 event_type)
+{
+ u64 block_address;
+ u8 index;
+ int num_memories;
+ char desc[32];
+ bool derr;
+ bool disable_clock_gating;
+
+ switch (event_type) {
+ case GAUDI_EVENT_PCIE_CORE_SERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
+ block_address = mmPCIE_CORE_BASE;
+ num_memories = 51;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_CORE_DERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
+ block_address = mmPCIE_CORE_BASE;
+ num_memories = 51;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_IF_SERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
+ block_address = mmPCIE_WRAP_BASE;
+ num_memories = 11;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_IF_DERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
+ block_address = mmPCIE_WRAP_BASE;
+ num_memories = 11;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_PHY_SERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
+ block_address = mmPCIE_PHY_BASE;
+ num_memories = 4;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_PHY_DERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
+ block_address = mmPCIE_PHY_BASE;
+ num_memories = 4;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
+ index = event_type - GAUDI_EVENT_TPC0_SERR;
+ block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
+ num_memories = 90;
+ derr = false;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
+ index = event_type - GAUDI_EVENT_TPC0_DERR;
+ block_address =
+ mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
+ num_memories = 90;
+ derr = true;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_MME0_ACC_SERR:
+ case GAUDI_EVENT_MME1_ACC_SERR:
+ case GAUDI_EVENT_MME2_ACC_SERR:
+ case GAUDI_EVENT_MME3_ACC_SERR:
+ index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4;
+ block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
+ num_memories = 128;
+ derr = false;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_MME0_ACC_DERR:
+ case GAUDI_EVENT_MME1_ACC_DERR:
+ case GAUDI_EVENT_MME2_ACC_DERR:
+ case GAUDI_EVENT_MME3_ACC_DERR:
+ index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4;
+ block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
+ num_memories = 128;
+ derr = true;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_MME0_SBAB_SERR:
+ case GAUDI_EVENT_MME1_SBAB_SERR:
+ case GAUDI_EVENT_MME2_SBAB_SERR:
+ case GAUDI_EVENT_MME3_SBAB_SERR:
+ index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4;
+ block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
+ num_memories = 33;
+ derr = false;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_MME0_SBAB_DERR:
+ case GAUDI_EVENT_MME1_SBAB_DERR:
+ case GAUDI_EVENT_MME2_SBAB_DERR:
+ case GAUDI_EVENT_MME3_SBAB_DERR:
+ index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4;
+ block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
+ num_memories = 33;
+ derr = true;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
+ index = event_type - GAUDI_EVENT_DMA0_SERR_ECC;
+ block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
+ num_memories = 16;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
+ index = event_type - GAUDI_EVENT_DMA0_DERR_ECC;
+ block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
+ num_memories = 16;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_CPU_IF_ECC_SERR:
+ block_address = mmCPU_IF_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 4;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_CPU_IF_ECC_DERR:
+ block_address = mmCPU_IF_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 4;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PSOC_MEM_SERR:
+ block_address = mmPSOC_GLOBAL_CONF_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 4;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PSOC_MEM_DERR:
+ block_address = mmPSOC_GLOBAL_CONF_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 4;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
+ block_address = mmPSOC_CS_TRACE_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 2;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
+ block_address = mmPSOC_CS_TRACE_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 2;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
+ index = event_type - GAUDI_EVENT_SRAM0_SERR;
+ block_address =
+ mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
+ num_memories = 2;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
+ index = event_type - GAUDI_EVENT_SRAM0_DERR;
+ block_address =
+ mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
+ num_memories = 2;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
+ index = event_type - GAUDI_EVENT_DMA_IF0_SERR;
+ block_address = mmDMA_IF_W_S_BASE +
+ index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
+ snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
+ num_memories = 60;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
+ index = event_type - GAUDI_EVENT_DMA_IF0_DERR;
+ block_address = mmDMA_IF_W_S_BASE +
+ index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
+ snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
+ derr = true;
+ num_memories = 60;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
+ index = event_type - GAUDI_EVENT_HBM_0_SERR;
+ /* HBM Registers are at different offsets */
+ block_address = mmHBM0_BASE + 0x8000 +
+ index * (mmHBM1_BASE - mmHBM0_BASE);
+ snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
+ derr = false;
+ num_memories = 64;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
+		index = event_type - GAUDI_EVENT_HBM_0_DERR;
+ /* HBM Registers are at different offsets */
+ block_address = mmHBM0_BASE + 0x8000 +
+ index * (mmHBM1_BASE - mmHBM0_BASE);
+ snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
+ derr = true;
+ num_memories = 64;
+ disable_clock_gating = false;
+ break;
+ default:
+ return;
+ }
+
+ gaudi_print_ecc_info_generic(hdev, desc, block_address, num_memories,
+ derr, disable_clock_gating);
+}
+
+static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
+{
+ u64 glbl_sts_addr, arb_err_addr;
+ u8 index;
+ char desc[32];
+
+ switch (event_type) {
+ case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+ index = event_type - GAUDI_EVENT_TPC0_QM;
+ glbl_sts_addr =
+ mmTPC0_QM_GLBL_STS1_0 + index * TPC_QMAN_OFFSET;
+ arb_err_addr =
+ mmTPC0_QM_ARB_ERR_CAUSE + index * TPC_QMAN_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC_QM", index);
+ break;
+ case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
+ index = event_type - GAUDI_EVENT_MME0_QM;
+ glbl_sts_addr =
+ mmMME0_QM_GLBL_STS1_0 + index * MME_QMAN_OFFSET;
+ arb_err_addr =
+ mmMME0_QM_ARB_ERR_CAUSE + index * MME_QMAN_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "MME_QM", index);
+ break;
+ case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
+ index = event_type - GAUDI_EVENT_DMA0_QM;
+ glbl_sts_addr =
+ mmDMA0_QM_GLBL_STS1_0 + index * DMA_QMAN_OFFSET;
+ arb_err_addr =
+ mmDMA0_QM_ARB_ERR_CAUSE + index * DMA_QMAN_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index);
+ break;
+ default:
+ return;
+ }
+
+ gaudi_handle_qman_err_generic(hdev, desc, glbl_sts_addr, arb_err_addr);
+}
+
+static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
+ bool razwi)
+{
+ char desc[64] = "";
+
+ gaudi_get_event_desc(event_type, desc, sizeof(desc));
+ dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
+ event_type, desc);
+
+ gaudi_print_ecc_info(hdev, event_type);
+
+ if (razwi) {
+ gaudi_print_razwi_info(hdev);
+ gaudi_print_mmu_error_info(hdev);
+ }
+}
+
+static int gaudi_soft_reset_late_init(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ /* Unmask all IRQs since some could have been received
+ * during the soft reset
+ */
+ return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events));
+}
+
+static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device)
+{
+ int ch, err = 0;
+ u32 base, val, val2;
+
+ base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET;
+ for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) {
+ val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF);
+ val = (val & 0xFF) | ((val >> 8) & 0xFF);
+ if (val) {
+ err = 1;
+ dev_err(hdev->dev,
+ "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
+ device, ch * 2, val & 0x1, (val >> 1) & 0x1,
+ (val >> 2) & 0x1, (val >> 3) & 0x1,
+ (val >> 4) & 0x1);
+
+ val2 = RREG32(base + ch * 0x1000 + 0x060);
+ dev_err(hdev->dev,
+ "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n",
+ device, ch * 2,
+ RREG32(base + ch * 0x1000 + 0x064),
+ (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
+ (val2 & 0xFF0000) >> 16,
+ (val2 & 0xFF000000) >> 24);
+ }
+
+ val = RREG32_MASK(base + ch * 0x1000 + 0x07C, 0x0000FFFF);
+ val = (val & 0xFF) | ((val >> 8) & 0xFF);
+ if (val) {
+ err = 1;
+ dev_err(hdev->dev,
+ "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
+ device, ch * 2 + 1, val & 0x1, (val >> 1) & 0x1,
+ (val >> 2) & 0x1, (val >> 3) & 0x1,
+ (val >> 4) & 0x1);
+
+ val2 = RREG32(base + ch * 0x1000 + 0x070);
+ dev_err(hdev->dev,
+ "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n",
+ device, ch * 2 + 1,
+ RREG32(base + ch * 0x1000 + 0x074),
+ (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
+ (val2 & 0xFF0000) >> 16,
+ (val2 & 0xFF000000) >> 24);
+ }
+
+ /* Clear interrupts */
+ RMWREG32(base + (ch * 0x1000) + 0x060, 0x1C8, 0x1FF);
+ RMWREG32(base + (ch * 0x1000) + 0x070, 0x1C8, 0x1FF);
+ WREG32(base + (ch * 0x1000) + 0x06C, 0x1F1F);
+ WREG32(base + (ch * 0x1000) + 0x07C, 0x1F1F);
+ RMWREG32(base + (ch * 0x1000) + 0x060, 0x0, 0xF);
+ RMWREG32(base + (ch * 0x1000) + 0x070, 0x0, 0xF);
+ }
+
+ val = RREG32(base + 0x8F30);
+ val2 = RREG32(base + 0x8F34);
+ if (val | val2) {
+ err = 1;
+ dev_err(hdev->dev,
+ "HBM %d MC SRAM SERR info: Reg 0x8F30=0x%x, Reg 0x8F34=0x%x\n",
+ device, val, val2);
+ }
+ val = RREG32(base + 0x8F40);
+ val2 = RREG32(base + 0x8F44);
+ if (val | val2) {
+ err = 1;
+ dev_err(hdev->dev,
+ "HBM %d MC SRAM DERR info: Reg 0x8F40=0x%x, Reg 0x8F44=0x%x\n",
+ device, val, val2);
+ }
+
+ return err;
+}
+
+static int gaudi_hbm_event_to_dev(u16 hbm_event_type)
+{
+ switch (hbm_event_type) {
+ case GAUDI_EVENT_HBM0_SPI_0:
+ case GAUDI_EVENT_HBM0_SPI_1:
+ return 0;
+ case GAUDI_EVENT_HBM1_SPI_0:
+ case GAUDI_EVENT_HBM1_SPI_1:
+ return 1;
+ case GAUDI_EVENT_HBM2_SPI_0:
+ case GAUDI_EVENT_HBM2_SPI_1:
+ return 2;
+ case GAUDI_EVENT_HBM3_SPI_0:
+ case GAUDI_EVENT_HBM3_SPI_1:
+ return 3;
+ default:
+ break;
+ }
+
+ /* Should never happen */
+ return 0;
+}
+
+static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
+ char *interrupt_name)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 tpc_offset = tpc_id * TPC_CFG_OFFSET, tpc_interrupts_cause, i;
+ bool soft_reset_required = false;
+
+	/* Accessing the TPC_INTR_CAUSE registers requires disabling clock
+	 * gating, so this cannot be done by ArmCP and must instead be done
+	 * by the driver.
+	 */
+
+ mutex_lock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ tpc_interrupts_cause = RREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset) &
+ TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK;
+
+ for (i = 0 ; i < GAUDI_NUM_OF_TPC_INTR_CAUSE ; i++)
+ if (tpc_interrupts_cause & BIT(i)) {
+ dev_err_ratelimited(hdev->dev,
+ "TPC%d_%s interrupt cause: %s\n",
+ tpc_id, interrupt_name,
+ gaudi_tpc_interrupts_cause[i]);
+			/* If this is a QM error, we need to soft-reset */
+ if (i == 15)
+ soft_reset_required = true;
+ }
+
+ /* Clear interrupts */
+ WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);
+
+ hdev->asic_funcs->enable_clock_gating(hdev);
+
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ return soft_reset_required;
+}
+
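+/* TPC events sit at fixed strides in the event table: DEC events every 2
+ * entries and KRN_ERR events every 6, hence the arithmetic below
+ */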
+static int tpc_dec_event_to_tpc_id(u16 tpc_dec_event_type)
+{
+ return (tpc_dec_event_type - GAUDI_EVENT_TPC0_DEC) >> 1;
+}
+
+static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
+{
+ return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6;
+}
+
+static void gaudi_print_clk_change_info(struct hl_device *hdev,
+ u16 event_type)
+{
+ switch (event_type) {
+ case GAUDI_EVENT_FIX_POWER_ENV_S:
+ dev_info_ratelimited(hdev->dev,
+ "Clock throttling due to power consumption\n");
+ break;
+
+ case GAUDI_EVENT_FIX_POWER_ENV_E:
+ dev_info_ratelimited(hdev->dev,
+			"Power envelope is safe, back to optimal clock\n");
+ break;
+
+ case GAUDI_EVENT_FIX_THERMAL_ENV_S:
+ dev_info_ratelimited(hdev->dev,
+ "Clock throttling due to overheating\n");
+ break;
+
+ case GAUDI_EVENT_FIX_THERMAL_ENV_E:
+ dev_info_ratelimited(hdev->dev,
+			"Thermal envelope is safe, back to optimal clock\n");
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid clock change event %d\n",
+ event_type);
+ break;
+ }
+}
+
+static void gaudi_handle_eqe(struct hl_device *hdev,
+ struct hl_eq_entry *eq_entry)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
+ u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
+ >> EQ_CTL_EVENT_TYPE_SHIFT);
+ u8 cause;
+ bool reset_required;
+
+ gaudi->events_stat[event_type]++;
+ gaudi->events_stat_aggregate[event_type]++;
+
+ switch (event_type) {
+ case GAUDI_EVENT_PCIE_CORE_DERR:
+ case GAUDI_EVENT_PCIE_IF_DERR:
+ case GAUDI_EVENT_PCIE_PHY_DERR:
+ case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
+ case GAUDI_EVENT_MME0_ACC_DERR:
+ case GAUDI_EVENT_MME0_SBAB_DERR:
+ case GAUDI_EVENT_MME1_ACC_DERR:
+ case GAUDI_EVENT_MME1_SBAB_DERR:
+ case GAUDI_EVENT_MME2_ACC_DERR:
+ case GAUDI_EVENT_MME2_SBAB_DERR:
+ case GAUDI_EVENT_MME3_ACC_DERR:
+ case GAUDI_EVENT_MME3_SBAB_DERR:
+ case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
+ case GAUDI_EVENT_CPU_IF_ECC_DERR:
+ case GAUDI_EVENT_PSOC_MEM_DERR:
+ case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
+ case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
+ case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
+ case GAUDI_EVENT_GIC500:
+ case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
+ case GAUDI_EVENT_MMU_DERR:
+ case GAUDI_EVENT_AXI_ECC:
+ case GAUDI_EVENT_L2_RAM_ECC:
+ case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
+ gaudi_print_irq_info(hdev, event_type, false);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GAUDI_EVENT_HBM0_SPI_0:
+ case GAUDI_EVENT_HBM1_SPI_0:
+ case GAUDI_EVENT_HBM2_SPI_0:
+ case GAUDI_EVENT_HBM3_SPI_0:
+ gaudi_print_irq_info(hdev, event_type, false);
+ gaudi_hbm_read_interrupts(hdev,
+ gaudi_hbm_event_to_dev(event_type));
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GAUDI_EVENT_HBM0_SPI_1:
+ case GAUDI_EVENT_HBM1_SPI_1:
+ case GAUDI_EVENT_HBM2_SPI_1:
+ case GAUDI_EVENT_HBM3_SPI_1:
+ gaudi_print_irq_info(hdev, event_type, false);
+ gaudi_hbm_read_interrupts(hdev,
+ gaudi_hbm_event_to_dev(event_type));
+ break;
+
+ case GAUDI_EVENT_TPC0_DEC:
+ case GAUDI_EVENT_TPC1_DEC:
+ case GAUDI_EVENT_TPC2_DEC:
+ case GAUDI_EVENT_TPC3_DEC:
+ case GAUDI_EVENT_TPC4_DEC:
+ case GAUDI_EVENT_TPC5_DEC:
+ case GAUDI_EVENT_TPC6_DEC:
+ case GAUDI_EVENT_TPC7_DEC:
+ gaudi_print_irq_info(hdev, event_type, true);
+ reset_required = gaudi_tpc_read_interrupts(hdev,
+ tpc_dec_event_to_tpc_id(event_type),
+ "AXI_SLV_DEC_Error");
+ if (reset_required) {
+ dev_err(hdev->dev, "hard reset required due to %s\n",
+ gaudi_irq_map_table[event_type].name);
+
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ } else {
+ hl_fw_unmask_irq(hdev, event_type);
+ }
+ break;
+
+ case GAUDI_EVENT_TPC0_KRN_ERR:
+ case GAUDI_EVENT_TPC1_KRN_ERR:
+ case GAUDI_EVENT_TPC2_KRN_ERR:
+ case GAUDI_EVENT_TPC3_KRN_ERR:
+ case GAUDI_EVENT_TPC4_KRN_ERR:
+ case GAUDI_EVENT_TPC5_KRN_ERR:
+ case GAUDI_EVENT_TPC6_KRN_ERR:
+ case GAUDI_EVENT_TPC7_KRN_ERR:
+ gaudi_print_irq_info(hdev, event_type, true);
+ reset_required = gaudi_tpc_read_interrupts(hdev,
+ tpc_krn_event_to_tpc_id(event_type),
+ "KRN_ERR");
+ if (reset_required) {
+ dev_err(hdev->dev, "hard reset required due to %s\n",
+ gaudi_irq_map_table[event_type].name);
+
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ } else {
+ hl_fw_unmask_irq(hdev, event_type);
+ }
+ break;
+
+ case GAUDI_EVENT_PCIE_CORE_SERR:
+ case GAUDI_EVENT_PCIE_IF_SERR:
+ case GAUDI_EVENT_PCIE_PHY_SERR:
+ case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
+ case GAUDI_EVENT_MME0_ACC_SERR:
+ case GAUDI_EVENT_MME0_SBAB_SERR:
+ case GAUDI_EVENT_MME1_ACC_SERR:
+ case GAUDI_EVENT_MME1_SBAB_SERR:
+ case GAUDI_EVENT_MME2_ACC_SERR:
+ case GAUDI_EVENT_MME2_SBAB_SERR:
+ case GAUDI_EVENT_MME3_ACC_SERR:
+ case GAUDI_EVENT_MME3_SBAB_SERR:
+ case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
+ case GAUDI_EVENT_CPU_IF_ECC_SERR:
+ case GAUDI_EVENT_PSOC_MEM_SERR:
+ case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
+ case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
+ case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
+ case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
+ fallthrough;
+ case GAUDI_EVENT_MMU_SERR:
+ case GAUDI_EVENT_PCIE_DEC:
+ case GAUDI_EVENT_MME0_WBC_RSP:
+ case GAUDI_EVENT_MME0_SBAB0_RSP:
+ case GAUDI_EVENT_MME1_WBC_RSP:
+ case GAUDI_EVENT_MME1_SBAB0_RSP:
+ case GAUDI_EVENT_MME2_WBC_RSP:
+ case GAUDI_EVENT_MME2_SBAB0_RSP:
+ case GAUDI_EVENT_MME3_WBC_RSP:
+ case GAUDI_EVENT_MME3_SBAB0_RSP:
+ case GAUDI_EVENT_CPU_AXI_SPLITTER:
+ case GAUDI_EVENT_PSOC_AXI_DEC:
+ case GAUDI_EVENT_PSOC_PRSTN_FALL:
+ case GAUDI_EVENT_MMU_PAGE_FAULT:
+ case GAUDI_EVENT_MMU_WR_PERM:
+ case GAUDI_EVENT_RAZWI_OR_ADC:
+ case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+ case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
+ case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
+ fallthrough;
+ case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_qman_err(hdev, event_type);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
+ case GAUDI_EVENT_RAZWI_OR_ADC_SW:
+ gaudi_print_irq_info(hdev, event_type, true);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GAUDI_EVENT_TPC0_BMON_SPMU:
+ case GAUDI_EVENT_TPC1_BMON_SPMU:
+ case GAUDI_EVENT_TPC2_BMON_SPMU:
+ case GAUDI_EVENT_TPC3_BMON_SPMU:
+ case GAUDI_EVENT_TPC4_BMON_SPMU:
+ case GAUDI_EVENT_TPC5_BMON_SPMU:
+ case GAUDI_EVENT_TPC6_BMON_SPMU:
+ case GAUDI_EVENT_TPC7_BMON_SPMU:
+ case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7:
+ gaudi_print_irq_info(hdev, event_type, false);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
+ case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
+ gaudi_print_clk_change_info(hdev, event_type);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
+ case GAUDI_EVENT_PSOC_GPIO_U16_0:
+ cause = le64_to_cpu(eq_entry->data[0]) & 0xFF;
+ dev_err(hdev->dev,
+ "Received high temp H/W interrupt %d (cause %d)\n",
+ event_type, cause);
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
+ event_type);
+ break;
+ }
+}
+
+static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate,
+ u32 *size)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (aggregate) {
+ *size = (u32) sizeof(gaudi->events_stat_aggregate);
+ return gaudi->events_stat_aggregate;
+ }
+
+ *size = (u32) sizeof(gaudi->events_stat);
+ return gaudi->events_stat;
+}
+
+static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+ u32 flags)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 status, timeout_usec;
+ int rc;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
+ return 0;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ mutex_lock(&hdev->mmu_cache_lock);
+
+ /* L0 & L1 invalidation */
+ WREG32(mmSTLB_INV_PS, 2);
+
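+ /* the poll below waits for HW to clear INV_PS, signaling completion */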
+ rc = hl_poll_timeout(
+ hdev,
+ mmSTLB_INV_PS,
+ status,
+ !status,
+ 1000,
+ timeout_usec);
+
+ WREG32(mmSTLB_INV_SET, 0);
+
+ mutex_unlock(&hdev->mmu_cache_lock);
+
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
+}
+
+static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
+ bool is_hard, u32 asid, u64 va, u64 size)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 status, timeout_usec;
+ u32 inv_data;
+ u32 pi;
+ int rc;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
+ return 0;
+
+ mutex_lock(&hdev->mmu_cache_lock);
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ /*
+ * TODO: currently we invalidate the entire L0 & L1, as in a regular
+ * hard invalidation. We need to apply invalidation of specific cache
+ * lines with a mask of ASID & VA & size.
+ * Note that L1 will be flushed entirely in any case.
+ */
+
+ /* L0 & L1 invalidation */
+ inv_data = RREG32(mmSTLB_CACHE_INV);
+ /* PI is 8 bit */
+ pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
+ WREG32(mmSTLB_CACHE_INV,
+ (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
+
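+ /* wait for the consumer index to catch up with the new producer index */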
+ rc = hl_poll_timeout(
+ hdev,
+ mmSTLB_INV_CONSUMER_INDEX,
+ status,
+ status == pi,
+ 1000,
+ timeout_usec);
+
+ mutex_unlock(&hdev->mmu_cache_lock);
+
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
+}
+
+static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev,
+ u32 asid, u64 phys_addr)
+{
+ u32 status, timeout_usec;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ WREG32(MMU_ASID, asid);
+ WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
+ WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
+ WREG32(MMU_BUSY, 0x80000000);
+
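+ /* wait for HW to clear the busy bit, i.e. the hop0 update is done */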
+ rc = hl_poll_timeout(
+ hdev,
+ MMU_BUSY,
+ status,
+ !(status & 0x80000000),
+ 1000,
+ timeout_usec);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout during MMU hop0 config of asid %d\n", asid);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int gaudi_send_heartbeat(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_send_heartbeat(hdev);
+}
+
+static int gaudi_armcp_info_get(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ rc = hl_fw_armcp_info_get(hdev);
+ if (rc)
+ return rc;
+
+ if (!strlen(prop->armcp_info.card_name))
+ strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
+ struct seq_file *s)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
+ const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n";
+ u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts;
+ bool is_idle = true, is_eng_idle, is_slave;
+ u64 offset;
+ int i, dma_id;
+
+ mutex_lock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ if (s)
+ seq_puts(s,
+ "\nDMA is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
+ "--- ------- ------------ ---------- -------------\n");
+
+ for (i = 0 ; i < DMA_NUMBER_OF_CHNLS ; i++) {
+ dma_id = gaudi_dma_assignment[i];
+ offset = dma_id * DMA_QMAN_OFFSET;
+
+ qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + offset);
+ qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + offset);
+ dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + offset);
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
+ IS_DMA_IDLE(dma_core_sts0);
+ is_idle &= is_eng_idle;
+
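+ /* a set bit in the caller's mask marks a busy engine */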
+ if (mask)
+ *mask |= !is_eng_idle <<
+ (GAUDI_ENGINE_ID_DMA_0 + dma_id);
+ if (s)
+ seq_printf(s, fmt, dma_id,
+ is_eng_idle ? "Y" : "N", qm_glbl_sts0,
+ qm_cgm_sts, dma_core_sts0);
+ }
+
+ if (s)
+ seq_puts(s,
+ "\nTPC is_idle QM_GLBL_STS0 QM_CGM_STS CFG_STATUS\n"
+ "--- ------- ------------ ---------- ----------\n");
+
+ for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ offset = i * TPC_QMAN_OFFSET;
+ qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + offset);
+ qm_cgm_sts = RREG32(mmTPC0_QM_CGM_STS + offset);
+ tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + offset);
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
+ IS_TPC_IDLE(tpc_cfg_sts);
+ is_idle &= is_eng_idle;
+
+ if (mask)
+ *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_TPC_0 + i);
+ if (s)
+ seq_printf(s, fmt, i,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
+ }
+
+ if (s)
+ seq_puts(s,
+ "\nMME is_idle QM_GLBL_STS0 QM_CGM_STS ARCH_STATUS\n"
+ "--- ------- ------------ ---------- -----------\n");
+
+ for (i = 0 ; i < MME_NUMBER_OF_ENGINES ; i++) {
+ offset = i * MME_QMAN_OFFSET;
+ mme_arch_sts = RREG32(mmMME0_CTRL_ARCH_STATUS + offset);
+ is_eng_idle = IS_MME_IDLE(mme_arch_sts);
+
+ /* MME 1 & 3 are slaves, no need to check their QMANs */
+ is_slave = i % 2;
+ if (!is_slave) {
+ qm_glbl_sts0 = RREG32(mmMME0_QM_GLBL_STS0 + offset);
+ qm_cgm_sts = RREG32(mmMME0_QM_CGM_STS + offset);
+ is_eng_idle &= IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
+ }
+
+ is_idle &= is_eng_idle;
+
+ if (mask)
+ *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_MME_0 + i);
+ if (s) {
+ if (!is_slave)
+ seq_printf(s, fmt, i,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts, mme_arch_sts);
+ else
+ seq_printf(s, mme_slave_fmt, i,
+ is_eng_idle ? "Y" : "N", "-",
+ "-", mme_arch_sts);
+ }
+ }
+
+ if (s)
+ seq_puts(s, "\n");
+
+ hdev->asic_funcs->enable_clock_gating(hdev);
+
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ return is_idle;
+}
+
+static void gaudi_hw_queues_lock(struct hl_device *hdev)
+ __acquires(&gaudi->hw_queues_lock)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ spin_lock(&gaudi->hw_queues_lock);
+}
+
+static void gaudi_hw_queues_unlock(struct hl_device *hdev)
+ __releases(&gaudi->hw_queues_lock)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ spin_unlock(&gaudi->hw_queues_lock);
+}
+
+static u32 gaudi_get_pci_id(struct hl_device *hdev)
+{
+ return hdev->pdev->device;
+}
+
+static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data,
+ size_t max_size)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_get_eeprom_data(hdev, data, max_size);
+}
+
+/*
+ * this function should be used only during initialization and/or after reset,
+ * when there are no active users.
+ */
+static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
+ u32 tpc_id)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 kernel_timeout;
+ u32 status, offset;
+ int rc;
+
+ offset = tpc_id * (mmTPC1_CFG_STATUS - mmTPC0_CFG_STATUS);
+
+ if (hdev->pldm)
+ kernel_timeout = GAUDI_PLDM_TPC_KERNEL_WAIT_USEC;
+ else
+ kernel_timeout = HL_DEVICE_TIMEOUT_USEC;
+
+ mutex_lock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW + offset,
+ lower_32_bits(tpc_kernel));
+ WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH + offset,
+ upper_32_bits(tpc_kernel));
+
+ WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW + offset,
+ lower_32_bits(tpc_kernel));
+ WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH + offset,
+ upper_32_bits(tpc_kernel));
+ /* set a valid LUT pointer, content is of no significance */
+ WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_LO + offset,
+ lower_32_bits(tpc_kernel));
+ WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_HI + offset,
+ upper_32_bits(tpc_kernel));
+
+ WREG32(mmTPC0_CFG_QM_SYNC_OBJECT_ADDR + offset,
+ lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0));
+
+ WREG32(mmTPC0_CFG_TPC_CMD + offset,
+ (1 << TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT |
+ 1 << TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT));
+ /* wait a bit for the engine to start executing */
+ usleep_range(1000, 1500);
+
+ /* wait until engine has finished executing */
+ rc = hl_poll_timeout(
+ hdev,
+ mmTPC0_CFG_STATUS + offset,
+ status,
+ (status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
+ TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
+ 1000,
+ kernel_timeout);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for TPC%d icache prefetch\n",
+ tpc_id);
+ hdev->asic_funcs->enable_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+ return -EIO;
+ }
+
+ WREG32(mmTPC0_CFG_TPC_EXECUTE + offset,
+ 1 << TPC0_CFG_TPC_EXECUTE_V_SHIFT);
+
+ /* wait a bit for the engine to start executing */
+ usleep_range(1000, 1500);
+
+ /* wait until engine has finished executing */
+ rc = hl_poll_timeout(
+ hdev,
+ mmTPC0_CFG_STATUS + offset,
+ status,
+ (status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
+ TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
+ 1000,
+ kernel_timeout);
+
+ /* also wait for the TPC work-queue in-flight counter to drain */
+ if (!rc)
+ rc = hl_poll_timeout(
+ hdev,
+ mmTPC0_CFG_WQ_INFLIGHT_CNTR + offset,
+ status,
+ (status == 0),
+ 1000,
+ kernel_timeout);
+
+ hdev->asic_funcs->enable_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for TPC%d kernel to execute\n",
+ tpc_id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
+{
+ return RREG32(mmHW_STATE);
+}
+
+static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
+{
+ return gaudi_cq_assignment[cq_idx];
+}
+
+static void gaudi_ext_queue_init(struct hl_device *hdev, u32 q_idx)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+ struct hl_hw_sob *hw_sob;
+ int sob, ext_idx = gaudi->ext_queue_idx++;
+
+ /*
+ * The external queues might not sit sequentially, hence use the
+ * real external queue index for the SOB/MON base id.
+ */
+ hw_queue->base_sob_id = ext_idx * HL_RSVD_SOBS;
+ hw_queue->base_mon_id = ext_idx * HL_RSVD_MONS;
+ hw_queue->next_sob_val = 1;
+ hw_queue->curr_sob_offset = 0;
+
+ for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
+ hw_sob = &hw_queue->hw_sob[sob];
+ hw_sob->hdev = hdev;
+ hw_sob->sob_id = hw_queue->base_sob_id + sob;
+ hw_sob->q_idx = q_idx;
+ kref_init(&hw_sob->kref);
+ }
+}
+
+static void gaudi_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+{
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+
+ /*
+ * In case we got here due to a stuck CS, the refcnt might be bigger
+ * than 1 and therefore we reset it.
+ */
+ kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
+ hw_queue->curr_sob_offset = 0;
+ hw_queue->next_sob_val = 1;
+}
+
+static u32 gaudi_get_signal_cb_size(struct hl_device *hdev)
+{
+ return sizeof(struct packet_msg_short) +
+ sizeof(struct packet_msg_prot) * 2;
+}
+
+static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
+{
+ return sizeof(struct packet_msg_short) * 4 +
+ sizeof(struct packet_fence) +
+ sizeof(struct packet_msg_prot) * 2;
+}
+
+static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
+{
+ struct hl_cb *cb = (struct hl_cb *) data;
+ struct packet_msg_short *pkt;
+ u32 value, ctl;
+
+ pkt = (struct packet_msg_short *) (uintptr_t) cb->kernel_address;
+ memset(pkt, 0, sizeof(*pkt));
+
+ value = 1 << GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT; /* inc by 1 */
+ value |= 1 << GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT; /* add mode */
+
+ ctl = (sob_id * 4) << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT; /* SOB id */
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
+ ctl |= 3 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S SOB base */
+ ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
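+ /* EB/RB/MB: engine, register and message barriers */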
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+
+ pkt->value = cpu_to_le32(value);
+ pkt->ctl = cpu_to_le32(ctl);
+}
+
+static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
+ u16 addr)
+{
+ u32 ctl, pkt_size = sizeof(*pkt);
+
+ memset(pkt, 0, pkt_size);
+
+ ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
+ ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
+ ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_MB_SHIFT; /* only last pkt needs MB */
+
+ pkt->value = cpu_to_le32(value);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return pkt_size;
+}
+
+static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id,
+ u16 sob_val, u16 addr)
+{
+ u32 ctl, value, pkt_size = sizeof(*pkt);
+ u8 mask = ~(1 << (sob_id & 0x7));
+
+ memset(pkt, 0, pkt_size);
+
+ value = (sob_id / 8) << GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_SHIFT;
+ value |= sob_val << GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_SHIFT;
+ value |= 0 << GAUDI_PKT_SHORT_VAL_MON_MODE_SHIFT; /* GREATER_OR_EQUAL */
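+ /* the monitor covers a group of 8 SOBs; the mask is assumed to exclude all but the target */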
+ value |= mask << GAUDI_PKT_SHORT_VAL_MON_MASK_SHIFT;
+
+ ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
+ ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
+ ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+
+ pkt->value = cpu_to_le32(value);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return pkt_size;
+}
+
+static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
+{
+ u32 ctl, cfg, pkt_size = sizeof(*pkt);
+
+ memset(pkt, 0, pkt_size);
+
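+ /* wait on fence 2 (matching CP_FENCE2_RDATA in the wait CB): target 1, dec by 1 */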
+ cfg = 1 << GAUDI_PKT_FENCE_CFG_DEC_VAL_SHIFT;
+ cfg |= 1 << GAUDI_PKT_FENCE_CFG_TARGET_VAL_SHIFT;
+ cfg |= 2 << GAUDI_PKT_FENCE_CFG_ID_SHIFT;
+
+ ctl = 0 << GAUDI_PKT_FENCE_CTL_PRED_SHIFT;
+ ctl |= PACKET_FENCE << GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT;
+ ctl |= 0 << GAUDI_PKT_FENCE_CTL_EB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_FENCE_CTL_RB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_FENCE_CTL_MB_SHIFT;
+
+ pkt->cfg = cpu_to_le32(cfg);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return pkt_size;
+}
+
+static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
+ u16 sob_val, u16 mon_id, u32 q_idx)
+{
+ struct hl_cb *cb = (struct hl_cb *) data;
+ void *buf = (void *) (uintptr_t) cb->kernel_address;
+ u64 monitor_base, fence_addr = 0;
+ u32 size = 0;
+ u16 msg_addr_offset;
+
+ switch (q_idx) {
+ case GAUDI_QUEUE_ID_DMA_0_0:
+ fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_0;
+ break;
+ case GAUDI_QUEUE_ID_DMA_0_1:
+ fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_1;
+ break;
+ case GAUDI_QUEUE_ID_DMA_0_2:
+ fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_2;
+ break;
+ case GAUDI_QUEUE_ID_DMA_0_3:
+ fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_3;
+ break;
+ case GAUDI_QUEUE_ID_DMA_1_0:
+ fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_0;
+ break;
+ case GAUDI_QUEUE_ID_DMA_1_1:
+ fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_1;
+ break;
+ case GAUDI_QUEUE_ID_DMA_1_2:
+ fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_2;
+ break;
+ case GAUDI_QUEUE_ID_DMA_1_3:
+ fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_3;
+ break;
+ case GAUDI_QUEUE_ID_DMA_5_0:
+ fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_0;
+ break;
+ case GAUDI_QUEUE_ID_DMA_5_1:
+ fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_1;
+ break;
+ case GAUDI_QUEUE_ID_DMA_5_2:
+ fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_2;
+ break;
+ case GAUDI_QUEUE_ID_DMA_5_3:
+ fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_3;
+ break;
+ default:
+ /* all valid wait-packet queue ids are handled above; this is a driver bug */
+ dev_crit(hdev->dev, "wrong queue id %d for wait packet\n",
+ q_idx);
+ return;
+ }
+
+ fence_addr += CFG_BASE;
+
+ /*
+ * monitor_base holds the content of the base0 address register, so
+ * the msg-short address offsets are computed relative to it
+ */
+ monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;
+
+ /* First monitor config packet: low address of the sync */
+ msg_addr_offset =
+ (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) -
+ monitor_base;
+
+ size += gaudi_add_mon_msg_short(buf + size, (u32) fence_addr,
+ msg_addr_offset);
+
+ /* Second monitor config packet: high address of the sync */
+ msg_addr_offset =
+ (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) -
+ monitor_base;
+
+ size += gaudi_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32),
+ msg_addr_offset);
+
+ /*
+ * Third monitor config packet: the payload, i.e. what to write when the
+ * sync triggers
+ */
+ msg_addr_offset =
+ (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) -
+ monitor_base;
+
+ size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset);
+
+ /* Fourth monitor config packet: bind the monitor to a sync object */
+ msg_addr_offset =
+ (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) -
+ monitor_base;
+ size += gaudi_add_arm_monitor_pkt(buf + size, sob_id, sob_val,
+ msg_addr_offset);
+
+ /* Fence packet */
+ size += gaudi_add_fence_pkt(buf + size);
+}
+
+static void gaudi_reset_sob(struct hl_device *hdev, void *data)
+{
+ struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
+
+ dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
+ hw_sob->sob_id);
+
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4,
+ 0);
+
+ kref_init(&hw_sob->kref);
+}
+
+static void gaudi_set_dma_mask_from_fw(struct hl_device *hdev)
+{
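+ /*
+ * the FW is assumed to write HL_POWER9_HOST_MAGIC to this sticky
+ * register when the host supports full 64-bit DMA addressing
+ */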
+ if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) ==
+ HL_POWER9_HOST_MAGIC) {
+ hdev->power9_64bit_dma_enable = 1;
+ hdev->dma_mask = 64;
+ } else {
+ hdev->power9_64bit_dma_enable = 0;
+ hdev->dma_mask = 48;
+ }
+}
+
+static u64 gaudi_get_device_time(struct hl_device *hdev)
+{
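+ /* the 64-bit device timestamp is split across two 32-bit registers */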
+ u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
+
+ return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
+}
+
+static const struct hl_asic_funcs gaudi_funcs = {
+ .early_init = gaudi_early_init,
+ .early_fini = gaudi_early_fini,
+ .late_init = gaudi_late_init,
+ .late_fini = gaudi_late_fini,
+ .sw_init = gaudi_sw_init,
+ .sw_fini = gaudi_sw_fini,
+ .hw_init = gaudi_hw_init,
+ .hw_fini = gaudi_hw_fini,
+ .halt_engines = gaudi_halt_engines,
+ .suspend = gaudi_suspend,
+ .resume = gaudi_resume,
+ .cb_mmap = gaudi_cb_mmap,
+ .ring_doorbell = gaudi_ring_doorbell,
+ .pqe_write = gaudi_pqe_write,
+ .asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
+ .asic_dma_free_coherent = gaudi_dma_free_coherent,
+ .get_int_queue_base = gaudi_get_int_queue_base,
+ .test_queues = gaudi_test_queues,
+ .asic_dma_pool_zalloc = gaudi_dma_pool_zalloc,
+ .asic_dma_pool_free = gaudi_dma_pool_free,
+ .cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc,
+ .cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free,
+ .hl_dma_unmap_sg = gaudi_dma_unmap_sg,
+ .cs_parser = gaudi_cs_parser,
+ .asic_dma_map_sg = gaudi_dma_map_sg,
+ .get_dma_desc_list_size = gaudi_get_dma_desc_list_size,
+ .add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
+ .update_eq_ci = gaudi_update_eq_ci,
+ .context_switch = gaudi_context_switch,
+ .restore_phase_topology = gaudi_restore_phase_topology,
+ .debugfs_read32 = gaudi_debugfs_read32,
+ .debugfs_write32 = gaudi_debugfs_write32,
+ .debugfs_read64 = gaudi_debugfs_read64,
+ .debugfs_write64 = gaudi_debugfs_write64,
+ .add_device_attr = gaudi_add_device_attr,
+ .handle_eqe = gaudi_handle_eqe,
+ .set_pll_profile = gaudi_set_pll_profile,
+ .get_events_stat = gaudi_get_events_stat,
+ .read_pte = gaudi_read_pte,
+ .write_pte = gaudi_write_pte,
+ .mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
+ .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
+ .send_heartbeat = gaudi_send_heartbeat,
+ .enable_clock_gating = gaudi_enable_clock_gating,
+ .disable_clock_gating = gaudi_disable_clock_gating,
+ .debug_coresight = gaudi_debug_coresight,
+ .is_device_idle = gaudi_is_device_idle,
+ .soft_reset_late_init = gaudi_soft_reset_late_init,
+ .hw_queues_lock = gaudi_hw_queues_lock,
+ .hw_queues_unlock = gaudi_hw_queues_unlock,
+ .get_pci_id = gaudi_get_pci_id,
+ .get_eeprom_data = gaudi_get_eeprom_data,
+ .send_cpu_message = gaudi_send_cpu_message,
+ .get_hw_state = gaudi_get_hw_state,
+ .pci_bars_map = gaudi_pci_bars_map,
+ .set_dram_bar_base = gaudi_set_hbm_bar_base,
+ .init_iatu = gaudi_init_iatu,
+ .rreg = hl_rreg,
+ .wreg = hl_wreg,
+ .halt_coresight = gaudi_halt_coresight,
+ .get_clk_rate = gaudi_get_clk_rate,
+ .get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
+ .read_device_fw_version = gaudi_read_device_fw_version,
+ .load_firmware_to_device = gaudi_load_firmware_to_device,
+ .load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
+ .ext_queue_init = gaudi_ext_queue_init,
+ .ext_queue_reset = gaudi_ext_queue_reset,
+ .get_signal_cb_size = gaudi_get_signal_cb_size,
+ .get_wait_cb_size = gaudi_get_wait_cb_size,
+ .gen_signal_cb = gaudi_gen_signal_cb,
+ .gen_wait_cb = gaudi_gen_wait_cb,
+ .reset_sob = gaudi_reset_sob,
+ .set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
+ .get_device_time = gaudi_get_device_time
+};
+
+/**
+ * gaudi_set_asic_funcs - set GAUDI function pointers
+ *
+ * @hdev: pointer to hl_device structure
+ */
+void gaudi_set_asic_funcs(struct hl_device *hdev)
+{
+ hdev->asic_funcs = &gaudi_funcs;
+}
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
new file mode 100644
index 000000000000..a46530d375fa
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDIP_H_
+#define GAUDIP_H_
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+#include "include/hl_boot_if.h"
+#include "include/gaudi/gaudi_packets.h"
+#include "include/gaudi/gaudi.h"
+#include "include/gaudi/gaudi_async_events.h"
+
+#define NUMBER_OF_EXT_HW_QUEUES 12
+#define NUMBER_OF_CMPLT_QUEUES NUMBER_OF_EXT_HW_QUEUES
+#define NUMBER_OF_CPU_HW_QUEUES 1
+#define NUMBER_OF_INT_HW_QUEUES 100
+#define NUMBER_OF_HW_QUEUES (NUMBER_OF_EXT_HW_QUEUES + \
+ NUMBER_OF_CPU_HW_QUEUES + \
+ NUMBER_OF_INT_HW_QUEUES)
+
+/*
+ * Number of MSI interrupt IDs:
+ * Each completion queue has 1 ID
+ * The event queue has 1 ID
+ */
+#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + \
+ NUMBER_OF_CPU_HW_QUEUES)
+
+#if (NUMBER_OF_INTERRUPTS > GAUDI_MSI_ENTRIES)
+#error "Number of MSI interrupts must be smaller or equal to GAUDI_MSI_ENTRIES"
+#endif
+
+#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
+
+#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
+
+#define GAUDI_MAX_CLK_FREQ 2200000000ull /* 2200 MHz */
+
+#define MAX_POWER_DEFAULT 200000 /* 200W */
+
+#define GAUDI_CPU_TIMEOUT_USEC 15000000 /* 15s */
+
+#define TPC_ENABLED_MASK 0xFF
+
+#define GAUDI_HBM_SIZE_32GB 0x800000000ull
+#define GAUDI_HBM_DEVICES 4
+#define GAUDI_HBM_CHANNELS 8
+#define GAUDI_HBM_CFG_BASE (mmHBM0_BASE - CFG_BASE)
+#define GAUDI_HBM_CFG_OFFSET (mmHBM1_BASE - mmHBM0_BASE)
+
+#define DMA_MAX_TRANSFER_SIZE U32_MAX
+
+#define GAUDI_DEFAULT_CARD_NAME "HL2000"
+
+#define PCI_DMA_NUMBER_OF_CHNLS 3
+#define HBM_DMA_NUMBER_OF_CHNLS 5
+#define DMA_NUMBER_OF_CHNLS (PCI_DMA_NUMBER_OF_CHNLS + \
+ HBM_DMA_NUMBER_OF_CHNLS)
+
+#define MME_NUMBER_OF_MASTER_ENGINES 2
+#define MME_NUMBER_OF_SLAVE_ENGINES 2
+#define MME_NUMBER_OF_ENGINES (MME_NUMBER_OF_MASTER_ENGINES + \
+ MME_NUMBER_OF_SLAVE_ENGINES)
+#define MME_NUMBER_OF_QMANS (MME_NUMBER_OF_MASTER_ENGINES * \
+ QMAN_STREAMS)
+
+#define QMAN_STREAMS 4
+
+#define DMA_QMAN_OFFSET (mmDMA1_QM_BASE - mmDMA0_QM_BASE)
+#define TPC_QMAN_OFFSET (mmTPC1_QM_BASE - mmTPC0_QM_BASE)
+#define MME_QMAN_OFFSET (mmMME1_QM_BASE - mmMME0_QM_BASE)
+#define NIC_MACRO_QMAN_OFFSET (mmNIC1_QM0_BASE - mmNIC0_QM0_BASE)
+
+#define TPC_CFG_OFFSET (mmTPC1_CFG_BASE - mmTPC0_CFG_BASE)
+
+#define DMA_CORE_OFFSET (mmDMA1_CORE_BASE - mmDMA0_CORE_BASE)
+
+#define SIF_RTR_CTRL_OFFSET (mmSIF_RTR_CTRL_1_BASE - mmSIF_RTR_CTRL_0_BASE)
+
+#define NIF_RTR_CTRL_OFFSET (mmNIF_RTR_CTRL_1_BASE - mmNIF_RTR_CTRL_0_BASE)
+
+#define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
+#define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
+
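+/* object counts are derived from the register address span; each object takes 4 bytes */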
+#define NUM_OF_SOB_IN_BLOCK \
+ (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
+
+#define NUM_OF_MONITORS_IN_BLOCK \
+ (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 - \
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2)
+
+
+/* DRAM Memory Map */
+
+#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
+#define MMU_PAGE_TABLES_SIZE 0x0BF00000 /* 191MB */
+#define MMU_CACHE_MNG_SIZE 0x00100000 /* 1MB */
+#define RESERVED 0x04000000 /* 64MB */
+
+#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
+#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE)
+#define MMU_CACHE_MNG_ADDR (MMU_PAGE_TABLES_ADDR + MMU_PAGE_TABLES_SIZE)
+
+#define DRAM_DRIVER_END_ADDR (MMU_CACHE_MNG_ADDR + MMU_CACHE_MNG_SIZE +\
+ RESERVED)
+
+#define DRAM_BASE_ADDR_USER 0x20000000
+
+#if (DRAM_DRIVER_END_ADDR > DRAM_BASE_ADDR_USER)
+#error "Driver must reserve no more than 512MB"
+#endif
+
+/* Internal QMANs PQ sizes */
+
+#define MME_QMAN_LENGTH 64
+#define MME_QMAN_SIZE_IN_BYTES (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
+
+#define HBM_DMA_QMAN_LENGTH 64
+#define HBM_DMA_QMAN_SIZE_IN_BYTES \
+ (HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
+
+#define TPC_QMAN_LENGTH 64
+#define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
+
+#define SRAM_USER_BASE_OFFSET GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START
+
+/* Virtual address space */
+#define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */
+#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 512GB */
+#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \
+ VA_HOST_SPACE_START) /* ~767.5TB */
+
+#define HW_CAP_PLL 0x00000001
+#define HW_CAP_HBM 0x00000002
+#define HW_CAP_MMU 0x00000004
+#define HW_CAP_MME 0x00000008
+#define HW_CAP_CPU 0x00000010
+#define HW_CAP_PCI_DMA 0x00000020
+#define HW_CAP_MSI 0x00000040
+#define HW_CAP_CPU_Q 0x00000080
+#define HW_CAP_HBM_DMA 0x00000100
+#define HW_CAP_CLK_GATE 0x00000200
+#define HW_CAP_SRAM_SCRAMBLER 0x00000400
+#define HW_CAP_HBM_SCRAMBLER 0x00000800
+
+#define HW_CAP_TPC0 0x01000000
+#define HW_CAP_TPC1 0x02000000
+#define HW_CAP_TPC2 0x04000000
+#define HW_CAP_TPC3 0x08000000
+#define HW_CAP_TPC4 0x10000000
+#define HW_CAP_TPC5 0x20000000
+#define HW_CAP_TPC6 0x40000000
+#define HW_CAP_TPC7 0x80000000
+#define HW_CAP_TPC_MASK 0xFF000000
+#define HW_CAP_TPC_SHIFT 24
+
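+/*
+ * CPU addresses carry an extension in bits [49:39]; for PCI accesses
+ * the extension is collapsed into a single window bit (bit 39)
+ */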
+#define GAUDI_CPU_PCI_MSB_ADDR(addr) (((addr) & GENMASK_ULL(49, 39)) >> 39)
+#define GAUDI_PCI_TO_CPU_ADDR(addr) \
+ do { \
+ (addr) &= ~GENMASK_ULL(49, 39); \
+ (addr) |= BIT_ULL(39); \
+ } while (0)
+#define GAUDI_CPU_TO_PCI_ADDR(addr, extension) \
+ do { \
+ (addr) &= ~GENMASK_ULL(49, 39); \
+ (addr) |= (u64) (extension) << 39; \
+ } while (0)
+
+enum gaudi_dma_channels {
+ GAUDI_PCI_DMA_1,
+ GAUDI_PCI_DMA_2,
+ GAUDI_PCI_DMA_3,
+ GAUDI_HBM_DMA_1,
+ GAUDI_HBM_DMA_2,
+ GAUDI_HBM_DMA_3,
+ GAUDI_HBM_DMA_4,
+ GAUDI_HBM_DMA_5,
+ GAUDI_DMA_MAX
+};
+
+enum gaudi_tpc_mask {
+ GAUDI_TPC_MASK_TPC0 = 0x01,
+ GAUDI_TPC_MASK_TPC1 = 0x02,
+ GAUDI_TPC_MASK_TPC2 = 0x04,
+ GAUDI_TPC_MASK_TPC3 = 0x08,
+ GAUDI_TPC_MASK_TPC4 = 0x10,
+ GAUDI_TPC_MASK_TPC5 = 0x20,
+ GAUDI_TPC_MASK_TPC6 = 0x40,
+ GAUDI_TPC_MASK_TPC7 = 0x80,
+ GAUDI_TPC_MASK_ALL = 0xFF
+};
+
+/**
+ * struct gaudi_internal_qman_info - Internal QMAN information.
+ * @pq_kernel_addr: Kernel address of the PQ memory area in the host.
+ * @pq_dma_addr: DMA address of the PQ memory area in the host.
+ * @pq_size: Size of allocated host memory for PQ.
+ */
+struct gaudi_internal_qman_info {
+ void *pq_kernel_addr;
+ dma_addr_t pq_dma_addr;
+ size_t pq_size;
+};
+
+/**
+ * struct gaudi_device - ASIC specific manage structure.
+ * @armcp_info_get: get information on device from ArmCP
+ * @hw_queues_lock: protects the H/W queues from concurrent access.
+ * @clk_gate_mutex: protects code areas that require clock gating to be disabled
+ * temporarily
+ * @internal_qmans: Internal QMANs information. The array size is larger than
+ * the actual number of internal queues because they are not in
+ * consecutive order.
+ * @hbm_bar_cur_addr: current address of HBM PCI bar.
+ * @max_freq_value: current max clk frequency.
+ * @events: array that holds all event id's
+ * @events_stat: array that holds histogram of all received events.
+ * @events_stat_aggregate: same as events_stat but doesn't get cleared on reset
+ * @hw_cap_initialized: This field contains a bit per H/W engine. When that
+ * engine is initialized, that bit is set by the driver to
+ * signal we can use this engine in later code paths.
+ * Each bit is cleared upon reset of its corresponding H/W
+ * engine.
+ * @multi_msi_mode: whether we are working in multi-MSI or single-MSI mode.
+ * Multi MSI is possible only with IOMMU enabled.
+ * @ext_queue_idx: helper index for external queues initialization.
+ */
+struct gaudi_device {
+ int (*armcp_info_get)(struct hl_device *hdev);
+
+ /* TODO: remove hw_queues_lock after moving to scheduler code */
+ spinlock_t hw_queues_lock;
+ struct mutex clk_gate_mutex;
+
+ struct gaudi_internal_qman_info internal_qmans[GAUDI_QUEUE_ID_SIZE];
+
+ u64 hbm_bar_cur_addr;
+ u64 max_freq_value;
+
+ u32 events[GAUDI_EVENT_SIZE];
+ u32 events_stat[GAUDI_EVENT_SIZE];
+ u32 events_stat_aggregate[GAUDI_EVENT_SIZE];
+ u32 hw_cap_initialized;
+ u8 multi_msi_mode;
+ u8 ext_queue_idx;
+};
+
+void gaudi_init_security(struct hl_device *hdev);
+void gaudi_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp);
+void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
+int gaudi_debug_coresight(struct hl_device *hdev, void *data);
+void gaudi_halt_coresight(struct hl_device *hdev);
+int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+
+#endif /* GAUDIP_H_ */
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
new file mode 100644
index 000000000000..bf0e062d7b87
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudiP.h"
+#include "include/gaudi/gaudi_coresight.h"
+#include "include/gaudi/asic_reg/gaudi_regs.h"
+#include "include/gaudi/gaudi_masks.h"
+
+#include <uapi/misc/habanalabs.h>
+#include <linux/coresight.h>
+
+#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
+#define SPMU_EVENT_TYPES_OFFSET 0x400
+#define SPMU_MAX_COUNTERS 6
+
+static u64 debug_stm_regs[GAUDI_STM_LAST + 1] = {
+ [GAUDI_STM_MME0_ACC] = mmMME0_ACC_STM_BASE,
+ [GAUDI_STM_MME0_SBAB] = mmMME0_SBAB_STM_BASE,
+ [GAUDI_STM_MME0_CTRL] = mmMME0_CTRL_STM_BASE,
+ [GAUDI_STM_MME1_ACC] = mmMME1_ACC_STM_BASE,
+ [GAUDI_STM_MME1_SBAB] = mmMME1_SBAB_STM_BASE,
+ [GAUDI_STM_MME1_CTRL] = mmMME1_CTRL_STM_BASE,
+ [GAUDI_STM_MME2_ACC] = mmMME2_ACC_STM_BASE,
+ [GAUDI_STM_MME2_SBAB] = mmMME2_SBAB_STM_BASE,
+ [GAUDI_STM_MME2_CTRL] = mmMME2_CTRL_STM_BASE,
+ [GAUDI_STM_MME3_ACC] = mmMME3_ACC_STM_BASE,
+ [GAUDI_STM_MME3_SBAB] = mmMME3_SBAB_STM_BASE,
+ [GAUDI_STM_MME3_CTRL] = mmMME3_CTRL_STM_BASE,
+ [GAUDI_STM_DMA_IF_W_S] = mmDMA_IF_W_S_STM_BASE,
+ [GAUDI_STM_DMA_IF_E_S] = mmDMA_IF_E_S_STM_BASE,
+ [GAUDI_STM_DMA_IF_W_N] = mmDMA_IF_W_N_STM_BASE,
+ [GAUDI_STM_DMA_IF_E_N] = mmDMA_IF_E_N_STM_BASE,
+ [GAUDI_STM_CPU] = mmCPU_STM_BASE,
+ [GAUDI_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_1_CS] = mmDMA_CH_1_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_2_CS] = mmDMA_CH_2_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_3_CS] = mmDMA_CH_3_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_4_CS] = mmDMA_CH_4_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_5_CS] = mmDMA_CH_5_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_6_CS] = mmDMA_CH_6_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_7_CS] = mmDMA_CH_7_CS_STM_BASE,
+ [GAUDI_STM_PCIE] = mmPCIE_STM_BASE,
+ [GAUDI_STM_MMU_CS] = mmMMU_CS_STM_BASE,
+ [GAUDI_STM_PSOC] = mmPSOC_STM_BASE,
+ [GAUDI_STM_NIC0_0] = mmSTM_0_NIC0_DBG_BASE,
+ [GAUDI_STM_NIC0_1] = mmSTM_1_NIC0_DBG_BASE,
+ [GAUDI_STM_NIC1_0] = mmSTM_0_NIC1_DBG_BASE,
+ [GAUDI_STM_NIC1_1] = mmSTM_1_NIC1_DBG_BASE,
+ [GAUDI_STM_NIC2_0] = mmSTM_0_NIC2_DBG_BASE,
+ [GAUDI_STM_NIC2_1] = mmSTM_1_NIC2_DBG_BASE,
+ [GAUDI_STM_NIC3_0] = mmSTM_0_NIC3_DBG_BASE,
+ [GAUDI_STM_NIC3_1] = mmSTM_1_NIC3_DBG_BASE,
+ [GAUDI_STM_NIC4_0] = mmSTM_0_NIC4_DBG_BASE,
+ [GAUDI_STM_NIC4_1] = mmSTM_1_NIC4_DBG_BASE,
+ [GAUDI_STM_TPC0_EML] = mmTPC0_EML_STM_BASE,
+ [GAUDI_STM_TPC1_EML] = mmTPC1_EML_STM_BASE,
+ [GAUDI_STM_TPC2_EML] = mmTPC2_EML_STM_BASE,
+ [GAUDI_STM_TPC3_EML] = mmTPC3_EML_STM_BASE,
+ [GAUDI_STM_TPC4_EML] = mmTPC4_EML_STM_BASE,
+ [GAUDI_STM_TPC5_EML] = mmTPC5_EML_STM_BASE,
+ [GAUDI_STM_TPC6_EML] = mmTPC6_EML_STM_BASE,
+ [GAUDI_STM_TPC7_EML] = mmTPC7_EML_STM_BASE
+};
+
+static u64 debug_etf_regs[GAUDI_ETF_LAST + 1] = {
+ [GAUDI_ETF_MME0_ACC] = mmMME0_ACC_ETF_BASE,
+ [GAUDI_ETF_MME0_SBAB] = mmMME0_SBAB_ETF_BASE,
+ [GAUDI_ETF_MME0_CTRL] = mmMME0_CTRL_ETF_BASE,
+ [GAUDI_ETF_MME1_ACC] = mmMME1_ACC_ETF_BASE,
+ [GAUDI_ETF_MME1_SBAB] = mmMME1_SBAB_ETF_BASE,
+ [GAUDI_ETF_MME1_CTRL] = mmMME1_CTRL_ETF_BASE,
+ [GAUDI_ETF_MME2_ACC] = mmMME2_MME2_ACC_ETF_BASE,
+ [GAUDI_ETF_MME2_SBAB] = mmMME2_SBAB_ETF_BASE,
+ [GAUDI_ETF_MME2_CTRL] = mmMME2_CTRL_ETF_BASE,
+ [GAUDI_ETF_MME3_ACC] = mmMME3_ACC_ETF_BASE,
+ [GAUDI_ETF_MME3_SBAB] = mmMME3_SBAB_ETF_BASE,
+ [GAUDI_ETF_MME3_CTRL] = mmMME3_CTRL_ETF_BASE,
+ [GAUDI_ETF_DMA_IF_W_S] = mmDMA_IF_W_S_ETF_BASE,
+ [GAUDI_ETF_DMA_IF_E_S] = mmDMA_IF_E_S_ETF_BASE,
+ [GAUDI_ETF_DMA_IF_W_N] = mmDMA_IF_W_N_ETF_BASE,
+ [GAUDI_ETF_DMA_IF_E_N] = mmDMA_IF_E_N_ETF_BASE,
+ [GAUDI_ETF_CPU_0] = mmCPU_ETF_0_BASE,
+ [GAUDI_ETF_CPU_1] = mmCPU_ETF_1_BASE,
+ [GAUDI_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE,
+ [GAUDI_ETF_DMA_CH_0_CS] = mmDMA_CH_0_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_1_CS] = mmDMA_CH_1_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_2_CS] = mmDMA_CH_2_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_3_CS] = mmDMA_CH_3_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_4_CS] = mmDMA_CH_4_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_5_CS] = mmDMA_CH_5_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_6_CS] = mmDMA_CH_6_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_7_CS] = mmDMA_CH_7_CS_ETF_BASE,
+ [GAUDI_ETF_PCIE] = mmPCIE_ETF_BASE,
+ [GAUDI_ETF_MMU_CS] = mmMMU_CS_ETF_BASE,
+ [GAUDI_ETF_PSOC] = mmPSOC_ETF_BASE,
+ [GAUDI_ETF_NIC0_0] = mmETF_0_NIC0_DBG_BASE,
+ [GAUDI_ETF_NIC0_1] = mmETF_1_NIC0_DBG_BASE,
+ [GAUDI_ETF_NIC1_0] = mmETF_0_NIC1_DBG_BASE,
+ [GAUDI_ETF_NIC1_1] = mmETF_1_NIC1_DBG_BASE,
+ [GAUDI_ETF_NIC2_0] = mmETF_0_NIC2_DBG_BASE,
+ [GAUDI_ETF_NIC2_1] = mmETF_1_NIC2_DBG_BASE,
+ [GAUDI_ETF_NIC3_0] = mmETF_0_NIC3_DBG_BASE,
+ [GAUDI_ETF_NIC3_1] = mmETF_1_NIC3_DBG_BASE,
+ [GAUDI_ETF_NIC4_0] = mmETF_0_NIC4_DBG_BASE,
+ [GAUDI_ETF_NIC4_1] = mmETF_1_NIC4_DBG_BASE,
+ [GAUDI_ETF_TPC0_EML] = mmTPC0_EML_ETF_BASE,
+ [GAUDI_ETF_TPC1_EML] = mmTPC1_EML_ETF_BASE,
+ [GAUDI_ETF_TPC2_EML] = mmTPC2_EML_ETF_BASE,
+ [GAUDI_ETF_TPC3_EML] = mmTPC3_EML_ETF_BASE,
+ [GAUDI_ETF_TPC4_EML] = mmTPC4_EML_ETF_BASE,
+ [GAUDI_ETF_TPC5_EML] = mmTPC5_EML_ETF_BASE,
+ [GAUDI_ETF_TPC6_EML] = mmTPC6_EML_ETF_BASE,
+ [GAUDI_ETF_TPC7_EML] = mmTPC7_EML_ETF_BASE
+};
+
+static u64 debug_funnel_regs[GAUDI_FUNNEL_LAST + 1] = {
+ [GAUDI_FUNNEL_MME0_ACC] = mmMME0_ACC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_MME1_ACC] = mmMME1_ACC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_MME2_ACC] = mmMME2_ACC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_MME3_ACC] = mmMME3_ACC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X0] = mmSRAM_Y0_X0_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X1] = mmSRAM_Y0_X1_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X2] = mmSRAM_Y0_X2_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X3] = mmSRAM_Y0_X3_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X4] = mmSRAM_Y0_X4_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X5] = mmSRAM_Y0_X5_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X6] = mmSRAM_Y0_X6_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X7] = mmSRAM_Y0_X7_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X0] = mmSRAM_Y1_X0_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X1] = mmSRAM_Y1_X1_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X2] = mmSRAM_Y1_X2_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X3] = mmSRAM_Y1_X3_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X4] = mmSRAM_Y1_X4_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X5] = mmSRAM_Y1_X5_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X6] = mmSRAM_Y1_X6_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X7] = mmSRAM_Y1_X7_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X0] = mmSRAM_Y2_X0_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X1] = mmSRAM_Y2_X1_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X2] = mmSRAM_Y2_X2_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X3] = mmSRAM_Y2_X3_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X4] = mmSRAM_Y2_X4_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X5] = mmSRAM_Y2_X5_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X6] = mmSRAM_Y2_X6_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X7] = mmSRAM_Y2_X7_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X0] = mmSRAM_Y3_X0_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X1] = mmSRAM_Y3_X1_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X2] = mmSRAM_Y3_X2_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X3] = mmSRAM_Y3_X3_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X4] = mmSRAM_Y3_X4_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X5] = mmSRAM_Y3_X5_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X6] = mmSRAM_Y3_X6_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X7] = mmSRAM_Y3_X7_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SIF_0] = mmSIF_FUNNEL_0_BASE,
+ [GAUDI_FUNNEL_SIF_1] = mmSIF_FUNNEL_1_BASE,
+ [GAUDI_FUNNEL_SIF_2] = mmSIF_FUNNEL_2_BASE,
+ [GAUDI_FUNNEL_SIF_3] = mmSIF_FUNNEL_3_BASE,
+ [GAUDI_FUNNEL_SIF_4] = mmSIF_FUNNEL_4_BASE,
+ [GAUDI_FUNNEL_SIF_5] = mmSIF_FUNNEL_5_BASE,
+ [GAUDI_FUNNEL_SIF_6] = mmSIF_FUNNEL_6_BASE,
+ [GAUDI_FUNNEL_SIF_7] = mmSIF_FUNNEL_7_BASE,
+ [GAUDI_FUNNEL_NIF_0] = mmNIF_FUNNEL_0_BASE,
+ [GAUDI_FUNNEL_NIF_1] = mmNIF_FUNNEL_1_BASE,
+ [GAUDI_FUNNEL_NIF_2] = mmNIF_FUNNEL_2_BASE,
+ [GAUDI_FUNNEL_NIF_3] = mmNIF_FUNNEL_3_BASE,
+ [GAUDI_FUNNEL_NIF_4] = mmNIF_FUNNEL_4_BASE,
+ [GAUDI_FUNNEL_NIF_5] = mmNIF_FUNNEL_5_BASE,
+ [GAUDI_FUNNEL_NIF_6] = mmNIF_FUNNEL_6_BASE,
+ [GAUDI_FUNNEL_NIF_7] = mmNIF_FUNNEL_7_BASE,
+ [GAUDI_FUNNEL_DMA_IF_W_S] = mmDMA_IF_W_S_FUNNEL_BASE,
+ [GAUDI_FUNNEL_DMA_IF_E_S] = mmDMA_IF_E_S_FUNNEL_BASE,
+ [GAUDI_FUNNEL_DMA_IF_W_N] = mmDMA_IF_W_N_FUNNEL_BASE,
+ [GAUDI_FUNNEL_DMA_IF_E_N] = mmDMA_IF_E_N_FUNNEL_BASE,
+ [GAUDI_FUNNEL_CPU] = mmCPU_FUNNEL_BASE,
+ [GAUDI_FUNNEL_NIC_TPC_W_S] = mmNIC_TPC_FUNNEL_W_S_BASE,
+ [GAUDI_FUNNEL_NIC_TPC_E_S] = mmNIC_TPC_FUNNEL_E_S_BASE,
+ [GAUDI_FUNNEL_NIC_TPC_W_N] = mmNIC_TPC_FUNNEL_W_N_BASE,
+ [GAUDI_FUNNEL_NIC_TPC_E_N] = mmNIC_TPC_FUNNEL_E_N_BASE,
+ [GAUDI_FUNNEL_PCIE] = mmPCIE_FUNNEL_BASE,
+ [GAUDI_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_NIC0] = mmFUNNEL_NIC0_DBG_BASE,
+ [GAUDI_FUNNEL_NIC1] = mmFUNNEL_NIC1_DBG_BASE,
+ [GAUDI_FUNNEL_NIC2] = mmFUNNEL_NIC2_DBG_BASE,
+ [GAUDI_FUNNEL_NIC3] = mmFUNNEL_NIC3_DBG_BASE,
+ [GAUDI_FUNNEL_NIC4] = mmFUNNEL_NIC4_DBG_BASE,
+ [GAUDI_FUNNEL_TPC0_EML] = mmTPC0_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC1_EML] = mmTPC1_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC2_EML] = mmTPC2_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC3_EML] = mmTPC3_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC4_EML] = mmTPC4_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC5_EML] = mmTPC5_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC6_EML] = mmTPC6_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC7_EML] = mmTPC7_EML_FUNNEL_BASE
+};
+
+static u64 debug_bmon_regs[GAUDI_BMON_LAST + 1] = {
+ [GAUDI_BMON_MME0_ACC_0] = mmMME0_ACC_BMON0_BASE,
+ [GAUDI_BMON_MME0_SBAB_0] = mmMME0_SBAB_BMON0_BASE,
+ [GAUDI_BMON_MME0_SBAB_1] = mmMME0_SBAB_BMON1_BASE,
+ [GAUDI_BMON_MME0_CTRL_0] = mmMME0_CTRL_BMON0_BASE,
+ [GAUDI_BMON_MME0_CTRL_1] = mmMME0_CTRL_BMON1_BASE,
+ [GAUDI_BMON_MME1_ACC_0] = mmMME1_ACC_BMON0_BASE,
+ [GAUDI_BMON_MME1_SBAB_0] = mmMME1_SBAB_BMON0_BASE,
+ [GAUDI_BMON_MME1_SBAB_1] = mmMME1_SBAB_BMON1_BASE,
+ [GAUDI_BMON_MME1_CTRL_0] = mmMME1_CTRL_BMON0_BASE,
+ [GAUDI_BMON_MME1_CTRL_1] = mmMME1_CTRL_BMON1_BASE,
+ [GAUDI_BMON_MME2_ACC_0] = mmMME2_ACC_BMON0_BASE,
+ [GAUDI_BMON_MME2_SBAB_0] = mmMME2_SBAB_BMON0_BASE,
+ [GAUDI_BMON_MME2_SBAB_1] = mmMME2_SBAB_BMON1_BASE,
+ [GAUDI_BMON_MME2_CTRL_0] = mmMME2_CTRL_BMON0_BASE,
+ [GAUDI_BMON_MME2_CTRL_1] = mmMME2_CTRL_BMON1_BASE,
+ [GAUDI_BMON_MME3_ACC_0] = mmMME3_ACC_BMON0_BASE,
+ [GAUDI_BMON_MME3_SBAB_0] = mmMME3_SBAB_BMON0_BASE,
+ [GAUDI_BMON_MME3_SBAB_1] = mmMME3_SBAB_BMON1_BASE,
+ [GAUDI_BMON_MME3_CTRL_0] = mmMME3_CTRL_BMON0_BASE,
+ [GAUDI_BMON_MME3_CTRL_1] = mmMME3_CTRL_BMON1_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_SOB_WR] = mmDMA_IF_W_S_SOB_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_0_WR] = mmDMA_IF_W_S_HBM0_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_0_RD] = mmDMA_IF_W_S_HBM0_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_1_WR] = mmDMA_IF_W_S_HBM1_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_1_RD] = mmDMA_IF_W_S_HBM1_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_SOB_WR] = mmDMA_IF_E_S_SOB_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_0_WR] = mmDMA_IF_E_S_HBM0_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_0_RD] = mmDMA_IF_E_S_HBM0_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_1_WR] = mmDMA_IF_E_S_HBM1_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_1_RD] = mmDMA_IF_E_S_HBM1_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_SOB_WR] = mmDMA_IF_W_N_SOB_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_HBM0_WR] = mmDMA_IF_W_N_HBM0_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_HBM0_RD] = mmDMA_IF_W_N_HBM0_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_HBM1_WR] = mmDMA_IF_W_N_HBM1_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_HBM1_RD] = mmDMA_IF_W_N_HBM1_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_SOB_WR] = mmDMA_IF_E_N_SOB_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_HBM0_WR] = mmDMA_IF_E_N_HBM0_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_HBM0_RD] = mmDMA_IF_E_N_HBM0_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_HBM1_WR] = mmDMA_IF_E_N_HBM1_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_HBM1_RD] = mmDMA_IF_E_N_HBM1_RD_BMON_BASE,
+ [GAUDI_BMON_CPU_WR] = mmCPU_WR_BMON_BASE,
+ [GAUDI_BMON_CPU_RD] = mmCPU_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_CH_0_0] = mmDMA_CH_0_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_0_1] = mmDMA_CH_0_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_1_0] = mmDMA_CH_1_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_1_1] = mmDMA_CH_1_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_2_0] = mmDMA_CH_2_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_2_1] = mmDMA_CH_2_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_3_0] = mmDMA_CH_3_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_3_1] = mmDMA_CH_3_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_4_0] = mmDMA_CH_4_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_4_1] = mmDMA_CH_4_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_5_0] = mmDMA_CH_5_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_5_1] = mmDMA_CH_5_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_6_0] = mmDMA_CH_6_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_6_1] = mmDMA_CH_6_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_7_0] = mmDMA_CH_7_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_7_1] = mmDMA_CH_7_BMON_1_BASE,
+ [GAUDI_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE,
+ [GAUDI_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE,
+ [GAUDI_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE,
+ [GAUDI_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE,
+ [GAUDI_BMON_MMU_0] = mmMMU_BMON_0_BASE,
+ [GAUDI_BMON_MMU_1] = mmMMU_BMON_1_BASE,
+ [GAUDI_BMON_NIC0_0] = mmBMON0_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC0_1] = mmBMON1_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC0_2] = mmBMON2_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC0_3] = mmBMON3_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC0_4] = mmBMON4_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC1_0] = mmBMON0_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC1_1] = mmBMON1_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC1_2] = mmBMON2_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC1_3] = mmBMON3_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC1_4] = mmBMON4_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC2_0] = mmBMON0_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC2_1] = mmBMON1_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC2_2] = mmBMON2_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC2_3] = mmBMON3_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC2_4] = mmBMON4_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC3_0] = mmBMON0_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC3_1] = mmBMON1_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC3_2] = mmBMON2_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC3_3] = mmBMON3_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC3_4] = mmBMON4_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC4_0] = mmBMON0_NIC4_DBG_BASE,
+ [GAUDI_BMON_NIC4_1] = mmBMON1_NIC4_DBG_BASE,
+ [GAUDI_BMON_NIC4_2] = mmBMON2_NIC4_DBG_BASE,
+ [GAUDI_BMON_NIC4_3] = mmBMON3_NIC4_DBG_BASE,
+ [GAUDI_BMON_NIC4_4] = mmBMON4_NIC4_DBG_BASE,
+ [GAUDI_BMON_TPC0_EML_0] = mmTPC0_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC0_EML_1] = mmTPC0_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC0_EML_2] = mmTPC0_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC0_EML_3] = mmTPC0_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC1_EML_0] = mmTPC1_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC1_EML_1] = mmTPC1_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC1_EML_2] = mmTPC1_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC1_EML_3] = mmTPC1_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC2_EML_0] = mmTPC2_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC2_EML_1] = mmTPC2_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC2_EML_2] = mmTPC2_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC2_EML_3] = mmTPC2_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC3_EML_0] = mmTPC3_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC3_EML_1] = mmTPC3_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC3_EML_2] = mmTPC3_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC3_EML_3] = mmTPC3_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC4_EML_0] = mmTPC4_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC4_EML_1] = mmTPC4_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC4_EML_2] = mmTPC4_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC4_EML_3] = mmTPC4_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC5_EML_0] = mmTPC5_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC5_EML_1] = mmTPC5_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC5_EML_2] = mmTPC5_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC5_EML_3] = mmTPC5_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC6_EML_0] = mmTPC6_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC6_EML_1] = mmTPC6_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC6_EML_2] = mmTPC6_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC6_EML_3] = mmTPC6_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC7_EML_0] = mmTPC7_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC7_EML_1] = mmTPC7_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC7_EML_2] = mmTPC7_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC7_EML_3] = mmTPC7_EML_BUSMON_3_BASE
+};
+
+static u64 debug_spmu_regs[GAUDI_SPMU_LAST + 1] = {
+ [GAUDI_SPMU_MME0_ACC] = mmMME0_ACC_SPMU_BASE,
+ [GAUDI_SPMU_MME0_SBAB] = mmMME0_SBAB_SPMU_BASE,
+ [GAUDI_SPMU_MME0_CTRL] = mmMME0_CTRL_SPMU_BASE,
+ [GAUDI_SPMU_MME1_ACC] = mmMME1_ACC_SPMU_BASE,
+ [GAUDI_SPMU_MME1_SBAB] = mmMME1_SBAB_SPMU_BASE,
+ [GAUDI_SPMU_MME1_CTRL] = mmMME1_CTRL_SPMU_BASE,
+ [GAUDI_SPMU_MME2_MME2_ACC] = mmMME2_ACC_SPMU_BASE,
+ [GAUDI_SPMU_MME2_SBAB] = mmMME2_SBAB_SPMU_BASE,
+ [GAUDI_SPMU_MME2_CTRL] = mmMME2_CTRL_SPMU_BASE,
+ [GAUDI_SPMU_MME3_ACC] = mmMME3_ACC_SPMU_BASE,
+ [GAUDI_SPMU_MME3_SBAB] = mmMME3_SBAB_SPMU_BASE,
+ [GAUDI_SPMU_MME3_CTRL] = mmMME3_CTRL_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_0_CS] = mmDMA_CH_0_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_1_CS] = mmDMA_CH_1_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_2_CS] = mmDMA_CH_2_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_3_CS] = mmDMA_CH_3_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_4_CS] = mmDMA_CH_4_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_5_CS] = mmDMA_CH_5_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_6_CS] = mmDMA_CH_6_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_7_CS] = mmDMA_CH_7_CS_SPMU_BASE,
+ [GAUDI_SPMU_PCIE] = mmPCIE_SPMU_BASE,
+ [GAUDI_SPMU_MMU_CS] = mmMMU_CS_SPMU_BASE,
+ [GAUDI_SPMU_NIC0_0] = mmSPMU_0_NIC0_DBG_BASE,
+ [GAUDI_SPMU_NIC0_1] = mmSPMU_1_NIC0_DBG_BASE,
+ [GAUDI_SPMU_NIC1_0] = mmSPMU_0_NIC1_DBG_BASE,
+ [GAUDI_SPMU_NIC1_1] = mmSPMU_1_NIC1_DBG_BASE,
+ [GAUDI_SPMU_NIC2_0] = mmSPMU_0_NIC2_DBG_BASE,
+ [GAUDI_SPMU_NIC2_1] = mmSPMU_1_NIC2_DBG_BASE,
+ [GAUDI_SPMU_NIC3_0] = mmSPMU_0_NIC3_DBG_BASE,
+ [GAUDI_SPMU_NIC3_1] = mmSPMU_1_NIC3_DBG_BASE,
+ [GAUDI_SPMU_NIC4_0] = mmSPMU_0_NIC4_DBG_BASE,
+ [GAUDI_SPMU_NIC4_1] = mmSPMU_1_NIC4_DBG_BASE,
+ [GAUDI_SPMU_TPC0_EML] = mmTPC0_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC1_EML] = mmTPC1_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC2_EML] = mmTPC2_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC3_EML] = mmTPC3_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC4_EML] = mmTPC4_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC5_EML] = mmTPC5_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC6_EML] = mmTPC6_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC7_EML] = mmTPC7_EML_SPMU_BASE
+};
+
+static int gaudi_coresight_timeout(struct hl_device *hdev, u64 addr,
+ int position, bool up)
+{
+ int rc;
+ u32 val;
+
+ rc = hl_poll_timeout(
+ hdev,
+ addr,
+ val,
+ up ? val & BIT(position) : !(val & BIT(position)),
+ 1000,
+ CORESIGHT_TIMEOUT_USEC);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
+ addr, position, up);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int gaudi_config_stm(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_stm *input;
+ u64 base_reg;
+ int rc;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
+ dev_err(hdev->dev, "Invalid register index in STM\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+
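+ /* 0xFB0 is the CoreSight lock access register (LAR); unlock before config */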
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + 0xE80, 0x80004);
+ WREG32(base_reg + 0xD64, 7);
+ WREG32(base_reg + 0xD60, 0);
+ WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask));
+ WREG32(base_reg + 0xD60, 1);
+ WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask));
+ WREG32(base_reg + 0xE70, 0x10);
+ WREG32(base_reg + 0xE60, 0);
+ WREG32(base_reg + 0xE00, lower_32_bits(input->sp_mask));
+ WREG32(base_reg + 0xEF4, input->id);
+ WREG32(base_reg + 0xDF4, 0x80);
+ WREG32(base_reg + 0xE8C, input->frequency);
+ WREG32(base_reg + 0xE90, 0x7FF);
+
+ /* SW-2176 - SW workaround for a HW bug */
+ if ((CFG_BASE + base_reg) >= mmDMA_CH_0_CS_STM_BASE &&
+ (CFG_BASE + base_reg) <= mmDMA_CH_7_CS_STM_BASE) {
+
+ WREG32(base_reg + 0xE68, 0xffff8005);
+ WREG32(base_reg + 0xE6C, 0x0);
+ }
+
+ WREG32(base_reg + 0xE80, 0x27 | (input->id << 16));
+ } else {
+ WREG32(base_reg + 0xE80, 4);
+ WREG32(base_reg + 0xD64, 0);
+ WREG32(base_reg + 0xD60, 1);
+ WREG32(base_reg + 0xD00, 0);
+ WREG32(base_reg + 0xD20, 0);
+ WREG32(base_reg + 0xD60, 0);
+ WREG32(base_reg + 0xE20, 0);
+ WREG32(base_reg + 0xE00, 0);
+ WREG32(base_reg + 0xDF4, 0x80);
+ WREG32(base_reg + 0xE70, 0);
+ WREG32(base_reg + 0xE60, 0);
+ WREG32(base_reg + 0xE64, 0);
+ WREG32(base_reg + 0xE8C, 0);
+
+ rc = gaudi_coresight_timeout(hdev, base_reg + 0xE80, 23, false);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to disable STM on timeout, error %d\n",
+ rc);
+ return rc;
+ }
+
+ WREG32(base_reg + 0xE80, 4);
+ }
+
+ return 0;
+}
+
+static int gaudi_config_etf(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_etf *input;
+ u64 base_reg;
+ u32 val;
+ int rc;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) {
+ dev_err(hdev->dev, "Invalid register index in ETF\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
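+ /*
+ * 0x304 is assumed to be the standard CoreSight TMC FFCR: bit 12
+ * (StopOnFl) stops on flush completion, bit 6 (FlushMan) starts a
+ * manual flush
+ */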
+ val = RREG32(base_reg + 0x304);
+ val |= 0x1000;
+ WREG32(base_reg + 0x304, val);
+ val |= 0x40;
+ WREG32(base_reg + 0x304, val);
+
+ rc = gaudi_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to %s ETF on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ rc = gaudi_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to %s ETF on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ WREG32(base_reg + 0x20, 0);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + 0x34, 0x3FFC);
+ WREG32(base_reg + 0x28, input->sink_mode);
+ WREG32(base_reg + 0x304, 0x4001);
+ WREG32(base_reg + 0x308, 0xA);
+ WREG32(base_reg + 0x20, 1);
+ } else {
+ WREG32(base_reg + 0x34, 0);
+ WREG32(base_reg + 0x28, 0);
+ WREG32(base_reg + 0x304, 0);
+ }
+
+ return 0;
+}
+
+static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr,
+ u32 size, bool *is_host)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ /* maximum address length is 50 bits */
+ if (addr >> 50) {
+ dev_err(hdev->dev,
+ "ETR buffer address shouldn't exceed 50 bits\n");
+ return false;
+ }
+
+ /* PMMU and HPMMU addresses are equal, check only one of them */
+ if ((gaudi->hw_cap_initialized & HW_CAP_MMU) &&
+ hl_mem_area_inside_range(addr, size,
+ prop->pmmu.start_addr,
+ prop->pmmu.end_addr)) {
+ *is_host = true;
+ return true;
+ }
+
+ if (hl_mem_area_inside_range(addr, size,
+ prop->dram_user_base_address,
+ prop->dram_end_address))
+ return true;
+
+ if (hl_mem_area_inside_range(addr, size,
+ prop->sram_user_base_address,
+ prop->sram_end_address))
+ return true;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+ dev_err(hdev->dev, "ETR buffer should be in SRAM/DRAM\n");
+
+ return false;
+}
+
+static int gaudi_config_etr(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_etr *input;
+ u64 msb;
+ u32 val;
+ int rc;
+
+ WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK);
+
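+ /* same stop-on-flush + manual-flush FFCR sequence as in the ETF path */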
+ val = RREG32(mmPSOC_ETR_FFCR);
+ val |= 0x1000;
+ WREG32(mmPSOC_ETR_FFCR, val);
+ val |= 0x40;
+ WREG32(mmPSOC_ETR_FFCR, val);
+
+ rc = gaudi_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ rc = gaudi_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ WREG32(mmPSOC_ETR_CTL, 0);
+
+ if (params->enable) {
+ bool is_host = false;
+
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ if (input->buffer_size == 0) {
+ dev_err(hdev->dev,
+ "ETR buffer size should be bigger than 0\n");
+ return -EINVAL;
+ }
+
+ if (!gaudi_etr_validate_address(hdev,
+ input->buffer_address, input->buffer_size,
+ &is_host)) {
+ dev_err(hdev->dev, "ETR buffer address is invalid\n");
+ return -EINVAL;
+ }
+
+ msb = upper_32_bits(input->buffer_address) >> 8;
+ msb &= PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
+ WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb);
+
+ WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
+ WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
+ WREG32(mmPSOC_ETR_MODE, input->sink_mode);
+ /* Workaround for H3 #HW-2075 bug: use small data chunks */
+ WREG32(mmPSOC_ETR_AXICTL, (is_host ? 0 : 0x700) |
+ PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ WREG32(mmPSOC_ETR_DBALO,
+ lower_32_bits(input->buffer_address));
+ WREG32(mmPSOC_ETR_DBAHI,
+ upper_32_bits(input->buffer_address));
+ WREG32(mmPSOC_ETR_FFCR, 3);
+ WREG32(mmPSOC_ETR_PSCR, 0xA);
+ WREG32(mmPSOC_ETR_CTL, 1);
+ } else {
+ WREG32(mmPSOC_ETR_BUFWM, 0);
+ WREG32(mmPSOC_ETR_RSZ, 0x400);
+ WREG32(mmPSOC_ETR_DBALO, 0);
+ WREG32(mmPSOC_ETR_DBAHI, 0);
+ WREG32(mmPSOC_ETR_PSCR, 0);
+ WREG32(mmPSOC_ETR_MODE, 0);
+ WREG32(mmPSOC_ETR_FFCR, 0);
+
+ if (params->output_size >= sizeof(u64)) {
+ u32 rwp, rwphi;
+
+ /*
+ * The trace buffer address is 50 bits wide. The end of
+ * the buffer is set in the RWP register (lower 32
+ * bits), and in the RWPHI register (upper 8 bits).
+ * The 10 msb of the 50-bit address are stored in a
+ * global configuration register.
+ */
+ rwp = RREG32(mmPSOC_ETR_RWP);
+ rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
+ msb = RREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR) &
+ PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
+ *(u64 *) params->output = ((u64) msb << 40) |
+ ((u64) rwphi << 32) | rwp;
+ }
+ }
+
+ return 0;
+}
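+
+/*
+ * To illustrate the address split documented above, the 50-bit ETR write
+ * pointer is reassembled as
+ *
+ * addr = ((u64) msb << 40) | ((u64) rwphi << 32) | rwp;
+ *
+ * so hypothetical values msb = 0x3FF, rwphi = 0xAB and rwp = 0x12345678
+ * yield 0x3FFAB12345678: bits [49:40] come from the global configuration
+ * register, bits [39:32] from RWPHI and bits [31:0] from RWP.
+ */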
+
+static int gaudi_config_funnel(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ u64 base_reg;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) {
+ dev_err(hdev->dev, "Invalid register index in FUNNEL\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_funnel_regs[params->reg_idx] - CFG_BASE;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+ WREG32(base_reg, params->enable ? 0x33F : 0);
+
+ return 0;
+}
+
+static int gaudi_config_bmon(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_bmon *input;
+ u64 base_reg;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) {
+ dev_err(hdev->dev, "Invalid register index in BMON\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+
+ WREG32(base_reg + 0x104, 1);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + 0x200, lower_32_bits(input->start_addr0));
+ WREG32(base_reg + 0x204, upper_32_bits(input->start_addr0));
+ WREG32(base_reg + 0x208, lower_32_bits(input->addr_mask0));
+ WREG32(base_reg + 0x20C, upper_32_bits(input->addr_mask0));
+ WREG32(base_reg + 0x240, lower_32_bits(input->start_addr1));
+ WREG32(base_reg + 0x244, upper_32_bits(input->start_addr1));
+ WREG32(base_reg + 0x248, lower_32_bits(input->addr_mask1));
+ WREG32(base_reg + 0x24C, upper_32_bits(input->addr_mask1));
+ WREG32(base_reg + 0x224, 0);
+ WREG32(base_reg + 0x234, 0);
+ WREG32(base_reg + 0x30C, input->bw_win);
+ WREG32(base_reg + 0x308, input->win_capture);
+ WREG32(base_reg + 0x700, 0xA000B00 | (input->id << 12));
+ WREG32(base_reg + 0x708, 0xA000A00 | (input->id << 12));
+ WREG32(base_reg + 0x70C, 0xA000C00 | (input->id << 12));
+ WREG32(base_reg + 0x100, 0x11);
+ WREG32(base_reg + 0x304, 0x1);
+ } else {
+ WREG32(base_reg + 0x200, 0);
+ WREG32(base_reg + 0x204, 0);
+ WREG32(base_reg + 0x208, 0xFFFFFFFF);
+ WREG32(base_reg + 0x20C, 0xFFFFFFFF);
+ WREG32(base_reg + 0x240, 0);
+ WREG32(base_reg + 0x244, 0);
+ WREG32(base_reg + 0x248, 0xFFFFFFFF);
+ WREG32(base_reg + 0x24C, 0xFFFFFFFF);
+ WREG32(base_reg + 0x224, 0xFFFFFFFF);
+ WREG32(base_reg + 0x234, 0x1070F);
+ WREG32(base_reg + 0x30C, 0);
+ WREG32(base_reg + 0x308, 0xFFFF);
+ WREG32(base_reg + 0x700, 0xA000B00);
+ WREG32(base_reg + 0x708, 0xA000A00);
+ WREG32(base_reg + 0x70C, 0xA000C00);
+ WREG32(base_reg + 0x100, 1);
+ WREG32(base_reg + 0x304, 0);
+ WREG32(base_reg + 0x104, 0);
+ }
+
+ return 0;
+}
+
+static int gaudi_config_spmu(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ u64 base_reg;
+ struct hl_debug_params_spmu *input;
+ u64 *output;
+ u32 output_arr_len;
+ u32 events_num;
+ u32 overflow_idx;
+ u32 cycle_cnt_idx;
+ int i;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) {
+ dev_err(hdev->dev, "Invalid register index in SPMU\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ if (input->event_types_num < 3) {
+ dev_err(hdev->dev,
+ "not enough event types values for SPMU enable\n");
+ return -EINVAL;
+ }
+
+ if (input->event_types_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev,
+ "too many event types values for SPMU enable\n");
+ return -EINVAL;
+ }
+
+ WREG32(base_reg + 0xE04, 0x41013046);
+ WREG32(base_reg + 0xE04, 0x41013040);
+
+ for (i = 0 ; i < input->event_types_num ; i++)
+ WREG32(base_reg + SPMU_EVENT_TYPES_OFFSET + i * 4,
+ input->event_types[i]);
+
+ WREG32(base_reg + 0xE04, 0x41013041);
+ WREG32(base_reg + 0xC00, 0x8000003F);
+ } else {
+ output = params->output;
+ output_arr_len = params->output_size / 8;
+ events_num = output_arr_len - 2;
+ overflow_idx = output_arr_len - 2;
+ cycle_cnt_idx = output_arr_len - 1;
+
+ if (!output)
+ return -EINVAL;
+
+ if (output_arr_len < 3) {
+ dev_err(hdev->dev,
+ "not enough values for SPMU disable\n");
+ return -EINVAL;
+ }
+
+ if (events_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev,
+ "too many events values for SPMU disable\n");
+ return -EINVAL;
+ }
+
+ WREG32(base_reg + 0xE04, 0x41013040);
+
+ for (i = 0 ; i < events_num ; i++)
+ output[i] = RREG32(base_reg + i * 8);
+
+ output[overflow_idx] = RREG32(base_reg + 0xCC0);
+
+ output[cycle_cnt_idx] = RREG32(base_reg + 0xFC);
+ output[cycle_cnt_idx] <<= 32;
+ output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8);
+
+ WREG32(base_reg + 0xCC0, 0);
+ }
+
+ return 0;
+}
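+
+/*
+ * On SPMU disable, the caller-supplied output buffer holds
+ * output_size / 8 u64 entries. For a hypothetical output_arr_len of 5
+ * (events_num = 3) the layout read back above is:
+ *
+ * output[0..2] : the three event counters
+ * output[3]    : overflow status (overflow_idx = output_arr_len - 2)
+ * output[4]    : 64-bit cycle counter (cycle_cnt_idx = output_arr_len - 1)
+ */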
+
+int gaudi_debug_coresight(struct hl_device *hdev, void *data)
+{
+ struct hl_debug_params *params = data;
+ int rc = 0;
+
+ switch (params->op) {
+ case HL_DEBUG_OP_STM:
+ rc = gaudi_config_stm(hdev, params);
+ break;
+ case HL_DEBUG_OP_ETF:
+ rc = gaudi_config_etf(hdev, params);
+ break;
+ case HL_DEBUG_OP_ETR:
+ rc = gaudi_config_etr(hdev, params);
+ break;
+ case HL_DEBUG_OP_FUNNEL:
+ rc = gaudi_config_funnel(hdev, params);
+ break;
+ case HL_DEBUG_OP_BMON:
+ rc = gaudi_config_bmon(hdev, params);
+ break;
+ case HL_DEBUG_OP_SPMU:
+ rc = gaudi_config_spmu(hdev, params);
+ break;
+ case HL_DEBUG_OP_TIMESTAMP:
+ /* Do nothing as this opcode is deprecated */
+ break;
+ default:
+ dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
+ return -EINVAL;
+ }
+
+ /* Perform read from the device to flush all configuration */
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ return rc;
+}
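+
+/*
+ * The read-back above is the usual MMIO posted-write flush idiom: PCIe
+ * writes are posted and may still be in flight when WREG32() returns,
+ * while reads are non-posted. With a placeholder register:
+ *
+ * WREG32(some_cfg_reg, val);    <- posted, may still be buffered
+ * (void) RREG32(some_cfg_reg);  <- completes only after the write lands
+ *
+ * the read cannot complete until every prior write on the same path has
+ * reached the device, so all debug configuration is guaranteed to apply.
+ */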
+
+void gaudi_halt_coresight(struct hl_device *hdev)
+{
+ struct hl_debug_params params = {};
+ int i, rc;
+
+ for (i = GAUDI_ETF_FIRST ; i <= GAUDI_ETF_LAST ; i++) {
+ params.reg_idx = i;
+ rc = gaudi_config_etf(hdev, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
+ }
+
+ rc = gaudi_config_etr(hdev, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
+}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
new file mode 100644
index 000000000000..6dd2c2a1cd70
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudiP.h"
+#include "include/gaudi/gaudi_fw_if.h"
+
+void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (freq == PLL_LAST)
+ hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+}
+
+int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
+{
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+
+ if (value < 0) {
+ dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
+ value);
+ return value;
+ }
+
+ *max_clk = (value / 1000 / 1000);
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+
+ if (value < 0) {
+ dev_err(hdev->dev,
+ "Failed to retrieve device current clock %ld\n",
+ value);
+ return value;
+ }
+
+ *cur_clk = (value / 1000 / 1000);
+
+ return 0;
+}
+
+static ssize_t clk_max_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+ if (value < 0)
+ return value;
+
+ gaudi->max_freq_value = value;
+
+ return sprintf(buf, "%lu\n", (value / 1000 / 1000));
+}
+
+static ssize_t clk_max_freq_mhz_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int rc;
+ u64 value;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto fail;
+ }
+
+ rc = kstrtoull(buf, 0, &value);
+ if (rc) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ gaudi->max_freq_value = value * 1000 * 1000;
+
+ hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+
+fail:
+ return count;
+}
+
+static ssize_t clk_cur_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", (value / 1000 / 1000));
+}
+
+static DEVICE_ATTR_RW(clk_max_freq_mhz);
+static DEVICE_ATTR_RO(clk_cur_freq_mhz);
+
+static struct attribute *gaudi_dev_attrs[] = {
+ &dev_attr_clk_max_freq_mhz.attr,
+ &dev_attr_clk_cur_freq_mhz.attr,
+ NULL,
+};
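+
+/*
+ * DEVICE_ATTR_RW(clk_max_freq_mhz) above expands (roughly) to
+ *
+ * struct device_attribute dev_attr_clk_max_freq_mhz =
+ * __ATTR(clk_max_freq_mhz, 0644,
+ * clk_max_freq_mhz_show, clk_max_freq_mhz_store);
+ *
+ * which is why the callbacks must follow the <attr>_show / <attr>_store
+ * naming convention; DEVICE_ATTR_RO() does the same with mode 0444 and
+ * no store callback. From user space the files then appear under the
+ * device's sysfs directory, e.g. "echo 1575 > clk_max_freq_mhz".
+ */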
+
+void gaudi_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp)
+{
+ dev_attr_grp->attrs = gaudi_dev_attrs;
+}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
new file mode 100644
index 000000000000..6a351e31fa6a
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -0,0 +1,9090 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudiP.h"
+#include "include/gaudi/asic_reg/gaudi_regs.h"
+
+#define GAUDI_NUMBER_OF_RR_REGS 24
+#define GAUDI_NUMBER_OF_LBW_RANGES 12
+
+static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_HIT_WPROT,
+ mmDMA_IF_W_S_DMA1_HIT_WPROT,
+ mmDMA_IF_E_S_DMA0_HIT_WPROT,
+ mmDMA_IF_E_S_DMA1_HIT_WPROT,
+ mmDMA_IF_W_N_DMA0_HIT_WPROT,
+ mmDMA_IF_W_N_DMA1_HIT_WPROT,
+ mmDMA_IF_E_N_DMA0_HIT_WPROT,
+ mmDMA_IF_E_N_DMA1_HIT_WPROT,
+ mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
+};
+
+static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_HIT_RPROT,
+ mmDMA_IF_W_S_DMA1_HIT_RPROT,
+ mmDMA_IF_E_S_DMA0_HIT_RPROT,
+ mmDMA_IF_E_S_DMA1_HIT_RPROT,
+ mmDMA_IF_W_N_DMA0_HIT_RPROT,
+ mmDMA_IF_W_N_DMA1_HIT_RPROT,
+ mmDMA_IF_E_N_DMA0_HIT_RPROT,
+ mmDMA_IF_E_N_DMA1_HIT_RPROT,
+ mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
+};
+
+static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_MIN_WPROT_0,
+ mmDMA_IF_W_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_S_DMA0_MIN_WPROT_0,
+ mmDMA_IF_E_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_W_N_DMA0_MIN_WPROT_0,
+ mmDMA_IF_W_N_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_N_DMA0_MIN_WPROT_0,
+ mmDMA_IF_E_N_DMA1_MIN_WPROT_0,
+ mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
+};
+
+static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_MAX_WPROT_0,
+ mmDMA_IF_W_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_S_DMA0_MAX_WPROT_0,
+ mmDMA_IF_E_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_W_N_DMA0_MAX_WPROT_0,
+ mmDMA_IF_W_N_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_N_DMA0_MAX_WPROT_0,
+ mmDMA_IF_E_N_DMA1_MAX_WPROT_0,
+ mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
+};
+
+static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_MIN_RPROT_0,
+ mmDMA_IF_W_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_S_DMA0_MIN_RPROT_0,
+ mmDMA_IF_E_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_W_N_DMA0_MIN_RPROT_0,
+ mmDMA_IF_W_N_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_N_DMA0_MIN_RPROT_0,
+ mmDMA_IF_E_N_DMA1_MIN_RPROT_0,
+ mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
+};
+
+static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_MAX_RPROT_0,
+ mmDMA_IF_W_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_S_DMA0_MAX_RPROT_0,
+ mmDMA_IF_E_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_W_N_DMA0_MAX_RPROT_0,
+ mmDMA_IF_W_N_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_N_DMA0_MAX_RPROT_0,
+ mmDMA_IF_E_N_DMA1_MAX_RPROT_0,
+ mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
+};
+
+static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AW,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AW,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AW,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AW,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW
+};
+
+static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AR,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AR,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AR,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AR,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR
+};
+
+static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0
+};
+
+static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0
+};
+
+static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0
+};
+
+static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0
+};
+
+static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0
+};
+
+static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0
+};
+
+static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0
+};
+
+static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0
+};
+
+/**
+ * gaudi_pb_set_block - set the given block as protected
+ *
+ * @hdev: pointer to hl_device structure
+ * @base: block base address
+ *
+ */
+static void gaudi_pb_set_block(struct hl_device *hdev, u64 base)
+{
+ u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS;
+
+ while (pb_addr & 0xFFF) {
+ WREG32(pb_addr, 0);
+ pb_addr += 4;
+ }
+}
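+
+/*
+ * Assuming PROT_BITS_OFFS is 0xF80 (as defined in the driver's private
+ * header), pb_addr starts at the protection-bits window of the block and
+ * the (pb_addr & 0xFFF) test stops at the next 4 KB boundary, making the
+ * loop above equivalent to this sketch:
+ *
+ * for (i = 0 ; i < (0x1000 - PROT_BITS_OFFS) / 4 ; i++)
+ * WREG32(base - CFG_BASE + PROT_BITS_OFFS + i * 4, 0);
+ *
+ * It clears 32 words of 32 protection bits each, one bit per 4-byte
+ * register, covering the whole 4 KB configuration page.
+ */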
+
+static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ gaudi_pb_set_block(hdev, mmMME0_ACC_BASE);
+ gaudi_pb_set_block(hdev, mmMME0_SBAB_BASE);
+ gaudi_pb_set_block(hdev, mmMME0_PRTN_BASE);
+ gaudi_pb_set_block(hdev, mmMME1_ACC_BASE);
+ gaudi_pb_set_block(hdev, mmMME1_SBAB_BASE);
+ gaudi_pb_set_block(hdev, mmMME1_PRTN_BASE);
+ gaudi_pb_set_block(hdev, mmMME2_ACC_BASE);
+ gaudi_pb_set_block(hdev, mmMME2_SBAB_BASE);
+ gaudi_pb_set_block(hdev, mmMME2_PRTN_BASE);
+ gaudi_pb_set_block(hdev, mmMME3_ACC_BASE);
+ gaudi_pb_set_block(hdev, mmMME3_SBAB_BASE);
+ gaudi_pb_set_block(hdev, mmMME3_PRTN_BASE);
+
+ WREG32(mmMME0_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmMME1_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmMME2_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmMME3_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ WREG32(mmMME0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmMME2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmMME0_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
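+
+ /*
+ * This pb_addr/word_offset/mask recipe repeats throughout this file.
+ * For a hypothetical register at page offset 0x15C (and assuming
+ * PROT_BITS_OFFS is 0xF80, so "& PROT_BITS_OFFS" extracts bits [11:7]):
+ *
+ * bit in word : (0x15C & 0x7F) >> 2         = 23
+ * word offset : ((0x15C & 0xF80) >> 7) << 2 = 8
+ *
+ * Each protection bit guards one 4-byte register, so a 32-bit protection
+ * word covers 0x80 bytes of config space. Writing ~mask keeps the
+ * registers collected in mask protected (bit = 0) and opens the rest of
+ * the word to the user.
+ */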
+
+ pb_addr = (mmMME0_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME1_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME1_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME1_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME1_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME1_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME1_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ /* MME 1 is slave, hence its whole QM block is protected (with RR) */
+
+ pb_addr = (mmMME2_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
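+ /* MME 2, unlike the slaves, keeps its QM block exposed; only the
+  * registers collected in the masks below are protected
+  */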
+ pb_addr = (mmMME2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
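+ /* Note: ARB_CFG_0 itself is left out of this mask, so it stays
+  * unprotected; the DMA QMANs below repeat the same pattern.
+  */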
+ mask = 1 << ((mmMME2_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME3_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME3_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME3_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME3_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME3_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME3_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ /* MME 3 is a slave, hence its whole QM block is protected (with RR, i.e. the range registers) */
+}
+
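+ /*
+  * The sequences in these init functions all follow one pattern: each
+  * 4KB configuration block exposes its protection bits at offset
+  * PROT_BITS_OFFS, one bit per 32-bit register, packed into 32-bit
+  * words that each cover a 128-byte chunk of the block. Assuming set
+  * bits mean "not protected" (the default), writing ~mask clears
+  * exactly the bits of the registers collected in 'mask', marking
+  * those registers as protected. A hypothetical helper, for
+  * illustration only (not part of the driver), showing the same
+  * address math:
+  *
+  *	static void gaudi_pb_protect(struct hl_device *hdev, u32 reg,
+  *					u32 mask)
+  *	{
+  *		u32 pb_addr = (reg & ~0xFFF) + PROT_BITS_OFFS;
+  *		u8 word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;
+  *
+  *		WREG32(pb_addr + word_offset, ~mask);
+  *	}
+  */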
+static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
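+ /*
+  * First close off whole fabric blocks: gaudi_pb_set_block() presumably
+  * zeroes every protection word of the given block, marking all of its
+  * registers as protected.
+  */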
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE);
+
+ gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE);
+
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE);
+
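+ /*
+  * Assuming PROT_BITS_OFFS is 0xF80, word 0x7C is the last protection
+  * word of each block, i.e. the one covering the protection bits
+  * themselves, so writing 0 here locks the protection area against
+  * further unprivileged writes.
+  */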
+ WREG32(mmDMA0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA3_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA4_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA5_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA6_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA7_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ WREG32(mmDMA0_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA1_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA2_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA3_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA4_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA5_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA6_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA7_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmDMA0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
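+ /*
+  * DMA2 QMAN: same per-window mask/complement sequence as DMA1
+  * above, applied to the identical register layout of the next
+  * DMA engine.
+  */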
+ pb_addr = (mmDMA2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
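+ /* DMA3 QMAN: repeat of the same protection-bits sequence. */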
+ pb_addr = (mmDMA3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
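+ /* DMA4 QMAN: repeat of the same protection-bits sequence. */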
+ pb_addr = (mmDMA4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
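+	/* DMA5 QMAN: same per-block protection-bits pattern as DMA4 above. */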
+ pb_addr = (mmDMA5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
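+	/* DMA6 QMAN: same per-block protection-bits pattern. */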
+ pb_addr = (mmDMA6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+}
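
Every stanza in these generated functions repeats the same address
arithmetic, so it is worth spelling out once. Below is a minimal sketch of
the decomposition, assuming PROT_BITS_OFFS is 0xF80 (the value implied by
the "(reg & PROT_BITS_OFFS) >> 7" word selection and by the writes to
offset 0x7C) and that a protection bit of 0 secures a register while 1
leaves it user-accessible, as the zero-writes covering the protection
words themselves suggest. The names pb_locate and struct pb_loc are
illustrative only and do not exist in the driver:

	#include <stdint.h>

	#define PROT_BITS_OFFS	0xF80	/* assumed: selects bits 7..11 */

	struct pb_loc {			/* hypothetical helper type */
		uint32_t pb_addr;	/* protection array of the 4 KB block */
		uint32_t word_offset;	/* byte offset of the 32-bit word */
		uint32_t bit;		/* slot of the register in that word */
	};

	/*
	 * Each 4 KB register block ends with 32 protection words, one bit
	 * per 4-byte register, so each word covers a 128-byte window of 32
	 * registers.  A bit of 0 protects the register from user access; a
	 * bit of 1 leaves it open, hence the ~mask in every WREG32 above.
	 */
	static struct pb_loc pb_locate(uint32_t reg)
	{
		struct pb_loc loc;

		loc.pb_addr = (reg & ~0xFFF) + PROT_BITS_OFFS;
		loc.word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;
		loc.bit = (reg & 0x7F) >> 2;
		return loc;
	}

Under these assumptions, each stanza is equivalent to OR-ing
1 << pb_locate(reg).bit for every register it wants to keep protected and
issuing a single WREG32(loc.pb_addr + loc.word_offset, ~mask), which
secures the listed registers and opens the rest of that 128-byte window.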
+
+static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE);
+
+ WREG32(mmTPC0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC0_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
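+	/*
+	 * LDMA_DST_BASE_LO_OFFSET_3/4 fall into the next 0x80-byte
+	 * window, hence the separate two-bit protection word below.
+	 */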
+ pb_addr = (mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+
+ word_offset = ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+
+ mask = 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+
+ word_offset = ((mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
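+	/*
+	 * TPC1: same sequence as TPC0 above.  The two writes below appear
+	 * to re-assert full protection over the last word of each page's
+	 * table (offset 0x7C guards the 0xF80-0xFFF window, i.e. the
+	 * protection bits themselves) before the selective unsecuring.
+	 */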
+ WREG32(mmTPC1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC1_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+
+ word_offset = ((mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
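+	/* TPC2: identical protection-bits sequence. */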
+ WREG32(mmTPC2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC2_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
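+	/*
+	 * Offset 0x7C is the last of the 32 protection words in each 4 KB
+	 * block and covers the protection area itself (0xF80-0xFFF); writing
+	 * 0 there appears intended to secure the TPC3 protection bits against
+	 * later modification, before the per-window masks below are applied.
+	 */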
+ WREG32(mmTPC3_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC3_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
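+	/*
+	 * Each 4 KB register block keeps one protection bit per 32-bit
+	 * register in its last 128 bytes (PROT_BITS_OFFS). For a register R:
+	 * pb_addr selects the protection area of R's block, word_offset
+	 * (bits [11:7] of R, times 4) selects the protection word covering
+	 * R's 128-byte window, and (R & 0x7F) >> 2 is R's bit within that
+	 * word. Writing ~mask clears the bits of the registers gathered
+	 * above, which seemingly leaves them secured while opening the rest
+	 * of the window.
+	 */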
+ pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ WREG32(mmTPC4_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC4_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
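+	/*
+	 * The same window-by-window sequence now repeats for the TPC4 QM and
+	 * CFG blocks.
+	 */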
+ pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ WREG32(mmTPC5_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC5_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
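+ /*
+  * mmTPC5_QM_ARB_CFG_0 is used only for the address calculation below;
+  * it is left out of the mask, so its protection bit stays set.
+  */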
+ pb_addr = (mmTPC5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
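+ /*
+  * mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 likewise only anchors the
+  * address; the mask covers the ARB_MST_QUIET_PER..ARB_MSG_AWUSER
+  * group, which evidently shares the same protection word.
+  */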
+ pb_addr = (mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ WREG32(mmTPC6_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC6_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ WREG32(mmTPC7_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC7_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+
+ word_offset = ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+
+ mask = 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+}
+
+/**
+ * gaudi_init_protection_bits - Initialize protection bits of specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * All protection bits are 1 by default, which means the registers are not
+ * protected. We need to clear (set to 0) each bit that belongs to a
+ * protected register.
+ *
+ */
+static void gaudi_init_protection_bits(struct hl_device *hdev)
+{
+ /*
+	 * In each 4K block of registers, the last 128 bytes are protection
+	 * bits - a total of 1024 bits, one for each register. Each bit
+	 * corresponds to a specific register, in register order.
+	 * So to find the bit that belongs to a given register, we calculate
+	 * its word offset and then the exact bit inside that word (each word
+	 * is 4 bytes).
+ *
+ * Register address:
+ *
+ * 31 12 11 7 6 2 1 0
+ * -----------------------------------------------------------------
+ * | Don't | word | bit location | 0 |
+ * | care | offset | inside word | |
+ * -----------------------------------------------------------------
+ *
+	 * Bits 7-11 represent the word offset inside the 128 bytes.
+	 * Bits 2-6 represent the bit location inside the word.
+ *
+ * When a bit is cleared, it means the register it represents can only
+ * be accessed by a secured entity. When the bit is set, any entity can
+ * access the register.
+ *
+	 * The last 4 bytes in the block of the PBs control the security of
+	 * the PBs themselves, so they always need to be configured to be
+	 * secured.
+ */
+
+ gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE);
+
+ gaudi_init_dma_protection_bits(hdev);
+
+ gaudi_init_mme_protection_bits(hdev);
+
+ gaudi_init_tpc_protection_bits(hdev);
+}
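
The comment inside gaudi_init_protection_bits() above fully determines the
pb_addr / word_offset / mask arithmetic used throughout this file. Below is a
minimal standalone sketch of that math; the PROT_BITS_OFFS value (assumed to be
0xF80, i.e. the last 128 bytes of a 4K block) and the sample register address
are illustrative assumptions, only the shift/mask pattern is taken from the
patch itself.

	#include <stdint.h>
	#include <stdio.h>

	#define PROT_BITS_OFFS	0xF80	/* assumed: PB area = last 128 bytes of 4K */

	int main(void)
	{
		uint32_t reg = 0x7FFC0E48;	/* hypothetical register address */

		/* base of the register's 4K block plus the PB area */
		uint32_t pb_addr = (reg & ~0xFFFu) + PROT_BITS_OFFS;
		/* bits 11:7 select the 32-bit word inside the 128-byte PB area */
		uint32_t word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;
		/* bits 6:2 select the bit inside that word */
		uint32_t bit = (reg & 0x7Fu) >> 2;

		/* prints pb_addr=0x7ffc0f80 word_offset=0x70 bit=18 */
		printf("pb_addr=0x%08x word_offset=0x%x bit=%u\n",
		       pb_addr, word_offset, bit);

		/* clearing that bit in the PB word secures the register:
		 * WREG32(pb_addr + word_offset, ~(1u << bit));
		 */
		return 0;
	}
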
+
+static void gaudi_init_range_registers_lbw(struct hl_device *hdev)
+{
+ u32 lbw_rng_start[GAUDI_NUMBER_OF_LBW_RANGES];
+ u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES];
+ int i, j;
+
+ lbw_rng_start[0] = (0xFBFE0000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[0] = (0xFBFFF000 & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[1] = (0xFC0E8000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[1] = (0xFC120000 & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[2] = (0xFC1E8000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[2] = (0xFC48FFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[3] = (0xFC600000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[3] = (0xFCC48FFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[4] = (0xFCC4A000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[4] = (0xFCCDFFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[5] = (0xFCCE4000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[5] = (0xFCD1FFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[6] = (0xFCD24000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[6] = (0xFCD5FFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[7] = (0xFCD64000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[7] = (0xFCD9FFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[8] = (0xFCDA4000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[8] = (0xFCDDFFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[9] = (0xFCDE4000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[9] = (0xFCE05FFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[10] = (0xFEC43000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[10] = (0xFEC43FFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[11] = (0xFE484000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[11] = (0xFE484FFF & 0x3FFFFFF) + 1;
+
+ for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ WREG32(gaudi_rr_lbw_hit_aw_regs[i],
+ (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
+ WREG32(gaudi_rr_lbw_hit_ar_regs[i],
+ (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
+ }
+
+ for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++)
+ for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) {
+ WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2),
+ lbw_rng_start[j]);
+
+ WREG32(gaudi_rr_lbw_min_ar_regs[i] + (j << 2),
+ lbw_rng_start[j]);
+
+ WREG32(gaudi_rr_lbw_max_aw_regs[i] + (j << 2),
+ lbw_rng_end[j]);
+
+ WREG32(gaudi_rr_lbw_max_ar_regs[i] + (j << 2),
+ lbw_rng_end[j]);
+ }
+}
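
The (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1 writes above are simply an all-ones
bitmask enabling every configured range. One plausible reading of the -1/+1
adjustments on the bounds is that the LBW range registers compare with strict
inequalities, so widening each bound by one makes the programmed window behave
as an inclusive [start, end] range; the patch does not spell this out, so the
sketch below only illustrates that assumed match rule.

	/* Assumed strict-inequality hit test; hypothetical helper, for
	 * illustration only.
	 */
	static bool lbw_range_hit(u32 addr, u32 rng_start, u32 rng_end)
	{
		return addr > rng_start && addr < rng_end;
	}
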
+
+static void gaudi_init_range_registers_hbw(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE);
+ u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE);
+
+ u32 sram_addr_lo = lower_32_bits(SRAM_BASE_ADDR);
+ u32 sram_addr_hi = upper_32_bits(SRAM_BASE_ADDR);
+
+ u32 scratch_addr_lo = lower_32_bits(PSOC_SCRATCHPAD_ADDR);
+ u32 scratch_addr_hi = upper_32_bits(PSOC_SCRATCHPAD_ADDR);
+
+ u32 pcie_fw_addr_lo = lower_32_bits(PCIE_FW_SRAM_ADDR);
+ u32 pcie_fw_addr_hi = upper_32_bits(PCIE_FW_SRAM_ADDR);
+
+ u32 spi_addr_lo = lower_32_bits(SPI_FLASH_BASE_ADDR);
+ u32 spi_addr_hi = upper_32_bits(SPI_FLASH_BASE_ADDR);
+
+ int i;
+
+ /* Configure HBW RR:
+ * 1st range is the DRAM (first 512MB)
+	 * 2nd range is the 1st 128 bytes in SRAM (for tensor DMA). This area
+	 * is defined as read-only for the user
+ * 3rd range is the PSOC scratch-pad
+ * 4th range is the PCIe F/W SRAM area
+ * 5th range is the SPI FLASH area
+ * 6th range is the host
+ */
+
+ for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F);
+ WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D);
+ }
+
+ for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i], dram_addr_hi);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i], dram_addr_hi);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i], 0xE0000000);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i], 0xE0000000);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i], 0x3FFFF);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i], 0x3FFFF);
+
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 4, sram_addr_lo);
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 4, sram_addr_hi);
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 4, 0xFFFFFF80);
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 4, 0x3FFFF);
+
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 8, scratch_addr_lo);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 8, scratch_addr_lo);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 8, scratch_addr_hi);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 8, scratch_addr_hi);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 8, 0xFFFF0000);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 8, 0xFFFF0000);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 8, 0x3FFFF);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 8, 0x3FFFF);
+
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 12, pcie_fw_addr_lo);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 12, pcie_fw_addr_lo);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 12, pcie_fw_addr_hi);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 12, pcie_fw_addr_hi);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 12, 0xFFFF8000);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 12, 0xFFFF8000);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 12, 0x3FFFF);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 12, 0x3FFFF);
+
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 16, spi_addr_lo);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 16, spi_addr_lo);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 16, spi_addr_hi);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 16, spi_addr_hi);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 16, 0xFE000000);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 16, 0xFE000000);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 16, 0x3FFFF);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 16, 0x3FFFF);
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MMU)
+ continue;
+
+ /* Protect HOST */
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 20, 0);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 20, 0);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 20, 0);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 20, 0);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 20, 0);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 20, 0);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 20, 0xFFF80);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 20, 0xFFF80);
+ }
+}
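
The base/mask pairs written above follow the common range-register idiom in
which mask bits that are set must match the base and clear bits are
don't-care. Under that (assumed) semantic, the DRAM entry - mask low
0xE0000000, mask high 0x3FFFF - leaves the low 29 address bits free, i.e. a
512MB window, matching the "first 512MB" note in the comment. Likewise, the AW
hit mask 0x1F enables all five ranges for writes while the AR mask 0x1D leaves
range 1 (the SRAM window) unchecked for reads, consistent with that area being
read-only for the user. A hedged sketch of the assumed match rule:

	/* Assumed base/mask hit test for an HBW range register; illustration
	 * only - the exact H/W comparison is not shown in this patch.
	 */
	static bool hbw_range_hit(u64 addr, u64 base, u64 mask)
	{
		return (addr & mask) == (base & mask);
	}
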
+
+/**
+ * gaudi_init_security - Initialize security model
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the security model of the device. This includes the range
+ * registers and a protection bit per register.
+ *
+ */
+void gaudi_init_security(struct hl_device *hdev)
+{
+	/* Due to H/W errata GAUDI0500, we need to override the default security
+	 * property configuration of MME SBAB and ACC to be non-privileged and
+	 * non-secured.
+ */
+ WREG32(mmMME0_SBAB_PROT, 0x2);
+ WREG32(mmMME0_ACC_PROT, 0x2);
+ WREG32(mmMME1_SBAB_PROT, 0x2);
+ WREG32(mmMME1_ACC_PROT, 0x2);
+ WREG32(mmMME2_SBAB_PROT, 0x2);
+ WREG32(mmMME2_ACC_PROT, 0x2);
+ WREG32(mmMME3_SBAB_PROT, 0x2);
+ WREG32(mmMME3_ACC_PROT, 0x2);
+
+ /* On RAZWI, 0 will be returned from RR and 0xBABA0BAD from PB */
+ WREG32(0xC01B28, 0x1);
+
+ gaudi_init_range_registers_lbw(hdev);
+
+ gaudi_init_range_registers_hbw(hdev);
+
+ gaudi_init_protection_bits(hdev);
+}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 68f065607544..0d2952bb58df 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -72,7 +72,7 @@
*
*/
-#define GOYA_UBOOT_FW_FILE "habanalabs/goya/goya-u-boot.bin"
+#define GOYA_BOOT_FIT_FILE "habanalabs/goya/goya-boot-fit.itb"
#define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
#define GOYA_MMU_REGS_NUM 63
@@ -87,6 +87,7 @@
#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
+#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
@@ -531,7 +532,7 @@ static int goya_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
- rc = hl_pci_init(hdev, 48);
+ rc = hl_pci_init(hdev);
if (rc)
return rc;
@@ -750,6 +751,8 @@ static int goya_sw_init(struct hl_device *hdev)
}
spin_lock_init(&goya->hw_queues_lock);
+ hdev->supports_coresight = true;
+ hdev->supports_soft_reset = true;
return 0;
@@ -800,6 +803,7 @@ static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
+ u32 dma_err_cfg = QMAN_DMA_ERR_MSG_EN;
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
@@ -836,7 +840,10 @@ static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
else
WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
- WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
+ if (hdev->stop_on_err)
+ dma_err_cfg |= 1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT;
+
+ WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, dma_err_cfg);
WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}
@@ -886,6 +893,7 @@ void goya_init_dma_qmans(struct hl_device *hdev)
q = &hdev->kernel_queues[0];
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
+ q->cq_id = q->msi_vec = i;
goya_init_dma_qman(hdev, i, q->bus_address);
goya_init_dma_ch(hdev, i);
}
@@ -2205,80 +2213,37 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
}
/*
- * goya_push_uboot_to_device() - Push u-boot FW code to device.
+ * goya_load_firmware_to_device() - Load LINUX FW code to device.
* @hdev: Pointer to hl_device structure.
*
- * Copy u-boot fw code from firmware file to SRAM BAR.
+ * Copy LINUX fw code from firmware file to the DDR BAR.
*
* Return: 0 on success, non-zero for failure.
*/
-static int goya_push_uboot_to_device(struct hl_device *hdev)
+static int goya_load_firmware_to_device(struct hl_device *hdev)
{
void __iomem *dst;
- dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
+ dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, GOYA_UBOOT_FW_FILE, dst);
+ return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
}
/*
- * goya_push_linux_to_device() - Push LINUX FW code to device.
+ * goya_load_boot_fit_to_device() - Load boot fit to device.
* @hdev: Pointer to hl_device structure.
*
- * Copy LINUX fw code from firmware file to HBM BAR.
+ * Copy boot fit file to SRAM BAR.
*
* Return: 0 on success, non-zero for failure.
*/
-static int goya_push_linux_to_device(struct hl_device *hdev)
+static int goya_load_boot_fit_to_device(struct hl_device *hdev)
{
void __iomem *dst;
- dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
+ dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
- return hl_fw_push_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
-}
-
-static int goya_pldm_init_cpu(struct hl_device *hdev)
-{
- u32 unit_rst_val;
- int rc;
-
- /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
- goya_init_golden_registers(hdev);
-
- /* Put ARM cores into reset */
- WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
- RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
-
- /* Reset the CA53 MACRO */
- unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
- WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
- RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
- WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
- RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
-
- rc = goya_push_uboot_to_device(hdev);
- if (rc)
- return rc;
-
- rc = goya_push_linux_to_device(hdev);
- if (rc)
- return rc;
-
- WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
- WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
-
- WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
- lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
- WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
- upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
-
- /* Release ARM core 0 from reset */
- WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
- CPU_RESET_CORE0_DEASSERT);
- RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
-
- return 0;
+ return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst);
}
/*
@@ -2286,7 +2251,7 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
* The version string should be located by that offset.
*/
static void goya_read_device_fw_version(struct hl_device *hdev,
- enum goya_fw_component fwc)
+ enum hl_fw_component fwc)
{
const char *name;
u32 ver_off;
@@ -2320,10 +2285,9 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
}
}
-static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
+static int goya_init_cpu(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
- u32 status;
int rc;
if (!hdev->cpu_enable)
@@ -2342,115 +2306,15 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
return -EIO;
}
- if (hdev->pldm) {
- rc = goya_pldm_init_cpu(hdev);
- if (rc)
- return rc;
-
- goto out;
- }
-
- /* Make sure CPU boot-loader is running */
- rc = hl_poll_timeout(
- hdev,
- mmPSOC_GLOBAL_CONF_WARM_REBOOT,
- status,
- (status == CPU_BOOT_STATUS_DRAM_RDY) ||
- (status == CPU_BOOT_STATUS_SRAM_AVAIL),
- 10000,
- cpu_timeout);
+ rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
+ mmPSOC_GLOBAL_CONF_UBOOT_MAGIC,
+ mmCPU_CMD_STATUS_TO_HOST, mmCPU_BOOT_ERR0,
+ false, GOYA_CPU_TIMEOUT_USEC,
+ GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
- /* Read U-Boot version now in case we will later fail */
- goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
- goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
-
- if (rc) {
- dev_err(hdev->dev, "Error in ARM u-boot!");
- switch (status) {
- case CPU_BOOT_STATUS_NA:
- dev_err(hdev->dev,
- "ARM status %d - BTL did NOT run\n", status);
- break;
- case CPU_BOOT_STATUS_IN_WFE:
- dev_err(hdev->dev,
- "ARM status %d - Inside WFE loop\n", status);
- break;
- case CPU_BOOT_STATUS_IN_BTL:
- dev_err(hdev->dev,
- "ARM status %d - Stuck in BTL\n", status);
- break;
- case CPU_BOOT_STATUS_IN_PREBOOT:
- dev_err(hdev->dev,
- "ARM status %d - Stuck in Preboot\n", status);
- break;
- case CPU_BOOT_STATUS_IN_SPL:
- dev_err(hdev->dev,
- "ARM status %d - Stuck in SPL\n", status);
- break;
- case CPU_BOOT_STATUS_IN_UBOOT:
- dev_err(hdev->dev,
- "ARM status %d - Stuck in u-boot\n", status);
- break;
- case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
- dev_err(hdev->dev,
- "ARM status %d - DDR initialization failed\n",
- status);
- break;
- case CPU_BOOT_STATUS_UBOOT_NOT_READY:
- dev_err(hdev->dev,
- "ARM status %d - u-boot stopped by user\n",
- status);
- break;
- case CPU_BOOT_STATUS_TS_INIT_FAIL:
- dev_err(hdev->dev,
- "ARM status %d - Thermal Sensor initialization failed\n",
- status);
- break;
- default:
- dev_err(hdev->dev,
- "ARM status %d - Invalid status code\n",
- status);
- break;
- }
- return -EIO;
- }
-
- if (!hdev->fw_loading) {
- dev_info(hdev->dev, "Skip loading FW\n");
- goto out;
- }
-
- if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
- goto out;
-
- rc = goya_push_linux_to_device(hdev);
if (rc)
return rc;
- WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
-
- rc = hl_poll_timeout(
- hdev,
- mmPSOC_GLOBAL_CONF_WARM_REBOOT,
- status,
- (status == CPU_BOOT_STATUS_SRAM_AVAIL),
- 10000,
- cpu_timeout);
-
- if (rc) {
- if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
- dev_err(hdev->dev,
- "ARM u-boot reports FIT image is corrupted\n");
- else
- dev_err(hdev->dev,
- "ARM Linux failed to load, %d\n", status);
- WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
- return -EIO;
- }
-
- dev_info(hdev->dev, "Successfully loaded firmware to device\n");
-
-out:
goya->hw_cap_initialized |= HW_CAP_CPU;
return 0;
@@ -2565,7 +2429,7 @@ static int goya_hw_init(struct hl_device *hdev)
*/
WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
- rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
+ rc = goya_init_cpu(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU\n");
return rc;
@@ -2684,30 +2548,6 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
HW_CAP_MMU | HW_CAP_TPC_MBIST |
HW_CAP_GOLDEN | HW_CAP_TPC);
memset(goya->events_stat, 0, sizeof(goya->events_stat));
-
- if (!hdev->pldm) {
- int rc;
- /* In case we are running inside VM and the VM is
- * shutting down, we need to make sure CPU boot-loader
- * is running before we can continue the VM shutdown.
- * That is because the VM will send an FLR signal that
- * we must answer
- */
- dev_info(hdev->dev,
- "Going to wait up to %ds for CPU boot loader\n",
- GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
-
- rc = hl_poll_timeout(
- hdev,
- mmPSOC_GLOBAL_CONF_WARM_REBOOT,
- status,
- (status == CPU_BOOT_STATUS_DRAM_RDY),
- 10000,
- GOYA_CPU_TIMEOUT_USEC);
- if (rc)
- dev_err(hdev->dev,
- "failed to wait for CPU boot loader\n");
- }
}
int goya_suspend(struct hl_device *hdev)
@@ -3555,6 +3395,7 @@ static int goya_validate_cb(struct hl_device *hdev,
*/
rc = goya_validate_wreg32(hdev,
parser, (struct packet_wreg32 *) user_pkt);
+ parser->patched_cb_size += pkt_size;
break;
case PACKET_WREG_BULK:
@@ -4016,7 +3857,8 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
}
void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
- u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec)
+ u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
+ bool eb)
{
struct packet_msg_prot *cq_pkt;
u32 tmp;
@@ -5042,7 +4884,7 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
-static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
u32 flags)
{
struct goya_device *goya = hdev->asic_specific;
@@ -5051,11 +4893,11 @@ static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
hdev->hard_reset_pending)
- return;
+ return 0;
/* no need in L1 only invalidation in Goya */
if (!is_hard)
- return;
+ return 0;
if (hdev->pldm)
timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
@@ -5077,13 +4919,17 @@ static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
mutex_unlock(&hdev->mmu_cache_lock);
- if (rc)
- dev_notice_ratelimited(hdev->dev,
- "Timeout when waiting for MMU cache invalidation\n");
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
}
-static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
- bool is_hard, u32 asid, u64 va, u64 size)
+static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
+ bool is_hard, u32 asid, u64 va, u64 size)
{
struct goya_device *goya = hdev->asic_specific;
u32 status, timeout_usec, inv_data, pi;
@@ -5091,11 +4937,11 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
hdev->hard_reset_pending)
- return;
+ return 0;
/* no need in L1 only invalidation in Goya */
if (!is_hard)
- return;
+ return 0;
if (hdev->pldm)
timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
@@ -5128,9 +4974,13 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
mutex_unlock(&hdev->mmu_cache_lock);
- if (rc)
- dev_notice_ratelimited(hdev->dev,
- "Timeout when waiting for MMU cache invalidation\n");
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
}
int goya_send_heartbeat(struct hl_device *hdev)
@@ -5178,6 +5028,16 @@ int goya_armcp_info_get(struct hl_device *hdev)
return 0;
}
+static void goya_enable_clock_gating(struct hl_device *hdev)
+{
+
+}
+
+static void goya_disable_clock_gating(struct hl_device *hdev)
+{
+
+}
+
static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
struct seq_file *s)
{
@@ -5293,6 +5153,68 @@ static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
return RREG32(mmHW_STATE);
}
+u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
+{
+ return cq_idx;
+}
+
+static void goya_ext_queue_init(struct hl_device *hdev, u32 q_idx)
+{
+
+}
+
+static void goya_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+{
+
+}
+
+static u32 goya_get_signal_cb_size(struct hl_device *hdev)
+{
+ return 0;
+}
+
+static u32 goya_get_wait_cb_size(struct hl_device *hdev)
+{
+ return 0;
+}
+
+static void goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
+{
+
+}
+
+static void goya_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
+ u16 sob_val, u16 mon_id, u32 q_idx)
+{
+
+}
+
+static void goya_reset_sob(struct hl_device *hdev, void *data)
+{
+
+}
+
+static void goya_set_dma_mask_from_fw(struct hl_device *hdev)
+{
+ if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) ==
+ HL_POWER9_HOST_MAGIC) {
+ dev_dbg(hdev->dev, "Working in 64-bit DMA mode\n");
+ hdev->power9_64bit_dma_enable = 1;
+ hdev->dma_mask = 64;
+ } else {
+ dev_dbg(hdev->dev, "Working in 48-bit DMA mode\n");
+ hdev->power9_64bit_dma_enable = 0;
+ hdev->dma_mask = 48;
+ }
+}
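
The hdev->dma_mask value set here is presumably consumed later through the
standard kernel DMA API; the actual call site is outside this hunk, so the
lines below only sketch the conventional usage.

	/* Hedged: conventional application of a bit-width DMA mask */
	rc = dma_set_mask_and_coherent(&hdev->pdev->dev,
					DMA_BIT_MASK(hdev->dma_mask));
	if (rc)
		dev_err(hdev->dev, "Failed to set DMA mask to %d bits\n",
			hdev->dma_mask);
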
+
+u64 goya_get_device_time(struct hl_device *hdev)
+{
+ u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
+
+ return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
+}
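
goya_get_device_time() reads the upper half of the counter and then the lower
half, so a low-word rollover between the two RREG32s could in principle skew
one sample; whether that matters for this timestamp block is not addressed by
the patch. A rollover-safe variant would use the classic hi/lo/hi pattern,
sketched here with the same registers (the helper name is hypothetical):

	static u64 goya_get_device_time_stable(struct hl_device *hdev)
	{
		u32 hi, lo;

		/* re-read the high word until it is stable across the
		 * low-word read
		 */
		do {
			hi = RREG32(mmPSOC_TIMESTAMP_CNTCVU);
			lo = RREG32(mmPSOC_TIMESTAMP_CNTCVL);
		} while (hi != RREG32(mmPSOC_TIMESTAMP_CNTCVU));

		return ((u64) hi << 32) | lo;
	}
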
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5337,6 +5259,8 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
.send_heartbeat = goya_send_heartbeat,
+ .enable_clock_gating = goya_enable_clock_gating,
+ .disable_clock_gating = goya_disable_clock_gating,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
.soft_reset_late_init = goya_soft_reset_late_init,
@@ -5352,7 +5276,20 @@ static const struct hl_asic_funcs goya_funcs = {
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = goya_halt_coresight,
- .get_clk_rate = goya_get_clk_rate
+ .get_clk_rate = goya_get_clk_rate,
+ .get_queue_id_for_cq = goya_get_queue_id_for_cq,
+ .read_device_fw_version = goya_read_device_fw_version,
+ .load_firmware_to_device = goya_load_firmware_to_device,
+ .load_boot_fit_to_device = goya_load_boot_fit_to_device,
+ .ext_queue_init = goya_ext_queue_init,
+ .ext_queue_reset = goya_ext_queue_reset,
+ .get_signal_cb_size = goya_get_signal_cb_size,
+ .get_wait_cb_size = goya_get_wait_cb_size,
+ .gen_signal_cb = goya_gen_signal_cb,
+ .gen_wait_cb = goya_gen_wait_cb,
+ .reset_sob = goya_reset_sob,
+ .set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
+ .get_device_time = goya_get_device_time
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index c3230cb6e25c..d36f8d90c9c9 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -45,7 +45,7 @@
#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
-#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
+#define GOYA_CPU_TIMEOUT_USEC 15000000 /* 15s */
#define TPC_ENABLED_MASK 0xFF
@@ -149,11 +149,6 @@
#define HW_CAP_GOLDEN 0x00000400
#define HW_CAP_TPC 0x00000800
-enum goya_fw_component {
- FW_COMP_UBOOT,
- FW_COMP_PREBOOT
-};
-
struct goya_device {
/* TODO: remove hw_queues_lock after moving to scheduler code */
spinlock_t hw_queues_lock;
@@ -221,7 +216,8 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size);
void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
- u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec);
+ u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
+ bool eb);
int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len);
@@ -234,5 +230,7 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx);
+u64 goya_get_device_time(struct hl_device *hdev);
#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index a1bc930d904f..1258724ea510 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -266,7 +266,7 @@ static int goya_config_stm(struct hl_device *hdev,
WREG32(base_reg + 0xDF4, 0x80);
WREG32(base_reg + 0xE8C, input->frequency);
WREG32(base_reg + 0xE90, 0x7FF);
- WREG32(base_reg + 0xE80, 0x7 | (input->id << 16));
+ WREG32(base_reg + 0xE80, 0x27 | (input->id << 16));
} else {
WREG32(base_reg + 0xE80, 4);
WREG32(base_reg + 0xD64, 0);
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index d6ec12b3e692..de8297001fea 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -683,7 +683,6 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1 << ((mmTPC0_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_LFSR_POLYNOM & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -695,7 +694,6 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
@@ -875,6 +873,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC1_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC1_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -882,6 +890,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1057,6 +1069,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC2_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC2_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1064,6 +1086,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1239,6 +1265,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC3_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC3_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH
& PROT_BITS_OFFS) >> 7) << 2;
@@ -1246,6 +1282,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1421,6 +1461,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC4_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC4_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1428,6 +1478,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1603,6 +1657,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC5_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC5_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1610,6 +1674,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1785,6 +1853,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC6_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC6_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1792,6 +1870,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1967,6 +2049,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC7_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC7_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1974,6 +2066,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 31ebcf9458fe..1ecdcf8b763a 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -23,7 +23,9 @@
#define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT)
-#define HL_PENDING_RESET_PER_SEC 5
+#define HL_PENDING_RESET_PER_SEC 30
+
+#define HL_HARD_RESET_MAX_TIMEOUT 120
#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
@@ -51,6 +53,14 @@
/* MMU */
#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+#define HL_RSVD_SOBS 4
+#define HL_RSVD_MONS 2
+
+#define HL_RSVD_SOBS_IN_USE 2
+#define HL_RSVD_MONS_IN_USE 1
+
+#define HL_MAX_SOB_VAL (1 << 15)
+
/**
* struct pgt_info - MMU hop page info.
* @node: hash linked-list node for the pgts shadow hash of pgts.
@@ -76,6 +86,16 @@ struct hl_device;
struct hl_fpriv;
/**
+ * enum hl_fw_component - F/W components to read version through registers.
+ * @FW_COMP_UBOOT: u-boot.
+ * @FW_COMP_PREBOOT: preboot.
+ */
+enum hl_fw_component {
+ FW_COMP_UBOOT,
+ FW_COMP_PREBOOT
+};
+
+/**
* enum hl_queue_type - Supported QUEUE types.
* @QUEUE_TYPE_NA: queue is not available.
* @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
@@ -94,6 +114,26 @@ enum hl_queue_type {
QUEUE_TYPE_HW
};
+enum hl_cs_type {
+ CS_TYPE_DEFAULT,
+ CS_TYPE_SIGNAL,
+ CS_TYPE_WAIT
+};
+
+/**
+ * struct hl_hw_sob - H/W SOB info.
+ * @hdev: habanalabs device structure.
+ * @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
+ * @sob_id: id of this SOB.
+ * @q_idx: the H/W queue that uses this SOB.
+ */
+struct hl_hw_sob {
+ struct hl_device *hdev;
+ struct kref kref;
+ u32 sob_id;
+ u32 q_idx;
+};
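
The @kref comment says the SOB resets once its refcount drops to zero; the
usual kernel pattern for that is a kref release callback. The names below are
hypothetical - the real release function is not part of this hunk - but
reset_sob matches the ASIC-function signature this patch adds.

	/* Hypothetical release callback illustrating the kref-driven reset */
	static void hl_sob_release(struct kref *ref)
	{
		struct hl_hw_sob *hw_sob = container_of(ref,
						struct hl_hw_sob, kref);

		hw_sob->hdev->asic_funcs->reset_sob(hw_sob->hdev, hw_sob);
	}

	/* users take a reference per signal/wait CS ... */
	kref_get(&hw_sob->kref);
	/* ... and the SOB resets when the last reference is dropped */
	kref_put(&hw_sob->kref, hl_sob_release);
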
+
/**
* struct hw_queue_properties - queue information.
* @type: queue type.
@@ -250,17 +290,23 @@ struct asic_fixed_properties {
};
/**
- * struct hl_dma_fence - wrapper for fence object used by command submissions.
+ * struct hl_cs_compl - command submission completion object.
* @base_fence: kernel fence object.
* @lock: spinlock to protect fence.
* @hdev: habanalabs device structure.
+ * @hw_sob: the H/W SOB used in this signal/wait CS.
* @cs_seq: command submission sequence number.
+ * @type: type of the CS - signal/wait.
+ * @sob_val: the SOB value that is used in this signal/wait CS.
*/
-struct hl_dma_fence {
+struct hl_cs_compl {
struct dma_fence base_fence;
spinlock_t lock;
struct hl_device *hdev;
+ struct hl_hw_sob *hw_sob;
u64 cs_seq;
+ enum hl_cs_type type;
+ u16 sob_val;
};
/*
@@ -358,6 +404,7 @@ struct hl_cs_job;
/**
* struct hl_hw_queue - describes a H/W transport queue.
+ * @hw_sob: array of the used H/W SOBs by this H/W queue.
* @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
* @queue_type: type of queue.
* @kernel_address: holds the queue's kernel virtual address.
@@ -365,11 +412,19 @@ struct hl_cs_job;
* @pi: holds the queue's pi value.
* @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
* @hw_queue_id: the id of the H/W queue.
+ * @cq_id: the id for the corresponding CQ for this H/W queue.
+ * @msi_vec: the IRQ number of the H/W queue.
* @int_queue_len: length of internal queue (number of entries).
+ * @next_sob_val: the next value to use for the currently used SOB.
+ * @base_sob_id: the base SOB id of the SOBs used by this queue.
+ * @base_mon_id: the base MON id of the MONs used by this queue.
* @valid: is the queue valid (we have array of 32 queues, not all of them
- * exists).
+ * exist).
+ * @curr_sob_offset: the id offset to the currently used SOB from the
+ * HL_RSVD_SOBS that are being used by this queue.
*/
struct hl_hw_queue {
+ struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
struct hl_cs_job **shadow_queue;
enum hl_queue_type queue_type;
u64 kernel_address;
@@ -377,8 +432,14 @@ struct hl_hw_queue {
u32 pi;
u32 ci;
u32 hw_queue_id;
+ u32 cq_id;
+ u32 msi_vec;
u16 int_queue_len;
+ u16 next_sob_val;
+ u16 base_sob_id;
+ u16 base_mon_id;
u8 valid;
+ u8 curr_sob_offset;
};
/**
@@ -517,6 +578,8 @@ enum hl_pll_frequency {
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
+ * @enable_clock_gating: enable clock gating for reducing power consumption.
+ * @disable_clock_gating: disable clock gating for accessing registers on HBW.
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
* @soft_reset_late_init: perform certain actions needed after soft reset.
@@ -534,6 +597,21 @@ enum hl_pll_frequency {
* @wreg: Write a register. Needed for simulator support.
* @halt_coresight: stop the ETF and ETR traces.
* @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
+ * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
+ * @read_device_fw_version: read the device's firmware versions that are
+ * contained in registers.
+ * @load_firmware_to_device: load the firmware to the device's memory.
+ * @load_boot_fit_to_device: load the boot fit to the device's memory.
+ * @ext_queue_init: Initialize the given external queue.
+ * @ext_queue_reset: Reset the given external queue.
+ * @get_signal_cb_size: Get signal CB size.
+ * @get_wait_cb_size: Get wait CB size.
+ * @gen_signal_cb: Generate a signal CB.
+ * @gen_wait_cb: Generate a wait CB.
+ * @reset_sob: Reset a SOB.
+ * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
+ * firmware configuration.
+ * @get_device_time: Get the device time.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -578,7 +656,8 @@ struct hl_asic_funcs {
struct sg_table *sgt);
void (*add_end_of_cb_packets)(struct hl_device *hdev,
u64 kernel_address, u32 len,
- u64 cq_addr, u32 cq_val, u32 msix_num);
+ u64 cq_addr, u32 cq_val, u32 msix_num,
+ bool eb);
void (*update_eq_ci)(struct hl_device *hdev, u32 val);
int (*context_switch)(struct hl_device *hdev, u32 asid);
void (*restore_phase_topology)(struct hl_device *hdev);
@@ -596,11 +675,13 @@ struct hl_asic_funcs {
u32 *size);
u64 (*read_pte)(struct hl_device *hdev, u64 addr);
void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
- void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
+ int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
u32 flags);
- void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
+ int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
+ void (*enable_clock_gating)(struct hl_device *hdev);
+ void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
struct seq_file *s);
@@ -620,6 +701,21 @@ struct hl_asic_funcs {
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
void (*halt_coresight)(struct hl_device *hdev);
int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+ u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
+ void (*read_device_fw_version)(struct hl_device *hdev,
+ enum hl_fw_component fwc);
+ int (*load_firmware_to_device)(struct hl_device *hdev);
+ int (*load_boot_fit_to_device)(struct hl_device *hdev);
+ void (*ext_queue_init)(struct hl_device *hdev, u32 hw_queue_id);
+ void (*ext_queue_reset)(struct hl_device *hdev, u32 hw_queue_id);
+ u32 (*get_signal_cb_size)(struct hl_device *hdev);
+ u32 (*get_wait_cb_size)(struct hl_device *hdev);
+ void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id);
+ void (*gen_wait_cb)(struct hl_device *hdev, void *data, u16 sob_id,
+ u16 sob_val, u16 mon_id, u32 q_idx);
+ void (*reset_sob)(struct hl_device *hdev, void *data);
+ void (*set_dma_mask_from_fw)(struct hl_device *hdev);
+ u64 (*get_device_time)(struct hl_device *hdev);
};
@@ -659,8 +755,8 @@ struct hl_va_range {
* with huge pages.
* @dram_va_range: holds available virtual addresses for DRAM mappings.
* @mem_hash_lock: protects the mem_hash.
- * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifing the
- * MMU hash or walking the PGT requires talking this lock
+ * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
+ * MMU hash or walking the PGT requires taking this lock.
* @debugfs_list: node in debugfs list of contexts.
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to user so user could inquire about CS. It is used as
@@ -751,10 +847,14 @@ struct hl_userptr {
* @job_lock: spinlock for the CS's jobs list. Needed for free_job.
* @refcount: reference counter for usage of the CS.
* @fence: pointer to the fence object of this CS.
+ * @signal_fence: pointer to the fence object of the signal CS (used by wait
+ * CS only).
+ * @finish_work: workqueue object to run when CS is completed by H/W.
* @work_tdr: delayed work node for TDR.
* @mirror_node : node in device mirror list of command submissions.
* @debugfs_list: node in debugfs list of command submissions.
* @sequence: the sequence number of this CS.
+ * @type: CS_TYPE_*.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
* @timedout: true if CS timed out.
@@ -769,10 +869,13 @@ struct hl_cs {
spinlock_t job_lock;
struct kref refcount;
struct dma_fence *fence;
+ struct dma_fence *signal_fence;
+ struct work_struct finish_work;
struct delayed_work work_tdr;
struct list_head mirror_node;
struct list_head debugfs_list;
u64 sequence;
+ enum hl_cs_type type;
u8 submitted;
u8 completed;
u8 timedout;
@@ -799,6 +902,12 @@ struct hl_cs {
* @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
* handle to a kernel-allocated CB object, false
* otherwise (SRAM/DRAM/host address).
+ * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
+ * info is needed later, when adding the 2xMSG_PROT at the
+ * end of the JOB, to know which barriers to put in the
+ * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
+ * have streams so the engine can't be busy with another
+ * stream.
*/
struct hl_cs_job {
struct list_head cs_node;
@@ -814,6 +923,7 @@ struct hl_cs_job {
u32 user_cb_size;
u32 job_cb_size;
u8 is_kernel_allocated_cb;
+ u8 contains_dma_pkt;
};
/**
@@ -833,6 +943,12 @@ struct hl_cs_job {
* @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
* handle to a kernel-allocated CB object, false
* otherwise (SRAM/DRAM/host address).
+ * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
+ * info is needed later, when adding the 2xMSG_PROT at the
+ * end of the JOB, to know which barriers to put in the
+ * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
+ * have streams so the engine can't be busy with another
+ * stream.
*/
struct hl_cs_parser {
struct hl_cb *user_cb;
@@ -846,6 +962,7 @@ struct hl_cs_parser {
u32 patched_cb_size;
u8 job_id;
u8 is_kernel_allocated_cb;
+ u8 contains_dma_pkt;
};
@@ -1093,6 +1210,16 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+#define RMWREG32(reg, val, mask) \
+ do { \
+ u32 tmp_ = RREG32(reg); \
+ tmp_ &= ~(mask); \
+ tmp_ |= ((val) << __ffs(mask)); \
+ WREG32(reg, tmp_); \
+ } while (0)
+
+#define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))
+
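/*
 * A minimal stand-alone sketch of the RMWREG32 / RREG32_MASK mask-and-
 * shift idiom added above. RREG32/WREG32 are stubbed against a local
 * variable and __ffs is emulated with a compiler builtin so the
 * arithmetic can be run in user space; this is illustrative only, not
 * driver code.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

static u32 fake_reg;
#define RREG32(reg)		(fake_reg)
#define WREG32(reg, v)		(fake_reg = (v))
#define __ffs(x)		__builtin_ctz(x)	/* index of lowest set bit */

#define RMWREG32(reg, val, mask) \
	do { \
		u32 tmp_ = RREG32(reg); \
		tmp_ &= ~(mask); \
		tmp_ |= ((val) << __ffs(mask)); \
		WREG32(reg, tmp_); \
	} while (0)

#define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))

int main(void)
{
	fake_reg = 0xFFFF0000;
	/* write 0x5 into bits [11:8] without touching any other bit */
	RMWREG32(0, 0x5, 0xF00);
	printf("reg = 0x%08X, field = 0x%X\n",
	       fake_reg, (u32) RREG32_MASK(0, 0xF00));	/* 0xFFFF0500, 0x5 */
	return 0;
}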
#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
#define WREG32_FIELD(reg, offset, field, val) \
@@ -1282,6 +1409,8 @@ struct hl_device_idle_busy_ts {
* @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
* @id: device minor.
* @id_control: minor of the control device
+ * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
+ * addresses.
* @disabled: is device disabled.
* @late_init_done: was the late init stage done during initialization.
* @hwmon_initialized: were the H/W monitor sensors initialized.
@@ -1295,11 +1424,19 @@ struct hl_device_idle_busy_ts {
* huge pages.
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
+ * @mmu_huge_page_opt: is MMU huge pages optimization enabled.
+ * @clock_gating: is clock gating enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
* @in_debug: is device under debug. This, together with fpriv_list, enforces
* that only a single user is configuring the debug infrastructure.
+ * @power9_64bit_dma_enable: true to enable 64-bit DMA mask support. Relevant
+ * only to POWER9 machines.
* @cdev_sysfs_created: were char devices and sysfs nodes created.
+ * @stop_on_err: true if engines should stop on error.
+ * @supports_sync_stream: is sync stream supported.
+ * @supports_coresight: is CoreSight supported.
+ * @supports_soft_reset: is soft reset supported.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -1366,6 +1503,7 @@ struct hl_device {
u32 idle_busy_ts_idx;
u16 id;
u16 id_control;
+ u16 cpu_pci_msb_addr;
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
@@ -1376,18 +1514,31 @@ struct hl_device {
u8 dram_default_page_mapping;
u8 pmmu_huge_range;
u8 init_done;
+ u8 clock_gating;
u8 device_cpu_disabled;
u8 dma_mask;
u8 in_debug;
+ u8 power9_64bit_dma_enable;
u8 cdev_sysfs_created;
+ u8 stop_on_err;
+ u8 supports_sync_stream;
+ u8 supports_coresight;
+ u8 supports_soft_reset;
/* Parameters for bring-up */
u8 mmu_enable;
+ u8 mmu_huge_page_opt;
u8 cpu_enable;
u8 reset_pcilink;
u8 cpu_queues_enable;
u8 fw_loading;
u8 pldm;
+ u8 axi_drain;
+ u8 sram_scrambler_enable;
+ u8 dram_scrambler_enable;
+ u8 hard_reset_on_fw_events;
+ u8 bmc_enable;
+ u8 rl_enable;
};
@@ -1554,8 +1705,10 @@ int hl_cb_pool_fini(struct hl_device *hdev);
void hl_cs_rollback_all(struct hl_device *hdev);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
+void hl_sob_reset_error(struct kref *ref);
void goya_set_asic_funcs(struct hl_device *hdev);
+void gaudi_set_asic_funcs(struct hl_device *hdev);
int hl_vm_ctx_init(struct hl_ctx *ctx);
void hl_vm_ctx_fini(struct hl_ctx *ctx);
@@ -1583,11 +1736,14 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
-int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst);
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, long *result);
+int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
+int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
+ size_t irq_arr_size);
int hl_fw_test_cpu_queue(struct hl_device *hdev);
void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle);
@@ -1596,6 +1752,10 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
int hl_fw_send_heartbeat(struct hl_device *hdev);
int hl_fw_armcp_info_get(struct hl_device *hdev);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
+int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
+ u32 boot_err0_reg, bool skip_bmc,
+ u32 cpu_timeout, u32 boot_fit_timeout);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
@@ -1605,9 +1765,8 @@ int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
u64 dram_base_address, u64 host_phys_base_address,
u64 host_phys_size);
-int hl_pci_init(struct hl_device *hdev, u8 dma_mask);
+int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
-int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
@@ -1627,6 +1786,10 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
long value);
u64 hl_get_max_power(struct hl_device *hdev);
void hl_set_max_power(struct hl_device *hdev, u64 value);
+int hl_set_voltage(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value);
+int hl_set_current(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index b670859c677a..8652c7e5d7f1 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -47,6 +47,7 @@ static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, ids);
/*
* get_asic_type - translate device id to asic type
@@ -171,6 +172,7 @@ out_err:
put_pid(hpriv->taskpid);
kfree(hpriv);
+
return rc;
}
@@ -230,8 +232,15 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->fw_loading = 1;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
+ hdev->clock_gating = 1;
hdev->reset_pcilink = 0;
+ hdev->axi_drain = 0;
+ hdev->sram_scrambler_enable = 1;
+ hdev->dram_scrambler_enable = 1;
+ hdev->rl_enable = 1;
+ hdev->bmc_enable = 1;
+ hdev->hard_reset_on_fw_events = 1;
}
/*
@@ -267,11 +276,6 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
dev_err(&pdev->dev, "Unsupported ASIC\n");
rc = -ENODEV;
goto free_hdev;
- } else if (hdev->asic_type == ASIC_GAUDI) {
- dev_err(&pdev->dev,
- "GAUDI is not supported by the current kernel\n");
- rc = -ENODEV;
- goto free_hdev;
}
} else {
hdev->asic_type = asic_type;
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 6474b868ef27..52eedd3a6c3a 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -71,6 +71,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));
hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
+ hw_ip.module_id = le32_to_cpu(prop->armcp_info.card_location);
+
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
@@ -258,6 +260,22 @@ static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}
+static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_time_sync time_sync = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
+ time_sync.host_time = ktime_get_raw_ns();
+
+ return copy_to_user(out, &time_sync,
+ min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
+}
+
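/*
 * A hedged user-space sketch of consuming the new HL_INFO_TIME_SYNC op.
 * It assumes the habanalabs uapi header exposes HL_IOCTL_INFO,
 * HL_INFO_TIME_SYNC, struct hl_info_args and struct hl_info_time_sync
 * as in mainline; the device node name is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

int main(void)
{
	struct hl_info_time_sync ts;
	struct hl_info_args args;
	int fd = open("/dev/hl0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_TIME_SYNC;
	args.return_pointer = (uint64_t) (uintptr_t) &ts;
	args.return_size = sizeof(ts);

	if (!ioctl(fd, HL_IOCTL_INFO, &args))
		printf("device %llu ns, host %llu ns\n",
		       (unsigned long long) ts.device_time,
		       (unsigned long long) ts.host_time);

	close(fd);
	return 0;
}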
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -315,6 +333,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
rc = get_clk_rate(hdev, args);
break;
+ case HL_INFO_TIME_SYNC:
+ return time_sync_info(hdev, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 91579dde9262..f4434b39ef1b 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -111,7 +111,7 @@ static int ext_queue_sanity_checks(struct hl_device *hdev,
bool reserve_cq_entry)
{
atomic_t *free_slots =
- &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+ &hdev->completion_queue[q->cq_id].free_slots_cnt;
int free_slots_cnt;
/* Check we have enough space in the queue */
@@ -194,7 +194,7 @@ static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
int num_of_entries)
{
atomic_t *free_slots =
- &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+ &hdev->completion_queue[q->cq_id].free_slots_cnt;
/*
* Check we have enough space in the completion queue.
@@ -308,13 +308,14 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
* No need to check if CQ is full because it was already
* checked in ext_queue_sanity_checks
*/
- cq = &hdev->completion_queue[q->hw_queue_id];
+ cq = &hdev->completion_queue[q->cq_id];
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
cq_addr,
le32_to_cpu(cq_pkt.data),
- q->hw_queue_id);
+ q->msi_vec,
+ job->contains_dma_pkt);
q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
@@ -401,21 +402,111 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
* No need to check if CQ is full because it was already
* checked in hw_queue_sanity_checks
*/
- cq = &hdev->completion_queue[q->hw_queue_id];
+ cq = &hdev->completion_queue[q->cq_id];
+
cq->pi = hl_cq_inc_ptr(cq->pi);
ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
- * hl_hw_queue_schedule_cs - schedule a command submission
- *
- * @job : pointer to the CS
+ * init_signal_wait_cs - initialize a signal/wait CS
+ * @cs: pointer to the signal/wait CS
*
+ * H/W queues spinlock should be taken before calling this function
+ */
+static void init_signal_wait_cs(struct hl_cs *cs)
+{
+ struct hl_ctx *ctx = cs->ctx;
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_hw_queue *hw_queue;
+ struct hl_cs_compl *cs_cmpl =
+ container_of(cs->fence, struct hl_cs_compl, base_fence);
+
+ struct hl_hw_sob *hw_sob;
+ struct hl_cs_job *job;
+ u32 q_idx;
+
+ /* There is only one job in a signal/wait CS */
+ job = list_first_entry(&cs->job_list, struct hl_cs_job,
+ cs_node);
+ q_idx = job->hw_queue_id;
+ hw_queue = &hdev->kernel_queues[q_idx];
+
+ if (cs->type & CS_TYPE_SIGNAL) {
+ hw_sob = &hw_queue->hw_sob[hw_queue->curr_sob_offset];
+
+ cs_cmpl->hw_sob = hw_sob;
+ cs_cmpl->sob_val = hw_queue->next_sob_val++;
+
+ dev_dbg(hdev->dev,
+ "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
+ cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+
+ hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
+ cs_cmpl->hw_sob->sob_id);
+
+ kref_get(&hw_sob->kref);
+
+ /* check for wraparound */
+ if (hw_queue->next_sob_val == HL_MAX_SOB_VAL) {
+ /*
+ * Decrement as we reached the max value.
+ * The release function won't be called here as we've
+ * just incremented the refcount.
+ */
+ kref_put(&hw_sob->kref, hl_sob_reset_error);
+ hw_queue->next_sob_val = 1;
+ /* only two SOBs are currently in use */
+ hw_queue->curr_sob_offset =
+ (hw_queue->curr_sob_offset + 1) %
+ HL_RSVD_SOBS_IN_USE;
+
+ dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
+ hw_queue->curr_sob_offset, q_idx);
+ }
+ } else if (cs->type & CS_TYPE_WAIT) {
+ struct hl_cs_compl *signal_cs_cmpl;
+
+ signal_cs_cmpl = container_of(cs->signal_fence,
+ struct hl_cs_compl,
+ base_fence);
+
+ /* copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+
+ dev_dbg(hdev->dev,
+ "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
+ cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
+ hw_queue->base_mon_id, q_idx);
+
+ hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val,
+ hw_queue->base_mon_id,
+ q_idx);
+
+ kref_get(&cs_cmpl->hw_sob->kref);
+ /*
+ * Must put the signal fence after the SOB refcnt increment so
+ * the SOB refcnt won't turn 0 and reset the SOB before the
+ * wait CS is submitted.
+ */
+ mb();
+ dma_fence_put(cs->signal_fence);
+ cs->signal_fence = NULL;
+ }
+}
+
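/*
 * A stand-alone sketch of the SOB wraparound arithmetic in
 * init_signal_wait_cs() above: when the running signal value reaches
 * the maximum, the queue restarts the value at 1 and rotates to the
 * next reserved SOB. The constants here are illustrative assumptions;
 * the real values come from the ASIC definitions.
 */
#include <stdio.h>

#define HL_MAX_SOB_VAL		(1 << 15)	/* assumed 15-bit SOB counter */
#define HL_RSVD_SOBS_IN_USE	2

int main(void)
{
	unsigned int next_sob_val = HL_MAX_SOB_VAL - 2;
	unsigned int curr_sob_offset = 0;
	int i;

	for (i = 0; i < 4; i++) {
		/* mirrors: cs_cmpl->sob_val = hw_queue->next_sob_val++ */
		printf("signal %d: sob offset %u, sob val 0x%x\n",
		       i, curr_sob_offset, next_sob_val);
		next_sob_val++;

		if (next_sob_val == HL_MAX_SOB_VAL) {
			next_sob_val = 1;
			curr_sob_offset = (curr_sob_offset + 1) %
					  HL_RSVD_SOBS_IN_USE;
			printf("wraparound: switched to SOB offset %u\n",
			       curr_sob_offset);
		}
	}
	return 0;
}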
+/*
+ * hl_hw_queue_schedule_cs - schedule a command submission
+ * @cs: pointer to the CS
*/
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
- struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_ctx *ctx = cs->ctx;
+ struct hl_device *hdev = ctx->hdev;
struct hl_cs_job *job, *tmp;
struct hl_hw_queue *q;
int rc = 0, i, cq_cnt;
@@ -461,6 +552,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
}
+ if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT))
+ init_signal_wait_cs(cs);
+
spin_lock(&hdev->hw_queues_mirror_lock);
list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
@@ -569,6 +663,9 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
q->ci = 0;
q->pi = 0;
+ if (!is_cpu_queue)
+ hdev->asic_funcs->ext_queue_init(hdev, q->hw_queue_id);
+
return 0;
free_queue:
@@ -791,5 +888,8 @@ void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
continue;
q->pi = q->ci = 0;
+
+ if (q->queue_type == QUEUE_TYPE_EXT)
+ hdev->asic_funcs->ext_queue_reset(hdev, q->hw_queue_id);
}
}
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
index a21a26e07c3b..8c6cd77e6af6 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -200,6 +200,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_offset:
+ case hwmon_temp_reset_history:
break;
default:
return -EINVAL;
@@ -216,6 +217,24 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
}
hl_set_pwm_info(hdev, channel, attr, val);
break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_reset_history:
+ break;
+ default:
+ return -EINVAL;
+ }
+ hl_set_voltage(hdev, channel, attr, val);
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_reset_history:
+ break;
+ default:
+ return -EINVAL;
+ }
+ hl_set_current(hdev, channel, attr, val);
+ break;
default:
return -EINVAL;
}
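/*
 * The new *_reset_history attributes are write-only (mode 0200 in the
 * hunk below), so clearing a sensor's history is a plain sysfs write.
 * A hedged sketch; the hwmon index and attribute name are hypothetical
 * and depend on how the sensors enumerate on a given system.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon2/temp1_reset_history", "w");

	if (!f)
		return 1;

	fputs("1\n", f);	/* the written value only triggers the reset */
	fclose(f);
	return 0;
}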
@@ -237,6 +256,8 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
return 0444;
case hwmon_temp_offset:
return 0644;
+ case hwmon_temp_reset_history:
+ return 0200;
}
break;
case hwmon_in:
@@ -246,6 +267,8 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_in_max:
case hwmon_in_highest:
return 0444;
+ case hwmon_in_reset_history:
+ return 0200;
}
break;
case hwmon_curr:
@@ -255,6 +278,8 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_curr_max:
case hwmon_curr_highest:
return 0444;
+ case hwmon_curr_reset_history:
+ return 0200;
}
break;
case hwmon_fan:
@@ -462,6 +487,56 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
sensor_index, rc);
}
+int hl_set_voltage(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_VOLTAGE_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set voltage of sensor %d, error %d\n",
+ sensor_index, rc);
+
+ return rc;
+}
+
+int hl_set_current(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_CURRENT_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set current of sensor %d, error %d\n",
+ sensor_index, rc);
+
+ return rc;
+}
+
int hl_hwmon_init(struct hl_device *hdev)
{
struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
index bdd0a4c3a9cf..a34fc39ad87e 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -35,7 +35,8 @@ struct hl_eq_entry {
enum pq_init_status {
PQ_INIT_STATUS_NA = 0,
PQ_INIT_STATUS_READY_FOR_CP,
- PQ_INIT_STATUS_READY_FOR_HOST
+ PQ_INIT_STATUS_READY_FOR_HOST,
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI
};
/*
@@ -193,6 +194,16 @@ enum pq_init_status {
* Set the value of the offset property of a specified thermal sensor.
* The packet's arguments specify the desired sensor and the field to
* set.
+ *
+ * ARMCP_PACKET_VOLTAGE_SET -
+ * Trigger the reset_history property of a specified voltage sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * ARMCP_PACKET_CURRENT_SET -
+ * Trigger the reset_history property of a specified current sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
*/
enum armcp_packet_id {
@@ -220,6 +231,8 @@ enum armcp_packet_id {
ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */
ARMCP_RESERVED,
ARMCP_PACKET_TEMPERATURE_SET, /* sysfs */
+ ARMCP_PACKET_VOLTAGE_SET, /* sysfs */
+ ARMCP_PACKET_CURRENT_SET, /* sysfs */
};
#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -288,21 +301,24 @@ enum armcp_temp_type {
armcp_temp_crit,
armcp_temp_crit_hyst,
armcp_temp_offset = 19,
- armcp_temp_highest = 22
+ armcp_temp_highest = 22,
+ armcp_temp_reset_history = 23
};
enum armcp_in_attributes {
armcp_in_input,
armcp_in_min,
armcp_in_max,
- armcp_in_highest = 7
+ armcp_in_highest = 7,
+ armcp_in_reset_history
};
enum armcp_curr_attributes {
armcp_curr_input,
armcp_curr_min,
armcp_curr_max,
- armcp_curr_highest = 7
+ armcp_curr_highest = 7,
+ armcp_curr_reset_history
};
enum armcp_fan_attributes {
@@ -336,10 +352,23 @@ struct armcp_sensor {
};
/**
+ * enum armcp_card_types - ASIC card type.
+ * @armcp_card_type_pci: PCI card.
+ * @armcp_card_type_pmc: PCI Mezzanine Card.
+ */
+enum armcp_card_types {
+ armcp_card_type_pci,
+ armcp_card_type_pmc
+};
+
+/**
* struct armcp_info - Info from ArmCP that is necessary to the host's driver
* @sensors: available sensors description.
* @kernel_version: ArmCP linux kernel version.
* @reserved: reserved field.
+ * @card_type: card configuration type.
+ * @card_location: in a server, each card has a different connection topology
+ * depending on its location (relevant for the PMC card type).
* @cpld_version: CPLD programmed F/W version.
* @infineon_version: Infineon main DC-DC version.
* @fuse_version: silicon production FUSE information.
@@ -351,7 +380,9 @@ struct armcp_sensor {
struct armcp_info {
struct armcp_sensor sensors[ARMCP_MAX_SENSORS];
__u8 kernel_version[VERSION_MAX_LEN];
- __le32 reserved[3];
+ __le32 reserved;
+ __le32 card_type;
+ __le32 card_location;
__le32 cpld_version;
__le32 infineon_version;
__u8 fuse_version[VERSION_MAX_LEN];
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
new file mode 100644
index 000000000000..cf80e31317ad
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_IF_REGS_H_
+#define ASIC_REG_CPU_IF_REGS_H_
+
+/*
+ *****************************************
+ * CPU_IF (Prototype: CPU_IF)
+ *****************************************
+ */
+
+#define mmCPU_IF_ARUSER_OVR 0x442104
+
+#define mmCPU_IF_ARUSER_OVR_EN 0x442108
+
+#define mmCPU_IF_AWUSER_OVR 0x44210C
+
+#define mmCPU_IF_AWUSER_OVR_EN 0x442110
+
+#define mmCPU_IF_AXCACHE_OVR 0x442114
+
+#define mmCPU_IF_LOCK_OVR 0x442118
+
+#define mmCPU_IF_PROT_OVR 0x44211C
+
+#define mmCPU_IF_MAX_OUTSTANDING 0x442120
+
+#define mmCPU_IF_EARLY_BRESP_EN 0x442124
+
+#define mmCPU_IF_FORCE_RSP_OK 0x442128
+
+#define mmCPU_IF_CPU_MSB_ADDR 0x44212C
+
+#define mmCPU_IF_AXI_SPLIT_INTR 0x442130
+
+#define mmCPU_IF_TOTAL_WR_CNT 0x442140
+
+#define mmCPU_IF_INFLIGHT_WR_CNT 0x442144
+
+#define mmCPU_IF_TOTAL_RD_CNT 0x442150
+
+#define mmCPU_IF_INFLIGHT_RD_CNT 0x442154
+
+#define mmCPU_IF_PF_PQ_PI 0x442200
+
+#define mmCPU_IF_PQ_BASE_ADDR_LOW 0x442204
+
+#define mmCPU_IF_PQ_BASE_ADDR_HIGH 0x442208
+
+#define mmCPU_IF_PQ_LENGTH 0x44220C
+
+#define mmCPU_IF_CQ_BASE_ADDR_LOW 0x442210
+
+#define mmCPU_IF_CQ_BASE_ADDR_HIGH 0x442214
+
+#define mmCPU_IF_CQ_LENGTH 0x442218
+
+#define mmCPU_IF_EQ_BASE_ADDR_LOW 0x442220
+
+#define mmCPU_IF_EQ_BASE_ADDR_HIGH 0x442224
+
+#define mmCPU_IF_EQ_LENGTH 0x442228
+
+#define mmCPU_IF_EQ_RD_OFFS 0x44222C
+
+#define mmCPU_IF_QUEUE_INIT 0x442230
+
+#define mmCPU_IF_TPC_SERR_INTR_STS 0x442300
+
+#define mmCPU_IF_TPC_SERR_INTR_CLR 0x442304
+
+#define mmCPU_IF_TPC_SERR_INTR_MASK 0x442308
+
+#define mmCPU_IF_TPC_DERR_INTR_STS 0x442310
+
+#define mmCPU_IF_TPC_DERR_INTR_CLR 0x442314
+
+#define mmCPU_IF_TPC_DERR_INTR_MASK 0x442318
+
+#define mmCPU_IF_DMA_SERR_INTR_STS 0x442320
+
+#define mmCPU_IF_DMA_SERR_INTR_CLR 0x442324
+
+#define mmCPU_IF_DMA_SERR_INTR_MASK 0x442328
+
+#define mmCPU_IF_DMA_DERR_INTR_STS 0x442330
+
+#define mmCPU_IF_DMA_DERR_INTR_CLR 0x442334
+
+#define mmCPU_IF_DMA_DERR_INTR_MASK 0x442338
+
+#define mmCPU_IF_SRAM_SERR_INTR_STS 0x442340
+
+#define mmCPU_IF_SRAM_SERR_INTR_CLR 0x442344
+
+#define mmCPU_IF_SRAM_SERR_INTR_MASK 0x442348
+
+#define mmCPU_IF_SRAM_DERR_INTR_STS 0x442350
+
+#define mmCPU_IF_SRAM_DERR_INTR_CLR 0x442354
+
+#define mmCPU_IF_SRAM_DERR_INTR_MASK 0x442358
+
+#define mmCPU_IF_NIC_SERR_INTR_STS 0x442360
+
+#define mmCPU_IF_NIC_SERR_INTR_CLR 0x442364
+
+#define mmCPU_IF_NIC_SERR_INTR_MASK 0x442368
+
+#define mmCPU_IF_NIC_DERR_INTR_STS 0x442370
+
+#define mmCPU_IF_NIC_DERR_INTR_CLR 0x442374
+
+#define mmCPU_IF_NIC_DERR_INTR_MASK 0x442378
+
+#define mmCPU_IF_DMA_IF_SERR_INTR_STS 0x442380
+
+#define mmCPU_IF_DMA_IF_SERR_INTR_CLR 0x442384
+
+#define mmCPU_IF_DMA_IF_SERR_INTR_MASK 0x442388
+
+#define mmCPU_IF_DMA_IF_DERR_INTR_STS 0x442390
+
+#define mmCPU_IF_DMA_IF_DERR_INTR_CLR 0x442394
+
+#define mmCPU_IF_DMA_IF_DERR_INTR_MASK 0x442398
+
+#define mmCPU_IF_HBM_SERR_INTR_STS 0x4423A0
+
+#define mmCPU_IF_HBM_SERR_INTR_CLR 0x4423A4
+
+#define mmCPU_IF_HBM_SERR_INTR_MASK 0x4423A8
+
+#define mmCPU_IF_HBM_DERR_INTR_STS 0x4423B0
+
+#define mmCPU_IF_HBM_DERR_INTR_CLR 0x4423B4
+
+#define mmCPU_IF_HBM_DERR_INTR_MASK 0x4423B8
+
+#define mmCPU_IF_PLL_SEI_INTR_STS 0x442400
+
+#define mmCPU_IF_PLL_SEI_INTR_CLR 0x442404
+
+#define mmCPU_IF_PLL_SEI_INTR_MASK 0x442408
+
+#define mmCPU_IF_NIC_SEI_INTR_STS 0x442410
+
+#define mmCPU_IF_NIC_SEI_INTR_CLR 0x442414
+
+#define mmCPU_IF_NIC_SEI_INTR_MASK 0x442418
+
+#define mmCPU_IF_DMA_SEI_INTR_STS 0x442420
+
+#define mmCPU_IF_DMA_SEI_INTR_CLR 0x442424
+
+#define mmCPU_IF_DMA_SEI_INTR_MASK 0x442428
+
+#define mmCPU_IF_DMA_IF_SEI_INTR_STS 0x442430
+
+#define mmCPU_IF_DMA_IF_SEI_INTR_CLR 0x442434
+
+#define mmCPU_IF_DMA_IF_SEI_INTR_MASK 0x442438
+
+#endif /* ASIC_REG_CPU_IF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
new file mode 100644
index 000000000000..d079a37acab8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA0_CORE_MASKS_H_
+#define ASIC_REG_DMA0_CORE_MASKS_H_
+
+/*
+ *****************************************
+ * DMA0_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+/* DMA0_CORE_CFG_0 */
+#define DMA0_CORE_CFG_0_EN_SHIFT 0
+#define DMA0_CORE_CFG_0_EN_MASK 0x1
+
+/* DMA0_CORE_CFG_1 */
+#define DMA0_CORE_CFG_1_HALT_SHIFT 0
+#define DMA0_CORE_CFG_1_HALT_MASK 0x1
+#define DMA0_CORE_CFG_1_FLUSH_SHIFT 1
+#define DMA0_CORE_CFG_1_FLUSH_MASK 0x2
+#define DMA0_CORE_CFG_1_SB_FORCE_MISS_SHIFT 2
+#define DMA0_CORE_CFG_1_SB_FORCE_MISS_MASK 0x4
+
+/* DMA0_CORE_LBW_MAX_OUTSTAND */
+#define DMA0_CORE_LBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define DMA0_CORE_LBW_MAX_OUTSTAND_VAL_MASK 0x1F
+
+/* DMA0_CORE_SRC_BASE_LO */
+#define DMA0_CORE_SRC_BASE_LO_VAL_SHIFT 0
+#define DMA0_CORE_SRC_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_BASE_HI */
+#define DMA0_CORE_SRC_BASE_HI_VAL_SHIFT 0
+#define DMA0_CORE_SRC_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_BASE_LO */
+#define DMA0_CORE_DST_BASE_LO_VAL_SHIFT 0
+#define DMA0_CORE_DST_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_BASE_HI */
+#define DMA0_CORE_DST_BASE_HI_VAL_SHIFT 0
+#define DMA0_CORE_DST_BASE_HI_VAL_MASK 0xFFFFFF
+#define DMA0_CORE_DST_BASE_HI_CTX_ID_HI_SHIFT 24
+#define DMA0_CORE_DST_BASE_HI_CTX_ID_HI_MASK 0xFF000000
+
+/* DMA0_CORE_SRC_TSIZE_1 */
+#define DMA0_CORE_SRC_TSIZE_1_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_STRIDE_1 */
+#define DMA0_CORE_SRC_STRIDE_1_VAL_SHIFT 0
+#define DMA0_CORE_SRC_STRIDE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_TSIZE_2 */
+#define DMA0_CORE_SRC_TSIZE_2_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_STRIDE_2 */
+#define DMA0_CORE_SRC_STRIDE_2_VAL_SHIFT 0
+#define DMA0_CORE_SRC_STRIDE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_TSIZE_3 */
+#define DMA0_CORE_SRC_TSIZE_3_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_STRIDE_3 */
+#define DMA0_CORE_SRC_STRIDE_3_VAL_SHIFT 0
+#define DMA0_CORE_SRC_STRIDE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_TSIZE_4 */
+#define DMA0_CORE_SRC_TSIZE_4_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_STRIDE_4 */
+#define DMA0_CORE_SRC_STRIDE_4_VAL_SHIFT 0
+#define DMA0_CORE_SRC_STRIDE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_TSIZE_0 */
+#define DMA0_CORE_SRC_TSIZE_0_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_1 */
+#define DMA0_CORE_DST_TSIZE_1_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_STRIDE_1 */
+#define DMA0_CORE_DST_STRIDE_1_VAL_SHIFT 0
+#define DMA0_CORE_DST_STRIDE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_2 */
+#define DMA0_CORE_DST_TSIZE_2_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_STRIDE_2 */
+#define DMA0_CORE_DST_STRIDE_2_VAL_SHIFT 0
+#define DMA0_CORE_DST_STRIDE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_3 */
+#define DMA0_CORE_DST_TSIZE_3_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_STRIDE_3 */
+#define DMA0_CORE_DST_STRIDE_3_VAL_SHIFT 0
+#define DMA0_CORE_DST_STRIDE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_4 */
+#define DMA0_CORE_DST_TSIZE_4_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_STRIDE_4 */
+#define DMA0_CORE_DST_STRIDE_4_VAL_SHIFT 0
+#define DMA0_CORE_DST_STRIDE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_0 */
+#define DMA0_CORE_DST_TSIZE_0_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_COMMIT */
+#define DMA0_CORE_COMMIT_WR_COMP_EN_SHIFT 0
+#define DMA0_CORE_COMMIT_WR_COMP_EN_MASK 0x1
+#define DMA0_CORE_COMMIT_TRANSPOSE_SHIFT 1
+#define DMA0_CORE_COMMIT_TRANSPOSE_MASK 0x2
+#define DMA0_CORE_COMMIT_DTYPE_SHIFT 2
+#define DMA0_CORE_COMMIT_DTYPE_MASK 0x4
+#define DMA0_CORE_COMMIT_LIN_SHIFT 3
+#define DMA0_CORE_COMMIT_LIN_MASK 0x8
+#define DMA0_CORE_COMMIT_MEM_SET_SHIFT 4
+#define DMA0_CORE_COMMIT_MEM_SET_MASK 0x10
+#define DMA0_CORE_COMMIT_COMPRESS_SHIFT 5
+#define DMA0_CORE_COMMIT_COMPRESS_MASK 0x20
+#define DMA0_CORE_COMMIT_DECOMPRESS_SHIFT 6
+#define DMA0_CORE_COMMIT_DECOMPRESS_MASK 0x40
+#define DMA0_CORE_COMMIT_CTX_ID_SHIFT 16
+#define DMA0_CORE_COMMIT_CTX_ID_MASK 0xFF0000
+
+/* DMA0_CORE_WR_COMP_WDATA */
+#define DMA0_CORE_WR_COMP_WDATA_VAL_SHIFT 0
+#define DMA0_CORE_WR_COMP_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_WR_COMP_ADDR_LO */
+#define DMA0_CORE_WR_COMP_ADDR_LO_VAL_SHIFT 0
+#define DMA0_CORE_WR_COMP_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_WR_COMP_ADDR_HI */
+#define DMA0_CORE_WR_COMP_ADDR_HI_VAL_SHIFT 0
+#define DMA0_CORE_WR_COMP_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_WR_COMP_AWUSER_31_11 */
+#define DMA0_CORE_WR_COMP_AWUSER_31_11_VAL_SHIFT 0
+#define DMA0_CORE_WR_COMP_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_CORE_TE_NUMROWS */
+#define DMA0_CORE_TE_NUMROWS_VAL_SHIFT 0
+#define DMA0_CORE_TE_NUMROWS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_PROT */
+#define DMA0_CORE_PROT_VAL_SHIFT 0
+#define DMA0_CORE_PROT_VAL_MASK 0x1
+#define DMA0_CORE_PROT_ERR_VAL_SHIFT 1
+#define DMA0_CORE_PROT_ERR_VAL_MASK 0x2
+
+/* DMA0_CORE_SECURE_PROPS */
+#define DMA0_CORE_SECURE_PROPS_ASID_SHIFT 0
+#define DMA0_CORE_SECURE_PROPS_ASID_MASK 0x3FF
+#define DMA0_CORE_SECURE_PROPS_MMBP_SHIFT 10
+#define DMA0_CORE_SECURE_PROPS_MMBP_MASK 0x400
+
+/* DMA0_CORE_NON_SECURE_PROPS */
+#define DMA0_CORE_NON_SECURE_PROPS_ASID_SHIFT 0
+#define DMA0_CORE_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define DMA0_CORE_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define DMA0_CORE_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* DMA0_CORE_RD_MAX_OUTSTAND */
+#define DMA0_CORE_RD_MAX_OUTSTAND_VAL_SHIFT 0
+#define DMA0_CORE_RD_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* DMA0_CORE_RD_MAX_SIZE */
+#define DMA0_CORE_RD_MAX_SIZE_DATA_SHIFT 0
+#define DMA0_CORE_RD_MAX_SIZE_DATA_MASK 0x7FF
+#define DMA0_CORE_RD_MAX_SIZE_MD_SHIFT 16
+#define DMA0_CORE_RD_MAX_SIZE_MD_MASK 0x7FF0000
+
+/* DMA0_CORE_RD_ARCACHE */
+#define DMA0_CORE_RD_ARCACHE_VAL_SHIFT 0
+#define DMA0_CORE_RD_ARCACHE_VAL_MASK 0xF
+
+/* DMA0_CORE_RD_ARUSER_31_11 */
+#define DMA0_CORE_RD_ARUSER_31_11_VAL_SHIFT 0
+#define DMA0_CORE_RD_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_CORE_RD_INFLIGHTS */
+#define DMA0_CORE_RD_INFLIGHTS_VAL_SHIFT 0
+#define DMA0_CORE_RD_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_WR_MAX_OUTSTAND */
+#define DMA0_CORE_WR_MAX_OUTSTAND_VAL_SHIFT 0
+#define DMA0_CORE_WR_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* DMA0_CORE_WR_MAX_AWID */
+#define DMA0_CORE_WR_MAX_AWID_VAL_SHIFT 0
+#define DMA0_CORE_WR_MAX_AWID_VAL_MASK 0xFFFF
+
+/* DMA0_CORE_WR_AWCACHE */
+#define DMA0_CORE_WR_AWCACHE_VAL_SHIFT 0
+#define DMA0_CORE_WR_AWCACHE_VAL_MASK 0xF
+
+/* DMA0_CORE_WR_AWUSER_31_11 */
+#define DMA0_CORE_WR_AWUSER_31_11_VAL_SHIFT 0
+#define DMA0_CORE_WR_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_CORE_WR_INFLIGHTS */
+#define DMA0_CORE_WR_INFLIGHTS_VAL_SHIFT 0
+#define DMA0_CORE_WR_INFLIGHTS_VAL_MASK 0xFFFF
+
+/* DMA0_CORE_RD_RATE_LIM_CFG_0 */
+#define DMA0_CORE_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DMA0_CORE_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DMA0_CORE_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DMA0_CORE_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DMA0_CORE_RD_RATE_LIM_CFG_1 */
+#define DMA0_CORE_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DMA0_CORE_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DMA0_CORE_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DMA0_CORE_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DMA0_CORE_WR_RATE_LIM_CFG_0 */
+#define DMA0_CORE_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DMA0_CORE_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DMA0_CORE_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DMA0_CORE_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DMA0_CORE_WR_RATE_LIM_CFG_1 */
+#define DMA0_CORE_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DMA0_CORE_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DMA0_CORE_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DMA0_CORE_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DMA0_CORE_ERR_CFG */
+#define DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT 0
+#define DMA0_CORE_ERR_CFG_ERR_MSG_EN_MASK 0x1
+#define DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT 1
+#define DMA0_CORE_ERR_CFG_STOP_ON_ERR_MASK 0x2
+
+/* DMA0_CORE_ERR_CAUSE */
+#define DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_SHIFT 0
+#define DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK 0x1
+#define DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_SHIFT 1
+#define DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK 0x2
+#define DMA0_CORE_ERR_CAUSE_LBW_WR_ERR_SHIFT 2
+#define DMA0_CORE_ERR_CAUSE_LBW_WR_ERR_MASK 0x4
+#define DMA0_CORE_ERR_CAUSE_DESC_OVF_SHIFT 3
+#define DMA0_CORE_ERR_CAUSE_DESC_OVF_MASK 0x8
+
+/* DMA0_CORE_ERRMSG_ADDR_LO */
+#define DMA0_CORE_ERRMSG_ADDR_LO_VAL_SHIFT 0
+#define DMA0_CORE_ERRMSG_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_ERRMSG_ADDR_HI */
+#define DMA0_CORE_ERRMSG_ADDR_HI_VAL_SHIFT 0
+#define DMA0_CORE_ERRMSG_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_ERRMSG_WDATA */
+#define DMA0_CORE_ERRMSG_WDATA_VAL_SHIFT 0
+#define DMA0_CORE_ERRMSG_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_STS0 */
+#define DMA0_CORE_STS0_RD_REQ_CNT_SHIFT 0
+#define DMA0_CORE_STS0_RD_REQ_CNT_MASK 0x7FFF
+#define DMA0_CORE_STS0_WR_REQ_CNT_SHIFT 16
+#define DMA0_CORE_STS0_WR_REQ_CNT_MASK 0x7FFF0000
+#define DMA0_CORE_STS0_BUSY_SHIFT 31
+#define DMA0_CORE_STS0_BUSY_MASK 0x80000000
+
+/* DMA0_CORE_STS1 */
+#define DMA0_CORE_STS1_IS_HALT_SHIFT 0
+#define DMA0_CORE_STS1_IS_HALT_MASK 0x1
+
+/* DMA0_CORE_RD_DBGMEM_ADD */
+#define DMA0_CORE_RD_DBGMEM_ADD_VAL_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_ADD_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_RD_DBGMEM_DATA_WR */
+#define DMA0_CORE_RD_DBGMEM_DATA_WR_VAL_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_DATA_WR_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_RD_DBGMEM_DATA_RD */
+#define DMA0_CORE_RD_DBGMEM_DATA_RD_VAL_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_DATA_RD_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_RD_DBGMEM_CTRL */
+#define DMA0_CORE_RD_DBGMEM_CTRL_WR_NRD_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_CTRL_WR_NRD_MASK 0x1
+
+/* DMA0_CORE_RD_DBGMEM_RC */
+#define DMA0_CORE_RD_DBGMEM_RC_VALID_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_RC_VALID_MASK 0x1
+
+/* DMA0_CORE_DBG_HBW_AXI_AR_CNT */
+
+/* DMA0_CORE_DBG_HBW_AXI_AW_CNT */
+
+/* DMA0_CORE_DBG_LBW_AXI_AW_CNT */
+
+/* DMA0_CORE_DBG_DESC_CNT */
+#define DMA0_CORE_DBG_DESC_CNT_RD_STS_CTX_CNT_SHIFT 0
+#define DMA0_CORE_DBG_DESC_CNT_RD_STS_CTX_CNT_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DBG_STS */
+#define DMA0_CORE_DBG_STS_RD_CTX_FULL_SHIFT 0
+#define DMA0_CORE_DBG_STS_RD_CTX_FULL_MASK 0x1
+#define DMA0_CORE_DBG_STS_WR_CTX_FULL_SHIFT 1
+#define DMA0_CORE_DBG_STS_WR_CTX_FULL_MASK 0x2
+#define DMA0_CORE_DBG_STS_WR_COMP_FULL_SHIFT 2
+#define DMA0_CORE_DBG_STS_WR_COMP_FULL_MASK 0x4
+#define DMA0_CORE_DBG_STS_RD_CTX_EMPTY_SHIFT 3
+#define DMA0_CORE_DBG_STS_RD_CTX_EMPTY_MASK 0x8
+#define DMA0_CORE_DBG_STS_WR_CTX_EMPTY_SHIFT 4
+#define DMA0_CORE_DBG_STS_WR_CTX_EMPTY_MASK 0x10
+#define DMA0_CORE_DBG_STS_WR_COMP_EMPTY_SHIFT 5
+#define DMA0_CORE_DBG_STS_WR_COMP_EMPTY_MASK 0x20
+#define DMA0_CORE_DBG_STS_TE_EMPTY_SHIFT 6
+#define DMA0_CORE_DBG_STS_TE_EMPTY_MASK 0x40
+#define DMA0_CORE_DBG_STS_TE_BUSY_SHIFT 7
+#define DMA0_CORE_DBG_STS_TE_BUSY_MASK 0x80
+#define DMA0_CORE_DBG_STS_GSKT_EMPTY_SHIFT 8
+#define DMA0_CORE_DBG_STS_GSKT_EMPTY_MASK 0x100
+#define DMA0_CORE_DBG_STS_GSKT_FULL_SHIFT 9
+#define DMA0_CORE_DBG_STS_GSKT_FULL_MASK 0x200
+#define DMA0_CORE_DBG_STS_RDBUF_FULLNESS_SHIFT 20
+#define DMA0_CORE_DBG_STS_RDBUF_FULLNESS_MASK 0x7FF00000
+
+/* DMA0_CORE_DBG_RD_DESC_ID */
+
+/* DMA0_CORE_DBG_WR_DESC_ID */
+
+#endif /* ASIC_REG_DMA0_CORE_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
new file mode 100644
index 000000000000..1fdd5d5fc6d2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA0_CORE_REGS_H_
+#define ASIC_REG_DMA0_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA0_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA0_CORE_CFG_0 0x500000
+
+#define mmDMA0_CORE_CFG_1 0x500004
+
+#define mmDMA0_CORE_LBW_MAX_OUTSTAND 0x500008
+
+#define mmDMA0_CORE_SRC_BASE_LO 0x500014
+
+#define mmDMA0_CORE_SRC_BASE_HI 0x500018
+
+#define mmDMA0_CORE_DST_BASE_LO 0x50001C
+
+#define mmDMA0_CORE_DST_BASE_HI 0x500020
+
+#define mmDMA0_CORE_SRC_TSIZE_1 0x50002C
+
+#define mmDMA0_CORE_SRC_STRIDE_1 0x500030
+
+#define mmDMA0_CORE_SRC_TSIZE_2 0x500034
+
+#define mmDMA0_CORE_SRC_STRIDE_2 0x500038
+
+#define mmDMA0_CORE_SRC_TSIZE_3 0x50003C
+
+#define mmDMA0_CORE_SRC_STRIDE_3 0x500040
+
+#define mmDMA0_CORE_SRC_TSIZE_4 0x500044
+
+#define mmDMA0_CORE_SRC_STRIDE_4 0x500048
+
+#define mmDMA0_CORE_SRC_TSIZE_0 0x50004C
+
+#define mmDMA0_CORE_DST_TSIZE_1 0x500054
+
+#define mmDMA0_CORE_DST_STRIDE_1 0x500058
+
+#define mmDMA0_CORE_DST_TSIZE_2 0x50005C
+
+#define mmDMA0_CORE_DST_STRIDE_2 0x500060
+
+#define mmDMA0_CORE_DST_TSIZE_3 0x500064
+
+#define mmDMA0_CORE_DST_STRIDE_3 0x500068
+
+#define mmDMA0_CORE_DST_TSIZE_4 0x50006C
+
+#define mmDMA0_CORE_DST_STRIDE_4 0x500070
+
+#define mmDMA0_CORE_DST_TSIZE_0 0x500074
+
+#define mmDMA0_CORE_COMMIT 0x500078
+
+#define mmDMA0_CORE_WR_COMP_WDATA 0x50007C
+
+#define mmDMA0_CORE_WR_COMP_ADDR_LO 0x500080
+
+#define mmDMA0_CORE_WR_COMP_ADDR_HI 0x500084
+
+#define mmDMA0_CORE_WR_COMP_AWUSER_31_11 0x500088
+
+#define mmDMA0_CORE_TE_NUMROWS 0x500094
+
+#define mmDMA0_CORE_PROT 0x5000B8
+
+#define mmDMA0_CORE_SECURE_PROPS 0x5000F0
+
+#define mmDMA0_CORE_NON_SECURE_PROPS 0x5000F4
+
+#define mmDMA0_CORE_RD_MAX_OUTSTAND 0x500100
+
+#define mmDMA0_CORE_RD_MAX_SIZE 0x500104
+
+#define mmDMA0_CORE_RD_ARCACHE 0x500108
+
+#define mmDMA0_CORE_RD_ARUSER_31_11 0x500110
+
+#define mmDMA0_CORE_RD_INFLIGHTS 0x500114
+
+#define mmDMA0_CORE_WR_MAX_OUTSTAND 0x500120
+
+#define mmDMA0_CORE_WR_MAX_AWID 0x500124
+
+#define mmDMA0_CORE_WR_AWCACHE 0x500128
+
+#define mmDMA0_CORE_WR_AWUSER_31_11 0x500130
+
+#define mmDMA0_CORE_WR_INFLIGHTS 0x500134
+
+#define mmDMA0_CORE_RD_RATE_LIM_CFG_0 0x500150
+
+#define mmDMA0_CORE_RD_RATE_LIM_CFG_1 0x500154
+
+#define mmDMA0_CORE_WR_RATE_LIM_CFG_0 0x500158
+
+#define mmDMA0_CORE_WR_RATE_LIM_CFG_1 0x50015C
+
+#define mmDMA0_CORE_ERR_CFG 0x500160
+
+#define mmDMA0_CORE_ERR_CAUSE 0x500164
+
+#define mmDMA0_CORE_ERRMSG_ADDR_LO 0x500170
+
+#define mmDMA0_CORE_ERRMSG_ADDR_HI 0x500174
+
+#define mmDMA0_CORE_ERRMSG_WDATA 0x500178
+
+#define mmDMA0_CORE_STS0 0x500190
+
+#define mmDMA0_CORE_STS1 0x500194
+
+#define mmDMA0_CORE_RD_DBGMEM_ADD 0x500200
+
+#define mmDMA0_CORE_RD_DBGMEM_DATA_WR 0x500204
+
+#define mmDMA0_CORE_RD_DBGMEM_DATA_RD 0x500208
+
+#define mmDMA0_CORE_RD_DBGMEM_CTRL 0x50020C
+
+#define mmDMA0_CORE_RD_DBGMEM_RC 0x500210
+
+#define mmDMA0_CORE_DBG_HBW_AXI_AR_CNT 0x500220
+
+#define mmDMA0_CORE_DBG_HBW_AXI_AW_CNT 0x500224
+
+#define mmDMA0_CORE_DBG_LBW_AXI_AW_CNT 0x500228
+
+#define mmDMA0_CORE_DBG_DESC_CNT 0x50022C
+
+#define mmDMA0_CORE_DBG_STS 0x500230
+
+#define mmDMA0_CORE_DBG_RD_DESC_ID 0x500234
+
+#define mmDMA0_CORE_DBG_WR_DESC_ID 0x500238
+
+#endif /* ASIC_REG_DMA0_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
new file mode 100644
index 000000000000..48376aabc3ba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
@@ -0,0 +1,800 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA0_QM_MASKS_H_
+#define ASIC_REG_DMA0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * DMA0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* DMA0_QM_GLBL_CFG0 */
+#define DMA0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define DMA0_QM_GLBL_CFG0_PQF_EN_MASK 0xF
+#define DMA0_QM_GLBL_CFG0_CQF_EN_SHIFT 4
+#define DMA0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0
+#define DMA0_QM_GLBL_CFG0_CP_EN_SHIFT 9
+#define DMA0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00
+
+/* DMA0_QM_GLBL_CFG1 */
+#define DMA0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define DMA0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF
+#define DMA0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4
+#define DMA0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0
+#define DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT 9
+#define DMA0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00
+#define DMA0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16
+#define DMA0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000
+#define DMA0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20
+#define DMA0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000
+#define DMA0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25
+#define DMA0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000
+
+/* DMA0_QM_GLBL_PROT */
+#define DMA0_QM_GLBL_PROT_PQF_SHIFT 0
+#define DMA0_QM_GLBL_PROT_PQF_MASK 0xF
+#define DMA0_QM_GLBL_PROT_CQF_SHIFT 4
+#define DMA0_QM_GLBL_PROT_CQF_MASK 0x1F0
+#define DMA0_QM_GLBL_PROT_CP_SHIFT 9
+#define DMA0_QM_GLBL_PROT_CP_MASK 0x3E00
+#define DMA0_QM_GLBL_PROT_ERR_SHIFT 14
+#define DMA0_QM_GLBL_PROT_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_PROT_ARB_SHIFT 15
+#define DMA0_QM_GLBL_PROT_ARB_MASK 0x8000
+
+/* DMA0_QM_GLBL_ERR_CFG */
+#define DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0
+#define DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF
+#define DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0
+#define DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9
+#define DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00
+#define DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16
+#define DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000
+#define DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20
+#define DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000
+#define DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25
+#define DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000
+#define DMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31
+#define DMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000
+
+/* DMA0_QM_GLBL_SECURE_PROPS */
+#define DMA0_QM_GLBL_SECURE_PROPS_0_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_1_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_2_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_3_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_4_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* DMA0_QM_GLBL_NON_SECURE_PROPS */
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* DMA0_QM_GLBL_STS0 */
+#define DMA0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define DMA0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF
+#define DMA0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4
+#define DMA0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0
+#define DMA0_QM_GLBL_STS0_CP_IDLE_SHIFT 9
+#define DMA0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00
+#define DMA0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16
+#define DMA0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000
+#define DMA0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20
+#define DMA0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000
+#define DMA0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25
+#define DMA0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000
+#define DMA0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31
+#define DMA0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000
+
+/* DMA0_QM_GLBL_STS1 */
+#define DMA0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define DMA0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define DMA0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define DMA0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define DMA0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define DMA0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define DMA0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define DMA0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define DMA0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define DMA0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define DMA0_QM_GLBL_STS1_CP_WREG_ERR_SHIFT 6
+#define DMA0_QM_GLBL_STS1_CP_WREG_ERR_MASK 0x40
+#define DMA0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DMA0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DMA0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DMA0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DMA0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DMA0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DMA0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DMA0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DMA0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DMA0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DMA0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DMA0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DMA0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DMA0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DMA0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* DMA0_QM_GLBL_STS1_4 */
+#define DMA0_QM_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1
+#define DMA0_QM_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2
+#define DMA0_QM_GLBL_STS1_4_CP_RD_ERR_SHIFT 2
+#define DMA0_QM_GLBL_STS1_4_CP_RD_ERR_MASK 0x4
+#define DMA0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA0_QM_GLBL_STS1_4_CP_STOP_OP_SHIFT 4
+#define DMA0_QM_GLBL_STS1_4_CP_STOP_OP_MASK 0x10
+#define DMA0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5
+#define DMA0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20
+#define DMA0_QM_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6
+#define DMA0_QM_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* DMA0_QM_GLBL_MSG_EN */
+#define DMA0_QM_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0
+#define DMA0_QM_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1
+#define DMA0_QM_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1
+#define DMA0_QM_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2
+#define DMA0_QM_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2
+#define DMA0_QM_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4
+#define DMA0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA0_QM_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4
+#define DMA0_QM_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10
+#define DMA0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5
+#define DMA0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20
+#define DMA0_QM_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6
+#define DMA0_QM_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* DMA0_QM_GLBL_MSG_EN_4 */
+#define DMA0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1
+#define DMA0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2
+#define DMA0_QM_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2
+#define DMA0_QM_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4
+#define DMA0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA0_QM_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4
+#define DMA0_QM_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10
+#define DMA0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5
+#define DMA0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20
+#define DMA0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6
+#define DMA0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* DMA0_QM_PQ_BASE_LO */
+#define DMA0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define DMA0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_BASE_HI */
+#define DMA0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define DMA0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_SIZE */
+#define DMA0_QM_PQ_SIZE_VAL_SHIFT 0
+#define DMA0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_PI */
+#define DMA0_QM_PQ_PI_VAL_SHIFT 0
+#define DMA0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_CI */
+#define DMA0_QM_PQ_CI_VAL_SHIFT 0
+#define DMA0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_CFG0 */
+#define DMA0_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define DMA0_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* DMA0_QM_PQ_CFG1 */
+#define DMA0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DMA0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define DMA0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DMA0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* DMA0_QM_PQ_ARUSER_31_11 */
+#define DMA0_QM_PQ_ARUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_PQ_STS0 */
+#define DMA0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define DMA0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define DMA0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define DMA0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* DMA0_QM_PQ_STS1 */
+#define DMA0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define DMA0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define DMA0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define DMA0_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define DMA0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* DMA0_QM_CQ_CFG0 */
+#define DMA0_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define DMA0_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* DMA0_QM_CQ_CFG1 */
+#define DMA0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DMA0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define DMA0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DMA0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_ARUSER_31_11 */
+#define DMA0_QM_CQ_ARUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_CQ_STS0 */
+#define DMA0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define DMA0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define DMA0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define DMA0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_STS1 */
+#define DMA0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define DMA0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define DMA0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define DMA0_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define DMA0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* DMA0_QM_CQ_PTR_LO_0 */
+#define DMA0_QM_CQ_PTR_LO_0_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_0 */
+#define DMA0_QM_CQ_PTR_HI_0_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_0 */
+#define DMA0_QM_CQ_TSIZE_0_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_0 */
+#define DMA0_QM_CQ_CTL_0_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_0_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_0_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_0_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_1 */
+#define DMA0_QM_CQ_PTR_LO_1_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_1 */
+#define DMA0_QM_CQ_PTR_HI_1_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_1 */
+#define DMA0_QM_CQ_TSIZE_1_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_1 */
+#define DMA0_QM_CQ_CTL_1_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_1_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_1_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_1_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_2 */
+#define DMA0_QM_CQ_PTR_LO_2_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_2 */
+#define DMA0_QM_CQ_PTR_HI_2_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_2 */
+#define DMA0_QM_CQ_TSIZE_2_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_2 */
+#define DMA0_QM_CQ_CTL_2_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_2_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_2_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_2_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_3 */
+#define DMA0_QM_CQ_PTR_LO_3_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_3 */
+#define DMA0_QM_CQ_PTR_HI_3_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_3 */
+#define DMA0_QM_CQ_TSIZE_3_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_3 */
+#define DMA0_QM_CQ_CTL_3_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_3_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_3_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_3_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_4 */
+#define DMA0_QM_CQ_PTR_LO_4_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_4 */
+#define DMA0_QM_CQ_PTR_HI_4_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_4 */
+#define DMA0_QM_CQ_TSIZE_4_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_4 */
+#define DMA0_QM_CQ_CTL_4_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_4_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_4_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_4_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_STS */
+#define DMA0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_STS */
+#define DMA0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_STS */
+#define DMA0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_STS */
+#define DMA0_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_IFIFO_CNT */
+#define DMA0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define DMA0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* DMA0_QM_CP_MSG_BASE0_ADDR_LO */
+#define DMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE0_ADDR_HI */
+#define DMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE1_ADDR_LO */
+#define DMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE1_ADDR_HI */
+#define DMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE2_ADDR_LO */
+#define DMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE2_ADDR_HI */
+#define DMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE3_ADDR_LO */
+#define DMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE3_ADDR_HI */
+#define DMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_LDMA_TSIZE_OFFSET */
+#define DMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define DMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define DMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define DMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_FENCE0_RDATA */
+#define DMA0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* DMA0_QM_CP_FENCE1_RDATA */
+#define DMA0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* DMA0_QM_CP_FENCE2_RDATA */
+#define DMA0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* DMA0_QM_CP_FENCE3_RDATA */
+#define DMA0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* DMA0_QM_CP_FENCE0_CNT */
+#define DMA0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF
+
+/* DMA0_QM_CP_FENCE1_CNT */
+#define DMA0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF
+
+/* DMA0_QM_CP_FENCE2_CNT */
+#define DMA0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF
+
+/* DMA0_QM_CP_FENCE3_CNT */
+#define DMA0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF
+
+/* DMA0_QM_CP_STS */
+#define DMA0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define DMA0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA0_QM_CP_STS_ERDY_SHIFT 16
+#define DMA0_QM_CP_STS_ERDY_MASK 0x10000
+#define DMA0_QM_CP_STS_RRDY_SHIFT 17
+#define DMA0_QM_CP_STS_RRDY_MASK 0x20000
+#define DMA0_QM_CP_STS_MRDY_SHIFT 18
+#define DMA0_QM_CP_STS_MRDY_MASK 0x40000
+#define DMA0_QM_CP_STS_SW_STOP_SHIFT 19
+#define DMA0_QM_CP_STS_SW_STOP_MASK 0x80000
+#define DMA0_QM_CP_STS_FENCE_ID_SHIFT 20
+#define DMA0_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define DMA0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define DMA0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* DMA0_QM_CP_CURRENT_INST_LO */
+#define DMA0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_CURRENT_INST_HI */
+#define DMA0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_BARRIER_CFG */
+#define DMA0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define DMA0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+#define DMA0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16
+#define DMA0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000
+
+/* DMA0_QM_CP_DBG_0 */
+#define DMA0_QM_CP_DBG_0_CS_SHIFT 0
+#define DMA0_QM_CP_DBG_0_CS_MASK 0xF
+#define DMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4
+#define DMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10
+#define DMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5
+#define DMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20
+#define DMA0_QM_CP_DBG_0_MREB_STALL_SHIFT 6
+#define DMA0_QM_CP_DBG_0_MREB_STALL_MASK 0x40
+#define DMA0_QM_CP_DBG_0_STALL_SHIFT 7
+#define DMA0_QM_CP_DBG_0_STALL_MASK 0x80
+
+/* DMA0_QM_CP_ARUSER_31_11 */
+#define DMA0_QM_CP_ARUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_CP_AWUSER_31_11 */
+#define DMA0_QM_CP_AWUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_ARB_CFG_0 */
+#define DMA0_QM_ARB_CFG_0_TYPE_SHIFT 0
+#define DMA0_QM_ARB_CFG_0_TYPE_MASK 0x1
+#define DMA0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4
+#define DMA0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10
+#define DMA0_QM_ARB_CFG_0_EN_SHIFT 8
+#define DMA0_QM_ARB_CFG_0_EN_MASK 0x100
+#define DMA0_QM_ARB_CFG_0_MASK_SHIFT 12
+#define DMA0_QM_ARB_CFG_0_MASK_MASK 0xF000
+#define DMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16
+#define DMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000
+
+/* DMA0_QM_ARB_CHOISE_Q_PUSH */
+#define DMA0_QM_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0
+#define DMA0_QM_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3
+
+/* DMA0_QM_ARB_WRR_WEIGHT */
+#define DMA0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0
+#define DMA0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_CFG_1 */
+#define DMA0_QM_ARB_CFG_1_CLR_SHIFT 0
+#define DMA0_QM_ARB_CFG_1_CLR_MASK 0x1
+
+/* DMA0_QM_ARB_MST_AVAIL_CRED */
+#define DMA0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F
+
+/* DMA0_QM_ARB_MST_CRED_INC */
+#define DMA0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_MST_CHOISE_PUSH_OFST */
+#define DMA0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST */
+#define DMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0
+#define DMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_MST_SLAVE_EN */
+#define DMA0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_MST_QUIET_PER */
+#define DMA0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_SLV_CHOISE_WDT */
+#define DMA0_QM_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0
+#define DMA0_QM_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_SLV_ID */
+#define DMA0_QM_ARB_SLV_ID_VAL_SHIFT 0
+#define DMA0_QM_ARB_SLV_ID_VAL_MASK 0x1F
+
+/* DMA0_QM_ARB_MSG_MAX_INFLIGHT */
+#define DMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0
+#define DMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F
+
+/* DMA0_QM_ARB_MSG_AWUSER_31_11 */
+#define DMA0_QM_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_ARB_MSG_AWUSER_SEC_PROP */
+#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0
+#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF
+#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10
+#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400
+
+/* DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP */
+#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0
+#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF
+#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10
+#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400
+
+/* DMA0_QM_ARB_BASE_LO */
+#define DMA0_QM_ARB_BASE_LO_VAL_SHIFT 0
+#define DMA0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_BASE_HI */
+#define DMA0_QM_ARB_BASE_HI_VAL_SHIFT 0
+#define DMA0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_STATE_STS */
+#define DMA0_QM_ARB_STATE_STS_VAL_SHIFT 0
+#define DMA0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_CHOISE_FULLNESS_STS */
+#define DMA0_QM_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0
+#define DMA0_QM_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F
+
+/* DMA0_QM_ARB_MSG_STS */
+#define DMA0_QM_ARB_MSG_STS_FULL_SHIFT 0
+#define DMA0_QM_ARB_MSG_STS_FULL_MASK 0x1
+#define DMA0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1
+#define DMA0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2
+
+/* DMA0_QM_ARB_SLV_CHOISE_Q_HEAD */
+#define DMA0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0
+#define DMA0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3
+
+/* DMA0_QM_ARB_ERR_CAUSE */
+#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0
+#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1
+#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1
+#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2
+#define DMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2
+#define DMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4
+
+/* DMA0_QM_ARB_ERR_MSG_EN */
+#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0
+#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
+#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1
+#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
+#define DMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2
+#define DMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+/* DMA0_QM_ARB_ERR_STS_DRP */
+#define DMA0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0
+#define DMA0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3
+
+/* DMA0_QM_ARB_MST_CRED_STS */
+#define DMA0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F
+
+/* DMA0_QM_CGM_CFG */
+#define DMA0_QM_CGM_CFG_IDLE_TH_SHIFT 0
+#define DMA0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF
+#define DMA0_QM_CGM_CFG_G2F_TH_SHIFT 16
+#define DMA0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000
+#define DMA0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT 24
+#define DMA0_QM_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000
+#define DMA0_QM_CGM_CFG_EN_SHIFT 31
+#define DMA0_QM_CGM_CFG_EN_MASK 0x80000000
+
+/* DMA0_QM_CGM_STS */
+#define DMA0_QM_CGM_STS_ST_SHIFT 0
+#define DMA0_QM_CGM_STS_ST_MASK 0x3
+#define DMA0_QM_CGM_STS_CG_SHIFT 4
+#define DMA0_QM_CGM_STS_CG_MASK 0x10
+#define DMA0_QM_CGM_STS_AGENT_IDLE_SHIFT 8
+#define DMA0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
+#define DMA0_QM_CGM_STS_AXI_IDLE_SHIFT 9
+#define DMA0_QM_CGM_STS_AXI_IDLE_MASK 0x200
+#define DMA0_QM_CGM_STS_CP_IDLE_SHIFT 10
+#define DMA0_QM_CGM_STS_CP_IDLE_MASK 0x400
+
+/* DMA0_QM_CGM_CFG1 */
+#define DMA0_QM_CGM_CFG1_MASK_TH_SHIFT 0
+#define DMA0_QM_CGM_CFG1_MASK_TH_MASK 0xFF
+
+/* DMA0_QM_LOCAL_RANGE_BASE */
+#define DMA0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0
+#define DMA0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF
+
+/* DMA0_QM_LOCAL_RANGE_SIZE */
+#define DMA0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0
+#define DMA0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF
+
+/* DMA0_QM_CSMR_STRICT_PRIO_CFG */
+#define DMA0_QM_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0
+#define DMA0_QM_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1
+
+/* DMA0_QM_HBW_RD_RATE_LIM_CFG_1 */
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DMA0_QM_LBW_WR_RATE_LIM_CFG_0 */
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DMA0_QM_LBW_WR_RATE_LIM_CFG_1 */
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DMA0_QM_HBW_RD_RATE_LIM_CFG_0 */
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DMA0_QM_GLBL_AXCACHE */
+#define DMA0_QM_GLBL_AXCACHE_AR_SHIFT 0
+#define DMA0_QM_GLBL_AXCACHE_AR_MASK 0xF
+#define DMA0_QM_GLBL_AXCACHE_AW_SHIFT 16
+#define DMA0_QM_GLBL_AXCACHE_AW_MASK 0xF0000
+
+/* DMA0_QM_IND_GW_APB_CFG */
+#define DMA0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0
+#define DMA0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF
+#define DMA0_QM_IND_GW_APB_CFG_CMD_SHIFT 31
+#define DMA0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000
+
+/* DMA0_QM_IND_GW_APB_WDATA */
+#define DMA0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0
+#define DMA0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_IND_GW_APB_RDATA */
+#define DMA0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0
+#define DMA0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_IND_GW_APB_STATUS */
+#define DMA0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0
+#define DMA0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1
+#define DMA0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1
+#define DMA0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2
+
+/* DMA0_QM_GLBL_ERR_ADDR_LO */
+#define DMA0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_GLBL_ERR_ADDR_HI */
+#define DMA0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_GLBL_ERR_WDATA */
+#define DMA0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define DMA0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_GLBL_MEM_INIT_BUSY */
+#define DMA0_QM_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0
+#define DMA0_QM_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF
+
+#endif /* ASIC_REG_DMA0_QM_MASKS_H_ */
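The SHIFT/MASK pairs in the masks header above follow the usual read-modify-write convention: clear a field with its mask, OR in the new value shifted into position, and shift-and-mask to read it back. A minimal user-space sketch (not part of the patch) using the DMA0_QM_CGM_CFG field definitions verbatim; the "register" here is an ordinary variable standing in for the driver's MMIO accessors:

/* Field access demo for the SHIFT/MASK pairs above -- illustrative
 * only. The defines are copied from dma0_qm_masks.h; the register
 * value is a plain variable, not a real MMIO access.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA0_QM_CGM_CFG_IDLE_TH_SHIFT 0
#define DMA0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF
#define DMA0_QM_CGM_CFG_EN_SHIFT 31
#define DMA0_QM_CGM_CFG_EN_MASK 0x80000000

int main(void)
{
	uint32_t cfg = 0;

	/* Program IDLE_TH = 0x80 and set the enable bit. */
	cfg &= ~(uint32_t)DMA0_QM_CGM_CFG_IDLE_TH_MASK;
	cfg |= (0x80u << DMA0_QM_CGM_CFG_IDLE_TH_SHIFT) &
	       DMA0_QM_CGM_CFG_IDLE_TH_MASK;
	cfg |= DMA0_QM_CGM_CFG_EN_MASK;

	/* Read the field back out. */
	uint32_t idle_th = (cfg & DMA0_QM_CGM_CFG_IDLE_TH_MASK) >>
			   DMA0_QM_CGM_CFG_IDLE_TH_SHIFT;

	printf("cfg=0x%08x idle_th=0x%x\n", cfg, idle_th);
	return 0;
}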
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
new file mode 100644
index 000000000000..8e56a93d88a1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA0_QM_REGS_H_
+#define ASIC_REG_DMA0_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA0_QM_GLBL_CFG0 0x508000
+
+#define mmDMA0_QM_GLBL_CFG1 0x508004
+
+#define mmDMA0_QM_GLBL_PROT 0x508008
+
+#define mmDMA0_QM_GLBL_ERR_CFG 0x50800C
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_0 0x508010
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_1 0x508014
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_2 0x508018
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_3 0x50801C
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_4 0x508020
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 0x508024
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 0x508028
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 0x50802C
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 0x508030
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 0x508034
+
+#define mmDMA0_QM_GLBL_STS0 0x508038
+
+#define mmDMA0_QM_GLBL_STS1_0 0x508040
+
+#define mmDMA0_QM_GLBL_STS1_1 0x508044
+
+#define mmDMA0_QM_GLBL_STS1_2 0x508048
+
+#define mmDMA0_QM_GLBL_STS1_3 0x50804C
+
+#define mmDMA0_QM_GLBL_STS1_4 0x508050
+
+#define mmDMA0_QM_GLBL_MSG_EN_0 0x508054
+
+#define mmDMA0_QM_GLBL_MSG_EN_1 0x508058
+
+#define mmDMA0_QM_GLBL_MSG_EN_2 0x50805C
+
+#define mmDMA0_QM_GLBL_MSG_EN_3 0x508060
+
+#define mmDMA0_QM_GLBL_MSG_EN_4 0x508068
+
+#define mmDMA0_QM_PQ_BASE_LO_0 0x508070
+
+#define mmDMA0_QM_PQ_BASE_LO_1 0x508074
+
+#define mmDMA0_QM_PQ_BASE_LO_2 0x508078
+
+#define mmDMA0_QM_PQ_BASE_LO_3 0x50807C
+
+#define mmDMA0_QM_PQ_BASE_HI_0 0x508080
+
+#define mmDMA0_QM_PQ_BASE_HI_1 0x508084
+
+#define mmDMA0_QM_PQ_BASE_HI_2 0x508088
+
+#define mmDMA0_QM_PQ_BASE_HI_3 0x50808C
+
+#define mmDMA0_QM_PQ_SIZE_0 0x508090
+
+#define mmDMA0_QM_PQ_SIZE_1 0x508094
+
+#define mmDMA0_QM_PQ_SIZE_2 0x508098
+
+#define mmDMA0_QM_PQ_SIZE_3 0x50809C
+
+#define mmDMA0_QM_PQ_PI_0 0x5080A0
+
+#define mmDMA0_QM_PQ_PI_1 0x5080A4
+
+#define mmDMA0_QM_PQ_PI_2 0x5080A8
+
+#define mmDMA0_QM_PQ_PI_3 0x5080AC
+
+#define mmDMA0_QM_PQ_CI_0 0x5080B0
+
+#define mmDMA0_QM_PQ_CI_1 0x5080B4
+
+#define mmDMA0_QM_PQ_CI_2 0x5080B8
+
+#define mmDMA0_QM_PQ_CI_3 0x5080BC
+
+#define mmDMA0_QM_PQ_CFG0_0 0x5080C0
+
+#define mmDMA0_QM_PQ_CFG0_1 0x5080C4
+
+#define mmDMA0_QM_PQ_CFG0_2 0x5080C8
+
+#define mmDMA0_QM_PQ_CFG0_3 0x5080CC
+
+#define mmDMA0_QM_PQ_CFG1_0 0x5080D0
+
+#define mmDMA0_QM_PQ_CFG1_1 0x5080D4
+
+#define mmDMA0_QM_PQ_CFG1_2 0x5080D8
+
+#define mmDMA0_QM_PQ_CFG1_3 0x5080DC
+
+#define mmDMA0_QM_PQ_ARUSER_31_11_0 0x5080E0
+
+#define mmDMA0_QM_PQ_ARUSER_31_11_1 0x5080E4
+
+#define mmDMA0_QM_PQ_ARUSER_31_11_2 0x5080E8
+
+#define mmDMA0_QM_PQ_ARUSER_31_11_3 0x5080EC
+
+#define mmDMA0_QM_PQ_STS0_0 0x5080F0
+
+#define mmDMA0_QM_PQ_STS0_1 0x5080F4
+
+#define mmDMA0_QM_PQ_STS0_2 0x5080F8
+
+#define mmDMA0_QM_PQ_STS0_3 0x5080FC
+
+#define mmDMA0_QM_PQ_STS1_0 0x508100
+
+#define mmDMA0_QM_PQ_STS1_1 0x508104
+
+#define mmDMA0_QM_PQ_STS1_2 0x508108
+
+#define mmDMA0_QM_PQ_STS1_3 0x50810C
+
+#define mmDMA0_QM_CQ_CFG0_0 0x508110
+
+#define mmDMA0_QM_CQ_CFG0_1 0x508114
+
+#define mmDMA0_QM_CQ_CFG0_2 0x508118
+
+#define mmDMA0_QM_CQ_CFG0_3 0x50811C
+
+#define mmDMA0_QM_CQ_CFG0_4 0x508120
+
+#define mmDMA0_QM_CQ_CFG1_0 0x508124
+
+#define mmDMA0_QM_CQ_CFG1_1 0x508128
+
+#define mmDMA0_QM_CQ_CFG1_2 0x50812C
+
+#define mmDMA0_QM_CQ_CFG1_3 0x508130
+
+#define mmDMA0_QM_CQ_CFG1_4 0x508134
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_0 0x508138
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_1 0x50813C
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_2 0x508140
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_3 0x508144
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_4 0x508148
+
+#define mmDMA0_QM_CQ_STS0_0 0x50814C
+
+#define mmDMA0_QM_CQ_STS0_1 0x508150
+
+#define mmDMA0_QM_CQ_STS0_2 0x508154
+
+#define mmDMA0_QM_CQ_STS0_3 0x508158
+
+#define mmDMA0_QM_CQ_STS0_4 0x50815C
+
+#define mmDMA0_QM_CQ_STS1_0 0x508160
+
+#define mmDMA0_QM_CQ_STS1_1 0x508164
+
+#define mmDMA0_QM_CQ_STS1_2 0x508168
+
+#define mmDMA0_QM_CQ_STS1_3 0x50816C
+
+#define mmDMA0_QM_CQ_STS1_4 0x508170
+
+#define mmDMA0_QM_CQ_PTR_LO_0 0x508174
+
+#define mmDMA0_QM_CQ_PTR_HI_0 0x508178
+
+#define mmDMA0_QM_CQ_TSIZE_0 0x50817C
+
+#define mmDMA0_QM_CQ_CTL_0 0x508180
+
+#define mmDMA0_QM_CQ_PTR_LO_1 0x508184
+
+#define mmDMA0_QM_CQ_PTR_HI_1 0x508188
+
+#define mmDMA0_QM_CQ_TSIZE_1 0x50818C
+
+#define mmDMA0_QM_CQ_CTL_1 0x508190
+
+#define mmDMA0_QM_CQ_PTR_LO_2 0x508194
+
+#define mmDMA0_QM_CQ_PTR_HI_2 0x508198
+
+#define mmDMA0_QM_CQ_TSIZE_2 0x50819C
+
+#define mmDMA0_QM_CQ_CTL_2 0x5081A0
+
+#define mmDMA0_QM_CQ_PTR_LO_3 0x5081A4
+
+#define mmDMA0_QM_CQ_PTR_HI_3 0x5081A8
+
+#define mmDMA0_QM_CQ_TSIZE_3 0x5081AC
+
+#define mmDMA0_QM_CQ_CTL_3 0x5081B0
+
+#define mmDMA0_QM_CQ_PTR_LO_4 0x5081B4
+
+#define mmDMA0_QM_CQ_PTR_HI_4 0x5081B8
+
+#define mmDMA0_QM_CQ_TSIZE_4 0x5081BC
+
+#define mmDMA0_QM_CQ_CTL_4 0x5081C0
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_0 0x5081C4
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_1 0x5081C8
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_2 0x5081CC
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_3 0x5081D0
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_4 0x5081D4
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_0 0x5081D8
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_1 0x5081DC
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_2 0x5081E0
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_3 0x5081E4
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_4 0x5081E8
+
+#define mmDMA0_QM_CQ_TSIZE_STS_0 0x5081EC
+
+#define mmDMA0_QM_CQ_TSIZE_STS_1 0x5081F0
+
+#define mmDMA0_QM_CQ_TSIZE_STS_2 0x5081F4
+
+#define mmDMA0_QM_CQ_TSIZE_STS_3 0x5081F8
+
+#define mmDMA0_QM_CQ_TSIZE_STS_4 0x5081FC
+
+#define mmDMA0_QM_CQ_CTL_STS_0 0x508200
+
+#define mmDMA0_QM_CQ_CTL_STS_1 0x508204
+
+#define mmDMA0_QM_CQ_CTL_STS_2 0x508208
+
+#define mmDMA0_QM_CQ_CTL_STS_3 0x50820C
+
+#define mmDMA0_QM_CQ_CTL_STS_4 0x508210
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_0 0x508214
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_1 0x508218
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_2 0x50821C
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_3 0x508220
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_4 0x508224
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 0x508228
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 0x50822C
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 0x508230
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 0x508234
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 0x508238
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 0x50823C
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 0x508240
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 0x508244
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 0x508248
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 0x50824C
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 0x508250
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 0x508254
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 0x508258
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 0x50825C
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 0x508260
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 0x508264
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 0x508268
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 0x50826C
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 0x508270
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 0x508274
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 0x508278
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 0x50827C
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 0x508280
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 0x508284
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 0x508288
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 0x50828C
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 0x508290
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 0x508294
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 0x508298
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 0x50829C
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 0x5082A0
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 0x5082A4
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 0x5082A8
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 0x5082AC
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 0x5082B0
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 0x5082B4
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 0x5082B8
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 0x5082BC
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 0x5082C0
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 0x5082C4
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 0x5082C8
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 0x5082CC
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 0x5082D0
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 0x5082D4
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 0x5082D8
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5082E0
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5082E4
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5082E8
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5082EC
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5082F0
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5082F4
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5082F8
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5082FC
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x508300
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x508304
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_0 0x508308
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_1 0x50830C
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_2 0x508310
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_3 0x508314
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_4 0x508318
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_0 0x50831C
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_1 0x508320
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_2 0x508324
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_3 0x508328
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_4 0x50832C
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_0 0x508330
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_1 0x508334
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_2 0x508338
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_3 0x50833C
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_4 0x508340
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_0 0x508344
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_1 0x508348
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_2 0x50834C
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_3 0x508350
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_4 0x508354
+
+#define mmDMA0_QM_CP_FENCE0_CNT_0 0x508358
+
+#define mmDMA0_QM_CP_FENCE0_CNT_1 0x50835C
+
+#define mmDMA0_QM_CP_FENCE0_CNT_2 0x508360
+
+#define mmDMA0_QM_CP_FENCE0_CNT_3 0x508364
+
+#define mmDMA0_QM_CP_FENCE0_CNT_4 0x508368
+
+#define mmDMA0_QM_CP_FENCE1_CNT_0 0x50836C
+
+#define mmDMA0_QM_CP_FENCE1_CNT_1 0x508370
+
+#define mmDMA0_QM_CP_FENCE1_CNT_2 0x508374
+
+#define mmDMA0_QM_CP_FENCE1_CNT_3 0x508378
+
+#define mmDMA0_QM_CP_FENCE1_CNT_4 0x50837C
+
+#define mmDMA0_QM_CP_FENCE2_CNT_0 0x508380
+
+#define mmDMA0_QM_CP_FENCE2_CNT_1 0x508384
+
+#define mmDMA0_QM_CP_FENCE2_CNT_2 0x508388
+
+#define mmDMA0_QM_CP_FENCE2_CNT_3 0x50838C
+
+#define mmDMA0_QM_CP_FENCE2_CNT_4 0x508390
+
+#define mmDMA0_QM_CP_FENCE3_CNT_0 0x508394
+
+#define mmDMA0_QM_CP_FENCE3_CNT_1 0x508398
+
+#define mmDMA0_QM_CP_FENCE3_CNT_2 0x50839C
+
+#define mmDMA0_QM_CP_FENCE3_CNT_3 0x5083A0
+
+#define mmDMA0_QM_CP_FENCE3_CNT_4 0x5083A4
+
+#define mmDMA0_QM_CP_STS_0 0x5083A8
+
+#define mmDMA0_QM_CP_STS_1 0x5083AC
+
+#define mmDMA0_QM_CP_STS_2 0x5083B0
+
+#define mmDMA0_QM_CP_STS_3 0x5083B4
+
+#define mmDMA0_QM_CP_STS_4 0x5083B8
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_0 0x5083BC
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_1 0x5083C0
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_2 0x5083C4
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_3 0x5083C8
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_4 0x5083CC
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_0 0x5083D0
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_1 0x5083D4
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_2 0x5083D8
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_3 0x5083DC
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_4 0x5083E0
+
+#define mmDMA0_QM_CP_BARRIER_CFG_0 0x5083F4
+
+#define mmDMA0_QM_CP_BARRIER_CFG_1 0x5083F8
+
+#define mmDMA0_QM_CP_BARRIER_CFG_2 0x5083FC
+
+#define mmDMA0_QM_CP_BARRIER_CFG_3 0x508400
+
+#define mmDMA0_QM_CP_BARRIER_CFG_4 0x508404
+
+#define mmDMA0_QM_CP_DBG_0_0 0x508408
+
+#define mmDMA0_QM_CP_DBG_0_1 0x50840C
+
+#define mmDMA0_QM_CP_DBG_0_2 0x508410
+
+#define mmDMA0_QM_CP_DBG_0_3 0x508414
+
+#define mmDMA0_QM_CP_DBG_0_4 0x508418
+
+#define mmDMA0_QM_CP_ARUSER_31_11_0 0x50841C
+
+#define mmDMA0_QM_CP_ARUSER_31_11_1 0x508420
+
+#define mmDMA0_QM_CP_ARUSER_31_11_2 0x508424
+
+#define mmDMA0_QM_CP_ARUSER_31_11_3 0x508428
+
+#define mmDMA0_QM_CP_ARUSER_31_11_4 0x50842C
+
+#define mmDMA0_QM_CP_AWUSER_31_11_0 0x508430
+
+#define mmDMA0_QM_CP_AWUSER_31_11_1 0x508434
+
+#define mmDMA0_QM_CP_AWUSER_31_11_2 0x508438
+
+#define mmDMA0_QM_CP_AWUSER_31_11_3 0x50843C
+
+#define mmDMA0_QM_CP_AWUSER_31_11_4 0x508440
+
+#define mmDMA0_QM_ARB_CFG_0 0x508A00
+
+#define mmDMA0_QM_ARB_CHOISE_Q_PUSH 0x508A04
+
+#define mmDMA0_QM_ARB_WRR_WEIGHT_0 0x508A08
+
+#define mmDMA0_QM_ARB_WRR_WEIGHT_1 0x508A0C
+
+#define mmDMA0_QM_ARB_WRR_WEIGHT_2 0x508A10
+
+#define mmDMA0_QM_ARB_WRR_WEIGHT_3 0x508A14
+
+#define mmDMA0_QM_ARB_CFG_1 0x508A18
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_0 0x508A20
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_1 0x508A24
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_2 0x508A28
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_3 0x508A2C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_4 0x508A30
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_5 0x508A34
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_6 0x508A38
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_7 0x508A3C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_8 0x508A40
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_9 0x508A44
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_10 0x508A48
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_11 0x508A4C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_12 0x508A50
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_13 0x508A54
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_14 0x508A58
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_15 0x508A5C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_16 0x508A60
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_17 0x508A64
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_18 0x508A68
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_19 0x508A6C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_20 0x508A70
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_21 0x508A74
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_22 0x508A78
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_23 0x508A7C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_24 0x508A80
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_25 0x508A84
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_26 0x508A88
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_27 0x508A8C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_28 0x508A90
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_29 0x508A94
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_30 0x508A98
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_31 0x508A9C
+
+#define mmDMA0_QM_ARB_MST_CRED_INC 0x508AA0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x508AA4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x508AA8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x508AAC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x508AB0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x508AB4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x508AB8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x508ABC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x508AC0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x508AC4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x508AC8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x508ACC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x508AD0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x508AD4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x508AD8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x508ADC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x508AE0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x508AE4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x508AE8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x508AEC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x508AF0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x508AF4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x508AF8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x508AFC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x508B00
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x508B04
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x508B08
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x508B0C
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x508B10
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x508B14
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x508B18
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x508B1C
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x508B20
+
+#define mmDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x508B28
+
+#define mmDMA0_QM_ARB_MST_SLAVE_EN 0x508B2C
+
+#define mmDMA0_QM_ARB_MST_QUIET_PER 0x508B34
+
+#define mmDMA0_QM_ARB_SLV_CHOISE_WDT 0x508B38
+
+#define mmDMA0_QM_ARB_SLV_ID 0x508B3C
+
+#define mmDMA0_QM_ARB_MSG_MAX_INFLIGHT 0x508B44
+
+#define mmDMA0_QM_ARB_MSG_AWUSER_31_11 0x508B48
+
+#define mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP 0x508B4C
+
+#define mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x508B50
+
+#define mmDMA0_QM_ARB_BASE_LO 0x508B54
+
+#define mmDMA0_QM_ARB_BASE_HI 0x508B58
+
+#define mmDMA0_QM_ARB_STATE_STS 0x508B80
+
+#define mmDMA0_QM_ARB_CHOISE_FULLNESS_STS 0x508B84
+
+#define mmDMA0_QM_ARB_MSG_STS 0x508B88
+
+#define mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD 0x508B8C
+
+#define mmDMA0_QM_ARB_ERR_CAUSE 0x508B9C
+
+#define mmDMA0_QM_ARB_ERR_MSG_EN 0x508BA0
+
+#define mmDMA0_QM_ARB_ERR_STS_DRP 0x508BA8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_0 0x508BB0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_1 0x508BB4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_2 0x508BB8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_3 0x508BBC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_4 0x508BC0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_5 0x508BC4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_6 0x508BC8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_7 0x508BCC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_8 0x508BD0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_9 0x508BD4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_10 0x508BD8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_11 0x508BDC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_12 0x508BE0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_13 0x508BE4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_14 0x508BE8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_15 0x508BEC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_16 0x508BF0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_17 0x508BF4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_18 0x508BF8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_19 0x508BFC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_20 0x508C00
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_21 0x508C04
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_22 0x508C08
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_23 0x508C0C
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_24 0x508C10
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_25 0x508C14
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_26 0x508C18
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_27 0x508C1C
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_28 0x508C20
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_29 0x508C24
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_30 0x508C28
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_31 0x508C2C
+
+#define mmDMA0_QM_CGM_CFG 0x508C70
+
+#define mmDMA0_QM_CGM_STS 0x508C74
+
+#define mmDMA0_QM_CGM_CFG1 0x508C78
+
+#define mmDMA0_QM_LOCAL_RANGE_BASE 0x508C80
+
+#define mmDMA0_QM_LOCAL_RANGE_SIZE 0x508C84
+
+#define mmDMA0_QM_CSMR_STRICT_PRIO_CFG 0x508C90
+
+#define mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 0x508C94
+
+#define mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 0x508C98
+
+#define mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 0x508C9C
+
+#define mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 0x508CA0
+
+#define mmDMA0_QM_GLBL_AXCACHE 0x508CA4
+
+#define mmDMA0_QM_IND_GW_APB_CFG 0x508CB0
+
+#define mmDMA0_QM_IND_GW_APB_WDATA 0x508CB4
+
+#define mmDMA0_QM_IND_GW_APB_RDATA 0x508CB8
+
+#define mmDMA0_QM_IND_GW_APB_STATUS 0x508CBC
+
+#define mmDMA0_QM_GLBL_ERR_ADDR_LO 0x508CD0
+
+#define mmDMA0_QM_GLBL_ERR_ADDR_HI 0x508CD4
+
+#define mmDMA0_QM_GLBL_ERR_WDATA 0x508CD8
+
+#define mmDMA0_QM_GLBL_MEM_INIT_BUSY 0x508D00
+
+#endif /* ASIC_REG_DMA0_QM_REGS_H_ */
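Comparing this file with the DMA1_QM header that follows, the QMAN register blocks sit 0x20000 apart: mmDMA0_QM_GLBL_CFG0 is 0x508000 and mmDMA1_QM_GLBL_CFG0 is 0x528000, with every other offset shifted by the same amount. A minimal sketch (not part of the patch), assuming the remaining DMA instances keep this spacing, of deriving any instance's register address from the DMA0_QM define and an instance index rather than spelling out each mmDMA<n>_QM_* name:

/* Instance-stride demo -- illustrative only. The 0x20000 stride is
 * taken from the DMA0_QM (0x508000) and DMA1_QM (0x528000) bases in
 * these two headers; extension to further instances is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

#define mmDMA0_QM_GLBL_CFG0	0x508000
#define DMA_QM_INSTANCE_STRIDE	0x20000	/* 0x528000 - 0x508000 */

static uint32_t dma_qm_reg(unsigned int instance, uint32_t dma0_reg)
{
	return dma0_reg + instance * DMA_QM_INSTANCE_STRIDE;
}

int main(void)
{
	/* instance 1 yields 0x528000 == mmDMA1_QM_GLBL_CFG0 */
	printf("DMA1_QM_GLBL_CFG0 = 0x%x\n",
	       dma_qm_reg(1, mmDMA0_QM_GLBL_CFG0));
	return 0;
}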
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
new file mode 100644
index 000000000000..4d8d8f26c5d4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA1_CORE_REGS_H_
+#define ASIC_REG_DMA1_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA1_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA1_CORE_CFG_0 0x520000
+
+#define mmDMA1_CORE_CFG_1 0x520004
+
+#define mmDMA1_CORE_LBW_MAX_OUTSTAND 0x520008
+
+#define mmDMA1_CORE_SRC_BASE_LO 0x520014
+
+#define mmDMA1_CORE_SRC_BASE_HI 0x520018
+
+#define mmDMA1_CORE_DST_BASE_LO 0x52001C
+
+#define mmDMA1_CORE_DST_BASE_HI 0x520020
+
+#define mmDMA1_CORE_SRC_TSIZE_1 0x52002C
+
+#define mmDMA1_CORE_SRC_STRIDE_1 0x520030
+
+#define mmDMA1_CORE_SRC_TSIZE_2 0x520034
+
+#define mmDMA1_CORE_SRC_STRIDE_2 0x520038
+
+#define mmDMA1_CORE_SRC_TSIZE_3 0x52003C
+
+#define mmDMA1_CORE_SRC_STRIDE_3 0x520040
+
+#define mmDMA1_CORE_SRC_TSIZE_4 0x520044
+
+#define mmDMA1_CORE_SRC_STRIDE_4 0x520048
+
+#define mmDMA1_CORE_SRC_TSIZE_0 0x52004C
+
+#define mmDMA1_CORE_DST_TSIZE_1 0x520054
+
+#define mmDMA1_CORE_DST_STRIDE_1 0x520058
+
+#define mmDMA1_CORE_DST_TSIZE_2 0x52005C
+
+#define mmDMA1_CORE_DST_STRIDE_2 0x520060
+
+#define mmDMA1_CORE_DST_TSIZE_3 0x520064
+
+#define mmDMA1_CORE_DST_STRIDE_3 0x520068
+
+#define mmDMA1_CORE_DST_TSIZE_4 0x52006C
+
+#define mmDMA1_CORE_DST_STRIDE_4 0x520070
+
+#define mmDMA1_CORE_DST_TSIZE_0 0x520074
+
+#define mmDMA1_CORE_COMMIT 0x520078
+
+#define mmDMA1_CORE_WR_COMP_WDATA 0x52007C
+
+#define mmDMA1_CORE_WR_COMP_ADDR_LO 0x520080
+
+#define mmDMA1_CORE_WR_COMP_ADDR_HI 0x520084
+
+#define mmDMA1_CORE_WR_COMP_AWUSER_31_11 0x520088
+
+#define mmDMA1_CORE_TE_NUMROWS 0x520094
+
+#define mmDMA1_CORE_PROT 0x5200B8
+
+#define mmDMA1_CORE_SECURE_PROPS 0x5200F0
+
+#define mmDMA1_CORE_NON_SECURE_PROPS 0x5200F4
+
+#define mmDMA1_CORE_RD_MAX_OUTSTAND 0x520100
+
+#define mmDMA1_CORE_RD_MAX_SIZE 0x520104
+
+#define mmDMA1_CORE_RD_ARCACHE 0x520108
+
+#define mmDMA1_CORE_RD_ARUSER_31_11 0x520110
+
+#define mmDMA1_CORE_RD_INFLIGHTS 0x520114
+
+#define mmDMA1_CORE_WR_MAX_OUTSTAND 0x520120
+
+#define mmDMA1_CORE_WR_MAX_AWID 0x520124
+
+#define mmDMA1_CORE_WR_AWCACHE 0x520128
+
+#define mmDMA1_CORE_WR_AWUSER_31_11 0x520130
+
+#define mmDMA1_CORE_WR_INFLIGHTS 0x520134
+
+#define mmDMA1_CORE_RD_RATE_LIM_CFG_0 0x520150
+
+#define mmDMA1_CORE_RD_RATE_LIM_CFG_1 0x520154
+
+#define mmDMA1_CORE_WR_RATE_LIM_CFG_0 0x520158
+
+#define mmDMA1_CORE_WR_RATE_LIM_CFG_1 0x52015C
+
+#define mmDMA1_CORE_ERR_CFG 0x520160
+
+#define mmDMA1_CORE_ERR_CAUSE 0x520164
+
+#define mmDMA1_CORE_ERRMSG_ADDR_LO 0x520170
+
+#define mmDMA1_CORE_ERRMSG_ADDR_HI 0x520174
+
+#define mmDMA1_CORE_ERRMSG_WDATA 0x520178
+
+#define mmDMA1_CORE_STS0 0x520190
+
+#define mmDMA1_CORE_STS1 0x520194
+
+#define mmDMA1_CORE_RD_DBGMEM_ADD 0x520200
+
+#define mmDMA1_CORE_RD_DBGMEM_DATA_WR 0x520204
+
+#define mmDMA1_CORE_RD_DBGMEM_DATA_RD 0x520208
+
+#define mmDMA1_CORE_RD_DBGMEM_CTRL 0x52020C
+
+#define mmDMA1_CORE_RD_DBGMEM_RC 0x520210
+
+#define mmDMA1_CORE_DBG_HBW_AXI_AR_CNT 0x520220
+
+#define mmDMA1_CORE_DBG_HBW_AXI_AW_CNT 0x520224
+
+#define mmDMA1_CORE_DBG_LBW_AXI_AW_CNT 0x520228
+
+#define mmDMA1_CORE_DBG_DESC_CNT 0x52022C
+
+#define mmDMA1_CORE_DBG_STS 0x520230
+
+#define mmDMA1_CORE_DBG_RD_DESC_ID 0x520234
+
+#define mmDMA1_CORE_DBG_WR_DESC_ID 0x520238
+
+#endif /* ASIC_REG_DMA1_CORE_REGS_H_ */
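Given the register names in this DMA core header, a simple linear transfer would plausibly be programmed by writing the source and destination addresses and a transfer size, then kicking the engine through the COMMIT register. A hedged sketch (not part of the patch): the offsets are copied from dma1_core_regs.h above, but the COMMIT bit layout and the reg_write() helper are assumptions for illustration only; the real driver goes through its own MMIO accessors.

/* Hypothetical linear-copy programming sequence -- the register
 * semantics are inferred from the names, not confirmed by the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define mmDMA1_CORE_SRC_BASE_LO	0x520014
#define mmDMA1_CORE_SRC_BASE_HI	0x520018
#define mmDMA1_CORE_DST_BASE_LO	0x52001C
#define mmDMA1_CORE_DST_BASE_HI	0x520020
#define mmDMA1_CORE_DST_TSIZE_0	0x520074
#define mmDMA1_CORE_COMMIT	0x520078

static void reg_write(uint32_t reg, uint32_t val)
{
	/* Stand-in for a real MMIO write; prints instead of touching HW. */
	printf("write 0x%06x <- 0x%08x\n", reg, val);
}

static void dma1_linear_copy(uint64_t src, uint64_t dst, uint32_t size)
{
	reg_write(mmDMA1_CORE_SRC_BASE_LO, (uint32_t)src);
	reg_write(mmDMA1_CORE_SRC_BASE_HI, (uint32_t)(src >> 32));
	reg_write(mmDMA1_CORE_DST_BASE_LO, (uint32_t)dst);
	reg_write(mmDMA1_CORE_DST_BASE_HI, (uint32_t)(dst >> 32));
	reg_write(mmDMA1_CORE_DST_TSIZE_0, size);
	reg_write(mmDMA1_CORE_COMMIT, 1);	/* start bit assumed */
}

int main(void)
{
	/* Hypothetical 4 KB device-memory copy. */
	dma1_linear_copy(0x1000000000ull, 0x1000001000ull, 4096);
	return 0;
}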
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
new file mode 100644
index 000000000000..c3ef300849be
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA1_QM_REGS_H_
+#define ASIC_REG_DMA1_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA1_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA1_QM_GLBL_CFG0 0x528000
+
+#define mmDMA1_QM_GLBL_CFG1 0x528004
+
+#define mmDMA1_QM_GLBL_PROT 0x528008
+
+#define mmDMA1_QM_GLBL_ERR_CFG 0x52800C
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_0 0x528010
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_1 0x528014
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_2 0x528018
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_3 0x52801C
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_4 0x528020
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_0 0x528024
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_1 0x528028
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_2 0x52802C
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_3 0x528030
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_4 0x528034
+
+#define mmDMA1_QM_GLBL_STS0 0x528038
+
+#define mmDMA1_QM_GLBL_STS1_0 0x528040
+
+#define mmDMA1_QM_GLBL_STS1_1 0x528044
+
+#define mmDMA1_QM_GLBL_STS1_2 0x528048
+
+#define mmDMA1_QM_GLBL_STS1_3 0x52804C
+
+#define mmDMA1_QM_GLBL_STS1_4 0x528050
+
+#define mmDMA1_QM_GLBL_MSG_EN_0 0x528054
+
+#define mmDMA1_QM_GLBL_MSG_EN_1 0x528058
+
+#define mmDMA1_QM_GLBL_MSG_EN_2 0x52805C
+
+#define mmDMA1_QM_GLBL_MSG_EN_3 0x528060
+
+#define mmDMA1_QM_GLBL_MSG_EN_4 0x528068
+
+#define mmDMA1_QM_PQ_BASE_LO_0 0x528070
+
+#define mmDMA1_QM_PQ_BASE_LO_1 0x528074
+
+#define mmDMA1_QM_PQ_BASE_LO_2 0x528078
+
+#define mmDMA1_QM_PQ_BASE_LO_3 0x52807C
+
+#define mmDMA1_QM_PQ_BASE_HI_0 0x528080
+
+#define mmDMA1_QM_PQ_BASE_HI_1 0x528084
+
+#define mmDMA1_QM_PQ_BASE_HI_2 0x528088
+
+#define mmDMA1_QM_PQ_BASE_HI_3 0x52808C
+
+#define mmDMA1_QM_PQ_SIZE_0 0x528090
+
+#define mmDMA1_QM_PQ_SIZE_1 0x528094
+
+#define mmDMA1_QM_PQ_SIZE_2 0x528098
+
+#define mmDMA1_QM_PQ_SIZE_3 0x52809C
+
+#define mmDMA1_QM_PQ_PI_0 0x5280A0
+
+#define mmDMA1_QM_PQ_PI_1 0x5280A4
+
+#define mmDMA1_QM_PQ_PI_2 0x5280A8
+
+#define mmDMA1_QM_PQ_PI_3 0x5280AC
+
+#define mmDMA1_QM_PQ_CI_0 0x5280B0
+
+#define mmDMA1_QM_PQ_CI_1 0x5280B4
+
+#define mmDMA1_QM_PQ_CI_2 0x5280B8
+
+#define mmDMA1_QM_PQ_CI_3 0x5280BC
+
+#define mmDMA1_QM_PQ_CFG0_0 0x5280C0
+
+#define mmDMA1_QM_PQ_CFG0_1 0x5280C4
+
+#define mmDMA1_QM_PQ_CFG0_2 0x5280C8
+
+#define mmDMA1_QM_PQ_CFG0_3 0x5280CC
+
+#define mmDMA1_QM_PQ_CFG1_0 0x5280D0
+
+#define mmDMA1_QM_PQ_CFG1_1 0x5280D4
+
+#define mmDMA1_QM_PQ_CFG1_2 0x5280D8
+
+#define mmDMA1_QM_PQ_CFG1_3 0x5280DC
+
+#define mmDMA1_QM_PQ_ARUSER_31_11_0 0x5280E0
+
+#define mmDMA1_QM_PQ_ARUSER_31_11_1 0x5280E4
+
+#define mmDMA1_QM_PQ_ARUSER_31_11_2 0x5280E8
+
+#define mmDMA1_QM_PQ_ARUSER_31_11_3 0x5280EC
+
+#define mmDMA1_QM_PQ_STS0_0 0x5280F0
+
+#define mmDMA1_QM_PQ_STS0_1 0x5280F4
+
+#define mmDMA1_QM_PQ_STS0_2 0x5280F8
+
+#define mmDMA1_QM_PQ_STS0_3 0x5280FC
+
+#define mmDMA1_QM_PQ_STS1_0 0x528100
+
+#define mmDMA1_QM_PQ_STS1_1 0x528104
+
+#define mmDMA1_QM_PQ_STS1_2 0x528108
+
+#define mmDMA1_QM_PQ_STS1_3 0x52810C
+
+#define mmDMA1_QM_CQ_CFG0_0 0x528110
+
+#define mmDMA1_QM_CQ_CFG0_1 0x528114
+
+#define mmDMA1_QM_CQ_CFG0_2 0x528118
+
+#define mmDMA1_QM_CQ_CFG0_3 0x52811C
+
+#define mmDMA1_QM_CQ_CFG0_4 0x528120
+
+#define mmDMA1_QM_CQ_CFG1_0 0x528124
+
+#define mmDMA1_QM_CQ_CFG1_1 0x528128
+
+#define mmDMA1_QM_CQ_CFG1_2 0x52812C
+
+#define mmDMA1_QM_CQ_CFG1_3 0x528130
+
+#define mmDMA1_QM_CQ_CFG1_4 0x528134
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_0 0x528138
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_1 0x52813C
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_2 0x528140
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_3 0x528144
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_4 0x528148
+
+#define mmDMA1_QM_CQ_STS0_0 0x52814C
+
+#define mmDMA1_QM_CQ_STS0_1 0x528150
+
+#define mmDMA1_QM_CQ_STS0_2 0x528154
+
+#define mmDMA1_QM_CQ_STS0_3 0x528158
+
+#define mmDMA1_QM_CQ_STS0_4 0x52815C
+
+#define mmDMA1_QM_CQ_STS1_0 0x528160
+
+#define mmDMA1_QM_CQ_STS1_1 0x528164
+
+#define mmDMA1_QM_CQ_STS1_2 0x528168
+
+#define mmDMA1_QM_CQ_STS1_3 0x52816C
+
+#define mmDMA1_QM_CQ_STS1_4 0x528170
+
+#define mmDMA1_QM_CQ_PTR_LO_0 0x528174
+
+#define mmDMA1_QM_CQ_PTR_HI_0 0x528178
+
+#define mmDMA1_QM_CQ_TSIZE_0 0x52817C
+
+#define mmDMA1_QM_CQ_CTL_0 0x528180
+
+#define mmDMA1_QM_CQ_PTR_LO_1 0x528184
+
+#define mmDMA1_QM_CQ_PTR_HI_1 0x528188
+
+#define mmDMA1_QM_CQ_TSIZE_1 0x52818C
+
+#define mmDMA1_QM_CQ_CTL_1 0x528190
+
+#define mmDMA1_QM_CQ_PTR_LO_2 0x528194
+
+#define mmDMA1_QM_CQ_PTR_HI_2 0x528198
+
+#define mmDMA1_QM_CQ_TSIZE_2 0x52819C
+
+#define mmDMA1_QM_CQ_CTL_2 0x5281A0
+
+#define mmDMA1_QM_CQ_PTR_LO_3 0x5281A4
+
+#define mmDMA1_QM_CQ_PTR_HI_3 0x5281A8
+
+#define mmDMA1_QM_CQ_TSIZE_3 0x5281AC
+
+#define mmDMA1_QM_CQ_CTL_3 0x5281B0
+
+#define mmDMA1_QM_CQ_PTR_LO_4 0x5281B4
+
+#define mmDMA1_QM_CQ_PTR_HI_4 0x5281B8
+
+#define mmDMA1_QM_CQ_TSIZE_4 0x5281BC
+
+#define mmDMA1_QM_CQ_CTL_4 0x5281C0
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_0 0x5281C4
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_1 0x5281C8
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_2 0x5281CC
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_3 0x5281D0
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_4 0x5281D4
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_0 0x5281D8
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_1 0x5281DC
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_2 0x5281E0
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_3 0x5281E4
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_4 0x5281E8
+
+#define mmDMA1_QM_CQ_TSIZE_STS_0 0x5281EC
+
+#define mmDMA1_QM_CQ_TSIZE_STS_1 0x5281F0
+
+#define mmDMA1_QM_CQ_TSIZE_STS_2 0x5281F4
+
+#define mmDMA1_QM_CQ_TSIZE_STS_3 0x5281F8
+
+#define mmDMA1_QM_CQ_TSIZE_STS_4 0x5281FC
+
+#define mmDMA1_QM_CQ_CTL_STS_0 0x528200
+
+#define mmDMA1_QM_CQ_CTL_STS_1 0x528204
+
+#define mmDMA1_QM_CQ_CTL_STS_2 0x528208
+
+#define mmDMA1_QM_CQ_CTL_STS_3 0x52820C
+
+#define mmDMA1_QM_CQ_CTL_STS_4 0x528210
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_0 0x528214
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_1 0x528218
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_2 0x52821C
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_3 0x528220
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_4 0x528224
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_0 0x528228
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_1 0x52822C
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_2 0x528230
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_3 0x528234
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_4 0x528238
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_0 0x52823C
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_1 0x528240
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_2 0x528244
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_3 0x528248
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_4 0x52824C
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_0 0x528250
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_1 0x528254
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_2 0x528258
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_3 0x52825C
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_4 0x528260
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_0 0x528264
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_1 0x528268
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_2 0x52826C
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_3 0x528270
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_4 0x528274
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_0 0x528278
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_1 0x52827C
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 0x528280
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_3 0x528284
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_4 0x528288
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_0 0x52828C
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_1 0x528290
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_2 0x528294
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_3 0x528298
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_4 0x52829C
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_0 0x5282A0
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_1 0x5282A4
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_2 0x5282A8
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_3 0x5282AC
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_4 0x5282B0
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_0 0x5282B4
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_1 0x5282B8
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_2 0x5282BC
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_3 0x5282C0
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_4 0x5282C4
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_0 0x5282C8
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_1 0x5282CC
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_2 0x5282D0
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_3 0x5282D4
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_4 0x5282D8
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5282E0
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5282E4
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5282E8
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5282EC
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5282F0
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5282F4
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5282F8
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5282FC
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x528300
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x528304
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_0 0x528308
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_1 0x52830C
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_2 0x528310
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_3 0x528314
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_4 0x528318
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_0 0x52831C
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_1 0x528320
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_2 0x528324
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_3 0x528328
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_4 0x52832C
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_0 0x528330
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_1 0x528334
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_2 0x528338
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_3 0x52833C
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_4 0x528340
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_0 0x528344
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_1 0x528348
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_2 0x52834C
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_3 0x528350
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_4 0x528354
+
+#define mmDMA1_QM_CP_FENCE0_CNT_0 0x528358
+
+#define mmDMA1_QM_CP_FENCE0_CNT_1 0x52835C
+
+#define mmDMA1_QM_CP_FENCE0_CNT_2 0x528360
+
+#define mmDMA1_QM_CP_FENCE0_CNT_3 0x528364
+
+#define mmDMA1_QM_CP_FENCE0_CNT_4 0x528368
+
+#define mmDMA1_QM_CP_FENCE1_CNT_0 0x52836C
+
+#define mmDMA1_QM_CP_FENCE1_CNT_1 0x528370
+
+#define mmDMA1_QM_CP_FENCE1_CNT_2 0x528374
+
+#define mmDMA1_QM_CP_FENCE1_CNT_3 0x528378
+
+#define mmDMA1_QM_CP_FENCE1_CNT_4 0x52837C
+
+#define mmDMA1_QM_CP_FENCE2_CNT_0 0x528380
+
+#define mmDMA1_QM_CP_FENCE2_CNT_1 0x528384
+
+#define mmDMA1_QM_CP_FENCE2_CNT_2 0x528388
+
+#define mmDMA1_QM_CP_FENCE2_CNT_3 0x52838C
+
+#define mmDMA1_QM_CP_FENCE2_CNT_4 0x528390
+
+#define mmDMA1_QM_CP_FENCE3_CNT_0 0x528394
+
+#define mmDMA1_QM_CP_FENCE3_CNT_1 0x528398
+
+#define mmDMA1_QM_CP_FENCE3_CNT_2 0x52839C
+
+#define mmDMA1_QM_CP_FENCE3_CNT_3 0x5283A0
+
+#define mmDMA1_QM_CP_FENCE3_CNT_4 0x5283A4
+
+#define mmDMA1_QM_CP_STS_0 0x5283A8
+
+#define mmDMA1_QM_CP_STS_1 0x5283AC
+
+#define mmDMA1_QM_CP_STS_2 0x5283B0
+
+#define mmDMA1_QM_CP_STS_3 0x5283B4
+
+#define mmDMA1_QM_CP_STS_4 0x5283B8
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_0 0x5283BC
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_1 0x5283C0
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_2 0x5283C4
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_3 0x5283C8
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_4 0x5283CC
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_0 0x5283D0
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_1 0x5283D4
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_2 0x5283D8
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_3 0x5283DC
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_4 0x5283E0
+
+#define mmDMA1_QM_CP_BARRIER_CFG_0 0x5283F4
+
+#define mmDMA1_QM_CP_BARRIER_CFG_1 0x5283F8
+
+#define mmDMA1_QM_CP_BARRIER_CFG_2 0x5283FC
+
+#define mmDMA1_QM_CP_BARRIER_CFG_3 0x528400
+
+#define mmDMA1_QM_CP_BARRIER_CFG_4 0x528404
+
+#define mmDMA1_QM_CP_DBG_0_0 0x528408
+
+#define mmDMA1_QM_CP_DBG_0_1 0x52840C
+
+#define mmDMA1_QM_CP_DBG_0_2 0x528410
+
+#define mmDMA1_QM_CP_DBG_0_3 0x528414
+
+#define mmDMA1_QM_CP_DBG_0_4 0x528418
+
+#define mmDMA1_QM_CP_ARUSER_31_11_0 0x52841C
+
+#define mmDMA1_QM_CP_ARUSER_31_11_1 0x528420
+
+#define mmDMA1_QM_CP_ARUSER_31_11_2 0x528424
+
+#define mmDMA1_QM_CP_ARUSER_31_11_3 0x528428
+
+#define mmDMA1_QM_CP_ARUSER_31_11_4 0x52842C
+
+#define mmDMA1_QM_CP_AWUSER_31_11_0 0x528430
+
+#define mmDMA1_QM_CP_AWUSER_31_11_1 0x528434
+
+#define mmDMA1_QM_CP_AWUSER_31_11_2 0x528438
+
+#define mmDMA1_QM_CP_AWUSER_31_11_3 0x52843C
+
+#define mmDMA1_QM_CP_AWUSER_31_11_4 0x528440
+
+#define mmDMA1_QM_ARB_CFG_0 0x528A00
+
+#define mmDMA1_QM_ARB_CHOISE_Q_PUSH 0x528A04
+
+#define mmDMA1_QM_ARB_WRR_WEIGHT_0 0x528A08
+
+#define mmDMA1_QM_ARB_WRR_WEIGHT_1 0x528A0C
+
+#define mmDMA1_QM_ARB_WRR_WEIGHT_2 0x528A10
+
+#define mmDMA1_QM_ARB_WRR_WEIGHT_3 0x528A14
+
+#define mmDMA1_QM_ARB_CFG_1 0x528A18
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_0 0x528A20
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_1 0x528A24
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_2 0x528A28
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_3 0x528A2C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_4 0x528A30
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_5 0x528A34
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_6 0x528A38
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_7 0x528A3C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_8 0x528A40
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_9 0x528A44
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_10 0x528A48
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_11 0x528A4C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_12 0x528A50
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_13 0x528A54
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_14 0x528A58
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_15 0x528A5C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_16 0x528A60
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_17 0x528A64
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_18 0x528A68
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_19 0x528A6C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_20 0x528A70
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_21 0x528A74
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_22 0x528A78
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_23 0x528A7C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_24 0x528A80
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_25 0x528A84
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_26 0x528A88
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_27 0x528A8C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_28 0x528A90
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_29 0x528A94
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_30 0x528A98
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_31 0x528A9C
+
+#define mmDMA1_QM_ARB_MST_CRED_INC 0x528AA0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x528AA4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x528AA8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x528AAC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x528AB0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x528AB4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x528AB8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x528ABC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x528AC0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x528AC4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x528AC8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x528ACC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x528AD0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x528AD4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x528AD8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x528ADC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x528AE0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x528AE4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x528AE8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x528AEC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x528AF0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x528AF4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x528AF8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x528AFC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x528B00
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x528B04
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x528B08
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x528B0C
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x528B10
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x528B14
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x528B18
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x528B1C
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x528B20
+
+#define mmDMA1_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x528B28
+
+#define mmDMA1_QM_ARB_MST_SLAVE_EN 0x528B2C
+
+#define mmDMA1_QM_ARB_MST_QUIET_PER 0x528B34
+
+#define mmDMA1_QM_ARB_SLV_CHOISE_WDT 0x528B38
+
+#define mmDMA1_QM_ARB_SLV_ID 0x528B3C
+
+#define mmDMA1_QM_ARB_MSG_MAX_INFLIGHT 0x528B44
+
+#define mmDMA1_QM_ARB_MSG_AWUSER_31_11 0x528B48
+
+#define mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP 0x528B4C
+
+#define mmDMA1_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x528B50
+
+#define mmDMA1_QM_ARB_BASE_LO 0x528B54
+
+#define mmDMA1_QM_ARB_BASE_HI 0x528B58
+
+#define mmDMA1_QM_ARB_STATE_STS 0x528B80
+
+#define mmDMA1_QM_ARB_CHOISE_FULLNESS_STS 0x528B84
+
+#define mmDMA1_QM_ARB_MSG_STS 0x528B88
+
+#define mmDMA1_QM_ARB_SLV_CHOISE_Q_HEAD 0x528B8C
+
+#define mmDMA1_QM_ARB_ERR_CAUSE 0x528B9C
+
+#define mmDMA1_QM_ARB_ERR_MSG_EN 0x528BA0
+
+#define mmDMA1_QM_ARB_ERR_STS_DRP 0x528BA8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_0 0x528BB0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_1 0x528BB4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_2 0x528BB8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_3 0x528BBC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_4 0x528BC0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_5 0x528BC4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_6 0x528BC8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_7 0x528BCC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_8 0x528BD0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_9 0x528BD4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_10 0x528BD8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_11 0x528BDC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_12 0x528BE0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_13 0x528BE4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_14 0x528BE8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_15 0x528BEC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_16 0x528BF0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_17 0x528BF4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_18 0x528BF8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_19 0x528BFC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_20 0x528C00
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_21 0x528C04
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_22 0x528C08
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_23 0x528C0C
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_24 0x528C10
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_25 0x528C14
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_26 0x528C18
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_27 0x528C1C
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_28 0x528C20
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_29 0x528C24
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_30 0x528C28
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_31 0x528C2C
+
+#define mmDMA1_QM_CGM_CFG 0x528C70
+
+#define mmDMA1_QM_CGM_STS 0x528C74
+
+#define mmDMA1_QM_CGM_CFG1 0x528C78
+
+#define mmDMA1_QM_LOCAL_RANGE_BASE 0x528C80
+
+#define mmDMA1_QM_LOCAL_RANGE_SIZE 0x528C84
+
+#define mmDMA1_QM_CSMR_STRICT_PRIO_CFG 0x528C90
+
+#define mmDMA1_QM_HBW_RD_RATE_LIM_CFG_1 0x528C94
+
+#define mmDMA1_QM_LBW_WR_RATE_LIM_CFG_0 0x528C98
+
+#define mmDMA1_QM_LBW_WR_RATE_LIM_CFG_1 0x528C9C
+
+#define mmDMA1_QM_HBW_RD_RATE_LIM_CFG_0 0x528CA0
+
+#define mmDMA1_QM_GLBL_AXCACHE 0x528CA4
+
+#define mmDMA1_QM_IND_GW_APB_CFG 0x528CB0
+
+#define mmDMA1_QM_IND_GW_APB_WDATA 0x528CB4
+
+#define mmDMA1_QM_IND_GW_APB_RDATA 0x528CB8
+
+#define mmDMA1_QM_IND_GW_APB_STATUS 0x528CBC
+
+#define mmDMA1_QM_GLBL_ERR_ADDR_LO 0x528CD0
+
+#define mmDMA1_QM_GLBL_ERR_ADDR_HI 0x528CD4
+
+#define mmDMA1_QM_GLBL_ERR_WDATA 0x528CD8
+
+#define mmDMA1_QM_GLBL_MEM_INIT_BUSY 0x528D00
+
+#endif /* ASIC_REG_DMA1_QM_REGS_H_ */
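The QMAN blocks added by these headers sit at a fixed 0x20000 per-engine stride (DMA1_QM at 0x528000, DMA2_QM at 0x548000, DMA3_QM at 0x568000, DMA4_QM at 0x588000), with each QM block offset +0x8000 from its engine's CORE block (compare DMA2_CORE at 0x540000 with DMA2_QM at 0x548000). A driver can exploit that regularity instead of switching over per-engine names; a minimal sketch, where the helper name and stride constant are editorial assumptions derived from the addresses above, not definitions from this patch:

/* Sketch only; u32 comes from <linux/types.h> in kernel code. */
#define DMA_QM_ENGINE_STRIDE	0x20000

static inline u32 dma_qm_reg(u32 dma1_qm_reg, int engine)
{
	/* engine 1 yields the DMA1_QM addresses unchanged */
	return dma1_qm_reg + (engine - 1) * DMA_QM_ENGINE_STRIDE;
}

/* e.g. dma_qm_reg(mmDMA1_QM_GLBL_CFG0, 3) == 0x568000 == mmDMA3_QM_GLBL_CFG0 */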
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
new file mode 100644
index 000000000000..a42862cd5ae0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA2_CORE_REGS_H_
+#define ASIC_REG_DMA2_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA2_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA2_CORE_CFG_0 0x540000
+
+#define mmDMA2_CORE_CFG_1 0x540004
+
+#define mmDMA2_CORE_LBW_MAX_OUTSTAND 0x540008
+
+#define mmDMA2_CORE_SRC_BASE_LO 0x540014
+
+#define mmDMA2_CORE_SRC_BASE_HI 0x540018
+
+#define mmDMA2_CORE_DST_BASE_LO 0x54001C
+
+#define mmDMA2_CORE_DST_BASE_HI 0x540020
+
+#define mmDMA2_CORE_SRC_TSIZE_1 0x54002C
+
+#define mmDMA2_CORE_SRC_STRIDE_1 0x540030
+
+#define mmDMA2_CORE_SRC_TSIZE_2 0x540034
+
+#define mmDMA2_CORE_SRC_STRIDE_2 0x540038
+
+#define mmDMA2_CORE_SRC_TSIZE_3 0x54003C
+
+#define mmDMA2_CORE_SRC_STRIDE_3 0x540040
+
+#define mmDMA2_CORE_SRC_TSIZE_4 0x540044
+
+#define mmDMA2_CORE_SRC_STRIDE_4 0x540048
+
+#define mmDMA2_CORE_SRC_TSIZE_0 0x54004C
+
+#define mmDMA2_CORE_DST_TSIZE_1 0x540054
+
+#define mmDMA2_CORE_DST_STRIDE_1 0x540058
+
+#define mmDMA2_CORE_DST_TSIZE_2 0x54005C
+
+#define mmDMA2_CORE_DST_STRIDE_2 0x540060
+
+#define mmDMA2_CORE_DST_TSIZE_3 0x540064
+
+#define mmDMA2_CORE_DST_STRIDE_3 0x540068
+
+#define mmDMA2_CORE_DST_TSIZE_4 0x54006C
+
+#define mmDMA2_CORE_DST_STRIDE_4 0x540070
+
+#define mmDMA2_CORE_DST_TSIZE_0 0x540074
+
+#define mmDMA2_CORE_COMMIT 0x540078
+
+#define mmDMA2_CORE_WR_COMP_WDATA 0x54007C
+
+#define mmDMA2_CORE_WR_COMP_ADDR_LO 0x540080
+
+#define mmDMA2_CORE_WR_COMP_ADDR_HI 0x540084
+
+#define mmDMA2_CORE_WR_COMP_AWUSER_31_11 0x540088
+
+#define mmDMA2_CORE_TE_NUMROWS 0x540094
+
+#define mmDMA2_CORE_PROT 0x5400B8
+
+#define mmDMA2_CORE_SECURE_PROPS 0x5400F0
+
+#define mmDMA2_CORE_NON_SECURE_PROPS 0x5400F4
+
+#define mmDMA2_CORE_RD_MAX_OUTSTAND 0x540100
+
+#define mmDMA2_CORE_RD_MAX_SIZE 0x540104
+
+#define mmDMA2_CORE_RD_ARCACHE 0x540108
+
+#define mmDMA2_CORE_RD_ARUSER_31_11 0x540110
+
+#define mmDMA2_CORE_RD_INFLIGHTS 0x540114
+
+#define mmDMA2_CORE_WR_MAX_OUTSTAND 0x540120
+
+#define mmDMA2_CORE_WR_MAX_AWID 0x540124
+
+#define mmDMA2_CORE_WR_AWCACHE 0x540128
+
+#define mmDMA2_CORE_WR_AWUSER_31_11 0x540130
+
+#define mmDMA2_CORE_WR_INFLIGHTS 0x540134
+
+#define mmDMA2_CORE_RD_RATE_LIM_CFG_0 0x540150
+
+#define mmDMA2_CORE_RD_RATE_LIM_CFG_1 0x540154
+
+#define mmDMA2_CORE_WR_RATE_LIM_CFG_0 0x540158
+
+#define mmDMA2_CORE_WR_RATE_LIM_CFG_1 0x54015C
+
+#define mmDMA2_CORE_ERR_CFG 0x540160
+
+#define mmDMA2_CORE_ERR_CAUSE 0x540164
+
+#define mmDMA2_CORE_ERRMSG_ADDR_LO 0x540170
+
+#define mmDMA2_CORE_ERRMSG_ADDR_HI 0x540174
+
+#define mmDMA2_CORE_ERRMSG_WDATA 0x540178
+
+#define mmDMA2_CORE_STS0 0x540190
+
+#define mmDMA2_CORE_STS1 0x540194
+
+#define mmDMA2_CORE_RD_DBGMEM_ADD 0x540200
+
+#define mmDMA2_CORE_RD_DBGMEM_DATA_WR 0x540204
+
+#define mmDMA2_CORE_RD_DBGMEM_DATA_RD 0x540208
+
+#define mmDMA2_CORE_RD_DBGMEM_CTRL 0x54020C
+
+#define mmDMA2_CORE_RD_DBGMEM_RC 0x540210
+
+#define mmDMA2_CORE_DBG_HBW_AXI_AR_CNT 0x540220
+
+#define mmDMA2_CORE_DBG_HBW_AXI_AW_CNT 0x540224
+
+#define mmDMA2_CORE_DBG_LBW_AXI_AW_CNT 0x540228
+
+#define mmDMA2_CORE_DBG_DESC_CNT 0x54022C
+
+#define mmDMA2_CORE_DBG_STS 0x540230
+
+#define mmDMA2_CORE_DBG_RD_DESC_ID 0x540234
+
+#define mmDMA2_CORE_DBG_WR_DESC_ID 0x540238
+
+#endif /* ASIC_REG_DMA2_CORE_REGS_H_ */
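The CORE block describes a five-dimensional transfer. For dimensions 1-4 the tsize/stride registers are interleaved in 8-byte pairs (SRC_TSIZE_1 at 0x54002C, SRC_STRIDE_1 at 0x540030, SRC_TSIZE_2 at 0x540034, and so on), while TSIZE_0, the innermost dimension, sits apart at 0x54004C with no stride register of its own. A hedged sketch of indexing the outer-dimension registers; the macro names are illustrative and do not appear in this patch:

/* Illustrative: outer-dimension source descriptor registers. */
#define mmDMA2_CORE_SRC_TSIZE(dim) \
	(mmDMA2_CORE_SRC_TSIZE_1 + ((dim) - 1) * 8)	/* dim in 1..4 */
#define mmDMA2_CORE_SRC_STRIDE(dim) \
	(mmDMA2_CORE_SRC_STRIDE_1 + ((dim) - 1) * 8)	/* dim in 1..4 */

/* e.g. mmDMA2_CORE_SRC_TSIZE(3) == 0x54003C == mmDMA2_CORE_SRC_TSIZE_3 */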
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
new file mode 100644
index 000000000000..8c4d4e016852
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA2_QM_REGS_H_
+#define ASIC_REG_DMA2_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA2_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA2_QM_GLBL_CFG0 0x548000
+
+#define mmDMA2_QM_GLBL_CFG1 0x548004
+
+#define mmDMA2_QM_GLBL_PROT 0x548008
+
+#define mmDMA2_QM_GLBL_ERR_CFG 0x54800C
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_0 0x548010
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_1 0x548014
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_2 0x548018
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_3 0x54801C
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_4 0x548020
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_0 0x548024
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_1 0x548028
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_2 0x54802C
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_3 0x548030
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_4 0x548034
+
+#define mmDMA2_QM_GLBL_STS0 0x548038
+
+#define mmDMA2_QM_GLBL_STS1_0 0x548040
+
+#define mmDMA2_QM_GLBL_STS1_1 0x548044
+
+#define mmDMA2_QM_GLBL_STS1_2 0x548048
+
+#define mmDMA2_QM_GLBL_STS1_3 0x54804C
+
+#define mmDMA2_QM_GLBL_STS1_4 0x548050
+
+#define mmDMA2_QM_GLBL_MSG_EN_0 0x548054
+
+#define mmDMA2_QM_GLBL_MSG_EN_1 0x548058
+
+#define mmDMA2_QM_GLBL_MSG_EN_2 0x54805C
+
+#define mmDMA2_QM_GLBL_MSG_EN_3 0x548060
+
+#define mmDMA2_QM_GLBL_MSG_EN_4 0x548068
+
+#define mmDMA2_QM_PQ_BASE_LO_0 0x548070
+
+#define mmDMA2_QM_PQ_BASE_LO_1 0x548074
+
+#define mmDMA2_QM_PQ_BASE_LO_2 0x548078
+
+#define mmDMA2_QM_PQ_BASE_LO_3 0x54807C
+
+#define mmDMA2_QM_PQ_BASE_HI_0 0x548080
+
+#define mmDMA2_QM_PQ_BASE_HI_1 0x548084
+
+#define mmDMA2_QM_PQ_BASE_HI_2 0x548088
+
+#define mmDMA2_QM_PQ_BASE_HI_3 0x54808C
+
+#define mmDMA2_QM_PQ_SIZE_0 0x548090
+
+#define mmDMA2_QM_PQ_SIZE_1 0x548094
+
+#define mmDMA2_QM_PQ_SIZE_2 0x548098
+
+#define mmDMA2_QM_PQ_SIZE_3 0x54809C
+
+#define mmDMA2_QM_PQ_PI_0 0x5480A0
+
+#define mmDMA2_QM_PQ_PI_1 0x5480A4
+
+#define mmDMA2_QM_PQ_PI_2 0x5480A8
+
+#define mmDMA2_QM_PQ_PI_3 0x5480AC
+
+#define mmDMA2_QM_PQ_CI_0 0x5480B0
+
+#define mmDMA2_QM_PQ_CI_1 0x5480B4
+
+#define mmDMA2_QM_PQ_CI_2 0x5480B8
+
+#define mmDMA2_QM_PQ_CI_3 0x5480BC
+
+#define mmDMA2_QM_PQ_CFG0_0 0x5480C0
+
+#define mmDMA2_QM_PQ_CFG0_1 0x5480C4
+
+#define mmDMA2_QM_PQ_CFG0_2 0x5480C8
+
+#define mmDMA2_QM_PQ_CFG0_3 0x5480CC
+
+#define mmDMA2_QM_PQ_CFG1_0 0x5480D0
+
+#define mmDMA2_QM_PQ_CFG1_1 0x5480D4
+
+#define mmDMA2_QM_PQ_CFG1_2 0x5480D8
+
+#define mmDMA2_QM_PQ_CFG1_3 0x5480DC
+
+#define mmDMA2_QM_PQ_ARUSER_31_11_0 0x5480E0
+
+#define mmDMA2_QM_PQ_ARUSER_31_11_1 0x5480E4
+
+#define mmDMA2_QM_PQ_ARUSER_31_11_2 0x5480E8
+
+#define mmDMA2_QM_PQ_ARUSER_31_11_3 0x5480EC
+
+#define mmDMA2_QM_PQ_STS0_0 0x5480F0
+
+#define mmDMA2_QM_PQ_STS0_1 0x5480F4
+
+#define mmDMA2_QM_PQ_STS0_2 0x5480F8
+
+#define mmDMA2_QM_PQ_STS0_3 0x5480FC
+
+#define mmDMA2_QM_PQ_STS1_0 0x548100
+
+#define mmDMA2_QM_PQ_STS1_1 0x548104
+
+#define mmDMA2_QM_PQ_STS1_2 0x548108
+
+#define mmDMA2_QM_PQ_STS1_3 0x54810C
+
+#define mmDMA2_QM_CQ_CFG0_0 0x548110
+
+#define mmDMA2_QM_CQ_CFG0_1 0x548114
+
+#define mmDMA2_QM_CQ_CFG0_2 0x548118
+
+#define mmDMA2_QM_CQ_CFG0_3 0x54811C
+
+#define mmDMA2_QM_CQ_CFG0_4 0x548120
+
+#define mmDMA2_QM_CQ_CFG1_0 0x548124
+
+#define mmDMA2_QM_CQ_CFG1_1 0x548128
+
+#define mmDMA2_QM_CQ_CFG1_2 0x54812C
+
+#define mmDMA2_QM_CQ_CFG1_3 0x548130
+
+#define mmDMA2_QM_CQ_CFG1_4 0x548134
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_0 0x548138
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_1 0x54813C
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_2 0x548140
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_3 0x548144
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_4 0x548148
+
+#define mmDMA2_QM_CQ_STS0_0 0x54814C
+
+#define mmDMA2_QM_CQ_STS0_1 0x548150
+
+#define mmDMA2_QM_CQ_STS0_2 0x548154
+
+#define mmDMA2_QM_CQ_STS0_3 0x548158
+
+#define mmDMA2_QM_CQ_STS0_4 0x54815C
+
+#define mmDMA2_QM_CQ_STS1_0 0x548160
+
+#define mmDMA2_QM_CQ_STS1_1 0x548164
+
+#define mmDMA2_QM_CQ_STS1_2 0x548168
+
+#define mmDMA2_QM_CQ_STS1_3 0x54816C
+
+#define mmDMA2_QM_CQ_STS1_4 0x548170
+
+#define mmDMA2_QM_CQ_PTR_LO_0 0x548174
+
+#define mmDMA2_QM_CQ_PTR_HI_0 0x548178
+
+#define mmDMA2_QM_CQ_TSIZE_0 0x54817C
+
+#define mmDMA2_QM_CQ_CTL_0 0x548180
+
+#define mmDMA2_QM_CQ_PTR_LO_1 0x548184
+
+#define mmDMA2_QM_CQ_PTR_HI_1 0x548188
+
+#define mmDMA2_QM_CQ_TSIZE_1 0x54818C
+
+#define mmDMA2_QM_CQ_CTL_1 0x548190
+
+#define mmDMA2_QM_CQ_PTR_LO_2 0x548194
+
+#define mmDMA2_QM_CQ_PTR_HI_2 0x548198
+
+#define mmDMA2_QM_CQ_TSIZE_2 0x54819C
+
+#define mmDMA2_QM_CQ_CTL_2 0x5481A0
+
+#define mmDMA2_QM_CQ_PTR_LO_3 0x5481A4
+
+#define mmDMA2_QM_CQ_PTR_HI_3 0x5481A8
+
+#define mmDMA2_QM_CQ_TSIZE_3 0x5481AC
+
+#define mmDMA2_QM_CQ_CTL_3 0x5481B0
+
+#define mmDMA2_QM_CQ_PTR_LO_4 0x5481B4
+
+#define mmDMA2_QM_CQ_PTR_HI_4 0x5481B8
+
+#define mmDMA2_QM_CQ_TSIZE_4 0x5481BC
+
+#define mmDMA2_QM_CQ_CTL_4 0x5481C0
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_0 0x5481C4
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_1 0x5481C8
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_2 0x5481CC
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_3 0x5481D0
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_4 0x5481D4
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_0 0x5481D8
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_1 0x5481DC
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_2 0x5481E0
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_3 0x5481E4
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_4 0x5481E8
+
+#define mmDMA2_QM_CQ_TSIZE_STS_0 0x5481EC
+
+#define mmDMA2_QM_CQ_TSIZE_STS_1 0x5481F0
+
+#define mmDMA2_QM_CQ_TSIZE_STS_2 0x5481F4
+
+#define mmDMA2_QM_CQ_TSIZE_STS_3 0x5481F8
+
+#define mmDMA2_QM_CQ_TSIZE_STS_4 0x5481FC
+
+#define mmDMA2_QM_CQ_CTL_STS_0 0x548200
+
+#define mmDMA2_QM_CQ_CTL_STS_1 0x548204
+
+#define mmDMA2_QM_CQ_CTL_STS_2 0x548208
+
+#define mmDMA2_QM_CQ_CTL_STS_3 0x54820C
+
+#define mmDMA2_QM_CQ_CTL_STS_4 0x548210
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_0 0x548214
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_1 0x548218
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_2 0x54821C
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_3 0x548220
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_4 0x548224
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_0 0x548228
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_1 0x54822C
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_2 0x548230
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_3 0x548234
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_4 0x548238
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_0 0x54823C
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_1 0x548240
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_2 0x548244
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_3 0x548248
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_4 0x54824C
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_0 0x548250
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_1 0x548254
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_2 0x548258
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_3 0x54825C
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_4 0x548260
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_0 0x548264
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_1 0x548268
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_2 0x54826C
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_3 0x548270
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_4 0x548274
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_0 0x548278
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_1 0x54827C
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 0x548280
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_3 0x548284
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_4 0x548288
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_0 0x54828C
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_1 0x548290
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_2 0x548294
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_3 0x548298
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_4 0x54829C
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_0 0x5482A0
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_1 0x5482A4
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_2 0x5482A8
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_3 0x5482AC
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_4 0x5482B0
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_0 0x5482B4
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_1 0x5482B8
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_2 0x5482BC
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_3 0x5482C0
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_4 0x5482C4
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_0 0x5482C8
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_1 0x5482CC
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_2 0x5482D0
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_3 0x5482D4
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_4 0x5482D8
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5482E0
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5482E4
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5482E8
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5482EC
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5482F0
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5482F4
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5482F8
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5482FC
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x548300
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x548304
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_0 0x548308
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_1 0x54830C
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_2 0x548310
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_3 0x548314
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_4 0x548318
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_0 0x54831C
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_1 0x548320
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_2 0x548324
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_3 0x548328
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_4 0x54832C
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_0 0x548330
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_1 0x548334
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_2 0x548338
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_3 0x54833C
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_4 0x548340
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_0 0x548344
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_1 0x548348
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_2 0x54834C
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_3 0x548350
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_4 0x548354
+
+#define mmDMA2_QM_CP_FENCE0_CNT_0 0x548358
+
+#define mmDMA2_QM_CP_FENCE0_CNT_1 0x54835C
+
+#define mmDMA2_QM_CP_FENCE0_CNT_2 0x548360
+
+#define mmDMA2_QM_CP_FENCE0_CNT_3 0x548364
+
+#define mmDMA2_QM_CP_FENCE0_CNT_4 0x548368
+
+#define mmDMA2_QM_CP_FENCE1_CNT_0 0x54836C
+
+#define mmDMA2_QM_CP_FENCE1_CNT_1 0x548370
+
+#define mmDMA2_QM_CP_FENCE1_CNT_2 0x548374
+
+#define mmDMA2_QM_CP_FENCE1_CNT_3 0x548378
+
+#define mmDMA2_QM_CP_FENCE1_CNT_4 0x54837C
+
+#define mmDMA2_QM_CP_FENCE2_CNT_0 0x548380
+
+#define mmDMA2_QM_CP_FENCE2_CNT_1 0x548384
+
+#define mmDMA2_QM_CP_FENCE2_CNT_2 0x548388
+
+#define mmDMA2_QM_CP_FENCE2_CNT_3 0x54838C
+
+#define mmDMA2_QM_CP_FENCE2_CNT_4 0x548390
+
+#define mmDMA2_QM_CP_FENCE3_CNT_0 0x548394
+
+#define mmDMA2_QM_CP_FENCE3_CNT_1 0x548398
+
+#define mmDMA2_QM_CP_FENCE3_CNT_2 0x54839C
+
+#define mmDMA2_QM_CP_FENCE3_CNT_3 0x5483A0
+
+#define mmDMA2_QM_CP_FENCE3_CNT_4 0x5483A4
+
+#define mmDMA2_QM_CP_STS_0 0x5483A8
+
+#define mmDMA2_QM_CP_STS_1 0x5483AC
+
+#define mmDMA2_QM_CP_STS_2 0x5483B0
+
+#define mmDMA2_QM_CP_STS_3 0x5483B4
+
+#define mmDMA2_QM_CP_STS_4 0x5483B8
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_0 0x5483BC
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_1 0x5483C0
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_2 0x5483C4
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_3 0x5483C8
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_4 0x5483CC
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_0 0x5483D0
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_1 0x5483D4
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_2 0x5483D8
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_3 0x5483DC
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_4 0x5483E0
+
+#define mmDMA2_QM_CP_BARRIER_CFG_0 0x5483F4
+
+#define mmDMA2_QM_CP_BARRIER_CFG_1 0x5483F8
+
+#define mmDMA2_QM_CP_BARRIER_CFG_2 0x5483FC
+
+#define mmDMA2_QM_CP_BARRIER_CFG_3 0x548400
+
+#define mmDMA2_QM_CP_BARRIER_CFG_4 0x548404
+
+#define mmDMA2_QM_CP_DBG_0_0 0x548408
+
+#define mmDMA2_QM_CP_DBG_0_1 0x54840C
+
+#define mmDMA2_QM_CP_DBG_0_2 0x548410
+
+#define mmDMA2_QM_CP_DBG_0_3 0x548414
+
+#define mmDMA2_QM_CP_DBG_0_4 0x548418
+
+#define mmDMA2_QM_CP_ARUSER_31_11_0 0x54841C
+
+#define mmDMA2_QM_CP_ARUSER_31_11_1 0x548420
+
+#define mmDMA2_QM_CP_ARUSER_31_11_2 0x548424
+
+#define mmDMA2_QM_CP_ARUSER_31_11_3 0x548428
+
+#define mmDMA2_QM_CP_ARUSER_31_11_4 0x54842C
+
+#define mmDMA2_QM_CP_AWUSER_31_11_0 0x548430
+
+#define mmDMA2_QM_CP_AWUSER_31_11_1 0x548434
+
+#define mmDMA2_QM_CP_AWUSER_31_11_2 0x548438
+
+#define mmDMA2_QM_CP_AWUSER_31_11_3 0x54843C
+
+#define mmDMA2_QM_CP_AWUSER_31_11_4 0x548440
+
+#define mmDMA2_QM_ARB_CFG_0 0x548A00
+
+#define mmDMA2_QM_ARB_CHOISE_Q_PUSH 0x548A04
+
+#define mmDMA2_QM_ARB_WRR_WEIGHT_0 0x548A08
+
+#define mmDMA2_QM_ARB_WRR_WEIGHT_1 0x548A0C
+
+#define mmDMA2_QM_ARB_WRR_WEIGHT_2 0x548A10
+
+#define mmDMA2_QM_ARB_WRR_WEIGHT_3 0x548A14
+
+#define mmDMA2_QM_ARB_CFG_1 0x548A18
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_0 0x548A20
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_1 0x548A24
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_2 0x548A28
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_3 0x548A2C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_4 0x548A30
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_5 0x548A34
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_6 0x548A38
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_7 0x548A3C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_8 0x548A40
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_9 0x548A44
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_10 0x548A48
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_11 0x548A4C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_12 0x548A50
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_13 0x548A54
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_14 0x548A58
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_15 0x548A5C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_16 0x548A60
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_17 0x548A64
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_18 0x548A68
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_19 0x548A6C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_20 0x548A70
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_21 0x548A74
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_22 0x548A78
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_23 0x548A7C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_24 0x548A80
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_25 0x548A84
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_26 0x548A88
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_27 0x548A8C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_28 0x548A90
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_29 0x548A94
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_30 0x548A98
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_31 0x548A9C
+
+#define mmDMA2_QM_ARB_MST_CRED_INC 0x548AA0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x548AA4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x548AA8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x548AAC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x548AB0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x548AB4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x548AB8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x548ABC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x548AC0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x548AC4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x548AC8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x548ACC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x548AD0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x548AD4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x548AD8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x548ADC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x548AE0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x548AE4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x548AE8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x548AEC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x548AF0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x548AF4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x548AF8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x548AFC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x548B00
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x548B04
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x548B08
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x548B0C
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x548B10
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x548B14
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x548B18
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x548B1C
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x548B20
+
+#define mmDMA2_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x548B28
+
+#define mmDMA2_QM_ARB_MST_SLAVE_EN 0x548B2C
+
+#define mmDMA2_QM_ARB_MST_QUIET_PER 0x548B34
+
+#define mmDMA2_QM_ARB_SLV_CHOISE_WDT 0x548B38
+
+#define mmDMA2_QM_ARB_SLV_ID 0x548B3C
+
+#define mmDMA2_QM_ARB_MSG_MAX_INFLIGHT 0x548B44
+
+#define mmDMA2_QM_ARB_MSG_AWUSER_31_11 0x548B48
+
+#define mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP 0x548B4C
+
+#define mmDMA2_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x548B50
+
+#define mmDMA2_QM_ARB_BASE_LO 0x548B54
+
+#define mmDMA2_QM_ARB_BASE_HI 0x548B58
+
+#define mmDMA2_QM_ARB_STATE_STS 0x548B80
+
+#define mmDMA2_QM_ARB_CHOISE_FULLNESS_STS 0x548B84
+
+#define mmDMA2_QM_ARB_MSG_STS 0x548B88
+
+#define mmDMA2_QM_ARB_SLV_CHOISE_Q_HEAD 0x548B8C
+
+#define mmDMA2_QM_ARB_ERR_CAUSE 0x548B9C
+
+#define mmDMA2_QM_ARB_ERR_MSG_EN 0x548BA0
+
+#define mmDMA2_QM_ARB_ERR_STS_DRP 0x548BA8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_0 0x548BB0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_1 0x548BB4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_2 0x548BB8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_3 0x548BBC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_4 0x548BC0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_5 0x548BC4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_6 0x548BC8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_7 0x548BCC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_8 0x548BD0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_9 0x548BD4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_10 0x548BD8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_11 0x548BDC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_12 0x548BE0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_13 0x548BE4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_14 0x548BE8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_15 0x548BEC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_16 0x548BF0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_17 0x548BF4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_18 0x548BF8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_19 0x548BFC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_20 0x548C00
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_21 0x548C04
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_22 0x548C08
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_23 0x548C0C
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_24 0x548C10
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_25 0x548C14
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_26 0x548C18
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_27 0x548C1C
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_28 0x548C20
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_29 0x548C24
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_30 0x548C28
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_31 0x548C2C
+
+#define mmDMA2_QM_CGM_CFG 0x548C70
+
+#define mmDMA2_QM_CGM_STS 0x548C74
+
+#define mmDMA2_QM_CGM_CFG1 0x548C78
+
+#define mmDMA2_QM_LOCAL_RANGE_BASE 0x548C80
+
+#define mmDMA2_QM_LOCAL_RANGE_SIZE 0x548C84
+
+#define mmDMA2_QM_CSMR_STRICT_PRIO_CFG 0x548C90
+
+#define mmDMA2_QM_HBW_RD_RATE_LIM_CFG_1 0x548C94
+
+#define mmDMA2_QM_LBW_WR_RATE_LIM_CFG_0 0x548C98
+
+#define mmDMA2_QM_LBW_WR_RATE_LIM_CFG_1 0x548C9C
+
+#define mmDMA2_QM_HBW_RD_RATE_LIM_CFG_0 0x548CA0
+
+#define mmDMA2_QM_GLBL_AXCACHE 0x548CA4
+
+#define mmDMA2_QM_IND_GW_APB_CFG 0x548CB0
+
+#define mmDMA2_QM_IND_GW_APB_WDATA 0x548CB4
+
+#define mmDMA2_QM_IND_GW_APB_RDATA 0x548CB8
+
+#define mmDMA2_QM_IND_GW_APB_STATUS 0x548CBC
+
+#define mmDMA2_QM_GLBL_ERR_ADDR_LO 0x548CD0
+
+#define mmDMA2_QM_GLBL_ERR_ADDR_HI 0x548CD4
+
+#define mmDMA2_QM_GLBL_ERR_WDATA 0x548CD8
+
+#define mmDMA2_QM_GLBL_MEM_INIT_BUSY 0x548D00
+
+#endif /* ASIC_REG_DMA2_QM_REGS_H_ */
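Within a QMAN block the PQ registers repeat four times, once per physical queue, at a 4-byte stride (PQ_PI_0 through PQ_PI_3 at 0x5480A0 through 0x5480AC), and the CQ/CP registers repeat five times, once per stream. Code that iterates over queues can compute the address rather than enumerate the names; a minimal sketch, with the helper name an editorial assumption:

/* Hypothetical: producer index of DMA2_QM physical queue q (0..3). */
static inline u32 dma2_qm_pq_pi(int q)
{
	return mmDMA2_QM_PQ_PI_0 + q * 4;
}

/* dma2_qm_pq_pi(2) == 0x5480A8 == mmDMA2_QM_PQ_PI_2 */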
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
new file mode 100644
index 000000000000..fb145f416fe6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA3_CORE_REGS_H_
+#define ASIC_REG_DMA3_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA3_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA3_CORE_CFG_0 0x560000
+
+#define mmDMA3_CORE_CFG_1 0x560004
+
+#define mmDMA3_CORE_LBW_MAX_OUTSTAND 0x560008
+
+#define mmDMA3_CORE_SRC_BASE_LO 0x560014
+
+#define mmDMA3_CORE_SRC_BASE_HI 0x560018
+
+#define mmDMA3_CORE_DST_BASE_LO 0x56001C
+
+#define mmDMA3_CORE_DST_BASE_HI 0x560020
+
+#define mmDMA3_CORE_SRC_TSIZE_1 0x56002C
+
+#define mmDMA3_CORE_SRC_STRIDE_1 0x560030
+
+#define mmDMA3_CORE_SRC_TSIZE_2 0x560034
+
+#define mmDMA3_CORE_SRC_STRIDE_2 0x560038
+
+#define mmDMA3_CORE_SRC_TSIZE_3 0x56003C
+
+#define mmDMA3_CORE_SRC_STRIDE_3 0x560040
+
+#define mmDMA3_CORE_SRC_TSIZE_4 0x560044
+
+#define mmDMA3_CORE_SRC_STRIDE_4 0x560048
+
+#define mmDMA3_CORE_SRC_TSIZE_0 0x56004C
+
+#define mmDMA3_CORE_DST_TSIZE_1 0x560054
+
+#define mmDMA3_CORE_DST_STRIDE_1 0x560058
+
+#define mmDMA3_CORE_DST_TSIZE_2 0x56005C
+
+#define mmDMA3_CORE_DST_STRIDE_2 0x560060
+
+#define mmDMA3_CORE_DST_TSIZE_3 0x560064
+
+#define mmDMA3_CORE_DST_STRIDE_3 0x560068
+
+#define mmDMA3_CORE_DST_TSIZE_4 0x56006C
+
+#define mmDMA3_CORE_DST_STRIDE_4 0x560070
+
+#define mmDMA3_CORE_DST_TSIZE_0 0x560074
+
+#define mmDMA3_CORE_COMMIT 0x560078
+
+#define mmDMA3_CORE_WR_COMP_WDATA 0x56007C
+
+#define mmDMA3_CORE_WR_COMP_ADDR_LO 0x560080
+
+#define mmDMA3_CORE_WR_COMP_ADDR_HI 0x560084
+
+#define mmDMA3_CORE_WR_COMP_AWUSER_31_11 0x560088
+
+#define mmDMA3_CORE_TE_NUMROWS 0x560094
+
+#define mmDMA3_CORE_PROT 0x5600B8
+
+#define mmDMA3_CORE_SECURE_PROPS 0x5600F0
+
+#define mmDMA3_CORE_NON_SECURE_PROPS 0x5600F4
+
+#define mmDMA3_CORE_RD_MAX_OUTSTAND 0x560100
+
+#define mmDMA3_CORE_RD_MAX_SIZE 0x560104
+
+#define mmDMA3_CORE_RD_ARCACHE 0x560108
+
+#define mmDMA3_CORE_RD_ARUSER_31_11 0x560110
+
+#define mmDMA3_CORE_RD_INFLIGHTS 0x560114
+
+#define mmDMA3_CORE_WR_MAX_OUTSTAND 0x560120
+
+#define mmDMA3_CORE_WR_MAX_AWID 0x560124
+
+#define mmDMA3_CORE_WR_AWCACHE 0x560128
+
+#define mmDMA3_CORE_WR_AWUSER_31_11 0x560130
+
+#define mmDMA3_CORE_WR_INFLIGHTS 0x560134
+
+#define mmDMA3_CORE_RD_RATE_LIM_CFG_0 0x560150
+
+#define mmDMA3_CORE_RD_RATE_LIM_CFG_1 0x560154
+
+#define mmDMA3_CORE_WR_RATE_LIM_CFG_0 0x560158
+
+#define mmDMA3_CORE_WR_RATE_LIM_CFG_1 0x56015C
+
+#define mmDMA3_CORE_ERR_CFG 0x560160
+
+#define mmDMA3_CORE_ERR_CAUSE 0x560164
+
+#define mmDMA3_CORE_ERRMSG_ADDR_LO 0x560170
+
+#define mmDMA3_CORE_ERRMSG_ADDR_HI 0x560174
+
+#define mmDMA3_CORE_ERRMSG_WDATA 0x560178
+
+#define mmDMA3_CORE_STS0 0x560190
+
+#define mmDMA3_CORE_STS1 0x560194
+
+#define mmDMA3_CORE_RD_DBGMEM_ADD 0x560200
+
+#define mmDMA3_CORE_RD_DBGMEM_DATA_WR 0x560204
+
+#define mmDMA3_CORE_RD_DBGMEM_DATA_RD 0x560208
+
+#define mmDMA3_CORE_RD_DBGMEM_CTRL 0x56020C
+
+#define mmDMA3_CORE_RD_DBGMEM_RC 0x560210
+
+#define mmDMA3_CORE_DBG_HBW_AXI_AR_CNT 0x560220
+
+#define mmDMA3_CORE_DBG_HBW_AXI_AW_CNT 0x560224
+
+#define mmDMA3_CORE_DBG_LBW_AXI_AW_CNT 0x560228
+
+#define mmDMA3_CORE_DBG_DESC_CNT 0x56022C
+
+#define mmDMA3_CORE_DBG_STS 0x560230
+
+#define mmDMA3_CORE_DBG_RD_DESC_ID 0x560234
+
+#define mmDMA3_CORE_DBG_WR_DESC_ID 0x560238
+
+#endif /* ASIC_REG_DMA3_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
new file mode 100644
index 000000000000..a4b461ca3d94
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA3_QM_REGS_H_
+#define ASIC_REG_DMA3_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA3_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA3_QM_GLBL_CFG0 0x568000
+
+#define mmDMA3_QM_GLBL_CFG1 0x568004
+
+#define mmDMA3_QM_GLBL_PROT 0x568008
+
+#define mmDMA3_QM_GLBL_ERR_CFG 0x56800C
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_0 0x568010
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_1 0x568014
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_2 0x568018
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_3 0x56801C
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_4 0x568020
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_0 0x568024
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_1 0x568028
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_2 0x56802C
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_3 0x568030
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_4 0x568034
+
+#define mmDMA3_QM_GLBL_STS0 0x568038
+
+#define mmDMA3_QM_GLBL_STS1_0 0x568040
+
+#define mmDMA3_QM_GLBL_STS1_1 0x568044
+
+#define mmDMA3_QM_GLBL_STS1_2 0x568048
+
+#define mmDMA3_QM_GLBL_STS1_3 0x56804C
+
+#define mmDMA3_QM_GLBL_STS1_4 0x568050
+
+#define mmDMA3_QM_GLBL_MSG_EN_0 0x568054
+
+#define mmDMA3_QM_GLBL_MSG_EN_1 0x568058
+
+#define mmDMA3_QM_GLBL_MSG_EN_2 0x56805C
+
+#define mmDMA3_QM_GLBL_MSG_EN_3 0x568060
+
+#define mmDMA3_QM_GLBL_MSG_EN_4 0x568068
+
+#define mmDMA3_QM_PQ_BASE_LO_0 0x568070
+
+#define mmDMA3_QM_PQ_BASE_LO_1 0x568074
+
+#define mmDMA3_QM_PQ_BASE_LO_2 0x568078
+
+#define mmDMA3_QM_PQ_BASE_LO_3 0x56807C
+
+#define mmDMA3_QM_PQ_BASE_HI_0 0x568080
+
+#define mmDMA3_QM_PQ_BASE_HI_1 0x568084
+
+#define mmDMA3_QM_PQ_BASE_HI_2 0x568088
+
+#define mmDMA3_QM_PQ_BASE_HI_3 0x56808C
+
+#define mmDMA3_QM_PQ_SIZE_0 0x568090
+
+#define mmDMA3_QM_PQ_SIZE_1 0x568094
+
+#define mmDMA3_QM_PQ_SIZE_2 0x568098
+
+#define mmDMA3_QM_PQ_SIZE_3 0x56809C
+
+#define mmDMA3_QM_PQ_PI_0 0x5680A0
+
+#define mmDMA3_QM_PQ_PI_1 0x5680A4
+
+#define mmDMA3_QM_PQ_PI_2 0x5680A8
+
+#define mmDMA3_QM_PQ_PI_3 0x5680AC
+
+#define mmDMA3_QM_PQ_CI_0 0x5680B0
+
+#define mmDMA3_QM_PQ_CI_1 0x5680B4
+
+#define mmDMA3_QM_PQ_CI_2 0x5680B8
+
+#define mmDMA3_QM_PQ_CI_3 0x5680BC
+
+#define mmDMA3_QM_PQ_CFG0_0 0x5680C0
+
+#define mmDMA3_QM_PQ_CFG0_1 0x5680C4
+
+#define mmDMA3_QM_PQ_CFG0_2 0x5680C8
+
+#define mmDMA3_QM_PQ_CFG0_3 0x5680CC
+
+#define mmDMA3_QM_PQ_CFG1_0 0x5680D0
+
+#define mmDMA3_QM_PQ_CFG1_1 0x5680D4
+
+#define mmDMA3_QM_PQ_CFG1_2 0x5680D8
+
+#define mmDMA3_QM_PQ_CFG1_3 0x5680DC
+
+#define mmDMA3_QM_PQ_ARUSER_31_11_0 0x5680E0
+
+#define mmDMA3_QM_PQ_ARUSER_31_11_1 0x5680E4
+
+#define mmDMA3_QM_PQ_ARUSER_31_11_2 0x5680E8
+
+#define mmDMA3_QM_PQ_ARUSER_31_11_3 0x5680EC
+
+#define mmDMA3_QM_PQ_STS0_0 0x5680F0
+
+#define mmDMA3_QM_PQ_STS0_1 0x5680F4
+
+#define mmDMA3_QM_PQ_STS0_2 0x5680F8
+
+#define mmDMA3_QM_PQ_STS0_3 0x5680FC
+
+#define mmDMA3_QM_PQ_STS1_0 0x568100
+
+#define mmDMA3_QM_PQ_STS1_1 0x568104
+
+#define mmDMA3_QM_PQ_STS1_2 0x568108
+
+#define mmDMA3_QM_PQ_STS1_3 0x56810C
+
+#define mmDMA3_QM_CQ_CFG0_0 0x568110
+
+#define mmDMA3_QM_CQ_CFG0_1 0x568114
+
+#define mmDMA3_QM_CQ_CFG0_2 0x568118
+
+#define mmDMA3_QM_CQ_CFG0_3 0x56811C
+
+#define mmDMA3_QM_CQ_CFG0_4 0x568120
+
+#define mmDMA3_QM_CQ_CFG1_0 0x568124
+
+#define mmDMA3_QM_CQ_CFG1_1 0x568128
+
+#define mmDMA3_QM_CQ_CFG1_2 0x56812C
+
+#define mmDMA3_QM_CQ_CFG1_3 0x568130
+
+#define mmDMA3_QM_CQ_CFG1_4 0x568134
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_0 0x568138
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_1 0x56813C
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_2 0x568140
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_3 0x568144
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_4 0x568148
+
+#define mmDMA3_QM_CQ_STS0_0 0x56814C
+
+#define mmDMA3_QM_CQ_STS0_1 0x568150
+
+#define mmDMA3_QM_CQ_STS0_2 0x568154
+
+#define mmDMA3_QM_CQ_STS0_3 0x568158
+
+#define mmDMA3_QM_CQ_STS0_4 0x56815C
+
+#define mmDMA3_QM_CQ_STS1_0 0x568160
+
+#define mmDMA3_QM_CQ_STS1_1 0x568164
+
+#define mmDMA3_QM_CQ_STS1_2 0x568168
+
+#define mmDMA3_QM_CQ_STS1_3 0x56816C
+
+#define mmDMA3_QM_CQ_STS1_4 0x568170
+
+#define mmDMA3_QM_CQ_PTR_LO_0 0x568174
+
+#define mmDMA3_QM_CQ_PTR_HI_0 0x568178
+
+#define mmDMA3_QM_CQ_TSIZE_0 0x56817C
+
+#define mmDMA3_QM_CQ_CTL_0 0x568180
+
+#define mmDMA3_QM_CQ_PTR_LO_1 0x568184
+
+#define mmDMA3_QM_CQ_PTR_HI_1 0x568188
+
+#define mmDMA3_QM_CQ_TSIZE_1 0x56818C
+
+#define mmDMA3_QM_CQ_CTL_1 0x568190
+
+#define mmDMA3_QM_CQ_PTR_LO_2 0x568194
+
+#define mmDMA3_QM_CQ_PTR_HI_2 0x568198
+
+#define mmDMA3_QM_CQ_TSIZE_2 0x56819C
+
+#define mmDMA3_QM_CQ_CTL_2 0x5681A0
+
+#define mmDMA3_QM_CQ_PTR_LO_3 0x5681A4
+
+#define mmDMA3_QM_CQ_PTR_HI_3 0x5681A8
+
+#define mmDMA3_QM_CQ_TSIZE_3 0x5681AC
+
+#define mmDMA3_QM_CQ_CTL_3 0x5681B0
+
+#define mmDMA3_QM_CQ_PTR_LO_4 0x5681B4
+
+#define mmDMA3_QM_CQ_PTR_HI_4 0x5681B8
+
+#define mmDMA3_QM_CQ_TSIZE_4 0x5681BC
+
+#define mmDMA3_QM_CQ_CTL_4 0x5681C0
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_0 0x5681C4
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_1 0x5681C8
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_2 0x5681CC
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_3 0x5681D0
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_4 0x5681D4
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_0 0x5681D8
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_1 0x5681DC
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_2 0x5681E0
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_3 0x5681E4
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_4 0x5681E8
+
+#define mmDMA3_QM_CQ_TSIZE_STS_0 0x5681EC
+
+#define mmDMA3_QM_CQ_TSIZE_STS_1 0x5681F0
+
+#define mmDMA3_QM_CQ_TSIZE_STS_2 0x5681F4
+
+#define mmDMA3_QM_CQ_TSIZE_STS_3 0x5681F8
+
+#define mmDMA3_QM_CQ_TSIZE_STS_4 0x5681FC
+
+#define mmDMA3_QM_CQ_CTL_STS_0 0x568200
+
+#define mmDMA3_QM_CQ_CTL_STS_1 0x568204
+
+#define mmDMA3_QM_CQ_CTL_STS_2 0x568208
+
+#define mmDMA3_QM_CQ_CTL_STS_3 0x56820C
+
+#define mmDMA3_QM_CQ_CTL_STS_4 0x568210
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_0 0x568214
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_1 0x568218
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_2 0x56821C
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_3 0x568220
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_4 0x568224
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_0 0x568228
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_1 0x56822C
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_2 0x568230
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_3 0x568234
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_4 0x568238
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_0 0x56823C
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_1 0x568240
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_2 0x568244
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_3 0x568248
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_4 0x56824C
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_0 0x568250
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_1 0x568254
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_2 0x568258
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_3 0x56825C
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_4 0x568260
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_0 0x568264
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_1 0x568268
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_2 0x56826C
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_3 0x568270
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_4 0x568274
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_0 0x568278
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_1 0x56827C
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 0x568280
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_3 0x568284
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_4 0x568288
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_0 0x56828C
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_1 0x568290
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_2 0x568294
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_3 0x568298
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_4 0x56829C
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_0 0x5682A0
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_1 0x5682A4
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_2 0x5682A8
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_3 0x5682AC
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_4 0x5682B0
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_0 0x5682B4
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_1 0x5682B8
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_2 0x5682BC
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_3 0x5682C0
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_4 0x5682C4
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_0 0x5682C8
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_1 0x5682CC
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_2 0x5682D0
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_3 0x5682D4
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_4 0x5682D8
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5682E0
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5682E4
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5682E8
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5682EC
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5682F0
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5682F4
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5682F8
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5682FC
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x568300
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x568304
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_0 0x568308
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_1 0x56830C
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_2 0x568310
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_3 0x568314
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_4 0x568318
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_0 0x56831C
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_1 0x568320
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_2 0x568324
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_3 0x568328
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_4 0x56832C
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_0 0x568330
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_1 0x568334
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_2 0x568338
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_3 0x56833C
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_4 0x568340
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_0 0x568344
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_1 0x568348
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_2 0x56834C
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_3 0x568350
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_4 0x568354
+
+#define mmDMA3_QM_CP_FENCE0_CNT_0 0x568358
+
+#define mmDMA3_QM_CP_FENCE0_CNT_1 0x56835C
+
+#define mmDMA3_QM_CP_FENCE0_CNT_2 0x568360
+
+#define mmDMA3_QM_CP_FENCE0_CNT_3 0x568364
+
+#define mmDMA3_QM_CP_FENCE0_CNT_4 0x568368
+
+#define mmDMA3_QM_CP_FENCE1_CNT_0 0x56836C
+
+#define mmDMA3_QM_CP_FENCE1_CNT_1 0x568370
+
+#define mmDMA3_QM_CP_FENCE1_CNT_2 0x568374
+
+#define mmDMA3_QM_CP_FENCE1_CNT_3 0x568378
+
+#define mmDMA3_QM_CP_FENCE1_CNT_4 0x56837C
+
+#define mmDMA3_QM_CP_FENCE2_CNT_0 0x568380
+
+#define mmDMA3_QM_CP_FENCE2_CNT_1 0x568384
+
+#define mmDMA3_QM_CP_FENCE2_CNT_2 0x568388
+
+#define mmDMA3_QM_CP_FENCE2_CNT_3 0x56838C
+
+#define mmDMA3_QM_CP_FENCE2_CNT_4 0x568390
+
+#define mmDMA3_QM_CP_FENCE3_CNT_0 0x568394
+
+#define mmDMA3_QM_CP_FENCE3_CNT_1 0x568398
+
+#define mmDMA3_QM_CP_FENCE3_CNT_2 0x56839C
+
+#define mmDMA3_QM_CP_FENCE3_CNT_3 0x5683A0
+
+#define mmDMA3_QM_CP_FENCE3_CNT_4 0x5683A4
+
+#define mmDMA3_QM_CP_STS_0 0x5683A8
+
+#define mmDMA3_QM_CP_STS_1 0x5683AC
+
+#define mmDMA3_QM_CP_STS_2 0x5683B0
+
+#define mmDMA3_QM_CP_STS_3 0x5683B4
+
+#define mmDMA3_QM_CP_STS_4 0x5683B8
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_0 0x5683BC
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_1 0x5683C0
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_2 0x5683C4
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_3 0x5683C8
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_4 0x5683CC
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_0 0x5683D0
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_1 0x5683D4
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_2 0x5683D8
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_3 0x5683DC
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_4 0x5683E0
+
+#define mmDMA3_QM_CP_BARRIER_CFG_0 0x5683F4
+
+#define mmDMA3_QM_CP_BARRIER_CFG_1 0x5683F8
+
+#define mmDMA3_QM_CP_BARRIER_CFG_2 0x5683FC
+
+#define mmDMA3_QM_CP_BARRIER_CFG_3 0x568400
+
+#define mmDMA3_QM_CP_BARRIER_CFG_4 0x568404
+
+#define mmDMA3_QM_CP_DBG_0_0 0x568408
+
+#define mmDMA3_QM_CP_DBG_0_1 0x56840C
+
+#define mmDMA3_QM_CP_DBG_0_2 0x568410
+
+#define mmDMA3_QM_CP_DBG_0_3 0x568414
+
+#define mmDMA3_QM_CP_DBG_0_4 0x568418
+
+#define mmDMA3_QM_CP_ARUSER_31_11_0 0x56841C
+
+#define mmDMA3_QM_CP_ARUSER_31_11_1 0x568420
+
+#define mmDMA3_QM_CP_ARUSER_31_11_2 0x568424
+
+#define mmDMA3_QM_CP_ARUSER_31_11_3 0x568428
+
+#define mmDMA3_QM_CP_ARUSER_31_11_4 0x56842C
+
+#define mmDMA3_QM_CP_AWUSER_31_11_0 0x568430
+
+#define mmDMA3_QM_CP_AWUSER_31_11_1 0x568434
+
+#define mmDMA3_QM_CP_AWUSER_31_11_2 0x568438
+
+#define mmDMA3_QM_CP_AWUSER_31_11_3 0x56843C
+
+#define mmDMA3_QM_CP_AWUSER_31_11_4 0x568440
+
+#define mmDMA3_QM_ARB_CFG_0 0x568A00
+
+#define mmDMA3_QM_ARB_CHOISE_Q_PUSH 0x568A04
+
+#define mmDMA3_QM_ARB_WRR_WEIGHT_0 0x568A08
+
+#define mmDMA3_QM_ARB_WRR_WEIGHT_1 0x568A0C
+
+#define mmDMA3_QM_ARB_WRR_WEIGHT_2 0x568A10
+
+#define mmDMA3_QM_ARB_WRR_WEIGHT_3 0x568A14
+
+#define mmDMA3_QM_ARB_CFG_1 0x568A18
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_0 0x568A20
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_1 0x568A24
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_2 0x568A28
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_3 0x568A2C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_4 0x568A30
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_5 0x568A34
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_6 0x568A38
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_7 0x568A3C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_8 0x568A40
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_9 0x568A44
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_10 0x568A48
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_11 0x568A4C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_12 0x568A50
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_13 0x568A54
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_14 0x568A58
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_15 0x568A5C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_16 0x568A60
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_17 0x568A64
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_18 0x568A68
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_19 0x568A6C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_20 0x568A70
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_21 0x568A74
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_22 0x568A78
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_23 0x568A7C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_24 0x568A80
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_25 0x568A84
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_26 0x568A88
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_27 0x568A8C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_28 0x568A90
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_29 0x568A94
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_30 0x568A98
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_31 0x568A9C
+
+#define mmDMA3_QM_ARB_MST_CRED_INC 0x568AA0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x568AA4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x568AA8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x568AAC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x568AB0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x568AB4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x568AB8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x568ABC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x568AC0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x568AC4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x568AC8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x568ACC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x568AD0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x568AD4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x568AD8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x568ADC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x568AE0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x568AE4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x568AE8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x568AEC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x568AF0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x568AF4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x568AF8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x568AFC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x568B00
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x568B04
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x568B08
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x568B0C
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x568B10
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x568B14
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x568B18
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x568B1C
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x568B20
+
+#define mmDMA3_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x568B28
+
+#define mmDMA3_QM_ARB_MST_SLAVE_EN 0x568B2C
+
+#define mmDMA3_QM_ARB_MST_QUIET_PER 0x568B34
+
+#define mmDMA3_QM_ARB_SLV_CHOISE_WDT 0x568B38
+
+#define mmDMA3_QM_ARB_SLV_ID 0x568B3C
+
+#define mmDMA3_QM_ARB_MSG_MAX_INFLIGHT 0x568B44
+
+#define mmDMA3_QM_ARB_MSG_AWUSER_31_11 0x568B48
+
+#define mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP 0x568B4C
+
+#define mmDMA3_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x568B50
+
+#define mmDMA3_QM_ARB_BASE_LO 0x568B54
+
+#define mmDMA3_QM_ARB_BASE_HI 0x568B58
+
+#define mmDMA3_QM_ARB_STATE_STS 0x568B80
+
+#define mmDMA3_QM_ARB_CHOISE_FULLNESS_STS 0x568B84
+
+#define mmDMA3_QM_ARB_MSG_STS 0x568B88
+
+#define mmDMA3_QM_ARB_SLV_CHOISE_Q_HEAD 0x568B8C
+
+#define mmDMA3_QM_ARB_ERR_CAUSE 0x568B9C
+
+#define mmDMA3_QM_ARB_ERR_MSG_EN 0x568BA0
+
+#define mmDMA3_QM_ARB_ERR_STS_DRP 0x568BA8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_0 0x568BB0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_1 0x568BB4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_2 0x568BB8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_3 0x568BBC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_4 0x568BC0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_5 0x568BC4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_6 0x568BC8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_7 0x568BCC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_8 0x568BD0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_9 0x568BD4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_10 0x568BD8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_11 0x568BDC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_12 0x568BE0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_13 0x568BE4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_14 0x568BE8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_15 0x568BEC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_16 0x568BF0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_17 0x568BF4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_18 0x568BF8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_19 0x568BFC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_20 0x568C00
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_21 0x568C04
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_22 0x568C08
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_23 0x568C0C
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_24 0x568C10
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_25 0x568C14
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_26 0x568C18
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_27 0x568C1C
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_28 0x568C20
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_29 0x568C24
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_30 0x568C28
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_31 0x568C2C
+
+#define mmDMA3_QM_CGM_CFG 0x568C70
+
+#define mmDMA3_QM_CGM_STS 0x568C74
+
+#define mmDMA3_QM_CGM_CFG1 0x568C78
+
+#define mmDMA3_QM_LOCAL_RANGE_BASE 0x568C80
+
+#define mmDMA3_QM_LOCAL_RANGE_SIZE 0x568C84
+
+#define mmDMA3_QM_CSMR_STRICT_PRIO_CFG 0x568C90
+
+#define mmDMA3_QM_HBW_RD_RATE_LIM_CFG_1 0x568C94
+
+#define mmDMA3_QM_LBW_WR_RATE_LIM_CFG_0 0x568C98
+
+#define mmDMA3_QM_LBW_WR_RATE_LIM_CFG_1 0x568C9C
+
+#define mmDMA3_QM_HBW_RD_RATE_LIM_CFG_0 0x568CA0
+
+#define mmDMA3_QM_GLBL_AXCACHE 0x568CA4
+
+#define mmDMA3_QM_IND_GW_APB_CFG 0x568CB0
+
+#define mmDMA3_QM_IND_GW_APB_WDATA 0x568CB4
+
+#define mmDMA3_QM_IND_GW_APB_RDATA 0x568CB8
+
+#define mmDMA3_QM_IND_GW_APB_STATUS 0x568CBC
+
+#define mmDMA3_QM_GLBL_ERR_ADDR_LO 0x568CD0
+
+#define mmDMA3_QM_GLBL_ERR_ADDR_HI 0x568CD4
+
+#define mmDMA3_QM_GLBL_ERR_WDATA 0x568CD8
+
+#define mmDMA3_QM_GLBL_MEM_INIT_BUSY 0x568D00
+
+#endif /* ASIC_REG_DMA3_QM_REGS_H_ */
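The CP fence registers form a small two-dimensional array: four fence IDs by five streams, with consecutive streams 4 bytes apart and consecutive fence IDs 0x14 (5 * 4) bytes apart (FENCE0_RDATA_0 at 0x568308, FENCE1_RDATA_0 at 0x56831C). A hedged macro capturing both strides; the name is illustrative, not part of this patch:

/* Illustrative 2-D index: fence in 0..3, stream in 0..4. */
#define mmDMA3_QM_CP_FENCE_RDATA(fence, stream) \
	(mmDMA3_QM_CP_FENCE0_RDATA_0 + (fence) * 0x14 + (stream) * 4)

/* mmDMA3_QM_CP_FENCE_RDATA(2, 3) == 0x56833C == mmDMA3_QM_CP_FENCE2_RDATA_3 */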
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
new file mode 100644
index 000000000000..192d11404b1c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA4_CORE_REGS_H_
+#define ASIC_REG_DMA4_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA4_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA4_CORE_CFG_0 0x580000
+
+#define mmDMA4_CORE_CFG_1 0x580004
+
+#define mmDMA4_CORE_LBW_MAX_OUTSTAND 0x580008
+
+#define mmDMA4_CORE_SRC_BASE_LO 0x580014
+
+#define mmDMA4_CORE_SRC_BASE_HI 0x580018
+
+#define mmDMA4_CORE_DST_BASE_LO 0x58001C
+
+#define mmDMA4_CORE_DST_BASE_HI 0x580020
+
+#define mmDMA4_CORE_SRC_TSIZE_1 0x58002C
+
+#define mmDMA4_CORE_SRC_STRIDE_1 0x580030
+
+#define mmDMA4_CORE_SRC_TSIZE_2 0x580034
+
+#define mmDMA4_CORE_SRC_STRIDE_2 0x580038
+
+#define mmDMA4_CORE_SRC_TSIZE_3 0x58003C
+
+#define mmDMA4_CORE_SRC_STRIDE_3 0x580040
+
+#define mmDMA4_CORE_SRC_TSIZE_4 0x580044
+
+#define mmDMA4_CORE_SRC_STRIDE_4 0x580048
+
+#define mmDMA4_CORE_SRC_TSIZE_0 0x58004C
+
+#define mmDMA4_CORE_DST_TSIZE_1 0x580054
+
+#define mmDMA4_CORE_DST_STRIDE_1 0x580058
+
+#define mmDMA4_CORE_DST_TSIZE_2 0x58005C
+
+#define mmDMA4_CORE_DST_STRIDE_2 0x580060
+
+#define mmDMA4_CORE_DST_TSIZE_3 0x580064
+
+#define mmDMA4_CORE_DST_STRIDE_3 0x580068
+
+#define mmDMA4_CORE_DST_TSIZE_4 0x58006C
+
+#define mmDMA4_CORE_DST_STRIDE_4 0x580070
+
+#define mmDMA4_CORE_DST_TSIZE_0 0x580074
+
+#define mmDMA4_CORE_COMMIT 0x580078
+
+#define mmDMA4_CORE_WR_COMP_WDATA 0x58007C
+
+#define mmDMA4_CORE_WR_COMP_ADDR_LO 0x580080
+
+#define mmDMA4_CORE_WR_COMP_ADDR_HI 0x580084
+
+#define mmDMA4_CORE_WR_COMP_AWUSER_31_11 0x580088
+
+#define mmDMA4_CORE_TE_NUMROWS 0x580094
+
+#define mmDMA4_CORE_PROT 0x5800B8
+
+#define mmDMA4_CORE_SECURE_PROPS 0x5800F0
+
+#define mmDMA4_CORE_NON_SECURE_PROPS 0x5800F4
+
+#define mmDMA4_CORE_RD_MAX_OUTSTAND 0x580100
+
+#define mmDMA4_CORE_RD_MAX_SIZE 0x580104
+
+#define mmDMA4_CORE_RD_ARCACHE 0x580108
+
+#define mmDMA4_CORE_RD_ARUSER_31_11 0x580110
+
+#define mmDMA4_CORE_RD_INFLIGHTS 0x580114
+
+#define mmDMA4_CORE_WR_MAX_OUTSTAND 0x580120
+
+#define mmDMA4_CORE_WR_MAX_AWID 0x580124
+
+#define mmDMA4_CORE_WR_AWCACHE 0x580128
+
+#define mmDMA4_CORE_WR_AWUSER_31_11 0x580130
+
+#define mmDMA4_CORE_WR_INFLIGHTS 0x580134
+
+#define mmDMA4_CORE_RD_RATE_LIM_CFG_0 0x580150
+
+#define mmDMA4_CORE_RD_RATE_LIM_CFG_1 0x580154
+
+#define mmDMA4_CORE_WR_RATE_LIM_CFG_0 0x580158
+
+#define mmDMA4_CORE_WR_RATE_LIM_CFG_1 0x58015C
+
+#define mmDMA4_CORE_ERR_CFG 0x580160
+
+#define mmDMA4_CORE_ERR_CAUSE 0x580164
+
+#define mmDMA4_CORE_ERRMSG_ADDR_LO 0x580170
+
+#define mmDMA4_CORE_ERRMSG_ADDR_HI 0x580174
+
+#define mmDMA4_CORE_ERRMSG_WDATA 0x580178
+
+#define mmDMA4_CORE_STS0 0x580190
+
+#define mmDMA4_CORE_STS1 0x580194
+
+#define mmDMA4_CORE_RD_DBGMEM_ADD 0x580200
+
+#define mmDMA4_CORE_RD_DBGMEM_DATA_WR 0x580204
+
+#define mmDMA4_CORE_RD_DBGMEM_DATA_RD 0x580208
+
+#define mmDMA4_CORE_RD_DBGMEM_CTRL 0x58020C
+
+#define mmDMA4_CORE_RD_DBGMEM_RC 0x580210
+
+#define mmDMA4_CORE_DBG_HBW_AXI_AR_CNT 0x580220
+
+#define mmDMA4_CORE_DBG_HBW_AXI_AW_CNT 0x580224
+
+#define mmDMA4_CORE_DBG_LBW_AXI_AW_CNT 0x580228
+
+#define mmDMA4_CORE_DBG_DESC_CNT 0x58022C
+
+#define mmDMA4_CORE_DBG_STS 0x580230
+
+#define mmDMA4_CORE_DBG_RD_DESC_ID 0x580234
+
+#define mmDMA4_CORE_DBG_WR_DESC_ID 0x580238
+
+#endif /* ASIC_REG_DMA4_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
new file mode 100644
index 000000000000..f0cbda0d1e4d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA4_QM_REGS_H_
+#define ASIC_REG_DMA4_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA4_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA4_QM_GLBL_CFG0 0x588000
+
+#define mmDMA4_QM_GLBL_CFG1 0x588004
+
+#define mmDMA4_QM_GLBL_PROT 0x588008
+
+#define mmDMA4_QM_GLBL_ERR_CFG 0x58800C
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_0 0x588010
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_1 0x588014
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_2 0x588018
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_3 0x58801C
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_4 0x588020
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_0 0x588024
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_1 0x588028
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_2 0x58802C
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_3 0x588030
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_4 0x588034
+
+#define mmDMA4_QM_GLBL_STS0 0x588038
+
+#define mmDMA4_QM_GLBL_STS1_0 0x588040
+
+#define mmDMA4_QM_GLBL_STS1_1 0x588044
+
+#define mmDMA4_QM_GLBL_STS1_2 0x588048
+
+#define mmDMA4_QM_GLBL_STS1_3 0x58804C
+
+#define mmDMA4_QM_GLBL_STS1_4 0x588050
+
+#define mmDMA4_QM_GLBL_MSG_EN_0 0x588054
+
+#define mmDMA4_QM_GLBL_MSG_EN_1 0x588058
+
+#define mmDMA4_QM_GLBL_MSG_EN_2 0x58805C
+
+#define mmDMA4_QM_GLBL_MSG_EN_3 0x588060
+
+#define mmDMA4_QM_GLBL_MSG_EN_4 0x588068
+
+#define mmDMA4_QM_PQ_BASE_LO_0 0x588070
+
+#define mmDMA4_QM_PQ_BASE_LO_1 0x588074
+
+#define mmDMA4_QM_PQ_BASE_LO_2 0x588078
+
+#define mmDMA4_QM_PQ_BASE_LO_3 0x58807C
+
+#define mmDMA4_QM_PQ_BASE_HI_0 0x588080
+
+#define mmDMA4_QM_PQ_BASE_HI_1 0x588084
+
+#define mmDMA4_QM_PQ_BASE_HI_2 0x588088
+
+#define mmDMA4_QM_PQ_BASE_HI_3 0x58808C
+
+#define mmDMA4_QM_PQ_SIZE_0 0x588090
+
+#define mmDMA4_QM_PQ_SIZE_1 0x588094
+
+#define mmDMA4_QM_PQ_SIZE_2 0x588098
+
+#define mmDMA4_QM_PQ_SIZE_3 0x58809C
+
+#define mmDMA4_QM_PQ_PI_0 0x5880A0
+
+#define mmDMA4_QM_PQ_PI_1 0x5880A4
+
+#define mmDMA4_QM_PQ_PI_2 0x5880A8
+
+#define mmDMA4_QM_PQ_PI_3 0x5880AC
+
+#define mmDMA4_QM_PQ_CI_0 0x5880B0
+
+#define mmDMA4_QM_PQ_CI_1 0x5880B4
+
+#define mmDMA4_QM_PQ_CI_2 0x5880B8
+
+#define mmDMA4_QM_PQ_CI_3 0x5880BC
+
+#define mmDMA4_QM_PQ_CFG0_0 0x5880C0
+
+#define mmDMA4_QM_PQ_CFG0_1 0x5880C4
+
+#define mmDMA4_QM_PQ_CFG0_2 0x5880C8
+
+#define mmDMA4_QM_PQ_CFG0_3 0x5880CC
+
+#define mmDMA4_QM_PQ_CFG1_0 0x5880D0
+
+#define mmDMA4_QM_PQ_CFG1_1 0x5880D4
+
+#define mmDMA4_QM_PQ_CFG1_2 0x5880D8
+
+#define mmDMA4_QM_PQ_CFG1_3 0x5880DC
+
+#define mmDMA4_QM_PQ_ARUSER_31_11_0 0x5880E0
+
+#define mmDMA4_QM_PQ_ARUSER_31_11_1 0x5880E4
+
+#define mmDMA4_QM_PQ_ARUSER_31_11_2 0x5880E8
+
+#define mmDMA4_QM_PQ_ARUSER_31_11_3 0x5880EC
+
+#define mmDMA4_QM_PQ_STS0_0 0x5880F0
+
+#define mmDMA4_QM_PQ_STS0_1 0x5880F4
+
+#define mmDMA4_QM_PQ_STS0_2 0x5880F8
+
+#define mmDMA4_QM_PQ_STS0_3 0x5880FC
+
+#define mmDMA4_QM_PQ_STS1_0 0x588100
+
+#define mmDMA4_QM_PQ_STS1_1 0x588104
+
+#define mmDMA4_QM_PQ_STS1_2 0x588108
+
+#define mmDMA4_QM_PQ_STS1_3 0x58810C
+
+#define mmDMA4_QM_CQ_CFG0_0 0x588110
+
+#define mmDMA4_QM_CQ_CFG0_1 0x588114
+
+#define mmDMA4_QM_CQ_CFG0_2 0x588118
+
+#define mmDMA4_QM_CQ_CFG0_3 0x58811C
+
+#define mmDMA4_QM_CQ_CFG0_4 0x588120
+
+#define mmDMA4_QM_CQ_CFG1_0 0x588124
+
+#define mmDMA4_QM_CQ_CFG1_1 0x588128
+
+#define mmDMA4_QM_CQ_CFG1_2 0x58812C
+
+#define mmDMA4_QM_CQ_CFG1_3 0x588130
+
+#define mmDMA4_QM_CQ_CFG1_4 0x588134
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_0 0x588138
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_1 0x58813C
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_2 0x588140
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_3 0x588144
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_4 0x588148
+
+#define mmDMA4_QM_CQ_STS0_0 0x58814C
+
+#define mmDMA4_QM_CQ_STS0_1 0x588150
+
+#define mmDMA4_QM_CQ_STS0_2 0x588154
+
+#define mmDMA4_QM_CQ_STS0_3 0x588158
+
+#define mmDMA4_QM_CQ_STS0_4 0x58815C
+
+#define mmDMA4_QM_CQ_STS1_0 0x588160
+
+#define mmDMA4_QM_CQ_STS1_1 0x588164
+
+#define mmDMA4_QM_CQ_STS1_2 0x588168
+
+#define mmDMA4_QM_CQ_STS1_3 0x58816C
+
+#define mmDMA4_QM_CQ_STS1_4 0x588170
+
+#define mmDMA4_QM_CQ_PTR_LO_0 0x588174
+
+#define mmDMA4_QM_CQ_PTR_HI_0 0x588178
+
+#define mmDMA4_QM_CQ_TSIZE_0 0x58817C
+
+#define mmDMA4_QM_CQ_CTL_0 0x588180
+
+#define mmDMA4_QM_CQ_PTR_LO_1 0x588184
+
+#define mmDMA4_QM_CQ_PTR_HI_1 0x588188
+
+#define mmDMA4_QM_CQ_TSIZE_1 0x58818C
+
+#define mmDMA4_QM_CQ_CTL_1 0x588190
+
+#define mmDMA4_QM_CQ_PTR_LO_2 0x588194
+
+#define mmDMA4_QM_CQ_PTR_HI_2 0x588198
+
+#define mmDMA4_QM_CQ_TSIZE_2 0x58819C
+
+#define mmDMA4_QM_CQ_CTL_2 0x5881A0
+
+#define mmDMA4_QM_CQ_PTR_LO_3 0x5881A4
+
+#define mmDMA4_QM_CQ_PTR_HI_3 0x5881A8
+
+#define mmDMA4_QM_CQ_TSIZE_3 0x5881AC
+
+#define mmDMA4_QM_CQ_CTL_3 0x5881B0
+
+#define mmDMA4_QM_CQ_PTR_LO_4 0x5881B4
+
+#define mmDMA4_QM_CQ_PTR_HI_4 0x5881B8
+
+#define mmDMA4_QM_CQ_TSIZE_4 0x5881BC
+
+#define mmDMA4_QM_CQ_CTL_4 0x5881C0
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_0 0x5881C4
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_1 0x5881C8
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_2 0x5881CC
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_3 0x5881D0
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_4 0x5881D4
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_0 0x5881D8
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_1 0x5881DC
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_2 0x5881E0
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_3 0x5881E4
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_4 0x5881E8
+
+#define mmDMA4_QM_CQ_TSIZE_STS_0 0x5881EC
+
+#define mmDMA4_QM_CQ_TSIZE_STS_1 0x5881F0
+
+#define mmDMA4_QM_CQ_TSIZE_STS_2 0x5881F4
+
+#define mmDMA4_QM_CQ_TSIZE_STS_3 0x5881F8
+
+#define mmDMA4_QM_CQ_TSIZE_STS_4 0x5881FC
+
+#define mmDMA4_QM_CQ_CTL_STS_0 0x588200
+
+#define mmDMA4_QM_CQ_CTL_STS_1 0x588204
+
+#define mmDMA4_QM_CQ_CTL_STS_2 0x588208
+
+#define mmDMA4_QM_CQ_CTL_STS_3 0x58820C
+
+#define mmDMA4_QM_CQ_CTL_STS_4 0x588210
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_0 0x588214
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_1 0x588218
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_2 0x58821C
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_3 0x588220
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_4 0x588224
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_0 0x588228
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_1 0x58822C
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_2 0x588230
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_3 0x588234
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_4 0x588238
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_0 0x58823C
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_1 0x588240
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_2 0x588244
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_3 0x588248
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_4 0x58824C
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_0 0x588250
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_1 0x588254
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_2 0x588258
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_3 0x58825C
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_4 0x588260
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_0 0x588264
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_1 0x588268
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_2 0x58826C
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_3 0x588270
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_4 0x588274
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_0 0x588278
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_1 0x58827C
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 0x588280
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_3 0x588284
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_4 0x588288
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_0 0x58828C
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_1 0x588290
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_2 0x588294
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_3 0x588298
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_4 0x58829C
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_0 0x5882A0
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_1 0x5882A4
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_2 0x5882A8
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_3 0x5882AC
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_4 0x5882B0
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_0 0x5882B4
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_1 0x5882B8
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_2 0x5882BC
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_3 0x5882C0
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_4 0x5882C4
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_0 0x5882C8
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_1 0x5882CC
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_2 0x5882D0
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_3 0x5882D4
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_4 0x5882D8
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5882E0
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5882E4
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5882E8
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5882EC
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5882F0
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5882F4
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5882F8
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5882FC
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x588300
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x588304
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_0 0x588308
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_1 0x58830C
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_2 0x588310
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_3 0x588314
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_4 0x588318
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_0 0x58831C
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_1 0x588320
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_2 0x588324
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_3 0x588328
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_4 0x58832C
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_0 0x588330
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_1 0x588334
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_2 0x588338
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_3 0x58833C
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_4 0x588340
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_0 0x588344
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_1 0x588348
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_2 0x58834C
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_3 0x588350
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_4 0x588354
+
+#define mmDMA4_QM_CP_FENCE0_CNT_0 0x588358
+
+#define mmDMA4_QM_CP_FENCE0_CNT_1 0x58835C
+
+#define mmDMA4_QM_CP_FENCE0_CNT_2 0x588360
+
+#define mmDMA4_QM_CP_FENCE0_CNT_3 0x588364
+
+#define mmDMA4_QM_CP_FENCE0_CNT_4 0x588368
+
+#define mmDMA4_QM_CP_FENCE1_CNT_0 0x58836C
+
+#define mmDMA4_QM_CP_FENCE1_CNT_1 0x588370
+
+#define mmDMA4_QM_CP_FENCE1_CNT_2 0x588374
+
+#define mmDMA4_QM_CP_FENCE1_CNT_3 0x588378
+
+#define mmDMA4_QM_CP_FENCE1_CNT_4 0x58837C
+
+#define mmDMA4_QM_CP_FENCE2_CNT_0 0x588380
+
+#define mmDMA4_QM_CP_FENCE2_CNT_1 0x588384
+
+#define mmDMA4_QM_CP_FENCE2_CNT_2 0x588388
+
+#define mmDMA4_QM_CP_FENCE2_CNT_3 0x58838C
+
+#define mmDMA4_QM_CP_FENCE2_CNT_4 0x588390
+
+#define mmDMA4_QM_CP_FENCE3_CNT_0 0x588394
+
+#define mmDMA4_QM_CP_FENCE3_CNT_1 0x588398
+
+#define mmDMA4_QM_CP_FENCE3_CNT_2 0x58839C
+
+#define mmDMA4_QM_CP_FENCE3_CNT_3 0x5883A0
+
+#define mmDMA4_QM_CP_FENCE3_CNT_4 0x5883A4
+
+#define mmDMA4_QM_CP_STS_0 0x5883A8
+
+#define mmDMA4_QM_CP_STS_1 0x5883AC
+
+#define mmDMA4_QM_CP_STS_2 0x5883B0
+
+#define mmDMA4_QM_CP_STS_3 0x5883B4
+
+#define mmDMA4_QM_CP_STS_4 0x5883B8
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_0 0x5883BC
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_1 0x5883C0
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_2 0x5883C4
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_3 0x5883C8
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_4 0x5883CC
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_0 0x5883D0
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_1 0x5883D4
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_2 0x5883D8
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_3 0x5883DC
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_4 0x5883E0
+
+#define mmDMA4_QM_CP_BARRIER_CFG_0 0x5883F4
+
+#define mmDMA4_QM_CP_BARRIER_CFG_1 0x5883F8
+
+#define mmDMA4_QM_CP_BARRIER_CFG_2 0x5883FC
+
+#define mmDMA4_QM_CP_BARRIER_CFG_3 0x588400
+
+#define mmDMA4_QM_CP_BARRIER_CFG_4 0x588404
+
+#define mmDMA4_QM_CP_DBG_0_0 0x588408
+
+#define mmDMA4_QM_CP_DBG_0_1 0x58840C
+
+#define mmDMA4_QM_CP_DBG_0_2 0x588410
+
+#define mmDMA4_QM_CP_DBG_0_3 0x588414
+
+#define mmDMA4_QM_CP_DBG_0_4 0x588418
+
+#define mmDMA4_QM_CP_ARUSER_31_11_0 0x58841C
+
+#define mmDMA4_QM_CP_ARUSER_31_11_1 0x588420
+
+#define mmDMA4_QM_CP_ARUSER_31_11_2 0x588424
+
+#define mmDMA4_QM_CP_ARUSER_31_11_3 0x588428
+
+#define mmDMA4_QM_CP_ARUSER_31_11_4 0x58842C
+
+#define mmDMA4_QM_CP_AWUSER_31_11_0 0x588430
+
+#define mmDMA4_QM_CP_AWUSER_31_11_1 0x588434
+
+#define mmDMA4_QM_CP_AWUSER_31_11_2 0x588438
+
+#define mmDMA4_QM_CP_AWUSER_31_11_3 0x58843C
+
+#define mmDMA4_QM_CP_AWUSER_31_11_4 0x588440
+
+#define mmDMA4_QM_ARB_CFG_0 0x588A00
+
+#define mmDMA4_QM_ARB_CHOISE_Q_PUSH 0x588A04
+
+#define mmDMA4_QM_ARB_WRR_WEIGHT_0 0x588A08
+
+#define mmDMA4_QM_ARB_WRR_WEIGHT_1 0x588A0C
+
+#define mmDMA4_QM_ARB_WRR_WEIGHT_2 0x588A10
+
+#define mmDMA4_QM_ARB_WRR_WEIGHT_3 0x588A14
+
+#define mmDMA4_QM_ARB_CFG_1 0x588A18
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_0 0x588A20
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_1 0x588A24
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_2 0x588A28
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_3 0x588A2C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_4 0x588A30
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_5 0x588A34
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_6 0x588A38
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_7 0x588A3C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_8 0x588A40
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_9 0x588A44
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_10 0x588A48
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_11 0x588A4C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_12 0x588A50
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_13 0x588A54
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_14 0x588A58
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_15 0x588A5C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_16 0x588A60
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_17 0x588A64
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_18 0x588A68
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_19 0x588A6C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_20 0x588A70
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_21 0x588A74
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_22 0x588A78
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_23 0x588A7C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_24 0x588A80
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_25 0x588A84
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_26 0x588A88
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_27 0x588A8C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_28 0x588A90
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_29 0x588A94
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_30 0x588A98
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_31 0x588A9C
+
+#define mmDMA4_QM_ARB_MST_CRED_INC 0x588AA0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x588AA4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x588AA8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x588AAC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x588AB0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x588AB4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x588AB8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x588ABC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x588AC0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x588AC4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x588AC8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x588ACC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x588AD0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x588AD4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x588AD8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x588ADC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x588AE0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x588AE4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x588AE8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x588AEC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x588AF0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x588AF4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x588AF8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x588AFC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x588B00
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x588B04
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x588B08
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x588B0C
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x588B10
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x588B14
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x588B18
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x588B1C
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x588B20
+
+#define mmDMA4_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x588B28
+
+#define mmDMA4_QM_ARB_MST_SLAVE_EN 0x588B2C
+
+#define mmDMA4_QM_ARB_MST_QUIET_PER 0x588B34
+
+#define mmDMA4_QM_ARB_SLV_CHOISE_WDT 0x588B38
+
+#define mmDMA4_QM_ARB_SLV_ID 0x588B3C
+
+#define mmDMA4_QM_ARB_MSG_MAX_INFLIGHT 0x588B44
+
+#define mmDMA4_QM_ARB_MSG_AWUSER_31_11 0x588B48
+
+#define mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP 0x588B4C
+
+#define mmDMA4_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x588B50
+
+#define mmDMA4_QM_ARB_BASE_LO 0x588B54
+
+#define mmDMA4_QM_ARB_BASE_HI 0x588B58
+
+#define mmDMA4_QM_ARB_STATE_STS 0x588B80
+
+#define mmDMA4_QM_ARB_CHOISE_FULLNESS_STS 0x588B84
+
+#define mmDMA4_QM_ARB_MSG_STS 0x588B88
+
+#define mmDMA4_QM_ARB_SLV_CHOISE_Q_HEAD 0x588B8C
+
+#define mmDMA4_QM_ARB_ERR_CAUSE 0x588B9C
+
+#define mmDMA4_QM_ARB_ERR_MSG_EN 0x588BA0
+
+#define mmDMA4_QM_ARB_ERR_STS_DRP 0x588BA8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_0 0x588BB0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_1 0x588BB4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_2 0x588BB8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_3 0x588BBC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_4 0x588BC0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_5 0x588BC4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_6 0x588BC8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_7 0x588BCC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_8 0x588BD0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_9 0x588BD4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_10 0x588BD8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_11 0x588BDC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_12 0x588BE0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_13 0x588BE4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_14 0x588BE8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_15 0x588BEC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_16 0x588BF0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_17 0x588BF4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_18 0x588BF8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_19 0x588BFC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_20 0x588C00
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_21 0x588C04
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_22 0x588C08
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_23 0x588C0C
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_24 0x588C10
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_25 0x588C14
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_26 0x588C18
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_27 0x588C1C
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_28 0x588C20
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_29 0x588C24
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_30 0x588C28
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_31 0x588C2C
+
+#define mmDMA4_QM_CGM_CFG 0x588C70
+
+#define mmDMA4_QM_CGM_STS 0x588C74
+
+#define mmDMA4_QM_CGM_CFG1 0x588C78
+
+#define mmDMA4_QM_LOCAL_RANGE_BASE 0x588C80
+
+#define mmDMA4_QM_LOCAL_RANGE_SIZE 0x588C84
+
+#define mmDMA4_QM_CSMR_STRICT_PRIO_CFG 0x588C90
+
+#define mmDMA4_QM_HBW_RD_RATE_LIM_CFG_1 0x588C94
+
+#define mmDMA4_QM_LBW_WR_RATE_LIM_CFG_0 0x588C98
+
+#define mmDMA4_QM_LBW_WR_RATE_LIM_CFG_1 0x588C9C
+
+#define mmDMA4_QM_HBW_RD_RATE_LIM_CFG_0 0x588CA0
+
+#define mmDMA4_QM_GLBL_AXCACHE 0x588CA4
+
+#define mmDMA4_QM_IND_GW_APB_CFG 0x588CB0
+
+#define mmDMA4_QM_IND_GW_APB_WDATA 0x588CB4
+
+#define mmDMA4_QM_IND_GW_APB_RDATA 0x588CB8
+
+#define mmDMA4_QM_IND_GW_APB_STATUS 0x588CBC
+
+#define mmDMA4_QM_GLBL_ERR_ADDR_LO 0x588CD0
+
+#define mmDMA4_QM_GLBL_ERR_ADDR_HI 0x588CD4
+
+#define mmDMA4_QM_GLBL_ERR_WDATA 0x588CD8
+
+#define mmDMA4_QM_GLBL_MEM_INIT_BUSY 0x588D00
+
+#endif /* ASIC_REG_DMA4_QM_REGS_H_ */
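[Editor's note, not part of the patch: the ARB_MST_CRED_STS_0..31 block in the header above is a contiguous array of 32-bit registers (0x588BB0 through 0x588C2C, stride 4), so driver code can index it arithmetically instead of naming each register. A minimal sketch; the helper below is invented for illustration and does not appear in this patch:

#include <stdint.h>

#define mmDMA4_QM_ARB_MST_CRED_STS_0	0x588BB0

/*
 * Illustrative helper: the credit-status registers are laid out
 * back to back, 4 bytes apart, so entry n lives at STS_0 + 4 * n.
 * For n == 23 this yields 0x588C0C, matching
 * mmDMA4_QM_ARB_MST_CRED_STS_23 in the header above.
 */
static inline uint32_t dma4_qm_arb_mst_cred_sts(unsigned int n)
{
	return mmDMA4_QM_ARB_MST_CRED_STS_0 + 4 * n;
}
]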
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
new file mode 100644
index 000000000000..6e07c6fb6fc9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA5_CORE_REGS_H_
+#define ASIC_REG_DMA5_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA5_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA5_CORE_CFG_0 0x5A0000
+
+#define mmDMA5_CORE_CFG_1 0x5A0004
+
+#define mmDMA5_CORE_LBW_MAX_OUTSTAND 0x5A0008
+
+#define mmDMA5_CORE_SRC_BASE_LO 0x5A0014
+
+#define mmDMA5_CORE_SRC_BASE_HI 0x5A0018
+
+#define mmDMA5_CORE_DST_BASE_LO 0x5A001C
+
+#define mmDMA5_CORE_DST_BASE_HI 0x5A0020
+
+#define mmDMA5_CORE_SRC_TSIZE_1 0x5A002C
+
+#define mmDMA5_CORE_SRC_STRIDE_1 0x5A0030
+
+#define mmDMA5_CORE_SRC_TSIZE_2 0x5A0034
+
+#define mmDMA5_CORE_SRC_STRIDE_2 0x5A0038
+
+#define mmDMA5_CORE_SRC_TSIZE_3 0x5A003C
+
+#define mmDMA5_CORE_SRC_STRIDE_3 0x5A0040
+
+#define mmDMA5_CORE_SRC_TSIZE_4 0x5A0044
+
+#define mmDMA5_CORE_SRC_STRIDE_4 0x5A0048
+
+#define mmDMA5_CORE_SRC_TSIZE_0 0x5A004C
+
+#define mmDMA5_CORE_DST_TSIZE_1 0x5A0054
+
+#define mmDMA5_CORE_DST_STRIDE_1 0x5A0058
+
+#define mmDMA5_CORE_DST_TSIZE_2 0x5A005C
+
+#define mmDMA5_CORE_DST_STRIDE_2 0x5A0060
+
+#define mmDMA5_CORE_DST_TSIZE_3 0x5A0064
+
+#define mmDMA5_CORE_DST_STRIDE_3 0x5A0068
+
+#define mmDMA5_CORE_DST_TSIZE_4 0x5A006C
+
+#define mmDMA5_CORE_DST_STRIDE_4 0x5A0070
+
+#define mmDMA5_CORE_DST_TSIZE_0 0x5A0074
+
+#define mmDMA5_CORE_COMMIT 0x5A0078
+
+#define mmDMA5_CORE_WR_COMP_WDATA 0x5A007C
+
+#define mmDMA5_CORE_WR_COMP_ADDR_LO 0x5A0080
+
+#define mmDMA5_CORE_WR_COMP_ADDR_HI 0x5A0084
+
+#define mmDMA5_CORE_WR_COMP_AWUSER_31_11 0x5A0088
+
+#define mmDMA5_CORE_TE_NUMROWS 0x5A0094
+
+#define mmDMA5_CORE_PROT 0x5A00B8
+
+#define mmDMA5_CORE_SECURE_PROPS 0x5A00F0
+
+#define mmDMA5_CORE_NON_SECURE_PROPS 0x5A00F4
+
+#define mmDMA5_CORE_RD_MAX_OUTSTAND 0x5A0100
+
+#define mmDMA5_CORE_RD_MAX_SIZE 0x5A0104
+
+#define mmDMA5_CORE_RD_ARCACHE 0x5A0108
+
+#define mmDMA5_CORE_RD_ARUSER_31_11 0x5A0110
+
+#define mmDMA5_CORE_RD_INFLIGHTS 0x5A0114
+
+#define mmDMA5_CORE_WR_MAX_OUTSTAND 0x5A0120
+
+#define mmDMA5_CORE_WR_MAX_AWID 0x5A0124
+
+#define mmDMA5_CORE_WR_AWCACHE 0x5A0128
+
+#define mmDMA5_CORE_WR_AWUSER_31_11 0x5A0130
+
+#define mmDMA5_CORE_WR_INFLIGHTS 0x5A0134
+
+#define mmDMA5_CORE_RD_RATE_LIM_CFG_0 0x5A0150
+
+#define mmDMA5_CORE_RD_RATE_LIM_CFG_1 0x5A0154
+
+#define mmDMA5_CORE_WR_RATE_LIM_CFG_0 0x5A0158
+
+#define mmDMA5_CORE_WR_RATE_LIM_CFG_1 0x5A015C
+
+#define mmDMA5_CORE_ERR_CFG 0x5A0160
+
+#define mmDMA5_CORE_ERR_CAUSE 0x5A0164
+
+#define mmDMA5_CORE_ERRMSG_ADDR_LO 0x5A0170
+
+#define mmDMA5_CORE_ERRMSG_ADDR_HI 0x5A0174
+
+#define mmDMA5_CORE_ERRMSG_WDATA 0x5A0178
+
+#define mmDMA5_CORE_STS0 0x5A0190
+
+#define mmDMA5_CORE_STS1 0x5A0194
+
+#define mmDMA5_CORE_RD_DBGMEM_ADD 0x5A0200
+
+#define mmDMA5_CORE_RD_DBGMEM_DATA_WR 0x5A0204
+
+#define mmDMA5_CORE_RD_DBGMEM_DATA_RD 0x5A0208
+
+#define mmDMA5_CORE_RD_DBGMEM_CTRL 0x5A020C
+
+#define mmDMA5_CORE_RD_DBGMEM_RC 0x5A0210
+
+#define mmDMA5_CORE_DBG_HBW_AXI_AR_CNT 0x5A0220
+
+#define mmDMA5_CORE_DBG_HBW_AXI_AW_CNT 0x5A0224
+
+#define mmDMA5_CORE_DBG_LBW_AXI_AW_CNT 0x5A0228
+
+#define mmDMA5_CORE_DBG_DESC_CNT 0x5A022C
+
+#define mmDMA5_CORE_DBG_STS 0x5A0230
+
+#define mmDMA5_CORE_DBG_RD_DESC_ID 0x5A0234
+
+#define mmDMA5_CORE_DBG_WR_DESC_ID 0x5A0238
+
+#endif /* ASIC_REG_DMA5_CORE_REGS_H_ */
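[Editor's note, not part of the patch: across these headers the register blocks repeat at fixed strides. Each DMA engine's CORE block sits 0x20000 above the previous one (DMA4_CORE at 0x580000, DMA5_CORE at 0x5A0000 above), and each engine's QM block sits 0x8000 above its CORE block (DMA4_QM at 0x588000, DMA5_QM at 0x5A8000). A hedged sketch of how that regularity could be exploited; the *_OFF macros and helper are invented for illustration:

#include <stdint.h>

#define mmDMA4_CORE_CFG_0	0x580000

#define DMA_ENGINE_OFF		0x20000	/* CORE-to-CORE stride seen above */
#define DMA_QM_OFF		0x8000	/* QM block offset within an engine */

/* Address of DMAn_CORE_CFG_0 for engine n >= 4, per the offsets above. */
static inline uint32_t dma_core_cfg0(unsigned int n)
{
	return mmDMA4_CORE_CFG_0 + (n - 4) * DMA_ENGINE_OFF;
}

/* dma_core_cfg0(5) == 0x5A0000, matching mmDMA5_CORE_CFG_0 above. */
]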
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
new file mode 100644
index 000000000000..0faea21756c5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA5_QM_REGS_H_
+#define ASIC_REG_DMA5_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA5_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA5_QM_GLBL_CFG0 0x5A8000
+
+#define mmDMA5_QM_GLBL_CFG1 0x5A8004
+
+#define mmDMA5_QM_GLBL_PROT 0x5A8008
+
+#define mmDMA5_QM_GLBL_ERR_CFG 0x5A800C
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_0 0x5A8010
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_1 0x5A8014
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_2 0x5A8018
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_3 0x5A801C
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_4 0x5A8020
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_0 0x5A8024
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_1 0x5A8028
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_2 0x5A802C
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_3 0x5A8030
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_4 0x5A8034
+
+#define mmDMA5_QM_GLBL_STS0 0x5A8038
+
+#define mmDMA5_QM_GLBL_STS1_0 0x5A8040
+
+#define mmDMA5_QM_GLBL_STS1_1 0x5A8044
+
+#define mmDMA5_QM_GLBL_STS1_2 0x5A8048
+
+#define mmDMA5_QM_GLBL_STS1_3 0x5A804C
+
+#define mmDMA5_QM_GLBL_STS1_4 0x5A8050
+
+#define mmDMA5_QM_GLBL_MSG_EN_0 0x5A8054
+
+#define mmDMA5_QM_GLBL_MSG_EN_1 0x5A8058
+
+#define mmDMA5_QM_GLBL_MSG_EN_2 0x5A805C
+
+#define mmDMA5_QM_GLBL_MSG_EN_3 0x5A8060
+
+#define mmDMA5_QM_GLBL_MSG_EN_4 0x5A8068
+
+#define mmDMA5_QM_PQ_BASE_LO_0 0x5A8070
+
+#define mmDMA5_QM_PQ_BASE_LO_1 0x5A8074
+
+#define mmDMA5_QM_PQ_BASE_LO_2 0x5A8078
+
+#define mmDMA5_QM_PQ_BASE_LO_3 0x5A807C
+
+#define mmDMA5_QM_PQ_BASE_HI_0 0x5A8080
+
+#define mmDMA5_QM_PQ_BASE_HI_1 0x5A8084
+
+#define mmDMA5_QM_PQ_BASE_HI_2 0x5A8088
+
+#define mmDMA5_QM_PQ_BASE_HI_3 0x5A808C
+
+#define mmDMA5_QM_PQ_SIZE_0 0x5A8090
+
+#define mmDMA5_QM_PQ_SIZE_1 0x5A8094
+
+#define mmDMA5_QM_PQ_SIZE_2 0x5A8098
+
+#define mmDMA5_QM_PQ_SIZE_3 0x5A809C
+
+#define mmDMA5_QM_PQ_PI_0 0x5A80A0
+
+#define mmDMA5_QM_PQ_PI_1 0x5A80A4
+
+#define mmDMA5_QM_PQ_PI_2 0x5A80A8
+
+#define mmDMA5_QM_PQ_PI_3 0x5A80AC
+
+#define mmDMA5_QM_PQ_CI_0 0x5A80B0
+
+#define mmDMA5_QM_PQ_CI_1 0x5A80B4
+
+#define mmDMA5_QM_PQ_CI_2 0x5A80B8
+
+#define mmDMA5_QM_PQ_CI_3 0x5A80BC
+
+#define mmDMA5_QM_PQ_CFG0_0 0x5A80C0
+
+#define mmDMA5_QM_PQ_CFG0_1 0x5A80C4
+
+#define mmDMA5_QM_PQ_CFG0_2 0x5A80C8
+
+#define mmDMA5_QM_PQ_CFG0_3 0x5A80CC
+
+#define mmDMA5_QM_PQ_CFG1_0 0x5A80D0
+
+#define mmDMA5_QM_PQ_CFG1_1 0x5A80D4
+
+#define mmDMA5_QM_PQ_CFG1_2 0x5A80D8
+
+#define mmDMA5_QM_PQ_CFG1_3 0x5A80DC
+
+#define mmDMA5_QM_PQ_ARUSER_31_11_0 0x5A80E0
+
+#define mmDMA5_QM_PQ_ARUSER_31_11_1 0x5A80E4
+
+#define mmDMA5_QM_PQ_ARUSER_31_11_2 0x5A80E8
+
+#define mmDMA5_QM_PQ_ARUSER_31_11_3 0x5A80EC
+
+#define mmDMA5_QM_PQ_STS0_0 0x5A80F0
+
+#define mmDMA5_QM_PQ_STS0_1 0x5A80F4
+
+#define mmDMA5_QM_PQ_STS0_2 0x5A80F8
+
+#define mmDMA5_QM_PQ_STS0_3 0x5A80FC
+
+#define mmDMA5_QM_PQ_STS1_0 0x5A8100
+
+#define mmDMA5_QM_PQ_STS1_1 0x5A8104
+
+#define mmDMA5_QM_PQ_STS1_2 0x5A8108
+
+#define mmDMA5_QM_PQ_STS1_3 0x5A810C
+
+#define mmDMA5_QM_CQ_CFG0_0 0x5A8110
+
+#define mmDMA5_QM_CQ_CFG0_1 0x5A8114
+
+#define mmDMA5_QM_CQ_CFG0_2 0x5A8118
+
+#define mmDMA5_QM_CQ_CFG0_3 0x5A811C
+
+#define mmDMA5_QM_CQ_CFG0_4 0x5A8120
+
+#define mmDMA5_QM_CQ_CFG1_0 0x5A8124
+
+#define mmDMA5_QM_CQ_CFG1_1 0x5A8128
+
+#define mmDMA5_QM_CQ_CFG1_2 0x5A812C
+
+#define mmDMA5_QM_CQ_CFG1_3 0x5A8130
+
+#define mmDMA5_QM_CQ_CFG1_4 0x5A8134
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_0 0x5A8138
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_1 0x5A813C
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_2 0x5A8140
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_3 0x5A8144
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_4 0x5A8148
+
+#define mmDMA5_QM_CQ_STS0_0 0x5A814C
+
+#define mmDMA5_QM_CQ_STS0_1 0x5A8150
+
+#define mmDMA5_QM_CQ_STS0_2 0x5A8154
+
+#define mmDMA5_QM_CQ_STS0_3 0x5A8158
+
+#define mmDMA5_QM_CQ_STS0_4 0x5A815C
+
+#define mmDMA5_QM_CQ_STS1_0 0x5A8160
+
+#define mmDMA5_QM_CQ_STS1_1 0x5A8164
+
+#define mmDMA5_QM_CQ_STS1_2 0x5A8168
+
+#define mmDMA5_QM_CQ_STS1_3 0x5A816C
+
+#define mmDMA5_QM_CQ_STS1_4 0x5A8170
+
+#define mmDMA5_QM_CQ_PTR_LO_0 0x5A8174
+
+#define mmDMA5_QM_CQ_PTR_HI_0 0x5A8178
+
+#define mmDMA5_QM_CQ_TSIZE_0 0x5A817C
+
+#define mmDMA5_QM_CQ_CTL_0 0x5A8180
+
+#define mmDMA5_QM_CQ_PTR_LO_1 0x5A8184
+
+#define mmDMA5_QM_CQ_PTR_HI_1 0x5A8188
+
+#define mmDMA5_QM_CQ_TSIZE_1 0x5A818C
+
+#define mmDMA5_QM_CQ_CTL_1 0x5A8190
+
+#define mmDMA5_QM_CQ_PTR_LO_2 0x5A8194
+
+#define mmDMA5_QM_CQ_PTR_HI_2 0x5A8198
+
+#define mmDMA5_QM_CQ_TSIZE_2 0x5A819C
+
+#define mmDMA5_QM_CQ_CTL_2 0x5A81A0
+
+#define mmDMA5_QM_CQ_PTR_LO_3 0x5A81A4
+
+#define mmDMA5_QM_CQ_PTR_HI_3 0x5A81A8
+
+#define mmDMA5_QM_CQ_TSIZE_3 0x5A81AC
+
+#define mmDMA5_QM_CQ_CTL_3 0x5A81B0
+
+#define mmDMA5_QM_CQ_PTR_LO_4 0x5A81B4
+
+#define mmDMA5_QM_CQ_PTR_HI_4 0x5A81B8
+
+#define mmDMA5_QM_CQ_TSIZE_4 0x5A81BC
+
+#define mmDMA5_QM_CQ_CTL_4 0x5A81C0
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_0 0x5A81C4
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_1 0x5A81C8
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_2 0x5A81CC
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_3 0x5A81D0
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_4 0x5A81D4
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_0 0x5A81D8
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_1 0x5A81DC
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_2 0x5A81E0
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_3 0x5A81E4
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_4 0x5A81E8
+
+#define mmDMA5_QM_CQ_TSIZE_STS_0 0x5A81EC
+
+#define mmDMA5_QM_CQ_TSIZE_STS_1 0x5A81F0
+
+#define mmDMA5_QM_CQ_TSIZE_STS_2 0x5A81F4
+
+#define mmDMA5_QM_CQ_TSIZE_STS_3 0x5A81F8
+
+#define mmDMA5_QM_CQ_TSIZE_STS_4 0x5A81FC
+
+#define mmDMA5_QM_CQ_CTL_STS_0 0x5A8200
+
+#define mmDMA5_QM_CQ_CTL_STS_1 0x5A8204
+
+#define mmDMA5_QM_CQ_CTL_STS_2 0x5A8208
+
+#define mmDMA5_QM_CQ_CTL_STS_3 0x5A820C
+
+#define mmDMA5_QM_CQ_CTL_STS_4 0x5A8210
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_0 0x5A8214
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_1 0x5A8218
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_2 0x5A821C
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_3 0x5A8220
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_4 0x5A8224
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_0 0x5A8228
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_1 0x5A822C
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_2 0x5A8230
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_3 0x5A8234
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_4 0x5A8238
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_0 0x5A823C
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_1 0x5A8240
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_2 0x5A8244
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_3 0x5A8248
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_4 0x5A824C
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_0 0x5A8250
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_1 0x5A8254
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_2 0x5A8258
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_3 0x5A825C
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_4 0x5A8260
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_0 0x5A8264
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_1 0x5A8268
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_2 0x5A826C
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_3 0x5A8270
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_4 0x5A8274
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_0 0x5A8278
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_1 0x5A827C
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 0x5A8280
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_3 0x5A8284
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_4 0x5A8288
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_0 0x5A828C
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_1 0x5A8290
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_2 0x5A8294
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_3 0x5A8298
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_4 0x5A829C
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_0 0x5A82A0
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_1 0x5A82A4
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_2 0x5A82A8
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_3 0x5A82AC
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_4 0x5A82B0
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_0 0x5A82B4
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_1 0x5A82B8
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_2 0x5A82BC
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_3 0x5A82C0
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_4 0x5A82C4
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_0 0x5A82C8
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_1 0x5A82CC
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_2 0x5A82D0
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_3 0x5A82D4
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_4 0x5A82D8
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5A82E0
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5A82E4
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5A82E8
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5A82EC
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5A82F0
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5A82F4
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5A82F8
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5A82FC
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x5A8300
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x5A8304
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_0 0x5A8308
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_1 0x5A830C
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_2 0x5A8310
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_3 0x5A8314
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_4 0x5A8318
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_0 0x5A831C
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_1 0x5A8320
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_2 0x5A8324
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_3 0x5A8328
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_4 0x5A832C
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_0 0x5A8330
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_1 0x5A8334
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_2 0x5A8338
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_3 0x5A833C
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_4 0x5A8340
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_0 0x5A8344
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_1 0x5A8348
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_2 0x5A834C
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_3 0x5A8350
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_4 0x5A8354
+
+#define mmDMA5_QM_CP_FENCE0_CNT_0 0x5A8358
+
+#define mmDMA5_QM_CP_FENCE0_CNT_1 0x5A835C
+
+#define mmDMA5_QM_CP_FENCE0_CNT_2 0x5A8360
+
+#define mmDMA5_QM_CP_FENCE0_CNT_3 0x5A8364
+
+#define mmDMA5_QM_CP_FENCE0_CNT_4 0x5A8368
+
+#define mmDMA5_QM_CP_FENCE1_CNT_0 0x5A836C
+
+#define mmDMA5_QM_CP_FENCE1_CNT_1 0x5A8370
+
+#define mmDMA5_QM_CP_FENCE1_CNT_2 0x5A8374
+
+#define mmDMA5_QM_CP_FENCE1_CNT_3 0x5A8378
+
+#define mmDMA5_QM_CP_FENCE1_CNT_4 0x5A837C
+
+#define mmDMA5_QM_CP_FENCE2_CNT_0 0x5A8380
+
+#define mmDMA5_QM_CP_FENCE2_CNT_1 0x5A8384
+
+#define mmDMA5_QM_CP_FENCE2_CNT_2 0x5A8388
+
+#define mmDMA5_QM_CP_FENCE2_CNT_3 0x5A838C
+
+#define mmDMA5_QM_CP_FENCE2_CNT_4 0x5A8390
+
+#define mmDMA5_QM_CP_FENCE3_CNT_0 0x5A8394
+
+#define mmDMA5_QM_CP_FENCE3_CNT_1 0x5A8398
+
+#define mmDMA5_QM_CP_FENCE3_CNT_2 0x5A839C
+
+#define mmDMA5_QM_CP_FENCE3_CNT_3 0x5A83A0
+
+#define mmDMA5_QM_CP_FENCE3_CNT_4 0x5A83A4
+
+#define mmDMA5_QM_CP_STS_0 0x5A83A8
+
+#define mmDMA5_QM_CP_STS_1 0x5A83AC
+
+#define mmDMA5_QM_CP_STS_2 0x5A83B0
+
+#define mmDMA5_QM_CP_STS_3 0x5A83B4
+
+#define mmDMA5_QM_CP_STS_4 0x5A83B8
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_0 0x5A83BC
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_1 0x5A83C0
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_2 0x5A83C4
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_3 0x5A83C8
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_4 0x5A83CC
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_0 0x5A83D0
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_1 0x5A83D4
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_2 0x5A83D8
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_3 0x5A83DC
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_4 0x5A83E0
+
+#define mmDMA5_QM_CP_BARRIER_CFG_0 0x5A83F4
+
+#define mmDMA5_QM_CP_BARRIER_CFG_1 0x5A83F8
+
+#define mmDMA5_QM_CP_BARRIER_CFG_2 0x5A83FC
+
+#define mmDMA5_QM_CP_BARRIER_CFG_3 0x5A8400
+
+#define mmDMA5_QM_CP_BARRIER_CFG_4 0x5A8404
+
+#define mmDMA5_QM_CP_DBG_0_0 0x5A8408
+
+#define mmDMA5_QM_CP_DBG_0_1 0x5A840C
+
+#define mmDMA5_QM_CP_DBG_0_2 0x5A8410
+
+#define mmDMA5_QM_CP_DBG_0_3 0x5A8414
+
+#define mmDMA5_QM_CP_DBG_0_4 0x5A8418
+
+#define mmDMA5_QM_CP_ARUSER_31_11_0 0x5A841C
+
+#define mmDMA5_QM_CP_ARUSER_31_11_1 0x5A8420
+
+#define mmDMA5_QM_CP_ARUSER_31_11_2 0x5A8424
+
+#define mmDMA5_QM_CP_ARUSER_31_11_3 0x5A8428
+
+#define mmDMA5_QM_CP_ARUSER_31_11_4 0x5A842C
+
+#define mmDMA5_QM_CP_AWUSER_31_11_0 0x5A8430
+
+#define mmDMA5_QM_CP_AWUSER_31_11_1 0x5A8434
+
+#define mmDMA5_QM_CP_AWUSER_31_11_2 0x5A8438
+
+#define mmDMA5_QM_CP_AWUSER_31_11_3 0x5A843C
+
+#define mmDMA5_QM_CP_AWUSER_31_11_4 0x5A8440
+
+#define mmDMA5_QM_ARB_CFG_0 0x5A8A00
+
+#define mmDMA5_QM_ARB_CHOISE_Q_PUSH 0x5A8A04
+
+#define mmDMA5_QM_ARB_WRR_WEIGHT_0 0x5A8A08
+
+#define mmDMA5_QM_ARB_WRR_WEIGHT_1 0x5A8A0C
+
+#define mmDMA5_QM_ARB_WRR_WEIGHT_2 0x5A8A10
+
+#define mmDMA5_QM_ARB_WRR_WEIGHT_3 0x5A8A14
+
+#define mmDMA5_QM_ARB_CFG_1 0x5A8A18
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_0 0x5A8A20
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_1 0x5A8A24
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_2 0x5A8A28
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_3 0x5A8A2C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_4 0x5A8A30
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_5 0x5A8A34
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_6 0x5A8A38
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_7 0x5A8A3C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_8 0x5A8A40
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_9 0x5A8A44
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_10 0x5A8A48
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_11 0x5A8A4C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_12 0x5A8A50
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_13 0x5A8A54
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_14 0x5A8A58
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_15 0x5A8A5C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_16 0x5A8A60
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_17 0x5A8A64
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_18 0x5A8A68
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_19 0x5A8A6C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_20 0x5A8A70
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_21 0x5A8A74
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_22 0x5A8A78
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_23 0x5A8A7C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_24 0x5A8A80
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_25 0x5A8A84
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_26 0x5A8A88
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_27 0x5A8A8C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_28 0x5A8A90
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_29 0x5A8A94
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_30 0x5A8A98
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_31 0x5A8A9C
+
+#define mmDMA5_QM_ARB_MST_CRED_INC 0x5A8AA0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x5A8AA4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x5A8AA8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x5A8AAC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x5A8AB0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x5A8AB4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x5A8AB8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x5A8ABC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x5A8AC0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x5A8AC4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x5A8AC8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x5A8ACC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x5A8AD0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x5A8AD4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x5A8AD8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x5A8ADC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x5A8AE0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x5A8AE4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x5A8AE8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x5A8AEC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x5A8AF0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x5A8AF4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x5A8AF8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x5A8AFC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x5A8B00
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x5A8B04
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x5A8B08
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x5A8B0C
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x5A8B10
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x5A8B14
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x5A8B18
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x5A8B1C
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x5A8B20
+
+#define mmDMA5_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x5A8B28
+
+#define mmDMA5_QM_ARB_MST_SLAVE_EN 0x5A8B2C
+
+#define mmDMA5_QM_ARB_MST_QUIET_PER 0x5A8B34
+
+#define mmDMA5_QM_ARB_SLV_CHOISE_WDT 0x5A8B38
+
+#define mmDMA5_QM_ARB_SLV_ID 0x5A8B3C
+
+#define mmDMA5_QM_ARB_MSG_MAX_INFLIGHT 0x5A8B44
+
+#define mmDMA5_QM_ARB_MSG_AWUSER_31_11 0x5A8B48
+
+#define mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP 0x5A8B4C
+
+#define mmDMA5_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x5A8B50
+
+#define mmDMA5_QM_ARB_BASE_LO 0x5A8B54
+
+#define mmDMA5_QM_ARB_BASE_HI 0x5A8B58
+
+#define mmDMA5_QM_ARB_STATE_STS 0x5A8B80
+
+#define mmDMA5_QM_ARB_CHOISE_FULLNESS_STS 0x5A8B84
+
+#define mmDMA5_QM_ARB_MSG_STS 0x5A8B88
+
+#define mmDMA5_QM_ARB_SLV_CHOISE_Q_HEAD 0x5A8B8C
+
+#define mmDMA5_QM_ARB_ERR_CAUSE 0x5A8B9C
+
+#define mmDMA5_QM_ARB_ERR_MSG_EN 0x5A8BA0
+
+#define mmDMA5_QM_ARB_ERR_STS_DRP 0x5A8BA8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_0 0x5A8BB0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_1 0x5A8BB4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_2 0x5A8BB8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_3 0x5A8BBC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_4 0x5A8BC0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_5 0x5A8BC4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_6 0x5A8BC8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_7 0x5A8BCC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_8 0x5A8BD0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_9 0x5A8BD4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_10 0x5A8BD8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_11 0x5A8BDC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_12 0x5A8BE0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_13 0x5A8BE4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_14 0x5A8BE8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_15 0x5A8BEC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_16 0x5A8BF0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_17 0x5A8BF4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_18 0x5A8BF8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_19 0x5A8BFC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_20 0x5A8C00
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_21 0x5A8C04
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_22 0x5A8C08
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_23 0x5A8C0C
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_24 0x5A8C10
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_25 0x5A8C14
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_26 0x5A8C18
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_27 0x5A8C1C
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_28 0x5A8C20
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_29 0x5A8C24
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_30 0x5A8C28
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_31 0x5A8C2C
+
+#define mmDMA5_QM_CGM_CFG 0x5A8C70
+
+#define mmDMA5_QM_CGM_STS 0x5A8C74
+
+#define mmDMA5_QM_CGM_CFG1 0x5A8C78
+
+#define mmDMA5_QM_LOCAL_RANGE_BASE 0x5A8C80
+
+#define mmDMA5_QM_LOCAL_RANGE_SIZE 0x5A8C84
+
+#define mmDMA5_QM_CSMR_STRICT_PRIO_CFG 0x5A8C90
+
+#define mmDMA5_QM_HBW_RD_RATE_LIM_CFG_1 0x5A8C94
+
+#define mmDMA5_QM_LBW_WR_RATE_LIM_CFG_0 0x5A8C98
+
+#define mmDMA5_QM_LBW_WR_RATE_LIM_CFG_1 0x5A8C9C
+
+#define mmDMA5_QM_HBW_RD_RATE_LIM_CFG_0 0x5A8CA0
+
+#define mmDMA5_QM_GLBL_AXCACHE 0x5A8CA4
+
+#define mmDMA5_QM_IND_GW_APB_CFG 0x5A8CB0
+
+#define mmDMA5_QM_IND_GW_APB_WDATA 0x5A8CB4
+
+#define mmDMA5_QM_IND_GW_APB_RDATA 0x5A8CB8
+
+#define mmDMA5_QM_IND_GW_APB_STATUS 0x5A8CBC
+
+#define mmDMA5_QM_GLBL_ERR_ADDR_LO 0x5A8CD0
+
+#define mmDMA5_QM_GLBL_ERR_ADDR_HI 0x5A8CD4
+
+#define mmDMA5_QM_GLBL_ERR_WDATA 0x5A8CD8
+
+#define mmDMA5_QM_GLBL_MEM_INIT_BUSY 0x5A8D00
+
+#endif /* ASIC_REG_DMA5_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
new file mode 100644
index 000000000000..4962c13e2e2e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA6_CORE_REGS_H_
+#define ASIC_REG_DMA6_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA6_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA6_CORE_CFG_0 0x5C0000
+
+#define mmDMA6_CORE_CFG_1 0x5C0004
+
+#define mmDMA6_CORE_LBW_MAX_OUTSTAND 0x5C0008
+
+#define mmDMA6_CORE_SRC_BASE_LO 0x5C0014
+
+#define mmDMA6_CORE_SRC_BASE_HI 0x5C0018
+
+#define mmDMA6_CORE_DST_BASE_LO 0x5C001C
+
+#define mmDMA6_CORE_DST_BASE_HI 0x5C0020
+
+#define mmDMA6_CORE_SRC_TSIZE_1 0x5C002C
+
+#define mmDMA6_CORE_SRC_STRIDE_1 0x5C0030
+
+#define mmDMA6_CORE_SRC_TSIZE_2 0x5C0034
+
+#define mmDMA6_CORE_SRC_STRIDE_2 0x5C0038
+
+#define mmDMA6_CORE_SRC_TSIZE_3 0x5C003C
+
+#define mmDMA6_CORE_SRC_STRIDE_3 0x5C0040
+
+#define mmDMA6_CORE_SRC_TSIZE_4 0x5C0044
+
+#define mmDMA6_CORE_SRC_STRIDE_4 0x5C0048
+
+#define mmDMA6_CORE_SRC_TSIZE_0 0x5C004C
+
+#define mmDMA6_CORE_DST_TSIZE_1 0x5C0054
+
+#define mmDMA6_CORE_DST_STRIDE_1 0x5C0058
+
+#define mmDMA6_CORE_DST_TSIZE_2 0x5C005C
+
+#define mmDMA6_CORE_DST_STRIDE_2 0x5C0060
+
+#define mmDMA6_CORE_DST_TSIZE_3 0x5C0064
+
+#define mmDMA6_CORE_DST_STRIDE_3 0x5C0068
+
+#define mmDMA6_CORE_DST_TSIZE_4 0x5C006C
+
+#define mmDMA6_CORE_DST_STRIDE_4 0x5C0070
+
+#define mmDMA6_CORE_DST_TSIZE_0 0x5C0074
+
+#define mmDMA6_CORE_COMMIT 0x5C0078
+
+#define mmDMA6_CORE_WR_COMP_WDATA 0x5C007C
+
+#define mmDMA6_CORE_WR_COMP_ADDR_LO 0x5C0080
+
+#define mmDMA6_CORE_WR_COMP_ADDR_HI 0x5C0084
+
+#define mmDMA6_CORE_WR_COMP_AWUSER_31_11 0x5C0088
+
+#define mmDMA6_CORE_TE_NUMROWS 0x5C0094
+
+#define mmDMA6_CORE_PROT 0x5C00B8
+
+#define mmDMA6_CORE_SECURE_PROPS 0x5C00F0
+
+#define mmDMA6_CORE_NON_SECURE_PROPS 0x5C00F4
+
+#define mmDMA6_CORE_RD_MAX_OUTSTAND 0x5C0100
+
+#define mmDMA6_CORE_RD_MAX_SIZE 0x5C0104
+
+#define mmDMA6_CORE_RD_ARCACHE 0x5C0108
+
+#define mmDMA6_CORE_RD_ARUSER_31_11 0x5C0110
+
+#define mmDMA6_CORE_RD_INFLIGHTS 0x5C0114
+
+#define mmDMA6_CORE_WR_MAX_OUTSTAND 0x5C0120
+
+#define mmDMA6_CORE_WR_MAX_AWID 0x5C0124
+
+#define mmDMA6_CORE_WR_AWCACHE 0x5C0128
+
+#define mmDMA6_CORE_WR_AWUSER_31_11 0x5C0130
+
+#define mmDMA6_CORE_WR_INFLIGHTS 0x5C0134
+
+#define mmDMA6_CORE_RD_RATE_LIM_CFG_0 0x5C0150
+
+#define mmDMA6_CORE_RD_RATE_LIM_CFG_1 0x5C0154
+
+#define mmDMA6_CORE_WR_RATE_LIM_CFG_0 0x5C0158
+
+#define mmDMA6_CORE_WR_RATE_LIM_CFG_1 0x5C015C
+
+#define mmDMA6_CORE_ERR_CFG 0x5C0160
+
+#define mmDMA6_CORE_ERR_CAUSE 0x5C0164
+
+#define mmDMA6_CORE_ERRMSG_ADDR_LO 0x5C0170
+
+#define mmDMA6_CORE_ERRMSG_ADDR_HI 0x5C0174
+
+#define mmDMA6_CORE_ERRMSG_WDATA 0x5C0178
+
+#define mmDMA6_CORE_STS0 0x5C0190
+
+#define mmDMA6_CORE_STS1 0x5C0194
+
+#define mmDMA6_CORE_RD_DBGMEM_ADD 0x5C0200
+
+#define mmDMA6_CORE_RD_DBGMEM_DATA_WR 0x5C0204
+
+#define mmDMA6_CORE_RD_DBGMEM_DATA_RD 0x5C0208
+
+#define mmDMA6_CORE_RD_DBGMEM_CTRL 0x5C020C
+
+#define mmDMA6_CORE_RD_DBGMEM_RC 0x5C0210
+
+#define mmDMA6_CORE_DBG_HBW_AXI_AR_CNT 0x5C0220
+
+#define mmDMA6_CORE_DBG_HBW_AXI_AW_CNT 0x5C0224
+
+#define mmDMA6_CORE_DBG_LBW_AXI_AW_CNT 0x5C0228
+
+#define mmDMA6_CORE_DBG_DESC_CNT 0x5C022C
+
+#define mmDMA6_CORE_DBG_STS 0x5C0230
+
+#define mmDMA6_CORE_DBG_RD_DESC_ID 0x5C0234
+
+#define mmDMA6_CORE_DBG_WR_DESC_ID 0x5C0238
+
+#endif /* ASIC_REG_DMA6_CORE_REGS_H_ */
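[Editor's note, not part of the patch: these mm* values are offsets into the device's register BAR, not virtual addresses. A generic sketch of how such a define is consumed; the habanalabs driver's own accessor macros are not part of this hunk, so plain readl() on an ioremapped base stands in here as an assumption, and cfg_base is hypothetical:

#include <linux/io.h>
#include "asic_reg/dma6_core_regs.h"	/* assumed include path */

/* Assumed to have been mapped elsewhere, e.g. via pci_ioremap_bar(). */
extern void __iomem *cfg_base;

static u32 dma6_core_status(void)
{
	/* Read DMA6_CORE_STS0 at offset 0x5C0190 within the config BAR. */
	return readl(cfg_base + mmDMA6_CORE_STS0);
}
]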
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
new file mode 100644
index 000000000000..af87adb94c94
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA6_QM_REGS_H_
+#define ASIC_REG_DMA6_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA6_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA6_QM_GLBL_CFG0 0x5C8000
+
+#define mmDMA6_QM_GLBL_CFG1 0x5C8004
+
+#define mmDMA6_QM_GLBL_PROT 0x5C8008
+
+#define mmDMA6_QM_GLBL_ERR_CFG 0x5C800C
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_0 0x5C8010
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_1 0x5C8014
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_2 0x5C8018
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_3 0x5C801C
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_4 0x5C8020
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_0 0x5C8024
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_1 0x5C8028
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_2 0x5C802C
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_3 0x5C8030
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_4 0x5C8034
+
+#define mmDMA6_QM_GLBL_STS0 0x5C8038
+
+#define mmDMA6_QM_GLBL_STS1_0 0x5C8040
+
+#define mmDMA6_QM_GLBL_STS1_1 0x5C8044
+
+#define mmDMA6_QM_GLBL_STS1_2 0x5C8048
+
+#define mmDMA6_QM_GLBL_STS1_3 0x5C804C
+
+#define mmDMA6_QM_GLBL_STS1_4 0x5C8050
+
+#define mmDMA6_QM_GLBL_MSG_EN_0 0x5C8054
+
+#define mmDMA6_QM_GLBL_MSG_EN_1 0x5C8058
+
+#define mmDMA6_QM_GLBL_MSG_EN_2 0x5C805C
+
+#define mmDMA6_QM_GLBL_MSG_EN_3 0x5C8060
+
+#define mmDMA6_QM_GLBL_MSG_EN_4 0x5C8068
+
+#define mmDMA6_QM_PQ_BASE_LO_0 0x5C8070
+
+#define mmDMA6_QM_PQ_BASE_LO_1 0x5C8074
+
+#define mmDMA6_QM_PQ_BASE_LO_2 0x5C8078
+
+#define mmDMA6_QM_PQ_BASE_LO_3 0x5C807C
+
+#define mmDMA6_QM_PQ_BASE_HI_0 0x5C8080
+
+#define mmDMA6_QM_PQ_BASE_HI_1 0x5C8084
+
+#define mmDMA6_QM_PQ_BASE_HI_2 0x5C8088
+
+#define mmDMA6_QM_PQ_BASE_HI_3 0x5C808C
+
+#define mmDMA6_QM_PQ_SIZE_0 0x5C8090
+
+#define mmDMA6_QM_PQ_SIZE_1 0x5C8094
+
+#define mmDMA6_QM_PQ_SIZE_2 0x5C8098
+
+#define mmDMA6_QM_PQ_SIZE_3 0x5C809C
+
+#define mmDMA6_QM_PQ_PI_0 0x5C80A0
+
+#define mmDMA6_QM_PQ_PI_1 0x5C80A4
+
+#define mmDMA6_QM_PQ_PI_2 0x5C80A8
+
+#define mmDMA6_QM_PQ_PI_3 0x5C80AC
+
+#define mmDMA6_QM_PQ_CI_0 0x5C80B0
+
+#define mmDMA6_QM_PQ_CI_1 0x5C80B4
+
+#define mmDMA6_QM_PQ_CI_2 0x5C80B8
+
+#define mmDMA6_QM_PQ_CI_3 0x5C80BC
+
+#define mmDMA6_QM_PQ_CFG0_0 0x5C80C0
+
+#define mmDMA6_QM_PQ_CFG0_1 0x5C80C4
+
+#define mmDMA6_QM_PQ_CFG0_2 0x5C80C8
+
+#define mmDMA6_QM_PQ_CFG0_3 0x5C80CC
+
+#define mmDMA6_QM_PQ_CFG1_0 0x5C80D0
+
+#define mmDMA6_QM_PQ_CFG1_1 0x5C80D4
+
+#define mmDMA6_QM_PQ_CFG1_2 0x5C80D8
+
+#define mmDMA6_QM_PQ_CFG1_3 0x5C80DC
+
+#define mmDMA6_QM_PQ_ARUSER_31_11_0 0x5C80E0
+
+#define mmDMA6_QM_PQ_ARUSER_31_11_1 0x5C80E4
+
+#define mmDMA6_QM_PQ_ARUSER_31_11_2 0x5C80E8
+
+#define mmDMA6_QM_PQ_ARUSER_31_11_3 0x5C80EC
+
+#define mmDMA6_QM_PQ_STS0_0 0x5C80F0
+
+#define mmDMA6_QM_PQ_STS0_1 0x5C80F4
+
+#define mmDMA6_QM_PQ_STS0_2 0x5C80F8
+
+#define mmDMA6_QM_PQ_STS0_3 0x5C80FC
+
+#define mmDMA6_QM_PQ_STS1_0 0x5C8100
+
+#define mmDMA6_QM_PQ_STS1_1 0x5C8104
+
+#define mmDMA6_QM_PQ_STS1_2 0x5C8108
+
+#define mmDMA6_QM_PQ_STS1_3 0x5C810C
+
+#define mmDMA6_QM_CQ_CFG0_0 0x5C8110
+
+#define mmDMA6_QM_CQ_CFG0_1 0x5C8114
+
+#define mmDMA6_QM_CQ_CFG0_2 0x5C8118
+
+#define mmDMA6_QM_CQ_CFG0_3 0x5C811C
+
+#define mmDMA6_QM_CQ_CFG0_4 0x5C8120
+
+#define mmDMA6_QM_CQ_CFG1_0 0x5C8124
+
+#define mmDMA6_QM_CQ_CFG1_1 0x5C8128
+
+#define mmDMA6_QM_CQ_CFG1_2 0x5C812C
+
+#define mmDMA6_QM_CQ_CFG1_3 0x5C8130
+
+#define mmDMA6_QM_CQ_CFG1_4 0x5C8134
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_0 0x5C8138
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_1 0x5C813C
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_2 0x5C8140
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_3 0x5C8144
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_4 0x5C8148
+
+#define mmDMA6_QM_CQ_STS0_0 0x5C814C
+
+#define mmDMA6_QM_CQ_STS0_1 0x5C8150
+
+#define mmDMA6_QM_CQ_STS0_2 0x5C8154
+
+#define mmDMA6_QM_CQ_STS0_3 0x5C8158
+
+#define mmDMA6_QM_CQ_STS0_4 0x5C815C
+
+#define mmDMA6_QM_CQ_STS1_0 0x5C8160
+
+#define mmDMA6_QM_CQ_STS1_1 0x5C8164
+
+#define mmDMA6_QM_CQ_STS1_2 0x5C8168
+
+#define mmDMA6_QM_CQ_STS1_3 0x5C816C
+
+#define mmDMA6_QM_CQ_STS1_4 0x5C8170
+
+#define mmDMA6_QM_CQ_PTR_LO_0 0x5C8174
+
+#define mmDMA6_QM_CQ_PTR_HI_0 0x5C8178
+
+#define mmDMA6_QM_CQ_TSIZE_0 0x5C817C
+
+#define mmDMA6_QM_CQ_CTL_0 0x5C8180
+
+#define mmDMA6_QM_CQ_PTR_LO_1 0x5C8184
+
+#define mmDMA6_QM_CQ_PTR_HI_1 0x5C8188
+
+#define mmDMA6_QM_CQ_TSIZE_1 0x5C818C
+
+#define mmDMA6_QM_CQ_CTL_1 0x5C8190
+
+#define mmDMA6_QM_CQ_PTR_LO_2 0x5C8194
+
+#define mmDMA6_QM_CQ_PTR_HI_2 0x5C8198
+
+#define mmDMA6_QM_CQ_TSIZE_2 0x5C819C
+
+#define mmDMA6_QM_CQ_CTL_2 0x5C81A0
+
+#define mmDMA6_QM_CQ_PTR_LO_3 0x5C81A4
+
+#define mmDMA6_QM_CQ_PTR_HI_3 0x5C81A8
+
+#define mmDMA6_QM_CQ_TSIZE_3 0x5C81AC
+
+#define mmDMA6_QM_CQ_CTL_3 0x5C81B0
+
+#define mmDMA6_QM_CQ_PTR_LO_4 0x5C81B4
+
+#define mmDMA6_QM_CQ_PTR_HI_4 0x5C81B8
+
+#define mmDMA6_QM_CQ_TSIZE_4 0x5C81BC
+
+#define mmDMA6_QM_CQ_CTL_4 0x5C81C0
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_0 0x5C81C4
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_1 0x5C81C8
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_2 0x5C81CC
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_3 0x5C81D0
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_4 0x5C81D4
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_0 0x5C81D8
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_1 0x5C81DC
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_2 0x5C81E0
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_3 0x5C81E4
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_4 0x5C81E8
+
+#define mmDMA6_QM_CQ_TSIZE_STS_0 0x5C81EC
+
+#define mmDMA6_QM_CQ_TSIZE_STS_1 0x5C81F0
+
+#define mmDMA6_QM_CQ_TSIZE_STS_2 0x5C81F4
+
+#define mmDMA6_QM_CQ_TSIZE_STS_3 0x5C81F8
+
+#define mmDMA6_QM_CQ_TSIZE_STS_4 0x5C81FC
+
+#define mmDMA6_QM_CQ_CTL_STS_0 0x5C8200
+
+#define mmDMA6_QM_CQ_CTL_STS_1 0x5C8204
+
+#define mmDMA6_QM_CQ_CTL_STS_2 0x5C8208
+
+#define mmDMA6_QM_CQ_CTL_STS_3 0x5C820C
+
+#define mmDMA6_QM_CQ_CTL_STS_4 0x5C8210
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_0 0x5C8214
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_1 0x5C8218
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_2 0x5C821C
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_3 0x5C8220
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_4 0x5C8224
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_0 0x5C8228
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_1 0x5C822C
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_2 0x5C8230
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_3 0x5C8234
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_4 0x5C8238
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_0 0x5C823C
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_1 0x5C8240
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_2 0x5C8244
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_3 0x5C8248
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_4 0x5C824C
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_0 0x5C8250
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_1 0x5C8254
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_2 0x5C8258
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_3 0x5C825C
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_4 0x5C8260
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_0 0x5C8264
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_1 0x5C8268
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_2 0x5C826C
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_3 0x5C8270
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_4 0x5C8274
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_0 0x5C8278
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_1 0x5C827C
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 0x5C8280
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_3 0x5C8284
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_4 0x5C8288
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_0 0x5C828C
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_1 0x5C8290
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_2 0x5C8294
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_3 0x5C8298
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_4 0x5C829C
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_0 0x5C82A0
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_1 0x5C82A4
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_2 0x5C82A8
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_3 0x5C82AC
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_4 0x5C82B0
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_0 0x5C82B4
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_1 0x5C82B8
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_2 0x5C82BC
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_3 0x5C82C0
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_4 0x5C82C4
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_0 0x5C82C8
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_1 0x5C82CC
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_2 0x5C82D0
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_3 0x5C82D4
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_4 0x5C82D8
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5C82E0
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5C82E4
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5C82E8
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5C82EC
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5C82F0
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5C82F4
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5C82F8
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5C82FC
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x5C8300
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x5C8304
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_0 0x5C8308
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_1 0x5C830C
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_2 0x5C8310
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_3 0x5C8314
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_4 0x5C8318
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_0 0x5C831C
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_1 0x5C8320
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_2 0x5C8324
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_3 0x5C8328
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_4 0x5C832C
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_0 0x5C8330
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_1 0x5C8334
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_2 0x5C8338
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_3 0x5C833C
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_4 0x5C8340
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_0 0x5C8344
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_1 0x5C8348
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_2 0x5C834C
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_3 0x5C8350
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_4 0x5C8354
+
+#define mmDMA6_QM_CP_FENCE0_CNT_0 0x5C8358
+
+#define mmDMA6_QM_CP_FENCE0_CNT_1 0x5C835C
+
+#define mmDMA6_QM_CP_FENCE0_CNT_2 0x5C8360
+
+#define mmDMA6_QM_CP_FENCE0_CNT_3 0x5C8364
+
+#define mmDMA6_QM_CP_FENCE0_CNT_4 0x5C8368
+
+#define mmDMA6_QM_CP_FENCE1_CNT_0 0x5C836C
+
+#define mmDMA6_QM_CP_FENCE1_CNT_1 0x5C8370
+
+#define mmDMA6_QM_CP_FENCE1_CNT_2 0x5C8374
+
+#define mmDMA6_QM_CP_FENCE1_CNT_3 0x5C8378
+
+#define mmDMA6_QM_CP_FENCE1_CNT_4 0x5C837C
+
+#define mmDMA6_QM_CP_FENCE2_CNT_0 0x5C8380
+
+#define mmDMA6_QM_CP_FENCE2_CNT_1 0x5C8384
+
+#define mmDMA6_QM_CP_FENCE2_CNT_2 0x5C8388
+
+#define mmDMA6_QM_CP_FENCE2_CNT_3 0x5C838C
+
+#define mmDMA6_QM_CP_FENCE2_CNT_4 0x5C8390
+
+#define mmDMA6_QM_CP_FENCE3_CNT_0 0x5C8394
+
+#define mmDMA6_QM_CP_FENCE3_CNT_1 0x5C8398
+
+#define mmDMA6_QM_CP_FENCE3_CNT_2 0x5C839C
+
+#define mmDMA6_QM_CP_FENCE3_CNT_3 0x5C83A0
+
+#define mmDMA6_QM_CP_FENCE3_CNT_4 0x5C83A4
+
+#define mmDMA6_QM_CP_STS_0 0x5C83A8
+
+#define mmDMA6_QM_CP_STS_1 0x5C83AC
+
+#define mmDMA6_QM_CP_STS_2 0x5C83B0
+
+#define mmDMA6_QM_CP_STS_3 0x5C83B4
+
+#define mmDMA6_QM_CP_STS_4 0x5C83B8
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_0 0x5C83BC
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_1 0x5C83C0
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_2 0x5C83C4
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_3 0x5C83C8
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_4 0x5C83CC
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_0 0x5C83D0
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_1 0x5C83D4
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_2 0x5C83D8
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_3 0x5C83DC
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_4 0x5C83E0
+
+#define mmDMA6_QM_CP_BARRIER_CFG_0 0x5C83F4
+
+#define mmDMA6_QM_CP_BARRIER_CFG_1 0x5C83F8
+
+#define mmDMA6_QM_CP_BARRIER_CFG_2 0x5C83FC
+
+#define mmDMA6_QM_CP_BARRIER_CFG_3 0x5C8400
+
+#define mmDMA6_QM_CP_BARRIER_CFG_4 0x5C8404
+
+#define mmDMA6_QM_CP_DBG_0_0 0x5C8408
+
+#define mmDMA6_QM_CP_DBG_0_1 0x5C840C
+
+#define mmDMA6_QM_CP_DBG_0_2 0x5C8410
+
+#define mmDMA6_QM_CP_DBG_0_3 0x5C8414
+
+#define mmDMA6_QM_CP_DBG_0_4 0x5C8418
+
+#define mmDMA6_QM_CP_ARUSER_31_11_0 0x5C841C
+
+#define mmDMA6_QM_CP_ARUSER_31_11_1 0x5C8420
+
+#define mmDMA6_QM_CP_ARUSER_31_11_2 0x5C8424
+
+#define mmDMA6_QM_CP_ARUSER_31_11_3 0x5C8428
+
+#define mmDMA6_QM_CP_ARUSER_31_11_4 0x5C842C
+
+#define mmDMA6_QM_CP_AWUSER_31_11_0 0x5C8430
+
+#define mmDMA6_QM_CP_AWUSER_31_11_1 0x5C8434
+
+#define mmDMA6_QM_CP_AWUSER_31_11_2 0x5C8438
+
+#define mmDMA6_QM_CP_AWUSER_31_11_3 0x5C843C
+
+#define mmDMA6_QM_CP_AWUSER_31_11_4 0x5C8440
+
+#define mmDMA6_QM_ARB_CFG_0 0x5C8A00
+
+#define mmDMA6_QM_ARB_CHOISE_Q_PUSH 0x5C8A04
+
+#define mmDMA6_QM_ARB_WRR_WEIGHT_0 0x5C8A08
+
+#define mmDMA6_QM_ARB_WRR_WEIGHT_1 0x5C8A0C
+
+#define mmDMA6_QM_ARB_WRR_WEIGHT_2 0x5C8A10
+
+#define mmDMA6_QM_ARB_WRR_WEIGHT_3 0x5C8A14
+
+#define mmDMA6_QM_ARB_CFG_1 0x5C8A18
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_0 0x5C8A20
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_1 0x5C8A24
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_2 0x5C8A28
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_3 0x5C8A2C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_4 0x5C8A30
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_5 0x5C8A34
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_6 0x5C8A38
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_7 0x5C8A3C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_8 0x5C8A40
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_9 0x5C8A44
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_10 0x5C8A48
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_11 0x5C8A4C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_12 0x5C8A50
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_13 0x5C8A54
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_14 0x5C8A58
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_15 0x5C8A5C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_16 0x5C8A60
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_17 0x5C8A64
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_18 0x5C8A68
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_19 0x5C8A6C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_20 0x5C8A70
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_21 0x5C8A74
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_22 0x5C8A78
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_23 0x5C8A7C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_24 0x5C8A80
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_25 0x5C8A84
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_26 0x5C8A88
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_27 0x5C8A8C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_28 0x5C8A90
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_29 0x5C8A94
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_30 0x5C8A98
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_31 0x5C8A9C
+
+#define mmDMA6_QM_ARB_MST_CRED_INC 0x5C8AA0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x5C8AA4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x5C8AA8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x5C8AAC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x5C8AB0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x5C8AB4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x5C8AB8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x5C8ABC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x5C8AC0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x5C8AC4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x5C8AC8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x5C8ACC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x5C8AD0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x5C8AD4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x5C8AD8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x5C8ADC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x5C8AE0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x5C8AE4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x5C8AE8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x5C8AEC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x5C8AF0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x5C8AF4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x5C8AF8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x5C8AFC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x5C8B00
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x5C8B04
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x5C8B08
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x5C8B0C
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x5C8B10
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x5C8B14
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x5C8B18
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x5C8B1C
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x5C8B20
+
+#define mmDMA6_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x5C8B28
+
+#define mmDMA6_QM_ARB_MST_SLAVE_EN 0x5C8B2C
+
+#define mmDMA6_QM_ARB_MST_QUIET_PER 0x5C8B34
+
+#define mmDMA6_QM_ARB_SLV_CHOISE_WDT 0x5C8B38
+
+#define mmDMA6_QM_ARB_SLV_ID 0x5C8B3C
+
+#define mmDMA6_QM_ARB_MSG_MAX_INFLIGHT 0x5C8B44
+
+#define mmDMA6_QM_ARB_MSG_AWUSER_31_11 0x5C8B48
+
+#define mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP 0x5C8B4C
+
+#define mmDMA6_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x5C8B50
+
+#define mmDMA6_QM_ARB_BASE_LO 0x5C8B54
+
+#define mmDMA6_QM_ARB_BASE_HI 0x5C8B58
+
+#define mmDMA6_QM_ARB_STATE_STS 0x5C8B80
+
+#define mmDMA6_QM_ARB_CHOISE_FULLNESS_STS 0x5C8B84
+
+#define mmDMA6_QM_ARB_MSG_STS 0x5C8B88
+
+#define mmDMA6_QM_ARB_SLV_CHOISE_Q_HEAD 0x5C8B8C
+
+#define mmDMA6_QM_ARB_ERR_CAUSE 0x5C8B9C
+
+#define mmDMA6_QM_ARB_ERR_MSG_EN 0x5C8BA0
+
+#define mmDMA6_QM_ARB_ERR_STS_DRP 0x5C8BA8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_0 0x5C8BB0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_1 0x5C8BB4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_2 0x5C8BB8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_3 0x5C8BBC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_4 0x5C8BC0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_5 0x5C8BC4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_6 0x5C8BC8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_7 0x5C8BCC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_8 0x5C8BD0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_9 0x5C8BD4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_10 0x5C8BD8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_11 0x5C8BDC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_12 0x5C8BE0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_13 0x5C8BE4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_14 0x5C8BE8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_15 0x5C8BEC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_16 0x5C8BF0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_17 0x5C8BF4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_18 0x5C8BF8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_19 0x5C8BFC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_20 0x5C8C00
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_21 0x5C8C04
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_22 0x5C8C08
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_23 0x5C8C0C
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_24 0x5C8C10
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_25 0x5C8C14
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_26 0x5C8C18
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_27 0x5C8C1C
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_28 0x5C8C20
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_29 0x5C8C24
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_30 0x5C8C28
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_31 0x5C8C2C
+
+#define mmDMA6_QM_CGM_CFG 0x5C8C70
+
+#define mmDMA6_QM_CGM_STS 0x5C8C74
+
+#define mmDMA6_QM_CGM_CFG1 0x5C8C78
+
+#define mmDMA6_QM_LOCAL_RANGE_BASE 0x5C8C80
+
+#define mmDMA6_QM_LOCAL_RANGE_SIZE 0x5C8C84
+
+#define mmDMA6_QM_CSMR_STRICT_PRIO_CFG 0x5C8C90
+
+#define mmDMA6_QM_HBW_RD_RATE_LIM_CFG_1 0x5C8C94
+
+#define mmDMA6_QM_LBW_WR_RATE_LIM_CFG_0 0x5C8C98
+
+#define mmDMA6_QM_LBW_WR_RATE_LIM_CFG_1 0x5C8C9C
+
+#define mmDMA6_QM_HBW_RD_RATE_LIM_CFG_0 0x5C8CA0
+
+#define mmDMA6_QM_GLBL_AXCACHE 0x5C8CA4
+
+#define mmDMA6_QM_IND_GW_APB_CFG 0x5C8CB0
+
+#define mmDMA6_QM_IND_GW_APB_WDATA 0x5C8CB4
+
+#define mmDMA6_QM_IND_GW_APB_RDATA 0x5C8CB8
+
+#define mmDMA6_QM_IND_GW_APB_STATUS 0x5C8CBC
+
+#define mmDMA6_QM_GLBL_ERR_ADDR_LO 0x5C8CD0
+
+#define mmDMA6_QM_GLBL_ERR_ADDR_HI 0x5C8CD4
+
+#define mmDMA6_QM_GLBL_ERR_WDATA 0x5C8CD8
+
+#define mmDMA6_QM_GLBL_MEM_INIT_BUSY 0x5C8D00
+
+#endif /* ASIC_REG_DMA6_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
new file mode 100644
index 000000000000..8dd705d20195
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA7_CORE_REGS_H_
+#define ASIC_REG_DMA7_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA7_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA7_CORE_CFG_0 0x5E0000
+
+#define mmDMA7_CORE_CFG_1 0x5E0004
+
+#define mmDMA7_CORE_LBW_MAX_OUTSTAND 0x5E0008
+
+#define mmDMA7_CORE_SRC_BASE_LO 0x5E0014
+
+#define mmDMA7_CORE_SRC_BASE_HI 0x5E0018
+
+#define mmDMA7_CORE_DST_BASE_LO 0x5E001C
+
+#define mmDMA7_CORE_DST_BASE_HI 0x5E0020
+
+#define mmDMA7_CORE_SRC_TSIZE_1 0x5E002C
+
+#define mmDMA7_CORE_SRC_STRIDE_1 0x5E0030
+
+#define mmDMA7_CORE_SRC_TSIZE_2 0x5E0034
+
+#define mmDMA7_CORE_SRC_STRIDE_2 0x5E0038
+
+#define mmDMA7_CORE_SRC_TSIZE_3 0x5E003C
+
+#define mmDMA7_CORE_SRC_STRIDE_3 0x5E0040
+
+#define mmDMA7_CORE_SRC_TSIZE_4 0x5E0044
+
+#define mmDMA7_CORE_SRC_STRIDE_4 0x5E0048
+
+#define mmDMA7_CORE_SRC_TSIZE_0 0x5E004C
+
+#define mmDMA7_CORE_DST_TSIZE_1 0x5E0054
+
+#define mmDMA7_CORE_DST_STRIDE_1 0x5E0058
+
+#define mmDMA7_CORE_DST_TSIZE_2 0x5E005C
+
+#define mmDMA7_CORE_DST_STRIDE_2 0x5E0060
+
+#define mmDMA7_CORE_DST_TSIZE_3 0x5E0064
+
+#define mmDMA7_CORE_DST_STRIDE_3 0x5E0068
+
+#define mmDMA7_CORE_DST_TSIZE_4 0x5E006C
+
+#define mmDMA7_CORE_DST_STRIDE_4 0x5E0070
+
+#define mmDMA7_CORE_DST_TSIZE_0 0x5E0074
+
+#define mmDMA7_CORE_COMMIT 0x5E0078
+
+#define mmDMA7_CORE_WR_COMP_WDATA 0x5E007C
+
+#define mmDMA7_CORE_WR_COMP_ADDR_LO 0x5E0080
+
+#define mmDMA7_CORE_WR_COMP_ADDR_HI 0x5E0084
+
+#define mmDMA7_CORE_WR_COMP_AWUSER_31_11 0x5E0088
+
+#define mmDMA7_CORE_TE_NUMROWS 0x5E0094
+
+#define mmDMA7_CORE_PROT 0x5E00B8
+
+#define mmDMA7_CORE_SECURE_PROPS 0x5E00F0
+
+#define mmDMA7_CORE_NON_SECURE_PROPS 0x5E00F4
+
+#define mmDMA7_CORE_RD_MAX_OUTSTAND 0x5E0100
+
+#define mmDMA7_CORE_RD_MAX_SIZE 0x5E0104
+
+#define mmDMA7_CORE_RD_ARCACHE 0x5E0108
+
+#define mmDMA7_CORE_RD_ARUSER_31_11 0x5E0110
+
+#define mmDMA7_CORE_RD_INFLIGHTS 0x5E0114
+
+#define mmDMA7_CORE_WR_MAX_OUTSTAND 0x5E0120
+
+#define mmDMA7_CORE_WR_MAX_AWID 0x5E0124
+
+#define mmDMA7_CORE_WR_AWCACHE 0x5E0128
+
+#define mmDMA7_CORE_WR_AWUSER_31_11 0x5E0130
+
+#define mmDMA7_CORE_WR_INFLIGHTS 0x5E0134
+
+#define mmDMA7_CORE_RD_RATE_LIM_CFG_0 0x5E0150
+
+#define mmDMA7_CORE_RD_RATE_LIM_CFG_1 0x5E0154
+
+#define mmDMA7_CORE_WR_RATE_LIM_CFG_0 0x5E0158
+
+#define mmDMA7_CORE_WR_RATE_LIM_CFG_1 0x5E015C
+
+#define mmDMA7_CORE_ERR_CFG 0x5E0160
+
+#define mmDMA7_CORE_ERR_CAUSE 0x5E0164
+
+#define mmDMA7_CORE_ERRMSG_ADDR_LO 0x5E0170
+
+#define mmDMA7_CORE_ERRMSG_ADDR_HI 0x5E0174
+
+#define mmDMA7_CORE_ERRMSG_WDATA 0x5E0178
+
+#define mmDMA7_CORE_STS0 0x5E0190
+
+#define mmDMA7_CORE_STS1 0x5E0194
+
+#define mmDMA7_CORE_RD_DBGMEM_ADD 0x5E0200
+
+#define mmDMA7_CORE_RD_DBGMEM_DATA_WR 0x5E0204
+
+#define mmDMA7_CORE_RD_DBGMEM_DATA_RD 0x5E0208
+
+#define mmDMA7_CORE_RD_DBGMEM_CTRL 0x5E020C
+
+#define mmDMA7_CORE_RD_DBGMEM_RC 0x5E0210
+
+#define mmDMA7_CORE_DBG_HBW_AXI_AR_CNT 0x5E0220
+
+#define mmDMA7_CORE_DBG_HBW_AXI_AW_CNT 0x5E0224
+
+#define mmDMA7_CORE_DBG_LBW_AXI_AW_CNT 0x5E0228
+
+#define mmDMA7_CORE_DBG_DESC_CNT 0x5E022C
+
+#define mmDMA7_CORE_DBG_STS 0x5E0230
+
+#define mmDMA7_CORE_DBG_RD_DESC_ID 0x5E0234
+
+#define mmDMA7_CORE_DBG_WR_DESC_ID 0x5E0238
+
+#endif /* ASIC_REG_DMA7_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
new file mode 100644
index 000000000000..d6c631f63e3e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA7_QM_REGS_H_
+#define ASIC_REG_DMA7_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA7_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA7_QM_GLBL_CFG0 0x5E8000
+
+#define mmDMA7_QM_GLBL_CFG1 0x5E8004
+
+#define mmDMA7_QM_GLBL_PROT 0x5E8008
+
+#define mmDMA7_QM_GLBL_ERR_CFG 0x5E800C
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_0 0x5E8010
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_1 0x5E8014
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_2 0x5E8018
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_3 0x5E801C
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_4 0x5E8020
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_0 0x5E8024
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_1 0x5E8028
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_2 0x5E802C
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_3 0x5E8030
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_4 0x5E8034
+
+#define mmDMA7_QM_GLBL_STS0 0x5E8038
+
+#define mmDMA7_QM_GLBL_STS1_0 0x5E8040
+
+#define mmDMA7_QM_GLBL_STS1_1 0x5E8044
+
+#define mmDMA7_QM_GLBL_STS1_2 0x5E8048
+
+#define mmDMA7_QM_GLBL_STS1_3 0x5E804C
+
+#define mmDMA7_QM_GLBL_STS1_4 0x5E8050
+
+#define mmDMA7_QM_GLBL_MSG_EN_0 0x5E8054
+
+#define mmDMA7_QM_GLBL_MSG_EN_1 0x5E8058
+
+#define mmDMA7_QM_GLBL_MSG_EN_2 0x5E805C
+
+#define mmDMA7_QM_GLBL_MSG_EN_3 0x5E8060
+
+#define mmDMA7_QM_GLBL_MSG_EN_4 0x5E8068
+
+#define mmDMA7_QM_PQ_BASE_LO_0 0x5E8070
+
+#define mmDMA7_QM_PQ_BASE_LO_1 0x5E8074
+
+#define mmDMA7_QM_PQ_BASE_LO_2 0x5E8078
+
+#define mmDMA7_QM_PQ_BASE_LO_3 0x5E807C
+
+#define mmDMA7_QM_PQ_BASE_HI_0 0x5E8080
+
+#define mmDMA7_QM_PQ_BASE_HI_1 0x5E8084
+
+#define mmDMA7_QM_PQ_BASE_HI_2 0x5E8088
+
+#define mmDMA7_QM_PQ_BASE_HI_3 0x5E808C
+
+#define mmDMA7_QM_PQ_SIZE_0 0x5E8090
+
+#define mmDMA7_QM_PQ_SIZE_1 0x5E8094
+
+#define mmDMA7_QM_PQ_SIZE_2 0x5E8098
+
+#define mmDMA7_QM_PQ_SIZE_3 0x5E809C
+
+#define mmDMA7_QM_PQ_PI_0 0x5E80A0
+
+#define mmDMA7_QM_PQ_PI_1 0x5E80A4
+
+#define mmDMA7_QM_PQ_PI_2 0x5E80A8
+
+#define mmDMA7_QM_PQ_PI_3 0x5E80AC
+
+#define mmDMA7_QM_PQ_CI_0 0x5E80B0
+
+#define mmDMA7_QM_PQ_CI_1 0x5E80B4
+
+#define mmDMA7_QM_PQ_CI_2 0x5E80B8
+
+#define mmDMA7_QM_PQ_CI_3 0x5E80BC
+
+#define mmDMA7_QM_PQ_CFG0_0 0x5E80C0
+
+#define mmDMA7_QM_PQ_CFG0_1 0x5E80C4
+
+#define mmDMA7_QM_PQ_CFG0_2 0x5E80C8
+
+#define mmDMA7_QM_PQ_CFG0_3 0x5E80CC
+
+#define mmDMA7_QM_PQ_CFG1_0 0x5E80D0
+
+#define mmDMA7_QM_PQ_CFG1_1 0x5E80D4
+
+#define mmDMA7_QM_PQ_CFG1_2 0x5E80D8
+
+#define mmDMA7_QM_PQ_CFG1_3 0x5E80DC
+
+#define mmDMA7_QM_PQ_ARUSER_31_11_0 0x5E80E0
+
+#define mmDMA7_QM_PQ_ARUSER_31_11_1 0x5E80E4
+
+#define mmDMA7_QM_PQ_ARUSER_31_11_2 0x5E80E8
+
+#define mmDMA7_QM_PQ_ARUSER_31_11_3 0x5E80EC
+
+#define mmDMA7_QM_PQ_STS0_0 0x5E80F0
+
+#define mmDMA7_QM_PQ_STS0_1 0x5E80F4
+
+#define mmDMA7_QM_PQ_STS0_2 0x5E80F8
+
+#define mmDMA7_QM_PQ_STS0_3 0x5E80FC
+
+#define mmDMA7_QM_PQ_STS1_0 0x5E8100
+
+#define mmDMA7_QM_PQ_STS1_1 0x5E8104
+
+#define mmDMA7_QM_PQ_STS1_2 0x5E8108
+
+#define mmDMA7_QM_PQ_STS1_3 0x5E810C
+
+#define mmDMA7_QM_CQ_CFG0_0 0x5E8110
+
+#define mmDMA7_QM_CQ_CFG0_1 0x5E8114
+
+#define mmDMA7_QM_CQ_CFG0_2 0x5E8118
+
+#define mmDMA7_QM_CQ_CFG0_3 0x5E811C
+
+#define mmDMA7_QM_CQ_CFG0_4 0x5E8120
+
+#define mmDMA7_QM_CQ_CFG1_0 0x5E8124
+
+#define mmDMA7_QM_CQ_CFG1_1 0x5E8128
+
+#define mmDMA7_QM_CQ_CFG1_2 0x5E812C
+
+#define mmDMA7_QM_CQ_CFG1_3 0x5E8130
+
+#define mmDMA7_QM_CQ_CFG1_4 0x5E8134
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_0 0x5E8138
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_1 0x5E813C
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_2 0x5E8140
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_3 0x5E8144
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_4 0x5E8148
+
+#define mmDMA7_QM_CQ_STS0_0 0x5E814C
+
+#define mmDMA7_QM_CQ_STS0_1 0x5E8150
+
+#define mmDMA7_QM_CQ_STS0_2 0x5E8154
+
+#define mmDMA7_QM_CQ_STS0_3 0x5E8158
+
+#define mmDMA7_QM_CQ_STS0_4 0x5E815C
+
+#define mmDMA7_QM_CQ_STS1_0 0x5E8160
+
+#define mmDMA7_QM_CQ_STS1_1 0x5E8164
+
+#define mmDMA7_QM_CQ_STS1_2 0x5E8168
+
+#define mmDMA7_QM_CQ_STS1_3 0x5E816C
+
+#define mmDMA7_QM_CQ_STS1_4 0x5E8170
+
+#define mmDMA7_QM_CQ_PTR_LO_0 0x5E8174
+
+#define mmDMA7_QM_CQ_PTR_HI_0 0x5E8178
+
+#define mmDMA7_QM_CQ_TSIZE_0 0x5E817C
+
+#define mmDMA7_QM_CQ_CTL_0 0x5E8180
+
+#define mmDMA7_QM_CQ_PTR_LO_1 0x5E8184
+
+#define mmDMA7_QM_CQ_PTR_HI_1 0x5E8188
+
+#define mmDMA7_QM_CQ_TSIZE_1 0x5E818C
+
+#define mmDMA7_QM_CQ_CTL_1 0x5E8190
+
+#define mmDMA7_QM_CQ_PTR_LO_2 0x5E8194
+
+#define mmDMA7_QM_CQ_PTR_HI_2 0x5E8198
+
+#define mmDMA7_QM_CQ_TSIZE_2 0x5E819C
+
+#define mmDMA7_QM_CQ_CTL_2 0x5E81A0
+
+#define mmDMA7_QM_CQ_PTR_LO_3 0x5E81A4
+
+#define mmDMA7_QM_CQ_PTR_HI_3 0x5E81A8
+
+#define mmDMA7_QM_CQ_TSIZE_3 0x5E81AC
+
+#define mmDMA7_QM_CQ_CTL_3 0x5E81B0
+
+#define mmDMA7_QM_CQ_PTR_LO_4 0x5E81B4
+
+#define mmDMA7_QM_CQ_PTR_HI_4 0x5E81B8
+
+#define mmDMA7_QM_CQ_TSIZE_4 0x5E81BC
+
+#define mmDMA7_QM_CQ_CTL_4 0x5E81C0
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_0 0x5E81C4
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_1 0x5E81C8
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_2 0x5E81CC
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_3 0x5E81D0
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_4 0x5E81D4
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_0 0x5E81D8
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_1 0x5E81DC
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_2 0x5E81E0
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_3 0x5E81E4
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_4 0x5E81E8
+
+#define mmDMA7_QM_CQ_TSIZE_STS_0 0x5E81EC
+
+#define mmDMA7_QM_CQ_TSIZE_STS_1 0x5E81F0
+
+#define mmDMA7_QM_CQ_TSIZE_STS_2 0x5E81F4
+
+#define mmDMA7_QM_CQ_TSIZE_STS_3 0x5E81F8
+
+#define mmDMA7_QM_CQ_TSIZE_STS_4 0x5E81FC
+
+#define mmDMA7_QM_CQ_CTL_STS_0 0x5E8200
+
+#define mmDMA7_QM_CQ_CTL_STS_1 0x5E8204
+
+#define mmDMA7_QM_CQ_CTL_STS_2 0x5E8208
+
+#define mmDMA7_QM_CQ_CTL_STS_3 0x5E820C
+
+#define mmDMA7_QM_CQ_CTL_STS_4 0x5E8210
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_0 0x5E8214
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_1 0x5E8218
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_2 0x5E821C
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_3 0x5E8220
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_4 0x5E8224
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_0 0x5E8228
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_1 0x5E822C
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_2 0x5E8230
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_3 0x5E8234
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_4 0x5E8238
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_0 0x5E823C
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_1 0x5E8240
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_2 0x5E8244
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_3 0x5E8248
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_4 0x5E824C
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_0 0x5E8250
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_1 0x5E8254
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_2 0x5E8258
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_3 0x5E825C
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_4 0x5E8260
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_0 0x5E8264
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_1 0x5E8268
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_2 0x5E826C
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_3 0x5E8270
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_4 0x5E8274
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_0 0x5E8278
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_1 0x5E827C
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 0x5E8280
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_3 0x5E8284
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_4 0x5E8288
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_0 0x5E828C
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_1 0x5E8290
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_2 0x5E8294
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_3 0x5E8298
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_4 0x5E829C
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_0 0x5E82A0
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_1 0x5E82A4
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_2 0x5E82A8
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_3 0x5E82AC
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_4 0x5E82B0
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_0 0x5E82B4
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_1 0x5E82B8
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_2 0x5E82BC
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_3 0x5E82C0
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_4 0x5E82C4
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_0 0x5E82C8
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_1 0x5E82CC
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_2 0x5E82D0
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_3 0x5E82D4
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_4 0x5E82D8
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5E82E0
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5E82E4
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5E82E8
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5E82EC
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5E82F0
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5E82F4
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5E82F8
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5E82FC
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x5E8300
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x5E8304
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_0 0x5E8308
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_1 0x5E830C
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_2 0x5E8310
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_3 0x5E8314
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_4 0x5E8318
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_0 0x5E831C
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_1 0x5E8320
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_2 0x5E8324
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_3 0x5E8328
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_4 0x5E832C
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_0 0x5E8330
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_1 0x5E8334
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_2 0x5E8338
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_3 0x5E833C
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_4 0x5E8340
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_0 0x5E8344
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_1 0x5E8348
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_2 0x5E834C
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_3 0x5E8350
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_4 0x5E8354
+
+#define mmDMA7_QM_CP_FENCE0_CNT_0 0x5E8358
+
+#define mmDMA7_QM_CP_FENCE0_CNT_1 0x5E835C
+
+#define mmDMA7_QM_CP_FENCE0_CNT_2 0x5E8360
+
+#define mmDMA7_QM_CP_FENCE0_CNT_3 0x5E8364
+
+#define mmDMA7_QM_CP_FENCE0_CNT_4 0x5E8368
+
+#define mmDMA7_QM_CP_FENCE1_CNT_0 0x5E836C
+
+#define mmDMA7_QM_CP_FENCE1_CNT_1 0x5E8370
+
+#define mmDMA7_QM_CP_FENCE1_CNT_2 0x5E8374
+
+#define mmDMA7_QM_CP_FENCE1_CNT_3 0x5E8378
+
+#define mmDMA7_QM_CP_FENCE1_CNT_4 0x5E837C
+
+#define mmDMA7_QM_CP_FENCE2_CNT_0 0x5E8380
+
+#define mmDMA7_QM_CP_FENCE2_CNT_1 0x5E8384
+
+#define mmDMA7_QM_CP_FENCE2_CNT_2 0x5E8388
+
+#define mmDMA7_QM_CP_FENCE2_CNT_3 0x5E838C
+
+#define mmDMA7_QM_CP_FENCE2_CNT_4 0x5E8390
+
+#define mmDMA7_QM_CP_FENCE3_CNT_0 0x5E8394
+
+#define mmDMA7_QM_CP_FENCE3_CNT_1 0x5E8398
+
+#define mmDMA7_QM_CP_FENCE3_CNT_2 0x5E839C
+
+#define mmDMA7_QM_CP_FENCE3_CNT_3 0x5E83A0
+
+#define mmDMA7_QM_CP_FENCE3_CNT_4 0x5E83A4
+
+#define mmDMA7_QM_CP_STS_0 0x5E83A8
+
+#define mmDMA7_QM_CP_STS_1 0x5E83AC
+
+#define mmDMA7_QM_CP_STS_2 0x5E83B0
+
+#define mmDMA7_QM_CP_STS_3 0x5E83B4
+
+#define mmDMA7_QM_CP_STS_4 0x5E83B8
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_0 0x5E83BC
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_1 0x5E83C0
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_2 0x5E83C4
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_3 0x5E83C8
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_4 0x5E83CC
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_0 0x5E83D0
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_1 0x5E83D4
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_2 0x5E83D8
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_3 0x5E83DC
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_4 0x5E83E0
+
+#define mmDMA7_QM_CP_BARRIER_CFG_0 0x5E83F4
+
+#define mmDMA7_QM_CP_BARRIER_CFG_1 0x5E83F8
+
+#define mmDMA7_QM_CP_BARRIER_CFG_2 0x5E83FC
+
+#define mmDMA7_QM_CP_BARRIER_CFG_3 0x5E8400
+
+#define mmDMA7_QM_CP_BARRIER_CFG_4 0x5E8404
+
+#define mmDMA7_QM_CP_DBG_0_0 0x5E8408
+
+#define mmDMA7_QM_CP_DBG_0_1 0x5E840C
+
+#define mmDMA7_QM_CP_DBG_0_2 0x5E8410
+
+#define mmDMA7_QM_CP_DBG_0_3 0x5E8414
+
+#define mmDMA7_QM_CP_DBG_0_4 0x5E8418
+
+#define mmDMA7_QM_CP_ARUSER_31_11_0 0x5E841C
+
+#define mmDMA7_QM_CP_ARUSER_31_11_1 0x5E8420
+
+#define mmDMA7_QM_CP_ARUSER_31_11_2 0x5E8424
+
+#define mmDMA7_QM_CP_ARUSER_31_11_3 0x5E8428
+
+#define mmDMA7_QM_CP_ARUSER_31_11_4 0x5E842C
+
+#define mmDMA7_QM_CP_AWUSER_31_11_0 0x5E8430
+
+#define mmDMA7_QM_CP_AWUSER_31_11_1 0x5E8434
+
+#define mmDMA7_QM_CP_AWUSER_31_11_2 0x5E8438
+
+#define mmDMA7_QM_CP_AWUSER_31_11_3 0x5E843C
+
+#define mmDMA7_QM_CP_AWUSER_31_11_4 0x5E8440
+
+#define mmDMA7_QM_ARB_CFG_0 0x5E8A00
+
+#define mmDMA7_QM_ARB_CHOISE_Q_PUSH 0x5E8A04
+
+#define mmDMA7_QM_ARB_WRR_WEIGHT_0 0x5E8A08
+
+#define mmDMA7_QM_ARB_WRR_WEIGHT_1 0x5E8A0C
+
+#define mmDMA7_QM_ARB_WRR_WEIGHT_2 0x5E8A10
+
+#define mmDMA7_QM_ARB_WRR_WEIGHT_3 0x5E8A14
+
+#define mmDMA7_QM_ARB_CFG_1 0x5E8A18
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_0 0x5E8A20
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_1 0x5E8A24
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_2 0x5E8A28
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_3 0x5E8A2C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_4 0x5E8A30
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_5 0x5E8A34
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_6 0x5E8A38
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_7 0x5E8A3C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_8 0x5E8A40
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_9 0x5E8A44
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_10 0x5E8A48
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_11 0x5E8A4C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_12 0x5E8A50
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_13 0x5E8A54
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_14 0x5E8A58
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_15 0x5E8A5C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_16 0x5E8A60
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_17 0x5E8A64
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_18 0x5E8A68
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_19 0x5E8A6C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_20 0x5E8A70
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_21 0x5E8A74
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_22 0x5E8A78
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_23 0x5E8A7C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_24 0x5E8A80
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_25 0x5E8A84
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_26 0x5E8A88
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_27 0x5E8A8C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_28 0x5E8A90
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_29 0x5E8A94
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_30 0x5E8A98
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_31 0x5E8A9C
+
+#define mmDMA7_QM_ARB_MST_CRED_INC 0x5E8AA0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x5E8AA4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x5E8AA8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x5E8AAC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x5E8AB0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x5E8AB4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x5E8AB8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x5E8ABC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x5E8AC0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x5E8AC4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x5E8AC8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x5E8ACC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x5E8AD0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x5E8AD4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x5E8AD8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x5E8ADC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x5E8AE0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x5E8AE4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x5E8AE8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x5E8AEC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x5E8AF0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x5E8AF4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x5E8AF8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x5E8AFC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x5E8B00
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x5E8B04
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x5E8B08
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x5E8B0C
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x5E8B10
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x5E8B14
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x5E8B18
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x5E8B1C
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x5E8B20
+
+#define mmDMA7_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x5E8B28
+
+#define mmDMA7_QM_ARB_MST_SLAVE_EN 0x5E8B2C
+
+#define mmDMA7_QM_ARB_MST_QUIET_PER 0x5E8B34
+
+#define mmDMA7_QM_ARB_SLV_CHOISE_WDT 0x5E8B38
+
+#define mmDMA7_QM_ARB_SLV_ID 0x5E8B3C
+
+#define mmDMA7_QM_ARB_MSG_MAX_INFLIGHT 0x5E8B44
+
+#define mmDMA7_QM_ARB_MSG_AWUSER_31_11 0x5E8B48
+
+#define mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP 0x5E8B4C
+
+#define mmDMA7_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x5E8B50
+
+#define mmDMA7_QM_ARB_BASE_LO 0x5E8B54
+
+#define mmDMA7_QM_ARB_BASE_HI 0x5E8B58
+
+#define mmDMA7_QM_ARB_STATE_STS 0x5E8B80
+
+#define mmDMA7_QM_ARB_CHOISE_FULLNESS_STS 0x5E8B84
+
+#define mmDMA7_QM_ARB_MSG_STS 0x5E8B88
+
+#define mmDMA7_QM_ARB_SLV_CHOISE_Q_HEAD 0x5E8B8C
+
+#define mmDMA7_QM_ARB_ERR_CAUSE 0x5E8B9C
+
+#define mmDMA7_QM_ARB_ERR_MSG_EN 0x5E8BA0
+
+#define mmDMA7_QM_ARB_ERR_STS_DRP 0x5E8BA8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_0 0x5E8BB0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_1 0x5E8BB4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_2 0x5E8BB8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_3 0x5E8BBC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_4 0x5E8BC0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_5 0x5E8BC4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_6 0x5E8BC8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_7 0x5E8BCC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_8 0x5E8BD0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_9 0x5E8BD4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_10 0x5E8BD8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_11 0x5E8BDC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_12 0x5E8BE0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_13 0x5E8BE4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_14 0x5E8BE8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_15 0x5E8BEC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_16 0x5E8BF0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_17 0x5E8BF4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_18 0x5E8BF8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_19 0x5E8BFC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_20 0x5E8C00
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_21 0x5E8C04
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_22 0x5E8C08
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_23 0x5E8C0C
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_24 0x5E8C10
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_25 0x5E8C14
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_26 0x5E8C18
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_27 0x5E8C1C
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_28 0x5E8C20
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_29 0x5E8C24
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_30 0x5E8C28
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_31 0x5E8C2C
+
+#define mmDMA7_QM_CGM_CFG 0x5E8C70
+
+#define mmDMA7_QM_CGM_STS 0x5E8C74
+
+#define mmDMA7_QM_CGM_CFG1 0x5E8C78
+
+#define mmDMA7_QM_LOCAL_RANGE_BASE 0x5E8C80
+
+#define mmDMA7_QM_LOCAL_RANGE_SIZE 0x5E8C84
+
+#define mmDMA7_QM_CSMR_STRICT_PRIO_CFG 0x5E8C90
+
+#define mmDMA7_QM_HBW_RD_RATE_LIM_CFG_1 0x5E8C94
+
+#define mmDMA7_QM_LBW_WR_RATE_LIM_CFG_0 0x5E8C98
+
+#define mmDMA7_QM_LBW_WR_RATE_LIM_CFG_1 0x5E8C9C
+
+#define mmDMA7_QM_HBW_RD_RATE_LIM_CFG_0 0x5E8CA0
+
+#define mmDMA7_QM_GLBL_AXCACHE 0x5E8CA4
+
+#define mmDMA7_QM_IND_GW_APB_CFG 0x5E8CB0
+
+#define mmDMA7_QM_IND_GW_APB_WDATA 0x5E8CB4
+
+#define mmDMA7_QM_IND_GW_APB_RDATA 0x5E8CB8
+
+#define mmDMA7_QM_IND_GW_APB_STATUS 0x5E8CBC
+
+#define mmDMA7_QM_GLBL_ERR_ADDR_LO 0x5E8CD0
+
+#define mmDMA7_QM_GLBL_ERR_ADDR_HI 0x5E8CD4
+
+#define mmDMA7_QM_GLBL_ERR_WDATA 0x5E8CD8
+
+#define mmDMA7_QM_GLBL_MEM_INIT_BUSY 0x5E8D00
+
+#endif /* ASIC_REG_DMA7_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
new file mode 100644
index 000000000000..8c1c72df4469
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_N_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_E_N_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_N_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_N_DOWN_CH0_PERM_SEL 0x4E1108
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_0 0x4E1114
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_1 0x4E1118
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_2 0x4E111C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_3 0x4E1120
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_4 0x4E1124
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_5 0x4E1128
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_6 0x4E112C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_7 0x4E1130
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_8 0x4E1134
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_9 0x4E1138
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_10 0x4E113C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_11 0x4E1140
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_12 0x4E1144
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_13 0x4E1148
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_14 0x4E114C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_15 0x4E1150
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_16 0x4E1154
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_17 0x4E1158
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_18 0x4E115C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_19 0x4E1160
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_20 0x4E1164
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_21 0x4E1168
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_22 0x4E116C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_23 0x4E1170
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_24 0x4E1174
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_25 0x4E1178
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_26 0x4E117C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_27 0x4E1180
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_0 0x4E1184
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_1 0x4E1188
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_2 0x4E118C
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_3 0x4E1190
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_4 0x4E1194
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_5 0x4E1198
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_6 0x4E119C
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_7 0x4E11A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_8 0x4E11A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_9 0x4E11A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_10 0x4E11AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_11 0x4E11B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_12 0x4E11B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_13 0x4E11B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_14 0x4E11BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_SCRAM_SRAM_EN 0x4E126C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_EN 0x4E1274
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_SAT 0x4E1278
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_RST 0x4E127C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_TIMEOUT 0x4E1280
+
+#define mmDMA_IF_E_N_DOWN_CH0_SCRAM_HBM_EN 0x4E1284
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_EN 0x4E1288
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_SAT 0x4E128C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_RST 0x4E1290
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_TIMEOUT 0x4E1294
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_EN 0x4E129C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_SAT 0x4E12A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RST 0x4E12A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_TIMEOUT 0x4E12AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RED 0x4E12B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_EN 0x4E12EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_EN 0x4E12F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_WR_SIZE 0x4E12F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_WR_SIZE 0x4E12F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x4E1404
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x4E1408
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x4E140C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x4E1410
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x4E1414
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x4E1418
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_RD_SIZE 0x4E141C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_RD_SIZE 0x4E1420
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x4E1424
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x4E1428
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x4E142C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x4E1430
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x4E1434
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x4E1438
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_0 0x4E1450
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_1 0x4E1454
+
+#define mmDMA_IF_E_N_DOWN_CH0_NON_LIN_EN 0x4E1480
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_0 0x4E1500
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_1 0x4E1504
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_2 0x4E1508
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_3 0x4E150C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_4 0x4E1510
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_0 0x4E1514
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_1 0x4E1520
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_2 0x4E1524
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_3 0x4E1528
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_4 0x4E152C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_5 0x4E1530
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_6 0x4E1534
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_7 0x4E1538
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_8 0x4E153C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_9 0x4E1540
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_0 0x4E1550
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_1 0x4E1554
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_2 0x4E1558
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_3 0x4E155C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_4 0x4E1560
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_5 0x4E1564
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_6 0x4E1568
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_7 0x4E156C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_8 0x4E1570
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_9 0x4E1574
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_10 0x4E1578
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_11 0x4E157C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_12 0x4E1580
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_13 0x4E1584
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_14 0x4E1588
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_15 0x4E158C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_16 0x4E1590
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_17 0x4E1594
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_18 0x4E1598
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4E15E4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4E15E8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4E15EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4E15F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4E15F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4E15F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4E15FC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x4E1600
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x4E1604
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x4E1608
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x4E160C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x4E1610
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x4E1614
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x4E1618
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x4E161C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x4E1620
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x4E1624
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x4E1628
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x4E162C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x4E1630
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x4E1634
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x4E1638
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x4E163C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x4E1640
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x4E1644
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x4E1648
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x4E164C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x4E1650
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x4E1654
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x4E1658
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x4E165C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x4E1660
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x4E1664
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x4E1668
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x4E166C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x4E1670
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x4E1674
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x4E1678
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x4E167C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x4E1680
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x4E1684
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x4E1688
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x4E168C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x4E1690
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x4E1694
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x4E1698
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x4E169C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4E16A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4E16A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4E16A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4E16AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4E16B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4E16B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4E16B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4E16BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4E16C0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4E16C4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4E16C8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4E16CC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4E16D0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4E16D4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4E16D8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4E16DC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4E16E0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4E16E4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4E16E8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4E16EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4E16F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4E16F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4E16F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4E16FC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x4E1700
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x4E1704
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x4E1708
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x4E170C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x4E1710
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x4E1714
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x4E1718
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x4E171C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x4E1720
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x4E1724
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x4E1728
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x4E172C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x4E1730
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x4E1734
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x4E1738
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x4E173C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x4E1740
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x4E1744
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x4E1748
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x4E174C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x4E1750
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x4E1754
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x4E1758
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x4E175C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x4E1760
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x4E1764
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x4E1768
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x4E176C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x4E1770
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x4E1774
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x4E1778
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x4E177C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x4E1780
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x4E1784
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x4E1788
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x4E178C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x4E1790
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x4E1794
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x4E1798
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x4E179C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4E17A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4E17A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4E17A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4E17AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4E17B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4E17B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4E17B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4E17BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4E17C0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4E17C4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4E17C8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4E17CC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4E17D0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4E17D4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4E17D8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4E17DC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4E17E0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x4E1824
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x4E1828
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x4E182C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x4E1830
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x4E1834
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x4E1838
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x4E183C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x4E1840
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x4E1844
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x4E1848
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x4E184C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x4E1850
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x4E1854
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x4E1858
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x4E185C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x4E1860
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x4E1864
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x4E1868
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x4E186C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x4E1870
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x4E1874
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x4E1878
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x4E187C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x4E1880
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x4E1884
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x4E1888
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x4E188C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x4E1890
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x4E1894
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x4E1898
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x4E189C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4E18A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4E18A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4E18A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4E18AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4E18B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4E18B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4E18B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4E18BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4E18C0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4E18C4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4E18C8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4E18CC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4E18D0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4E18D4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4E18D8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4E18DC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4E18E0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4E18E4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4E18E8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4E18EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4E18F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4E18F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4E18F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4E18FC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x4E1900
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x4E1904
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x4E1908
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x4E190C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x4E1910
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x4E1914
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x4E1918
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x4E191C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x4E1920
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x4E1924
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x4E1928
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x4E192C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x4E1930
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x4E1934
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x4E1938
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x4E193C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x4E1940
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x4E1944
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x4E1948
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x4E194C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x4E1950
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x4E1954
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x4E1958
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x4E195C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x4E1960
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x4E1964
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x4E1968
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x4E196C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x4E1970
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x4E1974
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x4E1978
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x4E197C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x4E1980
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x4E1984
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x4E1988
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x4E198C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x4E1990
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x4E1994
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x4E1998
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x4E199C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4E19A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4E19A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4E19A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4E19AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4E19B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4E19B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4E19B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4E19BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4E19C0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4E19C4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4E19C8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4E19CC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4E19D0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4E19D4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4E19D8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4E19DC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4E19E0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4E19E4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4E19E8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4E19EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4E19F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4E19F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4E19F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4E19FC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x4E1A00
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x4E1A04
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x4E1A08
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x4E1A0C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x4E1A10
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x4E1A14
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x4E1A18
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x4E1A1C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x4E1A20
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AW 0x4E1A64
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AR 0x4E1A68
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_HIT_AW 0x4E1A6C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_HIT_AR 0x4E1A70
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_CFG 0x4E1B64
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_SHIFT 0x4E1B68
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_0 0x4E1B6C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_1 0x4E1B70
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_2 0x4E1B74
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_3 0x4E1B78
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_4 0x4E1B7C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_5 0x4E1B80
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_6 0x4E1B84
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_7 0x4E1B88
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_0 0x4E1BAC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_1 0x4E1BB0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_2 0x4E1BB4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_3 0x4E1BB8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_4 0x4E1BBC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_5 0x4E1BC0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_6 0x4E1BC4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_7 0x4E1BC8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_0 0x4E1BEC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_1 0x4E1BF0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_2 0x4E1BF4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_3 0x4E1BF8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_4 0x4E1BFC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_5 0x4E1C00
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_6 0x4E1C04
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_7 0x4E1C08
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_WDT 0x4E1C2C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x4E1C30
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x4E1C34
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x4E1C38
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x4E1C3C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x4E1C40
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x4E1C44
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x4E1C48
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x4E1C4C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x4E1C50
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x4E1C54
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x4E1C58
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x4E1C5C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x4E1C60
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x4E1C64
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x4E1C68
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x4E1C6C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x4E1C70
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x4E1C74
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x4E1C78
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x4E1C7C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x4E1C80
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x4E1C84
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x4E1C88
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x4E1C8C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x4E1C90
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x4E1C94
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x4E1C98
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x4E1C9C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x4E1CA0
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x4E1CA4
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x4E1CA8
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x4E1CAC
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_0 0x4E1CB0
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_1 0x4E1CB4
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_2 0x4E1CB8
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_3 0x4E1CBC
+
+#endif /* ASIC_REG_DMA_IF_E_N_DOWN_CH0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
new file mode 100644
index 000000000000..b2b593fcec30
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_N_DOWN_CH1_REGS_H_
+#define ASIC_REG_DMA_IF_E_N_DOWN_CH1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_N_DOWN_CH1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_N_DOWN_CH1_PERM_SEL 0x4E2108
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_0 0x4E2114
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_1 0x4E2118
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_2 0x4E211C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_3 0x4E2120
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_4 0x4E2124
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_5 0x4E2128
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_6 0x4E212C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_7 0x4E2130
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_8 0x4E2134
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_9 0x4E2138
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_10 0x4E213C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_11 0x4E2140
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_12 0x4E2144
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_13 0x4E2148
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_14 0x4E214C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_15 0x4E2150
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_16 0x4E2154
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_17 0x4E2158
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_18 0x4E215C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_19 0x4E2160
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_20 0x4E2164
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_21 0x4E2168
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_22 0x4E216C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_23 0x4E2170
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_24 0x4E2174
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_25 0x4E2178
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_26 0x4E217C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_27 0x4E2180
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_0 0x4E2184
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_1 0x4E2188
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_2 0x4E218C
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_3 0x4E2190
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_4 0x4E2194
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_5 0x4E2198
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_6 0x4E219C
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_7 0x4E21A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_8 0x4E21A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_9 0x4E21A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_10 0x4E21AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_11 0x4E21B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_12 0x4E21B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_13 0x4E21B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_14 0x4E21BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_SCRAM_SRAM_EN 0x4E226C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_EN 0x4E2274
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_SAT 0x4E2278
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_RST 0x4E227C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_TIMEOUT 0x4E2280
+
+#define mmDMA_IF_E_N_DOWN_CH1_SCRAM_HBM_EN 0x4E2284
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_EN 0x4E2288
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_SAT 0x4E228C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_RST 0x4E2290
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_TIMEOUT 0x4E2294
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_EN 0x4E229C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_SAT 0x4E22A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RST 0x4E22A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_TIMEOUT 0x4E22AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RED 0x4E22B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_EN 0x4E22EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_EN 0x4E22F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_WR_SIZE 0x4E22F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_WR_SIZE 0x4E22F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4E2404
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4E2408
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4E240C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4E2410
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4E2414
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4E2418
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_RD_SIZE 0x4E241C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_RD_SIZE 0x4E2420
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4E2424
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4E2428
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4E242C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4E2430
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4E2434
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4E2438
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_0 0x4E2450
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_1 0x4E2454
+
+#define mmDMA_IF_E_N_DOWN_CH1_NON_LIN_EN 0x4E2480
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_0 0x4E2500
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_1 0x4E2504
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_2 0x4E2508
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_3 0x4E250C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_4 0x4E2510
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_0 0x4E2514
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_1 0x4E2520
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_2 0x4E2524
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_3 0x4E2528
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_4 0x4E252C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_5 0x4E2530
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_6 0x4E2534
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_7 0x4E2538
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_8 0x4E253C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_9 0x4E2540
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_0 0x4E2550
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_1 0x4E2554
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_2 0x4E2558
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_3 0x4E255C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_4 0x4E2560
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_5 0x4E2564
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_6 0x4E2568
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_7 0x4E256C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_8 0x4E2570
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_9 0x4E2574
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_10 0x4E2578
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_11 0x4E257C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_12 0x4E2580
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_13 0x4E2584
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_14 0x4E2588
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_15 0x4E258C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_16 0x4E2590
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_17 0x4E2594
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_18 0x4E2598
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4E25E4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4E25E8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4E25EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4E25F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4E25F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4E25F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4E25FC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4E2600
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4E2604
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4E2608
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4E260C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4E2610
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4E2614
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4E2618
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4E261C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4E2620
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4E2624
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4E2628
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4E262C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x4E2630
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4E2634
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4E2638
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4E263C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4E2640
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4E2644
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4E2648
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4E264C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4E2650
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4E2654
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4E2658
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4E265C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4E2660
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4E2664
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4E2668
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4E266C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4E2670
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4E2674
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4E2678
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4E267C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4E2680
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4E2684
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4E2688
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4E268C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4E2690
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4E2694
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4E2698
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4E269C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4E26A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4E26A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4E26A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4E26AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4E26B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4E26B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4E26B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4E26BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4E26C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4E26C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4E26C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4E26CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4E26D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4E26D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4E26D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4E26DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4E26E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4E26E4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4E26E8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4E26EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4E26F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4E26F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4E26F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4E26FC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4E2700
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4E2704
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4E2708
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4E270C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4E2710
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4E2714
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4E2718
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4E271C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4E2720
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4E2724
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4E2728
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4E272C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4E2730
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4E2734
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4E2738
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4E273C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4E2740
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4E2744
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4E2748
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4E274C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4E2750
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4E2754
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4E2758
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4E275C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4E2760
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4E2764
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4E2768
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4E276C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4E2770
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4E2774
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4E2778
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4E277C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4E2780
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4E2784
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4E2788
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4E278C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4E2790
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4E2794
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4E2798
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4E279C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4E27A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4E27A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4E27A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4E27AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4E27B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4E27B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4E27B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4E27BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4E27C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4E27C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4E27C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4E27CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4E27D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4E27D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4E27D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4E27DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4E27E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4E2824
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4E2828
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4E282C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4E2830
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4E2834
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4E2838
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4E283C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4E2840
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4E2844
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4E2848
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4E284C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4E2850
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4E2854
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4E2858
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4E285C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x4E2860
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4E2864
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4E2868
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4E286C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4E2870
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4E2874
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4E2878
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4E287C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4E2880
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4E2884
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4E2888
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4E288C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4E2890
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4E2894
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4E2898
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4E289C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4E28A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4E28A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4E28A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4E28AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4E28B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4E28B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4E28B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4E28BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4E28C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4E28C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4E28C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4E28CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4E28D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4E28D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4E28D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4E28DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4E28E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4E28E4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4E28E8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4E28EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4E28F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4E28F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4E28F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4E28FC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4E2900
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4E2904
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4E2908
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4E290C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4E2910
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4E2914
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4E2918
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4E291C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4E2920
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4E2924
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4E2928
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4E292C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4E2930
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4E2934
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4E2938
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4E293C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4E2940
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4E2944
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4E2948
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4E294C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4E2950
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4E2954
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4E2958
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4E295C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4E2960
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4E2964
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4E2968
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4E296C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4E2970
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4E2974
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4E2978
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4E297C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4E2980
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4E2984
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4E2988
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4E298C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4E2990
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4E2994
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4E2998
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4E299C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4E29A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4E29A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4E29A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4E29AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4E29B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4E29B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4E29B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4E29BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4E29C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4E29C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4E29C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4E29CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4E29D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4E29D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4E29D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4E29DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4E29E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4E29E4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4E29E8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4E29EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4E29F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4E29F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4E29F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4E29FC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4E2A00
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4E2A04
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4E2A08
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4E2A0C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4E2A10
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4E2A14
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4E2A18
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4E2A1C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4E2A20
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AW 0x4E2A64
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AR 0x4E2A68
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4E2A6C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4E2A70
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_CFG 0x4E2B64
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_SHIFT 0x4E2B68
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4E2B6C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4E2B70
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4E2B74
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4E2B78
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4E2B7C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4E2B80
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4E2B84
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4E2B88
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_0 0x4E2BAC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_1 0x4E2BB0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_2 0x4E2BB4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_3 0x4E2BB8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_4 0x4E2BBC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_5 0x4E2BC0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_6 0x4E2BC4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_7 0x4E2BC8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_0 0x4E2BEC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_1 0x4E2BF0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_2 0x4E2BF4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_3 0x4E2BF8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_4 0x4E2BFC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_5 0x4E2C00
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_6 0x4E2C04
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_7 0x4E2C08
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_WDT 0x4E2C2C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4E2C30
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4E2C34
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4E2C38
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4E2C3C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4E2C40
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4E2C44
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4E2C48
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4E2C4C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4E2C50
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4E2C54
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4E2C58
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4E2C5C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4E2C60
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4E2C64
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4E2C68
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4E2C6C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x4E2C70
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4E2C74
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4E2C78
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4E2C7C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4E2C80
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4E2C84
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4E2C88
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4E2C8C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4E2C90
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4E2C94
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4E2C98
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4E2C9C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4E2CA0
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4E2CA4
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4E2CA8
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4E2CAC
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_0 0x4E2CB0
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_1 0x4E2CB4
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_2 0x4E2CB8
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_3 0x4E2CBC
+
+#endif /* ASIC_REG_DMA_IF_E_N_DOWN_CH1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
new file mode 100644
index 000000000000..8a10c6a76156
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_N_REGS_H_
+#define ASIC_REG_DMA_IF_E_N_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_N (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_N_HBM0_WR_CRED_CNT 0x4E0000
+
+#define mmDMA_IF_E_N_HBM1_WR_CRED_CNT 0x4E0004
+
+#define mmDMA_IF_E_N_HBM0_RD_CRED_CNT 0x4E0008
+
+#define mmDMA_IF_E_N_HBM1_RD_CRED_CNT 0x4E000C
+
+#define mmDMA_IF_E_N_HBM_LIMITER_0 0x4E0030
+
+#define mmDMA_IF_E_N_HBM_LIMITER_1 0x4E0034
+
+#define mmDMA_IF_E_N_HBM_LIMITER_2 0x4E0038
+
+#define mmDMA_IF_E_N_HBM_LIMITER_3 0x4E003C
+
+#define mmDMA_IF_E_N_HBM_ALMOST_EN_0 0x4E0040
+
+#define mmDMA_IF_E_N_HBM_ALMOST_EN_1 0x4E0044
+
+#define mmDMA_IF_E_N_HBM_CRED_EN_0 0x4E0050
+
+#define mmDMA_IF_E_N_HBM_CRED_EN_1 0x4E0054
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_0 0x4E0100
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_1 0x4E0104
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_2 0x4E0108
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_3 0x4E010C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_4 0x4E0110
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_5 0x4E0114
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_6 0x4E0118
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_7 0x4E011C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_8 0x4E0120
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_9 0x4E0124
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_10 0x4E0128
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_11 0x4E012C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_12 0x4E0130
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_13 0x4E0134
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_14 0x4E0138
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_15 0x4E013C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_0 0x4E0140
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_1 0x4E0144
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_2 0x4E0148
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_3 0x4E014C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_4 0x4E0150
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_5 0x4E0154
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_6 0x4E0158
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_7 0x4E015C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_8 0x4E0160
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_9 0x4E0164
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_10 0x4E0168
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_11 0x4E016C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_12 0x4E0170
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_13 0x4E0174
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_14 0x4E0178
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_15 0x4E017C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_0 0x4E0180
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_1 0x4E0184
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_2 0x4E0188
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_3 0x4E018C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_4 0x4E0190
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_5 0x4E0194
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_6 0x4E0198
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_7 0x4E019C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_8 0x4E01A0
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_9 0x4E01A4
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_10 0x4E01A8
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_11 0x4E01AC
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_12 0x4E01B0
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_13 0x4E01B4
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_14 0x4E01B8
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_15 0x4E01BC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_0 0x4E01C0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_1 0x4E01C4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_2 0x4E01C8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_3 0x4E01CC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_4 0x4E01D0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_5 0x4E01D4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_6 0x4E01D8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_7 0x4E01DC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_8 0x4E01E0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_9 0x4E01E4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_10 0x4E01E8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_11 0x4E01EC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_12 0x4E01F0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_13 0x4E01F4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_14 0x4E01F8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_15 0x4E01FC
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_0 0x4E0200
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_1 0x4E0204
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_2 0x4E0208
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_3 0x4E020C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_4 0x4E0210
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_5 0x4E0214
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_6 0x4E0218
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_7 0x4E021C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_8 0x4E0220
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_9 0x4E0224
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_10 0x4E0228
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_11 0x4E022C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_12 0x4E0230
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_13 0x4E0234
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_14 0x4E0238
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_15 0x4E023C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_0 0x4E0240
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_1 0x4E0244
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_2 0x4E0248
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_3 0x4E024C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_4 0x4E0250
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_5 0x4E0254
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_6 0x4E0258
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_7 0x4E025C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_8 0x4E0260
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_9 0x4E0264
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_10 0x4E0268
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_11 0x4E026C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_12 0x4E0270
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_13 0x4E0274
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_14 0x4E0278
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_15 0x4E027C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_0 0x4E0280
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_1 0x4E0284
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_2 0x4E0288
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_3 0x4E028C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_4 0x4E0290
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_5 0x4E0294
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_6 0x4E0298
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_7 0x4E029C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_8 0x4E02A0
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_9 0x4E02A4
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_10 0x4E02A8
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_11 0x4E02AC
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_12 0x4E02B0
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_13 0x4E02B4
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_14 0x4E02B8
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_15 0x4E02BC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_0 0x4E02C0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_1 0x4E02C4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_2 0x4E02C8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_3 0x4E02CC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_4 0x4E02D0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_5 0x4E02D4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_6 0x4E02D8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_7 0x4E02DC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_8 0x4E02E0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_9 0x4E02E4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_10 0x4E02E8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_11 0x4E02EC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_12 0x4E02F0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_13 0x4E02F4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_14 0x4E02F8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_15 0x4E02FC
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_0 0x4E0300
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_1 0x4E0304
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_2 0x4E0308
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_3 0x4E030C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_4 0x4E0310
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_5 0x4E0314
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_6 0x4E0318
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_7 0x4E031C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_8 0x4E0320
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_9 0x4E0324
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_10 0x4E0328
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_11 0x4E032C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_12 0x4E0330
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_13 0x4E0334
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_14 0x4E0338
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_15 0x4E033C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_0 0x4E0340
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_1 0x4E0344
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_2 0x4E0348
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_3 0x4E034C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_4 0x4E0350
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_5 0x4E0354
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_6 0x4E0358
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_7 0x4E035C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_8 0x4E0360
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_9 0x4E0364
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_10 0x4E0368
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_11 0x4E036C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_12 0x4E0370
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_13 0x4E0374
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_14 0x4E0378
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_15 0x4E037C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_0 0x4E0380
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_1 0x4E0384
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_2 0x4E0388
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_3 0x4E038C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_4 0x4E0390
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_5 0x4E0394
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_6 0x4E0398
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_7 0x4E039C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_8 0x4E03A0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_9 0x4E03A4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_10 0x4E03A8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_11 0x4E03AC
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_12 0x4E03B0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_13 0x4E03B4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_14 0x4E03B8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_15 0x4E03BC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_0 0x4E03C0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_1 0x4E03C4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_2 0x4E03C8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_3 0x4E03CC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_4 0x4E03D0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_5 0x4E03D4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_6 0x4E03D8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_7 0x4E03DC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_8 0x4E03E0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_9 0x4E03E4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_10 0x4E03E8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_11 0x4E03EC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_12 0x4E03F0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_13 0x4E03F4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_14 0x4E03F8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_15 0x4E03FC
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_0 0x4E0400
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_1 0x4E0404
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_2 0x4E0408
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_3 0x4E040C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_4 0x4E0410
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_5 0x4E0414
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_6 0x4E0418
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_7 0x4E041C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_8 0x4E0420
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_9 0x4E0424
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_10 0x4E0428
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_11 0x4E042C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_12 0x4E0430
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_13 0x4E0434
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_14 0x4E0438
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_15 0x4E043C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_0 0x4E0440
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_1 0x4E0444
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_2 0x4E0448
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_3 0x4E044C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_4 0x4E0450
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_5 0x4E0454
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_6 0x4E0458
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_7 0x4E045C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_8 0x4E0460
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_9 0x4E0464
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_10 0x4E0468
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_11 0x4E046C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_12 0x4E0470
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_13 0x4E0474
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_14 0x4E0478
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_15 0x4E047C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_0 0x4E0480
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_1 0x4E0484
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_2 0x4E0488
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_3 0x4E048C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_4 0x4E0490
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_5 0x4E0494
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_6 0x4E0498
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_7 0x4E049C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_8 0x4E04A0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_9 0x4E04A4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_10 0x4E04A8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_11 0x4E04AC
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_12 0x4E04B0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_13 0x4E04B4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_14 0x4E04B8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_15 0x4E04BC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_0 0x4E04C0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_1 0x4E04C4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_2 0x4E04C8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_3 0x4E04CC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_4 0x4E04D0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_5 0x4E04D4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_6 0x4E04D8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_7 0x4E04DC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_8 0x4E04E0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_9 0x4E04E4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_10 0x4E04E8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_11 0x4E04EC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_12 0x4E04F0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_13 0x4E04F4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_14 0x4E04F8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_15 0x4E04FC
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_0 0x4E0500
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_1 0x4E0504
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_2 0x4E0508
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_3 0x4E050C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_4 0x4E0510
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_5 0x4E0514
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_6 0x4E0518
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_7 0x4E051C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_8 0x4E0520
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_9 0x4E0524
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_10 0x4E0528
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_11 0x4E052C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_12 0x4E0530
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_13 0x4E0534
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_14 0x4E0538
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_15 0x4E053C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_0 0x4E0540
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_1 0x4E0544
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_2 0x4E0548
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_3 0x4E054C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_4 0x4E0550
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_5 0x4E0554
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_6 0x4E0558
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_7 0x4E055C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_8 0x4E0560
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_9 0x4E0564
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_10 0x4E0568
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_11 0x4E056C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_12 0x4E0570
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_13 0x4E0574
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_14 0x4E0578
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_15 0x4E057C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_0 0x4E0580
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_1 0x4E0584
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_2 0x4E0588
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_3 0x4E058C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_4 0x4E0590
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_5 0x4E0594
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_6 0x4E0598
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_7 0x4E059C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_8 0x4E05A0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_9 0x4E05A4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_10 0x4E05A8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_11 0x4E05AC
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_12 0x4E05B0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_13 0x4E05B4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_14 0x4E05B8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_15 0x4E05BC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_0 0x4E05C0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_1 0x4E05C4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_2 0x4E05C8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_3 0x4E05CC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_4 0x4E05D0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_5 0x4E05D4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_6 0x4E05D8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_7 0x4E05DC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_8 0x4E05E0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_9 0x4E05E4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_10 0x4E05E8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_11 0x4E05EC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_12 0x4E05F0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_13 0x4E05F4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_14 0x4E05F8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_15 0x4E05FC
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_0 0x4E0600
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_1 0x4E0604
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_2 0x4E0608
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_3 0x4E060C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_4 0x4E0610
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_5 0x4E0614
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_6 0x4E0618
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_7 0x4E061C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_8 0x4E0620
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_9 0x4E0624
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_10 0x4E0628
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_11 0x4E062C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_12 0x4E0630
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_13 0x4E0634
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_14 0x4E0638
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_15 0x4E063C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_0 0x4E0640
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_1 0x4E0644
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_2 0x4E0648
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_3 0x4E064C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_4 0x4E0650
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_5 0x4E0654
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_6 0x4E0658
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_7 0x4E065C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_8 0x4E0660
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_9 0x4E0664
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_10 0x4E0668
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_11 0x4E066C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_12 0x4E0670
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_13 0x4E0674
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_14 0x4E0678
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_15 0x4E067C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_0 0x4E0680
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_1 0x4E0684
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_2 0x4E0688
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_3 0x4E068C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_4 0x4E0690
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_5 0x4E0694
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_6 0x4E0698
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_7 0x4E069C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_8 0x4E06A0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_9 0x4E06A4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_10 0x4E06A8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_11 0x4E06AC
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_12 0x4E06B0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_13 0x4E06B4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_14 0x4E06B8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_15 0x4E06BC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_0 0x4E06C0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_1 0x4E06C4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_2 0x4E06C8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_3 0x4E06CC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_4 0x4E06D0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_5 0x4E06D4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_6 0x4E06D8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_7 0x4E06DC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_8 0x4E06E0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_9 0x4E06E4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_10 0x4E06E8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_11 0x4E06EC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_12 0x4E06F0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_13 0x4E06F4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_14 0x4E06F8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_15 0x4E06FC
+
+#define mmDMA_IF_E_N_SOB_HIT_RPROT 0x4E0700
+
+#define mmDMA_IF_E_N_SOB_HIT_WPROT 0x4E0704
+
+#define mmDMA_IF_E_N_SOB_HIT_RPRIV 0x4E070C
+
+#define mmDMA_IF_E_N_SOB_HIT_WPRIV 0x4E0710
+
+#define mmDMA_IF_E_N_DMA0_HIT_RPROT 0x4E071C
+
+#define mmDMA_IF_E_N_DMA0_HIT_WPROT 0x4E0720
+
+#define mmDMA_IF_E_N_DMA0_HIT_RPRIV 0x4E0724
+
+#define mmDMA_IF_E_N_DMA0_HIT_WPRIV 0x4E0728
+
+#define mmDMA_IF_E_N_DMA1_HIT_RPROT 0x4E0730
+
+#define mmDMA_IF_E_N_DMA1_HIT_WPROT 0x4E0734
+
+#define mmDMA_IF_E_N_DMA1_HIT_RPRIV 0x4E0738
+
+#define mmDMA_IF_E_N_DMA1_HIT_WPRIV 0x4E073C
+
+#define mmDMA_IF_E_N_HBM_BIN 0x4E0800
+
+#define mmDMA_IF_E_N_MME_BIN 0x4E0804
+
+#define mmDMA_IF_E_N_TPC_BIN 0x4E0808
+
+#define mmDMA_IF_E_N_DMA_BIN 0x4E080C
+
+#define mmDMA_IF_E_N_SOB_CG_EN 0x4E0810
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_0 0x4E0820
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_1 0x4E0824
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_2 0x4E0828
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_3 0x4E082C
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_4 0x4E0830
+
+#define mmDMA_IF_E_N_HBM_MISC 0x4E0834
+
+#endif /* ASIC_REG_DMA_IF_E_N_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
new file mode 100644
index 000000000000..cd61289a1e8a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_S_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_E_S_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_S_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_S_DOWN_CH0_PERM_SEL 0x4A1108
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_0 0x4A1114
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_1 0x4A1118
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_2 0x4A111C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_3 0x4A1120
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_4 0x4A1124
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_5 0x4A1128
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_6 0x4A112C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_7 0x4A1130
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_8 0x4A1134
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_9 0x4A1138
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_10 0x4A113C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_11 0x4A1140
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_12 0x4A1144
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_13 0x4A1148
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_14 0x4A114C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_15 0x4A1150
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_16 0x4A1154
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_17 0x4A1158
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_18 0x4A115C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_19 0x4A1160
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_20 0x4A1164
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_21 0x4A1168
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_22 0x4A116C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_23 0x4A1170
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_24 0x4A1174
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_25 0x4A1178
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_26 0x4A117C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_27 0x4A1180
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_0 0x4A1184
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_1 0x4A1188
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_2 0x4A118C
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_3 0x4A1190
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_4 0x4A1194
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_5 0x4A1198
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_6 0x4A119C
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_7 0x4A11A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_8 0x4A11A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_9 0x4A11A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_10 0x4A11AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_11 0x4A11B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_12 0x4A11B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_13 0x4A11B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_14 0x4A11BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_SCRAM_SRAM_EN 0x4A126C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_EN 0x4A1274
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_SAT 0x4A1278
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_RST 0x4A127C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_TIMEOUT 0x4A1280
+
+#define mmDMA_IF_E_S_DOWN_CH0_SCRAM_HBM_EN 0x4A1284
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_EN 0x4A1288
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_SAT 0x4A128C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_RST 0x4A1290
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_TIMEOUT 0x4A1294
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_EN 0x4A129C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_SAT 0x4A12A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RST 0x4A12A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_TIMEOUT 0x4A12AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RED 0x4A12B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_EN 0x4A12EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_EN 0x4A12F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_WR_SIZE 0x4A12F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_WR_SIZE 0x4A12F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x4A1404
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x4A1408
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x4A140C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x4A1410
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x4A1414
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x4A1418
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_RD_SIZE 0x4A141C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_RD_SIZE 0x4A1420
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x4A1424
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x4A1428
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x4A142C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x4A1430
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x4A1434
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x4A1438
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_0 0x4A1450
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_1 0x4A1454
+
+#define mmDMA_IF_E_S_DOWN_CH0_NON_LIN_EN 0x4A1480
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_0 0x4A1500
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_1 0x4A1504
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_2 0x4A1508
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_3 0x4A150C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_4 0x4A1510
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_0 0x4A1514
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_1 0x4A1520
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_2 0x4A1524
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_3 0x4A1528
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_4 0x4A152C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_5 0x4A1530
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_6 0x4A1534
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_7 0x4A1538
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_8 0x4A153C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_9 0x4A1540
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_0 0x4A1550
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_1 0x4A1554
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_2 0x4A1558
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_3 0x4A155C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_4 0x4A1560
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_5 0x4A1564
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_6 0x4A1568
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_7 0x4A156C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_8 0x4A1570
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_9 0x4A1574
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_10 0x4A1578
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_11 0x4A157C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_12 0x4A1580
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_13 0x4A1584
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_14 0x4A1588
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_15 0x4A158C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_16 0x4A1590
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_17 0x4A1594
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_18 0x4A1598
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4A15E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4A15E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4A15EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4A15F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4A15F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4A15F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4A15FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x4A1600
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x4A1604
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x4A1608
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x4A160C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x4A1610
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x4A1614
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x4A1618
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x4A161C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x4A1620
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x4A1624
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x4A1628
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x4A162C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x4A1630
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x4A1634
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x4A1638
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x4A163C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x4A1640
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x4A1644
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x4A1648
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x4A164C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x4A1650
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x4A1654
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x4A1658
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x4A165C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x4A1660
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x4A1664
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x4A1668
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x4A166C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x4A1670
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x4A1674
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x4A1678
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x4A167C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x4A1680
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x4A1684
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x4A1688
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x4A168C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x4A1690
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x4A1694
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x4A1698
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x4A169C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4A16A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4A16A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4A16A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4A16AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4A16B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4A16B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4A16B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4A16BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4A16C0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4A16C4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4A16C8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4A16CC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4A16D0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4A16D4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4A16D8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4A16DC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4A16E0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4A16E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4A16E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4A16EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4A16F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4A16F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4A16F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4A16FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x4A1700
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x4A1704
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x4A1708
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x4A170C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x4A1710
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x4A1714
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x4A1718
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x4A171C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x4A1720
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x4A1724
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x4A1728
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x4A172C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x4A1730
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x4A1734
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x4A1738
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x4A173C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x4A1740
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x4A1744
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x4A1748
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x4A174C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x4A1750
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x4A1754
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x4A1758
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x4A175C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x4A1760
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x4A1764
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x4A1768
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x4A176C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x4A1770
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x4A1774
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x4A1778
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x4A177C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x4A1780
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x4A1784
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x4A1788
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x4A178C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x4A1790
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x4A1794
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x4A1798
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x4A179C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4A17A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4A17A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4A17A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4A17AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4A17B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4A17B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4A17B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4A17BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4A17C0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4A17C4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4A17C8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4A17CC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4A17D0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4A17D4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4A17D8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4A17DC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4A17E0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x4A1824
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x4A1828
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x4A182C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x4A1830
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x4A1834
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x4A1838
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x4A183C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x4A1840
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x4A1844
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x4A1848
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x4A184C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x4A1850
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x4A1854
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x4A1858
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x4A185C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x4A1860
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x4A1864
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x4A1868
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x4A186C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x4A1870
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x4A1874
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x4A1878
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x4A187C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x4A1880
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x4A1884
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x4A1888
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x4A188C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x4A1890
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x4A1894
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x4A1898
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x4A189C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4A18A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4A18A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4A18A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4A18AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4A18B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4A18B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4A18B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4A18BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4A18C0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4A18C4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4A18C8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4A18CC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4A18D0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4A18D4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4A18D8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4A18DC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4A18E0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4A18E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4A18E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4A18EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4A18F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4A18F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4A18F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4A18FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x4A1900
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x4A1904
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x4A1908
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x4A190C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x4A1910
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x4A1914
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x4A1918
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x4A191C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x4A1920
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x4A1924
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x4A1928
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x4A192C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x4A1930
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x4A1934
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x4A1938
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x4A193C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x4A1940
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x4A1944
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x4A1948
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x4A194C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x4A1950
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x4A1954
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x4A1958
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x4A195C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x4A1960
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x4A1964
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x4A1968
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x4A196C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x4A1970
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x4A1974
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x4A1978
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x4A197C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x4A1980
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x4A1984
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x4A1988
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x4A198C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x4A1990
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x4A1994
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x4A1998
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x4A199C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4A19A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4A19A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4A19A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4A19AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4A19B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4A19B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4A19B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4A19BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4A19C0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4A19C4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4A19C8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4A19CC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4A19D0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4A19D4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4A19D8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4A19DC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4A19E0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4A19E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4A19E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4A19EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4A19F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4A19F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4A19F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4A19FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x4A1A00
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x4A1A04
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x4A1A08
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x4A1A0C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x4A1A10
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x4A1A14
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x4A1A18
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x4A1A1C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x4A1A20
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW 0x4A1A64
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR 0x4A1A68
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_HIT_AW 0x4A1A6C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_HIT_AR 0x4A1A70
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_CFG 0x4A1B64
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_SHIFT 0x4A1B68
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_0 0x4A1B6C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_1 0x4A1B70
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_2 0x4A1B74
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_3 0x4A1B78
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_4 0x4A1B7C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_5 0x4A1B80
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_6 0x4A1B84
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_7 0x4A1B88
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_0 0x4A1BAC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_1 0x4A1BB0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_2 0x4A1BB4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_3 0x4A1BB8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_4 0x4A1BBC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_5 0x4A1BC0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_6 0x4A1BC4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_7 0x4A1BC8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_0 0x4A1BEC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_1 0x4A1BF0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_2 0x4A1BF4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_3 0x4A1BF8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_4 0x4A1BFC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_5 0x4A1C00
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_6 0x4A1C04
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_7 0x4A1C08
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_WDT 0x4A1C2C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x4A1C30
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x4A1C34
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x4A1C38
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x4A1C3C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x4A1C40
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x4A1C44
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x4A1C48
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x4A1C4C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x4A1C50
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x4A1C54
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x4A1C58
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x4A1C5C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x4A1C60
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x4A1C64
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x4A1C68
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x4A1C6C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x4A1C70
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x4A1C74
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x4A1C78
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x4A1C7C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x4A1C80
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x4A1C84
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x4A1C88
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x4A1C8C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x4A1C90
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x4A1C94
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x4A1C98
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x4A1C9C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x4A1CA0
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x4A1CA4
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x4A1CA8
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x4A1CAC
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_0 0x4A1CB0
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_1 0x4A1CB4
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_2 0x4A1CB8
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_3 0x4A1CBC
+
+#endif /* ASIC_REG_DMA_IF_E_S_DOWN_CH0_REGS_H_ */
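
Each define in the generated header above is a raw byte offset into the device's memory-mapped register space. The short sketch below shows how one such offset could be exercised from kernel code; the base address DEMO_CFG_BAR_PHYS, the SZ_64M mapping length and the direct ioremap()/writel() access path are assumptions made purely for illustration, since the real driver maps its PCI BARs once at probe time and goes through its own register-access helpers rather than raw MMIO calls.

/*
 * Illustrative sketch only -- not part of the patch.  The physical
 * base address and the raw ioremap()/writel() access pattern are
 * assumptions for demonstration purposes.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include "dma_if_e_s_down_ch0_regs.h"

#define DEMO_CFG_BAR_PHYS	0x7ffc0000000ULL	/* hypothetical */

static int demo_enable_hbm_rate_limiter(void)
{
	void __iomem *regs;

	/* Map a window of the configuration space (size is a guess) */
	regs = ioremap(DEMO_CFG_BAR_PHYS, SZ_64M);
	if (!regs)
		return -ENOMEM;

	/* Enable the HBM rate limiter of DMA_IF east-south, channel 0 */
	writel(1, regs + mmDMA_IF_E_S_DOWN_CH0_RL_HBM_EN);

	iounmap(regs);
	return 0;
}

The same pattern applies to every mm* constant in these files; only the offset changes, which is why the headers consist of nothing but address defines.
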
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
new file mode 100644
index 000000000000..3f32370a14c7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_S_DOWN_CH1_REGS_H_
+#define ASIC_REG_DMA_IF_E_S_DOWN_CH1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_S_DOWN_CH1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_S_DOWN_CH1_PERM_SEL 0x4A2108
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_0 0x4A2114
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_1 0x4A2118
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_2 0x4A211C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_3 0x4A2120
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_4 0x4A2124
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_5 0x4A2128
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_6 0x4A212C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_7 0x4A2130
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_8 0x4A2134
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_9 0x4A2138
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_10 0x4A213C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_11 0x4A2140
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_12 0x4A2144
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_13 0x4A2148
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_14 0x4A214C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_15 0x4A2150
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_16 0x4A2154
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_17 0x4A2158
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_18 0x4A215C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_19 0x4A2160
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_20 0x4A2164
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_21 0x4A2168
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_22 0x4A216C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_23 0x4A2170
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_24 0x4A2174
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_25 0x4A2178
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_26 0x4A217C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_27 0x4A2180
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_0 0x4A2184
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_1 0x4A2188
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_2 0x4A218C
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_3 0x4A2190
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_4 0x4A2194
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_5 0x4A2198
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_6 0x4A219C
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_7 0x4A21A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_8 0x4A21A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_9 0x4A21A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_10 0x4A21AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_11 0x4A21B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_12 0x4A21B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_13 0x4A21B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_14 0x4A21BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_SCRAM_SRAM_EN 0x4A226C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_EN 0x4A2274
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_SAT 0x4A2278
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_RST 0x4A227C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_TIMEOUT 0x4A2280
+
+#define mmDMA_IF_E_S_DOWN_CH1_SCRAM_HBM_EN 0x4A2284
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_EN 0x4A2288
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_SAT 0x4A228C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_RST 0x4A2290
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_TIMEOUT 0x4A2294
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_EN 0x4A229C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_SAT 0x4A22A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RST 0x4A22A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_TIMEOUT 0x4A22AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RED 0x4A22B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_EN 0x4A22EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_EN 0x4A22F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_WR_SIZE 0x4A22F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_WR_SIZE 0x4A22F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4A2404
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4A2408
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4A240C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4A2410
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4A2414
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4A2418
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_RD_SIZE 0x4A241C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_RD_SIZE 0x4A2420
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4A2424
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4A2428
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4A242C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4A2430
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4A2434
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4A2438
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_0 0x4A2450
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_1 0x4A2454
+
+#define mmDMA_IF_E_S_DOWN_CH1_NON_LIN_EN 0x4A2480
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_0 0x4A2500
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_1 0x4A2504
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_2 0x4A2508
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_3 0x4A250C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_4 0x4A2510
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_0 0x4A2514
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_1 0x4A2520
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_2 0x4A2524
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_3 0x4A2528
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_4 0x4A252C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_5 0x4A2530
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_6 0x4A2534
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_7 0x4A2538
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_8 0x4A253C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_9 0x4A2540
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_0 0x4A2550
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_1 0x4A2554
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_2 0x4A2558
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_3 0x4A255C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_4 0x4A2560
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_5 0x4A2564
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_6 0x4A2568
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_7 0x4A256C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_8 0x4A2570
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_9 0x4A2574
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_10 0x4A2578
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_11 0x4A257C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_12 0x4A2580
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_13 0x4A2584
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_14 0x4A2588
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_15 0x4A258C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_16 0x4A2590
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_17 0x4A2594
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_18 0x4A2598
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4A25E4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4A25E8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4A25EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4A25F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4A25F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4A25F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4A25FC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4A2600
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4A2604
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4A2608
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4A260C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4A2610
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4A2614
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4A2618
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4A261C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4A2620
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4A2624
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4A2628
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4A262C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x4A2630
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4A2634
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4A2638
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4A263C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4A2640
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4A2644
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4A2648
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4A264C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4A2650
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4A2654
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4A2658
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4A265C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4A2660
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4A2664
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4A2668
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4A266C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4A2670
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4A2674
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4A2678
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4A267C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4A2680
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4A2684
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4A2688
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4A268C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4A2690
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4A2694
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4A2698
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4A269C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4A26A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4A26A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4A26A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4A26AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4A26B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4A26B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4A26B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4A26BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4A26C0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4A26C4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4A26C8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4A26CC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4A26D0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4A26D4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4A26D8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4A26DC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4A26E0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4A26E4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4A26E8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4A26EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4A26F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4A26F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4A26F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4A26FC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4A2700
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4A2704
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4A2708
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4A270C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4A2710
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4A2714
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4A2718
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4A271C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4A2720
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4A2724
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4A2728
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4A272C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4A2730
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4A2734
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4A2738
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4A273C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4A2740
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4A2744
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4A2748
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4A274C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4A2750
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4A2754
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4A2758
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4A275C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4A2760
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4A2764
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4A2768
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4A276C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4A2770
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4A2774
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4A2778
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4A277C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4A2780
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4A2784
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4A2788
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4A278C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4A2790
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4A2794
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4A2798
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4A279C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4A27A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4A27A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4A27A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4A27AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4A27B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4A27B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4A27B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4A27BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4A27C0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4A27C4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4A27C8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4A27CC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4A27D0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4A27D4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4A27D8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4A27DC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4A27E0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4A2824
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4A2828
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4A282C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4A2830
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4A2834
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4A2838
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4A283C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4A2840
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4A2844
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4A2848
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4A284C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4A2850
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4A2854
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4A2858
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4A285C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x4A2860
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4A2864
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4A2868
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4A286C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4A2870
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4A2874
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4A2878
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4A287C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4A2880
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4A2884
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4A2888
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4A288C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4A2890
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4A2894
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4A2898
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4A289C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4A28A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4A28A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4A28A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4A28AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4A28B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4A28B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4A28B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4A28BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4A28C0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4A28C4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4A28C8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4A28CC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4A28D0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4A28D4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4A28D8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4A28DC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4A28E0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4A28E4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4A28E8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4A28EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4A28F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4A28F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4A28F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4A28FC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4A2900
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4A2904
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4A2908
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4A290C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4A2910
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4A2914
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4A2918
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4A291C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4A2920
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4A2924
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4A2928
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4A292C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4A2930
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4A2934
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4A2938
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4A293C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4A2940
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4A2944
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4A2948
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4A294C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4A2950
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4A2954
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4A2958
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4A295C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4A2960
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4A2964
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4A2968
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4A296C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4A2970
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4A2974
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4A2978
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4A297C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4A2980
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4A2984
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4A2988
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4A298C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4A2990
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4A2994
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4A2998
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4A299C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4A29A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4A29A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4A29A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4A29AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4A29B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4A29B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4A29B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4A29BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4A29C0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4A29C4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4A29C8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4A29CC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4A29D0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4A29D4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4A29D8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4A29DC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4A29E0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4A29E4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4A29E8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4A29EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4A29F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4A29F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4A29F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4A29FC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4A2A00
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4A2A04
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4A2A08
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4A2A0C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4A2A10
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4A2A14
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4A2A18
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4A2A1C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4A2A20
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AW 0x4A2A64
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AR 0x4A2A68
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4A2A6C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4A2A70
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_CFG 0x4A2B64
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_SHIFT 0x4A2B68
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4A2B6C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4A2B70
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4A2B74
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4A2B78
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4A2B7C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4A2B80
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4A2B84
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4A2B88
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_0 0x4A2BAC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_1 0x4A2BB0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_2 0x4A2BB4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_3 0x4A2BB8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_4 0x4A2BBC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_5 0x4A2BC0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_6 0x4A2BC4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_7 0x4A2BC8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_0 0x4A2BEC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_1 0x4A2BF0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_2 0x4A2BF4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_3 0x4A2BF8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_4 0x4A2BFC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_5 0x4A2C00
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_6 0x4A2C04
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_7 0x4A2C08
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_WDT 0x4A2C2C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4A2C30
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4A2C34
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4A2C38
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4A2C3C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4A2C40
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4A2C44
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4A2C48
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4A2C4C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4A2C50
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4A2C54
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4A2C58
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4A2C5C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4A2C60
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4A2C64
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4A2C68
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4A2C6C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x4A2C70
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4A2C74
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4A2C78
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4A2C7C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4A2C80
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4A2C84
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4A2C88
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4A2C8C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4A2C90
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4A2C94
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4A2C98
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4A2C9C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4A2CA0
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4A2CA4
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4A2CA8
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4A2CAC
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_0 0x4A2CB0
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_1 0x4A2CB4
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_2 0x4A2CB8
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_3 0x4A2CBC
+
+#endif /* ASIC_REG_DMA_IF_E_S_DOWN_CH1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
new file mode 100644
index 000000000000..78c18da7154b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_S_REGS_H_
+#define ASIC_REG_DMA_IF_E_S_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_S (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_S_HBM0_WR_CRED_CNT 0x4A0000
+
+#define mmDMA_IF_E_S_HBM1_WR_CRED_CNT 0x4A0004
+
+#define mmDMA_IF_E_S_HBM0_RD_CRED_CNT 0x4A0008
+
+#define mmDMA_IF_E_S_HBM1_RD_CRED_CNT 0x4A000C
+
+#define mmDMA_IF_E_S_HBM_LIMITER_0 0x4A0030
+
+#define mmDMA_IF_E_S_HBM_LIMITER_1 0x4A0034
+
+#define mmDMA_IF_E_S_HBM_LIMITER_2 0x4A0038
+
+#define mmDMA_IF_E_S_HBM_LIMITER_3 0x4A003C
+
+#define mmDMA_IF_E_S_HBM_ALMOST_EN_0 0x4A0040
+
+#define mmDMA_IF_E_S_HBM_ALMOST_EN_1 0x4A0044
+
+#define mmDMA_IF_E_S_HBM_CRED_EN_0 0x4A0050
+
+#define mmDMA_IF_E_S_HBM_CRED_EN_1 0x4A0054
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_0 0x4A0100
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_1 0x4A0104
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_2 0x4A0108
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_3 0x4A010C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_4 0x4A0110
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_5 0x4A0114
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_6 0x4A0118
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_7 0x4A011C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_8 0x4A0120
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_9 0x4A0124
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_10 0x4A0128
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_11 0x4A012C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_12 0x4A0130
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_13 0x4A0134
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_14 0x4A0138
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_15 0x4A013C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_0 0x4A0140
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_1 0x4A0144
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_2 0x4A0148
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_3 0x4A014C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_4 0x4A0150
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_5 0x4A0154
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_6 0x4A0158
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_7 0x4A015C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_8 0x4A0160
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_9 0x4A0164
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_10 0x4A0168
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_11 0x4A016C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_12 0x4A0170
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_13 0x4A0174
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_14 0x4A0178
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_15 0x4A017C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_0 0x4A0180
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_1 0x4A0184
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_2 0x4A0188
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_3 0x4A018C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_4 0x4A0190
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_5 0x4A0194
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_6 0x4A0198
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_7 0x4A019C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_8 0x4A01A0
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_9 0x4A01A4
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_10 0x4A01A8
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_11 0x4A01AC
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_12 0x4A01B0
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_13 0x4A01B4
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_14 0x4A01B8
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_15 0x4A01BC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_0 0x4A01C0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_1 0x4A01C4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_2 0x4A01C8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_3 0x4A01CC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_4 0x4A01D0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_5 0x4A01D4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_6 0x4A01D8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_7 0x4A01DC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_8 0x4A01E0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_9 0x4A01E4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_10 0x4A01E8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_11 0x4A01EC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_12 0x4A01F0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_13 0x4A01F4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_14 0x4A01F8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_15 0x4A01FC
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_0 0x4A0200
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_1 0x4A0204
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_2 0x4A0208
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_3 0x4A020C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_4 0x4A0210
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_5 0x4A0214
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_6 0x4A0218
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_7 0x4A021C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_8 0x4A0220
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_9 0x4A0224
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_10 0x4A0228
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_11 0x4A022C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_12 0x4A0230
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_13 0x4A0234
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_14 0x4A0238
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_15 0x4A023C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_0 0x4A0240
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_1 0x4A0244
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_2 0x4A0248
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_3 0x4A024C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_4 0x4A0250
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_5 0x4A0254
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_6 0x4A0258
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_7 0x4A025C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_8 0x4A0260
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_9 0x4A0264
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_10 0x4A0268
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_11 0x4A026C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_12 0x4A0270
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_13 0x4A0274
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_14 0x4A0278
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_15 0x4A027C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_0 0x4A0280
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_1 0x4A0284
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_2 0x4A0288
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_3 0x4A028C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_4 0x4A0290
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_5 0x4A0294
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_6 0x4A0298
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_7 0x4A029C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_8 0x4A02A0
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_9 0x4A02A4
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_10 0x4A02A8
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_11 0x4A02AC
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_12 0x4A02B0
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_13 0x4A02B4
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_14 0x4A02B8
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_15 0x4A02BC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_0 0x4A02C0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_1 0x4A02C4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_2 0x4A02C8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_3 0x4A02CC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_4 0x4A02D0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_5 0x4A02D4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_6 0x4A02D8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_7 0x4A02DC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_8 0x4A02E0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_9 0x4A02E4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_10 0x4A02E8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_11 0x4A02EC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_12 0x4A02F0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_13 0x4A02F4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_14 0x4A02F8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_15 0x4A02FC
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_0 0x4A0300
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_1 0x4A0304
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_2 0x4A0308
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_3 0x4A030C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_4 0x4A0310
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_5 0x4A0314
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_6 0x4A0318
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_7 0x4A031C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_8 0x4A0320
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_9 0x4A0324
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_10 0x4A0328
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_11 0x4A032C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_12 0x4A0330
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_13 0x4A0334
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_14 0x4A0338
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_15 0x4A033C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_0 0x4A0340
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_1 0x4A0344
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_2 0x4A0348
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_3 0x4A034C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_4 0x4A0350
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_5 0x4A0354
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_6 0x4A0358
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_7 0x4A035C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_8 0x4A0360
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_9 0x4A0364
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_10 0x4A0368
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_11 0x4A036C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_12 0x4A0370
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_13 0x4A0374
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_14 0x4A0378
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_15 0x4A037C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_0 0x4A0380
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_1 0x4A0384
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_2 0x4A0388
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_3 0x4A038C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_4 0x4A0390
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_5 0x4A0394
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_6 0x4A0398
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_7 0x4A039C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_8 0x4A03A0
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_9 0x4A03A4
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_10 0x4A03A8
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_11 0x4A03AC
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_12 0x4A03B0
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_13 0x4A03B4
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_14 0x4A03B8
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_15 0x4A03BC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_0 0x4A03C0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_1 0x4A03C4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_2 0x4A03C8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_3 0x4A03CC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_4 0x4A03D0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_5 0x4A03D4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_6 0x4A03D8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_7 0x4A03DC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_8 0x4A03E0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_9 0x4A03E4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_10 0x4A03E8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_11 0x4A03EC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_12 0x4A03F0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_13 0x4A03F4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_14 0x4A03F8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_15 0x4A03FC
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_0 0x4A0400
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_1 0x4A0404
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_2 0x4A0408
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_3 0x4A040C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_4 0x4A0410
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_5 0x4A0414
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_6 0x4A0418
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_7 0x4A041C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_8 0x4A0420
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_9 0x4A0424
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_10 0x4A0428
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_11 0x4A042C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_12 0x4A0430
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_13 0x4A0434
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_14 0x4A0438
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_15 0x4A043C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_0 0x4A0440
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_1 0x4A0444
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_2 0x4A0448
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_3 0x4A044C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_4 0x4A0450
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_5 0x4A0454
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_6 0x4A0458
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_7 0x4A045C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_8 0x4A0460
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_9 0x4A0464
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_10 0x4A0468
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_11 0x4A046C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_12 0x4A0470
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_13 0x4A0474
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_14 0x4A0478
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_15 0x4A047C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_0 0x4A0480
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_1 0x4A0484
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_2 0x4A0488
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_3 0x4A048C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_4 0x4A0490
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_5 0x4A0494
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_6 0x4A0498
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_7 0x4A049C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_8 0x4A04A0
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_9 0x4A04A4
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_10 0x4A04A8
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_11 0x4A04AC
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_12 0x4A04B0
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_13 0x4A04B4
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_14 0x4A04B8
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_15 0x4A04BC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_0 0x4A04C0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_1 0x4A04C4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_2 0x4A04C8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_3 0x4A04CC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_4 0x4A04D0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_5 0x4A04D4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_6 0x4A04D8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_7 0x4A04DC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_8 0x4A04E0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_9 0x4A04E4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_10 0x4A04E8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_11 0x4A04EC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_12 0x4A04F0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_13 0x4A04F4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_14 0x4A04F8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_15 0x4A04FC
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_0 0x4A0500
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_1 0x4A0504
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_2 0x4A0508
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_3 0x4A050C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_4 0x4A0510
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_5 0x4A0514
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_6 0x4A0518
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_7 0x4A051C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_8 0x4A0520
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_9 0x4A0524
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_10 0x4A0528
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_11 0x4A052C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_12 0x4A0530
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_13 0x4A0534
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_14 0x4A0538
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_15 0x4A053C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_0 0x4A0540
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_1 0x4A0544
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_2 0x4A0548
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_3 0x4A054C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_4 0x4A0550
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_5 0x4A0554
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_6 0x4A0558
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_7 0x4A055C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_8 0x4A0560
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_9 0x4A0564
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_10 0x4A0568
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_11 0x4A056C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_12 0x4A0570
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_13 0x4A0574
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_14 0x4A0578
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_15 0x4A057C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_0 0x4A0580
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_1 0x4A0584
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_2 0x4A0588
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_3 0x4A058C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_4 0x4A0590
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_5 0x4A0594
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_6 0x4A0598
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_7 0x4A059C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_8 0x4A05A0
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_9 0x4A05A4
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_10 0x4A05A8
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_11 0x4A05AC
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_12 0x4A05B0
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_13 0x4A05B4
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_14 0x4A05B8
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_15 0x4A05BC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_0 0x4A05C0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_1 0x4A05C4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_2 0x4A05C8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_3 0x4A05CC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_4 0x4A05D0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_5 0x4A05D4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_6 0x4A05D8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_7 0x4A05DC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_8 0x4A05E0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_9 0x4A05E4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_10 0x4A05E8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_11 0x4A05EC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_12 0x4A05F0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_13 0x4A05F4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_14 0x4A05F8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_15 0x4A05FC
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_0 0x4A0600
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_1 0x4A0604
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_2 0x4A0608
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_3 0x4A060C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_4 0x4A0610
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_5 0x4A0614
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_6 0x4A0618
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_7 0x4A061C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_8 0x4A0620
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_9 0x4A0624
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_10 0x4A0628
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_11 0x4A062C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_12 0x4A0630
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_13 0x4A0634
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_14 0x4A0638
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_15 0x4A063C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_0 0x4A0640
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_1 0x4A0644
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_2 0x4A0648
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_3 0x4A064C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_4 0x4A0650
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_5 0x4A0654
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_6 0x4A0658
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_7 0x4A065C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_8 0x4A0660
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_9 0x4A0664
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_10 0x4A0668
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_11 0x4A066C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_12 0x4A0670
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_13 0x4A0674
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_14 0x4A0678
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_15 0x4A067C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_0 0x4A0680
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_1 0x4A0684
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_2 0x4A0688
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_3 0x4A068C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_4 0x4A0690
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_5 0x4A0694
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_6 0x4A0698
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_7 0x4A069C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_8 0x4A06A0
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_9 0x4A06A4
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_10 0x4A06A8
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_11 0x4A06AC
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_12 0x4A06B0
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_13 0x4A06B4
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_14 0x4A06B8
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_15 0x4A06BC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_0 0x4A06C0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_1 0x4A06C4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_2 0x4A06C8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_3 0x4A06CC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_4 0x4A06D0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_5 0x4A06D4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_6 0x4A06D8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_7 0x4A06DC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_8 0x4A06E0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_9 0x4A06E4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_10 0x4A06E8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_11 0x4A06EC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_12 0x4A06F0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_13 0x4A06F4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_14 0x4A06F8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_15 0x4A06FC
+
+#define mmDMA_IF_E_S_SOB_HIT_RPROT 0x4A0700
+
+#define mmDMA_IF_E_S_SOB_HIT_WPROT 0x4A0704
+
+#define mmDMA_IF_E_S_SOB_HIT_RPRIV 0x4A070C
+
+#define mmDMA_IF_E_S_SOB_HIT_WPRIV 0x4A0710
+
+#define mmDMA_IF_E_S_DMA0_HIT_RPROT 0x4A071C
+
+#define mmDMA_IF_E_S_DMA0_HIT_WPROT 0x4A0720
+
+#define mmDMA_IF_E_S_DMA0_HIT_RPRIV 0x4A0724
+
+#define mmDMA_IF_E_S_DMA0_HIT_WPRIV 0x4A0728
+
+#define mmDMA_IF_E_S_DMA1_HIT_RPROT 0x4A0730
+
+#define mmDMA_IF_E_S_DMA1_HIT_WPROT 0x4A0734
+
+#define mmDMA_IF_E_S_DMA1_HIT_RPRIV 0x4A0738
+
+#define mmDMA_IF_E_S_DMA1_HIT_WPRIV 0x4A073C
+
+#define mmDMA_IF_E_S_HBM_BIN 0x4A0800
+
+#define mmDMA_IF_E_S_MME_BIN 0x4A0804
+
+#define mmDMA_IF_E_S_TPC_BIN 0x4A0808
+
+#define mmDMA_IF_E_S_DMA_BIN 0x4A080C
+
+#define mmDMA_IF_E_S_SOB_CG_EN 0x4A0810
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_0 0x4A0820
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_1 0x4A0824
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_2 0x4A0828
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_3 0x4A082C
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_4 0x4A0830
+
+#define mmDMA_IF_E_S_HBM_MISC 0x4A0834
+
+#endif /* ASIC_REG_DMA_IF_E_S_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
new file mode 100644
index 000000000000..4ccaf8712948
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_N_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_W_N_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_N_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_N_DOWN_CH0_PERM_SEL 0x4C1108
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_0 0x4C1114
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_1 0x4C1118
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_2 0x4C111C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_3 0x4C1120
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_4 0x4C1124
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_5 0x4C1128
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_6 0x4C112C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_7 0x4C1130
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_8 0x4C1134
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_9 0x4C1138
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_10 0x4C113C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_11 0x4C1140
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_12 0x4C1144
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_13 0x4C1148
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_14 0x4C114C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_15 0x4C1150
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_16 0x4C1154
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_17 0x4C1158
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_18 0x4C115C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_19 0x4C1160
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_20 0x4C1164
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_21 0x4C1168
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_22 0x4C116C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_23 0x4C1170
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_24 0x4C1174
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_25 0x4C1178
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_26 0x4C117C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_27 0x4C1180
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_0 0x4C1184
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_1 0x4C1188
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_2 0x4C118C
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_3 0x4C1190
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_4 0x4C1194
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_5 0x4C1198
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_6 0x4C119C
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_7 0x4C11A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_8 0x4C11A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_9 0x4C11A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_10 0x4C11AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_11 0x4C11B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_12 0x4C11B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_13 0x4C11B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_14 0x4C11BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_SCRAM_SRAM_EN 0x4C126C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_EN 0x4C1274
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_SAT 0x4C1278
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_RST 0x4C127C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_TIMEOUT 0x4C1280
+
+#define mmDMA_IF_W_N_DOWN_CH0_SCRAM_HBM_EN 0x4C1284
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_EN 0x4C1288
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_SAT 0x4C128C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_RST 0x4C1290
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_TIMEOUT 0x4C1294
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_EN 0x4C129C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_SAT 0x4C12A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RST 0x4C12A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_TIMEOUT 0x4C12AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RED 0x4C12B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_EN 0x4C12EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_EN 0x4C12F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_WR_SIZE 0x4C12F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_WR_SIZE 0x4C12F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x4C1404
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x4C1408
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x4C140C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x4C1410
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x4C1414
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x4C1418
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_RD_SIZE 0x4C141C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_RD_SIZE 0x4C1420
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x4C1424
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x4C1428
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x4C142C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x4C1430
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x4C1434
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x4C1438
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_0 0x4C1450
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_1 0x4C1454
+
+#define mmDMA_IF_W_N_DOWN_CH0_NON_LIN_EN 0x4C1480
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_0 0x4C1500
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_1 0x4C1504
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_2 0x4C1508
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_3 0x4C150C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_4 0x4C1510
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_0 0x4C1514
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_1 0x4C1520
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_2 0x4C1524
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_3 0x4C1528
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_4 0x4C152C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_5 0x4C1530
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_6 0x4C1534
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_7 0x4C1538
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_8 0x4C153C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_9 0x4C1540
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_0 0x4C1550
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_1 0x4C1554
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_2 0x4C1558
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_3 0x4C155C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_4 0x4C1560
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_5 0x4C1564
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_6 0x4C1568
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_7 0x4C156C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_8 0x4C1570
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_9 0x4C1574
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_10 0x4C1578
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_11 0x4C157C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_12 0x4C1580
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_13 0x4C1584
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_14 0x4C1588
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_15 0x4C158C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_16 0x4C1590
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_17 0x4C1594
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_18 0x4C1598
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4C15E4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4C15E8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4C15EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4C15F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4C15F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4C15F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4C15FC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x4C1600
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x4C1604
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x4C1608
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x4C160C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x4C1610
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x4C1614
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x4C1618
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x4C161C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x4C1620
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x4C1624
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x4C1628
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x4C162C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x4C1630
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x4C1634
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x4C1638
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x4C163C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x4C1640
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x4C1644
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x4C1648
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x4C164C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x4C1650
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x4C1654
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x4C1658
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x4C165C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x4C1660
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x4C1664
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x4C1668
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x4C166C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x4C1670
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x4C1674
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x4C1678
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x4C167C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x4C1680
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x4C1684
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x4C1688
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x4C168C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x4C1690
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x4C1694
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x4C1698
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x4C169C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4C16A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4C16A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4C16A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4C16AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4C16B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4C16B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4C16B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4C16BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4C16C0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4C16C4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4C16C8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4C16CC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4C16D0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4C16D4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4C16D8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4C16DC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4C16E0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4C16E4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4C16E8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4C16EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4C16F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4C16F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4C16F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4C16FC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x4C1700
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x4C1704
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x4C1708
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x4C170C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x4C1710
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x4C1714
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x4C1718
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x4C171C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x4C1720
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x4C1724
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x4C1728
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x4C172C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x4C1730
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x4C1734
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x4C1738
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x4C173C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x4C1740
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x4C1744
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x4C1748
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x4C174C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x4C1750
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x4C1754
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x4C1758
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x4C175C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x4C1760
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x4C1764
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x4C1768
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x4C176C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x4C1770
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x4C1774
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x4C1778
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x4C177C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x4C1780
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x4C1784
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x4C1788
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x4C178C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x4C1790
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x4C1794
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x4C1798
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x4C179C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4C17A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4C17A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4C17A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4C17AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4C17B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4C17B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4C17B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4C17BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4C17C0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4C17C4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4C17C8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4C17CC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4C17D0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4C17D4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4C17D8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4C17DC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4C17E0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x4C1824
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x4C1828
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x4C182C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x4C1830
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x4C1834
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x4C1838
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x4C183C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x4C1840
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x4C1844
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x4C1848
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x4C184C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x4C1850
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x4C1854
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x4C1858
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x4C185C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x4C1860
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x4C1864
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x4C1868
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x4C186C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x4C1870
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x4C1874
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x4C1878
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x4C187C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x4C1880
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x4C1884
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x4C1888
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x4C188C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x4C1890
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x4C1894
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x4C1898
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x4C189C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4C18A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4C18A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4C18A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4C18AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4C18B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4C18B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4C18B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4C18BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4C18C0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4C18C4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4C18C8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4C18CC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4C18D0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4C18D4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4C18D8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4C18DC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4C18E0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4C18E4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4C18E8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4C18EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4C18F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4C18F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4C18F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4C18FC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x4C1900
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x4C1904
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x4C1908
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x4C190C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x4C1910
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x4C1914
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x4C1918
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x4C191C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x4C1920
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x4C1924
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x4C1928
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x4C192C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x4C1930
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x4C1934
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x4C1938
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x4C193C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x4C1940
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x4C1944
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x4C1948
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x4C194C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x4C1950
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x4C1954
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x4C1958
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x4C195C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x4C1960
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x4C1964
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x4C1968
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x4C196C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x4C1970
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x4C1974
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x4C1978
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x4C197C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x4C1980
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x4C1984
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x4C1988
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x4C198C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x4C1990
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x4C1994
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x4C1998
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x4C199C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4C19A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4C19A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4C19A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4C19AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4C19B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4C19B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4C19B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4C19BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4C19C0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4C19C4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4C19C8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4C19CC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4C19D0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4C19D4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4C19D8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4C19DC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4C19E0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4C19E4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4C19E8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4C19EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4C19F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4C19F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4C19F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4C19FC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x4C1A00
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x4C1A04
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x4C1A08
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x4C1A0C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x4C1A10
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x4C1A14
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x4C1A18
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x4C1A1C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x4C1A20
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AW 0x4C1A64
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AR 0x4C1A68
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_HIT_AW 0x4C1A6C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_HIT_AR 0x4C1A70
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_CFG 0x4C1B64
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_SHIFT 0x4C1B68
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_0 0x4C1B6C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_1 0x4C1B70
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_2 0x4C1B74
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_3 0x4C1B78
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_4 0x4C1B7C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_5 0x4C1B80
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_6 0x4C1B84
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_7 0x4C1B88
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_0 0x4C1BAC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_1 0x4C1BB0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_2 0x4C1BB4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_3 0x4C1BB8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_4 0x4C1BBC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_5 0x4C1BC0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_6 0x4C1BC4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_7 0x4C1BC8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_0 0x4C1BEC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_1 0x4C1BF0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_2 0x4C1BF4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_3 0x4C1BF8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_4 0x4C1BFC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_5 0x4C1C00
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_6 0x4C1C04
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_7 0x4C1C08
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_WDT 0x4C1C2C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x4C1C30
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x4C1C34
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x4C1C38
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x4C1C3C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x4C1C40
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x4C1C44
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x4C1C48
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x4C1C4C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x4C1C50
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x4C1C54
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x4C1C58
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x4C1C5C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x4C1C60
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x4C1C64
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x4C1C68
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x4C1C6C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x4C1C70
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x4C1C74
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x4C1C78
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x4C1C7C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x4C1C80
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x4C1C84
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x4C1C88
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x4C1C8C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x4C1C90
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x4C1C94
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x4C1C98
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x4C1C9C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x4C1CA0
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x4C1CA4
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x4C1CA8
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x4C1CAC
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_0 0x4C1CB0
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_1 0x4C1CB4
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_2 0x4C1CB8
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_3 0x4C1CBC
+
+#endif /* ASIC_REG_DMA_IF_W_N_DOWN_CH0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
new file mode 100644
index 000000000000..9236f4183084
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_
+#define ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_N_DOWN_CH1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_N_DOWN_CH1_PERM_SEL 0x4C2108
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_0 0x4C2114
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_1 0x4C2118
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_2 0x4C211C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_3 0x4C2120
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_4 0x4C2124
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_5 0x4C2128
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_6 0x4C212C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_7 0x4C2130
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_8 0x4C2134
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_9 0x4C2138
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_10 0x4C213C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_11 0x4C2140
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_12 0x4C2144
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_13 0x4C2148
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_14 0x4C214C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_15 0x4C2150
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_16 0x4C2154
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_17 0x4C2158
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_18 0x4C215C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_19 0x4C2160
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_20 0x4C2164
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_21 0x4C2168
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_22 0x4C216C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_23 0x4C2170
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_24 0x4C2174
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_25 0x4C2178
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_26 0x4C217C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_27 0x4C2180
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_0 0x4C2184
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_1 0x4C2188
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_2 0x4C218C
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_3 0x4C2190
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_4 0x4C2194
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_5 0x4C2198
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_6 0x4C219C
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_7 0x4C21A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_8 0x4C21A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_9 0x4C21A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_10 0x4C21AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_11 0x4C21B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_12 0x4C21B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_13 0x4C21B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_14 0x4C21BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN 0x4C226C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_EN 0x4C2274
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_SAT 0x4C2278
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_RST 0x4C227C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_TIMEOUT 0x4C2280
+
+#define mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN 0x4C2284
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_EN 0x4C2288
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_SAT 0x4C228C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_RST 0x4C2290
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_TIMEOUT 0x4C2294
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_EN 0x4C229C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_SAT 0x4C22A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RST 0x4C22A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_TIMEOUT 0x4C22AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RED 0x4C22B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN 0x4C22EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN 0x4C22F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE 0x4C22F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE 0x4C22F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4C2404
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4C2408
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4C240C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4C2410
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4C2414
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4C2418
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE 0x4C241C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE 0x4C2420
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4C2424
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4C2428
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4C242C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4C2430
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4C2434
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4C2438
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0 0x4C2450
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1 0x4C2454
+
+#define mmDMA_IF_W_N_DOWN_CH1_NON_LIN_EN 0x4C2480
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_0 0x4C2500
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_1 0x4C2504
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_2 0x4C2508
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_3 0x4C250C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_4 0x4C2510
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_0 0x4C2514
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_1 0x4C2520
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_2 0x4C2524
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_3 0x4C2528
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_4 0x4C252C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_5 0x4C2530
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_6 0x4C2534
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_7 0x4C2538
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_8 0x4C253C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_9 0x4C2540
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_0 0x4C2550
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_1 0x4C2554
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_2 0x4C2558
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_3 0x4C255C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_4 0x4C2560
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_5 0x4C2564
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_6 0x4C2568
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_7 0x4C256C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_8 0x4C2570
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_9 0x4C2574
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_10 0x4C2578
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_11 0x4C257C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_12 0x4C2580
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_13 0x4C2584
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_14 0x4C2588
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_15 0x4C258C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_16 0x4C2590
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_17 0x4C2594
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18 0x4C2598
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4C25E4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4C25E8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4C25EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4C25F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4C25F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4C25F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4C25FC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4C2600
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4C2604
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4C2608
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4C260C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4C2610
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4C2614
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4C2618
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4C261C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4C2620
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4C2624
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4C2628
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4C262C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x4C2630
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4C2634
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4C2638
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4C263C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4C2640
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4C2644
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4C2648
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4C264C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4C2650
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4C2654
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4C2658
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4C265C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4C2660
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4C2664
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4C2668
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4C266C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4C2670
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4C2674
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4C2678
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4C267C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4C2680
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4C2684
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4C2688
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4C268C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4C2690
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4C2694
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4C2698
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4C269C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4C26A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4C26A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4C26A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4C26AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4C26B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4C26B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4C26B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4C26BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4C26C0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4C26C4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4C26C8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4C26CC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4C26D0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4C26D4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4C26D8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4C26DC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4C26E0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4C26E4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4C26E8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4C26EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4C26F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4C26F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4C26F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4C26FC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4C2700
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4C2704
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4C2708
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4C270C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4C2710
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4C2714
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4C2718
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4C271C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4C2720
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4C2724
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4C2728
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4C272C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4C2730
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4C2734
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4C2738
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4C273C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4C2740
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4C2744
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4C2748
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4C274C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4C2750
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4C2754
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4C2758
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4C275C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4C2760
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4C2764
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4C2768
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4C276C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4C2770
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4C2774
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4C2778
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4C277C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4C2780
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4C2784
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4C2788
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4C278C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4C2790
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4C2794
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4C2798
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4C279C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4C27A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4C27A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4C27A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4C27AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4C27B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4C27B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4C27B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4C27BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4C27C0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4C27C4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4C27C8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4C27CC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4C27D0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4C27D4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4C27D8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4C27DC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4C27E0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4C2824
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4C2828
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4C282C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4C2830
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4C2834
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4C2838
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4C283C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4C2840
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4C2844
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4C2848
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4C284C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4C2850
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4C2854
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4C2858
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4C285C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x4C2860
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4C2864
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4C2868
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4C286C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4C2870
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4C2874
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4C2878
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4C287C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4C2880
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4C2884
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4C2888
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4C288C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4C2890
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4C2894
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4C2898
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4C289C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4C28A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4C28A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4C28A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4C28AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4C28B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4C28B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4C28B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4C28BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4C28C0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4C28C4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4C28C8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4C28CC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4C28D0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4C28D4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4C28D8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4C28DC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4C28E0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4C28E4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4C28E8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4C28EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4C28F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4C28F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4C28F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4C28FC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4C2900
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4C2904
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4C2908
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4C290C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4C2910
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4C2914
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4C2918
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4C291C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4C2920
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4C2924
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4C2928
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4C292C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4C2930
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4C2934
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4C2938
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4C293C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4C2940
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4C2944
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4C2948
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4C294C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4C2950
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4C2954
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4C2958
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4C295C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4C2960
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4C2964
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4C2968
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4C296C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4C2970
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4C2974
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4C2978
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4C297C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4C2980
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4C2984
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4C2988
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4C298C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4C2990
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4C2994
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4C2998
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4C299C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4C29A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4C29A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4C29A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4C29AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4C29B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4C29B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4C29B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4C29BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4C29C0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4C29C4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4C29C8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4C29CC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4C29D0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4C29D4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4C29D8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4C29DC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4C29E0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4C29E4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4C29E8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4C29EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4C29F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4C29F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4C29F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4C29FC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4C2A00
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4C2A04
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4C2A08
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4C2A0C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4C2A10
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4C2A14
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4C2A18
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4C2A1C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4C2A20
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AW 0x4C2A64
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AR 0x4C2A68
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4C2A6C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4C2A70
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_CFG 0x4C2B64
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_SHIFT 0x4C2B68
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4C2B6C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4C2B70
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4C2B74
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4C2B78
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4C2B7C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4C2B80
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4C2B84
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4C2B88
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_0 0x4C2BAC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_1 0x4C2BB0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_2 0x4C2BB4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_3 0x4C2BB8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_4 0x4C2BBC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_5 0x4C2BC0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_6 0x4C2BC4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_7 0x4C2BC8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_0 0x4C2BEC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_1 0x4C2BF0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_2 0x4C2BF4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_3 0x4C2BF8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_4 0x4C2BFC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_5 0x4C2C00
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_6 0x4C2C04
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_7 0x4C2C08
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_WDT 0x4C2C2C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4C2C30
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4C2C34
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4C2C38
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4C2C3C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4C2C40
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4C2C44
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4C2C48
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4C2C4C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4C2C50
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4C2C54
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4C2C58
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4C2C5C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4C2C60
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4C2C64
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4C2C68
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4C2C6C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x4C2C70
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4C2C74
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4C2C78
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4C2C7C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4C2C80
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4C2C84
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4C2C88
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4C2C8C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4C2C90
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4C2C94
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4C2C98
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4C2C9C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4C2CA0
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4C2CA4
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4C2CA8
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4C2CAC
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_0 0x4C2CB0
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_1 0x4C2CB4
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_2 0x4C2CB8
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3 0x4C2CBC
+
+#endif /* ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_ */
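The per-channel E2E counter block above is laid out HBM-controller-major, channel-minor at a 4-byte stride (AW_HBM0_CH0_CTR_CNT at 0x4C2C90 up through AW_HBM3_CH1_CTR_CNT at 0x4C2CAC), so one expression indexes all eight counters. A hedged sketch, assuming RREG32() is the driver's 32-bit MMIO read macro and gaudi_e2e_aw_cnt() is a hypothetical helper:

	/*
	 * Illustrative sketch only: read the AW end-to-end counter for a
	 * given HBM controller (0..3) and channel (0..1). The counters are
	 * consecutive u32 registers ordered (hbm, ch) at a 4-byte stride,
	 * so HBM3_CH1 is the HBM0_CH0 address plus 7 * 4 = 0x1C.
	 */
	static u32 gaudi_e2e_aw_cnt(struct hl_device *hdev, int hbm, int ch)
	{
		u32 reg = mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT +
				((hbm * 2 + ch) << 2);

		return RREG32(reg);
	}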
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
new file mode 100644
index 000000000000..da60893a5fab
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_N_REGS_H_
+#define ASIC_REG_DMA_IF_W_N_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_N (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_N_HBM0_WR_CRED_CNT 0x4C0000
+
+#define mmDMA_IF_W_N_HBM1_WR_CRED_CNT 0x4C0004
+
+#define mmDMA_IF_W_N_HBM0_RD_CRED_CNT 0x4C0008
+
+#define mmDMA_IF_W_N_HBM1_RD_CRED_CNT 0x4C000C
+
+#define mmDMA_IF_W_N_HBM_LIMITER_0 0x4C0030
+
+#define mmDMA_IF_W_N_HBM_LIMITER_1 0x4C0034
+
+#define mmDMA_IF_W_N_HBM_LIMITER_2 0x4C0038
+
+#define mmDMA_IF_W_N_HBM_LIMITER_3 0x4C003C
+
+#define mmDMA_IF_W_N_HBM_ALMOST_EN_0 0x4C0040
+
+#define mmDMA_IF_W_N_HBM_ALMOST_EN_1 0x4C0044
+
+#define mmDMA_IF_W_N_HBM_CRED_EN_0 0x4C0050
+
+#define mmDMA_IF_W_N_HBM_CRED_EN_1 0x4C0054
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_0 0x4C0100
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_1 0x4C0104
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_2 0x4C0108
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_3 0x4C010C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_4 0x4C0110
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_5 0x4C0114
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_6 0x4C0118
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_7 0x4C011C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_8 0x4C0120
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_9 0x4C0124
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_10 0x4C0128
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_11 0x4C012C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_12 0x4C0130
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_13 0x4C0134
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_14 0x4C0138
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_15 0x4C013C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_0 0x4C0140
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_1 0x4C0144
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_2 0x4C0148
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_3 0x4C014C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_4 0x4C0150
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_5 0x4C0154
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_6 0x4C0158
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_7 0x4C015C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_8 0x4C0160
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_9 0x4C0164
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_10 0x4C0168
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_11 0x4C016C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_12 0x4C0170
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_13 0x4C0174
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_14 0x4C0178
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_15 0x4C017C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_0 0x4C0180
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_1 0x4C0184
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_2 0x4C0188
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_3 0x4C018C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_4 0x4C0190
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_5 0x4C0194
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_6 0x4C0198
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_7 0x4C019C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_8 0x4C01A0
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_9 0x4C01A4
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_10 0x4C01A8
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_11 0x4C01AC
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_12 0x4C01B0
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_13 0x4C01B4
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_14 0x4C01B8
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_15 0x4C01BC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_0 0x4C01C0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_1 0x4C01C4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_2 0x4C01C8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_3 0x4C01CC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_4 0x4C01D0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_5 0x4C01D4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_6 0x4C01D8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_7 0x4C01DC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_8 0x4C01E0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_9 0x4C01E4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_10 0x4C01E8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_11 0x4C01EC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_12 0x4C01F0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_13 0x4C01F4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_14 0x4C01F8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_15 0x4C01FC
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_0 0x4C0200
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_1 0x4C0204
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_2 0x4C0208
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_3 0x4C020C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_4 0x4C0210
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_5 0x4C0214
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_6 0x4C0218
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_7 0x4C021C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_8 0x4C0220
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_9 0x4C0224
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_10 0x4C0228
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_11 0x4C022C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_12 0x4C0230
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_13 0x4C0234
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_14 0x4C0238
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_15 0x4C023C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_0 0x4C0240
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_1 0x4C0244
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_2 0x4C0248
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_3 0x4C024C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_4 0x4C0250
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_5 0x4C0254
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_6 0x4C0258
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_7 0x4C025C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_8 0x4C0260
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_9 0x4C0264
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_10 0x4C0268
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_11 0x4C026C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_12 0x4C0270
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_13 0x4C0274
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_14 0x4C0278
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_15 0x4C027C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_0 0x4C0280
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_1 0x4C0284
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_2 0x4C0288
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_3 0x4C028C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_4 0x4C0290
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_5 0x4C0294
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_6 0x4C0298
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_7 0x4C029C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_8 0x4C02A0
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_9 0x4C02A4
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_10 0x4C02A8
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_11 0x4C02AC
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_12 0x4C02B0
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_13 0x4C02B4
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_14 0x4C02B8
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_15 0x4C02BC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_0 0x4C02C0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_1 0x4C02C4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_2 0x4C02C8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_3 0x4C02CC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_4 0x4C02D0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_5 0x4C02D4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_6 0x4C02D8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_7 0x4C02DC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_8 0x4C02E0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_9 0x4C02E4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_10 0x4C02E8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_11 0x4C02EC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_12 0x4C02F0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_13 0x4C02F4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_14 0x4C02F8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_15 0x4C02FC
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_0 0x4C0300
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_1 0x4C0304
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_2 0x4C0308
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_3 0x4C030C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_4 0x4C0310
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_5 0x4C0314
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_6 0x4C0318
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_7 0x4C031C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_8 0x4C0320
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_9 0x4C0324
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_10 0x4C0328
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_11 0x4C032C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_12 0x4C0330
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_13 0x4C0334
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_14 0x4C0338
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_15 0x4C033C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_0 0x4C0340
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_1 0x4C0344
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_2 0x4C0348
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_3 0x4C034C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_4 0x4C0350
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_5 0x4C0354
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_6 0x4C0358
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_7 0x4C035C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_8 0x4C0360
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_9 0x4C0364
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_10 0x4C0368
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_11 0x4C036C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_12 0x4C0370
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_13 0x4C0374
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_14 0x4C0378
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_15 0x4C037C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_0 0x4C0380
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_1 0x4C0384
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_2 0x4C0388
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_3 0x4C038C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_4 0x4C0390
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_5 0x4C0394
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_6 0x4C0398
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_7 0x4C039C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_8 0x4C03A0
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_9 0x4C03A4
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_10 0x4C03A8
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_11 0x4C03AC
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_12 0x4C03B0
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_13 0x4C03B4
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_14 0x4C03B8
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_15 0x4C03BC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_0 0x4C03C0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_1 0x4C03C4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_2 0x4C03C8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_3 0x4C03CC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_4 0x4C03D0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_5 0x4C03D4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_6 0x4C03D8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_7 0x4C03DC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_8 0x4C03E0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_9 0x4C03E4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_10 0x4C03E8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_11 0x4C03EC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_12 0x4C03F0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_13 0x4C03F4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_14 0x4C03F8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_15 0x4C03FC
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_0 0x4C0400
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_1 0x4C0404
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_2 0x4C0408
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_3 0x4C040C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_4 0x4C0410
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_5 0x4C0414
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_6 0x4C0418
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_7 0x4C041C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_8 0x4C0420
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_9 0x4C0424
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_10 0x4C0428
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_11 0x4C042C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_12 0x4C0430
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_13 0x4C0434
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_14 0x4C0438
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_15 0x4C043C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_0 0x4C0440
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_1 0x4C0444
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_2 0x4C0448
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_3 0x4C044C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_4 0x4C0450
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_5 0x4C0454
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_6 0x4C0458
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_7 0x4C045C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_8 0x4C0460
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_9 0x4C0464
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_10 0x4C0468
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_11 0x4C046C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_12 0x4C0470
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_13 0x4C0474
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_14 0x4C0478
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_15 0x4C047C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_0 0x4C0480
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_1 0x4C0484
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_2 0x4C0488
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_3 0x4C048C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_4 0x4C0490
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_5 0x4C0494
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_6 0x4C0498
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_7 0x4C049C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_8 0x4C04A0
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_9 0x4C04A4
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_10 0x4C04A8
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_11 0x4C04AC
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_12 0x4C04B0
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_13 0x4C04B4
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_14 0x4C04B8
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_15 0x4C04BC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_0 0x4C04C0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_1 0x4C04C4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_2 0x4C04C8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_3 0x4C04CC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_4 0x4C04D0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_5 0x4C04D4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_6 0x4C04D8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_7 0x4C04DC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_8 0x4C04E0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_9 0x4C04E4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_10 0x4C04E8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_11 0x4C04EC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_12 0x4C04F0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_13 0x4C04F4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_14 0x4C04F8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_15 0x4C04FC
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_0 0x4C0500
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_1 0x4C0504
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_2 0x4C0508
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_3 0x4C050C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_4 0x4C0510
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_5 0x4C0514
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_6 0x4C0518
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_7 0x4C051C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_8 0x4C0520
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_9 0x4C0524
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_10 0x4C0528
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_11 0x4C052C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_12 0x4C0530
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_13 0x4C0534
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_14 0x4C0538
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_15 0x4C053C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_0 0x4C0540
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_1 0x4C0544
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_2 0x4C0548
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_3 0x4C054C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_4 0x4C0550
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_5 0x4C0554
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_6 0x4C0558
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_7 0x4C055C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_8 0x4C0560
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_9 0x4C0564
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_10 0x4C0568
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_11 0x4C056C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_12 0x4C0570
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_13 0x4C0574
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_14 0x4C0578
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_15 0x4C057C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_0 0x4C0580
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_1 0x4C0584
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_2 0x4C0588
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_3 0x4C058C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_4 0x4C0590
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_5 0x4C0594
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_6 0x4C0598
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_7 0x4C059C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_8 0x4C05A0
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_9 0x4C05A4
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_10 0x4C05A8
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_11 0x4C05AC
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_12 0x4C05B0
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_13 0x4C05B4
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_14 0x4C05B8
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_15 0x4C05BC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_0 0x4C05C0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_1 0x4C05C4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_2 0x4C05C8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_3 0x4C05CC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_4 0x4C05D0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_5 0x4C05D4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_6 0x4C05D8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_7 0x4C05DC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_8 0x4C05E0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_9 0x4C05E4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_10 0x4C05E8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_11 0x4C05EC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_12 0x4C05F0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_13 0x4C05F4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_14 0x4C05F8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_15 0x4C05FC
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_0 0x4C0600
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_1 0x4C0604
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_2 0x4C0608
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_3 0x4C060C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_4 0x4C0610
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_5 0x4C0614
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_6 0x4C0618
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_7 0x4C061C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_8 0x4C0620
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_9 0x4C0624
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_10 0x4C0628
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_11 0x4C062C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_12 0x4C0630
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_13 0x4C0634
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_14 0x4C0638
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_15 0x4C063C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_0 0x4C0640
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_1 0x4C0644
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_2 0x4C0648
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_3 0x4C064C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_4 0x4C0650
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_5 0x4C0654
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_6 0x4C0658
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_7 0x4C065C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_8 0x4C0660
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_9 0x4C0664
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_10 0x4C0668
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_11 0x4C066C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_12 0x4C0670
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_13 0x4C0674
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_14 0x4C0678
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_15 0x4C067C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_0 0x4C0680
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_1 0x4C0684
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_2 0x4C0688
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_3 0x4C068C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_4 0x4C0690
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_5 0x4C0694
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_6 0x4C0698
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_7 0x4C069C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_8 0x4C06A0
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_9 0x4C06A4
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_10 0x4C06A8
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_11 0x4C06AC
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_12 0x4C06B0
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_13 0x4C06B4
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_14 0x4C06B8
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_15 0x4C06BC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_0 0x4C06C0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_1 0x4C06C4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_2 0x4C06C8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_3 0x4C06CC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_4 0x4C06D0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_5 0x4C06D4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_6 0x4C06D8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_7 0x4C06DC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_8 0x4C06E0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_9 0x4C06E4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_10 0x4C06E8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_11 0x4C06EC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_12 0x4C06F0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_13 0x4C06F4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_14 0x4C06F8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_15 0x4C06FC
+
+#define mmDMA_IF_W_N_SOB_HIT_RPROT 0x4C0700
+
+#define mmDMA_IF_W_N_SOB_HIT_WPROT 0x4C0704
+
+#define mmDMA_IF_W_N_SOB_HIT_RPRIV 0x4C070C
+
+#define mmDMA_IF_W_N_SOB_HIT_WPRIV 0x4C0710
+
+#define mmDMA_IF_W_N_DMA0_HIT_RPROT 0x4C071C
+
+#define mmDMA_IF_W_N_DMA0_HIT_WPROT 0x4C0720
+
+#define mmDMA_IF_W_N_DMA0_HIT_RPRIV 0x4C0724
+
+#define mmDMA_IF_W_N_DMA0_HIT_WPRIV 0x4C0728
+
+#define mmDMA_IF_W_N_DMA1_HIT_RPROT 0x4C0730
+
+#define mmDMA_IF_W_N_DMA1_HIT_WPROT 0x4C0734
+
+#define mmDMA_IF_W_N_DMA1_HIT_RPRIV 0x4C0738
+
+#define mmDMA_IF_W_N_DMA1_HIT_WPRIV 0x4C073C
+
+#define mmDMA_IF_W_N_HBM_BIN 0x4C0800
+
+#define mmDMA_IF_W_N_MME_BIN 0x4C0804
+
+#define mmDMA_IF_W_N_TPC_BIN 0x4C0808
+
+#define mmDMA_IF_W_N_DMA_BIN 0x4C080C
+
+#define mmDMA_IF_W_N_SOB_CG_EN 0x4C0810
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_0 0x4C0820
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_1 0x4C0824
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_2 0x4C0828
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_3 0x4C082C
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_4 0x4C0830
+
+#define mmDMA_IF_W_N_HBM_MISC 0x4C0834
+
+#endif /* ASIC_REG_DMA_IF_W_N_REGS_H_ */
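The SOB/DMA0/DMA1 MIN_*/MAX_* pairs in this header appear to describe sixteen address windows per checker, with parallel 16-entry arrays at a 4-byte stride (e.g. SOB_MIN_RPROT_0 at 0x4C0100 and SOB_MAX_RPROT_0 at 0x4C0140). A minimal sketch under those assumptions; the window semantics are inferred from the register names, WREG32() is assumed to be the driver's MMIO write macro, and gaudi_set_sob_rprot() is hypothetical:

	/*
	 * Sketch only: configure read-protection window n on the SOB range
	 * checker by writing its presumed lower and upper bounds. MIN and
	 * MAX are parallel 16-entry u32 arrays, so entry n is each array's
	 * _0 address plus n * 4.
	 */
	static void gaudi_set_sob_rprot(struct hl_device *hdev, int n,
					u32 min, u32 max)
	{
		WREG32(mmDMA_IF_W_N_SOB_MIN_RPROT_0 + (n << 2), min);
		WREG32(mmDMA_IF_W_N_SOB_MAX_RPROT_0 + (n << 2), max);
	}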
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
new file mode 100644
index 000000000000..56ffc920d58a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_S_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_W_S_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_S_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_S_DOWN_CH0_PERM_SEL 0x481108
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_0 0x481114
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_1 0x481118
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_2 0x48111C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_3 0x481120
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_4 0x481124
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_5 0x481128
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_6 0x48112C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_7 0x481130
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_8 0x481134
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_9 0x481138
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_10 0x48113C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_11 0x481140
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_12 0x481144
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_13 0x481148
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_14 0x48114C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_15 0x481150
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_16 0x481154
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_17 0x481158
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_18 0x48115C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_19 0x481160
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_20 0x481164
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_21 0x481168
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_22 0x48116C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_23 0x481170
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_24 0x481174
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_25 0x481178
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_26 0x48117C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_27 0x481180
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_0 0x481184
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_1 0x481188
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_2 0x48118C
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_3 0x481190
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_4 0x481194
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_5 0x481198
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_6 0x48119C
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_7 0x4811A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_8 0x4811A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_9 0x4811A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_10 0x4811AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_11 0x4811B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_12 0x4811B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_13 0x4811B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_14 0x4811BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_SCRAM_SRAM_EN 0x48126C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_EN 0x481274
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_SAT 0x481278
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_RST 0x48127C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_TIMEOUT 0x481280
+
+#define mmDMA_IF_W_S_DOWN_CH0_SCRAM_HBM_EN 0x481284
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_EN 0x481288
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_SAT 0x48128C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_RST 0x481290
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_TIMEOUT 0x481294
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_EN 0x48129C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_SAT 0x4812A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RST 0x4812A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_TIMEOUT 0x4812AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RED 0x4812B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_EN 0x4812EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_EN 0x4812F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_WR_SIZE 0x4812F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_WR_SIZE 0x4812F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x481404
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x481408
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x48140C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x481410
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x481414
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x481418
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_RD_SIZE 0x48141C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_RD_SIZE 0x481420
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x481424
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x481428
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x48142C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x481430
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x481434
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x481438
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_0 0x481450
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_1 0x481454
+
+#define mmDMA_IF_W_S_DOWN_CH0_NON_LIN_EN 0x481480
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_0 0x481500
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_1 0x481504
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_2 0x481508
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_3 0x48150C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_4 0x481510
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_0 0x481514
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_1 0x481520
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_2 0x481524
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_3 0x481528
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_4 0x48152C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_5 0x481530
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_6 0x481534
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_7 0x481538
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_8 0x48153C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_9 0x481540
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_0 0x481550
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_1 0x481554
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_2 0x481558
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_3 0x48155C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_4 0x481560
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_5 0x481564
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_6 0x481568
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_7 0x48156C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_8 0x481570
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_9 0x481574
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_10 0x481578
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_11 0x48157C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_12 0x481580
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_13 0x481584
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_14 0x481588
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_15 0x48158C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_16 0x481590
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_17 0x481594
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_18 0x481598
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4815E4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4815E8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4815EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4815F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4815F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4815F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4815FC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x481600
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x481604
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x481608
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x48160C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x481610
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x481614
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x481618
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x48161C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x481620
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x481624
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x481628
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x48162C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x481630
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x481634
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x481638
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x48163C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x481640
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x481644
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x481648
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x48164C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x481650
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x481654
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x481658
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x48165C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x481660
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x481664
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x481668
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x48166C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x481670
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x481674
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x481678
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x48167C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x481680
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x481684
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x481688
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x48168C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x481690
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x481694
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x481698
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x48169C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4816A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4816A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4816A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4816AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4816B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4816B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4816B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4816BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4816C0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4816C4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4816C8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4816CC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4816D0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4816D4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4816D8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4816DC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4816E0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4816E4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4816E8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4816EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4816F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4816F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4816F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4816FC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x481700
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x481704
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x481708
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x48170C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x481710
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x481714
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x481718
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x48171C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x481720
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x481724
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x481728
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x48172C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x481730
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x481734
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x481738
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x48173C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x481740
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x481744
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x481748
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x48174C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x481750
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x481754
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x481758
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x48175C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x481760
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x481764
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x481768
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x48176C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x481770
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x481774
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x481778
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x48177C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x481780
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x481784
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x481788
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x48178C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x481790
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x481794
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x481798
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x48179C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4817A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4817A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4817A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4817AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4817B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4817B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4817B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4817BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4817C0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4817C4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4817C8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4817CC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4817D0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4817D4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4817D8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4817DC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4817E0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x481824
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x481828
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x48182C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x481830
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x481834
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x481838
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x48183C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x481840
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x481844
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x481848
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x48184C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x481850
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x481854
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x481858
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x48185C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x481860
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x481864
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x481868
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x48186C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x481870
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x481874
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x481878
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x48187C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x481880
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x481884
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x481888
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x48188C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x481890
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x481894
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x481898
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x48189C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4818A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4818A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4818A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4818AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4818B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4818B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4818B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4818BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4818C0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4818C4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4818C8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4818CC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4818D0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4818D4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4818D8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4818DC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4818E0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4818E4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4818E8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4818EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4818F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4818F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4818F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4818FC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x481900
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x481904
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x481908
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x48190C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x481910
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x481914
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x481918
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x48191C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x481920
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x481924
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x481928
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x48192C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x481930
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x481934
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x481938
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x48193C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x481940
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x481944
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x481948
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x48194C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x481950
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x481954
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x481958
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x48195C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x481960
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x481964
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x481968
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x48196C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x481970
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x481974
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x481978
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x48197C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x481980
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x481984
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x481988
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x48198C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x481990
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x481994
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x481998
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x48199C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4819A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4819A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4819A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4819AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4819B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4819B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4819B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4819BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4819C0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4819C4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4819C8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4819CC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4819D0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4819D4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4819D8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4819DC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4819E0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4819E4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4819E8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4819EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4819F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4819F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4819F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4819FC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x481A00
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x481A04
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x481A08
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x481A0C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x481A10
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x481A14
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x481A18
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x481A1C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x481A20
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW 0x481A64
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR 0x481A68
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_HIT_AW 0x481A6C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_HIT_AR 0x481A70
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_CFG 0x481B64
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_SHIFT 0x481B68
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_0 0x481B6C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_1 0x481B70
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_2 0x481B74
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_3 0x481B78
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_4 0x481B7C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_5 0x481B80
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_6 0x481B84
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_7 0x481B88
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_0 0x481BAC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_1 0x481BB0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_2 0x481BB4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_3 0x481BB8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_4 0x481BBC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_5 0x481BC0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_6 0x481BC4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_7 0x481BC8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_0 0x481BEC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_1 0x481BF0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_2 0x481BF4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_3 0x481BF8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_4 0x481BFC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_5 0x481C00
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_6 0x481C04
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_7 0x481C08
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_WDT 0x481C2C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x481C30
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x481C34
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x481C38
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x481C3C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x481C40
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x481C44
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x481C48
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x481C4C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x481C50
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x481C54
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x481C58
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x481C5C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x481C60
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x481C64
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x481C68
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x481C6C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x481C70
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x481C74
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x481C78
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x481C7C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x481C80
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x481C84
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x481C88
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x481C8C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x481C90
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x481C94
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x481C98
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x481C9C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x481CA0
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x481CA4
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x481CA8
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x481CAC
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_0 0x481CB0
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_1 0x481CB4
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_2 0x481CB8
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_3 0x481CBC
+
+#endif /* ASIC_REG_DMA_IF_W_S_DOWN_CH0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
new file mode 100644
index 000000000000..cbc642918deb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_S_DOWN_CH1_REGS_H_
+#define ASIC_REG_DMA_IF_W_S_DOWN_CH1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_S_DOWN_CH1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_S_DOWN_CH1_PERM_SEL 0x482108
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_0 0x482114
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_1 0x482118
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_2 0x48211C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_3 0x482120
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_4 0x482124
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_5 0x482128
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_6 0x48212C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_7 0x482130
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_8 0x482134
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_9 0x482138
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_10 0x48213C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_11 0x482140
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_12 0x482144
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_13 0x482148
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_14 0x48214C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_15 0x482150
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_16 0x482154
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_17 0x482158
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_18 0x48215C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_19 0x482160
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_20 0x482164
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_21 0x482168
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_22 0x48216C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_23 0x482170
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_24 0x482174
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_25 0x482178
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_26 0x48217C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_27 0x482180
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_0 0x482184
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_1 0x482188
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_2 0x48218C
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_3 0x482190
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_4 0x482194
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_5 0x482198
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_6 0x48219C
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_7 0x4821A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_8 0x4821A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_9 0x4821A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_10 0x4821AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_11 0x4821B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_12 0x4821B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_13 0x4821B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_14 0x4821BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_SCRAM_SRAM_EN 0x48226C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_EN 0x482274
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_SAT 0x482278
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_RST 0x48227C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_TIMEOUT 0x482280
+
+#define mmDMA_IF_W_S_DOWN_CH1_SCRAM_HBM_EN 0x482284
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_EN 0x482288
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_SAT 0x48228C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_RST 0x482290
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_TIMEOUT 0x482294
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_EN 0x48229C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_SAT 0x4822A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RST 0x4822A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_TIMEOUT 0x4822AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RED 0x4822B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_EN 0x4822EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_EN 0x4822F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_WR_SIZE 0x4822F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE 0x4822F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x482404
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x482408
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x48240C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x482410
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x482414
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x482418
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_RD_SIZE 0x48241C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE 0x482420
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x482424
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x482428
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x48242C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x482430
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x482434
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x482438
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_0 0x482450
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_1 0x482454
+
+#define mmDMA_IF_W_S_DOWN_CH1_NON_LIN_EN 0x482480
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_0 0x482500
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_1 0x482504
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_2 0x482508
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_3 0x48250C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_4 0x482510
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_0 0x482514
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_1 0x482520
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_2 0x482524
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_3 0x482528
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_4 0x48252C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_5 0x482530
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_6 0x482534
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_7 0x482538
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_8 0x48253C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_9 0x482540
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_0 0x482550
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_1 0x482554
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_2 0x482558
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_3 0x48255C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_4 0x482560
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_5 0x482564
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_6 0x482568
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_7 0x48256C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_8 0x482570
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_9 0x482574
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_10 0x482578
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_11 0x48257C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_12 0x482580
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_13 0x482584
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_14 0x482588
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_15 0x48258C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_16 0x482590
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_17 0x482594
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_18 0x482598
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4825E4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4825E8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4825EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4825F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4825F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4825F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4825FC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x482600
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x482604
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x482608
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x48260C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x482610
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x482614
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x482618
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x48261C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x482620
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x482624
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x482628
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x48262C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x482630
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x482634
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x482638
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x48263C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x482640
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x482644
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x482648
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x48264C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x482650
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x482654
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x482658
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x48265C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x482660
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x482664
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x482668
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x48266C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x482670
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x482674
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x482678
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x48267C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x482680
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x482684
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x482688
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x48268C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x482690
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x482694
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x482698
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x48269C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4826A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4826A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4826A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4826AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4826B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4826B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4826B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4826BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4826C0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4826C4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4826C8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4826CC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4826D0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4826D4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4826D8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4826DC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4826E0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4826E4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4826E8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4826EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4826F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4826F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4826F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4826FC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x482700
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x482704
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x482708
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x48270C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x482710
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x482714
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x482718
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x48271C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x482720
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x482724
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x482728
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x48272C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x482730
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x482734
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x482738
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x48273C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x482740
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x482744
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x482748
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x48274C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x482750
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x482754
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x482758
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x48275C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x482760
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x482764
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x482768
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x48276C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x482770
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x482774
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x482778
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x48277C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x482780
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x482784
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x482788
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x48278C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x482790
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x482794
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x482798
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x48279C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4827A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4827A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4827A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4827AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4827B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4827B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4827B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4827BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4827C0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4827C4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4827C8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4827CC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4827D0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4827D4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4827D8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4827DC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4827E0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x482824
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x482828
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x48282C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x482830
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x482834
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x482838
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x48283C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x482840
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x482844
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x482848
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x48284C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x482850
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x482854
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x482858
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x48285C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x482860
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x482864
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x482868
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x48286C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x482870
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x482874
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x482878
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x48287C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x482880
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x482884
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x482888
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x48288C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x482890
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x482894
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x482898
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x48289C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4828A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4828A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4828A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4828AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4828B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4828B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4828B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4828BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4828C0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4828C4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4828C8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4828CC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4828D0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4828D4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4828D8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4828DC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4828E0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4828E4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4828E8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4828EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4828F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4828F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4828F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4828FC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x482900
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x482904
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x482908
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x48290C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x482910
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x482914
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x482918
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x48291C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x482920
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x482924
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x482928
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x48292C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x482930
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x482934
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x482938
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x48293C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x482940
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x482944
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x482948
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x48294C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x482950
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x482954
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x482958
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x48295C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x482960
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x482964
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x482968
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x48296C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x482970
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x482974
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x482978
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x48297C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x482980
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x482984
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x482988
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x48298C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x482990
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x482994
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x482998
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x48299C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4829A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4829A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4829A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4829AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4829B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4829B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4829B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4829BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4829C0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4829C4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4829C8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4829CC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4829D0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4829D4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4829D8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4829DC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4829E0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4829E4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4829E8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4829EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4829F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4829F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4829F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4829FC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x482A00
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x482A04
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x482A08
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x482A0C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x482A10
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x482A14
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x482A18
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x482A1C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x482A20
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW 0x482A64
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR 0x482A68
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_HIT_AW 0x482A6C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_HIT_AR 0x482A70
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_CFG 0x482B64
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_SHIFT 0x482B68
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_0 0x482B6C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_1 0x482B70
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_2 0x482B74
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_3 0x482B78
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_4 0x482B7C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_5 0x482B80
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_6 0x482B84
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_7 0x482B88
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_0 0x482BAC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_1 0x482BB0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_2 0x482BB4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_3 0x482BB8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_4 0x482BBC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_5 0x482BC0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_6 0x482BC4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_7 0x482BC8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_0 0x482BEC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_1 0x482BF0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_2 0x482BF4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_3 0x482BF8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_4 0x482BFC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_5 0x482C00
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_6 0x482C04
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_7 0x482C08
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_WDT 0x482C2C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x482C30
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x482C34
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x482C38
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x482C3C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x482C40
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x482C44
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x482C48
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x482C4C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x482C50
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x482C54
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x482C58
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x482C5C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x482C60
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x482C64
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x482C68
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x482C6C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x482C70
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x482C74
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x482C78
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x482C7C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x482C80
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x482C84
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x482C88
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x482C8C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x482C90
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x482C94
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x482C98
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x482C9C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x482CA0
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x482CA4
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x482CA8
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x482CAC
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_0 0x482CB0
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_1 0x482CB4
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_2 0x482CB8
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_3 0x482CBC
+
+#endif /* ASIC_REG_DMA_IF_W_S_DOWN_CH1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
new file mode 100644
index 000000000000..2382bc41bea6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_S_REGS_H_
+#define ASIC_REG_DMA_IF_W_S_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_S (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_S_HBM0_WR_CRED_CNT 0x480000
+
+#define mmDMA_IF_W_S_HBM1_WR_CRED_CNT 0x480004
+
+#define mmDMA_IF_W_S_HBM0_RD_CRED_CNT 0x480008
+
+#define mmDMA_IF_W_S_HBM1_RD_CRED_CNT 0x48000C
+
+#define mmDMA_IF_W_S_HBM_LIMITER_0 0x480030
+
+#define mmDMA_IF_W_S_HBM_LIMITER_1 0x480034
+
+#define mmDMA_IF_W_S_HBM_LIMITER_2 0x480038
+
+#define mmDMA_IF_W_S_HBM_LIMITER_3 0x48003C
+
+#define mmDMA_IF_W_S_HBM_ALMOST_EN_0 0x480040
+
+#define mmDMA_IF_W_S_HBM_ALMOST_EN_1 0x480044
+
+#define mmDMA_IF_W_S_HBM_CRED_EN_0 0x480050
+
+#define mmDMA_IF_W_S_HBM_CRED_EN_1 0x480054
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_0 0x480100
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_1 0x480104
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_2 0x480108
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_3 0x48010C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_4 0x480110
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_5 0x480114
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_6 0x480118
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_7 0x48011C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_8 0x480120
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_9 0x480124
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_10 0x480128
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_11 0x48012C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_12 0x480130
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_13 0x480134
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_14 0x480138
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_15 0x48013C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_0 0x480140
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_1 0x480144
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_2 0x480148
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_3 0x48014C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_4 0x480150
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_5 0x480154
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_6 0x480158
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_7 0x48015C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_8 0x480160
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_9 0x480164
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_10 0x480168
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_11 0x48016C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_12 0x480170
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_13 0x480174
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_14 0x480178
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_15 0x48017C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_0 0x480180
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_1 0x480184
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_2 0x480188
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_3 0x48018C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_4 0x480190
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_5 0x480194
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_6 0x480198
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_7 0x48019C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_8 0x4801A0
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_9 0x4801A4
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_10 0x4801A8
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_11 0x4801AC
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_12 0x4801B0
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_13 0x4801B4
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_14 0x4801B8
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_15 0x4801BC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_0 0x4801C0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_1 0x4801C4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_2 0x4801C8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_3 0x4801CC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_4 0x4801D0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_5 0x4801D4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_6 0x4801D8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_7 0x4801DC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_8 0x4801E0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_9 0x4801E4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_10 0x4801E8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_11 0x4801EC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_12 0x4801F0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_13 0x4801F4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_14 0x4801F8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_15 0x4801FC
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_0 0x480200
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_1 0x480204
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_2 0x480208
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_3 0x48020C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_4 0x480210
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_5 0x480214
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_6 0x480218
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_7 0x48021C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_8 0x480220
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_9 0x480224
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_10 0x480228
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_11 0x48022C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_12 0x480230
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_13 0x480234
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_14 0x480238
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_15 0x48023C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_0 0x480240
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_1 0x480244
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_2 0x480248
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_3 0x48024C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_4 0x480250
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_5 0x480254
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_6 0x480258
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_7 0x48025C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_8 0x480260
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_9 0x480264
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_10 0x480268
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_11 0x48026C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_12 0x480270
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_13 0x480274
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_14 0x480278
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_15 0x48027C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_0 0x480280
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_1 0x480284
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_2 0x480288
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_3 0x48028C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_4 0x480290
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_5 0x480294
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_6 0x480298
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_7 0x48029C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_8 0x4802A0
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_9 0x4802A4
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_10 0x4802A8
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_11 0x4802AC
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_12 0x4802B0
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_13 0x4802B4
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_14 0x4802B8
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_15 0x4802BC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_0 0x4802C0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_1 0x4802C4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_2 0x4802C8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_3 0x4802CC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_4 0x4802D0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_5 0x4802D4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_6 0x4802D8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_7 0x4802DC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_8 0x4802E0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_9 0x4802E4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_10 0x4802E8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_11 0x4802EC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_12 0x4802F0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_13 0x4802F4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_14 0x4802F8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_15 0x4802FC
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_0 0x480300
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_1 0x480304
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_2 0x480308
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_3 0x48030C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_4 0x480310
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_5 0x480314
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_6 0x480318
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_7 0x48031C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_8 0x480320
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_9 0x480324
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_10 0x480328
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_11 0x48032C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_12 0x480330
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_13 0x480334
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_14 0x480338
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_15 0x48033C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_0 0x480340
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_1 0x480344
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_2 0x480348
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_3 0x48034C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_4 0x480350
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_5 0x480354
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_6 0x480358
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_7 0x48035C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_8 0x480360
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_9 0x480364
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_10 0x480368
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_11 0x48036C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_12 0x480370
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_13 0x480374
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_14 0x480378
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_15 0x48037C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_0 0x480380
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_1 0x480384
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_2 0x480388
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_3 0x48038C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_4 0x480390
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_5 0x480394
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_6 0x480398
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_7 0x48039C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_8 0x4803A0
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_9 0x4803A4
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_10 0x4803A8
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_11 0x4803AC
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_12 0x4803B0
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_13 0x4803B4
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_14 0x4803B8
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_15 0x4803BC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_0 0x4803C0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_1 0x4803C4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_2 0x4803C8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_3 0x4803CC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_4 0x4803D0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_5 0x4803D4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_6 0x4803D8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_7 0x4803DC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_8 0x4803E0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_9 0x4803E4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_10 0x4803E8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_11 0x4803EC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_12 0x4803F0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_13 0x4803F4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_14 0x4803F8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_15 0x4803FC
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_0 0x480400
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_1 0x480404
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_2 0x480408
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_3 0x48040C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_4 0x480410
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_5 0x480414
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_6 0x480418
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_7 0x48041C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_8 0x480420
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_9 0x480424
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_10 0x480428
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_11 0x48042C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_12 0x480430
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_13 0x480434
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_14 0x480438
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_15 0x48043C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_0 0x480440
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_1 0x480444
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_2 0x480448
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_3 0x48044C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_4 0x480450
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_5 0x480454
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_6 0x480458
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_7 0x48045C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_8 0x480460
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_9 0x480464
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_10 0x480468
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_11 0x48046C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_12 0x480470
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_13 0x480474
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_14 0x480478
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_15 0x48047C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_0 0x480480
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_1 0x480484
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_2 0x480488
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_3 0x48048C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_4 0x480490
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_5 0x480494
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_6 0x480498
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_7 0x48049C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_8 0x4804A0
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_9 0x4804A4
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_10 0x4804A8
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_11 0x4804AC
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_12 0x4804B0
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_13 0x4804B4
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_14 0x4804B8
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_15 0x4804BC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_0 0x4804C0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_1 0x4804C4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_2 0x4804C8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_3 0x4804CC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_4 0x4804D0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_5 0x4804D4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_6 0x4804D8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_7 0x4804DC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_8 0x4804E0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_9 0x4804E4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_10 0x4804E8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_11 0x4804EC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_12 0x4804F0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_13 0x4804F4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_14 0x4804F8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_15 0x4804FC
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_0 0x480500
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_1 0x480504
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_2 0x480508
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_3 0x48050C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_4 0x480510
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_5 0x480514
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_6 0x480518
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_7 0x48051C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_8 0x480520
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_9 0x480524
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_10 0x480528
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_11 0x48052C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_12 0x480530
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_13 0x480534
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_14 0x480538
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_15 0x48053C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_0 0x480540
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_1 0x480544
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_2 0x480548
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_3 0x48054C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_4 0x480550
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_5 0x480554
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_6 0x480558
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_7 0x48055C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_8 0x480560
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_9 0x480564
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_10 0x480568
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_11 0x48056C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_12 0x480570
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_13 0x480574
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_14 0x480578
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_15 0x48057C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_0 0x480580
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_1 0x480584
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_2 0x480588
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_3 0x48058C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_4 0x480590
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_5 0x480594
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_6 0x480598
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_7 0x48059C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_8 0x4805A0
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_9 0x4805A4
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_10 0x4805A8
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_11 0x4805AC
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_12 0x4805B0
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_13 0x4805B4
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_14 0x4805B8
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_15 0x4805BC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_0 0x4805C0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_1 0x4805C4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_2 0x4805C8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_3 0x4805CC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_4 0x4805D0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_5 0x4805D4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_6 0x4805D8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_7 0x4805DC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_8 0x4805E0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_9 0x4805E4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_10 0x4805E8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_11 0x4805EC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_12 0x4805F0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_13 0x4805F4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_14 0x4805F8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_15 0x4805FC
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_0 0x480600
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_1 0x480604
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_2 0x480608
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_3 0x48060C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_4 0x480610
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_5 0x480614
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_6 0x480618
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_7 0x48061C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_8 0x480620
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_9 0x480624
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_10 0x480628
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_11 0x48062C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_12 0x480630
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_13 0x480634
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_14 0x480638
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_15 0x48063C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_0 0x480640
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_1 0x480644
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_2 0x480648
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_3 0x48064C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_4 0x480650
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_5 0x480654
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_6 0x480658
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_7 0x48065C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_8 0x480660
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_9 0x480664
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_10 0x480668
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_11 0x48066C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_12 0x480670
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_13 0x480674
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_14 0x480678
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_15 0x48067C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_0 0x480680
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_1 0x480684
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_2 0x480688
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_3 0x48068C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_4 0x480690
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_5 0x480694
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_6 0x480698
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_7 0x48069C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_8 0x4806A0
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_9 0x4806A4
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_10 0x4806A8
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_11 0x4806AC
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_12 0x4806B0
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_13 0x4806B4
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_14 0x4806B8
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_15 0x4806BC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_0 0x4806C0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_1 0x4806C4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_2 0x4806C8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_3 0x4806CC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_4 0x4806D0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_5 0x4806D4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_6 0x4806D8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_7 0x4806DC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_8 0x4806E0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_9 0x4806E4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_10 0x4806E8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_11 0x4806EC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_12 0x4806F0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_13 0x4806F4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_14 0x4806F8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_15 0x4806FC
+
+#define mmDMA_IF_W_S_SOB_HIT_RPROT 0x480700
+
+#define mmDMA_IF_W_S_SOB_HIT_WPROT 0x480704
+
+#define mmDMA_IF_W_S_SOB_HIT_RPRIV 0x48070C
+
+#define mmDMA_IF_W_S_SOB_HIT_WPRIV 0x480710
+
+#define mmDMA_IF_W_S_DMA0_HIT_RPROT 0x48071C
+
+#define mmDMA_IF_W_S_DMA0_HIT_WPROT 0x480720
+
+#define mmDMA_IF_W_S_DMA0_HIT_RPRIV 0x480724
+
+#define mmDMA_IF_W_S_DMA0_HIT_WPRIV 0x480728
+
+#define mmDMA_IF_W_S_DMA1_HIT_RPROT 0x480730
+
+#define mmDMA_IF_W_S_DMA1_HIT_WPROT 0x480734
+
+#define mmDMA_IF_W_S_DMA1_HIT_RPRIV 0x480738
+
+#define mmDMA_IF_W_S_DMA1_HIT_WPRIV 0x48073C
+
+#define mmDMA_IF_W_S_HBM_BIN 0x480800
+
+#define mmDMA_IF_W_S_MME_BIN 0x480804
+
+#define mmDMA_IF_W_S_TPC_BIN 0x480808
+
+#define mmDMA_IF_W_S_DMA_BIN 0x48080C
+
+#define mmDMA_IF_W_S_SOB_CG_EN 0x480810
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_0 0x480820
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_1 0x480824
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_2 0x480828
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_3 0x48082C
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_4 0x480830
+
+#define mmDMA_IF_W_S_HBM_MISC 0x480834
+
+#endif /* ASIC_REG_DMA_IF_W_S_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
new file mode 100644
index 000000000000..c7596aac7a5c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
@@ -0,0 +1,4974 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef GAUDI_BLOCKS_H_
+#define GAUDI_BLOCKS_H_
+
+#define mmNIC0_PHY0_BASE 0x0ull
+#define NIC0_PHY0_MAX_OFFSET 0x9F13
+#define mmMME0_ACC_BASE 0x7FFC020000ull
+#define MME0_ACC_MAX_OFFSET 0x5C00
+#define MME0_ACC_SECTION 0x20000
+#define mmMME0_SBAB_BASE 0x7FFC040000ull
+#define MME0_SBAB_MAX_OFFSET 0x5800
+#define MME0_SBAB_SECTION 0x1000
+#define mmMME0_PRTN_BASE 0x7FFC041000ull
+#define MME0_PRTN_MAX_OFFSET 0x5000
+#define MME0_PRTN_SECTION 0x1F000
+#define mmMME0_CTRL_BASE 0x7FFC060000ull
+#define MME0_CTRL_MAX_OFFSET 0xDA80
+#define MME0_CTRL_SECTION 0x8000
+#define mmARCH_MME0_CTRL_BASE 0x7FFC060008ull
+#define ARCH_MME0_CTRL_MAX_OFFSET 0x3400
+#define ARCH_MME0_CTRL_SECTION 0x3400
+#define mmARCH_TENSOR_S_MME0_CTRL_BASE 0x7FFC06003Cull
+#define ARCH_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_S_MME0_CTRL_BASE 0x7FFC060088ull
+#define ARCH_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_L_MME0_CTRL_BASE 0x7FFC0600ACull
+#define ARCH_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC0600F8ull
+#define ARCH_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmARCH_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC06011Cull
+#define ARCH_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_O_MME0_CTRL_BASE 0x7FFC060140ull
+#define ARCH_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC06018Cull
+#define ARCH_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmARCH_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC0601B0ull
+#define ARCH_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME0_CTRL_BASE 0x7FFC0601D4ull
+#define ARCH_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME0_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME0_CTRL_BASE 0x7FFC060408ull
+#define SHADOW_0_MME0_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME0_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME0_CTRL_BASE 0x7FFC06043Cull
+#define SHADOW_0_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME0_CTRL_BASE 0x7FFC060488ull
+#define SHADOW_0_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME0_CTRL_BASE 0x7FFC0604ACull
+#define SHADOW_0_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC0604F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC06051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME0_CTRL_BASE 0x7FFC060540ull
+#define SHADOW_0_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC06058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC0605B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME0_CTRL_BASE 0x7FFC0605D4ull
+#define SHADOW_0_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME0_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME0_CTRL_BASE 0x7FFC060688ull
+#define SHADOW_1_MME0_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME0_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME0_CTRL_BASE 0x7FFC0606BCull
+#define SHADOW_1_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME0_CTRL_BASE 0x7FFC060708ull
+#define SHADOW_1_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME0_CTRL_BASE 0x7FFC06072Cull
+#define SHADOW_1_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC060778ull
+#define SHADOW_1_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC06079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME0_CTRL_BASE 0x7FFC0607C0ull
+#define SHADOW_1_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC06080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC060830ull
+#define SHADOW_1_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME0_CTRL_BASE 0x7FFC060854ull
+#define SHADOW_1_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME0_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME0_CTRL_BASE 0x7FFC060908ull
+#define SHADOW_2_MME0_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME0_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME0_CTRL_BASE 0x7FFC06093Cull
+#define SHADOW_2_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME0_CTRL_BASE 0x7FFC060988ull
+#define SHADOW_2_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME0_CTRL_BASE 0x7FFC0609ACull
+#define SHADOW_2_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC0609F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC060A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME0_CTRL_BASE 0x7FFC060A40ull
+#define SHADOW_2_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC060A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC060AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME0_CTRL_BASE 0x7FFC060AD4ull
+#define SHADOW_2_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME0_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME0_CTRL_BASE 0x7FFC060B88ull
+#define SHADOW_3_MME0_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME0_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME0_CTRL_BASE 0x7FFC060BBCull
+#define SHADOW_3_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME0_CTRL_BASE 0x7FFC060C08ull
+#define SHADOW_3_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME0_CTRL_BASE 0x7FFC060C2Cull
+#define SHADOW_3_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC060C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC060C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME0_CTRL_BASE 0x7FFC060CC0ull
+#define SHADOW_3_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC060D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC060D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME0_CTRL_BASE 0x7FFC060D54ull
+#define SHADOW_3_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME0_CTRL_SECTION 0x72AC
+#define mmMME0_QM_BASE 0x7FFC068000ull
+#define MME0_QM_MAX_OFFSET 0xD040
+#define MME0_QM_SECTION 0x38000
+#define mmMME1_ACC_BASE 0x7FFC0A0000ull
+#define MME1_ACC_MAX_OFFSET 0x5C00
+#define MME1_ACC_SECTION 0x20000
+#define mmMME1_SBAB_BASE 0x7FFC0C0000ull
+#define MME1_SBAB_MAX_OFFSET 0x5800
+#define MME1_SBAB_SECTION 0x1000
+#define mmMME1_PRTN_BASE 0x7FFC0C1000ull
+#define MME1_PRTN_MAX_OFFSET 0x5000
+#define MME1_PRTN_SECTION 0x1F000
+#define mmMME1_CTRL_BASE 0x7FFC0E0000ull
+#define MME1_CTRL_MAX_OFFSET 0xDA80
+#define MME1_CTRL_SECTION 0x8000
+#define mmARCH_MME1_CTRL_BASE 0x7FFC0E0008ull
+#define ARCH_MME1_CTRL_MAX_OFFSET 0x3400
+#define ARCH_MME1_CTRL_SECTION 0x3400
+#define mmARCH_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E003Cull
+#define ARCH_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_S_MME1_CTRL_BASE 0x7FFC0E0088ull
+#define ARCH_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E00ACull
+#define ARCH_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E00F8ull
+#define ARCH_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmARCH_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E011Cull
+#define ARCH_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0140ull
+#define ARCH_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E018Cull
+#define ARCH_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmARCH_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E01B0ull
+#define ARCH_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME1_CTRL_BASE 0x7FFC0E01D4ull
+#define ARCH_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME1_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME1_CTRL_BASE 0x7FFC0E0408ull
+#define SHADOW_0_MME1_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME1_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E043Cull
+#define SHADOW_0_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME1_CTRL_BASE 0x7FFC0E0488ull
+#define SHADOW_0_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E04ACull
+#define SHADOW_0_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E04F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0540ull
+#define SHADOW_0_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E05B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME1_CTRL_BASE 0x7FFC0E05D4ull
+#define SHADOW_0_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME1_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME1_CTRL_BASE 0x7FFC0E0688ull
+#define SHADOW_1_MME1_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME1_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E06BCull
+#define SHADOW_1_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME1_CTRL_BASE 0x7FFC0E0708ull
+#define SHADOW_1_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E072Cull
+#define SHADOW_1_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E0778ull
+#define SHADOW_1_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E07C0ull
+#define SHADOW_1_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E0830ull
+#define SHADOW_1_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME1_CTRL_BASE 0x7FFC0E0854ull
+#define SHADOW_1_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME1_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME1_CTRL_BASE 0x7FFC0E0908ull
+#define SHADOW_2_MME1_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME1_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E093Cull
+#define SHADOW_2_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME1_CTRL_BASE 0x7FFC0E0988ull
+#define SHADOW_2_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E09ACull
+#define SHADOW_2_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E09F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E0A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0A40ull
+#define SHADOW_2_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E0A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E0AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME1_CTRL_BASE 0x7FFC0E0AD4ull
+#define SHADOW_2_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME1_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME1_CTRL_BASE 0x7FFC0E0B88ull
+#define SHADOW_3_MME1_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME1_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E0BBCull
+#define SHADOW_3_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME1_CTRL_BASE 0x7FFC0E0C08ull
+#define SHADOW_3_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E0C2Cull
+#define SHADOW_3_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E0C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E0C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0CC0ull
+#define SHADOW_3_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E0D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E0D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME1_CTRL_BASE 0x7FFC0E0D54ull
+#define SHADOW_3_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME1_CTRL_SECTION 0x72AC
+#define mmMME1_QM_BASE 0x7FFC0E8000ull
+#define MME1_QM_MAX_OFFSET 0xD040
+#define MME1_QM_SECTION 0x38000
+#define mmMME2_ACC_BASE 0x7FFC120000ull
+#define MME2_ACC_MAX_OFFSET 0x5C00
+#define MME2_ACC_SECTION 0x20000
+#define mmMME2_SBAB_BASE 0x7FFC140000ull
+#define MME2_SBAB_MAX_OFFSET 0x5800
+#define MME2_SBAB_SECTION 0x1000
+#define mmMME2_PRTN_BASE 0x7FFC141000ull
+#define MME2_PRTN_MAX_OFFSET 0x5000
+#define MME2_PRTN_SECTION 0x1F000
+#define mmMME2_CTRL_BASE 0x7FFC160000ull
+#define MME2_CTRL_MAX_OFFSET 0xDA80
+#define MME2_CTRL_SECTION 0x8000
+#define mmARCH_MME2_CTRL_BASE 0x7FFC160008ull
+#define ARCH_MME2_CTRL_MAX_OFFSET 0x3400
+#define ARCH_MME2_CTRL_SECTION 0x3400
+#define mmARCH_TENSOR_S_MME2_CTRL_BASE 0x7FFC16003Cull
+#define ARCH_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_S_MME2_CTRL_BASE 0x7FFC160088ull
+#define ARCH_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_L_MME2_CTRL_BASE 0x7FFC1600ACull
+#define ARCH_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC1600F8ull
+#define ARCH_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmARCH_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC16011Cull
+#define ARCH_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_O_MME2_CTRL_BASE 0x7FFC160140ull
+#define ARCH_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC16018Cull
+#define ARCH_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmARCH_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC1601B0ull
+#define ARCH_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME2_CTRL_BASE 0x7FFC1601D4ull
+#define ARCH_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME2_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME2_CTRL_BASE 0x7FFC160408ull
+#define SHADOW_0_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME2_CTRL_BASE 0x7FFC16043Cull
+#define SHADOW_0_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME2_CTRL_BASE 0x7FFC160488ull
+#define SHADOW_0_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME2_CTRL_BASE 0x7FFC1604ACull
+#define SHADOW_0_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC1604F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC16051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME2_CTRL_BASE 0x7FFC160540ull
+#define SHADOW_0_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC16058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC1605B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME2_CTRL_BASE 0x7FFC1605D4ull
+#define SHADOW_0_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME2_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME2_CTRL_BASE 0x7FFC160688ull
+#define SHADOW_1_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME2_CTRL_BASE 0x7FFC1606BCull
+#define SHADOW_1_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME2_CTRL_BASE 0x7FFC160708ull
+#define SHADOW_1_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME2_CTRL_BASE 0x7FFC16072Cull
+#define SHADOW_1_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC160778ull
+#define SHADOW_1_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC16079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME2_CTRL_BASE 0x7FFC1607C0ull
+#define SHADOW_1_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC16080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC160830ull
+#define SHADOW_1_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME2_CTRL_BASE 0x7FFC160854ull
+#define SHADOW_1_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME2_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME2_CTRL_BASE 0x7FFC160908ull
+#define SHADOW_2_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME2_CTRL_BASE 0x7FFC16093Cull
+#define SHADOW_2_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME2_CTRL_BASE 0x7FFC160988ull
+#define SHADOW_2_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME2_CTRL_BASE 0x7FFC1609ACull
+#define SHADOW_2_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC1609F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC160A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME2_CTRL_BASE 0x7FFC160A40ull
+#define SHADOW_2_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC160A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC160AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME2_CTRL_BASE 0x7FFC160AD4ull
+#define SHADOW_2_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME2_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME2_CTRL_BASE 0x7FFC160B88ull
+#define SHADOW_3_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME2_CTRL_BASE 0x7FFC160BBCull
+#define SHADOW_3_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME2_CTRL_BASE 0x7FFC160C08ull
+#define SHADOW_3_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME2_CTRL_BASE 0x7FFC160C2Cull
+#define SHADOW_3_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC160C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC160C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME2_CTRL_BASE 0x7FFC160CC0ull
+#define SHADOW_3_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC160D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC160D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME2_CTRL_BASE 0x7FFC160D54ull
+#define SHADOW_3_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME2_CTRL_SECTION 0x72AC
+#define mmMME2_QM_BASE 0x7FFC168000ull
+#define MME2_QM_MAX_OFFSET 0xD040
+#define MME2_QM_SECTION 0x38000
+#define mmMME3_ACC_BASE 0x7FFC1A0000ull
+#define MME3_ACC_MAX_OFFSET 0x5C00
+#define MME3_ACC_SECTION 0x20000
+#define mmMME3_SBAB_BASE 0x7FFC1C0000ull
+#define MME3_SBAB_MAX_OFFSET 0x5800
+#define MME3_SBAB_SECTION 0x1000
+#define mmMME3_PRTN_BASE 0x7FFC1C1000ull
+#define MME3_PRTN_MAX_OFFSET 0x5000
+#define MME3_PRTN_SECTION 0x1F000
+#define mmMME3_CTRL_BASE 0x7FFC1E0000ull
+#define MME3_CTRL_MAX_OFFSET 0xDA80
+#define MME3_CTRL_SECTION 0x8000
+#define mmARCH_MME3_CTRL_BASE 0x7FFC1E0008ull
+#define ARCH_MME3_CTRL_MAX_OFFSET 0x3400
+#define ARCH_MME3_CTRL_SECTION 0x3400
+#define mmARCH_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E003Cull
+#define ARCH_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_S_MME3_CTRL_BASE 0x7FFC1E0088ull
+#define ARCH_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E00ACull
+#define ARCH_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E00F8ull
+#define ARCH_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmARCH_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E011Cull
+#define ARCH_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0140ull
+#define ARCH_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E018Cull
+#define ARCH_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmARCH_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E01B0ull
+#define ARCH_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME3_CTRL_BASE 0x7FFC1E01D4ull
+#define ARCH_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME3_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME3_CTRL_BASE 0x7FFC1E0408ull
+#define SHADOW_0_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E043Cull
+#define SHADOW_0_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME3_CTRL_BASE 0x7FFC1E0488ull
+#define SHADOW_0_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E04ACull
+#define SHADOW_0_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E04F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0540ull
+#define SHADOW_0_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E05B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME3_CTRL_BASE 0x7FFC1E05D4ull
+#define SHADOW_0_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME3_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME3_CTRL_BASE 0x7FFC1E0688ull
+#define SHADOW_1_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E06BCull
+#define SHADOW_1_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME3_CTRL_BASE 0x7FFC1E0708ull
+#define SHADOW_1_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E072Cull
+#define SHADOW_1_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E0778ull
+#define SHADOW_1_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E07C0ull
+#define SHADOW_1_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E0830ull
+#define SHADOW_1_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME3_CTRL_BASE 0x7FFC1E0854ull
+#define SHADOW_1_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME3_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME3_CTRL_BASE 0x7FFC1E0908ull
+#define SHADOW_2_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E093Cull
+#define SHADOW_2_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME3_CTRL_BASE 0x7FFC1E0988ull
+#define SHADOW_2_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E09ACull
+#define SHADOW_2_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E09F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E0A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0A40ull
+#define SHADOW_2_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E0A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E0AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME3_CTRL_BASE 0x7FFC1E0AD4ull
+#define SHADOW_2_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME3_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME3_CTRL_BASE 0x7FFC1E0B88ull
+#define SHADOW_3_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E0BBCull
+#define SHADOW_3_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME3_CTRL_BASE 0x7FFC1E0C08ull
+#define SHADOW_3_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E0C2Cull
+#define SHADOW_3_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E0C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E0C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0CC0ull
+#define SHADOW_3_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E0D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E0D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME3_CTRL_BASE 0x7FFC1E0D54ull
+#define SHADOW_3_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME3_CTRL_SECTION 0x72AC
+#define mmMME3_QM_BASE 0x7FFC1E8000ull
+#define MME3_QM_MAX_OFFSET 0xD040
+#define MME3_QM_SECTION 0x18000
+#define mmSRAM_Y0_X0_BANK_BASE 0x7FFC200000ull
+#define SRAM_Y0_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X0_RTR_BASE 0x7FFC201000ull
+#define SRAM_Y0_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X1_BANK_BASE 0x7FFC208000ull
+#define SRAM_Y0_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X1_RTR_BASE 0x7FFC209000ull
+#define SRAM_Y0_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X2_BANK_BASE 0x7FFC210000ull
+#define SRAM_Y0_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X2_RTR_BASE 0x7FFC211000ull
+#define SRAM_Y0_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X3_BANK_BASE 0x7FFC218000ull
+#define SRAM_Y0_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X3_RTR_BASE 0x7FFC219000ull
+#define SRAM_Y0_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X4_BANK_BASE 0x7FFC220000ull
+#define SRAM_Y0_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X4_RTR_BASE 0x7FFC221000ull
+#define SRAM_Y0_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X5_BANK_BASE 0x7FFC228000ull
+#define SRAM_Y0_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X5_RTR_BASE 0x7FFC229000ull
+#define SRAM_Y0_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X6_BANK_BASE 0x7FFC230000ull
+#define SRAM_Y0_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X6_RTR_BASE 0x7FFC231000ull
+#define SRAM_Y0_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X7_BANK_BASE 0x7FFC238000ull
+#define SRAM_Y0_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X7_RTR_BASE 0x7FFC239000ull
+#define SRAM_Y0_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X7_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X0_BANK_BASE 0x7FFC240000ull
+#define SRAM_Y1_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X0_RTR_BASE 0x7FFC241000ull
+#define SRAM_Y1_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X1_BANK_BASE 0x7FFC248000ull
+#define SRAM_Y1_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X1_RTR_BASE 0x7FFC249000ull
+#define SRAM_Y1_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X2_BANK_BASE 0x7FFC250000ull
+#define SRAM_Y1_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X2_RTR_BASE 0x7FFC251000ull
+#define SRAM_Y1_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X3_BANK_BASE 0x7FFC258000ull
+#define SRAM_Y1_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X3_RTR_BASE 0x7FFC259000ull
+#define SRAM_Y1_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X4_BANK_BASE 0x7FFC260000ull
+#define SRAM_Y1_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X4_RTR_BASE 0x7FFC261000ull
+#define SRAM_Y1_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X5_BANK_BASE 0x7FFC268000ull
+#define SRAM_Y1_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X5_RTR_BASE 0x7FFC269000ull
+#define SRAM_Y1_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X6_BANK_BASE 0x7FFC270000ull
+#define SRAM_Y1_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X6_RTR_BASE 0x7FFC271000ull
+#define SRAM_Y1_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X7_BANK_BASE 0x7FFC278000ull
+#define SRAM_Y1_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X7_RTR_BASE 0x7FFC279000ull
+#define SRAM_Y1_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X7_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X0_BANK_BASE 0x7FFC280000ull
+#define SRAM_Y2_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X0_RTR_BASE 0x7FFC281000ull
+#define SRAM_Y2_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X1_BANK_BASE 0x7FFC288000ull
+#define SRAM_Y2_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X1_RTR_BASE 0x7FFC289000ull
+#define SRAM_Y2_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X2_BANK_BASE 0x7FFC290000ull
+#define SRAM_Y2_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X2_RTR_BASE 0x7FFC291000ull
+#define SRAM_Y2_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X3_BANK_BASE 0x7FFC298000ull
+#define SRAM_Y2_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X3_RTR_BASE 0x7FFC299000ull
+#define SRAM_Y2_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X4_BANK_BASE 0x7FFC2A0000ull
+#define SRAM_Y2_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X4_RTR_BASE 0x7FFC2A1000ull
+#define SRAM_Y2_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X5_BANK_BASE 0x7FFC2A8000ull
+#define SRAM_Y2_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X5_RTR_BASE 0x7FFC2A9000ull
+#define SRAM_Y2_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X6_BANK_BASE 0x7FFC2B0000ull
+#define SRAM_Y2_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X6_RTR_BASE 0x7FFC2B1000ull
+#define SRAM_Y2_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X7_BANK_BASE 0x7FFC2B8000ull
+#define SRAM_Y2_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X7_RTR_BASE 0x7FFC2B9000ull
+#define SRAM_Y2_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X7_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X0_BANK_BASE 0x7FFC2C0000ull
+#define SRAM_Y3_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X0_RTR_BASE 0x7FFC2C1000ull
+#define SRAM_Y3_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X1_BANK_BASE 0x7FFC2C8000ull
+#define SRAM_Y3_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X1_RTR_BASE 0x7FFC2C9000ull
+#define SRAM_Y3_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X2_BANK_BASE 0x7FFC2D0000ull
+#define SRAM_Y3_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X2_RTR_BASE 0x7FFC2D1000ull
+#define SRAM_Y3_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X3_BANK_BASE 0x7FFC2D8000ull
+#define SRAM_Y3_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X3_RTR_BASE 0x7FFC2D9000ull
+#define SRAM_Y3_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X4_BANK_BASE 0x7FFC2E0000ull
+#define SRAM_Y3_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X4_RTR_BASE 0x7FFC2E1000ull
+#define SRAM_Y3_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X5_BANK_BASE 0x7FFC2E8000ull
+#define SRAM_Y3_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X5_RTR_BASE 0x7FFC2E9000ull
+#define SRAM_Y3_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X6_BANK_BASE 0x7FFC2F0000ull
+#define SRAM_Y3_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X6_RTR_BASE 0x7FFC2F1000ull
+#define SRAM_Y3_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X7_BANK_BASE 0x7FFC2F8000ull
+#define SRAM_Y3_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X7_RTR_BASE 0x7FFC2F9000ull
+#define SRAM_Y3_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X7_RTR_SECTION 0x7000
+#define mmSIF_RTR_0_BASE 0x7FFC300000ull
+#define SIF_RTR_0_MAX_OFFSET 0x6500
+#define SIF_RTR_0_SECTION 0x6000
+#define mmSIF_RTR_CTRL_0_BASE 0x7FFC306000ull
+#define SIF_RTR_CTRL_0_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_0_SECTION 0xA000
+#define mmSIF_RTR_1_BASE 0x7FFC310000ull
+#define SIF_RTR_1_MAX_OFFSET 0x6500
+#define SIF_RTR_1_SECTION 0x6000
+#define mmSIF_RTR_CTRL_1_BASE 0x7FFC316000ull
+#define SIF_RTR_CTRL_1_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_1_SECTION 0xA000
+#define mmSIF_RTR_2_BASE 0x7FFC320000ull
+#define SIF_RTR_2_MAX_OFFSET 0x6500
+#define SIF_RTR_2_SECTION 0x6000
+#define mmSIF_RTR_CTRL_2_BASE 0x7FFC326000ull
+#define SIF_RTR_CTRL_2_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_2_SECTION 0xA000
+#define mmSIF_RTR_3_BASE 0x7FFC330000ull
+#define SIF_RTR_3_MAX_OFFSET 0x6500
+#define SIF_RTR_3_SECTION 0x6000
+#define mmSIF_RTR_CTRL_3_BASE 0x7FFC336000ull
+#define SIF_RTR_CTRL_3_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_3_SECTION 0xA000
+#define mmSIF_RTR_4_BASE 0x7FFC340000ull
+#define SIF_RTR_4_MAX_OFFSET 0x6500
+#define SIF_RTR_4_SECTION 0x6000
+#define mmSIF_RTR_CTRL_4_BASE 0x7FFC346000ull
+#define SIF_RTR_CTRL_4_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_4_SECTION 0xA000
+#define mmSIF_RTR_5_BASE 0x7FFC350000ull
+#define SIF_RTR_5_MAX_OFFSET 0x6500
+#define SIF_RTR_5_SECTION 0x6000
+#define mmSIF_RTR_CTRL_5_BASE 0x7FFC356000ull
+#define SIF_RTR_CTRL_5_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_5_SECTION 0xA000
+#define mmSIF_RTR_6_BASE 0x7FFC360000ull
+#define SIF_RTR_6_MAX_OFFSET 0x6500
+#define SIF_RTR_6_SECTION 0x6000
+#define mmSIF_RTR_CTRL_6_BASE 0x7FFC366000ull
+#define SIF_RTR_CTRL_6_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_6_SECTION 0xA000
+#define mmSIF_RTR_7_BASE 0x7FFC370000ull
+#define SIF_RTR_7_MAX_OFFSET 0x6500
+#define SIF_RTR_7_SECTION 0x6000
+#define mmSIF_RTR_CTRL_7_BASE 0x7FFC376000ull
+#define SIF_RTR_CTRL_7_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_7_SECTION 0xA000
+#define mmNIF_RTR_0_BASE 0x7FFC380000ull
+#define NIF_RTR_0_MAX_OFFSET 0x6500
+#define NIF_RTR_0_SECTION 0x6000
+#define mmNIF_RTR_CTRL_0_BASE 0x7FFC386000ull
+#define NIF_RTR_CTRL_0_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_0_SECTION 0xA000
+#define mmNIF_RTR_1_BASE 0x7FFC390000ull
+#define NIF_RTR_1_MAX_OFFSET 0x6500
+#define NIF_RTR_1_SECTION 0x6000
+#define mmNIF_RTR_CTRL_1_BASE 0x7FFC396000ull
+#define NIF_RTR_CTRL_1_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_1_SECTION 0xA000
+#define mmNIF_RTR_2_BASE 0x7FFC3A0000ull
+#define NIF_RTR_2_MAX_OFFSET 0x6500
+#define NIF_RTR_2_SECTION 0x6000
+#define mmNIF_RTR_CTRL_2_BASE 0x7FFC3A6000ull
+#define NIF_RTR_CTRL_2_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_2_SECTION 0xA000
+#define mmNIF_RTR_3_BASE 0x7FFC3B0000ull
+#define NIF_RTR_3_MAX_OFFSET 0x6500
+#define NIF_RTR_3_SECTION 0x6000
+#define mmNIF_RTR_CTRL_3_BASE 0x7FFC3B6000ull
+#define NIF_RTR_CTRL_3_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_3_SECTION 0xA000
+#define mmNIF_RTR_4_BASE 0x7FFC3C0000ull
+#define NIF_RTR_4_MAX_OFFSET 0x6500
+#define NIF_RTR_4_SECTION 0x6000
+#define mmNIF_RTR_CTRL_4_BASE 0x7FFC3C6000ull
+#define NIF_RTR_CTRL_4_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_4_SECTION 0xA000
+#define mmNIF_RTR_5_BASE 0x7FFC3D0000ull
+#define NIF_RTR_5_MAX_OFFSET 0x6500
+#define NIF_RTR_5_SECTION 0x6000
+#define mmNIF_RTR_CTRL_5_BASE 0x7FFC3D6000ull
+#define NIF_RTR_CTRL_5_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_5_SECTION 0xA000
+#define mmNIF_RTR_6_BASE 0x7FFC3E0000ull
+#define NIF_RTR_6_MAX_OFFSET 0x6500
+#define NIF_RTR_6_SECTION 0x6000
+#define mmNIF_RTR_CTRL_6_BASE 0x7FFC3E6000ull
+#define NIF_RTR_CTRL_6_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_6_SECTION 0xA000
+#define mmNIF_RTR_7_BASE 0x7FFC3F0000ull
+#define NIF_RTR_7_MAX_OFFSET 0x6500
+#define NIF_RTR_7_SECTION 0x6000
+#define mmNIF_RTR_CTRL_7_BASE 0x7FFC3F6000ull
+#define NIF_RTR_CTRL_7_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_7_SECTION 0x4B000
+#define mmCPU_CA53_CFG_BASE 0x7FFC441000ull
+#define CPU_CA53_CFG_MAX_OFFSET 0x2180
+#define CPU_CA53_CFG_SECTION 0x1000
+#define mmCPU_IF_BASE 0x7FFC442000ull
+#define CPU_IF_MAX_OFFSET 0x43C0
+#define CPU_IF_SECTION 0x2000
+#define mmCPU_TIMESTAMP_BASE 0x7FFC444000ull
+#define CPU_TIMESTAMP_MAX_OFFSET 0x1000
+#define CPU_TIMESTAMP_SECTION 0x3C000
+#define mmDMA_IF_W_S_BASE 0x7FFC480000ull
+#define DMA_IF_W_S_MAX_OFFSET 0x8380
+#define DMA_IF_W_S_SECTION 0x1000
+#define mmDMA_IF_W_S_DOWN_CH0_BASE 0x7FFC481000ull
+#define DMA_IF_W_S_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_W_S_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_W_S_DOWN_CH1_BASE 0x7FFC482000ull
+#define DMA_IF_W_S_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_W_S_DOWN_CH1_SECTION 0x5000
+#define mmDMA_W_PLL_BASE 0x7FFC487000ull
+#define DMA_W_PLL_MAX_OFFSET 0x5200
+#define DMA_W_PLL_SECTION 0x1000
+#define mmIF_W_PLL_BASE 0x7FFC488000ull
+#define IF_W_PLL_MAX_OFFSET 0x5200
+#define IF_W_PLL_SECTION 0x1000
+#define mmDMA_IF_W_S_DOWN_BASE 0x7FFC489000ull
+#define DMA_IF_W_S_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_W_S_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_W_S_BASE 0x7FFC490000ull
+#define SYNC_MNGR_GLBL_W_S_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_W_S_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_W_S_BASE 0x7FFC491000ull
+#define SYNC_MNGR_OBJS_W_S_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_W_S_SECTION 0xF000
+#define mmDMA_IF_E_S_BASE 0x7FFC4A0000ull
+#define DMA_IF_E_S_MAX_OFFSET 0x8380
+#define DMA_IF_E_S_SECTION 0x1000
+#define mmDMA_IF_E_S_DOWN_CH0_BASE 0x7FFC4A1000ull
+#define DMA_IF_E_S_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_E_S_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_E_S_DOWN_CH1_BASE 0x7FFC4A2000ull
+#define DMA_IF_E_S_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_E_S_DOWN_CH1_SECTION 0x5000
+#define mmIF_E_PLL_BASE 0x7FFC4A7000ull
+#define IF_E_PLL_MAX_OFFSET 0x5200
+#define IF_E_PLL_SECTION 0x1000
+#define mmDMA_E_PLL_BASE 0x7FFC4A8000ull
+#define DMA_E_PLL_MAX_OFFSET 0x5200
+#define DMA_E_PLL_SECTION 0x1000
+#define mmDMA_IF_E_S_DOWN_BASE 0x7FFC4A9000ull
+#define DMA_IF_E_S_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_E_S_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_E_S_BASE 0x7FFC4B0000ull
+#define SYNC_MNGR_GLBL_E_S_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_E_S_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_E_S_BASE 0x7FFC4B1000ull
+#define SYNC_MNGR_OBJS_E_S_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_E_S_SECTION 0xF000
+#define mmDMA_IF_W_N_BASE 0x7FFC4C0000ull
+#define DMA_IF_W_N_MAX_OFFSET 0x8380
+#define DMA_IF_W_N_SECTION 0x1000
+#define mmDMA_IF_W_N_DOWN_CH0_BASE 0x7FFC4C1000ull
+#define DMA_IF_W_N_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_W_N_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_W_N_DOWN_CH1_BASE 0x7FFC4C2000ull
+#define DMA_IF_W_N_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_W_N_DOWN_CH1_SECTION 0x5000
+#define mmMESH_W_PLL_BASE 0x7FFC4C7000ull
+#define MESH_W_PLL_MAX_OFFSET 0x5200
+#define MESH_W_PLL_SECTION 0x1000
+#define mmSRAM_W_PLL_BASE 0x7FFC4C8000ull
+#define SRAM_W_PLL_MAX_OFFSET 0x5200
+#define SRAM_W_PLL_SECTION 0x1000
+#define mmDMA_IF_W_N_DOWN_BASE 0x7FFC4C9000ull
+#define DMA_IF_W_N_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_W_N_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_W_N_BASE 0x7FFC4D0000ull
+#define SYNC_MNGR_GLBL_W_N_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_W_N_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_W_N_BASE 0x7FFC4D1000ull
+#define SYNC_MNGR_OBJS_W_N_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_W_N_SECTION 0xF000
+#define mmDMA_IF_E_N_BASE 0x7FFC4E0000ull
+#define DMA_IF_E_N_MAX_OFFSET 0x8380
+#define DMA_IF_E_N_SECTION 0x1000
+#define mmDMA_IF_E_N_DOWN_CH0_BASE 0x7FFC4E1000ull
+#define DMA_IF_E_N_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_E_N_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_E_N_DOWN_CH1_BASE 0x7FFC4E2000ull
+#define DMA_IF_E_N_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_E_N_DOWN_CH1_SECTION 0x5000
+#define mmMESH_E_PLL_BASE 0x7FFC4E7000ull
+#define MESH_E_PLL_MAX_OFFSET 0x5200
+#define MESH_E_PLL_SECTION 0x1000
+#define mmSRAM_E_PLL_BASE 0x7FFC4E8000ull
+#define SRAM_E_PLL_MAX_OFFSET 0x5200
+#define SRAM_E_PLL_SECTION 0x1000
+#define mmDMA_IF_E_N_DOWN_BASE 0x7FFC4E9000ull
+#define DMA_IF_E_N_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_E_N_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_E_N_BASE 0x7FFC4F0000ull
+#define SYNC_MNGR_GLBL_E_N_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_E_N_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_E_N_BASE 0x7FFC4F1000ull
+#define SYNC_MNGR_OBJS_E_N_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_E_N_SECTION 0xF000
+#define mmDMA0_CORE_BASE 0x7FFC500000ull
+#define DMA0_CORE_MAX_OFFSET 0x23C0
+#define DMA0_CORE_SECTION 0x8000
+#define mmDMA0_QM_BASE 0x7FFC508000ull
+#define DMA0_QM_MAX_OFFSET 0xD040
+#define DMA0_QM_SECTION 0x18000
+#define mmDMA1_CORE_BASE 0x7FFC520000ull
+#define DMA1_CORE_MAX_OFFSET 0x23C0
+#define DMA1_CORE_SECTION 0x8000
+#define mmDMA1_QM_BASE 0x7FFC528000ull
+#define DMA1_QM_MAX_OFFSET 0xD040
+#define DMA1_QM_SECTION 0x18000
+#define mmDMA2_CORE_BASE 0x7FFC540000ull
+#define DMA2_CORE_MAX_OFFSET 0x23C0
+#define DMA2_CORE_SECTION 0x8000
+#define mmDMA2_QM_BASE 0x7FFC548000ull
+#define DMA2_QM_MAX_OFFSET 0xD040
+#define DMA2_QM_SECTION 0x18000
+#define mmDMA3_CORE_BASE 0x7FFC560000ull
+#define DMA3_CORE_MAX_OFFSET 0x23C0
+#define DMA3_CORE_SECTION 0x8000
+#define mmDMA3_QM_BASE 0x7FFC568000ull
+#define DMA3_QM_MAX_OFFSET 0xD040
+#define DMA3_QM_SECTION 0x18000
+#define mmDMA4_CORE_BASE 0x7FFC580000ull
+#define DMA4_CORE_MAX_OFFSET 0x23C0
+#define DMA4_CORE_SECTION 0x8000
+#define mmDMA4_QM_BASE 0x7FFC588000ull
+#define DMA4_QM_MAX_OFFSET 0xD040
+#define DMA4_QM_SECTION 0x18000
+#define mmDMA5_CORE_BASE 0x7FFC5A0000ull
+#define DMA5_CORE_MAX_OFFSET 0x23C0
+#define DMA5_CORE_SECTION 0x8000
+#define mmDMA5_QM_BASE 0x7FFC5A8000ull
+#define DMA5_QM_MAX_OFFSET 0xD040
+#define DMA5_QM_SECTION 0x18000
+#define mmDMA6_CORE_BASE 0x7FFC5C0000ull
+#define DMA6_CORE_MAX_OFFSET 0x23C0
+#define DMA6_CORE_SECTION 0x8000
+#define mmDMA6_QM_BASE 0x7FFC5C8000ull
+#define DMA6_QM_MAX_OFFSET 0xD040
+#define DMA6_QM_SECTION 0x18000
+#define mmDMA7_CORE_BASE 0x7FFC5E0000ull
+#define DMA7_CORE_MAX_OFFSET 0x23C0
+#define DMA7_CORE_SECTION 0x8000
+#define mmDMA7_QM_BASE 0x7FFC5E8000ull
+#define DMA7_QM_MAX_OFFSET 0xD040
+#define DMA7_QM_SECTION 0x18000
+#define mmHBM0_BASE 0x7FFC600000ull
+#define HBM0_MAX_OFFSET 0x8F58
+#define HBM0_SECTION 0x80000
+#define mmHBM1_BASE 0x7FFC680000ull
+#define HBM1_MAX_OFFSET 0x8F58
+#define HBM1_SECTION 0x80000
+#define mmHBM2_BASE 0x7FFC700000ull
+#define HBM2_MAX_OFFSET 0x8F58
+#define HBM2_SECTION 0x80000
+#define mmHBM3_BASE 0x7FFC780000ull
+#define HBM3_MAX_OFFSET 0x8F58
+#define HBM3_SECTION 0x80000
+#define mmGIC_BASE 0x7FFC800000ull
+#define GIC_MAX_OFFSET 0x10000
+#define GIC_SECTION 0x401000
+#define mmPCIE_WRAP_BASE 0x7FFCC01000ull
+#define PCIE_WRAP_MAX_OFFSET 0xDF00
+#define PCIE_WRAP_SECTION 0x1000
+#define mmPCIE_DBI_BASE 0x7FFCC02000ull
+#define PCIE_DBI_MAX_OFFSET 0xC040
+#define PCIE_DBI_SECTION 0x2000
+#define mmPCIE_CORE_BASE 0x7FFCC04000ull
+#define PCIE_CORE_MAX_OFFSET 0x9BC0
+#define PCIE_CORE_SECTION 0x3000
+#define mmPCIE_AUX_BASE 0x7FFCC07000ull
+#define PCIE_AUX_MAX_OFFSET 0x9C40
+#define PCIE_AUX_SECTION 0x9000
+#define mmPCIE_PHY_BASE 0x7FFCC10000ull
+#define PCIE_PHY_MAX_OFFSET 0x9640
+#define PCIE_PHY_SECTION 0x1000
+#define mmMMU_UP_BASE 0x7FFCC11000ull
+#define MMU_UP_MAX_OFFSET 0x7000
+#define MMU_UP_SECTION 0x1000
+#define mmSTLB_BASE 0x7FFCC12000ull
+#define STLB_MAX_OFFSET 0x8800
+#define STLB_SECTION 0x1000
+#define mmPCIE_MSI_BASE 0x7FFCC13000ull
+#define PCIE_MSI_MAX_OFFSET 0x8000
+#define PCIE_MSI_SECTION 0x2D000
+#define mmPSOC_I2C_M0_BASE 0x7FFCC40000ull
+#define PSOC_I2C_M0_MAX_OFFSET 0x1000
+#define PSOC_I2C_M0_SECTION 0x1000
+#define mmPSOC_I2C_M1_BASE 0x7FFCC41000ull
+#define PSOC_I2C_M1_MAX_OFFSET 0x1000
+#define PSOC_I2C_M1_SECTION 0x1000
+#define mmPSOC_I2C_S_BASE 0x7FFCC42000ull
+#define PSOC_I2C_S_MAX_OFFSET 0x1000
+#define PSOC_I2C_S_SECTION 0x1000
+#define mmPSOC_SPI_BASE 0x7FFCC43000ull
+#define PSOC_SPI_MAX_OFFSET 0x1000
+#define PSOC_SPI_SECTION 0x2000
+#define mmPSOC_UART_0_BASE 0x7FFCC45000ull
+#define PSOC_UART_0_MAX_OFFSET 0x1000
+#define PSOC_UART_0_SECTION 0x1000
+#define mmPSOC_UART_1_BASE 0x7FFCC46000ull
+#define PSOC_UART_1_MAX_OFFSET 0x1000
+#define PSOC_UART_1_SECTION 0x1000
+#define mmPSOC_TIMER_BASE 0x7FFCC47000ull
+#define PSOC_TIMER_MAX_OFFSET 0x1000
+#define PSOC_TIMER_SECTION 0x1000
+#define mmPSOC_WDOG_BASE 0x7FFCC48000ull
+#define PSOC_WDOG_MAX_OFFSET 0x1000
+#define PSOC_WDOG_SECTION 0x1000
+#define mmPSOC_TIMESTAMP_BASE 0x7FFCC49000ull
+#define PSOC_TIMESTAMP_MAX_OFFSET 0x1000
+#define PSOC_TIMESTAMP_SECTION 0x1000
+#define mmPSOC_EFUSE_BASE 0x7FFCC4A000ull
+#define PSOC_EFUSE_MAX_OFFSET 0x3040
+#define PSOC_EFUSE_SECTION 0x1000
+#define mmPSOC_GLOBAL_CONF_BASE 0x7FFCC4B000ull
+#define PSOC_GLOBAL_CONF_MAX_OFFSET 0xCD80
+#define PSOC_GLOBAL_CONF_SECTION 0x1000
+#define mmPSOC_GPIO0_BASE 0x7FFCC4C000ull
+#define PSOC_GPIO0_MAX_OFFSET 0x1000
+#define PSOC_GPIO0_SECTION 0x1000
+#define mmPSOC_GPIO1_BASE 0x7FFCC4D000ull
+#define PSOC_GPIO1_MAX_OFFSET 0x1000
+#define PSOC_GPIO1_SECTION 0x1000
+#define mmPSOC_BTL_BASE 0x7FFCC4E000ull
+#define PSOC_BTL_MAX_OFFSET 0x1480
+#define PSOC_BTL_SECTION 0x1000
+#define mmPSOC_CS_TRACE_BASE 0x7FFCC4F000ull
+#define PSOC_CS_TRACE_MAX_OFFSET 0x1680
+#define PSOC_CS_TRACE_SECTION 0x1000
+#define mmPSOC_GPIO2_BASE 0x7FFCC50000ull
+#define PSOC_GPIO2_MAX_OFFSET 0x1000
+#define PSOC_GPIO2_SECTION 0x1000
+#define mmPSOC_GPIO3_BASE 0x7FFCC51000ull
+#define PSOC_GPIO3_MAX_OFFSET 0x1000
+#define PSOC_GPIO3_SECTION 0x1000
+#define mmPSOC_GPIO4_BASE 0x7FFCC52000ull
+#define PSOC_GPIO4_MAX_OFFSET 0x1000
+#define PSOC_GPIO4_SECTION 0x1000
+#define mmPSOC_DFT_EFUSE_BASE 0x7FFCC53000ull
+#define PSOC_DFT_EFUSE_MAX_OFFSET 0x3040
+#define PSOC_DFT_EFUSE_SECTION 0x1000
+#define mmPSOC_RPM_0_BASE 0x7FFCC54000ull
+#define PSOC_RPM_0_MAX_OFFSET 0x8800
+#define PSOC_RPM_0_SECTION 0x1000
+#define mmPSOC_RPM_1_BASE 0x7FFCC55000ull
+#define PSOC_RPM_1_MAX_OFFSET 0x8800
+#define PSOC_RPM_1_SECTION 0x1000
+#define mmPSOC_RPM_2_BASE 0x7FFCC56000ull
+#define PSOC_RPM_2_MAX_OFFSET 0x8800
+#define PSOC_RPM_2_SECTION 0x1000
+#define mmPSOC_RPM_3_BASE 0x7FFCC57000ull
+#define PSOC_RPM_3_MAX_OFFSET 0x8800
+#define PSOC_RPM_3_SECTION 0x19000
+#define mmPSOC_CPU_PLL_BASE 0x7FFCC70000ull
+#define PSOC_CPU_PLL_MAX_OFFSET 0x5200
+#define PSOC_CPU_PLL_SECTION 0x1000
+#define mmPSOC_MME_PLL_BASE 0x7FFCC71000ull
+#define PSOC_MME_PLL_MAX_OFFSET 0x5200
+#define PSOC_MME_PLL_SECTION 0x1000
+#define mmPSOC_PCI_PLL_BASE 0x7FFCC72000ull
+#define PSOC_PCI_PLL_MAX_OFFSET 0x5200
+#define PSOC_PCI_PLL_SECTION 0x1000
+#define mmPSOC_TPC_PLL_BASE 0x7FFCC73000ull
+#define PSOC_TPC_PLL_MAX_OFFSET 0x5200
+#define PSOC_TPC_PLL_SECTION 0x1000
+#define mmPSOC_HBM_PLL_BASE 0x7FFCC74000ull
+#define PSOC_HBM_PLL_MAX_OFFSET 0x5200
+#define PSOC_HBM_PLL_SECTION 0x1000
+#define mmPSOC_PM_BASE 0x7FFCC75000ull
+#define PSOC_PM_MAX_OFFSET 0x1F00
+#define PSOC_PM_SECTION 0x1000
+#define mmPSOC_TS_BASE 0x7FFCC76000ull
+#define PSOC_TS_MAX_OFFSET 0xE640
+#define PSOC_TS_SECTION 0x2000
+#define mmPSOC_PWM0_BASE 0x7FFCC78000ull
+#define PSOC_PWM0_MAX_OFFSET 0x5800
+#define PSOC_PWM0_SECTION 0x1000
+#define mmPSOC_PWM1_BASE 0x7FFCC79000ull
+#define PSOC_PWM1_MAX_OFFSET 0x5800
+#define PSOC_PWM1_SECTION 0x1000
+#define mmPSOC_PWM2_BASE 0x7FFCC7A000ull
+#define PSOC_PWM2_MAX_OFFSET 0x5800
+#define PSOC_PWM2_SECTION 0x1000
+#define mmPSOC_PWM3_BASE 0x7FFCC7B000ull
+#define PSOC_PWM3_MAX_OFFSET 0x5800
+#define PSOC_PWM3_SECTION 0x1000
+#define mmPSOC_GPIO5_BASE 0x7FFCC7C000ull
+#define PSOC_GPIO5_MAX_OFFSET 0x1000
+#define PSOC_GPIO5_SECTION 0x1000
+#define mmPSOC_GPIO6_BASE 0x7FFCC7D000ull
+#define PSOC_GPIO6_MAX_OFFSET 0x1000
+#define PSOC_GPIO6_SECTION 0x3000
+#define mmPCIE_PMA_0_BASE 0x7FFCC80000ull
+#define PCIE_PMA_0_MAX_OFFSET 0x10003
+#define PCIE_PMA_0_SECTION 0x10000
+#define mmPCIE_PMA_1_BASE 0x7FFCC90000ull
+#define PCIE_PMA_1_MAX_OFFSET 0x10003
+#define PCIE_PMA_1_SECTION 0x10000
+#define mmPCIE_PMA_2_BASE 0x7FFCCA0000ull
+#define PCIE_PMA_2_MAX_OFFSET 0x10003
+#define PCIE_PMA_2_SECTION 0x10000
+#define mmPCIE_PMA_3_BASE 0x7FFCCB0000ull
+#define PCIE_PMA_3_MAX_OFFSET 0x10003
+#define PCIE_PMA_3_SECTION 0x10000
+#define mmNIC0_MAC_CH0_BASE 0x7FFCCC0000ull
+#define NIC0_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH0_SECTION 0x1000
+#define mmNIC0_MAC_CH1_BASE 0x7FFCCC1000ull
+#define NIC0_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH1_SECTION 0x1000
+#define mmNIC0_MAC_CH2_BASE 0x7FFCCC2000ull
+#define NIC0_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH2_SECTION 0x1000
+#define mmNIC0_MAC_CH3_BASE 0x7FFCCC3000ull
+#define NIC0_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH3_SECTION 0x1000
+#define mmNIC0_STAT_BASE 0x7FFCCC4000ull
+#define NIC0_STAT_MAX_OFFSET 0x4D00
+#define NIC0_STAT_SECTION 0x1000
+#define mmNIC0_MAC_XPCS91_BASE 0x7FFCCC5000ull
+#define NIC0_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC0_MAC_XPCS91_SECTION 0x3000
+#define mmNIC0_MAC_CORE_BASE 0x7FFCCC8000ull
+#define NIC0_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC0_MAC_CORE_SECTION 0x1000
+#define mmNIC0_MAC_AUX_BASE 0x7FFCCC9000ull
+#define NIC0_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC0_MAC_AUX_SECTION 0xF000
+#define mmNIC0_PHY_BASE 0x7FFCCD8000ull
+#define NIC0_PHY_MAX_OFFSET 0x3400
+#define NIC0_PHY_SECTION 0x8000
+#define mmNIC0_QM0_BASE 0x7FFCCE0000ull
+#define NIC0_QM0_MAX_OFFSET 0xD040
+#define NIC0_QM0_SECTION 0x2000
+#define mmNIC0_QM1_BASE 0x7FFCCE2000ull
+#define NIC0_QM1_MAX_OFFSET 0xD040
+#define NIC0_QM1_SECTION 0x2000
+#define mmNIC0_QPC0_BASE 0x7FFCCE4000ull
+#define NIC0_QPC0_MAX_OFFSET 0x7140
+#define NIC0_QPC0_SECTION 0x1000
+#define mmNIC0_QPC1_BASE 0x7FFCCE5000ull
+#define NIC0_QPC1_MAX_OFFSET 0x7140
+#define NIC0_QPC1_SECTION 0x3000
+#define mmNIC0_RXB_BASE 0x7FFCCE8000ull
+#define NIC0_RXB_MAX_OFFSET 0x6040
+#define NIC0_RXB_SECTION 0x1000
+#define mmNIC0_RXE0_BASE 0x7FFCCE9000ull
+#define NIC0_RXE0_MAX_OFFSET 0x2FC0
+#define NIC0_RXE0_SECTION 0x1000
+#define mmNIC0_RXE1_BASE 0x7FFCCEA000ull
+#define NIC0_RXE1_MAX_OFFSET 0x2FC0
+#define NIC0_RXE1_SECTION 0x1000
+#define mmNIC0_RX_GW_BASE 0x7FFCCEB000ull
+#define NIC0_RX_GW_MAX_OFFSET 0x4540
+#define NIC0_RX_GW_SECTION 0x5000
+#define mmNIC0_TXS0_BASE 0x7FFCCF0000ull
+#define NIC0_TXS0_MAX_OFFSET 0x19C0
+#define NIC0_TXS0_SECTION 0x1000
+#define mmNIC0_TXS1_BASE 0x7FFCCF1000ull
+#define NIC0_TXS1_MAX_OFFSET 0x19C0
+#define NIC0_TXS1_SECTION 0x1000
+#define mmNIC0_TXE0_BASE 0x7FFCCF2000ull
+#define NIC0_TXE0_MAX_OFFSET 0x2040
+#define NIC0_TXE0_SECTION 0x1000
+#define mmNIC0_TXE1_BASE 0x7FFCCF3000ull
+#define NIC0_TXE1_MAX_OFFSET 0x2040
+#define NIC0_TXE1_SECTION 0x1000
+#define mmNIC0_TXB_BASE 0x7FFCCF4000ull
+#define NIC0_TXB_MAX_OFFSET 0xD400
+#define NIC0_TXB_SECTION 0x1000
+#define mmNIC0_TMR_BASE 0x7FFCCF5000ull
+#define NIC0_TMR_MAX_OFFSET 0x1600
+#define NIC0_TMR_SECTION 0x1000
+#define mmNIC0_TX_GW_BASE 0x7FFCCF6000ull
+#define NIC0_TX_GW_MAX_OFFSET 0x1400
+#define NIC0_TX_GW_SECTION 0x2000
+#define mmNIC0_TS_BASE 0x7FFCCF8000ull
+#define NIC0_TS_MAX_OFFSET 0xE640
+#define NIC0_TS_SECTION 0x1000
+#define mmNIC0_PLL_BASE 0x7FFCCF9000ull
+#define NIC0_PLL_MAX_OFFSET 0x5200
+#define NIC0_PLL_SECTION 0x1000
+#define mmNIC0_PM_BASE 0x7FFCCFA000ull
+#define NIC0_PM_MAX_OFFSET 0x1F00
+#define NIC0_PM_SECTION 0x6000
+#define mmNIC1_MAC_CH0_BASE 0x7FFCD00000ull
+#define NIC1_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH0_SECTION 0x1000
+#define mmNIC1_MAC_CH1_BASE 0x7FFCD01000ull
+#define NIC1_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH1_SECTION 0x1000
+#define mmNIC1_MAC_CH2_BASE 0x7FFCD02000ull
+#define NIC1_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH2_SECTION 0x1000
+#define mmNIC1_MAC_CH3_BASE 0x7FFCD03000ull
+#define NIC1_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH3_SECTION 0x1000
+#define mmNIC1_STAT_BASE 0x7FFCD04000ull
+#define NIC1_STAT_MAX_OFFSET 0x4D00
+#define NIC1_STAT_SECTION 0x1000
+#define mmNIC1_MAC_XPCS91_BASE 0x7FFCD05000ull
+#define NIC1_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC1_MAC_XPCS91_SECTION 0x3000
+#define mmNIC1_MAC_CORE_BASE 0x7FFCD08000ull
+#define NIC1_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC1_MAC_CORE_SECTION 0x1000
+#define mmNIC1_MAC_AUX_BASE 0x7FFCD09000ull
+#define NIC1_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC1_MAC_AUX_SECTION 0xF000
+#define mmNIC1_PHY_BASE 0x7FFCD18000ull
+#define NIC1_PHY_MAX_OFFSET 0x3400
+#define NIC1_PHY_SECTION 0x8000
+#define mmNIC1_QM0_BASE 0x7FFCD20000ull
+#define NIC1_QM0_MAX_OFFSET 0xD040
+#define NIC1_QM0_SECTION 0x2000
+#define mmNIC1_QM1_BASE 0x7FFCD22000ull
+#define NIC1_QM1_MAX_OFFSET 0xD040
+#define NIC1_QM1_SECTION 0x2000
+#define mmNIC1_QPC0_BASE 0x7FFCD24000ull
+#define NIC1_QPC0_MAX_OFFSET 0x7140
+#define NIC1_QPC0_SECTION 0x1000
+#define mmNIC1_QPC1_BASE 0x7FFCD25000ull
+#define NIC1_QPC1_MAX_OFFSET 0x7140
+#define NIC1_QPC1_SECTION 0x3000
+#define mmNIC1_RXB_BASE 0x7FFCD28000ull
+#define NIC1_RXB_MAX_OFFSET 0x6040
+#define NIC1_RXB_SECTION 0x1000
+#define mmNIC1_RXE0_BASE 0x7FFCD29000ull
+#define NIC1_RXE0_MAX_OFFSET 0x2FC0
+#define NIC1_RXE0_SECTION 0x1000
+#define mmNIC1_RXE1_BASE 0x7FFCD2A000ull
+#define NIC1_RXE1_MAX_OFFSET 0x2FC0
+#define NIC1_RXE1_SECTION 0x1000
+#define mmNIC1_RX_GW_BASE 0x7FFCD2B000ull
+#define NIC1_RX_GW_MAX_OFFSET 0x4540
+#define NIC1_RX_GW_SECTION 0x5000
+#define mmNIC1_TXS0_BASE 0x7FFCD30000ull
+#define NIC1_TXS0_MAX_OFFSET 0x19C0
+#define NIC1_TXS0_SECTION 0x1000
+#define mmNIC1_TXS1_BASE 0x7FFCD31000ull
+#define NIC1_TXS1_MAX_OFFSET 0x19C0
+#define NIC1_TXS1_SECTION 0x1000
+#define mmNIC1_TXE0_BASE 0x7FFCD32000ull
+#define NIC1_TXE0_MAX_OFFSET 0x2040
+#define NIC1_TXE0_SECTION 0x1000
+#define mmNIC1_TXE1_BASE 0x7FFCD33000ull
+#define NIC1_TXE1_MAX_OFFSET 0x2040
+#define NIC1_TXE1_SECTION 0x1000
+#define mmNIC1_TXB_BASE 0x7FFCD34000ull
+#define NIC1_TXB_MAX_OFFSET 0xD400
+#define NIC1_TXB_SECTION 0x1000
+#define mmNIC1_TMR_BASE 0x7FFCD35000ull
+#define NIC1_TMR_MAX_OFFSET 0x1600
+#define NIC1_TMR_SECTION 0x1000
+#define mmNIC1_TX_GW_BASE 0x7FFCD36000ull
+#define NIC1_TX_GW_MAX_OFFSET 0x1400
+#define NIC1_TX_GW_SECTION 0x2000
+#define mmNIC1_TS_BASE 0x7FFCD38000ull
+#define NIC1_TS_MAX_OFFSET 0xE640
+#define NIC1_TS_SECTION 0x1000
+#define mmNIC1_PLL_BASE 0x7FFCD39000ull
+#define NIC1_PLL_MAX_OFFSET 0x5200
+#define NIC1_PLL_SECTION 0x1000
+#define mmNIC1_PM_BASE 0x7FFCD3A000ull
+#define NIC1_PM_MAX_OFFSET 0x1F00
+#define NIC1_PM_SECTION 0x6000
+#define mmNIC2_MAC_CH0_BASE 0x7FFCD40000ull
+#define NIC2_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH0_SECTION 0x1000
+#define mmNIC2_MAC_CH1_BASE 0x7FFCD41000ull
+#define NIC2_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH1_SECTION 0x1000
+#define mmNIC2_MAC_CH2_BASE 0x7FFCD42000ull
+#define NIC2_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH2_SECTION 0x1000
+#define mmNIC2_MAC_CH3_BASE 0x7FFCD43000ull
+#define NIC2_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH3_SECTION 0x1000
+#define mmNIC2_STAT_BASE 0x7FFCD44000ull
+#define NIC2_STAT_MAX_OFFSET 0x4D00
+#define NIC2_STAT_SECTION 0x1000
+#define mmNIC2_MAC_XPCS91_BASE 0x7FFCD45000ull
+#define NIC2_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC2_MAC_XPCS91_SECTION 0x3000
+#define mmNIC2_MAC_CORE_BASE 0x7FFCD48000ull
+#define NIC2_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC2_MAC_CORE_SECTION 0x1000
+#define mmNIC2_MAC_AUX_BASE 0x7FFCD49000ull
+#define NIC2_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC2_MAC_AUX_SECTION 0xF000
+#define mmNIC2_PHY_BASE 0x7FFCD58000ull
+#define NIC2_PHY_MAX_OFFSET 0x3400
+#define NIC2_PHY_SECTION 0x8000
+#define mmNIC2_QM0_BASE 0x7FFCD60000ull
+#define NIC2_QM0_MAX_OFFSET 0xD040
+#define NIC2_QM0_SECTION 0x2000
+#define mmNIC2_QM1_BASE 0x7FFCD62000ull
+#define NIC2_QM1_MAX_OFFSET 0xD040
+#define NIC2_QM1_SECTION 0x2000
+#define mmNIC2_QPC0_BASE 0x7FFCD64000ull
+#define NIC2_QPC0_MAX_OFFSET 0x7140
+#define NIC2_QPC0_SECTION 0x1000
+#define mmNIC2_QPC1_BASE 0x7FFCD65000ull
+#define NIC2_QPC1_MAX_OFFSET 0x7140
+#define NIC2_QPC1_SECTION 0x3000
+#define mmNIC2_RXB_BASE 0x7FFCD68000ull
+#define NIC2_RXB_MAX_OFFSET 0x6040
+#define NIC2_RXB_SECTION 0x1000
+#define mmNIC2_RXE0_BASE 0x7FFCD69000ull
+#define NIC2_RXE0_MAX_OFFSET 0x2FC0
+#define NIC2_RXE0_SECTION 0x1000
+#define mmNIC2_RXE1_BASE 0x7FFCD6A000ull
+#define NIC2_RXE1_MAX_OFFSET 0x2FC0
+#define NIC2_RXE1_SECTION 0x1000
+#define mmNIC2_RX_GW_BASE 0x7FFCD6B000ull
+#define NIC2_RX_GW_MAX_OFFSET 0x4540
+#define NIC2_RX_GW_SECTION 0x5000
+#define mmNIC2_TXS0_BASE 0x7FFCD70000ull
+#define NIC2_TXS0_MAX_OFFSET 0x19C0
+#define NIC2_TXS0_SECTION 0x1000
+#define mmNIC2_TXS1_BASE 0x7FFCD71000ull
+#define NIC2_TXS1_MAX_OFFSET 0x19C0
+#define NIC2_TXS1_SECTION 0x1000
+#define mmNIC2_TXE0_BASE 0x7FFCD72000ull
+#define NIC2_TXE0_MAX_OFFSET 0x2040
+#define NIC2_TXE0_SECTION 0x1000
+#define mmNIC2_TXE1_BASE 0x7FFCD73000ull
+#define NIC2_TXE1_MAX_OFFSET 0x2040
+#define NIC2_TXE1_SECTION 0x1000
+#define mmNIC2_TXB_BASE 0x7FFCD74000ull
+#define NIC2_TXB_MAX_OFFSET 0xD400
+#define NIC2_TXB_SECTION 0x1000
+#define mmNIC2_TMR_BASE 0x7FFCD75000ull
+#define NIC2_TMR_MAX_OFFSET 0x1600
+#define NIC2_TMR_SECTION 0x1000
+#define mmNIC2_TX_GW_BASE 0x7FFCD76000ull
+#define NIC2_TX_GW_MAX_OFFSET 0x1400
+#define NIC2_TX_GW_SECTION 0x2000
+#define mmNIC2_HBM_PLL_BASE 0x7FFCD78000ull
+#define NIC2_HBM_PLL_MAX_OFFSET 0x5200
+#define NIC2_HBM_PLL_SECTION 0x1000
+#define mmNIC2_MME_PLL_BASE 0x7FFCD79000ull
+#define NIC2_MME_PLL_MAX_OFFSET 0x5200
+#define NIC2_MME_PLL_SECTION 0x1000
+#define mmNIC2_TPC_PLL_BASE 0x7FFCD7A000ull
+#define NIC2_TPC_PLL_MAX_OFFSET 0x5200
+#define NIC2_TPC_PLL_SECTION 0x6000
+#define mmNIC3_MAC_CH0_BASE 0x7FFCD80000ull
+#define NIC3_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH0_SECTION 0x1000
+#define mmNIC3_MAC_CH1_BASE 0x7FFCD81000ull
+#define NIC3_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH1_SECTION 0x1000
+#define mmNIC3_MAC_CH2_BASE 0x7FFCD82000ull
+#define NIC3_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH2_SECTION 0x1000
+#define mmNIC3_MAC_CH3_BASE 0x7FFCD83000ull
+#define NIC3_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH3_SECTION 0x1000
+#define mmNIC3_STAT_BASE 0x7FFCD84000ull
+#define NIC3_STAT_MAX_OFFSET 0x4D00
+#define NIC3_STAT_SECTION 0x1000
+#define mmNIC3_MAC_XPCS91_BASE 0x7FFCD85000ull
+#define NIC3_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC3_MAC_XPCS91_SECTION 0x3000
+#define mmNIC3_MAC_CORE_BASE 0x7FFCD88000ull
+#define NIC3_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC3_MAC_CORE_SECTION 0x1000
+#define mmNIC3_MAC_AUX_BASE 0x7FFCD89000ull
+#define NIC3_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC3_MAC_AUX_SECTION 0xF000
+#define mmNIC3_PHY_BASE 0x7FFCD98000ull
+#define NIC3_PHY_MAX_OFFSET 0x3400
+#define NIC3_PHY_SECTION 0x8000
+#define mmNIC3_QM0_BASE 0x7FFCDA0000ull
+#define NIC3_QM0_MAX_OFFSET 0xD040
+#define NIC3_QM0_SECTION 0x2000
+#define mmNIC3_QM1_BASE 0x7FFCDA2000ull
+#define NIC3_QM1_MAX_OFFSET 0xD040
+#define NIC3_QM1_SECTION 0x2000
+#define mmNIC3_QPC0_BASE 0x7FFCDA4000ull
+#define NIC3_QPC0_MAX_OFFSET 0x7140
+#define NIC3_QPC0_SECTION 0x1000
+#define mmNIC3_QPC1_BASE 0x7FFCDA5000ull
+#define NIC3_QPC1_MAX_OFFSET 0x7140
+#define NIC3_QPC1_SECTION 0x3000
+#define mmNIC3_RXB_BASE 0x7FFCDA8000ull
+#define NIC3_RXB_MAX_OFFSET 0x6040
+#define NIC3_RXB_SECTION 0x1000
+#define mmNIC3_RXE0_BASE 0x7FFCDA9000ull
+#define NIC3_RXE0_MAX_OFFSET 0x2FC0
+#define NIC3_RXE0_SECTION 0x1000
+#define mmNIC3_RXE1_BASE 0x7FFCDAA000ull
+#define NIC3_RXE1_MAX_OFFSET 0x2FC0
+#define NIC3_RXE1_SECTION 0x1000
+#define mmNIC3_RX_GW_BASE 0x7FFCDAB000ull
+#define NIC3_RX_GW_MAX_OFFSET 0x4540
+#define NIC3_RX_GW_SECTION 0x5000
+#define mmNIC3_TXS0_BASE 0x7FFCDB0000ull
+#define NIC3_TXS0_MAX_OFFSET 0x19C0
+#define NIC3_TXS0_SECTION 0x1000
+#define mmNIC3_TXS1_BASE 0x7FFCDB1000ull
+#define NIC3_TXS1_MAX_OFFSET 0x19C0
+#define NIC3_TXS1_SECTION 0x1000
+#define mmNIC3_TXE0_BASE 0x7FFCDB2000ull
+#define NIC3_TXE0_MAX_OFFSET 0x2040
+#define NIC3_TXE0_SECTION 0x1000
+#define mmNIC3_TXE1_BASE 0x7FFCDB3000ull
+#define NIC3_TXE1_MAX_OFFSET 0x2040
+#define NIC3_TXE1_SECTION 0x1000
+#define mmNIC3_TXB_BASE 0x7FFCDB4000ull
+#define NIC3_TXB_MAX_OFFSET 0xD400
+#define NIC3_TXB_SECTION 0x1000
+#define mmNIC3_TMR_BASE 0x7FFCDB5000ull
+#define NIC3_TMR_MAX_OFFSET 0x1600
+#define NIC3_TMR_SECTION 0x1000
+#define mmNIC3_TX_GW_BASE 0x7FFCDB6000ull
+#define NIC3_TX_GW_MAX_OFFSET 0x1400
+#define NIC3_TX_GW_SECTION 0x2000
+#define mmNIC3_TS_BASE 0x7FFCDB8000ull
+#define NIC3_TS_MAX_OFFSET 0xE640
+#define NIC3_TS_SECTION 0x2000
+#define mmNIC3_PM_BASE 0x7FFCDBA000ull
+#define NIC3_PM_MAX_OFFSET 0x1F00
+#define NIC3_PM_SECTION 0x6000
+#define mmNIC4_MAC_CH0_BASE 0x7FFCDC0000ull
+#define NIC4_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH0_SECTION 0x1000
+#define mmNIC4_MAC_CH1_BASE 0x7FFCDC1000ull
+#define NIC4_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH1_SECTION 0x1000
+#define mmNIC4_MAC_CH2_BASE 0x7FFCDC2000ull
+#define NIC4_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH2_SECTION 0x1000
+#define mmNIC4_MAC_CH3_BASE 0x7FFCDC3000ull
+#define NIC4_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH3_SECTION 0x1000
+#define mmNIC4_STAT_BASE 0x7FFCDC4000ull
+#define NIC4_STAT_MAX_OFFSET 0x4D00
+#define NIC4_STAT_SECTION 0x1000
+#define mmNIC4_MAC_XPCS91_BASE 0x7FFCDC5000ull
+#define NIC4_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC4_MAC_XPCS91_SECTION 0x3000
+#define mmNIC4_MAC_CORE_BASE 0x7FFCDC8000ull
+#define NIC4_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC4_MAC_CORE_SECTION 0x1000
+#define mmNIC4_MAC_AUX_BASE 0x7FFCDC9000ull
+#define NIC4_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC4_MAC_AUX_SECTION 0xF000
+#define mmNIC4_PHY_BASE 0x7FFCDD8000ull
+#define NIC4_PHY_MAX_OFFSET 0x3400
+#define NIC4_PHY_SECTION 0x8000
+#define mmNIC4_QM0_BASE 0x7FFCDE0000ull
+#define NIC4_QM0_MAX_OFFSET 0xD040
+#define NIC4_QM0_SECTION 0x2000
+#define mmNIC4_QM1_BASE 0x7FFCDE2000ull
+#define NIC4_QM1_MAX_OFFSET 0xD040
+#define NIC4_QM1_SECTION 0x2000
+#define mmNIC4_QPC0_BASE 0x7FFCDE4000ull
+#define NIC4_QPC0_MAX_OFFSET 0x7140
+#define NIC4_QPC0_SECTION 0x1000
+#define mmNIC4_QPC1_BASE 0x7FFCDE5000ull
+#define NIC4_QPC1_MAX_OFFSET 0x7140
+#define NIC4_QPC1_SECTION 0x3000
+#define mmNIC4_RXB_BASE 0x7FFCDE8000ull
+#define NIC4_RXB_MAX_OFFSET 0x6040
+#define NIC4_RXB_SECTION 0x1000
+#define mmNIC4_RXE0_BASE 0x7FFCDE9000ull
+#define NIC4_RXE0_MAX_OFFSET 0x2FC0
+#define NIC4_RXE0_SECTION 0x1000
+#define mmNIC4_RXE1_BASE 0x7FFCDEA000ull
+#define NIC4_RXE1_MAX_OFFSET 0x2FC0
+#define NIC4_RXE1_SECTION 0x1000
+#define mmNIC4_RX_GW_BASE 0x7FFCDEB000ull
+#define NIC4_RX_GW_MAX_OFFSET 0x4540
+#define NIC4_RX_GW_SECTION 0x5000
+#define mmNIC4_TXS0_BASE 0x7FFCDF0000ull
+#define NIC4_TXS0_MAX_OFFSET 0x19C0
+#define NIC4_TXS0_SECTION 0x1000
+#define mmNIC4_TXS1_BASE 0x7FFCDF1000ull
+#define NIC4_TXS1_MAX_OFFSET 0x19C0
+#define NIC4_TXS1_SECTION 0x1000
+#define mmNIC4_TXE0_BASE 0x7FFCDF2000ull
+#define NIC4_TXE0_MAX_OFFSET 0x2040
+#define NIC4_TXE0_SECTION 0x1000
+#define mmNIC4_TXE1_BASE 0x7FFCDF3000ull
+#define NIC4_TXE1_MAX_OFFSET 0x2040
+#define NIC4_TXE1_SECTION 0x1000
+#define mmNIC4_TXB_BASE 0x7FFCDF4000ull
+#define NIC4_TXB_MAX_OFFSET 0xD400
+#define NIC4_TXB_SECTION 0x1000
+#define mmNIC4_TMR_BASE 0x7FFCDF5000ull
+#define NIC4_TMR_MAX_OFFSET 0x1600
+#define NIC4_TMR_SECTION 0x1000
+#define mmNIC4_TX_GW_BASE 0x7FFCDF6000ull
+#define NIC4_TX_GW_MAX_OFFSET 0x1400
+#define NIC4_TX_GW_SECTION 0x10000
+#define mmTPC0_CFG_BASE 0x7FFCE06000ull
+#define TPC0_CFG_MAX_OFFSET 0xE400
+#define TPC0_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC0_CFG_BASE 0x7FFCE06400ull
+#define KERNEL_TENSOR_0_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC0_CFG_BASE 0x7FFCE06438ull
+#define KERNEL_TENSOR_1_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC0_CFG_BASE 0x7FFCE06470ull
+#define KERNEL_TENSOR_2_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC0_CFG_BASE 0x7FFCE064A8ull
+#define KERNEL_TENSOR_3_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC0_CFG_BASE 0x7FFCE064E0ull
+#define KERNEL_TENSOR_4_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC0_CFG_BASE 0x7FFCE06518ull
+#define KERNEL_TENSOR_5_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC0_CFG_BASE 0x7FFCE06550ull
+#define KERNEL_TENSOR_6_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC0_CFG_BASE 0x7FFCE06588ull
+#define KERNEL_TENSOR_7_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC0_CFG_BASE 0x7FFCE065C0ull
+#define KERNEL_TENSOR_8_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC0_CFG_BASE 0x7FFCE065F8ull
+#define KERNEL_TENSOR_9_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC0_CFG_BASE 0x7FFCE06630ull
+#define KERNEL_TENSOR_10_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC0_CFG_BASE 0x7FFCE06668ull
+#define KERNEL_TENSOR_11_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC0_CFG_BASE 0x7FFCE066A0ull
+#define KERNEL_TENSOR_12_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC0_CFG_BASE 0x7FFCE066D8ull
+#define KERNEL_TENSOR_13_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC0_CFG_BASE 0x7FFCE06710ull
+#define KERNEL_TENSOR_14_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC0_CFG_BASE 0x7FFCE06748ull
+#define KERNEL_TENSOR_15_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC0_CFG_BASE 0x7FFCE06780ull
+#define KERNEL_SYNC_OBJECT_TPC0_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC0_CFG_SECTION 0x8000
+#define mmKERNEL_TPC0_CFG_BASE 0x7FFCE06788ull
+#define KERNEL_TPC0_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC0_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC0_CFG_BASE 0x7FFCE06A00ull
+#define QM_TENSOR_0_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC0_CFG_BASE 0x7FFCE06A38ull
+#define QM_TENSOR_1_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC0_CFG_BASE 0x7FFCE06A70ull
+#define QM_TENSOR_2_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC0_CFG_BASE 0x7FFCE06AA8ull
+#define QM_TENSOR_3_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC0_CFG_BASE 0x7FFCE06AE0ull
+#define QM_TENSOR_4_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC0_CFG_BASE 0x7FFCE06B18ull
+#define QM_TENSOR_5_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC0_CFG_BASE 0x7FFCE06B50ull
+#define QM_TENSOR_6_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC0_CFG_BASE 0x7FFCE06B88ull
+#define QM_TENSOR_7_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC0_CFG_BASE 0x7FFCE06BC0ull
+#define QM_TENSOR_8_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC0_CFG_BASE 0x7FFCE06BF8ull
+#define QM_TENSOR_9_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC0_CFG_BASE 0x7FFCE06C30ull
+#define QM_TENSOR_10_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC0_CFG_BASE 0x7FFCE06C68ull
+#define QM_TENSOR_11_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC0_CFG_BASE 0x7FFCE06CA0ull
+#define QM_TENSOR_12_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC0_CFG_BASE 0x7FFCE06CD8ull
+#define QM_TENSOR_13_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC0_CFG_BASE 0x7FFCE06D10ull
+#define QM_TENSOR_14_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC0_CFG_BASE 0x7FFCE06D48ull
+#define QM_TENSOR_15_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC0_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC0_CFG_BASE 0x7FFCE06D80ull
+#define QM_SYNC_OBJECT_TPC0_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC0_CFG_SECTION 0x8000
+#define mmQM_TPC0_CFG_BASE 0x7FFCE06D88ull
+#define QM_TPC0_CFG_MAX_OFFSET 0xB800
+#define QM_TPC0_CFG_SECTION 0x2780
+#define mmTPC0_E2E_CRED_BASE 0x7FFCE07000ull
+#define TPC0_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC0_E2E_CRED_SECTION 0x1000
+#define mmTPC0_QM_BASE 0x7FFCE08000ull
+#define TPC0_QM_MAX_OFFSET 0xD040
+#define TPC0_QM_SECTION 0x3E000
+#define mmTPC1_CFG_BASE 0x7FFCE46000ull
+#define TPC1_CFG_MAX_OFFSET 0xE400
+#define TPC1_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC1_CFG_BASE 0x7FFCE46400ull
+#define KERNEL_TENSOR_0_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC1_CFG_BASE 0x7FFCE46438ull
+#define KERNEL_TENSOR_1_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC1_CFG_BASE 0x7FFCE46470ull
+#define KERNEL_TENSOR_2_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC1_CFG_BASE 0x7FFCE464A8ull
+#define KERNEL_TENSOR_3_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC1_CFG_BASE 0x7FFCE464E0ull
+#define KERNEL_TENSOR_4_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC1_CFG_BASE 0x7FFCE46518ull
+#define KERNEL_TENSOR_5_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC1_CFG_BASE 0x7FFCE46550ull
+#define KERNEL_TENSOR_6_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC1_CFG_BASE 0x7FFCE46588ull
+#define KERNEL_TENSOR_7_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC1_CFG_BASE 0x7FFCE465C0ull
+#define KERNEL_TENSOR_8_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC1_CFG_BASE 0x7FFCE465F8ull
+#define KERNEL_TENSOR_9_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC1_CFG_BASE 0x7FFCE46630ull
+#define KERNEL_TENSOR_10_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC1_CFG_BASE 0x7FFCE46668ull
+#define KERNEL_TENSOR_11_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC1_CFG_BASE 0x7FFCE466A0ull
+#define KERNEL_TENSOR_12_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC1_CFG_BASE 0x7FFCE466D8ull
+#define KERNEL_TENSOR_13_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC1_CFG_BASE 0x7FFCE46710ull
+#define KERNEL_TENSOR_14_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC1_CFG_BASE 0x7FFCE46748ull
+#define KERNEL_TENSOR_15_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC1_CFG_BASE 0x7FFCE46780ull
+#define KERNEL_SYNC_OBJECT_TPC1_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC1_CFG_SECTION 0x8000
+#define mmKERNEL_TPC1_CFG_BASE 0x7FFCE46788ull
+#define KERNEL_TPC1_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC1_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC1_CFG_BASE 0x7FFCE46A00ull
+#define QM_TENSOR_0_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC1_CFG_BASE 0x7FFCE46A38ull
+#define QM_TENSOR_1_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC1_CFG_BASE 0x7FFCE46A70ull
+#define QM_TENSOR_2_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC1_CFG_BASE 0x7FFCE46AA8ull
+#define QM_TENSOR_3_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC1_CFG_BASE 0x7FFCE46AE0ull
+#define QM_TENSOR_4_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC1_CFG_BASE 0x7FFCE46B18ull
+#define QM_TENSOR_5_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC1_CFG_BASE 0x7FFCE46B50ull
+#define QM_TENSOR_6_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC1_CFG_BASE 0x7FFCE46B88ull
+#define QM_TENSOR_7_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC1_CFG_BASE 0x7FFCE46BC0ull
+#define QM_TENSOR_8_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC1_CFG_BASE 0x7FFCE46BF8ull
+#define QM_TENSOR_9_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC1_CFG_BASE 0x7FFCE46C30ull
+#define QM_TENSOR_10_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC1_CFG_BASE 0x7FFCE46C68ull
+#define QM_TENSOR_11_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC1_CFG_BASE 0x7FFCE46CA0ull
+#define QM_TENSOR_12_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC1_CFG_BASE 0x7FFCE46CD8ull
+#define QM_TENSOR_13_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC1_CFG_BASE 0x7FFCE46D10ull
+#define QM_TENSOR_14_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC1_CFG_BASE 0x7FFCE46D48ull
+#define QM_TENSOR_15_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC1_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC1_CFG_BASE 0x7FFCE46D80ull
+#define QM_SYNC_OBJECT_TPC1_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC1_CFG_SECTION 0x8000
+#define mmQM_TPC1_CFG_BASE 0x7FFCE46D88ull
+#define QM_TPC1_CFG_MAX_OFFSET 0xB800
+#define QM_TPC1_CFG_SECTION 0x2780
+#define mmTPC1_E2E_CRED_BASE 0x7FFCE47000ull
+#define TPC1_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC1_E2E_CRED_SECTION 0x1000
+#define mmTPC1_QM_BASE 0x7FFCE48000ull
+#define TPC1_QM_MAX_OFFSET 0xD040
+#define TPC1_QM_SECTION 0x3E000
+#define mmTPC2_CFG_BASE 0x7FFCE86000ull
+#define TPC2_CFG_MAX_OFFSET 0xE400
+#define TPC2_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC2_CFG_BASE 0x7FFCE86400ull
+#define KERNEL_TENSOR_0_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC2_CFG_BASE 0x7FFCE86438ull
+#define KERNEL_TENSOR_1_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC2_CFG_BASE 0x7FFCE86470ull
+#define KERNEL_TENSOR_2_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC2_CFG_BASE 0x7FFCE864A8ull
+#define KERNEL_TENSOR_3_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC2_CFG_BASE 0x7FFCE864E0ull
+#define KERNEL_TENSOR_4_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC2_CFG_BASE 0x7FFCE86518ull
+#define KERNEL_TENSOR_5_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC2_CFG_BASE 0x7FFCE86550ull
+#define KERNEL_TENSOR_6_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC2_CFG_BASE 0x7FFCE86588ull
+#define KERNEL_TENSOR_7_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC2_CFG_BASE 0x7FFCE865C0ull
+#define KERNEL_TENSOR_8_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC2_CFG_BASE 0x7FFCE865F8ull
+#define KERNEL_TENSOR_9_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC2_CFG_BASE 0x7FFCE86630ull
+#define KERNEL_TENSOR_10_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC2_CFG_BASE 0x7FFCE86668ull
+#define KERNEL_TENSOR_11_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC2_CFG_BASE 0x7FFCE866A0ull
+#define KERNEL_TENSOR_12_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC2_CFG_BASE 0x7FFCE866D8ull
+#define KERNEL_TENSOR_13_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC2_CFG_BASE 0x7FFCE86710ull
+#define KERNEL_TENSOR_14_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC2_CFG_BASE 0x7FFCE86748ull
+#define KERNEL_TENSOR_15_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC2_CFG_BASE 0x7FFCE86780ull
+#define KERNEL_SYNC_OBJECT_TPC2_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC2_CFG_SECTION 0x8000
+#define mmKERNEL_TPC2_CFG_BASE 0x7FFCE86788ull
+#define KERNEL_TPC2_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC2_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC2_CFG_BASE 0x7FFCE86A00ull
+#define QM_TENSOR_0_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC2_CFG_BASE 0x7FFCE86A38ull
+#define QM_TENSOR_1_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC2_CFG_BASE 0x7FFCE86A70ull
+#define QM_TENSOR_2_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC2_CFG_BASE 0x7FFCE86AA8ull
+#define QM_TENSOR_3_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC2_CFG_BASE 0x7FFCE86AE0ull
+#define QM_TENSOR_4_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC2_CFG_BASE 0x7FFCE86B18ull
+#define QM_TENSOR_5_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC2_CFG_BASE 0x7FFCE86B50ull
+#define QM_TENSOR_6_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC2_CFG_BASE 0x7FFCE86B88ull
+#define QM_TENSOR_7_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC2_CFG_BASE 0x7FFCE86BC0ull
+#define QM_TENSOR_8_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC2_CFG_BASE 0x7FFCE86BF8ull
+#define QM_TENSOR_9_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC2_CFG_BASE 0x7FFCE86C30ull
+#define QM_TENSOR_10_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC2_CFG_BASE 0x7FFCE86C68ull
+#define QM_TENSOR_11_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC2_CFG_BASE 0x7FFCE86CA0ull
+#define QM_TENSOR_12_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC2_CFG_BASE 0x7FFCE86CD8ull
+#define QM_TENSOR_13_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC2_CFG_BASE 0x7FFCE86D10ull
+#define QM_TENSOR_14_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC2_CFG_BASE 0x7FFCE86D48ull
+#define QM_TENSOR_15_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC2_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC2_CFG_BASE 0x7FFCE86D80ull
+#define QM_SYNC_OBJECT_TPC2_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC2_CFG_SECTION 0x8000
+#define mmQM_TPC2_CFG_BASE 0x7FFCE86D88ull
+#define QM_TPC2_CFG_MAX_OFFSET 0xB800
+#define QM_TPC2_CFG_SECTION 0x2780
+#define mmTPC2_E2E_CRED_BASE 0x7FFCE87000ull
+#define TPC2_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC2_E2E_CRED_SECTION 0x1000
+#define mmTPC2_QM_BASE 0x7FFCE88000ull
+#define TPC2_QM_MAX_OFFSET 0xD040
+#define TPC2_QM_SECTION 0x3E000
+#define mmTPC3_CFG_BASE 0x7FFCEC6000ull
+#define TPC3_CFG_MAX_OFFSET 0xE400
+#define TPC3_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC3_CFG_BASE 0x7FFCEC6400ull
+#define KERNEL_TENSOR_0_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC3_CFG_BASE 0x7FFCEC6438ull
+#define KERNEL_TENSOR_1_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC3_CFG_BASE 0x7FFCEC6470ull
+#define KERNEL_TENSOR_2_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC3_CFG_BASE 0x7FFCEC64A8ull
+#define KERNEL_TENSOR_3_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC3_CFG_BASE 0x7FFCEC64E0ull
+#define KERNEL_TENSOR_4_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC3_CFG_BASE 0x7FFCEC6518ull
+#define KERNEL_TENSOR_5_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC3_CFG_BASE 0x7FFCEC6550ull
+#define KERNEL_TENSOR_6_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC3_CFG_BASE 0x7FFCEC6588ull
+#define KERNEL_TENSOR_7_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC3_CFG_BASE 0x7FFCEC65C0ull
+#define KERNEL_TENSOR_8_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC3_CFG_BASE 0x7FFCEC65F8ull
+#define KERNEL_TENSOR_9_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC3_CFG_BASE 0x7FFCEC6630ull
+#define KERNEL_TENSOR_10_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC3_CFG_BASE 0x7FFCEC6668ull
+#define KERNEL_TENSOR_11_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC3_CFG_BASE 0x7FFCEC66A0ull
+#define KERNEL_TENSOR_12_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC3_CFG_BASE 0x7FFCEC66D8ull
+#define KERNEL_TENSOR_13_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC3_CFG_BASE 0x7FFCEC6710ull
+#define KERNEL_TENSOR_14_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC3_CFG_BASE 0x7FFCEC6748ull
+#define KERNEL_TENSOR_15_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC3_CFG_BASE 0x7FFCEC6780ull
+#define KERNEL_SYNC_OBJECT_TPC3_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC3_CFG_SECTION 0x8000
+#define mmKERNEL_TPC3_CFG_BASE 0x7FFCEC6788ull
+#define KERNEL_TPC3_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC3_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC3_CFG_BASE 0x7FFCEC6A00ull
+#define QM_TENSOR_0_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC3_CFG_BASE 0x7FFCEC6A38ull
+#define QM_TENSOR_1_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC3_CFG_BASE 0x7FFCEC6A70ull
+#define QM_TENSOR_2_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC3_CFG_BASE 0x7FFCEC6AA8ull
+#define QM_TENSOR_3_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC3_CFG_BASE 0x7FFCEC6AE0ull
+#define QM_TENSOR_4_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC3_CFG_BASE 0x7FFCEC6B18ull
+#define QM_TENSOR_5_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC3_CFG_BASE 0x7FFCEC6B50ull
+#define QM_TENSOR_6_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC3_CFG_BASE 0x7FFCEC6B88ull
+#define QM_TENSOR_7_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC3_CFG_BASE 0x7FFCEC6BC0ull
+#define QM_TENSOR_8_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC3_CFG_BASE 0x7FFCEC6BF8ull
+#define QM_TENSOR_9_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC3_CFG_BASE 0x7FFCEC6C30ull
+#define QM_TENSOR_10_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC3_CFG_BASE 0x7FFCEC6C68ull
+#define QM_TENSOR_11_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC3_CFG_BASE 0x7FFCEC6CA0ull
+#define QM_TENSOR_12_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC3_CFG_BASE 0x7FFCEC6CD8ull
+#define QM_TENSOR_13_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC3_CFG_BASE 0x7FFCEC6D10ull
+#define QM_TENSOR_14_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC3_CFG_BASE 0x7FFCEC6D48ull
+#define QM_TENSOR_15_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC3_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC3_CFG_BASE 0x7FFCEC6D80ull
+#define QM_SYNC_OBJECT_TPC3_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC3_CFG_SECTION 0x8000
+#define mmQM_TPC3_CFG_BASE 0x7FFCEC6D88ull
+#define QM_TPC3_CFG_MAX_OFFSET 0xB800
+#define QM_TPC3_CFG_SECTION 0x2780
+#define mmTPC3_E2E_CRED_BASE 0x7FFCEC7000ull
+#define TPC3_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC3_E2E_CRED_SECTION 0x1000
+#define mmTPC3_QM_BASE 0x7FFCEC8000ull
+#define TPC3_QM_MAX_OFFSET 0xD040
+#define TPC3_QM_SECTION 0x3E000
+#define mmTPC4_CFG_BASE 0x7FFCF06000ull
+#define TPC4_CFG_MAX_OFFSET 0xE400
+#define TPC4_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC4_CFG_BASE 0x7FFCF06400ull
+#define KERNEL_TENSOR_0_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC4_CFG_BASE 0x7FFCF06438ull
+#define KERNEL_TENSOR_1_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC4_CFG_BASE 0x7FFCF06470ull
+#define KERNEL_TENSOR_2_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC4_CFG_BASE 0x7FFCF064A8ull
+#define KERNEL_TENSOR_3_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC4_CFG_BASE 0x7FFCF064E0ull
+#define KERNEL_TENSOR_4_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC4_CFG_BASE 0x7FFCF06518ull
+#define KERNEL_TENSOR_5_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC4_CFG_BASE 0x7FFCF06550ull
+#define KERNEL_TENSOR_6_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC4_CFG_BASE 0x7FFCF06588ull
+#define KERNEL_TENSOR_7_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC4_CFG_BASE 0x7FFCF065C0ull
+#define KERNEL_TENSOR_8_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC4_CFG_BASE 0x7FFCF065F8ull
+#define KERNEL_TENSOR_9_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC4_CFG_BASE 0x7FFCF06630ull
+#define KERNEL_TENSOR_10_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC4_CFG_BASE 0x7FFCF06668ull
+#define KERNEL_TENSOR_11_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC4_CFG_BASE 0x7FFCF066A0ull
+#define KERNEL_TENSOR_12_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC4_CFG_BASE 0x7FFCF066D8ull
+#define KERNEL_TENSOR_13_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC4_CFG_BASE 0x7FFCF06710ull
+#define KERNEL_TENSOR_14_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC4_CFG_BASE 0x7FFCF06748ull
+#define KERNEL_TENSOR_15_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC4_CFG_BASE 0x7FFCF06780ull
+#define KERNEL_SYNC_OBJECT_TPC4_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC4_CFG_SECTION 0x8000
+#define mmKERNEL_TPC4_CFG_BASE 0x7FFCF06788ull
+#define KERNEL_TPC4_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC4_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC4_CFG_BASE 0x7FFCF06A00ull
+#define QM_TENSOR_0_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC4_CFG_BASE 0x7FFCF06A38ull
+#define QM_TENSOR_1_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC4_CFG_BASE 0x7FFCF06A70ull
+#define QM_TENSOR_2_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC4_CFG_BASE 0x7FFCF06AA8ull
+#define QM_TENSOR_3_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC4_CFG_BASE 0x7FFCF06AE0ull
+#define QM_TENSOR_4_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC4_CFG_BASE 0x7FFCF06B18ull
+#define QM_TENSOR_5_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC4_CFG_BASE 0x7FFCF06B50ull
+#define QM_TENSOR_6_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC4_CFG_BASE 0x7FFCF06B88ull
+#define QM_TENSOR_7_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC4_CFG_BASE 0x7FFCF06BC0ull
+#define QM_TENSOR_8_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC4_CFG_BASE 0x7FFCF06BF8ull
+#define QM_TENSOR_9_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC4_CFG_BASE 0x7FFCF06C30ull
+#define QM_TENSOR_10_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC4_CFG_BASE 0x7FFCF06C68ull
+#define QM_TENSOR_11_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC4_CFG_BASE 0x7FFCF06CA0ull
+#define QM_TENSOR_12_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC4_CFG_BASE 0x7FFCF06CD8ull
+#define QM_TENSOR_13_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC4_CFG_BASE 0x7FFCF06D10ull
+#define QM_TENSOR_14_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC4_CFG_BASE 0x7FFCF06D48ull
+#define QM_TENSOR_15_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC4_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC4_CFG_BASE 0x7FFCF06D80ull
+#define QM_SYNC_OBJECT_TPC4_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC4_CFG_SECTION 0x8000
+#define mmQM_TPC4_CFG_BASE 0x7FFCF06D88ull
+#define QM_TPC4_CFG_MAX_OFFSET 0xB800
+#define QM_TPC4_CFG_SECTION 0x2780
+#define mmTPC4_E2E_CRED_BASE 0x7FFCF07000ull
+#define TPC4_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC4_E2E_CRED_SECTION 0x1000
+#define mmTPC4_QM_BASE 0x7FFCF08000ull
+#define TPC4_QM_MAX_OFFSET 0xD040
+#define TPC4_QM_SECTION 0x3E000
+#define mmTPC5_CFG_BASE 0x7FFCF46000ull
+#define TPC5_CFG_MAX_OFFSET 0xE400
+#define TPC5_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC5_CFG_BASE 0x7FFCF46400ull
+#define KERNEL_TENSOR_0_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC5_CFG_BASE 0x7FFCF46438ull
+#define KERNEL_TENSOR_1_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC5_CFG_BASE 0x7FFCF46470ull
+#define KERNEL_TENSOR_2_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC5_CFG_BASE 0x7FFCF464A8ull
+#define KERNEL_TENSOR_3_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC5_CFG_BASE 0x7FFCF464E0ull
+#define KERNEL_TENSOR_4_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC5_CFG_BASE 0x7FFCF46518ull
+#define KERNEL_TENSOR_5_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC5_CFG_BASE 0x7FFCF46550ull
+#define KERNEL_TENSOR_6_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC5_CFG_BASE 0x7FFCF46588ull
+#define KERNEL_TENSOR_7_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC5_CFG_BASE 0x7FFCF465C0ull
+#define KERNEL_TENSOR_8_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC5_CFG_BASE 0x7FFCF465F8ull
+#define KERNEL_TENSOR_9_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC5_CFG_BASE 0x7FFCF46630ull
+#define KERNEL_TENSOR_10_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC5_CFG_BASE 0x7FFCF46668ull
+#define KERNEL_TENSOR_11_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC5_CFG_BASE 0x7FFCF466A0ull
+#define KERNEL_TENSOR_12_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC5_CFG_BASE 0x7FFCF466D8ull
+#define KERNEL_TENSOR_13_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC5_CFG_BASE 0x7FFCF46710ull
+#define KERNEL_TENSOR_14_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC5_CFG_BASE 0x7FFCF46748ull
+#define KERNEL_TENSOR_15_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC5_CFG_BASE 0x7FFCF46780ull
+#define KERNEL_SYNC_OBJECT_TPC5_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC5_CFG_SECTION 0x8000
+#define mmKERNEL_TPC5_CFG_BASE 0x7FFCF46788ull
+#define KERNEL_TPC5_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC5_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC5_CFG_BASE 0x7FFCF46A00ull
+#define QM_TENSOR_0_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC5_CFG_BASE 0x7FFCF46A38ull
+#define QM_TENSOR_1_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC5_CFG_BASE 0x7FFCF46A70ull
+#define QM_TENSOR_2_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC5_CFG_BASE 0x7FFCF46AA8ull
+#define QM_TENSOR_3_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC5_CFG_BASE 0x7FFCF46AE0ull
+#define QM_TENSOR_4_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC5_CFG_BASE 0x7FFCF46B18ull
+#define QM_TENSOR_5_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC5_CFG_BASE 0x7FFCF46B50ull
+#define QM_TENSOR_6_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC5_CFG_BASE 0x7FFCF46B88ull
+#define QM_TENSOR_7_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC5_CFG_BASE 0x7FFCF46BC0ull
+#define QM_TENSOR_8_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC5_CFG_BASE 0x7FFCF46BF8ull
+#define QM_TENSOR_9_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC5_CFG_BASE 0x7FFCF46C30ull
+#define QM_TENSOR_10_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC5_CFG_BASE 0x7FFCF46C68ull
+#define QM_TENSOR_11_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC5_CFG_BASE 0x7FFCF46CA0ull
+#define QM_TENSOR_12_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC5_CFG_BASE 0x7FFCF46CD8ull
+#define QM_TENSOR_13_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC5_CFG_BASE 0x7FFCF46D10ull
+#define QM_TENSOR_14_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC5_CFG_BASE 0x7FFCF46D48ull
+#define QM_TENSOR_15_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC5_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC5_CFG_BASE 0x7FFCF46D80ull
+#define QM_SYNC_OBJECT_TPC5_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC5_CFG_SECTION 0x8000
+#define mmQM_TPC5_CFG_BASE 0x7FFCF46D88ull
+#define QM_TPC5_CFG_MAX_OFFSET 0xB800
+#define QM_TPC5_CFG_SECTION 0x2780
+#define mmTPC5_E2E_CRED_BASE 0x7FFCF47000ull
+#define TPC5_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC5_E2E_CRED_SECTION 0x1000
+#define mmTPC5_QM_BASE 0x7FFCF48000ull
+#define TPC5_QM_MAX_OFFSET 0xD040
+#define TPC5_QM_SECTION 0x3E000
+#define mmTPC6_CFG_BASE 0x7FFCF86000ull
+#define TPC6_CFG_MAX_OFFSET 0xE400
+#define TPC6_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC6_CFG_BASE 0x7FFCF86400ull
+#define KERNEL_TENSOR_0_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC6_CFG_BASE 0x7FFCF86438ull
+#define KERNEL_TENSOR_1_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC6_CFG_BASE 0x7FFCF86470ull
+#define KERNEL_TENSOR_2_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC6_CFG_BASE 0x7FFCF864A8ull
+#define KERNEL_TENSOR_3_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC6_CFG_BASE 0x7FFCF864E0ull
+#define KERNEL_TENSOR_4_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC6_CFG_BASE 0x7FFCF86518ull
+#define KERNEL_TENSOR_5_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC6_CFG_BASE 0x7FFCF86550ull
+#define KERNEL_TENSOR_6_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC6_CFG_BASE 0x7FFCF86588ull
+#define KERNEL_TENSOR_7_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC6_CFG_BASE 0x7FFCF865C0ull
+#define KERNEL_TENSOR_8_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC6_CFG_BASE 0x7FFCF865F8ull
+#define KERNEL_TENSOR_9_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC6_CFG_BASE 0x7FFCF86630ull
+#define KERNEL_TENSOR_10_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC6_CFG_BASE 0x7FFCF86668ull
+#define KERNEL_TENSOR_11_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC6_CFG_BASE 0x7FFCF866A0ull
+#define KERNEL_TENSOR_12_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC6_CFG_BASE 0x7FFCF866D8ull
+#define KERNEL_TENSOR_13_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC6_CFG_BASE 0x7FFCF86710ull
+#define KERNEL_TENSOR_14_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC6_CFG_BASE 0x7FFCF86748ull
+#define KERNEL_TENSOR_15_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC6_CFG_BASE 0x7FFCF86780ull
+#define KERNEL_SYNC_OBJECT_TPC6_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC6_CFG_SECTION 0x8000
+#define mmKERNEL_TPC6_CFG_BASE 0x7FFCF86788ull
+#define KERNEL_TPC6_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC6_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC6_CFG_BASE 0x7FFCF86A00ull
+#define QM_TENSOR_0_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC6_CFG_BASE 0x7FFCF86A38ull
+#define QM_TENSOR_1_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC6_CFG_BASE 0x7FFCF86A70ull
+#define QM_TENSOR_2_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC6_CFG_BASE 0x7FFCF86AA8ull
+#define QM_TENSOR_3_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC6_CFG_BASE 0x7FFCF86AE0ull
+#define QM_TENSOR_4_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC6_CFG_BASE 0x7FFCF86B18ull
+#define QM_TENSOR_5_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC6_CFG_BASE 0x7FFCF86B50ull
+#define QM_TENSOR_6_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC6_CFG_BASE 0x7FFCF86B88ull
+#define QM_TENSOR_7_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC6_CFG_BASE 0x7FFCF86BC0ull
+#define QM_TENSOR_8_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC6_CFG_BASE 0x7FFCF86BF8ull
+#define QM_TENSOR_9_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC6_CFG_BASE 0x7FFCF86C30ull
+#define QM_TENSOR_10_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC6_CFG_BASE 0x7FFCF86C68ull
+#define QM_TENSOR_11_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC6_CFG_BASE 0x7FFCF86CA0ull
+#define QM_TENSOR_12_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC6_CFG_BASE 0x7FFCF86CD8ull
+#define QM_TENSOR_13_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC6_CFG_BASE 0x7FFCF86D10ull
+#define QM_TENSOR_14_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC6_CFG_BASE 0x7FFCF86D48ull
+#define QM_TENSOR_15_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC6_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC6_CFG_BASE 0x7FFCF86D80ull
+#define QM_SYNC_OBJECT_TPC6_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC6_CFG_SECTION 0x8000
+#define mmQM_TPC6_CFG_BASE 0x7FFCF86D88ull
+#define QM_TPC6_CFG_MAX_OFFSET 0xB800
+#define QM_TPC6_CFG_SECTION 0x2780
+#define mmTPC6_E2E_CRED_BASE 0x7FFCF87000ull
+#define TPC6_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC6_E2E_CRED_SECTION 0x1000
+#define mmTPC6_QM_BASE 0x7FFCF88000ull
+#define TPC6_QM_MAX_OFFSET 0xD040
+#define TPC6_QM_SECTION 0x3E000
+#define mmTPC7_CFG_BASE 0x7FFCFC6000ull
+#define TPC7_CFG_MAX_OFFSET 0xE400
+#define TPC7_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC7_CFG_BASE 0x7FFCFC6400ull
+#define KERNEL_TENSOR_0_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC7_CFG_BASE 0x7FFCFC6438ull
+#define KERNEL_TENSOR_1_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC7_CFG_BASE 0x7FFCFC6470ull
+#define KERNEL_TENSOR_2_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC7_CFG_BASE 0x7FFCFC64A8ull
+#define KERNEL_TENSOR_3_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC7_CFG_BASE 0x7FFCFC64E0ull
+#define KERNEL_TENSOR_4_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC7_CFG_BASE 0x7FFCFC6518ull
+#define KERNEL_TENSOR_5_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC7_CFG_BASE 0x7FFCFC6550ull
+#define KERNEL_TENSOR_6_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC7_CFG_BASE 0x7FFCFC6588ull
+#define KERNEL_TENSOR_7_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC7_CFG_BASE 0x7FFCFC65C0ull
+#define KERNEL_TENSOR_8_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC7_CFG_BASE 0x7FFCFC65F8ull
+#define KERNEL_TENSOR_9_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC7_CFG_BASE 0x7FFCFC6630ull
+#define KERNEL_TENSOR_10_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC7_CFG_BASE 0x7FFCFC6668ull
+#define KERNEL_TENSOR_11_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC7_CFG_BASE 0x7FFCFC66A0ull
+#define KERNEL_TENSOR_12_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC7_CFG_BASE 0x7FFCFC66D8ull
+#define KERNEL_TENSOR_13_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC7_CFG_BASE 0x7FFCFC6710ull
+#define KERNEL_TENSOR_14_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC7_CFG_BASE 0x7FFCFC6748ull
+#define KERNEL_TENSOR_15_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC7_CFG_BASE 0x7FFCFC6780ull
+#define KERNEL_SYNC_OBJECT_TPC7_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC7_CFG_SECTION 0x8000
+#define mmKERNEL_TPC7_CFG_BASE 0x7FFCFC6788ull
+#define KERNEL_TPC7_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC7_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC7_CFG_BASE 0x7FFCFC6A00ull
+#define QM_TENSOR_0_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC7_CFG_BASE 0x7FFCFC6A38ull
+#define QM_TENSOR_1_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC7_CFG_BASE 0x7FFCFC6A70ull
+#define QM_TENSOR_2_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC7_CFG_BASE 0x7FFCFC6AA8ull
+#define QM_TENSOR_3_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC7_CFG_BASE 0x7FFCFC6AE0ull
+#define QM_TENSOR_4_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC7_CFG_BASE 0x7FFCFC6B18ull
+#define QM_TENSOR_5_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC7_CFG_BASE 0x7FFCFC6B50ull
+#define QM_TENSOR_6_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC7_CFG_BASE 0x7FFCFC6B88ull
+#define QM_TENSOR_7_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC7_CFG_BASE 0x7FFCFC6BC0ull
+#define QM_TENSOR_8_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC7_CFG_BASE 0x7FFCFC6BF8ull
+#define QM_TENSOR_9_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC7_CFG_BASE 0x7FFCFC6C30ull
+#define QM_TENSOR_10_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC7_CFG_BASE 0x7FFCFC6C68ull
+#define QM_TENSOR_11_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC7_CFG_BASE 0x7FFCFC6CA0ull
+#define QM_TENSOR_12_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC7_CFG_BASE 0x7FFCFC6CD8ull
+#define QM_TENSOR_13_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC7_CFG_BASE 0x7FFCFC6D10ull
+#define QM_TENSOR_14_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC7_CFG_BASE 0x7FFCFC6D48ull
+#define QM_TENSOR_15_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC7_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC7_CFG_BASE 0x7FFCFC6D80ull
+#define QM_SYNC_OBJECT_TPC7_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC7_CFG_SECTION 0x8000
+#define mmQM_TPC7_CFG_BASE 0x7FFCFC6D88ull
+#define QM_TPC7_CFG_MAX_OFFSET 0xB800
+#define QM_TPC7_CFG_SECTION 0x2780
+#define mmTPC7_E2E_CRED_BASE 0x7FFCFC7000ull
+#define TPC7_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC7_E2E_CRED_SECTION 0x1000
+#define mmTPC7_QM_BASE 0x7FFCFC8000ull
+#define TPC7_QM_MAX_OFFSET 0xD040
+#define TPC7_QM_SECTION 0x1038000
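+/*
+ * A brief orientation note, offered as a sketch since the semantics are
+ * inferred from the map itself: for every block X here, mmX_BASE is the
+ * block's MMIO base, X_MAX_OFFSET appears to bound the registers defined
+ * inside the block, and X_SECTION is the stride from this block's base
+ * to the next one's (for example, mmTPC7_QM_BASE + TPC7_QM_SECTION ==
+ * 0x7FFCFC8000 + 0x1038000 == 0x7FFE000000 == mmMME_S_ROM_TABLE_BASE).
+ * A hypothetical helper, illustrative only and not a macro from this
+ * file, for forming a register address within a block:
+ *
+ *   #define GAUDI_BLOCK_REG(blk, off) (mm##blk##_BASE + (off))
+ *
+ * with "off" expected to stay below blk##_MAX_OFFSET.
+ */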
+#define mmMME_S_ROM_TABLE_BASE 0x7FFE000000ull
+#define MME_S_ROM_TABLE_MAX_OFFSET 0x1000
+#define MME_S_ROM_TABLE_SECTION 0x21000
+#define mmMME0_ACC_STM_BASE 0x7FFE021000ull
+#define MME0_ACC_STM_MAX_OFFSET 0x1000
+#define MME0_ACC_STM_SECTION 0x1000
+#define mmMME0_ACC_CTI_BASE 0x7FFE022000ull
+#define MME0_ACC_CTI_MAX_OFFSET 0x1000
+#define MME0_ACC_CTI_SECTION 0x1000
+#define mmMME0_ACC_ETF_BASE 0x7FFE023000ull
+#define MME0_ACC_ETF_MAX_OFFSET 0x1000
+#define MME0_ACC_ETF_SECTION 0x1000
+#define mmMME0_ACC_SPMU_BASE 0x7FFE024000ull
+#define MME0_ACC_SPMU_MAX_OFFSET 0x1000
+#define MME0_ACC_SPMU_SECTION 0x1000
+#define mmMME0_ACC_CTI0_BASE 0x7FFE025000ull
+#define MME0_ACC_CTI0_MAX_OFFSET 0x1000
+#define MME0_ACC_CTI0_SECTION 0x1000
+#define mmMME0_ACC_CTI1_BASE 0x7FFE026000ull
+#define MME0_ACC_CTI1_MAX_OFFSET 0x1000
+#define MME0_ACC_CTI1_SECTION 0x1000
+#define mmMME0_ACC_BMON0_BASE 0x7FFE027000ull
+#define MME0_ACC_BMON0_MAX_OFFSET 0x1000
+#define MME0_ACC_BMON0_SECTION 0x9000
+#define mmMME0_ACC_FUNNEL_BASE 0x7FFE030000ull
+#define MME0_ACC_FUNNEL_MAX_OFFSET 0x1000
+#define MME0_ACC_FUNNEL_SECTION 0x11000
+#define mmMME0_SBAB_STM_BASE 0x7FFE041000ull
+#define MME0_SBAB_STM_MAX_OFFSET 0x1000
+#define MME0_SBAB_STM_SECTION 0x1000
+#define mmMME0_SBAB_CTI_BASE 0x7FFE042000ull
+#define MME0_SBAB_CTI_MAX_OFFSET 0x1000
+#define MME0_SBAB_CTI_SECTION 0x1000
+#define mmMME0_SBAB_ETF_BASE 0x7FFE043000ull
+#define MME0_SBAB_ETF_MAX_OFFSET 0x1000
+#define MME0_SBAB_ETF_SECTION 0x1000
+#define mmMME0_SBAB_SPMU_BASE 0x7FFE044000ull
+#define MME0_SBAB_SPMU_MAX_OFFSET 0x1000
+#define MME0_SBAB_SPMU_SECTION 0x1000
+#define mmMME0_SBAB_CTI0_BASE 0x7FFE045000ull
+#define MME0_SBAB_CTI0_MAX_OFFSET 0x1000
+#define MME0_SBAB_CTI0_SECTION 0x1000
+#define mmMME0_SBAB_CTI1_BASE 0x7FFE046000ull
+#define MME0_SBAB_CTI1_MAX_OFFSET 0x1000
+#define MME0_SBAB_CTI1_SECTION 0x1000
+#define mmMME0_SBAB_BMON0_BASE 0x7FFE047000ull
+#define MME0_SBAB_BMON0_MAX_OFFSET 0x1000
+#define MME0_SBAB_BMON0_SECTION 0x1000
+#define mmMME0_SBAB_BMON1_BASE 0x7FFE048000ull
+#define MME0_SBAB_BMON1_MAX_OFFSET 0x1000
+#define MME0_SBAB_BMON1_SECTION 0x19000
+#define mmMME0_CTRL_STM_BASE 0x7FFE061000ull
+#define MME0_CTRL_STM_MAX_OFFSET 0x1000
+#define MME0_CTRL_STM_SECTION 0x1000
+#define mmMME0_CTRL_CTI_BASE 0x7FFE062000ull
+#define MME0_CTRL_CTI_MAX_OFFSET 0x1000
+#define MME0_CTRL_CTI_SECTION 0x1000
+#define mmMME0_CTRL_ETF_BASE 0x7FFE063000ull
+#define MME0_CTRL_ETF_MAX_OFFSET 0x1000
+#define MME0_CTRL_ETF_SECTION 0x1000
+#define mmMME0_CTRL_SPMU_BASE 0x7FFE064000ull
+#define MME0_CTRL_SPMU_MAX_OFFSET 0x1000
+#define MME0_CTRL_SPMU_SECTION 0x1000
+#define mmMME0_CTRL_CTI0_BASE 0x7FFE065000ull
+#define MME0_CTRL_CTI0_MAX_OFFSET 0x1000
+#define MME0_CTRL_CTI0_SECTION 0x1000
+#define mmMME0_CTRL_CTI1_BASE 0x7FFE066000ull
+#define MME0_CTRL_CTI1_MAX_OFFSET 0x1000
+#define MME0_CTRL_CTI1_SECTION 0x1000
+#define mmMME0_CTRL_BMON0_BASE 0x7FFE067000ull
+#define MME0_CTRL_BMON0_MAX_OFFSET 0x1000
+#define MME0_CTRL_BMON0_SECTION 0x1000
+#define mmMME0_CTRL_BMON1_BASE 0x7FFE068000ull
+#define MME0_CTRL_BMON1_MAX_OFFSET 0x1000
+#define MME0_CTRL_BMON1_SECTION 0x39000
+#define mmMME1_ACC_STM_BASE 0x7FFE0A1000ull
+#define MME1_ACC_STM_MAX_OFFSET 0x1000
+#define MME1_ACC_STM_SECTION 0x1000
+#define mmMME1_ACC_CTI_BASE 0x7FFE0A2000ull
+#define MME1_ACC_CTI_MAX_OFFSET 0x1000
+#define MME1_ACC_CTI_SECTION 0x1000
+#define mmMME1_ACC_ETF_BASE 0x7FFE0A3000ull
+#define MME1_ACC_ETF_MAX_OFFSET 0x1000
+#define MME1_ACC_ETF_SECTION 0x1000
+#define mmMME1_ACC_SPMU_BASE 0x7FFE0A4000ull
+#define MME1_ACC_SPMU_MAX_OFFSET 0x1000
+#define MME1_ACC_SPMU_SECTION 0x1000
+#define mmMME1_ACC_CTI0_BASE 0x7FFE0A5000ull
+#define MME1_ACC_CTI0_MAX_OFFSET 0x1000
+#define MME1_ACC_CTI0_SECTION 0x1000
+#define mmMME1_ACC_CTI1_BASE 0x7FFE0A6000ull
+#define MME1_ACC_CTI1_MAX_OFFSET 0x1000
+#define MME1_ACC_CTI1_SECTION 0x1000
+#define mmMME1_ACC_BMON0_BASE 0x7FFE0A7000ull
+#define MME1_ACC_BMON0_MAX_OFFSET 0x1000
+#define MME1_ACC_BMON0_SECTION 0x9000
+#define mmMME1_ACC_FUNNEL_BASE 0x7FFE0B0000ull
+#define MME1_ACC_FUNNEL_MAX_OFFSET 0x1000
+#define MME1_ACC_FUNNEL_SECTION 0x11000
+#define mmMME1_SBAB_STM_BASE 0x7FFE0C1000ull
+#define MME1_SBAB_STM_MAX_OFFSET 0x1000
+#define MME1_SBAB_STM_SECTION 0x1000
+#define mmMME1_SBAB_CTI_BASE 0x7FFE0C2000ull
+#define MME1_SBAB_CTI_MAX_OFFSET 0x1000
+#define MME1_SBAB_CTI_SECTION 0x1000
+#define mmMME1_SBAB_ETF_BASE 0x7FFE0C3000ull
+#define MME1_SBAB_ETF_MAX_OFFSET 0x1000
+#define MME1_SBAB_ETF_SECTION 0x1000
+#define mmMME1_SBAB_SPMU_BASE 0x7FFE0C4000ull
+#define MME1_SBAB_SPMU_MAX_OFFSET 0x1000
+#define MME1_SBAB_SPMU_SECTION 0x1000
+#define mmMME1_SBAB_CTI0_BASE 0x7FFE0C5000ull
+#define MME1_SBAB_CTI0_MAX_OFFSET 0x1000
+#define MME1_SBAB_CTI0_SECTION 0x1000
+#define mmMME1_SBAB_CTI1_BASE 0x7FFE0C6000ull
+#define MME1_SBAB_CTI1_MAX_OFFSET 0x1000
+#define MME1_SBAB_CTI1_SECTION 0x1000
+#define mmMME1_SBAB_BMON0_BASE 0x7FFE0C7000ull
+#define MME1_SBAB_BMON0_MAX_OFFSET 0x1000
+#define MME1_SBAB_BMON0_SECTION 0x1000
+#define mmMME1_SBAB_BMON1_BASE 0x7FFE0C8000ull
+#define MME1_SBAB_BMON1_MAX_OFFSET 0x1000
+#define MME1_SBAB_BMON1_SECTION 0x19000
+#define mmMME1_CTRL_STM_BASE 0x7FFE0E1000ull
+#define MME1_CTRL_STM_MAX_OFFSET 0x1000
+#define MME1_CTRL_STM_SECTION 0x1000
+#define mmMME1_CTRL_CTI_BASE 0x7FFE0E2000ull
+#define MME1_CTRL_CTI_MAX_OFFSET 0x1000
+#define MME1_CTRL_CTI_SECTION 0x1000
+#define mmMME1_CTRL_ETF_BASE 0x7FFE0E3000ull
+#define MME1_CTRL_ETF_MAX_OFFSET 0x1000
+#define MME1_CTRL_ETF_SECTION 0x1000
+#define mmMME1_CTRL_SPMU_BASE 0x7FFE0E4000ull
+#define MME1_CTRL_SPMU_MAX_OFFSET 0x1000
+#define MME1_CTRL_SPMU_SECTION 0x1000
+#define mmMME1_CTRL_CTI0_BASE 0x7FFE0E5000ull
+#define MME1_CTRL_CTI0_MAX_OFFSET 0x1000
+#define MME1_CTRL_CTI0_SECTION 0x1000
+#define mmMME1_CTRL_CTI1_BASE 0x7FFE0E6000ull
+#define MME1_CTRL_CTI1_MAX_OFFSET 0x1000
+#define MME1_CTRL_CTI1_SECTION 0x1000
+#define mmMME1_CTRL_BMON0_BASE 0x7FFE0E7000ull
+#define MME1_CTRL_BMON0_MAX_OFFSET 0x1000
+#define MME1_CTRL_BMON0_SECTION 0x1000
+#define mmMME1_CTRL_BMON1_BASE 0x7FFE0E8000ull
+#define MME1_CTRL_BMON1_MAX_OFFSET 0x1000
+#define MME1_CTRL_BMON1_SECTION 0x18000
+#define mmMME_N_ROM_TABLE_BASE 0x7FFE100000ull
+#define MME_N_ROM_TABLE_MAX_OFFSET 0x1000
+#define MME_N_ROM_TABLE_SECTION 0x21000
+#define mmMME2_ACC_STM_BASE 0x7FFE121000ull
+#define MME2_ACC_STM_MAX_OFFSET 0x1000
+#define MME2_ACC_STM_SECTION 0x1000
+#define mmMME2_ACC_CTI_BASE 0x7FFE122000ull
+#define MME2_ACC_CTI_MAX_OFFSET 0x1000
+#define MME2_ACC_CTI_SECTION 0x1000
+#define mmMME2_MME2_ACC_ETF_BASE 0x7FFE123000ull
+#define MME2_MME2_ACC_ETF_MAX_OFFSET 0x1000
+#define MME2_MME2_ACC_ETF_SECTION 0x1000
+#define mmMME2_ACC_SPMU_BASE 0x7FFE124000ull
+#define MME2_ACC_SPMU_MAX_OFFSET 0x1000
+#define MME2_ACC_SPMU_SECTION 0x1000
+#define mmMME2_ACC_CTI0_BASE 0x7FFE125000ull
+#define MME2_ACC_CTI0_MAX_OFFSET 0x1000
+#define MME2_ACC_CTI0_SECTION 0x1000
+#define mmMME2_ACC_CTI1_BASE 0x7FFE126000ull
+#define MME2_ACC_CTI1_MAX_OFFSET 0x1000
+#define MME2_ACC_CTI1_SECTION 0x1000
+#define mmMME2_ACC_BMON0_BASE 0x7FFE127000ull
+#define MME2_ACC_BMON0_MAX_OFFSET 0x1000
+#define MME2_ACC_BMON0_SECTION 0x9000
+#define mmMME2_ACC_FUNNEL_BASE 0x7FFE130000ull
+#define MME2_ACC_FUNNEL_MAX_OFFSET 0x1000
+#define MME2_ACC_FUNNEL_SECTION 0x11000
+#define mmMME2_SBAB_STM_BASE 0x7FFE141000ull
+#define MME2_SBAB_STM_MAX_OFFSET 0x1000
+#define MME2_SBAB_STM_SECTION 0x1000
+#define mmMME2_SBAB_CTI_BASE 0x7FFE142000ull
+#define MME2_SBAB_CTI_MAX_OFFSET 0x1000
+#define MME2_SBAB_CTI_SECTION 0x1000
+#define mmMME2_SBAB_ETF_BASE 0x7FFE143000ull
+#define MME2_SBAB_ETF_MAX_OFFSET 0x1000
+#define MME2_SBAB_ETF_SECTION 0x1000
+#define mmMME2_SBAB_SPMU_BASE 0x7FFE144000ull
+#define MME2_SBAB_SPMU_MAX_OFFSET 0x1000
+#define MME2_SBAB_SPMU_SECTION 0x1000
+#define mmMME2_SBAB_CTI0_BASE 0x7FFE145000ull
+#define MME2_SBAB_CTI0_MAX_OFFSET 0x1000
+#define MME2_SBAB_CTI0_SECTION 0x1000
+#define mmMME2_SBAB_CTI1_BASE 0x7FFE146000ull
+#define MME2_SBAB_CTI1_MAX_OFFSET 0x1000
+#define MME2_SBAB_CTI1_SECTION 0x1000
+#define mmMME2_SBAB_BMON0_BASE 0x7FFE147000ull
+#define MME2_SBAB_BMON0_MAX_OFFSET 0x1000
+#define MME2_SBAB_BMON0_SECTION 0x1000
+#define mmMME2_SBAB_BMON1_BASE 0x7FFE148000ull
+#define MME2_SBAB_BMON1_MAX_OFFSET 0x1000
+#define MME2_SBAB_BMON1_SECTION 0x19000
+#define mmMME2_CTRL_STM_BASE 0x7FFE161000ull
+#define MME2_CTRL_STM_MAX_OFFSET 0x1000
+#define MME2_CTRL_STM_SECTION 0x1000
+#define mmMME2_CTRL_CTI_BASE 0x7FFE162000ull
+#define MME2_CTRL_CTI_MAX_OFFSET 0x1000
+#define MME2_CTRL_CTI_SECTION 0x1000
+#define mmMME2_CTRL_ETF_BASE 0x7FFE163000ull
+#define MME2_CTRL_ETF_MAX_OFFSET 0x1000
+#define MME2_CTRL_ETF_SECTION 0x1000
+#define mmMME2_CTRL_SPMU_BASE 0x7FFE164000ull
+#define MME2_CTRL_SPMU_MAX_OFFSET 0x1000
+#define MME2_CTRL_SPMU_SECTION 0x1000
+#define mmMME2_CTRL_CTI0_BASE 0x7FFE165000ull
+#define MME2_CTRL_CTI0_MAX_OFFSET 0x1000
+#define MME2_CTRL_CTI0_SECTION 0x1000
+#define mmMME2_CTRL_CTI1_BASE 0x7FFE166000ull
+#define MME2_CTRL_CTI1_MAX_OFFSET 0x1000
+#define MME2_CTRL_CTI1_SECTION 0x1000
+#define mmMME2_CTRL_BMON0_BASE 0x7FFE167000ull
+#define MME2_CTRL_BMON0_MAX_OFFSET 0x1000
+#define MME2_CTRL_BMON0_SECTION 0x1000
+#define mmMME2_CTRL_BMON1_BASE 0x7FFE168000ull
+#define MME2_CTRL_BMON1_MAX_OFFSET 0x1000
+#define MME2_CTRL_BMON1_SECTION 0x39000
+#define mmMME3_ACC_STM_BASE 0x7FFE1A1000ull
+#define MME3_ACC_STM_MAX_OFFSET 0x1000
+#define MME3_ACC_STM_SECTION 0x1000
+#define mmMME3_ACC_CTI_BASE 0x7FFE1A2000ull
+#define MME3_ACC_CTI_MAX_OFFSET 0x1000
+#define MME3_ACC_CTI_SECTION 0x1000
+#define mmMME3_ACC_ETF_BASE 0x7FFE1A3000ull
+#define MME3_ACC_ETF_MAX_OFFSET 0x1000
+#define MME3_ACC_ETF_SECTION 0x1000
+#define mmMME3_ACC_SPMU_BASE 0x7FFE1A4000ull
+#define MME3_ACC_SPMU_MAX_OFFSET 0x1000
+#define MME3_ACC_SPMU_SECTION 0x1000
+#define mmMME3_ACC_CTI0_BASE 0x7FFE1A5000ull
+#define MME3_ACC_CTI0_MAX_OFFSET 0x1000
+#define MME3_ACC_CTI0_SECTION 0x1000
+#define mmMME3_ACC_CTI1_BASE 0x7FFE1A6000ull
+#define MME3_ACC_CTI1_MAX_OFFSET 0x1000
+#define MME3_ACC_CTI1_SECTION 0x1000
+#define mmMME3_ACC_BMON0_BASE 0x7FFE1A7000ull
+#define MME3_ACC_BMON0_MAX_OFFSET 0x1000
+#define MME3_ACC_BMON0_SECTION 0x9000
+#define mmMME3_ACC_FUNNEL_BASE 0x7FFE1B0000ull
+#define MME3_ACC_FUNNEL_MAX_OFFSET 0x1000
+#define MME3_ACC_FUNNEL_SECTION 0x11000
+#define mmMME3_SBAB_STM_BASE 0x7FFE1C1000ull
+#define MME3_SBAB_STM_MAX_OFFSET 0x1000
+#define MME3_SBAB_STM_SECTION 0x1000
+#define mmMME3_SBAB_CTI_BASE 0x7FFE1C2000ull
+#define MME3_SBAB_CTI_MAX_OFFSET 0x1000
+#define MME3_SBAB_CTI_SECTION 0x1000
+#define mmMME3_SBAB_ETF_BASE 0x7FFE1C3000ull
+#define MME3_SBAB_ETF_MAX_OFFSET 0x1000
+#define MME3_SBAB_ETF_SECTION 0x1000
+#define mmMME3_SBAB_SPMU_BASE 0x7FFE1C4000ull
+#define MME3_SBAB_SPMU_MAX_OFFSET 0x1000
+#define MME3_SBAB_SPMU_SECTION 0x1000
+#define mmMME3_SBAB_CTI0_BASE 0x7FFE1C5000ull
+#define MME3_SBAB_CTI0_MAX_OFFSET 0x1000
+#define MME3_SBAB_CTI0_SECTION 0x1000
+#define mmMME3_SBAB_CTI1_BASE 0x7FFE1C6000ull
+#define MME3_SBAB_CTI1_MAX_OFFSET 0x1000
+#define MME3_SBAB_CTI1_SECTION 0x1000
+#define mmMME3_SBAB_BMON0_BASE 0x7FFE1C7000ull
+#define MME3_SBAB_BMON0_MAX_OFFSET 0x1000
+#define MME3_SBAB_BMON0_SECTION 0x1000
+#define mmMME3_SBAB_BMON1_BASE 0x7FFE1C8000ull
+#define MME3_SBAB_BMON1_MAX_OFFSET 0x1000
+#define MME3_SBAB_BMON1_SECTION 0x19000
+#define mmMME3_CTRL_STM_BASE 0x7FFE1E1000ull
+#define MME3_CTRL_STM_MAX_OFFSET 0x1000
+#define MME3_CTRL_STM_SECTION 0x1000
+#define mmMME3_CTRL_CTI_BASE 0x7FFE1E2000ull
+#define MME3_CTRL_CTI_MAX_OFFSET 0x1000
+#define MME3_CTRL_CTI_SECTION 0x1000
+#define mmMME3_CTRL_ETF_BASE 0x7FFE1E3000ull
+#define MME3_CTRL_ETF_MAX_OFFSET 0x1000
+#define MME3_CTRL_ETF_SECTION 0x1000
+#define mmMME3_CTRL_SPMU_BASE 0x7FFE1E4000ull
+#define MME3_CTRL_SPMU_MAX_OFFSET 0x1000
+#define MME3_CTRL_SPMU_SECTION 0x1000
+#define mmMME3_CTRL_CTI0_BASE 0x7FFE1E5000ull
+#define MME3_CTRL_CTI0_MAX_OFFSET 0x1000
+#define MME3_CTRL_CTI0_SECTION 0x1000
+#define mmMME3_CTRL_CTI1_BASE 0x7FFE1E6000ull
+#define MME3_CTRL_CTI1_MAX_OFFSET 0x1000
+#define MME3_CTRL_CTI1_SECTION 0x1000
+#define mmMME3_CTRL_BMON0_BASE 0x7FFE1E7000ull
+#define MME3_CTRL_BMON0_MAX_OFFSET 0x1000
+#define MME3_CTRL_BMON0_SECTION 0x1000
+#define mmMME3_CTRL_BMON1_BASE 0x7FFE1E8000ull
+#define MME3_CTRL_BMON1_MAX_OFFSET 0x1000
+#define MME3_CTRL_BMON1_SECTION 0x18000
+#define mmIC_ROM_TABLE_BASE 0x7FFE200000ull
+#define IC_ROM_TABLE_MAX_OFFSET 0x1000
+#define IC_ROM_TABLE_SECTION 0x1000
+#define mmSRAM_Y0_X0_FUNNEL_BASE 0x7FFE201000ull
+#define SRAM_Y0_X0_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X0_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X1_FUNNEL_BASE 0x7FFE209000ull
+#define SRAM_Y0_X1_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X1_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X2_FUNNEL_BASE 0x7FFE211000ull
+#define SRAM_Y0_X2_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X2_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X3_FUNNEL_BASE 0x7FFE219000ull
+#define SRAM_Y0_X3_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X3_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X4_FUNNEL_BASE 0x7FFE221000ull
+#define SRAM_Y0_X4_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X4_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X5_FUNNEL_BASE 0x7FFE229000ull
+#define SRAM_Y0_X5_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X5_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X6_FUNNEL_BASE 0x7FFE231000ull
+#define SRAM_Y0_X6_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X6_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X7_FUNNEL_BASE 0x7FFE239000ull
+#define SRAM_Y0_X7_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X7_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X0_FUNNEL_BASE 0x7FFE241000ull
+#define SRAM_Y1_X0_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X0_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X1_FUNNEL_BASE 0x7FFE249000ull
+#define SRAM_Y1_X1_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X1_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X2_FUNNEL_BASE 0x7FFE251000ull
+#define SRAM_Y1_X2_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X2_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X3_FUNNEL_BASE 0x7FFE259000ull
+#define SRAM_Y1_X3_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X3_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X4_FUNNEL_BASE 0x7FFE261000ull
+#define SRAM_Y1_X4_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X4_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X5_FUNNEL_BASE 0x7FFE269000ull
+#define SRAM_Y1_X5_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X5_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X6_FUNNEL_BASE 0x7FFE271000ull
+#define SRAM_Y1_X6_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X6_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X7_FUNNEL_BASE 0x7FFE279000ull
+#define SRAM_Y1_X7_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X7_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X0_FUNNEL_BASE 0x7FFE281000ull
+#define SRAM_Y2_X0_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X0_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X1_FUNNEL_BASE 0x7FFE289000ull
+#define SRAM_Y2_X1_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X1_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X2_FUNNEL_BASE 0x7FFE291000ull
+#define SRAM_Y2_X2_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X2_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X3_FUNNEL_BASE 0x7FFE299000ull
+#define SRAM_Y2_X3_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X3_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X4_FUNNEL_BASE 0x7FFE2A1000ull
+#define SRAM_Y2_X4_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X4_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X5_FUNNEL_BASE 0x7FFE2A9000ull
+#define SRAM_Y2_X5_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X5_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X6_FUNNEL_BASE 0x7FFE2B1000ull
+#define SRAM_Y2_X6_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X6_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X7_FUNNEL_BASE 0x7FFE2B9000ull
+#define SRAM_Y2_X7_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X7_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X0_FUNNEL_BASE 0x7FFE2C1000ull
+#define SRAM_Y3_X0_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X0_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X1_FUNNEL_BASE 0x7FFE2C9000ull
+#define SRAM_Y3_X1_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X1_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X2_FUNNEL_BASE 0x7FFE2D1000ull
+#define SRAM_Y3_X2_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X2_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X4_FUNNEL_BASE 0x7FFE2D9000ull
+#define SRAM_Y3_X4_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X4_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X3_FUNNEL_BASE 0x7FFE2E1000ull
+#define SRAM_Y3_X3_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X3_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X5_FUNNEL_BASE 0x7FFE2E9000ull
+#define SRAM_Y3_X5_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X5_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X6_FUNNEL_BASE 0x7FFE2F1000ull
+#define SRAM_Y3_X6_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X6_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X7_FUNNEL_BASE 0x7FFE2F9000ull
+#define SRAM_Y3_X7_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X7_FUNNEL_SECTION 0x7000
+#define mmIF_ROM_TABLE_BASE 0x7FFE300000ull
+#define IF_ROM_TABLE_MAX_OFFSET 0x1000
+#define IF_ROM_TABLE_SECTION 0x1000
+#define mmSIF_FUNNEL_0_BASE 0x7FFE301000ull
+#define SIF_FUNNEL_0_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_0_SECTION 0x10000
+#define mmSIF_FUNNEL_1_BASE 0x7FFE311000ull
+#define SIF_FUNNEL_1_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_1_SECTION 0x10000
+#define mmSIF_FUNNEL_2_BASE 0x7FFE321000ull
+#define SIF_FUNNEL_2_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_2_SECTION 0x10000
+#define mmSIF_FUNNEL_3_BASE 0x7FFE331000ull
+#define SIF_FUNNEL_3_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_3_SECTION 0x10000
+#define mmSIF_FUNNEL_4_BASE 0x7FFE341000ull
+#define SIF_FUNNEL_4_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_4_SECTION 0x10000
+#define mmSIF_FUNNEL_5_BASE 0x7FFE351000ull
+#define SIF_FUNNEL_5_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_5_SECTION 0x10000
+#define mmSIF_FUNNEL_6_BASE 0x7FFE361000ull
+#define SIF_FUNNEL_6_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_6_SECTION 0x10000
+#define mmSIF_FUNNEL_7_BASE 0x7FFE371000ull
+#define SIF_FUNNEL_7_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_7_SECTION 0x10000
+#define mmNIF_FUNNEL_0_BASE 0x7FFE381000ull
+#define NIF_FUNNEL_0_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_0_SECTION 0x10000
+#define mmNIF_FUNNEL_1_BASE 0x7FFE391000ull
+#define NIF_FUNNEL_1_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_1_SECTION 0x10000
+#define mmNIF_FUNNEL_2_BASE 0x7FFE3A1000ull
+#define NIF_FUNNEL_2_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_2_SECTION 0x10000
+#define mmNIF_FUNNEL_3_BASE 0x7FFE3B1000ull
+#define NIF_FUNNEL_3_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_3_SECTION 0x10000
+#define mmNIF_FUNNEL_4_BASE 0x7FFE3C1000ull
+#define NIF_FUNNEL_4_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_4_SECTION 0x10000
+#define mmNIF_FUNNEL_5_BASE 0x7FFE3D1000ull
+#define NIF_FUNNEL_5_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_5_SECTION 0x10000
+#define mmNIF_FUNNEL_6_BASE 0x7FFE3E1000ull
+#define NIF_FUNNEL_6_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_6_SECTION 0x10000
+#define mmNIF_FUNNEL_7_BASE 0x7FFE3F1000ull
+#define NIF_FUNNEL_7_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_7_SECTION 0xF000
+#define mmDMA_IF_ROM_TABLE_BASE 0x7FFE400000ull
+#define DMA_IF_ROM_TABLE_MAX_OFFSET 0x1000
+#define DMA_IF_ROM_TABLE_SECTION 0x1000
+#define mmDMA_IF_W_S_STM_BASE 0x7FFE401000ull
+#define DMA_IF_W_S_STM_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_STM_SECTION 0x1000
+#define mmDMA_IF_W_S_CTI_BASE 0x7FFE402000ull
+#define DMA_IF_W_S_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_CTI_SECTION 0x1000
+#define mmDMA_IF_W_S_ETF_BASE 0x7FFE403000ull
+#define DMA_IF_W_S_ETF_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_ETF_SECTION 0x2000
+#define mmDMA_IF_W_S_BMON0_CTI_BASE 0x7FFE405000ull
+#define DMA_IF_W_S_BMON0_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_BMON0_CTI_SECTION 0x1000
+#define mmDMA_IF_W_S_BMON1_CTI_BASE 0x7FFE406000ull
+#define DMA_IF_W_S_BMON1_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_BMON1_CTI_SECTION 0x1000
+#define mmDMA_IF_W_S_HBM0_WR_BMON_BASE 0x7FFE407000ull
+#define DMA_IF_W_S_HBM0_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_HBM0_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_W_S_HBM0_RD_BMON_BASE 0x7FFE408000ull
+#define DMA_IF_W_S_HBM0_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_HBM0_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_W_S_HBM1_WR_BMON_BASE 0x7FFE409000ull
+#define DMA_IF_W_S_HBM1_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_HBM1_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_W_S_HBM1_RD_BMON_BASE 0x7FFE40A000ull
+#define DMA_IF_W_S_HBM1_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_HBM1_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_W_S_SOB_WR_BMON_BASE 0x7FFE40B000ull
+#define DMA_IF_W_S_SOB_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_SOB_WR_BMON_SECTION 0x4000
+#define mmDMA_IF_W_S_FUNNEL_BASE 0x7FFE40F000ull
+#define DMA_IF_W_S_FUNNEL_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_FUNNEL_SECTION 0x12000
+#define mmDMA_IF_E_S_STM_BASE 0x7FFE421000ull
+#define DMA_IF_E_S_STM_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_STM_SECTION 0x1000
+#define mmDMA_IF_E_S_CTI_BASE 0x7FFE422000ull
+#define DMA_IF_E_S_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_CTI_SECTION 0x1000
+#define mmDMA_IF_E_S_ETF_BASE 0x7FFE423000ull
+#define DMA_IF_E_S_ETF_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_ETF_SECTION 0x2000
+#define mmDMA_IF_E_S_BMON0_CTI_BASE 0x7FFE425000ull
+#define DMA_IF_E_S_BMON0_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_BMON0_CTI_SECTION 0x1000
+#define mmDMA_IF_E_S_BMON1_CTI_BASE 0x7FFE426000ull
+#define DMA_IF_E_S_BMON1_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_BMON1_CTI_SECTION 0x1000
+#define mmDMA_IF_E_S_HBM0_WR_BMON_BASE 0x7FFE427000ull
+#define DMA_IF_E_S_HBM0_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_HBM0_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_E_S_HBM0_RD_BMON_BASE 0x7FFE428000ull
+#define DMA_IF_E_S_HBM0_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_HBM0_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_E_S_HBM1_WR_BMON_BASE 0x7FFE429000ull
+#define DMA_IF_E_S_HBM1_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_HBM1_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_E_S_HBM1_RD_BMON_BASE 0x7FFE42A000ull
+#define DMA_IF_E_S_HBM1_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_HBM1_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_E_S_SOB_WR_BMON_BASE 0x7FFE42B000ull
+#define DMA_IF_E_S_SOB_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_SOB_WR_BMON_SECTION 0x4000
+#define mmDMA_IF_E_S_FUNNEL_BASE 0x7FFE42F000ull
+#define DMA_IF_E_S_FUNNEL_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_FUNNEL_SECTION 0x12000
+#define mmDMA_IF_W_N_STM_BASE 0x7FFE441000ull
+#define DMA_IF_W_N_STM_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_STM_SECTION 0x1000
+#define mmDMA_IF_W_N_CTI_BASE 0x7FFE442000ull
+#define DMA_IF_W_N_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_CTI_SECTION 0x1000
+#define mmDMA_IF_W_N_ETF_BASE 0x7FFE443000ull
+#define DMA_IF_W_N_ETF_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_ETF_SECTION 0x2000
+#define mmDMA_IF_W_N_BMON0_CTI_BASE 0x7FFE445000ull
+#define DMA_IF_W_N_BMON0_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_BMON0_CTI_SECTION 0x1000
+#define mmDMA_IF_W_N_BMON1_CTI_BASE 0x7FFE446000ull
+#define DMA_IF_W_N_BMON1_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_BMON1_CTI_SECTION 0x1000
+#define mmDMA_IF_W_N_HBM0_WR_BMON_BASE 0x7FFE447000ull
+#define DMA_IF_W_N_HBM0_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_HBM0_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_W_N_HBM0_RD_BMON_BASE 0x7FFE448000ull
+#define DMA_IF_W_N_HBM0_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_HBM0_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_W_N_HBM1_WR_BMON_BASE 0x7FFE449000ull
+#define DMA_IF_W_N_HBM1_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_HBM1_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_W_N_HBM1_RD_BMON_BASE 0x7FFE44A000ull
+#define DMA_IF_W_N_HBM1_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_HBM1_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_W_N_SOB_WR_BMON_BASE 0x7FFE44B000ull
+#define DMA_IF_W_N_SOB_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_SOB_WR_BMON_SECTION 0x4000
+#define mmDMA_IF_W_N_FUNNEL_BASE 0x7FFE44F000ull
+#define DMA_IF_W_N_FUNNEL_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_FUNNEL_SECTION 0x12000
+#define mmDMA_IF_E_N_STM_BASE 0x7FFE461000ull
+#define DMA_IF_E_N_STM_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_STM_SECTION 0x1000
+#define mmDMA_IF_E_N_CTI_BASE 0x7FFE462000ull
+#define DMA_IF_E_N_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_CTI_SECTION 0x1000
+#define mmDMA_IF_E_N_ETF_BASE 0x7FFE463000ull
+#define DMA_IF_E_N_ETF_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_ETF_SECTION 0x2000
+#define mmDMA_IF_E_N_BMON0_CTI_BASE 0x7FFE465000ull
+#define DMA_IF_E_N_BMON0_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_BMON0_CTI_SECTION 0x1000
+#define mmDMA_IF_E_N_BMON1_CTI_BASE 0x7FFE466000ull
+#define DMA_IF_E_N_BMON1_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_BMON1_CTI_SECTION 0x1000
+#define mmDMA_IF_E_N_HBM0_WR_BMON_BASE 0x7FFE467000ull
+#define DMA_IF_E_N_HBM0_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_HBM0_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_E_N_HBM0_RD_BMON_BASE 0x7FFE468000ull
+#define DMA_IF_E_N_HBM0_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_HBM0_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_E_N_HBM1_WR_BMON_BASE 0x7FFE469000ull
+#define DMA_IF_E_N_HBM1_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_HBM1_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_E_N_HBM1_RD_BMON_BASE 0x7FFE46A000ull
+#define DMA_IF_E_N_HBM1_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_HBM1_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_E_N_SOB_WR_BMON_BASE 0x7FFE46B000ull
+#define DMA_IF_E_N_SOB_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_SOB_WR_BMON_SECTION 0x4000
+#define mmDMA_IF_E_N_FUNNEL_BASE 0x7FFE46F000ull
+#define DMA_IF_E_N_FUNNEL_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_FUNNEL_SECTION 0x11000
+#define mmCPU_ROM_TABLE_BASE 0x7FFE480000ull
+#define CPU_ROM_TABLE_MAX_OFFSET 0x1000
+#define CPU_ROM_TABLE_SECTION 0x1000
+#define mmCPU_ETF_0_BASE 0x7FFE481000ull
+#define CPU_ETF_0_MAX_OFFSET 0x1000
+#define CPU_ETF_0_SECTION 0x1000
+#define mmCPU_ETF_1_BASE 0x7FFE482000ull
+#define CPU_ETF_1_MAX_OFFSET 0x1000
+#define CPU_ETF_1_SECTION 0x2000
+#define mmCPU_CTI_BASE 0x7FFE484000ull
+#define CPU_CTI_MAX_OFFSET 0x1000
+#define CPU_CTI_SECTION 0x1000
+#define mmCPU_FUNNEL_BASE 0x7FFE485000ull
+#define CPU_FUNNEL_MAX_OFFSET 0x1000
+#define CPU_FUNNEL_SECTION 0x1000
+#define mmCPU_STM_BASE 0x7FFE486000ull
+#define CPU_STM_MAX_OFFSET 0x1000
+#define CPU_STM_SECTION 0x1000
+#define mmCPU_CTI_TRACE_BASE 0x7FFE487000ull
+#define CPU_CTI_TRACE_MAX_OFFSET 0x1000
+#define CPU_CTI_TRACE_SECTION 0x1000
+#define mmCPU_ETF_TRACE_BASE 0x7FFE488000ull
+#define CPU_ETF_TRACE_MAX_OFFSET 0x1000
+#define CPU_ETF_TRACE_SECTION 0x1000
+#define mmCPU_WR_BMON_BASE 0x7FFE489000ull
+#define CPU_WR_BMON_MAX_OFFSET 0x1000
+#define CPU_WR_BMON_SECTION 0x1000
+#define mmCPU_RD_BMON_BASE 0x7FFE48A000ull
+#define CPU_RD_BMON_MAX_OFFSET 0x1000
+#define CPU_RD_BMON_SECTION 0x76000
+#define mmDMA_ROM_TABLE_BASE 0x7FFE500000ull
+#define DMA_ROM_TABLE_MAX_OFFSET 0x1000
+#define DMA_ROM_TABLE_SECTION 0x1000
+#define mmDMA_CH_0_CS_STM_BASE 0x7FFE501000ull
+#define DMA_CH_0_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_STM_SECTION 0x1000
+#define mmDMA_CH_0_CS_CTI_BASE 0x7FFE502000ull
+#define DMA_CH_0_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_0_CS_ETF_BASE 0x7FFE503000ull
+#define DMA_CH_0_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_0_CS_SPMU_BASE 0x7FFE504000ull
+#define DMA_CH_0_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_0_BMON_CTI_BASE 0x7FFE505000ull
+#define DMA_CH_0_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_0_USER_CTI_BASE 0x7FFE506000ull
+#define DMA_CH_0_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_0_BMON_0_BASE 0x7FFE507000ull
+#define DMA_CH_0_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_0_SECTION 0x1000
+#define mmDMA_CH_0_BMON_1_BASE 0x7FFE508000ull
+#define DMA_CH_0_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_1_SECTION 0x19000
+#define mmDMA_CH_1_CS_STM_BASE 0x7FFE521000ull
+#define DMA_CH_1_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_STM_SECTION 0x1000
+#define mmDMA_CH_1_CS_CTI_BASE 0x7FFE522000ull
+#define DMA_CH_1_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_1_CS_ETF_BASE 0x7FFE523000ull
+#define DMA_CH_1_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_1_CS_SPMU_BASE 0x7FFE524000ull
+#define DMA_CH_1_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_1_BMON_CTI_BASE 0x7FFE525000ull
+#define DMA_CH_1_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_1_USER_CTI_BASE 0x7FFE526000ull
+#define DMA_CH_1_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_1_BMON_0_BASE 0x7FFE527000ull
+#define DMA_CH_1_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_0_SECTION 0x1000
+#define mmDMA_CH_1_BMON_1_BASE 0x7FFE528000ull
+#define DMA_CH_1_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_1_SECTION 0x19000
+#define mmDMA_CH_2_CS_STM_BASE 0x7FFE541000ull
+#define DMA_CH_2_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_STM_SECTION 0x1000
+#define mmDMA_CH_2_CS_CTI_BASE 0x7FFE542000ull
+#define DMA_CH_2_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_2_CS_ETF_BASE 0x7FFE543000ull
+#define DMA_CH_2_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_2_CS_SPMU_BASE 0x7FFE544000ull
+#define DMA_CH_2_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_2_BMON_CTI_BASE 0x7FFE545000ull
+#define DMA_CH_2_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_2_USER_CTI_BASE 0x7FFE546000ull
+#define DMA_CH_2_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_2_BMON_0_BASE 0x7FFE547000ull
+#define DMA_CH_2_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_0_SECTION 0x1000
+#define mmDMA_CH_2_BMON_1_BASE 0x7FFE548000ull
+#define DMA_CH_2_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_1_SECTION 0x19000
+#define mmDMA_CH_3_CS_STM_BASE 0x7FFE561000ull
+#define DMA_CH_3_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_STM_SECTION 0x1000
+#define mmDMA_CH_3_CS_CTI_BASE 0x7FFE562000ull
+#define DMA_CH_3_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_3_CS_ETF_BASE 0x7FFE563000ull
+#define DMA_CH_3_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_3_CS_SPMU_BASE 0x7FFE564000ull
+#define DMA_CH_3_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_3_BMON_CTI_BASE 0x7FFE565000ull
+#define DMA_CH_3_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_3_USER_CTI_BASE 0x7FFE566000ull
+#define DMA_CH_3_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_3_BMON_0_BASE 0x7FFE567000ull
+#define DMA_CH_3_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_0_SECTION 0x1000
+#define mmDMA_CH_3_BMON_1_BASE 0x7FFE568000ull
+#define DMA_CH_3_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_1_SECTION 0x19000
+#define mmDMA_CH_4_CS_STM_BASE 0x7FFE581000ull
+#define DMA_CH_4_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_STM_SECTION 0x1000
+#define mmDMA_CH_4_CS_CTI_BASE 0x7FFE582000ull
+#define DMA_CH_4_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_4_CS_ETF_BASE 0x7FFE583000ull
+#define DMA_CH_4_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_4_CS_SPMU_BASE 0x7FFE584000ull
+#define DMA_CH_4_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_4_BMON_CTI_BASE 0x7FFE585000ull
+#define DMA_CH_4_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_4_USER_CTI_BASE 0x7FFE586000ull
+#define DMA_CH_4_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_4_BMON_0_BASE 0x7FFE587000ull
+#define DMA_CH_4_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_0_SECTION 0x1000
+#define mmDMA_CH_4_BMON_1_BASE 0x7FFE588000ull
+#define DMA_CH_4_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_1_SECTION 0x19000
+#define mmDMA_CH_5_CS_STM_BASE 0x7FFE5A1000ull
+#define DMA_CH_5_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_5_CS_STM_SECTION 0x1000
+#define mmDMA_CH_5_CS_CTI_BASE 0x7FFE5A2000ull
+#define DMA_CH_5_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_5_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_5_CS_ETF_BASE 0x7FFE5A3000ull
+#define DMA_CH_5_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_5_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_5_CS_SPMU_BASE 0x7FFE5A4000ull
+#define DMA_CH_5_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_5_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_5_BMON_CTI_BASE 0x7FFE5A5000ull
+#define DMA_CH_5_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_5_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_5_USER_CTI_BASE 0x7FFE5A6000ull
+#define DMA_CH_5_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_5_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_5_BMON_0_BASE 0x7FFE5A7000ull
+#define DMA_CH_5_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_5_BMON_0_SECTION 0x1000
+#define mmDMA_CH_5_BMON_1_BASE 0x7FFE5A8000ull
+#define DMA_CH_5_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_5_BMON_1_SECTION 0x19000
+#define mmDMA_CH_6_CS_STM_BASE 0x7FFE5C1000ull
+#define DMA_CH_6_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_6_CS_STM_SECTION 0x1000
+#define mmDMA_CH_6_CS_CTI_BASE 0x7FFE5C2000ull
+#define DMA_CH_6_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_6_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_6_CS_ETF_BASE 0x7FFE5C3000ull
+#define DMA_CH_6_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_6_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_6_CS_SPMU_BASE 0x7FFE5C4000ull
+#define DMA_CH_6_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_6_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_6_BMON_CTI_BASE 0x7FFE5C5000ull
+#define DMA_CH_6_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_6_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_6_USER_CTI_BASE 0x7FFE5C6000ull
+#define DMA_CH_6_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_6_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_6_BMON_0_BASE 0x7FFE5C7000ull
+#define DMA_CH_6_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_6_BMON_0_SECTION 0x1000
+#define mmDMA_CH_6_BMON_1_BASE 0x7FFE5C8000ull
+#define DMA_CH_6_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_6_BMON_1_SECTION 0x19000
+#define mmDMA_CH_7_CS_STM_BASE 0x7FFE5E1000ull
+#define DMA_CH_7_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_7_CS_STM_SECTION 0x1000
+#define mmDMA_CH_7_CS_CTI_BASE 0x7FFE5E2000ull
+#define DMA_CH_7_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_7_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_7_CS_ETF_BASE 0x7FFE5E3000ull
+#define DMA_CH_7_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_7_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_7_CS_SPMU_BASE 0x7FFE5E4000ull
+#define DMA_CH_7_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_7_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_7_BMON_CTI_BASE 0x7FFE5E5000ull
+#define DMA_CH_7_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_7_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_7_USER_CTI_BASE 0x7FFE5E6000ull
+#define DMA_CH_7_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_7_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_7_BMON_0_BASE 0x7FFE5E7000ull
+#define DMA_CH_7_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_7_BMON_0_SECTION 0x1000
+#define mmDMA_CH_7_BMON_1_BASE 0x7FFE5E8000ull
+#define DMA_CH_7_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_7_BMON_1_SECTION 0x18000
+#define mmNIC_TPC_FUNNEL_W_S_BASE 0x7FFE600000ull
+#define NIC_TPC_FUNNEL_W_S_MAX_OFFSET 0x1000
+#define NIC_TPC_FUNNEL_W_S_SECTION 0x80000
+#define mmNIC_TPC_FUNNEL_E_S_BASE 0x7FFE680000ull
+#define NIC_TPC_FUNNEL_E_S_MAX_OFFSET 0x1000
+#define NIC_TPC_FUNNEL_E_S_SECTION 0x80000
+#define mmNIC_TPC_FUNNEL_W_N_BASE 0x7FFE700000ull
+#define NIC_TPC_FUNNEL_W_N_MAX_OFFSET 0x1000
+#define NIC_TPC_FUNNEL_W_N_SECTION 0x80000
+#define mmNIC_TPC_FUNNEL_E_N_BASE 0x7FFE780000ull
+#define NIC_TPC_FUNNEL_E_N_MAX_OFFSET 0x1000
+#define NIC_TPC_FUNNEL_E_N_SECTION 0x80000
+#define mmCA53_BASE 0x7FFE800000ull
+#define CA53_MAX_OFFSET 0x141000
+#define CA53_SECTION 0x400000
+#define mmPCI_ROM_TABLE_BASE 0x7FFEC00000ull
+#define PCI_ROM_TABLE_MAX_OFFSET 0x1000
+#define PCI_ROM_TABLE_SECTION 0x1000
+#define mmPCIE_STM_BASE 0x7FFEC01000ull
+#define PCIE_STM_MAX_OFFSET 0x1000
+#define PCIE_STM_SECTION 0x1000
+#define mmPCIE_ETF_BASE 0x7FFEC02000ull
+#define PCIE_ETF_MAX_OFFSET 0x1000
+#define PCIE_ETF_SECTION 0x1000
+#define mmPCIE_CTI_0_BASE 0x7FFEC03000ull
+#define PCIE_CTI_0_MAX_OFFSET 0x1000
+#define PCIE_CTI_0_SECTION 0x1000
+#define mmPCIE_SPMU_BASE 0x7FFEC04000ull
+#define PCIE_SPMU_MAX_OFFSET 0x1000
+#define PCIE_SPMU_SECTION 0x1000
+#define mmPCIE_CTI_1_BASE 0x7FFEC05000ull
+#define PCIE_CTI_1_MAX_OFFSET 0x1000
+#define PCIE_CTI_1_SECTION 0x1000
+#define mmPCIE_FUNNEL_BASE 0x7FFEC06000ull
+#define PCIE_FUNNEL_MAX_OFFSET 0x1000
+#define PCIE_FUNNEL_SECTION 0x1000
+#define mmPCIE_BMON_MSTR_WR_BASE 0x7FFEC07000ull
+#define PCIE_BMON_MSTR_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_WR_SECTION 0x1000
+#define mmPCIE_BMON_MSTR_RD_BASE 0x7FFEC08000ull
+#define PCIE_BMON_MSTR_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_RD_SECTION 0x1000
+#define mmPCIE_BMON_SLV_WR_BASE 0x7FFEC09000ull
+#define PCIE_BMON_SLV_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_WR_SECTION 0x1000
+#define mmPCIE_BMON_SLV_RD_BASE 0x7FFEC0A000ull
+#define PCIE_BMON_SLV_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_RD_SECTION 0x7000
+#define mmMMU_CS_STM_BASE 0x7FFEC11000ull
+#define MMU_CS_STM_MAX_OFFSET 0x1000
+#define MMU_CS_STM_SECTION 0x1000
+#define mmMMU_CS_CTI_BASE 0x7FFEC12000ull
+#define MMU_CS_CTI_MAX_OFFSET 0x1000
+#define MMU_CS_CTI_SECTION 0x1000
+#define mmMMU_CS_ETF_BASE 0x7FFEC13000ull
+#define MMU_CS_ETF_MAX_OFFSET 0x1000
+#define MMU_CS_ETF_SECTION 0x1000
+#define mmMMU_CS_SPMU_BASE 0x7FFEC14000ull
+#define MMU_CS_SPMU_MAX_OFFSET 0x1000
+#define MMU_CS_SPMU_SECTION 0x1000
+#define mmMMU_BMON_CTI_BASE 0x7FFEC15000ull
+#define MMU_BMON_CTI_MAX_OFFSET 0x1000
+#define MMU_BMON_CTI_SECTION 0x1000
+#define mmMMU_USER_CTI_BASE 0x7FFEC16000ull
+#define MMU_USER_CTI_MAX_OFFSET 0x1000
+#define MMU_USER_CTI_SECTION 0x1000
+#define mmMMU_BMON_0_BASE 0x7FFEC17000ull
+#define MMU_BMON_0_MAX_OFFSET 0x1000
+#define MMU_BMON_0_SECTION 0x1000
+#define mmMMU_BMON_1_BASE 0x7FFEC18000ull
+#define MMU_BMON_1_MAX_OFFSET 0x1000
+#define MMU_BMON_1_SECTION 0x28000
+#define mmPSOC_CTI_BASE 0x7FFEC40000ull
+#define PSOC_CTI_MAX_OFFSET 0x1000
+#define PSOC_CTI_SECTION 0x1000
+#define mmPSOC_STM_BASE 0x7FFEC41000ull
+#define PSOC_STM_MAX_OFFSET 0x1000
+#define PSOC_STM_SECTION 0x1000
+#define mmPSOC_FUNNEL_BASE 0x7FFEC42000ull
+#define PSOC_FUNNEL_MAX_OFFSET 0x1000
+#define PSOC_FUNNEL_SECTION 0x1000
+#define mmPSOC_ETR_BASE 0x7FFEC43000ull
+#define PSOC_ETR_MAX_OFFSET 0x1000
+#define PSOC_ETR_SECTION 0x1000
+#define mmPSOC_ETF_BASE 0x7FFEC44000ull
+#define PSOC_ETF_MAX_OFFSET 0x1000
+#define PSOC_ETF_SECTION 0x1000
+#define mmPSOC_TS_CTI_BASE 0x7FFEC45000ull
+#define PSOC_TS_CTI_MAX_OFFSET 0x1000
+#define PSOC_TS_CTI_SECTION 0xB000
+#define mmTOP_ROM_TABLE_BASE 0x7FFEC50000ull
+#define TOP_ROM_TABLE_MAX_OFFSET 0x1000
+#define TOP_ROM_TABLE_SECTION 0x70000
+#define mmNIC0_ROM_TABLE_BASE 0x7FFECC0000ull
+#define NIC0_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC0_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC0_DBG_BASE 0x7FFECC1000ull
+#define STM_0_NIC0_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC0_DBG_SECTION 0x1000
+#define mmCTI_0_NIC0_DBG_BASE 0x7FFECC2000ull
+#define CTI_0_NIC0_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC0_DBG_SECTION 0x1000
+#define mmETF_0_NIC0_DBG_BASE 0x7FFECC3000ull
+#define ETF_0_NIC0_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC0_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC0_DBG_BASE 0x7FFECC4000ull
+#define SPMU_0_NIC0_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC0_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC0_DBG_BASE 0x7FFECC6000ull
+#define USER_CTI_0_NIC0_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC0_DBG_SECTION 0xB000
+#define mmSTM_1_NIC0_DBG_BASE 0x7FFECD1000ull
+#define STM_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC0_DBG_SECTION 0x1000
+#define mmCTI_1_NIC0_DBG_BASE 0x7FFECD2000ull
+#define CTI_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC0_DBG_SECTION 0x1000
+#define mmETF_1_NIC0_DBG_BASE 0x7FFECD3000ull
+#define ETF_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC0_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC0_DBG_BASE 0x7FFECD4000ull
+#define SPMU_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC0_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC0_DBG_BASE 0x7FFECD5000ull
+#define BMON_CTI_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC0_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC0_DBG_BASE 0x7FFECD6000ull
+#define USER_CTI_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC0_DBG_SECTION 0x1000
+#define mmBMON0_NIC0_DBG_BASE 0x7FFECD7000ull
+#define BMON0_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC0_DBG_SECTION 0x1000
+#define mmBMON1_NIC0_DBG_BASE 0x7FFECD8000ull
+#define BMON1_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC0_DBG_SECTION 0x1000
+#define mmBMON2_NIC0_DBG_BASE 0x7FFECD9000ull
+#define BMON2_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC0_DBG_SECTION 0x1000
+#define mmBMON3_NIC0_DBG_BASE 0x7FFECDA000ull
+#define BMON3_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC0_DBG_SECTION 0x1000
+#define mmBMON4_NIC0_DBG_BASE 0x7FFECDB000ull
+#define BMON4_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC0_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC0_DBG_BASE 0x7FFECE1000ull
+#define FUNNEL_NIC0_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC0_DBG_SECTION 0x1F000
+#define mmNIC1_ROM_TABLE_BASE 0x7FFED00000ull
+#define NIC1_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC1_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC1_DBG_BASE 0x7FFED01000ull
+#define STM_0_NIC1_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC1_DBG_SECTION 0x1000
+#define mmCTI_0_NIC1_DBG_BASE 0x7FFED02000ull
+#define CTI_0_NIC1_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC1_DBG_SECTION 0x1000
+#define mmETF_0_NIC1_DBG_BASE 0x7FFED03000ull
+#define ETF_0_NIC1_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC1_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC1_DBG_BASE 0x7FFED04000ull
+#define SPMU_0_NIC1_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC1_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC1_DBG_BASE 0x7FFED06000ull
+#define USER_CTI_0_NIC1_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC1_DBG_SECTION 0xB000
+#define mmSTM_1_NIC1_DBG_BASE 0x7FFED11000ull
+#define STM_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC1_DBG_SECTION 0x1000
+#define mmCTI_1_NIC1_DBG_BASE 0x7FFED12000ull
+#define CTI_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC1_DBG_SECTION 0x1000
+#define mmETF_1_NIC1_DBG_BASE 0x7FFED13000ull
+#define ETF_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC1_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC1_DBG_BASE 0x7FFED14000ull
+#define SPMU_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC1_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC1_DBG_BASE 0x7FFED15000ull
+#define BMON_CTI_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC1_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC1_DBG_BASE 0x7FFED16000ull
+#define USER_CTI_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC1_DBG_SECTION 0x1000
+#define mmBMON0_NIC1_DBG_BASE 0x7FFED17000ull
+#define BMON0_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC1_DBG_SECTION 0x1000
+#define mmBMON1_NIC1_DBG_BASE 0x7FFED18000ull
+#define BMON1_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC1_DBG_SECTION 0x1000
+#define mmBMON2_NIC1_DBG_BASE 0x7FFED19000ull
+#define BMON2_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC1_DBG_SECTION 0x1000
+#define mmBMON3_NIC1_DBG_BASE 0x7FFED1A000ull
+#define BMON3_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC1_DBG_SECTION 0x1000
+#define mmBMON4_NIC1_DBG_BASE 0x7FFED1B000ull
+#define BMON4_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC1_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC1_DBG_BASE 0x7FFED21000ull
+#define FUNNEL_NIC1_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC1_DBG_SECTION 0x1F000
+#define mmNIC2_ROM_TABLE_BASE 0x7FFED40000ull
+#define NIC2_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC2_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC2_DBG_BASE 0x7FFED41000ull
+#define STM_0_NIC2_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC2_DBG_SECTION 0x1000
+#define mmCTI_0_NIC2_DBG_BASE 0x7FFED42000ull
+#define CTI_0_NIC2_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC2_DBG_SECTION 0x1000
+#define mmETF_0_NIC2_DBG_BASE 0x7FFED43000ull
+#define ETF_0_NIC2_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC2_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC2_DBG_BASE 0x7FFED44000ull
+#define SPMU_0_NIC2_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC2_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC2_DBG_BASE 0x7FFED46000ull
+#define USER_CTI_0_NIC2_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC2_DBG_SECTION 0xB000
+#define mmSTM_1_NIC2_DBG_BASE 0x7FFED51000ull
+#define STM_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC2_DBG_SECTION 0x1000
+#define mmCTI_1_NIC2_DBG_BASE 0x7FFED52000ull
+#define CTI_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC2_DBG_SECTION 0x1000
+#define mmETF_1_NIC2_DBG_BASE 0x7FFED53000ull
+#define ETF_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC2_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC2_DBG_BASE 0x7FFED54000ull
+#define SPMU_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC2_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC2_DBG_BASE 0x7FFED55000ull
+#define BMON_CTI_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC2_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC2_DBG_BASE 0x7FFED56000ull
+#define USER_CTI_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC2_DBG_SECTION 0x1000
+#define mmBMON0_NIC2_DBG_BASE 0x7FFED57000ull
+#define BMON0_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC2_DBG_SECTION 0x1000
+#define mmBMON1_NIC2_DBG_BASE 0x7FFED58000ull
+#define BMON1_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC2_DBG_SECTION 0x1000
+#define mmBMON2_NIC2_DBG_BASE 0x7FFED59000ull
+#define BMON2_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC2_DBG_SECTION 0x1000
+#define mmBMON3_NIC2_DBG_BASE 0x7FFED5A000ull
+#define BMON3_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC2_DBG_SECTION 0x1000
+#define mmBMON4_NIC2_DBG_BASE 0x7FFED5B000ull
+#define BMON4_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC2_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC2_DBG_BASE 0x7FFED61000ull
+#define FUNNEL_NIC2_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC2_DBG_SECTION 0x1F000
+#define mmNIC3_ROM_TABLE_BASE 0x7FFED80000ull
+#define NIC3_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC3_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC3_DBG_BASE 0x7FFED81000ull
+#define STM_0_NIC3_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC3_DBG_SECTION 0x1000
+#define mmCTI_0_NIC3_DBG_BASE 0x7FFED82000ull
+#define CTI_0_NIC3_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC3_DBG_SECTION 0x1000
+#define mmETF_0_NIC3_DBG_BASE 0x7FFED83000ull
+#define ETF_0_NIC3_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC3_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC3_DBG_BASE 0x7FFED84000ull
+#define SPMU_0_NIC3_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC3_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC3_DBG_BASE 0x7FFED86000ull
+#define USER_CTI_0_NIC3_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC3_DBG_SECTION 0xB000
+#define mmSTM_1_NIC3_DBG_BASE 0x7FFED91000ull
+#define STM_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC3_DBG_SECTION 0x1000
+#define mmCTI_1_NIC3_DBG_BASE 0x7FFED92000ull
+#define CTI_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC3_DBG_SECTION 0x1000
+#define mmETF_1_NIC3_DBG_BASE 0x7FFED93000ull
+#define ETF_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC3_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC3_DBG_BASE 0x7FFED94000ull
+#define SPMU_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC3_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC3_DBG_BASE 0x7FFED95000ull
+#define BMON_CTI_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC3_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC3_DBG_BASE 0x7FFED96000ull
+#define USER_CTI_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC3_DBG_SECTION 0x1000
+#define mmBMON0_NIC3_DBG_BASE 0x7FFED97000ull
+#define BMON0_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC3_DBG_SECTION 0x1000
+#define mmBMON1_NIC3_DBG_BASE 0x7FFED98000ull
+#define BMON1_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC3_DBG_SECTION 0x1000
+#define mmBMON2_NIC3_DBG_BASE 0x7FFED99000ull
+#define BMON2_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC3_DBG_SECTION 0x1000
+#define mmBMON3_NIC3_DBG_BASE 0x7FFED9A000ull
+#define BMON3_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC3_DBG_SECTION 0x1000
+#define mmBMON4_NIC3_DBG_BASE 0x7FFED9B000ull
+#define BMON4_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC3_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC3_DBG_BASE 0x7FFEDA1000ull
+#define FUNNEL_NIC3_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC3_DBG_SECTION 0x1F000
+#define mmNIC4_ROM_TABLE_BASE 0x7FFEDC0000ull
+#define NIC4_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC4_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC4_DBG_BASE 0x7FFEDC1000ull
+#define STM_0_NIC4_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC4_DBG_SECTION 0x1000
+#define mmCTI_0_NIC4_DBG_BASE 0x7FFEDC2000ull
+#define CTI_0_NIC4_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC4_DBG_SECTION 0x1000
+#define mmETF_0_NIC4_DBG_BASE 0x7FFEDC3000ull
+#define ETF_0_NIC4_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC4_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC4_DBG_BASE 0x7FFEDC4000ull
+#define SPMU_0_NIC4_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC4_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC4_DBG_BASE 0x7FFEDC6000ull
+#define USER_CTI_0_NIC4_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC4_DBG_SECTION 0xB000
+#define mmSTM_1_NIC4_DBG_BASE 0x7FFEDD1000ull
+#define STM_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC4_DBG_SECTION 0x1000
+#define mmCTI_1_NIC4_DBG_BASE 0x7FFEDD2000ull
+#define CTI_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC4_DBG_SECTION 0x1000
+#define mmETF_1_NIC4_DBG_BASE 0x7FFEDD3000ull
+#define ETF_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC4_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC4_DBG_BASE 0x7FFEDD4000ull
+#define SPMU_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC4_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC4_DBG_BASE 0x7FFEDD5000ull
+#define BMON_CTI_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC4_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC4_DBG_BASE 0x7FFEDD6000ull
+#define USER_CTI_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC4_DBG_SECTION 0x1000
+#define mmBMON0_NIC4_DBG_BASE 0x7FFEDD7000ull
+#define BMON0_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC4_DBG_SECTION 0x1000
+#define mmBMON1_NIC4_DBG_BASE 0x7FFEDD8000ull
+#define BMON1_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC4_DBG_SECTION 0x1000
+#define mmBMON2_NIC4_DBG_BASE 0x7FFEDD9000ull
+#define BMON2_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC4_DBG_SECTION 0x1000
+#define mmBMON3_NIC4_DBG_BASE 0x7FFEDDA000ull
+#define BMON3_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC4_DBG_SECTION 0x1000
+#define mmBMON4_NIC4_DBG_BASE 0x7FFEDDB000ull
+#define BMON4_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC4_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC4_DBG_BASE 0x7FFEDE1000ull
+#define FUNNEL_NIC4_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC4_DBG_SECTION 0x21F000
+#define mmTPC0_ROM_TABLE_BASE 0x7FFF000000ull
+#define TPC0_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC0_ROM_TABLE_SECTION 0x1000
+#define mmTPC0_EML_SPMU_BASE 0x7FFF001000ull
+#define TPC0_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC0_EML_SPMU_SECTION 0x1000
+#define mmTPC0_EML_ETF_BASE 0x7FFF002000ull
+#define TPC0_EML_ETF_MAX_OFFSET 0x1000
+#define TPC0_EML_ETF_SECTION 0x1000
+#define mmTPC0_EML_STM_BASE 0x7FFF003000ull
+#define TPC0_EML_STM_MAX_OFFSET 0x1000
+#define TPC0_EML_STM_SECTION 0x2000
+#define mmTPC0_EML_CTI_BASE 0x7FFF005000ull
+#define TPC0_EML_CTI_MAX_OFFSET 0x1000
+#define TPC0_EML_CTI_SECTION 0x1000
+#define mmTPC0_EML_FUNNEL_BASE 0x7FFF006000ull
+#define TPC0_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC0_EML_FUNNEL_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_0_BASE 0x7FFF007000ull
+#define TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_1_BASE 0x7FFF008000ull
+#define TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_2_BASE 0x7FFF009000ull
+#define TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_3_BASE 0x7FFF00A000ull
+#define TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC0_EML_CFG_BASE 0x7FFF040000ull
+#define TPC0_EML_CFG_MAX_OFFSET 0x3380
+#define TPC0_EML_CFG_SECTION 0x1000
+#define mmTPC0_EML_TPC_CFG_BASE 0x7FFF041000ull
+#define TPC0_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC0_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC0_EML_TPC_CFG_BASE 0x7FFF041400ull
+#define KERNEL_TENSOR_0_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC0_EML_TPC_CFG_BASE 0x7FFF041438ull
+#define KERNEL_TENSOR_1_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC0_EML_TPC_CFG_BASE 0x7FFF041470ull
+#define KERNEL_TENSOR_2_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC0_EML_TPC_CFG_BASE 0x7FFF0414A8ull
+#define KERNEL_TENSOR_3_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC0_EML_TPC_CFG_BASE 0x7FFF0414E0ull
+#define KERNEL_TENSOR_4_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC0_EML_TPC_CFG_BASE 0x7FFF041518ull
+#define KERNEL_TENSOR_5_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC0_EML_TPC_CFG_BASE 0x7FFF041550ull
+#define KERNEL_TENSOR_6_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC0_EML_TPC_CFG_BASE 0x7FFF041588ull
+#define KERNEL_TENSOR_7_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC0_EML_TPC_CFG_BASE 0x7FFF0415C0ull
+#define KERNEL_TENSOR_8_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC0_EML_TPC_CFG_BASE 0x7FFF0415F8ull
+#define KERNEL_TENSOR_9_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC0_EML_TPC_CFG_BASE 0x7FFF041630ull
+#define KERNEL_TENSOR_10_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC0_EML_TPC_CFG_BASE 0x7FFF041668ull
+#define KERNEL_TENSOR_11_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC0_EML_TPC_CFG_BASE 0x7FFF0416A0ull
+#define KERNEL_TENSOR_12_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC0_EML_TPC_CFG_BASE 0x7FFF0416D8ull
+#define KERNEL_TENSOR_13_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC0_EML_TPC_CFG_BASE 0x7FFF041710ull
+#define KERNEL_TENSOR_14_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC0_EML_TPC_CFG_BASE 0x7FFF041748ull
+#define KERNEL_TENSOR_15_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC0_EML_TPC_CFG_BASE 0x7FFF041780ull
+#define KERNEL_SYNC_OBJECT_TPC0_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC0_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC0_EML_TPC_CFG_BASE 0x7FFF041788ull
+#define KERNEL_TPC0_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC0_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC0_EML_TPC_CFG_BASE 0x7FFF041A00ull
+#define QM_TENSOR_0_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC0_EML_TPC_CFG_BASE 0x7FFF041A38ull
+#define QM_TENSOR_1_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC0_EML_TPC_CFG_BASE 0x7FFF041A70ull
+#define QM_TENSOR_2_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC0_EML_TPC_CFG_BASE 0x7FFF041AA8ull
+#define QM_TENSOR_3_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC0_EML_TPC_CFG_BASE 0x7FFF041AE0ull
+#define QM_TENSOR_4_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC0_EML_TPC_CFG_BASE 0x7FFF041B18ull
+#define QM_TENSOR_5_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC0_EML_TPC_CFG_BASE 0x7FFF041B50ull
+#define QM_TENSOR_6_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC0_EML_TPC_CFG_BASE 0x7FFF041B88ull
+#define QM_TENSOR_7_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC0_EML_TPC_CFG_BASE 0x7FFF041BC0ull
+#define QM_TENSOR_8_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC0_EML_TPC_CFG_BASE 0x7FFF041BF8ull
+#define QM_TENSOR_9_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC0_EML_TPC_CFG_BASE 0x7FFF041C30ull
+#define QM_TENSOR_10_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC0_EML_TPC_CFG_BASE 0x7FFF041C68ull
+#define QM_TENSOR_11_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC0_EML_TPC_CFG_BASE 0x7FFF041CA0ull
+#define QM_TENSOR_12_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC0_EML_TPC_CFG_BASE 0x7FFF041CD8ull
+#define QM_TENSOR_13_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC0_EML_TPC_CFG_BASE 0x7FFF041D10ull
+#define QM_TENSOR_14_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC0_EML_TPC_CFG_BASE 0x7FFF041D48ull
+#define QM_TENSOR_15_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC0_EML_TPC_CFG_BASE 0x7FFF041D80ull
+#define QM_SYNC_OBJECT_TPC0_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC0_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC0_EML_TPC_CFG_BASE 0x7FFF041D88ull
+#define QM_TPC0_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC0_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC0_EML_TPC_QM_BASE 0x7FFF042000ull
+#define TPC0_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC0_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC0_EML_CS_BASE 0x7FFF1FF000ull
+#define TPC0_EML_CS_MAX_OFFSET 0x1000
+#define TPC0_EML_CS_SECTION 0x1000
+#define mmTPC1_ROM_TABLE_BASE 0x7FFF200000ull
+#define TPC1_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC1_ROM_TABLE_SECTION 0x1000
+#define mmTPC1_EML_SPMU_BASE 0x7FFF201000ull
+#define TPC1_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC1_EML_SPMU_SECTION 0x1000
+#define mmTPC1_EML_ETF_BASE 0x7FFF202000ull
+#define TPC1_EML_ETF_MAX_OFFSET 0x1000
+#define TPC1_EML_ETF_SECTION 0x1000
+#define mmTPC1_EML_STM_BASE 0x7FFF203000ull
+#define TPC1_EML_STM_MAX_OFFSET 0x1000
+#define TPC1_EML_STM_SECTION 0x2000
+#define mmTPC1_EML_CTI_BASE 0x7FFF205000ull
+#define TPC1_EML_CTI_MAX_OFFSET 0x1000
+#define TPC1_EML_CTI_SECTION 0x1000
+#define mmTPC1_EML_FUNNEL_BASE 0x7FFF206000ull
+#define TPC1_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC1_EML_FUNNEL_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_0_BASE 0x7FFF207000ull
+#define TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_1_BASE 0x7FFF208000ull
+#define TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_2_BASE 0x7FFF209000ull
+#define TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_3_BASE 0x7FFF20A000ull
+#define TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC1_EML_CFG_BASE 0x7FFF240000ull
+#define TPC1_EML_CFG_MAX_OFFSET 0x3380
+#define TPC1_EML_CFG_SECTION 0x1000
+#define mmTPC1_EML_TPC_CFG_BASE 0x7FFF241000ull
+#define TPC1_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC1_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC1_EML_TPC_CFG_BASE 0x7FFF241400ull
+#define KERNEL_TENSOR_0_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC1_EML_TPC_CFG_BASE 0x7FFF241438ull
+#define KERNEL_TENSOR_1_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC1_EML_TPC_CFG_BASE 0x7FFF241470ull
+#define KERNEL_TENSOR_2_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC1_EML_TPC_CFG_BASE 0x7FFF2414A8ull
+#define KERNEL_TENSOR_3_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC1_EML_TPC_CFG_BASE 0x7FFF2414E0ull
+#define KERNEL_TENSOR_4_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC1_EML_TPC_CFG_BASE 0x7FFF241518ull
+#define KERNEL_TENSOR_5_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC1_EML_TPC_CFG_BASE 0x7FFF241550ull
+#define KERNEL_TENSOR_6_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC1_EML_TPC_CFG_BASE 0x7FFF241588ull
+#define KERNEL_TENSOR_7_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC1_EML_TPC_CFG_BASE 0x7FFF2415C0ull
+#define KERNEL_TENSOR_8_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC1_EML_TPC_CFG_BASE 0x7FFF2415F8ull
+#define KERNEL_TENSOR_9_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC1_EML_TPC_CFG_BASE 0x7FFF241630ull
+#define KERNEL_TENSOR_10_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC1_EML_TPC_CFG_BASE 0x7FFF241668ull
+#define KERNEL_TENSOR_11_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC1_EML_TPC_CFG_BASE 0x7FFF2416A0ull
+#define KERNEL_TENSOR_12_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC1_EML_TPC_CFG_BASE 0x7FFF2416D8ull
+#define KERNEL_TENSOR_13_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC1_EML_TPC_CFG_BASE 0x7FFF241710ull
+#define KERNEL_TENSOR_14_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC1_EML_TPC_CFG_BASE 0x7FFF241748ull
+#define KERNEL_TENSOR_15_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC1_EML_TPC_CFG_BASE 0x7FFF241780ull
+#define KERNEL_SYNC_OBJECT_TPC1_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC1_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC1_EML_TPC_CFG_BASE 0x7FFF241788ull
+#define KERNEL_TPC1_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC1_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC1_EML_TPC_CFG_BASE 0x7FFF241A00ull
+#define QM_TENSOR_0_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC1_EML_TPC_CFG_BASE 0x7FFF241A38ull
+#define QM_TENSOR_1_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC1_EML_TPC_CFG_BASE 0x7FFF241A70ull
+#define QM_TENSOR_2_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC1_EML_TPC_CFG_BASE 0x7FFF241AA8ull
+#define QM_TENSOR_3_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC1_EML_TPC_CFG_BASE 0x7FFF241AE0ull
+#define QM_TENSOR_4_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC1_EML_TPC_CFG_BASE 0x7FFF241B18ull
+#define QM_TENSOR_5_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC1_EML_TPC_CFG_BASE 0x7FFF241B50ull
+#define QM_TENSOR_6_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC1_EML_TPC_CFG_BASE 0x7FFF241B88ull
+#define QM_TENSOR_7_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC1_EML_TPC_CFG_BASE 0x7FFF241BC0ull
+#define QM_TENSOR_8_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC1_EML_TPC_CFG_BASE 0x7FFF241BF8ull
+#define QM_TENSOR_9_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC1_EML_TPC_CFG_BASE 0x7FFF241C30ull
+#define QM_TENSOR_10_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC1_EML_TPC_CFG_BASE 0x7FFF241C68ull
+#define QM_TENSOR_11_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC1_EML_TPC_CFG_BASE 0x7FFF241CA0ull
+#define QM_TENSOR_12_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC1_EML_TPC_CFG_BASE 0x7FFF241CD8ull
+#define QM_TENSOR_13_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC1_EML_TPC_CFG_BASE 0x7FFF241D10ull
+#define QM_TENSOR_14_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC1_EML_TPC_CFG_BASE 0x7FFF241D48ull
+#define QM_TENSOR_15_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC1_EML_TPC_CFG_BASE 0x7FFF241D80ull
+#define QM_SYNC_OBJECT_TPC1_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC1_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC1_EML_TPC_CFG_BASE 0x7FFF241D88ull
+#define QM_TPC1_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC1_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC1_EML_TPC_QM_BASE 0x7FFF242000ull
+#define TPC1_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC1_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC1_EML_CS_BASE 0x7FFF3FF000ull
+#define TPC1_EML_CS_MAX_OFFSET 0x1000
+#define TPC1_EML_CS_SECTION 0x1000
+#define mmTPC2_ROM_TABLE_BASE 0x7FFF400000ull
+#define TPC2_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC2_ROM_TABLE_SECTION 0x1000
+#define mmTPC2_EML_SPMU_BASE 0x7FFF401000ull
+#define TPC2_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC2_EML_SPMU_SECTION 0x1000
+#define mmTPC2_EML_ETF_BASE 0x7FFF402000ull
+#define TPC2_EML_ETF_MAX_OFFSET 0x1000
+#define TPC2_EML_ETF_SECTION 0x1000
+#define mmTPC2_EML_STM_BASE 0x7FFF403000ull
+#define TPC2_EML_STM_MAX_OFFSET 0x1000
+#define TPC2_EML_STM_SECTION 0x2000
+#define mmTPC2_EML_CTI_BASE 0x7FFF405000ull
+#define TPC2_EML_CTI_MAX_OFFSET 0x1000
+#define TPC2_EML_CTI_SECTION 0x1000
+#define mmTPC2_EML_FUNNEL_BASE 0x7FFF406000ull
+#define TPC2_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC2_EML_FUNNEL_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_0_BASE 0x7FFF407000ull
+#define TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_1_BASE 0x7FFF408000ull
+#define TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_2_BASE 0x7FFF409000ull
+#define TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_3_BASE 0x7FFF40A000ull
+#define TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC2_EML_CFG_BASE 0x7FFF440000ull
+#define TPC2_EML_CFG_MAX_OFFSET 0x3380
+#define TPC2_EML_CFG_SECTION 0x1000
+#define mmTPC2_EML_TPC_CFG_BASE 0x7FFF441000ull
+#define TPC2_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC2_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC2_EML_TPC_CFG_BASE 0x7FFF441400ull
+#define KERNEL_TENSOR_0_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC2_EML_TPC_CFG_BASE 0x7FFF441438ull
+#define KERNEL_TENSOR_1_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC2_EML_TPC_CFG_BASE 0x7FFF441470ull
+#define KERNEL_TENSOR_2_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC2_EML_TPC_CFG_BASE 0x7FFF4414A8ull
+#define KERNEL_TENSOR_3_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC2_EML_TPC_CFG_BASE 0x7FFF4414E0ull
+#define KERNEL_TENSOR_4_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC2_EML_TPC_CFG_BASE 0x7FFF441518ull
+#define KERNEL_TENSOR_5_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC2_EML_TPC_CFG_BASE 0x7FFF441550ull
+#define KERNEL_TENSOR_6_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC2_EML_TPC_CFG_BASE 0x7FFF441588ull
+#define KERNEL_TENSOR_7_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC2_EML_TPC_CFG_BASE 0x7FFF4415C0ull
+#define KERNEL_TENSOR_8_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC2_EML_TPC_CFG_BASE 0x7FFF4415F8ull
+#define KERNEL_TENSOR_9_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC2_EML_TPC_CFG_BASE 0x7FFF441630ull
+#define KERNEL_TENSOR_10_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC2_EML_TPC_CFG_BASE 0x7FFF441668ull
+#define KERNEL_TENSOR_11_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC2_EML_TPC_CFG_BASE 0x7FFF4416A0ull
+#define KERNEL_TENSOR_12_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC2_EML_TPC_CFG_BASE 0x7FFF4416D8ull
+#define KERNEL_TENSOR_13_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC2_EML_TPC_CFG_BASE 0x7FFF441710ull
+#define KERNEL_TENSOR_14_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC2_EML_TPC_CFG_BASE 0x7FFF441748ull
+#define KERNEL_TENSOR_15_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC2_EML_TPC_CFG_BASE 0x7FFF441780ull
+#define KERNEL_SYNC_OBJECT_TPC2_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC2_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC2_EML_TPC_CFG_BASE 0x7FFF441788ull
+#define KERNEL_TPC2_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC2_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC2_EML_TPC_CFG_BASE 0x7FFF441A00ull
+#define QM_TENSOR_0_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC2_EML_TPC_CFG_BASE 0x7FFF441A38ull
+#define QM_TENSOR_1_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC2_EML_TPC_CFG_BASE 0x7FFF441A70ull
+#define QM_TENSOR_2_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC2_EML_TPC_CFG_BASE 0x7FFF441AA8ull
+#define QM_TENSOR_3_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC2_EML_TPC_CFG_BASE 0x7FFF441AE0ull
+#define QM_TENSOR_4_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC2_EML_TPC_CFG_BASE 0x7FFF441B18ull
+#define QM_TENSOR_5_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC2_EML_TPC_CFG_BASE 0x7FFF441B50ull
+#define QM_TENSOR_6_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC2_EML_TPC_CFG_BASE 0x7FFF441B88ull
+#define QM_TENSOR_7_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC2_EML_TPC_CFG_BASE 0x7FFF441BC0ull
+#define QM_TENSOR_8_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC2_EML_TPC_CFG_BASE 0x7FFF441BF8ull
+#define QM_TENSOR_9_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC2_EML_TPC_CFG_BASE 0x7FFF441C30ull
+#define QM_TENSOR_10_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC2_EML_TPC_CFG_BASE 0x7FFF441C68ull
+#define QM_TENSOR_11_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC2_EML_TPC_CFG_BASE 0x7FFF441CA0ull
+#define QM_TENSOR_12_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC2_EML_TPC_CFG_BASE 0x7FFF441CD8ull
+#define QM_TENSOR_13_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC2_EML_TPC_CFG_BASE 0x7FFF441D10ull
+#define QM_TENSOR_14_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC2_EML_TPC_CFG_BASE 0x7FFF441D48ull
+#define QM_TENSOR_15_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC2_EML_TPC_CFG_BASE 0x7FFF441D80ull
+#define QM_SYNC_OBJECT_TPC2_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC2_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC2_EML_TPC_CFG_BASE 0x7FFF441D88ull
+#define QM_TPC2_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC2_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC2_EML_TPC_QM_BASE 0x7FFF442000ull
+#define TPC2_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC2_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC2_EML_CS_BASE 0x7FFF5FF000ull
+#define TPC2_EML_CS_MAX_OFFSET 0x1000
+#define TPC2_EML_CS_SECTION 0x1000
+#define mmTPC3_ROM_TABLE_BASE 0x7FFF600000ull
+#define TPC3_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC3_ROM_TABLE_SECTION 0x1000
+#define mmTPC3_EML_SPMU_BASE 0x7FFF601000ull
+#define TPC3_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC3_EML_SPMU_SECTION 0x1000
+#define mmTPC3_EML_ETF_BASE 0x7FFF602000ull
+#define TPC3_EML_ETF_MAX_OFFSET 0x1000
+#define TPC3_EML_ETF_SECTION 0x1000
+#define mmTPC3_EML_STM_BASE 0x7FFF603000ull
+#define TPC3_EML_STM_MAX_OFFSET 0x1000
+#define TPC3_EML_STM_SECTION 0x2000
+#define mmTPC3_EML_CTI_BASE 0x7FFF605000ull
+#define TPC3_EML_CTI_MAX_OFFSET 0x1000
+#define TPC3_EML_CTI_SECTION 0x1000
+#define mmTPC3_EML_FUNNEL_BASE 0x7FFF606000ull
+#define TPC3_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC3_EML_FUNNEL_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_0_BASE 0x7FFF607000ull
+#define TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_1_BASE 0x7FFF608000ull
+#define TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_2_BASE 0x7FFF609000ull
+#define TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_3_BASE 0x7FFF60A000ull
+#define TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC3_EML_CFG_BASE 0x7FFF640000ull
+#define TPC3_EML_CFG_MAX_OFFSET 0x3380
+#define TPC3_EML_CFG_SECTION 0x1000
+#define mmTPC3_EML_TPC_CFG_BASE 0x7FFF641000ull
+#define TPC3_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC3_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC3_EML_TPC_CFG_BASE 0x7FFF641400ull
+#define KERNEL_TENSOR_0_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC3_EML_TPC_CFG_BASE 0x7FFF641438ull
+#define KERNEL_TENSOR_1_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC3_EML_TPC_CFG_BASE 0x7FFF641470ull
+#define KERNEL_TENSOR_2_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC3_EML_TPC_CFG_BASE 0x7FFF6414A8ull
+#define KERNEL_TENSOR_3_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC3_EML_TPC_CFG_BASE 0x7FFF6414E0ull
+#define KERNEL_TENSOR_4_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC3_EML_TPC_CFG_BASE 0x7FFF641518ull
+#define KERNEL_TENSOR_5_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC3_EML_TPC_CFG_BASE 0x7FFF641550ull
+#define KERNEL_TENSOR_6_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC3_EML_TPC_CFG_BASE 0x7FFF641588ull
+#define KERNEL_TENSOR_7_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC3_EML_TPC_CFG_BASE 0x7FFF6415C0ull
+#define KERNEL_TENSOR_8_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC3_EML_TPC_CFG_BASE 0x7FFF6415F8ull
+#define KERNEL_TENSOR_9_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC3_EML_TPC_CFG_BASE 0x7FFF641630ull
+#define KERNEL_TENSOR_10_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC3_EML_TPC_CFG_BASE 0x7FFF641668ull
+#define KERNEL_TENSOR_11_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC3_EML_TPC_CFG_BASE 0x7FFF6416A0ull
+#define KERNEL_TENSOR_12_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC3_EML_TPC_CFG_BASE 0x7FFF6416D8ull
+#define KERNEL_TENSOR_13_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC3_EML_TPC_CFG_BASE 0x7FFF641710ull
+#define KERNEL_TENSOR_14_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC3_EML_TPC_CFG_BASE 0x7FFF641748ull
+#define KERNEL_TENSOR_15_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC3_EML_TPC_CFG_BASE 0x7FFF641780ull
+#define KERNEL_SYNC_OBJECT_TPC3_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC3_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC3_EML_TPC_CFG_BASE 0x7FFF641788ull
+#define KERNEL_TPC3_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC3_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC3_EML_TPC_CFG_BASE 0x7FFF641A00ull
+#define QM_TENSOR_0_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC3_EML_TPC_CFG_BASE 0x7FFF641A38ull
+#define QM_TENSOR_1_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC3_EML_TPC_CFG_BASE 0x7FFF641A70ull
+#define QM_TENSOR_2_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC3_EML_TPC_CFG_BASE 0x7FFF641AA8ull
+#define QM_TENSOR_3_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC3_EML_TPC_CFG_BASE 0x7FFF641AE0ull
+#define QM_TENSOR_4_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC3_EML_TPC_CFG_BASE 0x7FFF641B18ull
+#define QM_TENSOR_5_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC3_EML_TPC_CFG_BASE 0x7FFF641B50ull
+#define QM_TENSOR_6_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC3_EML_TPC_CFG_BASE 0x7FFF641B88ull
+#define QM_TENSOR_7_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC3_EML_TPC_CFG_BASE 0x7FFF641BC0ull
+#define QM_TENSOR_8_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC3_EML_TPC_CFG_BASE 0x7FFF641BF8ull
+#define QM_TENSOR_9_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC3_EML_TPC_CFG_BASE 0x7FFF641C30ull
+#define QM_TENSOR_10_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC3_EML_TPC_CFG_BASE 0x7FFF641C68ull
+#define QM_TENSOR_11_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC3_EML_TPC_CFG_BASE 0x7FFF641CA0ull
+#define QM_TENSOR_12_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC3_EML_TPC_CFG_BASE 0x7FFF641CD8ull
+#define QM_TENSOR_13_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC3_EML_TPC_CFG_BASE 0x7FFF641D10ull
+#define QM_TENSOR_14_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC3_EML_TPC_CFG_BASE 0x7FFF641D48ull
+#define QM_TENSOR_15_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC3_EML_TPC_CFG_BASE 0x7FFF641D80ull
+#define QM_SYNC_OBJECT_TPC3_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC3_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC3_EML_TPC_CFG_BASE 0x7FFF641D88ull
+#define QM_TPC3_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC3_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC3_EML_TPC_QM_BASE 0x7FFF642000ull
+#define TPC3_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC3_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC3_EML_CS_BASE 0x7FFF7FF000ull
+#define TPC3_EML_CS_MAX_OFFSET 0x1000
+#define TPC3_EML_CS_SECTION 0x1000
+#define mmTPC4_ROM_TABLE_BASE 0x7FFF800000ull
+#define TPC4_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC4_ROM_TABLE_SECTION 0x1000
+#define mmTPC4_EML_SPMU_BASE 0x7FFF801000ull
+#define TPC4_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC4_EML_SPMU_SECTION 0x1000
+#define mmTPC4_EML_ETF_BASE 0x7FFF802000ull
+#define TPC4_EML_ETF_MAX_OFFSET 0x1000
+#define TPC4_EML_ETF_SECTION 0x1000
+#define mmTPC4_EML_STM_BASE 0x7FFF803000ull
+#define TPC4_EML_STM_MAX_OFFSET 0x1000
+#define TPC4_EML_STM_SECTION 0x2000
+#define mmTPC4_EML_CTI_BASE 0x7FFF805000ull
+#define TPC4_EML_CTI_MAX_OFFSET 0x1000
+#define TPC4_EML_CTI_SECTION 0x1000
+#define mmTPC4_EML_FUNNEL_BASE 0x7FFF806000ull
+#define TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC4_EML_FUNNEL_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_0_BASE 0x7FFF807000ull
+#define TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_1_BASE 0x7FFF808000ull
+#define TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_2_BASE 0x7FFF809000ull
+#define TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_3_BASE 0x7FFF80A000ull
+#define TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC4_EML_CFG_BASE 0x7FFF840000ull
+#define TPC4_EML_CFG_MAX_OFFSET 0x3380
+#define TPC4_EML_CFG_SECTION 0x1000
+#define mmTPC4_EML_TPC_CFG_BASE 0x7FFF841000ull
+#define TPC4_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC4_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC4_EML_TPC_CFG_BASE 0x7FFF841400ull
+#define KERNEL_TENSOR_0_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC4_EML_TPC_CFG_BASE 0x7FFF841438ull
+#define KERNEL_TENSOR_1_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC4_EML_TPC_CFG_BASE 0x7FFF841470ull
+#define KERNEL_TENSOR_2_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC4_EML_TPC_CFG_BASE 0x7FFF8414A8ull
+#define KERNEL_TENSOR_3_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC4_EML_TPC_CFG_BASE 0x7FFF8414E0ull
+#define KERNEL_TENSOR_4_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC4_EML_TPC_CFG_BASE 0x7FFF841518ull
+#define KERNEL_TENSOR_5_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC4_EML_TPC_CFG_BASE 0x7FFF841550ull
+#define KERNEL_TENSOR_6_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC4_EML_TPC_CFG_BASE 0x7FFF841588ull
+#define KERNEL_TENSOR_7_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC4_EML_TPC_CFG_BASE 0x7FFF8415C0ull
+#define KERNEL_TENSOR_8_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC4_EML_TPC_CFG_BASE 0x7FFF8415F8ull
+#define KERNEL_TENSOR_9_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC4_EML_TPC_CFG_BASE 0x7FFF841630ull
+#define KERNEL_TENSOR_10_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC4_EML_TPC_CFG_BASE 0x7FFF841668ull
+#define KERNEL_TENSOR_11_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC4_EML_TPC_CFG_BASE 0x7FFF8416A0ull
+#define KERNEL_TENSOR_12_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC4_EML_TPC_CFG_BASE 0x7FFF8416D8ull
+#define KERNEL_TENSOR_13_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC4_EML_TPC_CFG_BASE 0x7FFF841710ull
+#define KERNEL_TENSOR_14_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC4_EML_TPC_CFG_BASE 0x7FFF841748ull
+#define KERNEL_TENSOR_15_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC4_EML_TPC_CFG_BASE 0x7FFF841780ull
+#define KERNEL_SYNC_OBJECT_TPC4_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC4_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC4_EML_TPC_CFG_BASE 0x7FFF841788ull
+#define KERNEL_TPC4_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC4_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC4_EML_TPC_CFG_BASE 0x7FFF841A00ull
+#define QM_TENSOR_0_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC4_EML_TPC_CFG_BASE 0x7FFF841A38ull
+#define QM_TENSOR_1_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC4_EML_TPC_CFG_BASE 0x7FFF841A70ull
+#define QM_TENSOR_2_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC4_EML_TPC_CFG_BASE 0x7FFF841AA8ull
+#define QM_TENSOR_3_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC4_EML_TPC_CFG_BASE 0x7FFF841AE0ull
+#define QM_TENSOR_4_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC4_EML_TPC_CFG_BASE 0x7FFF841B18ull
+#define QM_TENSOR_5_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC4_EML_TPC_CFG_BASE 0x7FFF841B50ull
+#define QM_TENSOR_6_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC4_EML_TPC_CFG_BASE 0x7FFF841B88ull
+#define QM_TENSOR_7_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC4_EML_TPC_CFG_BASE 0x7FFF841BC0ull
+#define QM_TENSOR_8_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC4_EML_TPC_CFG_BASE 0x7FFF841BF8ull
+#define QM_TENSOR_9_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC4_EML_TPC_CFG_BASE 0x7FFF841C30ull
+#define QM_TENSOR_10_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC4_EML_TPC_CFG_BASE 0x7FFF841C68ull
+#define QM_TENSOR_11_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC4_EML_TPC_CFG_BASE 0x7FFF841CA0ull
+#define QM_TENSOR_12_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC4_EML_TPC_CFG_BASE 0x7FFF841CD8ull
+#define QM_TENSOR_13_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC4_EML_TPC_CFG_BASE 0x7FFF841D10ull
+#define QM_TENSOR_14_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC4_EML_TPC_CFG_BASE 0x7FFF841D48ull
+#define QM_TENSOR_15_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC4_EML_TPC_CFG_BASE 0x7FFF841D80ull
+#define QM_SYNC_OBJECT_TPC4_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC4_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC4_EML_TPC_CFG_BASE 0x7FFF841D88ull
+#define QM_TPC4_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC4_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC4_EML_TPC_QM_BASE 0x7FFF842000ull
+#define TPC4_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC4_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC4_EML_CS_BASE 0x7FFF9FF000ull
+#define TPC4_EML_CS_MAX_OFFSET 0x1000
+#define TPC4_EML_CS_SECTION 0x1000
+#define mmTPC5_ROM_TABLE_BASE 0x7FFFA00000ull
+#define TPC5_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC5_ROM_TABLE_SECTION 0x1000
+#define mmTPC5_EML_SPMU_BASE 0x7FFFA01000ull
+#define TPC5_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC5_EML_SPMU_SECTION 0x1000
+#define mmTPC5_EML_ETF_BASE 0x7FFFA02000ull
+#define TPC5_EML_ETF_MAX_OFFSET 0x1000
+#define TPC5_EML_ETF_SECTION 0x1000
+#define mmTPC5_EML_STM_BASE 0x7FFFA03000ull
+#define TPC5_EML_STM_MAX_OFFSET 0x1000
+#define TPC5_EML_STM_SECTION 0x2000
+#define mmTPC5_EML_CTI_BASE 0x7FFFA05000ull
+#define TPC5_EML_CTI_MAX_OFFSET 0x1000
+#define TPC5_EML_CTI_SECTION 0x1000
+#define mmTPC5_EML_FUNNEL_BASE 0x7FFFA06000ull
+#define TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC5_EML_FUNNEL_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_0_BASE 0x7FFFA07000ull
+#define TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_1_BASE 0x7FFFA08000ull
+#define TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_2_BASE 0x7FFFA09000ull
+#define TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_3_BASE 0x7FFFA0A000ull
+#define TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC5_EML_CFG_BASE 0x7FFFA40000ull
+#define TPC5_EML_CFG_MAX_OFFSET 0x3380
+#define TPC5_EML_CFG_SECTION 0x1000
+#define mmTPC5_EML_TPC_CFG_BASE 0x7FFFA41000ull
+#define TPC5_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC5_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC5_EML_TPC_CFG_BASE 0x7FFFA41400ull
+#define KERNEL_TENSOR_0_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC5_EML_TPC_CFG_BASE 0x7FFFA41438ull
+#define KERNEL_TENSOR_1_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC5_EML_TPC_CFG_BASE 0x7FFFA41470ull
+#define KERNEL_TENSOR_2_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC5_EML_TPC_CFG_BASE 0x7FFFA414A8ull
+#define KERNEL_TENSOR_3_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC5_EML_TPC_CFG_BASE 0x7FFFA414E0ull
+#define KERNEL_TENSOR_4_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC5_EML_TPC_CFG_BASE 0x7FFFA41518ull
+#define KERNEL_TENSOR_5_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC5_EML_TPC_CFG_BASE 0x7FFFA41550ull
+#define KERNEL_TENSOR_6_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC5_EML_TPC_CFG_BASE 0x7FFFA41588ull
+#define KERNEL_TENSOR_7_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC5_EML_TPC_CFG_BASE 0x7FFFA415C0ull
+#define KERNEL_TENSOR_8_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC5_EML_TPC_CFG_BASE 0x7FFFA415F8ull
+#define KERNEL_TENSOR_9_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC5_EML_TPC_CFG_BASE 0x7FFFA41630ull
+#define KERNEL_TENSOR_10_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC5_EML_TPC_CFG_BASE 0x7FFFA41668ull
+#define KERNEL_TENSOR_11_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC5_EML_TPC_CFG_BASE 0x7FFFA416A0ull
+#define KERNEL_TENSOR_12_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC5_EML_TPC_CFG_BASE 0x7FFFA416D8ull
+#define KERNEL_TENSOR_13_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC5_EML_TPC_CFG_BASE 0x7FFFA41710ull
+#define KERNEL_TENSOR_14_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC5_EML_TPC_CFG_BASE 0x7FFFA41748ull
+#define KERNEL_TENSOR_15_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC5_EML_TPC_CFG_BASE 0x7FFFA41780ull
+#define KERNEL_SYNC_OBJECT_TPC5_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC5_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC5_EML_TPC_CFG_BASE 0x7FFFA41788ull
+#define KERNEL_TPC5_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC5_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC5_EML_TPC_CFG_BASE 0x7FFFA41A00ull
+#define QM_TENSOR_0_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC5_EML_TPC_CFG_BASE 0x7FFFA41A38ull
+#define QM_TENSOR_1_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC5_EML_TPC_CFG_BASE 0x7FFFA41A70ull
+#define QM_TENSOR_2_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC5_EML_TPC_CFG_BASE 0x7FFFA41AA8ull
+#define QM_TENSOR_3_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC5_EML_TPC_CFG_BASE 0x7FFFA41AE0ull
+#define QM_TENSOR_4_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC5_EML_TPC_CFG_BASE 0x7FFFA41B18ull
+#define QM_TENSOR_5_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC5_EML_TPC_CFG_BASE 0x7FFFA41B50ull
+#define QM_TENSOR_6_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC5_EML_TPC_CFG_BASE 0x7FFFA41B88ull
+#define QM_TENSOR_7_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC5_EML_TPC_CFG_BASE 0x7FFFA41BC0ull
+#define QM_TENSOR_8_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC5_EML_TPC_CFG_BASE 0x7FFFA41BF8ull
+#define QM_TENSOR_9_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC5_EML_TPC_CFG_BASE 0x7FFFA41C30ull
+#define QM_TENSOR_10_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC5_EML_TPC_CFG_BASE 0x7FFFA41C68ull
+#define QM_TENSOR_11_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC5_EML_TPC_CFG_BASE 0x7FFFA41CA0ull
+#define QM_TENSOR_12_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC5_EML_TPC_CFG_BASE 0x7FFFA41CD8ull
+#define QM_TENSOR_13_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D10ull
+#define QM_TENSOR_14_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D48ull
+#define QM_TENSOR_15_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D80ull
+#define QM_SYNC_OBJECT_TPC5_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC5_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D88ull
+#define QM_TPC5_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC5_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC5_EML_TPC_QM_BASE 0x7FFFA42000ull
+#define TPC5_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC5_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC5_EML_CS_BASE 0x7FFFBFF000ull
+#define TPC5_EML_CS_MAX_OFFSET 0x1000
+#define TPC5_EML_CS_SECTION 0x1000
+#define mmTPC6_ROM_TABLE_BASE 0x7FFFC00000ull
+#define TPC6_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC6_ROM_TABLE_SECTION 0x1000
+#define mmTPC6_EML_SPMU_BASE 0x7FFFC01000ull
+#define TPC6_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC6_EML_SPMU_SECTION 0x1000
+#define mmTPC6_EML_ETF_BASE 0x7FFFC02000ull
+#define TPC6_EML_ETF_MAX_OFFSET 0x1000
+#define TPC6_EML_ETF_SECTION 0x1000
+#define mmTPC6_EML_STM_BASE 0x7FFFC03000ull
+#define TPC6_EML_STM_MAX_OFFSET 0x1000
+#define TPC6_EML_STM_SECTION 0x2000
+#define mmTPC6_EML_CTI_BASE 0x7FFFC05000ull
+#define TPC6_EML_CTI_MAX_OFFSET 0x1000
+#define TPC6_EML_CTI_SECTION 0x1000
+#define mmTPC6_EML_FUNNEL_BASE 0x7FFFC06000ull
+#define TPC6_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC6_EML_FUNNEL_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_0_BASE 0x7FFFC07000ull
+#define TPC6_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_1_BASE 0x7FFFC08000ull
+#define TPC6_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_2_BASE 0x7FFFC09000ull
+#define TPC6_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_3_BASE 0x7FFFC0A000ull
+#define TPC6_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC6_EML_CFG_BASE 0x7FFFC40000ull
+#define TPC6_EML_CFG_MAX_OFFSET 0x3380
+#define TPC6_EML_CFG_SECTION 0x1000
+#define mmTPC6_EML_TPC_CFG_BASE 0x7FFFC41000ull
+#define TPC6_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC6_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC6_EML_TPC_CFG_BASE 0x7FFFC41400ull
+#define KERNEL_TENSOR_0_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC6_EML_TPC_CFG_BASE 0x7FFFC41438ull
+#define KERNEL_TENSOR_1_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC6_EML_TPC_CFG_BASE 0x7FFFC41470ull
+#define KERNEL_TENSOR_2_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC6_EML_TPC_CFG_BASE 0x7FFFC414A8ull
+#define KERNEL_TENSOR_3_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC6_EML_TPC_CFG_BASE 0x7FFFC414E0ull
+#define KERNEL_TENSOR_4_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC6_EML_TPC_CFG_BASE 0x7FFFC41518ull
+#define KERNEL_TENSOR_5_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC6_EML_TPC_CFG_BASE 0x7FFFC41550ull
+#define KERNEL_TENSOR_6_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC6_EML_TPC_CFG_BASE 0x7FFFC41588ull
+#define KERNEL_TENSOR_7_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC6_EML_TPC_CFG_BASE 0x7FFFC415C0ull
+#define KERNEL_TENSOR_8_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC6_EML_TPC_CFG_BASE 0x7FFFC415F8ull
+#define KERNEL_TENSOR_9_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC6_EML_TPC_CFG_BASE 0x7FFFC41630ull
+#define KERNEL_TENSOR_10_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC6_EML_TPC_CFG_BASE 0x7FFFC41668ull
+#define KERNEL_TENSOR_11_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC6_EML_TPC_CFG_BASE 0x7FFFC416A0ull
+#define KERNEL_TENSOR_12_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC6_EML_TPC_CFG_BASE 0x7FFFC416D8ull
+#define KERNEL_TENSOR_13_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC6_EML_TPC_CFG_BASE 0x7FFFC41710ull
+#define KERNEL_TENSOR_14_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC6_EML_TPC_CFG_BASE 0x7FFFC41748ull
+#define KERNEL_TENSOR_15_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC6_EML_TPC_CFG_BASE 0x7FFFC41780ull
+#define KERNEL_SYNC_OBJECT_TPC6_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC6_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC6_EML_TPC_CFG_BASE 0x7FFFC41788ull
+#define KERNEL_TPC6_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC6_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC6_EML_TPC_CFG_BASE 0x7FFFC41A00ull
+#define QM_TENSOR_0_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC6_EML_TPC_CFG_BASE 0x7FFFC41A38ull
+#define QM_TENSOR_1_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC6_EML_TPC_CFG_BASE 0x7FFFC41A70ull
+#define QM_TENSOR_2_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC6_EML_TPC_CFG_BASE 0x7FFFC41AA8ull
+#define QM_TENSOR_3_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC6_EML_TPC_CFG_BASE 0x7FFFC41AE0ull
+#define QM_TENSOR_4_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC6_EML_TPC_CFG_BASE 0x7FFFC41B18ull
+#define QM_TENSOR_5_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC6_EML_TPC_CFG_BASE 0x7FFFC41B50ull
+#define QM_TENSOR_6_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC6_EML_TPC_CFG_BASE 0x7FFFC41B88ull
+#define QM_TENSOR_7_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC6_EML_TPC_CFG_BASE 0x7FFFC41BC0ull
+#define QM_TENSOR_8_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC6_EML_TPC_CFG_BASE 0x7FFFC41BF8ull
+#define QM_TENSOR_9_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC6_EML_TPC_CFG_BASE 0x7FFFC41C30ull
+#define QM_TENSOR_10_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC6_EML_TPC_CFG_BASE 0x7FFFC41C68ull
+#define QM_TENSOR_11_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC6_EML_TPC_CFG_BASE 0x7FFFC41CA0ull
+#define QM_TENSOR_12_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC6_EML_TPC_CFG_BASE 0x7FFFC41CD8ull
+#define QM_TENSOR_13_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D10ull
+#define QM_TENSOR_14_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D48ull
+#define QM_TENSOR_15_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D80ull
+#define QM_SYNC_OBJECT_TPC6_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC6_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D88ull
+#define QM_TPC6_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC6_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC6_EML_TPC_QM_BASE 0x7FFFC42000ull
+#define TPC6_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC6_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC6_EML_CS_BASE 0x7FFFDFF000ull
+#define TPC6_EML_CS_MAX_OFFSET 0x1000
+#define TPC6_EML_CS_SECTION 0x1000
+#define mmTPC7_ROM_TABLE_BASE 0x7FFFE00000ull
+#define TPC7_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC7_ROM_TABLE_SECTION 0x1000
+#define mmTPC7_EML_SPMU_BASE 0x7FFFE01000ull
+#define TPC7_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC7_EML_SPMU_SECTION 0x1000
+#define mmTPC7_EML_ETF_BASE 0x7FFFE02000ull
+#define TPC7_EML_ETF_MAX_OFFSET 0x1000
+#define TPC7_EML_ETF_SECTION 0x1000
+#define mmTPC7_EML_STM_BASE 0x7FFFE03000ull
+#define TPC7_EML_STM_MAX_OFFSET 0x1000
+#define TPC7_EML_STM_SECTION 0x2000
+#define mmTPC7_EML_CTI_BASE 0x7FFFE05000ull
+#define TPC7_EML_CTI_MAX_OFFSET 0x1000
+#define TPC7_EML_CTI_SECTION 0x1000
+#define mmTPC7_EML_FUNNEL_BASE 0x7FFFE06000ull
+#define TPC7_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC7_EML_FUNNEL_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_0_BASE 0x7FFFE07000ull
+#define TPC7_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_1_BASE 0x7FFFE08000ull
+#define TPC7_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_2_BASE 0x7FFFE09000ull
+#define TPC7_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_3_BASE 0x7FFFE0A000ull
+#define TPC7_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC7_EML_CFG_BASE 0x7FFFE40000ull
+#define TPC7_EML_CFG_MAX_OFFSET 0x3380
+#define TPC7_EML_CFG_SECTION 0x1000
+#define mmTPC7_EML_TPC_CFG_BASE 0x7FFFE41000ull
+#define TPC7_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC7_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC7_EML_TPC_CFG_BASE 0x7FFFE41400ull
+#define KERNEL_TENSOR_0_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC7_EML_TPC_CFG_BASE 0x7FFFE41438ull
+#define KERNEL_TENSOR_1_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC7_EML_TPC_CFG_BASE 0x7FFFE41470ull
+#define KERNEL_TENSOR_2_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC7_EML_TPC_CFG_BASE 0x7FFFE414A8ull
+#define KERNEL_TENSOR_3_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC7_EML_TPC_CFG_BASE 0x7FFFE414E0ull
+#define KERNEL_TENSOR_4_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC7_EML_TPC_CFG_BASE 0x7FFFE41518ull
+#define KERNEL_TENSOR_5_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC7_EML_TPC_CFG_BASE 0x7FFFE41550ull
+#define KERNEL_TENSOR_6_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC7_EML_TPC_CFG_BASE 0x7FFFE41588ull
+#define KERNEL_TENSOR_7_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC7_EML_TPC_CFG_BASE 0x7FFFE415C0ull
+#define KERNEL_TENSOR_8_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC7_EML_TPC_CFG_BASE 0x7FFFE415F8ull
+#define KERNEL_TENSOR_9_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC7_EML_TPC_CFG_BASE 0x7FFFE41630ull
+#define KERNEL_TENSOR_10_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC7_EML_TPC_CFG_BASE 0x7FFFE41668ull
+#define KERNEL_TENSOR_11_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC7_EML_TPC_CFG_BASE 0x7FFFE416A0ull
+#define KERNEL_TENSOR_12_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC7_EML_TPC_CFG_BASE 0x7FFFE416D8ull
+#define KERNEL_TENSOR_13_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC7_EML_TPC_CFG_BASE 0x7FFFE41710ull
+#define KERNEL_TENSOR_14_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC7_EML_TPC_CFG_BASE 0x7FFFE41748ull
+#define KERNEL_TENSOR_15_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC7_EML_TPC_CFG_BASE 0x7FFFE41780ull
+#define KERNEL_SYNC_OBJECT_TPC7_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC7_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC7_EML_TPC_CFG_BASE 0x7FFFE41788ull
+#define KERNEL_TPC7_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC7_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC7_EML_TPC_CFG_BASE 0x7FFFE41A00ull
+#define QM_TENSOR_0_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC7_EML_TPC_CFG_BASE 0x7FFFE41A38ull
+#define QM_TENSOR_1_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC7_EML_TPC_CFG_BASE 0x7FFFE41A70ull
+#define QM_TENSOR_2_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC7_EML_TPC_CFG_BASE 0x7FFFE41AA8ull
+#define QM_TENSOR_3_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC7_EML_TPC_CFG_BASE 0x7FFFE41AE0ull
+#define QM_TENSOR_4_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC7_EML_TPC_CFG_BASE 0x7FFFE41B18ull
+#define QM_TENSOR_5_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC7_EML_TPC_CFG_BASE 0x7FFFE41B50ull
+#define QM_TENSOR_6_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC7_EML_TPC_CFG_BASE 0x7FFFE41B88ull
+#define QM_TENSOR_7_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC7_EML_TPC_CFG_BASE 0x7FFFE41BC0ull
+#define QM_TENSOR_8_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC7_EML_TPC_CFG_BASE 0x7FFFE41BF8ull
+#define QM_TENSOR_9_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC7_EML_TPC_CFG_BASE 0x7FFFE41C30ull
+#define QM_TENSOR_10_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC7_EML_TPC_CFG_BASE 0x7FFFE41C68ull
+#define QM_TENSOR_11_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC7_EML_TPC_CFG_BASE 0x7FFFE41CA0ull
+#define QM_TENSOR_12_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC7_EML_TPC_CFG_BASE 0x7FFFE41CD8ull
+#define QM_TENSOR_13_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D10ull
+#define QM_TENSOR_14_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D48ull
+#define QM_TENSOR_15_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D80ull
+#define QM_SYNC_OBJECT_TPC7_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC7_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D88ull
+#define QM_TPC7_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC7_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC7_EML_TPC_QM_BASE 0x7FFFE42000ull
+#define TPC7_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC7_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC7_EML_CS_BASE 0x7FFFFFF000ull
+#define TPC7_EML_CS_MAX_OFFSET 0x1000
+
+#endif /* GAUDI_BLOCKS_H_ */
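The *_BASE/*_MAX_OFFSET/*_SECTION triplets in gaudi_blocks.h follow one pattern throughout: BASE is the block's absolute address in the device address space, MAX_OFFSET bounds the registers defined within the block, and for the top-level blocks SECTION matches the distance to the next block base (e.g., mmTPC5_EML_BUSMON_3_BASE 0x7FFFA0A000 + TPC5_EML_BUSMON_3_SECTION 0x36000 = mmTPC5_EML_CFG_BASE 0x7FFFA40000), though the tensor sub-block entries do not follow the same byte interpretation. A minimal sketch of a range check built from these macros follows; the helper name is hypothetical, and treating MAX_OFFSET as an exclusive byte bound is an assumption for illustration, not something the header states:

	#include <linux/types.h>
	#include "gaudi_blocks.h"

	/* Hedged sketch: does an absolute device address fall inside the
	 * TPC7 coresight (CS) block? Using MAX_OFFSET as an exclusive
	 * bound is an assumption; the driver may apply other semantics.
	 */
	static bool addr_in_tpc7_eml_cs(u64 addr)
	{
		return addr >= mmTPC7_EML_CS_BASE &&
		       addr < mmTPC7_EML_CS_BASE + TPC7_EML_CS_MAX_OFFSET;
	}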
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
new file mode 100644
index 000000000000..85e3b5148595
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ASIC_REG_GAUDI_REGS_H_
+#define ASIC_REG_GAUDI_REGS_H_
+
+#include "gaudi_blocks.h"
+#include "psoc_global_conf_regs.h"
+#include "psoc_timestamp_regs.h"
+#include "cpu_if_regs.h"
+#include "mmu_up_regs.h"
+#include "stlb_regs.h"
+#include "dma0_qm_regs.h"
+#include "dma1_qm_regs.h"
+#include "dma2_qm_regs.h"
+#include "dma3_qm_regs.h"
+#include "dma4_qm_regs.h"
+#include "dma5_qm_regs.h"
+#include "dma6_qm_regs.h"
+#include "dma7_qm_regs.h"
+#include "dma0_core_regs.h"
+#include "dma1_core_regs.h"
+#include "dma2_core_regs.h"
+#include "dma3_core_regs.h"
+#include "dma4_core_regs.h"
+#include "dma5_core_regs.h"
+#include "dma6_core_regs.h"
+#include "dma7_core_regs.h"
+#include "mme0_ctrl_regs.h"
+#include "mme1_ctrl_regs.h"
+#include "mme2_ctrl_regs.h"
+#include "mme3_ctrl_regs.h"
+#include "mme0_qm_regs.h"
+#include "mme2_qm_regs.h"
+#include "tpc0_cfg_regs.h"
+#include "tpc1_cfg_regs.h"
+#include "tpc2_cfg_regs.h"
+#include "tpc3_cfg_regs.h"
+#include "tpc4_cfg_regs.h"
+#include "tpc5_cfg_regs.h"
+#include "tpc6_cfg_regs.h"
+#include "tpc7_cfg_regs.h"
+#include "tpc0_qm_regs.h"
+#include "tpc1_qm_regs.h"
+#include "tpc2_qm_regs.h"
+#include "tpc3_qm_regs.h"
+#include "tpc4_qm_regs.h"
+#include "tpc5_qm_regs.h"
+#include "tpc6_qm_regs.h"
+#include "tpc7_qm_regs.h"
+#include "dma_if_e_n_down_ch0_regs.h"
+#include "dma_if_e_n_down_ch1_regs.h"
+#include "dma_if_e_s_down_ch0_regs.h"
+#include "dma_if_e_s_down_ch1_regs.h"
+#include "dma_if_w_n_down_ch0_regs.h"
+#include "dma_if_w_n_down_ch1_regs.h"
+#include "dma_if_w_s_down_ch0_regs.h"
+#include "dma_if_w_s_down_ch1_regs.h"
+#include "dma_if_e_n_regs.h"
+#include "dma_if_e_s_regs.h"
+#include "dma_if_w_n_regs.h"
+#include "dma_if_w_s_regs.h"
+#include "nif_rtr_ctrl_0_regs.h"
+#include "nif_rtr_ctrl_1_regs.h"
+#include "nif_rtr_ctrl_2_regs.h"
+#include "nif_rtr_ctrl_3_regs.h"
+#include "nif_rtr_ctrl_4_regs.h"
+#include "nif_rtr_ctrl_5_regs.h"
+#include "nif_rtr_ctrl_6_regs.h"
+#include "nif_rtr_ctrl_7_regs.h"
+#include "sif_rtr_ctrl_0_regs.h"
+#include "sif_rtr_ctrl_1_regs.h"
+#include "sif_rtr_ctrl_2_regs.h"
+#include "sif_rtr_ctrl_3_regs.h"
+#include "sif_rtr_ctrl_4_regs.h"
+#include "sif_rtr_ctrl_5_regs.h"
+#include "sif_rtr_ctrl_6_regs.h"
+#include "sif_rtr_ctrl_7_regs.h"
+#include "psoc_etr_regs.h"
+
+#include "dma0_qm_masks.h"
+#include "mme0_qm_masks.h"
+#include "tpc0_qm_masks.h"
+#include "dma0_core_masks.h"
+#include "tpc0_cfg_masks.h"
+#include "psoc_global_conf_masks.h"
+
+#include "psoc_pci_pll_regs.h"
+#include "psoc_hbm_pll_regs.h"
+
+#define GAUDI_ECC_MEM_SEL_OFFSET 0xF18
+#define GAUDI_ECC_ADDRESS_OFFSET 0xF1C
+#define GAUDI_ECC_SYNDROME_OFFSET 0xF20
+#define GAUDI_ECC_SERR0_OFFSET 0xF30
+#define GAUDI_ECC_SERR1_OFFSET 0xF34
+#define GAUDI_ECC_SERR2_OFFSET 0xF38
+#define GAUDI_ECC_SERR3_OFFSET 0xF3C
+#define GAUDI_ECC_DERR0_OFFSET 0xF40
+#define GAUDI_ECC_DERR1_OFFSET 0xF44
+#define GAUDI_ECC_DERR2_OFFSET 0xF48
+#define GAUDI_ECC_DERR3_OFFSET 0xF4C
+
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 0x492000
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x494000
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 0x494800
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 0x495000
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 0x495800
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 0x496000
+#define mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4B2000
+#define mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 0x4B6000
+#define mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4D2000
+#define mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 0x4D6000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4F2000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 0x4F2004
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 0x4F3FFC
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x4F4000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 0x4F6000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 0x4F67FC
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW 0x300400
+#define mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AW 0x310400
+#define mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AW 0x320400
+#define mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AW 0x330400
+#define mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AW 0x340400
+#define mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AW 0x350400
+#define mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AW 0x360400
+#define mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AW 0x370400
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR 0x300490
+#define mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AR 0x310490
+#define mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AR 0x320490
+#define mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AR 0x330490
+#define mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AR 0x340490
+#define mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AR 0x350490
+#define mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AR 0x360490
+#define mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AR 0x370490
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0 0x300410
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0 0x310410
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0 0x320410
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0 0x330410
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0 0x340410
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0 0x350410
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0 0x360410
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0 0x370410
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0 0x300450
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0 0x310450
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0 0x320450
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0 0x330450
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0 0x340450
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0 0x350450
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0 0x360450
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0 0x370450
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0 0x3004A0
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0 0x3104A0
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0 0x3204A0
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0 0x3304A0
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0 0x3404A0
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0 0x3504A0
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0 0x3604A0
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0 0x3704A0
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0 0x3004E0
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0 0x3104E0
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0 0x3204E0
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0 0x3304E0
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0 0x3404E0
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0 0x3504E0
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0 0x3604E0
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0 0x3704E0
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AW 0x380400
+#define mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AW 0x390400
+#define mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AW 0x3A0400
+#define mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AW 0x3B0400
+#define mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AW 0x3C0400
+#define mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AW 0x3D0400
+#define mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AW 0x3E0400
+#define mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW 0x3F0400
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AR 0x380490
+#define mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AR 0x390490
+#define mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AR 0x3A0490
+#define mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AR 0x3B0490
+#define mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AR 0x3C0490
+#define mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AR 0x3D0490
+#define mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AR 0x3E0490
+#define mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR 0x3F0490
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0 0x380410
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0 0x390410
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0 0x3A0410
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0 0x3B0410
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0 0x3C0410
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0 0x3D0410
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0 0x3E0410
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0 0x3F0410
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0 0x380450
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0 0x390450
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0 0x3A0450
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0 0x3B0450
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0 0x3C0450
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0 0x3D0450
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0 0x3E0450
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0 0x3F0450
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0 0x3804A0
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0 0x3904A0
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0 0x3A04A0
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0 0x3B04A0
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0 0x3C04A0
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0 0x3D04A0
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0 0x3E04A0
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0 0x3F04A0
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0 0x3804E0
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0 0x3904E0
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0 0x3A04E0
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0 0x3B04E0
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0 0x3C04E0
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0 0x3D04E0
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0 0x3E04E0
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0 0x3F04E0
+
+#define mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_0 0x489030
+#define mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_1 0x489034
+
+#define mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_0 0x4A9030
+#define mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_1 0x4A9034
+
+#define mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_0 0x4C9030
+#define mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_1 0x4C9034
+
+#define mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_0 0x4E9030
+#define mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_1 0x4E9034
+
+#define mmMME1_QM_GLBL_CFG0 0xE8000
+#define mmMME1_QM_GLBL_STS0 0xE8038
+
+#define mmMME0_SBAB_SB_STALL 0x4002C
+#define mmMME0_SBAB_ARUSER0 0x40034
+#define mmMME0_SBAB_ARUSER1 0x40038
+#define mmMME0_SBAB_PROT 0x40050
+
+#define mmMME1_SBAB_SB_STALL 0xC002C
+#define mmMME1_SBAB_ARUSER0 0xC0034
+#define mmMME1_SBAB_ARUSER1 0xC0038
+#define mmMME1_SBAB_PROT 0xC0050
+
+#define mmMME2_SBAB_SB_STALL 0x14002C
+#define mmMME2_SBAB_ARUSER0 0x140034
+#define mmMME2_SBAB_ARUSER1 0x140038
+#define mmMME2_SBAB_PROT 0x140050
+
+#define mmMME3_SBAB_SB_STALL 0x1C002C
+#define mmMME3_SBAB_ARUSER0 0x1C0034
+#define mmMME3_SBAB_ARUSER1 0x1C0038
+#define mmMME3_SBAB_PROT 0x1C0050
+
+#define mmMME0_ACC_ACC_STALL 0x20028
+#define mmMME0_ACC_WBC 0x20038
+#define mmMME0_ACC_PROT 0x20050
+
+#define mmMME1_ACC_ACC_STALL 0xA0028
+#define mmMME1_ACC_WBC 0xA0038
+#define mmMME1_ACC_PROT 0xA0050
+
+#define mmMME2_ACC_ACC_STALL 0x120028
+#define mmMME2_ACC_WBC 0x120038
+#define mmMME2_ACC_PROT 0x120050
+
+#define mmMME3_ACC_ACC_STALL 0x1A0028
+#define mmMME3_ACC_WBC 0x1A0038
+#define mmMME3_ACC_PROT 0x1A0050
+
+#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x800040
+
+#define mmPSOC_EFUSE_READ 0xC4A000
+#define mmPSOC_EFUSE_DATA_0 0xC4A080
+
+#define mmPCIE_WRAP_MAX_OUTSTAND 0xC01B20
+#define mmPCIE_WRAP_LBW_PROT_OVR 0xC01B48
+#define mmPCIE_WRAP_HBW_DRAIN_CFG 0xC01D54
+#define mmPCIE_WRAP_LBW_DRAIN_CFG 0xC01D5C
+
+#define mmPCIE_MSI_INTR_0 0xC13000
+
+#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000
+
+#define mmPCIE_AUX_DBI 0xC07490
+
+#endif /* ASIC_REG_GAUDI_REGS_H_ */
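Unlike the mm* defines, which are absolute addresses (the sync-manager objects, for instance, are spaced 4 bytes apart: mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 = 0x4F2000 + 2047 * 4 = 0x4F3FFC), the GAUDI_ECC_*_OFFSET values in gaudi_regs.h are block-relative, so a usable register address is a block base plus one of these offsets. A hedged sketch of that composition follows; RREG32() is assumed to be the driver's 32-bit MMIO read accessor referencing a local hdev, and the caller-supplied base stands in for whichever engine block actually exposes the ECC window, which the header itself does not say:

	#include "gaudi_regs.h"

	/* Hedged sketch: GAUDI_ECC_* are offsets, so the full register
	 * address is block_base + offset. RREG32() is assumed to expand
	 * against the hdev parameter in scope.
	 */
	static u32 gaudi_read_ecc_syndrome(struct hl_device *hdev, u64 block_base)
	{
		return RREG32(block_base + GAUDI_ECC_SYNDROME_OFFSET);
	}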
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
new file mode 100644
index 000000000000..083d073a0128
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
@@ -0,0 +1,1456 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME0_CTRL_REGS_H_
+#define ASIC_REG_MME0_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * MME0_CTRL (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME0_CTRL_ARCH_STATUS 0x60000
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_HIGH_S 0x60008
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_HIGH_L 0x6000C
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_HIGH_O 0x60010
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_LOW_S 0x60014
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_LOW_L 0x60018
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_LOW_O 0x6001C
+
+#define mmMME0_CTRL_ARCH_HEADER_LOW 0x60020
+
+#define mmMME0_CTRL_ARCH_HEADER_HIGH 0x60024
+
+#define mmMME0_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0x60028
+
+#define mmMME0_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0x6002C
+
+#define mmMME0_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0x60030
+
+#define mmMME0_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0x60034
+
+#define mmMME0_CTRL_ARCH_OUTER_LOOP 0x60038
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0x6003C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0x60040
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0x60044
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0x60048
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0x6004C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0x60050
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0x60054
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0x60058
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0x6005C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0x60060
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0x60064
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0x60068
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0x6006C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0x60070
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0x60074
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0x60078
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0x6007C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0x60080
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60084
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0x60088
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0x6008C
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0x60090
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0x60094
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0x60098
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_0 0x6009C
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_1 0x600A0
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_2 0x600A4
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_3 0x600A8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0x600AC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0x600B0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0x600B4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0x600B8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0x600BC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0x600C0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0x600C4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0x600C8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0x600CC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0x600D0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0x600D4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0x600D8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0x600DC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0x600E0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0x600E4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0x600E8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0x600EC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0x600F0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x600F4
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x600F8
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x600FC
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60100
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60104
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60108
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0x6010C
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0x60110
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0x60114
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0x60118
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x6011C
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60120
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60124
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60128
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x6012C
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0x60130
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0x60134
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0x60138
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0x6013C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0x60140
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0x60144
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0x60148
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0x6014C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0x60150
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0x60154
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0x60158
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0x6015C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0x60160
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0x60164
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0x60168
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0x6016C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0x60170
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0x60174
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0x60178
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0x6017C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0x60180
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0x60184
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60188
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x6018C
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60190
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60194
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60198
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x6019C
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0x601A0
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0x601A4
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0x601A8
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0x601AC
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x601B0
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x601B4
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x601B8
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x601BC
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x601C0
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0x601C4
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0x601C8
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0x601CC
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0x601D0
+
+#define mmMME0_CTRL_ARCH_DESC_SB_REPEAT 0x601D4
+
+#define mmMME0_CTRL_ARCH_DESC_RATE_LIMITER 0x601D8
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x601DC
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x601E0
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0x601E4
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0x601E8
+
+#define mmMME0_CTRL_ARCH_DESC_AXI_USER_DATA 0x601EC
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_S 0x601F0
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0x601F4
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0x601F8
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0x601FC
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0x60200
+
+#define mmMME0_CTRL_ARCH_DESC_PADDING_VALUE_S 0x60204
+
+#define mmMME0_CTRL_ARCH_DESC_PADDING_VALUE_L 0x60208
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_S 0x6020C
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0x60210
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0x60214
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0x60218
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0x6021C
+
+#define mmMME0_CTRL_ARCH_DESC_PCU_RL_SATURATION 0x60220
+
+#define mmMME0_CTRL_ARCH_DESC_DUMMY 0x60224
+
+#define mmMME0_CTRL_CMD 0x60280
+
+#define mmMME0_CTRL_STATUS1 0x60284
+
+#define mmMME0_CTRL_RESET 0x60288
+
+#define mmMME0_CTRL_QM_STALL 0x6028C
+
+#define mmMME0_CTRL_SYNC_OBJECT_FIFO_TH 0x60290
+
+#define mmMME0_CTRL_EUS_ROLLUP_CNT_ADD 0x60294
+
+#define mmMME0_CTRL_INTR_CAUSE 0x60298
+
+#define mmMME0_CTRL_INTR_MASK 0x6029C
+
+#define mmMME0_CTRL_LOG_SHADOW 0x602A0
+
+#define mmMME0_CTRL_PCU_RL_DESC0 0x602A4
+
+#define mmMME0_CTRL_PCU_RL_TOKEN_UPDATE 0x602A8
+
+#define mmMME0_CTRL_PCU_RL_TH 0x602AC
+
+#define mmMME0_CTRL_PCU_RL_MIN 0x602B0
+
+#define mmMME0_CTRL_PCU_RL_CTRL_EN 0x602B4
+
+#define mmMME0_CTRL_PCU_RL_HISTORY_LOG_SIZE 0x602B8
+
+#define mmMME0_CTRL_PCU_DUMMY_A_BF16 0x602BC
+
+#define mmMME0_CTRL_PCU_DUMMY_B_BF16 0x602C0
+
+#define mmMME0_CTRL_PCU_DUMMY_A_FP32_ODD 0x602C4
+
+#define mmMME0_CTRL_PCU_DUMMY_A_FP32_EVEN 0x602C8
+
+#define mmMME0_CTRL_PCU_DUMMY_B_FP32_ODD 0x602CC
+
+#define mmMME0_CTRL_PCU_DUMMY_B_FP32_EVEN 0x602D0
+
+#define mmMME0_CTRL_PROT 0x602D4
+
+#define mmMME0_CTRL_EU_POWER_SAVE_DISABLE 0x602D8
+
+#define mmMME0_CTRL_CS_DBG_BLOCK_ID 0x602DC
+
+#define mmMME0_CTRL_CS_DBG_STATUS_DROP_CNT 0x602E0
+
+#define mmMME0_CTRL_TE_CLOSE_CGATE 0x602E4
+
+#define mmMME0_CTRL_AGU_SM_INFLIGHT_CNTR 0x602E8
+
+#define mmMME0_CTRL_AGU_SM_TOTAL_CNTR 0x602EC
+
+#define mmMME0_CTRL_EZSYNC_OUT_CREDIT 0x602F0
+
+#define mmMME0_CTRL_PCU_RL_SAT_SEC 0x602F4
+
+#define mmMME0_CTRL_AGU_SYNC_MSG_AXI_USER 0x602F8
+
+#define mmMME0_CTRL_QM_SLV_LBW_CLK_EN 0x602FC
+
+#define mmMME0_CTRL_SHADOW_0_STATUS 0x60400
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0x60408
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0x6040C
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0x60410
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0x60414
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0x60418
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0x6041C
+
+#define mmMME0_CTRL_SHADOW_0_HEADER_LOW 0x60420
+
+#define mmMME0_CTRL_SHADOW_0_HEADER_HIGH 0x60424
+
+#define mmMME0_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0x60428
+
+#define mmMME0_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0x6042C
+
+#define mmMME0_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0x60430
+
+#define mmMME0_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0x60434
+
+#define mmMME0_CTRL_SHADOW_0_OUTER_LOOP 0x60438
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0x6043C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0x60440
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0x60444
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0x60448
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0x6044C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0x60450
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0x60454
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0x60458
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0x6045C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0x60460
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0x60464
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0x60468
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0x6046C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0x60470
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0x60474
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0x60478
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0x6047C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0x60480
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60484
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0x60488
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0x6048C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0x60490
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0x60494
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0x60498
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0x6049C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0x604A0
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0x604A4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0x604A8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0x604AC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0x604B0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0x604B4
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0x604B8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0x604BC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0x604C0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0x604C4
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0x604C8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0x604CC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0x604D0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0x604D4
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0x604D8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0x604DC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0x604E0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0x604E4
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0x604E8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0x604EC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0x604F0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x604F4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x604F8
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x604FC
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60500
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60504
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60508
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0x6050C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0x60510
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0x60514
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0x60518
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x6051C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60520
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60524
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60528
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x6052C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0x60530
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0x60534
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0x60538
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0x6053C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0x60540
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0x60544
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0x60548
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0x6054C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0x60550
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0x60554
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0x60558
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0x6055C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0x60560
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0x60564
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0x60568
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0x6056C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0x60570
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0x60574
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0x60578
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0x6057C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0x60580
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0x60584
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60588
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x6058C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60590
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60594
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60598
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x6059C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0x605A0
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0x605A4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0x605A8
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0x605AC
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x605B0
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x605B4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x605B8
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x605BC
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x605C0
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0x605C4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0x605C8
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0x605CC
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0x605D0
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SB_REPEAT 0x605D4
+
+#define mmMME0_CTRL_SHADOW_0_DESC_RATE_LIMITER 0x605D8
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x605DC
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x605E0
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0x605E4
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0x605E8
+
+#define mmMME0_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0x605EC
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_S 0x605F0
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0x605F4
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0x605F8
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0x605FC
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0x60600
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0x60604
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0x60608
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0x6060C
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0x60610
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0x60614
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0x60618
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0x6061C
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0x60620
+
+#define mmMME0_CTRL_SHADOW_0_DESC_DUMMY 0x60624
+
+#define mmMME0_CTRL_SHADOW_1_STATUS 0x60680
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0x60688
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0x6068C
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0x60690
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0x60694
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0x60698
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0x6069C
+
+#define mmMME0_CTRL_SHADOW_1_HEADER_LOW 0x606A0
+
+#define mmMME0_CTRL_SHADOW_1_HEADER_HIGH 0x606A4
+
+#define mmMME0_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0x606A8
+
+#define mmMME0_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0x606AC
+
+#define mmMME0_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0x606B0
+
+#define mmMME0_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0x606B4
+
+#define mmMME0_CTRL_SHADOW_1_OUTER_LOOP 0x606B8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0x606BC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0x606C0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0x606C4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0x606C8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0x606CC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0x606D0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0x606D4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0x606D8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0x606DC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0x606E0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0x606E4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0x606E8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0x606EC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0x606F0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0x606F4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0x606F8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0x606FC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0x60700
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60704
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0x60708
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0x6070C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0x60710
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0x60714
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0x60718
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0x6071C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0x60720
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0x60724
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0x60728
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0x6072C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0x60730
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0x60734
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0x60738
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0x6073C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0x60740
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0x60744
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0x60748
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0x6074C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0x60750
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0x60754
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0x60758
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0x6075C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0x60760
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0x60764
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0x60768
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0x6076C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0x60770
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x60774
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x60778
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x6077C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60780
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60784
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60788
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0x6078C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0x60790
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0x60794
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0x60798
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x6079C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x607A0
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x607A4
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x607A8
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x607AC
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0x607B0
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0x607B4
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0x607B8
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0x607BC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0x607C0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0x607C4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0x607C8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0x607CC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0x607D0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0x607D4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0x607D8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0x607DC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0x607E0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0x607E4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0x607E8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0x607EC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0x607F0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0x607F4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0x607F8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0x607FC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0x60800
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0x60804
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60808
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x6080C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60810
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60814
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60818
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x6081C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0x60820
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0x60824
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0x60828
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0x6082C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x60830
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x60834
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x60838
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x6083C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x60840
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0x60844
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0x60848
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0x6084C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0x60850
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SB_REPEAT 0x60854
+
+#define mmMME0_CTRL_SHADOW_1_DESC_RATE_LIMITER 0x60858
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x6085C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x60860
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0x60864
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0x60868
+
+#define mmMME0_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0x6086C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_S 0x60870
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0x60874
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0x60878
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0x6087C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0x60880
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0x60884
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0x60888
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0x6088C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0x60890
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0x60894
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0x60898
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0x6089C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0x608A0
+
+#define mmMME0_CTRL_SHADOW_1_DESC_DUMMY 0x608A4
+
+#define mmMME0_CTRL_SHADOW_2_STATUS 0x60900
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0x60908
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0x6090C
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0x60910
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0x60914
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0x60918
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0x6091C
+
+#define mmMME0_CTRL_SHADOW_2_HEADER_LOW 0x60920
+
+#define mmMME0_CTRL_SHADOW_2_HEADER_HIGH 0x60924
+
+#define mmMME0_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0x60928
+
+#define mmMME0_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0x6092C
+
+#define mmMME0_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0x60930
+
+#define mmMME0_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0x60934
+
+#define mmMME0_CTRL_SHADOW_2_OUTER_LOOP 0x60938
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0x6093C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0x60940
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0x60944
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0x60948
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0x6094C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0x60950
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0x60954
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0x60958
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0x6095C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0x60960
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0x60964
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0x60968
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0x6096C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0x60970
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0x60974
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0x60978
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0x6097C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0x60980
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60984
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0x60988
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0x6098C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0x60990
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0x60994
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0x60998
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0x6099C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0x609A0
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0x609A4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0x609A8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0x609AC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0x609B0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0x609B4
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0x609B8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0x609BC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0x609C0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0x609C4
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0x609C8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0x609CC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0x609D0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0x609D4
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0x609D8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0x609DC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0x609E0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0x609E4
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0x609E8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0x609EC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0x609F0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x609F4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x609F8
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x609FC
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60A00
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60A04
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60A08
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0x60A0C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0x60A10
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0x60A14
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0x60A18
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x60A1C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60A20
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60A24
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60A28
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x60A2C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0x60A30
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0x60A34
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0x60A38
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0x60A3C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0x60A40
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0x60A44
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0x60A48
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0x60A4C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0x60A50
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0x60A54
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0x60A58
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0x60A5C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0x60A60
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0x60A64
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0x60A68
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0x60A6C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0x60A70
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0x60A74
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0x60A78
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0x60A7C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0x60A80
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0x60A84
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60A88
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x60A8C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60A90
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60A94
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60A98
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x60A9C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0x60AA0
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0x60AA4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0x60AA8
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0x60AAC
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x60AB0
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x60AB4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x60AB8
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x60ABC
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x60AC0
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0x60AC4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0x60AC8
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0x60ACC
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0x60AD0
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SB_REPEAT 0x60AD4
+
+#define mmMME0_CTRL_SHADOW_2_DESC_RATE_LIMITER 0x60AD8
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x60ADC
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x60AE0
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0x60AE4
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0x60AE8
+
+#define mmMME0_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0x60AEC
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_S 0x60AF0
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0x60AF4
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0x60AF8
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0x60AFC
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0x60B00
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0x60B04
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0x60B08
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0x60B0C
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0x60B10
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0x60B14
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0x60B18
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0x60B1C
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0x60B20
+
+#define mmMME0_CTRL_SHADOW_2_DESC_DUMMY 0x60B24
+
+#define mmMME0_CTRL_SHADOW_3_STATUS 0x60B80
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0x60B88
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0x60B8C
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0x60B90
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0x60B94
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0x60B98
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0x60B9C
+
+#define mmMME0_CTRL_SHADOW_3_HEADER_LOW 0x60BA0
+
+#define mmMME0_CTRL_SHADOW_3_HEADER_HIGH 0x60BA4
+
+#define mmMME0_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0x60BA8
+
+#define mmMME0_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0x60BAC
+
+#define mmMME0_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0x60BB0
+
+#define mmMME0_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0x60BB4
+
+#define mmMME0_CTRL_SHADOW_3_OUTER_LOOP 0x60BB8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0x60BBC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0x60BC0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0x60BC4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0x60BC8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0x60BCC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0x60BD0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0x60BD4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0x60BD8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0x60BDC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0x60BE0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0x60BE4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0x60BE8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0x60BEC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0x60BF0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0x60BF4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0x60BF8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0x60BFC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0x60C00
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60C04
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0x60C08
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0x60C0C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0x60C10
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0x60C14
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0x60C18
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0x60C1C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0x60C20
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0x60C24
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0x60C28
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0x60C2C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0x60C30
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0x60C34
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0x60C38
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0x60C3C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0x60C40
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0x60C44
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0x60C48
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0x60C4C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0x60C50
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0x60C54
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0x60C58
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0x60C5C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0x60C60
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0x60C64
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0x60C68
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0x60C6C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0x60C70
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x60C74
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x60C78
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x60C7C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60C80
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60C84
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60C88
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0x60C8C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0x60C90
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0x60C94
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0x60C98
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x60C9C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60CA0
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60CA4
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60CA8
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x60CAC
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0x60CB0
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0x60CB4
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0x60CB8
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0x60CBC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0x60CC0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0x60CC4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0x60CC8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0x60CCC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0x60CD0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0x60CD4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0x60CD8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0x60CDC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0x60CE0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0x60CE4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0x60CE8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0x60CEC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0x60CF0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0x60CF4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0x60CF8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0x60CFC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0x60D00
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0x60D04
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60D08
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x60D0C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60D10
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60D14
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60D18
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x60D1C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0x60D20
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0x60D24
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0x60D28
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0x60D2C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x60D30
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x60D34
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x60D38
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x60D3C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x60D40
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0x60D44
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0x60D48
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0x60D4C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0x60D50
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SB_REPEAT 0x60D54
+
+#define mmMME0_CTRL_SHADOW_3_DESC_RATE_LIMITER 0x60D58
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x60D5C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x60D60
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0x60D64
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0x60D68
+
+#define mmMME0_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0x60D6C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_S 0x60D70
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0x60D74
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0x60D78
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0x60D7C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0x60D80
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0x60D84
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0x60D88
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0x60D8C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0x60D90
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0x60D94
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0x60D98
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0x60D9C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0x60DA0
+
+#define mmMME0_CTRL_SHADOW_3_DESC_DUMMY 0x60DA4
+
+#endif /* ASIC_REG_MME0_CTRL_REGS_H_ */
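
The mm*-prefixed values in the file above are byte offsets into the device configuration space rather than absolute addresses. As a hedged illustration only (the cfg_base pointer and the helper name below are assumptions for this sketch, not part of this patch or of the habanalabs driver API), a caller could program one of these registers roughly like so:

/*
 * Minimal sketch, assuming cfg_base is the ioremap'ed start of the
 * configuration space that the mm* offsets above are relative to.
 */
#include <linux/io.h>
#include <linux/types.h>

static inline void mme_wreg32(void __iomem *cfg_base, u32 reg, u32 val)
{
	/* reg is one of the mm* byte offsets defined above */
	writel(val, cfg_base + reg);
}

static void example_clear_shadow0_padding(void __iomem *cfg_base)
{
	/* Zero the padding value of the S operand in shadow descriptor 0 */
	mme_wreg32(cfg_base, mmMME0_CTRL_SHADOW_0_DESC_PADDING_VALUE_S, 0);
}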
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
new file mode 100644
index 000000000000..e6dd30ce0ca7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
@@ -0,0 +1,800 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME0_QM_MASKS_H_
+#define ASIC_REG_MME0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * MME0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* MME0_QM_GLBL_CFG0 */
+#define MME0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define MME0_QM_GLBL_CFG0_PQF_EN_MASK 0xF
+#define MME0_QM_GLBL_CFG0_CQF_EN_SHIFT 4
+#define MME0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0
+#define MME0_QM_GLBL_CFG0_CP_EN_SHIFT 9
+#define MME0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00
+
+/* MME0_QM_GLBL_CFG1 */
+#define MME0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define MME0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF
+#define MME0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4
+#define MME0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0
+#define MME0_QM_GLBL_CFG1_CP_STOP_SHIFT 9
+#define MME0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00
+#define MME0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16
+#define MME0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000
+#define MME0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20
+#define MME0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000
+#define MME0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25
+#define MME0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000
+
+/* MME0_QM_GLBL_PROT */
+#define MME0_QM_GLBL_PROT_PQF_SHIFT 0
+#define MME0_QM_GLBL_PROT_PQF_MASK 0xF
+#define MME0_QM_GLBL_PROT_CQF_SHIFT 4
+#define MME0_QM_GLBL_PROT_CQF_MASK 0x1F0
+#define MME0_QM_GLBL_PROT_CP_SHIFT 9
+#define MME0_QM_GLBL_PROT_CP_MASK 0x3E00
+#define MME0_QM_GLBL_PROT_ERR_SHIFT 14
+#define MME0_QM_GLBL_PROT_ERR_MASK 0x4000
+#define MME0_QM_GLBL_PROT_ARB_SHIFT 15
+#define MME0_QM_GLBL_PROT_ARB_MASK 0x8000
+
+/* MME0_QM_GLBL_ERR_CFG */
+#define MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0
+#define MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF
+#define MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0
+#define MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9
+#define MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00
+#define MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16
+#define MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000
+#define MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20
+#define MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000
+#define MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25
+#define MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000
+#define MME0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31
+#define MME0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000
+
+/* MME0_QM_GLBL_SECURE_PROPS */
+#define MME0_QM_GLBL_SECURE_PROPS_0_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_1_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_2_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_3_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_4_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* MME0_QM_GLBL_NON_SECURE_PROPS */
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* MME0_QM_GLBL_STS0 */
+#define MME0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define MME0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF
+#define MME0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4
+#define MME0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0
+#define MME0_QM_GLBL_STS0_CP_IDLE_SHIFT 9
+#define MME0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00
+#define MME0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16
+#define MME0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000
+#define MME0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20
+#define MME0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000
+#define MME0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25
+#define MME0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000
+#define MME0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31
+#define MME0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000
+
+/* MME0_QM_GLBL_STS1 */
+#define MME0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define MME0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define MME0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define MME0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define MME0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define MME0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define MME0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define MME0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define MME0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define MME0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define MME0_QM_GLBL_STS1_CP_WREG_ERR_SHIFT 6
+#define MME0_QM_GLBL_STS1_CP_WREG_ERR_MASK 0x40
+#define MME0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8
+#define MME0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100
+#define MME0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9
+#define MME0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200
+#define MME0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10
+#define MME0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400
+#define MME0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11
+#define MME0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800
+#define MME0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12
+#define MME0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define MME0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13
+#define MME0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define MME0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14
+#define MME0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define MME0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15
+#define MME0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* MME0_QM_GLBL_STS1_4 */
+#define MME0_QM_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1
+#define MME0_QM_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2
+#define MME0_QM_GLBL_STS1_4_CP_RD_ERR_SHIFT 2
+#define MME0_QM_GLBL_STS1_4_CP_RD_ERR_MASK 0x4
+#define MME0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME0_QM_GLBL_STS1_4_CP_STOP_OP_SHIFT 4
+#define MME0_QM_GLBL_STS1_4_CP_STOP_OP_MASK 0x10
+#define MME0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5
+#define MME0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20
+#define MME0_QM_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6
+#define MME0_QM_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40
+#define MME0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define MME0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define MME0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define MME0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define MME0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define MME0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define MME0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define MME0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define MME0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define MME0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define MME0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define MME0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define MME0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define MME0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define MME0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define MME0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* MME0_QM_GLBL_MSG_EN */
+#define MME0_QM_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0
+#define MME0_QM_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1
+#define MME0_QM_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1
+#define MME0_QM_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2
+#define MME0_QM_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2
+#define MME0_QM_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4
+#define MME0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME0_QM_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4
+#define MME0_QM_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10
+#define MME0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5
+#define MME0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20
+#define MME0_QM_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6
+#define MME0_QM_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* MME0_QM_GLBL_MSG_EN_4 */
+#define MME0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1
+#define MME0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2
+#define MME0_QM_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2
+#define MME0_QM_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4
+#define MME0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME0_QM_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4
+#define MME0_QM_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10
+#define MME0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5
+#define MME0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20
+#define MME0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6
+#define MME0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* MME0_QM_PQ_BASE_LO */
+#define MME0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define MME0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_BASE_HI */
+#define MME0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define MME0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_SIZE */
+#define MME0_QM_PQ_SIZE_VAL_SHIFT 0
+#define MME0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_PI */
+#define MME0_QM_PQ_PI_VAL_SHIFT 0
+#define MME0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_CI */
+#define MME0_QM_PQ_CI_VAL_SHIFT 0
+#define MME0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_CFG0 */
+#define MME0_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define MME0_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* MME0_QM_PQ_CFG1 */
+#define MME0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME0_QM_PQ_ARUSER_31_11 */
+#define MME0_QM_PQ_ARUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_PQ_STS0 */
+#define MME0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define MME0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define MME0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define MME0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME0_QM_PQ_STS1 */
+#define MME0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define MME0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define MME0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define MME0_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define MME0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* MME0_QM_CQ_CFG0 */
+#define MME0_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define MME0_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* MME0_QM_CQ_CFG1 */
+#define MME0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_ARUSER_31_11 */
+#define MME0_QM_CQ_ARUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_CQ_STS0 */
+#define MME0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define MME0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define MME0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define MME0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_STS1 */
+#define MME0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define MME0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define MME0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define MME0_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define MME0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* MME0_QM_CQ_PTR_LO_0 */
+#define MME0_QM_CQ_PTR_LO_0_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_0 */
+#define MME0_QM_CQ_PTR_HI_0_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_0 */
+#define MME0_QM_CQ_TSIZE_0_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_0 */
+#define MME0_QM_CQ_CTL_0_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_0_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_0_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_0_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_1 */
+#define MME0_QM_CQ_PTR_LO_1_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_1 */
+#define MME0_QM_CQ_PTR_HI_1_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_1 */
+#define MME0_QM_CQ_TSIZE_1_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_1 */
+#define MME0_QM_CQ_CTL_1_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_1_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_1_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_1_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_2 */
+#define MME0_QM_CQ_PTR_LO_2_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_2 */
+#define MME0_QM_CQ_PTR_HI_2_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_2 */
+#define MME0_QM_CQ_TSIZE_2_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_2 */
+#define MME0_QM_CQ_CTL_2_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_2_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_2_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_2_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_3 */
+#define MME0_QM_CQ_PTR_LO_3_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_3 */
+#define MME0_QM_CQ_PTR_HI_3_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_3 */
+#define MME0_QM_CQ_TSIZE_3_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_3 */
+#define MME0_QM_CQ_CTL_3_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_3_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_3_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_3_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_4 */
+#define MME0_QM_CQ_PTR_LO_4_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_4 */
+#define MME0_QM_CQ_PTR_HI_4_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_4 */
+#define MME0_QM_CQ_TSIZE_4_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_4 */
+#define MME0_QM_CQ_CTL_4_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_4_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_4_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_4_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_STS */
+#define MME0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_STS */
+#define MME0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_STS */
+#define MME0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_STS */
+#define MME0_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_IFIFO_CNT */
+#define MME0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define MME0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* MME0_QM_CP_MSG_BASE0_ADDR_LO */
+#define MME0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE0_ADDR_HI */
+#define MME0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE1_ADDR_LO */
+#define MME0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE1_ADDR_HI */
+#define MME0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE2_ADDR_LO */
+#define MME0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE2_ADDR_HI */
+#define MME0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE3_ADDR_LO */
+#define MME0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE3_ADDR_HI */
+#define MME0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_LDMA_TSIZE_OFFSET */
+#define MME0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define MME0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define MME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define MME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_FENCE0_RDATA */
+#define MME0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* MME0_QM_CP_FENCE1_RDATA */
+#define MME0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* MME0_QM_CP_FENCE2_RDATA */
+#define MME0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* MME0_QM_CP_FENCE3_RDATA */
+#define MME0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* MME0_QM_CP_FENCE0_CNT */
+#define MME0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF
+
+/* MME0_QM_CP_FENCE1_CNT */
+#define MME0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF
+
+/* MME0_QM_CP_FENCE2_CNT */
+#define MME0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF
+
+/* MME0_QM_CP_FENCE3_CNT */
+#define MME0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF
+
+/* MME0_QM_CP_STS */
+#define MME0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define MME0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define MME0_QM_CP_STS_ERDY_SHIFT 16
+#define MME0_QM_CP_STS_ERDY_MASK 0x10000
+#define MME0_QM_CP_STS_RRDY_SHIFT 17
+#define MME0_QM_CP_STS_RRDY_MASK 0x20000
+#define MME0_QM_CP_STS_MRDY_SHIFT 18
+#define MME0_QM_CP_STS_MRDY_MASK 0x40000
+#define MME0_QM_CP_STS_SW_STOP_SHIFT 19
+#define MME0_QM_CP_STS_SW_STOP_MASK 0x80000
+#define MME0_QM_CP_STS_FENCE_ID_SHIFT 20
+#define MME0_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define MME0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define MME0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* MME0_QM_CP_CURRENT_INST_LO */
+#define MME0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define MME0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_CURRENT_INST_HI */
+#define MME0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define MME0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_BARRIER_CFG */
+#define MME0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define MME0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+#define MME0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16
+#define MME0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000
+
+/* MME0_QM_CP_DBG_0 */
+#define MME0_QM_CP_DBG_0_CS_SHIFT 0
+#define MME0_QM_CP_DBG_0_CS_MASK 0xF
+#define MME0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4
+#define MME0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10
+#define MME0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5
+#define MME0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20
+#define MME0_QM_CP_DBG_0_MREB_STALL_SHIFT 6
+#define MME0_QM_CP_DBG_0_MREB_STALL_MASK 0x40
+#define MME0_QM_CP_DBG_0_STALL_SHIFT 7
+#define MME0_QM_CP_DBG_0_STALL_MASK 0x80
+
+/* MME0_QM_CP_ARUSER_31_11 */
+#define MME0_QM_CP_ARUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_CP_AWUSER_31_11 */
+#define MME0_QM_CP_AWUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_ARB_CFG_0 */
+#define MME0_QM_ARB_CFG_0_TYPE_SHIFT 0
+#define MME0_QM_ARB_CFG_0_TYPE_MASK 0x1
+#define MME0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4
+#define MME0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10
+#define MME0_QM_ARB_CFG_0_EN_SHIFT 8
+#define MME0_QM_ARB_CFG_0_EN_MASK 0x100
+#define MME0_QM_ARB_CFG_0_MASK_SHIFT 12
+#define MME0_QM_ARB_CFG_0_MASK_MASK 0xF000
+#define MME0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16
+#define MME0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000
+
+/* MME0_QM_ARB_CHOISE_Q_PUSH */
+#define MME0_QM_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0
+#define MME0_QM_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3
+
+/* MME0_QM_ARB_WRR_WEIGHT */
+#define MME0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0
+#define MME0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_CFG_1 */
+#define MME0_QM_ARB_CFG_1_CLR_SHIFT 0
+#define MME0_QM_ARB_CFG_1_CLR_MASK 0x1
+
+/* MME0_QM_ARB_MST_AVAIL_CRED */
+#define MME0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F
+
+/* MME0_QM_ARB_MST_CRED_INC */
+#define MME0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_MST_CHOISE_PUSH_OFST */
+#define MME0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_SLV_MASTER_INC_CRED_OFST */
+#define MME0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0
+#define MME0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_MST_SLAVE_EN */
+#define MME0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_MST_QUIET_PER */
+#define MME0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_SLV_CHOISE_WDT */
+#define MME0_QM_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0
+#define MME0_QM_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_SLV_ID */
+#define MME0_QM_ARB_SLV_ID_VAL_SHIFT 0
+#define MME0_QM_ARB_SLV_ID_VAL_MASK 0x1F
+
+/* MME0_QM_ARB_MSG_MAX_INFLIGHT */
+#define MME0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0
+#define MME0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F
+
+/* MME0_QM_ARB_MSG_AWUSER_31_11 */
+#define MME0_QM_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_ARB_MSG_AWUSER_SEC_PROP */
+#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0
+#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF
+#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10
+#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400
+
+/* MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP */
+#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0
+#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF
+#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10
+#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400
+
+/* MME0_QM_ARB_BASE_LO */
+#define MME0_QM_ARB_BASE_LO_VAL_SHIFT 0
+#define MME0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_BASE_HI */
+#define MME0_QM_ARB_BASE_HI_VAL_SHIFT 0
+#define MME0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_STATE_STS */
+#define MME0_QM_ARB_STATE_STS_VAL_SHIFT 0
+#define MME0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_CHOISE_FULLNESS_STS */
+#define MME0_QM_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0
+#define MME0_QM_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F
+
+/* MME0_QM_ARB_MSG_STS */
+#define MME0_QM_ARB_MSG_STS_FULL_SHIFT 0
+#define MME0_QM_ARB_MSG_STS_FULL_MASK 0x1
+#define MME0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1
+#define MME0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2
+
+/* MME0_QM_ARB_SLV_CHOISE_Q_HEAD */
+#define MME0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0
+#define MME0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3
+
+/* MME0_QM_ARB_ERR_CAUSE */
+#define MME0_QM_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0
+#define MME0_QM_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1
+#define MME0_QM_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1
+#define MME0_QM_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2
+#define MME0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2
+#define MME0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4
+
+/* MME0_QM_ARB_ERR_MSG_EN */
+#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0
+#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
+#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1
+#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
+#define MME0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2
+#define MME0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+/* MME0_QM_ARB_ERR_STS_DRP */
+#define MME0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0
+#define MME0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3
+
+/* MME0_QM_ARB_MST_CRED_STS */
+#define MME0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F
+
+/* MME0_QM_CGM_CFG */
+#define MME0_QM_CGM_CFG_IDLE_TH_SHIFT 0
+#define MME0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF
+#define MME0_QM_CGM_CFG_G2F_TH_SHIFT 16
+#define MME0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000
+#define MME0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT 24
+#define MME0_QM_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000
+#define MME0_QM_CGM_CFG_EN_SHIFT 31
+#define MME0_QM_CGM_CFG_EN_MASK 0x80000000
+
+/* MME0_QM_CGM_STS */
+#define MME0_QM_CGM_STS_ST_SHIFT 0
+#define MME0_QM_CGM_STS_ST_MASK 0x3
+#define MME0_QM_CGM_STS_CG_SHIFT 4
+#define MME0_QM_CGM_STS_CG_MASK 0x10
+#define MME0_QM_CGM_STS_AGENT_IDLE_SHIFT 8
+#define MME0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
+#define MME0_QM_CGM_STS_AXI_IDLE_SHIFT 9
+#define MME0_QM_CGM_STS_AXI_IDLE_MASK 0x200
+#define MME0_QM_CGM_STS_CP_IDLE_SHIFT 10
+#define MME0_QM_CGM_STS_CP_IDLE_MASK 0x400
+
+/* MME0_QM_CGM_CFG1 */
+#define MME0_QM_CGM_CFG1_MASK_TH_SHIFT 0
+#define MME0_QM_CGM_CFG1_MASK_TH_MASK 0xFF
+
+/* MME0_QM_LOCAL_RANGE_BASE */
+#define MME0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0
+#define MME0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF
+
+/* MME0_QM_LOCAL_RANGE_SIZE */
+#define MME0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0
+#define MME0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF
+
+/* MME0_QM_CSMR_STRICT_PRIO_CFG */
+#define MME0_QM_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0
+#define MME0_QM_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1
+
+/* MME0_QM_HBW_RD_RATE_LIM_CFG_1 */
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* MME0_QM_LBW_WR_RATE_LIM_CFG_0 */
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* MME0_QM_LBW_WR_RATE_LIM_CFG_1 */
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* MME0_QM_HBW_RD_RATE_LIM_CFG_0 */
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* MME0_QM_GLBL_AXCACHE */
+#define MME0_QM_GLBL_AXCACHE_AR_SHIFT 0
+#define MME0_QM_GLBL_AXCACHE_AR_MASK 0xF
+#define MME0_QM_GLBL_AXCACHE_AW_SHIFT 16
+#define MME0_QM_GLBL_AXCACHE_AW_MASK 0xF0000
+
+/* MME0_QM_IND_GW_APB_CFG */
+#define MME0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0
+#define MME0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF
+#define MME0_QM_IND_GW_APB_CFG_CMD_SHIFT 31
+#define MME0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000
+
+/* MME0_QM_IND_GW_APB_WDATA */
+#define MME0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0
+#define MME0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_IND_GW_APB_RDATA */
+#define MME0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0
+#define MME0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_IND_GW_APB_STATUS */
+#define MME0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0
+#define MME0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1
+#define MME0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1
+#define MME0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2
+
+/* MME0_QM_GLBL_ERR_ADDR_LO */
+#define MME0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_GLBL_ERR_ADDR_HI */
+#define MME0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_GLBL_ERR_WDATA */
+#define MME0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define MME0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_GLBL_MEM_INIT_BUSY */
+#define MME0_QM_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0
+#define MME0_QM_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF
+
+#endif /* ASIC_REG_MME0_QM_MASKS_H_ */
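
Each register field in the file above is described by a _SHIFT/_MASK pair, where the mask is already positioned at the shifted bit location. A minimal sketch of the usual extract and read-modify-write idiom follows; the helper names are illustrative assumptions, not definitions from this patch:

#include <linux/types.h>

/* Extract a field: mask it in place first, then shift down to bit 0. */
static inline u32 qm_get_field(u32 reg_val, u32 mask, u32 shift)
{
	return (reg_val & mask) >> shift;
}

/* Update a field in a register value without disturbing its neighbours. */
static inline u32 qm_set_field(u32 reg_val, u32 mask, u32 shift, u32 field)
{
	return (reg_val & ~mask) | ((field << shift) & mask);
}

/* Example: the 4-bit PQF_IDLE field of GLBL_STS0 reads all-ones when
 * every PQ fetcher is idle.
 */
static inline bool mme0_qm_pqf_idle(u32 glbl_sts0)
{
	return qm_get_field(glbl_sts0, MME0_QM_GLBL_STS0_PQF_IDLE_MASK,
			    MME0_QM_GLBL_STS0_PQF_IDLE_SHIFT) == 0xF;
}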
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
new file mode 100644
index 000000000000..4f078b328b00
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME0_QM_REGS_H_
+#define ASIC_REG_MME0_QM_REGS_H_
+
+/*
+ *****************************************
+ * MME0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmMME0_QM_GLBL_CFG0 0x68000
+
+#define mmMME0_QM_GLBL_CFG1 0x68004
+
+#define mmMME0_QM_GLBL_PROT 0x68008
+
+#define mmMME0_QM_GLBL_ERR_CFG 0x6800C
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_0 0x68010
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_1 0x68014
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_2 0x68018
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_3 0x6801C
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_4 0x68020
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_0 0x68024
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_1 0x68028
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_2 0x6802C
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_3 0x68030
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_4 0x68034
+
+#define mmMME0_QM_GLBL_STS0 0x68038
+
+#define mmMME0_QM_GLBL_STS1_0 0x68040
+
+#define mmMME0_QM_GLBL_STS1_1 0x68044
+
+#define mmMME0_QM_GLBL_STS1_2 0x68048
+
+#define mmMME0_QM_GLBL_STS1_3 0x6804C
+
+#define mmMME0_QM_GLBL_STS1_4 0x68050
+
+#define mmMME0_QM_GLBL_MSG_EN_0 0x68054
+
+#define mmMME0_QM_GLBL_MSG_EN_1 0x68058
+
+#define mmMME0_QM_GLBL_MSG_EN_2 0x6805C
+
+#define mmMME0_QM_GLBL_MSG_EN_3 0x68060
+
+#define mmMME0_QM_GLBL_MSG_EN_4 0x68068
+
+#define mmMME0_QM_PQ_BASE_LO_0 0x68070
+
+#define mmMME0_QM_PQ_BASE_LO_1 0x68074
+
+#define mmMME0_QM_PQ_BASE_LO_2 0x68078
+
+#define mmMME0_QM_PQ_BASE_LO_3 0x6807C
+
+#define mmMME0_QM_PQ_BASE_HI_0 0x68080
+
+#define mmMME0_QM_PQ_BASE_HI_1 0x68084
+
+#define mmMME0_QM_PQ_BASE_HI_2 0x68088
+
+#define mmMME0_QM_PQ_BASE_HI_3 0x6808C
+
+#define mmMME0_QM_PQ_SIZE_0 0x68090
+
+#define mmMME0_QM_PQ_SIZE_1 0x68094
+
+#define mmMME0_QM_PQ_SIZE_2 0x68098
+
+#define mmMME0_QM_PQ_SIZE_3 0x6809C
+
+#define mmMME0_QM_PQ_PI_0 0x680A0
+
+#define mmMME0_QM_PQ_PI_1 0x680A4
+
+#define mmMME0_QM_PQ_PI_2 0x680A8
+
+#define mmMME0_QM_PQ_PI_3 0x680AC
+
+#define mmMME0_QM_PQ_CI_0 0x680B0
+
+#define mmMME0_QM_PQ_CI_1 0x680B4
+
+#define mmMME0_QM_PQ_CI_2 0x680B8
+
+#define mmMME0_QM_PQ_CI_3 0x680BC
+
+#define mmMME0_QM_PQ_CFG0_0 0x680C0
+
+#define mmMME0_QM_PQ_CFG0_1 0x680C4
+
+#define mmMME0_QM_PQ_CFG0_2 0x680C8
+
+#define mmMME0_QM_PQ_CFG0_3 0x680CC
+
+#define mmMME0_QM_PQ_CFG1_0 0x680D0
+
+#define mmMME0_QM_PQ_CFG1_1 0x680D4
+
+#define mmMME0_QM_PQ_CFG1_2 0x680D8
+
+#define mmMME0_QM_PQ_CFG1_3 0x680DC
+
+#define mmMME0_QM_PQ_ARUSER_31_11_0 0x680E0
+
+#define mmMME0_QM_PQ_ARUSER_31_11_1 0x680E4
+
+#define mmMME0_QM_PQ_ARUSER_31_11_2 0x680E8
+
+#define mmMME0_QM_PQ_ARUSER_31_11_3 0x680EC
+
+#define mmMME0_QM_PQ_STS0_0 0x680F0
+
+#define mmMME0_QM_PQ_STS0_1 0x680F4
+
+#define mmMME0_QM_PQ_STS0_2 0x680F8
+
+#define mmMME0_QM_PQ_STS0_3 0x680FC
+
+#define mmMME0_QM_PQ_STS1_0 0x68100
+
+#define mmMME0_QM_PQ_STS1_1 0x68104
+
+#define mmMME0_QM_PQ_STS1_2 0x68108
+
+#define mmMME0_QM_PQ_STS1_3 0x6810C
+
+#define mmMME0_QM_CQ_CFG0_0 0x68110
+
+#define mmMME0_QM_CQ_CFG0_1 0x68114
+
+#define mmMME0_QM_CQ_CFG0_2 0x68118
+
+#define mmMME0_QM_CQ_CFG0_3 0x6811C
+
+#define mmMME0_QM_CQ_CFG0_4 0x68120
+
+#define mmMME0_QM_CQ_CFG1_0 0x68124
+
+#define mmMME0_QM_CQ_CFG1_1 0x68128
+
+#define mmMME0_QM_CQ_CFG1_2 0x6812C
+
+#define mmMME0_QM_CQ_CFG1_3 0x68130
+
+#define mmMME0_QM_CQ_CFG1_4 0x68134
+
+#define mmMME0_QM_CQ_ARUSER_31_11_0 0x68138
+
+#define mmMME0_QM_CQ_ARUSER_31_11_1 0x6813C
+
+#define mmMME0_QM_CQ_ARUSER_31_11_2 0x68140
+
+#define mmMME0_QM_CQ_ARUSER_31_11_3 0x68144
+
+#define mmMME0_QM_CQ_ARUSER_31_11_4 0x68148
+
+#define mmMME0_QM_CQ_STS0_0 0x6814C
+
+#define mmMME0_QM_CQ_STS0_1 0x68150
+
+#define mmMME0_QM_CQ_STS0_2 0x68154
+
+#define mmMME0_QM_CQ_STS0_3 0x68158
+
+#define mmMME0_QM_CQ_STS0_4 0x6815C
+
+#define mmMME0_QM_CQ_STS1_0 0x68160
+
+#define mmMME0_QM_CQ_STS1_1 0x68164
+
+#define mmMME0_QM_CQ_STS1_2 0x68168
+
+#define mmMME0_QM_CQ_STS1_3 0x6816C
+
+#define mmMME0_QM_CQ_STS1_4 0x68170
+
+#define mmMME0_QM_CQ_PTR_LO_0 0x68174
+
+#define mmMME0_QM_CQ_PTR_HI_0 0x68178
+
+#define mmMME0_QM_CQ_TSIZE_0 0x6817C
+
+#define mmMME0_QM_CQ_CTL_0 0x68180
+
+#define mmMME0_QM_CQ_PTR_LO_1 0x68184
+
+#define mmMME0_QM_CQ_PTR_HI_1 0x68188
+
+#define mmMME0_QM_CQ_TSIZE_1 0x6818C
+
+#define mmMME0_QM_CQ_CTL_1 0x68190
+
+#define mmMME0_QM_CQ_PTR_LO_2 0x68194
+
+#define mmMME0_QM_CQ_PTR_HI_2 0x68198
+
+#define mmMME0_QM_CQ_TSIZE_2 0x6819C
+
+#define mmMME0_QM_CQ_CTL_2 0x681A0
+
+#define mmMME0_QM_CQ_PTR_LO_3 0x681A4
+
+#define mmMME0_QM_CQ_PTR_HI_3 0x681A8
+
+#define mmMME0_QM_CQ_TSIZE_3 0x681AC
+
+#define mmMME0_QM_CQ_CTL_3 0x681B0
+
+#define mmMME0_QM_CQ_PTR_LO_4 0x681B4
+
+#define mmMME0_QM_CQ_PTR_HI_4 0x681B8
+
+#define mmMME0_QM_CQ_TSIZE_4 0x681BC
+
+#define mmMME0_QM_CQ_CTL_4 0x681C0
+
+#define mmMME0_QM_CQ_PTR_LO_STS_0 0x681C4
+
+#define mmMME0_QM_CQ_PTR_LO_STS_1 0x681C8
+
+#define mmMME0_QM_CQ_PTR_LO_STS_2 0x681CC
+
+#define mmMME0_QM_CQ_PTR_LO_STS_3 0x681D0
+
+#define mmMME0_QM_CQ_PTR_LO_STS_4 0x681D4
+
+#define mmMME0_QM_CQ_PTR_HI_STS_0 0x681D8
+
+#define mmMME0_QM_CQ_PTR_HI_STS_1 0x681DC
+
+#define mmMME0_QM_CQ_PTR_HI_STS_2 0x681E0
+
+#define mmMME0_QM_CQ_PTR_HI_STS_3 0x681E4
+
+#define mmMME0_QM_CQ_PTR_HI_STS_4 0x681E8
+
+#define mmMME0_QM_CQ_TSIZE_STS_0 0x681EC
+
+#define mmMME0_QM_CQ_TSIZE_STS_1 0x681F0
+
+#define mmMME0_QM_CQ_TSIZE_STS_2 0x681F4
+
+#define mmMME0_QM_CQ_TSIZE_STS_3 0x681F8
+
+#define mmMME0_QM_CQ_TSIZE_STS_4 0x681FC
+
+#define mmMME0_QM_CQ_CTL_STS_0 0x68200
+
+#define mmMME0_QM_CQ_CTL_STS_1 0x68204
+
+#define mmMME0_QM_CQ_CTL_STS_2 0x68208
+
+#define mmMME0_QM_CQ_CTL_STS_3 0x6820C
+
+#define mmMME0_QM_CQ_CTL_STS_4 0x68210
+
+#define mmMME0_QM_CQ_IFIFO_CNT_0 0x68214
+
+#define mmMME0_QM_CQ_IFIFO_CNT_1 0x68218
+
+#define mmMME0_QM_CQ_IFIFO_CNT_2 0x6821C
+
+#define mmMME0_QM_CQ_IFIFO_CNT_3 0x68220
+
+#define mmMME0_QM_CQ_IFIFO_CNT_4 0x68224
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 0x68228
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_1 0x6822C
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_2 0x68230
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_3 0x68234
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_4 0x68238
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 0x6823C
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_1 0x68240
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_2 0x68244
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_3 0x68248
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_4 0x6824C
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 0x68250
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_1 0x68254
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_2 0x68258
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_3 0x6825C
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_4 0x68260
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 0x68264
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_1 0x68268
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_2 0x6826C
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_3 0x68270
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_4 0x68274
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_0 0x68278
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_1 0x6827C
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 0x68280
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_3 0x68284
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_4 0x68288
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_0 0x6828C
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_1 0x68290
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_2 0x68294
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_3 0x68298
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_4 0x6829C
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_0 0x682A0
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_1 0x682A4
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_2 0x682A8
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_3 0x682AC
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_4 0x682B0
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_0 0x682B4
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_1 0x682B8
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_2 0x682BC
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_3 0x682C0
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_4 0x682C4
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 0x682C8
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_1 0x682CC
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_2 0x682D0
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_3 0x682D4
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_4 0x682D8
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x682E0
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x682E4
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x682E8
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x682EC
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x682F0
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x682F4
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x682F8
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x682FC
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x68300
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x68304
+
+#define mmMME0_QM_CP_FENCE0_RDATA_0 0x68308
+
+#define mmMME0_QM_CP_FENCE0_RDATA_1 0x6830C
+
+#define mmMME0_QM_CP_FENCE0_RDATA_2 0x68310
+
+#define mmMME0_QM_CP_FENCE0_RDATA_3 0x68314
+
+#define mmMME0_QM_CP_FENCE0_RDATA_4 0x68318
+
+#define mmMME0_QM_CP_FENCE1_RDATA_0 0x6831C
+
+#define mmMME0_QM_CP_FENCE1_RDATA_1 0x68320
+
+#define mmMME0_QM_CP_FENCE1_RDATA_2 0x68324
+
+#define mmMME0_QM_CP_FENCE1_RDATA_3 0x68328
+
+#define mmMME0_QM_CP_FENCE1_RDATA_4 0x6832C
+
+#define mmMME0_QM_CP_FENCE2_RDATA_0 0x68330
+
+#define mmMME0_QM_CP_FENCE2_RDATA_1 0x68334
+
+#define mmMME0_QM_CP_FENCE2_RDATA_2 0x68338
+
+#define mmMME0_QM_CP_FENCE2_RDATA_3 0x6833C
+
+#define mmMME0_QM_CP_FENCE2_RDATA_4 0x68340
+
+#define mmMME0_QM_CP_FENCE3_RDATA_0 0x68344
+
+#define mmMME0_QM_CP_FENCE3_RDATA_1 0x68348
+
+#define mmMME0_QM_CP_FENCE3_RDATA_2 0x6834C
+
+#define mmMME0_QM_CP_FENCE3_RDATA_3 0x68350
+
+#define mmMME0_QM_CP_FENCE3_RDATA_4 0x68354
+
+#define mmMME0_QM_CP_FENCE0_CNT_0 0x68358
+
+#define mmMME0_QM_CP_FENCE0_CNT_1 0x6835C
+
+#define mmMME0_QM_CP_FENCE0_CNT_2 0x68360
+
+#define mmMME0_QM_CP_FENCE0_CNT_3 0x68364
+
+#define mmMME0_QM_CP_FENCE0_CNT_4 0x68368
+
+#define mmMME0_QM_CP_FENCE1_CNT_0 0x6836C
+
+#define mmMME0_QM_CP_FENCE1_CNT_1 0x68370
+
+#define mmMME0_QM_CP_FENCE1_CNT_2 0x68374
+
+#define mmMME0_QM_CP_FENCE1_CNT_3 0x68378
+
+#define mmMME0_QM_CP_FENCE1_CNT_4 0x6837C
+
+#define mmMME0_QM_CP_FENCE2_CNT_0 0x68380
+
+#define mmMME0_QM_CP_FENCE2_CNT_1 0x68384
+
+#define mmMME0_QM_CP_FENCE2_CNT_2 0x68388
+
+#define mmMME0_QM_CP_FENCE2_CNT_3 0x6838C
+
+#define mmMME0_QM_CP_FENCE2_CNT_4 0x68390
+
+#define mmMME0_QM_CP_FENCE3_CNT_0 0x68394
+
+#define mmMME0_QM_CP_FENCE3_CNT_1 0x68398
+
+#define mmMME0_QM_CP_FENCE3_CNT_2 0x6839C
+
+#define mmMME0_QM_CP_FENCE3_CNT_3 0x683A0
+
+#define mmMME0_QM_CP_FENCE3_CNT_4 0x683A4
+
+#define mmMME0_QM_CP_STS_0 0x683A8
+
+#define mmMME0_QM_CP_STS_1 0x683AC
+
+#define mmMME0_QM_CP_STS_2 0x683B0
+
+#define mmMME0_QM_CP_STS_3 0x683B4
+
+#define mmMME0_QM_CP_STS_4 0x683B8
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_0 0x683BC
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_1 0x683C0
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_2 0x683C4
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_3 0x683C8
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_4 0x683CC
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_0 0x683D0
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_1 0x683D4
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_2 0x683D8
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_3 0x683DC
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_4 0x683E0
+
+#define mmMME0_QM_CP_BARRIER_CFG_0 0x683F4
+
+#define mmMME0_QM_CP_BARRIER_CFG_1 0x683F8
+
+#define mmMME0_QM_CP_BARRIER_CFG_2 0x683FC
+
+#define mmMME0_QM_CP_BARRIER_CFG_3 0x68400
+
+#define mmMME0_QM_CP_BARRIER_CFG_4 0x68404
+
+#define mmMME0_QM_CP_DBG_0_0 0x68408
+
+#define mmMME0_QM_CP_DBG_0_1 0x6840C
+
+#define mmMME0_QM_CP_DBG_0_2 0x68410
+
+#define mmMME0_QM_CP_DBG_0_3 0x68414
+
+#define mmMME0_QM_CP_DBG_0_4 0x68418
+
+#define mmMME0_QM_CP_ARUSER_31_11_0 0x6841C
+
+#define mmMME0_QM_CP_ARUSER_31_11_1 0x68420
+
+#define mmMME0_QM_CP_ARUSER_31_11_2 0x68424
+
+#define mmMME0_QM_CP_ARUSER_31_11_3 0x68428
+
+#define mmMME0_QM_CP_ARUSER_31_11_4 0x6842C
+
+#define mmMME0_QM_CP_AWUSER_31_11_0 0x68430
+
+#define mmMME0_QM_CP_AWUSER_31_11_1 0x68434
+
+#define mmMME0_QM_CP_AWUSER_31_11_2 0x68438
+
+#define mmMME0_QM_CP_AWUSER_31_11_3 0x6843C
+
+#define mmMME0_QM_CP_AWUSER_31_11_4 0x68440
+
+#define mmMME0_QM_ARB_CFG_0 0x68A00
+
+#define mmMME0_QM_ARB_CHOISE_Q_PUSH 0x68A04
+
+#define mmMME0_QM_ARB_WRR_WEIGHT_0 0x68A08
+
+#define mmMME0_QM_ARB_WRR_WEIGHT_1 0x68A0C
+
+#define mmMME0_QM_ARB_WRR_WEIGHT_2 0x68A10
+
+#define mmMME0_QM_ARB_WRR_WEIGHT_3 0x68A14
+
+#define mmMME0_QM_ARB_CFG_1 0x68A18
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_0 0x68A20
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_1 0x68A24
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_2 0x68A28
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_3 0x68A2C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_4 0x68A30
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_5 0x68A34
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_6 0x68A38
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_7 0x68A3C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_8 0x68A40
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_9 0x68A44
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_10 0x68A48
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_11 0x68A4C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_12 0x68A50
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_13 0x68A54
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_14 0x68A58
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_15 0x68A5C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_16 0x68A60
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_17 0x68A64
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_18 0x68A68
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_19 0x68A6C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_20 0x68A70
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_21 0x68A74
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_22 0x68A78
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_23 0x68A7C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_24 0x68A80
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_25 0x68A84
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_26 0x68A88
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_27 0x68A8C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_28 0x68A90
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_29 0x68A94
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_30 0x68A98
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_31 0x68A9C
+
+#define mmMME0_QM_ARB_MST_CRED_INC 0x68AA0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x68AA4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x68AA8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x68AAC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x68AB0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x68AB4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x68AB8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x68ABC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x68AC0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x68AC4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x68AC8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x68ACC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x68AD0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x68AD4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x68AD8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x68ADC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x68AE0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x68AE4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x68AE8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x68AEC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x68AF0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x68AF4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x68AF8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x68AFC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x68B00
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x68B04
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x68B08
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x68B0C
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x68B10
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x68B14
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x68B18
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x68B1C
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x68B20
+
+#define mmMME0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x68B28
+
+#define mmMME0_QM_ARB_MST_SLAVE_EN 0x68B2C
+
+#define mmMME0_QM_ARB_MST_QUIET_PER 0x68B34
+
+#define mmMME0_QM_ARB_SLV_CHOISE_WDT 0x68B38
+
+#define mmMME0_QM_ARB_SLV_ID 0x68B3C
+
+#define mmMME0_QM_ARB_MSG_MAX_INFLIGHT 0x68B44
+
+#define mmMME0_QM_ARB_MSG_AWUSER_31_11 0x68B48
+
+#define mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP 0x68B4C
+
+#define mmMME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x68B50
+
+#define mmMME0_QM_ARB_BASE_LO 0x68B54
+
+#define mmMME0_QM_ARB_BASE_HI 0x68B58
+
+#define mmMME0_QM_ARB_STATE_STS 0x68B80
+
+#define mmMME0_QM_ARB_CHOISE_FULLNESS_STS 0x68B84
+
+#define mmMME0_QM_ARB_MSG_STS 0x68B88
+
+#define mmMME0_QM_ARB_SLV_CHOISE_Q_HEAD 0x68B8C
+
+#define mmMME0_QM_ARB_ERR_CAUSE 0x68B9C
+
+#define mmMME0_QM_ARB_ERR_MSG_EN 0x68BA0
+
+#define mmMME0_QM_ARB_ERR_STS_DRP 0x68BA8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_0 0x68BB0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_1 0x68BB4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_2 0x68BB8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_3 0x68BBC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_4 0x68BC0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_5 0x68BC4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_6 0x68BC8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_7 0x68BCC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_8 0x68BD0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_9 0x68BD4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_10 0x68BD8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_11 0x68BDC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_12 0x68BE0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_13 0x68BE4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_14 0x68BE8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_15 0x68BEC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_16 0x68BF0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_17 0x68BF4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_18 0x68BF8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_19 0x68BFC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_20 0x68C00
+
+#define mmMME0_QM_ARB_MST_CRED_STS_21 0x68C04
+
+#define mmMME0_QM_ARB_MST_CRED_STS_22 0x68C08
+
+#define mmMME0_QM_ARB_MST_CRED_STS_23 0x68C0C
+
+#define mmMME0_QM_ARB_MST_CRED_STS_24 0x68C10
+
+#define mmMME0_QM_ARB_MST_CRED_STS_25 0x68C14
+
+#define mmMME0_QM_ARB_MST_CRED_STS_26 0x68C18
+
+#define mmMME0_QM_ARB_MST_CRED_STS_27 0x68C1C
+
+#define mmMME0_QM_ARB_MST_CRED_STS_28 0x68C20
+
+#define mmMME0_QM_ARB_MST_CRED_STS_29 0x68C24
+
+#define mmMME0_QM_ARB_MST_CRED_STS_30 0x68C28
+
+#define mmMME0_QM_ARB_MST_CRED_STS_31 0x68C2C
+
+#define mmMME0_QM_CGM_CFG 0x68C70
+
+#define mmMME0_QM_CGM_STS 0x68C74
+
+#define mmMME0_QM_CGM_CFG1 0x68C78
+
+#define mmMME0_QM_LOCAL_RANGE_BASE 0x68C80
+
+#define mmMME0_QM_LOCAL_RANGE_SIZE 0x68C84
+
+#define mmMME0_QM_CSMR_STRICT_PRIO_CFG 0x68C90
+
+#define mmMME0_QM_HBW_RD_RATE_LIM_CFG_1 0x68C94
+
+#define mmMME0_QM_LBW_WR_RATE_LIM_CFG_0 0x68C98
+
+#define mmMME0_QM_LBW_WR_RATE_LIM_CFG_1 0x68C9C
+
+#define mmMME0_QM_HBW_RD_RATE_LIM_CFG_0 0x68CA0
+
+#define mmMME0_QM_GLBL_AXCACHE 0x68CA4
+
+#define mmMME0_QM_IND_GW_APB_CFG 0x68CB0
+
+#define mmMME0_QM_IND_GW_APB_WDATA 0x68CB4
+
+#define mmMME0_QM_IND_GW_APB_RDATA 0x68CB8
+
+#define mmMME0_QM_IND_GW_APB_STATUS 0x68CBC
+
+#define mmMME0_QM_GLBL_ERR_ADDR_LO 0x68CD0
+
+#define mmMME0_QM_GLBL_ERR_ADDR_HI 0x68CD4
+
+#define mmMME0_QM_GLBL_ERR_WDATA 0x68CD8
+
+#define mmMME0_QM_GLBL_MEM_INIT_BUSY 0x68D00
+
+#endif /* ASIC_REG_MME0_QM_REGS_H_ */
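
The offsets above are byte offsets into the device configuration space, so a 64-bit quantity such as a physical-queue base address is programmed as a LO/HI register pair. A minimal sketch, assuming an ioremapped qm_base pointer; the helper itself is not part of this patch:

	#include <linux/io.h>
	#include <linux/kernel.h>

	/*
	 * Illustrative only: split a 64-bit base address across the
	 * PQ_BASE_LO_0/PQ_BASE_HI_0 register pair of physical queue 0.
	 */
	static inline void mme0_qm_set_pq0_base(void __iomem *qm_base, u64 addr)
	{
		writel(lower_32_bits(addr), qm_base + mmMME0_QM_PQ_BASE_LO_0);
		writel(upper_32_bits(addr), qm_base + mmMME0_QM_PQ_BASE_HI_0);
	}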
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
new file mode 100644
index 000000000000..6c07f7d45490
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
@@ -0,0 +1,1456 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME1_CTRL_REGS_H_
+#define ASIC_REG_MME1_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * MME1_CTRL (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME1_CTRL_ARCH_STATUS 0xE0000
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_HIGH_S 0xE0008
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_HIGH_L 0xE000C
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_HIGH_O 0xE0010
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_LOW_S 0xE0014
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_LOW_L 0xE0018
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_LOW_O 0xE001C
+
+#define mmMME1_CTRL_ARCH_HEADER_LOW 0xE0020
+
+#define mmMME1_CTRL_ARCH_HEADER_HIGH 0xE0024
+
+#define mmMME1_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0xE0028
+
+#define mmMME1_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0xE002C
+
+#define mmMME1_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0xE0030
+
+#define mmMME1_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0xE0034
+
+#define mmMME1_CTRL_ARCH_OUTER_LOOP 0xE0038
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0xE003C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0xE0040
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0xE0044
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0xE0048
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0xE004C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0xE0050
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0xE0054
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0xE0058
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0xE005C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0xE0060
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0xE0064
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0xE0068
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0xE006C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0xE0070
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0xE0074
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0xE0078
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0xE007C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0xE0080
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0084
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0xE0088
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0xE008C
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0xE0090
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0xE0094
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0xE0098
+
+#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_0 0xE009C
+
+#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_1 0xE00A0
+
+#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_2 0xE00A4
+
+#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_3 0xE00A8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0xE00AC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0xE00B0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0xE00B4
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0xE00B8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0xE00BC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0xE00C0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0xE00C4
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0xE00C8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0xE00CC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0xE00D0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0xE00D4
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0xE00D8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0xE00DC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0xE00E0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0xE00E4
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0xE00E8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0xE00EC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0xE00F0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE00F4
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE00F8
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE00FC
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0100
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0104
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0108
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0xE010C
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0xE0110
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0xE0114
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0xE0118
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE011C
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0120
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0124
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0128
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE012C
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0xE0130
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0xE0134
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0xE0138
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0xE013C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0xE0140
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0xE0144
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0xE0148
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0xE014C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0xE0150
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0xE0154
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0xE0158
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0xE015C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0xE0160
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0xE0164
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0xE0168
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0xE016C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0xE0170
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0xE0174
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0xE0178
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0xE017C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0xE0180
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0xE0184
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0188
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE018C
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0190
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0194
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0198
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE019C
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0xE01A0
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0xE01A4
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0xE01A8
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0xE01AC
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE01B0
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE01B4
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE01B8
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE01BC
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE01C0
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0xE01C4
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0xE01C8
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0xE01CC
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0xE01D0
+
+#define mmMME1_CTRL_ARCH_DESC_SB_REPEAT 0xE01D4
+
+#define mmMME1_CTRL_ARCH_DESC_RATE_LIMITER 0xE01D8
+
+#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE01DC
+
+#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE01E0
+
+#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0xE01E4
+
+#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0xE01E8
+
+#define mmMME1_CTRL_ARCH_DESC_AXI_USER_DATA 0xE01EC
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_S 0xE01F0
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0xE01F4
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0xE01F8
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0xE01FC
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0xE0200
+
+#define mmMME1_CTRL_ARCH_DESC_PADDING_VALUE_S 0xE0204
+
+#define mmMME1_CTRL_ARCH_DESC_PADDING_VALUE_L 0xE0208
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_S 0xE020C
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0xE0210
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0xE0214
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0xE0218
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0xE021C
+
+#define mmMME1_CTRL_ARCH_DESC_PCU_RL_SATURATION 0xE0220
+
+#define mmMME1_CTRL_ARCH_DESC_DUMMY 0xE0224
+
+#define mmMME1_CTRL_CMD 0xE0280
+
+#define mmMME1_CTRL_STATUS1 0xE0284
+
+#define mmMME1_CTRL_RESET 0xE0288
+
+#define mmMME1_CTRL_QM_STALL 0xE028C
+
+#define mmMME1_CTRL_SYNC_OBJECT_FIFO_TH 0xE0290
+
+#define mmMME1_CTRL_EUS_ROLLUP_CNT_ADD 0xE0294
+
+#define mmMME1_CTRL_INTR_CAUSE 0xE0298
+
+#define mmMME1_CTRL_INTR_MASK 0xE029C
+
+#define mmMME1_CTRL_LOG_SHADOW 0xE02A0
+
+#define mmMME1_CTRL_PCU_RL_DESC0 0xE02A4
+
+#define mmMME1_CTRL_PCU_RL_TOKEN_UPDATE 0xE02A8
+
+#define mmMME1_CTRL_PCU_RL_TH 0xE02AC
+
+#define mmMME1_CTRL_PCU_RL_MIN 0xE02B0
+
+#define mmMME1_CTRL_PCU_RL_CTRL_EN 0xE02B4
+
+#define mmMME1_CTRL_PCU_RL_HISTORY_LOG_SIZE 0xE02B8
+
+#define mmMME1_CTRL_PCU_DUMMY_A_BF16 0xE02BC
+
+#define mmMME1_CTRL_PCU_DUMMY_B_BF16 0xE02C0
+
+#define mmMME1_CTRL_PCU_DUMMY_A_FP32_ODD 0xE02C4
+
+#define mmMME1_CTRL_PCU_DUMMY_A_FP32_EVEN 0xE02C8
+
+#define mmMME1_CTRL_PCU_DUMMY_B_FP32_ODD 0xE02CC
+
+#define mmMME1_CTRL_PCU_DUMMY_B_FP32_EVEN 0xE02D0
+
+#define mmMME1_CTRL_PROT 0xE02D4
+
+#define mmMME1_CTRL_EU_POWER_SAVE_DISABLE 0xE02D8
+
+#define mmMME1_CTRL_CS_DBG_BLOCK_ID 0xE02DC
+
+#define mmMME1_CTRL_CS_DBG_STATUS_DROP_CNT 0xE02E0
+
+#define mmMME1_CTRL_TE_CLOSE_CGATE 0xE02E4
+
+#define mmMME1_CTRL_AGU_SM_INFLIGHT_CNTR 0xE02E8
+
+#define mmMME1_CTRL_AGU_SM_TOTAL_CNTR 0xE02EC
+
+#define mmMME1_CTRL_EZSYNC_OUT_CREDIT 0xE02F0
+
+#define mmMME1_CTRL_PCU_RL_SAT_SEC 0xE02F4
+
+#define mmMME1_CTRL_AGU_SYNC_MSG_AXI_USER 0xE02F8
+
+#define mmMME1_CTRL_QM_SLV_LBW_CLK_EN 0xE02FC
+
+#define mmMME1_CTRL_SHADOW_0_STATUS 0xE0400
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0xE0408
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0xE040C
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0xE0410
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0xE0414
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0xE0418
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0xE041C
+
+#define mmMME1_CTRL_SHADOW_0_HEADER_LOW 0xE0420
+
+#define mmMME1_CTRL_SHADOW_0_HEADER_HIGH 0xE0424
+
+#define mmMME1_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0xE0428
+
+#define mmMME1_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0xE042C
+
+#define mmMME1_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0xE0430
+
+#define mmMME1_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0xE0434
+
+#define mmMME1_CTRL_SHADOW_0_OUTER_LOOP 0xE0438
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0xE043C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0xE0440
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0xE0444
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0xE0448
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0xE044C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0xE0450
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0xE0454
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0xE0458
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0xE045C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0xE0460
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0xE0464
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0xE0468
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0xE046C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0xE0470
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0xE0474
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0xE0478
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0xE047C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0xE0480
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0484
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0xE0488
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0xE048C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0xE0490
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0xE0494
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0xE0498
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0xE049C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0xE04A0
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0xE04A4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0xE04A8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0xE04AC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0xE04B0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0xE04B4
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0xE04B8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0xE04BC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0xE04C0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0xE04C4
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0xE04C8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0xE04CC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0xE04D0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0xE04D4
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0xE04D8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0xE04DC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0xE04E0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0xE04E4
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0xE04E8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0xE04EC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0xE04F0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE04F4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE04F8
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE04FC
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0500
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0504
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0508
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0xE050C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0xE0510
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0xE0514
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0xE0518
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE051C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0520
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0524
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0528
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE052C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0xE0530
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0xE0534
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0xE0538
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0xE053C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0xE0540
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0xE0544
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0xE0548
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0xE054C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0xE0550
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0xE0554
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0xE0558
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0xE055C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0xE0560
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0xE0564
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0xE0568
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0xE056C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0xE0570
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0xE0574
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0xE0578
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0xE057C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0xE0580
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0xE0584
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0588
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE058C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0590
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0594
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0598
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE059C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0xE05A0
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0xE05A4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0xE05A8
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0xE05AC
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE05B0
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE05B4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE05B8
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE05BC
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE05C0
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0xE05C4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0xE05C8
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0xE05CC
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0xE05D0
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SB_REPEAT 0xE05D4
+
+#define mmMME1_CTRL_SHADOW_0_DESC_RATE_LIMITER 0xE05D8
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE05DC
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE05E0
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0xE05E4
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0xE05E8
+
+#define mmMME1_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0xE05EC
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_S 0xE05F0
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0xE05F4
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0xE05F8
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0xE05FC
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0xE0600
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0xE0604
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0xE0608
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0xE060C
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0xE0610
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0xE0614
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0xE0618
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0xE061C
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0xE0620
+
+#define mmMME1_CTRL_SHADOW_0_DESC_DUMMY 0xE0624
+
+#define mmMME1_CTRL_SHADOW_1_STATUS 0xE0680
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0xE0688
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0xE068C
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0xE0690
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0xE0694
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0xE0698
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0xE069C
+
+#define mmMME1_CTRL_SHADOW_1_HEADER_LOW 0xE06A0
+
+#define mmMME1_CTRL_SHADOW_1_HEADER_HIGH 0xE06A4
+
+#define mmMME1_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0xE06A8
+
+#define mmMME1_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0xE06AC
+
+#define mmMME1_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0xE06B0
+
+#define mmMME1_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0xE06B4
+
+#define mmMME1_CTRL_SHADOW_1_OUTER_LOOP 0xE06B8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0xE06BC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0xE06C0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0xE06C4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0xE06C8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0xE06CC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0xE06D0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0xE06D4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0xE06D8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0xE06DC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0xE06E0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0xE06E4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0xE06E8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0xE06EC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0xE06F0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0xE06F4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0xE06F8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0xE06FC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0xE0700
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0704
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0xE0708
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0xE070C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0xE0710
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0xE0714
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0xE0718
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0xE071C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0xE0720
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0xE0724
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0xE0728
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0xE072C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0xE0730
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0xE0734
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0xE0738
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0xE073C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0xE0740
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0xE0744
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0xE0748
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0xE074C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0xE0750
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0xE0754
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0xE0758
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0xE075C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0xE0760
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0xE0764
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0xE0768
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0xE076C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0xE0770
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE0774
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE0778
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE077C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0780
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0784
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0788
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0xE078C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0xE0790
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0xE0794
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0xE0798
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE079C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE07A0
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE07A4
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE07A8
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE07AC
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0xE07B0
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0xE07B4
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0xE07B8
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0xE07BC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0xE07C0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0xE07C4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0xE07C8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0xE07CC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0xE07D0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0xE07D4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0xE07D8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0xE07DC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0xE07E0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0xE07E4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0xE07E8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0xE07EC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0xE07F0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0xE07F4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0xE07F8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0xE07FC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0xE0800
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0xE0804
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0808
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE080C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0810
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0814
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0818
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE081C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0xE0820
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0xE0824
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0xE0828
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0xE082C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE0830
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE0834
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE0838
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE083C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE0840
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0xE0844
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0xE0848
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0xE084C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0xE0850
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SB_REPEAT 0xE0854
+
+#define mmMME1_CTRL_SHADOW_1_DESC_RATE_LIMITER 0xE0858
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE085C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE0860
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0xE0864
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0xE0868
+
+#define mmMME1_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0xE086C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_S 0xE0870
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0xE0874
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0xE0878
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0xE087C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0xE0880
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0xE0884
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0xE0888
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0xE088C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0xE0890
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0xE0894
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0xE0898
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0xE089C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0xE08A0
+
+#define mmMME1_CTRL_SHADOW_1_DESC_DUMMY 0xE08A4
+
+#define mmMME1_CTRL_SHADOW_2_STATUS 0xE0900
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0xE0908
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0xE090C
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0xE0910
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0xE0914
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0xE0918
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0xE091C
+
+#define mmMME1_CTRL_SHADOW_2_HEADER_LOW 0xE0920
+
+#define mmMME1_CTRL_SHADOW_2_HEADER_HIGH 0xE0924
+
+#define mmMME1_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0xE0928
+
+#define mmMME1_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0xE092C
+
+#define mmMME1_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0xE0930
+
+#define mmMME1_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0xE0934
+
+#define mmMME1_CTRL_SHADOW_2_OUTER_LOOP 0xE0938
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0xE093C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0xE0940
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0xE0944
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0xE0948
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0xE094C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0xE0950
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0xE0954
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0xE0958
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0xE095C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0xE0960
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0xE0964
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0xE0968
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0xE096C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0xE0970
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0xE0974
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0xE0978
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0xE097C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0xE0980
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0984
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0xE0988
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0xE098C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0xE0990
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0xE0994
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0xE0998
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0xE099C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0xE09A0
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0xE09A4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0xE09A8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0xE09AC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0xE09B0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0xE09B4
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0xE09B8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0xE09BC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0xE09C0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0xE09C4
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0xE09C8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0xE09CC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0xE09D0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0xE09D4
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0xE09D8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0xE09DC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0xE09E0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0xE09E4
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0xE09E8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0xE09EC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0xE09F0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE09F4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE09F8
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE09FC
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0A00
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0A04
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0A08
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0xE0A0C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0xE0A10
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0xE0A14
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0xE0A18
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE0A1C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0A20
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0A24
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0A28
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE0A2C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0xE0A30
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0xE0A34
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0xE0A38
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0xE0A3C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0xE0A40
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0xE0A44
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0xE0A48
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0xE0A4C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0xE0A50
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0xE0A54
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0xE0A58
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0xE0A5C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0xE0A60
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0xE0A64
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0xE0A68
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0xE0A6C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0xE0A70
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0xE0A74
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0xE0A78
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0xE0A7C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0xE0A80
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0xE0A84
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0A88
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE0A8C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0A90
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0A94
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0A98
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE0A9C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0xE0AA0
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0xE0AA4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0xE0AA8
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0xE0AAC
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE0AB0
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE0AB4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE0AB8
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE0ABC
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE0AC0
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0xE0AC4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0xE0AC8
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0xE0ACC
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0xE0AD0
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SB_REPEAT 0xE0AD4
+
+#define mmMME1_CTRL_SHADOW_2_DESC_RATE_LIMITER 0xE0AD8
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE0ADC
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE0AE0
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0xE0AE4
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0xE0AE8
+
+#define mmMME1_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0xE0AEC
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_S 0xE0AF0
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0xE0AF4
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0xE0AF8
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0xE0AFC
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0xE0B00
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0xE0B04
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0xE0B08
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0xE0B0C
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0xE0B10
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0xE0B14
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0xE0B18
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0xE0B1C
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0xE0B20
+
+#define mmMME1_CTRL_SHADOW_2_DESC_DUMMY 0xE0B24
+
+#define mmMME1_CTRL_SHADOW_3_STATUS 0xE0B80
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0xE0B88
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0xE0B8C
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0xE0B90
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0xE0B94
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0xE0B98
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0xE0B9C
+
+#define mmMME1_CTRL_SHADOW_3_HEADER_LOW 0xE0BA0
+
+#define mmMME1_CTRL_SHADOW_3_HEADER_HIGH 0xE0BA4
+
+#define mmMME1_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0xE0BA8
+
+#define mmMME1_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0xE0BAC
+
+#define mmMME1_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0xE0BB0
+
+#define mmMME1_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0xE0BB4
+
+#define mmMME1_CTRL_SHADOW_3_OUTER_LOOP 0xE0BB8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0xE0BBC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0xE0BC0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0xE0BC4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0xE0BC8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0xE0BCC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0xE0BD0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0xE0BD4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0xE0BD8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0xE0BDC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0xE0BE0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0xE0BE4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0xE0BE8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0xE0BEC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0xE0BF0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0xE0BF4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0xE0BF8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0xE0BFC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0xE0C00
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0C04
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0xE0C08
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0xE0C0C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0xE0C10
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0xE0C14
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0xE0C18
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0xE0C1C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0xE0C20
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0xE0C24
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0xE0C28
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0xE0C2C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0xE0C30
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0xE0C34
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0xE0C38
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0xE0C3C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0xE0C40
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0xE0C44
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0xE0C48
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0xE0C4C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0xE0C50
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0xE0C54
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0xE0C58
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0xE0C5C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0xE0C60
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0xE0C64
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0xE0C68
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0xE0C6C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0xE0C70
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE0C74
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE0C78
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE0C7C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0C80
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0C84
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0C88
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0xE0C8C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0xE0C90
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0xE0C94
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0xE0C98
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE0C9C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0CA0
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0CA4
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0CA8
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE0CAC
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0xE0CB0
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0xE0CB4
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0xE0CB8
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0xE0CBC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0xE0CC0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0xE0CC4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0xE0CC8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0xE0CCC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0xE0CD0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0xE0CD4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0xE0CD8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0xE0CDC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0xE0CE0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0xE0CE4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0xE0CE8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0xE0CEC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0xE0CF0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0xE0CF4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0xE0CF8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0xE0CFC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0xE0D00
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0xE0D04
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0D08
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE0D0C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0D10
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0D14
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0D18
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE0D1C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0xE0D20
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0xE0D24
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0xE0D28
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0xE0D2C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE0D30
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE0D34
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE0D38
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE0D3C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE0D40
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0xE0D44
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0xE0D48
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0xE0D4C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0xE0D50
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SB_REPEAT 0xE0D54
+
+#define mmMME1_CTRL_SHADOW_3_DESC_RATE_LIMITER 0xE0D58
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE0D5C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE0D60
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0xE0D64
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0xE0D68
+
+#define mmMME1_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0xE0D6C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_S 0xE0D70
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0xE0D74
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0xE0D78
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0xE0D7C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0xE0D80
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0xE0D84
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0xE0D88
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0xE0D8C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0xE0D90
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0xE0D94
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0xE0D98
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0xE0D9C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0xE0DA0
+
+#define mmMME1_CTRL_SHADOW_3_DESC_DUMMY 0xE0DA4
+
+#endif /* ASIC_REG_MME1_CTRL_REGS_H_ */
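[Editor's note: the headers in this patch only provide byte offsets into the device's configuration space; they carry no access logic of their own. As a hedged illustration of how such mm* offsets are typically consumed, here is a minimal C sketch. The gaudi_base pointer and the mme_wreg32/mme_rreg32 helpers are hypothetical stand-ins — the real driver goes through its own MMIO accessors — shown only to make the offset arithmetic concrete.

	#include <stdint.h>

	/*
	 * Hypothetical MMIO window covering the device's register space.
	 * In a kernel driver this would come from mapping a PCI BAR; here
	 * it is an assumption used purely to illustrate the arithmetic.
	 */
	static volatile uint8_t *gaudi_base;

	/* Write a 32-bit value at a register offset taken from these headers. */
	static inline void mme_wreg32(uint32_t reg_offset, uint32_t val)
	{
		*(volatile uint32_t *)(gaudi_base + reg_offset) = val;
	}

	/* Read back a 32-bit register by the same offset. */
	static inline uint32_t mme_rreg32(uint32_t reg_offset)
	{
		return *(volatile uint32_t *)(gaudi_base + reg_offset);
	}

With helpers like these, a register access is just mme_wreg32(mmMME1_CTRL_SHADOW_3_DESC_DUMMY, 0); the macros above supply nothing but the displacement from the mapped base.]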
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
new file mode 100644
index 000000000000..a1f2eb8b91bd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
@@ -0,0 +1,1456 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME2_CTRL_REGS_H_
+#define ASIC_REG_MME2_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * MME2_CTRL (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME2_CTRL_ARCH_STATUS 0x160000
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_HIGH_S 0x160008
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_HIGH_L 0x16000C
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_HIGH_O 0x160010
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_LOW_S 0x160014
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_LOW_L 0x160018
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_LOW_O 0x16001C
+
+#define mmMME2_CTRL_ARCH_HEADER_LOW 0x160020
+
+#define mmMME2_CTRL_ARCH_HEADER_HIGH 0x160024
+
+#define mmMME2_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0x160028
+
+#define mmMME2_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0x16002C
+
+#define mmMME2_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0x160030
+
+#define mmMME2_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0x160034
+
+#define mmMME2_CTRL_ARCH_OUTER_LOOP 0x160038
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0x16003C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0x160040
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0x160044
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0x160048
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0x16004C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0x160050
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0x160054
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0x160058
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0x16005C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0x160060
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0x160064
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0x160068
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0x16006C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0x160070
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0x160074
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0x160078
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0x16007C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0x160080
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160084
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0x160088
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0x16008C
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0x160090
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0x160094
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0x160098
+
+#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_0 0x16009C
+
+#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_1 0x1600A0
+
+#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_2 0x1600A4
+
+#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_3 0x1600A8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0x1600AC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0x1600B0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0x1600B4
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0x1600B8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0x1600BC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0x1600C0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0x1600C4
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0x1600C8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0x1600CC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0x1600D0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0x1600D4
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0x1600D8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0x1600DC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0x1600E0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0x1600E4
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0x1600E8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0x1600EC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0x1600F0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1600F4
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1600F8
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1600FC
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160100
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160104
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160108
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0x16010C
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0x160110
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0x160114
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0x160118
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x16011C
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160120
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160124
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160128
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x16012C
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0x160130
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0x160134
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0x160138
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0x16013C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0x160140
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0x160144
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0x160148
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0x16014C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0x160150
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0x160154
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0x160158
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0x16015C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0x160160
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0x160164
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0x160168
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0x16016C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0x160170
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0x160174
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0x160178
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0x16017C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0x160180
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0x160184
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160188
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x16018C
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160190
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160194
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160198
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x16019C
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0x1601A0
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0x1601A4
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0x1601A8
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0x1601AC
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1601B0
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1601B4
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1601B8
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1601BC
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1601C0
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0x1601C4
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0x1601C8
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0x1601CC
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0x1601D0
+
+#define mmMME2_CTRL_ARCH_DESC_SB_REPEAT 0x1601D4
+
+#define mmMME2_CTRL_ARCH_DESC_RATE_LIMITER 0x1601D8
+
+#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1601DC
+
+#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1601E0
+
+#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0x1601E4
+
+#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0x1601E8
+
+#define mmMME2_CTRL_ARCH_DESC_AXI_USER_DATA 0x1601EC
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_S 0x1601F0
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0x1601F4
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0x1601F8
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0x1601FC
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0x160200
+
+#define mmMME2_CTRL_ARCH_DESC_PADDING_VALUE_S 0x160204
+
+#define mmMME2_CTRL_ARCH_DESC_PADDING_VALUE_L 0x160208
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_S 0x16020C
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0x160210
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0x160214
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0x160218
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0x16021C
+
+#define mmMME2_CTRL_ARCH_DESC_PCU_RL_SATURATION 0x160220
+
+#define mmMME2_CTRL_ARCH_DESC_DUMMY 0x160224
+
+#define mmMME2_CTRL_CMD 0x160280
+
+#define mmMME2_CTRL_STATUS1 0x160284
+
+#define mmMME2_CTRL_RESET 0x160288
+
+#define mmMME2_CTRL_QM_STALL 0x16028C
+
+#define mmMME2_CTRL_SYNC_OBJECT_FIFO_TH 0x160290
+
+#define mmMME2_CTRL_EUS_ROLLUP_CNT_ADD 0x160294
+
+#define mmMME2_CTRL_INTR_CAUSE 0x160298
+
+#define mmMME2_CTRL_INTR_MASK 0x16029C
+
+#define mmMME2_CTRL_LOG_SHADOW 0x1602A0
+
+#define mmMME2_CTRL_PCU_RL_DESC0 0x1602A4
+
+#define mmMME2_CTRL_PCU_RL_TOKEN_UPDATE 0x1602A8
+
+#define mmMME2_CTRL_PCU_RL_TH 0x1602AC
+
+#define mmMME2_CTRL_PCU_RL_MIN 0x1602B0
+
+#define mmMME2_CTRL_PCU_RL_CTRL_EN 0x1602B4
+
+#define mmMME2_CTRL_PCU_RL_HISTORY_LOG_SIZE 0x1602B8
+
+#define mmMME2_CTRL_PCU_DUMMY_A_BF16 0x1602BC
+
+#define mmMME2_CTRL_PCU_DUMMY_B_BF16 0x1602C0
+
+#define mmMME2_CTRL_PCU_DUMMY_A_FP32_ODD 0x1602C4
+
+#define mmMME2_CTRL_PCU_DUMMY_A_FP32_EVEN 0x1602C8
+
+#define mmMME2_CTRL_PCU_DUMMY_B_FP32_ODD 0x1602CC
+
+#define mmMME2_CTRL_PCU_DUMMY_B_FP32_EVEN 0x1602D0
+
+#define mmMME2_CTRL_PROT 0x1602D4
+
+#define mmMME2_CTRL_EU_POWER_SAVE_DISABLE 0x1602D8
+
+#define mmMME2_CTRL_CS_DBG_BLOCK_ID 0x1602DC
+
+#define mmMME2_CTRL_CS_DBG_STATUS_DROP_CNT 0x1602E0
+
+#define mmMME2_CTRL_TE_CLOSE_CGATE 0x1602E4
+
+#define mmMME2_CTRL_AGU_SM_INFLIGHT_CNTR 0x1602E8
+
+#define mmMME2_CTRL_AGU_SM_TOTAL_CNTR 0x1602EC
+
+#define mmMME2_CTRL_EZSYNC_OUT_CREDIT 0x1602F0
+
+#define mmMME2_CTRL_PCU_RL_SAT_SEC 0x1602F4
+
+#define mmMME2_CTRL_AGU_SYNC_MSG_AXI_USER 0x1602F8
+
+#define mmMME2_CTRL_QM_SLV_LBW_CLK_EN 0x1602FC
+
+#define mmMME2_CTRL_SHADOW_0_STATUS 0x160400
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0x160408
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0x16040C
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0x160410
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0x160414
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0x160418
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0x16041C
+
+#define mmMME2_CTRL_SHADOW_0_HEADER_LOW 0x160420
+
+#define mmMME2_CTRL_SHADOW_0_HEADER_HIGH 0x160424
+
+#define mmMME2_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0x160428
+
+#define mmMME2_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0x16042C
+
+#define mmMME2_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0x160430
+
+#define mmMME2_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0x160434
+
+#define mmMME2_CTRL_SHADOW_0_OUTER_LOOP 0x160438
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0x16043C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0x160440
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0x160444
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0x160448
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0x16044C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0x160450
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0x160454
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0x160458
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0x16045C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0x160460
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0x160464
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0x160468
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0x16046C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0x160470
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0x160474
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0x160478
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0x16047C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0x160480
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160484
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0x160488
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0x16048C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0x160490
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0x160494
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0x160498
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0x16049C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0x1604A0
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0x1604A4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0x1604A8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0x1604AC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0x1604B0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0x1604B4
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0x1604B8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0x1604BC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0x1604C0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0x1604C4
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0x1604C8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0x1604CC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0x1604D0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0x1604D4
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0x1604D8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0x1604DC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0x1604E0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0x1604E4
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0x1604E8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0x1604EC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0x1604F0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1604F4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1604F8
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1604FC
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160500
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160504
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160508
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0x16050C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0x160510
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0x160514
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0x160518
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x16051C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160520
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160524
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160528
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x16052C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0x160530
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0x160534
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0x160538
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0x16053C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0x160540
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0x160544
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0x160548
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0x16054C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0x160550
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0x160554
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0x160558
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0x16055C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0x160560
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0x160564
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0x160568
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0x16056C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0x160570
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0x160574
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0x160578
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0x16057C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0x160580
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0x160584
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160588
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x16058C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160590
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160594
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160598
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x16059C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0x1605A0
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0x1605A4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0x1605A8
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0x1605AC
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1605B0
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1605B4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1605B8
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1605BC
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1605C0
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0x1605C4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0x1605C8
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0x1605CC
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0x1605D0
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SB_REPEAT 0x1605D4
+
+#define mmMME2_CTRL_SHADOW_0_DESC_RATE_LIMITER 0x1605D8
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1605DC
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1605E0
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0x1605E4
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0x1605E8
+
+#define mmMME2_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0x1605EC
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_S 0x1605F0
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0x1605F4
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0x1605F8
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0x1605FC
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0x160600
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0x160604
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0x160608
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0x16060C
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0x160610
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0x160614
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0x160618
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0x16061C
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0x160620
+
+#define mmMME2_CTRL_SHADOW_0_DESC_DUMMY 0x160624
+
+#define mmMME2_CTRL_SHADOW_1_STATUS 0x160680
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0x160688
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0x16068C
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0x160690
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0x160694
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0x160698
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0x16069C
+
+#define mmMME2_CTRL_SHADOW_1_HEADER_LOW 0x1606A0
+
+#define mmMME2_CTRL_SHADOW_1_HEADER_HIGH 0x1606A4
+
+#define mmMME2_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0x1606A8
+
+#define mmMME2_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0x1606AC
+
+#define mmMME2_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0x1606B0
+
+#define mmMME2_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0x1606B4
+
+#define mmMME2_CTRL_SHADOW_1_OUTER_LOOP 0x1606B8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0x1606BC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0x1606C0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0x1606C4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0x1606C8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0x1606CC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0x1606D0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0x1606D4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0x1606D8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0x1606DC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0x1606E0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0x1606E4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0x1606E8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0x1606EC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0x1606F0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0x1606F4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0x1606F8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0x1606FC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0x160700
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160704
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0x160708
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0x16070C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0x160710
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0x160714
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0x160718
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0x16071C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0x160720
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0x160724
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0x160728
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0x16072C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0x160730
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0x160734
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0x160738
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0x16073C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0x160740
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0x160744
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0x160748
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0x16074C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0x160750
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0x160754
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0x160758
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0x16075C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0x160760
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0x160764
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0x160768
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0x16076C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0x160770
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x160774
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x160778
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x16077C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160780
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160784
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160788
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0x16078C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0x160790
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0x160794
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0x160798
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x16079C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1607A0
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1607A4
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1607A8
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1607AC
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0x1607B0
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0x1607B4
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0x1607B8
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0x1607BC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0x1607C0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0x1607C4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0x1607C8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0x1607CC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0x1607D0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0x1607D4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0x1607D8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0x1607DC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0x1607E0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0x1607E4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0x1607E8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0x1607EC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0x1607F0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0x1607F4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0x1607F8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0x1607FC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0x160800
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0x160804
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160808
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x16080C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160810
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160814
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160818
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x16081C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0x160820
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0x160824
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0x160828
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0x16082C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x160830
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x160834
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x160838
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x16083C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x160840
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0x160844
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0x160848
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0x16084C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0x160850
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SB_REPEAT 0x160854
+
+#define mmMME2_CTRL_SHADOW_1_DESC_RATE_LIMITER 0x160858
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x16085C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x160860
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0x160864
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0x160868
+
+#define mmMME2_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0x16086C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_S 0x160870
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0x160874
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0x160878
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0x16087C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0x160880
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0x160884
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0x160888
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0x16088C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0x160890
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0x160894
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0x160898
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0x16089C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0x1608A0
+
+#define mmMME2_CTRL_SHADOW_1_DESC_DUMMY 0x1608A4
+
+#define mmMME2_CTRL_SHADOW_2_STATUS 0x160900
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0x160908
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0x16090C
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0x160910
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0x160914
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0x160918
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0x16091C
+
+#define mmMME2_CTRL_SHADOW_2_HEADER_LOW 0x160920
+
+#define mmMME2_CTRL_SHADOW_2_HEADER_HIGH 0x160924
+
+#define mmMME2_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0x160928
+
+#define mmMME2_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0x16092C
+
+#define mmMME2_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0x160930
+
+#define mmMME2_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0x160934
+
+#define mmMME2_CTRL_SHADOW_2_OUTER_LOOP 0x160938
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0x16093C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0x160940
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0x160944
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0x160948
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0x16094C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0x160950
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0x160954
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0x160958
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0x16095C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0x160960
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0x160964
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0x160968
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0x16096C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0x160970
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0x160974
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0x160978
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0x16097C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0x160980
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160984
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0x160988
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0x16098C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0x160990
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0x160994
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0x160998
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0x16099C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0x1609A0
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0x1609A4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0x1609A8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0x1609AC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0x1609B0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0x1609B4
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0x1609B8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0x1609BC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0x1609C0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0x1609C4
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0x1609C8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0x1609CC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0x1609D0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0x1609D4
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0x1609D8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0x1609DC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0x1609E0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0x1609E4
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0x1609E8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0x1609EC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0x1609F0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1609F4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1609F8
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1609FC
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160A00
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160A04
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160A08
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0x160A0C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0x160A10
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0x160A14
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0x160A18
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x160A1C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160A20
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160A24
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160A28
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x160A2C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0x160A30
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0x160A34
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0x160A38
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0x160A3C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0x160A40
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0x160A44
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0x160A48
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0x160A4C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0x160A50
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0x160A54
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0x160A58
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0x160A5C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0x160A60
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0x160A64
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0x160A68
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0x160A6C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0x160A70
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0x160A74
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0x160A78
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0x160A7C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0x160A80
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0x160A84
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160A88
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x160A8C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160A90
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160A94
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160A98
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x160A9C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0x160AA0
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0x160AA4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0x160AA8
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0x160AAC
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x160AB0
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x160AB4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x160AB8
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x160ABC
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x160AC0
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0x160AC4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0x160AC8
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0x160ACC
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0x160AD0
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SB_REPEAT 0x160AD4
+
+#define mmMME2_CTRL_SHADOW_2_DESC_RATE_LIMITER 0x160AD8
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x160ADC
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x160AE0
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0x160AE4
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0x160AE8
+
+#define mmMME2_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0x160AEC
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_S 0x160AF0
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0x160AF4
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0x160AF8
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0x160AFC
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0x160B00
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0x160B04
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0x160B08
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0x160B0C
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0x160B10
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0x160B14
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0x160B18
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0x160B1C
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0x160B20
+
+#define mmMME2_CTRL_SHADOW_2_DESC_DUMMY 0x160B24
+
+#define mmMME2_CTRL_SHADOW_3_STATUS 0x160B80
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0x160B88
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0x160B8C
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0x160B90
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0x160B94
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0x160B98
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0x160B9C
+
+#define mmMME2_CTRL_SHADOW_3_HEADER_LOW 0x160BA0
+
+#define mmMME2_CTRL_SHADOW_3_HEADER_HIGH 0x160BA4
+
+#define mmMME2_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0x160BA8
+
+#define mmMME2_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0x160BAC
+
+#define mmMME2_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0x160BB0
+
+#define mmMME2_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0x160BB4
+
+#define mmMME2_CTRL_SHADOW_3_OUTER_LOOP 0x160BB8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0x160BBC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0x160BC0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0x160BC4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0x160BC8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0x160BCC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0x160BD0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0x160BD4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0x160BD8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0x160BDC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0x160BE0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0x160BE4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0x160BE8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0x160BEC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0x160BF0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0x160BF4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0x160BF8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0x160BFC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0x160C00
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160C04
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0x160C08
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0x160C0C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0x160C10
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0x160C14
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0x160C18
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0x160C1C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0x160C20
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0x160C24
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0x160C28
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0x160C2C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0x160C30
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0x160C34
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0x160C38
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0x160C3C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0x160C40
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0x160C44
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0x160C48
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0x160C4C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0x160C50
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0x160C54
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0x160C58
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0x160C5C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0x160C60
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0x160C64
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0x160C68
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0x160C6C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0x160C70
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x160C74
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x160C78
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x160C7C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160C80
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160C84
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160C88
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0x160C8C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0x160C90
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0x160C94
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0x160C98
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x160C9C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160CA0
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160CA4
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160CA8
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x160CAC
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0x160CB0
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0x160CB4
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0x160CB8
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0x160CBC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0x160CC0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0x160CC4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0x160CC8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0x160CCC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0x160CD0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0x160CD4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0x160CD8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0x160CDC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0x160CE0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0x160CE4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0x160CE8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0x160CEC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0x160CF0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0x160CF4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0x160CF8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0x160CFC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0x160D00
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0x160D04
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160D08
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x160D0C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160D10
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160D14
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160D18
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x160D1C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0x160D20
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0x160D24
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0x160D28
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0x160D2C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x160D30
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x160D34
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x160D38
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x160D3C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x160D40
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0x160D44
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0x160D48
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0x160D4C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0x160D50
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SB_REPEAT 0x160D54
+
+#define mmMME2_CTRL_SHADOW_3_DESC_RATE_LIMITER 0x160D58
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x160D5C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x160D60
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0x160D64
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0x160D68
+
+#define mmMME2_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0x160D6C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_S 0x160D70
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0x160D74
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0x160D78
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0x160D7C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0x160D80
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0x160D84
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0x160D88
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0x160D8C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0x160D90
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0x160D94
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0x160D98
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0x160D9C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0x160DA0
+
+#define mmMME2_CTRL_SHADOW_3_DESC_DUMMY 0x160DA4
+
+#endif /* ASIC_REG_MME2_CTRL_REGS_H_ */
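[Editor's note: the SHADOW_0..SHADOW_3 banks replicate the ARCH descriptor layout at a fixed stride. In both CTRL files above, SHADOW_0_STATUS sits at block base + 0x400 and consecutive banks are 0x280 apart (0x160400, 0x160680, 0x160900, 0x160B80 for MME2). A small hedged C helper can exploit that regularity instead of spelling out every macro; the constants below are read off these listings, not taken from any driver header.

	#include <stdint.h>

	#define MME2_CTRL_BASE		0x160000u /* block base, per the listing */
	#define MME_CTRL_SHADOW0_OFF	0x400u	  /* SHADOW_0_STATUS = base + 0x400 */
	#define MME_CTRL_SHADOW_STRIDE	0x280u	  /* distance between SHADOW_n banks */

	/*
	 * Offset of a register inside SHADOW_<bank>, given the register's
	 * displacement from the start of its bank (STATUS is +0x0,
	 * BASE_ADDR_HIGH_S is +0x8, and so on, matching the defines above).
	 */
	static inline uint32_t mme2_shadow_reg(unsigned int bank, uint32_t disp)
	{
		return MME2_CTRL_BASE + MME_CTRL_SHADOW0_OFF +
		       bank * MME_CTRL_SHADOW_STRIDE + disp;
	}

As a sanity check against the listing, mme2_shadow_reg(3, 0x8) yields 0x160B88, which matches mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_S above.]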
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
new file mode 100644
index 000000000000..c1ea6a422010
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME2_QM_REGS_H_
+#define ASIC_REG_MME2_QM_REGS_H_
+
+/*
+ *****************************************
+ * MME2_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmMME2_QM_GLBL_CFG0 0x168000
+
+#define mmMME2_QM_GLBL_CFG1 0x168004
+
+#define mmMME2_QM_GLBL_PROT 0x168008
+
+#define mmMME2_QM_GLBL_ERR_CFG 0x16800C
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_0 0x168010
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_1 0x168014
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_2 0x168018
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_3 0x16801C
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_4 0x168020
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_0 0x168024
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_1 0x168028
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_2 0x16802C
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_3 0x168030
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_4 0x168034
+
+#define mmMME2_QM_GLBL_STS0 0x168038
+
+#define mmMME2_QM_GLBL_STS1_0 0x168040
+
+#define mmMME2_QM_GLBL_STS1_1 0x168044
+
+#define mmMME2_QM_GLBL_STS1_2 0x168048
+
+#define mmMME2_QM_GLBL_STS1_3 0x16804C
+
+#define mmMME2_QM_GLBL_STS1_4 0x168050
+
+#define mmMME2_QM_GLBL_MSG_EN_0 0x168054
+
+#define mmMME2_QM_GLBL_MSG_EN_1 0x168058
+
+#define mmMME2_QM_GLBL_MSG_EN_2 0x16805C
+
+#define mmMME2_QM_GLBL_MSG_EN_3 0x168060
+
+#define mmMME2_QM_GLBL_MSG_EN_4 0x168068
+
+#define mmMME2_QM_PQ_BASE_LO_0 0x168070
+
+#define mmMME2_QM_PQ_BASE_LO_1 0x168074
+
+#define mmMME2_QM_PQ_BASE_LO_2 0x168078
+
+#define mmMME2_QM_PQ_BASE_LO_3 0x16807C
+
+#define mmMME2_QM_PQ_BASE_HI_0 0x168080
+
+#define mmMME2_QM_PQ_BASE_HI_1 0x168084
+
+#define mmMME2_QM_PQ_BASE_HI_2 0x168088
+
+#define mmMME2_QM_PQ_BASE_HI_3 0x16808C
+
+#define mmMME2_QM_PQ_SIZE_0 0x168090
+
+#define mmMME2_QM_PQ_SIZE_1 0x168094
+
+#define mmMME2_QM_PQ_SIZE_2 0x168098
+
+#define mmMME2_QM_PQ_SIZE_3 0x16809C
+
+#define mmMME2_QM_PQ_PI_0 0x1680A0
+
+#define mmMME2_QM_PQ_PI_1 0x1680A4
+
+#define mmMME2_QM_PQ_PI_2 0x1680A8
+
+#define mmMME2_QM_PQ_PI_3 0x1680AC
+
+#define mmMME2_QM_PQ_CI_0 0x1680B0
+
+#define mmMME2_QM_PQ_CI_1 0x1680B4
+
+#define mmMME2_QM_PQ_CI_2 0x1680B8
+
+#define mmMME2_QM_PQ_CI_3 0x1680BC
+
+#define mmMME2_QM_PQ_CFG0_0 0x1680C0
+
+#define mmMME2_QM_PQ_CFG0_1 0x1680C4
+
+#define mmMME2_QM_PQ_CFG0_2 0x1680C8
+
+#define mmMME2_QM_PQ_CFG0_3 0x1680CC
+
+#define mmMME2_QM_PQ_CFG1_0 0x1680D0
+
+#define mmMME2_QM_PQ_CFG1_1 0x1680D4
+
+#define mmMME2_QM_PQ_CFG1_2 0x1680D8
+
+#define mmMME2_QM_PQ_CFG1_3 0x1680DC
+
+#define mmMME2_QM_PQ_ARUSER_31_11_0 0x1680E0
+
+#define mmMME2_QM_PQ_ARUSER_31_11_1 0x1680E4
+
+#define mmMME2_QM_PQ_ARUSER_31_11_2 0x1680E8
+
+#define mmMME2_QM_PQ_ARUSER_31_11_3 0x1680EC
+
+#define mmMME2_QM_PQ_STS0_0 0x1680F0
+
+#define mmMME2_QM_PQ_STS0_1 0x1680F4
+
+#define mmMME2_QM_PQ_STS0_2 0x1680F8
+
+#define mmMME2_QM_PQ_STS0_3 0x1680FC
+
+#define mmMME2_QM_PQ_STS1_0 0x168100
+
+#define mmMME2_QM_PQ_STS1_1 0x168104
+
+#define mmMME2_QM_PQ_STS1_2 0x168108
+
+#define mmMME2_QM_PQ_STS1_3 0x16810C
+
+#define mmMME2_QM_CQ_CFG0_0 0x168110
+
+#define mmMME2_QM_CQ_CFG0_1 0x168114
+
+#define mmMME2_QM_CQ_CFG0_2 0x168118
+
+#define mmMME2_QM_CQ_CFG0_3 0x16811C
+
+#define mmMME2_QM_CQ_CFG0_4 0x168120
+
+#define mmMME2_QM_CQ_CFG1_0 0x168124
+
+#define mmMME2_QM_CQ_CFG1_1 0x168128
+
+#define mmMME2_QM_CQ_CFG1_2 0x16812C
+
+#define mmMME2_QM_CQ_CFG1_3 0x168130
+
+#define mmMME2_QM_CQ_CFG1_4 0x168134
+
+#define mmMME2_QM_CQ_ARUSER_31_11_0 0x168138
+
+#define mmMME2_QM_CQ_ARUSER_31_11_1 0x16813C
+
+#define mmMME2_QM_CQ_ARUSER_31_11_2 0x168140
+
+#define mmMME2_QM_CQ_ARUSER_31_11_3 0x168144
+
+#define mmMME2_QM_CQ_ARUSER_31_11_4 0x168148
+
+#define mmMME2_QM_CQ_STS0_0 0x16814C
+
+#define mmMME2_QM_CQ_STS0_1 0x168150
+
+#define mmMME2_QM_CQ_STS0_2 0x168154
+
+#define mmMME2_QM_CQ_STS0_3 0x168158
+
+#define mmMME2_QM_CQ_STS0_4 0x16815C
+
+#define mmMME2_QM_CQ_STS1_0 0x168160
+
+#define mmMME2_QM_CQ_STS1_1 0x168164
+
+#define mmMME2_QM_CQ_STS1_2 0x168168
+
+#define mmMME2_QM_CQ_STS1_3 0x16816C
+
+#define mmMME2_QM_CQ_STS1_4 0x168170
+
+#define mmMME2_QM_CQ_PTR_LO_0 0x168174
+
+#define mmMME2_QM_CQ_PTR_HI_0 0x168178
+
+#define mmMME2_QM_CQ_TSIZE_0 0x16817C
+
+#define mmMME2_QM_CQ_CTL_0 0x168180
+
+#define mmMME2_QM_CQ_PTR_LO_1 0x168184
+
+#define mmMME2_QM_CQ_PTR_HI_1 0x168188
+
+#define mmMME2_QM_CQ_TSIZE_1 0x16818C
+
+#define mmMME2_QM_CQ_CTL_1 0x168190
+
+#define mmMME2_QM_CQ_PTR_LO_2 0x168194
+
+#define mmMME2_QM_CQ_PTR_HI_2 0x168198
+
+#define mmMME2_QM_CQ_TSIZE_2 0x16819C
+
+#define mmMME2_QM_CQ_CTL_2 0x1681A0
+
+#define mmMME2_QM_CQ_PTR_LO_3 0x1681A4
+
+#define mmMME2_QM_CQ_PTR_HI_3 0x1681A8
+
+#define mmMME2_QM_CQ_TSIZE_3 0x1681AC
+
+#define mmMME2_QM_CQ_CTL_3 0x1681B0
+
+#define mmMME2_QM_CQ_PTR_LO_4 0x1681B4
+
+#define mmMME2_QM_CQ_PTR_HI_4 0x1681B8
+
+#define mmMME2_QM_CQ_TSIZE_4 0x1681BC
+
+#define mmMME2_QM_CQ_CTL_4 0x1681C0
+
+#define mmMME2_QM_CQ_PTR_LO_STS_0 0x1681C4
+
+#define mmMME2_QM_CQ_PTR_LO_STS_1 0x1681C8
+
+#define mmMME2_QM_CQ_PTR_LO_STS_2 0x1681CC
+
+#define mmMME2_QM_CQ_PTR_LO_STS_3 0x1681D0
+
+#define mmMME2_QM_CQ_PTR_LO_STS_4 0x1681D4
+
+#define mmMME2_QM_CQ_PTR_HI_STS_0 0x1681D8
+
+#define mmMME2_QM_CQ_PTR_HI_STS_1 0x1681DC
+
+#define mmMME2_QM_CQ_PTR_HI_STS_2 0x1681E0
+
+#define mmMME2_QM_CQ_PTR_HI_STS_3 0x1681E4
+
+#define mmMME2_QM_CQ_PTR_HI_STS_4 0x1681E8
+
+#define mmMME2_QM_CQ_TSIZE_STS_0 0x1681EC
+
+#define mmMME2_QM_CQ_TSIZE_STS_1 0x1681F0
+
+#define mmMME2_QM_CQ_TSIZE_STS_2 0x1681F4
+
+#define mmMME2_QM_CQ_TSIZE_STS_3 0x1681F8
+
+#define mmMME2_QM_CQ_TSIZE_STS_4 0x1681FC
+
+#define mmMME2_QM_CQ_CTL_STS_0 0x168200
+
+#define mmMME2_QM_CQ_CTL_STS_1 0x168204
+
+#define mmMME2_QM_CQ_CTL_STS_2 0x168208
+
+#define mmMME2_QM_CQ_CTL_STS_3 0x16820C
+
+#define mmMME2_QM_CQ_CTL_STS_4 0x168210
+
+#define mmMME2_QM_CQ_IFIFO_CNT_0 0x168214
+
+#define mmMME2_QM_CQ_IFIFO_CNT_1 0x168218
+
+#define mmMME2_QM_CQ_IFIFO_CNT_2 0x16821C
+
+#define mmMME2_QM_CQ_IFIFO_CNT_3 0x168220
+
+#define mmMME2_QM_CQ_IFIFO_CNT_4 0x168224
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_0 0x168228
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_1 0x16822C
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_2 0x168230
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_3 0x168234
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_4 0x168238
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_0 0x16823C
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_1 0x168240
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_2 0x168244
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_3 0x168248
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_4 0x16824C
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_0 0x168250
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_1 0x168254
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_2 0x168258
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_3 0x16825C
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_4 0x168260
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_0 0x168264
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_1 0x168268
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_2 0x16826C
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_3 0x168270
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_4 0x168274
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_0 0x168278
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_1 0x16827C
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 0x168280
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_3 0x168284
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_4 0x168288
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_0 0x16828C
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_1 0x168290
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_2 0x168294
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_3 0x168298
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_4 0x16829C
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_0 0x1682A0
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_1 0x1682A4
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_2 0x1682A8
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_3 0x1682AC
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_4 0x1682B0
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_0 0x1682B4
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_1 0x1682B8
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_2 0x1682BC
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_3 0x1682C0
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_4 0x1682C4
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_0 0x1682C8
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_1 0x1682CC
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_2 0x1682D0
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_3 0x1682D4
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_4 0x1682D8
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x1682E0
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x1682E4
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x1682E8
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x1682EC
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x1682F0
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x1682F4
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x1682F8
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x1682FC
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x168300
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x168304
+
+#define mmMME2_QM_CP_FENCE0_RDATA_0 0x168308
+
+#define mmMME2_QM_CP_FENCE0_RDATA_1 0x16830C
+
+#define mmMME2_QM_CP_FENCE0_RDATA_2 0x168310
+
+#define mmMME2_QM_CP_FENCE0_RDATA_3 0x168314
+
+#define mmMME2_QM_CP_FENCE0_RDATA_4 0x168318
+
+#define mmMME2_QM_CP_FENCE1_RDATA_0 0x16831C
+
+#define mmMME2_QM_CP_FENCE1_RDATA_1 0x168320
+
+#define mmMME2_QM_CP_FENCE1_RDATA_2 0x168324
+
+#define mmMME2_QM_CP_FENCE1_RDATA_3 0x168328
+
+#define mmMME2_QM_CP_FENCE1_RDATA_4 0x16832C
+
+#define mmMME2_QM_CP_FENCE2_RDATA_0 0x168330
+
+#define mmMME2_QM_CP_FENCE2_RDATA_1 0x168334
+
+#define mmMME2_QM_CP_FENCE2_RDATA_2 0x168338
+
+#define mmMME2_QM_CP_FENCE2_RDATA_3 0x16833C
+
+#define mmMME2_QM_CP_FENCE2_RDATA_4 0x168340
+
+#define mmMME2_QM_CP_FENCE3_RDATA_0 0x168344
+
+#define mmMME2_QM_CP_FENCE3_RDATA_1 0x168348
+
+#define mmMME2_QM_CP_FENCE3_RDATA_2 0x16834C
+
+#define mmMME2_QM_CP_FENCE3_RDATA_3 0x168350
+
+#define mmMME2_QM_CP_FENCE3_RDATA_4 0x168354
+
+#define mmMME2_QM_CP_FENCE0_CNT_0 0x168358
+
+#define mmMME2_QM_CP_FENCE0_CNT_1 0x16835C
+
+#define mmMME2_QM_CP_FENCE0_CNT_2 0x168360
+
+#define mmMME2_QM_CP_FENCE0_CNT_3 0x168364
+
+#define mmMME2_QM_CP_FENCE0_CNT_4 0x168368
+
+#define mmMME2_QM_CP_FENCE1_CNT_0 0x16836C
+
+#define mmMME2_QM_CP_FENCE1_CNT_1 0x168370
+
+#define mmMME2_QM_CP_FENCE1_CNT_2 0x168374
+
+#define mmMME2_QM_CP_FENCE1_CNT_3 0x168378
+
+#define mmMME2_QM_CP_FENCE1_CNT_4 0x16837C
+
+#define mmMME2_QM_CP_FENCE2_CNT_0 0x168380
+
+#define mmMME2_QM_CP_FENCE2_CNT_1 0x168384
+
+#define mmMME2_QM_CP_FENCE2_CNT_2 0x168388
+
+#define mmMME2_QM_CP_FENCE2_CNT_3 0x16838C
+
+#define mmMME2_QM_CP_FENCE2_CNT_4 0x168390
+
+#define mmMME2_QM_CP_FENCE3_CNT_0 0x168394
+
+#define mmMME2_QM_CP_FENCE3_CNT_1 0x168398
+
+#define mmMME2_QM_CP_FENCE3_CNT_2 0x16839C
+
+#define mmMME2_QM_CP_FENCE3_CNT_3 0x1683A0
+
+#define mmMME2_QM_CP_FENCE3_CNT_4 0x1683A4
+
+#define mmMME2_QM_CP_STS_0 0x1683A8
+
+#define mmMME2_QM_CP_STS_1 0x1683AC
+
+#define mmMME2_QM_CP_STS_2 0x1683B0
+
+#define mmMME2_QM_CP_STS_3 0x1683B4
+
+#define mmMME2_QM_CP_STS_4 0x1683B8
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_0 0x1683BC
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_1 0x1683C0
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_2 0x1683C4
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_3 0x1683C8
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_4 0x1683CC
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_0 0x1683D0
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_1 0x1683D4
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_2 0x1683D8
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_3 0x1683DC
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_4 0x1683E0
+
+#define mmMME2_QM_CP_BARRIER_CFG_0 0x1683F4
+
+#define mmMME2_QM_CP_BARRIER_CFG_1 0x1683F8
+
+#define mmMME2_QM_CP_BARRIER_CFG_2 0x1683FC
+
+#define mmMME2_QM_CP_BARRIER_CFG_3 0x168400
+
+#define mmMME2_QM_CP_BARRIER_CFG_4 0x168404
+
+#define mmMME2_QM_CP_DBG_0_0 0x168408
+
+#define mmMME2_QM_CP_DBG_0_1 0x16840C
+
+#define mmMME2_QM_CP_DBG_0_2 0x168410
+
+#define mmMME2_QM_CP_DBG_0_3 0x168414
+
+#define mmMME2_QM_CP_DBG_0_4 0x168418
+
+#define mmMME2_QM_CP_ARUSER_31_11_0 0x16841C
+
+#define mmMME2_QM_CP_ARUSER_31_11_1 0x168420
+
+#define mmMME2_QM_CP_ARUSER_31_11_2 0x168424
+
+#define mmMME2_QM_CP_ARUSER_31_11_3 0x168428
+
+#define mmMME2_QM_CP_ARUSER_31_11_4 0x16842C
+
+#define mmMME2_QM_CP_AWUSER_31_11_0 0x168430
+
+#define mmMME2_QM_CP_AWUSER_31_11_1 0x168434
+
+#define mmMME2_QM_CP_AWUSER_31_11_2 0x168438
+
+#define mmMME2_QM_CP_AWUSER_31_11_3 0x16843C
+
+#define mmMME2_QM_CP_AWUSER_31_11_4 0x168440
+
+#define mmMME2_QM_ARB_CFG_0 0x168A00
+
+#define mmMME2_QM_ARB_CHOISE_Q_PUSH 0x168A04
+
+#define mmMME2_QM_ARB_WRR_WEIGHT_0 0x168A08
+
+#define mmMME2_QM_ARB_WRR_WEIGHT_1 0x168A0C
+
+#define mmMME2_QM_ARB_WRR_WEIGHT_2 0x168A10
+
+#define mmMME2_QM_ARB_WRR_WEIGHT_3 0x168A14
+
+#define mmMME2_QM_ARB_CFG_1 0x168A18
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_0 0x168A20
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_1 0x168A24
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_2 0x168A28
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_3 0x168A2C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_4 0x168A30
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_5 0x168A34
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_6 0x168A38
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_7 0x168A3C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_8 0x168A40
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_9 0x168A44
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_10 0x168A48
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_11 0x168A4C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_12 0x168A50
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_13 0x168A54
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_14 0x168A58
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_15 0x168A5C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_16 0x168A60
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_17 0x168A64
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_18 0x168A68
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_19 0x168A6C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_20 0x168A70
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_21 0x168A74
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_22 0x168A78
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_23 0x168A7C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_24 0x168A80
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_25 0x168A84
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_26 0x168A88
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_27 0x168A8C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_28 0x168A90
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_29 0x168A94
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_30 0x168A98
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_31 0x168A9C
+
+#define mmMME2_QM_ARB_MST_CRED_INC 0x168AA0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x168AA4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x168AA8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x168AAC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x168AB0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x168AB4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x168AB8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x168ABC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x168AC0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x168AC4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x168AC8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x168ACC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x168AD0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x168AD4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x168AD8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x168ADC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x168AE0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x168AE4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x168AE8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x168AEC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x168AF0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x168AF4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x168AF8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x168AFC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x168B00
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x168B04
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x168B08
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x168B0C
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x168B10
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x168B14
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x168B18
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x168B1C
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x168B20
+
+#define mmMME2_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x168B28
+
+#define mmMME2_QM_ARB_MST_SLAVE_EN 0x168B2C
+
+#define mmMME2_QM_ARB_MST_QUIET_PER 0x168B34
+
+#define mmMME2_QM_ARB_SLV_CHOISE_WDT 0x168B38
+
+#define mmMME2_QM_ARB_SLV_ID 0x168B3C
+
+#define mmMME2_QM_ARB_MSG_MAX_INFLIGHT 0x168B44
+
+#define mmMME2_QM_ARB_MSG_AWUSER_31_11 0x168B48
+
+#define mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP 0x168B4C
+
+#define mmMME2_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x168B50
+
+#define mmMME2_QM_ARB_BASE_LO 0x168B54
+
+#define mmMME2_QM_ARB_BASE_HI 0x168B58
+
+#define mmMME2_QM_ARB_STATE_STS 0x168B80
+
+#define mmMME2_QM_ARB_CHOISE_FULLNESS_STS 0x168B84
+
+#define mmMME2_QM_ARB_MSG_STS 0x168B88
+
+#define mmMME2_QM_ARB_SLV_CHOISE_Q_HEAD 0x168B8C
+
+#define mmMME2_QM_ARB_ERR_CAUSE 0x168B9C
+
+#define mmMME2_QM_ARB_ERR_MSG_EN 0x168BA0
+
+#define mmMME2_QM_ARB_ERR_STS_DRP 0x168BA8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_0 0x168BB0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_1 0x168BB4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_2 0x168BB8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_3 0x168BBC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_4 0x168BC0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_5 0x168BC4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_6 0x168BC8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_7 0x168BCC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_8 0x168BD0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_9 0x168BD4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_10 0x168BD8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_11 0x168BDC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_12 0x168BE0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_13 0x168BE4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_14 0x168BE8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_15 0x168BEC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_16 0x168BF0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_17 0x168BF4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_18 0x168BF8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_19 0x168BFC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_20 0x168C00
+
+#define mmMME2_QM_ARB_MST_CRED_STS_21 0x168C04
+
+#define mmMME2_QM_ARB_MST_CRED_STS_22 0x168C08
+
+#define mmMME2_QM_ARB_MST_CRED_STS_23 0x168C0C
+
+#define mmMME2_QM_ARB_MST_CRED_STS_24 0x168C10
+
+#define mmMME2_QM_ARB_MST_CRED_STS_25 0x168C14
+
+#define mmMME2_QM_ARB_MST_CRED_STS_26 0x168C18
+
+#define mmMME2_QM_ARB_MST_CRED_STS_27 0x168C1C
+
+#define mmMME2_QM_ARB_MST_CRED_STS_28 0x168C20
+
+#define mmMME2_QM_ARB_MST_CRED_STS_29 0x168C24
+
+#define mmMME2_QM_ARB_MST_CRED_STS_30 0x168C28
+
+#define mmMME2_QM_ARB_MST_CRED_STS_31 0x168C2C
+
+#define mmMME2_QM_CGM_CFG 0x168C70
+
+#define mmMME2_QM_CGM_STS 0x168C74
+
+#define mmMME2_QM_CGM_CFG1 0x168C78
+
+#define mmMME2_QM_LOCAL_RANGE_BASE 0x168C80
+
+#define mmMME2_QM_LOCAL_RANGE_SIZE 0x168C84
+
+#define mmMME2_QM_CSMR_STRICT_PRIO_CFG 0x168C90
+
+#define mmMME2_QM_HBW_RD_RATE_LIM_CFG_1 0x168C94
+
+#define mmMME2_QM_LBW_WR_RATE_LIM_CFG_0 0x168C98
+
+#define mmMME2_QM_LBW_WR_RATE_LIM_CFG_1 0x168C9C
+
+#define mmMME2_QM_HBW_RD_RATE_LIM_CFG_0 0x168CA0
+
+#define mmMME2_QM_GLBL_AXCACHE 0x168CA4
+
+#define mmMME2_QM_IND_GW_APB_CFG 0x168CB0
+
+#define mmMME2_QM_IND_GW_APB_WDATA 0x168CB4
+
+#define mmMME2_QM_IND_GW_APB_RDATA 0x168CB8
+
+#define mmMME2_QM_IND_GW_APB_STATUS 0x168CBC
+
+#define mmMME2_QM_GLBL_ERR_ADDR_LO 0x168CD0
+
+#define mmMME2_QM_GLBL_ERR_ADDR_HI 0x168CD4
+
+#define mmMME2_QM_GLBL_ERR_WDATA 0x168CD8
+
+#define mmMME2_QM_GLBL_MEM_INIT_BUSY 0x168D00
+
+#endif /* ASIC_REG_MME2_QM_REGS_H_ */
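
For orientation between the generated files: the mm* macros above are plain byte offsets into the device's configuration space, and a driver consumes them by adding each offset to an ioremap()ed base before a readl()/writel() access. The sketch below is illustrative only and not part of this patch; the helper names, the enable value written to mmMME2_QM_GLBL_CFG0, and the meaning of a zero read from mmMME2_QM_GLBL_MEM_INIT_BUSY are assumptions for the example, not the driver's actual init sequence.

/*
 * Illustrative sketch (not part of this patch): consuming offsets from an
 * auto-generated asic_reg header. "base" is assumed to be the ioremap()ed
 * configuration BAR of the device; helper names are hypothetical.
 */
#include <linux/io.h>
#include <linux/types.h>

#include "mme2_qm_regs.h"

/* Hypothetical enable: write the MME2 QM global config register. */
static void mme2_qm_enable(void __iomem *base)
{
	/* Header offsets are relative to the configuration-space base. */
	writel(0x1, base + mmMME2_QM_GLBL_CFG0);
}

/* Hypothetical readiness check: assume 0 means memory init finished. */
static bool mme2_qm_mem_init_done(void __iomem *base)
{
	return readl(base + mmMME2_QM_GLBL_MEM_INIT_BUSY) == 0;
}

Keeping the offsets in generated headers and the access logic in the driver proper is what lets files like these be regenerated wholesale when the hardware description changes, which is why they carry the "DO NOT EDIT" banner.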
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
new file mode 100644
index 000000000000..36f6edc72e3d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
@@ -0,0 +1,1456 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME3_CTRL_REGS_H_
+#define ASIC_REG_MME3_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * MME3_CTRL (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME3_CTRL_ARCH_STATUS 0x1E0000
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_HIGH_S 0x1E0008
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_HIGH_L 0x1E000C
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_HIGH_O 0x1E0010
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_LOW_S 0x1E0014
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_LOW_L 0x1E0018
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_LOW_O 0x1E001C
+
+#define mmMME3_CTRL_ARCH_HEADER_LOW 0x1E0020
+
+#define mmMME3_CTRL_ARCH_HEADER_HIGH 0x1E0024
+
+#define mmMME3_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0x1E0028
+
+#define mmMME3_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0x1E002C
+
+#define mmMME3_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0x1E0030
+
+#define mmMME3_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0x1E0034
+
+#define mmMME3_CTRL_ARCH_OUTER_LOOP 0x1E0038
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0x1E003C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0x1E0040
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0x1E0044
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0x1E0048
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0x1E004C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0x1E0050
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0x1E0054
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0x1E0058
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0x1E005C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0x1E0060
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0x1E0064
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0x1E0068
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0x1E006C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0x1E0070
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0x1E0074
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0x1E0078
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0x1E007C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0x1E0080
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0084
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0x1E0088
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0x1E008C
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0x1E0090
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0x1E0094
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0x1E0098
+
+#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_0 0x1E009C
+
+#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_1 0x1E00A0
+
+#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_2 0x1E00A4
+
+#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_3 0x1E00A8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0x1E00AC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0x1E00B0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0x1E00B4
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0x1E00B8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0x1E00BC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0x1E00C0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0x1E00C4
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0x1E00C8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0x1E00CC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0x1E00D0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0x1E00D4
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0x1E00D8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0x1E00DC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0x1E00E0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0x1E00E4
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0x1E00E8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0x1E00EC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0x1E00F0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E00F4
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E00F8
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E00FC
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0100
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0104
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0108
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0x1E010C
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0x1E0110
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0x1E0114
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0x1E0118
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E011C
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0120
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0124
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0128
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E012C
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0x1E0130
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0x1E0134
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0x1E0138
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0x1E013C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0x1E0140
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0x1E0144
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0x1E0148
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0x1E014C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0x1E0150
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0x1E0154
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0x1E0158
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0x1E015C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0x1E0160
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0x1E0164
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0x1E0168
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0x1E016C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0x1E0170
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0x1E0174
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0x1E0178
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0x1E017C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0x1E0180
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0x1E0184
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0188
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E018C
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0190
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0194
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0198
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E019C
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0x1E01A0
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0x1E01A4
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0x1E01A8
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0x1E01AC
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E01B0
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E01B4
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E01B8
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E01BC
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E01C0
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0x1E01C4
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0x1E01C8
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0x1E01CC
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0x1E01D0
+
+#define mmMME3_CTRL_ARCH_DESC_SB_REPEAT 0x1E01D4
+
+#define mmMME3_CTRL_ARCH_DESC_RATE_LIMITER 0x1E01D8
+
+#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E01DC
+
+#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E01E0
+
+#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E01E4
+
+#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0x1E01E8
+
+#define mmMME3_CTRL_ARCH_DESC_AXI_USER_DATA 0x1E01EC
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_S 0x1E01F0
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0x1E01F4
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0x1E01F8
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0x1E01FC
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0x1E0200
+
+#define mmMME3_CTRL_ARCH_DESC_PADDING_VALUE_S 0x1E0204
+
+#define mmMME3_CTRL_ARCH_DESC_PADDING_VALUE_L 0x1E0208
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_S 0x1E020C
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0x1E0210
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0x1E0214
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0x1E0218
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0x1E021C
+
+#define mmMME3_CTRL_ARCH_DESC_PCU_RL_SATURATION 0x1E0220
+
+#define mmMME3_CTRL_ARCH_DESC_DUMMY 0x1E0224
+
+#define mmMME3_CTRL_CMD 0x1E0280
+
+#define mmMME3_CTRL_STATUS1 0x1E0284
+
+#define mmMME3_CTRL_RESET 0x1E0288
+
+#define mmMME3_CTRL_QM_STALL 0x1E028C
+
+#define mmMME3_CTRL_SYNC_OBJECT_FIFO_TH 0x1E0290
+
+#define mmMME3_CTRL_EUS_ROLLUP_CNT_ADD 0x1E0294
+
+#define mmMME3_CTRL_INTR_CAUSE 0x1E0298
+
+#define mmMME3_CTRL_INTR_MASK 0x1E029C
+
+#define mmMME3_CTRL_LOG_SHADOW 0x1E02A0
+
+#define mmMME3_CTRL_PCU_RL_DESC0 0x1E02A4
+
+#define mmMME3_CTRL_PCU_RL_TOKEN_UPDATE 0x1E02A8
+
+#define mmMME3_CTRL_PCU_RL_TH 0x1E02AC
+
+#define mmMME3_CTRL_PCU_RL_MIN 0x1E02B0
+
+#define mmMME3_CTRL_PCU_RL_CTRL_EN 0x1E02B4
+
+#define mmMME3_CTRL_PCU_RL_HISTORY_LOG_SIZE 0x1E02B8
+
+#define mmMME3_CTRL_PCU_DUMMY_A_BF16 0x1E02BC
+
+#define mmMME3_CTRL_PCU_DUMMY_B_BF16 0x1E02C0
+
+#define mmMME3_CTRL_PCU_DUMMY_A_FP32_ODD 0x1E02C4
+
+#define mmMME3_CTRL_PCU_DUMMY_A_FP32_EVEN 0x1E02C8
+
+#define mmMME3_CTRL_PCU_DUMMY_B_FP32_ODD 0x1E02CC
+
+#define mmMME3_CTRL_PCU_DUMMY_B_FP32_EVEN 0x1E02D0
+
+#define mmMME3_CTRL_PROT 0x1E02D4
+
+#define mmMME3_CTRL_EU_POWER_SAVE_DISABLE 0x1E02D8
+
+#define mmMME3_CTRL_CS_DBG_BLOCK_ID 0x1E02DC
+
+#define mmMME3_CTRL_CS_DBG_STATUS_DROP_CNT 0x1E02E0
+
+#define mmMME3_CTRL_TE_CLOSE_CGATE 0x1E02E4
+
+#define mmMME3_CTRL_AGU_SM_INFLIGHT_CNTR 0x1E02E8
+
+#define mmMME3_CTRL_AGU_SM_TOTAL_CNTR 0x1E02EC
+
+#define mmMME3_CTRL_EZSYNC_OUT_CREDIT 0x1E02F0
+
+#define mmMME3_CTRL_PCU_RL_SAT_SEC 0x1E02F4
+
+#define mmMME3_CTRL_AGU_SYNC_MSG_AXI_USER 0x1E02F8
+
+#define mmMME3_CTRL_QM_SLV_LBW_CLK_EN 0x1E02FC
+
+#define mmMME3_CTRL_SHADOW_0_STATUS 0x1E0400
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0x1E0408
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0x1E040C
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0x1E0410
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0x1E0414
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0x1E0418
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0x1E041C
+
+#define mmMME3_CTRL_SHADOW_0_HEADER_LOW 0x1E0420
+
+#define mmMME3_CTRL_SHADOW_0_HEADER_HIGH 0x1E0424
+
+#define mmMME3_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0x1E0428
+
+#define mmMME3_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0x1E042C
+
+#define mmMME3_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0x1E0430
+
+#define mmMME3_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0x1E0434
+
+#define mmMME3_CTRL_SHADOW_0_OUTER_LOOP 0x1E0438
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0x1E043C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0x1E0440
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0x1E0444
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0x1E0448
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0x1E044C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0x1E0450
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0x1E0454
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0x1E0458
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0x1E045C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0x1E0460
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0x1E0464
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0x1E0468
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0x1E046C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0x1E0470
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0x1E0474
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0x1E0478
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0x1E047C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0x1E0480
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0484
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0x1E0488
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0x1E048C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0x1E0490
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0x1E0494
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0x1E0498
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0x1E049C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0x1E04A0
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0x1E04A4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0x1E04A8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0x1E04AC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0x1E04B0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0x1E04B4
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0x1E04B8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0x1E04BC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0x1E04C0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0x1E04C4
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0x1E04C8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0x1E04CC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0x1E04D0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0x1E04D4
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0x1E04D8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0x1E04DC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0x1E04E0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0x1E04E4
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0x1E04E8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0x1E04EC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0x1E04F0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E04F4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E04F8
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E04FC
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0500
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0504
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0508
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0x1E050C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0x1E0510
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0x1E0514
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0x1E0518
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E051C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0520
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0524
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0528
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E052C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0x1E0530
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0x1E0534
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0x1E0538
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0x1E053C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0x1E0540
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0x1E0544
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0x1E0548
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0x1E054C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0x1E0550
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0x1E0554
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0x1E0558
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0x1E055C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0x1E0560
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0x1E0564
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0x1E0568
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0x1E056C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0x1E0570
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0x1E0574
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0x1E0578
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0x1E057C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0x1E0580
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0x1E0584
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0588
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E058C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0590
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0594
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0598
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E059C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0x1E05A0
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0x1E05A4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0x1E05A8
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0x1E05AC
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E05B0
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E05B4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E05B8
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E05BC
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E05C0
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0x1E05C4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0x1E05C8
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0x1E05CC
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0x1E05D0
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SB_REPEAT 0x1E05D4
+
+#define mmMME3_CTRL_SHADOW_0_DESC_RATE_LIMITER 0x1E05D8
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E05DC
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E05E0
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E05E4
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0x1E05E8
+
+#define mmMME3_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0x1E05EC
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_S 0x1E05F0
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0x1E05F4
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0x1E05F8
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0x1E05FC
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0x1E0600
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0x1E0604
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0x1E0608
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0x1E060C
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0x1E0610
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0x1E0614
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0x1E0618
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0x1E061C
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0x1E0620
+
+#define mmMME3_CTRL_SHADOW_0_DESC_DUMMY 0x1E0624
+
+#define mmMME3_CTRL_SHADOW_1_STATUS 0x1E0680
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0x1E0688
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0x1E068C
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0x1E0690
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0x1E0694
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0x1E0698
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0x1E069C
+
+#define mmMME3_CTRL_SHADOW_1_HEADER_LOW 0x1E06A0
+
+#define mmMME3_CTRL_SHADOW_1_HEADER_HIGH 0x1E06A4
+
+#define mmMME3_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0x1E06A8
+
+#define mmMME3_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0x1E06AC
+
+#define mmMME3_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0x1E06B0
+
+#define mmMME3_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0x1E06B4
+
+#define mmMME3_CTRL_SHADOW_1_OUTER_LOOP 0x1E06B8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0x1E06BC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0x1E06C0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0x1E06C4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0x1E06C8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0x1E06CC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0x1E06D0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0x1E06D4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0x1E06D8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0x1E06DC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0x1E06E0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0x1E06E4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0x1E06E8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0x1E06EC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0x1E06F0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0x1E06F4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0x1E06F8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0x1E06FC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0x1E0700
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0704
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0x1E0708
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0x1E070C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0x1E0710
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0x1E0714
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0x1E0718
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0x1E071C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0x1E0720
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0x1E0724
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0x1E0728
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0x1E072C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0x1E0730
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0x1E0734
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0x1E0738
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0x1E073C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0x1E0740
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0x1E0744
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0x1E0748
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0x1E074C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0x1E0750
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0x1E0754
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0x1E0758
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0x1E075C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0x1E0760
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0x1E0764
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0x1E0768
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0x1E076C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0x1E0770
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E0774
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E0778
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E077C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0780
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0784
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0788
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0x1E078C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0x1E0790
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0x1E0794
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0x1E0798
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E079C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E07A0
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E07A4
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E07A8
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E07AC
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0x1E07B0
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0x1E07B4
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0x1E07B8
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0x1E07BC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0x1E07C0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0x1E07C4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0x1E07C8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0x1E07CC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0x1E07D0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0x1E07D4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0x1E07D8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0x1E07DC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0x1E07E0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0x1E07E4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0x1E07E8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0x1E07EC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0x1E07F0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0x1E07F4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0x1E07F8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0x1E07FC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0x1E0800
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0x1E0804
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0808
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E080C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0810
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0814
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0818
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E081C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0x1E0820
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0x1E0824
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0x1E0828
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0x1E082C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E0830
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E0834
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E0838
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E083C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E0840
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0x1E0844
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0x1E0848
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0x1E084C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0x1E0850
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SB_REPEAT 0x1E0854
+
+#define mmMME3_CTRL_SHADOW_1_DESC_RATE_LIMITER 0x1E0858
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E085C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E0860
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E0864
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0x1E0868
+
+#define mmMME3_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0x1E086C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_S 0x1E0870
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0x1E0874
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0x1E0878
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0x1E087C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0x1E0880
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0x1E0884
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0x1E0888
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0x1E088C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0x1E0890
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0x1E0894
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0x1E0898
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0x1E089C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0x1E08A0
+
+#define mmMME3_CTRL_SHADOW_1_DESC_DUMMY 0x1E08A4
+
+#define mmMME3_CTRL_SHADOW_2_STATUS 0x1E0900
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0x1E0908
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0x1E090C
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0x1E0910
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0x1E0914
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0x1E0918
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0x1E091C
+
+#define mmMME3_CTRL_SHADOW_2_HEADER_LOW 0x1E0920
+
+#define mmMME3_CTRL_SHADOW_2_HEADER_HIGH 0x1E0924
+
+#define mmMME3_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0x1E0928
+
+#define mmMME3_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0x1E092C
+
+#define mmMME3_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0x1E0930
+
+#define mmMME3_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0x1E0934
+
+#define mmMME3_CTRL_SHADOW_2_OUTER_LOOP 0x1E0938
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0x1E093C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0x1E0940
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0x1E0944
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0x1E0948
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0x1E094C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0x1E0950
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0x1E0954
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0x1E0958
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0x1E095C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0x1E0960
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0x1E0964
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0x1E0968
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0x1E096C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0x1E0970
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0x1E0974
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0x1E0978
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0x1E097C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0x1E0980
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0984
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0x1E0988
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0x1E098C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0x1E0990
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0x1E0994
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0x1E0998
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0x1E099C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0x1E09A0
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0x1E09A4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0x1E09A8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0x1E09AC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0x1E09B0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0x1E09B4
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0x1E09B8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0x1E09BC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0x1E09C0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0x1E09C4
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0x1E09C8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0x1E09CC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0x1E09D0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0x1E09D4
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0x1E09D8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0x1E09DC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0x1E09E0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0x1E09E4
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0x1E09E8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0x1E09EC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0x1E09F0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E09F4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E09F8
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E09FC
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0A00
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0A04
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0A08
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0x1E0A0C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0x1E0A10
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0x1E0A14
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0x1E0A18
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E0A1C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0A20
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0A24
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0A28
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E0A2C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0x1E0A30
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0x1E0A34
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0x1E0A38
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0x1E0A3C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0x1E0A40
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0x1E0A44
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0x1E0A48
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0x1E0A4C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0x1E0A50
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0x1E0A54
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0x1E0A58
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0x1E0A5C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0x1E0A60
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0x1E0A64
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0x1E0A68
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0x1E0A6C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0x1E0A70
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0x1E0A74
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0x1E0A78
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0x1E0A7C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0x1E0A80
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0x1E0A84
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0A88
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E0A8C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0A90
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0A94
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0A98
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E0A9C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0x1E0AA0
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0x1E0AA4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0x1E0AA8
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0x1E0AAC
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E0AB0
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E0AB4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E0AB8
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E0ABC
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E0AC0
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0x1E0AC4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0x1E0AC8
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0x1E0ACC
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0x1E0AD0
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SB_REPEAT 0x1E0AD4
+
+#define mmMME3_CTRL_SHADOW_2_DESC_RATE_LIMITER 0x1E0AD8
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E0ADC
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E0AE0
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E0AE4
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0x1E0AE8
+
+#define mmMME3_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0x1E0AEC
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_S 0x1E0AF0
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0x1E0AF4
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0x1E0AF8
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0x1E0AFC
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0x1E0B00
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0x1E0B04
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0x1E0B08
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0x1E0B0C
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0x1E0B10
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0x1E0B14
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0x1E0B18
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0x1E0B1C
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0x1E0B20
+
+#define mmMME3_CTRL_SHADOW_2_DESC_DUMMY 0x1E0B24
+
+#define mmMME3_CTRL_SHADOW_3_STATUS 0x1E0B80
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0x1E0B88
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0x1E0B8C
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0x1E0B90
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0x1E0B94
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0x1E0B98
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0x1E0B9C
+
+#define mmMME3_CTRL_SHADOW_3_HEADER_LOW 0x1E0BA0
+
+#define mmMME3_CTRL_SHADOW_3_HEADER_HIGH 0x1E0BA4
+
+#define mmMME3_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0x1E0BA8
+
+#define mmMME3_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0x1E0BAC
+
+#define mmMME3_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0x1E0BB0
+
+#define mmMME3_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0x1E0BB4
+
+#define mmMME3_CTRL_SHADOW_3_OUTER_LOOP 0x1E0BB8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0x1E0BBC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0x1E0BC0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0x1E0BC4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0x1E0BC8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0x1E0BCC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0x1E0BD0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0x1E0BD4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0x1E0BD8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0x1E0BDC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0x1E0BE0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0x1E0BE4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0x1E0BE8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0x1E0BEC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0x1E0BF0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0x1E0BF4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0x1E0BF8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0x1E0BFC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0x1E0C00
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0C04
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0x1E0C08
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0x1E0C0C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0x1E0C10
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0x1E0C14
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0x1E0C18
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0x1E0C1C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0x1E0C20
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0x1E0C24
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0x1E0C28
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0x1E0C2C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0x1E0C30
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0x1E0C34
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0x1E0C38
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0x1E0C3C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0x1E0C40
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0x1E0C44
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0x1E0C48
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0x1E0C4C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0x1E0C50
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0x1E0C54
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0x1E0C58
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0x1E0C5C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0x1E0C60
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0x1E0C64
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0x1E0C68
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0x1E0C6C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0x1E0C70
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E0C74
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E0C78
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E0C7C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0C80
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0C84
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0C88
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0x1E0C8C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0x1E0C90
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0x1E0C94
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0x1E0C98
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E0C9C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0CA0
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0CA4
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0CA8
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E0CAC
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0x1E0CB0
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0x1E0CB4
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0x1E0CB8
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0x1E0CBC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0x1E0CC0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0x1E0CC4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0x1E0CC8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0x1E0CCC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0x1E0CD0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0x1E0CD4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0x1E0CD8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0x1E0CDC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0x1E0CE0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0x1E0CE4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0x1E0CE8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0x1E0CEC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0x1E0CF0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0x1E0CF4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0x1E0CF8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0x1E0CFC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0x1E0D00
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0x1E0D04
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0D08
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E0D0C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0D10
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0D14
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0D18
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E0D1C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0x1E0D20
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0x1E0D24
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0x1E0D28
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0x1E0D2C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E0D30
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E0D34
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E0D38
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E0D3C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E0D40
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0x1E0D44
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0x1E0D48
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0x1E0D4C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0x1E0D50
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SB_REPEAT 0x1E0D54
+
+#define mmMME3_CTRL_SHADOW_3_DESC_RATE_LIMITER 0x1E0D58
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E0D5C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E0D60
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E0D64
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0x1E0D68
+
+#define mmMME3_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0x1E0D6C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_S 0x1E0D70
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0x1E0D74
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0x1E0D78
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0x1E0D7C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0x1E0D80
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0x1E0D84
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0x1E0D88
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0x1E0D8C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0x1E0D90
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0x1E0D94
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0x1E0D98
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0x1E0D9C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0x1E0DA0
+
+#define mmMME3_CTRL_SHADOW_3_DESC_DUMMY 0x1E0DA4
+
+#endif /* ASIC_REG_MME3_CTRL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
new file mode 100644
index 000000000000..61465b599850
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MMU_UP_REGS_H_
+#define ASIC_REG_MMU_UP_REGS_H_
+
+/*
+ *****************************************
+ * MMU_UP (Prototype: MMU)
+ *****************************************
+ */
+
+#define mmMMU_UP_MMU_ENABLE 0xC1100C
+
+#define mmMMU_UP_FORCE_ORDERING 0xC11010
+
+#define mmMMU_UP_FEATURE_ENABLE 0xC11014
+
+#define mmMMU_UP_VA_ORDERING_MASK_31_7 0xC11018
+
+#define mmMMU_UP_VA_ORDERING_MASK_49_32 0xC1101C
+
+#define mmMMU_UP_LOG2_DDR_SIZE 0xC11020
+
+#define mmMMU_UP_SCRAMBLER 0xC11024
+
+#define mmMMU_UP_MEM_INIT_BUSY 0xC11028
+
+#define mmMMU_UP_SPI_MASK 0xC1102C
+
+#define mmMMU_UP_SPI_CAUSE 0xC11030
+
+#define mmMMU_UP_PAGE_ERROR_CAPTURE 0xC11034
+
+#define mmMMU_UP_PAGE_ERROR_CAPTURE_VA 0xC11038
+
+#define mmMMU_UP_ACCESS_ERROR_CAPTURE 0xC1103C
+
+#define mmMMU_UP_ACCESS_ERROR_CAPTURE_VA 0xC11040
+
+#define mmMMU_UP_SPI_INTERRUPT_CLR 0xC11044
+
+#define mmMMU_UP_SPI_INTERRUPT_MASK 0xC11048
+
+#define mmMMU_UP_DBG_MEM_WRAP_RM 0xC1104C
+
+#define mmMMU_UP_SPI_CAUSE_CLR 0xC11050
+
+#define mmMMU_UP_SLICE_CREDIT 0xC11054
+
+#define mmMMU_UP_PIPE_CREDIT 0xC11058
+
+#define mmMMU_UP_RAZWI_WRITE_VLD 0xC1105C
+
+#define mmMMU_UP_RAZWI_WRITE_ID 0xC11060
+
+#define mmMMU_UP_RAZWI_READ_VLD 0xC11064
+
+#define mmMMU_UP_RAZWI_READ_ID 0xC11068
+
+#define mmMMU_UP_MMU_BYPASS 0xC1106C
+
+#endif /* ASIC_REG_MMU_UP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
new file mode 100644
index 000000000000..2efa2a54deb4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_0_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_0_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_0_PERM_SEL 0x386108
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_0 0x386114
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_1 0x386118
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_2 0x38611C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_3 0x386120
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_4 0x386124
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_5 0x386128
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_6 0x38612C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_7 0x386130
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_8 0x386134
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_9 0x386138
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_10 0x38613C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_11 0x386140
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_12 0x386144
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_13 0x386148
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_14 0x38614C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_15 0x386150
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_16 0x386154
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_17 0x386158
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_18 0x38615C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_19 0x386160
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_20 0x386164
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_21 0x386168
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_22 0x38616C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_23 0x386170
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_24 0x386174
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_25 0x386178
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_26 0x38617C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_27 0x386180
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_0 0x386184
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_1 0x386188
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_2 0x38618C
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_3 0x386190
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_4 0x386194
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_5 0x386198
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_6 0x38619C
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_7 0x3861A0
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_8 0x3861A4
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_9 0x3861A8
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_10 0x3861AC
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_11 0x3861B0
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_12 0x3861B4
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_13 0x3861B8
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_14 0x3861BC
+
+#define mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN 0x38626C
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_EN 0x386274
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_SAT 0x386278
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_RST 0x38627C
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_TIMEOUT 0x386280
+
+#define mmNIF_RTR_CTRL_0_SCRAM_HBM_EN 0x386284
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_EN 0x386288
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_SAT 0x38628C
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_RST 0x386290
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_TIMEOUT 0x386294
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_EN 0x38629C
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_SAT 0x3862A0
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_RST 0x3862A4
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_TIMEOUT 0x3862AC
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_RED 0x3862B4
+
+#define mmNIF_RTR_CTRL_0_E2E_HBM_EN 0x3862EC
+
+#define mmNIF_RTR_CTRL_0_E2E_PCI_EN 0x3862F0
+
+#define mmNIF_RTR_CTRL_0_E2E_HBM_WR_SIZE 0x3862F4
+
+#define mmNIF_RTR_CTRL_0_E2E_PCI_WR_SIZE 0x3862F8
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET_EN 0x386404
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET 0x386408
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_WRAP 0x38640C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_CNT 0x386410
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET_EN 0x386414
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET 0x386418
+
+#define mmNIF_RTR_CTRL_0_E2E_HBM_RD_SIZE 0x38641C
+
+#define mmNIF_RTR_CTRL_0_E2E_PCI_RD_SIZE 0x386420
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET_EN 0x386424
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET 0x386428
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_WRAP 0x38642C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_CNT 0x386430
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET_EN 0x386434
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET 0x386438
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_SEL_0 0x386450
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_SEL_1 0x386454
+
+#define mmNIF_RTR_CTRL_0_NON_LIN_EN 0x386480
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_0 0x386500
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_1 0x386504
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_2 0x386508
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_3 0x38650C
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_4 0x386510
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_0 0x386514
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_1 0x386520
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_2 0x386524
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_3 0x386528
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_4 0x38652C
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_5 0x386530
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_6 0x386534
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_7 0x386538
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_8 0x38653C
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_9 0x386540
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_0 0x386550
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_1 0x386554
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_2 0x386558
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_3 0x38655C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_4 0x386560
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_5 0x386564
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_6 0x386568
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_7 0x38656C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_8 0x386570
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_9 0x386574
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_10 0x386578
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_11 0x38657C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_12 0x386580
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_13 0x386584
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_14 0x386588
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_15 0x38658C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_16 0x386590
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_17 0x386594
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_18 0x386598
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0 0x3865E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_1 0x3865E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_2 0x3865EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_3 0x3865F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_4 0x3865F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_5 0x3865F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_6 0x3865FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_7 0x386600
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_8 0x386604
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_9 0x386608
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_10 0x38660C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_11 0x386610
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_12 0x386614
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_13 0x386618
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_14 0x38661C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_15 0x386620
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0 0x386624
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_1 0x386628
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_2 0x38662C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_3 0x386630
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_4 0x386634
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_5 0x386638
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_6 0x38663C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_7 0x386640
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_8 0x386644
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_9 0x386648
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_10 0x38664C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_11 0x386650
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_12 0x386654
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_13 0x386658
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_14 0x38665C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_15 0x386660
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0 0x386664
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_1 0x386668
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_2 0x38666C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_3 0x386670
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_4 0x386674
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_5 0x386678
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_6 0x38667C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_7 0x386680
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_8 0x386684
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_9 0x386688
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_10 0x38668C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_11 0x386690
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_12 0x386694
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_13 0x386698
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_14 0x38669C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_15 0x3866A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0 0x3866A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_1 0x3866A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_2 0x3866AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_3 0x3866B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_4 0x3866B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_5 0x3866B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_6 0x3866BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_7 0x3866C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_8 0x3866C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_9 0x3866C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_10 0x3866CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_11 0x3866D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_12 0x3866D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_13 0x3866D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_14 0x3866DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_15 0x3866E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_0 0x3866E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_1 0x3866E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_2 0x3866EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_3 0x3866F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_4 0x3866F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_5 0x3866F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_6 0x3866FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_7 0x386700
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_8 0x386704
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_9 0x386708
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_10 0x38670C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_11 0x386710
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_12 0x386714
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_13 0x386718
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_14 0x38671C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_15 0x386720
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_0 0x386724
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_1 0x386728
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_2 0x38672C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_3 0x386730
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_4 0x386734
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_5 0x386738
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_6 0x38673C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_7 0x386740
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_8 0x386744
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_9 0x386748
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_10 0x38674C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_11 0x386750
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_12 0x386754
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_13 0x386758
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_14 0x38675C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_15 0x386760
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_0 0x386764
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_1 0x386768
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_2 0x38676C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_3 0x386770
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_4 0x386774
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_5 0x386778
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_6 0x38677C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_7 0x386780
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_8 0x386784
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_9 0x386788
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_10 0x38678C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_11 0x386790
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_12 0x386794
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_13 0x386798
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_14 0x38679C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_15 0x3867A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_0 0x3867A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_1 0x3867A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_2 0x3867AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_3 0x3867B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_4 0x3867B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_5 0x3867B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_6 0x3867BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_7 0x3867C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_8 0x3867C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_9 0x3867C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_10 0x3867CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_11 0x3867D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_12 0x3867D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_13 0x3867D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_14 0x3867DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_15 0x3867E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0 0x386824
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_1 0x386828
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_2 0x38682C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_3 0x386830
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_4 0x386834
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_5 0x386838
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_6 0x38683C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_7 0x386840
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_8 0x386844
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_9 0x386848
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_10 0x38684C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_11 0x386850
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_12 0x386854
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_13 0x386858
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_14 0x38685C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_15 0x386860
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0 0x386864
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_1 0x386868
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_2 0x38686C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_3 0x386870
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_4 0x386874
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_5 0x386878
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_6 0x38687C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_7 0x386880
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_8 0x386884
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_9 0x386888
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_10 0x38688C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_11 0x386890
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_12 0x386894
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_13 0x386898
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_14 0x38689C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_15 0x3868A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0 0x3868A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_1 0x3868A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_2 0x3868AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_3 0x3868B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_4 0x3868B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_5 0x3868B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_6 0x3868BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_7 0x3868C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_8 0x3868C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_9 0x3868C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_10 0x3868CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_11 0x3868D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_12 0x3868D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_13 0x3868D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_14 0x3868DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_15 0x3868E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0 0x3868E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_1 0x3868E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_2 0x3868EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_3 0x3868F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_4 0x3868F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_5 0x3868F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_6 0x3868FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_7 0x386900
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_8 0x386904
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_9 0x386908
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_10 0x38690C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_11 0x386910
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_12 0x386914
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_13 0x386918
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_14 0x38691C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_15 0x386920
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_0 0x386924
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_1 0x386928
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_2 0x38692C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_3 0x386930
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_4 0x386934
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_5 0x386938
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_6 0x38693C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_7 0x386940
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_8 0x386944
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_9 0x386948
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_10 0x38694C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_11 0x386950
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_12 0x386954
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_13 0x386958
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_14 0x38695C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_15 0x386960
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_0 0x386964
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_1 0x386968
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_2 0x38696C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_3 0x386970
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_4 0x386974
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_5 0x386978
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_6 0x38697C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_7 0x386980
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_8 0x386984
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_9 0x386988
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_10 0x38698C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_11 0x386990
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_12 0x386994
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_13 0x386998
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_14 0x38699C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_15 0x3869A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_0 0x3869A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_1 0x3869A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_2 0x3869AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_3 0x3869B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_4 0x3869B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_5 0x3869B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_6 0x3869BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_7 0x3869C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_8 0x3869C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_9 0x3869C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_10 0x3869CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_11 0x3869D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_12 0x3869D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_13 0x3869D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_14 0x3869DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_15 0x3869E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_0 0x3869E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_1 0x3869E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_2 0x3869EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_3 0x3869F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_4 0x3869F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_5 0x3869F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_6 0x3869FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_7 0x386A00
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_8 0x386A04
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_9 0x386A08
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_10 0x386A0C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_11 0x386A10
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_12 0x386A14
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_13 0x386A18
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_14 0x386A1C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_15 0x386A20
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AW 0x386A64
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AR 0x386A68
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_HIT_AW 0x386A6C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_HIT_AR 0x386A70
+
+#define mmNIF_RTR_CTRL_0_RGL_CFG 0x386B64
+
+#define mmNIF_RTR_CTRL_0_RGL_SHIFT 0x386B68
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_0 0x386B6C
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_1 0x386B70
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_2 0x386B74
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_3 0x386B78
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_4 0x386B7C
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_5 0x386B80
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_6 0x386B84
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_7 0x386B88
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_0 0x386BAC
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_1 0x386BB0
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_2 0x386BB4
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_3 0x386BB8
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_4 0x386BBC
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_5 0x386BC0
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_6 0x386BC4
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_7 0x386BC8
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_0 0x386BEC
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_1 0x386BF0
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_2 0x386BF4
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_3 0x386BF8
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_4 0x386BFC
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_5 0x386C00
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_6 0x386C04
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_7 0x386C08
+
+#define mmNIF_RTR_CTRL_0_RGL_WDT 0x386C2C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_WRAP 0x386C30
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_WRAP 0x386C34
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_WRAP 0x386C38
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_WRAP 0x386C3C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_WRAP 0x386C40
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_WRAP 0x386C44
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_WRAP 0x386C48
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_WRAP 0x386C4C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_CNT 0x386C50
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_CNT 0x386C54
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_CNT 0x386C58
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_CNT 0x386C5C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_CNT 0x386C60
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_CNT 0x386C64
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_CNT 0x386C68
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_CNT 0x386C6C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_WRAP 0x386C70
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_WRAP 0x386C74
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_WRAP 0x386C78
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_WRAP 0x386C7C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_WRAP 0x386C80
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_WRAP 0x386C84
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_WRAP 0x386C88
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_WRAP 0x386C8C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_CNT 0x386C90
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_CNT 0x386C94
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_CNT 0x386C98
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_CNT 0x386C9C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_CNT 0x386CA0
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_CNT 0x386CA4
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_CNT 0x386CA8
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_CNT 0x386CAC
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_0 0x386CB0
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_1 0x386CB4
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_2 0x386CB8
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_3 0x386CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
new file mode 100644
index 000000000000..a6047d4e2560
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_1_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_1_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_1_PERM_SEL 0x396108
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_0 0x396114
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_1 0x396118
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_2 0x39611C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_3 0x396120
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_4 0x396124
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_5 0x396128
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_6 0x39612C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_7 0x396130
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_8 0x396134
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_9 0x396138
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_10 0x39613C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_11 0x396140
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_12 0x396144
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_13 0x396148
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_14 0x39614C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_15 0x396150
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_16 0x396154
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_17 0x396158
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_18 0x39615C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_19 0x396160
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_20 0x396164
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_21 0x396168
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_22 0x39616C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_23 0x396170
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_24 0x396174
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_25 0x396178
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_26 0x39617C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_27 0x396180
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_0 0x396184
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_1 0x396188
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_2 0x39618C
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_3 0x396190
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_4 0x396194
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_5 0x396198
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_6 0x39619C
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_7 0x3961A0
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_8 0x3961A4
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_9 0x3961A8
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_10 0x3961AC
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_11 0x3961B0
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_12 0x3961B4
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_13 0x3961B8
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_14 0x3961BC
+
+#define mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN 0x39626C
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_EN 0x396274
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_SAT 0x396278
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_RST 0x39627C
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_TIMEOUT 0x396280
+
+#define mmNIF_RTR_CTRL_1_SCRAM_HBM_EN 0x396284
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_EN 0x396288
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_SAT 0x39628C
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_RST 0x396290
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_TIMEOUT 0x396294
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_EN 0x39629C
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_SAT 0x3962A0
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_RST 0x3962A4
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_TIMEOUT 0x3962AC
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_RED 0x3962B4
+
+#define mmNIF_RTR_CTRL_1_E2E_HBM_EN 0x3962EC
+
+#define mmNIF_RTR_CTRL_1_E2E_PCI_EN 0x3962F0
+
+#define mmNIF_RTR_CTRL_1_E2E_HBM_WR_SIZE 0x3962F4
+
+#define mmNIF_RTR_CTRL_1_E2E_PCI_WR_SIZE 0x3962F8
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET_EN 0x396404
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET 0x396408
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_WRAP 0x39640C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_CNT 0x396410
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET_EN 0x396414
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET 0x396418
+
+#define mmNIF_RTR_CTRL_1_E2E_HBM_RD_SIZE 0x39641C
+
+#define mmNIF_RTR_CTRL_1_E2E_PCI_RD_SIZE 0x396420
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET_EN 0x396424
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET 0x396428
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_WRAP 0x39642C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_CNT 0x396430
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET_EN 0x396434
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET 0x396438
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_SEL_0 0x396450
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_SEL_1 0x396454
+
+#define mmNIF_RTR_CTRL_1_NON_LIN_EN 0x396480
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_0 0x396500
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_1 0x396504
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_2 0x396508
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_3 0x39650C
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_4 0x396510
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_0 0x396514
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_1 0x396520
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_2 0x396524
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_3 0x396528
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_4 0x39652C
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_5 0x396530
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_6 0x396534
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_7 0x396538
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_8 0x39653C
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_9 0x396540
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_0 0x396550
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_1 0x396554
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_2 0x396558
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_3 0x39655C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_4 0x396560
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_5 0x396564
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_6 0x396568
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_7 0x39656C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_8 0x396570
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_9 0x396574
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_10 0x396578
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_11 0x39657C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_12 0x396580
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_13 0x396584
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_14 0x396588
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_15 0x39658C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_16 0x396590
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_17 0x396594
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_18 0x396598
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0 0x3965E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_1 0x3965E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_2 0x3965EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_3 0x3965F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_4 0x3965F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_5 0x3965F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_6 0x3965FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_7 0x396600
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_8 0x396604
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_9 0x396608
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_10 0x39660C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_11 0x396610
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_12 0x396614
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_13 0x396618
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_14 0x39661C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_15 0x396620
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0 0x396624
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_1 0x396628
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_2 0x39662C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_3 0x396630
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_4 0x396634
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_5 0x396638
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_6 0x39663C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_7 0x396640
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_8 0x396644
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_9 0x396648
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_10 0x39664C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_11 0x396650
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_12 0x396654
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_13 0x396658
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_14 0x39665C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_15 0x396660
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0 0x396664
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_1 0x396668
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_2 0x39666C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_3 0x396670
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_4 0x396674
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_5 0x396678
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_6 0x39667C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_7 0x396680
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_8 0x396684
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_9 0x396688
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_10 0x39668C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_11 0x396690
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_12 0x396694
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_13 0x396698
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_14 0x39669C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_15 0x3966A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0 0x3966A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_1 0x3966A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_2 0x3966AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_3 0x3966B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_4 0x3966B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_5 0x3966B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_6 0x3966BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_7 0x3966C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_8 0x3966C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_9 0x3966C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_10 0x3966CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_11 0x3966D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_12 0x3966D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_13 0x3966D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_14 0x3966DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_15 0x3966E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_0 0x3966E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_1 0x3966E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_2 0x3966EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_3 0x3966F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_4 0x3966F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_5 0x3966F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_6 0x3966FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_7 0x396700
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_8 0x396704
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_9 0x396708
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_10 0x39670C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_11 0x396710
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_12 0x396714
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_13 0x396718
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_14 0x39671C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_15 0x396720
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_0 0x396724
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_1 0x396728
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_2 0x39672C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_3 0x396730
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_4 0x396734
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_5 0x396738
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_6 0x39673C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_7 0x396740
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_8 0x396744
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_9 0x396748
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_10 0x39674C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_11 0x396750
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_12 0x396754
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_13 0x396758
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_14 0x39675C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_15 0x396760
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_0 0x396764
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_1 0x396768
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_2 0x39676C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_3 0x396770
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_4 0x396774
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_5 0x396778
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_6 0x39677C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_7 0x396780
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_8 0x396784
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_9 0x396788
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_10 0x39678C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_11 0x396790
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_12 0x396794
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_13 0x396798
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_14 0x39679C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_15 0x3967A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_0 0x3967A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_1 0x3967A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_2 0x3967AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_3 0x3967B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_4 0x3967B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_5 0x3967B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_6 0x3967BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_7 0x3967C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_8 0x3967C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_9 0x3967C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_10 0x3967CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_11 0x3967D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_12 0x3967D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_13 0x3967D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_14 0x3967DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_15 0x3967E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0 0x396824
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_1 0x396828
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_2 0x39682C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_3 0x396830
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_4 0x396834
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_5 0x396838
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_6 0x39683C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_7 0x396840
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_8 0x396844
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_9 0x396848
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_10 0x39684C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_11 0x396850
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_12 0x396854
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_13 0x396858
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_14 0x39685C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_15 0x396860
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0 0x396864
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_1 0x396868
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_2 0x39686C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_3 0x396870
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_4 0x396874
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_5 0x396878
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_6 0x39687C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_7 0x396880
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_8 0x396884
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_9 0x396888
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_10 0x39688C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_11 0x396890
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_12 0x396894
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_13 0x396898
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_14 0x39689C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_15 0x3968A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0 0x3968A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_1 0x3968A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_2 0x3968AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_3 0x3968B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_4 0x3968B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_5 0x3968B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_6 0x3968BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_7 0x3968C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_8 0x3968C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_9 0x3968C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_10 0x3968CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_11 0x3968D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_12 0x3968D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_13 0x3968D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_14 0x3968DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_15 0x3968E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0 0x3968E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_1 0x3968E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_2 0x3968EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_3 0x3968F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_4 0x3968F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_5 0x3968F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_6 0x3968FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_7 0x396900
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_8 0x396904
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_9 0x396908
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_10 0x39690C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_11 0x396910
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_12 0x396914
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_13 0x396918
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_14 0x39691C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_15 0x396920
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_0 0x396924
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_1 0x396928
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_2 0x39692C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_3 0x396930
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_4 0x396934
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_5 0x396938
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_6 0x39693C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_7 0x396940
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_8 0x396944
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_9 0x396948
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_10 0x39694C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_11 0x396950
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_12 0x396954
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_13 0x396958
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_14 0x39695C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_15 0x396960
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_0 0x396964
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_1 0x396968
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_2 0x39696C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_3 0x396970
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_4 0x396974
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_5 0x396978
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_6 0x39697C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_7 0x396980
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_8 0x396984
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_9 0x396988
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_10 0x39698C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_11 0x396990
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_12 0x396994
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_13 0x396998
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_14 0x39699C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_15 0x3969A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_0 0x3969A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_1 0x3969A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_2 0x3969AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_3 0x3969B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_4 0x3969B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_5 0x3969B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_6 0x3969BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_7 0x3969C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_8 0x3969C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_9 0x3969C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_10 0x3969CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_11 0x3969D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_12 0x3969D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_13 0x3969D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_14 0x3969DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_15 0x3969E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_0 0x3969E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_1 0x3969E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_2 0x3969EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_3 0x3969F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_4 0x3969F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_5 0x3969F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_6 0x3969FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_7 0x396A00
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_8 0x396A04
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_9 0x396A08
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_10 0x396A0C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_11 0x396A10
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_12 0x396A14
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_13 0x396A18
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_14 0x396A1C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_15 0x396A20
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AW 0x396A64
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AR 0x396A68
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_HIT_AW 0x396A6C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_HIT_AR 0x396A70
+
+#define mmNIF_RTR_CTRL_1_RGL_CFG 0x396B64
+
+#define mmNIF_RTR_CTRL_1_RGL_SHIFT 0x396B68
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_0 0x396B6C
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_1 0x396B70
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_2 0x396B74
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_3 0x396B78
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_4 0x396B7C
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_5 0x396B80
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_6 0x396B84
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_7 0x396B88
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_0 0x396BAC
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_1 0x396BB0
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_2 0x396BB4
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_3 0x396BB8
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_4 0x396BBC
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_5 0x396BC0
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_6 0x396BC4
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_7 0x396BC8
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_0 0x396BEC
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_1 0x396BF0
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_2 0x396BF4
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_3 0x396BF8
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_4 0x396BFC
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_5 0x396C00
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_6 0x396C04
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_7 0x396C08
+
+#define mmNIF_RTR_CTRL_1_RGL_WDT 0x396C2C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_WRAP 0x396C30
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_WRAP 0x396C34
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_WRAP 0x396C38
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_WRAP 0x396C3C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_WRAP 0x396C40
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_WRAP 0x396C44
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_WRAP 0x396C48
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_WRAP 0x396C4C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_CNT 0x396C50
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_CNT 0x396C54
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_CNT 0x396C58
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_CNT 0x396C5C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_CNT 0x396C60
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_CNT 0x396C64
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_CNT 0x396C68
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_CNT 0x396C6C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_WRAP 0x396C70
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_WRAP 0x396C74
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_WRAP 0x396C78
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_WRAP 0x396C7C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_WRAP 0x396C80
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_WRAP 0x396C84
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_WRAP 0x396C88
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_WRAP 0x396C8C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_CNT 0x396C90
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_CNT 0x396C94
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_CNT 0x396C98
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_CNT 0x396C9C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_CNT 0x396CA0
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_CNT 0x396CA4
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_CNT 0x396CA8
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_CNT 0x396CAC
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_0 0x396CB0
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_1 0x396CB4
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_2 0x396CB8
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_3 0x396CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_1_REGS_H_ */
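The RANGE_* defines in the file above form 16-entry register arrays at a fixed 4-byte stride (for example, PRIV_BASE_LOW_AR_0 at 0x396924 through PRIV_BASE_LOW_AR_15 at 0x396960). Where a loop is preferable to sixteen literal names, the address can be derived from the _0 define; a minimal sketch, assuming nothing beyond the defines above (the helper name and REG_STRIDE are illustrative, not part of the driver):

#include <linux/types.h>

/* Each register in the array is one 32-bit word, hence the 4-byte stride. */
#define REG_STRIDE	4

/* Illustrative helper: address of RANGE_PRIV_BASE_LOW_AR_<index>, index 0..15. */
static inline u32 nif1_priv_base_low_ar_reg(u32 index)
{
	return mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_0 + index * REG_STRIDE;
}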
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
new file mode 100644
index 000000000000..9de8442f9bc2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_2_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_2_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_2 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_2_PERM_SEL 0x3A6108
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_0 0x3A6114
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_1 0x3A6118
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_2 0x3A611C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_3 0x3A6120
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_4 0x3A6124
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_5 0x3A6128
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_6 0x3A612C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_7 0x3A6130
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_8 0x3A6134
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_9 0x3A6138
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_10 0x3A613C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_11 0x3A6140
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_12 0x3A6144
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_13 0x3A6148
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_14 0x3A614C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_15 0x3A6150
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_16 0x3A6154
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_17 0x3A6158
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_18 0x3A615C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_19 0x3A6160
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_20 0x3A6164
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_21 0x3A6168
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_22 0x3A616C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_23 0x3A6170
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_24 0x3A6174
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_25 0x3A6178
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_26 0x3A617C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_27 0x3A6180
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_0 0x3A6184
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_1 0x3A6188
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_2 0x3A618C
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_3 0x3A6190
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_4 0x3A6194
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_5 0x3A6198
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_6 0x3A619C
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_7 0x3A61A0
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_8 0x3A61A4
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_9 0x3A61A8
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_10 0x3A61AC
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_11 0x3A61B0
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_12 0x3A61B4
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_13 0x3A61B8
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_14 0x3A61BC
+
+#define mmNIF_RTR_CTRL_2_SCRAM_SRAM_EN 0x3A626C
+
+#define mmNIF_RTR_CTRL_2_RL_HBM_EN 0x3A6274
+
+#define mmNIF_RTR_CTRL_2_RL_HBM_SAT 0x3A6278
+
+#define mmNIF_RTR_CTRL_2_RL_HBM_RST 0x3A627C
+
+#define mmNIF_RTR_CTRL_2_RL_HBM_TIMEOUT 0x3A6280
+
+#define mmNIF_RTR_CTRL_2_SCRAM_HBM_EN 0x3A6284
+
+#define mmNIF_RTR_CTRL_2_RL_PCI_EN 0x3A6288
+
+#define mmNIF_RTR_CTRL_2_RL_PCI_SAT 0x3A628C
+
+#define mmNIF_RTR_CTRL_2_RL_PCI_RST 0x3A6290
+
+#define mmNIF_RTR_CTRL_2_RL_PCI_TIMEOUT 0x3A6294
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_EN 0x3A629C
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_SAT 0x3A62A0
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_RST 0x3A62A4
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_TIMEOUT 0x3A62AC
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_RED 0x3A62B4
+
+#define mmNIF_RTR_CTRL_2_E2E_HBM_EN 0x3A62EC
+
+#define mmNIF_RTR_CTRL_2_E2E_PCI_EN 0x3A62F0
+
+#define mmNIF_RTR_CTRL_2_E2E_HBM_WR_SIZE 0x3A62F4
+
+#define mmNIF_RTR_CTRL_2_E2E_PCI_WR_SIZE 0x3A62F8
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET_EN 0x3A6404
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET 0x3A6408
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_WRAP 0x3A640C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_CNT 0x3A6410
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET_EN 0x3A6414
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET 0x3A6418
+
+#define mmNIF_RTR_CTRL_2_E2E_HBM_RD_SIZE 0x3A641C
+
+#define mmNIF_RTR_CTRL_2_E2E_PCI_RD_SIZE 0x3A6420
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET_EN 0x3A6424
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET 0x3A6428
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_WRAP 0x3A642C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_CNT 0x3A6430
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET_EN 0x3A6434
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET 0x3A6438
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_SEL_0 0x3A6450
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_SEL_1 0x3A6454
+
+#define mmNIF_RTR_CTRL_2_NON_LIN_EN 0x3A6480
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_0 0x3A6500
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_1 0x3A6504
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_2 0x3A6508
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_3 0x3A650C
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_4 0x3A6510
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_0 0x3A6514
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_1 0x3A6520
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_2 0x3A6524
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_3 0x3A6528
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_4 0x3A652C
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_5 0x3A6530
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_6 0x3A6534
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_7 0x3A6538
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_8 0x3A653C
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_9 0x3A6540
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_0 0x3A6550
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_1 0x3A6554
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_2 0x3A6558
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_3 0x3A655C
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_4 0x3A6560
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_5 0x3A6564
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_6 0x3A6568
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_7 0x3A656C
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_8 0x3A6570
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_9 0x3A6574
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_10 0x3A6578
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_11 0x3A657C
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_12 0x3A6580
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_13 0x3A6584
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_14 0x3A6588
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_15 0x3A658C
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_16 0x3A6590
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_17 0x3A6594
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_18 0x3A6598
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0 0x3A65E4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_1 0x3A65E8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_2 0x3A65EC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_3 0x3A65F0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_4 0x3A65F4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_5 0x3A65F8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_6 0x3A65FC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_7 0x3A6600
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_8 0x3A6604
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_9 0x3A6608
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_10 0x3A660C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_11 0x3A6610
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_12 0x3A6614
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_13 0x3A6618
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_14 0x3A661C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_15 0x3A6620
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0 0x3A6624
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_1 0x3A6628
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_2 0x3A662C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_3 0x3A6630
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_4 0x3A6634
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_5 0x3A6638
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_6 0x3A663C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_7 0x3A6640
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_8 0x3A6644
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_9 0x3A6648
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_10 0x3A664C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_11 0x3A6650
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_12 0x3A6654
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_13 0x3A6658
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_14 0x3A665C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_15 0x3A6660
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0 0x3A6664
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_1 0x3A6668
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_2 0x3A666C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_3 0x3A6670
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_4 0x3A6674
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_5 0x3A6678
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_6 0x3A667C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_7 0x3A6680
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_8 0x3A6684
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_9 0x3A6688
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_10 0x3A668C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_11 0x3A6690
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_12 0x3A6694
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_13 0x3A6698
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_14 0x3A669C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_15 0x3A66A0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0 0x3A66A4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_1 0x3A66A8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_2 0x3A66AC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_3 0x3A66B0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_4 0x3A66B4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_5 0x3A66B8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_6 0x3A66BC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_7 0x3A66C0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_8 0x3A66C4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_9 0x3A66C8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_10 0x3A66CC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_11 0x3A66D0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_12 0x3A66D4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_13 0x3A66D8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_14 0x3A66DC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_15 0x3A66E0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_0 0x3A66E4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_1 0x3A66E8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_2 0x3A66EC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_3 0x3A66F0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_4 0x3A66F4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_5 0x3A66F8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_6 0x3A66FC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_7 0x3A6700
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_8 0x3A6704
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_9 0x3A6708
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_10 0x3A670C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_11 0x3A6710
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_12 0x3A6714
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_13 0x3A6718
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_14 0x3A671C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_15 0x3A6720
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_0 0x3A6724
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_1 0x3A6728
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_2 0x3A672C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_3 0x3A6730
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_4 0x3A6734
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_5 0x3A6738
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_6 0x3A673C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_7 0x3A6740
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_8 0x3A6744
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_9 0x3A6748
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_10 0x3A674C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_11 0x3A6750
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_12 0x3A6754
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_13 0x3A6758
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_14 0x3A675C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_15 0x3A6760
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_0 0x3A6764
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_1 0x3A6768
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_2 0x3A676C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_3 0x3A6770
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_4 0x3A6774
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_5 0x3A6778
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_6 0x3A677C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_7 0x3A6780
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_8 0x3A6784
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_9 0x3A6788
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_10 0x3A678C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_11 0x3A6790
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_12 0x3A6794
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_13 0x3A6798
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_14 0x3A679C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_15 0x3A67A0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_0 0x3A67A4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_1 0x3A67A8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_2 0x3A67AC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_3 0x3A67B0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_4 0x3A67B4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_5 0x3A67B8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_6 0x3A67BC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_7 0x3A67C0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_8 0x3A67C4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_9 0x3A67C8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_10 0x3A67CC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_11 0x3A67D0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_12 0x3A67D4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_13 0x3A67D8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_14 0x3A67DC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_15 0x3A67E0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0 0x3A6824
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_1 0x3A6828
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_2 0x3A682C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_3 0x3A6830
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_4 0x3A6834
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_5 0x3A6838
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_6 0x3A683C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_7 0x3A6840
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_8 0x3A6844
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_9 0x3A6848
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_10 0x3A684C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_11 0x3A6850
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_12 0x3A6854
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_13 0x3A6858
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_14 0x3A685C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_15 0x3A6860
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0 0x3A6864
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_1 0x3A6868
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_2 0x3A686C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_3 0x3A6870
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_4 0x3A6874
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_5 0x3A6878
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_6 0x3A687C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_7 0x3A6880
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_8 0x3A6884
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_9 0x3A6888
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_10 0x3A688C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_11 0x3A6890
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_12 0x3A6894
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_13 0x3A6898
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_14 0x3A689C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_15 0x3A68A0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0 0x3A68A4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_1 0x3A68A8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_2 0x3A68AC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_3 0x3A68B0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_4 0x3A68B4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_5 0x3A68B8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_6 0x3A68BC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_7 0x3A68C0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_8 0x3A68C4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_9 0x3A68C8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_10 0x3A68CC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_11 0x3A68D0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_12 0x3A68D4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_13 0x3A68D8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_14 0x3A68DC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_15 0x3A68E0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0 0x3A68E4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_1 0x3A68E8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_2 0x3A68EC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_3 0x3A68F0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_4 0x3A68F4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_5 0x3A68F8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_6 0x3A68FC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_7 0x3A6900
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_8 0x3A6904
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_9 0x3A6908
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_10 0x3A690C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_11 0x3A6910
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_12 0x3A6914
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_13 0x3A6918
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_14 0x3A691C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_15 0x3A6920
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_0 0x3A6924
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_1 0x3A6928
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_2 0x3A692C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_3 0x3A6930
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_4 0x3A6934
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_5 0x3A6938
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_6 0x3A693C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_7 0x3A6940
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_8 0x3A6944
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_9 0x3A6948
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_10 0x3A694C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_11 0x3A6950
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_12 0x3A6954
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_13 0x3A6958
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_14 0x3A695C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_15 0x3A6960
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_0 0x3A6964
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_1 0x3A6968
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_2 0x3A696C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_3 0x3A6970
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_4 0x3A6974
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_5 0x3A6978
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_6 0x3A697C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_7 0x3A6980
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_8 0x3A6984
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_9 0x3A6988
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_10 0x3A698C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_11 0x3A6990
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_12 0x3A6994
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_13 0x3A6998
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_14 0x3A699C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_15 0x3A69A0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_0 0x3A69A4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_1 0x3A69A8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_2 0x3A69AC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_3 0x3A69B0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_4 0x3A69B4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_5 0x3A69B8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_6 0x3A69BC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_7 0x3A69C0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_8 0x3A69C4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_9 0x3A69C8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_10 0x3A69CC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_11 0x3A69D0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_12 0x3A69D4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_13 0x3A69D8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_14 0x3A69DC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_15 0x3A69E0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_0 0x3A69E4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_1 0x3A69E8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_2 0x3A69EC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_3 0x3A69F0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_4 0x3A69F4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_5 0x3A69F8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_6 0x3A69FC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_7 0x3A6A00
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_8 0x3A6A04
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_9 0x3A6A08
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_10 0x3A6A0C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_11 0x3A6A10
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_12 0x3A6A14
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_13 0x3A6A18
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_14 0x3A6A1C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_15 0x3A6A20
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AW 0x3A6A64
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AR 0x3A6A68
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_HIT_AW 0x3A6A6C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_HIT_AR 0x3A6A70
+
+#define mmNIF_RTR_CTRL_2_RGL_CFG 0x3A6B64
+
+#define mmNIF_RTR_CTRL_2_RGL_SHIFT 0x3A6B68
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_0 0x3A6B6C
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_1 0x3A6B70
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_2 0x3A6B74
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_3 0x3A6B78
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_4 0x3A6B7C
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_5 0x3A6B80
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_6 0x3A6B84
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_7 0x3A6B88
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_0 0x3A6BAC
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_1 0x3A6BB0
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_2 0x3A6BB4
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_3 0x3A6BB8
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_4 0x3A6BBC
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_5 0x3A6BC0
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_6 0x3A6BC4
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_7 0x3A6BC8
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_0 0x3A6BEC
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_1 0x3A6BF0
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_2 0x3A6BF4
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_3 0x3A6BF8
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_4 0x3A6BFC
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_5 0x3A6C00
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_6 0x3A6C04
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_7 0x3A6C08
+
+#define mmNIF_RTR_CTRL_2_RGL_WDT 0x3A6C2C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_WRAP 0x3A6C30
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_WRAP 0x3A6C34
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_WRAP 0x3A6C38
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_WRAP 0x3A6C3C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_WRAP 0x3A6C40
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_WRAP 0x3A6C44
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_WRAP 0x3A6C48
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_WRAP 0x3A6C4C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_CNT 0x3A6C50
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_CNT 0x3A6C54
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_CNT 0x3A6C58
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_CNT 0x3A6C5C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_CNT 0x3A6C60
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_CNT 0x3A6C64
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_CNT 0x3A6C68
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_CNT 0x3A6C6C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_WRAP 0x3A6C70
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_WRAP 0x3A6C74
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_WRAP 0x3A6C78
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_WRAP 0x3A6C7C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_WRAP 0x3A6C80
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_WRAP 0x3A6C84
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_WRAP 0x3A6C88
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_WRAP 0x3A6C8C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_CNT 0x3A6C90
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_CNT 0x3A6C94
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_CNT 0x3A6C98
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_CNT 0x3A6C9C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_CNT 0x3A6CA0
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_CNT 0x3A6CA4
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_CNT 0x3A6CA8
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_CNT 0x3A6CAC
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_0 0x3A6CB0
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_1 0x3A6CB4
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_2 0x3A6CB8
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_3 0x3A6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_2_REGS_H_ */
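As the names suggest, each secure (SEC) and privileged (PRIV) range window is described by a 64-bit base and mask split into LOW/HIGH 32-bit halves, kept separately for write (AW) and read (AR) transactions. A minimal sketch of programming write-window 0 on this router, assuming the driver's WREG32() MMIO write macro (which expects a struct hl_device *hdev in scope); the function name itself is illustrative:

#include <linux/kernel.h>	/* lower_32_bits() / upper_32_bits() */

/* Illustrative: program secure range window 0 for AW (write) transactions. */
static void nif2_set_sec_aw_window0(struct hl_device *hdev, u64 base, u64 mask)
{
	WREG32(mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0, lower_32_bits(base));
	WREG32(mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0, upper_32_bits(base));
	WREG32(mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0, lower_32_bits(mask));
	WREG32(mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0, upper_32_bits(mask));
}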
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
new file mode 100644
index 000000000000..34fd47685edd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_3_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_3_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_3 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_3_PERM_SEL 0x3B6108
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_0 0x3B6114
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_1 0x3B6118
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_2 0x3B611C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_3 0x3B6120
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_4 0x3B6124
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_5 0x3B6128
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_6 0x3B612C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_7 0x3B6130
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_8 0x3B6134
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_9 0x3B6138
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_10 0x3B613C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_11 0x3B6140
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_12 0x3B6144
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_13 0x3B6148
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_14 0x3B614C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_15 0x3B6150
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_16 0x3B6154
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_17 0x3B6158
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_18 0x3B615C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_19 0x3B6160
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_20 0x3B6164
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_21 0x3B6168
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_22 0x3B616C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_23 0x3B6170
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_24 0x3B6174
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_25 0x3B6178
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_26 0x3B617C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_27 0x3B6180
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_0 0x3B6184
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_1 0x3B6188
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_2 0x3B618C
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_3 0x3B6190
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_4 0x3B6194
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_5 0x3B6198
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_6 0x3B619C
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_7 0x3B61A0
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_8 0x3B61A4
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_9 0x3B61A8
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_10 0x3B61AC
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_11 0x3B61B0
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_12 0x3B61B4
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_13 0x3B61B8
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_14 0x3B61BC
+
+#define mmNIF_RTR_CTRL_3_SCRAM_SRAM_EN 0x3B626C
+
+#define mmNIF_RTR_CTRL_3_RL_HBM_EN 0x3B6274
+
+#define mmNIF_RTR_CTRL_3_RL_HBM_SAT 0x3B6278
+
+#define mmNIF_RTR_CTRL_3_RL_HBM_RST 0x3B627C
+
+#define mmNIF_RTR_CTRL_3_RL_HBM_TIMEOUT 0x3B6280
+
+#define mmNIF_RTR_CTRL_3_SCRAM_HBM_EN 0x3B6284
+
+#define mmNIF_RTR_CTRL_3_RL_PCI_EN 0x3B6288
+
+#define mmNIF_RTR_CTRL_3_RL_PCI_SAT 0x3B628C
+
+#define mmNIF_RTR_CTRL_3_RL_PCI_RST 0x3B6290
+
+#define mmNIF_RTR_CTRL_3_RL_PCI_TIMEOUT 0x3B6294
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_EN 0x3B629C
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_SAT 0x3B62A0
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_RST 0x3B62A4
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_TIMEOUT 0x3B62AC
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_RED 0x3B62B4
+
+#define mmNIF_RTR_CTRL_3_E2E_HBM_EN 0x3B62EC
+
+#define mmNIF_RTR_CTRL_3_E2E_PCI_EN 0x3B62F0
+
+#define mmNIF_RTR_CTRL_3_E2E_HBM_WR_SIZE 0x3B62F4
+
+#define mmNIF_RTR_CTRL_3_E2E_PCI_WR_SIZE 0x3B62F8
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET_EN 0x3B6404
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET 0x3B6408
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_WRAP 0x3B640C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_CNT 0x3B6410
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET_EN 0x3B6414
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET 0x3B6418
+
+#define mmNIF_RTR_CTRL_3_E2E_HBM_RD_SIZE 0x3B641C
+
+#define mmNIF_RTR_CTRL_3_E2E_PCI_RD_SIZE 0x3B6420
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET_EN 0x3B6424
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET 0x3B6428
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_WRAP 0x3B642C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_CNT 0x3B6430
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET_EN 0x3B6434
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET 0x3B6438
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_SEL_0 0x3B6450
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_SEL_1 0x3B6454
+
+#define mmNIF_RTR_CTRL_3_NON_LIN_EN 0x3B6480
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_0 0x3B6500
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_1 0x3B6504
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_2 0x3B6508
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_3 0x3B650C
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_4 0x3B6510
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_0 0x3B6514
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_1 0x3B6520
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_2 0x3B6524
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_3 0x3B6528
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_4 0x3B652C
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_5 0x3B6530
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_6 0x3B6534
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_7 0x3B6538
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_8 0x3B653C
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_9 0x3B6540
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_0 0x3B6550
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_1 0x3B6554
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_2 0x3B6558
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_3 0x3B655C
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_4 0x3B6560
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_5 0x3B6564
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_6 0x3B6568
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_7 0x3B656C
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_8 0x3B6570
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_9 0x3B6574
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_10 0x3B6578
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_11 0x3B657C
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_12 0x3B6580
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_13 0x3B6584
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_14 0x3B6588
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_15 0x3B658C
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_16 0x3B6590
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_17 0x3B6594
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_18 0x3B6598
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0 0x3B65E4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_1 0x3B65E8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_2 0x3B65EC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_3 0x3B65F0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_4 0x3B65F4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_5 0x3B65F8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_6 0x3B65FC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_7 0x3B6600
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_8 0x3B6604
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_9 0x3B6608
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_10 0x3B660C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_11 0x3B6610
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_12 0x3B6614
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_13 0x3B6618
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_14 0x3B661C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_15 0x3B6620
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0 0x3B6624
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_1 0x3B6628
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_2 0x3B662C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_3 0x3B6630
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_4 0x3B6634
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_5 0x3B6638
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_6 0x3B663C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_7 0x3B6640
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_8 0x3B6644
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_9 0x3B6648
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_10 0x3B664C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_11 0x3B6650
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_12 0x3B6654
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_13 0x3B6658
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_14 0x3B665C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_15 0x3B6660
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0 0x3B6664
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_1 0x3B6668
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_2 0x3B666C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_3 0x3B6670
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_4 0x3B6674
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_5 0x3B6678
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_6 0x3B667C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_7 0x3B6680
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_8 0x3B6684
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_9 0x3B6688
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_10 0x3B668C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_11 0x3B6690
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_12 0x3B6694
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_13 0x3B6698
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_14 0x3B669C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_15 0x3B66A0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0 0x3B66A4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_1 0x3B66A8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_2 0x3B66AC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_3 0x3B66B0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_4 0x3B66B4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_5 0x3B66B8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_6 0x3B66BC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_7 0x3B66C0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_8 0x3B66C4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_9 0x3B66C8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_10 0x3B66CC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_11 0x3B66D0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_12 0x3B66D4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_13 0x3B66D8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_14 0x3B66DC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_15 0x3B66E0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_0 0x3B66E4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_1 0x3B66E8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_2 0x3B66EC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_3 0x3B66F0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_4 0x3B66F4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_5 0x3B66F8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_6 0x3B66FC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_7 0x3B6700
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_8 0x3B6704
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_9 0x3B6708
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_10 0x3B670C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_11 0x3B6710
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_12 0x3B6714
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_13 0x3B6718
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_14 0x3B671C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_15 0x3B6720
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_0 0x3B6724
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_1 0x3B6728
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_2 0x3B672C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_3 0x3B6730
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_4 0x3B6734
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_5 0x3B6738
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_6 0x3B673C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_7 0x3B6740
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_8 0x3B6744
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_9 0x3B6748
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_10 0x3B674C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_11 0x3B6750
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_12 0x3B6754
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_13 0x3B6758
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_14 0x3B675C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_15 0x3B6760
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_0 0x3B6764
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_1 0x3B6768
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_2 0x3B676C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_3 0x3B6770
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_4 0x3B6774
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_5 0x3B6778
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_6 0x3B677C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_7 0x3B6780
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_8 0x3B6784
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_9 0x3B6788
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_10 0x3B678C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_11 0x3B6790
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_12 0x3B6794
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_13 0x3B6798
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_14 0x3B679C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_15 0x3B67A0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_0 0x3B67A4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_1 0x3B67A8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_2 0x3B67AC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_3 0x3B67B0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_4 0x3B67B4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_5 0x3B67B8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_6 0x3B67BC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_7 0x3B67C0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_8 0x3B67C4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_9 0x3B67C8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_10 0x3B67CC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_11 0x3B67D0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_12 0x3B67D4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_13 0x3B67D8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_14 0x3B67DC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_15 0x3B67E0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0 0x3B6824
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_1 0x3B6828
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_2 0x3B682C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_3 0x3B6830
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_4 0x3B6834
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_5 0x3B6838
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_6 0x3B683C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_7 0x3B6840
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_8 0x3B6844
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_9 0x3B6848
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_10 0x3B684C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_11 0x3B6850
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_12 0x3B6854
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_13 0x3B6858
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_14 0x3B685C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_15 0x3B6860
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0 0x3B6864
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_1 0x3B6868
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_2 0x3B686C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_3 0x3B6870
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_4 0x3B6874
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_5 0x3B6878
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_6 0x3B687C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_7 0x3B6880
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_8 0x3B6884
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_9 0x3B6888
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_10 0x3B688C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_11 0x3B6890
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_12 0x3B6894
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_13 0x3B6898
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_14 0x3B689C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_15 0x3B68A0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0 0x3B68A4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_1 0x3B68A8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_2 0x3B68AC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_3 0x3B68B0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_4 0x3B68B4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_5 0x3B68B8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_6 0x3B68BC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_7 0x3B68C0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_8 0x3B68C4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_9 0x3B68C8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_10 0x3B68CC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_11 0x3B68D0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_12 0x3B68D4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_13 0x3B68D8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_14 0x3B68DC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_15 0x3B68E0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0 0x3B68E4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_1 0x3B68E8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_2 0x3B68EC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_3 0x3B68F0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_4 0x3B68F4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_5 0x3B68F8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_6 0x3B68FC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_7 0x3B6900
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_8 0x3B6904
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_9 0x3B6908
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_10 0x3B690C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_11 0x3B6910
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_12 0x3B6914
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_13 0x3B6918
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_14 0x3B691C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_15 0x3B6920
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_0 0x3B6924
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_1 0x3B6928
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_2 0x3B692C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_3 0x3B6930
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_4 0x3B6934
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_5 0x3B6938
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_6 0x3B693C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_7 0x3B6940
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_8 0x3B6944
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_9 0x3B6948
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_10 0x3B694C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_11 0x3B6950
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_12 0x3B6954
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_13 0x3B6958
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_14 0x3B695C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_15 0x3B6960
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_0 0x3B6964
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_1 0x3B6968
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_2 0x3B696C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_3 0x3B6970
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_4 0x3B6974
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_5 0x3B6978
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_6 0x3B697C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_7 0x3B6980
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_8 0x3B6984
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_9 0x3B6988
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_10 0x3B698C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_11 0x3B6990
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_12 0x3B6994
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_13 0x3B6998
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_14 0x3B699C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_15 0x3B69A0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_0 0x3B69A4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_1 0x3B69A8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_2 0x3B69AC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_3 0x3B69B0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_4 0x3B69B4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_5 0x3B69B8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_6 0x3B69BC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_7 0x3B69C0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_8 0x3B69C4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_9 0x3B69C8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_10 0x3B69CC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_11 0x3B69D0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_12 0x3B69D4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_13 0x3B69D8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_14 0x3B69DC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_15 0x3B69E0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_0 0x3B69E4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_1 0x3B69E8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_2 0x3B69EC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_3 0x3B69F0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_4 0x3B69F4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_5 0x3B69F8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_6 0x3B69FC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_7 0x3B6A00
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_8 0x3B6A04
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_9 0x3B6A08
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_10 0x3B6A0C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_11 0x3B6A10
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_12 0x3B6A14
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_13 0x3B6A18
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_14 0x3B6A1C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_15 0x3B6A20
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AW 0x3B6A64
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AR 0x3B6A68
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_HIT_AW 0x3B6A6C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_HIT_AR 0x3B6A70
+
+#define mmNIF_RTR_CTRL_3_RGL_CFG 0x3B6B64
+
+#define mmNIF_RTR_CTRL_3_RGL_SHIFT 0x3B6B68
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_0 0x3B6B6C
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_1 0x3B6B70
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_2 0x3B6B74
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_3 0x3B6B78
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_4 0x3B6B7C
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_5 0x3B6B80
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_6 0x3B6B84
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_7 0x3B6B88
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_0 0x3B6BAC
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_1 0x3B6BB0
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_2 0x3B6BB4
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_3 0x3B6BB8
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_4 0x3B6BBC
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_5 0x3B6BC0
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_6 0x3B6BC4
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_7 0x3B6BC8
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_0 0x3B6BEC
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_1 0x3B6BF0
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_2 0x3B6BF4
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_3 0x3B6BF8
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_4 0x3B6BFC
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_5 0x3B6C00
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_6 0x3B6C04
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_7 0x3B6C08
+
+#define mmNIF_RTR_CTRL_3_RGL_WDT 0x3B6C2C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_WRAP 0x3B6C30
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_WRAP 0x3B6C34
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_WRAP 0x3B6C38
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_WRAP 0x3B6C3C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_WRAP 0x3B6C40
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_WRAP 0x3B6C44
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_WRAP 0x3B6C48
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_WRAP 0x3B6C4C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_CNT 0x3B6C50
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_CNT 0x3B6C54
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_CNT 0x3B6C58
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_CNT 0x3B6C5C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_CNT 0x3B6C60
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_CNT 0x3B6C64
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_CNT 0x3B6C68
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_CNT 0x3B6C6C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_WRAP 0x3B6C70
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_WRAP 0x3B6C74
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_WRAP 0x3B6C78
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_WRAP 0x3B6C7C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_WRAP 0x3B6C80
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_WRAP 0x3B6C84
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_WRAP 0x3B6C88
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_WRAP 0x3B6C8C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_CNT 0x3B6C90
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_CNT 0x3B6C94
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_CNT 0x3B6C98
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_CNT 0x3B6C9C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_CNT 0x3B6CA0
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_CNT 0x3B6CA4
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_CNT 0x3B6CA8
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_CNT 0x3B6CAC
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_0 0x3B6CB0
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_1 0x3B6CB4
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_2 0x3B6CB8
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_3 0x3B6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_3_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
new file mode 100644
index 000000000000..543a98f81767
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_4_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_4_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_4 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_4_PERM_SEL 0x3C6108
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_0 0x3C6114
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_1 0x3C6118
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_2 0x3C611C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_3 0x3C6120
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_4 0x3C6124
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_5 0x3C6128
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_6 0x3C612C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_7 0x3C6130
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_8 0x3C6134
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_9 0x3C6138
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_10 0x3C613C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_11 0x3C6140
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_12 0x3C6144
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_13 0x3C6148
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_14 0x3C614C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_15 0x3C6150
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_16 0x3C6154
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_17 0x3C6158
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_18 0x3C615C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_19 0x3C6160
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_20 0x3C6164
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_21 0x3C6168
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_22 0x3C616C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_23 0x3C6170
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_24 0x3C6174
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_25 0x3C6178
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_26 0x3C617C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_27 0x3C6180
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_0 0x3C6184
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_1 0x3C6188
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_2 0x3C618C
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_3 0x3C6190
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_4 0x3C6194
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_5 0x3C6198
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_6 0x3C619C
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_7 0x3C61A0
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_8 0x3C61A4
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_9 0x3C61A8
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_10 0x3C61AC
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_11 0x3C61B0
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_12 0x3C61B4
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_13 0x3C61B8
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_14 0x3C61BC
+
+#define mmNIF_RTR_CTRL_4_SCRAM_SRAM_EN 0x3C626C
+
+#define mmNIF_RTR_CTRL_4_RL_HBM_EN 0x3C6274
+
+#define mmNIF_RTR_CTRL_4_RL_HBM_SAT 0x3C6278
+
+#define mmNIF_RTR_CTRL_4_RL_HBM_RST 0x3C627C
+
+#define mmNIF_RTR_CTRL_4_RL_HBM_TIMEOUT 0x3C6280
+
+#define mmNIF_RTR_CTRL_4_SCRAM_HBM_EN 0x3C6284
+
+#define mmNIF_RTR_CTRL_4_RL_PCI_EN 0x3C6288
+
+#define mmNIF_RTR_CTRL_4_RL_PCI_SAT 0x3C628C
+
+#define mmNIF_RTR_CTRL_4_RL_PCI_RST 0x3C6290
+
+#define mmNIF_RTR_CTRL_4_RL_PCI_TIMEOUT 0x3C6294
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_EN 0x3C629C
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_SAT 0x3C62A0
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_RST 0x3C62A4
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_TIMEOUT 0x3C62AC
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_RED 0x3C62B4
+
+#define mmNIF_RTR_CTRL_4_E2E_HBM_EN 0x3C62EC
+
+#define mmNIF_RTR_CTRL_4_E2E_PCI_EN 0x3C62F0
+
+#define mmNIF_RTR_CTRL_4_E2E_HBM_WR_SIZE 0x3C62F4
+
+#define mmNIF_RTR_CTRL_4_E2E_PCI_WR_SIZE 0x3C62F8
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET_EN 0x3C6404
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET 0x3C6408
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_WRAP 0x3C640C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_CNT 0x3C6410
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET_EN 0x3C6414
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET 0x3C6418
+
+#define mmNIF_RTR_CTRL_4_E2E_HBM_RD_SIZE 0x3C641C
+
+#define mmNIF_RTR_CTRL_4_E2E_PCI_RD_SIZE 0x3C6420
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET_EN 0x3C6424
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET 0x3C6428
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_WRAP 0x3C642C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_CNT 0x3C6430
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET_EN 0x3C6434
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET 0x3C6438
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_SEL_0 0x3C6450
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_SEL_1 0x3C6454
+
+#define mmNIF_RTR_CTRL_4_NON_LIN_EN 0x3C6480
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_0 0x3C6500
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_1 0x3C6504
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_2 0x3C6508
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_3 0x3C650C
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_4 0x3C6510
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_0 0x3C6514
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_1 0x3C6520
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_2 0x3C6524
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_3 0x3C6528
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_4 0x3C652C
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_5 0x3C6530
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_6 0x3C6534
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_7 0x3C6538
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_8 0x3C653C
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_9 0x3C6540
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_0 0x3C6550
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_1 0x3C6554
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_2 0x3C6558
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_3 0x3C655C
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_4 0x3C6560
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_5 0x3C6564
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_6 0x3C6568
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_7 0x3C656C
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_8 0x3C6570
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_9 0x3C6574
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_10 0x3C6578
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_11 0x3C657C
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_12 0x3C6580
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_13 0x3C6584
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_14 0x3C6588
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_15 0x3C658C
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_16 0x3C6590
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_17 0x3C6594
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_18 0x3C6598
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0 0x3C65E4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_1 0x3C65E8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_2 0x3C65EC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_3 0x3C65F0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_4 0x3C65F4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_5 0x3C65F8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_6 0x3C65FC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_7 0x3C6600
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_8 0x3C6604
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_9 0x3C6608
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_10 0x3C660C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_11 0x3C6610
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_12 0x3C6614
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_13 0x3C6618
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_14 0x3C661C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_15 0x3C6620
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0 0x3C6624
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_1 0x3C6628
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_2 0x3C662C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_3 0x3C6630
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_4 0x3C6634
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_5 0x3C6638
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_6 0x3C663C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_7 0x3C6640
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_8 0x3C6644
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_9 0x3C6648
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_10 0x3C664C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_11 0x3C6650
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_12 0x3C6654
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_13 0x3C6658
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_14 0x3C665C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_15 0x3C6660
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0 0x3C6664
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_1 0x3C6668
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_2 0x3C666C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_3 0x3C6670
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_4 0x3C6674
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_5 0x3C6678
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_6 0x3C667C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_7 0x3C6680
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_8 0x3C6684
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_9 0x3C6688
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_10 0x3C668C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_11 0x3C6690
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_12 0x3C6694
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_13 0x3C6698
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_14 0x3C669C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_15 0x3C66A0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0 0x3C66A4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_1 0x3C66A8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_2 0x3C66AC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_3 0x3C66B0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_4 0x3C66B4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_5 0x3C66B8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_6 0x3C66BC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_7 0x3C66C0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_8 0x3C66C4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_9 0x3C66C8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_10 0x3C66CC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_11 0x3C66D0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_12 0x3C66D4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_13 0x3C66D8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_14 0x3C66DC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_15 0x3C66E0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_0 0x3C66E4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_1 0x3C66E8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_2 0x3C66EC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_3 0x3C66F0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_4 0x3C66F4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_5 0x3C66F8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_6 0x3C66FC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_7 0x3C6700
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_8 0x3C6704
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_9 0x3C6708
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_10 0x3C670C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_11 0x3C6710
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_12 0x3C6714
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_13 0x3C6718
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_14 0x3C671C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_15 0x3C6720
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_0 0x3C6724
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_1 0x3C6728
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_2 0x3C672C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_3 0x3C6730
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_4 0x3C6734
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_5 0x3C6738
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_6 0x3C673C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_7 0x3C6740
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_8 0x3C6744
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_9 0x3C6748
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_10 0x3C674C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_11 0x3C6750
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_12 0x3C6754
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_13 0x3C6758
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_14 0x3C675C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_15 0x3C6760
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_0 0x3C6764
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_1 0x3C6768
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_2 0x3C676C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_3 0x3C6770
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_4 0x3C6774
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_5 0x3C6778
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_6 0x3C677C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_7 0x3C6780
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_8 0x3C6784
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_9 0x3C6788
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_10 0x3C678C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_11 0x3C6790
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_12 0x3C6794
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_13 0x3C6798
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_14 0x3C679C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_15 0x3C67A0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_0 0x3C67A4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_1 0x3C67A8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_2 0x3C67AC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_3 0x3C67B0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_4 0x3C67B4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_5 0x3C67B8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_6 0x3C67BC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_7 0x3C67C0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_8 0x3C67C4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_9 0x3C67C8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_10 0x3C67CC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_11 0x3C67D0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_12 0x3C67D4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_13 0x3C67D8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_14 0x3C67DC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_15 0x3C67E0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0 0x3C6824
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_1 0x3C6828
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_2 0x3C682C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_3 0x3C6830
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_4 0x3C6834
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_5 0x3C6838
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_6 0x3C683C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_7 0x3C6840
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_8 0x3C6844
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_9 0x3C6848
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_10 0x3C684C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_11 0x3C6850
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_12 0x3C6854
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_13 0x3C6858
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_14 0x3C685C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_15 0x3C6860
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0 0x3C6864
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_1 0x3C6868
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_2 0x3C686C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_3 0x3C6870
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_4 0x3C6874
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_5 0x3C6878
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_6 0x3C687C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_7 0x3C6880
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_8 0x3C6884
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_9 0x3C6888
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_10 0x3C688C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_11 0x3C6890
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_12 0x3C6894
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_13 0x3C6898
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_14 0x3C689C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_15 0x3C68A0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0 0x3C68A4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_1 0x3C68A8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_2 0x3C68AC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_3 0x3C68B0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_4 0x3C68B4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_5 0x3C68B8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_6 0x3C68BC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_7 0x3C68C0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_8 0x3C68C4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_9 0x3C68C8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_10 0x3C68CC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_11 0x3C68D0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_12 0x3C68D4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_13 0x3C68D8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_14 0x3C68DC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_15 0x3C68E0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0 0x3C68E4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_1 0x3C68E8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_2 0x3C68EC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_3 0x3C68F0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_4 0x3C68F4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_5 0x3C68F8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_6 0x3C68FC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_7 0x3C6900
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_8 0x3C6904
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_9 0x3C6908
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_10 0x3C690C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_11 0x3C6910
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_12 0x3C6914
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_13 0x3C6918
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_14 0x3C691C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_15 0x3C6920
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_0 0x3C6924
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_1 0x3C6928
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_2 0x3C692C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_3 0x3C6930
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_4 0x3C6934
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_5 0x3C6938
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_6 0x3C693C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_7 0x3C6940
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_8 0x3C6944
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_9 0x3C6948
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_10 0x3C694C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_11 0x3C6950
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_12 0x3C6954
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_13 0x3C6958
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_14 0x3C695C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_15 0x3C6960
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_0 0x3C6964
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_1 0x3C6968
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_2 0x3C696C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_3 0x3C6970
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_4 0x3C6974
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_5 0x3C6978
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_6 0x3C697C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_7 0x3C6980
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_8 0x3C6984
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_9 0x3C6988
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_10 0x3C698C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_11 0x3C6990
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_12 0x3C6994
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_13 0x3C6998
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_14 0x3C699C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_15 0x3C69A0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_0 0x3C69A4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_1 0x3C69A8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_2 0x3C69AC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_3 0x3C69B0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_4 0x3C69B4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_5 0x3C69B8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_6 0x3C69BC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_7 0x3C69C0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_8 0x3C69C4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_9 0x3C69C8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_10 0x3C69CC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_11 0x3C69D0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_12 0x3C69D4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_13 0x3C69D8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_14 0x3C69DC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_15 0x3C69E0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_0 0x3C69E4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_1 0x3C69E8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_2 0x3C69EC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_3 0x3C69F0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_4 0x3C69F4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_5 0x3C69F8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_6 0x3C69FC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_7 0x3C6A00
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_8 0x3C6A04
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_9 0x3C6A08
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_10 0x3C6A0C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_11 0x3C6A10
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_12 0x3C6A14
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_13 0x3C6A18
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_14 0x3C6A1C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_15 0x3C6A20
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AW 0x3C6A64
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AR 0x3C6A68
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_HIT_AW 0x3C6A6C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_HIT_AR 0x3C6A70
+
+#define mmNIF_RTR_CTRL_4_RGL_CFG 0x3C6B64
+
+#define mmNIF_RTR_CTRL_4_RGL_SHIFT 0x3C6B68
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_0 0x3C6B6C
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_1 0x3C6B70
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_2 0x3C6B74
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_3 0x3C6B78
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_4 0x3C6B7C
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_5 0x3C6B80
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_6 0x3C6B84
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_7 0x3C6B88
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_0 0x3C6BAC
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_1 0x3C6BB0
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_2 0x3C6BB4
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_3 0x3C6BB8
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_4 0x3C6BBC
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_5 0x3C6BC0
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_6 0x3C6BC4
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_7 0x3C6BC8
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_0 0x3C6BEC
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_1 0x3C6BF0
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_2 0x3C6BF4
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_3 0x3C6BF8
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_4 0x3C6BFC
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_5 0x3C6C00
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_6 0x3C6C04
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_7 0x3C6C08
+
+#define mmNIF_RTR_CTRL_4_RGL_WDT 0x3C6C2C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_WRAP 0x3C6C30
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_WRAP 0x3C6C34
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_WRAP 0x3C6C38
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_WRAP 0x3C6C3C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_WRAP 0x3C6C40
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_WRAP 0x3C6C44
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_WRAP 0x3C6C48
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_WRAP 0x3C6C4C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_CNT 0x3C6C50
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_CNT 0x3C6C54
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_CNT 0x3C6C58
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_CNT 0x3C6C5C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_CNT 0x3C6C60
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_CNT 0x3C6C64
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_CNT 0x3C6C68
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_CNT 0x3C6C6C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_WRAP 0x3C6C70
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_WRAP 0x3C6C74
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_WRAP 0x3C6C78
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_WRAP 0x3C6C7C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_WRAP 0x3C6C80
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_WRAP 0x3C6C84
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_WRAP 0x3C6C88
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_WRAP 0x3C6C8C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_CNT 0x3C6C90
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_CNT 0x3C6C94
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_CNT 0x3C6C98
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_CNT 0x3C6C9C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_CNT 0x3C6CA0
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_CNT 0x3C6CA4
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_CNT 0x3C6CA8
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_CNT 0x3C6CAC
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_0 0x3C6CB0
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_1 0x3C6CB4
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_2 0x3C6CB8
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_3 0x3C6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_4_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
new file mode 100644
index 000000000000..95486b7ddf1d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_5_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_5_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_5 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_5_PERM_SEL 0x3D6108
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_0 0x3D6114
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_1 0x3D6118
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_2 0x3D611C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_3 0x3D6120
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_4 0x3D6124
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_5 0x3D6128
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_6 0x3D612C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_7 0x3D6130
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_8 0x3D6134
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_9 0x3D6138
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_10 0x3D613C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_11 0x3D6140
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_12 0x3D6144
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_13 0x3D6148
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_14 0x3D614C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_15 0x3D6150
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_16 0x3D6154
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_17 0x3D6158
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_18 0x3D615C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_19 0x3D6160
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_20 0x3D6164
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_21 0x3D6168
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_22 0x3D616C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_23 0x3D6170
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_24 0x3D6174
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_25 0x3D6178
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_26 0x3D617C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_27 0x3D6180
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_0 0x3D6184
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_1 0x3D6188
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_2 0x3D618C
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_3 0x3D6190
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_4 0x3D6194
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_5 0x3D6198
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_6 0x3D619C
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_7 0x3D61A0
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_8 0x3D61A4
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_9 0x3D61A8
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_10 0x3D61AC
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_11 0x3D61B0
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_12 0x3D61B4
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_13 0x3D61B8
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_14 0x3D61BC
+
+#define mmNIF_RTR_CTRL_5_SCRAM_SRAM_EN 0x3D626C
+
+#define mmNIF_RTR_CTRL_5_RL_HBM_EN 0x3D6274
+
+#define mmNIF_RTR_CTRL_5_RL_HBM_SAT 0x3D6278
+
+#define mmNIF_RTR_CTRL_5_RL_HBM_RST 0x3D627C
+
+#define mmNIF_RTR_CTRL_5_RL_HBM_TIMEOUT 0x3D6280
+
+#define mmNIF_RTR_CTRL_5_SCRAM_HBM_EN 0x3D6284
+
+#define mmNIF_RTR_CTRL_5_RL_PCI_EN 0x3D6288
+
+#define mmNIF_RTR_CTRL_5_RL_PCI_SAT 0x3D628C
+
+#define mmNIF_RTR_CTRL_5_RL_PCI_RST 0x3D6290
+
+#define mmNIF_RTR_CTRL_5_RL_PCI_TIMEOUT 0x3D6294
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_EN 0x3D629C
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_SAT 0x3D62A0
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_RST 0x3D62A4
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_TIMEOUT 0x3D62AC
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_RED 0x3D62B4
+
+#define mmNIF_RTR_CTRL_5_E2E_HBM_EN 0x3D62EC
+
+#define mmNIF_RTR_CTRL_5_E2E_PCI_EN 0x3D62F0
+
+#define mmNIF_RTR_CTRL_5_E2E_HBM_WR_SIZE 0x3D62F4
+
+#define mmNIF_RTR_CTRL_5_E2E_PCI_WR_SIZE 0x3D62F8
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET_EN 0x3D6404
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET 0x3D6408
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_WRAP 0x3D640C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_CNT 0x3D6410
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET_EN 0x3D6414
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET 0x3D6418
+
+#define mmNIF_RTR_CTRL_5_E2E_HBM_RD_SIZE 0x3D641C
+
+#define mmNIF_RTR_CTRL_5_E2E_PCI_RD_SIZE 0x3D6420
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET_EN 0x3D6424
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET 0x3D6428
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_WRAP 0x3D642C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_CNT 0x3D6430
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET_EN 0x3D6434
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET 0x3D6438
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_SEL_0 0x3D6450
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_SEL_1 0x3D6454
+
+#define mmNIF_RTR_CTRL_5_NON_LIN_EN 0x3D6480
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_0 0x3D6500
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_1 0x3D6504
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_2 0x3D6508
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_3 0x3D650C
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_4 0x3D6510
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_0 0x3D6514
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_1 0x3D6520
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_2 0x3D6524
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_3 0x3D6528
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_4 0x3D652C
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_5 0x3D6530
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_6 0x3D6534
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_7 0x3D6538
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_8 0x3D653C
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_9 0x3D6540
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_0 0x3D6550
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_1 0x3D6554
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_2 0x3D6558
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_3 0x3D655C
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_4 0x3D6560
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_5 0x3D6564
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_6 0x3D6568
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_7 0x3D656C
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_8 0x3D6570
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_9 0x3D6574
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_10 0x3D6578
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_11 0x3D657C
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_12 0x3D6580
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_13 0x3D6584
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_14 0x3D6588
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_15 0x3D658C
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_16 0x3D6590
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_17 0x3D6594
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_18 0x3D6598
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0 0x3D65E4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_1 0x3D65E8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_2 0x3D65EC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_3 0x3D65F0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_4 0x3D65F4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_5 0x3D65F8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_6 0x3D65FC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_7 0x3D6600
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_8 0x3D6604
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_9 0x3D6608
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_10 0x3D660C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_11 0x3D6610
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_12 0x3D6614
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_13 0x3D6618
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_14 0x3D661C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_15 0x3D6620
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0 0x3D6624
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_1 0x3D6628
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_2 0x3D662C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_3 0x3D6630
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_4 0x3D6634
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_5 0x3D6638
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_6 0x3D663C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_7 0x3D6640
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_8 0x3D6644
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_9 0x3D6648
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_10 0x3D664C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_11 0x3D6650
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_12 0x3D6654
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_13 0x3D6658
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_14 0x3D665C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_15 0x3D6660
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0 0x3D6664
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_1 0x3D6668
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_2 0x3D666C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_3 0x3D6670
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_4 0x3D6674
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_5 0x3D6678
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_6 0x3D667C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_7 0x3D6680
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_8 0x3D6684
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_9 0x3D6688
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_10 0x3D668C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_11 0x3D6690
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_12 0x3D6694
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_13 0x3D6698
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_14 0x3D669C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_15 0x3D66A0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0 0x3D66A4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_1 0x3D66A8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_2 0x3D66AC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_3 0x3D66B0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_4 0x3D66B4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_5 0x3D66B8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_6 0x3D66BC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_7 0x3D66C0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_8 0x3D66C4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_9 0x3D66C8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_10 0x3D66CC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_11 0x3D66D0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_12 0x3D66D4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_13 0x3D66D8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_14 0x3D66DC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_15 0x3D66E0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_0 0x3D66E4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_1 0x3D66E8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_2 0x3D66EC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_3 0x3D66F0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_4 0x3D66F4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_5 0x3D66F8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_6 0x3D66FC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_7 0x3D6700
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_8 0x3D6704
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_9 0x3D6708
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_10 0x3D670C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_11 0x3D6710
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_12 0x3D6714
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_13 0x3D6718
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_14 0x3D671C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_15 0x3D6720
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_0 0x3D6724
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_1 0x3D6728
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_2 0x3D672C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_3 0x3D6730
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_4 0x3D6734
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_5 0x3D6738
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_6 0x3D673C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_7 0x3D6740
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_8 0x3D6744
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_9 0x3D6748
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_10 0x3D674C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_11 0x3D6750
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_12 0x3D6754
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_13 0x3D6758
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_14 0x3D675C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_15 0x3D6760
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_0 0x3D6764
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_1 0x3D6768
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_2 0x3D676C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_3 0x3D6770
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_4 0x3D6774
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_5 0x3D6778
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_6 0x3D677C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_7 0x3D6780
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_8 0x3D6784
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_9 0x3D6788
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_10 0x3D678C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_11 0x3D6790
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_12 0x3D6794
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_13 0x3D6798
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_14 0x3D679C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_15 0x3D67A0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_0 0x3D67A4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_1 0x3D67A8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_2 0x3D67AC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_3 0x3D67B0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_4 0x3D67B4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_5 0x3D67B8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_6 0x3D67BC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_7 0x3D67C0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_8 0x3D67C4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_9 0x3D67C8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_10 0x3D67CC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_11 0x3D67D0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_12 0x3D67D4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_13 0x3D67D8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_14 0x3D67DC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_15 0x3D67E0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0 0x3D6824
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_1 0x3D6828
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_2 0x3D682C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_3 0x3D6830
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_4 0x3D6834
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_5 0x3D6838
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_6 0x3D683C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_7 0x3D6840
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_8 0x3D6844
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_9 0x3D6848
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_10 0x3D684C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_11 0x3D6850
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_12 0x3D6854
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_13 0x3D6858
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_14 0x3D685C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_15 0x3D6860
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0 0x3D6864
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_1 0x3D6868
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_2 0x3D686C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_3 0x3D6870
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_4 0x3D6874
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_5 0x3D6878
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_6 0x3D687C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_7 0x3D6880
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_8 0x3D6884
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_9 0x3D6888
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_10 0x3D688C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_11 0x3D6890
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_12 0x3D6894
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_13 0x3D6898
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_14 0x3D689C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_15 0x3D68A0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0 0x3D68A4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_1 0x3D68A8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_2 0x3D68AC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_3 0x3D68B0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_4 0x3D68B4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_5 0x3D68B8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_6 0x3D68BC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_7 0x3D68C0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_8 0x3D68C4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_9 0x3D68C8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_10 0x3D68CC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_11 0x3D68D0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_12 0x3D68D4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_13 0x3D68D8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_14 0x3D68DC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_15 0x3D68E0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0 0x3D68E4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_1 0x3D68E8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_2 0x3D68EC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_3 0x3D68F0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_4 0x3D68F4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_5 0x3D68F8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_6 0x3D68FC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_7 0x3D6900
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_8 0x3D6904
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_9 0x3D6908
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_10 0x3D690C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_11 0x3D6910
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_12 0x3D6914
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_13 0x3D6918
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_14 0x3D691C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_15 0x3D6920
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_0 0x3D6924
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_1 0x3D6928
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_2 0x3D692C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_3 0x3D6930
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_4 0x3D6934
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_5 0x3D6938
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_6 0x3D693C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_7 0x3D6940
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_8 0x3D6944
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_9 0x3D6948
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_10 0x3D694C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_11 0x3D6950
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_12 0x3D6954
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_13 0x3D6958
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_14 0x3D695C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_15 0x3D6960
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_0 0x3D6964
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_1 0x3D6968
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_2 0x3D696C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_3 0x3D6970
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_4 0x3D6974
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_5 0x3D6978
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_6 0x3D697C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_7 0x3D6980
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_8 0x3D6984
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_9 0x3D6988
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_10 0x3D698C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_11 0x3D6990
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_12 0x3D6994
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_13 0x3D6998
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_14 0x3D699C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_15 0x3D69A0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_0 0x3D69A4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_1 0x3D69A8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_2 0x3D69AC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_3 0x3D69B0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_4 0x3D69B4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_5 0x3D69B8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_6 0x3D69BC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_7 0x3D69C0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_8 0x3D69C4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_9 0x3D69C8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_10 0x3D69CC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_11 0x3D69D0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_12 0x3D69D4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_13 0x3D69D8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_14 0x3D69DC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_15 0x3D69E0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_0 0x3D69E4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_1 0x3D69E8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_2 0x3D69EC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_3 0x3D69F0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_4 0x3D69F4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_5 0x3D69F8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_6 0x3D69FC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_7 0x3D6A00
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_8 0x3D6A04
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_9 0x3D6A08
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_10 0x3D6A0C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_11 0x3D6A10
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_12 0x3D6A14
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_13 0x3D6A18
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_14 0x3D6A1C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_15 0x3D6A20
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AW 0x3D6A64
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AR 0x3D6A68
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_HIT_AW 0x3D6A6C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_HIT_AR 0x3D6A70
+
+#define mmNIF_RTR_CTRL_5_RGL_CFG 0x3D6B64
+
+#define mmNIF_RTR_CTRL_5_RGL_SHIFT 0x3D6B68
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_0 0x3D6B6C
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_1 0x3D6B70
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_2 0x3D6B74
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_3 0x3D6B78
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_4 0x3D6B7C
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_5 0x3D6B80
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_6 0x3D6B84
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_7 0x3D6B88
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_0 0x3D6BAC
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_1 0x3D6BB0
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_2 0x3D6BB4
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_3 0x3D6BB8
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_4 0x3D6BBC
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_5 0x3D6BC0
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_6 0x3D6BC4
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_7 0x3D6BC8
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_0 0x3D6BEC
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_1 0x3D6BF0
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_2 0x3D6BF4
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_3 0x3D6BF8
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_4 0x3D6BFC
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_5 0x3D6C00
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_6 0x3D6C04
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_7 0x3D6C08
+
+#define mmNIF_RTR_CTRL_5_RGL_WDT 0x3D6C2C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_WRAP 0x3D6C30
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_WRAP 0x3D6C34
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_WRAP 0x3D6C38
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_WRAP 0x3D6C3C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_WRAP 0x3D6C40
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_WRAP 0x3D6C44
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_WRAP 0x3D6C48
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_WRAP 0x3D6C4C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_CNT 0x3D6C50
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_CNT 0x3D6C54
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_CNT 0x3D6C58
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_CNT 0x3D6C5C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_CNT 0x3D6C60
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_CNT 0x3D6C64
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_CNT 0x3D6C68
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_CNT 0x3D6C6C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_WRAP 0x3D6C70
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_WRAP 0x3D6C74
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_WRAP 0x3D6C78
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_WRAP 0x3D6C7C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_WRAP 0x3D6C80
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_WRAP 0x3D6C84
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_WRAP 0x3D6C88
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_WRAP 0x3D6C8C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_CNT 0x3D6C90
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_CNT 0x3D6C94
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_CNT 0x3D6C98
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_CNT 0x3D6C9C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_CNT 0x3D6CA0
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_CNT 0x3D6CA4
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_CNT 0x3D6CA8
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_CNT 0x3D6CAC
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_0 0x3D6CB0
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_1 0x3D6CB4
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_2 0x3D6CB8
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_3 0x3D6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_5_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
new file mode 100644
index 000000000000..b79c59887b21
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_6_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_6_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_6 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_6_PERM_SEL 0x3E6108
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_0 0x3E6114
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_1 0x3E6118
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_2 0x3E611C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_3 0x3E6120
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_4 0x3E6124
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_5 0x3E6128
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_6 0x3E612C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_7 0x3E6130
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_8 0x3E6134
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_9 0x3E6138
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_10 0x3E613C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_11 0x3E6140
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_12 0x3E6144
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_13 0x3E6148
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_14 0x3E614C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_15 0x3E6150
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_16 0x3E6154
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_17 0x3E6158
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_18 0x3E615C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_19 0x3E6160
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_20 0x3E6164
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_21 0x3E6168
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_22 0x3E616C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_23 0x3E6170
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_24 0x3E6174
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_25 0x3E6178
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_26 0x3E617C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_27 0x3E6180
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_0 0x3E6184
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_1 0x3E6188
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_2 0x3E618C
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_3 0x3E6190
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_4 0x3E6194
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_5 0x3E6198
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_6 0x3E619C
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_7 0x3E61A0
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_8 0x3E61A4
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_9 0x3E61A8
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_10 0x3E61AC
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_11 0x3E61B0
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_12 0x3E61B4
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_13 0x3E61B8
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_14 0x3E61BC
+
+#define mmNIF_RTR_CTRL_6_SCRAM_SRAM_EN 0x3E626C
+
+#define mmNIF_RTR_CTRL_6_RL_HBM_EN 0x3E6274
+
+#define mmNIF_RTR_CTRL_6_RL_HBM_SAT 0x3E6278
+
+#define mmNIF_RTR_CTRL_6_RL_HBM_RST 0x3E627C
+
+#define mmNIF_RTR_CTRL_6_RL_HBM_TIMEOUT 0x3E6280
+
+#define mmNIF_RTR_CTRL_6_SCRAM_HBM_EN 0x3E6284
+
+#define mmNIF_RTR_CTRL_6_RL_PCI_EN 0x3E6288
+
+#define mmNIF_RTR_CTRL_6_RL_PCI_SAT 0x3E628C
+
+#define mmNIF_RTR_CTRL_6_RL_PCI_RST 0x3E6290
+
+#define mmNIF_RTR_CTRL_6_RL_PCI_TIMEOUT 0x3E6294
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_EN 0x3E629C
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_SAT 0x3E62A0
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_RST 0x3E62A4
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_TIMEOUT 0x3E62AC
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_RED 0x3E62B4
+
+#define mmNIF_RTR_CTRL_6_E2E_HBM_EN 0x3E62EC
+
+#define mmNIF_RTR_CTRL_6_E2E_PCI_EN 0x3E62F0
+
+#define mmNIF_RTR_CTRL_6_E2E_HBM_WR_SIZE 0x3E62F4
+
+#define mmNIF_RTR_CTRL_6_E2E_PCI_WR_SIZE 0x3E62F8
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET_EN 0x3E6404
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET 0x3E6408
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_WRAP 0x3E640C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_CNT 0x3E6410
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET_EN 0x3E6414
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET 0x3E6418
+
+#define mmNIF_RTR_CTRL_6_E2E_HBM_RD_SIZE 0x3E641C
+
+#define mmNIF_RTR_CTRL_6_E2E_PCI_RD_SIZE 0x3E6420
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET_EN 0x3E6424
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET 0x3E6428
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_WRAP 0x3E642C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_CNT 0x3E6430
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET_EN 0x3E6434
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET 0x3E6438
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_SEL_0 0x3E6450
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_SEL_1 0x3E6454
+
+#define mmNIF_RTR_CTRL_6_NON_LIN_EN 0x3E6480
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_0 0x3E6500
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_1 0x3E6504
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_2 0x3E6508
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_3 0x3E650C
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_4 0x3E6510
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_0 0x3E6514
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_1 0x3E6520
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_2 0x3E6524
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_3 0x3E6528
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_4 0x3E652C
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_5 0x3E6530
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_6 0x3E6534
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_7 0x3E6538
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_8 0x3E653C
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_9 0x3E6540
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_0 0x3E6550
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_1 0x3E6554
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_2 0x3E6558
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_3 0x3E655C
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_4 0x3E6560
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_5 0x3E6564
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_6 0x3E6568
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_7 0x3E656C
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_8 0x3E6570
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_9 0x3E6574
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_10 0x3E6578
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_11 0x3E657C
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_12 0x3E6580
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_13 0x3E6584
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_14 0x3E6588
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_15 0x3E658C
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_16 0x3E6590
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_17 0x3E6594
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_18 0x3E6598
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0 0x3E65E4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_1 0x3E65E8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_2 0x3E65EC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_3 0x3E65F0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_4 0x3E65F4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_5 0x3E65F8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_6 0x3E65FC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_7 0x3E6600
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_8 0x3E6604
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_9 0x3E6608
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_10 0x3E660C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_11 0x3E6610
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_12 0x3E6614
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_13 0x3E6618
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_14 0x3E661C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_15 0x3E6620
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0 0x3E6624
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_1 0x3E6628
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_2 0x3E662C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_3 0x3E6630
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_4 0x3E6634
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_5 0x3E6638
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_6 0x3E663C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_7 0x3E6640
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_8 0x3E6644
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_9 0x3E6648
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_10 0x3E664C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_11 0x3E6650
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_12 0x3E6654
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_13 0x3E6658
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_14 0x3E665C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_15 0x3E6660
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0 0x3E6664
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_1 0x3E6668
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_2 0x3E666C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_3 0x3E6670
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_4 0x3E6674
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_5 0x3E6678
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_6 0x3E667C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_7 0x3E6680
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_8 0x3E6684
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_9 0x3E6688
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_10 0x3E668C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_11 0x3E6690
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_12 0x3E6694
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_13 0x3E6698
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_14 0x3E669C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_15 0x3E66A0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0 0x3E66A4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_1 0x3E66A8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_2 0x3E66AC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_3 0x3E66B0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_4 0x3E66B4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_5 0x3E66B8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_6 0x3E66BC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_7 0x3E66C0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_8 0x3E66C4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_9 0x3E66C8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_10 0x3E66CC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_11 0x3E66D0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_12 0x3E66D4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_13 0x3E66D8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_14 0x3E66DC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_15 0x3E66E0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_0 0x3E66E4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_1 0x3E66E8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_2 0x3E66EC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_3 0x3E66F0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_4 0x3E66F4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_5 0x3E66F8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_6 0x3E66FC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_7 0x3E6700
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_8 0x3E6704
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_9 0x3E6708
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_10 0x3E670C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_11 0x3E6710
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_12 0x3E6714
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_13 0x3E6718
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_14 0x3E671C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_15 0x3E6720
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_0 0x3E6724
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_1 0x3E6728
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_2 0x3E672C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_3 0x3E6730
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_4 0x3E6734
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_5 0x3E6738
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_6 0x3E673C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_7 0x3E6740
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_8 0x3E6744
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_9 0x3E6748
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_10 0x3E674C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_11 0x3E6750
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_12 0x3E6754
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_13 0x3E6758
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_14 0x3E675C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_15 0x3E6760
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_0 0x3E6764
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_1 0x3E6768
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_2 0x3E676C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_3 0x3E6770
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_4 0x3E6774
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_5 0x3E6778
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_6 0x3E677C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_7 0x3E6780
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_8 0x3E6784
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_9 0x3E6788
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_10 0x3E678C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_11 0x3E6790
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_12 0x3E6794
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_13 0x3E6798
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_14 0x3E679C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_15 0x3E67A0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_0 0x3E67A4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_1 0x3E67A8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_2 0x3E67AC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_3 0x3E67B0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_4 0x3E67B4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_5 0x3E67B8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_6 0x3E67BC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_7 0x3E67C0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_8 0x3E67C4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_9 0x3E67C8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_10 0x3E67CC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_11 0x3E67D0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_12 0x3E67D4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_13 0x3E67D8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_14 0x3E67DC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_15 0x3E67E0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0 0x3E6824
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_1 0x3E6828
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_2 0x3E682C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_3 0x3E6830
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_4 0x3E6834
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_5 0x3E6838
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_6 0x3E683C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_7 0x3E6840
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_8 0x3E6844
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_9 0x3E6848
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_10 0x3E684C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_11 0x3E6850
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_12 0x3E6854
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_13 0x3E6858
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_14 0x3E685C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_15 0x3E6860
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0 0x3E6864
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_1 0x3E6868
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_2 0x3E686C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_3 0x3E6870
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_4 0x3E6874
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_5 0x3E6878
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_6 0x3E687C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_7 0x3E6880
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_8 0x3E6884
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_9 0x3E6888
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_10 0x3E688C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_11 0x3E6890
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_12 0x3E6894
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_13 0x3E6898
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_14 0x3E689C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_15 0x3E68A0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0 0x3E68A4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_1 0x3E68A8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_2 0x3E68AC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_3 0x3E68B0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_4 0x3E68B4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_5 0x3E68B8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_6 0x3E68BC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_7 0x3E68C0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_8 0x3E68C4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_9 0x3E68C8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_10 0x3E68CC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_11 0x3E68D0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_12 0x3E68D4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_13 0x3E68D8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_14 0x3E68DC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_15 0x3E68E0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0 0x3E68E4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_1 0x3E68E8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_2 0x3E68EC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_3 0x3E68F0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_4 0x3E68F4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_5 0x3E68F8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_6 0x3E68FC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_7 0x3E6900
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_8 0x3E6904
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_9 0x3E6908
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_10 0x3E690C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_11 0x3E6910
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_12 0x3E6914
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_13 0x3E6918
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_14 0x3E691C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_15 0x3E6920
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_0 0x3E6924
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_1 0x3E6928
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_2 0x3E692C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_3 0x3E6930
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_4 0x3E6934
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_5 0x3E6938
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_6 0x3E693C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_7 0x3E6940
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_8 0x3E6944
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_9 0x3E6948
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_10 0x3E694C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_11 0x3E6950
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_12 0x3E6954
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_13 0x3E6958
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_14 0x3E695C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_15 0x3E6960
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_0 0x3E6964
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_1 0x3E6968
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_2 0x3E696C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_3 0x3E6970
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_4 0x3E6974
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_5 0x3E6978
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_6 0x3E697C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_7 0x3E6980
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_8 0x3E6984
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_9 0x3E6988
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_10 0x3E698C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_11 0x3E6990
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_12 0x3E6994
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_13 0x3E6998
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_14 0x3E699C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_15 0x3E69A0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_0 0x3E69A4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_1 0x3E69A8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_2 0x3E69AC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_3 0x3E69B0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_4 0x3E69B4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_5 0x3E69B8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_6 0x3E69BC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_7 0x3E69C0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_8 0x3E69C4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_9 0x3E69C8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_10 0x3E69CC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_11 0x3E69D0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_12 0x3E69D4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_13 0x3E69D8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_14 0x3E69DC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_15 0x3E69E0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_0 0x3E69E4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_1 0x3E69E8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_2 0x3E69EC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_3 0x3E69F0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_4 0x3E69F4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_5 0x3E69F8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_6 0x3E69FC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_7 0x3E6A00
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_8 0x3E6A04
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_9 0x3E6A08
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_10 0x3E6A0C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_11 0x3E6A10
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_12 0x3E6A14
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_13 0x3E6A18
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_14 0x3E6A1C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_15 0x3E6A20
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AW 0x3E6A64
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AR 0x3E6A68
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_HIT_AW 0x3E6A6C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_HIT_AR 0x3E6A70
+
+#define mmNIF_RTR_CTRL_6_RGL_CFG 0x3E6B64
+
+#define mmNIF_RTR_CTRL_6_RGL_SHIFT 0x3E6B68
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_0 0x3E6B6C
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_1 0x3E6B70
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_2 0x3E6B74
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_3 0x3E6B78
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_4 0x3E6B7C
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_5 0x3E6B80
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_6 0x3E6B84
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_7 0x3E6B88
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_0 0x3E6BAC
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_1 0x3E6BB0
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_2 0x3E6BB4
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_3 0x3E6BB8
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_4 0x3E6BBC
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_5 0x3E6BC0
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_6 0x3E6BC4
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_7 0x3E6BC8
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_0 0x3E6BEC
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_1 0x3E6BF0
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_2 0x3E6BF4
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_3 0x3E6BF8
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_4 0x3E6BFC
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_5 0x3E6C00
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_6 0x3E6C04
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_7 0x3E6C08
+
+#define mmNIF_RTR_CTRL_6_RGL_WDT 0x3E6C2C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_WRAP 0x3E6C30
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_WRAP 0x3E6C34
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_WRAP 0x3E6C38
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_WRAP 0x3E6C3C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_WRAP 0x3E6C40
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_WRAP 0x3E6C44
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_WRAP 0x3E6C48
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_WRAP 0x3E6C4C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_CNT 0x3E6C50
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_CNT 0x3E6C54
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_CNT 0x3E6C58
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_CNT 0x3E6C5C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_CNT 0x3E6C60
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_CNT 0x3E6C64
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_CNT 0x3E6C68
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_CNT 0x3E6C6C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_WRAP 0x3E6C70
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_WRAP 0x3E6C74
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_WRAP 0x3E6C78
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_WRAP 0x3E6C7C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_WRAP 0x3E6C80
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_WRAP 0x3E6C84
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_WRAP 0x3E6C88
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_WRAP 0x3E6C8C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_CNT 0x3E6C90
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_CNT 0x3E6C94
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_CNT 0x3E6C98
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_CNT 0x3E6C9C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_CNT 0x3E6CA0
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_CNT 0x3E6CA4
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_CNT 0x3E6CA8
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_CNT 0x3E6CAC
+
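+/*
+ * Illustrative sketch, not part of the auto-generated file: the mm* values
+ * in these headers are byte offsets into the device's MMIO space. Assuming
+ * the habanalabs driver's 32-bit MMIO read helper RREG32(), one of the
+ * counters above would be read along these lines:
+ *
+ *	u32 cnt = RREG32(mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_CNT);
+ */
+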
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_0 0x3E6CB0
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_1 0x3E6CB4
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_2 0x3E6CB8
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_3 0x3E6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_6_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
new file mode 100644
index 000000000000..3a6a34ba2958
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_7_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_7_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_7 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_7_PERM_SEL 0x3F6108
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_0 0x3F6114
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_1 0x3F6118
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_2 0x3F611C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_3 0x3F6120
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_4 0x3F6124
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_5 0x3F6128
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_6 0x3F612C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_7 0x3F6130
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_8 0x3F6134
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_9 0x3F6138
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_10 0x3F613C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_11 0x3F6140
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_12 0x3F6144
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_13 0x3F6148
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_14 0x3F614C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_15 0x3F6150
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_16 0x3F6154
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_17 0x3F6158
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_18 0x3F615C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_19 0x3F6160
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_20 0x3F6164
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_21 0x3F6168
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_22 0x3F616C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_23 0x3F6170
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_24 0x3F6174
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_25 0x3F6178
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_26 0x3F617C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_27 0x3F6180
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_0 0x3F6184
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_1 0x3F6188
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_2 0x3F618C
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_3 0x3F6190
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_4 0x3F6194
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_5 0x3F6198
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_6 0x3F619C
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_7 0x3F61A0
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_8 0x3F61A4
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_9 0x3F61A8
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_10 0x3F61AC
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_11 0x3F61B0
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_12 0x3F61B4
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_13 0x3F61B8
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_14 0x3F61BC
+
+#define mmNIF_RTR_CTRL_7_SCRAM_SRAM_EN 0x3F626C
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_EN 0x3F6274
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_SAT 0x3F6278
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_RST 0x3F627C
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_TIMEOUT 0x3F6280
+
+#define mmNIF_RTR_CTRL_7_SCRAM_HBM_EN 0x3F6284
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_EN 0x3F6288
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_SAT 0x3F628C
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_RST 0x3F6290
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_TIMEOUT 0x3F6294
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_EN 0x3F629C
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_SAT 0x3F62A0
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_RST 0x3F62A4
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_TIMEOUT 0x3F62AC
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_RED 0x3F62B4
+
+#define mmNIF_RTR_CTRL_7_E2E_HBM_EN 0x3F62EC
+
+#define mmNIF_RTR_CTRL_7_E2E_PCI_EN 0x3F62F0
+
+#define mmNIF_RTR_CTRL_7_E2E_HBM_WR_SIZE 0x3F62F4
+
+#define mmNIF_RTR_CTRL_7_E2E_PCI_WR_SIZE 0x3F62F8
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET_EN 0x3F6404
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET 0x3F6408
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_WRAP 0x3F640C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_CNT 0x3F6410
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET_EN 0x3F6414
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET 0x3F6418
+
+#define mmNIF_RTR_CTRL_7_E2E_HBM_RD_SIZE 0x3F641C
+
+#define mmNIF_RTR_CTRL_7_E2E_PCI_RD_SIZE 0x3F6420
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET_EN 0x3F6424
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET 0x3F6428
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_WRAP 0x3F642C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_CNT 0x3F6430
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET_EN 0x3F6434
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET 0x3F6438
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_SEL_0 0x3F6450
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_SEL_1 0x3F6454
+
+#define mmNIF_RTR_CTRL_7_NON_LIN_EN 0x3F6480
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_0 0x3F6500
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_1 0x3F6504
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_2 0x3F6508
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_3 0x3F650C
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_4 0x3F6510
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_0 0x3F6514
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_1 0x3F6520
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_2 0x3F6524
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_3 0x3F6528
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_4 0x3F652C
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_5 0x3F6530
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_6 0x3F6534
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_7 0x3F6538
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_8 0x3F653C
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_9 0x3F6540
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_0 0x3F6550
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_1 0x3F6554
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_2 0x3F6558
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_3 0x3F655C
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_4 0x3F6560
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_5 0x3F6564
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_6 0x3F6568
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_7 0x3F656C
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_8 0x3F6570
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_9 0x3F6574
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_10 0x3F6578
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_11 0x3F657C
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_12 0x3F6580
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_13 0x3F6584
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_14 0x3F6588
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_15 0x3F658C
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_16 0x3F6590
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_17 0x3F6594
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_18 0x3F6598
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0 0x3F65E4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_1 0x3F65E8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_2 0x3F65EC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_3 0x3F65F0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_4 0x3F65F4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_5 0x3F65F8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_6 0x3F65FC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_7 0x3F6600
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_8 0x3F6604
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_9 0x3F6608
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_10 0x3F660C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_11 0x3F6610
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_12 0x3F6614
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_13 0x3F6618
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_14 0x3F661C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_15 0x3F6620
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0 0x3F6624
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_1 0x3F6628
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_2 0x3F662C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_3 0x3F6630
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_4 0x3F6634
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_5 0x3F6638
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_6 0x3F663C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_7 0x3F6640
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_8 0x3F6644
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_9 0x3F6648
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_10 0x3F664C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_11 0x3F6650
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_12 0x3F6654
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_13 0x3F6658
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_14 0x3F665C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_15 0x3F6660
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0 0x3F6664
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_1 0x3F6668
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_2 0x3F666C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_3 0x3F6670
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_4 0x3F6674
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_5 0x3F6678
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_6 0x3F667C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_7 0x3F6680
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_8 0x3F6684
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_9 0x3F6688
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_10 0x3F668C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_11 0x3F6690
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_12 0x3F6694
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_13 0x3F6698
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_14 0x3F669C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_15 0x3F66A0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0 0x3F66A4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_1 0x3F66A8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_2 0x3F66AC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_3 0x3F66B0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_4 0x3F66B4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_5 0x3F66B8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_6 0x3F66BC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_7 0x3F66C0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_8 0x3F66C4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_9 0x3F66C8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_10 0x3F66CC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_11 0x3F66D0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_12 0x3F66D4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_13 0x3F66D8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_14 0x3F66DC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_15 0x3F66E0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_0 0x3F66E4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_1 0x3F66E8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_2 0x3F66EC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_3 0x3F66F0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_4 0x3F66F4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_5 0x3F66F8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_6 0x3F66FC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_7 0x3F6700
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_8 0x3F6704
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_9 0x3F6708
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_10 0x3F670C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_11 0x3F6710
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_12 0x3F6714
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_13 0x3F6718
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_14 0x3F671C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_15 0x3F6720
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_0 0x3F6724
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_1 0x3F6728
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_2 0x3F672C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_3 0x3F6730
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_4 0x3F6734
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_5 0x3F6738
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_6 0x3F673C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_7 0x3F6740
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_8 0x3F6744
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_9 0x3F6748
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_10 0x3F674C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_11 0x3F6750
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_12 0x3F6754
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_13 0x3F6758
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_14 0x3F675C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_15 0x3F6760
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_0 0x3F6764
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_1 0x3F6768
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_2 0x3F676C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_3 0x3F6770
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_4 0x3F6774
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_5 0x3F6778
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_6 0x3F677C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_7 0x3F6780
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_8 0x3F6784
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_9 0x3F6788
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_10 0x3F678C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_11 0x3F6790
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_12 0x3F6794
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_13 0x3F6798
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_14 0x3F679C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_15 0x3F67A0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_0 0x3F67A4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_1 0x3F67A8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_2 0x3F67AC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_3 0x3F67B0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_4 0x3F67B4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_5 0x3F67B8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_6 0x3F67BC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_7 0x3F67C0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_8 0x3F67C4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_9 0x3F67C8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_10 0x3F67CC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_11 0x3F67D0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_12 0x3F67D4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_13 0x3F67D8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_14 0x3F67DC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_15 0x3F67E0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0 0x3F6824
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_1 0x3F6828
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_2 0x3F682C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_3 0x3F6830
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_4 0x3F6834
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_5 0x3F6838
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_6 0x3F683C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_7 0x3F6840
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_8 0x3F6844
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_9 0x3F6848
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_10 0x3F684C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_11 0x3F6850
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_12 0x3F6854
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_13 0x3F6858
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_14 0x3F685C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_15 0x3F6860
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0 0x3F6864
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_1 0x3F6868
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_2 0x3F686C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_3 0x3F6870
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_4 0x3F6874
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_5 0x3F6878
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_6 0x3F687C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_7 0x3F6880
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_8 0x3F6884
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_9 0x3F6888
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_10 0x3F688C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_11 0x3F6890
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_12 0x3F6894
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_13 0x3F6898
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_14 0x3F689C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_15 0x3F68A0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0 0x3F68A4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_1 0x3F68A8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_2 0x3F68AC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_3 0x3F68B0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_4 0x3F68B4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_5 0x3F68B8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_6 0x3F68BC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_7 0x3F68C0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_8 0x3F68C4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_9 0x3F68C8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_10 0x3F68CC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_11 0x3F68D0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_12 0x3F68D4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_13 0x3F68D8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_14 0x3F68DC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_15 0x3F68E0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0 0x3F68E4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_1 0x3F68E8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_2 0x3F68EC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_3 0x3F68F0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_4 0x3F68F4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_5 0x3F68F8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_6 0x3F68FC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_7 0x3F6900
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_8 0x3F6904
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_9 0x3F6908
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_10 0x3F690C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_11 0x3F6910
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_12 0x3F6914
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_13 0x3F6918
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_14 0x3F691C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_15 0x3F6920
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_0 0x3F6924
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_1 0x3F6928
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_2 0x3F692C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_3 0x3F6930
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_4 0x3F6934
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_5 0x3F6938
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_6 0x3F693C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_7 0x3F6940
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_8 0x3F6944
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_9 0x3F6948
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_10 0x3F694C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_11 0x3F6950
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_12 0x3F6954
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_13 0x3F6958
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_14 0x3F695C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_15 0x3F6960
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_0 0x3F6964
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_1 0x3F6968
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_2 0x3F696C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_3 0x3F6970
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_4 0x3F6974
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_5 0x3F6978
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_6 0x3F697C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_7 0x3F6980
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_8 0x3F6984
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_9 0x3F6988
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_10 0x3F698C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_11 0x3F6990
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_12 0x3F6994
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_13 0x3F6998
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_14 0x3F699C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_15 0x3F69A0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_0 0x3F69A4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_1 0x3F69A8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_2 0x3F69AC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_3 0x3F69B0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_4 0x3F69B4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_5 0x3F69B8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_6 0x3F69BC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_7 0x3F69C0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_8 0x3F69C4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_9 0x3F69C8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_10 0x3F69CC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_11 0x3F69D0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_12 0x3F69D4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_13 0x3F69D8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_14 0x3F69DC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_15 0x3F69E0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_0 0x3F69E4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_1 0x3F69E8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_2 0x3F69EC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_3 0x3F69F0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_4 0x3F69F4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_5 0x3F69F8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_6 0x3F69FC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_7 0x3F6A00
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_8 0x3F6A04
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_9 0x3F6A08
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_10 0x3F6A0C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_11 0x3F6A10
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_12 0x3F6A14
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_13 0x3F6A18
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_14 0x3F6A1C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_15 0x3F6A20
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW 0x3F6A64
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR 0x3F6A68
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_HIT_AW 0x3F6A6C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_HIT_AR 0x3F6A70
+
+#define mmNIF_RTR_CTRL_7_RGL_CFG 0x3F6B64
+
+#define mmNIF_RTR_CTRL_7_RGL_SHIFT 0x3F6B68
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_0 0x3F6B6C
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_1 0x3F6B70
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_2 0x3F6B74
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_3 0x3F6B78
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_4 0x3F6B7C
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_5 0x3F6B80
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_6 0x3F6B84
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_7 0x3F6B88
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_0 0x3F6BAC
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_1 0x3F6BB0
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_2 0x3F6BB4
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_3 0x3F6BB8
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_4 0x3F6BBC
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_5 0x3F6BC0
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_6 0x3F6BC4
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_7 0x3F6BC8
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_0 0x3F6BEC
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_1 0x3F6BF0
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_2 0x3F6BF4
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_3 0x3F6BF8
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_4 0x3F6BFC
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_5 0x3F6C00
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_6 0x3F6C04
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_7 0x3F6C08
+
+#define mmNIF_RTR_CTRL_7_RGL_WDT 0x3F6C2C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_WRAP 0x3F6C30
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_WRAP 0x3F6C34
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_WRAP 0x3F6C38
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_WRAP 0x3F6C3C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_WRAP 0x3F6C40
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_WRAP 0x3F6C44
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_WRAP 0x3F6C48
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_WRAP 0x3F6C4C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_CNT 0x3F6C50
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_CNT 0x3F6C54
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_CNT 0x3F6C58
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_CNT 0x3F6C5C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_CNT 0x3F6C60
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_CNT 0x3F6C64
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_CNT 0x3F6C68
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_CNT 0x3F6C6C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_WRAP 0x3F6C70
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_WRAP 0x3F6C74
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_WRAP 0x3F6C78
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_WRAP 0x3F6C7C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_WRAP 0x3F6C80
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_WRAP 0x3F6C84
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_WRAP 0x3F6C88
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_WRAP 0x3F6C8C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_CNT 0x3F6C90
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_CNT 0x3F6C94
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_CNT 0x3F6C98
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_CNT 0x3F6C9C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_CNT 0x3F6CA0
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_CNT 0x3F6CA4
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_CNT 0x3F6CA8
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_CNT 0x3F6CAC
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_0 0x3F6CB0
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_1 0x3F6CB4
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_2 0x3F6CB8
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_3 0x3F6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_7_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
new file mode 100644
index 000000000000..b7c33e025db5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_REGS_H_
+#define ASIC_REG_PSOC_ETR_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR (Prototype: ETR)
+ *****************************************
+ */
+
+#define mmPSOC_ETR_RSZ 0x2C43004
+
+#define mmPSOC_ETR_STS 0x2C4300C
+
+#define mmPSOC_ETR_RRD 0x2C43010
+
+#define mmPSOC_ETR_RRP 0x2C43014
+
+#define mmPSOC_ETR_RWP 0x2C43018
+
+#define mmPSOC_ETR_TRG 0x2C4301C
+
+#define mmPSOC_ETR_CTL 0x2C43020
+
+#define mmPSOC_ETR_RWD 0x2C43024
+
+#define mmPSOC_ETR_MODE 0x2C43028
+
+#define mmPSOC_ETR_LBUFLEVEL 0x2C4302C
+
+#define mmPSOC_ETR_CBUFLEVEL 0x2C43030
+
+#define mmPSOC_ETR_BUFWM 0x2C43034
+
+#define mmPSOC_ETR_RRPHI 0x2C43038
+
+#define mmPSOC_ETR_RWPHI 0x2C4303C
+
+#define mmPSOC_ETR_AXICTL 0x2C43110
+
+#define mmPSOC_ETR_DBALO 0x2C43118
+
+#define mmPSOC_ETR_DBAHI 0x2C4311C
+
+#define mmPSOC_ETR_FFSR 0x2C43300
+
+#define mmPSOC_ETR_FFCR 0x2C43304
+
+#define mmPSOC_ETR_PSCR 0x2C43308
+
+#define mmPSOC_ETR_ITMISCOP0 0x2C43EE0
+
+#define mmPSOC_ETR_ITTRFLIN 0x2C43EE8
+
+#define mmPSOC_ETR_ITATBDATA0 0x2C43EEC
+
+#define mmPSOC_ETR_ITATBCTR2 0x2C43EF0
+
+#define mmPSOC_ETR_ITATBCTR1 0x2C43EF4
+
+#define mmPSOC_ETR_ITATBCTR0 0x2C43EF8
+
+#define mmPSOC_ETR_ITCTRL 0x2C43F00
+
+#define mmPSOC_ETR_CLAIMSET 0x2C43FA0
+
+#define mmPSOC_ETR_CLAIMCLR 0x2C43FA4
+
+#define mmPSOC_ETR_LAR 0x2C43FB0
+
+#define mmPSOC_ETR_LSR 0x2C43FB4
+
+#define mmPSOC_ETR_AUTHSTATUS 0x2C43FB8
+
+#define mmPSOC_ETR_DEVID 0x2C43FC8
+
+#define mmPSOC_ETR_DEVTYPE 0x2C43FCC
+
+#define mmPSOC_ETR_PERIPHID4 0x2C43FD0
+
+#define mmPSOC_ETR_PERIPHID5 0x2C43FD4
+
+#define mmPSOC_ETR_PERIPHID6 0x2C43FD8
+
+#define mmPSOC_ETR_PERIPHID7 0x2C43FDC
+
+#define mmPSOC_ETR_PERIPHID0 0x2C43FE0
+
+#define mmPSOC_ETR_PERIPHID1 0x2C43FE4
+
+#define mmPSOC_ETR_PERIPHID2 0x2C43FE8
+
+#define mmPSOC_ETR_PERIPHID3 0x2C43FEC
+
+#define mmPSOC_ETR_COMPID0 0x2C43FF0
+
+#define mmPSOC_ETR_COMPID1 0x2C43FF4
+
+#define mmPSOC_ETR_COMPID2 0x2C43FF8
+
+#define mmPSOC_ETR_COMPID3 0x2C43FFC
+
+#endif /* ASIC_REG_PSOC_ETR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
new file mode 100644
index 000000000000..6703e678ee9f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+/* PSOC_GLOBAL_CONF_NON_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_PCI_FW_FSM */
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BTM_FSM */
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BTM_FSM */
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_MEM_EN */
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN */
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_EN */
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR */
+#define PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SPI_IMG_STS */
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK 0x2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SHIFT 2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_MASK 0x4
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_SHIFT 3
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_MASK 0x8
+
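+/*
+ * Illustrative sketch, not part of the auto-generated file: each
+ * _SHIFT/_MASK pair below describes one field of its register, extracted
+ * as (reg & MASK) >> SHIFT. Assuming the habanalabs driver's RREG32()
+ * helper and the matching mmPSOC_GLOBAL_CONF_SPI_IMG_STS offset from the
+ * corresponding regs header:
+ *
+ *	u32 sts = RREG32(mmPSOC_GLOBAL_CONF_SPI_IMG_STS);
+ *	u32 sec = (sts & PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK) >>
+ *			PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT;
+ */
+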
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_MASK 0x100
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD_DONE_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PHY_STABLE */
+#define PSOC_GLOBAL_CONF_PHY_STABLE_PRSTN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PHY_STABLE_PRSTN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN_OVR */
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_VAL_SHIFT 4
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_VAL_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_ETR_FLUSH */
+#define PSOC_GLOBAL_CONF_ETR_FLUSH_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_ETR_FLUSH_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_COLD_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_COLD_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_COLD_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_DIS_RAZWI_ERR */
+#define PSOC_GLOBAL_CONF_DIS_RAZWI_ERR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_DIS_RAZWI_ERR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_PHY_RST_N */
+#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RAZWI */
+#define PSOC_GLOBAL_CONF_RAZWI_INTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_RAZWI_INTR_MASK 0x1
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_SHIFT 4
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_PROT */
+#define PSOC_GLOBAL_CONF_PROT_AR_SHIFT 0
+#define PSOC_GLOBAL_CONF_PROT_AR_MASK 0x7
+#define PSOC_GLOBAL_CONF_PROT_AW_SHIFT 4
+#define PSOC_GLOBAL_CONF_PROT_AW_MASK 0x70
+
+/* PSOC_GLOBAL_CONF_ADC */
+#define PSOC_GLOBAL_CONF_ADC_INTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_INTR_MASK 0x1
+#define PSOC_GLOBAL_CONF_ADC_MASK_SHIFT 4
+#define PSOC_GLOBAL_CONF_ADC_MASK_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TO */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TO_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TO_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SCRATCHPAD */
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SEMAPHORE */
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_CPU_BOOT_STATUS */
+#define PSOC_GLOBAL_CONF_CPU_BOOT_STATUS_CNTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_CPU_BOOT_STATUS_CNTR_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU */
+#define PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPL_SOURCE */
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_I2C_MSTR1_DBG */
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_MASK 0x1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_SHIFT 1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_MASK 0x2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_SHIFT 2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_MASK 0x4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_SHIFT 3
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_MASK 0x8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_SHIFT 4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_MASK 0x10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_SHIFT 5
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_MASK 0x20
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_SHIFT 6
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_MASK 0x40
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_SHIFT 7
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_MASK 0x80
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_SHIFT 8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_MASK 0x100
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_SHIFT 9
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_MASK 0x200
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_SHIFT 10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_MASK 0x7C00
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_SHIFT 15
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_MASK 0x78000
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_SHIFT 19
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_MASK 0x80000
+
+/* PSOC_GLOBAL_CONF_I2C_SLV */
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK */
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_TRACE_ADDR */
+#define PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK 0x3FF
+
+/* PSOC_GLOBAL_CONF_ARUSER */
+#define PSOC_GLOBAL_CONF_ARUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ARUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_AWUSER */
+#define PSOC_GLOBAL_CONF_AWUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TRACE_AWUSER */
+#define PSOC_GLOBAL_CONF_TRACE_AWUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TRACE_ARUSER */
+#define PSOC_GLOBAL_CONF_TRACE_ARUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_ARUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_STS */
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_SHIFT 4
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_MASK 0x10
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_SHIFT 8
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_MASK 0xF00
+
+/* PSOC_GLOBAL_CONF_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_MASK 0x1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_SHIFT 1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_MASK 0x2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_SHIFT 2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_MASK 0x4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_SHIFT 3
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_MASK 0x8
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_SHIFT 4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_MASK 0x10
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_SHIFT 5
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_MASK 0x20
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_SHIFT 6
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_MASK 0x40
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_SHIFT 7
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_MASK 0x80
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_5_SHIFT 8
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_5_MASK 0x100
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_6_SHIFT 9
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_6_MASK 0x200
+
+/* PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_SHIFT 0
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_MASK 0x1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_SHIFT 1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_MASK 0x2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_SHIFT 2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_MASK 0x4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_SHIFT 3
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_MASK 0x8
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_SHIFT 4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_MASK 0x10
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_SHIFT 5
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_MASK 0x20
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_SHIFT 6
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_MASK 0x40
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_SHIFT 7
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_MASK 0x80
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_SHIFT 12
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_MASK 0x1000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_SHIFT 13
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_MASK 0x2000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_SHIFT 16
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_COMB_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_AXI_ERR_INTR */
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_TARGETID */
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_SHIFT 1
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_MASK 0xFFE
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_SHIFT 16
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_MASK 0xFFF0000
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT 28
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK 0xF0000000
+
+/* PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE */
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS */
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_REPAIR_CFG_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_REPAIR_CFG_MASK 0xC
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_SEL_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_SEL_MASK 0x1FFF00
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_GRAD_RST_SHIFT 22
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_GRAD_RST_MASK 0x400000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_DIS_SHIFT 23
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_DIS_MASK 0x800000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SHIFT 24
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_MASK 0x1F000000
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_DIV */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_VAL_SHIFT 8
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_VAL_MASK 0xFF00
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_SHIFT 1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_STS */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_FAIL_SHIFT 4
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_FAIL_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_OUTSTANT_TRANS */
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_SHIFT 0
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_MASK 0x1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_SHIFT 1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MASK_REQ */
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_WD_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_WD_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_MNL_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_MNL_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_SW_ALL_RST */
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SOFT_RST */
+#define PSOC_GLOBAL_CONF_SOFT_RST_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SOFT_RST_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SOFT_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SOFT_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_UNIT_RST_N */
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_UNIT_RST_N_L */
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_UNIT_RST_N_H */
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_IMG */
+#define PSOC_GLOBAL_CONF_BTL_IMG_SEL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_IMG_SEL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN_MASK */
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_WD_MASK */
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RST_SRC */
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_BOOT_STATE */
+#define PSOC_GLOBAL_CONF_BOOT_STATE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_STATE_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_CFG */
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_3V3_CFG */
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_INPUT */
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_BNK3V3_MS */
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_ADC_CLK_FREQ */
+#define PSOC_GLOBAL_CONF_ADC_CLK_FREQ_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_CLK_FREQ_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START */
+#define PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_DATA_SAMPLES */
+#define PSOC_GLOBAL_CONF_ADC_DATA_SAMPLES_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_DATA_SAMPLES_VAL_MASK 0x1F
+
+/* PSOC_GLOBAL_CONF_ADC_TPH_CS */
+#define PSOC_GLOBAL_CONF_ADC_TPH_CS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_TPH_CS_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_LSB_NMSB */
+#define PSOC_GLOBAL_CONF_ADC_LSB_NMSB_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_LSB_NMSB_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES */
+#define PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE */
+#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ADC_CFG_DATA */
+#define PSOC_GLOBAL_CONF_ADC_CFG_DATA_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_CFG_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_ADC_TDV_CSDO */
+#define PSOC_GLOBAL_CONF_ADC_TDV_CSDO_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_TDV_CSDO_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_TSU_CSCK */
+#define PSOC_GLOBAL_CONF_ADC_TSU_CSCK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_TSU_CSCK_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_PAD_DEFAULT */
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_PAD_SEL */
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_RST_CTRL */
+#define PSOC_GLOBAL_CONF_RST_CTRL_SEL_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_CTRL_SEL_MASK 0xFF
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
new file mode 100644
index 000000000000..1b5cfcc1d85f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
@@ -0,0 +1,1062 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0 0xC4B000
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_1 0xC4B004
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_2 0xC4B008
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 0xC4B00C
+
+#define mmPSOC_GLOBAL_CONF_PCI_FW_FSM 0xC4B020
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START 0xC4B024
+
+#define mmPSOC_GLOBAL_CONF_BTM_FSM 0xC4B028
+
+#define mmPSOC_GLOBAL_CONF_SW_BTM_FSM 0xC4B030
+
+#define mmPSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM 0xC4B034
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT 0xC4B038
+
+#define mmPSOC_GLOBAL_CONF_SPI_MEM_EN 0xC4B040
+
+#define mmPSOC_GLOBAL_CONF_PRSTN 0xC4B044
+
+#define mmPSOC_GLOBAL_CONF_PCIE_EN 0xC4B048
+
+#define mmPSOC_GLOBAL_CONF_PCIE_PRSTN_INTR 0xC4B04C
+
+#define mmPSOC_GLOBAL_CONF_SPI_IMG_STS 0xC4B050
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_FSM 0xC4B054
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD 0xC4B058
+
+#define mmPSOC_GLOBAL_CONF_PHY_STABLE 0xC4B060
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_OVR 0xC4B064
+
+#define mmPSOC_GLOBAL_CONF_ETR_FLUSH 0xC4B068
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_0 0xC4B070
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1 0xC4B074
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2 0xC4B078
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3 0xC4B07C
+
+#define mmPSOC_GLOBAL_CONF_DIS_RAZWI_ERR 0xC4B080
+
+#define mmPSOC_GLOBAL_CONF_PCIE_PHY_RST_N 0xC4B084
+
+#define mmPSOC_GLOBAL_CONF_RAZWI 0xC4B088
+
+#define mmPSOC_GLOBAL_CONF_PROT 0xC4B090
+
+#define mmPSOC_GLOBAL_CONF_ADC 0xC4B094
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TO 0xC4B098
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 0xC4B100
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 0xC4B104
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 0xC4B108
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 0xC4B10C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 0xC4B110
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 0xC4B114
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 0xC4B118
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 0xC4B11C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 0xC4B120
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 0xC4B124
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 0xC4B128
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_11 0xC4B12C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_12 0xC4B130
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_13 0xC4B134
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_14 0xC4B138
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_15 0xC4B13C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_16 0xC4B140
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_17 0xC4B144
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_18 0xC4B148
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_19 0xC4B14C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 0xC4B150
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 0xC4B154
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 0xC4B158
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 0xC4B15C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 0xC4B160
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 0xC4B164
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 0xC4B168
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 0xC4B16C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 0xC4B170
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 0xC4B174
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 0xC4B178
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 0xC4B17C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_0 0xC4B200
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_1 0xC4B204
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_2 0xC4B208
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_3 0xC4B20C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_4 0xC4B210
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_5 0xC4B214
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_6 0xC4B218
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_7 0xC4B21C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_8 0xC4B220
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_9 0xC4B224
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_10 0xC4B228
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_11 0xC4B22C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_12 0xC4B230
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_13 0xC4B234
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_14 0xC4B238
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_15 0xC4B23C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_16 0xC4B240
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_17 0xC4B244
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_18 0xC4B248
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_19 0xC4B24C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_20 0xC4B250
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_21 0xC4B254
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_22 0xC4B258
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_23 0xC4B25C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_24 0xC4B260
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_25 0xC4B264
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_26 0xC4B268
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_27 0xC4B26C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_28 0xC4B270
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_29 0xC4B274
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_30 0xC4B278
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_31 0xC4B27C
+
+#define mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS 0xC4B300
+
+#define mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU 0xC4B304
+
+#define mmPSOC_GLOBAL_CONF_SPL_SOURCE 0xC4B308
+
+#define mmPSOC_GLOBAL_CONF_I2C_MSTR1_DBG 0xC4B30C
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV 0xC4B310
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK 0xC4B314
+
+#define mmPSOC_GLOBAL_CONF_TRACE_ADDR 0xC4B320
+
+#define mmPSOC_GLOBAL_CONF_ARUSER 0xC4B330
+
+#define mmPSOC_GLOBAL_CONF_AWUSER 0xC4B334
+
+#define mmPSOC_GLOBAL_CONF_TRACE_AWUSER 0xC4B338
+
+#define mmPSOC_GLOBAL_CONF_TRACE_ARUSER 0xC4B33C
+
+#define mmPSOC_GLOBAL_CONF_BTL_STS 0xC4B340
+
+#define mmPSOC_GLOBAL_CONF_TIMEOUT_INTR 0xC4B350
+
+#define mmPSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR 0xC4B354
+
+#define mmPSOC_GLOBAL_CONF_PERIPH_INTR 0xC4B358
+
+#define mmPSOC_GLOBAL_CONF_COMB_PERIPH_INTR 0xC4B35C
+
+#define mmPSOC_GLOBAL_CONF_AXI_ERR_INTR 0xC4B360
+
+#define mmPSOC_GLOBAL_CONF_TARGETID 0xC4B400
+
+#define mmPSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE 0xC4B420
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS 0xC4B430
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_DIV 0xC4B44C
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_CTRL 0xC4B450
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_STS 0xC4B454
+
+#define mmPSOC_GLOBAL_CONF_OUTSTANT_TRANS 0xC4B458
+
+#define mmPSOC_GLOBAL_CONF_MASK_REQ 0xC4B45C
+
+#define mmPSOC_GLOBAL_CONF_WD_RST_CFG_L 0xC4B460
+
+#define mmPSOC_GLOBAL_CONF_WD_RST_CFG_H 0xC4B464
+
+#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG_L 0xC4B470
+
+#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG_H 0xC4B474
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG_L 0xC4B480
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG_H 0xC4B484
+
+#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L 0xC4B490
+
+#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H 0xC4B494
+
+#define mmPSOC_GLOBAL_CONF_SW_ALL_RST 0xC4B498
+
+#define mmPSOC_GLOBAL_CONF_SOFT_RST 0xC4B4A0
+
+#define mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L 0xC4B4A4
+
+#define mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H 0xC4B4A8
+
+#define mmPSOC_GLOBAL_CONF_UNIT_RST_N 0xC4B4B0
+
+#define mmPSOC_GLOBAL_CONF_UNIT_RST_N_L 0xC4B4B4
+
+#define mmPSOC_GLOBAL_CONF_UNIT_RST_N_H 0xC4B4B8
+
+#define mmPSOC_GLOBAL_CONF_BTL_IMG 0xC4B4E0
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_MASK 0xC4B4E4
+
+#define mmPSOC_GLOBAL_CONF_WD_MASK 0xC4B4E8
+
+#define mmPSOC_GLOBAL_CONF_RST_SRC 0xC4B4F0
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STATE 0xC4B4F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_0 0xC4B500
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_1 0xC4B504
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_2 0xC4B508
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_3 0xC4B50C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_4 0xC4B510
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_5 0xC4B514
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_6 0xC4B518
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_7 0xC4B51C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_8 0xC4B520
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_9 0xC4B524
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_10 0xC4B528
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_11 0xC4B52C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_12 0xC4B530
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_13 0xC4B534
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_14 0xC4B538
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_15 0xC4B53C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_16 0xC4B540
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_17 0xC4B544
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_18 0xC4B548
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_19 0xC4B54C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_20 0xC4B550
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_21 0xC4B554
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_22 0xC4B558
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_23 0xC4B55C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_24 0xC4B560
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_25 0xC4B564
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_26 0xC4B568
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_27 0xC4B56C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_28 0xC4B570
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_29 0xC4B574
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_30 0xC4B578
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_31 0xC4B57C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_32 0xC4B580
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_33 0xC4B584
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_34 0xC4B588
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_35 0xC4B58C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_36 0xC4B590
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_37 0xC4B594
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_38 0xC4B598
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_39 0xC4B59C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_40 0xC4B5A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_41 0xC4B5A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_42 0xC4B5A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_43 0xC4B5AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_44 0xC4B5B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_45 0xC4B5B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_46 0xC4B5B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_47 0xC4B5BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_48 0xC4B5C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_49 0xC4B5C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_50 0xC4B5C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_51 0xC4B5CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_52 0xC4B5D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_53 0xC4B5D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_54 0xC4B5D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_55 0xC4B5DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_56 0xC4B5E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_57 0xC4B5E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_58 0xC4B5E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_59 0xC4B5EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_60 0xC4B5F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_61 0xC4B5F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_62 0xC4B5F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_63 0xC4B5FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_64 0xC4B600
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_65 0xC4B604
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_66 0xC4B608
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_67 0xC4B60C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_68 0xC4B610
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_69 0xC4B614
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_70 0xC4B618
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_71 0xC4B61C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_72 0xC4B620
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_73 0xC4B624
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_74 0xC4B628
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_75 0xC4B62C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_76 0xC4B630
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_77 0xC4B634
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_78 0xC4B638
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_79 0xC4B63C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_80 0xC4B640
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_81 0xC4B644
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_82 0xC4B648
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_83 0xC4B64C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_84 0xC4B650
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_85 0xC4B654
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_86 0xC4B658
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_87 0xC4B65C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_88 0xC4B660
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_89 0xC4B664
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_0 0xC4B690
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_1 0xC4B694
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_2 0xC4B698
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_3 0xC4B69C
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_4 0xC4B6A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_5 0xC4B6A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_6 0xC4B6A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_7 0xC4B6AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_8 0xC4B6B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_9 0xC4B6B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_10 0xC4B6B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_11 0xC4B6BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_0 0xC4B6C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_1 0xC4B6C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_2 0xC4B6C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_3 0xC4B6CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_4 0xC4B6D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_5 0xC4B6D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_6 0xC4B6D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_7 0xC4B6DC
+
+#define mmPSOC_GLOBAL_CONF_BNK3V3_MS 0xC4B710
+
+#define mmPSOC_GLOBAL_CONF_ADC_CLK_FREQ 0xC4B720
+
+#define mmPSOC_GLOBAL_CONF_ADC_DELAY_FROM_START 0xC4B724
+
+#define mmPSOC_GLOBAL_CONF_ADC_DATA_SAMPLES 0xC4B728
+
+#define mmPSOC_GLOBAL_CONF_ADC_TPH_CS 0xC4B72C
+
+#define mmPSOC_GLOBAL_CONF_ADC_LSB_NMSB 0xC4B730
+
+#define mmPSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES 0xC4B734
+
+#define mmPSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE 0xC4B738
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA 0xC4B73C
+
+#define mmPSOC_GLOBAL_CONF_ADC_TDV_CSDO 0xC4B740
+
+#define mmPSOC_GLOBAL_CONF_ADC_TSU_CSCK 0xC4B744
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0xC4B800
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_1 0xC4B804
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_2 0xC4B808
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_3 0xC4B80C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_4 0xC4B810
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_5 0xC4B814
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_6 0xC4B818
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_7 0xC4B81C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_8 0xC4B820
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_9 0xC4B824
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_10 0xC4B828
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_11 0xC4B82C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_12 0xC4B830
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_13 0xC4B834
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_14 0xC4B838
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_15 0xC4B83C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_16 0xC4B840
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_17 0xC4B844
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_18 0xC4B848
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_19 0xC4B84C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_20 0xC4B850
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_21 0xC4B854
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_22 0xC4B858
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_23 0xC4B85C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_24 0xC4B860
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_25 0xC4B864
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_26 0xC4B868
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_27 0xC4B86C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_28 0xC4B870
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_29 0xC4B874
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_30 0xC4B878
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_31 0xC4B87C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_32 0xC4B880
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_33 0xC4B884
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_34 0xC4B888
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_35 0xC4B88C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_36 0xC4B890
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_37 0xC4B894
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_38 0xC4B898
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_39 0xC4B89C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_40 0xC4B8A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_41 0xC4B8A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_42 0xC4B8A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_43 0xC4B8AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_44 0xC4B8B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_45 0xC4B8B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_46 0xC4B8B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_47 0xC4B8BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_48 0xC4B8C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_49 0xC4B8C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_50 0xC4B8C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_51 0xC4B8CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_52 0xC4B8D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_53 0xC4B8D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_54 0xC4B8D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_55 0xC4B8DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_56 0xC4B8E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_57 0xC4B8E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_58 0xC4B8E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_59 0xC4B8EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_60 0xC4B8F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_61 0xC4B8F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_62 0xC4B8F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_63 0xC4B8FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_64 0xC4B900
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_65 0xC4B904
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_66 0xC4B908
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_67 0xC4B90C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_68 0xC4B910
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_69 0xC4B914
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_70 0xC4B918
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_71 0xC4B91C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_72 0xC4B920
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_73 0xC4B924
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_74 0xC4B928
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_75 0xC4B92C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_76 0xC4B930
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_77 0xC4B934
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_78 0xC4B938
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_79 0xC4B93C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_80 0xC4B940
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_81 0xC4B944
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_82 0xC4B948
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_83 0xC4B94C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_84 0xC4B950
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_85 0xC4B954
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_86 0xC4B958
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_87 0xC4B95C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_88 0xC4B960
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_89 0xC4B964
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_90 0xC4B968
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_91 0xC4B96C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_92 0xC4B970
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_93 0xC4B974
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_94 0xC4B978
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_95 0xC4B97C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_96 0xC4B980
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_97 0xC4B984
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_98 0xC4B988
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_99 0xC4B98C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_100 0xC4B990
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_101 0xC4B994
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_102 0xC4B998
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_0 0xC4BA00
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_1 0xC4BA04
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_2 0xC4BA08
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_3 0xC4BA0C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_4 0xC4BA10
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_5 0xC4BA14
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_6 0xC4BA18
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_7 0xC4BA1C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_8 0xC4BA20
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_9 0xC4BA24
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_10 0xC4BA28
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_11 0xC4BA2C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_12 0xC4BA30
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_13 0xC4BA34
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_14 0xC4BA38
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_15 0xC4BA3C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_16 0xC4BA40
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_17 0xC4BA44
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_18 0xC4BA48
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_19 0xC4BA4C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_20 0xC4BA50
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_21 0xC4BA54
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_22 0xC4BA58
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_23 0xC4BA5C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_24 0xC4BA60
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_25 0xC4BA64
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_26 0xC4BA68
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_27 0xC4BA6C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_28 0xC4BA70
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_29 0xC4BA74
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_30 0xC4BA78
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_31 0xC4BA7C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_32 0xC4BA80
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_33 0xC4BA84
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_34 0xC4BA88
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_35 0xC4BA8C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_36 0xC4BA90
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_37 0xC4BA94
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_38 0xC4BA98
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_39 0xC4BA9C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_40 0xC4BAA0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_41 0xC4BAA4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_42 0xC4BAA8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_43 0xC4BAAC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_44 0xC4BAB0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_45 0xC4BAB4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_46 0xC4BAB8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_47 0xC4BABC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_48 0xC4BAC0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_49 0xC4BAC4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_50 0xC4BAC8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_51 0xC4BACC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_52 0xC4BAD0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_53 0xC4BAD4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_54 0xC4BAD8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_55 0xC4BADC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_56 0xC4BAE0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_57 0xC4BAE4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_58 0xC4BAE8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_59 0xC4BAEC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_60 0xC4BAF0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_61 0xC4BAF4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_62 0xC4BAF8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_63 0xC4BAFC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_64 0xC4BB00
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_65 0xC4BB04
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_66 0xC4BB08
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_67 0xC4BB0C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_68 0xC4BB10
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_69 0xC4BB14
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_70 0xC4BB18
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_71 0xC4BB1C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_72 0xC4BB20
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_73 0xC4BB24
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_74 0xC4BB28
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_75 0xC4BB2C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_76 0xC4BB30
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_77 0xC4BB34
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_78 0xC4BB38
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_79 0xC4BB3C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_80 0xC4BB40
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BB44
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_82 0xC4BB48
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_83 0xC4BB4C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_84 0xC4BB50
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_85 0xC4BB54
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_86 0xC4BB58
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_87 0xC4BB5C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_88 0xC4BB60
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_89 0xC4BB64
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_90 0xC4BB68
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_91 0xC4BB6C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_92 0xC4BB70
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_93 0xC4BB74
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_94 0xC4BB78
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_95 0xC4BB7C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_96 0xC4BB80
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_97 0xC4BB84
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_98 0xC4BB88
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_99 0xC4BB8C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_100 0xC4BB90
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_101 0xC4BB94
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_102 0xC4BB98
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_0 0xC4BC00
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_1 0xC4BC04
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_2 0xC4BC08
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_3 0xC4BC0C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_4 0xC4BC10
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_5 0xC4BC14
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_6 0xC4BC18
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_7 0xC4BC1C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_8 0xC4BC20
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_9 0xC4BC24
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_10 0xC4BC28
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_11 0xC4BC2C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_12 0xC4BC30
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_13 0xC4BC34
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_14 0xC4BC38
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_15 0xC4BC3C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_16 0xC4BC40
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_17 0xC4BC44
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_18 0xC4BC48
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_19 0xC4BC4C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_20 0xC4BC50
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_21 0xC4BC54
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_22 0xC4BC58
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_23 0xC4BC5C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_24 0xC4BC60
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_25 0xC4BC64
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_26 0xC4BC68
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_27 0xC4BC6C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_28 0xC4BC70
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_29 0xC4BC74
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_30 0xC4BC78
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_31 0xC4BC7C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_32 0xC4BC80
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_33 0xC4BC84
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_34 0xC4BC88
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_35 0xC4BC8C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_36 0xC4BC90
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_37 0xC4BC94
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_38 0xC4BC98
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_39 0xC4BC9C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_40 0xC4BCA0
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_41 0xC4BCA4
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_42 0xC4BCA8
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_43 0xC4BCAC
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_44 0xC4BCB0
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_45 0xC4BCB4
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_46 0xC4BCB8
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_47 0xC4BCBC
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_48 0xC4BCC0
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_49 0xC4BCC4
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_50 0xC4BCC8
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_51 0xC4BCCC
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_52 0xC4BCD0
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_53 0xC4BCD4
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h
new file mode 100644
index 000000000000..687e2255cb19
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_HBM_PLL_REGS_H_
+#define ASIC_REG_PSOC_HBM_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_HBM_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_HBM_PLL_NR 0xC74100
+
+#define mmPSOC_HBM_PLL_NF 0xC74104
+
+#define mmPSOC_HBM_PLL_OD 0xC74108
+
+#define mmPSOC_HBM_PLL_NB 0xC7410C
+
+#define mmPSOC_HBM_PLL_CFG 0xC74110
+
+#define mmPSOC_HBM_PLL_LOSE_MASK 0xC74120
+
+#define mmPSOC_HBM_PLL_LOCK_INTR 0xC74128
+
+#define mmPSOC_HBM_PLL_LOCK_BYPASS 0xC7412C
+
+#define mmPSOC_HBM_PLL_DATA_CHNG 0xC74130
+
+#define mmPSOC_HBM_PLL_RST 0xC74134
+
+#define mmPSOC_HBM_PLL_SLIP_WD_CNTR 0xC74150
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_0 0xC74200
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_1 0xC74204
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_2 0xC74208
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_3 0xC7420C
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_0 0xC74220
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_1 0xC74224
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_2 0xC74228
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_3 0xC7422C
+
+#define mmPSOC_HBM_PLL_DIV_SEL_0 0xC74280
+
+#define mmPSOC_HBM_PLL_DIV_SEL_1 0xC74284
+
+#define mmPSOC_HBM_PLL_DIV_SEL_2 0xC74288
+
+#define mmPSOC_HBM_PLL_DIV_SEL_3 0xC7428C
+
+#define mmPSOC_HBM_PLL_DIV_EN_0 0xC742A0
+
+#define mmPSOC_HBM_PLL_DIV_EN_1 0xC742A4
+
+#define mmPSOC_HBM_PLL_DIV_EN_2 0xC742A8
+
+#define mmPSOC_HBM_PLL_DIV_EN_3 0xC742AC
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_0 0xC742C0
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_1 0xC742C4
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_2 0xC742C8
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_3 0xC742CC
+
+#define mmPSOC_HBM_PLL_CLK_GATER 0xC74300
+
+#define mmPSOC_HBM_PLL_CLK_RLX_0 0xC74310
+
+#define mmPSOC_HBM_PLL_CLK_RLX_1 0xC74314
+
+#define mmPSOC_HBM_PLL_CLK_RLX_2 0xC74318
+
+#define mmPSOC_HBM_PLL_CLK_RLX_3 0xC7431C
+
+#define mmPSOC_HBM_PLL_REF_CNTR_PERIOD 0xC74400
+
+#define mmPSOC_HBM_PLL_REF_LOW_THRESHOLD 0xC74410
+
+#define mmPSOC_HBM_PLL_REF_HIGH_THRESHOLD 0xC74420
+
+#define mmPSOC_HBM_PLL_PLL_NOT_STABLE 0xC74430
+
+#define mmPSOC_HBM_PLL_FREQ_CALC_EN 0xC74440
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_CFG 0xC74500
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_0 0xC74510
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_1 0xC74514
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_2 0xC74518
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_3 0xC7451C
+
+#endif /* ASIC_REG_PSOC_HBM_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h
new file mode 100644
index 000000000000..3dc9bb4542dd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_
+#define ASIC_REG_PSOC_PCI_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_PCI_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_PCI_PLL_NR 0xC72100
+
+#define mmPSOC_PCI_PLL_NF 0xC72104
+
+#define mmPSOC_PCI_PLL_OD 0xC72108
+
+#define mmPSOC_PCI_PLL_NB 0xC7210C
+
+#define mmPSOC_PCI_PLL_CFG 0xC72110
+
+#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120
+
+#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128
+
+#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C
+
+#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130
+
+#define mmPSOC_PCI_PLL_RST 0xC72134
+
+#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C
+
+#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280
+
+#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284
+
+#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288
+
+#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C
+
+#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0
+
+#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4
+
+#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8
+
+#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC
+
+#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300
+
+#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310
+
+#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314
+
+#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318
+
+#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C
+
+#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400
+
+#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410
+
+#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420
+
+#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430
+
+#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_CFG 0xC72500
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_0 0xC72510
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_1 0xC72514
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_2 0xC72518
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_3 0xC7251C
+
+#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
new file mode 100644
index 000000000000..9ce24597d4b0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+#define ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_TIMESTAMP (Prototype: TIMESTAMP)
+ *****************************************
+ */
+
+#define mmPSOC_TIMESTAMP_CNTCR 0xC49000
+
+#define mmPSOC_TIMESTAMP_CNTSR 0xC49004
+
+#define mmPSOC_TIMESTAMP_CNTCVL 0xC49008
+
+#define mmPSOC_TIMESTAMP_CNTCVU 0xC4900C
+
+#define mmPSOC_TIMESTAMP_CNTFID0 0xC49020
+
+#define mmPSOC_TIMESTAMP_PIDR4 0xC49FD0
+
+#define mmPSOC_TIMESTAMP_PIDR5 0xC49FD4
+
+#define mmPSOC_TIMESTAMP_PIDR6 0xC49FD8
+
+#define mmPSOC_TIMESTAMP_PIDR7 0xC49FDC
+
+#define mmPSOC_TIMESTAMP_PIDR0 0xC49FE0
+
+#define mmPSOC_TIMESTAMP_PIDR1 0xC49FE4
+
+#define mmPSOC_TIMESTAMP_PIDR2 0xC49FE8
+
+#define mmPSOC_TIMESTAMP_PIDR3 0xC49FEC
+
+#define mmPSOC_TIMESTAMP_CIDR0 0xC49FF0
+
+#define mmPSOC_TIMESTAMP_CIDR1 0xC49FF4
+
+#define mmPSOC_TIMESTAMP_CIDR2 0xC49FF8
+
+#define mmPSOC_TIMESTAMP_CIDR3 0xC49FFC
+
+#endif /* ASIC_REG_PSOC_TIMESTAMP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
new file mode 100644
index 000000000000..ddf824392cf7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_0_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_0_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_0_PERM_SEL 0x306108
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_0 0x306114
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_1 0x306118
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_2 0x30611C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_3 0x306120
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_4 0x306124
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_5 0x306128
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_6 0x30612C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_7 0x306130
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_8 0x306134
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_9 0x306138
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_10 0x30613C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_11 0x306140
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_12 0x306144
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_13 0x306148
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_14 0x30614C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_15 0x306150
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_16 0x306154
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_17 0x306158
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_18 0x30615C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_19 0x306160
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_20 0x306164
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_21 0x306168
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_22 0x30616C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_23 0x306170
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_24 0x306174
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_25 0x306178
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_26 0x30617C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_27 0x306180
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_0 0x306184
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_1 0x306188
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_2 0x30618C
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_3 0x306190
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_4 0x306194
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_5 0x306198
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_6 0x30619C
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_7 0x3061A0
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_8 0x3061A4
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_9 0x3061A8
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_10 0x3061AC
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_11 0x3061B0
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_12 0x3061B4
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_13 0x3061B8
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_14 0x3061BC
+
+#define mmSIF_RTR_CTRL_0_SCRAM_SRAM_EN 0x30626C
+
+#define mmSIF_RTR_CTRL_0_RL_HBM_EN 0x306274
+
+#define mmSIF_RTR_CTRL_0_RL_HBM_SAT 0x306278
+
+#define mmSIF_RTR_CTRL_0_RL_HBM_RST 0x30627C
+
+#define mmSIF_RTR_CTRL_0_RL_HBM_TIMEOUT 0x306280
+
+#define mmSIF_RTR_CTRL_0_SCRAM_HBM_EN 0x306284
+
+#define mmSIF_RTR_CTRL_0_RL_PCI_EN 0x306288
+
+#define mmSIF_RTR_CTRL_0_RL_PCI_SAT 0x30628C
+
+#define mmSIF_RTR_CTRL_0_RL_PCI_RST 0x306290
+
+#define mmSIF_RTR_CTRL_0_RL_PCI_TIMEOUT 0x306294
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_EN 0x30629C
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_SAT 0x3062A0
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_RST 0x3062A4
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_TIMEOUT 0x3062AC
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_RED 0x3062B4
+
+#define mmSIF_RTR_CTRL_0_E2E_HBM_EN 0x3062EC
+
+#define mmSIF_RTR_CTRL_0_E2E_PCI_EN 0x3062F0
+
+#define mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE 0x3062F4
+
+#define mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE 0x3062F8
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET_EN 0x306404
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET 0x306408
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_WRAP 0x30640C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_CNT 0x306410
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET_EN 0x306414
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET 0x306418
+
+#define mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE 0x30641C
+
+#define mmSIF_RTR_CTRL_0_E2E_PCI_RD_SIZE 0x306420
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET_EN 0x306424
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET 0x306428
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_WRAP 0x30642C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_CNT 0x306430
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET_EN 0x306434
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET 0x306438
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_SEL_0 0x306450
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_SEL_1 0x306454
+
+#define mmSIF_RTR_CTRL_0_NON_LIN_EN 0x306480
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_0 0x306500
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_1 0x306504
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_2 0x306508
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_3 0x30650C
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_4 0x306510
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_0 0x306514
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_1 0x306520
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_2 0x306524
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_3 0x306528
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_4 0x30652C
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_5 0x306530
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_6 0x306534
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_7 0x306538
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_8 0x30653C
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_9 0x306540
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_0 0x306550
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_1 0x306554
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_2 0x306558
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_3 0x30655C
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_4 0x306560
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_5 0x306564
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_6 0x306568
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_7 0x30656C
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_8 0x306570
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_9 0x306574
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_10 0x306578
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_11 0x30657C
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_12 0x306580
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_13 0x306584
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_14 0x306588
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_15 0x30658C
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_16 0x306590
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_17 0x306594
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_18 0x306598
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0 0x3065E4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_1 0x3065E8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_2 0x3065EC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_3 0x3065F0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_4 0x3065F4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_5 0x3065F8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_6 0x3065FC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_7 0x306600
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_8 0x306604
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_9 0x306608
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_10 0x30660C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_11 0x306610
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_12 0x306614
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_13 0x306618
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_14 0x30661C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_15 0x306620
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0 0x306624
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_1 0x306628
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_2 0x30662C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_3 0x306630
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_4 0x306634
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_5 0x306638
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_6 0x30663C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_7 0x306640
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_8 0x306644
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_9 0x306648
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_10 0x30664C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_11 0x306650
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_12 0x306654
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_13 0x306658
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_14 0x30665C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_15 0x306660
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0 0x306664
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_1 0x306668
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_2 0x30666C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_3 0x306670
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_4 0x306674
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_5 0x306678
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_6 0x30667C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_7 0x306680
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_8 0x306684
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_9 0x306688
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_10 0x30668C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_11 0x306690
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_12 0x306694
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_13 0x306698
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_14 0x30669C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_15 0x3066A0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0 0x3066A4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_1 0x3066A8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_2 0x3066AC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_3 0x3066B0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_4 0x3066B4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_5 0x3066B8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_6 0x3066BC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_7 0x3066C0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_8 0x3066C4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_9 0x3066C8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_10 0x3066CC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_11 0x3066D0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_12 0x3066D4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_13 0x3066D8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_14 0x3066DC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_15 0x3066E0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_0 0x3066E4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_1 0x3066E8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_2 0x3066EC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_3 0x3066F0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_4 0x3066F4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_5 0x3066F8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_6 0x3066FC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_7 0x306700
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_8 0x306704
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_9 0x306708
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_10 0x30670C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_11 0x306710
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_12 0x306714
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_13 0x306718
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_14 0x30671C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_15 0x306720
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_0 0x306724
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_1 0x306728
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_2 0x30672C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_3 0x306730
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_4 0x306734
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_5 0x306738
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_6 0x30673C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_7 0x306740
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_8 0x306744
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_9 0x306748
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_10 0x30674C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_11 0x306750
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_12 0x306754
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_13 0x306758
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_14 0x30675C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_15 0x306760
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_0 0x306764
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_1 0x306768
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_2 0x30676C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_3 0x306770
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_4 0x306774
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_5 0x306778
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_6 0x30677C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_7 0x306780
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_8 0x306784
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_9 0x306788
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_10 0x30678C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_11 0x306790
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_12 0x306794
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_13 0x306798
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_14 0x30679C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_15 0x3067A0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_0 0x3067A4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_1 0x3067A8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_2 0x3067AC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_3 0x3067B0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_4 0x3067B4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_5 0x3067B8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_6 0x3067BC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_7 0x3067C0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_8 0x3067C4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_9 0x3067C8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_10 0x3067CC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_11 0x3067D0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_12 0x3067D4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_13 0x3067D8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_14 0x3067DC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_15 0x3067E0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0 0x306824
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_1 0x306828
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_2 0x30682C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_3 0x306830
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_4 0x306834
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_5 0x306838
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_6 0x30683C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_7 0x306840
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_8 0x306844
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_9 0x306848
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_10 0x30684C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_11 0x306850
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_12 0x306854
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_13 0x306858
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_14 0x30685C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_15 0x306860
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0 0x306864
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_1 0x306868
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_2 0x30686C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_3 0x306870
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_4 0x306874
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_5 0x306878
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_6 0x30687C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_7 0x306880
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_8 0x306884
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_9 0x306888
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_10 0x30688C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_11 0x306890
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_12 0x306894
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_13 0x306898
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_14 0x30689C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_15 0x3068A0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0 0x3068A4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_1 0x3068A8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_2 0x3068AC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_3 0x3068B0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_4 0x3068B4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_5 0x3068B8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_6 0x3068BC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_7 0x3068C0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_8 0x3068C4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_9 0x3068C8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_10 0x3068CC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_11 0x3068D0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_12 0x3068D4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_13 0x3068D8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_14 0x3068DC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_15 0x3068E0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0 0x3068E4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_1 0x3068E8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_2 0x3068EC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_3 0x3068F0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_4 0x3068F4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_5 0x3068F8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_6 0x3068FC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_7 0x306900
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_8 0x306904
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_9 0x306908
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_10 0x30690C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_11 0x306910
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_12 0x306914
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_13 0x306918
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_14 0x30691C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_15 0x306920
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_0 0x306924
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_1 0x306928
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_2 0x30692C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_3 0x306930
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_4 0x306934
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_5 0x306938
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_6 0x30693C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_7 0x306940
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_8 0x306944
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_9 0x306948
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_10 0x30694C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_11 0x306950
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_12 0x306954
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_13 0x306958
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_14 0x30695C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_15 0x306960
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_0 0x306964
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_1 0x306968
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_2 0x30696C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_3 0x306970
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_4 0x306974
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_5 0x306978
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_6 0x30697C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_7 0x306980
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_8 0x306984
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_9 0x306988
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_10 0x30698C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_11 0x306990
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_12 0x306994
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_13 0x306998
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_14 0x30699C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_15 0x3069A0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_0 0x3069A4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_1 0x3069A8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_2 0x3069AC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_3 0x3069B0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_4 0x3069B4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_5 0x3069B8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_6 0x3069BC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_7 0x3069C0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_8 0x3069C4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_9 0x3069C8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_10 0x3069CC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_11 0x3069D0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_12 0x3069D4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_13 0x3069D8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_14 0x3069DC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_15 0x3069E0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_0 0x3069E4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_1 0x3069E8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_2 0x3069EC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_3 0x3069F0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_4 0x3069F4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_5 0x3069F8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_6 0x3069FC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_7 0x306A00
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_8 0x306A04
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_9 0x306A08
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_10 0x306A0C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_11 0x306A10
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_12 0x306A14
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_13 0x306A18
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_14 0x306A1C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_15 0x306A20
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AW 0x306A64
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AR 0x306A68
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_HIT_AW 0x306A6C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_HIT_AR 0x306A70
+
+#define mmSIF_RTR_CTRL_0_RGL_CFG 0x306B64
+
+#define mmSIF_RTR_CTRL_0_RGL_SHIFT 0x306B68
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_0 0x306B6C
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_1 0x306B70
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_2 0x306B74
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_3 0x306B78
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_4 0x306B7C
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_5 0x306B80
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_6 0x306B84
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_7 0x306B88
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_0 0x306BAC
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_1 0x306BB0
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_2 0x306BB4
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_3 0x306BB8
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_4 0x306BBC
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_5 0x306BC0
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_6 0x306BC4
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_7 0x306BC8
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_0 0x306BEC
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_1 0x306BF0
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_2 0x306BF4
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_3 0x306BF8
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_4 0x306BFC
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_5 0x306C00
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_6 0x306C04
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_7 0x306C08
+
+#define mmSIF_RTR_CTRL_0_RGL_WDT 0x306C2C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_WRAP 0x306C30
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_WRAP 0x306C34
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_WRAP 0x306C38
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_WRAP 0x306C3C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_WRAP 0x306C40
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_WRAP 0x306C44
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_WRAP 0x306C48
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_WRAP 0x306C4C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_CNT 0x306C50
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_CNT 0x306C54
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_CNT 0x306C58
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_CNT 0x306C5C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_CNT 0x306C60
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_CNT 0x306C64
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_CNT 0x306C68
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_CNT 0x306C6C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_WRAP 0x306C70
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_WRAP 0x306C74
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_WRAP 0x306C78
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_WRAP 0x306C7C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_WRAP 0x306C80
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_WRAP 0x306C84
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_WRAP 0x306C88
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_WRAP 0x306C8C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_CNT 0x306C90
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_CNT 0x306C94
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_CNT 0x306C98
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_CNT 0x306C9C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_CNT 0x306CA0
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_CNT 0x306CA4
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_CNT 0x306CA8
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_CNT 0x306CAC
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_0 0x306CB0
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_1 0x306CB4
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_2 0x306CB8
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_3 0x306CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
new file mode 100644
index 000000000000..c6d517dbbd54
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_1_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_1_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_1_PERM_SEL 0x316108
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_0 0x316114
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_1 0x316118
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_2 0x31611C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_3 0x316120
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_4 0x316124
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_5 0x316128
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_6 0x31612C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_7 0x316130
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_8 0x316134
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_9 0x316138
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_10 0x31613C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_11 0x316140
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_12 0x316144
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_13 0x316148
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_14 0x31614C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_15 0x316150
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_16 0x316154
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_17 0x316158
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_18 0x31615C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_19 0x316160
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_20 0x316164
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_21 0x316168
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_22 0x31616C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_23 0x316170
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_24 0x316174
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_25 0x316178
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_26 0x31617C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_27 0x316180
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_0 0x316184
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_1 0x316188
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_2 0x31618C
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_3 0x316190
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_4 0x316194
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_5 0x316198
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_6 0x31619C
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_7 0x3161A0
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_8 0x3161A4
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_9 0x3161A8
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_10 0x3161AC
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_11 0x3161B0
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_12 0x3161B4
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_13 0x3161B8
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_14 0x3161BC
+
+#define mmSIF_RTR_CTRL_1_SCRAM_SRAM_EN 0x31626C
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_EN 0x316274
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_SAT 0x316278
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_RST 0x31627C
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_TIMEOUT 0x316280
+
+#define mmSIF_RTR_CTRL_1_SCRAM_HBM_EN 0x316284
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_EN 0x316288
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_SAT 0x31628C
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_RST 0x316290
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_TIMEOUT 0x316294
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_EN 0x31629C
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_SAT 0x3162A0
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_RST 0x3162A4
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_TIMEOUT 0x3162AC
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_RED 0x3162B4
+
+#define mmSIF_RTR_CTRL_1_E2E_HBM_EN 0x3162EC
+
+#define mmSIF_RTR_CTRL_1_E2E_PCI_EN 0x3162F0
+
+#define mmSIF_RTR_CTRL_1_E2E_HBM_WR_SIZE 0x3162F4
+
+#define mmSIF_RTR_CTRL_1_E2E_PCI_WR_SIZE 0x3162F8
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET_EN 0x316404
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET 0x316408
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_WRAP 0x31640C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_CNT 0x316410
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET_EN 0x316414
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET 0x316418
+
+#define mmSIF_RTR_CTRL_1_E2E_HBM_RD_SIZE 0x31641C
+
+#define mmSIF_RTR_CTRL_1_E2E_PCI_RD_SIZE 0x316420
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET_EN 0x316424
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET 0x316428
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_WRAP 0x31642C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_CNT 0x316430
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET_EN 0x316434
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET 0x316438
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_SEL_0 0x316450
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_SEL_1 0x316454
+
+#define mmSIF_RTR_CTRL_1_NON_LIN_EN 0x316480
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_0 0x316500
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_1 0x316504
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_2 0x316508
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_3 0x31650C
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_4 0x316510
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_0 0x316514
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_1 0x316520
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_2 0x316524
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_3 0x316528
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_4 0x31652C
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_5 0x316530
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_6 0x316534
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_7 0x316538
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_8 0x31653C
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_9 0x316540
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_0 0x316550
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_1 0x316554
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_2 0x316558
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_3 0x31655C
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_4 0x316560
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_5 0x316564
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_6 0x316568
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_7 0x31656C
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_8 0x316570
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_9 0x316574
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_10 0x316578
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_11 0x31657C
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_12 0x316580
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_13 0x316584
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_14 0x316588
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_15 0x31658C
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_16 0x316590
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_17 0x316594
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_18 0x316598
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0 0x3165E4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_1 0x3165E8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_2 0x3165EC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_3 0x3165F0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_4 0x3165F4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_5 0x3165F8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_6 0x3165FC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_7 0x316600
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_8 0x316604
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_9 0x316608
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_10 0x31660C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_11 0x316610
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_12 0x316614
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_13 0x316618
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_14 0x31661C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_15 0x316620
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0 0x316624
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_1 0x316628
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_2 0x31662C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_3 0x316630
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_4 0x316634
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_5 0x316638
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_6 0x31663C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_7 0x316640
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_8 0x316644
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_9 0x316648
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_10 0x31664C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_11 0x316650
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_12 0x316654
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_13 0x316658
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_14 0x31665C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_15 0x316660
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0 0x316664
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_1 0x316668
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_2 0x31666C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_3 0x316670
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_4 0x316674
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_5 0x316678
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_6 0x31667C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_7 0x316680
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_8 0x316684
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_9 0x316688
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_10 0x31668C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_11 0x316690
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_12 0x316694
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_13 0x316698
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_14 0x31669C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_15 0x3166A0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0 0x3166A4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_1 0x3166A8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_2 0x3166AC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_3 0x3166B0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_4 0x3166B4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_5 0x3166B8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_6 0x3166BC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_7 0x3166C0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_8 0x3166C4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_9 0x3166C8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_10 0x3166CC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_11 0x3166D0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_12 0x3166D4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_13 0x3166D8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_14 0x3166DC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_15 0x3166E0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_0 0x3166E4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_1 0x3166E8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_2 0x3166EC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_3 0x3166F0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_4 0x3166F4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_5 0x3166F8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_6 0x3166FC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_7 0x316700
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_8 0x316704
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_9 0x316708
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_10 0x31670C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_11 0x316710
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_12 0x316714
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_13 0x316718
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_14 0x31671C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_15 0x316720
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_0 0x316724
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_1 0x316728
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_2 0x31672C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_3 0x316730
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_4 0x316734
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_5 0x316738
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_6 0x31673C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_7 0x316740
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_8 0x316744
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_9 0x316748
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_10 0x31674C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_11 0x316750
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_12 0x316754
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_13 0x316758
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_14 0x31675C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_15 0x316760
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_0 0x316764
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_1 0x316768
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_2 0x31676C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_3 0x316770
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_4 0x316774
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_5 0x316778
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_6 0x31677C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_7 0x316780
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_8 0x316784
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_9 0x316788
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_10 0x31678C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_11 0x316790
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_12 0x316794
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_13 0x316798
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_14 0x31679C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_15 0x3167A0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_0 0x3167A4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_1 0x3167A8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_2 0x3167AC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_3 0x3167B0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_4 0x3167B4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_5 0x3167B8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_6 0x3167BC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_7 0x3167C0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_8 0x3167C4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_9 0x3167C8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_10 0x3167CC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_11 0x3167D0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_12 0x3167D4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_13 0x3167D8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_14 0x3167DC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_15 0x3167E0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0 0x316824
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_1 0x316828
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_2 0x31682C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_3 0x316830
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_4 0x316834
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_5 0x316838
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_6 0x31683C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_7 0x316840
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_8 0x316844
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_9 0x316848
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_10 0x31684C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_11 0x316850
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_12 0x316854
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_13 0x316858
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_14 0x31685C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_15 0x316860
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0 0x316864
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_1 0x316868
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_2 0x31686C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_3 0x316870
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_4 0x316874
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_5 0x316878
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_6 0x31687C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_7 0x316880
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_8 0x316884
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_9 0x316888
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_10 0x31688C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_11 0x316890
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_12 0x316894
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_13 0x316898
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_14 0x31689C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_15 0x3168A0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0 0x3168A4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_1 0x3168A8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_2 0x3168AC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_3 0x3168B0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_4 0x3168B4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_5 0x3168B8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_6 0x3168BC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_7 0x3168C0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_8 0x3168C4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_9 0x3168C8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_10 0x3168CC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_11 0x3168D0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_12 0x3168D4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_13 0x3168D8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_14 0x3168DC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_15 0x3168E0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0 0x3168E4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_1 0x3168E8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_2 0x3168EC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_3 0x3168F0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_4 0x3168F4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_5 0x3168F8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_6 0x3168FC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_7 0x316900
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_8 0x316904
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_9 0x316908
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_10 0x31690C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_11 0x316910
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_12 0x316914
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_13 0x316918
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_14 0x31691C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_15 0x316920
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_0 0x316924
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_1 0x316928
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_2 0x31692C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_3 0x316930
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_4 0x316934
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_5 0x316938
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_6 0x31693C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_7 0x316940
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_8 0x316944
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_9 0x316948
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_10 0x31694C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_11 0x316950
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_12 0x316954
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_13 0x316958
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_14 0x31695C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_15 0x316960
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_0 0x316964
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_1 0x316968
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_2 0x31696C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_3 0x316970
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_4 0x316974
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_5 0x316978
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_6 0x31697C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_7 0x316980
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_8 0x316984
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_9 0x316988
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_10 0x31698C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_11 0x316990
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_12 0x316994
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_13 0x316998
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_14 0x31699C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_15 0x3169A0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_0 0x3169A4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_1 0x3169A8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_2 0x3169AC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_3 0x3169B0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_4 0x3169B4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_5 0x3169B8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_6 0x3169BC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_7 0x3169C0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_8 0x3169C4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_9 0x3169C8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_10 0x3169CC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_11 0x3169D0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_12 0x3169D4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_13 0x3169D8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_14 0x3169DC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_15 0x3169E0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_0 0x3169E4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_1 0x3169E8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_2 0x3169EC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_3 0x3169F0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_4 0x3169F4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_5 0x3169F8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_6 0x3169FC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_7 0x316A00
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_8 0x316A04
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_9 0x316A08
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_10 0x316A0C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_11 0x316A10
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_12 0x316A14
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_13 0x316A18
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_14 0x316A1C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_15 0x316A20
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AW 0x316A64
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AR 0x316A68
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_HIT_AW 0x316A6C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_HIT_AR 0x316A70
+
+#define mmSIF_RTR_CTRL_1_RGL_CFG 0x316B64
+
+#define mmSIF_RTR_CTRL_1_RGL_SHIFT 0x316B68
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_0 0x316B6C
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_1 0x316B70
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_2 0x316B74
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_3 0x316B78
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_4 0x316B7C
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_5 0x316B80
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_6 0x316B84
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_7 0x316B88
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_0 0x316BAC
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_1 0x316BB0
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_2 0x316BB4
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_3 0x316BB8
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_4 0x316BBC
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_5 0x316BC0
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_6 0x316BC4
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_7 0x316BC8
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_0 0x316BEC
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_1 0x316BF0
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_2 0x316BF4
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_3 0x316BF8
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_4 0x316BFC
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_5 0x316C00
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_6 0x316C04
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_7 0x316C08
+
+#define mmSIF_RTR_CTRL_1_RGL_WDT 0x316C2C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_WRAP 0x316C30
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_WRAP 0x316C34
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_WRAP 0x316C38
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_WRAP 0x316C3C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_WRAP 0x316C40
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_WRAP 0x316C44
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_WRAP 0x316C48
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_WRAP 0x316C4C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_CNT 0x316C50
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_CNT 0x316C54
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_CNT 0x316C58
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_CNT 0x316C5C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_CNT 0x316C60
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_CNT 0x316C64
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_CNT 0x316C68
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_CNT 0x316C6C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_WRAP 0x316C70
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_WRAP 0x316C74
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_WRAP 0x316C78
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_WRAP 0x316C7C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_WRAP 0x316C80
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_WRAP 0x316C84
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_WRAP 0x316C88
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_WRAP 0x316C8C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_CNT 0x316C90
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_CNT 0x316C94
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_CNT 0x316C98
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_CNT 0x316C9C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_CNT 0x316CA0
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_CNT 0x316CA4
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_CNT 0x316CA8
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_CNT 0x316CAC
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_0 0x316CB0
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_1 0x316CB4
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_2 0x316CB8
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_3 0x316CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
new file mode 100644
index 000000000000..330e5b42d679
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_2_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_2_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_2 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_2_PERM_SEL 0x326108
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_0 0x326114
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_1 0x326118
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_2 0x32611C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_3 0x326120
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_4 0x326124
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_5 0x326128
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_6 0x32612C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_7 0x326130
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_8 0x326134
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_9 0x326138
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_10 0x32613C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_11 0x326140
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_12 0x326144
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_13 0x326148
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_14 0x32614C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_15 0x326150
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_16 0x326154
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_17 0x326158
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_18 0x32615C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_19 0x326160
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_20 0x326164
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_21 0x326168
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_22 0x32616C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_23 0x326170
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_24 0x326174
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_25 0x326178
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_26 0x32617C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_27 0x326180
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_0 0x326184
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_1 0x326188
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_2 0x32618C
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_3 0x326190
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_4 0x326194
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_5 0x326198
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_6 0x32619C
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_7 0x3261A0
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_8 0x3261A4
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_9 0x3261A8
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_10 0x3261AC
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_11 0x3261B0
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_12 0x3261B4
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_13 0x3261B8
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_14 0x3261BC
+
+#define mmSIF_RTR_CTRL_2_SCRAM_SRAM_EN 0x32626C
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_EN 0x326274
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_SAT 0x326278
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_RST 0x32627C
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_TIMEOUT 0x326280
+
+#define mmSIF_RTR_CTRL_2_SCRAM_HBM_EN 0x326284
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_EN 0x326288
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_SAT 0x32628C
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_RST 0x326290
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_TIMEOUT 0x326294
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_EN 0x32629C
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_SAT 0x3262A0
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_RST 0x3262A4
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_TIMEOUT 0x3262AC
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_RED 0x3262B4
+
+#define mmSIF_RTR_CTRL_2_E2E_HBM_EN 0x3262EC
+
+#define mmSIF_RTR_CTRL_2_E2E_PCI_EN 0x3262F0
+
+#define mmSIF_RTR_CTRL_2_E2E_HBM_WR_SIZE 0x3262F4
+
+#define mmSIF_RTR_CTRL_2_E2E_PCI_WR_SIZE 0x3262F8
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET_EN 0x326404
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET 0x326408
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_WRAP 0x32640C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_CNT 0x326410
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET_EN 0x326414
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET 0x326418
+
+#define mmSIF_RTR_CTRL_2_E2E_HBM_RD_SIZE 0x32641C
+
+#define mmSIF_RTR_CTRL_2_E2E_PCI_RD_SIZE 0x326420
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET_EN 0x326424
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET 0x326428
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_WRAP 0x32642C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_CNT 0x326430
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET_EN 0x326434
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET 0x326438
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_SEL_0 0x326450
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_SEL_1 0x326454
+
+#define mmSIF_RTR_CTRL_2_NON_LIN_EN 0x326480
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_0 0x326500
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_1 0x326504
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_2 0x326508
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_3 0x32650C
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_4 0x326510
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_0 0x326514
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_1 0x326520
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_2 0x326524
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_3 0x326528
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_4 0x32652C
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_5 0x326530
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_6 0x326534
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_7 0x326538
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_8 0x32653C
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_9 0x326540
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_0 0x326550
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_1 0x326554
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_2 0x326558
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_3 0x32655C
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_4 0x326560
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_5 0x326564
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_6 0x326568
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_7 0x32656C
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_8 0x326570
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_9 0x326574
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_10 0x326578
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_11 0x32657C
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_12 0x326580
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_13 0x326584
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_14 0x326588
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_15 0x32658C
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_16 0x326590
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_17 0x326594
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_18 0x326598
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0 0x3265E4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_1 0x3265E8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_2 0x3265EC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_3 0x3265F0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_4 0x3265F4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_5 0x3265F8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_6 0x3265FC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_7 0x326600
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_8 0x326604
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_9 0x326608
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_10 0x32660C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_11 0x326610
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_12 0x326614
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_13 0x326618
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_14 0x32661C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_15 0x326620
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0 0x326624
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_1 0x326628
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_2 0x32662C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_3 0x326630
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_4 0x326634
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_5 0x326638
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_6 0x32663C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_7 0x326640
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_8 0x326644
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_9 0x326648
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_10 0x32664C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_11 0x326650
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_12 0x326654
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_13 0x326658
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_14 0x32665C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_15 0x326660
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0 0x326664
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_1 0x326668
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_2 0x32666C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_3 0x326670
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_4 0x326674
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_5 0x326678
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_6 0x32667C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_7 0x326680
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_8 0x326684
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_9 0x326688
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_10 0x32668C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_11 0x326690
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_12 0x326694
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_13 0x326698
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_14 0x32669C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_15 0x3266A0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0 0x3266A4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_1 0x3266A8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_2 0x3266AC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_3 0x3266B0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_4 0x3266B4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_5 0x3266B8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_6 0x3266BC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_7 0x3266C0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_8 0x3266C4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_9 0x3266C8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_10 0x3266CC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_11 0x3266D0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_12 0x3266D4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_13 0x3266D8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_14 0x3266DC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_15 0x3266E0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_0 0x3266E4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_1 0x3266E8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_2 0x3266EC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_3 0x3266F0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_4 0x3266F4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_5 0x3266F8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_6 0x3266FC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_7 0x326700
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_8 0x326704
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_9 0x326708
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_10 0x32670C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_11 0x326710
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_12 0x326714
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_13 0x326718
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_14 0x32671C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_15 0x326720
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_0 0x326724
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_1 0x326728
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_2 0x32672C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_3 0x326730
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_4 0x326734
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_5 0x326738
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_6 0x32673C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_7 0x326740
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_8 0x326744
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_9 0x326748
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_10 0x32674C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_11 0x326750
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_12 0x326754
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_13 0x326758
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_14 0x32675C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_15 0x326760
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_0 0x326764
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_1 0x326768
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_2 0x32676C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_3 0x326770
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_4 0x326774
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_5 0x326778
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_6 0x32677C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_7 0x326780
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_8 0x326784
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_9 0x326788
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_10 0x32678C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_11 0x326790
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_12 0x326794
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_13 0x326798
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_14 0x32679C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_15 0x3267A0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_0 0x3267A4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_1 0x3267A8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_2 0x3267AC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_3 0x3267B0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_4 0x3267B4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_5 0x3267B8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_6 0x3267BC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_7 0x3267C0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_8 0x3267C4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_9 0x3267C8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_10 0x3267CC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_11 0x3267D0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_12 0x3267D4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_13 0x3267D8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_14 0x3267DC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_15 0x3267E0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0 0x326824
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_1 0x326828
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_2 0x32682C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_3 0x326830
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_4 0x326834
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_5 0x326838
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_6 0x32683C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_7 0x326840
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_8 0x326844
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_9 0x326848
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_10 0x32684C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_11 0x326850
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_12 0x326854
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_13 0x326858
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_14 0x32685C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_15 0x326860
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0 0x326864
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_1 0x326868
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_2 0x32686C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_3 0x326870
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_4 0x326874
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_5 0x326878
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_6 0x32687C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_7 0x326880
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_8 0x326884
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_9 0x326888
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_10 0x32688C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_11 0x326890
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_12 0x326894
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_13 0x326898
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_14 0x32689C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_15 0x3268A0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0 0x3268A4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_1 0x3268A8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_2 0x3268AC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_3 0x3268B0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_4 0x3268B4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_5 0x3268B8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_6 0x3268BC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_7 0x3268C0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_8 0x3268C4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_9 0x3268C8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_10 0x3268CC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_11 0x3268D0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_12 0x3268D4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_13 0x3268D8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_14 0x3268DC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_15 0x3268E0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0 0x3268E4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_1 0x3268E8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_2 0x3268EC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_3 0x3268F0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_4 0x3268F4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_5 0x3268F8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_6 0x3268FC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_7 0x326900
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_8 0x326904
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_9 0x326908
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_10 0x32690C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_11 0x326910
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_12 0x326914
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_13 0x326918
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_14 0x32691C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_15 0x326920
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_0 0x326924
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_1 0x326928
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_2 0x32692C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_3 0x326930
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_4 0x326934
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_5 0x326938
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_6 0x32693C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_7 0x326940
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_8 0x326944
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_9 0x326948
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_10 0x32694C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_11 0x326950
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_12 0x326954
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_13 0x326958
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_14 0x32695C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_15 0x326960
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_0 0x326964
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_1 0x326968
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_2 0x32696C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_3 0x326970
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_4 0x326974
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_5 0x326978
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_6 0x32697C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_7 0x326980
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_8 0x326984
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_9 0x326988
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_10 0x32698C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_11 0x326990
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_12 0x326994
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_13 0x326998
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_14 0x32699C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_15 0x3269A0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_0 0x3269A4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_1 0x3269A8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_2 0x3269AC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_3 0x3269B0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_4 0x3269B4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_5 0x3269B8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_6 0x3269BC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_7 0x3269C0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_8 0x3269C4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_9 0x3269C8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_10 0x3269CC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_11 0x3269D0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_12 0x3269D4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_13 0x3269D8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_14 0x3269DC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_15 0x3269E0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_0 0x3269E4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_1 0x3269E8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_2 0x3269EC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_3 0x3269F0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_4 0x3269F4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_5 0x3269F8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_6 0x3269FC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_7 0x326A00
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_8 0x326A04
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_9 0x326A08
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_10 0x326A0C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_11 0x326A10
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_12 0x326A14
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_13 0x326A18
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_14 0x326A1C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_15 0x326A20
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AW 0x326A64
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AR 0x326A68
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_HIT_AW 0x326A6C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_HIT_AR 0x326A70
+
+#define mmSIF_RTR_CTRL_2_RGL_CFG 0x326B64
+
+#define mmSIF_RTR_CTRL_2_RGL_SHIFT 0x326B68
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_0 0x326B6C
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_1 0x326B70
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_2 0x326B74
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_3 0x326B78
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_4 0x326B7C
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_5 0x326B80
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_6 0x326B84
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_7 0x326B88
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_0 0x326BAC
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_1 0x326BB0
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_2 0x326BB4
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_3 0x326BB8
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_4 0x326BBC
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_5 0x326BC0
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_6 0x326BC4
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_7 0x326BC8
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_0 0x326BEC
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_1 0x326BF0
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_2 0x326BF4
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_3 0x326BF8
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_4 0x326BFC
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_5 0x326C00
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_6 0x326C04
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_7 0x326C08
+
+#define mmSIF_RTR_CTRL_2_RGL_WDT 0x326C2C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_WRAP 0x326C30
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_WRAP 0x326C34
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_WRAP 0x326C38
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_WRAP 0x326C3C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_WRAP 0x326C40
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_WRAP 0x326C44
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_WRAP 0x326C48
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_WRAP 0x326C4C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_CNT 0x326C50
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_CNT 0x326C54
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_CNT 0x326C58
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_CNT 0x326C5C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_CNT 0x326C60
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_CNT 0x326C64
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_CNT 0x326C68
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_CNT 0x326C6C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_WRAP 0x326C70
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_WRAP 0x326C74
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_WRAP 0x326C78
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_WRAP 0x326C7C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_WRAP 0x326C80
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_WRAP 0x326C84
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_WRAP 0x326C88
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_WRAP 0x326C8C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_CNT 0x326C90
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_CNT 0x326C94
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_CNT 0x326C98
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_CNT 0x326C9C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_CNT 0x326CA0
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_CNT 0x326CA4
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_CNT 0x326CA8
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_CNT 0x326CAC
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_0 0x326CB0
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_1 0x326CB4
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_2 0x326CB8
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_3 0x326CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_2_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
new file mode 100644
index 000000000000..d749f1968e5e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_3_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_3_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_3 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_3_PERM_SEL 0x336108
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_0 0x336114
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_1 0x336118
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_2 0x33611C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_3 0x336120
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_4 0x336124
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_5 0x336128
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_6 0x33612C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_7 0x336130
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_8 0x336134
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_9 0x336138
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_10 0x33613C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_11 0x336140
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_12 0x336144
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_13 0x336148
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_14 0x33614C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_15 0x336150
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_16 0x336154
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_17 0x336158
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_18 0x33615C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_19 0x336160
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_20 0x336164
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_21 0x336168
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_22 0x33616C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_23 0x336170
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_24 0x336174
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_25 0x336178
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_26 0x33617C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_27 0x336180
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_0 0x336184
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_1 0x336188
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_2 0x33618C
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_3 0x336190
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_4 0x336194
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_5 0x336198
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_6 0x33619C
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_7 0x3361A0
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_8 0x3361A4
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_9 0x3361A8
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_10 0x3361AC
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_11 0x3361B0
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_12 0x3361B4
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_13 0x3361B8
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_14 0x3361BC
+
+#define mmSIF_RTR_CTRL_3_SCRAM_SRAM_EN 0x33626C
+
+#define mmSIF_RTR_CTRL_3_RL_HBM_EN 0x336274
+
+#define mmSIF_RTR_CTRL_3_RL_HBM_SAT 0x336278
+
+#define mmSIF_RTR_CTRL_3_RL_HBM_RST 0x33627C
+
+#define mmSIF_RTR_CTRL_3_RL_HBM_TIMEOUT 0x336280
+
+#define mmSIF_RTR_CTRL_3_SCRAM_HBM_EN 0x336284
+
+#define mmSIF_RTR_CTRL_3_RL_PCI_EN 0x336288
+
+#define mmSIF_RTR_CTRL_3_RL_PCI_SAT 0x33628C
+
+#define mmSIF_RTR_CTRL_3_RL_PCI_RST 0x336290
+
+#define mmSIF_RTR_CTRL_3_RL_PCI_TIMEOUT 0x336294
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_EN 0x33629C
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_SAT 0x3362A0
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_RST 0x3362A4
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_TIMEOUT 0x3362AC
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_RED 0x3362B4
+
+#define mmSIF_RTR_CTRL_3_E2E_HBM_EN 0x3362EC
+
+#define mmSIF_RTR_CTRL_3_E2E_PCI_EN 0x3362F0
+
+#define mmSIF_RTR_CTRL_3_E2E_HBM_WR_SIZE 0x3362F4
+
+#define mmSIF_RTR_CTRL_3_E2E_PCI_WR_SIZE 0x3362F8
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET_EN 0x336404
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET 0x336408
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_WRAP 0x33640C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_CNT 0x336410
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET_EN 0x336414
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET 0x336418
+
+#define mmSIF_RTR_CTRL_3_E2E_HBM_RD_SIZE 0x33641C
+
+#define mmSIF_RTR_CTRL_3_E2E_PCI_RD_SIZE 0x336420
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET_EN 0x336424
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET 0x336428
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_WRAP 0x33642C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_CNT 0x336430
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET_EN 0x336434
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET 0x336438
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_SEL_0 0x336450
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_SEL_1 0x336454
+
+#define mmSIF_RTR_CTRL_3_NON_LIN_EN 0x336480
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_0 0x336500
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_1 0x336504
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_2 0x336508
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_3 0x33650C
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_4 0x336510
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_0 0x336514
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_1 0x336520
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_2 0x336524
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_3 0x336528
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_4 0x33652C
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_5 0x336530
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_6 0x336534
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_7 0x336538
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_8 0x33653C
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_9 0x336540
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_0 0x336550
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_1 0x336554
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_2 0x336558
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_3 0x33655C
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_4 0x336560
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_5 0x336564
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_6 0x336568
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_7 0x33656C
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_8 0x336570
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_9 0x336574
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_10 0x336578
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_11 0x33657C
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_12 0x336580
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_13 0x336584
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_14 0x336588
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_15 0x33658C
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_16 0x336590
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_17 0x336594
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_18 0x336598
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0 0x3365E4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_1 0x3365E8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_2 0x3365EC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_3 0x3365F0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_4 0x3365F4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_5 0x3365F8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_6 0x3365FC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_7 0x336600
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_8 0x336604
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_9 0x336608
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_10 0x33660C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_11 0x336610
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_12 0x336614
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_13 0x336618
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_14 0x33661C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_15 0x336620
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0 0x336624
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_1 0x336628
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_2 0x33662C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_3 0x336630
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_4 0x336634
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_5 0x336638
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_6 0x33663C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_7 0x336640
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_8 0x336644
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_9 0x336648
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_10 0x33664C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_11 0x336650
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_12 0x336654
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_13 0x336658
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_14 0x33665C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_15 0x336660
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0 0x336664
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_1 0x336668
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_2 0x33666C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_3 0x336670
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_4 0x336674
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_5 0x336678
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_6 0x33667C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_7 0x336680
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_8 0x336684
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_9 0x336688
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_10 0x33668C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_11 0x336690
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_12 0x336694
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_13 0x336698
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_14 0x33669C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_15 0x3366A0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0 0x3366A4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_1 0x3366A8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_2 0x3366AC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_3 0x3366B0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_4 0x3366B4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_5 0x3366B8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_6 0x3366BC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_7 0x3366C0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_8 0x3366C4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_9 0x3366C8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_10 0x3366CC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_11 0x3366D0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_12 0x3366D4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_13 0x3366D8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_14 0x3366DC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_15 0x3366E0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_0 0x3366E4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_1 0x3366E8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_2 0x3366EC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_3 0x3366F0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_4 0x3366F4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_5 0x3366F8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_6 0x3366FC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_7 0x336700
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_8 0x336704
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_9 0x336708
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_10 0x33670C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_11 0x336710
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_12 0x336714
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_13 0x336718
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_14 0x33671C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_15 0x336720
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_0 0x336724
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_1 0x336728
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_2 0x33672C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_3 0x336730
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_4 0x336734
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_5 0x336738
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_6 0x33673C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_7 0x336740
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_8 0x336744
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_9 0x336748
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_10 0x33674C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_11 0x336750
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_12 0x336754
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_13 0x336758
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_14 0x33675C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_15 0x336760
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_0 0x336764
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_1 0x336768
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_2 0x33676C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_3 0x336770
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_4 0x336774
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_5 0x336778
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_6 0x33677C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_7 0x336780
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_8 0x336784
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_9 0x336788
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_10 0x33678C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_11 0x336790
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_12 0x336794
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_13 0x336798
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_14 0x33679C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_15 0x3367A0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_0 0x3367A4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_1 0x3367A8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_2 0x3367AC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_3 0x3367B0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_4 0x3367B4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_5 0x3367B8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_6 0x3367BC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_7 0x3367C0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_8 0x3367C4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_9 0x3367C8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_10 0x3367CC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_11 0x3367D0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_12 0x3367D4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_13 0x3367D8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_14 0x3367DC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_15 0x3367E0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0 0x336824
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_1 0x336828
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_2 0x33682C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_3 0x336830
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_4 0x336834
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_5 0x336838
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_6 0x33683C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_7 0x336840
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_8 0x336844
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_9 0x336848
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_10 0x33684C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_11 0x336850
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_12 0x336854
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_13 0x336858
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_14 0x33685C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_15 0x336860
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0 0x336864
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_1 0x336868
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_2 0x33686C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_3 0x336870
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_4 0x336874
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_5 0x336878
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_6 0x33687C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_7 0x336880
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_8 0x336884
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_9 0x336888
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_10 0x33688C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_11 0x336890
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_12 0x336894
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_13 0x336898
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_14 0x33689C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_15 0x3368A0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0 0x3368A4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_1 0x3368A8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_2 0x3368AC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_3 0x3368B0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_4 0x3368B4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_5 0x3368B8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_6 0x3368BC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_7 0x3368C0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_8 0x3368C4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_9 0x3368C8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_10 0x3368CC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_11 0x3368D0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_12 0x3368D4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_13 0x3368D8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_14 0x3368DC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_15 0x3368E0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0 0x3368E4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_1 0x3368E8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_2 0x3368EC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_3 0x3368F0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_4 0x3368F4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_5 0x3368F8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_6 0x3368FC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_7 0x336900
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_8 0x336904
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_9 0x336908
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_10 0x33690C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_11 0x336910
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_12 0x336914
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_13 0x336918
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_14 0x33691C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_15 0x336920
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_0 0x336924
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_1 0x336928
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_2 0x33692C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_3 0x336930
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_4 0x336934
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_5 0x336938
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_6 0x33693C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_7 0x336940
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_8 0x336944
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_9 0x336948
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_10 0x33694C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_11 0x336950
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_12 0x336954
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_13 0x336958
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_14 0x33695C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_15 0x336960
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_0 0x336964
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_1 0x336968
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_2 0x33696C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_3 0x336970
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_4 0x336974
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_5 0x336978
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_6 0x33697C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_7 0x336980
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_8 0x336984
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_9 0x336988
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_10 0x33698C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_11 0x336990
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_12 0x336994
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_13 0x336998
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_14 0x33699C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_15 0x3369A0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_0 0x3369A4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_1 0x3369A8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_2 0x3369AC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_3 0x3369B0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_4 0x3369B4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_5 0x3369B8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_6 0x3369BC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_7 0x3369C0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_8 0x3369C4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_9 0x3369C8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_10 0x3369CC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_11 0x3369D0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_12 0x3369D4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_13 0x3369D8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_14 0x3369DC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_15 0x3369E0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_0 0x3369E4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_1 0x3369E8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_2 0x3369EC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_3 0x3369F0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_4 0x3369F4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_5 0x3369F8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_6 0x3369FC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_7 0x336A00
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_8 0x336A04
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_9 0x336A08
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_10 0x336A0C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_11 0x336A10
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_12 0x336A14
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_13 0x336A18
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_14 0x336A1C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_15 0x336A20
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AW 0x336A64
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AR 0x336A68
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_HIT_AW 0x336A6C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_HIT_AR 0x336A70
+
+#define mmSIF_RTR_CTRL_3_RGL_CFG 0x336B64
+
+#define mmSIF_RTR_CTRL_3_RGL_SHIFT 0x336B68
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_0 0x336B6C
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_1 0x336B70
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_2 0x336B74
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_3 0x336B78
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_4 0x336B7C
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_5 0x336B80
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_6 0x336B84
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_7 0x336B88
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_0 0x336BAC
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_1 0x336BB0
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_2 0x336BB4
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_3 0x336BB8
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_4 0x336BBC
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_5 0x336BC0
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_6 0x336BC4
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_7 0x336BC8
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_0 0x336BEC
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_1 0x336BF0
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_2 0x336BF4
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_3 0x336BF8
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_4 0x336BFC
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_5 0x336C00
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_6 0x336C04
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_7 0x336C08
+
+#define mmSIF_RTR_CTRL_3_RGL_WDT 0x336C2C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_WRAP 0x336C30
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_WRAP 0x336C34
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_WRAP 0x336C38
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_WRAP 0x336C3C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_WRAP 0x336C40
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_WRAP 0x336C44
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_WRAP 0x336C48
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_WRAP 0x336C4C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_CNT 0x336C50
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_CNT 0x336C54
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_CNT 0x336C58
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_CNT 0x336C5C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_CNT 0x336C60
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_CNT 0x336C64
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_CNT 0x336C68
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_CNT 0x336C6C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_WRAP 0x336C70
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_WRAP 0x336C74
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_WRAP 0x336C78
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_WRAP 0x336C7C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_WRAP 0x336C80
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_WRAP 0x336C84
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_WRAP 0x336C88
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_WRAP 0x336C8C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_CNT 0x336C90
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_CNT 0x336C94
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_CNT 0x336C98
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_CNT 0x336C9C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_CNT 0x336CA0
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_CNT 0x336CA4
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_CNT 0x336CA8
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_CNT 0x336CAC
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_0 0x336CB0
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_1 0x336CB4
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_2 0x336CB8
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_3 0x336CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_3_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
new file mode 100644
index 000000000000..ad48773c4bbd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_4_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_4_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_4 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_4_PERM_SEL 0x346108
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_0 0x346114
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_1 0x346118
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_2 0x34611C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_3 0x346120
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_4 0x346124
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_5 0x346128
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_6 0x34612C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_7 0x346130
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_8 0x346134
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_9 0x346138
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_10 0x34613C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_11 0x346140
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_12 0x346144
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_13 0x346148
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_14 0x34614C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_15 0x346150
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_16 0x346154
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_17 0x346158
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_18 0x34615C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_19 0x346160
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_20 0x346164
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_21 0x346168
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_22 0x34616C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_23 0x346170
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_24 0x346174
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_25 0x346178
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_26 0x34617C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_27 0x346180
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_0 0x346184
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_1 0x346188
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_2 0x34618C
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_3 0x346190
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_4 0x346194
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_5 0x346198
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_6 0x34619C
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_7 0x3461A0
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_8 0x3461A4
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_9 0x3461A8
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_10 0x3461AC
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_11 0x3461B0
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_12 0x3461B4
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_13 0x3461B8
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_14 0x3461BC
+
+#define mmSIF_RTR_CTRL_4_SCRAM_SRAM_EN 0x34626C
+
+#define mmSIF_RTR_CTRL_4_RL_HBM_EN 0x346274
+
+#define mmSIF_RTR_CTRL_4_RL_HBM_SAT 0x346278
+
+#define mmSIF_RTR_CTRL_4_RL_HBM_RST 0x34627C
+
+#define mmSIF_RTR_CTRL_4_RL_HBM_TIMEOUT 0x346280
+
+#define mmSIF_RTR_CTRL_4_SCRAM_HBM_EN 0x346284
+
+#define mmSIF_RTR_CTRL_4_RL_PCI_EN 0x346288
+
+#define mmSIF_RTR_CTRL_4_RL_PCI_SAT 0x34628C
+
+#define mmSIF_RTR_CTRL_4_RL_PCI_RST 0x346290
+
+#define mmSIF_RTR_CTRL_4_RL_PCI_TIMEOUT 0x346294
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_EN 0x34629C
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_SAT 0x3462A0
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_RST 0x3462A4
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_TIMEOUT 0x3462AC
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_RED 0x3462B4
+
+#define mmSIF_RTR_CTRL_4_E2E_HBM_EN 0x3462EC
+
+#define mmSIF_RTR_CTRL_4_E2E_PCI_EN 0x3462F0
+
+#define mmSIF_RTR_CTRL_4_E2E_HBM_WR_SIZE 0x3462F4
+
+#define mmSIF_RTR_CTRL_4_E2E_PCI_WR_SIZE 0x3462F8
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET_EN 0x346404
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET 0x346408
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_WRAP 0x34640C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_CNT 0x346410
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET_EN 0x346414
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET 0x346418
+
+#define mmSIF_RTR_CTRL_4_E2E_HBM_RD_SIZE 0x34641C
+
+#define mmSIF_RTR_CTRL_4_E2E_PCI_RD_SIZE 0x346420
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET_EN 0x346424
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET 0x346428
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_WRAP 0x34642C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_CNT 0x346430
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET_EN 0x346434
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET 0x346438
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_SEL_0 0x346450
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_SEL_1 0x346454
+
+#define mmSIF_RTR_CTRL_4_NON_LIN_EN 0x346480
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_0 0x346500
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_1 0x346504
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_2 0x346508
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_3 0x34650C
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_4 0x346510
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_0 0x346514
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_1 0x346520
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_2 0x346524
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_3 0x346528
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_4 0x34652C
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_5 0x346530
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_6 0x346534
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_7 0x346538
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_8 0x34653C
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_9 0x346540
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_0 0x346550
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_1 0x346554
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_2 0x346558
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_3 0x34655C
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_4 0x346560
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_5 0x346564
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_6 0x346568
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_7 0x34656C
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_8 0x346570
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_9 0x346574
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_10 0x346578
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_11 0x34657C
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_12 0x346580
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_13 0x346584
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_14 0x346588
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_15 0x34658C
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_16 0x346590
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_17 0x346594
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_18 0x346598
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0 0x3465E4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_1 0x3465E8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_2 0x3465EC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_3 0x3465F0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_4 0x3465F4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_5 0x3465F8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_6 0x3465FC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_7 0x346600
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_8 0x346604
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_9 0x346608
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_10 0x34660C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_11 0x346610
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_12 0x346614
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_13 0x346618
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_14 0x34661C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_15 0x346620
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0 0x346624
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_1 0x346628
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_2 0x34662C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_3 0x346630
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_4 0x346634
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_5 0x346638
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_6 0x34663C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_7 0x346640
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_8 0x346644
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_9 0x346648
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_10 0x34664C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_11 0x346650
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_12 0x346654
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_13 0x346658
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_14 0x34665C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_15 0x346660
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0 0x346664
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_1 0x346668
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_2 0x34666C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_3 0x346670
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_4 0x346674
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_5 0x346678
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_6 0x34667C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_7 0x346680
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_8 0x346684
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_9 0x346688
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_10 0x34668C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_11 0x346690
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_12 0x346694
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_13 0x346698
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_14 0x34669C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_15 0x3466A0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0 0x3466A4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_1 0x3466A8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_2 0x3466AC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_3 0x3466B0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_4 0x3466B4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_5 0x3466B8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_6 0x3466BC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_7 0x3466C0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_8 0x3466C4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_9 0x3466C8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_10 0x3466CC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_11 0x3466D0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_12 0x3466D4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_13 0x3466D8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_14 0x3466DC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_15 0x3466E0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_0 0x3466E4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_1 0x3466E8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_2 0x3466EC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_3 0x3466F0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_4 0x3466F4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_5 0x3466F8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_6 0x3466FC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_7 0x346700
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_8 0x346704
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_9 0x346708
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_10 0x34670C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_11 0x346710
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_12 0x346714
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_13 0x346718
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_14 0x34671C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_15 0x346720
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_0 0x346724
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_1 0x346728
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_2 0x34672C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_3 0x346730
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_4 0x346734
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_5 0x346738
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_6 0x34673C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_7 0x346740
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_8 0x346744
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_9 0x346748
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_10 0x34674C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_11 0x346750
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_12 0x346754
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_13 0x346758
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_14 0x34675C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_15 0x346760
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_0 0x346764
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_1 0x346768
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_2 0x34676C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_3 0x346770
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_4 0x346774
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_5 0x346778
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_6 0x34677C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_7 0x346780
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_8 0x346784
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_9 0x346788
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_10 0x34678C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_11 0x346790
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_12 0x346794
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_13 0x346798
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_14 0x34679C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_15 0x3467A0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_0 0x3467A4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_1 0x3467A8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_2 0x3467AC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_3 0x3467B0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_4 0x3467B4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_5 0x3467B8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_6 0x3467BC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_7 0x3467C0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_8 0x3467C4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_9 0x3467C8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_10 0x3467CC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_11 0x3467D0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_12 0x3467D4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_13 0x3467D8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_14 0x3467DC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_15 0x3467E0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0 0x346824
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_1 0x346828
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_2 0x34682C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_3 0x346830
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_4 0x346834
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_5 0x346838
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_6 0x34683C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_7 0x346840
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_8 0x346844
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_9 0x346848
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_10 0x34684C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_11 0x346850
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_12 0x346854
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_13 0x346858
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_14 0x34685C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_15 0x346860
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0 0x346864
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_1 0x346868
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_2 0x34686C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_3 0x346870
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_4 0x346874
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_5 0x346878
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_6 0x34687C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_7 0x346880
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_8 0x346884
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_9 0x346888
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_10 0x34688C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_11 0x346890
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_12 0x346894
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_13 0x346898
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_14 0x34689C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_15 0x3468A0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0 0x3468A4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_1 0x3468A8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_2 0x3468AC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_3 0x3468B0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_4 0x3468B4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_5 0x3468B8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_6 0x3468BC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_7 0x3468C0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_8 0x3468C4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_9 0x3468C8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_10 0x3468CC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_11 0x3468D0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_12 0x3468D4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_13 0x3468D8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_14 0x3468DC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_15 0x3468E0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0 0x3468E4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_1 0x3468E8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_2 0x3468EC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_3 0x3468F0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_4 0x3468F4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_5 0x3468F8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_6 0x3468FC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_7 0x346900
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_8 0x346904
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_9 0x346908
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_10 0x34690C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_11 0x346910
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_12 0x346914
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_13 0x346918
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_14 0x34691C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_15 0x346920
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_0 0x346924
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_1 0x346928
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_2 0x34692C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_3 0x346930
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_4 0x346934
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_5 0x346938
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_6 0x34693C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_7 0x346940
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_8 0x346944
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_9 0x346948
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_10 0x34694C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_11 0x346950
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_12 0x346954
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_13 0x346958
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_14 0x34695C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_15 0x346960
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_0 0x346964
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_1 0x346968
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_2 0x34696C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_3 0x346970
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_4 0x346974
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_5 0x346978
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_6 0x34697C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_7 0x346980
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_8 0x346984
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_9 0x346988
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_10 0x34698C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_11 0x346990
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_12 0x346994
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_13 0x346998
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_14 0x34699C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_15 0x3469A0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_0 0x3469A4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_1 0x3469A8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_2 0x3469AC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_3 0x3469B0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_4 0x3469B4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_5 0x3469B8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_6 0x3469BC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_7 0x3469C0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_8 0x3469C4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_9 0x3469C8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_10 0x3469CC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_11 0x3469D0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_12 0x3469D4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_13 0x3469D8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_14 0x3469DC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_15 0x3469E0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_0 0x3469E4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_1 0x3469E8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_2 0x3469EC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_3 0x3469F0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_4 0x3469F4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_5 0x3469F8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_6 0x3469FC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_7 0x346A00
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_8 0x346A04
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_9 0x346A08
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_10 0x346A0C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_11 0x346A10
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_12 0x346A14
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_13 0x346A18
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_14 0x346A1C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_15 0x346A20
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AW 0x346A64
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AR 0x346A68
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_HIT_AW 0x346A6C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_HIT_AR 0x346A70
+
+#define mmSIF_RTR_CTRL_4_RGL_CFG 0x346B64
+
+#define mmSIF_RTR_CTRL_4_RGL_SHIFT 0x346B68
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_0 0x346B6C
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_1 0x346B70
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_2 0x346B74
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_3 0x346B78
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_4 0x346B7C
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_5 0x346B80
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_6 0x346B84
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_7 0x346B88
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_0 0x346BAC
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_1 0x346BB0
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_2 0x346BB4
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_3 0x346BB8
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_4 0x346BBC
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_5 0x346BC0
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_6 0x346BC4
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_7 0x346BC8
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_0 0x346BEC
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_1 0x346BF0
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_2 0x346BF4
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_3 0x346BF8
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_4 0x346BFC
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_5 0x346C00
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_6 0x346C04
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_7 0x346C08
+
+#define mmSIF_RTR_CTRL_4_RGL_WDT 0x346C2C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_WRAP 0x346C30
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_WRAP 0x346C34
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_WRAP 0x346C38
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_WRAP 0x346C3C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_WRAP 0x346C40
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_WRAP 0x346C44
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_WRAP 0x346C48
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_WRAP 0x346C4C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_CNT 0x346C50
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_CNT 0x346C54
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_CNT 0x346C58
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_CNT 0x346C5C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_CNT 0x346C60
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_CNT 0x346C64
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_CNT 0x346C68
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_CNT 0x346C6C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_WRAP 0x346C70
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_WRAP 0x346C74
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_WRAP 0x346C78
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_WRAP 0x346C7C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_WRAP 0x346C80
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_WRAP 0x346C84
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_WRAP 0x346C88
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_WRAP 0x346C8C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_CNT 0x346C90
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_CNT 0x346C94
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_CNT 0x346C98
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_CNT 0x346C9C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_CNT 0x346CA0
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_CNT 0x346CA4
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_CNT 0x346CA8
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_CNT 0x346CAC
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_0 0x346CB0
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_1 0x346CB4
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_2 0x346CB8
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_3 0x346CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_4_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
new file mode 100644
index 000000000000..6c27850ca3f5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_5_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_5_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_5 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_5_PERM_SEL 0x356108
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_0 0x356114
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_1 0x356118
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_2 0x35611C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_3 0x356120
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_4 0x356124
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_5 0x356128
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_6 0x35612C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_7 0x356130
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_8 0x356134
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_9 0x356138
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_10 0x35613C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_11 0x356140
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_12 0x356144
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_13 0x356148
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_14 0x35614C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_15 0x356150
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_16 0x356154
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_17 0x356158
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_18 0x35615C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_19 0x356160
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_20 0x356164
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_21 0x356168
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_22 0x35616C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_23 0x356170
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_24 0x356174
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_25 0x356178
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_26 0x35617C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_27 0x356180
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_0 0x356184
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_1 0x356188
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_2 0x35618C
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_3 0x356190
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_4 0x356194
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_5 0x356198
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_6 0x35619C
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_7 0x3561A0
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_8 0x3561A4
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_9 0x3561A8
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_10 0x3561AC
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_11 0x3561B0
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_12 0x3561B4
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_13 0x3561B8
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_14 0x3561BC
+
+#define mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN 0x35626C
+
+#define mmSIF_RTR_CTRL_5_RL_HBM_EN 0x356274
+
+#define mmSIF_RTR_CTRL_5_RL_HBM_SAT 0x356278
+
+#define mmSIF_RTR_CTRL_5_RL_HBM_RST 0x35627C
+
+#define mmSIF_RTR_CTRL_5_RL_HBM_TIMEOUT 0x356280
+
+#define mmSIF_RTR_CTRL_5_SCRAM_HBM_EN 0x356284
+
+#define mmSIF_RTR_CTRL_5_RL_PCI_EN 0x356288
+
+#define mmSIF_RTR_CTRL_5_RL_PCI_SAT 0x35628C
+
+#define mmSIF_RTR_CTRL_5_RL_PCI_RST 0x356290
+
+#define mmSIF_RTR_CTRL_5_RL_PCI_TIMEOUT 0x356294
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_EN 0x35629C
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_SAT 0x3562A0
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_RST 0x3562A4
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_TIMEOUT 0x3562AC
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_RED 0x3562B4
+
+#define mmSIF_RTR_CTRL_5_E2E_HBM_EN 0x3562EC
+
+#define mmSIF_RTR_CTRL_5_E2E_PCI_EN 0x3562F0
+
+#define mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE 0x3562F4
+
+#define mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE 0x3562F8
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET_EN 0x356404
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET 0x356408
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_WRAP 0x35640C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_CNT 0x356410
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET_EN 0x356414
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET 0x356418
+
+#define mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE 0x35641C
+
+#define mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE 0x356420
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET_EN 0x356424
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET 0x356428
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_WRAP 0x35642C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_CNT 0x356430
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET_EN 0x356434
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET 0x356438
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_SEL_0 0x356450
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_SEL_1 0x356454
+
+#define mmSIF_RTR_CTRL_5_NON_LIN_EN 0x356480
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_0 0x356500
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_1 0x356504
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_2 0x356508
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_3 0x35650C
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_4 0x356510
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_0 0x356514
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_1 0x356520
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_2 0x356524
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_3 0x356528
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_4 0x35652C
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_5 0x356530
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_6 0x356534
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_7 0x356538
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_8 0x35653C
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_9 0x356540
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_0 0x356550
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_1 0x356554
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_2 0x356558
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_3 0x35655C
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_4 0x356560
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_5 0x356564
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_6 0x356568
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_7 0x35656C
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_8 0x356570
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_9 0x356574
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_10 0x356578
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_11 0x35657C
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_12 0x356580
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_13 0x356584
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_14 0x356588
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_15 0x35658C
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_16 0x356590
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_17 0x356594
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18 0x356598
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0 0x3565E4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_1 0x3565E8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_2 0x3565EC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_3 0x3565F0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_4 0x3565F4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_5 0x3565F8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_6 0x3565FC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_7 0x356600
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_8 0x356604
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_9 0x356608
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_10 0x35660C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_11 0x356610
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_12 0x356614
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_13 0x356618
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_14 0x35661C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_15 0x356620
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0 0x356624
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_1 0x356628
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_2 0x35662C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_3 0x356630
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_4 0x356634
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_5 0x356638
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_6 0x35663C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_7 0x356640
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_8 0x356644
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_9 0x356648
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_10 0x35664C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_11 0x356650
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_12 0x356654
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_13 0x356658
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_14 0x35665C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_15 0x356660
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0 0x356664
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_1 0x356668
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_2 0x35666C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_3 0x356670
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_4 0x356674
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_5 0x356678
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_6 0x35667C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_7 0x356680
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_8 0x356684
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_9 0x356688
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_10 0x35668C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_11 0x356690
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_12 0x356694
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_13 0x356698
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_14 0x35669C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_15 0x3566A0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0 0x3566A4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_1 0x3566A8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_2 0x3566AC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_3 0x3566B0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_4 0x3566B4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_5 0x3566B8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_6 0x3566BC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_7 0x3566C0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_8 0x3566C4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_9 0x3566C8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_10 0x3566CC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_11 0x3566D0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_12 0x3566D4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_13 0x3566D8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_14 0x3566DC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_15 0x3566E0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_0 0x3566E4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_1 0x3566E8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_2 0x3566EC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_3 0x3566F0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_4 0x3566F4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_5 0x3566F8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_6 0x3566FC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_7 0x356700
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_8 0x356704
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_9 0x356708
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_10 0x35670C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_11 0x356710
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_12 0x356714
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_13 0x356718
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_14 0x35671C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_15 0x356720
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_0 0x356724
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_1 0x356728
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_2 0x35672C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_3 0x356730
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_4 0x356734
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_5 0x356738
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_6 0x35673C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_7 0x356740
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_8 0x356744
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_9 0x356748
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_10 0x35674C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_11 0x356750
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_12 0x356754
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_13 0x356758
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_14 0x35675C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_15 0x356760
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_0 0x356764
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_1 0x356768
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_2 0x35676C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_3 0x356770
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_4 0x356774
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_5 0x356778
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_6 0x35677C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_7 0x356780
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_8 0x356784
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_9 0x356788
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_10 0x35678C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_11 0x356790
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_12 0x356794
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_13 0x356798
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_14 0x35679C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_15 0x3567A0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_0 0x3567A4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_1 0x3567A8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_2 0x3567AC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_3 0x3567B0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_4 0x3567B4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_5 0x3567B8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_6 0x3567BC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_7 0x3567C0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_8 0x3567C4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_9 0x3567C8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_10 0x3567CC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_11 0x3567D0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_12 0x3567D4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_13 0x3567D8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_14 0x3567DC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_15 0x3567E0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0 0x356824
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_1 0x356828
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_2 0x35682C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_3 0x356830
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_4 0x356834
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_5 0x356838
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_6 0x35683C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_7 0x356840
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_8 0x356844
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_9 0x356848
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_10 0x35684C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_11 0x356850
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_12 0x356854
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_13 0x356858
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_14 0x35685C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_15 0x356860
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0 0x356864
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_1 0x356868
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_2 0x35686C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_3 0x356870
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_4 0x356874
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_5 0x356878
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_6 0x35687C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_7 0x356880
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_8 0x356884
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_9 0x356888
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_10 0x35688C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_11 0x356890
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_12 0x356894
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_13 0x356898
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_14 0x35689C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_15 0x3568A0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0 0x3568A4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_1 0x3568A8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_2 0x3568AC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_3 0x3568B0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_4 0x3568B4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_5 0x3568B8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_6 0x3568BC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_7 0x3568C0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_8 0x3568C4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_9 0x3568C8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_10 0x3568CC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_11 0x3568D0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_12 0x3568D4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_13 0x3568D8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_14 0x3568DC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_15 0x3568E0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0 0x3568E4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_1 0x3568E8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_2 0x3568EC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_3 0x3568F0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_4 0x3568F4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_5 0x3568F8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_6 0x3568FC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_7 0x356900
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_8 0x356904
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_9 0x356908
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_10 0x35690C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_11 0x356910
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_12 0x356914
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_13 0x356918
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_14 0x35691C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_15 0x356920
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_0 0x356924
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_1 0x356928
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_2 0x35692C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_3 0x356930
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_4 0x356934
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_5 0x356938
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_6 0x35693C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_7 0x356940
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_8 0x356944
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_9 0x356948
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_10 0x35694C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_11 0x356950
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_12 0x356954
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_13 0x356958
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_14 0x35695C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_15 0x356960
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_0 0x356964
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_1 0x356968
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_2 0x35696C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_3 0x356970
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_4 0x356974
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_5 0x356978
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_6 0x35697C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_7 0x356980
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_8 0x356984
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_9 0x356988
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_10 0x35698C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_11 0x356990
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_12 0x356994
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_13 0x356998
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_14 0x35699C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_15 0x3569A0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_0 0x3569A4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_1 0x3569A8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_2 0x3569AC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_3 0x3569B0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_4 0x3569B4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_5 0x3569B8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_6 0x3569BC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_7 0x3569C0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_8 0x3569C4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_9 0x3569C8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_10 0x3569CC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_11 0x3569D0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_12 0x3569D4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_13 0x3569D8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_14 0x3569DC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_15 0x3569E0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_0 0x3569E4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_1 0x3569E8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_2 0x3569EC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_3 0x3569F0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_4 0x3569F4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_5 0x3569F8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_6 0x3569FC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_7 0x356A00
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_8 0x356A04
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_9 0x356A08
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_10 0x356A0C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_11 0x356A10
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_12 0x356A14
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_13 0x356A18
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_14 0x356A1C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_15 0x356A20
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AW 0x356A64
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AR 0x356A68
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_HIT_AW 0x356A6C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_HIT_AR 0x356A70
+
+#define mmSIF_RTR_CTRL_5_RGL_CFG 0x356B64
+
+#define mmSIF_RTR_CTRL_5_RGL_SHIFT 0x356B68
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_0 0x356B6C
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_1 0x356B70
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_2 0x356B74
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_3 0x356B78
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_4 0x356B7C
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_5 0x356B80
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_6 0x356B84
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_7 0x356B88
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_0 0x356BAC
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_1 0x356BB0
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_2 0x356BB4
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_3 0x356BB8
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_4 0x356BBC
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_5 0x356BC0
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_6 0x356BC4
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_7 0x356BC8
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_0 0x356BEC
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_1 0x356BF0
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_2 0x356BF4
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_3 0x356BF8
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_4 0x356BFC
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_5 0x356C00
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_6 0x356C04
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_7 0x356C08
+
+#define mmSIF_RTR_CTRL_5_RGL_WDT 0x356C2C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_WRAP 0x356C30
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_WRAP 0x356C34
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_WRAP 0x356C38
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_WRAP 0x356C3C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_WRAP 0x356C40
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_WRAP 0x356C44
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_WRAP 0x356C48
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_WRAP 0x356C4C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_CNT 0x356C50
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_CNT 0x356C54
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_CNT 0x356C58
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_CNT 0x356C5C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_CNT 0x356C60
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_CNT 0x356C64
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_CNT 0x356C68
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_CNT 0x356C6C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_WRAP 0x356C70
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_WRAP 0x356C74
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_WRAP 0x356C78
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_WRAP 0x356C7C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_WRAP 0x356C80
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_WRAP 0x356C84
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_WRAP 0x356C88
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_WRAP 0x356C8C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_CNT 0x356C90
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_CNT 0x356C94
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_CNT 0x356C98
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_CNT 0x356C9C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_CNT 0x356CA0
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_CNT 0x356CA4
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_CNT 0x356CA8
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_CNT 0x356CAC
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_0 0x356CB0
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_1 0x356CB4
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_2 0x356CB8
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3 0x356CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_5_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
new file mode 100644
index 000000000000..a9ea89aa6405
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_6_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_6_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_6 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_6_PERM_SEL 0x366108
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_0 0x366114
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_1 0x366118
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_2 0x36611C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_3 0x366120
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_4 0x366124
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_5 0x366128
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_6 0x36612C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_7 0x366130
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_8 0x366134
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_9 0x366138
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_10 0x36613C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_11 0x366140
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_12 0x366144
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_13 0x366148
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_14 0x36614C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_15 0x366150
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_16 0x366154
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_17 0x366158
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_18 0x36615C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_19 0x366160
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_20 0x366164
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_21 0x366168
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_22 0x36616C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_23 0x366170
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_24 0x366174
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_25 0x366178
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_26 0x36617C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_27 0x366180
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_0 0x366184
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_1 0x366188
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_2 0x36618C
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_3 0x366190
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_4 0x366194
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_5 0x366198
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_6 0x36619C
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_7 0x3661A0
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_8 0x3661A4
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_9 0x3661A8
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_10 0x3661AC
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_11 0x3661B0
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_12 0x3661B4
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_13 0x3661B8
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_14 0x3661BC
+
+#define mmSIF_RTR_CTRL_6_SCRAM_SRAM_EN 0x36626C
+
+#define mmSIF_RTR_CTRL_6_RL_HBM_EN 0x366274
+
+#define mmSIF_RTR_CTRL_6_RL_HBM_SAT 0x366278
+
+#define mmSIF_RTR_CTRL_6_RL_HBM_RST 0x36627C
+
+#define mmSIF_RTR_CTRL_6_RL_HBM_TIMEOUT 0x366280
+
+#define mmSIF_RTR_CTRL_6_SCRAM_HBM_EN 0x366284
+
+#define mmSIF_RTR_CTRL_6_RL_PCI_EN 0x366288
+
+#define mmSIF_RTR_CTRL_6_RL_PCI_SAT 0x36628C
+
+#define mmSIF_RTR_CTRL_6_RL_PCI_RST 0x366290
+
+#define mmSIF_RTR_CTRL_6_RL_PCI_TIMEOUT 0x366294
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_EN 0x36629C
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_SAT 0x3662A0
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_RST 0x3662A4
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_TIMEOUT 0x3662AC
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_RED 0x3662B4
+
+#define mmSIF_RTR_CTRL_6_E2E_HBM_EN 0x3662EC
+
+#define mmSIF_RTR_CTRL_6_E2E_PCI_EN 0x3662F0
+
+#define mmSIF_RTR_CTRL_6_E2E_HBM_WR_SIZE 0x3662F4
+
+#define mmSIF_RTR_CTRL_6_E2E_PCI_WR_SIZE 0x3662F8
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET_EN 0x366404
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET 0x366408
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_WRAP 0x36640C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_CNT 0x366410
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET_EN 0x366414
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET 0x366418
+
+#define mmSIF_RTR_CTRL_6_E2E_HBM_RD_SIZE 0x36641C
+
+#define mmSIF_RTR_CTRL_6_E2E_PCI_RD_SIZE 0x366420
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET_EN 0x366424
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET 0x366428
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_WRAP 0x36642C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_CNT 0x366430
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET_EN 0x366434
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET 0x366438
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_SEL_0 0x366450
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_SEL_1 0x366454
+
+#define mmSIF_RTR_CTRL_6_NON_LIN_EN 0x366480
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_0 0x366500
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_1 0x366504
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_2 0x366508
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_3 0x36650C
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_4 0x366510
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_0 0x366514
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_1 0x366520
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_2 0x366524
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_3 0x366528
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_4 0x36652C
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_5 0x366530
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_6 0x366534
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_7 0x366538
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_8 0x36653C
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_9 0x366540
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_0 0x366550
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_1 0x366554
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_2 0x366558
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_3 0x36655C
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_4 0x366560
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_5 0x366564
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_6 0x366568
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_7 0x36656C
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_8 0x366570
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_9 0x366574
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_10 0x366578
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_11 0x36657C
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_12 0x366580
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_13 0x366584
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_14 0x366588
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_15 0x36658C
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_16 0x366590
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_17 0x366594
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_18 0x366598
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0 0x3665E4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_1 0x3665E8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_2 0x3665EC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_3 0x3665F0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_4 0x3665F4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_5 0x3665F8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_6 0x3665FC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_7 0x366600
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_8 0x366604
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_9 0x366608
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_10 0x36660C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_11 0x366610
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_12 0x366614
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_13 0x366618
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_14 0x36661C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_15 0x366620
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0 0x366624
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_1 0x366628
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_2 0x36662C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_3 0x366630
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_4 0x366634
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_5 0x366638
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_6 0x36663C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_7 0x366640
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_8 0x366644
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_9 0x366648
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_10 0x36664C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_11 0x366650
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_12 0x366654
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_13 0x366658
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_14 0x36665C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_15 0x366660
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0 0x366664
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_1 0x366668
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_2 0x36666C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_3 0x366670
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_4 0x366674
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_5 0x366678
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_6 0x36667C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_7 0x366680
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_8 0x366684
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_9 0x366688
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_10 0x36668C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_11 0x366690
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_12 0x366694
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_13 0x366698
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_14 0x36669C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_15 0x3666A0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0 0x3666A4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_1 0x3666A8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_2 0x3666AC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_3 0x3666B0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_4 0x3666B4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_5 0x3666B8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_6 0x3666BC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_7 0x3666C0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_8 0x3666C4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_9 0x3666C8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_10 0x3666CC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_11 0x3666D0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_12 0x3666D4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_13 0x3666D8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_14 0x3666DC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_15 0x3666E0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_0 0x3666E4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_1 0x3666E8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_2 0x3666EC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_3 0x3666F0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_4 0x3666F4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_5 0x3666F8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_6 0x3666FC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_7 0x366700
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_8 0x366704
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_9 0x366708
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_10 0x36670C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_11 0x366710
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_12 0x366714
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_13 0x366718
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_14 0x36671C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_15 0x366720
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_0 0x366724
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_1 0x366728
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_2 0x36672C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_3 0x366730
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_4 0x366734
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_5 0x366738
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_6 0x36673C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_7 0x366740
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_8 0x366744
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_9 0x366748
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_10 0x36674C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_11 0x366750
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_12 0x366754
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_13 0x366758
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_14 0x36675C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_15 0x366760
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_0 0x366764
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_1 0x366768
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_2 0x36676C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_3 0x366770
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_4 0x366774
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_5 0x366778
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_6 0x36677C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_7 0x366780
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_8 0x366784
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_9 0x366788
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_10 0x36678C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_11 0x366790
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_12 0x366794
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_13 0x366798
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_14 0x36679C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_15 0x3667A0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_0 0x3667A4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_1 0x3667A8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_2 0x3667AC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_3 0x3667B0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_4 0x3667B4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_5 0x3667B8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_6 0x3667BC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_7 0x3667C0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_8 0x3667C4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_9 0x3667C8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_10 0x3667CC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_11 0x3667D0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_12 0x3667D4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_13 0x3667D8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_14 0x3667DC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_15 0x3667E0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0 0x366824
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_1 0x366828
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_2 0x36682C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_3 0x366830
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_4 0x366834
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_5 0x366838
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_6 0x36683C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_7 0x366840
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_8 0x366844
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_9 0x366848
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_10 0x36684C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_11 0x366850
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_12 0x366854
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_13 0x366858
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_14 0x36685C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_15 0x366860
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0 0x366864
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_1 0x366868
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_2 0x36686C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_3 0x366870
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_4 0x366874
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_5 0x366878
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_6 0x36687C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_7 0x366880
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_8 0x366884
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_9 0x366888
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_10 0x36688C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_11 0x366890
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_12 0x366894
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_13 0x366898
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_14 0x36689C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_15 0x3668A0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0 0x3668A4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_1 0x3668A8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_2 0x3668AC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_3 0x3668B0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_4 0x3668B4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_5 0x3668B8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_6 0x3668BC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_7 0x3668C0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_8 0x3668C4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_9 0x3668C8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_10 0x3668CC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_11 0x3668D0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_12 0x3668D4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_13 0x3668D8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_14 0x3668DC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_15 0x3668E0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0 0x3668E4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_1 0x3668E8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_2 0x3668EC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_3 0x3668F0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_4 0x3668F4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_5 0x3668F8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_6 0x3668FC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_7 0x366900
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_8 0x366904
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_9 0x366908
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_10 0x36690C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_11 0x366910
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_12 0x366914
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_13 0x366918
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_14 0x36691C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_15 0x366920
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_0 0x366924
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_1 0x366928
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_2 0x36692C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_3 0x366930
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_4 0x366934
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_5 0x366938
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_6 0x36693C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_7 0x366940
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_8 0x366944
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_9 0x366948
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_10 0x36694C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_11 0x366950
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_12 0x366954
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_13 0x366958
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_14 0x36695C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_15 0x366960
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_0 0x366964
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_1 0x366968
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_2 0x36696C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_3 0x366970
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_4 0x366974
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_5 0x366978
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_6 0x36697C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_7 0x366980
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_8 0x366984
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_9 0x366988
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_10 0x36698C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_11 0x366990
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_12 0x366994
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_13 0x366998
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_14 0x36699C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_15 0x3669A0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_0 0x3669A4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_1 0x3669A8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_2 0x3669AC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_3 0x3669B0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_4 0x3669B4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_5 0x3669B8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_6 0x3669BC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_7 0x3669C0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_8 0x3669C4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_9 0x3669C8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_10 0x3669CC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_11 0x3669D0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_12 0x3669D4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_13 0x3669D8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_14 0x3669DC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_15 0x3669E0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_0 0x3669E4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_1 0x3669E8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_2 0x3669EC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_3 0x3669F0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_4 0x3669F4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_5 0x3669F8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_6 0x3669FC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_7 0x366A00
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_8 0x366A04
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_9 0x366A08
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_10 0x366A0C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_11 0x366A10
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_12 0x366A14
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_13 0x366A18
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_14 0x366A1C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_15 0x366A20
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AW 0x366A64
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AR 0x366A68
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_HIT_AW 0x366A6C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_HIT_AR 0x366A70
+
+#define mmSIF_RTR_CTRL_6_RGL_CFG 0x366B64
+
+#define mmSIF_RTR_CTRL_6_RGL_SHIFT 0x366B68
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_0 0x366B6C
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_1 0x366B70
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_2 0x366B74
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_3 0x366B78
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_4 0x366B7C
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_5 0x366B80
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_6 0x366B84
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_7 0x366B88
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_0 0x366BAC
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_1 0x366BB0
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_2 0x366BB4
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_3 0x366BB8
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_4 0x366BBC
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_5 0x366BC0
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_6 0x366BC4
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_7 0x366BC8
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_0 0x366BEC
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_1 0x366BF0
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_2 0x366BF4
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_3 0x366BF8
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_4 0x366BFC
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_5 0x366C00
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_6 0x366C04
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_7 0x366C08
+
+#define mmSIF_RTR_CTRL_6_RGL_WDT 0x366C2C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_WRAP 0x366C30
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_WRAP 0x366C34
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_WRAP 0x366C38
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_WRAP 0x366C3C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_WRAP 0x366C40
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_WRAP 0x366C44
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_WRAP 0x366C48
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_WRAP 0x366C4C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_CNT 0x366C50
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_CNT 0x366C54
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_CNT 0x366C58
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_CNT 0x366C5C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_CNT 0x366C60
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_CNT 0x366C64
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_CNT 0x366C68
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_CNT 0x366C6C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_WRAP 0x366C70
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_WRAP 0x366C74
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_WRAP 0x366C78
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_WRAP 0x366C7C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_WRAP 0x366C80
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_WRAP 0x366C84
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_WRAP 0x366C88
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_WRAP 0x366C8C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_CNT 0x366C90
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_CNT 0x366C94
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_CNT 0x366C98
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_CNT 0x366C9C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_CNT 0x366CA0
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_CNT 0x366CA4
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_CNT 0x366CA8
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_CNT 0x366CAC
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_0 0x366CB0
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_1 0x366CB4
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_2 0x366CB8
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_3 0x366CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_6_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
new file mode 100644
index 000000000000..a37772c531d9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_7_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_7_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_7 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
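+/*
+ * Layout note (illustrative sketch, not part of the generated map): this
+ * block mirrors SIF_RTR_CTRL_6 above with a fixed 0x10000 stride per
+ * router instance, e.g. 0x376108 - 0x366108 = 0x10000 for PERM_SEL. Code
+ * iterating over instances can exploit that regularity, assuming the
+ * stride inferred from the CTRL_6/CTRL_7 pair holds for all instances:
+ *
+ *	// hypothetical helper; stride is an assumption, not from this file
+ *	#define SIF_RTR_CTRL_INSTANCE_STRIDE	0x10000
+ *	u32 perm_sel = mmSIF_RTR_CTRL_6_PERM_SEL +
+ *			(i - 6) * SIF_RTR_CTRL_INSTANCE_STRIDE;
+ */
+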
+#define mmSIF_RTR_CTRL_7_PERM_SEL 0x376108
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_0 0x376114
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_1 0x376118
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_2 0x37611C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_3 0x376120
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_4 0x376124
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_5 0x376128
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_6 0x37612C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_7 0x376130
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_8 0x376134
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_9 0x376138
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_10 0x37613C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_11 0x376140
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_12 0x376144
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_13 0x376148
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_14 0x37614C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_15 0x376150
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_16 0x376154
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_17 0x376158
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_18 0x37615C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_19 0x376160
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_20 0x376164
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_21 0x376168
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_22 0x37616C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_23 0x376170
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_24 0x376174
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_25 0x376178
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_26 0x37617C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_27 0x376180
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_0 0x376184
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_1 0x376188
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_2 0x37618C
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_3 0x376190
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_4 0x376194
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_5 0x376198
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_6 0x37619C
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_7 0x3761A0
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_8 0x3761A4
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_9 0x3761A8
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_10 0x3761AC
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_11 0x3761B0
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_12 0x3761B4
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_13 0x3761B8
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_14 0x3761BC
+
+#define mmSIF_RTR_CTRL_7_SCRAM_SRAM_EN 0x37626C
+
+#define mmSIF_RTR_CTRL_7_RL_HBM_EN 0x376274
+
+#define mmSIF_RTR_CTRL_7_RL_HBM_SAT 0x376278
+
+#define mmSIF_RTR_CTRL_7_RL_HBM_RST 0x37627C
+
+#define mmSIF_RTR_CTRL_7_RL_HBM_TIMEOUT 0x376280
+
+#define mmSIF_RTR_CTRL_7_SCRAM_HBM_EN 0x376284
+
+#define mmSIF_RTR_CTRL_7_RL_PCI_EN 0x376288
+
+#define mmSIF_RTR_CTRL_7_RL_PCI_SAT 0x37628C
+
+#define mmSIF_RTR_CTRL_7_RL_PCI_RST 0x376290
+
+#define mmSIF_RTR_CTRL_7_RL_PCI_TIMEOUT 0x376294
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_EN 0x37629C
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_SAT 0x3762A0
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_RST 0x3762A4
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_TIMEOUT 0x3762AC
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_RED 0x3762B4
+
+#define mmSIF_RTR_CTRL_7_E2E_HBM_EN 0x3762EC
+
+#define mmSIF_RTR_CTRL_7_E2E_PCI_EN 0x3762F0
+
+#define mmSIF_RTR_CTRL_7_E2E_HBM_WR_SIZE 0x3762F4
+
+#define mmSIF_RTR_CTRL_7_E2E_PCI_WR_SIZE 0x3762F8
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET_EN 0x376404
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET 0x376408
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_WRAP 0x37640C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_CNT 0x376410
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET_EN 0x376414
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET 0x376418
+
+#define mmSIF_RTR_CTRL_7_E2E_HBM_RD_SIZE 0x37641C
+
+#define mmSIF_RTR_CTRL_7_E2E_PCI_RD_SIZE 0x376420
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET_EN 0x376424
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET 0x376428
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_WRAP 0x37642C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_CNT 0x376430
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET_EN 0x376434
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET 0x376438
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_SEL_0 0x376450
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_SEL_1 0x376454
+
+#define mmSIF_RTR_CTRL_7_NON_LIN_EN 0x376480
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_0 0x376500
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_1 0x376504
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_2 0x376508
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_3 0x37650C
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_4 0x376510
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_0 0x376514
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_1 0x376520
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_2 0x376524
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_3 0x376528
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_4 0x37652C
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_5 0x376530
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_6 0x376534
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_7 0x376538
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_8 0x37653C
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_9 0x376540
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_0 0x376550
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_1 0x376554
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_2 0x376558
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_3 0x37655C
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_4 0x376560
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_5 0x376564
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_6 0x376568
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_7 0x37656C
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_8 0x376570
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_9 0x376574
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_10 0x376578
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_11 0x37657C
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_12 0x376580
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_13 0x376584
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_14 0x376588
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_15 0x37658C
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_16 0x376590
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_17 0x376594
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_18 0x376598
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0 0x3765E4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_1 0x3765E8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_2 0x3765EC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_3 0x3765F0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_4 0x3765F4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_5 0x3765F8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_6 0x3765FC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_7 0x376600
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_8 0x376604
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_9 0x376608
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_10 0x37660C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_11 0x376610
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_12 0x376614
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_13 0x376618
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_14 0x37661C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_15 0x376620
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0 0x376624
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_1 0x376628
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_2 0x37662C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_3 0x376630
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_4 0x376634
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_5 0x376638
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_6 0x37663C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_7 0x376640
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_8 0x376644
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_9 0x376648
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_10 0x37664C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_11 0x376650
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_12 0x376654
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_13 0x376658
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_14 0x37665C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_15 0x376660
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0 0x376664
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_1 0x376668
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_2 0x37666C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_3 0x376670
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_4 0x376674
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_5 0x376678
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_6 0x37667C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_7 0x376680
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_8 0x376684
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_9 0x376688
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_10 0x37668C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_11 0x376690
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_12 0x376694
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_13 0x376698
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_14 0x37669C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_15 0x3766A0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0 0x3766A4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_1 0x3766A8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_2 0x3766AC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_3 0x3766B0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_4 0x3766B4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_5 0x3766B8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_6 0x3766BC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_7 0x3766C0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_8 0x3766C4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_9 0x3766C8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_10 0x3766CC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_11 0x3766D0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_12 0x3766D4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_13 0x3766D8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_14 0x3766DC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_15 0x3766E0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_0 0x3766E4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_1 0x3766E8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_2 0x3766EC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_3 0x3766F0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_4 0x3766F4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_5 0x3766F8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_6 0x3766FC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_7 0x376700
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_8 0x376704
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_9 0x376708
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_10 0x37670C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_11 0x376710
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_12 0x376714
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_13 0x376718
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_14 0x37671C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_15 0x376720
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_0 0x376724
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_1 0x376728
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_2 0x37672C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_3 0x376730
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_4 0x376734
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_5 0x376738
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_6 0x37673C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_7 0x376740
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_8 0x376744
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_9 0x376748
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_10 0x37674C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_11 0x376750
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_12 0x376754
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_13 0x376758
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_14 0x37675C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_15 0x376760
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_0 0x376764
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_1 0x376768
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_2 0x37676C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_3 0x376770
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_4 0x376774
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_5 0x376778
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_6 0x37677C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_7 0x376780
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_8 0x376784
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_9 0x376788
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_10 0x37678C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_11 0x376790
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_12 0x376794
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_13 0x376798
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_14 0x37679C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_15 0x3767A0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_0 0x3767A4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_1 0x3767A8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_2 0x3767AC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_3 0x3767B0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_4 0x3767B4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_5 0x3767B8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_6 0x3767BC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_7 0x3767C0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_8 0x3767C4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_9 0x3767C8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_10 0x3767CC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_11 0x3767D0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_12 0x3767D4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_13 0x3767D8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_14 0x3767DC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_15 0x3767E0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0 0x376824
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_1 0x376828
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_2 0x37682C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_3 0x376830
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_4 0x376834
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_5 0x376838
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_6 0x37683C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_7 0x376840
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_8 0x376844
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_9 0x376848
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_10 0x37684C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_11 0x376850
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_12 0x376854
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_13 0x376858
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_14 0x37685C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_15 0x376860
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0 0x376864
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_1 0x376868
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_2 0x37686C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_3 0x376870
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_4 0x376874
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_5 0x376878
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_6 0x37687C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_7 0x376880
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_8 0x376884
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_9 0x376888
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_10 0x37688C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_11 0x376890
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_12 0x376894
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_13 0x376898
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_14 0x37689C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_15 0x3768A0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0 0x3768A4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_1 0x3768A8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_2 0x3768AC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_3 0x3768B0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_4 0x3768B4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_5 0x3768B8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_6 0x3768BC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_7 0x3768C0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_8 0x3768C4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_9 0x3768C8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_10 0x3768CC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_11 0x3768D0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_12 0x3768D4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_13 0x3768D8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_14 0x3768DC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_15 0x3768E0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0 0x3768E4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_1 0x3768E8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_2 0x3768EC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_3 0x3768F0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_4 0x3768F4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_5 0x3768F8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_6 0x3768FC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_7 0x376900
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_8 0x376904
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_9 0x376908
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_10 0x37690C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_11 0x376910
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_12 0x376914
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_13 0x376918
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_14 0x37691C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_15 0x376920
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_0 0x376924
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_1 0x376928
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_2 0x37692C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_3 0x376930
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_4 0x376934
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_5 0x376938
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_6 0x37693C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_7 0x376940
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_8 0x376944
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_9 0x376948
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_10 0x37694C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_11 0x376950
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_12 0x376954
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_13 0x376958
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_14 0x37695C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_15 0x376960
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_0 0x376964
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_1 0x376968
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_2 0x37696C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_3 0x376970
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_4 0x376974
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_5 0x376978
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_6 0x37697C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_7 0x376980
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_8 0x376984
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_9 0x376988
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_10 0x37698C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_11 0x376990
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_12 0x376994
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_13 0x376998
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_14 0x37699C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_15 0x3769A0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_0 0x3769A4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_1 0x3769A8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_2 0x3769AC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_3 0x3769B0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_4 0x3769B4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_5 0x3769B8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_6 0x3769BC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_7 0x3769C0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_8 0x3769C4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_9 0x3769C8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_10 0x3769CC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_11 0x3769D0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_12 0x3769D4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_13 0x3769D8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_14 0x3769DC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_15 0x3769E0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_0 0x3769E4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_1 0x3769E8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_2 0x3769EC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_3 0x3769F0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_4 0x3769F4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_5 0x3769F8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_6 0x3769FC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_7 0x376A00
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_8 0x376A04
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_9 0x376A08
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_10 0x376A0C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_11 0x376A10
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_12 0x376A14
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_13 0x376A18
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_14 0x376A1C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_15 0x376A20
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AW 0x376A64
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AR 0x376A68
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_HIT_AW 0x376A6C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_HIT_AR 0x376A70
+
+#define mmSIF_RTR_CTRL_7_RGL_CFG 0x376B64
+
+#define mmSIF_RTR_CTRL_7_RGL_SHIFT 0x376B68
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_0 0x376B6C
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_1 0x376B70
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_2 0x376B74
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_3 0x376B78
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_4 0x376B7C
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_5 0x376B80
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_6 0x376B84
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_7 0x376B88
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_0 0x376BAC
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_1 0x376BB0
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_2 0x376BB4
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_3 0x376BB8
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_4 0x376BBC
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_5 0x376BC0
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_6 0x376BC4
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_7 0x376BC8
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_0 0x376BEC
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_1 0x376BF0
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_2 0x376BF4
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_3 0x376BF8
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_4 0x376BFC
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_5 0x376C00
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_6 0x376C04
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_7 0x376C08
+
+#define mmSIF_RTR_CTRL_7_RGL_WDT 0x376C2C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_WRAP 0x376C30
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_WRAP 0x376C34
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_WRAP 0x376C38
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_WRAP 0x376C3C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_WRAP 0x376C40
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_WRAP 0x376C44
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_WRAP 0x376C48
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_WRAP 0x376C4C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_CNT 0x376C50
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_CNT 0x376C54
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_CNT 0x376C58
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_CNT 0x376C5C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_CNT 0x376C60
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_CNT 0x376C64
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_CNT 0x376C68
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_CNT 0x376C6C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_WRAP 0x376C70
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_WRAP 0x376C74
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_WRAP 0x376C78
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_WRAP 0x376C7C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_WRAP 0x376C80
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_WRAP 0x376C84
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_WRAP 0x376C88
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_WRAP 0x376C8C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_CNT 0x376C90
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_CNT 0x376C94
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_CNT 0x376C98
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_CNT 0x376C9C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_CNT 0x376CA0
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_CNT 0x376CA4
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_CNT 0x376CA8
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_CNT 0x376CAC
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_0 0x376CB0
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_1 0x376CB4
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_2 0x376CB8
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_3 0x376CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_7_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h
new file mode 100644
index 000000000000..07d2a9000102
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_STLB_REGS_H_
+#define ASIC_REG_STLB_REGS_H_
+
+/*
+ *****************************************
+ * STLB (Prototype: STLB)
+ *****************************************
+ */
+
+#define mmSTLB_CACHE_INV 0xC12010
+
+#define mmSTLB_CACHE_INV_BASE_39_8 0xC12014
+
+#define mmSTLB_CACHE_INV_BASE_49_40 0xC12018
+
+#define mmSTLB_STLB_FEATURE_EN 0xC1201C
+
+#define mmSTLB_STLB_AXI_CACHE 0xC12020
+
+#define mmSTLB_HOP_CONFIGURATION 0xC12024
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_49_32 0xC12028
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_31_0 0xC1202C
+
+#define mmSTLB_LINK_LIST 0xC12030
+
+#define mmSTLB_INV_ALL_START 0xC12034
+
+#define mmSTLB_INV_ALL_SET 0xC12038
+
+#define mmSTLB_INV_PS 0xC1203C
+
+#define mmSTLB_INV_CONSUMER_INDEX 0xC12040
+
+#define mmSTLB_INV_HIT_COUNT 0xC12044
+
+#define mmSTLB_INV_SET 0xC12048
+
+#define mmSTLB_SRAM_INIT 0xC1204C
+
+#define mmSTLB_MEM_CACHE_INVALIDATION 0xC12050
+
+#define mmSTLB_MEM_CACHE_INV_STATUS 0xC12054
+
+#define mmSTLB_MEM_CACHE_BASE_38_7 0xC12058
+
+#define mmSTLB_MEM_CACHE_BASE_49_39 0xC1205C
+
+#define mmSTLB_MEM_CACHE_CONFIG 0xC12060
+
+#define mmSTLB_SET_THRESHOLD_HOP4 0xC12064
+
+#define mmSTLB_SET_THRESHOLD_HOP3 0xC12068
+
+#define mmSTLB_SET_THRESHOLD_HOP2 0xC1206C
+
+#define mmSTLB_SET_THRESHOLD_HOP1 0xC12070
+
+#define mmSTLB_SET_THRESHOLD_HOP0 0xC12074
+
+#define mmSTLB_MULTI_HIT_INTERRUPT_CLR 0xC12078
+
+#define mmSTLB_MULTI_HIT_INTERRUPT_MASK 0xC1207C
+
+#define mmSTLB_MEM_L0_CACHE_CFG 0xC12080
+
+#define mmSTLB_MEM_READ_ARPROT 0xC12084
+
+#endif /* ASIC_REG_STLB_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
new file mode 100644
index 000000000000..8f67c11c8de9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
@@ -0,0 +1,2578 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CFG_MASKS_H_
+#define ASIC_REG_TPC0_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
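+/*
+ * Field access sketch (illustrative, not part of the generated map): each
+ * field above is described by a _SHIFT/_MASK pair, so decoding a field is
+ * mask-then-shift and encoding is the reverse. Assuming hypothetical
+ * locals cfg (a raw TENSOR_CONFIG value) and last_dim (a value to
+ * program), the same pattern applies to every tensor descriptor below:
+ *
+ *	u32 data_type = (cfg &
+ *		TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK) >>
+ *		TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT;
+ *
+ *	cfg &= ~TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK;
+ *	cfg |= (last_dim <<
+ *		TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT) &
+ *		TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK;
+ */
+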
+/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE */
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_RSV_SHIFT 16
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_RSV_MASK 0x1FFF0000
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 29
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0xE0000000
+
+/* TPC0_CFG_KERNEL_SYNC_OBJECT_ADDR */
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_ADDR_V_SHIFT 0
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_ADDR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW */
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_0 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_0 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_1 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_1 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_2 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_2 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_3 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_3 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_4 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_4 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_CONFIG */
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 2
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0xFC
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_SHIFT 8
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_MASK 0xFF00
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_SHIFT 16
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_MASK 0xFF0000
+
+/* TPC0_CFG_KERNEL_KERNEL_ID */
+#define TPC0_CFG_KERNEL_KERNEL_ID_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_ID_V_MASK 0xFFFF
+
+/* TPC0_CFG_KERNEL_SRF */
+#define TPC0_CFG_KERNEL_SRF_V_SHIFT 0
+#define TPC0_CFG_KERNEL_SRF_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_ROUND_CSR */
+#define TPC0_CFG_ROUND_CSR_MODE_SHIFT 0
+#define TPC0_CFG_ROUND_CSR_MODE_MASK 0x7
+
+/* TPC0_CFG_PROT */
+#define TPC0_CFG_PROT_AWPROT_SHIFT 0
+#define TPC0_CFG_PROT_AWPROT_MASK 0x7
+#define TPC0_CFG_PROT_ARPROT_SHIFT 3
+#define TPC0_CFG_PROT_ARPROT_MASK 0x38
+
+/* TPC0_CFG_SEMAPHORE */
+#define TPC0_CFG_SEMAPHORE_V_SHIFT 0
+#define TPC0_CFG_SEMAPHORE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_VFLAGS */
+#define TPC0_CFG_VFLAGS_V_SHIFT 0
+#define TPC0_CFG_VFLAGS_V_MASK 0xF
+
+/* TPC0_CFG_SFLAGS */
+#define TPC0_CFG_SFLAGS_V_SHIFT 0
+#define TPC0_CFG_SFLAGS_V_MASK 0xF
+
+/* TPC0_CFG_LFSR_POLYNOM */
+#define TPC0_CFG_LFSR_POLYNOM_V_SHIFT 0
+#define TPC0_CFG_LFSR_POLYNOM_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_STATUS */
+#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT 1
+#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK 0x2
+#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT 2
+#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK 0x4
+#define TPC0_CFG_STATUS_IQ_EMPTY_SHIFT 3
+#define TPC0_CFG_STATUS_IQ_EMPTY_MASK 0x8
+#define TPC0_CFG_STATUS_SB_EMPTY_SHIFT 5
+#define TPC0_CFG_STATUS_SB_EMPTY_MASK 0x20
+#define TPC0_CFG_STATUS_QM_IDLE_SHIFT 6
+#define TPC0_CFG_STATUS_QM_IDLE_MASK 0x40
+#define TPC0_CFG_STATUS_QM_RDY_SHIFT 7
+#define TPC0_CFG_STATUS_QM_RDY_MASK 0x80
+
+/* TPC0_CFG_CFG_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_CFG_SUBTRACT_VALUE */
+#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_SHIFT 0
+#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_SM_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TPC_CMD */
+#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT 0
+#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_MASK 0x1
+#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_SHIFT 1
+#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_MASK 0x2
+#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_SHIFT 2
+#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_MASK 0x4
+#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_SHIFT 3
+#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_MASK 0x8
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT 4
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_MASK 0x10
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_SHIFT 5
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_MASK 0x20
+#define TPC0_CFG_TPC_CMD_QMAN_STOP_SHIFT 6
+#define TPC0_CFG_TPC_CMD_QMAN_STOP_MASK 0x40
+
+/* TPC0_CFG_TPC_EXECUTE */
+#define TPC0_CFG_TPC_EXECUTE_V_SHIFT 0
+#define TPC0_CFG_TPC_EXECUTE_V_MASK 0x1
+
+/* TPC0_CFG_TPC_STALL */
+#define TPC0_CFG_TPC_STALL_V_SHIFT 0
+#define TPC0_CFG_TPC_STALL_V_MASK 0x1
+
+/* TPC0_CFG_ICACHE_BASE_ADDERESS_LOW */
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_SHIFT 0
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH */
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_RD_RATE_LIMIT */
+#define TPC0_CFG_RD_RATE_LIMIT_ENABLE_SHIFT 0
+#define TPC0_CFG_RD_RATE_LIMIT_ENABLE_MASK 0x1
+#define TPC0_CFG_RD_RATE_LIMIT_SATURATION_SHIFT 1
+#define TPC0_CFG_RD_RATE_LIMIT_SATURATION_MASK 0x1FE
+#define TPC0_CFG_RD_RATE_LIMIT_TIMEOUT_SHIFT 9
+#define TPC0_CFG_RD_RATE_LIMIT_TIMEOUT_MASK 0x1FE00
+
+/* TPC0_CFG_WR_RATE_LIMIT */
+#define TPC0_CFG_WR_RATE_LIMIT_ENABLE_SHIFT 0
+#define TPC0_CFG_WR_RATE_LIMIT_ENABLE_MASK 0x1
+#define TPC0_CFG_WR_RATE_LIMIT_SATURATION_SHIFT 1
+#define TPC0_CFG_WR_RATE_LIMIT_SATURATION_MASK 0x1FE
+#define TPC0_CFG_WR_RATE_LIMIT_TIMEOUT_SHIFT 9
+#define TPC0_CFG_WR_RATE_LIMIT_TIMEOUT_MASK 0x1FE00
+
+/* TPC0_CFG_MSS_CONFIG */
+#define TPC0_CFG_MSS_CONFIG_AWCACHE_SHIFT 0
+#define TPC0_CFG_MSS_CONFIG_AWCACHE_MASK 0xF
+#define TPC0_CFG_MSS_CONFIG_ARCACHE_SHIFT 4
+#define TPC0_CFG_MSS_CONFIG_ARCACHE_MASK 0xF0
+#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT 8
+#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_MASK 0x300
+#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_SHIFT 10
+#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_MASK 0x400
+#define TPC0_CFG_MSS_CONFIG_DCACHE_PREFETCH_DIS_SHIFT 11
+#define TPC0_CFG_MSS_CONFIG_DCACHE_PREFETCH_DIS_MASK 0x800
+
+/* TPC0_CFG_TPC_INTR_CAUSE */
+#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_SHIFT 0
+#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK 0xFFFFF
+
+/* TPC0_CFG_TPC_INTR_MASK */
+#define TPC0_CFG_TPC_INTR_MASK_MASK_SHIFT 0
+#define TPC0_CFG_TPC_INTR_MASK_MASK_MASK 0xFFFFF
+
+/* TPC0_CFG_WQ_CREDITS */
+#define TPC0_CFG_WQ_CREDITS_ST_G_SHIFT 0
+#define TPC0_CFG_WQ_CREDITS_ST_G_MASK 0xF
+#define TPC0_CFG_WQ_CREDITS_KERNEL_FIFO_SHIFT 4
+#define TPC0_CFG_WQ_CREDITS_KERNEL_FIFO_MASK 0x70
+
+/* TPC0_CFG_ARUSER_LO */
+#define TPC0_CFG_ARUSER_LO_V_SHIFT 0
+#define TPC0_CFG_ARUSER_LO_V_MASK 0x7FF
+
+/* TPC0_CFG_ARUSER_HI */
+#define TPC0_CFG_ARUSER_HI_V_SHIFT 11
+#define TPC0_CFG_ARUSER_HI_V_MASK 0x1800
+#define TPC0_CFG_ARUSER_HI_RSRV_SHIFT 13
+#define TPC0_CFG_ARUSER_HI_RSRV_MASK 0xFFFFE000
+
+/* TPC0_CFG_AWUSER_LO */
+#define TPC0_CFG_AWUSER_LO_V_SHIFT 0
+#define TPC0_CFG_AWUSER_LO_V_MASK 0x7FF
+
+/* TPC0_CFG_AWUSER_HI */
+#define TPC0_CFG_AWUSER_HI_V_SHIFT 11
+#define TPC0_CFG_AWUSER_HI_V_MASK 0x1800
+#define TPC0_CFG_AWUSER_HI_RSRV_SHIFT 13
+#define TPC0_CFG_AWUSER_HI_RSRV_MASK 0xFFFFE000
+
+/* TPC0_CFG_OPCODE_EXEC */
+#define TPC0_CFG_OPCODE_EXEC_SPU_OP_SHIFT 0
+#define TPC0_CFG_OPCODE_EXEC_SPU_OP_MASK 0x7F
+#define TPC0_CFG_OPCODE_EXEC_SPU_EN_SHIFT 7
+#define TPC0_CFG_OPCODE_EXEC_SPU_EN_MASK 0x80
+#define TPC0_CFG_OPCODE_EXEC_VPU_OP_SHIFT 8
+#define TPC0_CFG_OPCODE_EXEC_VPU_OP_MASK 0x7F00
+#define TPC0_CFG_OPCODE_EXEC_VPU_EN_SHIFT 15
+#define TPC0_CFG_OPCODE_EXEC_VPU_EN_MASK 0x8000
+#define TPC0_CFG_OPCODE_EXEC_LD_OP_SHIFT 16
+#define TPC0_CFG_OPCODE_EXEC_LD_OP_MASK 0x7F0000
+#define TPC0_CFG_OPCODE_EXEC_LD_EN_SHIFT 23
+#define TPC0_CFG_OPCODE_EXEC_LD_EN_MASK 0x800000
+#define TPC0_CFG_OPCODE_EXEC_ST_OP_SHIFT 24
+#define TPC0_CFG_OPCODE_EXEC_ST_OP_MASK 0x7F000000
+#define TPC0_CFG_OPCODE_EXEC_ST_EN_SHIFT 31
+#define TPC0_CFG_OPCODE_EXEC_ST_EN_MASK 0x80000000
+
+/* TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO */
+#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI */
+#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO */
+#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI */
+#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO */
+#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI */
+#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO */
+#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI */
+#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TSB_CFG_MAX_SIZE */
+#define TPC0_CFG_TSB_CFG_MAX_SIZE_DATA_SHIFT 0
+#define TPC0_CFG_TSB_CFG_MAX_SIZE_DATA_MASK 0xFFFF
+#define TPC0_CFG_TSB_CFG_MAX_SIZE_MD_SHIFT 16
+#define TPC0_CFG_TSB_CFG_MAX_SIZE_MD_MASK 0xFFFF0000
+
+/* TPC0_CFG_TSB_CFG */
+#define TPC0_CFG_TSB_CFG_FORCE_MISS_SHIFT 0
+#define TPC0_CFG_TSB_CFG_FORCE_MISS_MASK 0x1
+#define TPC0_CFG_TSB_CFG_MAX_OS_SHIFT 1
+#define TPC0_CFG_TSB_CFG_MAX_OS_MASK 0x1FFFE
+
+/* TPC0_CFG_DBGMEM_ADD */
+#define TPC0_CFG_DBGMEM_ADD_V_SHIFT 0
+#define TPC0_CFG_DBGMEM_ADD_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_DBGMEM_DATA_WR */
+#define TPC0_CFG_DBGMEM_DATA_WR_V_SHIFT 0
+#define TPC0_CFG_DBGMEM_DATA_WR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_DBGMEM_DATA_RD */
+#define TPC0_CFG_DBGMEM_DATA_RD_V_SHIFT 0
+#define TPC0_CFG_DBGMEM_DATA_RD_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_DBGMEM_CTRL */
+#define TPC0_CFG_DBGMEM_CTRL_WR_NRD_SHIFT 0
+#define TPC0_CFG_DBGMEM_CTRL_WR_NRD_MASK 0x1
+
+/* TPC0_CFG_DBGMEM_RC */
+#define TPC0_CFG_DBGMEM_RC_VALID_SHIFT 0
+#define TPC0_CFG_DBGMEM_RC_VALID_MASK 0x1
+
+/* TPC0_CFG_TSB_INFLIGHT_CNTR */
+#define TPC0_CFG_TSB_INFLIGHT_CNTR_V_SHIFT 0
+#define TPC0_CFG_TSB_INFLIGHT_CNTR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_WQ_INFLIGHT_CNTR */
+#define TPC0_CFG_WQ_INFLIGHT_CNTR_HBW_SHIFT 0
+#define TPC0_CFG_WQ_INFLIGHT_CNTR_HBW_MASK 0xFFFF
+#define TPC0_CFG_WQ_INFLIGHT_CNTR_LBW_SHIFT 16
+#define TPC0_CFG_WQ_INFLIGHT_CNTR_LBW_MASK 0xF0000
+
+/* TPC0_CFG_WQ_LBW_TOTAL_CNTR */
+#define TPC0_CFG_WQ_LBW_TOTAL_CNTR_V_SHIFT 0
+#define TPC0_CFG_WQ_LBW_TOTAL_CNTR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_WQ_HBW_TOTAL_CNTR */
+#define TPC0_CFG_WQ_HBW_TOTAL_CNTR_V_SHIFT 0
+#define TPC0_CFG_WQ_HBW_TOTAL_CNTR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_IRQ_OCCOUPY_CNTR */
+#define TPC0_CFG_IRQ_OCCOUPY_CNTR_V_SHIFT 0
+#define TPC0_CFG_IRQ_OCCOUPY_CNTR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_FUNC_MBIST_CNTRL */
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_MASK 0x1
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_SHIFT 1
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK 0x2
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_SHIFT 2
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK 0x4
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_SHIFT 16
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_MASK 0x3FF0000
+
+/* TPC0_CFG_FUNC_MBIST_PAT */
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_MASK 0x3
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_SHIFT 2
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_MASK 0xC
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_SHIFT 4
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_MASK 0x30
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_SHIFT 6
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_MASK 0xC0
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_SHIFT 8
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_MASK 0x300
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_SHIFT 10
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_MASK 0xC00
+
+/* TPC0_CFG_FUNC_MBIST_MEM */
+#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_MASK 0x7FF
+#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_SHIFT 12
+#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_MASK 0x7000
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_SHIFT 16
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_MASK 0x7FF0000
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_SHIFT 28
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000
+
+/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_8_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_9_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_10_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_11_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_12_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_13_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_14_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_15_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_SYNC_OBJECT_MESSAGE */
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_RSV_SHIFT 16
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_RSV_MASK 0x1FFF0000
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 29
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0xE0000000
+
+/* TPC0_CFG_QM_SYNC_OBJECT_ADDR */
+#define TPC0_CFG_QM_SYNC_OBJECT_ADDR_V_SHIFT 0
+#define TPC0_CFG_QM_SYNC_OBJECT_ADDR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW */
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_0 */
+#define TPC0_CFG_QM_TID_BASE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_0 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_1 */
+#define TPC0_CFG_QM_TID_BASE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_1 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_2 */
+#define TPC0_CFG_QM_TID_BASE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_2 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_3 */
+#define TPC0_CFG_QM_TID_BASE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_3 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_4 */
+#define TPC0_CFG_QM_TID_BASE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_4 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_CONFIG */
+#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
+#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
+#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
+#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 2
+#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0xFC
+#define TPC0_CFG_QM_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_SHIFT 8
+#define TPC0_CFG_QM_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_MASK 0xFF00
+#define TPC0_CFG_QM_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_SHIFT 16
+#define TPC0_CFG_QM_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_MASK 0xFF0000
+
+/* TPC0_CFG_QM_KERNEL_ID */
+#define TPC0_CFG_QM_KERNEL_ID_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_ID_V_MASK 0xFFFF
+
+/* TPC0_CFG_QM_SRF */
+#define TPC0_CFG_QM_SRF_V_SHIFT 0
+#define TPC0_CFG_QM_SRF_V_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */
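
Every bitfield in the header above is described by a SHIFT/MASK pair over its 32-bit register. A minimal sketch of how such a pair is typically used to pack and unpack a register value; the helper names and the choice of the TENSOR_3 config word are illustrative only and are not part of the generated header or of the habanalabs driver:

#include <linux/types.h>

/* Hypothetical helper: pack a QM TENSOR_3 configuration word. */
static u32 tpc0_qm_tensor3_cfg(u32 data_type, u32 valid_dims, u32 last_dim)
{
	u32 cfg = 0;

	cfg |= (data_type << TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT) &
		TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK;
	cfg |= (valid_dims << TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT) &
		TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK;
	cfg |= (last_dim << TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT) &
		TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK;

	return cfg;
}

/* Hypothetical helper: extract the DATA_TYPE field back out of the word. */
static u32 tpc0_qm_tensor3_data_type(u32 cfg)
{
	return (cfg & TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK) >>
		TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT;
}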
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
new file mode 100644
index 000000000000..b82a906265a8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CFG_REGS_H_
+#define ASIC_REG_TPC0_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_CFG (Prototype: TPC)
+ *****************************************
+ */
+
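The mm*-prefixed constants that follow are byte offsets of the TPC0 configuration registers within the device's register space; the per-field SHIFT/MASK definitions for these same registers live in the companion tpc0_cfg_masks.h above. A minimal sketch of programming one register pair through an ioremapped base; the base pointer and the plain writel() accessors are assumptions for illustration (the driver wraps register access in its own helpers):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* base: hypothetical ioremapped pointer to the device's config space */
static void tpc0_set_kernel_tensor0_base(void __iomem *base, u64 addr)
{
	writel(lower_32_bits(addr),
	       base + mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW);
	writel(upper_32_bits(addr),
	       base + mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH);
}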
+#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE06400
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE06404
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE06408
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE0640C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE06410
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE06414
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE06418
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE0641C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE06420
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE06424
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE06428
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE0642C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE06430
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE06434
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE06438
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE0643C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE06440
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE06444
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE06448
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE0644C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE06450
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE06454
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE06458
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE0645C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE06460
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE06464
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE06468
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE0646C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE06470
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE06474
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE06478
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE0647C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE06480
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE06484
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE06488
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE0648C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE06490
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE06494
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE06498
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE0649C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE064A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE064A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE064A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE064AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE064B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE064B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE064B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE064BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE064C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE064C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE064C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE064CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE064D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE064D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE064D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE064DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE064E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE064E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE064E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE064EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE064F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE064F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE064F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE064FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE06500
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE06504
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE06508
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE0650C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE06510
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE06514
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE06518
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE0651C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE06520
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE06524
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE06528
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE0652C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE06530
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE06534
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE06538
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE0653C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE06540
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE06544
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE06548
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE0654C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE06550
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE06554
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE06558
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE0655C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE06560
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE06564
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE06568
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE0656C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE06570
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE06574
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE06578
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE0657C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE06580
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE06584
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE06588
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE0658C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE06590
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE06594
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE06598
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE0659C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE065A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE065A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE065A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE065AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE065B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE065B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE065B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE065BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xE065C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xE065C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xE065C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xE065CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xE065D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xE065D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xE065D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xE065DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xE065E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xE065E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xE065E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xE065EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xE065F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xE065F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xE065F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xE065FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xE06600
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xE06604
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xE06608
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xE0660C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xE06610
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xE06614
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xE06618
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xE0661C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xE06620
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xE06624
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xE06628
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xE0662C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xE06630
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xE06634
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xE06638
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xE0663C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xE06640
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xE06644
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xE06648
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xE0664C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xE06650
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xE06654
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xE06658
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xE0665C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xE06660
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xE06664
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xE06668
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xE0666C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xE06670
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xE06674
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xE06678
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xE0667C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xE06680
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xE06684
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xE06688
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xE0668C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xE06690
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xE06694
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xE06698
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xE0669C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xE066A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xE066A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xE066A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xE066AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xE066B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xE066B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xE066B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xE066BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xE066C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xE066C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xE066C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xE066CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xE066D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xE066D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xE066D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xE066DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xE066E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xE066E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xE066E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xE066EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xE066F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xE066F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xE066F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xE066FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xE06700
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xE06704
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xE06708
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xE0670C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xE06710
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xE06714
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xE06718
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xE0671C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xE06720
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xE06724
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xE06728
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xE0672C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xE06730
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xE06734
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xE06738
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xE0673C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xE06740
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xE06744
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xE06748
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xE0674C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xE06750
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xE06754
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xE06758
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xE0675C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xE06760
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xE06764
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xE06768
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xE0676C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xE06770
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xE06774
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xE06778
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xE0677C
+
+#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE06780
+
+#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_ADDR 0xE06784
+
+#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE06788
+
+#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE0678C
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_0 0xE06790
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_0 0xE06794
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_1 0xE06798
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_1 0xE0679C
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_2 0xE067A0
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_2 0xE067A4
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_3 0xE067A8
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_3 0xE067AC
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_4 0xE067B0
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_4 0xE067B4
+
+#define mmTPC0_CFG_KERNEL_KERNEL_CONFIG 0xE067B8
+
+#define mmTPC0_CFG_KERNEL_KERNEL_ID 0xE067BC
+
+#define mmTPC0_CFG_KERNEL_SRF_0 0xE067C0
+
+#define mmTPC0_CFG_KERNEL_SRF_1 0xE067C4
+
+#define mmTPC0_CFG_KERNEL_SRF_2 0xE067C8
+
+#define mmTPC0_CFG_KERNEL_SRF_3 0xE067CC
+
+#define mmTPC0_CFG_KERNEL_SRF_4 0xE067D0
+
+#define mmTPC0_CFG_KERNEL_SRF_5 0xE067D4
+
+#define mmTPC0_CFG_KERNEL_SRF_6 0xE067D8
+
+#define mmTPC0_CFG_KERNEL_SRF_7 0xE067DC
+
+#define mmTPC0_CFG_KERNEL_SRF_8 0xE067E0
+
+#define mmTPC0_CFG_KERNEL_SRF_9 0xE067E4
+
+#define mmTPC0_CFG_KERNEL_SRF_10 0xE067E8
+
+#define mmTPC0_CFG_KERNEL_SRF_11 0xE067EC
+
+#define mmTPC0_CFG_KERNEL_SRF_12 0xE067F0
+
+#define mmTPC0_CFG_KERNEL_SRF_13 0xE067F4
+
+#define mmTPC0_CFG_KERNEL_SRF_14 0xE067F8
+
+#define mmTPC0_CFG_KERNEL_SRF_15 0xE067FC
+
+#define mmTPC0_CFG_KERNEL_SRF_16 0xE06800
+
+#define mmTPC0_CFG_KERNEL_SRF_17 0xE06804
+
+#define mmTPC0_CFG_KERNEL_SRF_18 0xE06808
+
+#define mmTPC0_CFG_KERNEL_SRF_19 0xE0680C
+
+#define mmTPC0_CFG_KERNEL_SRF_20 0xE06810
+
+#define mmTPC0_CFG_KERNEL_SRF_21 0xE06814
+
+#define mmTPC0_CFG_KERNEL_SRF_22 0xE06818
+
+#define mmTPC0_CFG_KERNEL_SRF_23 0xE0681C
+
+#define mmTPC0_CFG_KERNEL_SRF_24 0xE06820
+
+#define mmTPC0_CFG_KERNEL_SRF_25 0xE06824
+
+#define mmTPC0_CFG_KERNEL_SRF_26 0xE06828
+
+#define mmTPC0_CFG_KERNEL_SRF_27 0xE0682C
+
+#define mmTPC0_CFG_KERNEL_SRF_28 0xE06830
+
+#define mmTPC0_CFG_KERNEL_SRF_29 0xE06834
+
+#define mmTPC0_CFG_KERNEL_SRF_30 0xE06838
+
+#define mmTPC0_CFG_KERNEL_SRF_31 0xE0683C
+
+#define mmTPC0_CFG_ROUND_CSR 0xE068FC
+
+#define mmTPC0_CFG_PROT 0xE06900
+
+#define mmTPC0_CFG_SEMAPHORE 0xE06908
+
+#define mmTPC0_CFG_VFLAGS 0xE0690C
+
+#define mmTPC0_CFG_SFLAGS 0xE06910
+
+#define mmTPC0_CFG_LFSR_POLYNOM 0xE06918
+
+#define mmTPC0_CFG_STATUS 0xE0691C
+
+#define mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH 0xE06920
+
+#define mmTPC0_CFG_CFG_SUBTRACT_VALUE 0xE06924
+
+#define mmTPC0_CFG_SM_BASE_ADDRESS_HIGH 0xE0692C
+
+#define mmTPC0_CFG_TPC_CMD 0xE06930
+
+#define mmTPC0_CFG_TPC_EXECUTE 0xE06938
+
+#define mmTPC0_CFG_TPC_STALL 0xE0693C
+
+#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW 0xE06940
+
+#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE06944
+
+#define mmTPC0_CFG_RD_RATE_LIMIT 0xE06948
+
+#define mmTPC0_CFG_WR_RATE_LIMIT 0xE06950
+
+#define mmTPC0_CFG_MSS_CONFIG 0xE06954
+
+#define mmTPC0_CFG_TPC_INTR_CAUSE 0xE06958
+
+#define mmTPC0_CFG_TPC_INTR_MASK 0xE0695C
+
+#define mmTPC0_CFG_WQ_CREDITS 0xE06960
+
+#define mmTPC0_CFG_ARUSER_LO 0xE06964
+
+#define mmTPC0_CFG_ARUSER_HI 0xE06968
+
+#define mmTPC0_CFG_AWUSER_LO 0xE0696C
+
+#define mmTPC0_CFG_AWUSER_HI 0xE06970
+
+#define mmTPC0_CFG_OPCODE_EXEC 0xE06974
+
+#define mmTPC0_CFG_LUT_FUNC32_BASE_ADDR_LO 0xE06978
+
+#define mmTPC0_CFG_LUT_FUNC32_BASE_ADDR_HI 0xE0697C
+
+#define mmTPC0_CFG_LUT_FUNC64_BASE_ADDR_LO 0xE06980
+
+#define mmTPC0_CFG_LUT_FUNC64_BASE_ADDR_HI 0xE06984
+
+#define mmTPC0_CFG_LUT_FUNC128_BASE_ADDR_LO 0xE06988
+
+#define mmTPC0_CFG_LUT_FUNC128_BASE_ADDR_HI 0xE0698C
+
+#define mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_LO 0xE06990
+
+#define mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_HI 0xE06994
+
+#define mmTPC0_CFG_TSB_CFG_MAX_SIZE 0xE06998
+
+#define mmTPC0_CFG_TSB_CFG 0xE0699C
+
+#define mmTPC0_CFG_DBGMEM_ADD 0xE069A0
+
+#define mmTPC0_CFG_DBGMEM_DATA_WR 0xE069A4
+
+#define mmTPC0_CFG_DBGMEM_DATA_RD 0xE069A8
+
+#define mmTPC0_CFG_DBGMEM_CTRL 0xE069AC
+
+#define mmTPC0_CFG_DBGMEM_RC 0xE069B0
+
+#define mmTPC0_CFG_TSB_INFLIGHT_CNTR 0xE069B4
+
+#define mmTPC0_CFG_WQ_INFLIGHT_CNTR 0xE069B8
+
+#define mmTPC0_CFG_WQ_LBW_TOTAL_CNTR 0xE069BC
+
+#define mmTPC0_CFG_WQ_HBW_TOTAL_CNTR 0xE069C0
+
+#define mmTPC0_CFG_IRQ_OCCOUPY_CNTR 0xE069C4
+
+#define mmTPC0_CFG_FUNC_MBIST_CNTRL 0xE069D0
+
+#define mmTPC0_CFG_FUNC_MBIST_PAT 0xE069D4
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_0 0xE069D8
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_1 0xE069DC
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_2 0xE069E0
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_3 0xE069E4
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_4 0xE069E8
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_5 0xE069EC
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_6 0xE069F0
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_7 0xE069F4
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_8 0xE069F8
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE069FC
+
+#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE06A00
+
+#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE06A04
+
+#define mmTPC0_CFG_QM_TENSOR_0_PADDING_VALUE 0xE06A08
+
+#define mmTPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE06A0C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE06A10
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE06A14
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE06A18
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE06A1C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE06A20
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE06A24
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE06A28
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE06A2C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE06A30
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE06A34
+
+#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE06A38
+
+#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE06A3C
+
+#define mmTPC0_CFG_QM_TENSOR_1_PADDING_VALUE 0xE06A40
+
+#define mmTPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE06A44
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE06A48
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE06A4C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE06A50
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE06A54
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE06A58
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE06A5C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE06A60
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE06A64
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE06A68
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE06A6C
+
+#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE06A70
+
+#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE06A74
+
+#define mmTPC0_CFG_QM_TENSOR_2_PADDING_VALUE 0xE06A78
+
+#define mmTPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE06A7C
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE06A80
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE06A84
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE06A88
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE06A8C
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE06A90
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE06A94
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE06A98
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE06A9C
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE06AA0
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE06AA4
+
+#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE06AA8
+
+#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE06AAC
+
+#define mmTPC0_CFG_QM_TENSOR_3_PADDING_VALUE 0xE06AB0
+
+#define mmTPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE06AB4
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE06AB8
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE06ABC
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE06AC0
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE06AC4
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE06AC8
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE06ACC
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE06AD0
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE06AD4
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE06AD8
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE06ADC
+
+#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE06AE0
+
+#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE06AE4
+
+#define mmTPC0_CFG_QM_TENSOR_4_PADDING_VALUE 0xE06AE8
+
+#define mmTPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE06AEC
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE06AF0
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE06AF4
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE06AF8
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE06AFC
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE06B00
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE06B04
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE06B08
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE06B0C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE06B10
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE06B14
+
+#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE06B18
+
+#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE06B1C
+
+#define mmTPC0_CFG_QM_TENSOR_5_PADDING_VALUE 0xE06B20
+
+#define mmTPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE06B24
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE06B28
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE06B2C
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE06B30
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE06B34
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE06B38
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE06B3C
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE06B40
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE06B44
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE06B48
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE06B4C
+
+#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE06B50
+
+#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE06B54
+
+#define mmTPC0_CFG_QM_TENSOR_6_PADDING_VALUE 0xE06B58
+
+#define mmTPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE06B5C
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE06B60
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE06B64
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE06B68
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE06B6C
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE06B70
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE06B74
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE06B78
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE06B7C
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE06B80
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE06B84
+
+#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE06B88
+
+#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE06B8C
+
+#define mmTPC0_CFG_QM_TENSOR_7_PADDING_VALUE 0xE06B90
+
+#define mmTPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE06B94
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE06B98
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE06B9C
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE06BA0
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE06BA4
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE06BA8
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE06BAC
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE06BB0
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE06BB4
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE06BB8
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE06BBC
+
+#define mmTPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xE06BC0
+
+#define mmTPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xE06BC4
+
+#define mmTPC0_CFG_QM_TENSOR_8_PADDING_VALUE 0xE06BC8
+
+#define mmTPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xE06BCC
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_0_SIZE 0xE06BD0
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xE06BD4
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_1_SIZE 0xE06BD8
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xE06BDC
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_2_SIZE 0xE06BE0
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xE06BE4
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_3_SIZE 0xE06BE8
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xE06BEC
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_4_SIZE 0xE06BF0
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xE06BF4
+
+#define mmTPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xE06BF8
+
+#define mmTPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xE06BFC
+
+#define mmTPC0_CFG_QM_TENSOR_9_PADDING_VALUE 0xE06C00
+
+#define mmTPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xE06C04
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_0_SIZE 0xE06C08
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xE06C0C
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_1_SIZE 0xE06C10
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xE06C14
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_2_SIZE 0xE06C18
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xE06C1C
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_3_SIZE 0xE06C20
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xE06C24
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_4_SIZE 0xE06C28
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xE06C2C
+
+#define mmTPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xE06C30
+
+#define mmTPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xE06C34
+
+#define mmTPC0_CFG_QM_TENSOR_10_PADDING_VALUE 0xE06C38
+
+#define mmTPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xE06C3C
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_0_SIZE 0xE06C40
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xE06C44
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_1_SIZE 0xE06C48
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xE06C4C
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_2_SIZE 0xE06C50
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xE06C54
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_3_SIZE 0xE06C58
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xE06C5C
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_4_SIZE 0xE06C60
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xE06C64
+
+#define mmTPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xE06C68
+
+#define mmTPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xE06C6C
+
+#define mmTPC0_CFG_QM_TENSOR_11_PADDING_VALUE 0xE06C70
+
+#define mmTPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xE06C74
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_0_SIZE 0xE06C78
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xE06C7C
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_1_SIZE 0xE06C80
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xE06C84
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_2_SIZE 0xE06C88
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xE06C8C
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_3_SIZE 0xE06C90
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xE06C94
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_4_SIZE 0xE06C98
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xE06C9C
+
+#define mmTPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xE06CA0
+
+#define mmTPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xE06CA4
+
+#define mmTPC0_CFG_QM_TENSOR_12_PADDING_VALUE 0xE06CA8
+
+#define mmTPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xE06CAC
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_0_SIZE 0xE06CB0
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xE06CB4
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_1_SIZE 0xE06CB8
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xE06CBC
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_2_SIZE 0xE06CC0
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xE06CC4
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_3_SIZE 0xE06CC8
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xE06CCC
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_4_SIZE 0xE06CD0
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xE06CD4
+
+#define mmTPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xE06CD8
+
+#define mmTPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xE06CDC
+
+#define mmTPC0_CFG_QM_TENSOR_13_PADDING_VALUE 0xE06CE0
+
+#define mmTPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xE06CE4
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_0_SIZE 0xE06CE8
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xE06CEC
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_1_SIZE 0xE06CF0
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xE06CF4
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_2_SIZE 0xE06CF8
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xE06CFC
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_3_SIZE 0xE06D00
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xE06D04
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_4_SIZE 0xE06D08
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xE06D0C
+
+#define mmTPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xE06D10
+
+#define mmTPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xE06D14
+
+#define mmTPC0_CFG_QM_TENSOR_14_PADDING_VALUE 0xE06D18
+
+#define mmTPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xE06D1C
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_0_SIZE 0xE06D20
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xE06D24
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_1_SIZE 0xE06D28
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xE06D2C
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_2_SIZE 0xE06D30
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xE06D34
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_3_SIZE 0xE06D38
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xE06D3C
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_4_SIZE 0xE06D40
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xE06D44
+
+#define mmTPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xE06D48
+
+#define mmTPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xE06D4C
+
+#define mmTPC0_CFG_QM_TENSOR_15_PADDING_VALUE 0xE06D50
+
+#define mmTPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xE06D54
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_0_SIZE 0xE06D58
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xE06D5C
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_1_SIZE 0xE06D60
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xE06D64
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_2_SIZE 0xE06D68
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xE06D6C
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_3_SIZE 0xE06D70
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xE06D74
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_4_SIZE 0xE06D78
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xE06D7C
+
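The sixteen QM_TENSOR descriptor blocks above repeat at a fixed stride: each tensor's BASE_ADDR_LOW sits 0x38 bytes (14 registers of 4 bytes each) past the previous one, e.g. 0xE06A38 - 0xE06A00 = 0x38. A minimal sketch of indexing a descriptor register by tensor number, using a hypothetical stride constant and helper name (neither appears in this patch):

	#include <linux/types.h>

	/* Hypothetical: stride between consecutive tensor descriptors,
	 * derived from the offsets listed above (0xE06A38 - 0xE06A00).
	 */
	#define TPC0_CFG_QM_TENSOR_DESC_STRIDE	0x38

	static inline u32 tpc0_cfg_qm_tensor_base_addr_low(unsigned int idx)
	{
		/* valid for idx 0..15 per the listing above */
		return mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW +
		       idx * TPC0_CFG_QM_TENSOR_DESC_STRIDE;
	}
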
+#define mmTPC0_CFG_QM_SYNC_OBJECT_MESSAGE 0xE06D80
+
+#define mmTPC0_CFG_QM_SYNC_OBJECT_ADDR 0xE06D84
+
+#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE06D88
+
+#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE06D8C
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_0 0xE06D90
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_0 0xE06D94
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_1 0xE06D98
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_1 0xE06D9C
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_2 0xE06DA0
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_2 0xE06DA4
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_3 0xE06DA8
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_3 0xE06DAC
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_4 0xE06DB0
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_4 0xE06DB4
+
+#define mmTPC0_CFG_QM_KERNEL_CONFIG 0xE06DB8
+
+#define mmTPC0_CFG_QM_KERNEL_ID 0xE06DBC
+
+#define mmTPC0_CFG_QM_SRF_0 0xE06DC0
+
+#define mmTPC0_CFG_QM_SRF_1 0xE06DC4
+
+#define mmTPC0_CFG_QM_SRF_2 0xE06DC8
+
+#define mmTPC0_CFG_QM_SRF_3 0xE06DCC
+
+#define mmTPC0_CFG_QM_SRF_4 0xE06DD0
+
+#define mmTPC0_CFG_QM_SRF_5 0xE06DD4
+
+#define mmTPC0_CFG_QM_SRF_6 0xE06DD8
+
+#define mmTPC0_CFG_QM_SRF_7 0xE06DDC
+
+#define mmTPC0_CFG_QM_SRF_8 0xE06DE0
+
+#define mmTPC0_CFG_QM_SRF_9 0xE06DE4
+
+#define mmTPC0_CFG_QM_SRF_10 0xE06DE8
+
+#define mmTPC0_CFG_QM_SRF_11 0xE06DEC
+
+#define mmTPC0_CFG_QM_SRF_12 0xE06DF0
+
+#define mmTPC0_CFG_QM_SRF_13 0xE06DF4
+
+#define mmTPC0_CFG_QM_SRF_14 0xE06DF8
+
+#define mmTPC0_CFG_QM_SRF_15 0xE06DFC
+
+#define mmTPC0_CFG_QM_SRF_16 0xE06E00
+
+#define mmTPC0_CFG_QM_SRF_17 0xE06E04
+
+#define mmTPC0_CFG_QM_SRF_18 0xE06E08
+
+#define mmTPC0_CFG_QM_SRF_19 0xE06E0C
+
+#define mmTPC0_CFG_QM_SRF_20 0xE06E10
+
+#define mmTPC0_CFG_QM_SRF_21 0xE06E14
+
+#define mmTPC0_CFG_QM_SRF_22 0xE06E18
+
+#define mmTPC0_CFG_QM_SRF_23 0xE06E1C
+
+#define mmTPC0_CFG_QM_SRF_24 0xE06E20
+
+#define mmTPC0_CFG_QM_SRF_25 0xE06E24
+
+#define mmTPC0_CFG_QM_SRF_26 0xE06E28
+
+#define mmTPC0_CFG_QM_SRF_27 0xE06E2C
+
+#define mmTPC0_CFG_QM_SRF_28 0xE06E30
+
+#define mmTPC0_CFG_QM_SRF_29 0xE06E34
+
+#define mmTPC0_CFG_QM_SRF_30 0xE06E38
+
+#define mmTPC0_CFG_QM_SRF_31 0xE06E3C
+
+#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */
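The mm* values in the header above are register addresses within the device's address space. A minimal sketch of how such offsets are typically consumed, assuming an ioremap()'d base pointer against which they act as byte offsets and plain readl()/writel() accessors; the habanalabs driver itself may route accesses through its own wrappers and a different base translation:

	#include <linux/io.h>
	#include <linux/types.h>

	/* Hypothetical accessors: 'base' is assumed to map the region so
	 * that the mm* values can be used directly as byte offsets.
	 */
	static inline u32 tpc0_cfg_read(void __iomem *base, u32 reg)
	{
		return readl(base + reg);
	}

	static inline void tpc0_cfg_write(void __iomem *base, u32 reg, u32 val)
	{
		writel(val, base + reg);
	}

	/* e.g. request a TPC0 stall, assuming bit 0 of TPC_STALL is the
	 * stall request bit (an assumption; this header only gives offsets)
	 */
	static inline void tpc0_stall(void __iomem *base)
	{
		tpc0_cfg_write(base, mmTPC0_CFG_TPC_STALL, 1);
	}
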
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
new file mode 100644
index 000000000000..8e71532c6f36
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
@@ -0,0 +1,800 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_QM_MASKS_H_
+#define ASIC_REG_TPC0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* TPC0_QM_GLBL_CFG0 */
+#define TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define TPC0_QM_GLBL_CFG0_PQF_EN_MASK 0xF
+#define TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT 4
+#define TPC0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0
+#define TPC0_QM_GLBL_CFG0_CP_EN_SHIFT 9
+#define TPC0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00
+
+/* TPC0_QM_GLBL_CFG1 */
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0
+#define TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT 9
+#define TPC0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000
+
+/* TPC0_QM_GLBL_PROT */
+#define TPC0_QM_GLBL_PROT_PQF_SHIFT 0
+#define TPC0_QM_GLBL_PROT_PQF_MASK 0xF
+#define TPC0_QM_GLBL_PROT_CQF_SHIFT 4
+#define TPC0_QM_GLBL_PROT_CQF_MASK 0x1F0
+#define TPC0_QM_GLBL_PROT_CP_SHIFT 9
+#define TPC0_QM_GLBL_PROT_CP_MASK 0x3E00
+#define TPC0_QM_GLBL_PROT_ERR_SHIFT 14
+#define TPC0_QM_GLBL_PROT_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_PROT_ARB_SHIFT 15
+#define TPC0_QM_GLBL_PROT_ARB_MASK 0x8000
+
+/* TPC0_QM_GLBL_ERR_CFG */
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000
+#define TPC0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31
+#define TPC0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000
+
+/* TPC0_QM_GLBL_SECURE_PROPS */
+#define TPC0_QM_GLBL_SECURE_PROPS_0_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_1_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_2_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_3_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_4_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_NON_SECURE_PROPS */
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_STS0 */
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0
+#define TPC0_QM_GLBL_STS0_CP_IDLE_SHIFT 9
+#define TPC0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000
+#define TPC0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31
+#define TPC0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000
+
+/* TPC0_QM_GLBL_STS1 */
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_STS1_CP_WREG_ERR_SHIFT 6
+#define TPC0_QM_GLBL_STS1_CP_WREG_ERR_MASK 0x40
+#define TPC0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8
+#define TPC0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100
+#define TPC0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9
+#define TPC0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200
+#define TPC0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10
+#define TPC0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400
+#define TPC0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11
+#define TPC0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800
+#define TPC0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12
+#define TPC0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define TPC0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13
+#define TPC0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define TPC0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14
+#define TPC0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15
+#define TPC0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* TPC0_QM_GLBL_STS1_4 */
+#define TPC0_QM_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_STS1_4_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_STS1_4_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_STS1_4_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_STS1_4_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6
+#define TPC0_QM_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* TPC0_QM_GLBL_MSG_EN */
+#define TPC0_QM_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0
+#define TPC0_QM_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1
+#define TPC0_QM_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6
+#define TPC0_QM_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* TPC0_QM_GLBL_MSG_EN_4 */
+#define TPC0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6
+#define TPC0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* TPC0_QM_PQ_BASE_LO */
+#define TPC0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_BASE_HI */
+#define TPC0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_SIZE */
+#define TPC0_QM_PQ_SIZE_VAL_SHIFT 0
+#define TPC0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PI */
+#define TPC0_QM_PQ_PI_VAL_SHIFT 0
+#define TPC0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CI */
+#define TPC0_QM_PQ_CI_VAL_SHIFT 0
+#define TPC0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CFG0 */
+#define TPC0_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_QM_PQ_CFG1 */
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_ARUSER_31_11 */
+#define TPC0_QM_PQ_ARUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_PQ_STS0 */
+#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_STS1 */
+#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define TPC0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* TPC0_QM_CQ_CFG0 */
+#define TPC0_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_QM_CQ_CFG1 */
+#define TPC0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_ARUSER_31_11 */
+#define TPC0_QM_CQ_ARUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_CQ_STS0 */
+#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_STS1 */
+#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define TPC0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* TPC0_QM_CQ_PTR_LO_0 */
+#define TPC0_QM_CQ_PTR_LO_0_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_0 */
+#define TPC0_QM_CQ_PTR_HI_0_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_0 */
+#define TPC0_QM_CQ_TSIZE_0_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_0 */
+#define TPC0_QM_CQ_CTL_0_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_0_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_0_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_0_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_1 */
+#define TPC0_QM_CQ_PTR_LO_1_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_1 */
+#define TPC0_QM_CQ_PTR_HI_1_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_1 */
+#define TPC0_QM_CQ_TSIZE_1_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_1 */
+#define TPC0_QM_CQ_CTL_1_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_1_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_1_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_1_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_2 */
+#define TPC0_QM_CQ_PTR_LO_2_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_2 */
+#define TPC0_QM_CQ_PTR_HI_2_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_2 */
+#define TPC0_QM_CQ_TSIZE_2_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_2 */
+#define TPC0_QM_CQ_CTL_2_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_2_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_2_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_2_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_3 */
+#define TPC0_QM_CQ_PTR_LO_3_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_3 */
+#define TPC0_QM_CQ_PTR_HI_3_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_3 */
+#define TPC0_QM_CQ_TSIZE_3_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_3 */
+#define TPC0_QM_CQ_CTL_3_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_3_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_3_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_3_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_4 */
+#define TPC0_QM_CQ_PTR_LO_4_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_4 */
+#define TPC0_QM_CQ_PTR_HI_4_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_4 */
+#define TPC0_QM_CQ_TSIZE_4_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_4 */
+#define TPC0_QM_CQ_CTL_4_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_4_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_4_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_4_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_STS */
+#define TPC0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_STS */
+#define TPC0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_STS */
+#define TPC0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_STS */
+#define TPC0_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_IFIFO_CNT */
+#define TPC0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define TPC0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* TPC0_QM_CP_MSG_BASE0_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE0_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE1_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE1_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE2_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE2_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE3_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE3_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_TSIZE_OFFSET */
+#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_FENCE0_RDATA */
+#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE1_RDATA */
+#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE2_RDATA */
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE3_RDATA */
+#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE0_CNT */
+#define TPC0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF
+
+/* TPC0_QM_CP_FENCE1_CNT */
+#define TPC0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF
+
+/* TPC0_QM_CP_FENCE2_CNT */
+#define TPC0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF
+
+/* TPC0_QM_CP_FENCE3_CNT */
+#define TPC0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF
+
+/* TPC0_QM_CP_STS */
+#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_CP_STS_ERDY_SHIFT 16
+#define TPC0_QM_CP_STS_ERDY_MASK 0x10000
+#define TPC0_QM_CP_STS_RRDY_SHIFT 17
+#define TPC0_QM_CP_STS_RRDY_MASK 0x20000
+#define TPC0_QM_CP_STS_MRDY_SHIFT 18
+#define TPC0_QM_CP_STS_MRDY_MASK 0x40000
+#define TPC0_QM_CP_STS_SW_STOP_SHIFT 19
+#define TPC0_QM_CP_STS_SW_STOP_MASK 0x80000
+#define TPC0_QM_CP_STS_FENCE_ID_SHIFT 20
+#define TPC0_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* TPC0_QM_CP_CURRENT_INST_LO */
+#define TPC0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_CURRENT_INST_HI */
+#define TPC0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_BARRIER_CFG */
+#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+#define TPC0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16
+#define TPC0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000
+
+/* TPC0_QM_CP_DBG_0 */
+#define TPC0_QM_CP_DBG_0_CS_SHIFT 0
+#define TPC0_QM_CP_DBG_0_CS_MASK 0xF
+#define TPC0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4
+#define TPC0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10
+#define TPC0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5
+#define TPC0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20
+#define TPC0_QM_CP_DBG_0_MREB_STALL_SHIFT 6
+#define TPC0_QM_CP_DBG_0_MREB_STALL_MASK 0x40
+#define TPC0_QM_CP_DBG_0_STALL_SHIFT 7
+#define TPC0_QM_CP_DBG_0_STALL_MASK 0x80
+
+/* TPC0_QM_CP_ARUSER_31_11 */
+#define TPC0_QM_CP_ARUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_CP_AWUSER_31_11 */
+#define TPC0_QM_CP_AWUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_ARB_CFG_0 */
+#define TPC0_QM_ARB_CFG_0_TYPE_SHIFT 0
+#define TPC0_QM_ARB_CFG_0_TYPE_MASK 0x1
+#define TPC0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4
+#define TPC0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10
+#define TPC0_QM_ARB_CFG_0_EN_SHIFT 8
+#define TPC0_QM_ARB_CFG_0_EN_MASK 0x100
+#define TPC0_QM_ARB_CFG_0_MASK_SHIFT 12
+#define TPC0_QM_ARB_CFG_0_MASK_MASK 0xF000
+#define TPC0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16
+#define TPC0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000
+
+/* TPC0_QM_ARB_CHOISE_Q_PUSH */
+#define TPC0_QM_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0
+#define TPC0_QM_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3
+
+/* TPC0_QM_ARB_WRR_WEIGHT */
+#define TPC0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0
+#define TPC0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_CFG_1 */
+#define TPC0_QM_ARB_CFG_1_CLR_SHIFT 0
+#define TPC0_QM_ARB_CFG_1_CLR_MASK 0x1
+
+/* TPC0_QM_ARB_MST_AVAIL_CRED */
+#define TPC0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F
+
+/* TPC0_QM_ARB_MST_CRED_INC */
+#define TPC0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_MST_CHOISE_PUSH_OFST */
+#define TPC0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST */
+#define TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0
+#define TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_MST_SLAVE_EN */
+#define TPC0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_MST_QUIET_PER */
+#define TPC0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_SLV_CHOISE_WDT */
+#define TPC0_QM_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0
+#define TPC0_QM_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_SLV_ID */
+#define TPC0_QM_ARB_SLV_ID_VAL_SHIFT 0
+#define TPC0_QM_ARB_SLV_ID_VAL_MASK 0x1F
+
+/* TPC0_QM_ARB_MSG_MAX_INFLIGHT */
+#define TPC0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0
+#define TPC0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F
+
+/* TPC0_QM_ARB_MSG_AWUSER_31_11 */
+#define TPC0_QM_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_ARB_MSG_AWUSER_SEC_PROP */
+#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0
+#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF
+#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10
+#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400
+
+/* TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP */
+#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0
+#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF
+#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10
+#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400
+
+/* TPC0_QM_ARB_BASE_LO */
+#define TPC0_QM_ARB_BASE_LO_VAL_SHIFT 0
+#define TPC0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_BASE_HI */
+#define TPC0_QM_ARB_BASE_HI_VAL_SHIFT 0
+#define TPC0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_STATE_STS */
+#define TPC0_QM_ARB_STATE_STS_VAL_SHIFT 0
+#define TPC0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_CHOISE_FULLNESS_STS */
+#define TPC0_QM_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0
+#define TPC0_QM_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F
+
+/* TPC0_QM_ARB_MSG_STS */
+#define TPC0_QM_ARB_MSG_STS_FULL_SHIFT 0
+#define TPC0_QM_ARB_MSG_STS_FULL_MASK 0x1
+#define TPC0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1
+#define TPC0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2
+
+/* TPC0_QM_ARB_SLV_CHOISE_Q_HEAD */
+#define TPC0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0
+#define TPC0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3
+
+/* TPC0_QM_ARB_ERR_CAUSE */
+#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0
+#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1
+#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1
+#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2
+#define TPC0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2
+#define TPC0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4
+
+/* TPC0_QM_ARB_ERR_MSG_EN */
+#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0
+#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
+#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1
+#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
+#define TPC0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2
+#define TPC0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+/* TPC0_QM_ARB_ERR_STS_DRP */
+#define TPC0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0
+#define TPC0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3
+
+/* TPC0_QM_ARB_MST_CRED_STS */
+#define TPC0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F
+
+/* TPC0_QM_CGM_CFG */
+#define TPC0_QM_CGM_CFG_IDLE_TH_SHIFT 0
+#define TPC0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF
+#define TPC0_QM_CGM_CFG_G2F_TH_SHIFT 16
+#define TPC0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000
+#define TPC0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT 24
+#define TPC0_QM_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000
+#define TPC0_QM_CGM_CFG_EN_SHIFT 31
+#define TPC0_QM_CGM_CFG_EN_MASK 0x80000000
+
+/* TPC0_QM_CGM_STS */
+#define TPC0_QM_CGM_STS_ST_SHIFT 0
+#define TPC0_QM_CGM_STS_ST_MASK 0x3
+#define TPC0_QM_CGM_STS_CG_SHIFT 4
+#define TPC0_QM_CGM_STS_CG_MASK 0x10
+#define TPC0_QM_CGM_STS_AGENT_IDLE_SHIFT 8
+#define TPC0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
+#define TPC0_QM_CGM_STS_AXI_IDLE_SHIFT 9
+#define TPC0_QM_CGM_STS_AXI_IDLE_MASK 0x200
+#define TPC0_QM_CGM_STS_CP_IDLE_SHIFT 10
+#define TPC0_QM_CGM_STS_CP_IDLE_MASK 0x400
+
+/* TPC0_QM_CGM_CFG1 */
+#define TPC0_QM_CGM_CFG1_MASK_TH_SHIFT 0
+#define TPC0_QM_CGM_CFG1_MASK_TH_MASK 0xFF
+
+/* TPC0_QM_LOCAL_RANGE_BASE */
+#define TPC0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0
+#define TPC0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF
+
+/* TPC0_QM_LOCAL_RANGE_SIZE */
+#define TPC0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0
+#define TPC0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF
+
+/* TPC0_QM_CSMR_STRICT_PRIO_CFG */
+#define TPC0_QM_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0
+#define TPC0_QM_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1
+
+/* TPC0_QM_HBW_RD_RATE_LIM_CFG_1 */
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* TPC0_QM_LBW_WR_RATE_LIM_CFG_0 */
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* TPC0_QM_LBW_WR_RATE_LIM_CFG_1 */
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* TPC0_QM_HBW_RD_RATE_LIM_CFG_0 */
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* TPC0_QM_GLBL_AXCACHE */
+#define TPC0_QM_GLBL_AXCACHE_AR_SHIFT 0
+#define TPC0_QM_GLBL_AXCACHE_AR_MASK 0xF
+#define TPC0_QM_GLBL_AXCACHE_AW_SHIFT 16
+#define TPC0_QM_GLBL_AXCACHE_AW_MASK 0xF0000
+
+/* TPC0_QM_IND_GW_APB_CFG */
+#define TPC0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0
+#define TPC0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF
+#define TPC0_QM_IND_GW_APB_CFG_CMD_SHIFT 31
+#define TPC0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000
+
+/* TPC0_QM_IND_GW_APB_WDATA */
+#define TPC0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0
+#define TPC0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_IND_GW_APB_RDATA */
+#define TPC0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0
+#define TPC0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_IND_GW_APB_STATUS */
+#define TPC0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0
+#define TPC0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1
+#define TPC0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1
+#define TPC0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2
+
+/* TPC0_QM_GLBL_ERR_ADDR_LO */
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_ADDR_HI */
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_WDATA */
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_MEM_INIT_BUSY */
+#define TPC0_QM_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0
+#define TPC0_QM_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF
+
+#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */
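Every field in the masks header above comes as a _SHIFT/_MASK pair: the mask selects the field in place, and the shift normalizes it to bit 0. A minimal sketch of the usual extract/insert pattern, with hypothetical helper names that are not part of this patch:

	#include <linux/types.h>

	static inline u32 qm_field_get(u32 reg_val, u32 mask, u32 shift)
	{
		return (reg_val & mask) >> shift;
	}

	static inline u32 qm_field_set(u32 reg_val, u32 mask, u32 shift,
				       u32 field)
	{
		return (reg_val & ~mask) | ((field << shift) & mask);
	}

	/* e.g. check that all four PQ fetchers report idle in GLBL_STS0
	 * (PQF_IDLE is a 4-bit field at bits 3:0 per the masks above)
	 */
	static inline bool tpc0_qm_pqf_idle(u32 glbl_sts0)
	{
		return qm_field_get(glbl_sts0, TPC0_QM_GLBL_STS0_PQF_IDLE_MASK,
				    TPC0_QM_GLBL_STS0_PQF_IDLE_SHIFT) == 0xF;
	}
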
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
new file mode 100644
index 000000000000..f9e310ab6df2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_QM_REGS_H_
+#define ASIC_REG_TPC0_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC0_QM_GLBL_CFG0 0xE08000
+
+#define mmTPC0_QM_GLBL_CFG1 0xE08004
+
+#define mmTPC0_QM_GLBL_PROT 0xE08008
+
+#define mmTPC0_QM_GLBL_ERR_CFG 0xE0800C
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_0 0xE08010
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_1 0xE08014
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_2 0xE08018
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_3 0xE0801C
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_4 0xE08020
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_0 0xE08024
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_1 0xE08028
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_2 0xE0802C
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_3 0xE08030
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_4 0xE08034
+
+#define mmTPC0_QM_GLBL_STS0 0xE08038
+
+#define mmTPC0_QM_GLBL_STS1_0 0xE08040
+
+#define mmTPC0_QM_GLBL_STS1_1 0xE08044
+
+#define mmTPC0_QM_GLBL_STS1_2 0xE08048
+
+#define mmTPC0_QM_GLBL_STS1_3 0xE0804C
+
+#define mmTPC0_QM_GLBL_STS1_4 0xE08050
+
+#define mmTPC0_QM_GLBL_MSG_EN_0 0xE08054
+
+#define mmTPC0_QM_GLBL_MSG_EN_1 0xE08058
+
+#define mmTPC0_QM_GLBL_MSG_EN_2 0xE0805C
+
+#define mmTPC0_QM_GLBL_MSG_EN_3 0xE08060
+
+#define mmTPC0_QM_GLBL_MSG_EN_4 0xE08068
+
+#define mmTPC0_QM_PQ_BASE_LO_0 0xE08070
+
+#define mmTPC0_QM_PQ_BASE_LO_1 0xE08074
+
+#define mmTPC0_QM_PQ_BASE_LO_2 0xE08078
+
+#define mmTPC0_QM_PQ_BASE_LO_3 0xE0807C
+
+#define mmTPC0_QM_PQ_BASE_HI_0 0xE08080
+
+#define mmTPC0_QM_PQ_BASE_HI_1 0xE08084
+
+#define mmTPC0_QM_PQ_BASE_HI_2 0xE08088
+
+#define mmTPC0_QM_PQ_BASE_HI_3 0xE0808C
+
+#define mmTPC0_QM_PQ_SIZE_0 0xE08090
+
+#define mmTPC0_QM_PQ_SIZE_1 0xE08094
+
+#define mmTPC0_QM_PQ_SIZE_2 0xE08098
+
+#define mmTPC0_QM_PQ_SIZE_3 0xE0809C
+
+#define mmTPC0_QM_PQ_PI_0 0xE080A0
+
+#define mmTPC0_QM_PQ_PI_1 0xE080A4
+
+#define mmTPC0_QM_PQ_PI_2 0xE080A8
+
+#define mmTPC0_QM_PQ_PI_3 0xE080AC
+
+#define mmTPC0_QM_PQ_CI_0 0xE080B0
+
+#define mmTPC0_QM_PQ_CI_1 0xE080B4
+
+#define mmTPC0_QM_PQ_CI_2 0xE080B8
+
+#define mmTPC0_QM_PQ_CI_3 0xE080BC
+
+#define mmTPC0_QM_PQ_CFG0_0 0xE080C0
+
+#define mmTPC0_QM_PQ_CFG0_1 0xE080C4
+
+#define mmTPC0_QM_PQ_CFG0_2 0xE080C8
+
+#define mmTPC0_QM_PQ_CFG0_3 0xE080CC
+
+#define mmTPC0_QM_PQ_CFG1_0 0xE080D0
+
+#define mmTPC0_QM_PQ_CFG1_1 0xE080D4
+
+#define mmTPC0_QM_PQ_CFG1_2 0xE080D8
+
+#define mmTPC0_QM_PQ_CFG1_3 0xE080DC
+
+#define mmTPC0_QM_PQ_ARUSER_31_11_0 0xE080E0
+
+#define mmTPC0_QM_PQ_ARUSER_31_11_1 0xE080E4
+
+#define mmTPC0_QM_PQ_ARUSER_31_11_2 0xE080E8
+
+#define mmTPC0_QM_PQ_ARUSER_31_11_3 0xE080EC
+
+#define mmTPC0_QM_PQ_STS0_0 0xE080F0
+
+#define mmTPC0_QM_PQ_STS0_1 0xE080F4
+
+#define mmTPC0_QM_PQ_STS0_2 0xE080F8
+
+#define mmTPC0_QM_PQ_STS0_3 0xE080FC
+
+#define mmTPC0_QM_PQ_STS1_0 0xE08100
+
+#define mmTPC0_QM_PQ_STS1_1 0xE08104
+
+#define mmTPC0_QM_PQ_STS1_2 0xE08108
+
+#define mmTPC0_QM_PQ_STS1_3 0xE0810C
+
+#define mmTPC0_QM_CQ_CFG0_0 0xE08110
+
+#define mmTPC0_QM_CQ_CFG0_1 0xE08114
+
+#define mmTPC0_QM_CQ_CFG0_2 0xE08118
+
+#define mmTPC0_QM_CQ_CFG0_3 0xE0811C
+
+#define mmTPC0_QM_CQ_CFG0_4 0xE08120
+
+#define mmTPC0_QM_CQ_CFG1_0 0xE08124
+
+#define mmTPC0_QM_CQ_CFG1_1 0xE08128
+
+#define mmTPC0_QM_CQ_CFG1_2 0xE0812C
+
+#define mmTPC0_QM_CQ_CFG1_3 0xE08130
+
+#define mmTPC0_QM_CQ_CFG1_4 0xE08134
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_0 0xE08138
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_1 0xE0813C
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_2 0xE08140
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_3 0xE08144
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_4 0xE08148
+
+#define mmTPC0_QM_CQ_STS0_0 0xE0814C
+
+#define mmTPC0_QM_CQ_STS0_1 0xE08150
+
+#define mmTPC0_QM_CQ_STS0_2 0xE08154
+
+#define mmTPC0_QM_CQ_STS0_3 0xE08158
+
+#define mmTPC0_QM_CQ_STS0_4 0xE0815C
+
+#define mmTPC0_QM_CQ_STS1_0 0xE08160
+
+#define mmTPC0_QM_CQ_STS1_1 0xE08164
+
+#define mmTPC0_QM_CQ_STS1_2 0xE08168
+
+#define mmTPC0_QM_CQ_STS1_3 0xE0816C
+
+#define mmTPC0_QM_CQ_STS1_4 0xE08170
+
+#define mmTPC0_QM_CQ_PTR_LO_0 0xE08174
+
+#define mmTPC0_QM_CQ_PTR_HI_0 0xE08178
+
+#define mmTPC0_QM_CQ_TSIZE_0 0xE0817C
+
+#define mmTPC0_QM_CQ_CTL_0 0xE08180
+
+#define mmTPC0_QM_CQ_PTR_LO_1 0xE08184
+
+#define mmTPC0_QM_CQ_PTR_HI_1 0xE08188
+
+#define mmTPC0_QM_CQ_TSIZE_1 0xE0818C
+
+#define mmTPC0_QM_CQ_CTL_1 0xE08190
+
+#define mmTPC0_QM_CQ_PTR_LO_2 0xE08194
+
+#define mmTPC0_QM_CQ_PTR_HI_2 0xE08198
+
+#define mmTPC0_QM_CQ_TSIZE_2 0xE0819C
+
+#define mmTPC0_QM_CQ_CTL_2 0xE081A0
+
+#define mmTPC0_QM_CQ_PTR_LO_3 0xE081A4
+
+#define mmTPC0_QM_CQ_PTR_HI_3 0xE081A8
+
+#define mmTPC0_QM_CQ_TSIZE_3 0xE081AC
+
+#define mmTPC0_QM_CQ_CTL_3 0xE081B0
+
+#define mmTPC0_QM_CQ_PTR_LO_4 0xE081B4
+
+#define mmTPC0_QM_CQ_PTR_HI_4 0xE081B8
+
+#define mmTPC0_QM_CQ_TSIZE_4 0xE081BC
+
+#define mmTPC0_QM_CQ_CTL_4 0xE081C0
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_0 0xE081C4
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_1 0xE081C8
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_2 0xE081CC
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_3 0xE081D0
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_4 0xE081D4
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_0 0xE081D8
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_1 0xE081DC
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_2 0xE081E0
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_3 0xE081E4
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_4 0xE081E8
+
+#define mmTPC0_QM_CQ_TSIZE_STS_0 0xE081EC
+
+#define mmTPC0_QM_CQ_TSIZE_STS_1 0xE081F0
+
+#define mmTPC0_QM_CQ_TSIZE_STS_2 0xE081F4
+
+#define mmTPC0_QM_CQ_TSIZE_STS_3 0xE081F8
+
+#define mmTPC0_QM_CQ_TSIZE_STS_4 0xE081FC
+
+#define mmTPC0_QM_CQ_CTL_STS_0 0xE08200
+
+#define mmTPC0_QM_CQ_CTL_STS_1 0xE08204
+
+#define mmTPC0_QM_CQ_CTL_STS_2 0xE08208
+
+#define mmTPC0_QM_CQ_CTL_STS_3 0xE0820C
+
+#define mmTPC0_QM_CQ_CTL_STS_4 0xE08210
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_0 0xE08214
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_1 0xE08218
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_2 0xE0821C
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_3 0xE08220
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_4 0xE08224
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 0xE08228
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_1 0xE0822C
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_2 0xE08230
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_3 0xE08234
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_4 0xE08238
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 0xE0823C
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_1 0xE08240
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_2 0xE08244
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_3 0xE08248
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_4 0xE0824C
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 0xE08250
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_1 0xE08254
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_2 0xE08258
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_3 0xE0825C
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_4 0xE08260
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 0xE08264
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_1 0xE08268
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_2 0xE0826C
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_3 0xE08270
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_4 0xE08274
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 0xE08278
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_1 0xE0827C
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 0xE08280
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_3 0xE08284
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_4 0xE08288
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 0xE0828C
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_1 0xE08290
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_2 0xE08294
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_3 0xE08298
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_4 0xE0829C
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 0xE082A0
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_1 0xE082A4
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_2 0xE082A8
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_3 0xE082AC
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_4 0xE082B0
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 0xE082B4
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_1 0xE082B8
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_2 0xE082BC
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_3 0xE082C0
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_4 0xE082C4
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 0xE082C8
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_1 0xE082CC
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_2 0xE082D0
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_3 0xE082D4
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_4 0xE082D8
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xE082E0
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xE082E4
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xE082E8
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xE082EC
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xE082F0
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xE082F4
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xE082F8
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xE082FC
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xE08300
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xE08304
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_0 0xE08308
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_1 0xE0830C
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_2 0xE08310
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_3 0xE08314
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_4 0xE08318
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_0 0xE0831C
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_1 0xE08320
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_2 0xE08324
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_3 0xE08328
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_4 0xE0832C
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_0 0xE08330
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_1 0xE08334
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_2 0xE08338
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_3 0xE0833C
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_4 0xE08340
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_0 0xE08344
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_1 0xE08348
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_2 0xE0834C
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_3 0xE08350
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_4 0xE08354
+
+#define mmTPC0_QM_CP_FENCE0_CNT_0 0xE08358
+
+#define mmTPC0_QM_CP_FENCE0_CNT_1 0xE0835C
+
+#define mmTPC0_QM_CP_FENCE0_CNT_2 0xE08360
+
+#define mmTPC0_QM_CP_FENCE0_CNT_3 0xE08364
+
+#define mmTPC0_QM_CP_FENCE0_CNT_4 0xE08368
+
+#define mmTPC0_QM_CP_FENCE1_CNT_0 0xE0836C
+
+#define mmTPC0_QM_CP_FENCE1_CNT_1 0xE08370
+
+#define mmTPC0_QM_CP_FENCE1_CNT_2 0xE08374
+
+#define mmTPC0_QM_CP_FENCE1_CNT_3 0xE08378
+
+#define mmTPC0_QM_CP_FENCE1_CNT_4 0xE0837C
+
+#define mmTPC0_QM_CP_FENCE2_CNT_0 0xE08380
+
+#define mmTPC0_QM_CP_FENCE2_CNT_1 0xE08384
+
+#define mmTPC0_QM_CP_FENCE2_CNT_2 0xE08388
+
+#define mmTPC0_QM_CP_FENCE2_CNT_3 0xE0838C
+
+#define mmTPC0_QM_CP_FENCE2_CNT_4 0xE08390
+
+#define mmTPC0_QM_CP_FENCE3_CNT_0 0xE08394
+
+#define mmTPC0_QM_CP_FENCE3_CNT_1 0xE08398
+
+#define mmTPC0_QM_CP_FENCE3_CNT_2 0xE0839C
+
+#define mmTPC0_QM_CP_FENCE3_CNT_3 0xE083A0
+
+#define mmTPC0_QM_CP_FENCE3_CNT_4 0xE083A4
+
+#define mmTPC0_QM_CP_STS_0 0xE083A8
+
+#define mmTPC0_QM_CP_STS_1 0xE083AC
+
+#define mmTPC0_QM_CP_STS_2 0xE083B0
+
+#define mmTPC0_QM_CP_STS_3 0xE083B4
+
+#define mmTPC0_QM_CP_STS_4 0xE083B8
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_0 0xE083BC
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_1 0xE083C0
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_2 0xE083C4
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_3 0xE083C8
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_4 0xE083CC
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_0 0xE083D0
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_1 0xE083D4
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_2 0xE083D8
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_3 0xE083DC
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_4 0xE083E0
+
+#define mmTPC0_QM_CP_BARRIER_CFG_0 0xE083F4
+
+#define mmTPC0_QM_CP_BARRIER_CFG_1 0xE083F8
+
+#define mmTPC0_QM_CP_BARRIER_CFG_2 0xE083FC
+
+#define mmTPC0_QM_CP_BARRIER_CFG_3 0xE08400
+
+#define mmTPC0_QM_CP_BARRIER_CFG_4 0xE08404
+
+#define mmTPC0_QM_CP_DBG_0_0 0xE08408
+
+#define mmTPC0_QM_CP_DBG_0_1 0xE0840C
+
+#define mmTPC0_QM_CP_DBG_0_2 0xE08410
+
+#define mmTPC0_QM_CP_DBG_0_3 0xE08414
+
+#define mmTPC0_QM_CP_DBG_0_4 0xE08418
+
+#define mmTPC0_QM_CP_ARUSER_31_11_0 0xE0841C
+
+#define mmTPC0_QM_CP_ARUSER_31_11_1 0xE08420
+
+#define mmTPC0_QM_CP_ARUSER_31_11_2 0xE08424
+
+#define mmTPC0_QM_CP_ARUSER_31_11_3 0xE08428
+
+#define mmTPC0_QM_CP_ARUSER_31_11_4 0xE0842C
+
+#define mmTPC0_QM_CP_AWUSER_31_11_0 0xE08430
+
+#define mmTPC0_QM_CP_AWUSER_31_11_1 0xE08434
+
+#define mmTPC0_QM_CP_AWUSER_31_11_2 0xE08438
+
+#define mmTPC0_QM_CP_AWUSER_31_11_3 0xE0843C
+
+#define mmTPC0_QM_CP_AWUSER_31_11_4 0xE08440
+
+#define mmTPC0_QM_ARB_CFG_0 0xE08A00
+
+#define mmTPC0_QM_ARB_CHOISE_Q_PUSH 0xE08A04
+
+#define mmTPC0_QM_ARB_WRR_WEIGHT_0 0xE08A08
+
+#define mmTPC0_QM_ARB_WRR_WEIGHT_1 0xE08A0C
+
+#define mmTPC0_QM_ARB_WRR_WEIGHT_2 0xE08A10
+
+#define mmTPC0_QM_ARB_WRR_WEIGHT_3 0xE08A14
+
+#define mmTPC0_QM_ARB_CFG_1 0xE08A18
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_0 0xE08A20
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_1 0xE08A24
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_2 0xE08A28
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_3 0xE08A2C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_4 0xE08A30
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_5 0xE08A34
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_6 0xE08A38
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_7 0xE08A3C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_8 0xE08A40
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_9 0xE08A44
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_10 0xE08A48
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_11 0xE08A4C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_12 0xE08A50
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_13 0xE08A54
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_14 0xE08A58
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_15 0xE08A5C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_16 0xE08A60
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_17 0xE08A64
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_18 0xE08A68
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_19 0xE08A6C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_20 0xE08A70
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_21 0xE08A74
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_22 0xE08A78
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_23 0xE08A7C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_24 0xE08A80
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_25 0xE08A84
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_26 0xE08A88
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_27 0xE08A8C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_28 0xE08A90
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_29 0xE08A94
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_30 0xE08A98
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_31 0xE08A9C
+
+#define mmTPC0_QM_ARB_MST_CRED_INC 0xE08AA0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xE08AA4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xE08AA8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xE08AAC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xE08AB0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xE08AB4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xE08AB8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xE08ABC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xE08AC0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xE08AC4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xE08AC8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xE08ACC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xE08AD0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xE08AD4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xE08AD8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xE08ADC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xE08AE0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xE08AE4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xE08AE8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xE08AEC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xE08AF0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xE08AF4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xE08AF8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xE08AFC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xE08B00
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xE08B04
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xE08B08
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xE08B0C
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xE08B10
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xE08B14
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xE08B18
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xE08B1C
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xE08B20
+
+#define mmTPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xE08B28
+
+#define mmTPC0_QM_ARB_MST_SLAVE_EN 0xE08B2C
+
+#define mmTPC0_QM_ARB_MST_QUIET_PER 0xE08B34
+
+#define mmTPC0_QM_ARB_SLV_CHOISE_WDT 0xE08B38
+
+#define mmTPC0_QM_ARB_SLV_ID 0xE08B3C
+
+#define mmTPC0_QM_ARB_MSG_MAX_INFLIGHT 0xE08B44
+
+#define mmTPC0_QM_ARB_MSG_AWUSER_31_11 0xE08B48
+
+#define mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP 0xE08B4C
+
+#define mmTPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xE08B50
+
+#define mmTPC0_QM_ARB_BASE_LO 0xE08B54
+
+#define mmTPC0_QM_ARB_BASE_HI 0xE08B58
+
+#define mmTPC0_QM_ARB_STATE_STS 0xE08B80
+
+#define mmTPC0_QM_ARB_CHOISE_FULLNESS_STS 0xE08B84
+
+#define mmTPC0_QM_ARB_MSG_STS 0xE08B88
+
+#define mmTPC0_QM_ARB_SLV_CHOISE_Q_HEAD 0xE08B8C
+
+#define mmTPC0_QM_ARB_ERR_CAUSE 0xE08B9C
+
+#define mmTPC0_QM_ARB_ERR_MSG_EN 0xE08BA0
+
+#define mmTPC0_QM_ARB_ERR_STS_DRP 0xE08BA8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_0 0xE08BB0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_1 0xE08BB4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_2 0xE08BB8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_3 0xE08BBC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_4 0xE08BC0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_5 0xE08BC4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_6 0xE08BC8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_7 0xE08BCC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_8 0xE08BD0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_9 0xE08BD4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_10 0xE08BD8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_11 0xE08BDC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_12 0xE08BE0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_13 0xE08BE4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_14 0xE08BE8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_15 0xE08BEC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_16 0xE08BF0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_17 0xE08BF4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_18 0xE08BF8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_19 0xE08BFC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_20 0xE08C00
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_21 0xE08C04
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_22 0xE08C08
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_23 0xE08C0C
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_24 0xE08C10
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_25 0xE08C14
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_26 0xE08C18
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_27 0xE08C1C
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_28 0xE08C20
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_29 0xE08C24
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_30 0xE08C28
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_31 0xE08C2C
+
+#define mmTPC0_QM_CGM_CFG 0xE08C70
+
+#define mmTPC0_QM_CGM_STS 0xE08C74
+
+#define mmTPC0_QM_CGM_CFG1 0xE08C78
+
+#define mmTPC0_QM_LOCAL_RANGE_BASE 0xE08C80
+
+#define mmTPC0_QM_LOCAL_RANGE_SIZE 0xE08C84
+
+#define mmTPC0_QM_CSMR_STRICT_PRIO_CFG 0xE08C90
+
+#define mmTPC0_QM_HBW_RD_RATE_LIM_CFG_1 0xE08C94
+
+#define mmTPC0_QM_LBW_WR_RATE_LIM_CFG_0 0xE08C98
+
+#define mmTPC0_QM_LBW_WR_RATE_LIM_CFG_1 0xE08C9C
+
+#define mmTPC0_QM_HBW_RD_RATE_LIM_CFG_0 0xE08CA0
+
+#define mmTPC0_QM_GLBL_AXCACHE 0xE08CA4
+
+#define mmTPC0_QM_IND_GW_APB_CFG 0xE08CB0
+
+#define mmTPC0_QM_IND_GW_APB_WDATA 0xE08CB4
+
+#define mmTPC0_QM_IND_GW_APB_RDATA 0xE08CB8
+
+#define mmTPC0_QM_IND_GW_APB_STATUS 0xE08CBC
+
+#define mmTPC0_QM_GLBL_ERR_ADDR_LO 0xE08CD0
+
+#define mmTPC0_QM_GLBL_ERR_ADDR_HI 0xE08CD4
+
+#define mmTPC0_QM_GLBL_ERR_WDATA 0xE08CD8
+
+#define mmTPC0_QM_GLBL_MEM_INIT_BUSY 0xE08D00
+
+#endif /* ASIC_REG_TPC0_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
new file mode 100644
index 000000000000..6736c476d979
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_CFG_REGS_H_
+#define ASIC_REG_TPC1_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE46400
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE46404
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE46408
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE4640C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE46410
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE46414
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE46418
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE4641C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE46420
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE46424
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE46428
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE4642C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE46430
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE46434
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE46438
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE4643C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE46440
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE46444
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE46448
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE4644C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE46450
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE46454
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE46458
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE4645C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE46460
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE46464
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE46468
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE4646C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE46470
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE46474
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE46478
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE4647C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE46480
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE46484
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE46488
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE4648C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE46490
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE46494
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE46498
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE4649C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE464A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE464A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE464A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE464AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE464B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE464B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE464B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE464BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE464C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE464C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE464C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE464CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE464D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE464D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE464D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE464DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE464E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE464E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE464E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE464EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE464F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE464F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE464F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE464FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE46500
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE46504
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE46508
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE4650C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE46510
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE46514
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE46518
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE4651C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE46520
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE46524
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE46528
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE4652C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE46530
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE46534
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE46538
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE4653C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE46540
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE46544
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE46548
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE4654C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE46550
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE46554
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE46558
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE4655C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE46560
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE46564
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE46568
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE4656C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE46570
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE46574
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE46578
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE4657C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE46580
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE46584
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE46588
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE4658C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE46590
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE46594
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE46598
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE4659C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE465A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE465A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE465A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE465AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE465B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE465B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE465B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE465BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xE465C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xE465C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xE465C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xE465CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xE465D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xE465D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xE465D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xE465DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xE465E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xE465E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xE465E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xE465EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xE465F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xE465F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xE465F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xE465FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xE46600
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xE46604
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xE46608
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xE4660C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xE46610
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xE46614
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xE46618
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xE4661C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xE46620
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xE46624
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xE46628
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xE4662C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xE46630
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xE46634
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xE46638
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xE4663C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xE46640
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xE46644
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xE46648
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xE4664C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xE46650
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xE46654
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xE46658
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xE4665C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xE46660
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xE46664
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xE46668
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xE4666C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xE46670
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xE46674
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xE46678
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xE4667C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xE46680
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xE46684
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xE46688
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xE4668C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xE46690
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xE46694
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xE46698
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xE4669C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xE466A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xE466A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xE466A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xE466AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xE466B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xE466B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xE466B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xE466BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xE466C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xE466C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xE466C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xE466CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xE466D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xE466D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xE466D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xE466DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xE466E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xE466E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xE466E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xE466EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xE466F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xE466F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xE466F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xE466FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xE46700
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xE46704
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xE46708
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xE4670C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xE46710
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xE46714
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xE46718
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xE4671C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xE46720
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xE46724
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xE46728
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xE4672C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xE46730
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xE46734
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xE46738
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xE4673C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xE46740
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xE46744
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xE46748
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xE4674C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xE46750
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xE46754
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xE46758
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xE4675C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xE46760
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xE46764
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xE46768
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xE4676C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xE46770
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xE46774
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xE46778
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xE4677C
+
+#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE46780
+
+#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_ADDR 0xE46784
+
+#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE46788
+
+#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE4678C
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_0 0xE46790
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_0 0xE46794
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_1 0xE46798
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_1 0xE4679C
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_2 0xE467A0
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_2 0xE467A4
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_3 0xE467A8
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_3 0xE467AC
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_4 0xE467B0
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_4 0xE467B4
+
+#define mmTPC1_CFG_KERNEL_KERNEL_CONFIG 0xE467B8
+
+#define mmTPC1_CFG_KERNEL_KERNEL_ID 0xE467BC
+
+#define mmTPC1_CFG_KERNEL_SRF_0 0xE467C0
+
+#define mmTPC1_CFG_KERNEL_SRF_1 0xE467C4
+
+#define mmTPC1_CFG_KERNEL_SRF_2 0xE467C8
+
+#define mmTPC1_CFG_KERNEL_SRF_3 0xE467CC
+
+#define mmTPC1_CFG_KERNEL_SRF_4 0xE467D0
+
+#define mmTPC1_CFG_KERNEL_SRF_5 0xE467D4
+
+#define mmTPC1_CFG_KERNEL_SRF_6 0xE467D8
+
+#define mmTPC1_CFG_KERNEL_SRF_7 0xE467DC
+
+#define mmTPC1_CFG_KERNEL_SRF_8 0xE467E0
+
+#define mmTPC1_CFG_KERNEL_SRF_9 0xE467E4
+
+#define mmTPC1_CFG_KERNEL_SRF_10 0xE467E8
+
+#define mmTPC1_CFG_KERNEL_SRF_11 0xE467EC
+
+#define mmTPC1_CFG_KERNEL_SRF_12 0xE467F0
+
+#define mmTPC1_CFG_KERNEL_SRF_13 0xE467F4
+
+#define mmTPC1_CFG_KERNEL_SRF_14 0xE467F8
+
+#define mmTPC1_CFG_KERNEL_SRF_15 0xE467FC
+
+#define mmTPC1_CFG_KERNEL_SRF_16 0xE46800
+
+#define mmTPC1_CFG_KERNEL_SRF_17 0xE46804
+
+#define mmTPC1_CFG_KERNEL_SRF_18 0xE46808
+
+#define mmTPC1_CFG_KERNEL_SRF_19 0xE4680C
+
+#define mmTPC1_CFG_KERNEL_SRF_20 0xE46810
+
+#define mmTPC1_CFG_KERNEL_SRF_21 0xE46814
+
+#define mmTPC1_CFG_KERNEL_SRF_22 0xE46818
+
+#define mmTPC1_CFG_KERNEL_SRF_23 0xE4681C
+
+#define mmTPC1_CFG_KERNEL_SRF_24 0xE46820
+
+#define mmTPC1_CFG_KERNEL_SRF_25 0xE46824
+
+#define mmTPC1_CFG_KERNEL_SRF_26 0xE46828
+
+#define mmTPC1_CFG_KERNEL_SRF_27 0xE4682C
+
+#define mmTPC1_CFG_KERNEL_SRF_28 0xE46830
+
+#define mmTPC1_CFG_KERNEL_SRF_29 0xE46834
+
+#define mmTPC1_CFG_KERNEL_SRF_30 0xE46838
+
+#define mmTPC1_CFG_KERNEL_SRF_31 0xE4683C
+
+#define mmTPC1_CFG_ROUND_CSR 0xE468FC
+
+#define mmTPC1_CFG_PROT 0xE46900
+
+#define mmTPC1_CFG_SEMAPHORE 0xE46908
+
+#define mmTPC1_CFG_VFLAGS 0xE4690C
+
+#define mmTPC1_CFG_SFLAGS 0xE46910
+
+#define mmTPC1_CFG_LFSR_POLYNOM 0xE46918
+
+#define mmTPC1_CFG_STATUS 0xE4691C
+
+#define mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH 0xE46920
+
+#define mmTPC1_CFG_CFG_SUBTRACT_VALUE 0xE46924
+
+#define mmTPC1_CFG_SM_BASE_ADDRESS_HIGH 0xE4692C
+
+#define mmTPC1_CFG_TPC_CMD 0xE46930
+
+#define mmTPC1_CFG_TPC_EXECUTE 0xE46938
+
+#define mmTPC1_CFG_TPC_STALL 0xE4693C
+
+#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_LOW 0xE46940
+
+#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE46944
+
+#define mmTPC1_CFG_RD_RATE_LIMIT 0xE46948
+
+#define mmTPC1_CFG_WR_RATE_LIMIT 0xE46950
+
+#define mmTPC1_CFG_MSS_CONFIG 0xE46954
+
+#define mmTPC1_CFG_TPC_INTR_CAUSE 0xE46958
+
+#define mmTPC1_CFG_TPC_INTR_MASK 0xE4695C
+
+#define mmTPC1_CFG_WQ_CREDITS 0xE46960
+
+#define mmTPC1_CFG_ARUSER_LO 0xE46964
+
+#define mmTPC1_CFG_ARUSER_HI 0xE46968
+
+#define mmTPC1_CFG_AWUSER_LO 0xE4696C
+
+#define mmTPC1_CFG_AWUSER_HI 0xE46970
+
+#define mmTPC1_CFG_OPCODE_EXEC 0xE46974
+
+#define mmTPC1_CFG_LUT_FUNC32_BASE_ADDR_LO 0xE46978
+
+#define mmTPC1_CFG_LUT_FUNC32_BASE_ADDR_HI 0xE4697C
+
+#define mmTPC1_CFG_LUT_FUNC64_BASE_ADDR_LO 0xE46980
+
+#define mmTPC1_CFG_LUT_FUNC64_BASE_ADDR_HI 0xE46984
+
+#define mmTPC1_CFG_LUT_FUNC128_BASE_ADDR_LO 0xE46988
+
+#define mmTPC1_CFG_LUT_FUNC128_BASE_ADDR_HI 0xE4698C
+
+#define mmTPC1_CFG_LUT_FUNC256_BASE_ADDR_LO 0xE46990
+
+#define mmTPC1_CFG_LUT_FUNC256_BASE_ADDR_HI 0xE46994
+
+#define mmTPC1_CFG_TSB_CFG_MAX_SIZE 0xE46998
+
+#define mmTPC1_CFG_TSB_CFG 0xE4699C
+
+#define mmTPC1_CFG_DBGMEM_ADD 0xE469A0
+
+#define mmTPC1_CFG_DBGMEM_DATA_WR 0xE469A4
+
+#define mmTPC1_CFG_DBGMEM_DATA_RD 0xE469A8
+
+#define mmTPC1_CFG_DBGMEM_CTRL 0xE469AC
+
+#define mmTPC1_CFG_DBGMEM_RC 0xE469B0
+
+#define mmTPC1_CFG_TSB_INFLIGHT_CNTR 0xE469B4
+
+#define mmTPC1_CFG_WQ_INFLIGHT_CNTR 0xE469B8
+
+#define mmTPC1_CFG_WQ_LBW_TOTAL_CNTR 0xE469BC
+
+#define mmTPC1_CFG_WQ_HBW_TOTAL_CNTR 0xE469C0
+
+#define mmTPC1_CFG_IRQ_OCCOUPY_CNTR 0xE469C4
+
+#define mmTPC1_CFG_FUNC_MBIST_CNTRL 0xE469D0
+
+#define mmTPC1_CFG_FUNC_MBIST_PAT 0xE469D4
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_0 0xE469D8
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_1 0xE469DC
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_2 0xE469E0
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_3 0xE469E4
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_4 0xE469E8
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_5 0xE469EC
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_6 0xE469F0
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_7 0xE469F4
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_8 0xE469F8
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE469FC
+
+#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE46A00
+
+#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE46A04
+
+#define mmTPC1_CFG_QM_TENSOR_0_PADDING_VALUE 0xE46A08
+
+#define mmTPC1_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE46A0C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE46A10
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE46A14
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE46A18
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE46A1C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE46A20
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE46A24
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE46A28
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE46A2C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE46A30
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE46A34
+
+#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE46A38
+
+#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE46A3C
+
+#define mmTPC1_CFG_QM_TENSOR_1_PADDING_VALUE 0xE46A40
+
+#define mmTPC1_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE46A44
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE46A48
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE46A4C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE46A50
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE46A54
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE46A58
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE46A5C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE46A60
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE46A64
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE46A68
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE46A6C
+
+#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE46A70
+
+#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE46A74
+
+#define mmTPC1_CFG_QM_TENSOR_2_PADDING_VALUE 0xE46A78
+
+#define mmTPC1_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE46A7C
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE46A80
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE46A84
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE46A88
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE46A8C
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE46A90
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE46A94
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE46A98
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE46A9C
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE46AA0
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE46AA4
+
+#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE46AA8
+
+#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE46AAC
+
+#define mmTPC1_CFG_QM_TENSOR_3_PADDING_VALUE 0xE46AB0
+
+#define mmTPC1_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE46AB4
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE46AB8
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE46ABC
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE46AC0
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE46AC4
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE46AC8
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE46ACC
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE46AD0
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE46AD4
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE46AD8
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE46ADC
+
+#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE46AE0
+
+#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE46AE4
+
+#define mmTPC1_CFG_QM_TENSOR_4_PADDING_VALUE 0xE46AE8
+
+#define mmTPC1_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE46AEC
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE46AF0
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE46AF4
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE46AF8
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE46AFC
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE46B00
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE46B04
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE46B08
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE46B0C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE46B10
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE46B14
+
+#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE46B18
+
+#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE46B1C
+
+#define mmTPC1_CFG_QM_TENSOR_5_PADDING_VALUE 0xE46B20
+
+#define mmTPC1_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE46B24
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE46B28
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE46B2C
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE46B30
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE46B34
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE46B38
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE46B3C
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE46B40
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE46B44
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE46B48
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE46B4C
+
+#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE46B50
+
+#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE46B54
+
+#define mmTPC1_CFG_QM_TENSOR_6_PADDING_VALUE 0xE46B58
+
+#define mmTPC1_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE46B5C
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE46B60
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE46B64
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE46B68
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE46B6C
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE46B70
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE46B74
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE46B78
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE46B7C
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE46B80
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE46B84
+
+#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE46B88
+
+#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE46B8C
+
+#define mmTPC1_CFG_QM_TENSOR_7_PADDING_VALUE 0xE46B90
+
+#define mmTPC1_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE46B94
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE46B98
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE46B9C
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE46BA0
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE46BA4
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE46BA8
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE46BAC
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE46BB0
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE46BB4
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE46BB8
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE46BBC
+
+#define mmTPC1_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xE46BC0
+
+#define mmTPC1_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xE46BC4
+
+#define mmTPC1_CFG_QM_TENSOR_8_PADDING_VALUE 0xE46BC8
+
+#define mmTPC1_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xE46BCC
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_0_SIZE 0xE46BD0
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xE46BD4
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_1_SIZE 0xE46BD8
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xE46BDC
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_2_SIZE 0xE46BE0
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xE46BE4
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_3_SIZE 0xE46BE8
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xE46BEC
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_4_SIZE 0xE46BF0
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xE46BF4
+
+#define mmTPC1_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xE46BF8
+
+#define mmTPC1_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xE46BFC
+
+#define mmTPC1_CFG_QM_TENSOR_9_PADDING_VALUE 0xE46C00
+
+#define mmTPC1_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xE46C04
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_0_SIZE 0xE46C08
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xE46C0C
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_1_SIZE 0xE46C10
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xE46C14
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_2_SIZE 0xE46C18
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xE46C1C
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_3_SIZE 0xE46C20
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xE46C24
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_4_SIZE 0xE46C28
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xE46C2C
+
+#define mmTPC1_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xE46C30
+
+#define mmTPC1_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xE46C34
+
+#define mmTPC1_CFG_QM_TENSOR_10_PADDING_VALUE 0xE46C38
+
+#define mmTPC1_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xE46C3C
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_0_SIZE 0xE46C40
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xE46C44
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_1_SIZE 0xE46C48
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xE46C4C
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_2_SIZE 0xE46C50
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xE46C54
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_3_SIZE 0xE46C58
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xE46C5C
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_4_SIZE 0xE46C60
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xE46C64
+
+#define mmTPC1_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xE46C68
+
+#define mmTPC1_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xE46C6C
+
+#define mmTPC1_CFG_QM_TENSOR_11_PADDING_VALUE 0xE46C70
+
+#define mmTPC1_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xE46C74
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_0_SIZE 0xE46C78
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xE46C7C
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_1_SIZE 0xE46C80
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xE46C84
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_2_SIZE 0xE46C88
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xE46C8C
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_3_SIZE 0xE46C90
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xE46C94
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_4_SIZE 0xE46C98
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xE46C9C
+
+#define mmTPC1_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xE46CA0
+
+#define mmTPC1_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xE46CA4
+
+#define mmTPC1_CFG_QM_TENSOR_12_PADDING_VALUE 0xE46CA8
+
+#define mmTPC1_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xE46CAC
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_0_SIZE 0xE46CB0
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xE46CB4
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_1_SIZE 0xE46CB8
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xE46CBC
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_2_SIZE 0xE46CC0
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xE46CC4
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_3_SIZE 0xE46CC8
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xE46CCC
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_4_SIZE 0xE46CD0
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xE46CD4
+
+#define mmTPC1_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xE46CD8
+
+#define mmTPC1_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xE46CDC
+
+#define mmTPC1_CFG_QM_TENSOR_13_PADDING_VALUE 0xE46CE0
+
+#define mmTPC1_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xE46CE4
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_0_SIZE 0xE46CE8
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xE46CEC
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_1_SIZE 0xE46CF0
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xE46CF4
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_2_SIZE 0xE46CF8
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xE46CFC
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_3_SIZE 0xE46D00
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xE46D04
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_4_SIZE 0xE46D08
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xE46D0C
+
+#define mmTPC1_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xE46D10
+
+#define mmTPC1_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xE46D14
+
+#define mmTPC1_CFG_QM_TENSOR_14_PADDING_VALUE 0xE46D18
+
+#define mmTPC1_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xE46D1C
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_0_SIZE 0xE46D20
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xE46D24
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_1_SIZE 0xE46D28
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xE46D2C
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_2_SIZE 0xE46D30
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xE46D34
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_3_SIZE 0xE46D38
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xE46D3C
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_4_SIZE 0xE46D40
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xE46D44
+
+#define mmTPC1_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xE46D48
+
+#define mmTPC1_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xE46D4C
+
+#define mmTPC1_CFG_QM_TENSOR_15_PADDING_VALUE 0xE46D50
+
+#define mmTPC1_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xE46D54
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_0_SIZE 0xE46D58
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xE46D5C
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_1_SIZE 0xE46D60
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xE46D64
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_2_SIZE 0xE46D68
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xE46D6C
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_3_SIZE 0xE46D70
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xE46D74
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_4_SIZE 0xE46D78
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xE46D7C
+
+#define mmTPC1_CFG_QM_SYNC_OBJECT_MESSAGE 0xE46D80
+
+#define mmTPC1_CFG_QM_SYNC_OBJECT_ADDR 0xE46D84
+
+#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE46D88
+
+#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE46D8C
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_0 0xE46D90
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_0 0xE46D94
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_1 0xE46D98
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_1 0xE46D9C
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_2 0xE46DA0
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_2 0xE46DA4
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_3 0xE46DA8
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_3 0xE46DAC
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_4 0xE46DB0
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_4 0xE46DB4
+
+#define mmTPC1_CFG_QM_KERNEL_CONFIG 0xE46DB8
+
+#define mmTPC1_CFG_QM_KERNEL_ID 0xE46DBC
+
+#define mmTPC1_CFG_QM_SRF_0 0xE46DC0
+
+#define mmTPC1_CFG_QM_SRF_1 0xE46DC4
+
+#define mmTPC1_CFG_QM_SRF_2 0xE46DC8
+
+#define mmTPC1_CFG_QM_SRF_3 0xE46DCC
+
+#define mmTPC1_CFG_QM_SRF_4 0xE46DD0
+
+#define mmTPC1_CFG_QM_SRF_5 0xE46DD4
+
+#define mmTPC1_CFG_QM_SRF_6 0xE46DD8
+
+#define mmTPC1_CFG_QM_SRF_7 0xE46DDC
+
+#define mmTPC1_CFG_QM_SRF_8 0xE46DE0
+
+#define mmTPC1_CFG_QM_SRF_9 0xE46DE4
+
+#define mmTPC1_CFG_QM_SRF_10 0xE46DE8
+
+#define mmTPC1_CFG_QM_SRF_11 0xE46DEC
+
+#define mmTPC1_CFG_QM_SRF_12 0xE46DF0
+
+#define mmTPC1_CFG_QM_SRF_13 0xE46DF4
+
+#define mmTPC1_CFG_QM_SRF_14 0xE46DF8
+
+#define mmTPC1_CFG_QM_SRF_15 0xE46DFC
+
+#define mmTPC1_CFG_QM_SRF_16 0xE46E00
+
+#define mmTPC1_CFG_QM_SRF_17 0xE46E04
+
+#define mmTPC1_CFG_QM_SRF_18 0xE46E08
+
+#define mmTPC1_CFG_QM_SRF_19 0xE46E0C
+
+#define mmTPC1_CFG_QM_SRF_20 0xE46E10
+
+#define mmTPC1_CFG_QM_SRF_21 0xE46E14
+
+#define mmTPC1_CFG_QM_SRF_22 0xE46E18
+
+#define mmTPC1_CFG_QM_SRF_23 0xE46E1C
+
+#define mmTPC1_CFG_QM_SRF_24 0xE46E20
+
+#define mmTPC1_CFG_QM_SRF_25 0xE46E24
+
+#define mmTPC1_CFG_QM_SRF_26 0xE46E28
+
+#define mmTPC1_CFG_QM_SRF_27 0xE46E2C
+
+#define mmTPC1_CFG_QM_SRF_28 0xE46E30
+
+#define mmTPC1_CFG_QM_SRF_29 0xE46E34
+
+#define mmTPC1_CFG_QM_SRF_30 0xE46E38
+
+#define mmTPC1_CFG_QM_SRF_31 0xE46E3C
+
+#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
new file mode 100644
index 000000000000..af10ef7a87d9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_QM_REGS_H_
+#define ASIC_REG_TPC1_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC1_QM_GLBL_CFG0 0xE48000
+
+#define mmTPC1_QM_GLBL_CFG1 0xE48004
+
+#define mmTPC1_QM_GLBL_PROT 0xE48008
+
+#define mmTPC1_QM_GLBL_ERR_CFG 0xE4800C
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_0 0xE48010
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_1 0xE48014
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_2 0xE48018
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_3 0xE4801C
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_4 0xE48020
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_0 0xE48024
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_1 0xE48028
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_2 0xE4802C
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_3 0xE48030
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_4 0xE48034
+
+#define mmTPC1_QM_GLBL_STS0 0xE48038
+
+#define mmTPC1_QM_GLBL_STS1_0 0xE48040
+
+#define mmTPC1_QM_GLBL_STS1_1 0xE48044
+
+#define mmTPC1_QM_GLBL_STS1_2 0xE48048
+
+#define mmTPC1_QM_GLBL_STS1_3 0xE4804C
+
+#define mmTPC1_QM_GLBL_STS1_4 0xE48050
+
+#define mmTPC1_QM_GLBL_MSG_EN_0 0xE48054
+
+#define mmTPC1_QM_GLBL_MSG_EN_1 0xE48058
+
+#define mmTPC1_QM_GLBL_MSG_EN_2 0xE4805C
+
+#define mmTPC1_QM_GLBL_MSG_EN_3 0xE48060
+
+#define mmTPC1_QM_GLBL_MSG_EN_4 0xE48068
+
+#define mmTPC1_QM_PQ_BASE_LO_0 0xE48070
+
+#define mmTPC1_QM_PQ_BASE_LO_1 0xE48074
+
+#define mmTPC1_QM_PQ_BASE_LO_2 0xE48078
+
+#define mmTPC1_QM_PQ_BASE_LO_3 0xE4807C
+
+#define mmTPC1_QM_PQ_BASE_HI_0 0xE48080
+
+#define mmTPC1_QM_PQ_BASE_HI_1 0xE48084
+
+#define mmTPC1_QM_PQ_BASE_HI_2 0xE48088
+
+#define mmTPC1_QM_PQ_BASE_HI_3 0xE4808C
+
+#define mmTPC1_QM_PQ_SIZE_0 0xE48090
+
+#define mmTPC1_QM_PQ_SIZE_1 0xE48094
+
+#define mmTPC1_QM_PQ_SIZE_2 0xE48098
+
+#define mmTPC1_QM_PQ_SIZE_3 0xE4809C
+
+#define mmTPC1_QM_PQ_PI_0 0xE480A0
+
+#define mmTPC1_QM_PQ_PI_1 0xE480A4
+
+#define mmTPC1_QM_PQ_PI_2 0xE480A8
+
+#define mmTPC1_QM_PQ_PI_3 0xE480AC
+
+#define mmTPC1_QM_PQ_CI_0 0xE480B0
+
+#define mmTPC1_QM_PQ_CI_1 0xE480B4
+
+#define mmTPC1_QM_PQ_CI_2 0xE480B8
+
+#define mmTPC1_QM_PQ_CI_3 0xE480BC
+
+#define mmTPC1_QM_PQ_CFG0_0 0xE480C0
+
+#define mmTPC1_QM_PQ_CFG0_1 0xE480C4
+
+#define mmTPC1_QM_PQ_CFG0_2 0xE480C8
+
+#define mmTPC1_QM_PQ_CFG0_3 0xE480CC
+
+#define mmTPC1_QM_PQ_CFG1_0 0xE480D0
+
+#define mmTPC1_QM_PQ_CFG1_1 0xE480D4
+
+#define mmTPC1_QM_PQ_CFG1_2 0xE480D8
+
+#define mmTPC1_QM_PQ_CFG1_3 0xE480DC
+
+#define mmTPC1_QM_PQ_ARUSER_31_11_0 0xE480E0
+
+#define mmTPC1_QM_PQ_ARUSER_31_11_1 0xE480E4
+
+#define mmTPC1_QM_PQ_ARUSER_31_11_2 0xE480E8
+
+#define mmTPC1_QM_PQ_ARUSER_31_11_3 0xE480EC
+
+#define mmTPC1_QM_PQ_STS0_0 0xE480F0
+
+#define mmTPC1_QM_PQ_STS0_1 0xE480F4
+
+#define mmTPC1_QM_PQ_STS0_2 0xE480F8
+
+#define mmTPC1_QM_PQ_STS0_3 0xE480FC
+
+#define mmTPC1_QM_PQ_STS1_0 0xE48100
+
+#define mmTPC1_QM_PQ_STS1_1 0xE48104
+
+#define mmTPC1_QM_PQ_STS1_2 0xE48108
+
+#define mmTPC1_QM_PQ_STS1_3 0xE4810C
+
+#define mmTPC1_QM_CQ_CFG0_0 0xE48110
+
+#define mmTPC1_QM_CQ_CFG0_1 0xE48114
+
+#define mmTPC1_QM_CQ_CFG0_2 0xE48118
+
+#define mmTPC1_QM_CQ_CFG0_3 0xE4811C
+
+#define mmTPC1_QM_CQ_CFG0_4 0xE48120
+
+#define mmTPC1_QM_CQ_CFG1_0 0xE48124
+
+#define mmTPC1_QM_CQ_CFG1_1 0xE48128
+
+#define mmTPC1_QM_CQ_CFG1_2 0xE4812C
+
+#define mmTPC1_QM_CQ_CFG1_3 0xE48130
+
+#define mmTPC1_QM_CQ_CFG1_4 0xE48134
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_0 0xE48138
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_1 0xE4813C
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_2 0xE48140
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_3 0xE48144
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_4 0xE48148
+
+#define mmTPC1_QM_CQ_STS0_0 0xE4814C
+
+#define mmTPC1_QM_CQ_STS0_1 0xE48150
+
+#define mmTPC1_QM_CQ_STS0_2 0xE48154
+
+#define mmTPC1_QM_CQ_STS0_3 0xE48158
+
+#define mmTPC1_QM_CQ_STS0_4 0xE4815C
+
+#define mmTPC1_QM_CQ_STS1_0 0xE48160
+
+#define mmTPC1_QM_CQ_STS1_1 0xE48164
+
+#define mmTPC1_QM_CQ_STS1_2 0xE48168
+
+#define mmTPC1_QM_CQ_STS1_3 0xE4816C
+
+#define mmTPC1_QM_CQ_STS1_4 0xE48170
+
+#define mmTPC1_QM_CQ_PTR_LO_0 0xE48174
+
+#define mmTPC1_QM_CQ_PTR_HI_0 0xE48178
+
+#define mmTPC1_QM_CQ_TSIZE_0 0xE4817C
+
+#define mmTPC1_QM_CQ_CTL_0 0xE48180
+
+#define mmTPC1_QM_CQ_PTR_LO_1 0xE48184
+
+#define mmTPC1_QM_CQ_PTR_HI_1 0xE48188
+
+#define mmTPC1_QM_CQ_TSIZE_1 0xE4818C
+
+#define mmTPC1_QM_CQ_CTL_1 0xE48190
+
+#define mmTPC1_QM_CQ_PTR_LO_2 0xE48194
+
+#define mmTPC1_QM_CQ_PTR_HI_2 0xE48198
+
+#define mmTPC1_QM_CQ_TSIZE_2 0xE4819C
+
+#define mmTPC1_QM_CQ_CTL_2 0xE481A0
+
+#define mmTPC1_QM_CQ_PTR_LO_3 0xE481A4
+
+#define mmTPC1_QM_CQ_PTR_HI_3 0xE481A8
+
+#define mmTPC1_QM_CQ_TSIZE_3 0xE481AC
+
+#define mmTPC1_QM_CQ_CTL_3 0xE481B0
+
+#define mmTPC1_QM_CQ_PTR_LO_4 0xE481B4
+
+#define mmTPC1_QM_CQ_PTR_HI_4 0xE481B8
+
+#define mmTPC1_QM_CQ_TSIZE_4 0xE481BC
+
+#define mmTPC1_QM_CQ_CTL_4 0xE481C0
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_0 0xE481C4
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_1 0xE481C8
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_2 0xE481CC
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_3 0xE481D0
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_4 0xE481D4
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_0 0xE481D8
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_1 0xE481DC
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_2 0xE481E0
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_3 0xE481E4
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_4 0xE481E8
+
+#define mmTPC1_QM_CQ_TSIZE_STS_0 0xE481EC
+
+#define mmTPC1_QM_CQ_TSIZE_STS_1 0xE481F0
+
+#define mmTPC1_QM_CQ_TSIZE_STS_2 0xE481F4
+
+#define mmTPC1_QM_CQ_TSIZE_STS_3 0xE481F8
+
+#define mmTPC1_QM_CQ_TSIZE_STS_4 0xE481FC
+
+#define mmTPC1_QM_CQ_CTL_STS_0 0xE48200
+
+#define mmTPC1_QM_CQ_CTL_STS_1 0xE48204
+
+#define mmTPC1_QM_CQ_CTL_STS_2 0xE48208
+
+#define mmTPC1_QM_CQ_CTL_STS_3 0xE4820C
+
+#define mmTPC1_QM_CQ_CTL_STS_4 0xE48210
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_0 0xE48214
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_1 0xE48218
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_2 0xE4821C
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_3 0xE48220
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_4 0xE48224
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_0 0xE48228
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_1 0xE4822C
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_2 0xE48230
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_3 0xE48234
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_4 0xE48238
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_0 0xE4823C
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_1 0xE48240
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_2 0xE48244
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_3 0xE48248
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_4 0xE4824C
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_0 0xE48250
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_1 0xE48254
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_2 0xE48258
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_3 0xE4825C
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_4 0xE48260
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_0 0xE48264
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_1 0xE48268
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_2 0xE4826C
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_3 0xE48270
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_4 0xE48274
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_0 0xE48278
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_1 0xE4827C
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 0xE48280
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_3 0xE48284
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_4 0xE48288
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_0 0xE4828C
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_1 0xE48290
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_2 0xE48294
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_3 0xE48298
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_4 0xE4829C
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_0 0xE482A0
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_1 0xE482A4
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_2 0xE482A8
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_3 0xE482AC
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_4 0xE482B0
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_0 0xE482B4
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_1 0xE482B8
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_2 0xE482BC
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_3 0xE482C0
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_4 0xE482C4
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_0 0xE482C8
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_1 0xE482CC
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_2 0xE482D0
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_3 0xE482D4
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_4 0xE482D8
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xE482E0
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xE482E4
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xE482E8
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xE482EC
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xE482F0
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xE482F4
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xE482F8
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xE482FC
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xE48300
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xE48304
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_0 0xE48308
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_1 0xE4830C
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_2 0xE48310
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_3 0xE48314
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_4 0xE48318
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_0 0xE4831C
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_1 0xE48320
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_2 0xE48324
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_3 0xE48328
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_4 0xE4832C
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_0 0xE48330
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_1 0xE48334
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_2 0xE48338
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_3 0xE4833C
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_4 0xE48340
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_0 0xE48344
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_1 0xE48348
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_2 0xE4834C
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_3 0xE48350
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_4 0xE48354
+
+#define mmTPC1_QM_CP_FENCE0_CNT_0 0xE48358
+
+#define mmTPC1_QM_CP_FENCE0_CNT_1 0xE4835C
+
+#define mmTPC1_QM_CP_FENCE0_CNT_2 0xE48360
+
+#define mmTPC1_QM_CP_FENCE0_CNT_3 0xE48364
+
+#define mmTPC1_QM_CP_FENCE0_CNT_4 0xE48368
+
+#define mmTPC1_QM_CP_FENCE1_CNT_0 0xE4836C
+
+#define mmTPC1_QM_CP_FENCE1_CNT_1 0xE48370
+
+#define mmTPC1_QM_CP_FENCE1_CNT_2 0xE48374
+
+#define mmTPC1_QM_CP_FENCE1_CNT_3 0xE48378
+
+#define mmTPC1_QM_CP_FENCE1_CNT_4 0xE4837C
+
+#define mmTPC1_QM_CP_FENCE2_CNT_0 0xE48380
+
+#define mmTPC1_QM_CP_FENCE2_CNT_1 0xE48384
+
+#define mmTPC1_QM_CP_FENCE2_CNT_2 0xE48388
+
+#define mmTPC1_QM_CP_FENCE2_CNT_3 0xE4838C
+
+#define mmTPC1_QM_CP_FENCE2_CNT_4 0xE48390
+
+#define mmTPC1_QM_CP_FENCE3_CNT_0 0xE48394
+
+#define mmTPC1_QM_CP_FENCE3_CNT_1 0xE48398
+
+#define mmTPC1_QM_CP_FENCE3_CNT_2 0xE4839C
+
+#define mmTPC1_QM_CP_FENCE3_CNT_3 0xE483A0
+
+#define mmTPC1_QM_CP_FENCE3_CNT_4 0xE483A4
+
+#define mmTPC1_QM_CP_STS_0 0xE483A8
+
+#define mmTPC1_QM_CP_STS_1 0xE483AC
+
+#define mmTPC1_QM_CP_STS_2 0xE483B0
+
+#define mmTPC1_QM_CP_STS_3 0xE483B4
+
+#define mmTPC1_QM_CP_STS_4 0xE483B8
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_0 0xE483BC
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_1 0xE483C0
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_2 0xE483C4
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_3 0xE483C8
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_4 0xE483CC
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_0 0xE483D0
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_1 0xE483D4
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_2 0xE483D8
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_3 0xE483DC
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_4 0xE483E0
+
+#define mmTPC1_QM_CP_BARRIER_CFG_0 0xE483F4
+
+#define mmTPC1_QM_CP_BARRIER_CFG_1 0xE483F8
+
+#define mmTPC1_QM_CP_BARRIER_CFG_2 0xE483FC
+
+#define mmTPC1_QM_CP_BARRIER_CFG_3 0xE48400
+
+#define mmTPC1_QM_CP_BARRIER_CFG_4 0xE48404
+
+#define mmTPC1_QM_CP_DBG_0_0 0xE48408
+
+#define mmTPC1_QM_CP_DBG_0_1 0xE4840C
+
+#define mmTPC1_QM_CP_DBG_0_2 0xE48410
+
+#define mmTPC1_QM_CP_DBG_0_3 0xE48414
+
+#define mmTPC1_QM_CP_DBG_0_4 0xE48418
+
+#define mmTPC1_QM_CP_ARUSER_31_11_0 0xE4841C
+
+#define mmTPC1_QM_CP_ARUSER_31_11_1 0xE48420
+
+#define mmTPC1_QM_CP_ARUSER_31_11_2 0xE48424
+
+#define mmTPC1_QM_CP_ARUSER_31_11_3 0xE48428
+
+#define mmTPC1_QM_CP_ARUSER_31_11_4 0xE4842C
+
+#define mmTPC1_QM_CP_AWUSER_31_11_0 0xE48430
+
+#define mmTPC1_QM_CP_AWUSER_31_11_1 0xE48434
+
+#define mmTPC1_QM_CP_AWUSER_31_11_2 0xE48438
+
+#define mmTPC1_QM_CP_AWUSER_31_11_3 0xE4843C
+
+#define mmTPC1_QM_CP_AWUSER_31_11_4 0xE48440
+
+#define mmTPC1_QM_ARB_CFG_0 0xE48A00
+
+#define mmTPC1_QM_ARB_CHOISE_Q_PUSH 0xE48A04
+
+#define mmTPC1_QM_ARB_WRR_WEIGHT_0 0xE48A08
+
+#define mmTPC1_QM_ARB_WRR_WEIGHT_1 0xE48A0C
+
+#define mmTPC1_QM_ARB_WRR_WEIGHT_2 0xE48A10
+
+#define mmTPC1_QM_ARB_WRR_WEIGHT_3 0xE48A14
+
+#define mmTPC1_QM_ARB_CFG_1 0xE48A18
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_0 0xE48A20
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_1 0xE48A24
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_2 0xE48A28
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_3 0xE48A2C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_4 0xE48A30
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_5 0xE48A34
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_6 0xE48A38
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_7 0xE48A3C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_8 0xE48A40
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_9 0xE48A44
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_10 0xE48A48
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_11 0xE48A4C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_12 0xE48A50
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_13 0xE48A54
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_14 0xE48A58
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_15 0xE48A5C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_16 0xE48A60
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_17 0xE48A64
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_18 0xE48A68
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_19 0xE48A6C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_20 0xE48A70
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_21 0xE48A74
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_22 0xE48A78
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_23 0xE48A7C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_24 0xE48A80
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_25 0xE48A84
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_26 0xE48A88
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_27 0xE48A8C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_28 0xE48A90
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_29 0xE48A94
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_30 0xE48A98
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_31 0xE48A9C
+
+#define mmTPC1_QM_ARB_MST_CRED_INC 0xE48AA0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xE48AA4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xE48AA8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xE48AAC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xE48AB0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xE48AB4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xE48AB8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xE48ABC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xE48AC0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xE48AC4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xE48AC8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xE48ACC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xE48AD0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xE48AD4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xE48AD8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xE48ADC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xE48AE0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xE48AE4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xE48AE8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xE48AEC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xE48AF0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xE48AF4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xE48AF8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xE48AFC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xE48B00
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xE48B04
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xE48B08
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xE48B0C
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xE48B10
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xE48B14
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xE48B18
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xE48B1C
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xE48B20
+
+#define mmTPC1_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xE48B28
+
+#define mmTPC1_QM_ARB_MST_SLAVE_EN 0xE48B2C
+
+#define mmTPC1_QM_ARB_MST_QUIET_PER 0xE48B34
+
+#define mmTPC1_QM_ARB_SLV_CHOISE_WDT 0xE48B38
+
+#define mmTPC1_QM_ARB_SLV_ID 0xE48B3C
+
+#define mmTPC1_QM_ARB_MSG_MAX_INFLIGHT 0xE48B44
+
+#define mmTPC1_QM_ARB_MSG_AWUSER_31_11 0xE48B48
+
+#define mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP 0xE48B4C
+
+#define mmTPC1_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xE48B50
+
+#define mmTPC1_QM_ARB_BASE_LO 0xE48B54
+
+#define mmTPC1_QM_ARB_BASE_HI 0xE48B58
+
+#define mmTPC1_QM_ARB_STATE_STS 0xE48B80
+
+#define mmTPC1_QM_ARB_CHOISE_FULLNESS_STS 0xE48B84
+
+#define mmTPC1_QM_ARB_MSG_STS 0xE48B88
+
+#define mmTPC1_QM_ARB_SLV_CHOISE_Q_HEAD 0xE48B8C
+
+#define mmTPC1_QM_ARB_ERR_CAUSE 0xE48B9C
+
+#define mmTPC1_QM_ARB_ERR_MSG_EN 0xE48BA0
+
+#define mmTPC1_QM_ARB_ERR_STS_DRP 0xE48BA8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_0 0xE48BB0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_1 0xE48BB4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_2 0xE48BB8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_3 0xE48BBC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_4 0xE48BC0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_5 0xE48BC4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_6 0xE48BC8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_7 0xE48BCC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_8 0xE48BD0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_9 0xE48BD4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_10 0xE48BD8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_11 0xE48BDC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_12 0xE48BE0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_13 0xE48BE4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_14 0xE48BE8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_15 0xE48BEC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_16 0xE48BF0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_17 0xE48BF4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_18 0xE48BF8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_19 0xE48BFC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_20 0xE48C00
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_21 0xE48C04
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_22 0xE48C08
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_23 0xE48C0C
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_24 0xE48C10
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_25 0xE48C14
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_26 0xE48C18
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_27 0xE48C1C
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_28 0xE48C20
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_29 0xE48C24
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_30 0xE48C28
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_31 0xE48C2C
+
+#define mmTPC1_QM_CGM_CFG 0xE48C70
+
+#define mmTPC1_QM_CGM_STS 0xE48C74
+
+#define mmTPC1_QM_CGM_CFG1 0xE48C78
+
+#define mmTPC1_QM_LOCAL_RANGE_BASE 0xE48C80
+
+#define mmTPC1_QM_LOCAL_RANGE_SIZE 0xE48C84
+
+#define mmTPC1_QM_CSMR_STRICT_PRIO_CFG 0xE48C90
+
+#define mmTPC1_QM_HBW_RD_RATE_LIM_CFG_1 0xE48C94
+
+#define mmTPC1_QM_LBW_WR_RATE_LIM_CFG_0 0xE48C98
+
+#define mmTPC1_QM_LBW_WR_RATE_LIM_CFG_1 0xE48C9C
+
+#define mmTPC1_QM_HBW_RD_RATE_LIM_CFG_0 0xE48CA0
+
+#define mmTPC1_QM_GLBL_AXCACHE 0xE48CA4
+
+#define mmTPC1_QM_IND_GW_APB_CFG 0xE48CB0
+
+#define mmTPC1_QM_IND_GW_APB_WDATA 0xE48CB4
+
+#define mmTPC1_QM_IND_GW_APB_RDATA 0xE48CB8
+
+#define mmTPC1_QM_IND_GW_APB_STATUS 0xE48CBC
+
+#define mmTPC1_QM_GLBL_ERR_ADDR_LO 0xE48CD0
+
+#define mmTPC1_QM_GLBL_ERR_ADDR_HI 0xE48CD4
+
+#define mmTPC1_QM_GLBL_ERR_WDATA 0xE48CD8
+
+#define mmTPC1_QM_GLBL_MEM_INIT_BUSY 0xE48D00
+
+#endif /* ASIC_REG_TPC1_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
new file mode 100644
index 000000000000..3e77c37952bc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_CFG_REGS_H_
+#define ASIC_REG_TPC2_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE86400
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE86404
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE86408
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE8640C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE86410
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE86414
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE86418
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE8641C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE86420
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE86424
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE86428
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE8642C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE86430
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE86434
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE86438
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE8643C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE86440
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE86444
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE86448
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE8644C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE86450
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE86454
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE86458
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE8645C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE86460
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE86464
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE86468
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE8646C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE86470
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE86474
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE86478
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE8647C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE86480
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE86484
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE86488
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE8648C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE86490
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE86494
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE86498
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE8649C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE864A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE864A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE864A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE864AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE864B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE864B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE864B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE864BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE864C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE864C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE864C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE864CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE864D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE864D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE864D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE864DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE864E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE864E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE864E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE864EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE864F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE864F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE864F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE864FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE86500
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE86504
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE86508
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE8650C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE86510
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE86514
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE86518
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE8651C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE86520
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE86524
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE86528
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE8652C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE86530
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE86534
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE86538
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE8653C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE86540
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE86544
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE86548
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE8654C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE86550
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE86554
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE86558
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE8655C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE86560
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE86564
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE86568
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE8656C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE86570
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE86574
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE86578
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE8657C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE86580
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE86584
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE86588
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE8658C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE86590
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE86594
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE86598
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE8659C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE865A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE865A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE865A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE865AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE865B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE865B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE865B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE865BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xE865C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xE865C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xE865C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xE865CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xE865D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xE865D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xE865D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xE865DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xE865E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xE865E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xE865E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xE865EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xE865F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xE865F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xE865F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xE865FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xE86600
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xE86604
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xE86608
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xE8660C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xE86610
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xE86614
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xE86618
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xE8661C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xE86620
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xE86624
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xE86628
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xE8662C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xE86630
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xE86634
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xE86638
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xE8663C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xE86640
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xE86644
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xE86648
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xE8664C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xE86650
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xE86654
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xE86658
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xE8665C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xE86660
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xE86664
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xE86668
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xE8666C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xE86670
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xE86674
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xE86678
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xE8667C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xE86680
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xE86684
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xE86688
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xE8668C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xE86690
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xE86694
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xE86698
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xE8669C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xE866A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xE866A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xE866A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xE866AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xE866B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xE866B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xE866B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xE866BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xE866C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xE866C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xE866C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xE866CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xE866D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xE866D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xE866D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xE866DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xE866E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xE866E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xE866E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xE866EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xE866F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xE866F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xE866F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xE866FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xE86700
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xE86704
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xE86708
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xE8670C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xE86710
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xE86714
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xE86718
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xE8671C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xE86720
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xE86724
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xE86728
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xE8672C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xE86730
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xE86734
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xE86738
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xE8673C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xE86740
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xE86744
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xE86748
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xE8674C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xE86750
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xE86754
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xE86758
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xE8675C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xE86760
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xE86764
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xE86768
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xE8676C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xE86770
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xE86774
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xE86778
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xE8677C
+
+#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE86780
+
+#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_ADDR 0xE86784
+
+#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE86788
+
+#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE8678C
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_0 0xE86790
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_0 0xE86794
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_1 0xE86798
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_1 0xE8679C
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_2 0xE867A0
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_2 0xE867A4
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_3 0xE867A8
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_3 0xE867AC
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_4 0xE867B0
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_4 0xE867B4
+
+#define mmTPC2_CFG_KERNEL_KERNEL_CONFIG 0xE867B8
+
+#define mmTPC2_CFG_KERNEL_KERNEL_ID 0xE867BC
+
+#define mmTPC2_CFG_KERNEL_SRF_0 0xE867C0
+
+#define mmTPC2_CFG_KERNEL_SRF_1 0xE867C4
+
+#define mmTPC2_CFG_KERNEL_SRF_2 0xE867C8
+
+#define mmTPC2_CFG_KERNEL_SRF_3 0xE867CC
+
+#define mmTPC2_CFG_KERNEL_SRF_4 0xE867D0
+
+#define mmTPC2_CFG_KERNEL_SRF_5 0xE867D4
+
+#define mmTPC2_CFG_KERNEL_SRF_6 0xE867D8
+
+#define mmTPC2_CFG_KERNEL_SRF_7 0xE867DC
+
+#define mmTPC2_CFG_KERNEL_SRF_8 0xE867E0
+
+#define mmTPC2_CFG_KERNEL_SRF_9 0xE867E4
+
+#define mmTPC2_CFG_KERNEL_SRF_10 0xE867E8
+
+#define mmTPC2_CFG_KERNEL_SRF_11 0xE867EC
+
+#define mmTPC2_CFG_KERNEL_SRF_12 0xE867F0
+
+#define mmTPC2_CFG_KERNEL_SRF_13 0xE867F4
+
+#define mmTPC2_CFG_KERNEL_SRF_14 0xE867F8
+
+#define mmTPC2_CFG_KERNEL_SRF_15 0xE867FC
+
+#define mmTPC2_CFG_KERNEL_SRF_16 0xE86800
+
+#define mmTPC2_CFG_KERNEL_SRF_17 0xE86804
+
+#define mmTPC2_CFG_KERNEL_SRF_18 0xE86808
+
+#define mmTPC2_CFG_KERNEL_SRF_19 0xE8680C
+
+#define mmTPC2_CFG_KERNEL_SRF_20 0xE86810
+
+#define mmTPC2_CFG_KERNEL_SRF_21 0xE86814
+
+#define mmTPC2_CFG_KERNEL_SRF_22 0xE86818
+
+#define mmTPC2_CFG_KERNEL_SRF_23 0xE8681C
+
+#define mmTPC2_CFG_KERNEL_SRF_24 0xE86820
+
+#define mmTPC2_CFG_KERNEL_SRF_25 0xE86824
+
+#define mmTPC2_CFG_KERNEL_SRF_26 0xE86828
+
+#define mmTPC2_CFG_KERNEL_SRF_27 0xE8682C
+
+#define mmTPC2_CFG_KERNEL_SRF_28 0xE86830
+
+#define mmTPC2_CFG_KERNEL_SRF_29 0xE86834
+
+#define mmTPC2_CFG_KERNEL_SRF_30 0xE86838
+
+#define mmTPC2_CFG_KERNEL_SRF_31 0xE8683C
+
+#define mmTPC2_CFG_ROUND_CSR 0xE868FC
+
+#define mmTPC2_CFG_PROT 0xE86900
+
+#define mmTPC2_CFG_SEMAPHORE 0xE86908
+
+#define mmTPC2_CFG_VFLAGS 0xE8690C
+
+#define mmTPC2_CFG_SFLAGS 0xE86910
+
+#define mmTPC2_CFG_LFSR_POLYNOM 0xE86918
+
+#define mmTPC2_CFG_STATUS 0xE8691C
+
+#define mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH 0xE86920
+
+#define mmTPC2_CFG_CFG_SUBTRACT_VALUE 0xE86924
+
+#define mmTPC2_CFG_SM_BASE_ADDRESS_HIGH 0xE8692C
+
+#define mmTPC2_CFG_TPC_CMD 0xE86930
+
+#define mmTPC2_CFG_TPC_EXECUTE 0xE86938
+
+#define mmTPC2_CFG_TPC_STALL 0xE8693C
+
+#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_LOW 0xE86940
+
+#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE86944
+
+#define mmTPC2_CFG_RD_RATE_LIMIT 0xE86948
+
+#define mmTPC2_CFG_WR_RATE_LIMIT 0xE86950
+
+#define mmTPC2_CFG_MSS_CONFIG 0xE86954
+
+#define mmTPC2_CFG_TPC_INTR_CAUSE 0xE86958
+
+#define mmTPC2_CFG_TPC_INTR_MASK 0xE8695C
+
+#define mmTPC2_CFG_WQ_CREDITS 0xE86960
+
+#define mmTPC2_CFG_ARUSER_LO 0xE86964
+
+#define mmTPC2_CFG_ARUSER_HI 0xE86968
+
+#define mmTPC2_CFG_AWUSER_LO 0xE8696C
+
+#define mmTPC2_CFG_AWUSER_HI 0xE86970
+
+#define mmTPC2_CFG_OPCODE_EXEC 0xE86974
+
+#define mmTPC2_CFG_LUT_FUNC32_BASE_ADDR_LO 0xE86978
+
+#define mmTPC2_CFG_LUT_FUNC32_BASE_ADDR_HI 0xE8697C
+
+#define mmTPC2_CFG_LUT_FUNC64_BASE_ADDR_LO 0xE86980
+
+#define mmTPC2_CFG_LUT_FUNC64_BASE_ADDR_HI 0xE86984
+
+#define mmTPC2_CFG_LUT_FUNC128_BASE_ADDR_LO 0xE86988
+
+#define mmTPC2_CFG_LUT_FUNC128_BASE_ADDR_HI 0xE8698C
+
+#define mmTPC2_CFG_LUT_FUNC256_BASE_ADDR_LO 0xE86990
+
+#define mmTPC2_CFG_LUT_FUNC256_BASE_ADDR_HI 0xE86994
+
+#define mmTPC2_CFG_TSB_CFG_MAX_SIZE 0xE86998
+
+#define mmTPC2_CFG_TSB_CFG 0xE8699C
+
+#define mmTPC2_CFG_DBGMEM_ADD 0xE869A0
+
+#define mmTPC2_CFG_DBGMEM_DATA_WR 0xE869A4
+
+#define mmTPC2_CFG_DBGMEM_DATA_RD 0xE869A8
+
+#define mmTPC2_CFG_DBGMEM_CTRL 0xE869AC
+
+#define mmTPC2_CFG_DBGMEM_RC 0xE869B0
+
+#define mmTPC2_CFG_TSB_INFLIGHT_CNTR 0xE869B4
+
+#define mmTPC2_CFG_WQ_INFLIGHT_CNTR 0xE869B8
+
+#define mmTPC2_CFG_WQ_LBW_TOTAL_CNTR 0xE869BC
+
+#define mmTPC2_CFG_WQ_HBW_TOTAL_CNTR 0xE869C0
+
+#define mmTPC2_CFG_IRQ_OCCOUPY_CNTR 0xE869C4
+
+#define mmTPC2_CFG_FUNC_MBIST_CNTRL 0xE869D0
+
+#define mmTPC2_CFG_FUNC_MBIST_PAT 0xE869D4
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_0 0xE869D8
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_1 0xE869DC
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_2 0xE869E0
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_3 0xE869E4
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_4 0xE869E8
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_5 0xE869EC
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_6 0xE869F0
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_7 0xE869F4
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_8 0xE869F8
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE869FC
+
+#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE86A00
+
+#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE86A04
+
+#define mmTPC2_CFG_QM_TENSOR_0_PADDING_VALUE 0xE86A08
+
+#define mmTPC2_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE86A0C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE86A10
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE86A14
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE86A18
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE86A1C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE86A20
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE86A24
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE86A28
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE86A2C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE86A30
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE86A34
+
+#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE86A38
+
+#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE86A3C
+
+#define mmTPC2_CFG_QM_TENSOR_1_PADDING_VALUE 0xE86A40
+
+#define mmTPC2_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE86A44
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE86A48
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE86A4C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE86A50
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE86A54
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE86A58
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE86A5C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE86A60
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE86A64
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE86A68
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE86A6C
+
+#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE86A70
+
+#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE86A74
+
+#define mmTPC2_CFG_QM_TENSOR_2_PADDING_VALUE 0xE86A78
+
+#define mmTPC2_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE86A7C
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE86A80
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE86A84
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE86A88
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE86A8C
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE86A90
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE86A94
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE86A98
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE86A9C
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE86AA0
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE86AA4
+
+#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE86AA8
+
+#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE86AAC
+
+#define mmTPC2_CFG_QM_TENSOR_3_PADDING_VALUE 0xE86AB0
+
+#define mmTPC2_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE86AB4
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE86AB8
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE86ABC
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE86AC0
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE86AC4
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE86AC8
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE86ACC
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE86AD0
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE86AD4
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE86AD8
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE86ADC
+
+#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE86AE0
+
+#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE86AE4
+
+#define mmTPC2_CFG_QM_TENSOR_4_PADDING_VALUE 0xE86AE8
+
+#define mmTPC2_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE86AEC
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE86AF0
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE86AF4
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE86AF8
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE86AFC
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE86B00
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE86B04
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE86B08
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE86B0C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE86B10
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE86B14
+
+#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE86B18
+
+#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE86B1C
+
+#define mmTPC2_CFG_QM_TENSOR_5_PADDING_VALUE 0xE86B20
+
+#define mmTPC2_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE86B24
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE86B28
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE86B2C
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE86B30
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE86B34
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE86B38
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE86B3C
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE86B40
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE86B44
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE86B48
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE86B4C
+
+#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE86B50
+
+#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE86B54
+
+#define mmTPC2_CFG_QM_TENSOR_6_PADDING_VALUE 0xE86B58
+
+#define mmTPC2_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE86B5C
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE86B60
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE86B64
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE86B68
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE86B6C
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE86B70
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE86B74
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE86B78
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE86B7C
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE86B80
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE86B84
+
+#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE86B88
+
+#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE86B8C
+
+#define mmTPC2_CFG_QM_TENSOR_7_PADDING_VALUE 0xE86B90
+
+#define mmTPC2_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE86B94
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE86B98
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE86B9C
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE86BA0
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE86BA4
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE86BA8
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE86BAC
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE86BB0
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE86BB4
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE86BB8
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE86BBC
+
+#define mmTPC2_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xE86BC0
+
+#define mmTPC2_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xE86BC4
+
+#define mmTPC2_CFG_QM_TENSOR_8_PADDING_VALUE 0xE86BC8
+
+#define mmTPC2_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xE86BCC
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_0_SIZE 0xE86BD0
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xE86BD4
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_1_SIZE 0xE86BD8
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xE86BDC
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_2_SIZE 0xE86BE0
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xE86BE4
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_3_SIZE 0xE86BE8
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xE86BEC
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_4_SIZE 0xE86BF0
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xE86BF4
+
+#define mmTPC2_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xE86BF8
+
+#define mmTPC2_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xE86BFC
+
+#define mmTPC2_CFG_QM_TENSOR_9_PADDING_VALUE 0xE86C00
+
+#define mmTPC2_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xE86C04
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_0_SIZE 0xE86C08
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xE86C0C
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_1_SIZE 0xE86C10
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xE86C14
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_2_SIZE 0xE86C18
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xE86C1C
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_3_SIZE 0xE86C20
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xE86C24
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_4_SIZE 0xE86C28
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xE86C2C
+
+#define mmTPC2_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xE86C30
+
+#define mmTPC2_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xE86C34
+
+#define mmTPC2_CFG_QM_TENSOR_10_PADDING_VALUE 0xE86C38
+
+#define mmTPC2_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xE86C3C
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_0_SIZE 0xE86C40
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xE86C44
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_1_SIZE 0xE86C48
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xE86C4C
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_2_SIZE 0xE86C50
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xE86C54
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_3_SIZE 0xE86C58
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xE86C5C
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_4_SIZE 0xE86C60
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xE86C64
+
+#define mmTPC2_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xE86C68
+
+#define mmTPC2_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xE86C6C
+
+#define mmTPC2_CFG_QM_TENSOR_11_PADDING_VALUE 0xE86C70
+
+#define mmTPC2_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xE86C74
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_0_SIZE 0xE86C78
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xE86C7C
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_1_SIZE 0xE86C80
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xE86C84
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_2_SIZE 0xE86C88
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xE86C8C
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_3_SIZE 0xE86C90
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xE86C94
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_4_SIZE 0xE86C98
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xE86C9C
+
+#define mmTPC2_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xE86CA0
+
+#define mmTPC2_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xE86CA4
+
+#define mmTPC2_CFG_QM_TENSOR_12_PADDING_VALUE 0xE86CA8
+
+#define mmTPC2_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xE86CAC
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_0_SIZE 0xE86CB0
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xE86CB4
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_1_SIZE 0xE86CB8
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xE86CBC
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_2_SIZE 0xE86CC0
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xE86CC4
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_3_SIZE 0xE86CC8
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xE86CCC
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_4_SIZE 0xE86CD0
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xE86CD4
+
+#define mmTPC2_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xE86CD8
+
+#define mmTPC2_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xE86CDC
+
+#define mmTPC2_CFG_QM_TENSOR_13_PADDING_VALUE 0xE86CE0
+
+#define mmTPC2_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xE86CE4
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_0_SIZE 0xE86CE8
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xE86CEC
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_1_SIZE 0xE86CF0
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xE86CF4
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_2_SIZE 0xE86CF8
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xE86CFC
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_3_SIZE 0xE86D00
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xE86D04
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_4_SIZE 0xE86D08
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xE86D0C
+
+#define mmTPC2_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xE86D10
+
+#define mmTPC2_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xE86D14
+
+#define mmTPC2_CFG_QM_TENSOR_14_PADDING_VALUE 0xE86D18
+
+#define mmTPC2_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xE86D1C
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_0_SIZE 0xE86D20
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xE86D24
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_1_SIZE 0xE86D28
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xE86D2C
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_2_SIZE 0xE86D30
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xE86D34
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_3_SIZE 0xE86D38
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xE86D3C
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_4_SIZE 0xE86D40
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xE86D44
+
+#define mmTPC2_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xE86D48
+
+#define mmTPC2_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xE86D4C
+
+#define mmTPC2_CFG_QM_TENSOR_15_PADDING_VALUE 0xE86D50
+
+#define mmTPC2_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xE86D54
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_0_SIZE 0xE86D58
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xE86D5C
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_1_SIZE 0xE86D60
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xE86D64
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_2_SIZE 0xE86D68
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xE86D6C
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_3_SIZE 0xE86D70
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xE86D74
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_4_SIZE 0xE86D78
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xE86D7C
+
+#define mmTPC2_CFG_QM_SYNC_OBJECT_MESSAGE 0xE86D80
+
+#define mmTPC2_CFG_QM_SYNC_OBJECT_ADDR 0xE86D84
+
+#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE86D88
+
+#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE86D8C
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_0 0xE86D90
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_0 0xE86D94
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_1 0xE86D98
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_1 0xE86D9C
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_2 0xE86DA0
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_2 0xE86DA4
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_3 0xE86DA8
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_3 0xE86DAC
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_4 0xE86DB0
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_4 0xE86DB4
+
+#define mmTPC2_CFG_QM_KERNEL_CONFIG 0xE86DB8
+
+#define mmTPC2_CFG_QM_KERNEL_ID 0xE86DBC
+
+#define mmTPC2_CFG_QM_SRF_0 0xE86DC0
+
+#define mmTPC2_CFG_QM_SRF_1 0xE86DC4
+
+#define mmTPC2_CFG_QM_SRF_2 0xE86DC8
+
+#define mmTPC2_CFG_QM_SRF_3 0xE86DCC
+
+#define mmTPC2_CFG_QM_SRF_4 0xE86DD0
+
+#define mmTPC2_CFG_QM_SRF_5 0xE86DD4
+
+#define mmTPC2_CFG_QM_SRF_6 0xE86DD8
+
+#define mmTPC2_CFG_QM_SRF_7 0xE86DDC
+
+#define mmTPC2_CFG_QM_SRF_8 0xE86DE0
+
+#define mmTPC2_CFG_QM_SRF_9 0xE86DE4
+
+#define mmTPC2_CFG_QM_SRF_10 0xE86DE8
+
+#define mmTPC2_CFG_QM_SRF_11 0xE86DEC
+
+#define mmTPC2_CFG_QM_SRF_12 0xE86DF0
+
+#define mmTPC2_CFG_QM_SRF_13 0xE86DF4
+
+#define mmTPC2_CFG_QM_SRF_14 0xE86DF8
+
+#define mmTPC2_CFG_QM_SRF_15 0xE86DFC
+
+#define mmTPC2_CFG_QM_SRF_16 0xE86E00
+
+#define mmTPC2_CFG_QM_SRF_17 0xE86E04
+
+#define mmTPC2_CFG_QM_SRF_18 0xE86E08
+
+#define mmTPC2_CFG_QM_SRF_19 0xE86E0C
+
+#define mmTPC2_CFG_QM_SRF_20 0xE86E10
+
+#define mmTPC2_CFG_QM_SRF_21 0xE86E14
+
+#define mmTPC2_CFG_QM_SRF_22 0xE86E18
+
+#define mmTPC2_CFG_QM_SRF_23 0xE86E1C
+
+#define mmTPC2_CFG_QM_SRF_24 0xE86E20
+
+#define mmTPC2_CFG_QM_SRF_25 0xE86E24
+
+#define mmTPC2_CFG_QM_SRF_26 0xE86E28
+
+#define mmTPC2_CFG_QM_SRF_27 0xE86E2C
+
+#define mmTPC2_CFG_QM_SRF_28 0xE86E30
+
+#define mmTPC2_CFG_QM_SRF_29 0xE86E34
+
+#define mmTPC2_CFG_QM_SRF_30 0xE86E38
+
+#define mmTPC2_CFG_QM_SRF_31 0xE86E3C
+
+#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
new file mode 100644
index 000000000000..2919e2fa58f8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_QM_REGS_H_
+#define ASIC_REG_TPC2_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC2_QM_GLBL_CFG0 0xE88000
+
+#define mmTPC2_QM_GLBL_CFG1 0xE88004
+
+#define mmTPC2_QM_GLBL_PROT 0xE88008
+
+#define mmTPC2_QM_GLBL_ERR_CFG 0xE8800C
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_0 0xE88010
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_1 0xE88014
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_2 0xE88018
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_3 0xE8801C
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_4 0xE88020
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_0 0xE88024
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_1 0xE88028
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_2 0xE8802C
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_3 0xE88030
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_4 0xE88034
+
+#define mmTPC2_QM_GLBL_STS0 0xE88038
+
+#define mmTPC2_QM_GLBL_STS1_0 0xE88040
+
+#define mmTPC2_QM_GLBL_STS1_1 0xE88044
+
+#define mmTPC2_QM_GLBL_STS1_2 0xE88048
+
+#define mmTPC2_QM_GLBL_STS1_3 0xE8804C
+
+#define mmTPC2_QM_GLBL_STS1_4 0xE88050
+
+#define mmTPC2_QM_GLBL_MSG_EN_0 0xE88054
+
+#define mmTPC2_QM_GLBL_MSG_EN_1 0xE88058
+
+#define mmTPC2_QM_GLBL_MSG_EN_2 0xE8805C
+
+#define mmTPC2_QM_GLBL_MSG_EN_3 0xE88060
+
+#define mmTPC2_QM_GLBL_MSG_EN_4 0xE88068
+
+#define mmTPC2_QM_PQ_BASE_LO_0 0xE88070
+
+#define mmTPC2_QM_PQ_BASE_LO_1 0xE88074
+
+#define mmTPC2_QM_PQ_BASE_LO_2 0xE88078
+
+#define mmTPC2_QM_PQ_BASE_LO_3 0xE8807C
+
+#define mmTPC2_QM_PQ_BASE_HI_0 0xE88080
+
+#define mmTPC2_QM_PQ_BASE_HI_1 0xE88084
+
+#define mmTPC2_QM_PQ_BASE_HI_2 0xE88088
+
+#define mmTPC2_QM_PQ_BASE_HI_3 0xE8808C
+
+#define mmTPC2_QM_PQ_SIZE_0 0xE88090
+
+#define mmTPC2_QM_PQ_SIZE_1 0xE88094
+
+#define mmTPC2_QM_PQ_SIZE_2 0xE88098
+
+#define mmTPC2_QM_PQ_SIZE_3 0xE8809C
+
+#define mmTPC2_QM_PQ_PI_0 0xE880A0
+
+#define mmTPC2_QM_PQ_PI_1 0xE880A4
+
+#define mmTPC2_QM_PQ_PI_2 0xE880A8
+
+#define mmTPC2_QM_PQ_PI_3 0xE880AC
+
+#define mmTPC2_QM_PQ_CI_0 0xE880B0
+
+#define mmTPC2_QM_PQ_CI_1 0xE880B4
+
+#define mmTPC2_QM_PQ_CI_2 0xE880B8
+
+#define mmTPC2_QM_PQ_CI_3 0xE880BC
+
+#define mmTPC2_QM_PQ_CFG0_0 0xE880C0
+
+#define mmTPC2_QM_PQ_CFG0_1 0xE880C4
+
+#define mmTPC2_QM_PQ_CFG0_2 0xE880C8
+
+#define mmTPC2_QM_PQ_CFG0_3 0xE880CC
+
+#define mmTPC2_QM_PQ_CFG1_0 0xE880D0
+
+#define mmTPC2_QM_PQ_CFG1_1 0xE880D4
+
+#define mmTPC2_QM_PQ_CFG1_2 0xE880D8
+
+#define mmTPC2_QM_PQ_CFG1_3 0xE880DC
+
+#define mmTPC2_QM_PQ_ARUSER_31_11_0 0xE880E0
+
+#define mmTPC2_QM_PQ_ARUSER_31_11_1 0xE880E4
+
+#define mmTPC2_QM_PQ_ARUSER_31_11_2 0xE880E8
+
+#define mmTPC2_QM_PQ_ARUSER_31_11_3 0xE880EC
+
+#define mmTPC2_QM_PQ_STS0_0 0xE880F0
+
+#define mmTPC2_QM_PQ_STS0_1 0xE880F4
+
+#define mmTPC2_QM_PQ_STS0_2 0xE880F8
+
+#define mmTPC2_QM_PQ_STS0_3 0xE880FC
+
+#define mmTPC2_QM_PQ_STS1_0 0xE88100
+
+#define mmTPC2_QM_PQ_STS1_1 0xE88104
+
+#define mmTPC2_QM_PQ_STS1_2 0xE88108
+
+#define mmTPC2_QM_PQ_STS1_3 0xE8810C
+
+#define mmTPC2_QM_CQ_CFG0_0 0xE88110
+
+#define mmTPC2_QM_CQ_CFG0_1 0xE88114
+
+#define mmTPC2_QM_CQ_CFG0_2 0xE88118
+
+#define mmTPC2_QM_CQ_CFG0_3 0xE8811C
+
+#define mmTPC2_QM_CQ_CFG0_4 0xE88120
+
+#define mmTPC2_QM_CQ_CFG1_0 0xE88124
+
+#define mmTPC2_QM_CQ_CFG1_1 0xE88128
+
+#define mmTPC2_QM_CQ_CFG1_2 0xE8812C
+
+#define mmTPC2_QM_CQ_CFG1_3 0xE88130
+
+#define mmTPC2_QM_CQ_CFG1_4 0xE88134
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_0 0xE88138
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_1 0xE8813C
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_2 0xE88140
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_3 0xE88144
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_4 0xE88148
+
+#define mmTPC2_QM_CQ_STS0_0 0xE8814C
+
+#define mmTPC2_QM_CQ_STS0_1 0xE88150
+
+#define mmTPC2_QM_CQ_STS0_2 0xE88154
+
+#define mmTPC2_QM_CQ_STS0_3 0xE88158
+
+#define mmTPC2_QM_CQ_STS0_4 0xE8815C
+
+#define mmTPC2_QM_CQ_STS1_0 0xE88160
+
+#define mmTPC2_QM_CQ_STS1_1 0xE88164
+
+#define mmTPC2_QM_CQ_STS1_2 0xE88168
+
+#define mmTPC2_QM_CQ_STS1_3 0xE8816C
+
+#define mmTPC2_QM_CQ_STS1_4 0xE88170
+
+#define mmTPC2_QM_CQ_PTR_LO_0 0xE88174
+
+#define mmTPC2_QM_CQ_PTR_HI_0 0xE88178
+
+#define mmTPC2_QM_CQ_TSIZE_0 0xE8817C
+
+#define mmTPC2_QM_CQ_CTL_0 0xE88180
+
+#define mmTPC2_QM_CQ_PTR_LO_1 0xE88184
+
+#define mmTPC2_QM_CQ_PTR_HI_1 0xE88188
+
+#define mmTPC2_QM_CQ_TSIZE_1 0xE8818C
+
+#define mmTPC2_QM_CQ_CTL_1 0xE88190
+
+#define mmTPC2_QM_CQ_PTR_LO_2 0xE88194
+
+#define mmTPC2_QM_CQ_PTR_HI_2 0xE88198
+
+#define mmTPC2_QM_CQ_TSIZE_2 0xE8819C
+
+#define mmTPC2_QM_CQ_CTL_2 0xE881A0
+
+#define mmTPC2_QM_CQ_PTR_LO_3 0xE881A4
+
+#define mmTPC2_QM_CQ_PTR_HI_3 0xE881A8
+
+#define mmTPC2_QM_CQ_TSIZE_3 0xE881AC
+
+#define mmTPC2_QM_CQ_CTL_3 0xE881B0
+
+#define mmTPC2_QM_CQ_PTR_LO_4 0xE881B4
+
+#define mmTPC2_QM_CQ_PTR_HI_4 0xE881B8
+
+#define mmTPC2_QM_CQ_TSIZE_4 0xE881BC
+
+#define mmTPC2_QM_CQ_CTL_4 0xE881C0
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_0 0xE881C4
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_1 0xE881C8
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_2 0xE881CC
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_3 0xE881D0
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_4 0xE881D4
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_0 0xE881D8
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_1 0xE881DC
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_2 0xE881E0
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_3 0xE881E4
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_4 0xE881E8
+
+#define mmTPC2_QM_CQ_TSIZE_STS_0 0xE881EC
+
+#define mmTPC2_QM_CQ_TSIZE_STS_1 0xE881F0
+
+#define mmTPC2_QM_CQ_TSIZE_STS_2 0xE881F4
+
+#define mmTPC2_QM_CQ_TSIZE_STS_3 0xE881F8
+
+#define mmTPC2_QM_CQ_TSIZE_STS_4 0xE881FC
+
+#define mmTPC2_QM_CQ_CTL_STS_0 0xE88200
+
+#define mmTPC2_QM_CQ_CTL_STS_1 0xE88204
+
+#define mmTPC2_QM_CQ_CTL_STS_2 0xE88208
+
+#define mmTPC2_QM_CQ_CTL_STS_3 0xE8820C
+
+#define mmTPC2_QM_CQ_CTL_STS_4 0xE88210
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_0 0xE88214
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_1 0xE88218
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_2 0xE8821C
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_3 0xE88220
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_4 0xE88224
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_0 0xE88228
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_1 0xE8822C
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_2 0xE88230
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_3 0xE88234
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_4 0xE88238
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_0 0xE8823C
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_1 0xE88240
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_2 0xE88244
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_3 0xE88248
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_4 0xE8824C
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_0 0xE88250
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_1 0xE88254
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_2 0xE88258
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_3 0xE8825C
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_4 0xE88260
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_0 0xE88264
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_1 0xE88268
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_2 0xE8826C
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_3 0xE88270
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_4 0xE88274
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_0 0xE88278
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_1 0xE8827C
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 0xE88280
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_3 0xE88284
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_4 0xE88288
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_0 0xE8828C
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_1 0xE88290
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_2 0xE88294
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_3 0xE88298
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_4 0xE8829C
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_0 0xE882A0
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_1 0xE882A4
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_2 0xE882A8
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_3 0xE882AC
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_4 0xE882B0
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_0 0xE882B4
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_1 0xE882B8
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_2 0xE882BC
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_3 0xE882C0
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_4 0xE882C4
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_0 0xE882C8
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_1 0xE882CC
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_2 0xE882D0
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_3 0xE882D4
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_4 0xE882D8
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xE882E0
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xE882E4
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xE882E8
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xE882EC
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xE882F0
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xE882F4
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xE882F8
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xE882FC
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xE88300
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xE88304
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_0 0xE88308
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_1 0xE8830C
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_2 0xE88310
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_3 0xE88314
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_4 0xE88318
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_0 0xE8831C
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_1 0xE88320
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_2 0xE88324
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_3 0xE88328
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_4 0xE8832C
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_0 0xE88330
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_1 0xE88334
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_2 0xE88338
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_3 0xE8833C
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_4 0xE88340
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_0 0xE88344
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_1 0xE88348
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_2 0xE8834C
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_3 0xE88350
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_4 0xE88354
+
+#define mmTPC2_QM_CP_FENCE0_CNT_0 0xE88358
+
+#define mmTPC2_QM_CP_FENCE0_CNT_1 0xE8835C
+
+#define mmTPC2_QM_CP_FENCE0_CNT_2 0xE88360
+
+#define mmTPC2_QM_CP_FENCE0_CNT_3 0xE88364
+
+#define mmTPC2_QM_CP_FENCE0_CNT_4 0xE88368
+
+#define mmTPC2_QM_CP_FENCE1_CNT_0 0xE8836C
+
+#define mmTPC2_QM_CP_FENCE1_CNT_1 0xE88370
+
+#define mmTPC2_QM_CP_FENCE1_CNT_2 0xE88374
+
+#define mmTPC2_QM_CP_FENCE1_CNT_3 0xE88378
+
+#define mmTPC2_QM_CP_FENCE1_CNT_4 0xE8837C
+
+#define mmTPC2_QM_CP_FENCE2_CNT_0 0xE88380
+
+#define mmTPC2_QM_CP_FENCE2_CNT_1 0xE88384
+
+#define mmTPC2_QM_CP_FENCE2_CNT_2 0xE88388
+
+#define mmTPC2_QM_CP_FENCE2_CNT_3 0xE8838C
+
+#define mmTPC2_QM_CP_FENCE2_CNT_4 0xE88390
+
+#define mmTPC2_QM_CP_FENCE3_CNT_0 0xE88394
+
+#define mmTPC2_QM_CP_FENCE3_CNT_1 0xE88398
+
+#define mmTPC2_QM_CP_FENCE3_CNT_2 0xE8839C
+
+#define mmTPC2_QM_CP_FENCE3_CNT_3 0xE883A0
+
+#define mmTPC2_QM_CP_FENCE3_CNT_4 0xE883A4
+
+#define mmTPC2_QM_CP_STS_0 0xE883A8
+
+#define mmTPC2_QM_CP_STS_1 0xE883AC
+
+#define mmTPC2_QM_CP_STS_2 0xE883B0
+
+#define mmTPC2_QM_CP_STS_3 0xE883B4
+
+#define mmTPC2_QM_CP_STS_4 0xE883B8
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_0 0xE883BC
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_1 0xE883C0
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_2 0xE883C4
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_3 0xE883C8
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_4 0xE883CC
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_0 0xE883D0
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_1 0xE883D4
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_2 0xE883D8
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_3 0xE883DC
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_4 0xE883E0
+
+#define mmTPC2_QM_CP_BARRIER_CFG_0 0xE883F4
+
+#define mmTPC2_QM_CP_BARRIER_CFG_1 0xE883F8
+
+#define mmTPC2_QM_CP_BARRIER_CFG_2 0xE883FC
+
+#define mmTPC2_QM_CP_BARRIER_CFG_3 0xE88400
+
+#define mmTPC2_QM_CP_BARRIER_CFG_4 0xE88404
+
+#define mmTPC2_QM_CP_DBG_0_0 0xE88408
+
+#define mmTPC2_QM_CP_DBG_0_1 0xE8840C
+
+#define mmTPC2_QM_CP_DBG_0_2 0xE88410
+
+#define mmTPC2_QM_CP_DBG_0_3 0xE88414
+
+#define mmTPC2_QM_CP_DBG_0_4 0xE88418
+
+#define mmTPC2_QM_CP_ARUSER_31_11_0 0xE8841C
+
+#define mmTPC2_QM_CP_ARUSER_31_11_1 0xE88420
+
+#define mmTPC2_QM_CP_ARUSER_31_11_2 0xE88424
+
+#define mmTPC2_QM_CP_ARUSER_31_11_3 0xE88428
+
+#define mmTPC2_QM_CP_ARUSER_31_11_4 0xE8842C
+
+#define mmTPC2_QM_CP_AWUSER_31_11_0 0xE88430
+
+#define mmTPC2_QM_CP_AWUSER_31_11_1 0xE88434
+
+#define mmTPC2_QM_CP_AWUSER_31_11_2 0xE88438
+
+#define mmTPC2_QM_CP_AWUSER_31_11_3 0xE8843C
+
+#define mmTPC2_QM_CP_AWUSER_31_11_4 0xE88440
+
+#define mmTPC2_QM_ARB_CFG_0 0xE88A00
+
+#define mmTPC2_QM_ARB_CHOISE_Q_PUSH 0xE88A04
+
+#define mmTPC2_QM_ARB_WRR_WEIGHT_0 0xE88A08
+
+#define mmTPC2_QM_ARB_WRR_WEIGHT_1 0xE88A0C
+
+#define mmTPC2_QM_ARB_WRR_WEIGHT_2 0xE88A10
+
+#define mmTPC2_QM_ARB_WRR_WEIGHT_3 0xE88A14
+
+#define mmTPC2_QM_ARB_CFG_1 0xE88A18
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_0 0xE88A20
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_1 0xE88A24
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_2 0xE88A28
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_3 0xE88A2C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_4 0xE88A30
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_5 0xE88A34
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_6 0xE88A38
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_7 0xE88A3C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_8 0xE88A40
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_9 0xE88A44
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_10 0xE88A48
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_11 0xE88A4C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_12 0xE88A50
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_13 0xE88A54
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_14 0xE88A58
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_15 0xE88A5C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_16 0xE88A60
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_17 0xE88A64
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_18 0xE88A68
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_19 0xE88A6C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_20 0xE88A70
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_21 0xE88A74
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_22 0xE88A78
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_23 0xE88A7C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_24 0xE88A80
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_25 0xE88A84
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_26 0xE88A88
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_27 0xE88A8C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_28 0xE88A90
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_29 0xE88A94
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_30 0xE88A98
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_31 0xE88A9C
+
+#define mmTPC2_QM_ARB_MST_CRED_INC 0xE88AA0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xE88AA4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xE88AA8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xE88AAC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xE88AB0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xE88AB4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xE88AB8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xE88ABC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xE88AC0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xE88AC4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xE88AC8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xE88ACC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xE88AD0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xE88AD4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xE88AD8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xE88ADC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xE88AE0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xE88AE4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xE88AE8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xE88AEC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xE88AF0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xE88AF4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xE88AF8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xE88AFC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xE88B00
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xE88B04
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xE88B08
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xE88B0C
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xE88B10
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xE88B14
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xE88B18
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xE88B1C
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xE88B20
+
+#define mmTPC2_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xE88B28
+
+#define mmTPC2_QM_ARB_MST_SLAVE_EN 0xE88B2C
+
+#define mmTPC2_QM_ARB_MST_QUIET_PER 0xE88B34
+
+#define mmTPC2_QM_ARB_SLV_CHOISE_WDT 0xE88B38
+
+#define mmTPC2_QM_ARB_SLV_ID 0xE88B3C
+
+#define mmTPC2_QM_ARB_MSG_MAX_INFLIGHT 0xE88B44
+
+#define mmTPC2_QM_ARB_MSG_AWUSER_31_11 0xE88B48
+
+#define mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP 0xE88B4C
+
+#define mmTPC2_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xE88B50
+
+#define mmTPC2_QM_ARB_BASE_LO 0xE88B54
+
+#define mmTPC2_QM_ARB_BASE_HI 0xE88B58
+
+#define mmTPC2_QM_ARB_STATE_STS 0xE88B80
+
+#define mmTPC2_QM_ARB_CHOISE_FULLNESS_STS 0xE88B84
+
+#define mmTPC2_QM_ARB_MSG_STS 0xE88B88
+
+#define mmTPC2_QM_ARB_SLV_CHOISE_Q_HEAD 0xE88B8C
+
+#define mmTPC2_QM_ARB_ERR_CAUSE 0xE88B9C
+
+#define mmTPC2_QM_ARB_ERR_MSG_EN 0xE88BA0
+
+#define mmTPC2_QM_ARB_ERR_STS_DRP 0xE88BA8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_0 0xE88BB0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_1 0xE88BB4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_2 0xE88BB8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_3 0xE88BBC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_4 0xE88BC0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_5 0xE88BC4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_6 0xE88BC8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_7 0xE88BCC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_8 0xE88BD0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_9 0xE88BD4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_10 0xE88BD8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_11 0xE88BDC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_12 0xE88BE0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_13 0xE88BE4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_14 0xE88BE8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_15 0xE88BEC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_16 0xE88BF0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_17 0xE88BF4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_18 0xE88BF8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_19 0xE88BFC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_20 0xE88C00
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_21 0xE88C04
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_22 0xE88C08
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_23 0xE88C0C
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_24 0xE88C10
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_25 0xE88C14
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_26 0xE88C18
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_27 0xE88C1C
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_28 0xE88C20
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_29 0xE88C24
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_30 0xE88C28
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_31 0xE88C2C
+
+#define mmTPC2_QM_CGM_CFG 0xE88C70
+
+#define mmTPC2_QM_CGM_STS 0xE88C74
+
+#define mmTPC2_QM_CGM_CFG1 0xE88C78
+
+#define mmTPC2_QM_LOCAL_RANGE_BASE 0xE88C80
+
+#define mmTPC2_QM_LOCAL_RANGE_SIZE 0xE88C84
+
+#define mmTPC2_QM_CSMR_STRICT_PRIO_CFG 0xE88C90
+
+#define mmTPC2_QM_HBW_RD_RATE_LIM_CFG_1 0xE88C94
+
+#define mmTPC2_QM_LBW_WR_RATE_LIM_CFG_0 0xE88C98
+
+#define mmTPC2_QM_LBW_WR_RATE_LIM_CFG_1 0xE88C9C
+
+#define mmTPC2_QM_HBW_RD_RATE_LIM_CFG_0 0xE88CA0
+
+#define mmTPC2_QM_GLBL_AXCACHE 0xE88CA4
+
+#define mmTPC2_QM_IND_GW_APB_CFG 0xE88CB0
+
+#define mmTPC2_QM_IND_GW_APB_WDATA 0xE88CB4
+
+#define mmTPC2_QM_IND_GW_APB_RDATA 0xE88CB8
+
+#define mmTPC2_QM_IND_GW_APB_STATUS 0xE88CBC
+
+#define mmTPC2_QM_GLBL_ERR_ADDR_LO 0xE88CD0
+
+#define mmTPC2_QM_GLBL_ERR_ADDR_HI 0xE88CD4
+
+#define mmTPC2_QM_GLBL_ERR_WDATA 0xE88CD8
+
+#define mmTPC2_QM_GLBL_MEM_INIT_BUSY 0xE88D00
+
+#endif /* ASIC_REG_TPC2_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
new file mode 100644
index 000000000000..6d42469659f1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_CFG_REGS_H_
+#define ASIC_REG_TPC3_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xEC6400
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xEC6404
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xEC6408
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xEC640C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xEC6410
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xEC6414
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xEC6418
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xEC641C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xEC6420
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xEC6424
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xEC6428
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xEC642C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xEC6430
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xEC6434
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xEC6438
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xEC643C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xEC6440
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xEC6444
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xEC6448
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xEC644C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xEC6450
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xEC6454
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xEC6458
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xEC645C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xEC6460
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xEC6464
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xEC6468
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xEC646C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xEC6470
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xEC6474
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xEC6478
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xEC647C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xEC6480
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xEC6484
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xEC6488
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xEC648C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xEC6490
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xEC6494
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xEC6498
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xEC649C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xEC64A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xEC64A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xEC64A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xEC64AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xEC64B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xEC64B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xEC64B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xEC64BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xEC64C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xEC64C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xEC64C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xEC64CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xEC64D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xEC64D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xEC64D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xEC64DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xEC64E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xEC64E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xEC64E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xEC64EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xEC64F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xEC64F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xEC64F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xEC64FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xEC6500
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xEC6504
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xEC6508
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xEC650C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xEC6510
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xEC6514
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xEC6518
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xEC651C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xEC6520
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xEC6524
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xEC6528
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xEC652C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xEC6530
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xEC6534
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xEC6538
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xEC653C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xEC6540
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xEC6544
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xEC6548
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xEC654C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xEC6550
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xEC6554
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xEC6558
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xEC655C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xEC6560
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xEC6564
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xEC6568
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xEC656C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xEC6570
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xEC6574
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xEC6578
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xEC657C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xEC6580
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xEC6584
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xEC6588
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xEC658C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xEC6590
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xEC6594
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xEC6598
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xEC659C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xEC65A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xEC65A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xEC65A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xEC65AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xEC65B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xEC65B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xEC65B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xEC65BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xEC65C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xEC65C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xEC65C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xEC65CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xEC65D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xEC65D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xEC65D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xEC65DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xEC65E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xEC65E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xEC65E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xEC65EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xEC65F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xEC65F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xEC65F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xEC65FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xEC6600
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xEC6604
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xEC6608
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xEC660C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xEC6610
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xEC6614
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xEC6618
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xEC661C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xEC6620
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xEC6624
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xEC6628
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xEC662C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xEC6630
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xEC6634
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xEC6638
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xEC663C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xEC6640
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xEC6644
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xEC6648
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xEC664C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xEC6650
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xEC6654
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xEC6658
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xEC665C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xEC6660
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xEC6664
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xEC6668
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xEC666C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xEC6670
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xEC6674
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xEC6678
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xEC667C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xEC6680
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xEC6684
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xEC6688
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xEC668C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xEC6690
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xEC6694
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xEC6698
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xEC669C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xEC66A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xEC66A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xEC66A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xEC66AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xEC66B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xEC66B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xEC66B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xEC66BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xEC66C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xEC66C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xEC66C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xEC66CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xEC66D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xEC66D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xEC66D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xEC66DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xEC66E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xEC66E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xEC66E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xEC66EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xEC66F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xEC66F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xEC66F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xEC66FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xEC6700
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xEC6704
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xEC6708
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xEC670C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xEC6710
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xEC6714
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xEC6718
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xEC671C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xEC6720
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xEC6724
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xEC6728
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xEC672C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xEC6730
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xEC6734
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xEC6738
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xEC673C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xEC6740
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xEC6744
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xEC6748
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xEC674C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xEC6750
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xEC6754
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xEC6758
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xEC675C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xEC6760
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xEC6764
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xEC6768
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xEC676C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xEC6770
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xEC6774
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xEC6778
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xEC677C
+
+#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xEC6780
+
+#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_ADDR 0xEC6784
+
+#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xEC6788
+
+#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xEC678C
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_0 0xEC6790
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_0 0xEC6794
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_1 0xEC6798
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_1 0xEC679C
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_2 0xEC67A0
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_2 0xEC67A4
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_3 0xEC67A8
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_3 0xEC67AC
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_4 0xEC67B0
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_4 0xEC67B4
+
+#define mmTPC3_CFG_KERNEL_KERNEL_CONFIG 0xEC67B8
+
+#define mmTPC3_CFG_KERNEL_KERNEL_ID 0xEC67BC
+
+#define mmTPC3_CFG_KERNEL_SRF_0 0xEC67C0
+
+#define mmTPC3_CFG_KERNEL_SRF_1 0xEC67C4
+
+#define mmTPC3_CFG_KERNEL_SRF_2 0xEC67C8
+
+#define mmTPC3_CFG_KERNEL_SRF_3 0xEC67CC
+
+#define mmTPC3_CFG_KERNEL_SRF_4 0xEC67D0
+
+#define mmTPC3_CFG_KERNEL_SRF_5 0xEC67D4
+
+#define mmTPC3_CFG_KERNEL_SRF_6 0xEC67D8
+
+#define mmTPC3_CFG_KERNEL_SRF_7 0xEC67DC
+
+#define mmTPC3_CFG_KERNEL_SRF_8 0xEC67E0
+
+#define mmTPC3_CFG_KERNEL_SRF_9 0xEC67E4
+
+#define mmTPC3_CFG_KERNEL_SRF_10 0xEC67E8
+
+#define mmTPC3_CFG_KERNEL_SRF_11 0xEC67EC
+
+#define mmTPC3_CFG_KERNEL_SRF_12 0xEC67F0
+
+#define mmTPC3_CFG_KERNEL_SRF_13 0xEC67F4
+
+#define mmTPC3_CFG_KERNEL_SRF_14 0xEC67F8
+
+#define mmTPC3_CFG_KERNEL_SRF_15 0xEC67FC
+
+#define mmTPC3_CFG_KERNEL_SRF_16 0xEC6800
+
+#define mmTPC3_CFG_KERNEL_SRF_17 0xEC6804
+
+#define mmTPC3_CFG_KERNEL_SRF_18 0xEC6808
+
+#define mmTPC3_CFG_KERNEL_SRF_19 0xEC680C
+
+#define mmTPC3_CFG_KERNEL_SRF_20 0xEC6810
+
+#define mmTPC3_CFG_KERNEL_SRF_21 0xEC6814
+
+#define mmTPC3_CFG_KERNEL_SRF_22 0xEC6818
+
+#define mmTPC3_CFG_KERNEL_SRF_23 0xEC681C
+
+#define mmTPC3_CFG_KERNEL_SRF_24 0xEC6820
+
+#define mmTPC3_CFG_KERNEL_SRF_25 0xEC6824
+
+#define mmTPC3_CFG_KERNEL_SRF_26 0xEC6828
+
+#define mmTPC3_CFG_KERNEL_SRF_27 0xEC682C
+
+#define mmTPC3_CFG_KERNEL_SRF_28 0xEC6830
+
+#define mmTPC3_CFG_KERNEL_SRF_29 0xEC6834
+
+#define mmTPC3_CFG_KERNEL_SRF_30 0xEC6838
+
+#define mmTPC3_CFG_KERNEL_SRF_31 0xEC683C
+
+#define mmTPC3_CFG_ROUND_CSR 0xEC68FC
+
+#define mmTPC3_CFG_PROT 0xEC6900
+
+#define mmTPC3_CFG_SEMAPHORE 0xEC6908
+
+#define mmTPC3_CFG_VFLAGS 0xEC690C
+
+#define mmTPC3_CFG_SFLAGS 0xEC6910
+
+#define mmTPC3_CFG_LFSR_POLYNOM 0xEC6918
+
+#define mmTPC3_CFG_STATUS 0xEC691C
+
+#define mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH 0xEC6920
+
+#define mmTPC3_CFG_CFG_SUBTRACT_VALUE 0xEC6924
+
+#define mmTPC3_CFG_SM_BASE_ADDRESS_HIGH 0xEC692C
+
+#define mmTPC3_CFG_TPC_CMD 0xEC6930
+
+#define mmTPC3_CFG_TPC_EXECUTE 0xEC6938
+
+#define mmTPC3_CFG_TPC_STALL 0xEC693C
+
+#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_LOW 0xEC6940
+
+#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH 0xEC6944
+
+#define mmTPC3_CFG_RD_RATE_LIMIT 0xEC6948
+
+#define mmTPC3_CFG_WR_RATE_LIMIT 0xEC6950
+
+#define mmTPC3_CFG_MSS_CONFIG 0xEC6954
+
+#define mmTPC3_CFG_TPC_INTR_CAUSE 0xEC6958
+
+#define mmTPC3_CFG_TPC_INTR_MASK 0xEC695C
+
+#define mmTPC3_CFG_WQ_CREDITS 0xEC6960
+
+#define mmTPC3_CFG_ARUSER_LO 0xEC6964
+
+#define mmTPC3_CFG_ARUSER_HI 0xEC6968
+
+#define mmTPC3_CFG_AWUSER_LO 0xEC696C
+
+#define mmTPC3_CFG_AWUSER_HI 0xEC6970
+
+#define mmTPC3_CFG_OPCODE_EXEC 0xEC6974
+
+#define mmTPC3_CFG_LUT_FUNC32_BASE_ADDR_LO 0xEC6978
+
+#define mmTPC3_CFG_LUT_FUNC32_BASE_ADDR_HI 0xEC697C
+
+#define mmTPC3_CFG_LUT_FUNC64_BASE_ADDR_LO 0xEC6980
+
+#define mmTPC3_CFG_LUT_FUNC64_BASE_ADDR_HI 0xEC6984
+
+#define mmTPC3_CFG_LUT_FUNC128_BASE_ADDR_LO 0xEC6988
+
+#define mmTPC3_CFG_LUT_FUNC128_BASE_ADDR_HI 0xEC698C
+
+#define mmTPC3_CFG_LUT_FUNC256_BASE_ADDR_LO 0xEC6990
+
+#define mmTPC3_CFG_LUT_FUNC256_BASE_ADDR_HI 0xEC6994
+
+#define mmTPC3_CFG_TSB_CFG_MAX_SIZE 0xEC6998
+
+#define mmTPC3_CFG_TSB_CFG 0xEC699C
+
+#define mmTPC3_CFG_DBGMEM_ADD 0xEC69A0
+
+#define mmTPC3_CFG_DBGMEM_DATA_WR 0xEC69A4
+
+#define mmTPC3_CFG_DBGMEM_DATA_RD 0xEC69A8
+
+#define mmTPC3_CFG_DBGMEM_CTRL 0xEC69AC
+
+#define mmTPC3_CFG_DBGMEM_RC 0xEC69B0
+
+#define mmTPC3_CFG_TSB_INFLIGHT_CNTR 0xEC69B4
+
+#define mmTPC3_CFG_WQ_INFLIGHT_CNTR 0xEC69B8
+
+#define mmTPC3_CFG_WQ_LBW_TOTAL_CNTR 0xEC69BC
+
+#define mmTPC3_CFG_WQ_HBW_TOTAL_CNTR 0xEC69C0
+
+#define mmTPC3_CFG_IRQ_OCCOUPY_CNTR 0xEC69C4
+
+#define mmTPC3_CFG_FUNC_MBIST_CNTRL 0xEC69D0
+
+#define mmTPC3_CFG_FUNC_MBIST_PAT 0xEC69D4
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_0 0xEC69D8
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_1 0xEC69DC
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_2 0xEC69E0
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_3 0xEC69E4
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_4 0xEC69E8
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_5 0xEC69EC
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_6 0xEC69F0
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_7 0xEC69F4
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_8 0xEC69F8
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC69FC
+
+#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xEC6A00
+
+#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xEC6A04
+
+#define mmTPC3_CFG_QM_TENSOR_0_PADDING_VALUE 0xEC6A08
+
+#define mmTPC3_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xEC6A0C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_SIZE 0xEC6A10
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xEC6A14
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_SIZE 0xEC6A18
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xEC6A1C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_SIZE 0xEC6A20
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xEC6A24
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_SIZE 0xEC6A28
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xEC6A2C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_SIZE 0xEC6A30
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xEC6A34
+
+#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xEC6A38
+
+#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xEC6A3C
+
+#define mmTPC3_CFG_QM_TENSOR_1_PADDING_VALUE 0xEC6A40
+
+#define mmTPC3_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xEC6A44
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_SIZE 0xEC6A48
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xEC6A4C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_SIZE 0xEC6A50
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xEC6A54
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_SIZE 0xEC6A58
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xEC6A5C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_SIZE 0xEC6A60
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xEC6A64
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_SIZE 0xEC6A68
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xEC6A6C
+
+#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xEC6A70
+
+#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xEC6A74
+
+#define mmTPC3_CFG_QM_TENSOR_2_PADDING_VALUE 0xEC6A78
+
+#define mmTPC3_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xEC6A7C
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_SIZE 0xEC6A80
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xEC6A84
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_SIZE 0xEC6A88
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xEC6A8C
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_SIZE 0xEC6A90
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xEC6A94
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_SIZE 0xEC6A98
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xEC6A9C
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_SIZE 0xEC6AA0
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xEC6AA4
+
+#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xEC6AA8
+
+#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xEC6AAC
+
+#define mmTPC3_CFG_QM_TENSOR_3_PADDING_VALUE 0xEC6AB0
+
+#define mmTPC3_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xEC6AB4
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_SIZE 0xEC6AB8
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xEC6ABC
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_SIZE 0xEC6AC0
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xEC6AC4
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_SIZE 0xEC6AC8
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xEC6ACC
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_SIZE 0xEC6AD0
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xEC6AD4
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_SIZE 0xEC6AD8
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xEC6ADC
+
+#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xEC6AE0
+
+#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xEC6AE4
+
+#define mmTPC3_CFG_QM_TENSOR_4_PADDING_VALUE 0xEC6AE8
+
+#define mmTPC3_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xEC6AEC
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_SIZE 0xEC6AF0
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xEC6AF4
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_SIZE 0xEC6AF8
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xEC6AFC
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_SIZE 0xEC6B00
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xEC6B04
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_SIZE 0xEC6B08
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xEC6B0C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_SIZE 0xEC6B10
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xEC6B14
+
+#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xEC6B18
+
+#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xEC6B1C
+
+#define mmTPC3_CFG_QM_TENSOR_5_PADDING_VALUE 0xEC6B20
+
+#define mmTPC3_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xEC6B24
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_SIZE 0xEC6B28
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xEC6B2C
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_SIZE 0xEC6B30
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xEC6B34
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_SIZE 0xEC6B38
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xEC6B3C
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_SIZE 0xEC6B40
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xEC6B44
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_SIZE 0xEC6B48
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xEC6B4C
+
+#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xEC6B50
+
+#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xEC6B54
+
+#define mmTPC3_CFG_QM_TENSOR_6_PADDING_VALUE 0xEC6B58
+
+#define mmTPC3_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xEC6B5C
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_SIZE 0xEC6B60
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xEC6B64
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_SIZE 0xEC6B68
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xEC6B6C
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_SIZE 0xEC6B70
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xEC6B74
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_SIZE 0xEC6B78
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xEC6B7C
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_SIZE 0xEC6B80
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xEC6B84
+
+#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xEC6B88
+
+#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xEC6B8C
+
+#define mmTPC3_CFG_QM_TENSOR_7_PADDING_VALUE 0xEC6B90
+
+#define mmTPC3_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xEC6B94
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_SIZE 0xEC6B98
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xEC6B9C
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_SIZE 0xEC6BA0
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xEC6BA4
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_SIZE 0xEC6BA8
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xEC6BAC
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_SIZE 0xEC6BB0
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xEC6BB4
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_SIZE 0xEC6BB8
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xEC6BBC
+
+#define mmTPC3_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xEC6BC0
+
+#define mmTPC3_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xEC6BC4
+
+#define mmTPC3_CFG_QM_TENSOR_8_PADDING_VALUE 0xEC6BC8
+
+#define mmTPC3_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xEC6BCC
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_0_SIZE 0xEC6BD0
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xEC6BD4
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_1_SIZE 0xEC6BD8
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xEC6BDC
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_2_SIZE 0xEC6BE0
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xEC6BE4
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_3_SIZE 0xEC6BE8
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xEC6BEC
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_4_SIZE 0xEC6BF0
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xEC6BF4
+
+#define mmTPC3_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xEC6BF8
+
+#define mmTPC3_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xEC6BFC
+
+#define mmTPC3_CFG_QM_TENSOR_9_PADDING_VALUE 0xEC6C00
+
+#define mmTPC3_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xEC6C04
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_0_SIZE 0xEC6C08
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xEC6C0C
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_1_SIZE 0xEC6C10
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xEC6C14
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_2_SIZE 0xEC6C18
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xEC6C1C
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_3_SIZE 0xEC6C20
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xEC6C24
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_4_SIZE 0xEC6C28
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xEC6C2C
+
+#define mmTPC3_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xEC6C30
+
+#define mmTPC3_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xEC6C34
+
+#define mmTPC3_CFG_QM_TENSOR_10_PADDING_VALUE 0xEC6C38
+
+#define mmTPC3_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xEC6C3C
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_0_SIZE 0xEC6C40
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xEC6C44
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_1_SIZE 0xEC6C48
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xEC6C4C
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_2_SIZE 0xEC6C50
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xEC6C54
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_3_SIZE 0xEC6C58
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xEC6C5C
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_4_SIZE 0xEC6C60
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xEC6C64
+
+#define mmTPC3_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xEC6C68
+
+#define mmTPC3_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xEC6C6C
+
+#define mmTPC3_CFG_QM_TENSOR_11_PADDING_VALUE 0xEC6C70
+
+#define mmTPC3_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xEC6C74
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_0_SIZE 0xEC6C78
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xEC6C7C
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_1_SIZE 0xEC6C80
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xEC6C84
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_2_SIZE 0xEC6C88
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xEC6C8C
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_3_SIZE 0xEC6C90
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xEC6C94
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_4_SIZE 0xEC6C98
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xEC6C9C
+
+#define mmTPC3_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xEC6CA0
+
+#define mmTPC3_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xEC6CA4
+
+#define mmTPC3_CFG_QM_TENSOR_12_PADDING_VALUE 0xEC6CA8
+
+#define mmTPC3_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xEC6CAC
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_0_SIZE 0xEC6CB0
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xEC6CB4
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_1_SIZE 0xEC6CB8
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xEC6CBC
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_2_SIZE 0xEC6CC0
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xEC6CC4
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_3_SIZE 0xEC6CC8
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xEC6CCC
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_4_SIZE 0xEC6CD0
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xEC6CD4
+
+#define mmTPC3_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xEC6CD8
+
+#define mmTPC3_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xEC6CDC
+
+#define mmTPC3_CFG_QM_TENSOR_13_PADDING_VALUE 0xEC6CE0
+
+#define mmTPC3_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xEC6CE4
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_0_SIZE 0xEC6CE8
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xEC6CEC
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_1_SIZE 0xEC6CF0
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xEC6CF4
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_2_SIZE 0xEC6CF8
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xEC6CFC
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_3_SIZE 0xEC6D00
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xEC6D04
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_4_SIZE 0xEC6D08
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xEC6D0C
+
+#define mmTPC3_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xEC6D10
+
+#define mmTPC3_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xEC6D14
+
+#define mmTPC3_CFG_QM_TENSOR_14_PADDING_VALUE 0xEC6D18
+
+#define mmTPC3_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xEC6D1C
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_0_SIZE 0xEC6D20
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xEC6D24
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_1_SIZE 0xEC6D28
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xEC6D2C
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_2_SIZE 0xEC6D30
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xEC6D34
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_3_SIZE 0xEC6D38
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xEC6D3C
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_4_SIZE 0xEC6D40
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xEC6D44
+
+#define mmTPC3_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xEC6D48
+
+#define mmTPC3_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xEC6D4C
+
+#define mmTPC3_CFG_QM_TENSOR_15_PADDING_VALUE 0xEC6D50
+
+#define mmTPC3_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xEC6D54
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_0_SIZE 0xEC6D58
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xEC6D5C
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_1_SIZE 0xEC6D60
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xEC6D64
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_2_SIZE 0xEC6D68
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xEC6D6C
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_3_SIZE 0xEC6D70
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xEC6D74
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_4_SIZE 0xEC6D78
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xEC6D7C
+
+#define mmTPC3_CFG_QM_SYNC_OBJECT_MESSAGE 0xEC6D80
+
+#define mmTPC3_CFG_QM_SYNC_OBJECT_ADDR 0xEC6D84
+
+#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xEC6D88
+
+#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xEC6D8C
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_0 0xEC6D90
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_0 0xEC6D94
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_1 0xEC6D98
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_1 0xEC6D9C
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_2 0xEC6DA0
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_2 0xEC6DA4
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_3 0xEC6DA8
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_3 0xEC6DAC
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_4 0xEC6DB0
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_4 0xEC6DB4
+
+#define mmTPC3_CFG_QM_KERNEL_CONFIG 0xEC6DB8
+
+#define mmTPC3_CFG_QM_KERNEL_ID 0xEC6DBC
+
+#define mmTPC3_CFG_QM_SRF_0 0xEC6DC0
+
+#define mmTPC3_CFG_QM_SRF_1 0xEC6DC4
+
+#define mmTPC3_CFG_QM_SRF_2 0xEC6DC8
+
+#define mmTPC3_CFG_QM_SRF_3 0xEC6DCC
+
+#define mmTPC3_CFG_QM_SRF_4 0xEC6DD0
+
+#define mmTPC3_CFG_QM_SRF_5 0xEC6DD4
+
+#define mmTPC3_CFG_QM_SRF_6 0xEC6DD8
+
+#define mmTPC3_CFG_QM_SRF_7 0xEC6DDC
+
+#define mmTPC3_CFG_QM_SRF_8 0xEC6DE0
+
+#define mmTPC3_CFG_QM_SRF_9 0xEC6DE4
+
+#define mmTPC3_CFG_QM_SRF_10 0xEC6DE8
+
+#define mmTPC3_CFG_QM_SRF_11 0xEC6DEC
+
+#define mmTPC3_CFG_QM_SRF_12 0xEC6DF0
+
+#define mmTPC3_CFG_QM_SRF_13 0xEC6DF4
+
+#define mmTPC3_CFG_QM_SRF_14 0xEC6DF8
+
+#define mmTPC3_CFG_QM_SRF_15 0xEC6DFC
+
+#define mmTPC3_CFG_QM_SRF_16 0xEC6E00
+
+#define mmTPC3_CFG_QM_SRF_17 0xEC6E04
+
+#define mmTPC3_CFG_QM_SRF_18 0xEC6E08
+
+#define mmTPC3_CFG_QM_SRF_19 0xEC6E0C
+
+#define mmTPC3_CFG_QM_SRF_20 0xEC6E10
+
+#define mmTPC3_CFG_QM_SRF_21 0xEC6E14
+
+#define mmTPC3_CFG_QM_SRF_22 0xEC6E18
+
+#define mmTPC3_CFG_QM_SRF_23 0xEC6E1C
+
+#define mmTPC3_CFG_QM_SRF_24 0xEC6E20
+
+#define mmTPC3_CFG_QM_SRF_25 0xEC6E24
+
+#define mmTPC3_CFG_QM_SRF_26 0xEC6E28
+
+#define mmTPC3_CFG_QM_SRF_27 0xEC6E2C
+
+#define mmTPC3_CFG_QM_SRF_28 0xEC6E30
+
+#define mmTPC3_CFG_QM_SRF_29 0xEC6E34
+
+#define mmTPC3_CFG_QM_SRF_30 0xEC6E38
+
+#define mmTPC3_CFG_QM_SRF_31 0xEC6E3C
+
+#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
new file mode 100644
index 000000000000..5f2a0fd86c9e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_QM_REGS_H_
+#define ASIC_REG_TPC3_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC3_QM_GLBL_CFG0 0xEC8000
+
+#define mmTPC3_QM_GLBL_CFG1 0xEC8004
+
+#define mmTPC3_QM_GLBL_PROT 0xEC8008
+
+#define mmTPC3_QM_GLBL_ERR_CFG 0xEC800C
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_0 0xEC8010
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_1 0xEC8014
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_2 0xEC8018
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_3 0xEC801C
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_4 0xEC8020
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_0 0xEC8024
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_1 0xEC8028
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_2 0xEC802C
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_3 0xEC8030
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_4 0xEC8034
+
+#define mmTPC3_QM_GLBL_STS0 0xEC8038
+
+#define mmTPC3_QM_GLBL_STS1_0 0xEC8040
+
+#define mmTPC3_QM_GLBL_STS1_1 0xEC8044
+
+#define mmTPC3_QM_GLBL_STS1_2 0xEC8048
+
+#define mmTPC3_QM_GLBL_STS1_3 0xEC804C
+
+#define mmTPC3_QM_GLBL_STS1_4 0xEC8050
+
+#define mmTPC3_QM_GLBL_MSG_EN_0 0xEC8054
+
+#define mmTPC3_QM_GLBL_MSG_EN_1 0xEC8058
+
+#define mmTPC3_QM_GLBL_MSG_EN_2 0xEC805C
+
+#define mmTPC3_QM_GLBL_MSG_EN_3 0xEC8060
+
+#define mmTPC3_QM_GLBL_MSG_EN_4 0xEC8068
+
+#define mmTPC3_QM_PQ_BASE_LO_0 0xEC8070
+
+#define mmTPC3_QM_PQ_BASE_LO_1 0xEC8074
+
+#define mmTPC3_QM_PQ_BASE_LO_2 0xEC8078
+
+#define mmTPC3_QM_PQ_BASE_LO_3 0xEC807C
+
+#define mmTPC3_QM_PQ_BASE_HI_0 0xEC8080
+
+#define mmTPC3_QM_PQ_BASE_HI_1 0xEC8084
+
+#define mmTPC3_QM_PQ_BASE_HI_2 0xEC8088
+
+#define mmTPC3_QM_PQ_BASE_HI_3 0xEC808C
+
+#define mmTPC3_QM_PQ_SIZE_0 0xEC8090
+
+#define mmTPC3_QM_PQ_SIZE_1 0xEC8094
+
+#define mmTPC3_QM_PQ_SIZE_2 0xEC8098
+
+#define mmTPC3_QM_PQ_SIZE_3 0xEC809C
+
+#define mmTPC3_QM_PQ_PI_0 0xEC80A0
+
+#define mmTPC3_QM_PQ_PI_1 0xEC80A4
+
+#define mmTPC3_QM_PQ_PI_2 0xEC80A8
+
+#define mmTPC3_QM_PQ_PI_3 0xEC80AC
+
+#define mmTPC3_QM_PQ_CI_0 0xEC80B0
+
+#define mmTPC3_QM_PQ_CI_1 0xEC80B4
+
+#define mmTPC3_QM_PQ_CI_2 0xEC80B8
+
+#define mmTPC3_QM_PQ_CI_3 0xEC80BC
+
+#define mmTPC3_QM_PQ_CFG0_0 0xEC80C0
+
+#define mmTPC3_QM_PQ_CFG0_1 0xEC80C4
+
+#define mmTPC3_QM_PQ_CFG0_2 0xEC80C8
+
+#define mmTPC3_QM_PQ_CFG0_3 0xEC80CC
+
+#define mmTPC3_QM_PQ_CFG1_0 0xEC80D0
+
+#define mmTPC3_QM_PQ_CFG1_1 0xEC80D4
+
+#define mmTPC3_QM_PQ_CFG1_2 0xEC80D8
+
+#define mmTPC3_QM_PQ_CFG1_3 0xEC80DC
+
+#define mmTPC3_QM_PQ_ARUSER_31_11_0 0xEC80E0
+
+#define mmTPC3_QM_PQ_ARUSER_31_11_1 0xEC80E4
+
+#define mmTPC3_QM_PQ_ARUSER_31_11_2 0xEC80E8
+
+#define mmTPC3_QM_PQ_ARUSER_31_11_3 0xEC80EC
+
+#define mmTPC3_QM_PQ_STS0_0 0xEC80F0
+
+#define mmTPC3_QM_PQ_STS0_1 0xEC80F4
+
+#define mmTPC3_QM_PQ_STS0_2 0xEC80F8
+
+#define mmTPC3_QM_PQ_STS0_3 0xEC80FC
+
+#define mmTPC3_QM_PQ_STS1_0 0xEC8100
+
+#define mmTPC3_QM_PQ_STS1_1 0xEC8104
+
+#define mmTPC3_QM_PQ_STS1_2 0xEC8108
+
+#define mmTPC3_QM_PQ_STS1_3 0xEC810C
+
+#define mmTPC3_QM_CQ_CFG0_0 0xEC8110
+
+#define mmTPC3_QM_CQ_CFG0_1 0xEC8114
+
+#define mmTPC3_QM_CQ_CFG0_2 0xEC8118
+
+#define mmTPC3_QM_CQ_CFG0_3 0xEC811C
+
+#define mmTPC3_QM_CQ_CFG0_4 0xEC8120
+
+#define mmTPC3_QM_CQ_CFG1_0 0xEC8124
+
+#define mmTPC3_QM_CQ_CFG1_1 0xEC8128
+
+#define mmTPC3_QM_CQ_CFG1_2 0xEC812C
+
+#define mmTPC3_QM_CQ_CFG1_3 0xEC8130
+
+#define mmTPC3_QM_CQ_CFG1_4 0xEC8134
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_0 0xEC8138
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_1 0xEC813C
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_2 0xEC8140
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_3 0xEC8144
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_4 0xEC8148
+
+#define mmTPC3_QM_CQ_STS0_0 0xEC814C
+
+#define mmTPC3_QM_CQ_STS0_1 0xEC8150
+
+#define mmTPC3_QM_CQ_STS0_2 0xEC8154
+
+#define mmTPC3_QM_CQ_STS0_3 0xEC8158
+
+#define mmTPC3_QM_CQ_STS0_4 0xEC815C
+
+#define mmTPC3_QM_CQ_STS1_0 0xEC8160
+
+#define mmTPC3_QM_CQ_STS1_1 0xEC8164
+
+#define mmTPC3_QM_CQ_STS1_2 0xEC8168
+
+#define mmTPC3_QM_CQ_STS1_3 0xEC816C
+
+#define mmTPC3_QM_CQ_STS1_4 0xEC8170
+
+#define mmTPC3_QM_CQ_PTR_LO_0 0xEC8174
+
+#define mmTPC3_QM_CQ_PTR_HI_0 0xEC8178
+
+#define mmTPC3_QM_CQ_TSIZE_0 0xEC817C
+
+#define mmTPC3_QM_CQ_CTL_0 0xEC8180
+
+#define mmTPC3_QM_CQ_PTR_LO_1 0xEC8184
+
+#define mmTPC3_QM_CQ_PTR_HI_1 0xEC8188
+
+#define mmTPC3_QM_CQ_TSIZE_1 0xEC818C
+
+#define mmTPC3_QM_CQ_CTL_1 0xEC8190
+
+#define mmTPC3_QM_CQ_PTR_LO_2 0xEC8194
+
+#define mmTPC3_QM_CQ_PTR_HI_2 0xEC8198
+
+#define mmTPC3_QM_CQ_TSIZE_2 0xEC819C
+
+#define mmTPC3_QM_CQ_CTL_2 0xEC81A0
+
+#define mmTPC3_QM_CQ_PTR_LO_3 0xEC81A4
+
+#define mmTPC3_QM_CQ_PTR_HI_3 0xEC81A8
+
+#define mmTPC3_QM_CQ_TSIZE_3 0xEC81AC
+
+#define mmTPC3_QM_CQ_CTL_3 0xEC81B0
+
+#define mmTPC3_QM_CQ_PTR_LO_4 0xEC81B4
+
+#define mmTPC3_QM_CQ_PTR_HI_4 0xEC81B8
+
+#define mmTPC3_QM_CQ_TSIZE_4 0xEC81BC
+
+#define mmTPC3_QM_CQ_CTL_4 0xEC81C0
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_0 0xEC81C4
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_1 0xEC81C8
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_2 0xEC81CC
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_3 0xEC81D0
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_4 0xEC81D4
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_0 0xEC81D8
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_1 0xEC81DC
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_2 0xEC81E0
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_3 0xEC81E4
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_4 0xEC81E8
+
+#define mmTPC3_QM_CQ_TSIZE_STS_0 0xEC81EC
+
+#define mmTPC3_QM_CQ_TSIZE_STS_1 0xEC81F0
+
+#define mmTPC3_QM_CQ_TSIZE_STS_2 0xEC81F4
+
+#define mmTPC3_QM_CQ_TSIZE_STS_3 0xEC81F8
+
+#define mmTPC3_QM_CQ_TSIZE_STS_4 0xEC81FC
+
+#define mmTPC3_QM_CQ_CTL_STS_0 0xEC8200
+
+#define mmTPC3_QM_CQ_CTL_STS_1 0xEC8204
+
+#define mmTPC3_QM_CQ_CTL_STS_2 0xEC8208
+
+#define mmTPC3_QM_CQ_CTL_STS_3 0xEC820C
+
+#define mmTPC3_QM_CQ_CTL_STS_4 0xEC8210
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_0 0xEC8214
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_1 0xEC8218
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_2 0xEC821C
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_3 0xEC8220
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_4 0xEC8224
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_0 0xEC8228
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_1 0xEC822C
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_2 0xEC8230
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_3 0xEC8234
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_4 0xEC8238
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_0 0xEC823C
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_1 0xEC8240
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_2 0xEC8244
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_3 0xEC8248
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_4 0xEC824C
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_0 0xEC8250
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_1 0xEC8254
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_2 0xEC8258
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_3 0xEC825C
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_4 0xEC8260
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_0 0xEC8264
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_1 0xEC8268
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_2 0xEC826C
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_3 0xEC8270
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_4 0xEC8274
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_0 0xEC8278
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_1 0xEC827C
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 0xEC8280
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_3 0xEC8284
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_4 0xEC8288
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_0 0xEC828C
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_1 0xEC8290
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_2 0xEC8294
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_3 0xEC8298
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_4 0xEC829C
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_0 0xEC82A0
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_1 0xEC82A4
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_2 0xEC82A8
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_3 0xEC82AC
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_4 0xEC82B0
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_0 0xEC82B4
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_1 0xEC82B8
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_2 0xEC82BC
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_3 0xEC82C0
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_4 0xEC82C4
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_0 0xEC82C8
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_1 0xEC82CC
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_2 0xEC82D0
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_3 0xEC82D4
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_4 0xEC82D8
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xEC82E0
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xEC82E4
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xEC82E8
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xEC82EC
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xEC82F0
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xEC82F4
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xEC82F8
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xEC82FC
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xEC8300
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xEC8304
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_0 0xEC8308
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_1 0xEC830C
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_2 0xEC8310
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_3 0xEC8314
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_4 0xEC8318
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_0 0xEC831C
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_1 0xEC8320
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_2 0xEC8324
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_3 0xEC8328
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_4 0xEC832C
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_0 0xEC8330
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_1 0xEC8334
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_2 0xEC8338
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_3 0xEC833C
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_4 0xEC8340
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_0 0xEC8344
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_1 0xEC8348
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_2 0xEC834C
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_3 0xEC8350
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_4 0xEC8354
+
+#define mmTPC3_QM_CP_FENCE0_CNT_0 0xEC8358
+
+#define mmTPC3_QM_CP_FENCE0_CNT_1 0xEC835C
+
+#define mmTPC3_QM_CP_FENCE0_CNT_2 0xEC8360
+
+#define mmTPC3_QM_CP_FENCE0_CNT_3 0xEC8364
+
+#define mmTPC3_QM_CP_FENCE0_CNT_4 0xEC8368
+
+#define mmTPC3_QM_CP_FENCE1_CNT_0 0xEC836C
+
+#define mmTPC3_QM_CP_FENCE1_CNT_1 0xEC8370
+
+#define mmTPC3_QM_CP_FENCE1_CNT_2 0xEC8374
+
+#define mmTPC3_QM_CP_FENCE1_CNT_3 0xEC8378
+
+#define mmTPC3_QM_CP_FENCE1_CNT_4 0xEC837C
+
+#define mmTPC3_QM_CP_FENCE2_CNT_0 0xEC8380
+
+#define mmTPC3_QM_CP_FENCE2_CNT_1 0xEC8384
+
+#define mmTPC3_QM_CP_FENCE2_CNT_2 0xEC8388
+
+#define mmTPC3_QM_CP_FENCE2_CNT_3 0xEC838C
+
+#define mmTPC3_QM_CP_FENCE2_CNT_4 0xEC8390
+
+#define mmTPC3_QM_CP_FENCE3_CNT_0 0xEC8394
+
+#define mmTPC3_QM_CP_FENCE3_CNT_1 0xEC8398
+
+#define mmTPC3_QM_CP_FENCE3_CNT_2 0xEC839C
+
+#define mmTPC3_QM_CP_FENCE3_CNT_3 0xEC83A0
+
+#define mmTPC3_QM_CP_FENCE3_CNT_4 0xEC83A4
+
+#define mmTPC3_QM_CP_STS_0 0xEC83A8
+
+#define mmTPC3_QM_CP_STS_1 0xEC83AC
+
+#define mmTPC3_QM_CP_STS_2 0xEC83B0
+
+#define mmTPC3_QM_CP_STS_3 0xEC83B4
+
+#define mmTPC3_QM_CP_STS_4 0xEC83B8
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_0 0xEC83BC
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_1 0xEC83C0
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_2 0xEC83C4
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_3 0xEC83C8
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_4 0xEC83CC
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_0 0xEC83D0
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_1 0xEC83D4
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_2 0xEC83D8
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_3 0xEC83DC
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_4 0xEC83E0
+
+#define mmTPC3_QM_CP_BARRIER_CFG_0 0xEC83F4
+
+#define mmTPC3_QM_CP_BARRIER_CFG_1 0xEC83F8
+
+#define mmTPC3_QM_CP_BARRIER_CFG_2 0xEC83FC
+
+#define mmTPC3_QM_CP_BARRIER_CFG_3 0xEC8400
+
+#define mmTPC3_QM_CP_BARRIER_CFG_4 0xEC8404
+
+#define mmTPC3_QM_CP_DBG_0_0 0xEC8408
+
+#define mmTPC3_QM_CP_DBG_0_1 0xEC840C
+
+#define mmTPC3_QM_CP_DBG_0_2 0xEC8410
+
+#define mmTPC3_QM_CP_DBG_0_3 0xEC8414
+
+#define mmTPC3_QM_CP_DBG_0_4 0xEC8418
+
+#define mmTPC3_QM_CP_ARUSER_31_11_0 0xEC841C
+
+#define mmTPC3_QM_CP_ARUSER_31_11_1 0xEC8420
+
+#define mmTPC3_QM_CP_ARUSER_31_11_2 0xEC8424
+
+#define mmTPC3_QM_CP_ARUSER_31_11_3 0xEC8428
+
+#define mmTPC3_QM_CP_ARUSER_31_11_4 0xEC842C
+
+#define mmTPC3_QM_CP_AWUSER_31_11_0 0xEC8430
+
+#define mmTPC3_QM_CP_AWUSER_31_11_1 0xEC8434
+
+#define mmTPC3_QM_CP_AWUSER_31_11_2 0xEC8438
+
+#define mmTPC3_QM_CP_AWUSER_31_11_3 0xEC843C
+
+#define mmTPC3_QM_CP_AWUSER_31_11_4 0xEC8440
+
+#define mmTPC3_QM_ARB_CFG_0 0xEC8A00
+
+#define mmTPC3_QM_ARB_CHOISE_Q_PUSH 0xEC8A04
+
+#define mmTPC3_QM_ARB_WRR_WEIGHT_0 0xEC8A08
+
+#define mmTPC3_QM_ARB_WRR_WEIGHT_1 0xEC8A0C
+
+#define mmTPC3_QM_ARB_WRR_WEIGHT_2 0xEC8A10
+
+#define mmTPC3_QM_ARB_WRR_WEIGHT_3 0xEC8A14
+
+#define mmTPC3_QM_ARB_CFG_1 0xEC8A18
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_0 0xEC8A20
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_1 0xEC8A24
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_2 0xEC8A28
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_3 0xEC8A2C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_4 0xEC8A30
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_5 0xEC8A34
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_6 0xEC8A38
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_7 0xEC8A3C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_8 0xEC8A40
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_9 0xEC8A44
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_10 0xEC8A48
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_11 0xEC8A4C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_12 0xEC8A50
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_13 0xEC8A54
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_14 0xEC8A58
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_15 0xEC8A5C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_16 0xEC8A60
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_17 0xEC8A64
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_18 0xEC8A68
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_19 0xEC8A6C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_20 0xEC8A70
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_21 0xEC8A74
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_22 0xEC8A78
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_23 0xEC8A7C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_24 0xEC8A80
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_25 0xEC8A84
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_26 0xEC8A88
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_27 0xEC8A8C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_28 0xEC8A90
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_29 0xEC8A94
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_30 0xEC8A98
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_31 0xEC8A9C
+
+#define mmTPC3_QM_ARB_MST_CRED_INC 0xEC8AA0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xEC8AA4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xEC8AA8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xEC8AAC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xEC8AB0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xEC8AB4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xEC8AB8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xEC8ABC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xEC8AC0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xEC8AC4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xEC8AC8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xEC8ACC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xEC8AD0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xEC8AD4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xEC8AD8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xEC8ADC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xEC8AE0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xEC8AE4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xEC8AE8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xEC8AEC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xEC8AF0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xEC8AF4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xEC8AF8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xEC8AFC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xEC8B00
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xEC8B04
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xEC8B08
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xEC8B0C
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xEC8B10
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xEC8B14
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xEC8B18
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xEC8B1C
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xEC8B20
+
+#define mmTPC3_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xEC8B28
+
+#define mmTPC3_QM_ARB_MST_SLAVE_EN 0xEC8B2C
+
+#define mmTPC3_QM_ARB_MST_QUIET_PER 0xEC8B34
+
+#define mmTPC3_QM_ARB_SLV_CHOISE_WDT 0xEC8B38
+
+#define mmTPC3_QM_ARB_SLV_ID 0xEC8B3C
+
+#define mmTPC3_QM_ARB_MSG_MAX_INFLIGHT 0xEC8B44
+
+#define mmTPC3_QM_ARB_MSG_AWUSER_31_11 0xEC8B48
+
+#define mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP 0xEC8B4C
+
+#define mmTPC3_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xEC8B50
+
+#define mmTPC3_QM_ARB_BASE_LO 0xEC8B54
+
+#define mmTPC3_QM_ARB_BASE_HI 0xEC8B58
+
+#define mmTPC3_QM_ARB_STATE_STS 0xEC8B80
+
+#define mmTPC3_QM_ARB_CHOISE_FULLNESS_STS 0xEC8B84
+
+#define mmTPC3_QM_ARB_MSG_STS 0xEC8B88
+
+#define mmTPC3_QM_ARB_SLV_CHOISE_Q_HEAD 0xEC8B8C
+
+#define mmTPC3_QM_ARB_ERR_CAUSE 0xEC8B9C
+
+#define mmTPC3_QM_ARB_ERR_MSG_EN 0xEC8BA0
+
+#define mmTPC3_QM_ARB_ERR_STS_DRP 0xEC8BA8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_0 0xEC8BB0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_1 0xEC8BB4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_2 0xEC8BB8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_3 0xEC8BBC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_4 0xEC8BC0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_5 0xEC8BC4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_6 0xEC8BC8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_7 0xEC8BCC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_8 0xEC8BD0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_9 0xEC8BD4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_10 0xEC8BD8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_11 0xEC8BDC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_12 0xEC8BE0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_13 0xEC8BE4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_14 0xEC8BE8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_15 0xEC8BEC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_16 0xEC8BF0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_17 0xEC8BF4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_18 0xEC8BF8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_19 0xEC8BFC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_20 0xEC8C00
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_21 0xEC8C04
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_22 0xEC8C08
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_23 0xEC8C0C
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_24 0xEC8C10
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_25 0xEC8C14
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_26 0xEC8C18
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_27 0xEC8C1C
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_28 0xEC8C20
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_29 0xEC8C24
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_30 0xEC8C28
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_31 0xEC8C2C
+
+#define mmTPC3_QM_CGM_CFG 0xEC8C70
+
+#define mmTPC3_QM_CGM_STS 0xEC8C74
+
+#define mmTPC3_QM_CGM_CFG1 0xEC8C78
+
+#define mmTPC3_QM_LOCAL_RANGE_BASE 0xEC8C80
+
+#define mmTPC3_QM_LOCAL_RANGE_SIZE 0xEC8C84
+
+#define mmTPC3_QM_CSMR_STRICT_PRIO_CFG 0xEC8C90
+
+#define mmTPC3_QM_HBW_RD_RATE_LIM_CFG_1 0xEC8C94
+
+#define mmTPC3_QM_LBW_WR_RATE_LIM_CFG_0 0xEC8C98
+
+#define mmTPC3_QM_LBW_WR_RATE_LIM_CFG_1 0xEC8C9C
+
+#define mmTPC3_QM_HBW_RD_RATE_LIM_CFG_0 0xEC8CA0
+
+#define mmTPC3_QM_GLBL_AXCACHE 0xEC8CA4
+
+#define mmTPC3_QM_IND_GW_APB_CFG 0xEC8CB0
+
+#define mmTPC3_QM_IND_GW_APB_WDATA 0xEC8CB4
+
+#define mmTPC3_QM_IND_GW_APB_RDATA 0xEC8CB8
+
+#define mmTPC3_QM_IND_GW_APB_STATUS 0xEC8CBC
+
+#define mmTPC3_QM_GLBL_ERR_ADDR_LO 0xEC8CD0
+
+#define mmTPC3_QM_GLBL_ERR_ADDR_HI 0xEC8CD4
+
+#define mmTPC3_QM_GLBL_ERR_WDATA 0xEC8CD8
+
+#define mmTPC3_QM_GLBL_MEM_INIT_BUSY 0xEC8D00
+
+#endif /* ASIC_REG_TPC3_QM_REGS_H_ */
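
Indexed register families in this header, such as mmTPC3_QM_ARB_MST_CRED_STS_0 through _31 above, are laid out contiguously at a 4-byte stride, so the _N suffix can be computed rather than spelled out per index. A minimal sketch of that pattern follows; rreg32() is a hypothetical MMIO read helper (not part of this patch), and only the stride is taken from the definitions above.

/*
 * Read the arbiter master credit status for a given master index.
 * Sketch only: rreg32() is an assumed MMIO read helper; the 4-byte
 * stride between the _0.._31 registers follows from the addresses
 * defined above (0xEC8BB0, 0xEC8BB4, ...).
 */
static inline u32 tpc3_qm_arb_mst_cred_sts(int master)
{
	/* mmTPC3_QM_ARB_MST_CRED_STS_0 + 4 * master covers _0.._31 */
	return rreg32(mmTPC3_QM_ARB_MST_CRED_STS_0 + 4 * master);
}
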
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
new file mode 100644
index 000000000000..7a9447f39a74
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_CFG_REGS_H_
+#define ASIC_REG_TPC4_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF06400
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF06404
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF06408
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF0640C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF06410
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF06414
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF06418
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF0641C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF06420
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF06424
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF06428
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF0642C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF06430
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF06434
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF06438
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF0643C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF06440
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF06444
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF06448
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF0644C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF06450
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF06454
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF06458
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF0645C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF06460
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF06464
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF06468
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF0646C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF06470
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF06474
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF06478
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF0647C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF06480
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF06484
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF06488
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF0648C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF06490
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF06494
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF06498
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF0649C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF064A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF064A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF064A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF064AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF064B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF064B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF064B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF064BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF064C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF064C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF064C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF064CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF064D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF064D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF064D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF064DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF064E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF064E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF064E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF064EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF064F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF064F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF064F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF064FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF06500
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF06504
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF06508
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF0650C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF06510
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF06514
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF06518
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF0651C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF06520
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF06524
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF06528
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF0652C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF06530
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF06534
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF06538
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF0653C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF06540
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF06544
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF06548
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF0654C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF06550
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF06554
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF06558
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF0655C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF06560
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF06564
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF06568
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF0656C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF06570
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF06574
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF06578
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF0657C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF06580
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF06584
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF06588
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF0658C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF06590
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF06594
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF06598
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF0659C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF065A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF065A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF065A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF065AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF065B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF065B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF065B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF065BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xF065C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xF065C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xF065C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xF065CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xF065D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xF065D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xF065D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xF065DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xF065E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xF065E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xF065E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xF065EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xF065F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xF065F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xF065F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xF065FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xF06600
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xF06604
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xF06608
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xF0660C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xF06610
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xF06614
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xF06618
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xF0661C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xF06620
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xF06624
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xF06628
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xF0662C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xF06630
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xF06634
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xF06638
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xF0663C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xF06640
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xF06644
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xF06648
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xF0664C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xF06650
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xF06654
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xF06658
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xF0665C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xF06660
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xF06664
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xF06668
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xF0666C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xF06670
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xF06674
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xF06678
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xF0667C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xF06680
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xF06684
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xF06688
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xF0668C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xF06690
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xF06694
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xF06698
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xF0669C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xF066A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xF066A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xF066A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xF066AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xF066B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xF066B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xF066B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xF066BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xF066C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xF066C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xF066C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xF066CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xF066D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xF066D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xF066D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xF066DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xF066E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xF066E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xF066E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xF066EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xF066F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xF066F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xF066F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xF066FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xF06700
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xF06704
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xF06708
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xF0670C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xF06710
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xF06714
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xF06718
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xF0671C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xF06720
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xF06724
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xF06728
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xF0672C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xF06730
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xF06734
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xF06738
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xF0673C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xF06740
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xF06744
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xF06748
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xF0674C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xF06750
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xF06754
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xF06758
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xF0675C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xF06760
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xF06764
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xF06768
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xF0676C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xF06770
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xF06774
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xF06778
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xF0677C
+
+#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF06780
+
+#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_ADDR 0xF06784
+
+#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF06788
+
+#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF0678C
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_0 0xF06790
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_0 0xF06794
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_1 0xF06798
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_1 0xF0679C
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_2 0xF067A0
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_2 0xF067A4
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_3 0xF067A8
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_3 0xF067AC
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_4 0xF067B0
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_4 0xF067B4
+
+#define mmTPC4_CFG_KERNEL_KERNEL_CONFIG 0xF067B8
+
+#define mmTPC4_CFG_KERNEL_KERNEL_ID 0xF067BC
+
+#define mmTPC4_CFG_KERNEL_SRF_0 0xF067C0
+
+#define mmTPC4_CFG_KERNEL_SRF_1 0xF067C4
+
+#define mmTPC4_CFG_KERNEL_SRF_2 0xF067C8
+
+#define mmTPC4_CFG_KERNEL_SRF_3 0xF067CC
+
+#define mmTPC4_CFG_KERNEL_SRF_4 0xF067D0
+
+#define mmTPC4_CFG_KERNEL_SRF_5 0xF067D4
+
+#define mmTPC4_CFG_KERNEL_SRF_6 0xF067D8
+
+#define mmTPC4_CFG_KERNEL_SRF_7 0xF067DC
+
+#define mmTPC4_CFG_KERNEL_SRF_8 0xF067E0
+
+#define mmTPC4_CFG_KERNEL_SRF_9 0xF067E4
+
+#define mmTPC4_CFG_KERNEL_SRF_10 0xF067E8
+
+#define mmTPC4_CFG_KERNEL_SRF_11 0xF067EC
+
+#define mmTPC4_CFG_KERNEL_SRF_12 0xF067F0
+
+#define mmTPC4_CFG_KERNEL_SRF_13 0xF067F4
+
+#define mmTPC4_CFG_KERNEL_SRF_14 0xF067F8
+
+#define mmTPC4_CFG_KERNEL_SRF_15 0xF067FC
+
+#define mmTPC4_CFG_KERNEL_SRF_16 0xF06800
+
+#define mmTPC4_CFG_KERNEL_SRF_17 0xF06804
+
+#define mmTPC4_CFG_KERNEL_SRF_18 0xF06808
+
+#define mmTPC4_CFG_KERNEL_SRF_19 0xF0680C
+
+#define mmTPC4_CFG_KERNEL_SRF_20 0xF06810
+
+#define mmTPC4_CFG_KERNEL_SRF_21 0xF06814
+
+#define mmTPC4_CFG_KERNEL_SRF_22 0xF06818
+
+#define mmTPC4_CFG_KERNEL_SRF_23 0xF0681C
+
+#define mmTPC4_CFG_KERNEL_SRF_24 0xF06820
+
+#define mmTPC4_CFG_KERNEL_SRF_25 0xF06824
+
+#define mmTPC4_CFG_KERNEL_SRF_26 0xF06828
+
+#define mmTPC4_CFG_KERNEL_SRF_27 0xF0682C
+
+#define mmTPC4_CFG_KERNEL_SRF_28 0xF06830
+
+#define mmTPC4_CFG_KERNEL_SRF_29 0xF06834
+
+#define mmTPC4_CFG_KERNEL_SRF_30 0xF06838
+
+#define mmTPC4_CFG_KERNEL_SRF_31 0xF0683C
+
+#define mmTPC4_CFG_ROUND_CSR 0xF068FC
+
+#define mmTPC4_CFG_PROT 0xF06900
+
+#define mmTPC4_CFG_SEMAPHORE 0xF06908
+
+#define mmTPC4_CFG_VFLAGS 0xF0690C
+
+#define mmTPC4_CFG_SFLAGS 0xF06910
+
+#define mmTPC4_CFG_LFSR_POLYNOM 0xF06918
+
+#define mmTPC4_CFG_STATUS 0xF0691C
+
+#define mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH 0xF06920
+
+#define mmTPC4_CFG_CFG_SUBTRACT_VALUE 0xF06924
+
+#define mmTPC4_CFG_SM_BASE_ADDRESS_HIGH 0xF0692C
+
+#define mmTPC4_CFG_TPC_CMD 0xF06930
+
+#define mmTPC4_CFG_TPC_EXECUTE 0xF06938
+
+#define mmTPC4_CFG_TPC_STALL 0xF0693C
+
+#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_LOW 0xF06940
+
+#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF06944
+
+#define mmTPC4_CFG_RD_RATE_LIMIT 0xF06948
+
+#define mmTPC4_CFG_WR_RATE_LIMIT 0xF06950
+
+#define mmTPC4_CFG_MSS_CONFIG 0xF06954
+
+#define mmTPC4_CFG_TPC_INTR_CAUSE 0xF06958
+
+#define mmTPC4_CFG_TPC_INTR_MASK 0xF0695C
+
+#define mmTPC4_CFG_WQ_CREDITS 0xF06960
+
+#define mmTPC4_CFG_ARUSER_LO 0xF06964
+
+#define mmTPC4_CFG_ARUSER_HI 0xF06968
+
+#define mmTPC4_CFG_AWUSER_LO 0xF0696C
+
+#define mmTPC4_CFG_AWUSER_HI 0xF06970
+
+#define mmTPC4_CFG_OPCODE_EXEC 0xF06974
+
+#define mmTPC4_CFG_LUT_FUNC32_BASE_ADDR_LO 0xF06978
+
+#define mmTPC4_CFG_LUT_FUNC32_BASE_ADDR_HI 0xF0697C
+
+#define mmTPC4_CFG_LUT_FUNC64_BASE_ADDR_LO 0xF06980
+
+#define mmTPC4_CFG_LUT_FUNC64_BASE_ADDR_HI 0xF06984
+
+#define mmTPC4_CFG_LUT_FUNC128_BASE_ADDR_LO 0xF06988
+
+#define mmTPC4_CFG_LUT_FUNC128_BASE_ADDR_HI 0xF0698C
+
+#define mmTPC4_CFG_LUT_FUNC256_BASE_ADDR_LO 0xF06990
+
+#define mmTPC4_CFG_LUT_FUNC256_BASE_ADDR_HI 0xF06994
+
+#define mmTPC4_CFG_TSB_CFG_MAX_SIZE 0xF06998
+
+#define mmTPC4_CFG_TSB_CFG 0xF0699C
+
+#define mmTPC4_CFG_DBGMEM_ADD 0xF069A0
+
+#define mmTPC4_CFG_DBGMEM_DATA_WR 0xF069A4
+
+#define mmTPC4_CFG_DBGMEM_DATA_RD 0xF069A8
+
+#define mmTPC4_CFG_DBGMEM_CTRL 0xF069AC
+
+#define mmTPC4_CFG_DBGMEM_RC 0xF069B0
+
+#define mmTPC4_CFG_TSB_INFLIGHT_CNTR 0xF069B4
+
+#define mmTPC4_CFG_WQ_INFLIGHT_CNTR 0xF069B8
+
+#define mmTPC4_CFG_WQ_LBW_TOTAL_CNTR 0xF069BC
+
+#define mmTPC4_CFG_WQ_HBW_TOTAL_CNTR 0xF069C0
+
+#define mmTPC4_CFG_IRQ_OCCOUPY_CNTR 0xF069C4
+
+#define mmTPC4_CFG_FUNC_MBIST_CNTRL 0xF069D0
+
+#define mmTPC4_CFG_FUNC_MBIST_PAT 0xF069D4
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_0 0xF069D8
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_1 0xF069DC
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_2 0xF069E0
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_3 0xF069E4
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_4 0xF069E8
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_5 0xF069EC
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_6 0xF069F0
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_7 0xF069F4
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_8 0xF069F8
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF069FC
+
+#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF06A00
+
+#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF06A04
+
+#define mmTPC4_CFG_QM_TENSOR_0_PADDING_VALUE 0xF06A08
+
+#define mmTPC4_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF06A0C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF06A10
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF06A14
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF06A18
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF06A1C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF06A20
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF06A24
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF06A28
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF06A2C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF06A30
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF06A34
+
+#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF06A38
+
+#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF06A3C
+
+#define mmTPC4_CFG_QM_TENSOR_1_PADDING_VALUE 0xF06A40
+
+#define mmTPC4_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF06A44
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF06A48
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF06A4C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF06A50
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF06A54
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF06A58
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF06A5C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF06A60
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF06A64
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF06A68
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF06A6C
+
+#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF06A70
+
+#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF06A74
+
+#define mmTPC4_CFG_QM_TENSOR_2_PADDING_VALUE 0xF06A78
+
+#define mmTPC4_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF06A7C
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF06A80
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF06A84
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF06A88
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF06A8C
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF06A90
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF06A94
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF06A98
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF06A9C
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF06AA0
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF06AA4
+
+#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF06AA8
+
+#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF06AAC
+
+#define mmTPC4_CFG_QM_TENSOR_3_PADDING_VALUE 0xF06AB0
+
+#define mmTPC4_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF06AB4
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF06AB8
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF06ABC
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF06AC0
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF06AC4
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF06AC8
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF06ACC
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF06AD0
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF06AD4
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF06AD8
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF06ADC
+
+#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF06AE0
+
+#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF06AE4
+
+#define mmTPC4_CFG_QM_TENSOR_4_PADDING_VALUE 0xF06AE8
+
+#define mmTPC4_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF06AEC
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF06AF0
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF06AF4
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF06AF8
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF06AFC
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF06B00
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF06B04
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF06B08
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF06B0C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF06B10
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF06B14
+
+#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF06B18
+
+#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF06B1C
+
+#define mmTPC4_CFG_QM_TENSOR_5_PADDING_VALUE 0xF06B20
+
+#define mmTPC4_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF06B24
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF06B28
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF06B2C
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF06B30
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF06B34
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF06B38
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF06B3C
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF06B40
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF06B44
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF06B48
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF06B4C
+
+#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF06B50
+
+#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF06B54
+
+#define mmTPC4_CFG_QM_TENSOR_6_PADDING_VALUE 0xF06B58
+
+#define mmTPC4_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF06B5C
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF06B60
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF06B64
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF06B68
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF06B6C
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF06B70
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF06B74
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF06B78
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF06B7C
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF06B80
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF06B84
+
+#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF06B88
+
+#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF06B8C
+
+#define mmTPC4_CFG_QM_TENSOR_7_PADDING_VALUE 0xF06B90
+
+#define mmTPC4_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF06B94
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF06B98
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF06B9C
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF06BA0
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF06BA4
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF06BA8
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF06BAC
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF06BB0
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF06BB4
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF06BB8
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF06BBC
+
+#define mmTPC4_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xF06BC0
+
+#define mmTPC4_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xF06BC4
+
+#define mmTPC4_CFG_QM_TENSOR_8_PADDING_VALUE 0xF06BC8
+
+#define mmTPC4_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xF06BCC
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_0_SIZE 0xF06BD0
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xF06BD4
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_1_SIZE 0xF06BD8
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xF06BDC
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_2_SIZE 0xF06BE0
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xF06BE4
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_3_SIZE 0xF06BE8
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xF06BEC
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_4_SIZE 0xF06BF0
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xF06BF4
+
+#define mmTPC4_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xF06BF8
+
+#define mmTPC4_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xF06BFC
+
+#define mmTPC4_CFG_QM_TENSOR_9_PADDING_VALUE 0xF06C00
+
+#define mmTPC4_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xF06C04
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_0_SIZE 0xF06C08
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xF06C0C
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_1_SIZE 0xF06C10
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xF06C14
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_2_SIZE 0xF06C18
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xF06C1C
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_3_SIZE 0xF06C20
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xF06C24
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_4_SIZE 0xF06C28
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xF06C2C
+
+#define mmTPC4_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xF06C30
+
+#define mmTPC4_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xF06C34
+
+#define mmTPC4_CFG_QM_TENSOR_10_PADDING_VALUE 0xF06C38
+
+#define mmTPC4_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xF06C3C
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_0_SIZE 0xF06C40
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xF06C44
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_1_SIZE 0xF06C48
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xF06C4C
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_2_SIZE 0xF06C50
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xF06C54
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_3_SIZE 0xF06C58
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xF06C5C
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_4_SIZE 0xF06C60
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xF06C64
+
+#define mmTPC4_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xF06C68
+
+#define mmTPC4_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xF06C6C
+
+#define mmTPC4_CFG_QM_TENSOR_11_PADDING_VALUE 0xF06C70
+
+#define mmTPC4_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xF06C74
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_0_SIZE 0xF06C78
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xF06C7C
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_1_SIZE 0xF06C80
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xF06C84
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_2_SIZE 0xF06C88
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xF06C8C
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_3_SIZE 0xF06C90
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xF06C94
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_4_SIZE 0xF06C98
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xF06C9C
+
+#define mmTPC4_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xF06CA0
+
+#define mmTPC4_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xF06CA4
+
+#define mmTPC4_CFG_QM_TENSOR_12_PADDING_VALUE 0xF06CA8
+
+#define mmTPC4_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xF06CAC
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_0_SIZE 0xF06CB0
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xF06CB4
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_1_SIZE 0xF06CB8
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xF06CBC
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_2_SIZE 0xF06CC0
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xF06CC4
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_3_SIZE 0xF06CC8
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xF06CCC
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_4_SIZE 0xF06CD0
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xF06CD4
+
+#define mmTPC4_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xF06CD8
+
+#define mmTPC4_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xF06CDC
+
+#define mmTPC4_CFG_QM_TENSOR_13_PADDING_VALUE 0xF06CE0
+
+#define mmTPC4_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xF06CE4
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_0_SIZE 0xF06CE8
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xF06CEC
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_1_SIZE 0xF06CF0
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xF06CF4
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_2_SIZE 0xF06CF8
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xF06CFC
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_3_SIZE 0xF06D00
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xF06D04
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_4_SIZE 0xF06D08
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xF06D0C
+
+#define mmTPC4_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xF06D10
+
+#define mmTPC4_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xF06D14
+
+#define mmTPC4_CFG_QM_TENSOR_14_PADDING_VALUE 0xF06D18
+
+#define mmTPC4_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xF06D1C
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_0_SIZE 0xF06D20
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xF06D24
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_1_SIZE 0xF06D28
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xF06D2C
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_2_SIZE 0xF06D30
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xF06D34
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_3_SIZE 0xF06D38
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xF06D3C
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_4_SIZE 0xF06D40
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xF06D44
+
+#define mmTPC4_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xF06D48
+
+#define mmTPC4_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xF06D4C
+
+#define mmTPC4_CFG_QM_TENSOR_15_PADDING_VALUE 0xF06D50
+
+#define mmTPC4_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xF06D54
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_0_SIZE 0xF06D58
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xF06D5C
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_1_SIZE 0xF06D60
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xF06D64
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_2_SIZE 0xF06D68
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xF06D6C
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_3_SIZE 0xF06D70
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xF06D74
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_4_SIZE 0xF06D78
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xF06D7C
+
+#define mmTPC4_CFG_QM_SYNC_OBJECT_MESSAGE 0xF06D80
+
+#define mmTPC4_CFG_QM_SYNC_OBJECT_ADDR 0xF06D84
+
+#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF06D88
+
+#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF06D8C
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_0 0xF06D90
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_0 0xF06D94
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_1 0xF06D98
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_1 0xF06D9C
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_2 0xF06DA0
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_2 0xF06DA4
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_3 0xF06DA8
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_3 0xF06DAC
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_4 0xF06DB0
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_4 0xF06DB4
+
+#define mmTPC4_CFG_QM_KERNEL_CONFIG 0xF06DB8
+
+#define mmTPC4_CFG_QM_KERNEL_ID 0xF06DBC
+
+#define mmTPC4_CFG_QM_SRF_0 0xF06DC0
+
+#define mmTPC4_CFG_QM_SRF_1 0xF06DC4
+
+#define mmTPC4_CFG_QM_SRF_2 0xF06DC8
+
+#define mmTPC4_CFG_QM_SRF_3 0xF06DCC
+
+#define mmTPC4_CFG_QM_SRF_4 0xF06DD0
+
+#define mmTPC4_CFG_QM_SRF_5 0xF06DD4
+
+#define mmTPC4_CFG_QM_SRF_6 0xF06DD8
+
+#define mmTPC4_CFG_QM_SRF_7 0xF06DDC
+
+#define mmTPC4_CFG_QM_SRF_8 0xF06DE0
+
+#define mmTPC4_CFG_QM_SRF_9 0xF06DE4
+
+#define mmTPC4_CFG_QM_SRF_10 0xF06DE8
+
+#define mmTPC4_CFG_QM_SRF_11 0xF06DEC
+
+#define mmTPC4_CFG_QM_SRF_12 0xF06DF0
+
+#define mmTPC4_CFG_QM_SRF_13 0xF06DF4
+
+#define mmTPC4_CFG_QM_SRF_14 0xF06DF8
+
+#define mmTPC4_CFG_QM_SRF_15 0xF06DFC
+
+#define mmTPC4_CFG_QM_SRF_16 0xF06E00
+
+#define mmTPC4_CFG_QM_SRF_17 0xF06E04
+
+#define mmTPC4_CFG_QM_SRF_18 0xF06E08
+
+#define mmTPC4_CFG_QM_SRF_19 0xF06E0C
+
+#define mmTPC4_CFG_QM_SRF_20 0xF06E10
+
+#define mmTPC4_CFG_QM_SRF_21 0xF06E14
+
+#define mmTPC4_CFG_QM_SRF_22 0xF06E18
+
+#define mmTPC4_CFG_QM_SRF_23 0xF06E1C
+
+#define mmTPC4_CFG_QM_SRF_24 0xF06E20
+
+#define mmTPC4_CFG_QM_SRF_25 0xF06E24
+
+#define mmTPC4_CFG_QM_SRF_26 0xF06E28
+
+#define mmTPC4_CFG_QM_SRF_27 0xF06E2C
+
+#define mmTPC4_CFG_QM_SRF_28 0xF06E30
+
+#define mmTPC4_CFG_QM_SRF_29 0xF06E34
+
+#define mmTPC4_CFG_QM_SRF_30 0xF06E38
+
+#define mmTPC4_CFG_QM_SRF_31 0xF06E3C
+
+#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
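
The sixteen KERNEL_TENSOR_n (and matching QM_TENSOR_n) descriptor banks above share one layout: a 0x38-byte block holding BASE_ADDR_LOW/HIGH, PADDING_VALUE, TENSOR_CONFIG, and five DIM_d SIZE/STRIDE pairs at 8-byte steps. Per-tensor and per-dimension offsets can therefore be derived instead of enumerated. A minimal sketch under that layout; wreg32() is a hypothetical MMIO write helper, while both strides follow directly from the addresses above (0xF06438 - 0xF06400 = 0x38 per tensor, 0xF06418 - 0xF06410 = 8 per dimension).

/*
 * Address of a tensor-descriptor field, derived from the layout above:
 * each tensor bank spans 0x38 bytes, each DIM_d SIZE/STRIDE pair 8.
 * wreg32() is an assumed MMIO write helper, not part of this patch.
 */
#define TPC4_CFG_TENSOR_SZ	0x38
#define TPC4_CFG_DIM_SZ		0x8

static inline void tpc4_cfg_set_dim_size(int tensor, int dim, u32 size)
{
	u32 addr = mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_SIZE +
		   tensor * TPC4_CFG_TENSOR_SZ + dim * TPC4_CFG_DIM_SZ;

	wreg32(addr, size);
}
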
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
new file mode 100644
index 000000000000..80e63402f6e0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_QM_REGS_H_
+#define ASIC_REG_TPC4_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC4_QM_GLBL_CFG0 0xF08000
+
+#define mmTPC4_QM_GLBL_CFG1 0xF08004
+
+#define mmTPC4_QM_GLBL_PROT 0xF08008
+
+#define mmTPC4_QM_GLBL_ERR_CFG 0xF0800C
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_0 0xF08010
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_1 0xF08014
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_2 0xF08018
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_3 0xF0801C
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_4 0xF08020
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_0 0xF08024
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_1 0xF08028
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_2 0xF0802C
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_3 0xF08030
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_4 0xF08034
+
+#define mmTPC4_QM_GLBL_STS0 0xF08038
+
+#define mmTPC4_QM_GLBL_STS1_0 0xF08040
+
+#define mmTPC4_QM_GLBL_STS1_1 0xF08044
+
+#define mmTPC4_QM_GLBL_STS1_2 0xF08048
+
+#define mmTPC4_QM_GLBL_STS1_3 0xF0804C
+
+#define mmTPC4_QM_GLBL_STS1_4 0xF08050
+
+#define mmTPC4_QM_GLBL_MSG_EN_0 0xF08054
+
+#define mmTPC4_QM_GLBL_MSG_EN_1 0xF08058
+
+#define mmTPC4_QM_GLBL_MSG_EN_2 0xF0805C
+
+#define mmTPC4_QM_GLBL_MSG_EN_3 0xF08060
+
+#define mmTPC4_QM_GLBL_MSG_EN_4 0xF08068
+
+#define mmTPC4_QM_PQ_BASE_LO_0 0xF08070
+
+#define mmTPC4_QM_PQ_BASE_LO_1 0xF08074
+
+#define mmTPC4_QM_PQ_BASE_LO_2 0xF08078
+
+#define mmTPC4_QM_PQ_BASE_LO_3 0xF0807C
+
+#define mmTPC4_QM_PQ_BASE_HI_0 0xF08080
+
+#define mmTPC4_QM_PQ_BASE_HI_1 0xF08084
+
+#define mmTPC4_QM_PQ_BASE_HI_2 0xF08088
+
+#define mmTPC4_QM_PQ_BASE_HI_3 0xF0808C
+
+#define mmTPC4_QM_PQ_SIZE_0 0xF08090
+
+#define mmTPC4_QM_PQ_SIZE_1 0xF08094
+
+#define mmTPC4_QM_PQ_SIZE_2 0xF08098
+
+#define mmTPC4_QM_PQ_SIZE_3 0xF0809C
+
+#define mmTPC4_QM_PQ_PI_0 0xF080A0
+
+#define mmTPC4_QM_PQ_PI_1 0xF080A4
+
+#define mmTPC4_QM_PQ_PI_2 0xF080A8
+
+#define mmTPC4_QM_PQ_PI_3 0xF080AC
+
+#define mmTPC4_QM_PQ_CI_0 0xF080B0
+
+#define mmTPC4_QM_PQ_CI_1 0xF080B4
+
+#define mmTPC4_QM_PQ_CI_2 0xF080B8
+
+#define mmTPC4_QM_PQ_CI_3 0xF080BC
+
+#define mmTPC4_QM_PQ_CFG0_0 0xF080C0
+
+#define mmTPC4_QM_PQ_CFG0_1 0xF080C4
+
+#define mmTPC4_QM_PQ_CFG0_2 0xF080C8
+
+#define mmTPC4_QM_PQ_CFG0_3 0xF080CC
+
+#define mmTPC4_QM_PQ_CFG1_0 0xF080D0
+
+#define mmTPC4_QM_PQ_CFG1_1 0xF080D4
+
+#define mmTPC4_QM_PQ_CFG1_2 0xF080D8
+
+#define mmTPC4_QM_PQ_CFG1_3 0xF080DC
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_0 0xF080E0
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_1 0xF080E4
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_2 0xF080E8
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_3 0xF080EC
+
+#define mmTPC4_QM_PQ_STS0_0 0xF080F0
+
+#define mmTPC4_QM_PQ_STS0_1 0xF080F4
+
+#define mmTPC4_QM_PQ_STS0_2 0xF080F8
+
+#define mmTPC4_QM_PQ_STS0_3 0xF080FC
+
+#define mmTPC4_QM_PQ_STS1_0 0xF08100
+
+#define mmTPC4_QM_PQ_STS1_1 0xF08104
+
+#define mmTPC4_QM_PQ_STS1_2 0xF08108
+
+#define mmTPC4_QM_PQ_STS1_3 0xF0810C
+
+#define mmTPC4_QM_CQ_CFG0_0 0xF08110
+
+#define mmTPC4_QM_CQ_CFG0_1 0xF08114
+
+#define mmTPC4_QM_CQ_CFG0_2 0xF08118
+
+#define mmTPC4_QM_CQ_CFG0_3 0xF0811C
+
+#define mmTPC4_QM_CQ_CFG0_4 0xF08120
+
+#define mmTPC4_QM_CQ_CFG1_0 0xF08124
+
+#define mmTPC4_QM_CQ_CFG1_1 0xF08128
+
+#define mmTPC4_QM_CQ_CFG1_2 0xF0812C
+
+#define mmTPC4_QM_CQ_CFG1_3 0xF08130
+
+#define mmTPC4_QM_CQ_CFG1_4 0xF08134
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_0 0xF08138
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_1 0xF0813C
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_2 0xF08140
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_3 0xF08144
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_4 0xF08148
+
+#define mmTPC4_QM_CQ_STS0_0 0xF0814C
+
+#define mmTPC4_QM_CQ_STS0_1 0xF08150
+
+#define mmTPC4_QM_CQ_STS0_2 0xF08154
+
+#define mmTPC4_QM_CQ_STS0_3 0xF08158
+
+#define mmTPC4_QM_CQ_STS0_4 0xF0815C
+
+#define mmTPC4_QM_CQ_STS1_0 0xF08160
+
+#define mmTPC4_QM_CQ_STS1_1 0xF08164
+
+#define mmTPC4_QM_CQ_STS1_2 0xF08168
+
+#define mmTPC4_QM_CQ_STS1_3 0xF0816C
+
+#define mmTPC4_QM_CQ_STS1_4 0xF08170
+
+#define mmTPC4_QM_CQ_PTR_LO_0 0xF08174
+
+#define mmTPC4_QM_CQ_PTR_HI_0 0xF08178
+
+#define mmTPC4_QM_CQ_TSIZE_0 0xF0817C
+
+#define mmTPC4_QM_CQ_CTL_0 0xF08180
+
+#define mmTPC4_QM_CQ_PTR_LO_1 0xF08184
+
+#define mmTPC4_QM_CQ_PTR_HI_1 0xF08188
+
+#define mmTPC4_QM_CQ_TSIZE_1 0xF0818C
+
+#define mmTPC4_QM_CQ_CTL_1 0xF08190
+
+#define mmTPC4_QM_CQ_PTR_LO_2 0xF08194
+
+#define mmTPC4_QM_CQ_PTR_HI_2 0xF08198
+
+#define mmTPC4_QM_CQ_TSIZE_2 0xF0819C
+
+#define mmTPC4_QM_CQ_CTL_2 0xF081A0
+
+#define mmTPC4_QM_CQ_PTR_LO_3 0xF081A4
+
+#define mmTPC4_QM_CQ_PTR_HI_3 0xF081A8
+
+#define mmTPC4_QM_CQ_TSIZE_3 0xF081AC
+
+#define mmTPC4_QM_CQ_CTL_3 0xF081B0
+
+#define mmTPC4_QM_CQ_PTR_LO_4 0xF081B4
+
+#define mmTPC4_QM_CQ_PTR_HI_4 0xF081B8
+
+#define mmTPC4_QM_CQ_TSIZE_4 0xF081BC
+
+#define mmTPC4_QM_CQ_CTL_4 0xF081C0
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_0 0xF081C4
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_1 0xF081C8
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_2 0xF081CC
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_3 0xF081D0
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_4 0xF081D4
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_0 0xF081D8
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_1 0xF081DC
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_2 0xF081E0
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_3 0xF081E4
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_4 0xF081E8
+
+#define mmTPC4_QM_CQ_TSIZE_STS_0 0xF081EC
+
+#define mmTPC4_QM_CQ_TSIZE_STS_1 0xF081F0
+
+#define mmTPC4_QM_CQ_TSIZE_STS_2 0xF081F4
+
+#define mmTPC4_QM_CQ_TSIZE_STS_3 0xF081F8
+
+#define mmTPC4_QM_CQ_TSIZE_STS_4 0xF081FC
+
+#define mmTPC4_QM_CQ_CTL_STS_0 0xF08200
+
+#define mmTPC4_QM_CQ_CTL_STS_1 0xF08204
+
+#define mmTPC4_QM_CQ_CTL_STS_2 0xF08208
+
+#define mmTPC4_QM_CQ_CTL_STS_3 0xF0820C
+
+#define mmTPC4_QM_CQ_CTL_STS_4 0xF08210
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_0 0xF08214
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_1 0xF08218
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_2 0xF0821C
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_3 0xF08220
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_4 0xF08224
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_0 0xF08228
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_1 0xF0822C
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_2 0xF08230
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_3 0xF08234
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_4 0xF08238
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_0 0xF0823C
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_1 0xF08240
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_2 0xF08244
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_3 0xF08248
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_4 0xF0824C
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_0 0xF08250
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_1 0xF08254
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_2 0xF08258
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_3 0xF0825C
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_4 0xF08260
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_0 0xF08264
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_1 0xF08268
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_2 0xF0826C
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_3 0xF08270
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_4 0xF08274
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_0 0xF08278
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_1 0xF0827C
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 0xF08280
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_3 0xF08284
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_4 0xF08288
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_0 0xF0828C
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_1 0xF08290
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_2 0xF08294
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_3 0xF08298
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_4 0xF0829C
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_0 0xF082A0
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_1 0xF082A4
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_2 0xF082A8
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_3 0xF082AC
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_4 0xF082B0
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_0 0xF082B4
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_1 0xF082B8
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_2 0xF082BC
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_3 0xF082C0
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_4 0xF082C4
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_0 0xF082C8
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_1 0xF082CC
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_2 0xF082D0
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_3 0xF082D4
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_4 0xF082D8
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xF082E0
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xF082E4
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xF082E8
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xF082EC
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xF082F0
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xF082F4
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xF082F8
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xF082FC
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xF08300
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xF08304
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_0 0xF08308
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_1 0xF0830C
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_2 0xF08310
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_3 0xF08314
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_4 0xF08318
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_0 0xF0831C
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_1 0xF08320
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_2 0xF08324
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_3 0xF08328
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_4 0xF0832C
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_0 0xF08330
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_1 0xF08334
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_2 0xF08338
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_3 0xF0833C
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_4 0xF08340
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_0 0xF08344
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_1 0xF08348
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_2 0xF0834C
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_3 0xF08350
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_4 0xF08354
+
+#define mmTPC4_QM_CP_FENCE0_CNT_0 0xF08358
+
+#define mmTPC4_QM_CP_FENCE0_CNT_1 0xF0835C
+
+#define mmTPC4_QM_CP_FENCE0_CNT_2 0xF08360
+
+#define mmTPC4_QM_CP_FENCE0_CNT_3 0xF08364
+
+#define mmTPC4_QM_CP_FENCE0_CNT_4 0xF08368
+
+#define mmTPC4_QM_CP_FENCE1_CNT_0 0xF0836C
+
+#define mmTPC4_QM_CP_FENCE1_CNT_1 0xF08370
+
+#define mmTPC4_QM_CP_FENCE1_CNT_2 0xF08374
+
+#define mmTPC4_QM_CP_FENCE1_CNT_3 0xF08378
+
+#define mmTPC4_QM_CP_FENCE1_CNT_4 0xF0837C
+
+#define mmTPC4_QM_CP_FENCE2_CNT_0 0xF08380
+
+#define mmTPC4_QM_CP_FENCE2_CNT_1 0xF08384
+
+#define mmTPC4_QM_CP_FENCE2_CNT_2 0xF08388
+
+#define mmTPC4_QM_CP_FENCE2_CNT_3 0xF0838C
+
+#define mmTPC4_QM_CP_FENCE2_CNT_4 0xF08390
+
+#define mmTPC4_QM_CP_FENCE3_CNT_0 0xF08394
+
+#define mmTPC4_QM_CP_FENCE3_CNT_1 0xF08398
+
+#define mmTPC4_QM_CP_FENCE3_CNT_2 0xF0839C
+
+#define mmTPC4_QM_CP_FENCE3_CNT_3 0xF083A0
+
+#define mmTPC4_QM_CP_FENCE3_CNT_4 0xF083A4
+
+#define mmTPC4_QM_CP_STS_0 0xF083A8
+
+#define mmTPC4_QM_CP_STS_1 0xF083AC
+
+#define mmTPC4_QM_CP_STS_2 0xF083B0
+
+#define mmTPC4_QM_CP_STS_3 0xF083B4
+
+#define mmTPC4_QM_CP_STS_4 0xF083B8
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_0 0xF083BC
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_1 0xF083C0
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_2 0xF083C4
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_3 0xF083C8
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_4 0xF083CC
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_0 0xF083D0
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_1 0xF083D4
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_2 0xF083D8
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_3 0xF083DC
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_4 0xF083E0
+
+#define mmTPC4_QM_CP_BARRIER_CFG_0 0xF083F4
+
+#define mmTPC4_QM_CP_BARRIER_CFG_1 0xF083F8
+
+#define mmTPC4_QM_CP_BARRIER_CFG_2 0xF083FC
+
+#define mmTPC4_QM_CP_BARRIER_CFG_3 0xF08400
+
+#define mmTPC4_QM_CP_BARRIER_CFG_4 0xF08404
+
+#define mmTPC4_QM_CP_DBG_0_0 0xF08408
+
+#define mmTPC4_QM_CP_DBG_0_1 0xF0840C
+
+#define mmTPC4_QM_CP_DBG_0_2 0xF08410
+
+#define mmTPC4_QM_CP_DBG_0_3 0xF08414
+
+#define mmTPC4_QM_CP_DBG_0_4 0xF08418
+
+#define mmTPC4_QM_CP_ARUSER_31_11_0 0xF0841C
+
+#define mmTPC4_QM_CP_ARUSER_31_11_1 0xF08420
+
+#define mmTPC4_QM_CP_ARUSER_31_11_2 0xF08424
+
+#define mmTPC4_QM_CP_ARUSER_31_11_3 0xF08428
+
+#define mmTPC4_QM_CP_ARUSER_31_11_4 0xF0842C
+
+#define mmTPC4_QM_CP_AWUSER_31_11_0 0xF08430
+
+#define mmTPC4_QM_CP_AWUSER_31_11_1 0xF08434
+
+#define mmTPC4_QM_CP_AWUSER_31_11_2 0xF08438
+
+#define mmTPC4_QM_CP_AWUSER_31_11_3 0xF0843C
+
+#define mmTPC4_QM_CP_AWUSER_31_11_4 0xF08440
+
+#define mmTPC4_QM_ARB_CFG_0 0xF08A00
+
+#define mmTPC4_QM_ARB_CHOISE_Q_PUSH 0xF08A04
+
+#define mmTPC4_QM_ARB_WRR_WEIGHT_0 0xF08A08
+
+#define mmTPC4_QM_ARB_WRR_WEIGHT_1 0xF08A0C
+
+#define mmTPC4_QM_ARB_WRR_WEIGHT_2 0xF08A10
+
+#define mmTPC4_QM_ARB_WRR_WEIGHT_3 0xF08A14
+
+#define mmTPC4_QM_ARB_CFG_1 0xF08A18
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_0 0xF08A20
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_1 0xF08A24
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_2 0xF08A28
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_3 0xF08A2C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_4 0xF08A30
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_5 0xF08A34
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_6 0xF08A38
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_7 0xF08A3C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_8 0xF08A40
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_9 0xF08A44
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_10 0xF08A48
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_11 0xF08A4C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_12 0xF08A50
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_13 0xF08A54
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_14 0xF08A58
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_15 0xF08A5C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_16 0xF08A60
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_17 0xF08A64
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_18 0xF08A68
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_19 0xF08A6C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_20 0xF08A70
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_21 0xF08A74
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_22 0xF08A78
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_23 0xF08A7C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_24 0xF08A80
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_25 0xF08A84
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_26 0xF08A88
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_27 0xF08A8C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_28 0xF08A90
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_29 0xF08A94
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_30 0xF08A98
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_31 0xF08A9C
+
+#define mmTPC4_QM_ARB_MST_CRED_INC 0xF08AA0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xF08AA4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xF08AA8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xF08AAC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xF08AB0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xF08AB4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xF08AB8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xF08ABC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xF08AC0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xF08AC4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xF08AC8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xF08ACC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xF08AD0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xF08AD4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xF08AD8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xF08ADC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xF08AE0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xF08AE4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xF08AE8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xF08AEC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xF08AF0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xF08AF4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xF08AF8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xF08AFC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xF08B00
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xF08B04
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xF08B08
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xF08B0C
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xF08B10
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xF08B14
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xF08B18
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xF08B1C
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xF08B20
+
+#define mmTPC4_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xF08B28
+
+#define mmTPC4_QM_ARB_MST_SLAVE_EN 0xF08B2C
+
+#define mmTPC4_QM_ARB_MST_QUIET_PER 0xF08B34
+
+#define mmTPC4_QM_ARB_SLV_CHOISE_WDT 0xF08B38
+
+#define mmTPC4_QM_ARB_SLV_ID 0xF08B3C
+
+#define mmTPC4_QM_ARB_MSG_MAX_INFLIGHT 0xF08B44
+
+#define mmTPC4_QM_ARB_MSG_AWUSER_31_11 0xF08B48
+
+#define mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP 0xF08B4C
+
+#define mmTPC4_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xF08B50
+
+#define mmTPC4_QM_ARB_BASE_LO 0xF08B54
+
+#define mmTPC4_QM_ARB_BASE_HI 0xF08B58
+
+#define mmTPC4_QM_ARB_STATE_STS 0xF08B80
+
+#define mmTPC4_QM_ARB_CHOISE_FULLNESS_STS 0xF08B84
+
+#define mmTPC4_QM_ARB_MSG_STS 0xF08B88
+
+#define mmTPC4_QM_ARB_SLV_CHOISE_Q_HEAD 0xF08B8C
+
+#define mmTPC4_QM_ARB_ERR_CAUSE 0xF08B9C
+
+#define mmTPC4_QM_ARB_ERR_MSG_EN 0xF08BA0
+
+#define mmTPC4_QM_ARB_ERR_STS_DRP 0xF08BA8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_0 0xF08BB0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_1 0xF08BB4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_2 0xF08BB8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_3 0xF08BBC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_4 0xF08BC0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_5 0xF08BC4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_6 0xF08BC8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_7 0xF08BCC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_8 0xF08BD0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_9 0xF08BD4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_10 0xF08BD8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_11 0xF08BDC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_12 0xF08BE0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_13 0xF08BE4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_14 0xF08BE8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_15 0xF08BEC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_16 0xF08BF0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_17 0xF08BF4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_18 0xF08BF8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_19 0xF08BFC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_20 0xF08C00
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_21 0xF08C04
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_22 0xF08C08
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_23 0xF08C0C
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_24 0xF08C10
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_25 0xF08C14
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_26 0xF08C18
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_27 0xF08C1C
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_28 0xF08C20
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_29 0xF08C24
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_30 0xF08C28
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_31 0xF08C2C
+
+#define mmTPC4_QM_CGM_CFG 0xF08C70
+
+#define mmTPC4_QM_CGM_STS 0xF08C74
+
+#define mmTPC4_QM_CGM_CFG1 0xF08C78
+
+#define mmTPC4_QM_LOCAL_RANGE_BASE 0xF08C80
+
+#define mmTPC4_QM_LOCAL_RANGE_SIZE 0xF08C84
+
+#define mmTPC4_QM_CSMR_STRICT_PRIO_CFG 0xF08C90
+
+#define mmTPC4_QM_HBW_RD_RATE_LIM_CFG_1 0xF08C94
+
+#define mmTPC4_QM_LBW_WR_RATE_LIM_CFG_0 0xF08C98
+
+#define mmTPC4_QM_LBW_WR_RATE_LIM_CFG_1 0xF08C9C
+
+#define mmTPC4_QM_HBW_RD_RATE_LIM_CFG_0 0xF08CA0
+
+#define mmTPC4_QM_GLBL_AXCACHE 0xF08CA4
+
+#define mmTPC4_QM_IND_GW_APB_CFG 0xF08CB0
+
+#define mmTPC4_QM_IND_GW_APB_WDATA 0xF08CB4
+
+#define mmTPC4_QM_IND_GW_APB_RDATA 0xF08CB8
+
+#define mmTPC4_QM_IND_GW_APB_STATUS 0xF08CBC
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_LO 0xF08CD0
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_HI 0xF08CD4
+
+#define mmTPC4_QM_GLBL_ERR_WDATA 0xF08CD8
+
+#define mmTPC4_QM_GLBL_MEM_INIT_BUSY 0xF08D00
+
+#endif /* ASIC_REG_TPC4_QM_REGS_H_ */
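
Comparing the files shows the per-TPC MMIO stride: every TPC4_QM offset sits exactly 0x40000 above its TPC3_QM counterpart (0xF08000 vs 0xEC8000 for GLBL_CFG0), and the TPC5_CFG header below sits 0x40000 above TPC4_CFG. A generic accessor can exploit this rather than switching on the instance. A minimal sketch: the 0x40000 stride is taken from these headers, but the TPC0 base is extrapolated from it and should be treated as an assumption.

/*
 * Per-instance QMAN register address: each TPC block is 0x40000 apart
 * (TPC3_QM at 0xEC8000, TPC4_QM at 0xF08000). The TPC0 base below is
 * extrapolated from that stride, i.e. an assumption rather than a
 * definition found in this patch.
 */
#define TPC_BLOCK_STRIDE	0x40000
#define TPC0_QM_BASE		0xE08000	/* 0xEC8000 - 3 * 0x40000 */

static inline u32 tpc_qm_reg(int tpc, u32 reg_ofs_in_block)
{
	return TPC0_QM_BASE + tpc * TPC_BLOCK_STRIDE + reg_ofs_in_block;
}

/* e.g. tpc_qm_reg(4, 0x0) == mmTPC4_QM_GLBL_CFG0 (0xF08000) */
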
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
new file mode 100644
index 000000000000..f428f891935a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_CFG_REGS_H_
+#define ASIC_REG_TPC5_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF46400
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF46404
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF46408
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF4640C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF46410
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF46414
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF46418
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF4641C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF46420
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF46424
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF46428
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF4642C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF46430
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF46434
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF46438
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF4643C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF46440
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF46444
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF46448
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF4644C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF46450
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF46454
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF46458
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF4645C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF46460
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF46464
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF46468
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF4646C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF46470
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF46474
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF46478
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF4647C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF46480
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF46484
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF46488
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF4648C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF46490
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF46494
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF46498
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF4649C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF464A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF464A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF464A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF464AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF464B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF464B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF464B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF464BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF464C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF464C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF464C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF464CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF464D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF464D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF464D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF464DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF464E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF464E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF464E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF464EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF464F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF464F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF464F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF464FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF46500
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF46504
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF46508
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF4650C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF46510
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF46514
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF46518
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF4651C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF46520
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF46524
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF46528
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF4652C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF46530
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF46534
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF46538
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF4653C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF46540
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF46544
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF46548
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF4654C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF46550
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF46554
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF46558
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF4655C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF46560
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF46564
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF46568
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF4656C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF46570
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF46574
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF46578
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF4657C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF46580
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF46584
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF46588
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF4658C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF46590
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF46594
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF46598
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF4659C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF465A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF465A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF465A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF465AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF465B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF465B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF465B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF465BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xF465C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xF465C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xF465C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xF465CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xF465D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xF465D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xF465D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xF465DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xF465E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xF465E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xF465E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xF465EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xF465F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xF465F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xF465F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xF465FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xF46600
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xF46604
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xF46608
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xF4660C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xF46610
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xF46614
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xF46618
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xF4661C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xF46620
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xF46624
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xF46628
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xF4662C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xF46630
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xF46634
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xF46638
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xF4663C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xF46640
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xF46644
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xF46648
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xF4664C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xF46650
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xF46654
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xF46658
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xF4665C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xF46660
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xF46664
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xF46668
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xF4666C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xF46670
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xF46674
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xF46678
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xF4667C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xF46680
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xF46684
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xF46688
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xF4668C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xF46690
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xF46694
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xF46698
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xF4669C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xF466A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xF466A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xF466A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xF466AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xF466B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xF466B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xF466B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xF466BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xF466C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xF466C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xF466C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xF466CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xF466D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xF466D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xF466D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xF466DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xF466E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xF466E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xF466E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xF466EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xF466F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xF466F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xF466F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xF466FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xF46700
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xF46704
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xF46708
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xF4670C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xF46710
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xF46714
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xF46718
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xF4671C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xF46720
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xF46724
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xF46728
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xF4672C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xF46730
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xF46734
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xF46738
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xF4673C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xF46740
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xF46744
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xF46748
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xF4674C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xF46750
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xF46754
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xF46758
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xF4675C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xF46760
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xF46764
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xF46768
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xF4676C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xF46770
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xF46774
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xF46778
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xF4677C
+
+#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF46780
+
+#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_ADDR 0xF46784
+
+#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF46788
+
+#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF4678C
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_0 0xF46790
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_0 0xF46794
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_1 0xF46798
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_1 0xF4679C
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_2 0xF467A0
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_2 0xF467A4
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_3 0xF467A8
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_3 0xF467AC
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_4 0xF467B0
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_4 0xF467B4
+
+#define mmTPC5_CFG_KERNEL_KERNEL_CONFIG 0xF467B8
+
+#define mmTPC5_CFG_KERNEL_KERNEL_ID 0xF467BC
+
+#define mmTPC5_CFG_KERNEL_SRF_0 0xF467C0
+
+#define mmTPC5_CFG_KERNEL_SRF_1 0xF467C4
+
+#define mmTPC5_CFG_KERNEL_SRF_2 0xF467C8
+
+#define mmTPC5_CFG_KERNEL_SRF_3 0xF467CC
+
+#define mmTPC5_CFG_KERNEL_SRF_4 0xF467D0
+
+#define mmTPC5_CFG_KERNEL_SRF_5 0xF467D4
+
+#define mmTPC5_CFG_KERNEL_SRF_6 0xF467D8
+
+#define mmTPC5_CFG_KERNEL_SRF_7 0xF467DC
+
+#define mmTPC5_CFG_KERNEL_SRF_8 0xF467E0
+
+#define mmTPC5_CFG_KERNEL_SRF_9 0xF467E4
+
+#define mmTPC5_CFG_KERNEL_SRF_10 0xF467E8
+
+#define mmTPC5_CFG_KERNEL_SRF_11 0xF467EC
+
+#define mmTPC5_CFG_KERNEL_SRF_12 0xF467F0
+
+#define mmTPC5_CFG_KERNEL_SRF_13 0xF467F4
+
+#define mmTPC5_CFG_KERNEL_SRF_14 0xF467F8
+
+#define mmTPC5_CFG_KERNEL_SRF_15 0xF467FC
+
+#define mmTPC5_CFG_KERNEL_SRF_16 0xF46800
+
+#define mmTPC5_CFG_KERNEL_SRF_17 0xF46804
+
+#define mmTPC5_CFG_KERNEL_SRF_18 0xF46808
+
+#define mmTPC5_CFG_KERNEL_SRF_19 0xF4680C
+
+#define mmTPC5_CFG_KERNEL_SRF_20 0xF46810
+
+#define mmTPC5_CFG_KERNEL_SRF_21 0xF46814
+
+#define mmTPC5_CFG_KERNEL_SRF_22 0xF46818
+
+#define mmTPC5_CFG_KERNEL_SRF_23 0xF4681C
+
+#define mmTPC5_CFG_KERNEL_SRF_24 0xF46820
+
+#define mmTPC5_CFG_KERNEL_SRF_25 0xF46824
+
+#define mmTPC5_CFG_KERNEL_SRF_26 0xF46828
+
+#define mmTPC5_CFG_KERNEL_SRF_27 0xF4682C
+
+#define mmTPC5_CFG_KERNEL_SRF_28 0xF46830
+
+#define mmTPC5_CFG_KERNEL_SRF_29 0xF46834
+
+#define mmTPC5_CFG_KERNEL_SRF_30 0xF46838
+
+#define mmTPC5_CFG_KERNEL_SRF_31 0xF4683C
+
+#define mmTPC5_CFG_ROUND_CSR 0xF468FC
+
+#define mmTPC5_CFG_PROT 0xF46900
+
+#define mmTPC5_CFG_SEMAPHORE 0xF46908
+
+#define mmTPC5_CFG_VFLAGS 0xF4690C
+
+#define mmTPC5_CFG_SFLAGS 0xF46910
+
+#define mmTPC5_CFG_LFSR_POLYNOM 0xF46918
+
+#define mmTPC5_CFG_STATUS 0xF4691C
+
+#define mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH 0xF46920
+
+#define mmTPC5_CFG_CFG_SUBTRACT_VALUE 0xF46924
+
+#define mmTPC5_CFG_SM_BASE_ADDRESS_HIGH 0xF4692C
+
+#define mmTPC5_CFG_TPC_CMD 0xF46930
+
+#define mmTPC5_CFG_TPC_EXECUTE 0xF46938
+
+#define mmTPC5_CFG_TPC_STALL 0xF4693C
+
+#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_LOW 0xF46940
+
+#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF46944
+
+#define mmTPC5_CFG_RD_RATE_LIMIT 0xF46948
+
+#define mmTPC5_CFG_WR_RATE_LIMIT 0xF46950
+
+#define mmTPC5_CFG_MSS_CONFIG 0xF46954
+
+#define mmTPC5_CFG_TPC_INTR_CAUSE 0xF46958
+
+#define mmTPC5_CFG_TPC_INTR_MASK 0xF4695C
+
+#define mmTPC5_CFG_WQ_CREDITS 0xF46960
+
+#define mmTPC5_CFG_ARUSER_LO 0xF46964
+
+#define mmTPC5_CFG_ARUSER_HI 0xF46968
+
+#define mmTPC5_CFG_AWUSER_LO 0xF4696C
+
+#define mmTPC5_CFG_AWUSER_HI 0xF46970
+
+#define mmTPC5_CFG_OPCODE_EXEC 0xF46974
+
+#define mmTPC5_CFG_LUT_FUNC32_BASE_ADDR_LO 0xF46978
+
+#define mmTPC5_CFG_LUT_FUNC32_BASE_ADDR_HI 0xF4697C
+
+#define mmTPC5_CFG_LUT_FUNC64_BASE_ADDR_LO 0xF46980
+
+#define mmTPC5_CFG_LUT_FUNC64_BASE_ADDR_HI 0xF46984
+
+#define mmTPC5_CFG_LUT_FUNC128_BASE_ADDR_LO 0xF46988
+
+#define mmTPC5_CFG_LUT_FUNC128_BASE_ADDR_HI 0xF4698C
+
+#define mmTPC5_CFG_LUT_FUNC256_BASE_ADDR_LO 0xF46990
+
+#define mmTPC5_CFG_LUT_FUNC256_BASE_ADDR_HI 0xF46994
+
+#define mmTPC5_CFG_TSB_CFG_MAX_SIZE 0xF46998
+
+#define mmTPC5_CFG_TSB_CFG 0xF4699C
+
+#define mmTPC5_CFG_DBGMEM_ADD 0xF469A0
+
+#define mmTPC5_CFG_DBGMEM_DATA_WR 0xF469A4
+
+#define mmTPC5_CFG_DBGMEM_DATA_RD 0xF469A8
+
+#define mmTPC5_CFG_DBGMEM_CTRL 0xF469AC
+
+#define mmTPC5_CFG_DBGMEM_RC 0xF469B0
+
+#define mmTPC5_CFG_TSB_INFLIGHT_CNTR 0xF469B4
+
+#define mmTPC5_CFG_WQ_INFLIGHT_CNTR 0xF469B8
+
+#define mmTPC5_CFG_WQ_LBW_TOTAL_CNTR 0xF469BC
+
+#define mmTPC5_CFG_WQ_HBW_TOTAL_CNTR 0xF469C0
+
+#define mmTPC5_CFG_IRQ_OCCOUPY_CNTR 0xF469C4
+
+#define mmTPC5_CFG_FUNC_MBIST_CNTRL 0xF469D0
+
+#define mmTPC5_CFG_FUNC_MBIST_PAT 0xF469D4
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_0 0xF469D8
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_1 0xF469DC
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_2 0xF469E0
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_3 0xF469E4
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_4 0xF469E8
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_5 0xF469EC
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_6 0xF469F0
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_7 0xF469F4
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_8 0xF469F8
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF469FC
+
+#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF46A00
+
+#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF46A04
+
+#define mmTPC5_CFG_QM_TENSOR_0_PADDING_VALUE 0xF46A08
+
+#define mmTPC5_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF46A0C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF46A10
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF46A14
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF46A18
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF46A1C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF46A20
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF46A24
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF46A28
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF46A2C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF46A30
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF46A34
+
+#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF46A38
+
+#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF46A3C
+
+#define mmTPC5_CFG_QM_TENSOR_1_PADDING_VALUE 0xF46A40
+
+#define mmTPC5_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF46A44
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF46A48
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF46A4C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF46A50
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF46A54
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF46A58
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF46A5C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF46A60
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF46A64
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF46A68
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF46A6C
+
+#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF46A70
+
+#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF46A74
+
+#define mmTPC5_CFG_QM_TENSOR_2_PADDING_VALUE 0xF46A78
+
+#define mmTPC5_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF46A7C
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF46A80
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF46A84
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF46A88
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF46A8C
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF46A90
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF46A94
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF46A98
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF46A9C
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF46AA0
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF46AA4
+
+#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF46AA8
+
+#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF46AAC
+
+#define mmTPC5_CFG_QM_TENSOR_3_PADDING_VALUE 0xF46AB0
+
+#define mmTPC5_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF46AB4
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF46AB8
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF46ABC
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF46AC0
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF46AC4
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF46AC8
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF46ACC
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF46AD0
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF46AD4
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF46AD8
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF46ADC
+
+#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF46AE0
+
+#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF46AE4
+
+#define mmTPC5_CFG_QM_TENSOR_4_PADDING_VALUE 0xF46AE8
+
+#define mmTPC5_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF46AEC
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF46AF0
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF46AF4
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF46AF8
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF46AFC
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF46B00
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF46B04
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF46B08
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF46B0C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF46B10
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF46B14
+
+#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF46B18
+
+#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF46B1C
+
+#define mmTPC5_CFG_QM_TENSOR_5_PADDING_VALUE 0xF46B20
+
+#define mmTPC5_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF46B24
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF46B28
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF46B2C
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF46B30
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF46B34
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF46B38
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF46B3C
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF46B40
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF46B44
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF46B48
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF46B4C
+
+#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF46B50
+
+#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF46B54
+
+#define mmTPC5_CFG_QM_TENSOR_6_PADDING_VALUE 0xF46B58
+
+#define mmTPC5_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF46B5C
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF46B60
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF46B64
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF46B68
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF46B6C
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF46B70
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF46B74
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF46B78
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF46B7C
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF46B80
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF46B84
+
+#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF46B88
+
+#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF46B8C
+
+#define mmTPC5_CFG_QM_TENSOR_7_PADDING_VALUE 0xF46B90
+
+#define mmTPC5_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF46B94
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF46B98
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF46B9C
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF46BA0
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF46BA4
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF46BA8
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF46BAC
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF46BB0
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF46BB4
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF46BB8
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF46BBC
+
+#define mmTPC5_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xF46BC0
+
+#define mmTPC5_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xF46BC4
+
+#define mmTPC5_CFG_QM_TENSOR_8_PADDING_VALUE 0xF46BC8
+
+#define mmTPC5_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xF46BCC
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_0_SIZE 0xF46BD0
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xF46BD4
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_1_SIZE 0xF46BD8
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xF46BDC
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_2_SIZE 0xF46BE0
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xF46BE4
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_3_SIZE 0xF46BE8
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xF46BEC
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_4_SIZE 0xF46BF0
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xF46BF4
+
+#define mmTPC5_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xF46BF8
+
+#define mmTPC5_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xF46BFC
+
+#define mmTPC5_CFG_QM_TENSOR_9_PADDING_VALUE 0xF46C00
+
+#define mmTPC5_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xF46C04
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_0_SIZE 0xF46C08
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xF46C0C
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_1_SIZE 0xF46C10
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xF46C14
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_2_SIZE 0xF46C18
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xF46C1C
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_3_SIZE 0xF46C20
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xF46C24
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_4_SIZE 0xF46C28
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xF46C2C
+
+#define mmTPC5_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xF46C30
+
+#define mmTPC5_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xF46C34
+
+#define mmTPC5_CFG_QM_TENSOR_10_PADDING_VALUE 0xF46C38
+
+#define mmTPC5_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xF46C3C
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_0_SIZE 0xF46C40
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xF46C44
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_1_SIZE 0xF46C48
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xF46C4C
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_2_SIZE 0xF46C50
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xF46C54
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_3_SIZE 0xF46C58
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xF46C5C
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_4_SIZE 0xF46C60
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xF46C64
+
+#define mmTPC5_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xF46C68
+
+#define mmTPC5_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xF46C6C
+
+#define mmTPC5_CFG_QM_TENSOR_11_PADDING_VALUE 0xF46C70
+
+#define mmTPC5_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xF46C74
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_0_SIZE 0xF46C78
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xF46C7C
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_1_SIZE 0xF46C80
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xF46C84
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_2_SIZE 0xF46C88
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xF46C8C
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_3_SIZE 0xF46C90
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xF46C94
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_4_SIZE 0xF46C98
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xF46C9C
+
+#define mmTPC5_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xF46CA0
+
+#define mmTPC5_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xF46CA4
+
+#define mmTPC5_CFG_QM_TENSOR_12_PADDING_VALUE 0xF46CA8
+
+#define mmTPC5_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xF46CAC
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_0_SIZE 0xF46CB0
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xF46CB4
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_1_SIZE 0xF46CB8
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xF46CBC
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_2_SIZE 0xF46CC0
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xF46CC4
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_3_SIZE 0xF46CC8
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xF46CCC
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_4_SIZE 0xF46CD0
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xF46CD4
+
+#define mmTPC5_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xF46CD8
+
+#define mmTPC5_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xF46CDC
+
+#define mmTPC5_CFG_QM_TENSOR_13_PADDING_VALUE 0xF46CE0
+
+#define mmTPC5_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xF46CE4
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_0_SIZE 0xF46CE8
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xF46CEC
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_1_SIZE 0xF46CF0
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xF46CF4
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_2_SIZE 0xF46CF8
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xF46CFC
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_3_SIZE 0xF46D00
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xF46D04
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_4_SIZE 0xF46D08
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xF46D0C
+
+#define mmTPC5_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xF46D10
+
+#define mmTPC5_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xF46D14
+
+#define mmTPC5_CFG_QM_TENSOR_14_PADDING_VALUE 0xF46D18
+
+#define mmTPC5_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xF46D1C
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_0_SIZE 0xF46D20
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xF46D24
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_1_SIZE 0xF46D28
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xF46D2C
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_2_SIZE 0xF46D30
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xF46D34
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_3_SIZE 0xF46D38
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xF46D3C
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_4_SIZE 0xF46D40
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xF46D44
+
+#define mmTPC5_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xF46D48
+
+#define mmTPC5_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xF46D4C
+
+#define mmTPC5_CFG_QM_TENSOR_15_PADDING_VALUE 0xF46D50
+
+#define mmTPC5_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xF46D54
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_0_SIZE 0xF46D58
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xF46D5C
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_1_SIZE 0xF46D60
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xF46D64
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_2_SIZE 0xF46D68
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xF46D6C
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_3_SIZE 0xF46D70
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xF46D74
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_4_SIZE 0xF46D78
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xF46D7C
+
+#define mmTPC5_CFG_QM_SYNC_OBJECT_MESSAGE 0xF46D80
+
+#define mmTPC5_CFG_QM_SYNC_OBJECT_ADDR 0xF46D84
+
+#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF46D88
+
+#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF46D8C
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_0 0xF46D90
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_0 0xF46D94
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_1 0xF46D98
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_1 0xF46D9C
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_2 0xF46DA0
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_2 0xF46DA4
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_3 0xF46DA8
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_3 0xF46DAC
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_4 0xF46DB0
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_4 0xF46DB4
+
+#define mmTPC5_CFG_QM_KERNEL_CONFIG 0xF46DB8
+
+#define mmTPC5_CFG_QM_KERNEL_ID 0xF46DBC
+
+#define mmTPC5_CFG_QM_SRF_0 0xF46DC0
+
+#define mmTPC5_CFG_QM_SRF_1 0xF46DC4
+
+#define mmTPC5_CFG_QM_SRF_2 0xF46DC8
+
+#define mmTPC5_CFG_QM_SRF_3 0xF46DCC
+
+#define mmTPC5_CFG_QM_SRF_4 0xF46DD0
+
+#define mmTPC5_CFG_QM_SRF_5 0xF46DD4
+
+#define mmTPC5_CFG_QM_SRF_6 0xF46DD8
+
+#define mmTPC5_CFG_QM_SRF_7 0xF46DDC
+
+#define mmTPC5_CFG_QM_SRF_8 0xF46DE0
+
+#define mmTPC5_CFG_QM_SRF_9 0xF46DE4
+
+#define mmTPC5_CFG_QM_SRF_10 0xF46DE8
+
+#define mmTPC5_CFG_QM_SRF_11 0xF46DEC
+
+#define mmTPC5_CFG_QM_SRF_12 0xF46DF0
+
+#define mmTPC5_CFG_QM_SRF_13 0xF46DF4
+
+#define mmTPC5_CFG_QM_SRF_14 0xF46DF8
+
+#define mmTPC5_CFG_QM_SRF_15 0xF46DFC
+
+#define mmTPC5_CFG_QM_SRF_16 0xF46E00
+
+#define mmTPC5_CFG_QM_SRF_17 0xF46E04
+
+#define mmTPC5_CFG_QM_SRF_18 0xF46E08
+
+#define mmTPC5_CFG_QM_SRF_19 0xF46E0C
+
+#define mmTPC5_CFG_QM_SRF_20 0xF46E10
+
+#define mmTPC5_CFG_QM_SRF_21 0xF46E14
+
+#define mmTPC5_CFG_QM_SRF_22 0xF46E18
+
+#define mmTPC5_CFG_QM_SRF_23 0xF46E1C
+
+#define mmTPC5_CFG_QM_SRF_24 0xF46E20
+
+#define mmTPC5_CFG_QM_SRF_25 0xF46E24
+
+#define mmTPC5_CFG_QM_SRF_26 0xF46E28
+
+#define mmTPC5_CFG_QM_SRF_27 0xF46E2C
+
+#define mmTPC5_CFG_QM_SRF_28 0xF46E30
+
+#define mmTPC5_CFG_QM_SRF_29 0xF46E34
+
+#define mmTPC5_CFG_QM_SRF_30 0xF46E38
+
+#define mmTPC5_CFG_QM_SRF_31 0xF46E3C
+
+#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */
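[Editor's note, not part of the patch] Each KERNEL_TENSOR_n descriptor in the TPC5_CFG file above spans fourteen consecutive 32-bit registers (BASE_ADDR_LOW through DIM_4_STRIDE), i.e. 0x38 bytes, and the sixteen descriptors are packed back to back (tensor 0 at 0xF46400, tensor 1 at 0xF46438, and so on). A hedged sketch of deriving a register for an arbitrary tensor; the constant and helper are illustrative:

#include <linux/types.h>
/* plus the tpc5_cfg_regs.h file introduced by this patch */

/* 14 registers of 4 bytes per tensor descriptor, as seen above. */
#define TPC_CFG_TENSOR_DESC_SIZE	0x38

/* Illustrative only: tensor N's registers derive from tensor 0's. */
static inline u32 tpc5_cfg_tensor_base_lo_reg(unsigned int tensor)
{
	return mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW +
	       tensor * TPC_CFG_TENSOR_DESC_SIZE;
}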
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
new file mode 100644
index 000000000000..cd3a810ff4c4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_QM_REGS_H_
+#define ASIC_REG_TPC5_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC5_QM_GLBL_CFG0 0xF48000
+
+#define mmTPC5_QM_GLBL_CFG1 0xF48004
+
+#define mmTPC5_QM_GLBL_PROT 0xF48008
+
+#define mmTPC5_QM_GLBL_ERR_CFG 0xF4800C
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_0 0xF48010
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_1 0xF48014
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_2 0xF48018
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_3 0xF4801C
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_4 0xF48020
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_0 0xF48024
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_1 0xF48028
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_2 0xF4802C
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_3 0xF48030
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_4 0xF48034
+
+#define mmTPC5_QM_GLBL_STS0 0xF48038
+
+#define mmTPC5_QM_GLBL_STS1_0 0xF48040
+
+#define mmTPC5_QM_GLBL_STS1_1 0xF48044
+
+#define mmTPC5_QM_GLBL_STS1_2 0xF48048
+
+#define mmTPC5_QM_GLBL_STS1_3 0xF4804C
+
+#define mmTPC5_QM_GLBL_STS1_4 0xF48050
+
+#define mmTPC5_QM_GLBL_MSG_EN_0 0xF48054
+
+#define mmTPC5_QM_GLBL_MSG_EN_1 0xF48058
+
+#define mmTPC5_QM_GLBL_MSG_EN_2 0xF4805C
+
+#define mmTPC5_QM_GLBL_MSG_EN_3 0xF48060
+
+#define mmTPC5_QM_GLBL_MSG_EN_4 0xF48068
+
+#define mmTPC5_QM_PQ_BASE_LO_0 0xF48070
+
+#define mmTPC5_QM_PQ_BASE_LO_1 0xF48074
+
+#define mmTPC5_QM_PQ_BASE_LO_2 0xF48078
+
+#define mmTPC5_QM_PQ_BASE_LO_3 0xF4807C
+
+#define mmTPC5_QM_PQ_BASE_HI_0 0xF48080
+
+#define mmTPC5_QM_PQ_BASE_HI_1 0xF48084
+
+#define mmTPC5_QM_PQ_BASE_HI_2 0xF48088
+
+#define mmTPC5_QM_PQ_BASE_HI_3 0xF4808C
+
+#define mmTPC5_QM_PQ_SIZE_0 0xF48090
+
+#define mmTPC5_QM_PQ_SIZE_1 0xF48094
+
+#define mmTPC5_QM_PQ_SIZE_2 0xF48098
+
+#define mmTPC5_QM_PQ_SIZE_3 0xF4809C
+
+#define mmTPC5_QM_PQ_PI_0 0xF480A0
+
+#define mmTPC5_QM_PQ_PI_1 0xF480A4
+
+#define mmTPC5_QM_PQ_PI_2 0xF480A8
+
+#define mmTPC5_QM_PQ_PI_3 0xF480AC
+
+#define mmTPC5_QM_PQ_CI_0 0xF480B0
+
+#define mmTPC5_QM_PQ_CI_1 0xF480B4
+
+#define mmTPC5_QM_PQ_CI_2 0xF480B8
+
+#define mmTPC5_QM_PQ_CI_3 0xF480BC
+
+#define mmTPC5_QM_PQ_CFG0_0 0xF480C0
+
+#define mmTPC5_QM_PQ_CFG0_1 0xF480C4
+
+#define mmTPC5_QM_PQ_CFG0_2 0xF480C8
+
+#define mmTPC5_QM_PQ_CFG0_3 0xF480CC
+
+#define mmTPC5_QM_PQ_CFG1_0 0xF480D0
+
+#define mmTPC5_QM_PQ_CFG1_1 0xF480D4
+
+#define mmTPC5_QM_PQ_CFG1_2 0xF480D8
+
+#define mmTPC5_QM_PQ_CFG1_3 0xF480DC
+
+#define mmTPC5_QM_PQ_ARUSER_31_11_0 0xF480E0
+
+#define mmTPC5_QM_PQ_ARUSER_31_11_1 0xF480E4
+
+#define mmTPC5_QM_PQ_ARUSER_31_11_2 0xF480E8
+
+#define mmTPC5_QM_PQ_ARUSER_31_11_3 0xF480EC
+
+#define mmTPC5_QM_PQ_STS0_0 0xF480F0
+
+#define mmTPC5_QM_PQ_STS0_1 0xF480F4
+
+#define mmTPC5_QM_PQ_STS0_2 0xF480F8
+
+#define mmTPC5_QM_PQ_STS0_3 0xF480FC
+
+#define mmTPC5_QM_PQ_STS1_0 0xF48100
+
+#define mmTPC5_QM_PQ_STS1_1 0xF48104
+
+#define mmTPC5_QM_PQ_STS1_2 0xF48108
+
+#define mmTPC5_QM_PQ_STS1_3 0xF4810C
+
+#define mmTPC5_QM_CQ_CFG0_0 0xF48110
+
+#define mmTPC5_QM_CQ_CFG0_1 0xF48114
+
+#define mmTPC5_QM_CQ_CFG0_2 0xF48118
+
+#define mmTPC5_QM_CQ_CFG0_3 0xF4811C
+
+#define mmTPC5_QM_CQ_CFG0_4 0xF48120
+
+#define mmTPC5_QM_CQ_CFG1_0 0xF48124
+
+#define mmTPC5_QM_CQ_CFG1_1 0xF48128
+
+#define mmTPC5_QM_CQ_CFG1_2 0xF4812C
+
+#define mmTPC5_QM_CQ_CFG1_3 0xF48130
+
+#define mmTPC5_QM_CQ_CFG1_4 0xF48134
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_0 0xF48138
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_1 0xF4813C
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_2 0xF48140
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_3 0xF48144
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_4 0xF48148
+
+#define mmTPC5_QM_CQ_STS0_0 0xF4814C
+
+#define mmTPC5_QM_CQ_STS0_1 0xF48150
+
+#define mmTPC5_QM_CQ_STS0_2 0xF48154
+
+#define mmTPC5_QM_CQ_STS0_3 0xF48158
+
+#define mmTPC5_QM_CQ_STS0_4 0xF4815C
+
+#define mmTPC5_QM_CQ_STS1_0 0xF48160
+
+#define mmTPC5_QM_CQ_STS1_1 0xF48164
+
+#define mmTPC5_QM_CQ_STS1_2 0xF48168
+
+#define mmTPC5_QM_CQ_STS1_3 0xF4816C
+
+#define mmTPC5_QM_CQ_STS1_4 0xF48170
+
+#define mmTPC5_QM_CQ_PTR_LO_0 0xF48174
+
+#define mmTPC5_QM_CQ_PTR_HI_0 0xF48178
+
+#define mmTPC5_QM_CQ_TSIZE_0 0xF4817C
+
+#define mmTPC5_QM_CQ_CTL_0 0xF48180
+
+#define mmTPC5_QM_CQ_PTR_LO_1 0xF48184
+
+#define mmTPC5_QM_CQ_PTR_HI_1 0xF48188
+
+#define mmTPC5_QM_CQ_TSIZE_1 0xF4818C
+
+#define mmTPC5_QM_CQ_CTL_1 0xF48190
+
+#define mmTPC5_QM_CQ_PTR_LO_2 0xF48194
+
+#define mmTPC5_QM_CQ_PTR_HI_2 0xF48198
+
+#define mmTPC5_QM_CQ_TSIZE_2 0xF4819C
+
+#define mmTPC5_QM_CQ_CTL_2 0xF481A0
+
+#define mmTPC5_QM_CQ_PTR_LO_3 0xF481A4
+
+#define mmTPC5_QM_CQ_PTR_HI_3 0xF481A8
+
+#define mmTPC5_QM_CQ_TSIZE_3 0xF481AC
+
+#define mmTPC5_QM_CQ_CTL_3 0xF481B0
+
+#define mmTPC5_QM_CQ_PTR_LO_4 0xF481B4
+
+#define mmTPC5_QM_CQ_PTR_HI_4 0xF481B8
+
+#define mmTPC5_QM_CQ_TSIZE_4 0xF481BC
+
+#define mmTPC5_QM_CQ_CTL_4 0xF481C0
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_0 0xF481C4
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_1 0xF481C8
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_2 0xF481CC
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_3 0xF481D0
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_4 0xF481D4
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_0 0xF481D8
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_1 0xF481DC
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_2 0xF481E0
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_3 0xF481E4
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_4 0xF481E8
+
+#define mmTPC5_QM_CQ_TSIZE_STS_0 0xF481EC
+
+#define mmTPC5_QM_CQ_TSIZE_STS_1 0xF481F0
+
+#define mmTPC5_QM_CQ_TSIZE_STS_2 0xF481F4
+
+#define mmTPC5_QM_CQ_TSIZE_STS_3 0xF481F8
+
+#define mmTPC5_QM_CQ_TSIZE_STS_4 0xF481FC
+
+#define mmTPC5_QM_CQ_CTL_STS_0 0xF48200
+
+#define mmTPC5_QM_CQ_CTL_STS_1 0xF48204
+
+#define mmTPC5_QM_CQ_CTL_STS_2 0xF48208
+
+#define mmTPC5_QM_CQ_CTL_STS_3 0xF4820C
+
+#define mmTPC5_QM_CQ_CTL_STS_4 0xF48210
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_0 0xF48214
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_1 0xF48218
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_2 0xF4821C
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_3 0xF48220
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_4 0xF48224
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_0 0xF48228
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_1 0xF4822C
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_2 0xF48230
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_3 0xF48234
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_4 0xF48238
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_0 0xF4823C
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_1 0xF48240
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_2 0xF48244
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_3 0xF48248
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_4 0xF4824C
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_0 0xF48250
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_1 0xF48254
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_2 0xF48258
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_3 0xF4825C
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_4 0xF48260
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_0 0xF48264
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_1 0xF48268
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_2 0xF4826C
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_3 0xF48270
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_4 0xF48274
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_0 0xF48278
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_1 0xF4827C
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 0xF48280
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_3 0xF48284
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_4 0xF48288
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_0 0xF4828C
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_1 0xF48290
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_2 0xF48294
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_3 0xF48298
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_4 0xF4829C
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_0 0xF482A0
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_1 0xF482A4
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_2 0xF482A8
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_3 0xF482AC
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_4 0xF482B0
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_0 0xF482B4
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_1 0xF482B8
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_2 0xF482BC
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_3 0xF482C0
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_4 0xF482C4
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_0 0xF482C8
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_1 0xF482CC
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_2 0xF482D0
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_3 0xF482D4
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_4 0xF482D8
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xF482E0
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xF482E4
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xF482E8
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xF482EC
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xF482F0
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xF482F4
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xF482F8
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xF482FC
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xF48300
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xF48304
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_0 0xF48308
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_1 0xF4830C
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_2 0xF48310
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_3 0xF48314
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_4 0xF48318
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_0 0xF4831C
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_1 0xF48320
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_2 0xF48324
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_3 0xF48328
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_4 0xF4832C
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_0 0xF48330
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_1 0xF48334
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_2 0xF48338
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_3 0xF4833C
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_4 0xF48340
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_0 0xF48344
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_1 0xF48348
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_2 0xF4834C
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_3 0xF48350
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_4 0xF48354
+
+#define mmTPC5_QM_CP_FENCE0_CNT_0 0xF48358
+
+#define mmTPC5_QM_CP_FENCE0_CNT_1 0xF4835C
+
+#define mmTPC5_QM_CP_FENCE0_CNT_2 0xF48360
+
+#define mmTPC5_QM_CP_FENCE0_CNT_3 0xF48364
+
+#define mmTPC5_QM_CP_FENCE0_CNT_4 0xF48368
+
+#define mmTPC5_QM_CP_FENCE1_CNT_0 0xF4836C
+
+#define mmTPC5_QM_CP_FENCE1_CNT_1 0xF48370
+
+#define mmTPC5_QM_CP_FENCE1_CNT_2 0xF48374
+
+#define mmTPC5_QM_CP_FENCE1_CNT_3 0xF48378
+
+#define mmTPC5_QM_CP_FENCE1_CNT_4 0xF4837C
+
+#define mmTPC5_QM_CP_FENCE2_CNT_0 0xF48380
+
+#define mmTPC5_QM_CP_FENCE2_CNT_1 0xF48384
+
+#define mmTPC5_QM_CP_FENCE2_CNT_2 0xF48388
+
+#define mmTPC5_QM_CP_FENCE2_CNT_3 0xF4838C
+
+#define mmTPC5_QM_CP_FENCE2_CNT_4 0xF48390
+
+#define mmTPC5_QM_CP_FENCE3_CNT_0 0xF48394
+
+#define mmTPC5_QM_CP_FENCE3_CNT_1 0xF48398
+
+#define mmTPC5_QM_CP_FENCE3_CNT_2 0xF4839C
+
+#define mmTPC5_QM_CP_FENCE3_CNT_3 0xF483A0
+
+#define mmTPC5_QM_CP_FENCE3_CNT_4 0xF483A4
+
+#define mmTPC5_QM_CP_STS_0 0xF483A8
+
+#define mmTPC5_QM_CP_STS_1 0xF483AC
+
+#define mmTPC5_QM_CP_STS_2 0xF483B0
+
+#define mmTPC5_QM_CP_STS_3 0xF483B4
+
+#define mmTPC5_QM_CP_STS_4 0xF483B8
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_0 0xF483BC
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_1 0xF483C0
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_2 0xF483C4
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_3 0xF483C8
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_4 0xF483CC
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_0 0xF483D0
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_1 0xF483D4
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_2 0xF483D8
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_3 0xF483DC
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_4 0xF483E0
+
+#define mmTPC5_QM_CP_BARRIER_CFG_0 0xF483F4
+
+#define mmTPC5_QM_CP_BARRIER_CFG_1 0xF483F8
+
+#define mmTPC5_QM_CP_BARRIER_CFG_2 0xF483FC
+
+#define mmTPC5_QM_CP_BARRIER_CFG_3 0xF48400
+
+#define mmTPC5_QM_CP_BARRIER_CFG_4 0xF48404
+
+#define mmTPC5_QM_CP_DBG_0_0 0xF48408
+
+#define mmTPC5_QM_CP_DBG_0_1 0xF4840C
+
+#define mmTPC5_QM_CP_DBG_0_2 0xF48410
+
+#define mmTPC5_QM_CP_DBG_0_3 0xF48414
+
+#define mmTPC5_QM_CP_DBG_0_4 0xF48418
+
+#define mmTPC5_QM_CP_ARUSER_31_11_0 0xF4841C
+
+#define mmTPC5_QM_CP_ARUSER_31_11_1 0xF48420
+
+#define mmTPC5_QM_CP_ARUSER_31_11_2 0xF48424
+
+#define mmTPC5_QM_CP_ARUSER_31_11_3 0xF48428
+
+#define mmTPC5_QM_CP_ARUSER_31_11_4 0xF4842C
+
+#define mmTPC5_QM_CP_AWUSER_31_11_0 0xF48430
+
+#define mmTPC5_QM_CP_AWUSER_31_11_1 0xF48434
+
+#define mmTPC5_QM_CP_AWUSER_31_11_2 0xF48438
+
+#define mmTPC5_QM_CP_AWUSER_31_11_3 0xF4843C
+
+#define mmTPC5_QM_CP_AWUSER_31_11_4 0xF48440
+
+#define mmTPC5_QM_ARB_CFG_0 0xF48A00
+
+#define mmTPC5_QM_ARB_CHOISE_Q_PUSH 0xF48A04
+
+#define mmTPC5_QM_ARB_WRR_WEIGHT_0 0xF48A08
+
+#define mmTPC5_QM_ARB_WRR_WEIGHT_1 0xF48A0C
+
+#define mmTPC5_QM_ARB_WRR_WEIGHT_2 0xF48A10
+
+#define mmTPC5_QM_ARB_WRR_WEIGHT_3 0xF48A14
+
+#define mmTPC5_QM_ARB_CFG_1 0xF48A18
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_0 0xF48A20
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_1 0xF48A24
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_2 0xF48A28
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_3 0xF48A2C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_4 0xF48A30
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_5 0xF48A34
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_6 0xF48A38
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_7 0xF48A3C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_8 0xF48A40
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_9 0xF48A44
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_10 0xF48A48
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_11 0xF48A4C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_12 0xF48A50
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_13 0xF48A54
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_14 0xF48A58
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_15 0xF48A5C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_16 0xF48A60
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_17 0xF48A64
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_18 0xF48A68
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_19 0xF48A6C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_20 0xF48A70
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_21 0xF48A74
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_22 0xF48A78
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_23 0xF48A7C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_24 0xF48A80
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_25 0xF48A84
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_26 0xF48A88
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_27 0xF48A8C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_28 0xF48A90
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_29 0xF48A94
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_30 0xF48A98
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_31 0xF48A9C
+
+#define mmTPC5_QM_ARB_MST_CRED_INC 0xF48AA0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xF48AA4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xF48AA8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xF48AAC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xF48AB0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xF48AB4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xF48AB8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xF48ABC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xF48AC0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xF48AC4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xF48AC8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xF48ACC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xF48AD0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xF48AD4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xF48AD8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xF48ADC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xF48AE0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xF48AE4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xF48AE8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xF48AEC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xF48AF0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xF48AF4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xF48AF8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xF48AFC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xF48B00
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xF48B04
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xF48B08
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xF48B0C
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xF48B10
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xF48B14
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xF48B18
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xF48B1C
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xF48B20
+
+#define mmTPC5_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xF48B28
+
+#define mmTPC5_QM_ARB_MST_SLAVE_EN 0xF48B2C
+
+#define mmTPC5_QM_ARB_MST_QUIET_PER 0xF48B34
+
+#define mmTPC5_QM_ARB_SLV_CHOISE_WDT 0xF48B38
+
+#define mmTPC5_QM_ARB_SLV_ID 0xF48B3C
+
+#define mmTPC5_QM_ARB_MSG_MAX_INFLIGHT 0xF48B44
+
+#define mmTPC5_QM_ARB_MSG_AWUSER_31_11 0xF48B48
+
+#define mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP 0xF48B4C
+
+#define mmTPC5_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xF48B50
+
+#define mmTPC5_QM_ARB_BASE_LO 0xF48B54
+
+#define mmTPC5_QM_ARB_BASE_HI 0xF48B58
+
+#define mmTPC5_QM_ARB_STATE_STS 0xF48B80
+
+#define mmTPC5_QM_ARB_CHOISE_FULLNESS_STS 0xF48B84
+
+#define mmTPC5_QM_ARB_MSG_STS 0xF48B88
+
+#define mmTPC5_QM_ARB_SLV_CHOISE_Q_HEAD 0xF48B8C
+
+#define mmTPC5_QM_ARB_ERR_CAUSE 0xF48B9C
+
+#define mmTPC5_QM_ARB_ERR_MSG_EN 0xF48BA0
+
+#define mmTPC5_QM_ARB_ERR_STS_DRP 0xF48BA8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_0 0xF48BB0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_1 0xF48BB4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_2 0xF48BB8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_3 0xF48BBC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_4 0xF48BC0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_5 0xF48BC4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_6 0xF48BC8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_7 0xF48BCC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_8 0xF48BD0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_9 0xF48BD4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_10 0xF48BD8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_11 0xF48BDC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_12 0xF48BE0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_13 0xF48BE4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_14 0xF48BE8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_15 0xF48BEC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_16 0xF48BF0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_17 0xF48BF4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_18 0xF48BF8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_19 0xF48BFC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_20 0xF48C00
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_21 0xF48C04
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_22 0xF48C08
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_23 0xF48C0C
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_24 0xF48C10
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_25 0xF48C14
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_26 0xF48C18
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_27 0xF48C1C
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_28 0xF48C20
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_29 0xF48C24
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_30 0xF48C28
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_31 0xF48C2C
+
+#define mmTPC5_QM_CGM_CFG 0xF48C70
+
+#define mmTPC5_QM_CGM_STS 0xF48C74
+
+#define mmTPC5_QM_CGM_CFG1 0xF48C78
+
+#define mmTPC5_QM_LOCAL_RANGE_BASE 0xF48C80
+
+#define mmTPC5_QM_LOCAL_RANGE_SIZE 0xF48C84
+
+#define mmTPC5_QM_CSMR_STRICT_PRIO_CFG 0xF48C90
+
+#define mmTPC5_QM_HBW_RD_RATE_LIM_CFG_1 0xF48C94
+
+#define mmTPC5_QM_LBW_WR_RATE_LIM_CFG_0 0xF48C98
+
+#define mmTPC5_QM_LBW_WR_RATE_LIM_CFG_1 0xF48C9C
+
+#define mmTPC5_QM_HBW_RD_RATE_LIM_CFG_0 0xF48CA0
+
+#define mmTPC5_QM_GLBL_AXCACHE 0xF48CA4
+
+#define mmTPC5_QM_IND_GW_APB_CFG 0xF48CB0
+
+#define mmTPC5_QM_IND_GW_APB_WDATA 0xF48CB4
+
+#define mmTPC5_QM_IND_GW_APB_RDATA 0xF48CB8
+
+#define mmTPC5_QM_IND_GW_APB_STATUS 0xF48CBC
+
+#define mmTPC5_QM_GLBL_ERR_ADDR_LO 0xF48CD0
+
+#define mmTPC5_QM_GLBL_ERR_ADDR_HI 0xF48CD4
+
+#define mmTPC5_QM_GLBL_ERR_WDATA 0xF48CD8
+
+#define mmTPC5_QM_GLBL_MEM_INIT_BUSY 0xF48D00
+
+#endif /* ASIC_REG_TPC5_QM_REGS_H_ */
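[Editor's note, not part of the patch] Comparing the TPC4_QM and TPC5_QM files shows the whole per-TPC register block repeating at a 0x40000 stride (mmTPC4_QM_ARB_ERR_STS_DRP at 0xF08BA8 versus mmTPC5_QM_ARB_ERR_STS_DRP at 0xF48BA8; likewise TPC5_CFG at 0xF46400 versus TPC6_CFG at 0xF86400 below). A sketch, under that assumption, of addressing any TPC's QMAN relative to TPC5; the stride constant and helper are hypothetical:

#include <linux/types.h>
/* plus the tpc5_qm_regs.h file introduced by this patch */

/* Per-TPC register block stride, derived from the addresses above. */
#define TPC_BLOCK_STRIDE	0x40000

/*
 * Illustrative only: map a TPC5_QM register to the same register in
 * another TPC's QMAN block (e.g. tpc_id 4 recovers the TPC4_QM value).
 */
static inline u32 tpc_qm_reg(unsigned int tpc_id, u32 tpc5_reg)
{
	return tpc5_reg + ((int)tpc_id - 5) * TPC_BLOCK_STRIDE;
}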
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
new file mode 100644
index 000000000000..eb251e72813f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_CFG_REGS_H_
+#define ASIC_REG_TPC6_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF86400
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF86404
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF86408
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF8640C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF86410
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF86414
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF86418
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF8641C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF86420
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF86424
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF86428
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF8642C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF86430
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF86434
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF86438
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF8643C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF86440
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF86444
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF86448
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF8644C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF86450
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF86454
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF86458
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF8645C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF86460
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF86464
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF86468
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF8646C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF86470
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF86474
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF86478
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF8647C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF86480
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF86484
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF86488
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF8648C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF86490
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF86494
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF86498
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF8649C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF864A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF864A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF864A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF864AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF864B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF864B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF864B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF864BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF864C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF864C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF864C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF864CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF864D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF864D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF864D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF864DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF864E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF864E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF864E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF864EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF864F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF864F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF864F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF864FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF86500
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF86504
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF86508
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF8650C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF86510
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF86514
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF86518
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF8651C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF86520
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF86524
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF86528
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF8652C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF86530
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF86534
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF86538
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF8653C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF86540
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF86544
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF86548
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF8654C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF86550
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF86554
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF86558
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF8655C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF86560
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF86564
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF86568
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF8656C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF86570
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF86574
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF86578
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF8657C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF86580
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF86584
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF86588
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF8658C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF86590
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF86594
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF86598
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF8659C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF865A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF865A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF865A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF865AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF865B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF865B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF865B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF865BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xF865C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xF865C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xF865C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xF865CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xF865D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xF865D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xF865D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xF865DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xF865E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xF865E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xF865E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xF865EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xF865F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xF865F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xF865F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xF865FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xF86600
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xF86604
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xF86608
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xF8660C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xF86610
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xF86614
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xF86618
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xF8661C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xF86620
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xF86624
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xF86628
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xF8662C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xF86630
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xF86634
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xF86638
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xF8663C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xF86640
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xF86644
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xF86648
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xF8664C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xF86650
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xF86654
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xF86658
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xF8665C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xF86660
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xF86664
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xF86668
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xF8666C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xF86670
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xF86674
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xF86678
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xF8667C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xF86680
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xF86684
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xF86688
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xF8668C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xF86690
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xF86694
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xF86698
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xF8669C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xF866A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xF866A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xF866A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xF866AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xF866B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xF866B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xF866B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xF866BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xF866C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xF866C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xF866C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xF866CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xF866D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xF866D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xF866D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xF866DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xF866E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xF866E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xF866E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xF866EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xF866F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xF866F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xF866F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xF866FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xF86700
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xF86704
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xF86708
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xF8670C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xF86710
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xF86714
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xF86718
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xF8671C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xF86720
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xF86724
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xF86728
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xF8672C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xF86730
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xF86734
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xF86738
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xF8673C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xF86740
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xF86744
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xF86748
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xF8674C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xF86750
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xF86754
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xF86758
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xF8675C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xF86760
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xF86764
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xF86768
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xF8676C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xF86770
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xF86774
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xF86778
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xF8677C
+
+#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF86780
+
+#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_ADDR 0xF86784
+
+#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF86788
+
+#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF8678C
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_0 0xF86790
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_0 0xF86794
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_1 0xF86798
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_1 0xF8679C
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_2 0xF867A0
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_2 0xF867A4
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_3 0xF867A8
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_3 0xF867AC
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_4 0xF867B0
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_4 0xF867B4
+
+#define mmTPC6_CFG_KERNEL_KERNEL_CONFIG 0xF867B8
+
+#define mmTPC6_CFG_KERNEL_KERNEL_ID 0xF867BC
+
+#define mmTPC6_CFG_KERNEL_SRF_0 0xF867C0
+
+#define mmTPC6_CFG_KERNEL_SRF_1 0xF867C4
+
+#define mmTPC6_CFG_KERNEL_SRF_2 0xF867C8
+
+#define mmTPC6_CFG_KERNEL_SRF_3 0xF867CC
+
+#define mmTPC6_CFG_KERNEL_SRF_4 0xF867D0
+
+#define mmTPC6_CFG_KERNEL_SRF_5 0xF867D4
+
+#define mmTPC6_CFG_KERNEL_SRF_6 0xF867D8
+
+#define mmTPC6_CFG_KERNEL_SRF_7 0xF867DC
+
+#define mmTPC6_CFG_KERNEL_SRF_8 0xF867E0
+
+#define mmTPC6_CFG_KERNEL_SRF_9 0xF867E4
+
+#define mmTPC6_CFG_KERNEL_SRF_10 0xF867E8
+
+#define mmTPC6_CFG_KERNEL_SRF_11 0xF867EC
+
+#define mmTPC6_CFG_KERNEL_SRF_12 0xF867F0
+
+#define mmTPC6_CFG_KERNEL_SRF_13 0xF867F4
+
+#define mmTPC6_CFG_KERNEL_SRF_14 0xF867F8
+
+#define mmTPC6_CFG_KERNEL_SRF_15 0xF867FC
+
+#define mmTPC6_CFG_KERNEL_SRF_16 0xF86800
+
+#define mmTPC6_CFG_KERNEL_SRF_17 0xF86804
+
+#define mmTPC6_CFG_KERNEL_SRF_18 0xF86808
+
+#define mmTPC6_CFG_KERNEL_SRF_19 0xF8680C
+
+#define mmTPC6_CFG_KERNEL_SRF_20 0xF86810
+
+#define mmTPC6_CFG_KERNEL_SRF_21 0xF86814
+
+#define mmTPC6_CFG_KERNEL_SRF_22 0xF86818
+
+#define mmTPC6_CFG_KERNEL_SRF_23 0xF8681C
+
+#define mmTPC6_CFG_KERNEL_SRF_24 0xF86820
+
+#define mmTPC6_CFG_KERNEL_SRF_25 0xF86824
+
+#define mmTPC6_CFG_KERNEL_SRF_26 0xF86828
+
+#define mmTPC6_CFG_KERNEL_SRF_27 0xF8682C
+
+#define mmTPC6_CFG_KERNEL_SRF_28 0xF86830
+
+#define mmTPC6_CFG_KERNEL_SRF_29 0xF86834
+
+#define mmTPC6_CFG_KERNEL_SRF_30 0xF86838
+
+#define mmTPC6_CFG_KERNEL_SRF_31 0xF8683C
+
+#define mmTPC6_CFG_ROUND_CSR 0xF868FC
+
+#define mmTPC6_CFG_PROT 0xF86900
+
+#define mmTPC6_CFG_SEMAPHORE 0xF86908
+
+#define mmTPC6_CFG_VFLAGS 0xF8690C
+
+#define mmTPC6_CFG_SFLAGS 0xF86910
+
+#define mmTPC6_CFG_LFSR_POLYNOM 0xF86918
+
+#define mmTPC6_CFG_STATUS 0xF8691C
+
+#define mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH 0xF86920
+
+#define mmTPC6_CFG_CFG_SUBTRACT_VALUE 0xF86924
+
+#define mmTPC6_CFG_SM_BASE_ADDRESS_HIGH 0xF8692C
+
+#define mmTPC6_CFG_TPC_CMD 0xF86930
+
+#define mmTPC6_CFG_TPC_EXECUTE 0xF86938
+
+#define mmTPC6_CFG_TPC_STALL 0xF8693C
+
+#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_LOW 0xF86940
+
+#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF86944
+
+#define mmTPC6_CFG_RD_RATE_LIMIT 0xF86948
+
+#define mmTPC6_CFG_WR_RATE_LIMIT 0xF86950
+
+#define mmTPC6_CFG_MSS_CONFIG 0xF86954
+
+#define mmTPC6_CFG_TPC_INTR_CAUSE 0xF86958
+
+#define mmTPC6_CFG_TPC_INTR_MASK 0xF8695C
+
+#define mmTPC6_CFG_WQ_CREDITS 0xF86960
+
+#define mmTPC6_CFG_ARUSER_LO 0xF86964
+
+#define mmTPC6_CFG_ARUSER_HI 0xF86968
+
+#define mmTPC6_CFG_AWUSER_LO 0xF8696C
+
+#define mmTPC6_CFG_AWUSER_HI 0xF86970
+
+#define mmTPC6_CFG_OPCODE_EXEC 0xF86974
+
+#define mmTPC6_CFG_LUT_FUNC32_BASE_ADDR_LO 0xF86978
+
+#define mmTPC6_CFG_LUT_FUNC32_BASE_ADDR_HI 0xF8697C
+
+#define mmTPC6_CFG_LUT_FUNC64_BASE_ADDR_LO 0xF86980
+
+#define mmTPC6_CFG_LUT_FUNC64_BASE_ADDR_HI 0xF86984
+
+#define mmTPC6_CFG_LUT_FUNC128_BASE_ADDR_LO 0xF86988
+
+#define mmTPC6_CFG_LUT_FUNC128_BASE_ADDR_HI 0xF8698C
+
+#define mmTPC6_CFG_LUT_FUNC256_BASE_ADDR_LO 0xF86990
+
+#define mmTPC6_CFG_LUT_FUNC256_BASE_ADDR_HI 0xF86994
+
+#define mmTPC6_CFG_TSB_CFG_MAX_SIZE 0xF86998
+
+#define mmTPC6_CFG_TSB_CFG 0xF8699C
+
+#define mmTPC6_CFG_DBGMEM_ADD 0xF869A0
+
+#define mmTPC6_CFG_DBGMEM_DATA_WR 0xF869A4
+
+#define mmTPC6_CFG_DBGMEM_DATA_RD 0xF869A8
+
+#define mmTPC6_CFG_DBGMEM_CTRL 0xF869AC
+
+#define mmTPC6_CFG_DBGMEM_RC 0xF869B0
+
+#define mmTPC6_CFG_TSB_INFLIGHT_CNTR 0xF869B4
+
+#define mmTPC6_CFG_WQ_INFLIGHT_CNTR 0xF869B8
+
+#define mmTPC6_CFG_WQ_LBW_TOTAL_CNTR 0xF869BC
+
+#define mmTPC6_CFG_WQ_HBW_TOTAL_CNTR 0xF869C0
+
+#define mmTPC6_CFG_IRQ_OCCOUPY_CNTR 0xF869C4
+
+#define mmTPC6_CFG_FUNC_MBIST_CNTRL 0xF869D0
+
+#define mmTPC6_CFG_FUNC_MBIST_PAT 0xF869D4
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_0 0xF869D8
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_1 0xF869DC
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_2 0xF869E0
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_3 0xF869E4
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_4 0xF869E8
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_5 0xF869EC
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_6 0xF869F0
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_7 0xF869F4
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_8 0xF869F8
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_9 0xF869FC
+
+#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF86A00
+
+#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF86A04
+
+#define mmTPC6_CFG_QM_TENSOR_0_PADDING_VALUE 0xF86A08
+
+#define mmTPC6_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF86A0C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF86A10
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF86A14
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF86A18
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF86A1C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF86A20
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF86A24
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF86A28
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF86A2C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF86A30
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF86A34
+
+#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF86A38
+
+#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF86A3C
+
+#define mmTPC6_CFG_QM_TENSOR_1_PADDING_VALUE 0xF86A40
+
+#define mmTPC6_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF86A44
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF86A48
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF86A4C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF86A50
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF86A54
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF86A58
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF86A5C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF86A60
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF86A64
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF86A68
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF86A6C
+
+#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF86A70
+
+#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF86A74
+
+#define mmTPC6_CFG_QM_TENSOR_2_PADDING_VALUE 0xF86A78
+
+#define mmTPC6_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF86A7C
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF86A80
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF86A84
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF86A88
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF86A8C
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF86A90
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF86A94
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF86A98
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF86A9C
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF86AA0
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF86AA4
+
+#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF86AA8
+
+#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF86AAC
+
+#define mmTPC6_CFG_QM_TENSOR_3_PADDING_VALUE 0xF86AB0
+
+#define mmTPC6_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF86AB4
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF86AB8
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF86ABC
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF86AC0
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF86AC4
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF86AC8
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF86ACC
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF86AD0
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF86AD4
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF86AD8
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF86ADC
+
+#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF86AE0
+
+#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF86AE4
+
+#define mmTPC6_CFG_QM_TENSOR_4_PADDING_VALUE 0xF86AE8
+
+#define mmTPC6_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF86AEC
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF86AF0
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF86AF4
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF86AF8
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF86AFC
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF86B00
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF86B04
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF86B08
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF86B0C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF86B10
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF86B14
+
+#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF86B18
+
+#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF86B1C
+
+#define mmTPC6_CFG_QM_TENSOR_5_PADDING_VALUE 0xF86B20
+
+#define mmTPC6_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF86B24
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF86B28
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF86B2C
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF86B30
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF86B34
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF86B38
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF86B3C
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF86B40
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF86B44
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF86B48
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF86B4C
+
+#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF86B50
+
+#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF86B54
+
+#define mmTPC6_CFG_QM_TENSOR_6_PADDING_VALUE 0xF86B58
+
+#define mmTPC6_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF86B5C
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF86B60
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF86B64
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF86B68
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF86B6C
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF86B70
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF86B74
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF86B78
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF86B7C
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF86B80
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF86B84
+
+#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF86B88
+
+#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF86B8C
+
+#define mmTPC6_CFG_QM_TENSOR_7_PADDING_VALUE 0xF86B90
+
+#define mmTPC6_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF86B94
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF86B98
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF86B9C
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF86BA0
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF86BA4
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF86BA8
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF86BAC
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF86BB0
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF86BB4
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF86BB8
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF86BBC
+
+#define mmTPC6_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xF86BC0
+
+#define mmTPC6_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xF86BC4
+
+#define mmTPC6_CFG_QM_TENSOR_8_PADDING_VALUE 0xF86BC8
+
+#define mmTPC6_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xF86BCC
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_0_SIZE 0xF86BD0
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xF86BD4
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_1_SIZE 0xF86BD8
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xF86BDC
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_2_SIZE 0xF86BE0
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xF86BE4
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_3_SIZE 0xF86BE8
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xF86BEC
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_4_SIZE 0xF86BF0
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xF86BF4
+
+#define mmTPC6_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xF86BF8
+
+#define mmTPC6_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xF86BFC
+
+#define mmTPC6_CFG_QM_TENSOR_9_PADDING_VALUE 0xF86C00
+
+#define mmTPC6_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xF86C04
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_0_SIZE 0xF86C08
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xF86C0C
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_1_SIZE 0xF86C10
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xF86C14
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_2_SIZE 0xF86C18
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xF86C1C
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_3_SIZE 0xF86C20
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xF86C24
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_4_SIZE 0xF86C28
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xF86C2C
+
+#define mmTPC6_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xF86C30
+
+#define mmTPC6_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xF86C34
+
+#define mmTPC6_CFG_QM_TENSOR_10_PADDING_VALUE 0xF86C38
+
+#define mmTPC6_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xF86C3C
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_0_SIZE 0xF86C40
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xF86C44
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_1_SIZE 0xF86C48
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xF86C4C
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_2_SIZE 0xF86C50
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xF86C54
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_3_SIZE 0xF86C58
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xF86C5C
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_4_SIZE 0xF86C60
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xF86C64
+
+#define mmTPC6_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xF86C68
+
+#define mmTPC6_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xF86C6C
+
+#define mmTPC6_CFG_QM_TENSOR_11_PADDING_VALUE 0xF86C70
+
+#define mmTPC6_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xF86C74
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_0_SIZE 0xF86C78
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xF86C7C
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_1_SIZE 0xF86C80
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xF86C84
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_2_SIZE 0xF86C88
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xF86C8C
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_3_SIZE 0xF86C90
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xF86C94
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_4_SIZE 0xF86C98
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xF86C9C
+
+#define mmTPC6_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xF86CA0
+
+#define mmTPC6_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xF86CA4
+
+#define mmTPC6_CFG_QM_TENSOR_12_PADDING_VALUE 0xF86CA8
+
+#define mmTPC6_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xF86CAC
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_0_SIZE 0xF86CB0
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xF86CB4
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_1_SIZE 0xF86CB8
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xF86CBC
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_2_SIZE 0xF86CC0
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xF86CC4
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_3_SIZE 0xF86CC8
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xF86CCC
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_4_SIZE 0xF86CD0
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xF86CD4
+
+#define mmTPC6_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xF86CD8
+
+#define mmTPC6_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xF86CDC
+
+#define mmTPC6_CFG_QM_TENSOR_13_PADDING_VALUE 0xF86CE0
+
+#define mmTPC6_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xF86CE4
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_0_SIZE 0xF86CE8
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xF86CEC
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_1_SIZE 0xF86CF0
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xF86CF4
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_2_SIZE 0xF86CF8
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xF86CFC
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_3_SIZE 0xF86D00
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xF86D04
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_4_SIZE 0xF86D08
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xF86D0C
+
+#define mmTPC6_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xF86D10
+
+#define mmTPC6_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xF86D14
+
+#define mmTPC6_CFG_QM_TENSOR_14_PADDING_VALUE 0xF86D18
+
+#define mmTPC6_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xF86D1C
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_0_SIZE 0xF86D20
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xF86D24
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_1_SIZE 0xF86D28
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xF86D2C
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_2_SIZE 0xF86D30
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xF86D34
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_3_SIZE 0xF86D38
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xF86D3C
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_4_SIZE 0xF86D40
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xF86D44
+
+#define mmTPC6_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xF86D48
+
+#define mmTPC6_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xF86D4C
+
+#define mmTPC6_CFG_QM_TENSOR_15_PADDING_VALUE 0xF86D50
+
+#define mmTPC6_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xF86D54
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_0_SIZE 0xF86D58
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xF86D5C
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_1_SIZE 0xF86D60
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xF86D64
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_2_SIZE 0xF86D68
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xF86D6C
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_3_SIZE 0xF86D70
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xF86D74
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_4_SIZE 0xF86D78
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xF86D7C
+
+#define mmTPC6_CFG_QM_SYNC_OBJECT_MESSAGE 0xF86D80
+
+#define mmTPC6_CFG_QM_SYNC_OBJECT_ADDR 0xF86D84
+
+#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF86D88
+
+#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF86D8C
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_0 0xF86D90
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_0 0xF86D94
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_1 0xF86D98
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_1 0xF86D9C
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_2 0xF86DA0
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_2 0xF86DA4
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_3 0xF86DA8
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_3 0xF86DAC
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_4 0xF86DB0
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_4 0xF86DB4
+
+#define mmTPC6_CFG_QM_KERNEL_CONFIG 0xF86DB8
+
+#define mmTPC6_CFG_QM_KERNEL_ID 0xF86DBC
+
+#define mmTPC6_CFG_QM_SRF_0 0xF86DC0
+
+#define mmTPC6_CFG_QM_SRF_1 0xF86DC4
+
+#define mmTPC6_CFG_QM_SRF_2 0xF86DC8
+
+#define mmTPC6_CFG_QM_SRF_3 0xF86DCC
+
+#define mmTPC6_CFG_QM_SRF_4 0xF86DD0
+
+#define mmTPC6_CFG_QM_SRF_5 0xF86DD4
+
+#define mmTPC6_CFG_QM_SRF_6 0xF86DD8
+
+#define mmTPC6_CFG_QM_SRF_7 0xF86DDC
+
+#define mmTPC6_CFG_QM_SRF_8 0xF86DE0
+
+#define mmTPC6_CFG_QM_SRF_9 0xF86DE4
+
+#define mmTPC6_CFG_QM_SRF_10 0xF86DE8
+
+#define mmTPC6_CFG_QM_SRF_11 0xF86DEC
+
+#define mmTPC6_CFG_QM_SRF_12 0xF86DF0
+
+#define mmTPC6_CFG_QM_SRF_13 0xF86DF4
+
+#define mmTPC6_CFG_QM_SRF_14 0xF86DF8
+
+#define mmTPC6_CFG_QM_SRF_15 0xF86DFC
+
+#define mmTPC6_CFG_QM_SRF_16 0xF86E00
+
+#define mmTPC6_CFG_QM_SRF_17 0xF86E04
+
+#define mmTPC6_CFG_QM_SRF_18 0xF86E08
+
+#define mmTPC6_CFG_QM_SRF_19 0xF86E0C
+
+#define mmTPC6_CFG_QM_SRF_20 0xF86E10
+
+#define mmTPC6_CFG_QM_SRF_21 0xF86E14
+
+#define mmTPC6_CFG_QM_SRF_22 0xF86E18
+
+#define mmTPC6_CFG_QM_SRF_23 0xF86E1C
+
+#define mmTPC6_CFG_QM_SRF_24 0xF86E20
+
+#define mmTPC6_CFG_QM_SRF_25 0xF86E24
+
+#define mmTPC6_CFG_QM_SRF_26 0xF86E28
+
+#define mmTPC6_CFG_QM_SRF_27 0xF86E2C
+
+#define mmTPC6_CFG_QM_SRF_28 0xF86E30
+
+#define mmTPC6_CFG_QM_SRF_29 0xF86E34
+
+#define mmTPC6_CFG_QM_SRF_30 0xF86E38
+
+#define mmTPC6_CFG_QM_SRF_31 0xF86E3C
+
+#endif /* ASIC_REG_TPC6_CFG_REGS_H_ */
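
The mm* constants above are displacements into the device's configuration space; the driver programs a TPC kernel descriptor by writing them through its mapped register window. As a minimal sketch, assuming a generic ioremap()'d mapping whose offset 0 corresponds to the base these constants are relative to (the habanalabs driver wraps register access in its own helpers, which this sketch does not reproduce):

	#include <linux/io.h>
	#include <linux/kernel.h>

	/*
	 * Illustrative only, not part of this patch: program the 64-bit
	 * base address of TPC6 kernel tensor 0. "regs" and the helper
	 * name are assumptions for the sake of the example.
	 */
	static void tpc6_set_tensor0_base(void __iomem *regs, u64 addr)
	{
		writel(lower_32_bits(addr),
		       regs + mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW);
		writel(upper_32_bits(addr),
		       regs + mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH);
	}

The same _LOW/_HIGH split pattern applies to every 64-bit quantity in the block (tensor bases, kernel base address, LUT function bases), so a real driver would typically factor the two writel() calls into one 64-bit write helper.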
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
new file mode 100644
index 000000000000..e35ef7fd8b1c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_QM_REGS_H_
+#define ASIC_REG_TPC6_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC6_QM_GLBL_CFG0 0xF88000
+
+#define mmTPC6_QM_GLBL_CFG1 0xF88004
+
+#define mmTPC6_QM_GLBL_PROT 0xF88008
+
+#define mmTPC6_QM_GLBL_ERR_CFG 0xF8800C
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_0 0xF88010
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_1 0xF88014
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_2 0xF88018
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_3 0xF8801C
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_4 0xF88020
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_0 0xF88024
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_1 0xF88028
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_2 0xF8802C
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_3 0xF88030
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_4 0xF88034
+
+#define mmTPC6_QM_GLBL_STS0 0xF88038
+
+#define mmTPC6_QM_GLBL_STS1_0 0xF88040
+
+#define mmTPC6_QM_GLBL_STS1_1 0xF88044
+
+#define mmTPC6_QM_GLBL_STS1_2 0xF88048
+
+#define mmTPC6_QM_GLBL_STS1_3 0xF8804C
+
+#define mmTPC6_QM_GLBL_STS1_4 0xF88050
+
+#define mmTPC6_QM_GLBL_MSG_EN_0 0xF88054
+
+#define mmTPC6_QM_GLBL_MSG_EN_1 0xF88058
+
+#define mmTPC6_QM_GLBL_MSG_EN_2 0xF8805C
+
+#define mmTPC6_QM_GLBL_MSG_EN_3 0xF88060
+
+#define mmTPC6_QM_GLBL_MSG_EN_4 0xF88068
+
+#define mmTPC6_QM_PQ_BASE_LO_0 0xF88070
+
+#define mmTPC6_QM_PQ_BASE_LO_1 0xF88074
+
+#define mmTPC6_QM_PQ_BASE_LO_2 0xF88078
+
+#define mmTPC6_QM_PQ_BASE_LO_3 0xF8807C
+
+#define mmTPC6_QM_PQ_BASE_HI_0 0xF88080
+
+#define mmTPC6_QM_PQ_BASE_HI_1 0xF88084
+
+#define mmTPC6_QM_PQ_BASE_HI_2 0xF88088
+
+#define mmTPC6_QM_PQ_BASE_HI_3 0xF8808C
+
+#define mmTPC6_QM_PQ_SIZE_0 0xF88090
+
+#define mmTPC6_QM_PQ_SIZE_1 0xF88094
+
+#define mmTPC6_QM_PQ_SIZE_2 0xF88098
+
+#define mmTPC6_QM_PQ_SIZE_3 0xF8809C
+
+#define mmTPC6_QM_PQ_PI_0 0xF880A0
+
+#define mmTPC6_QM_PQ_PI_1 0xF880A4
+
+#define mmTPC6_QM_PQ_PI_2 0xF880A8
+
+#define mmTPC6_QM_PQ_PI_3 0xF880AC
+
+#define mmTPC6_QM_PQ_CI_0 0xF880B0
+
+#define mmTPC6_QM_PQ_CI_1 0xF880B4
+
+#define mmTPC6_QM_PQ_CI_2 0xF880B8
+
+#define mmTPC6_QM_PQ_CI_3 0xF880BC
+
+#define mmTPC6_QM_PQ_CFG0_0 0xF880C0
+
+#define mmTPC6_QM_PQ_CFG0_1 0xF880C4
+
+#define mmTPC6_QM_PQ_CFG0_2 0xF880C8
+
+#define mmTPC6_QM_PQ_CFG0_3 0xF880CC
+
+#define mmTPC6_QM_PQ_CFG1_0 0xF880D0
+
+#define mmTPC6_QM_PQ_CFG1_1 0xF880D4
+
+#define mmTPC6_QM_PQ_CFG1_2 0xF880D8
+
+#define mmTPC6_QM_PQ_CFG1_3 0xF880DC
+
+#define mmTPC6_QM_PQ_ARUSER_31_11_0 0xF880E0
+
+#define mmTPC6_QM_PQ_ARUSER_31_11_1 0xF880E4
+
+#define mmTPC6_QM_PQ_ARUSER_31_11_2 0xF880E8
+
+#define mmTPC6_QM_PQ_ARUSER_31_11_3 0xF880EC
+
+#define mmTPC6_QM_PQ_STS0_0 0xF880F0
+
+#define mmTPC6_QM_PQ_STS0_1 0xF880F4
+
+#define mmTPC6_QM_PQ_STS0_2 0xF880F8
+
+#define mmTPC6_QM_PQ_STS0_3 0xF880FC
+
+#define mmTPC6_QM_PQ_STS1_0 0xF88100
+
+#define mmTPC6_QM_PQ_STS1_1 0xF88104
+
+#define mmTPC6_QM_PQ_STS1_2 0xF88108
+
+#define mmTPC6_QM_PQ_STS1_3 0xF8810C
+
+#define mmTPC6_QM_CQ_CFG0_0 0xF88110
+
+#define mmTPC6_QM_CQ_CFG0_1 0xF88114
+
+#define mmTPC6_QM_CQ_CFG0_2 0xF88118
+
+#define mmTPC6_QM_CQ_CFG0_3 0xF8811C
+
+#define mmTPC6_QM_CQ_CFG0_4 0xF88120
+
+#define mmTPC6_QM_CQ_CFG1_0 0xF88124
+
+#define mmTPC6_QM_CQ_CFG1_1 0xF88128
+
+#define mmTPC6_QM_CQ_CFG1_2 0xF8812C
+
+#define mmTPC6_QM_CQ_CFG1_3 0xF88130
+
+#define mmTPC6_QM_CQ_CFG1_4 0xF88134
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_0 0xF88138
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_1 0xF8813C
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_2 0xF88140
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_3 0xF88144
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_4 0xF88148
+
+#define mmTPC6_QM_CQ_STS0_0 0xF8814C
+
+#define mmTPC6_QM_CQ_STS0_1 0xF88150
+
+#define mmTPC6_QM_CQ_STS0_2 0xF88154
+
+#define mmTPC6_QM_CQ_STS0_3 0xF88158
+
+#define mmTPC6_QM_CQ_STS0_4 0xF8815C
+
+#define mmTPC6_QM_CQ_STS1_0 0xF88160
+
+#define mmTPC6_QM_CQ_STS1_1 0xF88164
+
+#define mmTPC6_QM_CQ_STS1_2 0xF88168
+
+#define mmTPC6_QM_CQ_STS1_3 0xF8816C
+
+#define mmTPC6_QM_CQ_STS1_4 0xF88170
+
+#define mmTPC6_QM_CQ_PTR_LO_0 0xF88174
+
+#define mmTPC6_QM_CQ_PTR_HI_0 0xF88178
+
+#define mmTPC6_QM_CQ_TSIZE_0 0xF8817C
+
+#define mmTPC6_QM_CQ_CTL_0 0xF88180
+
+#define mmTPC6_QM_CQ_PTR_LO_1 0xF88184
+
+#define mmTPC6_QM_CQ_PTR_HI_1 0xF88188
+
+#define mmTPC6_QM_CQ_TSIZE_1 0xF8818C
+
+#define mmTPC6_QM_CQ_CTL_1 0xF88190
+
+#define mmTPC6_QM_CQ_PTR_LO_2 0xF88194
+
+#define mmTPC6_QM_CQ_PTR_HI_2 0xF88198
+
+#define mmTPC6_QM_CQ_TSIZE_2 0xF8819C
+
+#define mmTPC6_QM_CQ_CTL_2 0xF881A0
+
+#define mmTPC6_QM_CQ_PTR_LO_3 0xF881A4
+
+#define mmTPC6_QM_CQ_PTR_HI_3 0xF881A8
+
+#define mmTPC6_QM_CQ_TSIZE_3 0xF881AC
+
+#define mmTPC6_QM_CQ_CTL_3 0xF881B0
+
+#define mmTPC6_QM_CQ_PTR_LO_4 0xF881B4
+
+#define mmTPC6_QM_CQ_PTR_HI_4 0xF881B8
+
+#define mmTPC6_QM_CQ_TSIZE_4 0xF881BC
+
+#define mmTPC6_QM_CQ_CTL_4 0xF881C0
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_0 0xF881C4
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_1 0xF881C8
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_2 0xF881CC
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_3 0xF881D0
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_4 0xF881D4
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_0 0xF881D8
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_1 0xF881DC
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_2 0xF881E0
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_3 0xF881E4
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_4 0xF881E8
+
+#define mmTPC6_QM_CQ_TSIZE_STS_0 0xF881EC
+
+#define mmTPC6_QM_CQ_TSIZE_STS_1 0xF881F0
+
+#define mmTPC6_QM_CQ_TSIZE_STS_2 0xF881F4
+
+#define mmTPC6_QM_CQ_TSIZE_STS_3 0xF881F8
+
+#define mmTPC6_QM_CQ_TSIZE_STS_4 0xF881FC
+
+#define mmTPC6_QM_CQ_CTL_STS_0 0xF88200
+
+#define mmTPC6_QM_CQ_CTL_STS_1 0xF88204
+
+#define mmTPC6_QM_CQ_CTL_STS_2 0xF88208
+
+#define mmTPC6_QM_CQ_CTL_STS_3 0xF8820C
+
+#define mmTPC6_QM_CQ_CTL_STS_4 0xF88210
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_0 0xF88214
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_1 0xF88218
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_2 0xF8821C
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_3 0xF88220
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_4 0xF88224
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_0 0xF88228
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_1 0xF8822C
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_2 0xF88230
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_3 0xF88234
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_4 0xF88238
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_0 0xF8823C
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_1 0xF88240
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_2 0xF88244
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_3 0xF88248
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_4 0xF8824C
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_0 0xF88250
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_1 0xF88254
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_2 0xF88258
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_3 0xF8825C
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_4 0xF88260
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_0 0xF88264
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_1 0xF88268
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_2 0xF8826C
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_3 0xF88270
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_4 0xF88274
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_0 0xF88278
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_1 0xF8827C
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 0xF88280
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_3 0xF88284
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_4 0xF88288
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_0 0xF8828C
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_1 0xF88290
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_2 0xF88294
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_3 0xF88298
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_4 0xF8829C
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_0 0xF882A0
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_1 0xF882A4
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_2 0xF882A8
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_3 0xF882AC
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_4 0xF882B0
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_0 0xF882B4
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_1 0xF882B8
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_2 0xF882BC
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_3 0xF882C0
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_4 0xF882C4
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_0 0xF882C8
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_1 0xF882CC
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_2 0xF882D0
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_3 0xF882D4
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_4 0xF882D8
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xF882E0
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xF882E4
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xF882E8
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xF882EC
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xF882F0
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xF882F4
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xF882F8
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xF882FC
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xF88300
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xF88304
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_0 0xF88308
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_1 0xF8830C
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_2 0xF88310
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_3 0xF88314
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_4 0xF88318
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_0 0xF8831C
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_1 0xF88320
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_2 0xF88324
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_3 0xF88328
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_4 0xF8832C
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_0 0xF88330
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_1 0xF88334
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_2 0xF88338
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_3 0xF8833C
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_4 0xF88340
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_0 0xF88344
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_1 0xF88348
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_2 0xF8834C
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_3 0xF88350
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_4 0xF88354
+
+#define mmTPC6_QM_CP_FENCE0_CNT_0 0xF88358
+
+#define mmTPC6_QM_CP_FENCE0_CNT_1 0xF8835C
+
+#define mmTPC6_QM_CP_FENCE0_CNT_2 0xF88360
+
+#define mmTPC6_QM_CP_FENCE0_CNT_3 0xF88364
+
+#define mmTPC6_QM_CP_FENCE0_CNT_4 0xF88368
+
+#define mmTPC6_QM_CP_FENCE1_CNT_0 0xF8836C
+
+#define mmTPC6_QM_CP_FENCE1_CNT_1 0xF88370
+
+#define mmTPC6_QM_CP_FENCE1_CNT_2 0xF88374
+
+#define mmTPC6_QM_CP_FENCE1_CNT_3 0xF88378
+
+#define mmTPC6_QM_CP_FENCE1_CNT_4 0xF8837C
+
+#define mmTPC6_QM_CP_FENCE2_CNT_0 0xF88380
+
+#define mmTPC6_QM_CP_FENCE2_CNT_1 0xF88384
+
+#define mmTPC6_QM_CP_FENCE2_CNT_2 0xF88388
+
+#define mmTPC6_QM_CP_FENCE2_CNT_3 0xF8838C
+
+#define mmTPC6_QM_CP_FENCE2_CNT_4 0xF88390
+
+#define mmTPC6_QM_CP_FENCE3_CNT_0 0xF88394
+
+#define mmTPC6_QM_CP_FENCE3_CNT_1 0xF88398
+
+#define mmTPC6_QM_CP_FENCE3_CNT_2 0xF8839C
+
+#define mmTPC6_QM_CP_FENCE3_CNT_3 0xF883A0
+
+#define mmTPC6_QM_CP_FENCE3_CNT_4 0xF883A4
+
+#define mmTPC6_QM_CP_STS_0 0xF883A8
+
+#define mmTPC6_QM_CP_STS_1 0xF883AC
+
+#define mmTPC6_QM_CP_STS_2 0xF883B0
+
+#define mmTPC6_QM_CP_STS_3 0xF883B4
+
+#define mmTPC6_QM_CP_STS_4 0xF883B8
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_0 0xF883BC
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_1 0xF883C0
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_2 0xF883C4
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_3 0xF883C8
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_4 0xF883CC
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_0 0xF883D0
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_1 0xF883D4
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_2 0xF883D8
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_3 0xF883DC
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_4 0xF883E0
+
+#define mmTPC6_QM_CP_BARRIER_CFG_0 0xF883F4
+
+#define mmTPC6_QM_CP_BARRIER_CFG_1 0xF883F8
+
+#define mmTPC6_QM_CP_BARRIER_CFG_2 0xF883FC
+
+#define mmTPC6_QM_CP_BARRIER_CFG_3 0xF88400
+
+#define mmTPC6_QM_CP_BARRIER_CFG_4 0xF88404
+
+#define mmTPC6_QM_CP_DBG_0_0 0xF88408
+
+#define mmTPC6_QM_CP_DBG_0_1 0xF8840C
+
+#define mmTPC6_QM_CP_DBG_0_2 0xF88410
+
+#define mmTPC6_QM_CP_DBG_0_3 0xF88414
+
+#define mmTPC6_QM_CP_DBG_0_4 0xF88418
+
+#define mmTPC6_QM_CP_ARUSER_31_11_0 0xF8841C
+
+#define mmTPC6_QM_CP_ARUSER_31_11_1 0xF88420
+
+#define mmTPC6_QM_CP_ARUSER_31_11_2 0xF88424
+
+#define mmTPC6_QM_CP_ARUSER_31_11_3 0xF88428
+
+#define mmTPC6_QM_CP_ARUSER_31_11_4 0xF8842C
+
+#define mmTPC6_QM_CP_AWUSER_31_11_0 0xF88430
+
+#define mmTPC6_QM_CP_AWUSER_31_11_1 0xF88434
+
+#define mmTPC6_QM_CP_AWUSER_31_11_2 0xF88438
+
+#define mmTPC6_QM_CP_AWUSER_31_11_3 0xF8843C
+
+#define mmTPC6_QM_CP_AWUSER_31_11_4 0xF88440
+
+#define mmTPC6_QM_ARB_CFG_0 0xF88A00
+
+#define mmTPC6_QM_ARB_CHOISE_Q_PUSH 0xF88A04
+
+#define mmTPC6_QM_ARB_WRR_WEIGHT_0 0xF88A08
+
+#define mmTPC6_QM_ARB_WRR_WEIGHT_1 0xF88A0C
+
+#define mmTPC6_QM_ARB_WRR_WEIGHT_2 0xF88A10
+
+#define mmTPC6_QM_ARB_WRR_WEIGHT_3 0xF88A14
+
+#define mmTPC6_QM_ARB_CFG_1 0xF88A18
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_0 0xF88A20
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_1 0xF88A24
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_2 0xF88A28
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_3 0xF88A2C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_4 0xF88A30
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_5 0xF88A34
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_6 0xF88A38
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_7 0xF88A3C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_8 0xF88A40
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_9 0xF88A44
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_10 0xF88A48
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_11 0xF88A4C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_12 0xF88A50
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_13 0xF88A54
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_14 0xF88A58
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_15 0xF88A5C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_16 0xF88A60
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_17 0xF88A64
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_18 0xF88A68
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_19 0xF88A6C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_20 0xF88A70
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_21 0xF88A74
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_22 0xF88A78
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_23 0xF88A7C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_24 0xF88A80
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_25 0xF88A84
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_26 0xF88A88
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_27 0xF88A8C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_28 0xF88A90
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_29 0xF88A94
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_30 0xF88A98
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_31 0xF88A9C
+
+#define mmTPC6_QM_ARB_MST_CRED_INC 0xF88AA0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xF88AA4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xF88AA8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xF88AAC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xF88AB0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xF88AB4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xF88AB8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xF88ABC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xF88AC0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xF88AC4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xF88AC8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xF88ACC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xF88AD0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xF88AD4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xF88AD8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xF88ADC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xF88AE0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xF88AE4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xF88AE8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xF88AEC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xF88AF0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xF88AF4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xF88AF8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xF88AFC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xF88B00
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xF88B04
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xF88B08
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xF88B0C
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xF88B10
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xF88B14
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xF88B18
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xF88B1C
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xF88B20
+
+#define mmTPC6_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xF88B28
+
+#define mmTPC6_QM_ARB_MST_SLAVE_EN 0xF88B2C
+
+#define mmTPC6_QM_ARB_MST_QUIET_PER 0xF88B34
+
+#define mmTPC6_QM_ARB_SLV_CHOISE_WDT 0xF88B38
+
+#define mmTPC6_QM_ARB_SLV_ID 0xF88B3C
+
+#define mmTPC6_QM_ARB_MSG_MAX_INFLIGHT 0xF88B44
+
+#define mmTPC6_QM_ARB_MSG_AWUSER_31_11 0xF88B48
+
+#define mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP 0xF88B4C
+
+#define mmTPC6_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xF88B50
+
+#define mmTPC6_QM_ARB_BASE_LO 0xF88B54
+
+#define mmTPC6_QM_ARB_BASE_HI 0xF88B58
+
+#define mmTPC6_QM_ARB_STATE_STS 0xF88B80
+
+#define mmTPC6_QM_ARB_CHOISE_FULLNESS_STS 0xF88B84
+
+#define mmTPC6_QM_ARB_MSG_STS 0xF88B88
+
+#define mmTPC6_QM_ARB_SLV_CHOISE_Q_HEAD 0xF88B8C
+
+#define mmTPC6_QM_ARB_ERR_CAUSE 0xF88B9C
+
+#define mmTPC6_QM_ARB_ERR_MSG_EN 0xF88BA0
+
+#define mmTPC6_QM_ARB_ERR_STS_DRP 0xF88BA8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_0 0xF88BB0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_1 0xF88BB4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_2 0xF88BB8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_3 0xF88BBC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_4 0xF88BC0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_5 0xF88BC4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_6 0xF88BC8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_7 0xF88BCC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_8 0xF88BD0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_9 0xF88BD4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_10 0xF88BD8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_11 0xF88BDC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_12 0xF88BE0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_13 0xF88BE4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_14 0xF88BE8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_15 0xF88BEC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_16 0xF88BF0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_17 0xF88BF4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_18 0xF88BF8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_19 0xF88BFC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_20 0xF88C00
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_21 0xF88C04
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_22 0xF88C08
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_23 0xF88C0C
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_24 0xF88C10
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_25 0xF88C14
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_26 0xF88C18
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_27 0xF88C1C
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_28 0xF88C20
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_29 0xF88C24
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_30 0xF88C28
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_31 0xF88C2C
+
+#define mmTPC6_QM_CGM_CFG 0xF88C70
+
+#define mmTPC6_QM_CGM_STS 0xF88C74
+
+#define mmTPC6_QM_CGM_CFG1 0xF88C78
+
+#define mmTPC6_QM_LOCAL_RANGE_BASE 0xF88C80
+
+#define mmTPC6_QM_LOCAL_RANGE_SIZE 0xF88C84
+
+#define mmTPC6_QM_CSMR_STRICT_PRIO_CFG 0xF88C90
+
+#define mmTPC6_QM_HBW_RD_RATE_LIM_CFG_1 0xF88C94
+
+#define mmTPC6_QM_LBW_WR_RATE_LIM_CFG_0 0xF88C98
+
+#define mmTPC6_QM_LBW_WR_RATE_LIM_CFG_1 0xF88C9C
+
+#define mmTPC6_QM_HBW_RD_RATE_LIM_CFG_0 0xF88CA0
+
+#define mmTPC6_QM_GLBL_AXCACHE 0xF88CA4
+
+#define mmTPC6_QM_IND_GW_APB_CFG 0xF88CB0
+
+#define mmTPC6_QM_IND_GW_APB_WDATA 0xF88CB4
+
+#define mmTPC6_QM_IND_GW_APB_RDATA 0xF88CB8
+
+#define mmTPC6_QM_IND_GW_APB_STATUS 0xF88CBC
+
+#define mmTPC6_QM_GLBL_ERR_ADDR_LO 0xF88CD0
+
+#define mmTPC6_QM_GLBL_ERR_ADDR_HI 0xF88CD4
+
+#define mmTPC6_QM_GLBL_ERR_WDATA 0xF88CD8
+
+#define mmTPC6_QM_GLBL_MEM_INIT_BUSY 0xF88D00
+
+#endif /* ASIC_REG_TPC6_QM_REGS_H_ */
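
The QMAN block above exposes per-queue base, size, producer-index and consumer-index registers (four physical queues, five completion queues). A hedged sketch of pointing physical queue 0 at a ring buffer, under the same illustrative ioremap()'d-window assumption as before; note that the encoding PQ_SIZE expects is defined by the QMAN hardware, not by this header:

	/*
	 * Illustrative only: reset TPC6 QMAN physical queue 0 to an
	 * empty ring at "pq_addr". The helper and mapping are
	 * assumptions, not the driver's actual API.
	 */
	static void tpc6_qm_init_pq0(void __iomem *regs, u64 pq_addr,
				     u32 size)
	{
		writel(lower_32_bits(pq_addr), regs + mmTPC6_QM_PQ_BASE_LO_0);
		writel(upper_32_bits(pq_addr), regs + mmTPC6_QM_PQ_BASE_HI_0);
		writel(size, regs + mmTPC6_QM_PQ_SIZE_0);
		writel(0, regs + mmTPC6_QM_PQ_PI_0);	/* producer index */
		writel(0, regs + mmTPC6_QM_PQ_CI_0);	/* consumer index */
	}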
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
new file mode 100644
index 000000000000..1887b10e58e2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_CFG_REGS_H_
+#define ASIC_REG_TPC7_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xFC6400
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xFC6404
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xFC6408
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xFC640C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xFC6410
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xFC6414
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xFC6418
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xFC641C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xFC6420
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xFC6424
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xFC6428
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xFC642C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xFC6430
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xFC6434
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xFC6438
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xFC643C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xFC6440
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xFC6444
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xFC6448
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xFC644C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xFC6450
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xFC6454
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xFC6458
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xFC645C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xFC6460
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xFC6464
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xFC6468
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xFC646C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xFC6470
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xFC6474
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xFC6478
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xFC647C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xFC6480
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xFC6484
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xFC6488
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xFC648C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xFC6490
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xFC6494
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xFC6498
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xFC649C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xFC64A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xFC64A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xFC64A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xFC64AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xFC64B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xFC64B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xFC64B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xFC64BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xFC64C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xFC64C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xFC64C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xFC64CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xFC64D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xFC64D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xFC64D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xFC64DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xFC64E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xFC64E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xFC64E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xFC64EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xFC64F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xFC64F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xFC64F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xFC64FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xFC6500
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xFC6504
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xFC6508
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xFC650C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xFC6510
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xFC6514
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xFC6518
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xFC651C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xFC6520
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xFC6524
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xFC6528
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xFC652C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xFC6530
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xFC6534
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xFC6538
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xFC653C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xFC6540
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xFC6544
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xFC6548
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xFC654C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xFC6550
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xFC6554
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xFC6558
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xFC655C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xFC6560
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xFC6564
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xFC6568
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xFC656C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xFC6570
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xFC6574
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xFC6578
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xFC657C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xFC6580
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xFC6584
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xFC6588
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xFC658C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xFC6590
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xFC6594
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xFC6598
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xFC659C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xFC65A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xFC65A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xFC65A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xFC65AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xFC65B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xFC65B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xFC65B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xFC65BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xFC65C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xFC65C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xFC65C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xFC65CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xFC65D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xFC65D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xFC65D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xFC65DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xFC65E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xFC65E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xFC65E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xFC65EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xFC65F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xFC65F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xFC65F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xFC65FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xFC6600
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xFC6604
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xFC6608
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xFC660C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xFC6610
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xFC6614
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xFC6618
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xFC661C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xFC6620
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xFC6624
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xFC6628
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xFC662C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xFC6630
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xFC6634
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xFC6638
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xFC663C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xFC6640
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xFC6644
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xFC6648
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xFC664C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xFC6650
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xFC6654
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xFC6658
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xFC665C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xFC6660
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xFC6664
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xFC6668
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xFC666C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xFC6670
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xFC6674
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xFC6678
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xFC667C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xFC6680
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xFC6684
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xFC6688
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xFC668C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xFC6690
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xFC6694
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xFC6698
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xFC669C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xFC66A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xFC66A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xFC66A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xFC66AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xFC66B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xFC66B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xFC66B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xFC66BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xFC66C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xFC66C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xFC66C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xFC66CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xFC66D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xFC66D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xFC66D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xFC66DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xFC66E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xFC66E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xFC66E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xFC66EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xFC66F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xFC66F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xFC66F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xFC66FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xFC6700
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xFC6704
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xFC6708
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xFC670C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xFC6710
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xFC6714
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xFC6718
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xFC671C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xFC6720
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xFC6724
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xFC6728
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xFC672C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xFC6730
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xFC6734
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xFC6738
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xFC673C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xFC6740
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xFC6744
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xFC6748
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xFC674C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xFC6750
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xFC6754
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xFC6758
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xFC675C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xFC6760
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xFC6764
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xFC6768
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xFC676C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xFC6770
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xFC6774
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xFC6778
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xFC677C
+
+#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xFC6780
+
+#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_ADDR 0xFC6784
+
+#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xFC6788
+
+#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xFC678C
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_0 0xFC6790
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_0 0xFC6794
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_1 0xFC6798
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_1 0xFC679C
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_2 0xFC67A0
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_2 0xFC67A4
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_3 0xFC67A8
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_3 0xFC67AC
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_4 0xFC67B0
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_4 0xFC67B4
+
+#define mmTPC7_CFG_KERNEL_KERNEL_CONFIG 0xFC67B8
+
+#define mmTPC7_CFG_KERNEL_KERNEL_ID 0xFC67BC
+
+#define mmTPC7_CFG_KERNEL_SRF_0 0xFC67C0
+
+#define mmTPC7_CFG_KERNEL_SRF_1 0xFC67C4
+
+#define mmTPC7_CFG_KERNEL_SRF_2 0xFC67C8
+
+#define mmTPC7_CFG_KERNEL_SRF_3 0xFC67CC
+
+#define mmTPC7_CFG_KERNEL_SRF_4 0xFC67D0
+
+#define mmTPC7_CFG_KERNEL_SRF_5 0xFC67D4
+
+#define mmTPC7_CFG_KERNEL_SRF_6 0xFC67D8
+
+#define mmTPC7_CFG_KERNEL_SRF_7 0xFC67DC
+
+#define mmTPC7_CFG_KERNEL_SRF_8 0xFC67E0
+
+#define mmTPC7_CFG_KERNEL_SRF_9 0xFC67E4
+
+#define mmTPC7_CFG_KERNEL_SRF_10 0xFC67E8
+
+#define mmTPC7_CFG_KERNEL_SRF_11 0xFC67EC
+
+#define mmTPC7_CFG_KERNEL_SRF_12 0xFC67F0
+
+#define mmTPC7_CFG_KERNEL_SRF_13 0xFC67F4
+
+#define mmTPC7_CFG_KERNEL_SRF_14 0xFC67F8
+
+#define mmTPC7_CFG_KERNEL_SRF_15 0xFC67FC
+
+#define mmTPC7_CFG_KERNEL_SRF_16 0xFC6800
+
+#define mmTPC7_CFG_KERNEL_SRF_17 0xFC6804
+
+#define mmTPC7_CFG_KERNEL_SRF_18 0xFC6808
+
+#define mmTPC7_CFG_KERNEL_SRF_19 0xFC680C
+
+#define mmTPC7_CFG_KERNEL_SRF_20 0xFC6810
+
+#define mmTPC7_CFG_KERNEL_SRF_21 0xFC6814
+
+#define mmTPC7_CFG_KERNEL_SRF_22 0xFC6818
+
+#define mmTPC7_CFG_KERNEL_SRF_23 0xFC681C
+
+#define mmTPC7_CFG_KERNEL_SRF_24 0xFC6820
+
+#define mmTPC7_CFG_KERNEL_SRF_25 0xFC6824
+
+#define mmTPC7_CFG_KERNEL_SRF_26 0xFC6828
+
+#define mmTPC7_CFG_KERNEL_SRF_27 0xFC682C
+
+#define mmTPC7_CFG_KERNEL_SRF_28 0xFC6830
+
+#define mmTPC7_CFG_KERNEL_SRF_29 0xFC6834
+
+#define mmTPC7_CFG_KERNEL_SRF_30 0xFC6838
+
+#define mmTPC7_CFG_KERNEL_SRF_31 0xFC683C
+
+#define mmTPC7_CFG_ROUND_CSR 0xFC68FC
+
+#define mmTPC7_CFG_PROT 0xFC6900
+
+#define mmTPC7_CFG_SEMAPHORE 0xFC6908
+
+#define mmTPC7_CFG_VFLAGS 0xFC690C
+
+#define mmTPC7_CFG_SFLAGS 0xFC6910
+
+#define mmTPC7_CFG_LFSR_POLYNOM 0xFC6918
+
+#define mmTPC7_CFG_STATUS 0xFC691C
+
+#define mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH 0xFC6920
+
+#define mmTPC7_CFG_CFG_SUBTRACT_VALUE 0xFC6924
+
+#define mmTPC7_CFG_SM_BASE_ADDRESS_HIGH 0xFC692C
+
+#define mmTPC7_CFG_TPC_CMD 0xFC6930
+
+#define mmTPC7_CFG_TPC_EXECUTE 0xFC6938
+
+#define mmTPC7_CFG_TPC_STALL 0xFC693C
+
+#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_LOW 0xFC6940
+
+#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH 0xFC6944
+
+#define mmTPC7_CFG_RD_RATE_LIMIT 0xFC6948
+
+#define mmTPC7_CFG_WR_RATE_LIMIT 0xFC6950
+
+#define mmTPC7_CFG_MSS_CONFIG 0xFC6954
+
+#define mmTPC7_CFG_TPC_INTR_CAUSE 0xFC6958
+
+#define mmTPC7_CFG_TPC_INTR_MASK 0xFC695C
+
+#define mmTPC7_CFG_WQ_CREDITS 0xFC6960
+
+#define mmTPC7_CFG_ARUSER_LO 0xFC6964
+
+#define mmTPC7_CFG_ARUSER_HI 0xFC6968
+
+#define mmTPC7_CFG_AWUSER_LO 0xFC696C
+
+#define mmTPC7_CFG_AWUSER_HI 0xFC6970
+
+#define mmTPC7_CFG_OPCODE_EXEC 0xFC6974
+
+#define mmTPC7_CFG_LUT_FUNC32_BASE_ADDR_LO 0xFC6978
+
+#define mmTPC7_CFG_LUT_FUNC32_BASE_ADDR_HI 0xFC697C
+
+#define mmTPC7_CFG_LUT_FUNC64_BASE_ADDR_LO 0xFC6980
+
+#define mmTPC7_CFG_LUT_FUNC64_BASE_ADDR_HI 0xFC6984
+
+#define mmTPC7_CFG_LUT_FUNC128_BASE_ADDR_LO 0xFC6988
+
+#define mmTPC7_CFG_LUT_FUNC128_BASE_ADDR_HI 0xFC698C
+
+#define mmTPC7_CFG_LUT_FUNC256_BASE_ADDR_LO 0xFC6990
+
+#define mmTPC7_CFG_LUT_FUNC256_BASE_ADDR_HI 0xFC6994
+
+#define mmTPC7_CFG_TSB_CFG_MAX_SIZE 0xFC6998
+
+#define mmTPC7_CFG_TSB_CFG 0xFC699C
+
+#define mmTPC7_CFG_DBGMEM_ADD 0xFC69A0
+
+#define mmTPC7_CFG_DBGMEM_DATA_WR 0xFC69A4
+
+#define mmTPC7_CFG_DBGMEM_DATA_RD 0xFC69A8
+
+#define mmTPC7_CFG_DBGMEM_CTRL 0xFC69AC
+
+#define mmTPC7_CFG_DBGMEM_RC 0xFC69B0
+
+#define mmTPC7_CFG_TSB_INFLIGHT_CNTR 0xFC69B4
+
+#define mmTPC7_CFG_WQ_INFLIGHT_CNTR 0xFC69B8
+
+#define mmTPC7_CFG_WQ_LBW_TOTAL_CNTR 0xFC69BC
+
+#define mmTPC7_CFG_WQ_HBW_TOTAL_CNTR 0xFC69C0
+
+#define mmTPC7_CFG_IRQ_OCCOUPY_CNTR 0xFC69C4
+
+#define mmTPC7_CFG_FUNC_MBIST_CNTRL 0xFC69D0
+
+#define mmTPC7_CFG_FUNC_MBIST_PAT 0xFC69D4
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_0 0xFC69D8
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_1 0xFC69DC
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_2 0xFC69E0
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_3 0xFC69E4
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_4 0xFC69E8
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_5 0xFC69EC
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_6 0xFC69F0
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_7 0xFC69F4
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_8 0xFC69F8
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_9 0xFC69FC
+
+#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xFC6A00
+
+#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xFC6A04
+
+#define mmTPC7_CFG_QM_TENSOR_0_PADDING_VALUE 0xFC6A08
+
+#define mmTPC7_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xFC6A0C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_SIZE 0xFC6A10
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xFC6A14
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_SIZE 0xFC6A18
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xFC6A1C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_SIZE 0xFC6A20
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xFC6A24
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_SIZE 0xFC6A28
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xFC6A2C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_SIZE 0xFC6A30
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xFC6A34
+
+#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xFC6A38
+
+#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xFC6A3C
+
+#define mmTPC7_CFG_QM_TENSOR_1_PADDING_VALUE 0xFC6A40
+
+#define mmTPC7_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xFC6A44
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_SIZE 0xFC6A48
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xFC6A4C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_SIZE 0xFC6A50
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xFC6A54
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_SIZE 0xFC6A58
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xFC6A5C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_SIZE 0xFC6A60
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xFC6A64
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_SIZE 0xFC6A68
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xFC6A6C
+
+#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xFC6A70
+
+#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xFC6A74
+
+#define mmTPC7_CFG_QM_TENSOR_2_PADDING_VALUE 0xFC6A78
+
+#define mmTPC7_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xFC6A7C
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_SIZE 0xFC6A80
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xFC6A84
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_SIZE 0xFC6A88
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xFC6A8C
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_SIZE 0xFC6A90
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xFC6A94
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_SIZE 0xFC6A98
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xFC6A9C
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_SIZE 0xFC6AA0
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xFC6AA4
+
+#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xFC6AA8
+
+#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xFC6AAC
+
+#define mmTPC7_CFG_QM_TENSOR_3_PADDING_VALUE 0xFC6AB0
+
+#define mmTPC7_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xFC6AB4
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_SIZE 0xFC6AB8
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xFC6ABC
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_SIZE 0xFC6AC0
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xFC6AC4
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_SIZE 0xFC6AC8
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xFC6ACC
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_SIZE 0xFC6AD0
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xFC6AD4
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_SIZE 0xFC6AD8
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xFC6ADC
+
+#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xFC6AE0
+
+#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xFC6AE4
+
+#define mmTPC7_CFG_QM_TENSOR_4_PADDING_VALUE 0xFC6AE8
+
+#define mmTPC7_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xFC6AEC
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_SIZE 0xFC6AF0
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xFC6AF4
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_SIZE 0xFC6AF8
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xFC6AFC
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_SIZE 0xFC6B00
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xFC6B04
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_SIZE 0xFC6B08
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xFC6B0C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_SIZE 0xFC6B10
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xFC6B14
+
+#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xFC6B18
+
+#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xFC6B1C
+
+#define mmTPC7_CFG_QM_TENSOR_5_PADDING_VALUE 0xFC6B20
+
+#define mmTPC7_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xFC6B24
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_SIZE 0xFC6B28
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xFC6B2C
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_SIZE 0xFC6B30
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xFC6B34
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_SIZE 0xFC6B38
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xFC6B3C
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_SIZE 0xFC6B40
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xFC6B44
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_SIZE 0xFC6B48
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xFC6B4C
+
+#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xFC6B50
+
+#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xFC6B54
+
+#define mmTPC7_CFG_QM_TENSOR_6_PADDING_VALUE 0xFC6B58
+
+#define mmTPC7_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xFC6B5C
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_SIZE 0xFC6B60
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xFC6B64
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_SIZE 0xFC6B68
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xFC6B6C
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_SIZE 0xFC6B70
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xFC6B74
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_SIZE 0xFC6B78
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xFC6B7C
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_SIZE 0xFC6B80
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xFC6B84
+
+#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xFC6B88
+
+#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xFC6B8C
+
+#define mmTPC7_CFG_QM_TENSOR_7_PADDING_VALUE 0xFC6B90
+
+#define mmTPC7_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xFC6B94
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_SIZE 0xFC6B98
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xFC6B9C
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_SIZE 0xFC6BA0
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xFC6BA4
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_SIZE 0xFC6BA8
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xFC6BAC
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_SIZE 0xFC6BB0
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xFC6BB4
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_SIZE 0xFC6BB8
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xFC6BBC
+
+#define mmTPC7_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xFC6BC0
+
+#define mmTPC7_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xFC6BC4
+
+#define mmTPC7_CFG_QM_TENSOR_8_PADDING_VALUE 0xFC6BC8
+
+#define mmTPC7_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xFC6BCC
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_0_SIZE 0xFC6BD0
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xFC6BD4
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_1_SIZE 0xFC6BD8
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xFC6BDC
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_2_SIZE 0xFC6BE0
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xFC6BE4
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_3_SIZE 0xFC6BE8
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xFC6BEC
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_4_SIZE 0xFC6BF0
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xFC6BF4
+
+#define mmTPC7_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xFC6BF8
+
+#define mmTPC7_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xFC6BFC
+
+#define mmTPC7_CFG_QM_TENSOR_9_PADDING_VALUE 0xFC6C00
+
+#define mmTPC7_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xFC6C04
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_0_SIZE 0xFC6C08
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xFC6C0C
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_1_SIZE 0xFC6C10
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xFC6C14
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_2_SIZE 0xFC6C18
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xFC6C1C
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_3_SIZE 0xFC6C20
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xFC6C24
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_4_SIZE 0xFC6C28
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xFC6C2C
+
+#define mmTPC7_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xFC6C30
+
+#define mmTPC7_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xFC6C34
+
+#define mmTPC7_CFG_QM_TENSOR_10_PADDING_VALUE 0xFC6C38
+
+#define mmTPC7_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xFC6C3C
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_0_SIZE 0xFC6C40
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xFC6C44
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_1_SIZE 0xFC6C48
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xFC6C4C
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_2_SIZE 0xFC6C50
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xFC6C54
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_3_SIZE 0xFC6C58
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xFC6C5C
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_4_SIZE 0xFC6C60
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xFC6C64
+
+#define mmTPC7_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xFC6C68
+
+#define mmTPC7_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xFC6C6C
+
+#define mmTPC7_CFG_QM_TENSOR_11_PADDING_VALUE 0xFC6C70
+
+#define mmTPC7_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xFC6C74
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_0_SIZE 0xFC6C78
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xFC6C7C
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_1_SIZE 0xFC6C80
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xFC6C84
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_2_SIZE 0xFC6C88
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xFC6C8C
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_3_SIZE 0xFC6C90
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xFC6C94
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_4_SIZE 0xFC6C98
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xFC6C9C
+
+#define mmTPC7_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xFC6CA0
+
+#define mmTPC7_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xFC6CA4
+
+#define mmTPC7_CFG_QM_TENSOR_12_PADDING_VALUE 0xFC6CA8
+
+#define mmTPC7_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xFC6CAC
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_0_SIZE 0xFC6CB0
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xFC6CB4
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_1_SIZE 0xFC6CB8
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xFC6CBC
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_2_SIZE 0xFC6CC0
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xFC6CC4
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_3_SIZE 0xFC6CC8
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xFC6CCC
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_4_SIZE 0xFC6CD0
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xFC6CD4
+
+#define mmTPC7_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xFC6CD8
+
+#define mmTPC7_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xFC6CDC
+
+#define mmTPC7_CFG_QM_TENSOR_13_PADDING_VALUE 0xFC6CE0
+
+#define mmTPC7_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xFC6CE4
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_0_SIZE 0xFC6CE8
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xFC6CEC
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_1_SIZE 0xFC6CF0
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xFC6CF4
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_2_SIZE 0xFC6CF8
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xFC6CFC
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_3_SIZE 0xFC6D00
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xFC6D04
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_4_SIZE 0xFC6D08
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xFC6D0C
+
+#define mmTPC7_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xFC6D10
+
+#define mmTPC7_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xFC6D14
+
+#define mmTPC7_CFG_QM_TENSOR_14_PADDING_VALUE 0xFC6D18
+
+#define mmTPC7_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xFC6D1C
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_0_SIZE 0xFC6D20
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xFC6D24
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_1_SIZE 0xFC6D28
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xFC6D2C
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_2_SIZE 0xFC6D30
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xFC6D34
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_3_SIZE 0xFC6D38
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xFC6D3C
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_4_SIZE 0xFC6D40
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xFC6D44
+
+#define mmTPC7_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xFC6D48
+
+#define mmTPC7_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xFC6D4C
+
+#define mmTPC7_CFG_QM_TENSOR_15_PADDING_VALUE 0xFC6D50
+
+#define mmTPC7_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xFC6D54
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_0_SIZE 0xFC6D58
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xFC6D5C
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_1_SIZE 0xFC6D60
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xFC6D64
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_2_SIZE 0xFC6D68
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xFC6D6C
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_3_SIZE 0xFC6D70
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xFC6D74
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_4_SIZE 0xFC6D78
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xFC6D7C
+
+#define mmTPC7_CFG_QM_SYNC_OBJECT_MESSAGE 0xFC6D80
+
+#define mmTPC7_CFG_QM_SYNC_OBJECT_ADDR 0xFC6D84
+
+#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xFC6D88
+
+#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xFC6D8C
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_0 0xFC6D90
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_0 0xFC6D94
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_1 0xFC6D98
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_1 0xFC6D9C
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_2 0xFC6DA0
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_2 0xFC6DA4
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_3 0xFC6DA8
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_3 0xFC6DAC
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_4 0xFC6DB0
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_4 0xFC6DB4
+
+#define mmTPC7_CFG_QM_KERNEL_CONFIG 0xFC6DB8
+
+#define mmTPC7_CFG_QM_KERNEL_ID 0xFC6DBC
+
+#define mmTPC7_CFG_QM_SRF_0 0xFC6DC0
+
+#define mmTPC7_CFG_QM_SRF_1 0xFC6DC4
+
+#define mmTPC7_CFG_QM_SRF_2 0xFC6DC8
+
+#define mmTPC7_CFG_QM_SRF_3 0xFC6DCC
+
+#define mmTPC7_CFG_QM_SRF_4 0xFC6DD0
+
+#define mmTPC7_CFG_QM_SRF_5 0xFC6DD4
+
+#define mmTPC7_CFG_QM_SRF_6 0xFC6DD8
+
+#define mmTPC7_CFG_QM_SRF_7 0xFC6DDC
+
+#define mmTPC7_CFG_QM_SRF_8 0xFC6DE0
+
+#define mmTPC7_CFG_QM_SRF_9 0xFC6DE4
+
+#define mmTPC7_CFG_QM_SRF_10 0xFC6DE8
+
+#define mmTPC7_CFG_QM_SRF_11 0xFC6DEC
+
+#define mmTPC7_CFG_QM_SRF_12 0xFC6DF0
+
+#define mmTPC7_CFG_QM_SRF_13 0xFC6DF4
+
+#define mmTPC7_CFG_QM_SRF_14 0xFC6DF8
+
+#define mmTPC7_CFG_QM_SRF_15 0xFC6DFC
+
+#define mmTPC7_CFG_QM_SRF_16 0xFC6E00
+
+#define mmTPC7_CFG_QM_SRF_17 0xFC6E04
+
+#define mmTPC7_CFG_QM_SRF_18 0xFC6E08
+
+#define mmTPC7_CFG_QM_SRF_19 0xFC6E0C
+
+#define mmTPC7_CFG_QM_SRF_20 0xFC6E10
+
+#define mmTPC7_CFG_QM_SRF_21 0xFC6E14
+
+#define mmTPC7_CFG_QM_SRF_22 0xFC6E18
+
+#define mmTPC7_CFG_QM_SRF_23 0xFC6E1C
+
+#define mmTPC7_CFG_QM_SRF_24 0xFC6E20
+
+#define mmTPC7_CFG_QM_SRF_25 0xFC6E24
+
+#define mmTPC7_CFG_QM_SRF_26 0xFC6E28
+
+#define mmTPC7_CFG_QM_SRF_27 0xFC6E2C
+
+#define mmTPC7_CFG_QM_SRF_28 0xFC6E30
+
+#define mmTPC7_CFG_QM_SRF_29 0xFC6E34
+
+#define mmTPC7_CFG_QM_SRF_30 0xFC6E38
+
+#define mmTPC7_CFG_QM_SRF_31 0xFC6E3C
+
+#endif /* ASIC_REG_TPC7_CFG_REGS_H_ */
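The mmTPC7_CFG_* values in this header are byte offsets of TPC engine 7's configuration registers. Each tensor descriptor is a contiguous block: a 64-bit base address split across LOW/HIGH registers, a padding value, a config word, and five DIM_n size/stride pairs spaced 8 bytes apart. A minimal sketch of programming one descriptor follows, assuming a tpc_wreg32() MMIO write helper and a tensor_desc structure (both hypothetical; the header only supplies the offsets, and the kernel u32/u64 types and lower/upper_32_bits() helpers are assumed available):

/* Hypothetical descriptor; only the register offsets come from the header. */
struct tensor_desc {
	u64 base_addr;
	u32 padding_value;
	u32 config;
	u32 dim_size[5];
	u32 dim_stride[5];
};

static void tpc_wreg32(u32 offset, u32 val);	/* assumed MMIO write helper */

static void tpc7_write_kernel_tensor3(const struct tensor_desc *t)
{
	int i;

	tpc_wreg32(mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW,
		   lower_32_bits(t->base_addr));
	tpc_wreg32(mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH,
		   upper_32_bits(t->base_addr));
	tpc_wreg32(mmTPC7_CFG_KERNEL_TENSOR_3_PADDING_VALUE, t->padding_value);
	tpc_wreg32(mmTPC7_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG, t->config);

	/* DIM_n_SIZE/DIM_n_STRIDE pairs sit at consecutive 8-byte steps. */
	for (i = 0; i < 5; i++) {
		tpc_wreg32(mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_SIZE + i * 8,
			   t->dim_size[i]);
		tpc_wreg32(mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE + i * 8,
			   t->dim_stride[i]);
	}
}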
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
new file mode 100644
index 000000000000..5c36c972c027
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_QM_REGS_H_
+#define ASIC_REG_TPC7_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC7_QM_GLBL_CFG0 0xFC8000
+
+#define mmTPC7_QM_GLBL_CFG1 0xFC8004
+
+#define mmTPC7_QM_GLBL_PROT 0xFC8008
+
+#define mmTPC7_QM_GLBL_ERR_CFG 0xFC800C
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_0 0xFC8010
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_1 0xFC8014
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_2 0xFC8018
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_3 0xFC801C
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_4 0xFC8020
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_0 0xFC8024
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_1 0xFC8028
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_2 0xFC802C
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_3 0xFC8030
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_4 0xFC8034
+
+#define mmTPC7_QM_GLBL_STS0 0xFC8038
+
+#define mmTPC7_QM_GLBL_STS1_0 0xFC8040
+
+#define mmTPC7_QM_GLBL_STS1_1 0xFC8044
+
+#define mmTPC7_QM_GLBL_STS1_2 0xFC8048
+
+#define mmTPC7_QM_GLBL_STS1_3 0xFC804C
+
+#define mmTPC7_QM_GLBL_STS1_4 0xFC8050
+
+#define mmTPC7_QM_GLBL_MSG_EN_0 0xFC8054
+
+#define mmTPC7_QM_GLBL_MSG_EN_1 0xFC8058
+
+#define mmTPC7_QM_GLBL_MSG_EN_2 0xFC805C
+
+#define mmTPC7_QM_GLBL_MSG_EN_3 0xFC8060
+
+#define mmTPC7_QM_GLBL_MSG_EN_4 0xFC8068
+
+#define mmTPC7_QM_PQ_BASE_LO_0 0xFC8070
+
+#define mmTPC7_QM_PQ_BASE_LO_1 0xFC8074
+
+#define mmTPC7_QM_PQ_BASE_LO_2 0xFC8078
+
+#define mmTPC7_QM_PQ_BASE_LO_3 0xFC807C
+
+#define mmTPC7_QM_PQ_BASE_HI_0 0xFC8080
+
+#define mmTPC7_QM_PQ_BASE_HI_1 0xFC8084
+
+#define mmTPC7_QM_PQ_BASE_HI_2 0xFC8088
+
+#define mmTPC7_QM_PQ_BASE_HI_3 0xFC808C
+
+#define mmTPC7_QM_PQ_SIZE_0 0xFC8090
+
+#define mmTPC7_QM_PQ_SIZE_1 0xFC8094
+
+#define mmTPC7_QM_PQ_SIZE_2 0xFC8098
+
+#define mmTPC7_QM_PQ_SIZE_3 0xFC809C
+
+#define mmTPC7_QM_PQ_PI_0 0xFC80A0
+
+#define mmTPC7_QM_PQ_PI_1 0xFC80A4
+
+#define mmTPC7_QM_PQ_PI_2 0xFC80A8
+
+#define mmTPC7_QM_PQ_PI_3 0xFC80AC
+
+#define mmTPC7_QM_PQ_CI_0 0xFC80B0
+
+#define mmTPC7_QM_PQ_CI_1 0xFC80B4
+
+#define mmTPC7_QM_PQ_CI_2 0xFC80B8
+
+#define mmTPC7_QM_PQ_CI_3 0xFC80BC
+
+#define mmTPC7_QM_PQ_CFG0_0 0xFC80C0
+
+#define mmTPC7_QM_PQ_CFG0_1 0xFC80C4
+
+#define mmTPC7_QM_PQ_CFG0_2 0xFC80C8
+
+#define mmTPC7_QM_PQ_CFG0_3 0xFC80CC
+
+#define mmTPC7_QM_PQ_CFG1_0 0xFC80D0
+
+#define mmTPC7_QM_PQ_CFG1_1 0xFC80D4
+
+#define mmTPC7_QM_PQ_CFG1_2 0xFC80D8
+
+#define mmTPC7_QM_PQ_CFG1_3 0xFC80DC
+
+#define mmTPC7_QM_PQ_ARUSER_31_11_0 0xFC80E0
+
+#define mmTPC7_QM_PQ_ARUSER_31_11_1 0xFC80E4
+
+#define mmTPC7_QM_PQ_ARUSER_31_11_2 0xFC80E8
+
+#define mmTPC7_QM_PQ_ARUSER_31_11_3 0xFC80EC
+
+#define mmTPC7_QM_PQ_STS0_0 0xFC80F0
+
+#define mmTPC7_QM_PQ_STS0_1 0xFC80F4
+
+#define mmTPC7_QM_PQ_STS0_2 0xFC80F8
+
+#define mmTPC7_QM_PQ_STS0_3 0xFC80FC
+
+#define mmTPC7_QM_PQ_STS1_0 0xFC8100
+
+#define mmTPC7_QM_PQ_STS1_1 0xFC8104
+
+#define mmTPC7_QM_PQ_STS1_2 0xFC8108
+
+#define mmTPC7_QM_PQ_STS1_3 0xFC810C
+
+#define mmTPC7_QM_CQ_CFG0_0 0xFC8110
+
+#define mmTPC7_QM_CQ_CFG0_1 0xFC8114
+
+#define mmTPC7_QM_CQ_CFG0_2 0xFC8118
+
+#define mmTPC7_QM_CQ_CFG0_3 0xFC811C
+
+#define mmTPC7_QM_CQ_CFG0_4 0xFC8120
+
+#define mmTPC7_QM_CQ_CFG1_0 0xFC8124
+
+#define mmTPC7_QM_CQ_CFG1_1 0xFC8128
+
+#define mmTPC7_QM_CQ_CFG1_2 0xFC812C
+
+#define mmTPC7_QM_CQ_CFG1_3 0xFC8130
+
+#define mmTPC7_QM_CQ_CFG1_4 0xFC8134
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_0 0xFC8138
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_1 0xFC813C
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_2 0xFC8140
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_3 0xFC8144
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_4 0xFC8148
+
+#define mmTPC7_QM_CQ_STS0_0 0xFC814C
+
+#define mmTPC7_QM_CQ_STS0_1 0xFC8150
+
+#define mmTPC7_QM_CQ_STS0_2 0xFC8154
+
+#define mmTPC7_QM_CQ_STS0_3 0xFC8158
+
+#define mmTPC7_QM_CQ_STS0_4 0xFC815C
+
+#define mmTPC7_QM_CQ_STS1_0 0xFC8160
+
+#define mmTPC7_QM_CQ_STS1_1 0xFC8164
+
+#define mmTPC7_QM_CQ_STS1_2 0xFC8168
+
+#define mmTPC7_QM_CQ_STS1_3 0xFC816C
+
+#define mmTPC7_QM_CQ_STS1_4 0xFC8170
+
+#define mmTPC7_QM_CQ_PTR_LO_0 0xFC8174
+
+#define mmTPC7_QM_CQ_PTR_HI_0 0xFC8178
+
+#define mmTPC7_QM_CQ_TSIZE_0 0xFC817C
+
+#define mmTPC7_QM_CQ_CTL_0 0xFC8180
+
+#define mmTPC7_QM_CQ_PTR_LO_1 0xFC8184
+
+#define mmTPC7_QM_CQ_PTR_HI_1 0xFC8188
+
+#define mmTPC7_QM_CQ_TSIZE_1 0xFC818C
+
+#define mmTPC7_QM_CQ_CTL_1 0xFC8190
+
+#define mmTPC7_QM_CQ_PTR_LO_2 0xFC8194
+
+#define mmTPC7_QM_CQ_PTR_HI_2 0xFC8198
+
+#define mmTPC7_QM_CQ_TSIZE_2 0xFC819C
+
+#define mmTPC7_QM_CQ_CTL_2 0xFC81A0
+
+#define mmTPC7_QM_CQ_PTR_LO_3 0xFC81A4
+
+#define mmTPC7_QM_CQ_PTR_HI_3 0xFC81A8
+
+#define mmTPC7_QM_CQ_TSIZE_3 0xFC81AC
+
+#define mmTPC7_QM_CQ_CTL_3 0xFC81B0
+
+#define mmTPC7_QM_CQ_PTR_LO_4 0xFC81B4
+
+#define mmTPC7_QM_CQ_PTR_HI_4 0xFC81B8
+
+#define mmTPC7_QM_CQ_TSIZE_4 0xFC81BC
+
+#define mmTPC7_QM_CQ_CTL_4 0xFC81C0
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_0 0xFC81C4
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_1 0xFC81C8
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_2 0xFC81CC
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_3 0xFC81D0
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_4 0xFC81D4
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_0 0xFC81D8
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_1 0xFC81DC
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_2 0xFC81E0
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_3 0xFC81E4
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_4 0xFC81E8
+
+#define mmTPC7_QM_CQ_TSIZE_STS_0 0xFC81EC
+
+#define mmTPC7_QM_CQ_TSIZE_STS_1 0xFC81F0
+
+#define mmTPC7_QM_CQ_TSIZE_STS_2 0xFC81F4
+
+#define mmTPC7_QM_CQ_TSIZE_STS_3 0xFC81F8
+
+#define mmTPC7_QM_CQ_TSIZE_STS_4 0xFC81FC
+
+#define mmTPC7_QM_CQ_CTL_STS_0 0xFC8200
+
+#define mmTPC7_QM_CQ_CTL_STS_1 0xFC8204
+
+#define mmTPC7_QM_CQ_CTL_STS_2 0xFC8208
+
+#define mmTPC7_QM_CQ_CTL_STS_3 0xFC820C
+
+#define mmTPC7_QM_CQ_CTL_STS_4 0xFC8210
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_0 0xFC8214
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_1 0xFC8218
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_2 0xFC821C
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_3 0xFC8220
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_4 0xFC8224
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_0 0xFC8228
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_1 0xFC822C
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_2 0xFC8230
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_3 0xFC8234
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_4 0xFC8238
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_0 0xFC823C
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_1 0xFC8240
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_2 0xFC8244
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_3 0xFC8248
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_4 0xFC824C
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_0 0xFC8250
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_1 0xFC8254
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_2 0xFC8258
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_3 0xFC825C
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_4 0xFC8260
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_0 0xFC8264
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_1 0xFC8268
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_2 0xFC826C
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_3 0xFC8270
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_4 0xFC8274
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_0 0xFC8278
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_1 0xFC827C
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 0xFC8280
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_3 0xFC8284
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_4 0xFC8288
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_0 0xFC828C
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_1 0xFC8290
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_2 0xFC8294
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_3 0xFC8298
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_4 0xFC829C
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_0 0xFC82A0
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_1 0xFC82A4
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_2 0xFC82A8
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_3 0xFC82AC
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_4 0xFC82B0
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_0 0xFC82B4
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_1 0xFC82B8
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_2 0xFC82BC
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_3 0xFC82C0
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_4 0xFC82C4
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_0 0xFC82C8
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_1 0xFC82CC
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_2 0xFC82D0
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_3 0xFC82D4
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_4 0xFC82D8
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xFC82E0
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xFC82E4
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xFC82E8
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xFC82EC
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xFC82F0
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xFC82F4
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xFC82F8
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xFC82FC
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xFC8300
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xFC8304
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_0 0xFC8308
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_1 0xFC830C
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_2 0xFC8310
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_3 0xFC8314
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_4 0xFC8318
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_0 0xFC831C
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_1 0xFC8320
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_2 0xFC8324
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_3 0xFC8328
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_4 0xFC832C
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_0 0xFC8330
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_1 0xFC8334
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_2 0xFC8338
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_3 0xFC833C
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_4 0xFC8340
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_0 0xFC8344
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_1 0xFC8348
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_2 0xFC834C
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_3 0xFC8350
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_4 0xFC8354
+
+#define mmTPC7_QM_CP_FENCE0_CNT_0 0xFC8358
+
+#define mmTPC7_QM_CP_FENCE0_CNT_1 0xFC835C
+
+#define mmTPC7_QM_CP_FENCE0_CNT_2 0xFC8360
+
+#define mmTPC7_QM_CP_FENCE0_CNT_3 0xFC8364
+
+#define mmTPC7_QM_CP_FENCE0_CNT_4 0xFC8368
+
+#define mmTPC7_QM_CP_FENCE1_CNT_0 0xFC836C
+
+#define mmTPC7_QM_CP_FENCE1_CNT_1 0xFC8370
+
+#define mmTPC7_QM_CP_FENCE1_CNT_2 0xFC8374
+
+#define mmTPC7_QM_CP_FENCE1_CNT_3 0xFC8378
+
+#define mmTPC7_QM_CP_FENCE1_CNT_4 0xFC837C
+
+#define mmTPC7_QM_CP_FENCE2_CNT_0 0xFC8380
+
+#define mmTPC7_QM_CP_FENCE2_CNT_1 0xFC8384
+
+#define mmTPC7_QM_CP_FENCE2_CNT_2 0xFC8388
+
+#define mmTPC7_QM_CP_FENCE2_CNT_3 0xFC838C
+
+#define mmTPC7_QM_CP_FENCE2_CNT_4 0xFC8390
+
+#define mmTPC7_QM_CP_FENCE3_CNT_0 0xFC8394
+
+#define mmTPC7_QM_CP_FENCE3_CNT_1 0xFC8398
+
+#define mmTPC7_QM_CP_FENCE3_CNT_2 0xFC839C
+
+#define mmTPC7_QM_CP_FENCE3_CNT_3 0xFC83A0
+
+#define mmTPC7_QM_CP_FENCE3_CNT_4 0xFC83A4
+
+#define mmTPC7_QM_CP_STS_0 0xFC83A8
+
+#define mmTPC7_QM_CP_STS_1 0xFC83AC
+
+#define mmTPC7_QM_CP_STS_2 0xFC83B0
+
+#define mmTPC7_QM_CP_STS_3 0xFC83B4
+
+#define mmTPC7_QM_CP_STS_4 0xFC83B8
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_0 0xFC83BC
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_1 0xFC83C0
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_2 0xFC83C4
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_3 0xFC83C8
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_4 0xFC83CC
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_0 0xFC83D0
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_1 0xFC83D4
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_2 0xFC83D8
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_3 0xFC83DC
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_4 0xFC83E0
+
+#define mmTPC7_QM_CP_BARRIER_CFG_0 0xFC83F4
+
+#define mmTPC7_QM_CP_BARRIER_CFG_1 0xFC83F8
+
+#define mmTPC7_QM_CP_BARRIER_CFG_2 0xFC83FC
+
+#define mmTPC7_QM_CP_BARRIER_CFG_3 0xFC8400
+
+#define mmTPC7_QM_CP_BARRIER_CFG_4 0xFC8404
+
+#define mmTPC7_QM_CP_DBG_0_0 0xFC8408
+
+#define mmTPC7_QM_CP_DBG_0_1 0xFC840C
+
+#define mmTPC7_QM_CP_DBG_0_2 0xFC8410
+
+#define mmTPC7_QM_CP_DBG_0_3 0xFC8414
+
+#define mmTPC7_QM_CP_DBG_0_4 0xFC8418
+
+#define mmTPC7_QM_CP_ARUSER_31_11_0 0xFC841C
+
+#define mmTPC7_QM_CP_ARUSER_31_11_1 0xFC8420
+
+#define mmTPC7_QM_CP_ARUSER_31_11_2 0xFC8424
+
+#define mmTPC7_QM_CP_ARUSER_31_11_3 0xFC8428
+
+#define mmTPC7_QM_CP_ARUSER_31_11_4 0xFC842C
+
+#define mmTPC7_QM_CP_AWUSER_31_11_0 0xFC8430
+
+#define mmTPC7_QM_CP_AWUSER_31_11_1 0xFC8434
+
+#define mmTPC7_QM_CP_AWUSER_31_11_2 0xFC8438
+
+#define mmTPC7_QM_CP_AWUSER_31_11_3 0xFC843C
+
+#define mmTPC7_QM_CP_AWUSER_31_11_4 0xFC8440
+
+#define mmTPC7_QM_ARB_CFG_0 0xFC8A00
+
+#define mmTPC7_QM_ARB_CHOISE_Q_PUSH 0xFC8A04
+
+#define mmTPC7_QM_ARB_WRR_WEIGHT_0 0xFC8A08
+
+#define mmTPC7_QM_ARB_WRR_WEIGHT_1 0xFC8A0C
+
+#define mmTPC7_QM_ARB_WRR_WEIGHT_2 0xFC8A10
+
+#define mmTPC7_QM_ARB_WRR_WEIGHT_3 0xFC8A14
+
+#define mmTPC7_QM_ARB_CFG_1 0xFC8A18
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_0 0xFC8A20
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_1 0xFC8A24
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_2 0xFC8A28
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_3 0xFC8A2C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_4 0xFC8A30
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_5 0xFC8A34
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_6 0xFC8A38
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_7 0xFC8A3C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_8 0xFC8A40
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_9 0xFC8A44
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_10 0xFC8A48
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_11 0xFC8A4C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_12 0xFC8A50
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_13 0xFC8A54
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_14 0xFC8A58
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_15 0xFC8A5C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_16 0xFC8A60
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_17 0xFC8A64
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_18 0xFC8A68
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_19 0xFC8A6C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_20 0xFC8A70
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_21 0xFC8A74
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_22 0xFC8A78
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_23 0xFC8A7C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_24 0xFC8A80
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_25 0xFC8A84
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_26 0xFC8A88
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_27 0xFC8A8C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_28 0xFC8A90
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_29 0xFC8A94
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_30 0xFC8A98
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_31 0xFC8A9C
+
+#define mmTPC7_QM_ARB_MST_CRED_INC 0xFC8AA0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xFC8AA4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xFC8AA8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xFC8AAC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xFC8AB0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xFC8AB4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xFC8AB8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xFC8ABC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xFC8AC0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xFC8AC4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xFC8AC8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xFC8ACC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xFC8AD0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xFC8AD4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xFC8AD8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xFC8ADC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xFC8AE0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xFC8AE4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xFC8AE8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xFC8AEC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xFC8AF0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xFC8AF4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xFC8AF8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xFC8AFC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xFC8B00
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xFC8B04
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xFC8B08
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xFC8B0C
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xFC8B10
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xFC8B14
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xFC8B18
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xFC8B1C
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xFC8B20
+
+#define mmTPC7_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xFC8B28
+
+#define mmTPC7_QM_ARB_MST_SLAVE_EN 0xFC8B2C
+
+#define mmTPC7_QM_ARB_MST_QUIET_PER 0xFC8B34
+
+#define mmTPC7_QM_ARB_SLV_CHOISE_WDT 0xFC8B38
+
+#define mmTPC7_QM_ARB_SLV_ID 0xFC8B3C
+
+#define mmTPC7_QM_ARB_MSG_MAX_INFLIGHT 0xFC8B44
+
+#define mmTPC7_QM_ARB_MSG_AWUSER_31_11 0xFC8B48
+
+#define mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP 0xFC8B4C
+
+#define mmTPC7_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xFC8B50
+
+#define mmTPC7_QM_ARB_BASE_LO 0xFC8B54
+
+#define mmTPC7_QM_ARB_BASE_HI 0xFC8B58
+
+#define mmTPC7_QM_ARB_STATE_STS 0xFC8B80
+
+#define mmTPC7_QM_ARB_CHOISE_FULLNESS_STS 0xFC8B84
+
+#define mmTPC7_QM_ARB_MSG_STS 0xFC8B88
+
+#define mmTPC7_QM_ARB_SLV_CHOISE_Q_HEAD 0xFC8B8C
+
+#define mmTPC7_QM_ARB_ERR_CAUSE 0xFC8B9C
+
+#define mmTPC7_QM_ARB_ERR_MSG_EN 0xFC8BA0
+
+#define mmTPC7_QM_ARB_ERR_STS_DRP 0xFC8BA8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_0 0xFC8BB0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_1 0xFC8BB4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_2 0xFC8BB8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_3 0xFC8BBC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_4 0xFC8BC0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_5 0xFC8BC4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_6 0xFC8BC8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_7 0xFC8BCC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_8 0xFC8BD0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_9 0xFC8BD4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_10 0xFC8BD8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_11 0xFC8BDC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_12 0xFC8BE0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_13 0xFC8BE4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_14 0xFC8BE8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_15 0xFC8BEC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_16 0xFC8BF0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_17 0xFC8BF4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_18 0xFC8BF8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_19 0xFC8BFC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_20 0xFC8C00
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_21 0xFC8C04
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_22 0xFC8C08
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_23 0xFC8C0C
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_24 0xFC8C10
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_25 0xFC8C14
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_26 0xFC8C18
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_27 0xFC8C1C
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_28 0xFC8C20
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_29 0xFC8C24
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_30 0xFC8C28
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_31 0xFC8C2C
+
+#define mmTPC7_QM_CGM_CFG 0xFC8C70
+
+#define mmTPC7_QM_CGM_STS 0xFC8C74
+
+#define mmTPC7_QM_CGM_CFG1 0xFC8C78
+
+#define mmTPC7_QM_LOCAL_RANGE_BASE 0xFC8C80
+
+#define mmTPC7_QM_LOCAL_RANGE_SIZE 0xFC8C84
+
+#define mmTPC7_QM_CSMR_STRICT_PRIO_CFG 0xFC8C90
+
+#define mmTPC7_QM_HBW_RD_RATE_LIM_CFG_1 0xFC8C94
+
+#define mmTPC7_QM_LBW_WR_RATE_LIM_CFG_0 0xFC8C98
+
+#define mmTPC7_QM_LBW_WR_RATE_LIM_CFG_1 0xFC8C9C
+
+#define mmTPC7_QM_HBW_RD_RATE_LIM_CFG_0 0xFC8CA0
+
+#define mmTPC7_QM_GLBL_AXCACHE 0xFC8CA4
+
+#define mmTPC7_QM_IND_GW_APB_CFG 0xFC8CB0
+
+#define mmTPC7_QM_IND_GW_APB_WDATA 0xFC8CB4
+
+#define mmTPC7_QM_IND_GW_APB_RDATA 0xFC8CB8
+
+#define mmTPC7_QM_IND_GW_APB_STATUS 0xFC8CBC
+
+#define mmTPC7_QM_GLBL_ERR_ADDR_LO 0xFC8CD0
+
+#define mmTPC7_QM_GLBL_ERR_ADDR_HI 0xFC8CD4
+
+#define mmTPC7_QM_GLBL_ERR_WDATA 0xFC8CD8
+
+#define mmTPC7_QM_GLBL_MEM_INIT_BUSY 0xFC8D00
+
+#endif /* ASIC_REG_TPC7_QM_REGS_H_ */
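This QMAN header exposes four primary queues (the PQ_*_0..3 registers) and five completion queues (CQ_*_0..4). Bringing up a primary queue amounts to programming its base address and size and resetting the producer/consumer indices. A sketch under stated assumptions: qm_wreg32() is a hypothetical MMIO write helper, and both the log2 encoding of PQ_SIZE and the GLBL_CFG0 enable-bit layout are assumptions not defined by this header.

static void qm_wreg32(u32 offset, u32 val);	/* assumed MMIO write helper */

static void tpc7_qm_init_pq0(u64 pq_dma_addr, u32 n_entries)
{
	qm_wreg32(mmTPC7_QM_PQ_BASE_LO_0, lower_32_bits(pq_dma_addr));
	qm_wreg32(mmTPC7_QM_PQ_BASE_HI_0, upper_32_bits(pq_dma_addr));
	qm_wreg32(mmTPC7_QM_PQ_SIZE_0, ilog2(n_entries));	/* log2 encoding assumed */
	qm_wreg32(mmTPC7_QM_PQ_PI_0, 0);	/* reset producer index */
	qm_wreg32(mmTPC7_QM_PQ_CI_0, 0);	/* reset consumer index */
	qm_wreg32(mmTPC7_QM_GLBL_CFG0, 0x1);	/* PQ 0 enable bit assumed */
}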
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi.h b/drivers/misc/habanalabs/include/gaudi/gaudi.h
new file mode 100644
index 000000000000..8829891d3eef
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_H
+#define GAUDI_H
+
+#define SRAM_BAR_ID 0
+#define CFG_BAR_ID 2
+#define HBM_BAR_ID 4
+
+#define SRAM_BAR_SIZE 0x4000000ull /* 64MB */
+#define CFG_BAR_SIZE 0x8000000ull /* 128MB */
+
+#define CFG_BASE 0x7FFC000000ull
+#define CFG_SIZE 0x4000000 /* 32MB CFG + 32MB DBG */
+
+#define SRAM_BASE_ADDR 0x7FF0000000ull
+#define SRAM_SIZE 0x1400000 /* 20MB */
+
+#define SPI_FLASH_BASE_ADDR 0x7FF8000000ull
+
+#define PSOC_SCRATCHPAD_ADDR 0x7FFBFE0000ull
+#define PSOC_SCRATCHPAD_SIZE 0x10000 /* 64KB */
+
+#define PCIE_FW_SRAM_ADDR 0x7FFBFF0000ull
+#define PCIE_FW_SRAM_SIZE 0x8000 /* 32KB */
+
+#define DRAM_PHYS_BASE 0x0ull
+
+#define HOST_PHYS_BASE 0x8000000000ull /* 0.5TB */
+#define HOST_PHYS_SIZE 0x1000000000000ull /* 0.25PB (48 bits) */
+
+#define GAUDI_MSI_ENTRIES 32
+
+#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
+
+#define MAX_ASID 1024
+
+#define PROT_BITS_OFFS 0xF80
+
+#define MME_NUMBER_OF_MASTER_ENGINES 2
+
+#define TPC_NUMBER_OF_ENGINES 8
+
+#define DMA_NUMBER_OF_CHANNELS 8
+
+#define NIC_NUMBER_OF_MACROS 5
+
+#define NIC_NUMBER_OF_ENGINES (NIC_NUMBER_OF_MACROS * 2)
+
+#define NUMBER_OF_IF 8
+
+#define DEVICE_CACHE_LINE_SIZE 128
+
+#endif /* GAUDI_H */
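The mm*-style offsets in the asic_reg headers are relative to the device's configuration space, so combining them with the constants above gives both the device-visible address and, given a BAR mapping, a host-side pointer. A short sketch, assuming BAR 2 (CFG_BAR_ID) has been ioremapped to cfg_virt and that the mm* offsets index directly into that mapping (both assumptions for illustration):

/* Device-visible address of a config register (sketch): */
u64 dev_addr = CFG_BASE + mmTPC7_QM_GLBL_STS0;

/* Host-side read through the config BAR; cfg_virt is a hypothetical
 * void __iomem * obtained from pci_ioremap_bar(pdev, CFG_BAR_ID).
 */
u32 sts = readl(cfg_virt + mmTPC7_QM_GLBL_STS0);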
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
new file mode 100644
index 000000000000..9ccba8437ec9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef __GAUDI_ASYNC_EVENTS_H_
+#define __GAUDI_ASYNC_EVENTS_H_
+
+enum gaudi_async_event_id {
+ GAUDI_EVENT_PCIE_CORE_SERR = 32,
+ GAUDI_EVENT_PCIE_CORE_DERR = 33,
+ GAUDI_EVENT_PCIE_IF_SERR = 34,
+ GAUDI_EVENT_PCIE_IF_DERR = 35,
+ GAUDI_EVENT_PCIE_PHY_SERR = 36,
+ GAUDI_EVENT_PCIE_PHY_DERR = 37,
+ GAUDI_EVENT_TPC0_SERR = 38,
+ GAUDI_EVENT_TPC1_SERR = 39,
+ GAUDI_EVENT_TPC2_SERR = 40,
+ GAUDI_EVENT_TPC3_SERR = 41,
+ GAUDI_EVENT_TPC4_SERR = 42,
+ GAUDI_EVENT_TPC5_SERR = 43,
+ GAUDI_EVENT_TPC6_SERR = 44,
+ GAUDI_EVENT_TPC7_SERR = 45,
+ GAUDI_EVENT_TPC0_DERR = 46,
+ GAUDI_EVENT_TPC1_DERR = 47,
+ GAUDI_EVENT_TPC2_DERR = 48,
+ GAUDI_EVENT_TPC3_DERR = 49,
+ GAUDI_EVENT_TPC4_DERR = 50,
+ GAUDI_EVENT_TPC5_DERR = 51,
+ GAUDI_EVENT_TPC6_DERR = 52,
+ GAUDI_EVENT_TPC7_DERR = 53,
+ GAUDI_EVENT_MME0_ACC_SERR = 54,
+ GAUDI_EVENT_MME0_ACC_DERR = 55,
+ GAUDI_EVENT_MME0_SBAB_SERR = 56,
+ GAUDI_EVENT_MME0_SBAB_DERR = 57,
+ GAUDI_EVENT_MME1_ACC_SERR = 58,
+ GAUDI_EVENT_MME1_ACC_DERR = 59,
+ GAUDI_EVENT_MME1_SBAB_SERR = 60,
+ GAUDI_EVENT_MME1_SBAB_DERR = 61,
+ GAUDI_EVENT_MME2_ACC_SERR = 62,
+ GAUDI_EVENT_MME2_ACC_DERR = 63,
+ GAUDI_EVENT_MME2_SBAB_SERR = 64,
+ GAUDI_EVENT_MME2_SBAB_DERR = 65,
+ GAUDI_EVENT_MME3_ACC_SERR = 66,
+ GAUDI_EVENT_MME3_ACC_DERR = 67,
+ GAUDI_EVENT_MME3_SBAB_SERR = 68,
+ GAUDI_EVENT_MME3_SBAB_DERR = 69,
+ GAUDI_EVENT_DMA0_SERR_ECC = 70,
+ GAUDI_EVENT_DMA1_SERR_ECC = 71,
+ GAUDI_EVENT_DMA2_SERR_ECC = 72,
+ GAUDI_EVENT_DMA3_SERR_ECC = 73,
+ GAUDI_EVENT_DMA4_SERR_ECC = 74,
+ GAUDI_EVENT_DMA5_SERR_ECC = 75,
+ GAUDI_EVENT_DMA6_SERR_ECC = 76,
+ GAUDI_EVENT_DMA7_SERR_ECC = 77,
+ GAUDI_EVENT_DMA0_DERR_ECC = 78,
+ GAUDI_EVENT_DMA1_DERR_ECC = 79,
+ GAUDI_EVENT_DMA2_DERR_ECC = 80,
+ GAUDI_EVENT_DMA3_DERR_ECC = 81,
+ GAUDI_EVENT_DMA4_DERR_ECC = 82,
+ GAUDI_EVENT_DMA5_DERR_ECC = 83,
+ GAUDI_EVENT_DMA6_DERR_ECC = 84,
+ GAUDI_EVENT_DMA7_DERR_ECC = 85,
+ GAUDI_EVENT_CPU_IF_ECC_SERR = 86,
+ GAUDI_EVENT_CPU_IF_ECC_DERR = 87,
+ GAUDI_EVENT_PSOC_MEM_SERR = 88,
+ GAUDI_EVENT_PSOC_CORESIGHT_SERR = 89,
+ GAUDI_EVENT_PSOC_MEM_DERR = 90,
+ GAUDI_EVENT_PSOC_CORESIGHT_DERR = 91,
+ GAUDI_EVENT_SRAM0_SERR = 92,
+ GAUDI_EVENT_SRAM1_SERR = 93,
+ GAUDI_EVENT_SRAM2_SERR = 94,
+ GAUDI_EVENT_SRAM3_SERR = 95,
+ GAUDI_EVENT_SRAM7_SERR = 96,
+ GAUDI_EVENT_SRAM6_SERR = 97,
+ GAUDI_EVENT_SRAM5_SERR = 98,
+ GAUDI_EVENT_SRAM4_SERR = 99,
+ GAUDI_EVENT_SRAM8_SERR = 100,
+ GAUDI_EVENT_SRAM9_SERR = 101,
+ GAUDI_EVENT_SRAM10_SERR = 102,
+ GAUDI_EVENT_SRAM11_SERR = 103,
+ GAUDI_EVENT_SRAM15_SERR = 104,
+ GAUDI_EVENT_SRAM14_SERR = 105,
+ GAUDI_EVENT_SRAM13_SERR = 106,
+ GAUDI_EVENT_SRAM12_SERR = 107,
+ GAUDI_EVENT_SRAM16_SERR = 108,
+ GAUDI_EVENT_SRAM17_SERR = 109,
+ GAUDI_EVENT_SRAM18_SERR = 110,
+ GAUDI_EVENT_SRAM19_SERR = 111,
+ GAUDI_EVENT_SRAM23_SERR = 112,
+ GAUDI_EVENT_SRAM22_SERR = 113,
+ GAUDI_EVENT_SRAM21_SERR = 114,
+ GAUDI_EVENT_SRAM20_SERR = 115,
+ GAUDI_EVENT_SRAM24_SERR = 116,
+ GAUDI_EVENT_SRAM25_SERR = 117,
+ GAUDI_EVENT_SRAM26_SERR = 118,
+ GAUDI_EVENT_SRAM27_SERR = 119,
+ GAUDI_EVENT_SRAM31_SERR = 120,
+ GAUDI_EVENT_SRAM30_SERR = 121,
+ GAUDI_EVENT_SRAM29_SERR = 122,
+ GAUDI_EVENT_SRAM28_SERR = 123,
+ GAUDI_EVENT_SRAM0_DERR = 124,
+ GAUDI_EVENT_SRAM1_DERR = 125,
+ GAUDI_EVENT_SRAM2_DERR = 126,
+ GAUDI_EVENT_SRAM3_DERR = 127,
+ GAUDI_EVENT_SRAM7_DERR = 128,
+ GAUDI_EVENT_SRAM6_DERR = 129,
+ GAUDI_EVENT_SRAM5_DERR = 130,
+ GAUDI_EVENT_SRAM4_DERR = 131,
+ GAUDI_EVENT_SRAM8_DERR = 132,
+ GAUDI_EVENT_SRAM9_DERR = 133,
+ GAUDI_EVENT_SRAM10_DERR = 134,
+ GAUDI_EVENT_SRAM11_DERR = 135,
+ GAUDI_EVENT_SRAM15_DERR = 136,
+ GAUDI_EVENT_SRAM14_DERR = 137,
+ GAUDI_EVENT_SRAM13_DERR = 138,
+ GAUDI_EVENT_SRAM12_DERR = 139,
+ GAUDI_EVENT_SRAM16_DERR = 140,
+ GAUDI_EVENT_SRAM17_DERR = 141,
+ GAUDI_EVENT_SRAM18_DERR = 142,
+ GAUDI_EVENT_SRAM19_DERR = 143,
+ GAUDI_EVENT_SRAM23_DERR = 144,
+ GAUDI_EVENT_SRAM22_DERR = 145,
+ GAUDI_EVENT_SRAM21_DERR = 146,
+ GAUDI_EVENT_SRAM20_DERR = 147,
+ GAUDI_EVENT_SRAM24_DERR = 148,
+ GAUDI_EVENT_SRAM25_DERR = 149,
+ GAUDI_EVENT_SRAM26_DERR = 150,
+ GAUDI_EVENT_SRAM27_DERR = 151,
+ GAUDI_EVENT_SRAM31_DERR = 152,
+ GAUDI_EVENT_SRAM30_DERR = 153,
+ GAUDI_EVENT_SRAM29_DERR = 154,
+ GAUDI_EVENT_SRAM28_DERR = 155,
+ GAUDI_EVENT_NIC0_SERR = 156,
+ GAUDI_EVENT_NIC1_SERR = 157,
+ GAUDI_EVENT_NIC2_SERR = 158,
+ GAUDI_EVENT_NIC3_SERR = 159,
+ GAUDI_EVENT_NIC4_SERR = 160,
+ GAUDI_EVENT_NIC0_DERR = 166,
+ GAUDI_EVENT_NIC1_DERR = 167,
+ GAUDI_EVENT_NIC2_DERR = 168,
+ GAUDI_EVENT_NIC3_DERR = 169,
+ GAUDI_EVENT_NIC4_DERR = 170,
+ GAUDI_EVENT_DMA_IF0_SERR = 176,
+ GAUDI_EVENT_DMA_IF1_SERR = 177,
+ GAUDI_EVENT_DMA_IF2_SERR = 178,
+ GAUDI_EVENT_DMA_IF3_SERR = 179,
+ GAUDI_EVENT_DMA_IF0_DERR = 180,
+ GAUDI_EVENT_DMA_IF1_DERR = 181,
+ GAUDI_EVENT_DMA_IF2_DERR = 182,
+ GAUDI_EVENT_DMA_IF3_DERR = 183,
+ GAUDI_EVENT_GIC500 = 184,
+ GAUDI_EVENT_HBM_0_SERR = 185,
+ GAUDI_EVENT_HBM_1_SERR = 186,
+ GAUDI_EVENT_HBM_2_SERR = 187,
+ GAUDI_EVENT_HBM_3_SERR = 188,
+ GAUDI_EVENT_HBM_0_DERR = 189,
+ GAUDI_EVENT_HBM_1_DERR = 190,
+ GAUDI_EVENT_HBM_2_DERR = 191,
+ GAUDI_EVENT_HBM_3_DERR = 192,
+ GAUDI_EVENT_MMU_SERR = 193,
+ GAUDI_EVENT_MMU_DERR = 194,
+ GAUDI_EVENT_PCIE_DEC = 200,
+ GAUDI_EVENT_TPC0_DEC = 201,
+ GAUDI_EVENT_TPC1_DEC = 203,
+ GAUDI_EVENT_TPC2_DEC = 205,
+ GAUDI_EVENT_TPC3_DEC = 207,
+ GAUDI_EVENT_TPC4_DEC = 209,
+ GAUDI_EVENT_TPC5_DEC = 211,
+ GAUDI_EVENT_TPC6_DEC = 213,
+ GAUDI_EVENT_TPC7_DEC = 215,
+ GAUDI_EVENT_AXI_ECC = 217,
+ GAUDI_EVENT_L2_RAM_ECC = 218,
+ GAUDI_EVENT_MME0_WBC_RSP = 219,
+ GAUDI_EVENT_MME0_SBAB0_RSP = 220,
+ GAUDI_EVENT_MME1_WBC_RSP = 224,
+ GAUDI_EVENT_MME1_SBAB0_RSP = 225,
+ GAUDI_EVENT_MME2_WBC_RSP = 229,
+ GAUDI_EVENT_MME2_SBAB0_RSP = 230,
+ GAUDI_EVENT_MME3_WBC_RSP = 234,
+ GAUDI_EVENT_MME3_SBAB0_RSP = 235,
+ GAUDI_EVENT_PLL0 = 239,
+ GAUDI_EVENT_PLL1 = 240,
+ GAUDI_EVENT_PLL2 = 241,
+ GAUDI_EVENT_PLL3 = 242,
+ GAUDI_EVENT_PLL4 = 243,
+ GAUDI_EVENT_PLL5 = 244,
+ GAUDI_EVENT_PLL6 = 245,
+ GAUDI_EVENT_PLL7 = 246,
+ GAUDI_EVENT_PLL8 = 247,
+ GAUDI_EVENT_PLL9 = 248,
+ GAUDI_EVENT_PLL10 = 249,
+ GAUDI_EVENT_PLL11 = 250,
+ GAUDI_EVENT_PLL12 = 251,
+ GAUDI_EVENT_PLL13 = 252,
+ GAUDI_EVENT_PLL14 = 253,
+ GAUDI_EVENT_PLL15 = 254,
+ GAUDI_EVENT_PLL16 = 255,
+ GAUDI_EVENT_PLL17 = 256,
+ GAUDI_EVENT_CPU_AXI_SPLITTER = 257,
+ GAUDI_EVENT_PSOC_AXI_DEC = 262,
+ GAUDI_EVENT_PSOC_PRSTN_FALL = 263,
+ GAUDI_EVENT_NIC_SEI_0 = 264,
+ GAUDI_EVENT_NIC_SEI_1 = 265,
+ GAUDI_EVENT_NIC_SEI_2 = 266,
+ GAUDI_EVENT_NIC_SEI_3 = 267,
+ GAUDI_EVENT_NIC_SEI_4 = 268,
+ GAUDI_EVENT_PCIE_FLR = 290,
+ GAUDI_EVENT_TPC0_BMON_SPMU = 300,
+ GAUDI_EVENT_TPC0_KRN_ERR = 301,
+ GAUDI_EVENT_TPC1_BMON_SPMU = 306,
+ GAUDI_EVENT_TPC1_KRN_ERR = 307,
+ GAUDI_EVENT_TPC2_BMON_SPMU = 312,
+ GAUDI_EVENT_TPC2_KRN_ERR = 313,
+ GAUDI_EVENT_TPC3_BMON_SPMU = 318,
+ GAUDI_EVENT_TPC3_KRN_ERR = 319,
+ GAUDI_EVENT_TPC4_BMON_SPMU = 324,
+ GAUDI_EVENT_TPC4_KRN_ERR = 325,
+ GAUDI_EVENT_TPC5_BMON_SPMU = 330,
+ GAUDI_EVENT_TPC5_KRN_ERR = 331,
+ GAUDI_EVENT_TPC6_BMON_SPMU = 336,
+ GAUDI_EVENT_TPC6_KRN_ERR = 337,
+ GAUDI_EVENT_TPC7_BMON_SPMU = 342,
+ GAUDI_EVENT_TPC7_KRN_ERR = 343,
+ GAUDI_EVENT_MMU_PAGE_FAULT = 380,
+ GAUDI_EVENT_MMU_WR_PERM = 381,
+ GAUDI_EVENT_DMA_BM_CH0 = 383,
+ GAUDI_EVENT_DMA_BM_CH1 = 384,
+ GAUDI_EVENT_DMA_BM_CH2 = 385,
+ GAUDI_EVENT_DMA_BM_CH3 = 386,
+ GAUDI_EVENT_DMA_BM_CH4 = 387,
+ GAUDI_EVENT_DMA_BM_CH5 = 388,
+ GAUDI_EVENT_DMA_BM_CH6 = 389,
+ GAUDI_EVENT_DMA_BM_CH7 = 390,
+ GAUDI_EVENT_HBM0_SPI_0 = 395,
+ GAUDI_EVENT_HBM0_SPI_1 = 396,
+ GAUDI_EVENT_HBM1_SPI_0 = 399,
+ GAUDI_EVENT_HBM1_SPI_1 = 400,
+ GAUDI_EVENT_HBM2_SPI_0 = 403,
+ GAUDI_EVENT_HBM2_SPI_1 = 404,
+ GAUDI_EVENT_HBM3_SPI_0 = 407,
+ GAUDI_EVENT_HBM3_SPI_1 = 408,
+ GAUDI_EVENT_PSOC_GPIO_U16_0 = 421,
+ GAUDI_EVENT_PI_UPDATE = 484,
+ GAUDI_EVENT_HALT_MACHINE = 485,
+ GAUDI_EVENT_INTS_REGISTER = 486,
+ GAUDI_EVENT_SOFT_RESET = 487,
+ GAUDI_EVENT_RAZWI_OR_ADC = 548,
+ GAUDI_EVENT_TPC0_QM = 572,
+ GAUDI_EVENT_TPC1_QM = 573,
+ GAUDI_EVENT_TPC2_QM = 574,
+ GAUDI_EVENT_TPC3_QM = 575,
+ GAUDI_EVENT_TPC4_QM = 576,
+ GAUDI_EVENT_TPC5_QM = 577,
+ GAUDI_EVENT_TPC6_QM = 578,
+ GAUDI_EVENT_TPC7_QM = 579,
+ GAUDI_EVENT_MME0_QM = 581,
+ GAUDI_EVENT_MME2_QM = 582,
+ GAUDI_EVENT_DMA0_QM = 583,
+ GAUDI_EVENT_DMA1_QM = 584,
+ GAUDI_EVENT_DMA2_QM = 585,
+ GAUDI_EVENT_DMA3_QM = 586,
+ GAUDI_EVENT_DMA4_QM = 587,
+ GAUDI_EVENT_DMA5_QM = 588,
+ GAUDI_EVENT_DMA6_QM = 589,
+ GAUDI_EVENT_DMA7_QM = 590,
+ GAUDI_EVENT_NIC0_QM0 = 594,
+ GAUDI_EVENT_NIC0_QM1 = 595,
+ GAUDI_EVENT_NIC1_QM0 = 596,
+ GAUDI_EVENT_NIC1_QM1 = 597,
+ GAUDI_EVENT_NIC2_QM0 = 598,
+ GAUDI_EVENT_NIC2_QM1 = 599,
+ GAUDI_EVENT_NIC3_QM0 = 600,
+ GAUDI_EVENT_NIC3_QM1 = 601,
+ GAUDI_EVENT_NIC4_QM0 = 602,
+ GAUDI_EVENT_NIC4_QM1 = 603,
+ GAUDI_EVENT_DMA0_CORE = 604,
+ GAUDI_EVENT_DMA1_CORE = 605,
+ GAUDI_EVENT_DMA2_CORE = 606,
+ GAUDI_EVENT_DMA3_CORE = 607,
+ GAUDI_EVENT_DMA4_CORE = 608,
+ GAUDI_EVENT_DMA5_CORE = 609,
+ GAUDI_EVENT_DMA6_CORE = 610,
+ GAUDI_EVENT_DMA7_CORE = 611,
+ GAUDI_EVENT_NIC0_QP0 = 612,
+ GAUDI_EVENT_NIC0_QP1 = 613,
+ GAUDI_EVENT_NIC1_QP0 = 614,
+ GAUDI_EVENT_NIC1_QP1 = 615,
+ GAUDI_EVENT_NIC2_QP0 = 616,
+ GAUDI_EVENT_NIC2_QP1 = 617,
+ GAUDI_EVENT_NIC3_QP0 = 618,
+ GAUDI_EVENT_NIC3_QP1 = 619,
+ GAUDI_EVENT_NIC4_QP0 = 620,
+ GAUDI_EVENT_NIC4_QP1 = 621,
+ GAUDI_EVENT_FIX_POWER_ENV_S = 658,
+ GAUDI_EVENT_FIX_POWER_ENV_E = 659,
+ GAUDI_EVENT_FIX_THERMAL_ENV_S = 660,
+ GAUDI_EVENT_FIX_THERMAL_ENV_E = 661,
+ GAUDI_EVENT_RAZWI_OR_ADC_SW = 662,
+ GAUDI_EVENT_SIZE,
+};
+
+#endif /* __GAUDI_ASYNC_EVENTS_H_ */
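The IDs above are deliberately sparse: reserved slots are simply skipped. As a minimal sketch of how driver code might dispatch on these IDs, assuming a hypothetical handler (nothing below is part of this patch):

/*
 * Illustrative only: dispatch on the async event IDs defined above.
 * Uses the GCC case-range extension, as kernel code commonly does;
 * u16 comes from <linux/types.h> in kernel context.
 */
static void demo_handle_async_event(u16 event_type)
{
	switch (event_type) {
	case GAUDI_EVENT_TPC0_DEC ... GAUDI_EVENT_TPC7_DEC:
		/* Spans 201..215; the even IDs in between are reserved */
		break;
	case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
		/* Uncorrectable HBM ECC, contiguous IDs 189..192 */
		break;
	default:
		break;
	}
}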
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
new file mode 100644
index 000000000000..737176ba06fb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_
+#define __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_
+
+struct gaudi_async_events_ids_map {
+ int fc_id;
+ int cpu_id;
+ int valid;
+ char name[64];
+};
+
+static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = {
+ { .fc_id = 0, .cpu_id = 0, .valid = 0, .name = "" },
+ { .fc_id = 1, .cpu_id = 1, .valid = 0, .name = "" },
+ { .fc_id = 2, .cpu_id = 2, .valid = 0, .name = "" },
+ { .fc_id = 3, .cpu_id = 3, .valid = 0, .name = "" },
+ { .fc_id = 4, .cpu_id = 4, .valid = 0, .name = "" },
+ { .fc_id = 5, .cpu_id = 5, .valid = 0, .name = "" },
+ { .fc_id = 6, .cpu_id = 6, .valid = 0, .name = "" },
+ { .fc_id = 7, .cpu_id = 7, .valid = 0, .name = "" },
+ { .fc_id = 8, .cpu_id = 8, .valid = 0, .name = "" },
+ { .fc_id = 9, .cpu_id = 9, .valid = 0, .name = "" },
+ { .fc_id = 10, .cpu_id = 10, .valid = 0, .name = "" },
+ { .fc_id = 11, .cpu_id = 11, .valid = 0, .name = "" },
+ { .fc_id = 12, .cpu_id = 12, .valid = 0, .name = "" },
+ { .fc_id = 13, .cpu_id = 13, .valid = 0, .name = "" },
+ { .fc_id = 14, .cpu_id = 14, .valid = 0, .name = "" },
+ { .fc_id = 15, .cpu_id = 15, .valid = 0, .name = "" },
+ { .fc_id = 16, .cpu_id = 16, .valid = 0, .name = "" },
+ { .fc_id = 17, .cpu_id = 17, .valid = 0, .name = "" },
+ { .fc_id = 18, .cpu_id = 18, .valid = 0, .name = "" },
+ { .fc_id = 19, .cpu_id = 19, .valid = 0, .name = "" },
+ { .fc_id = 20, .cpu_id = 20, .valid = 0, .name = "" },
+ { .fc_id = 21, .cpu_id = 21, .valid = 0, .name = "" },
+ { .fc_id = 22, .cpu_id = 22, .valid = 0, .name = "" },
+ { .fc_id = 23, .cpu_id = 23, .valid = 0, .name = "" },
+ { .fc_id = 24, .cpu_id = 24, .valid = 0, .name = "" },
+ { .fc_id = 25, .cpu_id = 25, .valid = 0, .name = "" },
+ { .fc_id = 26, .cpu_id = 26, .valid = 0, .name = "" },
+ { .fc_id = 27, .cpu_id = 27, .valid = 0, .name = "" },
+ { .fc_id = 28, .cpu_id = 28, .valid = 0, .name = "" },
+ { .fc_id = 29, .cpu_id = 29, .valid = 0, .name = "" },
+ { .fc_id = 30, .cpu_id = 30, .valid = 0, .name = "" },
+ { .fc_id = 31, .cpu_id = 31, .valid = 0, .name = "" },
+ { .fc_id = 32, .cpu_id = 32, .valid = 1, .name = "PCIE_CORE_SERR" },
+ { .fc_id = 33, .cpu_id = 33, .valid = 1, .name = "PCIE_CORE_DERR" },
+ { .fc_id = 34, .cpu_id = 34, .valid = 1, .name = "PCIE_IF_SERR" },
+ { .fc_id = 35, .cpu_id = 35, .valid = 1, .name = "PCIE_IF_DERR" },
+ { .fc_id = 36, .cpu_id = 36, .valid = 1, .name = "PCIE_PHY_SERR" },
+ { .fc_id = 37, .cpu_id = 37, .valid = 1, .name = "PCIE_PHY_DERR" },
+ { .fc_id = 38, .cpu_id = 38, .valid = 1, .name = "TPC0_SERR" },
+ { .fc_id = 39, .cpu_id = 38, .valid = 1, .name = "TPC1_SERR" },
+ { .fc_id = 40, .cpu_id = 38, .valid = 1, .name = "TPC2_SERR" },
+ { .fc_id = 41, .cpu_id = 38, .valid = 1, .name = "TPC3_SERR" },
+ { .fc_id = 42, .cpu_id = 38, .valid = 1, .name = "TPC4_SERR" },
+ { .fc_id = 43, .cpu_id = 38, .valid = 1, .name = "TPC5_SERR" },
+ { .fc_id = 44, .cpu_id = 38, .valid = 1, .name = "TPC6_SERR" },
+ { .fc_id = 45, .cpu_id = 38, .valid = 1, .name = "TPC7_SERR" },
+ { .fc_id = 46, .cpu_id = 39, .valid = 1, .name = "TPC0_DERR" },
+ { .fc_id = 47, .cpu_id = 39, .valid = 1, .name = "TPC1_DERR" },
+ { .fc_id = 48, .cpu_id = 39, .valid = 1, .name = "TPC2_DERR" },
+ { .fc_id = 49, .cpu_id = 39, .valid = 1, .name = "TPC3_DERR" },
+ { .fc_id = 50, .cpu_id = 39, .valid = 1, .name = "TPC4_DERR" },
+ { .fc_id = 51, .cpu_id = 39, .valid = 1, .name = "TPC5_DERR" },
+ { .fc_id = 52, .cpu_id = 39, .valid = 1, .name = "TPC6_DERR" },
+ { .fc_id = 53, .cpu_id = 39, .valid = 1, .name = "TPC7_DERR" },
+ { .fc_id = 54, .cpu_id = 40, .valid = 1, .name = "MME0_ACC_SERR" },
+ { .fc_id = 55, .cpu_id = 41, .valid = 1, .name = "MME0_ACC_DERR" },
+ { .fc_id = 56, .cpu_id = 42, .valid = 1, .name = "MME0_SBAB_SERR" },
+ { .fc_id = 57, .cpu_id = 43, .valid = 1, .name = "MME0_SBAB_DERR" },
+ { .fc_id = 58, .cpu_id = 44, .valid = 1, .name = "MME1_ACC_SERR" },
+ { .fc_id = 59, .cpu_id = 45, .valid = 1, .name = "MME1_ACC_DERR" },
+ { .fc_id = 60, .cpu_id = 46, .valid = 1, .name = "MME1_SBAB_SERR" },
+ { .fc_id = 61, .cpu_id = 47, .valid = 1, .name = "MME1_SBAB_DERR" },
+ { .fc_id = 62, .cpu_id = 48, .valid = 1, .name = "MME2_ACC_SERR" },
+ { .fc_id = 63, .cpu_id = 49, .valid = 1, .name = "MME2_ACC_DERR" },
+ { .fc_id = 64, .cpu_id = 50, .valid = 1, .name = "MME2_SBAB_SERR" },
+ { .fc_id = 65, .cpu_id = 51, .valid = 1, .name = "MME2_SBAB_DERR" },
+ { .fc_id = 66, .cpu_id = 52, .valid = 1, .name = "MME3_ACC_SERR" },
+ { .fc_id = 67, .cpu_id = 53, .valid = 1, .name = "MME3_ACC_DERR" },
+ { .fc_id = 68, .cpu_id = 54, .valid = 1, .name = "MME3_SBAB_SERR" },
+ { .fc_id = 69, .cpu_id = 55, .valid = 1, .name = "MME3_SBAB_DERR" },
+ { .fc_id = 70, .cpu_id = 56, .valid = 1, .name = "DMA0_SERR_ECC" },
+ { .fc_id = 71, .cpu_id = 56, .valid = 1, .name = "DMA1_SERR_ECC" },
+ { .fc_id = 72, .cpu_id = 56, .valid = 1, .name = "DMA2_SERR_ECC" },
+ { .fc_id = 73, .cpu_id = 56, .valid = 1, .name = "DMA3_SERR_ECC" },
+ { .fc_id = 74, .cpu_id = 56, .valid = 1, .name = "DMA4_SERR_ECC" },
+ { .fc_id = 75, .cpu_id = 56, .valid = 1, .name = "DMA5_SERR_ECC" },
+ { .fc_id = 76, .cpu_id = 56, .valid = 1, .name = "DMA6_SERR_ECC" },
+ { .fc_id = 77, .cpu_id = 56, .valid = 1, .name = "DMA7_SERR_ECC" },
+ { .fc_id = 78, .cpu_id = 57, .valid = 1, .name = "DMA0_DERR_ECC" },
+ { .fc_id = 79, .cpu_id = 57, .valid = 1, .name = "DMA1_DERR_ECC" },
+ { .fc_id = 80, .cpu_id = 57, .valid = 1, .name = "DMA2_DERR_ECC" },
+ { .fc_id = 81, .cpu_id = 57, .valid = 1, .name = "DMA3_DERR_ECC" },
+ { .fc_id = 82, .cpu_id = 57, .valid = 1, .name = "DMA4_DERR_ECC" },
+ { .fc_id = 83, .cpu_id = 57, .valid = 1, .name = "DMA5_DERR_ECC" },
+ { .fc_id = 84, .cpu_id = 57, .valid = 1, .name = "DMA6_DERR_ECC" },
+ { .fc_id = 85, .cpu_id = 57, .valid = 1, .name = "DMA7_DERR_ECC" },
+ { .fc_id = 86, .cpu_id = 58, .valid = 1, .name = "CPU_IF_ECC_SERR" },
+ { .fc_id = 87, .cpu_id = 59, .valid = 1, .name = "CPU_IF_ECC_DERR" },
+ { .fc_id = 88, .cpu_id = 60, .valid = 1, .name = "PSOC_MEM_SERR" },
+ { .fc_id = 89, .cpu_id = 61, .valid = 1,
+ .name = "PSOC_CORESIGHT_SERR" },
+ { .fc_id = 90, .cpu_id = 62, .valid = 1, .name = "PSOC_MEM_DERR" },
+ { .fc_id = 91, .cpu_id = 63, .valid = 1,
+ .name = "PSOC_CORESIGHT_DERR" },
+ { .fc_id = 92, .cpu_id = 64, .valid = 1, .name = "SRAM0_SERR" },
+ { .fc_id = 93, .cpu_id = 64, .valid = 1, .name = "SRAM1_SERR" },
+ { .fc_id = 94, .cpu_id = 64, .valid = 1, .name = "SRAM2_SERR" },
+ { .fc_id = 95, .cpu_id = 64, .valid = 1, .name = "SRAM3_SERR" },
+ { .fc_id = 96, .cpu_id = 64, .valid = 1, .name = "SRAM7_SERR" },
+ { .fc_id = 97, .cpu_id = 64, .valid = 1, .name = "SRAM6_SERR" },
+ { .fc_id = 98, .cpu_id = 64, .valid = 1, .name = "SRAM5_SERR" },
+ { .fc_id = 99, .cpu_id = 64, .valid = 1, .name = "SRAM4_SERR" },
+ { .fc_id = 100, .cpu_id = 64, .valid = 1, .name = "SRAM8_SERR" },
+ { .fc_id = 101, .cpu_id = 64, .valid = 1, .name = "SRAM9_SERR" },
+ { .fc_id = 102, .cpu_id = 64, .valid = 1, .name = "SRAM10_SERR" },
+ { .fc_id = 103, .cpu_id = 64, .valid = 1, .name = "SRAM11_SERR" },
+ { .fc_id = 104, .cpu_id = 64, .valid = 1, .name = "SRAM15_SERR" },
+ { .fc_id = 105, .cpu_id = 64, .valid = 1, .name = "SRAM14_SERR" },
+ { .fc_id = 106, .cpu_id = 64, .valid = 1, .name = "SRAM13_SERR" },
+ { .fc_id = 107, .cpu_id = 64, .valid = 1, .name = "SRAM12_SERR" },
+ { .fc_id = 108, .cpu_id = 64, .valid = 1, .name = "SRAM16_SERR" },
+ { .fc_id = 109, .cpu_id = 64, .valid = 1, .name = "SRAM17_SERR" },
+ { .fc_id = 110, .cpu_id = 64, .valid = 1, .name = "SRAM18_SERR" },
+ { .fc_id = 111, .cpu_id = 64, .valid = 1, .name = "SRAM19_SERR" },
+ { .fc_id = 112, .cpu_id = 64, .valid = 1, .name = "SRAM23_SERR" },
+ { .fc_id = 113, .cpu_id = 64, .valid = 1, .name = "SRAM22_SERR" },
+ { .fc_id = 114, .cpu_id = 64, .valid = 1, .name = "SRAM21_SERR" },
+ { .fc_id = 115, .cpu_id = 64, .valid = 1, .name = "SRAM20_SERR" },
+ { .fc_id = 116, .cpu_id = 64, .valid = 1, .name = "SRAM24_SERR" },
+ { .fc_id = 117, .cpu_id = 64, .valid = 1, .name = "SRAM25_SERR" },
+ { .fc_id = 118, .cpu_id = 64, .valid = 1, .name = "SRAM26_SERR" },
+ { .fc_id = 119, .cpu_id = 64, .valid = 1, .name = "SRAM27_SERR" },
+ { .fc_id = 120, .cpu_id = 64, .valid = 1, .name = "SRAM31_SERR" },
+ { .fc_id = 121, .cpu_id = 64, .valid = 1, .name = "SRAM30_SERR" },
+ { .fc_id = 122, .cpu_id = 64, .valid = 1, .name = "SRAM29_SERR" },
+ { .fc_id = 123, .cpu_id = 64, .valid = 1, .name = "SRAM28_SERR" },
+ { .fc_id = 124, .cpu_id = 65, .valid = 1, .name = "SRAM0_DERR" },
+ { .fc_id = 125, .cpu_id = 65, .valid = 1, .name = "SRAM1_DERR" },
+ { .fc_id = 126, .cpu_id = 65, .valid = 1, .name = "SRAM2_DERR" },
+ { .fc_id = 127, .cpu_id = 65, .valid = 1, .name = "SRAM3_DERR" },
+ { .fc_id = 128, .cpu_id = 65, .valid = 1, .name = "SRAM7_DERR" },
+ { .fc_id = 129, .cpu_id = 65, .valid = 1, .name = "SRAM6_DERR" },
+ { .fc_id = 130, .cpu_id = 65, .valid = 1, .name = "SRAM5_DERR" },
+ { .fc_id = 131, .cpu_id = 65, .valid = 1, .name = "SRAM4_DERR" },
+ { .fc_id = 132, .cpu_id = 65, .valid = 1, .name = "SRAM8_DERR" },
+ { .fc_id = 133, .cpu_id = 65, .valid = 1, .name = "SRAM9_DERR" },
+ { .fc_id = 134, .cpu_id = 65, .valid = 1, .name = "SRAM10_DERR" },
+ { .fc_id = 135, .cpu_id = 65, .valid = 1, .name = "SRAM11_DERR" },
+ { .fc_id = 136, .cpu_id = 65, .valid = 1, .name = "SRAM15_DERR" },
+ { .fc_id = 137, .cpu_id = 65, .valid = 1, .name = "SRAM14_DERR" },
+ { .fc_id = 138, .cpu_id = 65, .valid = 1, .name = "SRAM13_DERR" },
+ { .fc_id = 139, .cpu_id = 65, .valid = 1, .name = "SRAM12_DERR" },
+ { .fc_id = 140, .cpu_id = 65, .valid = 1, .name = "SRAM16_DERR" },
+ { .fc_id = 141, .cpu_id = 65, .valid = 1, .name = "SRAM17_DERR" },
+ { .fc_id = 142, .cpu_id = 65, .valid = 1, .name = "SRAM18_DERR" },
+ { .fc_id = 143, .cpu_id = 65, .valid = 1, .name = "SRAM19_DERR" },
+ { .fc_id = 144, .cpu_id = 65, .valid = 1, .name = "SRAM23_DERR" },
+ { .fc_id = 145, .cpu_id = 65, .valid = 1, .name = "SRAM22_DERR" },
+ { .fc_id = 146, .cpu_id = 65, .valid = 1, .name = "SRAM21_DERR" },
+ { .fc_id = 147, .cpu_id = 65, .valid = 1, .name = "SRAM20_DERR" },
+ { .fc_id = 148, .cpu_id = 65, .valid = 1, .name = "SRAM24_DERR" },
+ { .fc_id = 149, .cpu_id = 65, .valid = 1, .name = "SRAM25_DERR" },
+ { .fc_id = 150, .cpu_id = 65, .valid = 1, .name = "SRAM26_DERR" },
+ { .fc_id = 151, .cpu_id = 65, .valid = 1, .name = "SRAM27_DERR" },
+ { .fc_id = 152, .cpu_id = 65, .valid = 1, .name = "SRAM31_DERR" },
+ { .fc_id = 153, .cpu_id = 65, .valid = 1, .name = "SRAM30_DERR" },
+ { .fc_id = 154, .cpu_id = 65, .valid = 1, .name = "SRAM29_DERR" },
+ { .fc_id = 155, .cpu_id = 65, .valid = 1, .name = "SRAM28_DERR" },
+ { .fc_id = 156, .cpu_id = 66, .valid = 1, .name = "NIC0_SERR" },
+ { .fc_id = 157, .cpu_id = 66, .valid = 1, .name = "NIC1_SERR" },
+ { .fc_id = 158, .cpu_id = 66, .valid = 1, .name = "NIC2_SERR" },
+ { .fc_id = 159, .cpu_id = 66, .valid = 1, .name = "NIC3_SERR" },
+ { .fc_id = 160, .cpu_id = 66, .valid = 1, .name = "NIC4_SERR" },
+ { .fc_id = 161, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 162, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 163, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 164, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 165, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 166, .cpu_id = 67, .valid = 1, .name = "NIC0_DERR" },
+ { .fc_id = 167, .cpu_id = 67, .valid = 1, .name = "NIC1_DERR" },
+ { .fc_id = 168, .cpu_id = 67, .valid = 1, .name = "NIC2_DERR" },
+ { .fc_id = 169, .cpu_id = 67, .valid = 1, .name = "NIC3_DERR" },
+ { .fc_id = 170, .cpu_id = 67, .valid = 1, .name = "NIC4_DERR" },
+ { .fc_id = 171, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 172, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 173, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 174, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 175, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 176, .cpu_id = 68, .valid = 1, .name = "DMA_IF0_SERR" },
+ { .fc_id = 177, .cpu_id = 68, .valid = 1, .name = "DMA_IF1_SERR" },
+ { .fc_id = 178, .cpu_id = 68, .valid = 1, .name = "DMA_IF2_SERR" },
+ { .fc_id = 179, .cpu_id = 68, .valid = 1, .name = "DMA_IF3_SERR" },
+ { .fc_id = 180, .cpu_id = 69, .valid = 1, .name = "DMA_IF0_DERR" },
+ { .fc_id = 181, .cpu_id = 69, .valid = 1, .name = "DMA_IF1_DERR" },
+ { .fc_id = 182, .cpu_id = 69, .valid = 1, .name = "DMA_IF2_DERR" },
+ { .fc_id = 183, .cpu_id = 69, .valid = 1, .name = "DMA_IF3_DERR" },
+ { .fc_id = 184, .cpu_id = 70, .valid = 1, .name = "GIC500" },
+ { .fc_id = 185, .cpu_id = 71, .valid = 1, .name = "HBM_0_SERR" },
+ { .fc_id = 186, .cpu_id = 71, .valid = 1, .name = "HBM_1_SERR" },
+ { .fc_id = 187, .cpu_id = 71, .valid = 1, .name = "HBM_2_SERR" },
+ { .fc_id = 188, .cpu_id = 71, .valid = 1, .name = "HBM_3_SERR" },
+ { .fc_id = 189, .cpu_id = 72, .valid = 1, .name = "HBM_0_DERR" },
+ { .fc_id = 190, .cpu_id = 72, .valid = 1, .name = "HBM_1_DERR" },
+ { .fc_id = 191, .cpu_id = 72, .valid = 1, .name = "HBM_2_DERR" },
+ { .fc_id = 192, .cpu_id = 72, .valid = 1, .name = "HBM_3_DERR" },
+ { .fc_id = 193, .cpu_id = 73, .valid = 1, .name = "MMU_SERR" },
+ { .fc_id = 194, .cpu_id = 74, .valid = 1, .name = "MMU_DERR" },
+ { .fc_id = 195, .cpu_id = 75, .valid = 0, .name = "" },
+ { .fc_id = 196, .cpu_id = 76, .valid = 0, .name = "" },
+ { .fc_id = 197, .cpu_id = 77, .valid = 0, .name = "" },
+ { .fc_id = 198, .cpu_id = 78, .valid = 0, .name = "" },
+ { .fc_id = 199, .cpu_id = 79, .valid = 0, .name = "" },
+ { .fc_id = 200, .cpu_id = 80, .valid = 1, .name = "PCIE_DEC" },
+ { .fc_id = 201, .cpu_id = 81, .valid = 1, .name = "TPC0_DEC" },
+ { .fc_id = 202, .cpu_id = 82, .valid = 0, .name = "" },
+ { .fc_id = 203, .cpu_id = 83, .valid = 1, .name = "TPC1_DEC" },
+ { .fc_id = 204, .cpu_id = 84, .valid = 0, .name = "" },
+ { .fc_id = 205, .cpu_id = 85, .valid = 1, .name = "TPC2_DEC" },
+ { .fc_id = 206, .cpu_id = 86, .valid = 0, .name = "" },
+ { .fc_id = 207, .cpu_id = 87, .valid = 1, .name = "TPC3_DEC" },
+ { .fc_id = 208, .cpu_id = 88, .valid = 0, .name = "" },
+ { .fc_id = 209, .cpu_id = 89, .valid = 1, .name = "TPC4_DEC" },
+ { .fc_id = 210, .cpu_id = 90, .valid = 0, .name = "" },
+ { .fc_id = 211, .cpu_id = 91, .valid = 1, .name = "TPC5_DEC" },
+ { .fc_id = 212, .cpu_id = 92, .valid = 0, .name = "" },
+ { .fc_id = 213, .cpu_id = 93, .valid = 1, .name = "TPC6_DEC" },
+ { .fc_id = 214, .cpu_id = 94, .valid = 0, .name = "" },
+ { .fc_id = 215, .cpu_id = 95, .valid = 1, .name = "TPC7_DEC" },
+ { .fc_id = 216, .cpu_id = 96, .valid = 0, .name = "" },
+ { .fc_id = 217, .cpu_id = 97, .valid = 1, .name = "AXI_ECC" },
+ { .fc_id = 218, .cpu_id = 98, .valid = 1, .name = "L2_RAM_ECC" },
+ { .fc_id = 219, .cpu_id = 99, .valid = 1, .name = "MME0_WBC_RSP" },
+ { .fc_id = 220, .cpu_id = 100, .valid = 1, .name = "MME0_SBAB0_RSP" },
+ { .fc_id = 221, .cpu_id = 101, .valid = 0, .name = "" },
+ { .fc_id = 222, .cpu_id = 102, .valid = 0, .name = "" },
+ { .fc_id = 223, .cpu_id = 103, .valid = 0, .name = "" },
+ { .fc_id = 224, .cpu_id = 104, .valid = 1, .name = "MME1_WBC_RSP" },
+ { .fc_id = 225, .cpu_id = 105, .valid = 1, .name = "MME1_SBAB0_RSP" },
+ { .fc_id = 226, .cpu_id = 106, .valid = 0, .name = "" },
+ { .fc_id = 227, .cpu_id = 107, .valid = 0, .name = "" },
+ { .fc_id = 228, .cpu_id = 108, .valid = 0, .name = "" },
+ { .fc_id = 229, .cpu_id = 109, .valid = 1, .name = "MME2_WBC_RSP" },
+ { .fc_id = 230, .cpu_id = 110, .valid = 1, .name = "MME2_SBAB0_RSP" },
+ { .fc_id = 231, .cpu_id = 111, .valid = 0, .name = "" },
+ { .fc_id = 232, .cpu_id = 112, .valid = 0, .name = "" },
+ { .fc_id = 233, .cpu_id = 113, .valid = 0, .name = "" },
+ { .fc_id = 234, .cpu_id = 114, .valid = 1, .name = "MME3_WBC_RSP" },
+ { .fc_id = 235, .cpu_id = 115, .valid = 1, .name = "MME3_SBAB0_RSP" },
+ { .fc_id = 236, .cpu_id = 116, .valid = 0, .name = "" },
+ { .fc_id = 237, .cpu_id = 117, .valid = 0, .name = "" },
+ { .fc_id = 238, .cpu_id = 118, .valid = 0, .name = "" },
+ { .fc_id = 239, .cpu_id = 119, .valid = 1, .name = "PLL0" },
+ { .fc_id = 240, .cpu_id = 119, .valid = 1, .name = "PLL1" },
+ { .fc_id = 241, .cpu_id = 119, .valid = 1, .name = "PLL2" },
+ { .fc_id = 242, .cpu_id = 119, .valid = 1, .name = "PLL3" },
+ { .fc_id = 243, .cpu_id = 119, .valid = 1, .name = "PLL4" },
+ { .fc_id = 244, .cpu_id = 119, .valid = 1, .name = "PLL5" },
+ { .fc_id = 245, .cpu_id = 119, .valid = 1, .name = "PLL6" },
+ { .fc_id = 246, .cpu_id = 119, .valid = 1, .name = "PLL7" },
+ { .fc_id = 247, .cpu_id = 119, .valid = 1, .name = "PLL8" },
+ { .fc_id = 248, .cpu_id = 119, .valid = 1, .name = "PLL9" },
+ { .fc_id = 249, .cpu_id = 119, .valid = 1, .name = "PLL10" },
+ { .fc_id = 250, .cpu_id = 119, .valid = 1, .name = "PLL11" },
+ { .fc_id = 251, .cpu_id = 119, .valid = 1, .name = "PLL12" },
+ { .fc_id = 252, .cpu_id = 119, .valid = 1, .name = "PLL13" },
+ { .fc_id = 253, .cpu_id = 119, .valid = 1, .name = "PLL14" },
+ { .fc_id = 254, .cpu_id = 119, .valid = 1, .name = "PLL15" },
+ { .fc_id = 255, .cpu_id = 119, .valid = 1, .name = "PLL16" },
+ { .fc_id = 256, .cpu_id = 119, .valid = 1, .name = "PLL17" },
+ { .fc_id = 257, .cpu_id = 120, .valid = 1,
+ .name = "CPU_AXI_SPLITTER" },
+ { .fc_id = 258, .cpu_id = 121, .valid = 0, .name = "" },
+ { .fc_id = 259, .cpu_id = 122, .valid = 0, .name = "" },
+ { .fc_id = 260, .cpu_id = 123, .valid = 0, .name = "" },
+ { .fc_id = 261, .cpu_id = 124, .valid = 0, .name = "" },
+ { .fc_id = 262, .cpu_id = 125, .valid = 1, .name = "PSOC_AXI_DEC" },
+ { .fc_id = 263, .cpu_id = 126, .valid = 1, .name = "PSOC_PRSTN_FALL" },
+ { .fc_id = 264, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_0" },
+ { .fc_id = 265, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_1" },
+ { .fc_id = 266, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_2" },
+ { .fc_id = 267, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_3" },
+ { .fc_id = 268, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_4" },
+ { .fc_id = 269, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 270, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 271, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 272, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 273, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 274, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 275, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 276, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 277, .cpu_id = 129, .valid = 0, .name = "" },
+ { .fc_id = 278, .cpu_id = 129, .valid = 0, .name = "" },
+ { .fc_id = 279, .cpu_id = 129, .valid = 0, .name = "" },
+ { .fc_id = 280, .cpu_id = 129, .valid = 0, .name = "" },
+ { .fc_id = 281, .cpu_id = 130, .valid = 0, .name = "" },
+ { .fc_id = 282, .cpu_id = 131, .valid = 0, .name = "" },
+ { .fc_id = 283, .cpu_id = 132, .valid = 0, .name = "" },
+ { .fc_id = 284, .cpu_id = 133, .valid = 0, .name = "" },
+ { .fc_id = 285, .cpu_id = 134, .valid = 0, .name = "" },
+ { .fc_id = 286, .cpu_id = 135, .valid = 0, .name = "" },
+ { .fc_id = 287, .cpu_id = 136, .valid = 0, .name = "" },
+ { .fc_id = 288, .cpu_id = 137, .valid = 0, .name = "" },
+ { .fc_id = 289, .cpu_id = 138, .valid = 0, .name = "" },
+ { .fc_id = 290, .cpu_id = 139, .valid = 1, .name = "PCIE_FLR" },
+ { .fc_id = 291, .cpu_id = 140, .valid = 0, .name = "" },
+ { .fc_id = 292, .cpu_id = 141, .valid = 0, .name = "" },
+ { .fc_id = 293, .cpu_id = 142, .valid = 0, .name = "" },
+ { .fc_id = 294, .cpu_id = 143, .valid = 0, .name = "" },
+ { .fc_id = 295, .cpu_id = 144, .valid = 0, .name = "" },
+ { .fc_id = 296, .cpu_id = 145, .valid = 0, .name = "" },
+ { .fc_id = 297, .cpu_id = 146, .valid = 0, .name = "" },
+ { .fc_id = 298, .cpu_id = 147, .valid = 0, .name = "" },
+ { .fc_id = 299, .cpu_id = 148, .valid = 0, .name = "" },
+ { .fc_id = 300, .cpu_id = 149, .valid = 1, .name = "TPC0_BMON_SPMU" },
+ { .fc_id = 301, .cpu_id = 150, .valid = 1, .name = "TPC0_KRN_ERR" },
+ { .fc_id = 302, .cpu_id = 151, .valid = 0, .name = "" },
+ { .fc_id = 303, .cpu_id = 152, .valid = 0, .name = "" },
+ { .fc_id = 304, .cpu_id = 153, .valid = 0, .name = "" },
+ { .fc_id = 305, .cpu_id = 154, .valid = 0, .name = "" },
+ { .fc_id = 306, .cpu_id = 155, .valid = 1, .name = "TPC1_BMON_SPMU" },
+ { .fc_id = 307, .cpu_id = 156, .valid = 1, .name = "TPC1_KRN_ERR" },
+ { .fc_id = 308, .cpu_id = 157, .valid = 0, .name = "" },
+ { .fc_id = 309, .cpu_id = 158, .valid = 0, .name = "" },
+ { .fc_id = 310, .cpu_id = 159, .valid = 0, .name = "" },
+ { .fc_id = 311, .cpu_id = 160, .valid = 0, .name = "" },
+ { .fc_id = 312, .cpu_id = 161, .valid = 1, .name = "TPC2_BMON_SPMU" },
+ { .fc_id = 313, .cpu_id = 162, .valid = 1, .name = "TPC2_KRN_ERR" },
+ { .fc_id = 314, .cpu_id = 163, .valid = 0, .name = "" },
+ { .fc_id = 315, .cpu_id = 164, .valid = 0, .name = "" },
+ { .fc_id = 316, .cpu_id = 165, .valid = 0, .name = "" },
+ { .fc_id = 317, .cpu_id = 166, .valid = 0, .name = "" },
+ { .fc_id = 318, .cpu_id = 167, .valid = 1, .name = "TPC3_BMON_SPMU" },
+ { .fc_id = 319, .cpu_id = 168, .valid = 1, .name = "TPC3_KRN_ERR" },
+ { .fc_id = 320, .cpu_id = 169, .valid = 0, .name = "" },
+ { .fc_id = 321, .cpu_id = 170, .valid = 0, .name = "" },
+ { .fc_id = 322, .cpu_id = 171, .valid = 0, .name = "" },
+ { .fc_id = 323, .cpu_id = 172, .valid = 0, .name = "" },
+ { .fc_id = 324, .cpu_id = 173, .valid = 1, .name = "TPC4_BMON_SPMU" },
+ { .fc_id = 325, .cpu_id = 174, .valid = 1, .name = "TPC4_KRN_ERR" },
+ { .fc_id = 326, .cpu_id = 175, .valid = 0, .name = "" },
+ { .fc_id = 327, .cpu_id = 176, .valid = 0, .name = "" },
+ { .fc_id = 328, .cpu_id = 177, .valid = 0, .name = "" },
+ { .fc_id = 329, .cpu_id = 178, .valid = 0, .name = "" },
+ { .fc_id = 330, .cpu_id = 179, .valid = 1, .name = "TPC5_BMON_SPMU" },
+ { .fc_id = 331, .cpu_id = 180, .valid = 1, .name = "TPC5_KRN_ERR" },
+ { .fc_id = 332, .cpu_id = 181, .valid = 0, .name = "" },
+ { .fc_id = 333, .cpu_id = 182, .valid = 0, .name = "" },
+ { .fc_id = 334, .cpu_id = 183, .valid = 0, .name = "" },
+ { .fc_id = 335, .cpu_id = 184, .valid = 0, .name = "" },
+ { .fc_id = 336, .cpu_id = 185, .valid = 1, .name = "TPC6_BMON_SPMU" },
+ { .fc_id = 337, .cpu_id = 186, .valid = 1, .name = "TPC6_KRN_ERR" },
+ { .fc_id = 338, .cpu_id = 187, .valid = 0, .name = "" },
+ { .fc_id = 339, .cpu_id = 188, .valid = 0, .name = "" },
+ { .fc_id = 340, .cpu_id = 189, .valid = 0, .name = "" },
+ { .fc_id = 341, .cpu_id = 190, .valid = 0, .name = "" },
+ { .fc_id = 342, .cpu_id = 191, .valid = 1, .name = "TPC7_BMON_SPMU" },
+ { .fc_id = 343, .cpu_id = 192, .valid = 1, .name = "TPC7_KRN_ERR" },
+ { .fc_id = 344, .cpu_id = 193, .valid = 0, .name = "" },
+ { .fc_id = 345, .cpu_id = 194, .valid = 0, .name = "" },
+ { .fc_id = 346, .cpu_id = 195, .valid = 0, .name = "" },
+ { .fc_id = 347, .cpu_id = 196, .valid = 0, .name = "" },
+ { .fc_id = 348, .cpu_id = 197, .valid = 0, .name = "" },
+ { .fc_id = 349, .cpu_id = 198, .valid = 0, .name = "" },
+ { .fc_id = 350, .cpu_id = 199, .valid = 0, .name = "" },
+ { .fc_id = 351, .cpu_id = 200, .valid = 0, .name = "" },
+ { .fc_id = 352, .cpu_id = 201, .valid = 0, .name = "" },
+ { .fc_id = 353, .cpu_id = 202, .valid = 0, .name = "" },
+ { .fc_id = 354, .cpu_id = 203, .valid = 0, .name = "" },
+ { .fc_id = 355, .cpu_id = 204, .valid = 0, .name = "" },
+ { .fc_id = 356, .cpu_id = 205, .valid = 0, .name = "" },
+ { .fc_id = 357, .cpu_id = 206, .valid = 0, .name = "" },
+ { .fc_id = 358, .cpu_id = 207, .valid = 0, .name = "" },
+ { .fc_id = 359, .cpu_id = 208, .valid = 0, .name = "" },
+ { .fc_id = 360, .cpu_id = 209, .valid = 0, .name = "" },
+ { .fc_id = 361, .cpu_id = 210, .valid = 0, .name = "" },
+ { .fc_id = 362, .cpu_id = 211, .valid = 0, .name = "" },
+ { .fc_id = 363, .cpu_id = 212, .valid = 0, .name = "" },
+ { .fc_id = 364, .cpu_id = 213, .valid = 0, .name = "" },
+ { .fc_id = 365, .cpu_id = 214, .valid = 0, .name = "" },
+ { .fc_id = 366, .cpu_id = 215, .valid = 0, .name = "" },
+ { .fc_id = 367, .cpu_id = 216, .valid = 0, .name = "" },
+ { .fc_id = 368, .cpu_id = 217, .valid = 0, .name = "" },
+ { .fc_id = 369, .cpu_id = 218, .valid = 0, .name = "" },
+ { .fc_id = 370, .cpu_id = 219, .valid = 0, .name = "" },
+ { .fc_id = 371, .cpu_id = 220, .valid = 0, .name = "" },
+ { .fc_id = 372, .cpu_id = 221, .valid = 0, .name = "" },
+ { .fc_id = 373, .cpu_id = 222, .valid = 0, .name = "" },
+ { .fc_id = 374, .cpu_id = 223, .valid = 0, .name = "" },
+ { .fc_id = 375, .cpu_id = 224, .valid = 0, .name = "" },
+ { .fc_id = 376, .cpu_id = 225, .valid = 0, .name = "" },
+ { .fc_id = 377, .cpu_id = 226, .valid = 0, .name = "" },
+ { .fc_id = 378, .cpu_id = 227, .valid = 0, .name = "" },
+ { .fc_id = 379, .cpu_id = 228, .valid = 0, .name = "" },
+ { .fc_id = 380, .cpu_id = 229, .valid = 1, .name = "MMU_PAGE_FAULT" },
+ { .fc_id = 381, .cpu_id = 230, .valid = 1, .name = "MMU_WR_PERM" },
+ { .fc_id = 382, .cpu_id = 231, .valid = 0, .name = "" },
+ { .fc_id = 383, .cpu_id = 232, .valid = 1, .name = "DMA_BM_CH0" },
+ { .fc_id = 384, .cpu_id = 233, .valid = 1, .name = "DMA_BM_CH1" },
+ { .fc_id = 385, .cpu_id = 234, .valid = 1, .name = "DMA_BM_CH2" },
+ { .fc_id = 386, .cpu_id = 235, .valid = 1, .name = "DMA_BM_CH3" },
+ { .fc_id = 387, .cpu_id = 236, .valid = 1, .name = "DMA_BM_CH4" },
+ { .fc_id = 388, .cpu_id = 237, .valid = 1, .name = "DMA_BM_CH5" },
+ { .fc_id = 389, .cpu_id = 238, .valid = 1, .name = "DMA_BM_CH6" },
+ { .fc_id = 390, .cpu_id = 239, .valid = 1, .name = "DMA_BM_CH7" },
+ { .fc_id = 391, .cpu_id = 240, .valid = 0, .name = "" },
+ { .fc_id = 392, .cpu_id = 241, .valid = 0, .name = "" },
+ { .fc_id = 393, .cpu_id = 242, .valid = 0, .name = "" },
+ { .fc_id = 394, .cpu_id = 243, .valid = 0, .name = "" },
+ { .fc_id = 395, .cpu_id = 244, .valid = 1, .name = "HBM0_SPI_0" },
+ { .fc_id = 396, .cpu_id = 245, .valid = 1, .name = "HBM0_SPI_1" },
+ { .fc_id = 397, .cpu_id = 246, .valid = 0, .name = "" },
+ { .fc_id = 398, .cpu_id = 247, .valid = 0, .name = "" },
+ { .fc_id = 399, .cpu_id = 248, .valid = 1, .name = "HBM1_SPI_0" },
+ { .fc_id = 400, .cpu_id = 249, .valid = 1, .name = "HBM1_SPI_1" },
+ { .fc_id = 401, .cpu_id = 250, .valid = 0, .name = "" },
+ { .fc_id = 402, .cpu_id = 251, .valid = 0, .name = "" },
+ { .fc_id = 403, .cpu_id = 252, .valid = 1, .name = "HBM2_SPI_0" },
+ { .fc_id = 404, .cpu_id = 253, .valid = 1, .name = "HBM2_SPI_1" },
+ { .fc_id = 405, .cpu_id = 254, .valid = 0, .name = "" },
+ { .fc_id = 406, .cpu_id = 255, .valid = 0, .name = "" },
+ { .fc_id = 407, .cpu_id = 256, .valid = 1, .name = "HBM3_SPI_0" },
+ { .fc_id = 408, .cpu_id = 257, .valid = 1, .name = "HBM3_SPI_1" },
+ { .fc_id = 409, .cpu_id = 258, .valid = 0, .name = "" },
+ { .fc_id = 410, .cpu_id = 259, .valid = 0, .name = "" },
+ { .fc_id = 411, .cpu_id = 260, .valid = 0, .name = "" },
+ { .fc_id = 412, .cpu_id = 261, .valid = 0, .name = "" },
+ { .fc_id = 413, .cpu_id = 262, .valid = 0, .name = "" },
+ { .fc_id = 414, .cpu_id = 263, .valid = 0, .name = "" },
+ { .fc_id = 415, .cpu_id = 264, .valid = 0, .name = "" },
+ { .fc_id = 416, .cpu_id = 265, .valid = 0, .name = "" },
+ { .fc_id = 417, .cpu_id = 266, .valid = 0, .name = "" },
+ { .fc_id = 418, .cpu_id = 267, .valid = 0, .name = "" },
+ { .fc_id = 419, .cpu_id = 268, .valid = 0, .name = "" },
+ { .fc_id = 420, .cpu_id = 269, .valid = 0, .name = "" },
+ { .fc_id = 421, .cpu_id = 270, .valid = 1, .name = "PSOC_GPIO_U16_0" },
+ { .fc_id = 422, .cpu_id = 271, .valid = 0, .name = "" },
+ { .fc_id = 423, .cpu_id = 272, .valid = 0, .name = "" },
+ { .fc_id = 424, .cpu_id = 273, .valid = 0, .name = "" },
+ { .fc_id = 425, .cpu_id = 274, .valid = 0, .name = "" },
+ { .fc_id = 426, .cpu_id = 275, .valid = 0, .name = "" },
+ { .fc_id = 427, .cpu_id = 276, .valid = 0, .name = "" },
+ { .fc_id = 428, .cpu_id = 277, .valid = 0, .name = "" },
+ { .fc_id = 429, .cpu_id = 278, .valid = 0, .name = "" },
+ { .fc_id = 430, .cpu_id = 279, .valid = 0, .name = "" },
+ { .fc_id = 431, .cpu_id = 280, .valid = 0, .name = "" },
+ { .fc_id = 432, .cpu_id = 281, .valid = 0, .name = "" },
+ { .fc_id = 433, .cpu_id = 282, .valid = 0, .name = "" },
+ { .fc_id = 434, .cpu_id = 283, .valid = 0, .name = "" },
+ { .fc_id = 435, .cpu_id = 284, .valid = 0, .name = "" },
+ { .fc_id = 436, .cpu_id = 285, .valid = 0, .name = "" },
+ { .fc_id = 437, .cpu_id = 286, .valid = 0, .name = "" },
+ { .fc_id = 438, .cpu_id = 287, .valid = 0, .name = "" },
+ { .fc_id = 439, .cpu_id = 288, .valid = 0, .name = "" },
+ { .fc_id = 440, .cpu_id = 289, .valid = 0, .name = "" },
+ { .fc_id = 441, .cpu_id = 290, .valid = 0, .name = "" },
+ { .fc_id = 442, .cpu_id = 291, .valid = 0, .name = "" },
+ { .fc_id = 443, .cpu_id = 292, .valid = 0, .name = "" },
+ { .fc_id = 444, .cpu_id = 293, .valid = 0, .name = "" },
+ { .fc_id = 445, .cpu_id = 294, .valid = 0, .name = "" },
+ { .fc_id = 446, .cpu_id = 295, .valid = 0, .name = "" },
+ { .fc_id = 447, .cpu_id = 296, .valid = 0, .name = "" },
+ { .fc_id = 448, .cpu_id = 297, .valid = 0, .name = "" },
+ { .fc_id = 449, .cpu_id = 298, .valid = 0, .name = "" },
+ { .fc_id = 450, .cpu_id = 299, .valid = 0, .name = "" },
+ { .fc_id = 451, .cpu_id = 300, .valid = 0, .name = "" },
+ { .fc_id = 452, .cpu_id = 301, .valid = 0, .name = "" },
+ { .fc_id = 453, .cpu_id = 302, .valid = 0, .name = "" },
+ { .fc_id = 454, .cpu_id = 303, .valid = 0, .name = "" },
+ { .fc_id = 455, .cpu_id = 304, .valid = 0, .name = "" },
+ { .fc_id = 456, .cpu_id = 305, .valid = 0, .name = "" },
+ { .fc_id = 457, .cpu_id = 306, .valid = 0, .name = "" },
+ { .fc_id = 458, .cpu_id = 307, .valid = 0, .name = "" },
+ { .fc_id = 459, .cpu_id = 308, .valid = 0, .name = "" },
+ { .fc_id = 460, .cpu_id = 309, .valid = 0, .name = "" },
+ { .fc_id = 461, .cpu_id = 310, .valid = 0, .name = "" },
+ { .fc_id = 462, .cpu_id = 311, .valid = 0, .name = "" },
+ { .fc_id = 463, .cpu_id = 312, .valid = 0, .name = "" },
+ { .fc_id = 464, .cpu_id = 313, .valid = 0, .name = "" },
+ { .fc_id = 465, .cpu_id = 314, .valid = 0, .name = "" },
+ { .fc_id = 466, .cpu_id = 315, .valid = 0, .name = "" },
+ { .fc_id = 467, .cpu_id = 316, .valid = 0, .name = "" },
+ { .fc_id = 468, .cpu_id = 317, .valid = 0, .name = "" },
+ { .fc_id = 469, .cpu_id = 318, .valid = 0, .name = "" },
+ { .fc_id = 470, .cpu_id = 319, .valid = 0, .name = "" },
+ { .fc_id = 471, .cpu_id = 320, .valid = 0, .name = "" },
+ { .fc_id = 472, .cpu_id = 321, .valid = 0, .name = "" },
+ { .fc_id = 473, .cpu_id = 322, .valid = 0, .name = "" },
+ { .fc_id = 474, .cpu_id = 323, .valid = 0, .name = "" },
+ { .fc_id = 475, .cpu_id = 324, .valid = 0, .name = "" },
+ { .fc_id = 476, .cpu_id = 325, .valid = 0, .name = "" },
+ { .fc_id = 477, .cpu_id = 326, .valid = 0, .name = "" },
+ { .fc_id = 478, .cpu_id = 327, .valid = 0, .name = "" },
+ { .fc_id = 479, .cpu_id = 328, .valid = 0, .name = "" },
+ { .fc_id = 480, .cpu_id = 329, .valid = 0, .name = "" },
+ { .fc_id = 481, .cpu_id = 330, .valid = 0, .name = "" },
+ { .fc_id = 482, .cpu_id = 331, .valid = 0, .name = "" },
+ { .fc_id = 483, .cpu_id = 332, .valid = 0, .name = "" },
+ { .fc_id = 484, .cpu_id = 333, .valid = 1, .name = "PI_UPDATE" },
+ { .fc_id = 485, .cpu_id = 334, .valid = 1, .name = "HALT_MACHINE" },
+ { .fc_id = 486, .cpu_id = 335, .valid = 1, .name = "INTS_REGISTER" },
+ { .fc_id = 487, .cpu_id = 336, .valid = 1, .name = "SOFT_RESET" },
+ { .fc_id = 488, .cpu_id = 337, .valid = 0, .name = "" },
+ { .fc_id = 489, .cpu_id = 338, .valid = 0, .name = "" },
+ { .fc_id = 490, .cpu_id = 339, .valid = 0, .name = "" },
+ { .fc_id = 491, .cpu_id = 340, .valid = 0, .name = "" },
+ { .fc_id = 492, .cpu_id = 341, .valid = 0, .name = "" },
+ { .fc_id = 493, .cpu_id = 342, .valid = 0, .name = "" },
+ { .fc_id = 494, .cpu_id = 343, .valid = 0, .name = "" },
+ { .fc_id = 495, .cpu_id = 344, .valid = 0, .name = "" },
+ { .fc_id = 496, .cpu_id = 345, .valid = 0, .name = "" },
+ { .fc_id = 497, .cpu_id = 346, .valid = 0, .name = "" },
+ { .fc_id = 498, .cpu_id = 347, .valid = 0, .name = "" },
+ { .fc_id = 499, .cpu_id = 348, .valid = 0, .name = "" },
+ { .fc_id = 500, .cpu_id = 349, .valid = 0, .name = "" },
+ { .fc_id = 501, .cpu_id = 350, .valid = 0, .name = "" },
+ { .fc_id = 502, .cpu_id = 351, .valid = 0, .name = "" },
+ { .fc_id = 503, .cpu_id = 352, .valid = 0, .name = "" },
+ { .fc_id = 504, .cpu_id = 353, .valid = 0, .name = "" },
+ { .fc_id = 505, .cpu_id = 354, .valid = 0, .name = "" },
+ { .fc_id = 506, .cpu_id = 355, .valid = 0, .name = "" },
+ { .fc_id = 507, .cpu_id = 356, .valid = 0, .name = "" },
+ { .fc_id = 508, .cpu_id = 357, .valid = 0, .name = "" },
+ { .fc_id = 509, .cpu_id = 358, .valid = 0, .name = "" },
+ { .fc_id = 510, .cpu_id = 359, .valid = 0, .name = "" },
+ { .fc_id = 511, .cpu_id = 360, .valid = 0, .name = "" },
+ { .fc_id = 512, .cpu_id = 361, .valid = 0, .name = "" },
+ { .fc_id = 513, .cpu_id = 362, .valid = 0, .name = "" },
+ { .fc_id = 514, .cpu_id = 363, .valid = 0, .name = "" },
+ { .fc_id = 515, .cpu_id = 364, .valid = 0, .name = "" },
+ { .fc_id = 516, .cpu_id = 365, .valid = 0, .name = "" },
+ { .fc_id = 517, .cpu_id = 366, .valid = 0, .name = "" },
+ { .fc_id = 518, .cpu_id = 367, .valid = 0, .name = "" },
+ { .fc_id = 519, .cpu_id = 368, .valid = 0, .name = "" },
+ { .fc_id = 520, .cpu_id = 369, .valid = 0, .name = "" },
+ { .fc_id = 521, .cpu_id = 370, .valid = 0, .name = "" },
+ { .fc_id = 522, .cpu_id = 371, .valid = 0, .name = "" },
+ { .fc_id = 523, .cpu_id = 372, .valid = 0, .name = "" },
+ { .fc_id = 524, .cpu_id = 373, .valid = 0, .name = "" },
+ { .fc_id = 525, .cpu_id = 374, .valid = 0, .name = "" },
+ { .fc_id = 526, .cpu_id = 375, .valid = 0, .name = "" },
+ { .fc_id = 527, .cpu_id = 376, .valid = 0, .name = "" },
+ { .fc_id = 528, .cpu_id = 377, .valid = 0, .name = "" },
+ { .fc_id = 529, .cpu_id = 378, .valid = 0, .name = "" },
+ { .fc_id = 530, .cpu_id = 379, .valid = 0, .name = "" },
+ { .fc_id = 531, .cpu_id = 380, .valid = 0, .name = "" },
+ { .fc_id = 532, .cpu_id = 381, .valid = 0, .name = "" },
+ { .fc_id = 533, .cpu_id = 382, .valid = 0, .name = "" },
+ { .fc_id = 534, .cpu_id = 383, .valid = 0, .name = "" },
+ { .fc_id = 535, .cpu_id = 384, .valid = 0, .name = "" },
+ { .fc_id = 536, .cpu_id = 385, .valid = 0, .name = "" },
+ { .fc_id = 537, .cpu_id = 386, .valid = 0, .name = "" },
+ { .fc_id = 538, .cpu_id = 387, .valid = 0, .name = "" },
+ { .fc_id = 539, .cpu_id = 388, .valid = 0, .name = "" },
+ { .fc_id = 540, .cpu_id = 389, .valid = 0, .name = "" },
+ { .fc_id = 541, .cpu_id = 390, .valid = 0, .name = "" },
+ { .fc_id = 542, .cpu_id = 391, .valid = 0, .name = "" },
+ { .fc_id = 543, .cpu_id = 392, .valid = 0, .name = "" },
+ { .fc_id = 544, .cpu_id = 393, .valid = 0, .name = "" },
+ { .fc_id = 545, .cpu_id = 394, .valid = 0, .name = "" },
+ { .fc_id = 546, .cpu_id = 395, .valid = 0, .name = "" },
+ { .fc_id = 547, .cpu_id = 396, .valid = 0, .name = "" },
+ { .fc_id = 548, .cpu_id = 397, .valid = 1, .name = "RAZWI_OR_ADC" },
+ { .fc_id = 549, .cpu_id = 398, .valid = 0, .name = "" },
+ { .fc_id = 550, .cpu_id = 399, .valid = 0, .name = "" },
+ { .fc_id = 551, .cpu_id = 400, .valid = 0, .name = "" },
+ { .fc_id = 552, .cpu_id = 401, .valid = 0, .name = "" },
+ { .fc_id = 553, .cpu_id = 402, .valid = 0, .name = "" },
+ { .fc_id = 554, .cpu_id = 403, .valid = 0, .name = "" },
+ { .fc_id = 555, .cpu_id = 404, .valid = 0, .name = "" },
+ { .fc_id = 556, .cpu_id = 405, .valid = 0, .name = "" },
+ { .fc_id = 557, .cpu_id = 406, .valid = 0, .name = "" },
+ { .fc_id = 558, .cpu_id = 407, .valid = 0, .name = "" },
+ { .fc_id = 559, .cpu_id = 408, .valid = 0, .name = "" },
+ { .fc_id = 560, .cpu_id = 409, .valid = 0, .name = "" },
+ { .fc_id = 561, .cpu_id = 410, .valid = 0, .name = "" },
+ { .fc_id = 562, .cpu_id = 411, .valid = 0, .name = "" },
+ { .fc_id = 563, .cpu_id = 412, .valid = 0, .name = "" },
+ { .fc_id = 564, .cpu_id = 413, .valid = 0, .name = "" },
+ { .fc_id = 565, .cpu_id = 414, .valid = 0, .name = "" },
+ { .fc_id = 566, .cpu_id = 415, .valid = 0, .name = "" },
+ { .fc_id = 567, .cpu_id = 416, .valid = 0, .name = "" },
+ { .fc_id = 568, .cpu_id = 417, .valid = 0, .name = "" },
+ { .fc_id = 569, .cpu_id = 418, .valid = 0, .name = "" },
+ { .fc_id = 570, .cpu_id = 419, .valid = 0, .name = "" },
+ { .fc_id = 571, .cpu_id = 420, .valid = 0, .name = "" },
+ { .fc_id = 572, .cpu_id = 421, .valid = 1, .name = "TPC0_QM" },
+ { .fc_id = 573, .cpu_id = 422, .valid = 1, .name = "TPC1_QM" },
+ { .fc_id = 574, .cpu_id = 423, .valid = 1, .name = "TPC2_QM" },
+ { .fc_id = 575, .cpu_id = 424, .valid = 1, .name = "TPC3_QM" },
+ { .fc_id = 576, .cpu_id = 425, .valid = 1, .name = "TPC4_QM" },
+ { .fc_id = 577, .cpu_id = 426, .valid = 1, .name = "TPC5_QM" },
+ { .fc_id = 578, .cpu_id = 427, .valid = 1, .name = "TPC6_QM" },
+ { .fc_id = 579, .cpu_id = 428, .valid = 1, .name = "TPC7_QM" },
+ { .fc_id = 580, .cpu_id = 429, .valid = 0, .name = "" },
+ { .fc_id = 581, .cpu_id = 430, .valid = 1, .name = "MME0_QM" },
+ { .fc_id = 582, .cpu_id = 431, .valid = 1, .name = "MME2_QM" },
+ { .fc_id = 583, .cpu_id = 432, .valid = 1, .name = "DMA0_QM" },
+ { .fc_id = 584, .cpu_id = 433, .valid = 1, .name = "DMA1_QM" },
+ { .fc_id = 585, .cpu_id = 434, .valid = 1, .name = "DMA2_QM" },
+ { .fc_id = 586, .cpu_id = 435, .valid = 1, .name = "DMA3_QM" },
+ { .fc_id = 587, .cpu_id = 436, .valid = 1, .name = "DMA4_QM" },
+ { .fc_id = 588, .cpu_id = 437, .valid = 1, .name = "DMA5_QM" },
+ { .fc_id = 589, .cpu_id = 438, .valid = 1, .name = "DMA6_QM" },
+ { .fc_id = 590, .cpu_id = 439, .valid = 1, .name = "DMA7_QM" },
+ { .fc_id = 591, .cpu_id = 440, .valid = 0, .name = "" },
+ { .fc_id = 592, .cpu_id = 441, .valid = 0, .name = "" },
+ { .fc_id = 593, .cpu_id = 442, .valid = 0, .name = "" },
+ { .fc_id = 594, .cpu_id = 443, .valid = 1, .name = "NIC0_QM0" },
+ { .fc_id = 595, .cpu_id = 444, .valid = 1, .name = "NIC0_QM1" },
+ { .fc_id = 596, .cpu_id = 445, .valid = 1, .name = "NIC1_QM0" },
+ { .fc_id = 597, .cpu_id = 446, .valid = 1, .name = "NIC1_QM1" },
+ { .fc_id = 598, .cpu_id = 447, .valid = 1, .name = "NIC2_QM0" },
+ { .fc_id = 599, .cpu_id = 448, .valid = 1, .name = "NIC2_QM1" },
+ { .fc_id = 600, .cpu_id = 449, .valid = 1, .name = "NIC3_QM0" },
+ { .fc_id = 601, .cpu_id = 450, .valid = 1, .name = "NIC3_QM1" },
+ { .fc_id = 602, .cpu_id = 451, .valid = 1, .name = "NIC4_QM0" },
+ { .fc_id = 603, .cpu_id = 452, .valid = 1, .name = "NIC4_QM1" },
+ { .fc_id = 604, .cpu_id = 453, .valid = 1, .name = "DMA0_CORE" },
+ { .fc_id = 605, .cpu_id = 454, .valid = 1, .name = "DMA1_CORE" },
+ { .fc_id = 606, .cpu_id = 455, .valid = 1, .name = "DMA2_CORE" },
+ { .fc_id = 607, .cpu_id = 456, .valid = 1, .name = "DMA3_CORE" },
+ { .fc_id = 608, .cpu_id = 457, .valid = 1, .name = "DMA4_CORE" },
+ { .fc_id = 609, .cpu_id = 458, .valid = 1, .name = "DMA5_CORE" },
+ { .fc_id = 610, .cpu_id = 459, .valid = 1, .name = "DMA6_CORE" },
+ { .fc_id = 611, .cpu_id = 460, .valid = 1, .name = "DMA7_CORE" },
+ { .fc_id = 612, .cpu_id = 461, .valid = 1, .name = "NIC0_QP0" },
+ { .fc_id = 613, .cpu_id = 462, .valid = 1, .name = "NIC0_QP1" },
+ { .fc_id = 614, .cpu_id = 463, .valid = 1, .name = "NIC1_QP0" },
+ { .fc_id = 615, .cpu_id = 464, .valid = 1, .name = "NIC1_QP1" },
+ { .fc_id = 616, .cpu_id = 465, .valid = 1, .name = "NIC2_QP0" },
+ { .fc_id = 617, .cpu_id = 466, .valid = 1, .name = "NIC2_QP1" },
+ { .fc_id = 618, .cpu_id = 467, .valid = 1, .name = "NIC3_QP0" },
+ { .fc_id = 619, .cpu_id = 468, .valid = 1, .name = "NIC3_QP1" },
+ { .fc_id = 620, .cpu_id = 469, .valid = 1, .name = "NIC4_QP0" },
+ { .fc_id = 621, .cpu_id = 470, .valid = 1, .name = "NIC4_QP1" },
+ { .fc_id = 622, .cpu_id = 471, .valid = 0, .name = "" },
+ { .fc_id = 623, .cpu_id = 472, .valid = 0, .name = "" },
+ { .fc_id = 624, .cpu_id = 473, .valid = 0, .name = "" },
+ { .fc_id = 625, .cpu_id = 474, .valid = 0, .name = "" },
+ { .fc_id = 626, .cpu_id = 475, .valid = 0, .name = "" },
+ { .fc_id = 627, .cpu_id = 476, .valid = 0, .name = "" },
+ { .fc_id = 628, .cpu_id = 477, .valid = 0, .name = "" },
+ { .fc_id = 629, .cpu_id = 478, .valid = 0, .name = "" },
+ { .fc_id = 630, .cpu_id = 479, .valid = 0, .name = "" },
+ { .fc_id = 631, .cpu_id = 480, .valid = 0, .name = "" },
+ { .fc_id = 632, .cpu_id = 481, .valid = 0, .name = "" },
+ { .fc_id = 633, .cpu_id = 482, .valid = 0, .name = "" },
+ { .fc_id = 634, .cpu_id = 483, .valid = 0, .name = "" },
+ { .fc_id = 635, .cpu_id = 484, .valid = 0, .name = "" },
+ { .fc_id = 636, .cpu_id = 485, .valid = 0, .name = "" },
+ { .fc_id = 637, .cpu_id = 486, .valid = 0, .name = "" },
+ { .fc_id = 638, .cpu_id = 487, .valid = 0, .name = "" },
+ { .fc_id = 639, .cpu_id = 488, .valid = 0, .name = "" },
+ { .fc_id = 640, .cpu_id = 489, .valid = 0, .name = "" },
+ { .fc_id = 641, .cpu_id = 490, .valid = 0, .name = "" },
+ { .fc_id = 642, .cpu_id = 491, .valid = 0, .name = "" },
+ { .fc_id = 643, .cpu_id = 492, .valid = 0, .name = "" },
+ { .fc_id = 644, .cpu_id = 493, .valid = 0, .name = "" },
+ { .fc_id = 645, .cpu_id = 494, .valid = 0, .name = "" },
+ { .fc_id = 646, .cpu_id = 495, .valid = 0, .name = "" },
+ { .fc_id = 647, .cpu_id = 496, .valid = 0, .name = "" },
+ { .fc_id = 648, .cpu_id = 497, .valid = 0, .name = "" },
+ { .fc_id = 649, .cpu_id = 498, .valid = 0, .name = "" },
+ { .fc_id = 650, .cpu_id = 499, .valid = 0, .name = "" },
+ { .fc_id = 651, .cpu_id = 500, .valid = 0, .name = "" },
+ { .fc_id = 652, .cpu_id = 501, .valid = 0, .name = "" },
+ { .fc_id = 653, .cpu_id = 502, .valid = 0, .name = "" },
+ { .fc_id = 654, .cpu_id = 503, .valid = 0, .name = "" },
+ { .fc_id = 655, .cpu_id = 504, .valid = 0, .name = "" },
+ { .fc_id = 656, .cpu_id = 505, .valid = 0, .name = "" },
+ { .fc_id = 657, .cpu_id = 506, .valid = 0, .name = "" },
+ { .fc_id = 658, .cpu_id = 507, .valid = 1, .name = "FIX_POWER_ENV_S" },
+ { .fc_id = 659, .cpu_id = 508, .valid = 1, .name = "FIX_POWER_ENV_E" },
+ { .fc_id = 660, .cpu_id = 509, .valid = 1,
+ .name = "FIX_THERMAL_ENV_S" },
+ { .fc_id = 661, .cpu_id = 510, .valid = 1,
+ .name = "FIX_THERMAL_ENV_E" },
+ { .fc_id = 662, .cpu_id = 511, .valid = 1, .name = "RAZWI_OR_ADC_SW" },
+};
+
+#endif /* __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
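Each entry's .fc_id equals its array index, so the table supports direct O(1) lookup rather than a search. A minimal sketch, assuming the kernel's ARRAY_SIZE() helper (the function name is hypothetical):

/*
 * Illustrative only: fc_id doubles as the index into
 * gaudi_irq_map_table[], so the name of a valid event can be
 * fetched without scanning the table.
 */
static const char *demo_gaudi_event_name(u16 fc_id)
{
	if (fc_id >= ARRAY_SIZE(gaudi_irq_map_table) ||
			!gaudi_irq_map_table[fc_id].valid)
		return "N/A";

	return gaudi_irq_map_table[fc_id].name;
}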
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h b/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h
new file mode 100644
index 000000000000..c45cc7f4d4d7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_CORESIGHT_H
+#define GAUDI_CORESIGHT_H
+
+enum gaudi_debug_stm_regs_index {
+ GAUDI_STM_FIRST = 0,
+ GAUDI_STM_MME0_ACC = GAUDI_STM_FIRST,
+ GAUDI_STM_MME0_SBAB,
+ GAUDI_STM_MME0_CTRL,
+ GAUDI_STM_MME1_ACC,
+ GAUDI_STM_MME1_SBAB,
+ GAUDI_STM_MME1_CTRL,
+ GAUDI_STM_MME2_ACC,
+ GAUDI_STM_MME2_SBAB,
+ GAUDI_STM_MME2_CTRL,
+ GAUDI_STM_MME3_ACC,
+ GAUDI_STM_MME3_SBAB,
+ GAUDI_STM_MME3_CTRL,
+ GAUDI_STM_DMA_IF_W_S,
+ GAUDI_STM_DMA_IF_E_S,
+ GAUDI_STM_DMA_IF_W_N,
+ GAUDI_STM_DMA_IF_E_N,
+ GAUDI_STM_CPU,
+ GAUDI_STM_DMA_CH_0_CS,
+ GAUDI_STM_DMA_CH_1_CS,
+ GAUDI_STM_DMA_CH_2_CS,
+ GAUDI_STM_DMA_CH_3_CS,
+ GAUDI_STM_DMA_CH_4_CS,
+ GAUDI_STM_DMA_CH_5_CS,
+ GAUDI_STM_DMA_CH_6_CS,
+ GAUDI_STM_DMA_CH_7_CS,
+ GAUDI_STM_PCIE,
+ GAUDI_STM_MMU_CS,
+ GAUDI_STM_PSOC,
+ GAUDI_STM_NIC0_0,
+ GAUDI_STM_NIC0_1,
+ GAUDI_STM_NIC1_0,
+ GAUDI_STM_NIC1_1,
+ GAUDI_STM_NIC2_0,
+ GAUDI_STM_NIC2_1,
+ GAUDI_STM_NIC3_0,
+ GAUDI_STM_NIC3_1,
+ GAUDI_STM_NIC4_0,
+ GAUDI_STM_NIC4_1,
+ GAUDI_STM_TPC0_EML,
+ GAUDI_STM_TPC1_EML,
+ GAUDI_STM_TPC2_EML,
+ GAUDI_STM_TPC3_EML,
+ GAUDI_STM_TPC4_EML,
+ GAUDI_STM_TPC5_EML,
+ GAUDI_STM_TPC6_EML,
+ GAUDI_STM_TPC7_EML,
+ GAUDI_STM_LAST = GAUDI_STM_TPC7_EML
+};
+
+enum gaudi_debug_etf_regs_index {
+ GAUDI_ETF_FIRST = 0,
+ GAUDI_ETF_MME0_ACC = GAUDI_ETF_FIRST,
+ GAUDI_ETF_MME0_SBAB,
+ GAUDI_ETF_MME0_CTRL,
+ GAUDI_ETF_MME1_ACC,
+ GAUDI_ETF_MME1_SBAB,
+ GAUDI_ETF_MME1_CTRL,
+ GAUDI_ETF_MME2_ACC,
+ GAUDI_ETF_MME2_SBAB,
+ GAUDI_ETF_MME2_CTRL,
+ GAUDI_ETF_MME3_ACC,
+ GAUDI_ETF_MME3_SBAB,
+ GAUDI_ETF_MME3_CTRL,
+ GAUDI_ETF_DMA_IF_W_S,
+ GAUDI_ETF_DMA_IF_E_S,
+ GAUDI_ETF_DMA_IF_W_N,
+ GAUDI_ETF_DMA_IF_E_N,
+ GAUDI_ETF_CPU_0,
+ GAUDI_ETF_CPU_1,
+ GAUDI_ETF_CPU_TRACE,
+ GAUDI_ETF_DMA_CH_0_CS,
+ GAUDI_ETF_DMA_CH_1_CS,
+ GAUDI_ETF_DMA_CH_2_CS,
+ GAUDI_ETF_DMA_CH_3_CS,
+ GAUDI_ETF_DMA_CH_4_CS,
+ GAUDI_ETF_DMA_CH_5_CS,
+ GAUDI_ETF_DMA_CH_6_CS,
+ GAUDI_ETF_DMA_CH_7_CS,
+ GAUDI_ETF_PCIE,
+ GAUDI_ETF_MMU_CS,
+ GAUDI_ETF_PSOC,
+ GAUDI_ETF_NIC0_0,
+ GAUDI_ETF_NIC0_1,
+ GAUDI_ETF_NIC1_0,
+ GAUDI_ETF_NIC1_1,
+ GAUDI_ETF_NIC2_0,
+ GAUDI_ETF_NIC2_1,
+ GAUDI_ETF_NIC3_0,
+ GAUDI_ETF_NIC3_1,
+ GAUDI_ETF_NIC4_0,
+ GAUDI_ETF_NIC4_1,
+ GAUDI_ETF_TPC0_EML,
+ GAUDI_ETF_TPC1_EML,
+ GAUDI_ETF_TPC2_EML,
+ GAUDI_ETF_TPC3_EML,
+ GAUDI_ETF_TPC4_EML,
+ GAUDI_ETF_TPC5_EML,
+ GAUDI_ETF_TPC6_EML,
+ GAUDI_ETF_TPC7_EML,
+ GAUDI_ETF_LAST = GAUDI_ETF_TPC7_EML
+};
+
+enum gaudi_debug_funnel_regs_index {
+ GAUDI_FUNNEL_FIRST = 0,
+ GAUDI_FUNNEL_MME0_ACC = GAUDI_FUNNEL_FIRST,
+ GAUDI_FUNNEL_MME1_ACC,
+ GAUDI_FUNNEL_MME2_ACC,
+ GAUDI_FUNNEL_MME3_ACC,
+ GAUDI_FUNNEL_SRAM_Y0_X0,
+ GAUDI_FUNNEL_SRAM_Y0_X1,
+ GAUDI_FUNNEL_SRAM_Y0_X2,
+ GAUDI_FUNNEL_SRAM_Y0_X3,
+ GAUDI_FUNNEL_SRAM_Y0_X4,
+ GAUDI_FUNNEL_SRAM_Y0_X5,
+ GAUDI_FUNNEL_SRAM_Y0_X6,
+ GAUDI_FUNNEL_SRAM_Y0_X7,
+ GAUDI_FUNNEL_SRAM_Y1_X0,
+ GAUDI_FUNNEL_SRAM_Y1_X1,
+ GAUDI_FUNNEL_SRAM_Y1_X2,
+ GAUDI_FUNNEL_SRAM_Y1_X3,
+ GAUDI_FUNNEL_SRAM_Y1_X4,
+ GAUDI_FUNNEL_SRAM_Y1_X5,
+ GAUDI_FUNNEL_SRAM_Y1_X6,
+ GAUDI_FUNNEL_SRAM_Y1_X7,
+ GAUDI_FUNNEL_SRAM_Y2_X0,
+ GAUDI_FUNNEL_SRAM_Y2_X1,
+ GAUDI_FUNNEL_SRAM_Y2_X2,
+ GAUDI_FUNNEL_SRAM_Y2_X3,
+ GAUDI_FUNNEL_SRAM_Y2_X4,
+ GAUDI_FUNNEL_SRAM_Y2_X5,
+ GAUDI_FUNNEL_SRAM_Y2_X6,
+ GAUDI_FUNNEL_SRAM_Y2_X7,
+ GAUDI_FUNNEL_SRAM_Y3_X0,
+ GAUDI_FUNNEL_SRAM_Y3_X1,
+ GAUDI_FUNNEL_SRAM_Y3_X2,
+ GAUDI_FUNNEL_SRAM_Y3_X3,
+ GAUDI_FUNNEL_SRAM_Y3_X4,
+ GAUDI_FUNNEL_SRAM_Y3_X5,
+ GAUDI_FUNNEL_SRAM_Y3_X6,
+ GAUDI_FUNNEL_SRAM_Y3_X7,
+ GAUDI_FUNNEL_SIF_0,
+ GAUDI_FUNNEL_SIF_1,
+ GAUDI_FUNNEL_SIF_2,
+ GAUDI_FUNNEL_SIF_3,
+ GAUDI_FUNNEL_SIF_4,
+ GAUDI_FUNNEL_SIF_5,
+ GAUDI_FUNNEL_SIF_6,
+ GAUDI_FUNNEL_SIF_7,
+ GAUDI_FUNNEL_NIF_0,
+ GAUDI_FUNNEL_NIF_1,
+ GAUDI_FUNNEL_NIF_2,
+ GAUDI_FUNNEL_NIF_3,
+ GAUDI_FUNNEL_NIF_4,
+ GAUDI_FUNNEL_NIF_5,
+ GAUDI_FUNNEL_NIF_6,
+ GAUDI_FUNNEL_NIF_7,
+ GAUDI_FUNNEL_DMA_IF_W_S,
+ GAUDI_FUNNEL_DMA_IF_E_S,
+ GAUDI_FUNNEL_DMA_IF_W_N,
+ GAUDI_FUNNEL_DMA_IF_E_N,
+ GAUDI_FUNNEL_CPU,
+ GAUDI_FUNNEL_NIC_TPC_W_S,
+ GAUDI_FUNNEL_NIC_TPC_E_S,
+ GAUDI_FUNNEL_NIC_TPC_W_N,
+ GAUDI_FUNNEL_NIC_TPC_E_N,
+ GAUDI_FUNNEL_PCIE,
+ GAUDI_FUNNEL_PSOC,
+ GAUDI_FUNNEL_NIC0,
+ GAUDI_FUNNEL_NIC1,
+ GAUDI_FUNNEL_NIC2,
+ GAUDI_FUNNEL_NIC3,
+ GAUDI_FUNNEL_NIC4,
+ GAUDI_FUNNEL_TPC0_EML,
+ GAUDI_FUNNEL_TPC1_EML,
+ GAUDI_FUNNEL_TPC2_EML,
+ GAUDI_FUNNEL_TPC3_EML,
+ GAUDI_FUNNEL_TPC4_EML,
+ GAUDI_FUNNEL_TPC5_EML,
+ GAUDI_FUNNEL_TPC6_EML,
+ GAUDI_FUNNEL_TPC7_EML,
+ GAUDI_FUNNEL_LAST = GAUDI_FUNNEL_TPC7_EML
+};
+
+enum gaudi_debug_bmon_regs_index {
+ GAUDI_BMON_FIRST = 0,
+ GAUDI_BMON_MME0_ACC_0 = GAUDI_BMON_FIRST,
+ GAUDI_BMON_MME0_SBAB_0,
+ GAUDI_BMON_MME0_SBAB_1,
+ GAUDI_BMON_MME0_CTRL_0,
+ GAUDI_BMON_MME0_CTRL_1,
+ GAUDI_BMON_MME1_ACC_0,
+ GAUDI_BMON_MME1_SBAB_0,
+ GAUDI_BMON_MME1_SBAB_1,
+ GAUDI_BMON_MME1_CTRL_0,
+ GAUDI_BMON_MME1_CTRL_1,
+ GAUDI_BMON_MME2_ACC_0,
+ GAUDI_BMON_MME2_SBAB_0,
+ GAUDI_BMON_MME2_SBAB_1,
+ GAUDI_BMON_MME2_CTRL_0,
+ GAUDI_BMON_MME2_CTRL_1,
+ GAUDI_BMON_MME3_ACC_0,
+ GAUDI_BMON_MME3_SBAB_0,
+ GAUDI_BMON_MME3_SBAB_1,
+ GAUDI_BMON_MME3_CTRL_0,
+ GAUDI_BMON_MME3_CTRL_1,
+ GAUDI_BMON_DMA_IF_W_S_SOB_WR,
+ GAUDI_BMON_DMA_IF_W_S_0_WR,
+ GAUDI_BMON_DMA_IF_W_S_0_RD,
+ GAUDI_BMON_DMA_IF_W_S_1_WR,
+ GAUDI_BMON_DMA_IF_W_S_1_RD,
+ GAUDI_BMON_DMA_IF_E_S_SOB_WR,
+ GAUDI_BMON_DMA_IF_E_S_0_WR,
+ GAUDI_BMON_DMA_IF_E_S_0_RD,
+ GAUDI_BMON_DMA_IF_E_S_1_WR,
+ GAUDI_BMON_DMA_IF_E_S_1_RD,
+ GAUDI_BMON_DMA_IF_W_N_SOB_WR,
+ GAUDI_BMON_DMA_IF_W_N_HBM0_WR,
+ GAUDI_BMON_DMA_IF_W_N_HBM0_RD,
+ GAUDI_BMON_DMA_IF_W_N_HBM1_WR,
+ GAUDI_BMON_DMA_IF_W_N_HBM1_RD,
+ GAUDI_BMON_DMA_IF_E_N_SOB_WR,
+ GAUDI_BMON_DMA_IF_E_N_HBM0_WR,
+ GAUDI_BMON_DMA_IF_E_N_HBM0_RD,
+ GAUDI_BMON_DMA_IF_E_N_HBM1_WR,
+ GAUDI_BMON_DMA_IF_E_N_HBM1_RD,
+ GAUDI_BMON_CPU_WR,
+ GAUDI_BMON_CPU_RD,
+ GAUDI_BMON_DMA_CH_0_0,
+ GAUDI_BMON_DMA_CH_0_1,
+ GAUDI_BMON_DMA_CH_1_0,
+ GAUDI_BMON_DMA_CH_1_1,
+ GAUDI_BMON_DMA_CH_2_0,
+ GAUDI_BMON_DMA_CH_2_1,
+ GAUDI_BMON_DMA_CH_3_0,
+ GAUDI_BMON_DMA_CH_3_1,
+ GAUDI_BMON_DMA_CH_4_0,
+ GAUDI_BMON_DMA_CH_4_1,
+ GAUDI_BMON_DMA_CH_5_0,
+ GAUDI_BMON_DMA_CH_5_1,
+ GAUDI_BMON_DMA_CH_6_0,
+ GAUDI_BMON_DMA_CH_6_1,
+ GAUDI_BMON_DMA_CH_7_0,
+ GAUDI_BMON_DMA_CH_7_1,
+ GAUDI_BMON_PCIE_MSTR_WR,
+ GAUDI_BMON_PCIE_MSTR_RD,
+ GAUDI_BMON_PCIE_SLV_WR,
+ GAUDI_BMON_PCIE_SLV_RD,
+ GAUDI_BMON_MMU_0,
+ GAUDI_BMON_MMU_1,
+ GAUDI_BMON_NIC0_0,
+ GAUDI_BMON_NIC0_1,
+ GAUDI_BMON_NIC0_2,
+ GAUDI_BMON_NIC0_3,
+ GAUDI_BMON_NIC0_4,
+ GAUDI_BMON_NIC1_0,
+ GAUDI_BMON_NIC1_1,
+ GAUDI_BMON_NIC1_2,
+ GAUDI_BMON_NIC1_3,
+ GAUDI_BMON_NIC1_4,
+ GAUDI_BMON_NIC2_0,
+ GAUDI_BMON_NIC2_1,
+ GAUDI_BMON_NIC2_2,
+ GAUDI_BMON_NIC2_3,
+ GAUDI_BMON_NIC2_4,
+ GAUDI_BMON_NIC3_0,
+ GAUDI_BMON_NIC3_1,
+ GAUDI_BMON_NIC3_2,
+ GAUDI_BMON_NIC3_3,
+ GAUDI_BMON_NIC3_4,
+ GAUDI_BMON_NIC4_0,
+ GAUDI_BMON_NIC4_1,
+ GAUDI_BMON_NIC4_2,
+ GAUDI_BMON_NIC4_3,
+ GAUDI_BMON_NIC4_4,
+ GAUDI_BMON_TPC0_EML_0,
+ GAUDI_BMON_TPC0_EML_1,
+ GAUDI_BMON_TPC0_EML_2,
+ GAUDI_BMON_TPC0_EML_3,
+ GAUDI_BMON_TPC1_EML_0,
+ GAUDI_BMON_TPC1_EML_1,
+ GAUDI_BMON_TPC1_EML_2,
+ GAUDI_BMON_TPC1_EML_3,
+ GAUDI_BMON_TPC2_EML_0,
+ GAUDI_BMON_TPC2_EML_1,
+ GAUDI_BMON_TPC2_EML_2,
+ GAUDI_BMON_TPC2_EML_3,
+ GAUDI_BMON_TPC3_EML_0,
+ GAUDI_BMON_TPC3_EML_1,
+ GAUDI_BMON_TPC3_EML_2,
+ GAUDI_BMON_TPC3_EML_3,
+ GAUDI_BMON_TPC4_EML_0,
+ GAUDI_BMON_TPC4_EML_1,
+ GAUDI_BMON_TPC4_EML_2,
+ GAUDI_BMON_TPC4_EML_3,
+ GAUDI_BMON_TPC5_EML_0,
+ GAUDI_BMON_TPC5_EML_1,
+ GAUDI_BMON_TPC5_EML_2,
+ GAUDI_BMON_TPC5_EML_3,
+ GAUDI_BMON_TPC6_EML_0,
+ GAUDI_BMON_TPC6_EML_1,
+ GAUDI_BMON_TPC6_EML_2,
+ GAUDI_BMON_TPC6_EML_3,
+ GAUDI_BMON_TPC7_EML_0,
+ GAUDI_BMON_TPC7_EML_1,
+ GAUDI_BMON_TPC7_EML_2,
+ GAUDI_BMON_TPC7_EML_3,
+ GAUDI_BMON_LAST = GAUDI_BMON_TPC7_EML_3
+};
+
+enum gaudi_debug_spmu_regs_index {
+ GAUDI_SPMU_FIRST = 0,
+ GAUDI_SPMU_MME0_ACC = GAUDI_SPMU_FIRST,
+ GAUDI_SPMU_MME0_SBAB,
+ GAUDI_SPMU_MME0_CTRL,
+ GAUDI_SPMU_MME1_ACC,
+ GAUDI_SPMU_MME1_SBAB,
+ GAUDI_SPMU_MME1_CTRL,
+ GAUDI_SPMU_MME2_ACC,
+ GAUDI_SPMU_MME2_SBAB,
+ GAUDI_SPMU_MME2_CTRL,
+ GAUDI_SPMU_MME3_ACC,
+ GAUDI_SPMU_MME3_SBAB,
+ GAUDI_SPMU_MME3_CTRL,
+ GAUDI_SPMU_DMA_CH_0_CS,
+ GAUDI_SPMU_DMA_CH_1_CS,
+ GAUDI_SPMU_DMA_CH_2_CS,
+ GAUDI_SPMU_DMA_CH_3_CS,
+ GAUDI_SPMU_DMA_CH_4_CS,
+ GAUDI_SPMU_DMA_CH_5_CS,
+ GAUDI_SPMU_DMA_CH_6_CS,
+ GAUDI_SPMU_DMA_CH_7_CS,
+ GAUDI_SPMU_PCIE,
+ GAUDI_SPMU_MMU_CS,
+ GAUDI_SPMU_NIC0_0,
+ GAUDI_SPMU_NIC0_1,
+ GAUDI_SPMU_NIC1_0,
+ GAUDI_SPMU_NIC1_1,
+ GAUDI_SPMU_NIC2_0,
+ GAUDI_SPMU_NIC2_1,
+ GAUDI_SPMU_NIC3_0,
+ GAUDI_SPMU_NIC3_1,
+ GAUDI_SPMU_NIC4_0,
+ GAUDI_SPMU_NIC4_1,
+ GAUDI_SPMU_TPC0_EML,
+ GAUDI_SPMU_TPC1_EML,
+ GAUDI_SPMU_TPC2_EML,
+ GAUDI_SPMU_TPC3_EML,
+ GAUDI_SPMU_TPC4_EML,
+ GAUDI_SPMU_TPC5_EML,
+ GAUDI_SPMU_TPC6_EML,
+ GAUDI_SPMU_TPC7_EML,
+ GAUDI_SPMU_LAST = GAUDI_SPMU_TPC7_EML
+};
+
+#endif /* GAUDI_CORESIGHT_H */
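Every debug-component enum above is bracketed by matching _FIRST/_LAST aliases, which lets callers walk an entire component class without hard-coding its size. A sketch, with the per-unit action left as a placeholder:

/*
 * Illustrative only: iterate one CoreSight component class via its
 * FIRST/LAST bounds; the loop body is a stand-in for real
 * per-unit programming.
 */
static void demo_walk_stm_units(void)
{
	int i;

	for (i = GAUDI_STM_FIRST; i <= GAUDI_STM_LAST; i++)
		; /* e.g. resolve the i-th STM base address and configure it */
}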
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
new file mode 100644
index 000000000000..8aadc6357da1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_FW_IF_H
+#define GAUDI_FW_IF_H
+
+#define GAUDI_EVENT_QUEUE_MSI_IDX 8
+#define GAUDI_NIC_PORT1_MSI_IDX 10
+#define GAUDI_NIC_PORT3_MSI_IDX 12
+#define GAUDI_NIC_PORT5_MSI_IDX 14
+#define GAUDI_NIC_PORT7_MSI_IDX 16
+#define GAUDI_NIC_PORT9_MSI_IDX 18
+
+#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
+#define LINUX_FW_OFFSET 0x800000 /* 8MB in HBM */
+
+enum gaudi_pll_index {
+ CPU_PLL = 0,
+ PCI_PLL,
+ SRAM_PLL,
+ HBM_PLL,
+ NIC_PLL,
+ DMA_PLL,
+ MESH_PLL,
+ MME_PLL,
+ TPC_PLL,
+ IF_PLL
+};
+
+#define GAUDI_PLL_FREQ_LOW 200000000 /* 200 MHz */
+
+#endif /* GAUDI_FW_IF_H */
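enum gaudi_pll_index is densely numbered from 0, so it can index a per-PLL table directly. A sketch with a hypothetical frequency array (not part of this patch):

/*
 * Illustrative only: use the PLL enum as a dense array index. The
 * array and function are hypothetical; GAUDI_PLL_FREQ_LOW is the
 * 200 MHz constant defined above.
 */
static u32 demo_pll_freq_hz[IF_PLL + 1];

static void demo_throttle_all_plls(void)
{
	int i;

	for (i = CPU_PLL; i <= IF_PLL; i++)
		demo_pll_freq_hz[i] = GAUDI_PLL_FREQ_LOW;
}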
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
new file mode 100644
index 000000000000..96f08050ef0f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_MASKS_H_
+#define GAUDI_MASKS_H_
+
+#include "asic_reg/gaudi_regs.h"
+
+/* Useful masks for bits in various registers */
+#define PCI_DMA_QMAN_ENABLE (\
+ (0xF << DMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_EXTERNAL_MAKE_TRUSTED (\
+ (0xF << DMA0_QM_GLBL_PROT_PQF_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_PROT_CQF_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_PROT_CP_SHIFT) | \
+ (0x1 << DMA0_QM_GLBL_PROT_ERR_SHIFT))
+
+#define QMAN_INTERNAL_MAKE_TRUSTED (\
+ (0xF << DMA0_QM_GLBL_PROT_PQF_SHIFT) | \
+ (0x1 << DMA0_QM_GLBL_PROT_ERR_SHIFT))
+
+#define HBM_DMA_QMAN_ENABLE (\
+ (0xF << DMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_MME_ENABLE (\
+ (0xF << MME0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_TPC_ENABLE (\
+ (0xF << TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_UPPER_CP_CGM_PWR_GATE_EN (\
+ (0x20 << DMA0_QM_CGM_CFG_IDLE_TH_SHIFT) | \
+ (0xA << DMA0_QM_CGM_CFG_G2F_TH_SHIFT) | \
+ (0x10 << DMA0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT) | \
+ (1 << DMA0_QM_CGM_CFG_EN_SHIFT))
+
+#define QMAN_COMMON_CP_CGM_PWR_GATE_EN (\
+ (0x20 << DMA0_QM_CGM_CFG_IDLE_TH_SHIFT) | \
+ (0xA << DMA0_QM_CGM_CFG_G2F_TH_SHIFT) | \
+ (0xF << DMA0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT) | \
+ (1 << DMA0_QM_CGM_CFG_EN_SHIFT))
+
+#define PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
+ (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+
+#define PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
+ (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+
+#define HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
+ (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+
+#define HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
+ (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+
+#define TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
+ (0xF << TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+
+#define TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
+ (0xF << TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+
+#define MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
+ (0xF << MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+
+#define MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
+ (0xF << MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+
+#define QMAN_CGM1_PWR_GATE_EN (0xA << DMA0_QM_CGM_CFG1_MASK_TH_SHIFT)
+
+/* RESET registers configuration */
+#define CFG_RST_L_PSOC_SHIFT 0
+#define CFG_RST_L_PCIE_SHIFT 1
+#define CFG_RST_L_PCIE_IF_SHIFT 2
+#define CFG_RST_L_HBM_S_PLL_SHIFT 3
+#define CFG_RST_L_TPC_S_PLL_SHIFT 4
+#define CFG_RST_L_MME_S_PLL_SHIFT 5
+#define CFG_RST_L_CPU_PLL_SHIFT 6
+#define CFG_RST_L_PCIE_PLL_SHIFT 7
+#define CFG_RST_L_NIC_S_PLL_SHIFT 8
+#define CFG_RST_L_HBM_N_PLL_SHIFT 9
+#define CFG_RST_L_TPC_N_PLL_SHIFT 10
+#define CFG_RST_L_MME_N_PLL_SHIFT 11
+#define CFG_RST_L_NIC_N_PLL_SHIFT 12
+#define CFG_RST_L_DMA_W_PLL_SHIFT 13
+#define CFG_RST_L_SIF_W_PLL_SHIFT 14
+#define CFG_RST_L_MESH_W_PLL_SHIFT 15
+#define CFG_RST_L_SRAM_W_PLL_SHIFT 16
+#define CFG_RST_L_DMA_E_PLL_SHIFT 17
+#define CFG_RST_L_SIF_E_PLL_SHIFT 18
+#define CFG_RST_L_MESH_E_PLL_SHIFT 19
+#define CFG_RST_L_SRAM_E_PLL_SHIFT 20
+#define CFG_RST_L_IF_1_SHIFT 21
+#define CFG_RST_L_IF_0_SHIFT 22
+#define CFG_RST_L_IF_2_SHIFT 23
+#define CFG_RST_L_IF_3_SHIFT 24
+#define CFG_RST_L_TPC_0_SHIFT 25
+#define CFG_RST_L_TPC_1_SHIFT 26
+#define CFG_RST_L_TPC_2_SHIFT 27
+#define CFG_RST_L_TPC_3_SHIFT 28
+#define CFG_RST_L_TPC_4_SHIFT 29
+#define CFG_RST_L_TPC_5_SHIFT 30
+#define CFG_RST_L_TPC_6_SHIFT 31
+#define CFG_RST_H_TPC_7_SHIFT 0
+#define CFG_RST_H_MME_0_SHIFT 1
+#define CFG_RST_H_MME_1_SHIFT 2
+#define CFG_RST_H_MME_2_SHIFT 3
+#define CFG_RST_H_MME_3_SHIFT 4
+#define CFG_RST_H_HBM_0_SHIFT 5
+#define CFG_RST_H_HBM_1_SHIFT 6
+#define CFG_RST_H_HBM_2_SHIFT 7
+#define CFG_RST_H_HBM_3_SHIFT 8
+#define CFG_RST_H_NIC_0_SHIFT 9
+#define CFG_RST_H_NIC_1_SHIFT 10
+#define CFG_RST_H_NIC_2_SHIFT 11
+#define CFG_RST_H_NIC_3_SHIFT 12
+#define CFG_RST_H_NIC_4_SHIFT 13
+#define CFG_RST_H_SM_0_SHIFT 14
+#define CFG_RST_H_SM_1_SHIFT 15
+#define CFG_RST_H_SM_2_SHIFT 16
+#define CFG_RST_H_SM_3_SHIFT 17
+#define CFG_RST_H_DMA_0_SHIFT 18
+#define CFG_RST_H_DMA_1_SHIFT 19
+#define CFG_RST_H_CPU_SHIFT 20
+#define CFG_RST_H_MMU_SHIFT 21
+
+
+#define CFG_RST_H_DMA_MASK ((1 << CFG_RST_H_DMA_0_SHIFT) | \
+ (1 << CFG_RST_H_DMA_1_SHIFT))
+
+#define CFG_RST_H_CPU_MASK (1 << CFG_RST_H_CPU_SHIFT)
+#define CFG_RST_H_MMU_MASK (1 << CFG_RST_H_MMU_SHIFT)
+
+#define CFG_RST_H_HBM_MASK ((1 << CFG_RST_H_HBM_0_SHIFT) | \
+ (1 << CFG_RST_H_HBM_1_SHIFT) | \
+ (1 << CFG_RST_H_HBM_2_SHIFT) | \
+ (1 << CFG_RST_H_HBM_3_SHIFT))
+
+#define CFG_RST_H_NIC_MASK ((1 << CFG_RST_H_NIC_0_SHIFT) | \
+ (1 << CFG_RST_H_NIC_1_SHIFT) | \
+ (1 << CFG_RST_H_NIC_2_SHIFT) | \
+ (1 << CFG_RST_H_NIC_3_SHIFT) | \
+ (1 << CFG_RST_H_NIC_4_SHIFT))
+
+#define CFG_RST_H_SM_MASK ((1 << CFG_RST_H_SM_0_SHIFT) | \
+ (1 << CFG_RST_H_SM_1_SHIFT) | \
+ (1 << CFG_RST_H_SM_2_SHIFT) | \
+ (1 << CFG_RST_H_SM_3_SHIFT))
+
+#define CFG_RST_H_MME_MASK ((1 << CFG_RST_H_MME_0_SHIFT) | \
+ (1 << CFG_RST_H_MME_1_SHIFT) | \
+ (1 << CFG_RST_H_MME_2_SHIFT) | \
+ (1 << CFG_RST_H_MME_3_SHIFT))
+
+#define CFG_RST_L_PSOC_MASK (1 << CFG_RST_L_PSOC_SHIFT)
+
+#define CFG_RST_L_IF_MASK ((1 << CFG_RST_L_IF_0_SHIFT) | \
+ (1 << CFG_RST_L_IF_1_SHIFT) | \
+ (1 << CFG_RST_L_IF_2_SHIFT) | \
+ (1 << CFG_RST_L_IF_3_SHIFT))
+
+#define CFG_RST_L_TPC_MASK ((1 << CFG_RST_L_TPC_0_SHIFT) | \
+ (1 << CFG_RST_L_TPC_1_SHIFT) | \
+ (1 << CFG_RST_L_TPC_2_SHIFT) | \
+ (1 << CFG_RST_L_TPC_3_SHIFT) | \
+ (1 << CFG_RST_L_TPC_4_SHIFT) | \
+ (1 << CFG_RST_L_TPC_5_SHIFT) | \
+ (1 << CFG_RST_L_TPC_6_SHIFT))
+
+#define CFG_RST_H_TPC_MASK (1 << CFG_RST_H_TPC_7_SHIFT)
+
+#define CA53_RESET (1 << CFG_RST_H_CPU_SHIFT)
+
+#define UNIT_RST_L_PSOC_SHIFT 0
+#define UNIT_RST_L_PCIE_SHIFT 1
+#define UNIT_RST_L_PCIE_IF_SHIFT 2
+#define UNIT_RST_L_HBM_S_PLL_SHIFT 3
+#define UNIT_RST_L_TPC_S_PLL_SHIFT 4
+#define UNIT_RST_L_MME_S_PLL_SHIFT 5
+#define UNIT_RST_L_CPU_PLL_SHIFT 6
+#define UNIT_RST_L_PCIE_PLL_SHIFT 7
+#define UNIT_RST_L_NIC_S_PLL_SHIFT 8
+#define UNIT_RST_L_HBM_N_PLL_SHIFT 9
+#define UNIT_RST_L_TPC_N_PLL_SHIFT 10
+#define UNIT_RST_L_MME_N_PLL_SHIFT 11
+#define UNIT_RST_L_NIC_N_PLL_SHIFT 12
+#define UNIT_RST_L_DMA_W_PLL_SHIFT 13
+#define UNIT_RST_L_SIF_W_PLL_SHIFT 14
+#define UNIT_RST_L_MESH_W_PLL_SHIFT 15
+#define UNIT_RST_L_SRAM_W_PLL_SHIFT 16
+#define UNIT_RST_L_DMA_E_PLL_SHIFT 17
+#define UNIT_RST_L_SIF_E_PLL_SHIFT 18
+#define UNIT_RST_L_MESH_E_PLL_SHIFT 19
+#define UNIT_RST_L_SRAM_E_PLL_SHIFT 20
+#define UNIT_RST_L_TPC_0_SHIFT 21
+#define UNIT_RST_L_TPC_1_SHIFT 22
+#define UNIT_RST_L_TPC_2_SHIFT 23
+#define UNIT_RST_L_TPC_3_SHIFT 24
+#define UNIT_RST_L_TPC_4_SHIFT 25
+#define UNIT_RST_L_TPC_5_SHIFT 26
+#define UNIT_RST_L_TPC_6_SHIFT 27
+#define UNIT_RST_L_TPC_7_SHIFT 28
+#define UNIT_RST_L_MME_0_SHIFT 29
+#define UNIT_RST_L_MME_1_SHIFT 30
+#define UNIT_RST_L_MME_2_SHIFT 31
+
+#define UNIT_RST_H_MME_3_SHIFT 0
+#define UNIT_RST_H_HBM_0_SHIFT 1
+#define UNIT_RST_H_HBM_1_SHIFT 2
+#define UNIT_RST_H_HBM_2_SHIFT 3
+#define UNIT_RST_H_HBM_3_SHIFT 4
+#define UNIT_RST_H_NIC_0_SHIFT 5
+#define UNIT_RST_H_NIC_1_SHIFT 6
+#define UNIT_RST_H_NIC_2_SHIFT 7
+#define UNIT_RST_H_NIC_3_SHIFT 8
+#define UNIT_RST_H_NIC_4_SHIFT 9
+#define UNIT_RST_H_SM_0_SHIFT 10
+#define UNIT_RST_H_SM_1_SHIFT 11
+#define UNIT_RST_H_SM_2_SHIFT 12
+#define UNIT_RST_H_SM_3_SHIFT 13
+#define UNIT_RST_H_IF_0_SHIFT 14
+#define UNIT_RST_H_IF_1_SHIFT 15
+#define UNIT_RST_H_IF_2_SHIFT 16
+#define UNIT_RST_H_IF_3_SHIFT 17
+#define UNIT_RST_H_DMA_0_SHIFT 18
+#define UNIT_RST_H_DMA_1_SHIFT 19
+#define UNIT_RST_H_CPU_SHIFT 20
+#define UNIT_RST_H_MMU_SHIFT 21
+
+#define UNIT_RST_H_HBM_MASK ((1 << UNIT_RST_H_HBM_0_SHIFT) | \
+ (1 << UNIT_RST_H_HBM_1_SHIFT) | \
+ (1 << UNIT_RST_H_HBM_2_SHIFT) | \
+ (1 << UNIT_RST_H_HBM_3_SHIFT))
+
+#define UNIT_RST_H_NIC_MASK ((1 << UNIT_RST_H_NIC_0_SHIFT) | \
+ (1 << UNIT_RST_H_NIC_1_SHIFT) | \
+ (1 << UNIT_RST_H_NIC_2_SHIFT) | \
+ (1 << UNIT_RST_H_NIC_3_SHIFT) | \
+ (1 << UNIT_RST_H_NIC_4_SHIFT))
+
+#define UNIT_RST_H_SM_MASK ((1 << UNIT_RST_H_SM_0_SHIFT) | \
+ (1 << UNIT_RST_H_SM_1_SHIFT) | \
+ (1 << UNIT_RST_H_SM_2_SHIFT) | \
+ (1 << UNIT_RST_H_SM_3_SHIFT))
+
+#define UNIT_RST_L_MME_MASK ((1 << UNIT_RST_L_MME_0_SHIFT) | \
+ (1 << UNIT_RST_L_MME_1_SHIFT) | \
+ (1 << UNIT_RST_L_MME_2_SHIFT))
+
+#define UNIT_RST_H_MME_MASK (1 << UNIT_RST_H_MME_3_SHIFT)
+
+#define UNIT_RST_L_IF_MASK ((1 << UNIT_RST_L_IF_0_SHIFT) | \
+ (1 << UNIT_RST_L_IF_1_SHIFT) | \
+ (1 << UNIT_RST_L_IF_2_SHIFT) | \
+ (1 << UNIT_RST_L_IF_3_SHIFT))
+
+#define UNIT_RST_L_TPC_MASK ((1 << UNIT_RST_L_TPC_0_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_1_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_2_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_3_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_4_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_5_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_6_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_7_SHIFT))
+
+/* CPU_CA53_CFG_ARM_RST_CONTROL */
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT 0
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_MASK 0x3
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT 4
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_MASK 0x30
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT 8
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_MASK 0x100
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_SHIFT 12
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_MASK 0x1000
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT 16
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_MASK 0x10000
+#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_SHIFT 20
+#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_MASK 0x300000
+
+#define CPU_RESET_ASSERT (\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
+
+#define CPU_RESET_CORE0_DEASSERT (\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
+
+/* QM_IDLE_MASK is valid for all engines' QM idle check */
+#define QM_IDLE_MASK (DMA0_QM_GLBL_STS0_PQF_IDLE_MASK | \
+ DMA0_QM_GLBL_STS0_CQF_IDLE_MASK | \
+ DMA0_QM_GLBL_STS0_CP_IDLE_MASK)
+
+/* CGM_IDLE_MASK is valid for all engines' CGM idle check */
+#define CGM_IDLE_MASK DMA0_QM_CGM_STS_AGENT_IDLE_MASK
+
+#define TPC_IDLE_MASK ((1 << TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_IQ_EMPTY_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_SB_EMPTY_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_QM_IDLE_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_QM_RDY_SHIFT))
+
+#define MME0_CTRL_ARCH_STATUS_SB_A_EMPTY_MASK 0x80
+#define MME0_CTRL_ARCH_STATUS_SB_B_EMPTY_MASK 0x100
+#define MME0_CTRL_ARCH_STATUS_WBC_AXI_IDLE_MASK 0x1000
+
+#define MME_ARCH_IDLE_MASK (MME0_CTRL_ARCH_STATUS_SB_A_EMPTY_MASK | \
+ MME0_CTRL_ARCH_STATUS_SB_B_EMPTY_MASK | \
+ MME0_CTRL_ARCH_STATUS_WBC_AXI_IDLE_MASK)
+
+#define IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) \
+ ((((qm_glbl_sts0) & QM_IDLE_MASK) == QM_IDLE_MASK) && \
+ (((qm_cgm_sts) & CGM_IDLE_MASK) == CGM_IDLE_MASK))
+
+#define IS_DMA_IDLE(dma_core_sts0) \
+ !(dma_core_sts0 & DMA0_CORE_STS0_BUSY_MASK)
+
+#define IS_TPC_IDLE(tpc_cfg_sts) \
+ (((tpc_cfg_sts) & TPC_IDLE_MASK) == TPC_IDLE_MASK)
+
+#define IS_MME_IDLE(mme_arch_sts) \
+ (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
+
+enum axi_id {
+ AXI_ID_MME,
+ AXI_ID_TPC,
+ AXI_ID_DMA,
+ AXI_ID_NIC, /* Local NIC */
+ AXI_ID_PCI,
+ AXI_ID_CPU,
+ AXI_ID_PSOC,
+ AXI_ID_MMU,
+ AXI_ID_NIC_FT /* Feed-Through NIC */
+};
+
+/* RAZWI initiator ID is built from the location in the chip and the AXI ID */
+
+#define RAZWI_INITIATOR_AXI_ID_SHIFT 20
+#define RAZWI_INITIATOR_AXI_ID_MASK 0xF
+#define RAZWI_INITIATOR_X_SHIFT 24
+#define RAZWI_INITIATOR_X_MASK 0xF
+#define RAZWI_INITIATOR_Y_SHIFT 28
+#define RAZWI_INITIATOR_Y_MASK 0x7
+
+#define RAZWI_INITIATOR_ID_AXI_ID(axi_id) \
+ (((axi_id) & RAZWI_INITIATOR_AXI_ID_MASK) << \
+ RAZWI_INITIATOR_AXI_ID_SHIFT)
+
+#define RAZWI_INITIATOR_ID_X_Y(x, y) \
+ ((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \
+ (((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT))
+
+#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0 RAZWI_INITIATOR_ID_X_Y(1, 0)
+#define RAZWI_INITIATOR_ID_X_Y_TPC1 RAZWI_INITIATOR_ID_X_Y(2, 0)
+#define RAZWI_INITIATOR_ID_X_Y_MME0_0 RAZWI_INITIATOR_ID_X_Y(3, 0)
+#define RAZWI_INITIATOR_ID_X_Y_MME0_1 RAZWI_INITIATOR_ID_X_Y(4, 0)
+#define RAZWI_INITIATOR_ID_X_Y_MME1_0 RAZWI_INITIATOR_ID_X_Y(5, 0)
+#define RAZWI_INITIATOR_ID_X_Y_MME1_1 RAZWI_INITIATOR_ID_X_Y(6, 0)
+#define RAZWI_INITIATOR_ID_X_Y_TPC2 RAZWI_INITIATOR_ID_X_Y(7, 0)
+#define RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC \
+ RAZWI_INITIATOR_ID_X_Y(8, 0)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0 RAZWI_INITIATOR_ID_X_Y(0, 1)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0 RAZWI_INITIATOR_ID_X_Y(9, 1)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1 RAZWI_INITIATOR_ID_X_Y(0, 2)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1 RAZWI_INITIATOR_ID_X_Y(9, 2)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0 RAZWI_INITIATOR_ID_X_Y(0, 3)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0 RAZWI_INITIATOR_ID_X_Y(9, 3)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1 RAZWI_INITIATOR_ID_X_Y(0, 4)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1 RAZWI_INITIATOR_ID_X_Y(9, 4)
+#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2 RAZWI_INITIATOR_ID_X_Y(1, 5)
+#define RAZWI_INITIATOR_ID_X_Y_TPC5 RAZWI_INITIATOR_ID_X_Y(2, 5)
+#define RAZWI_INITIATOR_ID_X_Y_MME2_0 RAZWI_INITIATOR_ID_X_Y(3, 5)
+#define RAZWI_INITIATOR_ID_X_Y_MME2_1 RAZWI_INITIATOR_ID_X_Y(4, 5)
+#define RAZWI_INITIATOR_ID_X_Y_MME3_0 RAZWI_INITIATOR_ID_X_Y(5, 5)
+#define RAZWI_INITIATOR_ID_X_Y_MME3_1 RAZWI_INITIATOR_ID_X_Y(6, 5)
+#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 5)
+#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 5)
+
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+
+/* STLB_CACHE_INV */
+#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
+#define STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF
+#define STLB_CACHE_INV_INDEX_MASK_SHIFT 8
+#define STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00
+
+#define MME_ACC_ACC_STALL_R_SHIFT 0
+#define MME_SBAB_SB_STALL_R_SHIFT 0
+
+#define PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK 0x700
+#define PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK 0x7000
+
+#define PCIE_WRAP_LBW_DRAIN_CFG_EN_SHIFT 0
+#define PCIE_WRAP_HBW_DRAIN_CFG_EN_SHIFT 0
+
+/* DMA_IF_HBM_CRED_EN */
+#define DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT 0
+#define DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_MASK 0x1
+#define DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT 1
+#define DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_MASK 0x2
+
+#define DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT 0
+#define DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT 0
+#define DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT 0
+#define DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT 0
+
+#define IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT 0
+#define IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT 0
+
+#define IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT 0
+#define IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT 0
+
+/* MMU_UP_PAGE_ERROR_CAPTURE */
+#define MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
+#define MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
+
+/* MMU_UP_ACCESS_ERROR_CAPTURE */
+#define MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
+#define MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
+
+#define QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
+#define QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
+#define QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+#define QM_ARB_ERR_MSG_EN_MASK (\
+ QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK |\
+ QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
+ QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
+
+#endif /* GAUDI_MASKS_H_ */
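As a minimal sketch of how the RAZWI initiator ID layout above composes and decomposes, the stand-alone program below mirrors the shift/mask values from the header; the helper usage is hypothetical, only the constants come from the file.

/* Compose the initiator ID for MME0_0 (x=3, y=0, AXI ID 0) and decode
 * it back into its fields, exactly as RAZWI_INITIATOR_ID_X_Y() and
 * RAZWI_INITIATOR_ID_AXI_ID() would build it.
 */
#include <stdint.h>
#include <stdio.h>

#define RAZWI_INITIATOR_AXI_ID_SHIFT	20
#define RAZWI_INITIATOR_AXI_ID_MASK	0xF
#define RAZWI_INITIATOR_X_SHIFT		24
#define RAZWI_INITIATOR_X_MASK		0xF
#define RAZWI_INITIATOR_Y_SHIFT		28
#define RAZWI_INITIATOR_Y_MASK		0x7

int main(void)
{
	uint32_t id = ((0u & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) |
		      ((3u & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT) |
		      ((0u & RAZWI_INITIATOR_AXI_ID_MASK) <<
		       RAZWI_INITIATOR_AXI_ID_SHIFT);

	/* Decoding applies the mask after shifting the field back down. */
	printf("x=%u y=%u axi=%u\n",
	       (id >> RAZWI_INITIATOR_X_SHIFT) & RAZWI_INITIATOR_X_MASK,
	       (id >> RAZWI_INITIATOR_Y_SHIFT) & RAZWI_INITIATOR_Y_MASK,
	       (id >> RAZWI_INITIATOR_AXI_ID_SHIFT) & RAZWI_INITIATOR_AXI_ID_MASK);
	return 0;
}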
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
new file mode 100644
index 000000000000..9a5800b0086b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2017-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_PACKETS_H
+#define GAUDI_PACKETS_H
+
+#include <linux/types.h>
+
+#define PACKET_HEADER_PACKET_ID_SHIFT 56
+#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull
+
+enum packet_id {
+ PACKET_WREG_32 = 0x1,
+ PACKET_WREG_BULK = 0x2,
+ PACKET_MSG_LONG = 0x3,
+ PACKET_MSG_SHORT = 0x4,
+ PACKET_CP_DMA = 0x5,
+ PACKET_REPEAT = 0x6,
+ PACKET_MSG_PROT = 0x7,
+ PACKET_FENCE = 0x8,
+ PACKET_LIN_DMA = 0x9,
+ PACKET_NOP = 0xA,
+ PACKET_STOP = 0xB,
+ PACKET_ARB_POINT = 0xC,
+ PACKET_WAIT = 0xD,
+ PACKET_LOAD_AND_EXE = 0xF,
+ MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >>
+ PACKET_HEADER_PACKET_ID_SHIFT) + 1
+};
+
+#define GAUDI_PKT_CTL_OPCODE_SHIFT 24
+#define GAUDI_PKT_CTL_OPCODE_MASK 0x1F000000
+
+#define GAUDI_PKT_CTL_EB_SHIFT 29
+#define GAUDI_PKT_CTL_EB_MASK 0x20000000
+
+#define GAUDI_PKT_CTL_RB_SHIFT 30
+#define GAUDI_PKT_CTL_RB_MASK 0x40000000
+
+#define GAUDI_PKT_CTL_MB_SHIFT 31
+#define GAUDI_PKT_CTL_MB_MASK 0x80000000
+
+/* All packets have, at least, an 8-byte header, which contains
+ * the packet type. The kernel driver uses the packet header for packet
+ * validation and to perform any required preparation before sending
+ * them off to the hardware.
+ */
+struct gaudi_packet {
+ __le64 header;
+ /* The rest of the packet data follows. Use the corresponding
+ * packet_XXX struct to dereference the data, based on packet type
+ */
+ u8 contents[0];
+};
+
+struct packet_nop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct packet_stop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct packet_wreg32 {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_wreg_bulk {
+ __le32 size64;
+ __le32 ctl;
+ __le64 values[0]; /* data starts here */
+};
+
+struct packet_msg_long {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT 0
+#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x00007FFF
+
+#define GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT 31
+#define GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK 0x80000000
+
+#define GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_SHIFT 0
+#define GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK 0x000000FF
+
+#define GAUDI_PKT_SHORT_VAL_MON_MASK_SHIFT 8
+#define GAUDI_PKT_SHORT_VAL_MON_MASK_MASK 0x0000FF00
+
+#define GAUDI_PKT_SHORT_VAL_MON_MODE_SHIFT 16
+#define GAUDI_PKT_SHORT_VAL_MON_MODE_MASK 0x00010000
+
+#define GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_SHIFT 17
+#define GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK 0xFFFE0000
+
+#define GAUDI_PKT_SHORT_CTL_ADDR_SHIFT 0
+#define GAUDI_PKT_SHORT_CTL_ADDR_MASK 0x0000FFFF
+
+#define GAUDI_PKT_SHORT_CTL_OP_SHIFT 20
+#define GAUDI_PKT_SHORT_CTL_OP_MASK 0x00300000
+
+#define GAUDI_PKT_SHORT_CTL_BASE_SHIFT 22
+#define GAUDI_PKT_SHORT_CTL_BASE_MASK 0x00C00000
+
+#define GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT 24
+#define GAUDI_PKT_SHORT_CTL_OPCODE_MASK 0x1F000000
+
+#define GAUDI_PKT_SHORT_CTL_EB_SHIFT 29
+#define GAUDI_PKT_SHORT_CTL_EB_MASK 0x20000000
+
+#define GAUDI_PKT_SHORT_CTL_RB_SHIFT 30
+#define GAUDI_PKT_SHORT_CTL_RB_MASK 0x40000000
+
+#define GAUDI_PKT_SHORT_CTL_MB_SHIFT 31
+#define GAUDI_PKT_SHORT_CTL_MB_MASK 0x80000000
+
+struct packet_msg_short {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_msg_prot {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+#define GAUDI_PKT_FENCE_CFG_DEC_VAL_SHIFT 0
+#define GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK 0x0000000F
+
+#define GAUDI_PKT_FENCE_CFG_TARGET_VAL_SHIFT 16
+#define GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK 0x00FF0000
+
+#define GAUDI_PKT_FENCE_CFG_ID_SHIFT 30
+#define GAUDI_PKT_FENCE_CFG_ID_MASK 0xC0000000
+
+#define GAUDI_PKT_FENCE_CTL_PRED_SHIFT 0
+#define GAUDI_PKT_FENCE_CTL_PRED_MASK 0x0000001F
+
+#define GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT 24
+#define GAUDI_PKT_FENCE_CTL_OPCODE_MASK 0x1F000000
+
+#define GAUDI_PKT_FENCE_CTL_EB_SHIFT 29
+#define GAUDI_PKT_FENCE_CTL_EB_MASK 0x20000000
+
+#define GAUDI_PKT_FENCE_CTL_RB_SHIFT 30
+#define GAUDI_PKT_FENCE_CTL_RB_MASK 0x40000000
+
+#define GAUDI_PKT_FENCE_CTL_MB_SHIFT 31
+#define GAUDI_PKT_FENCE_CTL_MB_MASK 0x80000000
+
+struct packet_fence {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+#define GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_SHIFT 0
+#define GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK 0x00000001
+
+#define GAUDI_PKT_LIN_DMA_CTL_LIN_SHIFT 3
+#define GAUDI_PKT_LIN_DMA_CTL_LIN_MASK 0x00000008
+
+#define GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT 4
+#define GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK 0x00000010
+
+#define GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT 0
+#define GAUDI_PKT_LIN_DMA_DST_ADDR_MASK 0x00FFFFFFFFFFFFFFull
+
+struct packet_lin_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+ __le64 dst_addr;
+};
+
+struct packet_arb_point {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+struct packet_repeat {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+struct packet_wait {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+struct packet_load_and_exe {
+ __le32 cfg;
+ __le32 ctl;
+ __le64 src_addr;
+};
+
+struct packet_cp_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+};
+
+#endif /* GAUDI_PACKETS_H */
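The header comment above notes that the driver validates packets by their 8-byte header; below is a minimal stand-alone sketch of that first parsing step, using only the shift/mask values from the file (the helper name is hypothetical).

#include <stdint.h>
#include <stdio.h>

#define PACKET_HEADER_PACKET_ID_SHIFT	56
#define PACKET_HEADER_PACKET_ID_MASK	0x1F00000000000000ull

/* Extract the 5-bit packet ID a dispatcher would switch on before
 * casting the payload to the matching packet_XXX struct.
 */
static unsigned int packet_id_of(uint64_t header)
{
	return (unsigned int)((header & PACKET_HEADER_PACKET_ID_MASK) >>
			      PACKET_HEADER_PACKET_ID_SHIFT);
}

int main(void)
{
	uint64_t header = (uint64_t)0x3 << PACKET_HEADER_PACKET_ID_SHIFT;

	printf("packet id = 0x%x\n", packet_id_of(header)); /* 0x3: MSG_LONG */
	return 0;
}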
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
new file mode 100644
index 000000000000..f25c60a2c243
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_REG_MAP_H_
+#define GAUDI_REG_MAP_H_
+
+/*
+ * PSOC scratch-pad registers
+ */
+#define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23
+#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
+#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
+#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
+#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
+#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
+#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
+#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
+#define mmPREBOOT_PCIE_EN mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1
+#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3
+
+#endif /* GAUDI_REG_MAP_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index 3c44ef3a23ed..067489bd048e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -55,8 +55,7 @@
(1 << DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
(1 << DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
(1 << DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
- (1 << DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
- (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
#define QMAN_MME_ENABLE (\
(1 << MME_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
index fce490e6a231..ce65c9da5c60 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -18,6 +18,7 @@
#include "psoc_mme_pll_regs.h"
#include "psoc_pci_pll_regs.h"
#include "psoc_emmc_pll_regs.h"
+#include "psoc_timestamp_regs.h"
#include "cpu_if_regs.h"
#include "cpu_ca53_cfg_regs.h"
#include "cpu_pll_regs.h"
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
new file mode 100644
index 000000000000..9ce24597d4b0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+#define ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_TIMESTAMP (Prototype: TIMESTAMP)
+ *****************************************
+ */
+
+#define mmPSOC_TIMESTAMP_CNTCR 0xC49000
+
+#define mmPSOC_TIMESTAMP_CNTSR 0xC49004
+
+#define mmPSOC_TIMESTAMP_CNTCVL 0xC49008
+
+#define mmPSOC_TIMESTAMP_CNTCVU 0xC4900C
+
+#define mmPSOC_TIMESTAMP_CNTFID0 0xC49020
+
+#define mmPSOC_TIMESTAMP_PIDR4 0xC49FD0
+
+#define mmPSOC_TIMESTAMP_PIDR5 0xC49FD4
+
+#define mmPSOC_TIMESTAMP_PIDR6 0xC49FD8
+
+#define mmPSOC_TIMESTAMP_PIDR7 0xC49FDC
+
+#define mmPSOC_TIMESTAMP_PIDR0 0xC49FE0
+
+#define mmPSOC_TIMESTAMP_PIDR1 0xC49FE4
+
+#define mmPSOC_TIMESTAMP_PIDR2 0xC49FE8
+
+#define mmPSOC_TIMESTAMP_PIDR3 0xC49FEC
+
+#define mmPSOC_TIMESTAMP_CIDR0 0xC49FF0
+
+#define mmPSOC_TIMESTAMP_CIDR1 0xC49FF4
+
+#define mmPSOC_TIMESTAMP_CIDR2 0xC49FF8
+
+#define mmPSOC_TIMESTAMP_CIDR3 0xC49FFC
+
+#endif /* ASIC_REG_PSOC_TIMESTAMP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
index 08061282cd9c..0195f62d7254 100644
--- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -11,27 +11,30 @@
/*
* PSOC scratch-pad registers
*/
-#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
-#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
-#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
-#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
-#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
-#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
-#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
-#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
-#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
-#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
-#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
-#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
-#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
-#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
-#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
-#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
-#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
-#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
-#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
+#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
+#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
+#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
+#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
+#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
+#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
+#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
+#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
+#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
+#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
+#define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23
+#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
+#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
+#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
+#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
+#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
+#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
+#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
-#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
+#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
#define mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS mmPSOC_GLOBAL_CONF_WARM_REBOOT
+#define mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU mmPSOC_GLOBAL_CONF_UBOOT_MAGIC
+#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3
#endif /* GOYA_REG_MAP_H_ */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index f7992a69fd3a..c22d134e73af 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2018 HabanaLabs, Ltd.
+ * Copyright 2018-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -9,8 +9,47 @@
#define HL_BOOT_IF_H
#define LKD_HARD_RESET_MAGIC 0xED7BD694
+#define HL_POWER9_HOST_MAGIC 0x1DA30009
-/* CPU error bits in BOOT_ERROR registers */
+#define BOOT_FIT_SRAM_OFFSET 0x200000
+
+/*
+ * CPU error bits in BOOT_ERROR registers
+ *
+ * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
+ * DRAM is not reliable to use.
+ *
+ * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the
+ * image provided by the host has failed.
+ *
+ * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed.
+ * Boot continues as usual, but keep in
+ * mind this is a warning.
+ *
+ * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped.
+ * Skipping DRAM initialization has been
+ * requested (e.g. strap, command, etc.)
+ * and FW skipped the DRAM initialization.
+ * Host can initialize the DRAM.
+ *
+ * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped.
+ * Meaning the BMC data might not be
+ * available until reset.
+ *
+ * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready.
+ * BMC has not provided the NIC data yet.
+ * Once provided this bit will be cleared.
+ *
+ * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed.
+ * The NIC FW loading and initialization
+ * failed. This means NICs are not usable.
+ *
+ * CPU_BOOT_ERR0_ENABLED Error registers enabled.
+ * This is a main indication that the
+ * running FW populates the error
+ * registers. Meaning the error bits are
+ * not garbage, but actual error statuses.
+ */
#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << 0)
#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << 1)
#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << 2)
@@ -27,22 +66,33 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_SRAM_AVAIL = 3,
CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */
CPU_BOOT_STATUS_IN_PREBOOT = 5,
- CPU_BOOT_STATUS_IN_SPL = 6,
+ CPU_BOOT_STATUS_IN_SPL, /* deprecated - not reported */
CPU_BOOT_STATUS_IN_UBOOT = 7,
CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */
CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */
+ /* U-Boot console prompt activated, commands are not processed */
CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+ /* Finished NICs init, reported after DRAM and NICs */
CPU_BOOT_STATUS_NIC_FW_RDY = 11,
CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */
CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */
CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+ /* Last boot loader progress status, ready to receive commands */
CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
};
enum kmd_msg {
KMD_MSG_NA = 0,
KMD_MSG_GOTO_WFE,
- KMD_MSG_FIT_RDY
+ KMD_MSG_FIT_RDY,
+ KMD_MSG_SKIP_BMC,
+};
+
+enum cpu_msg_status {
+ CPU_MSG_CLR = 0,
+ CPU_MSG_OK,
+ CPU_MSG_ERR,
};
#endif /* HL_BOOT_IF_H */
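A sketch of the contract the new comment block spells out: the BOOT_ERROR bits may only be trusted once the FW sets CPU_BOOT_ERR0_ENABLED. The bit position used for ENABLED below is an assumption for illustration; only the first three bit values appear in the hunk above.

#include <stdint.h>
#include <stdio.h>

#define CPU_BOOT_ERR0_DRAM_INIT_FAIL	(1u << 0)
#define CPU_BOOT_ERR0_FIT_CORRUPTED	(1u << 1)
#define CPU_BOOT_ERR0_TS_INIT_FAIL	(1u << 2)
#define CPU_BOOT_ERR0_ENABLED		(1u << 31)	/* assumed position */

static int boot_err0_is_fatal(uint32_t err0)
{
	if (!(err0 & CPU_BOOT_ERR0_ENABLED))
		return 0;	/* bits are garbage, nothing to report */
	if (err0 & (CPU_BOOT_ERR0_DRAM_INIT_FAIL | CPU_BOOT_ERR0_FIT_CORRUPTED))
		return 1;	/* device unusable */
	return 0;	/* TS_INIT_FAIL is only a warning per the comment */
}

int main(void)
{
	printf("%d\n", boot_err0_is_fatal(CPU_BOOT_ERR0_ENABLED |
					  CPU_BOOT_ERR0_DRAM_INIT_FAIL));
	return 0;
}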
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index a6851a9d3f03..468bb045fbd1 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2018 HabanaLabs, Ltd.
+ * Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
new file mode 100644
index 000000000000..b2a9570583ac
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_MMU_V1_1_H_
+#define INCLUDE_MMU_V1_1_H_
+
+#define MMU_ASID 0xC12004
+#define MMU_HOP0_PA43_12 0xC12008
+#define MMU_HOP0_PA49_44 0xC1200C
+#define MMU_BUSY 0xC12000
+
+#endif /* INCLUDE_MMU_V1_1_H_ */
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index a72f766ca470..47da84a17719 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -886,6 +886,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
vm_type = (enum vm_type_t *) userptr;
hint_addr = args->map_host.hint_addr;
+ handle = phys_pg_pack->handle;
} else {
handle = lower_32_bits(args->map_device.handle);
@@ -954,10 +955,17 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto map_err;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
+ rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
mutex_unlock(&ctx->mmu_lock);
+ if (rc) {
+ dev_err(hdev->dev,
+ "mapping handle %u failed due to MMU cache invalidation\n",
+ handle);
+ goto map_err;
+ }
+
ret_vaddr += phys_pg_pack->offset;
hnode->ptr = vm_type;
@@ -1015,7 +1023,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
struct hl_va_range *va_range;
enum vm_type_t *vm_type;
bool is_userptr;
- int rc;
+ int rc = 0;
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
@@ -1083,21 +1091,34 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
* at the loop end rather than for each iteration
*/
if (!ctx_free)
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, *vm_type);
+ rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
+ *vm_type);
mutex_unlock(&ctx->mmu_lock);
/*
- * No point in maintaining the free VA block list if the context is
- * closing as the list will be freed anyway
+ * If the context is closing, we don't need to check the MMU cache
+ * invalidation return code, nor maintain the VA free list, since in
+ * that flow the MMU cache is invalidated outside of this unmap
+ * function and the VA free list will be freed anyway.
*/
if (!ctx_free) {
- rc = add_va_block(hdev, va_range, vaddr,
- vaddr + phys_pg_pack->total_size - 1);
+ int tmp_rc;
+
if (rc)
+ dev_err(hdev->dev,
+ "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
+ vaddr);
+
+ tmp_rc = add_va_block(hdev, va_range, vaddr,
+ vaddr + phys_pg_pack->total_size - 1);
+ if (tmp_rc) {
dev_warn(hdev->dev,
"add va block failed for vaddr: 0x%llx\n",
vaddr);
+ if (!rc)
+ rc = tmp_rc;
+ }
}
atomic_dec(&phys_pg_pack->mapping_cnt);
@@ -1108,7 +1129,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
dma_unmap_host_va(hdev, userptr);
}
- return 0;
+ return rc;
mapping_cnt_err:
if (is_userptr)
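The unmap path above follows a first-error-wins pattern: the MMU cache invalidation result is kept in rc, and a later add_va_block() failure may only fill in a still-zero rc. A stand-alone sketch of the pattern, with hypothetical stand-ins for the two fallible steps:

#include <stdio.h>

static int invalidate_cache(void) { return -5; }	/* pretend -EIO */
static int add_va_block(void) { return -12; }		/* pretend -ENOMEM */

int main(void)
{
	int rc = invalidate_cache();
	int tmp_rc = add_va_block();

	if (tmp_rc && !rc)
		rc = tmp_rc;	/* report the later error only if none yet */

	printf("rc=%d\n", rc);	/* -5: the first failure wins */
	return 0;
}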
diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/pci.c
index c98d88c7a5c6..9f634ef6f5b3 100644
--- a/drivers/misc/habanalabs/pci.c
+++ b/drivers/misc/habanalabs/pci.c
@@ -267,6 +267,12 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
/* Enable + Bar match + match enable */
rc |= hl_pci_iatu_write(hdev, 0x104, 0xC0080000);
+ /* Return the DBI window to the default location */
+ rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
+ rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+
/* Point to DRAM */
if (!hdev->asic_funcs->set_dram_bar_base)
return -EINVAL;
@@ -274,7 +280,6 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
U64_MAX)
return -EIO;
-
/* Outbound Region 0 - Point to Host */
host_phys_end_addr = host_phys_base_address + host_phys_size - 1;
rc |= hl_pci_iatu_write(hdev, 0x008,
@@ -283,7 +288,12 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
upper_32_bits(host_phys_base_address));
rc |= hl_pci_iatu_write(hdev, 0x010, lower_32_bits(host_phys_end_addr));
rc |= hl_pci_iatu_write(hdev, 0x014, 0);
- rc |= hl_pci_iatu_write(hdev, 0x018, 0);
+
+ if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64))
+ rc |= hl_pci_iatu_write(hdev, 0x018, 0x08000000);
+ else
+ rc |= hl_pci_iatu_write(hdev, 0x018, 0);
+
rc |= hl_pci_iatu_write(hdev, 0x020, upper_32_bits(host_phys_end_addr));
/* Increase region size */
rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
@@ -310,41 +320,25 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
*
* Return: 0 on success, non-zero for failure.
*/
-int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask)
+static int hl_pci_set_dma_mask(struct hl_device *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int rc;
/* set DMA mask */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
if (rc) {
- dev_warn(hdev->dev,
+ dev_err(hdev->dev,
"Failed to set pci dma mask to %d bits, error %d\n",
- dma_mask, rc);
-
- dma_mask = hdev->dma_mask;
-
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
- if (rc) {
- dev_err(hdev->dev,
- "Failed to set pci dma mask to %d bits, error %d\n",
- dma_mask, rc);
- return rc;
- }
+ hdev->dma_mask, rc);
+ return rc;
}
- /*
- * We managed to set the dma mask, so update the dma mask field. If
- * the set to the coherent mask will fail with that mask, we will
- * fail the entire function
- */
- hdev->dma_mask = dma_mask;
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
if (rc) {
dev_err(hdev->dev,
"Failed to set pci consistent dma mask to %d bits, error %d\n",
- dma_mask, rc);
+ hdev->dma_mask, rc);
return rc;
}
@@ -354,21 +348,16 @@ int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask)
/**
* hl_pci_init() - PCI initialization code.
* @hdev: Pointer to hl_device structure.
- * @dma_mask: number of bits for the requested dma mask.
*
* Set DMA masks, initialize the PCI controller and map the PCI BARs.
*
* Return: 0 on success, non-zero for failure.
*/
-int hl_pci_init(struct hl_device *hdev, u8 dma_mask)
+int hl_pci_init(struct hl_device *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int rc;
- rc = hl_pci_set_dma_mask(hdev, dma_mask);
- if (rc)
- return rc;
-
if (hdev->reset_pcilink)
hl_pci_reset_link_through_bridge(hdev);
@@ -380,18 +369,22 @@ int hl_pci_init(struct hl_device *hdev, u8 dma_mask)
pci_set_master(pdev);
- rc = hdev->asic_funcs->init_iatu(hdev);
+ rc = hdev->asic_funcs->pci_bars_map(hdev);
if (rc) {
- dev_err(hdev->dev, "Failed to initialize iATU\n");
+ dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
goto disable_device;
}
- rc = hdev->asic_funcs->pci_bars_map(hdev);
+ rc = hdev->asic_funcs->init_iatu(hdev);
if (rc) {
- dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
+ dev_err(hdev->dev, "Failed to initialize iATU\n");
goto disable_device;
}
+ rc = hl_pci_set_dma_mask(hdev);
+ if (rc)
+ goto disable_device;
+
return 0;
disable_device:
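A hedged note on the DMA-mask helper reworked above: pairing pci_set_dma_mask() with pci_set_consistent_dma_mask() on the same mask is, in kernels of this vintage, equivalent to a single dma_set_mask_and_coherent() call on &pdev->dev. A sketch under that assumption (function name hypothetical):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct pci_dev *pdev, u8 bits)
{
	/* Sets both the streaming and the coherent DMA masks at once. */
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bits));

	if (rc)
		dev_err(&pdev->dev, "no usable %d-bit DMA mask\n", bits);
	return rc;
}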
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
index 4cd622b017b9..5d78d5e1c782 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -183,6 +183,13 @@ static ssize_t soft_reset_store(struct device *dev,
goto out;
}
+ if (!hdev->supports_soft_reset) {
+ dev_err(hdev->dev, "Device does not support soft-reset\n");
+ goto out;
+ }
+
+ dev_warn(hdev->dev, "Soft-Reset requested through sysfs\n");
+
hl_device_reset(hdev, false, false);
out:
@@ -204,6 +211,8 @@ static ssize_t hard_reset_store(struct device *dev,
goto out;
}
+ dev_warn(hdev->dev, "Hard-Reset requested through sysfs\n");
+
hl_device_reset(hdev, true, false);
out:
@@ -220,6 +229,9 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GOYA:
str = "GOYA";
break;
+ case ASIC_GAUDI:
+ str = "GAUDI";
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -406,7 +418,10 @@ int hl_sysfs_init(struct hl_device *hdev)
{
int rc;
- hdev->pm_mng_profile = PM_AUTO;
+ if (hdev->asic_type == ASIC_GOYA)
+ hdev->pm_mng_profile = PM_AUTO;
+ else
+ hdev->pm_mng_profile = PM_MANUAL;
hdev->max_power = hdev->asic_prop.max_power_default;
hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 886459e0ddd9..736675f0a246 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -208,7 +208,7 @@ void lkdtm_OVERFLOW_UNSIGNED(void)
ignored = value;
}
-/* Intentially using old-style flex array definition of 1 byte. */
+/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
int one;
int two;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 204d807e755b..b32c825a0945 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -266,6 +266,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
__mei_me_cl_del(dev, me_cl);
+ mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
@@ -287,6 +288,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
__mei_me_cl_del(dev, me_cl);
+ mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
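The two hunks above restore reference-count balance: the __mei_me_cl_by_uuid*() lookups take a reference that must be dropped with mei_me_cl_put() even when the client is being removed. A generic kref sketch of that lookup/put discipline (all names hypothetical):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref ref;
};

static void obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct obj, ref));
}

static void remove_obj(struct obj *o)
{
	if (!o)
		return;
	/* ... unlink from the list, dropping the list's reference ... */
	kref_put(&o->ref, obj_release);	/* drop the lookup's reference */
}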
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a1ed375fed37..71f795b510ce 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -241,7 +241,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* MEI requires to resume from runtime suspend mode
* in order to perform link reset flow upon system suspend.
*/
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/*
* ME maps runtime suspend/resume to D0i states,
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index beacf2a2f2b5..4bf26ce61044 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -128,7 +128,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* MEI requires to resume from runtime suspend mode
* in order to perform link reset flow upon system suspend.
*/
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/*
* TXE maps runtime suspend/resume to own power gating states,
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index fcd999f50d14..ea084626fe11 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -660,7 +660,7 @@ int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
struct device *spdev = NULL;
if (msg->uop > SCIF_EXIT_ACK) {
- /* Dont send messages once the exit flow has begun */
+ /* Don't send messages once the exit flow has begun */
if (OP_IDLE != scifdev->exit)
return -ENODEV;
spdev = scif_get_peer_dev(scifdev);
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 01e27682ea30..406cd5abfa72 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -113,14 +113,17 @@ static int scif_destroy_pinned_pages(struct scif_pinned_pages *pin)
int writeable = pin->prot & SCIF_PROT_WRITE;
int kernel = SCIF_MAP_KERNEL & pin->map_flags;
- for (j = 0; j < pin->nr_pages; j++) {
- if (pin->pages[j] && !kernel) {
- if (writeable)
- SetPageDirty(pin->pages[j]);
- put_page(pin->pages[j]);
+ if (kernel) {
+ for (j = 0; j < pin->nr_pages; j++) {
+ if (pin->pages[j] && !kernel) {
+ if (writeable)
+ set_page_dirty_lock(pin->pages[j]);
+ put_page(pin->pages[j]);
+ }
}
- }
-
+ } else
+ unpin_user_pages_dirty_lock(pin->pages, pin->nr_pages,
+ writeable);
scif_free(pin->pages,
pin->nr_pages * sizeof(*pin->pages));
scif_free(pin, sizeof(*pin));
@@ -1375,7 +1378,7 @@ retry:
}
}
- pinned_pages->nr_pages = get_user_pages_fast(
+ pinned_pages->nr_pages = pin_user_pages_fast(
(u64)addr,
nr_pages,
(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
@@ -1385,11 +1388,8 @@ retry:
if (ulimit)
__scif_dec_pinned_vm_lock(mm, nr_pages);
/* Roll back any pinned pages */
- for (i = 0; i < pinned_pages->nr_pages; i++) {
- if (pinned_pages->pages[i])
- put_page(
- pinned_pages->pages[i]);
- }
+ unpin_user_pages(pinned_pages->pages,
+ pinned_pages->nr_pages);
prot &= ~SCIF_PROT_WRITE;
try_upgrade = false;
goto retry;
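A compact sketch of the pin/unpin discipline the scif conversion above adopts: pages obtained with pin_user_pages_fast() must be released through the unpin API, including on partial-pin rollback (the parameter names are assumptions):

#include <linux/mm.h>

static int example_pin(unsigned long uaddr, int n, struct page **pages,
		       bool writeable)
{
	int got = pin_user_pages_fast(uaddr, n,
				      writeable ? FOLL_WRITE : 0, pages);

	if (got < 0)
		return got;
	if (got < n) {
		unpin_user_pages(pages, got);	/* roll back partial pins */
		return -EFAULT;
	}
	/* On release, unpin_user_pages_dirty_lock(pages, n, writeable)
	 * both dirties and unpins, as scif_destroy_pinned_pages() does.
	 */
	return 0;
}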
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index de8a66b9d76b..c21f65a5c762 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -287,7 +287,7 @@ void ocxl_context_free(struct ocxl_context *ctx)
ocxl_afu_irq_free_all(ctx);
idr_destroy(&ctx->irq_idr);
- /* reference to the AFU taken in ocxl_context_init */
+ /* reference to the AFU taken in ocxl_context_alloc() */
ocxl_afu_put(ctx->afu);
kfree(ctx);
}
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index ef5a1af6bab7..41c40971979e 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -73,6 +73,8 @@
#define is_am654_pci_dev(pdev) \
((pdev)->device == PCI_DEVICE_ID_TI_AM654)
+#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
+
static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
@@ -942,6 +944,8 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
.driver_data = (kernel_ulong_t)&am654_data
},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 79a963105983..d5e097cd556d 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -59,16 +59,16 @@
/* define two XPC debug device structures to be used with dev_dbg() et al */
-struct device_driver xpc_dbg_name = {
+static struct device_driver xpc_dbg_name = {
.name = "xpc"
};
-struct device xpc_part_dbg_subname = {
+static struct device xpc_part_dbg_subname = {
.init_name = "", /* set to "part" at xpc_init() time */
.driver = &xpc_dbg_name
};
-struct device xpc_chan_dbg_subname = {
+static struct device xpc_chan_dbg_subname = {
.init_name = "", /* set to "chan" at xpc_init() time */
.driver = &xpc_dbg_name
};
@@ -1217,7 +1217,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
return NOTIFY_DONE;
}
-int __init
+static int __init
xpc_init(void)
{
int ret;
@@ -1319,7 +1319,7 @@ out_1:
module_init(xpc_init);
-void __exit
+static void __exit
xpc_exit(void)
{
xpc_do_exit(xpUnloading);
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index ada94e6a3c91..837d6c3fe69c 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -96,7 +96,7 @@ struct xpnet_pending_msg {
atomic_t use_count;
};
-struct net_device *xpnet_device;
+static struct net_device *xpnet_device;
/*
* When we are notified of other partitions activating, we add them to
@@ -131,16 +131,16 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
/* Define the XPNET debug device structures to be used with dev_dbg() et al */
-struct device_driver xpnet_dbg_name = {
+static struct device_driver xpnet_dbg_name = {
.name = "xpnet"
};
-struct device xpnet_dbg_subname = {
+static struct device xpnet_dbg_subname = {
.init_name = "", /* set to "" */
.driver = &xpnet_dbg_name
};
-struct device *xpnet = &xpnet_dbg_subname;
+static struct device *xpnet = &xpnet_dbg_subname;
/*
* Packet was recevied by XPC and forwarded to us.
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
deleted file mode 100644
index a431787c0898..000000000000
--- a/drivers/misc/vexpress-syscfg.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Copyright (C) 2014 ARM Limited
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/syscore_ops.h>
-#include <linux/vexpress.h>
-
-
-#define SYS_CFGDATA 0x0
-
-#define SYS_CFGCTRL 0x4
-#define SYS_CFGCTRL_START (1 << 31)
-#define SYS_CFGCTRL_WRITE (1 << 30)
-#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
-#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
-#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
-#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
-#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
-
-#define SYS_CFGSTAT 0x8
-#define SYS_CFGSTAT_ERR (1 << 1)
-#define SYS_CFGSTAT_COMPLETE (1 << 0)
-
-
-struct vexpress_syscfg {
- struct device *dev;
- void __iomem *base;
- struct list_head funcs;
-};
-
-struct vexpress_syscfg_func {
- struct list_head list;
- struct vexpress_syscfg *syscfg;
- struct regmap *regmap;
- int num_templates;
- u32 template[]; /* Keep it last! */
-};
-
-
-static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
- int index, bool write, u32 *data)
-{
- struct vexpress_syscfg *syscfg = func->syscfg;
- u32 command, status;
- int tries;
- long timeout;
-
- if (WARN_ON(index >= func->num_templates))
- return -EINVAL;
-
- command = readl(syscfg->base + SYS_CFGCTRL);
- if (WARN_ON(command & SYS_CFGCTRL_START))
- return -EBUSY;
-
- command = func->template[index];
- command |= SYS_CFGCTRL_START;
- command |= write ? SYS_CFGCTRL_WRITE : 0;
-
- /* Use a canary for reads */
- if (!write)
- *data = 0xdeadbeef;
-
- dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
- func, command, *data);
- writel(*data, syscfg->base + SYS_CFGDATA);
- writel(0, syscfg->base + SYS_CFGSTAT);
- writel(command, syscfg->base + SYS_CFGCTRL);
- mb();
-
- /* The operation can take ages... Go to sleep, 100us initially */
- tries = 100;
- timeout = 100;
- do {
- if (!irqs_disabled()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(timeout));
- if (signal_pending(current))
- return -EINTR;
- } else {
- udelay(timeout);
- }
-
- status = readl(syscfg->base + SYS_CFGSTAT);
- if (status & SYS_CFGSTAT_ERR)
- return -EFAULT;
-
- if (timeout > 20)
- timeout -= 20;
- } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
- if (WARN_ON_ONCE(!tries))
- return -ETIMEDOUT;
-
- if (!write) {
- *data = readl(syscfg->base + SYS_CFGDATA);
- dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
- }
-
- return 0;
-}
-
-static int vexpress_syscfg_read(void *context, unsigned int index,
- unsigned int *val)
-{
- struct vexpress_syscfg_func *func = context;
-
- return vexpress_syscfg_exec(func, index, false, val);
-}
-
-static int vexpress_syscfg_write(void *context, unsigned int index,
- unsigned int val)
-{
- struct vexpress_syscfg_func *func = context;
-
- return vexpress_syscfg_exec(func, index, true, &val);
-}
-
-static struct regmap_config vexpress_syscfg_regmap_config = {
- .lock = vexpress_config_lock,
- .unlock = vexpress_config_unlock,
- .reg_bits = 32,
- .val_bits = 32,
- .reg_read = vexpress_syscfg_read,
- .reg_write = vexpress_syscfg_write,
- .reg_format_endian = REGMAP_ENDIAN_LITTLE,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
-};
-
-
-static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
- void *context)
-{
- int err;
- struct vexpress_syscfg *syscfg = context;
- struct vexpress_syscfg_func *func;
- struct property *prop;
- const __be32 *val = NULL;
- __be32 energy_quirk[4];
- int num;
- u32 site, position, dcc;
- int i;
-
- err = vexpress_config_get_topo(dev->of_node, &site,
- &position, &dcc);
- if (err)
- return ERR_PTR(err);
-
- prop = of_find_property(dev->of_node,
- "arm,vexpress-sysreg,func", NULL);
- if (!prop)
- return ERR_PTR(-EINVAL);
-
- num = prop->length / sizeof(u32) / 2;
- val = prop->value;
-
- /*
- * "arm,vexpress-energy" function used to be described
- * by its first device only, now it requires both
- */
- if (num == 1 && of_device_is_compatible(dev->of_node,
- "arm,vexpress-energy")) {
- num = 2;
- energy_quirk[0] = *val;
- energy_quirk[2] = *val++;
- energy_quirk[1] = *val;
- energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
- val = energy_quirk;
- }
-
- func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
- if (!func)
- return ERR_PTR(-ENOMEM);
-
- func->syscfg = syscfg;
- func->num_templates = num;
-
- for (i = 0; i < num; i++) {
- u32 function, device;
-
- function = be32_to_cpup(val++);
- device = be32_to_cpup(val++);
-
- dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
- func, site, position, dcc,
- function, device);
-
- func->template[i] = SYS_CFGCTRL_DCC(dcc);
- func->template[i] |= SYS_CFGCTRL_SITE(site);
- func->template[i] |= SYS_CFGCTRL_POSITION(position);
- func->template[i] |= SYS_CFGCTRL_FUNC(function);
- func->template[i] |= SYS_CFGCTRL_DEVICE(device);
- }
-
- vexpress_syscfg_regmap_config.max_register = num - 1;
-
- func->regmap = regmap_init(dev, NULL, func,
- &vexpress_syscfg_regmap_config);
-
- if (IS_ERR(func->regmap)) {
- void *err = func->regmap;
-
- kfree(func);
- return err;
- }
-
- list_add(&func->list, &syscfg->funcs);
-
- return func->regmap;
-}
-
-static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
-{
- struct vexpress_syscfg *syscfg = context;
- struct vexpress_syscfg_func *func, *tmp;
-
- regmap_exit(regmap);
-
- list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
- if (func->regmap == regmap) {
- list_del(&syscfg->funcs);
- kfree(func);
- break;
- }
- }
-}
-
-static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
- .regmap_init = vexpress_syscfg_regmap_init,
- .regmap_exit = vexpress_syscfg_regmap_exit,
-};
-
-
-static int vexpress_syscfg_probe(struct platform_device *pdev)
-{
- struct vexpress_syscfg *syscfg;
- struct resource *res;
- struct device *bridge;
-
- syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
- if (!syscfg)
- return -ENOMEM;
- syscfg->dev = &pdev->dev;
- INIT_LIST_HEAD(&syscfg->funcs);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- syscfg->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(syscfg->base))
- return PTR_ERR(syscfg->base);
-
- /* Must use dev.parent (MFD), as that's where DT phandle points at... */
- bridge = vexpress_config_bridge_register(pdev->dev.parent,
- &vexpress_syscfg_bridge_ops, syscfg);
-
- return PTR_ERR_OR_ZERO(bridge);
-}
-
-static const struct platform_device_id vexpress_syscfg_id_table[] = {
- { "vexpress-syscfg", },
- {},
-};
-
-static struct platform_driver vexpress_syscfg_driver = {
- .driver.name = "vexpress-syscfg",
- .id_table = vexpress_syscfg_id_table,
- .probe = vexpress_syscfg_probe,
-};
-
-static int __init vexpress_syscfg_init(void)
-{
- return platform_driver_register(&vexpress_syscfg_driver);
-}
-core_initcall(vexpress_syscfg_init);
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index ce16d6b99295..2d8328d928d5 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -233,8 +233,6 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
* about the size.
*/
BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
- if (!access_ok((void __user *)uva, sizeof(u8)))
- return VMCI_ERROR_GENERIC;
/*
* Lock physical page backing a given user VA.
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 71bbaa56bdb5..92291292756a 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -602,10 +602,10 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
const u32 depth)
{
u32 reg = 0;
- u32 res;
- u32 n, i;
+ int res, i, nr_pages;
+ u32 n;
u32 *addr = NULL;
- struct page *page[MAX_NUM_PAGES];
+ struct page *pages[MAX_NUM_PAGES];
/*
* Writes that go beyond the length of
@@ -622,15 +622,21 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
n += 1;
- res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page);
- if (res < n) {
- for (i = 0; i < res; i++)
- put_page(page[i]);
+ if (WARN_ON_ONCE(n > INT_MAX))
+ return -EINVAL;
+
+ nr_pages = n;
+
+ res = pin_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
+ if (res < nr_pages) {
+ if (res > 0)
+ unpin_user_pages(pages, res);
+
return -EINVAL;
}
- for (i = 0; i < n; i++) {
- addr = kmap(page[i]);
+ for (i = 0; i < nr_pages; i++) {
+ addr = kmap(pages[i]);
do {
xsdfec_regwrite(xsdfec,
base_addr + ((offset + reg) *
@@ -639,9 +645,9 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
reg++;
} while ((reg < len) &&
((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
- put_page(page[i]);
+ unpin_user_page(pages[i]);
}
- return reg;
+ return 0;
}
static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
@@ -649,14 +655,9 @@ static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
struct xsdfec_ldpc_params *ldpc;
int ret, n;
- ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
- if (!ldpc)
- return -ENOMEM;
-
- if (copy_from_user(ldpc, arg, sizeof(*ldpc))) {
- ret = -EFAULT;
- goto err_out;
- }
+ ldpc = memdup_user(arg, sizeof(*ldpc));
+ if (IS_ERR(ldpc))
+ return PTR_ERR(ldpc);
if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
ret = -EIO;
@@ -720,8 +721,6 @@ static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
XSDFEC_QC_TABLE_DEPTH);
- if (ret > 0)
- ret = 0;
err_out:
kfree(ldpc);
return ret;
@@ -1484,25 +1483,7 @@ static struct platform_driver xsdfec_driver = {
.remove = xsdfec_remove,
};
-static int __init xsdfec_init(void)
-{
- int err;
-
- err = platform_driver_register(&xsdfec_driver);
- if (err < 0) {
- pr_err("%s Unabled to register SDFEC driver", __func__);
- return err;
- }
- return 0;
-}
-
-static void __exit xsdfec_exit(void)
-{
- platform_driver_unregister(&xsdfec_driver);
-}
-
-module_init(xsdfec_init);
-module_exit(xsdfec_exit);
+module_platform_driver(xsdfec_driver);
MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 8499b56a15a8..7896952de1ac 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1370,6 +1370,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
struct mmc_request *mrq = &mqrq->brq.mrq;
struct request_queue *q = req->q;
struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
unsigned long flags;
bool put_card;
int err;
@@ -1399,7 +1400,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
spin_lock_irqsave(&mq->lock, flags);
- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+ mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@@ -2483,8 +2484,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
struct mmc_rpmb_data, chrdev);
- put_device(&rpmb->dev);
mmc_blk_put(rpmb->md);
+ put_device(&rpmb->dev);
return 0;
}
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 74de3f2dda38..70207f11a654 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -93,6 +93,20 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
+ if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ retval = add_uevent_var(env, "SDIO_ID=%04X:%04X",
+ card->cis.vendor, card->cis.device);
+ if (retval)
+ return retval;
+ }
+
+ /*
+ * SDIO (non-combo) cards are not handled by the mmc_block driver and do
+ * not have an accessible CID register, which is used by mmc_card_name().
+ */
+ if (card->type == MMC_TYPE_SDIO)
+ return 0;
+
retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
if (retval)
return retval;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 4c5de6d37ac7..8d2b808e9b58 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1815,8 +1815,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int rem, to = from + nr;
int err;
- if (!(card->host->caps & MMC_CAP_ERASE) ||
- !(card->csd.cmdclass & CCC_ERASE))
+ if (!(card->csd.cmdclass & CCC_ERASE))
return -EOPNOTSUPP;
if (!card->erase_size)
@@ -1872,8 +1871,7 @@ EXPORT_SYMBOL(mmc_erase);
int mmc_can_erase(struct mmc_card *card)
{
- if ((card->host->caps & MMC_CAP_ERASE) &&
- (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
+ if (card->csd.cmdclass & CCC_ERASE && card->erase_size)
return 1;
return 0;
}
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 09e0c7659469..9ec84c86c46a 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -219,7 +219,7 @@ static int mmc_clock_opt_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
"%llu\n");
void mmc_add_host_debugfs(struct mmc_host *host)
@@ -232,8 +232,8 @@ void mmc_add_host_debugfs(struct mmc_host *host)
debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops);
debugfs_create_x32("caps", S_IRUSR, root, &host->caps);
debugfs_create_x32("caps2", S_IRUSR, root, &host->caps2);
- debugfs_create_file("clock", S_IRUSR | S_IWUSR, root, host,
- &mmc_clock_fops);
+ debugfs_create_file_unsafe("clock", S_IRUSR | S_IWUSR, root, host,
+ &mmc_clock_fops);
#ifdef CONFIG_FAIL_MMC_REQUEST
if (fail_request)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index de94fbe629bd..4203303f946a 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -647,6 +647,9 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
mmc_hostname(card->host),
card->ext_csd.cmdq_depth);
}
+ card->ext_csd.enhanced_rpmb_supported =
+ (card->ext_csd.rel_param &
+ EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
}
out:
return err;
@@ -786,6 +789,8 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
+MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
+ card->ext_csd.enhanced_rpmb_supported);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
@@ -843,6 +848,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_enhanced_area_offset.attr,
&dev_attr_enhanced_area_size.attr,
&dev_attr_raw_rpmb_size_mult.attr,
+ &dev_attr_enhanced_rpmb_supported.attr,
&dev_attr_rel_sectors.attr,
&dev_attr_ocr.attr,
&dev_attr_rca.attr,
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 25bee3daf9e2..4b1eb89b401d 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -107,11 +107,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
case MMC_ISSUE_DCMD:
if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
if (recovery_needed)
- __mmc_cqe_recovery_notifier(mq);
+ mmc_cqe_recovery_notifier(mrq);
return BLK_EH_RESET_TIMER;
}
- /* No timeout (XXX: huh? comment doesn't make much sense) */
- blk_mq_complete_request(req);
+ /* The request has gone already */
return BLK_EH_DONE;
default:
/* Timeout is handled by mmc core */
@@ -127,18 +126,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
unsigned long flags;
- int ret;
+ bool ignore_tout;
spin_lock_irqsave(&mq->lock, flags);
-
- if (mq->recovery_needed || !mq->use_cqe || host->hsq_enabled)
- ret = BLK_EH_RESET_TIMER;
- else
- ret = mmc_cqe_timed_out(req);
-
+ ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
spin_unlock_irqrestore(&mq->lock, flags);
- return ret;
+ return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
static void mmc_mq_recovery_handler(struct work_struct *work)
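The mmc_mq_timed_out() rework keeps mq->lock held only while sampling the flags that decide whether the timeout should be ignored; mmc_cqe_timed_out(), which may complete the request, now runs after the lock is dropped. The same sample-under-lock, act-outside pattern in miniature (struct my_queue and my_handle_timeout() are hypothetical):

    #include <linux/blk-mq.h>
    #include <linux/spinlock.h>

    struct my_queue {
    	spinlock_t lock;
    	bool recovery_needed;
    };

    static enum blk_eh_timer_return my_handle_timeout(struct request *req);

    static enum blk_eh_timer_return my_timed_out(struct my_queue *mq,
    					     struct request *req)
    {
    	unsigned long flags;
    	bool ignore;

    	spin_lock_irqsave(&mq->lock, flags);
    	ignore = mq->recovery_needed;	/* cheap flag reads only */
    	spin_unlock_irqrestore(&mq->lock, flags);

    	/* the potentially request-completing work runs unlocked */
    	return ignore ? BLK_EH_RESET_TIMER : my_handle_timeout(req);
    }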
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 3dba15bccce2..472fa2fdcf13 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -139,7 +139,7 @@ static const struct mmc_fixup sdio_fixup_methods[] = {
SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
- SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887WLAN,
+ SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887_F0,
add_limit_rate_quirk, 150000000),
END_FIXUP
diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
index b6febbcf8978..96b1d15045d6 100644
--- a/drivers/mmc/core/regulator.c
+++ b/drivers/mmc/core/regulator.c
@@ -136,6 +136,8 @@ static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
int min_uV, int target_uV,
int max_uV)
{
+ int current_uV;
+
/*
* Check if supported first to avoid errors since we may try several
* signal levels during power up and don't want to show errors.
@@ -143,6 +145,14 @@ static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
return -EINVAL;
+ /*
+ * The voltage is already set, no need to switch.
+ * Return 1 to indicate that no switch happened.
+ */
+ current_uV = regulator_get_voltage(regulator);
+ if (current_uV == target_uV)
+ return 1;
+
return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
max_uV);
}
@@ -198,9 +208,10 @@ int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
* voltage in two steps and try to stay close to vmmc
* with a 0.3V tolerance at first.
*/
- if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
- min_uV, volt, max_uV))
- return 0;
+ ret = mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ min_uV, volt, max_uV);
+ if (ret >= 0)
+ return ret;
return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
2700000, volt, 3600000);
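With the hunk above, mmc_regulator_set_voltage_if_supported() — and therefore mmc_regulator_set_vqmmc() — returns a negative errno on failure, 0 when the voltage was switched, and 1 when the rail was already at the target. That is why the dw_mmc and meson-gx hunks further down change their checks from "if (ret)" to "if (ret < 0)". A caller sketch under that convention (my_switch_voltage() is a hypothetical ->start_signal_voltage_switch implementation):

    #include <linux/err.h>
    #include <linux/mmc/host.h>

    static int my_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
    {
    	int ret;

    	if (IS_ERR(mmc->supply.vqmmc))
    		return 0;	/* no vqmmc regulator, nothing to do */

    	ret = mmc_regulator_set_vqmmc(mmc, ios);
    	if (ret < 0)
    		return ret;	/* only a real failure is fatal */

    	/* ret == 1: already at the requested level, treat as success */
    	return 0;
    }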
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 76c7add367d5..5a2210c25aa7 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -376,11 +376,11 @@ int mmc_sd_switch_hs(struct mmc_card *card)
if (!status)
return -ENOMEM;
- err = mmc_sd_switch(card, 1, 0, 1, status);
+ err = mmc_sd_switch(card, 1, 0, HIGH_SPEED_BUS_SPEED, status);
if (err)
goto out;
- if ((status[16] & 0xF) != 1) {
+ if ((status[16] & 0xF) != HIGH_SPEED_BUS_SPEED) {
pr_warn("%s: Problem switching card into high-speed mode!\n",
mmc_hostname(card->host));
err = 0;
@@ -707,7 +707,12 @@ static ssize_t mmc_dsr_show(struct device *dev,
static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
+MMC_DEV_ATTR(vendor, "0x%04x\n", card->cis.vendor);
+MMC_DEV_ATTR(device, "0x%04x\n", card->cis.device);
+
static struct attribute *sd_std_attrs[] = {
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
&dev_attr_cid.attr,
&dev_attr_csd.attr,
&dev_attr_scr.attr,
@@ -726,7 +731,26 @@ static struct attribute *sd_std_attrs[] = {
&dev_attr_dsr.attr,
NULL,
};
-ATTRIBUTE_GROUPS(sd_std);
+
+static umode_t sd_std_is_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ /* CIS vendor and device ids are available only for Combo cards */
+ if ((attr == &dev_attr_vendor.attr || attr == &dev_attr_device.attr) &&
+ card->type != MMC_TYPE_SD_COMBO)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group sd_std_group = {
+ .attrs = sd_std_attrs,
+ .is_visible = sd_std_is_visible,
+};
+__ATTRIBUTE_GROUPS(sd_std);
struct device_type sd_type = {
.groups = sd_std_groups,
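The switch from ATTRIBUTE_GROUPS() to a hand-written attribute_group plus __ATTRIBUTE_GROUPS() is what makes room for the ->is_visible() hook: the convenience macro defines the group itself, leaving no way to set extra fields. Roughly what the two macros expand to (paraphrased from include/linux/sysfs.h):

    #define __ATTRIBUTE_GROUPS(_name)				\
    static const struct attribute_group *_name##_groups[] = {	\
    	&_name##_group,						\
    	NULL,							\
    }

    #define ATTRIBUTE_GROUPS(_name)				\
    static const struct attribute_group _name##_group = {	\
    	.attrs = _name##_attrs,					\
    };								\
    __ATTRIBUTE_GROUPS(_name)

With __ATTRIBUTE_GROUPS(sd_std), the sd_std_groups array still exists for sd_type.groups, while sd_std_group is the manually defined one carrying .is_visible.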
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index ebb387aa5158..b65b26f76d71 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -27,6 +27,24 @@
#include "sdio_ops.h"
#include "sdio_cis.h"
+MMC_DEV_ATTR(vendor, "0x%04x\n", card->cis.vendor);
+MMC_DEV_ATTR(device, "0x%04x\n", card->cis.device);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
+MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
+
+static struct attribute *sdio_std_attrs[] = {
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_ocr.attr,
+ &dev_attr_rca.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sdio_std);
+
+static struct device_type sdio_type = {
+ .groups = sdio_std_groups,
+};
+
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
@@ -543,13 +561,33 @@ out:
return err;
}
-static void mmc_sdio_resend_if_cond(struct mmc_host *host,
- struct mmc_card *card)
+static int mmc_sdio_pre_init(struct mmc_host *host, u32 ocr,
+ struct mmc_card *card)
{
+ if (card)
+ mmc_remove_card(card);
+
+ /*
+ * Reset the card by performing the same steps that are taken by
+ * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
+ *
+ * sdio_reset() is technically not needed. Having just powered up the
+ * hardware, it should already be in reset state. However, some
+ * platforms (such as SD8686 on OLPC) do not instantly cut power,
+ * meaning that a reset is required when restoring power soon after
+ * powering off. It is harmless in other cases.
+ *
+ * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
+ * is not necessary for non-removable cards. However, it is required
+ * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
+ * harmless in other situations.
+ */
+
sdio_reset(host);
mmc_go_idle(host);
- mmc_send_if_cond(host, host->ocr_avail);
- mmc_remove_card(card);
+ mmc_send_if_cond(host, ocr);
+ return mmc_send_io_op_cond(host, 0, NULL);
}
/*
@@ -584,7 +622,7 @@ try_again:
*/
err = mmc_send_io_op_cond(host, ocr, &rocr);
if (err)
- goto err;
+ return err;
/*
* For SPI, enable CRC as appropriate.
@@ -592,17 +630,15 @@ try_again:
if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc);
if (err)
- goto err;
+ return err;
}
/*
* Allocate card structure.
*/
- card = mmc_alloc_card(host, NULL);
- if (IS_ERR(card)) {
- err = PTR_ERR(card);
- goto err;
- }
+ card = mmc_alloc_card(host, &sdio_type);
+ if (IS_ERR(card))
+ return PTR_ERR(card);
if ((rocr & R4_MEMORY_PRESENT) &&
mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
@@ -610,19 +646,15 @@ try_again:
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
- mmc_remove_card(card);
- pr_debug("%s: Perhaps the card was replaced\n",
- mmc_hostname(host));
- return -ENOENT;
+ err = -ENOENT;
+ goto mismatch;
}
} else {
card->type = MMC_TYPE_SDIO;
if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
- mmc_remove_card(card);
- pr_debug("%s: Perhaps the card was replaced\n",
- mmc_hostname(host));
- return -ENOENT;
+ err = -ENOENT;
+ goto mismatch;
}
}
@@ -646,7 +678,7 @@ try_again:
if (rocr & ocr & R4_18V_PRESENT) {
err = mmc_set_uhs_voltage(host, ocr_card);
if (err == -EAGAIN) {
- mmc_sdio_resend_if_cond(host, card);
+ mmc_sdio_pre_init(host, ocr_card, card);
retries--;
goto try_again;
} else if (err) {
@@ -677,7 +709,7 @@ try_again:
if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
err = mmc_sd_get_csd(host, card);
if (err)
- return err;
+ goto remove;
mmc_decode_cid(card);
}
@@ -704,7 +736,12 @@ try_again:
mmc_set_timing(card->host, MMC_TIMING_SD_HS);
}
- goto finish;
+ if (oldcard)
+ mmc_remove_card(card);
+ else
+ host->card = card;
+
+ return 0;
}
/*
@@ -713,14 +750,13 @@ try_again:
*/
err = sdio_read_cccr(card, ocr);
if (err) {
- mmc_sdio_resend_if_cond(host, card);
+ mmc_sdio_pre_init(host, ocr_card, card);
if (ocr & R4_18V_PRESENT) {
/* Retry init sequence, but without R4_18V_PRESENT. */
retries = 0;
goto try_again;
- } else {
- goto remove;
}
+ return err;
}
/*
@@ -731,16 +767,14 @@ try_again:
goto remove;
if (oldcard) {
- int same = (card->cis.vendor == oldcard->cis.vendor &&
- card->cis.device == oldcard->cis.device);
- mmc_remove_card(card);
- if (!same) {
- pr_debug("%s: Perhaps the card was replaced\n",
- mmc_hostname(host));
- return -ENOENT;
+ if (card->cis.vendor == oldcard->cis.vendor &&
+ card->cis.device == oldcard->cis.device) {
+ mmc_remove_card(card);
+ card = oldcard;
+ } else {
+ err = -ENOENT;
+ goto mismatch;
}
-
- card = oldcard;
}
card->ocr = ocr_card;
mmc_fixup_device(card, sdio_fixup_methods);
@@ -801,16 +835,15 @@ try_again:
err = -EINVAL;
goto remove;
}
-finish:
- if (!oldcard)
- host->card = card;
+
+ host->card = card;
return 0;
+mismatch:
+ pr_debug("%s: Perhaps the card was replaced\n", mmc_hostname(host));
remove:
- if (!oldcard)
+ if (oldcard != card)
mmc_remove_card(card);
-
-err:
return err;
}
@@ -818,28 +851,7 @@ static int mmc_sdio_reinit_card(struct mmc_host *host)
{
int ret;
- /*
- * Reset the card by performing the same steps that are taken by
- * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
- *
- * sdio_reset() is technically not needed. Having just powered up the
- * hardware, it should already be in reset state. However, some
- * platforms (such as SD8686 on OLPC) do not instantly cut power,
- * meaning that a reset is required when restoring power soon after
- * powering off. It is harmless in other cases.
- *
- * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
- * is not necessary for non-removable cards. However, it is required
- * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
- * harmless in other situations.
- *
- */
-
- sdio_reset(host);
- mmc_go_idle(host);
- mmc_send_if_cond(host, host->card->ocr);
-
- ret = mmc_send_io_op_cond(host, 0, NULL);
+ ret = mmc_sdio_pre_init(host, host->card->ocr, NULL);
if (ret)
return ret;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 462b5352fea7..0ce332ad986b 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -171,7 +171,7 @@ config MMC_SDHCI_OF_ASPEED
config MMC_SDHCI_OF_AT91
tristate "SDHCI OF support for the Atmel SDMMC controller"
depends on MMC_SDHCI_PLTFM
- depends on OF
+ depends on OF && HAVE_CLK
help
This selects the Atmel SDMMC driver
@@ -235,6 +235,19 @@ config MMC_SDHCI_CNS3XXX
If unsure, say N.
+config MMC_SDHCI_ESDHC_MCF
+ tristate "SDHCI support for the Freescale eSDHC ColdFire controller"
+ depends on M5441x
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Freescale eSDHC controller support for
+ ColdFire mcf5441x devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_ESDHC_IMX
tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller"
depends on ARCH_MXC
@@ -405,6 +418,20 @@ config MMC_MESON_GX
If you have a controller with this interface, say Y here.
+config MMC_MESON_MX_SDHC
+ tristate "Amlogic Meson SDHC Host Controller support"
+ depends on (ARM && ARCH_MESON) || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on OF
+ help
+ This selects support for the SDHC Host Controller on
+ Amlogic Meson6, Meson8, Meson8b and Meson8m2 SoCs.
+ The controller supports the SD/SDIO Spec 3.x and eMMC Spec 4.5x
+ with 1, 4, and 8 bit bus widths.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
config MMC_MESON_MX_SDIO
tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support"
depends on ARCH_MESON || COMPILE_TEST
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index b929ef941208..4d5bcb0144a0 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -68,6 +68,8 @@ obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o
+meson-mx-sdhc-objs := meson-mx-sdhc-clkc.o meson-mx-sdhc-mmc.o
+obj-$(CONFIG_MMC_MESON_MX_SDHC) += meson-mx-sdhc.o
obj-$(CONFIG_MMC_MESON_MX_SDIO) += meson-mx-sdio.o
obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
@@ -82,6 +84,7 @@ obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_CADENCE) += sdhci-cadence.o
obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
+obj-$(CONFIG_MMC_SDHCI_ESDHC_MCF) += sdhci-esdhc-mcf.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index 1aee485d56d4..026ca9194ce5 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to get irq for data line\n");
- return ret;
+ goto free_host;
}
mutex_init(&host->cmd_mutex);
@@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, host);
mmc_add_host(mmc);
return 0;
+
+free_host:
+ mmc_free_host(mmc);
+ return ret;
}
static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 914e17bab3be..ceb4924e02d0 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -27,7 +27,6 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/mmc/mmc.h>
-#include <linux/mmc/sdio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -404,14 +403,6 @@ static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
host->mrq = req;
goldfish_mmc_prepare_data(host, req);
goldfish_mmc_start_command(host, req->cmd);
-
- /*
- * This is to avoid accidentally being detected as an SDIO card
- * in mmc_attach_sdio().
- */
- if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
- req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR))
- req->cmd->error = -EINVAL;
}
static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -482,6 +473,7 @@ static int goldfish_mmc_probe(struct platform_device *pdev)
mmc->f_max = 24000000;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps = MMC_CAP_4_BIT_DATA;
+ mmc->caps2 = MMC_CAP2_NO_SDIO;
/* Use scatterlist DMA to reduce per-transfer costs.
* NOTE max_seg_size assumption that small blocks aren't
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index aeaaa5314924..5cb692687698 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -169,6 +169,7 @@
#define atmci_writel(port, reg, value) \
__raw_writel((value), (port)->regs + reg)
+#define ATMCI_CMD_TIMEOUT_MS 2000
#define AUTOSUSPEND_DELAY 50
#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
@@ -808,6 +809,9 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
static void atmci_send_command(struct atmel_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
+ unsigned int timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
+ ATMCI_CMD_TIMEOUT_MS;
+
WARN_ON(host->cmd);
host->cmd = cmd;
@@ -817,6 +821,8 @@ static void atmci_send_command(struct atmel_mci *host,
atmci_writel(host, ATMCI_ARGR, cmd->arg);
atmci_writel(host, ATMCI_CMDR, cmd_flags);
+
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
}
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
@@ -1314,8 +1320,6 @@ static void atmci_start_request(struct atmel_mci *host,
* prepared yet.)
*/
atmci_writel(host, ATMCI_IER, iflags);
-
- mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
}
static void atmci_queue_request(struct atmel_mci *host,
@@ -1557,6 +1561,8 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
WARN_ON(host->cmd || host->data);
+ del_timer(&host->timer);
+
/*
* Update the MMC clock rate if necessary. This may be
* necessary if set_ios() is called when a different slot is
@@ -1583,8 +1589,6 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
host->state = STATE_IDLE;
}
- del_timer(&host->timer);
-
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 8823680ca42c..9bb1910268ca 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -259,7 +259,7 @@ static void au1xmmc_tasklet_finish(unsigned long param)
au1xmmc_finish_request(host);
}
-static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
+static int au1xmmc_send_command(struct au1xmmc_host *host,
struct mmc_command *cmd, struct mmc_data *data)
{
u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
@@ -302,9 +302,6 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
__raw_writel(cmd->arg, HOST_CMDARG(host));
wmb(); /* drain writebuffer */
- if (wait)
- IRQ_OFF(host, SD_CONFIG_CR);
-
__raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
wmb(); /* drain writebuffer */
@@ -312,19 +309,6 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO)
/* nop */;
- /* Wait for the command to come back */
- if (wait) {
- u32 status = __raw_readl(HOST_STATUS(host));
-
- while (!(status & SD_STATUS_CR))
- status = __raw_readl(HOST_STATUS(host));
-
- /* Clear the CR status */
- __raw_writel(SD_STATUS_CR, HOST_STATUS(host));
-
- IRQ_ON(host, SD_CONFIG_CR);
- }
-
return 0;
}
@@ -711,7 +695,7 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
}
if (!ret)
- ret = au1xmmc_send_command(host, 0, mrq->cmd, mrq->data);
+ ret = au1xmmc_send_command(host, mrq->cmd, mrq->data);
if (ret) {
mrq->cmd->error = ret;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index c3d949847cbd..a0767790a826 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1280,8 +1280,7 @@ static int bcm2835_add_host(struct bcm2835_host *host)
/* host controller capabilities */
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_ERASE |
- MMC_CAP_CMD23;
+ MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_CMD23;
spin_lock_init(&host->lock);
mutex_init(&host->mutex);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 89deb451e0ac..c5da3aaee334 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -1038,8 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
* Disable bounce buffers for max_segs = 1
*/
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
- MMC_CAP_3_3V_DDR;
+ MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;
if (host->use_sg)
mmc->max_segs = 16;
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index e33270e40539..e84ed84ea4cc 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -10,6 +10,8 @@
#include <linux/delay.h>
#include "cb710-mmc.h"
+#define CB710_MMC_REQ_TIMEOUT_MS 2000
+
static const u8 cb710_clock_divider_log2[8] = {
/* 1, 2, 4, 8, 16, 32, 128, 512 */
0, 1, 2, 3, 4, 5, 7, 9
@@ -707,6 +709,12 @@ static int cb710_mmc_init(struct platform_device *pdev)
mmc->f_min = val >> cb710_clock_divider_log2[CB710_MAX_DIVIDER_IDX];
mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
mmc->caps = MMC_CAP_4_BIT_DATA;
+ /*
+ * In cb710_wait_for_event() we use a fixed timeout of ~2s, hence let's
+ * inform the core about it. A future improvement should instead make
+ * use of the cmd->busy_timeout.
+ */
+ mmc->max_busy_timeout = CB710_MMC_REQ_TIMEOUT_MS;
reader = mmc_priv(mmc);
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 23b6f65b3785..50977ff18074 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -424,7 +424,7 @@ static int dw_mci_hi3660_switch_voltage(struct mmc_host *mmc,
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret) {
+ if (ret < 0) {
dev_err(host->dev, "Regulator set error %d\n", ret);
return ret;
}
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index bc5278ab5707..35ae5737c622 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1546,8 +1546,7 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = mmc_regulator_set_vqmmc(mmc, ios);
-
- if (ret) {
+ if (ret < 0) {
dev_dbg(&mmc->class_dev,
"Regulator set error %d - %s V\n",
ret, uhs & v18 ? "1.8" : "3.3");
@@ -2752,12 +2751,6 @@ static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
- /*
- * Support MMC_CAP_ERASE by default.
- * It needs to use trim/discard/erase commands.
- */
- mmc->caps |= MMC_CAP_ERASE;
-
if (host->pdata->pm_caps)
mmc->pm_caps = host->pdata->pm_caps;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index fbae87d1f017..cba7a6fcd178 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -108,6 +108,7 @@
#define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)
#define JZ_MMC_CLK_RATE 24000000
+#define JZ_MMC_REQ_TIMEOUT_MS 5000
enum jz4740_mmc_version {
JZ_MMC_JZ4740,
@@ -440,7 +441,8 @@ static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
if (timeout == 0) {
set_bit(0, &host->waiting);
- mod_timer(&host->timeout_timer, jiffies + 5*HZ);
+ mod_timer(&host->timeout_timer,
+ jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
jz4740_mmc_set_irq_enabled(host, irq, true);
return true;
}
@@ -893,7 +895,8 @@ static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
host->state = JZ4740_MMC_STATE_READ_RESPONSE;
set_bit(0, &host->waiting);
- mod_timer(&host->timeout_timer, jiffies + 5*HZ);
+ mod_timer(&host->timeout_timer,
+ jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
jz4740_mmc_send_command(host, req->cmd);
}
@@ -1023,6 +1026,12 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
mmc->f_min = mmc->f_max / 128;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ /*
+ * We use a fixed timeout of 5s, hence inform the core about it. A
+ * future improvement should instead respect cmd->busy_timeout.
+ */
+ mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;
+
mmc->max_blk_size = (1 << 10) - 1;
mmc->max_blk_count = (1 << 15) - 1;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 35400cf2a2e4..7eb38d7482c6 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1004,6 +1004,8 @@ static int meson_mmc_card_busy(struct mmc_host *mmc)
static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
+ int ret;
+
/* vqmmc regulator is available */
if (!IS_ERR(mmc->supply.vqmmc)) {
/*
@@ -1013,7 +1015,8 @@ static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
* to 1.8v. Please make sure the regulator framework is aware
* of your own regulator constraints
*/
- return mmc_regulator_set_vqmmc(mmc, ios);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
+ return ret < 0 ? ret : 0;
}
/* no vqmmc regulator, assume fixed regulator at 3/3.3V */
diff --git a/drivers/mmc/host/meson-mx-sdhc-clkc.c b/drivers/mmc/host/meson-mx-sdhc-clkc.c
new file mode 100644
index 000000000000..e1f29b279123
--- /dev/null
+++ b/drivers/mmc/host/meson-mx-sdhc-clkc.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson SDHC clock controller
+ *
+ * Copyright (C) 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include "meson-mx-sdhc.h"
+
+#define MESON_SDHC_NUM_BUILTIN_CLKS 6
+
+struct meson_mx_sdhc_clkc {
+ struct clk_mux src_sel;
+ struct clk_divider div;
+ struct clk_gate mod_clk_en;
+ struct clk_gate tx_clk_en;
+ struct clk_gate rx_clk_en;
+ struct clk_gate sd_clk_en;
+};
+
+static const struct clk_parent_data meson_mx_sdhc_src_sel_parents[4] = {
+ { .fw_name = "clkin0" },
+ { .fw_name = "clkin1" },
+ { .fw_name = "clkin2" },
+ { .fw_name = "clkin3" },
+};
+
+static const struct clk_div_table meson_mx_sdhc_div_table[] = {
+ { .div = 6, .val = 5, },
+ { .div = 8, .val = 7, },
+ { .div = 9, .val = 8, },
+ { .div = 10, .val = 9, },
+ { .div = 12, .val = 11, },
+ { .div = 16, .val = 15, },
+ { .div = 18, .val = 17, },
+ { .div = 34, .val = 33, },
+ { .div = 142, .val = 141, },
+ { .div = 850, .val = 849, },
+ { .div = 2126, .val = 2125, },
+ { .div = 4096, .val = 4095, },
+ { /* sentinel */ }
+};
+
+static int meson_mx_sdhc_clk_hw_register(struct device *dev,
+ const char *name_suffix,
+ const struct clk_parent_data *parents,
+ unsigned int num_parents,
+ const struct clk_ops *ops,
+ struct clk_hw *hw)
+{
+ struct clk_init_data init = { };
+ char clk_name[32];
+
+ snprintf(clk_name, sizeof(clk_name), "%s#%s", dev_name(dev),
+ name_suffix);
+
+ init.name = clk_name;
+ init.ops = ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_data = parents;
+ init.num_parents = num_parents;
+
+ hw->init = &init;
+
+ return devm_clk_hw_register(dev, hw);
+}
+
+static int meson_mx_sdhc_gate_clk_hw_register(struct device *dev,
+ const char *name_suffix,
+ struct clk_hw *parent,
+ struct clk_hw *hw)
+{
+ struct clk_parent_data parent_data = { .hw = parent };
+
+ return meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1,
+ &clk_gate_ops, hw);
+}
+
+int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
+ struct clk_bulk_data *clk_bulk_data)
+{
+ struct clk_parent_data div_parent = { };
+ struct meson_mx_sdhc_clkc *clkc_data;
+ int ret;
+
+ clkc_data = devm_kzalloc(dev, sizeof(*clkc_data), GFP_KERNEL);
+ if (!clkc_data)
+ return -ENOMEM;
+
+ clkc_data->src_sel.reg = base + MESON_SDHC_CLKC;
+ clkc_data->src_sel.mask = 0x3;
+ clkc_data->src_sel.shift = 16;
+ ret = meson_mx_sdhc_clk_hw_register(dev, "src_sel",
+ meson_mx_sdhc_src_sel_parents, 4,
+ &clk_mux_ops,
+ &clkc_data->src_sel.hw);
+ if (ret)
+ return ret;
+
+ clkc_data->div.reg = base + MESON_SDHC_CLKC;
+ clkc_data->div.shift = 0;
+ clkc_data->div.width = 12;
+ clkc_data->div.table = meson_mx_sdhc_div_table;
+ div_parent.hw = &clkc_data->src_sel.hw;
+ ret = meson_mx_sdhc_clk_hw_register(dev, "div", &div_parent, 1,
+ &clk_divider_ops,
+ &clkc_data->div.hw);
+ if (ret)
+ return ret;
+
+ clkc_data->mod_clk_en.reg = base + MESON_SDHC_CLKC;
+ clkc_data->mod_clk_en.bit_idx = 15;
+ ret = meson_mx_sdhc_gate_clk_hw_register(dev, "mod_clk_on",
+ &clkc_data->div.hw,
+ &clkc_data->mod_clk_en.hw);
+ if (ret)
+ return ret;
+
+ clkc_data->tx_clk_en.reg = base + MESON_SDHC_CLKC;
+ clkc_data->tx_clk_en.bit_idx = 14;
+ ret = meson_mx_sdhc_gate_clk_hw_register(dev, "tx_clk_on",
+ &clkc_data->div.hw,
+ &clkc_data->tx_clk_en.hw);
+ if (ret)
+ return ret;
+
+ clkc_data->rx_clk_en.reg = base + MESON_SDHC_CLKC;
+ clkc_data->rx_clk_en.bit_idx = 13;
+ ret = meson_mx_sdhc_gate_clk_hw_register(dev, "rx_clk_on",
+ &clkc_data->div.hw,
+ &clkc_data->rx_clk_en.hw);
+ if (ret)
+ return ret;
+
+ clkc_data->sd_clk_en.reg = base + MESON_SDHC_CLKC;
+ clkc_data->sd_clk_en.bit_idx = 12;
+ ret = meson_mx_sdhc_gate_clk_hw_register(dev, "sd_clk_on",
+ &clkc_data->div.hw,
+ &clkc_data->sd_clk_en.hw);
+ if (ret)
+ return ret;
+
+ /*
+ * TODO: Replace clk_hw.clk with devm_clk_hw_get_clk() once that is
+ * available.
+ */
+ clk_bulk_data[0].clk = clkc_data->mod_clk_en.hw.clk;
+ clk_bulk_data[1].clk = clkc_data->sd_clk_en.hw.clk;
+ clk_bulk_data[2].clk = clkc_data->tx_clk_en.hw.clk;
+ clk_bulk_data[3].clk = clkc_data->rx_clk_en.hw.clk;
+
+ return 0;
+}
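meson_mx_sdhc_register_clkc() builds a small composite tree: the four clkin inputs feed a mux, the mux feeds a table-based divider, and the divider parents the four leaf gates handed back through clk_bulk_data[]. A consumer sketch, assuming the bulk array filled in above (my_enable_sdhc_clks() is hypothetical):

    #include <linux/clk.h>

    /*
     *   clkin0..clkin3 -> src_sel (mux) -> div -+-> mod_clk_en
     *                                           +-> sd_clk_en
     *                                           +-> tx_clk_en
     *                                           +-> rx_clk_en
     */
    static int my_enable_sdhc_clks(struct clk_bulk_data *clks)
    {
    	/* all four gates share the mux+divider path upstream */
    	return clk_bulk_prepare_enable(4, clks);
    }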
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
new file mode 100644
index 000000000000..53e3f6a4245a
--- /dev/null
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -0,0 +1,914 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson6/Meson8/Meson8b/Meson8m2 SDHC MMC host controller driver.
+ *
+ * Copyright (C) 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include "meson-mx-sdhc.h"
+
+#define MESON_SDHC_NUM_BULK_CLKS 4
+#define MESON_SDHC_MAX_BLK_SIZE 512
+#define MESON_SDHC_NUM_TUNING_TRIES 10
+
+#define MESON_SDHC_WAIT_CMD_READY_SLEEP_US 1
+#define MESON_SDHC_WAIT_CMD_READY_TIMEOUT_US 100000
+#define MESON_SDHC_WAIT_BEFORE_SEND_SLEEP_US 1
+#define MESON_SDHC_WAIT_BEFORE_SEND_TIMEOUT_US 200
+
+struct meson_mx_sdhc_data {
+ void (*init_hw)(struct mmc_host *mmc);
+ void (*set_pdma)(struct mmc_host *mmc);
+ void (*wait_before_send)(struct mmc_host *mmc);
+ bool hardware_flush_all_cmds;
+};
+
+struct meson_mx_sdhc_host {
+ struct mmc_host *mmc;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ int error;
+
+ struct regmap *regmap;
+
+ struct clk *pclk;
+ struct clk *sd_clk;
+ struct clk_bulk_data bulk_clks[MESON_SDHC_NUM_BULK_CLKS];
+ bool bulk_clks_enabled;
+
+ const struct meson_mx_sdhc_data *platform;
+};
+
+static const struct regmap_config meson_mx_sdhc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MESON_SDHC_CLK2,
+};
+
+static void meson_mx_sdhc_hw_reset(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+
+ regmap_write(host->regmap, MESON_SDHC_SRST, MESON_SDHC_SRST_MAIN_CTRL |
+ MESON_SDHC_SRST_RXFIFO | MESON_SDHC_SRST_TXFIFO |
+ MESON_SDHC_SRST_DPHY_RX | MESON_SDHC_SRST_DPHY_TX |
+ MESON_SDHC_SRST_DMA_IF);
+ usleep_range(10, 100);
+
+ regmap_write(host->regmap, MESON_SDHC_SRST, 0);
+ usleep_range(10, 100);
+}
+
+static void meson_mx_sdhc_clear_fifo(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ u32 stat;
+
+ regmap_read(host->regmap, MESON_SDHC_STAT, &stat);
+ if (!FIELD_GET(MESON_SDHC_STAT_RXFIFO_CNT, stat) &&
+ !FIELD_GET(MESON_SDHC_STAT_TXFIFO_CNT, stat))
+ return;
+
+ regmap_write(host->regmap, MESON_SDHC_SRST, MESON_SDHC_SRST_RXFIFO |
+ MESON_SDHC_SRST_TXFIFO | MESON_SDHC_SRST_MAIN_CTRL);
+ udelay(5);
+
+ regmap_read(host->regmap, MESON_SDHC_STAT, &stat);
+ if (FIELD_GET(MESON_SDHC_STAT_RXFIFO_CNT, stat) ||
+ FIELD_GET(MESON_SDHC_STAT_TXFIFO_CNT, stat))
+ dev_warn(mmc_dev(host->mmc),
+ "Failed to clear FIFOs, RX: %lu, TX: %lu\n",
+ FIELD_GET(MESON_SDHC_STAT_RXFIFO_CNT, stat),
+ FIELD_GET(MESON_SDHC_STAT_TXFIFO_CNT, stat));
+}
+
+static void meson_mx_sdhc_wait_cmd_ready(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ u32 stat, esta;
+ int ret;
+
+ ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_STAT, stat,
+ !(stat & MESON_SDHC_STAT_CMD_BUSY),
+ MESON_SDHC_WAIT_CMD_READY_SLEEP_US,
+ MESON_SDHC_WAIT_CMD_READY_TIMEOUT_US);
+ if (ret) {
+ dev_warn(mmc_dev(mmc),
+ "Failed to poll for CMD_BUSY while processing CMD%d\n",
+ host->cmd->opcode);
+ meson_mx_sdhc_hw_reset(mmc);
+ }
+
+ ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_ESTA, esta,
+ !(esta & MESON_SDHC_ESTA_11_13),
+ MESON_SDHC_WAIT_CMD_READY_SLEEP_US,
+ MESON_SDHC_WAIT_CMD_READY_TIMEOUT_US);
+ if (ret) {
+ dev_warn(mmc_dev(mmc),
+ "Failed to poll for ESTA[13:11] while processing CMD%d\n",
+ host->cmd->opcode);
+ meson_mx_sdhc_hw_reset(mmc);
+ }
+}
+
+static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ u32 ictl, send;
+ int pack_len;
+
+ host->cmd = cmd;
+
+ ictl = MESON_SDHC_ICTL_DATA_TIMEOUT | MESON_SDHC_ICTL_DATA_ERR_CRC |
+ MESON_SDHC_ICTL_RXFIFO_FULL | MESON_SDHC_ICTL_TXFIFO_EMPTY |
+ MESON_SDHC_ICTL_RESP_TIMEOUT | MESON_SDHC_ICTL_RESP_ERR_CRC;
+
+ send = FIELD_PREP(MESON_SDHC_SEND_CMD_INDEX, cmd->opcode);
+
+ if (cmd->data) {
+ send |= MESON_SDHC_SEND_CMD_HAS_DATA;
+ send |= FIELD_PREP(MESON_SDHC_SEND_TOTAL_PACK,
+ cmd->data->blocks - 1);
+
+ if (cmd->data->blksz < MESON_SDHC_MAX_BLK_SIZE)
+ pack_len = cmd->data->blksz;
+ else
+ pack_len = 0;
+
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ send |= MESON_SDHC_SEND_DATA_DIR;
+
+ /*
+ * For a command without data, just wait for the response done
+ * interrupt (int[0]); for a command with a data transfer, just
+ * wait for the DMA done interrupt (int[11]) - no need to care
+ * whether dat0 is busy or not.
+ */
+ if (host->platform->hardware_flush_all_cmds ||
+ cmd->data->flags & MMC_DATA_WRITE)
+ /* hardware flush: */
+ ictl |= MESON_SDHC_ICTL_DMA_DONE;
+ else
+ /* software flush: */
+ ictl |= MESON_SDHC_ICTL_DATA_XFER_OK;
+ } else {
+ pack_len = 0;
+
+ ictl |= MESON_SDHC_ICTL_RESP_OK;
+ }
+
+ if (cmd->opcode == MMC_STOP_TRANSMISSION)
+ send |= MESON_SDHC_SEND_DATA_STOP;
+
+ if (cmd->flags & MMC_RSP_PRESENT)
+ send |= MESON_SDHC_SEND_CMD_HAS_RESP;
+
+ if (cmd->flags & MMC_RSP_136) {
+ send |= MESON_SDHC_SEND_RESP_LEN;
+ send |= MESON_SDHC_SEND_RESP_NO_CRC;
+ }
+
+ if (!(cmd->flags & MMC_RSP_CRC))
+ send |= MESON_SDHC_SEND_RESP_NO_CRC;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ send |= MESON_SDHC_SEND_R1B;
+
+ /* enable the new IRQs and mask all pending ones */
+ regmap_write(host->regmap, MESON_SDHC_ICTL, ictl);
+ regmap_write(host->regmap, MESON_SDHC_ISTA, MESON_SDHC_ISTA_ALL_IRQS);
+
+ regmap_write(host->regmap, MESON_SDHC_ARGU, cmd->arg);
+
+ regmap_update_bits(host->regmap, MESON_SDHC_CTRL,
+ MESON_SDHC_CTRL_PACK_LEN,
+ FIELD_PREP(MESON_SDHC_CTRL_PACK_LEN, pack_len));
+
+ if (cmd->data)
+ regmap_write(host->regmap, MESON_SDHC_ADDR,
+ sg_dma_address(cmd->data->sg));
+
+ meson_mx_sdhc_wait_cmd_ready(mmc);
+
+ if (cmd->data)
+ host->platform->set_pdma(mmc);
+
+ if (host->platform->wait_before_send)
+ host->platform->wait_before_send(mmc);
+
+ regmap_write(host->regmap, MESON_SDHC_SEND, send);
+}
+
+static void meson_mx_sdhc_disable_clks(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+
+ if (!host->bulk_clks_enabled)
+ return;
+
+ clk_bulk_disable_unprepare(MESON_SDHC_NUM_BULK_CLKS, host->bulk_clks);
+
+ host->bulk_clks_enabled = false;
+}
+
+static int meson_mx_sdhc_enable_clks(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ int ret;
+
+ if (host->bulk_clks_enabled)
+ return 0;
+
+ ret = clk_bulk_prepare_enable(MESON_SDHC_NUM_BULK_CLKS,
+ host->bulk_clks);
+ if (ret)
+ return ret;
+
+ host->bulk_clks_enabled = true;
+
+ return 0;
+}
+
+static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ u32 rx_clk_phase;
+ int ret;
+
+ meson_mx_sdhc_disable_clks(mmc);
+
+ if (ios->clock) {
+ ret = clk_set_rate(host->sd_clk, ios->clock);
+ if (ret) {
+ dev_warn(mmc_dev(mmc),
+ "Failed to set MMC clock to %uHz: %d\n",
+ ios->clock, ret);
+ return ret;
+ }
+
+ ret = meson_mx_sdhc_enable_clks(mmc);
+ if (ret)
+ return ret;
+
+ mmc->actual_clock = clk_get_rate(host->sd_clk);
+
+ /*
+ * According to Amlogic, the following latching points were chosen
+ * empirically; there is no (known) formula to calculate them.
+ */
+ if (mmc->actual_clock > 100000000) {
+ rx_clk_phase = 1;
+ } else if (mmc->actual_clock > 45000000) {
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ rx_clk_phase = 15;
+ else
+ rx_clk_phase = 11;
+ } else if (mmc->actual_clock >= 25000000) {
+ rx_clk_phase = 15;
+ } else if (mmc->actual_clock > 5000000) {
+ rx_clk_phase = 23;
+ } else if (mmc->actual_clock > 1000000) {
+ rx_clk_phase = 55;
+ } else {
+ rx_clk_phase = 1061;
+ }
+
+ regmap_update_bits(host->regmap, MESON_SDHC_CLK2,
+ MESON_SDHC_CLK2_RX_CLK_PHASE,
+ FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE,
+ rx_clk_phase));
+ } else {
+ mmc->actual_clock = 0;
+ }
+
+ return 0;
+}
+
+static void meson_mx_sdhc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ unsigned short vdd = ios->vdd;
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ vdd = 0;
+ fallthrough;
+
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ host->error = mmc_regulator_set_ocr(mmc,
+ mmc->supply.vmmc,
+ vdd);
+ if (host->error)
+ return;
+ }
+
+ break;
+
+ case MMC_POWER_ON:
+ break;
+ }
+
+ host->error = meson_mx_sdhc_set_clk(mmc, ios);
+ if (host->error)
+ return;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ regmap_update_bits(host->regmap, MESON_SDHC_CTRL,
+ MESON_SDHC_CTRL_DAT_TYPE,
+ FIELD_PREP(MESON_SDHC_CTRL_DAT_TYPE, 0));
+ break;
+
+ case MMC_BUS_WIDTH_4:
+ regmap_update_bits(host->regmap, MESON_SDHC_CTRL,
+ MESON_SDHC_CTRL_DAT_TYPE,
+ FIELD_PREP(MESON_SDHC_CTRL_DAT_TYPE, 1));
+ break;
+
+ case MMC_BUS_WIDTH_8:
+ regmap_update_bits(host->regmap, MESON_SDHC_CTRL,
+ MESON_SDHC_CTRL_DAT_TYPE,
+ FIELD_PREP(MESON_SDHC_CTRL_DAT_TYPE, 2));
+ break;
+
+ default:
+ dev_err(mmc_dev(mmc), "unsupported bus width: %d\n",
+ ios->bus_width);
+ host->error = -EINVAL;
+ return;
+ }
+}
+
+static int meson_mx_sdhc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ int dma_len;
+
+ if (!data)
+ return 0;
+
+ dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (dma_len <= 0) {
+ dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void meson_mx_sdhc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+
+ if (!host->error)
+ host->error = meson_mx_sdhc_map_dma(mmc, mrq);
+
+ if (host->error) {
+ cmd->error = host->error;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ host->mrq = mrq;
+
+ meson_mx_sdhc_start_cmd(mmc, mrq->cmd);
+}
+
+static int meson_mx_sdhc_card_busy(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ u32 stat;
+
+ regmap_read(host->regmap, MESON_SDHC_STAT, &stat);
+ return FIELD_GET(MESON_SDHC_STAT_DAT3_0, stat) == 0;
+}
+
+static bool meson_mx_sdhc_tuning_point_matches(struct mmc_host *mmc,
+ u32 opcode)
+{
+ unsigned int i, num_matches = 0;
+ int ret;
+
+ for (i = 0; i < MESON_SDHC_NUM_TUNING_TRIES; i++) {
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (!ret)
+ num_matches++;
+ }
+
+ return num_matches == MESON_SDHC_NUM_TUNING_TRIES;
+}
+
+static int meson_mx_sdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ int div, start, len, best_start, best_len;
+ int curr_phase, old_phase, new_phase;
+ u32 val;
+
+ len = 0;
+ start = 0;
+ best_len = 0;
+
+ regmap_read(host->regmap, MESON_SDHC_CLK2, &val);
+ old_phase = FIELD_GET(MESON_SDHC_CLK2_RX_CLK_PHASE, val);
+
+ regmap_read(host->regmap, MESON_SDHC_CLKC, &val);
+ div = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val);
+
+ for (curr_phase = 0; curr_phase <= div; curr_phase++) {
+ regmap_update_bits(host->regmap, MESON_SDHC_CLK2,
+ MESON_SDHC_CLK2_RX_CLK_PHASE,
+ FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE,
+ curr_phase));
+
+ if (meson_mx_sdhc_tuning_point_matches(mmc, opcode)) {
+ if (!len) {
+ start = curr_phase;
+
+ dev_dbg(mmc_dev(mmc),
+ "New RX phase window starts at %u\n",
+ start);
+ }
+
+ len++;
+ } else {
+ if (len > best_len) {
+ best_start = start;
+ best_len = len;
+
+ dev_dbg(mmc_dev(mmc),
+ "New best RX phase window: %u - %u\n",
+ best_start, best_start + best_len);
+ }
+
+ /* reset the current window */
+ len = 0;
+ }
+ }
+
+ if (len > best_len)
+ /* the last window is the best (or possibly only) window */
+ new_phase = start + (len / 2);
+ else if (best_len)
+ /* there was a better window than the last */
+ new_phase = best_start + (best_len / 2);
+ else
+ /* no window was found at all, reset to the original phase */
+ new_phase = old_phase;
+
+ regmap_update_bits(host->regmap, MESON_SDHC_CLK2,
+ MESON_SDHC_CLK2_RX_CLK_PHASE,
+ FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE,
+ new_phase));
+
+ if (!len && !best_len)
+ return -EIO;
+
+ dev_dbg(mmc_dev(mmc), "Tuned RX clock phase to %u\n", new_phase);
+
+ return 0;
+}
+
+static const struct mmc_host_ops meson_mx_sdhc_ops = {
+ .hw_reset = meson_mx_sdhc_hw_reset,
+ .request = meson_mx_sdhc_request,
+ .set_ios = meson_mx_sdhc_set_ios,
+ .card_busy = meson_mx_sdhc_card_busy,
+ .execute_tuning = meson_mx_sdhc_execute_tuning,
+ .get_cd = mmc_gpio_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+};
+
+static void meson_mx_sdhc_request_done(struct meson_mx_sdhc_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_host *mmc = host->mmc;
+
+ /* disable interrupts and mask all pending ones */
+ regmap_update_bits(host->regmap, MESON_SDHC_ICTL,
+ MESON_SDHC_ICTL_ALL_IRQS, 0);
+ regmap_update_bits(host->regmap, MESON_SDHC_ISTA,
+ MESON_SDHC_ISTA_ALL_IRQS, MESON_SDHC_ISTA_ALL_IRQS);
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+
+ mmc_request_done(mmc, mrq);
+}
+
+static u32 meson_mx_sdhc_read_response(struct meson_mx_sdhc_host *host, u8 idx)
+{
+ u32 val;
+
+ regmap_update_bits(host->regmap, MESON_SDHC_PDMA,
+ MESON_SDHC_PDMA_DMA_MODE, 0);
+
+ regmap_update_bits(host->regmap, MESON_SDHC_PDMA,
+ MESON_SDHC_PDMA_PIO_RDRESP,
+ FIELD_PREP(MESON_SDHC_PDMA_PIO_RDRESP, idx));
+
+ regmap_read(host->regmap, MESON_SDHC_ARGU, &val);
+
+ return val;
+}
+
+static irqreturn_t meson_mx_sdhc_irq(int irq, void *data)
+{
+ struct meson_mx_sdhc_host *host = data;
+ struct mmc_command *cmd = host->cmd;
+ u32 ictl, ista;
+
+ regmap_read(host->regmap, MESON_SDHC_ICTL, &ictl);
+ regmap_read(host->regmap, MESON_SDHC_ISTA, &ista);
+
+ if (!(ictl & ista))
+ return IRQ_NONE;
+
+ if (ista & MESON_SDHC_ISTA_RXFIFO_FULL ||
+ ista & MESON_SDHC_ISTA_TXFIFO_EMPTY)
+ cmd->error = -EIO;
+ else if (ista & MESON_SDHC_ISTA_RESP_ERR_CRC)
+ cmd->error = -EILSEQ;
+ else if (ista & MESON_SDHC_ISTA_RESP_TIMEOUT)
+ cmd->error = -ETIMEDOUT;
+
+ if (cmd->data) {
+ if (ista & MESON_SDHC_ISTA_DATA_ERR_CRC)
+ cmd->data->error = -EILSEQ;
+ else if (ista & MESON_SDHC_ISTA_DATA_TIMEOUT)
+ cmd->data->error = -ETIMEDOUT;
+ }
+
+ if (cmd->error || (cmd->data && cmd->data->error))
+ dev_dbg(mmc_dev(host->mmc), "CMD%d error, ISTA: 0x%08x\n",
+ cmd->opcode, ista);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t meson_mx_sdhc_irq_thread(int irq, void *irq_data)
+{
+ struct meson_mx_sdhc_host *host = irq_data;
+ struct mmc_command *cmd;
+ u32 val;
+
+ cmd = host->cmd;
+ if (WARN_ON(!cmd))
+ return IRQ_HANDLED;
+
+ if (cmd->data && !cmd->data->error) {
+ if (!host->platform->hardware_flush_all_cmds &&
+ cmd->data->flags & MMC_DATA_READ) {
+ meson_mx_sdhc_wait_cmd_ready(host->mmc);
+
+ /*
+ * If MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH was
+ * previously 0x1 then it has to be set to 0x3. If it
+ * was 0x0 before then it has to be set to 0x2. Without this,
+ * reading from SD cards sometimes transfers garbage,
+ * which results in cards not being detected due to:
+ * unrecognised SCR structure version <random number>
+ */
+ val = FIELD_PREP(MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH,
+ 2);
+ regmap_update_bits(host->regmap, MESON_SDHC_PDMA, val,
+ val);
+ }
+
+ dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg,
+ cmd->data->sg_len, mmc_get_dma_dir(cmd->data));
+
+ cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks;
+ }
+
+ meson_mx_sdhc_wait_cmd_ready(host->mmc);
+
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = meson_mx_sdhc_read_response(host, 4);
+ cmd->resp[1] = meson_mx_sdhc_read_response(host, 3);
+ cmd->resp[2] = meson_mx_sdhc_read_response(host, 2);
+ cmd->resp[3] = meson_mx_sdhc_read_response(host, 1);
+ } else {
+ cmd->resp[0] = meson_mx_sdhc_read_response(host, 0);
+ }
+
+ if (cmd->error == -EIO || cmd->error == -ETIMEDOUT)
+ meson_mx_sdhc_hw_reset(host->mmc);
+ else if (cmd->data)
+ /*
+ * Clear the FIFOs after completing data transfers to prevent
+ * corrupting data on write access. It's not clear why this is
+ * needed (for reads and writes), but it mimics what the BSP
+ * kernel did.
+ */
+ meson_mx_sdhc_clear_fifo(host->mmc);
+
+ meson_mx_sdhc_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static void meson_mx_sdhc_init_hw_meson8(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+
+ regmap_write(host->regmap, MESON_SDHC_MISC,
+ FIELD_PREP(MESON_SDHC_MISC_TXSTART_THRES, 7) |
+ FIELD_PREP(MESON_SDHC_MISC_WCRC_ERR_PATT, 5) |
+ FIELD_PREP(MESON_SDHC_MISC_WCRC_OK_PATT, 2));
+
+ regmap_write(host->regmap, MESON_SDHC_ENHC,
+ FIELD_PREP(MESON_SDHC_ENHC_RXFIFO_TH, 63) |
+ MESON_SDHC_ENHC_MESON6_DMA_WR_RESP |
+ FIELD_PREP(MESON_SDHC_ENHC_MESON6_RX_TIMEOUT, 255) |
+ FIELD_PREP(MESON_SDHC_ENHC_SDIO_IRQ_PERIOD, 12));
+};
+
+static void meson_mx_sdhc_set_pdma_meson8(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+
+ if (host->cmd->data->flags & MMC_DATA_WRITE)
+ regmap_update_bits(host->regmap, MESON_SDHC_PDMA,
+ MESON_SDHC_PDMA_DMA_MODE |
+ MESON_SDHC_PDMA_RD_BURST |
+ MESON_SDHC_PDMA_TXFIFO_FILL,
+ MESON_SDHC_PDMA_DMA_MODE |
+ FIELD_PREP(MESON_SDHC_PDMA_RD_BURST, 31) |
+ MESON_SDHC_PDMA_TXFIFO_FILL);
+ else
+ regmap_update_bits(host->regmap, MESON_SDHC_PDMA,
+ MESON_SDHC_PDMA_DMA_MODE |
+ MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH,
+ MESON_SDHC_PDMA_DMA_MODE |
+ FIELD_PREP(MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH,
+ 1));
+
+ if (host->cmd->data->flags & MMC_DATA_WRITE)
+ regmap_update_bits(host->regmap, MESON_SDHC_PDMA,
+ MESON_SDHC_PDMA_RD_BURST,
+ FIELD_PREP(MESON_SDHC_PDMA_RD_BURST, 15));
+}
+
+static void meson_mx_sdhc_wait_before_send_meson8(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ u32 val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_ESTA, val,
+ val == 0,
+ MESON_SDHC_WAIT_BEFORE_SEND_SLEEP_US,
+ MESON_SDHC_WAIT_BEFORE_SEND_TIMEOUT_US);
+ if (ret)
+ dev_warn(mmc_dev(mmc),
+ "Failed to wait for ESTA to clear: 0x%08x\n", val);
+
+ if (host->cmd->data && host->cmd->data->flags & MMC_DATA_WRITE) {
+ ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_STAT,
+ val, val & MESON_SDHC_STAT_TXFIFO_CNT,
+ MESON_SDHC_WAIT_BEFORE_SEND_SLEEP_US,
+ MESON_SDHC_WAIT_BEFORE_SEND_TIMEOUT_US);
+ if (ret)
+ dev_warn(mmc_dev(mmc),
+ "Failed to wait for TX FIFO to fill\n");
+ }
+}
+
+static void meson_mx_sdhc_init_hw_meson8m2(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+
+ regmap_write(host->regmap, MESON_SDHC_MISC,
+ FIELD_PREP(MESON_SDHC_MISC_TXSTART_THRES, 6) |
+ FIELD_PREP(MESON_SDHC_MISC_WCRC_ERR_PATT, 5) |
+ FIELD_PREP(MESON_SDHC_MISC_WCRC_OK_PATT, 2));
+
+ regmap_write(host->regmap, MESON_SDHC_ENHC,
+ FIELD_PREP(MESON_SDHC_ENHC_RXFIFO_TH, 64) |
+ FIELD_PREP(MESON_SDHC_ENHC_MESON8M2_DEBUG, 1) |
+ MESON_SDHC_ENHC_MESON8M2_WRRSP_MODE |
+ FIELD_PREP(MESON_SDHC_ENHC_SDIO_IRQ_PERIOD, 12));
+}
+
+static void meson_mx_sdhc_set_pdma_meson8m2(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+
+ regmap_update_bits(host->regmap, MESON_SDHC_PDMA,
+ MESON_SDHC_PDMA_DMA_MODE, MESON_SDHC_PDMA_DMA_MODE);
+}
+
+static void meson_mx_sdhc_init_hw(struct mmc_host *mmc)
+{
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+
+ meson_mx_sdhc_hw_reset(mmc);
+
+ regmap_write(host->regmap, MESON_SDHC_CTRL,
+ FIELD_PREP(MESON_SDHC_CTRL_RX_PERIOD, 0xf) |
+ FIELD_PREP(MESON_SDHC_CTRL_RX_TIMEOUT, 0x7f) |
+ FIELD_PREP(MESON_SDHC_CTRL_RX_ENDIAN, 0x7) |
+ FIELD_PREP(MESON_SDHC_CTRL_TX_ENDIAN, 0x7));
+
+ /*
+ * start with a valid divider and enable the memory (un-setting
+ * MESON_SDHC_CLKC_MEM_PWR_OFF).
+ */
+ regmap_write(host->regmap, MESON_SDHC_CLKC, MESON_SDHC_CLKC_CLK_DIV);
+
+ regmap_write(host->regmap, MESON_SDHC_CLK2,
+ FIELD_PREP(MESON_SDHC_CLK2_SD_CLK_PHASE, 1));
+
+ regmap_write(host->regmap, MESON_SDHC_PDMA,
+ MESON_SDHC_PDMA_DMA_URGENT |
+ FIELD_PREP(MESON_SDHC_PDMA_WR_BURST, 7) |
+ FIELD_PREP(MESON_SDHC_PDMA_TXFIFO_TH, 49) |
+ FIELD_PREP(MESON_SDHC_PDMA_RD_BURST, 15) |
+ FIELD_PREP(MESON_SDHC_PDMA_RXFIFO_TH, 7));
+
+ /* some initialization bits depend on the SoC: */
+ host->platform->init_hw(mmc);
+
+ /* disable and mask all interrupts: */
+ regmap_write(host->regmap, MESON_SDHC_ICTL, 0);
+ regmap_write(host->regmap, MESON_SDHC_ISTA, MESON_SDHC_ISTA_ALL_IRQS);
+}
+
+static int meson_mx_sdhc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_mx_sdhc_host *host;
+ struct mmc_host *mmc;
+ void __iomem *base;
+ int ret, irq;
+
+ mmc = mmc_alloc_host(sizeof(*host), dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, (void(*)(void *))mmc_free_host,
+ mmc);
+ if (ret) {
+ dev_err(dev, "Failed to register mmc_free_host action\n");
+ return ret;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ platform_set_drvdata(pdev, host);
+
+ host->platform = device_get_match_data(dev);
+ if (!host->platform)
+ return -EINVAL;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ host->regmap = devm_regmap_init_mmio(dev, base,
+ &meson_mx_sdhc_regmap_config);
+ if (IS_ERR(host->regmap))
+ return PTR_ERR(host->regmap);
+
+ host->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(host->pclk))
+ return PTR_ERR(host->pclk);
+
+ /* accessing any register requires the module clock to be enabled: */
+ ret = clk_prepare_enable(host->pclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable 'pclk' clock\n");
+ return ret;
+ }
+
+ meson_mx_sdhc_init_hw(mmc);
+
+ ret = meson_mx_sdhc_register_clkc(dev, base, host->bulk_clks);
+ if (ret)
+ goto err_disable_pclk;
+
+ host->sd_clk = host->bulk_clks[1].clk;
+
+ /* Get regulators and the supported OCR mask */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret)
+ goto err_disable_pclk;
+
+ mmc->max_req_size = SZ_128K;
+ mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_blk_count = FIELD_GET(MESON_SDHC_SEND_TOTAL_PACK, ~0);
+ mmc->max_blk_size = MESON_SDHC_MAX_BLK_SIZE;
+ mmc->max_busy_timeout = 30 * MSEC_PER_SEC;
+ mmc->f_min = clk_round_rate(host->sd_clk, 1);
+ mmc->f_max = clk_round_rate(host->sd_clk, ULONG_MAX);
+ mmc->max_current_180 = 300;
+ mmc->max_current_330 = 300;
+ mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_HW_RESET;
+ mmc->ops = &meson_mx_sdhc_ops;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_disable_pclk;
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(dev, irq, meson_mx_sdhc_irq,
+ meson_mx_sdhc_irq_thread, IRQF_ONESHOT,
+ NULL, host);
+ if (ret)
+ goto err_disable_pclk;
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto err_disable_pclk;
+
+ return 0;
+
+err_disable_pclk:
+ clk_disable_unprepare(host->pclk);
+ return ret;
+}
+
+static int meson_mx_sdhc_remove(struct platform_device *pdev)
+{
+ struct meson_mx_sdhc_host *host = platform_get_drvdata(pdev);
+
+ mmc_remove_host(host->mmc);
+
+ meson_mx_sdhc_disable_clks(host->mmc);
+
+ clk_disable_unprepare(host->pclk);
+
+ return 0;
+}
+
+static const struct meson_mx_sdhc_data meson_mx_sdhc_data_meson8 = {
+ .init_hw = meson_mx_sdhc_init_hw_meson8,
+ .set_pdma = meson_mx_sdhc_set_pdma_meson8,
+ .wait_before_send = meson_mx_sdhc_wait_before_send_meson8,
+ .hardware_flush_all_cmds = false,
+};
+
+static const struct meson_mx_sdhc_data meson_mx_sdhc_data_meson8m2 = {
+ .init_hw = meson_mx_sdhc_init_hw_meson8m2,
+ .set_pdma = meson_mx_sdhc_set_pdma_meson8m2,
+ .hardware_flush_all_cmds = true,
+};
+
+static const struct of_device_id meson_mx_sdhc_of_match[] = {
+ {
+ .compatible = "amlogic,meson8-sdhc",
+ .data = &meson_mx_sdhc_data_meson8
+ },
+ {
+ .compatible = "amlogic,meson8b-sdhc",
+ .data = &meson_mx_sdhc_data_meson8
+ },
+ {
+ .compatible = "amlogic,meson8m2-sdhc",
+ .data = &meson_mx_sdhc_data_meson8m2
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_mx_sdhc_of_match);
+
+static struct platform_driver meson_mx_sdhc_driver = {
+ .probe = meson_mx_sdhc_probe,
+ .remove = meson_mx_sdhc_remove,
+ .driver = {
+ .name = "meson-mx-sdhc",
+ .of_match_table = of_match_ptr(meson_mx_sdhc_of_match),
+ },
+};
+
+module_platform_driver(meson_mx_sdhc_driver);
+
+MODULE_DESCRIPTION("Meson6, Meson8, Meson8b and Meson8m2 SDHC Host Driver");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/meson-mx-sdhc.h b/drivers/mmc/host/meson-mx-sdhc.h
new file mode 100644
index 000000000000..230e8fbe6b3f
--- /dev/null
+++ b/drivers/mmc/host/meson-mx-sdhc.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#ifndef _MESON_MX_SDHC_H_
+#define _MESON_MX_SDHC_H_
+
+#include <linux/bitfield.h>
+
+#define MESON_SDHC_ARGU 0x00
+
+#define MESON_SDHC_SEND 0x04
+ #define MESON_SDHC_SEND_CMD_INDEX GENMASK(5, 0)
+ #define MESON_SDHC_SEND_CMD_HAS_RESP BIT(6)
+ #define MESON_SDHC_SEND_CMD_HAS_DATA BIT(7)
+ #define MESON_SDHC_SEND_RESP_LEN BIT(8)
+ #define MESON_SDHC_SEND_RESP_NO_CRC BIT(9)
+ #define MESON_SDHC_SEND_DATA_DIR BIT(10)
+ #define MESON_SDHC_SEND_DATA_STOP BIT(11)
+ #define MESON_SDHC_SEND_R1B BIT(12)
+ #define MESON_SDHC_SEND_TOTAL_PACK GENMASK(31, 16)
+
+#define MESON_SDHC_CTRL 0x08
+ #define MESON_SDHC_CTRL_DAT_TYPE GENMASK(1, 0)
+ #define MESON_SDHC_CTRL_DDR_MODE BIT(2)
+ #define MESON_SDHC_CTRL_TX_CRC_NOCHECK BIT(3)
+ #define MESON_SDHC_CTRL_PACK_LEN GENMASK(12, 4)
+ #define MESON_SDHC_CTRL_RX_TIMEOUT GENMASK(19, 13)
+ #define MESON_SDHC_CTRL_RX_PERIOD GENMASK(23, 20)
+ #define MESON_SDHC_CTRL_RX_ENDIAN GENMASK(26, 24)
+ #define MESON_SDHC_CTRL_SDIO_IRQ_MODE BIT(27)
+ #define MESON_SDHC_CTRL_DAT0_IRQ_SEL BIT(28)
+ #define MESON_SDHC_CTRL_TX_ENDIAN GENMASK(31, 29)
+
+#define MESON_SDHC_STAT 0x0c
+ #define MESON_SDHC_STAT_CMD_BUSY BIT(0)
+ #define MESON_SDHC_STAT_DAT3_0 GENMASK(4, 1)
+ #define MESON_SDHC_STAT_CMD BIT(5)
+ #define MESON_SDHC_STAT_RXFIFO_CNT GENMASK(12, 6)
+ #define MESON_SDHC_STAT_TXFIFO_CNT GENMASK(19, 13)
+ #define MESON_SDHC_STAT_DAT7_4 GENMASK(23, 20)
+
+#define MESON_SDHC_CLKC 0x10
+ #define MESON_SDHC_CLKC_CLK_DIV GENMASK(11, 0)
+ #define MESON_SDHC_CLKC_CLK_JIC BIT(24)
+ #define MESON_SDHC_CLKC_MEM_PWR_OFF GENMASK(26, 25)
+
+#define MESON_SDHC_ADDR 0x14
+
+#define MESON_SDHC_PDMA 0x18
+ #define MESON_SDHC_PDMA_DMA_MODE BIT(0)
+ #define MESON_SDHC_PDMA_PIO_RDRESP GENMASK(3, 1)
+ #define MESON_SDHC_PDMA_DMA_URGENT BIT(4)
+ #define MESON_SDHC_PDMA_WR_BURST GENMASK(9, 5)
+ #define MESON_SDHC_PDMA_RD_BURST GENMASK(14, 10)
+ #define MESON_SDHC_PDMA_RXFIFO_TH GENMASK(21, 15)
+ #define MESON_SDHC_PDMA_TXFIFO_TH GENMASK(28, 22)
+ #define MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH GENMASK(30, 29)
+ #define MESON_SDHC_PDMA_TXFIFO_FILL BIT(31)
+
+#define MESON_SDHC_MISC 0x1c
+ #define MESON_SDHC_MISC_WCRC_ERR_PATT GENMASK(6, 4)
+ #define MESON_SDHC_MISC_WCRC_OK_PATT GENMASK(9, 7)
+ #define MESON_SDHC_MISC_BURST_NUM GENMASK(21, 16)
+ #define MESON_SDHC_MISC_THREAD_ID GENMASK(27, 22)
+ #define MESON_SDHC_MISC_MANUAL_STOP BIT(28)
+ #define MESON_SDHC_MISC_TXSTART_THRES GENMASK(31, 29)
+
+#define MESON_SDHC_DATA 0x20
+
+#define MESON_SDHC_ICTL 0x24
+ #define MESON_SDHC_ICTL_RESP_OK BIT(0)
+ #define MESON_SDHC_ICTL_RESP_TIMEOUT BIT(1)
+ #define MESON_SDHC_ICTL_RESP_ERR_CRC BIT(2)
+ #define MESON_SDHC_ICTL_RESP_OK_NOCLEAR BIT(3)
+ #define MESON_SDHC_ICTL_DATA_1PACK_OK BIT(4)
+ #define MESON_SDHC_ICTL_DATA_TIMEOUT BIT(5)
+ #define MESON_SDHC_ICTL_DATA_ERR_CRC BIT(6)
+ #define MESON_SDHC_ICTL_DATA_XFER_OK BIT(7)
+ #define MESON_SDHC_ICTL_RX_HIGHER BIT(8)
+ #define MESON_SDHC_ICTL_RX_LOWER BIT(9)
+ #define MESON_SDHC_ICTL_DAT1_IRQ BIT(10)
+ #define MESON_SDHC_ICTL_DMA_DONE BIT(11)
+ #define MESON_SDHC_ICTL_RXFIFO_FULL BIT(12)
+ #define MESON_SDHC_ICTL_TXFIFO_EMPTY BIT(13)
+ #define MESON_SDHC_ICTL_ADDI_DAT1_IRQ BIT(14)
+ #define MESON_SDHC_ICTL_ALL_IRQS GENMASK(14, 0)
+ #define MESON_SDHC_ICTL_DAT1_IRQ_DELAY GENMASK(17, 16)
+
+#define MESON_SDHC_ISTA 0x28
+ #define MESON_SDHC_ISTA_RESP_OK BIT(0)
+ #define MESON_SDHC_ISTA_RESP_TIMEOUT BIT(1)
+ #define MESON_SDHC_ISTA_RESP_ERR_CRC BIT(2)
+ #define MESON_SDHC_ISTA_RESP_OK_NOCLEAR BIT(3)
+ #define MESON_SDHC_ISTA_DATA_1PACK_OK BIT(4)
+ #define MESON_SDHC_ISTA_DATA_TIMEOUT BIT(5)
+ #define MESON_SDHC_ISTA_DATA_ERR_CRC BIT(6)
+ #define MESON_SDHC_ISTA_DATA_XFER_OK BIT(7)
+ #define MESON_SDHC_ISTA_RX_HIGHER BIT(8)
+ #define MESON_SDHC_ISTA_RX_LOWER BIT(9)
+ #define MESON_SDHC_ISTA_DAT1_IRQ BIT(10)
+ #define MESON_SDHC_ISTA_DMA_DONE BIT(11)
+ #define MESON_SDHC_ISTA_RXFIFO_FULL BIT(12)
+ #define MESON_SDHC_ISTA_TXFIFO_EMPTY BIT(13)
+ #define MESON_SDHC_ISTA_ADDI_DAT1_IRQ BIT(14)
+ #define MESON_SDHC_ISTA_ALL_IRQS GENMASK(14, 0)
+
+#define MESON_SDHC_SRST 0x2c
+ #define MESON_SDHC_SRST_MAIN_CTRL BIT(0)
+ #define MESON_SDHC_SRST_RXFIFO BIT(1)
+ #define MESON_SDHC_SRST_TXFIFO BIT(2)
+ #define MESON_SDHC_SRST_DPHY_RX BIT(3)
+ #define MESON_SDHC_SRST_DPHY_TX BIT(4)
+ #define MESON_SDHC_SRST_DMA_IF BIT(5)
+
+#define MESON_SDHC_ESTA 0x30
+ #define MESON_SDHC_ESTA_11_13 GENMASK(13, 11)
+
+#define MESON_SDHC_ENHC 0x34
+ #define MESON_SDHC_ENHC_MESON8M2_WRRSP_MODE BIT(0)
+ #define MESON_SDHC_ENHC_MESON8M2_CHK_WRRSP BIT(1)
+ #define MESON_SDHC_ENHC_MESON8M2_CHK_DMA BIT(2)
+ #define MESON_SDHC_ENHC_MESON8M2_DEBUG GENMASK(5, 3)
+ #define MESON_SDHC_ENHC_MESON6_RX_TIMEOUT GENMASK(7, 0)
+ #define MESON_SDHC_ENHC_MESON6_DMA_RD_RESP BIT(16)
+ #define MESON_SDHC_ENHC_MESON6_DMA_WR_RESP BIT(17)
+ #define MESON_SDHC_ENHC_SDIO_IRQ_PERIOD GENMASK(15, 8)
+ #define MESON_SDHC_ENHC_RXFIFO_TH GENMASK(24, 18)
+ #define MESON_SDHC_ENHC_TXFIFO_TH GENMASK(31, 25)
+
+#define MESON_SDHC_CLK2 0x38
+ #define MESON_SDHC_CLK2_RX_CLK_PHASE GENMASK(11, 0)
+ #define MESON_SDHC_CLK2_SD_CLK_PHASE GENMASK(23, 12)
+
+struct clk_bulk_data;
+
+int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
+ struct clk_bulk_data *clk_bulk_data);
+
+#endif /* _MESON_MX_SDHC_H_ */
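
A short sketch of how the GENMASK()/FIELD_PREP() pairs in this header
compose a register value, and how the probe derives mmc->max_blk_count;
the values and the example_* helper are made up for illustration:

#include <linux/bitfield.h>

/* Compose MESON_SDHC_SEND for a hypothetical data command of 'blocks' blocks. */
static u32 example_send_value(u8 cmd_index, u16 blocks)
{
	return FIELD_PREP(MESON_SDHC_SEND_CMD_INDEX, cmd_index) |
	       MESON_SDHC_SEND_CMD_HAS_DATA |
	       FIELD_PREP(MESON_SDHC_SEND_TOTAL_PACK, blocks);
}

/*
 * FIELD_GET(MESON_SDHC_SEND_TOTAL_PACK, ~0) extracts the all-ones field,
 * i.e. the field's maximum value (0xffff here) - the same trick the probe
 * uses to derive mmc->max_blk_count.
 */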
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 2e58743d83bb..9b2cf7afc246 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -246,6 +246,9 @@ static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
mrq = host->mrq;
+ if (host->cmd->error)
+ meson_mx_mmc_soft_reset(host);
+
host->mrq = NULL;
host->cmd = NULL;
@@ -561,7 +564,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
mmc->f_max = clk_round_rate(host->cfg_div_clk,
clk_get_rate(host->parent_clk));
- mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
+ mmc->caps |= MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
mmc->ops = &meson_mx_mmc_ops;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
index b90b2c97b6cf..a5e05ed0fda3 100644
--- a/drivers/mmc/host/mmc_hsq.c
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -16,11 +16,20 @@
#define HSQ_NUM_SLOTS 64
#define HSQ_INVALID_TAG HSQ_NUM_SLOTS
+static void mmc_hsq_retry_handler(struct work_struct *work)
+{
+ struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
+ struct mmc_host *mmc = hsq->mmc;
+
+ mmc->ops->request(mmc, hsq->mrq);
+}
+
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
struct mmc_host *mmc = hsq->mmc;
struct hsq_slot *slot;
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&hsq->lock, flags);
@@ -42,7 +51,24 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
spin_unlock_irqrestore(&hsq->lock, flags);
- mmc->ops->request(mmc, hsq->mrq);
+ if (mmc->ops->request_atomic)
+ ret = mmc->ops->request_atomic(mmc, hsq->mrq);
+ else
+ mmc->ops->request(mmc, hsq->mrq);
+
+ /*
+ * If request_atomic() returns -EBUSY, the card may be busy now; in
+ * that unusual case, switch to a non-atomic context and try again
+ * there, to avoid time-consuming operations in the atomic context.
+ *
+ * Note: we just give a warning for other error cases, since the host
+ * driver will handle them.
+ */
+ if (ret == -EBUSY)
+ schedule_work(&hsq->retry_work);
+ else
+ WARN_ON_ONCE(ret);
}
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
@@ -325,6 +351,7 @@ int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
hsq->mmc->cqe_private = hsq;
mmc->cqe_ops = &mmc_hsq_ops;
+ INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
spin_lock_init(&hsq->lock);
init_waitqueue_head(&hsq->wait_queue);
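
The retry path added above, reduced to the bare pattern: an atomic
dispatch that returns -EBUSY falls back to a workqueue so the retry can
run in process context. A sketch with hypothetical example_* names:

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct example_dispatcher {
	struct work_struct retry_work;		/* INIT_WORK() at setup time */
	int (*try_atomic)(struct example_dispatcher *d);
	void (*run_blocking)(struct example_dispatcher *d);
};

static void example_retry_fn(struct work_struct *work)
{
	struct example_dispatcher *d =
		container_of(work, struct example_dispatcher, retry_work);

	d->run_blocking(d);			/* safe to sleep here */
}

static void example_dispatch(struct example_dispatcher *d)
{
	int ret = d->try_atomic(d);

	if (ret == -EBUSY)
		schedule_work(&d->retry_work);	/* retry in process context */
	else
		WARN_ON_ONCE(ret);		/* other errors: host handles them */
}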
diff --git a/drivers/mmc/host/mmc_hsq.h b/drivers/mmc/host/mmc_hsq.h
index 18b9cf55925f..ffdd9cd172c3 100644
--- a/drivers/mmc/host/mmc_hsq.h
+++ b/drivers/mmc/host/mmc_hsq.h
@@ -12,6 +12,7 @@ struct mmc_hsq {
wait_queue_head_t wait_queue;
struct hsq_slot *slot;
spinlock_t lock;
+ struct work_struct retry_work;
int next_tag;
int num_slots;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 951f76dc1ddd..39bb1e30c2d7 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -77,14 +77,8 @@
#define MMC_SPI_BLOCKSIZE 512
-
-/* These fixed timeouts come from the latest SD specs, which say to ignore
- * the CSD values. The R1B value is for card erase (e.g. the "I forgot the
- * card's password" scenario); it's mostly applied to STOP_TRANSMISSION after
- * reads which takes nowhere near that long. Older cards may be able to use
- * shorter timeouts ... but why bother?
- */
-#define r1b_timeout (HZ * 3)
+#define MMC_SPI_R1B_TIMEOUT_MS 3000
+#define MMC_SPI_INIT_TIMEOUT_MS 3000
/* One of the critical speed parameters is the amount of data which may
* be transferred in one command. If this value is too low, the SD card
@@ -248,6 +242,7 @@ static char *maptype(struct mmc_command *cmd)
static int mmc_spi_response_get(struct mmc_spi_host *host,
struct mmc_command *cmd, int cs_on)
{
+ unsigned long timeout_ms;
u8 *cp = host->data->status;
u8 *end = cp + host->t.len;
int value = 0;
@@ -346,8 +341,11 @@ checkstatus:
/* maybe we read all the busy tokens already */
while (cp < end && *cp == 0)
cp++;
- if (cp == end)
- mmc_spi_wait_unbusy(host, r1b_timeout);
+ if (cp == end) {
+ timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
+ MMC_SPI_R1B_TIMEOUT_MS;
+ mmc_spi_wait_unbusy(host, msecs_to_jiffies(timeout_ms));
+ }
break;
/* SPI R2 == R1 + second status byte; SEND_STATUS
@@ -1118,7 +1116,7 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host)
/* Try to be very sure any previous command has completed;
* wait till not-busy, skip debris from any old commands.
*/
- mmc_spi_wait_unbusy(host, r1b_timeout);
+ mmc_spi_wait_unbusy(host, msecs_to_jiffies(MMC_SPI_INIT_TIMEOUT_MS));
mmc_spi_readbytes(host, 10);
/*
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 647567def612..a69d6a0c2e15 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1861,31 +1861,17 @@ static int mmci_get_cd(struct mmc_host *mmc)
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
- int ret = 0;
-
- if (!IS_ERR(mmc->supply.vqmmc)) {
+ int ret;
- switch (ios->signal_voltage) {
- case MMC_SIGNAL_VOLTAGE_330:
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 2700000, 3600000);
- break;
- case MMC_SIGNAL_VOLTAGE_180:
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 1700000, 1950000);
- break;
- case MMC_SIGNAL_VOLTAGE_120:
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 1100000, 1300000);
- break;
- }
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (!ret && host->ops && host->ops->post_sig_volt_switch)
- ret = host->ops->post_sig_volt_switch(host, ios);
+ if (!ret && host->ops && host->ops->post_sig_volt_switch)
+ ret = host->ops->post_sig_volt_switch(host, ios);
+ else if (ret)
+ ret = 0;
- if (ret)
- dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
- }
+ if (ret < 0)
+ dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
return ret;
}
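
The ret handling above relies on the mmc_regulator_set_vqmmc() return
convention as of this series: negative errno on a real failure, 0 when
the voltage was switched, positive when the switch was skipped (for
instance, no vqmmc regulator). A minimal sketch of a caller honouring
that convention:

#include <linux/mmc/host.h>

static int example_switch_signal_voltage(struct mmc_host *mmc,
					 struct mmc_ios *ios)
{
	int ret = mmc_regulator_set_vqmmc(mmc, ios);

	if (ret < 0)		/* real failure: report it */
		return ret;

	/* ret == 0 (switched) and ret > 0 (skipped) are both fine */
	return 0;
}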
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index d33e62bd6153..51db30acf4dc 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -119,20 +119,19 @@ static void sdmmc_idma_unprep_data(struct mmci_host *host,
static int sdmmc_idma_setup(struct mmci_host *host)
{
struct sdmmc_idma *idma;
+ struct device *dev = mmc_dev(host->mmc);
- idma = devm_kzalloc(mmc_dev(host->mmc), sizeof(*idma), GFP_KERNEL);
+ idma = devm_kzalloc(dev, sizeof(*idma), GFP_KERNEL);
if (!idma)
return -ENOMEM;
host->dma_priv = idma;
if (host->variant->dma_lli) {
- idma->sg_cpu = dmam_alloc_coherent(mmc_dev(host->mmc),
- SDMMC_LLI_BUF_LEN,
+ idma->sg_cpu = dmam_alloc_coherent(dev, SDMMC_LLI_BUF_LEN,
&idma->sg_dma, GFP_KERNEL);
if (!idma->sg_cpu) {
- dev_err(mmc_dev(host->mmc),
- "Failed to alloc IDMA descriptor\n");
+ dev_err(dev, "Failed to alloc IDMA descriptor\n");
return -ENOMEM;
}
host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
@@ -143,7 +142,7 @@ static int sdmmc_idma_setup(struct mmci_host *host)
host->mmc->max_seg_size = host->mmc->max_req_size;
}
- return 0;
+ return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
}
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
@@ -188,6 +187,9 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+
+ if (!data->host_cookie)
+ sdmmc_idma_unprep_data(host, data, 0);
}
static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
@@ -519,6 +521,7 @@ void sdmmc_variant_init(struct mmci_host *host)
struct sdmmc_dlyb *dlyb;
host->ops = &sdmmc_variant_ops;
+ host->pwr_reg = readl_relaxed(host->base + MMCIPOWER);
base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
if (IS_ERR(base_dlyb))
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index b221c02cc71f..39e7fc54c438 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1369,7 +1369,7 @@ static void msdc_set_buswidth(struct msdc_host *host, u32 width)
static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
- int ret = 0;
+ int ret;
if (!IS_ERR(mmc->supply.vqmmc)) {
if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 &&
@@ -1379,18 +1379,19 @@ static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
}
ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret) {
+ if (ret < 0) {
dev_dbg(host->dev, "Regulator set error %d (%d)\n",
ret, ios->signal_voltage);
- } else {
- /* Apply different pinctrl settings for different signal voltage */
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
- pinctrl_select_state(host->pinctrl, host->pins_uhs);
- else
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ return ret;
}
+
+ /* Apply different pinctrl settings for different signal voltage */
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ else
+ pinctrl_select_state(host->pinctrl, host->pins_default);
}
- return ret;
+ return 0;
}
static int msdc_card_busy(struct mmc_host *mmc)
@@ -2325,7 +2326,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
if (mmc->caps & MMC_CAP_SDIO_IRQ)
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
- mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps |= MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
mmc->max_segs = MAX_BD_NUM;
if (host->dev_comp->support_64g)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 203b61712601..cc0752a9df6d 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -752,8 +752,6 @@ static int mvsd_probe(struct platform_device *pdev)
if (maxfreq)
mmc->f_max = maxfreq;
- mmc->caps |= MMC_CAP_ERASE;
-
spin_lock_init(&host->lock);
host->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index d82674aed447..b1820def36c0 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -634,8 +634,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
/* set mmc core parameters */
mmc->ops = &mxs_mmc_ops;
mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23 |
- MMC_CAP_ERASE;
+ MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;
host->broken_cd = of_property_read_bool(np, "broken-cd");
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index d74e73c95fdf..33d7af7c7762 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1244,7 +1244,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
mmc->caps = 0;
if (host->pdata->slots[id].wires >= 4)
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_ERASE;
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
mmc->ops = &mmc_omap_ops;
mmc->f_min = 400000;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index a379c45b985c..37b8740513f5 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1922,7 +1922,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_CMD23;
mmc->caps |= mmc_pdata(host)->caps;
if (mmc->caps & MMC_CAP_8_BIT_DATA)
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index 01ffe51f413d..5e20c099fe03 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -92,6 +92,8 @@
#define OWL_SD_STATE_RC16ER BIT(1)
#define OWL_SD_STATE_CRC7ER BIT(0)
+#define OWL_CMD_TIMEOUT_MS 30000
+
struct owl_mmc_host {
struct device *dev;
struct reset_control *reset;
@@ -172,6 +174,7 @@ static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
struct mmc_command *cmd,
struct mmc_data *data)
{
+ unsigned long timeout;
u32 mode, state, resp[2];
u32 cmd_rsp_mask = 0;
@@ -239,7 +242,10 @@ static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
if (data)
return;
- if (!wait_for_completion_timeout(&owl_host->sdc_complete, 30 * HZ)) {
+ timeout = msecs_to_jiffies(cmd->busy_timeout ? cmd->busy_timeout :
+ OWL_CMD_TIMEOUT_MS);
+
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete, timeout)) {
dev_err(owl_host->dev, "CMD interrupt timeout\n");
cmd->error = -ETIMEDOUT;
return;
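
Both the mmc_spi and owl-mmc changes above follow the same shape: prefer
the per-command busy timeout filled in by the core, fall back to a driver
default, and convert to jiffies only at the wait site. A sketch, with
EXAMPLE_CMD_TIMEOUT_MS standing in for the driver default:

#include <linux/jiffies.h>
#include <linux/mmc/core.h>

#define EXAMPLE_CMD_TIMEOUT_MS	30000	/* driver default fallback */

static unsigned long example_cmd_timeout(const struct mmc_command *cmd)
{
	unsigned int ms = cmd->busy_timeout ? cmd->busy_timeout
					    : EXAMPLE_CMD_TIMEOUT_MS;

	return msecs_to_jiffies(ms);	/* convert only at the wait site */
}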
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 2a4c83a5f32e..14c64caefc64 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -36,6 +36,7 @@ struct renesas_sdhi_of_data {
struct renesas_sdhi_quirks {
bool hs400_disabled;
bool hs400_4taps;
+ u32 hs400_bad_taps;
};
struct tmio_mmc_dma {
@@ -61,8 +62,10 @@ struct renesas_sdhi {
/* Tuning values: 1 for success, 0 for failure */
DECLARE_BITMAP(taps, BITS_PER_LONG);
+ /* Sampling data comparison: 1 for match, 0 for mismatch */
+ DECLARE_BITMAP(smpcmp, BITS_PER_LONG);
unsigned int tap_num;
- unsigned long tap_set;
+ unsigned int tap_set;
};
#define host_to_priv(host) \
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index df826661366f..15e21894bd44 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mfd/tmio.h>
@@ -82,16 +83,11 @@ static int renesas_sdhi_clk_enable(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
struct renesas_sdhi *priv = host_to_priv(host);
- int ret = clk_prepare_enable(priv->clk);
-
- if (ret < 0)
- return ret;
+ int ret;
ret = clk_prepare_enable(priv->clk_cd);
- if (ret < 0) {
- clk_disable_unprepare(priv->clk);
+ if (ret < 0)
return ret;
- }
/*
* The clock driver may not know what maximum frequency
@@ -197,7 +193,6 @@ static void renesas_sdhi_clk_disable(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
- clk_disable_unprepare(priv->clk);
clk_disable_unprepare(priv->clk_cd);
}
@@ -237,7 +232,7 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
MMC_SIGNAL_VOLTAGE_330 ? 0 : -EINVAL;
ret = mmc_regulator_set_vqmmc(host->mmc, ios);
- if (ret)
+ if (ret < 0)
return ret;
return pinctrl_select_state(priv->pinctrl, pin_state);
@@ -325,6 +320,8 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct renesas_sdhi *priv = host_to_priv(host);
+ u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0;
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
@@ -352,10 +349,23 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+ /* Avoid bad TAP */
+ if (bad_taps & BIT(priv->tap_set)) {
+ u32 new_tap = (priv->tap_set + 1) % priv->tap_num;
+
+ if (bad_taps & BIT(new_tap))
+ new_tap = (priv->tap_set - 1) % priv->tap_num;
- if (priv->quirks && priv->quirks->hs400_4taps)
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
- priv->tap_set / 2);
+ if (bad_taps & BIT(new_tap)) {
+ new_tap = priv->tap_set;
+ dev_dbg(&host->pdev->dev, "Can't handle three bad taps in a row\n");
+ }
+
+ priv->tap_set = new_tap;
+ }
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
+ priv->tap_set / (use_4tap ? 2 : 1));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
@@ -422,20 +432,16 @@ static int renesas_sdhi_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_io
return 0;
}
-#define SH_MOBILE_SDHI_MAX_TAP 3
+#define SH_MOBILE_SDHI_MIN_TAP_ROW 3
static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
- unsigned long tap_cnt; /* counter of tuning success */
- unsigned long tap_start;/* start position of tuning success */
- unsigned long tap_end; /* end position of tuning success */
- unsigned long ntap; /* temporary counter of tuning success */
- unsigned long i;
+ unsigned int tap_start = 0, tap_end = 0, tap_cnt = 0, rs, re, i;
+ unsigned int taps_size = priv->tap_num * 2, min_tap_row;
+ unsigned long *bitmap;
priv->doing_tune = false;
-
- /* Clear SCC_RVSREQ */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
/*
@@ -443,42 +449,42 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
* result requiring the tap to be good in both runs before
* considering it for tuning selection.
*/
- for (i = 0; i < priv->tap_num * 2; i++) {
+ for (i = 0; i < taps_size; i++) {
int offset = priv->tap_num * (i < priv->tap_num ? 1 : -1);
if (!test_bit(i, priv->taps))
clear_bit(i + offset, priv->taps);
+
+ if (!test_bit(i, priv->smpcmp))
+ clear_bit(i + offset, priv->smpcmp);
}
/*
- * Find the longest consecutive run of successful probes. If that
- * is more than SH_MOBILE_SDHI_MAX_TAP probes long then use the
- * center index as the tap.
+ * If all TAP are OK, the sampling clock position is selected by
+ * identifying the change point of data.
*/
- tap_cnt = 0;
- ntap = 0;
- tap_start = 0;
- tap_end = 0;
- for (i = 0; i < priv->tap_num * 2; i++) {
- if (test_bit(i, priv->taps)) {
- ntap++;
- } else {
- if (ntap > tap_cnt) {
- tap_start = i - ntap;
- tap_end = i - 1;
- tap_cnt = ntap;
- }
- ntap = 0;
- }
+ if (bitmap_full(priv->taps, taps_size)) {
+ bitmap = priv->smpcmp;
+ min_tap_row = 1;
+ } else {
+ bitmap = priv->taps;
+ min_tap_row = SH_MOBILE_SDHI_MIN_TAP_ROW;
}
- if (ntap > tap_cnt) {
- tap_start = i - ntap;
- tap_end = i - 1;
- tap_cnt = ntap;
+ /*
+ * Find the longest consecutive run of successful probes. If that
+ * is at least SH_MOBILE_SDHI_MIN_TAP_ROW probes long then use the
+ * center index as the tap, otherwise bail out.
+ */
+ bitmap_for_each_set_region(bitmap, rs, re, 0, taps_size) {
+ if (re - rs > tap_cnt) {
+ tap_end = re;
+ tap_start = rs;
+ tap_cnt = tap_end - tap_start;
+ }
}
- if (tap_cnt >= SH_MOBILE_SDHI_MAX_TAP)
+ if (tap_cnt >= min_tap_row)
priv->tap_set = (tap_start + tap_end) / 2 % priv->tap_num;
else
return -EIO;
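
The new selection loop above boils down to: walk the set-bit regions of
the chosen bitmap, keep the longest run, and take its centre if the run
is long enough. A standalone sketch of that core, using the same
bitmap_for_each_set_region() iterator; example_pick_tap() is illustrative
only:

#include <linux/bitmap.h>
#include <linux/errno.h>

static int example_pick_tap(const unsigned long *map, unsigned int size,
			    unsigned int min_run, unsigned int *tap)
{
	unsigned int rs, re, start = 0, len = 0;

	bitmap_for_each_set_region(map, rs, re, 0, size) {
		if (re - rs > len) {	/* re points one past the run */
			start = rs;
			len = re - rs;
		}
	}

	if (len < min_run)
		return -EIO;		/* no run long enough to trust */

	*tap = start + len / 2;		/* centre of the longest run */
	return 0;
}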
@@ -511,6 +517,7 @@ static int renesas_sdhi_execute_tuning(struct tmio_mmc_host *host, u32 opcode)
priv->doing_tune = true;
bitmap_zero(priv->taps, priv->tap_num * 2);
+ bitmap_zero(priv->smpcmp, priv->tap_num * 2);
/* Issue CMD19 twice for each tap */
for (i = 0; i < 2 * priv->tap_num; i++) {
@@ -519,6 +526,9 @@ static int renesas_sdhi_execute_tuning(struct tmio_mmc_host *host, u32 opcode)
if (mmc_send_tuning(host->mmc, opcode, NULL) == 0)
set_bit(i, priv->taps);
+
+ if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
+ set_bit(i, priv->smpcmp);
}
return renesas_sdhi_select_tuning(host);
@@ -527,7 +537,7 @@ static int renesas_sdhi_execute_tuning(struct tmio_mmc_host *host, u32 opcode)
static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_4tap)
{
struct renesas_sdhi *priv = host_to_priv(host);
- unsigned long new_tap = priv->tap_set;
+ unsigned int new_tap = priv->tap_set, error_tap = priv->tap_set;
u32 val;
val = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ);
@@ -539,20 +549,32 @@ static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_
/* Change TAP position according to correction status */
if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC &&
host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
+ u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0;
/*
* With HS400, the DAT signal is based on DS, not CLK.
* Therefore, use only CMD status.
*/
u32 smpcmp = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) &
SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR;
- if (!smpcmp)
+ if (!smpcmp) {
return false; /* no error in CMD signal */
- else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP)
+ } else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP) {
new_tap++;
- else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN)
+ error_tap--;
+ } else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN) {
new_tap--;
- else
+ error_tap++;
+ } else {
return true; /* need retune */
+ }
+
+ /*
+ * When new_tap is a bad tap, we cannot change to it. Instead, compare
+ * with the HS200 tuning result: when smpcmp[error_tap] is OK, we can
+ * at least retune.
+ */
+ if (bad_taps & BIT(new_tap % priv->tap_num))
+ return test_bit(error_tap % priv->tap_num, priv->smpcmp);
} else {
if (val & SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR)
return true; /* need retune */
@@ -705,17 +727,35 @@ static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
.hs400_4taps = true,
+ .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
};
static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
.hs400_disabled = true,
};
+static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
+ .hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
+ .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+};
+
+/*
+ * Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and ES1.2 as of now.
+ * So, we treat them equally and only keep a match for ES1.2, to enforce this
+ * in case there is ever a way to distinguish ES1.2.
+ */
static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
+ { .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
+ { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_4tap },
+ { .soc_id = "r8a7796", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps1357 },
+ { .soc_id = "r8a77965", .data = &sdhi_quirks_bad_taps2367 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
{ /* Sentinel. */ },
};
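
For reference, a table like this is consumed once at probe time; only
soc_device_match() is the real API here, the surrounding helper is
illustrative:

#include <linux/sys_soc.h>

static const struct renesas_sdhi_quirks *example_lookup_quirks(void)
{
	const struct soc_device_attribute *attr;

	attr = soc_device_match(sdhi_quirks_match);
	if (attr)
		return attr->data;	/* SoC/revision-specific quirks */

	return NULL;			/* no quirks for this SoC */
}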
@@ -860,6 +900,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
+ dev_pm_domain_start(&pdev->dev);
+
ret = renesas_sdhi_clk_enable(host);
if (ret)
goto efree;
@@ -933,10 +975,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
goto eirq;
}
- dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
- mmc_hostname(host->mmc), (unsigned long)
- (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
- host->mmc->f_max / 1000000);
+ dev_info(&pdev->dev, "%s base at %pa, max clock rate %u MHz\n",
+ mmc_hostname(host->mmc), &res->start, host->mmc->f_max / 1000000);
return ret;
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 11087976ab19..5a71f6678fd3 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -1347,7 +1347,7 @@ static void realtek_init_host(struct realtek_pci_sdmmc *host)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
- MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_ERASE;
+ MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE;
mmc->max_current_330 = 400;
mmc->max_current_180 = 800;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 81d0dfe553a8..a7084c50ad65 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1314,7 +1314,7 @@ static void rtsx_usb_init_host(struct rtsx_usb_sdmmc *host)
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
- MMC_CAP_ERASE | MMC_CAP_SYNC_RUNTIME_PM;
+ MMC_CAP_SYNC_RUNTIME_PM;
mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE |
MMC_CAP2_NO_SDIO;
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 1e616ae56b13..444b2769ae2c 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -958,13 +958,6 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
{
u32 dcon, imsk, stoptries = 3;
- /* write DCON register */
-
- if (!data) {
- writel(0, host->base + S3C2410_SDIDCON);
- return 0;
- }
-
if ((data->blksz & 3) != 0) {
/* We cannot deal with unaligned blocks with more than
* one block being transferred. */
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index faba53cf139b..d8b76cb8698a 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -605,10 +605,12 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
}
static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
- .chip = &sdhci_acpi_chip_amd,
- .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
- .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
- SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
};
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 6da6d4fb5edd..4a6c9ba82538 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -97,6 +97,11 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
u32 tmp;
int ret;
+ ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK),
+ 0, 10);
+ if (ret)
+ return ret;
+
tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
writel(tmp, reg);
@@ -111,7 +116,10 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
tmp &= ~SDHCI_CDNS_HRS04_WR;
writel(tmp, reg);
- return 0;
+ ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK),
+ 0, 10);
+
+ return ret;
}
static unsigned int sdhci_cdns_phy_param_count(struct device_node *np)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 5ec8e4bf1ac7..1d7f84b23a22 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -8,6 +8,7 @@
* Author: Wolfram Sang <kernel@pengutronix.de>
*/
+#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
@@ -89,7 +90,8 @@
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
-#define ESDHC_TUNING_START_TAP_MASK 0xff
+#define ESDHC_TUNING_START_TAP_MASK 0x7f
+#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7)
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
@@ -214,6 +216,7 @@ static const struct esdhc_soc_data usdhc_imx6sl_data = {
static const struct esdhc_soc_data usdhc_imx6sll_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
@@ -399,7 +402,8 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
| SDHCI_SUPPORT_SDR50
| SDHCI_USE_SDR50_TUNING
- | (SDHCI_TUNING_MODE_3 << SDHCI_RETUNING_MODE_SHIFT);
+ | FIELD_PREP(SDHCI_RETUNING_MODE_MASK,
+ SDHCI_TUNING_MODE_3);
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
val |= SDHCI_SUPPORT_HS400;
@@ -417,9 +421,9 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) {
val = 0;
- val |= 0xFF << SDHCI_MAX_CURRENT_330_SHIFT;
- val |= 0xFF << SDHCI_MAX_CURRENT_300_SHIFT;
- val |= 0xFF << SDHCI_MAX_CURRENT_180_SHIFT;
+ val |= FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, 0xFF);
+ val |= FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, 0xFF);
+ val |= FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, 0xFF);
}
if (unlikely(reg == SDHCI_INT_STATUS)) {
@@ -1313,6 +1317,18 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
tmp |= imx_data->boarddata.tuning_step
<< ESDHC_TUNING_STEP_SHIFT;
}
+
+ /*
+ * Disable the CMD CRC check for tuning; otherwise a delay would be
+ * needed after every tuning command, because the hardware standard
+ * tuning logic moves straight to the next step once it detects a
+ * CMD CRC error: it does not wait for the card to actually send the
+ * tuning data, and triggers the buffer read ready interrupt
+ * immediately. If the uSDHC then sends the next tuning command, some
+ * eMMC cards get stuck and stop responding, either blocking the
+ * tuning procedure or leaving the first command after tuning without
+ * any response.
+ */
+ tmp |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE;
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
} else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
/*
@@ -1596,6 +1612,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (esdhc_is_usdhc(imx_data)) {
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
+
+ /* GPIO CD can be set as a wakeup source */
+ host->mmc->caps |= MMC_CAP_CD_WAKE;
+
if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
@@ -1653,8 +1673,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_ahb_clk;
- host->tuning_delay = 1;
-
sdhci_esdhc_imx_hwinit(host);
err = sdhci_add_host(host);
@@ -1731,8 +1749,14 @@ static int sdhci_esdhc_suspend(struct device *dev)
mmc_retune_needed(host->mmc);
ret = sdhci_suspend_host(host);
- if (!ret)
- return pinctrl_pm_select_sleep_state(dev);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_pm_select_sleep_state(dev);
+ if (ret)
+ return ret;
+
+ ret = mmc_gpio_set_cd_wake(host->mmc, true);
return ret;
}
@@ -1756,6 +1780,9 @@ static int sdhci_esdhc_resume(struct device *dev)
if (host->mmc->caps2 & MMC_CAP2_CQE)
ret = cqhci_resume(host->mmc);
+ if (!ret)
+ ret = mmc_gpio_set_cd_wake(host->mmc, false);
+
return ret;
}
#endif
diff --git a/drivers/mmc/host/sdhci-esdhc-mcf.c b/drivers/mmc/host/sdhci-esdhc-mcf.c
new file mode 100644
index 000000000000..71bf086a9812
--- /dev/null
+++ b/drivers/mmc/host/sdhci-esdhc-mcf.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale eSDHC ColdFire family controller driver, platform bus.
+ *
+ * Copyright (c) 2020 Timesys Corporation
+ * Author: Angelo Dureghello <angelo.dureghello@timesys.it>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_data/mmc-esdhc-mcf.h>
+#include <linux/mmc/mmc.h>
+#include "sdhci-pltfm.h"
+#include "sdhci-esdhc.h"
+
+#define ESDHC_PROCTL_D3CD 0x08
+#define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f
+#define ESDHC_DEFAULT_HOST_CONTROL 0x28
+
+/*
+ * Freescale eSDHC has the DMA ERR flag at bit 28, not at bit 25 as the
+ * standard spec says.
+ */
+#define ESDHC_INT_VENDOR_SPEC_DMA_ERR BIT(28)
+
+struct pltfm_mcf_data {
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
+ int aside;
+ int current_bus_width;
+};
+
+static inline void esdhc_mcf_buffer_swap32(u32 *buf, int len)
+{
+ int i;
+ u32 temp;
+
+ len = (len + 3) >> 2;
+
+ for (i = 0; i < len; i++) {
+ temp = swab32(*buf);
+ *buf++ = temp;
+ }
+}
+
+static inline void esdhc_clrset_be(struct sdhci_host *host,
+ u32 mask, u32 val, int reg)
+{
+ void __iomem *base = host->ioaddr + (reg & ~3);
+ u8 shift = (reg & 3) << 3;
+
+ mask <<= shift;
+ val <<= shift;
+
+ if (reg == SDHCI_HOST_CONTROL)
+ val |= ESDHC_PROCTL_D3CD;
+
+ writel((readl(base) & ~mask) | val, base);
+}
+
+/*
+ * Note: mcf is big-endian, single bytes need to be accessed at big endian
+ * offsets.
+ */
+static void esdhc_mcf_writeb_be(struct sdhci_host *host, u8 val, int reg)
+{
+ void __iomem *base = host->ioaddr + (reg & ~3);
+ u8 shift = (reg & 3) << 3;
+ u32 mask = ~(0xff << shift);
+
+ if (reg == SDHCI_HOST_CONTROL) {
+ u32 host_ctrl = ESDHC_DEFAULT_HOST_CONTROL;
+ u8 dma_bits = (val & SDHCI_CTRL_DMA_MASK) >> 3;
+ u8 tmp = readb(host->ioaddr + SDHCI_HOST_CONTROL + 1);
+
+ tmp &= ~0x03;
+ tmp |= dma_bits;
+
+ /*
+ * Recomposition is needed: always restore endianness and
+ * keep D3CD and AI, setting only the bus width.
+ */
+ host_ctrl |= val;
+ host_ctrl |= (dma_bits << 8);
+ writel(host_ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+
+ return;
+ }
+
+ writel((readl(base) & mask) | (val << shift), base);
+}
+
+static void esdhc_mcf_writew_be(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host);
+ void __iomem *base = host->ioaddr + (reg & ~3);
+ u8 shift = (reg & 3) << 3;
+ u32 mask = ~(0xffff << shift);
+
+ switch (reg) {
+ case SDHCI_TRANSFER_MODE:
+ mcf_data->aside = val;
+ return;
+ case SDHCI_COMMAND:
+ if (host->cmd->opcode == MMC_STOP_TRANSMISSION)
+ val |= SDHCI_CMD_ABORTCMD;
+
+ /*
+ * As for the fsl driver,
+ * we have to set the mode in a single write here.
+ */
+ writel(val << 16 | mcf_data->aside,
+ host->ioaddr + SDHCI_TRANSFER_MODE);
+ return;
+ }
+
+ writel((readl(base) & mask) | (val << shift), base);
+}
+
+static void esdhc_mcf_writel_be(struct sdhci_host *host, u32 val, int reg)
+{
+ writel(val, host->ioaddr + reg);
+}
+
+static u8 esdhc_mcf_readb_be(struct sdhci_host *host, int reg)
+{
+ if (reg == SDHCI_HOST_CONTROL) {
+ u8 __iomem *base = host->ioaddr + (reg & ~3);
+ u16 val = readw(base + 2);
+ u8 dma_bits = (val >> 5) & SDHCI_CTRL_DMA_MASK;
+ u8 host_ctrl = val & 0xff;
+
+ host_ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ host_ctrl |= dma_bits;
+
+ return host_ctrl;
+ }
+
+ return readb(host->ioaddr + (reg ^ 0x3));
+}
+
+static u16 esdhc_mcf_readw_be(struct sdhci_host *host, int reg)
+{
+ /*
+ * For SDHCI_HOST_VERSION, the SDHCI spec defines offset 0xFE,
+ * which is wrong for us; ours is at 0xFC.
+ */
+ if (reg == SDHCI_HOST_VERSION)
+ reg -= 2;
+
+ return readw(host->ioaddr + (reg ^ 0x2));
+}
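
/*
 * Sketch (illustrative, with hypothetical example_* names, not part of
 * this patch): why the XOR offsets above work. On this big-endian bus,
 * byte 0 of a 32-bit register sits at the highest byte address, so
 * flipping the low address bits maps the little-endian SDHCI register
 * offsets onto the correct byte lanes.
 */
static inline unsigned int example_be_byte_off(unsigned int reg)
{
	return reg ^ 0x3;	/* 8-bit access within a 32-bit register */
}

static inline unsigned int example_be_word_off(unsigned int reg)
{
	return reg ^ 0x2;	/* 16-bit access within a 32-bit register */
}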
+
+static u32 esdhc_mcf_readl_be(struct sdhci_host *host, int reg)
+{
+ u32 val;
+
+ val = readl(host->ioaddr + reg);
+
+ /*
+ * RM (25.3.9): the SD pin clock must never exceed 25 MHz,
+ * so force legacy mode at 25 MHz.
+ */
+ if (unlikely(reg == SDHCI_CAPABILITIES))
+ val &= ~SDHCI_CAN_DO_HISPD;
+
+ if (unlikely(reg == SDHCI_INT_STATUS)) {
+ if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
+ val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
+ val |= SDHCI_INT_ADMA_ERROR;
+ }
+ }
+
+ return val;
+}
+
+static unsigned int esdhc_mcf_get_max_timeout_count(struct sdhci_host *host)
+{
+ return 1 << 27;
+}
+
+static void esdhc_mcf_set_timeout(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ /* Use maximum timeout counter */
+ esdhc_clrset_be(host, ESDHC_SYS_CTRL_DTOCV_MASK, 0xE,
+ SDHCI_TIMEOUT_CONTROL);
+}
+
+static void esdhc_mcf_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_reset(host, mask);
+
+ esdhc_clrset_be(host, ESDHC_CTRL_BUSWIDTH_MASK,
+ mcf_data->current_bus_width, SDHCI_HOST_CONTROL);
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+}
+
+static unsigned int esdhc_mcf_pltfm_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return pltfm_host->clock;
+}
+
+static unsigned int esdhc_mcf_pltfm_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return pltfm_host->clock / 256 / 16;
+}
+
+static void esdhc_mcf_pltfm_set_clock(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ unsigned long *pll_dr = (unsigned long *)MCF_PLL_DR;
+ u32 fvco, fsys, fesdhc, temp;
+ const int sdclkfs[] = {2, 4, 8, 16, 32, 64, 128, 256};
+ int delta, old_delta = clock;
+ int i, q, ri, rq;
+
+ if (clock == 0) {
+ host->mmc->actual_clock = 0;
+ return;
+ }
+
+ /*
+ * ColdFire eSDHC clocks
+ *
+ * pll -+-> / outdiv1 --> fsys
+ *      +-> / outdiv3 --> eSDHC clock ---> / SDCCLKFS / DVS
+ *
+ * mcf5441x datasheet says:
+ * (8.1.2) eSDHC should be 40 MHz max
+ * (25.3.9) eSDHC input is, as an example, 96 MHz ...
+ * (25.3.9) sd pin clock must never exceed 25 MHz
+ *
+ * fvco = fsys * (outdiv1 + 1)
+ * fesdhc = fvco / (outdiv3 + 1)
+ */
+ temp = readl(pll_dr);
+ fsys = pltfm_host->clock;
+ fvco = fsys * ((temp & 0x1f) + 1);
+ fesdhc = fvco / (((temp >> 10) & 0x1f) + 1);
+
+ for (i = 0; i < 8; ++i) {
+ int result = fesdhc / sdclkfs[i];
+
+ for (q = 1; q < 17; ++q) {
+ int finale = result / q;
+
+ delta = abs(clock - finale);
+
+ if (delta < old_delta) {
+ old_delta = delta;
+ ri = i;
+ rq = q;
+ }
+ }
+ }
+
+ /*
+ * Apply divisors and re-enable all the clocks
+ */
+ temp = ((sdclkfs[ri] >> 1) << 8) | ((rq - 1) << 4) |
+ (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN);
+ esdhc_clrset_be(host, 0x0000fff7, temp, SDHCI_CLOCK_CONTROL);
+
+ host->mmc->actual_clock = clock;
+
+ mdelay(1);
+}
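
/*
 * Sketch (illustrative, not part of the driver): the divider search
 * above, condensed. Try every SDCLKFS prescaler and DVS divisor and keep
 * the pair whose output rate is closest to the requested clock; fesdhc
 * would come from the PLL readout shown above.
 */
static void example_pick_divisors(unsigned int fesdhc, unsigned int clock,
				  unsigned int *prescaler, unsigned int *dvs)
{
	static const unsigned int sdclkfs[] = {2, 4, 8, 16, 32, 64, 128, 256};
	unsigned int i, q, best = UINT_MAX;

	*prescaler = sdclkfs[0];	/* initialise to a valid pair */
	*dvs = 1;

	for (i = 0; i < ARRAY_SIZE(sdclkfs); i++) {
		for (q = 1; q <= 16; q++) {
			unsigned int rate = fesdhc / sdclkfs[i] / q;
			unsigned int delta = abs((int)clock - (int)rate);

			if (delta < best) {
				best = delta;
				*prescaler = sdclkfs[i];
				*dvs = q;
			}
		}
	}
}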
+
+static void esdhc_mcf_pltfm_set_bus_width(struct sdhci_host *host, int width)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host);
+
+ switch (width) {
+ case MMC_BUS_WIDTH_4:
+ mcf_data->current_bus_width = ESDHC_CTRL_4BITBUS;
+ break;
+ default:
+ mcf_data->current_bus_width = 0;
+ break;
+ }
+
+ esdhc_clrset_be(host, ESDHC_CTRL_BUSWIDTH_MASK,
+ mcf_data->current_bus_width, SDHCI_HOST_CONTROL);
+}
+
+static void esdhc_mcf_request_done(struct sdhci_host *host,
+ struct mmc_request *mrq)
+{
+ struct scatterlist *sg;
+ u32 *buffer;
+ int i;
+
+ if (!mrq->data || !mrq->data->bytes_xfered)
+ goto exit_done;
+
+ if (mmc_get_dma_dir(mrq->data) != DMA_FROM_DEVICE)
+ goto exit_done;
+
+ /*
+ * On mcf5441x there is no hw sdma option/flag to select the dma
+ * transfer endianness. A swap after the transfer is needed.
+ */
+ for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) {
+ buffer = (u32 *)sg_virt(sg);
+ esdhc_mcf_buffer_swap32(buffer, sg->length);
+ }
+
+exit_done:
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void esdhc_mcf_copy_to_bounce_buffer(struct sdhci_host *host,
+ struct mmc_data *data,
+ unsigned int length)
+{
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ host->bounce_buffer, length);
+
+ esdhc_mcf_buffer_swap32((u32 *)host->bounce_buffer,
+ data->blksz * data->blocks);
+}
+
+static struct sdhci_ops sdhci_esdhc_ops = {
+ .reset = esdhc_mcf_reset,
+ .set_clock = esdhc_mcf_pltfm_set_clock,
+ .get_max_clock = esdhc_mcf_pltfm_get_max_clock,
+ .get_min_clock = esdhc_mcf_pltfm_get_min_clock,
+ .set_bus_width = esdhc_mcf_pltfm_set_bus_width,
+ .get_max_timeout_count = esdhc_mcf_get_max_timeout_count,
+ .set_timeout = esdhc_mcf_set_timeout,
+ .write_b = esdhc_mcf_writeb_be,
+ .write_w = esdhc_mcf_writew_be,
+ .write_l = esdhc_mcf_writel_be,
+ .read_b = esdhc_mcf_readb_be,
+ .read_w = esdhc_mcf_readw_be,
+ .read_l = esdhc_mcf_readl_be,
+ .copy_to_bounce_buffer = esdhc_mcf_copy_to_bounce_buffer,
+ .request_done = esdhc_mcf_request_done,
+};
+
+static const struct sdhci_pltfm_data sdhci_esdhc_mcf_pdata = {
+ .ops = &sdhci_esdhc_ops,
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_FORCE_DMA,
+ /*
+ * Mandatory quirk: the controller does not support CMD23.
+ * Without it, CMD23 is used on > 8G cards and the driver
+ * times out.
+ */
+ .quirks2 = SDHCI_QUIRK2_HOST_NO_CMD23,
+};
+
+static int esdhc_mcf_plat_init(struct sdhci_host *host,
+ struct pltfm_mcf_data *mcf_data)
+{
+ struct mcf_esdhc_platform_data *plat_data;
+
+ if (!host->mmc->parent->platform_data) {
+ dev_err(mmc_dev(host->mmc), "no platform data!\n");
+ return -EINVAL;
+ }
+
+ plat_data = (struct mcf_esdhc_platform_data *)
+ host->mmc->parent->platform_data;
+
+ /* Card_detect */
+ switch (plat_data->cd_type) {
+ default:
+ case ESDHC_CD_CONTROLLER:
+ /* We have a working card_detect back */
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ break;
+ case ESDHC_CD_PERMANENT:
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ break;
+ case ESDHC_CD_NONE:
+ break;
+ }
+
+ switch (plat_data->max_bus_width) {
+ case 4:
+ host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ default:
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+ break;
+ }
+
+ return 0;
+}
+
+static int sdhci_esdhc_mcf_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct pltfm_mcf_data *mcf_data;
+ int err;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_mcf_pdata,
+ sizeof(*mcf_data));
+
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ mcf_data = sdhci_pltfm_priv(pltfm_host);
+
+ host->sdma_boundary = 0;
+
+ host->flags |= SDHCI_AUTO_CMD12;
+
+ mcf_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(mcf_data->clk_ipg)) {
+ err = PTR_ERR(mcf_data->clk_ipg);
+ goto err_exit;
+ }
+
+ mcf_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(mcf_data->clk_ahb)) {
+ err = PTR_ERR(mcf_data->clk_ahb);
+ goto err_exit;
+ }
+
+ mcf_data->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(mcf_data->clk_per)) {
+ err = PTR_ERR(mcf_data->clk_per);
+ goto err_exit;
+ }
+
+ pltfm_host->clk = mcf_data->clk_per;
+ pltfm_host->clock = clk_get_rate(pltfm_host->clk);
+ err = clk_prepare_enable(mcf_data->clk_per);
+ if (err)
+ goto err_exit;
+
+ err = clk_prepare_enable(mcf_data->clk_ipg);
+ if (err)
+ goto unprep_per;
+
+ err = clk_prepare_enable(mcf_data->clk_ahb);
+ if (err)
+ goto unprep_ipg;
+
+ err = esdhc_mcf_plat_init(host, mcf_data);
+ if (err)
+ goto unprep_ahb;
+
+ err = sdhci_setup_host(host);
+ if (err)
+ goto unprep_ahb;
+
+ if (!host->bounce_buffer) {
+ dev_err(&pdev->dev, "bounce buffer not allocated");
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ err = __sdhci_add_host(host);
+ if (err)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+unprep_ahb:
+ clk_disable_unprepare(mcf_data->clk_ahb);
+unprep_ipg:
+ clk_disable_unprepare(mcf_data->clk_ipg);
+unprep_per:
+ clk_disable_unprepare(mcf_data->clk_per);
+err_exit:
+ sdhci_pltfm_free(pdev);
+
+ return err;
+}
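
/*
 * Sketch (illustrative only): the three-clock goto chain above could also
 * be written with the bulk clock API; the "per" clock stays reachable as
 * clks[2].clk for pltfm_host->clk. Shown for comparison, not as a change
 * to the patch.
 */
static int example_enable_clocks(struct device *dev,
				 struct clk_bulk_data clks[3])
{
	int ret;

	clks[0].id = "ipg";
	clks[1].id = "ahb";
	clks[2].id = "per";

	ret = devm_clk_bulk_get(dev, 3, clks);
	if (ret)
		return ret;

	/* Undo with clk_bulk_disable_unprepare(3, clks) on remove. */
	return clk_bulk_prepare_enable(3, clks);
}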
+
+static int sdhci_esdhc_mcf_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_remove_host(host, 0);
+
+ clk_disable_unprepare(mcf_data->clk_ipg);
+ clk_disable_unprepare(mcf_data->clk_ahb);
+ clk_disable_unprepare(mcf_data->clk_per);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_esdhc_mcf_driver = {
+ .driver = {
+ .name = "sdhci-esdhc-mcf",
+ },
+ .probe = sdhci_esdhc_mcf_probe,
+ .remove = sdhci_esdhc_mcf_remove,
+};
+
+module_platform_driver(sdhci_esdhc_mcf_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for Freescale ColdFire eSDHC");
+MODULE_AUTHOR("Angelo Dureghello <angelo.dureghello@timesys.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 947212f16bc6..a30796e79b1c 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -5,7 +5,7 @@
* Copyright (c) 2007 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
* Copyright (c) 2010 Pengutronix e.K.
- * Author: Wolfram Sang <w.sang@pengutronix.de>
+ * Author: Wolfram Sang <kernel@pengutronix.de>
*/
#ifndef _DRIVERS_MMC_SDHCI_ESDHC_H
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index a8bcb3f16aa4..b277dd7fbdb5 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>
@@ -56,19 +57,27 @@
#define CORE_FLL_CYCLE_CNT BIT(18)
#define CORE_DLL_CLOCK_DISABLE BIT(21)
-#define CORE_VENDOR_SPEC_POR_VAL 0xa1c
+#define DLL_USR_CTL_POR_VAL 0x10800
+#define ENABLE_DLL_LOCK_STATUS BIT(26)
+#define FINE_TUNE_MODE_EN BIT(27)
+#define BIAS_OK_SIGNAL BIT(29)
+
+#define DLL_CONFIG_3_LOW_FREQ_VAL 0x08
+#define DLL_CONFIG_3_HIGH_FREQ_VAL 0x10
+
+#define CORE_VENDOR_SPEC_POR_VAL 0xa9c
#define CORE_CLK_PWRSAVE BIT(1)
#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
#define CORE_HC_MCLK_SEL_MASK (3 << 8)
-#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
-#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
+#define CORE_IO_PAD_PWR_SWITCH_EN BIT(15)
+#define CORE_IO_PAD_PWR_SWITCH BIT(16)
#define CORE_HC_SELECT_IN_EN BIT(18)
#define CORE_HC_SELECT_IN_HS400 (6 << 19)
#define CORE_HC_SELECT_IN_MASK (7 << 19)
-#define CORE_3_0V_SUPPORT (1 << 25)
-#define CORE_1_8V_SUPPORT (1 << 26)
+#define CORE_3_0V_SUPPORT BIT(25)
+#define CORE_1_8V_SUPPORT BIT(26)
#define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)
#define CORE_CSR_CDC_CTLR_CFG0 0x130
@@ -156,6 +165,7 @@ struct sdhci_msm_offset {
u32 core_dll_config_3;
u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
u32 core_ddr_config;
+ u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */
};
static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
@@ -185,6 +195,7 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
.core_dll_config_2 = 0x254,
.core_dll_config_3 = 0x258,
.core_ddr_config = 0x25c,
+ .core_dll_usr_ctl = 0x388,
};
static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
@@ -230,6 +241,7 @@ struct sdhci_msm_variant_ops {
struct sdhci_msm_variant_info {
bool mci_removed;
bool restore_dll_config;
+ bool uses_tassadar_dll;
const struct sdhci_msm_variant_ops *var_ops;
const struct sdhci_msm_offset *offset;
};
@@ -243,6 +255,8 @@ struct sdhci_msm_host {
struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
unsigned long clk_rate;
struct mmc_host *mmc;
+ struct opp_table *opp_table;
+ bool has_opp_table;
bool use_14lpp_dll_reset;
bool tuning_done;
bool calibration_done;
@@ -260,6 +274,9 @@ struct sdhci_msm_host {
bool use_cdr;
u32 transfer_mode;
bool updated_ddr_cfg;
+ bool uses_tassadar_dll;
+ u32 dll_config;
+ u32 ddr_config;
};
static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
@@ -332,7 +349,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
int rc;
clock = msm_get_clock_rate_for_bus_mode(host, clock);
- rc = clk_set_rate(core_clk, clock);
+ rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock);
if (rc) {
pr_err("%s: Failed to set clock at rate %u at timing %d\n",
mmc_hostname(host->mmc), clock,
@@ -601,6 +618,9 @@ static int msm_init_cm_dll(struct sdhci_host *host)
config &= ~CORE_CLK_PWRSAVE;
writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
+ config = msm_host->dll_config;
+ writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+
if (msm_host->use_14lpp_dll_reset) {
config = readl_relaxed(host->ioaddr +
msm_offset->core_dll_config);
@@ -626,7 +646,9 @@ static int msm_init_cm_dll(struct sdhci_host *host)
config |= CORE_DLL_PDN;
writel_relaxed(config, host->ioaddr +
msm_offset->core_dll_config);
- msm_cm_dll_set_freq(host);
+
+ if (!msm_host->dll_config)
+ msm_cm_dll_set_freq(host);
if (msm_host->use_14lpp_dll_reset &&
!IS_ERR_OR_NULL(msm_host->xo_clk)) {
@@ -666,7 +688,8 @@ static int msm_init_cm_dll(struct sdhci_host *host)
msm_offset->core_dll_config);
if (msm_host->use_14lpp_dll_reset) {
- msm_cm_dll_set_freq(host);
+ if (!msm_host->dll_config)
+ msm_cm_dll_set_freq(host);
config = readl_relaxed(host->ioaddr +
msm_offset->core_dll_config_2);
config &= ~CORE_DLL_CLOCK_DISABLE;
@@ -674,6 +697,27 @@ static int msm_init_cm_dll(struct sdhci_host *host)
msm_offset->core_dll_config_2);
}
+ /*
+ * Configure DLL user control register to enable DLL status.
+ * This setting is applicable to SDCC v5.1 onwards only.
+ */
+ if (msm_host->uses_tassadar_dll) {
+ config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
+ ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_usr_ctl);
+
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config_3);
+ config &= ~0xFF;
+ if (msm_host->clk_rate < 150000000)
+ config |= DLL_CONFIG_3_LOW_FREQ_VAL;
+ else
+ config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config_3);
+ }
+
config = readl_relaxed(host->ioaddr +
msm_offset->core_dll_config);
config |= CORE_DLL_EN;
@@ -951,7 +995,7 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
ddr_cfg_offset = msm_offset->core_ddr_config;
else
ddr_cfg_offset = msm_offset->core_ddr_config_old;
- writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);
+ writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);
if (mmc->ios.enhanced_strobe) {
config = readl_relaxed(host->ioaddr +
@@ -1130,6 +1174,12 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
msm_host->use_cdr = true;
/*
+ * Clear tuning_done flag before tuning to ensure proper
+ * HS400 settings.
+ */
+ msm_host->tuning_done = 0;
+
+ /*
* For HS400 tuning in HS200 timing requires:
* - select MCLK/2 in VENDOR_SPEC
* - program MCLK to 400MHz (or nearest supported) in GCC
@@ -1830,6 +1880,36 @@ static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
sdhci_reset(host, mask);
}
+#define DRIVER_NAME "sdhci_msm"
+#define SDHCI_MSM_DUMP(f, x...) \
+ pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
+
+static void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_msm_offset *msm_offset = msm_host->offset;
+
+ SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n");
+
+ SDHCI_MSM_DUMP(
+ "DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n",
+ readl_relaxed(host->ioaddr + msm_offset->core_dll_status),
+ readl_relaxed(host->ioaddr + msm_offset->core_dll_config),
+ readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2));
+ SDHCI_MSM_DUMP(
+ "DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n",
+ readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3),
+ readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl),
+ readl_relaxed(host->ioaddr + msm_offset->core_ddr_config));
+ SDHCI_MSM_DUMP(
+		"Vndr func: 0x%08x | Vndr func2: 0x%08x | Vndr func3: 0x%08x\n",
+ readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec),
+ readl_relaxed(host->ioaddr +
+ msm_offset->core_vendor_spec_func2),
+ readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3));
+}
+
static const struct sdhci_msm_variant_ops mci_var_ops = {
.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
@@ -1858,10 +1938,18 @@ static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
.offset = &sdhci_msm_v5_offset,
};
+static const struct sdhci_msm_variant_info sm8250_sdhci_var = {
+ .mci_removed = true,
+ .uses_tassadar_dll = true,
+ .var_ops = &v5_var_ops,
+ .offset = &sdhci_msm_v5_offset,
+};
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
+ {.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var},
{},
};
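The new sm8250 entry reuses the v5 register offsets and only flips variant flags; probe then copies those flags out of the matched .data. A reduced, runnable sketch of that dispatch (the struct and lookup are simplified stand-ins for the of_device_id machinery):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct variant_info {
	bool mci_removed;
	bool uses_tassadar_dll;
};

static const struct variant_info sm8250_var = {
	.mci_removed = true,
	.uses_tassadar_dll = true,
};

static const struct {
	const char *compatible;
	const struct variant_info *data;
} dt_match[] = {
	{ "qcom,sm8250-sdhci", &sm8250_var },
	{ NULL, NULL },
};

static const struct variant_info *match_data(const char *compatible)
{
	for (int i = 0; dt_match[i].compatible; i++)
		if (!strcmp(dt_match[i].compatible, compatible))
			return dt_match[i].data;
	return NULL;
}

int main(void)
{
	const struct variant_info *v = match_data("qcom,sm8250-sdhci");

	if (v)
		printf("uses_tassadar_dll = %d\n", v->uses_tassadar_dll);
	return 0;
}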
@@ -1877,16 +1965,34 @@ static const struct sdhci_ops sdhci_msm_ops = {
.write_w = sdhci_msm_writew,
.write_b = sdhci_msm_writeb,
.irq = sdhci_msm_cqe_irq,
+ .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_msm_ops,
};
+static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
+ struct sdhci_host *host)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (of_property_read_u32(node, "qcom,ddr-config",
+ &msm_host->ddr_config))
+ msm_host->ddr_config = DDR_CONFIG_POR_VAL;
+
+ of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
+}
+
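sdhci_msm_get_of_property() treats both properties as optional: a failed of_property_read_u32() leaves ddr_config at the power-on-reset default and dll_config at zero. A standalone sketch of the fallback pattern; the stub and the POR number are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define DDR_CONFIG_POR_VAL	0x80040873u	/* placeholder POR default */

/* Stub standing in for of_property_read_u32(); pretend it is absent. */
static int read_u32_prop(const char *prop, uint32_t *out)
{
	(void)prop;
	(void)out;
	return -1;	/* nonzero means "property not found" */
}

int main(void)
{
	uint32_t ddr_config;
	uint32_t dll_config = 0;	/* stays 0 when unset */

	if (read_u32_prop("qcom,ddr-config", &ddr_config))
		ddr_config = DDR_CONFIG_POR_VAL;
	read_u32_prop("qcom,dll-config", &dll_config);

	printf("ddr=0x%08x dll=0x%08x\n", ddr_config, dll_config);
	return 0;
}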
static int sdhci_msm_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
@@ -1925,10 +2031,12 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->restore_dll_config = var_info->restore_dll_config;
msm_host->var_ops = var_info->var_ops;
msm_host->offset = var_info->offset;
+ msm_host->uses_tassadar_dll = var_info->uses_tassadar_dll;
msm_offset = msm_host->offset;
sdhci_get_of_property(pdev);
+ sdhci_msm_get_of_property(pdev, host);
msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
@@ -1962,8 +2070,23 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
msm_host->bulk_clks[0].clk = clk;
+ msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
+ if (IS_ERR(msm_host->opp_table)) {
+ ret = PTR_ERR(msm_host->opp_table);
+ goto bus_clk_disable;
+ }
+
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(&pdev->dev);
+ if (!ret) {
+ msm_host->has_opp_table = true;
+ } else if (ret != -ENODEV) {
+		dev_err(&pdev->dev, "Invalid OPP table in device tree\n");
+ goto opp_cleanup;
+ }
+
/* Vote for maximum clock rate for maximum performance */
- ret = clk_set_rate(clk, INT_MAX);
+ ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
if (ret)
dev_warn(&pdev->dev, "core clock boost failed\n");
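The probe hunk treats the OPP table as optional: dev_pm_opp_of_add_table() returning -ENODEV just means the device tree carries no table, while any other error aborts probe through the new opp_cleanup label. A compilable sketch of that decision, with the OPP call stubbed out:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stub for dev_pm_opp_of_add_table(); pretend no table node exists. */
static int add_opp_table(void)
{
	return -ENODEV;
}

int main(void)
{
	bool has_opp_table = false;
	int ret = add_opp_table();

	if (!ret)
		has_opp_table = true;
	else if (ret != -ENODEV)
		return 1;	/* a table exists but is malformed: fail probe */

	printf("has_opp_table = %d\n", has_opp_table);
	return 0;
}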
@@ -1980,7 +2103,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
msm_host->bulk_clks);
if (ret)
- goto bus_clk_disable;
+ goto opp_cleanup;
/*
* xo clock is needed for FLL feature of cm_dll.
@@ -2117,6 +2240,10 @@ pm_runtime_disable:
clk_disable:
clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
msm_host->bulk_clks);
+opp_cleanup:
+ if (msm_host->has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(msm_host->opp_table);
bus_clk_disable:
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
@@ -2135,6 +2262,9 @@ static int sdhci_msm_remove(struct platform_device *pdev)
sdhci_remove_host(host, dead);
+ if (msm_host->has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(msm_host->opp_table);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -2153,6 +2283,8 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ /* Drop the performance vote */
+ dev_pm_opp_set_rate(dev, 0);
clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
msm_host->bulk_clks);
@@ -2175,9 +2307,11 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
* restore the SDR DLL settings when the clock is ungated.
*/
if (msm_host->restore_dll_config && msm_host->clk_rate)
- return sdhci_msm_restore_sdr_dll_config(host);
+ ret = sdhci_msm_restore_sdr_dll_config(host);
- return 0;
+ dev_pm_opp_set_rate(dev, msm_host->clk_rate);
+
+ return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
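The runtime-PM hunks order the OPP vote around the clock gating: suspend drops the vote (rate 0) before disabling the bulk clocks, and resume re-votes the cached clk_rate after they are back on. A stubbed sketch of that ordering:

#include <stdio.h>

static unsigned long cached_clk_rate = 202000000; /* example cached rate */

static void opp_set_rate(unsigned long hz) { printf("vote %lu Hz\n", hz); }
static void clks_off(void) { printf("clocks gated\n"); }
static void clks_on(void)  { printf("clocks ungated\n"); }

static void runtime_suspend(void)
{
	opp_set_rate(0);	/* drop the performance vote first */
	clks_off();
}

static void runtime_resume(void)
{
	clks_on();
	opp_set_rate(cached_clk_rate);	/* restore the last requested rate */
}

int main(void)
{
	runtime_suspend();
	runtime_resume();
	return 0;
}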
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index d4905c106c06..db9b544465cd 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -28,15 +28,26 @@
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+
+#define SDHCI_ARASAN_ITAPDLY_REGISTER 0xF0F8
+#define SDHCI_ARASAN_OTAPDLY_REGISTER 0xF0FC
+
#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
#define PHY_CLK_TOO_SLOW_HZ 400000
+#define SDHCI_ITAPDLY_CHGWIN 0x200
+#define SDHCI_ITAPDLY_ENABLE 0x100
+#define SDHCI_OTAPDLY_ENABLE 0x40
+
/* Default settings for ZynqMP Clock Phases */
#define ZYNQMP_ICLK_PHASE {0, 63, 63, 0, 63, 0, 0, 183, 54, 0, 0}
#define ZYNQMP_OCLK_PHASE {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0}
+#define VERSAL_ICLK_PHASE {0, 132, 132, 0, 132, 0, 0, 162, 90, 0, 0}
+#define VERSAL_OCLK_PHASE {0, 60, 48, 0, 48, 72, 90, 36, 60, 90, 0}
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -62,22 +73,36 @@ struct sdhci_arasan_soc_ctl_field {
/**
* struct sdhci_arasan_soc_ctl_map - Map in syscon to corecfg registers
*
- * It's up to the licensee of the Arsan IP block to make these available
- * somewhere if needed. Presumably these will be scattered somewhere that's
- * accessible via the syscon API.
- *
* @baseclkfreq: Where to find corecfg_baseclkfreq
* @clockmultiplier: Where to find corecfg_clockmultiplier
+ * @support64b: Where to find SUPPORT64B bit
* @hiword_update: If true, use HIWORD_UPDATE to access the syscon
+ *
+ * It's up to the licensee of the Arasan IP block to make these available
+ * somewhere if needed. Presumably these will be scattered somewhere that's
+ * accessible via the syscon API.
*/
struct sdhci_arasan_soc_ctl_map {
struct sdhci_arasan_soc_ctl_field baseclkfreq;
struct sdhci_arasan_soc_ctl_field clockmultiplier;
+ struct sdhci_arasan_soc_ctl_field support64b;
bool hiword_update;
};
/**
- * struct sdhci_arasan_clk_data
+ * struct sdhci_arasan_clk_ops - Clock Operations for Arasan SD controller
+ *
+ * @sdcardclk_ops: The output clock related operations
+ * @sampleclk_ops: The sample clock related operations
+ */
+struct sdhci_arasan_clk_ops {
+ const struct clk_ops *sdcardclk_ops;
+ const struct clk_ops *sampleclk_ops;
+};
+
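With sdhci_arasan_clk_ops in place, clock registration no longer branches on compatibles; each of_data entry simply carries the ops pair to install (see the sdcardclk/sampleclk hunks later in this file). A compressed sketch of the indirection, with clk_ops reduced to a name:

#include <stdio.h>

struct clk_ops_sketch { const char *name; };

static const struct clk_ops_sketch zynqmp_sdcard = { "zynqmp-sdcardclk" };
static const struct clk_ops_sketch zynqmp_sample = { "zynqmp-sampleclk" };

struct arasan_clk_ops_sketch {
	const struct clk_ops_sketch *sdcardclk_ops;
	const struct clk_ops_sketch *sampleclk_ops;
};

static const struct arasan_clk_ops_sketch zynqmp_ops = {
	.sdcardclk_ops = &zynqmp_sdcard,
	.sampleclk_ops = &zynqmp_sample,
};

int main(void)
{
	/* Registration just dereferences the variant's table. */
	printf("%s / %s\n", zynqmp_ops.sdcardclk_ops->name,
	       zynqmp_ops.sampleclk_ops->name);
	return 0;
}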
+/**
+ * struct sdhci_arasan_clk_data - Arasan Controller Clock Data.
+ *
* @sdcardclk_hw: Struct for the clock we might provide to a PHY.
* @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
* @sampleclk_hw: Struct for the clock we might provide to a PHY.
@@ -98,19 +123,19 @@ struct sdhci_arasan_clk_data {
void *clk_of_data;
};
-struct sdhci_arasan_zynqmp_clk_data {
- const struct zynqmp_eemi_ops *eemi_ops;
-};
-
/**
- * struct sdhci_arasan_data
+ * struct sdhci_arasan_data - Arasan Controller Data
+ *
* @host: Pointer to the main SDHCI host structure.
* @clk_ahb: Pointer to the AHB clock
* @phy: Pointer to the generic phy
* @is_phy_on: True if the PHY is on; false if not.
+ * @has_cqe: True if controller has command queuing engine.
* @clk_data: Struct for the Arasan Controller Clock Data.
+ * @clk_ops: Struct for the Arasan Controller Clock Operations.
* @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
* @soc_ctl_map: Map to get offsets into soc_ctl registers.
+ * @quirks: Arasan deviations from spec.
*/
struct sdhci_arasan_data {
struct sdhci_host *host;
@@ -120,10 +145,11 @@ struct sdhci_arasan_data {
bool has_cqe;
struct sdhci_arasan_clk_data clk_data;
+ const struct sdhci_arasan_clk_ops *clk_ops;
struct regmap *soc_ctl_base;
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
- unsigned int quirks; /* Arasan deviations from spec */
+ unsigned int quirks;
/* Controller does not have CD wired and will not function normally without */
#define SDHCI_ARASAN_QUIRK_FORCE_CDTEST BIT(0)
@@ -135,6 +161,7 @@ struct sdhci_arasan_data {
struct sdhci_arasan_of_data {
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
const struct sdhci_pltfm_data *pdata;
+ const struct sdhci_arasan_clk_ops *clk_ops;
};
static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
@@ -155,17 +182,26 @@ static const struct sdhci_arasan_soc_ctl_map intel_lgm_sdxc_soc_ctl_map = {
.hiword_update = false,
};
+static const struct sdhci_arasan_soc_ctl_map intel_keembay_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0x0, .width = 8, .shift = 14 },
+ .clockmultiplier = { .reg = 0x4, .width = 8, .shift = 14 },
+ .support64b = { .reg = 0x4, .width = 1, .shift = 24 },
+ .hiword_update = false,
+};
+
/**
* sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
*
+ * @host: The sdhci_host
+ * @fld: The field to write to
+ * @val: The value to write
+ *
* This function allows writing to fields in sdhci_arasan_soc_ctl_map.
* Note that if a field is specified as not available (shift < 0) then
* this function will silently return an error code. It will be noisy
* and print errors for any other (unexpected) errors.
*
- * @host: The sdhci_host
- * @fld: The field to write to
- * @val: The value to write
+ * Return: 0 on success and error value on error
*/
static int sdhci_arasan_syscon_write(struct sdhci_host *host,
const struct sdhci_arasan_soc_ctl_field *fld,
@@ -335,29 +371,6 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_power = sdhci_set_power_and_bus_voltage,
};
-static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
- .ops = &sdhci_arasan_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
- SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
- SDHCI_QUIRK2_STOP_WITH_TC,
-};
-
-static struct sdhci_arasan_of_data sdhci_arasan_data = {
- .pdata = &sdhci_arasan_pdata,
-};
-
-static const struct sdhci_pltfm_data sdhci_arasan_zynqmp_pdata = {
- .ops = &sdhci_arasan_ops,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
- SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
- SDHCI_QUIRK2_STOP_WITH_TC,
-};
-
-static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = {
- .pdata = &sdhci_arasan_zynqmp_pdata,
-};
-
static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
int cmd_error = 0;
@@ -414,28 +427,14 @@ static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
-static struct sdhci_arasan_of_data sdhci_arasan_rk3399_data = {
- .soc_ctl_map = &rk3399_soc_ctl_map,
- .pdata = &sdhci_arasan_cqe_pdata,
-};
-
-static struct sdhci_arasan_of_data intel_lgm_emmc_data = {
- .soc_ctl_map = &intel_lgm_emmc_soc_ctl_map,
- .pdata = &sdhci_arasan_cqe_pdata,
-};
-
-static struct sdhci_arasan_of_data intel_lgm_sdxc_data = {
- .soc_ctl_map = &intel_lgm_sdxc_soc_ctl_map,
- .pdata = &sdhci_arasan_cqe_pdata,
-};
-
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
* @dev: Address of the device structure
- * Returns 0 on success and error value on error
*
* Put the device in a low power state.
+ *
+ * Return: 0 on success and error value on error
*/
static int sdhci_arasan_suspend(struct device *dev)
{
@@ -476,9 +475,10 @@ static int sdhci_arasan_suspend(struct device *dev)
/**
* sdhci_arasan_resume - Resume method for the driver
* @dev: Address of the device structure
- * Returns 0 on success and error value on error
*
* Resume operation after suspend
+ *
+ * Return: 0 on success and error value on error
*/
static int sdhci_arasan_resume(struct device *dev)
{
@@ -524,54 +524,19 @@ static int sdhci_arasan_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
sdhci_arasan_resume);
-static const struct of_device_id sdhci_arasan_of_match[] = {
- /* SoC-specific compatible strings w/ soc_ctl_map */
- {
- .compatible = "rockchip,rk3399-sdhci-5.1",
- .data = &sdhci_arasan_rk3399_data,
- },
- {
- .compatible = "intel,lgm-sdhci-5.1-emmc",
- .data = &intel_lgm_emmc_data,
- },
- {
- .compatible = "intel,lgm-sdhci-5.1-sdxc",
- .data = &intel_lgm_sdxc_data,
- },
- /* Generic compatible below here */
- {
- .compatible = "arasan,sdhci-8.9a",
- .data = &sdhci_arasan_data,
- },
- {
- .compatible = "arasan,sdhci-5.1",
- .data = &sdhci_arasan_data,
- },
- {
- .compatible = "arasan,sdhci-4.9a",
- .data = &sdhci_arasan_data,
- },
- {
- .compatible = "xlnx,zynqmp-8.9a",
- .data = &sdhci_arasan_zynqmp_data,
- },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
-
/**
* sdhci_arasan_sdcardclk_recalc_rate - Return the card clock rate
*
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be rate of clk_xin).
+ *
* Return the current actual rate of the SD card clock. This can be used
 * to communicate with our PHY.
*
- * @hw: Pointer to the hardware clock structure.
- * @parent_rate The parent rate (should be rate of clk_xin).
- * Returns the card clock rate.
+ * Return: The card clock rate.
*/
static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
-
{
struct sdhci_arasan_clk_data *clk_data =
container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
@@ -589,16 +554,16 @@ static const struct clk_ops arasan_sdcardclk_ops = {
/**
* sdhci_arasan_sampleclk_recalc_rate - Return the sampling clock rate
*
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be rate of clk_xin).
+ *
* Return the current actual rate of the sampling clock. This can be used
 * to communicate with our PHY.
*
- * @hw: Pointer to the hardware clock structure.
- * @parent_rate The parent rate (should be rate of clk_xin).
- * Returns the sample clock rate.
+ * Return: The sample clock rate.
*/
static unsigned long sdhci_arasan_sampleclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
-
{
struct sdhci_arasan_clk_data *clk_data =
container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
@@ -616,23 +581,20 @@ static const struct clk_ops arasan_sampleclk_ops = {
/**
* sdhci_zynqmp_sdcardclk_set_phase - Set the SD Output Clock Tap Delays
*
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 - 359.
+ *
* Set the SD Output Clock Tap Delays for Output path
*
- * @hw: Pointer to the hardware clock structure.
- * @degrees The clock phase shift between 0 - 359.
* Return: 0 on success and error value on error
*/
static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
-
{
struct sdhci_arasan_clk_data *clk_data =
container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
struct sdhci_arasan_data *sdhci_arasan =
container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
- clk_data->clk_of_data;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
const char *clk_name = clk_hw_get_name(hw);
u32 node_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 : NODE_SD_1;
u8 tap_delay, tap_max = 0;
@@ -672,8 +634,7 @@ static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
tap_delay = (degrees * tap_max) / 360;
/* Set the Clock Phase */
- ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
- PM_TAPDELAY_OUTPUT, tap_delay, NULL);
+ ret = zynqmp_pm_set_sd_tapdelay(node_id, PM_TAPDELAY_OUTPUT, tap_delay);
if (ret)
pr_err("Error setting Output Tap Delay\n");
@@ -688,23 +649,20 @@ static const struct clk_ops zynqmp_sdcardclk_ops = {
/**
* sdhci_zynqmp_sampleclk_set_phase - Set the SD Input Clock Tap Delays
*
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 - 359.
+ *
* Set the SD Input Clock Tap Delays for Input path
*
- * @hw: Pointer to the hardware clock structure.
- * @degrees The clock phase shift between 0 - 359.
* Return: 0 on success and error value on error
*/
static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
-
{
struct sdhci_arasan_clk_data *clk_data =
container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
struct sdhci_arasan_data *sdhci_arasan =
container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
- clk_data->clk_of_data;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
const char *clk_name = clk_hw_get_name(hw);
u32 node_id = !strcmp(clk_name, "clk_in_sd0") ? NODE_SD_0 : NODE_SD_1;
u8 tap_delay, tap_max = 0;
@@ -744,8 +702,7 @@ static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
tap_delay = (degrees * tap_max) / 360;
/* Set the Clock Phase */
- ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
- PM_TAPDELAY_INPUT, tap_delay, NULL);
+ ret = zynqmp_pm_set_sd_tapdelay(node_id, PM_TAPDELAY_INPUT, tap_delay);
if (ret)
pr_err("Error setting Input Tap Delay\n");
@@ -757,13 +714,154 @@ static const struct clk_ops zynqmp_sampleclk_ops = {
.set_phase = sdhci_zynqmp_sampleclk_set_phase,
};
+/**
+ * sdhci_versal_sdcardclk_set_phase - Set the SD Output Clock Tap Delays
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 - 359.
+ *
+ * Set the SD Output Clock Tap Delays for Output path
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_versal_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ u8 tap_delay, tap_max = 0;
+
+	/*
+	 * Tap delays are only applicable for SDHCI_SPEC_300 and above.
+	 * Versal does not set a phase for clocks at or below 25 MHz,
+	 * and if degrees is zero there is nothing to do.
+	 */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 30 Taps are available */
+ tap_max = 30;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 15 Taps are available */
+ tap_max = 15;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 8 Taps are available */
+		tap_max = 8;
+		break;
+	default:
+ break;
+ }
+
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ if (tap_delay) {
+ u32 regval;
+
+ regval = sdhci_readl(host, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ regval |= SDHCI_OTAPDLY_ENABLE;
+ sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ regval |= tap_delay;
+ sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ }
+
+ return 0;
+}
+
+static const struct clk_ops versal_sdcardclk_ops = {
+ .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+ .set_phase = sdhci_versal_sdcardclk_set_phase,
+};
+
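Both Versal set_phase callbacks scale the requested phase linearly onto the taps available for the current timing: tap_delay = (degrees * tap_max) / 360, with integer truncation. A worked standalone example:

#include <stdint.h>
#include <stdio.h>

static uint8_t phase_to_tap(int degrees, uint8_t tap_max)
{
	return (uint8_t)((degrees * tap_max) / 360);
}

int main(void)
{
	/* 50 MHz output path, 30 taps: 90 degrees -> tap 7 (7.5 truncated) */
	printf("%u\n", phase_to_tap(90, 30));
	/* 200 MHz output path, 8 taps: 90 degrees -> tap 2 */
	printf("%u\n", phase_to_tap(90, 8));
	return 0;
}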
+/**
+ * sdhci_versal_sampleclk_set_phase - Set the SD Input Clock Tap Delays
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 - 359.
+ *
+ * Set the SD Input Clock Tap Delays for Input path
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_versal_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ u8 tap_delay, tap_max = 0;
+
+	/*
+	 * Tap delays are only applicable for SDHCI_SPEC_300 and above.
+	 * Versal does not set a phase for clocks at or below 25 MHz,
+	 * and if degrees is zero there is nothing to do.
+	 */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 120 Taps are available */
+ tap_max = 120;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 60 Taps are available */
+ tap_max = 60;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 30 Taps are available */
+		tap_max = 30;
+		break;
+	default:
+ break;
+ }
+
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ if (tap_delay) {
+ u32 regval;
+
+ regval = sdhci_readl(host, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval |= SDHCI_ITAPDLY_CHGWIN;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval |= SDHCI_ITAPDLY_ENABLE;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval |= tap_delay;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval &= ~SDHCI_ITAPDLY_CHGWIN;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ }
+
+ return 0;
+}
+
+static const struct clk_ops versal_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+ .set_phase = sdhci_versal_sampleclk_set_phase,
+};
+
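The input-path writes above follow a change-window protocol: open CHGWIN, enable the delay line, program the tap value, then close the window. A sketch with register I/O reduced to a logging store:

#include <stdint.h>
#include <stdio.h>

#define ITAPDLY_CHGWIN	0x200u
#define ITAPDLY_ENABLE	0x100u

static uint32_t itapdly;	/* stands in for the ITAPDLY register */

static void wr(uint32_t v) { itapdly = v; printf("ITAPDLY <- 0x%03x\n", v); }

static void set_input_tap(uint8_t tap_delay)
{
	uint32_t regval = itapdly;

	regval |= ITAPDLY_CHGWIN;	/* open the change window */
	wr(regval);
	regval |= ITAPDLY_ENABLE;	/* enable the delay line */
	wr(regval);
	regval |= tap_delay;		/* program the tap value */
	wr(regval);
	regval &= ~ITAPDLY_CHGWIN;	/* close the window again */
	wr(regval);
}

int main(void)
{
	set_input_tap(30);
	return 0;
}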
static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
- sdhci_arasan->clk_data.clk_of_data;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
u16 clk;
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
@@ -771,8 +869,7 @@ static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Issue DLL Reset */
- eemi_ops->ioctl(deviceid, IOCTL_SD_DLL_RESET,
- PM_DLL_RESET_PULSE, 0, NULL);
+ zynqmp_pm_sd_dll_reset(deviceid, PM_DLL_RESET_PULSE);
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
@@ -804,6 +901,9 @@ static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
/**
* sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
*
+ * @host: The sdhci_host
+ * @value: The value to write
+ *
* The corecfg_clockmultiplier is supposed to contain clock multiplier
* value of programmable clock generator.
*
@@ -815,8 +915,6 @@ static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
* - The value of corecfg_clockmultiplier should sync with that of corresponding
* value reading from sdhci_capability_register. So this function is called
* once at probe time and never called again.
- *
- * @host: The sdhci_host
*/
static void sdhci_arasan_update_clockmultiplier(struct sdhci_host *host,
u32 value)
@@ -843,6 +941,8 @@ static void sdhci_arasan_update_clockmultiplier(struct sdhci_host *host,
/**
* sdhci_arasan_update_baseclkfreq - Set corecfg_baseclkfreq
*
+ * @host: The sdhci_host
+ *
* The corecfg_baseclkfreq is supposed to contain the MHz of clk_xin. This
* function can be used to make that happen.
*
@@ -854,8 +954,6 @@ static void sdhci_arasan_update_clockmultiplier(struct sdhci_host *host,
* - It's assumed that clk_xin is not dynamic and that we use the SDHCI divider
* to achieve lower clock rates. That means that this function is called once
* at probe time and never called again.
- *
- * @host: The sdhci_host
*/
static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
{
@@ -919,10 +1017,10 @@ static void arasan_dt_read_clk_phase(struct device *dev,
/**
* arasan_dt_parse_clk_phases - Read Clock Delay values from DT
*
- * Called at initialization to parse the values of Clock Delays.
- *
* @dev: Pointer to our struct device.
* @clk_data: Pointer to the Clock Data structure
+ *
+ * Called at initialization to parse the values of Clock Delays.
*/
static void arasan_dt_parse_clk_phases(struct device *dev,
struct sdhci_arasan_clk_data *clk_data)
@@ -954,6 +1052,16 @@ static void arasan_dt_parse_clk_phases(struct device *dev,
}
}
+ if (of_device_is_compatible(dev->of_node, "xlnx,versal-8.9a")) {
+ iclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) VERSAL_ICLK_PHASE;
+ oclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) VERSAL_OCLK_PHASE;
+
+ for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
+ clk_data->clk_phase_in[i] = iclk_phase[i];
+ clk_data->clk_phase_out[i] = oclk_phase[i];
+ }
+ }
+
arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_LEGACY,
"clk-phase-legacy");
arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS,
@@ -978,17 +1086,191 @@ static void arasan_dt_parse_clk_phases(struct device *dev,
"clk-phase-mmc-hs400");
}
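The VERSAL_*CLK_PHASE defaults are brace lists, so the hunk above casts them to anonymous arrays with C99 compound literals before indexing. A minimal demonstration of the idiom:

#include <stdio.h>

#define PHASE_COUNT	4
#define PHASE_DEFAULTS	{0, 132, 132, 0}

int main(void)
{
	int *phase = (int [PHASE_COUNT]) PHASE_DEFAULTS;

	for (int i = 0; i < PHASE_COUNT; i++)
		printf("phase[%d] = %d\n", i, phase[i]);
	return 0;
}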
+static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
+ .ops = &sdhci_arasan_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC,
+};
+
+static const struct sdhci_arasan_clk_ops arasan_clk_ops = {
+ .sdcardclk_ops = &arasan_sdcardclk_ops,
+ .sampleclk_ops = &arasan_sampleclk_ops,
+};
+
+static struct sdhci_arasan_of_data sdhci_arasan_generic_data = {
+ .pdata = &sdhci_arasan_pdata,
+ .clk_ops = &arasan_clk_ops,
+};
+
+static const struct sdhci_pltfm_data sdhci_keembay_emmc_pdata = {
+ .ops = &sdhci_arasan_cqe_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_NO_LED |
+ SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
+ SDHCI_QUIRK2_STOP_WITH_TC |
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
+};
+
+static const struct sdhci_pltfm_data sdhci_keembay_sd_pdata = {
+ .ops = &sdhci_arasan_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_NO_LED |
+ SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
+ SDHCI_QUIRK2_STOP_WITH_TC |
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
+};
+
+static const struct sdhci_pltfm_data sdhci_keembay_sdio_pdata = {
+ .ops = &sdhci_arasan_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_NO_LED |
+ SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_HOST_OFF_CARD_ON |
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
+};
+
+static struct sdhci_arasan_of_data sdhci_arasan_rk3399_data = {
+ .soc_ctl_map = &rk3399_soc_ctl_map,
+ .pdata = &sdhci_arasan_cqe_pdata,
+ .clk_ops = &arasan_clk_ops,
+};
+
+static struct sdhci_arasan_of_data intel_lgm_emmc_data = {
+ .soc_ctl_map = &intel_lgm_emmc_soc_ctl_map,
+ .pdata = &sdhci_arasan_cqe_pdata,
+ .clk_ops = &arasan_clk_ops,
+};
+
+static struct sdhci_arasan_of_data intel_lgm_sdxc_data = {
+ .soc_ctl_map = &intel_lgm_sdxc_soc_ctl_map,
+ .pdata = &sdhci_arasan_cqe_pdata,
+ .clk_ops = &arasan_clk_ops,
+};
+
+static const struct sdhci_pltfm_data sdhci_arasan_zynqmp_pdata = {
+ .ops = &sdhci_arasan_ops,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC,
+};
+
+static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = {
+ .sdcardclk_ops = &zynqmp_sdcardclk_ops,
+ .sampleclk_ops = &zynqmp_sampleclk_ops,
+};
+
+static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = {
+ .pdata = &sdhci_arasan_zynqmp_pdata,
+ .clk_ops = &zynqmp_clk_ops,
+};
+
+static const struct sdhci_arasan_clk_ops versal_clk_ops = {
+ .sdcardclk_ops = &versal_sdcardclk_ops,
+ .sampleclk_ops = &versal_sampleclk_ops,
+};
+
+static struct sdhci_arasan_of_data sdhci_arasan_versal_data = {
+ .pdata = &sdhci_arasan_zynqmp_pdata,
+ .clk_ops = &versal_clk_ops,
+};
+
+static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
+ .soc_ctl_map = &intel_keembay_soc_ctl_map,
+ .pdata = &sdhci_keembay_emmc_pdata,
+};
+
+static struct sdhci_arasan_of_data intel_keembay_sd_data = {
+ .soc_ctl_map = &intel_keembay_soc_ctl_map,
+ .pdata = &sdhci_keembay_sd_pdata,
+};
+
+static struct sdhci_arasan_of_data intel_keembay_sdio_data = {
+ .soc_ctl_map = &intel_keembay_soc_ctl_map,
+ .pdata = &sdhci_keembay_sdio_pdata,
+};
+
+static const struct of_device_id sdhci_arasan_of_match[] = {
+ /* SoC-specific compatible strings w/ soc_ctl_map */
+ {
+ .compatible = "rockchip,rk3399-sdhci-5.1",
+ .data = &sdhci_arasan_rk3399_data,
+ },
+ {
+ .compatible = "intel,lgm-sdhci-5.1-emmc",
+ .data = &intel_lgm_emmc_data,
+ },
+ {
+ .compatible = "intel,lgm-sdhci-5.1-sdxc",
+ .data = &intel_lgm_sdxc_data,
+ },
+ {
+ .compatible = "intel,keembay-sdhci-5.1-emmc",
+ .data = &intel_keembay_emmc_data,
+ },
+ {
+ .compatible = "intel,keembay-sdhci-5.1-sd",
+ .data = &intel_keembay_sd_data,
+ },
+ {
+ .compatible = "intel,keembay-sdhci-5.1-sdio",
+ .data = &intel_keembay_sdio_data,
+ },
+ /* Generic compatible below here */
+ {
+ .compatible = "arasan,sdhci-8.9a",
+ .data = &sdhci_arasan_generic_data,
+ },
+ {
+ .compatible = "arasan,sdhci-5.1",
+ .data = &sdhci_arasan_generic_data,
+ },
+ {
+ .compatible = "arasan,sdhci-4.9a",
+ .data = &sdhci_arasan_generic_data,
+ },
+ {
+ .compatible = "xlnx,zynqmp-8.9a",
+ .data = &sdhci_arasan_zynqmp_data,
+ },
+ {
+ .compatible = "xlnx,versal-8.9a",
+ .data = &sdhci_arasan_versal_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
+
/**
* sdhci_arasan_register_sdcardclk - Register the sdcardclk for a PHY to use
*
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ *
* Some PHY devices need to know what the actual card clock is. In order for
* them to find out, we'll provide a clock through the common clock framework
* for them to query.
*
- * @sdhci_arasan: Our private data structure.
- * @clk_xin: Pointer to the functional clock
- * @dev: Pointer to our struct device.
- * Returns 0 on success and error value on error
+ * Return: 0 on success and error value on error
*/
static int
sdhci_arasan_register_sdcardclk(struct sdhci_arasan_data *sdhci_arasan,
@@ -1012,10 +1294,7 @@ sdhci_arasan_register_sdcardclk(struct sdhci_arasan_data *sdhci_arasan,
sdcardclk_init.parent_names = &parent_clk_name;
sdcardclk_init.num_parents = 1;
sdcardclk_init.flags = CLK_GET_RATE_NOCACHE;
- if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
- sdcardclk_init.ops = &zynqmp_sdcardclk_ops;
- else
- sdcardclk_init.ops = &arasan_sdcardclk_ops;
+ sdcardclk_init.ops = sdhci_arasan->clk_ops->sdcardclk_ops;
clk_data->sdcardclk_hw.init = &sdcardclk_init;
clk_data->sdcardclk =
@@ -1033,14 +1312,15 @@ sdhci_arasan_register_sdcardclk(struct sdhci_arasan_data *sdhci_arasan,
/**
* sdhci_arasan_register_sampleclk - Register the sampleclk for a PHY to use
*
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ *
* Some PHY devices need to know what the actual card clock is. In order for
* them to find out, we'll provide a clock through the common clock framework
* for them to query.
*
- * @sdhci_arasan: Our private data structure.
- * @clk_xin: Pointer to the functional clock
- * @dev: Pointer to our struct device.
- * Returns 0 on success and error value on error
+ * Return: 0 on success and error value on error
*/
static int
sdhci_arasan_register_sampleclk(struct sdhci_arasan_data *sdhci_arasan,
@@ -1064,10 +1344,7 @@ sdhci_arasan_register_sampleclk(struct sdhci_arasan_data *sdhci_arasan,
sampleclk_init.parent_names = &parent_clk_name;
sampleclk_init.num_parents = 1;
sampleclk_init.flags = CLK_GET_RATE_NOCACHE;
- if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
- sampleclk_init.ops = &zynqmp_sampleclk_ops;
- else
- sampleclk_init.ops = &arasan_sampleclk_ops;
+ sampleclk_init.ops = sdhci_arasan->clk_ops->sampleclk_ops;
clk_data->sampleclk_hw.init = &sampleclk_init;
clk_data->sampleclk =
@@ -1085,10 +1362,10 @@ sdhci_arasan_register_sampleclk(struct sdhci_arasan_data *sdhci_arasan,
/**
* sdhci_arasan_unregister_sdclk - Undoes sdhci_arasan_register_sdclk()
*
+ * @dev: Pointer to our struct device.
+ *
* Should be called any time we're exiting and sdhci_arasan_register_sdclk()
* returned success.
- *
- * @dev: Pointer to our struct device.
*/
static void sdhci_arasan_unregister_sdclk(struct device *dev)
{
@@ -1101,8 +1378,46 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
}
/**
+ * sdhci_arasan_update_support64b - Set SUPPORT_64B (64-bit System Bus Support)
+ * @host: The sdhci_host
+ * @value: The value to write
+ *
+ * This should be set based on the System Address Bus.
+ * 0: the Core supports only 32-bit System Address Bus.
+ * 1: the Core supports 64-bit System Address Bus.
+ *
+ * NOTES:
+ * - For Keem Bay, it is required to clear this bit. Its default value is 1'b1.
+ *   Keem Bay does not support 64-bit access.
+ */
+static void sdhci_arasan_update_support64b(struct sdhci_host *host, u32 value)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_arasan_soc_ctl_map *soc_ctl_map =
+ sdhci_arasan->soc_ctl_map;
+
+ /* Having a map is optional */
+ if (!soc_ctl_map)
+ return;
+
+ /* If we have a map, we expect to have a syscon */
+ if (!sdhci_arasan->soc_ctl_base) {
+ pr_warn("%s: Have regmap, but no soc-ctl-syscon\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+
+ sdhci_arasan_syscon_write(host, &soc_ctl_map->support64b, value);
+}
+
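A soc_ctl field descriptor (reg/width/shift) boils down to a masked read-modify-write; that is what sdhci_arasan_syscon_write() performs for the Keem Bay SUPPORT64B bit above. The pure computation, runnable standalone:

#include <stdint.h>
#include <stdio.h>

struct ctl_field { int reg, width, shift; };

static uint32_t field_update(uint32_t old, struct ctl_field f, uint32_t val)
{
	uint32_t mask = ((1u << f.width) - 1) << f.shift;

	return (old & ~mask) | ((val << f.shift) & mask);
}

int main(void)
{
	/* Keem Bay SUPPORT64B: a single bit at offset 24 of register 0x4 */
	struct ctl_field support64b = { .reg = 0x4, .width = 1, .shift = 24 };

	/* Clearing the bit, as the Keem Bay probe path requires */
	printf("0x%08x\n", field_update(0x01000000u, support64b, 0));
	return 0;
}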
+/**
* sdhci_arasan_register_sdclk - Register the sdcardclk for a PHY to use
*
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ *
* Some PHY devices need to know what the actual card clock is. In order for
* them to find out, we'll provide a clock through the common clock framework
* for them to query.
@@ -1115,10 +1430,7 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
* to create nice clean device tree bindings and later (if needed) we can try
* re-architecting SDHCI if we see some benefit to it.
*
- * @sdhci_arasan: Our private data structure.
- * @clk_xin: Pointer to the functional clock
- * @dev: Pointer to our struct device.
- * Returns 0 on success and error value on error
+ * Return: 0 on success and error value on error
*/
static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
struct clk *clk_xin,
@@ -1215,6 +1527,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan->host = host;
sdhci_arasan->soc_ctl_map = data->soc_ctl_map;
+ sdhci_arasan->clk_ops = data->clk_ops;
node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0);
if (node) {
@@ -1270,6 +1583,15 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
"rockchip,rk3399-sdhci-5.1"))
sdhci_arasan_update_clockmultiplier(host, 0x0);
+ if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
+ of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") ||
+ of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) {
+ sdhci_arasan_update_clockmultiplier(host, 0x0);
+ sdhci_arasan_update_support64b(host, 0x0);
+
+ host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ }
+
sdhci_arasan_update_baseclkfreq(host);
ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, &pdev->dev);
@@ -1277,20 +1599,6 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto clk_disable_all;
if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data;
- const struct zynqmp_eemi_ops *eemi_ops;
-
- zynqmp_clk_data = devm_kzalloc(&pdev->dev,
- sizeof(*zynqmp_clk_data),
- GFP_KERNEL);
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops)) {
- ret = PTR_ERR(eemi_ops);
- goto unreg_clk;
- }
-
- zynqmp_clk_data->eemi_ops = eemi_ops;
- sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
host->mmc_host_ops.execute_tuning =
arasan_zynqmp_execute_tuning;
}
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index c79bff5e2280..1ece2c50042c 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -6,6 +6,7 @@
* 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -120,9 +121,12 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
|| mmc_gpio_get_cd(host->mmc) >= 0)
sdhci_at91_set_force_card_detect(host);
- if (priv->cal_always_on && (mask & SDHCI_RESET_ALL))
- sdhci_writel(host, SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
+ if (priv->cal_always_on && (mask & SDHCI_RESET_ALL)) {
+ u32 calcr = sdhci_readl(host, SDMMC_CALCR);
+
+ sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
SDMMC_CALCR);
+ }
}
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
@@ -179,9 +183,9 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
clk_mul = gck_rate / clk_base_rate - 1;
caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK;
- caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK;
+ caps0 |= FIELD_PREP(SDHCI_CLOCK_V3_BASE_MASK, clk_base);
caps1 &= ~SDHCI_CLOCK_MUL_MASK;
- caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK;
+ caps1 |= FIELD_PREP(SDHCI_CLOCK_MUL_MASK, clk_mul);
/* Set capabilities in r/w mode. */
writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
writel(caps0, host->ioaddr + SDHCI_CAPABILITIES);
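The at91 conversion above swaps open-coded shift-and-mask for FIELD_PREP(), which derives the shift from the mask itself. A standalone check that the two forms agree; the simplified FIELD_PREP and the mask value here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define CLOCK_BASE_MASK		0x0000FF00u
#define CLOCK_BASE_SHIFT	8

/* Simplified FIELD_PREP: shift distance taken from the mask's lowest bit. */
#define FIELD_PREP_SKETCH(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t clk_base = 0x21;
	uint32_t manual = (clk_base << CLOCK_BASE_SHIFT) & CLOCK_BASE_MASK;
	uint32_t prep = FIELD_PREP_SKETCH(CLOCK_BASE_MASK, clk_base);

	printf("manual=0x%08x field_prep=0x%08x\n", manual, prep);
	return 0;
}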
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index a5137845a1c7..64ac0dbee95c 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -16,6 +16,9 @@
#include "sdhci-pltfm.h"
+/* DWCMSHC specific Mode Select value */
+#define DWCMSHC_CTRL_HS400 0x7
+
#define BOUNDARY_OK(addr, len) \
((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
@@ -46,10 +49,36 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
+static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ u16 ctrl_2;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if ((timing == MMC_TIMING_MMC_HS200) ||
+ (timing == MMC_TIMING_UHS_SDR104))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ else if ((timing == MMC_TIMING_UHS_SDR25) ||
+ (timing == MMC_TIMING_MMC_HS))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ else if ((timing == MMC_TIMING_UHS_DDR50) ||
+ (timing == MMC_TIMING_MMC_DDR52))
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ else if (timing == MMC_TIMING_MMC_HS400)
+ ctrl_2 |= DWCMSHC_CTRL_HS400;
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
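dwcmshc_set_uhs_signaling() exists because HS400 needs a vendor mode-select value (0x7) past the standard SDHCI UHS encodings; everything else maps onto the usual SDR/DDR values. A table-style sketch of the mapping; the enum constants are local placeholders, not the kernel's MMC_TIMING_* values:

#include <stdint.h>
#include <stdio.h>

enum timing { T_SDR12, T_SDR25, T_SDR50, T_SDR104, T_DDR50, T_HS400 };

static uint16_t mode_select(enum timing t)
{
	switch (t) {
	case T_SDR12:	return 0x0;
	case T_SDR25:	return 0x1;
	case T_SDR50:	return 0x2;
	case T_SDR104:	return 0x3;	/* also used for HS200 */
	case T_DDR50:	return 0x4;	/* also used for DDR52 */
	case T_HS400:	return 0x7;	/* vendor-specific DWCMSHC_CTRL_HS400 */
	}
	return 0x0;
}

int main(void)
{
	printf("HS400 mode select = 0x%x\n", mode_select(T_HS400));
	return 0;
}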
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_uhs_signaling = dwcmshc_set_uhs_signaling,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.reset = sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
@@ -134,6 +163,48 @@ static int dwcmshc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int dwcmshc_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = sdhci_suspend_host(host);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(pltfm_host->clk);
+ if (!IS_ERR(priv->bus_clk))
+ clk_disable_unprepare(priv->bus_clk);
+
+ return ret;
+}
+
+static int dwcmshc_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR(priv->bus_clk)) {
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret)
+ return ret;
+ }
+
+ return sdhci_resume_host(host);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dwcmshc_pmops, dwcmshc_suspend, dwcmshc_resume);
+
static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
{ .compatible = "snps,dwcmshc-sdhci" },
{}
@@ -144,6 +215,7 @@ static struct platform_driver sdhci_dwcmshc_driver = {
.driver = {
.name = "sdhci-dwcmshc",
.of_match_table = sdhci_dwcmshc_dt_ids,
+ .pm = &dwcmshc_pmops,
},
.probe = dwcmshc_probe,
.remove = dwcmshc_remove,
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 5d8dd870bd44..7c73d243dc6c 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -1135,6 +1135,40 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
static void esdhc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
+ u32 val;
+
+	/*
+	 * There are specific register settings for HS400 mode.
+	 * Clear all of them if the controller is in HS400 mode,
+	 * so that HS400 is exited before any other speed mode is set.
+	 */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ if (val & ESDHC_HS400_MODE) {
+ val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
+ val &= ~ESDHC_FLW_CTL_BG;
+ sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
+
+ val = sdhci_readl(host, ESDHC_SDCLKCTL);
+ val &= ~ESDHC_CMD_CLK_CTL;
+ sdhci_writel(host, val, ESDHC_SDCLKCTL);
+
+ esdhc_clock_enable(host, false);
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_HS400_MODE;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+ esdhc_clock_enable(host, true);
+
+ val = sdhci_readl(host, ESDHC_DLLCFG0);
+ val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
+ sdhci_writel(host, val, ESDHC_DLLCFG0);
+
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_HS400_WNDW_ADJUST;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ esdhc_tuning_block_enable(host, false);
+ }
+
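One detail of the exit sequence above: the HS400_MODE bit in TBCTL is only toggled while the card clock is gated. A stubbed sketch of that gate-modify-ungate step; the bit position is a placeholder:

#include <stdint.h>
#include <stdio.h>

#define HS400_MODE	(1u << 4)	/* placeholder bit position */

static uint32_t tbctl = HS400_MODE;

static void clock_enable(int on) { printf("clock %s\n", on ? "on" : "off"); }

int main(void)
{
	clock_enable(0);	/* gate the clock first */
	tbctl &= ~HS400_MODE;	/* clear the mode bit while gated */
	clock_enable(1);	/* then ungate */
	printf("TBCTL = 0x%08x\n", tbctl);
	return 0;
}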
if (timing == MMC_TIMING_MMC_HS400)
esdhc_tuning_block_enable(host, true);
else
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 2527244c2ae1..bb6802448b2f 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -249,12 +249,8 @@ static int ricoh_probe(struct sdhci_pci_chip *chip)
static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->caps =
- ((0x21 << SDHCI_TIMEOUT_CLK_SHIFT)
- & SDHCI_TIMEOUT_CLK_MASK) |
-
- ((0x21 << SDHCI_CLOCK_BASE_SHIFT)
- & SDHCI_CLOCK_BASE_MASK) |
-
+ FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
+ FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
SDHCI_TIMEOUT_CLK_UNIT |
SDHCI_CAN_VDD_330 |
SDHCI_CAN_DO_HISPD |
@@ -1749,6 +1745,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
+ SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index ce15a05f23d4..ca0166d9bf82 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -26,6 +26,9 @@
#define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26)
#define GLI_9750_DRIVING_1_VALUE 0xFFF
#define GLI_9750_DRIVING_2_VALUE 0x3
+#define SDHCI_GLI_9750_SEL_1 BIT(29)
+#define SDHCI_GLI_9750_SEL_2 BIT(31)
+#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30))
#define SDHCI_GLI_9750_PLL 0x864
#define SDHCI_GLI_9750_PLL_TX2_INV BIT(23)
@@ -60,6 +63,19 @@
#define SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY GENMASK(2, 0)
#define GLI_9750_TUNING_PARAMETERS_RX_DLY_VALUE 0x1
+#define SDHCI_GLI_9763E_CTRL_HS400 0x7
+
+#define SDHCI_GLI_9763E_HS400_ES_REG 0x52C
+#define SDHCI_GLI_9763E_HS400_ES_BIT BIT(8)
+
+#define PCIE_GLI_9763E_VHS 0x884
+#define GLI_9763E_VHS_REV GENMASK(19, 16)
+#define GLI_9763E_VHS_REV_R 0x0
+#define GLI_9763E_VHS_REV_M 0x1
+#define GLI_9763E_VHS_REV_W 0x2
+#define PCIE_GLI_9763E_SCR 0x8E0
+#define GLI_9763E_SCR_AXI_REQ BIT(9)
+
#define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */
@@ -122,6 +138,8 @@ static void gli_set_9750(struct sdhci_host *host)
GLI_9750_DRIVING_1_VALUE);
driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2,
GLI_9750_DRIVING_2_VALUE);
+ driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST);
+ driving_value |= SDHCI_GLI_9750_SEL_2;
sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING);
sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4;
@@ -334,6 +352,93 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
return value;
}
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+
+ pci_free_irq_vectors(slot->chip->pdev);
+ gli_pcie_enable_msi(slot);
+
+ return sdhci_pci_resume_host(chip);
+}
+#endif
+
+static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 val;
+
+ val = sdhci_readl(host, SDHCI_GLI_9763E_HS400_ES_REG);
+ if (ios->enhanced_strobe)
+ val |= SDHCI_GLI_9763E_HS400_ES_BIT;
+ else
+ val &= ~SDHCI_GLI_9763E_HS400_ES_BIT;
+
+ sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
+}
+
+static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ u16 ctrl_2;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if (timing == MMC_TIMING_MMC_HS200)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_MMC_HS)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_MMC_DDR52)
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ else if (timing == MMC_TIMING_MMC_HS400)
+ ctrl_2 |= SDHCI_GLI_9763E_CTRL_HS400;
+
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
+{
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_SCR, &value);
+ value |= GLI_9763E_SCR_AXI_REQ;
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_SCR, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+}
+
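gli_set_gl9763e() uses an unlock-modify-relock sequence: the VHS revision field is set to "writable" before SCR is touched, then restored to "read-only". A standalone sketch with PCI config space reduced to two variables:

#include <stdint.h>
#include <stdio.h>

#define VHS_REV_SHIFT	16
#define VHS_REV_MASK	(0xFu << VHS_REV_SHIFT)
#define VHS_REV_R	0x0u	/* read-only */
#define VHS_REV_W	0x2u	/* writable */
#define SCR_AXI_REQ	(1u << 9)

static uint32_t vhs, scr;

static void set_rev(uint32_t rev)
{
	vhs = (vhs & ~VHS_REV_MASK) | (rev << VHS_REV_SHIFT);
	printf("VHS = 0x%08x\n", vhs);
}

int main(void)
{
	set_rev(VHS_REV_W);	/* unlock vendor config space */
	scr |= SCR_AXI_REQ;	/* modify while writable */
	set_rev(VHS_REV_R);	/* relock */
	printf("SCR = 0x%08x\n", scr);
	return 0;
}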
+static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
+{
+ struct sdhci_host *host = slot->host;
+
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA |
+ MMC_CAP_1_8V_DDR |
+ MMC_CAP_NONREMOVABLE;
+ host->mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR |
+ MMC_CAP2_HS400_1_8V |
+ MMC_CAP2_HS400_ES |
+ MMC_CAP2_NO_SDIO |
+ MMC_CAP2_NO_SD;
+ gli_pcie_enable_msi(slot);
+ host->mmc_host_ops.hs400_enhanced_strobe =
+ gl9763e_hs400_enhanced_strobe;
+ gli_set_gl9763e(slot);
+ sdhci_enable_v4_mode(host);
+
+ return 0;
+}
+
static const struct sdhci_ops sdhci_gl9755_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
@@ -348,6 +453,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = {
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9755,
.ops = &sdhci_gl9755_ops,
+#ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_gli_resume,
+#endif
};
static const struct sdhci_ops sdhci_gl9750_ops = {
@@ -366,4 +474,25 @@ const struct sdhci_pci_fixes sdhci_gl9750 = {
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9750,
.ops = &sdhci_gl9750_ops,
+#ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_gli_resume,
+#endif
+};
+
+static const struct sdhci_ops sdhci_gl9763e_ops = {
+ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_gl9763e_signaling,
+ .voltage_switch = sdhci_gli_voltage_switch,
+};
+
+const struct sdhci_pci_fixes sdhci_gl9763e = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = gli_probe_slot_gl9763e,
+ .ops = &sdhci_gl9763e_ops,
+#ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_gli_resume,
+#endif
};
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index fa8105087d68..e2a846885902 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -494,7 +494,7 @@ static void sdhci_o2_enable_clk(struct sdhci_host *host, u16 clk)
}
}
-void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
+static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
{
u16 clk;
@@ -509,7 +509,7 @@ void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_o2_enable_clk(host, clk);
}
-int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
@@ -578,7 +578,7 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
-int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
{
int ret;
u8 scratch;
@@ -783,7 +783,7 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
}
#ifdef CONFIG_PM_SLEEP
-int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
+static int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
{
sdhci_pci_o2_probe(chip);
return sdhci_pci_resume_host(chip);
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 42ccd123b046..d0ed232af0eb 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -72,6 +72,7 @@
#define PCI_DEVICE_ID_GLI_9755 0x9755
#define PCI_DEVICE_ID_GLI_9750 0x9750
+#define PCI_DEVICE_ID_GLI_9763E 0xe763
/*
* PCI device class and mask
@@ -195,5 +196,6 @@ extern const struct sdhci_pci_fixes sdhci_snps;
extern const struct sdhci_pci_fixes sdhci_o2;
extern const struct sdhci_pci_fixes sdhci_gl9750;
extern const struct sdhci_pci_fixes sdhci_gl9755;
+extern const struct sdhci_pci_fixes sdhci_gl9763e;
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 2ab42c59e4f8..a910cb461ed7 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -406,7 +406,8 @@ static struct sdhci_ops sdhci_sprd_ops = {
.request_done = sdhci_sprd_request_done,
};
-static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
+static void sdhci_sprd_check_auto_cmd23(struct mmc_host *mmc,
+ struct mmc_request *mrq)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
@@ -422,10 +423,23 @@ static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
mrq->sbc && (mrq->sbc->arg & SDHCI_SPRD_ARG2_STUFF) &&
(host->flags & SDHCI_AUTO_CMD23))
host->flags &= ~SDHCI_AUTO_CMD23;
+}
+
+static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ sdhci_sprd_check_auto_cmd23(mmc, mrq);
sdhci_request(mmc, mrq);
}
+static int sdhci_sprd_request_atomic(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ sdhci_sprd_check_auto_cmd23(mmc, mrq);
+
+ return sdhci_request_atomic(mmc, mrq);
+}
+
static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -434,7 +448,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret) {
+ if (ret < 0) {
pr_err("%s: Switching signalling voltage failed\n",
mmc_hostname(mmc));
return ret;
@@ -556,11 +570,17 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
sdhci_sprd_voltage_switch;
host->mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
+ MMC_CAP_WAIT_WHILE_BUSY;
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto pltfm_free;
+ if (!mmc_card_is_removable(host->mmc))
+ host->mmc_host_ops.request_atomic = sdhci_sprd_request_atomic;
+ else
+ host->always_defer_done = true;
+
sprd_host = TO_SPRD_HOST(host);
sdhci_sprd_phy_param_parse(sprd_host, pdev->dev.of_node);
@@ -654,8 +674,6 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
if (ret)
goto err_cleanup_host;
- host->always_defer_done = true;
-
ret = __sdhci_add_host(host);
if (ret)
goto err_cleanup_host;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 3e2c5101291d..3a372ab3d12e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -605,6 +605,39 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
autocal->pull_down_1v8 = 0;
err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-up-offset-sdr104",
+ &autocal->pull_up_sdr104);
+ if (err)
+ autocal->pull_up_sdr104 = autocal->pull_up_1v8;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-down-offset-sdr104",
+ &autocal->pull_down_sdr104);
+ if (err)
+ autocal->pull_down_sdr104 = autocal->pull_down_1v8;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-up-offset-hs400",
+ &autocal->pull_up_hs400);
+ if (err)
+ autocal->pull_up_hs400 = autocal->pull_up_1v8;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-down-offset-hs400",
+ &autocal->pull_down_hs400);
+ if (err)
+ autocal->pull_down_hs400 = autocal->pull_down_1v8;
+
+	/*
+	 * Different fail-safe drive strength values based on the signaling
+	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
+	 * So, avoid reading the device tree properties below for SoCs that
+	 * don't have NVQUIRK_NEEDS_PAD_CONTROL.
+	 */
+ if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
+ return;
+
+ err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
&autocal->pull_up_3v3_timeout);
if (err) {
@@ -647,30 +680,6 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
mmc_hostname(host->mmc));
autocal->pull_down_1v8_timeout = 0;
}
-
- err = device_property_read_u32(host->mmc->parent,
- "nvidia,pad-autocal-pull-up-offset-sdr104",
- &autocal->pull_up_sdr104);
- if (err)
- autocal->pull_up_sdr104 = autocal->pull_up_1v8;
-
- err = device_property_read_u32(host->mmc->parent,
- "nvidia,pad-autocal-pull-down-offset-sdr104",
- &autocal->pull_down_sdr104);
- if (err)
- autocal->pull_down_sdr104 = autocal->pull_down_1v8;
-
- err = device_property_read_u32(host->mmc->parent,
- "nvidia,pad-autocal-pull-up-offset-hs400",
- &autocal->pull_up_hs400);
- if (err)
- autocal->pull_up_hs400 = autocal->pull_up_1v8;
-
- err = device_property_read_u32(host->mmc->parent,
- "nvidia,pad-autocal-pull-down-offset-hs400",
- &autocal->pull_down_hs400);
- if (err)
- autocal->pull_down_hs400 = autocal->pull_down_1v8;
}
static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
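
The pad auto-calibration parsing above reads each optional property and falls back to a previously parsed value when it is absent. A minimal sketch of that fallback pattern, with assumed example names:

/* Read an optional u32 property, falling back to a default when absent. */
static u32 example_prop_or(struct device *dev, const char *prop, u32 fallback)
{
	u32 val;

	if (device_property_read_u32(dev, prop, &val))
		return fallback;

	return val;
}

Usage mirroring the SDR104 offsets above would then be:

	autocal->pull_up_sdr104 =
		example_prop_or(dev, "nvidia,pad-autocal-pull-up-offset-sdr104",
				autocal->pull_up_1v8);
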
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 3f716466fcfd..37b1158c1c0c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -48,10 +48,10 @@
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
-static void sdhci_finish_data(struct sdhci_host *);
-
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
+
void sdhci_dumpregs(struct sdhci_host *host)
{
SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
@@ -111,6 +111,9 @@ void sdhci_dumpregs(struct sdhci_host *host)
}
}
+ if (host->ops->dump_vendor_regs)
+ host->ops->dump_vendor_regs(host);
+
SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
@@ -317,6 +320,7 @@ out:
static void sdhci_init(struct sdhci_host *host, int soft)
{
struct mmc_host *mmc = host->mmc;
+ unsigned long flags;
if (soft)
sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
@@ -326,7 +330,9 @@ static void sdhci_init(struct sdhci_host *host, int soft)
if (host->v4_mode)
sdhci_do_enable_v4_mode(host);
+ spin_lock_irqsave(&host->lock, flags);
sdhci_set_default_irqs(host);
+ spin_unlock_irqrestore(&host->lock, flags);
host->cqe_on = false;
@@ -634,9 +640,13 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
}
if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
/* Copy the data to the bounce buffer */
- sg_copy_to_buffer(data->sg, data->sg_len,
- host->bounce_buffer,
- length);
+ if (host->ops->copy_to_bounce_buffer) {
+ host->ops->copy_to_bounce_buffer(host,
+ data, length);
+ } else {
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ host->bounce_buffer, length);
+ }
}
/* Switch ownership to the DMA */
dma_sync_single_for_device(host->mmc->parent,
@@ -1350,13 +1360,25 @@ static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
!mrq->cap_cmd_during_tfr;
}
+static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
+ struct mmc_request *mrq)
+{
+ return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
+}
+
+static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
+ struct mmc_request *mrq)
+{
+ return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
+}
+
static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
struct mmc_command *cmd,
u16 *mode)
{
bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
(cmd->opcode != SD_IO_RW_EXTENDED);
- bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
+ bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
u16 ctrl2;
/*
@@ -1416,7 +1438,7 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
sdhci_auto_cmd_select(host, cmd, &mode);
- if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
+ if (sdhci_auto_cmd23(host, cmd->mrq))
sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
}
@@ -1466,6 +1488,9 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
if (host->data_cmd && host->data_cmd->mrq == mrq)
host->data_cmd = NULL;
+ if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
+ host->deferred_cmd = NULL;
+
if (host->data && host->data->mrq == mrq)
host->data = NULL;
@@ -1487,7 +1512,7 @@ static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
queue_work(host->complete_wq, &host->complete_work);
}
-static void sdhci_finish_data(struct sdhci_host *host)
+static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
{
struct mmc_command *data_cmd = host->data_cmd;
struct mmc_data *data = host->data;
@@ -1539,14 +1564,31 @@ static void sdhci_finish_data(struct sdhci_host *host)
} else {
/* Avoid triggering warning in sdhci_send_command() */
host->cmd = NULL;
- sdhci_send_command(host, data->stop);
+ if (!sdhci_send_command(host, data->stop)) {
+ if (sw_data_timeout) {
+ /*
+ * This is anyway a sw data timeout, so
+ * give up now.
+ */
+ data->stop->error = -EIO;
+ __sdhci_finish_mrq(host, data->mrq);
+ } else {
+ WARN_ON(host->deferred_cmd);
+ host->deferred_cmd = data->stop;
+ }
+ }
}
} else {
__sdhci_finish_mrq(host, data->mrq);
}
}
-void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+static void sdhci_finish_data(struct sdhci_host *host)
+{
+ __sdhci_finish_data(host, false);
+}
+
+static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
int flags;
u32 mask;
@@ -1561,9 +1603,6 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
cmd->opcode == MMC_STOP_TRANSMISSION)
cmd->flags |= MMC_RSP_BUSY;
- /* Wait max 10 ms */
- timeout = 10;
-
mask = SDHCI_CMD_INHIBIT;
if (sdhci_data_line_cmd(cmd))
mask |= SDHCI_DATA_INHIBIT;
@@ -1573,18 +1612,8 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
mask &= ~SDHCI_DATA_INHIBIT;
- while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
- if (timeout == 0) {
- pr_err("%s: Controller never released inhibit bit(s).\n",
- mmc_hostname(host->mmc));
- sdhci_dumpregs(host);
- cmd->error = -EIO;
- sdhci_finish_mrq(host, cmd->mrq);
- return;
- }
- timeout--;
- mdelay(1);
- }
+ if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
+ return false;
host->cmd = cmd;
host->data_timeout = 0;
@@ -1606,11 +1635,13 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_set_transfer_mode(host, cmd);
if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
- pr_err("%s: Unsupported response type!\n",
- mmc_hostname(host->mmc));
- cmd->error = -EINVAL;
- sdhci_finish_mrq(host, cmd->mrq);
- return;
+ WARN_ONCE(1, "Unsupported response type!\n");
+ /*
+ * This does not happen in practice because 136-bit response
+ * commands never have busy waiting, so rather than complicate
+ * the error path, just remove busy waiting and continue.
+ */
+ cmd->flags &= ~MMC_RSP_BUSY;
}
if (!(cmd->flags & MMC_RSP_PRESENT))
@@ -1645,8 +1676,61 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_external_dma_pre_transfer(host, cmd);
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+
+ return true;
+}
+
+static bool sdhci_present_error(struct sdhci_host *host,
+ struct mmc_command *cmd, bool present)
+{
+ if (!present || host->flags & SDHCI_DEVICE_DEAD) {
+ cmd->error = -ENOMEDIUM;
+ return true;
+ }
+
+ return false;
+}
+
+static bool sdhci_send_command_retry(struct sdhci_host *host,
+ struct mmc_command *cmd,
+ unsigned long flags)
+ __releases(host->lock)
+ __acquires(host->lock)
+{
+ struct mmc_command *deferred_cmd = host->deferred_cmd;
+ int timeout = 10; /* Approx. 10 ms */
+ bool present;
+
+ while (!sdhci_send_command(host, cmd)) {
+ if (!timeout--) {
+ pr_err("%s: Controller never released inhibit bit(s).\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ cmd->error = -EIO;
+ return false;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ usleep_range(1000, 1250);
+
+ present = host->mmc->ops->get_cd(host->mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* A deferred command might disappear, handle that */
+ if (cmd == deferred_cmd && cmd != host->deferred_cmd)
+ return true;
+
+ if (sdhci_present_error(host, cmd, present))
+ return false;
+ }
+
+ if (cmd == host->deferred_cmd)
+ host->deferred_cmd = NULL;
+
+ return true;
}
-EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
@@ -1707,7 +1791,10 @@ static void sdhci_finish_command(struct sdhci_host *host)
/* Finished CMD23, now send actual command. */
if (cmd == cmd->mrq->sbc) {
- sdhci_send_command(host, cmd->mrq->cmd);
+ if (!sdhci_send_command(host, cmd->mrq->cmd)) {
+ WARN_ON(host->deferred_cmd);
+ host->deferred_cmd = cmd->mrq->cmd;
+ }
} else {
/* Processed actual command. */
@@ -2037,11 +2124,10 @@ EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
- struct sdhci_host *host;
- int present;
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd;
unsigned long flags;
-
- host = mmc_priv(mmc);
+ bool present;
/* Firstly check card presence */
present = mmc->ops->get_cd(mmc);
@@ -2050,19 +2136,57 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_led_activate(host);
- if (!present || host->flags & SDHCI_DEVICE_DEAD) {
- mrq->cmd->error = -ENOMEDIUM;
+ if (sdhci_present_error(host, mrq->cmd, present))
+ goto out_finish;
+
+ cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
+
+ if (!sdhci_send_command_retry(host, cmd, flags))
+ goto out_finish;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return;
+
+out_finish:
+ sdhci_finish_mrq(host, mrq);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sdhci_request);
+
+int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (sdhci_present_error(host, mrq->cmd, true)) {
sdhci_finish_mrq(host, mrq);
- } else {
- if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
- sdhci_send_command(host, mrq->sbc);
- else
- sdhci_send_command(host, mrq->cmd);
+ goto out_finish;
}
+ cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
+
+ /*
+ * The HSQ may send a command in interrupt context without polling
+ * the busy signaling, so return -EBUSY if the controller has not
+ * released the inhibit bits; HSQ can then retry the request from
+ * non-atomic context. Hence, do not finish this request here.
+ */
+ if (!sdhci_send_command(host, cmd))
+ ret = -EBUSY;
+ else
+ sdhci_led_activate(host);
+
+out_finish:
spin_unlock_irqrestore(&host->lock, flags);
+ return ret;
}
-EXPORT_SYMBOL_GPL(sdhci_request);
+EXPORT_SYMBOL_GPL(sdhci_request_atomic);
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
@@ -2411,7 +2535,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret) {
+ if (ret < 0) {
pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
mmc_hostname(mmc));
return -EIO;
@@ -2434,7 +2558,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret) {
+ if (ret < 0) {
pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
mmc_hostname(mmc));
return -EIO;
@@ -2466,7 +2590,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret) {
+ if (ret < 0) {
pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
mmc_hostname(mmc));
return -EIO;
@@ -2600,7 +2724,11 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
*/
sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
- sdhci_send_command(host, &cmd);
+ if (!sdhci_send_command_retry(host, &cmd, flags)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ host->tuning_done = 0;
+ return;
+ }
host->cmd = NULL;
@@ -3018,7 +3146,7 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
if (host->data) {
host->data->error = -ETIMEDOUT;
- sdhci_finish_data(host);
+ __sdhci_finish_data(host, true);
queue_work(host->complete_wq, &host->complete_work);
} else if (host->data_cmd) {
host->data_cmd->error = -ETIMEDOUT;
@@ -3390,6 +3518,9 @@ cont:
}
}
out:
+ if (host->deferred_cmd)
+ result = IRQ_WAKE_THREAD;
+
spin_unlock(&host->lock);
/* Process mrqs ready for immediate completion */
@@ -3415,6 +3546,7 @@ out:
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
struct sdhci_host *host = dev_id;
+ struct mmc_command *cmd;
unsigned long flags;
u32 isr;
@@ -3422,8 +3554,14 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
;
spin_lock_irqsave(&host->lock, flags);
+
isr = host->thread_isr;
host->thread_isr = 0;
+
+ cmd = host->deferred_cmd;
+ if (cmd && !sdhci_send_command_retry(host, cmd, flags))
+ sdhci_finish_mrq(host, cmd->mrq);
+
spin_unlock_irqrestore(&host->lock, flags);
if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
@@ -4000,9 +4138,6 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc_hostname(mmc), host->version);
}
- if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
- mmc->caps2 &= ~MMC_CAP2_CQE;
-
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
else if (!(host->caps & SDHCI_CAN_DO_SDMA))
@@ -4117,11 +4252,9 @@ int sdhci_setup_host(struct sdhci_host *host)
}
if (host->version >= SDHCI_SPEC_300)
- host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
- >> SDHCI_CLOCK_BASE_SHIFT;
+ host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
else
- host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
- >> SDHCI_CLOCK_BASE_SHIFT;
+ host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
host->max_clk *= 1000000;
if (host->max_clk == 0 || host->quirks &
@@ -4139,8 +4272,7 @@ int sdhci_setup_host(struct sdhci_host *host)
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
*/
- host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
- SDHCI_CLOCK_MUL_SHIFT;
+ host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
/*
* In case the value in Clock Multiplier is 0, then programmable
@@ -4173,8 +4305,7 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc->f_max = max_clk;
if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
- host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
- SDHCI_TIMEOUT_CLK_SHIFT;
+ host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
@@ -4204,7 +4335,7 @@ int sdhci_setup_host(struct sdhci_host *host)
!host->ops->get_max_timeout_count)
mmc->max_busy_timeout = 0;
- mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
@@ -4326,8 +4457,8 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
/* Initial value for re-tuning timer count */
- host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
- SDHCI_RETUNING_TIMER_COUNT_SHIFT;
+ host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
+ host->caps1);
/*
* In case Re-tuning Timer is not disabled, the actual value of
@@ -4337,8 +4468,7 @@ int sdhci_setup_host(struct sdhci_host *host)
host->tuning_count = 1 << (host->tuning_count - 1);
/* Re-tuning mode supported by the Host Controller */
- host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
- SDHCI_RETUNING_MODE_SHIFT;
+ host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
ocr_avail = 0;
@@ -4360,35 +4490,32 @@ int sdhci_setup_host(struct sdhci_host *host)
curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
max_current_caps =
- (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
- (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
- (curr << SDHCI_MAX_CURRENT_180_SHIFT);
+ FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
+ FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
+ FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
}
}
if (host->caps & SDHCI_CAN_VDD_330) {
ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
- mmc->max_current_330 = ((max_current_caps &
- SDHCI_MAX_CURRENT_330_MASK) >>
- SDHCI_MAX_CURRENT_330_SHIFT) *
- SDHCI_MAX_CURRENT_MULTIPLIER;
+ mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
+ max_current_caps) *
+ SDHCI_MAX_CURRENT_MULTIPLIER;
}
if (host->caps & SDHCI_CAN_VDD_300) {
ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
- mmc->max_current_300 = ((max_current_caps &
- SDHCI_MAX_CURRENT_300_MASK) >>
- SDHCI_MAX_CURRENT_300_SHIFT) *
- SDHCI_MAX_CURRENT_MULTIPLIER;
+ mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
+ max_current_caps) *
+ SDHCI_MAX_CURRENT_MULTIPLIER;
}
if (host->caps & SDHCI_CAN_VDD_180) {
ocr_avail |= MMC_VDD_165_195;
- mmc->max_current_180 = ((max_current_caps &
- SDHCI_MAX_CURRENT_180_MASK) >>
- SDHCI_MAX_CURRENT_180_SHIFT) *
- SDHCI_MAX_CURRENT_MULTIPLIER;
+ mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
+ max_current_caps) *
+ SDHCI_MAX_CURRENT_MULTIPLIER;
}
/* If OCR set by host, use it instead. */
@@ -4539,6 +4666,12 @@ int __sdhci_add_host(struct sdhci_host *host)
struct mmc_host *mmc = host->mmc;
int ret;
+ if ((mmc->caps2 & MMC_CAP2_CQE) &&
+ (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
+ mmc->caps2 &= ~MMC_CAP2_CQE;
+ mmc->cqe_ops = NULL;
+ }
+
host->complete_wq = alloc_workqueue("sdhci", flags, 0);
if (!host->complete_wq)
return -ENOMEM;
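
To place the new machinery above: sdhci_send_command() now returns false instead of busy-waiting on the inhibit bits, sdhci_send_command_retry() sleeps with host->lock dropped, and a command that cannot be issued yet is parked in host->deferred_cmd for the IRQ thread. A minimal sketch, not from this patch, of how a software-queue caller is expected to consume ->request_atomic() (example_schedule_retry is a hypothetical helper):

static void example_hsq_dispatch(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int ret = 0;

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, mrq);
	else
		mmc->ops->request(mmc, mrq);

	/*
	 * -EBUSY means the inhibit bits were still set; retry later from a
	 * context that may sleep (the real HSQ punts to a work item here).
	 */
	if (ret == -EBUSY)
		example_schedule_retry(mmc, mrq);
}
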
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 79dffbb731d3..0008bbd27127 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -200,12 +200,10 @@
#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
#define SDHCI_CAPABILITIES 0x40
-#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F
-#define SDHCI_TIMEOUT_CLK_SHIFT 0
+#define SDHCI_TIMEOUT_CLK_MASK GENMASK(5, 0)
#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
-#define SDHCI_CLOCK_BASE_MASK 0x00003F00
-#define SDHCI_CLOCK_V3_BASE_MASK 0x0000FF00
-#define SDHCI_CLOCK_BASE_SHIFT 8
+#define SDHCI_CLOCK_BASE_MASK GENMASK(13, 8)
+#define SDHCI_CLOCK_V3_BASE_MASK GENMASK(15, 8)
#define SDHCI_MAX_BLOCK_MASK 0x00030000
#define SDHCI_MAX_BLOCK_SHIFT 16
#define SDHCI_CAN_DO_8BIT 0x00040000
@@ -220,32 +218,25 @@
#define SDHCI_CAN_64BIT_V4 0x08000000
#define SDHCI_CAN_64BIT 0x10000000
+#define SDHCI_CAPABILITIES_1 0x44
#define SDHCI_SUPPORT_SDR50 0x00000001
#define SDHCI_SUPPORT_SDR104 0x00000002
#define SDHCI_SUPPORT_DDR50 0x00000004
#define SDHCI_DRIVER_TYPE_A 0x00000010
#define SDHCI_DRIVER_TYPE_C 0x00000020
#define SDHCI_DRIVER_TYPE_D 0x00000040
-#define SDHCI_RETUNING_TIMER_COUNT_MASK 0x00000F00
-#define SDHCI_RETUNING_TIMER_COUNT_SHIFT 8
+#define SDHCI_RETUNING_TIMER_COUNT_MASK GENMASK(11, 8)
#define SDHCI_USE_SDR50_TUNING 0x00002000
-#define SDHCI_RETUNING_MODE_MASK 0x0000C000
-#define SDHCI_RETUNING_MODE_SHIFT 14
-#define SDHCI_CLOCK_MUL_MASK 0x00FF0000
-#define SDHCI_CLOCK_MUL_SHIFT 16
+#define SDHCI_RETUNING_MODE_MASK GENMASK(15, 14)
+#define SDHCI_CLOCK_MUL_MASK GENMASK(23, 16)
#define SDHCI_CAN_DO_ADMA3 0x08000000
#define SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */
-#define SDHCI_CAPABILITIES_1 0x44
-
#define SDHCI_MAX_CURRENT 0x48
-#define SDHCI_MAX_CURRENT_LIMIT 0xFF
-#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF
-#define SDHCI_MAX_CURRENT_330_SHIFT 0
-#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00
-#define SDHCI_MAX_CURRENT_300_SHIFT 8
-#define SDHCI_MAX_CURRENT_180_MASK 0xFF0000
-#define SDHCI_MAX_CURRENT_180_SHIFT 16
+#define SDHCI_MAX_CURRENT_LIMIT GENMASK(7, 0)
+#define SDHCI_MAX_CURRENT_330_MASK GENMASK(7, 0)
+#define SDHCI_MAX_CURRENT_300_MASK GENMASK(15, 8)
+#define SDHCI_MAX_CURRENT_180_MASK GENMASK(23, 16)
#define SDHCI_MAX_CURRENT_MULTIPLIER 4
/* 4C-4F reserved for more max current */
@@ -540,6 +531,7 @@ struct sdhci_host {
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
struct mmc_command *data_cmd; /* Current data command */
+ struct mmc_command *deferred_cmd; /* Deferred command */
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
@@ -653,8 +645,12 @@ struct sdhci_ops {
void (*voltage_switch)(struct sdhci_host *host);
void (*adma_write_desc)(struct sdhci_host *host, void **desc,
dma_addr_t addr, int len, unsigned int cmd);
+ void (*copy_to_bounce_buffer)(struct sdhci_host *host,
+ struct mmc_data *data,
+ unsigned int length);
void (*request_done)(struct sdhci_host *host,
struct mmc_request *mrq);
+ void (*dump_vendor_regs)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -757,7 +753,6 @@ void sdhci_cleanup_host(struct sdhci_host *host);
int __sdhci_add_host(struct sdhci_host *host);
int sdhci_add_host(struct sdhci_host *host);
void sdhci_remove_host(struct sdhci_host *host, int dead);
-void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
static inline void sdhci_read_caps(struct sdhci_host *host)
{
@@ -776,6 +771,7 @@ void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
+int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
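
The register definitions above trade open-coded mask/shift pairs for GENMASK() plus the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>, which derive the shift from the mask at compile time. A minimal standalone sketch with a hypothetical field:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_CLOCK_MASK	GENMASK(15, 8)	/* hypothetical 8-bit field */

static u32 example_get_clock(u32 caps)
{
	/* Equivalent to the old (caps & 0x0000FF00) >> 8 */
	return FIELD_GET(EXAMPLE_CLOCK_MASK, caps);
}

static u32 example_put_clock(u32 val)
{
	/* Equivalent to the old (val << 8) & 0x0000FF00 */
	return FIELD_PREP(EXAMPLE_CLOCK_MASK, val);
}
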
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index a38b8b2a4e5c..76a8cd3a186f 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioport.h>
+#include <linux/iopoll.h>
#include <linux/scatterlist.h>
#include <pcmcia/cistpl.h>
@@ -22,6 +23,7 @@
#include <linux/io.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#define DRIVER_NAME "sdricoh_cs"
@@ -57,10 +59,8 @@ static unsigned int switchlocked;
#define STATUS_BUSY 0x40000000
/* timeouts */
-#define INIT_TIMEOUT 100
-#define CMD_TIMEOUT 100000
-#define TRANSFER_TIMEOUT 100000
-#define BUSY_TIMEOUT 32767
+#define SDRICOH_CMD_TIMEOUT_US 1000000
+#define SDRICOH_DATA_TIMEOUT_US 1000000
/* list of supported pcmcia devices */
static const struct pcmcia_device_id pcmcia_ids[] = {
@@ -124,19 +124,24 @@ static inline unsigned int sdricoh_readb(struct sdricoh_host *host,
return value;
}
-static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted,
- unsigned int timeout){
- unsigned int loop;
+static bool sdricoh_status_ok(struct sdricoh_host *host, unsigned int status,
+ unsigned int wanted)
+{
+ sdricoh_writel(host, R2E4_STATUS_RESP, status);
+ return status & wanted;
+}
+
+static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted)
+{
+ int ret;
unsigned int status = 0;
struct device *dev = host->dev;
- for (loop = 0; loop < timeout; loop++) {
- status = sdricoh_readl(host, R21C_STATUS);
- sdricoh_writel(host, R2E4_STATUS_RESP, status);
- if (status & wanted)
- break;
- }
- if (loop == timeout) {
+ ret = read_poll_timeout(sdricoh_readl, status,
+ sdricoh_status_ok(host, status, wanted),
+ 32, SDRICOH_DATA_TIMEOUT_US, false,
+ host, R21C_STATUS);
+ if (ret) {
dev_err(dev, "query_status: timeout waiting for %x\n", wanted);
return -ETIMEDOUT;
}
@@ -150,35 +155,46 @@ static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted,
}
-static int sdricoh_mmc_cmd(struct sdricoh_host *host, unsigned char opcode,
- unsigned int arg)
+static int sdricoh_mmc_cmd(struct sdricoh_host *host, struct mmc_command *cmd)
{
- unsigned int status;
- int result = 0;
- unsigned int loop = 0;
+ unsigned int status, timeout_us;
+ int ret;
+ unsigned char opcode = cmd->opcode;
+
/* reset status reg? */
sdricoh_writel(host, R21C_STATUS, 0x18);
+
+ /* MMC_APP_CMDs need some special handling */
+ if (host->app_cmd) {
+ opcode |= 64;
+ host->app_cmd = 0;
+ } else if (opcode == MMC_APP_CMD)
+ host->app_cmd = 1;
+
/* fill parameters */
- sdricoh_writel(host, R204_CMD_ARG, arg);
+ sdricoh_writel(host, R204_CMD_ARG, cmd->arg);
sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode);
+
/* wait for command completion */
- if (opcode) {
- for (loop = 0; loop < CMD_TIMEOUT; loop++) {
- status = sdricoh_readl(host, R21C_STATUS);
- sdricoh_writel(host, R2E4_STATUS_RESP, status);
- if (status & STATUS_CMD_FINISHED)
- break;
- }
- /* don't check for timeout in the loop it is not always
- reset correctly
- */
- if (loop == CMD_TIMEOUT || status & STATUS_CMD_TIMEOUT)
- result = -ETIMEDOUT;
+ if (!opcode)
+ return 0;
- }
+ timeout_us = cmd->busy_timeout ? cmd->busy_timeout * 1000 :
+ SDRICOH_CMD_TIMEOUT_US;
- return result;
+ ret = read_poll_timeout(sdricoh_readl, status,
+ sdricoh_status_ok(host, status, STATUS_CMD_FINISHED),
+ 32, timeout_us, false,
+ host, R21C_STATUS);
+
+ /*
+ * Don't check for timeout status in the loop, as it's not always reset
+ * correctly.
+ */
+ if (ret || status & STATUS_CMD_TIMEOUT)
+ return -ETIMEDOUT;
+ return 0;
}
static int sdricoh_reset(struct sdricoh_host *host)
@@ -207,8 +223,7 @@ static int sdricoh_blockio(struct sdricoh_host *host, int read,
u32 data = 0;
/* wait until the data is available */
if (read) {
- if (sdricoh_query_status(host, STATUS_READY_TO_READ,
- TRANSFER_TIMEOUT))
+ if (sdricoh_query_status(host, STATUS_READY_TO_READ))
return -ETIMEDOUT;
sdricoh_writel(host, R21C_STATUS, 0x18);
/* read data */
@@ -224,8 +239,7 @@ static int sdricoh_blockio(struct sdricoh_host *host, int read,
}
}
} else {
- if (sdricoh_query_status(host, STATUS_READY_TO_WRITE,
- TRANSFER_TIMEOUT))
+ if (sdricoh_query_status(host, STATUS_READY_TO_WRITE))
return -ETIMEDOUT;
sdricoh_writel(host, R21C_STATUS, 0x18);
/* write data */
@@ -251,28 +265,20 @@ static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = cmd->data;
struct device *dev = host->dev;
- unsigned char opcode = cmd->opcode;
int i;
dev_dbg(dev, "=============================\n");
- dev_dbg(dev, "sdricoh_request opcode=%i\n", opcode);
+ dev_dbg(dev, "sdricoh_request opcode=%i\n", cmd->opcode);
sdricoh_writel(host, R21C_STATUS, 0x18);
- /* MMC_APP_CMDs need some special handling */
- if (host->app_cmd) {
- opcode |= 64;
- host->app_cmd = 0;
- } else if (opcode == 55)
- host->app_cmd = 1;
-
/* read/write commands seem to require this */
if (data) {
sdricoh_writew(host, R226_BLOCKSIZE, data->blksz);
sdricoh_writel(host, R208_DATAIO, 0);
}
- cmd->error = sdricoh_mmc_cmd(host, opcode, cmd->arg);
+ cmd->error = sdricoh_mmc_cmd(host, cmd);
/* read response buffer */
if (cmd->flags & MMC_RSP_PRESENT) {
@@ -323,8 +329,7 @@ static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdricoh_writel(host, R208_DATAIO, 1);
- if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED,
- TRANSFER_TIMEOUT)) {
+ if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED)) {
dev_err(dev, "sdricoh_request: transfer end error\n");
cmd->error = -EINVAL;
}
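
The sdricoh_cs conversion above replaces hand-rolled polling loops with read_poll_timeout() from <linux/iopoll.h>: the macro stores op(args...) into val, re-evaluates cond, sleeps sleep_us between reads, and returns -ETIMEDOUT once timeout_us elapses. A minimal sketch with hypothetical register names:

#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_STATUS_REG	0x21c		/* hypothetical */
#define EXAMPLE_STATUS_READY	0x00000001	/* hypothetical */

struct example_host {
	void __iomem *iobase;
};

static u32 example_readl(struct example_host *host, unsigned int reg)
{
	return readl(host->iobase + reg);
}

static int example_wait_ready(struct example_host *host)
{
	u32 status;

	/* Poll roughly every 32us, give up after one second */
	return read_poll_timeout(example_readl, status,
				 status & EXAMPLE_STATUS_READY,
				 32, 1000000, false,
				 host, EXAMPLE_STATUS_REG);
}
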
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index f87d7967457f..5e95bbc51644 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -951,9 +951,13 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int sunxi_mmc_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
+ int ret;
+
/* vqmmc regulator is available */
- if (!IS_ERR(mmc->supply.vqmmc))
- return mmc_regulator_set_vqmmc(mmc, ios);
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
+ return ret < 0 ? ret : 0;
+ }
/* no vqmmc regulator, assume fixed regulator at 3/3.3V */
if (mmc->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330)
@@ -1390,7 +1394,7 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
mmc->f_min = 400000;
mmc->f_max = 52000000;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
+ MMC_CAP_SDIO_IRQ;
/*
* Some H5 devices do not have signal traces precise enough to
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 54271b92ee59..5987656e0474 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -73,6 +73,8 @@ module_param(fixed_timeout, bool, 0644);
#define TIFM_MMCSD_MAX_BLOCK_SIZE 0x0800UL
+#define TIFM_MMCSD_REQ_TIMEOUT_MS 1000
+
enum {
CMD_READY = 0x0001,
FIFO_READY = 0x0002,
@@ -959,7 +961,12 @@ static int tifm_sd_probe(struct tifm_dev *sock)
host = mmc_priv(mmc);
tifm_set_drvdata(sock, mmc);
host->dev = sock;
- host->timeout_jiffies = msecs_to_jiffies(1000);
+ host->timeout_jiffies = msecs_to_jiffies(TIFM_MMCSD_REQ_TIMEOUT_MS);
+ /*
+ * We use a fixed request timeout of 1s, hence inform the core about it.
+ * A future improvement should instead respect the cmd->busy_timeout.
+ */
+ mmc->max_busy_timeout = TIFM_MMCSD_REQ_TIMEOUT_MS;
tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
(unsigned long)host);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 9520bd94cf43..d7fde57c78c1 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -39,7 +39,6 @@
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -1128,7 +1127,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (ret == -EPROBE_DEFER)
return ret;
- mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = pdata->max_segs ? : 32;
mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
@@ -1192,7 +1191,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
- dev_pm_domain_start(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
@@ -1231,12 +1229,14 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
cancel_work_sync(&host->done);
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
+ tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
if (host->native_hotplug)
pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
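
The tmio_mmc_host_remove() reordering above makes the runtime-PM teardown mirror the probe path in reverse, without waking hardware that is about to go away. A minimal sketch of the balanced pairing this assumes:

#include <linux/pm_runtime.h>

static void example_pm_probe_side(struct device *dev)
{
	pm_runtime_get_noresume(dev);	/* hold a usage count, no resume */
	pm_runtime_set_active(dev);
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

static void example_pm_remove_side(struct device *dev)
{
	pm_runtime_disable(dev);	/* stop runtime-PM callbacks first */
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);	/* drop the probe-time usage count */
}
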
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index a1683c49cb90..f82baf99fd69 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -610,11 +610,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
}
}
- ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
- dev_name(dev), host);
- if (ret)
- goto free_host;
-
if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
host->dma_ops = &uniphier_sd_internal_dma_ops;
else
@@ -642,8 +637,15 @@ static int uniphier_sd_probe(struct platform_device *pdev)
if (ret)
goto free_host;
+ ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
+ dev_name(dev), host);
+ if (ret)
+ goto remove_host;
+
return 0;
+remove_host:
+ tmio_mmc_host_remove(host);
free_host:
tmio_mmc_host_free(host);
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 9a0b1e4e405d..369b8dee2e3d 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -136,6 +136,8 @@
#define USDHI6_MIN_DMA 64
+#define USDHI6_REQ_TIMEOUT_MS 4000
+
enum usdhi6_wait_for {
USDHI6_WAIT_FOR_REQUEST,
USDHI6_WAIT_FOR_CMD,
@@ -1763,7 +1765,12 @@ static int usdhi6_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->wait = USDHI6_WAIT_FOR_REQUEST;
- host->timeout = msecs_to_jiffies(4000);
+ host->timeout = msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS);
+ /*
+ * We use a fixed timeout of 4s, hence inform the core about it. A
+ * future improvement should instead respect the cmd->busy_timeout.
+ */
+ mmc->max_busy_timeout = USDHI6_REQ_TIMEOUT_MS;
host->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(host->pinctrl)) {
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index e48bddd95ce6..ef95bce50889 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -319,6 +319,8 @@ struct via_crdr_mmc_host {
/* some devices need a very long delay for power to stabilize */
#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
+#define VIA_CMD_TIMEOUT_MS 1000
+
static const struct pci_device_id via_ids[] = {
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
@@ -551,14 +553,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host,
{
void __iomem *addrbase;
struct mmc_data *data;
+ unsigned int timeout_ms;
u32 cmdctrl = 0;
WARN_ON(host->cmd);
data = cmd->data;
- mod_timer(&host->timer, jiffies + HZ);
host->cmd = cmd;
+ timeout_ms = cmd->busy_timeout ? cmd->busy_timeout : VIA_CMD_TIMEOUT_MS;
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
+
/*Command index*/
cmdctrl = cmd->opcode << 8;
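
tifm_sd, usdhi6rol0 and via-sdmmc all adopt the same convention here: honour the per-command cmd->busy_timeout (in milliseconds) when the core supplies one, fall back to a fixed driver default otherwise, and advertise that default via mmc->max_busy_timeout. A minimal sketch with an assumed default:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/mmc/core.h>

#define EXAMPLE_CMD_TIMEOUT_MS	1000	/* hypothetical driver default */

static void example_arm_request_timer(struct timer_list *timer,
				      struct mmc_command *cmd)
{
	/* cmd->busy_timeout is in ms; zero means "use the driver default" */
	unsigned int timeout_ms = cmd->busy_timeout ? cmd->busy_timeout
						    : EXAMPLE_CMD_TIMEOUT_MS;

	mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
}
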
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 740179f42cf2..67f917d6ecd3 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -28,6 +28,8 @@
#include <linux/pnp.h>
#include <linux/highmem.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -770,22 +772,22 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
* interrupts.
*/
switch (cmd->opcode) {
- case 11:
- case 17:
- case 18:
- case 20:
- case 24:
- case 25:
- case 26:
- case 27:
- case 30:
- case 42:
- case 56:
+ case SD_SWITCH_VOLTAGE:
+ case MMC_READ_SINGLE_BLOCK:
+ case MMC_READ_MULTIPLE_BLOCK:
+ case MMC_WRITE_DAT_UNTIL_STOP:
+ case MMC_WRITE_BLOCK:
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ case MMC_PROGRAM_CID:
+ case MMC_PROGRAM_CSD:
+ case MMC_SEND_WRITE_PROT:
+ case MMC_LOCK_UNLOCK:
+ case MMC_GEN_CMD:
break;
/* ACMDs. We don't keep track of state, so we just treat them
* like any other command. */
- case 51:
+ case SD_APP_SEND_SCR:
break;
default:
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 42d401ea60ee..6ddab796216d 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -170,6 +170,16 @@ config MTD_OOPS
buffer in a flash partition where it can be read back at some
later point.
+config MTD_PSTORE
+ tristate "Log panic/oops to an MTD buffer based on pstore"
+ depends on PSTORE_BLK
+ help
+ This enables panic and oops messages to be logged to a circular
+ buffer in a flash partition where it can be read back as files after
+ mounting pstore filesystem.
+
+ If unsure, say N.
+
config MTD_SWAP
tristate "Swap on MTD device support"
depends on MTD && SWAP
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 56cc60ccc477..593d0593a038 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
obj-$(CONFIG_SSFDC) += ssfdc.o
obj-$(CONFIG_SM_FTL) += sm_ftl.o
obj-$(CONFIG_MTD_OOPS) += mtdoops.o
+obj-$(CONFIG_MTD_PSTORE) += mtdpstore.o
obj-$(CONFIG_MTD_SWAP) += mtdswap.o
nftl-objs := nftlcore.o nftlmount.o
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 2916674208b3..b47691e1b81c 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -555,7 +555,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.id = -1;
config.dev = &mtd->dev;
- config.name = mtd->name;
+ config.name = dev_name(&mtd->dev);
config.owner = THIS_MODULE;
config.reg_read = mtd_nvmem_reg_read;
config.size = mtd->size;
@@ -2036,11 +2036,10 @@ static struct backing_dev_info * __init mtd_bdi_init(char *name)
struct backing_dev_info *bdi;
int ret;
- bdi = bdi_alloc(GFP_KERNEL);
+ bdi = bdi_alloc(NUMA_NO_NODE);
if (!bdi)
return ERR_PTR(-ENOMEM);
- bdi->name = name;
/*
* We put '-0' suffix to the name to get the same name format as we
* used to get. Since this is called only once, we get a unique name.
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
new file mode 100644
index 000000000000..a4fe6060b960
--- /dev/null
+++ b/drivers/mtd/mtdpstore.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define dev_fmt(fmt) "mtdoops-pstore: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pstore_blk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/bitops.h>
+
+static struct mtdpstore_context {
+ int index;
+ struct pstore_blk_config info;
+ struct pstore_device_info dev;
+ struct mtd_info *mtd;
+ unsigned long *rmmap; /* removed bit map */
+ unsigned long *usedmap; /* used bit map */
+ /*
+ * Used for panic write.
+ * As there is no block_isbad() in the panic path, keep this status
+ * ahead of a panic so that panic_write() does not fail.
+ */
+ unsigned long *badmap; /* bad block bit map */
+} oops_cxt;
+
+static int mtdpstore_block_isbad(struct mtdpstore_context *cxt, loff_t off)
+{
+ int ret;
+ struct mtd_info *mtd = cxt->mtd;
+ u64 blknum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ blknum = div_u64(off, mtd->erasesize);
+
+ if (test_bit(blknum, cxt->badmap))
+ return true;
+ ret = mtd_block_isbad(mtd, off);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "mtd_block_isbad failed, aborting\n");
+ return ret;
+ } else if (ret > 0) {
+ set_bit(blknum, cxt->badmap);
+ return true;
+ }
+ return false;
+}
+
+static inline int mtdpstore_panic_block_isbad(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u64 blknum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ blknum = div_u64(off, mtd->erasesize);
+ return test_bit(blknum, cxt->badmap);
+}
+
+static inline void mtdpstore_mark_used(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+
+ dev_dbg(&mtd->dev, "mark zone %llu used\n", zonenum);
+ set_bit(zonenum, cxt->usedmap);
+}
+
+static inline void mtdpstore_mark_unused(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+
+ dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
+ clear_bit(zonenum, cxt->usedmap);
+}
+
+static inline void mtdpstore_block_mark_unused(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+ u64 zonenum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ zonenum = div_u64(off, cxt->info.kmsg_size);
+ while (zonecnt > 0) {
+ dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
+ clear_bit(zonenum, cxt->usedmap);
+ zonenum++;
+ zonecnt--;
+ }
+}
+
+static inline int mtdpstore_is_used(struct mtdpstore_context *cxt, loff_t off)
+{
+ u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+ u64 blknum = div_u64(off, cxt->mtd->erasesize);
+
+ if (test_bit(blknum, cxt->badmap))
+ return true;
+ return test_bit(zonenum, cxt->usedmap);
+}
+
+static int mtdpstore_block_is_used(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+ u64 zonenum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ zonenum = div_u64(off, cxt->info.kmsg_size);
+ while (zonecnt > 0) {
+ if (test_bit(zonenum, cxt->usedmap))
+ return true;
+ zonenum++;
+ zonecnt--;
+ }
+ return false;
+}
+
+static int mtdpstore_is_empty(struct mtdpstore_context *cxt, char *buf,
+ size_t size)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ size_t sz;
+ int i;
+
+ sz = min_t(uint32_t, size, mtd->writesize / 4);
+ for (i = 0; i < sz; i++) {
+ if (buf[i] != (char)0xFF)
+ return false;
+ }
+ return true;
+}
+
+static void mtdpstore_mark_removed(struct mtdpstore_context *cxt, loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+
+ dev_dbg(&mtd->dev, "mark zone %llu removed\n", zonenum);
+ set_bit(zonenum, cxt->rmmap);
+}
+
+static void mtdpstore_block_clear_removed(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+ u64 zonenum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ zonenum = div_u64(off, cxt->info.kmsg_size);
+ while (zonecnt > 0) {
+ clear_bit(zonenum, cxt->rmmap);
+ zonenum++;
+ zonecnt--;
+ }
+}
+
+static int mtdpstore_block_is_removed(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+ u64 zonenum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ zonenum = div_u64(off, cxt->info.kmsg_size);
+ while (zonecnt > 0) {
+ if (test_bit(zonenum, cxt->rmmap))
+ return true;
+ zonenum++;
+ zonecnt--;
+ }
+ return false;
+}
+
+static int mtdpstore_erase_do(struct mtdpstore_context *cxt, loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ struct erase_info erase;
+ int ret;
+
+ off = ALIGN_DOWN(off, cxt->mtd->erasesize);
+ dev_dbg(&mtd->dev, "try to erase off 0x%llx\n", off);
+ erase.len = cxt->mtd->erasesize;
+ erase.addr = off;
+ ret = mtd_erase(cxt->mtd, &erase);
+ if (!ret)
+ mtdpstore_block_clear_removed(cxt, off);
+ else
+ dev_err(&mtd->dev, "erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
+ (unsigned long long)erase.addr,
+ (unsigned long long)erase.len, cxt->info.device);
+ return ret;
+}
+
+/*
+ * Called while removing a file.
+ *
+ * To avoid over-erasing, erase a block only when the whole block is unused.
+ * If the block still contains valid logs, erase lazily via flush_removed()
+ * at unregister time.
+ */
+static ssize_t mtdpstore_erase(size_t size, loff_t off)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+
+ if (mtdpstore_block_isbad(cxt, off))
+ return -EIO;
+
+ mtdpstore_mark_unused(cxt, off);
+
+ /* If the block still has valid data, mtdpstore erases it lazily */
+ if (likely(mtdpstore_block_is_used(cxt, off))) {
+ mtdpstore_mark_removed(cxt, off);
+ return 0;
+ }
+
+ /* all zones are unused, erase it */
+ return mtdpstore_erase_do(cxt, off);
+}
+
+/*
+ * What does "security" mean for mtdpstore?
+ * As there is no erase in the panic path, ensure that at least one zone
+ * is writable; otherwise, panic write will fail.
+ * If a zone is used, the write operation returns -ENOMSG and pstore/blk
+ * tries the zones one by one until it finds an empty one. So the next
+ * zone need not be empty, as long as at least one zone is.
+ */
+static int mtdpstore_security(struct mtdpstore_context *cxt, loff_t off)
+{
+ int ret = 0, i;
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonenum = (u32)div_u64(off, cxt->info.kmsg_size);
+ u32 zonecnt = (u32)div_u64(cxt->mtd->size, cxt->info.kmsg_size);
+ u32 blkcnt = (u32)div_u64(cxt->mtd->size, cxt->mtd->erasesize);
+ u32 erasesize = cxt->mtd->erasesize;
+
+ for (i = 0; i < zonecnt; i++) {
+ u32 num = (zonenum + i) % zonecnt;
+
+ /* found empty zone */
+ if (!test_bit(num, cxt->usedmap))
+ return 0;
+ }
+
+ /* If there is no empty zone at all, we have no choice but to erase */
+ while (blkcnt--) {
+ div64_u64_rem(off + erasesize, cxt->mtd->size, (u64 *)&off);
+
+ if (mtdpstore_block_isbad(cxt, off))
+ continue;
+
+ ret = mtdpstore_erase_do(cxt, off);
+ if (!ret) {
+ mtdpstore_block_mark_unused(cxt, off);
+ break;
+ }
+ }
+
+ if (ret)
+ dev_err(&mtd->dev, "all blocks bad!\n");
+ dev_dbg(&mtd->dev, "end security\n");
+ return ret;
+}
+
+static ssize_t mtdpstore_write(const char *buf, size_t size, loff_t off)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen;
+ int ret;
+
+ if (mtdpstore_block_isbad(cxt, off))
+ return -ENOMSG;
+
+ /* zone is used, please try next one */
+ if (mtdpstore_is_used(cxt, off))
+ return -ENOMSG;
+
+ dev_dbg(&mtd->dev, "try to write off 0x%llx size %zu\n", off, size);
+ ret = mtd_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
+ if (ret < 0 || retlen != size) {
+ dev_err(&mtd->dev, "write failure at %lld (%zu of %zu written), err %d\n",
+ off, retlen, size, ret);
+ return -EIO;
+ }
+ mtdpstore_mark_used(cxt, off);
+
+ mtdpstore_security(cxt, off);
+ return retlen;
+}
+
+static inline bool mtdpstore_is_io_error(int ret)
+{
+ return ret < 0 && !mtd_is_bitflip(ret) && !mtd_is_eccerr(ret);
+}
+
+/*
+ * All zones will be read, since pstore/blk reads the zones one by one
+ * during recovery.
+ */
+static ssize_t mtdpstore_read(char *buf, size_t size, loff_t off)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen, done;
+ int ret;
+
+ if (mtdpstore_block_isbad(cxt, off))
+ return -ENOMSG;
+
+ dev_dbg(&mtd->dev, "try to read off 0x%llx size %zu\n", off, size);
+ for (done = 0, retlen = 0; done < size; done += retlen) {
+ retlen = 0;
+
+ ret = mtd_read(cxt->mtd, off + done, size - done, &retlen,
+ (u_char *)buf + done);
+ if (mtdpstore_is_io_error(ret)) {
+ dev_err(&mtd->dev, "read failure at %lld (%zu of %zu read), err %d\n",
+ off + done, retlen, size - done, ret);
+ /* the zone may be broken, try next one */
+ return -ENOMSG;
+ }
+
+ /*
+ * ECC error: the impact on the log data is usually small, so it
+ * may still be readable and intelligible. mtdpstore therefore
+ * hands over whatever it got, and the user can judge whether the
+ * data is valid.
+ */
+ if (mtd_is_eccerr(ret)) {
+ dev_err(&mtd->dev, "ecc error at %lld (%zu of %zu read), err %d\n",
+ off + done, retlen, size - done, ret);
+ /* The driver may not set retlen on an ECC error */
+ retlen = retlen == 0 ? size - done : retlen;
+ }
+ }
+
+ if (mtdpstore_is_empty(cxt, buf, size))
+ mtdpstore_mark_unused(cxt, off);
+ else
+ mtdpstore_mark_used(cxt, off);
+
+ mtdpstore_security(cxt, off);
+ return retlen;
+}
+
+static ssize_t mtdpstore_panic_write(const char *buf, size_t size, loff_t off)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen;
+ int ret;
+
+ if (mtdpstore_panic_block_isbad(cxt, off))
+ return -ENOMSG;
+
+ /* zone is used, please try next one */
+ if (mtdpstore_is_used(cxt, off))
+ return -ENOMSG;
+
+ ret = mtd_panic_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
+ if (ret < 0 || size != retlen) {
+ dev_err(&mtd->dev, "panic write failure at %lld (%zu of %zu read), err %d\n",
+ off, retlen, size, ret);
+ return -EIO;
+ }
+ mtdpstore_mark_used(cxt, off);
+
+ return retlen;
+}
+
+static void mtdpstore_notify_add(struct mtd_info *mtd)
+{
+ int ret;
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct pstore_blk_config *info = &cxt->info;
+ unsigned long longcnt;
+
+ if (!strcmp(mtd->name, info->device))
+ cxt->index = mtd->index;
+
+ if (mtd->index != cxt->index || cxt->index < 0)
+ return;
+
+ dev_dbg(&mtd->dev, "found matching MTD device %s\n", mtd->name);
+
+ if (mtd->size < info->kmsg_size * 2) {
+ dev_err(&mtd->dev, "MTD partition %d not big enough\n",
+ mtd->index);
+ return;
+ }
+ /*
+ * kmsg_size must be aligned to 4096 bytes, a limit imposed by
+ * psblk. The default value of kmsg_size is 64KB. kmsg_size must
+ * not be larger than erasesize, since mtdpstore is designed
+ * around that assumption.
+ */
+ if (mtd->erasesize < info->kmsg_size) {
+ dev_err(&mtd->dev, "eraseblock size of MTD partition %d too small\n",
+ mtd->index);
+ return;
+ }
+ if (unlikely(info->kmsg_size % mtd->writesize)) {
+ dev_err(&mtd->dev, "record size %lu KB must align to write size %d KB\n",
+ info->kmsg_size / 1024,
+ mtd->writesize / 1024);
+ return;
+ }
+
+ longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
+ cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+
+ longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
+ cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+
+ cxt->dev.total_size = mtd->size;
+ /* just support dmesg right now */
+ cxt->dev.flags = PSTORE_FLAGS_DMESG;
+ cxt->dev.read = mtdpstore_read;
+ cxt->dev.write = mtdpstore_write;
+ cxt->dev.erase = mtdpstore_erase;
+ cxt->dev.panic_write = mtdpstore_panic_write;
+
+ ret = register_pstore_device(&cxt->dev);
+ if (ret) {
+ dev_err(&mtd->dev, "mtd%d register to psblk failed\n",
+ mtd->index);
+ return;
+ }
+ cxt->mtd = mtd;
+ dev_info(&mtd->dev, "Attached to MTD device %d\n", mtd->index);
+}
+
+static int mtdpstore_flush_removed_do(struct mtdpstore_context *cxt,
+ loff_t off, size_t size)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u_char *buf;
+ int ret;
+ size_t retlen;
+ struct erase_info erase;
+
+ buf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* 1st. read to cache */
+ ret = mtd_read(mtd, off, mtd->erasesize, &retlen, buf);
+ if (mtdpstore_is_io_error(ret))
+ goto free;
+
+ /* 2nd. erase block */
+ erase.len = mtd->erasesize;
+ erase.addr = off;
+ ret = mtd_erase(mtd, &erase);
+ if (ret)
+ goto free;
+
+ /* 3rd. write back */
+ while (size) {
+ unsigned int zonesize = cxt->info.kmsg_size;
+
+ /* there is valid data on block, write back */
+ if (mtdpstore_is_used(cxt, off)) {
+ ret = mtd_write(mtd, off, zonesize, &retlen, buf);
+ if (ret)
+ dev_err(&mtd->dev, "write failure at %lld (%zu of %u written), err %d\n",
+ off, retlen, zonesize, ret);
+ }
+
+ off += zonesize;
+ size -= min_t(unsigned int, zonesize, size);
+ }
+
+free:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * What does mtdpstore_flush_removed() do?
+ * When the user removes a log file from the pstore filesystem, mtdpstore
+ * must make sure the file is really removed from flash. If the whole block
+ * is no longer used, the best option is to erase it. However, if the block
+ * still contains valid logs, all mtdpstore can do is erase the block and
+ * write the valid logs back.
+ */
+static int mtdpstore_flush_removed(struct mtdpstore_context *cxt)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ int ret;
+ loff_t off;
+ u32 blkcnt = (u32)div_u64(mtd->size, mtd->erasesize);
+
+ for (off = 0; blkcnt > 0; blkcnt--, off += mtd->erasesize) {
+ ret = mtdpstore_block_isbad(cxt, off);
+ if (ret)
+ continue;
+
+ ret = mtdpstore_block_is_removed(cxt, off);
+ if (!ret)
+ continue;
+
+ ret = mtdpstore_flush_removed_do(cxt, off, mtd->erasesize);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void mtdpstore_notify_remove(struct mtd_info *mtd)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+
+ if (mtd->index != cxt->index || cxt->index < 0)
+ return;
+
+ mtdpstore_flush_removed(cxt);
+
+ unregister_pstore_device(&cxt->dev);
+ kfree(cxt->badmap);
+ kfree(cxt->usedmap);
+ kfree(cxt->rmmap);
+ cxt->mtd = NULL;
+ cxt->index = -1;
+}
+
+static struct mtd_notifier mtdpstore_notifier = {
+ .add = mtdpstore_notify_add,
+ .remove = mtdpstore_notify_remove,
+};
+
+static int __init mtdpstore_init(void)
+{
+ int ret;
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct pstore_blk_config *info = &cxt->info;
+
+ ret = pstore_blk_get_config(info);
+ if (unlikely(ret))
+ return ret;
+
+ if (strlen(info->device) == 0) {
+ pr_err("mtd device must be supplied (device name is empty)\n");
+ return -EINVAL;
+ }
+ if (!info->kmsg_size) {
+ pr_err("no backend enabled (kmsg_size is 0)\n");
+ return -EINVAL;
+ }
+
+ /* Setup the MTD device to use */
+ ret = kstrtoint((char *)info->device, 0, &cxt->index);
+ if (ret)
+ cxt->index = -1;
+
+ register_mtd_user(&mtdpstore_notifier);
+ return 0;
+}
+module_init(mtdpstore_init);
+
+static void __exit mtdpstore_exit(void)
+{
+ unregister_mtd_user(&mtdpstore_notifier);
+}
+module_exit(mtdpstore_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
+MODULE_DESCRIPTION("MTD backend for pstore/blk");
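
mtdpstore's bookkeeping above hinges on mapping a flash offset to a zone bit (usedmap/rmmap, one bit per kmsg_size) or a block bit (badmap, one bit per eraseblock). A standalone sketch of that arithmetic, with assumed names:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/math64.h>

struct example_maps {
	unsigned long *usedmap;	/* one bit per kmsg_size-sized zone */
	unsigned long *badmap;	/* one bit per erase block */
	u32 kmsg_size;
	u32 erasesize;
};

static void example_mark_used(struct example_maps *m, loff_t off)
{
	u64 zonenum = div_u64(off, m->kmsg_size);

	set_bit(zonenum, m->usedmap);
}

static bool example_block_is_bad(struct example_maps *m, loff_t off)
{
	/* Round down to the erase block boundary before indexing badmap */
	u64 blknum = div_u64(ALIGN_DOWN(off, m->erasesize), m->erasesize);

	return test_bit(blknum, m->badmap);
}
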
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index e4e3ceeac38f..8f9ffb46a09f 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2728,9 +2728,8 @@ static int brcmnand_resume(struct device *dev)
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
}
- if (has_edu(ctrl))
+ if (has_edu(ctrl)) {
ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
- else {
edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
edu_readl(ctrl, EDU_CONFIG);
brcmnand_edu_init(ctrl);
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index b6bb358b96ce..e2c382ffc5b6 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -1089,6 +1089,10 @@ static int spinand_init(struct spinand_device *spinand)
mtd->oobavail = ret;
+ /* Propagate ECC information to mtd_info */
+ mtd->ecc_strength = nand->eccreq.strength;
+ mtd->ecc_step_size = nand->eccreq.step_size;
+
return 0;
err_cleanup_nanddev:
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 54646c2c2744..ac2bdba8bb1a 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -393,9 +393,6 @@ static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos)
{
struct ubi_device *ubi = s->private;
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
if (*pos < ubi->peb_count)
return pos;
@@ -409,8 +406,6 @@ static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct ubi_device *ubi = s->private;
- if (v == SEQ_START_TOKEN)
- return pos;
(*pos)++;
if (*pos < ubi->peb_count)
@@ -432,11 +427,8 @@ static int eraseblk_count_seq_show(struct seq_file *s, void *iter)
int err;
/* If this is the start, print a header */
- if (iter == SEQ_START_TOKEN) {
- seq_puts(s,
- "physical_block_number\terase_count\tblock_status\tread_status\n");
- return 0;
- }
+ if (*block_number == 0)
+ seq_puts(s, "physical_block_number\terase_count\n");
err = ubi_io_is_bad(ubi, *block_number);
if (err)
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index b57b84fb97d0..14d890b00d2c 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1297,7 +1297,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
if (!ubi_dbg_chk_io(ubi))
return 0;
- buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+ buf1 = __vmalloc(len, GFP_NOFS);
if (!buf1) {
ubi_err(ubi, "cannot allocate memory to check writes");
return 0;
@@ -1361,7 +1361,7 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
if (!ubi_dbg_chk_io(ubi))
return 0;
- buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+ buf = __vmalloc(len, GFP_NOFS);
if (!buf) {
ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
return 0;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b103fbdd0f68..c7d310ef1c83 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -50,7 +50,7 @@ config BONDING
The driver supports multiple bonding modes to allow for both high
performance and high availability operation.
- Refer to <file:Documentation/networking/bonding.txt> for more
+ Refer to <file:Documentation/networking/bonding.rst> for more
information.
To compile this driver as a module, choose M here: the module
@@ -126,7 +126,7 @@ config EQUALIZER
Linux driver or with a Livingston Portmaster 2e.
Say Y if you want this and read
- <file:Documentation/networking/eql.txt>. You may also want to read
+ <file:Documentation/networking/eql.rst>. You may also want to read
section 6.2 of the NET-3-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
@@ -302,7 +302,7 @@ config NETCONSOLE
tristate "Network console logging support"
---help---
If you want to log kernel messages over the network, enable this.
- See <file:Documentation/networking/netconsole.txt> for details.
+ See <file:Documentation/networking/netconsole.rst> for details.
config NETCONSOLE_DYNAMIC
bool "Dynamic reconfiguration of logging targets"
@@ -312,7 +312,7 @@ config NETCONSOLE_DYNAMIC
This option enables the ability to dynamically reconfigure target
parameters (interface, IP addresses, port numbers, MAC addresses)
at runtime through a userspace interface exported using configfs.
- See <file:Documentation/networking/netconsole.txt> for details.
+ See <file:Documentation/networking/netconsole.rst> for details.
config NETPOLL
def_bool NETCONSOLE
@@ -355,7 +355,7 @@ config TUN
devices, driver will automatically delete tunXX or tapXX device and
all routes corresponding to it.
- Please read <file:Documentation/networking/tuntap.txt> for more
+ Please read <file:Documentation/networking/tuntap.rst> for more
information.
To compile this driver as a module, choose M here: the module
@@ -460,7 +460,7 @@ config NET_SB1000
At present this driver only compiles as a module, so say M here if
you have this card. The module will be called sb1000. Then read
- <file:Documentation/networking/device_drivers/sb1000.txt> for
+ <file:Documentation/networking/device_drivers/sb1000.rst> for
information on how to use this module, as it needs special ppp
scripts for establishing a connection. Further documentation
and the necessary scripts can be found at:
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index af509b05ac5c..10589a82263b 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -48,7 +48,7 @@ config LTPC
If you are in doubt, this card is the one with the 65C02 chip on it.
You also need version 1.3.3 or later of the netatalk package.
This driver is experimental, which means that it may not work.
- See the file <file:Documentation/networking/ltpc.txt>.
+ See the file <file:Documentation/networking/ltpc.rst>.
config COPS
tristate "COPS LocalTalk PC support"
@@ -59,7 +59,7 @@ config COPS
package. This driver is experimental, which means that it may not
work. This driver will only work if you choose "AppleTalk DDP"
networking support, above.
- Please read the file <file:Documentation/networking/cops.txt>.
+ Please read the file <file:Documentation/networking/cops.rst>.
config COPS_DAYNA
bool "Dayna firmware support"
@@ -86,7 +86,7 @@ config IPDDP
box is stuck on an AppleTalk only network) or decapsulate (e.g. if
you want your Linux box to act as an Internet gateway for a zoo of
AppleTalk connected Macs). Please see the file
- <file:Documentation/networking/ipddp.txt> for more information.
+ <file:Documentation/networking/ipddp.rst> for more information.
If you say Y here, the AppleTalk-IP support will be compiled into
the kernel. In this case, you can either use encapsulation or
@@ -107,4 +107,4 @@ config IPDDP_ENCAP
IP packets inside AppleTalk frames; this is useful if your Linux box
is stuck on an AppleTalk network (which hopefully contains a
decapsulator somewhere). Please see
- <file:Documentation/networking/ipddp.txt> for more information.
+ <file:Documentation/networking/ipddp.rst> for more information.
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index 27551bf3d7e4..43eef60653b2 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -9,7 +9,7 @@ menuconfig ARCNET
---help---
If you have a network card of this type, say Y and check out the
(arguably) beautiful poetry in
- <file:Documentation/networking/arcnet.txt>.
+ <file:Documentation/networking/arcnet.rst>.
You need both this driver, and the driver for the particular ARCnet
chipset of your card. If you don't know, then it's probably a
@@ -28,7 +28,7 @@ config ARCNET_1201
arc0 device. You need to say Y here to communicate with
industry-standard RFC1201 implementations, like the arcether.com
packet driver or most DOS/Windows ODI drivers. Please read the
- ARCnet documentation in <file:Documentation/networking/arcnet.txt>
+ ARCnet documentation in <file:Documentation/networking/arcnet.rst>
for more information about using arc0.
config ARCNET_1051
@@ -42,7 +42,7 @@ config ARCNET_1051
industry-standard RFC1201 implementations, like the arcether.com
packet driver or most DOS/Windows ODI drivers. RFC1201 is included
automatically as the arc0 device. Please read the ARCnet
- documentation in <file:Documentation/networking/arcnet.txt> for more
+ documentation in <file:Documentation/networking/arcnet.rst> for more
information about using arc0e and arc0s.
config ARCNET_RAW
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index cc0703c3d57f..efd1a1d1f35e 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -136,25 +136,21 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
- if (family == AF_INET)
+ if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
-#if IS_ENABLED(CONFIG_IPV6)
else
err = IP6_ECN_decapsulate(oiph, skb);
-#endif
if (unlikely(err)) {
if (log_ecn_error) {
- if (family == AF_INET)
+ if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
net_info_ratelimited("non-ECT from %pI4 "
"with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
((struct iphdr *)oiph)->tos);
-#if IS_ENABLED(CONFIG_IPV6)
else
net_info_ratelimited("non-ECT from %pI6\n",
&((struct ipv6hdr *)oiph)->saddr);
-#endif
}
if (err > 1) {
++bareudp->dev->stats.rx_frame_errors;
@@ -350,7 +346,6 @@ free_dst:
return err;
}
-#if IS_ENABLED(CONFIG_IPV6)
static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
@@ -411,7 +406,6 @@ free_dst:
dst_release(dst);
return err;
}
-#endif
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -435,11 +429,9 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
}
rcu_read_lock();
-#if IS_ENABLED(CONFIG_IPV6)
- if (info->mode & IP_TUNNEL_INFO_IPV6)
+ if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
err = bareudp6_xmit_skb(skb, dev, bareudp, info);
else
-#endif
err = bareudp_xmit_skb(skb, dev, bareudp, info);
rcu_read_unlock();
@@ -467,7 +459,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
- if (ip_tunnel_info_af(info) == AF_INET) {
+ if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
__be32 saddr;
@@ -478,7 +470,6 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
ip_rt_put(rt);
info->key.u.ipv4.src = saddr;
-#if IS_ENABLED(CONFIG_IPV6)
} else if (ip_tunnel_info_af(info) == AF_INET6) {
struct dst_entry *dst;
struct in6_addr saddr;
@@ -492,7 +483,6 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
dst_release(dst);
info->key.u.ipv6.src = saddr;
-#endif
} else {
return -EINVAL;
}
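
bareudp.c drops its #if IS_ENABLED(CONFIG_IPV6) blocks in favor of IS_ENABLED() tests inside ordinary if conditions. The macro expands to a compile-time 0 or 1, so the compiler still discards the IPv6 branches on IPv6-less builds, but the code is always parsed and type-checked. A minimal sketch of the idiom, with hypothetical handle_v4()/handle_v6() helpers:

#include <linux/kconfig.h>
#include <linux/skbuff.h>
#include <linux/socket.h>

int handle_v4(struct sk_buff *skb);
int handle_v6(struct sk_buff *skb);	/* declaration must stay visible */

static int handle_packet(int family, struct sk_buff *skb)
{
	/*
	 * When CONFIG_IPV6 is off, !IS_ENABLED(CONFIG_IPV6) is the
	 * constant 1, the condition is always true, and the dead
	 * handle_v6() call is eliminated at compile time.
	 */
	if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
		return handle_v4(skb);

	return handle_v6(skb);
}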
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c81698550e5a..095ea51d1853 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1318,8 +1318,7 @@ static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
tx_slave->dev->dev_addr);
}
- bond_dev_queue_xmit(bond, skb, tx_slave->dev);
- goto out;
+ return bond_dev_queue_xmit(bond, skb, tx_slave->dev);
}
if (tx_slave && bond->params.tlb_dynamic_lb) {
@@ -1329,16 +1328,14 @@ static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
}
/* no suitable interface, frame not sent */
- bond_tx_drop(bond->dev, skb);
-out:
- return NETDEV_TX_OK;
+ return bond_tx_drop(bond->dev, skb);
}
-netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
+ struct sk_buff *skb)
{
- struct bonding *bond = netdev_priv(bond_dev);
- struct ethhdr *eth_data;
struct slave *tx_slave = NULL;
+ struct ethhdr *eth_data;
u32 hash_index;
skb_reset_mac_header(skb);
@@ -1360,7 +1357,7 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct bond_up_slave *slaves;
unsigned int count;
- slaves = rcu_dereference(bond->slave_arr);
+ slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
@@ -1369,20 +1366,29 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
}
- return bond_do_alb_xmit(skb, bond, tx_slave);
+ return tx_slave;
}
-netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct ethhdr *eth_data;
+ struct slave *tx_slave;
+
+ tx_slave = bond_xmit_tlb_slave_get(bond, skb);
+ return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
+struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
+ struct sk_buff *skb)
+{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- struct slave *tx_slave = NULL;
static const __be32 ip_bcast = htonl(0xffffffff);
- int hash_size = 0;
+ struct slave *tx_slave = NULL;
+ const u8 *hash_start = NULL;
bool do_tx_balance = true;
+ struct ethhdr *eth_data;
u32 hash_index = 0;
- const u8 *hash_start = NULL;
+ int hash_size = 0;
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
@@ -1494,14 +1500,22 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct bond_up_slave *slaves;
unsigned int count;
- slaves = rcu_dereference(bond->slave_arr);
+ slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
count];
}
}
+ return tx_slave;
+}
+
+netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *tx_slave = NULL;
+ tx_slave = bond_xmit_alb_slave_get(bond, skb);
return bond_do_alb_xmit(skb, bond, tx_slave);
}
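
The ALB/TLB rework above splits each transmit path into a pure slave-selection helper (bond_xmit_tlb_slave_get(), bond_xmit_alb_slave_get()) and a thin xmit wrapper, so the same selection logic can back the new ndo_get_xmit_slave op added later in this series without consuming the skb. A minimal sketch of the resulting shape, with an illustrative mode name:

#include <net/bonding.h>

struct slave *my_mode_slave_get(struct bonding *bond, struct sk_buff *skb);

static netdev_tx_t my_mode_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	struct slave *slave;

	/* Pure lookup: nothing is transmitted or freed here, so the
	 * helper is reusable by callers that only want the device.
	 */
	slave = my_mode_slave_get(bond, skb);
	if (likely(slave))
		return bond_dev_queue_xmit(bond, skb, slave->dev);

	/* bond_tx_drop() frees the skb and, after this series, returns
	 * a netdev_tx_t status itself.
	 */
	return bond_tx_drop(dev, skb);
}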
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2e70e43c5df5..a25c65d4af71 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -287,7 +287,7 @@ const char *bond_mode_name(int mode)
* @skb: hw accel VLAN tagged skb to transmit
* @slave_dev: slave that is supposed to xmit this skbuff
*/
-void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
skb->dev = slave_dev;
@@ -297,9 +297,9 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
if (unlikely(netpoll_tx_running(bond->dev)))
- bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
- else
- dev_queue_xmit(skb);
+ return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+
+ return dev_queue_xmit(skb);
}
/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
@@ -3923,16 +3923,15 @@ unwind:
}
/**
- * bond_xmit_slave_id - transmit skb through slave with slave_id
+ * bond_get_slave_by_id - get xmit slave with slave_id
* @bond: bonding device that is transmitting
- * @skb: buffer to transmit
* @slave_id: slave id up to slave_cnt-1 through which to transmit
*
- * This function tries to transmit through slave with slave_id but in case
+ * This function tries to get slave with slave_id but in case
* it fails, it tries to find the first available slave for transmission.
- * The skb is consumed in all cases, thus the function is void.
*/
-static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+static struct slave *bond_get_slave_by_id(struct bonding *bond,
+ int slave_id)
{
struct list_head *iter;
struct slave *slave;
@@ -3941,10 +3940,8 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
/* Here we start from the slave with slave_id */
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0) {
- if (bond_slave_can_tx(slave)) {
- bond_dev_queue_xmit(bond, skb, slave->dev);
- return;
- }
+ if (bond_slave_can_tx(slave))
+ return slave;
}
}
@@ -3953,13 +3950,11 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0)
break;
- if (bond_slave_can_tx(slave)) {
- bond_dev_queue_xmit(bond, skb, slave->dev);
- return;
- }
+ if (bond_slave_can_tx(slave))
+ return slave;
}
/* no slave that can tx has been found */
- bond_tx_drop(bond->dev, skb);
+ return NULL;
}
/**
@@ -3995,10 +3990,9 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
return slave_id;
}
-static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
- struct net_device *bond_dev)
+static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
+ struct sk_buff *skb)
{
- struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
int slave_cnt;
u32 slave_id;
@@ -4020,22 +4014,37 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
if (iph->protocol == IPPROTO_IGMP) {
slave = rcu_dereference(bond->curr_active_slave);
if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_xmit_slave_id(bond, skb, 0);
- return NETDEV_TX_OK;
+ return slave;
+ return bond_get_slave_by_id(bond, 0);
}
}
non_igmp:
slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
- slave_id = bond_rr_gen_slave_id(bond);
- bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
- } else {
- bond_tx_drop(bond_dev, skb);
+ slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
+ return bond_get_slave_by_id(bond, slave_id);
}
- return NETDEV_TX_OK;
+ return NULL;
+}
+
+static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
+ struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+
+ slave = bond_xmit_roundrobin_slave_get(bond, skb);
+ if (likely(slave))
+ return bond_dev_queue_xmit(bond, skb, slave->dev);
+
+ return bond_tx_drop(bond_dev, skb);
+}
+
+static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond,
+ struct sk_buff *skb)
+{
+ return rcu_dereference(bond->curr_active_slave);
}
/* In active-backup mode, we know that bond->curr_active_slave is always valid if
@@ -4047,13 +4056,11 @@ static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- slave = rcu_dereference(bond->curr_active_slave);
+ slave = bond_xmit_activebackup_slave_get(bond, skb);
if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_tx_drop(bond_dev, skb);
+ return bond_dev_queue_xmit(bond, skb, slave->dev);
- return NETDEV_TX_OK;
+ return bond_tx_drop(bond_dev, skb);
}
/* Use this to update slave_array when (a) it's not appropriate to update
@@ -4087,6 +4094,61 @@ err:
bond_slave_arr_work_rearm(bond, 1);
}
+static void bond_skip_slave(struct bond_up_slave *slaves,
+ struct slave *skipslave)
+{
+ int idx;
+
+ /* Rare situation where caller has asked to skip a specific
+ * slave but allocation failed (most likely!). BTW this is
+ * only possible when the call is initiated from
+ * __bond_release_one(). In this situation; overwrite the
+ * skipslave entry in the array with the last entry from the
+ * array to avoid a situation where the xmit path may choose
+ * this to-be-skipped slave to send a packet out.
+ */
+ for (idx = 0; slaves && idx < slaves->count; idx++) {
+ if (skipslave == slaves->arr[idx]) {
+ slaves->arr[idx] =
+ slaves->arr[slaves->count - 1];
+ slaves->count--;
+ break;
+ }
+ }
+}
+
+static void bond_set_slave_arr(struct bonding *bond,
+ struct bond_up_slave *usable_slaves,
+ struct bond_up_slave *all_slaves)
+{
+ struct bond_up_slave *usable, *all;
+
+ usable = rtnl_dereference(bond->usable_slaves);
+ rcu_assign_pointer(bond->usable_slaves, usable_slaves);
+ kfree_rcu(usable, rcu);
+
+ all = rtnl_dereference(bond->all_slaves);
+ rcu_assign_pointer(bond->all_slaves, all_slaves);
+ kfree_rcu(all, rcu);
+}
+
+static void bond_reset_slave_arr(struct bonding *bond)
+{
+ struct bond_up_slave *usable, *all;
+
+ usable = rtnl_dereference(bond->usable_slaves);
+ if (usable) {
+ RCU_INIT_POINTER(bond->usable_slaves, NULL);
+ kfree_rcu(usable, rcu);
+ }
+
+ all = rtnl_dereference(bond->all_slaves);
+ if (all) {
+ RCU_INIT_POINTER(bond->all_slaves, NULL);
+ kfree_rcu(all, rcu);
+ }
+}
+
/* Build the usable slaves array in control path for modes that use xmit-hash
* to determine the slave interface -
* (a) BOND_MODE_8023AD
@@ -4097,9 +4159,9 @@ err:
*/
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
+ struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
struct slave *slave;
struct list_head *iter;
- struct bond_up_slave *new_arr, *old_arr;
int agg_id = 0;
int ret = 0;
@@ -4107,11 +4169,12 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
WARN_ON(lockdep_is_held(&bond->mode_lock));
#endif
- new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
- GFP_KERNEL);
- if (!new_arr) {
+ usable_slaves = kzalloc(struct_size(usable_slaves, arr,
+ bond->slave_cnt), GFP_KERNEL);
+ all_slaves = kzalloc(struct_size(all_slaves, arr,
+ bond->slave_cnt), GFP_KERNEL);
+ if (!usable_slaves || !all_slaves) {
ret = -ENOMEM;
- pr_err("Failed to build slave-array.\n");
goto out;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -4119,20 +4182,19 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("bond_3ad_get_active_agg_info failed\n");
- kfree_rcu(new_arr, rcu);
/* No active aggragator means it's not safe to use
* the previous array.
*/
- old_arr = rtnl_dereference(bond->slave_arr);
- if (old_arr) {
- RCU_INIT_POINTER(bond->slave_arr, NULL);
- kfree_rcu(old_arr, rcu);
- }
+ bond_reset_slave_arr(bond);
goto out;
}
agg_id = ad_info.aggregator_id;
}
bond_for_each_slave(bond, slave, iter) {
+ if (skipslave == slave)
+ continue;
+
+ all_slaves->arr[all_slaves->count++] = slave;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct aggregator *agg;
@@ -4142,44 +4204,45 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
}
if (!bond_slave_can_tx(slave))
continue;
- if (skipslave == slave)
- continue;
slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
- new_arr->count);
+ usable_slaves->count);
- new_arr->arr[new_arr->count++] = slave;
+ usable_slaves->arr[usable_slaves->count++] = slave;
}
- old_arr = rtnl_dereference(bond->slave_arr);
- rcu_assign_pointer(bond->slave_arr, new_arr);
- if (old_arr)
- kfree_rcu(old_arr, rcu);
+ bond_set_slave_arr(bond, usable_slaves, all_slaves);
+ return ret;
out:
if (ret != 0 && skipslave) {
- int idx;
-
- /* Rare situation where caller has asked to skip a specific
- * slave but allocation failed (most likely!). BTW this is
- * only possible when the call is initiated from
- * __bond_release_one(). In this situation; overwrite the
- * skipslave entry in the array with the last entry from the
- * array to avoid a situation where the xmit path may choose
- * this to-be-skipped slave to send a packet out.
- */
- old_arr = rtnl_dereference(bond->slave_arr);
- for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
- if (skipslave == old_arr->arr[idx]) {
- old_arr->arr[idx] =
- old_arr->arr[old_arr->count-1];
- old_arr->count--;
- break;
- }
- }
+ bond_skip_slave(rtnl_dereference(bond->all_slaves),
+ skipslave);
+ bond_skip_slave(rtnl_dereference(bond->usable_slaves),
+ skipslave);
}
+ kfree_rcu(all_slaves, rcu);
+ kfree_rcu(usable_slaves, rcu);
+
return ret;
}
+static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
+ struct sk_buff *skb,
+ struct bond_up_slave *slaves)
+{
+ struct slave *slave;
+ unsigned int count;
+ u32 hash;
+
+ hash = bond_xmit_hash(bond, skb);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (unlikely(!count))
+ return NULL;
+
+ slave = slaves->arr[hash % count];
+ return slave;
+}
+
/* Use this Xmit function for 3AD as well as XOR modes. The current
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
@@ -4188,20 +4251,15 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
- struct slave *slave;
struct bond_up_slave *slaves;
- unsigned int count;
+ struct slave *slave;
- slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? READ_ONCE(slaves->count) : 0;
- if (likely(count)) {
- slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
- bond_dev_queue_xmit(bond, skb, slave->dev);
- } else {
- bond_tx_drop(dev, skb);
- }
+ slaves = rcu_dereference(bond->usable_slaves);
+ slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
+ if (likely(slave))
+ return bond_dev_queue_xmit(bond, skb, slave->dev);
- return NETDEV_TX_OK;
+ return bond_tx_drop(dev, skb);
}
/* in broadcast mode, we send everything to all usable interfaces. */
@@ -4227,11 +4285,9 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
}
}
if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_tx_drop(bond_dev, skb);
+ return bond_dev_queue_xmit(bond, skb, slave->dev);
- return NETDEV_TX_OK;
+ return bond_tx_drop(bond_dev, skb);
}
/*------------------------- Device initialization ---------------------------*/
@@ -4284,6 +4340,48 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
+static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
+ struct sk_buff *skb,
+ bool all_slaves)
+{
+ struct bonding *bond = netdev_priv(master_dev);
+ struct bond_up_slave *slaves;
+ struct slave *slave = NULL;
+
+ switch (BOND_MODE(bond)) {
+ case BOND_MODE_ROUNDROBIN:
+ slave = bond_xmit_roundrobin_slave_get(bond, skb);
+ break;
+ case BOND_MODE_ACTIVEBACKUP:
+ slave = bond_xmit_activebackup_slave_get(bond, skb);
+ break;
+ case BOND_MODE_8023AD:
+ case BOND_MODE_XOR:
+ if (all_slaves)
+ slaves = rcu_dereference(bond->all_slaves);
+ else
+ slaves = rcu_dereference(bond->usable_slaves);
+ slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
+ break;
+ case BOND_MODE_BROADCAST:
+ break;
+ case BOND_MODE_ALB:
+ slave = bond_xmit_alb_slave_get(bond, skb);
+ break;
+ case BOND_MODE_TLB:
+ slave = bond_xmit_tlb_slave_get(bond, skb);
+ break;
+ default:
+ /* Should never happen, mode already checked */
+ WARN_ONCE(true, "Unknown bonding mode");
+ break;
+ }
+
+ if (slave)
+ return slave->dev;
+ return NULL;
+}
+
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
@@ -4310,8 +4408,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Should never happen, mode already checked */
netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
WARN_ON_ONCE(1);
- bond_tx_drop(dev, skb);
- return NETDEV_TX_OK;
+ return bond_tx_drop(dev, skb);
}
}
@@ -4330,7 +4427,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
- bond_tx_drop(dev, skb);
+ ret = bond_tx_drop(dev, skb);
rcu_read_unlock();
return ret;
@@ -4405,6 +4502,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
.ndo_features_check = passthru_features_check,
+ .ndo_get_xmit_slave = bond_xmit_get_slave,
};
static const struct device_type bond_type = {
@@ -4472,9 +4570,9 @@ void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct bond_up_slave *usable, *all;
struct list_head *iter;
struct slave *slave;
- struct bond_up_slave *arr;
bond_netpoll_cleanup(bond_dev);
@@ -4483,15 +4581,20 @@ static void bond_uninit(struct net_device *bond_dev)
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
- arr = rtnl_dereference(bond->slave_arr);
- if (arr) {
- RCU_INIT_POINTER(bond->slave_arr, NULL);
- kfree_rcu(arr, rcu);
+ usable = rtnl_dereference(bond->usable_slaves);
+ if (usable) {
+ RCU_INIT_POINTER(bond->usable_slaves, NULL);
+ kfree_rcu(usable, rcu);
+ }
+
+ all = rtnl_dereference(bond->all_slaves);
+ if (all) {
+ RCU_INIT_POINTER(bond->all_slaves, NULL);
+ kfree_rcu(all, rcu);
}
list_del(&bond->bond_list);
- lockdep_unregister_key(&bond->stats_lock_key);
bond_debug_unregister(bond);
}
@@ -4896,8 +4999,7 @@ static int bond_init(struct net_device *bond_dev)
return -ENOMEM;
spin_lock_init(&bond->stats_lock);
- lockdep_register_key(&bond->stats_lock_key);
- lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
+ netdev_lockdep_set_classes(bond_dev);
list_add_tail(&bond->bond_list, &bn->dev_list);
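
Two idioms in the bond_main.c changes deserve a note: the slave arrays are now sized with struct_size() instead of open-coded offsetof() arithmetic, and the usable/all arrays are published to readers with the classic RCU pointer swap (rcu_assign_pointer() on the new copy, kfree_rcu() on the old, as in bond_set_slave_arr() above). A condensed sketch with hypothetical names:

#include <linux/overflow.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct item;

struct item_array {
	struct rcu_head rcu;
	unsigned int count;
	struct item *arr[];	/* flexible array member */
};

static struct item_array *item_array_alloc(unsigned int n)
{
	struct item_array *p;

	/*
	 * struct_size(p, arr, n) == sizeof(*p) + n * sizeof(p->arr[0]),
	 * saturating to SIZE_MAX on overflow so kzalloc() fails cleanly
	 * instead of under-allocating. p is only used for its type here.
	 */
	p = kzalloc(struct_size(p, arr, n), GFP_KERNEL);
	return p;
}

static void item_array_publish(struct item_array __rcu **slot,
			       struct item_array *new)
{
	struct item_array *old;

	old = rcu_dereference_protected(*slot, lockdep_rtnl_is_held());
	rcu_assign_pointer(*slot, new);
	/* Freed after a grace period; like bond_set_slave_arr() this
	 * relies on kfree_rcu() accepting a NULL pointer.
	 */
	kfree_rcu(old, rcu);
}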
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 007481557191..9b8346638f69 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -149,8 +149,10 @@ int bond_sysfs_slave_add(struct slave *slave)
err = kobject_init_and_add(&slave->kobj, &slave_ktype,
&(slave->dev->dev.kobj), "bonding_slave");
- if (err)
+ if (err) {
+ kobject_put(&slave->kobj);
return err;
+ }
for (a = slave_attrs; *a; ++a) {
err = sysfs_create_file(&slave->kobj, &((*a)->attr));
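
The sysfs fix above follows the kobject lifetime contract: once kobject_init_and_add() has run, the caller holds a reference even when the call fails, and must drop it with kobject_put() so the ktype's ->release() can free the name and any embedded state; returning early without the put leaks the object. A minimal sketch of the pattern:

#include <linux/kobject.h>

static int register_child(struct kobject *kobj, struct kobj_type *ktype,
			  struct kobject *parent)
{
	int err;

	err = kobject_init_and_add(kobj, ktype, parent, "example");
	if (err) {
		/*
		 * kobject_init_and_add() initialized the refcount even
		 * though the add failed, so the error path must put the
		 * reference rather than free the object directly.
		 */
		kobject_put(kobj);
		return err;
	}

	return 0;
}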
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
index 45b77bc8c7b3..48cdf3a49a7d 100644
--- a/drivers/net/bonding/bonding_priv.h
+++ b/drivers/net/bonding/bonding_priv.h
@@ -14,7 +14,7 @@
#ifndef _BONDING_PRIV_H
#define _BONDING_PRIV_H
-#include <linux/vermagic.h>
+#include <generated/utsrelease.h>
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 661c25eb1c46..1538ad194cf4 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -28,7 +28,7 @@ config CAIF_SPI_SLAVE
The CAIF Link layer SPI Protocol driver for Slave SPI interface.
This driver implements a platform driver to accommodate for a
platform specific SPI device. A sample CAIF SPI Platform device is
- provided in <file:Documentation/networking/caif/spi_porting.txt>.
+ provided in <file:Documentation/networking/caif/spi_porting.rst>.
config CAIF_SPI_SYNC
bool "Next command and length in start of frame"
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 04d59bede5ea..74503cacf594 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -947,8 +947,11 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
u32 id, rev;
addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
irq = platform_get_irq(pdev, 0);
- if (IS_ERR(addr) || irq < 0)
+ if (irq < 0)
return -EINVAL;
id = readl(addr + IFI_CANFD_IP_ID);
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index e3ba8ab0cbf4..e2c6cf4b2228 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -792,7 +792,7 @@ static int sun4ican_probe(struct platform_device *pdev)
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
- err = -EBUSY;
+ err = PTR_ERR(addr);
goto exit;
}
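
Both CAN probe fixes follow the same rule: when devm_platform_ioremap_resource() fails, propagate the errno encoded in the ERR_PTR instead of substituting a generic -EINVAL or -EBUSY, and check each resource where it is acquired so every failure reports its own cause. A minimal probe-path sketch (one common variant, as here, returns the raw negative irq value rather than -EINVAL):

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *addr;
	int irq;

	addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(addr))
		return PTR_ERR(addr);	/* the real reason, e.g. -EBUSY or -ENOMEM */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;		/* already a negative errno */

	/* ... register the device using addr and irq ... */
	return 0;
}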
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index c283593bef17..1df05841ab6b 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1325,7 +1325,6 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
u16 pvid, new_pvid;
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
- new_pvid = pvid;
if (!vlan_filtering) {
/* Filtering is currently enabled, use the default PVID since
* the bridge does not expect tagging anymore
@@ -1484,8 +1483,7 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
}
static int b53_arl_read(struct b53_device *dev, u64 mac,
- u16 vid, struct b53_arl_entry *ent, u8 *idx,
- bool is_valid)
+ u16 vid, struct b53_arl_entry *ent, u8 *idx)
{
DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
unsigned int i;
@@ -1495,10 +1493,10 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
if (ret)
return ret;
- bitmap_zero(free_bins, dev->num_arl_entries);
+ bitmap_zero(free_bins, dev->num_arl_bins);
/* Read the bins */
- for (i = 0; i < dev->num_arl_entries; i++) {
+ for (i = 0; i < dev->num_arl_bins; i++) {
u64 mac_vid;
u32 fwd_entry;
@@ -1521,10 +1519,10 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
return 0;
}
- if (bitmap_weight(free_bins, dev->num_arl_entries) == 0)
+ if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
return -ENOSPC;
- *idx = find_first_bit(free_bins, dev->num_arl_entries);
+ *idx = find_first_bit(free_bins, dev->num_arl_bins);
return -ENOENT;
}
@@ -1550,7 +1548,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
if (ret)
return ret;
- ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
+ ret = b53_arl_read(dev, mac, vid, &ent, &idx);
+
/* If this is a read, just finish now */
if (op)
return ret;
@@ -1692,7 +1691,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
if (ret)
return ret;
- if (priv->num_arl_entries > 2) {
+ if (priv->num_arl_bins > 2) {
b53_arl_search_rd(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
@@ -1702,7 +1701,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
break;
}
- } while (count++ < 1024);
+ } while (count++ < b53_max_arl_entries(priv) / 2);
return 0;
}
@@ -2185,7 +2184,8 @@ struct b53_chip_data {
u16 enabled_ports;
u8 cpu_port;
u8 vta_regs[3];
- u8 arl_entries;
+ u8 arl_bins;
+ u16 arl_buckets;
u8 duplex_reg;
u8 jumbo_pm_reg;
u8 jumbo_size_reg;
@@ -2204,7 +2204,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5325",
.vlans = 16,
.enabled_ports = 0x1f,
- .arl_entries = 2,
+ .arl_bins = 2,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
@@ -2213,7 +2214,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5365",
.vlans = 256,
.enabled_ports = 0x1f,
- .arl_entries = 2,
+ .arl_bins = 2,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
@@ -2222,7 +2224,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5389",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2234,7 +2237,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5395",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2246,7 +2250,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5397",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2258,7 +2263,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM5398",
.vlans = 4096,
.enabled_ports = 0x7f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2270,7 +2276,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53115",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.vta_regs = B53_VTA_REGS,
.cpu_port = B53_CPU_PORT,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2282,7 +2289,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53125",
.vlans = 4096,
.enabled_ports = 0xff,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2294,7 +2302,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53128",
.vlans = 4096,
.enabled_ports = 0x1ff,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2306,7 +2315,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM63xx",
.vlans = 4096,
.enabled_ports = 0, /* pdata must provide them */
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
@@ -2318,7 +2328,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53010",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2330,7 +2341,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53011",
.vlans = 4096,
.enabled_ports = 0x1bf,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2342,7 +2354,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53012",
.vlans = 4096,
.enabled_ports = 0x1bf,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2354,7 +2367,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53018",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2366,7 +2380,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53019",
.vlans = 4096,
.enabled_ports = 0x1f,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2378,7 +2393,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM585xx/586xx/88312",
.vlans = 4096,
.enabled_ports = 0x1ff,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2390,7 +2406,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM583xx/11360",
.vlans = 4096,
.enabled_ports = 0x103,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2402,7 +2419,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM7445",
.vlans = 4096,
.enabled_ports = 0x1ff,
- .arl_entries = 4,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2414,7 +2432,8 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM7278",
.vlans = 4096,
.enabled_ports = 0x1ff,
- .arl_entries= 4,
+ .arl_bins = 4,
+ .arl_buckets = 256,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
@@ -2442,7 +2461,8 @@ static int b53_switch_init(struct b53_device *dev)
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
dev->cpu_port = chip->cpu_port;
dev->num_vlans = chip->vlans;
- dev->num_arl_entries = chip->arl_entries;
+ dev->num_arl_bins = chip->arl_bins;
+ dev->num_arl_buckets = chip->arl_buckets;
break;
}
}
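
b53_arl_read() scans the bins of one ARL bucket and remembers which bins are free in a bitmap, so a subsequent insert can claim the first free slot or report a full bucket. A minimal sketch of that bookkeeping, assuming the caller has already decoded a per-bin valid flag:

#include <linux/bitmap.h>
#include <linux/errno.h>

#define MAX_BIN_ENTRIES 8	/* >= any chip's bins-per-bucket */

static int find_free_bin(const bool *bin_valid, unsigned int num_bins)
{
	DECLARE_BITMAP(free_bins, MAX_BIN_ENTRIES);
	unsigned int i;

	bitmap_zero(free_bins, num_bins);
	for (i = 0; i < num_bins; i++)
		if (!bin_valid[i])
			set_bit(i, free_bins);

	if (bitmap_weight(free_bins, num_bins) == 0)
		return -ENOSPC;	/* bucket full, nothing to overwrite */

	return find_first_bit(free_bins, num_bins);
}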
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 3d42318bc3f1..e942c60e4365 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -117,7 +117,8 @@ struct b53_device {
u8 jumbo_pm_reg;
u8 jumbo_size_reg;
int reset_gpio;
- u8 num_arl_entries;
+ u8 num_arl_bins;
+ u16 num_arl_buckets;
enum dsa_tag_protocol tag_protocol;
/* used ports mask */
@@ -212,6 +213,11 @@ static inline int is58xx(struct b53_device *dev)
#define B53_CPU_PORT_25 5
#define B53_CPU_PORT 8
+static inline unsigned int b53_max_arl_entries(struct b53_device *dev)
+{
+ return dev->num_arl_buckets * dev->num_arl_bins;
+}
+
struct b53_device *b53_switch_alloc(struct device *base,
const struct b53_io_ops *ops,
void *priv);
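
Worked example of the bounds this helper enables: BCM7278 has 4 bins x 256 buckets = 1024 ARL entries, so b53_max_arl_entries()/2 caps the b53_fdb_dump() search loop at 512 passes (each pass reads two results); the 1024-bucket chips get 4096/2 = 2048 passes, where the previous hard-coded limit of 1024 would have truncated the dump.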
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 0a1be5259be0..aaa12d73784e 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -524,7 +524,7 @@ static void b53_srab_prepare_irq(struct platform_device *pdev)
port->num = i;
port->dev = dev;
- port->irq = platform_get_irq_byname(pdev, name);
+ port->irq = platform_get_irq_byname_optional(pdev, name);
kfree(name);
}
@@ -609,7 +609,7 @@ static int b53_srab_probe(struct platform_device *pdev)
priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
- return -ENOMEM;
+ return PTR_ERR(priv->regs);
dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv);
if (!dev)
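
platform_get_irq_byname_optional() behaves like platform_get_irq_byname(), including the negative errno on failure, but does not log an error when the IRQ is absent, which suits b53_srab's per-port interrupts that many boards leave unwired. A minimal sketch:

#include <linux/platform_device.h>

static void port_irq_lookup(struct platform_device *pdev,
			    const char *name, int *irq)
{
	/*
	 * A missing entry still yields a negative errno (typically
	 * -ENXIO), but quietly; callers treat irq < 0 as "no
	 * interrupt wired".
	 */
	*irq = platform_get_irq_byname_optional(pdev, name);
}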
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index fdcb70b9f0e4..400207c5c7de 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -360,6 +360,7 @@ static void __exit dsa_loop_exit(void)
}
module_exit(dsa_loop_exit);
+MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 5c444cd722bd..8dcb8a49ab67 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -628,11 +628,8 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
- /* Disable auto learning on the cpu port */
- mt7530_set(priv, MT7530_PSC_P(port), SA_DIS);
-
- /* Unknown unicast frame fordwarding to the cpu port */
- mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port)));
+ /* Unknown multicast frame forwarding to the cpu port */
+ mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));
/* Set CPU port number */
if (priv->id == ID_MT7621)
@@ -810,10 +807,15 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
/* Trapped into security mode allows packet forwarding through VLAN
- * table lookup.
+ * table lookup. CPU port is set to fallback mode to let untagged
+ * frames pass through.
*/
- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
- MT7530_PORT_SECURITY_MODE);
+ if (dsa_is_cpu_port(ds, port))
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_FALLBACK_MODE);
+ else
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_SECURITY_MODE);
/* Set the port as a user port which is to be able to recognize VID
* from incoming packets before fetching entry within the VLAN table.
@@ -1080,12 +1082,6 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u16 vid;
- /* The port is kept as VLAN-unaware if bridge with vlan_filtering not
- * being set.
- */
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
- return;
-
mutex_lock(&priv->reg_mutex);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
@@ -1111,12 +1107,6 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u16 vid, pvid;
- /* The port is kept as VLAN-unaware if bridge with vlan_filtering not
- * being set.
- */
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
- return 0;
-
mutex_lock(&priv->reg_mutex);
pvid = priv->ports[port].pvid;
@@ -1230,6 +1220,7 @@ mt7530_setup(struct dsa_switch *ds)
* as two netdev instances.
*/
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
+ ds->configure_vlan_while_not_filtering = true;
if (priv->id == ID_MT7530) {
regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
@@ -1294,8 +1285,6 @@ mt7530_setup(struct dsa_switch *ds)
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
- mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK);
-
for (i = 0; i < MT7530_NUM_PORTS; i++) {
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
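
The VLAN-aware setup above now programs the CPU port into fallback mode while user ports stay in security mode: user ports still drop frames that violate VLAN membership or miss the VLAN table, but the CPU port forwards such frames by the port matrix so untagged traffic keeps flowing. A minimal sketch of the mode selection, reusing the driver's mt7530_rmw() helper and the PCR definitions from mt7530.h:

/* Clear the PORT_VLAN field of the Port Control Register and set the
 * wanted mode; other PCR bits are preserved by the read-modify-write.
 */
static void set_port_vlan_mode(struct mt7530_priv *priv, int port,
			       bool is_cpu_port)
{
	u32 mode = is_cpu_port ? MT7530_PORT_FALLBACK_MODE
			       : MT7530_PORT_SECURITY_MODE;

	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, mode);
}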
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 979bb6374678..14de60d0b9ca 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -31,6 +31,7 @@ enum {
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
#define UNM_FFP(x) (((x) & 0xff) << 16)
+#define UNM_FFP_MASK UNM_FFP(~0)
#define UNU_FFP(x) (((x) & 0xff) << 8)
#define UNU_FFP_MASK UNU_FFP(~0)
#define CPU_EN BIT(7)
@@ -152,6 +153,12 @@ enum mt7530_port_mode {
/* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
+ /* Fallback Mode: Forward received frames with ingress ports that do
+ * not belong to the VLAN member. Frames whose VID is not listed on
+ * the VLAN table are forwarded by the PCR_MATRIX members.
+ */
+ MT7530_PORT_FALLBACK_MODE = PORT_VLAN(1),
+
/* Security Mode: Discard any frame due to ingress membership
* violation or VID missed on the VLAN table.
*/
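
UNM_FFP_MASK is derived by feeding ~0 through the field macro itself, the standard trick for a mask that can never drift out of sync with the field's shift and width:

/*
 *   #define UNM_FFP(x)   (((x) & 0xff) << 16)
 *   #define UNM_FFP_MASK UNM_FFP(~0)        == 0x00ff0000
 *
 * Typical use, as in mt7530_cpu_port_enable() above:
 *
 *   mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));
 */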
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 2b4a723c8306..7627ea61e0ea 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2233,26 +2233,34 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
+static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
+ int tree_index, int sw_index,
int port, struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
+ if (tree_index != ds->dst->index)
+ return 0;
+
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_pvt_map(chip, dev, port);
+ err = mv88e6xxx_pvt_map(chip, sw_index, port);
mv88e6xxx_reg_unlock(chip);
return err;
}
-static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev,
+static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
+ int tree_index, int sw_index,
int port, struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ if (tree_index != ds->dst->index)
+ return;
+
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_pvt_map(chip, dev, port))
+ if (mv88e6xxx_pvt_map(chip, sw_index, port))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
mv88e6xxx_reg_unlock(chip);
}
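
The cross-chip ops now take both a tree index and a switch index because they are invoked for every switch in every DSA tree; a chip must only program its cross-chip Port VLAN Table for switches that share its tree, so each op starts with a tree guard. A minimal sketch:

#include <net/dsa.h>

static int crosschip_join(struct dsa_switch *ds, int tree_index,
			  int sw_index, int port, struct net_device *br)
{
	/* Foreign tree: no PVT entry on this chip can refer to it. */
	if (tree_index != ds->dst->index)
		return 0;

	/* ... map (sw_index, port) into this chip's PVT ... */
	return 0;
}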
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2098f19b534d..9c07b4f3d345 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -534,21 +534,21 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane,
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_PCS_CONTROL_1, &val);
+ MV88E6390_10G_CTRL1, &val);
if (err)
return err;
if (up)
- new_val = val & ~(MV88E6390_PCS_CONTROL_1_RESET |
- MV88E6390_PCS_CONTROL_1_LOOPBACK |
- MV88E6390_PCS_CONTROL_1_PDOWN);
+ new_val = val & ~(MDIO_CTRL1_RESET |
+ MDIO_PCS_CTRL1_LOOPBACK |
+ MDIO_CTRL1_LPOWER);
else
- new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN;
+ new_val = val | MDIO_CTRL1_LPOWER;
if (val != new_val)
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_PCS_CONTROL_1, new_val);
+ MV88E6390_10G_CTRL1, new_val);
return err;
}
@@ -748,8 +748,8 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
MV88E6390_SGMII_BMCR, bmcr);
}
-int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- u8 lane, struct phylink_link_state *state)
+static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
+ int port, u8 lane, struct phylink_link_state *state)
{
u16 lpa, status;
int err;
@@ -771,6 +771,45 @@ int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
}
+static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
+ int port, u8 lane, struct phylink_link_state *state)
+{
+ u16 status;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_10G_STAT1, &status);
+ if (err)
+ return err;
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ if (state->link) {
+ state->speed = SPEED_10000;
+ state->duplex = DUPLEX_FULL;
+ }
+
+ return 0;
+}
+
+int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state)
+{
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane,
+ state);
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return mv88e6390_serdes_pcs_get_state_10g(chip, port, lane,
+ state);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
u8 lane)
{
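
mv88e6390_serdes_pcs_get_state() is now a dispatcher: SGMII/1000BASE-X/2500BASE-X read the SGMII PCS as before, while XAUI/RXAUI read the clause-45 10G PCS STAT1 register, where there is no autonegotiation and link up implies a fixed operating point. A minimal sketch of the 10G decode using the generic MDIO definitions:

#include <linux/mdio.h>
#include <linux/phylink.h>

static void decode_10g_stat1(u16 status, struct phylink_link_state *state)
{
	state->link = !!(status & MDIO_STAT1_LSTATUS);
	if (state->link) {
		/* No AN on XAUI/RXAUI: link up means 10G full duplex. */
		state->speed = SPEED_10000;
		state->duplex = DUPLEX_FULL;
	}
}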
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 7990cadba4c2..14315f26228a 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -40,11 +40,8 @@
#define MV88E6390_PORT10_LANE3 0x17
/* 10GBASE-R and 10GBASE-X4/X2 */
-#define MV88E6390_PCS_CONTROL_1 0x1000
-#define MV88E6390_PCS_CONTROL_1_RESET BIT(15)
-#define MV88E6390_PCS_CONTROL_1_LOOPBACK BIT(14)
-#define MV88E6390_PCS_CONTROL_1_SPEED BIT(13)
-#define MV88E6390_PCS_CONTROL_1_PDOWN BIT(11)
+#define MV88E6390_10G_CTRL1 (0x1000 + MDIO_CTRL1)
+#define MV88E6390_10G_STAT1 (0x1000 + MDIO_STAT1)
/* 1000BASE-X and SGMII */
#define MV88E6390_SGMII_BMCR (0x2000 + MII_BMCR)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index e2c6bf0e430e..66648986e6e3 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -7,6 +7,7 @@
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/packing.h>
#include <linux/module.h>
@@ -102,13 +103,17 @@ static void felix_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
+ u16 flags = vlan->flags;
u16 vid;
int err;
+ if (dsa_is_cpu_port(ds, port))
+ flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
+
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
err = ocelot_vlan_add(ocelot, port, vid,
- vlan->flags & BRIDGE_VLAN_INFO_PVID,
- vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
if (err) {
dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
vid, port, err);
@@ -236,6 +241,10 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
if (felix->info->pcs_init)
felix->info->pcs_init(ocelot, port, link_an_mode, state);
+
+ if (felix->info->port_sched_speed_set)
+ felix->info->port_sched_speed_set(ocelot, port,
+ state->speed);
}
static void felix_phylink_mac_an_restart(struct dsa_switch *ds, int port)
@@ -288,6 +297,27 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
QSYS_SWITCH_PORT_MODE, port);
}
+static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
+{
+ int i;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_QOS_CFG_QOS_PCP_ENA,
+ ANA_PORT_QOS_CFG_QOS_PCP_ENA,
+ ANA_PORT_QOS_CFG,
+ port);
+
+ for (i = 0; i < FELIX_NUM_TC * 2; i++) {
+ ocelot_rmw_ix(ocelot,
+ (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
+ ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
+ ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
+ ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
+ ANA_PORT_PCP_DEI_MAP,
+ port, i);
+ }
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -388,6 +418,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
struct ocelot *ocelot = &felix->ocelot;
phy_interface_t *port_phy_modes;
resource_size_t switch_base;
+ struct resource res;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -422,17 +453,16 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (i = 0; i < TARGET_MAX; i++) {
struct regmap *target;
- struct resource *res;
if (!felix->info->target_io_res[i].name)
continue;
- res = &felix->info->target_io_res[i];
- res->flags = IORESOURCE_MEM;
- res->start += switch_base;
- res->end += switch_base;
+ memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.start += switch_base;
+ res.end += switch_base;
- target = ocelot_regmap_init(ocelot, res);
+ target = ocelot_regmap_init(ocelot, &res);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map device memory space\n");
@@ -453,7 +483,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
void __iomem *port_regs;
- struct resource *res;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
@@ -465,12 +494,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
}
- res = &felix->info->port_io_res[port];
- res->flags = IORESOURCE_MEM;
- res->start += switch_base;
- res->end += switch_base;
+ memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.start += switch_base;
+ res.end += switch_base;
- port_regs = devm_ioremap_resource(ocelot->dev, res);
+ port_regs = devm_ioremap_resource(ocelot->dev, &res);
if (IS_ERR(port_regs)) {
dev_err(ocelot->dev,
"failed to map registers for port %d\n", port);
@@ -495,6 +524,23 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return 0;
}
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "felix ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
/* Hardware initialization done here so that we can allocate structures with
* devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
* us to allocate structures twice (leak memory) and map PCI memory twice
@@ -505,12 +551,21 @@ static int felix_setup(struct dsa_switch *ds)
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
int port, err;
+ int tc;
err = felix_init_structs(felix, ds->num_ports);
if (err)
return err;
ocelot_init(ocelot);
+ if (ocelot->ptp) {
+ err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+ if (err) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ ocelot->ptp = 0;
+ }
+ }
for (port = 0; port < ds->num_ports; port++) {
ocelot_init_port(ocelot, port);
@@ -520,6 +575,11 @@ static int felix_setup(struct dsa_switch *ds)
ocelot_configure_cpu(ocelot, port,
OCELOT_TAG_PREFIX_NONE,
OCELOT_TAG_PREFIX_LONG);
+
+ /* Set the default QoS Classification based on PCP and DEI
+ * bits of vlan tag.
+ */
+ felix_port_qos_map_init(ocelot, port);
}
/* Include the CPU port module in the forwarding mask for unknown
@@ -530,8 +590,15 @@ static int felix_setup(struct dsa_switch *ds)
ocelot_write_rix(ocelot,
ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
ANA_PGID_PGID, PGID_UC);
+ /* Setup the per-traffic class flooding PGIDs */
+ for (tc = 0; tc < FELIX_NUM_TC; tc++)
+ ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST(PGID_UC),
+ ANA_FLOODING, tc);
ds->mtu_enforcement_ingress = true;
+ ds->configure_vlan_while_not_filtering = true;
/* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
* isn't instantiated for the Felix PF.
* In-band AN may take a few ms to complete, so we need to poll.
@@ -549,6 +616,7 @@ static void felix_teardown(struct dsa_switch *ds)
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
+ ocelot_deinit_timestamp(ocelot);
/* stop workqueue thread */
ocelot_deinit(ocelot);
}
@@ -670,6 +738,19 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port)
ocelot_port_policer_del(ocelot, port);
}
+static int felix_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->port_setup_tc)
+ return felix->info->port_setup_tc(ds, port, type, type_data);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.setup = felix_setup,
@@ -708,6 +789,7 @@ static const struct dsa_switch_ops felix_switch_ops = {
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
+ .port_setup_tc = felix_port_setup_tc,
};
static struct felix_info *felix_instance_tbl[] = {
@@ -740,6 +822,11 @@ static int felix_pci_probe(struct pci_dev *pdev,
struct felix *felix;
int err;
+ if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+ dev_info(&pdev->dev, "device is disabled, skipping\n");
+ return -ENODEV;
+ }
+
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "device enable failed\n");
@@ -791,6 +878,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
ds->dev = &pdev->dev;
ds->num_ports = felix->info->num_ports;
+ ds->num_tx_queues = felix->info->num_tx_queues;
ds->ops = &felix_switch_ops;
ds->priv = ocelot;
felix->ds = ds;
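
felix_init_structs() and vsc9959_mdio_bus_alloc() used to add the PCI base address into the shared resource templates in place, which corrupts the tables on a second probe (the offsets get applied twice) and prevents declaring them const. The fix copies each template into a local struct resource and fixes up only the copy. A minimal sketch of the pattern:

#include <linux/ioport.h>
#include <linux/string.h>

static void resolve_resource(const struct resource *template,
			     resource_size_t base, struct resource *res)
{
	memcpy(res, template, sizeof(*res));
	res->flags = IORESOURCE_MEM;
	/* Offset the local copy only; the const template stays pristine. */
	res->start += base;
	res->end += base;
}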
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 9af106513e53..a891736ca006 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -5,12 +5,13 @@
#define _MSCC_FELIX_H
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
+#define FELIX_NUM_TC 8
/* Platform-specific information */
struct felix_info {
- struct resource *target_io_res;
- struct resource *port_io_res;
- struct resource *imdio_res;
+ const struct resource *target_io_res;
+ const struct resource *port_io_res;
+ const struct resource *imdio_res;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
@@ -19,6 +20,7 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
+ int num_tx_queues;
struct vcap_field *vcap_is2_keys;
struct vcap_field *vcap_is2_actions;
const struct vcap_props *vcap;
@@ -34,6 +36,10 @@ struct felix_info {
struct phylink_link_state *state);
int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
phy_interface_t phy_mode);
+ int (*port_setup_tc)(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data);
+ void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
+ u32 speed);
};
extern struct felix_info felix_info_vsc9959;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 8bf395f12b47..1dd9e348152d 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -3,9 +3,12 @@
* Copyright 2018-2019 NXP Semiconductors
*/
#include <linux/fsl/enetc_mdio.h>
+#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
+#include <net/pkt_sched.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "felix.h"
@@ -27,6 +30,8 @@
#define USXGMII_LPA_DUPLEX(lpa) (((lpa) & GENMASK(12, 12)) >> 12)
#define USXGMII_LPA_SPEED(lpa) (((lpa) & GENMASK(11, 9)) >> 9)
+#define VSC9959_TAS_GCL_ENTRY_MAX 63
+
enum usxgmii_speed {
USXGMII_SPEED_10 = 0,
USXGMII_SPEED_100 = 1,
@@ -202,7 +207,7 @@ static const u32 vsc9959_qsys_regmap[] = {
REG(QSYS_QMAXSDU_CFG_6, 0x00f62c),
REG(QSYS_QMAXSDU_CFG_7, 0x00f648),
REG(QSYS_PREEMPTION_CFG, 0x00f664),
- REG_RESERVED(QSYS_CIR_CFG),
+ REG(QSYS_CIR_CFG, 0x000000),
REG(QSYS_EIR_CFG, 0x000004),
REG(QSYS_SE_CFG, 0x000008),
REG(QSYS_SE_DWRR_CFG, 0x00000c),
@@ -313,6 +318,8 @@ static const u32 vsc9959_ptp_regmap[] = {
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
REG(PTP_CFG_MISC, 0x0000a0),
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
@@ -333,10 +340,8 @@ static const u32 *vsc9959_regmap[] = {
[GCB] = vsc9959_gcb_regmap,
};
-/* Addresses are relative to the PCI device's base address and
- * will be fixed up at ioremap time.
- */
-static struct resource vsc9959_target_io_res[] = {
+/* Addresses are relative to the PCI device's base address */
+static const struct resource vsc9959_target_io_res[] = {
[ANA] = {
.start = 0x0280000,
.end = 0x028ffff,
@@ -379,7 +384,7 @@ static struct resource vsc9959_target_io_res[] = {
},
};
-static struct resource vsc9959_port_io_res[] = {
+static const struct resource vsc9959_port_io_res[] = {
{
.start = 0x0100000,
.end = 0x010ffff,
@@ -415,7 +420,7 @@ static struct resource vsc9959_port_io_res[] = {
/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
* SGMII/QSGMII MAC PCS can be found.
*/
-static struct resource vsc9959_imdio_res = {
+static const struct resource vsc9959_imdio_res = {
.start = 0x8030,
.end = 0x8040,
.name = "imdio",
@@ -1111,7 +1116,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
struct device *dev = ocelot->dev;
resource_size_t imdio_base;
void __iomem *imdio_regs;
- struct resource *res;
+ struct resource res;
struct enetc_hw *hw;
struct mii_bus *bus;
int port;
@@ -1128,12 +1133,12 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
imdio_base = pci_resource_start(felix->pdev,
felix->info->imdio_pci_bar);
- res = felix->info->imdio_res;
- res->flags = IORESOURCE_MEM;
- res->start += imdio_base;
- res->end += imdio_base;
+ memcpy(&res, felix->info->imdio_res, sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.start += imdio_base;
+ res.end += imdio_base;
- imdio_regs = devm_ioremap_resource(dev, res);
+ imdio_regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(imdio_regs)) {
dev_err(dev, "failed to map internal MDIO registers\n");
return PTR_ERR(imdio_regs);
@@ -1207,6 +1212,186 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_unregister(felix->imdio);
}
+static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
+ u32 speed)
+{
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_LINK_SPEED(speed),
+ QSYS_TAG_CONFIG_LINK_SPEED_M,
+ QSYS_TAG_CONFIG, port);
+}
+
+static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
+ u64 cycle_time,
+ struct timespec64 *new_base_ts)
+{
+ struct timespec64 ts;
+ ktime_t new_base_time;
+ ktime_t current_time;
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ new_base_time = base_time;
+
+ if (base_time < current_time) {
+ u64 nr_of_cycles = current_time - base_time;
+
+ do_div(nr_of_cycles, cycle_time);
+ new_base_time += cycle_time * (nr_of_cycles + 1);
+ }
+
+ *new_base_ts = ktime_to_timespec64(new_base_time);
+}
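
[Annotation] The helper above rounds an expired base time up to the first cycle boundary that lands after the current time. A standalone sketch of the same arithmetic with plain 64-bit nanosecond counters (illustrative values, not the ktime_t API):

#include <stdint.h>
#include <stdio.h>

/* Advance base_time by whole cycles until it lies in the future,
 * mirroring the arithmetic of vsc9959_new_base_time().
 */
static uint64_t advance_base_time(uint64_t base_time, uint64_t cycle_time,
				  uint64_t current_time)
{
	if (base_time >= current_time)
		return base_time;

	return base_time +
	       cycle_time * ((current_time - base_time) / cycle_time + 1);
}

int main(void)
{
	/* base 1000 ns, cycle 300 ns, now 2000 ns: three full cycles have
	 * elapsed, so the next start is 1000 + 4 * 300 = 2200 ns.
	 */
	printf("%llu\n",
	       (unsigned long long)advance_base_time(1000, 300, 2000));
	return 0;
}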
+
+static u32 vsc9959_tas_read_cfg_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, QSYS_TAS_PARAM_CFG_CTRL);
+}
+
+static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix,
+ struct tc_taprio_sched_entry *entry)
+{
+ ocelot_write(ocelot,
+ QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(gcl_ix) |
+ QSYS_GCL_CFG_REG_1_GATE_STATE(entry->gate_mask),
+ QSYS_GCL_CFG_REG_1);
+ ocelot_write(ocelot, entry->interval, QSYS_GCL_CFG_REG_2);
+}
+
+static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct timespec64 base_ts;
+ int ret, i;
+ u32 val;
+
+ if (!taprio->enable) {
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ QSYS_TAG_CONFIG, port);
+
+ return 0;
+ }
+
+ if (taprio->cycle_time > NSEC_PER_SEC ||
+ taprio->cycle_time_extension >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
+ return -ERANGE;
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Hardware errata - the admin config cannot be overwritten while a
+ * previous config is still pending; the TAS module would need a reset,
+ * so return -EBUSY instead.
+ */
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING)
+ return -EBUSY;
+
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF) |
+ QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(0xFF),
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE_M |
+ QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M,
+ QSYS_TAG_CONFIG, port);
+
+ vsc9959_new_base_time(ocelot, taprio->base_time,
+ taprio->cycle_time, &base_ts);
+ ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec), QSYS_PARAM_CFG_REG_2);
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val) |
+ QSYS_PARAM_CFG_REG_3_LIST_LENGTH(taprio->num_entries),
+ QSYS_PARAM_CFG_REG_3);
+ ocelot_write(ocelot, taprio->cycle_time, QSYS_PARAM_CFG_REG_4);
+ ocelot_write(ocelot, taprio->cycle_time_extension, QSYS_PARAM_CFG_REG_5);
+
+ for (i = 0; i < taprio->num_entries; i++)
+ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val,
+ !(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
+ 10, 100000);
+
+ return ret;
+}
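
[Annotation] The tail of the function uses the common write-then-poll handshake: software sets the self-clearing CONFIG_CHANGE bit and waits for the hardware to consume it, with readx_poll_timeout() supplying the sleep-and-retry loop. A minimal sketch of that pattern against a simulated status register (register layout and names are illustrative):

#include <stdio.h>

#define CONFIG_CHANGE (1u << 0)

static unsigned int fake_reg = CONFIG_CHANGE;

static unsigned int read_status(void)
{
	/* Simulate the hardware consuming the one-shot bit */
	unsigned int val = fake_reg;

	fake_reg &= ~CONFIG_CHANGE;
	return val;
}

/* Poll until the self-clearing bit drops or the retry budget runs out */
static int poll_config_change(int retries)
{
	while (retries--)
		if (!(read_status() & CONFIG_CHANGE))
			return 0;

	return -1; /* the driver would return -ETIMEDOUT here */
}

int main(void)
{
	printf("handshake %s\n",
	       poll_config_change(10) ? "timed out" : "done");
	return 0;
}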
+
+static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *cbs_qopt)
+{
+ struct ocelot *ocelot = ds->priv;
+ int port_ix = port * 8 + cbs_qopt->queue;
+ u32 rate, burst;
+
+ if (cbs_qopt->queue >= ds->num_tx_queues)
+ return -EINVAL;
+
+ if (!cbs_qopt->enable) {
+ ocelot_write_gix(ocelot, QSYS_CIR_CFG_CIR_RATE(0) |
+ QSYS_CIR_CFG_CIR_BURST(0),
+ QSYS_CIR_CFG, port_ix);
+
+ ocelot_rmw_gix(ocelot, 0, QSYS_SE_CFG_SE_AVB_ENA,
+ QSYS_SE_CFG, port_ix);
+
+ return 0;
+ }
+
+ /* Rate unit is 100 kbps */
+ rate = DIV_ROUND_UP(cbs_qopt->idleslope, 100);
+ /* Avoid using zero rate */
+ rate = clamp_t(u32, rate, 1, GENMASK(14, 0));
+ /* Burst unit is 4kB */
+ burst = DIV_ROUND_UP(cbs_qopt->hicredit, 4096);
+ /* Avoid using zero burst size */
+ burst = clamp_t(u32, burst, 1, GENMASK(5, 0));
+ ocelot_write_gix(ocelot,
+ QSYS_CIR_CFG_CIR_RATE(rate) |
+ QSYS_CIR_CFG_CIR_BURST(burst),
+ QSYS_CIR_CFG,
+ port_ix);
+
+ ocelot_rmw_gix(ocelot,
+ QSYS_SE_CFG_SE_FRM_MODE(0) |
+ QSYS_SE_CFG_SE_AVB_ENA,
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE_M,
+ QSYS_SE_CFG,
+ port_ix);
+
+ return 0;
+}
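
[Annotation] To make the unit conversions above concrete, a standalone sketch with sample tc-cbs values; the 15-bit rate field and 6-bit burst field widths are taken from the GENMASK() bounds in the code:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	uint32_t idleslope = 20000;	/* kbps, i.e. 20 Mbps */
	uint32_t hicredit = 6000;	/* bytes */

	/* Rate field counts 100 kbps units, 15 bits wide */
	uint32_t rate = clamp_u32(DIV_ROUND_UP(idleslope, 100), 1, 0x7fff);
	/* Burst field counts 4 kB units, 6 bits wide */
	uint32_t burst = clamp_u32(DIV_ROUND_UP(hicredit, 4096), 1, 0x3f);

	printf("rate=%u (x100 kbps), burst=%u (x4 kB)\n", rate, burst);
	/* prints rate=200, burst=2 */
	return 0;
}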
+
+static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return vsc9959_qos_port_tas_set(ocelot, port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return vsc9959_qos_port_cbs_set(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
struct felix_info felix_info_vsc9959 = {
.target_io_res = vsc9959_target_io_res,
.port_io_res = vsc9959_port_io_res,
@@ -1222,6 +1407,7 @@ struct felix_info felix_info_vsc9959 = {
.shared_queue_sz = 128 * 1024,
.num_mact_rows = 2048,
.num_ports = 6,
+ .num_tx_queues = FELIX_NUM_TC,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
@@ -1230,4 +1416,6 @@ struct felix_info felix_info_vsc9959 = {
.pcs_an_restart = vsc9959_pcs_an_restart,
.pcs_link_state = vsc9959_pcs_link_state,
.prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
+ .port_setup_tc = vsc9959_port_setup_tc,
+ .port_sched_speed_set = vsc9959_sched_speed_set,
};
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index 68c3086af9af..5e83b365f17a 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -34,3 +34,12 @@ config NET_DSA_SJA1105_TAS
This enables support for the TTEthernet-based egress scheduling
engine in the SJA1105 DSA driver, which is controlled using a
hardware offload of the tc-taprio qdisc.
+
+config NET_DSA_SJA1105_VL
+ bool "Support for Virtual Links on NXP SJA1105"
+ depends on NET_DSA_SJA1105_TAS
+ help
+ This enables support for flow classification using capable devices
+ (SJA1105T, SJA1105Q, SJA1105S). The following actions are supported:
+ - redirect, trap, drop
+ - time-based ingress policing, via the tc-gate action
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
index 8943d8d66f2b..c88e56a29db8 100644
--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -17,3 +17,7 @@ endif
ifdef CONFIG_NET_DSA_SJA1105_TAS
sja1105-objs += sja1105_tas.o
endif
+
+ifdef CONFIG_NET_DSA_SJA1105_VL
+sja1105-objs += sja1105_vl.o
+endif
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 8b60dbd567f2..29ed21687295 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -8,6 +8,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/dsa/sja1105.h>
+#include <linux/dsa/8021q.h>
#include <net/dsa.h>
#include <linux/mutex.h>
#include "sja1105_static_config.h"
@@ -36,6 +37,7 @@ struct sja1105_regs {
u64 status;
u64 port_control;
u64 rgu;
+ u64 vl_status;
u64 config;
u64 sgmii;
u64 rmii_pll1;
@@ -49,6 +51,7 @@ struct sja1105_regs {
u64 ptpschtm;
u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
+ u64 pad_mii_rx[SJA1105_NUM_PORTS];
u64 pad_mii_id[SJA1105_NUM_PORTS];
u64 cgu_idiv[SJA1105_NUM_PORTS];
u64 mii_tx_clk[SJA1105_NUM_PORTS];
@@ -81,9 +84,16 @@ struct sja1105_info {
* the egress timestamps.
*/
int ptpegr_ts_bytes;
+ int num_cbs_shapers;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
+ /* Both E/T and P/Q/R/S have quirks when it comes to popping the S-Tag
+ * from double-tagged frames. E/T will pop it only when it's equal to
+ * TPID from the General Parameters Table, while P/Q/R/S will only
+ * pop it when it's equal to TPID2.
+ */
+ u16 qinq_tpid;
int (*reset_cmd)(struct dsa_switch *ds);
int (*setup_rgmii_delay)(const void *ctx, int port);
/* Prototypes from include/net/dsa.h */
@@ -96,17 +106,52 @@ struct sja1105_info {
const char *name;
};
+enum sja1105_key_type {
+ SJA1105_KEY_BCAST,
+ SJA1105_KEY_TC,
+ SJA1105_KEY_VLAN_UNAWARE_VL,
+ SJA1105_KEY_VLAN_AWARE_VL,
+};
+
+struct sja1105_key {
+ enum sja1105_key_type type;
+
+ union {
+ /* SJA1105_KEY_TC */
+ struct {
+ int pcp;
+ } tc;
+
+ /* SJA1105_KEY_VLAN_UNAWARE_VL */
+ /* SJA1105_KEY_VLAN_AWARE_VL */
+ struct {
+ u64 dmac;
+ u16 vid;
+ u16 pcp;
+ } vl;
+ };
+};
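
[Annotation] Each flower match gets distilled into one of these key types further down in sja1105_flower.c. For illustration, a standalone sketch of filling them in (struct mirrored from the definitions above with simplified names; the MAC/VID/PCP values are made up):

#include <stdint.h>

enum key_type { KEY_BCAST, KEY_TC, KEY_VLAN_UNAWARE_VL, KEY_VLAN_AWARE_VL };

struct key {
	enum key_type type;
	union {
		struct { int pcp; } tc;
		struct { uint64_t dmac; uint16_t vid; uint16_t pcp; } vl;
	};
};

int main(void)
{
	/* e.g. matching on vlan_prio 5 only -> per-TC key */
	struct key tc_key = { .type = KEY_TC, .tc.pcp = 5 };

	/* e.g. matching on dst_mac + vlan_id 100 + vlan_prio 6 ->
	 * VLAN-aware virtual link key
	 */
	struct key vl_key = {
		.type = KEY_VLAN_AWARE_VL,
		.vl = { .dmac = 0x0180c200000eULL, .vid = 100, .pcp = 6 },
	};

	(void)tc_key;
	(void)vl_key;
	return 0;
}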
+
enum sja1105_rule_type {
SJA1105_RULE_BCAST_POLICER,
SJA1105_RULE_TC_POLICER,
+ SJA1105_RULE_VL,
+};
+
+enum sja1105_vl_type {
+ SJA1105_VL_NONCRITICAL,
+ SJA1105_VL_RATE_CONSTRAINED,
+ SJA1105_VL_TIME_TRIGGERED,
};
struct sja1105_rule {
struct list_head list;
unsigned long cookie;
unsigned long port_mask;
+ struct sja1105_key key;
enum sja1105_rule_type type;
+ /* Action */
union {
/* SJA1105_RULE_BCAST_POLICER */
struct {
@@ -116,30 +161,65 @@ struct sja1105_rule {
/* SJA1105_RULE_TC_POLICER */
struct {
int sharindx;
- int tc;
} tc_pol;
+
+ /* SJA1105_RULE_VL */
+ struct {
+ enum sja1105_vl_type type;
+ unsigned long destports;
+ int sharindx;
+ int maxlen;
+ int ipv;
+ u64 base_time;
+ u64 cycle_time;
+ int num_entries;
+ struct action_gate_entry *entries;
+ struct flow_stats stats;
+ } vl;
};
};
struct sja1105_flow_block {
struct list_head rules;
bool l2_policer_used[SJA1105_NUM_L2_POLICERS];
+ int num_virtual_links;
+};
+
+struct sja1105_bridge_vlan {
+ struct list_head list;
+ int port;
+ u16 vid;
+ bool pvid;
+ bool untagged;
+};
+
+enum sja1105_vlan_state {
+ SJA1105_VLAN_UNAWARE,
+ SJA1105_VLAN_BEST_EFFORT,
+ SJA1105_VLAN_FILTERING_FULL,
};
struct sja1105_private {
struct sja1105_static_config static_config;
bool rgmii_rx_delay[SJA1105_NUM_PORTS];
bool rgmii_tx_delay[SJA1105_NUM_PORTS];
+ bool best_effort_vlan_filtering;
const struct sja1105_info *info;
struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
+ struct list_head dsa_8021q_vlans;
+ struct list_head bridge_vlans;
+ struct list_head crosschip_links;
struct sja1105_flow_block flow_block;
struct sja1105_port ports[SJA1105_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ bool expect_dsa_8021q;
+ enum sja1105_vlan_state vlan_state;
+ struct sja1105_cbs_entry *cbs;
struct sja1105_tagger_data tagger_data;
struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
@@ -160,11 +240,14 @@ enum sja1105_reset_reason {
SJA1105_AGEING_TIME,
SJA1105_SCHEDULING,
SJA1105_BEST_EFFORT_POLICING,
+ SJA1105_VIRTUAL_LINKS,
};
int sja1105_static_config_reload(struct sja1105_private *priv,
enum sja1105_reset_reason reason);
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
+
/* From sja1105_spi.c */
int sja1105_xfer_buf(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr,
@@ -240,26 +323,16 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
-/* Common implementations for the static and dynamic configs */
-size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op);
-size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op);
-size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op);
-size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op);
-size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op);
-size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op);
-
/* From sja1105_flower.c */
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress);
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
void sja1105_flower_setup(struct dsa_switch *ds);
void sja1105_flower_teardown(struct dsa_switch *ds);
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie);
#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 0fdc2d55fff6..2a9b8a6a5306 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -7,12 +7,16 @@
#define SJA1105_SIZE_CGU_CMD 4
-struct sja1105_cfg_pad_mii_tx {
+/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
+struct sja1105_cfg_pad_mii {
u64 d32_os;
+ u64 d32_ih;
u64 d32_ipud;
+ u64 d10_ih;
u64 d10_os;
u64 d10_ipud;
u64 ctrl_os;
+ u64 ctrl_ih;
u64 ctrl_ipud;
u64 clk_os;
u64 clk_ih;
@@ -338,16 +342,19 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
/* AGU */
static void
-sja1105_cfg_pad_mii_tx_packing(void *buf, struct sja1105_cfg_pad_mii_tx *cmd,
- enum packing_op op)
+sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd,
+ enum packing_op op)
{
const int size = 4;
sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
+ sja1105_packing(buf, &cmd->d32_ih, 26, 26, size, op);
sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
+ sja1105_packing(buf, &cmd->d10_ih, 18, 18, size, op);
sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ih, 10, 10, size, op);
sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
@@ -358,7 +365,7 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
int port)
{
const struct sja1105_regs *regs = priv->info->regs;
- struct sja1105_cfg_pad_mii_tx pad_mii_tx;
+ struct sja1105_cfg_pad_mii pad_mii_tx = {0};
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
/* Payload */
@@ -375,12 +382,45 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
pad_mii_tx.clk_os = 3; /* TX_CLK output stage */
pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */
pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
- sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
+static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii pad_mii_rx = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ /* Payload */
+ pad_mii_rx.d32_ih = 0; /* RXD[3:2] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d32_ipud = 2; /* RXD[3:2] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.d10_ih = 0; /* RXD[1:0] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d10_ipud = 2; /* RXD[1:0] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.ctrl_ih = 0; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.ctrl_ipud = 3; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage weak pull-up/down: */
+ /* pull-down */
+ pad_mii_rx.clk_os = 2; /* RX_CLK/RXC output stage: */
+ /* medium noise/fast speed (default) */
+ pad_mii_rx.clk_ih = 0; /* RX_CLK/RXC input hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.clk_ipud = 2; /* RX_CLK/RXC input pull-up/down: */
+ /* plain input (default) */
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
static void
sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
enum packing_op op)
@@ -669,10 +709,14 @@ int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
phy_mode);
return -EINVAL;
}
- if (rc)
+ if (rc) {
dev_err(dev, "Clocking setup for port %d failed: %d\n",
port, rc);
- return rc;
+ return rc;
+ }
+
+ /* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */
+ return sja1105_cfg_pad_rx_config(priv, port);
}
int sja1105_clocking_setup(struct sja1105_private *priv)
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index bf9b36ff35bf..4471eeccc293 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -97,6 +97,12 @@
#define SJA1105_SIZE_DYN_CMD 4
+#define SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
+
#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
SJA1105_SIZE_DYN_CMD
@@ -121,14 +127,29 @@
#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
SJA1105_SIZE_DYN_CMD
+#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+
#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
SJA1105_SIZE_DYN_CMD
+#define SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY)
+
#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
+#define SJA1105_SIZE_RETAGGING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_RETAGGING_ENTRY)
+
+#define SJA1105ET_SIZE_CBS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_CBS_ENTRY)
+
+#define SJA1105PQRS_SIZE_CBS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_CBS_ENTRY)
+
#define SJA1105_MAX_DYN_CMD_SIZE \
- SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD
+ SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD
struct sja1105_dyn_cmd {
bool search;
@@ -147,6 +168,29 @@ enum sja1105_hostcmd {
};
static void
+sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->index, 9, 0, size, op);
+}
+
+static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD;
+
+ sja1105_packing(buf, &entry->egrmirr, 21, 17, size, op);
+ sja1105_packing(buf, &entry->ingrmirr, 16, 16, size, op);
+ return size;
+}
+
+static void
sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -463,6 +507,18 @@ sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
}
static void
+sja1105pqrs_l2_lookup_params_cmd_packing(void *buf,
+ struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+}
+
+static void
sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -485,6 +541,18 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
}
static void
+sja1105pqrs_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
+}
+
+static void
sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -496,6 +564,74 @@ sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
}
+static void
+sja1105_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 29, 29, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
+ sja1105_packing(p, &cmd->index, 5, 0, size, op);
+}
+
+static void sja1105et_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->index, 19, 16, size, op);
+}
+
+static size_t sja1105et_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+ u8 *cmd = buf + size;
+ u32 *p = buf;
+
+ sja1105_packing(cmd, &entry->port, 5, 3, SJA1105_SIZE_DYN_CMD, op);
+ sja1105_packing(cmd, &entry->prio, 2, 0, SJA1105_SIZE_DYN_CMD, op);
+ sja1105_packing(p + 3, &entry->credit_lo, 31, 0, size, op);
+ sja1105_packing(p + 2, &entry->credit_hi, 31, 0, size, op);
+ sja1105_packing(p + 1, &entry->send_slope, 31, 0, size, op);
+ sja1105_packing(p + 0, &entry->idle_slope, 31, 0, size, op);
+ return size;
+}
+
+static void sja1105pqrs_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 3, 0, size, op);
+}
+
+static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->port, 159, 157, size, op);
+ sja1105_packing(buf, &entry->prio, 156, 154, size, op);
+ sja1105_packing(buf, &entry->credit_lo, 153, 122, size, op);
+ sja1105_packing(buf, &entry->credit_hi, 121, 90, size, op);
+ sja1105_packing(buf, &entry->send_slope, 89, 58, size, op);
+ sja1105_packing(buf, &entry->idle_slope, 57, 26, size, op);
+ return size;
+}
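
[Annotation] All of the *_cmd_packing() helpers above follow the same buffer convention: the entry image is packed at the start of the SPI buffer and the command word rides behind it, which is why they compute p = buf + ENTRY_SIZE. A trivial standalone sketch of that layout (sizes illustrative):

#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE 20	/* e.g. SJA1105PQRS_SIZE_CBS_ENTRY */
#define CMD_SIZE   4	/* SJA1105_SIZE_DYN_CMD */

int main(void)
{
	/* Packed buffer layout: [ entry image | command word ] */
	unsigned char buf[ENTRY_SIZE + CMD_SIZE];

	memset(buf, 0xaa, ENTRY_SIZE);		  /* entry_packing() area */
	memset(buf + ENTRY_SIZE, 0x55, CMD_SIZE); /* cmd_packing() area */

	printf("cmd word starts at offset %d of %zu\n",
	       ENTRY_SIZE, sizeof(buf));
	return 0;
}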
+
#define OP_READ BIT(0)
#define OP_WRITE BIT(1)
#define OP_DEL BIT(2)
@@ -505,6 +641,16 @@ sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105et_vl_lookup_entry_packing,
+ .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .access = OP_WRITE,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x35,
+ },
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1105et_l2_lookup_cmd_packing,
@@ -548,6 +694,7 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.entry_packing = sja1105et_l2_lookup_params_entry_packing,
.cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
@@ -566,6 +713,22 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
.packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
.addr = 0x34,
},
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x31,
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1105et_cbs_entry_packing,
+ .cmd_packing = sja1105et_cbs_cmd_packing,
+ .max_entry_count = SJA1105ET_MAX_CBS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_CBS_DYN_CMD,
+ .addr = 0x2c,
+ },
[BLK_IDX_XMII_PARAMS] = {0},
};
@@ -573,6 +736,16 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105_vl_lookup_entry_packing,
+ .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE),
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x47,
+ },
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
@@ -616,13 +789,14 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
- .entry_packing = sja1105et_l2_lookup_params_entry_packing,
- .cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
+ .entry_packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_params_cmd_packing,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
.access = (OP_READ | OP_WRITE),
- .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
- .addr = 0x38,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x54,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {0},
[BLK_IDX_AVB_PARAMS] = {
@@ -634,12 +808,28 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
.addr = 0x8003,
},
[BLK_IDX_GENERAL_PARAMS] = {
- .entry_packing = sja1105et_general_params_entry_packing,
- .cmd_packing = sja1105et_general_params_cmd_packing,
+ .entry_packing = sja1105pqrs_general_params_entry_packing,
+ .cmd_packing = sja1105pqrs_general_params_cmd_packing,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x3B,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1105pqrs_cbs_entry_packing,
+ .cmd_packing = sja1105pqrs_cbs_cmd_packing,
+ .max_entry_count = SJA1105PQRS_MAX_CBS_COUNT,
.access = OP_WRITE,
- .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
- .addr = 0x34,
+ .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
+ .addr = 0x32,
},
[BLK_IDX_XMII_PARAMS] = {0},
};
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index d742ffcbfce9..9133a831ec79 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -421,92 +421,96 @@ static char sja1105pqrs_extra_port_stats[][ETH_GSTRING_LEN] = {
void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_port_status status;
+ struct sja1105_port_status *status;
int rc, i, k = 0;
- memset(&status, 0, sizeof(status));
+ status = kzalloc(sizeof(*status), GFP_KERNEL);
+ if (!status)
+ goto out;
- rc = sja1105_port_status_get(priv, &status, port);
+ rc = sja1105_port_status_get(priv, status, port);
if (rc < 0) {
dev_err(ds->dev, "Failed to read port %d counters: %d\n",
port, rc);
- return;
+ goto out;
}
memset(data, 0, ARRAY_SIZE(sja1105_port_stats) * sizeof(u64));
- data[k++] = status.mac.n_runt;
- data[k++] = status.mac.n_soferr;
- data[k++] = status.mac.n_alignerr;
- data[k++] = status.mac.n_miierr;
- data[k++] = status.mac.typeerr;
- data[k++] = status.mac.sizeerr;
- data[k++] = status.mac.tctimeout;
- data[k++] = status.mac.priorerr;
- data[k++] = status.mac.nomaster;
- data[k++] = status.mac.memov;
- data[k++] = status.mac.memerr;
- data[k++] = status.mac.invtyp;
- data[k++] = status.mac.intcyov;
- data[k++] = status.mac.domerr;
- data[k++] = status.mac.pcfbagdrop;
- data[k++] = status.mac.spcprior;
- data[k++] = status.mac.ageprior;
- data[k++] = status.mac.portdrop;
- data[k++] = status.mac.lendrop;
- data[k++] = status.mac.bagdrop;
- data[k++] = status.mac.policeerr;
- data[k++] = status.mac.drpnona664err;
- data[k++] = status.mac.spcerr;
- data[k++] = status.mac.agedrp;
- data[k++] = status.hl1.n_n664err;
- data[k++] = status.hl1.n_vlanerr;
- data[k++] = status.hl1.n_unreleased;
- data[k++] = status.hl1.n_sizeerr;
- data[k++] = status.hl1.n_crcerr;
- data[k++] = status.hl1.n_vlnotfound;
- data[k++] = status.hl1.n_ctpolerr;
- data[k++] = status.hl1.n_polerr;
- data[k++] = status.hl1.n_rxfrm;
- data[k++] = status.hl1.n_rxbyte;
- data[k++] = status.hl1.n_txfrm;
- data[k++] = status.hl1.n_txbyte;
- data[k++] = status.hl2.n_qfull;
- data[k++] = status.hl2.n_part_drop;
- data[k++] = status.hl2.n_egr_disabled;
- data[k++] = status.hl2.n_not_reach;
+ data[k++] = status->mac.n_runt;
+ data[k++] = status->mac.n_soferr;
+ data[k++] = status->mac.n_alignerr;
+ data[k++] = status->mac.n_miierr;
+ data[k++] = status->mac.typeerr;
+ data[k++] = status->mac.sizeerr;
+ data[k++] = status->mac.tctimeout;
+ data[k++] = status->mac.priorerr;
+ data[k++] = status->mac.nomaster;
+ data[k++] = status->mac.memov;
+ data[k++] = status->mac.memerr;
+ data[k++] = status->mac.invtyp;
+ data[k++] = status->mac.intcyov;
+ data[k++] = status->mac.domerr;
+ data[k++] = status->mac.pcfbagdrop;
+ data[k++] = status->mac.spcprior;
+ data[k++] = status->mac.ageprior;
+ data[k++] = status->mac.portdrop;
+ data[k++] = status->mac.lendrop;
+ data[k++] = status->mac.bagdrop;
+ data[k++] = status->mac.policeerr;
+ data[k++] = status->mac.drpnona664err;
+ data[k++] = status->mac.spcerr;
+ data[k++] = status->mac.agedrp;
+ data[k++] = status->hl1.n_n664err;
+ data[k++] = status->hl1.n_vlanerr;
+ data[k++] = status->hl1.n_unreleased;
+ data[k++] = status->hl1.n_sizeerr;
+ data[k++] = status->hl1.n_crcerr;
+ data[k++] = status->hl1.n_vlnotfound;
+ data[k++] = status->hl1.n_ctpolerr;
+ data[k++] = status->hl1.n_polerr;
+ data[k++] = status->hl1.n_rxfrm;
+ data[k++] = status->hl1.n_rxbyte;
+ data[k++] = status->hl1.n_txfrm;
+ data[k++] = status->hl1.n_txbyte;
+ data[k++] = status->hl2.n_qfull;
+ data[k++] = status->hl2.n_part_drop;
+ data[k++] = status->hl2.n_egr_disabled;
+ data[k++] = status->hl2.n_not_reach;
if (priv->info->device_id == SJA1105E_DEVICE_ID ||
priv->info->device_id == SJA1105T_DEVICE_ID)
- return;
+ goto out;
memset(data + k, 0, ARRAY_SIZE(sja1105pqrs_extra_port_stats) *
sizeof(u64));
for (i = 0; i < 8; i++) {
- data[k++] = status.hl2.qlevel_hwm[i];
- data[k++] = status.hl2.qlevel[i];
+ data[k++] = status->hl2.qlevel_hwm[i];
+ data[k++] = status->hl2.qlevel[i];
}
- data[k++] = status.ether.n_drops_nolearn;
- data[k++] = status.ether.n_drops_noroute;
- data[k++] = status.ether.n_drops_ill_dtag;
- data[k++] = status.ether.n_drops_dtag;
- data[k++] = status.ether.n_drops_sotag;
- data[k++] = status.ether.n_drops_sitag;
- data[k++] = status.ether.n_drops_utag;
- data[k++] = status.ether.n_tx_bytes_1024_2047;
- data[k++] = status.ether.n_tx_bytes_512_1023;
- data[k++] = status.ether.n_tx_bytes_256_511;
- data[k++] = status.ether.n_tx_bytes_128_255;
- data[k++] = status.ether.n_tx_bytes_65_127;
- data[k++] = status.ether.n_tx_bytes_64;
- data[k++] = status.ether.n_tx_mcast;
- data[k++] = status.ether.n_tx_bcast;
- data[k++] = status.ether.n_rx_bytes_1024_2047;
- data[k++] = status.ether.n_rx_bytes_512_1023;
- data[k++] = status.ether.n_rx_bytes_256_511;
- data[k++] = status.ether.n_rx_bytes_128_255;
- data[k++] = status.ether.n_rx_bytes_65_127;
- data[k++] = status.ether.n_rx_bytes_64;
- data[k++] = status.ether.n_rx_mcast;
- data[k++] = status.ether.n_rx_bcast;
+ data[k++] = status->ether.n_drops_nolearn;
+ data[k++] = status->ether.n_drops_noroute;
+ data[k++] = status->ether.n_drops_ill_dtag;
+ data[k++] = status->ether.n_drops_dtag;
+ data[k++] = status->ether.n_drops_sotag;
+ data[k++] = status->ether.n_drops_sitag;
+ data[k++] = status->ether.n_drops_utag;
+ data[k++] = status->ether.n_tx_bytes_1024_2047;
+ data[k++] = status->ether.n_tx_bytes_512_1023;
+ data[k++] = status->ether.n_tx_bytes_256_511;
+ data[k++] = status->ether.n_tx_bytes_128_255;
+ data[k++] = status->ether.n_tx_bytes_65_127;
+ data[k++] = status->ether.n_tx_bytes_64;
+ data[k++] = status->ether.n_tx_mcast;
+ data[k++] = status->ether.n_tx_bcast;
+ data[k++] = status->ether.n_rx_bytes_1024_2047;
+ data[k++] = status->ether.n_rx_bytes_512_1023;
+ data[k++] = status->ether.n_rx_bytes_256_511;
+ data[k++] = status->ether.n_rx_bytes_128_255;
+ data[k++] = status->ether.n_rx_bytes_65_127;
+ data[k++] = status->ether.n_rx_bytes_64;
+ data[k++] = status->ether.n_rx_mcast;
+ data[k++] = status->ether.n_rx_bcast;
+out:
+ kfree(status);
}
void sja1105_get_strings(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 5288a722e625..9ee8968610cd 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -2,9 +2,10 @@
/* Copyright 2020, NXP Semiconductors
*/
#include "sja1105.h"
+#include "sja1105_vl.h"
-static struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
- unsigned long cookie)
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie)
{
struct sja1105_rule *rule;
@@ -46,6 +47,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
rule->cookie = cookie;
rule->type = SJA1105_RULE_BCAST_POLICER;
rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->key.type = SJA1105_KEY_BCAST;
new_rule = true;
}
@@ -117,7 +119,8 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv,
rule->cookie = cookie;
rule->type = SJA1105_RULE_TC_POLICER;
rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
- rule->tc_pol.tc = tc;
+ rule->key.type = SJA1105_KEY_TC;
+ rule->key.tc.pcp = tc;
new_rule = true;
}
@@ -169,14 +172,38 @@ out:
return rc;
}
-static int sja1105_flower_parse_policer(struct sja1105_private *priv, int port,
- struct netlink_ext_ack *extack,
- struct flow_cls_offload *cls,
- u64 rate_bytes_per_sec,
- s64 burst)
+static int sja1105_flower_policer(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ u64 rate_bytes_per_sec,
+ s64 burst)
+{
+ switch (key->type) {
+ case SJA1105_KEY_BCAST:
+ return sja1105_setup_bcast_policer(priv, extack, cookie, port,
+ rate_bytes_per_sec, burst);
+ case SJA1105_KEY_TC:
+ return sja1105_setup_tc_policer(priv, extack, cookie, port,
+ key->tc.pcp, rate_bytes_per_sec,
+ burst);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sja1105_flower_parse_key(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ struct sja1105_key *key)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
+ bool is_bcast_dmac = false;
+ u64 dmac = U64_MAX;
+ u16 vid = U16_MAX;
+ u16 pcp = U16_MAX;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -213,16 +240,14 @@ static int sja1105_flower_parse_policer(struct sja1105_private *priv, int port,
return -EOPNOTSUPP;
}
- if (!ether_addr_equal_masked(match.key->dst, bcast,
- match.mask->dst)) {
+ if (!ether_addr_equal(match.mask->dst, bcast)) {
NL_SET_ERR_MSG_MOD(extack,
- "Only matching on broadcast DMAC is supported");
+ "Masked matching on MAC not supported");
return -EOPNOTSUPP;
}
- return sja1105_setup_bcast_policer(priv, extack, cls->cookie,
- port, rate_bytes_per_sec,
- burst);
+ dmac = ether_addr_to_u64(match.key->dst);
+ is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -230,22 +255,46 @@ static int sja1105_flower_parse_policer(struct sja1105_private *priv, int port,
flow_rule_match_vlan(rule, &match);
- if (match.key->vlan_id & match.mask->vlan_id) {
+ if (match.mask->vlan_id &&
+ match.mask->vlan_id != VLAN_VID_MASK) {
NL_SET_ERR_MSG_MOD(extack,
- "Matching on VID is not supported");
+ "Masked matching on VID is not supported");
return -EOPNOTSUPP;
}
- if (match.mask->vlan_priority != 0x7) {
+ if (match.mask->vlan_priority &&
+ match.mask->vlan_priority != 0x7) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching on PCP is not supported");
return -EOPNOTSUPP;
}
- return sja1105_setup_tc_policer(priv, extack, cls->cookie, port,
- match.key->vlan_priority,
- rate_bytes_per_sec,
- burst);
+ if (match.mask->vlan_id)
+ vid = match.key->vlan_id;
+ if (match.mask->vlan_priority)
+ pcp = match.key->vlan_priority;
+ }
+
+ if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
+ key->type = SJA1105_KEY_BCAST;
+ return 0;
+ }
+ if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_TC;
+ key->tc.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_VLAN_AWARE_VL;
+ key->vl.dmac = dmac;
+ key->vl.vid = vid;
+ key->vl.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX) {
+ key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
+ key->vl.dmac = dmac;
+ return 0;
}
NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
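
[Annotation] The fall-through chain above classifies a parsed match by which fields were actually present, with U64_MAX/U16_MAX standing for "not matched on". A standalone rendering of the same decision tree (simplified names, made-up values):

#include <stdint.h>
#include <stdio.h>

enum key_type { KEY_BCAST, KEY_TC, KEY_VLAN_AWARE_VL, KEY_VLAN_UNAWARE_VL,
		KEY_UNSUPPORTED };

static enum key_type classify(int bcast_dmac, uint64_t dmac, uint16_t vid,
			      uint16_t pcp)
{
	if (bcast_dmac && vid == UINT16_MAX && pcp == UINT16_MAX)
		return KEY_BCAST;
	if (dmac == UINT64_MAX && vid == UINT16_MAX && pcp != UINT16_MAX)
		return KEY_TC;
	if (dmac != UINT64_MAX && vid != UINT16_MAX && pcp != UINT16_MAX)
		return KEY_VLAN_AWARE_VL;
	if (dmac != UINT64_MAX)
		return KEY_VLAN_UNAWARE_VL;
	return KEY_UNSUPPORTED;
}

int main(void)
{
	/* dst_mac + vlan_id + vlan_prio -> VLAN-aware virtual link (2) */
	printf("%d\n", classify(0, 0x0180c200000eULL, 100, 6));
	/* vlan_prio alone -> per-TC policer key (1) */
	printf("%d\n", classify(0, UINT64_MAX, UINT16_MAX, 7));
	return 0;
}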
@@ -259,22 +308,110 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack = cls->common.extack;
struct sja1105_private *priv = ds->priv;
const struct flow_action_entry *act;
- int rc = -EOPNOTSUPP, i;
+ unsigned long cookie = cls->cookie;
+ bool routing_rule = false;
+ struct sja1105_key key;
+ bool gate_rule = false;
+ bool vl_rule = false;
+ int rc, i;
+
+ rc = sja1105_flower_parse_key(priv, extack, cls, &key);
+ if (rc)
+ return rc;
+
+ rc = -EOPNOTSUPP;
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- rc = sja1105_flower_parse_policer(priv, port, extack, cls,
- act->police.rate_bytes_ps,
- act->police.burst);
+ rc = sja1105_flower_policer(priv, port, extack, cookie,
+ &key,
+ act->police.rate_bytes_ps,
+ act->police.burst);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_TRAP: {
+ int cpu = dsa_upstream_port(ds, port);
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(cpu), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_REDIRECT: {
+ struct dsa_port *to_dp;
+
+ to_dp = dsa_port_from_netdev(act->dev);
+ if (IS_ERR(to_dp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a switch port");
+ return -EOPNOTSUPP;
+ }
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(to_dp->index), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_DROP:
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, 0, false);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_GATE:
+ gate_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_gate(priv, port, extack, cookie,
+ &key, act->gate.index,
+ act->gate.prio,
+ act->gate.basetime,
+ act->gate.cycletime,
+ act->gate.cycletimeext,
+ act->gate.num_entries,
+ act->gate.entries);
+ if (rc)
+ goto out;
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"Action not supported");
- break;
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ if (vl_rule && !rc) {
+ /* Delay scheduling configuration until DESTPORTS has been
+ * populated by all other actions.
+ */
+ if (gate_rule) {
+ if (!routing_rule) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload gate action together with redirect or trap");
+ return -EOPNOTSUPP;
+ }
+ rc = sja1105_init_scheduling(priv);
+ if (rc)
+ goto out;
}
+
+ rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}
+out:
return rc;
}
@@ -289,6 +426,9 @@ int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
if (!rule)
return 0;
+ if (rule->type == SJA1105_RULE_VL)
+ return sja1105_vl_delete(priv, port, rule, cls->common.extack);
+
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
if (rule->type == SJA1105_RULE_BCAST_POLICER) {
@@ -297,7 +437,7 @@ int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
old_sharindx = policing[bcast].sharindx;
policing[bcast].sharindx = port;
} else if (rule->type == SJA1105_RULE_TC_POLICER) {
- int index = (port * SJA1105_NUM_TC) + rule->tc_pol.tc;
+ int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;
old_sharindx = policing[index].sharindx;
policing[index].sharindx = port;
@@ -315,6 +455,27 @@ int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+ int rc;
+
+ if (!rule)
+ return 0;
+
+ if (rule->type != SJA1105_RULE_VL)
+ return 0;
+
+ rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
+ cls->common.extack);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
void sja1105_flower_setup(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 472f4eb20c49..789b288cc78b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -25,6 +25,8 @@
#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
+static const struct dsa_switch_ops sja1105_switch_ops;
+
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
unsigned int startup_delay)
{
@@ -301,7 +303,8 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
.tag_port = 0,
.vlanid = 1,
};
- int i;
+ struct dsa_switch *ds = priv->ds;
+ int port;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
@@ -322,12 +325,31 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
/* VLAN 1: all DT-defined ports are members; no restrictions on
- * forwarding; always transmit priority-tagged frames as untagged.
+ * forwarding; always transmit as untagged.
*/
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- pvid.vmemb_port |= BIT(i);
- pvid.vlan_bc |= BIT(i);
- pvid.tag_port &= ~BIT(i);
+ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_bridge_vlan *v;
+
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ pvid.vmemb_port |= BIT(port);
+ pvid.vlan_bc |= BIT(port);
+ pvid.tag_port &= ~BIT(port);
+
+ /* Let traffic that doesn't need dsa_8021q (e.g. STP, PTP) be
+ * transmitted as untagged.
+ */
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ v->port = port;
+ v->vid = 1;
+ v->untagged = true;
+ if (dsa_is_cpu_port(ds, port))
+ v->pvid = true;
+ list_add(&v->list, &priv->dsa_8021q_vlans);
}
((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
@@ -410,6 +432,41 @@ static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
return 0;
}
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ struct sja1105_table *table;
+ int max_mem;
+
+ /* VLAN retagging is implemented using a loopback port that consumes
+ * frame buffers. That leaves less for us.
+ */
+ if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
+ max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
+ else
+ max_mem = SJA1105_MAX_FRAME_MEMORY;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+ l2_fwd_params = table->entries;
+ l2_fwd_params->part_spc[0] = max_mem;
+
+ /* If we have any critical-traffic virtual links, we need to reserve
+ * some frame buffer memory for them. At the moment, hardcode the value
+ * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
+ * remaining for best-effort traffic. TODO: figure out a more flexible
+ * way to perform the frame buffer partitioning.
+ */
+ if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
+ return;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ vl_fwd_params = table->entries;
+
+ l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
+ vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
+}
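
[Annotation] The block counts themselves are defined outside this hunk; assuming SJA1105_MAX_FRAME_MEMORY is 929 blocks and SJA1105_VL_FRAME_MEMORY is 100, which is consistent with the "829 blocks remaining" figure in the comment, the split works out as follows:

#include <stdio.h>

/* Assumed values, chosen to match the "829 blocks" comment above */
#define MAX_FRAME_MEMORY	929	/* 128-byte blocks */
#define VL_FRAME_MEMORY		100

int main(void)
{
	int best_effort = MAX_FRAME_MEMORY - VL_FRAME_MEMORY;

	printf("VL traffic:  %d blocks (%d bytes)\n",
	       VL_FRAME_MEMORY, VL_FRAME_MEMORY * 128);
	printf("best effort: %d blocks (%d bytes)\n",
	       best_effort, best_effort * 128);	/* 829 blocks */
	return 0;
}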
+
static int sja1105_init_general_params(struct sja1105_private *priv)
{
struct sja1105_general_params_entry default_general_params = {
@@ -445,7 +502,7 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
*/
.casc_port = SJA1105_NUM_PORTS,
/* No TTEthernet */
- .vllupformat = 0,
+ .vllupformat = SJA1105_VL_FORMAT_PSFP,
.vlmarker = 0,
.vlmask = 0,
/* Only update correctionField for 1-step PTP (L2 transport) */
@@ -1301,7 +1358,7 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
+ if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1364,7 +1421,7 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
+ if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1410,7 +1467,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
* for what gets printed in 'bridge fdb show'. In the case of zero,
* no VID gets printed at all.
*/
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
vid = 0;
return priv->info->fdb_add_cmd(ds, port, addr, vid);
@@ -1421,7 +1478,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
vid = 0;
return priv->info->fdb_del_cmd(ds, port, addr, vid);
@@ -1460,7 +1517,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
@@ -1583,12 +1640,99 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
sja1105_bridge_member(ds, port, br, false);
}
+#define BYTES_PER_KBIT (1000LL / 8)
+
+static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
+ return i;
+
+ return -1;
+}
+
+static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
+ int prio)
+{
+ int i;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++) {
+ struct sja1105_cbs_entry *cbs = &priv->cbs[i];
+
+ if (cbs->port == port && cbs->prio == prio) {
+ memset(cbs, 0, sizeof(*cbs));
+ return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
+ i, cbs, true);
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *offload)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_cbs_entry *cbs;
+ int index;
+
+ if (!offload->enable)
+ return sja1105_delete_cbs_shaper(priv, port, offload->queue);
+
+ index = sja1105_find_unused_cbs_shaper(priv);
+ if (index < 0)
+ return -ENOSPC;
+
+ cbs = &priv->cbs[index];
+ cbs->port = port;
+ cbs->prio = offload->queue;
+ /* locredit and sendslope are negative by definition. In hardware,
+ * positive values must be provided, and the negative sign is implicit.
+ */
+ cbs->credit_hi = offload->hicredit;
+ cbs->credit_lo = abs(offload->locredit);
+ /* User space is in kbits/sec, hardware in bytes/sec */
+ cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
+ cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
+ /* Convert the negative values from 64-bit 2's complement
+ * to 32-bit 2's complement (for the case of 0x80000000 whose
+ * negative is still negative).
+ */
+ cbs->credit_lo &= GENMASK_ULL(31, 0);
+ cbs->send_slope &= GENMASK_ULL(31, 0);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
+ true);
+}
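
[Annotation] The conversions above go from user-space kbit/s to the hardware's bytes/s, then truncate the negative quantities to 32-bit two's complement. A standalone sketch with sample tc-cbs parameters (values made up):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BYTES_PER_KBIT 125LL	/* 1000 / 8, as in the diff above */

int main(void)
{
	int64_t idleslope = 10000;	/* kbit/s from user space */
	int64_t sendslope = -90000;	/* negative by definition */
	int64_t locredit = -1500;

	uint64_t idle = idleslope * BYTES_PER_KBIT;	   /* 1250000 B/s */
	uint64_t send = llabs(sendslope * BYTES_PER_KBIT); /* 11250000 B/s */
	/* Truncate to the 32-bit fields the hardware expects */
	uint32_t credit_lo = (uint64_t)llabs(locredit) & 0xffffffff;

	printf("idle=%llu send=%llu credit_lo=%u\n",
	       (unsigned long long)idle, (unsigned long long)send,
	       credit_lo);
	return 0;
}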
+
+static int sja1105_reload_cbs(struct sja1105_private *priv)
+{
+ int rc = 0, i;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++) {
+ struct sja1105_cbs_entry *cbs = &priv->cbs[i];
+
+ if (!cbs->idle_slope && !cbs->send_slope)
+ continue;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
+ true);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
static const char * const sja1105_reset_reasons[] = {
[SJA1105_VLAN_FILTERING] = "VLAN filtering",
[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
[SJA1105_AGEING_TIME] = "Ageing time",
[SJA1105_SCHEDULING] = "Time-aware scheduling",
[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
+ [SJA1105_VIRTUAL_LINKS] = "Virtual links",
};
/* For situations where we need to change a setting at runtime that is only
@@ -1696,6 +1840,10 @@ out_unlock_ptp:
sja1105_sgmii_pcs_force_speed(priv, speed);
}
}
+
+ rc = sja1105_reload_cbs(priv);
+ if (rc < 0)
+ goto out;
out:
mutex_unlock(&priv->mgmt_lock);
@@ -1714,6 +1862,154 @@ static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
&mac[port], true);
}
+static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
+ int tree_index, int sw_index,
+ int other_port, struct net_device *br)
+{
+ struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
+ struct sja1105_private *other_priv = other_ds->priv;
+ struct sja1105_private *priv = ds->priv;
+ int port, rc;
+
+ if (other_ds->ops != &sja1105_switch_ops)
+ return 0;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!dsa_is_user_port(ds, port))
+ continue;
+ if (dsa_to_port(ds, port)->bridge_dev != br)
+ continue;
+
+ other_priv->expect_dsa_8021q = true;
+ rc = dsa_8021q_crosschip_bridge_join(ds, port, other_ds,
+ other_port,
+ &priv->crosschip_links);
+ other_priv->expect_dsa_8021q = false;
+ if (rc)
+ return rc;
+
+ priv->expect_dsa_8021q = true;
+ rc = dsa_8021q_crosschip_bridge_join(other_ds, other_port, ds,
+ port,
+ &other_priv->crosschip_links);
+ priv->expect_dsa_8021q = false;
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
+ int tree_index, int sw_index,
+ int other_port,
+ struct net_device *br)
+{
+ struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
+ struct sja1105_private *other_priv = other_ds->priv;
+ struct sja1105_private *priv = ds->priv;
+ int port;
+
+ if (other_ds->ops != &sja1105_switch_ops)
+ return;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!dsa_is_user_port(ds, port))
+ continue;
+ if (dsa_to_port(ds, port)->bridge_dev != br)
+ continue;
+
+ other_priv->expect_dsa_8021q = true;
+ dsa_8021q_crosschip_bridge_leave(ds, port, other_ds, other_port,
+ &priv->crosschip_links);
+ other_priv->expect_dsa_8021q = false;
+
+ priv->expect_dsa_8021q = true;
+ dsa_8021q_crosschip_bridge_leave(other_ds, other_port, ds, port,
+ &other_priv->crosschip_links);
+ priv->expect_dsa_8021q = false;
+ }
+}
+
+static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc, i;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ priv->expect_dsa_8021q = true;
+ rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
+ priv->expect_dsa_8021q = false;
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ dev_info(ds->dev, "%s switch tagging\n",
+ enabled ? "Enabled" : "Disabled");
+ return 0;
+}
+
+static enum dsa_tag_protocol
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_SJA1105;
+}
+
+static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
+{
+ int subvlan;
+
+ if (pvid)
+ return 0;
+
+ for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ if (subvlan_map[subvlan] == VLAN_N_VID)
+ return subvlan;
+
+ return -1;
+}
+
+static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
+{
+ int subvlan;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ if (subvlan_map[subvlan] == vid)
+ return subvlan;
+
+ return -1;
+}
+
+static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
+ int port, u16 vid)
+{
+ struct sja1105_port *sp = &priv->ports[port];
+
+ return sja1105_find_subvlan(sp->subvlan_map, vid);
+}
+
+static void sja1105_init_subvlan_map(u16 *subvlan_map)
+{
+ int subvlan;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ subvlan_map[subvlan] = VLAN_N_VID;
+}
+
+static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
+ u16 *subvlan_map)
+{
+ struct sja1105_port *sp = &priv->ports[port];
+ int subvlan;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ sp->subvlan_map[subvlan] = subvlan_map[subvlan];
+}
+
static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
{
struct sja1105_vlan_lookup_entry *vlan;
@@ -1730,94 +2026,628 @@ static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
return -1;
}
-static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
- bool enabled, bool untagged)
+static int
+sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
+ int count, int from_port, u16 from_vid,
+ u16 to_vid)
{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (retagging[i].ing_port == BIT(from_port) &&
+ retagging[i].vlan_ing == from_vid &&
+ retagging[i].vlan_egr == to_vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_commit_vlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan,
+ struct sja1105_retagging_entry *new_retagging,
+ int num_retagging)
+{
+ struct sja1105_retagging_entry *retagging;
struct sja1105_vlan_lookup_entry *vlan;
struct sja1105_table *table;
- bool keep = true;
- int match, rc;
+ int num_vlans = 0;
+ int rc, i, k = 0;
+ /* VLAN table */
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+ vlan = table->entries;
- match = sja1105_is_vlan_configured(priv, vid);
- if (match < 0) {
- /* Can't delete a missing entry. */
- if (!enabled)
- return 0;
- rc = sja1105_table_resize(table, table->entry_count + 1);
+ for (i = 0; i < VLAN_N_VID; i++) {
+ int match = sja1105_is_vlan_configured(priv, i);
+
+ if (new_vlan[i].vlanid != VLAN_N_VID)
+ num_vlans++;
+
+ if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
+ /* Was there before, no longer is. Delete */
+ dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
+ rc = sja1105_dynamic_config_write(priv,
+ BLK_IDX_VLAN_LOOKUP,
+ i, &vlan[match], false);
+ if (rc < 0)
+ return rc;
+ } else if (new_vlan[i].vlanid != VLAN_N_VID) {
+ /* Nothing changed, don't do anything */
+ if (match >= 0 &&
+ vlan[match].vlanid == new_vlan[i].vlanid &&
+ vlan[match].tag_port == new_vlan[i].tag_port &&
+ vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
+ vlan[match].vmemb_port == new_vlan[i].vmemb_port)
+ continue;
+ /* Update entry */
+ dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
+ rc = sja1105_dynamic_config_write(priv,
+ BLK_IDX_VLAN_LOOKUP,
+ i, &new_vlan[i],
+ true);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ if (table->entry_count)
+ kfree(table->entries);
+
+ table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = num_vlans;
+ vlan = table->entries;
+
+ for (i = 0; i < VLAN_N_VID; i++) {
+ if (new_vlan[i].vlanid == VLAN_N_VID)
+ continue;
+ vlan[k++] = new_vlan[i];
+ }
+
+ /* VLAN Retagging Table */
+ table = &priv->static_config.tables[BLK_IDX_RETAGGING];
+ retagging = table->entries;
+
+ for (i = 0; i < table->entry_count; i++) {
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
+ i, &retagging[i], false);
if (rc)
return rc;
- match = table->entry_count - 1;
}
- /* Assign pointer after the resize (it's new memory) */
- vlan = table->entries;
- vlan[match].vlanid = vid;
- if (enabled) {
- vlan[match].vlan_bc |= BIT(port);
- vlan[match].vmemb_port |= BIT(port);
- } else {
- vlan[match].vlan_bc &= ~BIT(port);
- vlan[match].vmemb_port &= ~BIT(port);
+
+ if (table->entry_count)
+ kfree(table->entries);
+
+ table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = num_retagging;
+ retagging = table->entries;
+
+ for (i = 0; i < num_retagging; i++) {
+ retagging[i] = new_retagging[i];
+
+ /* Update entry */
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
+ i, &retagging[i], true);
+ if (rc < 0)
+ return rc;
}
- /* Also unset tag_port if removing this VLAN was requested,
- * just so we don't have a confusing bitmap (no practical purpose).
- */
- if (untagged || !enabled)
- vlan[match].tag_port &= ~BIT(port);
+
+ return 0;
+}
+
+struct sja1105_crosschip_vlan {
+ struct list_head list;
+ u16 vid;
+ bool untagged;
+ int port;
+ int other_port;
+ struct dsa_switch *other_ds;
+};
+
+struct sja1105_crosschip_switch {
+ struct list_head list;
+ struct dsa_switch *other_ds;
+};
+
+static int sja1105_commit_pvid(struct sja1105_private *priv)
+{
+ struct sja1105_bridge_vlan *v;
+ struct list_head *vlan_list;
+ int rc = 0;
+
+ if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ vlan_list = &priv->bridge_vlans;
else
- vlan[match].tag_port |= BIT(port);
- /* If there's no port left as member of this VLAN,
- * it's time for it to go.
- */
- if (!vlan[match].vmemb_port)
- keep = false;
+ vlan_list = &priv->dsa_8021q_vlans;
- dev_dbg(priv->ds->dev,
- "%s: port %d, vid %llu, broadcast domain 0x%llx, "
- "port members 0x%llx, tagged ports 0x%llx, keep %d\n",
- __func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
- vlan[match].vmemb_port, vlan[match].tag_port, keep);
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->pvid) {
+ rc = sja1105_pvid_apply(priv, v->port, v->vid);
+ if (rc)
+ break;
+ }
+ }
- rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
- &vlan[match], keep);
- if (rc < 0)
- return rc;
+ return rc;
+}
+
+static int
+sja1105_build_bridge_vlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan)
+{
+ struct sja1105_bridge_vlan *v;
- if (!keep)
- return sja1105_table_delete_entry(table, match);
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
+ return 0;
+
+ list_for_each_entry(v, &priv->bridge_vlans, list) {
+ int match = v->vid;
+
+ new_vlan[match].vlanid = v->vid;
+ new_vlan[match].vmemb_port |= BIT(v->port);
+ new_vlan[match].vlan_bc |= BIT(v->port);
+ if (!v->untagged)
+ new_vlan[match].tag_port |= BIT(v->port);
+ }
return 0;
}
-static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
+static int
+sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan)
{
- int rc, i;
+ struct sja1105_bridge_vlan *v;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
- i, rc);
- return rc;
+ if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ return 0;
+
+ list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
+ int match = v->vid;
+
+ new_vlan[match].vlanid = v->vid;
+ new_vlan[match].vmemb_port |= BIT(v->port);
+ new_vlan[match].vlan_bc |= BIT(v->port);
+ if (!v->untagged)
+ new_vlan[match].tag_port |= BIT(v->port);
+ }
+
+ return 0;
+}
+
+static int sja1105_build_subvlans(struct sja1105_private *priv,
+ u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
+ struct sja1105_vlan_lookup_entry *new_vlan,
+ struct sja1105_retagging_entry *new_retagging,
+ int *num_retagging)
+{
+ struct sja1105_bridge_vlan *v;
+ int k = *num_retagging;
+
+ if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
+ return 0;
+
+ list_for_each_entry(v, &priv->bridge_vlans, list) {
+ int upstream = dsa_upstream_port(priv->ds, v->port);
+ int match, subvlan;
+ u16 rx_vid;
+
+ /* Only sub-VLANs on user ports need to be applied.
+ * Bridge VLANs also include VLANs added automatically
+ * by DSA on the CPU port.
+ */
+ if (!dsa_is_user_port(priv->ds, v->port))
+ continue;
+
+ subvlan = sja1105_find_subvlan(subvlan_map[v->port],
+ v->vid);
+ if (subvlan < 0) {
+ subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
+ v->pvid);
+ if (subvlan < 0) {
+ dev_err(priv->ds->dev, "No more free subvlans\n");
+ return -ENOSPC;
+ }
+ }
+
+ rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
+
+ /* @v->vid on @v->port needs to be retagged to @rx_vid
+ * on @upstream. Assume @v->vid on @v->port and on
+ * @upstream was already configured by the previous
+ * iteration over bridge_vlans.
+ */
+ match = rx_vid;
+ new_vlan[match].vlanid = rx_vid;
+ new_vlan[match].vmemb_port |= BIT(v->port);
+ new_vlan[match].vmemb_port |= BIT(upstream);
+ new_vlan[match].vlan_bc |= BIT(v->port);
+ new_vlan[match].vlan_bc |= BIT(upstream);
+ /* The "untagged" flag is set the same as for the
+ * original VLAN
+ */
+ if (!v->untagged)
+ new_vlan[match].tag_port |= BIT(v->port);
+ /* But it's always tagged towards the CPU */
+ new_vlan[match].tag_port |= BIT(upstream);
+
+ /* The Retagging Table generates packet *clones* with
+ * the new VLAN. This is a very odd hardware quirk
+ * which we need to suppress by dropping the original
+ * packet.
+ * Deny egress of the original VLAN towards the CPU
+ * port. This will force the switch to drop it, and
+ * we'll see only the retagged packets.
+ */
+ match = v->vid;
+ new_vlan[match].vlan_bc &= ~BIT(upstream);
+
+ /* Check for space before writing, so we never index past the
+ * end of new_retagging.
+ */
+ if (k == SJA1105_MAX_RETAGGING_COUNT) {
+ dev_err(priv->ds->dev, "No more retagging rules\n");
+ return -ENOSPC;
}
+
+ /* And the retagging itself */
+ new_retagging[k].vlan_ing = v->vid;
+ new_retagging[k].vlan_egr = rx_vid;
+ new_retagging[k].ing_port = BIT(v->port);
+ new_retagging[k].egr_port = BIT(upstream);
+ k++;
+
+ subvlan_map[v->port][subvlan] = v->vid;
}
- dev_info(ds->dev, "%s switch tagging\n",
- enabled ? "Enabled" : "Disabled");
+
+ *num_retagging = k;
+
return 0;
}
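
To make the retagging setup above concrete, consider a hypothetical bridge VLAN 100 on user port 2 with upstream (CPU) port 4: after claiming sub-VLAN slot 1, the rule built by the loop is equivalent to this sketch (all values hypothetical):

/* Illustrative only; not part of the patch. */
static void sja1105_subvlan_example(struct sja1105_private *priv)
{
	int port = 2, upstream = 4, subvlan = 1;
	u16 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, port, subvlan);
	struct sja1105_retagging_entry rule = {
		.vlan_ing = 100,		/* the original bridge VLAN */
		.vlan_egr = rx_vid,		/* the retagged clone's VID */
		.ing_port = BIT(port),		/* match ingress on port 2 only */
		.egr_port = BIT(upstream),	/* clone egresses towards the CPU */
	};

	/* VLAN 100 additionally loses vlan_bc membership towards port 4,
	 * so the CPU sees only the retagged clone, never the original.
	 */
	(void)rule;
}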
-static enum dsa_tag_protocol
-sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
- enum dsa_tag_protocol mp)
+/* Sadly, in crosschip scenarios where the CPU port is also the link to another
+ * switch, we must also retag backwards (from the dsa_8021q VID to the original
+ * VID) on the CPU port of neighbour switches.
+ */
+static int
+sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
+ struct sja1105_vlan_lookup_entry *new_vlan,
+ struct sja1105_retagging_entry *new_retagging,
+ int *num_retagging)
+{
+ struct sja1105_crosschip_vlan *tmp, *pos;
+ struct dsa_8021q_crosschip_link *c;
+ struct sja1105_bridge_vlan *v, *w;
+ struct list_head crosschip_vlans;
+ int k = *num_retagging;
+ int rc = 0;
+
+ if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
+ return 0;
+
+ INIT_LIST_HEAD(&crosschip_vlans);
+
+ list_for_each_entry(c, &priv->crosschip_links, list) {
+ struct sja1105_private *other_priv = c->other_ds->priv;
+
+ if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ continue;
+
+ /* Crosschip links are also added to the CPU ports.
+ * Ignore those.
+ */
+ if (!dsa_is_user_port(priv->ds, c->port))
+ continue;
+ if (!dsa_is_user_port(c->other_ds, c->other_port))
+ continue;
+
+ /* Search for VLANs on the remote port */
+ list_for_each_entry(v, &other_priv->bridge_vlans, list) {
+ bool already_added = false;
+ bool we_have_it = false;
+
+ if (v->port != c->other_port)
+ continue;
+
+ /* If @v is a pvid on @other_ds, it does not need
+ * re-retagging, because its SVL field is 0 and we
+ * already allow that, via the dsa_8021q crosschip
+ * links.
+ */
+ if (v->pvid)
+ continue;
+
+ /* Search for the VLAN on our local port */
+ list_for_each_entry(w, &priv->bridge_vlans, list) {
+ if (w->port == c->port && w->vid == v->vid) {
+ we_have_it = true;
+ break;
+ }
+ }
+
+ if (!we_have_it)
+ continue;
+
+ list_for_each_entry(tmp, &crosschip_vlans, list) {
+ if (tmp->vid == v->vid &&
+ tmp->untagged == v->untagged &&
+ tmp->port == c->port &&
+ tmp->other_port == v->port &&
+ tmp->other_ds == c->other_ds) {
+ already_added = true;
+ break;
+ }
+ }
+
+ if (already_added)
+ continue;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ dev_err(priv->ds->dev, "Failed to allocate memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ tmp->vid = v->vid;
+ tmp->port = c->port;
+ tmp->other_port = v->port;
+ tmp->other_ds = c->other_ds;
+ tmp->untagged = v->untagged;
+ list_add(&tmp->list, &crosschip_vlans);
+ }
+ }
+
+ list_for_each_entry(tmp, &crosschip_vlans, list) {
+ struct sja1105_private *other_priv = tmp->other_ds->priv;
+ int upstream = dsa_upstream_port(priv->ds, tmp->port);
+ int match, subvlan;
+ u16 rx_vid;
+
+ subvlan = sja1105_find_committed_subvlan(other_priv,
+ tmp->other_port,
+ tmp->vid);
+ /* If this happens, it's a bug. The neighbour switch does not
+ * have a subvlan for tmp->vid on tmp->other_port, but it
+ * should, since we already checked for its vlan_state.
+ */
+ if (WARN_ON(subvlan < 0)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ds,
+ tmp->other_port,
+ subvlan);
+
+ /* The @rx_vid retagged from @tmp->vid on
+ * {@tmp->other_ds, @tmp->other_port} needs to be
+ * re-retagged to @tmp->vid on the way back to us.
+ *
+ * Assume the original @tmp->vid is already configured
+ * on this local switch, otherwise we wouldn't be
+ * retagging its subvlan on the other switch in the
+ * first place. We just need to add a reverse retagging
+ * rule for @rx_vid and install @rx_vid on our ports.
+ */
+ match = rx_vid;
+ new_vlan[match].vlanid = rx_vid;
+ new_vlan[match].vmemb_port |= BIT(tmp->port);
+ new_vlan[match].vmemb_port |= BIT(upstream);
+ /* The "untagged" flag is set the same as for the
+ * original VLAN. And towards the CPU, it doesn't
+ * really matter, because @rx_vid will only receive
+ * traffic on that port. For consistency with other dsa_8021q
+ * VLANs, we'll keep the CPU port tagged.
+ */
+ if (!tmp->untagged)
+ new_vlan[match].tag_port |= BIT(tmp->port);
+ new_vlan[match].tag_port |= BIT(upstream);
+ /* Deny egress of @rx_vid towards our front-panel port.
+ * This will force the switch to drop it, and we'll see
+ * only the re-retagged packets (having the original,
+ * pre-initial-retagging, VLAN @tmp->vid).
+ */
+ new_vlan[match].vlan_bc &= ~BIT(tmp->port);
+
+ /* On reverse retagging, the same ingress VLAN goes to multiple
+ * ports, so we can merge the matches into composite rules rather
+ * than waste the limited space in the retagging table.
+ */
+ k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
+ upstream, rx_vid, tmp->vid);
+ if (k < 0) {
+ if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
+ dev_err(priv->ds->dev, "No more retagging rules\n");
+ rc = -ENOSPC;
+ goto out;
+ }
+ k = (*num_retagging)++;
+ }
+ /* And the retagging itself */
+ new_retagging[k].vlan_ing = rx_vid;
+ new_retagging[k].vlan_egr = tmp->vid;
+ new_retagging[k].ing_port = BIT(upstream);
+ new_retagging[k].egr_port |= BIT(tmp->port);
+ }
+
+out:
+ list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+
+ return rc;
+}
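
Since sja1105_find_retagging_entry() keys on (ing_port, vlan_ing, vlan_egr), a reverse rule discovered for a second front-panel port is reused and only its egress port mask grows. With hypothetical values, one composite rule covering two ports would end up as:

/* Illustrative only; values are hypothetical. */
static void sja1105_composite_rule_example(int upstream)
{
	struct sja1105_retagging_entry composite = {
		.vlan_ing = 1124,		/* the dsa_8021q rx_vid */
		.vlan_egr = 100,		/* back to the original VID */
		.ing_port = BIT(upstream),
		.egr_port = BIT(1) | BIT(2),	/* accumulated via |= above */
	};

	(void)composite;
}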
+
+static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
+
+static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
{
- return DSA_TAG_PROTO_SJA1105;
+ struct sja1105_crosschip_switch *s, *pos;
+ struct list_head crosschip_switches;
+ struct dsa_8021q_crosschip_link *c;
+ int rc = 0;
+
+ INIT_LIST_HEAD(&crosschip_switches);
+
+ list_for_each_entry(c, &priv->crosschip_links, list) {
+ bool already_added = false;
+
+ list_for_each_entry(s, &crosschip_switches, list) {
+ if (s->other_ds == c->other_ds) {
+ already_added = true;
+ break;
+ }
+ }
+
+ if (already_added)
+ continue;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ dev_err(priv->ds->dev, "Failed to allocate memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ s->other_ds = c->other_ds;
+ list_add(&s->list, &crosschip_switches);
+ }
+
+ list_for_each_entry(s, &crosschip_switches, list) {
+ struct sja1105_private *other_priv = s->other_ds->priv;
+
+ rc = sja1105_build_vlan_table(other_priv, false);
+ if (rc)
+ goto out;
+ }
+
+out:
+ list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
+ list_del(&s->list);
+ kfree(s);
+ }
+
+ return rc;
+}
+
+static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
+{
+ u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
+ struct sja1105_retagging_entry *new_retagging;
+ struct sja1105_vlan_lookup_entry *new_vlan;
+ struct sja1105_table *table;
+ int i, num_retagging = 0;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+ new_vlan = kcalloc(VLAN_N_VID,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!new_vlan)
+ return -ENOMEM;
+
+ table = &priv->static_config.tables[BLK_IDX_RETAGGING];
+ new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!new_retagging) {
+ kfree(new_vlan);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < VLAN_N_VID; i++)
+ new_vlan[i].vlanid = VLAN_N_VID;
+
+ for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
+ new_retagging[i].vlan_ing = VLAN_N_VID;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ sja1105_init_subvlan_map(subvlan_map[i]);
+
+ /* Bridge VLANs */
+ rc = sja1105_build_bridge_vlans(priv, new_vlan);
+ if (rc)
+ goto out;
+
+ /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
+ * - RX VLANs
+ * - TX VLANs
+ * - Crosschip links
+ */
+ rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
+ if (rc)
+ goto out;
+
+ /* Private VLANs necessary for dsa_8021q operation, which we need to
+ * determine on our own:
+ * - Sub-VLANs
+ * - Sub-VLANs of crosschip switches
+ */
+ rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
+ &num_retagging);
+ if (rc)
+ goto out;
+
+ rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
+ &num_retagging);
+ if (rc)
+ goto out;
+
+ rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
+ if (rc)
+ goto out;
+
+ rc = sja1105_commit_pvid(priv);
+ if (rc)
+ goto out;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
+
+ if (notify) {
+ rc = sja1105_notify_crosschip_switches(priv);
+ if (rc)
+ goto out;
+ }
+
+out:
+ kfree(new_vlan);
+ kfree(new_retagging);
+
+ return rc;
+}
+
+/* Select the list to which we should add this VLAN. */
+static struct list_head *sja1105_classify_vlan(struct sja1105_private *priv,
+ u16 vid)
+{
+ if (priv->expect_dsa_8021q)
+ return &priv->dsa_8021q_vlans;
+
+ return &priv->bridge_vlans;
}
-/* This callback needs to be present */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
+ struct sja1105_private *priv = ds->priv;
+ u16 vid;
+
+ if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
+ return 0;
+
+ /* If the user wants best-effort VLAN filtering (aka vlan_filtering
+ * bridge plus tagging), be sure to at least deny alterations to the
+ * configuration done by dsa_8021q.
+ */
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (!priv->expect_dsa_8021q && vid_is_dsa_8021q(vid)) {
+ dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
+ return -EBUSY;
+ }
+ }
+
return 0;
}
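
vid_is_dsa_8021q() comes from tag_8021q. The range quoted in the error message follows from the VID layout assumed by that scheme, where bits 11:10 encode an RX/TX direction; an equivalent range check is:

/* Sketch, assuming the tag_8021q VID layout (direction in bits 11:10,
 * where 0b01 = TX and 0b10 = RX); exactly VIDs 1024-3071 qualify.
 */
static bool vid_is_dsa_8021q_sketch(u16 vid)
{
	return vid >= 1024 && vid <= 3071;
}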
@@ -1830,10 +2660,21 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
+ enum sja1105_vlan_state state;
struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ bool want_tagging;
u16 tpid, tpid2;
int rc;
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ dev_err(ds->dev,
+ "Cannot change VLAN filtering state while VL rules are active\n");
+ return -EBUSY;
+ }
+ }
+
if (enabled) {
/* Enable VLAN filtering. */
tpid = ETH_P_8021Q;
@@ -1844,6 +2685,29 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
tpid2 = ETH_P_SJA1105;
}
+ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (enabled)
+ sp->xmit_tpid = priv->info->qinq_tpid;
+ else
+ sp->xmit_tpid = ETH_P_SJA1105;
+ }
+
+ if (!enabled)
+ state = SJA1105_VLAN_UNAWARE;
+ else if (priv->best_effort_vlan_filtering)
+ state = SJA1105_VLAN_BEST_EFFORT;
+ else
+ state = SJA1105_VLAN_FILTERING_FULL;
+
+ if (priv->vlan_state == state)
+ return 0;
+
+ priv->vlan_state = state;
+ want_tagging = (state == SJA1105_VLAN_UNAWARE ||
+ state == SJA1105_VLAN_BEST_EFFORT);
+
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
@@ -1856,8 +2720,10 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
+ want_tagging = priv->best_effort_vlan_filtering || !enabled;
+
/* VLAN filtering => independent VLAN learning.
- * No VLAN filtering => shared VLAN learning.
+ * No VLAN filtering (or best effort) => shared VLAN learning.
*
* In shared VLAN learning mode, untagged traffic still gets
* pvid-tagged, and the FDB table gets populated with entries
@@ -1876,7 +2742,13 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
*/
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !enabled;
+ l2_lookup_params->shared_learn = want_tagging;
+
+ sja1105_frame_memory_partitioning(priv);
+
+ rc = sja1105_build_vlan_table(priv, false);
+ if (rc)
+ return rc;
rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
if (rc)
@@ -1884,56 +2756,191 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
/* Switch port identification based on 802.1Q is only passable
* if we are not under a vlan_filtering bridge. So make sure
- * the two configurations are mutually exclusive.
+ * the two configurations are mutually exclusive (of course, the
+ * user may know better, i.e. best_effort_vlan_filtering).
*/
- return sja1105_setup_8021q_tagging(ds, !enabled);
+ return sja1105_setup_8021q_tagging(ds, want_tagging);
}
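
Condensed, the decision above maps two booleans onto the three VLAN awareness states; restated as a sketch (not code from this patch):

static enum sja1105_vlan_state
sja1105_map_vlan_state(bool vlan_filtering, bool best_effort)
{
	if (!vlan_filtering)
		return SJA1105_VLAN_UNAWARE;		/* shared learning, ETH_P_SJA1105 xmit TPID */
	if (best_effort)
		return SJA1105_VLAN_BEST_EFFORT;	/* shared learning, QinQ xmit TPID */
	return SJA1105_VLAN_FILTERING_FULL;		/* independent learning, QinQ xmit TPID */
}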
static void sja1105_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct sja1105_private *priv = ds->priv;
+ bool vlan_table_changed = false;
u16 vid;
int rc;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
- BRIDGE_VLAN_INFO_UNTAGGED);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
- vid, port, rc);
- return;
- }
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
- rc = sja1105_pvid_apply(ds->priv, port, vid);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
- vid, port, rc);
- return;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct sja1105_bridge_vlan *v;
+ struct list_head *vlan_list;
+ bool already_added = false;
+
+ vlan_list = sja1105_classify_vlan(priv, vid);
+
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->port == port && v->vid == vid &&
+ v->untagged == untagged && v->pvid == pvid) {
+ already_added = true;
+ break;
}
}
+
+ if (already_added)
+ continue;
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ dev_err(ds->dev, "Out of memory while storing VLAN\n");
+ return;
+ }
+
+ v->port = port;
+ v->vid = vid;
+ v->untagged = untagged;
+ v->pvid = pvid;
+ list_add(&v->list, vlan_list);
+
+ vlan_table_changed = true;
}
+
+ if (!vlan_table_changed)
+ return;
+
+ rc = sja1105_build_vlan_table(priv, true);
+ if (rc)
+ dev_err(ds->dev, "Failed to build VLAN table: %d\n", rc);
}
static int sja1105_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct sja1105_private *priv = ds->priv;
+ bool vlan_table_changed = false;
u16 vid;
- int rc;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
- BRIDGE_VLAN_INFO_UNTAGGED);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
- vid, port, rc);
- return rc;
+ struct sja1105_bridge_vlan *v, *n;
+ struct list_head *vlan_list;
+
+ vlan_list = sja1105_classify_vlan(priv, vid);
+
+ list_for_each_entry_safe(v, n, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
+ list_del(&v->list);
+ kfree(v);
+ vlan_table_changed = true;
+ break;
+ }
}
}
+
+ if (!vlan_table_changed)
+ return 0;
+
+ return sja1105_build_vlan_table(priv, true);
+}
+
+static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
+ bool *be_vlan)
+{
+ *be_vlan = priv->best_effort_vlan_filtering;
+
return 0;
}
+static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
+ bool be_vlan)
+{
+ struct dsa_switch *ds = priv->ds;
+ bool vlan_filtering;
+ int port;
+ int rc = 0;
+
+ priv->best_effort_vlan_filtering = be_vlan;
+
+ rtnl_lock();
+ for (port = 0; port < ds->num_ports; port++) {
+ struct dsa_port *dp;
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ dp = dsa_to_port(ds, port);
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering);
+ if (rc)
+ break;
+ }
+ rtnl_unlock();
+
+ return rc;
+}
+
+enum sja1105_devlink_param_id {
+ SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+};
+
+static int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_get(priv,
+ &ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_set(priv,
+ ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct devlink_param sja1105_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+ "best_effort_vlan_filtering",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+static int sja1105_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
+
+static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
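
Once registered, the parameter is reachable over the standard devlink runtime interface; from user space this would be driven with something like the following (device handle hypothetical):

/* Example user space usage (device handle hypothetical):
 *
 *   devlink dev param set spi/spi2.0 \
 *           name best_effort_vlan_filtering value true cmode runtime
 */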
+
/* The programming model for the SJA1105 switch is "all-at-once" via static
* configuration tables. Some of these can be dynamically modified at runtime,
* but not the xMII mode parameters table.
@@ -1999,6 +3006,12 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
+ ds->configure_vlan_while_not_filtering = true;
+
+ rc = sja1105_setup_devlink_params(ds);
+ if (rc < 0)
+ return rc;
+
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
* a bridge, so it's safe to set up switch tagging at this time.
@@ -2009,6 +3022,7 @@ static int sja1105_setup(struct dsa_switch *ds)
static void sja1105_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
+ struct sja1105_bridge_vlan *v, *n;
int port;
for (port = 0; port < SJA1105_NUM_PORTS; port++) {
@@ -2021,10 +3035,21 @@ static void sja1105_teardown(struct dsa_switch *ds)
kthread_destroy_worker(sp->xmit_worker);
}
+ sja1105_teardown_devlink_params(ds);
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
sja1105_static_config_free(&priv->static_config);
+
+ list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
+ list_del(&v->list);
+ kfree(v);
+ }
+
+ list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
+ list_del(&v->list);
+ kfree(v);
+ }
}
static int sja1105_port_enable(struct dsa_switch *ds, int port,
@@ -2200,6 +3225,8 @@ static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return sja1105_setup_tc_taprio(ds, port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return sja1105_setup_tc_cbs(ds, port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -2359,6 +3386,11 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_policer_del = sja1105_port_policer_del,
.cls_flower_add = sja1105_cls_flower_add,
.cls_flower_del = sja1105_cls_flower_del,
+ .cls_flower_stats = sja1105_cls_flower_stats,
+ .crosschip_bridge_join = sja1105_crosschip_bridge_join,
+ .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
+ .devlink_param_get = sja1105_devlink_param_get,
+ .devlink_param_set = sja1105_devlink_param_set,
};
static int sja1105_check_device_id(struct sja1105_private *priv)
@@ -2461,6 +3493,10 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->mgmt_lock);
+ INIT_LIST_HEAD(&priv->crosschip_links);
+ INIT_LIST_HEAD(&priv->bridge_vlans);
+ INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
+
sja1105_tas_setup(ds);
sja1105_flower_setup(ds);
@@ -2468,11 +3504,20 @@ static int sja1105_probe(struct spi_device *spi)
if (rc)
return rc;
+ if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
+ priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
+ sizeof(struct sja1105_cbs_entry),
+ GFP_KERNEL);
+ if (!priv->cbs)
+ return -ENOMEM;
+ }
+
/* Connections between dsa_port and sja1105_port */
for (port = 0; port < SJA1105_NUM_PORTS; port++) {
struct sja1105_port *sp = &priv->ports[port];
struct dsa_port *dp = dsa_to_port(ds, port);
struct net_device *slave;
+ int subvlan;
if (!dsa_is_user_port(ds, port))
continue;
@@ -2492,6 +3537,10 @@ static int sja1105_probe(struct spi_device *spi)
goto out;
}
skb_queue_head_init(&sp->xmit_queue);
+ sp->xmit_tpid = ETH_P_SJA1105;
+
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
+ sp->subvlan_map[subvlan] = VLAN_N_VID;
}
return 0;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 43480b24f1f0..6408d1158f2d 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -48,6 +48,19 @@ static inline s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
return base_time + n * cycle_time;
}
+/* This is not a preprocessor macro because the "ns" argument may or may not be
+ * s64 at caller side. This ensures it is properly type-cast before div_s64.
+ */
+static inline s64 ns_to_sja1105_delta(s64 ns)
+{
+ return div_s64(ns, 200);
+}
+
+static inline s64 sja1105_delta_to_ns(s64 delta)
+{
+ return delta * 200;
+}
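
A quick numeric check of the conversion, illustrative only: the switch counts time in 200 ns ticks, and div_s64() truncates towards zero.

/* Illustrative only, not part of the patch. */
static inline void sja1105_delta_example(void)
{
	s64 delta = ns_to_sja1105_delta(1000000);	/* 1 ms -> 5000 ticks */
	s64 ns = sja1105_delta_to_ns(delta);		/* 5000 ticks -> 1 ms */

	WARN_ON(ns != 1000000);
	/* Non-multiples truncate: 399 ns also packs into 1 tick (200 ns). */
}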
+
struct sja1105_ptp_cmd {
u64 startptpcp; /* start toggling PTP_CLK pin */
u64 stopptpcp; /* stop toggling PTP_CLK pin */
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 04bdb72ae6b6..bb52b9c841b2 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -439,10 +439,12 @@ static struct sja1105_regs sja1105et_regs = {
.prod_id = 0x100BC3,
.status = 0x1,
.port_control = 0x11,
+ .vl_status = 0x10000,
.config = 0x020000,
.rgu = 0x100440,
/* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
@@ -471,10 +473,12 @@ static struct sja1105_regs sja1105pqrs_regs = {
.prod_id = 0x100BC3,
.status = 0x1,
.port_control = 0x12,
+ .vl_status = 0x10000,
.config = 0x020000,
.rgu = 0x100440,
/* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
.sgmii = 0x1F0000,
.rmii_pll1 = 0x10000A,
@@ -508,8 +512,10 @@ struct sja1105_info sja1105e_info = {
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
+ .qinq_tpid = ETH_P_8021Q,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
+ .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
@@ -522,8 +528,10 @@ struct sja1105_info sja1105t_info = {
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
+ .qinq_tpid = ETH_P_8021Q,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
+ .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
@@ -536,8 +544,10 @@ struct sja1105_info sja1105p_info = {
.part_no = SJA1105P_PART_NO,
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
+ .qinq_tpid = ETH_P_8021AD,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
@@ -551,8 +561,10 @@ struct sja1105_info sja1105q_info = {
.part_no = SJA1105Q_PART_NO,
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
+ .qinq_tpid = ETH_P_8021AD,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
@@ -566,8 +578,10 @@ struct sja1105_info sja1105r_info = {
.part_no = SJA1105R_PART_NO,
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
+ .qinq_tpid = ETH_P_8021AD,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
@@ -582,8 +596,10 @@ struct sja1105_info sja1105s_info = {
.static_ops = sja1105s_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
+ .qinq_tpid = ETH_P_8021AD,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index bbfe034910a0..ff3fe471efc2 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -146,9 +146,8 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
/* TPID and TPID2 are intentionally reversed so that semantic
* compatibility with E/T is kept.
*/
-static size_t
-sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op)
+size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
struct sja1105_general_params_entry *entry = entry_ptr;
@@ -228,9 +227,8 @@ sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
-static size_t
-sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op)
+size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
@@ -432,6 +430,84 @@ static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t
+sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 16; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->partspc[i],
+ offset + 9, offset + 0, size, op);
+ sja1105_packing(buf, &entry->debugen, 15, 15, size, op);
+ return size;
+}
+
+static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 31, 31, size, op);
+ sja1105_packing(buf, &entry->priority, 30, 28, size, op);
+ sja1105_packing(buf, &entry->partition, 27, 25, size, op);
+ sja1105_packing(buf, &entry->destports, 24, 20, size, op);
+ return size;
+}
+
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
+
+ if (entry->format == SJA1105_VL_FORMAT_PSFP) {
+ /* Interpreting vllupformat as 0 */
+ sja1105_packing(buf, &entry->destports,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->iscritical,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->macaddr,
+ 89, 42, size, op);
+ sja1105_packing(buf, &entry->vlanid,
+ 41, 30, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ sja1105_packing(buf, &entry->vlanprior,
+ 26, 24, size, op);
+ } else {
+ /* Interpreting vllupformat as 1 */
+ sja1105_packing(buf, &entry->egrmirr,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->ingrmirr,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->vlid,
+ 57, 42, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ }
+ return size;
+}
+
+static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 63, 63, size, op);
+ sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
+ sja1105_packing(buf, &entry->sharindx, 51, 42, size, op);
+ if (entry->type == 0) {
+ sja1105_packing(buf, &entry->bag, 41, 28, size, op);
+ sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
+ }
+ return size;
+}
+
size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -463,6 +539,22 @@ static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_retagging_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
+
+ sja1105_packing(buf, &entry->egr_port, 63, 59, size, op);
+ sja1105_packing(buf, &entry->ing_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vlan_ing, 53, 42, size, op);
+ sja1105_packing(buf, &entry->vlan_egr, 41, 30, size, op);
+ sja1105_packing(buf, &entry->do_not_learn, 29, 29, size, op);
+ sja1105_packing(buf, &entry->use_dest_ports, 28, 28, size, op);
+ sja1105_packing(buf, &entry->destports, 27, 23, size, op);
+ return size;
+}
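
For reference, packing one rule into its 8-byte on-wire blob with the function above would look like this sketch (field values hypothetical):

/* Illustrative only; values are hypothetical. */
static void sja1105_retagging_pack_example(void)
{
	u8 buf[SJA1105_SIZE_RETAGGING_ENTRY] = {0};
	struct sja1105_retagging_entry entry = {
		.ing_port = BIT(2),
		.egr_port = BIT(4),
		.vlan_ing = 100,
		.vlan_egr = 1124,
	};

	/* PACK serializes entry into buf at the bit offsets used above;
	 * UNPACK would perform the inverse.
	 */
	sja1105_retagging_entry_packing(buf, &entry, PACK);
}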
+
size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -510,6 +602,9 @@ static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr)
static u64 blk_id_map[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = BLKID_SCHEDULE,
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = BLKID_SCHEDULE_ENTRY_POINTS,
+ [BLK_IDX_VL_LOOKUP] = BLKID_VL_LOOKUP,
+ [BLK_IDX_VL_POLICING] = BLKID_VL_POLICING,
+ [BLK_IDX_VL_FORWARDING] = BLKID_VL_FORWARDING,
[BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP,
[BLK_IDX_L2_POLICING] = BLKID_L2_POLICING,
[BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP,
@@ -517,10 +612,12 @@ static u64 blk_id_map[BLK_IDX_MAX] = {
[BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
[BLK_IDX_SCHEDULE_PARAMS] = BLKID_SCHEDULE_PARAMS,
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = BLKID_SCHEDULE_ENTRY_POINTS_PARAMS,
+ [BLK_IDX_VL_FORWARDING_PARAMS] = BLKID_VL_FORWARDING_PARAMS,
[BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
[BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
[BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS,
[BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
+ [BLK_IDX_RETAGGING] = BLKID_RETAGGING,
[BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
};
@@ -533,6 +630,9 @@ const char *sja1105_static_config_error_msg[] = {
"schedule-table present, but one of "
"schedule-entry-points-table, schedule-parameters-table or "
"schedule-entry-points-parameters table is empty",
+ [SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION] =
+ "vl-lookup-table present, but one of vl-policing-table, "
+ "vl-forwarding-table or vl-forwarding-parameters-table is empty",
[SJA1105_MISSING_L2_POLICING_TABLE] =
"l2-policing-table needs to have at least one entry",
[SJA1105_MISSING_L2_FORWARDING_TABLE] =
@@ -560,14 +660,26 @@ static sja1105_config_valid_t
static_config_check_memory_size(const struct sja1105_table *tables)
{
const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
- int i, mem = 0;
+ const struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ int i, max_mem, mem = 0;
l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
for (i = 0; i < 8; i++)
mem += l2_fwd_params->part_spc[i];
- if (mem > SJA1105_MAX_FRAME_MEMORY)
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count) {
+ vl_fwd_params = tables[BLK_IDX_VL_FORWARDING_PARAMS].entries;
+ for (i = 0; i < 8; i++)
+ mem += vl_fwd_params->partspc[i];
+ }
+
+ if (tables[BLK_IDX_RETAGGING].entry_count)
+ max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
+ else
+ max_mem = SJA1105_MAX_FRAME_MEMORY;
+
+ if (mem > max_mem)
return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
return SJA1105_CONFIG_OK;
@@ -594,6 +706,32 @@ sja1105_static_config_check_valid(const struct sja1105_static_config *config)
if (!IS_FULL(BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS))
return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
}
+ if (tables[BLK_IDX_VL_LOOKUP].entry_count) {
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool has_critical_links = false;
+ int i;
+
+ vl_lookup = tables[BLK_IDX_VL_LOOKUP].entries;
+
+ for (i = 0; i < tables[BLK_IDX_VL_LOOKUP].entry_count; i++) {
+ if (vl_lookup[i].iscritical) {
+ has_critical_links = true;
+ break;
+ }
+ }
+
+ if (tables[BLK_IDX_VL_POLICING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+ }
if (tables[BLK_IDX_L2_POLICING].entry_count == 0)
return SJA1105_MISSING_L2_POLICING_TABLE;
@@ -703,6 +841,9 @@ sja1105_static_config_get_length(const struct sja1105_static_config *config)
struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {0},
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105et_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -735,6 +876,7 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105et_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -759,6 +901,12 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -781,6 +929,24 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105et_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -823,6 +989,12 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105et_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -847,6 +1019,12 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -859,6 +1037,9 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {0},
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -891,6 +1072,7 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -915,6 +1097,12 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -937,6 +1125,24 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -979,6 +1185,12 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -1003,6 +1215,12 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -1015,6 +1233,9 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+ [BLK_IDX_VL_LOOKUP] = {0},
+ [BLK_IDX_VL_POLICING] = {0},
+ [BLK_IDX_VL_FORWARDING] = {0},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -1047,6 +1268,7 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
},
[BLK_IDX_SCHEDULE_PARAMS] = {0},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -1071,6 +1293,12 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
@@ -1093,6 +1321,24 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -1135,6 +1381,12 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -1159,6 +1411,12 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index 8afafb6aef12..ee0f10062763 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -13,28 +13,38 @@
#define SJA1105_SIZE_TABLE_HEADER 12
#define SJA1105_SIZE_SCHEDULE_ENTRY 8
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 4
+#define SJA1105_SIZE_VL_LOOKUP_ENTRY 12
+#define SJA1105_SIZE_VL_POLICING_ENTRY 8
+#define SJA1105_SIZE_VL_FORWARDING_ENTRY 4
#define SJA1105_SIZE_L2_POLICING_ENTRY 8
#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8
#define SJA1105_SIZE_L2_FORWARDING_ENTRY 8
#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105_SIZE_RETAGGING_ENTRY 8
#define SJA1105_SIZE_XMII_PARAMS_ENTRY 4
#define SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY 12
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY 4
+#define SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY 12
#define SJA1105ET_SIZE_L2_LOOKUP_ENTRY 12
#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28
#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4
#define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40
#define SJA1105ET_SIZE_AVB_PARAMS_ENTRY 12
+#define SJA1105ET_SIZE_CBS_ENTRY 16
#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
#define SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY 16
+#define SJA1105PQRS_SIZE_CBS_ENTRY 20
/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
enum {
BLKID_SCHEDULE = 0x00,
BLKID_SCHEDULE_ENTRY_POINTS = 0x01,
+ BLKID_VL_LOOKUP = 0x02,
+ BLKID_VL_POLICING = 0x03,
+ BLKID_VL_FORWARDING = 0x04,
BLKID_L2_LOOKUP = 0x05,
BLKID_L2_POLICING = 0x06,
BLKID_VLAN_LOOKUP = 0x07,
@@ -42,16 +52,22 @@ enum {
BLKID_MAC_CONFIG = 0x09,
BLKID_SCHEDULE_PARAMS = 0x0A,
BLKID_SCHEDULE_ENTRY_POINTS_PARAMS = 0x0B,
+ BLKID_VL_FORWARDING_PARAMS = 0x0C,
BLKID_L2_LOOKUP_PARAMS = 0x0D,
BLKID_L2_FORWARDING_PARAMS = 0x0E,
BLKID_AVB_PARAMS = 0x10,
BLKID_GENERAL_PARAMS = 0x11,
+ BLKID_RETAGGING = 0x12,
+ BLKID_CBS = 0x13,
BLKID_XMII_PARAMS = 0x4E,
};
enum sja1105_blk_idx {
BLK_IDX_SCHEDULE = 0,
BLK_IDX_SCHEDULE_ENTRY_POINTS,
+ BLK_IDX_VL_LOOKUP,
+ BLK_IDX_VL_POLICING,
+ BLK_IDX_VL_FORWARDING,
BLK_IDX_L2_LOOKUP,
BLK_IDX_L2_POLICING,
BLK_IDX_VLAN_LOOKUP,
@@ -59,10 +75,13 @@ enum sja1105_blk_idx {
BLK_IDX_MAC_CONFIG,
BLK_IDX_SCHEDULE_PARAMS,
BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS,
+ BLK_IDX_VL_FORWARDING_PARAMS,
BLK_IDX_L2_LOOKUP_PARAMS,
BLK_IDX_L2_FORWARDING_PARAMS,
BLK_IDX_AVB_PARAMS,
BLK_IDX_GENERAL_PARAMS,
+ BLK_IDX_RETAGGING,
+ BLK_IDX_CBS,
BLK_IDX_XMII_PARAMS,
BLK_IDX_MAX,
/* Fake block indices that are only valid for dynamic access */
@@ -73,6 +92,9 @@ enum sja1105_blk_idx {
#define SJA1105_MAX_SCHEDULE_COUNT 1024
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT 2048
+#define SJA1105_MAX_VL_LOOKUP_COUNT 1024
+#define SJA1105_MAX_VL_POLICING_COUNT 1024
+#define SJA1105_MAX_VL_FORWARDING_COUNT 1024
#define SJA1105_MAX_L2_LOOKUP_COUNT 1024
#define SJA1105_MAX_L2_POLICING_COUNT 45
#define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096
@@ -80,13 +102,19 @@ enum sja1105_blk_idx {
#define SJA1105_MAX_MAC_CONFIG_COUNT 5
#define SJA1105_MAX_SCHEDULE_PARAMS_COUNT 1
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT 1
+#define SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT 1
#define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT 1
#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1
#define SJA1105_MAX_GENERAL_PARAMS_COUNT 1
+#define SJA1105_MAX_RETAGGING_COUNT 32
#define SJA1105_MAX_XMII_PARAMS_COUNT 1
#define SJA1105_MAX_AVB_PARAMS_COUNT 1
+#define SJA1105ET_MAX_CBS_COUNT 10
+#define SJA1105PQRS_MAX_CBS_COUNT 16
#define SJA1105_MAX_FRAME_MEMORY 929
+#define SJA1105_MAX_FRAME_MEMORY_RETAGGING 910
+#define SJA1105_VL_FRAME_MEMORY 100
#define SJA1105E_DEVICE_ID 0x9C00000Cull
#define SJA1105T_DEVICE_ID 0x9E00030Eull
@@ -257,11 +285,78 @@ struct sja1105_mac_config_entry {
u64 ingress;
};
+struct sja1105_retagging_entry {
+ u64 egr_port;
+ u64 ing_port;
+ u64 vlan_ing;
+ u64 vlan_egr;
+ u64 do_not_learn;
+ u64 use_dest_ports;
+ u64 destports;
+};
+
+struct sja1105_cbs_entry {
+ u64 port;
+ u64 prio;
+ u64 credit_hi;
+ u64 credit_lo;
+ u64 send_slope;
+ u64 idle_slope;
+};
+
struct sja1105_xmii_params_entry {
u64 phy_mac[5];
u64 xmii_mode[5];
};
+enum {
+ SJA1105_VL_FORMAT_PSFP = 0,
+ SJA1105_VL_FORMAT_ARINC664 = 1,
+};
+
+struct sja1105_vl_lookup_entry {
+ u64 format;
+ u64 port;
+ union {
+ /* SJA1105_VL_FORMAT_PSFP */
+ struct {
+ u64 destports;
+ u64 iscritical;
+ u64 macaddr;
+ u64 vlanid;
+ u64 vlanprior;
+ };
+ /* SJA1105_VL_FORMAT_ARINC664 */
+ struct {
+ u64 egrmirr;
+ u64 ingrmirr;
+ u64 vlid;
+ };
+ };
+ /* Not part of hardware structure */
+ unsigned long flow_cookie;
+};
+
+struct sja1105_vl_policing_entry {
+ u64 type;
+ u64 maxlen;
+ u64 sharindx;
+ u64 bag;
+ u64 jitter;
+};
+
+struct sja1105_vl_forwarding_entry {
+ u64 type;
+ u64 priority;
+ u64 partition;
+ u64 destports;
+};
+
+struct sja1105_vl_forwarding_params_entry {
+ u64 partspc[8];
+ u64 debugen;
+};
+
struct sja1105_table_header {
u64 block_id;
u64 len;
@@ -303,6 +398,7 @@ typedef enum {
SJA1105_CONFIG_OK = 0,
SJA1105_TTETHERNET_NOT_SUPPORTED,
SJA1105_INCORRECT_TTETHERNET_CONFIGURATION,
+ SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION,
SJA1105_MISSING_L2_POLICING_TABLE,
SJA1105_MISSING_L2_FORWARDING_TABLE,
SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE,
@@ -334,4 +430,26 @@ void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len);
void sja1105_packing(void *buf, u64 *val, int start, int end,
size_t len, enum packing_op op);
+/* Common implementations for the static and dynamic configs */
+size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+
#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index 77e547b4cd89..3aa1a8b5f766 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -7,7 +7,6 @@
#define SJA1105_TAS_CLKSRC_STANDALONE 1
#define SJA1105_TAS_CLKSRC_AS6802 2
#define SJA1105_TAS_CLKSRC_PTP 3
-#define SJA1105_TAS_MAX_DELTA BIT(19)
#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
#define work_to_sja1105_tas(d) \
@@ -15,22 +14,10 @@
#define tas_to_sja1105(d) \
container_of((d), struct sja1105_private, tas_data)
-/* This is not a preprocessor macro because the "ns" argument may or may not be
- * s64 at caller side. This ensures it is properly type-cast before div_s64.
- */
-static s64 ns_to_sja1105_delta(s64 ns)
-{
- return div_s64(ns, 200);
-}
-
-static s64 sja1105_delta_to_ns(s64 delta)
-{
- return delta * 200;
-}
-
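These two helpers are deleted from sja1105_tas.c but remain in use below, both here and in the new sja1105_vl.c, so they presumably move to a shared header such as sja1105.h (the relocation is not visible in this excerpt). A sketch of the relocated form, assuming they become static inlines; the switch schedules in 200 ns ticks:

	static inline s64 ns_to_sja1105_delta(s64 ns)
	{
		/* div_s64 keeps the conversion correct for 64-bit
		 * nanosecond values on 32-bit hosts
		 */
		return div_s64(ns, 200);
	}

	static inline s64 sja1105_delta_to_ns(s64 delta)
	{
		return delta * 200;
	}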
static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
{
struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
struct dsa_switch *ds = priv->ds;
s64 earliest_base_time = S64_MAX;
s64 latest_base_time = 0;
@@ -59,6 +46,19 @@ static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
}
}
+ if (!list_empty(&gating_cfg->entries)) {
+ tas_data->enabled = true;
+
+ if (max_cycle_time < gating_cfg->cycle_time)
+ max_cycle_time = gating_cfg->cycle_time;
+ if (latest_base_time < gating_cfg->base_time)
+ latest_base_time = gating_cfg->base_time;
+ if (earliest_base_time > gating_cfg->base_time) {
+ earliest_base_time = gating_cfg->base_time;
+ its_cycle_time = gating_cfg->cycle_time;
+ }
+ }
+
if (!tas_data->enabled)
return 0;
@@ -155,13 +155,14 @@ static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
* their "subschedule end index" (subscheind) equal to the last valid
* subschedule's end index (in this case 5).
*/
-static int sja1105_init_scheduling(struct sja1105_private *priv)
+int sja1105_init_scheduling(struct sja1105_private *priv)
{
struct sja1105_schedule_entry_points_entry *schedule_entry_points;
struct sja1105_schedule_entry_points_params_entry
*schedule_entry_points_params;
struct sja1105_schedule_params_entry *schedule_params;
struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
struct sja1105_schedule_entry *schedule;
struct sja1105_table *table;
int schedule_start_idx;
@@ -213,6 +214,11 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
}
}
+ if (!list_empty(&gating_cfg->entries)) {
+ num_entries += gating_cfg->num_entries;
+ num_cycles++;
+ }
+
/* Nothing to do */
if (!num_cycles)
return 0;
@@ -312,6 +318,42 @@ static int sja1105_init_scheduling(struct sja1105_private *priv)
cycle++;
}
+ if (!list_empty(&gating_cfg->entries)) {
+ struct sja1105_gate_entry *e;
+
+ /* Relative base time */
+ s64 rbt;
+
+ schedule_start_idx = k;
+ schedule_end_idx = k + gating_cfg->num_entries - 1;
+ rbt = future_base_time(gating_cfg->base_time,
+ gating_cfg->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
+
+ schedule_entry_points[cycle].subschindx = cycle;
+ schedule_entry_points[cycle].delta = entry_point_delta;
+ schedule_entry_points[cycle].address = schedule_start_idx;
+
+ for (i = cycle; i < 8; i++)
+ schedule_params->subscheind[i] = schedule_end_idx;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ schedule[k].delta = ns_to_sja1105_delta(e->interval);
+ schedule[k].destports = e->rule->vl.destports;
+ schedule[k].setvalid = true;
+ schedule[k].txen = true;
+ schedule[k].vlindex = e->rule->vl.sharindx;
+ schedule[k].winstindex = e->rule->vl.sharindx;
+ if (e->gate_state) /* Gate open */
+ schedule[k].winst = true;
+ else /* Gate closed */
+ schedule[k].winend = true;
+ k++;
+ }
+ }
+
return 0;
}
@@ -415,6 +457,54 @@ sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
return false;
}
+/* Check the tc-taprio configuration on @port for conflicts with the tc-gate
+ * global subschedule. If @port is -1, check it against all ports.
+ * To reuse the sja1105_tas_check_conflicts logic without refactoring it,
+ * convert the gating configuration to a dummy tc-taprio offload structure.
+ */
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ size_t num_entries = gating_cfg->num_entries;
+ struct tc_taprio_qopt_offload *dummy;
+ struct sja1105_gate_entry *e;
+ bool conflict;
+ int i = 0;
+
+ if (list_empty(&gating_cfg->entries))
+ return false;
+
+ dummy = kzalloc(sizeof(struct tc_taprio_sched_entry) * num_entries +
+ sizeof(struct tc_taprio_qopt_offload), GFP_KERNEL);
+ if (!dummy) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
+ return true;
+ }
+
+ dummy->num_entries = num_entries;
+ dummy->base_time = gating_cfg->base_time;
+ dummy->cycle_time = gating_cfg->cycle_time;
+
+ list_for_each_entry(e, &gating_cfg->entries, list)
+ dummy->entries[i++].interval = e->interval;
+
+ if (port != -1) {
+ conflict = sja1105_tas_check_conflicts(priv, port, dummy);
+ } else {
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ conflict = sja1105_tas_check_conflicts(priv, port,
+ dummy);
+ if (conflict)
+ break;
+ }
+ }
+
+ kfree(dummy);
+
+ return conflict;
+}
+
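As an aside, since struct tc_taprio_qopt_offload ends in a flexible array of entries, the open-coded size computation above could equivalently use the kernel's overflow-checked helper; a sketch, not part of this patch:

	/* from <linux/overflow.h> */
	dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);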
int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
struct tc_taprio_qopt_offload *admin)
{
@@ -473,6 +563,11 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
return -ERANGE;
}
+ if (sja1105_gating_check_conflicts(priv, port, NULL)) {
+ dev_err(ds->dev, "Conflict with tc-gate schedule\n");
+ return -ERANGE;
+ }
+
tas_data->offload[port] = taprio_offload_get(admin);
rc = sja1105_init_scheduling(priv);
@@ -779,6 +874,8 @@ void sja1105_tas_setup(struct dsa_switch *ds)
INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
tas_data->state = SJA1105_TAS_STATE_DISABLED;
tas_data->last_op = SJA1105_PTP_NONE;
+
+ INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
}
void sja1105_tas_teardown(struct dsa_switch *ds)
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
index b226c3dfd5b1..0c173ff51751 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.h
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -6,6 +6,10 @@
#include <net/pkt_sched.h>
+#define SJA1105_TAS_MAX_DELTA BIT(18)
+
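The constant moves into the header, now BIT(18) where sja1105_tas.c previously had BIT(19), so that sja1105_vl.c can also use it for gate intervals; the "Maximum interval is 52 ms" message in that file checks out against the 200 ns schedule tick, as this standalone arithmetic shows:

	#include <stdio.h>

	int main(void)
	{
		/* SJA1105_TAS_MAX_DELTA ticks at 200 ns per tick */
		long long max_ns = (1LL << 18) * 200;

		printf("%lld ns = %.4f ms\n", max_ns, max_ns / 1e6);
		/* prints: 52428800 ns = 52.4288 ms */
		return 0;
	}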
+struct sja1105_private;
+
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS)
enum sja1105_tas_state {
@@ -20,8 +24,23 @@ enum sja1105_ptp_op {
SJA1105_PTP_ADJUSTFREQ,
};
+struct sja1105_gate_entry {
+ struct list_head list;
+ struct sja1105_rule *rule;
+ s64 interval;
+ u8 gate_state;
+};
+
+struct sja1105_gating_config {
+ u64 cycle_time;
+ s64 base_time;
+ int num_entries;
+ struct list_head entries;
+};
+
struct sja1105_tas_data {
struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
+ struct sja1105_gating_config gating_cfg;
enum sja1105_tas_state state;
enum sja1105_ptp_op last_op;
struct work_struct tas_work;
@@ -42,6 +61,11 @@ void sja1105_tas_clockstep(struct dsa_switch *ds);
void sja1105_tas_adjfreq(struct dsa_switch *ds);
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack);
+
+int sja1105_init_scheduling(struct sja1105_private *priv);
+
#else
/* C doesn't allow empty structures, bah! */
@@ -63,6 +87,18 @@ static inline void sja1105_tas_clockstep(struct dsa_switch *ds) { }
static inline void sja1105_tas_adjfreq(struct dsa_switch *ds) { }
+static inline bool
+sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack)
+{
+ return true;
+}
+
+static inline int sja1105_init_scheduling(struct sja1105_private *priv)
+{
+ return 0;
+}
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */
#endif /* _SJA1105_TAS_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
new file mode 100644
index 000000000000..bdfd6c4e190d
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020, NXP Semiconductors
+ */
+#include <net/tc_act/tc_gate.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105_vl.h"
+
+#define SJA1105_SIZE_VL_STATUS 8
+
+/* The switch flow classification core implements TTEthernet, which 'thinks' in
+ * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
+ * However, it also has another operating mode (VLLUPFORMAT=0) where it acts
+ * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
+ * (Per-Stream Filtering and Policing), which is what this driver implements.
+ *
+ * VL Lookup
+ * Key = {DMAC && VLANID +---------+ Key = { (DMAC[47:16] & VLMASK ==
+ * && VLAN PCP | | VLMARKER)
+ * && INGRESS PORT} +---------+ (both fixed)
+ * (exact match, | && DMAC[15:0] == VLID
+ * all specified in rule) | (specified in rule)
+ * v && INGRESS PORT }
+ * ------------
+ * 0 (PSFP) / \ 1 (ARINC664)
+ * +-----------/ VLLUPFORMAT \----------+
+ * | \ (fixed) / |
+ * | \ / |
+ * 0 (forwarding) v ------------ |
+ * ------------ |
+ * / \ 1 (QoS classification) |
+ * +---/ ISCRITICAL \-----------+ |
+ * | \ (per rule) / | |
+ * | \ / VLID taken from VLID taken from
+ * v ------------ index of rule contents of rule
+ * select that matched that matched
+ * DESTPORTS | |
+ * | +---------+--------+
+ * | |
+ * | v
+ * | VL Forwarding
+ * | (indexed by VLID)
+ * | +---------+
+ * | +--------------| |
+ * | | select TYPE +---------+
+ * | v
+ * | 0 (rate ------------ 1 (time
+ * | constrained) / \ triggered)
+ * | +------/ TYPE \------------+
+ * | | \ (per VLID) / |
+ * | v \ / v
+ * | VL Policing ------------ VL Policing
+ * | (indexed by VLID) (indexed by VLID)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select SHARINDX select SHARINDX to
+ * | to rate-limit re-enter VL Forwarding
+ * | groups of VL's with new VLID for egress
+ * | to same quota |
+ * | | |
+ * | select MAXLEN -> exceed => drop select MAXLEN -> exceed => drop
+ * | | |
+ * | v v
+ * | VL Forwarding VL Forwarding
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select PRIORITY, select PRIORITY,
+ * | PARTITION, DESTPORTS PARTITION, DESTPORTS
+ * | | |
+ * | v v
+ * | VL Policing VL Policing
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | | |
+ * | v |
+ * | select BAG, -> exceed => drop |
+ * | JITTER v
+ * | | ----------------------------------------------
+ * | | / Reception Window is open for this VL \
+ * | | / (the Schedule Table executes an entry i \
+ * | | / M <= i < N, for which these conditions hold): \ no
+ * | | +----/ \-+
+ * | | |yes \ WINST[M] == 1 && WINSTINDEX[M] == VLID / |
+ * | | | \ WINEND[N] == 1 && WINSTINDEX[N] == VLID / |
+ * | | | \ / |
+ * | | | \ (the VL window has opened and not yet closed)/ |
+ * | | | ---------------------------------------------- |
+ * | | v v
+ * | | dispatch to DESTPORTS when the Schedule Table drop
+ * | | executes an entry i with TXEN == 1 && VLINDEX == i
+ * v v
+ * dispatch immediately to DESTPORTS
+ *
+ * The per-port classification key is always composed of {DMAC, VID, PCP} and
+ * is non-maskable. This 'looks like' the NULL stream identification function
+ * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
+ * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
+ * ID and PCP, and then the port-based defaults will be used.
+ *
+ * In TTEthernet, routing is something that needs to be done manually for each
+ * Virtual Link. So the flow action must always include one of:
+ * a. 'redirect', 'trap' or 'drop': select the egress port list
+ * Additionally, the following actions may be applied on a Virtual Link,
+ * turning it into 'critical' traffic:
+ * b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
+ * given by the maximum frame length, bandwidth allocation gap (BAG) and
+ * maximum jitter.
+ * c. 'gate': turn it into a time-triggered VL, which can only be received
+ * and forwarded according to a given schedule.
+ */
+
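To make the VLLUPFORMAT=1 branch of the diagram concrete, here is a minimal sketch of the ARINC664 key match, assuming VLMASK and VLMARKER are the fixed, global switch parameters the diagram refers to (illustrative only; the real match happens in hardware):

	#include <linux/types.h>

	static bool vl_matches_arinc664(u64 dmac, u32 vlmask, u32 vlmarker,
					u16 vlid)
	{
		/* DMAC[47:16] is compared under VLMASK against VLMARKER,
		 * and DMAC[15:0] carries the VLID specified in the rule.
		 */
		return ((dmac >> 16) & vlmask) == vlmarker &&
		       (dmac & 0xffff) == vlid;
	}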
+static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
+ struct sja1105_vl_lookup_entry *b)
+{
+ if (a->macaddr < b->macaddr)
+ return true;
+ if (a->macaddr > b->macaddr)
+ return false;
+ if (a->vlanid < b->vlanid)
+ return true;
+ if (a->vlanid > b->vlanid)
+ return false;
+ if (a->port < b->port)
+ return true;
+ if (a->port > b->port)
+ return false;
+ if (a->vlanprior < b->vlanprior)
+ return true;
+ if (a->vlanprior > b->vlanprior)
+ return false;
+ /* Keys are equal */
+ return false;
+}
+
+static int sja1105_init_virtual_links(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_vl_policing_entry *vl_policing;
+ struct sja1105_vl_forwarding_entry *vl_fwd;
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool have_critical_virtual_links = false;
+ struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ int num_virtual_links = 0;
+ int max_sharindx = 0;
+ int i, j, k;
+
+ /* Figure out the dimensioning of the problem */
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ /* Each VL lookup entry matches on a single ingress port */
+ num_virtual_links += hweight_long(rule->port_mask);
+
+ if (rule->vl.type != SJA1105_VL_NONCRITICAL)
+ have_critical_virtual_links = true;
+ if (max_sharindx < rule->vl.sharindx)
+ max_sharindx = rule->vl.sharindx;
+ }
+
+ if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
+ return -ENOSPC;
+ }
+
+ if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
+ return -ENOSPC;
+ }
+
+ max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;
+
+ /* Discard previous VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Nothing to do */
+ if (!num_virtual_links)
+ return 0;
+
+ /* Pre-allocate space in the static config tables */
+
+ /* VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ table->entries = kcalloc(num_virtual_links,
+ table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_virtual_links;
+ vl_lookup = table->entries;
+
+ k = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ unsigned long port;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+
+ for_each_set_bit(port, &rule->port_mask, SJA1105_NUM_PORTS) {
+ vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
+ vl_lookup[k].port = port;
+ vl_lookup[k].macaddr = rule->key.vl.dmac;
+ if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
+ vl_lookup[k].vlanid = rule->key.vl.vid;
+ vl_lookup[k].vlanprior = rule->key.vl.pcp;
+ } else {
+ u16 vid = dsa_8021q_rx_vid(priv->ds, port);
+
+ vl_lookup[k].vlanid = vid;
+ vl_lookup[k].vlanprior = 0;
+ }
+ /* For critical VLs, the DESTPORTS mask is taken from
+ * the VL Forwarding Table, so no point in putting it
+ * in the VL Lookup Table
+ */
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ vl_lookup[k].destports = rule->vl.destports;
+ else
+ vl_lookup[k].iscritical = true;
+ vl_lookup[k].flow_cookie = rule->cookie;
+ k++;
+ }
+ }
+
+ /* UM10944.pdf chapter 4.2.3 VL Lookup table:
+ * "the entries in the VL Lookup table must be sorted in ascending
+ * order (i.e. the smallest value must be loaded first) according to
+ * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
+ */
+ for (i = 0; i < num_virtual_links; i++) {
+ struct sja1105_vl_lookup_entry *a = &vl_lookup[i];
+
+ for (j = i + 1; j < num_virtual_links; j++) {
+ struct sja1105_vl_lookup_entry *b = &vl_lookup[j];
+
+ if (sja1105_vl_key_lower(b, a)) {
+ struct sja1105_vl_lookup_entry tmp = *a;
+
+ *a = *b;
+ *b = tmp;
+ }
+ }
+ }
+
+ if (!have_critical_virtual_links)
+ return 0;
+
+ /* VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_policing = table->entries;
+
+ /* VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_fwd = table->entries;
+
+ /* VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = 1;
+
+ for (i = 0; i < num_virtual_links; i++) {
+ unsigned long cookie = vl_lookup[i].flow_cookie;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ continue;
+ if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
+ int sharindx = rule->vl.sharindx;
+
+ vl_policing[i].type = 1;
+ vl_policing[i].sharindx = sharindx;
+ vl_policing[i].maxlen = rule->vl.maxlen;
+ vl_policing[sharindx].type = 1;
+
+ vl_fwd[i].type = 1;
+ vl_fwd[sharindx].type = 1;
+ vl_fwd[sharindx].priority = rule->vl.ipv;
+ vl_fwd[sharindx].partition = 0;
+ vl_fwd[sharindx].destports = rule->vl.destports;
+ }
+ }
+
+ sja1105_frame_memory_partitioning(priv);
+
+ return 0;
+}
+
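The sharindx indirection populated above is what lets several time-triggered VLs share one policing and forwarding budget; a hypothetical illustration (the cookies, keys and index value are invented for the example, remaining arguments elided):

	/* Two gate rules created with the same 'index' argument, e.g.
	 *
	 *	sja1105_vl_gate(priv, 0, extack, cookie_a, &key_a, 5, ...);
	 *	sja1105_vl_gate(priv, 1, extack, cookie_b, &key_b, 5, ...);
	 *
	 * both end up with rule->vl.sharindx == 5, so vl_policing[5] and
	 * vl_fwd[5] hold the shared MAXLEN, priority and DESTPORTS, while
	 * each rule keeps its own VL Lookup entry whose own policing slot
	 * points back at sharindx 5.
	 */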
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ int rc;
+
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
+ key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only redirect based on DMAC");
+ return -EOPNOTSUPP;
+ } else if (priv->vlan_state != SJA1105_VLAN_UNAWARE &&
+ key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only redirect based on {DMAC, VID, PCP}");
+ return -EOPNOTSUPP;
+ }
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ list_add(&rule->list, &priv->flow_block.rules);
+ }
+
+ rule->port_mask |= BIT(port);
+ if (append)
+ rule->vl.destports |= destports;
+ else
+ rule->vl.destports = destports;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
+}
+
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+ struct sja1105_rule *rule,
+ u8 gate_state, s64 entry_time,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gate_entry *e;
+ int rc;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->rule = rule;
+ e->gate_state = gate_state;
+ e->interval = entry_time;
+
+ if (list_empty(&gating_cfg->entries)) {
+ list_add(&e->list, &gating_cfg->entries);
+ } else {
+ struct sja1105_gate_entry *p;
+
+ list_for_each_entry(p, &gating_cfg->entries, list) {
+ if (p->interval == e->interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate conflict");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (e->interval < p->interval)
+ break;
+ }
+ list_add(&e->list, p->list.prev);
+ }
+
+ gating_cfg->num_entries++;
+
+ return 0;
+err:
+ kfree(e);
+ return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+ u64 cycle_time)
+{
+ struct sja1105_gate_entry *last_e;
+ struct sja1105_gate_entry *e;
+ struct list_head *prev;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ struct sja1105_gate_entry *p;
+
+ prev = e->list.prev;
+
+ if (prev == &gating_cfg->entries)
+ continue;
+
+ p = list_entry(prev, struct sja1105_gate_entry, list);
+ p->interval = e->interval - p->interval;
+ }
+ last_e = list_last_entry(&gating_cfg->entries,
+ struct sja1105_gate_entry, list);
+ if (last_e->list.prev != &gating_cfg->entries)
+ last_e->interval = cycle_time - last_e->interval;
+}
+
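Working the comment's example through once with an explicit cycle time (numbers are illustrative):

	/* Entries at absolute times {0, 5, 10, 15} with cycle_time = 20:
	 * each entry's interval becomes the gap to its successor
	 * (5, 5, 5), and the final entry wraps to the cycle boundary,
	 * 20 - 15 = 5, giving {5, 5, 5, 5}.
	 */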
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+ struct sja1105_gate_entry *e, *n;
+
+ list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+}
+
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ struct sja1105_rule *rule;
+ s64 max_cycle_time = 0;
+ s64 its_base_time = 0;
+ int i, rc = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ if (max_cycle_time < rule->vl.cycle_time) {
+ max_cycle_time = rule->vl.cycle_time;
+ its_base_time = rule->vl.base_time;
+ }
+ }
+
+ if (!max_cycle_time)
+ return 0;
+
+ dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+ max_cycle_time, its_base_time);
+
+ sja1105_free_gating_config(gating_cfg);
+
+ gating_cfg->base_time = its_base_time;
+ gating_cfg->cycle_time = max_cycle_time;
+ gating_cfg->num_entries = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ s64 time;
+ s64 rbt;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ /* Calculate the difference between this gating schedule's
+ * base time, and the base time of the gating schedule with the
+ * longest cycle time. We call it the relative base time (rbt).
+ */
+ rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+ its_base_time);
+ rbt -= its_base_time;
+
+ time = rbt;
+
+ for (i = 0; i < rule->vl.num_entries; i++) {
+ u8 gate_state = rule->vl.entries[i].gate_state;
+ s64 entry_time = time;
+
+ while (entry_time < max_cycle_time) {
+ rc = sja1105_insert_gate_entry(gating_cfg, rule,
+ gate_state,
+ entry_time,
+ extack);
+ if (rc)
+ goto err;
+
+ entry_time += rule->vl.cycle_time;
+ }
+ time += rule->vl.entries[i].interval;
+ }
+ }
+
+ sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+ return 0;
+err:
+ sja1105_free_gating_config(gating_cfg);
+ return rc;
+}
+
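A worked example of the relative base time, assuming future_base_time() advances base_time in whole cycles until the result is no earlier than its third argument:

	/* its_base_time = 1500 ns (owner of the longest cycle time),
	 * rule->vl.base_time = 1000 ns, rule->vl.cycle_time = 200 ns:
	 * future_base_time(1000, 200, 1500) = 1000 + 3 * 200 = 1600,
	 * so rbt = 1600 - 1500 = 100 and this rule's first gate event
	 * lands 100 ns into the composite gating cycle.
	 */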
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ int ipv = -1;
+ int i, rc;
+ s32 rem;
+
+ if (cycle_time_ext) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time extension not supported");
+ return -EOPNOTSUPP;
+ }
+
+ div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Base time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+ div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+ if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
+ key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only gate based on DMAC");
+ return -EOPNOTSUPP;
+ } else if (priv->vlan_state != SJA1105_VLAN_UNAWARE &&
+ key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only gate based on {DMAC, VID, PCP}");
+ return -EOPNOTSUPP;
+ }
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ list_add(&rule->list, &priv->flow_block.rules);
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
+ rule->vl.sharindx = index;
+ rule->vl.base_time = base_time;
+ rule->vl.cycle_time = cycle_time;
+ rule->vl.num_entries = num_entries;
+ rule->vl.entries = kcalloc(num_entries,
+ sizeof(struct action_gate_entry),
+ GFP_KERNEL);
+ if (!rule->vl.entries) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ div_s64_rem(entries[i].interval,
+ sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval must be multiple of 200 ns");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (!entries[i].interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval cannot be zero");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (ns_to_sja1105_delta(entries[i].interval) >
+ SJA1105_TAS_MAX_DELTA) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Maximum interval is 52 ms");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (entries[i].maxoctets != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload IntervalOctetMax");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (ipv == -1) {
+ ipv = entries[i].ipv;
+ } else if (ipv != entries[i].ipv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only support a single IPV per VL");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rule->vl.entries[i] = entries[i];
+ }
+
+ if (ipv == -1) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
+ ipv = key->vl.pcp;
+ else
+ ipv = 0;
+ }
+
+ /* TODO: support per-flow MTU */
+ rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+ rule->vl.ipv = ipv;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ rc = sja1105_compose_gating_subschedule(priv, extack);
+ if (rc)
+ goto out;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ goto out;
+
+ if (sja1105_gating_check_conflicts(priv, -1, extack)) {
+ NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
+ rc = -ERANGE;
+ goto out;
+ }
+
+out:
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule->vl.entries);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+static int sja1105_find_vlid(struct sja1105_private *priv, int port,
+ struct sja1105_key *key)
+{
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ struct sja1105_table *table;
+ int i;
+
+ if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
+ key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
+ return -1;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ vl_lookup = table->entries;
+
+ for (i = 0; i < table->entry_count; i++) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac &&
+ vl_lookup[i].vlanid == key->vl.vid &&
+ vl_lookup[i].vlanprior == key->vl.pcp)
+ return i;
+ } else {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac)
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
+ u64 unreleased;
+ u64 timingerr;
+ u64 lengtherr;
+ int vlid, rc;
+ u64 pkts;
+
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ return 0;
+
+ vlid = sja1105_find_vlid(priv, port, &rule->key);
+ if (vlid < 0)
+ return 0;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
+ SJA1105_SIZE_VL_STATUS);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
+ return rc;
+ }
+
+ sja1105_unpack(buf, &timingerr, 31, 16, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &unreleased, 15, 0, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &lengtherr, 47, 32, SJA1105_SIZE_VL_STATUS);
+
+ pkts = timingerr + unreleased + lengtherr;
+
+ flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts,
+ jiffies - rule->vl.stats.lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ rule->vl.stats.pkts = pkts;
+ rule->vl.stats.lastused = jiffies;
+
+ return 0;
+}
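For reference, the layout of the 8-byte VL status word as decoded above; the field meanings are inferred from the counter names and should be treated as approximate rather than authoritative:

	/* bits 47..32  LENGTHERR  - frames dropped for exceeding MAXLEN
	 * bits 31..16  TIMINGERR  - frames outside their reception window
	 * bits 15..0   UNRELEASED - frames never released by the schedule
	 *
	 * Their sum is what gets reported to tc as the flow's packet count.
	 */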
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.h b/drivers/net/dsa/sja1105/sja1105_vl.h
new file mode 100644
index 000000000000..173d78963fed
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020, NXP Semiconductors
+ */
+#ifndef _SJA1105_VL_H
+#define _SJA1105_VL_H
+
+#include "sja1105.h"
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL)
+
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append);
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack);
+
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries);
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack);
+
+#else
+
+static inline int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ unsigned long destports,
+ bool append)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_delete(struct sja1105_private *priv,
+ int port, struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time,
+ u64 cycle_time_ext, u32 num_entries,
+ struct action_gate_entry *entries)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL) */
+
+#endif /* _SJA1105_VL_H */
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index 0541785f9fee..5e54a5726aa4 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -89,7 +89,6 @@ static int vsc73xx_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vsc73xx_platform *vsc_platform;
- struct resource *res = NULL;
int ret;
vsc_platform = devm_kzalloc(dev, sizeof(*vsc_platform), GFP_KERNEL);
@@ -103,14 +102,7 @@ static int vsc73xx_platform_probe(struct platform_device *pdev)
vsc_platform->vsc.ops = &vsc73xx_platform_ops;
/* obtain I/O memory space */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
- ret = -ENXIO;
- return ret;
- }
-
- vsc_platform->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ vsc_platform->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vsc_platform->base_addr)) {
dev_err(&pdev->dev, "cannot request I/O memory space\n");
ret = -ENXIO;
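For context, the helper adopted here folds the two removed steps into one call; its in-tree definition is approximately:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}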
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index b762176a1406..139d0120f511 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -85,7 +85,6 @@
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/bitops.h>
-#include <linux/vermagic.h>
#include <linux/uaccess.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 90312fcd6319..47b4215bb93b 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -22,7 +22,6 @@
*/
-#include <linux/vermagic.h>
#define DRV_NAME "3c515"
#define CORKSCREW 1
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index a2b7f7ab8170..5984b7033999 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1149,7 +1149,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
print_info = (vortex_debug > 1);
if (print_info)
- pr_info("See Documentation/networking/device_drivers/3com/vortex.txt\n");
+ pr_info("See Documentation/networking/device_drivers/3com/vortex.rst\n");
pr_info("%s: 3Com %s %s at %p.\n",
print_name,
@@ -1954,7 +1954,7 @@ vortex_error(struct net_device *dev, int status)
dev->name, tx_status);
if (tx_status == 0x82) {
pr_err("Probably a duplex mismatch. See "
- "Documentation/networking/device_drivers/3com/vortex.txt\n");
+ "Documentation/networking/device_drivers/3com/vortex.rst\n");
}
dump_tx_ring(dev);
}
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 3a6fc99c6f32..7cc259893cb9 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -76,7 +76,7 @@ config VORTEX
"Hurricane" (3c555/3cSOHO) PCI
If you have such a card, say Y here. More specific information is in
- <file:Documentation/networking/device_drivers/3com/vortex.txt> and
+ <file:Documentation/networking/device_drivers/3com/vortex.rst> and
in the comments at the beginning of
<file:drivers/net/ethernet/3com/3c59x.c>.
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 42985a82321a..77d78b4c59c4 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -1,39 +1,43 @@
-/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */
-/*
- A Linux device driver for PCI NE2000 clones.
-
- Authors and other copyright holders:
- 1992-2000 by Donald Becker, NE2000 core and various modifications.
- 1995-1998 by Paul Gortmaker, core modifications and PCI support.
- Copyright 1993 assigned to the United States Government as represented
- by the Director, National Security Agency.
-
- This software may be used and distributed according to the terms of
- the GNU General Public License (GPL), incorporated herein by reference.
- Drivers based on or derived from this code fall under the GPL and must
- retain the authorship, copyright and license notice. This file is not
- a complete program and may only be used when the entire operating
- system is licensed under the GPL.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Issues remaining:
- People are making PCI ne2000 clones! Oh the horror, the horror...
- Limited full-duplex support.
-*/
+/* A Linux device driver for PCI NE2000 clones.
+ *
+ * Authors and other copyright holders:
+ * 1992-2000 by Donald Becker, NE2000 core and various modifications.
+ * 1995-1998 by Paul Gortmaker, core modifications and PCI support.
+ * Copyright 1993 assigned to the United States Government as represented
+ * by the Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ *
+ * The author may be reached as becker@scyld.com, or C/O
+ * Scyld Computing Corporation
+ * 410 Severn Ave., Suite 210
+ * Annapolis MD 21403
+ *
+ * Issues remaining:
+ * People are making PCI NE2000 clones! Oh the horror, the horror...
+ * Limited full-duplex support.
+ */
#define DRV_NAME "ne2k-pci"
+#define DRV_DESCRIPTION "PCI NE2000 clone driver"
+#define DRV_AUTHOR "Donald Becker / Paul Gortmaker"
#define DRV_VERSION "1.03"
#define DRV_RELDATE "9/22/2003"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* The user-configurable values.
- These may be modified when a driver module is loaded.*/
+ * These may be modified when a driver module is loaded.
+ */
+
+/* More are supported, limit only on options */
+#define MAX_UNITS 8
-#define MAX_UNITS 8 /* More are supported, limit only on options */
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
@@ -52,7 +56,7 @@ static int options[MAX_UNITS];
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
@@ -60,20 +64,14 @@ static int options[MAX_UNITS];
static u32 ne2k_msg_enable;
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
- KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
- " D. Becker/P. Gortmaker\n";
-
#if defined(__powerpc__)
#define inl_le(addr) le32_to_cpu(inl(addr))
#define inw_le(addr) le16_to_cpu(inw(addr))
#endif
-#define PFX DRV_NAME ": "
-
-MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
-MODULE_DESCRIPTION("PCI NE2000 clone driver");
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
module_param_named(msg_enable, ne2k_msg_enable, uint, 0444);
@@ -83,7 +81,8 @@ MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bit
MODULE_PARM_DESC(options, "Bit 5: full duplex");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
-/* Some defines that people can play with if so inclined. */
+/* Some defines that people can play with if so inclined.
+ */
/* Use 32 bit data-movement operations instead of 16 bit. */
#define USE_LONGIO
@@ -91,14 +90,18 @@ MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
/* Do we implement the read before write bugfix ? */
/* #define NE_RW_BUGFIX */
-/* Flags. We rename an existing ei_status field to store flags! */
-/* Thus only the low 8 bits are usable for non-init-time flags. */
+/* Flags. We rename an existing ei_status field to store flags!
+ * Thus only the low 8 bits are usable for non-init-time flags.
+ */
#define ne2k_flags reg0
+
enum {
- ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */
- FORCE_FDX=0x20, /* User override. */
- REALTEK_FDX=0x40, HOLTEK_FDX=0x80,
- STOP_PG_0x60=0x100,
+ /* Chip can do only 16/32-bit xfers. */
+ ONLY_16BIT_IO = 8, ONLY_32BIT_IO = 4,
+ /* User override. */
+ FORCE_FDX = 0x20,
+ REALTEK_FDX = 0x40, HOLTEK_FDX = 0x80,
+ STOP_PG_0x60 = 0x100,
};
enum ne2k_pci_chipsets {
@@ -120,7 +123,7 @@ static struct {
char *name;
int flags;
} pci_clone_list[] = {
- {"RealTek RTL-8029", REALTEK_FDX},
+ {"RealTek RTL-8029(AS)", REALTEK_FDX},
{"Winbond 89C940", 0},
{"Compex RL2000", 0},
{"KTI ET32P2", 0},
@@ -149,13 +152,14 @@ static const struct pci_device_id ne2k_pci_tbl[] = {
{ 0x8c4a, 0x1980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940_8c4a },
{ 0, }
};
+
MODULE_DEVICE_TABLE(pci, ne2k_pci_tbl);
/* ---- No user-serviceable parts below ---- */
#define NE_BASE (dev->base_addr)
-#define NE_CMD 0x00
+#define NE_CMD 0x00
#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
#define NE_IO_EXTENT 0x20
@@ -168,18 +172,20 @@ static int ne2k_pci_open(struct net_device *dev);
static int ne2k_pci_close(struct net_device *dev);
static void ne2k_pci_reset_8390(struct net_device *dev);
-static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
+static void ne2k_pci_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
static void ne2k_pci_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
+ struct sk_buff *skb, int ring_offset);
static void ne2k_pci_block_output(struct net_device *dev, const int count,
- const unsigned char *buf, const int start_page);
+ const unsigned char *buf,
+ const int start_page);
static const struct ethtool_ops ne2k_pci_ethtool_ops;
/* There is no room in the standard 8390 structure for extra info we need,
- so we build a meta/outer-wrapper structure.. */
+ * so we build a meta/outer-wrapper structure.
+ */
struct ne2k_pci_card {
struct net_device *dev;
struct pci_dev *pci_dev;
@@ -187,18 +193,17 @@ struct ne2k_pci_card {
-/*
- NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
- buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
- 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
- detected by their SA prefix.
-
- Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
- mode results in doubled values, which can be detected and compensated for.
-
- The probe is also responsible for initializing the card and filling
- in the 'dev' and 'ei_status' structures.
-*/
+/* NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
+ * buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
+ * 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
+ * detected by their SA prefix.
+ *
+ * Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ * mode results in doubled values, which can be detected and compensated for.
+ *
+ * The probe is also responsible for initializing the card and filling
+ * in the 'dev' and 'ei_status' structures.
+ */
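A minimal sketch of the doubled-value compensation the comment describes, assuming the raw PROM dump was read in byte-wide mode (the in-tree 8390 core performs the real fixup):

	/* A word-wide card read in byte-wide mode returns each PROM byte
	 * twice (0x57 0x57 becomes 0x57 0x57 0x57 0x57), so keep every
	 * other byte to recover the 16-byte station address PROM.
	 */
	for (i = 0; i < 16; i++)
		SA_prom[i] = SA_prom[i + i];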
static const struct net_device_ops ne2k_netdev_ops = {
.ndo_open = ne2k_pci_open,
@@ -208,7 +213,7 @@ static const struct net_device_ops ne2k_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
#endif
@@ -227,28 +232,21 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
int flags = pci_clone_list[chip_idx].flags;
struct ei_device *ei_local;
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
fnd_cnt++;
- i = pci_enable_device (pdev);
+ i = pci_enable_device(pdev);
if (i)
return i;
- ioaddr = pci_resource_start (pdev, 0);
+ ioaddr = pci_resource_start(pdev, 0);
irq = pdev->irq;
- if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
+ if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) == 0)) {
dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
goto err_out;
}
- if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
+ if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) {
dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
NE_IO_EXTENT, ioaddr);
goto err_out;
@@ -261,14 +259,17 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
/* Do a preliminary verification that we have a 8390. */
{
int regd;
- outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+
+ outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
regd = inb(ioaddr + 0x0d);
outb(0xff, ioaddr + 0x0d);
- outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
- inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
+ /* Clear the counter by reading. */
+ inb(ioaddr + EN0_COUNTER0);
if (inb(ioaddr + EN0_COUNTER0) != 0) {
outb(reg0, ioaddr);
- outb(regd, ioaddr + 0x0d); /* Restore the old values. */
+ /* Restore the old values. */
+ outb(regd, ioaddr + 0x0d);
goto err_out_free_res;
}
}
@@ -291,9 +292,9 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
- /* This looks like a horrible timing loop, but it should never take
- more than a few cycles.
- */
+ /* This looks like a horrible timing loop, but it should never
+ * take more than a few cycles.
+ */
while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
/* Limit wait: '2' avoids jiffy roll-over. */
if (jiffies - reset_start_time > 2) {
@@ -301,42 +302,53 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
"Card failure (no reset ack).\n");
goto err_out_free_netdev;
}
-
- outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ /* Ack all intr. */
+ outb(0xff, ioaddr + EN0_ISR);
}
/* Read the 16 bytes of station address PROM.
- We must first initialize registers, similar to NS8390_init(eifdev, 0).
- We can't reliably read the SAPROM address without this.
- (I learned the hard way!). */
+ * We must first initialize registers, similar
+ * to NS8390_init(eifdev, 0).
+ * We can't reliably read the SAPROM address without this.
+ * (I learned the hard way!).
+ */
{
struct {unsigned char value, offset; } program_seq[] = {
- {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
- {0x49, EN0_DCFG}, /* Set word-wide access. */
- {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ /* Select page 0 */
+ {E8390_NODMA + E8390_PAGE0 + E8390_STOP, E8390_CMD},
+ /* Set word-wide access */
+ {0x49, EN0_DCFG},
+ /* Clear the count regs. */
+ {0x00, EN0_RCNTLO},
{0x00, EN0_RCNTHI},
- {0x00, EN0_IMR}, /* Mask completion irq. */
+ /* Mask completion IRQ */
+ {0x00, EN0_IMR},
{0xFF, EN0_ISR},
- {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
- {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ /* 0x20 Set to monitor */
+ {E8390_RXOFF, EN0_RXCR},
+ /* 0x02 and loopback mode */
+ {E8390_TXOFF, EN0_TXCR},
{32, EN0_RCNTLO},
{0x00, EN0_RCNTHI},
- {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ /* DMA starting at 0x0000 */
+ {0x00, EN0_RSARLO},
{0x00, EN0_RSARHI},
{E8390_RREAD+E8390_START, E8390_CMD},
};
for (i = 0; i < ARRAY_SIZE(program_seq); i++)
- outb(program_seq[i].value, ioaddr + program_seq[i].offset);
+ outb(program_seq[i].value,
+ ioaddr + program_seq[i].offset);
}
/* Note: all PCI cards have at least 16 bit access, so we don't have
- to check for 8 bit cards. Most cards permit 32 bit access. */
+ * to check for 8 bit cards. Most cards permit 32 bit access.
+ */
if (flags & ONLY_32BIT_IO) {
for (i = 0; i < 4 ; i++)
((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT));
} else
- for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++)
+ for (i = 0; i < 32 /* sizeof(SA_prom) */; i++)
SA_prom[i] = inb(ioaddr + NE_DATAPORT);
/* We always set the 8390 registers for word mode. */
@@ -356,7 +368,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
ei_status.word16 = 1;
ei_status.ne2k_flags = flags;
if (fnd_cnt < MAX_UNITS) {
- if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX))
+ if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX))
ei_status.ne2k_flags |= FORCE_FDX;
}
@@ -388,16 +400,15 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
return 0;
err_out_free_netdev:
- free_netdev (dev);
+ free_netdev(dev);
err_out_free_res:
- release_region (ioaddr, NE_IO_EXTENT);
+ release_region(ioaddr, NE_IO_EXTENT);
err_out:
pci_disable_device(pdev);
return -ENODEV;
}
-/*
- * Magic incantation sequence for full duplex on the supported cards.
+/* Magic incantation sequence for full duplex on the supported cards.
*/
static inline int set_realtek_fdx(struct net_device *dev)
{
@@ -431,7 +442,9 @@ static int ne2k_pci_set_fdx(struct net_device *dev)
static int ne2k_pci_open(struct net_device *dev)
{
- int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED, dev->name, dev);
+ int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED,
+ dev->name, dev);
+
if (ret)
return ret;
@@ -450,7 +463,8 @@ static int ne2k_pci_close(struct net_device *dev)
}
/* Hard reset the card. This used to pause for the same period that a
- 8390 reset command required, but that shouldn't be necessary. */
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
static void ne2k_pci_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
@@ -467,31 +481,34 @@ static void ne2k_pci_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (jiffies - reset_start_time > 2) {
- netdev_err(dev, "ne2k_pci_reset_8390() did not complete.\n");
+ netdev_err(dev, "%s did not complete.\n", __func__);
break;
}
- outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+ /* Ack intr. */
+ outb(ENISR_RESET, NE_BASE + EN0_ISR);
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
-static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+static void ne2k_pci_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
{
long nic_base = dev->base_addr;
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see.
+ */
if (ei_status.dmaing) {
- netdev_err(dev, "DMAing conflict in ne2k_pci_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in %s [DMAstat:%d][irqlock:%d].\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
- outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
outb(0, nic_base + EN0_RCNTHI);
outb(0, nic_base + EN0_RSARLO); /* On page boundary */
@@ -499,20 +516,22 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *
outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
- insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ insw(NE_BASE + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr) >> 1);
} else {
- *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT));
+ *(u32 *)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT));
le16_to_cpus(&hdr->count);
}
-
- outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ /* Ack intr. */
+ outb(ENISR_RDC, nic_base + EN0_ISR);
ei_status.dmaing &= ~0x01;
}
/* Block input and output, similar to the Crynwr packet driver. If you
- are porting to a new ethercard, look at the packet driver source for hints.
- The NEx000 doesn't share the on-board packet memory -- you have to put
- the packet out through the "remote DMA" dataport using outb. */
+ * are porting to a new ethercard, look at the packet driver source for hints.
+ * The NEx000 doesn't share the on-board packet memory -- you have to put
+ * the packet out through the "remote DMA" dataport using outb.
+ */
static void ne2k_pci_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
@@ -520,30 +539,30 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
long nic_base = dev->base_addr;
char *buf = skb->data;
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see.
+ */
if (ei_status.dmaing) {
- netdev_err(dev, "DMAing conflict in ne2k_pci_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
if (ei_status.ne2k_flags & ONLY_32BIT_IO)
count = (count + 3) & 0xFFFC;
- outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
outb(count & 0xff, nic_base + EN0_RCNTLO);
outb(count >> 8, nic_base + EN0_RCNTHI);
outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
outb(ring_offset >> 8, nic_base + EN0_RSARHI);
- outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ outb(E8390_RREAD + E8390_START, nic_base + NE_CMD);
if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
- insw(NE_BASE + NE_DATAPORT,buf,count>>1);
- if (count & 0x01) {
+ insw(NE_BASE + NE_DATAPORT, buf, count >> 1);
+ if (count & 0x01)
buf[count-1] = inb(NE_BASE + NE_DATAPORT);
- }
} else {
- insl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ insl(NE_BASE + NE_DATAPORT, buf, count >> 2);
if (count & 3) {
buf += count & ~3;
if (count & 2) {
@@ -556,30 +575,32 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
*buf = inb(NE_BASE + NE_DATAPORT);
}
}
-
- outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ /* Ack intr. */
+ outb(ENISR_RDC, nic_base + EN0_ISR);
ei_status.dmaing &= ~0x01;
}
static void ne2k_pci_block_output(struct net_device *dev, int count,
- const unsigned char *buf, const int start_page)
+ const unsigned char *buf, const int start_page)
{
long nic_base = NE_BASE;
unsigned long dma_start;
/* On little-endian it's always safe to round the count up for
- word writes. */
+ * word writes.
+ */
if (ei_status.ne2k_flags & ONLY_32BIT_IO)
count = (count + 3) & 0xFFFC;
else
if (count & 0x01)
count++;
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see.
+ */
if (ei_status.dmaing) {
- netdev_err(dev, "DMAing conflict in ne2k_pci_block_output."
- "[DMAstat:%d][irqlock:%d]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -588,9 +609,10 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
#ifdef NE8390_RW_BUGFIX
/* Handle the read-before-write bug the same way as the
- Crynwr packet driver -- the NatSemi method doesn't work.
- Actually this doesn't always work either, but if you have
- problems with your NEx000 this is better than nothing! */
+ * Crynwr packet driver -- the NatSemi method doesn't work.
+ * Actually this doesn't always work either, but if you have
+ * problems with your NEx000 this is better than nothing!
+ */
outb(0x42, nic_base + EN0_RCNTLO);
outb(0x00, nic_base + EN0_RCNTHI);
outb(0x42, nic_base + EN0_RSARLO);
@@ -599,16 +621,16 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
#endif
outb(ENISR_RDC, nic_base + EN0_ISR);
- /* Now the normal output. */
+ /* Now the normal output. */
outb(count & 0xff, nic_base + EN0_RCNTLO);
outb(count >> 8, nic_base + EN0_RCNTHI);
outb(0x00, nic_base + EN0_RSARLO);
outb(start_page, nic_base + EN0_RSARHI);
outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
- outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ outsw(NE_BASE + NE_DATAPORT, buf, count >> 1);
} else {
- outsl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ outsl(NE_BASE + NE_DATAPORT, buf, count >> 2);
if (count & 3) {
buf += count & ~3;
if (count & 2) {
@@ -623,14 +645,15 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
dma_start = jiffies;
while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
- if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
+ /* Avoid clock roll-over. */
+ if (jiffies - dma_start > 2) {
netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne2k_pci_reset_8390(dev);
- NS8390_init(dev,1);
+ NS8390_init(dev, 1);
break;
}
-
- outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ /* Ack intr. */
+ outb(ENISR_RDC, nic_base + EN0_ISR);
ei_status.dmaing &= ~0x01;
}
@@ -640,9 +663,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
struct ei_device *ei = netdev_priv(dev);
struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static u32 ne2k_pci_get_msglevel(struct net_device *dev)
@@ -677,9 +700,9 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
}
#ifdef CONFIG_PM
-static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
+static int ne2k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
netif_device_detach(dev);
pci_save_state(pdev);
@@ -689,9 +712,9 @@ static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int ne2k_pci_resume (struct pci_dev *pdev)
+static int ne2k_pci_resume(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
int rc;
pci_set_power_state(pdev, PCI_D0);
@@ -718,24 +741,20 @@ static struct pci_driver ne2k_driver = {
#ifdef CONFIG_PM
.suspend = ne2k_pci_suspend,
.resume = ne2k_pci_resume,
-#endif /* CONFIG_PM */
+#endif
};
static int __init ne2k_pci_init(void)
{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
return pci_register_driver(&ne2k_driver);
}
static void __exit ne2k_pci_cleanup(void)
{
- pci_unregister_driver (&ne2k_driver);
+ pci_unregister_driver(&ne2k_driver);
}
module_init(ne2k_pci_init);
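The get_drvinfo hunk above also swaps the deprecated strlcpy() for strscpy(). A minimal sketch of the difference (illustrative only, not part of the patch): strscpy() returns the number of bytes copied, or -E2BIG on truncation, whereas strlcpy() returns the full source length and can read past an unterminated source.

	char buf[8];
	ssize_t n = strscpy(buf, "a-long-driver-name", sizeof(buf));

	if (n == -E2BIG)
		pr_warn("name truncated to \"%s\"\n", buf);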
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 2db42211329f..a64191fc2af9 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -45,7 +45,6 @@
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <linux/vermagic.h>
/*
* The current frame processor firmware fails to checksum a fragment
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 1b19385ad8a9..865892c1f23f 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -714,11 +714,11 @@ static int et131x_init_eeprom(struct et131x_adapter *adapter)
* gather additional information that normally would
* come from the eeprom, like MAC Address
*/
- adapter->has_eeprom = 0;
+ adapter->has_eeprom = false;
return -EIO;
}
}
- adapter->has_eeprom = 1;
+ adapter->has_eeprom = true;
/* Read the EEPROM for information regarding LED behavior. Refer to
* et131x_xcvr_init() for its use.
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 18d3b4340bd4..b3b8a8010142 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -417,7 +417,7 @@ static void emac_timeout(struct net_device *dev, unsigned int txqueue)
/* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
unsigned long channel;
@@ -425,7 +425,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
channel = db->tx_fifo_stat & 3;
if (channel == 3)
- return 1;
+ return NETDEV_TX_BUSY;
channel = (channel == 1 ? 1 : 0);
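This conversion (and the matching altera_tse hunk below) exists because .ndo_start_xmit must return netdev_tx_t; a bare 1 happened to equal NETDEV_TX_BUSY but defeated type checking. A minimal sketch of the contract, with my_priv and my_hw_queue_full() as hypothetical names:

	static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct my_priv *p = netdev_priv(dev);

		if (my_hw_queue_full(p))
			return NETDEV_TX_BUSY;	/* core requeues the skb */

		/* ... hand the skb to hardware ... */
		return NETDEV_TX_OK;
	}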
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1671c1f36691..907125abef2c 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -554,7 +554,7 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
* physically contiguous fragment starting at
* skb->data, for length of skb_headlen(skb).
*/
-static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
unsigned int txsize = priv->tx_ring_size;
@@ -562,7 +562,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tse_buffer *buffer = NULL;
int nfrags = skb_shinfo(skb)->nr_frags;
unsigned int nopaged_len = skb_headlen(skb);
- enum netdev_tx ret = NETDEV_TX_OK;
+ netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
spin_lock_bh(&priv->tx_lock);
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 8baf847e8622..336742f6e3c3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -404,6 +404,10 @@ struct ena_admin_basic_stats {
u32 rx_drops_low;
u32 rx_drops_high;
+
+ u32 tx_drops_low;
+
+ u32 tx_drops_high;
};
struct ena_admin_acq_get_stats_resp {
@@ -764,8 +768,8 @@ enum ena_admin_os_type {
ENA_ADMIN_OS_DPDK = 3,
ENA_ADMIN_OS_FREEBSD = 4,
ENA_ADMIN_OS_IPXE = 5,
- ENA_ADMIN_OS_ESXI = 6,
- ENA_ADMIN_OS_GROUPS_NUM = 6,
+ ENA_ADMIN_OS_ESXI = 6,
+ ENA_ADMIN_OS_GROUPS_NUM = 6,
};
struct ena_admin_host_info {
@@ -809,7 +813,8 @@ struct ena_admin_host_info {
u16 reserved;
- /* 1 :0 : reserved
+ /* 0 : reserved
+ * 1 : rx_offset
* 2 : interrupt_moderation
* 31:3 : reserved
*/
@@ -1017,6 +1022,10 @@ struct ena_admin_aenq_keep_alive_desc {
u32 rx_drops_low;
u32 rx_drops_high;
+
+ u32 tx_drops_low;
+
+ u32 tx_drops_high;
};
struct ena_admin_ena_mmio_req_read_less_resp {
@@ -1116,6 +1125,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT 1
+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1)
#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2
#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2)
@@ -1125,4 +1136,4 @@ struct ena_admin_ena_mmio_req_read_less_resp {
/* aenq_link_change_desc */
#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
-#endif /*_ENA_ADMIN_H_ */
+#endif /* _ENA_ADMIN_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index a250046b8e18..432f143559a1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -62,7 +62,9 @@
#define ENA_REGS_ADMIN_INTR_MASK 1
-#define ENA_POLL_MS 5
+#define ENA_MIN_ADMIN_POLL_US 100
+
+#define ENA_MAX_ADMIN_POLL_US 5000
/*****************************************************************************/
/*****************************************************************************/
@@ -200,17 +202,17 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
u16 command_id, bool capture)
{
- if (unlikely(!queue->comp_ctx)) {
- pr_err("Completion context is NULL\n");
- return NULL;
- }
-
if (unlikely(command_id >= queue->q_depth)) {
pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
command_id, queue->q_depth);
return NULL;
}
+ if (unlikely(!queue->comp_ctx)) {
+ pr_err("Completion context is NULL\n");
+ return NULL;
+ }
+
if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
pr_err("Completion context is occupied\n");
return NULL;
@@ -375,7 +377,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
io_sq->bounce_buf_ctrl.next_to_use = 0;
size = io_sq->bounce_buf_ctrl.buffer_size *
- io_sq->bounce_buf_ctrl.buffers_num;
+ io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
@@ -523,9 +525,6 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
if (unlikely(comp_status != 0))
pr_err("admin command failed[%u]\n", comp_status);
- if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
- return -EINVAL;
-
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
return 0;
@@ -540,7 +539,14 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
return -EINVAL;
}
- return 0;
+ return -EINVAL;
+}
+
+static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+{
+ delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
+ delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
+ usleep_range(delay_us, 2 * delay_us);
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
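Worked example of the backoff helper introduced above, assuming the minimum poll delay of 100 us (ENA_MIN_ADMIN_POLL_US) and the 5000 us clamp (ENA_MAX_ADMIN_POLL_US) defined earlier in this patch:

	/*
	 * exp:       0    1    2    3    4     5     6 ...
	 * delay_us:  100  200  400  800  1600  3200  5000 (clamped)
	 *
	 * usleep_range(delay_us, 2 * delay_us) then sleeps somewhere in
	 * that window, so polling backs off instead of waking at a
	 * fixed 5 ms period as the old msleep(ENA_POLL_MS) did.
	 */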
@@ -549,6 +555,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
unsigned long flags = 0;
unsigned long timeout;
int ret;
+ u32 exp = 0;
timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
@@ -572,7 +579,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- msleep(ENA_POLL_MS);
+ ena_delay_exponential_backoff_us(exp++,
+ admin_queue->ena_dev->ena_min_poll_delay_us);
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -702,8 +710,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 /* The desc list entry size should be a whole multiple of 8
* This requirement comes from __iowrite64_copy()
*/
- pr_err("illegal entry size %d\n",
- llq_info->desc_list_entry_size);
+ pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size);
return -EINVAL;
}
@@ -775,7 +782,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (admin_queue->auto_polling)
admin_queue->polling = true;
} else {
- pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
+ pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
}
/* Check if shifted to polling mode.
@@ -943,12 +950,13 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
u16 exp_state)
{
- u32 val, i;
+ u32 val, exp = 0;
+ unsigned long timeout_stamp;
- /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
- timeout = (timeout * 100) / ENA_POLL_MS;
+ /* Convert timeout from resolution of 100ms to us resolution. */
+ timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);
- for (i = 0; i < timeout; i++) {
+ while (1) {
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
@@ -960,10 +968,11 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
exp_state)
return 0;
- msleep(ENA_POLL_MS);
- }
+ if (time_is_before_jiffies(timeout_stamp))
+ return -ETIME;
- return -ETIME;
+ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+ }
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
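The rewrite above replaces a loop-count timeout with a jiffies deadline. A minimal sketch of the pattern, with condition_met() as a hypothetical placeholder:

	unsigned long deadline = jiffies + usecs_to_jiffies(timeout_us);

	while (!condition_met()) {
		if (time_is_before_jiffies(deadline))
			return -ETIME;		/* deadline passed; wrap-safe check */
		usleep_range(100, 200);
	}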
@@ -1067,16 +1076,10 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_get_feat_resp get_resp;
- int rc;
- rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_HASH_FUNCTION,
- ena_dev->rss.hash_key_dma_addr,
- sizeof(ena_dev->rss.hash_key), 0);
- if (unlikely(rc)) {
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_FUNCTION))
return -EOPNOTSUPP;
- }
rss->hash_key =
dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
@@ -1290,13 +1293,9 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 intr_delay_resolution)
{
- /* Initial value of intr_delay_resolution might be 0 */
- u16 prev_intr_delay_resolution =
- ena_dev->intr_delay_resolution ?
- ena_dev->intr_delay_resolution :
- ENA_DEFAULT_INTR_DELAY_RESOLUTION;
+ u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
- if (!intr_delay_resolution) {
+ if (unlikely(!intr_delay_resolution)) {
pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
}
@@ -1450,11 +1449,13 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
unsigned long flags = 0;
+ u32 exp = 0;
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- msleep(ENA_POLL_MS);
+ ena_delay_exponential_backoff_us(exp++,
+ ena_dev->ena_min_poll_delay_us);
spin_lock_irqsave(&admin_queue->q_lock, flags);
}
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -1802,6 +1803,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
if (ret)
goto error;
+ admin_queue->ena_dev = ena_dev;
admin_queue->running_state = true;
return 0;
@@ -2009,7 +2011,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
- unsigned long long timestamp;
+ u64 timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
u8 phase;
@@ -2027,9 +2029,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
*/
dma_rmb();
- timestamp =
- (unsigned long long)aenq_common->timestamp_low |
- ((unsigned long long)aenq_common->timestamp_high << 32);
+ timestamp = (u64)aenq_common->timestamp_low |
+ ((u64)aenq_common->timestamp_high << 32);
pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrom, timestamp);
@@ -2059,8 +2060,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head,
- dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2282,12 +2282,14 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
enum ena_admin_hash_functions func,
const u8 *key, u16 key_len, u32 init_val)
{
- struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
struct ena_admin_get_feat_resp get_resp;
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- rss->hash_key;
+ enum ena_admin_hash_functions old_func;
+ struct ena_rss *rss = &ena_dev->rss;
int rc;
+ hash_key = rss->hash_key;
+
/* Make sure size is a mult of DWs */
if (unlikely(key_len & 0x3))
return -EINVAL;
@@ -2299,7 +2301,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+ if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
pr_err("Flow hash function %d isn't supported\n", func);
return -EOPNOTSUPP;
}
@@ -2325,26 +2327,27 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return -EINVAL;
}
+ old_func = rss->hash_func;
rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
/* Restore the old function */
if (unlikely(rc))
- ena_com_get_hash_function(ena_dev, NULL, NULL);
+ rss->hash_func = old_func;
return rc;
}
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func,
- u8 *key)
+ enum ena_admin_hash_functions *func)
{
struct ena_rss *rss = &ena_dev->rss;
struct ena_admin_get_feat_resp get_resp;
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- rss->hash_key;
int rc;
+ if (unlikely(!func))
+ return -EINVAL;
+
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
rss->hash_key_dma_addr,
@@ -2357,8 +2360,15 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (rss->hash_func)
rss->hash_func--;
- if (func)
- *func = rss->hash_func;
+ *func = rss->hash_func;
+
+ return 0;
+}
+
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
+{
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ ena_dev->rss.hash_key;
if (key)
memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
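A hypothetical caller sketch for the split accessors introduced above; a caller that only needs the hash function (as ena_get_rxfh() now does when probing for support) no longer has to fetch the key as well:

	enum ena_admin_hash_functions func;
	u8 key[ENA_HASH_KEY_SIZE];
	int rc;

	rc = ena_com_get_hash_function(ena_dev, &func);
	if (rc)
		return rc;

	rc = ena_com_get_hash_key(ena_dev, key);
	if (rc)
		return rc;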
@@ -2641,10 +2651,10 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
* ignore this error and have indirection table support only.
*/
rc = ena_com_hash_key_allocate(ena_dev);
- if (unlikely(rc) && rc != -EOPNOTSUPP)
- goto err_hash_key;
- else if (rc != -EOPNOTSUPP)
+ if (likely(!rc))
ena_com_hash_key_fill_default_key(ena_dev);
+ else if (rc != -EOPNOTSUPP)
+ goto err_hash_key;
rc = ena_com_hash_ctrl_init(ena_dev);
if (unlikely(rc))
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 469f298199a7..bc187adf54e4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -54,9 +54,9 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define ENA_MAX_NUM_IO_QUEUES 128U
+#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need two queues for each IO (one for Tx and one for Rx) */
-#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
+#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
#define ENA_MAX_HANDLERS 256
@@ -73,13 +73,15 @@
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
-#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
-#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
-#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
+#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
-#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+#define ENA_HASH_KEY_SIZE 40
-#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+
+#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
struct ena_llq_configurations {
enum ena_admin_llq_header_location llq_header_location;
@@ -237,6 +239,7 @@ struct ena_com_stats_admin {
struct ena_com_admin_queue {
void *q_dmadev;
+ struct ena_com_dev *ena_dev;
spinlock_t q_lock; /* spinlock for the admin queue */
struct ena_comp_ctx *comp_ctx;
@@ -349,6 +352,8 @@ struct ena_com_dev {
struct ena_intr_moder_entry *intr_moder_tbl;
struct ena_com_llq_info llq_info;
+
+ u32 ena_min_poll_delay_us;
};
struct ena_com_dev_get_features_ctx {
@@ -393,7 +398,7 @@ struct ena_aenq_handlers {
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
-/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
*/
@@ -501,18 +506,6 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
*/
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
-/* ena_com_set_admin_polling_mode - Get the admin completion queue polling mode
- * @ena_dev: ENA communication layer struct
- *
- * Get the admin completion mode.
- * If polling mode is on, ena_com_execute_admin_command will perform a
- * polling on the admin completion queue for the commands completion,
- * otherwise it will wait on wait event.
- *
- * @return state
- */
-bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
-
/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
* @ena_dev: ENA communication layer struct
* @polling: Enable/Disable polling mode
@@ -527,7 +520,7 @@ void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
- * This method go over the admin completion queue and wake up all the pending
+ * This method goes over the admin completion queue and wakes up all the pending
* threads that wait on the commands wait event.
*
* @note: Should be called after MSI-X interrupt.
@@ -537,7 +530,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
/* ena_com_aenq_intr_handler - AENQ interrupt handler
* @ena_dev: ENA communication layer struct
*
- * This method go over the async event notification queue and call the proper
+ * This method goes over the async event notification queue and calls the proper
* aenq handler.
*/
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
@@ -554,14 +547,14 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
* @ena_dev: ENA communication layer struct
*
- * This method wait until all the outstanding admin commands will be completed.
+ * This method waits until all the outstanding admin commands are completed.
*/
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
/* ena_com_validate_version - Validate the device parameters
* @ena_dev: ENA communication layer struct
*
- * This method validate the device parameters are the same as the saved
+ * This method verifies the device parameters are the same as the saved
* parameters in ena_dev.
* This method is useful after device reset, to validate the device mac address
* and the device offloads are the same as before the reset.
@@ -695,23 +688,32 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
*/
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
-/* ena_com_get_hash_function - Retrieve the hash function and the hash key
- * from the device.
+/* ena_com_get_hash_function - Retrieve the hash function from the device.
* @ena_dev: ENA communication layer struct
* @func: hash function
- * @key: hash key
*
- * Retrieve the hash function and the hash key from the device.
+ * Retrieve the hash function from the device.
*
- * @note: If the caller called ena_com_fill_hash_function but didn't flash
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func,
- u8 *key);
+ enum ena_admin_hash_functions *func);
+/* ena_com_get_hash_key - Retrieve the hash key
+ * @ena_dev: ENA communication layer struct
+ * @key: hash key
+ *
+ * Retrieve the hash key.
+ *
+ * @note: If the caller called ena_com_fill_hash_key but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
/* ena_com_fill_hash_ctrl - Fill RSS hash control
* @ena_dev: ENA communication layer struct.
* @proto: The protocol to configure.
@@ -746,7 +748,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
*
* Retrieve the hash control from the device.
*
- * @note, If the caller called ena_com_fill_hash_ctrl but didn't flash
+ * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
@@ -798,7 +800,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
*
* Retrieve the RSS indirection table from the device.
*
- * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
@@ -824,14 +826,14 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
/* ena_com_delete_debug_area - Free the debug area resources.
* @ena_dev: ENA communication layer struct
*
- * Free the allocate debug area.
+ * Free the allocated debug area.
*/
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
/* ena_com_delete_host_info - Free the host info resources.
* @ena_dev: ENA communication layer struct
*
- * Free the allocate host info.
+ * Free the allocated host info.
*/
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
@@ -872,9 +874,9 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
* @cmd_completion: command completion return value.
* @cmd_comp_size: command completion size.
- * Submit an admin command and then wait until the device will return a
+ * Submit an admin command and then wait until the device returns a
* completion.
- * The completion will be copyed into cmd_comp.
+ * The completion will be copied into cmd_comp.
*
* @return - 0 on success, negative value on failure.
*/
@@ -937,7 +939,7 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *
/* ena_com_config_dev_mode - Configure the placement policy of the device.
* @ena_dev: ENA communication layer struct
 * @llq_features: LLQ feature descriptor, retrieved via
- * ena_com_get_dev_attr_feat.
+ * ena_com_get_dev_attr_feat.
* @ena_llq_config: The default driver LLQ parameters configurations
*/
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
@@ -963,7 +965,7 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
* @tx_delay_interval: Tx interval in usecs
- * @unmask: unask enable/disable
+ * @unmask: unmask enable/disable
*
* Prepare interrupt update register with the supplied parameters.
*/
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index 23beb7e7ed7b..8a8ded0de9ac 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -45,4 +45,4 @@ struct ena_common_mem_addr {
u16 reserved16;
};
-#endif /*_ENA_COMMON_H_ */
+#endif /* _ENA_COMMON_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 2845ac277724..ec8ea25e988d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -519,7 +519,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
u16 cdesc_idx = 0;
u16 nb_hw_desc;
- u16 i;
+ u16 i = 0;
WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
@@ -538,13 +538,19 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return -ENOSPC;
}
- for (i = 0; i < nb_hw_desc; i++) {
+ cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
+ ena_rx_ctx->pkt_offset = cdesc->offset;
+
+ do {
+ ena_buf[i].len = cdesc->length;
+ ena_buf[i].req_id = cdesc->req_id;
+
+ if (++i >= nb_hw_desc)
+ break;
+
cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
- ena_buf->len = cdesc->length;
- ena_buf->req_id = cdesc->req_id;
- ena_buf++;
- }
+ } while (1);
/* Update SQ head ptr */
io_sq->next_to_comp += nb_hw_desc;
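Equivalent control flow of the rewritten descriptor walk, for reference; first_cdesc(), cdesc_at() and record() are hypothetical shorthands. The point of the do/while is that descriptor 0 is fetched before the loop so its offset can be captured once:

	cdesc = first_cdesc();			/* descriptor 0 */
	pkt_offset = cdesc->offset;		/* only desc 0 carries it */
	i = 0;
	for (;;) {
		record(cdesc, i);		/* len + req_id */
		if (++i >= nb_hw_desc)
			break;
		cdesc = cdesc_at(i);
	}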
@@ -578,10 +584,10 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->length = ena_buf->len;
- desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
- desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
- desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
- desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
+ ENA_ETH_IO_RX_DESC_LAST_MASK |
+ (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
+ ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
desc->req_id = req_id;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 77986c0ea52c..8b1afd3b32f2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -73,6 +73,7 @@ struct ena_com_rx_ctx {
u32 hash;
u16 descs;
int max_bufs;
+ u8 pkt_offset;
};
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
@@ -95,7 +96,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
writel(intr_reg->intr_control, io_cq->unmask_reg);
}
-static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;
@@ -113,7 +114,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
int temp;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- return ena_com_free_desc(io_sq) >= required_buffers;
+ return ena_com_free_q_entries(io_sq) >= required_buffers;
/* This calculation doesn't need to be 100% accurate. So to reduce
 * the calculation overhead just subtract 2 lines from the free descs
@@ -122,7 +123,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
*/
temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
- return ena_com_free_desc(io_sq) > temp;
+ return ena_com_free_q_entries(io_sq) > temp;
}
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
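For reference, a sketch of the computation the renamed ena_com_free_q_entries() is believed to perform (the rename above does not change it): tail and next_to_comp are free-running u16 counters, so the subtraction is wrap-safe, and one slot is kept in reserve:

	static inline int free_q_entries(u16 q_depth, u16 tail, u16 next_to_comp)
	{
		u16 in_flight = tail - next_to_comp;	/* wrap-safe */

		return q_depth - 1 - in_flight;		/* one slot reserved */
	}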
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index 00e0f056a741..d105c9c56192 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -264,7 +264,9 @@ struct ena_eth_io_rx_cdesc_base {
u16 sub_qid;
- u16 reserved;
+ u8 offset;
+
+ u8 reserved;
};
/* 8-word format */
@@ -412,4 +414,4 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
-#endif /*_ENA_ETH_IO_H_ */
+#endif /* _ENA_ETH_IO_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 9cc28b4b2627..e340b65af08c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -83,6 +83,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(bad_req_id),
ENA_STAT_TX_ENTRY(llq_buffer_copy),
ENA_STAT_TX_ENTRY(missed_tx),
+ ENA_STAT_TX_ENTRY(unmask_interrupt),
};
static const struct ena_stats ena_stats_rx_strings[] = {
@@ -205,7 +206,7 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}
@@ -259,7 +260,6 @@ static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
-
memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
@@ -306,10 +306,8 @@ static int ena_get_coalesce(struct net_device *net_dev,
struct ena_adapter *adapter = netdev_priv(net_dev);
struct ena_com_dev *ena_dev = adapter->ena_dev;
- if (!ena_com_interrupt_moderation_supported(ena_dev)) {
- /* the devie doesn't support interrupt moderation */
+ if (!ena_com_interrupt_moderation_supported(ena_dev))
return -EOPNOTSUPP;
- }
coalesce->tx_coalesce_usecs =
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
@@ -325,7 +323,7 @@ static int ena_get_coalesce(struct net_device *net_dev,
return 0;
}
-static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
+static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
unsigned int val;
int i;
@@ -336,7 +334,7 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
adapter->tx_ring[i].smoothed_interval = val;
}
-static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter)
+static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
unsigned int val;
int i;
@@ -354,24 +352,22 @@ static int ena_set_coalesce(struct net_device *net_dev,
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc;
- if (!ena_com_interrupt_moderation_supported(ena_dev)) {
- /* the devie doesn't support interrupt moderation */
+ if (!ena_com_interrupt_moderation_supported(ena_dev))
return -EOPNOTSUPP;
- }
rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
coalesce->tx_coalesce_usecs);
if (rc)
return rc;
- ena_update_tx_rings_intr_moderation(adapter);
+ ena_update_tx_rings_nonadaptive_intr_moderation(adapter);
rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
coalesce->rx_coalesce_usecs);
if (rc)
return rc;
- ena_update_rx_rings_intr_moderation(adapter);
+ ena_update_rx_rings_nonadaptive_intr_moderation(adapter);
if (coalesce->use_adaptive_rx_coalesce &&
!ena_com_get_adaptive_moderation_enabled(ena_dev))
@@ -635,6 +631,32 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
return ENA_HASH_KEY_SIZE;
}
+static int ena_indirection_table_set(struct ena_adapter *adapter,
+ const u32 *indir)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int i, rc;
+
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+ i,
+ ENA_IO_RXQ_IDX(indir[i]));
+ if (unlikely(rc)) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot fill indirect table (index is too large)\n");
+ return rc;
+ }
+ }
+
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (rc) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot set indirect table\n");
+ return rc == -EPERM ? -EOPNOTSUPP : rc;
+ }
+ return rc;
+}
+
static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -672,17 +694,18 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
/* We call this function in order to check if the device
* supports getting/setting the hash function.
*/
- rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+ rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func);
if (rc) {
- if (rc == -EOPNOTSUPP) {
- key = NULL;
- hfunc = NULL;
+ if (rc == -EOPNOTSUPP)
rc = 0;
- }
return rc;
}
+ rc = ena_com_get_hash_key(adapter->ena_dev, key);
+ if (rc)
+ return rc;
+
switch (ena_func) {
case ENA_ADMIN_TOEPLITZ:
func = ETH_RSS_HASH_TOP;
@@ -699,7 +722,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (hfunc)
*hfunc = func;
- return rc;
+ return 0;
}
static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
@@ -707,27 +730,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_com_dev *ena_dev = adapter->ena_dev;
- enum ena_admin_hash_functions func;
- int rc, i;
+ enum ena_admin_hash_functions func = 0;
+ int rc;
if (indir) {
- for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
- rc = ena_com_indirect_table_fill_entry(ena_dev,
- i,
- ENA_IO_RXQ_IDX(indir[i]));
- if (unlikely(rc)) {
- netif_err(adapter, drv, netdev,
- "Cannot fill indirect table (index is too large)\n");
- return rc;
- }
- }
-
- rc = ena_com_indirect_table_set(ena_dev);
- if (rc) {
- netif_err(adapter, drv, netdev,
- "Cannot set indirect table\n");
- return rc == -EPERM ? -EOPNOTSUPP : rc;
- }
+ rc = ena_indirection_table_set(adapter, indir);
+ if (rc)
+ return rc;
}
switch (hfunc) {
@@ -746,7 +755,7 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
return -EOPNOTSUPP;
}
- if (key) {
+ if (key || func) {
rc = ena_com_fill_hash_function(ena_dev, func, key,
ENA_HASH_KEY_SIZE,
0xFFFFFFFF);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 2cc765df8da3..a0af74c93971 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -263,7 +263,7 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
dma_addr_t dma = 0;
u32 size;
- tx_info->xdpf = convert_to_xdp_frame(xdp);
+ tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
size = tx_info->xdpf->len;
ena_buf = tx_info->bufs;
@@ -1435,6 +1435,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
rx_info->page_offset, len, ENA_PAGE_SIZE);
+ /* The offset is non-zero only for the first buffer */
+ rx_info->page_offset = 0;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx skb updated. len %d. data_len %d\n",
@@ -1590,6 +1592,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
{
u16 next_to_clean = rx_ring->next_to_clean;
struct ena_com_rx_ctx ena_rx_ctx;
+ struct ena_rx_buffer *rx_info;
struct ena_adapter *adapter;
u32 res_budget, work_done;
int rx_copybreak_pkt = 0;
@@ -1606,6 +1609,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
xdp.rxq = &rx_ring->xdp_rxq;
+ xdp.frame_sz = ENA_PAGE_SIZE;
do {
xdp_verdict = XDP_PASS;
@@ -1613,6 +1617,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
ena_rx_ctx.max_bufs = rx_ring->sgl_size;
ena_rx_ctx.descs = 0;
+ ena_rx_ctx.pkt_offset = 0;
rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
rx_ring->ena_com_io_sq,
&ena_rx_ctx);
@@ -1622,6 +1627,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
if (unlikely(ena_rx_ctx.descs == 0))
break;
+ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+ rx_info->page_offset = ena_rx_ctx.pkt_offset;
+
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
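Taken together, the rx_offset plumbing in this patch works as follows (a summary of the hunks above and in ena_eth_com.c):

	/*
	 * 1. The device writes the payload offset into the first Rx
	 *    completion descriptor (cdesc->offset).
	 * 2. ena_com_rx_pkt() copies it into ena_rx_ctx.pkt_offset.
	 * 3. ena_clean_rx_irq() applies it to the first buffer's
	 *    page_offset, and ena_rx_skb() clears it afterwards since
	 *    only the first buffer of a packet carries an offset.
	 */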
@@ -1683,7 +1691,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring->next_to_clean = next_to_clean;
- refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
+ refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
refill_threshold =
min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
ENA_RX_REFILL_THRESH_PACKET);
@@ -1762,6 +1770,9 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
tx_ring->smoothed_interval,
true);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.unmask_interrupt++;
+ u64_stats_update_end(&tx_ring->syncp);
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
* So we use one of them to reach the intr reg
@@ -2231,7 +2242,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -2304,7 +2315,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
if (rc) {
netif_err(adapter, ifup, adapter->netdev,
"Failed to create I/O TX queue num %d rc: %d\n",
- qid, rc);
+ qid, rc);
return rc;
}
@@ -2453,7 +2464,7 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter)
* ones due to past queue allocation failures.
*/
set_io_rings_size(adapter, adapter->requested_tx_ring_size,
- adapter->requested_rx_ring_size);
+ adapter->requested_rx_ring_size);
while (1) {
if (ena_xdp_present(adapter)) {
@@ -2494,7 +2505,7 @@ err_setup_tx:
if (rc != -ENOMEM) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with error code %d\n",
- rc);
+ rc);
return rc;
}
@@ -2517,7 +2528,7 @@ err_setup_tx:
new_rx_ring_size = cur_rx_ring_size / 2;
if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
- new_rx_ring_size < ENA_MIN_RING_SIZE) {
+ new_rx_ring_size < ENA_MIN_RING_SIZE) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
ENA_MIN_RING_SIZE);
@@ -3076,8 +3087,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
return qid;
}
-static void ena_config_host_info(struct ena_com_dev *ena_dev,
- struct pci_dev *pdev)
+static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
struct ena_admin_host_info *host_info;
int rc;
@@ -3107,6 +3117,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
host_info->num_cpus = num_online_cpus();
host_info->driver_supported_features =
+ ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
rc = ena_com_set_host_attributes(ena_dev);
@@ -3169,6 +3180,7 @@ static void ena_get_stats64(struct net_device *netdev,
struct ena_ring *rx_ring, *tx_ring;
unsigned int start;
u64 rx_drops;
+ u64 tx_drops;
int i;
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
@@ -3203,9 +3215,11 @@ static void ena_get_stats64(struct net_device *netdev,
do {
start = u64_stats_fetch_begin_irq(&adapter->syncp);
rx_drops = adapter->dev_stats.rx_drops;
+ tx_drops = adapter->dev_stats.tx_drops;
} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
stats->rx_dropped = rx_drops;
+ stats->tx_dropped = tx_drops;
stats->multicast = 0;
stats->collisions = 0;
@@ -3433,6 +3447,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_com_mmio_reg_read_request_destroy(ena_dev);
+ /* restore the reset reason to its default value */
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
@@ -3678,8 +3693,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
for (i = 0; i < adapter->num_io_queues; i++) {
rx_ring = &adapter->rx_ring[i];
- refill_required =
- ena_com_free_desc(rx_ring->ena_com_io_sq);
+ refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
rx_ring->empty_rx_queue++;
@@ -3817,11 +3831,11 @@ static void ena_timer_service(struct timer_list *t)
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
}
-static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
+static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
+ u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
@@ -3991,7 +4005,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
}
- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
@@ -4107,8 +4121,8 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
@@ -4152,6 +4166,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_region;
}
+ ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
+
ena_dev->dmadev = &pdev->dev;
rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
@@ -4175,7 +4191,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
calc_queue_ctx.pdev = pdev;
- /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
+ /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity
*/
ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
@@ -4219,12 +4235,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
+ adapter->last_monitored_tx_qid = 0;
adapter->xdp_first_ring = 0;
adapter->xdp_num_queues = 0;
- adapter->last_monitored_tx_qid = 0;
-
adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
adapter->wd_state = wd_state;
@@ -4356,6 +4371,7 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
+ adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
if (shutdown) {
netif_device_detach(netdev);
@@ -4514,14 +4530,17 @@ static void ena_keep_alive_wd(void *adapter_data,
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
struct ena_admin_aenq_keep_alive_desc *desc;
u64 rx_drops;
+ u64 tx_drops;
desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
adapter->last_keep_alive_jiffies = jiffies;
rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
+ tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.rx_drops = rx_drops;
+ adapter->dev_stats.tx_drops = tx_drops;
u64_stats_update_end(&adapter->syncp);
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 9e1860d81908..ba030d260940 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -50,12 +50,6 @@
#define DRV_MODULE_GEN_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
-#ifndef DRV_MODULE_GENERATION
-#define DRV_MODULE_GENERATION \
- __stringify(DRV_MODULE_GEN_MAJOR) "." \
- __stringify(DRV_MODULE_GEN_MINOR) "." \
- __stringify(DRV_MODULE_GEN_SUBMINOR) "K"
-#endif
#define DEVICE_NAME "Elastic Network Adapter (ENA)"
@@ -104,8 +98,6 @@
#define ENA_RX_RSS_TABLE_LOG_SIZE 7
#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
-#define ENA_HASH_KEY_SIZE 40
-
/* The number of tx packet completions that will be handled each NAPI poll
* cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER.
*/
@@ -137,6 +129,8 @@
#define ENA_IO_IRQ_FIRST_IDX 1
#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
+#define ENA_ADMIN_POLL_DELAY_US 100
+
/* ENA device should send keep alive msg every 1 sec.
* We wait for 6 sec just to be on the safe side.
*/
@@ -151,8 +145,9 @@
* The buffer size we share with the device is defined to be ENA_PAGE_SIZE
*/
-#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
- VLAN_HLEN - XDP_PACKET_HEADROOM)
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
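Worked example for the corrected ENA_XDP_MAX_MTU above, assuming ENA_PAGE_SIZE = 4096 and a (build-dependent, here hypothetical) SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of 320 bytes:

	/*
	 * 4096 - 14 (ETH_HLEN) - 4 (ETH_FCS_LEN) - 4 (VLAN_HLEN)
	 *      - 256 (XDP_PACKET_HEADROOM) - 320 = 3498 bytes of MTU budget.
	 *
	 * Without the skb_shared_info term, an XDP frame could run into
	 * the shared-info area at the tail of the page.
	 */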
@@ -248,6 +243,7 @@ struct ena_stats_tx {
u64 bad_req_id;
u64 llq_buffer_copy;
u64 missed_tx;
+ u64 unmask_interrupt;
};
struct ena_stats_rx {
@@ -333,6 +329,7 @@ struct ena_stats_dev {
u64 interface_down;
u64 admin_q_pause;
u64 rx_drops;
+ u64 tx_drops;
};
enum ena_flags_t {
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 04fcafcc059c..b514bb1b855d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -154,4 +154,4 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
-#endif /*_ENA_REGS_H_ */
+#endif /* _ENA_REGS_H_ */
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index cf3562e82ca9..50fb66369415 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -536,7 +536,7 @@ void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 8266b3c1fefc..e53551daeea1 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -241,7 +241,7 @@ struct lance_private {
/* Now the prototypes we export */
int lance_open(struct net_device *dev);
int lance_close(struct net_device *dev);
-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
void lance_set_multicast(struct net_device *dev);
void lance_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 4e36122609a3..961796abab35 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -156,7 +156,7 @@ struct lance_memory {
struct lance_init_block init;
struct lance_tx_head tx_head[TX_RING_SIZE];
struct lance_rx_head rx_head[RX_RING_SIZE];
- char packet_area[0]; /* packet data follow after the
+ char packet_area[]; /* packet data follow after the
* init block and the ring
* descriptors and are located
* at runtime */
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a58185b1d8bf..3e3711b60d01 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1182,7 +1182,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
int i;
unsigned short data;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 3; i++)
{
reset_and_select_srom(dev);
data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
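The loop bound fix above reflects that each read_srom() call returns a 16-bit word, so three reads cover the 6-byte MAC address (the old bound of 6 read past it). A sketch of the unpacking; the in-tree code also bit-reverses each byte, elided here:

	for (i = 0; i < 3; i++) {
		unsigned short w = read_srom(dev, i + EnetAddressOffset / 2,
					     SROMAddressBits);

		ea[2 * i]     = w & 0xff;
		ea[2 * i + 1] = w >> 8;
	}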
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 8b555665a33a..130a105d03f3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -25,6 +25,10 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o \
+ hw_atl2/hw_atl2.o \
+ hw_atl2/hw_atl2_utils.o \
+ hw_atl2/hw_atl2_utils_fw.o \
+ hw_atl2/hw_atl2_llh.o \
macsec/macsec_api.o
atlantic-$(CONFIG_MACSEC) += aq_macsec.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 7560f5506e55..52b9833fda99 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -80,8 +80,8 @@
#define AQ_CFG_LOCK_TRYS 100U
-#define AQ_CFG_DRV_AUTHOR "aQuantia"
-#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
+#define AQ_CFG_DRV_AUTHOR "Marvell"
+#define AQ_CFG_DRV_DESC "Marvell (Aquantia) Corporation(R) Network Driver"
#define AQ_CFG_DRV_NAME "atlantic"
#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index c8c402b013bb..52ad9433cabc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_common.h: Basic includes for all files in project. */
@@ -37,22 +38,31 @@
#define AQ_DEVICE_ID_AQC111S 0x91B1
#define AQ_DEVICE_ID_AQC112S 0x92B1
-#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
+#define AQ_DEVICE_ID_AQC113DEV 0x00C0
+#define AQ_DEVICE_ID_AQC113CS 0x94C0
+#define AQ_DEVICE_ID_AQC114CS 0x93C0
+#define AQ_DEVICE_ID_AQC113 0x04C0
+#define AQ_DEVICE_ID_AQC113C 0x14C0
+#define AQ_DEVICE_ID_AQC115C 0x12C0
+
+#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
#define AQ_HWREV_ANY 0
#define AQ_HWREV_1 1
#define AQ_HWREV_2 2
-#define AQ_NIC_RATE_10G BIT(0)
-#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2GS BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
-
-#define AQ_NIC_RATE_EEE_10G BIT(6)
-#define AQ_NIC_RATE_EEE_5G BIT(7)
-#define AQ_NIC_RATE_EEE_2GS BIT(8)
-#define AQ_NIC_RATE_EEE_1G BIT(9)
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_5GSR BIT(2)
+#define AQ_NIC_RATE_2G5 BIT(3)
+#define AQ_NIC_RATE_1G BIT(4)
+#define AQ_NIC_RATE_100M BIT(5)
+#define AQ_NIC_RATE_10M BIT(6)
+
+#define AQ_NIC_RATE_EEE_10G BIT(7)
+#define AQ_NIC_RATE_EEE_5G BIT(8)
+#define AQ_NIC_RATE_EEE_2G5 BIT(9)
+#define AQ_NIC_RATE_EEE_1G BIT(10)
+#define AQ_NIC_RATE_EEE_100M BIT(11)
#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 7241cf92b43a..743d3b13b39d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -88,13 +88,13 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InDroppedDma",
};
-static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
- "Queue[%d] InPackets",
- "Queue[%d] OutPackets",
- "Queue[%d] Restarts",
- "Queue[%d] InJumboPackets",
- "Queue[%d] InLroPackets",
- "Queue[%d] InErrors",
+static const char * const aq_ethtool_queue_stat_names[] = {
+ "%sQueue[%d] InPackets",
+ "%sQueue[%d] OutPackets",
+ "%sQueue[%d] Restarts",
+ "%sQueue[%d] InJumboPackets",
+ "%sQueue[%d] InLroPackets",
+ "%sQueue[%d] InErrors",
};
#if IS_ENABLED(CONFIG_MACSEC)
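Example expansion of the new per-TC templates above (illustrative values): with QoS enabled, tc = 1 and ring index 3 give

	/*
	 * snprintf(p, ETH_GSTRING_LEN, "%sQueue[%d] InPackets", "TC1 ", 3)
	 *   -> "TC1 Queue[3] InPackets"
	 *
	 * With QoS disabled, tc_string stays "" and the strings keep
	 * their old "Queue[n] ..." form.
	 */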
@@ -166,7 +166,8 @@ static u32 aq_ethtool_n_stats(struct net_device *ndev)
struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
- ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs;
+ ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs *
+ cfg->tcs;
#if IS_ENABLED(CONFIG_MACSEC)
if (nic->macsec_cfg) {
@@ -223,7 +224,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
- struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
u8 *p = data;
int i, si;
@@ -231,24 +232,35 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
int sa;
#endif
- cfg = aq_nic_get_cfg(aq_nic);
+ cfg = aq_nic_get_cfg(nic);
switch (stringset) {
- case ETH_SS_STATS:
+ case ETH_SS_STATS: {
+ const int stat_cnt = ARRAY_SIZE(aq_ethtool_queue_stat_names);
+ char tc_string[8];
+ int tc;
+
+ memset(tc_string, 0, sizeof(tc_string));
memcpy(p, aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names));
p = p + sizeof(aq_ethtool_stat_names);
- for (i = 0; i < cfg->vecs; i++) {
- for (si = 0;
- si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
- si++) {
- snprintf(p, ETH_GSTRING_LEN,
- aq_ethtool_queue_stat_names[si], i);
- p += ETH_GSTRING_LEN;
+
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ if (cfg->is_qos)
+ snprintf(tc_string, 8, "TC%d ", tc);
+
+ for (i = 0; i < cfg->vecs; i++) {
+ for (si = 0; si < stat_cnt; si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_ethtool_queue_stat_names[si],
+ tc_string,
+ AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
+ p += ETH_GSTRING_LEN;
+ }
}
}
#if IS_ENABLED(CONFIG_MACSEC)
- if (!aq_nic->macsec_cfg)
+ if (!nic->macsec_cfg)
break;
memcpy(p, aq_macsec_stat_names, sizeof(aq_macsec_stat_names));
@@ -256,7 +268,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_txsc *aq_txsc;
- if (!(test_bit(i, &aq_nic->macsec_cfg->txsc_idx_busy)))
+ if (!(test_bit(i, &nic->macsec_cfg->txsc_idx_busy)))
continue;
for (si = 0;
@@ -266,7 +278,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
aq_macsec_txsc_stat_names[si], i);
p += ETH_GSTRING_LEN;
}
- aq_txsc = &aq_nic->macsec_cfg->aq_txsc[i];
+ aq_txsc = &nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_txsc->tx_sa_idx_busy)))
continue;
@@ -283,10 +295,10 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_rxsc *aq_rxsc;
- if (!(test_bit(i, &aq_nic->macsec_cfg->rxsc_idx_busy)))
+ if (!(test_bit(i, &nic->macsec_cfg->rxsc_idx_busy)))
continue;
- aq_rxsc = &aq_nic->macsec_cfg->aq_rxsc[i];
+ aq_rxsc = &nic->macsec_cfg->aq_rxsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_rxsc->rx_sa_idx_busy)))
continue;
@@ -302,6 +314,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
}
#endif
break;
+ }
case ETH_SS_PRIV_FLAGS:
memcpy(p, aq_ethtool_priv_flag_names,
sizeof(aq_ethtool_priv_flag_names));
@@ -605,12 +618,15 @@ static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
if (speed & AQ_NIC_RATE_EEE_10G)
rate |= SUPPORTED_10000baseT_Full;
- if (speed & AQ_NIC_RATE_EEE_2GS)
+ if (speed & AQ_NIC_RATE_EEE_2G5)
rate |= SUPPORTED_2500baseX_Full;
if (speed & AQ_NIC_RATE_EEE_1G)
rate |= SUPPORTED_1000baseT_Full;
+ if (speed & AQ_NIC_RATE_EEE_100M)
+ rate |= SUPPORTED_100baseT_Full;
+
return rate;
}
@@ -777,8 +793,6 @@ static int aq_set_ringparam(struct net_device *ndev,
dev_close(ndev);
}
- aq_nic_free_vectors(aq_nic);
-
cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
cfg->rxds = min(cfg->rxds, hw_caps->rxds_max);
cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE);
@@ -787,15 +801,10 @@ static int aq_set_ringparam(struct net_device *ndev,
cfg->txds = min(cfg->txds, hw_caps->txds_max);
cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE);
- for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs;
- aq_nic->aq_vecs++) {
- aq_nic->aq_vec[aq_nic->aq_vecs] =
- aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg);
- if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
- err = -ENOMEM;
- goto err_exit;
- }
- }
+ err = aq_nic_realloc_vectors(aq_nic);
+ if (err)
+ goto err_exit;
+
if (ndev_running)
err = dev_open(ndev, NULL);
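A per-TC prefix is folded into each queue stat name via the new leading "%s" (filled in only when cfg->is_qos is set), and the queue number now comes from AQ_NIC_CFG_TCVEC2RING(). A standalone sketch of the resulting expansion, assuming 2 TCs, 4 vectors and 8 rings per TC; all values here are illustrative:

/* Sketch: per-TC ethtool stat string expansion (illustrative values). */
#include <stdio.h>

#define ETH_GSTRING_LEN 32

static const char * const queue_stat_names[] = {
	"%sQueue[%d] InPackets",
	"%sQueue[%d] OutPackets",
};

int main(void)
{
	char buf[ETH_GSTRING_LEN];
	int tc, vec, si;

	for (tc = 0; tc < 2; tc++) {
		char tc_string[8];

		snprintf(tc_string, sizeof(tc_string), "TC%d ", tc);
		for (vec = 0; vec < 4; vec++)
			for (si = 0; si < 2; si++) {
				/* ring index mimics AQ_NIC_CFG_TCVEC2RING */
				snprintf(buf, sizeof(buf),
					 queue_stat_names[si],
					 tc_string, tc * 8 + vec);
				puts(buf); /* e.g. "TC1 Queue[10] InPackets" */
			}
	}
	return 0;
}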
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 03ff92bc4a7f..1bc4d33a0ce5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -153,6 +153,8 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
+ struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
+
if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
fsp->location > AQ_RX_LAST_LOC_FVLANID) {
netdev_err(aq_nic->ndev,
@@ -170,10 +172,10 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
return -EINVAL;
}
- if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
+ if (fsp->ring_cookie > cfg->num_rss_queues * cfg->tcs) {
netdev_err(aq_nic->ndev,
"ethtool: queue number must be in range [0, %d]",
- aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ cfg->num_rss_queues * cfg->tcs - 1);
return -EINVAL;
}
return 0;
@@ -262,6 +264,7 @@ static bool __must_check
aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
struct ethtool_rx_flow_spec *fsp)
{
+ struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
bool rule_is_not_correct = false;
if (!aq_nic) {
@@ -274,11 +277,11 @@ aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
} else if (aq_check_filter(aq_nic, fsp)) {
rule_is_not_correct = true;
} else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
- if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
+ if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) {
netdev_err(aq_nic->ndev,
"ethtool: The specified action is invalid.\n"
"Maximum allowable value action is %u.\n",
- aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ cfg->num_rss_queues * cfg->tcs - 1);
rule_is_not_correct = true;
}
}
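With QoS enabled a flow may be steered to any of num_rss_queues * tcs queues, so the ring_cookie bound scales by the TC count. A standalone sketch of the bound applied in aq_rule_is_not_correct(), with 4 RSS queues and 2 TCs as illustrative values:

/* Sketch: flow-steering queue bound with TCs (illustrative values). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool ring_cookie_ok(uint64_t cookie, unsigned int num_rss_queues,
			   unsigned int tcs)
{
	return cookie < (uint64_t)num_rss_queues * tcs;
}

int main(void)
{
	assert(ring_cookie_ok(7, 4, 2));	/* queues 0..7 are valid */
	assert(!ring_cookie_ok(8, 4, 2));	/* out of range */
	return 0;
}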
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 7d71bc7dc500..ed5b465bc664 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -18,6 +18,12 @@
#define AQ_HW_MAC_COUNTER_HZ 312500000ll
#define AQ_HW_PHY_COUNTER_HZ 160000000ll
+enum aq_tc_mode {
+ AQ_TC_MODE_INVALID = -1,
+ AQ_TC_MODE_8TCS,
+ AQ_TC_MODE_4TCS,
+};
+
#define AQ_RX_FIRST_LOC_FVLANID 0U
#define AQ_RX_LAST_LOC_FVLANID 15U
#define AQ_RX_FIRST_LOC_FETHERT 16U
@@ -29,6 +35,9 @@
(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
+/* Used for rate to Mbps conversion */
+#define AQ_MBPS_DIVISOR 125000 /* 1000000 / 8 */
+
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
@@ -46,7 +55,7 @@ struct aq_hw_caps_s {
u32 mac_regs_count;
u32 hw_alive_check_addr;
u8 msix_irqs;
- u8 tcs;
+ u8 tcs_max;
u8 rxd_alignment;
u8 rxd_size;
u8 txd_alignment;
@@ -55,6 +64,7 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
+ u32 priv_data_len;
};
struct aq_hw_link_status_s {
@@ -117,8 +127,11 @@ struct aq_stats_s {
#define AQ_HW_TXD_MULTIPLE 8U
#define AQ_HW_RXD_MULTIPLE 8U
+#define AQ_HW_QUEUES_MAX 32U
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
+#define AQ_HW_PTP_TC 2U
+
#define AQ_HW_LED_BLINK 0x2U
#define AQ_HW_LED_DEFAULT 0x0U
@@ -136,6 +149,19 @@ enum aq_priv_flags {
BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\
BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))
+#define ATL_HW_CHIP_MIPS 0x00000001U
+#define ATL_HW_CHIP_TPO2 0x00000002U
+#define ATL_HW_CHIP_RPF2 0x00000004U
+#define ATL_HW_CHIP_MPI_AQ 0x00000010U
+#define ATL_HW_CHIP_ATLANTIC 0x00800000U
+#define ATL_HW_CHIP_REVISION_A0 0x01000000U
+#define ATL_HW_CHIP_REVISION_B0 0x02000000U
+#define ATL_HW_CHIP_REVISION_B1 0x04000000U
+#define ATL_HW_CHIP_ANTIGUA 0x08000000U
+
+#define ATL_HW_IS_CHIP_FEATURE(_HW_, _F_) (!!(ATL_HW_CHIP_##_F_ & \
+ (_HW_)->chip_features))
+
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
@@ -159,6 +185,7 @@ struct aq_hw_s {
struct hw_atl_utils_fw_rpc rpc;
s64 ptp_clk_offset;
u16 phy_id;
+ void *priv;
};
struct aq_ring_s;
@@ -182,6 +209,11 @@ struct aq_hw_ops {
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_soft_reset)(struct aq_hw_s *self);
+
+ int (*hw_prepare)(struct aq_hw_s *self,
+ const struct aq_fw_ops **fw_ops);
+
int (*hw_reset)(struct aq_hw_s *self);
int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
@@ -248,21 +280,19 @@ struct aq_hw_ops {
int (*hw_rss_hash_set)(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
+ int (*hw_tc_rate_limit_set)(struct aq_hw_s *self);
+
int (*hw_get_regs)(struct aq_hw_s *self,
const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
- int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
+ u32 (*hw_get_fw_version)(struct aq_hw_s *self);
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
- int (*hw_tx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
-
- int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
-
int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
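ATL_HW_IS_CHIP_FEATURE() generalizes the old hw_atl-private IS_CHIP_FEATURE() by testing the named bit against aq_hw_s::chip_features, so both hw_atl and hw_atl2 code can share it. A minimal standalone sketch of the expansion, with the structure reduced to the one field the macro reads:

/* Sketch: ATL_HW_IS_CHIP_FEATURE() expansion (reduced structure). */
#include <stdio.h>

#define ATL_HW_CHIP_TPO2    0x00000002U
#define ATL_HW_CHIP_ANTIGUA 0x08000000U

struct aq_hw_s { unsigned int chip_features; };

#define ATL_HW_IS_CHIP_FEATURE(_HW_, _F_) (!!(ATL_HW_CHIP_##_F_ & \
					      (_HW_)->chip_features))

int main(void)
{
	struct aq_hw_s hw = { .chip_features = ATL_HW_CHIP_TPO2 };

	printf("TPO2:    %d\n", ATL_HW_IS_CHIP_FEATURE(&hw, TPO2));    /* 1 */
	printf("ANTIGUA: %d\n", ATL_HW_IS_CHIP_FEATURE(&hw, ANTIGUA)); /* 0 */
	return 0;
}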
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 7dbf49adcea6..342c5179f846 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -79,3 +79,29 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
err_exit:
return err;
}
+
+int aq_hw_num_tcs(struct aq_hw_s *hw)
+{
+ switch (hw->aq_nic_cfg->tc_mode) {
+ case AQ_TC_MODE_8TCS:
+ return 8;
+ case AQ_TC_MODE_4TCS:
+ return 4;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+int aq_hw_q_per_tc(struct aq_hw_s *hw)
+{
+ switch (hw->aq_nic_cfg->tc_mode) {
+ case AQ_TC_MODE_8TCS:
+ return 4;
+ case AQ_TC_MODE_4TCS:
+ return 8;
+ default:
+ return 4;
+ }
+}
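Both modes cover the same 32 hardware queues (AQ_HW_QUEUES_MAX): 8 TCs with 4 queues each, or 4 TCs with 8 queues each. A quick standalone check of that invariant, mirroring aq_hw_num_tcs() and aq_hw_q_per_tc() above:

/* Sketch: TC mode vs. queues per TC; both products equal 32. */
#include <assert.h>

enum aq_tc_mode { AQ_TC_MODE_INVALID = -1, AQ_TC_MODE_8TCS, AQ_TC_MODE_4TCS };

static int num_tcs(enum aq_tc_mode m) { return m == AQ_TC_MODE_8TCS ? 8 : 4; }
static int q_per_tc(enum aq_tc_mode m) { return m == AQ_TC_MODE_8TCS ? 4 : 8; }

int main(void)
{
	assert(num_tcs(AQ_TC_MODE_8TCS) * q_per_tc(AQ_TC_MODE_8TCS) == 32);
	assert(num_tcs(AQ_TC_MODE_4TCS) * q_per_tc(AQ_TC_MODE_4TCS) == 32);
	return 0;
}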
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index 9ef82d487e01..32aa5f2fb840 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -34,5 +34,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
+int aq_hw_num_tcs(struct aq_hw_s *hw);
+int aq_hw_q_per_tc(struct aq_hw_s *hw);
#endif /* AQ_HW_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 0b3e234a54aa..4a6dfac857ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -401,7 +401,7 @@ static u32 aq_sc_idx_max(const enum aq_macsec_sc_sa sc_sa)
break;
default:
break;
- };
+ }
return result;
}
@@ -417,7 +417,7 @@ static u32 aq_to_hw_sc_idx(const u32 sc_idx, const enum aq_macsec_sc_sa sc_sa)
return sc_idx;
default:
WARN_ONCE(true, "Invalid sc_sa");
- };
+ }
return sc_idx;
}
@@ -478,7 +478,7 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
set_bit(txsc_idx, &cfg->txsc_idx_busy);
- return 0;
+ return ret;
}
static int aq_mdo_upd_secy(struct macsec_context *ctx)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 9fcab646cbd5..8a1da044e908 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -12,11 +12,13 @@
#include "aq_ethtool.h"
#include "aq_ptp.h"
#include "aq_filters.h"
+#include "aq_hw_utils.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
+#include <net/pkt_cls.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
@@ -38,7 +40,7 @@ struct net_device *aq_ndev_alloc(void)
struct net_device *ndev = NULL;
struct aq_nic_s *aq_nic = NULL;
- ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+ ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX);
if (!ndev)
return NULL;
@@ -330,6 +332,73 @@ static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
return 0;
}
+static int aq_validate_mqprio_opt(struct aq_nic_s *self,
+ struct tc_mqprio_qopt_offload *mqprio,
+ const unsigned int num_tc)
+{
+ const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self);
+ const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max,
+ AQ_CFG_TCS_MAX);
+
+ if (num_tc > tcs_max) {
+ netdev_err(self->ndev, "Too many TCs requested\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (num_tc != 0 && !is_power_of_2(num_tc)) {
+ netdev_err(self->ndev, "TC count should be power of 2\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
+ netdev_err(self->ndev, "Min tx rate is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ bool has_min_rate;
+ bool has_max_rate;
+ int err;
+ int i;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
+
+ err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ if (has_max_rate) {
+ u64 max_rate = mqprio->max_rate[i];
+
+ do_div(max_rate, AQ_MBPS_DIVISOR);
+ aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
+ }
+
+ if (has_min_rate) {
+ u64 min_rate = mqprio->min_rate[i];
+
+ do_div(min_rate, AQ_MBPS_DIVISOR);
+ aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate);
+ }
+ }
+
+ return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
+ mqprio->qopt.prio_tc_map);
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -341,6 +410,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_do_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = aq_ndo_setup_tc,
};
static int __init aq_ndev_init_module(void)
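mqprio hands min_rate/max_rate to the driver in bytes per second, so aq_ndo_setup_tc() divides by AQ_MBPS_DIVISOR (1000000 / 8 = 125000) before programming per-TC rates in Mbps. A standalone sketch of the conversion; the 500 Mbit shaper value is just an example:

/* Sketch: bytes/sec from mqprio -> Mbps, as done via do_div() above. */
#include <assert.h>
#include <stdint.h>

#define AQ_MBPS_DIVISOR 125000	/* 1000000 / 8 */

int main(void)
{
	/* e.g. "max_rate 500Mbit" from tc arrives as 62500000 bytes/sec */
	uint64_t max_rate = 62500000ULL;

	assert(max_rate / AQ_MBPS_DIVISOR == 500);	/* Mbps */
	return 0;
}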
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index a369705a786a..4435c6374f7e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_nic.c: Definition of common code for NIC. */
@@ -25,6 +26,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
+#include <net/pkt_cls.h>
static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
@@ -63,10 +65,38 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}
+/* Recalculate the number of vectors */
+static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
+ cfg->vecs = min(cfg->vecs, num_online_cpus());
+ if (self->irqvecs > AQ_HW_SERVICE_IRQS)
+ cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
+ /* cfg->vecs should be power of 2 for RSS */
+ cfg->vecs = rounddown_pow_of_two(cfg->vecs);
+
+ if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
+ if (cfg->tcs > 2)
+ cfg->vecs = min(cfg->vecs, 4U);
+ }
+
+ if (cfg->vecs <= 4)
+ cfg->tc_mode = AQ_TC_MODE_8TCS;
+ else
+ cfg->tc_mode = AQ_TC_MODE_4TCS;
+
+	/* RSS rings */
+ cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+ aq_nic_rss_init(self, cfg->num_rss_queues);
+}
+
/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ int i;
cfg->tcs = AQ_CFG_TCS_DEF;
@@ -78,7 +108,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
- cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
cfg->fc.req = AQ_CFG_FC_MODE;
cfg->wol = AQ_CFG_WOL_MODES;
@@ -88,29 +117,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
+ cfg->is_ptp = true;
/*descriptors */
cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
- /*rss rings */
- cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
- cfg->vecs = min(cfg->vecs, num_online_cpus());
- if (self->irqvecs > AQ_HW_SERVICE_IRQS)
- cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
- /* cfg->vecs should be power of 2 for RSS */
- if (cfg->vecs >= 8U)
- cfg->vecs = 8U;
- else if (cfg->vecs >= 4U)
- cfg->vecs = 4U;
- else if (cfg->vecs >= 2U)
- cfg->vecs = 2U;
- else
- cfg->vecs = 1U;
-
- cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
-
- aq_nic_rss_init(self, cfg->num_rss_queues);
+ aq_nic_cfg_update_num_vecs(self);
cfg->irq_type = aq_pci_func_get_irq_type(self);
@@ -135,6 +148,9 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
cfg->is_vlan_force_promisc = true;
+
+ for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
+ cfg->prio_tc_map[i] = cfg->tcs * i / 8;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
@@ -180,6 +196,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
#if IS_ENABLED(CONFIG_MACSEC)
aq_macsec_enable(self);
#endif
+ if (self->aq_hw_ops->hw_tc_rate_limit_set)
+ self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);
+
netif_tx_wake_all_queues(self->ndev);
}
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
@@ -257,6 +276,28 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
AQ_CFG_POLLING_TIMER_INTERVAL);
}
+static int aq_nic_hw_prepare(struct aq_nic_s *self)
+{
+ int err = 0;
+
+ err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
+ if (err)
+ goto exit;
+
+ err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);
+
+exit:
+ return err;
+}
+
+static bool aq_nic_is_valid_ether_addr(const u8 *addr)
+{
+ /* Some engineering samples of Aquantia NICs are provisioned with a
+ * partially populated MAC, which is still invalid.
+ */
+ return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
+}
+
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
@@ -266,7 +307,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
goto err_exit;
}
- err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
+ err = aq_nic_hw_prepare(self);
if (err)
goto err_exit;
@@ -281,6 +322,12 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
if (err)
goto err_exit;
+ if (!is_valid_ether_addr(self->ndev->dev_addr) ||
+ !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }
+
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
{
static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
@@ -364,26 +411,35 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
+ if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
+ self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
err = aq_phy_init(self->aq_hw);
}
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ for (i = 0U; i < self->aq_vecs; i++) {
+ aq_vec = self->aq_vec[i];
+ err = aq_vec_ring_alloc(aq_vec, self, i,
+ aq_nic_get_cfg(self));
+ if (err)
+ goto err_exit;
+
aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+ }
- err = aq_ptp_init(self, self->irqvecs - 1);
- if (err < 0)
- goto err_exit;
+ if (aq_nic_get_cfg(self)->is_ptp) {
+ err = aq_ptp_init(self, self->irqvecs - 1);
+ if (err < 0)
+ goto err_exit;
- err = aq_ptp_ring_alloc(self);
- if (err < 0)
- goto err_exit;
+ err = aq_ptp_ring_alloc(self);
+ if (err < 0)
+ goto err_exit;
- err = aq_ptp_ring_init(self);
- if (err < 0)
- goto err_exit;
+ err = aq_ptp_ring_init(self);
+ if (err < 0)
+ goto err_exit;
+ }
netif_carrier_off(self->ndev);
@@ -394,9 +450,12 @@ err_exit:
int aq_nic_start(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
+ struct aq_nic_cfg_s *cfg;
unsigned int i = 0U;
int err = 0;
+ cfg = aq_nic_get_cfg(self);
+
err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
@@ -434,7 +493,7 @@ int aq_nic_start(struct aq_nic_s *self)
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
- if (self->aq_nic_cfg.is_polling) {
+ if (cfg->is_polling) {
timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
@@ -452,16 +511,16 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- if (self->aq_nic_cfg.link_irq_vec) {
+ if (cfg->link_irq_vec) {
int irqvec = pci_irq_vector(self->pdev,
- self->aq_nic_cfg.link_irq_vec);
+ cfg->link_irq_vec);
err = request_threaded_irq(irqvec, NULL,
aq_linkstate_threaded_isr,
IRQF_SHARED | IRQF_ONESHOT,
self->ndev->name, self);
if (err < 0)
goto err_exit;
- self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
+ self->msix_entry_mask |= (1 << cfg->link_irq_vec);
}
err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
@@ -470,14 +529,21 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
- err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
+ err = netif_set_real_num_tx_queues(self->ndev,
+ self->aq_vecs * cfg->tcs);
if (err < 0)
goto err_exit;
- err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
+ err = netif_set_real_num_rx_queues(self->ndev,
+ self->aq_vecs * cfg->tcs);
if (err < 0)
goto err_exit;
+ for (i = 0; i < cfg->tcs; i++) {
+ u16 offset = self->aq_vecs * i;
+
+ netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
+ }
netif_tx_start_all_queues(self->ndev);
err_exit:
@@ -488,6 +554,8 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+ struct device *dev = aq_nic_get_dev(self);
struct aq_ring_buff_s *first = NULL;
u8 ipver = ip_hdr(skb)->version;
struct aq_ring_buff_s *dx_buff;
@@ -529,7 +597,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
need_context_tag = true;
}
- if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
+ if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
dx_buff->len_pkt = skb->len;
dx_buff->is_vlan = 1U;
@@ -544,12 +612,12 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
}
dx_buff->len = skb_headlen(skb);
- dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
+ dx_buff->pa = dma_map_single(dev,
skb->data,
dx_buff->len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
ret = 0;
goto exit;
}
@@ -581,13 +649,13 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
else
buff_size = frag_len;
- frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
+ frag_pa = skb_frag_dma_map(dev,
frag,
buff_offset,
buff_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
+ if (unlikely(dma_mapping_error(dev,
frag_pa)))
goto mapping_error;
@@ -621,12 +689,12 @@ mapping_error:
if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
!dx_buff->is_vlan && dx_buff->pa) {
if (unlikely(dx_buff->is_sop)) {
- dma_unmap_single(aq_nic_get_dev(self),
+ dma_unmap_single(dev,
dx_buff->pa,
dx_buff->len,
DMA_TO_DEVICE);
} else {
- dma_unmap_page(aq_nic_get_dev(self),
+ dma_unmap_page(dev,
dx_buff->pa,
dx_buff->len,
DMA_TO_DEVICE);
@@ -640,15 +708,16 @@ exit:
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
- unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+ unsigned int vec = skb->queue_mapping % cfg->vecs;
+ unsigned int tc = skb->queue_mapping / cfg->vecs;
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
int err = NETDEV_TX_OK;
- unsigned int tc = 0U;
frags = skb_shinfo(skb)->nr_frags + 1;
- ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
+ ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];
if (frags > AQ_CFG_SKB_FRAGS_MAX) {
dev_kfree_skb_any(skb);
@@ -657,13 +726,14 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
aq_ring_update_queue_state(ring);
- if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
err = NETDEV_TX_BUSY;
goto err_exit;
}
/* Above status update may stop the queue. Check this. */
- if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
+ if (__netif_subqueue_stopped(self->ndev,
+ AQ_NIC_RING2QMAP(self, ring->idx))) {
err = NETDEV_TX_BUSY;
goto err_exit;
}
@@ -764,6 +834,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
u32 *regs_buff = p;
int err = 0;
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return -EOPNOTSUPP;
+
regs->version = 1;
err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
@@ -778,6 +851,9 @@ err_exit:
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return 0;
+
return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
@@ -787,6 +863,7 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
+ unsigned int tc;
if (self->aq_fw_ops->update_stats) {
mutex_lock(&self->fwreq_mutex);
@@ -825,10 +902,13 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
- data += count;
- aq_vec_get_sw_stats(aq_vec, data, &count);
+ for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ aq_vec && self->aq_vecs > i;
+ ++i, aq_vec = self->aq_vec[i]) {
+ data += count;
+ aq_vec_get_sw_stats(aq_vec, tc, data, &count);
+ }
}
data += count;
@@ -873,7 +953,7 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
5000baseT_Full);
- if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
ethtool_link_ksettings_add_link_mode(cmd, supported,
2500baseT_Full);
@@ -885,6 +965,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+
if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
@@ -912,7 +996,7 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
5000baseT_Full);
- if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
2500baseT_Full);
@@ -924,6 +1008,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+
if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
@@ -954,6 +1042,10 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
speed = cmd->base.speed;
switch (speed) {
+ case SPEED_10:
+ rate = AQ_NIC_RATE_10M;
+ break;
+
case SPEED_100:
rate = AQ_NIC_RATE_100M;
break;
@@ -963,7 +1055,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
break;
case SPEED_2500:
- rate = AQ_NIC_RATE_2GS;
+ rate = AQ_NIC_RATE_2G5;
break;
case SPEED_5000:
@@ -1006,11 +1098,7 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
- u32 fw_version = 0U;
-
- self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);
-
- return fw_version;
+ return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}
int aq_nic_set_loopback(struct aq_nic_s *self)
@@ -1101,9 +1189,11 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
if (!self)
goto err_exit;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ for (i = 0U; i < self->aq_vecs; i++) {
+ aq_vec = self->aq_vec[i];
aq_vec_deinit(aq_vec);
+ aq_vec_ring_free(aq_vec);
+ }
aq_ptp_unregister(self);
aq_ptp_ring_deinit(self);
@@ -1136,6 +1226,22 @@ void aq_nic_free_vectors(struct aq_nic_s *self)
err_exit:;
}
+int aq_nic_realloc_vectors(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+
+ aq_nic_free_vectors(self);
+
+ for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
+ cfg);
+ if (unlikely(!self->aq_vec[self->aq_vecs]))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
void aq_nic_shutdown(struct aq_nic_s *self)
{
int err = 0;
@@ -1201,3 +1307,98 @@ void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
break;
}
}
+
+int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ const unsigned int prev_vecs = cfg->vecs;
+ bool ndev_running;
+ int err = 0;
+ int i;
+
+	/* Bail out if this is already the current configuration, or if
+	 * this is a disable request (tcs is 0) and QoS is already disabled.
+	 */
+ if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
+ return 0;
+
+ ndev_running = netif_running(self->ndev);
+ if (ndev_running)
+ dev_close(self->ndev);
+
+ cfg->tcs = tcs;
+ if (cfg->tcs == 0)
+ cfg->tcs = 1;
+ if (prio_tc_map)
+ memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
+ else
+ for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
+ cfg->prio_tc_map[i] = cfg->tcs * i / 8;
+
+ cfg->is_qos = (tcs != 0 ? true : false);
+ cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
+ if (!cfg->is_ptp)
+ netdev_warn(self->ndev, "%s\n",
+ "PTP is auto disabled due to requested TC count.");
+
+ netdev_set_num_tc(self->ndev, cfg->tcs);
+
+ /* Changing the number of TCs might change the number of vectors */
+ aq_nic_cfg_update_num_vecs(self);
+ if (prev_vecs != cfg->vecs) {
+ err = aq_nic_realloc_vectors(self);
+ if (err)
+ goto err_exit;
+ }
+
+ if (ndev_running)
+ err = dev_open(self->ndev, NULL);
+
+err_exit:
+ return err;
+}
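When no explicit map is supplied, the default assignment prio_tc_map[i] = tcs * i / 8 spreads the eight 802.1p priorities evenly across the configured TCs. A standalone sketch of the resulting table for 4 TCs (illustrative):

/* Sketch: default 802.1p priority -> TC map for tcs == 4. */
#include <stdio.h>

int main(void)
{
	int tcs = 4, i;

	for (i = 0; i < 8; i++)
		printf("prio %d -> TC %d\n", i, tcs * i / 8);
	/* prio 0,1 -> TC0; 2,3 -> TC1; 4,5 -> TC2; 6,7 -> TC3 */
	return 0;
}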
+
+int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 max_rate)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (tc >= AQ_CFG_TCS_MAX)
+ return -EINVAL;
+
+ if (max_rate && max_rate < 10) {
+ netdev_warn(self->ndev,
+ "Setting %s to the minimum usable value of %dMbps.\n",
+ "max rate", 10);
+ cfg->tc_max_rate[tc] = 10;
+ } else {
+ cfg->tc_max_rate[tc] = max_rate;
+ }
+
+ return 0;
+}
+
+int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 min_rate)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (tc >= AQ_CFG_TCS_MAX)
+ return -EINVAL;
+
+ if (min_rate)
+ set_bit(tc, &cfg->tc_min_rate_msk);
+ else
+ clear_bit(tc, &cfg->tc_min_rate_msk);
+
+ if (min_rate && min_rate < 20) {
+ netdev_warn(self->ndev,
+ "Setting %s to the minimum usable value of %dMbps.\n",
+ "min rate", 20);
+ cfg->tc_min_rate[tc] = 20;
+ } else {
+ cfg->tc_min_rate[tc] = min_rate;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 0663b8d0220d..2ab003065e62 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -59,8 +59,15 @@ struct aq_nic_cfg_s {
bool is_polling;
bool is_rss;
bool is_lro;
+ bool is_qos;
+ bool is_ptp;
+ enum aq_tc_mode tc_mode;
u32 priv_flags;
u8 tcs;
+ u8 prio_tc_map[8];
+ u32 tc_max_rate[AQ_CFG_TCS_MAX];
+ unsigned long tc_min_rate_msk;
+ u32 tc_min_rate[AQ_CFG_TCS_MAX];
struct aq_rss_parameters aq_rss;
u32 eee_speeds;
};
@@ -77,8 +84,16 @@ struct aq_nic_cfg_s {
#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
WAKE_PHY)
-#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
- ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
+#define AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) \
+ (((_NIC_CFG_)->tc_mode == AQ_TC_MODE_4TCS) ? 8 : 4)
+
+#define AQ_NIC_CFG_TCVEC2RING(_NIC_CFG_, _TC_, _VEC_) \
+ ((_TC_) * AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) + (_VEC_))
+
+#define AQ_NIC_RING2QMAP(_NIC_, _ID_) \
+ ((_ID_) / AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg) * \
+ (_NIC_)->aq_vecs + \
+ ((_ID_) % AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg)))
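Hardware rings are laid out per TC with a fixed stride (AQ_NIC_CFG_RING_PER_TC: 8 in 4-TC mode, 4 in 8-TC mode), while netdev queues use a stride of aq_vecs, so AQ_NIC_RING2QMAP() translates between the two layouts. A standalone sketch of the round trip, assuming 4-TC mode and aq_vecs == 4 (illustrative values):

/* Sketch: ring index <-> netdev queue index (illustrative values). */
#include <assert.h>

#define RING_PER_TC 8	/* 4-TC mode */
#define AQ_VECS     4

static int tcvec2ring(int tc, int vec) { return tc * RING_PER_TC + vec; }

static int ring2qmap(int ring)
{
	return ring / RING_PER_TC * AQ_VECS + ring % RING_PER_TC;
}

int main(void)
{
	/* TC1, vector 2: hardware ring 10 maps to netdev queue 6 */
	assert(tcvec2ring(1, 2) == 10);
	assert(ring2qmap(10) == 1 * AQ_VECS + 2);
	return 0;
}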
struct aq_hw_rx_fl2 {
struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
@@ -104,7 +119,7 @@ struct aq_nic_s {
atomic_t flags;
u32 msg_enable;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
- struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+ struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
struct net_device *ndev;
unsigned int aq_vecs;
@@ -164,6 +179,7 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
void aq_nic_set_power(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
void aq_nic_free_vectors(struct aq_nic_s *self);
+int aq_nic_realloc_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
@@ -181,4 +197,9 @@ void aq_nic_shutdown(struct aq_nic_s *self);
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
u32 location);
+int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map);
+int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 max_rate);
+int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 min_rate);
#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 8a70ffe1d326..41c0f560f95b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -16,6 +16,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl2/hw_atl2.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"
@@ -41,6 +42,13 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+
{}
};
@@ -70,6 +78,13 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
{ AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
{ AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
+
+ { AQ_DEVICE_ID_AQC113DEV, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
@@ -104,10 +119,8 @@ int aq_pci_func_init(struct pci_dev *pdev)
int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
+ if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-
- }
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
@@ -237,6 +250,15 @@ static int aq_pci_probe(struct pci_dev *pdev,
goto err_ioremap;
}
self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
+ if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
+ int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;
+
+ self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
+ if (!self->aq_hw->priv) {
+ err = -ENOMEM;
+ goto err_free_aq_hw;
+ }
+ }
for (bar = 0; bar < 4; ++bar) {
if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
@@ -245,19 +267,19 @@ static int aq_pci_probe(struct pci_dev *pdev,
mmio_pa = pci_resource_start(pdev, bar);
if (mmio_pa == 0U) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
reg_sz = pci_resource_len(pdev, bar);
if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
if (!self->aq_hw->mmio) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
break;
}
@@ -265,7 +287,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
if (bar == 4) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
numvecs = min((u8)AQ_CFG_VECS_DEF,
@@ -305,6 +327,8 @@ err_register:
aq_pci_free_irq_vectors(self);
err_hwinit:
iounmap(self->aq_hw->mmio);
+err_free_aq_hw_priv:
+ kfree(self->aq_hw->priv);
err_free_aq_hw:
kfree(self->aq_hw);
err_ioremap:
@@ -332,6 +356,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
aq_nic_free_vectors(self);
aq_pci_free_irq_vectors(self);
iounmap(self->aq_hw->mmio);
+ kfree(self->aq_hw->priv);
kfree(self->aq_hw);
pci_release_regions(pdev);
free_netdev(self->ndev);
@@ -406,6 +431,9 @@ static int atl_resume_common(struct device *dev, bool deep)
netif_tx_start_all_queues(nic->ndev);
err_exit:
+ if (ret < 0)
+ aq_nic_deinit(nic, true);
+
rtnl_unlock();
return ret;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 58e8c641e8b3..599ced261b2a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -945,26 +945,29 @@ void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
#define PTP_4TC_RING_IDX 16
#define PTP_HWST_RING_IDX 31
+/* Index must be 8 (8 TCs) or 16 (4 TCs).
+ * It depends on Traffic Class mode.
+ */
+static unsigned int ptp_ring_idx(const enum aq_tc_mode tc_mode)
+{
+ if (tc_mode == AQ_TC_MODE_8TCS)
+ return PTP_8TC_RING_IDX;
+
+ return PTP_4TC_RING_IDX;
+}
+
int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
unsigned int tx_ring_idx, rx_ring_idx;
struct aq_ring_s *hwts;
- u32 tx_tc_mode, rx_tc_mode;
struct aq_ring_s *ring;
int err;
if (!aq_ptp)
return 0;
- /* Index must to be 8 (8 TCs) or 16 (4 TCs).
- * It depends from Traffic Class mode.
- */
- aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
- if (tx_tc_mode == 0)
- tx_ring_idx = PTP_8TC_RING_IDX;
- else
- tx_ring_idx = PTP_4TC_RING_IDX;
+ tx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
tx_ring_idx, &aq_nic->aq_nic_cfg);
@@ -973,11 +976,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
goto err_exit;
}
- aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
- if (rx_tc_mode == 0)
- rx_ring_idx = PTP_8TC_RING_IDX;
- else
- rx_ring_idx = PTP_4TC_RING_IDX;
+ rx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
rx_ring_idx, &aq_nic->aq_nic_cfg);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index bae95a618560..68fdb3994088 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -232,8 +232,11 @@ void aq_ring_queue_wake(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
- if (__netif_subqueue_stopped(ndev, ring->idx)) {
- netif_wake_subqueue(ndev, ring->idx);
+ if (__netif_subqueue_stopped(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic,
+ ring->idx))) {
+ netif_wake_subqueue(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
ring->stats.tx.queue_restarts++;
}
}
@@ -242,8 +245,11 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
- if (!__netif_subqueue_stopped(ndev, ring->idx))
- netif_stop_subqueue(ndev, ring->idx);
+ if (!__netif_subqueue_stopped(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic,
+ ring->idx)))
+ netif_stop_subqueue(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}
bool aq_ring_tx_clean(struct aq_ring_s *self)
@@ -466,7 +472,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
/* Send all PTP traffic to 0 queue */
- skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(self->aq_nic,
+ self->idx));
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f40a427970dc..d1d43c8ce400 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -103,16 +103,11 @@ err_exit:
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
- struct aq_ring_s *ring = NULL;
struct aq_vec_s *self = NULL;
- unsigned int i = 0U;
- int err = 0;
self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self) {
- err = -ENOMEM;
+ if (!self)
goto err_exit;
- }
self->aq_nic = aq_nic;
self->aq_ring_param.vec_idx = idx;
@@ -128,10 +123,20 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
+err_exit:
+ return self;
+}
+
+int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
for (i = 0; i < aq_nic_cfg->tcs; ++i) {
- unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic,
- self->tx_rings,
- self->aq_ring_param.vec_idx);
+ const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
+ i, idx);
ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
idx_ring, aq_nic_cfg);
@@ -156,11 +161,11 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
err_exit:
if (err < 0) {
- aq_vec_free(self);
+ aq_vec_ring_free(self);
self = NULL;
}
- return self;
+ return err;
}
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
@@ -270,6 +275,18 @@ err_exit:;
void aq_vec_free(struct aq_vec_s *self)
{
+ if (!self)
+ goto err_exit;
+
+ netif_napi_del(&self->napi);
+
+ kfree(self);
+
+err_exit:;
+}
+
+void aq_vec_ring_free(struct aq_vec_s *self)
+{
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
@@ -279,13 +296,12 @@ void aq_vec_free(struct aq_vec_s *self)
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
aq_ring_free(&ring[AQ_VEC_TX_ID]);
- aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ if (i < self->rx_rings)
+ aq_ring_free(&ring[AQ_VEC_RX_ID]);
}
- netif_napi_del(&self->napi);
-
- kfree(self);
-
+ self->tx_rings = 0;
+ self->rx_rings = 0;
err_exit:;
}
@@ -333,16 +349,14 @@ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
return &self->aq_ring_param.affinity_mask;
}
-void aq_vec_add_stats(struct aq_vec_s *self,
- struct aq_ring_stats_rx_s *stats_rx,
- struct aq_ring_stats_tx_s *stats_tx)
+static void aq_vec_add_stats(struct aq_vec_s *self,
+ const unsigned int tc,
+ struct aq_ring_stats_rx_s *stats_rx,
+ struct aq_ring_stats_tx_s *stats_tx)
{
- struct aq_ring_s *ring = NULL;
- unsigned int r = 0U;
+ struct aq_ring_s *ring = self->ring[tc];
- for (r = 0U, ring = self->ring[0];
- self->tx_rings > r; ++r, ring = self->ring[r]) {
- struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
+ if (tc < self->rx_rings) {
struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
stats_rx->packets += rx->packets;
@@ -353,6 +367,10 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_rx->pg_losts += rx->pg_losts;
stats_rx->pg_flips += rx->pg_flips;
stats_rx->pg_reuses += rx->pg_reuses;
+ }
+
+ if (tc < self->tx_rings) {
+ struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
@@ -361,7 +379,8 @@ void aq_vec_add_stats(struct aq_vec_s *self,
}
}
-int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
+int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
+ unsigned int *p_count)
{
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
@@ -369,7 +388,8 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
- aq_vec_add_stats(self, &stats_rx, &stats_tx);
+
+ aq_vec_add_stats(self, tc, &stats_rx, &stats_tx);
/* This data should mimic aq_ethtool_queue_stat_names structure
*/
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 0fe8e0904c7f..541af85e6510 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -25,17 +25,17 @@ irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg);
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
+void aq_vec_ring_free(struct aq_vec_s *self);
int aq_vec_start(struct aq_vec_s *self);
void aq_vec_stop(struct aq_vec_s *self);
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
-int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data,
+int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
unsigned int *p_count);
-void aq_vec_add_stats(struct aq_vec_s *self,
- struct aq_ring_stats_rx_s *stats_rx,
- struct aq_ring_stats_tx_s *stats_tx);
#endif /* AQ_VEC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 9b1062b8af64..a312864969af 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */
@@ -20,7 +21,7 @@
.msix_irqs = 4U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_A0_RSS_MAX, \
- .tcs = HW_ATL_A0_TC_MAX, \
+ .tcs_max = HW_ATL_A0_TC_MAX, \
.rxd_alignment = 1U, \
.rxd_size = HW_ATL_A0_RXD_SIZE, \
.rxds_max = HW_ATL_A0_MAX_RXD, \
@@ -47,7 +48,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -57,7 +58,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -66,7 +67,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -74,7 +75,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = AQ_NIC_RATE_2GS |
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -135,10 +136,10 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0U, 0xFFF);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0U, 0x64);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0U, 0x50);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0U, 0x1E);
/* Tx buf size */
buff_size = HW_ATL_A0_TXBUF_MAX;
@@ -267,8 +268,7 @@ static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
- 0x00010000U : 0x00000000U);
+ aq_hw_write_reg(self, 0x00007040U, 0x00000000U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
@@ -886,6 +886,8 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
}
const struct aq_hw_ops hw_atl_ops_a0 = {
+ .hw_soft_reset = hw_atl_utils_soft_reset,
+ .hw_prepare = hw_atl_utils_initfw,
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_init = hw_atl_a0_hw_init,
.hw_reset = hw_atl_a0_hw_reset,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d20d91cdece8..14d79f70cad7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
@@ -22,7 +23,7 @@
.msix_irqs = 8U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_B0_RSS_MAX, \
- .tcs = HW_ATL_B0_TC_MAX, \
+ .tcs_max = HW_ATL_B0_TC_MAX, \
.rxd_alignment = 1U, \
.rxd_size = HW_ATL_B0_RXD_SIZE, \
.rxds_max = HW_ATL_B0_MAX_RXD, \
@@ -45,7 +46,8 @@
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_GSO_UDP_L4 | \
- NETIF_F_GSO_PARTIAL, \
+ NETIF_F_GSO_PARTIAL | \
+ NETIF_F_HW_TC, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@@ -59,7 +61,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -69,7 +71,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -78,7 +80,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -86,7 +88,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = AQ_NIC_RATE_2GS |
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -113,12 +115,34 @@ static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
return 0;
}
+static int hw_atl_b0_tc_ptp_set(struct aq_hw_s *self)
+{
+ /* Init TC2 for PTP_TX */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
+ AQ_HW_PTP_TC);
+
+ /* Init TC2 for PTP_RX */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
+ AQ_HW_PTP_TC);
+ /* No flow control for PTP */
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, AQ_HW_PTP_TC);
+
+ return aq_hw_err_from_flags(self);
+}
+
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
- unsigned int i_priority = 0U;
- u32 buff_size = 0U;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX;
+ u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX;
+ unsigned int prio = 0U;
u32 tc = 0U;
+ if (cfg->is_ptp) {
+ tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE;
+ rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE;
+ }
+
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
@@ -126,69 +150,45 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
/* TPS VM init */
hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
- /* TPS TC credits init */
- hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
-
- tc = 0;
-
- /* TX Packet Scheduler Data TC0 */
- hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
- hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
- hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
- hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
-
- /* Tx buf size TC0 */
- buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;
-
- hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024 / 32U) * 66U) /
- 100U, tc);
- hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024 / 32U) * 50U) /
- 100U, tc);
- /* Init TC2 for PTP_TX */
- tc = 2;
+ tx_buff_size /= cfg->tcs;
+ rx_buff_size /= cfg->tcs;
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ u32 threshold = 0U;
- hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
- tc);
+ /* Tx buf size TC0 */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
- /* QoS Rx buf size per TC */
- tc = 0;
- buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;
+ threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
- hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
+ threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
- hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+ /* QoS Rx buf size per TC */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
- /* Init TC2 for PTP_RX */
- tc = 2;
+ threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
- hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
- tc);
- /* No flow control for PTP */
- hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
+ threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+ }
+
+ if (cfg->is_ptp)
+ hw_atl_b0_tc_ptp_set(self);
/* QoS 802.1p priority -> TC mapping */
- for (i_priority = 8U; i_priority--;)
- hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ for (prio = 0; prio < 8; ++prio)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
+ cfg->prio_tc_map[prio]);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
- struct aq_rss_parameters *rss_params)
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int addr = 0U;
@@ -251,9 +251,10 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg)
+int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg)
{
+ u64 rxcsum = !!(aq_nic_cfg->features & NETIF_F_RXCSUM);
unsigned int i;
/* TX checksums offloads*/
@@ -261,10 +262,8 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
- NETIF_F_RXCSUM));
- hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
- NETIF_F_RXCSUM));
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, rxcsum);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, rxcsum);
/* LSO offloads*/
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -272,7 +271,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
/* Outer VLAN tag offload */
hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);
-/* LRO offloads */
+ /* LRO offloads */
{
unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
@@ -311,10 +310,124 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
+{
+ static const u32 max_weight = BIT(HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
+ /* Scale factor is based on the number of bits in fractional portion */
+ static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
+ static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
+ HW_ATL_TPS_DESC_RATE_Y_SHIFT;
+ const u32 link_speed = self->aq_link_status.mbps;
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+ unsigned long num_min_rated_tcs = 0;
+ u32 tc_weight[AQ_CFG_TCS_MAX];
+ u32 fixed_max_credit;
+ u8 min_rate_msk = 0;
+ u32 sum_weight = 0;
+ int tc;
+
+	/* By default, max_credit is based upon the MTU (in units of 64 bytes) */
+ fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
+
+ if (link_speed) {
+ min_rate_msk = nic_cfg->tc_min_rate_msk &
+ (BIT(nic_cfg->tcs) - 1);
+ num_min_rated_tcs = hweight8(min_rate_msk);
+ }
+
+ /* First, calculate weights where min_rate is specified */
+ if (num_min_rated_tcs) {
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ if (!nic_cfg->tc_min_rate[tc]) {
+ tc_weight[tc] = 0;
+ continue;
+ }
+
+ tc_weight[tc] = (-1L + link_speed +
+ nic_cfg->tc_min_rate[tc] *
+ max_weight) /
+ link_speed;
+ tc_weight[tc] = min(tc_weight[tc], max_weight);
+ sum_weight += tc_weight[tc];
+ }
+ }
+
+ /* Use WSP (weighted strict priority) arbitration if min_rate is set
+ * for at least one TC, and plain RR (round-robin) otherwise.
+ */
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+ /* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
+ * leave Descriptor TC Arbiter as RR.
+ */
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+
+ hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
+
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+ u32 weight, max_credit;
+
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
+ fixed_max_credit);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);
+
+ if (num_min_rated_tcs) {
+ weight = tc_weight[tc];
+
+ if (!weight && sum_weight < max_weight)
+ weight = (max_weight - sum_weight) /
+ (nic_cfg->tcs - num_min_rated_tcs);
+ else if (!weight)
+ weight = 0x64;
+
+ max_credit = max(8 * weight, fixed_max_credit);
+ } else {
+ weight = 0x64;
+ max_credit = 0xFFF;
+ }
+
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
+ max_credit);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, en);
+
+ if (en) {
+ /* Nominal rate is always 10G */
+ const u32 rate = 10000U * scale /
+ nic_cfg->tc_max_rate[tc];
+ const u32 rate_int = rate >>
+ HW_ATL_TPS_DESC_RATE_Y_WIDTH;
+ const u32 rate_frac = rate & frac_msk;
+
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
+ } else {
+ /* A value of 1 indicates the queue is not
+ * rate controlled.
+ */
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+ }
+ for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
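+ /* Worked example for the rate-limit init above (illustrative only, not
+ * part of the driver): with HW_ATL_TPS_DESC_RATE_Y_WIDTH = 14, scale is
+ * 2^14 = 16384. For a TC capped at tc_max_rate = 2500 Mbps,
+ * rate = 10000 * 16384 / 2500 = 65536, so rate_int = 65536 >> 14 = 4 and
+ * rate_frac = 65536 & 0x3FFF = 0: a rate divider of 4.0, i.e. 10G / 4.
+ * The min_rate weight expression is a ceiling division:
+ * (link_speed - 1 + min_rate * max_weight) / link_speed equals
+ * DIV_ROUND_UP(min_rate * max_weight, link_speed), so any non-zero
+ * min_rate yields a non-zero weight.
+ */
+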
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+
/* Tx TC/Queue number config */
- hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
+ hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
@@ -324,7 +437,7 @@ static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+ aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ?
0x00010000U : 0x00000000U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
@@ -334,20 +447,32 @@ static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
+void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 rss_ctrl1 = HW_ATL_RSS_DISABLED;
+
+ if (cfg->is_rss)
+ rss_ctrl1 = (cfg->tc_mode == AQ_TC_MODE_8TCS) ?
+ HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS :
+ HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS;
+
+ hw_atl_reg_rx_flr_rss_control1set(self, rss_ctrl1);
+}
+
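+ /* A note on the RSS_CONTROL1 constants (an informal reading, not taken
+ * from a datasheet): the old code wrote 0xB3333333 whenever RSS was on;
+ * the per-mode values appear to pack an enable field in the top bits plus
+ * a per-TC count of RSS index bits, so 0xA2222222 gives 8 TCs with
+ * 2 index bits (4 queues) each, while 0x80003333 gives 4 TCs with
+ * 3 index bits (8 queues) each.
+ */
+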
static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int i;
/* Rx TC/RSS number config */
- hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
/* Rx flow control */
hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
- 0xB3333333U : 0x00000000U);
+ hw_atl_b0_hw_init_rx_rss_ctrl1(self);
/* Multicast filters */
for (i = HW_ATL_B0_MAC_MAX; i--;) {
@@ -372,8 +497,8 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00005040U,
- IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
+ aq_hw_write_reg(self, 0x00005040U, ATL_HW_IS_CHIP_FEATURE(self, RPF2) ?
+ 0x000F0000U : 0x00000000U);
hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
@@ -384,7 +509,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -479,23 +604,21 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_start(struct aq_hw_s *self)
+int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
@@ -511,9 +634,8 @@ static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
return 0;
}
-static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
- struct aq_ring_s *ring,
- unsigned int frags)
+int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int frags)
{
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
@@ -600,9 +722,8 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
- struct aq_ring_s *aq_ring,
- struct aq_ring_param_s *aq_ring_param)
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
@@ -643,9 +764,8 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
- struct aq_ring_s *aq_ring,
- struct aq_ring_param_s *aq_ring_param)
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
@@ -673,9 +793,8 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
- struct aq_ring_s *ring,
- unsigned int sw_tail_old)
+int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int sw_tail_old)
{
for (; sw_tail_old != ring->sw_tail;
sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
@@ -734,8 +853,8 @@ static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
{
unsigned int hw_head_;
int err = 0;
@@ -753,8 +872,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
{
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
@@ -854,14 +972,14 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
@@ -871,7 +989,7 @@ static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
@@ -880,8 +998,8 @@ static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
-static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
- unsigned int packet_filter)
+int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int i = 0U;
@@ -1071,34 +1189,20 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
-{
- *tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
- return aq_hw_err_from_flags(self);
-}
-
-static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
-{
- *tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
- return aq_hw_err_from_flags(self);
-}
-
#define get_ptp_ts_val_u64(self, indx) \
((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
@@ -1478,6 +1582,8 @@ static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
}
const struct aq_hw_ops hw_atl_ops_b0 = {
+ .hw_soft_reset = hw_atl_utils_soft_reset,
+ .hw_prepare = hw_atl_utils_initfw,
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
.hw_reset = hw_atl_b0_hw_reset,
@@ -1510,13 +1616,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,
.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
+ .hw_tc_rate_limit_set = hw_atl_b0_hw_init_tx_tc_rate_limit,
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
- .hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get,
- .hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get,
-
.hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
.hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index 09af1683034b..30f468f2084d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
@@ -33,4 +34,41 @@ extern const struct aq_hw_ops hw_atl_ops_b0;
#define hw_atl_ops_b1 hw_atl_ops_b0
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int sw_tail_old);
+int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int frags);
+int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
+
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
+
+int hw_atl_b0_hw_start(struct aq_hw_s *self);
+
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask);
+
+int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter);
+
#endif /* HW_ATL_B0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 7ab23a1751d3..cf460d61a45e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -75,7 +75,7 @@
#define HW_ATL_B0_RSS_HASHKEY_BITS 320U
#define HW_ATL_B0_TCRSS_4_8 1
-#define HW_ATL_B0_TC_MAX 1U
+#define HW_ATL_B0_TC_MAX 8U
#define HW_ATL_B0_RSS_MAX 8U
#define HW_ATL_B0_LRO_RXD_MAX 16U
@@ -151,6 +151,10 @@
#define HW_ATL_B0_MAX_RXD 8184U
#define HW_ATL_B0_MAX_TXD 8184U
+#define HW_ATL_RSS_DISABLED 0x00000000U
+#define HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS 0xA2222222U
+#define HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS 0x80003333U
+
/* HW layer capabilities */
#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index d1f68fc16291..3c8e8047ea1e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -693,6 +693,13 @@ void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
}
+u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT);
+}
+
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en)
{
@@ -747,7 +754,7 @@ void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
}
void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc)
+ u32 user_priority, u32 tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
static u32 rpf_rpb_rx_tc_upt_adr[8] = {
@@ -766,10 +773,9 @@ void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
};
- aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
- rpf_rpb_rx_tc_upt_msk[tc],
- rpf_rpb_rx_tc_upt_shft[tc],
- user_priority_tc_map);
+ aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[user_priority],
+ rpf_rpb_rx_tc_upt_msk[user_priority],
+ rpf_rpb_rx_tc_upt_shft[user_priority], tc);
}
void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
@@ -867,6 +873,13 @@ void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
vlan_prom_mode_en);
}
+u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT);
+}
+
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
u32 vlan_acc_untagged_packets)
{
@@ -1304,14 +1317,14 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
HW_ATL_TPB_TX_TC_MODE_MSK,
HW_ATL_TPB_TX_TC_MODE_SHIFT);
}
-void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
@@ -1450,8 +1463,8 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 max_credit,
- u32 tc)
+ const u32 tc,
+ const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
@@ -1460,13 +1473,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight,
- u32 tc)
+ const u32 tc,
+ const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
- tx_pkt_shed_desc_tc_weight);
+ weight);
}
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
@@ -1479,8 +1492,8 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 max_credit,
- u32 tc)
+ const u32 tc,
+ const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
@@ -1489,13 +1502,49 @@ void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight,
- u32 tc)
+ const u32 tc,
+ const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
- tx_pkt_shed_tc_data_weight);
+ weight);
+}
+
+void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
+ const u32 rate_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_TX_DESC_RATE_MODE_ADR,
+ HW_ATL_TPS_TX_DESC_RATE_MODE_MSK,
+ HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT,
+ rate_mode);
+}
+
+void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_EN_ADR(desc),
+ HW_ATL_TPS_DESC_RATE_EN_MSK,
+ HW_ATL_TPS_DESC_RATE_EN_SHIFT,
+ enable);
+}
+
+void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_int)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_X_ADR(desc),
+ HW_ATL_TPS_DESC_RATE_X_MSK,
+ HW_ATL_TPS_DESC_RATE_X_SHIFT,
+ rate_int);
+}
+
+void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_frac)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_Y_ADR(desc),
+ HW_ATL_TPS_DESC_RATE_Y_MSK,
+ HW_ATL_TPS_DESC_RATE_Y_SHIFT,
+ rate_frac);
}
/* tx */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 62992b23c0e8..61a6f70c51cd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -349,6 +349,9 @@ void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
u32 l2multicast_flr_en,
u32 filter);
+/* get l2 promiscuous mode enable */
+u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw);
+
/* set l2 promiscuous mode enable */
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en);
@@ -420,6 +423,9 @@ void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
u32 vlan_prom_mode_en);
+/* Get VLAN promiscuous mode enable */
+u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw);
+
/* Set VLAN untagged action */
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
u32 vlan_untagged_act);
@@ -610,11 +616,11 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
/* tpb */
/* set TX Traffic Class Mode */
-void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode);
/* get TX Traffic Class Mode */
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
@@ -682,13 +688,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
/* set tx packet scheduler descriptor tc max credit */
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 max_credit,
- u32 tc);
+ const u32 tc,
+ const u32 max_credit);
/* set tx packet scheduler descriptor tc weight */
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight,
- u32 tc);
+ const u32 tc,
+ const u32 weight);
/* set tx packet scheduler descriptor vm arbitration mode */
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
@@ -696,13 +702,29 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
/* set tx packet scheduler tc data max credit */
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 max_credit,
- u32 tc);
+ const u32 tc,
+ const u32 max_credit);
/* set tx packet scheduler tc data weight */
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight,
- u32 tc);
+ const u32 tc,
+ const u32 weight);
+
+/* set tx descriptor rate mode */
+void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
+ const u32 rate_mode);
+
+/* set tx packet scheduler descriptor rate enable */
+void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 enable);
+
+/* set tx packet scheduler descriptor rate integral value */
+void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_int);
+
+/* set tx packet scheduler descriptor rate fractional value */
+void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_frac);
/* tx */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 18de2f7b8959..06220792daf1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -2038,6 +2038,42 @@
/* default value of bitfield lso_tcp_flag_mid[b:0] */
#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
+/* tx tx_tc_mode bitfield definitions
+ * preprocessor definitions for the bitfield "tx_tc_mode".
+ * port="pif_tpb_tx_tc_mode_i,pif_tps_tx_tc_mode_i"
+ */
+
+/* register address for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
+/* bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
+/* lower bit position of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
+/* width of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
+/* default value of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
+
+/* tx tx_desc_rate_mode bitfield definitions
+ * preprocessor definitions for the bitfield "tx_desc_rate_mode".
+ * port="pif_tps_desc_rate_mode_i"
+ */
+
+/* register address for bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_ADR 0x00007900
+/* bitmask for bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSK 0x00000080
+/* inverted bitmask for bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSKN 0xFFFFFF7F
+/* lower bit position of bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT 7
+/* width of bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_WIDTH 1
+/* default value of bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_DEFAULT 0x0
+
/* tx tx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buf_en".
* port="pif_tpb_tx_buf_en_i"
@@ -2056,19 +2092,6 @@
/* default value of bitfield tx_buf_en */
#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
-/* register address for bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
-/* bitmask for bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
-/* inverted bitmask for bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
-/* lower bit position of bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
-/* width of bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
-/* default value of bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
-
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
@@ -2270,6 +2293,58 @@
/* default value of bitfield data_tc_arb_mode */
#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
+/* tx desc{r}_rate_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{r}_rate_en".
+ * port="pif_tps_desc_rate_en_i[0]"
+ */
+
+/* register address for bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_ADR(desc) (0x00007408 + (desc) * 0x10)
+/* bitmask for bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_MSK 0x80000000
+/* inverted bitmask for bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_MSKN 0x7FFFFFFF
+/* lower bit position of bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_SHIFT 31
+/* width of bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_WIDTH 1
+/* default value of bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_DEFAULT 0x0
+
+/* tx desc{r}_rate_x bitfield definitions
+ * preprocessor definitions for the bitfield "desc{r}_rate_x".
+ * port="pif_tps_desc0_rate_x"
+ */
+/* register address for bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_ADR(desc) (0x00007408 + (desc) * 0x10)
+/* bitmask for bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_MSK 0x03FF0000
+/* inverted bitmask for bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_MSKN 0xFC00FFFF
+/* lower bit position of bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_SHIFT 16
+/* width of bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_WIDTH 10
+/* default value of bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_DEFAULT 0x0
+
+/* tx desc{r}_rate_y bitfield definitions
+ * preprocessor definitions for the bitfield "desc{r}_rate_y".
+ * port="pif_tps_desc0_rate_y"
+ */
+/* register address for bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_ADR(desc) (0x00007408 + (desc) * 0x10)
+/* bitmask for bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_MSK 0x00003FFF
+/* inverted bitmask for bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_MSKN 0xFFFFC000
+/* lower bit position of bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_SHIFT 0
+/* width of bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_WIDTH 14
+/* default value of bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_DEFAULT 0x0
+
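+ /* Layout note (derived from the masks above, not from a datasheet): all
+ * three desc{r}_rate fields share one register per descriptor queue,
+ * 0x7408 + desc * 0x10: bit 31 is the enable, bits 25:16 hold the 10-bit
+ * integer part and bits 13:0 the 14-bit fractional part of the rate
+ * divider. E.g. an enabled divider of 4.0 reads back as 0x80040000.
+ */
+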
/* tx desc_rate_ta_rst bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_ta_rst".
* port="pif_tps_desc_rate_ta_rst_i"
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 354705f9bc49..73c0f41df8d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
@@ -53,7 +54,6 @@ enum mcp_area {
MCP_AREA_SETTINGS = 0x20000000,
};
-static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state);
static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
@@ -67,14 +67,10 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
int err = 0;
- err = hw_atl_utils_soft_reset(self);
- if (err)
- return err;
-
hw_atl_utils_hw_chip_features_init(self,
&self->chip_features);
- hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);
+ self->fw_ver_actual = hw_atl_utils_get_fw_version(self);
if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
self->fw_ver_actual) == 0) {
@@ -313,7 +309,7 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
for (++cnt; --cnt && !err;) {
aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
- if (IS_CHIP_FEATURE(REVISION_B1))
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
self, val, val != a,
1U, 1000U);
@@ -409,7 +405,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
if (err < 0)
goto err_exit;
- if (IS_CHIP_FEATURE(REVISION_B1))
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
else
err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);
@@ -438,7 +434,7 @@ int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
p, cnt, MCP_AREA_SETTINGS);
}
-static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
+int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
const u32 dw_major_mask = 0xff000000U;
const u32 dw_minor_mask = 0x00ffffffU;
@@ -501,7 +497,7 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
int err = 0;
- if (!IS_CHIP_FEATURE(MIPS)) {
+ if (!ATL_HW_IS_CHIP_FEATURE(self, MIPS)) {
err = -1;
goto err_exit;
}
@@ -607,7 +603,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
- if (IS_CHIP_FEATURE(REVISION_A0)) {
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_A0)) {
unsigned int mtu = self->aq_nic_cfg ?
self->aq_nic_cfg->mtu : 1514U;
pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
@@ -692,7 +688,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
link_status->mbps = 5000U;
break;
- case HAL_ATLANTIC_RATE_2GS:
+ case HAL_ATLANTIC_RATE_2G5:
link_status->mbps = 2500U;
break;
@@ -806,22 +802,24 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
u32 mif_rev = val & 0xFFU;
u32 chip_features = 0U;
+ chip_features |= ATL_HW_CHIP_ATLANTIC;
+
if ((0xFU & mif_rev) == 1U) {
- chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
- HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
- HAL_ATLANTIC_UTILS_CHIP_MIPS;
+ chip_features |= ATL_HW_CHIP_REVISION_A0 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS;
} else if ((0xFU & mif_rev) == 2U) {
- chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
- HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
- HAL_ATLANTIC_UTILS_CHIP_MIPS |
- HAL_ATLANTIC_UTILS_CHIP_TPO2 |
- HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ chip_features |= ATL_HW_CHIP_REVISION_B0 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS |
+ ATL_HW_CHIP_TPO2 |
+ ATL_HW_CHIP_RPF2;
} else if ((0xFU & mif_rev) == 0xAU) {
- chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
- HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
- HAL_ATLANTIC_UTILS_CHIP_MIPS |
- HAL_ATLANTIC_UTILS_CHIP_TPO2 |
- HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ chip_features |= ATL_HW_CHIP_REVISION_B1 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS |
+ ATL_HW_CHIP_TPO2 |
+ ATL_HW_CHIP_RPF2;
}
*p = chip_features;
@@ -919,11 +917,9 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
return 0;
}
-int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
+u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
{
- *fw_version = aq_hw_read_reg(self, 0x18U);
-
- return 0;
+ return aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
}
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index b15513914636..0b4b54fc1de0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
@@ -360,6 +361,8 @@ struct aq_rx_filter_vlan {
u8 queue;
};
+#define HW_ATL_VLAN_MAX_FILTERS 16U
+
struct aq_rx_filter_l2 {
s8 queue;
u8 location;
@@ -406,17 +409,6 @@ enum hw_atl_rx_ctrl_registers_l3l4 {
#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
((location) - AQ_RX_FIRST_LOC_FL3L4)
-#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
-#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
-#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
-#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
-#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
-#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
-#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U
-
-#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
- self->chip_features)
-
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
MPI_RESET = 1,
@@ -427,7 +419,7 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_10G BIT(0)
#define HAL_ATLANTIC_RATE_5G BIT(1)
#define HAL_ATLANTIC_RATE_5GSR BIT(2)
-#define HAL_ATLANTIC_RATE_2GS BIT(3)
+#define HAL_ATLANTIC_RATE_2G5 BIT(3)
#define HAL_ATLANTIC_RATE_1G BIT(4)
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
@@ -622,7 +614,7 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
-int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
+u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
@@ -643,6 +635,8 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
struct hw_atl_utils_fw_rpc **rpc);
+int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+
extern const struct aq_fw_ops aq_fw_1x_ops;
extern const struct aq_fw_ops aq_fw_2x_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 1ad10cc14918..eeedd8c90067 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
@@ -134,7 +135,7 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
if (speed & AQ_NIC_RATE_5GSR)
rate |= FW2X_RATE_5G;
- if (speed & AQ_NIC_RATE_2GS)
+ if (speed & AQ_NIC_RATE_2G5)
rate |= FW2X_RATE_2G5;
if (speed & AQ_NIC_RATE_1G)
@@ -155,7 +156,7 @@ static u32 fw2x_to_eee_mask(u32 speed)
if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK)
rate |= AQ_NIC_RATE_EEE_5G;
if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK)
- rate |= AQ_NIC_RATE_EEE_2GS;
+ rate |= AQ_NIC_RATE_EEE_2G5;
if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK)
rate |= AQ_NIC_RATE_EEE_1G;
@@ -170,7 +171,7 @@ static u32 eee_mask_to_fw2x(u32 speed)
rate |= HW_ATL_FW2X_CAP_EEE_10G_MASK;
if (speed & AQ_NIC_RATE_EEE_5G)
rate |= HW_ATL_FW2X_CAP_EEE_5G_MASK;
- if (speed & AQ_NIC_RATE_EEE_2GS)
+ if (speed & AQ_NIC_RATE_EEE_2G5)
rate |= HW_ATL_FW2X_CAP_EEE_2G5_MASK;
if (speed & AQ_NIC_RATE_EEE_1G)
rate |= HW_ATL_FW2X_CAP_EEE_1G_MASK;
@@ -282,8 +283,6 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
u32 mac_addr[2] = { 0 };
int err = 0;
- u32 h = 0U;
- u32 l = 0U;
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@@ -298,26 +297,6 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
ether_addr_copy(mac, (u8 *)mac_addr);
- if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
- unsigned int rnd = 0;
-
- get_random_bytes(&rnd, sizeof(unsigned int));
-
- l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16);
- h = 0x8001300EU;
-
- mac[5] = (u8)(0xFFU & l);
- l >>= 8;
- mac[4] = (u8)(0xFFU & l);
- l >>= 8;
- mac[3] = (u8)(0xFFU & l);
- l >>= 8;
- mac[2] = (u8)(0xFFU & l);
- mac[1] = (u8)(0xFFU & h);
- h >>= 8;
- mac[0] = (u8)(0xFFU & h);
- }
-
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
new file mode 100644
index 000000000000..8df9d4ef36f0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -0,0 +1,841 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "aq_ring.h"
+#include "aq_nic.h"
+#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl/hw_atl_utils.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl/hw_atl_llh_internal.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_internal.h"
+#include "hw_atl2_llh_internal.h"
+
+static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
+ u32 tag, u32 mask, u32 action);
+
+#define DEFAULT_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .msix_irqs = 8U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL2_RSS_MAX, \
+ .tcs_max = HW_ATL2_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL2_RXD_SIZE, \
+ .rxds_max = HW_ATL2_MAX_RXD, \
+ .rxds_min = HW_ATL2_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL2_TXD_SIZE, \
+ .txds_max = HW_ATL2_MAX_TXD, \
+ .txds_min = HW_ATL2_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL2_TX_RINGS, \
+ .rx_rings = HW_ATL2_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_GSO_UDP_L4 | \
+ NETIF_F_GSO_PARTIAL | \
+ NETIF_F_HW_TC, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL2_MTU_JUMBO, \
+ .mac_regs_count = 72, \
+ .hw_alive_check_addr = 0x10U, \
+ .priv_data_len = sizeof(struct hw_atl2_priv)
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL2_FW_SM_ACT_RSLVR);
+}
+
+static int hw_atl2_hw_reset(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ int err;
+
+ err = hw_atl2_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ memset(priv, 0, sizeof(*priv));
+
+ self->aq_fw_ops->set_state(self, MPI_RESET);
+
+ err = aq_hw_err_from_flags(self);
+
+ return err;
+}
+
+static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int tcs, q_per_tc;
+ unsigned int tc, q;
+ u32 rx_map = 0;
+ u32 tx_map = 0;
+
+ hw_atl2_tpb_tx_tc_q_rand_map_en_set(self, 1U);
+
+ switch (cfg->tc_mode) {
+ case AQ_TC_MODE_8TCS:
+ tcs = 8;
+ q_per_tc = 4;
+ break;
+ case AQ_TC_MODE_4TCS:
+ tcs = 4;
+ q_per_tc = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (tc = 0; tc != tcs; tc++) {
+ unsigned int tc_q_offset = tc * q_per_tc;
+
+ for (q = tc_q_offset; q != tc_q_offset + q_per_tc; q++) {
+ rx_map |= tc << HW_ATL2_RX_Q_TC_MAP_SHIFT(q);
+ if (HW_ATL2_RX_Q_TC_MAP_ADR(q) !=
+ HW_ATL2_RX_Q_TC_MAP_ADR(q + 1)) {
+ aq_hw_write_reg(self,
+ HW_ATL2_RX_Q_TC_MAP_ADR(q),
+ rx_map);
+ rx_map = 0;
+ }
+
+ tx_map |= tc << HW_ATL2_TX_Q_TC_MAP_SHIFT(q);
+ if (HW_ATL2_TX_Q_TC_MAP_ADR(q) !=
+ HW_ATL2_TX_Q_TC_MAP_ADR(q + 1)) {
+ aq_hw_write_reg(self,
+ HW_ATL2_TX_Q_TC_MAP_ADR(q),
+ tx_map);
+ tx_map = 0;
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
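+ /* The loop above accumulates per-queue TC fields into rx_map/tx_map and
+ * flushes the accumulator whenever the next queue's map address differs
+ * from the current one, so a single register write covers however many
+ * queue fields fit into one 32-bit map register, independent of the field
+ * width hidden behind the HW_ATL2_*_Q_TC_MAP_SHIFT/ADR macros.
+ */
+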
+static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 tx_buff_size = HW_ATL2_TXBUF_MAX;
+ u32 rx_buff_size = HW_ATL2_RXBUF_MAX;
+ unsigned int prio = 0U;
+ u32 tc = 0U;
+
+ /* TPS Descriptor rate init */
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ tx_buff_size /= cfg->tcs;
+ rx_buff_size /= cfg->tcs;
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ u32 threshold = 0U;
+
+ /* Tx buf size per TC */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ /* QoS Rx buf size per TC */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+ }
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (prio = 0; prio < 8; ++prio)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
+ cfg->prio_tc_map[prio]);
+
+ /* ATL2: apply the ring-to-TC mapping */
+ hw_atl2_hw_queue_to_tc_map_set(self);
+
+ return aq_hw_err_from_flags(self);
+}
+
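+ /* Worked example with hypothetical numbers: the 1024 / 32U factor
+ * converts a buffer size given in KB into the 32-byte units used by the
+ * threshold registers. If the per-TC Rx buffer were 40 KB, the high-water
+ * mark would be 40 * 32 * 66 / 100 = 844 units (~26.4 KB) and the
+ * low-water mark 40 * 32 * 50 / 100 = 640 units (20 KB).
+ */
+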
+static int hw_atl2_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u8 *indirection_table = rss_params->indirection_table;
+ const u32 num_tcs = aq_hw_num_tcs(self);
+ u32 rpf_redir2_enable;
+ int tc;
+ int i;
+
+ rpf_redir2_enable = num_tcs > 4 ? 1 : 0;
+
+ hw_atl2_rpf_redirection_table2_select_set(self, rpf_redir2_enable);
+
+ for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;) {
+ for (tc = 0; tc != num_tcs; tc++) {
+ hw_atl2_new_rpf_rss_redir_set(self, tc, i,
+ tc *
+ aq_hw_q_per_tc(self) +
+ indirection_table[i]);
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
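+ /* Example of the redirection math above (illustrative): in 4-TC mode
+ * with 8 queues per TC, indirection_table[i] = 5 for tc = 2 selects ring
+ * 2 * 8 + 5 = 21, i.e. each TC owns a contiguous block of rings and the
+ * indirection table only picks the offset inside that block.
+ */
+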
+static int hw_atl2_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
+{
+ static const u32 max_weight = BIT(HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
+ /* Scale factor is based on the number of bits in fractional portion */
+ static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
+ static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
+ HW_ATL_TPS_DESC_RATE_Y_SHIFT;
+ const u32 link_speed = self->aq_link_status.mbps;
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+ unsigned long num_min_rated_tcs = 0;
+ u32 tc_weight[AQ_CFG_TCS_MAX];
+ u32 fixed_max_credit_4b;
+ u32 fixed_max_credit;
+ u8 min_rate_msk = 0;
+ u32 sum_weight = 0;
+ int tc;
+
+ /* By default, max_credit is based upon the MTU (in units of 64 bytes) */
+ fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
+ /* ...and in units of 4 bytes */
+ fixed_max_credit_4b = nic_cfg->aq_hw_caps->mtu / 4;
+
+ if (link_speed) {
+ min_rate_msk = nic_cfg->tc_min_rate_msk &
+ (BIT(nic_cfg->tcs) - 1);
+ num_min_rated_tcs = hweight8(min_rate_msk);
+ }
+
+ /* First, calculate weights where min_rate is specified */
+ if (num_min_rated_tcs) {
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ if (!nic_cfg->tc_min_rate[tc]) {
+ tc_weight[tc] = 0;
+ continue;
+ }
+
+ tc_weight[tc] = (-1L + link_speed +
+ nic_cfg->tc_min_rate[tc] *
+ max_weight) /
+ link_speed;
+ tc_weight[tc] = min(tc_weight[tc], max_weight);
+ sum_weight += tc_weight[tc];
+ }
+ }
+
+ /* Use WSP (weighted strict priority) arbitration if min_rate is set
+ * for at least one TC, and plain RR (round-robin) otherwise.
+ */
+ hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+ /* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
+ * leave Descriptor TC Arbiter as RR.
+ */
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+
+ hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
+
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+ u32 weight, max_credit;
+
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
+ fixed_max_credit);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);
+
+ if (num_min_rated_tcs) {
+ weight = tc_weight[tc];
+
+ if (!weight && sum_weight < max_weight)
+ weight = (max_weight - sum_weight) /
+ (nic_cfg->tcs - num_min_rated_tcs);
+ else if (!weight)
+ weight = 0x640;
+
+ max_credit = max(2 * weight, fixed_max_credit_4b);
+ } else {
+ weight = 0x640;
+ max_credit = 0xFFF0;
+ }
+
+ hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
+ hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
+ max_credit);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, en);
+
+ if (en) {
+ /* Nominal rate is always 10G */
+ const u32 rate = 10000U * scale /
+ nic_cfg->tc_max_rate[tc];
+ const u32 rate_int = rate >>
+ HW_ATL_TPS_DESC_RATE_Y_WIDTH;
+ const u32 rate_frac = rate & frac_msk;
+
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
+ } else {
+ /* A value of 1 indicates the queue is not
+ * rate controlled.
+ */
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+ }
+ for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+
+ /* Tx TC/Queue number config */
+ hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);
+
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
+
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ hw_atl2_tpb_tx_buf_clk_gate_en_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map;
+ u16 action;
+ u8 index;
+ int i;
+
+ /* The Action Resolver Table (ART) is used by the RPF to decide which
+ * action to take with a packet, based upon an input tag and tag mask,
+ * where:
+ * - the input tag is a combination of the 3-bit VLAN Prio (PCP) and
+ * a 29-bit concatenation of all tags from the filter block;
+ * - the tag mask is a mask used for matching against the input tag.
+ * The input_tag is compared with all the Requested_tags in the Record
+ * table to find a match. The Action field of the matched REC entry is
+ * used for further processing. If multiple entries match, the Action
+ * field of the lowest-numbered REC entry is selected.
+ */
+ hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF);
+ hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC,
+ HW_ATL2_MAC_UC);
+ hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC);
+
+ /* FW reserves the beginning of ART, thus all driver entries must
+ * start from the offset specified in FW caps.
+ */
+ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_UC_MASK |
+ HW_ATL2_RPF_TAG_ALLMC_MASK,
+ HW_ATL2_ACTION_DROP);
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_VLAN_MASK |
+ HW_ATL2_RPF_TAG_UNTAG_MASK,
+ HW_ATL2_ACTION_DROP);
+
+ /* Configure ART to map given VLan Prio (PCP) to the TC index for
+ * RSS redirection table.
+ */
+ for (i = 0; i < 8; i++) {
+ action = HW_ATL2_ACTION_ASSIGN_TC(prio_tc_map[i]);
+
+ index = priv->art_base_index + HW_ATL2_RPF_PCP_TO_TC_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index,
+ i << HW_ATL2_RPF_TAG_PCP_OFFSET,
+ HW_ATL2_RPF_TAG_PCP_MASK, action);
+ }
+}
+
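+ /* Example of the ART semantics described above (assuming the usual
+ * (input_tag & mask) == requested_tag match): the L2 promisc-off record
+ * has tag 0 and mask HW_ATL2_RPF_TAG_UC_MASK | HW_ATL2_RPF_TAG_ALLMC_MASK,
+ * so it matches exactly those packets whose unicast/all-multicast tag bits
+ * are all zero - i.e. packets that hit no MAC filter - and drops them,
+ * which is what "promiscuous mode off" means here.
+ */
+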
+static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
+ bool promisc)
+{
+ u16 off_action = (!promisc &&
+ !hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
+ HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u8 index;
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_VLAN_MASK |
+ HW_ATL2_RPF_TAG_UNTAG_MASK, off_action);
+}
+
+static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
+{
+ u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ bool vlan_promisc_enable;
+ u8 index;
+
+ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_UC_MASK |
+ HW_ATL2_RPF_TAG_ALLMC_MASK,
+ off_action);
+
+ /* turn VLAN promisc mode on/off too */
+ vlan_promisc_enable = hw_atl_rpf_vlan_prom_mode_en_get(self);
+ hw_atl2_hw_new_rx_filter_vlan_promisc(self, promisc |
+ vlan_promisc_enable);
+}
+
+static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
+ u32 tag, u32 mask, u32 action)
+{
+ u32 val;
+ int err;
+
+ err = readx_poll_timeout_atomic(hw_atl2_sem_act_rslvr_get,
+ self, val, val == 1,
+ 1, 10000U);
+ if (err)
+ return err;
+
+ hw_atl2_rpf_act_rslvr_record_set(self, location, tag, mask,
+ action);
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1, HW_ATL2_FW_SM_ACT_RSLVR);
+
+ return err;
+}
+
+static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
+
+ /* Rx flow control */
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL);
+
+ /* RSS Ring selection */
+ hw_atl_b0_hw_init_rx_rss_ctrl1(self);
+
+ /* Multicast filters */
+ for (i = HW_ATL2_MAC_MAX; i--;) {
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, HW_ATL_MCAST_FLT_ANY_TO_HOST, 0U);
+
+ /* Vlan filters */
+ hw_atl_rpf_vlan_outer_etht_set(self, ETH_P_8021AD);
+ hw_atl_rpf_vlan_inner_etht_set(self, ETH_P_8021Q);
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
+
+ /* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
+
+ hw_atl2_hw_init_new_rx_filters(self);
+
+ /* Rx Interrupts */
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+{
+ static u32 aq_hw_atl2_igcr_table_[4][2] = {
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
+ };
+
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+ u8 base_index, count;
+ int err;
+
+ err = hw_atl2_utils_get_action_resolve_table_caps(self, &base_index,
+ &count);
+ if (err)
+ return err;
+
+ priv->art_base_index = 8 * base_index;
+
+ hw_atl2_init_launchtime(self);
+
+ hw_atl2_hw_init_tx_path(self);
+ hw_atl2_hw_init_rx_path(self);
+
+ hw_atl_b0_hw_mac_addr_set(self, mac_addr);
+
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
+
+ hw_atl2_hw_qos_set(self);
+ hw_atl2_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ hw_atl2_rpf_new_enable_set(self, 1);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ self->aq_fw_ops->update_stats(self);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl2_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+ /* Interrupts */
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL2_ERR_INT << 0x18) |
+ (1U << 0x1F)) |
+ ((HW_ATL2_ERR_INT << 0x10) |
+ (1U << 0x17)), 0U);
+
+ hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl2_hw_ring_rx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ return hw_atl_b0_hw_ring_rx_init(self, aq_ring, aq_ring_param);
+}
+
+static int hw_atl2_hw_ring_tx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ return hw_atl_b0_hw_ring_tx_init(self, aq_ring, aq_ring_param);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl2_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ hw_atl2_hw_new_rx_filter_promisc(self, IS_FILTER_ENABLED(IFF_PROMISC));
+
+ return hw_atl_b0_hw_packet_filter_set(self, packet_filter);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl2_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int err = 0;
+
+ if (count > (HW_ATL2_MAC_MAX - HW_ATL2_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (cfg->mc_list_count = 0U;
+ cfg->mc_list_count < count;
+ ++cfg->mc_list_count) {
+ u32 i = cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
+ HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
+ HW_ATL2_MAC_MIN + i);
+
+ hw_atl2_rpfl2_uc_flr_tag_set(self, 1, HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2_uc_flr_en_set(self, (cfg->is_mc_list_enabled),
+ HW_ATL2_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
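+ /* MAC word packing above (illustrative): for address 00:11:22:33:44:55
+ * the high word h = 0x0011 holds the first two octets and the low word
+ * l = 0x22334455 the remaining four, matching the MSW/LSW unicast filter
+ * registers.
+ */
+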
+static int hw_atl2_hw_interrupt_moderation_set(struct aq_hw_s *self)
+{
+ unsigned int i = 0U;
+ u32 itr_tx = 2U;
+ u32 itr_rx = 2U;
+
+ switch (self->aq_nic_cfg->itr) {
+ case AQ_CFG_INTERRUPT_MODERATION_ON:
+ case AQ_CFG_INTERRUPT_MODERATION_AUTO:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
+
+ if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+ /* HW timers are in 2us units */
+ int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+ int tx_min_timer = tx_max_timer / 2;
+
+ int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+ int rx_min_timer = rx_max_timer / 2;
+
+ tx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
+ tx_max_timer);
+ tx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
+ tx_min_timer);
+ rx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
+ rx_max_timer);
+ rx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
+ rx_min_timer);
+
+ itr_tx |= tx_min_timer << 0x8U;
+ itr_tx |= tx_max_timer << 0x10U;
+ itr_rx |= rx_min_timer << 0x8U;
+ itr_rx |= rx_max_timer << 0x10U;
+ } else {
+ static unsigned int hw_atl2_timers_table_tx_[][2] = {
+ {0xfU, 0xffU}, /* 10Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit 5GS */
+ {0xfU, 0x1ffU}, /* 2.5Gbit */
+ {0xfU, 0x1ffU}, /* 1Gbit */
+ {0xfU, 0x1ffU}, /* 100Mbit */
+ };
+ static unsigned int hw_atl2_timers_table_rx_[][2] = {
+ {0x6U, 0x38U},/* 10Gbit */
+ {0xCU, 0x70U},/* 5Gbit */
+ {0xCU, 0x70U},/* 5Gbit 5GS */
+ {0x18U, 0xE0U},/* 2.5Gbit */
+ {0x30U, 0x80U},/* 1Gbit */
+ {0x4U, 0x50U},/* 100Mbit */
+ };
+ unsigned int mbps = self->aq_link_status.mbps;
+ unsigned int speed_index;
+
+ speed_index = hw_atl_utils_mbps_2_speed_index(mbps);
+
+ /* Update user visible ITR settings */
+ self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_
+ [speed_index][1] * 2;
+ self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_
+ [speed_index][1] * 2;
+
+ itr_tx |= hw_atl2_timers_table_tx_
+ [speed_index][0] << 0x8U;
+ itr_tx |= hw_atl2_timers_table_tx_
+ [speed_index][1] << 0x10U;
+
+ itr_rx |= hw_atl2_timers_table_rx_
+ [speed_index][0] << 0x8U;
+ itr_rx |= hw_atl2_timers_table_rx_
+ [speed_index][1] << 0x10U;
+ }
+ break;
+ case AQ_CFG_INTERRUPT_MODERATION_OFF:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
+ itr_tx = 0U;
+ itr_rx = 0U;
+ break;
+ }
+
+ for (i = HW_ATL2_RINGS_MAX; i--;) {
+ hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_stop(struct aq_hw_s *self)
+{
+ hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
+
+ return 0;
+}
+
+static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
+{
+ return &self->curr_stats;
+}
+
+static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u32 queue;
+ u8 index;
+ int i;
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1U);
+
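+	/* For each filter: tear down the previous state, then, if the entry
+	 * is enabled, re-program it. A queue assignment additionally tags
+	 * matching frames with (i + 2) and installs an action-resolver entry
+	 * routing that tag to the requested queue.
+	 */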
+ for (i = 0; i < HW_ATL_VLAN_MAX_FILTERS; i++) {
+ queue = HW_ATL2_ACTION_ASSIGN_QUEUE(aq_vlans[i].queue);
+
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index, 0, 0,
+ HW_ATL2_ACTION_DISABLE);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id, i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+
+ hw_atl2_rpf_vlan_flr_tag_set(self, i + 2, i);
+
+ index = priv->art_base_index +
+ HW_ATL2_RPF_VLAN_USER_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index,
+ (i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET,
+ HW_ATL2_RPF_TAG_VLAN_MASK, queue);
+ } else {
+ hw_atl2_rpf_vlan_flr_tag_set(self, 1, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+	/* Enable promiscuous VLAN mode when the VLAN filter is disabled */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+ hw_atl2_hw_new_rx_filter_vlan_promisc(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
+const struct aq_hw_ops hw_atl2_ops = {
+ .hw_soft_reset = hw_atl2_utils_soft_reset,
+ .hw_prepare = hw_atl2_utils_initfw,
+ .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
+ .hw_init = hw_atl2_hw_init,
+ .hw_reset = hw_atl2_hw_reset,
+ .hw_start = hw_atl_b0_hw_start,
+ .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
+ .hw_stop = hw_atl2_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_b0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_b0_hw_irq_disable,
+ .hw_irq_read = hw_atl_b0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl2_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl2_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl2_hw_packet_filter_set,
+ .hw_filter_vlan_set = hw_atl2_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl2_hw_vlan_ctrl,
+ .hw_multicast_list_set = hw_atl2_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl2_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
+ .hw_tc_rate_limit_set = hw_atl2_hw_init_tx_tc_rate_limit,
+ .hw_get_hw_stats = hw_atl2_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl2_utils_get_fw_version,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
new file mode 100644
index 000000000000..de8723f1c28a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_H
+#define HW_ATL2_H
+
+#include "aq_common.h"
+
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_ops hw_atl2_ops;
+
+#endif /* HW_ATL2_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h
new file mode 100644
index 000000000000..5a89bb8722f9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_INTERNAL_H
+#define HW_ATL2_INTERNAL_H
+
+#include "hw_atl2_utils.h"
+
+#define HW_ATL2_MTU_JUMBO 16352U
+#define HW_ATL2_MTU 1514U
+
+#define HW_ATL2_TX_RINGS 4U
+#define HW_ATL2_RX_RINGS 4U
+
+#define HW_ATL2_RINGS_MAX 32U
+#define HW_ATL2_TXD_SIZE (16U)
+#define HW_ATL2_RXD_SIZE (16U)
+
+#define HW_ATL2_MAC_UC 0U
+#define HW_ATL2_MAC_MIN 1U
+#define HW_ATL2_MAC_MAX 38U
+
+/* interrupts */
+#define HW_ATL2_ERR_INT 8U
+#define HW_ATL2_INT_MASK (0xFFFFFFFFU)
+
+#define HW_ATL2_TXBUF_MAX 128U
+#define HW_ATL2_RXBUF_MAX 192U
+
+#define HW_ATL2_RSS_REDIRECTION_MAX 64U
+
+#define HW_ATL2_TC_MAX 8U
+#define HW_ATL2_RSS_MAX 8U
+
+#define HW_ATL2_INTR_MODER_MAX 0x1FF
+#define HW_ATL2_INTR_MODER_MIN 0xFF
+
+#define HW_ATL2_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL2_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL2_MAX_RXD 8184U
+#define HW_ATL2_MAX_TXD 8184U
+
+#define HW_ATL2_FW_SM_ACT_RSLVR 0x3U
+
+#define HW_ATL2_RPF_TAG_UC_OFFSET 0x0
+#define HW_ATL2_RPF_TAG_ALLMC_OFFSET 0x6
+#define HW_ATL2_RPF_TAG_ET_OFFSET 0x7
+#define HW_ATL2_RPF_TAG_VLAN_OFFSET 0xA
+#define HW_ATL2_RPF_TAG_UNTAG_OFFSET 0xE
+#define HW_ATL2_RPF_TAG_L3_V4_OFFSET 0xF
+#define HW_ATL2_RPF_TAG_L3_V6_OFFSET 0x12
+#define HW_ATL2_RPF_TAG_L4_OFFSET 0x15
+#define HW_ATL2_RPF_TAG_L4_FLEX_OFFSET 0x18
+#define HW_ATL2_RPF_TAG_FLEX_OFFSET 0x1B
+#define HW_ATL2_RPF_TAG_PCP_OFFSET 0x1D
+
+#define HW_ATL2_RPF_TAG_UC_MASK (0x0000003F << HW_ATL2_RPF_TAG_UC_OFFSET)
+#define HW_ATL2_RPF_TAG_ALLMC_MASK (0x00000001 << HW_ATL2_RPF_TAG_ALLMC_OFFSET)
+#define HW_ATL2_RPF_TAG_UNTAG_MASK (0x00000001 << HW_ATL2_RPF_TAG_UNTAG_OFFSET)
+#define HW_ATL2_RPF_TAG_VLAN_MASK (0x0000000F << HW_ATL2_RPF_TAG_VLAN_OFFSET)
+#define HW_ATL2_RPF_TAG_ET_MASK (0x00000007 << HW_ATL2_RPF_TAG_ET_OFFSET)
+#define HW_ATL2_RPF_TAG_L3_V4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V4_OFFSET)
+#define HW_ATL2_RPF_TAG_L3_V6_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V6_OFFSET)
+#define HW_ATL2_RPF_TAG_L4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L4_OFFSET)
+#define HW_ATL2_RPF_TAG_PCP_MASK (0x00000007 << HW_ATL2_RPF_TAG_PCP_OFFSET)
+
+#define HW_ATL2_RPF_TAG_BASE_UC BIT(HW_ATL2_RPF_TAG_UC_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_ALLMC BIT(HW_ATL2_RPF_TAG_ALLMC_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_UNTAG BIT(HW_ATL2_RPF_TAG_UNTAG_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_VLAN BIT(HW_ATL2_RPF_TAG_VLAN_OFFSET)
+
+enum HW_ATL2_RPF_ART_INDEX {
+ HW_ATL2_RPF_L2_PROMISC_OFF_INDEX,
+ HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX,
+ HW_ATL2_RPF_L3L4_USER_INDEX = 8,
+ HW_ATL2_RPF_ET_PCP_USER_INDEX = HW_ATL2_RPF_L3L4_USER_INDEX + 16,
+ HW_ATL2_RPF_VLAN_USER_INDEX = HW_ATL2_RPF_ET_PCP_USER_INDEX + 16,
+ HW_ATL2_RPF_PCP_TO_TC_INDEX = HW_ATL2_RPF_VLAN_USER_INDEX +
+ HW_ATL_VLAN_MAX_FILTERS,
+};
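+
+/* With 16 entries per user section this lays the table out as: 8..23 for
+ * L3/L4 filters, 24..39 for ethertype/PCP, 40..55 for VLAN (assuming
+ * HW_ATL_VLAN_MAX_FILTERS is 16), followed by the PCP-to-TC entries.
+ */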
+
+#define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \
+ ((((ACTION) & 0x3U) << 8) | \
+ (((RSS) & 0x1U) << 7) | \
+ (((INDEX) & 0x3FU) << 2) | \
+ (((VALID) & 0x1U) << 0))
+
+#define HW_ATL2_ACTION_DROP HW_ATL2_ACTION(0, 0, 0, 1)
+#define HW_ATL2_ACTION_DISABLE HW_ATL2_ACTION(0, 0, 0, 0)
+#define HW_ATL2_ACTION_ASSIGN_QUEUE(QUEUE) HW_ATL2_ACTION(1, 0, (QUEUE), 1)
+#define HW_ATL2_ACTION_ASSIGN_TC(TC) HW_ATL2_ACTION(1, 1, (TC), 1)
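+
+/* Illustrative expansion: HW_ATL2_ACTION_ASSIGN_QUEUE(3) ==
+ * (1 << 8) | (0 << 7) | (3 << 2) | (1 << 0) == 0x10d.
+ */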
+
+enum HW_ATL2_RPF_RSS_HASH_TYPE {
+ HW_ATL2_RPF_RSS_HASH_TYPE_NONE = 0,
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 = BIT(0),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP = BIT(1),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP = BIT(2),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 = BIT(3),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP = BIT(4),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP = BIT(5),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX = BIT(6),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP = BIT(7),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP = BIT(8),
+ HW_ATL2_RPF_RSS_HASH_TYPE_ALL = HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP,
+};
+
+#define HW_ATL_MCAST_FLT_ANY_TO_HOST 0x00010FFFU
+
+struct hw_atl2_priv {
+ struct statistics_s last_stats;
+ unsigned int art_base_index;
+};
+
+#endif /* HW_ATL2_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c
new file mode 100644
index 000000000000..cd954b11d24a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "hw_atl2_llh.h"
+#include "hw_atl2_llh_internal.h"
+#include "aq_hw_utils.h"
+
+void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
+ u32 select)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR,
+ HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK,
+ HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT, select);
+}
+
+void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR,
+ HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK,
+ HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT,
+ rss_hash_type);
+}
+
+/* rpf */
+
+void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_NEW_EN_ADR,
+ HW_ATL2_RPF_NEW_EN_MSK,
+ HW_ATL2_RPF_NEW_EN_SHIFT,
+ enable);
+}
+
+void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPFL2UC_TAG_ADR(filter),
+ HW_ATL2_RPFL2UC_TAG_MSK,
+ HW_ATL2_RPFL2UC_TAG_SHIFT,
+ tag);
+}
+
+void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_L2_BC_TAG_ADR,
+ HW_ATL2_RPF_L2_BC_TAG_MSK,
+ HW_ATL2_RPF_L2_BC_TAG_SHIFT,
+ tag);
+}
+
+void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
+ u32 queue)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_RSS_REDIR_ADR(tc, index),
+ HW_ATL2_RPF_RSS_REDIR_MSK(tc),
+ HW_ATL2_RPF_RSS_REDIR_SHIFT(tc),
+ queue);
+}
+
+void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_VL_TAG_ADR(filter),
+ HW_ATL2_RPF_VL_TAG_MSK,
+ HW_ATL2_RPF_VL_TAG_SHIFT,
+ tag);
+}
+
+/* TX */
+
+void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
+ const u32 tc_q_rand_map_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR,
+ HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK,
+ HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT,
+ tc_q_rand_map_en);
+}
+
+void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR,
+ HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK,
+ HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT,
+ clk_gate_en);
+}
+
+void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue),
+ tx_intr_moderation_ctl);
+}
+
+void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ const u32 data_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR,
+ HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK,
+ HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT,
+ data_arb_mode);
+}
+
+void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
+}
+
+void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL2_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT,
+ weight);
+}
+
+u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_FPGA_VER_ADR);
+}
+
+void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw)
+{
+	u32 hw_ver = hw_atl2_get_hw_version(aq_hw);
+	u32 clk_ratio;
+
+	if (hw_ver < HW_ATL2_FPGA_VER_U32(1, 0, 0, 0))
+		clk_ratio = HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED;
+	else if (hw_ver >= HW_ATL2_FPGA_VER_U32(1, 0, 85, 2))
+		clk_ratio = HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED;
+	else
+		clk_ratio = HW_ATL2_LT_CTRL_CLK_RATIO_QUARTER_SPEED;
+
+	aq_hw_write_reg_bit(aq_hw, HW_ATL2_LT_CTRL_ADR,
+			    HW_ATL2_LT_CTRL_CLK_RATIO_MSK,
+			    HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT,
+			    clk_ratio);
+}
+
+/* set action resolver record */
+void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 tag, u32 mask, u32 action)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(location),
+ tag);
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(location),
+ mask);
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(location),
+ action);
+}
+
+void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_REC_TAB_EN_ADR,
+ HW_ATL2_RPF_REC_TAB_EN_MSK,
+ HW_ATL2_RPF_REC_TAB_EN_SHIFT,
+ sections);
+}
+
+void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ data[j] = aq_hw_read_reg(aq_hw,
+ HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i));
+}
+
+void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ aq_hw_write_reg(aq_hw, HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i),
+ data[j]);
+}
+
+void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ data[j] = aq_hw_read_reg(aq_hw,
+ HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(i));
+}
+
+void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR,
+ HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK,
+ HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT,
+ finish);
+}
+
+u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL2_MIF_MCP_FINISHED_READ_ADR,
+ HW_ATL2_MIF_MCP_FINISHED_READ_MSK,
+ HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT);
+}
+
+u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR);
+}
+
+void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR, val);
+}
+
+u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_ADR);
+}
+
+void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR,
+ val);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h
new file mode 100644
index 000000000000..98c7a4621297
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_LLH_H
+#define HW_ATL2_LLH_H
+
+#include <linux/types.h>
+
+struct aq_hw_s;
+
+/* Set TX Interrupt Moderation Control Register */
+void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
+
+/* Set Redirection Table 2 Select */
+void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
+ u32 select);
+
+/* Set RSS hash type */
+void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type);
+
+/* set new RPF enable */
+void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable);
+
+/* set l2 unicast filter tag */
+void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
+
+/* set l2 broadcast filter tag */
+void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag);
+
+/* set new rss redirection table */
+void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
+ u32 queue);
+
+/* Set VLAN filter tag */
+void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
+
+/* set tx random TC-queue mapping enable bit */
+void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
+ const u32 tc_q_rand_map_en);
+
+/* set tx buffer clock gate enable */
+void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en);
+
+void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ const u32 data_arb_mode);
+
+/* set tx packet scheduler tc data max credit */
+void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit);
+
+/* set tx packet scheduler tc data weight */
+void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight);
+
+u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw);
+
+void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw);
+
+/* set action resolver record */
+void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 tag, u32 mask, u32 action);
+
+/* set enable action resolver section */
+void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections);
+
+/* get data from firmware shared input buffer */
+void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* set data into firmware shared input buffer */
+void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* get data from firmware shared output buffer */
+void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* set host finished write shared buffer indication */
+void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish);
+
+/* get mcp finished read shared buffer indication */
+u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw);
+
+/* get mcp boot register */
+u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw);
+
+/* set mcp boot register */
+void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val);
+
+/* get host interrupt request */
+u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw);
+
+/* clear host interrupt request */
+void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val);
+
+#endif /* HW_ATL2_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h
new file mode 100644
index 000000000000..e34c5cda061e
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_LLH_INTERNAL_H
+#define HW_ATL2_LLH_INTERNAL_H
+
+/* RX pif_rpf_redir_2_en_i Bitfield Definitions
+ * PORT="pif_rpf_redir_2_en_i"
+ */
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR 0x000054C8
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK 0x00001000
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSKN 0xFFFFEFFF
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT 12
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_WIDTH 1
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_DEFAULT 0x0
+
+/* RX pif_rpf_rss_hash_type_i Bitfield Definitions
+ */
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR 0x000054C8
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK 0x000001FF
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSKN 0xFFFFFE00
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT 0
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_WIDTH 9
+
+/* rx rpf_new_rpf_en bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_new_rpf_en_i".
+ * port="pif_rpf_new_rpf_en_i
+ */
+
+/* register address for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_ADR 0x00005104
+/* bitmask for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_MSK 0x00000800
+/* inverted bitmask for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_MSKN 0xfffff7ff
+/* lower bit position of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_SHIFT 11
+/* width of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_WIDTH 1
+/* default value of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_DEFAULT 0x0
+
+/* rx l2_uc_req_tag0{f}[5:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_req_tag0{f}[5:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_req_tag0[5:0]"
+ */
+
+/* register address for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_MSK 0x0FC00000
+/* inverted bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_MSKN 0xF03FFFFF
+/* lower bit position of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_SHIFT 22
+/* width of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_WIDTH 6
+/* default value of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_DEFAULT 0x0
+
+/* rpf_l2_bc_req_tag[5:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_l2_bc_req_tag[5:0]".
+ * port="pifrpf_l2_bc_req_tag_i[5:0]"
+ */
+
+/* register address for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_ADR 0x000050F0
+/* bitmask for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_MSK 0x0000003F
+/* inverted bitmask for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_MSKN 0xffffffc0
+/* lower bit position of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_SHIFT 0
+/* width of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_WIDTH 6
+/* default value of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_DEFAULT 0x0
+
+/* rx rpf_rss_red1_data_[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_rss_red1_data[4:0]".
+ * port="pif_rpf_rss_red1_data_i[4:0]"
+ */
+
+/* register address for bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_ADR(TC, INDEX) (0x00006200 + \
+ (0x100 * !!((TC) > 3)) + (INDEX) * 4)
+/* bitmask for bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_MSK(TC) (0x0000001F << (5 * ((TC) % 4)))
+/* lower bit position of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_SHIFT(TC) (5 * ((TC) % 4))
+/* width of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_WIDTH 5
+/* default value of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_DEFAULT 0x0
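+
+/* Example: for TC 5, index 2 the queue value lands in register 0x6308
+ * (0x6200 + 0x100 + 2 * 4), at bits [9:5] (shift = 5 * (5 % 4)).
+ */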
+
+/* rx vlan_req_tag0{f}[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vlan_req_tag0{f}[3:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_vlan_req_tag0[3:0]"
+ */
+
+/* register address for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* bitmask for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_MSK 0x0000F000
+/* inverted bitmask for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_MSKN 0xFFFF0FFF
+/* lower bit position of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_SHIFT 12
+/* width of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_WIDTH 4
+/* default value of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_DEFAULT 0x0
+
+/* RX rx_q{Q}_tc_map[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rx_q{Q}_tc_map[2:0]".
+ * Parameter: Queue {Q} | bit-level stride | range [0, 31]
+ * PORT="pif_rx_q0_tc_map_i[2:0]"
+ */
+
+/* Register address for bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_ADR(queue) \
+ (((queue) < 32) ? 0x00005900 + ((queue) / 8) * 4 : 0)
+/* Lower bit position of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_SHIFT(queue) \
+ (((queue) < 32) ? ((queue) * 4) % 32 : 0)
+/* Width of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_WIDTH 3
+/* Default value of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_DEFAULT 0x0
+
+/* tx tx_tc_q_rand_map_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_tc_q_rand_map_en".
+ * port="pif_tpb_tx_tc_q_rand_map_en_i"
+ */
+
+/* register address for bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR 0x00007900
+/* bitmask for bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK 0x00000200
+/* inverted bitmask for bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSKN 0xFFFFFDFF
+/* lower bit position of bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT 9
+/* width of bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_WIDTH 1
+/* default value of bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_DEFAULT 0x0
+
+/* tx tx_buffer_clk_gate_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_buffer_clk_gate_en".
+ * port="pif_tpb_tx_buffer_clk_gate_en_i"
+ */
+
+/* register address for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR 0x00007900
+/* bitmask for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK 0x00000020
+/* inverted bitmask for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSKN 0xffffffdf
+/* lower bit position of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT 5
+/* width of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_WIDTH 1
+/* default value of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_DEFAULT 0x0
+
+/* tx tx_q_tc_map{q} bitfield definitions
+ * preprocessor definitions for the bitfield "tx_q_tc_map{q}".
+ * parameter: queue {q} | bit-level stride | range [0, 31]
+ * port="pif_tpb_tx_q_tc_map0_i[2:0]"
+ */
+
+/* register address for bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_ADR(queue) \
+ (((queue) < 32) ? 0x0000799C + ((queue) / 4) * 4 : 0)
+/* lower bit position of bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_SHIFT(queue) \
+ (((queue) < 32) ? ((queue) * 8) % 32 : 0)
+/* width of bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_WIDTH 3
+/* default value of bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_DEFAULT 0x0
+
+/* tx data_tc_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc_arb_mode".
+ * port="pif_tps_data_tc_arb_mode_i"
+ */
+
+/* register address for bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
+/* bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK 0x00000003
+/* inverted bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffc
+/* lower bit position of bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT 0
+/* width of bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_WIDTH 2
+/* default value of bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
+
+/* tx data_tc{t}_credit_max[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_credit_max[f:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_credit_max_i[15:0]"
+ */
+
+/* register address for bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0xffff0000
+/* inverted bitmask for bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0x0000ffff
+/* lower bit position of bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
+/* width of bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 16
+/* default value of bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
+
+/* tx data_tc{t}_weight[e:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_weight[e:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_weight_i[14:0]"
+ */
+
+/* register address for bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x00007fff
+/* inverted bitmask for bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xffff8000
+/* lower bit position of bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT 0
+/* width of bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 15
+/* default value of bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
+
+/* tx interrupt moderation control register definitions
+ * Preprocessor definitions for TX Interrupt Moderation Control Register
+ * Base Address: 0x00007c28
+ * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
+ */
+
+#define HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue) (0x00007c28u + (queue) * 0x40)
+
+/* Launch time control register */
+#define HW_ATL2_LT_CTRL_ADR 0x00007a1c
+
+#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_MSK 0xFFFF0000
+#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_SHIFT 16
+
+#define HW_ATL2_LT_CTRL_CLK_RATIO_MSK 0x0000FF00
+#define HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT 8
+#define HW_ATL2_LT_CTRL_CLK_RATIO_QUARTER_SPEED 4
+#define HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED 2
+#define HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED 1
+
+#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_MSK 0x00000008
+#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_SHIFT 3
+
+#define HW_ATL2_LT_CTRL_LINK_SPEED_MSK 0x00000007
+#define HW_ATL2_LT_CTRL_LINK_SPEED_SHIFT 0
+
+/* FPGA VER register */
+#define HW_ATL2_FPGA_VER_ADR 0x000000f4
+#define HW_ATL2_FPGA_VER_U32(mj, mi, bl, rv) \
+ ((((mj) & 0xff) << 24) | \
+ (((mi) & 0xff) << 16) | \
+ (((bl) & 0xff) << 8) | \
+ (((rv) & 0xff) << 0))
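+
+/* Example: HW_ATL2_FPGA_VER_U32(1, 0, 85, 2) == 0x01005502, the revision
+ * boundary checked in hw_atl2_init_launchtime().
+ */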
+
+/* ahb_mem_addr{f}[31:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "ahb_mem_addr{f}[31:0]".
+ * Parameter: filter {f} | stride size 0x10 | range [0, 127]
+ * PORT="ahb_mem_addr{f}[31:0]"
+ */
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(filter) \
+ (0x00014000u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSKN 0x00000000u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_WIDTH 32
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_DEFAULT 0x0
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(filter) \
+ (0x00014004u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSKN 0x00000000u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_WIDTH 32
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_DEFAULT 0x0
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(filter) \
+ (0x00014008u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSK 0x000007FFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSKN 0xFFFFF800u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_WIDTH 11
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_DEFAULT 0x0
+
+/* rpf_rec_tab_en[15:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rpf_rec_tab_en[15:0]".
+ * PORT="pif_rpf_rec_tab_en[15:0]"
+ */
+/* Register address for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_ADR 0x00006ff0u
+/* Bitmask for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_SHIFT 0
+/* Width of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_WIDTH 16
+/* Default value of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_DEFAULT 0x0
+
+/* Register address for firmware shared input buffer */
+#define HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(dword) (0x00012000U + (dword) * 0x4U)
+/* Register address for firmware shared output buffer */
+#define HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(dword) (0x00013000U + (dword) * 0x4U)
+
+/* pif_host_finished_buf_wr_i Bitfield Definitions
+ * Preprocessor definitions for the bitfield "pif_host_finished_buf_wr_i".
+ * PORT="pif_host_finished_buf_wr_i"
+ */
+/* Register address for bitfield rpif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR 0x00000e00u
+/* Bitmask for bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK 0x00000001u
+/* Inverted bitmask for bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSKN 0xFFFFFFFEu
+/* Lower bit position of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT 0
+/* Width of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_WIDTH 1
+/* Default value of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_DEFAULT 0x0
+
+/* pif_mcp_finished_buf_rd_i Bitfield Definitions
+ * Preprocessor definitions for the bitfield "pif_mcp_finished_buf_rd_i".
+ * PORT="pif_mcp_finished_buf_rd_i"
+ */
+/* Register address for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_ADR 0x00000e04u
+/* Bitmask for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_MSK 0x00000001u
+/* Inverted bitmask for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_MSKN 0xFFFFFFFEu
+/* Lower bit position of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT 0
+/* Width of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_WIDTH 1
+/* Default value of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_DEFAULT 0x0
+
+/* Register address for bitfield pif_mcp_boot_reg */
+#define HW_ATL2_MIF_BOOT_REG_ADR 0x00003040u
+
+#define HW_ATL2_MCP_HOST_REQ_INT_READY BIT(0)
+
+#define HW_ATL2_MCP_HOST_REQ_INT_ADR 0x00000F00u
+#define HW_ATL2_MCP_HOST_REQ_INT_SET_ADR 0x00000F04u
+#define HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR 0x00000F08u
+
+#endif /* HW_ATL2_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
new file mode 100644
index 000000000000..f3766780e975
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/iopoll.h>
+
+#include "aq_hw_utils.h"
+#include "hw_atl/hw_atl_utils.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_llh_internal.h"
+
+#define HW_ATL2_FW_VER_1X 0x01000000U
+
+#define AQ_A2_BOOT_STARTED BIT(0x18)
+#define AQ_A2_CRASH_INIT BIT(0x1B)
+#define AQ_A2_BOOT_CODE_FAILED BIT(0x1C)
+#define AQ_A2_FW_INIT_FAILED BIT(0x1D)
+#define AQ_A2_FW_INIT_COMP_SUCCESS BIT(0x1F)
+
+#define AQ_A2_FW_BOOT_FAILED_MASK (AQ_A2_CRASH_INIT | \
+ AQ_A2_BOOT_CODE_FAILED | \
+ AQ_A2_FW_INIT_FAILED)
+#define AQ_A2_FW_BOOT_COMPLETE_MASK (AQ_A2_FW_BOOT_FAILED_MASK | \
+ AQ_A2_FW_INIT_COMP_SUCCESS)
+
+#define AQ_A2_FW_BOOT_REQ_REBOOT BIT(0x0)
+#define AQ_A2_FW_BOOT_REQ_HOST_BOOT BIT(0x8)
+#define AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT BIT(0xA)
+#define AQ_A2_FW_BOOT_REQ_PHY_FAST_BOOT BIT(0xB)
+
+int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+{
+ int err;
+
+ self->fw_ver_actual = hw_atl2_utils_get_fw_version(self);
+
+ if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_a2_fw_ops;
+ } else {
+		aq_pr_err("Bad FW version detected: %x, but continuing\n",
+ self->fw_ver_actual);
+ *fw_ops = &aq_a2_fw_ops;
+ }
+	aq_pr_trace("Detected ATL2FW %x\n", self->fw_ver_actual);
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+
+ self->chip_features |= ATL_HW_CHIP_ANTIGUA;
+
+ return err;
+}
+
+static bool hw_atl2_mcp_boot_complete(struct aq_hw_s *self)
+{
+ u32 rbl_status;
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+ if (rbl_status & AQ_A2_FW_BOOT_COMPLETE_MASK)
+ return true;
+
+ /* Host boot requested */
+ if (hw_atl2_mif_host_req_int_get(self) & HW_ATL2_MCP_HOST_REQ_INT_READY)
+ return true;
+
+ return false;
+}
+
+int hw_atl2_utils_soft_reset(struct aq_hw_s *self)
+{
+ bool rbl_complete = false;
+ u32 rbl_status = 0;
+ u32 rbl_request;
+ int err;
+
+ hw_atl2_mif_host_req_int_clr(self, 0x01);
+ rbl_request = AQ_A2_FW_BOOT_REQ_REBOOT;
+#ifdef AQ_CFG_FAST_START
+ rbl_request |= AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT;
+#endif
+ hw_atl2_mif_mcp_boot_reg_set(self, rbl_request);
+
+ /* Wait for RBL boot */
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_boot_reg_get, self,
+ rbl_status,
+ ((rbl_status & AQ_A2_BOOT_STARTED) &&
+ (rbl_status != 0xFFFFFFFFu)),
+ 10, 200000);
+ if (err) {
+		aq_pr_err("Boot code hung\n");
+ goto err_exit;
+ }
+
+ err = readx_poll_timeout_atomic(hw_atl2_mcp_boot_complete, self,
+ rbl_complete,
+ rbl_complete,
+ 10, 2000000);
+
+ if (err) {
+		aq_pr_err("FW restart timed out\n");
+ goto err_exit;
+ }
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+
+ if (rbl_status & AQ_A2_FW_BOOT_FAILED_MASK) {
+ err = -EIO;
+		aq_pr_err("FW restart failed\n");
+ goto err_exit;
+ }
+
+ if (hw_atl2_mif_host_req_int_get(self) &
+ HW_ATL2_MCP_HOST_REQ_INT_READY) {
+ err = -EIO;
+		aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
+ goto err_exit;
+ }
+
+ if (self->aq_fw_ops) {
+ err = self->aq_fw_ops->init(self);
+ if (err) {
+			aq_pr_err("FW init failed\n");
+ goto err_exit;
+ }
+ }
+
+err_exit:
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
new file mode 100644
index 000000000000..b66fa346581c
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_UTILS_H
+#define HW_ATL2_UTILS_H
+
+#include "aq_hw.h"
+
+/* FW API */
+
+struct link_options_s {
+ u8 link_up:1;
+ u8 link_renegotiate:1;
+ u8 minimal_link_speed:1;
+ u8 internal_loopback:1;
+ u8 external_loopback:1;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 eee_5G:1;
+ u8 eee_10G:1;
+ u8 rsvd3:3;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 rsvd4:1;
+ u8 downshift:1;
+ u8 downshift_retry:4;
+};
+
+struct link_control_s {
+ u8 mode:4;
+ u8 disable_crc_corruption:1;
+ u8 discard_short_frames:1;
+ u8 flow_control_mode:1;
+ u8 disable_length_check:1;
+
+ u8 discard_errored_frames:1;
+ u8 control_frame_enable:1;
+ u8 enable_tx_padding:1;
+ u8 enable_crc_forwarding:1;
+	u8 enable_frame_padding_removal_rx:1;
+	u8 promiscuous_mode:1;
+ u8 rsvd:2;
+
+ u16 rsvd2;
+};
+
+struct thermal_shutdown_s {
+ u8 enable:1;
+ u8 warning_enable:1;
+ u8 rsvd:6;
+
+ u8 shutdown_temperature;
+ u8 cold_temperature;
+ u8 warning_temperature;
+};
+
+struct mac_address_s {
+ u8 mac_address[6];
+};
+
+struct mac_address_aligned_s {
+ struct mac_address_s aligned;
+ u16 rsvd;
+};
+
+struct sleep_proxy_s {
+ struct wake_on_lan_s {
+ u8 wake_on_magic_packet:1;
+ u8 wake_on_pattern:1;
+ u8 wake_on_link_up:1;
+ u8 wake_on_link_down:1;
+ u8 wake_on_ping:1;
+ u8 wake_on_timer:1;
+ u8 rsvd:2;
+
+ u8 rsvd2;
+ u16 rsvd3;
+
+ u32 link_up_timeout;
+ u32 link_down_timeout;
+ u32 timer;
+ } wake_on_lan;
+
+ struct {
+ u32 mask[4];
+ u32 crc32;
+ } wake_up_pattern[8];
+
+ struct __packed {
+ u8 arp_responder:1;
+ u8 echo_responder:1;
+ u8 igmp_client:1;
+ u8 echo_truncate:1;
+ u8 address_guard:1;
+ u8 ignore_fragmented:1;
+ u8 rsvd:2;
+
+ u16 echo_max_len;
+ u8 rsvd2;
+ } ipv4_offload;
+
+ u32 ipv4_offload_addr[8];
+ u32 reserved[8];
+
+ struct __packed {
+ u8 ns_responder:1;
+ u8 echo_responder:1;
+ u8 mld_client:1;
+ u8 echo_truncate:1;
+ u8 address_guard:1;
+ u8 rsvd:3;
+
+ u16 echo_max_len;
+ u8 rsvd2;
+ } ipv6_offload;
+
+ u32 ipv6_offload_addr[16][4];
+
+ struct {
+ u16 port[16];
+ } tcp_port_offload;
+
+ struct {
+ u16 port[16];
+ } udp_port_offload;
+
+ struct {
+ u32 retry_count;
+ u32 retry_interval;
+ } ka4_offload;
+
+ struct {
+ u32 timeout;
+ u16 local_port;
+ u16 remote_port;
+ u8 remote_mac_addr[6];
+ u16 rsvd;
+ u32 rsvd2;
+ u32 rsvd3;
+ u16 rsvd4;
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ u32 local_ip;
+ u32 remote_ip;
+ } ka4_connection[16];
+
+ struct {
+ u32 retry_count;
+ u32 retry_interval;
+ } ka6_offload;
+
+ struct {
+ u32 timeout;
+ u16 local_port;
+ u16 remote_port;
+ u8 remote_mac_addr[6];
+ u16 rsvd;
+ u32 rsvd2;
+ u32 rsvd3;
+ u16 rsvd4;
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ u32 local_ip[4];
+ u32 remote_ip[4];
+ } ka6_connection[16];
+
+ struct {
+ u32 rr_count;
+ u32 rr_buf_len;
+ u32 idx_offset;
+		u32 rr_offset;
+ } mdns_offload;
+};
+
+struct pause_quanta_s {
+ u16 quanta_10M;
+ u16 threshold_10M;
+ u16 quanta_100M;
+ u16 threshold_100M;
+ u16 quanta_1G;
+ u16 threshold_1G;
+ u16 quanta_2P5G;
+ u16 threshold_2P5G;
+ u16 quanta_5G;
+ u16 threshold_5G;
+ u16 quanta_10G;
+ u16 threshold_10G;
+};
+
+struct data_buffer_status_s {
+ u32 data_offset;
+ u32 data_length;
+};
+
+struct device_caps_s {
+ u8 finite_flashless:1;
+ u8 cable_diag:1;
+ u8 ncsi:1;
+ u8 avb:1;
+ u8 rsvd:4;
+
+ u8 rsvd2;
+ u16 rsvd3;
+ u32 rsvd4;
+};
+
+struct version_s {
+ struct bundle_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } bundle;
+ struct mac_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } mac;
+ struct phy_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } phy;
+ u32 rsvd;
+};
+
+struct link_status_s {
+ u8 link_state:4;
+ u8 link_rate:4;
+
+ u8 pause_tx:1;
+ u8 pause_rx:1;
+ u8 eee:1;
+ u8 duplex:1;
+ u8 rsvd:4;
+
+ u16 rsvd2;
+};
+
+struct wol_status_s {
+ u8 wake_count;
+ u8 wake_reason;
+
+	u16 wake_up_packet_length:12;
+	u16 wake_up_pattern_number:3;
+ u16 rsvd:1;
+
+ u32 wake_up_packet[379];
+};
+
+struct mac_health_monitor_s {
+ u8 mac_ready:1;
+ u8 mac_fault:1;
+ u8 mac_flashless_finished:1;
+ u8 rsvd:5;
+
+ u8 mac_temperature;
+ u16 mac_heart_beat;
+ u16 mac_fault_code;
+ u16 rsvd2;
+};
+
+struct phy_health_monitor_s {
+ u8 phy_ready:1;
+ u8 phy_fault:1;
+ u8 phy_hot_warning:1;
+ u8 rsvd:5;
+
+ u8 phy_temperature;
+ u16 phy_heart_beat;
+ u16 phy_fault_code;
+ u16 rsvd2;
+};
+
+struct device_link_caps_s {
+ u8 rsvd:3;
+ u8 internal_loopback:1;
+ u8 external_loopback:1;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 rsvd3:1;
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 rsvd4:1;
+ u8 eee_5G:1;
+ u8 rsvd5:1;
+ u8 eee_10G:1;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 pfc:1;
+ u8 downshift:1;
+ u8 downshift_retry:4;
+};
+
+struct sleep_proxy_caps_s {
+ u8 ipv4_offload:1;
+ u8 ipv6_offload:1;
+ u8 tcp_port_offload:1;
+ u8 udp_port_offload:1;
+ u8 ka4_offload:1;
+ u8 ka6_offload:1;
+ u8 mdns_offload:1;
+ u8 wake_on_ping:1;
+
+ u8 wake_on_magic_packet:1;
+ u8 wake_on_pattern:1;
+ u8 wake_on_timer:1;
+ u8 wake_on_link:1;
+ u8 wake_patterns_count:4;
+
+ u8 ipv4_count;
+ u8 ipv6_count;
+
+ u8 tcp_port_offload_count;
+ u8 udp_port_offload_count;
+
+ u8 tcp4_ka_count;
+ u8 tcp6_ka_count;
+
+ u8 igmp_offload:1;
+ u8 mld_offload:1;
+ u8 rsvd:6;
+
+ u8 rsvd2;
+ u16 rsvd3;
+};
+
+struct lkp_link_caps_s {
+ u8 rsvd:5;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 rsvd2:1;
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 rsvd3:1;
+ u8 eee_5G:1;
+ u8 rsvd4:1;
+ u8 eee_10G:1;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 rsvd5:6;
+};
+
+struct core_dump_s {
+ u32 reg0;
+ u32 reg1;
+ u32 reg2;
+
+ u32 hi;
+ u32 lo;
+
+ u32 regs[32];
+};
+
+struct trace_s {
+ u32 sync_counter;
+ u32 mem_buffer[0x1ff];
+};
+
+struct cable_diag_control_s {
+	u8 toggle:1;
+ u8 rsvd:7;
+
+ u8 wait_timeout_sec;
+ u16 rsvd2;
+};
+
+struct cable_diag_lane_data_s {
+ u8 result_code;
+ u8 dist;
+ u8 far_dist;
+ u8 rsvd;
+};
+
+struct cable_diag_status_s {
+ struct cable_diag_lane_data_s lane_data[4];
+ u8 transact_id;
+ u8 status:4;
+ u8 rsvd:4;
+ u16 rsvd2;
+};
+
+struct statistics_s {
+ struct {
+ u32 link_up;
+ u32 link_down;
+ } link;
+
+ struct {
+ u64 tx_unicast_octets;
+ u64 tx_multicast_octets;
+ u64 tx_broadcast_octets;
+ u64 rx_unicast_octets;
+ u64 rx_multicast_octets;
+ u64 rx_broadcast_octets;
+
+ u32 tx_unicast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_errors;
+
+ u32 rx_unicast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_dropped_frames;
+ u32 rx_error_frames;
+
+ u32 tx_good_frames;
+ u32 rx_good_frames;
+ u32 reserve_fw_gap;
+ } msm;
+ u32 main_loop_cycles;
+ u32 reserve_fw_gap;
+};
+
+struct filter_caps_s {
+ u8 l2_filters_base_index:6;
+ u8 flexible_filter_mask:2;
+ u8 l2_filter_count;
+ u8 ethertype_filter_base_index;
+ u8 ethertype_filter_count;
+
+ u8 vlan_filter_base_index;
+ u8 vlan_filter_count;
+ u8 l3_ip4_filter_base_index:4;
+ u8 l3_ip4_filter_count:4;
+ u8 l3_ip6_filter_base_index:4;
+ u8 l3_ip6_filter_count:4;
+
+ u8 l4_filter_base_index:4;
+ u8 l4_filter_count:4;
+ u8 l4_flex_filter_base_index:4;
+ u8 l4_flex_filter_count:4;
+ u8 rslv_tbl_base_index;
+ u8 rslv_tbl_count;
+};
+
+struct request_policy_s {
+ struct {
+ u8 all:1;
+ u8 mcast:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } promisc;
+
+ struct {
+ u8 accept:1;
+ u8 rsvd:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } bcast;
+
+ struct {
+ u8 accept:1;
+ u8 rsvd:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } mcast;
+
+ u8 rsvd:8;
+};
+
+struct fw_interface_in {
+ u32 mtu;
+ u32 rsvd1;
+ struct mac_address_aligned_s mac_address;
+ struct link_control_s link_control;
+ u32 rsvd2;
+ struct link_options_s link_options;
+ u32 rsvd3;
+ struct thermal_shutdown_s thermal_shutdown;
+ u32 rsvd4;
+ struct sleep_proxy_s sleep_proxy;
+ u32 rsvd5;
+ struct pause_quanta_s pause_quanta[8];
+ struct cable_diag_control_s cable_diag_control;
+ u32 rsvd6;
+ struct data_buffer_status_s data_buffer_status;
+ u32 rsvd7;
+ struct request_policy_s request_policy;
+};
+
+struct transaction_counter_s {
+ u16 transaction_cnt_a;
+ u16 transaction_cnt_b;
+};
+
+struct management_status_s {
+ struct mac_address_s mac_address;
+ u16 vlan;
+
+	struct {
+ u32 enable : 1;
+ u32 rsvd:31;
+ } flags;
+
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+struct fw_interface_out {
+ struct transaction_counter_s transaction_id;
+ struct version_s version;
+ struct link_status_s link_status;
+ struct wol_status_s wol_status;
+ u32 rsvd;
+ u32 rsvd2;
+ struct mac_health_monitor_s mac_health_monitor;
+ u32 rsvd3;
+ u32 rsvd4;
+ struct phy_health_monitor_s phy_health_monitor;
+ u32 rsvd5;
+ u32 rsvd6;
+ struct cable_diag_status_s cable_diag_status;
+ u32 rsvd7;
+ struct device_link_caps_s device_link_caps;
+ u32 rsvd8;
+ struct sleep_proxy_caps_s sleep_proxy_caps;
+ u32 rsvd9;
+ struct lkp_link_caps_s lkp_link_caps;
+ u32 rsvd10;
+ struct core_dump_s core_dump;
+ u32 rsvd11;
+ struct statistics_s stats;
+ u32 rsvd12;
+ struct filter_caps_s filter_caps;
+ struct device_caps_s device_caps;
+ u32 rsvd13;
+ struct management_status_s management_status;
+ u32 reserve[21];
+ struct trace_s trace;
+};
+
+#define AQ_A2_FW_LINK_RATE_INVALID 0
+#define AQ_A2_FW_LINK_RATE_10M 1
+#define AQ_A2_FW_LINK_RATE_100M 2
+#define AQ_A2_FW_LINK_RATE_1G 3
+#define AQ_A2_FW_LINK_RATE_2G5 4
+#define AQ_A2_FW_LINK_RATE_5G 5
+#define AQ_A2_FW_LINK_RATE_10G 6
+
+#define AQ_HOST_MODE_INVALID 0U
+#define AQ_HOST_MODE_ACTIVE 1U
+#define AQ_HOST_MODE_SLEEP_PROXY 2U
+#define AQ_HOST_MODE_LOW_POWER 3U
+#define AQ_HOST_MODE_SHUTDOWN 4U
+
+int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
+
+u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self);
+
+int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
+ u8 *base_index, u8 *count);
+
+extern const struct aq_fw_ops aq_a2_fw_ops;
+
+#endif /* HW_ATL2_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
new file mode 100644
index 000000000000..0ffc33bd67d0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/iopoll.h>
+
+#include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_internal.h"
+
+#define AQ_A2_FW_READ_TRY_MAX 1000
+
+#define hw_atl2_shared_buffer_write(HW, ITEM, VARIABLE) \
+ hw_atl2_mif_shared_buf_write(HW,\
+ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32))
+
+#define hw_atl2_shared_buffer_get(HW, ITEM, VARIABLE) \
+ hw_atl2_mif_shared_buf_get(HW, \
+ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), \
+ sizeof(VARIABLE) / sizeof(u32))
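+
+/* The _get/_write pair provides read-modify-write access to the request
+ * (fw_interface_in) area, while _read accesses the response
+ * (fw_interface_out) area. A typical (illustrative) sequence:
+ *
+ *	struct link_options_s opts;
+ *
+ *	hw_atl2_shared_buffer_get(self, link_options, opts);
+ *	opts.link_up = 1U;
+ *	hw_atl2_shared_buffer_write(self, link_options, opts);
+ */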
+
+/* This should never be used on non-atomic fields; treat any read wider
+ * than u32 as non-atomic.
+ */
+#define hw_atl2_shared_buffer_read(HW, ITEM, VARIABLE) \
+{\
+ BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
+ sizeof(u32)) != 0,\
+ "Non aligned read " # ITEM);\
+ BUILD_BUG_ON_MSG(sizeof(VARIABLE) > sizeof(u32),\
+ "Non atomic read " # ITEM);\
+ hw_atl2_mif_shared_buf_read(HW, \
+ (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\
+}
+
+#define hw_atl2_shared_buffer_read_safe(HW, ITEM, DATA) \
+ hw_atl2_shared_buffer_read_block((HW), \
+ (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
+ sizeof(((struct fw_interface_out *)0)->ITEM) / sizeof(u32),\
+ (DATA))
+
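+/* Read a consistent snapshot of the firmware output buffer: spin until the
+ * two transaction counter halves match, copy the block, then re-read the
+ * counter and retry if it changed mid-copy (the firmware presumably bumps
+ * one half before and the other half after each buffer update).
+ */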
+static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
+ u32 offset, u32 dwords, void *data)
+{
+ struct transaction_counter_s tid1, tid2;
+ int cnt = 0;
+
+ do {
+ do {
+ hw_atl2_shared_buffer_read(self, transaction_id, tid1);
+ cnt++;
+ if (cnt > AQ_A2_FW_READ_TRY_MAX)
+ return -ETIME;
+ if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
+ udelay(1);
+ } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
+
+ hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
+
+ hw_atl2_shared_buffer_read(self, transaction_id, tid2);
+
+ cnt++;
+ if (cnt > AQ_A2_FW_READ_TRY_MAX)
+ return -ETIME;
+ } while (tid2.transaction_cnt_a != tid2.transaction_cnt_b ||
+ tid1.transaction_cnt_a != tid2.transaction_cnt_a);
+
+ return 0;
+}
+
+static inline int hw_atl2_shared_buffer_finish_ack(struct aq_hw_s *self)
+{
+ u32 val;
+ int err;
+
+ hw_atl2_mif_host_finished_write_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
+ self, val, val == 0U,
+ 100, 100000U);
+ WARN(err, "hw_atl2_shared_buffer_finish_ack");
+
+ return err;
+}
+
+static int aq_a2_fw_init(struct aq_hw_s *self)
+{
+ struct link_control_s link_control;
+ u32 mtu;
+ u32 val;
+ int err;
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_ACTIVE;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ hw_atl2_shared_buffer_get(self, mtu, mtu);
+ mtu = HW_ATL2_MTU_JUMBO;
+ hw_atl2_shared_buffer_write(self, mtu, mtu);
+
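+	/* Same handshake as hw_atl2_shared_buffer_finish_ack(), but with a
+	 * longer timeout to give the firmware time to come up.
+	 */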
+ hw_atl2_mif_host_finished_write_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
+ self, val, val == 0U,
+ 100, 5000000U);
+ WARN(err, "hw_atl2_shared_buffer_finish_ack");
+
+ return err;
+}
+
+static int aq_a2_fw_deinit(struct aq_hw_s *self)
+{
+ struct link_control_s link_control;
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_SHUTDOWN;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static void a2_link_speed_mask2fw(u32 speed,
+ struct link_options_s *link_options)
+{
+ link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
+ link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
+ link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+ link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
+ link_options->rate_N2P5G = link_options->rate_2P5G;
+ link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
+ link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M);
+ link_options->rate_10M = !!(speed & AQ_NIC_RATE_10M);
+}
+
+static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.link_up = 1U;
+ a2_link_speed_mask2fw(speed, &link_options);
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+ switch (state) {
+ case MPI_INIT:
+ link_options.link_up = 1U;
+ break;
+ case MPI_DEINIT:
+ link_options.link_up = 0U;
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_update_link_status(struct aq_hw_s *self)
+{
+ struct link_status_s link_status;
+
+ hw_atl2_shared_buffer_read(self, link_status, link_status);
+
+ switch (link_status.link_rate) {
+ case AQ_A2_FW_LINK_RATE_10G:
+ self->aq_link_status.mbps = 10000;
+ break;
+ case AQ_A2_FW_LINK_RATE_5G:
+ self->aq_link_status.mbps = 5000;
+ break;
+ case AQ_A2_FW_LINK_RATE_2G5:
+ self->aq_link_status.mbps = 2500;
+ break;
+ case AQ_A2_FW_LINK_RATE_1G:
+ self->aq_link_status.mbps = 1000;
+ break;
+ case AQ_A2_FW_LINK_RATE_100M:
+ self->aq_link_status.mbps = 100;
+ break;
+ case AQ_A2_FW_LINK_RATE_10M:
+ self->aq_link_status.mbps = 10;
+ break;
+ default:
+ self->aq_link_status.mbps = 0;
+ }
+
+ return 0;
+}
+
+static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+ struct mac_address_aligned_s mac_address;
+
+ hw_atl2_shared_buffer_get(self, mac_address, mac_address);
+ ether_addr_copy(mac, (u8 *)mac_address.aligned.mac_address);
+
+ return 0;
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct statistics_s stats;
+
+ hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+
+#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
+ stats.msm._F_ - priv->last_stats.msm._F_)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc, rx_unicast_frames);
+ AQ_SDELTA(mprc, rx_multicast_frames);
+ AQ_SDELTA(bprc, rx_broadcast_frames);
+ AQ_SDELTA(erpr, rx_error_frames);
+
+ AQ_SDELTA(uptc, tx_unicast_frames);
+ AQ_SDELTA(mptc, tx_multicast_frames);
+ AQ_SDELTA(bptc, tx_broadcast_frames);
+ AQ_SDELTA(erpt, tx_errors);
+
+ AQ_SDELTA(ubrc, rx_unicast_octets);
+ AQ_SDELTA(ubtc, tx_unicast_octets);
+ AQ_SDELTA(mbrc, rx_multicast_octets);
+ AQ_SDELTA(mbtc, tx_multicast_octets);
+ AQ_SDELTA(bbrc, rx_broadcast_octets);
+ AQ_SDELTA(bbtc, tx_broadcast_octets);
+ }
+#undef AQ_SDELTA
+ self->curr_stats.dma_pkt_rc =
+ hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ self->curr_stats.dma_pkt_tc =
+ hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ self->curr_stats.dma_oct_rc =
+ hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ self->curr_stats.dma_oct_tc =
+ hw_atl_stats_tx_dma_good_octet_counter_get(self);
+ self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+ memcpy(&priv->last_stats, &stats, sizeof(stats));
+
+ return 0;
+}
+
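
The AQ_SDELTA() block above implements snapshot-delta accounting: each poll reads a fresh firmware snapshot, adds (new - last) to the running driver counters, and stores the snapshot for next time. With unsigned arithmetic the subtraction also yields the right delta across a counter wrap, as the standalone example below shows (field names are illustrative, not the driver's layout):

#include <stdint.h>
#include <stdio.h>

struct fw_snap { uint32_t rx_unicast_frames; };
struct running { uint64_t uprc; };

static void update_stats(struct running *cur, struct fw_snap *last,
			 const struct fw_snap *now)
{
	/* unsigned subtraction gives the right delta across a 32-bit wrap */
	cur->uprc += (uint32_t)(now->rx_unicast_frames -
				last->rx_unicast_frames);
	*last = *now;			/* keep the snapshot for next poll */
}

int main(void)
{
	struct running cur = { 0 };
	struct fw_snap last = { 0xfffffff0u };	/* counter about to wrap */
	struct fw_snap now = { 0x10u };

	update_stats(&cur, &last, &now);
	printf("delta: %llu\n", (unsigned long long)cur.uprc);	/* 32 */
	return 0;
}
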
+static int aq_a2_fw_renegotiate(struct aq_hw_s *self)
+{
+ struct link_options_s link_options;
+ int err;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.link_renegotiate = 1U;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ err = hw_atl2_shared_buffer_finish_ack(self);
+
+ /* Put the renegotiate flag back to zero
+ * after the command completes.
+ */
+ link_options.link_renegotiate = 0U;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return err;
+}
+
+u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
+{
+ struct version_s version;
+
+ hw_atl2_shared_buffer_read_safe(self, version, &version);
+
+ /* A2 FW version is stored in reverse order */
+ return version.mac.major << 24 |
+ version.mac.minor << 16 |
+ version.mac.build;
+}
+
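
For reference, the repacking above folds the firmware's separate major/minor/build fields into a single u32, convenient for logging and version comparisons. A tiny standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t major = 1, minor = 3;
	uint16_t build = 22;
	uint32_t v = (uint32_t)major << 24 | (uint32_t)minor << 16 | build;

	printf("fw %u.%u.%u -> 0x%08x\n", major, minor, build, v);
	return 0;
}
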
+int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
+ u8 *base_index, u8 *count)
+{
+ struct filter_caps_s filter_caps;
+ int err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, filter_caps, &filter_caps);
+ if (err)
+ return err;
+
+ *base_index = filter_caps.rslv_tbl_base_index;
+ *count = filter_caps.rslv_tbl_count;
+ return 0;
+}
+
+const struct aq_fw_ops aq_a2_fw_ops = {
+ .init = aq_a2_fw_init,
+ .deinit = aq_a2_fw_deinit,
+ .reset = NULL,
+ .renegotiate = aq_a2_fw_renegotiate,
+ .get_mac_permanent = aq_a2_fw_get_mac_permanent,
+ .set_link_speed = aq_a2_fw_set_link_speed,
+ .set_state = aq_a2_fw_set_state,
+ .update_link_status = aq_a2_fw_update_link_status,
+ .update_stats = aq_a2_fw_update_stats,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index fbe9d88b13c7..36c7cf05630a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -846,8 +846,7 @@ static int get_ingress_sakey_record(struct aq_hw_s *hw,
rec->key[7] = packed_record[14];
rec->key[7] |= packed_record[15] << 16;
- rec->key_len = (rec->key_len & 0xFFFFFFFC) |
- (packed_record[16] & 0x3);
+ rec->key_len = packed_record[16] & 0x3;
return 0;
}
@@ -1158,6 +1157,7 @@ static int set_egress_ctlf_record(struct aq_hw_s *hw,
packed_record[0] = rec->sa_da[0] & 0xFFFF;
packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+
packed_record[2] = rec->sa_da[1] & 0xFFFF;
packed_record[3] = rec->eth_type & 0xFFFF;
@@ -1552,7 +1552,7 @@ static int set_egress_sc_record(struct aq_hw_s *hw,
packed_record[5] |= (rec->sak_len & 0x3) << 4;
- packed_record[7] |= (rec->valid & 0x1) << 15;
+ packed_record[7] = (rec->valid & 0x1) << 15;
return set_raw_egress_record(hw, packed_record, 8, 2,
ROWOFFSET_EGRESSSCRECORD + table_index);
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 02b7705393ca..112edbd30823 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -871,13 +871,40 @@ static void ag71xx_mac_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_MII) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
+ ag71xx_is(ag, AR9340) ||
+ ag71xx_is(ag, QCA9530) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_GMII:
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_RMII:
+ if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_RGMII:
+ if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ break;
+ goto unsupported;
+ default:
+ goto unsupported;
}
phylink_set(mask, MII);
@@ -889,6 +916,8 @@ static void ag71xx_mac_validate(struct phylink_config *config,
phylink_set(mask, 100baseT_Full);
if (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_RGMII ||
state->interface == PHY_INTERFACE_MODE_GMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
@@ -898,6 +927,10 @@ static void ag71xx_mac_validate(struct phylink_config *config,
__ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return;
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
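
The reworked validate hook follows the usual phylink pattern: reject interface modes the SoC/MAC pairing cannot drive, build a mask of the link modes it can, then intersect the caller's supported and advertising bitmaps with that mask. A compact standalone model of the mask-and-intersect step (bit meanings invented for the example; the driver uses the ethtool link-mode bitmaps):

#include <stdint.h>
#include <stdio.h>

#define MODE_10_HALF   (1u << 0)
#define MODE_100_FULL  (1u << 1)
#define MODE_1000_FULL (1u << 2)

enum iface { IFACE_MII, IFACE_GMII, IFACE_SGMII };

static uint32_t validate(enum iface iface, uint32_t supported)
{
	uint32_t mask = MODE_10_HALF | MODE_100_FULL;

	switch (iface) {
	case IFACE_GMII:
	case IFACE_SGMII:
		mask |= MODE_1000_FULL;	/* gigabit only on these PHY modes */
		break;
	case IFACE_MII:
		break;
	default:
		return 0;		/* unsupported: clear everything */
	}
	return supported & mask;	/* bitmap_and() in the driver */
}

int main(void)
{
	printf("MII:  0x%x\n", validate(IFACE_MII, ~0u));
	printf("GMII: 0x%x\n", validate(IFACE_GMII, ~0u));
	return 0;
}
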
static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 00bd7bd55794..decab9a8e4a8 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1186,7 +1186,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
struct atl1c_hw *hw = &adapter->hw;
u32 mac, txq, rxq;
- hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false;
+ hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX;
hw->mac_speed = adapter->link_speed == SPEED_1000 ?
atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
@@ -2449,12 +2449,6 @@ static int atl1c_resume(struct device *dev)
atl1c_reset_mac(&adapter->hw);
atl1c_phy_init(&adapter->hw);
-#if 0
- AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
- pm_data &= ~PM_CTRLSTAT_PME_EN;
- AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
-#endif
-
netif_device_attach(netdev);
if (netif_running(netdev))
atl1c_up(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 271e7034fa70..b35fcfcd692d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1042,7 +1042,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
* each ring/block may need up to 8 bytes for alignment, hence the
* additional 40 bytes tacked onto the end.
*/
- ring_header->size = size =
+ ring_header->size =
sizeof(struct tx_packet_desc) * tpd_ring->count
+ sizeof(struct rx_free_desc) * rfd_ring->count
+ sizeof(struct rx_return_desc) * rrd_ring->count
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 53055ce5dfd6..2a69c0d06f3c 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -69,6 +69,7 @@ config BCMGENET
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
select DIMLIB
+ select BROADCOM_PHY if ARCH_BCM2835
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2c6ba046d2a8..17ae6df90723 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1145,7 +1145,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
break;
}
}
- if (false == pg_found) {
+ if (!pg_found) {
data[help_data->num_of_pg].pg = add_pg;
data[help_data->num_of_pg].pg_priority =
(1 << ttp[add_traf_type]);
@@ -1155,7 +1155,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
}
DP(BNX2X_MSG_DCB,
"add_traf_type %d pg_found %s num_of_pg %d\n",
- add_traf_type, (false == pg_found) ? "NO" : "YES",
+ add_traf_type, !pg_found ? "NO" : "YES",
help_data->num_of_pg);
}
}
@@ -1544,8 +1544,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
entry = 0;
- if (i == (num_of_pri-1) &&
- false == b_found_strict)
+ if (i == (num_of_pri-1) && !b_found_strict)
/* last entry will be handled separately.
* If no priority is strict, the last
* entry goes to the last queue.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 517caedc0a87..1426c691c7c4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3085,6 +3085,7 @@ static int bnx2x_bsc_read(struct link_params *params,
u8 xfer_cnt,
u32 *data_array)
{
+ u64 t0, delta;
u32 val, i;
int rc = 0;
@@ -3114,17 +3115,18 @@ static int bnx2x_bsc_read(struct link_params *params,
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
/* Poll for completion */
- i = 0;
+ t0 = ktime_get_ns();
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
- udelay(10);
- val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
- if (i++ > 1000) {
- DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n",
- i);
+ delta = ktime_get_ns() - t0;
+ if (delta > 10 * NSEC_PER_MSEC) {
+ DP(NETIF_MSG_LINK, "wr 0 byte timed out after %Lu ns\n",
+ delta);
rc = -EFAULT;
break;
}
+ usleep_range(10, 20);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
}
if (rc == -EFAULT)
return rc;
@@ -3138,16 +3140,18 @@ static int bnx2x_bsc_read(struct link_params *params,
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
/* Poll for completion */
- i = 0;
+ t0 = ktime_get_ns();
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
- udelay(10);
- val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
- if (i++ > 1000) {
- DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i);
+ delta = ktime_get_ns() - t0;
+ if (delta > 10 * NSEC_PER_MSEC) {
+ DP(NETIF_MSG_LINK, "rd op timed out after %Lu ns\n",
+ delta);
rc = -EFAULT;
break;
}
+ usleep_range(10, 20);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
}
if (rc == -EFAULT)
return rc;
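
Both polling loops in this hunk switch from counting iterations (1000 tries of udelay(10)) to budgeting wall-clock time with ktime_get_ns(), so the 10 ms limit holds regardless of how long each REG_RD() takes; usleep_range() additionally yields the CPU between polls. A hedged generic sketch of the pattern, with poll_ready() standing in for the status read:

#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int wait_ready(bool (*poll_ready)(void *ctx), void *ctx)
{
	u64 t0 = ktime_get_ns();

	while (!poll_ready(ctx)) {
		if (ktime_get_ns() - t0 > 10 * NSEC_PER_MSEC)
			return -ETIMEDOUT;	/* budget is time, not tries */
		usleep_range(10, 20);		/* sleeping context required */
	}
	return 0;
}
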
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5097a44686b3..b4476f44e386 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -331,27 +331,6 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
BP_VFDB(bp)->vf_sbs_pool++;
}
-static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *obj,
- atomic_t *counter)
-{
- struct list_head *pos;
- int read_lock;
- int cnt = 0;
-
- read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
- if (read_lock)
- DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
-
- list_for_each(pos, &obj->head)
- cnt++;
-
- if (!read_lock)
- bnx2x_vlan_mac_h_read_unlock(bp, obj);
-
- atomic_set(counter, cnt);
-}
-
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, bool drv_only, int type)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d1a83716d934..c62589c266b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1766,7 +1766,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.rx_buf_errors++;
+ bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
netdev_warn(bp->dev, "RX buffer error %x\n",
rx_err);
@@ -1849,7 +1849,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
}
}
@@ -4176,14 +4176,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int i, intr_process, rc, tmo_count;
struct input *req = msg;
u32 *data = msg;
- __le32 *resp_len;
u8 *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
struct hwrm_short_input short_input = {0};
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
- u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
@@ -4201,7 +4199,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
bar_offset = BNXT_GRCPF_REG_KONG_COMM;
doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
resp = bp->hwrm_cmd_kong_resp_addr;
- resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
}
memset(resp, 0, PAGE_SIZE);
@@ -4270,7 +4267,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
- resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
if (intr_process) {
u16 seq_id = bp->hwrm_intr_seq_id;
@@ -4298,9 +4294,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
le16_to_cpu(req->req_type));
return -EBUSY;
}
- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
- HWRM_RESP_LEN_SFT;
- valid = resp_addr + len - 1;
+ len = le16_to_cpu(resp->resp_len);
+ valid = ((u8 *)resp) + len - 1;
} else {
int j;
@@ -4311,8 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
*/
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
return -EBUSY;
- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
- HWRM_RESP_LEN_SFT;
+ len = le16_to_cpu(resp->resp_len);
if (len)
break;
/* on first few passes, just barely sleep */
@@ -4334,7 +4328,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
/* Last byte of resp contains valid bit */
- valid = resp_addr + len - 1;
+ valid = ((u8 *)resp) + len - 1;
for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
/* make sure we read from updated DMA memory */
dma_rmb();
@@ -5045,8 +5039,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req.lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN);
+ req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -5356,9 +5349,9 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (BNXT_PF(bp))
- db->doorbell = bp->bar1 + 0x10000;
+ db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
else
- db->doorbell = bp->bar1 + 0x4000;
+ db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
@@ -6365,6 +6358,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 min_db_offset = 0;
u16 flags;
int rc;
@@ -6413,6 +6407,21 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (!bp->max_mtu)
bp->max_mtu = BNXT_MAX_MTU;
+ if (bp->db_size)
+ goto func_qcfg_exit;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_PF(bp))
+ min_db_offset = DB_PF_OFFSET_P5;
+ else
+ min_db_offset = DB_VF_OFFSET_P5;
+ }
+ bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
+ if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
+ bp->db_size <= min_db_offset)
+ bp->db_size = pci_resource_len(bp->pdev, 2);
+
func_qcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6434,23 +6443,13 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
if (!rc) {
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
- int i;
+ int i, tqm_rings;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
rc = -ENOMEM;
goto ctx_err;
}
- ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
- if (!ctx_pg) {
- kfree(ctx);
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
-
- bp->ctx = ctx;
ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
@@ -6483,6 +6482,20 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+ ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
+ if (!ctx->tqm_fp_rings_count)
+ ctx->tqm_fp_rings_count = bp->max_q;
+
+ tqm_rings = ctx->tqm_fp_rings_count + 1;
+ ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
+ if (!ctx_pg) {
+ kfree(ctx);
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < tqm_rings; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+ bp->ctx = ctx;
} else {
rc = 0;
}
@@ -6735,7 +6748,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
return;
if (ctx->tqm_mem[0]) {
- for (i = 0; i < bp->max_q + 1; i++)
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
kfree(ctx->tqm_mem[0]);
ctx->tqm_mem[0] = NULL;
@@ -6756,6 +6769,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries;
+ u32 entries_sp, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
@@ -6845,14 +6859,17 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
- entries = ctx->qp_max_l2_entries + extra_qps;
+ min = ctx->tqm_min_entries_per_ring;
+ entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
+ 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
+ entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
+ entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
- ctx->tqm_max_entries_per_ring);
- for (i = 0; i < bp->max_q + 1; i++) {
+ entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = entries;
- mem_size = ctx->tqm_entry_size * entries;
+ ctx_pg->entries = i ? entries : entries_sp;
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
if (rc)
return rc;
@@ -9310,7 +9327,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
- if (bp->bnapi)
+ if (bp->bnapi && irq_re_init)
bnxt_get_ring_stats(bp, &bp->net_stats_prev);
if (irq_re_init) {
bnxt_free_irq(bp);
@@ -10265,7 +10282,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->missed_irqs++;
+ cpr->sw_stats.cmn.missed_irqs++;
}
}
}
@@ -10894,6 +10911,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->dev = dev;
bp->pdev = pdev;
+ /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
+ * determines the BAR size.
+ */
bp->bar0 = pci_ioremap_bar(pdev, 0);
if (!bp->bar0) {
dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
@@ -10901,13 +10921,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
goto init_err_release;
}
- bp->bar1 = pci_ioremap_bar(pdev, 2);
- if (!bp->bar1) {
- dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
- rc = -ENOMEM;
- goto init_err_release;
- }
-
bp->bar2 = pci_ioremap_bar(pdev, 4);
if (!bp->bar2) {
dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
@@ -11829,6 +11842,16 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
return 0;
}
+static int bnxt_map_db_bar(struct bnxt *bp)
+{
+ if (!bp->db_size)
+ return -ENODEV;
+ bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
+ if (!bp->bar1)
+ return -ENOMEM;
+ return 0;
+}
+
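
bnxt_map_db_bar() above replaces the old whole-BAR pci_ioremap_bar() with a bounded pci_iomap() of just bp->db_size bytes, the doorbell region size derived from what firmware reported (see the func_qcfg change earlier in this patch). A hedged sketch of that partial-BAR mapping pattern:

#include <linux/pci.h>
#include <linux/io.h>

/* Map only the usable doorbell region of a BAR; NULL on a bad size. */
static void __iomem *map_db_region(struct pci_dev *pdev, int bar,
				   unsigned long db_size)
{
	if (!db_size || db_size > pci_resource_len(pdev, bar))
		return NULL;
	return pci_iomap(pdev, bar, db_size);	/* maps db_size bytes only */
}
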
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -11889,6 +11912,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ rc = bnxt_map_db_bar(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto init_err_pci_clean;
+ }
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f6a3250ef1c5..9e173d74b72a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -537,6 +537,9 @@ struct nqe_cn {
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
+#define DB_PF_OFFSET_P5 0x10000
+#define DB_VF_OFFSET_P5 0x4000
+
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -656,11 +659,6 @@ struct nqe_cn {
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
-#define HWRM_RESP_ERR_CODE_MASK 0xffff
-#define HWRM_RESP_LEN_OFFSET 4
-#define HWRM_RESP_LEN_MASK 0xffff0000
-#define HWRM_RESP_LEN_SFT 16
-#define HWRM_RESP_VALID_MASK 0xff000000
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
@@ -907,6 +905,20 @@ struct bnxt_rx_ring_info {
struct page_pool *page_pool;
};
+struct bnxt_rx_sw_stats {
+ u64 rx_l4_csum_errors;
+ u64 rx_buf_errors;
+};
+
+struct bnxt_cmn_sw_stats {
+ u64 missed_irqs;
+};
+
+struct bnxt_sw_stats {
+ struct bnxt_rx_sw_stats rx;
+ struct bnxt_cmn_sw_stats cmn;
+};
+
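
Grouping the per-ring software counters into u64-only structs is what lets the ethtool dump below walk each group as a flat array (the (u64 *)&cpr->sw_stats.rx casts): adding a counter then means extending one struct and its string table in lockstep. Standalone illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rx_sw_stats { uint64_t rx_l4_csum_errors, rx_buf_errors; };

static const char * const rx_sw_stats_str[] = {
	"rx_l4_csum_errors", "rx_buf_errors",
};

int main(void)
{
	struct rx_sw_stats st = { 3, 1 };
	const uint64_t *sw = (const uint64_t *)&st;	/* flat u64 view */
	size_t i;

	for (i = 0; i < sizeof(st) / sizeof(*sw); i++)
		printf("%s: %llu\n", rx_sw_stats_str[i],
		       (unsigned long long)sw[i]);
	return 0;
}
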
struct bnxt_cp_ring_info {
struct bnxt_napi *bnapi;
u32 cp_raw_cons;
@@ -934,9 +946,8 @@ struct bnxt_cp_ring_info {
struct ctx_hw_stats *hw_stats;
dma_addr_t hw_stats_map;
u32 hw_stats_ctx_id;
- u64 rx_l4_csum_errors;
- u64 rx_buf_errors;
- u64 missed_irqs;
+
+ struct bnxt_sw_stats sw_stats;
struct bnxt_ring_struct cp_ring_struct;
@@ -1356,6 +1367,7 @@ struct bnxt_ctx_mem_info {
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 ctx_kind_initializer;
+ u8 tqm_fp_rings_count;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
@@ -1815,6 +1827,7 @@ struct bnxt {
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
+ int db_size;
#define BNXT_NTP_FLTR_MAX_FLTR 4096
#define BNXT_NTP_FLTR_HASH_SIZE 512
@@ -1857,7 +1870,6 @@ struct bnxt {
u8 dsn[8];
struct bnxt_tc_info *tc_info;
struct list_head tc_indr_block_list;
- struct notifier_block tc_netdev_nb;
struct dentry *debugfs_pdev;
struct device *hwmon_dev;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 34046a6286e8..6b88143af5ea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,7 @@ reset_coalesce:
return rc;
}
-static const char * const bnxt_ring_stats_str[] = {
+static const char * const bnxt_ring_rx_stats_str[] = {
"rx_ucast_packets",
"rx_mcast_packets",
"rx_bcast_packets",
@@ -146,6 +146,9 @@ static const char * const bnxt_ring_stats_str[] = {
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
+};
+
+static const char * const bnxt_ring_tx_stats_str[] = {
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
@@ -171,9 +174,12 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
"rx_tpa_errors",
};
-static const char * const bnxt_ring_sw_stats_str[] = {
+static const char * const bnxt_rx_sw_stats_str[] = {
"rx_l4_csum_errors",
"rx_buf_errors",
+};
+
+static const char * const bnxt_cmn_sw_stats_str[] = {
"missed_irqs",
};
@@ -303,6 +309,11 @@ static struct {
{0, "tx_total_discard_pkts"},
};
+#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
+#define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
+#define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
+#define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
+
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
@@ -482,12 +493,21 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
- int num_stats;
+ int rx, tx, cmn;
+ bool sh = false;
- num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
- ARRAY_SIZE(bnxt_ring_sw_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
- return num_stats * bp->cp_nr_rings;
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
+
+ rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
+ bnxt_get_num_tpa_ring_stats(bp);
+ tx = NUM_RING_TX_HW_STATS;
+ cmn = NUM_RING_CMN_SW_STATS;
+ if (sh)
+ return (rx + tx + cmn) * bp->cp_nr_rings;
+ else
+ return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ cmn * bp->cp_nr_rings;
}
static int bnxt_get_num_stats(struct bnxt *bp)
@@ -528,13 +548,29 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)
}
}
+static bool is_rx_ring(struct bnxt *bp, int ring_num)
+{
+ return ring_num < bp->rx_nr_rings;
+}
+
+static bool is_tx_ring(struct bnxt *bp, int ring_num)
+{
+ int tx_base = 0;
+
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+ tx_base = bp->rx_nr_rings;
+
+ if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
+ return true;
+ return false;
+}
+
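
These two helpers encode the completion-ring layout that the per-ring stats now depend on: with shared rings an index carries both an RX and a TX ring, otherwise RX rings occupy indices [0, rx_nr_rings) and TX rings follow. A standalone worked example of the same index math:

#include <stdbool.h>
#include <stdio.h>

static bool is_rx(int ring, int rx_nr) { return ring < rx_nr; }

static bool is_tx(int ring, int rx_nr, int tx_nr, bool shared)
{
	int tx_base = shared ? 0 : rx_nr;

	return ring >= tx_base && ring < tx_base + tx_nr;
}

int main(void)
{
	int i;

	/* 2 RX + 2 TX, non-shared: 4 completion rings in total */
	for (i = 0; i < 4; i++)
		printf("ring %d: rx=%d tx=%d\n", i,
		       is_rx(i, 2), is_tx(i, 2, 2, false));
	return 0;
}
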
static void bnxt_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
+ u32 tpa_stats;
if (!bp->bnapi) {
j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
@@ -544,17 +580,42 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
bnxt_sw_func_stats[i].counter = 0;
+ tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
__le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ u64 *sw;
int k;
- for (k = 0; k < stat_fields; j++, k++)
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ }
+ if (is_tx_ring(bp, i)) {
+ k = NUM_RING_RX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ }
+ if (!tpa_stats || !is_rx_ring(bp, i))
+ goto skip_tpa_ring_stats;
+
+ k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
+ tpa_stats; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
- buf[j++] = cpr->rx_l4_csum_errors;
- buf[j++] = cpr->rx_buf_errors;
- buf[j++] = cpr->missed_irqs;
+
+skip_tpa_ring_stats:
+ sw = (u64 *)&cpr->sw_stats.rx;
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
+ buf[j] = sw[k];
+ }
+
+ sw = (u64 *)&cpr->sw_stats.cmn;
+ for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
+ buf[j] = sw[k];
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -632,31 +693,48 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- num_str = ARRAY_SIZE(bnxt_ring_stats_str);
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_rx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
}
- if (!BNXT_SUPPORTS_TPA(bp))
+ if (is_tx_ring(bp, i)) {
+ num_str = NUM_RING_TX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_tx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = bnxt_get_num_tpa_ring_stats(bp);
+ if (!num_str || !is_rx_ring(bp, i))
goto skip_tpa_stats;
- if (bp->max_tpa_v2) {
- num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ if (bp->max_tpa_v2)
str = bnxt_ring_tpa2_stats_str;
- } else {
- num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ else
str = bnxt_ring_tpa_stats_str;
- }
+
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i, str[j]);
buf += ETH_GSTRING_LEN;
}
skip_tpa_stats:
- num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_SW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_rx_sw_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = NUM_RING_CMN_SW_STATS;
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i,
- bnxt_ring_sw_stats_str[j]);
+ bnxt_cmn_sw_stats_str[j]);
buf += ETH_GSTRING_LEN;
}
}
@@ -1749,8 +1827,8 @@ static int bnxt_flash_nvram(struct net_device *dev,
return rc;
}
-static int bnxt_firmware_reset(struct net_device *dev,
- u16 dir_type)
+static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags)
{
struct hwrm_fw_reset_input req = {0};
struct bnxt *bp = netdev_priv(dev);
@@ -1758,48 +1836,77 @@ static int bnxt_firmware_reset(struct net_device *dev,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+ req.embedded_proc_type = proc_type;
+ req.selfrst_status = self_reset;
+ req.flags = flags;
+
+ if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ } else {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
+ }
+ return rc;
+}
+
+static int bnxt_firmware_reset(struct net_device *dev,
+ enum bnxt_nvm_directory_type dir_type)
+{
+ u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
+ u8 proc_type, flags = 0;
+
/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
/* (e.g. when firmware isn't already running) */
switch (dir_type) {
case BNX_DIR_TYPE_CHIMP_PATCH:
case BNX_DIR_TYPE_BOOTCODE:
case BNX_DIR_TYPE_BOOTCODE_2:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
/* Self-reset ChiMP upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_APE_FW:
case BNX_DIR_TYPE_APE_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
/* Self-reset APE upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
- req.embedded_proc_type =
- FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
break;
case BNX_DIR_TYPE_BONO_FW:
case BNX_DIR_TYPE_BONO_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
- break;
- case BNXT_FW_RESET_CHIP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
- if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
- req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
- break;
- case BNXT_FW_RESET_AP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
break;
default:
return -EINVAL;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc == -EACCES)
- bnxt_print_admin_err(bp);
- return rc;
+ return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
+}
+
+static int bnxt_firmware_reset_chip(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 flags = 0;
+
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+
+ return bnxt_hwrm_firmware_reset(dev,
+ FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
+ flags);
+}
+
+static int bnxt_firmware_reset_ap(struct net_device *dev)
+{
+ return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
+ 0);
}
static int bnxt_flash_firmware(struct net_device *dev,
@@ -1988,9 +2095,9 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
rc, filename);
return rc;
}
- if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
+ if (bnxt_dir_type_is_ape_bin_format(dir_type))
rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
- else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+ else if (bnxt_dir_type_is_other_exec_format(dir_type))
rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
@@ -2012,11 +2119,12 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
bnxt_hwrm_fw_set_time(bp);
- if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
- BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, &item_len, NULL) != 0) {
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, &item_len, NULL);
+ if (rc) {
netdev_err(dev, "PKG update area not created in nvram\n");
- return -ENOBUFS;
+ return rc;
}
rc = request_firmware(&fw, filename, &dev->dev);
@@ -2377,7 +2485,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
}
/* Create or re-write an NVM item: */
- if (bnxt_dir_type_is_executable(type) == true)
+ if (bnxt_dir_type_is_executable(type))
return -EOPNOTSUPP;
ext = eeprom->magic & 0xffff;
ordinal = eeprom->offset >> 16;
@@ -2975,7 +3083,11 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = 0;
+ bool reload = false;
+ u32 req = *flags;
+
+ if (!req)
+ return -EINVAL;
if (!BNXT_PF(bp)) {
netdev_err(dev, "Reset is not supported from a VF\n");
@@ -2989,33 +3101,37 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EBUSY;
}
- if (*flags == ETH_RESET_ALL) {
+ if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
/* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
-
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
- if (!rc) {
- netdev_info(dev, "Reset request successful.\n");
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
- netdev_info(dev, "Reload driver to complete reset\n");
- *flags = 0;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_chip(dev)) {
+ netdev_info(dev, "Firmware reset request successful.\n");
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_CHIP;
+ }
+ } else if (req == BNXT_FW_RESET_CHIP) {
+ return -EOPNOTSUPP; /* only request, fail hard */
}
- } else if (*flags == ETH_RESET_AP) {
- /* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
+ }
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
- if (!rc) {
- netdev_info(dev, "Reset Application Processor request successful.\n");
- *flags = 0;
+ if (req & BNXT_FW_RESET_AP) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_ap(dev)) {
+ netdev_info(dev, "Reset application processor successful.\n");
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_AP;
+ }
+ } else if (req == BNXT_FW_RESET_AP) {
+ return -EOPNOTSUPP; /* only request, fail hard */
}
- } else {
- rc = -EINVAL;
}
- return rc;
+ if (reload)
+ netdev_info(dev, "Reload driver to complete reset\n");
+
+ return 0;
}
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 3576d951727b..ce7585ff9e4d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -77,8 +77,12 @@ struct hwrm_dbg_cmn_output {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
-#define BNXT_FW_RESET_AP 0xfffe
-#define BNXT_FW_RESET_CHIP 0xffff
+#define BNXT_FW_RESET_AP (ETH_RESET_AP << ETH_RESET_SHARED_SHIFT)
+#define BNXT_FW_RESET_CHIP ((ETH_RESET_MGMT | ETH_RESET_IRQ | \
+ ETH_RESET_DMA | ETH_RESET_FILTER | \
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC | \
+ ETH_RESET_PHY | ETH_RESET_RAM) \
+ << ETH_RESET_SHARED_SHIFT)
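
Redefining these request masks in terms of the uapi ETH_RESET_* bits (shifted into the shared half of the word) is what allows bnxt_reset() above to clear satisfied component bits from *flags, as the ethtool reset API expects. A small standalone check of the encoding using the uapi header:

#include <linux/ethtool.h>
#include <stdio.h>

int main(void)
{
	unsigned int ap_shared = ETH_RESET_AP << ETH_RESET_SHARED_SHIFT;

	printf("ETH_RESET_AP dedicated = 0x%x\n", ETH_RESET_AP);
	printf("ETH_RESET_AP shared    = 0x%x\n", ap_shared);
	return 0;
}
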
extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 7cf27dffadb5..7e9235c8d21e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2019 Broadcom Inc.
+ * Copyright (c) 2018-2020 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -207,6 +207,8 @@ struct cmd_nums {
#define HWRM_PORT_PHY_MDIO_READ 0xb6UL
#define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
#define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_PORT_ECN_QSTATS 0xbaUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -220,6 +222,8 @@ struct cmd_nums {
#define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
#define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
#define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
#define HWRM_EXEC_FWD_RESP 0xd0UL
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
@@ -233,6 +237,7 @@ struct cmd_nums {
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -331,6 +336,7 @@ struct cmd_nums {
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -341,6 +347,31 @@ struct cmd_nums {
#define HWRM_MFG_OTP_CFG 0x207UL
#define HWRM_MFG_OTP_QCFG 0x208UL
#define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_ATTACH 0x2c7UL
+ #define HWRM_TF_SESSION_CLOSE 0x2c8UL
+ #define HWRM_TF_SESSION_QCFG 0x2c9UL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL
+ #define HWRM_TF_TBL_TYPE_GET 0x2d0UL
+ #define HWRM_TF_TBL_TYPE_SET 0x2d1UL
+ #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL
+ #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL
+ #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL
+ #define HWRM_TF_EXT_EM_OP 0x2ddUL
+ #define HWRM_TF_EXT_EM_CFG 0x2deUL
+ #define HWRM_TF_EXT_EM_QCFG 0x2dfUL
+ #define HWRM_TF_TCAM_SET 0x2eeUL
+ #define HWRM_TF_TCAM_GET 0x2efUL
+ #define HWRM_TF_TCAM_MOVE 0x2f0UL
+ #define HWRM_TF_TCAM_FREE 0x2f1UL
+ #define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -356,6 +387,10 @@ struct cmd_nums {
#define HWRM_DBG_RING_INFO_GET 0xff1cUL
#define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
#define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -429,8 +464,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 12
-#define HWRM_VERSION_STR "1.10.1.12"
+#define HWRM_VERSION_RSVD 33
+#define HWRM_VERSION_STR "1.10.1.33"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -482,6 +517,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -647,6 +683,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1089,7 +1126,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:640b/80B) */
+/* hwrm_func_qcaps_output (size:704b/88B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1126,6 +1163,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
#define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
#define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1146,7 +1187,12 @@ struct hwrm_func_qcaps_output {
__le32 max_flow_id;
__le32 max_hw_ring_grps;
__le16 max_sp_tx_rings;
- u8 unused_0;
+ u8 unused_0[2];
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ u8 unused_1[3];
u8 valid;
};
@@ -1161,7 +1207,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:704b/88B) */
+/* hwrm_func_qcfg_output (size:768b/96B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1267,7 +1313,11 @@ struct hwrm_func_qcfg_output {
u8 always_1;
__le32 reset_addr_poll;
__le16 legacy_l2_db_size_kb;
- u8 unused_2[1];
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 unused_2[7];
u8 valid;
};
@@ -1420,9 +1470,10 @@ struct hwrm_func_qstats_input {
__le64 resp_addr;
__le16 fid;
u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY
+ #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK
u8 unused_0[5];
};
@@ -1456,6 +1507,53 @@ struct hwrm_func_qstats_output {
u8 valid;
};
+/* hwrm_func_qstats_ext_input (size:192b/24B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_ext_output (size:1472b/184B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
__le16 req_type;
@@ -1808,7 +1906,7 @@ struct hwrm_func_backing_store_qcaps_output {
u8 ctx_kind_initializer;
__le32 rsvd;
__le16 rsvd1;
- u8 rsvd2;
+ u8 tqm_fp_rings_count;
u8 valid;
};
@@ -2231,7 +2329,17 @@ struct hwrm_error_recovery_qcfg_output {
#define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
__le32 reset_reg_val[16];
u8 delay_after_reset[16];
- u8 unused_1[7];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
u8 valid;
};
@@ -2934,7 +3042,11 @@ struct hwrm_port_qstats_input {
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
- u8 unused_0[6];
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -3058,7 +3170,11 @@ struct hwrm_port_qstats_ext_input {
__le16 port_id;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[2];
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0;
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -3840,14 +3956,22 @@ struct hwrm_queue_pfcenable_qcfg_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
u8 unused_0[3];
u8 valid;
};
@@ -3860,14 +3984,22 @@ struct hwrm_queue_pfcenable_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
__le16 port_id;
u8 unused_0[2];
};
@@ -5287,7 +5419,11 @@ struct hwrm_ring_cmpl_ring_qaggint_params_input {
__le16 target_id;
__le64 resp_addr;
__le16 ring_id;
- u8 unused_0[6];
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
};
/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
@@ -7618,7 +7754,9 @@ struct hwrm_nvm_modify_input {
__le64 resp_addr;
__le64 host_src_addr;
__le16 dir_idx;
- u8 unused_0[2];
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
__le32 offset;
__le32 len;
u8 unused_1[4];
@@ -8027,4 +8165,18 @@ struct hwrm_selftest_irq_output {
u8 valid;
};
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+};
+
#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index cea2f9958a1d..3a9a51f7063a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -645,7 +645,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
- mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
req.mru = cpu_to_le16(mtu);
req.mtu = cpu_to_le16(mtu);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 782ea0771221..0eef4f5e4a46 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1939,53 +1939,25 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
return 0;
}
-static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
-{
- switch (type) {
- case TC_SETUP_BLOCK:
- return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
{
return netif_is_vxlan(netdev);
}
-static int bnxt_tc_indr_block_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
{
- struct net_device *netdev;
- struct bnxt *bp;
- int rc;
-
- netdev = netdev_notifier_info_to_dev(ptr);
if (!bnxt_is_netdev_indr_offload(netdev))
- return NOTIFY_OK;
-
- bp = container_of(nb, struct bnxt, tc_netdev_nb);
+ return -EOPNOTSUPP;
- switch (event) {
- case NETDEV_REGISTER:
- rc = __flow_indr_block_cb_register(netdev, bp,
- bnxt_tc_setup_indr_cb,
- bp);
- if (rc)
- netdev_info(bp->dev,
- "Failed to register indirect blk: dev: %s\n",
- netdev->name);
- break;
- case NETDEV_UNREGISTER:
- __flow_indr_block_cb_unregister(netdev,
- bnxt_tc_setup_indr_cb,
- bp);
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+ default:
break;
}
- return NOTIFY_DONE;
+ return -EOPNOTSUPP;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
@@ -2074,8 +2046,8 @@ int bnxt_init_tc(struct bnxt *bp)
/* init indirect block notifications */
INIT_LIST_HEAD(&bp->tc_indr_block_list);
- bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event;
- rc = register_netdevice_notifier(&bp->tc_netdev_nb);
+
+ rc = flow_indr_dev_register(bnxt_tc_setup_indr_cb, bp);
if (!rc)
return 0;
@@ -2101,7 +2073,8 @@ void bnxt_shutdown_tc(struct bnxt *bp)
if (!bnxt_tc_flower_enabled(bp))
return;
- unregister_netdevice_notifier(&bp->tc_netdev_nb);
+ flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
+ bnxt_tc_setup_indr_block_cb);
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
rhashtable_destroy(&tc_info->decap_l2_table);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 4a316c4b3fa8..8c8368c2f335 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -104,7 +104,13 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
- ent[i].db_offset = (idx + i) * 0x80;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ ent[i].db_offset = DB_PF_OFFSET_P5;
+ if (BNXT_VF(bp))
+ ent[i].db_offset = DB_VF_OFFSET_P5;
+ } else {
+ ent[i].db_offset = (idx + i) * 0x80;
+ }
}
}
@@ -475,6 +481,8 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
edev->net = dev;
edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
bp->edev = edev;
}
return bp->edev;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 9895406b9830..6b4d2556a6df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -67,6 +67,14 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ int l2_db_size; /* Doorbell BAR size in
+ * bytes mapped by L2
+ * driver.
+ */
+ int l2_db_size_nc; /* Doorbell BAR size in
+ * bytes mapped as non-
+ * cacheable.
+ */
};
struct bnxt_en_ops {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c6f6f2033880..5e3b4a3b69ea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -138,6 +138,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = *data_ptr + *len;
xdp.rxq = &rxr->xdp_rxq;
+ xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 61ab7d21f6bd..c5cca63b8571 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1918,7 +1918,6 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
if (ret) {
atomic_dec(&cp->iscsi_conn);
- ret = 0;
goto done;
}
ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 79636c78127c..ff31da0ed846 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2019 Broadcom
+ * Copyright (c) 2014-2020 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -23,11 +23,6 @@
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
#include <net/arp.h>
#include <linux/mii.h>
@@ -70,6 +65,9 @@
#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
TOTAL_DESC * DMA_DESC_SIZE)
+/* Forward declarations */
+static void bcmgenet_set_rx_mode(struct net_device *dev);
+
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
/* MIPS chips strapped for BE will automagically configure the
@@ -461,6 +459,384 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
+static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
+ u32 f_index)
+{
+ u32 offset;
+ u32 reg;
+
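+ /* filters 0-31 are enabled in the second word, filters 32+ in the first */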
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ return !!(reg & (1 << (f_index % 32)));
+}
+
+static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+}
+
+static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+ u32 offset, reg, reg1;
+
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
+}
+
+static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
+ u32 f_index, u32 rx_queue)
+{
+ u32 offset;
+ u32 reg;
+
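+ /* each DMA_INDEX2RING register holds 4-bit ring indices for 8 filters */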
+ offset = f_index / 8;
+ reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
+ reg &= ~(0xF << (4 * (f_index % 8)));
+ reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
+ bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
+}
+
+static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
+ u32 f_index, u32 f_length)
+{
+ u32 offset;
+ u32 reg;
+
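+ /* 8-bit lengths, four per register, stored in reverse filter order */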
+ offset = HFB_FLT_LEN_V3PLUS +
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
+ sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg &= ~(0xFF << (8 * (f_index % 4)));
+ reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
+{
+ u32 f_index;
+
+ /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
+ for (f_index = MAX_NUM_OF_FS_RULES;
+ f_index < priv->hw_params->hfb_filter_cnt; f_index++)
+ if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
+ return f_index;
+
+ return -ENOMEM;
+}
+
+static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
+{
+ while (size) {
+ switch (*(unsigned char *)mask++) {
+ case 0x00:
+ case 0x0f:
+ case 0xf0:
+ case 0xff:
+ size--;
+ continue;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+#define VALIDATE_MASK(x) \
+ bcmgenet_hfb_validate_mask(&(x), sizeof(x))
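Since the HFB matches per nibble, bcmgenet_hfb_validate_mask() accepts only byte masks of 0x00, 0x0f, 0xf0 or 0xff. A hedged illustration of what the check accepts (the demo function below is not part of the driver):

static void hfb_mask_demo(void)
{
	__be16 aligned = htons(0xff00);	/* bytes 0xff, 0x00: nibble-aligned */
	__be16 straddle = htons(0x1f00);	/* byte 0x1f straddles a nibble */

	WARN_ON(VALIDATE_MASK(aligned) != 0);		/* accepted */
	WARN_ON(VALIDATE_MASK(straddle) != -EINVAL);	/* rejected */
}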
+
+static int bcmgenet_hfb_insert_data(u32 *f, int offset,
+ void *val, void *mask, size_t size)
+{
+ int index;
+ u32 tmp;
+
+ index = offset / 2;
+ tmp = f[index];
+
+ while (size--) {
+ if (offset++ & 1) {
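+ /* odd frame offset: data goes in bits 7:0, nibble enables in 17:16 */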
+ tmp &= ~0x300FF;
+ tmp |= (*(unsigned char *)val++);
+ switch ((*(unsigned char *)mask++)) {
+ case 0xFF:
+ tmp |= 0x30000;
+ break;
+ case 0xF0:
+ tmp |= 0x20000;
+ break;
+ case 0x0F:
+ tmp |= 0x10000;
+ break;
+ }
+ f[index++] = tmp;
+ if (size)
+ tmp = f[index];
+ } else {
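+ /* even frame offset: data goes in bits 15:8, nibble enables in 19:18 */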
+ tmp &= ~0xCFF00;
+ tmp |= (*(unsigned char *)val++) << 8;
+ switch ((*(unsigned char *)mask++)) {
+ case 0xFF:
+ tmp |= 0xC0000;
+ break;
+ case 0xF0:
+ tmp |= 0x80000;
+ break;
+ case 0x0F:
+ tmp |= 0x40000;
+ break;
+ }
+ if (!size)
+ f[index] = tmp;
+ }
+ }
+
+ return 0;
+}
+
+static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data,
+ u32 f_length, u32 rx_queue, int f_index)
+{
+ u32 base = f_index * priv->hw_params->hfb_filter_size;
+ int i;
+
+ for (i = 0; i < f_length; i++)
+ bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32));
+
+ bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
+}
+
+static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
+ struct bcmgenet_rxnfc_rule *rule)
+{
+ struct ethtool_rx_flow_spec *fs = &rule->fs;
+ int err = 0, offset = 0, f_length = 0;
+ u16 val_16, mask_16;
+ u8 val_8, mask_8;
+ size_t size;
+ u32 *f_data;
+
+ f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32),
+ GFP_KERNEL);
+ if (!f_data)
+ return -ENOMEM;
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ bcmgenet_hfb_insert_data(f_data, 0,
+ &fs->h_ext.h_dest, &fs->m_ext.h_dest,
+ sizeof(fs->h_ext.h_dest));
+ }
+
+ if (fs->flow_type & FLOW_EXT) {
+ if (fs->m_ext.vlan_etype ||
+ fs->m_ext.vlan_tci) {
+ bcmgenet_hfb_insert_data(f_data, 12,
+ &fs->h_ext.vlan_etype,
+ &fs->m_ext.vlan_etype,
+ sizeof(fs->h_ext.vlan_etype));
+ bcmgenet_hfb_insert_data(f_data, 14,
+ &fs->h_ext.vlan_tci,
+ &fs->m_ext.vlan_tci,
+ sizeof(fs->h_ext.vlan_tci));
+ offset += VLAN_HLEN;
+ f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
+ }
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ f_length += DIV_ROUND_UP(ETH_HLEN, 2);
+ bcmgenet_hfb_insert_data(f_data, 0,
+ &fs->h_u.ether_spec.h_dest,
+ &fs->m_u.ether_spec.h_dest,
+ sizeof(fs->h_u.ether_spec.h_dest));
+ bcmgenet_hfb_insert_data(f_data, ETH_ALEN,
+ &fs->h_u.ether_spec.h_source,
+ &fs->m_u.ether_spec.h_source,
+ sizeof(fs->h_u.ether_spec.h_source));
+ bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ &fs->h_u.ether_spec.h_proto,
+ &fs->m_u.ether_spec.h_proto,
+ sizeof(fs->h_u.ether_spec.h_proto));
+ break;
+ case IP_USER_FLOW:
+ f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
+ /* Specify IP Ether Type */
+ val_16 = htons(ETH_P_IP);
+ mask_16 = 0xFFFF;
+ bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmgenet_hfb_insert_data(f_data, 15 + offset,
+ &fs->h_u.usr_ip4_spec.tos,
+ &fs->m_u.usr_ip4_spec.tos,
+ sizeof(fs->h_u.usr_ip4_spec.tos));
+ bcmgenet_hfb_insert_data(f_data, 23 + offset,
+ &fs->h_u.usr_ip4_spec.proto,
+ &fs->m_u.usr_ip4_spec.proto,
+ sizeof(fs->h_u.usr_ip4_spec.proto));
+ bcmgenet_hfb_insert_data(f_data, 26 + offset,
+ &fs->h_u.usr_ip4_spec.ip4src,
+ &fs->m_u.usr_ip4_spec.ip4src,
+ sizeof(fs->h_u.usr_ip4_spec.ip4src));
+ bcmgenet_hfb_insert_data(f_data, 30 + offset,
+ &fs->h_u.usr_ip4_spec.ip4dst,
+ &fs->m_u.usr_ip4_spec.ip4dst,
+ sizeof(fs->h_u.usr_ip4_spec.ip4dst));
+ if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
+ break;
+
+ /* Only supports 20 byte IPv4 header */
+ val_8 = 0x45;
+ mask_8 = 0xFF;
+ bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset,
+ &val_8, &mask_8,
+ sizeof(val_8));
+ size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
+ bcmgenet_hfb_insert_data(f_data,
+ ETH_HLEN + 20 + offset,
+ &fs->h_u.usr_ip4_spec.l4_4_bytes,
+ &fs->m_u.usr_ip4_spec.l4_4_bytes,
+ size);
+ f_length += DIV_ROUND_UP(size, 2);
+ break;
+ }
+
+ if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
+ /* Ring 0 flows can be handled by the default Descriptor Ring.
+ * We'll map them to ring 0, but don't enable the filter.
+ */
+ bcmgenet_hfb_set_filter(priv, f_data, f_length, 0,
+ fs->location);
+ rule->state = BCMGENET_RXNFC_STATE_DISABLED;
+ } else {
+ /* Other Rx rings are direct mapped here */
+ bcmgenet_hfb_set_filter(priv, f_data, f_length,
+ fs->ring_cookie, fs->location);
+ bcmgenet_hfb_enable_filter(priv, fs->location);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
+ }
+
+ kfree(f_data);
+
+ return err;
+}
+
+/* bcmgenet_hfb_add_filter
+ *
+ * Add new filter to Hardware Filter Block to match and direct Rx traffic to
+ * desired Rx queue.
+ *
+ * f_data is an array of unsigned 32-bit integers where each 32-bit integer
+ * provides filter data for 2 bytes (4 nibbles) of Rx frame:
+ *
+ * bits 31:20 - unused
+ * bit 19 - nibble 0 match enable
+ * bit 18 - nibble 1 match enable
+ * bit 17 - nibble 2 match enable
+ * bit 16 - nibble 3 match enable
+ * bits 15:12 - nibble 0 data
+ * bits 11:8 - nibble 1 data
+ * bits 7:4 - nibble 2 data
+ * bits 3:0 - nibble 3 data
+ *
+ * Example:
+ * In order to match:
+ * - Ethernet frame type = 0x0800 (IP)
+ * - IP version field = 4
+ * - IP protocol field = 0x11 (UDP)
+ *
+ * The following filter is needed:
+ * u32 hfb_filter_ipv4_udp[] = {
+ * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
+ * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
+ * };
+ *
+ * To add the filter to HFB and direct the traffic to Rx queue 0, call:
+ * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
+ * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
+ */
+int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
+ u32 f_length, u32 rx_queue)
+{
+ int f_index;
+
+ f_index = bcmgenet_hfb_find_unused_filter(priv);
+ if (f_index < 0)
+ return -ENOMEM;
+
+ if (f_length > priv->hw_params->hfb_filter_size)
+ return -EINVAL;
+
+ bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
+ bcmgenet_hfb_enable_filter(priv, f_index);
+
+ return 0;
+}
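To connect the layout documented above with actual word values, here is an illustrative packing helper (made up for this explanation, not part of the driver) that reproduces the 0x000F0800 ethertype word from the example filter:

/* Pack two Rx frame bytes into one HFB filter word, per the layout
 * documented above: nibble match enables in bits 19:16 (nibble_en
 * bit 3 -> first nibble), data nibbles in bits 15:0. Illustration only.
 */
static u32 hfb_pack_word(u8 b0, u8 b1, u8 nibble_en)
{
	return ((u32)(nibble_en & 0xf) << 16) | ((u32)b0 << 8) | b1;
}

/* hfb_pack_word(0x08, 0x00, 0xf) == 0x000F0800: fully match 0x0800 */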
+
+/* bcmgenet_hfb_clear
+ *
+ * Clear Hardware Filter Block and disable all filtering.
+ */
+static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
+{
+ u32 i;
+
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
+
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0x0, i);
+
+ for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
+ bcmgenet_hfb_reg_writel(priv, 0x0,
+ HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+
+ for (i = 0; i < priv->hw_params->hfb_filter_cnt *
+ priv->hw_params->hfb_filter_size; i++)
+ bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
+}
+
+static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
+ INIT_LIST_HEAD(&priv->rxnfc_list);
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
+ priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
+ }
+
+ bcmgenet_hfb_clear(priv);
+}
+
static int bcmgenet_begin(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -1045,6 +1421,229 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phy_ethtool_set_eee(dev->phydev, e);
}
+static int bcmgenet_validate_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_usrip4_spec *l4_mask;
+ struct ethhdr *eth_mask;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
+ netdev_err(dev, "rxnfc: Invalid location (%d)\n",
+ cmd->fs.location);
+ return -EINVAL;
+ }
+
+ switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case IP_USER_FLOW:
+ l4_mask = &cmd->fs.m_u.usr_ip4_spec;
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(l4_mask->ip4src) ||
+ VALIDATE_MASK(l4_mask->ip4dst) ||
+ VALIDATE_MASK(l4_mask->l4_4_bytes) ||
+ VALIDATE_MASK(l4_mask->proto) ||
+ VALIDATE_MASK(l4_mask->ip_ver) ||
+ VALIDATE_MASK(l4_mask->tos)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ break;
+ case ETHER_FLOW:
+ eth_mask = &cmd->fs.m_u.ether_spec;
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(eth_mask->h_dest) ||
+ VALIDATE_MASK(eth_mask->h_source) ||
+ VALIDATE_MASK(eth_mask->h_proto)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
+ cmd->fs.flow_type);
+ return -EINVAL;
+ }
+
+ if ((cmd->fs.flow_type & FLOW_EXT)) {
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
+ VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
+ netdev_err(dev, "rxnfc: user-def not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int bcmgenet_insert_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *loc_rule;
+ int err;
+
+ if (priv->hw_params->hfb_filter_size < 128) {
+ netdev_err(dev, "rxnfc: Not supported by this device\n");
+ return -EINVAL;
+ }
+
+ if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+
+ err = bcmgenet_validate_flow(dev, cmd);
+ if (err)
+ return err;
+
+ loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ list_del(&loc_rule->list);
+ loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
+ memcpy(&loc_rule->fs, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+ err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
+ if (err) {
+ netdev_err(dev, "rxnfc: Could not install rule (%d)\n",
+ err);
+ return err;
+ }
+
+ list_add_tail(&loc_rule->list, &priv->rxnfc_list);
+
+ return 0;
+}
+
+static int bcmgenet_delete_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ int err = 0;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ list_del(&rule->list);
+ rule->state = BCMGENET_RXNFC_STATE_UNUSED;
+ memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
+
+out:
+ return err;
+}
+
+static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = bcmgenet_insert_flow(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = bcmgenet_delete_flow(dev, cmd);
+ break;
+ default:
+ netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
+ cmd->cmd);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ int loc)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ int err = 0;
+
+ if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->rxnfc_rules[loc];
+ if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
+ err = -ENOENT;
+ else
+ memcpy(&cmd->fs, &rule->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+ return err;
+}
+
+static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
+{
+ struct list_head *pos;
+ int res = 0;
+
+ list_for_each(pos, &priv->rxnfc_list)
+ res++;
+
+ return res;
+}
+
+static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ int err = 0;
+ int i = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->hw_params->rx_queues ?: 1;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bcmgenet_get_num_flows(priv);
+ cmd->data = MAX_NUM_OF_FS_RULES;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (i < cmd->rule_cnt)
+ rule_locs[i++] = rule->fs.location;
+ cmd->rule_cnt = i;
+ cmd->data = MAX_NUM_OF_FS_RULES;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
@@ -1069,6 +1668,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_link_ksettings = bcmgenet_get_link_ksettings,
.set_link_ksettings = bcmgenet_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_rxnfc = bcmgenet_get_rxnfc,
+ .set_rxnfc = bcmgenet_set_rxnfc,
};
/* Power down the unimac, based on mode. */
@@ -2669,10 +3270,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
- struct bcmgenet_priv *priv = dev_id;
-
- pm_wakeup_event(&priv->pdev->dev, 0);
-
+ /* Acknowledge the interrupt */
return IRQ_HANDLED;
}
@@ -2710,9 +3308,8 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
unsigned char *addr)
{
- bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
- (addr[2] << 8) | addr[3], UMAC_MAC0);
- bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+ bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
+ bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
}
static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
@@ -2721,13 +3318,9 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
u32 addr_tmp;
addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
- addr[0] = addr_tmp >> 24;
- addr[1] = (addr_tmp >> 16) & 0xff;
- addr[2] = (addr_tmp >> 8) & 0xff;
- addr[3] = addr_tmp & 0xff;
+ put_unaligned_be32(addr_tmp, &addr[0]);
addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
- addr[4] = (addr_tmp >> 8) & 0xff;
- addr[5] = addr_tmp & 0xff;
+ put_unaligned_be16(addr_tmp, &addr[4]);
}
/* Returns a reusable dma control register value */
@@ -2766,43 +3359,12 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
-/* bcmgenet_hfb_clear
- *
- * Clear Hardware Filter Block and disable all filtering.
- */
-static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
-{
- u32 i;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
-
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
-
- for (i = 0; i < priv->hw_params->hfb_filter_cnt *
- priv->hw_params->hfb_filter_size; i++)
- bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
-}
-
-static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
-{
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_clear(priv);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ bcmgenet_set_rx_mode(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3421,8 +3983,6 @@ MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
- struct device_node *dn = pdev->dev.of_node;
- const struct of_device_id *of_id = NULL;
const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
@@ -3437,12 +3997,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
return -ENOMEM;
}
- if (dn) {
- of_id = of_match_node(bcmgenet_match, dn);
- if (!of_id)
- return -EINVAL;
- }
-
priv = netdev_priv(dev);
priv->irq0 = platform_get_irq(pdev, 0);
if (priv->irq0 < 0) {
@@ -3504,13 +4058,16 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
}
- priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+ priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk)) {
dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
- priv->clk = NULL;
+ err = PTR_ERR(priv->clk);
+ goto err;
}
- clk_prepare_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ goto err;
bcmgenet_set_hw_params(priv);
@@ -3528,16 +4085,18 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->rx_buf_len = RX_BUF_LENGTH;
INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
- priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+ priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
if (IS_ERR(priv->clk_wol)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
- priv->clk_wol = NULL;
+ err = PTR_ERR(priv->clk_wol);
+ goto err;
}
- priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
+ priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
- priv->clk_eee = NULL;
+ err = PTR_ERR(priv->clk_eee);
+ goto err;
}
/* If this is an internal GPHY, power it on now, before UniMAC is
@@ -3546,7 +4105,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
- if ((pd) && (!IS_ERR_OR_NULL(pd->mac_address)))
+ if (pd && !IS_ERR_OR_NULL(pd->mac_address))
ether_addr_copy(dev->dev_addr, pd->mac_address);
else
if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
@@ -3612,11 +4171,10 @@ static void bcmgenet_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int bcmgenet_resume(struct device *d)
+static int bcmgenet_resume_noirq(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
u32 reg;
@@ -3628,6 +4186,38 @@ static int bcmgenet_resume(struct device *d)
if (ret)
return ret;
+ if (device_may_wakeup(d) && priv->wolopts) {
+ /* Account for Wake-on-LAN events and clear those events
+ * (Some devices need more time between enabling the clocks
+ * and the interrupt register reflecting the wake event so
+ * read the register twice)
+ */
+ reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
+ reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
+ if (reg & UMAC_IRQ_WAKE_EVENT)
+ pm_wakeup_event(&priv->pdev->dev, 0);
+ }
+
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+
+ return 0;
+}
+
+static int bcmgenet_resume(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long dma_ctrl;
+ u32 offset, reg;
+ int ret;
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (device_may_wakeup(d) && priv->wolopts)
+ bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
@@ -3638,10 +4228,6 @@ static int bcmgenet_resume(struct device *d)
init_umac(priv);
- /* From WOL-enabled suspend, switch to regular clock */
- if (priv->wolopts)
- clk_disable_unprepare(priv->clk_wol);
-
phy_init_hw(dev->phydev);
/* Speed settings must be restored */
@@ -3653,15 +4239,17 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset);
+ bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32));
+ bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL);
+
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
- if (priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@@ -3698,7 +4286,7 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret = 0;
+ u32 offset;
if (!netif_running(dev))
return 0;
@@ -3710,25 +4298,53 @@ static int bcmgenet_suspend(struct device *d)
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
+ /* Preserve filter state and disable filtering */
+ priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset);
+ priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_suspend_noirq(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
- if (device_may_wakeup(d) && priv->wolopts) {
+ if (device_may_wakeup(d) && priv->wolopts)
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
- clk_prepare_enable(priv->clk_wol);
- } else if (priv->internal_phy) {
+ else if (priv->internal_phy)
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- }
+
+ /* Let the framework handle resumption and leave the clocks on */
+ if (ret)
+ return ret;
/* Turn off the clocks */
clk_disable_unprepare(priv->clk);
- if (ret)
- bcmgenet_resume(d);
-
- return ret;
+ return 0;
}
+#else
+#define bcmgenet_suspend NULL
+#define bcmgenet_suspend_noirq NULL
+#define bcmgenet_resume NULL
+#define bcmgenet_resume_noirq NULL
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
+static const struct dev_pm_ops bcmgenet_pm_ops = {
+ .suspend = bcmgenet_suspend,
+ .suspend_noirq = bcmgenet_suspend_noirq,
+ .resume = bcmgenet_resume,
+ .resume_noirq = bcmgenet_resume_noirq,
+};
static const struct acpi_device_id genet_acpi_match[] = {
{ "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
@@ -3744,7 +4360,7 @@ static struct platform_driver bcmgenet_driver = {
.name = "bcmgenet",
.of_match_table = bcmgenet_match,
.pm = &bcmgenet_pm_ops,
- .acpi_match_table = ACPI_PTR(genet_acpi_match),
+ .acpi_match_table = genet_acpi_match,
},
};
module_platform_driver(bcmgenet_driver);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index daf8fb2c39b6..a12cb59298f4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2020 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -14,6 +14,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/dim.h>
+#include <linux/ethtool.h>
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -31,6 +32,7 @@
#define DMA_MAX_BURST_LENGTH 0x10
/* misc. configuration */
+#define MAX_NUM_OF_FS_RULES 16
#define CLEAR_ALL_HFB 0xFF
#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
#define DMA_FC_THRESH_LO 5
@@ -310,6 +312,8 @@ struct bcmgenet_mib_counters {
#define UMAC_IRQ_HFB_SM (1 << 10)
#define UMAC_IRQ_HFB_MM (1 << 11)
#define UMAC_IRQ_MPD_R (1 << 12)
+#define UMAC_IRQ_WAKE_EVENT (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \
+ UMAC_IRQ_MPD_R)
#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
@@ -608,6 +612,18 @@ struct bcmgenet_rx_ring {
struct bcmgenet_priv *priv;
};
+enum bcmgenet_rxnfc_state {
+ BCMGENET_RXNFC_STATE_UNUSED = 0,
+ BCMGENET_RXNFC_STATE_DISABLED,
+ BCMGENET_RXNFC_STATE_ENABLED
+};
+
+struct bcmgenet_rxnfc_rule {
+ struct list_head list;
+ struct ethtool_rx_flow_spec fs;
+ enum bcmgenet_rxnfc_state state;
+};
+
/* device context */
struct bcmgenet_priv {
void __iomem *base;
@@ -626,6 +642,8 @@ struct bcmgenet_priv {
struct enet_cb *rx_cbs;
unsigned int num_rx_bds;
unsigned int rx_buf_len;
+ struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
+ struct list_head rxnfc_list;
struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
@@ -676,6 +694,9 @@ struct bcmgenet_priv {
/* WOL */
struct clk *clk_wol;
u32 wolopts;
+ u8 sopass[SOPASS_MAX];
+ bool wol_active;
+ u32 hfb_en[3];
struct bcmgenet_mib_counters mib;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index c9a43695b182..4ea6a26b04f7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2020 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -41,18 +41,13 @@
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 reg;
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+ wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
memset(wol->sopass, 0, sizeof(wol->sopass));
- if (wol->wolopts & WAKE_MAGICSECURE) {
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS);
- put_unaligned_be16(reg, &wol->sopass[0]);
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS);
- put_unaligned_be32(reg, &wol->sopass[2]);
- }
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
/* ethtool function - set WOL (Wake on LAN) settings.
@@ -62,25 +57,15 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
- u32 reg;
if (!device_can_wakeup(kdev))
return -ENOTSUPP;
- if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
+ if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER))
return -EINVAL;
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- if (wol->wolopts & WAKE_MAGICSECURE) {
- bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
- UMAC_MPD_PW_MS);
- bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
- UMAC_MPD_PW_LS);
- reg |= MPD_PW_EN;
- } else {
- reg &= ~MPD_PW_EN;
- }
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
@@ -120,12 +105,21 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
return retries;
}
+static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv)
+{
+ bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
+ UMAC_MPD_PW_MS);
+ bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
+ UMAC_MPD_PW_LS);
+}
+
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_ctrl_reg, hfb_enable = 0;
int retries = 0;
- u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, dev, "unsupported mode: %d\n", mode);
@@ -142,22 +136,48 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
mdelay(10);
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- reg |= MPD_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+ reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ reg |= MPD_EN;
+ if (priv->wolopts & WAKE_MAGICSECURE) {
+ bcmgenet_set_mpd_password(priv);
+ reg |= MPD_PW_EN;
+ }
+ bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ }
+
+ hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
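+ /* arm only the HFB rules that were installed as wake-up filters */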
+ if (priv->wolopts & WAKE_FILTER) {
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
+ hfb_enable |= (1 << rule->fs.location);
+ reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
if (retries < 0) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
+ reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
return retries;
}
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
+ clk_prepare_enable(priv->clk_wol);
+ priv->wol_active = 1;
+
+ if (hfb_enable) {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
+ }
+
/* Enable CRC forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
@@ -173,6 +193,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
+ reg = UMAC_IRQ_MPD_R;
+ if (hfb_enable)
+ reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
+
+ bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
+
return 0;
}
@@ -186,12 +212,22 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
return;
}
+ if (!priv->wol_active)
+ return; /* failed to suspend so skip the rest */
+
+ priv->wol_active = 0;
+ clk_disable_unprepare(priv->clk_wol);
+
+ /* Disable Magic Packet Detection */
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- if (!(reg & MPD_EN))
- return; /* already powered up so skip the rest */
- reg &= ~MPD_EN;
+ reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ /* Disable WAKE_FILTER Detection */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+
/* Disable CRC Forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff98a82b7bc4..7a3b22b35238 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10797,17 +10797,15 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
+ u32 off, len = TG3_OCIR_LEN;
int i;
- for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
- u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
-
+ for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
- off += len;
if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
!(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
- memset(ocir, 0, TG3_OCIR_LEN);
+ memset(ocir, 0, len);
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index d7e805749a5b..e40c64b79f66 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -782,7 +782,6 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
if ((work_done < budget && tx_done) ||
(iq && iq->pkt_in_done >= MAX_REG_CNT) ||
(droq->pkt_count >= MAX_REG_CNT)) {
- tx_done = 1;
napi_complete_done(napi, work_done);
octeon_enable_irq(droq->oct_dev, droq->q_no);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 3d01d3602d8f..fb380b4f3e02 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -712,18 +712,6 @@ struct octeon_device *lio_get_device(u32 octeon_id);
*/
int lio_get_device_id(void *dev);
-static inline u16 OCTEON_MAJOR_REV(struct octeon_device *oct)
-{
- u16 rev = (oct->rev_id & 0xC) >> 2;
-
- return (rev == 0) ? 1 : rev;
-}
-
-static inline u16 OCTEON_MINOR_REV(struct octeon_device *oct)
-{
- return oct->rev_id & 0x3;
-}
-
/** Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 9d868403d86c..cbaa1924afbe 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -234,6 +234,11 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
/* Put it in the ring. */
p->rx_ring[p->rx_next_fill] = re.d64;
+ /* Make sure the ring entry is written before the bell is rung,
+ * i.e. the fill and the doorbell write must not be reordered
+ */
+ wmb();
+
dma_sync_single_for_device(p->dev, p->rx_ring_handle,
ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
DMA_BIDIRECTIONAL);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index b4b33368698f..2ba0ce115e63 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -552,6 +552,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 9909bfda167e..82cdfa51ce37 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -26,7 +26,7 @@ config CHELSIO_T1
This driver supports Chelsio gigabit and 10-gigabit
Ethernet cards. More information about adapter features and
performance tuning is in
- <file:Documentation/networking/device_drivers/chelsio/cxgb.txt>.
+ <file:Documentation/networking/device_drivers/chelsio/cxgb.rst>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e46a14f44a6f..cf69c6edcfec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -60,6 +60,7 @@
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
extern struct list_head adapter_list;
+extern struct list_head uld_list;
extern struct mutex uld_mutex;
/* Suspend an Ethernet Tx queue with fewer available descriptors than this.
@@ -466,8 +467,6 @@ static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
return &((struct mbox_cmd *)&(log)[1])[entry_idx];
}
-#include "t4fw_api.h"
-
#define FW_VERSION(chip) ( \
FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
@@ -824,6 +823,13 @@ struct sge_uld_txq_info {
u16 ntxq; /* # of egress uld queues */
};
+/* struct to maintain ULD list to reallocate ULD resources on hotplug */
+struct cxgb4_uld_list {
+ struct cxgb4_uld_info uld_info;
+ struct list_head list_node;
+ enum cxgb4_uld uld_type;
+};
+
enum sge_eosw_state {
CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
@@ -1093,6 +1099,7 @@ struct adapter {
/* TC u32 offload */
struct cxgb4_tc_u32_table *tc_u32;
+ struct chcr_ktls chcr_ktls;
struct chcr_stats_debug chcr_stats;
/* TC flower offload */
@@ -1127,19 +1134,20 @@ struct adapter {
* programmed with various parameters.
*/
struct ch_sched_params {
- s8 type; /* packet or flow */
+ u8 type; /* packet or flow */
union {
struct {
- s8 level; /* scheduler hierarchy level */
- s8 mode; /* per-class or per-flow */
- s8 rateunit; /* bit or packet rate */
- s8 ratemode; /* %port relative or kbps absolute */
- s8 channel; /* scheduler channel [0..N] */
- s8 class; /* scheduler class [0..N] */
- s32 minrate; /* minimum rate */
- s32 maxrate; /* maximum rate */
- s16 weight; /* percent weight */
- s16 pktsize; /* average packet size */
+ u8 level; /* scheduler hierarchy level */
+ u8 mode; /* per-class or per-flow */
+ u8 rateunit; /* bit or packet rate */
+ u8 ratemode; /* %port relative or kbps absolute */
+ u8 channel; /* scheduler channel [0..N] */
+ u8 class; /* scheduler class [0..N] */
+ u32 minrate; /* minimum rate */
+ u32 maxrate; /* maximum rate */
+ u16 weight; /* percent weight */
+ u16 pktsize; /* average packet size */
+ u16 burstsize; /* burst buffer size */
} params;
} u;
};
@@ -1954,9 +1962,10 @@ int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
enum ctxt_type ctype, u32 *data);
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
enum ctxt_type ctype, u32 *data);
-int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
- int rateunit, int ratemode, int channel, int class,
- int minrate, int maxrate, int weight, int pktsize);
+int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
+ u8 rateunit, u8 ratemode, u8 channel, u8 class,
+ u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
+ u16 burstsize);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_idma_monitor_init(struct adapter *adapter,
struct sge_idma_monitor_state *idma);
@@ -2052,4 +2061,7 @@ int cxgb_open(struct net_device *dev);
int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
+#endif
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index ebed99f3d4cf..41315712deb8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -49,6 +49,7 @@
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#include "cxgb4_tc_mqprio.h"
/* generic seq_file support for showing a table of size rows x width. */
static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -1812,12 +1813,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
/* Inner header lookup */
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
seq_printf(seq,
- "%3u %02x:%02x:%02x:%02x:%02x:%02x "
- "%012llx %06x %06x - - %3c"
- " 'I' %4x "
- "%3c %#x%4u%4d", idx, addr[0],
- addr[1], addr[2], addr[3],
- addr[4], addr[5],
+ "%3u %pM %012llx %06x %06x - - %3c 'I' %4x %3c %#x%4u%4d",
+ idx, addr,
(unsigned long long)mask,
vniy, (vnix | vniy),
dip_hit ? 'Y' : 'N',
@@ -1829,10 +1826,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
T6_VF_G(cls_lo) : -1);
} else {
seq_printf(seq,
- "%3u %02x:%02x:%02x:%02x:%02x:%02x "
- "%012llx - - ",
- idx, addr[0], addr[1], addr[2],
- addr[3], addr[4], addr[5],
+ "%3u %pM %012llx - - ",
+ idx, addr,
(unsigned long long)mask);
if (vlan_vld)
@@ -1850,10 +1845,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
T6_VF_G(cls_lo) : -1);
}
} else
- seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
- "%012llx%3c %#x%4u%4d",
- idx, addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5], (unsigned long long)mask,
+ seq_printf(seq, "%3u %pM %012llx%3c %#x%4u%4d",
+ idx, addr, (unsigned long long)mask,
(cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
PORTMAP_G(cls_hi),
PF_G(cls_lo),
@@ -2657,32 +2650,19 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
- int eth_entries, ctrl_entries, eo_entries = 0;
+ int eth_entries, ctrl_entries, eohw_entries = 0, eosw_entries = 0;
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
const struct sge_uld_txq_info *utxq_info;
const struct sge_uld_rxq_info *urxq_info;
+ struct cxgb4_tc_port_mqprio *port_mqprio;
struct adapter *adap = seq->private;
- int i, n, r = (uintptr_t)v - 1;
+ int i, j, n, r = (uintptr_t)v - 1;
struct sge *s = &adap->sge;
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
- if (adap->sge.eohw_txq)
- eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
-
- mutex_lock(&uld_mutex);
- if (s->uld_txq_info)
- for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
- uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
-
- if (s->uld_rxq_info) {
- for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
- uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
- uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
- }
- }
if (r)
seq_putc(seq, '\n');
@@ -2759,11 +2739,21 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- goto unlock;
+ goto out;
}
r -= eth_entries;
- if (r < eo_entries) {
+ if (!adap->tc_mqprio)
+ goto skip_mqprio;
+
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+ if (!refcount_read(&adap->tc_mqprio->refcnt)) {
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto skip_mqprio;
+ }
+
+ eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+ if (r < eohw_entries) {
int base_qset = r * 4;
const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
@@ -2808,10 +2798,71 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- goto unlock;
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto out;
+ }
+
+ r -= eohw_entries;
+ for (j = 0; j < adap->params.nports; j++) {
+ int entries;
+ u8 tc;
+
+ port_mqprio = &adap->tc_mqprio->port_mqprio[j];
+ entries = 0;
+ for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+ entries += port_mqprio->mqprio.qopt.count[tc];
+
+ if (!entries)
+ continue;
+
+ eosw_entries = DIV_ROUND_UP(entries, 4);
+ if (r < eosw_entries) {
+ const struct sge_eosw_txq *tx;
+
+ n = min(4, entries - 4 * r);
+ tx = &port_mqprio->eosw_txq[4 * r];
+
+ S("QType:", "EOSW-TXQ");
+ S("Interface:",
+ adap->port[j] ? adap->port[j]->name : "N/A");
+ T("EOTID:", hwtid);
+ T("HWQID:", hwqid);
+ T("State:", state);
+ T("Size:", ndesc);
+ T("In-Use:", inuse);
+ T("Credits:", cred);
+ T("Compl:", ncompl);
+ T("Last-Compl:", last_compl);
+ T("PIDX:", pidx);
+ T("Last-PIDX:", last_pidx);
+ T("CIDX:", cidx);
+ T("Last-CIDX:", last_cidx);
+ T("FLOWC-IDX:", flowc_idx);
+
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto out;
+ }
+
+ r -= eosw_entries;
+ }
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+
+skip_mqprio:
+ if (!is_uld(adap))
+ goto skip_uld;
+
+ mutex_lock(&uld_mutex);
+ if (s->uld_txq_info)
+ for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+ uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+ if (s->uld_rxq_info) {
+ for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+ uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+ uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+ }
}
- r -= eo_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx;
@@ -2994,6 +3045,9 @@ do { \
}
r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+ mutex_unlock(&uld_mutex);
+
+skip_uld:
if (r < ctrl_entries) {
const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
@@ -3008,7 +3062,7 @@ do { \
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
- goto unlock;
+ goto out;
}
r -= ctrl_entries;
@@ -3026,11 +3080,9 @@ do { \
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
s->counter_val[evtq->pktcnt_idx]);
- goto unlock;
+ goto out;
}
-unlock:
- mutex_unlock(&uld_mutex);
#undef R
#undef RL
#undef T
@@ -3039,13 +3091,38 @@ unlock:
#undef R3
#undef T3
#undef S3
+out:
+ return 0;
+
+unlock:
+ mutex_unlock(&uld_mutex);
return 0;
}
static int sge_queue_entries(const struct adapter *adap)
{
- int tot_uld_entries = 0;
- int i;
+ int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0;
+
+ if (adap->tc_mqprio) {
+ struct cxgb4_tc_port_mqprio *port_mqprio;
+ u8 tc;
+
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+ if (adap->sge.eohw_txq)
+ eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+
+ for (i = 0; i < adap->params.nports; i++) {
+ u32 entries = 0;
+
+ port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+ for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+ entries += port_mqprio->mqprio.qopt.count[tc];
+
+ if (entries)
+ eosw_entries += DIV_ROUND_UP(entries, 4);
+ }
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ }
if (!is_uld(adap))
goto lld_only;
@@ -3062,8 +3139,7 @@ static int sge_queue_entries(const struct adapter *adap)
lld_only:
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
- tot_uld_entries +
+ eohw_entries + eosw_entries + tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@ -3244,6 +3320,10 @@ static int tid_info_show(struct seq_file *seq, void *v)
if (t->nhpftids)
seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base,
t->hpftid_base + t->nhpftids - 1);
+ if (t->neotids)
+ seq_printf(seq, "EOTID range: %u..%u, in use: %u\n",
+ t->eotid_base, t->eotid_base + t->neotids - 1,
+ atomic_read(&t->eotids_in_use));
if (t->ntids)
seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
@@ -3411,6 +3491,8 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.tls_key));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
+ seq_printf(seq, "Tx TLS offload refcount: %20u\n",
+ refcount_read(&adap->chcr_ktls.ktls_refcount));
seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
atomic64_read(&adap->chcr_stats.ktls_tx_ctx));
seq_printf(seq, "Tx connection created: %20llu\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a70018f067aa..854b1717a70d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -66,6 +66,9 @@
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#include <net/tls.h>
+#endif
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -180,6 +183,7 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
+LIST_HEAD(uld_list);
static int cfg_queues(struct adapter *adap);
@@ -1579,6 +1583,7 @@ static int tid_init(struct tid_info *t)
atomic_set(&t->tids_in_use, 0);
atomic_set(&t->conns_in_use, 0);
atomic_set(&t->hash_tids_in_use, 0);
+ atomic_set(&t->eotids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
if (natids) {
@@ -3021,7 +3026,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
SCHED_CLASS_RATEUNIT_BITS,
SCHED_CLASS_RATEMODE_ABS,
pi->tx_chan, class_id, 0,
- max_tx_rate * 1000, 0, pktsize);
+ max_tx_rate * 1000, 0, pktsize, 0);
if (ret) {
dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
ret);
@@ -6062,6 +6067,79 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
}
#endif /* CONFIG_PCI_IOV */
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+
+static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 tcp_sn)
+{
+ struct adapter *adap = netdev2adap(netdev);
+ int ret = 0;
+
+ mutex_lock(&uld_mutex);
+ if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
+ dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
+ if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
+ dev_err(adap->pdev_dev,
+ "chcr driver has no registered tlsdev_ops()\n");
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
+ ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
+ if (ret)
+ goto out_unlock;
+
+ ret = adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_add(netdev, sk,
+ direction,
+ crypto_info,
+ tcp_sn);
+ /* if there is a failure, clear the refcount */
+ if (ret)
+ cxgb4_set_ktls_feature(adap,
+ FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+out_unlock:
+ mutex_unlock(&uld_mutex);
+ return ret;
+}
+
+static void cxgb4_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct adapter *adap = netdev2adap(netdev);
+
+ mutex_lock(&uld_mutex);
+ if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
+ dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
+ goto out_unlock;
+ }
+
+ if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
+ dev_err(adap->pdev_dev,
+ "chcr driver has no registered tlsdev_ops\n");
+ goto out_unlock;
+ }
+
+ adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+ direction);
+ cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static const struct tlsdev_ops cxgb4_ktls_ops = {
+ .tls_dev_add = cxgb4_ktls_dev_add,
+ .tls_dev_del = cxgb4_ktls_dev_del,
+};
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
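As a worked sequence for the add/del pair above: the first kTLS connection causes cxgb4_ktls_dev_add() to send the firmware enable command and take the refcount to 1; a second connection only bumps the count; each teardown drops it through cxgb4_ktls_dev_del(), and the firmware disable command goes out only when the last connection closes. If the underlying tls_dev_add() fails, the reference taken for it is dropped straight away, so the count stays balanced.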
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
@@ -6311,7 +6389,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
-
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+ if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = &cxgb4_ktls_ops;
+ /* initialize the refcount */
+ refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
+ }
+#endif
netdev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 81 - 9600 */
@@ -6518,11 +6603,8 @@ fw_attach_fail:
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1;
- if (is_uld(adapter)) {
- mutex_lock(&uld_mutex);
- list_add_tail(&adapter->list_node, &adapter_list);
- mutex_unlock(&uld_mutex);
- }
+ if (is_uld(adapter))
+ cxgb4_uld_enable(adapter);
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index e6af4906d674..ae7123a9de8e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -342,6 +342,13 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);
+ /* Request a larger burst buffer for smaller MTUs, so that the
+ * hardware can work on more data per burst cycle.
+ */
+ if (dev->mtu <= ETH_DATA_LEN)
+ p.u.params.burstsize = 8 * dev->mtu;
+
e = cxgb4_sched_class_alloc(dev, &p);
if (!e) {
ret = -ENOMEM;
@@ -567,6 +574,7 @@ static void cxgb4_mqprio_disable_offload(struct net_device *dev)
int cxgb4_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio)
{
+ struct adapter *adap = netdev2adap(dev);
bool needs_bring_up = false;
int ret;
@@ -574,6 +582,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
if (ret)
return ret;
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+
/* To configure tc params, the current allocated EOTIDs must
* be freed up. However, they can't be freed up if there's
* traffic running on the interface. So, ensure interface is
@@ -609,6 +619,7 @@ out:
if (needs_bring_up)
cxgb_open(dev);
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
return ret;
}
@@ -621,6 +632,7 @@ void cxgb4_mqprio_stop_offload(struct adapter *adap)
if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
return;
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
for_each_port(adap, i) {
dev = adap->port[i];
if (!dev)
@@ -632,6 +644,7 @@ void cxgb4_mqprio_stop_offload(struct adapter *adap)
cxgb4_mqprio_disable_offload(dev);
}
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}
int cxgb4_init_tc_mqprio(struct adapter *adap)
@@ -653,6 +666,8 @@ int cxgb4_init_tc_mqprio(struct adapter *adap)
goto out_free_mqprio;
}
+ mutex_init(&tc_mqprio->mqprio_mutex);
+
tc_mqprio->port_mqprio = tc_port_mqprio;
for (i = 0; i < adap->params.nports; i++) {
port_mqprio = &tc_mqprio->port_mqprio[i];
@@ -687,6 +702,7 @@ void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
u8 i;
if (adap->tc_mqprio) {
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
if (adap->tc_mqprio->port_mqprio) {
for (i = 0; i < adap->params.nports; i++) {
struct net_device *dev = adap->port[i];
@@ -698,6 +714,7 @@ void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
}
kfree(adap->tc_mqprio->port_mqprio);
}
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
kfree(adap->tc_mqprio);
}
}
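To make the burst sizing above concrete: for the default Ethernet MTU of 1500 bytes (ETH_DATA_LEN), the driver requests 8 * 1500 = 12000 bytes of burst buffer per traffic class, while a jumbo MTU of 9000 bytes exceeds ETH_DATA_LEN and leaves the hardware's default burst size in place.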
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index ff8794132b22..be96f1dc0372 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -33,6 +33,7 @@ struct cxgb4_tc_port_mqprio {
struct cxgb4_tc_mqprio {
refcount_t refcnt; /* Refcount for adapter-wide resources */
+ struct mutex mqprio_mutex; /* Lock for accessing MQPRIO info */
struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index e65b52375dd8..0307e9c69a47 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -174,13 +174,14 @@ static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int i, ret = 0;
+ int i, ret;
- ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
+ ret = alloc_uld_rxqs(adap, rxq_info, lro);
+ if (ret)
+ return ret;
/* Tell uP to route control queue completions to rdma rspq */
- if (adap->flags & CXGB4_FULL_INIT_DONE &&
- !ret && uld_type == CXGB4_ULD_RDMA) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge;
unsigned int cmplqid;
u32 param, cmdop;
@@ -662,25 +663,129 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
return 0;
}
+static bool cxgb4_uld_in_use(struct adapter *adap)
+{
+ const struct tid_info *t = &adap->tids;
+
+ return (atomic_read(&t->conns_in_use) || t->stids_in_use);
+}
+
#ifdef CONFIG_CHELSIO_TLS_DEVICE
/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
* @adap: adapter info
* @enable: 1 to enable / 0 to disable ktls settings.
*/
-static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
+int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{
- u32 params = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_TX_HW) |
- FW_PARAMS_PARAM_Y_V(enable));
int ret = 0;
+ u32 params =
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
+ FW_PARAMS_PARAM_Y_V(enable) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);
+
+ if (enable) {
+ if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
+ /* If any ULD connections are up at this point, another
+ * ULD is already active; return failure.
+ */

+ if (cxgb4_uld_in_use(adap)) {
+ dev_warn(adap->pdev_dev,
+ "ULD connections (tid/stid) active. Can't enable kTLS\n");
+ return -EINVAL;
+ }
+ ret = t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &params, &params);
+ if (ret)
+ return ret;
+ refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
+ pr_info("kTLS has been enabled. Restrictions placed on ULD support\n");
+ } else {
+ /* ktls settings already up, just increment refcount. */
+ refcount_inc(&adap->chcr_ktls.ktls_refcount);
+ }
+ } else {
+ /* return failure if refcount is already 0. */
+ if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
+ return -EINVAL;
+ /* Decrement the refcount and test: if it reaches 0, disable
+ * the ktls feature; otherwise return success.
+ */
+ if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
+ ret = t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &params, &params);
+ if (ret)
+ return ret;
+ pr_info("kTLS is disabled. Restrictions on ULD support removed\n");
+ }
+ }
- ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &params, &params);
- /* if fw returns failure, clear the ktls flag */
- if (ret)
- adap->params.crypto &= ~ULP_CRYPTO_KTLS_INLINE;
+ return ret;
}
#endif
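The enable/disable logic above is a first-user-enables, last-user-disables pattern wrapped around a firmware mailbox command. Below is a minimal user-space sketch of the same pattern, not the driver code: hw_set_ktls() is a hypothetical stand-in for the t4_set_params() mailbox call, and a plain mutex-protected counter replaces refcount_t.

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t ktls_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int ktls_users;

    /* Hypothetical stand-in for the t4_set_params() mailbox call */
    static int hw_set_ktls(bool enable) { (void)enable; return 0; }

    int ktls_feature_get(void)
    {
        int ret = 0;

        pthread_mutex_lock(&ktls_lock);
        if (ktls_users == 0)
            ret = hw_set_ktls(true);  /* 0 -> 1: program the hardware */
        if (ret == 0)
            ktls_users++;             /* later users only take a reference */
        pthread_mutex_unlock(&ktls_lock);
        return ret;
    }

    int ktls_feature_put(void)
    {
        int ret = 0;

        pthread_mutex_lock(&ktls_lock);
        if (ktls_users == 0)
            ret = -1;                 /* unbalanced put, like the -EINVAL above */
        else if (--ktls_users == 0)
            ret = hw_set_ktls(false); /* last user: disable in hardware */
        pthread_mutex_unlock(&ktls_lock);
        return ret;
    }

The point of the shape is that the hardware command runs only on the 0 -> 1 and 1 -> 0 transitions; everything in between is pure bookkeeping under the lock, which is exactly what the uld_mutex provides in the driver.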
+static void cxgb4_uld_alloc_resources(struct adapter *adap,
+ enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
+{
+ int ret = 0;
+
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ return;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ return;
+ ret = cfg_queues_uld(adap, type, p);
+ if (ret)
+ goto out;
+ ret = setup_sge_queues_uld(adap, type, p->lro);
+ if (ret)
+ goto free_queues;
+ if (adap->flags & CXGB4_USING_MSIX) {
+ ret = request_msix_queue_irqs_uld(adap, type);
+ if (ret)
+ goto free_rxq;
+ }
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ enable_rx_uld(adap, type);
+ if (adap->uld[type].add)
+ goto free_irq;
+ ret = setup_sge_txq_uld(adap, type, p);
+ if (ret)
+ goto free_irq;
+ adap->uld[type] = *p;
+ ret = uld_attach(adap, type);
+ if (ret)
+ goto free_txq;
+ return;
+free_txq:
+ release_sge_txq_uld(adap, type);
+free_irq:
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & CXGB4_USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+free_rxq:
+ free_sge_queues_uld(adap, type);
+free_queues:
+ free_queues_uld(adap, type);
+out:
+ dev_warn(adap->pdev_dev,
+ "ULD registration failed for uld type %d\n", type);
+}
+
+void cxgb4_uld_enable(struct adapter *adap)
+{
+ struct cxgb4_uld_list *uld_entry;
+
+ mutex_lock(&uld_mutex);
+ list_add_tail(&adap->list_node, &adapter_list);
+ list_for_each_entry(uld_entry, &uld_list, list_node)
+ cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
+ &uld_entry->uld_info);
+ mutex_unlock(&uld_mutex);
+}
+
/* cxgb4_register_uld - register an upper-layer driver
* @type: the ULD type
* @p: the ULD methods
@@ -691,63 +796,23 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
void cxgb4_register_uld(enum cxgb4_uld type,
const struct cxgb4_uld_info *p)
{
+ struct cxgb4_uld_list *uld_entry;
struct adapter *adap;
- int ret = 0;
if (type >= CXGB4_ULD_MAX)
return;
+ uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
+ if (!uld_entry)
+ return;
+
+ memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
mutex_lock(&uld_mutex);
- list_for_each_entry(adap, &adapter_list, list_node) {
- if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
- (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
- continue;
- if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
- continue;
- ret = cfg_queues_uld(adap, type, p);
- if (ret)
- goto out;
- ret = setup_sge_queues_uld(adap, type, p->lro);
- if (ret)
- goto free_queues;
- if (adap->flags & CXGB4_USING_MSIX) {
- ret = request_msix_queue_irqs_uld(adap, type);
- if (ret)
- goto free_rxq;
- }
- if (adap->flags & CXGB4_FULL_INIT_DONE)
- enable_rx_uld(adap, type);
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- /* send mbox to enable ktls related settings. */
- if (type == CXGB4_ULD_CRYPTO &&
- (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
- cxgb4_set_ktls_feature(adap, 1);
-#endif
- if (adap->uld[type].add)
- goto free_irq;
- ret = setup_sge_txq_uld(adap, type, p);
- if (ret)
- goto free_irq;
- adap->uld[type] = *p;
- ret = uld_attach(adap, type);
- if (ret)
- goto free_txq;
- continue;
-free_txq:
- release_sge_txq_uld(adap, type);
-free_irq:
- if (adap->flags & CXGB4_FULL_INIT_DONE)
- quiesce_rx_uld(adap, type);
- if (adap->flags & CXGB4_USING_MSIX)
- free_msix_queue_irqs_uld(adap, type);
-free_rxq:
- free_sge_queues_uld(adap, type);
-free_queues:
- free_queues_uld(adap, type);
-out:
- dev_warn(adap->pdev_dev,
- "ULD registration failed for uld type %d\n", type);
- }
+ list_for_each_entry(adap, &adapter_list, list_node)
+ cxgb4_uld_alloc_resources(adap, type, p);
+
+ uld_entry->uld_type = type;
+ list_add_tail(&uld_entry->list_node, &uld_list);
mutex_unlock(&uld_mutex);
return;
}
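The new uld_list turns ULD registration into a two-sided registry: adapters and ULDs can arrive in either order, and whichever side shows up later walks the other side's list and allocates resources. A self-contained sketch of that pattern, with illustrative names and fixed-size arrays standing in for the kernel's linked lists:

    #include <stdio.h>

    #define MAX_ENTRIES 8

    static const char *adapters[MAX_ENTRIES];
    static int nadapters;
    static const char *ulds[MAX_ENTRIES];
    static int nulds;

    static void bind_uld(const char *adap, const char *uld)
    {
        printf("allocating %s resources on %s\n", uld, adap);
    }

    /* cf. cxgb4_uld_enable(): a probing adapter replays all known ULDs */
    void adapter_enable(const char *adap)
    {
        adapters[nadapters++] = adap;
        for (int i = 0; i < nulds; i++)
            bind_uld(adap, ulds[i]);
    }

    /* cf. cxgb4_register_uld(): a new ULD walks all known adapters */
    void uld_register(const char *uld)
    {
        ulds[nulds++] = uld;
        for (int i = 0; i < nadapters; i++)
            bind_uld(adapters[i], uld);
    }

    int main(void)
    {
        uld_register("iscsi");    /* ULD module loads before any adapter probes */
        adapter_enable("adap0");  /* probe binds the already-registered ULD */
        uld_register("crypto");   /* late ULD still binds to adap0 */
        return 0;
    }

This is why the hunk in init_one() above can replace the open-coded list_add_tail() with cxgb4_uld_enable(): a late-probing adapter now picks up ULDs that registered before it existed.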
@@ -761,6 +826,7 @@ EXPORT_SYMBOL(cxgb4_register_uld);
*/
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
+ struct cxgb4_uld_list *uld_entry, *tmp;
struct adapter *adap;
if (type >= CXGB4_ULD_MAX)
@@ -775,13 +841,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
continue;
cxgb4_shutdown_uld_adapter(adap, type);
+ }
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- /* send mbox to disable ktls related settings. */
- if (type == CXGB4_ULD_CRYPTO &&
- (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
- cxgb4_set_ktls_feature(adap, 0);
-#endif
+ list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
+ if (uld_entry->uld_type == type) {
+ list_del(&uld_entry->list_node);
+ kfree(uld_entry);
+ }
}
mutex_unlock(&uld_mutex);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index be831317520a..dbce99b209d6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -147,6 +147,9 @@ struct tid_info {
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
atomic_t conns_in_use;
+ /* ETHOFLD TIDs used for rate limiting */
+ atomic_t eotids_in_use;
+
/* lock for setting/clearing filter bitmap */
spinlock_t ftid_lock;
@@ -221,12 +224,14 @@ static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
{
set_bit(eotid, t->eotid_bmap);
t->eotid_tab[eotid].data = data;
+ atomic_inc(&t->eotids_in_use);
}
static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
{
clear_bit(eotid, t->eotid_bmap);
t->eotid_tab[eotid].data = NULL;
+ atomic_dec(&t->eotids_in_use);
}
int cxgb4_alloc_atid(struct tid_info *t, void *data);
@@ -263,6 +268,10 @@ struct filter_ctx {
u32 tid; /* to store tid */
};
+struct chcr_ktls {
+ refcount_t ktls_refcount;
+};
+
struct ch_filter_specification;
int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
@@ -322,6 +331,7 @@ enum cxgb4_control {
CXGB4_CONTROL_DB_DROP,
};
+struct adapter;
struct pci_dev;
struct l2t_data;
struct net_device;
@@ -458,8 +468,12 @@ struct cxgb4_uld_info {
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ const struct tlsdev_ops *tlsdev_ops;
+#endif
};
+void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index cebe1412d960..fde93c50cfec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -57,7 +57,8 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
p->u.params.ratemode,
p->u.params.channel, e->idx,
p->u.params.minrate, p->u.params.maxrate,
- p->u.params.weight, p->u.params.pktsize);
+ p->u.params.weight, p->u.params.pktsize,
+ p->u.params.burstsize);
break;
default:
err = -ENOTSUPP;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6516c45864b3..1359158652b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2091,10 +2091,9 @@ static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
return flits + nsgl;
}
-static inline void *write_eo_wr(struct adapter *adap,
- struct sge_eosw_txq *eosw_txq,
- struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
- u32 hdr_len, u32 wrlen)
+static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len, u32 wrlen)
{
const struct skb_shared_info *ssi = skb_shinfo(skb);
struct cpl_tx_pkt_core *cpl;
@@ -2113,7 +2112,8 @@ static inline void *write_eo_wr(struct adapter *adap,
immd_len += hdr_len;
if (!eosw_txq->ncompl ||
- eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
+ (eosw_txq->last_compl + wrlen16) >=
+ (adap->params.ofldq_wr_cred / 2)) {
compl = true;
eosw_txq->ncompl++;
eosw_txq->last_compl = 0;
@@ -2153,8 +2153,8 @@ static inline void *write_eo_wr(struct adapter *adap,
return cpl;
}
-static void ethofld_hard_xmit(struct net_device *dev,
- struct sge_eosw_txq *eosw_txq)
+static int ethofld_hard_xmit(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
@@ -2167,8 +2167,8 @@ static void ethofld_hard_xmit(struct net_device *dev,
bool skip_eotx_wr = false;
struct tx_sw_desc *d;
struct sk_buff *skb;
+ int left, ret = 0;
u8 flits, ndesc;
- int left;
eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
spin_lock(&eohw_txq->lock);
@@ -2198,11 +2198,19 @@ static void ethofld_hard_xmit(struct net_device *dev,
wrlen = flits * 8;
wrlen16 = DIV_ROUND_UP(wrlen, 16);
- /* If there are no CPL credits, then wait for credits
- * to come back and retry again
+ left = txq_avail(&eohw_txq->q) - ndesc;
+
+ /* If there are no descriptors left in the hardware queues or no
+ * CPL credits left in the software queues, wait for them to come
+ * back and retry. Note that we always request a credit update
+ * via interrupt for every half of the credits consumed, so the
+ * interrupt will eventually restore the credits and invoke the
+ * Tx path again.
+ */
- if (unlikely(wrlen16 > eosw_txq->cred))
+ if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
+ ret = -ENOMEM;
goto out_unlock;
+ }
if (unlikely(skip_eotx_wr)) {
start = (u64 *)wr;
@@ -2231,7 +2239,8 @@ write_wr_headers:
sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
hdr_len);
if (data_len) {
- if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
+ ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
+ if (unlikely(ret)) {
memset(d->addr, 0, sizeof(d->addr));
eohw_txq->mapping_err++;
goto out_unlock;
@@ -2277,12 +2286,13 @@ write_wr_headers:
out_unlock:
spin_unlock(&eohw_txq->lock);
+ return ret;
}
static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
{
struct sk_buff *skb;
- int pktcount;
+ int pktcount, ret;
switch (eosw_txq->state) {
case CXGB4_EO_STATE_ACTIVE:
@@ -2307,7 +2317,9 @@ static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
continue;
}
- ethofld_hard_xmit(dev, eosw_txq);
+ ret = ethofld_hard_xmit(dev, eosw_txq);
+ if (ret)
+ break;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2a3480fc1d91..1c8068c02728 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -10361,9 +10361,10 @@ int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
return ret;
}
-int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
- int rateunit, int ratemode, int channel, int class,
- int minrate, int maxrate, int weight, int pktsize)
+int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
+ u8 rateunit, u8 ratemode, u8 channel, u8 class,
+ u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
+ u16 burstsize)
{
struct fw_sched_cmd cmd;
@@ -10385,6 +10386,7 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
cmd.u.params.max = cpu_to_be32(maxrate);
cmd.u.params.weight = cpu_to_be16(weight);
cmd.u.params.pktsize = cpu_to_be16(pktsize);
+ cmd.u.params.burstsize = cpu_to_be16(burstsize);
return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
NULL, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 68fe734b9b37..0a326c054707 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1205,7 +1205,7 @@ enum fw_caps_config_crypto {
FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
FW_CAPS_CONFIG_TLS_INLINE = 0x00000002,
FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004,
- FW_CAPS_CONFIG_TX_TLS_HW = 0x00000008,
+ FW_CAPS_CONFIG_TLS_HW = 0x00000008,
};
enum fw_caps_config_fcoe {
@@ -1329,7 +1329,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
- FW_PARAMS_PARAM_DEV_KTLS_TX_HW = 0x31,
+ FW_PARAMS_PARAM_DEV_KTLS_HW = 0x31,
};
/*
@@ -1412,6 +1412,12 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
};
+enum fw_params_param_dev_ktls_hw {
+ FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE = 0x00,
+ FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE = 0x01,
+ FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE = 0x01,
+};
+
enum fw_params_param_dev_phyfw {
FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00,
FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9cc3541a7e1c..cec865a97464 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2480,7 +2480,7 @@ static int setup_debugfs(struct adapter *adapter)
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
debugfs_create_file(debugfs_files[i].name,
debugfs_files[i].mode,
- adapter->debugfs_root, (void *)adapter,
+ adapter->debugfs_root, adapter,
debugfs_files[i].fops);
return 0;
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 48f3198381bc..8d845f5ee0c5 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -24,7 +24,7 @@ config CS89x0
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
- <file:Documentation/networking/device_drivers/cirrus/cs89x0.txt>.
+ <file:Documentation/networking/device_drivers/cirrus/cs89x0.rst>.
To compile this driver as a module, choose M here. The module
will be called cs89x0.
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5bff5c2be88b..8d13ea370db1 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1224,7 +1224,8 @@ map_error:
return -ENOMEM;
}
-static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t gmac_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
unsigned short m = (1 << port->txq_order) - 1;
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 8ce6888ea722..177f36f4b89d 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -114,7 +114,7 @@ config DE4X5
These include the DE425, DE434, DE435, DE450 and DE500 models. If
you have a network card of this type, say Y. More specific
information is contained in
- <file:Documentation/networking/device_drivers/dec/de4x5.txt>.
+ <file:Documentation/networking/device_drivers/dec/de4x5.rst>.
To compile this driver as a module, choose M here. The module will
be called de4x5.
@@ -138,7 +138,7 @@ config DM9102
This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
Davicom (<http://www.davicom.com.tw/>). If you have such a network
(Ethernet) card, say Y. Some information is contained in the file
- <file:Documentation/networking/device_drivers/dec/dmfe.txt>.
+ <file:Documentation/networking/device_drivers/dec/dmfe.rst>.
To compile this driver as a module, choose M here. The module will
be called dmfe.
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f16853c3c851..0ccd9994ad45 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -951,7 +951,7 @@ static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 si
static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
static int test_tp(struct net_device *dev, s32 msec);
static int EISA_signature(char *name, struct device *device);
-static int PCI_signature(char *name, struct de4x5_private *lp);
+static void PCI_signature(char *name, struct de4x5_private *lp);
static void DevicePresent(struct net_device *dev, u_long iobase);
static void enet_addr_rst(u_long aprom_addr);
static int de4x5_bad_srom(struct de4x5_private *lp);
@@ -3902,14 +3902,14 @@ EISA_signature(char *name, struct device *device)
/*
** Look for a particular board name in the PCI configuration space
*/
-static int
+static void
PCI_signature(char *name, struct de4x5_private *lp)
{
- int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
+ int i, siglen = ARRAY_SIZE(de4x5_signatures);
if (lp->chipset == DC21040) {
strcpy(name, "DE434/5");
- return status;
+ return;
} else { /* Search for a DEC name in the SROM */
int tmp = *((char *)&lp->srom + 19) * 3;
strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
@@ -3935,8 +3935,6 @@ PCI_signature(char *name, struct de4x5_private *lp)
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
lp->useSROM = true;
}
-
- return status;
}
/*
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 643090555cc7..5143722c4419 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1869,7 +1869,7 @@ Compile command:
gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
-Read Documentation/networking/device_drivers/dlink/dl2k.txt for details.
+Read Documentation/networking/device_drivers/dlink/dl2k.rst for details.
*/
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 057a508dd6e2..db98274501a0 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -776,8 +776,7 @@ static int dnet_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bp->regs = devm_ioremap_resource(&pdev->dev, res);
+ bp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(bp->regs)) {
err = PTR_ERR(bp->regs);
goto err_out_free_dev;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 32cf54f0e35b..473b337b2e3b 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1057,9 +1057,6 @@ static int ftmac100_probe(struct platform_device *pdev)
struct ftmac100 *priv;
int err;
- if (!pdev)
- return -ENODEV;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 2bd7ace0a953..bfc6bfe94d0a 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -77,6 +77,7 @@ config UCC_GETH
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
select PHYLIB
+ select FIXED_PHY
---help---
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
@@ -90,6 +91,7 @@ config GIANFAR
depends on HAS_DMA
select FSL_PQ_MDIO
select PHYLIB
+ select FIXED_PHY
select CRC32
---help---
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index 3b325733a4f8..0a54c7e0e4ae 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -3,6 +3,7 @@ menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
depends on FSL_DPAA && FSL_FMAN
select PHYLIB
+ select FIXED_PHY
select FSL_FMAN_MAC
---help---
Data Path Acceleration Architecture Ethernet driver,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 2cd1f8efdfa3..2972244e6eb0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2107,7 +2107,7 @@ workaround:
/* Workaround for DPAA_A050385 requires data start to be aligned */
start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
- if (start - new_skb->data != 0)
+ if (start - new_skb->data)
skb_reserve(new_skb, start - new_skb->data);
skb_put(new_skb, skb->len);
@@ -2914,7 +2914,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
}
/* Do this here, so we can be verbose early */
- SET_NETDEV_DEV(net_dev, dev);
+ SET_NETDEV_DEV(net_dev, dev->parent);
dev_set_drvdata(dev, net_dev);
priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index c6fb8e4021ac..feea797cde02 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -9,6 +9,16 @@ config FSL_DPAA2_ETH
The driver manages network objects discovered on the Freescale
MC bus.
+if FSL_DPAA2_ETH
+config FSL_DPAA2_ETH_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on DCB
+ help
+ Enable Priority-Based Flow Control (PFC) support for DPAA2 Ethernet
+ devices.
+endif
+
config FSL_DPAA2_PTP_CLOCK
tristate "Freescale DPAA2 PTP Clock"
depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 69184ca3b7b9..6e7f33c956bf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
new file mode 100644
index 000000000000..83dee575c2fa
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP */
+
+#include "dpaa2-eth.h"
+
+static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!(priv->link_state.options & DPNI_LINK_OPT_PFC_PAUSE))
+ return 0;
+
+ memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
+ pfc->pfc_cap = dpaa2_eth_tc_count(priv);
+
+ return 0;
+}
+
+static inline bool is_prio_enabled(u8 pfc_en, u8 tc)
+{
+ return !!(pfc_en & (1 << tc));
+}
+
+static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
+{
+ struct dpni_congestion_notification_cfg cfg = {0};
+ int i, err;
+
+ cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
+ cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cfg.message_iova = 0ULL;
+ cfg.message_ctx = 0ULL;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (is_prio_enabled(pfc_en, i)) {
+ cfg.threshold_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv);
+ cfg.threshold_exit = DPAA2_ETH_CN_THRESH_EXIT(priv);
+ } else {
+ /* For priorities not set in the pfc_en mask, we leave
+ * the congestion thresholds at zero, which effectively
+ * disables generation of PFC frames for them
+ */
+ cfg.threshold_entry = 0;
+ cfg.threshold_exit = 0;
+ }
+
+ err = dpni_set_congestion_notification(priv->mc_io, 0,
+ priv->mc_token,
+ DPNI_QUEUE_RX, i, &cfg);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_congestion_notification failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_cfg link_cfg = {0};
+ bool tx_pause;
+ int err;
+
+ if (pfc->mbc || pfc->delay)
+ return -EOPNOTSUPP;
+
+ /* If same PFC enabled mask, nothing to do */
+ if (priv->pfc.pfc_en == pfc->pfc_en)
+ return 0;
+
+ /* We allow PFC configuration even if it won't have any effect until
+ * general pause frames are enabled
+ */
+ tx_pause = dpaa2_eth_tx_pause_enabled(priv->link_state.options);
+ if (!dpaa2_eth_rx_pause_enabled(priv->link_state.options) || !tx_pause)
+ netdev_warn(net_dev, "Pause support must be enabled in order for PFC to work!\n");
+
+ link_cfg.rate = priv->link_state.rate;
+ link_cfg.options = priv->link_state.options;
+ if (pfc->pfc_en)
+ link_cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
+ else
+ link_cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_link_cfg failed\n");
+ return err;
+ }
+
+ /* Configure congestion notifications for the enabled priorities */
+ err = set_pfc_cn(priv, pfc->pfc_en);
+ if (err)
+ return err;
+
+ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+ priv->pfc_enabled = !!pfc->pfc_en;
+
+ dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
+
+ return 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return priv->dcbx_mode;
+}
+
+static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return (mode != (priv->dcbx_mode)) ? 1 : 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = priv->dcbx_mode;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
+ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
+ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
+ .getdcbx = dpaa2_eth_dcbnl_getdcbx,
+ .setdcbx = dpaa2_eth_dcbnl_setdcbx,
+ .getcap = dpaa2_eth_dcbnl_getcap,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index a9afe46b837f..c453a23045c1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -81,8 +81,8 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
int i, err;
seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s\n",
- "VFQID", "CPU", "Type", "Frames", "Pending frames");
+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");
for (i = 0; i < priv->num_fqs; i++) {
fq = &priv->fq[i];
@@ -90,9 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
if (err)
fcnt = 0;
- seq_printf(file, "%5d%16d%16s%16llu%16u\n",
+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
fq->fqid,
fq->target_cpu,
+ fq->tc,
fq_type_to_str(fq),
fq->stats.frames,
fcnt);
@@ -127,16 +128,19 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
int i;
seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s\n",
- "CHID", "CPU", "Deq busy", "CDANs", "Buf count");
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+ "Avg Frm/CDAN", "Buf count");
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- seq_printf(file, "%4d%16d%16llu%16llu%16d\n",
+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
ch->ch_id,
ch->nctx.desired_cpu,
ch->stats.dequeue_portal_busy,
+ ch->stats.frames,
ch->stats.cdan,
+ div64_u64(ch->stats.frames, ch->stats.cdan),
ch->buf_count);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index b6c46639aa4c..8fb48de5d18c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2020 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)sg_vaddr, 0);
@@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
- dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
@@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
(page_address(page) - page_address(head_page));
skb_add_rx_frag(skb, i - 1, head_page, page_offset,
- sg_length, DPAA2_ETH_RX_BUF_SIZE);
+ sg_length, priv->rx_buf_size);
}
if (dpaa2_sg_is_final(sge))
@@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)vaddr, 0);
}
@@ -244,13 +244,72 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
ch->xdp.drop_cnt = 0;
}
-static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
- void *buf_start, u16 queue_id)
+static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_eth_xdp_fds *xdp_fds)
+{
+ int total_enqueued = 0, retries = 0, enqueued;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ int num_fds, err, max_retries;
+ struct dpaa2_fd *fds;
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ /* try to enqueue all the FDs until the max number of retries is hit */
+ fds = xdp_fds->fds;
+ num_fds = xdp_fds->num;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fds[total_enqueued],
+ 0, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ percpu_extras->tx_portal_busy += ++retries;
+ continue;
+ }
+ total_enqueued += enqueued;
+ }
+ xdp_fds->num = 0;
+
+ return total_enqueued;
+}
+
+static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_fd *fds;
+ int enqueued, i;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ /* Enqueue the array of XDP_TX frames */
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
+
+ /* update statistics */
+ percpu_stats->tx_packets += enqueued;
+ fds = fq->xdp_tx_fds.fds;
+ for (i = 0; i < enqueued; i++) {
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ ch->stats.xdp_tx++;
+ }
+ for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
+ xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+ percpu_stats->tx_errors++;
+ ch->stats.xdp_tx_err++;
+ }
+ fq->xdp_tx_fds.num = 0;
+}
+
+static void xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
{
- struct dpaa2_eth_fq *fq;
struct dpaa2_faead *faead;
+ struct dpaa2_fd *dest_fd;
+ struct dpaa2_eth_fq *fq;
u32 ctrl, frc;
- int i, err;
/* Mark the egress frame hardware annotation area as valid */
frc = dpaa2_fd_get_frc(fd);
@@ -267,13 +326,13 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
faead->conf_fqid = 0;
fq = &priv->fq[queue_id];
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, fd, 0);
- if (err != -EBUSY)
- break;
- }
+ dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
+ memcpy(dest_fd, fd, sizeof(*dest_fd));
- return err;
+ if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
+ return;
+
+ xdp_tx_flush(priv, ch, fq);
}
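dpaa2_eth_xdp_flush() above is a bounded retry loop over a bulk-enqueue primitive that can make partial progress or report busy. A stand-alone sketch of the same control flow, assuming a stubbed portal that accepts at most two frames per call:

    #include <errno.h>
    #include <stdio.h>

    #define ENQUEUE_RETRIES 10

    /* Stub for the hardware portal: takes frames in chunks of at most
     * two. Returns the number of frames taken, or -EBUSY for none. */
    static int try_enqueue(const int *frames, int n)
    {
        (void)frames;
        return n > 2 ? 2 : n;
    }

    /* Returns how many of num_frames were enqueued; the caller is
     * expected to drop or recycle frames[total..num_frames). */
    int flush_frames(const int *frames, int num_frames)
    {
        int total = 0, retries = 0;
        int max_retries = num_frames * ENQUEUE_RETRIES;

        while (total < num_frames && retries < max_retries) {
            int ret = try_enqueue(frames + total, num_frames - total);

            if (ret == -EBUSY) {
                retries++;     /* portal busy: retry the same slice */
                continue;
            }
            total += ret;      /* partial progress is fine */
        }
        return total;
    }

    int main(void)
    {
        int frames[5] = {0};

        printf("enqueued %d of 5\n", flush_frames(frames, 5)); /* prints 5 */
        return 0;
    }

The driver's version additionally charges each busy response to the tx_portal_busy statistic, but the termination condition is the same: either everything is enqueued or the retry budget is exhausted.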
static u32 run_xdp(struct dpaa2_eth_priv *priv,
@@ -282,14 +341,11 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
struct dpaa2_fd *fd, void *vaddr)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
- struct rtnl_link_stats64 *percpu_stats;
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
u32 xdp_act = XDP_PASS;
int err;
- percpu_stats = this_cpu_ptr(priv->percpu_stats);
-
rcu_read_lock();
xdp_prog = READ_ONCE(ch->xdp.prog);
@@ -302,6 +358,9 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
xdp_set_data_meta_invalid(&xdp);
xdp.rxq = &ch->xdp_rxq;
+ xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
+ (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
+
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
/* xdp.data pointer may have changed */
@@ -312,16 +371,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
case XDP_PASS:
break;
case XDP_TX:
- err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
- if (err) {
- xdp_release_buf(priv, ch, addr);
- percpu_stats->tx_errors++;
- ch->stats.xdp_tx_err++;
- } else {
- percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
- ch->stats.xdp_tx++;
- }
+ xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
break;
default:
bpf_warn_invalid_xdp_action(xdp_act);
@@ -335,9 +385,13 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
break;
case XDP_REDIRECT:
dma_unmap_page(priv->net_dev->dev.parent, addr,
- DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
ch->buf_count--;
+
+ /* Allow redirect use of full headroom */
xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
+
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
if (unlikely(err))
ch->stats.xdp_drop++;
@@ -374,7 +428,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
trace_dpaa2_rx_fd(priv->net_dev, fd);
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
fas = dpaa2_get_fas(vaddr, false);
@@ -393,13 +447,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
}
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
free_pages((unsigned long)vaddr, 0);
@@ -493,6 +547,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
return 0;
fq->stats.frames += cleaned;
+ ch->stats.frames += cleaned;
/* A dequeue operation only pulls frames from a single queue
* into the store. Return the frame queue as an out param.
@@ -847,7 +902,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
* the Tx confirmation callback for this frame
*/
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, prio);
+ err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
if (err != -EBUSY)
break;
}
@@ -974,7 +1029,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
if (!page)
goto err_alloc;
- addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+ addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
@@ -984,7 +1039,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
page, DPAA2_ETH_RX_BUF_RAW_SIZE,
- addr, DPAA2_ETH_RX_BUF_SIZE,
+ addr, priv->rx_buf_size,
bpid);
}
@@ -1138,6 +1193,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
int store_cleaned, work_done;
struct list_head rx_list;
int retries = 0;
+ u16 flowid;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -1160,6 +1216,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
break;
if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
+ flowid = fq->flowid;
} else {
txconf_cleaned += store_cleaned;
/* We have a single Tx conf FQ on this channel */
@@ -1202,6 +1259,8 @@ out:
if (ch->xdp.res & XDP_REDIRECT)
xdp_do_flush_map();
+ else if (rx_cleaned && ch->xdp.res & XDP_TX)
+ xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
}
@@ -1228,31 +1287,67 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
}
}
-static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
+void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+ bool tx_pause, bool pfc)
{
struct dpni_taildrop td = {0};
+ struct dpaa2_eth_fq *fq;
int i, err;
- if (priv->rx_td_enabled == enable)
- return;
+ /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
+ * flow control is disabled (as it might interfere with either the
+ * buffer pool depletion trigger for pause frames or with the group
+ * congestion trigger for PFC frames)
+ */
+ td.enable = !tx_pause;
+ if (priv->rx_fqtd_enabled == td.enable)
+ goto set_cgtd;
- td.enable = enable;
- td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+ td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
+ td.units = DPNI_CONGESTION_UNIT_BYTES;
for (i = 0; i < priv->num_fqs; i++) {
- if (priv->fq[i].type != DPAA2_RX_FQ)
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_RX_FQ)
continue;
err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
- DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
- priv->fq[i].flowid, &td);
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ fq->tc, fq->flowid, &td);
if (err) {
netdev_err(priv->net_dev,
- "dpni_set_taildrop() failed\n");
- break;
+ "dpni_set_taildrop(FQ) failed\n");
+ return;
}
}
- priv->rx_td_enabled = enable;
+ priv->rx_fqtd_enabled = td.enable;
+
+set_cgtd:
+ /* Congestion group taildrop: threshold is in frames, per group
+ * of FQs belonging to the same traffic class.
+ * Enabled if general Tx pause is disabled or if PFCs are enabled
+ * (the congestion group threshold for PFC generation is lower than
+ * the CG taildrop threshold, so it won't interfere with it; we also
+ * want frames in non-PFC enabled traffic classes to be kept in check)
+ */
+ td.enable = !tx_pause || (tx_pause && pfc);
+ if (priv->rx_cgtd_enabled == td.enable)
+ return;
+
+ td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
+ td.units = DPNI_CONGESTION_UNIT_FRAMES;
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_GROUP, DPNI_QUEUE_RX,
+ i, 0, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop(CG) failed\n");
+ return;
+ }
+ }
+
+ priv->rx_cgtd_enabled = td.enable;
}
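Summarizing the two enable decisions above as a quick reference (derived directly from the code: FQ taildrop is !tx_pause, CG taildrop is !tx_pause || pfc):

    tx_pause  pfc  | FQ taildrop | CG taildrop
    off       off  | on          | on
    off       on   | on          | on
    on        off  | off         | off
    on        on   | off         | on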
static int link_state_update(struct dpaa2_eth_priv *priv)
@@ -1272,9 +1367,8 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
* Rx FQ taildrop configuration as well. We configure taildrop
* only when pause frame generation is disabled.
*/
- tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
- !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
- dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+ tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
+ dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
@@ -1720,7 +1814,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
int mfl, linear_mfl;
mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
- linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
+ linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
if (mfl > linear_mfl) {
@@ -1880,20 +1974,16 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return 0;
}
-static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
- struct xdp_frame *xdpf)
+static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
+ struct xdp_frame *xdpf,
+ struct dpaa2_fd *fd)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
- struct rtnl_link_stats64 *percpu_stats;
- struct dpaa2_eth_drv_stats *percpu_extras;
unsigned int needed_headroom;
struct dpaa2_eth_swa *swa;
- struct dpaa2_eth_fq *fq;
- struct dpaa2_fd fd;
void *buffer_start, *aligned_start;
dma_addr_t addr;
- int err, i;
/* We require a minimum headroom to be able to transmit the frame.
* Otherwise return an error and let the original net_device handle it
@@ -1902,11 +1992,8 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
if (xdpf->headroom < needed_headroom)
return -EINVAL;
- percpu_stats = this_cpu_ptr(priv->percpu_stats);
- percpu_extras = this_cpu_ptr(priv->percpu_extras);
-
/* Setup the FD fields */
- memset(&fd, 0, sizeof(fd));
+ memset(fd, 0, sizeof(*fd));
/* Align FD address, if possible */
buffer_start = xdpf->data - needed_headroom;
@@ -1924,32 +2011,14 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
addr = dma_map_single(dev, buffer_start,
swa->xdp.dma_size,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, addr))) {
- percpu_stats->tx_dropped++;
+ if (unlikely(dma_mapping_error(dev, addr)))
return -ENOMEM;
- }
-
- dpaa2_fd_set_addr(&fd, addr);
- dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
- dpaa2_fd_set_len(&fd, xdpf->len);
- dpaa2_fd_set_format(&fd, dpaa2_fd_single);
- dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
-
- fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, 0);
- if (err != -EBUSY)
- break;
- }
- percpu_extras->tx_portal_busy += i;
- if (unlikely(err < 0)) {
- percpu_stats->tx_errors++;
- /* let the Rx device handle the cleanup */
- return err;
- }
- percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
+ dpaa2_fd_set_len(fd, xdpf->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
return 0;
}
@@ -1957,8 +2026,12 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags)
{
- int drops = 0;
- int i, err;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_fd *fds;
+ int enqueued, i, err;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -1966,17 +2039,31 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
if (!netif_running(net_dev))
return -ENETDOWN;
- for (i = 0; i < n; i++) {
- struct xdp_frame *xdpf = frames[i];
+ fq = &priv->fq[smp_processor_id()];
+ xdp_redirect_fds = &fq->xdp_redirect_fds;
+ fds = xdp_redirect_fds->fds;
- err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ /* create a FD for each xdp_frame in the list received */
+ for (i = 0; i < n; i++) {
+ err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
+ if (err)
+ break;
}
+ xdp_redirect_fds->num = i;
- return n - drops;
+ /* enqueue all the frame descriptors */
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
+
+ /* update statistics */
+ percpu_stats->tx_packets += enqueued;
+ for (i = 0; i < enqueued; i++)
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ for (i = enqueued; i < n; i++)
+ xdp_return_frame_rx_napi(frames[i]);
+
+ return enqueued;
}
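Note the return-value change here: instead of n minus the number of drops, ndo_xdp_xmit now reports the number of frames actually enqueued. For example, if 32 redirected frames are passed in and only 20 fit, the function returns 20 and frames 20..31 are handed back via xdp_return_frame_rx_napi().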
static int update_xps(struct dpaa2_eth_priv *priv)
@@ -2018,7 +2105,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev,
int i;
if (type != TC_SETUP_QDISC_MQPRIO)
- return -EINVAL;
+ return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_queues = dpaa2_eth_queue_count(priv);
@@ -2030,7 +2117,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev,
if (num_tc > dpaa2_eth_tc_count(priv)) {
netdev_err(net_dev, "Max %d traffic classes supported\n",
dpaa2_eth_tc_count(priv));
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (!num_tc) {
@@ -2355,7 +2442,7 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
static void setup_fqs(struct dpaa2_eth_priv *priv)
{
- int i;
+ int i, j;
/* We have one TxConf FQ per Tx flow.
* The number of Tx and Rx queues is the same.
@@ -2367,10 +2454,13 @@ static void setup_fqs(struct dpaa2_eth_priv *priv)
priv->fq[priv->num_fqs++].flowid = (u16)i;
}
- for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
- priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
- priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
- priv->fq[priv->num_fqs++].flowid = (u16)i;
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+ priv->fq[priv->num_fqs].tc = (u8)j;
+ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
}
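With the per-TC Rx queues added above, the FQ count grows accordingly: a DPNI with, say, 8 queues and 4 traffic classes (illustrative values) now sets up 8 Tx confirmation FQs plus 4 * 8 = 32 Rx FQs, one Rx FQ per (traffic class, flow) pair.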
/* For each FQ, decide on which core to process incoming frames */
@@ -2462,6 +2552,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
else
rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+ /* We need to ensure that the buffer size seen by WRIOP is a multiple
+ * of 64 or 256 bytes depending on the WRIOP version.
+ */
+ priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
+
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
buf_layout.pass_timestamp = true;
@@ -2523,19 +2618,38 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd, u8 prio)
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames __always_unused,
+ int *frames_enqueued)
{
- return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
- priv->tx_qdid, prio,
- fq->tx_qdbin, fd);
+ int err;
+
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, prio,
+ fq->tx_qdbin, fd);
+ if (!err && frames_enqueued)
+ *frames_enqueued = 1;
+ return err;
}
-static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd, u8 prio)
+static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd,
+ u8 prio, u32 num_frames,
+ int *frames_enqueued)
{
- return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
- fq->tx_fqid[prio], fd);
+ int err;
+
+ err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
+ fq->tx_fqid[prio],
+ fd, num_frames);
+
+ if (err == 0)
+ return -EBUSY;
+
+ if (frames_enqueued)
+ *frames_enqueued = err;
+ return 0;
}
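Both enqueue back ends now share one widened signature, so callers such as dpaa2_eth_xdp_flush() can hand over whole frame arrays without knowing which mode the firmware supports. A simplified sketch of that callback selection; the types and the version check are illustrative stand-ins, not the driver's:

    #include <stdio.h>

    /* Simplified stand-in for the driver's enqueue callback type */
    typedef int (*enqueue_fn)(const void *fds, unsigned int num_frames,
                              int *frames_enqueued);

    static int enqueue_one(const void *fds, unsigned int num_frames,
                           int *frames_enqueued)
    {
        (void)fds;
        (void)num_frames;             /* legacy QD path ignores the count... */
        if (frames_enqueued)
            *frames_enqueued = 1;     /* ...and always takes a single frame */
        return 0;
    }

    static int enqueue_many(const void *fds, unsigned int num_frames,
                            int *frames_enqueued)
    {
        (void)fds;
        if (frames_enqueued)
            *frames_enqueued = (int)num_frames; /* bulk FQ path takes all */
        return 0;
    }

    int main(void)
    {
        enqueue_fn enqueue;
        int done = 0;
        int fw_supports_fqid = 1;     /* cf. the DPNI version check */

        enqueue = fw_supports_fqid ? enqueue_many : enqueue_one;
        enqueue(NULL, 8, &done);
        printf("enqueued %d frames\n", done); /* prints 8 */
        return 0;
    }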
static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
@@ -2544,7 +2658,7 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
priv->enqueue = dpaa2_eth_enqueue_qd;
else
- priv->enqueue = dpaa2_eth_enqueue_fq;
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
}
static int set_pause(struct dpaa2_eth_priv *priv)
@@ -2605,7 +2719,7 @@ static void update_tx_fqids(struct dpaa2_eth_priv *priv)
}
}
- priv->enqueue = dpaa2_eth_enqueue_fq;
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
return;
@@ -2615,6 +2729,118 @@ out_err:
priv->enqueue = dpaa2_eth_enqueue_qd;
}
+/* Configure ingress classification based on VLAN PCP */
+static int set_vlan_qos(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpkg_profile_cfg kg_cfg = {0};
+ struct dpni_qos_tbl_cfg qos_cfg = {0};
+ struct dpni_rule_cfg key_params;
+ void *dma_mem, *key, *mask;
+ u8 key_size = 2; /* VLAN TCI field */
+ int i, pcp, err;
+
+ /* VLAN-based classification only makes sense if we have multiple
+ * traffic classes.
+ * Also, we need to extract just the 3-bit PCP field from the VLAN
+ * header and we can only do that by using a mask
+ */
+ if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
+ dev_dbg(dev, "VLAN-based QoS classification not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ kg_cfg.num_extracts = 1;
+ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
+ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+
+ err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
+ if (err) {
+ dev_err(dev, "dpni_prepare_key_cfg failed\n");
+ goto out_free_tbl;
+ }
+
+ /* set QoS table */
+ qos_cfg.default_tc = 0;
+ qos_cfg.discard_on_miss = 0;
+ qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
+ DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
+ dev_err(dev, "QoS table DMA mapping failed\n");
+ err = -ENOMEM;
+ goto out_free_tbl;
+ }
+
+ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_qos_table failed\n");
+ goto out_unmap_tbl;
+ }
+
+ /* Add QoS table entries */
+ key = kzalloc(key_size * 2, GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto out_unmap_tbl;
+ }
+ mask = key + key_size;
+ *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
+
+ key_params.key_iova = dma_map_single(dev, key, key_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_params.key_iova)) {
+ dev_err(dev, "Qos table entry DMA mapping failed\n");
+ err = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_params.mask_iova = key_params.key_iova + key_size;
+ key_params.key_size = key_size;
+
+ /* We add rules for PCP-based distribution starting with highest
+ * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
+ * classes to accommodate all priority levels, the lowest ones end up
+ * on TC 0 which was configured as default
+ */
+ for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
+ *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
+ dma_sync_single_for_device(dev, key_params.key_iova,
+ key_size * 2, DMA_TO_DEVICE);
+
+ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
+ &key_params, i, i);
+ if (err) {
+ dev_err(dev, "dpni_add_qos_entry failed\n");
+ dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
+ goto out_unmap_key;
+ }
+ }
+
+ priv->vlan_cls_enabled = true;
+
+ /* Table and key memory is not persistent, clean everything up after
+ * configuration is finished
+ */
+out_unmap_key:
+ dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
+out_free_key:
+ kfree(key);
+out_unmap_tbl:
+ dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+out_free_tbl:
+ kfree(dma_mem);
+
+ return err;
+}
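
To make the rule-adding loop above concrete: on a DPNI with 4 traffic classes it walks i = 3..0 while pcp = 7..4, so PCP 7 maps to TC 3, PCP 6 to TC 2, PCP 5 to TC 1, PCP 4 to TC 0, and PCP 3..0 miss the QoS table and land on the default TC 0. The same mapping as a small sketch (hypothetical helper, for illustration only):

	/* PCP-to-TC mapping produced by the loop in set_vlan_qos() for a
	 * DPNI with num_tcs traffic classes; PCPs without an explicit rule
	 * fall through to the QoS table's default TC 0.
	 */
	static int example_pcp_to_tc(int pcp, int num_tcs)
	{
		int lowest_mapped_pcp = 8 - num_tcs;

		return pcp >= lowest_mapped_pcp ? pcp - lowest_mapped_pcp : 0;
	}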
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -2677,10 +2903,16 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
goto close;
}
+ err = set_vlan_qos(priv);
+ if (err && err != -EOPNOTSUPP)
+ goto close;
+
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
- if (!priv->cls_rules)
+ if (!priv->cls_rules) {
+ err = -ENOMEM;
goto close;
+ }
return 0;
@@ -2711,7 +2943,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
int err;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
+ DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
if (err) {
dev_err(dev, "dpni_get_queue(RX) failed\n");
return err;
@@ -2724,7 +2956,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, 0, fq->flowid,
+ DPNI_QUEUE_RX, fq->tc, fq->flowid,
DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
&queue);
if (err) {
@@ -2733,6 +2965,10 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
}
/* xdp_rxq setup */
+ /* only once for each channel */
+ if (fq->tc > 0)
+ return 0;
+
err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
fq->flowid);
if (err) {
@@ -2870,7 +3106,7 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_tc_dist_cfg dist_cfg;
- int err;
+ int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
@@ -2878,9 +3114,14 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
- if (err)
- dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
+ i, &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+ break;
+ }
+ }
return err;
}
@@ -2890,7 +3131,7 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
- int err;
+ int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
@@ -2898,9 +3139,15 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.enable = 1;
- err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
- if (err)
- dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+ break;
+ }
+ }
return err;
}
@@ -2910,7 +3157,7 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
- int err;
+ int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
@@ -2918,9 +3165,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.enable = 1;
- err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
- if (err)
- dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+ break;
+ }
+ }
return err;
}
@@ -3126,7 +3379,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
- pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
+ pools_params.pools[0].buffer_size = priv->rx_buf_size;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
if (err) {
dev_err(dev, "dpni_set_pools() failed\n");
@@ -3606,6 +3859,15 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_alloc_rings;
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+ if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
+ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
+ } else {
+ dev_dbg(dev, "PFC not supported\n");
+ }
+#endif
+
err = setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 7635db3ef903..2d7ada0f0dbd 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2020 NXP
*/
#ifndef __DPAA2_ETH_H
#define __DPAA2_ETH_H
+#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/fsl/mc.h>
@@ -36,27 +37,46 @@
/* Convert L3 MTU to L2 MFL */
#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
-/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
- * frames in the Rx queues (length of the current frame is not
- * taken into account when making the taildrop decision)
+/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
+ * enough number of jumbo frames in the Rx queues (length of the current
+ * frame is not taken into account when making the taildrop decision)
*/
-#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
+#define DPAA2_ETH_FQ_TAILDROP_THRESH (1024 * 1024)
/* Maximum number of Tx confirmation frames to be processed
* in a single NAPI call
*/
#define DPAA2_ETH_TXCONF_PER_NAPI 256
-/* Buffer quota per queue. Must be large enough such that for minimum sized
- * frames taildrop kicks in before the bpool gets depleted, so we compute
- * how many 64B frames fit inside the taildrop threshold and add a margin
- * to accommodate the buffer refill delay.
+/* Buffer quota per channel. We want to keep the number of ingress frames in
+ * flight in check: for small sized frames, congestion group taildrop may kick
+ * in first; for large sizes, the Rx FQ taildrop threshold will ensure only a
+ * reasonable number of frames is pending at any given time.
+ * Ingress frame drop due to buffer pool depletion should be a corner case only
*/
-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
-#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+#define DPAA2_ETH_NUM_BUFS 1280
#define DPAA2_ETH_REFILL_THRESH \
(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
+/* Congestion group taildrop threshold: number of frames allowed to accumulate
+ * at any moment in a group of Rx queues belonging to the same traffic class.
+ * Choose value such that we don't risk depleting the buffer pool before the
+ * taildrop kicks in
+ */
+#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
+ (1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))
+
+/* Congestion group notification threshold: when this many frames accumulate
+ * on the Rx queues belonging to the same TC, the MAC is instructed to send
+ * PFC frames for that TC.
+ * When the number of pending frames drops below the exit threshold,
+ * transmission of PFC frames is stopped.
+ */
+#define DPAA2_ETH_CN_THRESH_ENTRY(priv) \
+ (DPAA2_ETH_CG_TAILDROP_THRESH(priv) / 2)
+#define DPAA2_ETH_CN_THRESH_EXIT(priv) \
+ (DPAA2_ETH_CN_THRESH_ENTRY(priv) * 3 / 4)
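
A quick worked example of the thresholds above: a DPNI with 8 Rx queues and 2 traffic classes gets DPAA2_ETH_CG_TAILDROP_THRESH = 1024 * 8 / 2 = 4096 frames per congestion group, hence a PFC entry threshold of 4096 / 2 = 2048 frames and an exit threshold of 2048 * 3 / 4 = 1536 frames. Assuming one channel per Rx queue, the two groups can then hold at most 2 * 4096 = 8192 frames against 8 * 1280 = 10240 available buffers, consistent with the stated goal of hitting taildrop before the pool is depleted.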
+
/* Maximum number of buffers that can be acquired/released through a single
* QBMan command
*/
@@ -288,11 +308,15 @@ struct dpaa2_eth_ch_stats {
__u64 xdp_tx;
__u64 xdp_tx_err;
__u64 xdp_redirect;
+ /* Must be last, does not show up in ethtool stats */
+ __u64 frames;
};
/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8
-#define DPAA2_ETH_MAX_RX_QUEUES 16
+#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
+#define DPAA2_ETH_MAX_RX_QUEUES \
+ (DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_TX_QUEUES 16
#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
DPAA2_ETH_MAX_TX_QUEUES)
@@ -308,6 +332,11 @@ enum dpaa2_eth_fq_type {
struct dpaa2_eth_priv;
+struct dpaa2_eth_xdp_fds {
+ struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
+ ssize_t num;
+};
+
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
@@ -325,6 +354,9 @@ struct dpaa2_eth_fq {
const struct dpaa2_fd *fd,
struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats;
+
+ struct dpaa2_eth_xdp_fds xdp_redirect_fds;
+ struct dpaa2_eth_xdp_fds xdp_tx_fds;
};
struct dpaa2_eth_ch_xdp {
@@ -371,7 +403,9 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
int (*enqueue)(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd, u8 prio);
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames,
+ int *frames_enqueued);
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
@@ -382,6 +416,7 @@ struct dpaa2_eth_priv {
u16 tx_data_offset;
struct fsl_mc_device *dpbp_dev;
+ u16 rx_buf_size;
u16 bpid;
struct iommu_domain *iommu_domain;
@@ -401,7 +436,8 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_drv_stats __percpu *percpu_extras;
u16 mc_token;
- u8 rx_td_enabled;
+ u8 rx_fqtd_enabled;
+ u8 rx_cgtd_enabled;
struct dpni_link_state link_state;
bool do_link_poll;
@@ -412,6 +448,12 @@ struct dpaa2_eth_priv {
u64 rx_cls_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
+ u8 vlan_cls_enabled;
+ u8 pfc_enabled;
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+ u8 dcbx_mode;
+ struct ieee_pfc pfc;
+#endif
struct bpf_prog *xdp_prog;
#ifdef CONFIG_DEBUG_FS
struct dpaa2_debugfs dbg;
@@ -494,6 +536,17 @@ enum dpaa2_eth_rx_dist {
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \
DPNI_PAUSE_VER_MINOR) >= 0)
+static inline bool dpaa2_eth_tx_pause_enabled(u64 link_options)
+{
+ return !!(link_options & DPNI_LINK_OPT_PAUSE) ^
+ !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+}
+
+static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
+{
+ return !!(link_options & DPNI_LINK_OPT_PAUSE);
+}
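
For reference, the two helpers above decode the standard PAUSE/ASYM_PAUSE option semantics as follows (derived directly from the XOR in dpaa2_eth_tx_pause_enabled()):

	PAUSE  ASYM_PAUSE | Rx pause  Tx pause
	  0        0      |    no        no
	  0        1      |    no        yes
	  1        0      |    yes       yes
	  1        1      |    yes       no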
+
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
@@ -533,4 +586,9 @@ int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field);
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
+void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+ bool tx_pause, bool pfc);
+
+extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
+
#endif /* __DPAA2_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 94347c695233..e88269fe3de7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -130,9 +130,8 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
return;
}
- pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
- pause->tx_pause = pause->rx_pause ^
- !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+ pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
+ pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
pause->autoneg = AUTONEG_DISABLE;
}
@@ -277,7 +276,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
/* Per-channel stats */
for (k = 0; k < priv->num_channels; k++) {
ch_stats = &priv->channel[k]->stats;
- for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
+ for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
i += j;
@@ -547,7 +546,7 @@ static int do_cls_rule(struct net_device *net_dev,
dma_addr_t key_iova;
u64 fields = 0;
void *key_buf;
- int err;
+ int i, err;
if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
fs->ring_cookie >= dpaa2_eth_queue_count(priv))
@@ -607,11 +606,18 @@ static int do_cls_rule(struct net_device *net_dev,
fs_act.options |= DPNI_FS_OPT_DISCARD;
else
fs_act.flow_id = fs->ring_cookie;
- err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
- fs->location, &rule_cfg, &fs_act);
- } else {
- err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
- &rule_cfg);
+ }
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (add)
+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
+ i, fs->location, &rule_cfg,
+ &fs_act);
+ else
+ err = dpni_remove_fs_entry(priv->mc_io, 0,
+ priv->mc_token, i,
+ &rule_cfg);
+ if (err)
+ break;
}
dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
@@ -635,7 +641,7 @@ static int num_rules(struct dpaa2_eth_priv *priv)
static int update_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *new_fs,
- int location)
+ unsigned int location)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_cls_rule *rule;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index d9b6918807af..fd069f67be9b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -59,6 +59,10 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
+#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
+#define DPNI_CMDID_CLR_QOS_TBL DPNI_CMD(0x243)
#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
@@ -567,4 +571,59 @@ struct dpni_cmd_remove_fs_entry {
__le64 mask_iova;
};
+#define DPNI_DISCARD_ON_MISS_SHIFT 0
+#define DPNI_DISCARD_ON_MISS_SIZE 1
+
+struct dpni_cmd_set_qos_table {
+ __le32 pad;
+ u8 default_tc;
+ /* only the LSB */
+ u8 discard_on_miss;
+ __le16 pad1[21];
+ __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_qos_entry {
+ __le16 pad;
+ u8 tc_id;
+ u8 key_size;
+ __le16 index;
+ __le16 pad1;
+ __le64 key_iova;
+ __le64 mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+ u8 pad[3];
+ u8 key_size;
+ __le32 pad1;
+ __le64 key_iova;
+ __le64 mask_iova;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_CONG_UNITS_SHIFT 4
+#define DPNI_CONG_UNITS_SIZE 2
+
+struct dpni_cmd_set_congestion_notification {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 tc;
+ u8 pad[6];
+ /* cmd word 1 */
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ u8 type_units;
+ /* cmd word 2 */
+ __le64 message_iova;
+ /* cmd word 3 */
+ __le64 message_ctx;
+ /* cmd word 4 */
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index dd54e6953aeb..6b479ba66465 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1355,6 +1355,52 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ const struct dpni_congestion_notification_cfg *cfg)
+{
+ struct dpni_cmd_set_congestion_notification *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header =
+ mc_encode_cmd_header(DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ dpni_set_field(cmd_params->type_units, DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
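
A hedged sketch of how a driver might wire this up for PFC, using the per-TC thresholds defined in dpaa2-eth.h; the exact call site is not part of this hunk, and DPNI_CONGESTION_UNIT_FRAMES is assumed to be a value of enum dpni_congestion_unit:

	/* Sketch: ask the MC to raise/clear congestion state per Rx TC, in
	 * frame units, and trigger flow control toward the MAC. dest_cfg is
	 * left zeroed (DPNI_DEST_NONE): no DPIO/DPCON notification needed.
	 */
	struct dpni_congestion_notification_cfg cfg = {
		.units = DPNI_CONGESTION_UNIT_FRAMES,
		.threshold_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv),
		.threshold_exit = DPAA2_ETH_CN_THRESH_EXIT(priv),
		.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL,
	};
	int tc, err;

	for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
		err = dpni_set_congestion_notification(priv->mc_io, 0,
						       priv->mc_token,
						       DPNI_QUEUE_RX, tc, &cfg);
		if (err)
			return err;
	}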
+
+/**
* dpni_set_queue() - Set queue parameters
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -1786,3 +1832,134 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * warning: Before calling this function, call dpkg_prepare_key_cfg() to
+ * prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg)
+{
+ struct dpni_cmd_set_qos_table *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+ cmd_params->default_tc = cfg->default_tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+ dpni_set_field(cmd_params->discard_on_miss, DISCARD_ON_MISS,
+ cfg->discard_on_miss);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to add
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the QoS table at which to insert the entry.
+ * Only relevant if MASKING is enabled for QoS classification on
+ * this DPNI; it is ignored for exact match.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index)
+{
+ struct dpni_cmd_add_qos_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_qos_entry() - Remove QoS mapping entry
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_qos_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_qos_table() - Clear all QoS mapping entries
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Following this function call, all frames are directed to
+ * the default traffic class (0)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index ee0711d06b3a..e874d8084142 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -514,6 +514,11 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io,
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
/**
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
+
+/**
* struct - Structure representing DPNI link configuration
* @rate: Rate
* @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
@@ -716,6 +721,26 @@ int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
const struct dpni_rx_dist_cfg *cfg);
/**
+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes of DMA-able memory
+ * filled with the key extractions to be used as the QoS criteria;
+ * prepare it by calling dpkg_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ * '0' to use the 'default_tc' in such cases
+ * @default_tc: Used in case of no-match and 'discard_on_miss' = 0
+ */
+struct dpni_qos_tbl_cfg {
+ u64 key_cfg_iova;
+ int discard_on_miss;
+ u8 default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg);
+
+/**
* enum dpni_dest - DPNI destination types
* @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
* does not generate FQDAN notifications; user is expected to
@@ -858,6 +883,62 @@ enum dpni_congestion_point {
};
/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid
+ * values are 0-1 or 0-7, depending on the number of priorities
+ * in that channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+ enum dpni_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * This congestion will trigger flow control or priority flow control.
+ * This will have effect only if flow control is enabled with
+ * dpni_set_link_cfg().
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: Units type
+ * @threshold_entry: Above this threshold we enter a congestion state;
+ * set it to '0' to disable it
+ * @threshold_exit: Below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
+ * is contained in 'notification_mode'
+ * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
+struct dpni_congestion_notification_cfg {
+ enum dpni_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpni_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ const struct dpni_congestion_notification_cfg *cfg);
+
+/**
* struct dpni_taildrop - Structure representing the taildrop
* @enable: Indicates whether the taildrop is active or not.
* @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
@@ -961,6 +1042,22 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
u8 tc_id,
const struct dpni_rule_cfg *cfg);
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
int dpni_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index ccf2611f4a20..298c55786fd9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -756,6 +756,9 @@ void enetc_get_si_caps(struct enetc_si *si)
if (val & ENETC_SIPCAPR0_QBV)
si->hw_features |= ENETC_SI_F_QBV;
+
+ if (val & ENETC_SIPCAPR0_PSFP)
+ si->hw_features |= ENETC_SI_F_PSFP;
}
static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
@@ -1518,6 +1521,8 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return enetc_setup_tc_cbs(ndev, type_data);
case TC_SETUP_QDISC_ETF:
return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -1567,15 +1572,42 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
+static int enetc_set_psfp(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
+ int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
- return 0;
+ if (changed & NETIF_F_HW_TC)
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+
+ return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 56c43f35b633..b705464f6882 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -151,6 +151,7 @@ enum enetc_errata {
};
#define ENETC_SI_F_QBV BIT(0)
+#define ENETC_SI_F_PSFP BIT(1)
/* PCI IEP device data */
struct enetc_si {
@@ -203,12 +204,20 @@ struct enetc_cls_rule {
};
#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+struct psfp_cap {
+ u32 max_streamid;
+ u32 max_psfp_filter;
+ u32 max_psfp_gate;
+ u32 max_psfp_gatelist;
+ u32 max_psfp_meter;
+};
/* TODO: more hardware offloads */
enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(0),
ENETC_F_TX_TSTAMP = BIT(1),
ENETC_F_QBV = BIT(2),
+ ENETC_F_QCI = BIT(3),
};
struct enetc_ndev_priv {
@@ -231,6 +240,8 @@ struct enetc_ndev_priv {
struct enetc_cls_rule *cls_rules;
+ struct psfp_cap psfp_cap;
+
struct device_node *phy_node;
phy_interface_t if_mode;
};
@@ -289,9 +300,84 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct net_device *ndev);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
+int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
+int enetc_psfp_init(struct enetc_ndev_priv *priv);
+int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+
+static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
+{
+ u32 reg;
+
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
+ /* Port stream filter capability */
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
+ /* Port stream gate capability */
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
+ priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
+ /* Port flow meter capability */
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
+}
+
+static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int err;
+
+ enetc_get_max_cap(priv);
+
+ err = enetc_psfp_init(priv);
+ if (err)
+ return err;
+
+ enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
+ ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
+ ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);
+
+ return 0;
+}
+
+static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int err;
+
+ err = enetc_psfp_clean(priv);
+ if (err)
+ return err;
+
+ enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
+ ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
+ ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);
+
+ memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));
+
+ return 0;
+}
+
#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(ndev) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_block_cb NULL
+
+#define enetc_get_max_cap(p) \
+ memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))
+
+static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
+{
+ return 0;
+}
+
+static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 2a6523136947..6314051bc6c1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -19,6 +19,7 @@
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
#define ENETC_SIPCAPR0_QBV BIT(4)
+#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
@@ -228,6 +229,15 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
#define ENETC_PM0_IFM_XGMII BIT(12)
+#define ENETC_PSIDCAPR 0x1b08
+#define ENETC_PSIDCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSFCAPR 0x1b18
+#define ENETC_PSFCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSGCAPR 0x1b28
+#define ENETC_PSGCAPR_GCL_MSK GENMASK(18, 16)
+#define ENETC_PSGCAPR_SGIT_MSK GENMASK(15, 0)
+#define ENETC_PFMCAPR 0x1b38
+#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
/* MAC counters */
#define ENETC_PM0_REOCT 0x8100
@@ -557,6 +567,9 @@ enum bdcr_cmd_class {
BDCR_CMD_RFS,
BDCR_CMD_PORT_GCL,
BDCR_CMD_RECV_CLASSIFIER,
+ BDCR_CMD_STREAM_IDENTIFY,
+ BDCR_CMD_STREAM_FILTER,
+ BDCR_CMD_STREAM_GCL,
__BDCR_CMD_MAX_LEN,
BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
};
@@ -588,13 +601,152 @@ struct tgs_gcl_data {
struct gce entry[];
};
+/* class 7, command 0, Stream Identity Entry Configuration */
+struct streamid_conf {
+ __le32 stream_handle; /* init gate value */
+ __le32 iports;
+ u8 id_type;
+ u8 oui[3];
+ u8 res[3];
+ u8 en;
+};
+
+#define ENETC_CBDR_SID_VID_MASK 0xfff
+#define ENETC_CBDR_SID_VIDM BIT(12)
+#define ENETC_CBDR_SID_TG_MASK 0xc000
+/* The streamid_conf address points to this data space */
+struct streamid_data {
+ union {
+ u8 dmac[6];
+ u8 smac[6];
+ };
+ u16 vid_vidm_tg;
+};
+
+#define ENETC_CBDR_SFI_PRI_MASK 0x7
+#define ENETC_CBDR_SFI_PRIM BIT(3)
+#define ENETC_CBDR_SFI_BLOV BIT(4)
+#define ENETC_CBDR_SFI_BLEN BIT(5)
+#define ENETC_CBDR_SFI_MSDUEN BIT(6)
+#define ENETC_CBDR_SFI_FMITEN BIT(7)
+#define ENETC_CBDR_SFI_ENABLE BIT(7)
+/* class 8, command 0, Stream Filter Instance, Short Format */
+struct sfi_conf {
+ __le32 stream_handle;
+ u8 multi;
+ u8 res[2];
+ u8 sthm;
+ /* Max Service Data Unit or Flow Meter Instance Table index.
+ * Depending on the value of FLT this represents either Max
+ * Service Data Unit (max frame size) allowed by the filter
+ * entry or is an index into the Flow Meter Instance table
+ * index identifying the policer which will be used to police
+ * it.
+ */
+ __le16 fm_inst_table_index;
+ __le16 msdu;
+ __le16 sg_inst_table_index;
+ u8 res1[2];
+ __le32 input_ports;
+ u8 res2[3];
+ u8 en;
+};
+
+/* class 8, command 2, Stream Filter Instance status query, short format.
+ * The command itself needs no structure definition; the response data
+ * is the Stream Filter Instance Query Statistics below.
+ */
+struct sfi_counter_data {
+ u32 matchl;
+ u32 matchh;
+ u32 msdu_dropl;
+ u32 msdu_droph;
+ u32 stream_gate_dropl;
+ u32 stream_gate_droph;
+ u32 flow_meter_dropl;
+ u32 flow_meter_droph;
+};
+
+#define ENETC_CBDR_SGI_OIPV_MASK 0x7
+#define ENETC_CBDR_SGI_OIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_CGTST BIT(6)
+#define ENETC_CBDR_SGI_OGTST BIT(7)
+#define ENETC_CBDR_SGI_CFG_CHG BIT(1)
+#define ENETC_CBDR_SGI_CFG_PND BIT(2)
+#define ENETC_CBDR_SGI_OEX BIT(4)
+#define ENETC_CBDR_SGI_OEXEN BIT(5)
+#define ENETC_CBDR_SGI_IRX BIT(6)
+#define ENETC_CBDR_SGI_IRXEN BIT(7)
+#define ENETC_CBDR_SGI_ACLLEN_MASK 0x3
+#define ENETC_CBDR_SGI_OCLLEN_MASK 0xc
+#define ENETC_CBDR_SGI_EN BIT(7)
+/* class 9, command 0, Stream Gate Instance Table, Short Format
+ * class 9, command 2, Stream Gate Instance Table entry query write back
+ * Short Format
+ */
+struct sgi_table {
+ u8 res[8];
+ u8 oipv;
+ u8 res0[2];
+ u8 ocgtst;
+ u8 res1[7];
+ u8 gset;
+ u8 oacl_len;
+ u8 res2[2];
+ u8 en;
+};
+
+#define ENETC_CBDR_SGI_AIPV_MASK 0x7
+#define ENETC_CBDR_SGI_AIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_AGTST BIT(7)
+
+/* class 9, command 1, Stream Gate Control List, Long Format */
+struct sgcl_conf {
+ u8 aipv;
+ u8 res[2];
+ u8 agtst;
+ u8 res1[4];
+ union {
+ struct {
+ u8 res2[4];
+ u8 acl_len;
+ u8 res3[3];
+ };
+ u8 cct[8]; /* Config change time */
+ };
+};
+
+#define ENETC_CBDR_SGL_IOMEN BIT(0)
+#define ENETC_CBDR_SGL_IPVEN BIT(3)
+#define ENETC_CBDR_SGL_GTST BIT(4)
+#define ENETC_CBDR_SGL_IPV_MASK 0xe
+/* Stream Gate Control List Entry */
+struct sgce {
+ u32 interval;
+ u8 msdu[3];
+ u8 multi;
+};
+
+/* stream control list, class 9, cmd 1 data buffer */
+struct sgcl_data {
+ u32 btl;
+ u32 bth;
+ u32 ct;
+ u32 cte;
+ struct sgce sgcl[0];
+};
+
struct enetc_cbd {
union{
+ struct sfi_conf sfi_conf;
+ struct sgi_table sgi_table;
struct {
__le32 addr[2];
union {
__le32 opt[4];
struct tgs_gcl_conf gcl_conf;
+ struct streamid_conf sid_set;
+ struct sgcl_conf sgcl_conf;
};
}; /* Long format */
__le32 data[6];
@@ -621,3 +773,10 @@ struct enetc_cbd {
/* Port time specific departure */
#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
#define ENETC_TSDE BIT(31)
+
+/* PSFP setting */
+#define ENETC_PPSFPMR 0x11b00
+#define ENETC_PPSFPMR_PSFPEN BIT(0)
+#define ENETC_PPSFPMR_VS BIT(1)
+#define ENETC_PPSFPMR_PVC BIT(2)
+#define ENETC_PPSFPMR_PVZC BIT(3)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 85e2b741df41..824d211ec00f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -50,21 +50,6 @@ static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
}
-static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx)
-{
- return pf->vlan_promisc_simap & BIT(si_idx);
-}
-
-static bool enetc_vlan_filter_is_on(struct enetc_pf *pf)
-{
- int i;
-
- for_each_set_bit(i, pf->active_vlans, VLAN_N_VID)
- return true;
-
- return false;
-}
-
static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
{
pf->vlan_promisc_simap |= BIT(si_idx);
@@ -204,6 +189,7 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_pf *pf = enetc_si_priv(priv->si);
+ char vlan_promisc_simap = pf->vlan_promisc_simap;
struct enetc_hw *hw = &priv->si->hw;
bool uprom = false, mprom = false;
struct enetc_mac_filter *filter;
@@ -216,16 +202,16 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
uprom = true;
mprom = true;
- /* enable VLAN promisc mode for SI0 */
- if (!enetc_si_vlan_promisc_is_on(pf, 0))
- enetc_enable_si_vlan_promisc(pf, 0);
-
+ /* Enable VLAN promiscuous mode for SI0 (PF) */
+ vlan_promisc_simap |= BIT(0);
} else if (ndev->flags & IFF_ALLMULTI) {
/* enable multi cast promisc mode for SI0 (PF) */
psipmr = ENETC_PSIPMR_SET_MP(0);
mprom = true;
}
+ enetc_set_vlan_promisc(&pf->si->hw, vlan_promisc_simap);
+
/* first 2 filter entries belong to PF */
if (!uprom) {
/* Update unicast filters */
@@ -306,9 +292,6 @@ static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
struct enetc_pf *pf = enetc_si_priv(priv->si);
int idx;
- if (enetc_si_vlan_promisc_is_on(pf, 0))
- enetc_disable_si_vlan_promisc(pf, 0);
-
__set_bit(vid, pf->active_vlans);
idx = enetc_vid_hash_idx(vid);
@@ -326,9 +309,6 @@ static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
__clear_bit(vid, pf->active_vlans);
enetc_sync_vlan_ht_filter(pf, true);
- if (!enetc_vlan_filter_is_on(pf))
- enetc_enable_si_vlan_promisc(pf, 0);
-
return 0;
}
@@ -677,6 +657,15 @@ static int enetc_pf_set_features(struct net_device *ndev,
enetc_enable_txvlan(&priv->si->hw, 0,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (!!(features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ enetc_disable_si_vlan_promisc(pf, 0);
+ else
+ enetc_enable_si_vlan_promisc(pf, 0);
+ }
+
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
@@ -719,12 +708,11 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_LOOPBACK;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
@@ -739,6 +727,12 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
if (si->hw_features & ENETC_SI_F_QBV)
priv->active_offloads |= ENETC_F_QBV;
+ if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
+ priv->active_offloads |= ENETC_F_QCI;
+ ndev->features |= NETIF_F_HW_TC;
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 0c6bf3a55a9a..fd3df19eaa32 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -5,6 +5,9 @@
#include <net/pkt_sched.h>
#include <linux/math64.h>
+#include <linux/refcount.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gate.h>
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
@@ -331,3 +334,1103 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
return 0;
}
+
+enum streamid_type {
+ STREAMID_TYPE_RESERVED = 0,
+ STREAMID_TYPE_NULL,
+ STREAMID_TYPE_SMAC,
+};
+
+enum streamid_vlan_tagged {
+ STREAMID_VLAN_RESERVED = 0,
+ STREAMID_VLAN_TAGGED,
+ STREAMID_VLAN_UNTAGGED,
+ STREAMID_VLAN_ALL,
+};
+
+#define ENETC_PSFP_WILDCARD -1
+#define HANDLE_OFFSET 100
+
+enum forward_type {
+ FILTER_ACTION_TYPE_PSFP = BIT(0),
+ FILTER_ACTION_TYPE_ACL = BIT(1),
+ FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
+};
+
+/* Limits the allowed offload (output) type for a given set of input actions */
+struct actions_fwd {
+ u64 actions;
+ u64 keys; /* includes the mandatory keys */
+ enum forward_type output;
+};
+
+struct psfp_streamfilter_counters {
+ u64 matching_frames_count;
+ u64 passing_frames_count;
+ u64 not_passing_frames_count;
+ u64 passing_sdu_count;
+ u64 not_passing_sdu_count;
+ u64 red_frames_count;
+};
+
+struct enetc_streamid {
+ u32 index;
+ union {
+ u8 src_mac[6];
+ u8 dst_mac[6];
+ };
+ u8 filtertype;
+ u16 vid;
+ u8 tagged;
+ s32 handle;
+};
+
+struct enetc_psfp_filter {
+ u32 index;
+ s32 handle;
+ s8 prio;
+ u32 gate_id;
+ s32 meter_id;
+ refcount_t refcount;
+ struct hlist_node node;
+};
+
+struct enetc_psfp_gate {
+ u32 index;
+ s8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletimext;
+ u32 num_entries;
+ refcount_t refcount;
+ struct hlist_node node;
+ struct action_gate_entry entries[0];
+};
+
+struct enetc_stream_filter {
+ struct enetc_streamid sid;
+ u32 sfi_index;
+ u32 sgi_index;
+ struct flow_stats stats;
+ struct hlist_node node;
+};
+
+struct enetc_psfp {
+ unsigned long dev_bitmap;
+ unsigned long *psfp_sfi_bitmap;
+ struct hlist_head stream_list;
+ struct hlist_head psfp_filter_list;
+ struct hlist_head psfp_gate_list;
+ spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
+};
+
+static struct actions_fwd enetc_act_fwd[] = {
+ {
+ BIT(FLOW_ACTION_GATE),
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+ FILTER_ACTION_TYPE_PSFP
+ },
+ /* example for ACL actions */
+ {
+ BIT(FLOW_ACTION_DROP),
+ 0,
+ FILTER_ACTION_TYPE_ACL
+ }
+};
+
+static struct enetc_psfp epsfp = {
+ .psfp_sfi_bitmap = NULL,
+};
+
+static LIST_HEAD(enetc_block_cb_list);
+
+static inline int enetc_get_port(struct enetc_ndev_priv *priv)
+{
+ return priv->si->pdev->devfn & 0x7;
+}
+
+/* Stream Identity Entry Set Descriptor */
+static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_streamid *sid,
+ u8 enable)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct streamid_data *si_data;
+ struct streamid_conf *si_conf;
+ u16 data_size;
+ dma_addr_t dma;
+ int err;
+
+ if (sid->index >= priv->psfp_cap.max_streamid)
+ return -EINVAL;
+
+ if (sid->filtertype != STREAMID_TYPE_NULL &&
+ sid->filtertype != STREAMID_TYPE_SMAC)
+ return -EOPNOTSUPP;
+
+ /* Disable operation before enable */
+ cbd.index = cpu_to_le16((u16)sid->index);
+ cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
+ cbd.status_flags = 0;
+
+ data_size = sizeof(struct streamid_data);
+ si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!si_data)
+ return -ENOMEM;
+ cbd.length = cpu_to_le16(data_size);
+
+ dma = dma_map_single(&priv->si->pdev->dev, si_data,
+ data_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(si_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+ memset(si_data->dmac, 0xff, ETH_ALEN);
+ si_data->vid_vidm_tg =
+ cpu_to_le16(ENETC_CBDR_SID_VID_MASK
+ + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
+
+ si_conf = &cbd.sid_set;
+ /* Each entry supports a single port; select our own port */
+ si_conf->iports = 1 << enetc_get_port(priv);
+ si_conf->id_type = 1;
+ si_conf->oui[2] = 0x0;
+ si_conf->oui[1] = 0x80;
+ si_conf->oui[0] = 0xC2;
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err) {
+ kfree(si_data);
+ return -EINVAL;
+ }
+
+ if (!enable) {
+ kfree(si_data);
+ return 0;
+ }
+
+ /* Enable the entry overwrite again in case the space was flushed by hardware */
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.index = cpu_to_le16((u16)sid->index);
+ cbd.cmd = 0;
+ cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
+ cbd.status_flags = 0;
+
+ si_conf->en = 0x80;
+ si_conf->stream_handle = cpu_to_le32(sid->handle);
+ si_conf->iports = 1 << enetc_get_port(priv);
+ si_conf->id_type = sid->filtertype;
+ si_conf->oui[2] = 0x0;
+ si_conf->oui[1] = 0x80;
+ si_conf->oui[0] = 0xC2;
+
+ memset(si_data, 0, data_size);
+
+ cbd.length = cpu_to_le16(data_size);
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+
+ /* VIDM defaults to 1.
+ * VID Match. If set (b1) then the VID must match, otherwise
+ * any VID is considered a match. VIDM setting is only used
+ * when TG is set to b01.
+ */
+ if (si_conf->id_type == STREAMID_TYPE_NULL) {
+ ether_addr_copy(si_data->dmac, sid->dst_mac);
+ si_data->vid_vidm_tg =
+ cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM));
+ } else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
+ ether_addr_copy(si_data->smac, sid->src_mac);
+ si_data->vid_vidm_tg =
+ cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM));
+ }
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ kfree(si_data);
+
+ return err;
+}
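
For reference, the vid_vidm_tg word packed above has the following layout, derived from the ENETC_CBDR_SID_* masks in enetc_hw.h and the streamid_vlan_tagged enum in enetc_qos.c:

	bits 0-11 : VID  (ENETC_CBDR_SID_VID_MASK, 0xfff)
	bit  12   : VIDM (VID match enable, BIT(12))
	bits 14-15: TG   (ENETC_CBDR_SID_TG_MASK, 0xc000:
	                  b01 tagged, b10 untagged, b11 all)

so the expression (vid & 0xfff) + (((tagged & 0x3) << 14) | ENETC_CBDR_SID_VIDM) packs all three fields into a single 16-bit word.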
+
+/* Stream Filter Instance Set Descriptor */
+static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_psfp_filter *sfi,
+ u8 enable)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct sfi_conf *sfi_config;
+
+ cbd.index = cpu_to_le16(sfi->index);
+ cbd.cls = BDCR_CMD_STREAM_FILTER;
+ cbd.status_flags = 0x80;
+ cbd.length = cpu_to_le16(1);
+
+ sfi_config = &cbd.sfi_conf;
+ if (!enable)
+ goto exit;
+
+ sfi_config->en = 0x80;
+
+ if (sfi->handle >= 0) {
+ sfi_config->stream_handle =
+ cpu_to_le32(sfi->handle);
+ sfi_config->sthm |= 0x80;
+ }
+
+ sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
+ sfi_config->input_ports = 1 << enetc_get_port(priv);
+
+ /* The priority value which may be matched against the
+ * frame's priority value to determine a match for this entry.
+ */
+ if (sfi->prio >= 0)
+ sfi_config->multi |= (sfi->prio & 0x7) | 0x8;
+
+ /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
+ * field as being either an MSDU value or an index into the Flow
+ * Meter Instance table.
+ * TODO: the max SDU limit is not enforced yet
+ */
+
+ if (sfi->meter_id >= 0) {
+ sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
+ sfi_config->multi |= 0x80;
+ }
+
+exit:
+ return enetc_send_cmd(priv->si, &cbd);
+}
+
+static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
+ u32 index,
+ struct psfp_streamfilter_counters *cnt)
+{
+ struct enetc_cbd cbd = { .cmd = 2 };
+ struct sfi_counter_data *data_buf;
+ dma_addr_t dma;
+ u16 data_size;
+ int err;
+
+ cbd.index = cpu_to_le16((u16)index);
+ cbd.cmd = 2;
+ cbd.cls = BDCR_CMD_STREAM_FILTER;
+ cbd.status_flags = 0;
+
+ data_size = sizeof(struct sfi_counter_data);
+ data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ dma = dma_map_single(&priv->si->pdev->dev, data_buf,
+ data_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+
+ cbd.length = cpu_to_le16(data_size);
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ goto exit;
+
+ cnt->matching_frames_count =
+ ((u64)le32_to_cpu(data_buf->matchh) << 32)
+ + le32_to_cpu(data_buf->matchl);
+
+ cnt->not_passing_sdu_count =
+ ((u64)le32_to_cpu(data_buf->msdu_droph) << 32)
+ + le32_to_cpu(data_buf->msdu_dropl);
+
+ cnt->passing_sdu_count = cnt->matching_frames_count
+ - cnt->not_passing_sdu_count;
+
+ cnt->not_passing_frames_count =
+ ((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32)
+ + le32_to_cpu(data_buf->stream_gate_dropl);
+
+ cnt->passing_frames_count = cnt->matching_frames_count
+ - cnt->not_passing_sdu_count
+ - cnt->not_passing_frames_count;
+
+ cnt->red_frames_count =
+ ((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32)
+ + le32_to_cpu(data_buf->flow_meter_dropl);
+
+exit:
+ kfree(data_buf);
+ return err;
+}
+
+static u64 get_ptp_now(struct enetc_hw *hw)
+{
+ u64 now_lo, now_hi, now;
+
+ now_lo = enetc_rd(hw, ENETC_SICTR0);
+ now_hi = enetc_rd(hw, ENETC_SICTR1);
+ now = now_lo | now_hi << 32;
+
+ return now;
+}
+
+static int get_start_ns(u64 now, u64 cycle, u64 *start)
+{
+ u64 n;
+
+ if (!cycle)
+ return -EFAULT;
+
+ n = div64_u64(now, cycle);
+
+ *start = (n + 1) * cycle;
+
+ return 0;
+}
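
A quick worked example of get_start_ns(): with now = 1,000,000 ns and cycle = 300,000 ns, n = 1,000,000 / 300,000 = 3 (integer division), so *start = 4 * 300,000 = 1,200,000 ns, the first cycle boundary strictly after 'now'. Even when 'now' sits exactly on a boundary (now = 900,000 gives n = 3, start = 1,200,000), the result is the next boundary, which keeps the computed gate list start time in the future.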
+
+/* Stream Gate Instance Set Descriptor */
+static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_psfp_gate *sgi,
+ u8 enable)
+{
+ struct enetc_cbd cbd = { .cmd = 0 };
+ struct sgi_table *sgi_config;
+ struct sgcl_conf *sgcl_config;
+ struct sgcl_data *sgcl_data;
+ struct sgce *sgce;
+ dma_addr_t dma;
+ u16 data_size;
+ int err, i;
+ u64 now;
+
+ cbd.index = cpu_to_le16(sgi->index);
+ cbd.cmd = 0;
+ cbd.cls = BDCR_CMD_STREAM_GCL;
+ cbd.status_flags = 0x80;
+
+ /* disable */
+ if (!enable)
+ return enetc_send_cmd(priv->si, &cbd);
+
+ if (!sgi->num_entries)
+ return 0;
+
+ if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
+ !sgi->cycletime)
+ return -EINVAL;
+
+ /* enable */
+ sgi_config = &cbd.sgi_table;
+
+ /* Keep the gate open before the gate list starts */
+ sgi_config->ocgtst = 0x80;
+
+ sgi_config->oipv = (sgi->init_ipv < 0) ?
+ 0x0 : ((sgi->init_ipv & 0x7) | 0x8);
+
+ sgi_config->en = 0x80;
+
+ /* Basic config */
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ return -EINVAL;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.index = cpu_to_le16(sgi->index);
+ cbd.cmd = 1;
+ cbd.cls = BDCR_CMD_STREAM_GCL;
+ cbd.status_flags = 0;
+
+ sgcl_config = &cbd.sgcl_conf;
+
+ sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
+
+ data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
+
+ sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!sgcl_data)
+ return -ENOMEM;
+
+ cbd.length = cpu_to_le16(data_size);
+
+ dma = dma_map_single(&priv->si->pdev->dev,
+ sgcl_data, data_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(sgcl_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+
+ sgce = &sgcl_data->sgcl[0];
+
+ sgcl_config->agtst = 0x80;
+
+ sgcl_data->ct = cpu_to_le32(sgi->cycletime);
+ sgcl_data->cte = cpu_to_le32(sgi->cycletimext);
+
+ if (sgi->init_ipv >= 0)
+ sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;
+
+ for (i = 0; i < sgi->num_entries; i++) {
+ struct action_gate_entry *from = &sgi->entries[i];
+ struct sgce *to = &sgce[i];
+
+ if (from->gate_state)
+ to->multi |= 0x10;
+
+ if (from->ipv >= 0)
+ to->multi |= ((from->ipv & 0x7) << 5) | 0x08;
+
+ if (from->maxoctets >= 0) {
+ to->multi |= 0x01;
+ to->msdu[0] = from->maxoctets & 0xFF;
+ to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
+ to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
+ }
+
+ to->interval = cpu_to_le32(from->interval);
+ }
+
+ /* If basetime is less than now, calculate start time */
+ now = get_ptp_now(&priv->si->hw);
+
+ if (sgi->basetime < now) {
+ u64 start;
+
+ err = get_start_ns(now, sgi->cycletime, &start);
+ if (err)
+ goto exit;
+ sgcl_data->btl = cpu_to_le32(lower_32_bits(start));
+ sgcl_data->bth = cpu_to_le32(upper_32_bits(start));
+ } else {
+ u32 hi, lo;
+
+ hi = upper_32_bits(sgi->basetime);
+ lo = lower_32_bits(sgi->basetime);
+ sgcl_data->bth = cpu_to_le32(hi);
+ sgcl_data->btl = cpu_to_le32(lo);
+ }
+
+ err = enetc_send_cmd(priv->si, &cbd);
+
+exit:
+ kfree(sgcl_data);
+
+ return err;
+}
+
+static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
+{
+ struct enetc_stream_filter *f;
+
+ hlist_for_each_entry(f, &epsfp.stream_list, node)
+ if (f->sid.index == index)
+ return f;
+
+ return NULL;
+}
+
+static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
+{
+ struct enetc_psfp_gate *g;
+
+ hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
+ if (g->index == index)
+ return g;
+
+ return NULL;
+}
+
+static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
+{
+ struct enetc_psfp_filter *s;
+
+ hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+ if (s->index == index)
+ return s;
+
+ return NULL;
+}
+
+static struct enetc_psfp_filter
+ *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
+{
+ struct enetc_psfp_filter *s;
+
+ hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+ if (s->gate_id == sfi->gate_id &&
+ s->prio == sfi->prio &&
+ s->meter_id == sfi->meter_id)
+ return s;
+
+ return NULL;
+}
+
+static int enetc_get_free_index(struct enetc_ndev_priv *priv)
+{
+ u32 max_size = priv->psfp_cap.max_psfp_filter;
+ unsigned long index;
+
+ index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
+ if (index == max_size)
+ return -1;
+
+ return index;
+}
+
+static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+ struct enetc_psfp_filter *sfi;
+ u8 z;
+
+ sfi = enetc_get_filter_by_index(index);
+ WARN_ON(!sfi);
+ z = refcount_dec_and_test(&sfi->refcount);
+
+ if (z) {
+ enetc_streamfilter_hw_set(priv, sfi, false);
+ hlist_del(&sfi->node);
+ kfree(sfi);
+ clear_bit(index, epsfp.psfp_sfi_bitmap);
+ }
+}
+
+static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+ struct enetc_psfp_gate *sgi;
+ u8 z;
+
+ sgi = enetc_get_gate_by_index(index);
+ WARN_ON(!sgi);
+ z = refcount_dec_and_test(&sgi->refcount);
+ if (z) {
+ enetc_streamgate_hw_set(priv, sgi, false);
+ hlist_del(&sgi->node);
+ kfree(sgi);
+ }
+}
+
+static void remove_one_chain(struct enetc_ndev_priv *priv,
+ struct enetc_stream_filter *filter)
+{
+ stream_gate_unref(priv, filter->sgi_index);
+ stream_filter_unref(priv, filter->sfi_index);
+
+ hlist_del(&filter->node);
+ kfree(filter);
+}
+
+static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_streamid *sid,
+ struct enetc_psfp_filter *sfi,
+ struct enetc_psfp_gate *sgi)
+{
+ int err;
+
+ err = enetc_streamid_hw_set(priv, sid, true);
+ if (err)
+ return err;
+
+ if (sfi) {
+ err = enetc_streamfilter_hw_set(priv, sfi, true);
+ if (err)
+ goto revert_sid;
+ }
+
+ err = enetc_streamgate_hw_set(priv, sgi, true);
+ if (err)
+ goto revert_sfi;
+
+ return 0;
+
+revert_sfi:
+ if (sfi)
+ enetc_streamfilter_hw_set(priv, sfi, false);
+revert_sid:
+ enetc_streamid_hw_set(priv, sid, false);
+ return err;
+}
+
+static struct actions_fwd *enetc_check_flow_actions(u64 acts,
+ unsigned int inputkeys)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
+ if (acts == enetc_act_fwd[i].actions &&
+ inputkeys & enetc_act_fwd[i].keys)
+ return &enetc_act_fwd[i];
+
+ return NULL;
+}
+
+static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct enetc_stream_filter *filter, *old_filter;
+ struct enetc_psfp_filter *sfi, *old_sfi;
+ struct enetc_psfp_gate *sgi, *old_sgi;
+ struct flow_action_entry *entry;
+ struct action_gate_entry *e;
+ u8 sfi_overwrite = 0;
+ int entries_size;
+ int i, err;
+
+ if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
+ return -ENOSPC;
+ }
+
+ flow_action_for_each(i, entry, &rule->action)
+ if (entry->id == FLOW_ACTION_GATE)
+ break;
+
+ if (entry->id != FLOW_ACTION_GATE)
+ return -EINVAL;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->sid.index = f->common.chain_index;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->dst) &&
+ !is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot match on both source and destination MAC");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (!is_broadcast_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on destination MAC not supported");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ ether_addr_copy(filter->sid.dst_mac, match.key->dst);
+ filter->sid.filtertype = STREAMID_TYPE_NULL;
+ }
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (!is_broadcast_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on source MAC not supported");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ ether_addr_copy(filter->sid.src_mac, match.key->src);
+ filter->sid.filtertype = STREAMID_TYPE_SMAC;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority) {
+ if (match.mask->vlan_priority !=
+ (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ }
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ filter->sid.vid = match.key->vlan_id;
+ if (!filter->sid.vid)
+ filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
+ else
+ filter->sid.tagged = STREAMID_VLAN_TAGGED;
+ }
+ } else {
+ filter->sid.tagged = STREAMID_VLAN_ALL;
+ }
+
+ /* parsing gate action */
+ if (entry->gate.index >= priv->psfp_cap.max_psfp_gate) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+ err = -ENOSPC;
+ goto free_filter;
+ }
+
+ if (entry->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+ err = -ENOSPC;
+ goto free_filter;
+ }
+
+ entries_size = struct_size(sgi, entries, entry->gate.num_entries);
+ sgi = kzalloc(entries_size, GFP_KERNEL);
+ if (!sgi) {
+ err = -ENOMEM;
+ goto free_filter;
+ }
+
+ refcount_set(&sgi->refcount, 1);
+ sgi->index = entry->gate.index;
+ sgi->init_ipv = entry->gate.prio;
+ sgi->basetime = entry->gate.basetime;
+ sgi->cycletime = entry->gate.cycletime;
+ sgi->num_entries = entry->gate.num_entries;
+
+ e = sgi->entries;
+ for (i = 0; i < entry->gate.num_entries; i++) {
+ e[i].gate_state = entry->gate.entries[i].gate_state;
+ e[i].interval = entry->gate.entries[i].interval;
+ e[i].ipv = entry->gate.entries[i].ipv;
+ e[i].maxoctets = entry->gate.entries[i].maxoctets;
+ }
+
+ filter->sgi_index = sgi->index;
+
+ sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
+ if (!sfi) {
+ err = -ENOMEM;
+ goto free_gate;
+ }
+
+ refcount_set(&sfi->refcount, 1);
+ sfi->gate_id = sgi->index;
+
+ /* flow metering is not supported yet */
+ sfi->meter_id = ENETC_PSFP_WILDCARD;
+
+ /* the stream filter prio is taken from the flower rule's prio */
+ if (f->common.prio && f->common.prio <= BIT(3))
+ sfi->prio = f->common.prio - 1;
+ else
+ sfi->prio = ENETC_PSFP_WILDCARD;
+
+ old_sfi = enetc_psfp_check_sfi(sfi);
+ if (!old_sfi) {
+ int index;
+
+ index = enetc_get_free_index(priv);
+ if (index < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
+ err = -ENOSPC;
+ goto free_sfi;
+ }
+
+ sfi->index = index;
+ sfi->handle = index + HANDLE_OFFSET;
+ /* Update the stream filter handle also */
+ filter->sid.handle = sfi->handle;
+ filter->sfi_index = sfi->index;
+ sfi_overwrite = 0;
+ } else {
+ filter->sfi_index = old_sfi->index;
+ filter->sid.handle = old_sfi->handle;
+ sfi_overwrite = 1;
+ }
+
+ err = enetc_psfp_hw_set(priv, &filter->sid,
+ sfi_overwrite ? NULL : sfi, sgi);
+ if (err)
+ goto free_sfi;
+
+ spin_lock(&epsfp.psfp_lock);
+ /* Remove the old node if it exists and replace it with the new one */
+ old_sgi = enetc_get_gate_by_index(filter->sgi_index);
+ if (old_sgi) {
+ refcount_set(&sgi->refcount,
+ refcount_read(&old_sgi->refcount) + 1);
+ hlist_del(&old_sgi->node);
+ kfree(old_sgi);
+ }
+
+ hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
+
+ if (!old_sfi) {
+ hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
+ set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
+ } else {
+ kfree(sfi);
+ refcount_inc(&old_sfi->refcount);
+ }
+
+ old_filter = enetc_get_stream_by_index(filter->sid.index);
+ if (old_filter)
+ remove_one_chain(priv, old_filter);
+
+ filter->stats.lastused = jiffies;
+ hlist_add_head(&filter->node, &epsfp.stream_list);
+
+ spin_unlock(&epsfp.psfp_lock);
+
+ return 0;
+
+free_sfi:
+ kfree(sfi);
+free_gate:
+ kfree(sgi);
+free_filter:
+ kfree(filter);
+
+ return err;
+}
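
The gate table in the parser above is allocated in one shot with struct_size(), so the flexible entries[] array and its header share a single allocation. A hedged userspace equivalent, open-coding the size calculation that struct_size() performs (minus its overflow checking; all names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct gate_entry {
	unsigned int interval;
	int ipv;
};

struct gate_table {
	unsigned int num_entries;
	struct gate_entry entries[];	/* flexible array member */
};

int main(void)
{
	unsigned int n = 4;
	/* what struct_size(tbl, entries, n) computes, minus overflow checks */
	struct gate_table *tbl = calloc(1, sizeof(*tbl) + n * sizeof(tbl->entries[0]));

	if (!tbl)
		return 1;
	tbl->num_entries = n;
	for (unsigned int i = 0; i < n; i++)
		tbl->entries[i].interval = (i + 1) * 100000;
	printf("entry 3 interval: %u\n", tbl->entries[3].interval);
	free(tbl);
	return 0;
}
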
+
+static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_action *action = &rule->action;
+ struct flow_action_entry *entry;
+ struct actions_fwd *fwd;
+ u64 actions = 0;
+ int i, err;
+
+ if (!flow_action_has_entries(action)) {
+ NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
+ return -EINVAL;
+ }
+
+ flow_action_for_each(i, entry, action)
+ actions |= BIT(entry->id);
+
+ fwd = enetc_check_flow_actions(actions, dissector->used_keys);
+ if (!fwd) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
+ return -EOPNOTSUPP;
+ }
+
+ if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
+ err = enetc_psfp_parse_clsflower(priv, cls_flower);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
+ return err;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct enetc_stream_filter *filter;
+ int err;
+
+ if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
+ return -ENOSPC;
+ }
+
+ filter = enetc_get_stream_by_index(f->common.chain_index);
+ if (!filter)
+ return -EINVAL;
+
+ err = enetc_streamid_hw_set(priv, &filter->sid, false);
+ if (err)
+ return err;
+
+ remove_one_chain(priv, filter);
+
+ return 0;
+}
+
+static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ return enetc_psfp_destroy_clsflower(priv, f);
+}
+
+static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct psfp_streamfilter_counters counters = {};
+ struct enetc_stream_filter *filter;
+ struct flow_stats stats = {};
+ int err;
+
+ filter = enetc_get_stream_by_index(f->common.chain_index);
+ if (!filter)
+ return -EINVAL;
+
+ err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
+ if (err)
+ return -EINVAL;
+
+ spin_lock(&epsfp.psfp_lock);
+ stats.pkts = counters.matching_frames_count - filter->stats.pkts;
+ stats.lastused = filter->stats.lastused;
+ filter->stats.pkts += stats.pkts;
+ spin_unlock(&epsfp.psfp_lock);
+
+ flow_stats_update(&f->stats, 0x0, stats.pkts, stats.lastused,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+ return 0;
+}
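
enetc_psfp_get_stats() reports only the delta of a cumulative hardware counter since the previous query, then folds that delta back into the stored total. A small sketch of that bookkeeping, with hypothetical names and no locking:

#include <stdint.h>
#include <stdio.h>

struct sw_stats {
	uint64_t pkts;	/* total already reported */
};

/* Return only the packets seen since the last call, given the
 * cumulative count read from hardware.
 */
static uint64_t stats_delta(struct sw_stats *s, uint64_t hw_total)
{
	uint64_t delta = hw_total - s->pkts;

	s->pkts += delta;
	return delta;
}

int main(void)
{
	struct sw_stats s = { 0 };

	printf("%llu\n", (unsigned long long)stats_delta(&s, 100)); /* 100 */
	printf("%llu\n", (unsigned long long)stats_delta(&s, 250)); /* 150 */
	return 0;
}
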
+
+static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return enetc_config_clsflower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return enetc_destroy_clsflower(priv, cls_flower);
+ case FLOW_CLS_STATS:
+ return enetc_psfp_get_stats(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static inline void clean_psfp_sfi_bitmap(void)
+{
+ bitmap_free(epsfp.psfp_sfi_bitmap);
+ epsfp.psfp_sfi_bitmap = NULL;
+}
+
+static void clean_stream_list(void)
+{
+ struct enetc_stream_filter *s;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
+ hlist_del(&s->node);
+ kfree(s);
+ }
+}
+
+static void clean_sfi_list(void)
+{
+ struct enetc_psfp_filter *sfi;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
+ hlist_del(&sfi->node);
+ kfree(sfi);
+ }
+}
+
+static void clean_sgi_list(void)
+{
+ struct enetc_psfp_gate *sgi;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
+ hlist_del(&sgi->node);
+ kfree(sgi);
+ }
+}
+
+static void clean_psfp_all(void)
+{
+ /* Remove all list nodes and free all memory */
+ clean_sfi_list();
+ clean_sgi_list();
+ clean_stream_list();
+ epsfp.dev_bitmap = 0;
+ clean_psfp_sfi_bitmap();
+}
+
+int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct net_device *ndev = cb_priv;
+
+ if (!tc_can_offload(ndev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int enetc_psfp_init(struct enetc_ndev_priv *priv)
+{
+ if (epsfp.psfp_sfi_bitmap)
+ return 0;
+
+ epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
+ GFP_KERNEL);
+ if (!epsfp.psfp_sfi_bitmap)
+ return -ENOMEM;
+
+ spin_lock_init(&epsfp.psfp_lock);
+
+ if (list_empty(&enetc_block_cb_list))
+ epsfp.dev_bitmap = 0;
+
+ return 0;
+}
+
+int enetc_psfp_clean(struct enetc_ndev_priv *priv)
+{
+ if (!list_empty(&enetc_block_cb_list))
+ return -EBUSY;
+
+ clean_psfp_all();
+
+ return 0;
+}
+
+int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct flow_block_offload *f = type_data;
+ int err;
+
+ err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
+ enetc_setup_tc_block_cb,
+ ndev, ndev, true);
+ if (err)
+ return err;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ set_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ if (!epsfp.dev_bitmap)
+ clean_psfp_all();
+ break;
+ }
+
+ return 0;
+}
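
enetc_setup_tc_psfp() tracks which ports have bound a flow block in dev_bitmap and frees the shared PSFP state once the last port unbinds. The same last-user-out pattern as a standalone sketch (single unsigned long, helper names hypothetical):

#include <stdio.h>

static unsigned long dev_bitmap;

static void port_bind(unsigned int port)
{
	dev_bitmap |= 1UL << port;
}

/* Returns 1 when the last bound port goes away and the shared
 * state may be torn down.
 */
static int port_unbind(unsigned int port)
{
	dev_bitmap &= ~(1UL << port);
	return dev_bitmap == 0;
}

int main(void)
{
	port_bind(0);
	port_bind(2);
	printf("unbind 0, clean? %d\n", port_unbind(0)); /* 0 */
	printf("unbind 2, clean? %d\n", port_unbind(2)); /* 1 */
	return 0;
}
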
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index e74dd1f86bba..a6cdd5b61921 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -376,8 +376,7 @@ struct bufdesc_ex {
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
-#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
-#define FEC_NAPI_IMASK FEC_ENET_MII
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* ENET interrupt coalescing macro define */
@@ -543,7 +542,6 @@ struct fec_enet_private {
int link;
int full_duplex;
int speed;
- struct completion mdio_done;
int irq[FEC_IRQ_NUM];
bool bufdesc_ex;
int pause_flag;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index dc6f8763a5d4..2d0d313ee7c5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -88,8 +88,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
struct fec_devinfo {
u32 quirks;
- u8 stop_gpr_reg;
- u8 stop_gpr_bit;
};
static const struct fec_devinfo fec_imx25_info = {
@@ -112,8 +110,6 @@ static const struct fec_devinfo fec_imx6q_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
FEC_QUIRK_HAS_RACC,
- .stop_gpr_reg = 0x34,
- .stop_gpr_bit = 27,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -976,8 +972,8 @@ fec_restart(struct net_device *ndev)
writel((__force u32)cpu_to_be32(temp_mac[1]),
fep->hwp + FEC_ADDR_HIGH);
- /* Clear any outstanding interrupt. */
- writel(0xffffffff, fep->hwp + FEC_IEVENT);
+ /* Clear any outstanding interrupt, except MDIO. */
+ writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
fec_enet_bd_init(ndev);
@@ -1123,7 +1119,7 @@ fec_restart(struct net_device *ndev)
if (fep->link)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
else
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+ writel(0, fep->hwp + FEC_IMASK);
/* Init the interrupt coalescing */
fec_enet_itr_coal_init(ndev);
@@ -1652,6 +1648,10 @@ fec_enet_interrupt(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
int_events = readl(fep->hwp + FEC_IEVENT);
+
+ /* Don't clear MDIO events; we poll for those */
+ int_events &= ~FEC_ENET_MII;
+
writel(int_events, fep->hwp + FEC_IEVENT);
fec_enet_collect_events(fep, int_events);
@@ -1659,16 +1659,12 @@ fec_enet_interrupt(int irq, void *dev_id)
ret = IRQ_HANDLED;
if (napi_schedule_prep(&fep->napi)) {
- /* Disable the NAPI interrupts */
- writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
+ /* Disable interrupts */
+ writel(0, fep->hwp + FEC_IMASK);
__napi_schedule(&fep->napi);
}
}
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- complete(&fep->mdio_done);
- }
return ret;
}
@@ -1818,11 +1814,24 @@ static void fec_enet_adjust_link(struct net_device *ndev)
phy_print_status(phy_dev);
}
+static int fec_enet_mdio_wait(struct fec_enet_private *fep)
+{
+ uint ievent;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
+ ievent & FEC_ENET_MII, 2, 30000);
+
+ if (!ret)
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+ return ret;
+}
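
fec_enet_mdio_wait() replaces the completion/IRQ handshake with readl_poll_timeout_atomic(), which polls a register until a condition holds or a timeout expires, with one final check after the deadline. A rough userspace model of that loop, assuming a monotonic clock and omitting the inter-read delay (returns 0 on success and -1 on timeout, mirroring 0/-ETIMEDOUT):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000L + ts.tv_nsec / 1000L;
}

/* Poll cond(arg) until it is true or timeout_us elapses; one last
 * check after the deadline mirrors readl_poll_timeout_atomic().
 */
static int poll_timeout(bool (*cond)(void *), void *arg, long timeout_us)
{
	long deadline = now_us() + timeout_us;

	for (;;) {
		if (cond(arg))
			return 0;
		if (now_us() > deadline)
			return cond(arg) ? 0 : -1;
	}
}

static bool flag_set(void *p)
{
	return *(volatile int *)p != 0;
}

int main(void)
{
	int flag = 1;

	printf("%d\n", poll_timeout(flag_set, &flag, 30000)); /* 0 */
	return 0;
}
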
+
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
- unsigned long time_left;
int ret = 0, frame_start, frame_addr, frame_op;
bool is_c45 = !!(regnum & MII_ADDR_C45);
@@ -1830,8 +1839,6 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ret < 0)
return ret;
- reinit_completion(&fep->mdio_done);
-
if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -1843,11 +1850,9 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
netdev_err(fep->netdev, "MDIO address write timeout\n");
- ret = -ETIMEDOUT;
goto out;
}
@@ -1866,11 +1871,9 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
netdev_err(fep->netdev, "MDIO read timeout\n");
- ret = -ETIMEDOUT;
goto out;
}
@@ -1888,7 +1891,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
- unsigned long time_left;
int ret, frame_start, frame_addr;
bool is_c45 = !!(regnum & MII_ADDR_C45);
@@ -1898,8 +1900,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
else
ret = 0;
- reinit_completion(&fep->mdio_done);
-
if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -1911,11 +1911,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
netdev_err(fep->netdev, "MDIO address write timeout\n");
- ret = -ETIMEDOUT;
goto out;
}
} else {
@@ -1931,12 +1929,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret)
netdev_err(fep->netdev, "MDIO write timeout\n");
- ret = -ETIMEDOUT;
- }
out:
pm_runtime_mark_last_busy(dev);
@@ -1986,8 +1981,12 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
return 0;
failed_clk_ref:
- if (fep->clk_ref)
- clk_disable_unprepare(fep->clk_ref);
+ if (fep->clk_ptp) {
+ mutex_lock(&fep->ptp_clk_mutex);
+ clk_disable_unprepare(fep->clk_ptp);
+ fep->ptp_clk_on = false;
+ mutex_unlock(&fep->ptp_clk_mutex);
+ }
failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
@@ -2065,9 +2064,11 @@ static int fec_enet_mii_init(struct platform_device *pdev)
static struct mii_bus *fec0_mii_bus;
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ bool suppress_preamble = false;
struct device_node *node;
int err = -ENXIO;
u32 mii_speed, holdtime;
+ u32 bus_freq;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2095,15 +2096,23 @@ static int fec_enet_mii_init(struct platform_device *pdev)
return -ENOENT;
}
+ bus_freq = 2500000; /* 2.5 MHz by default */
+ node = of_get_child_by_name(pdev->dev.of_node, "mdio");
+ if (node) {
+ of_property_read_u32(node, "clock-frequency", &bus_freq);
+ suppress_preamble = of_property_read_bool(node,
+ "suppress-preamble");
+ }
+
/*
- * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ * Set MII speed (= clk_get_rate() / 2 * phy_speed)
*
* The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
 * Reference Manual has an error on this, which is corrected in the
 * i.MX6Q documentation.
*/
- mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+ mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
if (fep->quirks & FEC_QUIRK_ENET_MAC)
mii_speed--;
if (mii_speed > 63) {
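
The hunk above derives MII_SPEED from the requested MDIO bus frequency: mii_speed = DIV_ROUND_UP(ipg_clk, bus_freq * 2), decremented by one on ENET-MAC cores because their divider is (MII_SPEED + 1) x 2. A quick standalone check of the arithmetic for a 66 MHz ipg clock and the default 2.5 MHz bus (clock values illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long ipg_clk = 66000000;	/* Hz */
	unsigned long bus_freq = 2500000;	/* Hz, default MDC rate */
	unsigned long mii_speed = DIV_ROUND_UP(ipg_clk, bus_freq * 2);

	mii_speed--;	/* ENET-MAC: divider is (MII_SPEED + 1) * 2 */
	printf("MII_SPEED field: %lu\n", mii_speed);	/* 13 */
	printf("actual MDC: %lu Hz\n", ipg_clk / ((mii_speed + 1) * 2)); /* <= 2.5 MHz */
	return 0;
}
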
@@ -2130,8 +2139,24 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->phy_speed = mii_speed << 1 | holdtime << 8;
+ if (suppress_preamble)
+ fep->phy_speed |= BIT(7);
+
+ /* Clear MMFR to avoid generating an MII event when writing MSCR.
+ * MII event generation condition:
+ * - writing MSCR:
+ * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+ * mscr_reg_data_in[7:0] != 0
+ * - writing MMFR:
+ * - mscr[7:0]_not_zero
+ */
+ writel(0, fep->hwp + FEC_MII_DATA);
+
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ /* Clear any pending transaction complete indication */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
fep->mii_bus = mdiobus_alloc();
if (fep->mii_bus == NULL) {
err = -ENOMEM;
@@ -2146,7 +2171,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
- node = of_get_child_by_name(pdev->dev.of_node, "mdio");
err = of_mdiobus_register(fep->mii_bus, node);
of_node_put(node);
if (err)
@@ -3452,19 +3476,23 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev)
}
static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
- struct fec_devinfo *dev_info,
struct device_node *np)
{
struct device_node *gpr_np;
+ u32 out_val[3];
int ret = 0;
- if (!dev_info)
- return 0;
-
- gpr_np = of_parse_phandle(np, "gpr", 0);
+ gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
if (!gpr_np)
return 0;
+ ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
+ ARRAY_SIZE(out_val));
+ if (ret) {
+ dev_dbg(&fep->pdev->dev, "no stop mode property\n");
+ return ret;
+ }
+
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
if (IS_ERR(fep->stop_gpr.gpr)) {
dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
@@ -3473,8 +3501,8 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
goto out;
}
- fep->stop_gpr.reg = dev_info->stop_gpr_reg;
- fep->stop_gpr.bit = dev_info->stop_gpr_bit;
+ fep->stop_gpr.reg = out_val[1];
+ fep->stop_gpr.bit = out_val[2];
out:
of_node_put(gpr_np);
@@ -3551,7 +3579,7 @@ fec_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
- ret = fec_enet_init_stop_mode(fep, dev_info, np);
+ ret = fec_enet_init_stop_mode(fep, np);
if (ret)
goto failed_stop_mode;
@@ -3674,7 +3702,6 @@ fec_probe(struct platform_device *pdev)
fep->irq[i] = irq;
}
- init_completion(&fep->mdio_done);
ret = fec_enet_mii_init(pdev);
if (ret)
goto failed_mii_init;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 6e5f6dd169b5..552e7554a9f8 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -42,6 +42,7 @@
#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
#include <asm/machdep.h>
+#include <net/sch_generic.h>
#include "ucc_geth.h"
@@ -1548,11 +1549,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
- /* Prevent any further xmits, plus detach the device. */
- netif_device_detach(ugeth->ndev);
-
- /* Wait for any current xmits to finish. */
- netif_tx_disable(ugeth->ndev);
+ /* Prevent any further xmits */
+ netif_tx_stop_all_queues(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
disable_irq(ugeth->ug_info->uf_info.irq);
@@ -1565,7 +1563,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
- netif_device_attach(ugeth->ndev);
+
+ /* Allow transmission again */
+ netif_tx_wake_all_queues(ugeth->ndev);
+ __netdev_watchdog_up(ugeth->ndev);
}
/* Called every time the controller might need to be made
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 3892a2062404..2fff43509098 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -64,7 +64,7 @@ config HNS_MDIO
the PHY
config HNS
- tristate "Hisilicon Network Subsystem Support (Framework)"
+ tristate
---help---
This selects the framework support for Hisilicon Network Subsystem. It
is needed by any driver which provides HNS acceleration engine or make
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8aace2de0cc9..9a907947ba19 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -697,9 +697,9 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
return rc;
if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII)))
- is_c45 = 1;
+ is_c45 = true;
else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII)))
- is_c45 = 0;
+ is_c45 = false;
else
return -ENODATA;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 948e67ef30fd..1ffe8fac702d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -24,7 +24,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
- HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate respone to VF */
+ HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
@@ -45,6 +45,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
 HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is uninitializing */
+ HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
@@ -70,6 +71,10 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
};
+enum hclge_mbx_tbl_cfg_subcode {
+ HCLGE_MBX_VPORT_LIST_CLEAR,
+};
+
#define HCLGE_MBX_MAX_MSG_SIZE 14
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 5587605d6deb..d041cac9a487 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -145,7 +145,6 @@ enum hnae3_reset_notify_type {
HNAE3_DOWN_CLIENT,
HNAE3_INIT_CLIENT,
HNAE3_UNINIT_CLIENT,
- HNAE3_RESTORE_CLIENT,
};
enum hnae3_hw_error_type {
@@ -233,7 +232,6 @@ struct hnae3_ae_dev {
struct list_head node;
u32 flag;
unsigned long hw_err_reset_req;
- enum hnae3_reset_type reset_type;
void *priv;
};
@@ -270,6 +268,8 @@ struct hnae3_ae_dev {
* Set loopback
* set_promisc_mode
* Set promisc mode
+ * request_update_promisc_mode
+ * Request hclge(vf) to update the promisc mode
* set_mtu()
* set mtu
* get_pauseparam()
@@ -354,8 +354,6 @@ struct hnae3_ae_dev {
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
- * restore_vlan_table()
- * Restore vlan filter entries after reset
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
* set_gro_en
@@ -375,6 +373,8 @@ struct hnae3_ae_dev {
* Set the max tx rate of specified vf.
* set_vf_mac
* Configure the default MAC for specified VF
+ * get_module_eeprom
+ * Get the optical module eeprom info.
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -408,6 +408,7 @@ struct hnae3_ae_ops {
int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc);
+ void (*request_update_promisc_mode)(struct hnae3_handle *handle);
int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -525,7 +526,6 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd);
int (*get_fd_all_rules)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd, u32 *rule_locs);
- int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
@@ -539,7 +539,6 @@ struct hnae3_ae_ops {
void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
- void (*restore_vlan_table)(struct hnae3_handle *handle);
int (*get_vf_config)(struct hnae3_handle *handle, int vf,
struct ifla_vf_info *ivf);
int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
@@ -550,6 +549,9 @@ struct hnae3_ae_ops {
int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
int min_tx_rate, int max_tx_rate, bool force);
int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
+ int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data);
+ bool (*get_cmdq_stat)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
@@ -619,16 +621,6 @@ struct hnae3_roce_private_info {
unsigned long state;
};
-struct hnae3_unic_private_info {
- struct net_device *netdev;
- u16 rx_buf_len;
- u16 num_tx_desc;
- u16 num_rx_desc;
-
- u16 num_tqps; /* total number of tqps in this handle */
- struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
-};
-
#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0)
#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
@@ -654,7 +646,6 @@ struct hnae3_handle {
union {
struct net_device *netdev; /* first member */
struct hnae3_knic_private_info kinfo;
- struct hnae3_unic_private_info uinfo;
struct hnae3_roce_private_info rinfo;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index e1d88095a77e..fe7fb565da19 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -262,6 +262,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump mac tnl status\n");
dev_info(&h->pdev->dev, "dump loopback\n");
dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
+ dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
+ dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
@@ -270,7 +272,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
" [igu egu <port_id>] [rpu <tc_queue_num>]",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
strncat(printf_buf + strlen(printf_buf),
- " [rtc] [ppp] [rcb] [tqp <queue_num>]]\n",
+ " [rtc] [ppp] [rcb] [tqp <queue_num>] [mac]]\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index da98fd7c8eca..b14f2abc2425 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,7 +15,6 @@
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
-#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
@@ -41,10 +40,8 @@
} while (0)
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
-static void hns3_remove_hw_addr(struct net_device *netdev);
static const char hns3_driver_name[] = "hns3";
-const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
@@ -550,6 +547,13 @@ static int hns3_nic_uc_unsync(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ /* The request to remove the device address must be ignored, because
+ * the device address is stored, together with the other addresses of
+ * the uc list, in the function's mac filter list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
if (h->ae_algo->ops->rm_uc_addr)
return h->ae_algo->ops->rm_uc_addr(h, addr);
@@ -597,34 +601,25 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
u8 new_flags;
- int ret;
new_flags = hns3_get_netdev_flags(netdev);
- ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
- if (ret) {
- netdev_err(netdev, "sync uc address fail\n");
- if (ret == -ENOSPC)
- new_flags |= HNAE3_OVERFLOW_UPE;
- }
-
- if (netdev->flags & IFF_MULTICAST) {
- ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
- hns3_nic_mc_unsync);
- if (ret) {
- netdev_err(netdev, "sync mc address fail\n");
- if (ret == -ENOSPC)
- new_flags |= HNAE3_OVERFLOW_MPE;
- }
- }
+ __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
+ __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);
 /* When user-mode promisc is enabled, vlan filtering is disabled to
- * let all packets in. MAC-VLAN Table overflow Promisc enabled and
- * vlan fitering is enabled
+ * let all packets in.
*/
- hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
h->netdev_flags = new_flags;
- hns3_update_promisc_mode(netdev, new_flags);
+ hns3_request_update_promisc_mode(h);
+}
+
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (ops->request_update_promisc_mode)
+ ops->request_update_promisc_mode(handle);
}
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
@@ -1450,9 +1445,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
bd_num += ret;
- if (!skb_has_frag_list(skb))
- goto out;
-
skb_walk_frags(skb, frag_skb) {
ret = hns3_fill_skb_to_desc(ring, frag_skb,
DESC_TYPE_FRAGLIST_SKB);
@@ -1461,7 +1453,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
bd_num += ret;
}
-out:
+
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
@@ -1552,12 +1544,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
- if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- h->ae_algo->ops->enable_vlan_filter) {
- enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
- h->ae_algo->ops->enable_vlan_filter(h, enable);
- }
-
if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
h->ae_algo->ops->enable_hw_strip_rxvtag) {
enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
@@ -2107,7 +2093,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
- ae_dev->reset_type = HNAE3_NONE_RESET;
hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
@@ -3909,9 +3894,11 @@ static int hns3_init_mac_addr(struct net_device *netdev)
eth_hw_addr_random(netdev);
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
- } else {
+ } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
ether_addr_copy(netdev->dev_addr, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
+ } else {
+ return 0;
}
if (h->ae_algo->ops->set_mac_addr)
@@ -3939,17 +3926,6 @@ static void hns3_uninit_phy(struct net_device *netdev)
h->ae_algo->ops->mac_disconnect_phy(h);
}
-static int hns3_restore_fd_rules(struct net_device *netdev)
-{
- struct hnae3_handle *h = hns3_get_handle(netdev);
- int ret = 0;
-
- if (h->ae_algo->ops->restore_fd_rules)
- ret = h->ae_algo->ops->restore_fd_rules(h);
-
- return ret;
-}
-
static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -4121,8 +4097,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_remove_hw_addr(netdev);
-
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
@@ -4193,56 +4167,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
return hns3_nic_set_real_num_queue(ndev);
}
-static int hns3_recover_hw_addr(struct net_device *ndev)
-{
- struct netdev_hw_addr_list *list;
- struct netdev_hw_addr *ha, *tmp;
- int ret = 0;
-
- netif_addr_lock_bh(ndev);
- /* go through and sync uc_addr entries to the device */
- list = &ndev->uc;
- list_for_each_entry_safe(ha, tmp, &list->list, list) {
- ret = hns3_nic_uc_sync(ndev, ha->addr);
- if (ret)
- goto out;
- }
-
- /* go through and sync mc_addr entries to the device */
- list = &ndev->mc;
- list_for_each_entry_safe(ha, tmp, &list->list, list) {
- ret = hns3_nic_mc_sync(ndev, ha->addr);
- if (ret)
- goto out;
- }
-
-out:
- netif_addr_unlock_bh(ndev);
- return ret;
-}
-
-static void hns3_remove_hw_addr(struct net_device *netdev)
-{
- struct netdev_hw_addr_list *list;
- struct netdev_hw_addr *ha, *tmp;
-
- hns3_nic_uc_unsync(netdev, netdev->dev_addr);
-
- netif_addr_lock_bh(netdev);
- /* go through and unsync uc_addr entries to the device */
- list = &netdev->uc;
- list_for_each_entry_safe(ha, tmp, &list->list, list)
- hns3_nic_uc_unsync(netdev, ha->addr);
-
- /* go through and unsync mc_addr entries to the device */
- list = &netdev->mc;
- list_for_each_entry_safe(ha, tmp, &list->list, list)
- if (ha->refcount > 1)
- hns3_nic_mc_unsync(netdev, ha->addr);
-
- netif_addr_unlock_bh(netdev);
-}
-
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
@@ -4401,7 +4325,6 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
struct hns3_nic_priv *priv = netdev_priv(ndev);
@@ -4409,15 +4332,6 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return 0;
- /* it is cumbersome for hardware to pick-and-choose entries for deletion
- * from table space. Hence, for function reset software intervention is
- * required to delete the entries
- */
- if (hns3_dev_ongoing_func_reset(ae_dev)) {
- hns3_remove_hw_addr(ndev);
- hns3_del_all_fd_rules(ndev, false);
- }
-
if (!netif_running(ndev))
return 0;
@@ -4484,6 +4398,9 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
goto err_init_irq_fail;
}
+ if (!hns3_is_phys_func(handle->pdev))
+ hns3_init_mac_addr(netdev);
+
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
@@ -4509,33 +4426,6 @@ err_put_ring:
return ret;
}
-static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
-{
- struct net_device *netdev = handle->kinfo.netdev;
- bool vlan_filter_enable;
- int ret;
-
- ret = hns3_init_mac_addr(netdev);
- if (ret)
- return ret;
-
- ret = hns3_recover_hw_addr(netdev);
- if (ret)
- return ret;
-
- ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
- if (ret)
- return ret;
-
- vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
- hns3_enable_vlan_filter(netdev, vlan_filter_enable);
-
- if (handle->ae_algo->ops->restore_vlan_table)
- handle->ae_algo->ops->restore_vlan_table(handle);
-
- return hns3_restore_fd_rules(netdev);
-}
-
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
@@ -4585,9 +4475,6 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
case HNAE3_UNINIT_CLIENT:
ret = hns3_reset_notify_uninit_enet(handle);
break;
- case HNAE3_RESTORE_CLIENT:
- ret = hns3_reset_notify_restore_enet(handle);
- break;
default:
break;
}
@@ -4765,4 +4652,3 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
-MODULE_VERSION(HNS3_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index abefd7a179f7..66cd4395f781 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -8,10 +8,6 @@
#include "hnae3.h"
-#define HNS3_MOD_VERSION "1.0"
-
-extern const char hns3_driver_version[];
-
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
@@ -50,23 +46,6 @@ enum hns3_nic_state {
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
#define HNS3_RING_EN_REG 0x00090
-#define HNS3_RING_T0_BE_RST 0x00094
-#define HNS3_RING_COULD_BE_RST 0x00098
-#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
-
-#define HNS3_RING_INTMSK_RXWL_REG 0x000A0
-#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4
-#define HNS3_RX_RING_INT_STS_REG 0x000A8
-#define HNS3_RING_INTMSK_TXWL_REG 0x000AC
-#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0
-#define HNS3_TX_RING_INT_STS_REG 0x000B4
-#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8
-#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC
-#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4
-#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8
-
-#define HNS3_RING_MB_CTRL_REG 0x00100
-#define HNS3_RING_MB_DATA_BASE_REG 0x00200
#define HNS3_TX_REG_OFFSET 0x40
@@ -490,21 +469,8 @@ struct hns3_enet_tqp_vector {
unsigned long last_jiffies;
} ____cacheline_internodealigned_in_smp;
-enum hns3_udp_tnl_type {
- HNS3_UDP_TNL_VXLAN,
- HNS3_UDP_TNL_GENEVE,
- HNS3_UDP_TNL_MAX,
-};
-
-struct hns3_udp_tunnel {
- u16 dst_port;
- int used;
-};
-
struct hns3_nic_priv {
struct hnae3_handle *ae_handle;
- u32 enet_ver;
- u32 port_id;
struct net_device *netdev;
struct device *dev;
@@ -516,19 +482,10 @@ struct hns3_nic_priv {
struct hns3_enet_tqp_vector *tqp_vector;
u16 vector_num;
- /* The most recently read link state */
- int link;
u64 tx_timeout_count;
unsigned long state;
- struct timer_list service_timer;
-
- struct work_struct service_task;
-
- struct notifier_block notifier_block;
- /* Vxlan/Geneve information */
- struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
struct hns3_enet_coalesce tx_coal;
struct hns3_enet_coalesce rx_coal;
};
@@ -580,15 +537,6 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
writel(value, reg_addr + reg);
}
-static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
-{
- return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
- ae_dev->reset_type == HNAE3_FLR_RESET ||
- ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
- ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
- ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
-}
-
#define hns3_read_dev(a, reg) \
hns3_read_reg((a)->io_base, (reg))
@@ -662,6 +610,7 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 28b81f24afa1..6b1545f982aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/phy.h>
+#include <linux/sfp.h>
#include "hns3_enet.h"
@@ -12,6 +13,11 @@ struct hns3_stats {
int stats_offset;
};
+struct hns3_sfp_type {
+ u8 type;
+ u8 ext_type;
+};
+
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
@@ -99,7 +105,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
h->ae_algo->ops->set_promisc_mode(h, true, true);
} else {
/* recover promisc mode before loopback test */
- hns3_update_promisc_mode(ndev, h->netdev_flags);
+ hns3_request_update_promisc_mode(h);
vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(ndev, vlan_filter_enable);
}
@@ -546,10 +552,6 @@ static void hns3_get_drvinfo(struct net_device *netdev,
return;
}
- strncpy(drvinfo->version, hns3_driver_version,
- sizeof(drvinfo->version));
- drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
-
strncpy(drvinfo->driver, h->pdev->driver->name,
sizeof(drvinfo->driver));
drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
@@ -771,8 +773,13 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
/* Only support ksettings_set for netdev with phy attached for now */
- if (netdev->phydev)
+ if (netdev->phydev) {
+ if (cmd->base.speed == SPEED_1000 &&
+ cmd->base.autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+ }
if (handle->pdev->revision == 0x20)
return -EOPNOTSUPP;
@@ -1390,6 +1397,73 @@ static int hns3_set_fecparam(struct net_device *netdev,
return ops->set_fec(handle, fec_mode);
}
+static int hns3_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+#define HNS3_SFF_8636_V1_3 0x03
+
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_sfp_type sfp_type;
+ int ret;
+
+ if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ memset(&sfp_type, 0, sizeof(sfp_type));
+ ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8),
+ (u8 *)&sfp_type);
+ if (ret)
+ return ret;
+
+ switch (sfp_type.type) {
+ case SFF8024_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF8024_ID_QSFP_8438:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case SFF8024_ID_QSFP_8436_8636:
+ if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ }
+ break;
+ case SFF8024_ID_QSFP28_8636:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Optical module unknown: %#x\n",
+ sfp_type.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns3_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ return ops->get_module_eeprom(handle, ee->offset, ee->len, data);
+}
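
The hclge backend behind this ethtool op can move at most HCLGE_SFP_INFO_MAX_LEN bytes per command (20 bytes in BD0 plus 24 in each of the five remaining BDs, i.e. 140), so a full module dump is fetched in chunks. A hedged sketch of such chunked reading; the backend helper, fake EEPROM contents, and chunk size are purely illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_MAX	140	/* per-command transfer limit */

/* Hypothetical backend: copies up to CHUNK_MAX bytes at 'offset'. */
static int read_chunk(uint32_t offset, uint32_t len, uint8_t *out)
{
	static uint8_t eeprom[512] = { [0] = 0x03 };	/* fake module data */

	if (offset + len > sizeof(eeprom))
		return -1;
	memcpy(out, eeprom + offset, len);
	return (int)len;
}

static int read_eeprom(uint32_t offset, uint32_t len, uint8_t *data)
{
	while (len) {
		uint32_t n = len < CHUNK_MAX ? len : CHUNK_MAX;

		if (read_chunk(offset, n, data) < 0)
			return -1;
		offset += n;
		data += n;
		len -= n;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[256];

	printf("read: %d, id byte: %#x\n", read_eeprom(0, sizeof(buf), buf), buf[0]);
	return 0;
}
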
+
#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \
@@ -1453,6 +1527,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_fecparam = hns3_get_fecparam,
.set_fecparam = hns3_set_fecparam,
+ .get_module_info = hns3_get_module_info,
+ .get_module_eeprom = hns3_get_module_eeprom,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 0fb61d440d3b..6c28c8f6292c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 7f509eff562e..1d6c328bd9fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -11,8 +11,6 @@
#include "hnae3.h"
#include "hclge_main.h"
-#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -426,6 +424,9 @@ int hclge_cmd_init(struct hclge_dev *hdev)
* reset may happen when lower level reset is being processed.
*/
if ((hclge_is_reset_pending(hdev))) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init cmd since reset %#lx pending\n",
+ hdev->reset_pending);
ret = -EBUSY;
goto err_cmd_init;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 96498d9b4754..463f29151ef0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -184,11 +184,11 @@ enum hclge_opcode_type {
/* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
- HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
- HCLGE_OPC_QUERY_RX_STATUS = 0x0B13,
+ HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
@@ -270,6 +270,8 @@ enum hclge_opcode_type {
HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
/* SFP command */
+ HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
+ HCLGE_OPC_GET_SFP_EXIST = 0x7101,
HCLGE_OPC_GET_SFP_INFO = 0x7104,
/* Error INT commands */
@@ -733,31 +735,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-struct hclge_mac_vlan_add_cmd {
- __le16 flags;
- __le16 mac_addr_hi16;
- __le32 mac_addr_lo32;
- __le32 mac_addr_msk_hi32;
- __le16 mac_addr_msk_lo16;
- __le16 vlan_tag;
- __le16 ingress_port;
- __le16 egress_port;
- u8 rsv[4];
-};
-
-#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0
-struct hclge_mac_vlan_remove_cmd {
- __le16 flags;
- __le16 mac_addr_hi16;
- __le32 mac_addr_lo32;
- __le32 mac_addr_msk_hi32;
- __le16 mac_addr_msk_lo16;
- __le16 vlan_tag;
- __le16 ingress_port;
- __le16 egress_port;
- u8 rsv[4];
-};
-
struct hclge_vlan_filter_ctrl_cmd {
u8 vlan_type;
u8 vlan_fe;
@@ -907,8 +884,8 @@ struct hclge_cfg_tso_status_cmd {
#define HCLGE_GRO_EN_B 0
struct hclge_cfg_gro_status_cmd {
- __le16 gro_en;
- u8 rsv[22];
+ u8 gro_en;
+ u8 rsv[23];
};
#define HCLGE_TSO_MSS_MIN 256
@@ -1079,6 +1056,19 @@ struct hclge_firmware_compat_cmd {
u8 rsv[20];
};
+#define HCLGE_SFP_INFO_CMD_NUM 6
+#define HCLGE_SFP_INFO_BD0_LEN 20
+#define HCLGE_SFP_INFO_BDX_LEN 24
+#define HCLGE_SFP_INFO_MAX_LEN \
+ (HCLGE_SFP_INFO_BD0_LEN + \
+ (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN)
+
+struct hclge_sfp_info_bd0_cmd {
+ __le16 offset;
+ __le16 read_len;
+ u8 data[HCLGE_SFP_INFO_BD0_LEN];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 17228288d4df..26f6f068b01d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -143,7 +143,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return;
}
- buf_len = sizeof(struct hclge_desc) * bd_num;
+ buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
if (!desc_src)
return;
@@ -173,6 +173,114 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
kfree(desc_src);
}
+static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
+{
+ struct hclge_config_mac_mode_cmd *req;
+ struct hclge_desc desc;
+ u32 loop_en;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac enable status, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_mac_mode_cmd *)desc.data;
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+
+ dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
+ dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
+ dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
+ dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
+ dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
+ dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
+ dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
+ dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
+}
+
+static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
+{
+ struct hclge_config_max_frm_size_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac frame size, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "max_frame_size: %u\n",
+ le16_to_cpu(req->max_frm_size));
+ dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size);
+}
+
+static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
+{
+#define HCLGE_MAC_SPEED_SHIFT 0
+#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
+#define HCLGE_MAC_DUPLEX_SHIFT 7
+
+ struct hclge_config_mac_speed_dup_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac speed duplex, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ dev_info(&hdev->pdev->dev, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
+}
+
+static void hclge_dbg_dump_mac(struct hclge_dev *hdev)
+{
+ hclge_dbg_dump_mac_enable_status(hdev);
+
+ hclge_dbg_dump_mac_frame_size(hdev);
+
+ hclge_dbg_dump_mac_speed_duplex(hdev);
+}
+
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
struct device *dev = &hdev->pdev->dev;
@@ -304,6 +412,11 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
}
}
+ if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) {
+ hclge_dbg_dump_mac(hdev);
+ has_dump = true;
+ }
+
if (strncmp(cmd_buf, "dcb", 3) == 0) {
hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
has_dump = true;
@@ -578,7 +691,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
enum hclge_opcode_type cmd;
struct hclge_desc desc;
int queue_id, group_id;
- u32 qset_maping[32];
+ u32 qset_mapping[32];
int tc_id, qset_id;
int pri_id, ret;
u32 i;
@@ -633,7 +746,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
if (ret)
goto err_tm_map_cmd_send;
- qset_maping[group_id] =
+ qset_mapping[group_id] =
le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
}
@@ -643,11 +756,11 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
for (group_id = 0; group_id < 4; group_id++) {
dev_info(&hdev->pdev->dev,
"%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_maping[(u32)(i + 7)],
- qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)],
- qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)],
- qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)],
- qset_maping[i]);
+ group_id * 256, qset_mapping[(u32)(i + 7)],
+ qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
+ qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
+ qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
+ qset_mapping[i]);
i += 8;
}
@@ -1145,6 +1258,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
+#define HCLGE_NCL_CONFIG_PARAM_NUM 2
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
@@ -1154,13 +1268,17 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
int ret;
ret = sscanf(cmd_buf, "%x %x", &offset, &length);
- if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
- length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
- dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
+ if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Too few parameters, num = %d.\n", ret);
return;
}
- if (offset < 0 || length <= 0) {
- dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
+
+ if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
+ length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
+ dev_err(&hdev->pdev->dev,
+ "Invalid input, offset = %d, length = %d.\n",
+ offset, length);
return;
}
@@ -1328,6 +1446,49 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
hclge_dbg_dump_qs_shaper_single(hdev, qsid);
}
+static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf,
+ bool is_unicast)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_vport *vport;
+ struct list_head *list;
+ u32 func_id;
+ int ret;
+
+ ret = kstrtouint(cmd_buf, 0, &func_id);
+ if (ret < 0) {
+ dev_err(&hdev->pdev->dev,
+ "dump mac list: bad command string, ret = %d\n", ret);
+ return -EINVAL;
+ }
+
+ if (func_id >= hdev->num_alloc_vport) {
+ dev_err(&hdev->pdev->dev,
+ "function id(%u) is out of range(0-%u)\n", func_id,
+ hdev->num_alloc_vport - 1);
+ return -EINVAL;
+ }
+
+ vport = &hdev->vport[func_id];
+
+ list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
+
+ dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n",
+ func_id, is_unicast ? "uc" : "mc");
+ dev_info(&hdev->pdev->dev, "mac address state\n");
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ dev_info(&hdev->pdev->dev, "%pM %d\n",
+ mac_node->mac_addr, mac_node->state);
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
+}
+
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG "dump reg"
@@ -1372,6 +1533,14 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
hclge_dbg_dump_qs_shaper(hdev,
&cmd_buf[sizeof("dump qs shaper")]);
+ } else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) {
+ hclge_dbg_dump_mac_list(hdev,
+ &cmd_buf[sizeof("dump uc mac list")],
+ true);
+ } else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) {
+ hclge_dbg_dump_mac_list(hdev,
+ &cmd_buf[sizeof("dump mc mac list")],
+ false);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
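
The two new "dump uc/mc mac list" branches reuse the file's dispatch idiom: strncmp() matches the keyword, and indexing the buffer with sizeof("keyword") skips both the keyword and the following separator, since sizeof counts the terminating NUL. A standalone illustration; the demo_* names are invented for the example:

#include <stdio.h>
#include <string.h>

static void demo_dump_mac_list(const char *arg, int is_unicast)
{
	printf("%s mac list, arg=\"%s\"\n", is_unicast ? "uc" : "mc", arg);
}

static int demo_run_cmd(const char *cmd_buf)
{
	/* sizeof("dump uc mac list") == 17 (16 chars + NUL), so indexing
	 * with it skips the keyword and the following space in one go.
	 */
	if (strncmp(cmd_buf, "dump uc mac list", 16) == 0)
		demo_dump_mac_list(&cmd_buf[sizeof("dump uc mac list")], 1);
	else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0)
		demo_dump_mac_list(&cmd_buf[sizeof("dump mc mac list")], 0);
	else
		return -1;
	return 0;
}

int main(void)
{
	return demo_run_cmd("dump uc mac list 0");
}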
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 876fd81ad2f1..608fe26fc3fe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -16,7 +16,6 @@
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
-#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800
#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a758f9ae32be..96bfad52630d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -62,14 +62,16 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
-static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
- u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
+static void hclge_restore_hw_table(struct hclge_dev *hdev);
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+
static struct hnae3_ae_algo ae_algo;
static struct workqueue_struct *hclge_wq;
@@ -550,7 +552,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -570,7 +572,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_TX_STATUS,
+ HCLGE_OPC_QUERY_TX_STATS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -1361,10 +1363,8 @@ static int hclge_configure(struct hclge_dev *hdev)
int ret;
ret = hclge_get_cfg(hdev, &cfg);
- if (ret) {
- dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
+ if (ret)
return ret;
- }
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
@@ -1387,7 +1387,8 @@ static int hclge_configure(struct hclge_dev *hdev)
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
- dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
+ dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
+ cfg.default_speed, ret);
return ret;
}
@@ -1429,26 +1430,17 @@ static int hclge_configure(struct hclge_dev *hdev)
return ret;
}
-static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
- unsigned int tso_mss_max)
+static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
+ u16 tso_mss_max)
{
struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
- u16 tso_mss;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
req = (struct hclge_cfg_tso_status_cmd *)desc.data;
-
- tso_mss = 0;
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_min);
- req->tso_mss_min = cpu_to_le16(tso_mss);
-
- tso_mss = 0;
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_max);
- req->tso_mss_max = cpu_to_le16(tso_mss);
+ req->tso_mss_min = cpu_to_le16(tso_mss_min);
+ req->tso_mss_max = cpu_to_le16(tso_mss_max);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -1465,7 +1457,7 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
req = (struct hclge_cfg_gro_status_cmd *)desc.data;
- req->gro_en = cpu_to_le16(en ? 1 : 0);
+ req->gro_en = en ? 1 : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -1687,6 +1679,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
+ spin_lock_init(&vport->mac_list_lock);
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -2965,13 +2958,11 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
- u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
+ u32 cmdq_src_reg, msix_src_reg;
/* fetch the events from their corresponding regs */
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
- msix_src_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+ msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
/* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event in this go and will
@@ -2981,7 +2972,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*
* check for vector0 reset event sources
*/
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
@@ -2990,7 +2981,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
@@ -3480,7 +3471,7 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+ HCLGE_MISC_VECTOR_INT_STS);
/* we will intentionally ignore any errors from this function
* as we will end up in *some* reset request in any case
*/
@@ -3729,22 +3720,13 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}
static int hclge_reset_prepare(struct hclge_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
- /* Initialize ae_dev reset status as well, in case enet layer wants to
- * know if device is undergoing reset
- */
- ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.reset_cnt++;
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
@@ -3780,11 +3762,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hclge_clear_reset_cause(hdev);
- ret = hclge_reset_prepare_up(hdev);
- if (ret)
- return ret;
-
-
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
* times
@@ -3793,6 +3770,10 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
return ret;
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ return ret;
+
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
@@ -3806,7 +3787,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hdev->last_reset_time = jiffies;
hdev->rst_stats.reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
- ae_dev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
/* if default_reset_request has a higher level reset request,
@@ -3973,6 +3953,8 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
* updated when it is triggered by mbx.
*/
hclge_update_link_status(hdev);
+ hclge_sync_mac_table(hdev);
+ hclge_sync_promisc_mode(hdev);
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
delta = jiffies - hdev->last_serv_processed;
@@ -4722,7 +4704,8 @@ static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
- "Set promisc mode fail, status is %d.\n", ret);
+ "failed to set vport %d promisc mode, ret = %d.\n",
+ param->vf_id, ret);
return ret;
}
@@ -4772,6 +4755,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
en_bc_pmc);
}
+static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+}
+
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
struct hclge_get_fd_mode_cmd *req;
@@ -4822,7 +4813,8 @@ static int hclge_get_fd_allocation(struct hclge_dev *hdev,
return ret;
}
-static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
+static int hclge_set_fd_key_config(struct hclge_dev *hdev,
+ enum HCLGE_FD_STAGE stage_num)
{
struct hclge_set_fd_key_config_cmd *req;
struct hclge_fd_key_cfg *stage;
@@ -4876,9 +4868,6 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
return -EOPNOTSUPP;
}
- hdev->fd_cfg.proto_support =
- TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
- UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
@@ -4892,11 +4881,9 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If use max 400bit key, we can support tuples for ether type */
- if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
- hdev->fd_cfg.proto_support |= ETHER_FLOW;
+ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
key_cfg->tuple_active |=
BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
- }
/* roce_type is used to filter roce frames
* dst_vport is used to specify the rule
@@ -5006,8 +4993,6 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
return true;
switch (tuple_bit) {
- case 0:
- return false;
case BIT(INNER_DST_MAC):
for (i = 0; i < ETH_ALEN; i++) {
calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
@@ -5165,9 +5150,10 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
u8 *cur_key_x, *cur_key_y;
- unsigned int i;
- int ret, tuple_size;
u8 meta_data_region;
+ u8 tuple_size;
+ int ret;
+ u32 i;
memset(key_x, 0, sizeof(key_x));
memset(key_y, 0, sizeof(key_y));
@@ -5244,172 +5230,255 @@ static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
-static int hclge_fd_check_spec(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs, u32 *unused)
+static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
+ u32 *unused_tuple)
{
- struct ethtool_tcpip4_spec *tcp_ip4_spec;
- struct ethtool_usrip4_spec *usr_ip4_spec;
- struct ethtool_tcpip6_spec *tcp_ip6_spec;
- struct ethtool_usrip6_spec *usr_ip6_spec;
- struct ethhdr *ether_spec;
-
- if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ if (!spec || !unused_tuple)
return -EINVAL;
- if (!(fs->flow_type & hdev->fd_cfg.proto_support))
- return -EOPNOTSUPP;
-
- if ((fs->flow_type & FLOW_EXT) &&
- (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
- dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
- return -EOPNOTSUPP;
- }
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
- switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
- case SCTP_V4_FLOW:
- case TCP_V4_FLOW:
- case UDP_V4_FLOW:
- tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (!tcp_ip4_spec->ip4src)
- *unused |= BIT(INNER_SRC_IP);
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (!tcp_ip4_spec->ip4dst)
- *unused |= BIT(INNER_DST_IP);
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
- if (!tcp_ip4_spec->psrc)
- *unused |= BIT(INNER_SRC_PORT);
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
- if (!tcp_ip4_spec->pdst)
- *unused |= BIT(INNER_DST_PORT);
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- if (!tcp_ip4_spec->tos)
- *unused |= BIT(INNER_IP_TOS);
+ return 0;
+}
- break;
- case IP_USER_FLOW:
- usr_ip4_spec = &fs->h_u.usr_ip4_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (!usr_ip4_spec->ip4src)
- *unused |= BIT(INNER_SRC_IP);
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
- if (!usr_ip4_spec->ip4dst)
- *unused |= BIT(INNER_DST_IP);
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (!usr_ip4_spec->tos)
- *unused |= BIT(INNER_IP_TOS);
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (!usr_ip4_spec->proto)
- *unused |= BIT(INNER_IP_PROTO);
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- if (usr_ip4_spec->l4_4_bytes)
- return -EOPNOTSUPP;
+ if (!spec->proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
- if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
- return -EOPNOTSUPP;
+ if (spec->l4_4_bytes)
+ return -EOPNOTSUPP;
- break;
- case SCTP_V6_FLOW:
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
- tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS);
+ if (spec->ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
- /* check whether src/dst ip address used */
- if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
- !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
- *unused |= BIT(INNER_SRC_IP);
+ return 0;
+}
- if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
- !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
- *unused |= BIT(INNER_DST_IP);
+static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (!tcp_ip6_spec->psrc)
- *unused |= BIT(INNER_SRC_PORT);
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS);
- if (!tcp_ip6_spec->pdst)
- *unused |= BIT(INNER_DST_PORT);
+ /* check whether src/dst ip address used */
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
+ !spec->ip6src[2] && !spec->ip6src[3])
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (tcp_ip6_spec->tclass)
- return -EOPNOTSUPP;
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
+ !spec->ip6dst[2] && !spec->ip6dst[3])
+ *unused_tuple |= BIT(INNER_DST_IP);
- break;
- case IPV6_USER_FLOW:
- usr_ip6_spec = &fs->h_u.usr_ip6_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
- BIT(INNER_DST_PORT);
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
- /* check whether src/dst ip address used */
- if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
- !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
- *unused |= BIT(INNER_SRC_IP);
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
- if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
- !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
- *unused |= BIT(INNER_DST_IP);
+ if (spec->tclass)
+ return -EOPNOTSUPP;
- if (!usr_ip6_spec->l4_proto)
- *unused |= BIT(INNER_IP_PROTO);
+ return 0;
+}
- if (usr_ip6_spec->tclass)
- return -EOPNOTSUPP;
+static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (usr_ip6_spec->l4_4_bytes)
- return -EOPNOTSUPP;
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
- break;
- case ETHER_FLOW:
- ether_spec = &fs->h_u.ether_spec;
- *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
- BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
- BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+ /* check whether src/dst ip address used */
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
+ !spec->ip6src[2] && !spec->ip6src[3])
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (is_zero_ether_addr(ether_spec->h_source))
- *unused |= BIT(INNER_SRC_MAC);
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
+ !spec->ip6dst[2] && !spec->ip6dst[3])
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (is_zero_ether_addr(ether_spec->h_dest))
- *unused |= BIT(INNER_DST_MAC);
+ if (!spec->l4_proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
- if (!ether_spec->h_proto)
- *unused |= BIT(INNER_ETH_TYPE);
+ if (spec->tclass)
+ return -EOPNOTSUPP;
- break;
- default:
+ if (spec->l4_4_bytes)
return -EOPNOTSUPP;
- }
- if ((fs->flow_type & FLOW_EXT)) {
- if (fs->h_ext.vlan_etype)
+ return 0;
+}
+
+static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+ if (is_zero_ether_addr(spec->h_source))
+ *unused_tuple |= BIT(INNER_SRC_MAC);
+
+ if (is_zero_ether_addr(spec->h_dest))
+ *unused_tuple |= BIT(INNER_DST_MAC);
+
+ if (!spec->h_proto)
+ *unused_tuple |= BIT(INNER_ETH_TYPE);
+
+ return 0;
+}
+
+static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ if (fs->h_ext.vlan_etype) {
+ dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
return -EOPNOTSUPP;
+ }
+
if (!fs->h_ext.vlan_tci)
- *unused |= BIT(INNER_VLAN_TAG_FST);
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
- if (fs->m_ext.vlan_tci) {
- if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
- return -EINVAL;
+ if (fs->m_ext.vlan_tci &&
+ be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
+ ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
+ return -EINVAL;
}
} else {
- *unused |= BIT(INNER_VLAN_TAG_FST);
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
}
if (fs->flow_type & FLOW_MAC_EXT) {
- if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "FLOW_MAC_EXT is not supported in current fd mode!\n");
return -EOPNOTSUPP;
+ }
if (is_zero_ether_addr(fs->h_ext.h_dest))
- *unused |= BIT(INNER_DST_MAC);
+ *unused_tuple |= BIT(INNER_DST_MAC);
else
- *unused &= ~(BIT(INNER_DST_MAC));
+ *unused_tuple &= ~BIT(INNER_DST_MAC);
}
return 0;
}
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ u32 flow_type;
+ int ret;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config fd rules, invalid rule location: %u, max is %u\n.",
+ fs->location,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
+ return -EINVAL;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
+ unused_tuple);
+ break;
+ case IP_USER_FLOW:
+ ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
+ unused_tuple);
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
+ unused_tuple);
+ break;
+ case IPV6_USER_FLOW:
+ ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
+ unused_tuple);
+ break;
+ case ETHER_FLOW:
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "ETHER_FLOW is not supported in current fd mode!\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
+ unused_tuple);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported protocol type, protocol type = %#x\n",
+ flow_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check flow union tuple, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
+}
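
Each helper above ORs a bit into unused_tuple for every field the rule leaves unset, so later key construction can skip those tuples. Below is a sketch of the same accumulation for a TCPv4 rule that only pins the destination IP and port; the bit positions and struct layout are assumptions for illustration, not the driver's actual definitions:

#include <stdio.h>

/* Illustrative tuple bit positions (assumed, not from hclge_main.h). */
enum { D_SRC_MAC, D_DST_MAC, D_SRC_IP, D_DST_IP, D_SRC_PORT, D_DST_PORT,
       D_IP_TOS };

struct demo_tcpip4_spec {
	unsigned int ip4src, ip4dst;
	unsigned short psrc, pdst;
	unsigned char tos;
};

static unsigned int demo_check_tcpip4(const struct demo_tcpip4_spec *spec)
{
	/* MAC fields never apply to an L3/L4 rule. */
	unsigned int unused = (1u << D_SRC_MAC) | (1u << D_DST_MAC);

	if (!spec->ip4src)
		unused |= 1u << D_SRC_IP;
	if (!spec->ip4dst)
		unused |= 1u << D_DST_IP;
	if (!spec->psrc)
		unused |= 1u << D_SRC_PORT;
	if (!spec->pdst)
		unused |= 1u << D_DST_PORT;
	if (!spec->tos)
		unused |= 1u << D_IP_TOS;
	return unused;
}

int main(void)
{
	struct demo_tcpip4_spec spec = { .ip4dst = 0x0a000001, .pdst = 443 };

	/* Only dst ip/port are used; everything else is flagged unused. */
	printf("unused tuple bitmap: %#x\n", demo_check_tcpip4(&spec));
	return 0;
}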
+
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
struct hclge_fd_rule *rule = NULL;
@@ -5618,7 +5687,7 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
break;
}
- if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->flow_type & FLOW_EXT) {
rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
}
@@ -5673,22 +5742,23 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_dev_fd_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "flow table director is not supported\n");
return -EOPNOTSUPP;
+ }
if (!hdev->fd_en) {
- dev_warn(&hdev->pdev->dev,
- "Please enable flow director first\n");
+ dev_err(&hdev->pdev->dev,
+ "please enable flow director first\n");
return -EOPNOTSUPP;
}
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
ret = hclge_fd_check_spec(hdev, fs, &unused);
- if (ret) {
- dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
+ if (ret)
return ret;
- }
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
action = HCLGE_FD_ACTION_DROP_PACKET;
@@ -5729,7 +5799,6 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
}
rule->flow_type = fs->flow_type;
-
rule->location = fs->location;
rule->unused_tuple = unused;
rule->vf_id = dst_vport_id;
@@ -5877,6 +5946,149 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
return 0;
}
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip4_spec *spec,
+ struct ethtool_tcpip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+}
+
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip4_spec *spec,
+ struct ethtool_usrip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ spec->proto = rule->tuples.ip_proto;
+ spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ spec->ip_ver = ETH_RX_NFC_IP4;
+}
+
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip6_spec *spec,
+ struct ethtool_tcpip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src,
+ rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst,
+ rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
+ IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
+ IPV6_SIZE);
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+}
+
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip6_spec *spec,
+ struct ethtool_usrip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src,
+ rule->tuples_mask.src_ip, IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
+
+ spec->l4_proto = rule->tuples.ip_proto;
+ spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+}
+
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
+ struct ethhdr *spec,
+ struct ethhdr *spec_mask)
+{
+ ether_addr_copy(spec->h_source, rule->tuples.src_mac);
+ ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
+
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+ eth_zero_addr(spec_mask->h_source);
+ else
+ ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
+
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(spec_mask->h_dest);
+ else
+ ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
+
+ spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
+ spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+}
+
+static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+ fs->m_ext.vlan_tci =
+ rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+ cpu_to_be16(VLAN_VID_MASK) :
+ cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_dest);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_dest,
+ rule->tuples_mask.dst_mac);
+ }
+}
+
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -5909,162 +6121,34 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case SCTP_V4_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
- fs->h_u.tcp_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
-
- fs->h_u.tcp_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
-
- fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
- fs->m_u.tcp_ip4_spec.psrc =
- rule->unused_tuple & BIT(INNER_SRC_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.src_port);
-
- fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
- fs->m_u.tcp_ip4_spec.pdst =
- rule->unused_tuple & BIT(INNER_DST_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.dst_port);
-
- fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
- fs->m_u.tcp_ip4_spec.tos =
- rule->unused_tuple & BIT(INNER_IP_TOS) ?
- 0 : rule->tuples_mask.ip_tos;
-
+ hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
+ &fs->m_u.tcp_ip4_spec);
break;
case IP_USER_FLOW:
- fs->h_u.usr_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
-
- fs->h_u.usr_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
- fs->m_u.usr_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
-
- fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
- fs->m_u.usr_ip4_spec.tos =
- rule->unused_tuple & BIT(INNER_IP_TOS) ?
- 0 : rule->tuples_mask.ip_tos;
-
- fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
- fs->m_u.usr_ip4_spec.proto =
- rule->unused_tuple & BIT(INNER_IP_PROTO) ?
- 0 : rule->tuples_mask.ip_proto;
-
- fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
-
+ hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec);
break;
case SCTP_V6_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
- cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
-
- cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
-
- fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
- fs->m_u.tcp_ip6_spec.psrc =
- rule->unused_tuple & BIT(INNER_SRC_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.src_port);
-
- fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
- fs->m_u.tcp_ip6_spec.pdst =
- rule->unused_tuple & BIT(INNER_DST_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.dst_port);
-
+ hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
+ &fs->m_u.tcp_ip6_spec);
break;
case IPV6_USER_FLOW:
- cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.usr_ip6_spec.ip6src, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
-
- cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
-
- fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
- fs->m_u.usr_ip6_spec.l4_proto =
- rule->unused_tuple & BIT(INNER_IP_PROTO) ?
- 0 : rule->tuples_mask.ip_proto;
-
- break;
- case ETHER_FLOW:
- ether_addr_copy(fs->h_u.ether_spec.h_source,
- rule->tuples.src_mac);
- if (rule->unused_tuple & BIT(INNER_SRC_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_source);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_source,
- rule->tuples_mask.src_mac);
-
- ether_addr_copy(fs->h_u.ether_spec.h_dest,
- rule->tuples.dst_mac);
- if (rule->unused_tuple & BIT(INNER_DST_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_dest);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_dest,
- rule->tuples_mask.dst_mac);
-
- fs->h_u.ether_spec.h_proto =
- cpu_to_be16(rule->tuples.ether_proto);
- fs->m_u.ether_spec.h_proto =
- rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
- 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
-
+ hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec);
break;
+ /* The flow type of the fd rule has been checked before it was added
+ * to the rule list. All other flow types were handled above, so the
+ * default case must be ETHER_FLOW.
+ */
default:
- spin_unlock_bh(&hdev->fd_rule_lock);
- return -EOPNOTSUPP;
- }
-
- if (fs->flow_type & FLOW_EXT) {
- fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
- fs->m_ext.vlan_tci =
- rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
- cpu_to_be16(VLAN_VID_MASK) :
- cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
+ &fs->m_u.ether_spec);
+ break;
}
- if (fs->flow_type & FLOW_MAC_EXT) {
- ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
- if (rule->unused_tuple & BIT(INNER_DST_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_dest);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_dest,
- rule->tuples_mask.dst_mac);
- }
+ hclge_fd_get_ext_info(fs, rule);
if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
fs->ring_cookie = RX_CLS_FLOW_DISC;
@@ -6202,7 +6286,6 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
*/
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -EOPNOTSUPP;
}
@@ -6216,14 +6299,12 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOSPC;
}
rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOMEM;
}
@@ -6310,6 +6391,14 @@ static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}
+static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+}
+
static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6486,8 +6575,6 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
@@ -6834,8 +6921,22 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
int hclge_vport_start(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
vport->last_active_jiffies = jiffies;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block)) {
+ if (vport->vport_id) {
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ } else {
+ hclge_restore_hw_table(hdev);
+ }
+ }
+
+ clear_bit(vport->vport_id, hdev->vport_config_block);
+
return 0;
}
@@ -6872,17 +6973,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
}
if (op == HCLGE_MAC_VLAN_ADD) {
- if ((!resp_code) || (resp_code == 1)) {
+ if (!resp_code || resp_code == 1)
return 0;
- } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for uc_overflow.\n");
- return -ENOSPC;
- } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for mc_overflow.\n");
+ else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
+ resp_code == HCLGE_ADD_MC_OVERFLOW)
return -ENOSPC;
- }
dev_err(&hdev->pdev->dev,
"add mac addr failed for undefined, code=%u.\n",
@@ -7106,52 +7201,8 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
return cfg_status;
}
-static int hclge_init_umv_space(struct hclge_dev *hdev)
-{
- u16 allocated_size = 0;
- int ret;
-
- ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
- true);
- if (ret)
- return ret;
-
- if (allocated_size < hdev->wanted_umv_size)
- dev_warn(&hdev->pdev->dev,
- "Alloc umv space failed, want %u, get %u\n",
- hdev->wanted_umv_size, allocated_size);
-
- mutex_init(&hdev->umv_mutex);
- hdev->max_umv_size = allocated_size;
- /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
- * preserve some unicast mac vlan table entries shared by pf
- * and its vfs.
- */
- hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
- hdev->share_umv_size = hdev->priv_umv_size +
- hdev->max_umv_size % (hdev->num_req_vfs + 2);
-
- return 0;
-}
-
-static int hclge_uninit_umv_space(struct hclge_dev *hdev)
-{
- int ret;
-
- if (hdev->max_umv_size > 0) {
- ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
- false);
- if (ret)
- return ret;
- hdev->max_umv_size = 0;
- }
- mutex_destroy(&hdev->umv_mutex);
-
- return 0;
-}
-
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
- u16 *allocated_size, bool is_alloc)
+ u16 *allocated_size)
{
struct hclge_umv_spc_alc_cmd *req;
struct hclge_desc desc;
@@ -7159,21 +7210,39 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
req = (struct hclge_umv_spc_alc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
- if (!is_alloc)
- hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
req->space_size = cpu_to_le32(space_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "%s umv space failed for cmd_send, ret =%d\n",
- is_alloc ? "allocate" : "free", ret);
+ dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
+ ret);
return ret;
}
- if (is_alloc && allocated_size)
- *allocated_size = le32_to_cpu(desc.data[1]);
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "failed to alloc umv space, want %u, get %u\n",
+ hdev->wanted_umv_size, allocated_size);
+
+ hdev->max_umv_size = allocated_size;
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
return 0;
}
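
The reworked hclge_init_umv_space() above carves the allocated unicast entries into per-vport private quotas plus one shared pool: dividing by num_alloc_vport + 1 reserves an extra share, and the division remainder is folded into the shared pool. A quick arithmetic check under assumed sizes:

#include <stdio.h>

int main(void)
{
	unsigned int max_umv_size = 3072;   /* assumed allocation */
	unsigned int num_alloc_vport = 8;   /* PF + 7 VFs, assumed */
	unsigned int priv, share;

	/* One extra divisor slot funds the shared pool: each vport gets
	 * a private quota, and the leftover quota plus the remainder is
	 * shared by all of them.
	 */
	priv = max_umv_size / (num_alloc_vport + 1);
	share = priv + max_umv_size % (num_alloc_vport + 1);

	/* prints priv=341 share=344 total=3072 */
	printf("priv=%u share=%u total=%u\n",
	       priv, share, priv * num_alloc_vport + share);
	return 0;
}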
@@ -7188,21 +7257,25 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev)
vport->used_umv_num = 0;
}
- mutex_lock(&hdev->umv_mutex);
+ mutex_lock(&hdev->vport_lock);
hdev->share_umv_size = hdev->priv_umv_size +
- hdev->max_umv_size % (hdev->num_req_vfs + 2);
- mutex_unlock(&hdev->umv_mutex);
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ mutex_unlock(&hdev->vport_lock);
}
-static bool hclge_is_umv_space_full(struct hclge_vport *vport)
+static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
struct hclge_dev *hdev = vport->back;
bool is_full;
- mutex_lock(&hdev->umv_mutex);
+ if (need_lock)
+ mutex_lock(&hdev->vport_lock);
+
is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
hdev->share_umv_size == 0);
- mutex_unlock(&hdev->umv_mutex);
+
+ if (need_lock)
+ mutex_unlock(&hdev->vport_lock);
return is_full;
}
@@ -7211,7 +7284,6 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
struct hclge_dev *hdev = vport->back;
- mutex_lock(&hdev->umv_mutex);
if (is_free) {
if (vport->used_umv_num > hdev->priv_umv_size)
hdev->share_umv_size++;
@@ -7224,7 +7296,99 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
hdev->share_umv_size--;
vport->used_umv_num++;
}
- mutex_unlock(&hdev->umv_mutex);
+}
+
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
+ const u8 *mac_addr)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
+ enum HCLGE_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGE_MAC_TO_ADD:
+ if (mac_node->state == HCLGE_MAC_TO_DEL)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGE_MAC_TO_DEL:
+ if (mac_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ }
+ break;
+ /* only from tmp_add_list; the node state cannot already be
+ * ACTIVE here.
+ */
+ case HCLGE_MAC_ACTIVE:
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+
+ break;
+ }
+}
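
The transitions above amount to a small three-state machine per mac node. Below is a compact standalone reconstruction; the state names mirror the driver's, the table is an editorial summary, and D_FREED stands in for list_del() + kfree():

#include <stdio.h>

enum demo_mac_state { D_TO_ADD, D_TO_DEL, D_ACTIVE, D_FREED };

/* Reconstructed transition table for hclge_update_mac_node():
 * request TO_ADD on a TO_DEL node -> ACTIVE (cancel pending delete)
 * request TO_DEL on a TO_ADD node -> node freed (never hit hardware)
 * request ACTIVE on a TO_ADD node -> ACTIVE (hardware write succeeded)
 * anything else                   -> unchanged (or set TO_DEL)
 */
static enum demo_mac_state demo_update(enum demo_mac_state cur,
				       enum demo_mac_state req)
{
	switch (req) {
	case D_TO_ADD:
		return cur == D_TO_DEL ? D_ACTIVE : cur;
	case D_TO_DEL:
		return cur == D_TO_ADD ? D_FREED : D_TO_DEL;
	case D_ACTIVE:
		return cur == D_TO_ADD ? D_ACTIVE : cur;
	default:
		return cur;
	}
}

int main(void)
{
	printf("%d\n", demo_update(D_TO_DEL, D_TO_ADD)); /* 2: ACTIVE */
	printf("%d\n", demo_update(D_TO_ADD, D_TO_DEL)); /* 3: FREED */
	return 0;
}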
+
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_node *mac_node;
+ struct list_head *list;
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ /* if the mac addr is already in the mac list, there is no need to
+ * add a new one; just update the existing node's state, remove it,
+ * or do nothing, as appropriate.
+ */
+ mac_node = hclge_find_mac_node(list, addr);
+ if (mac_node) {
+ hclge_update_mac_node(mac_node, state);
+ spin_unlock_bh(&vport->mac_list_lock);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+ return 0;
+ }
+
+ /* if this address was never added, there is nothing to delete */
+ if (state == HCLGE_MAC_TO_DEL) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ dev_err(&hdev->pdev->dev,
+ "failed to delete address %pM from mac list\n",
+ addr);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ return -ENOMEM;
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
@@ -7232,7 +7396,8 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_uc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
+ addr);
}
int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -7271,15 +7436,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
*/
ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
if (ret == -ENOENT) {
- if (!hclge_is_umv_space_full(vport)) {
+ mutex_lock(&hdev->vport_lock);
+ if (!hclge_is_umv_space_full(vport, false)) {
ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
if (!ret)
hclge_update_umv_space(vport, false);
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
+ mutex_unlock(&hdev->vport_lock);
- dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
- hdev->priv_umv_size);
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+ hdev->priv_umv_size);
return -ENOSPC;
}
@@ -7303,7 +7472,8 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_rm_uc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
+ addr);
}
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
@@ -7326,8 +7496,13 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
- if (!ret)
+ if (!ret) {
+ mutex_lock(&hdev->vport_lock);
hclge_update_umv_space(vport, true);
+ mutex_unlock(&hdev->vport_lock);
+ } else if (ret == -ENOENT) {
+ ret = 0;
+ }
return ret;
}
@@ -7337,7 +7512,8 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_mc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
+ addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -7369,7 +7545,9 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- if (status == -ENOSPC)
+ /* if already overflow, not to print each time */
+ if (status == -ENOSPC &&
+ !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
return status;
@@ -7380,7 +7558,8 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_rm_mc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
+ addr);
}
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
@@ -7415,111 +7594,354 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- } else {
- /* Maybe this mac address is in mta table, but it cannot be
- * deleted here because an entry of mta represents an address
- * range rather than a specific address. the delete action to
- * all entries will take effect in update_mta_status called by
- * hns3_nic_set_rx_mode.
- */
+ } else if (status == -ENOENT) {
status = 0;
}
return status;
}
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ int (*sync)(struct hclge_vport *,
+ const unsigned char *))
{
- struct hclge_vport_mac_addr_cfg *mac_cfg;
- struct list_head *list;
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
- if (!vport->vport_id)
- return;
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = sync(vport, mac_node->mac_addr);
+ if (!ret) {
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+ break;
+ }
+ }
+}
- mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
- if (!mac_cfg)
- return;
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ int (*unsync)(struct hclge_vport *,
+ const unsigned char *))
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = unsync(vport, mac_node->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+ break;
+ }
+ }
+}
- mac_cfg->hd_tbl_status = true;
- memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
+static bool hclge_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ bool all_added = true;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ all_added = false;
- list_add_tail(&mac_cfg->node, list);
+ /* if the mac address from tmp_add_list is not in the
+ * uc/mc_mac_list, a TO_DEL request was received during the time
+ * window in which the address was being written to the mac table.
+ * If the node state is ACTIVE, change it to TO_DEL so it is
+ * removed on the next pass; otherwise it must be TO_ADD, meaning
+ * the address never reached the mac table, so just free the node.
+ */
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclge_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+
+ return all_added;
}
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- bool is_write_tbl,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
{
- struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
- struct list_head *list;
- bool uc_flag, mc_flag;
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ /* If the mac addr exists in the mac list, a new TO_ADD
+ * request arrived during the time window in which the
+ * address was being deleted. The node state is TO_ADD and
+ * the address is still in the hardware (the delete failed),
+ * so simply mark the node ACTIVE.
+ */
+ new_node->state = HCLGE_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ }
+ }
+}
- uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
- mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ bool is_all_added)
+{
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ } else {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+ }
+}
- list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
- if (uc_flag && mac_cfg->hd_tbl_status)
- hclge_rm_uc_addr_common(vport, mac_addr);
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+ bool all_added;
- if (mc_flag && mac_cfg->hd_tbl_status)
- hclge_rm_mc_addr_common(vport, mac_addr);
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
- list_del(&mac_cfg->node);
- kfree(mac_cfg);
+ /* move the mac addrs to tmp_add_list and tmp_del_list, so they
+ * can be added/deleted outside the spin lock
+ */
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
break;
}
}
+
+stop_traverse:
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_uc_addr_common);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
+ hclge_add_uc_addr_common);
+ } else {
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_mc_addr_common);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
+ hclge_add_mc_addr_common);
+ }
+
+ /* if some mac addresses failed to be added or deleted, move them
+ * back to the mac_list and retry on the next pass.
+ */
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+ all_added = hclge_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_update_overflow_flags(vport, mac_type, all_added);
+}
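
hclge_sync_vport_mac_table() follows a classic split-lock shape: snapshot pending work into temporary lists under the spinlock, talk to hardware with the lock dropped, then merge failures back for retry. A generic sketch of that shape using userspace spinlocks in place of the kernel primitives; all demo_* names are invented:

#include <pthread.h>
#include <stdio.h>

/* Generic shape of the split-lock sync: move pending work out of the
 * shared queue under the lock, act on it unlocked, merge failures back.
 */
static pthread_spinlock_t demo_lock;
static int demo_pending[8], demo_npending;

static int demo_hw_apply(int item)
{
	return item != 3 ? 0 : -1; /* pretend item 3 fails */
}

int main(void)
{
	int batch[8], nbatch, nfail = 0, i;

	pthread_spin_init(&demo_lock, PTHREAD_PROCESS_PRIVATE);
	demo_pending[0] = 1; demo_pending[1] = 3; demo_npending = 2;

	pthread_spin_lock(&demo_lock); /* snapshot under the lock */
	nbatch = demo_npending;
	for (i = 0; i < nbatch; i++)
		batch[i] = demo_pending[i];
	demo_npending = 0;
	pthread_spin_unlock(&demo_lock);

	for (i = 0; i < nbatch; i++) /* slow work, lock released */
		if (demo_hw_apply(batch[i]))
			batch[nfail++] = batch[i];

	pthread_spin_lock(&demo_lock); /* merge failures for retry */
	for (i = 0; i < nfail; i++)
		demo_pending[demo_npending++] = batch[i];
	pthread_spin_unlock(&demo_lock);

	printf("retry queue: %d item(s)\n", demo_npending);
	return 0;
}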
+
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block))
+ return false;
+
+ if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
+ return true;
+
+ return false;
+}
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (!hclge_need_sync_mac_table(vport))
+ continue;
+
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
+ }
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type)
{
- struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
- struct list_head *list;
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_mac_node *mac_cfg, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+ int ret;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
- list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
- hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
+ INIT_LIST_HEAD(&tmp_del_list);
- if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
- hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
- mac_cfg->hd_tbl_status = false;
- if (is_del_list) {
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+ switch (mac_cfg->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
- kfree(mac_cfg);
+ list_add_tail(&mac_cfg->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ if (is_del_list) {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+ ret = unsync(vport, mac_cfg->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ /* clear all mac addrs from the hardware but keep them
+ * in the mac list, so they can be restored after the
+ * vf reset finishes.
+ */
+ if (!is_del_list &&
+ mac_cfg->state == HCLGE_MAC_ACTIVE) {
+ mac_cfg->state = HCLGE_MAC_TO_ADD;
+ } else {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ } else if (is_del_list) {
+ mac_cfg->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+/* remove all mac addresses when uninitializing */
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ break;
}
}
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_uc_addr_common);
+ else
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_mc_addr_common);
+
+ if (!list_empty(&tmp_del_list))
+ dev_warn(&hdev->pdev->dev,
+ "uninit %s mac list for vport %u not completely.\n",
+ mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
+ vport->vport_id);
+
+ list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
}
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
- struct hclge_vport_mac_addr_cfg *mac, *tmp;
struct hclge_vport *vport;
int i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
- list_del(&mac->node);
- kfree(mac);
- }
-
- list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
- list_del(&mac->node);
- kfree(mac);
- }
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
}
}
@@ -7683,12 +8105,57 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr)
+{
+ struct list_head *list = &vport->uc_mac_list;
+ struct hclge_mac_node *old_node, *new_node;
+
+ new_node = hclge_find_mac_node(list, new_addr);
+ if (!new_node) {
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ return -ENOMEM;
+
+ new_node->state = HCLGE_MAC_TO_ADD;
+ ether_addr_copy(new_node->mac_addr, new_addr);
+ list_add(&new_node->node, list);
+ } else {
+ if (new_node->state == HCLGE_MAC_TO_DEL)
+ new_node->state = HCLGE_MAC_ACTIVE;
+
+ /* make sure the new addr is at the list head, so that the
+ * dev addr is not left out of the mac table due to the umv
+ * space limitation after a global/imp reset, which clears
+ * the mac table in hardware.
+ */
+ list_move(&new_node->node, list);
+ }
+
+ if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
+ old_node = hclge_find_mac_node(list, old_addr);
+ if (old_node) {
+ if (old_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&old_node->node);
+ kfree(old_node);
+ } else {
+ old_node->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ return 0;
+}
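The list_move() above matters because restore order decides who gets the scarce UMV entries after a reset: whatever sits at the head of uc_mac_list is written to hardware first. A toy illustration of that ordering effect, using a hypothetical restore() with an assumed two-entry space limit (none of these names are driver API):

#include <stdio.h>

struct entry {
	const char *addr;
	const struct entry *next;
};

/* restore at most 'space' entries, walking from the list head; anything
 * past the limit is dropped, which is why the device address must be
 * kept at the head
 */
static void restore(const struct entry *head, int space)
{
	for (; head && space > 0; head = head->next, space--)
		printf("restored %s\n", head->addr);
}

int main(void)
{
	struct entry c = { "uc-filter-2", NULL };
	struct entry b = { "uc-filter-1", &c };
	struct entry a = { "dev-addr", &b };	/* list head: restored first */

	restore(&a, 2);		/* "uc-filter-2" loses out, not "dev-addr" */
	return 0;
}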
+
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ unsigned char *old_addr = NULL;
int ret;
/* mac addr check */
@@ -7696,39 +8163,42 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
dev_err(&hdev->pdev->dev,
- "Change uc mac err! invalid mac:%pM.\n",
+ "change uc mac err! invalid mac: %pM.\n",
new_addr);
return -EINVAL;
}
- if ((!is_first || is_kdump_kernel()) &&
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
- dev_warn(&hdev->pdev->dev,
- "remove old uc mac address fail.\n");
-
- ret = hclge_add_uc_addr(handle, new_addr);
+ ret = hclge_pause_addr_cfg(hdev, new_addr);
if (ret) {
dev_err(&hdev->pdev->dev,
- "add uc mac address fail, ret =%d.\n",
+ "failed to configure mac pause address, ret = %d\n",
ret);
-
- if (!is_first &&
- hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
- dev_err(&hdev->pdev->dev,
- "restore uc mac address fail.\n");
-
- return -EIO;
+ return ret;
}
- ret = hclge_pause_addr_cfg(hdev, new_addr);
+ if (!is_first)
+ old_addr = hdev->hw.mac.mac_addr;
+
+ spin_lock_bh(&vport->mac_list_lock);
+ ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
if (ret) {
dev_err(&hdev->pdev->dev,
- "configure mac pause address fail, ret =%d.\n",
- ret);
- return -EIO;
- }
+ "failed to change the mac addr:%pM, ret = %d\n",
+ new_addr, ret);
+ spin_unlock_bh(&vport->mac_list_lock);
+ if (!is_first)
+ hclge_pause_addr_cfg(hdev, old_addr);
+
+ return ret;
+ }
+ /* the dev addr must be updated under the spin lock to prevent it
+ * from being removed by the set_rx_mode path.
+ */
ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_task_schedule(hdev, 0);
return 0;
}
@@ -8308,42 +8778,80 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
}
}
-static void hclge_restore_vlan_table(struct hnae3_handle *handle)
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
u16 vlan_proto;
- u16 state, vlan_id;
- int i;
+ u16 vlan_id;
+ u16 state;
+ int ret;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
- vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- state = vport->port_base_vlan_cfg.state;
+ vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
+ vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ state = vport->port_base_vlan_cfg.state;
- if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
- hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id,
- false);
- continue;
- }
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id, vlan_id,
+ false);
+ return;
+ }
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- int ret;
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
+ vlan->hd_tbl_status = true;
+ }
+}
- if (!vlan->hd_tbl_status)
- continue;
- ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id, false);
- if (ret)
- break;
+/* For global reset and imp reset, hardware will clear the mac table,
+ * so we change the state of mac addresses from ACTIVE to TO_ADD, so that
+ * they can be restored in the service task after the reset completes.
+ * Furthermore, mac addresses with state TO_DEL do not need to be
+ * restored after reset, so just remove these mac nodes from the mac_list.
+ */
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_ADD;
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
}
}
}
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
+{
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
+ hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+static void hclge_restore_hw_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+
+ hclge_restore_fd_entries(handle);
+}
+
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -9412,10 +9920,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
int ret;
hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
- if (!hdev) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!hdev)
+ return -ENOMEM;
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
@@ -9594,6 +10100,7 @@ err_pci_uninit:
pci_release_regions(pdev);
pci_disable_device(pdev);
out:
+ mutex_destroy(&hdev->vport_lock);
return ret;
}
@@ -9658,7 +10165,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
dev_warn(&hdev->pdev->dev,
"vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
vf);
- else if (enable && hclge_is_umv_space_full(vport))
+ else if (enable && hclge_is_umv_space_full(vport, true))
dev_warn(&hdev->pdev->dev,
"vf %d mac table is full, enable spoof check may cause its packet send fail\n",
vf);
@@ -9835,8 +10342,16 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
hclge_stats_clear(hdev);
- memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
- memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ /* NOTE: a pf reset does not need to clear or restore the pf and vf
+ * table entries, so the tables in memory should not be cleaned here.
+ */
+ if (hdev->reset_type == HNAE3_IMP_RESET ||
+ hdev->reset_type == HNAE3_GLOBAL_RESET) {
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
+ hclge_reset_umv_space(hdev);
+ }
ret = hclge_cmd_init(hdev);
if (ret) {
@@ -9850,8 +10365,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- hclge_reset_umv_space(hdev);
-
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -9947,12 +10460,11 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
+ hclge_uninit_mac_table(hdev);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
- hclge_uninit_umv_space(hdev);
-
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
@@ -9966,7 +10478,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock);
- hclge_uninit_vport_mac_table(hdev);
hclge_uninit_vport_vlan_table(hdev);
ae_dev->priv = NULL;
}
@@ -10213,16 +10724,19 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
- /*prepare 4 commands to query DFX BD number*/
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
- desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+ int i;
- return hclge_cmd_send(&hdev->hw, desc, 4);
+ /* initialize all command BDs except the last one */
+ for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+
+ /* initialize the last command BD */
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}
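The rewritten hclge_query_bd_num_cmd_send() generalizes the old hard-coded four-descriptor setup: every buffer descriptor except the last carries a NEXT flag, which is what chains the array into one multi-BD command. A standalone sketch of the chaining pattern follows; the flag value and the one-field descriptor are assumptions for illustration only.

#include <stdio.h>

#define DESC_CNT	4
#define FLAG_NEXT	0x0001u		/* assumed value */

struct desc {
	unsigned int flag;
};

int main(void)
{
	struct desc desc[DESC_CNT] = { { 0 } };
	int i;

	/* chain: every BD except the last carries the NEXT flag */
	for (i = 0; i < DESC_CNT - 1; i++)
		desc[i].flag |= FLAG_NEXT;

	for (i = 0; i < DESC_CNT; i++)
		printf("bd%d next=%u\n", i, desc[i].flag & FLAG_NEXT);
	return 0;
}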
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
@@ -10576,6 +11090,131 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
return hclge_config_gro(hdev, enable);
}
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+ u8 tmp_flags = 0;
+ int ret;
+
+ if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ vport->last_promisc_flags = vport->overflow_promisc_flags;
+ }
+
+ if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
+ tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
+ ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
+ tmp_flags & HNAE3_MPE);
+ if (!ret) {
+ clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ hclge_enable_vlan_filter(handle,
+ tmp_flags & HNAE3_VLAN_FLTR);
+ }
+ }
+}
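hclge_sync_promisc_mode() ORs the user-requested promiscuous flags with the overflow flags, so a full unicast or multicast table can force promiscuous mode on even when the user never asked for it. A small sketch of that merge; the UPE/MPE bit values are assumed, only the OR-and-test pattern is the point.

#include <stdio.h>

#define UPE	0x02u	/* unicast promiscuous (assumed bit value) */
#define MPE	0x04u	/* multicast promiscuous (assumed bit value) */

int main(void)
{
	unsigned char netdev_flags = UPE;	/* user enabled uc promisc */
	unsigned char overflow_flags = MPE;	/* mc table overflowed */
	unsigned char tmp = netdev_flags | overflow_flags;

	printf("uc promisc: %d, mc promisc: %d\n",
	       !!(tmp & UPE), !!(tmp & MPE));
	return 0;
}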
+
+static bool hclge_module_existed(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ u32 existed;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP exist state, ret = %d\n", ret);
+ return false;
+ }
+
+ existed = le32_to_cpu(desc.data[0]);
+
+ return existed != 0;
+}
+
+/* one read needs 6 BDs (140 bytes in total);
+ * returns the number of bytes actually read, 0 means the read failed.
+ */
+static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
+ struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
+ u16 read_len;
+ u16 copy_len;
+ int ret;
+ int i;
+
+ /* setup all 6 bds to read module eeprom info. */
+ for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
+ true);
+
+ /* bd0~bd4 need next flag */
+ if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+
+ /* setup bd0, this bd contains offset and read length. */
+ sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
+ sfp_info_bd0->offset = cpu_to_le16((u16)offset);
+ read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
+ sfp_info_bd0->read_len = cpu_to_le16(read_len);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, i);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP eeprom info, ret = %d\n", ret);
+ return 0;
+ }
+
+ /* copy sfp info from bd0 to out buffer. */
+ copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
+ memcpy(data, sfp_info_bd0->data, copy_len);
+ read_len = copy_len;
+
+ /* copy sfp info from bd1~bd5 to out buffer if needed. */
+ for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ if (read_len >= len)
+ return read_len;
+
+ copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
+ memcpy(data + read_len, desc[i].data, copy_len);
+ read_len += copy_len;
+ }
+
+ return read_len;
+}
+
+static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 read_len = 0;
+ u16 data_len;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ if (!hclge_module_existed(hdev))
+ return -ENXIO;
+
+ while (read_len < len) {
+ data_len = hclge_get_sfp_eeprom_info(hdev,
+ offset + read_len,
+ len - read_len,
+ data + read_len);
+ if (!data_len)
+ return -EIO;
+
+ read_len += data_len;
+ }
+
+ return 0;
+}
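Putting the two functions together: each firmware round-trip moves at most 140 bytes (per the 6-BD comment above, presumably 20 data bytes in BD0 plus 24 bytes in each of BD1..BD5 — an inference, not a documented constant), so hclge_get_module_eeprom() simply loops until the requested length is covered. A sketch of the chunking arithmetic:

#include <stdio.h>

#define BD0_LEN		20	/* data bytes carried by BD0 (assumed) */
#define BDX_LEN		24	/* data bytes per BD1..BD5 (assumed) */
#define MAX_PER_READ	(BD0_LEN + 5 * BDX_LEN)	/* 140 bytes */

int main(void)
{
	unsigned int len = 512, offset = 0;	/* e.g. a full SFP EEPROM dump */

	while (offset < len) {
		unsigned int chunk = len - offset;

		if (chunk > MAX_PER_READ)
			chunk = MAX_PER_READ;
		printf("read %3u bytes at offset %3u\n", chunk, offset);
		offset += chunk;
	}
	return 0;
}

For a 512-byte request this yields four reads of 140, 140, 140 and 92 bytes.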
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -10588,6 +11227,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_vector = hclge_get_vector,
.put_vector = hclge_put_vector,
.set_promisc_mode = hclge_set_promisc_mode,
+ .request_update_promisc_mode = hclge_request_update_promisc_mode,
.set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
@@ -10649,7 +11289,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
.get_fd_rule_info = hclge_get_fd_rule_info,
.get_fd_all_rules = hclge_get_all_rules,
- .restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
@@ -10662,13 +11301,14 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_timer_task = hclge_set_timer_task,
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
- .restore_vlan_table = hclge_restore_vlan_table,
.get_vf_config = hclge_get_vf_config,
.set_vf_link_state = hclge_set_vf_link_state,
.set_vf_spoofchk = hclge_set_vf_spoofchk,
.set_vf_trust = hclge_set_vf_trust,
.set_vf_rate = hclge_set_vf_rate,
.set_vf_mac = hclge_set_vf_mac,
+ .get_module_eeprom = hclge_get_module_eeprom,
+ .get_cmdq_stat = hclge_get_cmdq_stat,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 71df23d5f1b4..46e6e0fef3ba 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -217,6 +217,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
+ HCLGE_STATE_PROMISC_CHANGED,
HCLGE_STATE_RST_FAIL,
HCLGE_STATE_MAX
};
@@ -580,7 +581,6 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
u8 fd_mode;
u16 max_key_length; /* use bit as unit */
- u32 proto_support;
u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
@@ -631,9 +631,15 @@ struct hclge_fd_ad_data {
u16 rule_id;
};
-struct hclge_vport_mac_addr_cfg {
+enum HCLGE_MAC_NODE_STATE {
+ HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ACTIVE
+};
+
+struct hclge_mac_node {
struct list_head node;
- int hd_tbl_status;
+ enum HCLGE_MAC_NODE_STATE state;
u8 mac_addr[ETH_ALEN];
};
@@ -765,12 +771,6 @@ struct hclge_dev {
u16 num_roce_msi; /* Num of roce vectors for this PF */
int roce_base_vector;
- u16 pending_udp_bitmap;
-
- u16 rx_itr_default;
- u16 tx_itr_default;
-
- u16 adminq_work_limit; /* Num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list reset_timer;
@@ -806,6 +806,8 @@ struct hclge_dev {
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
@@ -823,7 +825,6 @@ struct hclge_dev {
u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
- struct mutex umv_mutex; /* protect share_umv_size */
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
@@ -867,6 +868,7 @@ struct hclge_rss_tuple_cfg {
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
HCLGE_VPORT_STATE_MAX
};
@@ -923,6 +925,10 @@ struct hclge_vport {
u32 mps; /* Max packet size */
struct hclge_vf_info vf_info;
+ u8 overflow_promisc_flags;
+ u8 last_promisc_flags;
+
+ spinlock_t mac_list_lock; /* protect the mac addresses pending add/delete */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
struct list_head vlan_list; /* Store VF vlan table */
@@ -978,16 +984,18 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- enum HCLGE_MAC_ADDR_TYPE mac_type);
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- bool is_write_tbl,
- enum HCLGE_MAC_ADDR_TYPE mac_type);
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr);
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+void hclge_restore_mac_table_common(struct hclge_vport *vport);
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7f24fcb4f96a..0874ae47cb03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -5,6 +5,9 @@
#include "hclge_mbx.h"
#include "hnae3.h"
+#define CREATE_TRACE_POINTS
+#include "hclge_trace.h"
+
static u16 hclge_errno_to_resp(int errno)
{
return abs(errno);
@@ -90,6 +93,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+ trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
+
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
@@ -270,26 +275,17 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
if (!is_valid_ether_addr(mac_addr))
return -EINVAL;
- hclge_rm_uc_addr_common(vport, old_addr);
- status = hclge_add_uc_addr_common(vport, mac_addr);
- if (status) {
- hclge_add_uc_addr_common(vport, old_addr);
- } else {
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_UC);
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_UC);
- }
+ spin_lock_bh(&vport->mac_list_lock);
+ status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
+ mac_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+ hclge_task_schedule(hdev, 0);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
- status = hclge_add_uc_addr_common(vport, mac_addr);
- if (!status)
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_UC);
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_UC, mac_addr);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
- status = hclge_rm_uc_addr_common(vport, mac_addr);
- if (!status)
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_UC);
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_UC, mac_addr);
} else {
dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %u\n",
@@ -305,18 +301,13 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
{
const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back;
- int status;
if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
- status = hclge_add_mc_addr_common(vport, mac_addr);
- if (!status)
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_MC);
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_MC, mac_addr);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
- status = hclge_rm_mc_addr_common(vport, mac_addr);
- if (!status)
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_MC);
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_MC, mac_addr);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %u\n",
@@ -324,7 +315,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
return -EIO;
}
- return status;
+ return 0;
}
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
@@ -638,6 +629,23 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
ae_dev->ops->reset_event(hdev->pdev, NULL);
}
+static void hclge_handle_vf_tbl(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vf_vlan_cfg *msg_cmd;
+
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
+ if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(vport, true);
+ } else {
+ dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
+ msg_cmd->subcode);
+ }
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -645,6 +653,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
struct hclge_desc *desc;
+ bool is_del = false;
unsigned int flag;
int ret = 0;
@@ -674,6 +683,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
vport = &hdev->vport[req->mbx_src_vfid];
+ trace_hclge_pf_mbx_get(hdev, req);
+
switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
@@ -731,7 +742,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret = hclge_get_link_info(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get link stat for VF\n",
+ "failed to inform link stat to VF, ret = %d\n",
ret);
break;
case HCLGE_MBX_QUEUE_RESET:
@@ -760,11 +771,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
break;
case HCLGE_MBX_GET_VF_FLR_STATUS:
case HCLGE_MBX_VF_UNINIT:
- hclge_rm_vport_all_mac_table(vport, true,
+ is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
+ hclge_rm_vport_all_mac_table(vport, is_del,
HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, true,
+ hclge_rm_vport_all_mac_table(vport, is_del,
HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, true);
+ hclge_rm_vport_all_vlan_table(vport, is_del);
break;
case HCLGE_MBX_GET_MEDIA_TYPE:
hclge_get_vf_media_type(vport, &resp_msg);
@@ -778,6 +790,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_NCSI_ERROR:
hclge_handle_ncsi_error(hdev);
break;
+ case HCLGE_MBX_HANDLE_VF_TBL:
+ hclge_handle_vf_tbl(vport, req);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 696c5ae922e3..e89820702540 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -155,7 +155,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
ret = mdiobus_register(mdio_bus);
if (ret) {
dev_err(mdio_bus->parent,
- "Failed to register MDIO bus ret = %#x\n", ret);
+ "failed to register MDIO bus, ret = %d\n", ret);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
new file mode 100644
index 000000000000..5b0b71bd6120
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2020 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGE_TRACE_H_ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGE_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_pf_mbx_get,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_pf_mbx_send,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = req->msg.code;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGE_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGE_TRACE_H_ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclge_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index 53804d95ea90..2c26ea607a53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index f38d236ebf4f..fec65239a3c8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -11,9 +11,6 @@
#include "hclgevf_main.h"
#include "hnae3.h"
-#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
-#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
- DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index f830eef02e5c..40d6e602ab51 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -161,8 +161,8 @@ struct hclgevf_query_res_cmd {
#define HCLGEVF_GRO_EN_B 0
struct hclgevf_cfg_gro_status_cmd {
- __le16 gro_en;
- u8 rsv[22];
+ u8 gro_en;
+ u8 rsv[23];
};
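This change keeps the 24-byte command payload but narrows gro_en from __le16 to u8, matching the single byte the firmware apparently consumes (an inference from this fix; the matching cpu_to_le16() store is dropped later in this patch). A quick userspace check that both layouts occupy the same 24 bytes, with uint16_t standing in for __le16:

#include <stdio.h>
#include <stdint.h>

struct gro_cmd_old {		/* before the fix */
	uint16_t gro_en;
	uint8_t rsv[22];
};

struct gro_cmd_new {		/* after the fix */
	uint8_t gro_en;
	uint8_t rsv[23];
};

int main(void)
{
	/* both print 24: only the width of the gro_en field, and hence
	 * the bytes a store to it touches, changes
	 */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct gro_cmd_old), sizeof(struct gro_cmd_new));
	return 0;
}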
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e02d427131ee..1b9578d0bd80 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -46,7 +46,7 @@ static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
HCLGEVF_CMDQ_RX_TAIL_REG,
HCLGEVF_CMDQ_RX_HEAD_REG,
HCLGEVF_VECTOR0_CMDQ_SRC_REG,
- HCLGEVF_CMDQ_INTR_STS_REG,
+ HCLGEVF_VECTOR0_CMDQ_STATE_REG,
HCLGEVF_CMDQ_INTR_EN_REG,
HCLGEVF_CMDQ_INTR_GEN_REG};
@@ -669,8 +669,8 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
u16 tc_size[HCLGEVF_MAX_TC_NUM];
struct hclgevf_desc desc;
u16 roundup_size;
- int status;
unsigned int i;
+ int status;
req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
@@ -1143,7 +1143,6 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
send_msg.en_mc = en_mc_pmc ? 1 : 0;
ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
-
if (ret)
dev_err(&hdev->pdev->dev,
"Set promisc mode fail, status is %d.\n", ret);
@@ -1164,6 +1163,27 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
en_bc_pmc);
}
+static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+}
+
+static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
+ bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
+ int ret;
+
+ if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
+ ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
+ if (!ret)
+ clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+ }
+}
+
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
@@ -1245,10 +1265,12 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
int status;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
- send_msg.subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
- HCLGE_MBX_MAC_VLAN_UC_MODIFY;
+ send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
ether_addr_copy(send_msg.data, new_mac_addr);
- ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
+ if (is_first && !hdev->has_pf_mac)
+ eth_zero_addr(&send_msg.data[ETH_ALEN]);
+ else
+ ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
@@ -1256,54 +1278,302 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
return status;
}
-static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
- const unsigned char *addr)
+static struct hclgevf_mac_addr_node *
+hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
+static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGEVF_MAC_TO_ADD:
+ if (mac_node->state == HCLGEVF_MAC_TO_DEL)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGEVF_MAC_TO_DEL:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ }
+ break;
+ /* only from tmp_add_list, the mac_node->state won't be
+ * HCLGEVF_MAC_ACTIVE
+ */
+ case HCLGEVF_MAC_ACTIVE:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ }
+}
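hclgevf_update_mac_node() is the heart of the bookkeeping: a request against an existing node either cancels out (TO_DEL on a not-yet-sent TO_ADD), downgrades (TO_DEL on an ACTIVE entry), or promotes (TO_ADD or ACTIVE on a pending node). Below is a pure-function model of those transitions, with an extra FREED pseudo-state standing in for the list_del()+kfree() case; names are illustrative, not driver API.

#include <stdio.h>

enum state { TO_ADD, TO_DEL, ACTIVE, FREED };

/* model of the transition rules: 'cur' is the node's current state,
 * 'req' the incoming request
 */
static enum state transition(enum state cur, enum state req)
{
	switch (req) {
	case TO_ADD:	/* re-add: a pending delete becomes a no-op */
		return cur == TO_DEL ? ACTIVE : cur;
	case TO_DEL:	/* delete: an unsent add is simply dropped */
		return cur == TO_ADD ? FREED : TO_DEL;
	case ACTIVE:	/* config succeeded for a pending add */
		return cur == TO_ADD ? ACTIVE : cur;
	default:
		return cur;
	}
}

static const char *name(enum state s)
{
	static const char * const n[] = { "TO_ADD", "TO_DEL", "ACTIVE", "FREED" };

	return n[s];
}

int main(void)
{
	enum state cur, req;

	for (cur = TO_ADD; cur <= ACTIVE; cur++)
		for (req = TO_ADD; req <= ACTIVE; req++)
			printf("%-6s + %-6s -> %s\n",
			       name(cur), name(req),
			       name(transition(cur, req)));
	return 0;
}

Printing the full table makes it easy to verify that no combination leaves a node in an inconsistent state.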
+
+static int hclgevf_update_mac_list(struct hnae3_handle *handle,
+ enum HCLGEVF_MAC_NODE_STATE state,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclge_vf_to_pf_msg send_msg;
+ struct hclgevf_mac_addr_node *mac_node;
+ struct list_head *list;
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_ADD);
- ether_addr_copy(send_msg.data, addr);
- return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ /* if the mac addr is already in the mac list, there is no need to
+ * add a new one; just update the existing node: convert it to a
+ * new state, remove it, or do nothing.
+ */
+ mac_node = hclgevf_find_mac_node(list, addr);
+ if (mac_node) {
+ hclgevf_update_mac_node(mac_node, state);
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+ }
+ /* if this address was never added, there is nothing to delete */
+ if (state == HCLGEVF_MAC_TO_DEL) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOMEM;
+ }
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_UC, addr);
}
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclge_vf_to_pf_msg send_msg;
-
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_REMOVE);
- ether_addr_copy(send_msg.data, addr);
- return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_UC, addr);
}
static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclge_vf_to_pf_msg send_msg;
-
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_ADD);
- ether_addr_copy(send_msg.data, addr);
- return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_MC, addr);
}
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_MC, addr);
+}
+
+static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
+ struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
struct hclge_vf_to_pf_msg send_msg;
+ u8 code, subcode;
+
+ if (mac_type == HCLGEVF_MAC_ADDR_UC) {
+ code = HCLGE_MBX_SET_UNICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
+ } else {
+ code = HCLGE_MBX_SET_MULTICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
+ }
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_REMOVE);
- ether_addr_copy(send_msg.data, addr);
+ hclgevf_build_send_msg(&send_msg, code, subcode);
+ ether_addr_copy(send_msg.data, mac_node->mac_addr);
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
+ struct list_head *list,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to configure mac %pM, state = %d, ret = %d\n",
+ mac_node->mac_addr, mac_node->state, ret);
+ return;
+ }
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+ /* if the mac address from tmp_add_list is not in the
+ * uc/mc_mac_list, a TO_DEL request was received during the
+ * time window of sending the mac config request to the PF.
+ * If the mac_node state is ACTIVE, change it to TO_DEL so
+ * it will be removed next time. If it is TO_ADD, the TO_ADD
+ * request failed, so just remove the mac node.
+ */
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclgevf_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ /* If the mac addr exists in the mac list, a new TO_ADD
+ * request was received during the time window of sending
+ * the mac addr config request to the PF, so just change
+ * the mac state to ACTIVE.
+ */
+ new_node->state = HCLGEVF_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
+static void hclgevf_clear_list(struct list_head *list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+}
+
+static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ /* move the mac addrs to the tmp_add_list and tmp_del_list, so
+ * we can add/delete these mac addrs outside the spin lock
+ */
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGEVF_MAC_TO_DEL:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGEVF_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
+ break;
+ }
+ }
+
+stop_traverse:
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
+ hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
+
+ /* if adding/deleting some mac addresses failed, move them back to
+ * the mac_list and retry at the next service task.
+ */
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_sync_from_del_list(&tmp_del_list, list);
+ hclgevf_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
+static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
+{
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
+}
+
+static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
+{
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
+ hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id,
bool is_kill)
@@ -1506,10 +1776,6 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
- if (ret)
- return ret;
-
/* clear handshake status with IMP */
hclgevf_reset_handshake(hdev, false);
@@ -1559,7 +1825,7 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
- hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
@@ -1589,13 +1855,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
- /* Initialize ae_dev reset status as well, in case enet layer wants to
- * know if device is undergoing reset
- */
- ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.rst_cnt++;
rtnl_lock();
@@ -1610,7 +1871,6 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
hdev->rst_stats.hw_rst_done_cnt++;
@@ -1625,7 +1885,6 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
}
hdev->last_reset_time = jiffies;
- ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++;
hdev->rst_stats.rst_fail_cnt = 0;
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
@@ -1951,6 +2210,10 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
hclgevf_sync_vlan_filter(hdev);
+ hclgevf_sync_mac_table(hdev);
+
+ hclgevf_sync_promisc_mode(hdev);
+
hdev->last_serv_processed = jiffies;
out:
@@ -1986,7 +2249,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
/* fetch the events from their corresponding regs */
cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
- HCLGEVF_VECTOR0_CMDQ_STAT_REG);
+ HCLGEVF_VECTOR0_CMDQ_STATE_REG);
if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
@@ -2139,7 +2402,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
false);
req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
- req->gro_en = cpu_to_le16(en ? 1 : 0);
+ req->gro_en = en ? 1 : 0;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -2313,6 +2576,10 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
+ spin_lock_init(&hdev->mac_table.mac_list_lock);
+ INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
+ INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
+
/* bring the device down */
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}
@@ -2445,6 +2712,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
+ int rst_cnt = hdev->rst_stats.rst_cnt;
int ret;
ret = client->ops->init_instance(&hdev->nic);
@@ -2452,6 +2720,14 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
return ret;
set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.rst_cnt) {
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+
+ client->ops->uninit_instance(&hdev->nic, 0);
+ return -EBUSY;
+ }
+
hnae3_set_client_init_flag(client, ae_dev, 1);
if (netif_msg_drv(&hdev->nic))
@@ -2695,6 +2971,15 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
return ret;
}
+static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
+ HCLGE_MBX_VPORT_LIST_CLEAR);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -2730,6 +3015,8 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -2802,6 +3089,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ /* ensure the vf tbl list is empty before init */
+ ret = hclgevf_clear_vport_list(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to clear tbl list configuration, ret = %d.\n",
+ ret);
+ goto err_config;
+ }
+
ret = hclgevf_init_vlan_config(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -2846,6 +3142,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_pci_uninit(hdev);
hclgevf_cmd_uninit(hdev);
+ hclgevf_uninit_mac_list(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -3213,6 +3510,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_timer_task = hclgevf_set_timer_task,
.get_link_mode = hclgevf_get_link_mode,
.set_promisc_mode = hclgevf_set_promisc_mode,
+ .request_update_promisc_mode = hclgevf_request_update_promisc_mode,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 3b88d866facc..c1fac8920ae3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -42,8 +42,6 @@
#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020
#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024
#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028
-#define HCLGEVF_CMDQ_INTR_SRC_REG 0x27100
-#define HCLGEVF_CMDQ_INTR_STS_REG 0x27104
#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
@@ -88,7 +86,7 @@
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* Vector0 interrupt CMDQ event status register(RO) */
-#define HCLGEVF_VECTOR0_CMDQ_STAT_REG 0x27104
+#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -148,6 +146,7 @@ enum hclgevf_states {
HCLGEVF_STATE_MBX_HANDLING,
HCLGEVF_STATE_CMD_DISABLE,
HCLGEVF_STATE_LINK_UPDATING,
+ HCLGEVF_STATE_PROMISC_CHANGED,
HCLGEVF_STATE_RST_FAIL,
};
@@ -234,6 +233,29 @@ struct hclgevf_rst_stats {
u32 rst_fail_cnt; /* the number of VF reset fail */
};
+enum HCLGEVF_MAC_ADDR_TYPE {
+ HCLGEVF_MAC_ADDR_UC,
+ HCLGEVF_MAC_ADDR_MC
+};
+
+enum HCLGEVF_MAC_NODE_STATE {
+ HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ACTIVE
+};
+
+struct hclgevf_mac_addr_node {
+ struct list_head node;
+ enum HCLGEVF_MAC_NODE_STATE state;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct hclgevf_mac_table_cfg {
+ spinlock_t mac_list_lock; /* protect the mac addresses pending add/delete */
+ struct list_head uc_mac_list;
+ struct list_head mc_mac_list;
+};
+
struct hclgevf_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@@ -256,7 +278,7 @@ struct hclgevf_dev {
struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
- u16 num_tqps; /* num task queue pairs of this PF */
+ u16 num_tqps; /* num task queue pairs of this VF */
u16 alloc_rss_size; /* allocated RSS task queue */
u16 rss_size_max; /* HW defined max RSS task queue */
@@ -282,6 +304,8 @@ struct hclgevf_dev {
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+ struct hclgevf_mac_table_cfg mac_table;
+
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 9b8154955f91..5b2dcd97c107 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -5,6 +5,9 @@
#include "hclgevf_main.h"
#include "hnae3.h"
+#define CREATE_TRACE_POINTS
+#include "hclgevf_trace.h"
+
static int hclgevf_resp_to_errno(u16 resp_code)
{
return resp_code ? -resp_code : 0;
@@ -106,6 +109,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
+ trace_hclge_vf_mbx_send(hdev, req);
+
/* synchronous send */
if (need_resp) {
mutex_lock(&hdev->mbx_resp.mbx_mutex);
@@ -179,6 +184,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
continue;
}
+ trace_hclge_vf_mbx_get(hdev, req);
+
/* synchronous messages are time critical and need preferential
* treatment. Therefore, we need to acknowledge all the sync
* responses as quickly as possible so that waiting tasks do not
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
new file mode 100644
index 000000000000..e4bfb6191fef
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H_ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGEVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGEVF_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define VF_GET_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+#define VF_SEND_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_vf_mbx_get,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = req->msg.code;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, VF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_vf_mbx_send,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, VF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGEVF_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H_ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclgevf_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index fe88ab88cacc..32a011ca44c3 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -4,4 +4,4 @@ obj-$(CONFIG_HINIC) += hinic.o
hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
- hinic_common.o hinic_ethtool.o
+ hinic_common.o hinic_ethtool.o hinic_hw_mbox.o hinic_sriov.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index a209b14160cc..48b40be3e84d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -16,6 +16,7 @@
#include "hinic_hw_dev.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
+#include "hinic_sriov.h"
#define HINIC_DRV_NAME "hinic"
@@ -23,6 +24,7 @@ enum hinic_flags {
HINIC_LINK_UP = BIT(0),
HINIC_INTF_UP = BIT(1),
HINIC_RSS_ENABLE = BIT(2),
+ HINIC_LINK_DOWN = BIT(3),
};
struct hinic_rx_mode_work {
@@ -67,6 +69,8 @@ struct hinic_dev {
struct hinic_txq *txqs;
struct hinic_rxq *rxqs;
+ u16 sq_depth;
+ u16 rq_depth;
struct hinic_txq_stats tx_stats;
struct hinic_rxq_stats rx_stats;
@@ -78,6 +82,7 @@ struct hinic_dev {
struct hinic_rss_type rss_type;
u8 *rss_hkey_user;
s32 *rss_indir_user;
+ struct hinic_sriov_info sriov_info;
};
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 966aea949c0b..efb02e03e7da 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -33,6 +33,99 @@
#include "hinic_rx.h"
#include "hinic_dev.h"
+#define SET_LINK_STR_MAX_LEN 128
+
+#define GET_SUPPORTED_MODE 0
+#define GET_ADVERTISED_MODE 1
+
+#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= SUPPORTED_##mode)
+#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= ADVERTISED_##mode)
+
+struct hw2ethtool_link_mode {
+ enum ethtool_link_mode_bit_indices link_mode_bit;
+ u32 speed;
+ enum hinic_link_mode hw_link_mode;
+};
+
+struct cmd_link_settings {
+ u64 supported;
+ u64 advertising;
+
+ u32 speed;
+ u8 duplex;
+ u8 port;
+ u8 autoneg;
+};
+
+static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
+ SPEED_10, SPEED_100,
+ SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000,
+ SPEED_100000
+};
+
+static struct hw2ethtool_link_mode
+ hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
+ .hw_link_mode = HINIC_10GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
+ .hw_link_mode = HINIC_GE_BASE_KX,
+ },
+};
+
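+/* For example, ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, 0) ORs
+ * (1UL << ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) into ecmd->supported,
+ * per the first table entry above.
+ */
+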
static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
enum hinic_speed speed)
{
@@ -71,18 +164,91 @@ static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
}
}
+static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
+{
+ int i = 0;
+
+ for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
+ if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
+ break;
+ }
+
+ return i;
+}
+
+static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
+ enum hinic_link_mode hw_link_mode,
+ u32 name)
+{
+ enum hinic_link_mode link_mode;
+ int idx = 0;
+
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (hw_link_mode & ((u32)1 << link_mode)) {
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (name == GET_SUPPORTED_MODE)
+ ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
+ (link_settings, idx);
+ else
+ ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
+ (link_settings, idx);
+ }
+ }
+}
+
+static void hinic_link_port_type(struct cmd_link_settings *link_settings,
+ enum hinic_port_type port_type)
+{
+ switch (port_type) {
+ case HINIC_PORT_ELEC:
+ case HINIC_PORT_TP:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
+ link_settings->port = PORT_TP;
+ break;
+
+ case HINIC_PORT_AOC:
+ case HINIC_PORT_FIBRE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_FIBRE;
+ break;
+
+ case HINIC_PORT_COPPER:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_DA;
+ break;
+
+ case HINIC_PORT_BACKPLANE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
+ link_settings->port = PORT_NONE;
+ break;
+
+ default:
+ link_settings->port = PORT_OTHER;
+ break;
+ }
+}
+
static int hinic_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings
*link_ksettings)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct hinic_pause_config pause_info = { 0 };
+ struct cmd_link_settings settings = { 0 };
enum hinic_port_link_state link_state;
struct hinic_port_cap port_cap;
int err;
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
- Autoneg);
link_ksettings->base.speed = SPEED_UNKNOWN;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
@@ -92,14 +258,19 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
if (err)
return err;
+ hinic_link_port_type(&settings, port_cap.port_type);
+ link_ksettings->base.port = settings.port;
+
err = hinic_port_link_state(nic_dev, &link_state);
if (err)
return err;
- if (link_state != HINIC_LINK_STATE_UP)
- return err;
-
- set_link_speed(link_ksettings, port_cap.speed);
+ if (link_state == HINIC_LINK_STATE_UP) {
+ set_link_speed(link_ksettings, port_cap.speed);
+ link_ksettings->base.duplex =
+ (port_cap.duplex == HINIC_DUPLEX_FULL) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
ethtool_link_ksettings_add_link_mode(link_ksettings,
@@ -108,11 +279,243 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
link_ksettings->base.autoneg = AUTONEG_ENABLE;
- link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ?
- DUPLEX_FULL : DUPLEX_HALF;
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return -EIO;
+
+ hinic_add_ethtool_link_mode(&settings, link_mode.supported,
+ GET_SUPPORTED_MODE);
+ hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
+ GET_ADVERTISED_MODE);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
+ if (err)
+ return err;
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
+ if (pause_info.rx_pause && pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ } else if (pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ } else if (pause_info.rx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ }
+ }
+
+ bitmap_copy(link_ksettings->link_modes.supported,
+ (unsigned long *)&settings.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_copy(link_ksettings->link_modes.advertising,
+ (unsigned long *)&settings.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
return 0;
}
+static int hinic_ethtool_to_hw_speed_level(u32 speed)
+{
+ int i;
+
+ for (i = 0; i < LINK_SPEED_LEVELS; i++) {
+ if (hw_to_ethtool_speed[i] == speed)
+ break;
+ }
+
+ return i;
+}
+
+static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
+ u32 speed)
+{
+ enum hinic_link_mode link_mode;
+ int idx;
+
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (!(supported_link & ((u32)1 << link_mode)))
+ continue;
+
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (hw_to_ethtool_link_mode_table[idx].speed == speed)
+ return true;
+ }
+
+ return false;
+}
+
+static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
+{
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err;
+
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err)
+ return false;
+
+ if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return false;
+
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ if (speed_level >= LINK_SPEED_LEVELS ||
+ !hinic_is_support_speed(link_mode.supported, speed)) {
+ netif_err(nic_dev, drv, netdev,
+ "Unsupported speed: %d\n", speed);
+ return false;
+ }
+
+ return true;
+}
+
+static int get_link_settings_type(struct hinic_dev *nic_dev,
+ u8 autoneg, u32 speed, u32 *set_settings)
+{
+ struct hinic_port_cap port_cap = { 0 };
+ int err;
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err)
+ return err;
+
+ /* always set autonegotiation */
+ if (port_cap.autoneg_cap)
+ *set_settings |= HILINK_LINK_SET_AUTONEG;
+
+ if (autoneg == AUTONEG_ENABLE) {
+ if (!port_cap.autoneg_cap) {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Autoneg is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (speed != (u32)SPEED_UNKNOWN) {
+ /* set speed only when autoneg is disabled */
+ if (!hinic_is_speed_legal(nic_dev, speed))
+ return -EINVAL;
+ *set_settings |= HILINK_LINK_SET_SPEED;
+ } else {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Speed must be set when autoneg is off\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg,
+ u32 speed)
+{
+ enum nic_speed_level speed_level = 0;
+ int err = 0;
+
+ if (set_settings & HILINK_LINK_SET_AUTONEG) {
+ err = hinic_set_autoneg(nic_dev->hwdev,
+ (autoneg == AUTONEG_ENABLE));
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "Failed to %s autoneg\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "enable" : "disable");
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "%sd autoneg successfully\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "Enable" : "Disable");
+ }
+
+ if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = hinic_set_speed(nic_dev->hwdev, speed_level);
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
+ speed);
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
+ speed);
+ }
+
+ return err;
+}
+
+static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg, u32 speed)
+{
+ struct hinic_link_ksettings_info settings = {0};
+ char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err, len;
+
+ err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, "%s",
+ (set_settings & HILINK_LINK_SET_AUTONEG) ?
+ (autoneg ? "autoneg enable " : "autoneg disable ") : "");
+ if (err < 0 || err >= SET_LINK_STR_MAX_LEN) {
+ netif_err(nic_dev, drv, netdev, "Failed to build link state string: snprintf returned %d, buffer size %d\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+
+ if (set_settings & HILINK_LINK_SET_SPEED) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ /* append to the buffer rather than passing it to itself as a
+ * "%s" argument, which is undefined behaviour for snprintf()
+ */
+ len = strlen(set_link_str);
+ err = snprintf(set_link_str + len,
+ SET_LINK_STR_MAX_LEN - len, "speed %d ", speed);
+ if (err <= 0 || err >= SET_LINK_STR_MAX_LEN - len) {
+ netif_err(nic_dev, drv, netdev, "Failed to build link speed string: snprintf returned %d, buffer size %d\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+ }
+
+ settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
+ settings.valid_bitmap = set_settings;
+ settings.autoneg = autoneg;
+ settings.speed = speed_level;
+
+ err = hinic_set_link_settings(nic_dev->hwdev, &settings);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
+ if (err)
+ netif_err(nic_dev, drv, netdev, "Set %s failed\n",
+ set_link_str);
+ else
+ netif_info(nic_dev, drv, netdev, "Set %s successfully\n",
+ set_link_str);
+
+ return err;
+ }
+
+ return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
+ speed);
+}
+
+static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u32 set_settings = 0;
+ int err;
+
+ err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
+ if (err)
+ return err;
+
+ if (set_settings)
+ err = hinic_set_settings_to_hw(nic_dev, set_settings,
+ autoneg, speed);
+ else
+ netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");
+
+ return err;
+}
+
+static int hinic_set_link_ksettings(struct net_device *netdev, const struct
+ ethtool_link_ksettings *link_settings)
+{
+ /* only autoneg and speed can be set */
+ return set_link_settings(netdev, link_settings->base.autoneg,
+ link_settings->base.speed);
+}
+
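+/* Only base.autoneg and base.speed are consumed above; the equivalent
+ * standard ethtool invocations from userspace would be, for example:
+ *
+ * ethtool -s <ifname> autoneg off speed 25000
+ * ethtool -s <ifname> autoneg on
+ */
+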
static void hinic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -135,26 +538,118 @@ static void hinic_get_drvinfo(struct net_device *netdev,
static void hinic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
- ring->rx_max_pending = HINIC_RQ_DEPTH;
- ring->tx_max_pending = HINIC_SQ_DEPTH;
- ring->rx_pending = HINIC_RQ_DEPTH;
- ring->tx_pending = HINIC_SQ_DEPTH;
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->rx_pending = nic_dev->rq_depth;
+ ring->tx_pending = nic_dev->sq_depth;
+}
+
+static int check_ringparam_valid(struct hinic_dev *nic_dev,
+ struct ethtool_ringparam *ring)
+{
+ if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Unsupported rx_jumbo_pending/rx_mini_pending\n");
+ return -EINVAL;
+ }
+
+ if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
+ ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Queue depth out of range [%d-%d]\n",
+ HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+
+ return 0;
}
+static int hinic_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 new_sq_depth, new_rq_depth;
+ int err;
+
+ err = check_ringparam_valid(nic_dev, ring);
+ if (err)
+ return err;
+
+ new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
+ new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
+
+ if (new_sq_depth == nic_dev->sq_depth &&
+ new_rq_depth == nic_dev->rq_depth)
+ return 0;
+
+ netif_info(nic_dev, drv, netdev,
+ "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
+ nic_dev->sq_depth, nic_dev->rq_depth,
+ new_sq_depth, new_rq_depth);
+
+ nic_dev->sq_depth = new_sq_depth;
+ nic_dev->rq_depth = new_rq_depth;
+
+ if (netif_running(netdev)) {
+ netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ err = hinic_close(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
+ }
+
+ err = hinic_open(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
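+/* The requested counts are rounded down to a power of two via ilog2(): a
+ * request for 3000 Tx descriptors, for instance, yields a depth of
+ * 1U << ilog2(3000) == 2048. From userspace: ethtool -G <ifname> tx 3000.
+ */
+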
static void hinic_get_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
- channels->max_rx = hwdev->nic_cap.max_qps;
- channels->max_tx = hwdev->nic_cap.max_qps;
- channels->max_other = 0;
- channels->max_combined = 0;
- channels->rx_count = hinic_hwdev_num_qps(hwdev);
- channels->tx_count = hinic_hwdev_num_qps(hwdev);
- channels->other_count = 0;
- channels->combined_count = 0;
+ channels->max_combined = nic_dev->max_qps;
+ channels->combined_count = hinic_hwdev_num_qps(hwdev);
+}
+
+static int hinic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ unsigned int count = channels->combined_count;
+ int err;
+
+ netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
+ hinic_hwdev_num_qps(nic_dev->hwdev), count);
+
+ if (netif_running(netdev)) {
+ netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ hinic_close(netdev);
+
+ nic_dev->hwdev->nic_cap.num_qps = count;
+
+ err = hinic_open(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ return -EFAULT;
+ }
+ } else {
+ nic_dev->hwdev->nic_cap.num_qps = count;
+ }
+
+ return 0;
}
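+/* Only combined channels are exposed; e.g. ethtool -L <ifname> combined 8
+ * closes the netdev if it is running, updates nic_cap.num_qps and reopens it.
+ */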
static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
@@ -741,10 +1236,13 @@ static void hinic_get_strings(struct net_device *netdev,
static const struct ethtool_ops hinic_ethtool_ops = {
.get_link_ksettings = hinic_get_link_ksettings,
+ .set_link_ksettings = hinic_set_link_ksettings,
.get_drvinfo = hinic_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = hinic_get_ringparam,
+ .set_ringparam = hinic_set_ringparam,
.get_channels = hinic_get_channels,
+ .set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
.set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 5f2d57d1b2d3..cb5b6e5f787f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -64,7 +64,7 @@
#define CMDQ_WQE_SIZE 64
#define CMDQ_DEPTH SZ_4K
-#define CMDQ_WQ_PAGE_SIZE SZ_4K
+#define CMDQ_WQ_PAGE_SIZE SZ_256K
#define WQE_LCMD_SIZE 64
#define WQE_SCMD_SIZE 64
@@ -705,7 +705,7 @@ static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
/* The data in the HW is in Big Endian Format */
wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
- pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
+ pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);
ctxt_info->curr_wqe_page_pfn =
HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
@@ -714,16 +714,19 @@ static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
- /* block PFN - Read Modify Write */
- cmdq_first_block_paddr = cmdq_pages->page_paddr;
+ if (wq->num_q_pages != 1) {
+ /* block PFN - Read Modify Write */
+ cmdq_first_block_paddr = cmdq_pages->page_paddr;
- pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
+ pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
+ }
ctxt_info->wq_block_pfn =
HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
+ cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif);
cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
}
@@ -795,11 +798,6 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
size_t cmdq_ctxts_size;
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI function type\n");
- return -EINVAL;
- }
-
cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
if (!cmdq_ctxts)
@@ -851,6 +849,25 @@ err_init_cmdq:
return err;
}
+static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+
+ hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
+ hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_HWCTXT_SET,
+ &hw_ioctxt, sizeof(hw_ioctxt), NULL,
+ NULL, HINIC_MGMT_MSG_SYNC);
+}
+
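+/* The depth is passed to the firmware as a power-of-two exponent: with
+ * CMDQ_DEPTH == SZ_4K, hw_ioctxt.cmdq_depth is ilog2(4096) == 12.
+ */
+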
/**
* hinic_init_cmdqs - init all cmdqs
* @cmdqs: cmdqs to init
@@ -901,8 +918,18 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
cmdq_ceq_handler);
+
+ err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
+ goto err_set_cmdq_depth;
+ }
+
return 0;
+err_set_cmdq_depth:
+ hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
+
err_cmdq_ctxt:
hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
HINIC_MAX_CMDQ_TYPES);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index 7a434b653faa..3e4b0aef9fe6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -122,7 +122,7 @@ struct hinic_cmdq_ctxt {
u16 func_idx;
u8 cmdq_type;
- u8 rsvd1[1];
+ u8 ppf_idx;
u8 rsvd2[4];
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
index cdec1d0a3962..7e84e4e33fff 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
@@ -10,7 +10,7 @@
/* HW interface registers */
#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
-
+#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8
#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index c7c75b772a86..0245da02efbb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -15,7 +15,9 @@
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/err.h>
+#include <linux/netdevice.h>
+#include "hinic_sriov.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_mgmt.h"
@@ -42,24 +44,6 @@ enum io_status {
IO_RUNNING = 1,
};
-enum hw_ioctxt_set_cmdq_depth {
- HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
-};
-
-/* HW struct */
-struct hinic_dev_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 rsvd1[5];
- u8 intr_type;
- u8 rsvd2[66];
- u16 max_sqs;
- u16 max_rqs;
- u8 rsvd3[208];
-};
-
/**
* get_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
@@ -67,16 +51,13 @@ struct hinic_dev_cap {
*
* Return 0 - Success, negative - Failure
**/
-static int get_capability(struct hinic_hwdev *hwdev,
- struct hinic_dev_cap *dev_cap)
+static int parse_capability(struct hinic_hwdev *hwdev,
+ struct hinic_dev_cap *dev_cap)
{
struct hinic_cap *nic_cap = &hwdev->nic_cap;
int num_aeqs, num_ceqs, num_irqs;
- if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif))
- return -EINVAL;
-
- if (dev_cap->intr_type != INTR_MSIX_TYPE)
+ if (!HINIC_IS_VF(hwdev->hwif) && dev_cap->intr_type != INTR_MSIX_TYPE)
return -EFAULT;
num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
@@ -89,13 +70,19 @@ static int get_capability(struct hinic_hwdev *hwdev,
if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)
nic_cap->num_qps = HINIC_Q_CTXT_MAX;
- nic_cap->max_qps = dev_cap->max_sqs + 1;
- if (nic_cap->max_qps != (dev_cap->max_rqs + 1))
- return -EFAULT;
+ if (!HINIC_IS_VF(hwdev->hwif))
+ nic_cap->max_qps = dev_cap->max_sqs + 1;
+ else
+ nic_cap->max_qps = dev_cap->max_sqs;
if (nic_cap->num_qps > nic_cap->max_qps)
nic_cap->num_qps = nic_cap->max_qps;
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ nic_cap->max_vf = dev_cap->max_vf;
+ nic_cap->max_vf_qps = dev_cap->max_vf_sqs + 1;
+ }
+
return 0;
}
@@ -105,27 +92,26 @@ static int get_capability(struct hinic_hwdev *hwdev,
*
* Return 0 - Success, negative - Failure
**/
-static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev)
+static int get_capability(struct hinic_pfhwdev *pfhwdev)
{
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_dev_cap dev_cap;
- u16 in_len, out_len;
+ u16 out_len;
int err;
- in_len = 0;
out_len = sizeof(dev_cap);
err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM,
- HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap,
- &out_len, HINIC_MGMT_MSG_SYNC);
+ HINIC_CFG_NIC_CAP, &dev_cap, sizeof(dev_cap),
+ &dev_cap, &out_len, HINIC_MGMT_MSG_SYNC);
if (err) {
dev_err(&pdev->dev, "Failed to get capability from FW\n");
return err;
}
- return get_capability(hwdev, &dev_cap);
+ return parse_capability(hwdev, &dev_cap);
}
/**
@@ -144,15 +130,14 @@ static int get_dev_cap(struct hinic_hwdev *hwdev)
switch (HINIC_FUNC_TYPE(hwif)) {
case HINIC_PPF:
case HINIC_PF:
+ case HINIC_VF:
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = get_cap_from_fw(pfhwdev);
+ err = get_capability(pfhwdev);
if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
+ dev_err(&pdev->dev, "Failed to get capability\n");
return err;
}
break;
-
default:
dev_err(&pdev->dev, "Unsupported PCI Function type\n");
return -EINVAL;
@@ -225,15 +210,8 @@ static void disable_msix(struct hinic_hwdev *hwdev)
int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd,
@@ -241,6 +219,19 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
HINIC_MGMT_MSG_SYNC);
}
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_HILINK, cmd,
+ buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+}
+
/**
* init_fw_ctxt- Init Firmware tables before network mgmt and io operations
* @hwdev: the NIC HW device
@@ -252,14 +243,9 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_cmd_fw_ctxt fw_ctxt;
- u16 out_size;
+ u16 out_size = sizeof(fw_ctxt);
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
@@ -283,19 +269,13 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
*
* Return 0 - Success, negative - Failure
**/
-static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
- unsigned int sq_depth)
+static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth,
+ unsigned int rq_depth)
{
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_cmd_hw_ioctxt hw_ioctxt;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
@@ -374,11 +354,6 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
struct hinic_pfhwdev *pfhwdev;
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
/* sleep 100ms to wait for the firmware to stop I/O */
msleep(100);
@@ -410,14 +385,8 @@ static int set_resources_state(struct hinic_hwdev *hwdev,
{
struct hinic_cmd_set_res_state res_state;
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
res_state.state = state;
@@ -441,8 +410,8 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
{
struct hinic_cmd_base_qpn cmd_base_qpn;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(cmd_base_qpn);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -466,7 +435,7 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
*
* Return 0 - Success, negative - Failure
**/
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
{
struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
struct hinic_cap *nic_cap = &hwdev->nic_cap;
@@ -488,6 +457,9 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
ceq_msix_entries = &hwdev->msix_entries[num_aeqs];
+ func_to_io->hwdev = hwdev;
+ func_to_io->sq_depth = sq_depth;
+ func_to_io->rq_depth = rq_depth;
err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
ceq_msix_entries);
@@ -513,7 +485,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
hinic_db_state_set(hwif, HINIC_DB_ENABLE);
}
- err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH);
+ err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);
if (err) {
dev_err(&pdev->dev, "Failed to set HW IO ctxt\n");
goto err_hw_ioctxt;
@@ -558,17 +530,10 @@ void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
u16 in_size, void *buf_out,
u16 *out_size))
{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
struct hinic_nic_cb *nic_cb;
u8 cmd_cb;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return;
- }
-
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
@@ -588,15 +553,12 @@ void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
enum hinic_mgmt_msg_cmd cmd)
{
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
struct hinic_nic_cb *nic_cb;
u8 cmd_cb;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
+ if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif))
return;
- }
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
@@ -676,10 +638,23 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
return err;
}
- hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
- pfhwdev, nic_mgmt_msg_handler);
+ err = hinic_func_to_func_init(hwdev);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
+ hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
+ return err;
+ }
+
+ if (!HINIC_IS_VF(hwif))
+ hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_L2NIC, pfhwdev,
+ nic_mgmt_msg_handler);
+ else
+ hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ nic_mgmt_msg_handler);
hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
+
return 0;
}
@@ -693,11 +668,43 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
- hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC);
+ if (!HINIC_IS_VF(hwdev->hwif))
+ hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_L2NIC);
+ else
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+
+ hinic_func_to_func_free(hwdev);
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
}
+static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmd_l2nic_reset l2nic_reset = {0};
+ u16 out_size = sizeof(l2nic_reset);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ l2nic_reset.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ /* 0 represents standard l2nic reset flow */
+ l2nic_reset.reset_flag = 0;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_L2NIC_RESET, &l2nic_reset,
+ sizeof(l2nic_reset), &l2nic_reset,
+ &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || l2nic_reset.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, l2nic_reset.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* hinic_init_hwdev - Initialize the NIC HW
* @pdev: the NIC pci device
@@ -723,12 +730,6 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
return ERR_PTR(err);
}
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- err = -EFAULT;
- goto err_func_type;
- }
-
pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL);
if (!pfhwdev) {
err = -ENOMEM;
@@ -766,12 +767,22 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
goto err_init_pfhwdev;
}
+ err = hinic_l2nic_reset(hwdev);
+ if (err)
+ goto err_l2nic_reset;
+
err = get_dev_cap(hwdev);
if (err) {
dev_err(&pdev->dev, "Failed to get device capabilities\n");
goto err_dev_cap;
}
+ err = hinic_vf_func_init(hwdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init nic mbox\n");
+ goto err_vf_func_init;
+ }
+
err = init_fw_ctxt(hwdev);
if (err) {
dev_err(&pdev->dev, "Failed to init function table\n");
@@ -788,6 +799,9 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
err_resources_state:
err_init_fw_ctxt:
+ hinic_vf_func_free(hwdev);
+err_vf_func_init:
+err_l2nic_reset:
err_dev_cap:
free_pfhwdev(pfhwdev);
@@ -799,7 +813,6 @@ err_aeqs_init:
err_init_msix:
err_pfhwdev_alloc:
-err_func_type:
hinic_free_hwif(hwif);
return ERR_PTR(err);
}
@@ -930,15 +943,9 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
{
struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
struct hinic_cmd_hw_ci hw_ci;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
hw_ci.dma_attr_off = 0;
hw_ci.pending_limit = pending_limit;
hw_ci.coalesc_timer = coalesc_timer;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 66fd2340d447..71ea7e46dbbc 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -16,18 +16,33 @@
#include "hinic_hw_mgmt.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_io.h"
+#include "hinic_hw_mbox.h"
#define HINIC_MAX_QPS 32
#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \
HINIC_MGMT_MSG_CMD_BASE)
+#define HINIC_PF_SET_VF_ALREADY 0x4
+#define HINIC_MGMT_STATUS_EXIST 0x6
+#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
+
struct hinic_cap {
u16 max_qps;
u16 num_qps;
+ u8 max_vf;
+ u16 max_vf_qps;
+};
+
+enum hw_ioctxt_set_cmdq_depth {
+ HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
+ HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE,
};
enum hinic_port_cmd {
+ HINIC_PORT_CMD_VF_REGISTER = 0x0,
+ HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
+
HINIC_PORT_CMD_CHANGE_MTU = 2,
HINIC_PORT_CMD_ADD_VLAN = 3,
@@ -39,6 +54,9 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_SET_RX_MODE = 12,
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 20,
+ HINIC_PORT_CMD_SET_PAUSE_INFO = 21,
+
HINIC_PORT_CMD_GET_LINK_STATE = 24,
HINIC_PORT_CMD_SET_LRO = 25,
@@ -77,19 +95,45 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_FWCTXT_INIT = 69,
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK = 78,
+
HINIC_PORT_CMD_GET_MGMT_VERSION = 88,
HINIC_PORT_CMD_SET_FUNC_STATE = 93,
HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
+ HINIC_PORT_CMD_SET_VF_RATE = 105,
+
+ HINIC_PORT_CMD_SET_VF_VLAN = 106,
+
+ HINIC_PORT_CMD_CLR_VF_VLAN,
+
HINIC_PORT_CMD_SET_TSO = 112,
HINIC_PORT_CMD_SET_RQ_IQ_MAP = 115,
+ HINIC_PORT_CMD_LINK_STATUS_REPORT = 160,
+
+ HINIC_PORT_CMD_UPDATE_MAC = 164,
+
HINIC_PORT_CMD_GET_CAP = 170,
+ HINIC_PORT_CMD_GET_LINK_MODE = 217,
+
+ HINIC_PORT_CMD_SET_SPEED = 218,
+
+ HINIC_PORT_CMD_SET_AUTONEG = 219,
+
HINIC_PORT_CMD_SET_LRO_TIMER = 244,
+
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 249,
+};
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
};
enum hinic_ucode_cmd {
@@ -191,6 +235,17 @@ struct hinic_cmd_set_res_state {
u32 rsvd2;
};
+struct hinic_ceq_ctrl_reg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+};
+
struct hinic_cmd_base_qpn {
u8 status;
u8 version;
@@ -219,12 +274,22 @@ struct hinic_cmd_hw_ci {
u64 ci_addr;
};
+struct hinic_cmd_l2nic_reset {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 reset_flag;
+};
+
struct hinic_hwdev {
struct hinic_hwif *hwif;
struct msix_entry *msix_entries;
struct hinic_aeqs aeqs;
struct hinic_func_to_io func_to_io;
+ struct hinic_mbox_func_to_func *func_to_func;
struct hinic_cap nic_cap;
};
@@ -246,6 +311,25 @@ struct hinic_pfhwdev {
struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD];
};
+struct hinic_dev_cap {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 rsvd1[5];
+ u8 intr_type;
+ u8 max_cos_id;
+ u8 er_id;
+ u8 port_id;
+ u8 max_vf;
+ u8 rsvd2[62];
+ u16 max_sqs;
+ u16 max_rqs;
+ u16 max_vf_sqs;
+ u16 max_vf_rqs;
+ u8 rsvd3[204];
+};
+
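+/* Both the old and the new layout total 292 bytes: rsvd2 shrinks from 66 to
+ * 62 and rsvd3 from 208 to 204 so that the new cos/er/port/VF fields fit
+ * without changing the size of the firmware reply.
+ */
+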
void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
enum hinic_mgmt_msg_cmd cmd, void *handle,
void (*handler)(void *handle, void *buf_in,
@@ -259,7 +343,11 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
void *buf_in, u16 in_size, void *buf_out,
u16 *out_size);
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev);
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index c0b6bcb067cd..397936cac304 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -17,6 +17,7 @@
#include <asm/byteorder.h>
#include <asm/barrier.h>
+#include "hinic_hw_dev.h"
#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
@@ -416,11 +417,11 @@ static irqreturn_t ceq_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static void set_ctrl0(struct hinic_eq *eq)
+static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr)
{
struct msix_entry *msix_entry = &eq->msix_entry;
enum hinic_eq_type type = eq->type;
- u32 addr, val, ctrl0;
+ u32 val, ctrl0;
if (type == HINIC_AEQ) {
/* RMW Ctrl0 */
@@ -440,9 +441,7 @@ static void set_ctrl0(struct hinic_eq *eq)
HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);
val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
+ } else {
/* RMW Ctrl0 */
addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
@@ -462,16 +461,28 @@ static void set_ctrl0(struct hinic_eq *eq)
HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);
val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
}
+ return val;
}
-static void set_ctrl1(struct hinic_eq *eq)
+static void set_ctrl0(struct hinic_eq *eq)
{
+ u32 val, addr;
+
+ if (eq->type == HINIC_AEQ)
+ addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+ else
+ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
+
+ val = get_ctrl0_val(eq, addr);
+
+ hinic_hwif_write_reg(eq->hwif, addr, val);
+}
+
+static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr)
+{
+ u32 page_size_val, elem_size, val, ctrl1;
enum hinic_eq_type type = eq->type;
- u32 page_size_val, elem_size;
- u32 addr, val, ctrl1;
if (type == HINIC_AEQ) {
/* RMW Ctrl1 */
@@ -491,9 +502,7 @@ static void set_ctrl1(struct hinic_eq *eq)
HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
val |= ctrl1;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
+ } else {
/* RMW Ctrl1 */
addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
@@ -508,19 +517,70 @@ static void set_ctrl1(struct hinic_eq *eq)
HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
val |= ctrl1;
+ }
+ return val;
+}
- hinic_hwif_write_reg(eq->hwif, addr, val);
+static void set_ctrl1(struct hinic_eq *eq)
+{
+ u32 addr, val;
+
+ if (eq->type == HINIC_AEQ)
+ addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+ else
+ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
+
+ val = get_ctrl1_val(eq, addr);
+
+ hinic_hwif_write_reg(eq->hwif, addr, val);
+}
+
+static int set_ceq_ctrl_reg(struct hinic_eq *eq)
+{
+ struct hinic_ceq_ctrl_reg ceq_ctrl = {0};
+ struct hinic_hwdev *hwdev = eq->hwdev;
+ u16 out_size = sizeof(ceq_ctrl);
+ u16 in_size = sizeof(ceq_ctrl);
+ struct hinic_pfhwdev *pfhwdev;
+ u32 addr;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
+ ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr);
+ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
+ ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr);
+
+ ceq_ctrl.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ ceq_ctrl.q_id = eq->q_id;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP,
+ &ceq_ctrl, in_size,
+ &ceq_ctrl, &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || ceq_ctrl.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
+ eq->q_id, err, ceq_ctrl.status, out_size);
+ return -EFAULT;
}
+
+ return 0;
}
/**
* set_eq_ctrls - setting eq's ctrl registers
* @eq: the Event Queue for setting
**/
-static void set_eq_ctrls(struct hinic_eq *eq)
+static int set_eq_ctrls(struct hinic_eq *eq)
{
+ if (HINIC_IS_VF(eq->hwif) && eq->type == HINIC_CEQ)
+ return set_ceq_ctrl_reg(eq);
+
set_ctrl0(eq);
set_ctrl1(eq);
+ return 0;
}
/**
@@ -703,7 +763,12 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
return -EINVAL;
}
- set_eq_ctrls(eq);
+ err = set_eq_ctrls(eq);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set eq ctrls\n");
+ return err;
+ }
+
eq_update_ci(eq, EQ_ARMED);
err = alloc_eq_pages(eq);
@@ -859,6 +924,7 @@ int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
ceqs->num_ceqs = num_ceqs;
for (q_id = 0; q_id < num_ceqs; q_id++) {
+ ceqs->ceq[q_id].hwdev = ceqs->hwdev;
err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
page_size, msix_entries[q_id]);
if (err) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
index d35f2068ee0c..74b9ff90640c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -143,8 +143,9 @@ enum hinic_eq_type {
};
enum hinic_aeq_type {
+ HINIC_MBX_FROM_FUNC = 1,
HINIC_MSG_FROM_MGMT_CPU = 2,
-
+ HINIC_MBX_SEND_RSLT = 5,
HINIC_MAX_AEQ_EVENTS,
};
@@ -171,7 +172,7 @@ struct hinic_eq_work {
struct hinic_eq {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
enum hinic_eq_type type;
int q_id;
u32 q_len;
@@ -219,7 +220,7 @@ struct hinic_ceq_cb {
struct hinic_ceqs {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
struct hinic_eq ceq[HINIC_MAX_CEQS];
int num_ceqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 07bbfbf68577..cf127d896ba6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
@@ -18,6 +19,8 @@
#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
+#define WAIT_HWIF_READY_TIMEOUT 10000
+
/**
* hinic_msix_attr_set - set message attribute for msix entry
* @hwif: the HW interface of a pci function device
@@ -115,8 +118,12 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
**/
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
{
- u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
+ u32 attr5;
+
+ if (HINIC_IS_VF(hwif))
+ return;
+ attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION);
attr5 |= HINIC_FA5_SET(action, PF_ACTION);
@@ -183,27 +190,47 @@ void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
**/
static int hwif_ready(struct hinic_hwif *hwif)
{
- struct pci_dev *pdev = hwif->pdev;
u32 addr, attr1;
addr = HINIC_CSR_FUNC_ATTR1_ADDR;
attr1 = hinic_hwif_read_reg(hwif, addr);
- if (!HINIC_FA1_GET(attr1, INIT_STATUS)) {
- dev_err(&pdev->dev, "hwif status is not ready\n");
- return -EFAULT;
+ if (!HINIC_FA1_GET(attr1, MGMT_INIT_STATUS))
+ return -EBUSY;
+
+ if (HINIC_IS_VF(hwif)) {
+ if (!HINIC_FA1_GET(attr1, PF_INIT_STATUS))
+ return -EBUSY;
}
return 0;
}
+static int wait_hwif_ready(struct hinic_hwif *hwif)
+{
+ unsigned long timeout = 0;
+
+ do {
+ if (!hwif_ready(hwif))
+ return 0;
+
+ usleep_range(999, 1000);
+ timeout++;
+ } while (timeout <= WAIT_HWIF_READY_TIMEOUT);
+
+ dev_err(&hwif->pdev->dev, "Timed out waiting for hwif to become ready\n");
+
+ return -EBUSY;
+}
+
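+/* Each iteration sleeps ~1ms, so WAIT_HWIF_READY_TIMEOUT (10000) gives the
+ * management firmware roughly ten seconds to come up before failing.
+ */
+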
/**
* set_hwif_attr - set the attributes in the relevant members in hwif
* @hwif: the HW interface of a pci function device
* @attr0: the first attribute that was read from the hw
* @attr1: the second attribute that was read from the hw
**/
-static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
+static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
+ u32 attr2)
{
hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX);
hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX);
@@ -214,6 +241,8 @@ static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC));
hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC));
hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC));
+ hwif->attr.global_vf_id_of_pf = HINIC_FA2_GET(attr2,
+ GLOBAL_VF_ID_OF_PF);
}
/**
@@ -222,7 +251,7 @@ static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
**/
static void read_hwif_attr(struct hinic_hwif *hwif)
{
- u32 addr, attr0, attr1;
+ u32 addr, attr0, attr1, attr2;
addr = HINIC_CSR_FUNC_ATTR0_ADDR;
attr0 = hinic_hwif_read_reg(hwif, addr);
@@ -230,7 +259,10 @@ static void read_hwif_attr(struct hinic_hwif *hwif)
addr = HINIC_CSR_FUNC_ATTR1_ADDR;
attr1 = hinic_hwif_read_reg(hwif, addr);
- set_hwif_attr(hwif, attr0, attr1);
+ addr = HINIC_CSR_FUNC_ATTR2_ADDR;
+ attr2 = hinic_hwif_read_reg(hwif, addr);
+
+ set_hwif_attr(hwif, attr0, attr1, attr2);
}
/**
@@ -309,6 +341,34 @@ static void dma_attr_init(struct hinic_hwif *hwif)
HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE);
}
+u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif)
+{
+ if (!hwif)
+ return 0;
+
+ return hwif->attr.global_vf_id_of_pf;
+}
+
+u16 hinic_global_func_id_hw(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ return HINIC_FA0_GET(attr0, FUNC_IDX);
+}
+
+u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ return HINIC_FA0_GET(attr0, PF_IDX);
+}
+
/**
* hinic_init_hwif - initialize the hw interface
* @hwif: the HW interface of a pci function device
@@ -335,7 +395,7 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
goto err_map_intr_bar;
}
- err = hwif_ready(hwif);
+ err = wait_hwif_ready(hwif);
if (err) {
dev_err(&pdev->dev, "HW interface is not ready\n");
goto err_hwif_ready;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index c7bb9ceca72c..0872e035faa1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -35,6 +35,7 @@
#define HINIC_FA0_FUNC_IDX_SHIFT 0
#define HINIC_FA0_PF_IDX_SHIFT 10
#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14
+#define HINIC_FA0_VF_IN_PF_SHIFT 16
/* reserved members - off 16 */
#define HINIC_FA0_FUNC_TYPE_SHIFT 24
@@ -42,6 +43,7 @@
#define HINIC_FA0_PF_IDX_MASK 0xF
#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3
#define HINIC_FA0_FUNC_TYPE_MASK 0x1
+#define HINIC_FA0_VF_IN_PF_MASK 0xFF
#define HINIC_FA0_GET(val, member) \
(((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK)
@@ -53,17 +55,25 @@
#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
/* reserved members - off 27 */
-#define HINIC_FA1_INIT_STATUS_SHIFT 30
+#define HINIC_FA1_MGMT_INIT_STATUS_SHIFT 30
+#define HINIC_FA1_PF_INIT_STATUS_SHIFT 31
#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
-#define HINIC_FA1_INIT_STATUS_MASK 0x1
+#define HINIC_FA1_MGMT_INIT_STATUS_MASK 0x1
+#define HINIC_FA1_PF_INIT_STATUS_MASK 0x1
#define HINIC_FA1_GET(val, member) \
(((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
+#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_SHIFT 16
+#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF
+
+#define HINIC_FA2_GET(val, member) \
+ (((val) >> HINIC_FA2_##member##_SHIFT) & HINIC_FA2_##member##_MASK)
+
#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0
#define HINIC_FA4_DB_STATE_SHIFT 1
@@ -140,6 +150,7 @@
#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
+#define HINIC_IS_VF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_VF)
#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
@@ -173,6 +184,7 @@ enum hinic_pcie_tph {
enum hinic_func_type {
HINIC_PF = 0,
+ HINIC_VF = 1,
HINIC_PPF = 2,
};
@@ -180,7 +192,7 @@ enum hinic_mod_type {
HINIC_MOD_COMM = 0, /* HW communication module */
HINIC_MOD_L2NIC = 1, /* L2NIC module */
HINIC_MOD_CFGM = 7, /* Configuration module */
-
+ HINIC_MOD_HILINK = 14, /* Hilink module */
HINIC_MOD_MAX = 15
};
@@ -223,6 +235,8 @@ struct hinic_func_attr {
u8 num_ceqs;
u8 num_dma_attr;
+
+ u16 global_vf_id_of_pf;
};
struct hinic_hwif {
@@ -271,6 +285,12 @@ enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif);
void hinic_db_state_set(struct hinic_hwif *hwif,
enum hinic_db_state db_state);
+u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif);
+
+u16 hinic_global_func_id_hw(struct hinic_hwif *hwif);
+
+u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif);
+
int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev);
void hinic_free_hwif(struct hinic_hwif *hwif);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index d66f86fa3f46..3e3fa742e476 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/err.h>
+#include "hinic_hw_dev.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_wqe.h"
@@ -34,6 +35,8 @@
#define DB_IDX(db, db_base) \
(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
+#define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12)))
+
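+/* Encodes the page size as a power-of-two multiple of 4K:
+ * HINIC_PAGE_SIZE_HW(SZ_4K) == 0, HINIC_PAGE_SIZE_HW(SZ_256K) == 6.
+ */
+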
enum io_cmd {
IO_CMD_MODIFY_QUEUE_CTXT = 0,
IO_CMD_CLEAN_QUEUE_CTXT,
@@ -279,7 +282,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
- HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
+ func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE);
if (err) {
dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
return err;
@@ -287,7 +290,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
- HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
+ func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
if (err) {
dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
goto err_rq_alloc;
@@ -484,6 +487,33 @@ void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
devm_kfree(&pdev->dev, func_to_io->qps);
}
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct hinic_wq_page_size page_size_info = {0};
+ u16 out_size = sizeof(page_size_info);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ page_size_info.func_idx = func_idx;
+ page_size_info.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_PAGESIZE_SET, &page_size_info,
+ sizeof(page_size_info), &page_size_info,
+ &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || page_size_info.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, page_size_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
/**
* hinic_io_init - Initialize the IO components
* @func_to_io: func to io channel that holds the IO components
@@ -506,6 +536,7 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io,
func_to_io->hwif = hwif;
func_to_io->qps = NULL;
func_to_io->max_qps = max_qps;
+ func_to_io->ceqs.hwdev = func_to_io->hwdev;
err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
@@ -541,6 +572,14 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io,
func_to_io->cmdq_db_area[cmdq] = db_area;
}
+ err = hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(hwif),
+ HINIC_DEFAULT_WQ_PAGE_SIZE);
+ if (err) {
+ dev_err(&func_to_io->hwif->pdev->dev, "Failed to set wq page size\n");
+ goto init_wq_pg_size_err;
+ }
+
err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
func_to_io->cmdq_db_area);
if (err) {
@@ -551,6 +590,11 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io,
return 0;
err_init_cmdqs:
+ if (!HINIC_IS_VF(func_to_io->hwif))
+ hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(hwif),
+ HINIC_HW_WQ_PAGE_SIZE);
+init_wq_pg_size_err:
err_db_area:
for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
@@ -575,6 +619,11 @@ void hinic_io_free(struct hinic_func_to_io *func_to_io)
hinic_free_cmdqs(&func_to_io->cmdqs);
+ if (!HINIC_IS_VF(func_to_io->hwif))
+ hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(func_to_io->hwif),
+ HINIC_HW_WQ_PAGE_SIZE);
+
for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
index cac2b722e7dc..214f162f7579 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
@@ -20,6 +20,8 @@
#define HINIC_DB_PAGE_SIZE SZ_4K
#define HINIC_DB_SIZE SZ_4M
+#define HINIC_HW_WQ_PAGE_SIZE SZ_4K
+#define HINIC_DEFAULT_WQ_PAGE_SIZE SZ_256K
#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE)
@@ -47,7 +49,7 @@ struct hinic_free_db_area {
struct hinic_func_to_io {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
struct hinic_ceqs ceqs;
struct hinic_wqs wqs;
@@ -58,6 +60,9 @@ struct hinic_func_to_io {
struct hinic_qp *qps;
u16 max_qps;
+ u16 sq_depth;
+ u16 rq_depth;
+
void __iomem **sq_db;
void __iomem *db_base;
@@ -69,8 +74,27 @@ struct hinic_func_to_io {
void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES];
struct hinic_cmdqs cmdqs;
+
+ u16 max_vfs;
+ struct vf_data_storage *vf_infos;
+ u8 link_status;
+};
+
+struct hinic_wq_page_size {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 page_size;
+
+ u32 rsvd1;
};
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+
int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
u16 base_qpn, int num_qps,
struct msix_entry *sq_msix_entries,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
new file mode 100644
index 000000000000..bc2f87e6cb5d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw_csr.h"
+#include "hinic_hw_dev.h"
+#include "hinic_hw_mbox.h"
+
+#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0
+#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12
+#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14
+/* The size of the data to be sent (in units of 4 bytes) */
+#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20
+/* SO_RO (strong ordering, relaxed ordering) */
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25
+#define HINIC_MBOX_INT_WB_EN_SHIFT 28
+
+#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF
+#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F
+#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3
+#define HINIC_MBOX_INT_WB_EN_MASK 0x1
+
+#define HINIC_MBOX_INT_SET(val, field) \
+ (((val) & HINIC_MBOX_INT_##field##_MASK) << \
+ HINIC_MBOX_INT_##field##_SHIFT)
+
+enum hinic_mbox_tx_status {
+ TX_NOT_DONE = 1,
+};
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0
+
+/* Specifies the Tx status of the message data:
+ * 0 - Tx request is done;
+ * 1 - Tx request is in process.
+ */
+#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1
+#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1
+
+#define HINIC_MBOX_CTRL_SET(val, field) \
+ (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
+ HINIC_MBOX_CTRL_##field##_SHIFT)
+
+#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MBOX_HEADER_MODULE_SHIFT 11
+#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MBOX_HEADER_SEQID_SHIFT 24
+#define HINIC_MBOX_HEADER_LAST_SHIFT 30
+
+/* specifies the mailbox message direction
+ * 0 - send
+ * 1 - receive
+ */
+#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MBOX_HEADER_CMD_SHIFT 32
+#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40
+#define HINIC_MBOX_HEADER_STATUS_SHIFT 48
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54
+
+#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F
+#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F
+#define HINIC_MBOX_HEADER_LAST_MASK 0x1
+#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MBOX_HEADER_CMD_MASK 0xFF
+#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF
+#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF
+
+#define HINIC_MBOX_HEADER_GET(val, field) \
+ (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
+ HINIC_MBOX_HEADER_##field##_MASK)
+#define HINIC_MBOX_HEADER_SET(val, field) \
+ ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
+ HINIC_MBOX_HEADER_##field##_SHIFT)
+
+#define MBOX_SEGLEN_MASK \
+ HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)
+
+#define HINIC_MBOX_SEG_LEN 48
+#define HINIC_MBOX_COMP_TIME 8000U
+#define MBOX_MSG_POLLING_TIMEOUT 8000
+
+#define HINIC_MBOX_DATA_SIZE 2040
+
+#define MBOX_MAX_BUF_SZ 2048UL
+#define MBOX_HEADER_SZ 8
+
+#define MBOX_INFO_SZ 4
+
+/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16UL
+
+/* mbox write-back status is 16B, but only the first 4B are used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
+
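+/* A full 2048B message is carried in 48B segments, so sequence ids range over 0..42 */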
+#define SEQ_ID_START_VAL 0
+#define SEQ_ID_MAX_VAL 42
+
+#define DST_AEQ_IDX_DEFAULT_VAL 0
+#define SRC_AEQ_IDX_DEFAULT_VAL 0
+#define NO_DMA_ATTRIBUTE_VAL 0
+
+#define HINIC_MGMT_RSP_AEQN 0
+#define HINIC_MBOX_RSP_AEQN 2
+#define HINIC_MBOX_RECV_AEQN 0
+
+#define MBOX_MSG_NO_DATA_LEN 1
+
+#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ)
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
+
+#define MBOX_RESPONSE_ERROR 0x1
+#define MBOX_MSG_ID_MASK 0xFF
+#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
+#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
+ (MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)
+
+#define FUNC_ID_OFF_SET_8B 8
+#define FUNC_ID_OFF_SET_10B 10
+
+/* max number of messages waiting to be processed for one function */
+#define HINIC_MAX_MSG_CNT_TO_PROCESS 10
+
+#define HINIC_QUEUE_MIN_DEPTH 6
+#define HINIC_QUEUE_MAX_DEPTH 12
+#define HINIC_MAX_RX_BUFFER_SIZE 15
+
+enum hinic_hwif_direction_type {
+ HINIC_HWIF_DIRECT_SEND = 0,
+ HINIC_HWIF_RESPONSE = 1,
+};
+
+enum mbox_send_mod {
+ MBOX_SEND_MSG_INT,
+};
+
+enum mbox_seg_type {
+ NOT_LAST_SEG,
+ LAST_SEG,
+};
+
+enum mbox_ordering_type {
+ STRONG_ORDER,
+};
+
+enum mbox_write_back_type {
+ WRITE_BACK = 1,
+};
+
+enum mbox_aeq_trig_type {
+ NOT_TRIGGER,
+ TRIGGER,
+};
+
+/**
+ * hinic_register_pf_mbox_cb - register mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->pf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hinic_register_vf_mbox_cb - register mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->vf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->pf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->vf_mbox_cb[mod] = NULL;
+}
+
+static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ void *buf_out, u16 *out_size)
+{
+ hinic_vf_mbox_cb cb;
+ int ret = 0;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Received an illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
+ cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
+ recv_mbox->mbox_len, buf_out, out_size);
+ } else {
+ dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
+}
+
+static int
+recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx, void *buf_out,
+ u16 *out_size)
+{
+ hinic_pf_mbox_cb cb;
+ u16 vf_id = 0;
+ int ret;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Received an illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
+ vf_id = src_func_idx -
+ hinic_glb_pf_vf_offset(func_to_func->hwif);
+ ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len,
+ buf_out, out_size);
+ } else {
+ dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
+ recv_mbox->mod);
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
+}
+
+static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
+ u8 seq_id, u8 seg_len)
+{
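+ /* Segments must arrive in order: seq id 0 starts a new message,
+ * and every later segment must follow the previous seq id.
+ */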
+ if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_mbox->seq_id = seq_id;
+ } else {
+ if (seq_id != recv_mbox->seq_id + 1)
+ return false;
+
+ recv_mbox->seq_id = seq_id;
+ }
+
+ return true;
+}
+
+static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
+ func_to_func->event_flag == EVENT_START)
+ complete(&recv_mbox->recv_done);
+ else
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
+ func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
+ recv_mbox->msg_info.status);
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx);
+
+static void recv_func_mbox_work_handler(struct work_struct *work)
+{
+ struct hinic_mbox_work *mbox_work =
+ container_of(work, struct hinic_mbox_work, work);
+ struct hinic_recv_mbox *recv_mbox;
+
+ recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
+ mbox_work->src_func_idx);
+
+ recv_mbox =
+ &mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];
+
+ atomic_dec(&recv_mbox->msg_cnt);
+
+ kfree(mbox_work);
+}
+
+static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ void *header, struct hinic_recv_mbox *recv_mbox)
+{
+ void *mbox_body = MBOX_BODY_FROM_HDR(header);
+ struct hinic_recv_mbox *rcv_mbox_temp = NULL;
+ u64 mbox_header = *((u64 *)header);
+ struct hinic_mbox_work *mbox_work;
+ u8 seq_id, seg_len;
+ u16 src_func_idx;
+ int pos;
+
+ seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
+ seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
+ src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
+ src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+ return;
+ }
+
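+ /* copy this segment into its slot in the reassembly buffer */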
+ pos = seq_id * MBOX_SEG_LEN;
+ memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
+ HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));
+
+ if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
+ return;
+
+ recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
+ recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
+ recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
+ recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
+ recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
+ recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+
+ if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
+ HINIC_HWIF_RESPONSE) {
+ resp_mbox_handler(func_to_func, recv_mbox);
+ return;
+ }
+
+ if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
+ dev_warn(&func_to_func->hwif->pdev->dev,
+ "This function(%u) have %d message wait to process,can't add to work queue\n",
+ src_func_idx, atomic_read(&recv_mbox->msg_cnt));
+ return;
+ }
+
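+ /* queue a deep copy so this per-function receive slot can accept the next message */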
+ rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL);
+ if (!rcv_mbox_temp)
+ return;
+
+ rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ,
+ GFP_KERNEL);
+ if (!rcv_mbox_temp->mbox)
+ goto err_alloc_rcv_mbox_msg;
+
+ rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!rcv_mbox_temp->buf_out)
+ goto err_alloc_rcv_mbox_buf;
+
+ mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
+ if (!mbox_work)
+ goto err_alloc_mbox_work;
+
+ mbox_work->func_to_func = func_to_func;
+ mbox_work->recv_mbox = rcv_mbox_temp;
+ mbox_work->src_func_idx = src_func_idx;
+
+ atomic_inc(&recv_mbox->msg_cnt);
+ INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
+ queue_work(func_to_func->workq, &mbox_work->work);
+
+ return;
+
+err_alloc_mbox_work:
+ kfree(rcv_mbox_temp->buf_out);
+
+err_alloc_rcv_mbox_buf:
+ kfree(rcv_mbox_temp->mbox);
+
+err_alloc_rcv_mbox_msg:
+ kfree(rcv_mbox_temp);
+}
+
+void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ u64 mbox_header = *((u64 *)header);
+ struct hinic_recv_mbox *recv_mbox;
+ u64 src, dir;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+
+ dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
+ src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (src >= HINIC_MAX_FUNCTIONS) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mailbox source function id:%u is invalid\n", (u32)src);
+ return;
+ }
+
+ recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
+ &func_to_func->mbox_send[src] :
+ &func_to_func->mbox_resp[src];
+
+ recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
+}
+
+void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_send_mbox *send_mbox;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+ send_mbox = &func_to_func->send_mbox;
+
+ complete(&send_mbox->send_done);
+}
+
+static void clear_mbox_status(struct hinic_send_mbox *mbox)
+{
+ *mbox->wb_status = 0;
+
+ /* clear mailbox write back status */
+ wmb();
+}
+
+static void mbox_copy_header(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, u64 *header)
+{
+ u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
+ u32 *data = (u32 *)header;
+
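+ /* write the 8B mailbox header into the mailbox area as two 32-bit words */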
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i), mbox->data + i * sizeof(u32));
+}
+
+static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, void *seg,
+ u16 seg_len)
+{
+ u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
+ u32 data_len, chk_sz = sizeof(u32);
+ u32 *data = seg;
+ u32 i, idx_max;
+
+ /* The mbox message should be 4-byte aligned. */
+ if (seg_len % chk_sz) {
+ memcpy(mbox_max_buf, seg, seg_len);
+ data = (u32 *)mbox_max_buf;
+ }
+
+ data_len = seg_len;
+ idx_max = ALIGN(data_len, chk_sz) / chk_sz;
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i),
+ mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
+}
+
+static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
+ u16 dst_func, u16 dst_aeqn, u16 seg_len,
+ int poll)
+{
+ u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
+ u32 mbox_int, mbox_ctrl;
+
+ mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
+ HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
+ HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
+ HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
+ MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
+ TX_SIZE) |
+ HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
+ HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);
+
+ hinic_hwif_write_reg(func_to_func->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);
+
+ wmb(); /* ensure the mbox interrupt attributes are written before the control register */
+ mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);
+
+ if (poll)
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
+ else
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);
+
+ hinic_hwif_write_reg(func_to_func->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
+}
+
+static void dump_mbox_reg(struct hinic_hwdev *hwdev)
+{
+ u32 val;
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
+ dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
+ dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
+ val);
+}
+
+static u16 get_mbox_status(struct hinic_send_mbox *mbox)
+{
+ /* the write-back status is 16B, but only the first 4B are used */
+ u64 wb_val = be64_to_cpu(*mbox->wb_status);
+
+ rmb(); /* ensure the status is read before it is checked */
+
+ return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
+}
+
+static int
+wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
+ int poll, u16 *wb_status)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct completion *done = &send_mbox->send_done;
+ u32 cnt = 0;
+ unsigned long jif;
+
+ if (poll) {
+ while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
+ *wb_status = get_mbox_status(send_mbox);
+ if (MBOX_STATUS_FINISHED(*wb_status))
+ break;
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
+ *wb_status);
+ dump_mbox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+ } else {
+ jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(done, jif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
+ dump_mbox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+
+ *wb_status = get_mbox_status(send_mbox);
+ }
+
+ return 0;
+}
+
+static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
+ u64 header, u16 dst_func, void *seg, u16 seg_len,
+ int poll, void *msg_info)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct completion *done = &send_mbox->send_done;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ u16 dst_aeqn, wb_status = 0, errcode;
+
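+ /* pick the destination AEQ by message direction; fall back to AEQ 0
+ * when fewer than 4 AEQs exist
+ */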
+ if (num_aeqs >= 4)
+ dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
+ HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
+ else
+ dst_aeqn = 0;
+
+ if (!poll)
+ init_completion(done);
+
+ clear_mbox_status(send_mbox);
+
+ mbox_copy_header(hwdev, send_mbox, &header);
+
+ mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
+
+ write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);
+
+ wmb(); /* ensure the mbox message attributes are written before waiting for completion */
+
+ if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
+ return -ETIMEDOUT;
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
+ dst_func, wb_status);
+ errcode = MBOX_STATUS_ERRCODE(wb_status);
+ return errcode ? errcode : -EFAULT;
+ }
+
+ return 0;
+}
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, void *msg,
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u16 seg_len = MBOX_SEG_LEN;
+ u8 *msg_seg = (u8 *)msg;
+ u16 left = msg_len;
+ u32 seq_id = 0;
+ u64 header = 0;
+ int err = 0;
+
+ down(&func_to_func->msg_send_sem);
+
+ header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MBOX_HEADER_SET(mod, MODULE) |
+ HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
+ HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
+ HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
+ HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
+ HINIC_MBOX_HEADER_SET(cmd, CMD) |
+ /* The VF's offset to its associated PF */
+ HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
+ HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
+ HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
+ SRC_GLB_FUNC_IDX);
+
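+ /* send the message in MBOX_SEG_LEN chunks; the LAST bit in the
+ * header marks the final segment
+ */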
+ while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
+ if (left <= HINIC_MBOX_SEG_LEN) {
+ header &= ~MBOX_SEGLEN_MASK;
+ header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
+ header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);
+
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
+ seg_len, MBOX_SEND_MSG_INT, msg_info);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
+ HINIC_MBOX_HEADER_GET(header, SEQID));
+ goto err_send_mbox_seg;
+ }
+
+ left -= HINIC_MBOX_SEG_LEN;
+ msg_seg += HINIC_MBOX_SEG_LEN;
+
+ seq_id++;
+ header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
+ SEQID));
+ header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
+ }
+
+err_send_mbox_seg:
+ up(&func_to_func->msg_send_sem);
+
+ return err;
+}
+
+static void
+response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox, int err,
+ u16 out_size, u16 src_func_idx)
+{
+ struct mbox_msg_info msg_info = {0};
+
+ if (recv_mbox->ack_type == MBOX_ACK) {
+ msg_info.msg_id = recv_mbox->msg_info.msg_id;
+ if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
+ else if (err == HINIC_MBOX_VF_CMD_ERROR)
+ msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
+ else if (err)
+ msg_info.status = HINIC_MBOX_PF_SEND_ERR;
+
+ /* if no data needs to be sent in the response, set out_size to 1 */
+ if (!out_size || err)
+ out_size = MBOX_MSG_NO_DATA_LEN;
+
+ send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
+ recv_mbox->buf_out, out_size, src_func_idx,
+ HINIC_HWIF_RESPONSE, MBOX_ACK,
+ &msg_info);
+ }
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx)
+{
+ void *buf_out = recv_mbox->buf_out;
+ u16 out_size = MBOX_MAX_BUF_SZ;
+ int err = 0;
+
+ if (HINIC_IS_VF(func_to_func->hwif)) {
+ err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
+ &out_size);
+ } else {
+ if (IS_PF_OR_PPF_SRC(src_func_idx))
+ dev_warn(&func_to_func->hwif->pdev->dev,
+ "Unsupported pf2pf mbox msg\n");
+ else
+ err = recv_pf_from_vf_mbox_handler(func_to_func,
+ recv_mbox,
+ src_func_idx,
+ buf_out, &out_size);
+ }
+
+ response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
+ src_func_idx);
+ kfree(recv_mbox->buf_out);
+ kfree(recv_mbox->mbox);
+ kfree(recv_mbox);
+}
+
+static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ func_to_func->event_flag = event_flag;
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *mbox_for_resp,
+ enum hinic_mod_type mod, u16 cmd,
+ void *buf_out, u16 *out_size)
+{
+ int err;
+
+ if (mbox_for_resp->msg_info.status) {
+ err = mbox_for_resp->msg_info.status;
+ if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
+ mbox_for_resp->msg_info.status);
+ return err;
+ }
+
+ if (buf_out && out_size) {
+ if (*out_size < mbox_for_resp->mbox_len) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Invalid response mbox message length: %d for mod %d cmd %d, should less than: %d\n",
+ mbox_for_resp->mbox_len, mod, cmd, *out_size);
+ return -EFAULT;
+ }
+
+ if (mbox_for_resp->mbox_len)
+ memcpy(buf_out, mbox_for_resp->mbox,
+ mbox_for_resp->mbox_len);
+
+ *out_size = mbox_for_resp->mbox_len;
+ }
+
+ return 0;
+}
+
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_recv_mbox *mbox_for_resp;
+ struct mbox_msg_info msg_info = {0};
+ unsigned long timeo;
+ int err;
+
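+ /* only one request is in flight per function: the response is
+ * matched against send_msg_id under mbox_lock
+ */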
+ mbox_for_resp = &func_to_func->mbox_resp[dst_func];
+
+ down(&func_to_func->mbox_send_sem);
+
+ init_completion(&mbox_for_resp->recv_done);
+
+ msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);
+
+ set_mbox_to_func_event(func_to_func, EVENT_START);
+
+ err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
+ dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
+ &msg_info);
+ if (err) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
+ msg_info.msg_id);
+ set_mbox_to_func_event(func_to_func, EVENT_FAIL);
+ goto err_send_mbox;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
+ set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
+ err = -ETIMEDOUT;
+ goto err_send_mbox;
+ }
+
+ set_mbox_to_func_event(func_to_func, EVENT_END);
+
+ err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
+ buf_out, out_size);
+
+err_send_mbox:
+ up(&func_to_func->mbox_send_sem);
+
+ return err;
+}
+
+static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
+ void *buf_in, u16 in_size)
+{
+ if (in_size > HINIC_MBOX_DATA_SIZE) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mbox msg len(%d) exceed limit(%d)\n",
+ in_size, HINIC_MBOX_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+
+ if (err)
+ return err;
+
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
+ HINIC_FUNC_TYPE(hwdev->hwif));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd,
+ hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ u16 dst_func_idx;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ func_to_func = hwdev->func_to_func;
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
+ HINIC_FUNC_TYPE(hwdev->hwif));
+ return -EINVAL;
+ }
+
+ if (!vf_id) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "VF id(%d) error!\n", vf_id);
+ return -EINVAL;
+ }
+
+ /* vf_offset_to_pf + vf_id is the VF's global function id within
+ * this PF
+ */
+ dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ int err;
+
+ mbox_info->seq_id = SEQ_ID_MAX_VAL;
+
+ mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->mbox)
+ return -ENOMEM;
+
+ mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->buf_out) {
+ err = -ENOMEM;
+ goto err_alloc_buf_out;
+ }
+
+ atomic_set(&mbox_info->msg_cnt, 0);
+
+ return 0;
+
+err_alloc_buf_out:
+ kfree(mbox_info->mbox);
+
+ return err;
+}
+
+static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ kfree(mbox_info->buf_out);
+ kfree(mbox_info->mbox);
+}
+
+static int alloc_mbox_info(struct hinic_hwdev *hwdev,
+ struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx, i;
+ int err;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
+ err = init_mbox_info(&mbox_info[func_idx]);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
+ func_idx);
+ goto err_init_mbox_info;
+ }
+ }
+
+ return 0;
+
+err_init_mbox_info:
+ for (i = 0; i < func_idx; i++)
+ clean_mbox_info(&mbox_info[i]);
+
+ return err;
+}
+
+static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
+ clean_mbox_info(&mbox_info[func_idx]);
+}
+
+static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+
+ send_mbox->data = MBOX_AREA(func_to_func->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u32 addr_h, addr_l;
+
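+ /* the hardware DMA-writes the mailbox send status into this coherent buffer */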
+ send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
+ MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr,
+ GFP_KERNEL);
+ if (!send_mbox->wb_vaddr)
+ return -ENOMEM;
+
+ send_mbox->wb_status = send_mbox->wb_vaddr;
+
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ addr_h);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ 0);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ 0);
+
+ dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr,
+ send_mbox->wb_paddr);
+}
+
+static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hwdev = handle;
+ struct hinic_pfhwdev *pfhwdev;
+ int err = 0;
+
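+ /* START_FLR needs no forwarding; relay all other comm commands to
+ * the management CPU synchronously
+ */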
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ if (cmd == HINIC_COMM_CMD_START_FLR) {
+ *out_size = 0;
+ } else {
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ cmd, buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+ if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "PF mbox common callback handler err: %d\n",
+ err);
+ }
+
+ return err;
+}
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+ func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
+ if (!func_to_func)
+ return -ENOMEM;
+
+ hwdev->func_to_func = func_to_func;
+ func_to_func->hwdev = hwdev;
+ func_to_func->hwif = hwdev->hwif;
+ sema_init(&func_to_func->mbox_send_sem, 1);
+ sema_init(&func_to_func->msg_send_sem, 1);
+ spin_lock_init(&func_to_func->mbox_lock);
+ func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
+ if (!func_to_func->workq) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
+ err = -ENOMEM;
+ goto err_create_mbox_workq;
+ }
+
+ err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
+ goto err_alloc_mbox_for_send;
+ }
+
+ err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
+ goto err_alloc_mbox_for_resp;
+ }
+
+ err = alloc_mbox_wb_status(func_to_func);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
+ goto err_alloc_wb_status;
+ }
+
+ prepare_send_mbox(func_to_func);
+
+ hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
+ &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
+ hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
+ &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);
+
+ if (!HINIC_IS_VF(hwdev->hwif))
+ hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+ comm_pf_mbox_handler);
+
+ return 0;
+
+err_alloc_wb_status:
+ free_mbox_info(func_to_func->mbox_resp);
+
+err_alloc_mbox_for_resp:
+ free_mbox_info(func_to_func->mbox_send);
+
+err_alloc_mbox_for_send:
+ destroy_workqueue(func_to_func->workq);
+
+err_create_mbox_workq:
+ kfree(func_to_func);
+
+ return err;
+}
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
+ hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);
+
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
+ /* destroy the workqueue before freeing the related mbox resources to
+ * prevent illegal resource access
+ */
+ destroy_workqueue(func_to_func->workq);
+
+ free_mbox_wb_status(func_to_func);
+ free_mbox_info(func_to_func->mbox_resp);
+ free_mbox_info(func_to_func->mbox_send);
+
+ kfree(func_to_func);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
new file mode 100644
index 000000000000..7b18559bfe80
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_MBOX_H_
+#define HINIC_MBOX_H_
+
+#define HINIC_MBOX_PF_SEND_ERR 0x1
+#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2
+#define HINIC_MBOX_VF_CMD_ERROR 0x3
+
+#define HINIC_MAX_FUNCTIONS 512
+
+#define HINIC_MAX_PF_FUNCS 16
+
+#define HINIC_MBOX_WQ_NAME "hinic_mbox"
+
+#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100
+#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C
+
+enum hinic_mbox_ack_type {
+ MBOX_ACK,
+ MBOX_NO_ACK,
+};
+
+struct mbox_msg_info {
+ u8 msg_id;
+ u8 status;
+};
+
+struct hinic_recv_mbox {
+ struct completion recv_done;
+ void *mbox;
+ u8 cmd;
+ enum hinic_mod_type mod;
+ u16 mbox_len;
+ void *buf_out;
+ enum hinic_mbox_ack_type ack_type;
+ struct mbox_msg_info msg_info;
+ u8 seq_id;
+ atomic_t msg_cnt;
+};
+
+struct hinic_send_mbox {
+ struct completion send_done;
+ u8 *data;
+
+ u64 *wb_status;
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+typedef void (*hinic_vf_mbox_cb)(void *handle, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+enum mbox_event_state {
+ EVENT_START = 0,
+ EVENT_FAIL,
+ EVENT_TIMEOUT,
+ EVENT_END,
+};
+
+enum hinic_mbox_cb_state {
+ HINIC_VF_MBOX_CB_REG = 0,
+ HINIC_VF_MBOX_CB_RUNNING,
+ HINIC_PF_MBOX_CB_REG,
+ HINIC_PF_MBOX_CB_RUNNING,
+ HINIC_PPF_MBOX_CB_REG,
+ HINIC_PPF_MBOX_CB_RUNNING,
+ HINIC_PPF_TO_PF_MBOX_CB_REG,
+ HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+};
+
+struct hinic_mbox_func_to_func {
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+
+ struct semaphore mbox_send_sem;
+ struct semaphore msg_send_sem;
+ struct hinic_send_mbox send_mbox;
+
+ struct workqueue_struct *workq;
+
+ struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS];
+ struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS];
+
+ hinic_vf_mbox_cb vf_mbox_cb[HINIC_MOD_MAX];
+ hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX];
+ unsigned long pf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long vf_mbox_cb_state[HINIC_MOD_MAX];
+
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+
+ /* lock for mbox event flag */
+ spinlock_t mbox_lock;
+};
+
+struct hinic_mbox_work {
+ struct work_struct work;
+ u16 src_func_idx;
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_recv_mbox *recv_mbox;
+};
+
+struct vf_cmd_msg_handle {
+ u8 cmd;
+ int (*cmd_msg_handler)(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+};
+
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback);
+
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback);
+
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size);
+
+void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size);
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 8995e32dd1c0..c33eb1147055 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -45,6 +45,8 @@
#define MGMT_MSG_TIMEOUT 5000
+#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
+
#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@@ -238,12 +240,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
u8 *buf_in, u16 in_size,
u8 *buf_out, u16 *out_size,
enum mgmt_direction_type direction,
- u16 resp_msg_id)
+ u16 resp_msg_id, u32 timeout)
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_recv_msg *recv_msg;
struct completion *recv_done;
+ unsigned long timeo;
u16 msg_id;
int err;
@@ -267,8 +270,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
goto unlock_sync_msg;
}
- if (!wait_for_completion_timeout(recv_done,
- msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+ if (!wait_for_completion_timeout(recv_done, timeo)) {
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
err = -ETIMEDOUT;
goto unlock_sync_msg;
@@ -342,6 +346,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
+ u32 timeout = 0;
if (sync != HINIC_MGMT_MSG_SYNC) {
dev_err(&pdev->dev, "Invalid MGMT msg type\n");
@@ -353,9 +358,16 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
return -EINVAL;
}
- return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+ timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+
+ if (HINIC_IS_VF(hwif))
+ return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
+ in_size, buf_out, out_size, 0);
+ else
+ return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
buf_out, out_size, MGMT_DIRECT_SEND,
- MSG_NOT_RESP);
+ MSG_NOT_RESP, timeout);
}
/**
@@ -390,8 +402,8 @@ static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
recv_msg->msg, recv_msg->msg_len,
buf_out, &out_size);
else
- dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n",
- recv_msg->mod);
+ dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
+ recv_msg->mod, recv_msg->cmd);
mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
@@ -553,6 +565,10 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
int err;
pf_to_mgmt->hwif = hwif;
+ pf_to_mgmt->hwdev = hwdev;
+
+ if (HINIC_IS_VF(hwif))
+ return 0;
sema_init(&pf_to_mgmt->sync_msg_lock, 1);
pf_to_mgmt->sync_msg_id = 0;
@@ -584,6 +600,9 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ if (HINIC_IS_VF(hwdev->hwif))
+ return;
+
hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
index 182fba17b643..c2b142c08b0e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -60,7 +60,9 @@ enum hinic_cfg_cmd {
};
enum hinic_comm_cmd {
+ HINIC_COMM_CMD_START_FLR = 0x1,
HINIC_COMM_CMD_IO_STATUS_GET = 0x3,
+ HINIC_COMM_CMD_DMA_ATTR_SET = 0x4,
HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10,
HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11,
@@ -74,7 +76,13 @@ enum hinic_comm_cmd {
HINIC_COMM_CMD_IO_RES_CLEAR = 0x29,
- HINIC_COMM_CMD_MAX = 0x32,
+ HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
+
+ HINIC_COMM_CMD_L2NIC_RESET = 0x4b,
+
+ HINIC_COMM_CMD_PAGESIZE_SET = 0x50,
+
+ HINIC_COMM_CMD_MAX = 0x51,
};
enum hinic_mgmt_cb_state {
@@ -107,7 +115,7 @@ struct hinic_mgmt_cb {
struct hinic_pf_to_mgmt {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
struct semaphore sync_msg_lock;
u16 sync_msg_id;
u8 *sync_msg_buf;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index be364b7a7019..fcf7bfe4aa47 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -108,7 +108,12 @@ void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
+ /* If only one page, use 0-level CLA */
+ if (wq->num_q_pages == 1)
+ wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq_page_addr);
+ else
+ wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
+
wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
@@ -638,6 +643,7 @@ void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
/* increment prod_idx to the next */
prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ prod_idx = SQ_MASKED_IDX(sq, prod_idx);
wmb(); /* Write all before the doorbell */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 79091e131418..ca3e2d060284 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -38,12 +38,15 @@
#define HINIC_SQ_WQEBB_SIZE 64
#define HINIC_RQ_WQEBB_SIZE 32
-#define HINIC_SQ_PAGE_SIZE SZ_4K
-#define HINIC_RQ_PAGE_SIZE SZ_4K
+#define HINIC_SQ_PAGE_SIZE SZ_256K
+#define HINIC_RQ_PAGE_SIZE SZ_256K
#define HINIC_SQ_DEPTH SZ_4K
#define HINIC_RQ_DEPTH SZ_4K
+#define HINIC_MAX_QUEUE_DEPTH SZ_4K
+#define HINIC_MIN_QUEUE_DEPTH 128
+
/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */
#define HINIC_RX_BUF_SZ 2048
#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 03363216ff59..5dc3743f8091 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -503,7 +503,7 @@ err_alloc_wq_pages:
* Return 0 - Success, negative - Failure
**/
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
+ u16 wqebb_size, u32 wq_page_size, u16 q_depth,
u16 max_wqe_size)
{
struct hinic_hwif *hwif = wqs->hwif;
@@ -600,7 +600,7 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
**/
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
+ int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
u16 q_depth, u16 max_wqe_size)
{
struct pci_dev *pdev = hwif->pdev;
@@ -768,7 +768,10 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*prod_idx = curr_prod_idx;
- if (curr_pg != end_pg) {
+ /* Even if we only have one page, we still need the shadow WQE when a
+ * WQE rolls over the end of the page
+ */
+ if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 811eef744140..b06f8c0255de 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -26,7 +26,7 @@ struct hinic_wq {
int block_idx;
u16 wqebb_size;
- u16 wq_page_size;
+ u32 wq_page_size;
u16 q_depth;
u16 max_wqe_size;
u16 num_wqebbs_per_page;
@@ -76,7 +76,7 @@ struct hinic_cmdq_pages {
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
+ int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
u16 q_depth, u16 max_wqe_size);
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
@@ -88,7 +88,7 @@ int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
void hinic_wqs_free(struct hinic_wqs *wqs);
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
+ u16 wqebb_size, u32 wq_page_size, u16 q_depth,
u16 max_wqe_size);
void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 13560975c103..e9e6f4c9309a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -29,6 +29,7 @@
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_dev.h"
+#include "hinic_sriov.h"
MODULE_AUTHOR("Huawei Technologies CO., Ltd");
MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
@@ -46,6 +47,7 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200
#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205
#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210
+#define HINIC_DEV_ID_VF 0x375e
#define HINIC_WQ_NAME "hinic_dev"
@@ -65,6 +67,8 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define rx_mode_work_to_nic_dev(rx_mode_work) \
container_of(rx_mode_work, struct hinic_dev, rx_mode_work)
+#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
+
static int change_mac_addr(struct net_device *netdev, const u8 *addr);
static int set_features(struct hinic_dev *nic_dev,
@@ -322,7 +326,6 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
int i, node, err = 0;
u16 num_cpus = 0;
- nic_dev->max_qps = hinic_hwdev_max_num_qps(hwdev);
if (nic_dev->max_qps <= 1) {
nic_dev->flags &= ~HINIC_RSS_ENABLE;
nic_dev->rss_limit = nic_dev->max_qps;
@@ -368,14 +371,15 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
}
-static int hinic_open(struct net_device *netdev)
+int hinic_open(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
enum hinic_port_link_state link_state;
int err, ret;
if (!(nic_dev->flags & HINIC_INTF_UP)) {
- err = hinic_hwdev_ifup(nic_dev->hwdev);
+ err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
+ nic_dev->rq_depth);
if (err) {
netif_err(nic_dev, drv, netdev,
"Failed - HW interface up\n");
@@ -423,9 +427,6 @@ static int hinic_open(struct net_device *netdev)
goto err_func_port_state;
}
- /* Wait up to 3 sec between port enable to link state */
- msleep(3000);
-
down(&nic_dev->mgmt_lock);
err = hinic_port_link_state(nic_dev, &link_state);
@@ -434,6 +435,9 @@ static int hinic_open(struct net_device *netdev)
goto err_port_link;
}
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);
+
if (link_state == HINIC_LINK_STATE_UP)
nic_dev->flags |= HINIC_LINK_UP;
@@ -479,11 +483,10 @@ err_create_txqs:
return err;
}
-static int hinic_close(struct net_device *netdev)
+int hinic_close(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
- int err;
down(&nic_dev->mgmt_lock);
@@ -497,20 +500,12 @@ static int hinic_close(struct net_device *netdev)
up(&nic_dev->mgmt_lock);
- err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to set func port state\n");
- nic_dev->flags |= (flags & HINIC_INTF_UP);
- return err;
- }
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
- err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
- nic_dev->flags |= (flags & HINIC_INTF_UP);
- return err;
- }
+ hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
+
+ hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
if (nic_dev->flags & HINIC_RSS_ENABLE) {
hinic_rss_deinit(nic_dev);
@@ -685,7 +680,7 @@ static int hinic_vlan_rx_add_vid(struct net_device *netdev,
}
err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
- if (err) {
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
goto err_add_mac;
}
@@ -737,8 +732,6 @@ static void set_rx_mode(struct work_struct *work)
struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
- netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
-
hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);
__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
@@ -757,10 +750,12 @@ static void hinic_set_rx_mode(struct net_device *netdev)
HINIC_RX_MODE_MC |
HINIC_RX_MODE_BC;
- if (netdev->flags & IFF_PROMISC)
- rx_mode |= HINIC_RX_MODE_PROMISC;
- else if (netdev->flags & IFF_ALLMULTI)
+ if (netdev->flags & IFF_PROMISC) {
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ rx_mode |= HINIC_RX_MODE_PROMISC;
+ } else if (netdev->flags & IFF_ALLMULTI) {
rx_mode |= HINIC_RX_MODE_MC_ALL;
+ }
rx_mode_work->rx_mode = rx_mode;
@@ -770,8 +765,26 @@ static void hinic_set_rx_mode(struct net_device *netdev)
static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 sw_pi, hw_ci, sw_ci;
+ struct hinic_sq *sq;
+ u16 num_sqs, q_id;
+
+ num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev);
netif_err(nic_dev, drv, netdev, "Tx timeout\n");
+
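+ /* dump the producer/consumer indices of every stopped Tx queue to help debugging */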
+ for (q_id = 0; q_id < num_sqs; q_id++) {
+ if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
+ continue;
+
+ sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
+ sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask;
+ hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask;
+ sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask;
+ netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
+ q_id, sw_pi, hw_ci, sw_ci,
+ nic_dev->txqs[q_id].napi.state);
+ }
}
static void hinic_get_stats64(struct net_device *netdev,
@@ -837,6 +850,29 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_get_stats64 = hinic_get_stats64,
.ndo_fix_features = hinic_fix_features,
.ndo_set_features = hinic_set_features,
+ .ndo_set_vf_mac = hinic_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
+ .ndo_get_vf_config = hinic_ndo_get_vf_config,
+ .ndo_set_vf_trust = hinic_ndo_set_vf_trust,
+ .ndo_set_vf_rate = hinic_ndo_set_vf_bw,
+ .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
+ .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
+};
+
+static const struct net_device_ops hinicvf_netdev_ops = {
+ .ndo_open = hinic_open,
+ .ndo_stop = hinic_close,
+ .ndo_change_mtu = hinic_change_mtu,
+ .ndo_set_mac_address = hinic_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
+ .ndo_set_rx_mode = hinic_set_rx_mode,
+ .ndo_start_xmit = hinic_xmit_frame,
+ .ndo_tx_timeout = hinic_tx_timeout,
+ .ndo_get_stats64 = hinic_get_stats64,
+ .ndo_fix_features = hinic_fix_features,
+ .ndo_set_features = hinic_set_features,
};
static void netdev_features_init(struct net_device *netdev)
@@ -896,6 +932,10 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
}
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev,
+ link_status->link);
+
ret_link_status = buf_out;
ret_link_status->status = 0;
@@ -969,7 +1009,12 @@ static int nic_dev_init(struct pci_dev *pdev)
}
hinic_set_ethtool_ops(netdev);
- netdev->netdev_ops = &hinic_netdev_ops;
+
+ if (!HINIC_IS_VF(hwdev->hwif))
+ netdev->netdev_ops = &hinic_netdev_ops;
+ else
+ netdev->netdev_ops = &hinicvf_netdev_ops;
+
netdev->max_mtu = ETH_MAX_MTU;
nic_dev = netdev_priv(netdev);
@@ -981,6 +1026,11 @@ static int nic_dev_init(struct pci_dev *pdev)
nic_dev->rxqs = NULL;
nic_dev->tx_weight = tx_weight;
nic_dev->rx_weight = rx_weight;
+ nic_dev->sq_depth = HINIC_SQ_DEPTH;
+ nic_dev->rq_depth = HINIC_RQ_DEPTH;
+ nic_dev->sriov_info.hwdev = hwdev;
+ nic_dev->sriov_info.pdev = pdev;
+ nic_dev->max_qps = num_qps;
sema_init(&nic_dev->mgmt_lock, 1);
@@ -1007,11 +1057,25 @@ static int nic_dev_init(struct pci_dev *pdev)
pci_set_drvdata(pdev, netdev);
err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
- if (err)
- dev_warn(&pdev->dev, "Failed to get mac address\n");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get mac address\n");
+ goto err_get_mac;
+ }
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ dev_err(&pdev->dev, "Invalid MAC address\n");
+ err = -EIO;
+ goto err_add_mac;
+ }
+
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+ netdev->dev_addr);
+ eth_hw_addr_random(netdev);
+ }
err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
- if (err) {
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
dev_err(&pdev->dev, "Failed to add mac\n");
goto err_add_mac;
}
@@ -1053,6 +1117,7 @@ err_set_features:
cancel_work_sync(&rx_mode_work->work);
err_set_mtu:
+err_get_mac:
err_add_mac:
pci_set_drvdata(pdev, NULL);
destroy_workqueue(nic_dev->workq);
@@ -1126,14 +1191,41 @@ err_pci_regions:
return err;
}
+#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
+
+static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
+{
+ struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
+ u32 loop_cnt = 0;
+
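+ /* mark the function as being removed, then wait for any in-progress
+ * SR-IOV enable/disable to finish
+ */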
+ set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
+ usleep_range(9900, 10000);
+
+ while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
+ if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
+ !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
+ return;
+
+ usleep_range(9900, 10000);
+ loop_cnt++;
+ }
+}
+
static void hinic_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rx_mode_work *rx_mode_work;
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ wait_sriov_cfg_complete(nic_dev);
+ hinic_pci_sriov_disable(pdev);
+ }
+
unregister_netdev(netdev);
+ hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
+
hinic_hwdev_cb_unregister(nic_dev->hwdev,
HINIC_MGMT_MSG_CMD_LINK_STATUS);
@@ -1144,6 +1236,8 @@ static void hinic_remove(struct pci_dev *pdev)
destroy_workqueue(nic_dev->workq);
+ hinic_vf_func_free(nic_dev->hwdev);
+
hinic_free_hwdev(nic_dev->hwdev);
free_netdev(netdev);
@@ -1164,6 +1258,7 @@ static const struct pci_device_id hinic_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0},
{ 0, 0}
};
MODULE_DEVICE_TABLE(pci, hinic_pci_table);
@@ -1174,6 +1269,7 @@ static struct pci_driver hinic_driver = {
.probe = hinic_probe,
.remove = hinic_remove,
.shutdown = hinic_shutdown,
+ .sriov_configure = hinic_pci_sriov_configure,
};
module_pci_driver(hinic_driver);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 1e389a004e50..175c0ee00038 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -37,20 +37,14 @@ enum mac_op {
static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id, enum mac_op op)
{
- struct net_device *netdev = nic_dev->netdev;
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mac_cmd port_mac_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mac_cmd);
struct pci_dev *pdev = hwif->pdev;
enum hinic_port_cmd cmd;
- u16 out_size;
int err;
- if (vlan_id >= VLAN_N_VID) {
- netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n");
- return -EINVAL;
- }
-
if (op == MAC_SET)
cmd = HINIC_PORT_CMD_SET_MAC;
else
@@ -63,12 +57,25 @@ static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd,
sizeof(port_mac_cmd),
&port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
+ if (err || out_size != sizeof(port_mac_cmd) ||
+ (port_mac_cmd.status &&
+ port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY &&
+ port_mac_cmd.status != HINIC_MGMT_STATUS_EXIST)) {
dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n",
port_mac_cmd.status);
return -EFAULT;
}
+ if (port_mac_cmd.status == HINIC_PF_SET_VF_ALREADY) {
+ dev_warn(&pdev->dev, "PF has already set VF mac, ignore %s operation\n",
+ (op == MAC_SET) ? "set" : "del");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ if (cmd == HINIC_PORT_CMD_SET_MAC && port_mac_cmd.status ==
+ HINIC_MGMT_STATUS_EXIST)
+ dev_warn(&pdev->dev, "MAC is repeated, ignore set operation\n");
+
return 0;
}
@@ -112,8 +119,8 @@ int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mac_cmd port_mac_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mac_cmd);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -144,9 +151,9 @@ int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mtu_cmd port_mtu_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mtu_cmd);
struct pci_dev *pdev = hwif->pdev;
int err, max_frame;
- u16 out_size;
if (new_mtu < HINIC_MIN_MTU_SIZE) {
netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size");
@@ -248,14 +255,9 @@ int hinic_port_link_state(struct hinic_dev *nic_dev,
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_port_link_cmd link_cmd;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(link_cmd);
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
@@ -284,13 +286,11 @@ int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
struct hinic_port_state_cmd port_state;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(port_state);
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
+ if (HINIC_IS_VF(hwdev->hwif))
+ return 0;
port_state.state = state;
@@ -320,7 +320,7 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(func_state);
int err;
func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -351,7 +351,7 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(*port_cap);
int err;
port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -382,7 +382,7 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_tso_config tso_cfg = {0};
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(tso_cfg);
int err;
tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -405,9 +405,9 @@ int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
{
struct hinic_checksum_offload rx_csum_cfg = {0};
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(rx_csum_cfg);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size;
int err;
if (!hwdev)
@@ -443,6 +443,7 @@ int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en)
if (!hwdev)
return -EINVAL;
+ out_size = sizeof(vlan_cfg);
hwif = hwdev->hwif;
pdev = hwif->pdev;
vlan_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -465,14 +466,14 @@ int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_rq_num rq_num = { 0 };
+ struct pci_dev *pdev = hwif->pdev;
u16 out_size = sizeof(rq_num);
int err;
rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif);
rq_num.num_rqs = num_rqs;
- rq_num.rq_depth = ilog2(HINIC_SQ_DEPTH);
+ rq_num.rq_depth = ilog2(nic_dev->rq_depth);
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
&rq_num, sizeof(rq_num),
@@ -491,8 +492,8 @@ static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en,
u8 max_wqe_num)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_lro_config lro_cfg = { 0 };
+ struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
u16 out_size = sizeof(lro_cfg);
int err;
@@ -568,6 +569,9 @@ int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
if (err)
return err;
+ if (HINIC_IS_VF(nic_dev->hwdev->hwif))
+ return 0;
+
err = hinic_set_rx_lro_timer(nic_dev, lro_timer);
if (err)
return err;
@@ -741,9 +745,9 @@ int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
{
struct hinic_rss_context_table ctx_tbl = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(ctx_tbl);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size = sizeof(ctx_tbl);
int err;
if (!hwdev || !rss_type)
@@ -784,7 +788,7 @@ int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id,
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_rss_key rss_key = { 0 };
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(rss_key);
int err;
rss_key.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -809,9 +813,9 @@ int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
{
struct hinic_rss_template_key temp_key = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(temp_key);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size = sizeof(temp_key);
int err;
if (!hwdev || !temp)
@@ -844,7 +848,7 @@ int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(rss_engine);
int err;
rss_engine.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -868,9 +872,9 @@ int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx, u8 *type)
{
struct hinic_rss_engine_type hash_type = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(hash_type);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size = sizeof(hash_type);
int err;
if (!hwdev || !type)
@@ -901,7 +905,7 @@ int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id)
struct hinic_rss_config rss_cfg = { 0 };
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(rss_cfg);
int err;
rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -927,8 +931,8 @@ int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx)
struct hinic_rss_template_mgmt template_mgmt = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(template_mgmt);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -953,8 +957,8 @@ int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx)
struct hinic_rss_template_mgmt template_mgmt = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(template_mgmt);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -1043,9 +1047,9 @@ int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_version_info up_ver = {0};
+ u16 out_size = sizeof(up_ver);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size;
int err;
if (!hwdev)
@@ -1068,3 +1072,132 @@ int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
return 0;
}
+
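+/* Query the firmware for the supported/advertised link mode bitmaps
+ * (enum hinic_link_mode); 0xFFFF in either field means the value is
+ * unknown.
+ */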
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode)
+{
+ u16 out_size;
+ int err;
+
+ if (!hwdev || !link_mode)
+ return -EINVAL;
+
+ out_size = sizeof(*link_mode);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
+ link_mode, sizeof(*link_mode),
+ link_mode, &out_size);
+ if (err || !out_size || link_mode->status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_mode->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable)
+{
+ struct hinic_set_autoneg_cmd autoneg = {0};
+ u16 out_size = sizeof(autoneg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ autoneg.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ autoneg.enable = enable;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_AUTONEG,
+ &autoneg, sizeof(autoneg),
+ &autoneg, &out_size);
+ if (err || !out_size || autoneg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "enable" : "disable", err, autoneg.status,
+ out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed)
+{
+ struct hinic_speed_cmd speed_info = {0};
+ u16 out_size = sizeof(speed_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ speed_info.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ speed_info.speed = speed;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_SPEED,
+ &speed_info, sizeof(speed_info),
+ &speed_info, &out_size);
+ if (err || !out_size || speed_info.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, speed_info.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
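+/* Push the settings selected in info->valid_bitmap (enum
+ * hinic_valid_link_settings) to the hilink management module. The
+ * firmware status is returned on success so callers can detect
+ * HINIC_MGMT_CMD_UNSUPPORTED.
+ */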
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info)
+{
+ u16 out_size = sizeof(*info);
+ int err;
+
+ err = hinic_hilink_msg_cmd(hwdev, HINIC_HILINK_CMD_SET_LINK_SETTINGS,
+ info, sizeof(*info), info, &out_size);
+ if ((info->status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ info->status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, info->status, out_size);
+ return -EFAULT;
+ }
+
+ return info->status;
+}
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 44772fd47fc1..661c6322dc15 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -79,6 +79,42 @@ enum hinic_speed {
HINIC_SPEED_UNKNOWN = 0xFF,
};
+enum hinic_link_mode {
+ HINIC_10GE_BASE_KR = 0,
+ HINIC_40GE_BASE_KR4 = 1,
+ HINIC_40GE_BASE_CR4 = 2,
+ HINIC_100GE_BASE_KR4 = 3,
+ HINIC_100GE_BASE_CR4 = 4,
+ HINIC_25GE_BASE_KR_S = 5,
+ HINIC_25GE_BASE_CR_S = 6,
+ HINIC_25GE_BASE_KR = 7,
+ HINIC_25GE_BASE_CR = 8,
+ HINIC_GE_BASE_KX = 9,
+ HINIC_LINK_MODE_NUMBERS,
+
+ HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
+};
+
+enum hinic_port_type {
+ HINIC_PORT_TP, /* BASET */
+ HINIC_PORT_AUI,
+ HINIC_PORT_MII,
+ HINIC_PORT_FIBRE, /* OPTICAL */
+ HINIC_PORT_BNC,
+ HINIC_PORT_ELEC,
+ HINIC_PORT_COPPER, /* PORT_DA */
+ HINIC_PORT_AOC,
+ HINIC_PORT_BACKPLANE,
+ HINIC_PORT_NONE = 0xEF,
+ HINIC_PORT_OTHER = 0xFF,
+};
+
+enum hinic_valid_link_settings {
+ HILINK_LINK_SET_SPEED = 0x1,
+ HILINK_LINK_SET_AUTONEG = 0x2,
+ HILINK_LINK_SET_FEC = 0x4,
+};
+
enum hinic_tso_state {
HINIC_TSO_DISABLE = 0,
HINIC_TSO_ENABLE = 1,
@@ -148,9 +184,9 @@ struct hinic_port_link_status {
u8 version;
u8 rsvd0[6];
- u16 rsvd1;
+ u16 func_id;
u8 link;
- u8 rsvd2;
+ u8 port_id;
};
struct hinic_port_func_state_cmd {
@@ -179,6 +215,50 @@ struct hinic_port_cap {
u8 rsvd2[3];
};
+struct hinic_link_mode_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u16 supported; /* 0xFFFF represents invalid value */
+ u16 advertised;
+};
+
+struct hinic_speed_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 speed;
+};
+
+struct hinic_set_autoneg_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 enable; /* 1: enable, 0: disable */
+};
+
+struct hinic_link_ksettings_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+
+ u32 valid_bitmap;
+ u32 speed; /* enum nic_speed_level */
+ u8 autoneg; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u8 rsvd2[18]; /* reserved for duplex, port, etc. */
+};
+
struct hinic_tso_config {
u8 status;
u8 version;
@@ -506,6 +586,61 @@ struct hinic_cmd_vport_stats {
struct hinic_vport_stats stats;
};
+struct hinic_tx_rate_cfg_max_min {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 min_rate;
+ u32 max_rate;
+ u8 rsvd2[8];
+};
+
+struct hinic_tx_rate_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 tx_rate;
+};
+
+enum nic_speed_level {
+ LINK_SPEED_10MB = 0,
+ LINK_SPEED_100MB,
+ LINK_SPEED_1GB,
+ LINK_SPEED_10GB,
+ LINK_SPEED_25GB,
+ LINK_SPEED_40GB,
+ LINK_SPEED_100GB,
+ LINK_SPEED_LEVELS,
+};
+
+struct hinic_spoofchk_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1;
+ u16 func_id;
+};
+
+struct hinic_pause_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -585,4 +720,24 @@ int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en);
int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver);
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info);
+
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode);
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable);
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed);
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_open(struct net_device *netdev);
+
+int hinic_close(struct net_device *netdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 815649e37cb1..af20d0dd6de7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -432,9 +432,11 @@ static int rx_poll(struct napi_struct *napi, int budget)
return budget;
napi_complete(napi);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- rq->msix_entry,
- HINIC_MSIX_ENABLE);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_ENABLE);
return pkts;
}
@@ -461,9 +463,10 @@ static irqreturn_t rx_irq(int irq, void *data)
/* Disable the interrupt until napi will be completed */
nic_dev = netdev_priv(rxq->netdev);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- rq->msix_entry,
- HINIC_MSIX_DISABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_DISABLE);
nic_dev = netdev_priv(rxq->netdev);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
new file mode 100644
index 000000000000..efab2dd2c889
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -0,0 +1,1294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic_hw_dev.h"
+#include "hinic_dev.h"
+#include "hinic_hw_mbox.h"
+#include "hinic_hw_cmdq.h"
+#include "hinic_port.h"
+#include "hinic_sriov.h"
+
+static unsigned char set_vf_link_state;
+module_param(set_vf_link_state, byte, 0444);
+MODULE_PARM_DESC(set_vf_link_state, "Set VF link state: 0 - link auto (default), 1 - link always up, 2 - link always down");
+
+#define HINIC_VLAN_PRIORITY_SHIFT 13
+#define HINIC_ADD_VLAN_IN_MAC 0x8000
+#define HINIC_TX_RATE_TABLE_FULL 12
+
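+/* Program a unicast MAC for the given function. Replies that only mean
+ * "nothing to do" (HINIC_PF_SET_VF_ALREADY, HINIC_MGMT_STATUS_EXIST) are
+ * tolerated so that re-applying an existing VF configuration succeeds.
+ */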
+static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
+ u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_cmd mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ mac_info.func_idx = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+ if (err || out_size != sizeof(mac_info) ||
+ (mac_info.status && mac_info.status != HINIC_PF_SET_VF_ALREADY &&
+ mac_info.status != HINIC_MGMT_STATUS_EXIST)) {
+ dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to change MAC, status = %d\n",
+ mac_info.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id,
+ u8 link_status)
+{
+ struct vf_data_storage *vf_infos = hwdev->func_to_io.vf_infos;
+ struct hinic_port_link_status link = {0};
+ u16 out_size = sizeof(link);
+ int err;
+
+ if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) {
+ link.link = link_status;
+ link.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+ vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT,
+ &link, sizeof(link),
+ &link, &out_size, 0);
+ if (err || !out_size || link.status)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err,
+ link.status, out_size);
+ }
+}
+
+/* Send a link change event mailbox message to every registered VF under
+ * this PF whose link state is not administratively forced.
+ */
+void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
+ u8 link_status)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ u16 i;
+
+ nic_io->link_status = link_status;
+ for (i = 1; i <= nic_io->max_vfs; i++) {
+ if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced)
+ hinic_notify_vf_link_status(hwdev, i, link_status);
+ }
+}
+
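+/* Combine a VF's PF-assigned VLAN id and QoS priority into the single
+ * vlan/prio word used by the ndo VF configuration paths (priority in
+ * bits 15:13, VLAN id in the low bits).
+ */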
+static u16 hinic_vf_info_vlanprio(struct hinic_hwdev *hwdev, int vf_id)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ u16 pf_vlan, vlanprio;
+ u8 pf_qos;
+
+ pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;
+ pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;
+ vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT;
+
+ return vlanprio;
+}
+
+static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid,
+ u8 qos, int vf_id)
+{
+ struct hinic_vf_vlan_config vf_vlan = {0};
+ u16 out_size = sizeof(vf_vlan);
+ int err;
+ u8 cmd;
+
+ /* VLAN 0 is a special case, don't allow it to be removed */
+ if (!vid && !add)
+ return 0;
+
+ vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ vf_vlan.vlan_id = vid;
+ vf_vlan.qos = qos;
+
+ if (add)
+ cmd = HINIC_PORT_CMD_SET_VF_VLAN;
+ else
+ cmd = HINIC_PORT_CMD_CLR_VF_VLAN;
+
+ err = hinic_port_msg_cmd(hwdev, cmd, &vf_vlan,
+ sizeof(vf_vlan), &vf_vlan, &out_size);
+ if (err || !out_size || vf_vlan.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic_set_vf_tx_rate_max_min(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg_max_min rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.max_rate = max_rate;
+ rate_cfg.min_rate = min_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rate_cfg.status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err,
+ rate_cfg.status, out_size);
+ return -EIO;
+ }
+
+ if (!rate_cfg.status) {
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate;
+ }
+
+ return rate_cfg.status;
+}
+
+static int hinic_set_vf_rate_limit(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 tx_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.tx_rate = tx_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if (err || !out_size || rate_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status,
+ out_size);
+ if (rate_cfg.status)
+ return rate_cfg.status;
+
+ return -EIO;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0;
+
+ return 0;
+}
+
+static int hinic_set_vf_tx_rate(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ int err;
+
+ err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED)
+ return err;
+
+ if (min_rate) {
+ dev_err(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate\n");
+ return -EOPNOTSUPP;
+ }
+
+ dev_info(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate, force min_tx_rate = max_tx_rate\n");
+
+ return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+}
+
+static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id)
+{
+ struct vf_data_storage *vf_info;
+ u16 func_id, vlan_id;
+ int err = 0;
+
+ vf_info = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac) {
+ func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+
+ vlan_id = 0;
+
+ err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, vlan_id,
+ func_id);
+ if (err) {
+ dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set VF %d MAC\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+
+ if (hinic_vf_info_vlanprio(hwdev, vf_id)) {
+ err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan,
+ vf_info->pf_qos, vf_id);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to add VF %d VLAN_QOS\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+
+ if (vf_info->max_rate) {
+ err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate,
+ vf_info->min_rate);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d max rate: %d, min rate: %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->max_rate,
+ vf_info->min_rate);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
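+/* Mailbox handler: a VF driver has loaded. Re-apply any PF-assigned MAC,
+ * VLAN/QoS and tx rate configuration before marking the VF registered.
+ */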
+static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_register_vf *register_info = buf_out;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ if (vf_id > nic_io->max_vfs) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Register VF id %d exceed limit[0-%d]\n",
+ HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs));
+ register_info->status = EFAULT;
+ return -EFAULT;
+ }
+
+ *out_size = sizeof(*register_info);
+ err = hinic_init_vf_config(hw_dev, vf_id);
+ if (err) {
+ register_info->status = EFAULT;
+ return err;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true;
+
+ return 0;
+}
+
+static int hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+
+ nic_io = &hw_dev->func_to_io;
+ *out_size = 0;
+ if (vf_id > nic_io->max_vfs)
+ return 0;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false;
+
+ return 0;
+}
+
+static int hinic_change_vf_mtu_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, buf_in,
+ in_size, buf_out, out_size);
+ if (err) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Failed to set VF %u mtu\n",
+ vf_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic_get_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_info = buf_out;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+
+ nic_io = &dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN);
+ mac_info->status = 0;
+ *out_size = sizeof(*mac_info);
+
+ return 0;
+}
+
+static int hinic_set_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_out = buf_out;
+ struct hinic_port_mac_cmd *mac_in = buf_in;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac && !(vf_info->trust) &&
+ is_valid_ether_addr(mac_in->mac)) {
+ dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF %d MAC address\n",
+ HW_VF_ID_TO_OS(vf_id));
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_SET_MAC, buf_in,
+ in_size, buf_out, out_size);
+ if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+ dev_err(&hw_dev->hwif->pdev->dev,
+ "Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_del_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_out = buf_out;
+ struct hinic_port_mac_cmd *mac_in = buf_in;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) &&
+ !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) {
+ dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF mac.\n");
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_DEL_MAC, buf_in,
+ in_size, buf_out, out_size);
+ if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_get_vf_link_status_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_link_cmd *get_link = buf_out;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct vf_data_storage *vf_infos;
+ struct hinic_func_to_io *nic_io;
+ bool link_forced, link_up;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_infos = nic_io->vf_infos;
+ link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced;
+ link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up;
+
+ if (link_forced)
+ get_link->state = link_up ?
+ HINIC_LINK_STATE_UP : HINIC_LINK_STATE_DOWN;
+ else
+ get_link->state = nic_io->link_status;
+
+ get_link->status = 0;
+ *out_size = sizeof(*get_link);
+
+ return 0;
+}
+
+static struct vf_cmd_msg_handle nic_vf_cmd_msg_handler[] = {
+ {HINIC_PORT_CMD_VF_REGISTER, hinic_register_vf_msg_handler},
+ {HINIC_PORT_CMD_VF_UNREGISTER, hinic_unregister_vf_msg_handler},
+ {HINIC_PORT_CMD_CHANGE_MTU, hinic_change_vf_mtu_msg_handler},
+ {HINIC_PORT_CMD_GET_MAC, hinic_get_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_SET_MAC, hinic_set_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_DEL_MAC, hinic_del_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_GET_LINK_STATE, hinic_get_vf_link_status_msg_handler},
+};
+
+#define CHECK_IPSU_15BIT 0x8000
+
+static
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ return &nic_dev->sriov_info;
+}
+
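+/* A MAC command reply is acceptable when the status is success, "already
+ * set by PF" or "entry exists", unless the EXIST status is reported with
+ * the IPSU flag (bit 15) set in the returned vlan_id.
+ */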
+static int hinic_check_mac_info(u8 status, u16 vlan_id)
+{
+ if ((status && status != HINIC_MGMT_STATUS_EXIST &&
+ status != HINIC_PF_SET_VF_ALREADY) ||
+ (vlan_id & CHECK_IPSU_15BIT &&
+ status == HINIC_MGMT_STATUS_EXIST))
+ return -EINVAL;
+
+ return 0;
+}
+
+#define HINIC_VLAN_ID_MASK 0x7FFF
+
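+/* Replace old_mac with new_mac for a function in a single firmware
+ * command. Bit 15 of vlan_id is the add-VLAN-in-MAC flag, so only the
+ * low 15 bits are validated as a VLAN id.
+ */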
+static int hinic_update_mac(struct hinic_hwdev *hwdev, u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_update mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !old_mac || !new_mac)
+ return -EINVAL;
+
+ if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(&hwdev->hwif->pdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.old_mac, old_mac, ETH_ALEN);
+ memcpy(mac_info.new_mac, new_mac, ETH_ALEN);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_UPDATE_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+
+ if (err || !out_size ||
+ hinic_check_mac_info(mac_info.status, mac_info.vlan_id)) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ if (mac_info.status == HINIC_PF_SET_VF_ALREADY) {
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "PF has already set VF MAC. Ignore update operation\n");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ if (mac_info.status == HINIC_MGMT_STATUS_EXIST)
+ dev_warn(&hwdev->hwif->pdev->dev, "MAC is repeated. Ignore update operation\n");
+
+ return 0;
+}
+
+static void hinic_get_vf_config(struct hinic_hwdev *hwdev, u16 vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct vf_data_storage *vfinfo;
+
+ vfinfo = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ ivi->vf = HW_VF_ID_TO_OS(vf_id);
+ memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN);
+ ivi->vlan = vfinfo->pf_vlan;
+ ivi->qos = vfinfo->pf_qos;
+ ivi->spoofchk = vfinfo->spoofchk;
+ ivi->trusted = vfinfo->trust;
+ ivi->max_tx_rate = vfinfo->max_rate;
+ ivi->min_tx_rate = vfinfo->min_rate;
+
+ if (!vfinfo->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vfinfo->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+}
+
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi);
+
+ return 0;
+}
+
+static int hinic_set_vf_mac(struct hinic_hwdev *hwdev, int vf,
+ unsigned char *mac_addr)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct vf_data_storage *vf_info;
+ u16 func_id;
+ int err;
+
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+
+ /* duplicate request, so just return success */
+ if (vf_info->pf_set_mac &&
+ !memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN))
+ return 0;
+
+ vf_info->pf_set_mac = true;
+
+ func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf;
+ err = hinic_update_mac(hwdev, vf_info->vf_mac_addr,
+ mac_addr, 0, func_id);
+ if (err) {
+ vf_info->pf_set_mac = false;
+ return err;
+ }
+
+ memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN);
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ int err;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (!is_valid_ether_addr(mac) || vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);
+ if (err)
+ return err;
+
+ netif_info(nic_dev, drv, netdev, "Setting MAC %pM on VF %d\n", mac, vf);
+ netif_info(nic_dev, drv, netdev, "Reload the VF driver to make this change effective.");
+
+ return 0;
+}
+
+static int hinic_add_vf_vlan(struct hinic_hwdev *hwdev, int vf_id,
+ u16 vlan, u8 qos)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hwdev, true, vlan, qos, vf_id);
+ if (err)
+ return err;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos;
+
+ dev_info(&hwdev->hwif->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan, qos, HW_VF_ID_TO_OS(vf_id));
+ return 0;
+}
+
+static int hinic_kill_vf_vlan(struct hinic_hwdev *hwdev, int vf_id)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hwdev, false,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos,
+ vf_id);
+ if (err)
+ return err;
+
+ dev_info(&hwdev->hwif->pdev->dev, "Remove VLAN %d on VF %d\n",
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ HW_VF_ID_TO_OS(vf_id));
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0;
+
+ return 0;
+}
+
+static int hinic_update_mac_vlan(struct hinic_dev *nic_dev, u16 old_vlan,
+ u16 new_vlan, int vf_id)
+{
+ struct vf_data_storage *vf_info;
+ u16 vlan_id;
+ int err;
+
+ if (!nic_dev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID)
+ return -EINVAL;
+
+ vf_info = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (!vf_info->pf_set_mac)
+ return 0;
+
+ vlan_id = old_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+
+ err = hinic_port_del_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+ if (err) {
+ dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to delete VF %d MAC %pM vlan %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, old_vlan);
+ return err;
+ }
+
+ vlan_id = new_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+
+ err = hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+ if (err) {
+ dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to add VF %d MAC %pM vlan %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, new_vlan);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ vlan_id = old_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+ hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+
+ return err;
+}
+
+static int set_hw_vf_vlan(struct hinic_dev *nic_dev,
+ u16 cur_vlanprio, int vf, u16 vlan, u8 qos)
+{
+ u16 old_vlan = cur_vlanprio & VLAN_VID_MASK;
+ int err = 0;
+
+ if (vlan || qos) {
+ if (cur_vlanprio) {
+ err = hinic_kill_vf_vlan(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d old vlan %d\n",
+ vf, old_vlan);
+ goto out;
+ }
+ }
+ err = hinic_add_vf_vlan(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf), vlan, qos);
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to add vf %d new vlan %d\n",
+ vf, vlan);
+ goto out;
+ }
+ } else {
+ err = hinic_kill_vf_vlan(nic_dev->hwdev, OS_VF_ID_TO_HW(vf));
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d vlan %d\n",
+ vf, old_vlan);
+ goto out;
+ }
+ }
+
+ err = hinic_update_mac_vlan(nic_dev, old_vlan, vlan,
+ OS_VF_ID_TO_HW(vf));
+
+out:
+ return err;
+}
+
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ u16 vlanprio, cur_vlanprio;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+ vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT;
+ cur_vlanprio = hinic_vf_info_vlanprio(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ /* duplicate request, so just return success */
+ if (vlanprio == cur_vlanprio)
+ return 0;
+
+ return set_hw_vf_vlan(nic_dev, cur_vlanprio, vf, vlan, qos);
+}
+
+static int hinic_set_vf_trust(struct hinic_hwdev *hwdev, u16 vf_id,
+ bool trust)
+{
+ struct vf_data_storage *vf_infos;
+ struct hinic_func_to_io *nic_io;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = &hwdev->func_to_io;
+ vf_infos = nic_io->vf_infos;
+ vf_infos[vf_id].trust = trust;
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ struct hinic_func_to_io *nic_io;
+ bool cur_trust;
+ int err;
+
+ sriov_info = &adapter->sriov_info;
+ nic_io = &adapter->hwdev->func_to_io;
+
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_trust = nic_io->vf_infos[vf].trust;
+ /* same request, so just return success */
+ if ((setting && cur_trust) || (!setting && !cur_trust))
+ return 0;
+
+ err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
+ if (!err)
+ dev_info(&sriov_info->pdev->dev, "Set VF %d trusted %s succeed\n",
+ vf, setting ? "on" : "off");
+ else
+ dev_err(&sriov_info->pdev->dev, "Failed set VF %d trusted %s\n",
+ vf, setting ? "on" : "off");
+
+ return err;
+}
+
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate)
+{
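+ /* link speeds in Mbit/s, indexed by enum nic_speed_level */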
+ u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000, SPEED_100000};
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_port_cap port_cap = { 0 };
+ enum hinic_port_link_state link_state;
+ int err;
+
+ if (vf >= nic_dev->sriov_info.num_vfs) {
+ netif_err(nic_dev, drv, netdev, "VF number must be less than %d\n",
+ nic_dev->sriov_info.num_vfs);
+ return -EINVAL;
+ }
+
+ if (max_tx_rate < min_tx_rate) {
+ netif_err(nic_dev, drv, netdev, "Max rate %d must be greater than or equal to min rate %d\n",
+ max_tx_rate, min_tx_rate);
+ return -EINVAL;
+ }
+
+ err = hinic_port_link_state(nic_dev, &link_state);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to get link status when setting VF tx rate\n");
+ return -EIO;
+ }
+
+ if (link_state == HINIC_LINK_STATE_DOWN) {
+ netif_err(nic_dev, drv, netdev,
+ "Link status must be up when setting vf tx rate\n");
+ return -EPERM;
+ }
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err || port_cap.speed > LINK_SPEED_100GB)
+ return -EIO;
+
+ /* rate limit cannot be less than 0 or greater than the link speed */
+ if (max_tx_rate < 0 || max_tx_rate > speeds[port_cap.speed]) {
+ netif_err(nic_dev, drv, netdev, "Max tx rate must be in [0 - %d]\n",
+ speeds[port_cap.speed]);
+ return -EINVAL;
+ }
+
+ err = hinic_set_vf_tx_rate(nic_dev->hwdev, OS_VF_ID_TO_HW(vf),
+ max_tx_rate, min_tx_rate);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Unable to set VF %d max rate %d min rate %d%s\n",
+ vf, max_tx_rate, min_tx_rate,
+ err == HINIC_TX_RATE_TABLE_FULL ?
+ ", tx rate profile is full" : "");
+ return -EIO;
+ }
+
+ netif_info(nic_dev, drv, netdev,
+ "Set VF %d max tx rate %d min tx rate %d successfully\n",
+ vf, max_tx_rate, min_tx_rate);
+
+ return 0;
+}
+
+static int hinic_set_vf_spoofchk(struct hinic_hwdev *hwdev, u16 vf_id,
+ bool spoofchk)
+{
+ struct hinic_spoofchk_set spoofchk_cfg = {0};
+ struct vf_data_storage *vf_infos = NULL;
+ u16 out_size = sizeof(spoofchk_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vf_infos = hwdev->func_to_io.vf_infos;
+
+ spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ spoofchk_cfg.state = spoofchk ? 1 : 0;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ENABLE_SPOOFCHK,
+ &spoofchk_cfg, sizeof(spoofchk_cfg),
+ &spoofchk_cfg, &out_size);
+ if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || spoofchk_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status,
+ out_size);
+ err = -EIO;
+ }
+
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk;
+
+ return err;
+}
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ bool cur_spoofchk;
+ int err;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
+
+ /* same request, so just return success */
+ if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ return 0;
+
+ err = hinic_set_vf_spoofchk(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf), setting);
+
+ if (!err) {
+ netif_info(nic_dev, drv, netdev, "Set VF %d spoofchk %s successfully\n",
+ vf, setting ? "on" : "off");
+ } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ netif_err(nic_dev, drv, netdev,
+ "Current firmware doesn't support to set vf spoofchk, need to upgrade latest firmware version\n");
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int hinic_set_vf_link_state(struct hinic_hwdev *hwdev, u16 vf_id,
+ int link)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+ u8 link_status = 0;
+
+ switch (link) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ?
+ true : false;
+ link_status = nic_io->link_status;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true;
+ link_status = HINIC_LINK_UP;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false;
+ link_status = HINIC_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Notify the VF of its new link state */
+ hinic_notify_vf_link_status(hwdev, vf_id, link_status);
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = &nic_dev->sriov_info;
+
+ if (vf_id >= sriov_info->num_vfs) {
+ netif_err(nic_dev, drv, netdev,
+ "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ return hinic_set_vf_link_state(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf_id), link);
+}
+
+/* PF-side handler for L2NIC mailbox messages from a VF: dispatch known
+ * commands through nic_vf_cmd_msg_handler[], and forward everything else
+ * to the management CPU unchanged.
+ */
+static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct vf_cmd_msg_handle *vf_msg_handle;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct hinic_pfhwdev *pfhwdev;
+ int err = 0;
+ u32 i;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ pfhwdev = container_of(dev, struct hinic_pfhwdev, hwdev);
+ nic_io = &dev->func_to_io;
+ for (i = 0; i < ARRAY_SIZE(nic_vf_cmd_msg_handler); i++) {
+ vf_msg_handle = &nic_vf_cmd_msg_handler[i];
+ if (cmd == vf_msg_handle->cmd &&
+ vf_msg_handle->cmd_msg_handler) {
+ err = vf_msg_handle->cmd_msg_handler(hwdev, vf_id,
+ buf_in, in_size,
+ buf_out,
+ out_size);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(nic_vf_cmd_msg_handler))
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
+ cmd, buf_in, in_size, buf_out,
+ out_size, HINIC_MGMT_MSG_SYNC);
+
+ if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&nic_io->hwif->pdev->dev, "PF receive VF L2NIC cmd: %d process error, err:%d\n",
+ cmd, err);
+ return err;
+}
+
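+/* Answer a VF's capability query from the PF's own capability table; the
+ * VF is given the per-VF queue limits rather than the PF limits.
+ */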
+static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_dev_cap *dev_cap = buf_out;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_cap *cap;
+
+ cap = &dev->nic_cap;
+ memset(dev_cap, 0, sizeof(*dev_cap));
+
+ dev_cap->max_vf = cap->max_vf;
+ dev_cap->max_sqs = cap->max_vf_qps;
+ dev_cap->max_rqs = cap->max_vf_qps;
+
+ *out_size = sizeof(*dev_cap);
+
+ return 0;
+}
+
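+/* Seed a VF's link-state bookkeeping from the set_vf_link_state module
+ * parameter; out-of-range values fall back to automatic link state.
+ */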
+static int hinic_init_vf_infos(struct hinic_func_to_io *nic_io, u16 vf_id)
+{
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+
+ if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) {
+ dev_warn(&nic_io->hwif->pdev->dev, "Module Parameter set_vf_link_state value %d is out of range, resetting to %d\n",
+ set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO);
+ set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO;
+ }
+
+ switch (set_vf_link_state) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[vf_id].link_forced = false;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = true;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = false;
+ break;
+ default:
+ dev_err(&nic_io->hwif->pdev->dev, "Invalid input parameter set_vf_link_state: %d\n",
+ set_vf_link_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
+{
+ struct vf_data_storage *vf_infos;
+ u16 func_id;
+
+ func_id = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + vf_id;
+ vf_infos = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_infos->pf_set_mac)
+ hinic_port_del_mac(nic_dev, vf_infos->vf_mac_addr, 0);
+
+ if (hinic_vf_info_vlanprio(nic_dev->hwdev, vf_id))
+ hinic_kill_vf_vlan(nic_dev->hwdev, vf_id);
+
+ if (vf_infos->max_rate)
+ hinic_set_vf_tx_rate(nic_dev->hwdev, vf_id, 0, 0);
+
+ if (vf_infos->spoofchk)
+ hinic_set_vf_spoofchk(nic_dev->hwdev, vf_id, false);
+
+ if (vf_infos->trust)
+ hinic_set_vf_trust(nic_dev->hwdev, vf_id, false);
+
+ memset(vf_infos, 0, sizeof(*vf_infos));
+ /* reinitialize vf_infos with the module parameter defaults */
+ hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
+}
+
+static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
+ u16 start_vf_id, u16 end_vf_id)
+{
+ struct hinic_dev *nic_dev;
+ u16 func_idx, idx;
+
+ nic_dev = container_of(sriov_info, struct hinic_dev, sriov_info);
+
+ for (idx = start_vf_id; idx <= end_vf_id; idx++) {
+ func_idx = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + idx;
+ hinic_set_wq_page_size(nic_dev->hwdev, func_idx,
+ HINIC_HW_WQ_PAGE_SIZE);
+ hinic_clear_vf_infos(nic_dev, idx);
+ }
+
+ return 0;
+}
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf register_info = {0};
+ u16 out_size = sizeof(register_info);
+ struct hinic_func_to_io *nic_io;
+ int err = 0;
+ u32 size, i;
+
+ nic_io = &hwdev->func_to_io;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_REGISTER,
+ &register_info, sizeof(register_info),
+ &register_info, &out_size, 0);
+ if (err || register_info.status || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, register_info.status, out_size);
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ return -EIO;
+ }
+ } else {
+ err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_CFGM,
+ cfg_mbx_pf_proc_vf_msg);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Register PF mailbox callback failed\n");
+ return err;
+ }
+ nic_io->max_vfs = hwdev->nic_cap.max_vf;
+ size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs;
+ if (size != 0) {
+ nic_io->vf_infos = kzalloc(size, GFP_KERNEL);
+ if (!nic_io->vf_infos) {
+ err = -ENOMEM;
+ goto out_free_nic_io;
+ }
+
+ for (i = 0; i < nic_io->max_vfs; i++) {
+ err = hinic_init_vf_infos(nic_io, i);
+ if (err)
+ goto err_init_vf_infos;
+ }
+
+ err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ nic_pf_mbox_handler);
+ if (err)
+ goto err_register_pf_mbox_cb;
+ }
+ }
+
+ return 0;
+
+err_register_pf_mbox_cb:
+err_init_vf_infos:
+ kfree(nic_io->vf_infos);
+out_free_nic_io:
+ return err;
+}
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf unregister = {0};
+ u16 out_size = sizeof(unregister);
+ int err;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_UNREGISTER,
+ &unregister, sizeof(unregister),
+ &unregister, &out_size, 0);
+ if (err || !out_size || unregister.status)
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, unregister.status, out_size);
+ } else {
+ if (hwdev->func_to_io.vf_infos) {
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ kfree(hwdev->func_to_io.vf_infos);
+ }
+ }
+}
+
+static int hinic_init_vf_hw(struct hinic_hwdev *hwdev, u16 start_vf_id,
+ u16 end_vf_id)
+{
+ u16 i, func_idx;
+ int err;
+
+ /* VFs use 256K as the default WQ page size and cannot change it */
+ for (i = start_vf_id; i <= end_vf_id; i++) {
+ func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + i;
+ err = hinic_set_wq_page_size(hwdev, func_idx,
+ HINIC_DEFAULT_WQ_PAGE_SIZE);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
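+/* Tear down SR-IOV. The HINIC_SRIOV_DISABLE state bit lets a concurrent
+ * remove wait for us, and the teardown is refused while any VF is still
+ * assigned to a guest.
+ */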
+int hinic_pci_sriov_disable(struct pci_dev *pdev)
+{
+ struct hinic_sriov_info *sriov_info;
+ u16 tmp_vfs;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+ /* nothing to do if SR-IOV is already disabled */
+ if (!sriov_info->sriov_enabled)
+ return 0;
+
+ set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(sriov_info->pdev)) {
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+ dev_warn(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return -EPERM;
+ }
+ sriov_info->sriov_enabled = false;
+
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(sriov_info->pdev);
+
+ tmp_vfs = (u16)sriov_info->num_vfs;
+ sriov_info->num_vfs = 0;
+ hinic_deinit_vf_hw(sriov_info, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW(tmp_vfs - 1));
+
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+ return 0;
+}
+
+int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct hinic_sriov_info *sriov_info;
+ int err;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+
+ if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) {
+ dev_err(&pdev->dev,
+ "SR-IOV enable in process, please wait, num_vfs %d\n",
+ num_vfs);
+ return -EPERM;
+ }
+
+ err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW((u16)num_vfs - 1));
+ if (err) {
+ dev_err(&sriov_info->pdev->dev,
+ "Failed to init vf in hardware before enable sriov, error %d\n",
+ err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ err = pci_enable_sriov(sriov_info->pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to enable SR-IOV, error %d\n", err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ sriov_info->sriov_enabled = true;
+ sriov_info->num_vfs = num_vfs;
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+
+ return num_vfs;
+}
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+
+ if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state))
+ return -EBUSY;
+
+ if (!num_vfs)
+ return hinic_pci_sriov_disable(dev);
+ else
+ return hinic_pci_sriov_enable(dev, num_vfs);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
new file mode 100644
index 000000000000..ba627a362f9a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_SRIOV_H
+#define HINIC_SRIOV_H
+
+#include "hinic_hw_dev.h"
+
+#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
+#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
+
+enum hinic_sriov_state {
+ HINIC_SRIOV_DISABLE,
+ HINIC_SRIOV_ENABLE,
+ HINIC_FUNC_REMOVE,
+};
+
+enum {
+ HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
+ HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */
+ HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */
+};
+
+struct hinic_sriov_info {
+ struct pci_dev *pdev;
+ struct hinic_hwdev *hwdev;
+ bool sriov_enabled;
+ unsigned int num_vfs;
+ unsigned long state;
+};
+
+struct vf_data_storage {
+ u8 vf_mac_addr[ETH_ALEN];
+ bool registered;
+ bool pf_set_mac;
+ u16 pf_vlan;
+ u8 pf_qos;
+ u32 max_rate;
+ u32 min_rate;
+
+ bool link_forced;
+ bool link_up; /* only valid if VF link is forced */
+ bool spoofchk;
+ bool trust;
+};
+
+struct hinic_register_vf {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct hinic_port_mac_update {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct hinic_vf_vlan_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u8 qos;
+ u8 rsvd1[7];
+};
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
+
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
+
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate);
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+
+void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
+ u8 link_status);
+
+int hinic_pci_sriov_disable(struct pci_dev *dev);
+
+int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+
+#endif
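The two conversion macros encode the usual convention that hardware function IDs reserve slot 0 for the PF, so OS VF index 0 is hardware VF ID 1 and vice versa. A worked example matching the call in hinic_pci_sriov_enable() above:

/* Enabling 4 VFs initializes hardware VF IDs 1..4: */
u16 first_hw_vf = OS_VF_ID_TO_HW(0);		/* 0 + 1 == 1 */
u16 last_hw_vf  = OS_VF_ID_TO_HW((u16)4 - 1);	/* 3 + 1 == 4 */

/* and HW_VF_ID_TO_OS() inverts the mapping: */
u16 os_idx = HW_VF_ID_TO_OS(first_hw_vf);	/* 1 - 1 == 0 */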
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 365016450bdb..4c66a0bc1b28 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -673,9 +673,11 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
if (pkts < budget) {
napi_complete(napi);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- sq->msix_entry,
- HINIC_MSIX_ENABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ sq->msix_entry,
+ HINIC_MSIX_ENABLE);
+
return pkts;
}
@@ -701,10 +703,11 @@ static irqreturn_t tx_irq(int irq, void *data)
nic_dev = netdev_priv(txq->netdev);
- /* Disable the interrupt until napi will be completed */
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- txq->sq->msix_entry,
- HINIC_MSIX_DISABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+		/* Disable the interrupt until NAPI completes */
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ txq->sq->msix_entry,
+ HINIC_MSIX_DISABLE);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3de549c6c693..197dc5b2c090 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -4678,12 +4678,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
break;
}
- dev_info(dev, "Partner protocol version is %d\n",
- crq->version_exchange_rsp.version);
- if (be16_to_cpu(crq->version_exchange_rsp.version) <
- ibmvnic_version)
- ibmvnic_version =
+ ibmvnic_version =
be16_to_cpu(crq->version_exchange_rsp.version);
+ dev_info(dev, "Partner protocol version is %d\n",
+ ibmvnic_version);
send_cap_queries(adapter);
break;
case QUERY_CAPABILITY_RSP:
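The reordering fixes two problems at once: the driver now simply adopts the partner's protocol version rather than only lowering its own, and the dev_info() prints the value after be16_to_cpu() instead of the raw big-endian wire encoding, which on little-endian hosts printed a byte-swapped number. A standalone illustration of the logging half, not driver code:

#include <asm/byteorder.h>

__be16 wire = cpu_to_be16(1);	/* protocol version 1 on the wire */

pr_info("raw: %d\n", (u16)wire);	  /* little-endian host: "raw: 256" */
pr_info("cpu: %d\n", be16_to_cpu(wire));  /* correct:            "cpu: 1"  */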
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 48428d6a00be..623e516a9630 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3960,7 +3960,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
* @hw: Struct containing variables accessed by shared code
*
* Reads the first 64 16 bit words of the EEPROM and sums the values read.
- * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
* valid.
*/
s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 0d51cbc88028..d9fa4600f745 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter)
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
- e1000_down(adapter);
- e1000_up(adapter);
+
+ /* only run the task if not already down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ }
+
clear_bit(__E1000_RESETTING, &adapter->flags);
}
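Both hunks lean on the same lock-free guard idiom: an atomic test_and_set_bit() serializes entry to the reset path, and a test_bit() check on __E1000_DOWN lets the body be skipped once close has marked the adapter down. Stripped to its skeleton (flag names from this driver, everything else elided):

while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
	msleep(1);			/* wait until we own the flag */

if (!test_bit(__E1000_DOWN, &adapter->flags)) {
	/* nobody is tearing the device down; safe to bounce it */
	e1000_down(adapter);
	e1000_up(adapter);
}

clear_bit(__E1000_RESETTING, &adapter->flags);	/* release ownership */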
@@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
int count = E1000_CHECK_RESET_COUNT;
- while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
usleep_range(10000, 20000);
- WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ WARN_ON(count < 0);
+
+ /* signal that we're down so that the reset task will no longer run */
+ set_bit(__E1000_DOWN, &adapter->flags);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+
e1000_down(adapter);
e1000_power_down_phy(adapter);
e1000_free_irq(adapter);
@@ -3136,8 +3146,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
+ case e1000_82544: {
unsigned int pull_size;
- case e1000_82544:
+
/* Make sure we have room to chop off 4 bytes,
* and that the end alignment will work out to
* this hardware's requirements
@@ -3158,6 +3169,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
len = skb_headlen(skb);
break;
+ }
default:
/* do nothing */
break;
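The braces added around the e1000_82544 case are a C scoping requirement: a case label must precede a statement, not a declaration, so pull_size can only be declared inside a compound statement opened by the case; the old placement before the label also gave it switch-wide scope while only one branch used it. A standalone sketch of the corrected shape; the variable values are illustrative:

switch (mac_type) {
case e1000_82544: {			/* braces open a new scope ... */
	unsigned int pull_size;		/* ... making the local legal  */

	pull_size = min(4u, len);
	/* ... chop pull_size bytes off the head of the skb ... */
	break;
}
default:
	/* do nothing */
	break;
}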
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 37a2314d3e6b..944abd5eae11 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -576,7 +576,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
#define er32(reg) __er32(hw, E1000_##reg)
-s32 __ew32_prepare(struct e1000_hw *hw);
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 735bf25952fc..f999cca37a8a 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -300,7 +300,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
* so forcibly disable it.
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
- e1000_disable_ulp_lpt_lp(hw, true);
+ ret_val = e1000_disable_ulp_lpt_lp(hw, true);
+ if (ret_val) {
+ e_warn("Failed to disable ULP\n");
+ goto out;
+ }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 177c6da80c57..a279f4fa9962 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -107,6 +107,45 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
+struct e1000e_me_supported {
+ u16 device_id; /* supported device ID */
+};
+
+static const struct e1000e_me_supported me_supported[] = {
+ {E1000_DEV_ID_PCH_LPT_I217_LM},
+ {E1000_DEV_ID_PCH_LPTLP_I218_LM},
+ {E1000_DEV_ID_PCH_I218_LM2},
+ {E1000_DEV_ID_PCH_I218_LM3},
+ {E1000_DEV_ID_PCH_SPT_I219_LM},
+ {E1000_DEV_ID_PCH_SPT_I219_LM2},
+ {E1000_DEV_ID_PCH_LBG_I219_LM3},
+ {E1000_DEV_ID_PCH_SPT_I219_LM4},
+ {E1000_DEV_ID_PCH_SPT_I219_LM5},
+ {E1000_DEV_ID_PCH_CNP_I219_LM6},
+ {E1000_DEV_ID_PCH_CNP_I219_LM7},
+ {E1000_DEV_ID_PCH_ICP_I219_LM8},
+ {E1000_DEV_ID_PCH_ICP_I219_LM9},
+ {E1000_DEV_ID_PCH_CMP_I219_LM10},
+ {E1000_DEV_ID_PCH_CMP_I219_LM11},
+ {E1000_DEV_ID_PCH_CMP_I219_LM12},
+ {E1000_DEV_ID_PCH_TGP_I219_LM13},
+ {E1000_DEV_ID_PCH_TGP_I219_LM14},
+ {E1000_DEV_ID_PCH_TGP_I219_LM15},
+ {0}
+};
+
+static bool e1000e_check_me(u16 device_id)
+{
+	const struct e1000e_me_supported *id;
+
+	for (id = me_supported; id->device_id; id++)
+ if (device_id == id->device_id)
+ return true;
+
+ return false;
+}
+
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@@ -119,14 +158,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
* has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
* and try again a number of times.
**/
-s32 __ew32_prepare(struct e1000_hw *hw)
+static void __ew32_prepare(struct e1000_hw *hw)
{
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
udelay(50);
-
- return i;
}
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
@@ -607,11 +644,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, rx_ring->tail);
- if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
+ if (unlikely(i != readl(rx_ring->tail))) {
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -624,11 +661,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, tx_ring->tail);
- if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
+ if (unlikely(i != readl(tx_ring->tail))) {
u32 tctl = er32(TCTL);
ew32(TCTL, tctl & ~E1000_TCTL_EN);
@@ -5294,6 +5331,10 @@ static void e1000_watchdog_task(struct work_struct *work)
/* oops */
break;
}
+ if (hw->mac.type == e1000_pch_spt) {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
}
/* enable transmits in the hardware, need to do this
@@ -6404,6 +6445,31 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
mac_data |= BIT(3);
ew32(CTRL_EXT, mac_data);
+ /* Disable disconnected cable conditioning for Power Gating */
+ mac_data = er32(DPGFR);
+ mac_data |= BIT(2);
+ ew32(DPGFR, mac_data);
+
+ /* Don't wake from dynamic Power Gating with clock request */
+ mac_data = er32(FEXTNVM12);
+ mac_data |= BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data |= BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Enable K1 off to enable mPHY Power Gating */
+ mac_data = er32(FEXTNVM6);
+ mac_data |= BIT(31);
+	ew32(FEXTNVM6, mac_data);
+
+ /* Enable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data |= BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
/* Enable the Dynamic Clock Gating in the DMA and MAC */
mac_data = er32(CTRL_EXT);
mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
@@ -6433,6 +6499,35 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data |= BIT(0);
ew32(FEXTNVM7, mac_data);
+ /* Disable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data &= ~BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Disable K1 off */
+ mac_data = er32(FEXTNVM6);
+ mac_data &= ~BIT(31);
+	ew32(FEXTNVM6, mac_data);
+
+ /* Disable Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data &= ~BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Cancel not waking from dynamic
+ * Power Gating with clock request
+ */
+ mac_data = er32(FEXTNVM12);
+ mac_data &= ~BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Cancel disable disconnected cable conditioning
+ * for Power Gating
+ */
+ mac_data = er32(DPGFR);
+ mac_data &= ~BIT(2);
+ ew32(DPGFR, mac_data);
+
/* Disable Dynamic Power Gating */
mac_data = er32(CTRL_EXT);
mac_data &= 0xFFFFFFF7;
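Every step of these S0ix entry/exit flows is the same read-modify-write on one MAC register, which is also what exposes the K1-off mismatch corrected above: the sequence reads FEXTNVM6, so FEXTNVM6 is the register it must write back. Were the pattern factored out, a hedged helper sketch (not part of this patch) could look like:

static void e1000e_rmw_reg(struct e1000_hw *hw, unsigned long reg,
			   u32 clear, u32 set)
{
	u32 val = __er32(hw, reg);	/* read   */

	val &= ~clear;			/* modify */
	val |= set;
	__ew32(hw, reg, val);		/* write  */
}

/* e.g. the "Enable K1 off" step:
 *	e1000e_rmw_reg(hw, E1000_FEXTNVM6, 0, BIT(31));
 */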
@@ -6858,7 +6953,8 @@ static int e1000e_pm_suspend(struct device *dev)
e1000e_pm_thaw(dev);
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp)
+ if (hw->mac.type >= e1000_pch_cnp &&
+ !e1000e_check_me(hw->adapter->pdev->device))
e1000e_s0ix_entry_flow(adapter);
return rc;
@@ -6873,7 +6969,8 @@ static int e1000e_pm_resume(struct device *dev)
int rc;
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp)
+ if (hw->mac.type >= e1000_pch_cnp &&
+ !e1000e_check_me(hw->adapter->pdev->device))
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
@@ -7549,7 +7646,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e1000_print_device_info(adapter);
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index df59fd1d660c..8165ba2619a4 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -21,9 +21,12 @@
#define E1000_FEXTNVM5 0x00014 /* Future Extended NVM 5 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM8 0x5BB0 /* Future Extended NVM 8 - RW */
#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
+#define E1000_FEXTNVM12 0x5BC0 /* Future Extended NVM 12 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
+#define E1000_DPGFR 0x00FAC /* Dynamic Power Gate Force Control Register */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 37514a75f928..6a089848c857 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -694,10 +694,8 @@ init_adminq_exit:
* i40e_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
-i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
+void i40e_shutdown_adminq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
-
if (i40e_check_asq_alive(hw))
i40e_aq_queue_shutdown(hw, true);
@@ -706,8 +704,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
if (hw->nvm_buff.va)
i40e_free_virt_mem(hw, &hw->nvm_buff);
-
- return ret_code;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2a037ec244b9..5d807c8004f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11,7 +11,7 @@
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
/* All i40e tracepoints are defined by the include below, which
* must be included exactly once across the whole kernel with
* CREATE_TRACE_POINTS defined
@@ -3260,26 +3260,31 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ kfree(ring->rx_bi);
ring->xsk_umem = i40e_xsk_umem(ring);
if (ring->xsk_umem) {
- ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ ret = i40e_alloc_rx_bi_zc(ring);
+ if (ret)
+ return ret;
+ ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
*/
chain_len = 1;
- ring->zca.free = i40e_zca_free;
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &ring->zca);
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
if (ret)
return ret;
dev_info(&vsi->back->pdev->dev,
- "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->queue_index);
} else {
+ ret = i40e_alloc_rx_bi(ring);
+ if (ret)
+ return ret;
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -3344,9 +3349,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- ok = ring->xsk_umem ?
- i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
- !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ if (ring->xsk_umem) {
+ xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
+ } else {
+ ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ }
if (!ok) {
/* Log this in case the user has forgotten to give the kernel
* any buffers, even later in the application.
@@ -14478,29 +14486,29 @@ static void i40e_print_features(struct i40e_pf *pf)
i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
- i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
+ i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
- i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
+ i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
pf->hw.func_caps.num_vsis,
pf->vsi[pf->lan_vsi]->num_queue_pairs);
if (pf->flags & I40E_FLAG_RSS_ENABLED)
- i += snprintf(&buf[i], REMAIN(i), " RSS");
+ i += scnprintf(&buf[i], REMAIN(i), " RSS");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
- i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
+ i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
- i += snprintf(&buf[i], REMAIN(i), " FD_SB");
- i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
+ i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
+ i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
}
if (pf->flags & I40E_FLAG_DCB_CAPABLE)
- i += snprintf(&buf[i], REMAIN(i), " DCB");
- i += snprintf(&buf[i], REMAIN(i), " VxLAN");
- i += snprintf(&buf[i], REMAIN(i), " Geneve");
+ i += scnprintf(&buf[i], REMAIN(i), " DCB");
+ i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
+ i += scnprintf(&buf[i], REMAIN(i), " Geneve");
if (pf->flags & I40E_FLAG_PTP)
- i += snprintf(&buf[i], REMAIN(i), " PTP");
+ i += scnprintf(&buf[i], REMAIN(i), " PTP");
if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
- i += snprintf(&buf[i], REMAIN(i), " VEB");
+ i += scnprintf(&buf[i], REMAIN(i), " VEB");
else
- i += snprintf(&buf[i], REMAIN(i), " VEPA");
+ i += scnprintf(&buf[i], REMAIN(i), " VEPA");
dev_info(&pf->pdev->dev, "%s\n", buf);
kfree(buf);
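The snprintf()-to-scnprintf() conversion matters because the two differ on truncation: snprintf() returns the length the output *would* have had, so once the buffer fills, the running offset i can exceed INFO_STRING_LEN and &buf[i] points past the allocation; scnprintf() returns the bytes actually written, keeping the offset in bounds. A contrived illustration:

char buf[8];
int i = 0;

i += snprintf(&buf[i], sizeof(buf) - i, "0123456789");
/* i == 10: the next &buf[i] is out of bounds */

i = 0;
i += scnprintf(&buf[i], sizeof(buf) - i, "0123456789");
/* i == 7: offset stays inside buf even though output was truncated */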
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index bbb478f09093..5c1378641b3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -17,7 +17,7 @@
/* adminq functions */
i40e_status i40e_init_adminq(struct i40e_hw *hw);
-i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
+void i40e_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b8496037ef7f..f9555c847f73 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -521,28 +521,29 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
/**
* i40e_fd_handle_status - check the Programming Status for FD
* @rx_ring: the Rx ring for this descriptor
- * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
+ * @qword0_raw: raw qword0 of the Rx writeback descriptor
+ * @qword1: qword1 converted to CPU byte order (le64_to_cpu)
* @prog_id: the id originally used for programming
*
* This is used to verify if the FD programming or invalidation
* requested by SW to the HW is successful or not and take actions accordingly.
**/
-void i40e_fd_handle_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, u8 prog_id)
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1, u8 prog_id)
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct pci_dev *pdev = pf->pdev;
+ struct i40e_32b_rx_wb_qw0 *qw0;
u32 fcnt_prog, fcnt_avail;
u32 error;
- u64 qw;
- qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+ qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
+ error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
- pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
- if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
+ pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
+ if (qw0->hi_dword.fd_id != 0 ||
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
pf->fd_inv);
@@ -560,7 +561,7 @@ void i40e_fd_handle_status(struct i40e_ring *rx_ring,
/* store the current atr filter count */
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
- if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
+ if (qw0->hi_dword.fd_id == 0 &&
test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
/* These set_bit() calls aren't atomic with the
* test_bit() here, but worse case we potentially
@@ -589,7 +590,7 @@ void i40e_fd_handle_status(struct i40e_ring *rx_ring,
} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
- rx_desc->wb.qword0.hi_dword.fd_id);
+ qw0->hi_dword.fd_id);
}
}
@@ -1195,6 +1196,11 @@ clear_counts:
rc->total_packets = 0;
}
+static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->rx_bi[idx];
+}
+
/**
* i40e_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
@@ -1208,7 +1214,7 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
- new_buff = &rx_ring->rx_bi[nta];
+ new_buff = i40e_rx_bi(rx_ring, nta);
/* update, and store next to alloc */
nta++;
@@ -1227,29 +1233,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_rx_is_programming_status - check for programming status descriptor
- * @qw: qword representing status_error_len in CPU ordering
- *
- * The value of in the descriptor length field indicate if this
- * is a programming status descriptor for flow director or FCoE
- * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise
- * it is a packet descriptor.
- **/
-static inline bool i40e_rx_is_programming_status(u64 qw)
-{
- /* The Rx filter programming status and SPH bit occupy the same
- * spot in the descriptor. Since we don't support packet split we
- * can just reuse the bit as an indication that this is a
- * programming status descriptor.
- */
- return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
-}
-
-/**
- * i40e_clean_programming_status - try clean the programming status descriptor
+ * i40e_clean_programming_status - clean the programming status descriptor
* @rx_ring: the rx ring that has this descriptor
- * @rx_desc: the rx descriptor written back by HW
- * @qw: qword representing status_error_len in CPU ordering
+ * @qword0_raw: raw qword0 of the Rx writeback descriptor
+ * @qword1: qword1 representing status_error_len in CPU ordering
*
* Flow director should handle FD_FILTER_STATUS to check its filter programming
* status being successful or not and take actions accordingly. FCoE should
@@ -1257,34 +1244,16 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
*
 * Nothing is returned; the caller releases the buffer and advances the ring.
**/
-struct i40e_rx_buffer *i40e_clean_programming_status(
- struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- u64 qw)
+void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1)
{
- struct i40e_rx_buffer *rx_buffer;
- u32 ntc;
u8 id;
- if (!i40e_rx_is_programming_status(qw))
- return NULL;
-
- ntc = rx_ring->next_to_clean;
-
- /* fetch, update, and store next to clean */
- rx_buffer = &rx_ring->rx_bi[ntc++];
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-
- id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+ id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
- i40e_fd_handle_status(rx_ring, rx_desc, id);
-
- return rx_buffer;
+ i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
}
/**
@@ -1336,13 +1305,25 @@ err:
return -ENOMEM;
}
+int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
+{
+ unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
+
+ rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
+ return rx_ring->rx_bi ? 0 : -ENOMEM;
+}
+
+static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
+{
+ memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
+}
+
/**
* i40e_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
**/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
- unsigned long bi_size;
u16 i;
/* ring already cleared, nothing to do */
@@ -1361,7 +1342,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
if (!rx_bi->page)
continue;
@@ -1388,8 +1369,10 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
}
skip_free:
- bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_bi, 0, bi_size);
+ if (rx_ring->xsk_umem)
+ i40e_clear_rx_bi_zc(rx_ring);
+ else
+ i40e_clear_rx_bi(rx_ring);
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
@@ -1430,15 +1413,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int err = -ENOMEM;
- int bi_size;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_bi);
- bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
- rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
- if (!rx_ring->rx_bi)
- goto err;
+ int err;
u64_stats_init(&rx_ring->syncp);
@@ -1451,7 +1426,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
if (!rx_ring->desc) {
dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
rx_ring->size);
- goto err;
+ return -ENOMEM;
}
rx_ring->next_to_alloc = 0;
@@ -1463,16 +1438,12 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
rx_ring->queue_index);
if (err < 0)
- goto err;
+ return err;
}
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
return 0;
-err:
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
- return err;
}
/**
@@ -1507,6 +1478,22 @@ static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}
+static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = i40e_rx_offset(rx_ring) ?
+ SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
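The helper's two branches mirror the Rx buffer layout: with 4 KiB pages each page is split in half, so truesize is a fixed power of two independent of the frame, while on larger pages it is derived from the frame itself, adding headroom and the skb_shared_info tail only when build_skb-style headroom is in use. Worked numbers under stated assumptions (order-0 4 KiB pages; 64 KiB pages with a 1500-byte frame and non-zero i40e_rx_offset()):

/* PAGE_SIZE == 4096, order-0 page:
 *	truesize = i40e_rx_pg_size(rx_ring) / 2 = 4096 / 2 = 2048
 *
 * PAGE_SIZE == 65536, size == 1500, i40e_rx_offset() != 0:
 *	truesize = SKB_DATA_ALIGN(1500 + i40e_rx_offset(rx_ring))
 *		 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 * each SKB_DATA_ALIGN() rounds its operand up to SMP_CACHE_BYTES.
 */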
/**
* i40e_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
@@ -1576,7 +1563,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
return false;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
+ bi = i40e_rx_bi(rx_ring, ntu);
do {
if (!i40e_alloc_mapped_page(rx_ring, bi))
@@ -1598,7 +1585,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
+ bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
@@ -1965,7 +1952,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
{
struct i40e_rx_buffer *rx_buffer;
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
prefetchw(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@@ -2180,7 +2167,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
{
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return I40E_XDP_CONSUMED;
@@ -2246,13 +2233,11 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -2335,6 +2320,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
bool failure = false;
struct xdp_buff xdp;
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+#endif
xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
@@ -2365,9 +2353,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc,
- qword);
- if (unlikely(rx_buffer)) {
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring,
+ rx_desc->raw.qword[0],
+ qword);
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ i40e_inc_ntc(rx_ring);
i40e_reuse_rx_page(rx_ring, rx_buffer);
cleaned_count++;
continue;
@@ -2389,7 +2380,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
xdp.data_hard_start = xdp.data -
i40e_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depends on the frame size */
+ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
+#endif
skb = i40e_run_xdp(rx_ring, &xdp);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 36d37f31a287..5c255977fd58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -296,17 +296,9 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
dma_addr_t dma;
- union {
- struct {
- struct page *page;
- __u32 page_offset;
- __u16 pagecnt_bias;
- };
- struct {
- void *addr;
- u64 handle;
- };
- };
+ struct page *page;
+ __u32 page_offset;
+ __u16 pagecnt_bias;
};
struct i40e_queue_stats {
@@ -358,6 +350,7 @@ struct i40e_ring {
union {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
+ struct xdp_buff **rx_bi_zc;
};
DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
u16 queue_index; /* Queue number of ring */
@@ -419,7 +412,6 @@ struct i40e_ring {
struct i40e_channel *ch;
struct xdp_rxq_info xdp_rxq;
struct xdp_umem *xsk_umem;
- struct zero_copy_allocator zca; /* ZC allocator anchor */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -495,6 +487,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 8af0e99c6c0d..667c4dc4b39f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -4,13 +4,9 @@
#ifndef I40E_TXRX_COMMON_
#define I40E_TXRX_COMMON_
-void i40e_fd_handle_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, u8 prog_id);
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
-struct i40e_rx_buffer *i40e_clean_programming_status(
- struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- u64 qw);
+void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
@@ -84,6 +80,38 @@ static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
}
}
+/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qword1: qword1 representing status_error_len in CPU ordering
+ *
+ * The value in the descriptor length field indicates whether this
+ * is a programming status descriptor for flow director or FCoE,
+ * marked by the value I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise
+ * it is a packet descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qword1)
+{
+ /* The Rx filter programming status and SPH bit occupy the same
+ * spot in the descriptor. Since we don't support packet split we
+ * can just reuse the bit as an indication that this is a
+ * programming status descriptor.
+ */
+ return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
+}
+
+/**
+ * i40e_inc_ntc: Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static inline void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
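Hoisting both helpers into this shared header lets the regular and zero-copy clean loops handle a programming-status descriptor with one pattern: detect it from qword1, pass the raw qwords to the handler, release the buffer slot, then advance next_to_clean. Condensed from the two call sites elsewhere in this patch:

/* inside a clean loop, after le64_to_cpu() of the descriptor's qword1 */
if (i40e_rx_is_programming_status(qword)) {
	i40e_clean_programming_status(rx_ring,
				      rx_desc->raw.qword[0], qword);
	/* recycle or free the buffer at next_to_clean, then: */
	i40e_inc_ntc(rx_ring);
	cleaned_count++;
	continue;
}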
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 6ea2867ff60f..63e098f7cb63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -689,7 +689,7 @@ union i40e_32byte_rx_desc {
__le64 rsvd2;
} read;
struct {
- struct {
+ struct i40e_32b_rx_wb_qw0 {
struct {
union {
__le16 mirroring_status;
@@ -727,6 +727,9 @@ union i40e_32byte_rx_desc {
} hi_dword;
} qword3;
} wb; /* writeback */
+ struct {
+ u64 qword[4];
+ } raw;
};
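The new raw view aliases the same 32 descriptor bytes as the read and wb views, giving callers an unconverted qword without spelling out the writeback bit-fields; the clean loops above use exactly this to feed qword0 to i40e_clean_programming_status(). The access pattern, condensed:

union i40e_rx_desc *rx_desc = I40E_RX_DESC(rx_ring, ntc);

u64 qword1 = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u64 qword0_raw = rx_desc->raw.qword[0];	/* no byte swap applied */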
enum i40e_rx_desc_status_bits {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 0b7d29192b2c..7276580cbe64 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,68 +2,30 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
-/**
- * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
- * @vsi: Current VSI
- * @umem: UMEM to DMA map
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
+int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
- struct i40e_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i, j;
- dma_addr_t dma;
-
- dev = &pf->pdev->dev;
- for (i = 0; i < umem->npgs; i++) {
- dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
- if (dma_mapping_error(dev, dma))
- goto out_unmap;
+ unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
- umem->pages[i].dma = dma;
- }
-
- return 0;
-
-out_unmap:
- for (j = 0; j < i; j++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
- umem->pages[i].dma = 0;
- }
-
- return -1;
+ rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
+ return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}
-/**
- * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
- * @vsi: Current VSI
- * @umem: UMEM to DMA map
- **/
-static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
+void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
- struct i40e_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i;
-
- dev = &pf->pdev->dev;
-
- for (i = 0; i < umem->npgs; i++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
+ memset(rx_ring->rx_bi_zc, 0,
+ sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
+}
- umem->pages[i].dma = 0;
- }
+static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->rx_bi_zc[idx];
}
/**
@@ -78,7 +40,6 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid)
{
struct net_device *netdev = vsi->netdev;
- struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -92,13 +53,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
- if (!reuseq)
- return -ENOMEM;
-
- xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
- err = i40e_xsk_umem_dma_map(vsi, umem);
+ err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
if (err)
return err;
@@ -151,7 +106,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
clear_bit(qid, vsi->af_xdp_zc_qps);
- i40e_xsk_umem_dma_unmap(vsi, umem);
+ xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -184,17 +139,13 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
*
- * This function enables or disables a UMEM to a certain ring.
- *
* Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
**/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
- struct xdp_umem *umem = rx_ring->xsk_umem;
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
struct bpf_prog *xdp_prog;
- u64 offset;
u32 act;
rcu_read_lock();
@@ -203,9 +154,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
*/
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- offset = xdp->data - xdp->data_hard_start;
-
- xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
switch (act) {
case XDP_PASS:
@@ -232,107 +180,26 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
return result;
}
-/**
- * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
- * @rx_ring: Rx ring
- * @bi: Rx buffer to populate
- *
- * This function allocates an Rx buffer. The buffer can come from fill
- * queue, or via the recycle queue (next_to_alloc).
- *
- * Returns true for a successful allocation, false otherwise
- **/
-static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- void *addr = bi->addr;
- u64 handle, hr;
-
- if (addr) {
- rx_ring->rx_stats.page_reuse_count++;
- return true;
- }
-
- if (!xsk_umem_peek_addr(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- bi->dma = xdp_umem_get_dma(umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-
- xsk_umem_release_addr(umem);
- return true;
-}
-
-/**
- * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
- * @rx_ring: Rx ring
- * @bi: Rx buffer to populate
- *
- * This function allocates an Rx buffer. The buffer can come from fill
- * queue, or via the reuse queue.
- *
- * Returns true for a successful allocation, false otherwise
- **/
-static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- u64 handle, hr;
-
- if (!xsk_umem_peek_addr_rq(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- handle &= rx_ring->xsk_umem->chunk_mask;
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- bi->dma = xdp_umem_get_dma(umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-
- xsk_umem_release_addr_rq(umem);
- return true;
-}
-
-static __always_inline bool
-__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
- bool alloc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi))
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
+ struct xdp_buff **bi, *xdp;
+ dma_addr_t dma;
bool ok = true;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
+ bi = i40e_rx_bi(rx_ring, ntu);
do {
- if (!alloc(rx_ring, bi)) {
+ xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ if (!xdp) {
ok = false;
goto no_buffers;
}
-
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
- rx_ring->rx_buf_len,
- DMA_BIDIRECTIONAL);
-
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ *bi = xdp;
+ dma = xsk_buff_xdp_get_dma(xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->read.hdr_addr = 0;
rx_desc++;
bi++;
@@ -340,11 +207,10 @@ __i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
if (unlikely(ntu == rx_ring->count)) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
+ bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
- rx_desc->wb.qword1.status_error_len = 0;
count--;
} while (count);
@@ -356,127 +222,8 @@ no_buffers:
}
/**
- * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
- * @rx_ring: Rx ring
- * @count: The number of buffers to allocate
- *
- * This function allocates a number of Rx buffers from the reuse queue
- * or fill ring and places them on the Rx ring.
- *
- * Returns true for a successful allocation, false otherwise
- **/
-bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
-{
- return __i40e_alloc_rx_buffers_zc(rx_ring, count,
- i40e_alloc_buffer_slow_zc);
-}
-
-/**
- * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
- * @rx_ring: Rx ring
- * @count: The number of buffers to allocate
- *
- * This function allocates a number of Rx buffers from the fill ring
- * or the internal recycle mechanism and places them on the Rx ring.
- *
- * Returns true for a successful allocation, false otherwise
- **/
-static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
-{
- return __i40e_alloc_rx_buffers_zc(rx_ring, count,
- i40e_alloc_buffer_zc);
-}
-
-/**
- * i40e_get_rx_buffer_zc - Return the current Rx buffer
- * @rx_ring: Rx ring
- * @size: The size of the rx buffer (read from descriptor)
- *
- * This function returns the current, received Rx buffer, and also
- * does DMA synchronization. the Rx ring.
- *
- * Returns the received Rx buffer
- **/
-static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
- const unsigned int size)
-{
- struct i40e_rx_buffer *bi;
-
- bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- bi->dma, 0,
- size,
- DMA_BIDIRECTIONAL);
-
- return bi;
-}
-
-/**
- * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
- * @rx_ring: Rx ring
- * @old_bi: The Rx buffer to recycle
- *
- * This function recycles a finished Rx buffer, and places it on the
- * recycle queue (next_to_alloc).
- **/
-static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *old_bi)
-{
- struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
- u16 nta = rx_ring->next_to_alloc;
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- new_bi->dma = old_bi->dma;
- new_bi->addr = old_bi->addr;
- new_bi->handle = old_bi->handle;
-
- old_bi->addr = NULL;
-}
-
-/**
- * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
- * @alloc: Zero-copy allocator
- * @handle: Buffer handle
- **/
-void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
-{
- struct i40e_rx_buffer *bi;
- struct i40e_ring *rx_ring;
- u64 hr, mask;
- u16 nta;
-
- rx_ring = container_of(alloc, struct i40e_ring, zca);
- hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
- mask = rx_ring->xsk_umem->chunk_mask;
-
- nta = rx_ring->next_to_alloc;
- bi = &rx_ring->rx_bi[nta];
-
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- handle &= mask;
-
- bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
- rx_ring->xsk_umem->headroom);
-}
-
-/**
- * i40e_construct_skb_zc - Create skbufff from zero-copy Rx buffer
+ * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
* @rx_ring: Rx ring
- * @bi: Rx buffer
* @xdp: xdp_buff
*
 * This function allocates a new skb from a zero-copy Rx buffer.
@@ -484,7 +231,6 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
* Returns the skb, or NULL on failure.
**/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi,
struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
@@ -503,24 +249,11 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
if (metasize)
skb_metadata_set(skb, metasize);
- i40e_reuse_rx_buffer_zc(rx_ring, bi);
+ xsk_buff_free(xdp);
return skb;
}
/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
-{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-}
-
-/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
* @budget: NAPI budget
@@ -534,20 +267,17 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
struct sk_buff *skb;
- struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
- struct i40e_rx_buffer *bi;
union i40e_rx_desc *rx_desc;
+ struct xdp_buff **bi;
unsigned int size;
u64 qword;
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- !i40e_alloc_rx_buffers_fast_zc(rx_ring,
- cleaned_count);
+ !i40e_alloc_rx_buffers_zc(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -560,35 +290,36 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- bi = i40e_clean_programming_status(rx_ring, rx_desc,
- qword);
- if (unlikely(bi)) {
- i40e_reuse_rx_buffer_zc(rx_ring, bi);
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring,
+ rx_desc->raw.qword[0],
+ qword);
+ bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ xsk_buff_free(*bi);
+ *bi = NULL;
cleaned_count++;
+ i40e_inc_ntc(rx_ring);
continue;
}
+ bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
if (!size)
break;
- bi = i40e_get_rx_buffer_zc(rx_ring, size);
- xdp.data = bi->addr;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + size;
- xdp.handle = bi->handle;
+ bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ (*bi)->data_end = (*bi)->data + size;
+ xsk_buff_dma_sync_for_cpu(*bi);
- xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
+ xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
if (xdp_res) {
- if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+ if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
xdp_xmit |= xdp_res;
- bi->addr = NULL;
- } else {
- i40e_reuse_rx_buffer_zc(rx_ring, bi);
- }
+ else
+ xsk_buff_free(*bi);
+ *bi = NULL;
total_rx_bytes += size;
total_rx_packets++;
@@ -604,7 +335,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
* BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
* SBP is *not* set in PRT_SBPVSI (default not set).
*/
- skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
+ skb = i40e_construct_skb_zc(rx_ring, *bi);
+ *bi = NULL;
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
break;
@@ -662,10 +394,9 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
- dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
-
- dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
- DMA_BIDIRECTIONAL);
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ desc.len);
tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len;
@@ -824,13 +555,13 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
u16 i;
for (i = 0; i < rx_ring->count; i++) {
- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);
- if (!rx_bi->addr)
+ if (!rx_bi)
continue;
- xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
- rx_bi->addr = NULL;
+ xsk_buff_free(rx_bi);
+ rx_bi = NULL;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index 9ed59c14eb55..ea919a7d60ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -12,12 +12,13 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid);
-void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
struct i40e_ring *tx_ring, int napi_budget);
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
#endif /* _I40E_XSK_H_ */
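Taken as a whole, the conversion in this file replaces the driver's hand-rolled UMEM DMA mapping and recycle queue with the core xsk_buff pool API, and the buffer lifecycle visible in the patch reduces to four calls. A condensed sketch of that lifecycle, error handling elided:

/* refill: pull a buffer from the pool and program the descriptor */
struct xdp_buff *xdp = xsk_buff_alloc(rx_ring->xsk_umem);
dma_addr_t dma = xsk_buff_xdp_get_dma(xdp);

rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->read.hdr_addr = 0;

/* receive path: make the payload visible to the CPU */
xsk_buff_dma_sync_for_cpu(xdp);

/* drop, error, or ring teardown: return the buffer to the pool */
xsk_buff_free(xdp);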
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 29c6c6743450..980bbcc64b4b 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,10 +17,14 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx_lib.o \
ice_txrx.o \
+ ice_fltr.o \
+ ice_fdir.o \
+ ice_ethtool_fdir.o \
ice_flex_pipe.o \
ice_flow.o \
ice_devlink.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
+ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 5c11448bfbb3..5792ee616b5c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,9 +34,14 @@
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
+#include <linux/cpu_rmap.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
+#include <net/geneve.h>
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
+#include <net/vxlan.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -46,7 +51,9 @@
#include "ice_sched.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_fdir.h"
#include "ice_xsk.h"
+#include "ice_arfs.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -62,6 +69,7 @@ extern const char ice_drv_ver[];
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
#define ICE_MIN_MSIX 2
+#define ICE_FDIR_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -90,6 +98,7 @@ extern const char ice_drv_ver[];
#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
+#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
@@ -210,6 +219,7 @@ enum ice_state {
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
+ __ICE_FD_FLUSH_REQ,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
__ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
@@ -244,8 +254,8 @@ struct ice_vsi {
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
- int num_q_vectors;
- int base_vector; /* IRQ base for OS reserved vectors */
+ u16 num_q_vectors;
+ u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -253,6 +263,8 @@ struct ice_vsi {
s16 vf_id; /* VF ID for SR-IOV VSIs */
u16 ethtype; /* Ethernet protocol for pause frame */
+ u16 num_gfltr;
+ u16 num_bfltr;
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
@@ -261,6 +273,14 @@ struct ice_vsi {
u8 *rss_lut_user; /* User configured lookup table entries */
u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
+ /* aRFS members only allocated for the PF VSI */
+#define ICE_MAX_ARFS_LIST 1024
+#define ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1)
+ struct hlist_head *arfs_fltr_list;
+ struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
+ spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
+ atomic_t *arfs_last_fltr_id;
+
u16 max_frame;
u16 rx_buf_len;
@@ -335,12 +355,14 @@ enum ice_pf_flags {
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
+ ICE_FLAG_FD_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
+ ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -362,11 +384,13 @@ struct ice_pf {
*/
u16 sriov_base_vector;
+ u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
+
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
/* Virtchnl/SR-IOV config info */
struct ice_vf *vf;
- int num_alloc_vfs; /* actual number of VFs allocated */
+ u16 num_alloc_vfs; /* actual number of VFs allocated */
u16 num_vfs_supported; /* num VFs supported for this PF */
u16 num_qps_per_vf;
u16 num_msix_per_vf;
@@ -385,11 +409,11 @@ struct ice_pf {
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
u32 hw_csum_rx_error;
- u32 oicr_idx; /* Other interrupt cause MSIX vector index */
- u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ u16 oicr_idx; /* Other interrupt cause MSIX vector index */
+ u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u32 num_lan_msix; /* Total MSIX vectors for base driver */
+ u16 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -500,8 +524,27 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
return NULL;
}
+/**
+ * ice_get_ctrl_vsi - Get the control VSI
+ * @pf: PF instance
+ */
+static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
+{
+ /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
+ if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
+ return NULL;
+
+ return pf->vsi[pf->ctrl_vsi_idx];
+}
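Like ice_get_main_vsi() just above it, this accessor can legitimately return NULL, either before the Flow Director control VSI has been created or when the feature is unavailable, so callers are expected to check. A minimal hedged usage sketch:

struct ice_vsi *ctrl_vsi = ice_get_ctrl_vsi(pf);

if (!ctrl_vsi)
	return -ENOENT;	/* control VSI not set up; nothing to program */
/* ... submit Flow Director rules through ctrl_vsi ... */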
+
+#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
+#define ICE_FD_STAT_PF_IDX(base_idx) \
+ ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
+#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
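The Flow Director statistics space is carved into blocks of 256 counters per PF, and the sideband (SB) statistics index is simply the first entry of the PF's block. Worked numbers:

/* ICE_FD_STAT_PF_IDX(base_idx) == base_idx * 256
 *
 *	PF 0: ICE_FD_SB_STAT_IDX(0) ==   0
 *	PF 2: ICE_FD_SB_STAT_IDX(2) == 512
 */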
+
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
+int ice_vsi_open_ctrl(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
@@ -523,7 +566,24 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+const char *ice_stat_str(enum ice_status stat_err);
+const char *ice_aq_str(enum ice_aq_err aq_err);
+int
+ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
+ bool is_tun);
+void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
+int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
+int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
+int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
+int
+ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+void ice_fdir_release_flows(struct ice_hw *hw);
+void ice_fdir_replay_flows(struct ice_hw *hw);
+void ice_fdir_replay_fltrs(struct ice_pf *pf);
+int ice_fdir_create_dflt_rules(struct ice_pf *pf);
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
+void ice_service_task_schedule(struct ice_pf *pf);
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 2381b4014ed6..92f82f2a8af4 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -107,6 +107,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_RXQS 0x0041
#define ICE_AQC_CAPS_TXQS 0x0042
#define ICE_AQC_CAPS_MSIX 0x0043
+#define ICE_AQC_CAPS_FD 0x0045
#define ICE_AQC_CAPS_MAX_MTU 0x0047
u8 major_ver;
@@ -155,13 +156,11 @@ struct ice_aqc_manage_mac_write {
#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0)
#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1)
#define ICE_AQC_MAN_MAC_WR_S 6
-#define ICE_AQC_MAN_MAC_WR_M (3 << ICE_AQC_MAN_MAC_WR_S)
+#define ICE_AQC_MAN_MAC_WR_M ICE_M(3, ICE_AQC_MAN_MAC_WR_S)
#define ICE_AQC_MAN_MAC_UPDATE_LAA 0
-#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL (BIT(0) << ICE_AQC_MAN_MAC_WR_S)
- /* High 16 bits of MAC address in big endian order */
- __be16 sah;
- /* Low 32 bits of MAC address in big endian order */
- __be32 sal;
+#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL BIT(ICE_AQC_MAN_MAC_WR_S)
+ /* byte stream in network order */
+ u8 mac_addr[ETH_ALEN];
__le32 addr_high;
__le32 addr_low;
};
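[Editor's note: with the sah/sal word/dword split removed, callers now fill
the byte-stream field directly; see ice_aq_manage_mac_write() later in this
patch, which does:]

	ether_addr_copy(cmd->mac_addr, mac_addr);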
@@ -232,6 +231,11 @@ struct ice_aqc_get_sw_cfg_resp {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
+#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
+#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
@@ -240,6 +244,9 @@ struct ice_aqc_get_sw_cfg_resp {
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
+#define ICE_AQC_RES_TYPE_S 0
+#define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S)
+
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
*/
@@ -541,7 +548,7 @@ struct ice_sw_rule_lkup_rx_tx {
#define ICE_SINGLE_ACT_OTHER_ACTS 0x3
#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17
#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \
- (0x3 << \ ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
+ (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
/* Bit 17:18 - Defines other actions */
/* Other action = 0 - Mirror VSI */
@@ -967,7 +974,7 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1059,6 +1066,25 @@ struct ice_aqc_set_phy_cfg_data {
u8 rsvd1;
};
+/* Set MAC Config command data structure (direct 0x0603) */
+struct ice_aqc_set_mac_cfg {
+ __le16 max_frame_size;
+ u8 params;
+#define ICE_AQ_SET_MAC_PACE_S 3
+#define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S)
+#define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7)
+#define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0
+#define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M
+ u8 tx_tmr_priority;
+ __le16 tx_tmr_value;
+ __le16 fc_refresh_threshold;
+ u8 drop_opts;
+#define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0)
+#define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0
+#define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0)
+ u8 reserved[7];
+};
+
/* Restart AN command data structure (direct 0x0605)
* Also used for response, with only the lport_num field present.
*/
@@ -1264,6 +1290,33 @@ struct ice_aqc_nvm_checksum {
u8 rsvd2[12];
};
+/* The result of a netlist NVM read comes in a TLV format. The actual data
+ * (netlist header) starts at word offset 1 (byte 2). The FW strips the
+ * type field from the TLV header, so all the netlist field offsets must
+ * be adjusted by 1 word (2 bytes) in order to map to their correct
+ * location.
+ */
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M ICE_M(0x3FF, 0)
+#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
+
+/* netlist ID block field offsets (word offsets) */
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9
+#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
+#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
+
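[Editor's note: a hedged sketch of how the word offsets above would be
applied to a buffer returned by the netlist NVM read, per the comment that
FW strips the TLV type word; the buffer name is illustrative.]

	__le16 *netlist_buf;	/* words returned by the NVM read */
	u16 node_count;

	node_count = le16_to_cpu(netlist_buf[ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET]) &
		     ICE_AQC_NVM_NETLIST_NODE_COUNT_M;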
/**
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -1648,10 +1701,12 @@ struct ice_pkg_ver {
};
#define ICE_PKG_NAME_SIZE 32
+#define ICE_SEG_NAME_SIZE 28
struct ice_aqc_get_pkg_info {
struct ice_pkg_ver ver;
- char name[ICE_PKG_NAME_SIZE];
+ char name[ICE_SEG_NAME_SIZE];
+ __le32 track_id;
u8 is_in_nvm;
u8 is_active;
u8 is_active_at_boot;
@@ -1738,6 +1793,7 @@ struct ice_aq_desc {
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
+ struct ice_aqc_set_mac_cfg set_mac_cfg;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
@@ -1770,6 +1826,7 @@ enum ice_aq_err {
ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
@@ -1834,6 +1891,7 @@ enum ice_adminq_opc {
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
ice_aqc_opc_set_phy_cfg = 0x0601,
+ ice_aqc_opc_set_mac_cfg = 0x0603,
ice_aqc_opc_restart_an = 0x0605,
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
new file mode 100644
index 000000000000..6560acd76c94
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#include "ice.h"
+
+/**
+ * ice_is_arfs_active - helper to check if aRFS is active
+ * @vsi: VSI to check
+ */
+static bool ice_is_arfs_active(struct ice_vsi *vsi)
+{
+ return !!vsi->arfs_fltr_list;
+}
+
+/**
+ * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
+ * @hw: pointer to the HW structure
+ * @flow_type: flow type as Flow Director understands it
+ *
+ * Flow Director will query this function to see if aRFS is currently using
+ * the specified flow_type for perfect (4-tuple) filters.
+ */
+bool
+ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+ struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
+ struct ice_pf *pf = hw->back;
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;
+
+ /* active counters can be updated by multiple CPUs */
+ smp_mb__before_atomic();
+ switch (flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
+ * @vsi: VSI that aRFS is active on
+ * @entry: aRFS entry used to change counters
+ * @add: true to increment counter, false to decrement
+ */
+static void
+ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
+ struct ice_arfs_entry *entry, bool add)
+{
+ struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;
+
+ switch (entry->fltr_info.flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_udpv4_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_udpv4_cnt);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_udpv6_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_udpv6_cnt);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
+ entry->fltr_info.flow_type);
+ }
+}
+
+/**
+ * ice_arfs_del_flow_rules - delete the rules passed in from HW
+ * @vsi: VSI for the flow rules that need to be deleted
+ * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
+ *
+ * Loop through the delete list passed in and remove the rules from HW. After
+ * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
+ * longer being referenced by the aRFS hash table.
+ */
+static void
+ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
+{
+ struct ice_arfs_entry *e;
+ struct hlist_node *n;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
+ int result;
+
+ result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
+ false);
+ if (!result)
+ ice_arfs_update_active_fltr_cntrs(vsi, e, false);
+ else
+ dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
+ result, e->fltr_state, e->fltr_info.fltr_id,
+ e->flow_id, e->fltr_info.q_index);
+
+ /* The aRFS hash table is no longer referencing this entry */
+ hlist_del(&e->list_entry);
+ devm_kfree(dev, e);
+ }
+}
+
+/**
+ * ice_arfs_add_flow_rules - add the rules passed in to HW
+ * @vsi: VSI for the flow rules that need to be added
+ * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
+ *
+ * Loop through the add list passed in and add the rules to HW. After each
+ * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
+ * the ice_arfs_entry(s) because they are still being referenced in the aRFS
+ * hash table.
+ */
+static void
+ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
+{
+ struct ice_arfs_entry_ptr *ep;
+ struct hlist_node *n;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
+ int result;
+
+ result = ice_fdir_write_fltr(vsi->back,
+ &ep->arfs_entry->fltr_info, true,
+ false);
+ if (!result)
+ ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
+ true);
+ else
+ dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
+ result, ep->arfs_entry->fltr_state,
+ ep->arfs_entry->fltr_info.fltr_id,
+ ep->arfs_entry->flow_id,
+ ep->arfs_entry->fltr_info.q_index);
+
+ hlist_del(&ep->list_entry);
+ devm_kfree(dev, ep);
+ }
+}
+
+/**
+ * ice_arfs_is_flow_expired - check if the aRFS entry has expired
+ * @vsi: VSI containing the aRFS entry
+ * @arfs_entry: aRFS entry that's being checked for expiration
+ *
+ * Return true if the flow has expired, else false. This function should be used
+ * to determine whether or not an aRFS entry should be removed from the hardware
+ * and software structures.
+ */
+static bool
+ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
+{
+#define ICE_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000)
+ if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
+ arfs_entry->flow_id,
+ arfs_entry->fltr_info.fltr_id))
+ return true;
+
+ /* expiration timer only used for UDP filters */
+ if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
+ arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ return false;
+
+ return time_in_range64(arfs_entry->time_activated +
+ ICE_ARFS_TIME_DELTA_EXPIRATION,
+ arfs_entry->time_activated, get_jiffies_64());
+}
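[Editor's note: the time_in_range64() call above asks whether
(time_activated + 5 s) lies within [time_activated, now]; for a
non-wrapping clock that is roughly the simpler check below.]

	/* UDP flows are treated as expired once ~5 s of jiffies elapse */
	bool expired = time_after_eq64(get_jiffies_64(),
				       arfs_entry->time_activated +
				       ICE_ARFS_TIME_DELTA_EXPIRATION);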
+
+/**
+ * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
+ * @vsi: the VSI to be forwarded to
+ * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
+ * @add_list: list to populate with filters to be added to Flow Director
+ * @del_list: list to populate with filters to be deleted from Flow Director
+ *
+ * Iterate over the hlist at the index given in the aRFS hash table and
+ * determine if there are any aRFS entries that need to be either added or
+ * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE, the
+ * filter needs to be added to HW; else, if it's marked as ICE_ARFS_ACTIVE and
+ * the flow has expired, the filter is deleted from HW. The caller is expected
+ * to program/remove the rules collected on the add_list/del_list, respectively.
+ */
+static void
+ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
+ struct hlist_head *add_list,
+ struct hlist_head *del_list)
+{
+ struct ice_arfs_entry *e;
+ struct hlist_node *n;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ /* go through the aRFS hlist at this idx and check for needed updates */
+ hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
+ /* check if filter needs to be added to HW */
+ if (e->fltr_state == ICE_ARFS_INACTIVE) {
+ enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
+ struct ice_arfs_entry_ptr *ep =
+ devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);
+
+ if (!ep)
+ continue;
+ INIT_HLIST_NODE(&ep->list_entry);
+ /* reference aRFS entry to add HW filter */
+ ep->arfs_entry = e;
+ hlist_add_head(&ep->list_entry, add_list);
+ e->fltr_state = ICE_ARFS_ACTIVE;
+ /* expiration timer only used for UDP flows */
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ e->time_activated = get_jiffies_64();
+ } else if (e->fltr_state == ICE_ARFS_ACTIVE) {
+ /* check if filter needs to be removed from HW */
+ if (ice_arfs_is_flow_expired(vsi, e)) {
+ /* remove aRFS entry from hash table for delete
+ * and to prevent referencing it the next time
+ * through this hlist index
+ */
+ hlist_del(&e->list_entry);
+ e->fltr_state = ICE_ARFS_TODEL;
+ /* save reference to aRFS entry for delete */
+ hlist_add_head(&e->list_entry, del_list);
+ }
+ }
+}
+
+/**
+ * ice_sync_arfs_fltrs - update all aRFS filters
+ * @pf: board private structure
+ */
+void ice_sync_arfs_fltrs(struct ice_pf *pf)
+{
+ HLIST_HEAD(tmp_del_list);
+ HLIST_HEAD(tmp_add_list);
+ struct ice_vsi *pf_vsi;
+ unsigned int i;
+
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi)
+ return;
+
+ if (!ice_is_arfs_active(pf_vsi))
+ return;
+
+ spin_lock_bh(&pf_vsi->arfs_lock);
+ /* Once we process aRFS for the PF VSI get out */
+ for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
+ ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
+ &tmp_del_list);
+ spin_unlock_bh(&pf_vsi->arfs_lock);
+
+ /* use list of ice_arfs_entry(s) for delete */
+ ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);
+
+ /* use list of ice_arfs_entry_ptr(s) for add */
+ ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
+}
+
+/**
+ * ice_arfs_build_entry - builds an aRFS entry based on input
+ * @vsi: destination VSI for this flow
+ * @fk: flow dissector keys for creating the tuple
+ * @rxq_idx: Rx queue to steer this flow to
+ * @flow_id: passed down from the stack and saved for flow expiration
+ *
+ * returns an aRFS entry on success and NULL on failure
+ */
+static struct ice_arfs_entry *
+ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
+ u16 rxq_idx, u32 flow_id)
+{
+ struct ice_arfs_entry *arfs_entry;
+ struct ice_fdir_fltr *fltr_info;
+ u8 ip_proto;
+
+ arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
+ sizeof(*arfs_entry),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!arfs_entry)
+ return NULL;
+
+ fltr_info = &arfs_entry->fltr_info;
+ fltr_info->q_index = rxq_idx;
+ fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ fltr_info->dest_vsi = vsi->idx;
+ ip_proto = fk->basic.ip_proto;
+
+ if (fk->basic.n_proto == htons(ETH_P_IP)) {
+ fltr_info->ip.v4.proto = ip_proto;
+ fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP :
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
+ fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
+ fltr_info->ip.v4.src_port = fk->ports.src;
+ fltr_info->ip.v4.dst_port = fk->ports.dst;
+ } else { /* ETH_P_IPV6 */
+ fltr_info->ip.v6.proto = ip_proto;
+ fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP :
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+ fltr_info->ip.v6.src_port = fk->ports.src;
+ fltr_info->ip.v6.dst_port = fk->ports.dst;
+ }
+
+ arfs_entry->flow_id = flow_id;
+ fltr_info->fltr_id =
+ atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;
+
+ return arfs_entry;
+}
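[Editor's note: RPS_NO_FILTER, defined in <linux/netdevice.h>, is the
stack's "no filter" sentinel; taking the counter modulo that value above
keeps a real entry's fltr_id from ever aliasing the sentinel when it is
returned from the ndo_rx_flow_steer() hook below.]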
+
+/**
+ * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set
+ * @hw: pointer to HW structure
+ * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
+ * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
+ *
+ * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS
+ * to check if perfect (4-tuple) flow rules are currently in place by Flow
+ * Director.
+ */
+static bool
+ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
+{
+ unsigned long *perfect_fltr = hw->fdir_perfect_fltr;
+
+ /* advanced Flow Director disabled, perfect filters always supported */
+ if (!perfect_fltr)
+ return true;
+
+ if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
+ else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
+ else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
+ else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);
+
+ return false;
+}
+
+/**
+ * ice_rx_flow_steer - steer the Rx flow to where the application is running
+ * @netdev: ptr to the netdev being adjusted
+ * @skb: buffer with required header information
+ * @rxq_idx: queue to which the flow needs to move
+ * @flow_id: flow identifier provided by the netdev
+ *
+ * Based on the skb, rxq_idx, and flow_id passed in, add or update an entry in
+ * the aRFS hash table. Iterate over one of the hlists in the aRFS hash table:
+ * if the flow_id already exists in the hash table but the rxq_idx has changed,
+ * mark the entry as ICE_ARFS_INACTIVE so it can be updated in HW; else, if the
+ * entry is marked as ICE_ARFS_TODEL, delete it from the aRFS hash table. If
+ * neither of the previous conditions is true, add a new entry to the aRFS
+ * hash table, which is set to ICE_ARFS_INACTIVE by default so it can be
+ * programmed in HW.
+ */
+int
+ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
+ u16 rxq_idx, u32 flow_id)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_arfs_entry *arfs_entry;
+ struct ice_vsi *vsi = np->vsi;
+ struct flow_keys fk;
+ struct ice_pf *pf;
+ __be16 n_proto;
+ u8 ip_proto;
+ u16 idx;
+ int ret;
+
+ /* failed to allocate memory for aRFS so don't crash */
+ if (unlikely(!vsi->arfs_fltr_list))
+ return -ENODEV;
+
+ pf = vsi->back;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
+
+ n_proto = fk.basic.n_proto;
+ /* Support only IPV4 and IPV6 */
+ if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
+ n_proto == htons(ETH_P_IPV6))
+ ip_proto = fk.basic.ip_proto;
+ else
+ return -EPROTONOSUPPORT;
+
+ /* Support only TCP and UDP */
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
+ return -EPROTONOSUPPORT;
+
+ /* only support 4-tuple filters for aRFS */
+ if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
+ return -EOPNOTSUPP;
+
+ /* choose the aRFS list bucket based on skb hash */
+ idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
+ /* search for entry in the bucket */
+ spin_lock_bh(&vsi->arfs_lock);
+ hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
+ list_entry) {
+ struct ice_fdir_fltr *fltr_info;
+
+ /* keep searching for the already existing arfs_entry flow */
+ if (arfs_entry->flow_id != flow_id)
+ continue;
+
+ fltr_info = &arfs_entry->fltr_info;
+ ret = fltr_info->fltr_id;
+
+ if (fltr_info->q_index == rxq_idx ||
+ arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
+ goto out;
+
+ /* update the queue to forward to on an already existing flow */
+ fltr_info->q_index = rxq_idx;
+ arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
+ ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
+ goto out_schedule_service_task;
+ }
+
+ arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
+ if (!arfs_entry) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = arfs_entry->fltr_info.fltr_id;
+ INIT_HLIST_NODE(&arfs_entry->list_entry);
+ hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
+out_schedule_service_task:
+ ice_service_task_schedule(pf);
+out:
+ spin_unlock_bh(&vsi->arfs_lock);
+ return ret;
+}
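[Editor's note: hedged sketch, not part of the patch, of how the stack
reaches ice_rx_flow_steer(); the driver is expected to wire it into its
net_device_ops when CONFIG_RFS_ACCEL is enabled. The struct name is
illustrative.]

static const struct net_device_ops ice_netdev_ops_sketch = {
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= ice_rx_flow_steer,
#endif
	/* remaining callbacks elided */
};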
+
+/**
+ * ice_init_arfs_cntrs - initialize aRFS counter values
+ * @vsi: VSI that aRFS counters need to be initialized on
+ */
+static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
+{
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
+ GFP_KERNEL);
+ if (!vsi->arfs_fltr_cntrs)
+ return -ENOMEM;
+
+ vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
+ GFP_KERNEL);
+ if (!vsi->arfs_last_fltr_id) {
+ kfree(vsi->arfs_fltr_cntrs);
+ vsi->arfs_fltr_cntrs = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_init_arfs - initialize aRFS resources
+ * @vsi: the VSI to be forwarded to
+ */
+void ice_init_arfs(struct ice_vsi *vsi)
+{
+ struct hlist_head *arfs_fltr_list;
+ unsigned int i;
+
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return;
+
+ arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
+ GFP_KERNEL);
+ if (!arfs_fltr_list)
+ return;
+
+ if (ice_init_arfs_cntrs(vsi))
+ goto free_arfs_fltr_list;
+
+ for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
+ INIT_HLIST_HEAD(&arfs_fltr_list[i]);
+
+ spin_lock_init(&vsi->arfs_lock);
+
+ vsi->arfs_fltr_list = arfs_fltr_list;
+
+ return;
+
+free_arfs_fltr_list:
+ kfree(arfs_fltr_list);
+}
+
+/**
+ * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
+ * @vsi: the VSI to be forwarded to
+ */
+void ice_clear_arfs(struct ice_vsi *vsi)
+{
+ struct device *dev;
+ unsigned int i;
+
+ if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
+ !vsi->arfs_fltr_list)
+ return;
+
+ dev = ice_pf_to_dev(vsi->back);
+ for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
+ struct ice_arfs_entry *r;
+ struct hlist_node *n;
+
+ spin_lock_bh(&vsi->arfs_lock);
+ hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
+ list_entry) {
+ hlist_del(&r->list_entry);
+ devm_kfree(dev, r);
+ }
+ spin_unlock_bh(&vsi->arfs_lock);
+ }
+
+ kfree(vsi->arfs_fltr_list);
+ vsi->arfs_fltr_list = NULL;
+ kfree(vsi->arfs_last_fltr_id);
+ vsi->arfs_last_fltr_id = NULL;
+ kfree(vsi->arfs_fltr_cntrs);
+ vsi->arfs_fltr_cntrs = NULL;
+}
+
+/**
+ * ice_free_cpu_rx_rmap - free the CPU reverse map that was set up
+ * @vsi: the VSI to be forwarded to
+ */
+void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
+{
+ struct net_device *netdev;
+
+ if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev || !netdev->rx_cpu_rmap ||
+ netdev->reg_state != NETREG_REGISTERED)
+ return;
+
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+}
+
+/**
+ * ice_set_cpu_rx_rmap - set up the CPU reverse map for each queue
+ * @vsi: the VSI to be forwarded to
+ */
+int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
+{
+ struct net_device *netdev;
+ struct ice_pf *pf;
+ int base_idx, i;
+
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ pf = vsi->back;
+ netdev = vsi->netdev;
+ if (!pf || !netdev || !vsi->num_q_vectors ||
+ vsi->netdev->reg_state != NETREG_REGISTERED)
+ return -EINVAL;
+
+ netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
+ vsi->type, netdev->name, vsi->num_q_vectors);
+
+ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
+ if (unlikely(!netdev->rx_cpu_rmap))
+ return -EINVAL;
+
+ base_idx = vsi->base_vector;
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
+ pf->msix_entries[base_idx + i].vector)) {
+ ice_free_cpu_rx_rmap(vsi);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_remove_arfs - remove/clear all aRFS resources
+ * @pf: device private structure
+ */
+void ice_remove_arfs(struct ice_pf *pf)
+{
+ struct ice_vsi *pf_vsi;
+
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi)
+ return;
+
+ ice_free_cpu_rx_rmap(pf_vsi);
+ ice_clear_arfs(pf_vsi);
+}
+
+/**
+ * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
+ * @pf: device private structure
+ */
+void ice_rebuild_arfs(struct ice_pf *pf)
+{
+ struct ice_vsi *pf_vsi;
+
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi)
+ return;
+
+ ice_remove_arfs(pf);
+ if (ice_set_cpu_rx_rmap(pf_vsi)) {
+ dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
+ return;
+ }
+ ice_init_arfs(pf_vsi);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
new file mode 100644
index 000000000000..f39cd16403ed
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#ifndef _ICE_ARFS_H_
+#define _ICE_ARFS_H_
+enum ice_arfs_fltr_state {
+ ICE_ARFS_INACTIVE,
+ ICE_ARFS_ACTIVE,
+ ICE_ARFS_TODEL,
+};
+
+struct ice_arfs_entry {
+ struct ice_fdir_fltr fltr_info;
+ struct hlist_node list_entry;
+ u64 time_activated; /* only valid for UDP flows */
+ u32 flow_id;
+ /* fltr_state = 0 - ICE_ARFS_INACTIVE:
+ * filter needs to be updated or programmed in HW.
+ * fltr_state = 1 - ICE_ARFS_ACTIVE:
+ * filter is active and programmed in HW.
+ * fltr_state = 2 - ICE_ARFS_TODEL:
+ * filter has been deleted from HW and needs to be removed from
+ * the aRFS hash table.
+ */
+ u8 fltr_state;
+};
+
+struct ice_arfs_entry_ptr {
+ struct ice_arfs_entry *arfs_entry;
+ struct hlist_node list_entry;
+};
+
+struct ice_arfs_active_fltr_cntrs {
+ atomic_t active_tcpv4_cnt;
+ atomic_t active_tcpv6_cnt;
+ atomic_t active_udpv4_cnt;
+ atomic_t active_udpv6_cnt;
+};
+
+#ifdef CONFIG_RFS_ACCEL
+int
+ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
+ u16 rxq_idx, u32 flow_id);
+void ice_clear_arfs(struct ice_vsi *vsi);
+void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
+void ice_init_arfs(struct ice_vsi *vsi);
+void ice_sync_arfs_fltrs(struct ice_pf *pf);
+int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
+void ice_remove_arfs(struct ice_pf *pf);
+void ice_rebuild_arfs(struct ice_pf *pf);
+bool
+ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
+ enum ice_fltr_ptype flow_type);
+#else
+#define ice_sync_arfs_fltrs(pf) do {} while (0)
+#define ice_init_arfs(vsi) do {} while (0)
+#define ice_clear_arfs(vsi) do {} while (0)
+#define ice_remove_arfs(pf) do {} while (0)
+#define ice_free_cpu_rx_rmap(vsi) do {} while (0)
+#define ice_rebuild_arfs(pf) do {} while (0)
+
+static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi)
+{
+ return 0;
+}
+
+static inline int
+ice_rx_flow_steer(struct net_device __always_unused *netdev,
+ const struct sk_buff __always_unused *skb,
+ u16 __always_unused rxq_idx, u32 __always_unused flow_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool
+ice_is_arfs_using_perfect_flow(struct ice_hw __always_unused *hw,
+ enum ice_fltr_ptype __always_unused flow_type)
+{
+ return false;
+}
+#endif /* CONFIG_RFS_ACCEL */
+#endif /* _ICE_ARFS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index a19cd6f5436b..d620d26d42ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */
+#include <net/xdp_sock_drv.h>
#include "ice_base.h"
+#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
@@ -12,7 +14,7 @@
*/
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
- int offset, i;
+ unsigned int offset, i;
mutex_lock(qs_cfg->qs_mutex);
offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
@@ -24,7 +26,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
for (i = 0; i < qs_cfg->q_count; i++)
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
mutex_unlock(qs_cfg->qs_mutex);
return 0;
@@ -38,7 +40,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
*/
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
- int i, index = 0;
+ unsigned int i, index = 0;
mutex_lock(qs_cfg->qs_mutex);
for (i = 0; i < qs_cfg->q_count; i++) {
@@ -47,7 +49,7 @@ static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
if (index >= qs_cfg->pf_map_size)
goto err_scatter;
set_bit(index, qs_cfg->pf_map);
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
}
mutex_unlock(qs_cfg->qs_mutex);
@@ -96,7 +98,7 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
* We allocate one q_vector and set default value for ITR setting associated
* with this q_vector. If allocation fails we return -ENOMEM.
*/
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
@@ -246,6 +248,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
switch (vsi->type) {
case ICE_VSI_LB:
+ case ICE_VSI_CTRL:
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
@@ -279,12 +282,13 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
int ice_setup_rx_ctx(struct ice_ring *ring)
{
+ struct device *dev = ice_pf_to_dev(ring->vsi->back);
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+ u16 num_bufs = ICE_DESC_UNUSED(ring);
struct ice_vsi *vsi = ring->vsi;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
struct ice_hw *hw;
- u32 regval;
u16 pf_q;
int err;
@@ -308,24 +312,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (ring->xsk_umem) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ ring->rx_buf_len =
+ xsk_umem_get_rx_frame_size(ring->xsk_umem);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
*/
chain_len = 1;
- ring->zca.free = ice_zca_free;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &ring->zca);
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
if (err)
return err;
+ xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
- dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- ring->zca.free = NULL;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
@@ -376,38 +379,27 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
- rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
chain_len * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
- /* Enable Flexible Descriptors in the queue context which
- * allows this driver to select a specific receive descriptor format
- */
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- if (vsi->type != ICE_VSI_VF) {
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile ID;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
-
- } else {
- regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
- QRXFLXP_CNTXT_RXDID_PRIO_M |
- QRXFLXP_CNTXT_TS_M);
- }
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ /* Enable Flexible Descriptors in the queue context which
+ * allows this driver to select a specific receive descriptor format
+ * increasing context priority to pick up profile ID; default is 0x01;
+ * setting to 0x03 to ensure profile is programming if prev context is
+ * of same priority
+ */
+ if (vsi->type != ICE_VSI_VF)
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
+ else
+ ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
@@ -425,13 +417,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
- err = ring->xsk_umem ?
- ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
- if (err)
- dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
- ring->xsk_umem ? "UMEM enabled " : "",
- ring->q_index, pf_q);
+ if (ring->xsk_umem) {
+ if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
+ dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ num_bufs, ring->q_index);
+ dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
+
+ return 0;
+ }
+
+ err = ice_alloc_rx_bufs_zc(ring, num_bufs);
+ if (err)
+ dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+ ring->q_index, pf_q);
+ return 0;
+ }
+
+ ice_alloc_rx_bufs(ring, num_bufs);
return 0;
}
@@ -453,7 +455,7 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
if (ret) {
/* contig failed, so try with scatter approach */
qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
- qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+ qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
qs_cfg->scatter_count);
ret = __ice_vsi_get_qs_sc(qs_cfg);
}
@@ -526,7 +528,8 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
- int v_idx, err;
+ u16 v_idx;
+ int err;
if (vsi->q_vectors[0]) {
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
@@ -562,7 +565,7 @@ err_out:
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
int q_vectors = vsi->num_q_vectors;
- int tx_rings_rem, rx_rings_rem;
+ u16 tx_rings_rem, rx_rings_rem;
int v_id;
/* initially assigning remaining rings count to VSIs num queue value */
@@ -571,10 +574,12 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
for (v_id = 0; v_id < q_vectors; v_id++) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
- int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+ u8 tx_rings_per_v, rx_rings_per_v;
+ u16 q_id, q_base;
/* Tx rings mapping to vector */
- tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
+ q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
q_vector->tx.ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
@@ -590,7 +595,8 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_rem -= tx_rings_per_v;
/* Rx rings mapping to vector */
- rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
+ q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
q_vector->rx.ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
@@ -633,6 +639,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
u8 buf_len = sizeof(*qg_buf);
+ struct ice_hw *hw = &pf->hw;
enum ice_status status;
u16 pf_q;
u8 tc;
@@ -641,13 +648,13 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
ice_tlan_ctx_info);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
*/
- ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
if (IS_ENABLED(CONFIG_DCB))
tc = ring->dcb_tc;
@@ -662,8 +669,8 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1, qg_buf, buf_len, NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
- status);
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
+ ice_stat_str(status));
return -ENODEV;
}
@@ -832,8 +839,8 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
- status);
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
+ ice_stat_str(status));
return -ENODEV;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2c0d8fd3d5cd..bce0e1281168 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -316,12 +316,78 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}
/**
+ * ice_fill_tx_timer_and_fc_thresh
+ * @hw: pointer to the HW struct
+ * @cmd: pointer to MAC cfg structure
+ *
+ * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
+ * descriptor
+ */
+static void
+ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
+ struct ice_aqc_set_mac_cfg *cmd)
+{
+ u16 fc_thres_val, tx_timer_val;
+ u32 val;
+
+ /* We read back the transmit timer and FC threshold value of
+ * LFC. Thus, we will use index =
+ * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
+ *
+ * Also, because we are operating on transmit timer and FC
+ * threshold of LFC, we don't turn on any bit in tx_tmr_priority
+ */
+#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
+
+ /* Retrieve the transmit timer */
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
+ tx_timer_val = val &
+ PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
+
+ /* Retrieve the FC threshold */
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
+ fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+
+ cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
+}
+
+/**
+ * ice_aq_set_mac_cfg
+ * @hw: pointer to the HW struct
+ * @max_frame_size: Maximum Frame Size to be supported
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set MAC configuration (0x0603)
+ */
+enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_mac_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_mac_cfg;
+
+ if (max_frame_size == 0)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
+
+ cmd->max_frame_size = cpu_to_le16(max_frame_size);
+
+ ice_fill_tx_timer_and_fc_thresh(hw, cmd);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
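[Editor's note: the first caller appears later in this patch; ice_init_hw()
enables jumbo frame support at the MAC level with:]

	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);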
+
+/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
+ enum ice_status status;
hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->switch_info), GFP_KERNEL);
@@ -332,7 +398,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- return ice_init_def_sw_recp(hw);
+ status = ice_init_def_sw_recp(hw);
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
+ return status;
+ }
+ return 0;
}
/**
@@ -653,6 +724,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
+ /* Set bit to enable Flow Director filters */
+ wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
+ INIT_LIST_HEAD(&hw->fdir_list_head);
+
ice_clear_pxe_mode(hw);
status = ice_init_nvm(hw);
@@ -743,9 +818,18 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
+ /* enable jumbo frame support at MAC level */
+ status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
+ /* Obtain counter base index which would be used by flow director */
+ status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
+ mutex_init(&hw->tnl_lock);
return 0;
err_unroll_fltr_mgmt_struct:
@@ -769,12 +853,14 @@ err_unroll_cqinit:
*/
void ice_deinit_hw(struct ice_hw *hw)
{
+ ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
ice_cleanup_fltr_mgmt_struct(hw);
ice_sched_cleanup_all(hw);
ice_sched_clear_agg(hw);
ice_free_seg(hw);
ice_free_hw_tbls(hw);
+ mutex_destroy(&hw->tnl_lock);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -878,7 +964,12 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
- for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ /* Wait for the PFR to complete. The wait time is the global config lock
+ * timeout plus the PFR timeout, which accounts for a reset that may be
+ * in progress during a Download Package operation.
+ */
+ for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
+ ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, PFGEN_CTRL);
if (!(reg & PFGEN_CTRL_PFSWR_M))
break;
@@ -1012,7 +1103,7 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
rlan_ctx->prefena = 1;
- ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
+ ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
@@ -1678,6 +1769,33 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
+ case ICE_AQC_CAPS_FD:
+ if (dev_p) {
+ dev_p->num_flow_director_fltr = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_flow_director_fltr = %d\n",
+ prefix,
+ dev_p->num_flow_director_fltr);
+ }
+ if (func_p) {
+ u32 reg_val, val;
+
+ reg_val = rd32(hw, GLQF_FD_SIZE);
+ val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
+ GLQF_FD_SIZE_FD_GSIZE_S;
+ func_p->fd_fltr_guar =
+ ice_get_num_per_func(hw, val);
+ val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
+ GLQF_FD_SIZE_FD_BSIZE_S;
+ func_p->fd_fltr_best_effort = val;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: fd_fltr_guar = %d\n",
+ prefix, func_p->fd_fltr_guar);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: fd_fltr_best_effort = %d\n",
+ prefix, func_p->fd_fltr_best_effort);
+ }
+ break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
@@ -1887,10 +2005,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
cmd->flags = flags;
-
- /* Prep values for flags, sah, sal */
- cmd->sah = htons(*((const u16 *)mac_addr));
- cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
+ ether_addr_copy(cmd->mac_addr, mac_addr);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -2117,6 +2232,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
+ enum ice_status status;
if (!cfg)
return ICE_ERR_PARAM;
@@ -2145,7 +2261,11 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
- return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+ status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ status = 0;
+
+ return status;
}
/**
@@ -3089,12 +3209,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/**
* ice_set_ctx - set context bits in packed structure
+ * @hw: pointer to the hardware structure
* @src_ctx: pointer to a generic non-packed context structure
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
int f;
@@ -3103,6 +3225,12 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* using the correct size so that we are correct regardless
* of the endianness of the machine.
*/
+ if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
+ ice_debug(hw, ICE_DBG_QCTX,
+ "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
+ f, ce_info[f].width, ce_info[f].size_of);
+ continue;
+ }
switch (ce_info[f].size_of) {
case sizeof(u8):
ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 8104f3d64d96..9b9e50d2398b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -70,7 +70,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
@@ -108,6 +109,8 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
+enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index dd946866d7b8..1e18021aa073 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -12,6 +12,7 @@ do { \
(qinfo)->sq.bal = prefix##_ATQBAL; \
(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
+ (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
(qinfo)->rq.head = prefix##_ARQH; \
(qinfo)->rq.tail = prefix##_ARQT; \
@@ -20,6 +21,7 @@ do { \
(qinfo)->rq.bal = prefix##_ARQBAL; \
(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
+ (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
} while (0)
@@ -199,7 +201,9 @@ unwind_alloc_rq_bufs:
cq->rq.r.rq_bi[i].pa = 0;
cq->rq.r.rq_bi[i].size = 0;
}
+ cq->rq.r.rq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
+ cq->rq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -245,7 +249,9 @@ unwind_alloc_sq_bufs:
cq->sq.r.sq_bi[i].pa = 0;
cq->sq.r.sq_bi[i].size = 0;
}
+ cq->sq.r.sq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+ cq->sq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -304,6 +310,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return 0;
}
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ if ((qi)->ring.r.ring##_bi) \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) { \
+ dmam_free_coherent(ice_hw_to_dev(hw), \
+ (qi)->ring.r.ring##_bi[i].size, \
+ (qi)->ring.r.ring##_bi[i].va, \
+ (qi)->ring.r.ring##_bi[i].pa); \
+ (qi)->ring.r.ring##_bi[i].va = NULL;\
+ (qi)->ring.r.ring##_bi[i].pa = 0;\
+ (qi)->ring.r.ring##_bi[i].size = 0;\
+ } \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+ /* free DMA head */ \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
+} while (0)
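[Editor's note: compared with the copy removed further down in this file,
the relocated macro gains the NULL check on (qi)->ring.r.ring##_bi, making
it safe to call from the ice_init_sq()/ice_init_rq() unwind paths above,
which can run before the buffer array exists and which now also NULL
dma_head and the buffer array on failure.]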
+
/**
* ice_init_sq - main initialization routine for Control ATQ
* @hw: pointer to the hardware structure
@@ -357,6 +385,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
@@ -416,33 +445,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
-#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
-do { \
- int i; \
- /* free descriptors */ \
- for (i = 0; i < (qi)->num_##ring##_entries; i++) \
- if ((qi)->ring.r.ring##_bi[i].pa) { \
- dmam_free_coherent(ice_hw_to_dev(hw), \
- (qi)->ring.r.ring##_bi[i].size,\
- (qi)->ring.r.ring##_bi[i].va,\
- (qi)->ring.r.ring##_bi[i].pa);\
- (qi)->ring.r.ring##_bi[i].va = NULL; \
- (qi)->ring.r.ring##_bi[i].pa = 0; \
- (qi)->ring.r.ring##_bi[i].size = 0; \
- } \
- /* free the buffer info list */ \
- if ((qi)->ring.cmd_buf) \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
- /* free DMA head */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
-} while (0)
-
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@@ -635,6 +644,50 @@ init_ctrlq_free_sq:
}
/**
+ * ice_shutdown_ctrlq - shutdown routine for any control queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
+ */
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+ struct ice_ctl_q_info *cq;
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ cq = &hw->adminq;
+ if (ice_check_sq_alive(hw, cq))
+ ice_aq_q_shutdown(hw, true);
+ break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ break;
+ default:
+ return;
+ }
+
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
+}
+
+/**
+ * ice_shutdown_all_ctrlq - shutdown routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
+ */
+void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+{
+ /* Shutdown FW admin queue */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PF-VF Mailbox */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+}
+
+/**
* ice_init_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
@@ -649,17 +702,27 @@ init_ctrlq_free_sq:
*/
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
- enum ice_status ret_code;
+ enum ice_status status;
+ u32 retry = 0;
/* Init FW admin queue */
- ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
- if (ret_code)
- return ret_code;
+ do {
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ if (status)
+ return status;
- ret_code = ice_init_check_adminq(hw);
- if (ret_code)
- return ret_code;
+ status = ice_init_check_adminq(hw);
+ if (status != ICE_ERR_AQ_FW_CRITICAL)
+ break;
+
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Retry Admin Queue init due to FW critical error\n");
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
+ } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
+ if (status)
+ return status;
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
@@ -701,57 +764,12 @@ enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
}
/**
- * ice_shutdown_ctrlq - shutdown routine for any control queue
- * @hw: pointer to the hardware structure
- * @q_type: specific Control queue type
- *
- * NOTE: this function does not destroy the control queue locks.
- */
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
-{
- struct ice_ctl_q_info *cq;
-
- switch (q_type) {
- case ICE_CTL_Q_ADMIN:
- cq = &hw->adminq;
- if (ice_check_sq_alive(hw, cq))
- ice_aq_q_shutdown(hw, true);
- break;
- case ICE_CTL_Q_MAILBOX:
- cq = &hw->mailboxq;
- break;
- default:
- return;
- }
-
- ice_shutdown_sq(hw, cq);
- ice_shutdown_rq(hw, cq);
-}
-
-/**
- * ice_shutdown_all_ctrlq - shutdown routine for all control queues
- * @hw: pointer to the hardware structure
- *
- * NOTE: this function does not destroy the control queue locks. The driver
- * may call this at runtime to shutdown and later restart control queues, such
- * as in response to a reset event.
- */
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
-{
- /* Shutdown FW admin queue */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
- /* Shutdown PF-VF Mailbox */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
-}
-
-/**
* ice_destroy_ctrlq_locks - Destroy locks for a control queue
* @cq: pointer to the control queue
*
* Destroys the send and receive queue locks for a given control queue.
*/
-static void
-ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
mutex_destroy(&cq->sq_lock);
mutex_destroy(&cq->rq_lock);
@@ -1042,9 +1060,15 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* update the error if time out occurred */
if (!cmd_completed) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue Writeback timeout.\n");
- status = ICE_ERR_AQ_TIMEOUT;
+ if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
+ rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
+ status = ICE_ERR_AQ_FW_CRITICAL;
+ } else {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send Queue Writeback timeout.\n");
+ status = ICE_ERR_AQ_TIMEOUT;
+ }
}
sq_send_command_error:
@@ -1128,7 +1152,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
memcpy(&e->desc, desc, sizeof(e->desc));
datalen = le16_to_cpu(desc->datalen);
- e->msg_len = min(datalen, e->buf_len);
+ e->msg_len = min_t(u16, datalen, e->buf_len);
if (e->msg_buf && e->msg_len)
memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index bf0ebe6149e8..faaa08e8171b 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -34,6 +34,8 @@ enum ice_ctl_q {
/* Control Queue timeout settings - max delay 250ms */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
+#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
+#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
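[Editor's note: worked out, the new admin-init retry budget is
ICE_CTL_Q_ADMIN_INIT_TIMEOUT * ICE_CTL_Q_ADMIN_INIT_MSEC = 10 * 100 ms,
i.e. roughly 1 s of sleep across retries, alongside the existing
2500 * 100 us = 250 ms per-command writeback timeout noted above.]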
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
@@ -59,6 +61,7 @@ struct ice_ctl_q_ring {
u32 bal;
u32 len_mask;
u32 len_ena_mask;
+ u32 len_crit_mask;
u32 head_mask;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 7bea09363b42..979af197f8a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -63,6 +63,64 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_is_pfc_causing_hung_q
+ * @pf: pointer to PF structure
+ * @txqueue: Tx queue suspected to be hung
+ *
+ * Determine whether PFC is causing the hung queue; return true if so,
+ * false otherwise.
+ */
+bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue)
+{
+ u8 num_tcs = 0, i, tc, up_mapped_tc, up_in_tc = 0;
+ u64 ref_prio_xoff[ICE_MAX_UP];
+ struct ice_vsi *vsi;
+ u32 up2tc;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ num_tcs++;
+
+ /* first find out the TC to which the hung queue belongs */
+ for (tc = 0; tc < num_tcs - 1; tc++)
+ if (ice_find_q_in_range(vsi->tc_cfg.tc_info[tc].qoffset,
+ vsi->tc_cfg.tc_info[tc + 1].qoffset,
+ txqueue))
+ break;
+
+ /* Build a bitmap of all UPs associated with the suspect hung queue's TC
+ * so that we can check those UPs' counters for increments.
+ */
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+ for (i = 0; i < ICE_MAX_UP; i++) {
+ up_mapped_tc = (up2tc >> (i * 3)) & 0x7;
+ if (up_mapped_tc == tc)
+ up_in_tc |= BIT(i);
+ }
+
+ /* Even though the hung queue is PFC enabled, the Tx timeout can still
+ * be legitimate. To make sure the timeout is really caused by a PFC
+ * storm, check whether the XOFF counters are incrementing.
+ */
+ for (i = 0; i < ICE_MAX_UP; i++)
+ if (up_in_tc & BIT(i))
+ ref_prio_xoff[i] = pf->stats.priority_xoff_rx[i];
+
+ ice_update_dcb_stats(pf);
+
+ for (i = 0; i < ICE_MAX_UP; i++)
+ if (up_in_tc & BIT(i))
+ if (pf->stats.priority_xoff_rx[i] > ref_prio_xoff[i])
+ return true;
+
+ return false;
+}
+
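The register walk above extracts one 3-bit TC index per user priority (UP) from PRTDCB_TUP2TC. A standalone sketch of that decode, with my_ups_for_tc as a hypothetical helper and ICE_MAX_UP assumed to be 8:

/* Sketch: return a bitmap of the UPs that map to 'tc', given the packed
 * register value where UP i occupies bits [3i + 2 : 3i].
 */
static u8 my_ups_for_tc(u32 up2tc, u8 tc)
{
	u8 up_in_tc = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (((up2tc >> (i * 3)) & 0x7) == tc)
			up_in_tc |= BIT(i);
	return up_in_tc;
}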
+/**
* ice_dcb_get_mode - gets the DCB mode
* @port_info: pointer to port info structure
 * @host: if set, the mode is HOST; otherwise it is MANAGED
@@ -526,16 +584,21 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
*/
static bool ice_dcb_tc_contig(u8 *prio_table)
{
- u8 max_tc = 0;
+ bool found_empty = false;
+ u8 used_tc = 0;
int i;
- for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
- u8 cur_tc = prio_table[i];
+ /* Create a bitmap of used TCs */
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+ used_tc |= BIT(prio_table[i]);
- if (cur_tc > max_tc)
- return false;
- else if (cur_tc == max_tc)
- max_tc++;
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
+ if (used_tc & BIT(i)) {
+ if (found_empty)
+ return false;
+ } else {
+ found_empty = true;
+ }
}
return true;
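The rewritten check reduces TC contiguity to a bitmap scan: any set bit that appears after a cleared bit means the used TCs are not contiguous from zero. As a worked example, a prio_table of {0, 0, 1, 1, 2, 0, 0, 0} yields used_tc = 0b0111 and passes, while {0, 0, 3, 0, 0, 0, 0, 0} yields used_tc = 0b1001, where bit 3 follows the gap at bits 1-2, so the configuration is rejected.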
@@ -728,39 +791,31 @@ void ice_update_dcb_stats(struct ice_pf *pf)
* ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
* @tx_ring: ring to send buffer on
* @first: pointer to struct ice_tx_buf
+ *
+ * This should not be called if the outer VLAN is software offloaded as the VLAN
+ * tag will already be configured with the correct ID and priority bits
*/
-int
+void
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
- return 0;
+ return;
/* Insert 802.1p priority into VLAN header */
- if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
+ if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
/* Mask the lower 3 bits to set the 802.1p priority */
first->tx_flags |= (skb->priority & 0x7) <<
ICE_TX_FLAGS_VLAN_PR_S;
- if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
- struct vlan_ethhdr *vhdr;
- int rc;
-
- rc = skb_cow_head(skb, 0);
- if (rc < 0)
- return rc;
- vhdr = (struct vlan_ethhdr *)skb->data;
- vhdr->h_vlan_TCI = htons(first->tx_flags >>
- ICE_TX_FLAGS_VLAN_S);
- } else {
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
- }
+ /* if this is not already set, it means a VLAN 0 + priority tag
+ * needs to be offloaded
+ */
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
-
- return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 37680e815b02..323238669572 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -17,6 +17,8 @@
void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
+bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
@@ -25,13 +27,27 @@ void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
-int
+void
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
+
+/**
+ * ice_find_q_in_range
+ * @low: start of the queue range for a TC, i.e. the TC's queue offset
+ * @high: start of the queue range for the next TC
+ * @tx_q: Tx queue index to check (e.g. the suspected hung queue)
+ *
+ * Returns true if queue 'tx_q' falls within the given TC's queue range
+ */
+static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
+{
+ return (tx_q >= low) && (tx_q < high);
+}
+
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
@@ -79,6 +95,13 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
return 0;
}
+static inline bool
+ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
+ unsigned int __always_unused txqueue)
+{
+ return false;
+}
+
#define ice_update_dcb_stats(pf) do {} while (0)
#define ice_pf_dcb_recfg(pf) do {} while (0)
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index c4c12414083a..87f91b750d59 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -7,8 +7,6 @@
#include "ice_dcb_nl.h"
#include <net/dcbnl.h>
-#define ICE_APP_PROT_ID_ROCE 0x8915
-
/**
* ice_dcbnl_devreset - perform enough of a ifdown/ifup to sync DCBNL info
* @netdev: device associated with interface that needs reset
@@ -671,7 +669,7 @@ static bool
ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
struct ice_dcb_app_priority_table *app)
{
- int i;
+ unsigned int i;
for (i = 0; i < cfg->numapps; i++) {
if (app->selector == cfg->app[i].selector &&
@@ -746,7 +744,8 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcbx_cfg *old_cfg, *new_cfg;
- int i, j, ret = 0;
+ unsigned int i, j;
+ int ret = 0;
if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
return -EINVAL;
@@ -869,7 +868,7 @@ void ice_dcbnl_set_all(struct ice_vsi *vsi)
struct ice_port_info *pi;
struct dcb_app sapp;
struct ice_pf *pf;
- int i;
+ unsigned int i;
if (!netdev)
return;
@@ -941,7 +940,7 @@ ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg)
{
struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
- int i;
+ unsigned int i;
if (!main_vsi)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index c6833944b90a..a73d06e06b5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -105,6 +105,27 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
return 0;
}
+static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+
+ /* The netlist version fields are BCD formatted */
+ snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
+ netlist->cust_ver);
+
+ return 0;
+}
+
+static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+
+ snprintf(buf, len, "0x%08x", netlist->hash);
+
+ return 0;
+}
+
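The "%x" specifiers above are what make the BCD encoding print correctly: each BCD nibble stores one decimal digit, so rendering the raw value in hex yields the human-readable digits without any arithmetic decode. A tiny standalone illustration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int major = 0x12, minor = 0x34;	/* made-up BCD fields */
	char buf[16];

	snprintf(buf, sizeof(buf), "%x.%x", major, minor);
	printf("%s\n", buf);	/* prints "12.34" */
	return 0;
}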
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
@@ -128,6 +149,8 @@ static const struct ice_devlink_version {
running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+ running("fw.netlist", ice_info_netlist_ver),
+ running("fw.netlist.build", ice_info_netlist_build),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 593fb37bd59e..68c38004a088 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -5,6 +5,7 @@
#include "ice.h"
#include "ice_flow.h"
+#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -129,6 +130,8 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
+ ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
+ ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
};
static const u32 ice_regs_dump_list[] = {
@@ -139,9 +142,6 @@ static const u32 ice_regs_dump_list[] = {
QINT_RQCTL(0),
PFINT_OICR_ENA,
QRX_ITR(0),
- PF0INT_ITR_0(0),
- PF0INT_ITR_1(0),
- PF0INT_ITR_2(0),
};
struct ice_priv_flag {
@@ -157,6 +157,8 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("vf-true-promisc-support",
+ ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -203,7 +205,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
struct ice_pf *pf = np->vsi->back;
struct ice_hw *hw = &pf->hw;
u32 *regs_buf = (u32 *)p;
- int i;
+ unsigned int i;
regs->version = 1;
@@ -273,8 +275,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) {
- dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -282,8 +285,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
false);
if (status) {
- dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto release;
}
@@ -304,7 +308,7 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- int i;
+ unsigned int i;
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
@@ -332,7 +336,8 @@ static u64 ice_link_test(struct net_device *netdev)
netdev_info(netdev, "link test\n");
status = ice_get_link_status(np->vsi->port_info, &link_up);
if (status) {
- netdev_err(netdev, "link query error, status = %d\n", status);
+ netdev_err(netdev, "link query error, status = %s\n",
+ ice_stat_str(status));
return 1;
}
@@ -373,7 +378,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
0x00000000, 0xFFFFFFFF
};
u32 val, orig_val;
- int i;
+ unsigned int i;
orig_val = rd32(hw, reg);
for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
@@ -426,7 +431,7 @@ static u64 ice_reg_test(struct net_device *netdev)
GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
{GLINT_CTL, 0xffff0001, 1, 0}
};
- int i;
+ unsigned int i;
netdev_dbg(netdev, "Register test\n");
for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
@@ -671,7 +676,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_ring *tx_ring, *rx_ring;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
- LIST_HEAD(tmp_list);
struct device *dev;
u8 *tx_frame;
int i;
@@ -707,16 +711,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
/* Test VSI needs to receive broadcast packets */
eth_broadcast_addr(broadcast);
- if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) {
+ if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
ret = 5;
goto lbtest_mac_dis;
}
- if (ice_add_mac(&pf->hw, &tmp_list)) {
- ret = 6;
- goto free_mac_list;
- }
-
if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
ret = 7;
goto remove_mac_filters;
@@ -739,10 +738,8 @@ static u64 ice_loopback_test(struct net_device *netdev)
lbtest_free_frame:
devm_kfree(dev, tx_frame);
remove_mac_filters:
- if (ice_remove_mac(&pf->hw, &tmp_list))
+ if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
-free_mac_list:
- ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
/* Disable MAC loopback after the test is completed. */
if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
@@ -1158,8 +1155,9 @@ static int ice_nway_reset(struct net_device *netdev)
status = ice_aq_set_link_restart_an(pi, false, NULL);
if (status) {
- netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
- status, pi->hw->adminq.sq_last_status);
+ netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(pi->hw->adminq.sq_last_status));
return -EIO;
}
@@ -1308,6 +1306,16 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
ice_down(vsi);
ice_up(vsi);
}
+ /* don't allow modification of this flag while any VF is in
+ * promiscuous mode because it's not supported
+ */
+ if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
+ ice_is_any_vf_in_promisc(pf)) {
+ dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
+ /* toggle bit back to previous state */
+ change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
+ ret = -EAGAIN;
+ }
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
@@ -2450,8 +2458,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
if (status) {
- dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
- vsi->vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n",
+ vsi->vsi_num, ice_stat_str(status));
return -EINVAL;
}
@@ -2526,6 +2534,10 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
struct ice_vsi *vsi = np->vsi;
switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return ice_add_fdir_ethtool(vsi, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return ice_del_fdir_ethtool(vsi, cmd);
case ETHTOOL_SRXFH:
return ice_set_rss_hash_opt(vsi, cmd);
default:
@@ -2549,12 +2561,27 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret = -EOPNOTSUPP;
+ struct ice_hw *hw;
+
+ hw = &vsi->back->hw;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = vsi->rss_size;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = hw->fdir_active_fltr;
+ /* report total rule count */
+ cmd->data = ice_get_fdir_cnt_all(hw);
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = ice_get_ethtool_fdir_entry(hw, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
+ break;
case ETHTOOL_GRXFH:
ice_get_rss_hash_opt(vsi, cmd);
ret = 0;
@@ -2593,7 +2620,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
- u32 new_rx_cnt, new_tx_cnt;
+ u16 new_rx_cnt, new_tx_cnt;
if (ring->tx_pending > ICE_MAX_NUM_DESC ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
@@ -2645,8 +2672,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
if (ice_is_xdp_ena_vsi(vsi))
for (i = 0; i < vsi->num_xdp_txq; i++)
vsi->xdp_rings[i]->count = new_tx_cnt;
- vsi->num_tx_desc = new_tx_cnt;
- vsi->num_rx_desc = new_rx_cnt;
+ vsi->num_tx_desc = (u16)new_tx_cnt;
+ vsi->num_rx_desc = (u16)new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -2952,31 +2979,22 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
status = ice_set_fc(pi, &aq_failures, link_up);
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
- if (!test_bit(__ICE_DOWN, pf->state)) {
- /* Give it a little more time to try to come back. If still
- * down, restart autoneg link or reinitialize the interface.
- */
- msleep(75);
- if (!test_bit(__ICE_DOWN, pf->state))
- return ice_nway_reset(netdev);
-
- ice_down(vsi);
- ice_up(vsi);
- }
-
return err;
}
@@ -3171,10 +3189,6 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- /* check to see if VSI is active */
- if (test_bit(__ICE_DOWN, vsi->state))
- return;
-
/* report maximum channels */
ch->max_rx = ice_get_max_rxq(pf);
ch->max_tx = ice_get_max_txq(pf);
@@ -3184,6 +3198,10 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
ch->combined_count = ice_get_combined_cnt(vsi);
ch->rx_count = vsi->num_rxq - ch->combined_count;
ch->tx_count = vsi->num_txq - ch->combined_count;
+
+ /* report other queues */
+ ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
+ ch->max_other = ch->other_count;
}
/**
@@ -3227,8 +3245,9 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
vsi->rss_table_size);
if (status) {
- dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EIO;
}
@@ -3255,9 +3274,14 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EOPNOTSUPP;
}
/* do not support changing other_count */
- if (ch->other_count)
+ if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
return -EINVAL;
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
+ netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
+ return -EOPNOTSUPP;
+ }
+
curr_combined = ice_get_combined_cnt(vsi);
/* these checks are for cases where user didn't specify a particular
@@ -3731,10 +3755,10 @@ ice_get_module_eeprom(struct net_device *netdev,
struct ice_hw *hw = &pf->hw;
enum ice_status status;
bool is_sfp = false;
+ unsigned int i;
u16 offset = 0;
u8 value = 0;
u8 page = 0;
- int i;
status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0,
&value, 1, 0, NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
new file mode 100644
index 000000000000..d7430ce6af26
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+/* flow director ethtool support for ice */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_flow.h"
+
+static struct in6_addr full_ipv6_addr_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
+ }
+};
+
+static struct in6_addr zero_ipv6_addr_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }
+ }
+};
+
+/* calls to ice_flow_add_prof require the number of segments in the array
+ * for segs_cnt. In this code that is one more than the index.
+ */
+#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
+
+/**
+ * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
+ * flow type values
+ * @flow: filter type to be converted
+ *
+ * Returns the corresponding ethtool flow type.
+ */
+static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
+{
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ return TCP_V4_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ return UDP_V4_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ return SCTP_V4_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ return IPV4_USER_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ return TCP_V6_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ return UDP_V6_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ return SCTP_V6_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ return IPV6_USER_FLOW;
+ default:
+ /* 0 is undefined ethtool flow */
+ return 0;
+ }
+}
+
+/**
+ * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
+ * @eth: Ethtool flow type to be converted
+ *
+ * Returns flow enum
+ */
+static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
+{
+ switch (eth) {
+ case TCP_V4_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ case UDP_V4_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ case SCTP_V4_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ case IPV4_USER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ case TCP_V6_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+ case UDP_V6_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ case SCTP_V6_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+ case IPV6_USER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+ default:
+ return ICE_FLTR_PTYPE_NONF_NONE;
+ }
+}
+
+/**
+ * ice_is_mask_valid - check mask field set
+ * @mask: full mask to check
+ * @field: field for which mask should be valid
+ *
+ * Return true if the mask fully covers the field; otherwise return false.
+ */
+static bool ice_is_mask_valid(u64 mask, u64 field)
+{
+ return (mask & field) == field;
+}
+
+/**
+ * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
+ * @hw: hardware structure that contains filter list
+ * @cmd: ethtool command data structure to receive the filter data
+ *
+ * Returns 0 on success and -EINVAL on failure
+ */
+int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct ice_fdir_fltr *rule;
+ int ret = 0;
+ u16 idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+
+ rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);
+
+ if (!rule || fsp->location != rule->fltr_id) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);
+
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case IPV4_USER_FLOW:
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
+ fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
+ fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
+ fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
+ fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
+ fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
+ fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
+ fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
+ fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
+ fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
+ fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
+ break;
+ case IPV6_USER_FLOW:
+ fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
+ fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
+ fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
+ fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
+ fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
+ rule->mask.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
+ rule->mask.v6.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
+ fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
+ break;
+ default:
+ break;
+ }
+
+ if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
+
+ idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
+ if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
+ dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
+ rule->flow_type);
+ ret = -EINVAL;
+ }
+
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+ return ret;
+}
+
+/**
+ * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
+ * @hw: hardware structure containing the filter list
+ * @cmd: ethtool command data structure
+ * @rule_locs: ethtool array passed in from OS to receive filter IDs
+ *
+ * Returns 0 on success, as expected by ethtool
+ */
+int
+ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct ice_fdir_fltr *f_rule;
+ unsigned int cnt = 0;
+ int val = 0;
+
+ /* report total rule count */
+ cmd->data = ice_get_fdir_cnt_all(hw);
+
+ mutex_lock(&hw->fdir_fltr_lock);
+
+ list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
+ if (cnt == cmd->rule_cnt) {
+ val = -EMSGSIZE;
+ goto release_lock;
+ }
+ rule_locs[cnt] = f_rule->fltr_id;
+ cnt++;
+ }
+
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+ if (!val)
+ cmd->rule_cnt = cnt;
+ return val;
+}
+
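Callers are expected to use this in two steps: query the rule count, allocate rule_locs, then fetch the IDs; the -EMSGSIZE path fires if filters were added between the two calls. A hedged sketch of that pattern, with my_get_rxnfc() standing in as a hypothetical wrapper for the driver's .get_rxnfc callback:

/* Sketch only: the two-step pattern an ethtool-style caller would use. */
static int my_list_fdir_rules(struct net_device *netdev)
{
	struct ethtool_rxnfc info = { .cmd = ETHTOOL_GRXCLSRLCNT };
	u32 *rule_locs;
	int err;

	err = my_get_rxnfc(netdev, &info, NULL);	/* hypothetical wrapper */
	if (err)
		return err;

	rule_locs = kcalloc(info.rule_cnt, sizeof(*rule_locs), GFP_KERNEL);
	if (!rule_locs)
		return -ENOMEM;

	info.cmd = ETHTOOL_GRXCLSRLALL;
	err = my_get_rxnfc(netdev, &info, rule_locs);
	/* -EMSGSIZE here means filters were added between the two calls */

	kfree(rule_locs);
	return err;
}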
+/**
+ * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
+ * @hw: hardware structure containing the filter list
+ * @blk: hardware block
+ * @flow: FDir flow type to look up
+ */
+static struct ice_fd_hw_prof *
+ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
+{
+ if (blk == ICE_BLK_FD && hw->fdir_prof)
+ return hw->fdir_prof[flow];
+
+ return NULL;
+}
+
+/**
+ * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
+ * @hw: hardware structure containing the filter list
+ * @blk: hardware block
+ * @flow: FDir flow type to release
+ */
+static void
+ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
+{
+ struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
+ int tun;
+
+ if (!prof)
+ return;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ u64 prof_id;
+ int j;
+
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ for (j = 0; j < prof->cnt; j++) {
+ u16 vsi_num;
+
+ if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
+ continue;
+ vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
+ prof->entry_h[j][tun] = 0;
+ }
+ ice_flow_rem_prof(hw, blk, prof_id);
+ }
+}
+
+/**
+ * ice_fdir_rem_flow - release the ice_flow structures for a filter type
+ * @hw: hardware structure containing the filter list
+ * @blk: hardware block
+ * @flow_type: FDir flow type to release
+ */
+static void
+ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
+ enum ice_fltr_ptype flow_type)
+{
+ int flow = (int)flow_type & ~FLOW_EXT;
+ struct ice_fd_hw_prof *prof;
+ int tun, i;
+
+ prof = ice_fdir_get_hw_prof(hw, blk, flow);
+ if (!prof)
+ return;
+
+ ice_fdir_erase_flow_from_hw(hw, blk, flow);
+ for (i = 0; i < prof->cnt; i++)
+ prof->vsi_h[i] = 0;
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ if (!prof->fdir_seg[tun])
+ continue;
+ devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
+ prof->fdir_seg[tun] = NULL;
+ }
+ prof->cnt = 0;
+}
+
+/**
+ * ice_fdir_release_flows - release all flows in use for later replay
+ * @hw: pointer to HW instance
+ */
+void ice_fdir_release_flows(struct ice_hw *hw)
+{
+ int flow;
+
+ /* release Flow Director HW table entries */
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
+ ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
+}
+
+/**
+ * ice_fdir_replay_flows - replay HW Flow Director filter info
+ * @hw: pointer to HW instance
+ */
+void ice_fdir_replay_flows(struct ice_hw *hw)
+{
+ int flow;
+
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ int tun;
+
+ if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
+ continue;
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ struct ice_flow_prof *hw_prof;
+ struct ice_fd_hw_prof *prof;
+ u64 prof_id;
+ int j;
+
+ prof = hw->fdir_prof[flow];
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
+ prof->fdir_seg[tun], TNL_SEG_CNT(tun),
+ &hw_prof);
+ for (j = 0; j < prof->cnt; j++) {
+ enum ice_flow_priority prio;
+ u64 entry_h = 0;
+ int err;
+
+ prio = ICE_FLOW_PRIO_NORMAL;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD,
+ prof_id,
+ prof->vsi_h[0],
+ prof->vsi_h[j],
+ prio, prof->fdir_seg,
+ &entry_h);
+ if (err) {
+ dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
+ flow);
+ continue;
+ }
+ prof->entry_h[j][tun] = entry_h;
+ }
+ }
+ }
+}
+
+/**
+ * ice_parse_rx_flow_user_data - deconstruct user-defined data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @data: pointer to userdef data structure for storage
+ *
+ * Returns 0 on success, negative error value on failure
+ */
+static int
+ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct ice_rx_flow_userdef *data)
+{
+ u64 value, mask;
+
+ memset(data, 0, sizeof(*data));
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
+ mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
+ if (!mask)
+ return 0;
+
+#define ICE_USERDEF_FLEX_WORD_M GENMASK_ULL(15, 0)
+#define ICE_USERDEF_FLEX_OFFS_S 16
+#define ICE_USERDEF_FLEX_OFFS_M GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
+#define ICE_USERDEF_FLEX_FLTR_M GENMASK_ULL(31, 0)
+
+ /* 0x1fe is the maximum value for offsets stored in the internal
+ * filtering tables.
+ */
+#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
+
+ if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
+ value > ICE_USERDEF_FLEX_FLTR_M)
+ return -EINVAL;
+
+ data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
+ data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
+ ICE_USERDEF_FLEX_OFFS_S;
+ if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
+ return -EINVAL;
+
+ data->flex_fltr = true;
+
+ return 0;
+}
+
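For reference, the 64-bit user-def word decoded above packs the flex match word in bits 0-15 and the packet offset in bits 16-31; bits 32-63 of the value must be zero and the mask must cover bits 0-31. For example, a user-def value of 0x004e86dd selects flex word 0x86dd at offset 0x4e, comfortably under the 0x1fe limit.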
+/**
+ * ice_fdir_num_avail_fltr - return the number of unused flow director filters
+ * @hw: pointer to hardware structure
+ * @vsi: software VSI structure
+ *
+ * There are two filter pools: guaranteed and best effort (shared). Each VSI
+ * can use filters from either pool. The guaranteed pool is divided between
+ * VSIs; the best effort pool is a device-wide resource common to all VSIs.
+ * The number of filters available to this VSI is the sum of its unused
+ * guaranteed filters and the globally available best effort filters.
+ *
+ * Returns the number of available flow director filters to this VSI
+ */
+static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+{
+ u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+ u16 num_guar;
+ u16 num_be;
+
+ /* total guaranteed filters assigned to this VSI */
+ num_guar = vsi->num_gfltr;
+
+ /* minus the guaranteed filters programmed by this VSI */
+ num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
+ VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;
+
+ /* total global best effort filters */
+ num_be = hw->func_caps.fd_fltr_best_effort;
+
+ /* minus the global best effort filters programmed */
+ num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
+ GLQF_FD_CNT_FD_BCNT_S;
+
+ return num_guar + num_be;
+}
+
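As a worked example with made-up numbers: a VSI granted 64 guaranteed filters of which 10 are already programmed, on a device whose shared best effort pool holds 2048 filters with 100 in use, can still add (64 - 10) + (2048 - 100) = 2002 filters.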
+/**
+ * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
+ * @hw: HW structure containing the FDir flow profile structure(s)
+ * @flow: flow type to allocate the flow profile for
+ *
+ * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
+ * on success and negative on error.
+ */
+static int
+ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
+{
+ if (!hw)
+ return -EINVAL;
+
+ if (!hw->fdir_prof) {
+ hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
+ ICE_FLTR_PTYPE_MAX,
+ sizeof(*hw->fdir_prof),
+ GFP_KERNEL);
+ if (!hw->fdir_prof)
+ return -ENOMEM;
+ }
+
+ if (!hw->fdir_prof[flow]) {
+ hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(**hw->fdir_prof),
+ GFP_KERNEL);
+ if (!hw->fdir_prof[flow])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
+ * @pf: pointer to the PF structure
+ * @seg: protocol header description pointer
+ * @flow: filter enum
+ * @tun: FDir segment to program
+ */
+static int
+ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
+ enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *main_vsi, *ctrl_vsi;
+ struct ice_flow_seg_info *old_seg;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_fd_hw_prof *hw_prof;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u64 entry1_h = 0;
+ u64 entry2_h = 0;
+ u64 prof_id;
+ int err;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return -EINVAL;
+
+ ctrl_vsi = ice_get_ctrl_vsi(pf);
+ if (!ctrl_vsi)
+ return -EINVAL;
+
+ err = ice_fdir_alloc_flow_prof(hw, flow);
+ if (err)
+ return err;
+
+ hw_prof = hw->fdir_prof[flow];
+ old_seg = hw_prof->fdir_seg[tun];
+ if (old_seg) {
+ /* This flow_type already has a modified input set.
+ * If it matches the requested input set then we are
+ * done. If it is different, that is an error.
+ */
+ if (!memcmp(old_seg, seg, sizeof(*seg)))
+ return -EEXIST;
+
+ /* if any FDir filters are using this flow,
+ * return an error
+ */
+ if (hw->fdir_fltr_cnt[flow]) {
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+ return -EINVAL;
+ }
+
+ if (ice_is_arfs_using_perfect_flow(hw, flow)) {
+ dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
+ flow);
+ return -EINVAL;
+ }
+
+ /* remove HW filter definition */
+ ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
+ }
+
+ /* Add the profile. Only one header is supported, so the final
+ * parameters are one header (segment), no actions (NULL), and an
+ * action count of zero.
+ */
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ TNL_SEG_CNT(tun), &prof);
+ if (status)
+ return ice_status_to_errno(status);
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_prof;
+ }
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_entry;
+ }
+
+ hw_prof->fdir_seg[tun] = seg;
+ hw_prof->entry_h[0][tun] = entry1_h;
+ hw_prof->entry_h[1][tun] = entry2_h;
+ hw_prof->vsi_h[0] = main_vsi->idx;
+ hw_prof->vsi_h[1] = ctrl_vsi->idx;
+ if (!hw_prof->cnt)
+ hw_prof->cnt = 2;
+
+ return 0;
+
+err_entry:
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+ ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
+err_prof:
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+
+ return err;
+}
+
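The prof_id arithmetic above gives every (flow type, segment) pair its own slot: non-tunnel IDs occupy [0, ICE_FLTR_PTYPE_MAX) and tunnel IDs occupy [ICE_FLTR_PTYPE_MAX, 2 * ICE_FLTR_PTYPE_MAX), so the same flow type never collides across segments. A one-line sketch of the scheme (my_fdir_prof_id is purely illustrative):

static u64 my_fdir_prof_id(enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
{
	/* disjoint ID ranges per tunnel segment */
	return (u64)flow + (u64)tun * ICE_FLTR_PTYPE_MAX;
}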
+/**
+ * ice_set_init_fdir_seg
+ * @seg: flow segment for programming
+ * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
+ * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
+ *
+ * Set the configuration for perfect filters into the provided flow segment
+ * for programming the HW filter. This is to be called only when initializing
+ * filters, as this function assumes no filters exist.
+ */
+static int
+ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
+ enum ice_flow_seg_hdr l3_proto,
+ enum ice_flow_seg_hdr l4_proto)
+{
+ enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
+
+ if (!seg)
+ return -EINVAL;
+
+ if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
+ src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
+ dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
+ } else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
+ src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
+ dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
+ } else {
+ return -EINVAL;
+ }
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else {
+ return -EINVAL;
+ }
+
+ ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
+
+ /* IP source address */
+ ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* IP destination address */
+ ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 source port */
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 destination port */
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ return 0;
+}
+
+/**
+ * ice_create_init_fdir_rule
+ * @pf: PF structure
+ * @flow: filter enum
+ *
+ * Return error value or 0 on success.
+ */
+static int
+ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
+{
+ struct ice_flow_seg_info *seg, *tun_seg;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int ret;
+
+ /* if there is already a filter rule for this kind, return -EINVAL */
+ if (hw->fdir_prof && hw->fdir_prof[flow] &&
+ hw->fdir_prof[flow]->fdir_seg[0])
+ return -EINVAL;
+
+ seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ return -ENOMEM;
+
+ tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ GFP_KERNEL);
+ if (!tun_seg) {
+ devm_kfree(dev, seg);
+ return -ENOMEM;
+ }
+
+ if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_SEG_HDR_TCP);
+ else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_SEG_HDR_UDP);
+ else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_SEG_HDR_TCP);
+ else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_SEG_HDR_UDP);
+ else
+ ret = -EINVAL;
+ if (ret)
+ goto err_exit;
+
+ /* add filter for outer headers */
+ ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
+ if (ret)
+ /* could not write filter, free memory */
+ goto err_exit;
+
+ /* make tunneled filter HW entries if possible */
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+ ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
+ if (ret)
+ /* could not write tunnel filter, but outer header filter
+ * exists
+ */
+ devm_kfree(dev, tun_seg);
+
+ set_bit(flow, hw->fdir_perfect_fltr);
+ return ret;
+err_exit:
+ devm_kfree(dev, tun_seg);
+ devm_kfree(dev, seg);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_set_fdir_ip4_seg
+ * @seg: flow segment for programming
+ * @tcp_ip4_spec: mask data from ethtool
+ * @l4_proto: Layer 4 protocol to program
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the mask data into the flow segment to be used to program HW
+ * table based on provided L4 protocol for IPv4
+ */
+static int
+ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_tcpip4_spec *tcp_ip4_spec,
+ enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
+{
+ enum ice_flow_field src_port, dst_port;
+
+ /* make sure we don't have any empty rule */
+ if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
+ !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
+ return -EINVAL;
+
+ /* filtering on TOS not supported */
+ if (tcp_ip4_spec->tos)
+ return -EOPNOTSUPP;
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
+ src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);
+
+ /* IP source address */
+ if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!tcp_ip4_spec->ip4src)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* IP destination address */
+ if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!tcp_ip4_spec->ip4dst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 source port */
+ if (tcp_ip4_spec->psrc == htons(0xFFFF))
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip4_spec->psrc)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 destination port */
+ if (tcp_ip4_spec->pdst == htons(0xFFFF))
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip4_spec->pdst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
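Every field above follows the same three-way rule: a fully set mask programs an exact match, an all-zero mask wildcards the field (downgrading the filter from 'perfect'), and any partial mask is rejected. A condensed sketch of that decision, with hypothetical names:

enum my_mask_kind { MY_MASK_EXACT, MY_MASK_WILDCARD, MY_MASK_UNSUPPORTED };

static enum my_mask_kind my_classify_mask(u32 mask, u32 full)
{
	if (mask == full)
		return MY_MASK_EXACT;		/* program the field */
	if (!mask)
		return MY_MASK_WILDCARD;	/* skip; filter not perfect */
	return MY_MASK_UNSUPPORTED;		/* partial masks rejected */
}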
+/**
+ * ice_set_fdir_ip4_usr_seg
+ * @seg: flow segment for programming
+ * @usr_ip4_spec: ethtool userdef packet offset
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the offset data into the flow segment to be used to program HW
+ * table for IPv4
+ */
+static int
+ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_usrip4_spec *usr_ip4_spec,
+ bool *perfect_fltr)
+{
+ /* first 4 bytes of Layer 4 header */
+ if (usr_ip4_spec->l4_4_bytes)
+ return -EINVAL;
+ if (usr_ip4_spec->tos)
+ return -EINVAL;
+ if (usr_ip4_spec->ip_ver)
+ return -EINVAL;
+ /* Filtering on Layer 4 protocol not supported */
+ if (usr_ip4_spec->proto)
+ return -EOPNOTSUPP;
+ /* empty rules are not valid */
+ if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
+ return -EINVAL;
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
+
+ /* IP source address */
+ if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!usr_ip4_spec->ip4src)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* IP destination address */
+ if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!usr_ip4_spec->ip4dst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_set_fdir_ip6_seg
+ * @seg: flow segment for programming
+ * @tcp_ip6_spec: mask data from ethtool
+ * @l4_proto: Layer 4 protocol to program
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the mask data into the flow segment to be used to program HW
+ * table based on provided L4 protocol for IPv6
+ */
+static int
+ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_tcpip6_spec *tcp_ip6_spec,
+ enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
+{
+ enum ice_flow_field src_port, dst_port;
+
+ /* make sure we don't have any empty rule */
+ if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)) &&
+ !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)) &&
+ !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
+ return -EINVAL;
+
+ /* filtering on TC not supported */
+ if (tcp_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
+ src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
+ } else {
+ return -EINVAL;
+ }
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);
+
+ if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 source port */
+ if (tcp_ip6_spec->psrc == htons(0xFFFF))
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip6_spec->psrc)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 destination port */
+ if (tcp_ip6_spec->pdst == htons(0xFFFF))
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip6_spec->pdst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_set_fdir_ip6_usr_seg
+ * @seg: flow segment for programming
+ * @usr_ip6_spec: ethtool userdef packet offset
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the offset data into the flow segment to be used to program HW
+ * table for IPv6
+ */
+static int
+ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_usrip6_spec *usr_ip6_spec,
+ bool *perfect_fltr)
+{
+ /* filtering on Layer 4 bytes not supported */
+ if (usr_ip6_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+ /* filtering on TC not supported */
+ if (usr_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+ /* filtering on Layer 4 protocol not supported */
+ if (usr_ip6_spec->l4_proto)
+ return -EOPNOTSUPP;
+ /* empty rules are not valid */
+ if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)) &&
+ !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ return -EINVAL;
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
+
+ if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
+ * @pf: PF structure
+ * @fsp: pointer to ethtool Rx flow specification
+ * @user: user defined data from flow specification
+ *
+ * Returns 0 on success.
+ */
+static int
+ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
+ struct ice_rx_flow_userdef *user)
+{
+ struct ice_flow_seg_info *seg, *tun_seg;
+ struct device *dev = ice_pf_to_dev(pf);
+ enum ice_fltr_ptype fltr_idx;
+ struct ice_hw *hw = &pf->hw;
+ bool perfect_filter;
+ int ret;
+
+ seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ return -ENOMEM;
+
+ tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ GFP_KERNEL);
+ if (!tun_seg) {
+ devm_kfree(dev, seg);
+ return -ENOMEM;
+ }
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
+ ICE_FLOW_SEG_HDR_TCP,
+ &perfect_filter);
+ break;
+ case UDP_V4_FLOW:
+ ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
+ ICE_FLOW_SEG_HDR_UDP,
+ &perfect_filter);
+ break;
+ case SCTP_V4_FLOW:
+ ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
+ ICE_FLOW_SEG_HDR_SCTP,
+ &perfect_filter);
+ break;
+ case IPV4_USER_FLOW:
+ ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
+ &perfect_filter);
+ break;
+ case TCP_V6_FLOW:
+ ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
+ ICE_FLOW_SEG_HDR_TCP,
+ &perfect_filter);
+ break;
+ case UDP_V6_FLOW:
+ ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
+ ICE_FLOW_SEG_HDR_UDP,
+ &perfect_filter);
+ break;
+ case SCTP_V6_FLOW:
+ ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
+ ICE_FLOW_SEG_HDR_SCTP,
+ &perfect_filter);
+ break;
+ case IPV6_USER_FLOW:
+ ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
+ &perfect_filter);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto err_exit;
+
+ /* tunnel segments are shifted up one. */
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+
+ if (user && user->flex_fltr) {
+ perfect_filter = false;
+ ice_flow_add_fld_raw(seg, user->flex_offset,
+ ICE_FLTR_PRGM_FLEX_WORD_SIZE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL);
+ ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
+ ICE_FLTR_PRGM_FLEX_WORD_SIZE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL);
+ }
+
+ /* add filter for outer headers */
+ fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
+ ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
+ ICE_FD_HW_SEG_NON_TUN);
+ if (ret == -EEXIST)
+ /* Rule already exists, free memory and continue */
+ devm_kfree(dev, seg);
+ else if (ret)
+ /* could not write filter, free memory */
+ goto err_exit;
+
+ /* make tunneled filter HW entries if possible */
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+ ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
+ ICE_FD_HW_SEG_TUN);
+ if (ret == -EEXIST) {
+ /* Rule already exists, free memory and count as success */
+ devm_kfree(dev, tun_seg);
+ ret = 0;
+ } else if (ret) {
+ /* could not write tunnel filter, but outer filter exists */
+ devm_kfree(dev, tun_seg);
+ }
+
+ if (perfect_filter)
+ set_bit(fltr_idx, hw->fdir_perfect_fltr);
+ else
+ clear_bit(fltr_idx, hw->fdir_perfect_fltr);
+
+ return ret;
+
+err_exit:
+ devm_kfree(dev, tun_seg);
+ devm_kfree(dev, seg);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_fdir_write_fltr - send a flow director filter to the hardware
+ * @pf: PF data structure
+ * @input: filter structure
+ * @add: true to add the filter, false to remove it
+ * @is_tun: true to program the inner (tunnel) headers, false for the outer headers
+ *
+ * returns 0 on success and negative value on error
+ */
+int
+ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
+ bool is_tun)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fltr_desc desc;
+ struct ice_vsi *ctrl_vsi;
+ enum ice_status status;
+ u8 *pkt, *frag_pkt;
+ bool has_frag;
+ int err;
+
+ ctrl_vsi = ice_get_ctrl_vsi(pf);
+ if (!ctrl_vsi)
+ return -EINVAL;
+
+ pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+ frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!frag_pkt) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_free_all;
+ }
+ err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
+ if (err)
+ goto err_free_all;
+
+ /* repeat for fragment packet */
+ has_frag = ice_fdir_has_frag(input->flow_type);
+ if (has_frag) {
+ /* does not return error */
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
+ is_tun);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_frag;
+ }
+ err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
+ if (err)
+ goto err_frag;
+ } else {
+ devm_kfree(dev, frag_pkt);
+ }
+
+ return 0;
+
+err_free_all:
+ devm_kfree(dev, frag_pkt);
+err_free:
+ devm_kfree(dev, pkt);
+ return err;
+
+err_frag:
+ devm_kfree(dev, frag_pkt);
+ return err;
+}
+
+/**
+ * ice_fdir_write_all_fltr - send a flow director filter to the hardware
+ * @pf: PF data structure
+ * @input: filter structure
+ * @add: true to add the filter, false to remove it
+ *
+ * returns 0 on success and negative value on error
+ */
+static int
+ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ bool add)
+{
+ u16 port_num;
+ int tun;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ bool is_tun = tun == ICE_FD_HW_SEG_TUN;
+ int err;
+
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL,
+ &port_num))
+ continue;
+ err = ice_fdir_write_fltr(pf, input, add, is_tun);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/**
+ * ice_fdir_replay_fltrs - replay filters from the HW filter list
+ * @pf: board private structure
+ */
+void ice_fdir_replay_fltrs(struct ice_pf *pf)
+{
+ struct ice_fdir_fltr *f_rule;
+ struct ice_hw *hw = &pf->hw;
+
+ list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
+ int err = ice_fdir_write_all_fltr(pf, f_rule, true);
+
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
+ err, f_rule->fltr_id);
+ }
+}
+
+/**
+ * ice_fdir_create_dflt_rules - create default perfect filters
+ * @pf: PF data structure
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int ice_fdir_create_dflt_rules(struct ice_pf *pf)
+{
+ int err;
+
+ /* Create perfect TCP and UDP rules in hardware. */
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
+ if (err)
+ return err;
+
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
+ if (err)
+ return err;
+
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
+ if (err)
+ return err;
+
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+
+ return err;
+}
+
+/**
+ * ice_vsi_manage_fdir - turn on/off flow director
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_fdir_fltr *f_rule, *tmp;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_fltr_ptype flow;
+
+ if (ena) {
+ set_bit(ICE_FLAG_FD_ENA, pf->flags);
+ ice_fdir_create_dflt_rules(pf);
+ return;
+ }
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
+ goto release_lock;
+ list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
+		/* ignore the return value: the filters are being removed
+		 * regardless of HW errors
+		 */
+ ice_fdir_write_all_fltr(pf, f_rule, false);
+ ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
+ list_del(&f_rule->fltr_node);
+ devm_kfree(ice_hw_to_dev(hw), f_rule);
+ }
+
+ if (hw->fdir_prof)
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
+ flow++)
+ if (hw->fdir_prof[flow])
+ ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
+
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+}
+
+/**
+ * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
+ * @pf: PF structure
+ * @flow_type: FDir flow type to release
+ */
+static void
+ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool need_perfect = false;
+
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ need_perfect = true;
+
+ if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
+ return;
+
+ ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
+ if (need_perfect)
+ ice_create_init_fdir_rule(pf, flow_type);
+}
+
+/**
+ * ice_fdir_update_list_entry - add or delete a filter from the filter list
+ * @pf: PF structure
+ * @input: filter structure
+ * @fltr_idx: ethtool index of filter to modify
+ *
+ * returns 0 on success and a negative value on error
+ */
+static int
+ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ int fltr_idx)
+{
+ struct ice_fdir_fltr *old_fltr;
+ struct ice_hw *hw = &pf->hw;
+ int err = -ENOENT;
+
+ /* Do not update filters during reset */
+ if (ice_is_reset_in_progress(pf->state))
+ return -EBUSY;
+
+ old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
+ if (old_fltr) {
+ err = ice_fdir_write_all_fltr(pf, old_fltr, false);
+ if (err)
+ return err;
+ ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
+ if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
+ /* we just deleted the last filter of flow_type so we
+ * should also delete the HW filter info.
+ */
+ ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
+ list_del(&old_fltr->fltr_node);
+ devm_kfree(ice_hw_to_dev(hw), old_fltr);
+ }
+ if (!input)
+ return err;
+ ice_fdir_list_add_fltr(hw, input);
+ ice_fdir_update_cntrs(hw, input->flow_type, true);
+ return 0;
+}
+
+/**
+ * ice_del_fdir_ethtool - delete Flow Director filter
+ * @vsi: pointer to target VSI
+ * @cmd: command to add or delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int val;
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ /* Do not delete filters during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(__ICE_FD_FLUSH_REQ, pf->state))
+ return -EBUSY;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
+ mutex_unlock(&hw->fdir_fltr_lock);
+
+ return val;
+}
+
+/**
+ * ice_set_fdir_input_set - Set the input set for Flow Director
+ * @vsi: pointer to target VSI
+ * @fsp: pointer to ethtool Rx flow specification
+ * @input: filter structure
+ */
+static int
+ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
+ struct ice_fdir_fltr *input)
+{
+ u16 dest_vsi, q_index = 0;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int flow_type;
+ u8 dest_ctl;
+
+ if (!vsi || !fsp || !input)
+ return -EINVAL;
+
+ pf = vsi->back;
+ hw = &pf->hw;
+
+ dest_vsi = vsi->idx;
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (vf) {
+ dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
+ return -EINVAL;
+ }
+
+ if (ring >= vsi->num_rxq)
+ return -EINVAL;
+
+ dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ q_index = ring;
+ }
+
+ input->fltr_id = fsp->location;
+ input->q_index = q_index;
+ flow_type = fsp->flow_type & ~FLOW_EXT;
+
+ input->dest_vsi = dest_vsi;
+ input->dest_ctl = dest_ctl;
+ input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
+ input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
+ input->flow_type = ice_ethtool_flow_to_fltr(flow_type);
+
+ if (fsp->flow_type & FLOW_EXT) {
+ memcpy(input->ext_data.usr_def, fsp->h_ext.data,
+ sizeof(input->ext_data.usr_def));
+ input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
+ input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
+ memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
+ sizeof(input->ext_mask.usr_def));
+ input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
+ input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
+ }
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
+ input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
+ break;
+ case IPV4_USER_FLOW:
+ input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
+ input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+ input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
+ input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
+ input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
+ input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
+ input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
+ input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
+ input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
+ input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
+ input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
+ memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
+ input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
+ input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
+ input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+ break;
+ default:
+		/* reject flow types we do not parse */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_fdir_ethtool - Add a Flow Director filter
+ * @vsi: pointer to target VSI
+ * @cmd: command to add or delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
+{
+ struct ice_rx_flow_userdef userdata;
+ struct ethtool_rx_flow_spec *fsp;
+ struct ice_fdir_fltr *input;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int fltrs_needed;
+ u16 tunnel_port;
+ int ret;
+
+ if (!vsi)
+ return -EINVAL;
+
+ pf = vsi->back;
+ hw = &pf->hw;
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ /* Do not program filters during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
+ return -EBUSY;
+ }
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (ice_parse_rx_flow_user_data(fsp, &userdata))
+ return -EINVAL;
+
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
+ if (ret)
+ return ret;
+
+ if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ return -ENOSPC;
+ }
+
+ /* return error if not an update and no available filters */
+ fltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ?
+ 2 : 1;
+ if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
+ ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ return -ENOSPC;
+ }
+
+ input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ ret = ice_set_fdir_input_set(vsi, fsp, input);
+ if (ret)
+ goto free_input;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ if (ice_fdir_is_dup_fltr(hw, input)) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ if (userdata.flex_fltr) {
+ input->flex_fltr = true;
+ input->flex_word = cpu_to_be16(userdata.flex_word);
+ input->flex_offset = userdata.flex_offset;
+ }
+
+ /* input struct is added to the HW filter list */
+ ice_fdir_update_list_entry(pf, input, fsp->location);
+
+ ret = ice_fdir_write_all_fltr(pf, input, true);
+ if (ret)
+ goto remove_sw_rule;
+
+ goto release_lock;
+
+remove_sw_rule:
+ ice_fdir_update_cntrs(hw, input->flow_type, false);
+ list_del(&input->fltr_node);
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+free_input:
+ if (ret)
+ devm_kfree(dev, input);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
new file mode 100644
index 000000000000..6834df14332f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#include "ice_common.h"
+
+/* These are training packet headers used to program flow director filters. */
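+/* Each array below is a zeroed-out Ethernet + IP (+ L4) header template.
+ * ice_fdir_get_gen_prgm_pkt() copies the template that matches the filter's
+ * flow type and then writes the filter's addresses and ports into it at the
+ * fixed offsets defined in ice_fdir.h.
+ */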
+static const u8 ice_fdir_tcpv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 ice_fdir_udpv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctpv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x20, 0x00, 0x00, 0x40, 0x00, 0x40, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00
+};
+
+static const u8 ice_fdir_tcpv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_udpv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctpv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x0C, 0x84, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3B, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_tcp4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_udp4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x4e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctp4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x52, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x20, 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ip4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x46, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_tcp6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x6e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_udp6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x62, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctp6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x66, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x84, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ip6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* Flow Director no-op training packet table */
+static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt,
+ sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ sizeof(ice_fdir_udpv4_pkt), ice_fdir_udpv4_pkt,
+ sizeof(ice_fdir_udp4_tun_pkt), ice_fdir_udp4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+ sizeof(ice_fdir_sctpv4_pkt), ice_fdir_sctpv4_pkt,
+ sizeof(ice_fdir_sctp4_tun_pkt), ice_fdir_sctp4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ sizeof(ice_fdir_ipv4_pkt), ice_fdir_ipv4_pkt,
+ sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP,
+ sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt,
+ sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ sizeof(ice_fdir_udpv6_pkt), ice_fdir_udpv6_pkt,
+ sizeof(ice_fdir_udp6_tun_pkt), ice_fdir_udp6_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_SCTP,
+ sizeof(ice_fdir_sctpv6_pkt), ice_fdir_sctpv6_pkt,
+ sizeof(ice_fdir_sctp6_tun_pkt), ice_fdir_sctp6_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_OTHER,
+ sizeof(ice_fdir_ipv6_pkt), ice_fdir_ipv6_pkt,
+ sizeof(ice_fdir_ip6_tun_pkt), ice_fdir_ip6_tun_pkt,
+ },
+};
+
+#define ICE_FDIR_NUM_PKT ARRAY_SIZE(ice_fdir_pkt)
+
+/**
+ * ice_set_dflt_val_fd_desc
+ * @fd_fltr_ctx: pointer to FD filter descriptor context
+ */
+static void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx)
+{
+ fd_fltr_ctx->comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;
+ fd_fltr_ctx->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
+ fd_fltr_ctx->fd_space = ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST;
+ fd_fltr_ctx->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ fd_fltr_ctx->evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE;
+ fd_fltr_ctx->toq = ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX;
+ fd_fltr_ctx->toq_prio = ICE_FXD_FLTR_QW0_TO_Q_PRIO1;
+ fd_fltr_ctx->dpu_recipe = ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT;
+ fd_fltr_ctx->drop = ICE_FXD_FLTR_QW0_DROP_NO;
+ fd_fltr_ctx->flex_prio = ICE_FXD_FLTR_QW0_FLEX_PRI_NONE;
+ fd_fltr_ctx->flex_mdid = ICE_FXD_FLTR_QW0_FLEX_MDID0;
+ fd_fltr_ctx->flex_val = ICE_FXD_FLTR_QW0_FLEX_VAL0;
+ fd_fltr_ctx->dtype = ICE_TX_DESC_DTYPE_FLTR_PROG;
+ fd_fltr_ctx->desc_prof_prio = ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO;
+ fd_fltr_ctx->desc_prof = ICE_FXD_FLTR_QW1_PROF_ZERO;
+ fd_fltr_ctx->swap = ICE_FXD_FLTR_QW1_SWAP_SET;
+ fd_fltr_ctx->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
+ fd_fltr_ctx->fdid_mdid = ICE_FXD_FLTR_QW1_FDID_MDID_FD;
+ fd_fltr_ctx->fdid = ICE_FXD_FLTR_QW1_FDID_ZERO;
+}
+
+/**
+ * ice_set_fd_desc_val
+ * @ctx: pointer to fd filter descriptor context
+ * @fdir_desc: populated with fd filter descriptor values
+ */
+static void
+ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx,
+ struct ice_fltr_desc *fdir_desc)
+{
+ u64 qword;
+
+ /* prep QW0 of FD filter programming desc */
+ qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) &
+ ICE_FXD_FLTR_QW0_QINDEX_M;
+ qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) &
+ ICE_FXD_FLTR_QW0_COMP_Q_M;
+ qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) &
+ ICE_FXD_FLTR_QW0_COMP_REPORT_M;
+ qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) &
+ ICE_FXD_FLTR_QW0_FD_SPACE_M;
+ qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) &
+ ICE_FXD_FLTR_QW0_STAT_CNT_M;
+ qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) &
+ ICE_FXD_FLTR_QW0_STAT_ENA_M;
+ qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) &
+ ICE_FXD_FLTR_QW0_EVICT_ENA_M;
+ qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) &
+ ICE_FXD_FLTR_QW0_TO_Q_M;
+ qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) &
+ ICE_FXD_FLTR_QW0_TO_Q_PRI_M;
+ qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) &
+ ICE_FXD_FLTR_QW0_DPU_RECIPE_M;
+ qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) &
+ ICE_FXD_FLTR_QW0_DROP_M;
+ qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) &
+ ICE_FXD_FLTR_QW0_FLEX_PRI_M;
+ qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) &
+ ICE_FXD_FLTR_QW0_FLEX_MDID_M;
+ qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) &
+ ICE_FXD_FLTR_QW0_FLEX_VAL_M;
+ fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword);
+
+ /* prep QW1 of FD filter programming desc */
+ qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) &
+ ICE_FXD_FLTR_QW1_DTYPE_M;
+ qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) &
+ ICE_FXD_FLTR_QW1_PCMD_M;
+ qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) &
+ ICE_FXD_FLTR_QW1_PROF_PRI_M;
+ qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) &
+ ICE_FXD_FLTR_QW1_PROF_M;
+ qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) &
+ ICE_FXD_FLTR_QW1_FD_VSI_M;
+ qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) &
+ ICE_FXD_FLTR_QW1_SWAP_M;
+ qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) &
+ ICE_FXD_FLTR_QW1_FDID_PRI_M;
+ qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) &
+ ICE_FXD_FLTR_QW1_FDID_MDID_M;
+ qword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) &
+ ICE_FXD_FLTR_QW1_FDID_M;
+ fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword);
+}
+
+/**
+ * ice_fdir_get_prgm_desc - set a fdir descriptor from a fdir filter struct
+ * @hw: pointer to the hardware structure
+ * @input: filter
+ * @fdesc: filter descriptor
+ * @add: if add is true, this is an add operation; false implies delete
+ */
+void
+ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ struct ice_fltr_desc *fdesc, bool add)
+{
+ struct ice_fd_fltr_desc_ctx fdir_fltr_ctx = { 0 };
+
+ /* set default context info */
+ ice_set_dflt_val_fd_desc(&fdir_fltr_ctx);
+
+ /* change sideband filtering values */
+ fdir_fltr_ctx.fdid = input->fltr_id;
+ if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
+ fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES;
+ fdir_fltr_ctx.qindex = 0;
+ } else {
+ fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;
+ fdir_fltr_ctx.qindex = input->q_index;
+ }
+ fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ fdir_fltr_ctx.cnt_index = input->cnt_index;
+ fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi);
+ fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE;
+ fdir_fltr_ctx.toq_prio = 3;
+ fdir_fltr_ctx.pcmd = add ? ICE_FXD_FLTR_QW1_PCMD_ADD :
+ ICE_FXD_FLTR_QW1_PCMD_REMOVE;
+ fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET;
+ fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;
+ fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
+ fdir_fltr_ctx.fdid_prio = 3;
+ fdir_fltr_ctx.desc_prof = 1;
+ fdir_fltr_ctx.desc_prof_prio = 3;
+ ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc);
+}
+
+/**
+ * ice_alloc_fd_res_cntr - obtain counter resource for FD type
+ * @hw: pointer to the hardware structure
+ * @cntr_id: returns counter index
+ */
+enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
+}
+
+/**
+ * ice_free_fd_res_cntr - Free counter resource for FD type
+ * @hw: pointer to the hardware structure
+ * @cntr_id: counter index to be freed
+ */
+enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
+{
+ return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
+}
+
+/**
+ * ice_alloc_fd_guar_item - allocate resource for FD guaranteed entries
+ * @hw: pointer to the hardware structure
+ * @cntr_id: returns counter index
+ * @num_fltr: number of filter entries to be allocated
+ */
+enum ice_status
+ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
+ cntr_id);
+}
+
+/**
+ * ice_alloc_fd_shrd_item - allocate resource for flow director shared entries
+ * @hw: pointer to the hardware structure
+ * @cntr_id: returns counter index
+ * @num_fltr: number of filter entries to be allocated
+ */
+enum ice_status
+ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
+ cntr_id);
+}
+
+/**
+ * ice_get_fdir_cnt_all - get the number of Flow Director filters
+ * @hw: hardware data structure
+ *
+ * Returns the number of filters available on the device
+ */
+int ice_get_fdir_cnt_all(struct ice_hw *hw)
+{
+ return hw->func_caps.fd_fltr_guar + hw->func_caps.fd_fltr_best_effort;
+}
+
+/**
+ * ice_pkt_insert_ipv6_addr - insert an IPv6 address into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @addr: IPv6 address (array of __be32 words) to insert into pkt at offset
+ */
+static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr)
+{
+ int idx;
+
+ for (idx = 0; idx < ICE_IPV6_ADDR_LEN_AS_U32; idx++)
+ memcpy(pkt + offset + idx * sizeof(*addr), &addr[idx],
+ sizeof(*addr));
+}
+
+/**
+ * ice_pkt_insert_u16 - insert a be16 value into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 16-bit big-endian value to insert into pkt at offset
+ */
+static void ice_pkt_insert_u16(u8 *pkt, int offset, __be16 data)
+{
+ memcpy(pkt + offset, &data, sizeof(data));
+}
+
+/**
+ * ice_pkt_insert_u32 - insert a be32 value into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 32-bit big-endian value to insert into pkt at offset
+ */
+static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data)
+{
+ memcpy(pkt + offset, &data, sizeof(data));
+}
+
+/**
+ * ice_fdir_get_gen_prgm_pkt - generate a training packet
+ * @hw: pointer to the hardware structure
+ * @input: flow director filter data structure
+ * @pkt: pointer to return filter packet
+ * @frag: generate a fragment packet
+ * @tun: true implies generate a tunnel packet
+ */
+enum ice_status
+ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ u8 *pkt, bool frag, bool tun)
+{
+ enum ice_fltr_ptype flow;
+ u16 tnl_port;
+ u8 *loc;
+ u16 idx;
+
+ if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ switch (input->ip.v4.proto) {
+ case IPPROTO_TCP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ break;
+ case IPPROTO_UDP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ break;
+ case IPPROTO_SCTP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ break;
+ case IPPROTO_IP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ } else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
+ switch (input->ip.v6.proto) {
+ case IPPROTO_TCP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+ break;
+ case IPPROTO_UDP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ break;
+ case IPPROTO_SCTP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+ break;
+ case IPPROTO_IP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ } else {
+ flow = input->flow_type;
+ }
+
+ for (idx = 0; idx < ICE_FDIR_NUM_PKT; idx++)
+ if (ice_fdir_pkt[idx].flow == flow)
+ break;
+ if (idx == ICE_FDIR_NUM_PKT)
+ return ICE_ERR_PARAM;
+ if (!tun) {
+ memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
+ loc = pkt;
+ } else {
+ if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port))
+ return ICE_ERR_DOES_NOT_EXIST;
+ if (!ice_fdir_pkt[idx].tun_pkt)
+ return ICE_ERR_PARAM;
+ memcpy(pkt, ice_fdir_pkt[idx].tun_pkt,
+ ice_fdir_pkt[idx].tun_pkt_len);
+ ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,
+ htons(tnl_port));
+ loc = &pkt[ICE_FDIR_TUN_PKT_OFF];
+ }
+
+ /* Reverse the src and dst, since the HW expects them to be from Tx
+	 * perspective. The input from the user is from the Rx filter
+	 * perspective.
+ */
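+	/* e.g. a filter matching Rx dst_port 80 writes 80 into the training
+	 * packet's L4 source-port field below
+	 */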
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_TCP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ if (frag)
+ loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_TCP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ if (input->flex_fltr)
+ ice_pkt_insert_u16(loc, input->flex_offset, input->flex_word);
+
+ return 0;
+}
+
+/**
+ * ice_fdir_has_frag - does flow type have 2 ptypes
+ * @flow: flow ptype
+ *
+ * Returns true if there is a fragment packet for this ptype
+ */
+bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
+{
+ if (flow == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ice_fdir_find_fltr_by_idx - find filter with idx
+ * @hw: pointer to hardware structure
+ * @fltr_idx: index to find.
+ *
+ * Returns pointer to filter if found or null
+ */
+struct ice_fdir_fltr *
+ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx)
+{
+ struct ice_fdir_fltr *rule;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ /* rule ID found in the list */
+ if (fltr_idx == rule->fltr_id)
+ return rule;
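+		/* the list is kept sorted by fltr_id (see
+		 * ice_fdir_list_add_fltr()), so stop once we pass the index
+		 */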
+ if (fltr_idx < rule->fltr_id)
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * ice_fdir_list_add_fltr - add a new node to the flow director filter list
+ * @hw: hardware structure
+ * @fltr: filter node to add to structure
+ */
+void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *fltr)
+{
+ struct ice_fdir_fltr *rule, *parent = NULL;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+		/* rule ID found or we passed its spot in the list */
+ if (rule->fltr_id >= fltr->fltr_id)
+ break;
+ parent = rule;
+ }
+
+ if (parent)
+ list_add(&fltr->fltr_node, &parent->fltr_node);
+ else
+ list_add(&fltr->fltr_node, &hw->fdir_list_head);
+}
+
+/**
+ * ice_fdir_update_cntrs - increment / decrement filter counter
+ * @hw: pointer to hardware structure
+ * @flow: filter flow type
+ * @add: true implies filters added
+ */
+void
+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add)
+{
+ int incr;
+
+ incr = add ? 1 : -1;
+ hw->fdir_active_fltr += incr;
+
+ if (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX)
+ ice_debug(hw, ICE_DBG_SW, "Unknown filter type %d\n", flow);
+ else
+ hw->fdir_fltr_cnt[flow] += incr;
+}
+
+/**
+ * ice_cmp_ipv6_addr - compare 2 IPv6 addresses
+ * @a: IPv6 address
+ * @b: IPv6 address
+ *
+ * Returns 0 if equal, non-zero if different
+ */
+static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
+{
+ return memcmp(a, b, 4 * sizeof(__be32));
+}
+
+/**
+ * ice_fdir_comp_rules - compare 2 filters
+ * @a: a Flow Director filter data structure
+ * @b: a Flow Director filter data structure
+ * @v6: bool true if v6 filter
+ *
+ * Returns true if the filters match
+ */
+static bool
+ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
+{
+ enum ice_fltr_ptype flow_type = a->flow_type;
+
+ /* The calling function already checks that the two filters have the
+ * same flow_type.
+ */
+ if (!v6) {
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.dst_port == b->ip.v4.dst_port &&
+ a->ip.v4.src_port == b->ip.v4.src_port)
+ return true;
+ } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.l4_header == b->ip.v4.l4_header &&
+ a->ip.v4.proto == b->ip.v4.proto &&
+ a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
+ a->ip.v4.tos == b->ip.v4.tos)
+ return true;
+ }
+ } else {
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port &&
+ !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
+ b->ip.v6.dst_ip) &&
+ !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
+ b->ip.v6.src_ip))
+ return true;
+ } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * ice_fdir_is_dup_fltr - test if filter is already in list for PF
+ * @hw: hardware data structure
+ * @input: Flow Director filter data structure
+ *
+ * Returns true if the filter is found in the list
+ */
+bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
+{
+ struct ice_fdir_fltr *rule;
+ bool ret = false;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ enum ice_fltr_ptype flow_type;
+
+ if (rule->flow_type != input->flow_type)
+ continue;
+
+ flow_type = input->flow_type;
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ ret = ice_fdir_comp_rules(rule, input, false);
+ else
+ ret = ice_fdir_comp_rules(rule, input, true);
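+		/* a matching rule with the same filter ID but a different
+		 * queue is an update of the existing filter, not a duplicate
+		 */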
+ if (ret) {
+ if (rule->fltr_id == input->fltr_id &&
+ rule->q_index != input->q_index)
+ ret = false;
+ else
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
new file mode 100644
index 000000000000..1c587766daab
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#ifndef _ICE_FDIR_H_
+#define _ICE_FDIR_H_
+
+#define ICE_FDIR_TUN_PKT_OFF 50
+#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
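+/* ICE_FDIR_TUN_PKT_OFF is the offset of the inner Ethernet header in the
+ * tunnel training packets: outer Ethernet (14) + outer IPv4 (20) + UDP (8) +
+ * 8-byte tunnel header = 50 bytes.
+ */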
+
+/* macros for offsets into packets for flow director programming */
+#define ICE_IPV4_SRC_ADDR_OFFSET 26
+#define ICE_IPV4_DST_ADDR_OFFSET 30
+#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34
+#define ICE_IPV4_TCP_DST_PORT_OFFSET 36
+#define ICE_IPV4_UDP_SRC_PORT_OFFSET 34
+#define ICE_IPV4_UDP_DST_PORT_OFFSET 36
+#define ICE_IPV4_SCTP_SRC_PORT_OFFSET 34
+#define ICE_IPV4_SCTP_DST_PORT_OFFSET 36
+#define ICE_IPV4_PROTO_OFFSET 23
+#define ICE_IPV6_SRC_ADDR_OFFSET 22
+#define ICE_IPV6_DST_ADDR_OFFSET 38
+#define ICE_IPV6_TCP_SRC_PORT_OFFSET 54
+#define ICE_IPV6_TCP_DST_PORT_OFFSET 56
+#define ICE_IPV6_UDP_SRC_PORT_OFFSET 54
+#define ICE_IPV6_UDP_DST_PORT_OFFSET 56
+#define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54
+#define ICE_IPV6_SCTP_DST_PORT_OFFSET 56
+/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF
+ * requests that the packet not be fragmented. MF indicates that a packet has
+ * been fragmented.
+ */
+#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20
+
+enum ice_fltr_prgm_desc_dest {
+ ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
+};
+
+enum ice_fltr_prgm_desc_fd_status {
+ ICE_FLTR_PRGM_DESC_FD_STATUS_NONE,
+ ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID,
+};
+
+/* Flow Director (FD) Filter Programming descriptor */
+struct ice_fd_fltr_desc_ctx {
+ u32 fdid;
+ u16 qindex;
+ u16 cnt_index;
+ u16 fd_vsi;
+ u16 flex_val;
+ u8 comp_q;
+ u8 comp_report;
+ u8 fd_space;
+ u8 cnt_ena;
+ u8 evict_ena;
+ u8 toq;
+ u8 toq_prio;
+ u8 dpu_recipe;
+ u8 drop;
+ u8 flex_prio;
+ u8 flex_mdid;
+ u8 dtype;
+ u8 pcmd;
+ u8 desc_prof_prio;
+ u8 desc_prof;
+ u8 swap;
+ u8 fdid_prio;
+ u8 fdid_mdid;
+};
+
+#define ICE_FLTR_PRGM_FLEX_WORD_SIZE sizeof(__be16)
+
+struct ice_rx_flow_userdef {
+ u16 flex_word;
+ u16 flex_offset;
+ u16 flex_fltr;
+};
+
+struct ice_fdir_v4 {
+ __be32 dst_ip;
+ __be32 src_ip;
+ __be16 dst_port;
+ __be16 src_port;
+ __be32 l4_header;
+ __be32 sec_parm_idx; /* security parameter index */
+ u8 tos;
+ u8 ip_ver;
+ u8 proto;
+};
+
+#define ICE_IPV6_ADDR_LEN_AS_U32 4
+
+struct ice_fdir_v6 {
+ __be32 dst_ip[ICE_IPV6_ADDR_LEN_AS_U32];
+ __be32 src_ip[ICE_IPV6_ADDR_LEN_AS_U32];
+ __be16 dst_port;
+ __be16 src_port;
+ __be32 l4_header; /* next header */
+ __be32 sec_parm_idx; /* security parameter index */
+ u8 tc;
+ u8 proto;
+};
+
+struct ice_fdir_extra {
+ u8 dst_mac[ETH_ALEN]; /* dest MAC address */
+ u32 usr_def[2]; /* user data */
+ __be16 vlan_type; /* VLAN ethertype */
+ __be16 vlan_tag; /* VLAN tag info */
+};
+
+struct ice_fdir_fltr {
+ struct list_head fltr_node;
+ enum ice_fltr_ptype flow_type;
+
+ union {
+ struct ice_fdir_v4 v4;
+ struct ice_fdir_v6 v6;
+ } ip, mask;
+
+ struct ice_fdir_extra ext_data;
+ struct ice_fdir_extra ext_mask;
+
+ /* flex byte filter data */
+ __be16 flex_word;
+ u16 flex_offset;
+ u16 flex_fltr;
+
+ /* filter control */
+ u16 q_index;
+ u16 dest_vsi;
+ u8 dest_ctl;
+ u8 fltr_status;
+ u16 cnt_index;
+ u32 fltr_id;
+};
+
+/* Dummy packet filter definition structure */
+struct ice_fdir_base_pkt {
+ enum ice_fltr_ptype flow;
+ u16 pkt_len;
+ const u8 *pkt;
+ u16 tun_pkt_len;
+ const u8 *tun_pkt;
+};
+
+enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+enum ice_status
+ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+enum ice_status
+ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+void
+ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ struct ice_fltr_desc *fdesc, bool add);
+enum ice_status
+ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ u8 *pkt, bool frag, bool tun);
+int ice_get_fdir_cnt_all(struct ice_hw *hw);
+bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
+struct ice_fdir_fltr *
+ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx);
+void
+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add);
+void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+#endif /* _ICE_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 42bac3ec5526..4420fc02f7e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -5,6 +5,15 @@
#include "ice_flex_pipe.h"
#include "ice_flow.h"
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+static const struct ice_tunnel_type_scan tnls[] = {
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_LAST, "" }
+};
+
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
/* SWITCH */
{
@@ -239,6 +248,268 @@ ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
return state->sect;
}
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in a particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if a section's header indicates a base
+ * offset of 10, and the index for the entry is 2, then the section handler
+ * function should set the offset to 10 + 2 = 12.
+ */
+static void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
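+
+/* Sketch of the calling convention for ice_pkg_enum_entry(): pass the segment
+ * on the first call and NULL on subsequent calls until NULL is returned
+ * (ice_find_boost_entry() below follows this pattern):
+ *
+ *	entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, NULL, handler);
+ *	while (entry) {
+ *		... use entry ...
+ *		entry = ice_pkg_enum_entry(NULL, &state, sect_type, NULL,
+ *					   handler);
+ *	}
+ */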
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = section;
+ if (index >= le16_to_cpu(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_seg structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ tcam = ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && le16_to_cpu(tcam->addr) == addr) {
+ *entry = tcam;
+ return 0;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = section;
+ if (index >= le16_to_cpu(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+ u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = le16_to_cpu(label->value);
+ return label->name;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ memset(&hw->tnl, 0, sizeof(hw->tnl));
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for matching label start, before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
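+			/* e.g. for label "TNL_VXLAN_PF3" the prefix length is
+			 * 12 and label_name[12] is '3'
+			 */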
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].in_use = false;
+ hw->tnl.tbl[hw->tnl.count].marked = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry)
+ hw->tnl.tbl[i].valid = true;
+ }
+}
+
/* Key creation */
#define ICE_DC_KEY 0x1 /* don't care */
@@ -593,8 +864,9 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
u32 i;
ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
- pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
/* Search all package segments for the requested segment type */
for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
@@ -764,13 +1036,15 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
- ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
- ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
le32_to_cpu(ice_seg->hdr.seg_type),
- le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
ice_buf_tbl = ice_find_buf_table(ice_seg);
@@ -815,14 +1089,16 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
if (seg_hdr) {
- hw->ice_pkg_ver = seg_hdr->seg_ver;
- memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
+ hw->ice_pkg_ver = seg_hdr->seg_format_ver;
+ memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
sizeof(hw->ice_pkg_name));
- ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
- seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
- seg_hdr->seg_name);
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft,
+ seg_hdr->seg_id);
} else {
ice_debug(hw, ICE_DBG_INIT,
"Did not find ice segment in driver package\n");
@@ -863,9 +1139,11 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
if (pkg_info->pkg_info[i].is_active) {
flags[place++] = 'A';
hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ le32_to_cpu(pkg_info->pkg_info[i].track_id);
memcpy(hw->active_pkg_name,
pkg_info->pkg_info[i].name,
- sizeof(hw->active_pkg_name));
+ sizeof(pkg_info->pkg_info[i].name));
hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
}
if (pkg_info->pkg_info[i].is_active_at_boot)
@@ -905,10 +1183,10 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
if (len < sizeof(*pkg))
return ICE_ERR_BUF_TOO_SHORT;
- if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
- pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
- pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
- pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
return ICE_ERR_CFG;
/* pkg must have at least one segment */
@@ -990,6 +1268,68 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
}
/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with driver and NVM
+ */
+static enum ice_status
+ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_status status;
+ u16 size;
+ u32 i;
+
+ /* Check package version compatibility */
+ status = ice_chk_pkg_version(&hw->pkg_ver);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return status;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+ pkg = kzalloc(size, GFP_KERNEL);
+ if (!pkg)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
+ if (status)
+ goto fw_ddp_compat_free_alloc;
+
+ for (i = 0; i < le32_to_cpu(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ status = ICE_ERR_FW_DDP_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT,
+ "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ kfree(pkg);
+ return status;
+}
+
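/*
 * A standalone sketch (not from this patch) of the compatibility rule
 * ice_chk_pkg_compat() enforces above: the OS package's ICE segment must
 * have the same major version as the package in NVM and a minor version
 * that is not newer. All names below are hypothetical userspace stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct pkg_ver_model { unsigned char major, minor; };

static bool pkg_compat_model(struct pkg_ver_model seg, struct pkg_ver_model nvm)
{
	/* mirrors: seg major != nvm major || seg minor > nvm minor -> mismatch */
	return seg.major == nvm.major && seg.minor <= nvm.minor;
}

int main(void)
{
	struct pkg_ver_model nvm = { 1, 3 };
	struct pkg_ver_model ok = { 1, 2 }, bad = { 1, 4 };

	printf("1.2 vs NVM 1.3: %s\n", pkg_compat_model(ok, nvm) ? "ok" : "mismatch");
	printf("1.4 vs NVM 1.3: %s\n", pkg_compat_model(bad, nvm) ? "ok" : "mismatch");
	return 0;
}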
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
@@ -1039,18 +1379,12 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
/* before downloading the package, check package version for
* compatibility with driver
*/
- status = ice_chk_pkg_version(&hw->pkg_ver);
+ status = ice_chk_pkg_compat(hw, pkg, &seg);
if (status)
return status;
- /* find segment in given package */
- seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
- if (!seg) {
- ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_ERR_CFG;
- }
-
- /* download package */
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
status = ice_download_pkg(hw, seg);
if (status == ICE_ERR_AQ_NO_WORK) {
ice_debug(hw, ICE_DBG_INIT,
@@ -1292,6 +1626,284 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
return &bld->buf;
}
+/**
+ * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_tunnel_port_in_use
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+{
+ bool res;
+
+ mutex_lock(&hw->tnl_lock);
+ res = ice_tunnel_port_in_use_hlpr(hw, port, index);
+ mutex_unlock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_find_free_tunnel_entry
+ * @hw: pointer to the HW structure
+ * @type: tunnel type
+ * @index: optionally returns index
+ *
+ * Returns whether there is a free tunnel entry, and optionally its index
+ */
+static bool
+ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
+ hw->tnl.tbl[i].type == type) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
+ * @hw: pointer to the HW structure
+ * @type: tunnel type (TNL_ALL will return any open port)
+ * @port: returns open port
+ */
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *port)
+{
+ bool res = false;
+ u16 i;
+
+ mutex_lock(&hw->tnl_lock);
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
+ break;
+ }
+
+ mutex_unlock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_create_tunnel
+ * @hw: pointer to the HW structure
+ * @type: type of tunnel
+ * @port: port of tunnel to create
+ *
+ * Create a tunnel by updating the parse graph in the parser. We do that by
+ * creating a package buffer with the tunnel info and issuing an update package
+ * command.
+ */
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 index;
+
+ mutex_lock(&hw->tnl_lock);
+
+ if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
+ hw->tnl.tbl[index].ref++;
+ status = 0;
+ goto ice_create_tunnel_end;
+ }
+
+ if (!ice_find_free_tunnel_entry(hw, type, &index)) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto ice_create_tunnel_end;
+ }
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_create_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_create_tunnel_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(*sect_rx));
+ if (!sect_rx)
+ goto ice_create_tunnel_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ sizeof(*sect_tx));
+ if (!sect_tx)
+ goto ice_create_tunnel_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer */
+ memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam));
+
+ /* overwrite the never-match dest port key bits with the encoded port
+ * bits
+ */
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ (u8 *)&port, NULL, NULL, NULL,
+ (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
+ sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
+
+ /* exact copy of entry to Tx section entry */
+ memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status) {
+ hw->tnl.tbl[index].port = port;
+ hw->tnl.tbl[index].in_use = true;
+ hw->tnl.tbl[index].ref = 1;
+ }
+
+ice_create_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_create_tunnel_end:
+ mutex_unlock(&hw->tnl_lock);
+
+ return status;
+}
+
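/*
 * A standalone sketch (not from this patch) of the reference counting that
 * ice_create_tunnel() above and ice_destroy_tunnel() below apply to
 * hw->tnl.tbl[]: re-creating an already-open port only bumps "ref", and the
 * parser is reprogrammed only on the first create and the last destroy. The
 * types below are simplified, hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define TNL_MAX_MODEL 16

struct tnl_entry_model { bool valid, in_use; unsigned short port, ref; };

static struct tnl_entry_model tbl[TNL_MAX_MODEL] = { { .valid = true } };

static int create_tunnel_model(unsigned short port)
{
	int i;

	for (i = 0; i < TNL_MAX_MODEL; i++)
		if (tbl[i].in_use && tbl[i].port == port) {
			tbl[i].ref++;		/* already open: take a reference */
			return 0;
		}
	for (i = 0; i < TNL_MAX_MODEL; i++)
		if (tbl[i].valid && !tbl[i].in_use) {
			/* first user: real code issues the package update here */
			tbl[i].port = port;
			tbl[i].in_use = true;
			tbl[i].ref = 1;
			return 0;
		}
	return -1;				/* no free boost TCAM entry */
}

static void destroy_tunnel_model(unsigned short port)
{
	int i;

	for (i = 0; i < TNL_MAX_MODEL; i++)
		if (tbl[i].in_use && tbl[i].port == port) {
			if (--tbl[i].ref == 0)	/* last user tears it down */
				tbl[i].in_use = false;
			return;
		}
}

int main(void)
{
	create_tunnel_model(4789);	/* programs the parser */
	create_tunnel_model(4789);	/* ref == 2, no parser update */
	destroy_tunnel_model(4789);	/* ref == 1, entry stays in use */
	destroy_tunnel_model(4789);	/* last reference, entry released */
	printf("in_use after teardown: %d\n", tbl[0].in_use);
	return 0;
}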
+/**
+ * ice_destroy_tunnel
+ * @hw: pointer to the HW structure
+ * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @all: flag that states to destroy all tunnels
+ *
+ * Destroys a tunnel or all tunnels by creating an update package buffer
+ * targeting the specific updates requested and then performing an update
+ * package.
+ */
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 count = 0;
+ u16 index;
+ u16 size;
+ u16 i;
+
+ mutex_lock(&hw->tnl_lock);
+
+ if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
+ if (hw->tnl.tbl[index].ref > 1) {
+ hw->tnl.tbl[index].ref--;
+ status = 0;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* determine count */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port))
+ count++;
+
+ if (!count) {
+ status = ICE_ERR_PARAM;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* size of section - there is at least one entry */
+ size = struct_size(sect_rx, tcam, count - 1);
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_destroy_tunnel_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_rx)
+ goto ice_destroy_tunnel_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_tx)
+ goto ice_destroy_tunnel_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer, one copy to Rx
+ * section, another copy to the Tx section
+ */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port)) {
+ memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_rx->tcam));
+ memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_tx->tcam));
+ hw->tnl.tbl[i].marked = true;
+ }
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status)
+ for (i = 0; i < hw->tnl.count &&
+ i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].marked) {
+ hw->tnl.tbl[i].ref = 0;
+ hw->tnl.tbl[i].port = 0;
+ hw->tnl.tbl[i].in_use = false;
+ hw->tnl.tbl[i].marked = false;
+ }
+
+ice_destroy_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_destroy_tunnel_end:
+ mutex_unlock(&hw->tnl_lock);
+
+ return status;
+}
+
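/*
 * A standalone sketch (not from this patch) of the sizing idiom behind the
 * struct_size(sect_rx, tcam, count - 1) call above: the boost TCAM section
 * is declared with a one-element trailing array, so sizeof() already covers
 * the first entry and only count - 1 additional entries are added. The
 * struct layout below is a hypothetical stand-in.
 */
#include <stddef.h>
#include <stdio.h>

struct tcam_entry_model { unsigned char key[40]; };

struct boost_section_model {
	unsigned short count;
	struct tcam_entry_model tcam[1];	/* first entry is built in */
};

int main(void)
{
	unsigned int count = 3;
	size_t size = sizeof(struct boost_section_model) +
		      (count - 1) * sizeof(struct tcam_entry_model);

	printf("section bytes for %u entries: %zu\n", count, size);
	return 0;
}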
/* PTG Management */
/**
@@ -1807,9 +2419,16 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
- u16 off, i;
+ u16 off;
+ u8 i;
- for (i = 0; i < es->count; i++) {
+ /* For FD, we don't want to re-use an existing profile with the same
+ * field vector and mask, as this would cause rule interference.
+ */
+ if (blk == ICE_BLK_FD)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ for (i = 0; i < (u8)es->count; i++) {
off = i * es->fvw;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
@@ -1830,6 +2449,9 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
switch (blk) {
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
+ break;
case ICE_BLK_RSS:
*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
break;
@@ -1847,6 +2469,9 @@ static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
switch (blk) {
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
+ break;
case ICE_BLK_RSS:
*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
break;
@@ -2290,6 +2915,12 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
mutex_lock(&hw->fl_profs_locks[blk_idx]);
list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
+ struct ice_flow_entry *e, *t;
+
+ list_for_each_entry_safe(e, t, &p->entries, l_entry)
+ ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
+ ICE_FLOW_ENTRY_HNDL(e));
+
list_del(&p->l_entry);
devm_kfree(ice_hw_to_dev(hw), p);
}
@@ -2919,6 +3550,212 @@ error_tmp:
}
/**
+ * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @mask_sel: mask select
+ *
+ * This function enables any of the masks selected by the mask select
+ * parameter for the specified profile.
+ */
+static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
+{
+ wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
+
+ ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
+ GLQF_FDMASK_SEL(prof_id), mask_sel);
+}
+
+struct ice_fd_src_dst_pair {
+ u8 prot_id;
+ u8 count;
+ u16 off;
+};
+
+static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
+ /* These are defined in pairs */
+ { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
+ { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
+
+ { ICE_PROT_IPV4_IL, 2, 12 },
+ { ICE_PROT_IPV4_IL, 2, 16 },
+
+ { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
+ { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
+
+ { ICE_PROT_IPV6_IL, 8, 8 },
+ { ICE_PROT_IPV6_IL, 8, 24 },
+
+ { ICE_PROT_TCP_IL, 1, 0 },
+ { ICE_PROT_TCP_IL, 1, 2 },
+
+ { ICE_PROT_UDP_OF, 1, 0 },
+ { ICE_PROT_UDP_OF, 1, 2 },
+
+ { ICE_PROT_UDP_IL_OR_S, 1, 0 },
+ { ICE_PROT_UDP_IL_OR_S, 1, 2 },
+
+ { ICE_PROT_SCTP_IL, 1, 0 },
+ { ICE_PROT_SCTP_IL, 1, 2 }
+};
+
+#define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
+
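/*
 * A standalone sketch (not from this patch) of how ice_fd_pairs[] is
 * consumed: entries are laid out as src/dst couples, so the partner of an
 * even index is the next entry and the partner of an odd index is the
 * previous one -- the idx = j + ((j % 2) ? -1 : 1) step used in
 * ice_update_fd_swap() below.
 */
#include <stdio.h>

int main(void)
{
	int j;

	for (j = 0; j < 4; j++)
		printf("pair[%d] <-> pair[%d]\n", j, j + ((j % 2) ? -1 : 1));
	return 0;
}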
+/**
+ * ice_update_fd_swap - set register appropriately for a FD FV extraction
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @es: extraction sequence (length of array is determined by the block)
+ */
+static enum ice_status
+ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
+{
+ DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+ u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
+#define ICE_FD_FV_NOT_FOUND (-2)
+ s8 first_free = ICE_FD_FV_NOT_FOUND;
+ u8 used[ICE_MAX_FV_WORDS] = { 0 };
+ s8 orig_free, si;
+ u32 mask_sel = 0;
+ u8 i, j, k;
+
+ bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+
+ /* This code assumes that the Flow Director field vectors are assigned
+ * from the end of the FV indexes working towards the zero index, that
+ * only complete fields will be included and will be consecutive, and
+ * that there are no gaps between valid indexes.
+ */
+
+ /* Determine swap fields present */
+ for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
+ /* Find the first free entry, assuming right to left population.
+ * This is where we can start adding additional pairs if needed.
+ */
+ if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
+ ICE_PROT_INVALID)
+ first_free = i - 1;
+
+ for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
+ if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
+ es[i].off == ice_fd_pairs[j].off) {
+ set_bit(j, pair_list);
+ pair_start[j] = i;
+ }
+ }
+
+ orig_free = first_free;
+
+ /* determine missing swap fields that need to be added */
+ for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
+ u8 bit1 = test_bit(i + 1, pair_list);
+ u8 bit0 = test_bit(i, pair_list);
+
+ if (bit0 ^ bit1) {
+ u8 index;
+
+ /* add the appropriate 'paired' entry */
+ if (!bit0)
+ index = i;
+ else
+ index = i + 1;
+
+ /* check for room */
+ if (first_free + 1 < (s8)ice_fd_pairs[index].count)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* place in extraction sequence */
+ for (k = 0; k < ice_fd_pairs[index].count; k++) {
+ es[first_free - k].prot_id =
+ ice_fd_pairs[index].prot_id;
+ es[first_free - k].off =
+ ice_fd_pairs[index].off + (k * 2);
+
+ if (k > first_free)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* keep track of non-relevant fields */
+ mask_sel |= BIT(first_free - k);
+ }
+
+ pair_start[index] = first_free;
+ first_free -= ice_fd_pairs[index].count;
+ }
+ }
+
+ /* fill in the swap array */
+ si = hw->blk[ICE_BLK_FD].es.fvw - 1;
+ while (si >= 0) {
+ u8 indexes_used = 1;
+
+ /* assume flat at this index */
+#define ICE_SWAP_VALID 0x80
+ used[si] = si | ICE_SWAP_VALID;
+
+ if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
+ si -= indexes_used;
+ continue;
+ }
+
+ /* check for a swap location */
+ for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
+ if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
+ es[si].off == ice_fd_pairs[j].off) {
+ u8 idx;
+
+ /* determine the appropriate matching field */
+ idx = j + ((j % 2) ? -1 : 1);
+
+ indexes_used = ice_fd_pairs[idx].count;
+ for (k = 0; k < indexes_used; k++) {
+ used[si - k] = (pair_start[idx] - k) |
+ ICE_SWAP_VALID;
+ }
+
+ break;
+ }
+
+ si -= indexes_used;
+ }
+
+ /* for each set of 4 swap and 4 inset indexes, write the appropriate
+ * register
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
+ u32 raw_swap = 0;
+ u32 raw_in = 0;
+
+ for (k = 0; k < 4; k++) {
+ u8 idx;
+
+ idx = (j * 4) + k;
+ if (used[idx] && !(mask_sel & BIT(idx))) {
+ raw_swap |= used[idx] << (k * BITS_PER_BYTE);
+#define ICE_INSET_DFLT 0x9f
+ raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
+ }
+ }
+
+ /* write the appropriate swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ /* write the appropriate inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
+ }
+
+ /* initially clear the mask select for this profile */
+ ice_update_fd_mask(hw, prof_id, 0);
+
+ return 0;
+}
+
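/*
 * A standalone sketch (not from this patch) of the register packing at the
 * end of ice_update_fd_swap() above: four per-word swap indexes, each
 * already OR'ed with the 0x80 valid flag, are packed one byte per
 * field-vector word into a single 32-bit GLQF_FDSWAP value. The byte values
 * below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned char used[4] = { 0x9f, 0x9e, 0x9d, 0x9c };
	unsigned int raw_swap = 0;
	int k;

	for (k = 0; k < 4; k++)
		raw_swap |= (unsigned int)used[k] << (k * 8);

	printf("raw_swap = %08x\n", raw_swap);	/* prints 9c9d9e9f */
	return 0;
}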
+/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -2939,7 +3776,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
enum ice_status status;
- u32 byte = 0;
+ u8 byte = 0;
u8 prof_id;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -2953,6 +3790,18 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
+ if (blk == ICE_BLK_FD) {
+ /* For Flow Director block, the extraction sequence may
+ * need to be altered in the case where there are paired
+ * fields that have no match. This is necessary because
+ * for Flow Director, src and dest fields need to be paired
+ * for filter programming and these values are swapped
+ * during Tx.
+ */
+ status = ice_update_fd_swap(hw, prof_id, es);
+ if (status)
+ goto err_ice_add_prof;
+ }
/* and write new es */
ice_write_es(hw, blk, prof_id, es);
@@ -2962,8 +3811,10 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* add profile info */
prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
- if (!prof)
+ if (!prof) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof;
+ }
prof->profile_cookie = id;
prof->prof_id = prof_id;
@@ -2972,7 +3823,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* build list of ptgs */
while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u32 bit;
+ u8 bit;
if (!ptypes[byte]) {
bytes--;
@@ -3006,7 +3857,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
break;
/* nothing left in byte, then exit */
- m = ~((1 << (bit + 1)) - 1);
+ m = ~(u8)((1 << (bit + 1)) - 1);
if (!(ptypes[byte] & m))
break;
}
@@ -3703,8 +4554,10 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
t->tcam[i].prof_id,
t->tcam[i].ptg, vsig, 0, 0,
vl_msk, dc_msk, nm_msk);
- if (status)
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
+ }
/* log change */
list_add(&p->list_entry, chg);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index c7b5e1a6ea2b..568ea519af51 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,14 @@
#define ICE_PKG_CNT 4
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *port);
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
struct ice_fv_word *es);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 0fb3fe3ff3ea..a6f391eac8ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -20,7 +20,7 @@ struct ice_fv {
/* Package and segment headers and tables */
struct ice_pkg_hdr {
- struct ice_pkg_ver format_ver;
+ struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[1];
};
@@ -30,9 +30,9 @@ struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE 0x00000010
__le32 seg_type;
- struct ice_pkg_ver seg_ver;
+ struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
- char seg_name[ICE_PKG_NAME_SIZE];
+ char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
@@ -75,7 +75,7 @@ struct ice_buf_table {
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
- __le32 track_id;
+ __le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
@@ -149,6 +149,7 @@ struct ice_buf_hdr {
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
@@ -291,6 +292,38 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
+/* Tunnel enabling */
+
+enum ice_tunnel_type {
+ TNL_VXLAN = 0,
+ TNL_GENEVE,
+ TNL_LAST = 0xFF,
+ TNL_ALL = 0xFF,
+};
+
+struct ice_tunnel_type_scan {
+ enum ice_tunnel_type type;
+ const char *label_prefix;
+};
+
+struct ice_tunnel_entry {
+ enum ice_tunnel_type type;
+ u16 boost_addr;
+ u16 port;
+ u16 ref;
+ struct ice_boost_tcam_entry *boost_entry;
+ u8 valid;
+ u8 in_use;
+ u8 marked;
+};
+
+#define ICE_TUNNEL_MAX_ENTRIES 16
+
+struct ice_tunnel_table {
+ struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
+ u16 count;
+};
+
struct ice_pkg_es {
__le16 count;
__le16 offset;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index 3de862a3c789..d74e5290677f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -42,7 +42,10 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
-
+ /* GRE */
+ /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
+ sizeof_field(struct gre_full_hdr, key)),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
@@ -134,6 +137,18 @@ static const u32 ice_ptypes_sctp_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outermost/First GRE header */
+static const u32 ice_ptypes_gre_of[] = {
+ 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
+ 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
enum ice_block blk;
@@ -178,6 +193,40 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
return 0;
}
+/* Sizes of fixed known protocol headers without header options */
+#define ICE_FLOW_PROT_HDR_SZ_MAC 14
+#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
+#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
+#define ICE_FLOW_PROT_HDR_SZ_TCP 20
+#define ICE_FLOW_PROT_HDR_SZ_UDP 8
+#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
+
+/**
+ * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose header size is to be determined
+ */
+static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
+{
+ u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
+
+ /* L3 headers */
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
+
+ /* L4 headers */
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
+ sz += ICE_FLOW_PROT_HDR_SZ_TCP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
+ sz += ICE_FLOW_PROT_HDR_SZ_UDP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
+ sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
+
+ return sz;
+}
+
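/*
 * A standalone sketch (not from this patch) of the fixed-size arithmetic in
 * ice_flow_calc_seg_sz() above: an IPv4/TCP segment's known headers add up
 * to 14 (MAC) + 20 (IPv4) + 20 (TCP) = 54 bytes. The header bit values
 * below are hypothetical.
 */
#include <stdio.h>

#define HDR_IPV4_MODEL 0x04U
#define HDR_IPV6_MODEL 0x08U
#define HDR_TCP_MODEL  0x40U
#define HDR_UDP_MODEL  0x80U

static unsigned int seg_sz_model(unsigned int hdrs)
{
	unsigned int sz = 14;			/* Ethernet */

	if (hdrs & HDR_IPV4_MODEL)
		sz += 20;
	else if (hdrs & HDR_IPV6_MODEL)
		sz += 40;

	if (hdrs & HDR_TCP_MODEL)
		sz += 20;
	else if (hdrs & HDR_UDP_MODEL)
		sz += 8;

	return sz;
}

int main(void)
{
	printf("IPv4/TCP: %u bytes\n", seg_sz_model(HDR_IPV4_MODEL | HDR_TCP_MODEL));
	return 0;
}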
/**
* ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
* @params: information about the flow to be processed
@@ -225,6 +274,12 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
src = (const unsigned long *)ice_ptypes_sctp_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
+ if (!i) {
+ src = (const unsigned long *)ice_ptypes_gre_of;
+ bitmap_and(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ }
}
}
@@ -275,6 +330,9 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
prot_id = ICE_PROT_SCTP_IL;
break;
+ case ICE_FLOW_FIELD_IDX_GRE_KEYID:
+ prot_id = ICE_PROT_GRE_OF;
+ break;
default:
return ICE_ERR_NOT_IMPL;
}
@@ -324,6 +382,81 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
}
/**
+ * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose raw fields are to be extracted
+ */
+static enum ice_status
+ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
+ u8 seg)
+{
+ u16 fv_words;
+ u16 hdrs_sz;
+ u8 i;
+
+ if (!params->prof->segs[seg].raws_cnt)
+ return 0;
+
+ if (params->prof->segs[seg].raws_cnt >
+ ARRAY_SIZE(params->prof->segs[seg].raws))
+ return ICE_ERR_MAX_LIMIT;
+
+ /* Offsets within the segment headers are not supported */
+ hdrs_sz = ice_flow_calc_seg_sz(params, seg);
+ if (!hdrs_sz)
+ return ICE_ERR_PARAM;
+
+ fv_words = hw->blk[params->blk].es.fvw;
+
+ for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
+ struct ice_flow_seg_fld_raw *raw;
+ u16 off, cnt, j;
+
+ raw = &params->prof->segs[seg].raws[i];
+
+ /* Storing extraction information */
+ raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
+ raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
+ ICE_FLOW_FV_EXTRACT_SZ;
+ raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
+ BITS_PER_BYTE;
+ raw->info.xtrct.idx = params->es_cnt;
+
+ /* Determine the number of field vector entries this raw field
+ * consumes.
+ */
+ cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
+ (raw->info.src.last * BITS_PER_BYTE),
+ (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
+ off = raw->info.xtrct.off;
+ for (j = 0; j < cnt; j++) {
+ u16 idx;
+
+ /* Make sure the number of extraction sequence entries
+ * required does not exceed the block's capability
+ */
+ if (params->es_cnt >= hw->blk[params->blk].es.count ||
+ params->es_cnt >= ICE_MAX_FV_WORDS)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = raw->info.xtrct.prot_id;
+ params->es[idx].off = off;
+ params->es_cnt++;
+ off += ICE_FLOW_FV_EXTRACT_SZ;
+ }
+ }
+
+ return 0;
+}
+
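/*
 * A standalone sketch (not from this patch) of the window math in
 * ice_flow_xtract_raws() above. Extraction words are 2 bytes wide
 * (ICE_FLOW_FV_EXTRACT_SZ), so a raw match is aligned down to a word
 * boundary, the leftover byte becomes a bit displacement, and the number of
 * words consumed is the covered bit span rounded up to 16. The offset and
 * length below are hypothetical.
 */
#include <stdio.h>

#define FV_EXTRACT_SZ_MODEL 2
#define BITS_PER_BYTE_MODEL 8

int main(void)
{
	unsigned int off = 3, len = 4;	/* match 4 bytes at segment offset 3 */
	unsigned int word_off = (off / FV_EXTRACT_SZ_MODEL) * FV_EXTRACT_SZ_MODEL;
	unsigned int disp = (off % FV_EXTRACT_SZ_MODEL) * BITS_PER_BYTE_MODEL;
	unsigned int span = disp + len * BITS_PER_BYTE_MODEL;
	unsigned int cnt = (span + FV_EXTRACT_SZ_MODEL * BITS_PER_BYTE_MODEL - 1) /
			   (FV_EXTRACT_SZ_MODEL * BITS_PER_BYTE_MODEL);

	/* bytes 3..6 touch the words at offsets 2, 4 and 6 -> 3 words */
	printf("word_off=%u disp=%u cnt=%u\n", word_off, disp, cnt);
	return 0;
}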
+/**
* ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
@@ -349,6 +482,11 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
if (status)
return status;
}
+
+ /* Process raw matching bytes */
+ status = ice_flow_xtract_raws(hw, params, i);
+ if (status)
+ return status;
}
return status;
@@ -373,10 +511,8 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
return status;
switch (params->blk) {
+ case ICE_BLK_FD:
case ICE_BLK_RSS:
- /* Only header information is provided for RSS configuration.
- * No further processing is needed.
- */
status = 0;
break;
default:
@@ -458,6 +594,43 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
}
/**
+ * ice_dealloc_flow_entry - Deallocate flow entry memory
+ * @hw: pointer to the HW struct
+ * @entry: flow entry to be removed
+ */
+static void
+ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
+{
+ if (!entry)
+ return;
+
+ if (entry->entry)
+ devm_kfree(ice_hw_to_dev(hw), entry->entry);
+
+ devm_kfree(ice_hw_to_dev(hw), entry);
+}
+
+/**
+ * ice_flow_rem_entry_sync - Remove a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry: flow entry to be removed
+ */
+static enum ice_status
+ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
+ struct ice_flow_entry *entry)
+{
+ if (!entry)
+ return ICE_ERR_BAD_PTR;
+
+ list_del(&entry->l_entry);
+
+ ice_dealloc_flow_entry(hw, entry);
+
+ return 0;
+}
+
+/**
* ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
* @hw: pointer to the HW struct
* @blk: classification stage
@@ -544,6 +717,21 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
{
enum ice_status status;
+ /* Remove all remaining flow entries before removing the flow profile */
+ if (!list_empty(&prof->entries)) {
+ struct ice_flow_entry *e, *t;
+
+ mutex_lock(&prof->entries_lock);
+
+ list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
+ status = ice_flow_rem_entry_sync(hw, blk, e);
+ if (status)
+ break;
+ }
+
+ mutex_unlock(&prof->entries_lock);
+ }
+
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
@@ -629,7 +817,7 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @segs_cnt: number of packet segments provided
* @prof: stores the returned flow profile added
*/
-static enum ice_status
+enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
@@ -667,7 +855,7 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-static enum ice_status
+enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
@@ -691,6 +879,113 @@ out:
}
/**
+ * ice_flow_add_entry - Add a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: ID of the profile to add a new flow entry to
+ * @entry_id: unique ID to identify this flow entry
+ * @vsi_handle: software VSI handle for the flow entry
+ * @prio: priority of the flow entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @entry_h: pointer to buffer that receives the new flow entry's handle
+ */
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
+ void *data, u64 *entry_h)
+{
+ struct ice_flow_entry *e = NULL;
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ /* No flow entry data is expected for RSS */
+ if (!entry_h || (!data && blk != ICE_BLK_RSS))
+ return ICE_ERR_BAD_PTR;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ } else {
+ /* Allocate memory for the entry being added and associate
+ * the VSI to the found flow profile
+ */
+ e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
+ if (!e)
+ status = ICE_ERR_NO_MEMORY;
+ else
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ }
+
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+ if (status)
+ goto out;
+
+ e->id = entry_id;
+ e->vsi_handle = vsi_handle;
+ e->prof = prof;
+ e->priority = prio;
+
+ switch (blk) {
+ case ICE_BLK_FD:
+ case ICE_BLK_RSS:
+ break;
+ default:
+ status = ICE_ERR_NOT_IMPL;
+ goto out;
+ }
+
+ mutex_lock(&prof->entries_lock);
+ list_add(&e->l_entry, &prof->entries);
+ mutex_unlock(&prof->entries_lock);
+
+ *entry_h = ICE_FLOW_ENTRY_HNDL(e);
+
+out:
+ if (status && e) {
+ if (e->entry)
+ devm_kfree(ice_hw_to_dev(hw), e->entry);
+ devm_kfree(ice_hw_to_dev(hw), e);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_entry - Remove a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry_h: handle to the flow entry to be removed
+ */
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
+ u64 entry_h)
+{
+ struct ice_flow_entry *entry;
+ struct ice_flow_prof *prof;
+ enum ice_status status = 0;
+
+ if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
+ return ICE_ERR_PARAM;
+
+ entry = ICE_FLOW_ENTRY_PTR(entry_h);
+
+ /* Retain the pointer to the flow profile as the entry will be freed */
+ prof = entry->prof;
+
+ if (prof) {
+ mutex_lock(&prof->entries_lock);
+ status = ice_flow_rem_entry_sync(hw, blk, entry);
+ mutex_unlock(&prof->entries_lock);
+ }
+
+ return status;
+}
+
+/**
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
* @seg: packet segment the field being set belongs to
* @fld: field to be set
@@ -752,7 +1047,7 @@ ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
* create the content of a match entry. This function should only be used for
* fixed-size data structures.
*/
-static void
+void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
{
@@ -762,6 +1057,42 @@ ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
}
+/**
+ * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
+ * @seg: packet segment the field being set belongs to
+ * @off: offset of the raw field from the beginning of the segment in bytes
+ * @len: length of the raw pattern to be matched
+ * @val_loc: location of the value to match from entry's input buffer
+ * @mask_loc: location of mask value from entry's input buffer
+ *
+ * This function specifies the offset of the raw field to be matched from the
+ * beginning of the specified packet segment, and the locations, in the form of
+ * byte offsets from the start of the input buffer for a flow entry, from where
+ * the value to match and the mask value are to be extracted. These locations are
+ * then stored in the flow profile. When adding flow entries to the associated
+ * flow profile, these locations can be used to quickly extract the values to
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+ u16 val_loc, u16 mask_loc)
+{
+ if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
+ seg->raws[seg->raws_cnt].off = off;
+ seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
+ seg->raws[seg->raws_cnt].info.src.val = val_loc;
+ seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
+ /* The "last" field is used to store the length of the field */
+ seg->raws[seg->raws_cnt].info.src.last = len;
+ }
+
+ /* Overflows of "raws" will be handled as an error condition later in
+ * the flow when this information is processed.
+ */
+ seg->raws_cnt++;
+}
+
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
@@ -945,6 +1276,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
#define ICE_RSS_OUTER_HEADERS 1
+#define ICE_RSS_INNER_HEADERS 2
/* Flow profile ID format:
* [0:31] - Packet match fields
@@ -1085,6 +1417,9 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS);
+ if (!status)
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+ addl_hdrs, ICE_RSS_INNER_HEADERS);
mutex_unlock(&hw->rss_locks);
return status;
@@ -1238,6 +1573,12 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
ICE_RSS_OUTER_HEADERS);
if (status)
break;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ r->hashed_flds,
+ r->packet_hdr,
+ ICE_RSS_INNER_HEADERS);
+ if (status)
+ break;
}
}
mutex_unlock(&hw->rss_locks);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 5558627bd5eb..3913da2116d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -43,6 +43,7 @@ enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
+ ICE_FLOW_SEG_HDR_GRE = 0x00000200,
};
enum ice_flow_field {
@@ -58,6 +59,8 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ /* GRE */
+ ICE_FLOW_FIELD_IDX_GRE_KEYID,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
@@ -125,6 +128,7 @@ enum ice_flow_priority {
};
#define ICE_FLOW_SEG_MAX 2
+#define ICE_FLOW_SEG_RAW_FLD_MAX 2
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
@@ -161,14 +165,38 @@ struct ice_flow_fld_info {
struct ice_flow_seg_xtrct xtrct;
};
+struct ice_flow_seg_fld_raw {
+ struct ice_flow_fld_info info;
+ u16 off; /* Offset from the start of the segment */
+};
+
struct ice_flow_seg_info {
u32 hdrs; /* Bitmask indicating protocol headers present */
u64 match; /* Bitmask indicating header fields to be matched */
u64 range; /* Bitmask indicating header fields matched as ranges */
struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
+
+ u8 raws_cnt; /* Number of raw fields to be matched */
+ struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};
+/* This structure describes a flow entry, and is tracked only in this file */
+struct ice_flow_entry {
+ struct list_head l_entry;
+
+ u64 id;
+ struct ice_flow_prof *prof;
+ /* Flow entry's content */
+ void *entry;
+ enum ice_flow_priority priority;
+ u16 vsi_handle;
+ u16 entry_sz;
+};
+
+#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
+#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
+
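/*
 * A standalone sketch (not from this patch) of the handle scheme above:
 * ICE_FLOW_ENTRY_HNDL() is just the entry's address cast to u64, and
 * ICE_FLOW_ENTRY_PTR() casts it back, giving callers an opaque 64-bit
 * token. A hypothetical userspace equivalent via uintptr_t:
 */
#include <stdint.h>
#include <stdio.h>

struct entry_model { int id; };

int main(void)
{
	struct entry_model e = { 42 };
	uint64_t handle = (uint64_t)(uintptr_t)&e;	/* like ICE_FLOW_ENTRY_HNDL */
	struct entry_model *p =
		(struct entry_model *)(uintptr_t)handle;	/* like ICE_FLOW_ENTRY_PTR */

	printf("id via handle: %d\n", p->id);
	return 0;
}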
struct ice_flow_prof {
struct list_head l_entry;
@@ -194,7 +222,24 @@ struct ice_rss_cfg {
u32 packet_hdr;
};
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
+enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_prof **prof);
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u64 entry_id, u16 vsi, enum ice_flow_priority prio,
+ void *data, u64 *entry_h);
+enum ice_status
+ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
+void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+ u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
new file mode 100644
index 000000000000..2418d4fff037
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_fltr.h"
+
+/**
+ * ice_fltr_free_list - free filter lists helper
+ * @dev: pointer to the device struct
+ * @h: pointer to the list head to be freed
+ *
+ * Helper function to free filter lists previously created using
+ * ice_fltr_add_mac_to_list
+ */
+void ice_fltr_free_list(struct device *dev, struct list_head *h)
+{
+ struct ice_fltr_list_entry *e, *tmp;
+
+ list_for_each_entry_safe(e, tmp, h, list_entry) {
+ list_del(&e->list_entry);
+ devm_kfree(dev, e);
+ }
+}
+
+/**
+ * ice_fltr_add_entry_to_list - allocate and add filter entry to list
+ * @dev: pointer to device needed by alloc function
+ * @info: filter info struct that gets added to the passed in list
+ * @list: pointer to the list to add the new filter entry to
+ */
+static int
+ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
+ struct list_head *list)
+{
+ struct ice_fltr_list_entry *entry;
+
+ entry = devm_kzalloc(dev, sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->fltr_info = *info;
+
+ INIT_LIST_HEAD(&entry->list_entry);
+ list_add(&entry->list_entry, list);
+
+ return 0;
+}
+
+/**
+ * ice_fltr_add_mac_list - add list of MAC filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+enum ice_status
+ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_add_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_mac_list - remove list of MAC filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+enum ice_status
+ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_remove_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_add_vlan_list - add list of VLAN filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_add_vlan(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_vlan_list - remove list of VLAN filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_remove_vlan(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_add_eth_list - add list of ethertype filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_add_eth_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_eth_list - remove list of ethertype filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_remove_eth_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_all - remove all filters associated with VSI
+ * @vsi: pointer to VSI struct
+ */
+void ice_fltr_remove_all(struct ice_vsi *vsi)
+{
+ ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+}
+
+/**
+ * ice_fltr_add_mac_to_list - add MAC filter info to existing list
+ * @vsi: pointer to VSI struct
+ * @list: list to add filter info to
+ * @mac: MAC address to add
+ * @action: filter action
+ */
+int
+ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
+ const u8 *mac, enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_info info = { 0 };
+
+ info.flag = ICE_FLTR_TX;
+ info.src_id = ICE_SRC_ID_VSI;
+ info.lkup_type = ICE_SW_LKUP_MAC;
+ info.fltr_act = action;
+ info.vsi_handle = vsi->idx;
+
+ ether_addr_copy(info.l_data.mac.mac_addr, mac);
+
+ return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
+ list);
+}
+
+/**
+ * ice_fltr_add_vlan_to_list - add VLAN filter info to existing list
+ * @vsi: pointer to VSI struct
+ * @list: list to add filter info to
+ * @vlan_id: VLAN ID to add
+ * @action: filter action
+ */
+static int
+ice_fltr_add_vlan_to_list(struct ice_vsi *vsi, struct list_head *list,
+ u16 vlan_id, enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_info info = { 0 };
+
+ info.flag = ICE_FLTR_TX;
+ info.src_id = ICE_SRC_ID_VSI;
+ info.lkup_type = ICE_SW_LKUP_VLAN;
+ info.fltr_act = action;
+ info.vsi_handle = vsi->idx;
+ info.l_data.vlan.vlan_id = vlan_id;
+
+ return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
+ list);
+}
+
+/**
+ * ice_fltr_add_eth_to_list - add ethertype filter info to existing list
+ * @vsi: pointer to VSI struct
+ * @list: list to add filter info to
+ * @ethertype: ethertype of packet that matches filter
+ * @flag: filter direction, Tx or Rx
+ * @action: filter action
+ */
+static int
+ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list,
+ u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_info info = { 0 };
+
+ info.flag = flag;
+ info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ info.fltr_act = action;
+ info.vsi_handle = vsi->idx;
+ info.l_data.ethertype_mac.ethertype = ethertype;
+
+ if (flag == ICE_FLTR_TX)
+ info.src_id = ICE_SRC_ID_VSI;
+ else
+ info.src_id = ICE_SRC_ID_LPORT;
+
+ return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
+ list);
+}
+
+/**
+ * ice_fltr_prepare_mac - add or remove MAC rule
+ * @vsi: pointer to VSI struct
+ * @mac: MAC address to add
+ * @action: action to be performed on filter match
+ * @mac_action: pointer to add or remove MAC function
+ */
+static enum ice_status
+ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*mac_action)(struct ice_vsi *,
+ struct list_head *))
+{
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ result = mac_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
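/*
 * A standalone sketch (not from this patch) of the callback shape used by
 * the ice_fltr_prepare_*() helpers above and below: the temp-list build,
 * action call and list teardown are shared, and add vs. remove differ only
 * in the function pointer passed in. Names below are hypothetical.
 */
#include <stdio.h>

static int add_action_model(int item)
{
	printf("add %d\n", item);
	return 0;
}

static int del_action_model(int item)
{
	printf("del %d\n", item);
	return 0;
}

static int prepare_model(int item, int (*action)(int))
{
	/* the real helpers build a one-entry list here and free it after */
	return action(item);
}

int main(void)
{
	prepare_model(1, add_action_model);	/* like ice_fltr_add_mac() */
	prepare_model(1, del_action_model);	/* like ice_fltr_remove_mac() */
	return 0;
}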
+/**
+ * ice_fltr_prepare_mac_and_broadcast - add or remove MAC and broadcast filter
+ * @vsi: pointer to VSI struct
+ * @mac: MAC address to add
+ * @action: action to be performed on filter match
+ * @mac_action: pointer to add or remove MAC function
+ */
+static enum ice_status
+ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*mac_action)
+ (struct ice_vsi *, struct list_head *))
+{
+ u8 broadcast[ETH_ALEN];
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ eth_broadcast_addr(broadcast);
+ if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
+ ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ result = mac_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_prepare_vlan - add or remove VLAN filter
+ * @vsi: pointer to VSI struct
+ * @vlan_id: VLAN ID to add
+ * @action: action to be performed on filter match
+ * @vlan_action: pointer to add or remove VLAN function
+ */
+static enum ice_status
+ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*vlan_action)(struct ice_vsi *,
+ struct list_head *))
+{
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
+ return ICE_ERR_NO_MEMORY;
+
+ result = vlan_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_prepare_eth - add or remove ethertype filter
+ * @vsi: pointer to VSI struct
+ * @ethertype: ethertype of packet to be filtered
+ * @flag: direction of packet, Tx or Rx
+ * @action: action to be performed on filter match
+ * @eth_action: pointer to add or remove ethertype function
+ */
+static enum ice_status
+ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*eth_action)(struct ice_vsi *,
+ struct list_head *))
+{
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
+ return ICE_ERR_NO_MEMORY;
+
+ result = eth_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_add_mac - add single MAC filter
+ * @vsi: pointer to VSI struct
+ * @mac: MAC to add
+ * @action: action to be performed on filter match
+ */
+enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list);
+}
+
+/**
+ * ice_fltr_add_mac_and_broadcast - add single MAC and broadcast
+ * @vsi: pointer to VSI struct
+ * @mac: MAC to add
+ * @action: action to be performed on filter match
+ */
+enum ice_status
+ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_mac_and_broadcast(vsi, mac, action,
+ ice_fltr_add_mac_list);
+}
+
+/**
+ * ice_fltr_remove_mac - remove MAC filter
+ * @vsi: pointer to VSI struct
+ * @mac: filter MAC to remove
+ * @action: action to remove
+ */
+enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list);
+}
+
+/**
+ * ice_fltr_add_vlan - add single VLAN filter
+ * @vsi: pointer to VSI struct
+ * @vlan_id: VLAN ID to add
+ * @action: action to be performed on filter match
+ */
+enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_vlan(vsi, vlan_id, action,
+ ice_fltr_add_vlan_list);
+}
+
+/**
+ * ice_fltr_remove_vlan - remove VLAN filter
+ * @vsi: pointer to VSI struct
+ * @vlan_id: filter VLAN to remove
+ * @action: action to remove
+ */
+enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_vlan(vsi, vlan_id, action,
+ ice_fltr_remove_vlan_list);
+}
+
+/**
+ * ice_fltr_add_eth - add specific ethertype filter
+ * @vsi: pointer to VSI struct
+ * @ethertype: ethertype of filter
+ * @flag: direction of packet to be filtered, Tx or Rx
+ * @action: action to be performed on filter match
+ */
+enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
+ ice_fltr_add_eth_list);
+}
+
+/**
+ * ice_fltr_remove_eth - remove ethertype filter
+ * @vsi: pointer to VSI struct
+ * @ethertype: ethertype of filter
+ * @flag: direction of filter
+ * @action: action to remove
+ */
+enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
+ u16 flag, enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
+ ice_fltr_remove_eth_list);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
new file mode 100644
index 000000000000..361cb4da9b43
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#ifndef _ICE_FLTR_H_
+#define _ICE_FLTR_H_
+
+void ice_fltr_free_list(struct device *dev, struct list_head *h);
+int
+ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
+ const u8 *mac, enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
+enum ice_status
+ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
+
+enum ice_status
+ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
+ enum ice_sw_fwd_act_type action);
+
+enum ice_status
+ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action);
+void ice_fltr_remove_all(struct ice_vsi *vsi);
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1d37a9f02c1c..1086c9f778b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,9 +6,6 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
-#define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096))
-#define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096))
-#define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096))
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD_HEAD_S 0
@@ -42,6 +39,7 @@
#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0)
#define PF_MBX_ARQLEN 0x0022E480
#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQCRIT_M BIT(30)
#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
#define PF_MBX_ARQT 0x0022E580
#define PF_MBX_ATQBAH 0x0022E180
@@ -50,6 +48,7 @@
#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN 0x0022E200
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
#define PRTDCB_GENC 0x00083000
@@ -58,6 +57,7 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
+#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
@@ -218,6 +218,11 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32))
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32))
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M ICE_M(0xFFFF, 0)
#define GL_MDCK_TX_TDPU 0x00049348
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00
@@ -289,6 +294,20 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
+#define GLQF_FD_CNT 0x00460018
+#define GLQF_FD_CNT_FD_BCNT_S 16
+#define GLQF_FD_CNT_FD_BCNT_M ICE_M(0x7FFF, 16)
+#define GLQF_FD_SIZE 0x00460010
+#define GLQF_FD_SIZE_FD_GSIZE_S 0
+#define GLQF_FD_SIZE_FD_GSIZE_M ICE_M(0x7FFF, 0)
+#define GLQF_FD_SIZE_FD_BSIZE_S 16
+#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16)
+#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4))
+#define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512))
+#define PFQF_FD_ENA 0x0043A000
+#define PFQF_FD_ENA_FD_ENA_M BIT(0)
+#define PFQF_FD_SIZE 0x00460100
#define GLDCB_RTCTQ_RXQNUM_S 0
#define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0)
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
@@ -332,6 +351,7 @@
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
+#define GLSTAT_FD_CNT0L(_i) (0x003A0000 + ((_i) * 8))
#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
@@ -342,6 +362,9 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
+#define VSIQF_FD_CNT_FD_GCNT_S 0
+#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 878e125d8b42..14dfbbc1b2cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -40,6 +40,104 @@ union ice_32byte_rx_desc {
} wb; /* writeback */
};
+struct ice_fltr_desc {
+ __le64 qidx_compq_space_stat;
+ __le64 dtype_cmd_vsi_fdid;
+};
+
+#define ICE_FXD_FLTR_QW0_QINDEX_S 0
+#define ICE_FXD_FLTR_QW0_QINDEX_M (0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S)
+#define ICE_FXD_FLTR_QW0_COMP_Q_S 11
+#define ICE_FXD_FLTR_QW0_COMP_Q_M BIT_ULL(ICE_FXD_FLTR_QW0_COMP_Q_S)
+#define ICE_FXD_FLTR_QW0_COMP_Q_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_S 12
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_M \
+ (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S)
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_FD_SPACE_S 14
+#define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S)
+#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST 0x2ULL
+
+#define ICE_FXD_FLTR_QW0_STAT_CNT_S 16
+#define ICE_FXD_FLTR_QW0_STAT_CNT_M \
+ (0x1FFFULL << ICE_FXD_FLTR_QW0_STAT_CNT_S)
+#define ICE_FXD_FLTR_QW0_STAT_ENA_S 29
+#define ICE_FXD_FLTR_QW0_STAT_ENA_M (0x3ULL << ICE_FXD_FLTR_QW0_STAT_ENA_S)
+#define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_S 31
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_M BIT_ULL(ICE_FXD_FLTR_QW0_EVICT_ENA_S)
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE 0x0ULL
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_TO_Q_S 32
+#define ICE_FXD_FLTR_QW0_TO_Q_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_S)
+#define ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_S 35
+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_PRI_S)
+#define ICE_FXD_FLTR_QW0_TO_Q_PRIO1 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_S 38
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_M \
+ (0x3ULL << ICE_FXD_FLTR_QW0_DPU_RECIPE_S)
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_DROP_S 40
+#define ICE_FXD_FLTR_QW0_DROP_M BIT_ULL(ICE_FXD_FLTR_QW0_DROP_S)
+#define ICE_FXD_FLTR_QW0_DROP_NO 0x0ULL
+#define ICE_FXD_FLTR_QW0_DROP_YES 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_S 41
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_FLEX_PRI_S)
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_NONE 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_MDID_S 44
+#define ICE_FXD_FLTR_QW0_FLEX_MDID_M (0xFULL << ICE_FXD_FLTR_QW0_FLEX_MDID_S)
+#define ICE_FXD_FLTR_QW0_FLEX_MDID0 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_VAL_S 48
+#define ICE_FXD_FLTR_QW0_FLEX_VAL_M \
+ (0xFFFFULL << ICE_FXD_FLTR_QW0_FLEX_VAL_S)
+#define ICE_FXD_FLTR_QW0_FLEX_VAL0 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_DTYPE_S 0
+#define ICE_FXD_FLTR_QW1_DTYPE_M (0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S)
+#define ICE_FXD_FLTR_QW1_PCMD_S 4
+#define ICE_FXD_FLTR_QW1_PCMD_M BIT_ULL(ICE_FXD_FLTR_QW1_PCMD_S)
+#define ICE_FXD_FLTR_QW1_PCMD_ADD 0x0ULL
+#define ICE_FXD_FLTR_QW1_PCMD_REMOVE 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_PROF_PRI_S 5
+#define ICE_FXD_FLTR_QW1_PROF_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_PROF_PRI_S)
+#define ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_PROF_S 8
+#define ICE_FXD_FLTR_QW1_PROF_M (0x3FULL << ICE_FXD_FLTR_QW1_PROF_S)
+#define ICE_FXD_FLTR_QW1_PROF_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_FD_VSI_S 14
+#define ICE_FXD_FLTR_QW1_FD_VSI_M (0x3FFULL << ICE_FXD_FLTR_QW1_FD_VSI_S)
+#define ICE_FXD_FLTR_QW1_SWAP_S 24
+#define ICE_FXD_FLTR_QW1_SWAP_M BIT_ULL(ICE_FXD_FLTR_QW1_SWAP_S)
+#define ICE_FXD_FLTR_QW1_SWAP_NOT_SET 0x0ULL
+#define ICE_FXD_FLTR_QW1_SWAP_SET 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
+#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
+#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28
+#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)
+#define ICE_FXD_FLTR_QW1_FDID_MDID_FD 0x05ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_S 32
+#define ICE_FXD_FLTR_QW1_FDID_M \
+ (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S)
+#define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL
+
struct ice_rx_ptype_decoded {
u32 ptype:10;
u32 known:1;
@@ -262,6 +360,12 @@ enum ice_rx_flex_desc_status_error_0_bits {
ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
+enum ice_rx_flex_desc_status_error_1_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
@@ -340,6 +444,7 @@ struct ice_tx_desc {
enum ice_tx_desc_dtype_value {
ICE_TX_DESC_DTYPE_DATA = 0x0,
ICE_TX_DESC_DTYPE_CTX = 0x1,
+ ICE_TX_DESC_DTYPE_FLTR_PROG = 0x8,
/* DESC_DONE - HW has completed write-back of descriptor */
ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
};
@@ -351,12 +456,14 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_EOP = 0x0001,
ICE_TX_DESC_CMD_RS = 0x0002,
ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ ICE_TX_DESC_CMD_DUMMY = 0x0010,
ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020,
ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040,
ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060,
ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100,
ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200,
ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300,
+ ICE_TX_DESC_CMD_RE = 0x0400,
};
#define ICE_TXD_QW1_OFFSET_S 16
@@ -413,6 +520,25 @@ enum ice_tx_ctx_desc_cmd_bits {
ICE_TX_CTX_DESC_RESERVED = 0x40
};
+enum ice_tx_ctx_desc_eipt_offload {
+ ICE_TX_CTX_EIPT_NONE = 0x0,
+ ICE_TX_CTX_EIPT_IPV6 = 0x1,
+ ICE_TX_CTX_EIPT_IPV4_NO_CSUM = 0x2,
+ ICE_TX_CTX_EIPT_IPV4 = 0x3
+};
+
+#define ICE_TXD_CTX_QW0_EIPLEN_S 2
+
+#define ICE_TXD_CTX_QW0_L4TUNT_S 9
+
+#define ICE_TXD_CTX_UDP_TUNNELING BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S)
+#define ICE_TXD_CTX_GRE_TUNNELING (0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S)
+
+#define ICE_TXD_CTX_QW0_NATLEN_S 12
+
+#define ICE_TXD_CTX_QW0_L4T_CS_S 23
+#define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S)
+
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
@@ -455,7 +581,7 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal do not write */
+ u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
/* macro to make the table lines short */
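The ICE_FXD_FLTR_QW0_*/QW1_* shift/mask pairs added above all target the two quadwords of struct ice_fltr_desc. Here is a freestanding sketch of composing a filter-programming descriptor from a small subset of them; the cpu_to_le64() step the real driver performs is elided, and the queue index and FDID values are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* subset of the definitions above, repeated so this compiles standalone */
#define ICE_FXD_FLTR_QW0_QINDEX_S       0
#define ICE_FXD_FLTR_QW0_QINDEX_M       (0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S)
#define ICE_FXD_FLTR_QW1_DTYPE_S        0
#define ICE_FXD_FLTR_QW1_DTYPE_M        (0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S)
#define ICE_FXD_FLTR_QW1_PCMD_S         4
#define ICE_FXD_FLTR_QW1_PCMD_ADD       0x0ULL
#define ICE_FXD_FLTR_QW1_FDID_S         32
#define ICE_TX_DESC_DTYPE_FLTR_PROG     0x8ULL

int main(void)
{
        uint64_t qw0 = 0, qw1 = 0;

        /* QW0: steer matching packets to queue index 3 */
        qw0 |= (3ULL << ICE_FXD_FLTR_QW0_QINDEX_S) & ICE_FXD_FLTR_QW0_QINDEX_M;

        /* QW1: mark this as a filter-programming descriptor adding FDID 42 */
        qw1 |= (ICE_TX_DESC_DTYPE_FLTR_PROG << ICE_FXD_FLTR_QW1_DTYPE_S) &
               ICE_FXD_FLTR_QW1_DTYPE_M;
        qw1 |= ICE_FXD_FLTR_QW1_PCMD_ADD << ICE_FXD_FLTR_QW1_PCMD_S;
        qw1 |= 42ULL << ICE_FXD_FLTR_QW1_FDID_S;

        printf("qidx_compq_space_stat=0x%016llx dtype_cmd_vsi_fdid=0x%016llx\n",
               (unsigned long long)qw0, (unsigned long long)qw1);
        return 0;
}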
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 2f256bf45efc..28b46cc9f5cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
#include "ice_dcb_lib.h"
/**
@@ -18,6 +19,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
+ case ICE_VSI_CTRL:
+ return "ICE_VSI_CTRL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
default:
@@ -37,7 +40,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
*/
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
- int i, ret = 0;
+ int ret = 0;
+ u16 i;
for (i = 0; i < vsi->num_rxq; i++)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
@@ -121,6 +125,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_CTRL:
case ICE_VSI_LB:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
@@ -185,6 +190,11 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
*/
vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
break;
+ case ICE_VSI_CTRL:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ vsi->num_q_vectors = 1;
+ break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
@@ -248,8 +258,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
- dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
- vsi->vsi_num, status);
+ dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
+ vsi->vsi_num, ice_stat_str(status));
kfree(ctxt);
}
@@ -320,7 +330,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
/* updates the PF for this cleared VSI */
pf->vsi[vsi->idx] = NULL;
- if (vsi->idx < pf->next_vsi)
+ if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
@@ -331,6 +341,25 @@ int ice_vsi_clear(struct ice_vsi *vsi)
}
/**
+ * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+
+ if (!q_vector->tx.ring)
+ return IRQ_HANDLED;
+
+#define FDIR_RX_DESC_CLEAN_BUDGET 64
+ ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_tx_irq(q_vector->tx.ring);
+
+ return IRQ_HANDLED;
+}
+
+/**
* ice_msix_clean_rings - MSIX mode Interrupt Handler
* @irq: interrupt number
* @data: pointer to a q_vector
@@ -381,8 +410,6 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
vsi->back = pf;
set_bit(__ICE_DOWN, vsi->state);
- vsi->idx = pf->next_vsi;
-
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
else
@@ -396,6 +423,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
+ case ICE_VSI_CTRL:
+ if (ice_vsi_alloc_arrays(vsi))
+ goto err_rings;
+
+ /* Setup ctrl VSI MSIX irq handler */
+ vsi->irq_handler = ice_msix_clean_ctrl_vsi;
+ break;
case ICE_VSI_VF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -409,12 +443,20 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
goto unlock_pf;
}
- /* fill VSI slot in the PF struct */
- pf->vsi[pf->next_vsi] = vsi;
+ if (vsi->type == ICE_VSI_CTRL) {
+ /* Use the last VSI slot as the index for the control VSI */
+ vsi->idx = pf->num_alloc_vsi - 1;
+ pf->ctrl_vsi_idx = vsi->idx;
+ pf->vsi[vsi->idx] = vsi;
+ } else {
+ /* fill slot and make note of the index */
+ vsi->idx = pf->next_vsi;
+ pf->vsi[pf->next_vsi] = vsi;
- /* prepare pf->next_vsi for next use */
- pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
- pf->next_vsi);
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
+ }
goto unlock_pf;
err_rings:
@@ -426,6 +468,48 @@ unlock_pf:
}
/**
+ * ice_alloc_fd_res - Allocate FD resource for a VSI
+ * @vsi: pointer to the ice_vsi
+ *
+ * This sets up the per-VSI Flow Director filter quotas.
+ *
+ * Returns 0 on success, or -EPERM when this VSI cannot use FD resources
+ * (missing capability, FD disabled, or a non-PF VSI type)
+ */
+static int ice_alloc_fd_res(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u32 g_val, b_val;
+
+ /* Flow Director filters are only allocated/assigned to the PF VSI
+ * that passes the traffic. The CTRL VSI is only used to add/delete
+ * filters, so no resources are allocated to it.
+ */
+
+ /* FD filters from guaranteed pool per VSI */
+ g_val = pf->hw.func_caps.fd_fltr_guar;
+ if (!g_val)
+ return -EPERM;
+
+ /* FD filters from best effort pool */
+ b_val = pf->hw.func_caps.fd_fltr_best_effort;
+ if (!b_val)
+ return -EPERM;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EPERM;
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EPERM;
+
+ vsi->num_gfltr = g_val / pf->num_alloc_vsi;
+
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+
+ return 0;
+}
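A worked example of the split performed above, with illustrative capability numbers (the real values come from pf->hw.func_caps): every VSI receives an even share of the guaranteed pool but the full best-effort quota, since the best-effort pool is shared on demand.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* illustrative capability values */
        uint32_t fd_fltr_guar = 1024;           /* guaranteed pool size */
        uint32_t fd_fltr_best_effort = 2048;    /* shared pool size */
        uint16_t num_alloc_vsi = 8;

        /* the same arithmetic as ice_alloc_fd_res() */
        uint32_t num_gfltr = fd_fltr_guar / num_alloc_vsi;
        uint32_t num_bfltr = fd_fltr_best_effort;

        printf("per-VSI guaranteed: %u, best-effort quota: %u\n",
               num_gfltr, num_bfltr);
        return 0;
}

Here each of the 8 VSIs is promised 128 guaranteed filters and may compete for up to 2048 best-effort entries.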
+
+/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
@@ -521,8 +605,8 @@ static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
if (status)
- dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
- vsi->vsi_num, status);
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
+ vsi->vsi_num, ice_stat_str(status));
}
/**
@@ -565,8 +649,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
- vsi->rss_table_size = cap->rss_table_size;
- vsi->rss_size = min_t(int, num_online_cpus(),
+ vsi->rss_table_size = (u16)cap->rss_table_size;
+ vsi->rss_size = min_t(u16, num_online_cpus(),
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
@@ -581,8 +665,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
case ICE_VSI_LB:
break;
default:
- dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n",
- vsi->type);
+ dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
+ ice_vsi_type_str(vsi->type));
break;
}
}
@@ -684,15 +768,15 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_RSS_QS_PER_VF;
- qcount_rx = min_t(int, rx_numq_tc, max_rss);
+ qcount_rx = min_t(u16, rx_numq_tc, max_rss);
if (!vsi->req_rxq)
- qcount_rx = min_t(int, qcount_rx,
+ qcount_rx = min_t(u16, qcount_rx,
vsi->rss_size);
}
}
/* find the (rounded up) power-of-2 of qcount */
- pow = order_base_2(qcount_rx);
+ pow = (u16)order_base_2(qcount_rx);
ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
@@ -752,6 +836,51 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
}
/**
+ * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ * @vsi: the VSI being configured
+ */
+static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+{
+ u8 dflt_q_group, dflt_q_prio;
+ u16 dflt_q, report_q, val;
+
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL)
+ return;
+
+ val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
+ ctxt->info.valid_sections |= cpu_to_le16(val);
+ dflt_q = 0;
+ dflt_q_group = 0;
+ report_q = 0;
+ dflt_q_prio = 0;
+
+ /* enable flow director filtering/programming */
+ val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
+ ctxt->info.fd_options = cpu_to_le16(val);
+ /* max of allocated flow director filters */
+ ctxt->info.max_fd_fltr_dedicated =
+ cpu_to_le16(vsi->num_gfltr);
+ /* max of shared flow director filters any VSI may program */
+ ctxt->info.max_fd_fltr_shared =
+ cpu_to_le16(vsi->num_bfltr);
+ /* default queue index within the VSI of the default FD */
+ val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
+ ICE_AQ_VSI_FD_DEF_Q_M);
+ /* target queue or queue group to the FD filter */
+ val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
+ ICE_AQ_VSI_FD_DEF_GRP_M);
+ ctxt->info.fd_def_q = cpu_to_le16(val);
+ /* queue index on which FD filter completion is reported */
+ val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
+ ICE_AQ_VSI_FD_REPORT_Q_M);
+ /* priority of the default qindex action */
+ val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
+ ICE_AQ_VSI_FD_DEF_PRIORITY_M);
+ ctxt->info.fd_report_opt = cpu_to_le16(val);
+}
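ice_set_fd_vsi_ctx() packs two subfields into each little-endian context word via the ICE_AQ_VSI_FD_* shifts and masks from ice_adminq_cmd.h, which are not part of this diff. A sketch of that packing with placeholder shift/mask values (the layout below is illustrative, not the real context format):

#include <stdint.h>
#include <stdio.h>

/* placeholder layout -- the real values live in ice_adminq_cmd.h */
#define VSI_FD_DEF_Q_S          0
#define VSI_FD_DEF_Q_M          (0x7FF << VSI_FD_DEF_Q_S)
#define VSI_FD_DEF_GRP_S        12
#define VSI_FD_DEF_GRP_M        (0x7 << VSI_FD_DEF_GRP_S)

int main(void)
{
        uint16_t dflt_q = 5, dflt_q_group = 1, val;

        /* mask each subfield into place; the caller would then apply
         * cpu_to_le16() before writing the context word
         */
        val = ((dflt_q << VSI_FD_DEF_Q_S) & VSI_FD_DEF_Q_M) |
              ((dflt_q_group << VSI_FD_DEF_GRP_S) & VSI_FD_DEF_GRP_M);

        printf("fd_def_q = 0x%04x\n", val);     /* 0x1005 */
        return 0;
}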
+
+/**
* ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
* @ctxt: the VSI context being set
* @vsi: the VSI being configured
@@ -776,13 +905,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
- case ICE_VSI_LB:
+ default:
dev_dbg(dev, "Unsupported VSI type %s\n",
ice_vsi_type_str(vsi->type));
return;
- default:
- dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
- return;
}
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
@@ -812,8 +938,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (!ctxt)
return -ENOMEM;
- ctxt->info = vsi->info;
switch (vsi->type) {
+ case ICE_VSI_CTRL:
case ICE_VSI_LB:
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
@@ -829,12 +955,15 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
}
ice_set_dflt_vsi_ctx(ctxt);
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ ice_set_fd_vsi_ctx(ctxt, vsi);
/* if the switch is in VEB mode, allow VSI loopback */
if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
/* Set LUT type and HASH type if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
+ vsi->type != ICE_VSI_CTRL) {
ice_set_rss_vsi_ctx(ctxt, vsi);
/* if updating VSI context, make sure to set valid_section:
* to indicate which section of VSI context being updated
@@ -941,7 +1070,7 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
- int start = 0, end = 0;
+ u16 start = 0, end = 0;
if (needed > res->end)
return -ENOMEM;
@@ -1024,6 +1153,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
struct device *dev;
u16 num_q_vectors;
+ int base;
dev = ice_pf_to_dev(pf);
/* SRIOV doesn't grab irq_tracker entries for each VSI */
@@ -1038,14 +1168,15 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
- vsi->idx);
- if (vsi->base_vector < 0) {
+ base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
+
+ if (base < 0) {
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
ice_get_free_res_count(pf->irq_tracker),
ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
return -ENOENT;
}
+ vsi->base_vector = (u16)base;
pf->num_avail_sw_msix -= num_q_vectors;
return 0;
@@ -1085,7 +1216,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct device *dev;
- int i;
+ u16 i;
dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
@@ -1178,7 +1309,7 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
u8 *lut;
dev = ice_pf_to_dev(pf);
- vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -1193,7 +1324,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
vsi->rss_table_size);
if (status) {
- dev_err(dev, "set_rss_lut failed, error %d\n", status);
+ dev_err(dev, "set_rss_lut failed, error %s\n",
+ ice_stat_str(status));
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
@@ -1215,7 +1347,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
- dev_err(dev, "set_rss_key failed, error %d\n", status);
+ dev_err(dev, "set_rss_key failed, error %s\n",
+ ice_stat_str(status));
err = -EIO;
}
@@ -1248,8 +1381,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
if (status)
- dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
- vsi->vsi_num, status);
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
+ vsi->vsi_num, ice_stat_str(status));
}
/**
@@ -1281,91 +1414,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for IPv6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for sctp4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for sctp6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-}
-
-/**
- * ice_add_mac_to_list - Add a MAC address filter entry to the list
- * @vsi: the VSI to be forwarded to
- * @add_list: pointer to the list which contains MAC filter entries
- * @macaddr: the MAC address to be added.
- *
- * Adds MAC address filter entry to the temp list
- *
- * Returns 0 on success or ENOMEM on failure.
- */
-int
-ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr)
-{
- struct ice_fltr_list_entry *tmp;
- struct ice_pf *pf = vsi->back;
-
- tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC);
- if (!tmp)
- return -ENOMEM;
-
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.vsi_handle = vsi->idx;
- ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, add_list);
-
- return 0;
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
}
/**
@@ -1415,54 +1514,21 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
}
/**
- * ice_free_fltr_list - free filter lists helper
- * @dev: pointer to the device struct
- * @h: pointer to the list head to be freed
- *
- * Helper function to free filter lists previously created using
- * ice_add_mac_to_list
- */
-void ice_free_fltr_list(struct device *dev, struct list_head *h)
-{
- struct ice_fltr_list_entry *e, *tmp;
-
- list_for_each_entry_safe(e, tmp, h, list_entry) {
- list_del(&e->list_entry);
- devm_kfree(dev, e);
- }
-}
-
-/**
* ice_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
* @vid: VLAN ID to be added
+ * @action: filter action to be performed on match
*/
-int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
+int
+ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
{
- struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
- tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
- tmp->fltr_info.vsi_handle = vsi->idx;
- tmp->fltr_info.l_data.vlan.vlan_id = vid;
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, &tmp_add_list);
-
- status = ice_add_vlan(&pf->hw, &tmp_add_list);
- if (!status) {
+ if (!ice_fltr_add_vlan(vsi, vid, action)) {
vsi->num_vlan++;
} else {
err = -ENODEV;
@@ -1470,7 +1536,6 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
vsi->vsi_num);
}
- ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
@@ -1483,41 +1548,25 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
*/
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
- struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
- list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return -ENOMEM;
-
- list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- list->fltr_info.vsi_handle = vsi->idx;
- list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- list->fltr_info.l_data.vlan.vlan_id = vid;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src_id = ICE_SRC_ID_VSI;
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- status = ice_remove_vlan(&pf->hw, &tmp_add_list);
+ status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!status) {
vsi->num_vlan--;
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
- vid, vsi->vsi_num, status);
+ dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
+ vid, vsi->vsi_num, ice_stat_str(status));
} else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
- vid, vsi->vsi_num, status);
+ dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
+ vid, vsi->vsi_num, ice_stat_str(status));
err = -EIO;
}
- ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
@@ -1547,6 +1596,32 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
}
/**
+ * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
+ * @hw: HW pointer
+ * @pf_q: index of the Rx queue in the PF's queue space
+ * @rxdid: flexible descriptor RXDID
+ * @prio: priority for the RXDID for this queue
+ */
+void
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
+{
+ u32 regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+
+ /* clear any previous values */
+ regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
+ QRXFLXP_CNTXT_RXDID_PRIO_M |
+ QRXFLXP_CNTXT_TS_M);
+
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+}
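A sketch of the read-modify-write pattern used by ice_write_qrxflxp_cntxt(), emulated over a plain array instead of MMIO. The field shifts and masks below are placeholders, not the real QRXFLXP_CNTXT layout from ice_hw_autogen.h.

#include <stdint.h>
#include <stdio.h>

/* placeholder field layout */
#define RXDID_IDX_S     0
#define RXDID_IDX_M     (0x3F << RXDID_IDX_S)
#define RXDID_PRIO_S    8
#define RXDID_PRIO_M    (0x7 << RXDID_PRIO_S)

static uint32_t regs[16];               /* stand-in for MMIO space */

static void write_qrxflxp_cntxt(uint16_t pf_q, uint32_t rxdid, uint32_t prio)
{
        uint32_t regval = regs[pf_q];   /* rd32() */

        /* clear any previous values, then set the new fields */
        regval &= ~(RXDID_IDX_M | RXDID_PRIO_M);
        regval |= (rxdid << RXDID_IDX_S) & RXDID_IDX_M;
        regval |= (prio << RXDID_PRIO_S) & RXDID_PRIO_M;

        regs[pf_q] = regval;            /* wr32() */
}

int main(void)
{
        write_qrxflxp_cntxt(2, 0x16, 3);
        printf("QRXFLXP_CNTXT[2] = 0x%08x\n", regs[2]);
        return 0;
}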
+
+/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
*
@@ -1671,7 +1746,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- u32 txq = 0, rxq = 0;
+ u16 txq = 0, rxq = 0;
int i, q;
for (i = 0; i < vsi->num_q_vectors; i++) {
@@ -1737,8 +1812,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -1761,6 +1837,12 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
enum ice_status status;
int ret = 0;
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.pvid)
+ return 0;
+
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -1783,8 +1865,9 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
- ena, status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
+ ena, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -1922,9 +2005,10 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
- ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- pf->hw.adminq.sq_last_status);
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
+ ice_stat_str(status),
+ ice_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -1991,47 +2075,6 @@ clear_reg_idx:
}
/**
- * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
- * @vsi: the VSI being configured
- * @add_rule: boolean value to add or remove ethertype filter rule
- */
-static void
-ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
-{
- struct ice_fltr_list_entry *list;
- struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
- struct device *dev;
-
- dev = ice_pf_to_dev(pf);
- list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return;
-
- list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
- list->fltr_info.fltr_act = ICE_DROP_PACKET;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src_id = ICE_SRC_ID_VSI;
- list->fltr_info.vsi_handle = vsi->idx;
- list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
-
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- if (add_rule)
- status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
- else
- status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
-
- if (status)
- dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
- vsi->vsi_num, status);
-
- ice_free_fltr_list(dev, &tmp_add_list);
-}
-
-/**
* ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
@@ -2039,45 +2082,25 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
*/
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
- struct ice_fltr_list_entry *list;
+ enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
+ enum ice_sw_fwd_act_type act);
struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
dev = ice_pf_to_dev(pf);
- list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return;
+ eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
- list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
- list->fltr_info.vsi_handle = vsi->idx;
- list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
-
- if (tx) {
- list->fltr_info.fltr_act = ICE_DROP_PACKET;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src_id = ICE_SRC_ID_VSI;
- } else {
- list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- list->fltr_info.flag = ICE_FLTR_RX;
- list->fltr_info.src_id = ICE_SRC_ID_LPORT;
- }
-
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- if (create)
- status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
+ if (tx)
+ status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
else
- status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
+ status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI);
if (status)
- dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
+ dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, status);
-
- ice_free_fltr_list(dev, &tmp_add_list);
+ vsi->vsi_num, ice_stat_str(status));
}
/**
@@ -2122,10 +2145,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
+ ice_alloc_fd_res(vsi);
+
if (ice_vsi_get_qs(vsi)) {
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
vsi->idx);
- goto unroll_get_qs;
+ goto unroll_vsi_alloc;
}
/* set RSS capabilities */
@@ -2140,6 +2165,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_get_qs;
switch (vsi->type) {
+ case ICE_VSI_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2164,20 +2190,23 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* so this handles those cases (i.e. adding the PF to a bridge
* without the 8021q module loaded).
*/
- ret = ice_vsi_add_vlan(vsi, 0);
+ ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
if (ret)
goto unroll_clear_rings;
ice_vsi_map_rings_to_vectors(vsi);
- /* Do not exit if configuring RSS had an issue, at least
- * receive traffic on first queue. Hence no need to capture
- * return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- ice_vsi_cfg_rss_lut_key(vsi);
- ice_vsi_set_rss_flow_fld(vsi);
- }
+ /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
+ if (vsi->type != ICE_VSI_CTRL)
+ /* Do not exit if configuring RSS had an issue, at
+ * least receive traffic on first queue. Hence no
+ * need to capture return value
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ ice_init_arfs(vsi);
break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
@@ -2223,8 +2252,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, status);
+ dev_err(dev, "VSI %d failed lan queue config, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
goto unroll_vector_base;
}
@@ -2239,9 +2268,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
*/
if (!ice_is_safe_mode(pf))
if (vsi->type == ICE_VSI_PF) {
- ice_vsi_add_rem_eth_mac(vsi, true);
-
- /* Tx LLDP packets */
+ ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
ice_cfg_sw_lldp(vsi, true, true);
}
@@ -2259,6 +2287,7 @@ unroll_vsi_init:
ice_vsi_delete(vsi);
unroll_get_qs:
ice_vsi_put_qs(vsi);
+unroll_vsi_alloc:
ice_vsi_clear(vsi);
return NULL;
@@ -2411,6 +2440,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
if (!locked)
rtnl_unlock();
}
+ } else if (vsi->type == ICE_VSI_CTRL) {
+ err = ice_vsi_open_ctrl(vsi);
}
return err;
@@ -2440,6 +2471,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
+ } else if (vsi->type == ICE_VSI_CTRL) {
+ ice_vsi_close(vsi);
}
}
@@ -2558,7 +2591,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
if (!ice_is_safe_mode(pf)) {
if (vsi->type == ICE_VSI_PF) {
- ice_vsi_add_rem_eth_mac(vsi, false);
+ ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
ice_cfg_sw_lldp(vsi, true, false);
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
@@ -2568,7 +2602,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -2673,15 +2707,13 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
&coalesce[i]);
- for (; i < vsi->num_q_vectors; i++) {
- struct ice_coalesce_stored coalesce_dflt = {
- .itr_tx = ICE_DFLT_TX_ITR,
- .itr_rx = ICE_DFLT_RX_ITR,
- .intrl = 0
- };
+ /* number of q_vectors increased, so assume coalesce settings were
+ * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
+ * the previous settings from q_vector 0 for all of the new q_vectors
+ */
+ for (; i < vsi->num_q_vectors; i++)
ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
- &coalesce_dflt);
- }
+ &coalesce[0]);
}
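A small sketch of the inheritance rule the comment above describes: vectors that existed before the rebuild get their saved settings back, and any newly added vectors copy vector 0 instead of falling back to driver defaults (values illustrative).

#include <stdio.h>

struct coalesce_stored {
        unsigned short itr_tx;
        unsigned short itr_rx;
        unsigned char intrl;
};

int main(void)
{
        /* three vectors saved before the rebuild, five present after */
        struct coalesce_stored saved[3] = {
                { 64, 50, 0 }, { 64, 50, 0 }, { 64, 50, 0 },
        };
        struct coalesce_stored now[5];
        int i;

        for (i = 0; i < 3; i++)         /* restore per-vector values */
                now[i] = saved[i];
        for (; i < 5; i++)              /* new vectors inherit vector 0 */
                now[i] = saved[0];

        for (i = 0; i < 5; i++)
                printf("vector %d: itr_rx=%u\n", i, now[i].itr_rx);
        return 0;
}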
/**
@@ -2746,6 +2778,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vsi;
ice_vsi_get_qs(vsi);
+
+ ice_alloc_fd_res(vsi);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -2754,6 +2788,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vsi;
switch (vsi->type) {
+ case ICE_VSI_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2773,17 +2808,19 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_map_rings_to_vectors(vsi);
if (ice_is_xdp_ena_vsi(vsi)) {
- vsi->num_xdp_txq = vsi->alloc_txq;
+ vsi->num_xdp_txq = vsi->alloc_rxq;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (ret)
goto err_vectors;
}
- /* Do not exit if configuring RSS had an issue, at least
- * receive traffic on first queue. Hence no need to capture
- * return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_vsi_cfg_rss_lut_key(vsi);
+ /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
+ if (vsi->type != ICE_VSI_CTRL)
+ /* Do not exit if configuring RSS had an issue, at
+ * least receive traffic on first queue. Hence no
+ * need to capture return value
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_vsi_cfg_rss_lut_key(vsi);
break;
case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -2814,8 +2851,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, status);
+ dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
if (init_vsi) {
ret = -EIO;
goto err_vectors;
@@ -2924,8 +2961,8 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs);
if (status) {
- dev_err(dev, "VSI %d failed TC config, error %d\n",
- vsi->vsi_num, status);
+ dev_err(dev, "VSI %d failed TC config, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
ret = -EIO;
goto out;
}
@@ -2985,33 +3022,27 @@ void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
}
/**
- * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
- * @vsi: the VSI being configured MAC filter
- * @macaddr: the MAC address to be added.
- * @set: Add or delete a MAC filter
- *
- * Adds or removes MAC address filter entry for VF VSI
+ * ice_status_to_errno - convert from enum ice_status to Linux errno
+ * @err: ice_status value to convert
*/
-enum ice_status
-ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
+int ice_status_to_errno(enum ice_status err)
{
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
-
- /* Update MAC filter list to be added or removed for a VSI */
- if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
- status = ICE_ERR_NO_MEMORY;
- goto cfg_mac_fltr_exit;
+ switch (err) {
+ case ICE_SUCCESS:
+ return 0;
+ case ICE_ERR_DOES_NOT_EXIST:
+ return -ENOENT;
+ case ICE_ERR_OUT_OF_RANGE:
+ return -ENOTTY;
+ case ICE_ERR_PARAM:
+ return -EINVAL;
+ case ICE_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case ICE_ERR_MAX_LIMIT:
+ return -EAGAIN;
+ default:
+ return -EINVAL;
}
-
- if (set)
- status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
- else
- status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
-
-cfg_mac_fltr_exit:
- ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
- return status;
}
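A sketch of the intended boundary pattern for the new helper: translate the device status code once and hand an ordinary errno up the stack. The enum values below are illustrative; the real definitions are in ice_status.h.

#include <errno.h>
#include <stdio.h>

enum ice_status {
        ICE_SUCCESS             = 0,
        ICE_ERR_PARAM           = -1,   /* illustrative values */
        ICE_ERR_NO_MEMORY       = -11,
        ICE_ERR_DOES_NOT_EXIST  = -17,
};

/* reduced mirror of the new helper */
static int ice_status_to_errno(enum ice_status err)
{
        switch (err) {
        case ICE_SUCCESS:
                return 0;
        case ICE_ERR_DOES_NOT_EXIST:
                return -ENOENT;
        case ICE_ERR_NO_MEMORY:
                return -ENOMEM;
        case ICE_ERR_PARAM:
        default:
                return -EINVAL;
        }
}

int main(void)
{
        /* convert once at the boundary, return errno to the caller */
        int err = ice_status_to_errno(ICE_ERR_DOES_NOT_EXIST);

        printf("errno: %d\n", err);     /* -2, i.e. -ENOENT */
        return 0;
}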
/**
@@ -3079,8 +3110,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
- vsi->vsi_num, status);
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
return -EIO;
}
@@ -3118,8 +3149,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
- dflt_vsi->vsi_num, status);
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
+ dflt_vsi->vsi_num, ice_stat_str(status));
return -EIO;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 04ca00799364..d80e6afa4511 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -8,12 +8,6 @@
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
-int
-ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr);
-
-void ice_free_fltr_list(struct device *dev, struct list_head *h);
-
void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
@@ -22,7 +16,8 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
+int
+ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
@@ -79,6 +74,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
bool ice_is_reset_in_progress(unsigned long *state);
+void
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio);
+
void ice_vsi_put_qs(struct ice_vsi *vsi);
void ice_vsi_dis_irq(struct ice_vsi *vsi);
@@ -97,6 +95,8 @@ void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
+int ice_status_to_errno(enum ice_status err);
+
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5b190c257124..082825e3cb39 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -8,6 +8,7 @@
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
@@ -133,38 +134,24 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
static int ice_init_mac_fltr(struct ice_pf *pf)
{
enum ice_status status;
- u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
+ u8 *perm_addr;
vsi = ice_get_main_vsi(pf);
if (!vsi)
return -EINVAL;
- /* To add a MAC filter, first add the MAC to a list and then
- * pass the list to ice_add_mac.
- */
-
- /* Add a unicast MAC filter so the VSI can get its packets */
- status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
- if (status)
- goto unregister;
-
- /* VSI needs to receive broadcast traffic, so add the broadcast
- * MAC address to the list as well.
- */
- eth_broadcast_addr(broadcast);
- status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
- if (status)
- goto unregister;
+ perm_addr = vsi->port_info->mac.perm_addr;
+ status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
+ if (!status)
+ return 0;
- return 0;
-unregister:
/* We aren't useful with no MAC filters, so unregister if we
* had an error
*/
- if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n",
- status);
+ if (vsi->netdev->reg_state == NETREG_REGISTERED) {
+ dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
+ ice_stat_str(status));
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
@@ -188,7 +175,8 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
+ if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
+ ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
@@ -209,7 +197,8 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
+ if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
+ ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
@@ -307,8 +296,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Remove MAC addresses in the unsync list */
- status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
- ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
+ status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
+ ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
if (status) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
@@ -319,8 +308,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Add MAC addresses in the sync list */
- status = ice_add_mac(hw, &vsi->tmp_sync_list);
- ice_free_fltr_list(dev, &vsi->tmp_sync_list);
+ status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
+ ice_fltr_free_list(dev, &vsi->tmp_sync_list);
/* If filter is added successfully or already exists, do not go into
* 'if' condition and report it as error. Instead continue processing
* rest of the function.
@@ -357,7 +346,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
vsi->current_netdev_flags &= ~IFF_ALLMULTI;
goto out_promisc;
}
- } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ } else {
+ /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
if (vsi->vlan_ena)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
@@ -462,7 +452,7 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- int i;
+ unsigned int i;
/* already prepared for reset */
if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
@@ -1017,8 +1007,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ret == ICE_ERR_AQ_NO_WORK)
break;
if (ret) {
- dev_err(dev, "%s Receive Queue event error %d\n", qtype,
- ret);
+ dev_err(dev, "%s Receive Queue event error %s\n", qtype,
+ ice_stat_str(ret));
break;
}
@@ -1123,7 +1113,7 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
*
* If not already scheduled, this puts the task into the work queue.
*/
-static void ice_service_task_schedule(struct ice_pf *pf)
+void ice_service_task_schedule(struct ice_pf *pf)
{
if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
!test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
@@ -1198,8 +1188,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
+ unsigned int i;
u32 reg;
- int i;
if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
/* Since the VF MDD event logging is rate limited, check if
@@ -1332,8 +1322,13 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* PF can be configured to reset the VF through ethtool
* private flag mdd-auto-reset-vf.
*/
- if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ ice_print_vf_rx_mdd_event(vf);
ice_reset_vf(&pf->vf[i], false);
+ }
}
}
@@ -1493,7 +1488,7 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
-
+ ice_sync_arfs_fltrs(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -1652,9 +1647,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
}
/* register for affinity change notifications */
- q_vector->affinity_notify.notify = ice_irq_affinity_notify;
- q_vector->affinity_notify.release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
+ struct irq_affinity_notify *affinity_notify;
+
+ affinity_notify = &q_vector->affinity_notify;
+ affinity_notify->notify = ice_irq_affinity_notify;
+ affinity_notify->release = ice_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, affinity_notify);
+ }
/* assign the mask for this irq */
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
@@ -1666,8 +1666,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector) {
vector--;
- irq_num = pf->msix_entries[base + vector].vector,
- irq_set_affinity_notifier(irq_num, NULL);
+ irq_num = pf->msix_entries[base + vector].vector;
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+ irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
@@ -1809,8 +1810,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n",
- status);
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
+ ice_stat_str(status));
goto clear_xdp_rings;
}
ice_vsi_assign_bpf_prog(vsi, prog);
@@ -1898,6 +1899,9 @@ free_qmap:
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
+ /* change number of XDP Tx queues to 0 */
+ vsi->num_xdp_txq = 0;
+
return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
}
@@ -1931,7 +1935,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
if (!ice_is_xdp_ena_vsi(vsi) && prog) {
- vsi->num_xdp_txq = vsi->alloc_txq;
+ vsi->num_xdp_txq = vsi->alloc_rxq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
@@ -2137,10 +2141,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
ret = IRQ_HANDLED;
- if (!test_bit(__ICE_DOWN, pf->state)) {
- ice_service_task_schedule(pf);
- ice_irq_dynamic_ena(hw, NULL, NULL);
- }
+ ice_service_task_schedule(pf);
+ ice_irq_dynamic_ena(hw, NULL, NULL);
return ret;
}
@@ -2247,7 +2249,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
return oicr_idx;
pf->num_avail_sw_msix -= 1;
- pf->oicr_idx = oicr_idx;
+ pf->oicr_idx = (u16)oicr_idx;
err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
@@ -2331,6 +2333,7 @@ static void ice_set_netdev_features(struct net_device *netdev)
dflt_features = NETIF_F_SG |
NETIF_F_HIGHDMA |
+ NETIF_F_NTUPLE |
NETIF_F_RXHASH;
csumo_features = NETIF_F_RXCSUM |
@@ -2342,13 +2345,27 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- tso_features = NETIF_F_TSO |
+ tso_features = NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_L4;
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
vlano_features | tso_features;
+ /* add support for HW_CSUM on packets with MPLS header */
+ netdev->mpls_features = NETIF_F_HW_CSUM;
+
/* enable features */
netdev->features |= netdev->hw_features;
/* encap and VLAN devices inherit default, csumo and tso features */
@@ -2411,7 +2428,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
err = register_netdev(vsi->netdev);
if (err)
- goto err_destroy_devlink_port;
+ goto err_free_netdev;
devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
@@ -2422,9 +2439,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
return 0;
+err_free_netdev:
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
err_destroy_devlink_port:
ice_devlink_destroy_port(pf);
-
return err;
}
@@ -2457,6 +2476,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
+ * ice_ctrl_vsi_setup - Set up a control VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ *
+ * Returns a pointer to the newly allocated VSI software struct on
+ * success, otherwise NULL.
+ */
+static struct ice_vsi *
+ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
+}
+
+/**
* ice_lb_vsi_setup - Set up a loopback VSI
* @pf: board private structure
* @pi: pointer to the port_info instance
@@ -2509,7 +2542,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
- ret = ice_vsi_add_vlan(vsi, vid);
+ ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!ret) {
vsi->vlan_ena = true;
set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
@@ -2594,12 +2627,22 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
+ status = ice_set_cpu_rx_rmap(vsi);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map for VSI %d, error %d\n",
+ vsi->vsi_num, status);
+ status = -EINVAL;
+ goto unroll_napi_add;
+ }
status = ice_init_mac_fltr(pf);
if (status)
- goto unroll_napi_add;
+ goto free_cpu_rx_map;
return status;
+free_cpu_rx_map:
+ ice_free_cpu_rx_rmap(vsi);
+
unroll_napi_add:
if (vsi) {
ice_napi_del(vsi);
@@ -2630,7 +2673,8 @@ unroll_vsi_setup:
static u16
ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
{
- u16 count = 0, bit;
+ unsigned long bit;
+ u16 count = 0;
mutex_lock(lock);
for_each_clear_bit(bit, pf_qmap, size)
@@ -2703,6 +2747,23 @@ static void ice_set_pf_caps(struct ice_pf *pf)
if (func_caps->common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
+ clear_bit(ICE_FLAG_FD_ENA, pf->flags);
+ if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
+ u16 unused;
+
+ /* ctrl_vsi_idx will be set to a valid value when flow director
+ * is setup by ice_init_fdir
+ */
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ set_bit(ICE_FLAG_FD_ENA, pf->flags);
+ /* force guaranteed filter pool for PF */
+ ice_alloc_fd_guar_item(&pf->hw, &unused,
+ func_caps->fd_fltr_guar);
+ /* force shared filter pool for PF */
+ ice_alloc_fd_shrd_item(&pf->hw, &unused,
+ func_caps->fd_fltr_best_effort);
+ }
+
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
@@ -2769,6 +2830,15 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_budget += needed;
v_left -= needed;
+ /* reserve one vector for flow director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ needed = ICE_FDIR_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ v_budget += needed;
+ v_left -= needed;
+ }
+
pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
@@ -2793,8 +2863,10 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
-/* 2 vectors for LAN (traffic + OICR) */
+/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
#define ICE_MIN_LAN_VECS 2
+#define ICE_MIN_RDMA_VECS 2
+#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
if (v_actual < ICE_MIN_LAN_VECS) {
/* error if we can't get minimum vectors */
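The reservation math above as a standalone sketch: with flow director enabled, one extra vector is budgeted on top of the LAN pair. ICE_FDIR_MSIX is taken as 1 here, which is an assumption; its definition is not part of this diff.

#include <stdio.h>

#define ICE_MIN_LAN_VECS        2
#define ICE_FDIR_MSIX           1       /* assumed value */

int main(void)
{
        int v_left = 8, v_budget = 0, needed;
        int fd_ena = 1;                 /* ICE_FLAG_FD_ENA set */

        needed = ICE_MIN_LAN_VECS;      /* LAN traffic + OICR */
        v_budget += needed;
        v_left -= needed;

        if (fd_ena) {                   /* reserve the flow director vector */
                needed = ICE_FDIR_MSIX;
                if (v_left < needed)
                        return 1;       /* mirrors no_hw_vecs_left_err */
                v_budget += needed;
                v_left -= needed;
        }

        printf("budget=%d left=%d\n", v_budget, v_left);
        return 0;
}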
@@ -2869,8 +2941,8 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/* populate SW interrupts pool with number of OS granted IRQs. */
- pf->num_avail_sw_msix = vectors;
- pf->irq_tracker->num_entries = vectors;
+ pf->num_avail_sw_msix = (u16)vectors;
+ pf->irq_tracker->num_entries = (u16)vectors;
pf->irq_tracker->end = pf->irq_tracker->num_entries;
return 0;
@@ -2902,9 +2974,9 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
}
if (new_tx)
- vsi->req_txq = new_tx;
+ vsi->req_txq = (u16)new_tx;
if (new_rx)
- vsi->req_rxq = new_rx;
+ vsi->req_rxq = (u16)new_rx;
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
@@ -2985,6 +3057,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
*status = ICE_ERR_NOT_SUPPORTED;
}
break;
+ case ICE_ERR_FW_DDP_MISMATCH:
+ dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
+ break;
case ICE_ERR_BUF_TOO_SHORT:
case ICE_ERR_CFG:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
@@ -3013,6 +3088,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
case ICE_AQ_RC_EBADMAN:
case ICE_AQ_RC_EBADBUF:
dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ /* poll for reset to complete */
+ if (ice_check_reset(hw))
+ dev_err(dev, "Error resetting device. Please reload the driver\n");
return;
default:
break;
@@ -3100,6 +3178,53 @@ static enum ice_status ice_send_version(struct ice_pf *pf)
}
/**
+ * ice_init_fdir - Initialize flow director VSI and configuration
+ * @pf: pointer to the PF instance
+ *
+ * returns 0 on success, negative on error
+ */
+static int ice_init_fdir(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *ctrl_vsi;
+ int err;
+
+ /* Side Band Flow Director needs to have a control VSI.
+ * Allocate it and store it in the PF.
+ */
+ ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
+ if (!ctrl_vsi) {
+ dev_dbg(dev, "could not create control VSI\n");
+ return -ENOMEM;
+ }
+
+ err = ice_vsi_open_ctrl(ctrl_vsi);
+ if (err) {
+ dev_dbg(dev, "could not open control VSI\n");
+ goto err_vsi_open;
+ }
+
+ mutex_init(&pf->hw.fdir_fltr_lock);
+
+ err = ice_fdir_create_dflt_rules(pf);
+ if (err)
+ goto err_fdir_rule;
+
+ return 0;
+
+err_fdir_rule:
+ ice_fdir_release_flows(&pf->hw);
+ ice_vsi_close(ctrl_vsi);
+err_vsi_open:
+ ice_vsi_release(ctrl_vsi);
+ if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[pf->ctrl_vsi_idx] = NULL;
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+ return err;
+}
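
ice_init_fdir() follows the standard kernel goto-unwind idiom: each error label tears down exactly what succeeded before it, in reverse order, with the control VSI slot in pf->vsi cleared last. A compact userspace illustration of the same shape (resources are mocked with printouts):

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("setup %s\n", name);
	return fail ? -1 : 0;
}

/* fail_at selects which setup step fails, to exercise the unwind */
static int init_with_unwind(int fail_at)
{
	int err;

	err = step("control VSI", fail_at == 1);
	if (err)
		return err;
	err = step("rings/irqs", fail_at == 2);
	if (err)
		goto err_vsi;
	err = step("default rules", fail_at == 3);
	if (err)
		goto err_rings;
	return 0;

err_rings:
	printf("teardown rings/irqs\n");
err_vsi:
	printf("teardown control VSI\n");
	return err;
}

int main(void)
{
	return init_with_unwind(3) ? 1 : 0;
}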
+
+/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @pf: pointer to the PF instance
*/
@@ -3123,7 +3248,7 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
if (!opt_fw_filename)
return NULL;
- snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
+ snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
ICE_DDP_PKG_PATH, dsn);
return opt_fw_filename;
@@ -3295,12 +3420,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_unroll;
+ goto err_init_vsi_unroll;
}
- /* Driver is mostly up */
- clear_bit(__ICE_DOWN, pf->state);
-
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
* the misc functionality and queue processing is combined in
@@ -3356,12 +3478,16 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_verify_cacheline_size(pf);
- /* If no DDP driven features have to be setup, return here */
+ /* If no DDP driven features have to be setup, we are done with probe */
if (ice_is_safe_mode(pf))
- return 0;
+ goto probe_done;
/* initialize DDP driven features */
+ /* Note: Flow director init failure is non-fatal to load */
+ if (ice_init_fdir(pf))
+ dev_err(dev, "could not initialize flow director\n");
+
/* Note: DCB init failure is non-fatal to load */
if (ice_init_pf_dcb(pf, false)) {
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
@@ -3373,6 +3499,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* print PCI link speed and width */
pcie_print_link_status(pf->pdev);
+probe_done:
+ /* ready to go, so clear down state bit */
+ clear_bit(__ICE_DOWN, pf->state);
return 0;
err_alloc_sw_unroll:
@@ -3384,6 +3513,7 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
+err_init_vsi_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
@@ -3421,6 +3551,9 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ mutex_destroy(&pf->hw.fdir_fltr_lock);
+ if (!ice_is_safe_mode(pf))
+ ice_remove_arfs(pf);
ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
@@ -3705,25 +3838,24 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
- /* When we change the MAC address we also have to change the MAC address
- * based filter rules that were created previously for the old MAC
- * address. So first, we remove the old filter rule using ice_remove_mac
- * and then create a new filter rule using ice_add_mac via
- * ice_vsi_cfg_mac_fltr function call for both add and/or remove
- * filters.
- */
- status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
- if (status) {
+ /* Clean up old MAC filter. Not an error if old filter doesn't exist */
+ status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
+ if (status && status != ICE_ERR_DOES_NOT_EXIST) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
- status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
- if (status) {
- err = -EADDRNOTAVAIL;
- goto err_update_filters;
+ /* Add filter for new MAC. If filter exists, just return success */
+ status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
+ return 0;
}
+ /* error if the new filter addition failed */
+ if (status)
+ err = -EADDRNOTAVAIL;
+
err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
@@ -3740,8 +3872,8 @@ err_update_filters:
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
- mac, status);
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
+ mac, ice_stat_str(status));
}
return 0;
}
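
The rewritten MAC-address path above is idempotent: a missing old filter (ICE_ERR_DOES_NOT_EXIST) and an already-present new filter (ICE_ERR_ALREADY_EXISTS) are both treated as success, so replaying the operation after a partial failure is harmless. A sketch of that tolerance pattern, with invented status codes:

#include <stdio.h>

enum status { ST_OK, ST_DOES_NOT_EXIST, ST_ALREADY_EXISTS, ST_FAIL };

static enum status remove_fltr(int present) { return present ? ST_OK : ST_DOES_NOT_EXIST; }
static enum status add_fltr(int present)    { return present ? ST_ALREADY_EXISTS : ST_OK; }

/* returns 0 on success, -1 only on an unexpected failure */
static int update_mac(int old_present, int new_present)
{
	enum status st;

	st = remove_fltr(old_present);
	if (st != ST_OK && st != ST_DOES_NOT_EXIST)
		return -1; /* only real errors are fatal */

	st = add_fltr(new_present);
	if (st == ST_ALREADY_EXISTS)
		return 0; /* desired state already reached */
	return st == ST_OK ? 0 : -1;
}

int main(void)
{
	printf("fresh update: %d\n", update_mac(1, 0));
	printf("replayed update: %d\n", update_mac(0, 1));
	return 0;
}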
@@ -3805,8 +3937,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
- netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
- status);
+ netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
+ ice_stat_str(status));
return -EIO;
}
@@ -3938,6 +4070,16 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
ret = ice_cfg_vlan_pruning(vsi, false, false);
+ if ((features & NETIF_F_NTUPLE) &&
+ !(netdev->features & NETIF_F_NTUPLE)) {
+ ice_vsi_manage_fdir(vsi, true);
+ ice_init_arfs(vsi);
+ } else if (!(features & NETIF_F_NTUPLE) &&
+ (netdev->features & NETIF_F_NTUPLE)) {
+ ice_vsi_manage_fdir(vsi, false);
+ ice_clear_arfs(vsi);
+ }
+
return ret;
}
@@ -4084,6 +4226,33 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
}
/**
+ * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
+ * @vsi: the VSI to be updated
+ * @rings: rings to work on
+ * @count: number of rings
+ */
+static void
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+ u16 count)
+{
+ struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+ u16 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_ring *ring;
+ u64 pkts, bytes;
+
+ ring = READ_ONCE(rings[i]);
+ ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ vsi_stats->tx_packets += pkts;
+ vsi_stats->tx_bytes += bytes;
+ vsi->tx_restart += ring->tx_stats.restart_q;
+ vsi->tx_busy += ring->tx_stats.tx_busy;
+ vsi->tx_linearize += ring->tx_stats.tx_linearize;
+ }
+}
+
+/**
* ice_update_vsi_ring_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
@@ -4110,15 +4279,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_lock();
/* update Tx rings counters */
- ice_for_each_txq(vsi, i) {
- ring = READ_ONCE(vsi->tx_rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
- vsi_stats->tx_packets += pkts;
- vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->tx_stats.restart_q;
- vsi->tx_busy += ring->tx_stats.tx_busy;
- vsi->tx_linearize += ring->tx_stats.tx_linearize;
- }
+ ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
@@ -4130,6 +4291,11 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
}
+ /* update XDP Tx rings counters */
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+ vsi->num_xdp_txq);
+
rcu_read_unlock();
}
@@ -4162,7 +4328,13 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF) {
cur_ns->rx_crc_errors = pf->stats.crc_errors;
cur_ns->rx_errors = pf->stats.crc_errors +
- pf->stats.illegal_bytes;
+ pf->stats.illegal_bytes +
+ pf->stats.rx_len_errors +
+ pf->stats.rx_undersize +
+ pf->hw_csum_rx_error +
+ pf->stats.rx_jabber +
+ pf->stats.rx_fragments +
+ pf->stats.rx_oversize;
cur_ns->rx_length_errors = pf->stats.rx_len_errors;
/* record drops from the port level */
cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
@@ -4177,6 +4349,7 @@ void ice_update_pf_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
+ u16 fd_ctr_base;
u8 port;
port = hw->port_info->lport;
@@ -4265,6 +4438,12 @@ void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_big, &cur_ps->tx_size_big);
+ fd_ctr_base = hw->fd_ctr_base;
+
+ ice_stat_update40(hw,
+ GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
+ pf->stat_prev_loaded, &prev_ps->fd_sb_match,
+ &cur_ps->fd_sb_match);
ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
&prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
@@ -4308,6 +4487,8 @@ void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
&prev_ps->rx_jabber, &cur_ps->rx_jabber);
+ cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
+
pf->stat_prev_loaded = true;
}
@@ -4493,6 +4674,62 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_open_ctrl - open control VSI for use
+ * @vsi: the VSI to open
+ *
+ * Initialization of the Control VSI
+ *
+ * Returns 0 on success, negative value on error
+ */
+int ice_vsi_open_ctrl(struct ice_vsi *vsi)
+{
+ char int_name[ICE_INT_NAME_STR_LEN];
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ /* allocate descriptors */
+ err = ice_vsi_setup_tx_rings(vsi);
+ if (err)
+ goto err_setup_tx;
+
+ err = ice_vsi_setup_rx_rings(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ err = ice_vsi_cfg(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
+ dev_driver_string(dev), dev_name(dev));
+ err = ice_vsi_req_irq_msix(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ ice_vsi_cfg_msix(vsi);
+
+ err = ice_vsi_start_all_rx_rings(vsi);
+ if (err)
+ goto err_up_complete;
+
+ clear_bit(__ICE_DOWN, vsi->state);
+ ice_vsi_ena_irq(vsi);
+
+ return 0;
+
+err_up_complete:
+ ice_down(vsi);
+err_setup_rx:
+ ice_vsi_free_rx_rings(vsi);
+err_setup_tx:
+ ice_vsi_free_tx_rings(vsi);
+
+ return err;
+}
+
+/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
@@ -4604,8 +4841,9 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
- dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
- status, vsi->idx, ice_vsi_type_str(type));
+ dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
+ ice_stat_str(status), vsi->idx,
+ ice_vsi_type_str(type));
return -EIO;
}
@@ -4659,6 +4897,11 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
* ice_rebuild - rebuild after reset
* @pf: PF to rebuild
* @reset_type: type of reset
+ *
+ * Do not rebuild VF VSI in this flow because that is already handled via
+ * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
+ * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't
+ * want to reset/rebuild all the VF VSIs twice.
*/
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
@@ -4674,7 +4917,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_init_all_ctrlq(hw);
if (ret) {
- dev_err(dev, "control queues init failed %d\n", ret);
+ dev_err(dev, "control queues init failed %s\n",
+ ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4690,7 +4934,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_clear_pf_cfg(hw);
if (ret) {
- dev_err(dev, "clear PF configuration failed %d\n", ret);
+ dev_err(dev, "clear PF configuration failed %s\n",
+ ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4704,7 +4949,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_get_caps(hw);
if (ret) {
- dev_err(dev, "ice_get_caps failed %d\n", ret);
+ dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
+ goto err_init_ctrlq;
+ }
+
+ ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (ret) {
+ dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4723,6 +4974,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_sched_init_port;
}
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
+ if (!rd32(hw, PFQF_FD_SIZE)) {
+ u16 unused, guar, b_effort;
+
+ guar = hw->func_caps.fd_fltr_guar;
+ b_effort = hw->func_caps.fd_fltr_best_effort;
+
+ /* force guaranteed filter pool for PF */
+ ice_alloc_fd_guar_item(hw, &unused, guar);
+ /* force shared filter pool for PF */
+ ice_alloc_fd_shrd_item(hw, &unused, b_effort);
+ }
+ }
+
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
@@ -4733,12 +4999,22 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
- err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF);
+ /* If Flow Director is active */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
if (err) {
- dev_err(dev, "VF VSI rebuild failed: %d\n", err);
+ dev_err(dev, "control VSI rebuild failed: %d\n", err);
goto err_vsi_rebuild;
}
+
+ /* replay HW Flow Director recipes */
+ if (hw->fdir_prof)
+ ice_fdir_replay_flows(hw);
+
+ /* replay Flow Director filters */
+ ice_fdir_replay_fltrs(pf);
+
+ ice_rebuild_arfs(pf);
}
ice_update_pf_netdev_link(pf);
@@ -4746,8 +5022,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
- dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
- ret);
+ dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
+ ice_stat_str(ret));
goto err_vsi_rebuild;
}
@@ -4795,7 +5071,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_pf *pf = vsi->back;
u8 count = 0;
- if (new_mtu == netdev->mtu) {
+ if (new_mtu == (int)netdev->mtu) {
netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
return 0;
}
@@ -4810,11 +5086,11 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
- if (new_mtu < netdev->min_mtu) {
+ if (new_mtu < (int)netdev->min_mtu) {
netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
return -EINVAL;
- } else if (new_mtu > netdev->max_mtu) {
+ } else if (new_mtu > (int)netdev->max_mtu) {
netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
netdev->max_mtu);
return -EINVAL;
@@ -4835,7 +5111,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- netdev->mtu = new_mtu;
+ netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
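
The casts added to ice_change_mtu() above keep both sides of each comparison signed: new_mtu arrives as an int while the netdev MTU bounds are unsigned, and comparing a negative int against an unsigned value would wrap and pass. A standalone rendering of the check, with illustrative bounds:

#include <stdio.h>

/* illustrative bounds; the real values come from the netdev */
static const unsigned int min_mtu = 68;
static const unsigned int max_mtu = 9702;

static int validate_mtu(int new_mtu)
{
	/* cast the unsigned bound, not the signed candidate: a negative
	 * new_mtu promoted to unsigned would wrap and look huge
	 */
	if (new_mtu < (int)min_mtu)
		return -1;
	if (new_mtu > (int)max_mtu)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", validate_mtu(-1), validate_mtu(1500),
	       validate_mtu(20000));
	return 0;
}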
@@ -4859,6 +5135,118 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * ice_aq_str - convert AQ err code to a string
+ * @aq_err: the AQ error code to convert
+ */
+const char *ice_aq_str(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_OK:
+ return "OK";
+ case ICE_AQ_RC_EPERM:
+ return "ICE_AQ_RC_EPERM";
+ case ICE_AQ_RC_ENOENT:
+ return "ICE_AQ_RC_ENOENT";
+ case ICE_AQ_RC_ENOMEM:
+ return "ICE_AQ_RC_ENOMEM";
+ case ICE_AQ_RC_EBUSY:
+ return "ICE_AQ_RC_EBUSY";
+ case ICE_AQ_RC_EEXIST:
+ return "ICE_AQ_RC_EEXIST";
+ case ICE_AQ_RC_EINVAL:
+ return "ICE_AQ_RC_EINVAL";
+ case ICE_AQ_RC_ENOSPC:
+ return "ICE_AQ_RC_ENOSPC";
+ case ICE_AQ_RC_ENOSYS:
+ return "ICE_AQ_RC_ENOSYS";
+ case ICE_AQ_RC_EMODE:
+ return "ICE_AQ_RC_EMODE";
+ case ICE_AQ_RC_ENOSEC:
+ return "ICE_AQ_RC_ENOSEC";
+ case ICE_AQ_RC_EBADSIG:
+ return "ICE_AQ_RC_EBADSIG";
+ case ICE_AQ_RC_ESVN:
+ return "ICE_AQ_RC_ESVN";
+ case ICE_AQ_RC_EBADMAN:
+ return "ICE_AQ_RC_EBADMAN";
+ case ICE_AQ_RC_EBADBUF:
+ return "ICE_AQ_RC_EBADBUF";
+ }
+
+ return "ICE_AQ_RC_UNKNOWN";
+}
+
+/**
+ * ice_stat_str - convert status error code to a string
+ * @stat_err: the status error code to convert
+ */
+const char *ice_stat_str(enum ice_status stat_err)
+{
+ switch (stat_err) {
+ case ICE_SUCCESS:
+ return "OK";
+ case ICE_ERR_PARAM:
+ return "ICE_ERR_PARAM";
+ case ICE_ERR_NOT_IMPL:
+ return "ICE_ERR_NOT_IMPL";
+ case ICE_ERR_NOT_READY:
+ return "ICE_ERR_NOT_READY";
+ case ICE_ERR_NOT_SUPPORTED:
+ return "ICE_ERR_NOT_SUPPORTED";
+ case ICE_ERR_BAD_PTR:
+ return "ICE_ERR_BAD_PTR";
+ case ICE_ERR_INVAL_SIZE:
+ return "ICE_ERR_INVAL_SIZE";
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ return "ICE_ERR_DEVICE_NOT_SUPPORTED";
+ case ICE_ERR_RESET_FAILED:
+ return "ICE_ERR_RESET_FAILED";
+ case ICE_ERR_FW_API_VER:
+ return "ICE_ERR_FW_API_VER";
+ case ICE_ERR_NO_MEMORY:
+ return "ICE_ERR_NO_MEMORY";
+ case ICE_ERR_CFG:
+ return "ICE_ERR_CFG";
+ case ICE_ERR_OUT_OF_RANGE:
+ return "ICE_ERR_OUT_OF_RANGE";
+ case ICE_ERR_ALREADY_EXISTS:
+ return "ICE_ERR_ALREADY_EXISTS";
+ case ICE_ERR_NVM_CHECKSUM:
+ return "ICE_ERR_NVM_CHECKSUM";
+ case ICE_ERR_BUF_TOO_SHORT:
+ return "ICE_ERR_BUF_TOO_SHORT";
+ case ICE_ERR_NVM_BLANK_MODE:
+ return "ICE_ERR_NVM_BLANK_MODE";
+ case ICE_ERR_IN_USE:
+ return "ICE_ERR_IN_USE";
+ case ICE_ERR_MAX_LIMIT:
+ return "ICE_ERR_MAX_LIMIT";
+ case ICE_ERR_RESET_ONGOING:
+ return "ICE_ERR_RESET_ONGOING";
+ case ICE_ERR_HW_TABLE:
+ return "ICE_ERR_HW_TABLE";
+ case ICE_ERR_DOES_NOT_EXIST:
+ return "ICE_ERR_DOES_NOT_EXIST";
+ case ICE_ERR_FW_DDP_MISMATCH:
+ return "ICE_ERR_FW_DDP_MISMATCH";
+ case ICE_ERR_AQ_ERROR:
+ return "ICE_ERR_AQ_ERROR";
+ case ICE_ERR_AQ_TIMEOUT:
+ return "ICE_ERR_AQ_TIMEOUT";
+ case ICE_ERR_AQ_FULL:
+ return "ICE_ERR_AQ_FULL";
+ case ICE_ERR_AQ_NO_WORK:
+ return "ICE_ERR_AQ_NO_WORK";
+ case ICE_ERR_AQ_EMPTY:
+ return "ICE_ERR_AQ_EMPTY";
+ case ICE_ERR_AQ_FW_CRITICAL:
+ return "ICE_ERR_AQ_FW_CRITICAL";
+ }
+
+ return "ICE_ERR_UNKNOWN";
+}
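
ice_aq_str() and ice_stat_str() map sparse negative codes to names with a switch. A designated-initializer lookup table is the usual alternative; the sketch below shows it, though the driver's codes span several disjoint ranges (-1..-20, -51.., -101..), so a dense table would either waste slots or need gap handling, which is presumably why the switch was chosen. Codes and names below are illustrative:

#include <stdio.h>

enum status { ERR_PARAM = -1, ERR_NOT_IMPL = -2, ERR_NOT_READY = -3 };

static const char * const status_names[] = {
	[0] = "OK",
	[1] = "ERR_PARAM",
	[2] = "ERR_NOT_IMPL",
	[3] = "ERR_NOT_READY",
};

static const char *status_str(int err)
{
	unsigned int idx = (unsigned int)-err;

	if (err > 0 || idx >= sizeof(status_names) / sizeof(status_names[0]) ||
	    !status_names[idx])
		return "ERR_UNKNOWN";
	return status_names[idx];
}

int main(void)
{
	printf("%s %s\n", status_str(ERR_PARAM), status_str(-99));
	return 0;
}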
+
+/**
* ice_set_rss - Set RSS keys and lut
* @vsi: Pointer to VSI structure
* @seed: RSS hash seed
@@ -4882,8 +5270,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4892,8 +5281,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4924,8 +5314,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4934,8 +5325,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -5002,8 +5394,9 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
- bmode, status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
+ bmode, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -5072,8 +5465,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
- netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
- mode, status, hw->adminq.sq_last_status);
+ netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
+ mode, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
return -EIO;
@@ -5100,6 +5494,16 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
pf->tx_timeout_count++;
+ /* Check if PFC is enabled for the TC to which the queue belongs.
+ * If yes, then the Tx timeout is not caused by a hung queue and
+ * there is no need to reset and rebuild
+ */
+ if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
+ dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
+ txqueue);
+ return;
+ }
+
/* now that we have an index, find the tx_ring struct */
for (i = 0; i < vsi->num_txq; i++)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
@@ -5158,6 +5562,70 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
+ * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ u16 port = ntohs(ti->port);
+ enum ice_status status;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ tnl_type = TNL_VXLAN;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ tnl_type = TNL_GENEVE;
+ break;
+ default:
+ netdev_err(netdev, "Unknown tunnel type\n");
+ return;
+ }
+
+ status = ice_create_tunnel(&pf->hw, tnl_type, port);
+ if (status == ICE_ERR_OUT_OF_RANGE)
+ netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
+ port);
+ else if (status)
+ netdev_err(netdev, "Error adding UDP tunnel - %s\n",
+ ice_stat_str(status));
+}
+
+/**
+ * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 port = ntohs(ti->port);
+ enum ice_status status;
+ bool retval;
+
+ retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
+ if (!retval) {
+ netdev_info(netdev, "port %d not found in UDP tunnels list\n",
+ port);
+ return;
+ }
+
+ status = ice_destroy_tunnel(&pf->hw, port, false);
+ if (status)
+ netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
+ port);
+}
+
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -5173,14 +5641,20 @@ int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
int err;
- if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
+ if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
return -EIO;
}
+ if (test_bit(__ICE_DOWN, pf->state)) {
+ netdev_err(netdev, "device is not ready yet\n");
+ return -EBUSY;
+ }
+
netif_carrier_off(netdev);
pi = vsi->port_info;
@@ -5213,6 +5687,10 @@ int ice_open(struct net_device *netdev)
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
+
+ /* Update existing tunnels information */
+ udp_tunnel_get_rx_info(netdev);
+
return err;
}
@@ -5263,21 +5741,21 @@ ice_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
len = skb_network_header(skb) - skb->data;
- if (len & ~(ICE_TXD_MACLEN_MAX))
+ if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_transport_header(skb) - skb_network_header(skb);
- if (len & ~(ICE_TXD_IPLEN_MAX))
+ if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len & ~(ICE_TXD_L4LEN_MAX))
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_inner_transport_header(skb) -
skb_inner_network_header(skb);
- if (len & ~(ICE_TXD_IPLEN_MAX))
+ if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
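
The header-length checks in ice_features_check() were rewritten from a mask test to two explicit conditions: the length must not exceed the hardware maximum, and it must be even, since the descriptors store header lengths in 2-byte units. The two forms only agree when the limit has the shape 2^k - 2; the demonstration below uses illustrative limits (the real ICE_TXD_*LEN_MAX values are defined elsewhere):

#include <stdio.h>

static int old_ok(unsigned int len, unsigned int max) { return !(len & ~max); }
static int new_ok(unsigned int len, unsigned int max) { return !(len > max || (len & 0x1)); }

int main(void)
{
	/* 0xFE = 2^8 - 2: forms agree; 0xF0 is not of that shape */
	unsigned int limits[] = { 0xFE, 0xF0 };

	for (unsigned int m = 0; m < 2; m++) {
		unsigned int diff = 0;

		for (unsigned int len = 0; len <= 512; len++)
			if (old_ok(len, limits[m]) != new_ok(len, limits[m]))
				diff++;
		printf("limit 0x%X: %u disagreements\n", limits[m], diff);
	}
	return 0;
}

The explicit form also states the intent directly, so a future change to the limit cannot silently break the test.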
@@ -5322,8 +5800,13 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bridge_setlink = ice_bridge_setlink,
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = ice_rx_flow_steer,
+#endif
.ndo_tx_timeout = ice_tx_timeout,
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_udp_tunnel_add = ice_udp_tunnel_add,
+ .ndo_udp_tunnel_del = ice_udp_tunnel_del,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 8beb675d676b..b049c1c30c88 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -172,7 +172,8 @@ void ice_release_nvm(struct ice_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+static enum ice_status
+ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
@@ -196,7 +197,7 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-enum ice_status
+static enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
@@ -367,6 +368,87 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
}
/**
+ * ice_get_netlist_ver_info - Get the netlist version information
+ * @hw: pointer to the HW struct
+ */
+static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
+{
+ struct ice_netlist_ver_info *ver = &hw->netlist_ver;
+ enum ice_status ret;
+ u32 id_blk_start;
+ __le16 raw_data;
+ u16 data, i;
+ u16 *buff;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+ buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
+ GFP_KERNEL);
+ if (!buff) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto exit_no_mem;
+ }
+
+ /* read module length */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+
+ data = le16_to_cpu(raw_data);
+ /* exit if the length is 0 */
+ if (!data)
+ goto exit_error;
+
+ /* read node count */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+ data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
+
+ /* netlist ID block starts from offset 4 + node count * 2 */
+ id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
+
+ /* read the entire netlist ID block */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ id_blk_start * 2,
+ ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
+ false, NULL);
+ if (ret)
+ goto exit_error;
+
+ for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
+ buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);
+
+ ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
+ ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
+ ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
+ ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
+ /* Read the leftmost 4 bytes of SHA */
+ ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
+
+exit_error:
+ kfree(buff);
+exit_no_mem:
+ ice_release_nvm(hw);
+ return ret;
+}
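
Each netlist version field is stored as two 16-bit Shadow RAM words that ice_get_netlist_ver_info() stitches into a 32-bit value: the high word shifted left by 16 and OR'd with the low word. A sketch of that assembly (the example word values are made up):

#include <stdio.h>
#include <stdint.h>

/* combine two 16-bit NVM words into one 32-bit version field */
static uint32_t combine_words(uint16_t high, uint16_t low)
{
	return ((uint32_t)high << 16) | low;
}

int main(void)
{
	/* illustrative words as they might appear in the ID block */
	uint16_t major_hi = 0x0001, major_lo = 0x0002;

	printf("netlist major: 0x%08X\n", combine_words(major_hi, major_lo));
	return 0;
}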
+
+/**
* ice_discover_flash_size - Discover the available flash size.
* @hw: pointer to the HW struct
*
@@ -515,6 +597,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status;
}
+ /* read the netlist version information */
+ status = ice_get_netlist_ver_info(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 999f273ba6ad..165eda07b93d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -11,10 +11,6 @@ enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
enum ice_status
-ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
- u16 module_type);
-enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 71647566964e..7f4c1ec1eff2 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -12,12 +12,15 @@
*/
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
+ ICE_PROT_MAC_OF_OR_S = 1,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
ICE_PROT_TCP_IL = 49,
+ ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
+ ICE_PROT_GRE_OF = 64,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index eae707ddf8e8..0475134295e4 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1714,8 +1714,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
* This function removes single aggregator VSI info entry from
* aggregator list.
*/
-static void
-ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
@@ -1917,7 +1916,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*/
static enum ice_status
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
- enum ice_rl_type rl_type, u8 bw_alloc)
+ enum ice_rl_type rl_type, u16 bw_alloc)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
@@ -1947,8 +1946,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
*
* Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
@@ -1967,8 +1965,7 @@ ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
@@ -1993,8 +1990,7 @@ ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save or clear shared bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index a9a8bc3aca42..4028c6365172 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -27,6 +27,8 @@ enum ice_status {
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_HW_TABLE = -19,
+ ICE_ERR_FW_DDP_MISMATCH = -20,
+
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
@@ -35,6 +37,7 @@ enum ice_status {
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
+ ICE_ERR_AQ_FW_CRITICAL = -105,
};
#endif /* _ICE_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 51825a203e35..ff7d16ac693e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -593,8 +593,8 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_IS_VF)
is_vf = true;
- res_type = le16_to_cpu(ele->vsi_port_num) >>
- ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
+ res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
+ ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
/* FW VSI is not needed. Just continue. */
@@ -1612,18 +1612,17 @@ exit:
* check for duplicates in this case, removing duplicates from a given
* list should be taken care of in the caller of this function.
*/
-enum ice_status
-ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
+enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
struct list_head *rule_head;
- u16 elem_sent, total_elem_left;
+ u16 total_elem_left, s_rule_size;
struct ice_switch_info *sw;
struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
u16 num_unicast = 0;
- u16 s_rule_size;
+ u8 elem_sent;
if (!m_list || !hw)
return ICE_ERR_PARAM;
@@ -1707,8 +1706,8 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
total_elem_left -= elem_sent) {
struct ice_aqc_sw_rules_elem *entry = r_iter;
- elem_sent = min(total_elem_left,
- (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
+ elem_sent = min_t(u8, total_elem_left,
+ (ICE_AQ_MAX_BUF_LEN / s_rule_size));
status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
elem_sent, ice_aqc_opc_add_sw_rules,
NULL);
@@ -1914,8 +1913,7 @@ exit:
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status
-ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
+enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr;
@@ -2145,8 +2143,7 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
* the entries passed into m_list were added previously. It will not attempt to
* do a partial remove of entries that were found.
*/
-enum ice_status
-ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
+enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
@@ -2678,6 +2675,81 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_alloc_res_cntr - allocating resource counter
+ * @hw: pointer to the hardware structure
+ * @type: type of resource
+ * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
+ * @num_items: number of entries requested for FD resource type
+ * @counter_id: counter index returned by AQ call
+ */
+enum ice_status
+ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 *counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ /* Allocate resource */
+ buf_len = sizeof(*buf);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->num_elems = cpu_to_le16(num_items);
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | alloc_shared);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (status)
+ goto exit;
+
+ *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
+
+exit:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_free_res_cntr - free resource counter
+ * @hw: pointer to the hardware structure
+ * @type: type of resource
+ * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
+ * @num_items: number of entries to be freed for FD resource type
+ * @counter_id: counter ID resource which needs to be freed
+ */
+enum ice_status
+ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ /* Free resource */
+ buf_len = sizeof(*buf);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->num_elems = cpu_to_le16(num_items);
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW,
+ "counter resource could not be freed\n");
+
+ kfree(buf);
+ return status;
+}
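
Both counter helpers build the res_type word the same way: the resource type is shifted into its field, masked, and OR'd with the shared/dedicated flag. A standalone model of that bitfield packing; the shift, mask, and flag values below are placeholders, not the hardware's:

#include <stdio.h>
#include <stdint.h>

#define RES_TYPE_S     0                    /* field shift (illustrative) */
#define RES_TYPE_M     (0x7F << RES_TYPE_S) /* field mask (illustrative) */
#define RES_SHARED_BIT 0x80                 /* shared vs dedicated flag */

static uint16_t pack_res_type(uint8_t type, int shared)
{
	return (uint16_t)(((type << RES_TYPE_S) & RES_TYPE_M) |
			  (shared ? RES_SHARED_BIT : 0));
}

int main(void)
{
	printf("dedicated: 0x%04X shared: 0x%04X\n",
	       pack_res_type(0x21, 0), pack_res_type(0x21, 1));
	return 0;
}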
+
+/**
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index fa14b9545dab..8b4f9d35c860 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -208,6 +208,13 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw);
/* Switch config */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+enum ice_status
+ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 *counter_id);
+enum ice_status
+ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 counter_id);
+
/* Switch/bridge related commands */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index f67e8362958c..abdb137c8bb7 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -15,6 +15,90 @@
#define ICE_RX_HDR_SIZE 256
+#define FDIR_DESC_RXDID 0x40
+#define ICE_FDIR_CLEAN_DELAY 10
+
+/**
+ * ice_prgm_fdir_fltr - Program a Flow Director filter
+ * @vsi: VSI to send dummy packet
+ * @fdir_desc: flow director descriptor
+ * @raw_packet: allocated buffer for flow director
+ */
+int
+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
+ u8 *raw_packet)
+{
+ struct ice_tx_buf *tx_buf, *first;
+ struct ice_fltr_desc *f_desc;
+ struct ice_tx_desc *tx_desc;
+ struct ice_ring *tx_ring;
+ struct device *dev;
+ dma_addr_t dma;
+ u32 td_cmd;
+ u16 i;
+
+ /* VSI and Tx ring */
+ if (!vsi)
+ return -ENOENT;
+ tx_ring = vsi->tx_rings[0];
+ if (!tx_ring || !tx_ring->desc)
+ return -ENOENT;
+ dev = tx_ring->dev;
+
+ /* we are using two descriptors to add/del a filter and we can wait */
+ for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
+ if (!i)
+ return -EAGAIN;
+ msleep_interruptible(1);
+ }
+
+ dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma))
+ return -EINVAL;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ first = &tx_ring->tx_buf[i];
+ f_desc = ICE_TX_FDIRDESC(tx_ring, i);
+ memcpy(f_desc, fdir_desc, sizeof(*f_desc));
+
+ i++;
+ i = (i < tx_ring->count) ? i : 0;
+ tx_desc = ICE_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_buf[i];
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
+ ICE_TX_DESC_CMD_RE;
+
+ tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+ tx_buf->raw_buf = raw_packet;
+
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
+
+ /* Force memory write to complete before letting h/w know
+ * there are new descriptors to fetch.
+ */
+ wmb();
+
+ /* mark the data descriptor to be watched */
+ first->next_to_watch = tx_desc;
+
+ writel(tx_ring->next_to_use, tx_ring->tail);
+
+ return 0;
+}
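
ice_prgm_fdir_fltr() consumes two descriptors per operation (the filter descriptor plus a dummy data descriptor) and wraps next_to_use with a compare rather than a modulo, avoiding a division in the hot path. A userspace model of that index bookkeeping:

#include <stdio.h>

struct ring { unsigned short next_to_use, count; };

/* advance one slot, wrapping at count, in the branch-based idiom */
static unsigned short ring_advance(struct ring *r)
{
	unsigned short i = r->next_to_use + 1;

	r->next_to_use = (i < r->count) ? i : 0;
	return r->next_to_use;
}

int main(void)
{
	struct ring r = { .next_to_use = 62, .count = 64 };

	/* one filter programming op takes a filter desc + a data desc */
	ring_advance(&r);
	ring_advance(&r);
	printf("next_to_use=%u\n", r.next_to_use); /* wrapped to 0 */
	return 0;
}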
+
/**
* ice_unmap_and_free_tx_buf - Release a Tx buffer
* @ring: the ring that owns the buffer
@@ -24,7 +108,9 @@ static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
- if (ice_ring_is_xdp(ring))
+ if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ devm_kfree(ring->dev, tx_buf->raw_buf);
+ else if (ice_ring_is_xdp(ring))
page_frag_free(tx_buf->raw_buf);
else
dev_kfree_skb_any(tx_buf->skb);
@@ -423,6 +509,22 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
return 0;
}
+static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ice_rx_offset(rx_ring) ?
+ SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
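
On pages larger than 4K the truesize above is derived from the actual frame: the padded payload and the skb_shared_info overhead are each cache-aligned, while unpadded rings only align the payload. A userspace rendering of that arithmetic; the alignment macro mimics SKB_DATA_ALIGN and the shared-info size is a stand-in:

#include <stdio.h>

#define SMP_CACHE_BYTES 64
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define DATA_ALIGN(x)   ALIGN_UP((x), SMP_CACHE_BYTES)

/* stand-in for sizeof(struct skb_shared_info) */
#define SHARED_INFO_SZ  320

static unsigned int frame_truesize(unsigned int rx_offset, unsigned int size)
{
	if (rx_offset)
		return DATA_ALIGN(rx_offset + size) + DATA_ALIGN(SHARED_INFO_SZ);
	return DATA_ALIGN(size);
}

int main(void)
{
	printf("padded ring:   %u\n", frame_truesize(192, 1500));
	printf("unpadded ring: %u\n", frame_truesize(0, 1500));
	return 0;
}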
+
/**
* ice_run_xdp - Executes an XDP program on initialized xdp_buff
* @rx_ring: Rx ring
@@ -583,7 +685,8 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
+ if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
+ !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
@@ -803,7 +906,7 @@ static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
+ u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
@@ -918,7 +1021,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ u16 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */
ntc = (ntc < rx_ring->count) ? ntc : 0;
@@ -981,7 +1084,7 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*
* Returns amount of work completed
*/
-static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
@@ -991,6 +1094,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
bool failure;
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+#endif
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
@@ -1020,6 +1127,12 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
*/
dma_rmb();
+ if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
+ ice_put_rx_buf(rx_ring, NULL);
+ cleaned_count++;
+ continue;
+ }
+
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
@@ -1038,6 +1151,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
xdp.data_meta = xdp.data;
xdp.data_end = xdp.data + size;
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame size */
+ xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+#endif
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
@@ -1051,16 +1168,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
- truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
- size);
-#endif
xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
} else {
rx_buf->pagecnt_bias++;
}
@@ -1528,7 +1637,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* don't allow the budget to go below 1 because that would exit
* polling early.
*/
- budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
+ budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
else
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
@@ -1664,7 +1773,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
*/
while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, max_data, td_tag);
+ ice_build_ctob(td_cmd, td_offset, max_data,
+ td_tag);
tx_desc++;
i++;
@@ -1684,8 +1794,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
if (likely(!data_len))
break;
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
- size, td_tag);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
+ size, td_tag);
tx_desc++;
i++;
@@ -1716,8 +1826,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
/* write last descriptor with RS and EOP bits */
td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
- td_tag);
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(td_cmd, td_offset, size, td_tag);
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
@@ -1791,12 +1901,94 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- if (skb->encapsulation)
- return -1;
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ first->tx_flags |= ICE_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ first->tx_flags |= ICE_TX_FLAGS_IPV6;
+
+ if (skb->encapsulation) {
+ bool gso_ena = false;
+ u32 tunnel = 0;
+
+ /* define outer network header type */
+ if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
+ tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
+ ICE_TX_CTX_EIPT_IPV4 :
+ ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
+ l4_proto = ip.v4->protocol;
+ } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+ tunnel |= ICE_TX_CTX_EIPT_IPV6;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ }
+
+ /* define outer transport */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ break;
+ case IPPROTO_GRE:
+ tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
+ default:
+ if (first->tx_flags & ICE_TX_FLAGS_TSO)
+ return -1;
+
+ skb_checksum_help(skb);
+ return 0;
+ }
+
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ ICE_TXD_CTX_QW0_EIPLEN_S;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
+ /* compute tunnel header size */
+ tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+ ICE_TXD_CTX_QW0_NATLEN_S;
+
+ gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
+ /* indicate if we need to offload outer UDP header */
+ if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
+
+ /* record tunnel offload values */
+ off->cd_tunnel_params |= tunnel;
+
+ /* set DTYP=1 to indicate that it's a Tx context descriptor
+ * in IPsec tunnel mode with Tx offloads in Quad word 1
+ */
+ off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
+
+ /* switch L4 header pointer from outer to inner */
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = 0;
+
+ /* reset type as we transition from outer to inner headers */
+ first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
+ if (ip.v4->version == 4)
+ first->tx_flags |= ICE_TX_FLAGS_IPV4;
+ if (ip.v6->version == 6)
+ first->tx_flags |= ICE_TX_FLAGS_IPV6;
+ }
/* Enable IP checksum offloads */
- protocol = vlan_get_protocol(skb);
- if (protocol == htons(ETH_P_IP)) {
+ if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
l4_proto = ip.v4->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
@@ -1806,7 +1998,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
else
cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
- } else if (protocol == htons(ETH_P_IPV6)) {
+ } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
@@ -1861,49 +2053,25 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
*
* Checks the skb and correspondingly sets up several generic transmit flags
* related to VLAN tagging for the HW, such as VLAN, DCB, etc.
- *
- * Returns error code indicate the frame should be dropped upon error and the
- * otherwise returns 0 to indicate the flags has been set properly.
*/
-static int
+static void
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
- __be16 protocol = skb->protocol;
-
- if (protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
- /* when HW VLAN acceleration is turned off by the user the
- * stack sets the protocol to 8021q so that the driver
- * can take any steps required to support the SW only
- * VLAN handling. In our case the driver doesn't need
- * to take any further steps so just set the protocol
- * to the encapsulated ethertype.
- */
- skb->protocol = vlan_get_protocol(skb);
- return 0;
- }
- /* if we have a HW VLAN tag being added, default to the HW one */
+ /* nothing left to do, software offloaded VLAN */
+ if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
+ return;
+
+ /* currently, we always assume 802.1Q for VLAN insertion as VLAN
+ * insertion for 802.1AD is not supported
+ */
if (skb_vlan_tag_present(skb)) {
first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
- } else if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_hdr *vhdr, _vhdr;
-
- /* for SW VLAN, check the next protocol and store the tag */
- vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
- sizeof(_vhdr),
- &_vhdr);
- if (!vhdr)
- return -EINVAL;
-
- first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
- ICE_TX_FLAGS_VLAN_S;
- first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
}
- return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
+ ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
/**
@@ -1928,7 +2096,8 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
- u32 paylen, l4_start;
+ u32 paylen;
+ u8 l4_start;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1953,8 +2122,42 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
ip.v6->payload_len = 0;
}
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
+ /* determine offset of outer transport header */
+ l4_start = (u8)(l4.hdr - skb->data);
+
+ /* remove payload length from outer checksum */
+ paylen = skb->len - l4_start;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
+
+ /* reset pointers to inner headers */
+
+ /* cppcheck-suppress unreadVariable */
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
+ }
+
/* determine offset of transport header */
- l4_start = l4.hdr - skb->data;
+ l4_start = (u8)(l4.hdr - skb->data);
/* remove payload length from checksum */
paylen = skb->len - l4_start;
@@ -1963,12 +2166,12 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen));
/* compute length of UDP segmentation header */
- off->header_len = sizeof(l4.udp) + l4_start;
+ off->header_len = (u8)(sizeof(*l4.udp) + l4_start);
} else {
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(paylen));
/* compute length of TCP segmentation header */
- off->header_len = (l4.tcp->doff * 4) + l4_start;
+ off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
}
/* update gso_segs and bytecount */
@@ -2176,8 +2379,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
first->tx_flags = 0;
/* prepare the VLAN tagging flags for Tx */
- if (ice_tx_prepare_vlan_flags(tx_ring, first))
- goto out_drop;
+ ice_tx_prepare_vlan_flags(tx_ring, first);
/* set up TSO offload */
tso = ice_tso(first, &offload);
@@ -2199,7 +2401,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
- int i = tx_ring->next_to_use;
+ u16 i = tx_ring->next_to_use;
/* grab the next descriptor */
cdesc = ICE_TX_CTX_DESC(tx_ring, i);
@@ -2244,3 +2446,86 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return ice_xmit_frame_ring(skb, tx_ring);
}
+
+/**
+ * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
+ * @tx_ring: tx_ring to clean
+ */
+void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
+{
+ struct ice_vsi *vsi = tx_ring->vsi;
+ s16 i = tx_ring->next_to_clean;
+ int budget = ICE_DFLT_IRQ_WORK;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+
+ tx_buf = &tx_ring->tx_buf[i];
+ tx_desc = ICE_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no pending work */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if the descriptor isn't done, no work to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+ tx_desc->buf_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+
+ /* move past filter desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap the data header */
+ if (dma_unmap_len(tx_buf, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ devm_kfree(tx_ring->dev, tx_buf->raw_buf);
+
+ /* reset the remaining buffer and descriptor state */
+ tx_buf->raw_buf = NULL;
+ tx_buf->tx_flags = 0;
+ tx_buf->next_to_watch = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+ tx_desc->buf_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+
+ /* move past eop_desc for start of next FD desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ /* re-enable interrupt if needed */
+ ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
+}
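ice_clean_ctrl_tx_irq() above walks the ring with a common Intel-driver idiom: the index is biased by -count so it counts up from negative values toward zero, letting !i double as the wrap check. A minimal standalone sketch of the idiom (illustrative names, not the driver's types):

#include <stdio.h>

#define RING_COUNT 4

int main(void)
{
	int next_to_clean = 2;              /* current ring position */
	int i = next_to_clean - RING_COUNT; /* bias into [-count, 0) */
	int step;

	for (step = 0; step < 6; step++) {
		i++;
		if (!i)                     /* hit the end: wrap to start */
			i -= RING_COUNT;
		printf("slot %d\n", i + RING_COUNT); /* unbiased index */
	}
	return 0;
}

Unbiasing at the end (i += tx_ring->count, as the driver does before storing next_to_clean) recovers the real ring index without any modulo in the hot loop.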
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 7ee00a128663..e70c4619edc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -38,7 +38,8 @@
*/
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
+ ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
+ SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
/**
* ice_compute_pad - compute the padding
@@ -107,12 +108,19 @@ static inline int ice_skb_pad(void)
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+ (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
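The (u16) cast aside, ICE_DESC_UNUSED is the classic one-slot-reserved ring accounting: with count = 8, next_to_clean = 2, and next_to_use = 5 (hypothetical values), three descriptors are in flight and the macro reports 4 free, because one slot always stays empty so a full ring is distinguishable from an empty one. A standalone check:

#include <stdio.h>

/* Same arithmetic as the macro, spelled out for illustration. */
static unsigned int desc_unused(unsigned int count, unsigned int ntc,
				unsigned int ntu)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	printf("%u\n", desc_unused(8, 2, 5)); /* prints 4 */
	return 0;
}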
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
+/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
+ * freed instead of returned like skb packets.
+ */
+#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+#define ICE_TX_FLAGS_IPV4 BIT(5)
+#define ICE_TX_FLAGS_IPV6 BIT(6)
+#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
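The new flag bits occupy the low byte of tx_flags while the upper 16 bits still carry the VLAN TCI, with the 3-bit priority at bits 29-31. A hedged sketch of pulling the fields back out (assuming the header's usual ICE_TX_FLAGS_VLAN_S shift of 16, which this hunk does not show):

#include <stdint.h>
#include <stdio.h>

#define ICE_TX_FLAGS_VLAN_M	0xffff0000u
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000u
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16 /* assumed, not shown in this hunk */

int main(void)
{
	uint32_t tx_flags = (5u << ICE_TX_FLAGS_VLAN_PR_S) |
			    (100u << ICE_TX_FLAGS_VLAN_S);

	/* The TCI field includes the priority bits, so the two masks
	 * overlap by design.
	 */
	printf("tci %u prio %u\n",
	       (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S,
	       (tx_flags & ICE_TX_FLAGS_VLAN_PR_M) >> ICE_TX_FLAGS_VLAN_PR_S);
	return 0;
}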
@@ -155,17 +163,16 @@ struct ice_tx_offload_params {
};
struct ice_rx_buf {
- struct sk_buff *skb;
- dma_addr_t dma;
union {
struct {
+ struct sk_buff *skb;
+ dma_addr_t dma;
struct page *page;
unsigned int page_offset;
u16 pagecnt_bias;
};
struct {
- void *addr;
- u64 handle;
+ struct xdp_buff *xdp;
};
};
};
@@ -289,7 +296,6 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
struct xdp_umem *xsk_umem;
- struct zero_copy_allocator zca;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
@@ -373,5 +379,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
-
+int
+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
+ u8 *raw_packet);
+int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
+void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 6da048a6ca7c..02b12736ea80 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -8,7 +8,7 @@
* @rx_ring: ring to bump
* @val: new head index
*/
-void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
u16 prev_ntu = rx_ring->next_to_use & ~0x7;
@@ -84,11 +84,11 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
struct ice_rx_ptype_decoded decoded;
- u32 rx_error, rx_status;
+ u16 rx_status0, rx_status1;
bool ipv4, ipv6;
- rx_status = le16_to_cpu(rx_desc->wb.status_error0);
- rx_error = rx_status;
+ rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
decoded = ice_decode_rx_desc_ptype(ptype);
@@ -101,7 +101,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
return;
/* check if HW has decoded the packet and checksum */
- if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+ if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
if (!(decoded.known && decoded.outer_ip))
@@ -112,19 +112,31 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
- if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
goto checksum_fail;
- else if (ipv6 && (rx_status &
- (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+
+ if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
goto checksum_fail;
/* check for L4 errors and handle packets that were not able to be
* checksummed due to arrival speed
*/
- if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ goto checksum_fail;
+
+ /* check for outer UDP checksum error in tunneled packets */
+ if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
+ (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
goto checksum_fail;
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
+ */
+ if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
@@ -215,8 +227,8 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, i);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
- size, 0);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ size, 0);
/* Make certain all of the status bits have been updated
* before next_to_watch is written.
@@ -242,7 +254,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
*/
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return ICE_XDP_CONSUMED;
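The csum_level bump in the ice_rx_csum() hunk above matches how the stack reads CHECKSUM_UNNECESSARY: csum_level counts how many checksums beyond the outermost one the device verified. A minimal mock-up of the two sk_buff fields involved (not the kernel struct, values illustrative):

#include <stdio.h>

struct mock_skb {
	int ip_summed;           /* stands in for CHECKSUM_UNNECESSARY */
	unsigned int csum_level; /* 0 = outer only, 1 = outer + inner */
};

int main(void)
{
	struct mock_skb skb = { .ip_summed = 1 };

	/* Tunneled frame (tunnel types >= TUNNEL_IP_GRENAT above):
	 * hardware also validated the inner checksum, so advertise one
	 * extra verified level.
	 */
	skb.csum_level = 1;

	printf("checksums verified: %u\n", skb.csum_level + 1); /* 2 */
	return 0;
}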
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index ba9164dad9ae..58ff58f0f972 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -22,7 +22,7 @@ ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
}
static inline __le64
-build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
+ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
(td_cmd << ICE_TXD_QW1_CMD_S) |
@@ -49,7 +49,7 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
-void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);
+void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4ce5f92fca4a..c1ad8622e65c 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -118,7 +118,8 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
- ICE_VSI_VF,
+ ICE_VSI_VF = 1,
+ ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_LB = 6,
};
@@ -161,6 +162,38 @@ struct ice_phy_info {
u8 get_link_info;
};
+/* protocol enumeration for filters */
+enum ice_fltr_ptype {
+ /* NONE - used for undef/error */
+ ICE_FLTR_PTYPE_NONF_NONE = 0,
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+ ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ ICE_FLTR_PTYPE_FRAG_IPV4,
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV6_SCTP,
+ ICE_FLTR_PTYPE_NONF_IPV6_OTHER,
+ ICE_FLTR_PTYPE_MAX,
+};
+
+enum ice_fd_hw_seg {
+ ICE_FD_HW_SEG_NON_TUN = 0,
+ ICE_FD_HW_SEG_TUN,
+ ICE_FD_HW_SEG_MAX,
+};
+
+/* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */
+#define ICE_MAX_FDIR_VSI_PER_FILTER 2
+
+struct ice_fd_hw_prof {
+ struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX];
+ int cnt;
+ u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX];
+ u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER];
+};
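ice_fd_hw_prof keeps one flow entry handle per (VSI, segment) pair: the PF's main VSI plus the new control VSI, each with a non-tunneled and a tunneled segment. A standalone sketch of how the two-dimensional entry_h table is indexed (simplified types, hypothetical handle values):

#include <stdint.h>
#include <stdio.h>

enum fd_hw_seg { SEG_NON_TUN = 0, SEG_TUN, SEG_MAX };
#define MAX_FDIR_VSI_PER_FILTER 2 /* main VSI + control VSI */

struct fd_hw_prof {
	uint64_t entry_h[MAX_FDIR_VSI_PER_FILTER][SEG_MAX];
	uint16_t vsi_h[MAX_FDIR_VSI_PER_FILTER];
};

int main(void)
{
	static struct fd_hw_prof prof; /* zero-initialized */

	prof.vsi_h[0] = 3;                 /* e.g. the PF VSI handle */
	prof.entry_h[0][SEG_TUN] = 0xabcd; /* tunneled entry on VSI 0 */

	printf("vsi %u tun entry %#llx\n", prof.vsi_h[0],
	       (unsigned long long)prof.entry_h[0][SEG_TUN]);
	return 0;
}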
+
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
u32 valid_functions;
@@ -197,6 +230,8 @@ struct ice_hw_func_caps {
u32 num_allocd_vfs; /* Number of allocated VFs */
u32 vf_base_id; /* Logical ID of the first VF */
u32 guar_num_vsi;
+ u32 fd_fltr_guar; /* Number of filters guaranteed */
+ u32 fd_fltr_best_effort; /* Number of best effort filters */
};
/* Device wide capabilities */
@@ -204,6 +239,7 @@ struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
u32 num_funcs;
};
@@ -259,6 +295,16 @@ struct ice_nvm_info {
#define ICE_NVM_VER_LEN 32
+/* netlist version information */
+struct ice_netlist_ver_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
/* Max number of port to queue branches w.r.t topology */
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@@ -479,6 +525,8 @@ struct ice_hw {
u64 debug_mask; /* bitmap for debug mask */
enum ice_mac_type mac_type;
+ u16 fd_ctr_base; /* FD counter base index */
+
/* pci info */
u16 device_id;
u16 vendor_id;
@@ -491,8 +539,8 @@ struct ice_hw {
u16 max_burst_size; /* driver sets this value */
/* Tx Scheduler values */
- u16 num_tx_sched_layers;
- u16 num_tx_sched_phys_layers;
+ u8 num_tx_sched_layers;
+ u8 num_tx_sched_phys_layers;
u8 flattened_layers;
u8 max_cgds;
u8 sw_entry_point_layer;
@@ -506,6 +554,7 @@ struct ice_hw {
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
struct ice_hw_func_caps func_caps; /* function capabilities */
+ struct ice_netlist_ver_info netlist_ver; /* netlist version info */
struct ice_switch_info *switch_info; /* switch filter lists */
@@ -548,6 +597,7 @@ struct ice_hw {
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
+ u32 active_track_id;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
@@ -568,10 +618,29 @@ struct ice_hw {
u8 *pkg_copy;
u32 pkg_size;
+ /* tunneling info */
+ struct mutex tnl_lock;
+ struct ice_tunnel_table tnl;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
struct list_head fl_profs[ICE_BLK_COUNT];
+
+ /* Flow Director filter info */
+ int fdir_active_fltr;
+
+ struct mutex fdir_fltr_lock; /* protect Flow Director */
+ struct list_head fdir_list_head;
+
+ /* Book-keeping of side-band filter count per flow-type.
+ * This is used to detect and handle input set changes for
+ * the respective flow type.
+ */
+ u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX];
+
+ struct ice_fd_hw_prof **fdir_prof;
+ DECLARE_BITMAP(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX);
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
};
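Everything Flow-Director-related added to ice_hw here is serialized by fdir_fltr_lock: the filter list, the active count, and the per-flow-type counters used to notice input-set changes. A hedged userspace sketch of the bookkeeping pattern (pthread mutex standing in for the kernel mutex, types simplified):

#include <pthread.h>
#include <stdio.h>

#define FLTR_PTYPE_MAX 10

struct fdir_state {
	pthread_mutex_t lock; /* protects everything below */
	int active_fltr;
	unsigned short cnt[FLTR_PTYPE_MAX]; /* per flow-type count */
};

static void fdir_add(struct fdir_state *s, int ptype)
{
	pthread_mutex_lock(&s->lock);
	s->cnt[ptype]++; /* input-set change detection is per flow type */
	s->active_fltr++;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	static struct fdir_state s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	fdir_add(&s, 1 /* e.g. IPv4/UDP */);
	printf("active %d, type-1 count %u\n", s.active_fltr, s.cnt[1]);
	return 0;
}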
@@ -592,6 +661,8 @@ struct ice_eth_stats {
u64 tx_errors; /* tepc */
};
+#define ICE_MAX_UP 8
+
/* Statistics collected by the MAC */
struct ice_hw_port_stats {
/* eth stats collected by the port */
@@ -631,6 +702,9 @@ struct ice_hw_port_stats {
u64 tx_size_1023; /* ptc1023 */
u64 tx_size_1522; /* ptc1522 */
u64 tx_size_big; /* ptc9522 */
+ /* flow director stats */
+ u32 fd_sb_status;
+ u64 fd_sb_match;
};
/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 15191a325918..16a2f2526ccc 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -4,16 +4,18 @@
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
/**
* ice_validate_vf_id - helper to check if VF ID is valid
* @pf: pointer to the PF structure
* @vf_id: the ID of the VF to check
*/
-static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
+static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
+ /* vf_id range is only valid for 0-255, and should always be unsigned */
if (vf_id >= pf->num_alloc_vfs) {
- dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
+ dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
return -EINVAL;
}
return 0;
@@ -27,7 +29,7 @@ static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
+ dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
vf->vf_id);
return -EBUSY;
}
@@ -35,6 +37,37 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
}
/**
+ * ice_err_to_virt_err - translate errors for VF return code
+ * @ice_err: error return code
+ */
+static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
+{
+ switch (ice_err) {
+ case ICE_SUCCESS:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case ICE_ERR_BAD_PTR:
+ case ICE_ERR_INVAL_SIZE:
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ case ICE_ERR_PARAM:
+ case ICE_ERR_CFG:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case ICE_ERR_NO_MEMORY:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case ICE_ERR_NOT_READY:
+ case ICE_ERR_RESET_FAILED:
+ case ICE_ERR_FW_API_VER:
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_FULL:
+ case ICE_ERR_AQ_NO_WORK:
+ case ICE_ERR_AQ_EMPTY:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
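For reference, the promiscuous-mode handler added further down in this patch consumes the helper in exactly this shape when a hardware call fails:

	status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
	if (status) {
		v_ret = ice_err_to_virt_err(status);
		goto error_param;
	}

so the VF is always answered with a virtchnl status code rather than a raw ice_status value.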
+
+/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -47,7 +80,7 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
- int i;
+ unsigned int i;
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
@@ -149,6 +182,26 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
}
/**
+ * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * @vf: VF to remove access to VSI for
+ */
+static void ice_vf_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->lan_vsi_idx = ICE_NO_VSI;
+ vf->lan_vsi_num = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
+ * @vf: VF whose VSI is being freed and invalidated
+ */
+static void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
+ ice_vf_invalidate_vsi(vf);
+}
+
+/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
@@ -163,10 +216,8 @@ static void ice_free_vf_res(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
/* free VSI and disconnect it from the parent uplink */
- if (vf->lan_vsi_idx) {
- ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
- vf->lan_vsi_idx = 0;
- vf->lan_vsi_num = 0;
+ if (vf->lan_vsi_idx != ICE_NO_VSI) {
+ ice_vf_vsi_release(vf);
vf->num_mac = 0;
}
@@ -292,7 +343,7 @@ void ice_free_vfs(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- int tmp, i;
+ unsigned int tmp, i;
if (!pf->vf)
return;
@@ -337,7 +388,7 @@ void ice_free_vfs(struct ice_pf *pf)
* before this function ever gets called.
*/
if (!pci_vfs_assigned(pf->pdev)) {
- int vf_id;
+ unsigned int vf_id;
/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
* work correctly when SR-IOV gets re-enabled.
@@ -368,9 +419,9 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
struct ice_pf *pf = vf->pf;
u32 reg, reg_idx, bit_idx;
+ unsigned int vf_abs_id, i;
struct device *dev;
struct ice_hw *hw;
- int vf_abs_id, i;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
@@ -380,10 +431,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
- * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
- * It's normally disabled in ice_free_vf_res(), but it's safer
- * to do it earlier to give some time to finish to any VF config
- * functions that may still be running at this point.
+ * when it's safe again to access VF's VSI.
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
@@ -418,7 +466,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
- dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
+ dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -460,8 +508,9 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -475,18 +524,39 @@ out:
}
/**
+ * ice_vf_get_port_info - Get the VF's port info structure
+ * @vf: VF used to get the port info structure for
+ */
+static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
+{
+ return vf->pf->hw.port_info;
+}
+
+/**
* ice_vf_vsi_setup - Set up a VF VSI
- * @pf: board private structure
- * @pi: pointer to the port_info instance
- * @vf_id: defines VF ID to which this VSI connects.
+ * @vf: VF to setup VSI for
*
* Returns pointer to the successfully allocated VSI struct on success,
* otherwise returns NULL on failure.
*/
-static struct ice_vsi *
-ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
+static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
+ ice_vf_invalidate_vsi(vf);
+ return NULL;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return vsi;
}
/**
@@ -507,170 +577,158 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
}
/**
- * ice_alloc_vsi_res - Setup VF VSI and its resources
- * @vf: pointer to the VF structure
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add VLAN filters for
*
- * Returns 0 on success, negative value on failure
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
*/
-static int ice_alloc_vsi_res(struct ice_vf *vf)
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- LIST_HEAD(tmp_add_list);
- u8 broadcast[ETH_ALEN];
- struct ice_vsi *vsi;
- struct device *dev;
- int status = 0;
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ u16 vlan_id = 0;
+ int err;
- dev = ice_pf_to_dev(pf);
- /* first vector index is the VFs OICR index */
- vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+ if (vf->port_vlan_info) {
+ err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
+ if (err) {
+ dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
- vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
- if (!vsi) {
- dev_err(dev, "Failed to create VF VSI\n");
- return -ENOMEM;
+ vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
}
- vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
-
- /* Check if port VLAN exist before, and restore it accordingly */
- if (vf->port_vlan_info) {
- ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
- if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
- dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
- vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
- } else {
- /* set VLAN 0 filter by default when no port VLAN is
- * enabled. If a port VLAN is enabled we don't want
- * untagged broadcast/multicast traffic seen on the VF
- * interface.
- */
- if (ice_vsi_add_vlan(vsi, 0))
- dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
- vf->vf_id);
+ /* vlan_id will either be 0 or the port VLAN number */
+ err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
+ vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
+ err);
+ return err;
}
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum ice_status status;
+ u8 broadcast[ETH_ALEN];
+
eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
+ vf->vf_id, ice_stat_str(status));
+ return ice_status_to_errno(status);
+ }
- status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
- if (status)
- goto ice_alloc_vsi_res_exit;
+ vf->num_mac++;
if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
- status = ice_add_mac_to_list(vsi, &tmp_add_list,
- vf->dflt_lan_addr.addr);
- if (status)
- goto ice_alloc_vsi_res_exit;
+ status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
+ &vf->dflt_lan_addr.addr[0], vf->vf_id,
+ ice_stat_str(status));
+ return ice_status_to_errno(status);
+ }
+ vf->num_mac++;
}
- status = ice_add_mac(&pf->hw, &tmp_add_list);
- if (status)
- dev_err(dev, "could not add mac filters error %d\n", status);
- else
- vf->num_mac = 1;
-
- /* Clear this bit after VF initialization since we shouldn't reclaim
- * and reassign interrupts for synchronous or asynchronous VFR events.
- * We don't want to reconfigure interrupts since AVF driver doesn't
- * expect vector assignment to be changed unless there is a request for
- * more vectors.
- */
-ice_alloc_vsi_res_exit:
- ice_free_fltr_list(dev, &tmp_add_list);
- return status;
+ return 0;
}
/**
- * ice_alloc_vf_res - Allocate VF resources
- * @vf: pointer to the VF structure
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
*/
-static int ice_alloc_vf_res(struct ice_vf *vf)
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- int tx_rx_queue_left;
- int status;
-
- /* Update number of VF queues, in case VF had requested for queue
- * changes
- */
- tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- tx_rx_queue_left += pf->num_qps_per_vf;
- if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
- vf->num_req_qs != vf->num_vf_qs)
- vf->num_vf_qs = vf->num_req_qs;
-
- /* setup VF VSI and necessary resources */
- status = ice_alloc_vsi_res(vf);
- if (status)
- goto ice_alloc_vf_res_exit;
-
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-
- /* VF is now completely initialized */
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
-
- return status;
-
-ice_alloc_vf_res_exit:
- ice_free_vf_res(vf);
- return status;
}
/**
- * ice_ena_vf_mappings
- * @vf: pointer to the VF structure
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
*
- * Enable VF vectors and queues allocation by writing the details into
- * respective registers.
+ * Some registers must be indexed using device-wide (global) MSI-X values,
+ * while others take 0-based values that are relative to the PF.
*/
-static void ice_ena_vf_mappings(struct ice_vf *vf)
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
- int abs_vf_id, abs_first, abs_last;
+ int device_based_first_msix, device_based_last_msix;
+ int pf_based_first_msix, pf_based_last_msix, v;
struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int first, last, v;
+ int device_based_vf_id;
struct ice_hw *hw;
u32 reg;
- dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vsi = pf->vsi[vf->lan_vsi_idx];
- first = vf->first_vector_idx;
- last = (first + pf->num_msix_per_vf) - 1;
- abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
- abs_last = (abs_first + pf->num_msix_per_vf) - 1;
- abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- /* VF Vector allocation */
- reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
- ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
- VPINT_ALLOC_VALID_M);
+ pf_based_first_msix = vf->first_vector_idx;
+ pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
+
+ device_based_first_msix = pf_based_first_msix +
+ pf->hw.func_caps.common_cap.msix_vector_first_id;
+ device_based_last_msix =
+ (device_based_first_msix + pf->num_msix_per_vf) - 1;
+ device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+ VPINT_ALLOC_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+ VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
- reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
+ reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
& VPINT_ALLOC_PCI_FIRST_M) |
- ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
- VPINT_ALLOC_PCI_VALID_M);
+ ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+ VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+
/* map the interrupts to its functions */
- for (v = first; v <= last; v++) {
- reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+ reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
GLINT_VECT2FUNC_VF_NUM_M) |
((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
GLINT_VECT2FUNC_PF_NUM_M));
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
- /* Map mailbox interrupt. We put an explicit 0 here to remind us that
- * VF admin queue interrupts will go to VF MSI-X vector 0.
- */
- wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
+ /* Map mailbox interrupt to VF MSI-X vector 0 */
+ wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
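The two index spaces above differ only by constant offsets. With hypothetical numbers: if the VF's first vector is PF-based index 65, msix_vector_first_id is 128, and each VF owns 4 vectors, the GLINT_VECT2FUNC loop touches PF-based 65..68 while VPINT_ALLOC is programmed with device-based 193..196. A standalone check:

#include <stdio.h>

int main(void)
{
	int first_vector_idx = 65;      /* PF-based, hypothetical */
	int msix_vector_first_id = 128; /* device-wide base, hypothetical */
	int num_msix_per_vf = 4;

	int pf_first = first_vector_idx;
	int pf_last = pf_first + num_msix_per_vf - 1;
	int dev_first = pf_first + msix_vector_first_id;
	int dev_last = dev_first + num_msix_per_vf - 1;

	printf("PF-based %d..%d, device-based %d..%d\n",
	       pf_first, pf_last, dev_first, dev_last);
	return 0;
}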
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
/* set regardless of mapping mode */
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
@@ -682,7 +740,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
VPLAN_TX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
} else {
@@ -700,7 +758,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
VPLAN_RX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
} else {
@@ -709,6 +767,18 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
}
/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+ ice_ena_vf_msix_mappings(vf);
+ ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
+/**
* ice_determine_res
* @pf: pointer to the PF structure
* @avail_res: available resources in the PF structure
@@ -906,51 +976,18 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
}
/**
- * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
- * @vf: pointer to the VF structure
- *
- * Cleanup a VF after the hardware reset is finished. Expects the caller to
- * have verified whether the reset is finished properly, and ensure the
- * minimum amount of wait time has passed. Reallocate VF resources back to make
- * VF state active
+ * ice_clear_vf_reset_trigger - enable VF to access hardware
+ * @vf: VF to enable hardware access for
*/
-static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
+ struct ice_hw *hw = &vf->pf->hw;
u32 reg;
- hw = &pf->hw;
-
- /* PF software completes the flow by notifying VF that reset flow is
- * completed. This is done by enabling hardware by clearing the reset
- * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
- * register to VFR completed (done at the end of this function)
- * By doing this we allow HW to access VF memory at any point. If we
- * did it any sooner, HW could access memory while it was being freed
- * in ice_free_vf_res(), causing an IOMMU fault.
- *
- * On the other hand, this needs to be done ASAP, because the VF driver
- * is waiting for this to happen and may report a timeout. It's
- * harmless, but it gets logged into Guest OS kernel log, so best avoid
- * it.
- */
reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
reg &= ~VPGEN_VFRTRIG_VFSWR_M;
wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
-
- /* reallocate VF resources to finish resetting the VSI state */
- if (!ice_alloc_vf_res(vf)) {
- ice_ena_vf_mappings(vf);
- set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
- clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- }
-
- /* Tell the VF driver the reset is done. This needs to be done only
- * after VF has been fully initialized, because the VF driver may
- * request resources immediately after setting this flag.
- */
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ ice_flush(hw);
}
/**
@@ -994,44 +1031,124 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
return status;
}
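+/**
+ * ice_vf_clear_counters - reset the VF's MAC, VLAN, and MDD event counters
+ * @vf: VF to clear counters for
+ */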
+static void ice_vf_clear_counters(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+ vf->num_mac = 0;
+ vsi->num_vlan = 0;
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+}
+
/**
- * ice_config_res_vfs - Finalize allocation of VFs resources in one go
- * @pf: pointer to the PF structure
+ * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
+ * @vf: VF to perform pre VSI rebuild tasks
*
- * This function is being called as last part of resetting all VFs, or when
- * configuring VFs for the first time, where there is no resource to be freed
- * Returns true if resources were properly allocated for all VFs, and false
- * otherwise.
+ * These are lightweight per-VF tasks, so they don't need to be amortized even
+ * though reset_all_vfs() calls this in a loop over every VF.
*/
-static bool ice_config_res_vfs(struct ice_pf *pf)
+static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int v;
+ ice_vf_clear_counters(vf);
+ ice_clear_vf_reset_trigger(vf);
+}
- if (ice_set_per_vf_res(pf)) {
- dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
- return false;
- }
+/**
+ * ice_vf_rebuild_host_cfg - rebuild host configuration that persists across reset
+ * @vf: VF to rebuild host configuration on
+ */
+static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
- /* rearm global interrupts */
- if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
- ice_irq_dynamic_ena(hw, NULL, NULL);
+ ice_vf_set_host_trust_cfg(vf);
- /* Finish resetting each VF and allocate resources */
- ice_for_each_vf(pf, v) {
- struct ice_vf *vf = &pf->vf[v];
+ if (ice_vf_rebuild_host_mac_cfg(vf))
+ dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+ vf->vf_id);
- vf->num_vf_qs = pf->num_qps_per_vf;
- dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
- vf->num_vf_qs);
- ice_cleanup_and_realloc_vf(vf);
+ if (ice_vf_rebuild_host_vlan_cfg(vf))
+ dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+ vf->vf_id);
+}
+
+/**
+ * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
+ * @vf: VF to release and setup the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
+ * configuration change, etc.).
+ */
+static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
+{
+ ice_vf_vsi_release(vf);
+ if (!ice_vf_vsi_setup(vf))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_vsi - rebuild the VF's VSI
+ * @vf: VF to rebuild the VSI for
+ *
+ * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
+ * host, PFR, CORER, etc.).
+ */
+static int ice_vf_rebuild_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (ice_vsi_rebuild(vsi, true)) {
+ dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
+ vf->vf_id);
+ return -EIO;
}
+ /* vsi->idx will remain the same in this case so don't update
+ * vf->lan_vsi_idx
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+ vf->lan_vsi_num = vsi->vsi_num;
- ice_flush(hw);
- clear_bit(__ICE_VF_DIS, pf->state);
+ return 0;
+}
- return true;
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+static void ice_vf_set_initialized(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+}
+
+/**
+ * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
+ * @vf: VF to perform tasks on
+ */
+static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+
+ ice_vf_rebuild_host_cfg(vf);
+
+ ice_vf_set_initialized(vf);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
/**
@@ -1065,17 +1182,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v)
ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
- ice_for_each_vf(pf, v) {
- struct ice_vsi *vsi;
-
- vf = &pf->vf[v];
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
- ice_dis_vf_qs(vf);
- ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
- NULL, ICE_VF_RESET, vf->vf_id, NULL);
- }
-
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
* sequence to make sure that it has completed. We'll keep track of
@@ -1112,21 +1218,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
- ice_free_vf_res(vf);
-
- /* Free VF queues as well, and reallocate later.
- * If a given VF has different number of queues
- * configured, the request for update will come
- * via mailbox communication.
- */
- vf->num_vf_qs = 0;
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi(vf);
+ ice_vf_post_vsi_rebuild(vf);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
- if (!ice_config_res_vfs(pf))
- return false;
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
@@ -1238,12 +1336,9 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev_err(dev, "disabling promiscuous mode failed\n");
}
- /* free VF resources to begin resetting the VSI state */
- ice_free_vf_res(vf);
-
- ice_cleanup_and_realloc_vf(vf);
-
- ice_flush(hw);
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi_with_release(vf);
+ ice_vf_post_vsi_rebuild(vf);
return true;
}
@@ -1311,16 +1406,144 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
}
/**
- * ice_alloc_vfs - Allocate and set up VFs resources
+ * ice_init_vf_vsi_res - initialize/setup VF VSI resources
+ * @vf: VF to initialize/setup the VSI for
+ *
+ * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
+ * the VF VSI's broadcast filter. It is only used during initial VF creation.
+ */
+static int ice_init_vf_vsi_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ enum ice_status status;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
+
+ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
+ return -ENOMEM;
+
+ err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
+ vf->vf_id, ice_stat_str(status));
+ err = ice_status_to_errno(status);
+ goto release_vsi;
+ }
+
+ vf->num_mac = 1;
+
+ return 0;
+
+release_vsi:
+ ice_vf_vsi_release(vf);
+ return err;
+}
+
+/**
+ * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
+ * @pf: PF the VFs are associated with
+ */
+static int ice_start_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int retval, i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_clear_vf_reset_trigger(vf);
+
+ retval = ice_init_vf_vsi_res(vf);
+ if (retval) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
+ vf->vf_id, retval);
+ goto teardown;
+ }
+
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ }
+
+ ice_flush(hw);
+ return 0;
+
+teardown:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_dis_vf_mappings(vf);
+ ice_vf_vsi_release(vf);
+ }
+
+ return retval;
+}
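The teardown label above relies on the loop index keeping its value: for (i = i - 1; i >= 0; i--) unwinds only the VFs that were fully set up, never the one that failed. A minimal sketch of the idiom:

#include <stdio.h>

static int setup(int i)
{
	return i == 3 ? -1 : 0; /* pretend the 4th VF fails */
}

static void teardown(int i)
{
	printf("teardown %d\n", i);
}

int main(void)
{
	int i, ret = 0;

	for (i = 0; i < 5; i++) {
		ret = setup(i);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	for (i = i - 1; i >= 0; i--) /* undoes 2, 1, 0; skips failed 3 */
		teardown(i);
	return ret;
}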
+
+/**
+ * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
+ * @pf: PF holding reference to all VFs for default configuration
+ */
+static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ vf->pf = pf;
+ vf->vf_id = i;
+ vf->vf_sw_id = pf->first_sw;
+ /* assign default capabilities */
+ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
+ vf->spoofchk = true;
+ vf->num_vf_qs = pf->num_qps_per_vf;
+ }
+}
+
+/**
+ * ice_alloc_vfs - allocate num_vfs in the PF structure
+ * @pf: PF to store the allocated VFs in
+ * @num_vfs: number of VFs to allocate
+ */
+static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
+{
+ struct ice_vf *vfs;
+
+ vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
+ GFP_KERNEL);
+ if (!vfs)
+ return -ENOMEM;
+
+ pf->vf = vfs;
+ pf->num_alloc_vfs = num_vfs;
+
+ return 0;
+}
+
+/**
+ * ice_ena_vfs - enable VFs so they are ready to be used
* @pf: pointer to the PF structure
- * @num_alloc_vfs: number of VFs to allocate
+ * @num_vfs: number of VFs to enable
*/
-static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- struct ice_vf *vfs;
- int i, ret;
+ int ret;
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
@@ -1328,43 +1551,37 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
set_bit(__ICE_OICR_INTR_DIS, pf->state);
ice_flush(hw);
- ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ ret = pci_enable_sriov(pf->pdev, num_vfs);
if (ret) {
pf->num_alloc_vfs = 0;
goto err_unroll_intr;
}
- /* allocate memory */
- vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
- if (!vfs) {
- ret = -ENOMEM;
- goto err_pci_disable_sriov;
- }
- pf->vf = vfs;
- pf->num_alloc_vfs = num_alloc_vfs;
- /* apply default profile */
- ice_for_each_vf(pf, i) {
- vfs[i].pf = pf;
- vfs[i].vf_sw_id = pf->first_sw;
- vfs[i].vf_id = i;
+ ret = ice_alloc_vfs(pf, num_vfs);
+ if (ret)
+ goto err_pci_disable_sriov;
- /* assign default capabilities */
- set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
- vfs[i].spoofchk = true;
+ if (ice_set_per_vf_res(pf)) {
+ dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
+ num_vfs);
+ ret = -ENOSPC;
+ goto err_unroll_sriov;
}
- /* VF resources get allocated with initialization */
- if (!ice_config_res_vfs(pf)) {
- ret = -EIO;
+ ice_set_dflt_settings_vfs(pf);
+
+ if (ice_start_vfs(pf)) {
+ dev_err(dev, "Failed to start VF(s)\n");
+ ret = -EAGAIN;
goto err_unroll_sriov;
}
- return ret;
+ clear_bit(__ICE_VF_DIS, pf->state);
+ return 0;
err_unroll_sriov:
+ devm_kfree(dev, pf->vf);
pf->vf = NULL;
- devm_kfree(dev, vfs);
- vfs = NULL;
pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
pci_disable_sriov(pf->pdev);
@@ -1404,6 +1621,8 @@ static bool ice_pf_state_is_nominal(struct ice_pf *pf)
* ice_pci_sriov_ena - Enable or change number of VFs
* @pf: pointer to the PF structure
* @num_vfs: number of VFs to allocate
+ *
+ * Returns 0 on success and negative on failure
*/
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
@@ -1411,20 +1630,10 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
struct device *dev = ice_pf_to_dev(pf);
int err;
- if (!ice_pf_state_is_nominal(pf)) {
- dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
- return -EBUSY;
- }
-
- if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
- dev_err(dev, "This device is not capable of SR-IOV\n");
- return -EOPNOTSUPP;
- }
-
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
ice_free_vfs(pf);
else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
- return num_vfs;
+ return 0;
if (num_vfs > pf->num_vfs_supported) {
dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
@@ -1432,45 +1641,77 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
return -EOPNOTSUPP;
}
- dev_info(dev, "Allocating %d VFs\n", num_vfs);
- err = ice_alloc_vfs(pf, num_vfs);
+ dev_info(dev, "Enabling %d VFs\n", num_vfs);
+ err = ice_ena_vfs(pf, num_vfs);
if (err) {
dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
return err;
}
set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
- return num_vfs;
+ return 0;
+}
+
+/**
+ * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
+ * @pf: PF to enable SR-IOV on
+ */
+static int ice_check_sriov_allowed(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_is_safe_mode(pf)) {
+ dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
/**
* ice_sriov_configure - Enable or change number of VFs via sysfs
* @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate
+ * @num_vfs: number of VFs to allocate or 0 to free VFs
*
- * This function is called when the user updates the number of VFs in sysfs.
+ * This function is called when the user updates the number of VFs in sysfs. On
+ * success, returns the number of VFs that were requested (num_vfs); returns a
+ * negative errno on failure.
*/
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
struct device *dev = ice_pf_to_dev(pf);
+ int err;
- if (ice_is_safe_mode(pf)) {
- dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
- return -EOPNOTSUPP;
- }
+ err = ice_check_sriov_allowed(pf);
+ if (err)
+ return err;
- if (num_vfs)
- return ice_pci_sriov_ena(pf, num_vfs);
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ return 0;
+ }
- if (!pci_vfs_assigned(pdev)) {
- ice_free_vfs(pf);
- } else {
dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
return -EBUSY;
}
- return 0;
+ err = ice_pci_sriov_ena(pf, num_vfs);
+ if (err)
+ return err;
+
+ return num_vfs;
}
/**
@@ -1483,7 +1724,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
void ice_process_vflr_event(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- int vf_id;
+ unsigned int vf_id;
u32 reg;
if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
@@ -1524,7 +1765,7 @@ static void ice_vc_reset_vf(struct ice_vf *vf)
*/
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
- int vf_id;
+ unsigned int vf_id;
ice_for_each_vf(pf, vf_id) {
struct ice_vf *vf = &pf->vf[vf_id];
@@ -1628,8 +1869,9 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
- vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
+ dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
+ vf->vf_id, ice_stat_str(aq_ret),
+ ice_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -1772,7 +2014,7 @@ err:
*/
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
- if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
ice_reset_vf(vf, false);
}
@@ -2044,8 +2286,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
+ ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
+ ice_stat_str(status));
ret = -EIO;
goto out;
}
@@ -2060,6 +2303,174 @@ out:
}
/**
+ * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
+ * @pf: PF structure for accessing VF(s)
+ *
+ * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
+ * else return true
+ */
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+{
+ int vf_idx;
+
+ ice_for_each_vf(pf, vf_idx) {
+ struct ice_vf *vf = &pf->vf[vf_idx];
+
+ /* found a VF that has promiscuous mode configured */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_vc_cfg_promiscuous_mode_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure VF VSIs promiscuous mode
+ */
+static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_promisc_info *info =
+ (struct virtchnl_promisc_info *)msg;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ bool rm_promisc;
+ int ret = 0;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Leave v_ret alone, lie to the VF on purpose. */
+ goto error_param;
+ }
+
+ rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
+ !(info->flags & FLAG_VF_MULTICAST_PROMISC);
+
+ if (vsi->num_vlan || vf->port_vlan_info) {
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+ struct net_device *pf_netdev;
+
+ if (!pf_vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ pf_netdev = pf_vsi->netdev;
+
+ ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
+ if (ret) {
+ dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
+ rm_promisc ? "ON" : "OFF", vf->vf_id,
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+
+ ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
+ if (ret) {
+ dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
+
+ if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
+ /* only attempt to set the default forwarding VSI if
+ * it's not currently set
+ */
+ ret = ice_set_dflt_vsi(pf->first_sw, vsi);
+ else if (!set_dflt_vsi &&
+ ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
+ /* only attempt to free the default forwarding VSI if we
+ * are the owner
+ */
+ ret = ice_clear_dflt_vsi(pf->first_sw);
+
+ if (ret) {
+ dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
+ set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ goto error_param;
+ }
+ } else {
+ enum ice_status status;
+ u8 promisc_m;
+
+ if (info->flags & FLAG_VF_UNICAST_PROMISC) {
+ if (vf->port_vlan_info || vsi->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+ } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
+ if (vf->port_vlan_info || vsi->num_vlan)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+ } else {
+ if (vf->port_vlan_info || vsi->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+ }
+
+ /* Configure multicast/unicast with or without VLAN promiscuous
+ * mode
+ */
+ status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
+ if (status) {
+ dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
+ rm_promisc ? "dis" : "en", vf->vf_id,
+ ice_stat_str(status));
+ v_ret = ice_err_to_virt_err(status);
+ goto error_param;
+ } else {
+ dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
+ rm_promisc ? "dis" : "en", vf->vf_id);
+ }
+ }
+
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+ set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ else
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
+ set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ else
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ v_ret, NULL, 0);
+}
+
+/**
* ice_vc_get_stats_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2118,6 +2529,52 @@ static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
}
/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
* ice_vc_ena_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2177,6 +2634,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
set_bit(vf_q_id, vf->rxq_ena);
}
@@ -2192,6 +2650,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
if (test_bit(vf_q_id, vf->txq_ena))
continue;
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
set_bit(vf_q_id, vf->txq_ena);
}
@@ -2604,20 +3063,22 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
return -EPERM;
}
- status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
+ status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
return -EEXIST;
} else if (status) {
- dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
- mac_addr, vf->vf_id, status);
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
+ mac_addr, vf->vf_id, ice_stat_str(status));
return -EIO;
}
- /* only set dflt_lan_addr once */
- if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
- is_unicast_ether_addr(mac_addr))
+ /* Set the default LAN address to the latest unicast MAC address added
+ * by the VF. The default LAN address is reported by the PF via
+ * ndo_get_vf_config.
+ */
+ if (is_unicast_ether_addr(mac_addr))
ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
vf->num_mac++;
@@ -2641,14 +3102,14 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
return 0;
- status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
+ status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id);
return -ENOENT;
} else if (status) {
- dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
- mac_addr, vf->vf_id, status);
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
+ mac_addr, vf->vf_id, ice_stat_str(status));
return -EIO;
}
@@ -2834,7 +3295,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
struct device *dev;
struct ice_vf *vf;
u16 vlanprio;
@@ -2856,8 +3316,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
}
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
-
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
return ret;
@@ -2870,44 +3328,15 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
return 0;
}
- if (vlan_id || qos) {
- /* remove VLAN 0 filter set by default when transitioning from
- * no port VLAN to a port VLAN. No change to old port VLAN on
- * failure.
- */
- ret = ice_vsi_kill_vlan(vsi, 0);
- if (ret)
- return ret;
- ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
- if (ret)
- return ret;
- } else {
- /* add VLAN 0 filter back when transitioning from port VLAN to
- * no port VLAN. No change to old port VLAN on failure.
- */
- ret = ice_vsi_add_vlan(vsi, 0);
- if (ret)
- return ret;
- ret = ice_vsi_manage_pvid(vsi, 0, false);
- if (ret)
- return ret;
- }
+ vf->port_vlan_info = vlanprio;
- if (vlan_id) {
+ if (vf->port_vlan_info)
dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
+ else
+ dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
- /* add VLAN filter for the port VLAN */
- ret = ice_vsi_add_vlan(vsi, vlan_id);
- if (ret)
- return ret;
- }
- /* remove old port VLAN filter with valid VLAN ID or QoS fields */
- if (vf->port_vlan_info)
- ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
-
- /* keep port VLAN information persistent on resets */
- vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
+ ice_vc_reset_vf(vf);
return 0;
}
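For context, the vlanprio value stored in vf->port_vlan_info above packs the VLAN ID and QoS priority into a single u16. A sketch of that packing, assuming the standard VLAN_PRIO_SHIFT from <linux/if_vlan.h> (the actual computation happens earlier in this function, outside the hunk):

/* illustration only: VLAN ID in bits 0-11, QoS priority in bits 13-15 */
u16 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);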
@@ -2992,8 +3421,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
+ test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
vlan_promisc = true;
if (add_v) {
@@ -3018,7 +3448,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!vid)
continue;
- status = ice_vsi_add_vlan(vsi, vid);
+ status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3317,6 +3747,9 @@ error_handler:
case VIRTCHNL_OP_GET_STATS:
err = ice_vc_get_stats_msg(vf, msg);
break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
+ break;
case VIRTCHNL_OP_ADD_VLAN:
err = ice_vc_add_vlan_msg(vf, msg);
break;
@@ -3390,6 +3823,39 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
}
/**
+ * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
+ * @pf: PF used to reference the switch's rules
+ * @umac: unicast MAC to compare against existing switch rules
+ *
+ * Return true on the first/any match, else return false
+ */
+static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
+{
+ struct ice_sw_recipe *mac_recipe_list =
+ &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* protect MAC filter list access */
+
+ rule_head = &mac_recipe_list->filt_rules;
+ rule_lock = &mac_recipe_list->filt_rule_lock;
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(list_itr, rule_head, list_entry) {
+ u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+
+ if (ether_addr_equal(existing_mac, umac)) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+
+ mutex_unlock(rule_lock);
+
+ return false;
+}
+
+/**
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -3406,25 +3872,41 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
+ if (is_multicast_ether_addr(mac)) {
netdev_err(netdev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
}
vf = &pf->vf[vf_id];
+ /* nothing left to do, unicast MAC already set */
+ if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
+ return 0;
+
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
return ret;
- /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
- * flow will use the updated dflt_lan_addr and add a MAC filter
- * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
- * set the MAC address for this VF.
+ if (ice_unicast_mac_exists(pf, mac)) {
+ netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
+ mac, vf_id, mac);
+ return -EINVAL;
+ }
+
+ /* VF is notified of its new MAC via the PF's response to the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
- vf->pf_set_mac = true;
- netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
- vf_id, mac);
+ if (is_zero_ether_addr(mac)) {
+ /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
+ vf->pf_set_mac = false;
+ netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
+ } else {
+ /* PF will add MAC rule for the VF */
+ vf->pf_set_mac = true;
+ netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
+ }
ice_vc_reset_vf(vf);
return 0;
@@ -3554,6 +4036,24 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
}
/**
+ * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dflt_lan_addr.addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
* ice_print_vfs_mdd_event - print VFs malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -3582,12 +4082,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
vf->mdd_rx_events.last_printed =
vf->mdd_rx_events.count;
-
- dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
- vf->mdd_rx_events.count, hw->pf_id, i,
- vf->dflt_lan_addr.addr,
- test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
- ? "on" : "off");
+ ice_print_vf_rx_mdd_event(vf);
}
/* only print Tx MDD event message if there are new events */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 3f9464269bd2..67aa9110fdd1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -7,7 +7,10 @@
/* Restrict number of MAC Addr and VLAN that non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
-#define ICE_MAX_MACADDR_PER_VF 12
+/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
+ * broadcast, and 16 for additional unicast/multicast filters
+ */
+#define ICE_MAX_MACADDR_PER_VF 18
/* Malicious Driver Detection */
#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
@@ -64,7 +67,7 @@ struct ice_mdd_vf_events {
struct ice_vf {
struct ice_pf *pf;
- s16 vf_id; /* VF ID in the PF space */
+ u16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
/* first vector index of this VF in the PF space */
int first_vector_idx;
@@ -128,9 +131,11 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -140,6 +145,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf);
#define ice_set_vf_state_qs_dis(vf) do {} while (0)
#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
+#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
@@ -219,5 +225,10 @@ ice_get_vf_stats(struct net_device __always_unused *netdev,
{
return -EOPNOTSUPP;
}
+
+static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
+{
+ return false;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
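The ice_is_any_vf_in_promisc() stub above returns false when SR-IOV is compiled out; a minimal sketch of the CONFIG_PCI_IOV implementation it pairs with, assuming it scans the per-VF state bits used elsewhere in this patch:

bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
{
	int vf_idx;

	for (vf_idx = 0; vf_idx < pf->num_alloc_vfs; vf_idx++) {
		struct ice_vf *vf = &pf->vf[vf_idx];

		/* found a VF with unicast or multicast promiscuous mode on */
		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
		    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
			return true;
	}

	return false;
}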
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8279db15e870..b6f928c9e9c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
@@ -280,28 +280,6 @@ static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
}
/**
- * ice_xsk_add_umem - add a UMEM region for XDP sockets
- * @vsi: VSI to which the UMEM will be added
- * @umem: pointer to a requested UMEM region
- * @qid: queue ID
- *
- * Returns 0 on success, negative on error
- */
-static int ice_xsk_add_umem(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
-{
- int err;
-
- err = ice_xsk_alloc_umems(vsi);
- if (err)
- return err;
-
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
-
- return 0;
-}
-
-/**
* ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
 * @vsi: VSI from which the UMEM will be removed
* @qid: Ring/qid associated with the UMEM
@@ -318,65 +296,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
}
}
-/**
- * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets
- * @vsi: VSI to map the UMEM region
- * @umem: UMEM to map
- *
- * Returns 0 on success, negative on error
- */
-static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i;
-
- dev = ice_pf_to_dev(pf);
- for (i = 0; i < umem->npgs; i++) {
- dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
- PAGE_SIZE,
- DMA_BIDIRECTIONAL,
- ICE_RX_DMA_ATTR);
- if (dma_mapping_error(dev, dma)) {
- dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n",
- i);
- goto out_unmap;
- }
-
- umem->pages[i].dma = dma;
- }
-
- return 0;
-
-out_unmap:
- for (; i > 0; i--) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
- umem->pages[i].dma = 0;
- }
-
- return -EFAULT;
-}
-
-/**
- * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets
- * @vsi: VSI from which the UMEM will be unmapped
- * @umem: UMEM to unmap
- */
-static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i;
-
- dev = ice_pf_to_dev(pf);
- for (i = 0; i < umem->npgs; i++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
-
- umem->pages[i].dma = 0;
- }
-}
/**
* ice_xsk_umem_disable - disable a UMEM region
@@ -391,7 +310,7 @@ static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
!vsi->xsk_umems[qid])
return -EINVAL;
- ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
+ xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR);
ice_xsk_remove_umem(vsi, qid);
return 0;
@@ -408,7 +327,6 @@ static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
static int
ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
{
- struct xdp_umem_fq_reuse *reuseq;
int err;
if (vsi->type != ICE_VSI_PF)
@@ -419,20 +337,18 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (qid >= vsi->num_xsk_umems)
return -EINVAL;
+ err = ice_xsk_alloc_umems(vsi);
+ if (err)
+ return err;
+
if (vsi->xsk_umems && vsi->xsk_umems[qid])
return -EBUSY;
- reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
- if (!reuseq)
- return -ENOMEM;
-
- xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
- err = ice_xsk_umem_dma_map(vsi, umem);
- if (err)
- return err;
+ vsi->xsk_umems[qid] = umem;
+ vsi->num_xsk_umems_used++;
- err = ice_xsk_add_umem(vsi, umem, qid);
+ err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back),
+ ICE_RX_DMA_ATTR);
if (err)
return err;
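The enable path above and the disable path earlier in this file now form a symmetric pair around the core helpers from net/xdp_sock_drv.h; a sketch of the expected pairing, under the assumption that the UMEM stays registered between the two calls:

/* enable: hand the UMEM's pages to the core for DMA mapping */
err = xsk_buff_dma_map(umem, ice_pf_to_dev(vsi->back), ICE_RX_DMA_ATTR);

/* ... datapath allocates via xsk_buff_alloc() and frees via xsk_buff_free() ... */

/* disable: release the core-held DMA mappings */
xsk_buff_dma_unmap(umem, ICE_RX_DMA_ATTR);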
@@ -484,137 +400,22 @@ xsk_umem_if_up:
}
/**
- * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations
- * @zca: zero-cpoy allocator
- * @handle: Buffer handle
- */
-void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
-{
- struct ice_rx_buf *rx_buf;
- struct ice_ring *rx_ring;
- struct xdp_umem *umem;
- u64 hr, mask;
- u16 nta;
-
- rx_ring = container_of(zca, struct ice_ring, zca);
- umem = rx_ring->xsk_umem;
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- mask = umem->chunk_mask;
-
- nta = rx_ring->next_to_alloc;
- rx_buf = &rx_ring->rx_buf[nta];
-
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- handle &= mask;
-
- rx_buf->dma = xdp_umem_get_dma(umem, handle);
- rx_buf->dma += hr;
-
- rx_buf->addr = xdp_umem_get_data(umem, handle);
- rx_buf->addr += hr;
-
- rx_buf->handle = (u64)handle + umem->headroom;
-}
-
-/**
- * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem
- * @rx_ring: ring with an xdp_umem bound to it
- * @rx_buf: buffer to which xsk page address will be assigned
- *
- * This function allocates an Rx buffer in the hot path.
- * The buffer can come from fill queue or recycle queue.
- *
- * Returns true if an assignment was successful, false if not.
- */
-static __always_inline bool
-ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- void *addr = rx_buf->addr;
- u64 handle, hr;
-
- if (addr) {
- rx_ring->rx_stats.page_reuse_count++;
- return true;
- }
-
- if (!xsk_umem_peek_addr(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- rx_buf->dma = xdp_umem_get_dma(umem, handle);
- rx_buf->dma += hr;
-
- rx_buf->addr = xdp_umem_get_data(umem, handle);
- rx_buf->addr += hr;
-
- rx_buf->handle = handle + umem->headroom;
-
- xsk_umem_release_addr(umem);
- return true;
-}
-
-/**
- * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem
- * @rx_ring: ring with an xdp_umem bound to it
- * @rx_buf: buffer to which xsk page address will be assigned
- *
- * This function allocates an Rx buffer in the slow path.
- * The buffer can come from fill queue or recycle queue.
- *
- * Returns true if an assignment was successful, false if not.
- */
-static __always_inline bool
-ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- u64 handle, headroom;
-
- if (!xsk_umem_peek_addr_rq(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- handle &= umem->chunk_mask;
- headroom = umem->headroom + XDP_PACKET_HEADROOM;
-
- rx_buf->dma = xdp_umem_get_dma(umem, handle);
- rx_buf->dma += headroom;
-
- rx_buf->addr = xdp_umem_get_data(umem, handle);
- rx_buf->addr += headroom;
-
- rx_buf->handle = handle + umem->headroom;
-
- xsk_umem_release_addr_rq(umem);
- return true;
-}
-
-/**
* ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
* @count: The number of buffers to allocate
- * @alloc: the function pointer to call for allocation
*
* This function allocates a number of Rx buffers from the fill ring
* or the internal recycle mechanism and places them on the Rx ring.
*
* Returns false if all allocations were successful, true if any fail.
*/
-static bool
-ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
- bool (*alloc)(struct ice_ring *, struct ice_rx_buf *))
+bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
struct ice_rx_buf *rx_buf;
bool ret = false;
+ dma_addr_t dma;
if (!count)
return false;
@@ -623,16 +424,14 @@ ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
rx_buf = &rx_ring->rx_buf[ntu];
do {
- if (!alloc(rx_ring, rx_buf)) {
+ rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ if (!rx_buf->xdp) {
ret = true;
break;
}
- dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0,
- rx_ring->rx_buf_len,
- DMA_BIDIRECTIONAL);
-
- rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma);
+ dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
rx_desc++;
@@ -653,32 +452,6 @@ ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
}
/**
- * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path
- * @rx_ring: Rx ring
- * @count: number of bufs to allocate
- *
- * Returns false on success, true on failure.
- */
-static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count)
-{
- return ice_alloc_rx_bufs_zc(rx_ring, count,
- ice_alloc_buf_fast_zc);
-}
-
-/**
- * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path
- * @rx_ring: Rx ring
- * @count: number of bufs to allocate
- *
- * Returns false on success, true on failure.
- */
-bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count)
-{
- return ice_alloc_rx_bufs_zc(rx_ring, count,
- ice_alloc_buf_slow_zc);
-}
-
-/**
* ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
* @rx_ring: Rx ring
*/
@@ -692,76 +465,21 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
}
/**
- * ice_get_rx_buf_zc - Fetch the current Rx buffer
- * @rx_ring: Rx ring
- * @size: size of a buffer
- *
- * This function returns the current, received Rx buffer and does
- * DMA synchronization.
- *
- * Returns a pointer to the received Rx buffer.
- */
-static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size)
-{
- struct ice_rx_buf *rx_buf;
-
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
-
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0,
- size, DMA_BIDIRECTIONAL);
-
- return rx_buf;
-}
-
-/**
- * ice_reuse_rx_buf_zc - reuse an Rx buffer
- * @rx_ring: Rx ring
- * @old_buf: The buffer to recycle
- *
- * This function recycles a finished Rx buffer, and places it on the recycle
- * queue (next_to_alloc).
- */
-static void
-ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
-{
- unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
- u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
- u16 nta = rx_ring->next_to_alloc;
- struct ice_rx_buf *new_buf;
-
- new_buf = &rx_ring->rx_buf[nta++];
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- new_buf->dma = old_buf->dma & mask;
- new_buf->dma += hr;
-
- new_buf->addr = (void *)((unsigned long)old_buf->addr & mask);
- new_buf->addr += hr;
-
- new_buf->handle = old_buf->handle & mask;
- new_buf->handle += rx_ring->xsk_umem->headroom;
-
- old_buf->addr = NULL;
-}
-
-/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
* @rx_buf: zero-copy Rx buffer
- * @xdp: XDP buffer
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int datasize_hard = xdp->data_end -
- xdp->data_hard_start;
+ unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
+ unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
+ unsigned int datasize_hard = rx_buf->xdp->data_end -
+ rx_buf->xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -769,13 +487,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- ice_reuse_rx_buf_zc(rx_ring, rx_buf);
-
+ xsk_buff_free(rx_buf->xdp);
+ rx_buf->xdp = NULL;
return skb;
}
@@ -802,7 +520,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
}
act = bpf_prog_run_xdp(xdp_prog, xdp);
- xdp->handle += xdp->data - xdp->data_hard_start;
switch (act) {
case XDP_PASS:
break;
@@ -842,9 +559,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_xmit = 0;
bool failure = false;
- struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -856,8 +570,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
u8 rx_ptype;
if (cleaned_count >= ICE_RX_BUF_WRITE) {
- failure |= ice_alloc_rx_bufs_fast_zc(rx_ring,
- cleaned_count);
+ failure |= ice_alloc_rx_bufs_zc(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -878,25 +592,19 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
- rx_buf = ice_get_rx_buf_zc(rx_ring, size);
- if (!rx_buf->addr)
- break;
- xdp.data = rx_buf->addr;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + size;
- xdp.handle = rx_buf->handle;
+ rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+ rx_buf->xdp->data_end = rx_buf->xdp->data + size;
+ xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
- xdp_res = ice_run_xdp_zc(rx_ring, &xdp);
+ xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
if (xdp_res) {
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
- rx_buf->addr = NULL;
- } else {
- ice_reuse_rx_buf_zc(rx_ring, rx_buf);
- }
+ else
+ xsk_buff_free(rx_buf->xdp);
+ rx_buf->xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
cleaned_count++;
@@ -906,7 +614,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
}
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp);
+ skb = ice_construct_skb_zc(rx_ring, rx_buf);
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
break;
@@ -977,17 +685,16 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
- dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
-
- dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
- DMA_BIDIRECTIONAL);
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ desc.len);
tx_buf->bytecount = desc.len;
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
- 0, desc.len, 0);
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
@@ -1163,11 +870,10 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
- if (!rx_buf->addr)
+ if (!rx_buf->xdp)
continue;
- xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle);
- rx_buf->addr = NULL;
+ rx_buf->xdp = NULL;
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 8a4ba7c6d549..fc1a06b4df36 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -10,11 +10,10 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
-void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
@@ -27,12 +26,6 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
return -EOPNOTSUPP;
}
-static inline void
-ice_zca_free(struct zero_copy_allocator __always_unused *zca,
- unsigned long __always_unused handle)
-{
-}
-
static inline int
ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
int __always_unused budget)
@@ -48,8 +41,8 @@ ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
}
static inline bool
-ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring,
- u16 __always_unused count)
+ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
+ u16 __always_unused count)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 79ee0a747260..3254737c07a3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -12,7 +12,7 @@
#include "igb.h"
static s32 igb_set_default_fc(struct e1000_hw *hw);
-static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
+static void igb_set_fc_watermarks(struct e1000_hw *hw);
/**
* igb_get_bus_info_pcie - Get PCIe bus information
@@ -687,7 +687,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
wr32(E1000_FCTTV, hw->fc.pause_time);
- ret_val = igb_set_fc_watermarks(hw);
+ igb_set_fc_watermarks(hw);
out:
@@ -723,9 +723,8 @@ void igb_config_collision_dist(struct e1000_hw *hw)
* flow control XON frame transmission is enabled, then set XON frame
 * transmission as well.
**/
-static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
+static void igb_set_fc_watermarks(struct e1000_hw *hw)
{
- s32 ret_val = 0;
u32 fcrtl = 0, fcrth = 0;
/* Set the flow control receive threshold registers. Normally,
@@ -747,8 +746,6 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
}
wr32(E1000_FCRTL, fcrtl);
wr32(E1000_FCRTH, fcrth);
-
- return ret_val;
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 39d3b76a6f5d..2cd003c5ad43 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev,
u32 speed;
u32 supported, advertising;
- status = rd32(E1000_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
supported = (SUPPORTED_10baseT_Half |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b46bff8fe056..8bb3db2cbd41 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3445,7 +3445,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_put_noidle(&pdev->dev);
return 0;
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index e3c164c12e10..1c3051db9085 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o igc_ptp.o igc_dump.o
+igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index a1f845a2aa80..5dbc5a156626 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -16,14 +16,210 @@
#include "igc_hw.h"
-/* forward declaration */
-void igc_set_ethtool_ops(struct net_device *);
+void igc_ethtool_set_ops(struct net_device *);
-struct igc_adapter;
-struct igc_ring;
+/* Transmit and receive queues */
+#define IGC_MAX_RX_QUEUES 4
+#define IGC_MAX_TX_QUEUES 4
+
+#define MAX_Q_VECTORS 8
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
+
+#define MAX_ETYPE_FILTER 8
+#define IGC_RETA_SIZE 128
+
+enum igc_mac_filter_type {
+ IGC_MAC_FILTER_TYPE_DST = 0,
+ IGC_MAC_FILTER_TYPE_SRC
+};
+
+struct igc_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 restart_queue2;
+};
+
+struct igc_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+ u64 csum_err;
+ u64 alloc_failed;
+};
+
+struct igc_rx_packet_stats {
+ u64 ipv4_packets; /* IPv4 headers processed */
+ u64 ipv4e_packets; /* IPv4E headers with extensions processed */
+ u64 ipv6_packets; /* IPv6 headers processed */
+ u64 ipv6e_packets; /* IPv6E headers with extensions processed */
+ u64 tcp_packets; /* TCP headers processed */
+ u64 udp_packets; /* UDP headers processed */
+ u64 sctp_packets; /* SCTP headers processed */
+ u64 nfs_packets; /* NFS headers processed */
+ u64 other_packets;
+};
+
+struct igc_ring_container {
+ struct igc_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct igc_ring {
+ struct igc_q_vector *q_vector; /* backlink to q_vector */
+ struct net_device *netdev; /* back pointer to net_device */
+ struct device *dev; /* device for dma mapping */
+ union { /* array of buffer info structs */
+ struct igc_tx_buffer *tx_buffer_info;
+ struct igc_rx_buffer *rx_buffer_info;
+ };
+ void *desc; /* descriptor ring memory */
+ unsigned long flags; /* ring specific flags */
+ void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
+
+ u16 count; /* number of desc. in the ring */
+ u8 queue_index; /* logical index of the ring */
+ u8 reg_idx; /* physical index of the ring */
+ bool launchtime_enable; /* true if LaunchTime is enabled */
+
+ u32 start_time;
+ u32 end_time;
+
+ /* everything past this point is written often */
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+
+ union {
+ /* TX */
+ struct {
+ struct igc_tx_queue_stats tx_stats;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync tx_syncp2;
+ };
+ /* RX */
+ struct {
+ struct igc_rx_queue_stats rx_stats;
+ struct igc_rx_packet_stats pkt_stats;
+ struct u64_stats_sync rx_syncp;
+ struct sk_buff *skb;
+ };
+ };
+} ____cacheline_internodealigned_in_smp;
+
+/* Board specific private data structure */
+struct igc_adapter {
+ struct net_device *netdev;
+
+ unsigned long state;
+ unsigned int flags;
+ unsigned int num_q_vectors;
+
+ struct msix_entry *msix_entries;
+
+ /* TX */
+ u16 tx_work_limit;
+ u32 tx_timeout_count;
+ int num_tx_queues;
+ struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
+
+ /* RX */
+ int num_rx_queues;
+ struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
+
+ struct timer_list watchdog_timer;
+ struct timer_list dma_err_timer;
+ struct timer_list phy_info_timer;
+
+ u32 wol;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+
+ u8 port_num;
+
+ u8 __iomem *io_addr;
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+ struct work_struct dma_err_task;
+ bool fc_autoneg;
+
+ u8 tx_timeout_factor;
+
+ int msg_enable;
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ ktime_t base_time;
+ ktime_t cycle_time;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+ /* lock for statistics */
+ spinlock_t stats64_lock;
+ struct rtnl_link_stats64 stats64;
+
+ /* structs defined in igc_hw.h */
+ struct igc_hw hw;
+ struct igc_hw_stats stats;
+
+ struct igc_q_vector *q_vector[MAX_Q_VECTORS];
+ u32 eims_enable_mask;
+ u32 eims_other;
+
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 rx_hwtstamp_cleared;
+
+ u32 rss_queues;
+ u32 rss_indir_tbl_init;
+
+ /* Any access to elements in nfc_rule_list is protected by the
+ * nfc_rule_lock.
+ */
+ struct mutex nfc_rule_lock;
+ struct list_head nfc_rule_list;
+ unsigned int nfc_rule_count;
+
+ u8 rss_indir_tbl[IGC_RETA_SIZE];
+
+ unsigned long link_check_timeout;
+ struct igc_info ei;
+
+ u32 test_icr;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
+ unsigned int ptp_flags;
+ /* System time value lock */
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+};
void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
+int igc_open(struct net_device *netdev);
+int igc_close(struct net_device *netdev);
int igc_setup_tx_resources(struct igc_ring *ring);
int igc_setup_rx_resources(struct igc_ring *ring);
void igc_free_tx_resources(struct igc_ring *ring);
@@ -36,10 +232,6 @@ void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
-int igc_add_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags);
-int igc_del_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags);
void igc_update_stats(struct igc_adapter *adapter);
/* igc_dump declarations */
@@ -50,14 +242,10 @@ extern char igc_driver_name[];
extern char igc_driver_version[];
#define IGC_REGS_LEN 740
-#define IGC_RETA_SIZE 128
/* flags controlling PTP/1588 function */
#define IGC_PTP_ENABLED BIT(0)
-/* Interrupt defines */
-#define IGC_START_ITR 648 /* ~6000 ints/sec */
-
/* Flags definitions */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
@@ -70,6 +258,7 @@ extern char igc_driver_version[];
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
+#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -78,6 +267,7 @@ extern char igc_driver_version[];
#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
#define IGC_20K_ITR 196
@@ -99,13 +289,6 @@ extern char igc_driver_version[];
#define IGC_MIN_RXD 80
#define IGC_MAX_RXD 4096
-/* Transmit and receive queues */
-#define IGC_MAX_RX_QUEUES 4
-#define IGC_MAX_TX_QUEUES 4
-
-#define MAX_Q_VECTORS 8
-#define MAX_STD_JUMBO_FRAME_SIZE 9216
-
/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256 256
#define IGC_RXBUFFER_2048 2048
@@ -232,83 +415,6 @@ struct igc_rx_buffer {
__u16 pagecnt_bias;
};
-struct igc_tx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 restart_queue;
- u64 restart_queue2;
-};
-
-struct igc_rx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 drops;
- u64 csum_err;
- u64 alloc_failed;
-};
-
-struct igc_rx_packet_stats {
- u64 ipv4_packets; /* IPv4 headers processed */
- u64 ipv4e_packets; /* IPv4E headers with extensions processed */
- u64 ipv6_packets; /* IPv6 headers processed */
- u64 ipv6e_packets; /* IPv6E headers with extensions processed */
- u64 tcp_packets; /* TCP headers processed */
- u64 udp_packets; /* UDP headers processed */
- u64 sctp_packets; /* SCTP headers processed */
- u64 nfs_packets; /* NFS headers processe */
- u64 other_packets;
-};
-
-struct igc_ring_container {
- struct igc_ring *ring; /* pointer to linked list of rings */
- unsigned int total_bytes; /* total bytes processed this int */
- unsigned int total_packets; /* total packets processed this int */
- u16 work_limit; /* total work allowed per interrupt */
- u8 count; /* total number of rings in vector */
- u8 itr; /* current ITR setting for ring */
-};
-
-struct igc_ring {
- struct igc_q_vector *q_vector; /* backlink to q_vector */
- struct net_device *netdev; /* back pointer to net_device */
- struct device *dev; /* device for dma mapping */
- union { /* array of buffer info structs */
- struct igc_tx_buffer *tx_buffer_info;
- struct igc_rx_buffer *rx_buffer_info;
- };
- void *desc; /* descriptor ring memory */
- unsigned long flags; /* ring specific flags */
- void __iomem *tail; /* pointer to ring tail register */
- dma_addr_t dma; /* phys address of the ring */
- unsigned int size; /* length of desc. ring in bytes */
-
- u16 count; /* number of desc. in the ring */
- u8 queue_index; /* logical index of the ring*/
- u8 reg_idx; /* physical index of the ring */
- bool launchtime_enable; /* true if LaunchTime is enabled */
-
- /* everything past this point are written often */
- u16 next_to_clean;
- u16 next_to_use;
- u16 next_to_alloc;
-
- union {
- /* TX */
- struct {
- struct igc_tx_queue_stats tx_stats;
- struct u64_stats_sync tx_syncp;
- struct u64_stats_sync tx_syncp2;
- };
- /* RX */
- struct {
- struct igc_rx_queue_stats rx_stats;
- struct igc_rx_packet_stats pkt_stats;
- struct u64_stats_sync rx_syncp;
- struct sk_buff *skb;
- };
- };
-} ____cacheline_internodealigned_in_smp;
-
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
@@ -329,8 +435,6 @@ struct igc_q_vector {
struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
};
-#define MAX_ETYPE_FILTER (4 - 1)
-
enum igc_filter_match_flags {
IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
IGC_FILTER_FLAG_VLAN_TCI = 0x2,
@@ -338,143 +442,25 @@ enum igc_filter_match_flags {
IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
};
-/* RX network flow classification data structure */
-struct igc_nfc_input {
- /* Byte layout in order, all values with MSB first:
- * match_flags - 1 byte
- * etype - 2 bytes
- * vlan_tci - 2 bytes
- */
+struct igc_nfc_filter {
u8 match_flags;
- __be16 etype;
- __be16 vlan_tci;
+ u16 etype;
+ u16 vlan_tci;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
};
-struct igc_nfc_filter {
- struct hlist_node nfc_node;
- struct igc_nfc_input filter;
- unsigned long cookie;
- u16 etype_reg_index;
- u16 sw_idx;
+struct igc_nfc_rule {
+ struct list_head list;
+ struct igc_nfc_filter filter;
+ u32 location;
u16 action;
};
-struct igc_mac_addr {
- u8 addr[ETH_ALEN];
- u8 queue;
- u8 state; /* bitmask */
-};
-
-#define IGC_MAC_STATE_DEFAULT 0x1
-#define IGC_MAC_STATE_IN_USE 0x2
-#define IGC_MAC_STATE_SRC_ADDR 0x4
-#define IGC_MAC_STATE_QUEUE_STEERING 0x8
-
-#define IGC_MAX_RXNFC_FILTERS 16
-
-/* Board specific private data structure */
-struct igc_adapter {
- struct net_device *netdev;
-
- unsigned long state;
- unsigned int flags;
- unsigned int num_q_vectors;
-
- struct msix_entry *msix_entries;
-
- /* TX */
- u16 tx_work_limit;
- u32 tx_timeout_count;
- int num_tx_queues;
- struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
-
- /* RX */
- int num_rx_queues;
- struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
-
- struct timer_list watchdog_timer;
- struct timer_list dma_err_timer;
- struct timer_list phy_info_timer;
-
- u32 wol;
- u32 en_mng_pt;
- u16 link_speed;
- u16 link_duplex;
-
- u8 port_num;
-
- u8 __iomem *io_addr;
- /* Interrupt Throttle Rate */
- u32 rx_itr_setting;
- u32 tx_itr_setting;
-
- struct work_struct reset_task;
- struct work_struct watchdog_task;
- struct work_struct dma_err_task;
- bool fc_autoneg;
-
- u8 tx_timeout_factor;
-
- int msg_enable;
- u32 max_frame_size;
- u32 min_frame_size;
-
- /* OS defined structs */
- struct pci_dev *pdev;
- /* lock for statistics */
- spinlock_t stats64_lock;
- struct rtnl_link_stats64 stats64;
-
- /* structs defined in igc_hw.h */
- struct igc_hw hw;
- struct igc_hw_stats stats;
-
- struct igc_q_vector *q_vector[MAX_Q_VECTORS];
- u32 eims_enable_mask;
- u32 eims_other;
-
- u16 tx_ring_count;
- u16 rx_ring_count;
-
- u32 tx_hwtstamp_timeouts;
- u32 tx_hwtstamp_skipped;
- u32 rx_hwtstamp_cleared;
-
- u32 rss_queues;
- u32 rss_indir_tbl_init;
-
- /* RX network flow classification support */
- struct hlist_head nfc_filter_list;
- struct hlist_head cls_flower_list;
- unsigned int nfc_filter_count;
-
- /* lock for RX network flow classification filter */
- spinlock_t nfc_lock;
- bool etype_bitmap[MAX_ETYPE_FILTER];
-
- struct igc_mac_addr *mac_table;
-
- u8 rss_indir_tbl[IGC_RETA_SIZE];
-
- unsigned long link_check_timeout;
- struct igc_info ei;
-
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info ptp_caps;
- struct work_struct ptp_tx_work;
- struct sk_buff *ptp_tx_skb;
- struct hwtstamp_config tstamp_config;
- unsigned long ptp_tx_start;
- unsigned long last_rx_ptp_check;
- unsigned long last_rx_timestamp;
- unsigned int ptp_flags;
- /* System time value lock */
- spinlock_t tmreg_lock;
- struct cyclecounter cc;
- struct timecounter tc;
-};
+/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
+ * based, and 8 ethertype based.
+ */
+#define IGC_MAX_RXNFC_RULES 32
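The 32-rule budget decomposes exactly as the comment describes; a hypothetical compile-time check (illustrative only, not part of the driver):

/* 16 MAC-address rules + 8 VLAN-priority rules + 8 ethertype rules */
static_assert(IGC_MAX_RXNFC_RULES == 16 + 8 + 8);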
/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
@@ -550,12 +536,11 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
return 0;
}
-/* forward declaration */
void igc_reinit_locked(struct igc_adapter *);
-int igc_add_filter(struct igc_adapter *adapter,
- struct igc_nfc_filter *input);
-int igc_erase_filter(struct igc_adapter *adapter,
- struct igc_nfc_filter *input);
+struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
+ u32 location);
+int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
+void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 5a506440560a..cc5a6cf531c7 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -26,7 +26,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
*/
ret_val = igc_disable_pcie_master(hw);
if (ret_val)
- hw_dbg("PCI-E Master disable polling has failed.\n");
+ hw_dbg("PCI-E Master disable polling has failed\n");
hw_dbg("Masking off all interrupts\n");
wr32(IGC_IMC, 0xffffffff);
@@ -177,7 +177,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
*/
ret_val = hw->phy.ops.reset(hw);
if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
+ hw_dbg("Error resetting the PHY\n");
goto out;
}
@@ -212,6 +212,9 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_I:
case IGC_DEV_ID_I220_V:
case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_K2:
+ case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I225_IT:
case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
@@ -364,7 +367,7 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)
}
if (ms_wait == 10)
- pr_debug("Queue disable timed out after 10ms\n");
+ hw_dbg("Queue disable timed out after 10ms\n");
/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
* incoming packets are rejected. Set enable and wait 2ms so that
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 4ddccccf42cc..186deb1d9375 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -44,13 +44,9 @@
/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
#define IGC_WUPM_BYTES 128
-/* Physical Func Reset Done Indication */
-#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
-#define PHY_FORCE_LIMIT 20
/* Number of 100 microseconds we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
@@ -66,8 +62,14 @@
* (RAR[15]) for our directed address used by controllers with
* manageability enabled, allowing us room for 15 multicast addresses.
*/
+#define IGC_RAH_RAH_MASK 0x0000FFFF
+#define IGC_RAH_ASEL_MASK 0x00030000
+#define IGC_RAH_ASEL_SRC_ADDR BIT(16)
+#define IGC_RAH_QSEL_MASK 0x000C0000
+#define IGC_RAH_QSEL_SHIFT 18
+#define IGC_RAH_QSEL_ENABLE BIT(28)
#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */
-#define IGC_RAH_POOL_1 0x00040000
+
#define IGC_RAL_MAC_ADDR_LEN 4
#define IGC_RAH_MAC_ADDR_LEN 2
@@ -94,8 +96,6 @@
#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define IGC_CONNSW_AUTOSENSE_EN 0x1
-
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
@@ -166,11 +166,6 @@
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
-
-#define NVM_PBA_OFFSET_0 8
-#define NVM_PBA_OFFSET_1 9
-#define NVM_RESERVED_WORD 0xFFFF
-#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* Collision related configuration parameters */
@@ -252,7 +247,6 @@
/* Interrupt Cause Set */
#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */
#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */
-#define IGC_ICS_DRSTA IGC_ICR_DRSTA /* Device Reset Aserted */
#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
@@ -271,21 +265,13 @@
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define IGC_TXD_CMD_RS 0x08000000 /* Report Status */
-#define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define IGC_TXD_STAT_EC 0x00000002 /* Excess Collisions */
-#define IGC_TXD_STAT_LC 0x00000004 /* Late Collisions */
-#define IGC_TXD_STAT_TU 0x00000008 /* Transmit underrun */
#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */
#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */
#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
-#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* IPSec Encrypt Enable */
@@ -377,6 +363,11 @@
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */
+
+#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
+#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+
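Both DTXMXPKTSZ values are programmed in 64-byte units, which is how the hex constants map to the byte counts in the comments; an illustrative check (the BUILD_BUG_ON is a sketch and would need function scope):

BUILD_BUG_ON(IGC_DTXMXPKTSZ_TSN * 64 != 1600);     /* 0x19 = 25,  25 * 64 = 1600 */
BUILD_BUG_ON(IGC_DTXMXPKTSZ_DEFAULT * 64 != 9728); /* 0x98 = 152, 152 * 64 = 9728 */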
/* Time Sync Interrupt Causes */
#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */
@@ -387,9 +378,6 @@
#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS
-/* PTP Queue Filter */
-#define IGC_ETQF_1588 BIT(30)
-
#define IGC_FTQF_VF_BP 0x00008000
#define IGC_FTQF_1588_TIME_STAMP 0x08000000
#define IGC_FTQF_MASK 0xF0000000
@@ -431,6 +419,14 @@
#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */
+/* Transmit Scheduling */
+#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
+#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
+
+#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
+#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
+#define IGC_TXQCTL_STRICT_END 0x00000004
+
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
@@ -497,16 +493,15 @@
#define IGC_MDIC_READY 0x10000000
#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
-#define IGC_MDIC_DEST 0x80000000
#define IGC_N0_QUEUE -1
#define IGC_MAX_MAC_HDR_LEN 127
#define IGC_MAX_NETWORK_HDR_LEN 511
-#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4))
-#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4))
-#define IGC_VLAPQF_QUEUE_MASK 0x03
+#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4))
+#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4))
+#define IGC_VLANPQF_QUEUE_MASK 0x03
#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */
diff --git a/drivers/net/ethernet/intel/igc/igc_diag.c b/drivers/net/ethernet/intel/igc/igc_diag.c
new file mode 100644
index 000000000000..cc621970c0cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_diag.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Intel Corporation */
+
+#include "igc.h"
+#include "igc_diag.h"
+
+static struct igc_reg_test reg_test[] = {
+ { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
+ { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB },
+ { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF },
+ { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { IGC_RA, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { IGC_RA, 16, TABLE64_TEST_HI,
+ 0x900FFFFF, 0xFFFFFFFF },
+ { IGC_MTA, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0}
+};
+
+static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 pat, val, before;
+ static const u32 test_pattern[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
+
+ for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+ before = rd32(reg);
+ wr32(reg, test_pattern[pat] & write);
+ val = rd32(reg);
+ if (val != (test_pattern[pat] & write & mask)) {
+ netdev_err(adapter->netdev,
+ "pattern test reg %04X failed: got 0x%08X expected 0x%08X",
+ reg, val, test_pattern[pat] & write & mask);
+ *data = reg;
+ wr32(reg, before);
+ return false;
+ }
+ wr32(reg, before);
+ }
+ return true;
+}
+
+static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 val, before;
+
+ before = rd32(reg);
+ wr32(reg, write & mask);
+ val = rd32(reg);
+ if ((write & mask) != (val & mask)) {
+ netdev_err(adapter->netdev,
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X",
+ reg, (val & mask), (write & mask));
+ *data = reg;
+ wr32(reg, before);
+ return false;
+ }
+ wr32(reg, before);
+ return true;
+}
+
+bool igc_reg_test(struct igc_adapter *adapter, u64 *data)
+{
+ struct igc_reg_test *test = reg_test;
+ struct igc_hw *hw = &adapter->hw;
+ u32 value, before, after;
+ u32 i, toggle, b = false;
+
+ /* Because the status register is such a special case,
+ * we handle it separately from the rest of the register
+ * tests. Some bits are read-only, some toggle, and some
+ * are writeable.
+ */
+ toggle = 0x6800D3;
+ before = rd32(IGC_STATUS);
+ value = before & toggle;
+ wr32(IGC_STATUS, toggle);
+ after = rd32(IGC_STATUS) & toggle;
+ if (value != after) {
+ netdev_err(adapter->netdev,
+ "failed STATUS register test got: 0x%08X expected: 0x%08X",
+ after, value);
+ *data = 1;
+ return false;
+ }
+ /* restore previous status */
+ wr32(IGC_STATUS, before);
+
+ /* Perform the remainder of the register test, looping through
+ * the test table until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ b = reg_set_and_check(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ b = reg_pattern_test(adapter, data,
+ test->reg + 4 + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE32_TEST:
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ }
+ if (!b)
+ return false;
+ }
+ test++;
+ }
+ *data = 0;
+ return true;
+}
+
+bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ *data = 0;
+
+ if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) {
+ *data = 1;
+ return false;
+ }
+
+ return true;
+}
+
+bool igc_link_test(struct igc_adapter *adapter, u64 *data)
+{
+ bool link_up;
+
+ *data = 0;
+
+ /* add delay to give enough time for autonegotiation to finish */
+ if (adapter->hw.mac.autoneg)
+ ssleep(5);
+
+ link_up = igc_has_link(adapter);
+ if (!link_up) {
+ *data = 1;
+ return false;
+ }
+
+ return true;
+}
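These three diagnostics are designed to be chained from the ethtool self-test hook; a hypothetical caller sketch (the real wiring lives in igc_ethtool.c, not shown in this hunk):

u64 data[3];

if (!igc_link_test(adapter, &data[0]))
	netdev_warn(adapter->netdev, "link test failed\n");
if (!igc_reg_test(adapter, &data[1]))
	netdev_warn(adapter->netdev, "register test failed\n");
if (!igc_eeprom_test(adapter, &data[2]))
	netdev_warn(adapter->netdev, "eeprom test failed\n");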
diff --git a/drivers/net/ethernet/intel/igc/igc_diag.h b/drivers/net/ethernet/intel/igc/igc_diag.h
new file mode 100644
index 000000000000..600658e33bec
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_diag.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 Intel Corporation */
+
+bool igc_reg_test(struct igc_adapter *adapter, u64 *data);
+bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data);
+bool igc_link_test(struct igc_adapter *adapter, u64 *data);
+
+struct igc_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define TABLE32_TEST 3
+#define TABLE64_TEST_LO 4
+#define TABLE64_TEST_HI 5
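The header comment above describes three register layouts; in sketch form, the per-element address math igc_reg_test() applies for each test_type (mirroring the switch in igc_diag.c earlier in this patch):

u32 arr = test->reg + (i * 0x40);    /* PATTERN/SET_READ on 0x40-spaced arrays */
u32 lo  = test->reg + (i * 8);       /* TABLE64_TEST_LO: low dword of 64-bit entry */
u32 hi  = test->reg + 4 + (i * 8);   /* TABLE64_TEST_HI: high dword */
u32 t32 = test->reg + (i * 4);       /* TABLE32_TEST: packed 32-bit table */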
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
index 657ab50ae296..4b9ec7d0b727 100644
--- a/drivers/net/ethernet/intel/igc/igc_dump.c
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -35,10 +35,6 @@ static const struct igc_reg_info igc_reg_info_tbl[] = {
{IGC_TDH(0), "TDH"},
{IGC_TDT(0), "TDT"},
{IGC_TXDCTL(0), "TXDCTL"},
- {IGC_TDFH, "TDFH"},
- {IGC_TDFT, "TDFT"},
- {IGC_TDFHS, "TDFHS"},
- {IGC_TDFPC, "TDFPC"},
/* List Terminator */
{}
@@ -47,6 +43,7 @@ static const struct igc_reg_info igc_reg_info_tbl[] = {
/* igc_regdump - register printout routine */
static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
{
+ struct net_device *dev = igc_get_hw_dev(hw);
int n = 0;
char rname[16];
u32 regs[8];
@@ -101,13 +98,14 @@ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
regs[n] = rd32(IGC_TXDCTL(n));
break;
default:
- pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
+ netdev_info(dev, "%-15s %08x\n", reginfo->name,
+ rd32(reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
- pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
- regs[2], regs[3]);
+ netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
}
/* igc_rings_dump - Tx-rings and Rx-rings */
@@ -125,39 +123,34 @@ void igc_rings_dump(struct igc_adapter *adapter)
if (!netif_msg_hw(adapter))
return;
- /* Print netdevice Info */
- if (netdev) {
- dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start\n");
- pr_info("%-15s %016lX %016lX\n", netdev->name,
- netdev->state, dev_trans_start(netdev));
- }
+ netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n",
+ netdev->state, dev_trans_start(netdev));
/* Print TX Ring Summary */
- if (!netdev || !netif_running(netdev))
+ if (!netif_running(netdev))
goto exit;
- dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ netdev_info(netdev, "TX Rings Summary\n");
+ netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
struct igc_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
- n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)dma_unmap_addr(buffer_info, dma),
- dma_unmap_len(buffer_info, len),
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp);
+ netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
}
/* Print TX Rings */
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
- dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+ netdev_info(netdev, "TX Rings Dump\n");
/* Transmit Descriptor Formats
*
@@ -172,10 +165,11 @@ void igc_rings_dump(struct igc_adapter *adapter)
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
- pr_info("------------------------------------\n");
- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
- pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
+ netdev_info(netdev, "------------------------------------\n");
+ netdev_info(netdev, "TX QUEUE INDEX = %d\n",
+ tx_ring->queue_index);
+ netdev_info(netdev, "------------------------------------\n");
+ netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
@@ -194,14 +188,14 @@ void igc_rings_dump(struct igc_adapter *adapter)
else
next_desc = "";
- pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
- i, le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- (u64)dma_unmap_addr(buffer_info, dma),
- dma_unmap_len(buffer_info, len),
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp,
- buffer_info->skb, next_desc);
+ netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "",
@@ -214,19 +208,19 @@ void igc_rings_dump(struct igc_adapter *adapter)
/* Print RX Rings Summary */
rx_ring_summary:
- dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
- pr_info("Queue [NTU] [NTC]\n");
+ netdev_info(netdev, "RX Rings Summary\n");
+ netdev_info(netdev, "Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- pr_info(" %5d %5X %5X\n",
- n, rx_ring->next_to_use, rx_ring->next_to_clean);
+ netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use,
+ rx_ring->next_to_clean);
}
/* Print RX Rings */
if (!netif_msg_rx_status(adapter))
goto exit;
- dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ netdev_info(netdev, "RX Rings Dump\n");
/* Advanced Receive Descriptor (Read) Format
* 63 1 0
@@ -251,11 +245,12 @@ rx_ring_summary:
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- pr_info("------------------------------------\n");
- pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
- pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
+ netdev_info(netdev, "------------------------------------\n");
+ netdev_info(netdev, "RX QUEUE INDEX = %d\n",
+ rx_ring->queue_index);
+ netdev_info(netdev, "------------------------------------\n");
+ netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
@@ -275,18 +270,18 @@ rx_ring_summary:
if (staterr & IGC_RXD_STAT_DD) {
/* Descriptor Done */
- pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
- "RWB", i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- next_desc);
+ netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n",
+ "RWB", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ next_desc);
} else {
- pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
- "R ", i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
- next_desc);
+ netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n",
+ "R ", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ next_desc);
if (netif_msg_pktdata(adapter) &&
buffer_info->dma && buffer_info->page) {
@@ -314,8 +309,8 @@ void igc_regs_dump(struct igc_adapter *adapter)
struct igc_reg_info *reginfo;
/* Print Registers */
- dev_info(&adapter->pdev->dev, "Register Dump\n");
- pr_info(" Register Name Value\n");
+ netdev_info(adapter->netdev, "Register Dump\n");
+ netdev_info(adapter->netdev, "Register Name Value\n");
for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl;
reginfo->name; reginfo++) {
igc_regdump(hw, reginfo);
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index f530fc29b074..a938ec8db681 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -6,6 +6,7 @@
#include <linux/pm_runtime.h>
#include "igc.h"
+#include "igc_diag.h"
/* forward declaration */
struct igc_stats {
@@ -123,8 +124,8 @@ static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)
-static void igc_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+static void igc_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -138,13 +139,13 @@ static void igc_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN;
}
-static int igc_get_regs_len(struct net_device *netdev)
+static int igc_ethtool_get_regs_len(struct net_device *netdev)
{
return IGC_REGS_LEN * sizeof(u32);
}
-static void igc_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+static void igc_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -153,7 +154,7 @@ static void igc_get_regs(struct net_device *netdev,
memset(p, 0, IGC_REGS_LEN * sizeof(u32));
- regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = rd32(IGC_CTRL);
@@ -306,9 +307,24 @@ static void igc_get_regs(struct net_device *netdev,
regs_buff[164 + i] = rd32(IGC_TDT(i));
for (i = 0; i < 4; i++)
regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
+
+ /* XXX: Due to a bug a few lines above, RAL and RAH registers are
+ * overwritten. To preserve the ABI, we write these registers again in
+ * regs_buff.
+ */
+ for (i = 0; i < 16; i++)
+ regs_buff[172 + i] = rd32(IGC_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[188 + i] = rd32(IGC_RAH(i));
+
+ regs_buff[204] = rd32(IGC_VLANPQF);
+
+ for (i = 0; i < 8; i++)
+ regs_buff[205 + i] = rd32(IGC_ETQF(i));
}
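
The version word written above encodes the dump layout in its top byte (bumped from 1 to 2 here because new registers were appended to regs_buff), followed by the hardware revision ID and the PCI device ID, so userspace tools can distinguish the layouts.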
-static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void igc_ethtool_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -339,7 +355,8 @@ static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
wol->wolopts |= WAKE_PHY;
}
-static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int igc_ethtool_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -367,21 +384,21 @@ static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-static u32 igc_get_msglevel(struct net_device *netdev)
+static u32 igc_ethtool_get_msglevel(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable;
}
-static void igc_set_msglevel(struct net_device *netdev, u32 data)
+static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data)
{
struct igc_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
}
-static int igc_nway_reset(struct net_device *netdev)
+static int igc_ethtool_nway_reset(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -390,7 +407,7 @@ static int igc_nway_reset(struct net_device *netdev)
return 0;
}
-static u32 igc_get_link(struct net_device *netdev)
+static u32 igc_ethtool_get_link(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_mac_info *mac = &adapter->hw.mac;
@@ -407,15 +424,15 @@ static u32 igc_get_link(struct net_device *netdev)
return igc_has_link(adapter);
}
-static int igc_get_eeprom_len(struct net_device *netdev)
+static int igc_ethtool_get_eeprom_len(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
return adapter->hw.nvm.word_size * 2;
}
-static int igc_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+static int igc_ethtool_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -461,8 +478,8 @@ static int igc_get_eeprom(struct net_device *netdev,
return ret_val;
}
-static int igc_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+static int igc_ethtool_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -529,8 +546,8 @@ static int igc_set_eeprom(struct net_device *netdev,
return ret_val;
}
-static void igc_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void igc_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -540,8 +557,8 @@ static void igc_get_ringparam(struct net_device *netdev,
ring->tx_pending = adapter->tx_ring_count;
}
-static int igc_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static int igc_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_ring *temp_ring;
@@ -655,8 +672,8 @@ clear_reset:
return err;
}
-static void igc_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+static void igc_ethtool_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -674,8 +691,8 @@ static void igc_get_pauseparam(struct net_device *netdev,
}
}
-static int igc_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+static int igc_ethtool_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -714,7 +731,8 @@ static int igc_set_pauseparam(struct net_device *netdev,
return retval;
}
-static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u8 *p = data;
@@ -765,7 +783,7 @@ static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
-static int igc_get_sset_count(struct net_device *netdev, int sset)
+static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
@@ -779,7 +797,7 @@ static int igc_get_sset_count(struct net_device *netdev, int sset)
}
}
-static void igc_get_ethtool_stats(struct net_device *netdev,
+static void igc_ethtool_get_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -835,8 +853,8 @@ static void igc_get_ethtool_stats(struct net_device *netdev,
spin_unlock(&adapter->stats64_lock);
}
-static int igc_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+static int igc_ethtool_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -855,8 +873,8 @@ static int igc_get_coalesce(struct net_device *netdev,
return 0;
}
-static int igc_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+static int igc_ethtool_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
{
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
@@ -913,81 +931,83 @@ static int igc_set_coalesce(struct net_device *netdev,
}
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
-static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
- struct igc_nfc_filter *rule = NULL;
+ struct igc_nfc_rule *rule = NULL;
- /* report total rule count */
- cmd->data = IGC_MAX_RXNFC_FILTERS;
+ cmd->data = IGC_MAX_RXNFC_RULES;
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
- if (fsp->location <= rule->sw_idx)
- break;
+ mutex_lock(&adapter->nfc_rule_lock);
+
+ rule = igc_get_nfc_rule(adapter, fsp->location);
+ if (!rule)
+ goto out;
+
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->action;
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype);
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
}
- if (!rule || fsp->location != rule->sw_idx)
- return -EINVAL;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci);
+ fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ }
- if (rule->filter.match_flags) {
- fsp->flow_type = ETHER_FLOW;
- fsp->ring_cookie = rule->action;
- if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
- fsp->h_u.ether_spec.h_proto = rule->filter.etype;
- fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
- }
- if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
- fsp->flow_type |= FLOW_EXT;
- fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
- fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
- }
- if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
- ether_addr_copy(fsp->h_u.ether_spec.h_dest,
- rule->filter.dst_addr);
- /* As we only support matching by the full
- * mask, return the mask to userspace
- */
- eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
- }
- if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
- ether_addr_copy(fsp->h_u.ether_spec.h_source,
- rule->filter.src_addr);
- /* As we only support matching by the full
- * mask, return the mask to userspace
- */
- eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
- }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ rule->filter.dst_addr);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
- return 0;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_source,
+ rule->filter.src_addr);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
}
+
+ mutex_unlock(&adapter->nfc_rule_lock);
+ return 0;
+
+out:
+ mutex_unlock(&adapter->nfc_rule_lock);
return -EINVAL;
}
-static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
{
- struct igc_nfc_filter *rule;
+ struct igc_nfc_rule *rule;
int cnt = 0;
- /* report total rule count */
- cmd->data = IGC_MAX_RXNFC_FILTERS;
+ cmd->data = IGC_MAX_RXNFC_RULES;
+
+ mutex_lock(&adapter->nfc_rule_lock);
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
- if (cnt == cmd->rule_cnt)
+ list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
+ if (cnt == cmd->rule_cnt) {
+ mutex_unlock(&adapter->nfc_rule_lock);
return -EMSGSIZE;
- rule_locs[cnt] = rule->sw_idx;
+ }
+ rule_locs[cnt] = rule->location;
cnt++;
}
+ mutex_unlock(&adapter->nfc_rule_lock);
+
cmd->rule_cnt = cnt;
return 0;
}
-static int igc_get_rss_hash_opts(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
cmd->data = 0;
@@ -1036,41 +1056,33 @@ static int igc_get_rss_hash_opts(struct igc_adapter *adapter,
return 0;
}
-static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static int igc_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct igc_adapter *adapter = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_queues;
- ret = 0;
- break;
+ return 0;
case ETHTOOL_GRXCLSRLCNT:
- cmd->rule_cnt = adapter->nfc_filter_count;
- ret = 0;
- break;
+ cmd->rule_cnt = adapter->nfc_rule_count;
+ return 0;
case ETHTOOL_GRXCLSRULE:
- ret = igc_get_ethtool_nfc_entry(adapter, cmd);
- break;
+ return igc_ethtool_get_nfc_rule(adapter, cmd);
case ETHTOOL_GRXCLSRLALL:
- ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs);
- break;
+ return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs);
case ETHTOOL_GRXFH:
- ret = igc_get_rss_hash_opts(adapter, cmd);
- break;
+ return igc_ethtool_get_rss_hash_opts(adapter, cmd);
default:
- break;
+ return -EOPNOTSUPP;
}
-
- return ret;
}
#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
IGC_FLAG_RSS_FIELD_IPV6_UDP)
-static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
{
u32 flags = adapter->flags;
@@ -1145,8 +1157,8 @@ static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
if ((flags & UDP_RSS_FLAGS) &&
!(adapter->flags & UDP_RSS_FLAGS))
- dev_err(&adapter->pdev->dev,
- "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+ netdev_err(adapter->netdev,
+ "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
adapter->flags = flags;
@@ -1171,344 +1183,184 @@ static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
return 0;
}
-static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter,
- struct igc_nfc_filter *input)
+static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
+ const struct ethtool_rx_flow_spec *fsp)
{
- struct igc_hw *hw = &adapter->hw;
- u8 i;
- u32 etqf;
- u16 etype;
-
- /* find an empty etype filter register */
- for (i = 0; i < MAX_ETYPE_FILTER; ++i) {
- if (!adapter->etype_bitmap[i])
- break;
- }
- if (i == MAX_ETYPE_FILTER) {
- dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n");
- return -EINVAL;
- }
+ INIT_LIST_HEAD(&rule->list);
- adapter->etype_bitmap[i] = true;
+ rule->action = fsp->ring_cookie;
+ rule->location = fsp->location;
- etqf = rd32(IGC_ETQF(i));
- etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK);
-
- etqf |= IGC_ETQF_FILTER_ENABLE;
- etqf &= ~IGC_ETQF_ETYPE_MASK;
- etqf |= (etype & IGC_ETQF_ETYPE_MASK);
-
- etqf &= ~IGC_ETQF_QUEUE_MASK;
- etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT)
- & IGC_ETQF_QUEUE_MASK);
- etqf |= IGC_ETQF_QUEUE_ENABLE;
-
- wr32(IGC_ETQF(i), etqf);
-
- input->etype_reg_index = i;
-
- return 0;
-}
-
-static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter,
- struct igc_nfc_filter *input)
-{
- struct igc_hw *hw = &adapter->hw;
- u8 vlan_priority;
- u16 queue_index;
- u32 vlapqf;
-
- vlapqf = rd32(IGC_VLAPQF);
- vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
- >> VLAN_PRIO_SHIFT;
- queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK;
-
- /* check whether this vlan prio is already set */
- if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) &&
- queue_index != input->action) {
- dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n");
- return -EEXIST;
- }
-
- vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority);
- vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action);
-
- wr32(IGC_VLAPQF, vlapqf);
-
- return 0;
-}
-
-int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
-{
- struct igc_hw *hw = &adapter->hw;
- int err = -EINVAL;
-
- if (hw->mac.type == igc_i225 &&
- !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) {
- dev_err(&adapter->pdev->dev,
- "i225 doesn't support flow classification rules specifying only source addresses.\n");
- return -EOPNOTSUPP;
+ if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci);
+ rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
}
- if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
- err = igc_rxnfc_write_etype_filter(adapter, input);
- if (err)
- return err;
+ if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
+ rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto);
+ rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE;
}
- if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
- err = igc_add_mac_steering_filter(adapter,
- input->filter.dst_addr,
- input->action, 0);
- err = min_t(int, err, 0);
- if (err)
- return err;
+ /* Both source and destination address filters only support the full
+ * mask.
+ */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
+ rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(rule->filter.src_addr,
+ fsp->h_u.ether_spec.h_source);
}
- if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
- err = igc_add_mac_steering_filter(adapter,
- input->filter.src_addr,
- input->action,
- IGC_MAC_STATE_SRC_ADDR);
- err = min_t(int, err, 0);
- if (err)
- return err;
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(rule->filter.dst_addr,
+ fsp->h_u.ether_spec.h_dest);
}
-
- if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
- err = igc_rxnfc_write_vlan_prio_filter(adapter, input);
-
- return err;
-}
-
-static void igc_clear_etype_filter_regs(struct igc_adapter *adapter,
- u16 reg_index)
-{
- struct igc_hw *hw = &adapter->hw;
- u32 etqf = rd32(IGC_ETQF(reg_index));
-
- etqf &= ~IGC_ETQF_QUEUE_ENABLE;
- etqf &= ~IGC_ETQF_QUEUE_MASK;
- etqf &= ~IGC_ETQF_FILTER_ENABLE;
-
- wr32(IGC_ETQF(reg_index), etqf);
-
- adapter->etype_bitmap[reg_index] = false;
-}
-
-static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter,
- u16 vlan_tci)
-{
- struct igc_hw *hw = &adapter->hw;
- u8 vlan_priority;
- u32 vlapqf;
-
- vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-
- vlapqf = rd32(IGC_VLAPQF);
- vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority);
- vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority,
- IGC_VLAPQF_QUEUE_MASK);
-
- wr32(IGC_VLAPQF, vlapqf);
}
-int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+/**
+ * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid
+ * @adapter: Pointer to adapter
+ * @rule: Rule under evaluation
+ *
+ * The driver doesn't support rules with multiple matches, so if more than
+ * one bit in filter flags is set, @rule is considered invalid.
+ *
+ * Also, if there is already another rule with the same filter in a different
+ * location, @rule is considered invalid.
+ *
+ * Context: Expects adapter->nfc_rule_lock to be held by caller.
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
+ */
+static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter,
+ struct igc_nfc_rule *rule)
{
- if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
- igc_clear_etype_filter_regs(adapter,
- input->etype_reg_index);
-
- if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
- igc_clear_vlan_prio_filter(adapter,
- ntohs(input->filter.vlan_tci));
-
- if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
- igc_del_mac_steering_filter(adapter, input->filter.src_addr,
- input->action,
- IGC_MAC_STATE_SRC_ADDR);
-
- if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
- igc_del_mac_steering_filter(adapter, input->filter.dst_addr,
- input->action, 0);
-
- return 0;
-}
-
-static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter,
- struct igc_nfc_filter *input,
- u16 sw_idx)
-{
- struct igc_nfc_filter *rule, *parent;
- int err = -EINVAL;
-
- parent = NULL;
- rule = NULL;
+ struct net_device *dev = adapter->netdev;
+ u8 flags = rule->filter.match_flags;
+ struct igc_nfc_rule *tmp;
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
- /* hash found, or no matching entry */
- if (rule->sw_idx >= sw_idx)
- break;
- parent = rule;
+ if (!flags) {
+ netdev_dbg(dev, "Rule with no match\n");
+ return -EINVAL;
}
- /* if there is an old rule occupying our place remove it */
- if (rule && rule->sw_idx == sw_idx) {
- if (!input)
- err = igc_erase_filter(adapter, rule);
-
- hlist_del(&rule->nfc_node);
- kfree(rule);
- adapter->nfc_filter_count--;
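+ /* flags & (flags - 1) is non-zero iff more than one match bit is set */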
+ if (flags & (flags - 1)) {
+ netdev_dbg(dev, "Rule with multiple matches not supported\n");
+ return -EOPNOTSUPP;
}
- /* If no input this was a delete, err should be 0 if a rule was
- * successfully found and removed from the list else -EINVAL
- */
- if (!input)
- return err;
-
- /* initialize node */
- INIT_HLIST_NODE(&input->nfc_node);
-
- /* add filter to the list */
- if (parent)
- hlist_add_behind(&input->nfc_node, &parent->nfc_node);
- else
- hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
-
- /* update counts */
- adapter->nfc_filter_count++;
+ list_for_each_entry(tmp, &adapter->nfc_rule_list, list) {
+ if (!memcmp(&rule->filter, &tmp->filter,
+ sizeof(rule->filter)) &&
+ tmp->location != rule->location) {
+ netdev_dbg(dev, "Rule already exists\n");
+ return -EEXIST;
+ }
+ }
return 0;
}
-static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
struct net_device *netdev = adapter->netdev;
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
- struct igc_nfc_filter *input, *rule;
- int err = 0;
+ struct igc_nfc_rule *rule, *old_rule;
+ int err;
- if (!(netdev->hw_features & NETIF_F_NTUPLE))
+ if (!(netdev->hw_features & NETIF_F_NTUPLE)) {
+ netdev_dbg(netdev, "N-tuple filters disabled\n");
return -EOPNOTSUPP;
+ }
- /* Don't allow programming if the action is a queue greater than
- * the number of online Rx queues.
- */
- if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
- fsp->ring_cookie >= adapter->num_rx_queues) {
- dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n");
- return -EINVAL;
+ if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) {
+ netdev_dbg(netdev, "Only ethernet flow type is supported\n");
+ return -EOPNOTSUPP;
}
- /* Don't allow indexes to exist outside of available space */
- if (fsp->location >= IGC_MAX_RXNFC_FILTERS) {
- dev_err(&adapter->pdev->dev, "Location out of range\n");
- return -EINVAL;
+ if ((fsp->flow_type & FLOW_EXT) &&
+ fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
+ netdev_dbg(netdev, "VLAN mask not supported\n");
+ return -EOPNOTSUPP;
}
- if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ if (fsp->ring_cookie >= adapter->num_rx_queues) {
+ netdev_dbg(netdev, "Invalid action\n");
return -EINVAL;
-
- input = kzalloc(sizeof(*input), GFP_KERNEL);
- if (!input)
- return -ENOMEM;
-
- if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
- input->filter.etype = fsp->h_u.ether_spec.h_proto;
- input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE;
}
- /* Only support matching addresses by the full mask */
- if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
- input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
- ether_addr_copy(input->filter.src_addr,
- fsp->h_u.ether_spec.h_source);
+ if (fsp->location >= IGC_MAX_RXNFC_RULES) {
+ netdev_dbg(netdev, "Invalid location\n");
+ return -EINVAL;
}
- /* Only support matching addresses by the full mask */
- if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
- input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
- ether_addr_copy(input->filter.dst_addr,
- fsp->h_u.ether_spec.h_dest);
- }
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
- if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
- if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
- err = -EINVAL;
- goto err_out;
- }
- input->filter.vlan_tci = fsp->h_ext.vlan_tci;
- input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
- }
+ igc_ethtool_init_nfc_rule(rule, fsp);
- input->action = fsp->ring_cookie;
- input->sw_idx = fsp->location;
+ mutex_lock(&adapter->nfc_rule_lock);
- spin_lock(&adapter->nfc_lock);
+ err = igc_ethtool_check_nfc_rule(adapter, rule);
+ if (err)
+ goto err;
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
- if (!memcmp(&input->filter, &rule->filter,
- sizeof(input->filter))) {
- err = -EEXIST;
- dev_err(&adapter->pdev->dev,
- "ethtool: this filter is already set\n");
- goto err_out_w_lock;
- }
- }
+ old_rule = igc_get_nfc_rule(adapter, fsp->location);
+ if (old_rule)
+ igc_del_nfc_rule(adapter, old_rule);
- err = igc_add_filter(adapter, input);
+ err = igc_add_nfc_rule(adapter, rule);
if (err)
- goto err_out_w_lock;
-
- igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ goto err;
- spin_unlock(&adapter->nfc_lock);
+ mutex_unlock(&adapter->nfc_rule_lock);
return 0;
-err_out_w_lock:
- spin_unlock(&adapter->nfc_lock);
-err_out:
- kfree(input);
+err:
+ mutex_unlock(&adapter->nfc_rule_lock);
+ kfree(rule);
return err;
}
-static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
- int err;
+ struct igc_nfc_rule *rule;
- spin_lock(&adapter->nfc_lock);
- err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location);
- spin_unlock(&adapter->nfc_lock);
+ mutex_lock(&adapter->nfc_rule_lock);
- return err;
+ rule = igc_get_nfc_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->nfc_rule_lock);
+ return -EINVAL;
+ }
+
+ igc_del_nfc_rule(adapter, rule);
+
+ mutex_unlock(&adapter->nfc_rule_lock);
+ return 0;
}
-static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static int igc_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
{
struct igc_adapter *adapter = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
- ret = igc_set_rss_hash_opt(adapter, cmd);
- break;
+ return igc_ethtool_set_rss_hash_opt(adapter, cmd);
case ETHTOOL_SRXCLSRLINS:
- ret = igc_add_ethtool_nfc_entry(adapter, cmd);
- break;
+ return igc_ethtool_add_nfc_rule(adapter, cmd);
case ETHTOOL_SRXCLSRLDEL:
- ret = igc_del_ethtool_nfc_entry(adapter, cmd);
+ return igc_ethtool_del_nfc_rule(adapter, cmd);
default:
- break;
+ return -EOPNOTSUPP;
}
-
- return ret;
}
void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
@@ -1533,13 +1385,13 @@ void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
}
}
-static u32 igc_get_rxfh_indir_size(struct net_device *netdev)
+static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev)
{
return IGC_RETA_SIZE;
}
-static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
@@ -1554,8 +1406,8 @@ static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
-static int igc_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u32 num_queues;
@@ -1583,18 +1435,13 @@ static int igc_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
}
-static unsigned int igc_max_channels(struct igc_adapter *adapter)
-{
- return igc_get_max_rss_queues(adapter);
-}
-
-static void igc_get_channels(struct net_device *netdev,
- struct ethtool_channels *ch)
+static void igc_ethtool_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
{
struct igc_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
- ch->max_combined = igc_max_channels(adapter);
+ ch->max_combined = igc_get_max_rss_queues(adapter);
/* Report info for other vector */
if (adapter->flags & IGC_FLAG_HAS_MSIX) {
@@ -1605,8 +1452,8 @@ static void igc_get_channels(struct net_device *netdev,
ch->combined_count = adapter->rss_queues;
}
-static int igc_set_channels(struct net_device *netdev,
- struct ethtool_channels *ch)
+static int igc_ethtool_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
{
struct igc_adapter *adapter = netdev_priv(netdev);
unsigned int count = ch->combined_count;
@@ -1621,7 +1468,7 @@ static int igc_set_channels(struct net_device *netdev,
return -EINVAL;
/* Verify the number of channels doesn't exceed hw limits */
- max_combined = igc_max_channels(adapter);
+ max_combined = igc_get_max_rss_queues(adapter);
if (count > max_combined)
return -EINVAL;
@@ -1638,8 +1485,8 @@ static int igc_set_channels(struct net_device *netdev,
return 0;
}
-static int igc_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+static int igc_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
{
struct igc_adapter *adapter = netdev_priv(dev);
@@ -1671,7 +1518,7 @@ static int igc_get_ts_info(struct net_device *dev,
}
}
-static u32 igc_get_priv_flags(struct net_device *netdev)
+static u32 igc_ethtool_get_priv_flags(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u32 priv_flags = 0;
@@ -1682,7 +1529,7 @@ static u32 igc_get_priv_flags(struct net_device *netdev)
return priv_flags;
}
-static int igc_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct igc_adapter *adapter = netdev_priv(netdev);
unsigned int flags = adapter->flags;
@@ -1717,8 +1564,8 @@ static void igc_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
-static int igc_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
+static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -1824,10 +1671,12 @@ static int igc_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static int igc_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
+static int
+igc_ethtool_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
u32 advertising;
@@ -1835,8 +1684,7 @@ static int igc_set_link_ksettings(struct net_device *netdev,
* cannot be changed
*/
if (igc_check_reset_block(hw)) {
- dev_err(&adapter->pdev->dev,
- "Cannot change link characteristics when reset is active.\n");
+ netdev_err(dev, "Cannot change link characteristics when reset is active\n");
return -EINVAL;
}
@@ -1847,7 +1695,7 @@ static int igc_set_link_ksettings(struct net_device *netdev,
if (cmd->base.eth_tp_mdix_ctrl) {
if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO &&
cmd->base.autoneg != AUTONEG_ENABLE) {
- dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
return -EINVAL;
}
}
@@ -1864,9 +1712,7 @@ static int igc_set_link_ksettings(struct net_device *netdev,
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
} else {
- /* calling this overrides forced MDI setting */
- dev_info(&adapter->pdev->dev,
- "Force mode currently not supported\n");
+ netdev_info(dev, "Force mode currently not supported\n");
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
@@ -1893,46 +1739,105 @@ static int igc_set_link_ksettings(struct net_device *netdev,
return 0;
}
+static void igc_ethtool_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ netdev_info(adapter->netdev, "Offline testing starting");
+ set_bit(__IGC_TESTING, &adapter->state);
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result
+ */
+ if (!igc_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ igc_close(netdev);
+ else
+ igc_reset(adapter);
+
+ netdev_info(adapter->netdev, "Register testing starting");
+ if (!igc_reg_test(adapter, &data[TEST_REG]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igc_reset(adapter);
+
+ netdev_info(adapter->netdev, "EEPROM testing starting");
+ if (!igc_eeprom_test(adapter, &data[TEST_EEP]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igc_reset(adapter);
+
+ /* loopback and interrupt tests
+ * will be implemented in the future
+ */
+ data[TEST_LOOP] = 0;
+ data[TEST_IRQ] = 0;
+
+ clear_bit(__IGC_TESTING, &adapter->state);
+ if (if_running)
+ igc_open(netdev);
+ } else {
+ netdev_info(adapter->netdev, "Online testing starting");
+
+ /* register, eeprom, intr and loopback tests not run online */
+ data[TEST_REG] = 0;
+ data[TEST_EEP] = 0;
+ data[TEST_IRQ] = 0;
+ data[TEST_LOOP] = 0;
+
+ if (!igc_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ msleep_interruptible(4 * 1000);
+}
+
static const struct ethtool_ops igc_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
- .get_drvinfo = igc_get_drvinfo,
- .get_regs_len = igc_get_regs_len,
- .get_regs = igc_get_regs,
- .get_wol = igc_get_wol,
- .set_wol = igc_set_wol,
- .get_msglevel = igc_get_msglevel,
- .set_msglevel = igc_set_msglevel,
- .nway_reset = igc_nway_reset,
- .get_link = igc_get_link,
- .get_eeprom_len = igc_get_eeprom_len,
- .get_eeprom = igc_get_eeprom,
- .set_eeprom = igc_set_eeprom,
- .get_ringparam = igc_get_ringparam,
- .set_ringparam = igc_set_ringparam,
- .get_pauseparam = igc_get_pauseparam,
- .set_pauseparam = igc_set_pauseparam,
- .get_strings = igc_get_strings,
- .get_sset_count = igc_get_sset_count,
- .get_ethtool_stats = igc_get_ethtool_stats,
- .get_coalesce = igc_get_coalesce,
- .set_coalesce = igc_set_coalesce,
- .get_rxnfc = igc_get_rxnfc,
- .set_rxnfc = igc_set_rxnfc,
- .get_rxfh_indir_size = igc_get_rxfh_indir_size,
- .get_rxfh = igc_get_rxfh,
- .set_rxfh = igc_set_rxfh,
- .get_ts_info = igc_get_ts_info,
- .get_channels = igc_get_channels,
- .set_channels = igc_set_channels,
- .get_priv_flags = igc_get_priv_flags,
- .set_priv_flags = igc_set_priv_flags,
+ .get_drvinfo = igc_ethtool_get_drvinfo,
+ .get_regs_len = igc_ethtool_get_regs_len,
+ .get_regs = igc_ethtool_get_regs,
+ .get_wol = igc_ethtool_get_wol,
+ .set_wol = igc_ethtool_set_wol,
+ .get_msglevel = igc_ethtool_get_msglevel,
+ .set_msglevel = igc_ethtool_set_msglevel,
+ .nway_reset = igc_ethtool_nway_reset,
+ .get_link = igc_ethtool_get_link,
+ .get_eeprom_len = igc_ethtool_get_eeprom_len,
+ .get_eeprom = igc_ethtool_get_eeprom,
+ .set_eeprom = igc_ethtool_set_eeprom,
+ .get_ringparam = igc_ethtool_get_ringparam,
+ .set_ringparam = igc_ethtool_set_ringparam,
+ .get_pauseparam = igc_ethtool_get_pauseparam,
+ .set_pauseparam = igc_ethtool_set_pauseparam,
+ .get_strings = igc_ethtool_get_strings,
+ .get_sset_count = igc_ethtool_get_sset_count,
+ .get_ethtool_stats = igc_ethtool_get_stats,
+ .get_coalesce = igc_ethtool_get_coalesce,
+ .set_coalesce = igc_ethtool_set_coalesce,
+ .get_rxnfc = igc_ethtool_get_rxnfc,
+ .set_rxnfc = igc_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size,
+ .get_rxfh = igc_ethtool_get_rxfh,
+ .set_rxfh = igc_ethtool_set_rxfh,
+ .get_ts_info = igc_ethtool_get_ts_info,
+ .get_channels = igc_ethtool_get_channels,
+ .set_channels = igc_ethtool_set_channels,
+ .get_priv_flags = igc_ethtool_get_priv_flags,
+ .set_priv_flags = igc_ethtool_set_priv_flags,
.begin = igc_ethtool_begin,
.complete = igc_ethtool_complete,
- .get_link_ksettings = igc_get_link_ksettings,
- .set_link_ksettings = igc_set_link_ksettings,
+ .get_link_ksettings = igc_ethtool_get_link_ksettings,
+ .set_link_ksettings = igc_ethtool_set_link_ksettings,
+ .self_test = igc_ethtool_diag_test,
};
-void igc_set_ethtool_ops(struct net_device *netdev)
+void igc_ethtool_set_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &igc_ethtool_ops;
}
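
The new .self_test hook is what `ethtool -t <iface> [online|offline]` invokes: an offline run takes the interface down for the register and EEPROM tests, while an online run only performs the link test. The data[] indices used by igc_ethtool_diag_test (TEST_REG, TEST_EEP, TEST_IRQ, TEST_LOOP, TEST_LINK) are not shown in this excerpt; presumably they come from an enum along these lines (a sketch, not the patch's literal definition):

    enum igc_diagnostics_results {
        TEST_REG = 0,
        TEST_EEP,
        TEST_IRQ,
        TEST_LOOP,
        TEST_LINK
    };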
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 90ac0e0144d8..af34ae310327 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -21,6 +21,9 @@
#define IGC_DEV_ID_I225_I 0x15F8
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
+#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I225_LMVP 0x5502
+#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 12aa6b5fcb5d..410aeb01de5c 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -235,15 +235,14 @@ out:
void igc_clear_hw_cntrs_base(struct igc_hw *hw)
{
rd32(IGC_CRCERRS);
- rd32(IGC_SYMERRS);
rd32(IGC_MPC);
rd32(IGC_SCC);
rd32(IGC_ECOL);
rd32(IGC_MCC);
rd32(IGC_LATECOL);
rd32(IGC_COLC);
+ rd32(IGC_RERC);
rd32(IGC_DC);
- rd32(IGC_SEC);
rd32(IGC_RLEC);
rd32(IGC_XONRXC);
rd32(IGC_XONTXC);
@@ -288,7 +287,7 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw)
rd32(IGC_ALGNERRC);
rd32(IGC_RXERRC);
rd32(IGC_TNCRS);
- rd32(IGC_CEXTERR);
+ rd32(IGC_HTDPMC);
rd32(IGC_TSCTC);
rd32(IGC_TSCTFC);
@@ -307,12 +306,8 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw)
rd32(IGC_ICTXQMTC);
rd32(IGC_ICRXDMTC);
- rd32(IGC_CBTMPC);
- rd32(IGC_HTDPMC);
- rd32(IGC_CBRMPC);
rd32(IGC_RPTHC);
rd32(IGC_HGPTC);
- rd32(IGC_HTCBDPC);
rd32(IGC_HGORCL);
rd32(IGC_HGORCH);
rd32(IGC_HGOTCL);
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.h b/drivers/net/ethernet/intel/igc/igc_mac.h
index 832cccec87cd..b5963f86defb 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.h
+++ b/drivers/net/ethernet/intel/igc/igc_mac.h
@@ -8,10 +8,6 @@
#include "igc_phy.h"
#include "igc_defines.h"
-#ifndef IGC_REMOVED
-#define IGC_REMOVED(a) (0)
-#endif /* IGC_REMOVED */
-
/* forward declaration */
s32 igc_disable_pcie_master(struct igc_hw *hw);
s32 igc_check_for_copper_link(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 69fa1ce1f927..6919c50e449a 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -9,11 +9,13 @@
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
+#include <net/pkt_sched.h>
#include <net/ipv6.h>
#include "igc.h"
#include "igc_hw.h"
+#include "igc_tsn.h"
#define DRV_VERSION "0.0.1-k"
#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
@@ -45,6 +47,9 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
/* required last entry */
{0, }
@@ -71,7 +76,7 @@ static void igc_power_down_link(struct igc_adapter *adapter)
void igc_reset(struct igc_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
struct igc_fc_info *fc = &hw->fc;
u32 pba, hwm;
@@ -98,7 +103,7 @@ void igc_reset(struct igc_adapter *adapter)
hw->mac.ops.reset_hw(hw);
if (hw->mac.ops.init_hw(hw))
- dev_err(&pdev->dev, "Hardware Error\n");
+ netdev_err(dev, "Error on hardware initialization\n");
if (!netif_running(adapter->netdev))
igc_power_down_link(adapter);
@@ -106,6 +111,9 @@ void igc_reset(struct igc_adapter *adapter)
/* Re-enable PTP, where applicable. */
igc_ptp_reset(adapter);
+ /* Re-enable TSN offloading, where applicable. */
+ igc_tsn_offload_apply(adapter);
+
igc_get_phy_info(hw);
}
@@ -280,6 +288,7 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
*/
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
+ struct net_device *ndev = tx_ring->netdev;
struct device *dev = tx_ring->dev;
int size = 0;
@@ -305,8 +314,7 @@ int igc_setup_tx_resources(struct igc_ring *tx_ring)
err:
vfree(tx_ring->tx_buffer_info);
- dev_err(dev,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
return -ENOMEM;
}
@@ -318,14 +326,13 @@ err:
*/
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
err = igc_setup_tx_resources(adapter->tx_ring[i]);
if (err) {
- dev_err(&pdev->dev,
- "Allocation for Tx Queue %u failed\n", i);
+ netdev_err(dev, "Error on Tx queue %u setup\n", i);
for (i--; i >= 0; i--)
igc_free_tx_resources(adapter->tx_ring[i]);
break;
@@ -436,6 +443,7 @@ static void igc_free_all_rx_resources(struct igc_adapter *adapter)
*/
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
+ struct net_device *ndev = rx_ring->netdev;
struct device *dev = rx_ring->dev;
int size, desc_len;
@@ -465,8 +473,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
err:
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dev_err(dev,
- "Unable to allocate memory for the receive descriptor ring\n");
+ netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
return -ENOMEM;
}
@@ -479,14 +486,13 @@ err:
*/
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
err = igc_setup_rx_resources(adapter->rx_ring[i]);
if (err) {
- dev_err(&pdev->dev,
- "Allocation for Rx Queue %u failed\n", i);
+ netdev_err(dev, "Error on Rx queue %u setup\n", i);
for (i--; i >= 0; i--)
igc_free_rx_resources(adapter->rx_ring[i]);
break;
@@ -757,48 +763,76 @@ static void igc_setup_tctl(struct igc_adapter *adapter)
}
/**
- * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
- * @adapter: address of board private structure
- * @index: Index of the RAR entry which need to be synced with MAC table
+ * igc_set_mac_filter_hw() - Set MAC address filter in hardware
+ * @adapter: Pointer to adapter where the filter should be set
+ * @index: Filter index
+ * @type: MAC address filter type (source or destination)
+ * @addr: MAC address
+ * @queue: If non-negative, queue assignment feature is enabled and frames
+ * matching the filter are enqueued onto 'queue'. Otherwise, queue
+ * assignment is disabled.
*/
-static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
+static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
+ enum igc_mac_filter_type type,
+ const u8 *addr, int queue)
{
- u8 *addr = adapter->mac_table[index].addr;
+ struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
- u32 rar_low, rar_high;
+ u32 ral, rah;
- /* HW expects these to be in network order when they are plugged
- * into the registers which are little endian. In order to guarantee
- * that ordering we need to do an leXX_to_cpup here in order to be
- * ready for the byteswap that occurs with writel
- */
- rar_low = le32_to_cpup((__le32 *)(addr));
- rar_high = le16_to_cpup((__le16 *)(addr + 4));
+ if (WARN_ON(index >= hw->mac.rar_entry_count))
+ return;
- /* Indicate to hardware the Address is Valid. */
- if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
- if (is_valid_ether_addr(addr))
- rar_high |= IGC_RAH_AV;
+ ral = le32_to_cpup((__le32 *)(addr));
+ rah = le16_to_cpup((__le16 *)(addr + 4));
- rar_high |= IGC_RAH_POOL_1 <<
- adapter->mac_table[index].queue;
+ if (type == IGC_MAC_FILTER_TYPE_SRC) {
+ rah &= ~IGC_RAH_ASEL_MASK;
+ rah |= IGC_RAH_ASEL_SRC_ADDR;
}
- wr32(IGC_RAL(index), rar_low);
- wrfl();
- wr32(IGC_RAH(index), rar_high);
- wrfl();
+ if (queue >= 0) {
+ rah &= ~IGC_RAH_QSEL_MASK;
+ rah |= (queue << IGC_RAH_QSEL_SHIFT);
+ rah |= IGC_RAH_QSEL_ENABLE;
+ }
+
+ rah |= IGC_RAH_AV;
+
+ wr32(IGC_RAL(index), ral);
+ wr32(IGC_RAH(index), rah);
+
+ netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
+}
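
For reference, the RAL/RAH packing used above loads the first four bytes of the address little-endian into RAL and the last two into the low half of RAH. A userspace illustration (assuming a little-endian host, where plain loads match le32_to_cpup()/le16_to_cpup()):

    #include <stdint.h>
    #include <string.h>

    /* addr 00:1b:21:aa:bb:cc -> ral 0xaa211b00, rah low word 0xccbb */
    static void pack_rar(const uint8_t addr[6], uint32_t *ral, uint16_t *rah_lo)
    {
        memcpy(ral, addr, 4);        /* bytes 0..3, little-endian */
        memcpy(rah_lo, addr + 4, 2); /* bytes 4..5 */
    }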
+
+/**
+ * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
+ * @adapter: Pointer to adapter where the filter should be cleared
+ * @index: Filter index
+ */
+static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
+{
+ struct net_device *dev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ if (WARN_ON(index >= hw->mac.rar_entry_count))
+ return;
+
+ wr32(IGC_RAL(index), 0);
+ wr32(IGC_RAH(index), 0);
+
+ netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}
/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
- struct igc_mac_addr *mac_table = &adapter->mac_table[0];
+ struct net_device *dev = adapter->netdev;
+ u8 *addr = adapter->hw.mac.addr;
- ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
- mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
- igc_rar_set_index(adapter, 0);
+ igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}
/**
@@ -864,6 +898,23 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
return netdev_mc_count(netdev);
}
+static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+{
+ ktime_t cycle_time = adapter->cycle_time;
+ ktime_t base_time = adapter->base_time;
+ u32 launchtime;
+
+ /* FIXME: when using ETF together with taprio, we may have a
+ * case where 'delta' is larger than the cycle_time; this may
+ * cause problems if we don't read the current value of
+ * IGC_BASET, as the value written into the launchtime
+ * descriptor field may be misinterpreted.
+ */
+ div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+
+ return cpu_to_le32(launchtime);
+}
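
The computation above wraps the requested transmit time into the current Qbv cycle: launchtime = (txtime - base_time) mod cycle_time. A plain-C illustration of the arithmetic (userspace types and example values; the kernel uses div_s64_rem() because 64-bit division helpers are required there):

    long long txtime_ns = 1000500, base_time_ns = 1000000, cycle_ns = 200;
    /* 500 ns past the schedule start, wrapped into a 200 ns cycle -> 100 */
    unsigned int launch = (unsigned int)((txtime_ns - base_time_ns) % cycle_ns);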
+
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
struct igc_tx_buffer *first,
u32 vlan_macip_lens, u32 type_tucmd,
@@ -871,7 +922,6 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
{
struct igc_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
- struct timespec64 ts;
context_desc = IGC_TX_CTXTDESC(tx_ring, i);
@@ -893,9 +943,12 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
* should have been handled by the upper layers.
*/
if (tx_ring->launchtime_enable) {
- ts = ktime_to_timespec64(first->skb->tstamp);
+ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+ ktime_t txtime = first->skb->tstamp;
+
first->skb->tstamp = ktime_set(0, 0);
- context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
+ context_desc->launch_time = igc_tx_launchtime(adapter,
+ txtime);
} else {
context_desc->launch_time = 0;
}
@@ -1143,7 +1196,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
return 0;
dma_error:
- dev_err(tx_ring->dev, "TX DMA map failed\n");
+ netdev_err(tx_ring->netdev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
@@ -1406,8 +1459,8 @@ static void igc_rx_checksum(struct igc_ring *ring,
IGC_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(ring->dev, "cksum success: bits %08X\n",
- le32_to_cpu(rx_desc->wb.upper.status_error));
+ netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
+ le32_to_cpu(rx_desc->wb.upper.status_error));
}
static inline void igc_rx_hash(struct igc_ring *ring,
@@ -2069,27 +2122,27 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
(adapter->tx_timeout_factor * HZ)) &&
!(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
/* detected Tx unit hang */
- dev_err(tx_ring->dev,
- "Detected Tx Unit Hang\n"
- " Tx Queue <%d>\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%p>\n"
- " jiffies <%lx>\n"
- " desc.status <%x>\n",
- tx_ring->queue_index,
- rd32(IGC_TDH(tx_ring->reg_idx)),
- readl(tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_buffer->time_stamp,
- tx_buffer->next_to_watch,
- jiffies,
- tx_buffer->next_to_watch->wb.status);
+ netdev_err(tx_ring->netdev,
+ "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%p>\n"
+ " jiffies <%lx>\n"
+ " desc.status <%x>\n",
+ tx_ring->queue_index,
+ rd32(IGC_TDH(tx_ring->reg_idx)),
+ readl(tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_buffer->time_stamp,
+ tx_buffer->next_to_watch,
+ jiffies,
+ tx_buffer->next_to_watch->wb.status);
netif_stop_subqueue(tx_ring->netdev,
tx_ring->queue_index);
@@ -2121,140 +2174,435 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
return !!budget;
}
-static void igc_nfc_filter_restore(struct igc_adapter *adapter)
+static int igc_find_mac_filter(struct igc_adapter *adapter,
+ enum igc_mac_filter_type type, const u8 *addr)
{
- struct igc_nfc_filter *rule;
+ struct igc_hw *hw = &adapter->hw;
+ int max_entries = hw->mac.rar_entry_count;
+ u32 ral, rah;
+ int i;
- spin_lock(&adapter->nfc_lock);
+ for (i = 0; i < max_entries; i++) {
+ ral = rd32(IGC_RAL(i));
+ rah = rd32(IGC_RAH(i));
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
- igc_add_filter(adapter, rule);
+ if (!(rah & IGC_RAH_AV))
+ continue;
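+ /* normalize the ASEL bit to 0/1 so it compares against the type enum */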
+ if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
+ continue;
+ if ((rah & IGC_RAH_RAH_MASK) !=
+ le16_to_cpup((__le16 *)(addr + 4)))
+ continue;
+ if (ral != le32_to_cpup((__le32 *)(addr)))
+ continue;
+
+ return i;
+ }
- spin_unlock(&adapter->nfc_lock);
+ return -1;
}
-/* If the filter to be added and an already existing filter express
- * the same address and address type, it should be possible to only
- * override the other configurations, for example the queue to steer
- * traffic.
- */
-static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
- const u8 *addr, const u8 flags)
+static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
{
- if (!(entry->state & IGC_MAC_STATE_IN_USE))
- return true;
+ struct igc_hw *hw = &adapter->hw;
+ int max_entries = hw->mac.rar_entry_count;
+ u32 rah;
+ int i;
- if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
- (flags & IGC_MAC_STATE_SRC_ADDR))
- return false;
+ for (i = 0; i < max_entries; i++) {
+ rah = rd32(IGC_RAH(i));
- if (!ether_addr_equal(addr, entry->addr))
- return false;
+ if (!(rah & IGC_RAH_AV))
+ return i;
+ }
- return true;
+ return -1;
}
-/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
- * 'flags' is used to indicate what kind of match is made, match is by
- * default for the destination address, if matching by source address
- * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
+/**
+ * igc_add_mac_filter() - Add MAC address filter
+ * @adapter: Pointer to adapter where the filter should be added
+ * @type: MAC address filter type (source or destination)
+ * @addr: MAC address
+ * @queue: If non-negative, queue assignment feature is enabled and frames
+ * matching the filter are enqueued onto 'queue'. Otherwise, queue
+ * assignment is disabled.
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
*/
static int igc_add_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
+ enum igc_mac_filter_type type, const u8 *addr,
+ int queue)
+{
+ struct net_device *dev = adapter->netdev;
+ int index;
+
+ index = igc_find_mac_filter(adapter, type, addr);
+ if (index >= 0)
+ goto update_filter;
+
+ index = igc_get_avail_mac_filter_slot(adapter);
+ if (index < 0)
+ return -ENOSPC;
+
+ netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
+ index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
+ addr, queue);
+
+update_filter:
+ igc_set_mac_filter_hw(adapter, index, type, addr, queue);
+ return 0;
+}
+
+/**
+ * igc_del_mac_filter() - Delete MAC address filter
+ * @adapter: Pointer to adapter where the filter should be deleted from
+ * @type: MAC address filter type (source or destination)
+ * @addr: MAC address
+ */
+static void igc_del_mac_filter(struct igc_adapter *adapter,
+ enum igc_mac_filter_type type, const u8 *addr)
{
+ struct net_device *dev = adapter->netdev;
+ int index;
+
+ index = igc_find_mac_filter(adapter, type, addr);
+ if (index < 0)
+ return;
+
+ if (index == 0) {
+ /* If this is the default filter, we don't actually delete it.
+ * We just reset it to its default value, i.e. disable queue
+ * assignment.
+ */
+ netdev_dbg(dev, "Disable default MAC filter queue assignment");
+
+ igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
+ } else {
+ netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
+ index,
+ type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
+ addr);
+
+ igc_clear_mac_filter_hw(adapter, index);
+ }
+}
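An illustrative call sequence for the two helpers above (both are static to igc_main.c; the address and queue below are hypothetical): steer frames sent to a locally administered MAC onto Rx queue 2, then tear the filter down again.

/* Sketch only: demonstrates the add/del pairing, not driver code. */
static void example_mac_filter_usage(struct igc_adapter *adapter)
{
	static const u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	if (!igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, mac, 2))
		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, mac);
}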
+
+/**
+ * igc_add_vlan_prio_filter() - Add VLAN priority filter
+ * @adapter: Pointer to adapter where the filter should be added
+ * @prio: VLAN priority value
+ * @queue: Queue number to which matching frames are assigned
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
+ */
+static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
+ int queue)
+{
+ struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
+ u32 vlanpqf;
- if (is_zero_ether_addr(addr))
- return -EINVAL;
+ vlanpqf = rd32(IGC_VLANPQF);
- /* Search for the first empty entry in the MAC table.
- * Do not touch entries at the end of the table reserved for the VF MAC
- * addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
- addr, 0))
- continue;
+ if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
+ netdev_dbg(dev, "VLAN priority filter already in use\n");
+ return -EEXIST;
+ }
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
+ vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
+ vlanpqf |= IGC_VLANPQF_VALID(prio);
- igc_rar_set_index(adapter, i);
- return i;
+ wr32(IGC_VLANPQF, vlanpqf);
+
+ netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
+ prio, queue);
+ return 0;
+}
+
+/**
+ * igc_del_vlan_prio_filter() - Delete VLAN priority filter
+ * @adapter: Pointer to adapter where the filter should be deleted from
+ * @prio: VLAN priority value
+ */
+static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 vlanpqf;
+
+ vlanpqf = rd32(IGC_VLANPQF);
+
+ vlanpqf &= ~IGC_VLANPQF_VALID(prio);
+ vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
+
+ wr32(IGC_VLANPQF, vlanpqf);
+
+ netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
+ prio);
+}
+
+static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < MAX_ETYPE_FILTER; i++) {
+ u32 etqf = rd32(IGC_ETQF(i));
+
+ if (!(etqf & IGC_ETQF_FILTER_ENABLE))
+ return i;
}
- return -ENOSPC;
+ return -1;
}
-/* Remove a MAC filter for 'addr' directing matching traffic to
- * 'queue', 'flags' is used to indicate what kind of match need to be
- * removed, match is by default for the destination address, if
- * matching by source address is to be removed the flag
- * IGC_MAC_STATE_SRC_ADDR can be used.
+/**
+ * igc_add_etype_filter() - Add ethertype filter
+ * @adapter: Pointer to adapter where the filter should be added
+ * @etype: Ethertype value
+ * @queue: If non-negative, queue assignment feature is enabled and frames
+ * matching the filter are enqueued onto 'queue'. Otherwise, queue
+ * assignment is disabled.
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
*/
-static int igc_del_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
+static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
+ int queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int index;
+ u32 etqf;
+
+ index = igc_get_avail_etype_filter_slot(adapter);
+ if (index < 0)
+ return -ENOSPC;
+
+ etqf = rd32(IGC_ETQF(index));
+
+ etqf &= ~IGC_ETQF_ETYPE_MASK;
+ etqf |= etype;
+
+ if (queue >= 0) {
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
+ etqf |= IGC_ETQF_QUEUE_ENABLE;
+ }
+
+ etqf |= IGC_ETQF_FILTER_ENABLE;
+
+ wr32(IGC_ETQF(index), etqf);
+
+ netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
+ etype, queue);
+ return 0;
+}
+
+static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
{
struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
int i;
- if (is_zero_ether_addr(addr))
- return -EINVAL;
+ for (i = 0; i < MAX_ETYPE_FILTER; i++) {
+ u32 etqf = rd32(IGC_ETQF(i));
- /* Search for matching entry in the MAC table based on given address
- * and queue. Do not touch entries at the end of the table reserved
- * for the VF MAC addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
- continue;
- if (adapter->mac_table[i].state != 0)
- continue;
- if (adapter->mac_table[i].queue != queue)
- continue;
- if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
- continue;
+ if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
+ return i;
+ }
- /* When a filter for the default address is "deleted",
- * we return it to its initial configuration
- */
- if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
- adapter->mac_table[i].state =
- IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
- adapter->mac_table[i].queue = 0;
- } else {
- adapter->mac_table[i].state = 0;
- adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- }
+ return -1;
+}
- igc_rar_set_index(adapter, i);
- return 0;
+/**
+ * igc_del_etype_filter() - Delete ethertype filter
+ * @adapter: Pointer to adapter where the filter should be deleted from
+ * @etype: Ethertype value
+ */
+static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int index;
+
+ index = igc_find_etype_filter(adapter, etype);
+ if (index < 0)
+ return;
+
+ wr32(IGC_ETQF(index), 0);
+
+ netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
+ etype);
+}
+
+static int igc_enable_nfc_rule(struct igc_adapter *adapter,
+ const struct igc_nfc_rule *rule)
+{
+ int err;
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ err = igc_add_etype_filter(adapter, rule->filter.etype,
+ rule->action);
+ if (err)
+ return err;
+ }
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
+ rule->filter.src_addr, rule->action);
+ if (err)
+ return err;
+ }
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
+ rule->filter.dst_addr, rule->action);
+ if (err)
+ return err;
+ }
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+
+ err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
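The VLAN_TCI branch above extracts the 3-bit PCP field from the top of the 16-bit TCI using the standard VLAN_PRIO_MASK/VLAN_PRIO_SHIFT constants. A standalone check of that arithmetic (the TCI value is hypothetical):

#include <stdio.h>

#define VLAN_PRIO_MASK  0xe000
#define VLAN_PRIO_SHIFT 13

int main(void)
{
	unsigned short tci = 0xa005;	/* PCP 5, DEI 0, VID 5 */
	int prio = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	printf("prio = %d\n", prio);	/* prints 5 */
	return 0;
}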
+
+static void igc_disable_nfc_rule(struct igc_adapter *adapter,
+ const struct igc_nfc_rule *rule)
+{
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
+ igc_del_etype_filter(adapter, rule->filter.etype);
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+
+ igc_del_vlan_prio_filter(adapter, prio);
}
- return -ENOENT;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
+ rule->filter.src_addr);
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
+ rule->filter.dst_addr);
+}
+
+/**
+ * igc_get_nfc_rule() - Get NFC rule
+ * @adapter: Pointer to adapter
+ * @location: Rule location
+ *
+ * Context: Expects adapter->nfc_rule_lock to be held by caller.
+ *
+ * Return: Pointer to NFC rule at @location. If not found, NULL.
+ */
+struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
+ u32 location)
+{
+ struct igc_nfc_rule *rule;
+
+ list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
+ if (rule->location == location)
+ return rule;
+ if (rule->location > location)
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * igc_del_nfc_rule() - Delete NFC rule
+ * @adapter: Pointer to adapter
+ * @rule: Pointer to rule to be deleted
+ *
+ * Disable NFC rule in hardware and delete it from adapter.
+ *
+ * Context: Expects adapter->nfc_rule_lock to be held by caller.
+ */
+void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
+{
+ igc_disable_nfc_rule(adapter, rule);
+
+ list_del(&rule->list);
+ adapter->nfc_rule_count--;
+
+ kfree(rule);
+}
+
+static void igc_flush_nfc_rules(struct igc_adapter *adapter)
+{
+ struct igc_nfc_rule *rule, *tmp;
+
+ mutex_lock(&adapter->nfc_rule_lock);
+
+ list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
+ igc_del_nfc_rule(adapter, rule);
+
+ mutex_unlock(&adapter->nfc_rule_lock);
+}
+
+/**
+ * igc_add_nfc_rule() - Add NFC rule
+ * @adapter: Pointer to adapter
+ * @rule: Pointer to rule to be added
+ *
+ * Enable NFC rule in hardware and add it to adapter.
+ *
+ * Context: Expects adapter->nfc_rule_lock to be held by caller.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
+{
+ struct igc_nfc_rule *pred, *cur;
+ int err;
+
+ err = igc_enable_nfc_rule(adapter, rule);
+ if (err)
+ return err;
+
+ pred = NULL;
+ list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
+ if (cur->location >= rule->location)
+ break;
+ pred = cur;
+ }
+
+ list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
+ adapter->nfc_rule_count++;
+ return 0;
+}
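The insertion above keeps nfc_rule_list ordered by ascending location, which is what allows igc_get_nfc_rule() to stop scanning as soon as it sees a larger location. A minimal userspace model of the same invariant, using a plain singly linked list rather than the kernel list API:

#include <stddef.h>

struct rule {
	unsigned int location;
	struct rule *next;
};

/* Insert before the first entry whose location is >= ours, keeping
 * the list sorted so lookups can terminate early.
 */
static void insert_sorted(struct rule **head, struct rule *r)
{
	struct rule **pp = head;

	while (*pp && (*pp)->location < r->location)
		pp = &(*pp)->next;
	r->next = *pp;
	*pp = r;
}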
+
+static void igc_restore_nfc_rules(struct igc_adapter *adapter)
+{
+ struct igc_nfc_rule *rule;
+
+ mutex_lock(&adapter->nfc_rule_lock);
+
+ list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
+ igc_enable_nfc_rule(adapter, rule);
+
+ mutex_unlock(&adapter->nfc_rule_lock);
}
static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- int ret;
-
- ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
- return min_t(int, ret, 0);
+ return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}
static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
-
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
return 0;
}
@@ -2325,7 +2673,9 @@ static void igc_configure(struct igc_adapter *adapter)
igc_setup_mrqc(adapter);
igc_setup_rctl(adapter);
- igc_nfc_filter_restore(adapter);
+ igc_set_default_mac_filter(adapter);
+ igc_restore_nfc_rules(adapter);
+
igc_configure_tx(adapter);
igc_configure_rx(adapter);
@@ -2518,12 +2868,7 @@ void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
{
- unsigned int max_rss_queues;
-
- /* Determine the maximum number of RSS queues supported. */
- max_rss_queues = IGC_MAX_RX_QUEUES;
-
- return max_rss_queues;
+ return IGC_MAX_RX_QUEUES;
}
static void igc_init_queue_configuration(struct igc_adapter *adapter)
@@ -3164,14 +3509,14 @@ err_out:
*/
static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
int err = 0;
igc_set_interrupt_capability(adapter, msix);
err = igc_alloc_q_vectors(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+ netdev_err(dev, "Unable to allocate memory for vectors\n");
goto err_alloc_q_vectors;
}
@@ -3198,8 +3543,6 @@ static int igc_sw_init(struct igc_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
- int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
-
pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
/* set default ring sizes */
@@ -3218,20 +3561,19 @@ static int igc_sw_init(struct igc_adapter *adapter)
VLAN_HLEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- spin_lock_init(&adapter->nfc_lock);
+ mutex_init(&adapter->nfc_rule_lock);
+ INIT_LIST_HEAD(&adapter->nfc_rule_list);
+ adapter->nfc_rule_count = 0;
+
spin_lock_init(&adapter->stats64_lock);
/* Assume MSI-X interrupts, will be checked during IRQ allocation */
adapter->flags |= IGC_FLAG_HAS_MSIX;
- adapter->mac_table = kzalloc(size, GFP_ATOMIC);
- if (!adapter->mac_table)
- return -ENOMEM;
-
igc_init_queue_configuration(adapter);
/* This call may decrease the number of queues */
if (igc_init_interrupt_scheme(adapter, true)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -3359,8 +3701,6 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.prc511 += rd32(IGC_PRC511);
adapter->stats.prc1023 += rd32(IGC_PRC1023);
adapter->stats.prc1522 += rd32(IGC_PRC1522);
- adapter->stats.symerrs += rd32(IGC_SYMERRS);
- adapter->stats.sec += rd32(IGC_SEC);
mpc = rd32(IGC_MPC);
adapter->stats.mpc += mpc;
@@ -3399,6 +3739,7 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.tpt += rd32(IGC_TPT);
adapter->stats.colc += rd32(IGC_COLC);
+ adapter->stats.colc += rd32(IGC_RERC);
adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
@@ -3449,21 +3790,6 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.mgpdc += rd32(IGC_MGTPDC);
}
-static void igc_nfc_filter_exit(struct igc_adapter *adapter)
-{
- struct igc_nfc_filter *rule;
-
- spin_lock(&adapter->nfc_lock);
-
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
- igc_erase_filter(adapter, rule);
-
- hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
- igc_erase_filter(adapter, rule);
-
- spin_unlock(&adapter->nfc_lock);
-}
-
/**
* igc_down - Close the interface
* @adapter: board private structure
@@ -3482,8 +3808,6 @@ void igc_down(struct igc_adapter *adapter)
wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
/* flush and sleep below */
- igc_nfc_filter_exit(adapter);
-
/* set trans_start so we don't get spurious watchdogs during reset */
netif_trans_update(netdev);
@@ -3577,8 +3901,7 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
igc_down(adapter);
- netdev_dbg(netdev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -3633,20 +3956,8 @@ static int igc_set_features(struct net_device *netdev,
if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;
- if (!(features & NETIF_F_NTUPLE)) {
- struct hlist_node *node2;
- struct igc_nfc_filter *rule;
-
- spin_lock(&adapter->nfc_lock);
- hlist_for_each_entry_safe(rule, node2,
- &adapter->nfc_filter_list, nfc_node) {
- igc_erase_filter(adapter, rule);
- hlist_del(&rule->nfc_node);
- kfree(rule);
- }
- spin_unlock(&adapter->nfc_lock);
- adapter->nfc_filter_count = 0;
- }
+ if (!(features & NETIF_F_NTUPLE))
+ igc_flush_nfc_rules(adapter);
netdev->features = features;
@@ -3689,106 +4000,6 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
-/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
- * 'flags' is used to indicate what kind of match is made, match is by
- * default for the destination address, if matching by source address
- * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_add_mac_filter_flags(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue,
- const u8 flags)
-{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for the first empty entry in the MAC table.
- * Do not touch entries at the end of the table reserved for the VF MAC
- * addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
- addr, flags))
- continue;
-
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags;
-
- igc_rar_set_index(adapter, i);
- return i;
- }
-
- return -ENOSPC;
-}
-
-int igc_add_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags)
-{
- return igc_add_mac_filter_flags(adapter, addr, queue,
- IGC_MAC_STATE_QUEUE_STEERING | flags);
-}
-
-/* Remove a MAC filter for 'addr' directing matching traffic to
- * 'queue', 'flags' is used to indicate what kind of match need to be
- * removed, match is by default for the destination address, if
- * matching by source address is to be removed the flag
- * IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_del_mac_filter_flags(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue,
- const u8 flags)
-{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for matching entry in the MAC table based on given address
- * and queue. Do not touch entries at the end of the table reserved
- * for the VF MAC addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
- continue;
- if ((adapter->mac_table[i].state & flags) != flags)
- continue;
- if (adapter->mac_table[i].queue != queue)
- continue;
- if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
- continue;
-
- /* When a filter for the default address is "deleted",
- * we return it to its initial configuration
- */
- if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
- adapter->mac_table[i].state =
- IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
- } else {
- adapter->mac_table[i].state = 0;
- adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- }
-
- igc_rar_set_index(adapter, i);
- return 0;
- }
-
- return -ENOENT;
-}
-
-int igc_del_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags)
-{
- return igc_del_mac_filter_flags(adapter, addr, queue,
- IGC_MAC_STATE_QUEUE_STEERING | flags);
-}
-
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -4009,7 +4220,6 @@ static void igc_watchdog_task(struct work_struct *work)
struct igc_hw *hw = &adapter->hw;
struct igc_phy_info *phy = &hw->phy;
u16 phy_data, retry_count = 20;
- u32 connsw;
u32 link;
int i;
@@ -4022,14 +4232,6 @@ static void igc_watchdog_task(struct work_struct *work)
link = false;
}
- /* Force link down if we have fiber to swap to */
- if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
- if (hw->phy.media_type == igc_media_type_copper) {
- connsw = rd32(IGC_CONNSW);
- if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
- link = 0;
- }
- }
if (link) {
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4044,8 +4246,7 @@ static void igc_watchdog_task(struct work_struct *work)
ctrl = rd32(IGC_CTRL);
/* Link status message must follow this format */
netdev_info(netdev,
- "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
- netdev->name,
+ "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
"Full" : "Half",
@@ -4083,10 +4284,10 @@ retry_read_status:
retry_count--;
goto retry_read_status;
} else if (!retry_count) {
- dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
+ netdev_err(netdev, "exceed max 2 second\n");
}
} else {
- dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
+ netdev_err(netdev, "read 1000Base-T Status Reg\n");
}
no_wait:
netif_carrier_on(netdev);
@@ -4102,8 +4303,7 @@ no_wait:
adapter->link_duplex = 0;
/* Links status message must follow this format */
- netdev_info(netdev, "igc: %s NIC Link is Down\n",
- netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
/* link state has changed, schedule phy info update */
@@ -4321,8 +4521,7 @@ static int igc_request_irq(struct igc_adapter *adapter)
netdev->name, adapter);
if (err)
- dev_err(&pdev->dev, "Error %d getting interrupt\n",
- err);
+ netdev_err(netdev, "Error %d getting interrupt\n", err);
request_done:
return err;
@@ -4424,7 +4623,7 @@ err_setup_tx:
return err;
}
-static int igc_open(struct net_device *netdev)
+int igc_open(struct net_device *netdev)
{
return __igc_open(netdev, false);
}
@@ -4466,7 +4665,7 @@ static int __igc_close(struct net_device *netdev, bool suspending)
return 0;
}
-static int igc_close(struct net_device *netdev)
+int igc_close(struct net_device *netdev)
{
if (netif_device_present(netdev) || netdev->dismantle)
return __igc_close(netdev, false);
@@ -4491,6 +4690,158 @@ static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
+static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
+ bool enable)
+{
+ struct igc_ring *ring;
+ int i;
+
+ if (queue < 0 || queue >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[queue];
+ ring->launchtime_enable = enable;
+
+ if (adapter->base_time)
+ return 0;
+
+ adapter->cycle_time = NSEC_PER_SEC;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ring = adapter->tx_ring[i];
+ ring->start_time = 0;
+ ring->end_time = NSEC_PER_SEC;
+ }
+
+ return 0;
+}
+
+static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
+{
+ int queue_uses[IGC_MAX_TX_QUEUES] = { };
+ size_t n;
+
+ if (qopt->cycle_time_extension)
+ return false;
+
+ for (n = 0; n < qopt->num_entries; n++) {
+ const struct tc_taprio_sched_entry *e;
+ int i;
+
+ e = &qopt->entries[n];
+
+ /* i225 only supports "global" frame preemption
+ * settings.
+ */
+ if (e->command != TC_TAPRIO_CMD_SET_GATES)
+ return false;
+
+ for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+ if (e->gate_mask & BIT(i))
+ queue_uses[i]++;
+
+ if (queue_uses[i] > 1)
+ return false;
+ }
+ }
+
+ return true;
+}
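To make the restriction above concrete: a queue whose gate bit is set in more than one entry renders the whole schedule unusable on this hardware. A standalone sketch with a hypothetical two-entry schedule that fails the check:

#include <stdio.h>

int main(void)
{
	/* entry 0 gates queue 0; entry 1 gates queues 0 and 1 */
	unsigned int gate_mask[2] = { 0x1, 0x3 };
	int queue_uses[4] = { 0 }, ok = 1, n, i;

	for (n = 0; n < 2; n++)
		for (i = 0; i < 4; i++)
			if ((gate_mask[n] & (1u << i)) && ++queue_uses[i] > 1)
				ok = 0;	/* queue 0 opened twice */

	printf("schedule %s\n", ok ? "accepted" : "rejected");	/* rejected */
	return 0;
}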
+
+static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
+ struct tc_etf_qopt_offload *qopt)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
+ if (err)
+ return err;
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u32 start_time = 0, end_time = 0;
+ size_t n;
+
+ if (!qopt->enable) {
+ adapter->base_time = 0;
+ return 0;
+ }
+
+ if (adapter->base_time)
+ return -EALREADY;
+
+ if (!validate_schedule(qopt))
+ return -EINVAL;
+
+ adapter->cycle_time = qopt->cycle_time;
+ adapter->base_time = qopt->base_time;
+
+ /* FIXME: be a little smarter about cases when the gate for a
+ * queue stays open for more than one entry.
+ */
+ for (n = 0; n < qopt->num_entries; n++) {
+ struct tc_taprio_sched_entry *e = &qopt->entries[n];
+ int i;
+
+ end_time += e->interval;
+
+ for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ if (!(e->gate_mask & BIT(i)))
+ continue;
+
+ ring->start_time = start_time;
+ ring->end_time = end_time;
+ }
+
+ start_time += e->interval;
+ }
+
+ return 0;
+}
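A worked example of the window accumulation above: with a hypothetical 1 ms cycle and one gate entry per queue, each ring receives a half-open [start_time, end_time) window in nanoseconds.

#include <stdio.h>

int main(void)
{
	unsigned int interval[3] = { 300000, 300000, 400000 };	/* ns */
	unsigned int start = 0, end = 0;
	int n;

	for (n = 0; n < 3; n++) {
		end += interval[n];
		printf("queue %d window: [%u, %u)\n", n, start, end);
		start += interval[n];
	}
	return 0;	/* [0,300000) [300000,600000) [600000,1000000) */
}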
+
+static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ err = igc_save_qbv_schedule(adapter, qopt);
+ if (err)
+ return err;
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return igc_tsn_enable_qbv_scheduling(adapter, type_data);
+
+ case TC_SETUP_QDISC_ETF:
+ return igc_tsn_enable_launchtime(adapter, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -4503,6 +4854,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
.ndo_do_ioctl = igc_ioctl,
+ .ndo_setup_tc = igc_setup_tc,
};
/* PCIe configuration access */
@@ -4550,9 +4902,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
- if (IGC_REMOVED(hw_addr))
- return ~value;
-
value = readl(&hw_addr[reg]);
/* reads should not return all F's */
@@ -4571,7 +4920,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
{
- struct pci_dev *pdev = adapter->pdev;
struct igc_mac_info *mac = &adapter->hw.mac;
mac->autoneg = 0;
@@ -4616,7 +4964,7 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
return 0;
err_inval:
- dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+ netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
@@ -4697,7 +5045,7 @@ static int igc_probe(struct pci_dev *pdev,
hw->hw_addr = adapter->io_addr;
netdev->netdev_ops = &igc_netdev_ops;
- igc_set_ethtool_ops(netdev);
+ igc_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
netdev->mem_start = pci_resource_start(pdev, 0);
@@ -4723,9 +5071,21 @@ static int igc_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_SG;
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
+ netdev->features |= NETIF_F_TSO_ECN;
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->features |= NETIF_F_HW_TC;
+
+#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
/* setup the private structure */
err = igc_sw_init(adapter);
@@ -4750,8 +5110,7 @@ static int igc_probe(struct pci_dev *pdev,
if (igc_get_flash_presence_i225(hw)) {
if (hw->nvm.ops.validate(hw) < 0) {
- dev_err(&pdev->dev,
- "The NVM Checksum Is Not Valid\n");
+ dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -4825,7 +5184,7 @@ static int igc_probe(struct pci_dev *pdev,
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_put_noidle(&pdev->dev);
@@ -4865,6 +5224,8 @@ static void igc_remove(struct pci_dev *pdev)
pm_runtime_get_noresume(&pdev->dev);
+ igc_flush_nfc_rules(adapter);
+
igc_ptp_stop(adapter);
set_bit(__IGC_DOWN, &adapter->state);
@@ -4885,7 +5246,6 @@ static void igc_remove(struct pci_dev *pdev)
pci_iounmap(pdev, adapter->io_addr);
pci_release_mem_regions(pdev);
- kfree(adapter->mac_table);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
@@ -5014,8 +5374,7 @@ static int __maybe_unused igc_resume(struct device *dev)
return -ENODEV;
err = pci_enable_device_mem(pdev);
if (err) {
- dev_err(&pdev->dev,
- "igc: Cannot enable PCI device from suspend\n");
+ netdev_err(netdev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -5024,7 +5383,7 @@ static int __maybe_unused igc_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3cold, 0);
if (igc_init_interrupt_scheme(adapter, true)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -5128,8 +5487,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
pci_ers_result_t result;
if (pci_enable_device_mem(pdev)) {
- dev_err(&pdev->dev,
- "Could not re-enable PCI device after reset.\n");
+ netdev_err(netdev, "Could not re-enable PCI device after reset\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
@@ -5168,7 +5526,7 @@ static void igc_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(netdev)) {
if (igc_open(netdev)) {
- dev_err(&pdev->dev, "igc_open failed after reset\n");
+ netdev_err(netdev, "igc_open failed after reset\n");
return;
}
}
@@ -5215,7 +5573,6 @@ static struct pci_driver igc_driver = {
int igc_reinit_queues(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
int err = 0;
if (netif_running(netdev))
@@ -5224,7 +5581,7 @@ int igc_reinit_queues(struct igc_adapter *adapter)
igc_reset_interrupt_capability(adapter);
if (igc_init_interrupt_scheme(adapter, true)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index f99c514ad0f4..0d746f8588c8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -305,7 +305,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
struct igc_hw *hw = &adapter->hw;
u32 tsync_rx_cfg = 0;
bool is_l4 = false;
- bool is_l2 = false;
u32 regval;
/* reserved for future extensions */
@@ -346,7 +345,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_EVENT_V2;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- is_l2 = true;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
@@ -370,7 +368,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- is_l2 = true;
is_l4 = true;
if (hw->mac.type == igc_i225) {
@@ -405,15 +402,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
/* define which PTP packets are time stamped */
wr32(IGC_TSYNCRXCFG, tsync_rx_cfg);
- /* define ethertype filter for timestamped packets */
- if (is_l2)
- wr32(IGC_ETQF(3),
- (IGC_ETQF_FILTER_ENABLE | /* enable filter */
- IGC_ETQF_1588 | /* enable timestamping */
- ETH_P_1588)); /* 1588 eth protocol type */
- else
- wr32(IGC_ETQF(3), 0);
-
/* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
u32 ftqf = (IPPROTO_UDP /* UDP */
@@ -466,7 +454,7 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter)
* interrupt
*/
rd32(IGC_TXSTMPH);
- dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n");
}
}
@@ -529,7 +517,7 @@ static void igc_ptp_tx_work(struct work_struct *work)
* interrupt
*/
rd32(IGC_TXSTMPH);
- dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n");
return;
}
@@ -626,10 +614,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
- dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+ netdev_err(netdev, "ptp_clock_register failed\n");
} else if (adapter->ptp_clock) {
- dev_info(&adapter->pdev->dev, "added PHC on %s\n",
- adapter->netdev->name);
+ netdev_info(netdev, "PHC added\n");
adapter->ptp_flags |= IGC_PTP_ENABLED;
}
}
@@ -666,8 +653,7 @@ void igc_ptp_stop(struct igc_adapter *adapter)
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
- dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
- adapter->netdev->name);
+ netdev_info(adapter->netdev, "PHC removed\n");
adapter->ptp_flags &= ~IGC_PTP_ENABLED;
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index d4af53a80f11..232e82dec62e 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -17,11 +17,6 @@
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
-#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
/* NVM Register Descriptions */
#define IGC_EERD 0x12014 /* EEprom mode read - RW */
@@ -35,10 +30,6 @@
#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */
#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */
#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */
-#define IGC_FCSTS 0x02464 /* FC Status - RO */
-
-/* PCIe Register Description */
-#define IGC_GCR 0x05B00 /* PCIe control- RW */
/* Semaphore registers */
#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
@@ -49,6 +40,7 @@
#define IGC_FACTPS 0x05B30
/* Interrupt Register Description */
+#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */
#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
@@ -76,13 +68,6 @@
#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */
#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */
-#define IGC_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
-#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
-#define IGC_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
-#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
-#define IGC_HGPTC 0x04118 /* Host Good Packets TX Count */
-#define IGC_HTCBDPC 0x04124 /* Host TX Circ.Breaker Drop Count */
-
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
@@ -119,10 +104,11 @@
#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */
#define IGC_RFCTL 0x05008 /* Receive Filter Control*/
#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define IGC_RA 0x05400 /* Receive Address - RW Array */
#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08))
#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08))
-#define IGC_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */
+#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */
/* Transmit Register Descriptions */
#define IGC_TCTL 0x00400 /* Tx Control - RW */
@@ -138,13 +124,9 @@
#define IGC_MMDAC 13 /* MMD Access Control */
#define IGC_MMDAAD 14 /* MMD Access Address/Data */
-/* Good transmitted packets counter registers */
-#define IGC_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
-
/* Statistics Register Descriptions */
#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
-#define IGC_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */
#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */
#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */
@@ -152,10 +134,10 @@
#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */
#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */
#define IGC_COLC 0x04028 /* Collision Count - R/clr */
+#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */
#define IGC_DC 0x04030 /* Defer Count - R/clr */
#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */
-#define IGC_SEC 0x04038 /* Sequence Error Count - R/clr */
-#define IGC_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */
#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */
#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */
#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */
@@ -213,7 +195,6 @@
#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
#define IGC_LENERRS 0x04138 /* Length Errors Count */
-#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
/* Time sync registers */
#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */
@@ -229,7 +210,17 @@
#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
-#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+/* Transmit Scheduling Registers */
+#define IGC_TQAVCTRL 0x3570
+#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
+#define IGC_BASET_L 0x3314
+#define IGC_BASET_H 0x3318
+#define IGC_QBVCYCLET 0x331C
+#define IGC_QBVCYCLET_S 0x3320
+
+#define IGC_STQT(_n) (0x3324 + 0x4 * (_n))
+#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n))
+#define IGC_DTXMXPKTSZ 0x355C
/* System Time Registers */
#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
@@ -265,8 +256,7 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
- if (!IGC_REMOVED(hw_addr)) \
- writel((val), &hw_addr[(reg)]); \
+ writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
new file mode 100644
index 000000000000..174103c4bea6
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Intel Corporation */
+
+#include "igc.h"
+#include "igc_tsn.h"
+
+static bool is_any_launchtime(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ if (ring->launchtime_enable)
+ return true;
+ }
+
+ return false;
+}
+
+/* Returns the TSN specific registers to their default values after
+ * TSN offloading is disabled.
+ */
+static int igc_tsn_disable_offload(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 tqavctrl;
+ int i;
+
+ if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
+ return 0;
+
+ adapter->cycle_time = 0;
+
+ wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+
+ tqavctrl = rd32(IGC_TQAVCTRL);
+ tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
+ IGC_TQAVCTRL_ENHANCED_QAV);
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ ring->start_time = 0;
+ ring->end_time = 0;
+ ring->launchtime_enable = false;
+
+ wr32(IGC_TXQCTL(i), 0);
+ wr32(IGC_STQT(i), 0);
+ wr32(IGC_ENDQT(i), NSEC_PER_SEC);
+ }
+
+ wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
+ wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
+
+ adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
+
+ return 0;
+}
+
+static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 tqavctrl, baset_l, baset_h;
+ u32 sec, nsec, cycle;
+ ktime_t base_time, systim;
+ int i;
+
+ if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
+ return 0;
+
+ cycle = adapter->cycle_time;
+ base_time = adapter->base_time;
+
+ wr32(IGC_TSAUXC, 0);
+ wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+
+ tqavctrl = rd32(IGC_TQAVCTRL);
+ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ wr32(IGC_QBVCYCLET_S, cycle);
+ wr32(IGC_QBVCYCLET, cycle);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+ u32 txqctl = 0;
+
+ wr32(IGC_STQT(i), ring->start_time);
+ wr32(IGC_ENDQT(i), ring->end_time);
+
+ if (adapter->base_time) {
+ /* If we have a base_time we are in "taprio"
+ * mode and we need to be strict about the
+ * cycles: only transmit a packet if it can be
+ * completed during that cycle.
+ */
+ txqctl |= IGC_TXQCTL_STRICT_CYCLE |
+ IGC_TXQCTL_STRICT_END;
+ }
+
+ if (ring->launchtime_enable)
+ txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+
+ wr32(IGC_TXQCTL(i), txqctl);
+ }
+
+ nsec = rd32(IGC_SYSTIML);
+ sec = rd32(IGC_SYSTIMH);
+
+ systim = ktime_set(sec, nsec);
+
+ if (ktime_compare(systim, base_time) > 0) {
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
+ base_time = ktime_add_ns(base_time, (n + 1) * cycle);
+ }
+
+ baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
+
+ wr32(IGC_BASET_H, baset_h);
+ wr32(IGC_BASET_L, baset_l);
+
+ adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED;
+
+ return 0;
+}
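The adjustment before programming BASET guarantees the base time lies in the future: when SYSTIM has already passed base_time, the value is advanced by whole cycles. A standalone check of the same arithmetic with hypothetical timestamps:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t base_time = 1000000000LL;	/* 1.0 s */
	int64_t systim    = 3400000000LL;	/* 3.4 s, already past base_time */
	int64_t cycle     = 1000000000LL;	/* 1.0 s */
	int64_t n = (systim - base_time) / cycle;	/* 2 full cycles elapsed */

	base_time += (n + 1) * cycle;
	printf("programmed base_time = %lld ns\n", (long long)base_time);	/* 4.0 s */
	return 0;
}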
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter)
+{
+ bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);
+
+ if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
+ return 0;
+
+ if (!is_any_enabled) {
+ int err = igc_tsn_disable_offload(adapter);
+
+ if (err < 0)
+ return err;
+
+ /* The BASET registers aren't cleared when writing
+ * into them, force a reset if the interface is
+ * running.
+ */
+ if (netif_running(adapter->netdev))
+ schedule_work(&adapter->reset_task);
+
+ return 0;
+ }
+
+ return igc_tsn_enable_offload(adapter);
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
new file mode 100644
index 000000000000..f76bc86ddccd
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 Intel Corporation */
+
+#ifndef _IGC_TSN_H_
+#define _IGC_TSN_H_
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter);
+
+#endif /* _IGC_TSN_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 2833e4f041ce..5ddfc83a1e46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -224,17 +224,17 @@ struct ixgbe_tx_buffer {
};
struct ixgbe_rx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
union {
struct {
+ struct sk_buff *skb;
+ dma_addr_t dma;
struct page *page;
__u32 page_offset;
__u16 pagecnt_bias;
};
struct {
- void *addr;
- u64 handle;
+ bool discard;
+ struct xdp_buff *xdp;
};
};
};
@@ -351,7 +351,6 @@ struct ixgbe_ring {
};
struct xdp_rxq_info xdp_rxq;
struct xdp_umem *xsk_umem;
- struct zero_copy_allocator zca; /* ZC allocator anchor */
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 0bd1294ba517..17357a12cbdc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -64,8 +64,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
/* if link is down, assume supported */
if (link_up)
- supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
- true : false;
+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL;
else
supported = true;
}
@@ -2243,7 +2242,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
}
/* Configure pause time (2 TCs per register) */
- reg = hw->fc.pause_time * 0x00010001;
+ reg = hw->fc.pause_time * 0x00010001U;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 718931d951bc..f162b8b8f345 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -35,7 +35,7 @@
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>
#include "ixgbe.h"
@@ -2215,7 +2215,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
case XDP_PASS:
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
result = IXGBE_XDP_CONSUMED;
break;
@@ -2244,19 +2244,30 @@ xdp_out:
return ERR_PTR(-result);
}
+static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
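On 4 KiB page systems the helper above always reports half a page, so two receive buffers share a page and ixgbe_rx_buffer_flip() below can bounce between the halves with a single XOR. A standalone sketch of that flip:

#include <stdio.h>

int main(void)
{
	unsigned int truesize = 4096 / 2;	/* half-page truesize */
	unsigned int off = 0;

	off ^= truesize;	/* 0 -> 2048: second half of the page */
	off ^= truesize;	/* 2048 -> 0: back to the first half */
	printf("offset = %u\n", off);
	return 0;
}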
+
static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
unsigned int size)
{
+ unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
-
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
- SKB_DATA_ALIGN(size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -2290,6 +2301,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+#endif
+
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
@@ -2323,7 +2339,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp.data_hard_start = xdp.data -
ixgbe_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on frame length */
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
+#endif
skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
@@ -2954,35 +2973,6 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
/* skip the flush */
}
-static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
-{
- u32 mask;
- struct ixgbe_hw *hw = &adapter->hw;
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- mask = (qmask & 0xFFFFFFFF);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
- mask = (qmask >> 32);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
- break;
- default:
- break;
- }
- /* skip the flush */
-}
-
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
@@ -3726,8 +3716,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
/* configure the packet buffer length */
if (rx_ring->xsk_umem) {
- u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_umem);
/* If the MAC support setting RXDCTL.RLPML, the
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4074,11 +4063,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
if (ring->xsk_umem) {
- ring->zca.free = ixgbe_zca_free;
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &ring->zca));
-
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL));
@@ -4134,8 +4122,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
}
if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
- u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 537dfff585e0..d05a5690e66b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -102,7 +102,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
* indirection table and RSS hash key with PF therefore
* we want to disable the querying by default.
*/
- adapter->vfinfo[i].rss_query_enabled = 0;
+ adapter->vfinfo[i].rss_query_enabled = false;
/* Untrust all VFs */
adapter->vfinfo[i].trusted = false;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 6d01700b46bc..7887ae4aaf4f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -35,7 +35,7 @@ int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
-void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
+bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
const int budget);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 74b540ebb3dc..be9d2a8da515 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -2,7 +2,7 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ixgbe.h"
@@ -20,54 +20,11 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
return xdp_get_umem_from_qid(adapter->netdev, qid);
}
-static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem)
-{
- struct device *dev = &adapter->pdev->dev;
- unsigned int i, j;
- dma_addr_t dma;
-
- for (i = 0; i < umem->npgs; i++) {
- dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
- if (dma_mapping_error(dev, dma))
- goto out_unmap;
-
- umem->pages[i].dma = dma;
- }
-
- return 0;
-
-out_unmap:
- for (j = 0; j < i; j++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
- umem->pages[i].dma = 0;
- }
-
- return -1;
-}
-
-static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem)
-{
- struct device *dev = &adapter->pdev->dev;
- unsigned int i;
-
- for (i = 0; i < umem->npgs; i++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
-
- umem->pages[i].dma = 0;
- }
-}
-
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
struct xdp_umem *umem,
u16 qid)
{
struct net_device *netdev = adapter->netdev;
- struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -78,13 +35,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
- if (!reuseq)
- return -ENOMEM;
-
- xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
- err = ixgbe_xsk_umem_dma_map(adapter, umem);
+ err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
if (err)
return err;
@@ -124,7 +75,7 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
ixgbe_txrx_ring_disable(adapter, qid);
clear_bit(qid, adapter->af_xdp_zc_qps);
- ixgbe_xsk_umem_dma_unmap(adapter, umem);
+ xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR);
if (if_running)
ixgbe_txrx_ring_enable(adapter, qid);
@@ -143,25 +94,20 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
{
- struct xdp_umem *umem = rx_ring->xsk_umem;
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
struct xdp_frame *xdpf;
- u64 offset;
u32 act;
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- offset = xdp->data - xdp->data_hard_start;
-
- xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
result = IXGBE_XDP_CONSUMED;
break;
@@ -186,140 +132,16 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
return result;
}
-static struct
-ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
- unsigned int size)
-{
- struct ixgbe_rx_buffer *bi;
-
- bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- bi->dma, 0,
- size,
- DMA_BIDIRECTIONAL);
-
- return bi;
-}
-
-static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *obi)
-{
- u16 nta = rx_ring->next_to_alloc;
- struct ixgbe_rx_buffer *nbi;
-
- nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- nbi->dma = obi->dma;
- nbi->addr = obi->addr;
- nbi->handle = obi->handle;
-
- obi->addr = NULL;
- obi->skb = NULL;
-}
-
-void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
-{
- struct ixgbe_rx_buffer *bi;
- struct ixgbe_ring *rx_ring;
- u64 hr, mask;
- u16 nta;
-
- rx_ring = container_of(alloc, struct ixgbe_ring, zca);
- hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
- mask = rx_ring->xsk_umem->chunk_mask;
-
- nta = rx_ring->next_to_alloc;
- bi = rx_ring->rx_buffer_info;
-
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- handle &= mask;
-
- bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
- rx_ring->xsk_umem->headroom);
-}
-
-static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- void *addr = bi->addr;
- u64 handle, hr;
-
- if (addr)
- return true;
-
- if (!xsk_umem_peek_addr(umem, &handle)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
- return false;
- }
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- bi->dma = xdp_umem_get_dma(umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-
- xsk_umem_release_addr(umem);
- return true;
-}
-
-static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- u64 handle, hr;
-
- if (!xsk_umem_peek_addr_rq(umem, &handle)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
- return false;
- }
-
- handle &= rx_ring->xsk_umem->chunk_mask;
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- bi->dma = xdp_umem_get_dma(umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-
- xsk_umem_release_addr_rq(umem);
- return true;
-}
-
-static __always_inline bool
-__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
- bool alloc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi))
+bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ dma_addr_t dma;
bool ok = true;
/* nothing to do */
- if (!cleaned_count)
+ if (!count)
return true;
rx_desc = IXGBE_RX_DESC(rx_ring, i);
@@ -327,21 +149,18 @@ __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
i -= rx_ring->count;
do {
- if (!alloc(rx_ring, bi)) {
+ bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ if (!bi->xdp) {
ok = false;
break;
}
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_BIDIRECTIONAL);
+ dma = xsk_buff_xdp_get_dma(bi->xdp);
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc++;
bi++;
@@ -355,17 +174,14 @@ __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
/* clear the length for the next_to_use descriptor */
rx_desc->wb.upper.length = 0;
- cleaned_count--;
- } while (cleaned_count);
+ count--;
+ } while (count);
i += rx_ring->count;
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = i;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -378,40 +194,27 @@ __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
return ok;
}
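/* Editorial sketch (not part of the patch): the weak-ordering comment
 * above refers to the usual wmb-before-doorbell pattern; the tail write
 * itself falls outside this hunk, so this is an illustrative sketch and
 * not necessarily the exact elided code.
 */
wmb();				/* descriptor writes visible before doorbell */
writel(i, rx_ring->tail);	/* let hardware fetch the new buffers */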
-void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
-{
- __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
- ixgbe_alloc_buffer_slow_zc);
-}
-
-static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
- u16 count)
-{
- return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
- ixgbe_alloc_buffer_zc);
-}
-
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi,
- struct xdp_buff *xdp)
+ struct ixgbe_rx_buffer *bi)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
+ unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
struct sk_buff *skb;
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ bi->xdp->data_end - bi->xdp->data_hard_start,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
return skb;
}
@@ -434,9 +237,6 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
struct sk_buff *skb;
- struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
@@ -446,8 +246,8 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
failure = failure ||
- !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
- cleaned_count);
+ !ixgbe_alloc_rx_buffers_zc(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -462,42 +262,40 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
- bi = ixgbe_get_rx_buffer_zc(rx_ring, size);
+ bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
if (unlikely(!ixgbe_test_staterr(rx_desc,
IXGBE_RXD_STAT_EOP))) {
struct ixgbe_rx_buffer *next_bi;
- ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
ixgbe_inc_ntc(rx_ring);
next_bi =
&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- next_bi->skb = ERR_PTR(-EINVAL);
+ next_bi->discard = true;
continue;
}
- if (unlikely(bi->skb)) {
- ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ if (unlikely(bi->discard)) {
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
+ bi->discard = false;
ixgbe_inc_ntc(rx_ring);
continue;
}
- xdp.data = bi->addr;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + size;
- xdp.handle = bi->handle;
-
- xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
+ bi->xdp->data_end = bi->xdp->data + size;
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
+ xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (xdp_res) {
- if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+ if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
xdp_xmit |= xdp_res;
- bi->addr = NULL;
- bi->skb = NULL;
- } else {
- ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
- }
+ else
+ xsk_buff_free(bi->xdp);
+
+ bi->xdp = NULL;
total_rx_packets++;
total_rx_bytes += size;
@@ -507,7 +305,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
/* XDP_PASS path */
- skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
+ skb = ixgbe_construct_skb_zc(rx_ring, bi);
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
@@ -559,17 +357,17 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- u16 i = rx_ring->next_to_clean;
- struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
+ struct ixgbe_rx_buffer *bi;
+ u16 i;
- while (i != rx_ring->next_to_alloc) {
- xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
- i++;
- bi++;
- if (i == rx_ring->count) {
- i = 0;
- bi = rx_ring->rx_buffer_info;
- }
+ for (i = 0; i < rx_ring->count; i++) {
+ bi = &rx_ring->rx_buffer_info[i];
+
+ if (!bi->xdp)
+ continue;
+
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
}
}
@@ -592,10 +390,9 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
- dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
-
- dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
- DMA_BIDIRECTIONAL);
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ desc.len);
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4622c4ea2e46..a39e2cb384dd 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1095,19 +1095,31 @@ xdp_out:
return ERR_PTR(-result);
}
+static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
- SKB_DATA_ALIGN(size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -1125,6 +1137,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on the rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+#endif
+
while (likely(total_rx_packets < budget)) {
struct ixgbevf_rx_buffer *rx_buffer;
union ixgbe_adv_rx_desc *rx_desc;
@@ -1157,7 +1174,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
xdp.data_hard_start = xdp.data -
ixgbevf_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the packet length */
+ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
+#endif
skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
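/* Editorial note (not part of the patch): with 4 KiB pages the buffer
 * truesize above is a ring-wide constant (half an order-0 page), so
 * frame_sz is set once before the RX loop; with 64 KiB pages it depends
 * on the packet length, hence the per-descriptor update. Illustrative
 * 4 KiB case, values assumed:
 *
 *	xdp.frame_sz = 4096 / 2;	// 2048 bytes per half-page buffer
 */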
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 900affbdcc0e..1645e4e7ebdb 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -276,7 +276,8 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
return pkts;
}
-static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
{
struct xrx200_priv *priv = netdev_priv(net_dev);
struct xrx200_chan *ch = &priv->chan_tx;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 81d24481b22c..4d4b6243318a 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -666,11 +666,6 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
return 0;
}
-static inline __be16 sum16_as_be(__sum16 sum)
-{
- return (__force __be16)sum;
-}
-
static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
u16 *l4i_chk, u32 *command, int length)
{
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 51889770958d..011cd26953d9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -324,7 +324,8 @@
ETH_HLEN + ETH_FCS_LEN, \
cache_line_size())
-#define MVNETA_SKB_HEADROOM max(XDP_PACKET_HEADROOM, NET_SKB_PAD)
+/* Driver assumes that the last 3 bits are 0 */
+#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) & ~0x7)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
@@ -2072,7 +2073,7 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
int cpu;
u32 ret;
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return MVNETA_XDP_DROPPED;
@@ -2148,12 +2149,17 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp,
struct mvneta_stats *stats)
{
- unsigned int len;
+ unsigned int len, sync;
+ struct page *page;
u32 ret, act;
len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
act = bpf_prog_run_xdp(prog, xdp);
+ /* Due to xdp_adjust_tail, the DMA sync for_device must cover the max length the CPU touched */
+ sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+ sync = max(sync, len);
+
switch (act) {
case XDP_PASS:
stats->xdp_pass++;
@@ -2164,9 +2170,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) {
ret = MVNETA_XDP_DROPPED;
- page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
} else {
ret = MVNETA_XDP_REDIR;
stats->xdp_redirect++;
@@ -2175,10 +2180,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
}
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
- if (ret != MVNETA_XDP_TX)
- page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ if (ret != MVNETA_XDP_TX) {
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ }
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2187,8 +2192,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
/* fall through */
case XDP_DROP:
- page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data), len, true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
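/* Editorial sketch (not part of the patch): after the BPF program may
 * have moved data_end via bpf_xdp_adjust_tail(), the recycled page must
 * be DMA-synced for the larger of the old and new lengths. A hedged,
 * self-contained example of the recycling call used above:
 */
#include <linux/mm.h>
#include <net/page_pool.h>

static void example_recycle(struct page_pool *pool, struct xdp_buff *xdp,
			    unsigned int sync_len)
{
	struct page *page = virt_to_head_page(xdp->data);

	/* true: called from NAPI context, so direct recycling is safe */
	page_pool_put_page(pool, page, sync_len, true);
}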
@@ -2320,6 +2325,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rcu_read_lock();
xdp_prog = READ_ONCE(pp->xdp_prog);
xdp_buf.rxq = &rxq->xdp_rxq;
+ xdp_buf.frame_sz = PAGE_SIZE;
/* Fairness NAPI loop */
while (rx_proc < budget && rx_proc < rx_todo) {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 7352244c5e68..d4a4e241333d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1070,7 +1070,7 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
(port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
- val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
+ val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f1d2dea90a8c..5975521a4c86 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -379,40 +379,35 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- gfp_t gfp)
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
{
dma_addr_t iova;
+ u8 *buf;
- /* Check if request can be accommodated in previous allocated page */
- if (pool->page && ((pool->page_offset + pool->rbsize) <=
- (PAGE_SIZE << pool->rbpage_order))) {
- pool->pageref++;
- goto ret;
- }
-
- otx2_get_page(pool);
-
- /* Allocate a new page */
- pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- pool->rbpage_order);
- if (unlikely(!pool->page))
+ buf = napi_alloc_frag(pool->rbsize);
+ if (unlikely(!buf))
return -ENOMEM;
- pool->page_offset = 0;
-ret:
- iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
- pool->rbsize, DMA_FROM_DEVICE);
- if (!iova) {
- if (!pool->page_offset)
- __free_pages(pool->page, pool->rbpage_order);
- pool->page = NULL;
+ iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ page_frag_free(buf);
return -ENOMEM;
}
- pool->page_offset += pool->rbsize;
+
return iova;
}
+static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+{
+ dma_addr_t addr;
+
+ local_bh_disable();
+ addr = __otx2_alloc_rbuf(pfvf, pool);
+ local_bh_enable();
+ return addr;
+}
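/* Editorial note (not part of the patch): napi_alloc_frag() draws from a
 * per-CPU page-fragment cache that is only safe with bottom halves
 * disabled, which is why the wrapper above brackets the call for
 * process-context users while the NAPI path calls __otx2_alloc_rbuf()
 * directly. A hedged sketch of the same pattern:
 */
#include <linux/skbuff.h>

static void *example_alloc_from_process_ctx(unsigned int size)
{
	void *buf;

	local_bh_disable();		/* protect the per-CPU frag cache */
	buf = napi_alloc_frag(size);	/* lockless per-CPU allocation */
	local_bh_enable();

	return buf;
}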
+
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -805,7 +800,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, rbpool);
if (bufptr <= 0) {
/* Schedule a WQ if we fail to free at least half of the
* pointers, else enable napi for this RQ.
@@ -1064,7 +1059,6 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
return err;
pool->rbsize = buf_size;
- pool->rbpage_order = get_order(buf_size);
/* Initialize this pool's context via AF */
aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
@@ -1152,13 +1146,12 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
return -ENOMEM;
for (ptr = 0; ptr < num_sqbs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, pool);
if (bufptr <= 0)
return bufptr;
otx2_aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
- otx2_get_page(pool);
}
return 0;
@@ -1204,13 +1197,12 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, pool);
if (bufptr <= 0)
return bufptr;
otx2_aura_freeptr(pfvf, pool_id,
bufptr + OTX2_HEAD_ROOM);
}
- otx2_get_page(pool);
}
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 018c283a0ac4..2fa29889522e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -309,7 +309,7 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
default:
blkaddr = BLKADDR_RVUM;
break;
- };
+ }
offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
@@ -434,18 +434,6 @@ static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}
-/* Update page ref count */
-static inline void otx2_get_page(struct otx2_pool *pool)
-{
- if (!pool->page)
- return;
-
- if (pool->pageref)
- page_ref_add(pool->page, pool->pageref);
- pool->pageref = 0;
- pool->page = NULL;
-}
-
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
if (type == AURA_NIX_SQ)
@@ -589,8 +577,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- gfp_t gfp);
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 411e5ea1031e..64786568af0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1856,13 +1856,17 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
- if (!hw->irq_name)
+ if (!hw->irq_name) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
sizeof(cpumask_var_t), GFP_KERNEL);
- if (!hw->affinity_mask)
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
/* Map CSRs */
pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 45abe0cd0e7b..b04f5429d72d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -286,7 +286,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
+ bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
if (unlikely(bufptr <= 0)) {
struct refill_work *work;
struct delayed_work *dwork;
@@ -304,7 +304,6 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
- otx2_get_page(cq->rbpool);
return processed_cqe;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 4ab32d3adb78..da97f2d4416f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -113,11 +113,7 @@ struct otx2_cq_poll {
struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
- u8 rbpage_order;
u16 rbsize;
- u32 page_offset;
- u16 pageref;
- struct page *page;
};
struct otx2_cq_queue {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 187c633a7af5..f4227517dc8e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -497,13 +497,17 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
- if (!hw->irq_name)
+ if (!hw->irq_name) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
sizeof(cpumask_var_t), GFP_KERNEL);
- if (!hw->affinity_mask)
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (err < 0) {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 7a0d785b826c..17243bb5ba91 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1418,7 +1418,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pep->base)) {
- err = -ENOMEM;
+ err = PTR_ERR(pep->base);
goto err_netdev;
}
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 4968352ba188..500c15e7ea4a 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
- bool "MediaTek ethernet driver"
+ bool "MediaTek devices"
depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
---help---
If you have a Mediatek SoC with ethernet, say Y.
@@ -14,4 +14,11 @@ config NET_MEDIATEK_SOC
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
+config NET_MEDIATEK_STAR_EMAC
+ tristate "MediaTek STAR Ethernet MAC support"
+ select PHYLIB
+ help
+ This driver supports the ethernet MAC IP first used on
+ MediaTek MT85** SoCs.
+
endif #NET_VENDOR_MEDIATEK
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 2d8362f9341b..3a777b4a6cd3 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -3,5 +3,6 @@
# Makefile for the Mediatek SoCs built-in ethernet macs
#
-obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
+obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o
+obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 09047109d0da..f6a1f8666f95 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -65,7 +65,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
return __raw_readl(eth->base + reg);
}
-u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
u32 val;
@@ -1122,7 +1122,7 @@ static void mtk_stop_queue(struct mtk_eth *eth)
}
}
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
new file mode 100644
index 000000000000..f1ace4fec19f
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -0,0 +1,1651 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 MediaTek Corporation
+ * Copyright (c) 2020 BayLibre SAS
+ *
+ * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define MTK_STAR_DRVNAME "mtk_star_emac"
+
+#define MTK_STAR_WAIT_TIMEOUT 300
+#define MTK_STAR_MAX_FRAME_SIZE 1514
+#define MTK_STAR_SKB_ALIGNMENT 16
+#define MTK_STAR_NAPI_WEIGHT 64
+#define MTK_STAR_HASHTABLE_MC_LIMIT 256
+#define MTK_STAR_HASHTABLE_SIZE_MAX 512
+
+/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
+ * work for this controller.
+ */
+#define MTK_STAR_IP_ALIGN 2
+
+static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
+#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
+
+/* PHY Control Register 0 */
+#define MTK_STAR_REG_PHY_CTRL0 0x0000
+#define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13)
+#define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14)
+#define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15)
+#define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8)
+#define MTK_STAR_OFF_PHY_CTRL0_PREG 8
+#define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16)
+#define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16
+
+/* PHY Control Register 1 */
+#define MTK_STAR_REG_PHY_CTRL1 0x0004
+#define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0)
+#define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8)
+#define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9
+#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00
+#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01
+#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02
+#define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11)
+#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12)
+#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13)
+
+/* MAC Configuration Register */
+#define MTK_STAR_REG_MAC_CFG 0x0008
+#define MTK_STAR_OFF_MAC_CFG_IPG 10
+#define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0)
+#define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16)
+#define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19)
+#define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20)
+#define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22)
+#define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31)
+
+/* Flow-Control Configuration Register */
+#define MTK_STAR_REG_FC_CFG 0x000c
+#define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7)
+#define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8)
+#define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16
+#define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16)
+#define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800
+
+/* ARL Configuration Register */
+#define MTK_STAR_REG_ARL_CFG 0x0010
+#define MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0)
+#define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4)
+
+/* MAC High and Low Bytes Registers */
+#define MTK_STAR_REG_MY_MAC_H 0x0014
+#define MTK_STAR_REG_MY_MAC_L 0x0018
+
+/* Hash Table Control Register */
+#define MTK_STAR_REG_HASH_CTRL 0x001c
+#define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0)
+#define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12)
+#define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13)
+#define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14)
+#define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16)
+#define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17)
+#define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31)
+
+/* TX DMA Control Register */
+#define MTK_STAR_REG_TX_DMA_CTRL 0x0034
+#define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0)
+#define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1)
+#define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2)
+
+/* RX DMA Control Register */
+#define MTK_STAR_REG_RX_DMA_CTRL 0x0038
+#define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0)
+#define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1)
+#define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2)
+
+/* DMA Address Registers */
+#define MTK_STAR_REG_TX_DPTR 0x003c
+#define MTK_STAR_REG_RX_DPTR 0x0040
+#define MTK_STAR_REG_TX_BASE_ADDR 0x0044
+#define MTK_STAR_REG_RX_BASE_ADDR 0x0048
+
+/* Interrupt Status Register */
+#define MTK_STAR_REG_INT_STS 0x0050
+#define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2)
+#define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3)
+#define MTK_STAR_BIT_INT_STS_FNRC BIT(6)
+#define MTK_STAR_BIT_INT_STS_TNTC BIT(8)
+
+/* Interrupt Mask Register */
+#define MTK_STAR_REG_INT_MASK 0x0054
+#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
+
+/* Misc. Config Register */
+#define MTK_STAR_REG_TEST1 0x005c
+#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
+
+/* Extended Configuration Register */
+#define MTK_STAR_REG_EXT_CFG 0x0060
+#define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16
+#define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16)
+#define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400
+
+/* EthSys Configuration Register */
+#define MTK_STAR_REG_SYS_CONF 0x0094
+#define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0)
+#define MTK_STAR_BIT_EXT_MDC_MODE BIT(1)
+#define MTK_STAR_BIT_SWC_MII_MODE BIT(2)
+
+/* MAC Clock Configuration Register */
+#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
+#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
+#define MTK_STAR_BIT_CLK_DIV_10 0x0a
+
+/* Counter registers. */
+#define MTK_STAR_REG_C_RXOKPKT 0x0100
+#define MTK_STAR_REG_C_RXOKBYTE 0x0104
+#define MTK_STAR_REG_C_RXRUNT 0x0108
+#define MTK_STAR_REG_C_RXLONG 0x010c
+#define MTK_STAR_REG_C_RXDROP 0x0110
+#define MTK_STAR_REG_C_RXCRC 0x0114
+#define MTK_STAR_REG_C_RXARLDROP 0x0118
+#define MTK_STAR_REG_C_RXVLANDROP 0x011c
+#define MTK_STAR_REG_C_RXCSERR 0x0120
+#define MTK_STAR_REG_C_RXPAUSE 0x0124
+#define MTK_STAR_REG_C_TXOKPKT 0x0128
+#define MTK_STAR_REG_C_TXOKBYTE 0x012c
+#define MTK_STAR_REG_C_TXPAUSECOL 0x0130
+#define MTK_STAR_REG_C_TXRTY 0x0134
+#define MTK_STAR_REG_C_TXSKIP 0x0138
+#define MTK_STAR_REG_C_TX_ARP 0x013c
+#define MTK_STAR_REG_C_RX_RERR 0x01d8
+#define MTK_STAR_REG_C_RX_UNI 0x01dc
+#define MTK_STAR_REG_C_RX_MULTI 0x01e0
+#define MTK_STAR_REG_C_RX_BROAD 0x01e4
+#define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8
+#define MTK_STAR_REG_C_TX_UNI 0x01ec
+#define MTK_STAR_REG_C_TX_MULTI 0x01f0
+#define MTK_STAR_REG_C_TX_BROAD 0x01f4
+#define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8
+#define MTK_STAR_REG_C_TX_LATECOL 0x01fc
+#define MTK_STAR_REG_C_RX_LENGTHERR 0x0214
+#define MTK_STAR_REG_C_RX_TWIST 0x0218
+
+/* Ethernet CFG Control */
+#define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4
+#define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0)
+
+/* Represents the actual structure of descriptors used by the MAC. We can
+ * reuse the same structure for both TX and RX - the layout is the same, only
+ * the flags differ slightly.
+ */
+struct mtk_star_ring_desc {
+ /* Contains both the status flags as well as packet length. */
+ u32 status;
+ u32 data_ptr;
+ u32 vtag;
+ u32 reserved;
+};
+
+#define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0)
+#define MTK_STAR_DESC_BIT_RX_CRCE BIT(24)
+#define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25)
+#define MTK_STAR_DESC_BIT_INT BIT(27)
+#define MTK_STAR_DESC_BIT_LS BIT(28)
+#define MTK_STAR_DESC_BIT_FS BIT(29)
+#define MTK_STAR_DESC_BIT_EOR BIT(30)
+#define MTK_STAR_DESC_BIT_COWN BIT(31)
+
+/* Helper structure for storing data read from/written to descriptors in order
+ * to limit reads from/writes to DMA memory.
+ */
+struct mtk_star_ring_desc_data {
+ unsigned int len;
+ unsigned int flags;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+};
+
+#define MTK_STAR_RING_NUM_DESCS 128
+#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
+#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
+#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
+#define MTK_STAR_DMA_SIZE \
+ (MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
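/* Editorial check (not part of the patch): each descriptor is 4 x u32 =
 * 16 bytes and there are 256 in total, so MTK_STAR_DMA_SIZE is 4096
 * bytes; both rings fit in exactly one 4 KiB page. Compile-time sketch:
 *
 *	static_assert(sizeof(struct mtk_star_ring_desc) == 16);
 *	static_assert(MTK_STAR_DMA_SIZE == 4096);
 */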
+
+struct mtk_star_ring {
+ struct mtk_star_ring_desc *descs;
+ struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
+ dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct mtk_star_priv {
+ struct net_device *ndev;
+
+ struct regmap *regs;
+ struct regmap *pericfg;
+
+ struct clk_bulk_data clks[MTK_STAR_NCLKS];
+
+ void *ring_base;
+ struct mtk_star_ring_desc *descs_base;
+ dma_addr_t dma_addr;
+ struct mtk_star_ring tx_ring;
+ struct mtk_star_ring rx_ring;
+
+ struct mii_bus *mii;
+ struct napi_struct napi;
+
+ struct device_node *phy_node;
+ phy_interface_t phy_intf;
+ struct phy_device *phydev;
+ unsigned int link;
+ int speed;
+ int duplex;
+ int pause;
+
+ /* Protects against concurrent descriptor access. */
+ spinlock_t lock;
+
+ struct rtnl_link_stats64 stats;
+ struct work_struct stats_work;
+};
+
+static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
+{
+ return priv->ndev->dev.parent;
+}
+
+static const struct regmap_config mtk_star_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static void mtk_star_ring_init(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc *descs)
+{
+ memset(ring, 0, sizeof(*ring));
+ ring->descs = descs;
+ ring->head = 0;
+ ring->tail = 0;
+}
+
+static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
+ unsigned int status;
+
+ status = READ_ONCE(desc->status);
+ dma_rmb(); /* Make sure we read the status bits before checking them. */
+
+ if (!(status & MTK_STAR_DESC_BIT_COWN))
+ return -1;
+
+ desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
+ desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
+ desc_data->dma_addr = ring->dma_addrs[ring->tail];
+ desc_data->skb = ring->skbs[ring->tail];
+
+ ring->dma_addrs[ring->tail] = 0;
+ ring->skbs[ring->tail] = NULL;
+
+ status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;
+
+ WRITE_ONCE(desc->data_ptr, 0);
+ WRITE_ONCE(desc->status, status);
+
+ ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;
+
+ return 0;
+}
+
+static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data,
+ unsigned int flags)
+{
+ struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
+ unsigned int status;
+
+ status = READ_ONCE(desc->status);
+
+ ring->skbs[ring->head] = desc_data->skb;
+ ring->dma_addrs[ring->head] = desc_data->dma_addr;
+
+ status |= desc_data->len;
+ if (flags)
+ status |= flags;
+
+ WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
+ WRITE_ONCE(desc->status, status);
+ status &= ~MTK_STAR_DESC_BIT_COWN;
+ /* Flush previous modifications before ownership change. */
+ dma_wmb();
+ WRITE_ONCE(desc->status, status);
+
+ ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
+}
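/* Editorial note (not part of the patch): the two-step status write
 * above is the classic ownership-bit handoff. The descriptor is filled
 * while COWN is still set (CPU-owned); dma_wmb() then guarantees the
 * device can never observe a cleared COWN before the data pointer and
 * length writes; the final write alone passes ownership to the MAC.
 */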
+
+static void
+mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ mtk_star_ring_push_head(ring, desc_data, 0);
+}
+
+static void
+mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
+ MTK_STAR_DESC_BIT_LS |
+ MTK_STAR_DESC_BIT_INT;
+
+ mtk_star_ring_push_head(ring, desc_data, flags);
+}
+
+static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
+{
+ return abs(ring->head - ring->tail);
+}
+
+static bool mtk_star_ring_full(struct mtk_star_ring *ring)
+{
+ return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
+}
+
+static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
+{
+ return mtk_star_ring_num_used_descs(ring) > 0;
+}
+
+static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
+ struct sk_buff *skb)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ /* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
+ return dma_map_single(dev, skb_tail_pointer(skb) - 2,
+ skb_tailroom(skb), DMA_FROM_DEVICE);
+}
+
+static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ dma_unmap_single(dev, desc_data->dma_addr,
+ skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
+}
+
+static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
+ struct sk_buff *skb)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+}
+
+static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ return dma_unmap_single(dev, desc_data->dma_addr,
+ skb_headlen(desc_data->skb), DMA_TO_DEVICE);
+}
+
+static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
+{
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
+ MTK_STAR_BIT_MAC_CFG_NIC_PD);
+}
+
+/* Unmask the three interrupts we care about, mask all others. */
+static void mtk_star_intr_enable(struct mtk_star_priv *priv)
+{
+ unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
+ MTK_STAR_BIT_INT_STS_FNRC |
+ MTK_STAR_REG_INT_STS_MIB_CNT_TH;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
+}
+
+static void mtk_star_intr_disable(struct mtk_star_priv *priv)
+{
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
+}
+
+static void mtk_star_intr_enable_tx(struct mtk_star_priv *priv)
+{
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_BIT_INT_STS_TNTC);
+}
+
+static void mtk_star_intr_enable_rx(struct mtk_star_priv *priv)
+{
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_BIT_INT_STS_FNRC);
+}
+
+static void mtk_star_intr_enable_stats(struct mtk_star_priv *priv)
+{
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_REG_INT_STS_MIB_CNT_TH);
+}
+
+static void mtk_star_intr_disable_tx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_BIT_INT_STS_TNTC);
+}
+
+static void mtk_star_intr_disable_rx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_BIT_INT_STS_FNRC);
+}
+
+static void mtk_star_intr_disable_stats(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_REG_INT_STS_MIB_CNT_TH);
+}
+
+static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
+
+ return val;
+}
+
+static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ val = mtk_star_intr_read(priv);
+ regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
+
+ return val;
+}
+
+static void mtk_star_dma_init(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring_desc *desc;
+ unsigned int val;
+ int i;
+
+ priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;
+
+ for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
+ desc = &priv->descs_base[i];
+
+ memset(desc, 0, sizeof(*desc));
+ desc->status = MTK_STAR_DESC_BIT_COWN;
+ if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
+ (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
+ desc->status |= MTK_STAR_DESC_BIT_EOR;
+ }
+
+ mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
+ mtk_star_ring_init(&priv->rx_ring,
+ priv->descs_base + MTK_STAR_NUM_TX_DESCS);
+
+ /* Set DMA pointers. */
+ val = (unsigned int)priv->dma_addr;
+ regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
+ regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);
+
+ val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
+ regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
+ regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
+}
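/* Editorial check (not part of the patch): with 128 TX descriptors of
 * 16 bytes each, the pointer math above places the RX base at
 * dma_addr + 0x800, i.e. the RX ring starts 2 KiB into the shared
 * descriptor area.
 */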
+
+static void mtk_star_dma_start(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
+ MTK_STAR_BIT_TX_DMA_CTRL_START);
+ regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
+ MTK_STAR_BIT_RX_DMA_CTRL_START);
+}
+
+static void mtk_star_dma_stop(struct mtk_star_priv *priv)
+{
+ regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
+ MTK_STAR_BIT_TX_DMA_CTRL_STOP);
+ regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
+ MTK_STAR_BIT_RX_DMA_CTRL_STOP);
+}
+
+static void mtk_star_dma_disable(struct mtk_star_priv *priv)
+{
+ int i;
+
+ mtk_star_dma_stop(priv);
+
+ /* Take back all descriptors. */
+ for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
+ priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
+}
+
+static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
+ MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
+}
+
+static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
+ MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
+}
+
+static void mtk_star_set_mac_addr(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ u8 *mac_addr = ndev->dev_addr;
+ unsigned int high, low;
+
+ high = mac_addr[0] << 8 | mac_addr[1] << 0;
+ low = mac_addr[2] << 24 | mac_addr[3] << 16 |
+ mac_addr[4] << 8 | mac_addr[5];
+
+ regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
+ regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
+}
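/* Editorial worked example (not part of the patch): for the MAC address
 * 00:11:22:33:44:55 the register writes above yield MY_MAC_H = 0x0011
 * and MY_MAC_L = 0x22334455.
 */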
+
+static void mtk_star_reset_counters(struct mtk_star_priv *priv)
+{
+ static const unsigned int counter_regs[] = {
+ MTK_STAR_REG_C_RXOKPKT,
+ MTK_STAR_REG_C_RXOKBYTE,
+ MTK_STAR_REG_C_RXRUNT,
+ MTK_STAR_REG_C_RXLONG,
+ MTK_STAR_REG_C_RXDROP,
+ MTK_STAR_REG_C_RXCRC,
+ MTK_STAR_REG_C_RXARLDROP,
+ MTK_STAR_REG_C_RXVLANDROP,
+ MTK_STAR_REG_C_RXCSERR,
+ MTK_STAR_REG_C_RXPAUSE,
+ MTK_STAR_REG_C_TXOKPKT,
+ MTK_STAR_REG_C_TXOKBYTE,
+ MTK_STAR_REG_C_TXPAUSECOL,
+ MTK_STAR_REG_C_TXRTY,
+ MTK_STAR_REG_C_TXSKIP,
+ MTK_STAR_REG_C_TX_ARP,
+ MTK_STAR_REG_C_RX_RERR,
+ MTK_STAR_REG_C_RX_UNI,
+ MTK_STAR_REG_C_RX_MULTI,
+ MTK_STAR_REG_C_RX_BROAD,
+ MTK_STAR_REG_C_RX_ALIGNERR,
+ MTK_STAR_REG_C_TX_UNI,
+ MTK_STAR_REG_C_TX_MULTI,
+ MTK_STAR_REG_C_TX_BROAD,
+ MTK_STAR_REG_C_TX_TIMEOUT,
+ MTK_STAR_REG_C_TX_LATECOL,
+ MTK_STAR_REG_C_RX_LENGTHERR,
+ MTK_STAR_REG_C_RX_TWIST,
+ };
+
+ unsigned int i, val;
+
+ for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
+ regmap_read(priv->regs, counter_regs[i], &val);
+}
+
+static void mtk_star_update_stat(struct mtk_star_priv *priv,
+ unsigned int reg, u64 *stat)
+{
+ unsigned int val;
+
+ regmap_read(priv->regs, reg, &val);
+ *stat += val;
+}
+
+/* Try to get as many stats as possible from the internal registers instead
+ * of tracking them ourselves.
+ */
+static void mtk_star_update_stats(struct mtk_star_priv *priv)
+{
+ struct rtnl_link_stats64 *stats = &priv->stats;
+
+ /* OK packets and bytes. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);
+
+ /* RX & TX multicast. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);
+
+ /* Collisions. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
+ &stats->collisions);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
+ &stats->collisions);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);
+
+ /* RX Errors. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
+ &stats->rx_length_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
+ &stats->rx_over_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
+ &stats->rx_frame_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
+ &stats->rx_fifo_errors);
+ /* Sum of the general RX error counter + all of the above. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
+ stats->rx_errors += stats->rx_length_errors;
+ stats->rx_errors += stats->rx_over_errors;
+ stats->rx_errors += stats->rx_crc_errors;
+ stats->rx_errors += stats->rx_frame_errors;
+ stats->rx_errors += stats->rx_fifo_errors;
+}
+
+/* This runs in process context; parallel TX and RX paths executing in
+ * napi context may result in losing some stats data, but this should
+ * happen seldom enough to be acceptable.
+ */
+static void mtk_star_update_stats_work(struct work_struct *work)
+{
+ struct mtk_star_priv *priv = container_of(work, struct mtk_star_priv,
+ stats_work);
+
+ mtk_star_update_stats(priv);
+ mtk_star_reset_counters(priv);
+ mtk_star_intr_enable_stats(priv);
+}
+
+static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
+{
+ uintptr_t tail, offset;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
+ if (!skb)
+ return NULL;
+
+ /* Align to 16 bytes. */
+ tail = (uintptr_t)skb_tail_pointer(skb);
+ if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
+ offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
+ skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
+ }
+
+ /* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
+ * extract the Ethernet header (14 bytes) so we need two more bytes.
+ */
+ skb_reserve(skb, MTK_STAR_IP_ALIGN);
+
+ return skb;
+}
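/* Editorial worked example (not part of the patch): for a tail pointer
 * of, say, 0x1006, tail & 0xf == 6 and skb_reserve(16 - 6) moves it to
 * 0x1010; the extra 2-byte reserve puts the data at 0x1012, so the IP
 * header lands on a 16-byte boundary (0x1020) once eth_type_trans()
 * pulls the 14-byte Ethernet header.
 */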
+
+static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct mtk_star_ring *ring = &priv->rx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int i;
+
+ for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
+ skb = mtk_star_alloc_skb(ndev);
+ if (!skb)
+ return -ENOMEM;
+
+ dma_addr = mtk_star_dma_map_rx(priv, skb);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ desc = &ring->descs[i];
+ desc->data_ptr = dma_addr;
+ desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
+ desc->status &= ~MTK_STAR_DESC_BIT_COWN;
+ ring->skbs[i] = skb;
+ ring->dma_addrs[i] = dma_addr;
+ }
+
+ return 0;
+}
+
+static void
+mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
+ void (*unmap_func)(struct mtk_star_priv *,
+ struct mtk_star_ring_desc_data *))
+{
+ struct mtk_star_ring_desc_data desc_data;
+ int i;
+
+ for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
+ if (!ring->dma_addrs[i])
+ continue;
+
+ desc_data.dma_addr = ring->dma_addrs[i];
+ desc_data.skb = ring->skbs[i];
+
+ unmap_func(priv, &desc_data);
+ dev_kfree_skb(desc_data.skb);
+ }
+}
+
+static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->rx_ring;
+
+ mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
+}
+
+static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->tx_ring;
+
+ mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
+}
+
+/* All processing for TX and RX happens in the napi poll callback. */
+static irqreturn_t mtk_star_handle_irq(int irq, void *data)
+{
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+ bool need_napi = false;
+ unsigned int status;
+
+ ndev = data;
+ priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ status = mtk_star_intr_read(priv);
+
+ if (status & MTK_STAR_BIT_INT_STS_TNTC) {
+ mtk_star_intr_disable_tx(priv);
+ need_napi = true;
+ }
+
+ if (status & MTK_STAR_BIT_INT_STS_FNRC) {
+ mtk_star_intr_disable_rx(priv);
+ need_napi = true;
+ }
+
+ if (need_napi)
+ napi_schedule(&priv->napi);
+
+ /* One of the counters reached 0x8000000 - update stats and
+ * reset all counters.
+ */
+ if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
+ mtk_star_intr_disable_stats(priv);
+ schedule_work(&priv->stats_work);
+ }
+
+ mtk_star_intr_ack_all(priv);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for the completion of any previous command - CMD_START bit must be
+ * cleared by hardware.
+ */
+static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout_atomic(priv->regs,
+ MTK_STAR_REG_HASH_CTRL, val,
+ !(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
+ 10, MTK_STAR_WAIT_TIMEOUT);
+}
+
+static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+ int ret;
+
+ /* Wait for BIST_DONE bit. */
+ ret = regmap_read_poll_timeout_atomic(priv->regs,
+ MTK_STAR_REG_HASH_CTRL, val,
+ val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
+ 10, MTK_STAR_WAIT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Check the BIST_OK bit. */
+ if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
+ MTK_STAR_BIT_HASH_CTRL_BIST_OK))
+ return -EIO;
+
+ return 0;
+}
+
+static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
+ unsigned int hash_addr)
+{
+ unsigned int val;
+ int ret;
+
+ ret = mtk_star_hash_wait_cmd_start(priv);
+ if (ret)
+ return ret;
+
+ val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
+ val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
+ val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
+ val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
+ val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
+ regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);
+
+ return mtk_star_hash_wait_ok(priv);
+}
+
+static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
+{
+ int ret;
+
+ ret = mtk_star_hash_wait_cmd_start(priv);
+ if (ret)
+ return ret;
+
+ regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
+ MTK_STAR_BIT_HASH_CTRL_BIST_EN);
+ regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
+ MTK_STAR_BIT_TEST1_RST_HASH_MBIST);
+
+ return mtk_star_hash_wait_ok(priv);
+}
+
+static void mtk_star_phy_config(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ if (priv->speed == SPEED_1000)
+ val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
+ else if (priv->speed == SPEED_100)
+ val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
+ else
+ val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
+ val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
+
+ val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ /* Only full-duplex supported for now. */
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
+
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
+
+ if (priv->pause) {
+ val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
+ val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
+ val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
+ } else {
+ val = 0;
+ }
+
+ regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
+ MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
+ MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
+
+ if (priv->pause) {
+ val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
+ val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
+ } else {
+ val = 0;
+ }
+
+ regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
+ MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
+}
+
+static void mtk_star_adjust_link(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ bool new_state = false;
+
+ if (phydev->link) {
+ if (!priv->link) {
+ priv->link = phydev->link;
+ new_state = true;
+ }
+
+ if (priv->speed != phydev->speed) {
+ priv->speed = phydev->speed;
+ new_state = true;
+ }
+
+ if (priv->pause != phydev->pause) {
+ priv->pause = phydev->pause;
+ new_state = true;
+ }
+ } else {
+ if (priv->link) {
+ priv->link = phydev->link;
+ new_state = true;
+ }
+ }
+
+ if (new_state) {
+ if (phydev->link)
+ mtk_star_phy_config(priv);
+
+ phy_print_status(ndev->phydev);
+ }
+}
+
+static void mtk_star_init_config(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
+ MTK_STAR_BIT_EXT_MDC_MODE |
+ MTK_STAR_BIT_SWC_MII_MODE);
+
+ regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
+ regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
+ MTK_STAR_MSK_MAC_CLK_CONF,
+ MTK_STAR_BIT_CLK_DIV_10);
+}
+
+static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
+{
+ regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
+ MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
+ MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
+}
+
+static int mtk_star_enable(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ unsigned int val;
+ int ret;
+
+ mtk_star_nic_disable_pd(priv);
+ mtk_star_intr_disable(priv);
+ mtk_star_dma_stop(priv);
+
+ mtk_star_set_mac_addr(ndev);
+
+ /* Configure the MAC */
+ val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
+ val <<= MTK_STAR_OFF_MAC_CFG_IPG;
+ val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
+ val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
+ val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
+ regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);
+
+ /* Enable Hash Table BIST and reset it */
+ ret = mtk_star_reset_hash_table(priv);
+ if (ret)
+ return ret;
+
+ /* Setup the hashing algorithm */
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
+ MTK_STAR_BIT_ARL_CFG_HASH_ALG |
+ MTK_STAR_BIT_ARL_CFG_MISC_MODE);
+
+ /* Don't strip VLAN tags */
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
+ MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);
+
+ /* Setup DMA */
+ mtk_star_dma_init(priv);
+
+ ret = mtk_star_prepare_rx_skbs(ndev);
+ if (ret)
+ goto err_out;
+
+ /* Request the interrupt */
+ ret = request_irq(ndev->irq, mtk_star_handle_irq,
+ IRQF_TRIGGER_FALLING, ndev->name, ndev);
+ if (ret)
+ goto err_free_skbs;
+
+ napi_enable(&priv->napi);
+
+ mtk_star_intr_ack_all(priv);
+ mtk_star_intr_enable(priv);
+
+ /* Connect to and start PHY */
+ priv->phydev = of_phy_connect(ndev, priv->phy_node,
+ mtk_star_adjust_link, 0, priv->phy_intf);
+ if (!priv->phydev) {
+ netdev_err(ndev, "failed to connect to PHY\n");
+ goto err_free_irq;
+ }
+
+ mtk_star_dma_start(priv);
+ phy_start(priv->phydev);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_free_irq:
+ free_irq(ndev->irq, ndev);
+err_free_skbs:
+ mtk_star_free_rx_skbs(priv);
+err_out:
+ return ret;
+}
+
+static void mtk_star_disable(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ mtk_star_intr_disable(priv);
+ mtk_star_dma_disable(priv);
+ mtk_star_intr_ack_all(priv);
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ free_irq(ndev->irq, ndev);
+ mtk_star_free_rx_skbs(priv);
+ mtk_star_free_tx_skbs(priv);
+}
+
+static int mtk_star_netdev_open(struct net_device *ndev)
+{
+ return mtk_star_enable(ndev);
+}
+
+static int mtk_star_netdev_stop(struct net_device *ndev)
+{
+ mtk_star_disable(ndev);
+
+ return 0;
+}
+
+static int mtk_star_netdev_ioctl(struct net_device *ndev,
+ struct ifreq *req, int cmd)
+{
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(ndev->phydev, req, cmd);
+}
+
+static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc_data desc_data;
+
+ desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
+ if (dma_mapping_error(dev, desc_data.dma_addr))
+ goto err_drop_packet;
+
+ desc_data.skb = skb;
+ desc_data.len = skb->len;
+
+ spin_lock_bh(&priv->lock);
+
+ mtk_star_ring_push_head_tx(ring, &desc_data);
+
+ netdev_sent_queue(ndev, skb->len);
+
+ if (mtk_star_ring_full(ring))
+ netif_stop_queue(ndev);
+
+ spin_unlock_bh(&priv->lock);
+
+ mtk_star_dma_resume_tx(priv);
+
+ return NETDEV_TX_OK;
+
+err_drop_packet:
+ dev_kfree_skb(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_BUSY;
+}
+
+/* Returns the number of bytes sent, or a negative number if the first
+ * descriptor is still owned by the DMA engine.
+ */
+static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct mtk_star_ring_desc_data desc_data;
+ int ret;
+
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ if (ret)
+ return ret;
+
+ mtk_star_dma_unmap_tx(priv, &desc_data);
+ ret = desc_data.skb->len;
+ dev_kfree_skb_irq(desc_data.skb);
+
+ return ret;
+}
+
+static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct net_device *ndev = priv->ndev;
+ int ret, pkts_compl, bytes_compl;
+ bool wake = false;
+
+ spin_lock(&priv->lock);
+
+ for (pkts_compl = 0, bytes_compl = 0;;
+ pkts_compl++, bytes_compl += ret, wake = true) {
+ if (!mtk_star_ring_descs_available(ring))
+ break;
+
+ ret = mtk_star_tx_complete_one(priv);
+ if (ret < 0)
+ break;
+ }
+
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+ if (wake && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ mtk_star_intr_enable_tx(priv);
+
+ spin_unlock(&priv->lock);
+}
+
+static void mtk_star_netdev_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+
+ mtk_star_update_stats(priv);
+
+ memcpy(stats, &priv->stats, sizeof(*stats));
+}
+
+static void mtk_star_set_rx_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *hw_addr;
+ unsigned int hash_addr, i;
+ int ret;
+
+ if (ndev->flags & IFF_PROMISC) {
+ regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
+ MTK_STAR_BIT_ARL_CFG_MISC_MODE);
+ } else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
+ ndev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
+ ret = mtk_star_set_hashbit(priv, i);
+ if (ret)
+ goto hash_fail;
+ }
+ } else {
+ /* Clear previous settings. */
+ ret = mtk_star_reset_hash_table(priv);
+ if (ret)
+ goto hash_fail;
+
+ netdev_for_each_mc_addr(hw_addr, ndev) {
+ hash_addr = (hw_addr->addr[0] & 0x01) << 8;
+ hash_addr += hw_addr->addr[5];
+ ret = mtk_star_set_hashbit(priv, hash_addr);
+ if (ret)
+ goto hash_fail;
+ }
+ }
+
+ return;
+
+hash_fail:
+ if (ret == -ETIMEDOUT)
+ netdev_err(ndev, "setting hash bit timed out\n");
+ else
+ /* Should be -EIO */
+ netdev_err(ndev, "unable to set hash bit\n");
+}
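+
+/* Worked example of the hash computation above (illustrative only): for the
+ * IPv4 all-hosts multicast address 01:00:5e:00:00:01, addr[0] = 0x01 and
+ * addr[5] = 0x01, so hash_addr = ((0x01 & 0x01) << 8) + 0x01 = 0x101,
+ * i.e. bit 257 of the hash table gets set.
+ */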
+
+static const struct net_device_ops mtk_star_netdev_ops = {
+ .ndo_open = mtk_star_netdev_open,
+ .ndo_stop = mtk_star_netdev_stop,
+ .ndo_start_xmit = mtk_star_netdev_start_xmit,
+ .ndo_get_stats64 = mtk_star_netdev_get_stats64,
+ .ndo_set_rx_mode = mtk_star_set_rx_mode,
+ .ndo_do_ioctl = mtk_star_netdev_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void mtk_star_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
+}
+
+/* TODO Add ethtool stats. */
+static const struct ethtool_ops mtk_star_ethtool_ops = {
+ .get_drvinfo = mtk_star_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->rx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc_data desc_data;
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *curr_skb, *new_skb;
+ dma_addr_t new_dma_addr;
+ int ret;
+
+ spin_lock(&priv->lock);
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ spin_unlock(&priv->lock);
+ if (ret)
+ return -1;
+
+ curr_skb = desc_data.skb;
+
+ if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+ (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+ /* Error packet -> drop and reuse skb. */
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
+
+ /* Prepare new skb before receiving the current one. Reuse the current
+ * skb if we fail at any point.
+ */
+ new_skb = mtk_star_alloc_skb(ndev);
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
+
+ new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+ if (dma_mapping_error(dev, new_dma_addr)) {
+ ndev->stats.rx_dropped++;
+ dev_kfree_skb(new_skb);
+ new_skb = curr_skb;
+ netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+ goto push_new_skb;
+ }
+
+ desc_data.dma_addr = new_dma_addr;
+
+ /* We can't fail anymore at this point: it's safe to unmap the skb. */
+ mtk_star_dma_unmap_rx(priv, &desc_data);
+
+ skb_put(desc_data.skb, desc_data.len);
+ desc_data.skb->ip_summed = CHECKSUM_NONE;
+ desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+ desc_data.skb->dev = ndev;
+ netif_receive_skb(desc_data.skb);
+
+push_new_skb:
+ desc_data.len = skb_tailroom(new_skb);
+ desc_data.skb = new_skb;
+
+ spin_lock(&priv->lock);
+ mtk_star_ring_push_head_rx(ring, &desc_data);
+ spin_unlock(&priv->lock);
+
+ return 0;
+}
+
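+/* A descriptive note (not in the original patch): the function below
+ * receives up to $budget packets, then resumes RX DMA in case the engine
+ * stopped while the ring had no free descriptors.
+ */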
+static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
+{
+ int received, ret;
+
+ for (received = 0, ret = 0; received < budget && ret == 0; received++)
+ ret = mtk_star_receive_packet(priv);
+
+ mtk_star_dma_resume_rx(priv);
+
+ return received;
+}
+
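+/* A descriptive note (not in the original patch): standard NAPI poll — TX
+ * completions are not counted against the budget, only received packets
+ * are. When fewer than $budget packets arrive, the poll is completed and
+ * the RX interrupt is re-armed.
+ */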
+static int mtk_star_poll(struct napi_struct *napi, int budget)
+{
+ struct mtk_star_priv *priv;
+ int received = 0;
+
+ priv = container_of(napi, struct mtk_star_priv, napi);
+
+ /* Clean-up all TX descriptors. */
+ mtk_star_tx_complete_all(priv);
+ /* Receive up to $budget packets. */
+ received = mtk_star_process_rx(priv, budget);
+
+ if (received < budget) {
+ napi_complete_done(napi, received);
+ mtk_star_intr_enable_rx(priv);
+ }
+
+ return received;
+}
+
+static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
+{
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
+ MTK_STAR_BIT_PHY_CTRL0_RWOK);
+}
+
+static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
+ val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
+ 10, MTK_STAR_WAIT_TIMEOUT);
+}
+
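+/* A descriptive note (not in the original patch): MDIO accesses follow a
+ * common pattern — clear the RWOK bit, program the read or write command
+ * into PHY_CTRL0, then poll until the controller sets RWOK again (or the
+ * poll times out after MTK_STAR_WAIT_TIMEOUT).
+ */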
+static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
+{
+ struct mtk_star_priv *priv = mii->priv;
+ unsigned int val, data;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ mtk_star_mdio_rwok_clear(priv);
+
+ val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
+ val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
+ val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
+
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
+
+ ret = mtk_star_mdio_rwok_wait(priv);
+ if (ret)
+ return ret;
+
+ regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
+
+ data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
+ data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
+
+ return data;
+}
+
+static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
+ int regnum, u16 data)
+{
+ struct mtk_star_priv *priv = mii->priv;
+ unsigned int val;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ mtk_star_mdio_rwok_clear(priv);
+
+ val = data;
+ val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
+ val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
+ regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
+ regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
+ val |= regnum;
+ val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
+
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
+
+ return mtk_star_mdio_rwok_wait(priv);
+}
+
+static int mtk_star_mdio_init(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ struct device_node *of_node, *mdio_node;
+ int ret;
+
+ of_node = dev->of_node;
+
+ mdio_node = of_get_child_by_name(of_node, "mdio");
+ if (!mdio_node)
+ return -ENODEV;
+
+ if (!of_device_is_available(mdio_node)) {
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ priv->mii = devm_mdiobus_alloc(dev);
+ if (!priv->mii) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+ priv->mii->name = "mtk-mac-mdio";
+ priv->mii->parent = dev;
+ priv->mii->read = mtk_star_mdio_read;
+ priv->mii->write = mtk_star_mdio_write;
+ priv->mii->priv = priv;
+
+ ret = of_mdiobus_register(priv->mii, mdio_node);
+
+out_put_node:
+ of_node_put(mdio_node);
+ return ret;
+}
+
+static __maybe_unused int mtk_star_suspend(struct device *dev)
+{
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+
+ ndev = dev_get_drvdata(dev);
+ priv = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ mtk_star_disable(ndev);
+
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_star_resume(struct device *dev)
+{
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+ int ret;
+
+ ndev = dev_get_drvdata(dev);
+ priv = netdev_priv(ndev);
+
+ ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ if (netif_running(ndev)) {
+ ret = mtk_star_enable(ndev);
+ if (ret)
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+ }
+
+ return ret;
+}
+
+static void mtk_star_clk_disable_unprepare(void *data)
+{
+ struct mtk_star_priv *priv = data;
+
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+}
+
+static void mtk_star_mdiobus_unregister(void *data)
+{
+ struct mtk_star_priv *priv = data;
+
+ mdiobus_unregister(priv->mii);
+}
+
+static int mtk_star_probe(struct platform_device *pdev)
+{
+ struct device_node *of_node;
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+ struct device *dev;
+ void __iomem *base;
+ int ret, i;
+
+ dev = &pdev->dev;
+ of_node = dev->of_node;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ SET_NETDEV_DEV(ndev, dev);
+ platform_set_drvdata(pdev, ndev);
+
+ ndev->min_mtu = ETH_ZLEN;
+ ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;
+
+ spin_lock_init(&priv->lock);
+ INIT_WORK(&priv->stats_work, mtk_star_update_stats_work);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* We don't check the return values of the regmap read & write
+ * functions: for MMIO regmaps they can only fail when a clock is
+ * attached to the regmap, which is not the case here.
+ */
+ priv->regs = devm_regmap_init_mmio(dev, base,
+ &mtk_star_regmap_config);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
+ "mediatek,pericfg");
+ if (IS_ERR(priv->pericfg)) {
+ dev_err(dev, "Failed to lookup the PERICFG syscon\n");
+ return PTR_ERR(priv->pericfg);
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0)
+ return ndev->irq;
+
+ for (i = 0; i < MTK_STAR_NCLKS; i++)
+ priv->clks[i].id = mtk_star_clk_names[i];
+ ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev,
+ mtk_star_clk_disable_unprepare, priv);
+ if (ret)
+ return ret;
+
+ ret = of_get_phy_mode(of_node, &priv->phy_intf);
+ if (ret) {
+ return ret;
+ } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
+ dev_err(dev, "unsupported phy mode: %s\n",
+ phy_modes(priv->phy_intf));
+ return -EINVAL;
+ }
+
+ priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
+ if (!priv->phy_node) {
+ dev_err(dev, "failed to retrieve the phy handle from device tree\n");
+ return -ENODEV;
+ }
+
+ mtk_star_set_mode_rmii(priv);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "unsupported DMA mask\n");
+ return ret;
+ }
+
+ priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
+ &priv->dma_addr,
+ GFP_KERNEL | GFP_DMA);
+ if (!priv->ring_base)
+ return -ENOMEM;
+
+ mtk_star_nic_disable_pd(priv);
+ mtk_star_init_config(priv);
+
+ ret = mtk_star_mdio_init(ndev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, mtk_star_mdiobus_unregister, priv);
+ if (ret)
+ return ret;
+
+ ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
+ if (ret || !is_valid_ether_addr(ndev->dev_addr))
+ eth_hw_addr_random(ndev);
+
+ ndev->netdev_ops = &mtk_star_netdev_ops;
+ ndev->ethtool_ops = &mtk_star_ethtool_ops;
+
+ netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);
+
+ return devm_register_netdev(dev, ndev);
+}
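+
+/* A descriptive note (not in the original patch): everything acquired in
+ * mtk_star_probe() is managed — the devm_* allocations and the
+ * devm_add_action_or_reset() callbacks above undo clock enablement and
+ * MDIO bus registration automatically on probe failure or device removal,
+ * which is why the driver declares no .remove() callback.
+ */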
+
+static const struct of_device_id mtk_star_of_match[] = {
+ { .compatible = "mediatek,mt8516-eth", },
+ { .compatible = "mediatek,mt8518-eth", },
+ { .compatible = "mediatek,mt8175-eth", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mtk_star_of_match);
+
+static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
+ mtk_star_suspend, mtk_star_resume);
+
+static struct platform_driver mtk_star_driver = {
+ .driver = {
+ .name = MTK_STAR_DRVNAME,
+ .pm = &mtk_star_pm_ops,
+ .of_match_table = of_match_ptr(mtk_star_of_match),
+ },
+ .probe = mtk_star_probe,
+};
+module_platform_driver(mtk_star_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index 73eae80e1cb7..ac5468b77488 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -197,6 +197,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
err = devlink_region_snapshot_id_get(devlink, &id);
if (err) {
mlx4_err(dev, "crdump: devlink get snapshot id err %d\n", err);
+ iounmap(cr_space);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 8a5ea2543670..b816154bc79a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1235,7 +1235,6 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
u32 i, rss_rings;
- int err = 0;
rss_rings = priv->prof->rss_rings ?: n;
rss_rings = rounddown_pow_of_two(rss_rings);
@@ -1249,7 +1248,7 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
if (hfunc)
*hfunc = priv->rss_hash_fn;
- return err;
+ return 0;
}
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
@@ -1393,7 +1392,6 @@ static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
struct mlx4_spec_list *spec_l2,
unsigned char *mac)
{
- int err = 0;
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
@@ -1408,7 +1406,7 @@ static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
list_add_tail(&spec_l2->list, rule_list_h);
- return err;
+ return 0;
}
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 43dcbd8214c6..5bd3cd37d50f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -51,7 +51,8 @@
#include "en_port.h"
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
- XDP_PACKET_HEADROOM))
+ XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index db3552f2d087..8a10285b0e10 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -683,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
xdp.rxq = &ring->xdp_rxq;
+ xdp.frame_sz = priv->frag_info[0].frag_stride;
doorbell_pending = 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -946,7 +947,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) {
clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
- budget);
+ budget) < budget;
xdp_tx_cq->xdp_busy = !clean_complete;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a30edb436f4a..9dff7b086c9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -392,8 +392,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq, int napi_budget)
+int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -415,7 +415,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 ring_cons;
if (unlikely(!priv->port_up))
- return true;
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -492,7 +492,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
- return done < budget;
+ return done;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
@@ -504,7 +504,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
ring->wake_queue++;
}
- return done < budget;
+ return done;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -524,14 +524,14 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- bool clean_complete;
+ int work_done;
- clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
- if (!clean_complete)
+ work_done = mlx4_en_process_tx_cq(dev, cq, budget);
+ if (work_done >= budget)
return budget;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
+ if (napi_complete_done(napi, work_done))
+ mlx4_en_arm_cq(priv, cq);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 6e501af0e532..f6ff9620a137 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2734,7 +2734,7 @@ void mlx4_opreq_action(struct work_struct *work)
if (err) {
mlx4_err(dev, "Failed to retrieve required operation: %d\n",
err);
- return;
+ goto out;
}
MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index c72c4e1ea383..3d9aa7da95e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2345,8 +2345,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto out_free;
}
- dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
-
if (enable_4k_uar || !dev->persist->num_vfs) {
init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 630f15977f09..9f5603612960 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -737,8 +737,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
-bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq, int napi_budget);
+int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget);
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 1a11bc0e1612..d2986f1f2db0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -966,189 +966,6 @@ void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
-static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
- int npages, u64 iova)
-{
- int i, page_mask;
-
- if (npages > fmr->max_pages)
- return -EINVAL;
-
- page_mask = (1 << fmr->page_shift) - 1;
-
- /* We are getting page lists, so va must be page aligned. */
- if (iova & page_mask)
- return -EINVAL;
-
- /* Trust the user not to pass misaligned data in page_list */
- if (0)
- for (i = 0; i < npages; ++i) {
- if (page_list[i] & ~page_mask)
- return -EINVAL;
- }
-
- if (fmr->maps >= fmr->max_maps)
- return -EINVAL;
-
- return 0;
-}
-
-int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
- int npages, u64 iova, u32 *lkey, u32 *rkey)
-{
- u32 key;
- int i, err;
-
- err = mlx4_check_fmr(fmr, page_list, npages, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = key_to_hw_index(fmr->mr.key);
- key += dev->caps.num_mpts;
- *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
-
- *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
-
- /* Make sure MPT status is visible before writing MTT entries */
- wmb();
-
- dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
- npages * sizeof(u64), DMA_TO_DEVICE);
-
- for (i = 0; i < npages; ++i)
- fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
-
- dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
- npages * sizeof(u64), DMA_TO_DEVICE);
-
- fmr->mpt->key = cpu_to_be32(key);
- fmr->mpt->lkey = cpu_to_be32(key);
- fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
- fmr->mpt->start = cpu_to_be64(iova);
-
- /* Make MTT entries are visible before setting MPT status */
- wmb();
-
- *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;
-
- /* Make sure MPT status is visible before consumer can use FMR */
- wmb();
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
-
-int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
- int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- int err = -ENOMEM;
-
- if (max_maps > dev->caps.max_fmr_maps)
- return -EINVAL;
-
- if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
- return -EINVAL;
-
- /* All MTTs must fit in the same page */
- if (max_pages * sizeof(*fmr->mtts) > PAGE_SIZE)
- return -EINVAL;
-
- fmr->page_shift = page_shift;
- fmr->max_pages = max_pages;
- fmr->max_maps = max_maps;
- fmr->maps = 0;
-
- err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
- page_shift, &fmr->mr);
- if (err)
- return err;
-
- fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
- fmr->mr.mtt.offset,
- &fmr->dma_handle);
-
- if (!fmr->mtts) {
- err = -ENOMEM;
- goto err_free;
- }
-
- return 0;
-
-err_free:
- (void) mlx4_mr_free(dev, &fmr->mr);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
-
-int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- int err;
-
- err = mlx4_mr_enable(dev, &fmr->mr);
- if (err)
- return err;
-
- fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
- key_to_hw_index(fmr->mr.key), NULL);
- if (!fmr->mpt)
- return -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
-
-void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
- u32 *lkey, u32 *rkey)
-{
- if (!fmr->maps)
- return;
-
- /* To unmap: it is sufficient to take back ownership from HW */
- *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
-
- /* Make sure MPT status is visible */
- wmb();
-
- fmr->maps = 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
-
-int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
-{
- int ret;
-
- if (fmr->maps)
- return -EBUSY;
- if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
- /* In case of FMR was enabled and unmapped
- * make sure to give ownership of MPT back to HW
- * so HW2SW_MPT command will success.
- */
- *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
- /* Make sure MPT status is visible before changing MPT fields */
- wmb();
- fmr->mpt->length = 0;
- fmr->mpt->start = 0;
- /* Make sure MPT data is visible after changing MPT status */
- wmb();
- *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
- /* make sure MPT status is visible */
- wmb();
- }
-
- ret = mlx4_mr_free(dev, &fmr->mr);
- if (ret)
- return ret;
- fmr->mr.enabled = MLX4_MPT_DISABLED;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_free);
-
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 7d69a3061f17..b6ffd1622cfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -78,9 +78,24 @@ config MLX5_ESWITCH
Legacy SRIOV mode (L2 mac vlan steering based).
Switchdev mode (eswitch offloads).
+config MLX5_CLS_ACT
+ bool "MLX5 TC classifier action support"
+ depends on MLX5_ESWITCH && NET_CLS_ACT
+ default y
+ help
+ mlx5 ConnectX offload support for the TC classifier action (NET_CLS_ACT);
+ works in both native NIC mode and switchdev SRIOV mode.
+ Actions get attached to hardware-offloaded classifiers and are
+ invoked after a successful classification. Actions are used to
+ overwrite the classification result, instantly drop or redirect
+ and/or reformat packets at wire speed without involving the host CPU.
+
+ If set to N, TC offloads in both NIC and switchdev modes will be disabled.
+ If unsure, set to Y.
+
config MLX5_TC_CT
bool "MLX5 TC connection tracking offload support"
- depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
default y
help
Say Y here if you want to support offloading connection tracking rules
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 6d32915000fc..b61e47bc16e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
# mlx5 core basic
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
- health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
+ health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
@@ -33,17 +33,24 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
- lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
- en/tc_tun_geneve.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
+ en_rep.o en/rep/bond.o
+mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
+ en/mapping.o esw/chains.o en/tc_tun.o \
+ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
+ en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
#
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o esw/chains.o
+ ecpf.o rdma.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
+ esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
+ esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
+
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
index c13260467750..82b185121edb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
@@ -5,7 +5,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include "en.h"
static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
index eddc34e4a762..8a4985d8cbfe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
@@ -58,12 +58,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
+ u32 *sa_handle)
{
- return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr,
- spi, is_ipv6);
+ __be32 saddr[4] = {}, daddr[4] = {};
+
+ if (!xfrm->attrs.is_ipv6) {
+ saddr[3] = xfrm->attrs.saddr.a4;
+ daddr[3] = xfrm->attrs.daddr.a4;
+ } else {
+ memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
+ memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
+ }
+
+ return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr,
+ daddr, xfrm->attrs.spi,
+ xfrm->attrs.is_ipv6, sa_handle);
}
void mlx5_accel_esp_free_hw_context(void *context)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
index 530e428d46ab..e89747674712 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
@@ -48,9 +48,7 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6);
+ u32 *sa_handle);
void mlx5_accel_esp_free_hw_context(void *context);
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
@@ -64,9 +62,7 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
static inline void *
mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
+ u32 *sa_handle)
{
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
index cab708af3422..cbf3d76c05a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -56,8 +56,8 @@ void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
}
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
- u64 rcd_sn)
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn)
{
return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index e09bc3858d57..aefea467f7b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -109,8 +109,8 @@ int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
bool direction_sx);
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx);
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
- u64 rcd_sn);
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
@@ -125,8 +125,8 @@ mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
bool direction_sx) { return -ENOTSUPP; }
static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx) { }
-static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
- u32 seq, u64 rcd_sn) { return 0; }
+static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
return mlx5_accel_is_ktls_device(mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index cede5bdfd598..1d91a0d0ab1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -848,6 +848,14 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
struct mlx5_cmd_msg *msg);
+static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
+{
+ if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
+ return true;
+
+ return cmd->allowed_opcode == opcode;
+}
+
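+/* A descriptive note (not in the original patch): commands are refused
+ * unless their opcode matches allowed_opcode, with CMD_ALLOWED_OPCODE_ALL
+ * acting as a wildcard. mlx5_cmd_allowed_opcode() below flips this gate
+ * under the command semaphores so that, e.g., the command interface can be
+ * restricted to a single opcode while the device is quiesced.
+ */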
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -861,6 +869,7 @@ static void cmd_work_handler(struct work_struct *work)
int alloc_ret;
int cmd_mode;
+ complete(&ent->handling);
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
@@ -913,7 +922,9 @@ static void cmd_work_handler(struct work_struct *work)
/* Skip sending command to fw if internal error */
if (pci_channel_offline(dev->pdev) ||
- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+ cmd->state != MLX5_CMDIF_STATE_UP ||
+ !opcode_allowed(&dev->cmd, ent->op)) {
u8 status = 0;
u32 drv_synd;
@@ -978,6 +989,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
struct mlx5_cmd *cmd = &dev->cmd;
int err;
+ if (!wait_for_completion_timeout(&ent->handling, timeout) &&
+ cancel_work_sync(&ent->work)) {
+ ent->ret = -ECANCELED;
+ goto out_err;
+ }
if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
wait_for_completion(&ent->done);
} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
@@ -985,12 +1001,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}
+out_err:
err = ent->ret;
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
+ } else if (err == -ECANCELED) {
+ mlx5_core_warn(dev, "%s(0x%x) canceled on out-of-queue timeout.\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1026,6 +1047,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
ent->token = token;
ent->polling = force_polling;
+ init_completion(&ent->handling);
if (!callback)
init_completion(&ent->done);
@@ -1045,10 +1067,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
err = wait_func(dev, ent);
if (err == -ETIMEDOUT)
goto out;
+ if (err == -ECANCELED)
+ goto out_free;
ds = ent->ts2 - ent->ts1;
op = MLX5_GET(mbox_in, in->first.data, opcode);
- if (op < ARRAY_SIZE(cmd->stats)) {
+ if (op < MLX5_CMD_OP_MAX) {
stats = &cmd->stats[op];
spin_lock_irq(&stats->lock);
stats->sum += ds;
@@ -1391,6 +1415,22 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
mlx5_cmdif_debugfs_init(dev);
}
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ int i;
+
+ for (i = 0; i < cmd->max_reg_cmds; i++)
+ down(&cmd->sem);
+ down(&cmd->pages_sem);
+
+ cmd->allowed_opcode = opcode;
+
+ up(&cmd->pages_sem);
+ for (i = 0; i < cmd->max_reg_cmds; i++)
+ up(&cmd->sem);
+}
+
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
@@ -1511,7 +1551,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
if (ent->callback) {
ds = ent->ts2 - ent->ts1;
- if (ent->op < ARRAY_SIZE(cmd->stats)) {
+ if (ent->op < MLX5_CMD_OP_MAX) {
stats = &cmd->stats[ent->op];
spin_lock_irqsave(&stats->lock, flags);
stats->sum += ds;
@@ -1667,12 +1707,14 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int err;
u8 status = 0;
u32 drv_synd;
+ u16 opcode;
u8 token;
+ opcode = MLX5_GET(mbox_in, in, opcode);
if (pci_channel_offline(dev->pdev) ||
- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
-
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+ dev->cmd.state != MLX5_CMDIF_STATE_UP ||
+ !opcode_allowed(&dev->cmd, opcode)) {
err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
MLX5_SET(mbox_out, out, status, status);
MLX5_SET(mbox_out, out, syndrome, drv_synd);
@@ -1894,6 +1936,11 @@ static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
cmd->alloc_dma);
}
+static u16 cmdif_rev(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+}
+
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
int size = sizeof(struct mlx5_cmd_prot_block);
@@ -1913,10 +1960,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}
- cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
- if (!cmd->pool)
+ cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
+ if (!cmd->stats)
return -ENOMEM;
+ cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
+ if (!cmd->pool) {
+ err = -ENOMEM;
+ goto dma_pool_err;
+ }
+
err = alloc_cmd_page(dev, cmd);
if (err)
goto err_free_pool;
@@ -1937,6 +1990,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_free_page;
}
+ cmd->state = MLX5_CMDIF_STATE_DOWN;
cmd->checksum_disabled = 1;
cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
@@ -1951,7 +2005,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
spin_lock_init(&cmd->alloc_lock);
spin_lock_init(&cmd->token_lock);
- for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
+ for (i = 0; i < MLX5_CMD_OP_MAX; i++)
spin_lock_init(&cmd->stats[i].lock);
sema_init(&cmd->sem, cmd->max_reg_cmds);
@@ -1974,6 +2028,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
cmd->mode = CMD_MODE_POLLING;
+ cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
create_msg_cache(dev);
@@ -1997,7 +2052,8 @@ err_free_page:
err_free_pool:
dma_pool_destroy(cmd->pool);
-
+dma_pool_err:
+ kvfree(cmd->stats);
return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
@@ -2011,5 +2067,13 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
+ kvfree(cmd->stats);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
+
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+ enum mlx5_cmdif_state cmdif_state)
+{
+ dev->cmd.state = cmdif_state;
+}
+EXPORT_SYMBOL(mlx5_cmd_set_state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 818edc63e428..8379b24cb838 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
@@ -91,8 +90,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
- u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
- u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+ u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
struct mlx5_eq_comp *eq;
int err;
@@ -142,20 +140,17 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
err_cq_add:
mlx5_eq_del_cq(&eq->core, cq);
err_cmd:
- memset(din, 0, sizeof(din));
- memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, din, uid, cq->uid);
- mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+ mlx5_cmd_exec_in(dev, destroy_cq, din);
return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
- u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
int err;
mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
@@ -164,7 +159,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, in, uid, cq->uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, destroy_cq, in);
if (err)
return err;
@@ -179,20 +174,20 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *out, int outlen)
+ u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {};
MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
MLX5_SET(query_cq_in, in, cqn, cq->cqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_inout(dev, query_cq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {};
MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
MLX5_SET(modify_cq_in, in, uid, cq->uid);
@@ -205,7 +200,7 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
u16 cq_period,
u16 cq_max_count)
{
- u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
void *cqc;
MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 04854e5fbcd7..07c8d9811bc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -101,15 +101,15 @@ void mlx5_unregister_debugfs(void)
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- atomic_set(&dev->num_qps, 0);
-
dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
}
+EXPORT_SYMBOL(mlx5_qp_debugfs_init);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
debugfs_remove_recursive(dev->priv.qp_debugfs);
}
+EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
@@ -171,7 +171,7 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
cmd = &dev->priv.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
- for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
+ for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
if (strcmp(namep, "unknown command opcode")) {
@@ -203,41 +203,41 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
int index, int *is_str)
{
int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
- struct mlx5_qp_context *ctx;
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
u64 param = 0;
u32 *out;
+ int state;
+ u32 *qpc;
int err;
- int no_sq;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
- return param;
+ return 0;
- err = mlx5_core_qp_query(dev, qp, out, outlen);
- if (err) {
- mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
+ MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+ MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+ err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
+ if (err)
goto out;
- }
*is_str = 0;
- /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
- ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
-
+ qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
switch (index) {
case QP_PID:
param = qp->pid;
break;
case QP_STATE:
- param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+ state = MLX5_GET(qpc, qpc, state);
+ param = (unsigned long)mlx5_qp_state_str(state);
*is_str = 1;
break;
case QP_XPORT:
- param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+ param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
*is_str = 1;
break;
case QP_MTU:
- switch (ctx->mtu_msgmax >> 5) {
+ switch (MLX5_GET(qpc, qpc, mtu)) {
case IB_MTU_256:
param = 256;
break;
@@ -258,46 +258,32 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
}
break;
case QP_N_RECV:
- param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
+ param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
break;
case QP_RECV_SZ:
- param = 1 << ((ctx->rq_size_stride & 7) + 4);
+ param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
break;
case QP_N_SEND:
- no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
- if (!no_sq)
- param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
- else
- param = 0;
+ if (!MLX5_GET(qpc, qpc, no_sq))
+ param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
break;
case QP_LOG_PG_SZ:
- param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
- param += 12;
+ param = MLX5_GET(qpc, qpc, log_page_size) + 12;
break;
case QP_RQPN:
- param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
+ param = MLX5_GET(qpc, qpc, remote_qpn);
break;
}
-
out:
kfree(out);
return param;
}
-static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
-
- MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
- MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
+ u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
u64 param = 0;
void *ctx;
u32 *out;
@@ -307,7 +293,9 @@ static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
if (!out)
return param;
- err = mlx5_core_eq_query(dev, eq, out, outlen);
+ MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+ MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+ err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
if (err) {
mlx5_core_warn(dev, "failed to query eq\n");
goto out;
@@ -344,7 +332,7 @@ static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
if (!out)
return param;
- err = mlx5_core_query_cq(dev, cq, out, outlen);
+ err = mlx5_core_query_cq(dev, cq, out);
if (err) {
mlx5_core_warn(dev, "failed to query cq\n");
goto out;
@@ -461,6 +449,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
return err;
}
+EXPORT_SYMBOL(mlx5_debug_qp_add);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
@@ -470,6 +459,7 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
if (qp->dbg)
rem_res_tree(qp->dbg);
}
+EXPORT_SYMBOL(mlx5_debug_qp_remove);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 8ecac81a385d..a700f3c86899 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -76,58 +76,59 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,
.v = MLX5_GET(fte_match_set_lyr_2_4, value, dmac_47_16) << 16 |
MLX5_GET(fte_match_set_lyr_2_4, value, dmac_15_0)};
MASK_VAL_L2(u16, ethertype, ethertype);
+ MASK_VAL_L2(u8, ip_version, ip_version);
PRINT_MASKED_VALP(smac, u8 *, p, "%pM");
PRINT_MASKED_VALP(dmac, u8 *, p, "%pM");
PRINT_MASKED_VAL(ethertype, p, "%04x");
- if (ethertype.m == 0xffff) {
- if (ethertype.v == ETH_P_IP) {
+ if ((ethertype.m == 0xffff && ethertype.v == ETH_P_IP) ||
+ (ip_version.m == 0xf && ip_version.v == 4)) {
#define MASK_VAL_L2_BE(type, name, fld) \
MASK_VAL_BE(type, fte_match_set_lyr_2_4, name, mask, value, fld)
- MASK_VAL_L2_BE(u32, src_ipv4,
- src_ipv4_src_ipv6.ipv4_layout.ipv4);
- MASK_VAL_L2_BE(u32, dst_ipv4,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ MASK_VAL_L2_BE(u32, src_ipv4,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MASK_VAL_L2_BE(u32, dst_ipv4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
- PRINT_MASKED_VALP(src_ipv4, typeof(&src_ipv4.v), p,
- "%pI4");
- PRINT_MASKED_VALP(dst_ipv4, typeof(&dst_ipv4.v), p,
- "%pI4");
- } else if (ethertype.v == ETH_P_IPV6) {
- static const struct in6_addr full_ones = {
- .in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
- __constant_htonl(0xffffffff),
- __constant_htonl(0xffffffff),
- __constant_htonl(0xffffffff)},
- };
- DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
- DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
+ PRINT_MASKED_VALP(src_ipv4, typeof(&src_ipv4.v), p,
+ "%pI4");
+ PRINT_MASKED_VALP(dst_ipv4, typeof(&dst_ipv4.v), p,
+ "%pI4");
+ } else if ((ethertype.m == 0xffff && ethertype.v == ETH_P_IPV6) ||
+ (ip_version.m == 0xf && ip_version.v == 6)) {
+ static const struct in6_addr full_ones = {
+ .in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff)},
+ };
+ DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
+ DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
- memcpy(src_ipv6.m.in6_u.u6_addr8,
- MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(src_ipv6.m));
- memcpy(dst_ipv6.m.in6_u.u6_addr8,
- MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(dst_ipv6.m));
- memcpy(src_ipv6.v.in6_u.u6_addr8,
- MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(src_ipv6.v));
- memcpy(dst_ipv6.v.in6_u.u6_addr8,
- MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(dst_ipv6.v));
+ memcpy(src_ipv6.m.in6_u.u6_addr8,
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ sizeof(src_ipv6.m));
+ memcpy(dst_ipv6.m.in6_u.u6_addr8,
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ sizeof(dst_ipv6.m));
+ memcpy(src_ipv6.v.in6_u.u6_addr8,
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ sizeof(src_ipv6.v));
+ memcpy(dst_ipv6.v.in6_u.u6_addr8,
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ sizeof(dst_ipv6.v));
- if (!memcmp(&src_ipv6.m, &full_ones, sizeof(full_ones)))
- trace_seq_printf(p, "src_ipv6=%pI6 ",
- &src_ipv6.v);
- if (!memcmp(&dst_ipv6.m, &full_ones, sizeof(full_ones)))
- trace_seq_printf(p, "dst_ipv6=%pI6 ",
- &dst_ipv6.v);
- }
+ if (!memcmp(&src_ipv6.m, &full_ones, sizeof(full_ones)))
+ trace_seq_printf(p, "src_ipv6=%pI6 ",
+ &src_ipv6.v);
+ if (!memcmp(&dst_ipv6.m, &full_ones, sizeof(full_ones)))
+ trace_seq_printf(p, "dst_ipv6=%pI6 ",
+ &dst_ipv6.v);
}
#define PRINT_MASKED_VAL_L2(type, name, fld, p, format) {\
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 5ce6ebbc7f10..a7551274be58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -684,7 +684,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]);
while (block_timestamp > tracer->last_timestamp) {
- /* Check block override if its not the first block */
+ /* Check block override if it's not the first block */
if (!tracer->last_timestamp) {
u64 *ts_event;
/* To avoid block override be the HW in case of buffer
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index d2228e37450f..a894ea98c95a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -8,33 +8,13 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
}
-static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev)
-{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
-
- MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
- MLX5_SET(enable_hca_in, in, function_id, 0);
- MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
- return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-}
-
-static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev)
-{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
-
- MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
- MLX5_SET(disable_hca_in, in, function_id, 0);
- MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
{
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
int err;
- err = mlx5_peer_pf_enable_hca(dev);
+ MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ err = mlx5_cmd_exec_in(dev, enable_hca, in);
if (err)
mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
err);
@@ -44,9 +24,11 @@ static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
{
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
int err;
- err = mlx5_peer_pf_disable_hca(dev);
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ err = mlx5_cmd_exec_in(dev, disable_hca, in);
if (err) {
mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 23701c0e36ec..842db20493df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -36,7 +36,6 @@
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
@@ -53,6 +52,7 @@
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
+#include "en/dcbnl.h"
#include "en/fs.h"
#include "lib/hv_vhca.h"
@@ -69,8 +69,6 @@ struct page_pool;
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
-#define MLX5E_MAX_PRIORITY 8
-#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8
#define MLX5_RX_HEADROOM NET_SKB_PAD
@@ -243,10 +241,6 @@ enum mlx5e_priv_flag {
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#endif
-
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -271,42 +265,6 @@ struct mlx5e_params {
int hard_mtu;
};
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-struct mlx5e_cee_config {
- /* bw pct for priority group */
- u8 pg_bw_pct[CEE_DCBX_MAX_PGS];
- u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO];
- bool pfc_setting[CEE_DCBX_MAX_PRIO];
- bool pfc_enable;
-};
-
-enum {
- MLX5_DCB_CHG_RESET,
- MLX5_DCB_NO_CHG,
- MLX5_DCB_CHG_NO_RESET,
-};
-
-struct mlx5e_dcbx {
- enum mlx5_dcbx_oper_mode mode;
- struct mlx5e_cee_config cee_cfg; /* pending configuration */
- u8 dscp_app_cnt;
-
- /* The only setting that cannot be read from FW */
- u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
- u8 cap;
-
- /* Buffer configuration */
- bool manual_buffer;
- u32 cable_len;
- u32 xoff;
-};
-
-struct mlx5e_dcbx_dp {
- u8 dscp2prio[MLX5E_MAX_DSCP];
- u8 trust_state;
-};
-#endif
-
enum {
MLX5E_RQ_STATE_ENABLED,
MLX5E_RQ_STATE_RECOVERING,
@@ -339,16 +297,6 @@ struct mlx5e_cq_decomp {
u16 wqe_counter;
} ____cacheline_aligned_in_smp;
-struct mlx5e_tx_wqe_info {
- struct sk_buff *skb;
- u32 num_bytes;
- u8 num_wqebbs;
- u8 num_dma;
-#ifdef CONFIG_MLX5_EN_TLS
- struct page *resync_dump_frag_page;
-#endif
-};
-
enum mlx5e_dma_map_type {
MLX5E_DMA_MAP_SINGLE,
MLX5E_DMA_MAP_PAGE
@@ -370,18 +318,6 @@ enum {
MLX5E_SQ_STATE_PENDING_XSK_TX,
};
-struct mlx5e_sq_wqe_info {
- u8 opcode;
- u8 num_wqebbs;
-
- /* Auxiliary data for different opcodes. */
- union {
- struct {
- struct mlx5e_rq *rq;
- } umr;
- };
-};
-
struct mlx5e_txqsq {
/* data path */
@@ -429,10 +365,7 @@ struct mlx5e_dma_info {
dma_addr_t addr;
union {
struct page *page;
- struct {
- u64 handle;
- void *data;
- } xsk;
+ struct xdp_buff *xsk;
};
};
@@ -484,11 +417,6 @@ struct mlx5e_xdp_info_fifo {
u32 mask;
};
-struct mlx5e_xdp_wqe_info {
- u8 num_wqebbs;
- u8 num_pkts;
-};
-
struct mlx5e_xdp_mpwqe {
/* Current MPWQE session */
struct mlx5e_tx_wqe *wqe;
@@ -552,7 +480,7 @@ struct mlx5e_icosq {
/* write@xmit, read@completion */
struct {
- struct mlx5e_sq_wqe_info *ico_wqe;
+ struct mlx5e_icosq_wqe_info *wqe_info;
} db;
/* read only */
@@ -650,8 +578,8 @@ struct mlx5e_rq {
} mpwqe;
};
struct {
- u16 umem_headroom;
u16 headroom;
+ u32 frame0_sz;
u8 map_dir; /* dma map direction */
} buff;
@@ -682,7 +610,6 @@ struct mlx5e_rq {
struct page_pool *page_pool;
/* AF_XDP zero-copy */
- struct zero_copy_allocator zca;
struct xdp_umem *umem;
struct work_struct recover_work;
@@ -919,8 +846,8 @@ void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
@@ -1013,7 +940,7 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
const struct mlx5e_tirc_config *ttconfig,
void *tirc, bool inner);
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen);
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
struct mlx5e_xsk_param;
@@ -1068,10 +995,12 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
- u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
- u8 cq_period_mode);
+
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
@@ -1095,21 +1024,15 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
-int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
-void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
-void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
-void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
-#endif
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir, u32 *in, int inlen);
+int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
+ u32 *in);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+ bool enable_mc_lb);
/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
@@ -1121,7 +1044,7 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
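Several prototypes in this hunk drop their explicit inlen parameters: callers are now expected to size command buffers from the mlx5 ifc layout, so passing a length is redundant. A minimal caller sketch under that convention (the field fill is illustrative only):

	u32 in[MLX5_ST_SZ_DW(modify_tir_in)] = {};

	/* ... fill 'in' via MLX5_SET(modify_tir_in, in, <field>, <value>) ... */
	mlx5e_modify_tirs_hash(priv, in);	/* buffer size implied by the layout */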
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
new file mode 100644
index 000000000000..7be6b2d36b60
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5E_DCBNL_H__
+#define __MLX5E_DCBNL_H__
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+
+#define MLX5E_MAX_PRIORITY (8)
+
+struct mlx5e_cee_config {
+ /* bw pct for priority group */
+ u8 pg_bw_pct[CEE_DCBX_MAX_PGS];
+ u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO];
+ bool pfc_setting[CEE_DCBX_MAX_PRIO];
+ bool pfc_enable;
+};
+
+struct mlx5e_dcbx {
+ enum mlx5_dcbx_oper_mode mode;
+ struct mlx5e_cee_config cee_cfg; /* pending configuration */
+ u8 dscp_app_cnt;
+
+ /* The only setting that cannot be read from FW */
+ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
+ u8 cap;
+
+ /* Buffer configuration */
+ bool manual_buffer;
+ u32 cable_len;
+ u32 xoff;
+};
+
+#define MLX5E_MAX_DSCP (64)
+
+struct mlx5e_dcbx_dp {
+ u8 dscp2prio[MLX5E_MAX_DSCP];
+ u8 trust_state;
+};
+
+void mlx5e_dcbnl_build_netdev(struct net_device *netdev);
+void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev);
+void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
+#else
+static inline void mlx5e_dcbnl_build_netdev(struct net_device *netdev) {}
+static inline void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) {}
+static inline void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) {}
+static inline void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv) {}
+static inline void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) {}
+#endif
+
+#endif /* __MLX5E_DCBNL_H__ */
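The new header uses the usual config-gated stub pattern: real declarations under CONFIG_MLX5_CORE_EN_DCB and empty static inlines otherwise, so callers never need their own ifdefs. A minimal sketch of the same pattern, with hypothetical CONFIG_FOO/foo names:

#ifndef __FOO_H__
#define __FOO_H__

struct foo_priv;

#ifdef CONFIG_FOO
void foo_init(struct foo_priv *priv);
#else
/* empty stub: callers compile unchanged when CONFIG_FOO is off */
static inline void foo_init(struct foo_priv *priv) {}
#endif

#endif /* __FOO_H__ */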
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 3a199a03d929..7283443868f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -43,7 +43,7 @@ int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
void *cqc;
int err;
- err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out));
+ err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 7cd5b02e0f10..8fe8b4d6ad1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -38,12 +38,11 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
{
- u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
- u32 out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
MLX5_SET(arm_monitor_counter_in, in, opcode,
MLX5_CMD_OP_ARM_MONITOR_COUNTER);
- mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(priv->mdev, arm_monitor_counter, in);
}
static void mlx5e_monitor_counters_work(struct work_struct *work)
@@ -66,19 +65,6 @@ static int mlx5e_monitor_event_handler(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void mlx5e_monitor_counter_start(struct mlx5e_priv *priv)
-{
- MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
- MONITOR_COUNTER);
- mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
-}
-
-static void mlx5e_monitor_counter_stop(struct mlx5e_priv *priv)
-{
- mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
- cancel_work_sync(&priv->monitor_counters_work);
-}
-
static int fill_monitor_counter_ppcnt_set1(int cnt, u32 *in)
{
enum mlx5_monitor_counter_ppcnt ppcnt_cnt;
@@ -118,8 +104,7 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
- u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
int q_counter = priv->q_counter;
int cnt = 0;
@@ -136,34 +121,31 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(mdev, set_monitor_counter, in);
}
/* check mlx5e_monitor_counter_supported() before calling this function */
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
{
INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
- mlx5e_monitor_counter_start(priv);
+ MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
+ MONITOR_COUNTER);
+ mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
+
mlx5e_set_monitor_counter(priv);
mlx5e_monitor_counter_arm(priv);
queue_work(priv->wq, &priv->update_stats_work);
}
-static void mlx5e_monitor_counter_disable(struct mlx5e_priv *priv)
+/* check mlx5e_monitor_counter_supported() before calling this function */
+void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
{
- u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- MLX5_SET(set_monitor_counter_in, in, num_of_counters, 0);
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
-}
-
-/* check if mlx5e_monitor_counter_supported before calling this function*/
-void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
-{
- mlx5e_monitor_counter_disable(priv);
- mlx5e_monitor_counter_stop(priv);
+ mlx5_cmd_exec_in(priv->mdev, set_monitor_counter, in);
+ mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+ cancel_work_sync(&priv->monitor_counters_work);
}
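These hunks replace open-coded mlx5_cmd_exec() calls, which take explicit in/out buffers and sizes, with mlx5_cmd_exec_in(), which derives them from the ifc command name. A sketch of how such a wrapper can be expressed, assuming only the standard MLX5_ST_SZ_* helpers (illustrative only, not the driver's exact macro):

#define sketch_cmd_exec_in(dev, ifc_cmd, in)                           \
({                                                                     \
	u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                   \
									\
	mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in),         \
		      _out, sizeof(_out));                             \
})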
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index eb2e1f2138e4..38e4f19d69f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -12,15 +12,16 @@ static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- u16 headroom = NET_IP_ALIGN;
+ u16 headroom;
- if (mlx5e_rx_is_xdp(params, xsk)) {
+ if (xsk)
+ return xsk->headroom;
+
+ headroom = NET_IP_ALIGN;
+ if (mlx5e_rx_is_xdp(params, xsk))
headroom += XDP_PACKET_HEADROOM;
- if (xsk)
- headroom += xsk->headroom;
- } else {
+ else
headroom += MLX5_RX_HEADROOM;
- }
return headroom;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 2c4a670c8ffd..2a8950b3056f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -369,17 +369,19 @@ enum mlx5e_fec_supported_link_mode {
*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
} while (0)
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
- do { \
- u16 *__policy = &(policy); \
- bool _write = (write); \
- \
- if (_write && *__policy) \
- *__policy = find_first_bit((u_long *)__policy, \
- sizeof(u16) * BITS_PER_BYTE);\
- MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
- if (!_write && *__policy) \
- *__policy = 1 << *__policy; \
+#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
+ do { \
+ unsigned long policy_long; \
+ u16 *__policy = &(policy); \
+ bool _write = (write); \
+ \
+ policy_long = *__policy; \
+ if (_write && *__policy) \
+ *__policy = find_first_bit(&policy_long, \
+ sizeof(policy_long) * BITS_PER_BYTE);\
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
+ if (!_write && *__policy) \
+ *__policy = 1 << *__policy; \
} while (0)
/* get/set FEC admin field for a given speed */
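The 50G macro rewrite above fixes a subtle bug: find_first_bit() walks unsigned long words, so casting a u16 * to u_long * reads memory past the variable and is endian-unsafe. The safe form copies the value into a real unsigned long first; a distilled sketch with a hypothetical policy value:

	u16 fec_policy = BIT(3);	/* hypothetical admin policy bitmap */
	unsigned long policy_long = fec_policy;
	int mode;

	/* safe: scans a genuine unsigned long, not a reinterpreted u16 */
	mode = find_first_bit(&policy_long, BITS_PER_LONG);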
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
new file mode 100644
index 000000000000..bdb71332cbf2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <net/lag.h>
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "esw/acl/ofld.h"
+#include "en_rep.h"
+
+struct mlx5e_rep_bond {
+ struct notifier_block nb;
+ struct netdev_net_notifier nn;
+ struct list_head metadata_list;
+};
+
+struct mlx5e_rep_bond_slave_entry {
+ struct list_head list;
+ struct net_device *netdev;
+};
+
+struct mlx5e_rep_bond_metadata {
+ struct list_head list; /* link to global list of rep_bond_metadata */
+ struct mlx5_eswitch *esw;
+ /* private of the uplink that holds the rep bond metadata list */
+ struct net_device *lag_dev;
+ u32 metadata_reg_c_0;
+
+ struct list_head slaves_list; /* slaves list */
+ int slaves;
+};
+
+static struct mlx5e_rep_bond_metadata *
+mlx5e_lookup_rep_bond_metadata(struct mlx5_rep_uplink_priv *uplink_priv,
+ const struct net_device *lag_dev)
+{
+ struct mlx5e_rep_bond_metadata *found = NULL;
+ struct mlx5e_rep_bond_metadata *cur;
+
+ list_for_each_entry(cur, &uplink_priv->bond->metadata_list, list) {
+ if (cur->lag_dev == lag_dev) {
+ found = cur;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static struct mlx5e_rep_bond_slave_entry *
+mlx5e_lookup_rep_bond_slave_entry(struct mlx5e_rep_bond_metadata *mdata,
+ const struct net_device *netdev)
+{
+ struct mlx5e_rep_bond_slave_entry *found = NULL;
+ struct mlx5e_rep_bond_slave_entry *cur;
+
+ list_for_each_entry(cur, &mdata->slaves_list, list) {
+ if (cur->netdev == netdev) {
+ found = cur;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static void mlx5e_rep_bond_metadata_release(struct mlx5e_rep_bond_metadata *mdata)
+{
+ netdev_dbg(mdata->lag_dev, "destroy rep_bond_metadata(%d)\n",
+ mdata->metadata_reg_c_0);
+ list_del(&mdata->list);
+ mlx5_esw_match_metadata_free(mdata->esw, mdata->metadata_reg_c_0);
+ WARN_ON(!list_empty(&mdata->slaves_list));
+ kfree(mdata);
+}
+
+/* This must be called under rtnl_lock */
+int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
+ struct net_device *lag_dev)
+{
+ struct mlx5e_rep_bond_slave_entry *s_entry;
+ struct mlx5e_rep_bond_metadata *mdata;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
+ int err;
+
+ ASSERT_RTNL();
+
+ rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ mdata = mlx5e_lookup_rep_bond_metadata(&rpriv->uplink_priv, lag_dev);
+ if (!mdata) {
+ /* First netdev becomes a slave; no metadata represents the lag_dev yet. Create one */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
+
+ mdata->lag_dev = lag_dev;
+ mdata->esw = esw;
+ INIT_LIST_HEAD(&mdata->slaves_list);
+ mdata->metadata_reg_c_0 = mlx5_esw_match_metadata_alloc(esw);
+ if (!mdata->metadata_reg_c_0) {
+ kfree(mdata);
+ return -ENOSPC;
+ }
+ list_add(&mdata->list, &rpriv->uplink_priv.bond->metadata_list);
+
+ netdev_dbg(lag_dev, "create rep_bond_metadata(%d)\n",
+ mdata->metadata_reg_c_0);
+ }
+
+ s_entry = kzalloc(sizeof(*s_entry), GFP_KERNEL);
+ if (!s_entry) {
+ err = -ENOMEM;
+ goto entry_alloc_err;
+ }
+
+ s_entry->netdev = netdev;
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+
+ err = mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport,
+ mdata->metadata_reg_c_0);
+ if (err)
+ goto ingress_err;
+
+ mdata->slaves++;
+ list_add_tail(&s_entry->list, &mdata->slaves_list);
+ netdev_dbg(netdev, "enslave rep vport(%d) lag_dev(%s) metadata(0x%x)\n",
+ rpriv->rep->vport, lag_dev->name, mdata->metadata_reg_c_0);
+
+ return 0;
+
+ingress_err:
+ kfree(s_entry);
+entry_alloc_err:
+ if (!mdata->slaves)
+ mlx5e_rep_bond_metadata_release(mdata);
+ return err;
+}
+
+/* This must be called under rtnl_lock */
+void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
+ const struct net_device *netdev,
+ const struct net_device *lag_dev)
+{
+ struct mlx5e_rep_bond_slave_entry *s_entry;
+ struct mlx5e_rep_bond_metadata *mdata;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
+
+ ASSERT_RTNL();
+
+ rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ mdata = mlx5e_lookup_rep_bond_metadata(&rpriv->uplink_priv, lag_dev);
+ if (!mdata)
+ return;
+
+ s_entry = mlx5e_lookup_rep_bond_slave_entry(mdata, netdev);
+ if (!s_entry)
+ return;
+
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+
+ /* Reset bond_metadata to zero first, then reset all ingress/egress
+ * ACLs and rx rules of the unslaved representor's vport
+ */
+ mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport, 0);
+ mlx5_esw_acl_egress_vport_unbond(esw, rpriv->rep->vport);
+ mlx5e_rep_bond_update(priv, false);
+
+ list_del(&s_entry->list);
+
+ netdev_dbg(netdev, "unslave rep vport(%d) lag_dev(%s) metadata(0x%x)\n",
+ rpriv->rep->vport, lag_dev->name, mdata->metadata_reg_c_0);
+
+ if (--mdata->slaves == 0)
+ mlx5e_rep_bond_metadata_release(mdata);
+ kfree(s_entry);
+}
+
+static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ /* The given netdev is either not a representor or not a slave in a LAG configuration */
+ if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
+ return false;
+
+ /* Egress ACL forward-to-vport is supported only for non-uplink representors */
+ return rpriv->rep->vport != MLX5_VPORT_UPLINK;
+}
+
+static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
+{
+ struct netdev_notifier_changelowerstate_info *info;
+ struct netdev_lag_lower_state_info *lag_info;
+ struct mlx5e_rep_priv *rpriv;
+ struct net_device *lag_dev;
+ struct mlx5e_priv *priv;
+ struct list_head *iter;
+ struct net_device *dev;
+ u16 acl_vport_num;
+ u16 fwd_vport_num;
+ int err;
+
+ if (!mlx5e_rep_is_lag_netdev(netdev))
+ return;
+
+ info = ptr;
+ lag_info = info->lower_state_info;
+ /* This is not an event of a representor becoming active slave */
+ if (!lag_info->tx_enabled)
+ return;
+
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+ fwd_vport_num = rpriv->rep->vport;
+ lag_dev = netdev_master_upper_dev_get(netdev);
+
+ netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
+ lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
+
+ /* Point everyone's egress acl to the vport of the active representor */
+ netdev_for_each_lower_dev(lag_dev, dev, iter) {
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ acl_vport_num = rpriv->rep->vport;
+ if (acl_vport_num != fwd_vport_num) {
+ /* Only a single rx_rule for the unique bond_metadata should be
+ * present; delete it if it is saved as the passive vport's
+ * rx_rule with the passive vport's root_ft as destination
+ */
+ mlx5e_rep_bond_update(priv, true);
+ err = mlx5_esw_acl_egress_vport_bond(priv->mdev->priv.eswitch,
+ fwd_vport_num,
+ acl_vport_num);
+ if (err)
+ netdev_warn(dev,
+ "configure slave vport(%d) egress fwd, err(%d)",
+ acl_vport_num, err);
+ }
+ }
+
+ /* Insert a new rx_rule for the unique bond_metadata; save it as the active
+ * vport's rx_rule with the active vport's root_ft as the new destination
+ */
+ err = mlx5e_rep_bond_update(netdev_priv(netdev), false);
+ if (err)
+ netdev_warn(netdev, "configure active slave vport(%d) rx_rule, err(%d)",
+ fwd_vport_num, err);
+}
+
+static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct mlx5e_rep_priv *rpriv;
+ struct net_device *lag_dev;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_rep_is_lag_netdev(netdev))
+ return;
+
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+ lag_dev = info->upper_dev;
+
+ netdev_dbg(netdev, "%sslave vport(%d) lag(%s)\n",
+ info->linking ? "en" : "un", rpriv->rep->vport, lag_dev->name);
+
+ if (info->linking)
+ mlx5e_rep_bond_enslave(priv->mdev->priv.eswitch, netdev, lag_dev);
+ else
+ mlx5e_rep_bond_unslave(priv->mdev->priv.eswitch, netdev, lag_dev);
+}
+
+/* A bond device over representors and netdev events are used here in a
+ * specific way to support eswitch vport bonding and to perform eswitch vport
+ * failover by modifying the egress ACLs of the lower dev representors. This
+ * therefore also changes the traditional behavior of a lower dev under a bond
+ * device. Non-representor netdevs, and representors of other vendors, are not
+ * supported as lower devs of the bond device.
+ */
+static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_CHANGELOWERSTATE:
+ mlx5e_rep_changelowerstate_event(netdev, ptr);
+ break;
+ case NETDEV_CHANGEUPPER:
+ mlx5e_rep_changeupper_event(netdev, ptr);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/* If the HW supports eswitch vport bonding, register a specific notifier to
+ * handle it when two or more representors are bonded
+ */
+int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ struct net_device *netdev = rpriv->netdev;
+ struct mlx5e_priv *priv;
+ int ret = 0;
+
+ priv = netdev_priv(netdev);
+ if (!mlx5_esw_acl_egress_fwd2vport_supported(priv->mdev->priv.eswitch))
+ goto out;
+
+ uplink_priv->bond = kvzalloc(sizeof(*uplink_priv->bond), GFP_KERNEL);
+ if (!uplink_priv->bond) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&uplink_priv->bond->metadata_list);
+ uplink_priv->bond->nb.notifier_call = mlx5e_rep_esw_bond_netevent;
+ ret = register_netdevice_notifier_dev_net(netdev,
+ &uplink_priv->bond->nb,
+ &uplink_priv->bond->nn);
+ if (ret) {
+ netdev_err(netdev, "register bonding netevent notifier, err(%d)\n", ret);
+ kvfree(uplink_priv->bond);
+ uplink_priv->bond = NULL;
+ }
+
+out:
+ return ret;
+}
+
+void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
+ if (!mlx5_esw_acl_egress_fwd2vport_supported(priv->mdev->priv.eswitch) ||
+ !rpriv->uplink_priv.bond)
+ return;
+
+ unregister_netdevice_notifier_dev_net(rpriv->netdev,
+ &rpriv->uplink_priv.bond->nb,
+ &rpriv->uplink_priv.bond->nn);
+ kvfree(rpriv->uplink_priv.bond);
+}
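mlx5e_rep_bond_init() relies on register_netdevice_notifier_dev_net(), which registers the notifier block in the device's own network namespace; the netdev_net_notifier cookie lets the core keep the registration attached if the device later moves namespaces. A minimal usage sketch (the my_* names are hypothetical):

static int my_event(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* react to NETDEV_CHANGEUPPER / NETDEV_CHANGELOWERSTATE on dev */
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = { .notifier_call = my_event };
static struct netdev_net_notifier my_nn;

static int hook_netdev_events(struct net_device *dev)
{
	return register_netdevice_notifier_dev_net(dev, &my_nb, &my_nn);
}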
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
new file mode 100644
index 000000000000..baa162432e75
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#include <linux/refcount.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <linux/rwlock.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include "neigh.h"
+#include "tc.h"
+#include "en_rep.h"
+#include "fs_core.h"
+#include "diag/en_rep_tracepoint.h"
+
+static unsigned long mlx5e_rep_ipv6_interval(void)
+{
+ if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
+ return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);
+
+ return ~0UL;
+}
+
+static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
+{
+ unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
+ unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
+ struct net_device *netdev = rpriv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
+ mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
+}
+
+void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+
+ mlx5_fc_queue_stats_work(priv->mdev,
+ &neigh_update->neigh_stats_work,
+ neigh_update->min_interval);
+}
+
+static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
+{
+ return refcount_inc_not_zero(&nhe->refcnt);
+}
+
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
+
+void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
+{
+ if (refcount_dec_and_test(&nhe->refcnt)) {
+ mlx5e_rep_neigh_entry_remove(nhe);
+ kfree_rcu(nhe, rcu);
+ }
+}
+
+static struct mlx5e_neigh_hash_entry *
+mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh_hash_entry *next = NULL;
+
+ rcu_read_lock();
+
+ for (next = nhe ?
+ list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &nhe->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list) :
+ list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list);
+ next;
+ next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &next->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list))
+ if (mlx5e_rep_neigh_entry_hold(next))
+ break;
+
+ rcu_read_unlock();
+
+ if (nhe)
+ mlx5e_rep_neigh_entry_release(nhe);
+
+ return next;
+}
+
+static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
+{
+ struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
+ neigh_update.neigh_stats_work.work);
+ struct net_device *netdev = rpriv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_neigh_hash_entry *nhe = NULL;
+
+ rtnl_lock();
+ if (!list_empty(&rpriv->neigh_update.neigh_list))
+ mlx5e_rep_queue_neigh_stats_work(priv);
+
+ while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
+ mlx5e_tc_update_neigh_used_value(nhe);
+
+ rtnl_unlock();
+}
+
+static void mlx5e_rep_neigh_update(struct work_struct *work)
+{
+ struct mlx5e_neigh_hash_entry *nhe =
+ container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
+ struct neighbour *n = nhe->n;
+ struct mlx5e_encap_entry *e;
+ unsigned char ha[ETH_ALEN];
+ struct mlx5e_priv *priv;
+ bool neigh_connected;
+ u8 nud_state, dead;
+
+ rtnl_lock();
+
+ /* If these parameters are changed after we release the lock,
+ * we'll receive another event letting us know about it.
+ * We use this lock to avoid inconsistency between the neigh validity
+ * and its hw address.
+ */
+ read_lock_bh(&n->lock);
+ memcpy(ha, n->ha, ETH_ALEN);
+ nud_state = n->nud_state;
+ dead = n->dead;
+ read_unlock_bh(&n->lock);
+
+ neigh_connected = (nud_state & NUD_VALID) && !dead;
+
+ trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
+
+ list_for_each_entry(e, &nhe->encap_list, encap_list) {
+ if (!mlx5e_encap_take(e))
+ continue;
+
+ priv = netdev_priv(e->out_dev);
+ mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ mlx5e_encap_put(priv, e);
+ }
+ mlx5e_rep_neigh_entry_release(nhe);
+ rtnl_unlock();
+ neigh_release(n);
+}
+
+static void mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe,
+ struct neighbour *n)
+{
+ /* Take a reference to ensure the neighbour and mlx5 encap
+ * entry won't be destructed until we drop the reference in
+ * delayed work.
+ */
+ neigh_hold(n);
+
+ /* This assignment is valid as long as the neigh reference
+ * is taken
+ */
+ nhe->n = n;
+
+ if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
+ mlx5e_rep_neigh_entry_release(nhe);
+ neigh_release(n);
+ }
+}
+
+static int mlx5e_rep_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
+ neigh_update.netevent_nb);
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct net_device *netdev = rpriv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_neigh_hash_entry *nhe = NULL;
+ struct mlx5e_neigh m_neigh = {};
+ struct neigh_parms *p;
+ struct neighbour *n;
+ bool found = false;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ n = ptr;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
+#else
+ if (n->tbl != &arp_tbl)
+#endif
+ return NOTIFY_DONE;
+
+ m_neigh.dev = n->dev;
+ m_neigh.family = n->ops->family;
+ memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+
+ rcu_read_lock();
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
+ rcu_read_unlock();
+ if (!nhe)
+ return NOTIFY_DONE;
+
+ mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
+ break;
+
+ case NETEVENT_DELAY_PROBE_TIME_UPDATE:
+ p = ptr;
+
+ /* We check that the device is present since we don't care about
+ * changes in the default table; we only care about changes
+ * done to the per-device delay probe time parameter.
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
+#else
+ if (!p->dev || p->tbl != &arp_tbl)
+#endif
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
+ neigh_list) {
+ if (p->dev == nhe->m_neigh.dev) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!found)
+ return NOTIFY_DONE;
+
+ neigh_update->min_interval = min_t(unsigned long,
+ NEIGH_VAR(p, DELAY_PROBE_TIME),
+ neigh_update->min_interval);
+ mlx5_fc_update_sampling_interval(priv->mdev,
+ neigh_update->min_interval);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static const struct rhashtable_params mlx5e_neigh_ht_params = {
+ .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
+ .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
+ .key_len = sizeof(struct mlx5e_neigh),
+ .automatic_shrinking = true,
+};
+
+int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ int err;
+
+ err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&neigh_update->neigh_list);
+ mutex_init(&neigh_update->encap_lock);
+ INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
+ mlx5e_rep_neigh_stats_work);
+ mlx5e_rep_neigh_update_init_interval(rpriv);
+
+ rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
+ err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
+ if (err)
+ goto out_err;
+ return 0;
+
+out_err:
+ rhashtable_destroy(&neigh_update->neigh_ht);
+ return err;
+}
+
+void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
+ unregister_netevent_notifier(&neigh_update->netevent_nb);
+
+ flush_workqueue(priv->wq); /* flush neigh update works */
+
+ cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
+
+ mutex_destroy(&neigh_update->encap_lock);
+ rhashtable_destroy(&neigh_update->neigh_ht);
+}
+
+static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ int err;
+
+ err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
+ &nhe->rhash_node,
+ mlx5e_neigh_ht_params);
+ if (err)
+ return err;
+
+ list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
+
+ return err;
+}
+
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
+
+ mutex_lock(&rpriv->neigh_update.encap_lock);
+
+ list_del_rcu(&nhe->neigh_list);
+
+ rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
+ &nhe->rhash_node,
+ mlx5e_neigh_ht_params);
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
+}
+
+/* This function must only be called under the representor's encap_lock or
+ * inside rcu read lock section.
+ */
+struct mlx5e_neigh_hash_entry *
+mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
+ struct mlx5e_neigh *m_neigh)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct mlx5e_neigh_hash_entry *nhe;
+
+ nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
+ mlx5e_neigh_ht_params);
+ return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
+}
+
+int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh_hash_entry **nhe)
+{
+ int err;
+
+ *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
+ if (!*nhe)
+ return -ENOMEM;
+
+ (*nhe)->priv = priv;
+ memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
+ INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
+ spin_lock_init(&(*nhe)->encap_list_lock);
+ INIT_LIST_HEAD(&(*nhe)->encap_list);
+ refcount_set(&(*nhe)->refcnt, 1);
+
+ err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
+ if (err)
+ goto out_free;
+ return 0;
+
+out_free:
+ kfree(*nhe);
+ return err;
+}
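The hash table above pairs RCU traversal with refcount_inc_not_zero(), so a reader can pin an entry that a concurrent writer may already be releasing through kfree_rcu(). A generic sketch of the pattern (entry/entry_get/entry_put are hypothetical names):

struct entry {
	struct list_head list;
	refcount_t refcnt;
	struct rcu_head rcu;
};

static struct entry *entry_get(struct list_head *head)
{
	struct entry *e, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(e, head, list) {
		/* fails if a concurrent put already dropped the last ref */
		if (refcount_inc_not_zero(&e->refcnt)) {
			found = e;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void entry_put(struct entry *e)
{
	if (refcount_dec_and_test(&e->refcnt)) {
		/* writer side also does list_del_rcu() under its lock */
		kfree_rcu(e, rcu);
	}
}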
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
new file mode 100644
index 000000000000..32b239189c95
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_REP_NEIGH__
+#define __MLX5_EN_REP_NEIGH__
+
+#include "en.h"
+#include "en_rep.h"
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv);
+void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv);
+
+struct mlx5e_neigh_hash_entry *
+mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
+ struct mlx5e_neigh *m_neigh);
+int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh_hash_entry **nhe);
+void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe);
+
+void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline int
+mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) { return 0; }
+static inline void
+mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
+#endif /* __MLX5_EN_REP_NEIGH__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
new file mode 100644
index 000000000000..80713123de5c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#include <net/dst_metadata.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include "tc.h"
+#include "neigh.h"
+#include "en_rep.h"
+#include "eswitch.h"
+#include "esw/chains.h"
+#include "en/tc_ct.h"
+#include "en/mapping.h"
+#include "en/tc_tun.h"
+#include "lib/port_tun.h"
+
+struct mlx5e_rep_indr_block_priv {
+ struct net_device *netdev;
+ struct mlx5e_rep_priv *rpriv;
+
+ struct list_head list;
+};
+
+int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
+ struct mlx5e_neigh_hash_entry *nhe;
+ int err;
+
+ err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
+ if (err)
+ return err;
+
+ mutex_lock(&rpriv->neigh_update.encap_lock);
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+ if (!nhe) {
+ err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
+ if (err) {
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
+ mlx5_tun_entropy_refcount_dec(tun_entropy,
+ e->reformat_type);
+ return err;
+ }
+ }
+
+ e->nhe = nhe;
+ spin_lock(&nhe->encap_list_lock);
+ list_add_rcu(&e->encap_list, &nhe->encap_list);
+ spin_unlock(&nhe->encap_list_lock);
+
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
+
+ return 0;
+}
+
+void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
+
+ if (!e->nhe)
+ return;
+
+ spin_lock(&e->nhe->encap_list_lock);
+ list_del_rcu(&e->encap_list);
+ spin_unlock(&e->nhe->encap_list_lock);
+
+ mlx5e_rep_neigh_entry_release(e->nhe);
+ e->nhe = NULL;
+ mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
+}
+
+void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ bool neigh_connected,
+ unsigned char ha[ETH_ALEN])
+{
+ struct ethhdr *eth = (struct ethhdr *)e->encap_header;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ bool encap_connected;
+ LIST_HEAD(flow_list);
+
+ ASSERT_RTNL();
+
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&e->res_ready);
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+ if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+ ether_addr_equal(e->h_dest, ha)))
+ goto unlock;
+
+ mlx5e_take_all_encap_flows(e, &flow_list);
+
+ if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
+ (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
+ mlx5e_tc_encap_flows_del(priv, e, &flow_list);
+
+ if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ ether_addr_copy(e->h_dest, ha);
+ ether_addr_copy(eth->h_dest, ha);
+ /* Update the encap source mac, in case we delete
+ * the flows when the encap source mac changes.
+ */
+ ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
+
+ mlx5e_tc_encap_flows_add(priv, e, &flow_list);
+ }
+unlock:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ mlx5e_put_encap_flow_list(priv, &flow_list);
+}
+
+static int
+mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
+ struct flow_cls_offload *cls_flower, int flags)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+ flags);
+ case FLOW_CLS_DESTROY:
+ return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+ flags);
+ case FLOW_CLS_STATS:
+ return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+ flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static
+int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ switch (ma->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlx5e_tc_configure_matchall(priv, ma);
+ case TC_CLSMATCHALL_DESTROY:
+ return mlx5e_tc_delete_matchall(priv, ma);
+ case TC_CLSMATCHALL_STATS:
+ mlx5e_tc_stats_matchall(priv, ma);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
+ struct mlx5e_priv *priv = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
+ case TC_SETUP_CLSMATCHALL:
+ return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload tmp, *f = type_data;
+ struct mlx5e_priv *priv = cb_priv;
+ struct mlx5_eswitch *esw;
+ unsigned long flags;
+ int err;
+
+ flags = MLX5_TC_FLAG(INGRESS) |
+ MLX5_TC_FLAG(ESW_OFFLOAD) |
+ MLX5_TC_FLAG(FT_OFFLOAD);
+ esw = priv->mdev->priv.eswitch;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ memcpy(&tmp, f, sizeof(*f));
+
+ if (!mlx5_esw_chains_prios_supported(esw))
+ return -EOPNOTSUPP;
+
+ /* Re-use tc offload path by moving the ft flow to the
+ * reserved ft chain.
+ *
+ * FT offload can use prio range [0, INT_MAX], so we normalize
+ * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+ * as with tc, where prio 0 isn't supported.
+ *
+ * We only support chain 0 of FT offload.
+ */
+ if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+ return -EOPNOTSUPP;
+ if (tmp.common.chain_index != 0)
+ return -EOPNOTSUPP;
+
+ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.prio++;
+ err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
+ memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
+static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
+int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct flow_block_offload *f = type_data;
+
+ f->unlocked_driver_cb = true;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &mlx5e_rep_block_tc_cb_list,
+ mlx5e_rep_setup_tc_cb,
+ priv, priv, true);
+ case TC_SETUP_FT:
+ return flow_block_cb_setup_simple(type_data,
+ &mlx5e_rep_block_ft_cb_list,
+ mlx5e_rep_setup_ft_cb,
+ priv, priv, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ int err;
+
+ mutex_init(&uplink_priv->unready_flows_lock);
+ INIT_LIST_HEAD(&uplink_priv->unready_flows);
+
+ /* init shared tc flow table */
+ err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ return err;
+}
+
+void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
+{
+ /* delete shared tc flow table */
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
+}
+
+void mlx5e_rep_tc_enable(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
+ mlx5e_tc_reoffload_flows_work);
+}
+
+void mlx5e_rep_tc_disable(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
+}
+
+int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);
+
+ return NOTIFY_OK;
+}
+
+static struct mlx5e_rep_indr_block_priv *
+mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev)
+{
+ struct mlx5e_rep_indr_block_priv *cb_priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv,
+ &rpriv->uplink_priv.tc_indr_block_priv_list,
+ list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static int
+mlx5e_rep_indr_offload(struct net_device *netdev,
+ struct flow_cls_offload *flower,
+ struct mlx5e_rep_indr_block_priv *indr_priv,
+ unsigned long flags)
+{
+ struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
+ int err = 0;
+
+ switch (flower->command) {
+ case FLOW_CLS_REPLACE:
+ err = mlx5e_configure_flower(netdev, priv, flower, flags);
+ break;
+ case FLOW_CLS_DESTROY:
+ err = mlx5e_delete_flower(netdev, priv, flower, flags);
+ break;
+ case FLOW_CLS_STATS:
+ err = mlx5e_stats_flower(netdev, priv, flower, flags);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
+{
+ unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
+ struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
+ flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
+{
+ struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+ struct flow_cls_offload *f = type_data;
+ struct flow_cls_offload tmp;
+ struct mlx5e_priv *mpriv;
+ struct mlx5_eswitch *esw;
+ unsigned long flags;
+ int err;
+
+ mpriv = netdev_priv(priv->rpriv->netdev);
+ esw = mpriv->mdev->priv.eswitch;
+
+ flags = MLX5_TC_FLAG(EGRESS) |
+ MLX5_TC_FLAG(ESW_OFFLOAD) |
+ MLX5_TC_FLAG(FT_OFFLOAD);
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ memcpy(&tmp, f, sizeof(*f));
+
+ /* Re-use tc offload path by moving the ft flow to the
+ * reserved ft chain.
+ *
+ * FT offload can use prio range [0, INT_MAX], so we normalize
+ * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+ * as with tc, where prio 0 isn't supported.
+ *
+ * We only support chain 0 of FT offload.
+ */
+ if (!mlx5_esw_chains_prios_supported(esw) ||
+ tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
+ tmp.common.chain_index)
+ return -EOPNOTSUPP;
+
+ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.prio++;
+ err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
+ memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlx5e_rep_indr_block_unbind(void *cb_priv)
+{
+ struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
+
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+}
+
+static LIST_HEAD(mlx5e_block_cb_list);
+
+static int
+mlx5e_rep_indr_setup_block(struct net_device *netdev,
+ struct mlx5e_rep_priv *rpriv,
+ struct flow_block_offload *f,
+ flow_setup_cb_t *setup_cb)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5e_rep_indr_block_priv *indr_priv;
+ struct flow_block_cb *block_cb;
+
+ if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
+ !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->unlocked_driver_cb = true;
+ f->driver_block_list = &mlx5e_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (indr_priv)
+ return -EEXIST;
+
+ indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
+ if (!indr_priv)
+ return -ENOMEM;
+
+ indr_priv->netdev = netdev;
+ indr_priv->rpriv = rpriv;
+ list_add(&indr_priv->list,
+ &rpriv->uplink_priv.tc_indr_block_priv_list);
+
+ block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+ mlx5e_rep_indr_block_unbind);
+ if (IS_ERR(block_cb)) {
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+ return PTR_ERR(block_cb);
+ }
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
+
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (!indr_priv)
+ return -ENOENT;
+
+ block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static
+int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+ mlx5e_rep_indr_setup_tc_cb);
+ case TC_SETUP_FT:
+ return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+ mlx5e_rep_indr_setup_ft_cb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
+
+ return flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv);
+}
+
+void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
+{
+ flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
+ mlx5e_rep_indr_setup_tc_cb);
+}
+
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct tunnel_match_enc_opts enc_opts = {};
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct metadata_dst *tun_dst;
+ struct tunnel_match_key key;
+ u32 tun_id, enc_opts_id;
+ struct net_device *dev;
+ int err;
+
+ enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
+ tun_id = tunnel_id >> ENC_OPTS_BITS;
+
+ if (!tun_id)
+ return true;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
+ if (err) {
+ WARN_ON_ONCE(true);
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel for tun_id: %d, err: %d\n",
+ tun_id, err);
+ return false;
+ }
+
+ if (enc_opts_id) {
+ err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id, &enc_opts);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
+ enc_opts_id, err);
+ return false;
+ }
+ }
+
+ tun_dst = tun_rx_dst(enc_opts.key.len);
+ if (!tun_dst) {
+ WARN_ON_ONCE(true);
+ return false;
+ }
+
+ ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+ key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ 0, /* label */
+ key.enc_tp.src, key.enc_tp.dst,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ TUNNEL_KEY);
+
+ if (enc_opts.key.len)
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+ enc_opts.key.data,
+ enc_opts.key.len,
+ enc_opts.key.dst_opt_type);
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ dev = dev_get_by_index(&init_net, key.filter_ifindex);
+ if (!dev) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel device with ifindex: %d\n",
+ key.filter_ifindex);
+ return false;
+ }
+
+ /* Set tun_dev so we do dev_put() after datapath */
+ tc_priv->tun_dev = dev;
+
+ skb->dev = dev;
+
+ return true;
+}
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tc_skb_ext *tc_skb_ext;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ int tunnel_moffset;
+ int err;
+
+ reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
+ if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
+ reg_c0 = 0;
+ reg_c1 = be32_to_cpu(cqe->ft_metadata);
+
+ if (!reg_c0)
+ return true;
+
+ priv = netdev_priv(skb->dev);
+ esw = priv->mdev->priv.eswitch;
+
+ err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find chain for chain tag: %d, err: %d\n",
+ reg_c0, err);
+ return false;
+ }
+
+ if (chain) {
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+
+ tc_skb_ext->chain = chain;
+
+ tuple_id = reg_c1 & TUPLE_ID_MAX;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+ return false;
+ }
+
+ tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
+ tunnel_id = reg_c1 >> (8 * tunnel_moffset);
+ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+ return true;
+}
+
+void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (tc_priv->tun_dev)
+ dev_put(tc_priv->tun_dev);
+}
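mlx5e_restore_tunnel() treats the id carried in reg_c1 as a composite: the low ENC_OPTS_BITS select the encap-options mapping and the remaining high bits select the tunnel-key mapping. The pack/unpack arithmetic, mirroring the code above (the helper names are hypothetical; the bit width itself is defined elsewhere in the driver):

static u32 pack_tunnel_id(u32 tun_id, u32 enc_opts_id)
{
	return (tun_id << ENC_OPTS_BITS) | (enc_opts_id & ENC_OPTS_BITS_MASK);
}

static void unpack_tunnel_id(u32 tunnel_id, u32 *tun_id, u32 *enc_opts_id)
{
	*enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	*tun_id = tunnel_id >> ENC_OPTS_BITS;
}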
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
new file mode 100644
index 000000000000..fdf9702c2d7d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_REP_TC_H__
+#define __MLX5_EN_REP_TC_H__
+
+#include <linux/skbuff.h>
+#include "en_tc.h"
+#include "en_rep.h"
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv);
+void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv);
+
+int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv);
+void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv);
+
+void mlx5e_rep_tc_enable(struct mlx5e_priv *priv);
+void mlx5e_rep_tc_disable(struct mlx5e_priv *priv);
+
+int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv);
+
+void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ bool neigh_connected,
+ unsigned char ha[ETH_ALEN]);
+
+int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+
+int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv);
+void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+struct mlx5e_rep_priv;
+static inline int
+mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv) { return 0; }
+static inline void
+mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv) {}
+
+static inline int
+mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv) { return 0; }
+static inline void
+mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) {}
+
+static inline void
+mlx5e_rep_tc_enable(struct mlx5e_priv *priv) {}
+static inline void
+mlx5e_rep_tc_disable(struct mlx5e_priv *priv) {}
+
+static inline int
+mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv) { return NOTIFY_DONE; }
+
+static inline int
+mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data) { return -EOPNOTSUPP; }
+
+struct mlx5e_tc_update_priv;
+static inline bool
+mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv) { return true; }
+static inline void
+mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
+#endif /* __MLX5_EN_REP_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a172c5e39710..afc19dca1f5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -24,6 +24,7 @@
#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
+#define MLX5_CT_STATE_NAT_BIT BIT(3)
#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
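The new MLX5_CT_STATE_NAT_BIT joins the tracked and established bits in the ct_state register. A connection's state word is composed as in mlx5_tc_ct_entry_create_mod_hdr() further down:

	u16 ct_state = MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT;

	if (nat)	/* the entry was offloaded through the NAT table */
		ct_state |= MLX5_CT_STATE_NAT_BIT;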
@@ -61,6 +62,15 @@ struct mlx5_ct_zone_rule {
bool nat;
};
+struct mlx5_tc_ct_pre {
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_group *flow_grp;
+ struct mlx5_flow_group *miss_grp;
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_handle *miss_rule;
+ struct mlx5_modify_hdr *modify_hdr;
+};
+
struct mlx5_ct_ft {
struct rhash_head node;
u16 zone;
@@ -68,14 +78,14 @@ struct mlx5_ct_ft {
struct nf_flowtable *nf_ft;
struct mlx5_tc_ct_priv *ct_priv;
struct rhashtable ct_entries_ht;
+ struct mlx5_tc_ct_pre pre_ct;
+ struct mlx5_tc_ct_pre pre_ct_nat;
};
struct mlx5_ct_entry {
u16 zone;
struct rhash_head node;
- struct flow_rule *flow_rule;
struct mlx5_fc *counter;
- unsigned long lastuse;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_zone_rule zone_rules[2];
@@ -109,7 +119,7 @@ mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
}
static int
-mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec,
+mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
struct flow_rule *rule)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -124,10 +134,8 @@ mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec,
flow_rule_match_basic(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(match.mask->n_proto));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(match.key->n_proto));
+ mlx5e_tc_set_ethertype(priv->mdev, &match, true, headers_c,
+ headers_v);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
match.mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
@@ -384,7 +392,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
char *modact;
int err, i;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -428,6 +436,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_eswitch *esw = ct_priv->esw;
struct mlx5_modify_hdr *mod_hdr;
struct flow_action_entry *meta;
+ u16 ct_state = 0;
int err;
meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
@@ -446,11 +455,13 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
&mod_acts);
if (err)
goto err_mapping;
+
+ ct_state |= MLX5_CT_STATE_NAT_BIT;
}
+ ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT;
err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
- (MLX5_CT_STATE_ESTABLISHED_BIT |
- MLX5_CT_STATE_TRK_BIT),
+ ct_state,
meta->ct_metadata.mark,
meta->ct_metadata.labels[0],
tupleid);
@@ -520,7 +531,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->counter = entry->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
- mlx5_tc_ct_set_tuple_match(spec, flow_rule);
+ mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
entry->zone & MLX5_CT_ZONE_MASK,
MLX5_CT_ZONE_MASK);
@@ -603,7 +614,6 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
return -ENOMEM;
entry->zone = ft->zone;
- entry->flow_rule = flow_rule;
entry->cookie = flow->cookie;
entry->restore_cookie = meta_action->ct_metadata.cookie;
@@ -687,7 +697,7 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
return mlx5_tc_ct_block_flow_offload_stats(ft, f);
default:
break;
- };
+ }
return -EOPNOTSUPP;
}
@@ -699,6 +709,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector_key_ct *mask, *key;
bool trk, est, untrk, unest, new;
u32 ctstate = 0, ctstate_mask = 0;
@@ -706,7 +717,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
u16 ct_state, ct_state_mask;
struct flow_match_ct match;
- if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
return 0;
if (!ct_priv) {
@@ -715,7 +726,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- flow_rule_match_ct(f->rule, &match);
+ flow_rule_match_ct(rule, &match);
key = match.key;
mask = match.mask;
@@ -794,6 +805,238 @@ mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
return 0;
}
+static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
+ struct mlx5_tc_ct_pre *pre_ct,
+ bool nat)
+{
+ struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
+ struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_flow_table *fdb = pre_ct->fdb;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 ctstate;
+ u16 zone;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ zone = ct_ft->zone & MLX5_CT_ZONE_MASK;
+ err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ZONE_TO_REG, zone);
+ if (err) {
+ ct_dbg("Failed to set zone register mapping");
+ goto err_mapping;
+ }
+
+ mod_hdr = mlx5_modify_header_alloc(dev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ pre_mod_acts.num_actions,
+ pre_mod_acts.actions);
+
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ ct_dbg("Failed to create pre ct mod hdr");
+ goto err_mapping;
+ }
+ pre_ct->modify_hdr = mod_hdr;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ flow_act.modify_hdr = mod_hdr;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ /* add flow rule */
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
+ zone, MLX5_CT_ZONE_MASK);
+ ctstate = MLX5_CT_STATE_TRK_BIT;
+ if (nat)
+ ctstate |= MLX5_CT_STATE_NAT_BIT;
+ mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate);
+
+ dest.ft = ct_priv->post_ct;
+ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add pre ct flow rule zone %d", zone);
+ goto err_flow_rule;
+ }
+ pre_ct->flow_rule = rule;
+
+ /* add miss rule */
+ memset(spec, 0, sizeof(*spec));
+ dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add pre ct miss rule zone %d", zone);
+ goto err_miss_rule;
+ }
+ pre_ct->miss_rule = rule;
+
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+ kvfree(spec);
+ return 0;
+
+err_miss_rule:
+ mlx5_del_flow_rules(pre_ct->flow_rule);
+err_flow_rule:
+ mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
+err_mapping:
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+ kvfree(spec);
+ return err;
+}
+
+static void
+tc_ct_pre_ct_del_rules(struct mlx5_ct_ft *ct_ft,
+ struct mlx5_tc_ct_pre *pre_ct)
+{
+ struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
+ struct mlx5_core_dev *dev = ct_priv->esw->dev;
+
+ mlx5_del_flow_rules(pre_ct->flow_rule);
+ mlx5_del_flow_rules(pre_ct->miss_rule);
+ mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
+}
+
+static int
+mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
+ struct mlx5_tc_ct_pre *pre_ct,
+ bool nat)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
+ struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ u32 metadata_reg_c_2_mask;
+ u32 *flow_group_in;
+ void *misc;
+ int err;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns) {
+ err = -EOPNOTSUPP;
+ ct_dbg("Failed to get FDB flow namespace");
+ return err;
+ }
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_TC_OFFLOAD;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ ct_dbg("Failed to create pre ct table");
+ goto out_free;
+ }
+ pre_ct->fdb = ft;
+
+ /* create flow group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+
+ misc = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria.misc_parameters_2);
+
+ metadata_reg_c_2_mask = MLX5_CT_ZONE_MASK;
+ metadata_reg_c_2_mask |= (MLX5_CT_STATE_TRK_BIT << 16);
+ if (nat)
+ metadata_reg_c_2_mask |= (MLX5_CT_STATE_NAT_BIT << 16);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_2,
+ metadata_reg_c_2_mask);
+
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ ct_dbg("Failed to create pre ct group");
+ goto err_flow_grp;
+ }
+ pre_ct->flow_grp = g;
+
+ /* create miss group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ ct_dbg("Failed to create pre ct miss group");
+ goto err_miss_grp;
+ }
+ pre_ct->miss_grp = g;
+
+ err = tc_ct_pre_ct_add_rules(ct_ft, pre_ct, nat);
+ if (err)
+ goto err_add_rules;
+
+ kvfree(flow_group_in);
+ return 0;
+
+err_add_rules:
+ mlx5_destroy_flow_group(pre_ct->miss_grp);
+err_miss_grp:
+ mlx5_destroy_flow_group(pre_ct->flow_grp);
+err_flow_grp:
+ mlx5_destroy_flow_table(ft);
+out_free:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void
+mlx5_tc_ct_free_pre_ct(struct mlx5_ct_ft *ct_ft,
+ struct mlx5_tc_ct_pre *pre_ct)
+{
+ tc_ct_pre_ct_del_rules(ct_ft, pre_ct);
+ mlx5_destroy_flow_group(pre_ct->miss_grp);
+ mlx5_destroy_flow_group(pre_ct->flow_grp);
+ mlx5_destroy_flow_table(pre_ct->fdb);
+}
+
+static int
+mlx5_tc_ct_alloc_pre_ct_tables(struct mlx5_ct_ft *ft)
+{
+ int err;
+
+ err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct, false);
+ if (err)
+ return err;
+
+ err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct_nat, true);
+ if (err)
+ goto err_pre_ct_nat;
+
+ return 0;
+
+err_pre_ct_nat:
+ mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
+ return err;
+}
+
+static void
+mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft)
+{
+ mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct_nat);
+ mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
+}
+
static struct mlx5_ct_ft *
mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
struct nf_flowtable *nf_ft)
@@ -816,6 +1059,10 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
ft->ct_priv = ct_priv;
refcount_set(&ft->refcount, 1);
+ err = mlx5_tc_ct_alloc_pre_ct_tables(ft);
+ if (err)
+ goto err_alloc_pre_ct;
+
err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
if (err)
goto err_init;
@@ -837,6 +1084,8 @@ err_add_cb:
err_insert:
rhashtable_destroy(&ft->ct_entries_ht);
err_init:
+ mlx5_tc_ct_free_pre_ct_tables(ft);
+err_alloc_pre_ct:
kfree(ft);
return ERR_PTR(err);
}
@@ -862,21 +1111,40 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
rhashtable_free_and_destroy(&ft->ct_entries_ht,
mlx5_tc_ct_flush_ft_entry,
ct_priv);
+ mlx5_tc_ct_free_pre_ct_tables(ft);
kfree(ft);
}
/* We translate the tc filter with CT action to the following HW model:
*
- * +-------------------+ +--------------------+ +--------------+
- * + pre_ct (tc chain) +----->+ CT (nat or no nat) +--->+ post_ct +----->
- * + original match + | + tuple + zone match + | + fte_id match + |
- * +-------------------+ | +--------------------+ | +--------------+ |
- * v v v
- * set chain miss mapping set mark original
- * set fte_id set label filter
- * set zone set established actions
- * set tunnel_id do nat (if needed)
- * do decap
+ * +---------------------+
+ * + fdb prio (tc chain) +
+ * + original match +
+ * +---------------------+
+ * | set chain miss mapping
+ * | set fte_id
+ * | set tunnel_id
+ * | do decap
+ * v
+ * +---------------------+
+ * + pre_ct/pre_ct_nat + if matches +---------------------+
+ * + zone+nat match +---------------->+ post_ct (see below) +
+ * +---------------------+ set zone +---------------------+
+ * | set zone
+ * v
+ * +--------------------+
+ * + CT (nat or no nat) +
+ * + tuple + zone match +
+ * +--------------------+
+ * | set mark
+ * | set label
+ * | set established
+ * | do nat (if needed)
+ * v
+ * +--------------+
+ * + post_ct + original filter actions
+ * + fte_id match +------------------------>
+ * +--------------+
*/
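For reference, the flow group created in mlx5_tc_ct_alloc_pre_ct() folds both of the pre_ct match keys into a single metadata register. A minimal sketch of that packing, assuming (as the << 16 shifts above imply) that MLX5_CT_ZONE_MASK covers the low 16 bits of register C2:

/* Illustration only: the value/mask layout matched by the pre_ct group.
 * The low 16 bits carry the conntrack zone; the ct_state flags sit above
 * them (TRK always, plus NAT for the pre_ct_nat instance).
 */
static u32 example_pre_ct_reg_c2_mask(bool nat)
{
	u32 mask = MLX5_CT_ZONE_MASK;			/* zone, bits 0..15 */

	mask |= MLX5_CT_STATE_TRK_BIT << 16;		/* tracked */
	if (nat)
		mask |= MLX5_CT_STATE_NAT_BIT << 16;	/* pre_ct_nat only */
	return mask;
}

A packet that matches zone+state is forwarded straight to post_ct; anything else falls through to the miss rule and into the CT (or CT-NAT) table, exactly as drawn above.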
static int
__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
@@ -891,7 +1159,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
struct mlx5_flow_spec *post_ct_spec = NULL;
struct mlx5_eswitch *esw = ct_priv->esw;
struct mlx5_esw_flow_attr *pre_ct_attr;
- struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_modify_hdr *mod_hdr;
struct mlx5_flow_handle *rule;
struct mlx5_ct_flow *ct_flow;
int chain_mapping = 0, err;
@@ -954,14 +1222,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
goto err_mapping;
}
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ZONE_TO_REG,
- attr->ct_attr.zone &
- MLX5_CT_ZONE_MASK);
- if (err) {
- ct_dbg("Failed to set zone register mapping");
- goto err_mapping;
- }
-
err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
FTEID_TO_REG, fte_id);
if (err) {
@@ -1021,7 +1281,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
/* Change original rule point to ct table */
pre_ct_attr->dest_chain = 0;
- pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.fdb : ft->pre_ct.fdb;
ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw,
orig_spec,
pre_ct_attr);
@@ -1131,7 +1391,7 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
{
bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
- struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *rule = ERR_PTR(-EINVAL);
int err;
if (!ct_priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 091d305b633e..626f6c04882e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -130,7 +130,9 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
struct netlink_ext_ack *extack)
{
- if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
return 0;
NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index b45c3f46570b..7cce85faa16f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -4,8 +4,11 @@
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
+#include <net/bareudp.h>
#include "en/tc_tun.h"
#include "en_tc.h"
+#include "rep/tc.h"
+#include "rep/neigh.h"
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
{
@@ -16,6 +19,8 @@ struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
else if (netif_is_gretap(tunnel_dev) ||
netif_is_ip6gretap(tunnel_dev))
return &gre_tunnel;
+ else if (netif_is_bareudp(tunnel_dev))
+ return &mplsoudp_tunnel;
else
return NULL;
}
@@ -96,9 +101,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
}
rt = ip_route_output_key(dev_net(mirred_dev), fl4);
- ret = PTR_ERR_OR_ZERO(rt);
- if (ret)
- return ret;
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
ip_rt_put(rt);
@@ -508,6 +512,13 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_basic key_basic = {};
+ struct flow_dissector_key_basic mask_basic = {
+ .n_proto = htons(0xFFFF),
+ };
+ struct flow_match_basic match_basic = {
+ .key = &key_basic, .mask = &mask_basic,
+ };
struct flow_match_control match;
u16 addr_type;
@@ -533,10 +544,9 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
ntohl(match.key->dst));
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IP);
+ key_basic.n_proto = htons(ETH_P_IP);
+ mlx5e_tc_set_ethertype(priv->mdev, &match_basic, true,
+ headers_c, headers_v);
} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
struct flow_match_ipv6_addrs match;
@@ -559,10 +569,9 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
&match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
ipv6));
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IPV6);
+ key_basic.n_proto = htons(ETH_P_IPV6);
+ mlx5e_tc_set_ethertype(priv->mdev, &match_basic, true,
+ headers_c, headers_v);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 1630f0ec3ad7..704359df6095 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -16,6 +16,7 @@ enum {
MLX5E_TC_TUNNEL_TYPE_VXLAN,
MLX5E_TC_TUNNEL_TYPE_GENEVE,
MLX5E_TC_TUNNEL_TYPE_GRETAP,
+ MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
};
struct mlx5e_tc_tunnel {
@@ -46,6 +47,7 @@ struct mlx5e_tc_tunnel {
extern struct mlx5e_tc_tunnel vxlan_tunnel;
extern struct mlx5e_tc_tunnel geneve_tunnel;
extern struct mlx5e_tc_tunnel gre_tunnel;
+extern struct mlx5e_tc_tunnel mplsoudp_tunnel;
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
new file mode 100644
index 000000000000..1f9526244222
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/bareudp.h>
+#include <net/mpls.h>
+#include "en/tc_tun.h"
+
+static bool can_offload(struct mlx5e_priv *priv)
+{
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l3_tunnel_to_l2);
+}
+
+static int calc_hlen(struct mlx5e_encap_entry *e)
+{
+ return sizeof(struct udphdr) + MPLS_HLEN;
+}
+
+static int init_encap_attr(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ e->tunnel = &mplsoudp_tunnel;
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ return 0;
+}
+
+static int generate_ip_tun_hdr(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *r)
+{
+ const struct ip_tunnel_key *tun_key = &r->tun_info->key;
+ struct udphdr *udp = (struct udphdr *)(buf);
+ struct mpls_shim_hdr *mpls;
+ u32 tun_id;
+
+ tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id));
+ mpls = (struct mpls_shim_hdr *)(udp + 1);
+ *ip_proto = IPPROTO_UDP;
+
+ udp->dest = tun_key->tp_dst;
+ *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true);
+
+ return 0;
+}
+
+static int parse_udp_ports(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ return mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
+}
+
+static int parse_tunnel(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_enc_keyid enc_keyid;
+ struct flow_match_mpls match;
+ void *misc2_c;
+ void *misc2_v;
+
+ misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
+ return 0;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return 0;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
+ if (!enc_keyid.mask->keyid)
+ return 0;
+
+ if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_CW_MPLS_UDP))
+ return -EOPNOTSUPP;
+
+ flow_rule_match_mpls(rule, &match);
+
+ /* Only support matching the first LSE */
+ if (match.mask->used_lses != 1)
+ return -EOPNOTSUPP;
+
+ MLX5_SET(fte_match_set_misc2, misc2_c,
+ outer_first_mpls_over_udp.mpls_label,
+ match.mask->ls[0].mpls_label);
+ MLX5_SET(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp.mpls_label,
+ match.key->ls[0].mpls_label);
+
+ MLX5_SET(fte_match_set_misc2, misc2_c,
+ outer_first_mpls_over_udp.mpls_exp,
+ match.mask->ls[0].mpls_tc);
+ MLX5_SET(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp.mpls_exp, match.key->ls[0].mpls_tc);
+
+ MLX5_SET(fte_match_set_misc2, misc2_c,
+ outer_first_mpls_over_udp.mpls_s_bos,
+ match.mask->ls[0].mpls_bos);
+ MLX5_SET(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp.mpls_s_bos,
+ match.key->ls[0].mpls_bos);
+
+ MLX5_SET(fte_match_set_misc2, misc2_c,
+ outer_first_mpls_over_udp.mpls_ttl,
+ match.mask->ls[0].mpls_ttl);
+ MLX5_SET(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp.mpls_ttl,
+ match.key->ls[0].mpls_ttl);
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ return 0;
+}
+
+struct mlx5e_tc_tunnel mplsoudp_tunnel = {
+ .tunnel_type = MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
+ .match_level = MLX5_MATCH_L4,
+ .can_offload = can_offload,
+ .calc_hlen = calc_hlen,
+ .init_encap_attr = init_encap_attr,
+ .generate_ip_tun_hdr = generate_ip_tun_hdr,
+ .parse_udp_ports = parse_udp_ports,
+ .parse_tunnel = parse_tunnel,
+};
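A note on the encapsulation this tunnel emits: calc_hlen() sizes the reformat buffer as a UDP header immediately followed by one MPLS label stack entry, and generate_ip_tun_hdr() fills both in. A sketch of the resulting 12-byte layout (illustrative, not part of the patch):

/* Buffer written by generate_ip_tun_hdr():
 *
 *   byte 0                8                 12
 *   +--------------------+------------------+
 *   | struct udphdr      | mpls_shim_hdr    |
 *   | (dest = tp_dst)    | label/tc/bos/ttl |
 *   +--------------------+------------------+
 *
 * The single MPLS entry is encoded from the tunnel key: the label comes
 * from tun_id, TTL and TOS from the key, and the bottom-of-stack bit is
 * set since only one LSE is supported.
 */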
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index f07b1399744e..bfd3e1161bc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -6,46 +6,32 @@
#include "en.h"
-#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
- MLX5E_SQ_NOPS_ROOM)
-
-#ifndef CONFIG_MLX5_EN_TLS
-#define MLX5E_SQ_TLS_ROOM (0)
-#else
-/* TLS offload requires additional stop_room for:
- * - a resync SKB.
- * kTLS offload requires fixed additional stop_room for:
- * - a static params WQE, and a progress params WQE.
- * The additional MTU-depending room for the resync DUMP WQEs
- * will be calculated and added in runtime.
- */
-#define MLX5E_SQ_TLS_ROOM \
- (MLX5_SEND_WQE_MAX_WQEBBS + \
- MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
-#endif
-
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+enum mlx5e_icosq_wqe_type {
+ MLX5E_ICOSQ_WQE_NOP,
+ MLX5E_ICOSQ_WQE_UMR_RX,
+};
+
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
-static inline void *
-mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
+static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
void *wqe;
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
- memset(wqe, 0, size);
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ memset(wqe, 0, wqe_size);
return wqe;
}
+#define MLX5E_TX_FETCH_WQE(sq, pi) \
+ ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
+
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
@@ -81,6 +67,84 @@ mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
return wqe;
}
+struct mlx5e_tx_wqe_info {
+ struct sk_buff *skb;
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+#ifdef CONFIG_MLX5_EN_TLS
+ struct page *resync_dump_frag_page;
+#endif
+};
+
+static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs < size)) {
+ struct mlx5e_tx_wqe_info *wi, *edge_wi;
+
+ wi = &sq->db.wqe_info[pi];
+ edge_wi = wi + contig_wqebbs;
+
+ /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+ for (; wi < edge_wi; wi++) {
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+ sq->stats->nop += contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ }
+
+ return pi;
+}
+
+struct mlx5e_icosq_wqe_info {
+ u8 wqe_type;
+ u8 num_wqebbs;
+
+ /* Auxiliary data for different wqe types. */
+ union {
+ struct {
+ struct mlx5e_rq *rq;
+ } umr;
+ };
+};
+
+static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs < size)) {
+ struct mlx5e_icosq_wqe_info *wi, *edge_wi;
+
+ wi = &sq->db.wqe_info[pi];
+ edge_wi = wi + contig_wqebbs;
+
+ /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+ for (; wi < edge_wi; wi++) {
+ *wi = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_NOP,
+ .num_wqebbs = 1,
+ };
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ }
+
+ return pi;
+}
+
static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
u16 pi, u16 nnops)
@@ -102,7 +166,7 @@ static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
struct mlx5_wqe_ctrl_seg *ctrl)
{
- ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
@@ -189,6 +253,22 @@ static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
}
}
+static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
+ struct mlx5_err_cqe *err_cqe)
+{
+ struct mlx5_cqwq *wq = &cq->wq;
+ u32 ci;
+
+ ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
+
+ netdev_err(cq->channel->netdev,
+ "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+ cq->mcq.cqn, ci, sqn,
+ get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
+ err_cqe->syndrome, err_cqe->vendor_err_synd);
+ mlx5_dump_err_cqe(cq->mdev, err_cqe);
+}
+
/* SW parser related functions */
struct mlx5e_swp_spec {
@@ -232,4 +312,25 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
}
}
+static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
+{
+ BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);
+
+ /* A WQE must not cross the page boundary, hence two conditions:
+ * 1. Its size must not exceed the page size.
+ * 2. If the WQE size is X, and the space remaining in a page is less
+ * than X, this space needs to be padded with NOPs. So, one WQE of
+ * size X may require up to X-1 WQEBBs of padding, which makes the
+ * stop room of X-1 + X.
+ * WQE size is also limited by the hardware limit.
+ */
+
+ if (__builtin_constant_p(wqe_size))
+ BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
+ else
+ WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
+
+ return wqe_size * 2 - 1;
+}
+
#endif
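The stop-room rule above replaces the old fixed MLX5E_SQ_STOP_ROOM budget with a per-WQE reserve. A hypothetical caller (sketch only; mlx5e_wqc_has_room_for() and mlx5e_stop_room_for_wqe() are the helpers defined in this header) would gate posting like this:

/* Sketch: a WQE of 'wqe_size' WQEBBs may burn up to wqe_size - 1 WQEBBs
 * of NOP padding at a page edge, so require wqe_size * 2 - 1 free WQEBBs
 * before posting.
 */
static bool example_sq_can_post(struct mlx5e_txqsq *sq, u16 wqe_size)
{
	u16 stop_room = mlx5e_stop_room_for_wqe(wqe_size);

	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room);
}

This is the same pattern mlx5e_xmit_xdp_frame_check_mpwqe() adopts in the xdp.c hunk below.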
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index f049e0ac308a..c9d308e91965 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -31,7 +31,7 @@
*/
#include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
@@ -64,14 +64,14 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return false;
xdptxd.data = xdpf->data;
xdptxd.len = xdpf->len;
- if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) {
+ if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
/* The xdp_buff was in the UMEM and was copied into a newly
* allocated page. The UMEM page was returned via the ZCA, and
* this new page has to be mapped at this point and has to be
@@ -97,10 +97,10 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdpi.frame.xdpf = xdpf;
xdpi.frame.dma_addr = dma_addr;
} else {
- /* Driver assumes that convert_to_xdp_frame returns an xdp_frame
- * that points to the same memory region as the original
- * xdp_buff. It allows to map the memory only once and to use
- * the DMA_BIDIRECTIONAL mode.
+ /* Driver assumes that xdp_convert_buff_to_frame returns
+ * an xdp_frame that points to the same memory region as
+ * the original xdp_buff. This allows the memory to be
+ * mapped only once and used in DMA_BIDIRECTIONAL mode.
*/
xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
@@ -119,49 +119,33 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len, bool xsk)
+ u32 *len, struct xdp_buff *xdp)
{
struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
- struct xdp_umem *umem = rq->umem;
- struct xdp_buff xdp;
u32 act;
int err;
if (!prog)
return false;
- xdp.data = va + *rx_headroom;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.data_hard_start = va;
- if (xsk)
- xdp.handle = di->xsk.handle;
- xdp.rxq = &rq->xdp_rxq;
-
- act = bpf_prog_run_xdp(prog, &xdp);
- if (xsk) {
- u64 off = xdp.data - xdp.data_hard_start;
-
- xdp.handle = xsk_umem_adjust_offset(umem, xdp.handle, off);
- }
+ act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
- *rx_headroom = xdp.data - xdp.data_hard_start;
- *len = xdp.data_end - xdp.data;
+ *len = xdp->data_end - xdp->data;
return false;
case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, &xdp)))
+ if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
return true;
case XDP_REDIRECT:
/* When XDP enabled then page-refcnt==1 here */
- err = xdp_do_redirect(rq->netdev, &xdp, prog);
+ err = xdp_do_redirect(rq->netdev, xdp, prog);
if (unlikely(err))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
- if (!xsk)
+ if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
mlx5e_page_dma_unmap(rq, di);
rq->stats->xdp_redirect++;
return true;
@@ -178,20 +162,43 @@ xdp_abort:
}
}
-static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
+static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
- struct mlx5e_xdpsq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi, contig_wqebbs;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs < size)) {
+ struct mlx5e_xdp_wqe_info *wi, *edge_wi;
+
+ wi = &sq->db.wqe_info[pi];
+ edge_wi = wi + contig_wqebbs;
+
+ /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+ for (; wi < edge_wi; wi++) {
+ *wi = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = 1,
+ .num_pkts = 0,
+ };
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+ sq->stats->nops += contig_wqebbs;
- if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS))
- mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ }
+
+ return pi;
+}
- session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi);
+static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
+ u16 pi;
+
+ pi = mlx5e_xdpsq_get_next_pi(sq, MLX5_SEND_WQE_MAX_WQEBBS);
+ session->wqe = MLX5E_TX_FETCH_WQE(sq, pi);
prefetchw(session->wqe->data);
session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
@@ -233,8 +240,10 @@ enum {
static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
+ const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- MLX5E_XDPSQ_STOP_ROOM))) {
+ stop_room))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -408,22 +417,15 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
i = 0;
do {
- u16 wqe_counter;
+ struct mlx5e_xdp_wqe_info *wi;
+ u16 wqe_counter, ci;
bool last_wqe;
mlx5_cqwq_pop(&cq->wq);
wqe_counter = be16_to_cpu(cqe->wqe_counter);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
- netdev_WARN_ONCE(sq->channel->netdev,
- "Bad OP in XDPSQ CQE: 0x%x\n",
- get_cqe_opcode(cqe));
-
do {
- struct mlx5e_xdp_wqe_info *wi;
- u16 ci;
-
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
@@ -432,6 +434,15 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
} while (!last_wqe);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ netdev_WARN_ONCE(sq->channel->netdev,
+ "Bad OP in XDPSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+ (struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+ }
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
if (xsk_frames)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index d7587f40ecae..ca48c293151b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -40,8 +40,6 @@
(sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
-#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM)
-
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
@@ -63,7 +61,7 @@
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len, bool xsk);
+ u32 *len, struct xdp_buff *xdp);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
@@ -137,22 +135,10 @@ mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
}
-static inline void
-mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-
- edge_wi = wi + nnops;
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- wi->num_wqebbs = 1;
- wi->num_pkts = 0;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
-
- sq->stats->nops += nnops;
-}
+struct mlx5e_xdp_wqe_info {
+ u8 num_wqebbs;
+ u8 num_pkts;
+};
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
@@ -186,19 +172,6 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
session->ds_count++;
}
-static inline struct mlx5e_tx_wqe *
-mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *wqe;
-
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
- memset(wqe, 0, sizeof(*wqe));
-
- return wqe;
-}
-
static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
struct mlx5e_xdp_info *xi)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 62fc8a128a8d..a33a1f762c70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -3,71 +3,10 @@
#include "rx.h"
#include "en/xdp.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
/* RX data path */
-bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count)
-{
- /* Check in advance that we have enough frames, instead of allocating
- * one-by-one, failing and moving frames to the Reuse Ring.
- */
- return xsk_umem_has_addrs_rq(rq->umem, count);
-}
-
-int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- struct xdp_umem *umem = rq->umem;
- u64 handle;
-
- if (!xsk_umem_peek_addr_rq(umem, &handle))
- return -ENOMEM;
-
- dma_info->xsk.handle = xsk_umem_adjust_offset(umem, handle,
- rq->buff.umem_headroom);
- dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle);
-
- /* No need to add headroom to the DMA address. In striding RQ case, we
- * just provide pages for UMR, and headroom is counted at the setup
- * stage when creating a WQE. In non-striding RQ case, headroom is
- * accounted in mlx5e_alloc_rx_wqe.
- */
- dma_info->addr = xdp_umem_get_dma(umem, handle);
-
- xsk_umem_release_addr_rq(umem);
-
- dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
- return 0;
-}
-
-static inline void mlx5e_xsk_recycle_frame(struct mlx5e_rq *rq, u64 handle)
-{
- xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask);
-}
-
-/* XSKRQ uses pages from UMEM, they must not be released. They are returned to
- * the userspace if possible, and if not, this function is called to reuse them
- * in the driver.
- */
-void mlx5e_xsk_page_release(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- mlx5e_xsk_recycle_frame(rq, dma_info->xsk.handle);
-}
-
-/* Return a frame back to the hardware to fill in again. It is used by XDP when
- * the XDP program returns XDP_TX or XDP_REDIRECT not to an XSKMAP.
- */
-void mlx5e_xsk_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
-{
- struct mlx5e_rq *rq = container_of(zca, struct mlx5e_rq, zca);
-
- mlx5e_xsk_recycle_frame(rq, handle);
-}
-
static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
u32 cqe_bcnt)
{
@@ -90,11 +29,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 head_offset,
u32 page_idx)
{
- struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
- u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom;
+ struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
u32 cqe_bcnt32 = cqe_bcnt;
- void *va, *data;
- u32 frag_size;
bool consumed;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -103,22 +39,20 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
return NULL;
}
- /* head_offset is not used in this function, because di->xsk.data and
- * di->addr point directly to the necessary place. Furthermore, in the
- * current implementation, UMR pages are mapped to XSK frames, so
+ /* head_offset is not used in this function, because xdp->data and the
+ * DMA address point directly to the necessary place. Furthermore, in
+ * the current implementation, UMR pages are mapped to XSK frames, so
* head_offset should always be 0.
*/
WARN_ON_ONCE(head_offset);
- va = di->xsk.data;
- data = va + rx_headroom;
- frag_size = rq->buff.headroom + cqe_bcnt32;
-
- dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL);
- prefetch(data);
+ xdp->data_end = xdp->data + cqe_bcnt32;
+ xdp_set_data_meta_invalid(xdp);
+ xsk_buff_dma_sync_for_cpu(xdp);
+ prefetch(xdp->data);
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, true);
+ consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
rcu_read_unlock();
/* Possible flows:
@@ -145,7 +79,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt32);
+ return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
@@ -153,25 +87,20 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
- struct mlx5e_dma_info *di = wi->di;
- u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom;
- void *va, *data;
+ struct xdp_buff *xdp = wi->di->xsk;
bool consumed;
- u32 frag_size;
- /* wi->offset is not used in this function, because di->xsk.data and
- * di->addr point directly to the necessary place. Furthermore, in the
- * current implementation, one page = one packet = one frame, so
+ /* wi->offset is not used in this function, because xdp->data and the
+ * DMA address point directly to the necessary place. Furthermore, the
+ * XSK allocator allocates frames per packet, instead of pages, so
* wi->offset should always be 0.
*/
WARN_ON_ONCE(wi->offset);
- va = di->xsk.data;
- data = va + rx_headroom;
- frag_size = rq->buff.headroom + cqe_bcnt;
-
- dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL);
- prefetch(data);
+ xdp->data_end = xdp->data + cqe_bcnt;
+ xdp_set_data_meta_invalid(xdp);
+ xsk_buff_dma_sync_for_cpu(xdp);
+ prefetch(xdp->data);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
@@ -179,7 +108,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
}
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, true);
+ consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
rcu_read_unlock();
if (likely(consumed))
@@ -189,5 +118,5 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
* will be handled by mlx5e_put_rx_frag.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt);
+ return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
}
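Condensed, the per-packet XSK RX flow after this conversion reduces to a few steps. The following is a sketch assembled from the hunks above, not a new function in the patch:

/* Sketch: XSK RX fast path with pool-managed buffers. The xdp_buff comes
 * pre-initialized from xsk_buff_alloc(), so the driver only trims it to
 * the received length, syncs it for CPU access, and runs XDP on it.
 */
xdp->data_end = xdp->data + cqe_bcnt;
xdp_set_data_meta_invalid(xdp);
xsk_buff_dma_sync_for_cpu(xdp);
prefetch(xdp->data);

consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
if (!consumed)	/* XDP_PASS: copy out of the UMEM into an SKB */
	skb = mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);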
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index cab0e93497ae..d147b2f13b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -5,16 +5,10 @@
#define __MLX5_EN_XSK_RX_H__
#include "en.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
/* RX data path */
-bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count);
-int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info);
-void mlx5e_xsk_page_release(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info);
-void mlx5e_xsk_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
u16 cqe_bcnt,
@@ -25,6 +19,23 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
+static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ dma_info->xsk = xsk_buff_alloc(rq->umem);
+ if (!dma_info->xsk)
+ return -ENOMEM;
+
+ /* Store the DMA address without headroom. In striding RQ case, we just
+ * provide pages for UMR, and headroom is counted at the setup stage
+ * when creating a WQE. In non-striding RQ case, headroom is accounted
+ * in mlx5e_alloc_rx_wqe.
+ */
+ dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
+
+ return 0;
+}
+
static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{
if (!xsk_umem_uses_need_wakeup(rq->umem))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 3bcdb5b2fc20..83dce9cdb8c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -5,7 +5,7 @@
#include "umem.h"
#include "en/xdp.h"
#include "en/params.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
@@ -92,12 +92,11 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break;
}
- xdptxd.dma_addr = xdp_umem_get_dma(umem, desc.addr);
- xdptxd.data = xdp_umem_get_data(umem, desc.addr);
+ xdptxd.dma_addr = xsk_buff_raw_get_dma(umem, desc.addr);
+ xdptxd.data = xsk_buff_raw_get_data(umem, desc.addr);
xdptxd.len = desc.len;
- dma_sync_single_for_device(sq->pdev, xdptxd.dma_addr,
- xdptxd.len, DMA_BIDIRECTIONAL);
+ xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len);
if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
if (sq->mpwqe.wqe)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
index 79b487d89757..39fa0a705856 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -5,7 +5,7 @@
#define __MLX5_EN_XSK_TX_H__
#include "en.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
/* TX data path */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
index 4baaa5788320..7b17fcd0a56d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include "umem.h"
#include "setup.h"
#include "en/params.h"
@@ -10,40 +10,14 @@ static int mlx5e_xsk_map_umem(struct mlx5e_priv *priv,
struct xdp_umem *umem)
{
struct device *dev = priv->mdev->device;
- u32 i;
- for (i = 0; i < umem->npgs; i++) {
- dma_addr_t dma = dma_map_page(dev, umem->pgs[i], 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(dev, dma)))
- goto err_unmap;
- umem->pages[i].dma = dma;
- }
-
- return 0;
-
-err_unmap:
- while (i--) {
- dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- umem->pages[i].dma = 0;
- }
-
- return -ENOMEM;
+ return xsk_buff_dma_map(umem, dev, 0);
}
static void mlx5e_xsk_unmap_umem(struct mlx5e_priv *priv,
struct xdp_umem *umem)
{
- struct device *dev = priv->mdev->device;
- u32 i;
-
- for (i = 0; i < umem->npgs; i++) {
- dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- umem->pages[i].dma = 0;
- }
+ return xsk_buff_dma_unmap(umem, 0);
}
static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
@@ -90,13 +64,14 @@ static void mlx5e_xsk_remove_umem(struct mlx5e_xsk *xsk, u16 ix)
static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem)
{
- return umem->headroom <= 0xffff && umem->chunk_size_nohr <= 0xffff;
+ return xsk_umem_get_headroom(umem) <= 0xffff &&
+ xsk_umem_get_chunk_size(umem) <= 0xffff;
}
void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk)
{
- xsk->headroom = umem->headroom;
- xsk->chunk_size = umem->chunk_size_nohr + umem->headroom;
+ xsk->headroom = xsk_umem_get_headroom(umem);
+ xsk->chunk_size = xsk_umem_get_chunk_size(umem);
}
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
@@ -241,18 +216,6 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
mlx5e_xsk_disable_umem(priv, ix);
}
-int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries)
-{
- struct xdp_umem_fq_reuse *reuseq;
-
- reuseq = xsk_reuseq_prepare(nentries);
- if (unlikely(!reuseq))
- return -ENOMEM;
- xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
- return 0;
-}
-
u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk)
{
u16 res = xsk->refcnt ? params->num_channels : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 3022463f2284..fac145dcf2ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -42,6 +42,8 @@
#include "en/txrx.h"
#if IS_ENABLED(CONFIG_GENEVE)
+#include <net/geneve.h>
+
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
return mlx5_tx_swp_supported(mdev);
@@ -100,33 +102,49 @@ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
udp_hdr(skb)->len = htons(payload_len);
}
-static inline struct sk_buff *
-mlx5e_accel_handle_tx(struct sk_buff *skb,
- struct mlx5e_txqsq *sq,
- struct net_device *dev,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+struct mlx5e_accel_tx_state {
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5e_accel_tx_tls_state tls;
+#endif
+};
+
+static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *state)
{
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ mlx5e_udp_gso_handle_tx_skb(skb);
+
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
- skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
- if (unlikely(!skb))
- return NULL;
+ /* May send SKBs and WQEs. */
+ if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+ return false;
}
#endif
+ return true;
+}
+
+static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
- skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
- if (unlikely(!skb))
- return NULL;
+ if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
+ return false;
}
#endif
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- mlx5e_udp_gso_handle_tx_skb(skb);
-
- return skb;
+ return true;
}
#endif /* __MLX5E_EN_ACCEL_H__ */
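The accel TX refactor splits the old single hook into a begin stage, which may consume or reshape the SKB before any WQE is fetched, and a finish stage, which patches the already-fetched WQE. A sketch of the call order a transmit routine is expected to follow (hypothetical caller; 'pi' would come from mlx5e_txqsq_get_next_pi()):

/* Sketch: threading the split accel hooks through a TX routine. */
struct mlx5e_accel_tx_state state = {};
struct mlx5e_tx_wqe *wqe;

if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &state)))
	return NETDEV_TX_OK;	/* TLS consumed or dropped the skb */

wqe = MLX5E_TX_FETCH_WQE(sq, pi);

if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &state)))
	return NETDEV_TX_OK;	/* IPsec dropped the skb */

/* ... build the rest of the WQE and ring the doorbell ... */

Splitting the hooks this way lets the TLS state captured in the begin stage be applied to the control segment only once the WQE actually exists.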
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 29626c6c9c25..92eb3bad4acd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -75,18 +75,23 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
return ret;
}
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
+static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry,
+ unsigned int handle)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5e_ipsec_sa_entry *_sa_entry;
unsigned long flags;
- int ret;
- ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
- if (ret < 0)
- return ret;
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
+ if (_sa_entry->handle == handle) {
+ rcu_read_unlock();
+ return -EEXIST;
+ }
+ rcu_read_unlock();
spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
- sa_entry->handle = ret;
+ sa_entry->handle = handle;
hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
@@ -103,15 +108,6 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}
-static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
- struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-
- /* xfrm already doing sync rcu between del and free callbacks */
-
- ida_simple_remove(&ipsec->halloc, sa_entry->handle);
-}
-
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct xfrm_replay_state_esn *replay_esn;
@@ -199,6 +195,14 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
MLX5_ACCEL_ESP_FLAGS_TUNNEL;
+
+ /* spi */
+ attrs->spi = x->id.spi;
+
+ /* source and destination IPs */
+ memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
+ memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
+ attrs->is_ipv6 = (x->props.family != AF_INET);
}
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -284,8 +288,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
struct net_device *netdev = x->xso.dev;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv;
- __be32 saddr[4] = {0}, daddr[4] = {0}, spi;
- bool is_ipv6 = false;
+ unsigned int sa_handle;
int err;
priv = netdev_priv(netdev);
@@ -303,20 +306,6 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
sa_entry->x = x;
sa_entry->ipsec = priv->ipsec;
- /* Add the SA to handle processed incoming packets before the add SA
- * completion was received
- */
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
- err = mlx5e_ipsec_sadb_rx_add(sa_entry);
- if (err) {
- netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
- goto err_entry;
- }
- } else {
- sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
- mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
- }
-
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -327,41 +316,38 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
- goto err_sadb_rx;
+ goto err_sa_entry;
}
/* create hw context */
- if (x->props.family == AF_INET) {
- saddr[3] = x->props.saddr.a4;
- daddr[3] = x->id.daddr.a4;
- } else {
- memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
- memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
- is_ipv6 = true;
- }
- spi = x->id.spi;
sa_entry->hw_context =
mlx5_accel_esp_create_hw_context(priv->mdev,
sa_entry->xfrm,
- saddr, daddr, spi,
- is_ipv6);
+ &sa_handle);
if (IS_ERR(sa_entry->hw_context)) {
err = PTR_ERR(sa_entry->hw_context);
goto err_xfrm;
}
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
+ if (err)
+ goto err_hw_ctx;
+ } else {
+ sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
+ mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
+ }
+
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
+err_hw_ctx:
+ mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
err_xfrm:
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
-err_sadb_rx:
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
- mlx5e_ipsec_sadb_rx_del(sa_entry);
- mlx5e_ipsec_sadb_rx_free(sa_entry);
- }
-err_entry:
+err_sa_entry:
kfree(sa_entry);
+
out:
return err;
}
@@ -390,9 +376,6 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
}
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
- mlx5e_ipsec_sadb_rx_free(sa_entry);
-
kfree(sa_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 93bf10e6508c..c85151a1e008 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -109,11 +109,6 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_count(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data);
-
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
@@ -136,26 +131,6 @@ static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
}
-static inline int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
-{
- return 0;
-}
-
-static inline int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv,
- uint8_t *data)
-{
- return 0;
-}
-
-static inline void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
-{
-}
-
-static inline int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
-{
- return 0;
-}
-
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 0dd17514caae..824b87ac8f9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -233,11 +233,10 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
ntohs(mdata->content.tx.seq));
}
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_tx_wqe *wqe,
- struct sk_buff *skb)
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+ struct mlx5_wqe_eth_seg *eseg,
+ struct sk_buff *skb)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_metadata *mdata;
struct mlx5e_ipsec_sa_entry *sa_entry;
@@ -245,7 +244,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct sec_path *sp;
if (!xo)
- return skb;
+ return true;
sp = skb_sec_path(skb);
if (unlikely(sp->len != 1)) {
@@ -276,16 +275,16 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
goto drop;
}
- mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+ mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
mlx5e_ipsec_set_metadata(skb, mdata, xo);
- return skb;
+ return true;
drop:
kfree_skb(skb);
- return NULL;
+ return false;
}
static inline struct xfrm_state *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index db84500b024f..ba02643586a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -52,9 +52,9 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_tx_wqe *wqe,
- struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+ struct mlx5_wqe_eth_seg *eseg,
+ struct sk_buff *skb);
#endif /* CONFIG_MLX5_EN_IPSEC */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 6fea59223dc4..6c5c54bcd9be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -38,6 +38,7 @@
#include "accel/ipsec.h"
#include "fpga/sdk.h"
#include "en_accel/ipsec.h"
+#include "fpga/ipsec.h"
static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
@@ -73,61 +74,74 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
-#define NUM_IPSEC_COUNTERS (NUM_IPSEC_HW_COUNTERS + NUM_IPSEC_SW_COUNTERS)
-
-int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
- if (!priv->ipsec)
- return 0;
-
- return NUM_IPSEC_COUNTERS;
+ return NUM_IPSEC_SW_COUNTERS;
}
-int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data)
-{
- unsigned int i, idx = 0;
+static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
- if (!priv->ipsec)
- return 0;
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_sw)
+{
+ unsigned int i;
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
+ if (priv->ipsec)
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_sw_stats_desc[i].format);
+ return idx;
+}
- for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_sw_stats_desc[i].format);
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
+{
+ int i;
- return NUM_IPSEC_COUNTERS;
+ if (priv->ipsec)
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
+ mlx5e_ipsec_sw_stats_desc, i);
+ return idx;
}
-void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
- int ret;
+ return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+}
- if (!priv->ipsec)
- return;
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
+{
+ int ret = 0;
- ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
- NUM_IPSEC_HW_COUNTERS);
+ if (priv->ipsec)
+ ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
+ NUM_IPSEC_HW_COUNTERS);
if (ret)
memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
}
-int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
{
- int i, idx = 0;
-
- if (!priv->ipsec)
- return 0;
+ unsigned int i;
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
- mlx5e_ipsec_hw_stats_desc, i);
+ if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_hw_stats_desc[i].format);
- for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
- mlx5e_ipsec_sw_stats_desc, i);
+ return idx;
+}
- return NUM_IPSEC_COUNTERS;
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
+{
+ int i;
+
+ if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
+ mlx5e_ipsec_hw_stats_desc,
+ i);
+ return idx;
}
+
+MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
+MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
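The rewrite above folds the IPsec counters into the generic stats-group framework. The MLX5E_DECLARE_STATS_GRP_OP_* macros evidently supply the whole function signature, which is why the bodies can use data and idx without declaring them. A rough stand-in for how such macros work (names and expansions are illustrative, not copied from en_stats.h):

#include <stdio.h>
#include <string.h>

struct priv { int have_feature; };

#define GSTRING_LEN 32

/* the macro provides the full signature, including data and idx */
#define DECLARE_OP_NUM_STATS(grp) \
	static int grp##_num_stats(struct priv *priv)
#define DECLARE_OP_FILL_STRS(grp) \
	static int grp##_fill_strs(struct priv *priv, char *data, int idx)

DECLARE_OP_NUM_STATS(demo)
{
	return priv->have_feature ? 2 : 0;
}

DECLARE_OP_FILL_STRS(demo)
{
	if (priv->have_feature) {
		strcpy(data + (idx++) * GSTRING_LEN, "demo_stat_a");
		strcpy(data + (idx++) * GSTRING_LEN, "demo_stat_b");
	}
	return idx;	/* groups chain by passing idx forward */
}

int main(void)
{
	struct priv p = { .have_feature = 1 };
	char strs[2 * GSTRING_LEN] = "";
	int n = demo_fill_strs(&p, strs, 0);

	printf("%d of %d strings, first: %s\n", n, demo_num_stats(&p), strs);
	return 0;
}

Because every op returns the running idx, the core can concatenate any number of groups without a hard-coded NUM_*_COUNTERS total, which is exactly what the removal of NUM_IPSEC_COUNTERS above relies on.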
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 46725cd743a3..452fcf59c36b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -4,6 +4,19 @@
#include "en.h"
#include "en_accel/ktls.h"
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq)
+{
+ u16 num_dumps, stop_room = 0;
+
+ num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
+
+ stop_room += mlx5e_stop_room_for_wqe(MLX5E_KTLS_STATIC_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(MLX5E_KTLS_PROGRESS_WQEBBS);
+ stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);
+
+ return stop_room;
+}
+
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
@@ -69,8 +82,8 @@ static void mlx5e_ktls_del(struct net_device *netdev,
struct mlx5e_ktls_offload_context_tx *tx_priv =
mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
- mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
+ mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
kvfree(tx_priv);
}
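The new mlx5e_ktls_get_stop_room() budgets, per packet, the worst case of one static WQE, one progress WQE, and one DUMP WQE per possible fragment. A sketch of the accounting, under the assumption (consistent with the wrap-handling removals later in this patch) that mlx5e_stop_room_for_wqe() reserves room for ring-edge NOP padding as well as the WQE itself:

#include <stdio.h>

#define STATIC_WQEBBS	2	/* sizes assumed, illustration only */
#define PROGRESS_WQEBBS	1
#define DUMP_WQEBBS	1

/* a WQE of N WQEBBs may first need up to N-1 NOP WQEBBs so that it
 * does not wrap the ring edge; reserving 2*N - 1 is always safe */
static unsigned short stop_room_for_wqe(unsigned short wqebbs)
{
	return 2 * wqebbs - 1;
}

static unsigned short ktls_stop_room(unsigned short num_dumps)
{
	unsigned short room = 0;

	room += stop_room_for_wqe(STATIC_WQEBBS);
	room += stop_room_for_wqe(PROGRESS_WQEBBS);
	room += num_dumps * stop_room_for_wqe(DUMP_WQEBBS);
	return room;
}

int main(void)
{
	printf("stop room for 14 dumps: %u WQEBBs\n", ktls_stop_room(14));
	return 0;
}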
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 63116be6b1d6..c6180892cfcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -9,6 +9,7 @@
#ifdef CONFIG_MLX5_EN_TLS
#include <net/tls.h>
#include "accel/tls.h"
+#include "en_accel/tls_rxtx.h"
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
@@ -27,6 +28,14 @@ struct mlx5e_dump_wqe {
struct mlx5_wqe_data_seg data;
};
+#define MLX5E_TLS_FETCH_UMR_WQE(sq, pi) \
+ ((struct mlx5e_umr_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_STATIC_UMR_WQE_SZ))
+#define MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi) \
+ ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_PROGRESS_WQE_SZ))
+#define MLX5E_TLS_FETCH_DUMP_WQE(sq, pi) \
+ ((struct mlx5e_dump_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, \
+ sizeof(struct mlx5e_dump_wqe)))
+
#define MLX5E_KTLS_DUMP_WQEBBS \
(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
@@ -87,22 +96,22 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe, u16 *pi);
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq);
+
static inline u8
-mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
- unsigned int sync_len)
+mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags,
+ unsigned int sync_len)
{
/* Given the MTU and sync_len, calculates an upper bound for the
- * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
+ * number of DUMP WQEs needed for the TX resync of a record.
*/
- return MLX5E_KTLS_DUMP_WQEBBS *
- (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
+ return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu);
}
#else
@@ -114,7 +123,6 @@ static inline void
mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc) {}
-
#endif
#endif /* __MLX5E_TLS_H__ */
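The rename from mlx5e_ktls_dumps_num_wqebbs() to mlx5e_ktls_dumps_num_wqes() reflects that the multiplication by MLX5E_KTLS_DUMP_WQEBBS moved into the stop-room computation; the helper now counts DUMP WQEs only. The bound itself is simple arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* upper bound on DUMP WQEs for one record resync: one per existing
 * fragment plus one per hw_mtu-sized chunk of the sync length */
static unsigned int dumps_num_wqes(unsigned int nfrags,
				   unsigned int sync_len,
				   unsigned int hw_mtu)
{
	return nfrags + DIV_ROUND_UP(sync_len, hw_mtu);
}

int main(void)
{
	/* e.g. 3 frags, 16 KB left to resync, 1500-byte MTU: 3 + 11 */
	printf("%u DUMP WQEs\n", dumps_num_wqes(3, 16384, 1500));
	return 0;
}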
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 52a56622034a..3cd78d9503c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -108,10 +108,11 @@ static void tx_fill_wi(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = num_wqebbs;
- wi->num_bytes = num_bytes;
- wi->resync_dump_frag_page = page;
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_bytes = num_bytes,
+ .resync_dump_frag_page = page,
+ };
}
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -134,13 +135,14 @@ post_static_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
+ u16 pi, num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS;
struct mlx5e_umr_wqe *umr_wqe;
- u16 pi;
- umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
+ umr_wqe = MLX5E_TLS_FETCH_UMR_WQE(sq, pi);
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
- sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
+ tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+ sq->pc += num_wqebbs;
}
static void
@@ -148,13 +150,14 @@ post_progress_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
+ u16 pi, num_wqebbs = MLX5E_KTLS_PROGRESS_WQEBBS;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
- wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
+ wqe = MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi);
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
- sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
+ tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+ sq->pc += num_wqebbs;
}
static void
@@ -163,14 +166,6 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
bool skip_static_post, bool fence_first_post)
{
bool progress_fence = skip_static_post || !fence_first_post;
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 contig_wqebbs_room, pi;
-
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room <
- MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
if (!skip_static_post)
post_static_params(sq, priv_tx, fence_first_post);
@@ -278,7 +273,9 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
int fsz;
u16 pi;
- wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+ BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -343,11 +340,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
u32 seq)
{
struct mlx5e_sq_stats *stats = sq->stats;
- struct mlx5_wq_cyc *wq = &sq->wq;
enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
- u16 contig_wqebbs_room, pi;
- u8 num_wqebbs;
int i = 0;
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
@@ -376,13 +370,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
return MLX5E_KTLS_SYNC_DONE;
}
- num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
-
- if (unlikely(contig_wqebbs_room < num_wqebbs))
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
-
for (; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
@@ -422,34 +409,18 @@ err_out:
return MLX5E_KTLS_SYNC_FAIL;
}
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe, u16 *pi)
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
- struct mlx5_wqe_ctrl_seg *cseg;
- struct tls_context *tls_ctx;
- int datalen;
u32 seq;
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
-
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
- if (!datalen)
- goto out;
-
- tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
- goto err_out;
-
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
stats->tls_ctx++;
}
@@ -460,30 +431,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
/* fall-through */
- default: /* MLX5E_KTLS_SYNC_FAIL */
+ case MLX5E_KTLS_SYNC_FAIL:
goto err_out;
}
}
priv_tx->expected_seq = seq + datalen;
- cseg = &(*wqe)->ctrl;
- cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
+ state->tls_tisn = priv_tx->tisn;
stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
stats->tls_encrypted_bytes += datalen;
out:
- return skb;
+ return true;
err_out:
dev_kfree_skb_any(skb);
- return NULL;
+ return false;
}
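tx_fill_wi() above switches from memset()-plus-stores to assignment from a compound literal; in C, members not named in the initializer are zero-initialized, so the two forms are equivalent while the latter is a single statement. A self-contained demonstration (struct layout invented):

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct tx_wqe_info {		/* layout invented for the demo */
	void *page;
	unsigned int num_bytes;
	unsigned char num_wqebbs;
	unsigned char spare[3];
};

int main(void)
{
	struct tx_wqe_info wi;

	memset(&wi, 0xff, sizeof(wi));		/* dirty every byte */

	/* members not named in a compound literal are zero-initialized,
	 * so this one assignment replaces memset() + field stores */
	wi = (struct tx_wqe_info) {
		.num_wqebbs = 1,
		.num_bytes = 64,
	};

	assert(wi.page == NULL && wi.spare[0] == 0 && wi.num_wqebbs == 1);
	return 0;
}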
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index fba561ffe1d4..1fbb5a90cb38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -167,7 +167,7 @@ static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_rx *rx_ctx;
- u64 rcd_sn = *(u64 *)rcd_sn_data;
+ __be64 rcd_sn = *(__be64 *)rcd_sn_data;
if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
return -EINVAL;
@@ -240,3 +240,17 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
kfree(tls);
priv->tls = NULL;
}
+
+u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+
+ if (!mlx5_accel_is_tls_device(mdev))
+ return 0;
+
+ if (MLX5_CAP_GEN(mdev, tls_tx))
+ return mlx5e_ktls_get_stop_room(sq);
+
+ /* Resync SKB. */
+ return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+}
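The one-line rcd_sn change in mlx5e_tls_resync() is a type-annotation fix: the record sequence number arrives in network byte order, and declaring it __be64 lets sparse flag any use that forgets to convert. A userspace stand-in for what the annotation guards against:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t be64;	/* the kernel would use __be64 here */

static uint64_t be64_to_cpu_stub(be64 v)
{
	const unsigned char *p = (const unsigned char *)&v;
	uint64_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r = (r << 8) | p[i];
	return r;
}

int main(void)
{
	unsigned char wire[8] = { 0, 0, 0, 0, 0, 0, 1, 2 };	/* 258 */
	be64 raw;

	memcpy(&raw, wire, sizeof(raw));
	/* using raw directly would be wrong on little-endian hosts */
	printf("%llu\n", (unsigned long long)be64_to_cpu_stub(raw));
	return 0;
}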
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index 9015f3f7792d..9219bdb2786e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -94,6 +94,8 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv);
int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
+u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq);
+
#else
static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
@@ -108,6 +110,11 @@ static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
+static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+{
+ return 0;
+}
+
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index ef1ed15a53b4..05454a843b28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -184,18 +184,17 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
nskb->queue_mapping = skb->queue_mapping;
}
-static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
- struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi,
- struct mlx5e_tls *tls)
+static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
+ struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ struct mlx5e_tx_wqe *wqe;
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
+ u16 pi;
int i;
sq->stats->tls_ooo++;
@@ -217,7 +216,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
if (likely(payload <= -info.sync_len))
/* SKB payload doesn't require offload
*/
- return skb;
+ return true;
atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
goto err_out;
@@ -247,20 +246,19 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
- return skb;
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ mlx5e_sq_xmit(sq, nskb, wqe, pi, true);
+
+ return true;
err_out:
dev_kfree_skb_any(skb);
- return NULL;
+ return false;
}
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_tx *context;
@@ -269,41 +267,45 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
int datalen;
u32 skb_seq;
- if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
- skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
- goto out;
- }
-
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
+ return true;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
- goto out;
+ return true;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != netdev))
- goto out;
+ if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ goto err_out;
+
+ if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
+ return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
skb_seq = ntohl(tcp_hdr(skb)->seq);
context = mlx5e_get_tls_tx_context(tls_ctx);
expected_seq = context->expected_seq;
- if (unlikely(expected_seq != skb_seq)) {
- skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
- goto out;
- }
+ if (unlikely(expected_seq != skb_seq))
+ return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
dev_kfree_skb_any(skb);
- skb = NULL;
- goto out;
+ return false;
}
context->expected_seq = skb_seq + datalen;
-out:
- return skb;
+ return true;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tisn = cpu_to_be32(state->tls_tisn << 8);
}
static int tls_update_resync_sn(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 90bc1f2384c8..a50d0394df0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -40,11 +40,14 @@
#include "en.h"
#include "en/txrx.h"
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi);
+struct mlx5e_accel_tx_tls_state {
+ u32 tls_tisn;
+};
+
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
u32 *cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index f7890e0ce96c..1e42c7ae621b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -36,12 +36,11 @@
* Global resources are common to all the netdevices created on the same NIC.
*/
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir, u32 *in, int inlen)
+int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
{
int err;
- err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
+ err = mlx5_core_create_tir(mdev, in, &tir->tirn);
if (err)
return err;
@@ -142,10 +141,12 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
memset(res, 0, sizeof(*res));
}
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+ bool enable_mc_lb)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
+ u8 lb_flags = 0;
int err = 0;
u32 tirn = 0;
int inlen;
@@ -159,15 +160,20 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
}
if (enable_uc_lb)
- MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
+ lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+ if (enable_mc_lb)
+ lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ if (lb_flags)
+ MLX5_SET(modify_tir_in, in, ctx.self_lb_block, lb_flags);
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
tirn = tir->tirn;
- err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+ err = mlx5_core_modify_tir(mdev, tirn, in);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 47874d34156b..bc102d094bbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -35,6 +35,8 @@
#include "en/port.h"
#include "en/port_buffer.h"
+#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
+
#define MLX5E_100MB (100000)
#define MLX5E_1GB (1000000)
@@ -49,6 +51,12 @@ enum {
MLX5E_LOWEST_PRIO_GROUP = 0,
};
+enum {
+ MLX5_DCB_CHG_RESET,
+ MLX5_DCB_NO_CHG,
+ MLX5_DCB_CHG_NO_RESET,
+};
+
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
MLX5_CAP_QCAM_REG(mdev, qpts) && \
MLX5_CAP_QCAM_REG(mdev, qpdpm))
@@ -238,7 +246,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
* Report both group #0 and #1 as ETS type.
* All the tcs in group #0 will be reported with 0% BW.
*/
-int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
+static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
@@ -977,7 +985,7 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
return err;
}
-const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
+static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_getets = mlx5e_dcbnl_ieee_getets,
.ieee_setets = mlx5e_dcbnl_ieee_setets,
.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
@@ -1009,6 +1017,24 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.setpfcstate = mlx5e_dcbnl_setpfcstate,
};
+void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
+ netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+}
+
+void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (MLX5_CAP_GEN(mdev, qos))
+ netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+}
+
static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
enum mlx5_dcbx_oper_mode *mode)
{
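Making mlx5e_dcbnl_ops static and exporting mlx5e_dcbnl_build_netdev()/mlx5e_dcbnl_build_rep_netdev() lets the #ifdef CONFIG_MLX5_CORE_EN_DCB guards disappear from en_main.c later in this patch. Presumably the header pairs the prototypes with empty inline stubs, in the usual kernel pattern (a sketch, not the actual header):

struct net_device;
struct mlx5e_priv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
void mlx5e_dcbnl_build_netdev(struct net_device *netdev);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
#else
static inline void mlx5e_dcbnl_build_netdev(struct net_device *netdev) {}
static inline void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) {}
#endif

Callers compile unchanged in both configurations, and the dead calls vanish at -O2 when the option is off.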
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 6d703ddee4e2..3ef2525e8de9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -432,7 +432,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
*cur_params = new_channels.params;
- mlx5e_num_channels_changed(priv);
+ err = mlx5e_num_channels_changed(priv);
goto out;
}
@@ -527,8 +527,8 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
+ bool reset_rx, reset_tx;
int err = 0;
- bool reset;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
return -EOPNOTSUPP;
@@ -566,15 +566,28 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) ||
- (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled);
+ reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
+ reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
- if (!reset) {
+ if (!reset_rx && !reset_tx) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
goto out;
}
+ if (reset_rx) {
+ u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ MLX5E_PFLAG_RX_CQE_BASED_MODER);
+
+ mlx5e_reset_rx_moderation(&new_channels.params, mode);
+ }
+ if (reset_tx) {
+ u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ MLX5E_PFLAG_TX_CQE_BASED_MODER);
+
+ mlx5e_reset_tx_moderation(&new_channels.params, mode);
+ }
+
err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
out:
@@ -665,11 +678,12 @@ static const u32 pplm_fec_2_ethtool_linkmodes[] = {
static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- u_long active_fec = 0;
+ unsigned long active_fec_long;
+ u32 active_fec;
u32 bitn;
int err;
- err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL);
+ err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
if (err)
return (err == -EOPNOTSUPP) ? 0 : err;
@@ -682,10 +696,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1,
ETHTOOL_LINK_MODE_FEC_LLRS_BIT);
+ active_fec_long = active_fec;
/* active_fec is a bitfield with a single bit set; find which bit
* it is and advertise the corresponding ethtool link mode bit
*/
- bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE);
+ bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE);
if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes))
__set_bit(pplm_fec_2_ethtool_linkmodes[bitn],
link_ksettings->link_modes.advertising);
@@ -1204,7 +1219,7 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
}
if (hash_changed)
- mlx5e_modify_tirs_hash(priv, in, inlen);
+ mlx5e_modify_tirs_hash(priv, in);
mutex_unlock(&priv->state_lock);
@@ -1517,8 +1532,8 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u16 fec_configured = 0;
- u32 fec_active = 0;
+ u16 fec_configured;
+ u32 fec_active;
int err;
err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
@@ -1526,14 +1541,14 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
if (err)
return err;
- fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active,
- sizeof(u32) * BITS_PER_BYTE);
+ fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active,
+ sizeof(unsigned long) * BITS_PER_BYTE);
if (!fecparam->active_fec)
return -EOPNOTSUPP;
- fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
- sizeof(u16) * BITS_PER_BYTE);
+ fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured,
+ sizeof(unsigned long) * BITS_PER_BYTE);
return 0;
}
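The FEC hunks above fix a word-width bug: the old code cast a u32's address to unsigned long * for find_first_bit(), which reads past the 4-byte object on 64-bit kernels (and scans the wrong half on big-endian ones). Copying the value into a real unsigned long fixes both. A small stand-in showing the corrected pattern:

#include <stdio.h>

/* minimal find_first_bit stand-in: lowest set bit index, or size */
static unsigned int find_first_bit_stub(const unsigned long *addr,
					unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (addr[i / (8 * sizeof(long))] &
		    (1UL << (i % (8 * sizeof(long)))))
			return i;
	return size;
}

int main(void)
{
	unsigned int active_fec = 1u << 5;		/* one FEC bit set */
	unsigned long active_fec_long = active_fec;	/* widen by value */

	/* the old code cast &active_fec (4 bytes) to unsigned long *,
	 * making find_first_bit read 8 bytes on 64-bit kernels */
	printf("bit %u\n",
	       find_first_bit_stub(&active_fec_long,
				   8 * sizeof(active_fec_long)));
	return 0;
}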
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3bc2ac3d53fc..83c9b2bbc4af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -858,7 +858,7 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
goto out;
priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
- mlx5e_modify_tirs_hash(priv, in, inlen);
+ mlx5e_modify_tirs_hash(priv, in);
out:
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b314adf438da..a836a02a2116 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -38,7 +38,7 @@
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
#include "en/txrx.h"
@@ -66,7 +66,6 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
-
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -233,7 +232,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- cseg->imm = rq->mkey_be;
+ cseg->umr_mkey = rq->mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
ucseg->xlt_octowords =
@@ -374,7 +373,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = c->mdev;
void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
- u32 num_xsk_frames = 0;
u32 rq_xdp_ix;
u32 pool_size;
int wq_sz;
@@ -414,7 +412,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
- rq->buff.umem_headroom = xsk ? xsk->headroom : 0;
pool_size = 1 << params->log_rq_mtu_frames;
switch (rq->wq_type) {
@@ -428,10 +425,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- if (xsk)
- num_xsk_frames = wq_sz <<
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
-
pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
mlx5e_mpwqe_get_log_rq_size(params, xsk);
@@ -462,6 +455,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
+
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_wq_destroy;
@@ -481,10 +476,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
- if (xsk)
- num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags;
-
rq->wqe.info = rqp->frags_info;
+ rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
+
rq->wqe.frags =
kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
(wq_sz << rq->wqe.info.log_num_frags)),
@@ -522,17 +516,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
}
if (xsk) {
- err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames);
- if (unlikely(err)) {
- mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n",
- num_xsk_frames);
- goto err_free;
- }
-
- rq->zca.free = mlx5e_xsk_zca_free;
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &rq->zca);
+ MEM_TYPE_XSK_BUFF_POOL, NULL);
+ xsk_buff_set_rxq_info(rq->umem, &rq->xdp_rxq);
} else {
/* Create a page_pool and register it with rxq */
pp_params.order = 0;
@@ -721,7 +707,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
- err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
@@ -752,7 +738,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
MLX5_SET(rqc, rqc, scatter_fcs, enable);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
- err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
@@ -781,7 +767,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
MLX5_SET(rqc, rqc, vsd, vsd);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
- err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
@@ -1027,17 +1013,17 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
- kvfree(sq->db.ico_wqe);
+ kvfree(sq->db.wqe_info);
}
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ size_t size;
- sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
- sizeof(*sq->db.ico_wqe)),
- GFP_KERNEL, numa);
- if (!sq->db.ico_wqe)
+ size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
+ sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
+ if (!sq->db.wqe_info)
return -ENOMEM;
return 0;
@@ -1116,6 +1102,22 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
return 0;
}
+static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
+{
+ int sq_size = 1 << log_sq_size;
+
+ sq->stop_room = mlx5e_tls_get_stop_room(sq);
+ sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
+ if (WARN_ON(sq->stop_room >= sq_size)) {
+ netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
+ sq->stop_room, sq_size);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
@@ -1140,20 +1142,16 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
- sq->stop_room = MLX5E_SQ_STOP_ROOM;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
-#ifdef CONFIG_MLX5_EN_TLS
- if (mlx5_accel_is_tls_device(c->priv->mdev)) {
+ if (mlx5_accel_is_tls_device(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
- sq->stop_room += MLX5E_SQ_TLS_ROOM +
- mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
- TLS_MAX_PAYLOAD_SIZE);
- }
-#endif
+ err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
+ if (err)
+ return err;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1259,7 +1257,7 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
}
- err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
+ err = mlx5_core_modify_sq(mdev, sqn, in);
kvfree(in);
@@ -1364,13 +1362,12 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
/* last doorbell out, godspeed .. */
if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe_info *wi;
struct mlx5e_tx_wqe *nop;
- wi = &sq->db.wqe_info[pi];
+ sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = 1;
nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
}
@@ -1482,20 +1479,21 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
/* Pre initialize fixed WQE fields */
for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i];
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg;
+ sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = 1,
+ .num_pkts = 1,
+ };
+
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
dseg->lkey = sq->mkey_be;
-
- wi->num_wqebbs = 1;
- wi->num_pkts = 1;
}
}
@@ -2698,7 +2696,7 @@ static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
ttconfig->rx_hash_fields = rx_hash_fields;
}
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in)
{
void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -2714,10 +2712,11 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
mlx5e_update_rx_hash_fields(&ttconfig, tt,
rss->rx_hash_fields[tt]);
mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
- mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
}
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ /* Verify that inner tirs resources were allocated */
+ if (!priv->inner_indir_tir[0].tirn)
return;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
@@ -2725,8 +2724,7 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
mlx5e_update_rx_hash_fields(&ttconfig, tt,
rss->rx_hash_fields[tt]);
mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
- mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
- inlen);
+ mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
}
}
@@ -2752,15 +2750,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
- inlen);
+ err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
if (err)
goto free_in;
}
for (ix = 0; ix < priv->max_nch; ix++) {
- err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
- in, inlen);
+ err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in);
if (err)
goto free_in;
}
@@ -2839,11 +2835,8 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
ETH_MAX_MTU);
}
-static void mlx5e_netdev_set_tcs(struct net_device *netdev)
+static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- int nch = priv->channels.params.num_channels;
- int ntc = priv->channels.params.num_tc;
int tc;
netdev_reset_tc(netdev);
@@ -2860,15 +2853,47 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
-static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count)
+static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
- int num_txqs = count * priv->channels.params.num_tc;
- int num_rxqs = count * priv->profile->rq_groups;
struct net_device *netdev = priv->netdev;
+ int num_txqs, num_rxqs, nch, ntc;
+ int old_num_txqs, old_ntc;
+ int err;
+
+ old_num_txqs = netdev->real_num_tx_queues;
+ old_ntc = netdev->num_tc;
+
+ nch = priv->channels.params.num_channels;
+ ntc = priv->channels.params.num_tc;
+ num_txqs = nch * ntc;
+ num_rxqs = nch * priv->profile->rq_groups;
+
+ mlx5e_netdev_set_tcs(netdev, nch, ntc);
+
+ err = netif_set_real_num_tx_queues(netdev, num_txqs);
+ if (err) {
+ netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+ goto err_tcs;
+ }
+ err = netif_set_real_num_rx_queues(netdev, num_rxqs);
+ if (err) {
+ netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
+ goto err_txqs;
+ }
+
+ return 0;
- mlx5e_netdev_set_tcs(netdev);
- netif_set_real_num_tx_queues(netdev, num_txqs);
- netif_set_real_num_rx_queues(netdev, num_rxqs);
+err_txqs:
+ /* netif_set_real_num_rx_queues could fail only when nch increased. Only
+ * one of nch and ntc is changed in this function. That means the call
+ * to netif_set_real_num_tx_queues below should not fail, because it
+ * decreases the number of TX queues.
+ */
+ WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
+
+err_tcs:
+ mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc);
+ return err;
}
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
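The error handling added to mlx5e_update_netdev_queues() follows the standard unwind-label pattern: each fallible step jumps to a label that rolls back everything done before it, restoring the old queue counts. A stripped-down sketch of the shape (stub functions, failure forced for demonstration):

#include <stdio.h>

static int set_tx(int n) { printf("tx queues = %d\n", n); return 0; }
static int set_rx(int n) { (void)n; return -1; }	/* force a failure */

static int update_queues(int new_tx, int new_rx, int old_tx)
{
	int err;

	err = set_tx(new_tx);
	if (err)
		return err;

	err = set_rx(new_rx);
	if (err)
		goto err_txqs;

	return 0;

err_txqs:
	/* roll back; shrinking the TX count back cannot fail */
	set_tx(old_tx);
	return err;
}

int main(void)
{
	printf("update_queues: %d\n", update_queues(8, 8, 4));
	return 0;
}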
@@ -2895,8 +2920,12 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
+ int err;
+
+ err = mlx5e_update_netdev_queues(priv);
+ if (err)
+ return err;
- mlx5e_update_netdev_queues(priv, count);
mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
if (!netif_is_rxfh_configured(priv->netdev))
@@ -3214,7 +3243,7 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
- return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
+ return mlx5_core_create_tis(mdev, in, tisn);
}
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
@@ -3332,7 +3361,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
tir = &priv->indir_tir[tt];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_indir_tir_ctx(priv, tt, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ err = mlx5e_create_tir(priv->mdev, tir, in);
if (err) {
mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_inner_tirs;
@@ -3347,7 +3376,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
tir = &priv->inner_indir_tir[i];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ err = mlx5e_create_tir(priv->mdev, tir, in);
if (err) {
mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
goto err_destroy_inner_tirs;
@@ -3390,7 +3419,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
tir = &tirs[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ err = mlx5e_create_tir(priv->mdev, tir, in);
if (unlikely(err))
goto err_destroy_ch_tirs;
}
@@ -3408,14 +3437,15 @@ out:
return err;
}
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
int i;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
- if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ /* Verify that inner tirs resources were allocated */
+ if (!priv->inner_indir_tir[0].tirn)
return;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
@@ -3492,41 +3522,6 @@ out:
return err;
}
-#ifdef CONFIG_MLX5_ESWITCH
-static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct flow_cls_offload *cls_flower,
- unsigned long flags)
-{
- switch (cls_flower->command) {
- case FLOW_CLS_REPLACE:
- return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
- flags);
- case FLOW_CLS_DESTROY:
- return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
- flags);
- case FLOW_CLS_STATS:
- return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
- flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
- struct mlx5e_priv *priv = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-#endif
-
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3535,7 +3530,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
struct mlx5e_priv *priv = netdev_priv(dev);
switch (type) {
-#ifdef CONFIG_MLX5_ESWITCH
case TC_SETUP_BLOCK: {
struct flow_block_offload *f = type_data;
@@ -3545,7 +3539,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
mlx5e_setup_tc_block_cb,
priv, priv, true);
}
-#endif
case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(priv, type_data);
default:
@@ -3718,7 +3711,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3829,7 +3822,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
#endif
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
@@ -4714,7 +4707,7 @@ static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
if (params->tx_dim_enabled) {
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4723,13 +4716,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
} else {
params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
}
-
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
- params->tx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
if (params->rx_dim_enabled) {
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4738,7 +4727,19 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
} else {
params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
}
+}
+
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_tx_moderation(params, cq_period_mode);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ params->tx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_rx_moderation(params, cq_period_mode);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
@@ -4879,10 +4880,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->netdev_ops = &mlx5e_netdev_ops;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
- netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
-#endif
+ mlx5e_dcbnl_build_netdev(netdev);
netdev->watchdog_timeo = 15 * HZ;
@@ -5002,29 +5000,40 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
- if (err) {
- mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
- priv->q_counter = 0;
- }
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
+ if (!err)
+ priv->q_counter =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
- err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
- if (err) {
- mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
- priv->drop_rq_q_counter = 0;
- }
+ err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
+ if (!err)
+ priv->drop_rq_q_counter =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
- if (priv->q_counter)
- mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ if (priv->q_counter) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ priv->q_counter);
+ mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+ }
- if (priv->drop_rq_q_counter)
- mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
+ if (priv->drop_rq_q_counter) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ priv->drop_rq_q_counter);
+ mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+ }
}
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
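The q-counter hunks replace the dedicated mlx5_core_alloc_q_counter()/mlx5_core_dealloc_q_counter() helpers with direct MLX5_SET()/mlx5_cmd_exec_inout() calls; since the command layouts are autogenerated, a wrapper can derive both buffer sizes from the command name, and thin per-command helpers stop paying their way. A toy version of such a wrapper (macro body and opcode assumed, not copied from the driver):

#include <stdio.h>

struct alloc_q_counter_in  { unsigned int opcode; };
struct alloc_q_counter_out { unsigned int counter_set_id; };

static int cmd_exec(void *dev, void *in, unsigned long inlen,
		    void *out, unsigned long outlen)
{
	(void)dev; (void)in; (void)inlen; (void)outlen;
	/* pretend the device answered the command */
	((struct alloc_q_counter_out *)out)->counter_set_id = 7;
	return 0;
}

/* both buffer sizes fall out of the command name via token pasting */
#define cmd_exec_inout(dev, cmd, in, out) \
	cmd_exec(dev, in, sizeof(struct cmd##_in), out, sizeof(struct cmd##_out))

int main(void)
{
	struct alloc_q_counter_in in = { .opcode = 1 /* stand-in value */ };
	struct alloc_q_counter_out out = { 0 };

	if (!cmd_exec_inout(NULL, alloc_q_counter, &in, &out))
		printf("counter_set_id = %u\n", out.counter_set_id);
	return 0;
}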
@@ -5123,7 +5132,7 @@ err_destroy_xsk_rqts:
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv, true);
+ mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
@@ -5142,7 +5151,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
- mlx5e_destroy_indirect_tirs(priv, true);
+ mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
@@ -5159,9 +5168,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
return err;
}
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_initialize(priv);
-#endif
return 0;
}
@@ -5188,9 +5195,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_hv_vhca_stats_create(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
-#endif
queue_work(priv->wq, &priv->set_rx_mode_work);
@@ -5205,10 +5210,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->netdev->reg_state == NETREG_REGISTERED)
mlx5e_dcbnl_delete_app(priv);
-#endif
rtnl_lock();
if (netif_running(priv->netdev))
@@ -5228,7 +5231,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
- return mlx5e_refresh_tirs(priv, false);
+ return mlx5e_refresh_tirs(priv, false, false);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -5363,9 +5366,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
*/
if (take_rtnl)
rtnl_lock();
- mlx5e_num_channels_changed(priv);
+ err = mlx5e_num_channels_changed(priv);
if (take_rtnl)
rtnl_unlock();
+ if (err)
+ goto out;
err = profile->init_tx(priv);
if (err)
@@ -5503,9 +5508,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
mlx5e_devlink_port_type_eth_set(priv);
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
-#endif
return priv;
err_devlink_port_unregister:
@@ -5528,9 +5531,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
}
#endif
priv = vpriv;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
-#endif
unregister_netdev(priv->netdev);
mlx5e_devlink_port_unregister(priv);
mlx5e_detach(mdev, vpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f372e94948fd..006807e04eda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -35,7 +35,6 @@
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
-#include <net/netevent.h>
#include <net/arp.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>
@@ -45,9 +44,9 @@
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
-#include "en/tc_tun.h"
+#include "en/rep/tc.h"
+#include "en/rep/neigh.h"
#include "fs_core.h"
-#include "lib/port_tun.h"
#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
@@ -58,16 +57,6 @@
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
-struct mlx5e_rep_indr_block_priv {
- struct net_device *netdev;
- struct mlx5e_rep_priv *rpriv;
-
- struct list_head list;
-};
-
-static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev);
-
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
@@ -485,706 +474,6 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
mlx5e_sqs2vport_stop(esw, rep);
}
-static unsigned long mlx5e_rep_ipv6_interval(void)
-{
- if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
- return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);
-
- return ~0UL;
-}
-
-static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
-{
- unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
- unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
- struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
- mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
-}
-
-void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-
- mlx5_fc_queue_stats_work(priv->mdev,
- &neigh_update->neigh_stats_work,
- neigh_update->min_interval);
-}
-
-static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
-{
- return refcount_inc_not_zero(&nhe->refcnt);
-}
-
-static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
-
-static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
-{
- if (refcount_dec_and_test(&nhe->refcnt)) {
- mlx5e_rep_neigh_entry_remove(nhe);
- kfree_rcu(nhe, rcu);
- }
-}
-
-static struct mlx5e_neigh_hash_entry *
-mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
- struct mlx5e_neigh_hash_entry *nhe)
-{
- struct mlx5e_neigh_hash_entry *next = NULL;
-
- rcu_read_lock();
-
- for (next = nhe ?
- list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
- &nhe->neigh_list,
- struct mlx5e_neigh_hash_entry,
- neigh_list) :
- list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
- struct mlx5e_neigh_hash_entry,
- neigh_list);
- next;
- next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
- &next->neigh_list,
- struct mlx5e_neigh_hash_entry,
- neigh_list))
- if (mlx5e_rep_neigh_entry_hold(next))
- break;
-
- rcu_read_unlock();
-
- if (nhe)
- mlx5e_rep_neigh_entry_release(nhe);
-
- return next;
-}
-
-static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
-{
- struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
- neigh_update.neigh_stats_work.work);
- struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_neigh_hash_entry *nhe = NULL;
-
- rtnl_lock();
- if (!list_empty(&rpriv->neigh_update.neigh_list))
- mlx5e_rep_queue_neigh_stats_work(priv);
-
- while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
- mlx5e_tc_update_neigh_used_value(nhe);
-
- rtnl_unlock();
-}
-
-static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
- bool neigh_connected,
- unsigned char ha[ETH_ALEN])
-{
- struct ethhdr *eth = (struct ethhdr *)e->encap_header;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- bool encap_connected;
- LIST_HEAD(flow_list);
-
- ASSERT_RTNL();
-
- /* wait for encap to be fully initialized */
- wait_for_completion(&e->res_ready);
-
- mutex_lock(&esw->offloads.encap_tbl_lock);
- encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- if (e->compl_result < 0 || (encap_connected == neigh_connected &&
- ether_addr_equal(e->h_dest, ha)))
- goto unlock;
-
- mlx5e_take_all_encap_flows(e, &flow_list);
-
- if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
- (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
- mlx5e_tc_encap_flows_del(priv, e, &flow_list);
-
- if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
- ether_addr_copy(e->h_dest, ha);
- ether_addr_copy(eth->h_dest, ha);
- /* Update the encap source mac, in case that we delete
- * the flows when encap source mac changed.
- */
- ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
-
- mlx5e_tc_encap_flows_add(priv, e, &flow_list);
- }
-unlock:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- mlx5e_put_encap_flow_list(priv, &flow_list);
-}
-
-static void mlx5e_rep_neigh_update(struct work_struct *work)
-{
- struct mlx5e_neigh_hash_entry *nhe =
- container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
- struct neighbour *n = nhe->n;
- struct mlx5e_encap_entry *e;
- unsigned char ha[ETH_ALEN];
- struct mlx5e_priv *priv;
- bool neigh_connected;
- u8 nud_state, dead;
-
- rtnl_lock();
-
- /* If these parameters are changed after we release the lock,
- * we'll receive another event letting us know about it.
- * We use this lock to avoid inconsistency between the neigh validity
- * and it's hw address.
- */
- read_lock_bh(&n->lock);
- memcpy(ha, n->ha, ETH_ALEN);
- nud_state = n->nud_state;
- dead = n->dead;
- read_unlock_bh(&n->lock);
-
- neigh_connected = (nud_state & NUD_VALID) && !dead;
-
- trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
-
- list_for_each_entry(e, &nhe->encap_list, encap_list) {
- if (!mlx5e_encap_take(e))
- continue;
-
- priv = netdev_priv(e->out_dev);
- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
- mlx5e_encap_put(priv, e);
- }
- mlx5e_rep_neigh_entry_release(nhe);
- rtnl_unlock();
- neigh_release(n);
-}
-
-static struct mlx5e_rep_indr_block_priv *
-mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev)
-{
- struct mlx5e_rep_indr_block_priv *cb_priv;
-
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
- list_for_each_entry(cb_priv,
- &rpriv->uplink_priv.tc_indr_block_priv_list,
- list)
- if (cb_priv->netdev == netdev)
- return cb_priv;
-
- return NULL;
-}
-
-static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
-{
- struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
- struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
-
- list_for_each_entry_safe(cb_priv, temp, head, list) {
- mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
- kfree(cb_priv);
- }
-}
-
-static int
-mlx5e_rep_indr_offload(struct net_device *netdev,
- struct flow_cls_offload *flower,
- struct mlx5e_rep_indr_block_priv *indr_priv,
- unsigned long flags)
-{
- struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
- int err = 0;
-
- switch (flower->command) {
- case FLOW_CLS_REPLACE:
- err = mlx5e_configure_flower(netdev, priv, flower, flags);
- break;
- case FLOW_CLS_DESTROY:
- err = mlx5e_delete_flower(netdev, priv, flower, flags);
- break;
- case FLOW_CLS_STATS:
- err = mlx5e_stats_flower(netdev, priv, flower, flags);
- break;
- default:
- err = -EOPNOTSUPP;
- }
-
- return err;
-}
-
-static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
- void *type_data, void *indr_priv)
-{
- unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
- struct mlx5e_rep_indr_block_priv *priv = indr_priv;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
- flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
- void *type_data, void *indr_priv)
-{
- struct mlx5e_rep_indr_block_priv *priv = indr_priv;
- struct flow_cls_offload *f = type_data;
- struct flow_cls_offload tmp;
- struct mlx5e_priv *mpriv;
- struct mlx5_eswitch *esw;
- unsigned long flags;
- int err;
-
- mpriv = netdev_priv(priv->rpriv->netdev);
- esw = mpriv->mdev->priv.eswitch;
-
- flags = MLX5_TC_FLAG(EGRESS) |
- MLX5_TC_FLAG(ESW_OFFLOAD) |
- MLX5_TC_FLAG(FT_OFFLOAD);
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- memcpy(&tmp, f, sizeof(*f));
-
- /* Re-use tc offload path by moving the ft flow to the
- * reserved ft chain.
- *
- * FT offload can use prio range [0, INT_MAX], so we normalize
- * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
- * as with tc, where prio 0 isn't supported.
- *
- * We only support chain 0 of FT offload.
- */
- if (!mlx5_esw_chains_prios_supported(esw) ||
- tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
- tmp.common.chain_index)
- return -EOPNOTSUPP;
-
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
- tmp.common.prio++;
- err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
- memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
- return err;
- default:
- return -EOPNOTSUPP;
- }
-}
-
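The prio normalization described in the comment above amounts to a bounds check plus an off-by-one shift; a minimal sketch under illustrative names (not the driver's code):

#include <stdbool.h>

/* FT offload priorities in [0, INT_MAX] are shifted into the tc-style
 * range [1, max_prio]; anything that would fall outside is rejected.
 */
static bool ft_prio_normalize(unsigned int prio, unsigned int max_prio,
			      unsigned int *normalized)
{
	if (prio >= max_prio)
		return false;		/* maps to the -EOPNOTSUPP path */
	*normalized = prio + 1;		/* tc does not support prio 0 */
	return true;
}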
-static void mlx5e_rep_indr_block_unbind(void *cb_priv)
-{
- struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
-
- list_del(&indr_priv->list);
- kfree(indr_priv);
-}
-
-static LIST_HEAD(mlx5e_block_cb_list);
-
-static int
-mlx5e_rep_indr_setup_block(struct net_device *netdev,
- struct mlx5e_rep_priv *rpriv,
- struct flow_block_offload *f,
- flow_setup_cb_t *setup_cb)
-{
- struct mlx5e_rep_indr_block_priv *indr_priv;
- struct flow_block_cb *block_cb;
-
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- f->unlocked_driver_cb = true;
- f->driver_block_list = &mlx5e_block_cb_list;
-
- switch (f->command) {
- case FLOW_BLOCK_BIND:
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
- if (indr_priv)
- return -EEXIST;
-
- indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
- if (!indr_priv)
- return -ENOMEM;
-
- indr_priv->netdev = netdev;
- indr_priv->rpriv = rpriv;
- list_add(&indr_priv->list,
- &rpriv->uplink_priv.tc_indr_block_priv_list);
-
- block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
- mlx5e_rep_indr_block_unbind);
- if (IS_ERR(block_cb)) {
- list_del(&indr_priv->list);
- kfree(indr_priv);
- return PTR_ERR(block_cb);
- }
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
-
- return 0;
- case FLOW_BLOCK_UNBIND:
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
- if (!indr_priv)
- return -ENOENT;
-
- block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
- if (!block_cb)
- return -ENOENT;
-
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static
-int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
-{
- switch (type) {
- case TC_SETUP_BLOCK:
- return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
- mlx5e_rep_indr_setup_tc_cb);
- case TC_SETUP_FT:
- return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
- mlx5e_rep_indr_setup_ft_cb);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev)
-{
- int err;
-
- err = __flow_indr_block_cb_register(netdev, rpriv,
- mlx5e_rep_indr_setup_cb,
- rpriv);
- if (err) {
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
-
- mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
- netdev_name(netdev), err);
- }
- return err;
-}
-
-static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev)
-{
- __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_cb,
- rpriv);
-}
-
-static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
-{
- struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
- uplink_priv.netdevice_nb);
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-
- if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
- !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
- return NOTIFY_OK;
-
- switch (event) {
- case NETDEV_REGISTER:
- mlx5e_rep_indr_register_block(rpriv, netdev);
- break;
- case NETDEV_UNREGISTER:
- mlx5e_rep_indr_unregister_block(rpriv, netdev);
- break;
- }
- return NOTIFY_OK;
-}
-
-static void
-mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe,
- struct neighbour *n)
-{
- /* Take a reference to ensure the neighbour and mlx5 encap
- * entry won't be destroyed until we drop the reference in
- * delayed work.
- */
- neigh_hold(n);
-
- /* This assignment is valid as long as the neigh reference
- * is taken.
- */
- nhe->n = n;
-
- if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
- mlx5e_rep_neigh_entry_release(nhe);
- neigh_release(n);
- }
-}
-
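The neigh_hold()/neigh_release() pairing above is the standard reference hand-off to a work item: the caller pins the object, the work function owns and drops that pin, and a false return from queue_work() means an already-pending instance holds its own pin, so the new one must be dropped. A compile-only sketch of the idiom (illustrative type; assumes the caller keeps a base reference so the undo never frees):

#include <linux/refcount.h>
#include <linux/workqueue.h>

struct pinned_work {
	struct work_struct work;
	refcount_t refcnt;
};

static void pinned_work_queue(struct workqueue_struct *wq,
			      struct pinned_work *pw)
{
	refcount_inc(&pw->refcnt);	/* pin for the work function */
	if (!queue_work(wq, &pw->work))
		refcount_dec(&pw->refcnt);	/* already queued: undo */
}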
-static struct mlx5e_neigh_hash_entry *
-mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
- struct mlx5e_neigh *m_neigh);
-
-static int mlx5e_rep_netevent_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
-{
- struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
- neigh_update.netevent_nb);
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_neigh_hash_entry *nhe = NULL;
- struct mlx5e_neigh m_neigh = {};
- struct neigh_parms *p;
- struct neighbour *n;
- bool found = false;
-
- switch (event) {
- case NETEVENT_NEIGH_UPDATE:
- n = ptr;
-#if IS_ENABLED(CONFIG_IPV6)
- if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
-#else
- if (n->tbl != &arp_tbl)
-#endif
- return NOTIFY_DONE;
-
- m_neigh.dev = n->dev;
- m_neigh.family = n->ops->family;
- memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
-
- rcu_read_lock();
- nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
- rcu_read_unlock();
- if (!nhe)
- return NOTIFY_DONE;
-
- mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
- break;
-
- case NETEVENT_DELAY_PROBE_TIME_UPDATE:
- p = ptr;
-
- /* We check that the device is present since we don't care about
- * changes in the default table; we only care about changes to the
- * per-device delay probe time parameter.
- */
-#if IS_ENABLED(CONFIG_IPV6)
- if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
-#else
- if (!p->dev || p->tbl != &arp_tbl)
-#endif
- return NOTIFY_DONE;
-
- rcu_read_lock();
- list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
- neigh_list) {
- if (p->dev == nhe->m_neigh.dev) {
- found = true;
- break;
- }
- }
- rcu_read_unlock();
- if (!found)
- return NOTIFY_DONE;
-
- neigh_update->min_interval = min_t(unsigned long,
- NEIGH_VAR(p, DELAY_PROBE_TIME),
- neigh_update->min_interval);
- mlx5_fc_update_sampling_interval(priv->mdev,
- neigh_update->min_interval);
- break;
- }
- return NOTIFY_DONE;
-}
-
-static const struct rhashtable_params mlx5e_neigh_ht_params = {
- .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
- .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
- .key_len = sizeof(struct mlx5e_neigh),
- .automatic_shrinking = true,
-};
-
-static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
-{
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- int err;
-
- err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&neigh_update->neigh_list);
- mutex_init(&neigh_update->encap_lock);
- INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
- mlx5e_rep_neigh_stats_work);
- mlx5e_rep_neigh_update_init_interval(rpriv);
-
- rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
- err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
- if (err)
- goto out_err;
- return 0;
-
-out_err:
- rhashtable_destroy(&neigh_update->neigh_ht);
- return err;
-}
-
-static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
-{
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
-
- unregister_netevent_notifier(&neigh_update->netevent_nb);
-
- flush_workqueue(priv->wq); /* flush neigh update works */
-
- cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
-
- mutex_destroy(&neigh_update->encap_lock);
- rhashtable_destroy(&neigh_update->neigh_ht);
-}
-
-static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- int err;
-
- err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
- &nhe->rhash_node,
- mlx5e_neigh_ht_params);
- if (err)
- return err;
-
- list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
-
- return err;
-}
-
-static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
-{
- struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
-
- mutex_lock(&rpriv->neigh_update.encap_lock);
-
- list_del_rcu(&nhe->neigh_list);
-
- rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
- &nhe->rhash_node,
- mlx5e_neigh_ht_params);
- mutex_unlock(&rpriv->neigh_update.encap_lock);
-}
-
-/* This function must only be called under the representor's encap_lock or
- * inside an rcu read-side critical section.
- */
-static struct mlx5e_neigh_hash_entry *
-mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
- struct mlx5e_neigh *m_neigh)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct mlx5e_neigh_hash_entry *nhe;
-
- nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
- mlx5e_neigh_ht_params);
- return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
-}
-
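mlx5e_rep_neigh_entry_hold() presumably wraps refcount_inc_not_zero(); the lookup-then-hold idiom it enables keeps a lockless lookup safe against concurrent release. A sketch with illustrative types:

#include <linux/refcount.h>
#include <linux/rhashtable.h>

struct cached_entry {
	struct rhash_head node;
	refcount_t refcnt;
};

/* The table may be traversed under RCU while entries are dying, so an
 * entry is only returned if a reference can still be taken on it.
 */
static struct cached_entry *
cached_entry_lookup(struct rhashtable *ht, const void *key,
		    const struct rhashtable_params params)
{
	struct cached_entry *e = rhashtable_lookup_fast(ht, key, params);

	return e && refcount_inc_not_zero(&e->refcnt) ? e : NULL;
}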
-static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
- struct mlx5e_neigh_hash_entry **nhe)
-{
- int err;
-
- *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
- if (!*nhe)
- return -ENOMEM;
-
- (*nhe)->priv = priv;
- memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
- INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
- spin_lock_init(&(*nhe)->encap_list_lock);
- INIT_LIST_HEAD(&(*nhe)->encap_list);
- refcount_set(&(*nhe)->refcnt, 1);
-
- err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
- if (err)
- goto out_free;
- return 0;
-
-out_free:
- kfree(*nhe);
- return err;
-}
-
-int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
- struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
- struct mlx5e_neigh_hash_entry *nhe;
- int err;
-
- err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
- if (err)
- return err;
-
- mutex_lock(&rpriv->neigh_update.encap_lock);
- nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
- if (!nhe) {
- err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
- if (err) {
- mutex_unlock(&rpriv->neigh_update.encap_lock);
- mlx5_tun_entropy_refcount_dec(tun_entropy,
- e->reformat_type);
- return err;
- }
- }
-
- e->nhe = nhe;
- spin_lock(&nhe->encap_list_lock);
- list_add_rcu(&e->encap_list, &nhe->encap_list);
- spin_unlock(&nhe->encap_list_lock);
-
- mutex_unlock(&rpriv->neigh_update.encap_lock);
-
- return 0;
-}
-
-void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
- struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
-
- if (!e->nhe)
- return;
-
- spin_lock(&e->nhe->encap_list_lock);
- list_del_rcu(&e->encap_list);
- spin_unlock(&e->nhe->encap_list_lock);
-
- mlx5e_rep_neigh_entry_release(e->nhe);
- e->nhe = NULL;
- mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
-}
-
static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1225,129 +514,6 @@ static int mlx5e_rep_close(struct net_device *dev)
return ret;
}
-static int
-mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct flow_cls_offload *cls_flower, int flags)
-{
- switch (cls_flower->command) {
- case FLOW_CLS_REPLACE:
- return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
- flags);
- case FLOW_CLS_DESTROY:
- return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
- flags);
- case FLOW_CLS_STATS:
- return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
- flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static
-int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
- struct tc_cls_matchall_offload *ma)
-{
- switch (ma->command) {
- case TC_CLSMATCHALL_REPLACE:
- return mlx5e_tc_configure_matchall(priv, ma);
- case TC_CLSMATCHALL_DESTROY:
- return mlx5e_tc_delete_matchall(priv, ma);
- case TC_CLSMATCHALL_STATS:
- mlx5e_tc_stats_matchall(priv, ma);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
- struct mlx5e_priv *priv = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
- case TC_SETUP_CLSMATCHALL:
- return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- struct flow_cls_offload tmp, *f = type_data;
- struct mlx5e_priv *priv = cb_priv;
- struct mlx5_eswitch *esw;
- unsigned long flags;
- int err;
-
- flags = MLX5_TC_FLAG(INGRESS) |
- MLX5_TC_FLAG(ESW_OFFLOAD) |
- MLX5_TC_FLAG(FT_OFFLOAD);
- esw = priv->mdev->priv.eswitch;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- memcpy(&tmp, f, sizeof(*f));
-
- if (!mlx5_esw_chains_prios_supported(esw))
- return -EOPNOTSUPP;
-
- /* Re-use tc offload path by moving the ft flow to the
- * reserved ft chain.
- *
- * FT offload can use prio range [0, INT_MAX], so we normalize
- * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
- * as with tc, where prio 0 isn't supported.
- *
- * We only support chain 0 of FT offload.
- */
- if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
- return -EOPNOTSUPP;
- if (tmp.common.chain_index != 0)
- return -EOPNOTSUPP;
-
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
- tmp.common.prio++;
- err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
- memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
- return err;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
-static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
-static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct flow_block_offload *f = type_data;
-
- f->unlocked_driver_cb = true;
-
- switch (type) {
- case TC_SETUP_BLOCK:
- return flow_block_cb_setup_simple(type_data,
- &mlx5e_rep_block_tc_cb_list,
- mlx5e_rep_setup_tc_cb,
- priv, priv, true);
- case TC_SETUP_FT:
- return flow_block_cb_setup_simple(type_data,
- &mlx5e_rep_block_ft_cb_list,
- mlx5e_rep_setup_ft_cb,
- priv, priv, true);
- default:
- return -EOPNOTSUPP;
- }
-}
-
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1484,13 +650,9 @@ bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
}
-bool mlx5e_eswitch_rep(struct net_device *netdev)
+bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
{
- if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
- netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
- return true;
-
- return false;
+ return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
static void mlx5e_build_rep_params(struct net_device *netdev)
@@ -1544,10 +706,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
/* we want a persistent mac for the uplink rep */
mlx5_query_mac_address(mdev, netdev->dev_addr);
netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (MLX5_CAP_GEN(mdev, qos))
- netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
-#endif
+ mlx5e_dcbnl_build_rep_netdev(netdev);
} else {
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
eth_hw_addr_random(netdev);
@@ -1695,6 +854,24 @@ static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
return 0;
}
+static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ if (!rpriv->vport_rx_rule)
+ return;
+
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ rpriv->vport_rx_rule = NULL;
+}
+
+int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
+{
+ rep_vport_rx_rule_destroy(priv);
+
+ return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
+}
+
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1747,7 +924,7 @@ err_destroy_ttc_table:
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv, false);
+ mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
@@ -1759,13 +936,11 @@ err_close_drop_rq:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
-
- mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
- mlx5e_destroy_indirect_tirs(priv, false);
+ mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
@@ -1794,31 +969,25 @@ static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
priv = netdev_priv(netdev);
uplink_priv = &rpriv->uplink_priv;
- mutex_init(&uplink_priv->unready_flows_lock);
- INIT_LIST_HEAD(&uplink_priv->unready_flows);
-
- /* init shared tc flow table */
- err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ err = mlx5e_rep_tc_init(rpriv);
if (err)
return err;
mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
- /* init indirect block notifications */
- INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
- uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
- err = register_netdevice_notifier_dev_net(rpriv->netdev,
- &uplink_priv->netdevice_nb,
- &uplink_priv->netdevice_nn);
+ mlx5e_rep_bond_init(rpriv);
+ err = mlx5e_rep_tc_netdevice_event_register(rpriv);
if (err) {
- mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
- goto tc_esw_cleanup;
+ mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
+ err);
+ goto err_event_reg;
}
return 0;
-tc_esw_cleanup:
- mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
+err_event_reg:
+ mlx5e_rep_bond_cleanup(rpriv);
+ mlx5e_rep_tc_cleanup(rpriv);
return err;
}
@@ -1848,17 +1017,9 @@ destroy_tises:
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
- struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
-
- /* clean indirect TC block notifications */
- unregister_netdevice_notifier_dev_net(rpriv->netdev,
- &uplink_priv->netdevice_nb,
- &uplink_priv->netdevice_nn);
- mlx5e_rep_indr_clean_block_privs(rpriv);
-
- /* delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
- mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
+ mlx5e_rep_tc_netdevice_event_unregister(rpriv);
+ mlx5e_rep_bond_cleanup(rpriv);
+ mlx5e_rep_tc_cleanup(rpriv);
}
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
@@ -1900,13 +1061,8 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
return NOTIFY_OK;
}
- if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
-
- queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);
-
- return NOTIFY_OK;
- }
+ if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
+ return mlx5e_rep_tc_event_port_affinity(priv);
return NOTIFY_DONE;
}
@@ -1915,7 +1071,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
u16 max_mtu;
netdev->min_mtu = ETH_MIN_MTU;
@@ -1923,28 +1078,22 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
mlx5e_set_dev_port_mtu(priv);
- INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
- mlx5e_tc_reoffload_flows_work);
+ mlx5e_rep_tc_enable(priv);
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
-#endif
}
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
-#endif
mlx5_notifier_unregister(mdev, &priv->events_nb);
- cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
+ mlx5e_rep_tc_disable(priv);
mlx5_lag_remove(mdev);
}
@@ -2051,26 +1200,22 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
return 0;
mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
pfnum = PCI_FUNC(dev->pdev->devfn);
- if (rep->vport == MLX5_VPORT_UPLINK) {
+ if (rep->vport == MLX5_VPORT_UPLINK)
devlink_port_attrs_set(&rpriv->dl_port,
DEVLINK_PORT_FLAVOUR_PHYSICAL,
pfnum, false, 0,
&ppid.id[0], ppid.id_len);
- dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
- } else if (rep->vport == MLX5_VPORT_PF) {
+ else if (rep->vport == MLX5_VPORT_PF)
devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
pfnum);
- dl_port_index = rep->vport;
- } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
- rpriv->rep->vport)) {
+ else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
pfnum, rep->vport - 1);
- dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
- }
return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 6a2337900420..1d5669801484 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,6 +56,7 @@ struct mlx5e_neigh_update_table {
};
struct mlx5_tc_ct_priv;
+struct mlx5e_rep_bond;
struct mlx5_rep_uplink_priv {
/* Filters DB - instantiated by the uplink representor and shared by
* the uplink's VFs
@@ -68,13 +69,8 @@ struct mlx5_rep_uplink_priv {
* tc_indr_block_cb_priv_list is used to lookup indirect callback
* private data
*
- * netdevice_nb is the netdev events notifier - used to register
- * tunnel devices for block events
- *
*/
struct list_head tc_indr_block_priv_list;
- struct notifier_block netdevice_nb;
- struct netdev_net_notifier netdevice_nn;
struct mlx5_tun_entropy tun_entropy;
@@ -89,6 +85,9 @@ struct mlx5_rep_uplink_priv {
struct mapping_ctx *tunnel_enc_opts_mapping;
struct mlx5_tc_ct_priv *ct_priv;
+
+ /* support eswitch vport bonding */
+ struct mlx5e_rep_bond *bond;
};
struct mlx5e_rep_priv {
@@ -158,6 +157,22 @@ struct mlx5e_neigh_hash_entry {
enum {
/* set when the encap entry is successfully offloaded into HW */
MLX5_ENCAP_ENTRY_VALID = BIT(0),
+ MLX5_REFORMAT_DECAP = BIT(1),
+};
+
+struct mlx5e_decap_key {
+ struct ethhdr key;
+};
+
+struct mlx5e_decap_entry {
+ struct mlx5e_decap_key key;
+ struct list_head flows;
+ struct hlist_node hlist;
+ refcount_t refcnt;
+ struct completion res_ready;
+ int compl_result;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct rcu_head rcu;
};
struct mlx5e_encap_entry {
@@ -195,6 +210,15 @@ struct mlx5e_rep_sq {
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev);
void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev);
+int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv);
+void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv);
+int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
+ struct net_device *lag_dev);
+void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
+ const struct net_device *netdev,
+ const struct net_device *lag_dev);
+int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup);
+
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
@@ -203,15 +227,15 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe);
-int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
-void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
-
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
-bool mlx5e_eswitch_rep(struct net_device *netdev);
+bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
+static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
+{
+ return mlx5e_eswitch_vf_rep(netdev) ||
+ mlx5e_eswitch_uplink_rep(netdev);
+}
#else /* CONFIG_MLX5_ESWITCH */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e2beb89c1832..dbb1c6323967 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,6 +42,7 @@
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
+#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
@@ -300,7 +301,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
* put into the Reuse Ring, because there is no way to return
* the page to the userspace when the interface goes down.
*/
- mlx5e_xsk_page_release(rq, dma_info);
+ xsk_buff_free(dma_info->xsk);
else
mlx5e_page_release_dynamic(rq, dma_info, recycle);
}
@@ -385,7 +386,11 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
if (rq->umem) {
int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
- if (unlikely(!mlx5e_xsk_pages_enough_umem(rq, pages_desired)))
+ /* Check in advance that we have enough frames, instead of
+ * allocating one-by-one, failing and moving frames to the
+ * Reuse Ring.
+ */
+ if (unlikely(!xsk_buff_can_alloc(rq->umem, pages_desired)))
return -ENOMEM;
}
@@ -468,22 +473,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
mlx5_wq_ll_update_db_record(wq);
}
-static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
- struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
-
- edge_wi = wi + nnops;
-
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- wi->opcode = MLX5_OPCODE_NOP;
- wi->num_wqebbs = 1;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
-}
-
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -492,23 +481,20 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
- u16 pi, contig_wqebbs_room;
+ u16 pi;
int err;
int i;
+ /* Check in advance that we have enough frames, instead of allocating
+ * one-by-one, failing and moving frames to the Reuse Ring.
+ */
if (rq->umem &&
- unlikely(!mlx5e_xsk_pages_enough_umem(rq, MLX5_MPWRQ_PAGES_PER_WQE))) {
+ unlikely(!xsk_buff_can_alloc(rq->umem, MLX5_MPWRQ_PAGES_PER_WQE))) {
err = -ENOMEM;
goto err;
}
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
- mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- }
-
+ pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
@@ -527,9 +513,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
MLX5_OPCODE_UMR);
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
- sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
- sq->db.ico_wqe[pi].umr.rq = rq;
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
+ .num_wqebbs = MLX5E_UMR_WQEBBS,
+ .umr.rq = rq,
+ };
+
sq->pc += MLX5E_UMR_WQEBBS;
sq->doorbell_cseg = &umr_wqe->ctrl;
@@ -618,33 +607,38 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
do {
- struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_icosq_wqe_info *wi;
u16 ci;
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- wi = &sq->db.ico_wqe[ci];
+ wi = &sq->db.wqe_info[ci];
sqcc += wi->num_wqebbs;
if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+ (struct mlx5_err_cqe *)cqe);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(cq->channel->priv->wq, &sq->recover_work);
break;
}
- if (likely(wi->opcode == MLX5_OPCODE_UMR))
+ switch (wi->wqe_type) {
+ case MLX5E_ICOSQ_WQE_UMR_RX:
wi->umr.rq->mpwqe.umr_completed++;
- else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
+ break;
+ case MLX5E_ICOSQ_WQE_NOP:
+ break;
+ default:
netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- wi->opcode);
-
+ "Bad WQE type in ICOSQ WQE info: 0x%x\n",
+ wi->wqe_type);
+ }
} while (!last_wqe);
-
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
sq->cc = sqcc;
@@ -1058,12 +1052,24 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
return skb;
}
+static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
+ u32 len, struct xdp_buff *xdp)
+{
+ xdp->data_hard_start = va;
+ xdp->data = va + headroom;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_end = xdp->data + len;
+ xdp->rxq = &rq->xdp_rxq;
+ xdp->frame_sz = rq->buff.frame0_sz;
+}
+
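mlx5e_fill_xdp_buff() establishes the headroom/data/data_end layout; because an XDP program may move the data pointer (e.g. via bpf_xdp_adjust_head()), the callers below recompute rx_headroom from it after the program runs. Schematically, with an illustrative stand-in type:

#include <stddef.h>
#include <stdint.h>

struct xdp_buff_sketch {
	void *data_hard_start;	/* start of the buffer */
	void *data;		/* packet start; XDP may move it */
	void *data_end;		/* packet end */
};

/* Effective headroom after the XDP program, mirroring the
 * "rx_headroom = xdp.data - xdp.data_hard_start" lines further down.
 */
static size_t xdp_headroom(const struct xdp_buff_sketch *xdp)
{
	return (uint8_t *)xdp->data - (uint8_t *)xdp->data_hard_start;
}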
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
struct mlx5e_dma_info *di = wi->di;
u16 rx_headroom = rq->buff.headroom;
+ struct xdp_buff xdp;
struct sk_buff *skb;
void *va, *data;
bool consumed;
@@ -1079,11 +1085,14 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
prefetch(data);
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false);
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+ consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp);
rcu_read_unlock();
if (consumed)
return NULL; /* page/packet was consumed by XDP */
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
if (unlikely(!skb))
return NULL;
@@ -1229,12 +1238,12 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
goto free_wqe;
napi_gro_receive(rq->cq.napi, skb);
- mlx5_tc_rep_post_napi_receive(&tc_priv);
+ mlx5_rep_tc_post_napi_receive(&tc_priv);
free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
@@ -1285,12 +1294,12 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
goto mpwrq_cqe_out;
napi_gro_receive(rq->cq.napi, skb);
- mlx5_tc_rep_post_napi_receive(&tc_priv);
+ mlx5_rep_tc_post_napi_receive(&tc_priv);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
@@ -1356,6 +1365,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u16 rx_headroom = rq->buff.headroom;
u32 cqe_bcnt32 = cqe_bcnt;
+ struct xdp_buff xdp;
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
@@ -1377,7 +1387,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prefetch(data);
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, false);
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
+ consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp);
rcu_read_unlock();
if (consumed) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
@@ -1385,6 +1396,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL; /* page/packet was consumed by XDP */
}
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
if (unlikely(!skb))
return NULL;
@@ -1501,6 +1514,7 @@ out:
#ifdef CONFIG_MLX5_CORE_IPOIB
+#define MLX5_IB_GRH_SGID_OFFSET 8
#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE 16
@@ -1514,6 +1528,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct net_device *netdev;
struct mlx5e_priv *priv;
char *pseudo_header;
+ u32 flags_rqpn;
u32 qpn;
u8 *dgid;
u8 g;
@@ -1535,7 +1550,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
tstamp = &priv->tstamp;
stats = &priv->channel_stats[rq->ix].rq;
- g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
+ flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
+ g = (flags_rqpn >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
skb->pkt_type = PACKET_HOST;
@@ -1544,9 +1560,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
else
skb->pkt_type = PACKET_MULTICAST;
- /* TODO: IB/ipoib: Allow mcast packets from other VFs
- * 68996a6e760e5c74654723eeb57bf65628ae87f4
+ /* Drop packets that this interface sent, i.e. multicast packets
+ * that the HCA has replicated.
*/
+ if (g && (qpn == (flags_rqpn & 0xffffff)) &&
+ (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
+ MLX5_GID_SIZE) == 0)) {
+ skb->dev = NULL;
+ return;
+ }
skb_pull(skb, MLX5_IB_GRH_BYTES);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index bbff8d8ded76..46790216ce86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -234,7 +234,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
return err;
}
- err = mlx5e_refresh_tirs(priv, true);
+ err = mlx5e_refresh_tirs(priv, true, false);
if (err)
goto out;
@@ -263,7 +263,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
mlx5_nic_vport_update_local_lb(priv->mdev, false);
dev_remove_pack(&lbtp->pt);
- mlx5e_refresh_tirs(priv, false);
+ mlx5e_refresh_tirs(priv, false, false);
}
#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 30b216d9284c..f009fe09e99b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,8 +32,8 @@
#include "lib/mlx5.h"
#include "en.h"
-#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
+#include "en_accel/en_accel.h"
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
@@ -411,18 +411,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
- u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+ int ret;
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+
+ if (priv->q_counter) {
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
+ priv->q_counter);
+ ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
+ if (!ret)
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ }
- if (priv->q_counter &&
- !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
- sizeof(out)))
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
- out, out_of_buffer);
- if (priv->drop_rq_q_counter &&
- !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
- out, sizeof(out)))
- qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
- out_of_buffer);
+ if (priv->drop_rq_q_counter) {
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
+ priv->drop_rq_q_counter);
+ ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
+ if (!ret)
+ qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ }
}
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
@@ -480,18 +491,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
- int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
- u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
return;
- MLX5_SET(query_vnic_env_in, in, opcode,
- MLX5_CMD_OP_QUERY_VNIC_ENV);
- MLX5_SET(query_vnic_env_in, in, op_mod, 0);
- MLX5_SET(query_vnic_env_in, in, other_vport, 0);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+ MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
+ mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
@@ -566,15 +573,12 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
- MLX5_SET(query_vport_counter_in, in, op_mod, 0);
- MLX5_SET(query_vport_counter_in, in, other_vport, 0);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+ mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}
#define PPORT_802_3_OFF(c) \
@@ -1424,27 +1428,6 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
-static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec)
-{
- return mlx5e_ipsec_get_count(priv);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec)
-{
- return idx + mlx5e_ipsec_get_strings(priv,
- data + idx * ETH_GSTRING_LEN);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec)
-{
- return idx + mlx5e_ipsec_get_stats(priv, data + idx);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec)
-{
- mlx5e_ipsec_update_stats(priv);
-}
-
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
return mlx5e_tls_get_count(priv);
@@ -1714,7 +1697,6 @@ MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
-static MLX5E_DEFINE_STATS_GRP(ipsec, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
/* The stats groups order is opposite to the update_stats() order calls */
@@ -1731,7 +1713,10 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(pcie),
&MLX5E_STATS_GRP(per_prio),
&MLX5E_STATS_GRP(pme),
- &MLX5E_STATS_GRP(ipsec),
+#ifdef CONFIG_MLX5_EN_IPSEC
+ &MLX5E_STATS_GRP(ipsec_sw),
+ &MLX5E_STATS_GRP(ipsec_hw),
+#endif
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 092b39ffa32a..2b83ba990714 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -390,5 +390,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index a574c588269a..7fc84f58e28a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -31,6 +31,7 @@
*/
#include <net/flow_dissector.h>
+#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
@@ -45,10 +46,15 @@
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
+#include <net/tc_act/tc_mpls.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
+#include <net/bareudp.h>
+#include <net/bonding.h>
#include "en.h"
#include "en_rep.h"
+#include "en/rep/tc.h"
+#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "esw/chains.h"
@@ -61,7 +67,7 @@
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
struct mlx5_nic_flow_attr {
u32 action;
@@ -89,6 +95,7 @@ enum {
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
+ MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -122,6 +129,11 @@ struct mlx5e_tc_flow {
u64 cookie;
unsigned long flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
+
+ /* flows sharing the same reformat object - currently mpls decap */
+ struct list_head l3_to_l2_reformat;
+ struct mlx5e_decap_entry *decap_reformat;
+
/* Flow can be associated with multiple encap IDs.
* The number of encaps is bounded by the number of supported
* destinations.
@@ -134,6 +146,7 @@ struct mlx5e_tc_flow {
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
+ struct net_device *orig_dev; /* netdev adding flow first */
int tmp_efi_index;
struct list_head tmp_list; /* temporary flow list used by neigh update */
refcount_t refcnt;
@@ -153,35 +166,12 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5_flow_spec spec;
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct ethhdr eth;
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
-struct tunnel_match_key {
- struct flow_dissector_key_control enc_control;
- struct flow_dissector_key_keyid enc_key_id;
- struct flow_dissector_key_ports enc_tp;
- struct flow_dissector_key_ip enc_ip;
- union {
- struct flow_dissector_key_ipv4_addrs enc_ipv4;
- struct flow_dissector_key_ipv6_addrs enc_ipv6;
- };
-
- int filter_ifindex;
-};
-
-/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
- * Upper TUNNEL_INFO_BITS for general tunnel info.
- * Lower ENC_OPTS_BITS bits for enc_opts.
- */
-#define TUNNEL_INFO_BITS 6
-#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
-#define ENC_OPTS_BITS 2
-#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
-#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
-#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
-
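The macros removed here describe a composite mapping id, with the general tunnel info in the upper TUNNEL_INFO_BITS and the enc_opts id in the lower ENC_OPTS_BITS; a sketch of that packing under illustrative names (not the driver's code):

#include <stdint.h>

#define TUN_INFO_BITS	6
#define TUN_OPTS_BITS	2
#define TUN_OPTS_MASK	((1u << TUN_OPTS_BITS) - 1)

/* Pack the tunnel-info id into the upper bits and the enc_opts id
 * into the lower bits of one composite id.
 */
static inline uint32_t tunnel_id_pack(uint32_t tun_info_id, uint32_t opts_id)
{
	return (tun_info_id << TUN_OPTS_BITS) | (opts_id & TUN_OPTS_MASK);
}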
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
@@ -220,8 +210,8 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
fmask = headers_c + soffset;
fval = headers_v + soffset;
- mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
- data = cpu_to_be32(data) >> (32 - (match_len * 8));
+ mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
+ data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));
memcpy(fmask, &mask, match_len);
memcpy(fval, &data, match_len);
@@ -568,7 +558,7 @@ struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
- u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
void *tirc;
int err;
@@ -582,7 +572,7 @@ static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
- err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+ err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
if (err)
goto create_tir_err;
@@ -666,7 +656,7 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
err = mlx5_core_create_tir(hp->func_mdev, in,
- MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
+ &hp->indir_tirn[tt]);
if (err) {
mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_tirs;
@@ -1092,7 +1082,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (IS_ERR(priv->fs.tc.t)) {
mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
- "Failed to create tc offload table\n");
+ "Failed to create tc offload table");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
return PTR_ERR(priv->fs.tc.t);
@@ -1144,6 +1134,11 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack,
struct net_device **encap_dev,
bool *encap_valid);
+static int mlx5e_attach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
+static void mlx5e_detach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
@@ -1319,6 +1314,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
+ err = mlx5e_attach_decap(priv, flow, extack);
+ if (err)
+ return err;
+ }
+
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
int mirred_ifindex;
@@ -1428,6 +1429,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(attr->counter_dev, attr->counter);
+
+ if (flow_flag_test(flow, L3_TO_L2_DECAP))
+ mlx5e_detach_decap(priv, flow);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -1704,6 +1708,17 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr
kfree_rcu(e, rcu);
}
+static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
+ struct mlx5e_decap_entry *d)
+{
+ WARN_ON(!list_empty(&d->flows));
+
+ if (!d->compl_result)
+ mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);
+
+ kfree_rcu(d, rcu);
+}
+
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -1716,6 +1731,18 @@ void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
mlx5e_encap_dealloc(priv, e);
}
+static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
+ return;
+ hash_del_rcu(&d->hlist);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ mlx5e_decap_dealloc(priv, d);
+}
+
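mlx5e_decap_put() relies on refcount_dec_and_mutex_lock(), which takes the lock only on the final put, so unpublishing the entry is atomic with respect to lookups serialized by the same lock. A compile-only sketch of the idiom (illustrative type, mirroring the decap_tbl_lock usage):

#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct table_obj {
	refcount_t refcnt;
	struct hlist_node hlist;
	struct rcu_head rcu;
};

static void table_obj_put(struct table_obj *obj, struct mutex *tbl_lock)
{
	if (!refcount_dec_and_mutex_lock(&obj->refcnt, tbl_lock))
		return;		/* not the last reference */
	hash_del_rcu(&obj->hlist);	/* unpublish before freeing */
	mutex_unlock(tbl_lock);
	kfree_rcu(obj, rcu);
}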
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow, int out_index)
{
@@ -1739,6 +1766,29 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
mlx5e_encap_dealloc(priv, e);
}
+static void mlx5e_detach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_decap_entry *d = flow->decap_reformat;
+
+ if (!d)
+ return;
+
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ list_del(&flow->l3_to_l2_reformat);
+ flow->decap_reformat = NULL;
+
+ if (!refcount_dec_and_test(&d->refcnt)) {
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return;
+ }
+ hash_del_rcu(&d->hlist);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ mlx5e_decap_dealloc(priv, d);
+}
+
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
@@ -1823,10 +1873,8 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
memchr_inv(opt->opt_data, 0, opt->length * 4)) {
*dont_care = false;
- if (opt->opt_class != U16_MAX ||
- opt->type != U8_MAX ||
- memchr_inv(opt->opt_data, 0xFF,
- opt->length * 4)) {
+ if (opt->opt_class != htons(U16_MAX) ||
+ opt->type != U8_MAX) {
NL_SET_ERR_MSG(extack,
"Partial match of tunnel options in chain > 0 isn't supported");
netdev_warn(priv->netdev,
@@ -1863,6 +1911,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct flow_match_enc_opts enc_opts_match;
+ struct tunnel_match_enc_opts tun_enc_opts;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct tunnel_match_key tunnel_key;
@@ -1905,8 +1954,14 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
goto err_enc_opts;
if (!enc_opts_is_dont_care) {
+ memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
+ memcpy(&tun_enc_opts.key, enc_opts_match.key,
+ sizeof(*enc_opts_match.key));
+ memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
+ sizeof(*enc_opts_match.mask));
+
err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
- enc_opts_match.key, &enc_opts_id);
+ &tun_enc_opts, &enc_opts_id);
if (err)
goto err_enc_opts;
}
@@ -1965,6 +2020,32 @@ u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
return flow->tunnel_id;
}
+void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
+ struct flow_match_basic *match, bool outer,
+ void *headers_c, void *headers_v)
+{
+ bool ip_version_cap;
+
+ ip_version_cap = outer ?
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_ip_version);
+
+ if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
+ (match->key->n_proto == htons(ETH_P_IP) ||
+ match->key->n_proto == htons(ETH_P_IPV6))) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
+ match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(match->mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(match->key->n_proto));
+ }
+}
+
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -2005,7 +2086,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return err;
}
- flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ /* With mpls over udp we decapsulate using a packet reformat
+ * object.
+ */
+ if (!netif_is_bareudp(filter_dev))
+ flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
if (!needs_mapping && !sets_mapping)
@@ -2068,7 +2153,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
flow_rule_match_meta(rule, &match);
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
ingress_dev = __dev_get_by_index(dev_net(filter_dev),
@@ -2076,18 +2161,32 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
if (!ingress_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't find the ingress port to match on");
- return -EINVAL;
+ return -ENOENT;
}
if (ingress_dev != filter_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't match on the ingress filter port");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
return 0;
}
+static bool skip_key_basic(struct net_device *filter_dev,
+ struct flow_cls_offload *f)
+{
+ /* When doing mpls over udp decap, the user needs to provide
+ * MPLS_UC as the protocol in order to be able to match on mpls
+ * label fields. However, the actual ethertype is IP, so we want to
+ * avoid matching on it; otherwise the match will fail.
+ */
+ if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
+ return true;
+
+ return false;
+}
+
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -2132,7 +2231,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_CT) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
dissector->used_keys);
@@ -2162,14 +2262,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (err)
return err;
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
+ !skip_key_basic(filter_dev, f)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(match.mask->n_proto));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(match.key->n_proto));
+ mlx5e_tc_set_ethertype(priv->mdev, &match,
+ match_level == outer_match_level,
+ headers_c, headers_v);
if (match.mask->n_proto)
*match_level = MLX5_MATCH_L2;
@@ -2661,7 +2761,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
set_vals = &hdrs[0].vals;
add_vals = &hdrs[1].vals;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -2715,10 +2815,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
continue;
if (f->field_bsize == 32) {
- mask_be32 = (__be32)mask;
+ mask_be32 = (__force __be32)(mask);
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
} else if (f->field_bsize == 16) {
- mask_be32 = (__be32)mask;
+ mask_be32 = (__force __be32)(mask);
mask_be16 = *(__be16 *)&mask_be32;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
}
@@ -2794,7 +2894,7 @@ int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
return 0;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
namespace);
@@ -2827,10 +2927,12 @@ void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
static const struct pedit_headers zero_masks = {};
-static int parse_tc_pedit_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
+static int
+parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
{
u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
int err = -EOPNOTSUPP;
@@ -2866,6 +2968,46 @@ out_err:
return err;
}
+static int
+parse_pedit_to_reformat(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
+{
+ u32 mask, val, offset;
+ u32 *p;
+
+ if (act->id != FLOW_ACTION_MANGLE)
+ return -EOPNOTSUPP;
+
+ if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
+ NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
+ return -EOPNOTSUPP;
+ }
+
+ mask = ~act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+ p = (u32 *)&parse_attr->eth;
+ *(p + (offset >> 2)) |= (val & mask);
+
+ return 0;
+}
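/* A minimal userspace sketch of what parse_pedit_to_reformat() above
 * accumulates: each 32-bit pedit mangle carries (offset, mask, val),
 * and the driver ORs the unmasked value bits into the rebuilt Ethernet
 * header at the given 4-byte-aligned offset. Names and values here are
 * illustrative only; pedit carries values in network byte order.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct eth_hdr {			/* 14-byte Ethernet header */
	uint8_t  h_dest[6];
	uint8_t  h_source[6];
	uint16_t h_proto;
} __attribute__((packed));

static void apply_mangle(struct eth_hdr *eth, uint32_t offset,
			 uint32_t val, uint32_t kernel_mask)
{
	uint32_t mask = ~kernel_mask;	/* the kernel mask marks bits to preserve */
	uint32_t *p = (uint32_t *)eth;

	p[offset >> 2] |= val & mask;	/* same accumulation as the driver */
}

int main(void)
{
	struct eth_hdr eth;

	memset(&eth, 0, sizeof(eth));
	/* Set the first 4 destination MAC bytes (offset 0, all bits rewritten). */
	apply_mangle(&eth, 0, htonl(0xaabbccddu), 0x00000000u);
	printf("dmac[0..3] = %02x:%02x:%02x:%02x\n",
	       eth.h_dest[0], eth.h_dest[1], eth.h_dest[2], eth.h_dest[3]);
	return 0;
}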
+
+static int parse_tc_pedit_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
+ return parse_pedit_to_reformat(priv, act, parse_attr, extack);
+
+ return parse_pedit_to_modify_hdr(priv, act, namespace,
+ parse_attr, hdrs, extack);
+}
+
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
@@ -3003,16 +3145,19 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
{
const struct flow_action_entry *act;
bool modify_ip_header;
+ void *headers_c;
void *headers_v;
u16 ethertype;
u8 ip_proto;
int i, err;
+ headers_c = get_match_headers_criteria(actions, spec);
headers_v = get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
- if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
+ if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
+ ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
goto out_ok;
modify_ip_header = false;
@@ -3073,6 +3218,11 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return true;
}
+static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+ return priv->mdev == peer_priv->mdev;
+}
+
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
@@ -3124,7 +3274,7 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
return -EOPNOTSUPP;
}
- err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
+ err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
@@ -3190,7 +3340,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
- hdrs, extack);
+ parse_attr, hdrs, NULL, extack);
if (err)
return err;
@@ -3284,14 +3434,24 @@ static inline int cmp_encap_info(struct encap_key *a,
a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
}
+static inline int cmp_decap_info(struct mlx5e_decap_key *a,
+ struct mlx5e_decap_key *b)
+{
+ return memcmp(&a->key, &b->key, sizeof(b->key));
+}
+
static inline int hash_encap_info(struct encap_key *key)
{
return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
key->tc_tunnel->tunnel_type);
}
+static inline int hash_decap_info(struct mlx5e_decap_key *key)
+{
+ return jhash(&key->key, sizeof(key->key), 0);
+}
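/* A minimal userspace sketch of the dedup key scheme above: the decap
 * key is the rebuilt Ethernet header itself, hashed over its raw bytes
 * (jhash in the driver; a stand-in FNV-1a here) and compared with
 * memcmp(), exactly as cmp_decap_info() does.
 */
#include <stdint.h>
#include <string.h>

struct sketch_decap_key { unsigned char eth[14]; };	/* stands in for struct ethhdr */

static uint32_t sketch_key_hash(const struct sketch_decap_key *k)
{
	const unsigned char *p = (const unsigned char *)k;
	uint32_t h = 2166136261u;
	size_t i;

	for (i = 0; i < sizeof(*k); i++)
		h = (h ^ p[i]) * 16777619u;	/* FNV-1a step */
	return h;
}

static int sketch_key_cmp(const struct sketch_decap_key *a,
			  const struct sketch_decap_key *b)
{
	return memcmp(a, b, sizeof(*b));
}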
-static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
+static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
{
struct mlx5e_priv *peer_priv;
@@ -3299,18 +3459,21 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
peer_priv = netdev_priv(peer_netdev);
return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
- mlx5e_eswitch_rep(priv->netdev) &&
- mlx5e_eswitch_rep(peer_netdev) &&
+ mlx5e_eswitch_vf_rep(priv->netdev) &&
+ mlx5e_eswitch_vf_rep(peer_netdev) &&
same_hw_devs(priv, peer_priv));
}
-
-
bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
{
return refcount_inc_not_zero(&e->refcnt);
}
+static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
+{
+ return refcount_inc_not_zero(&e->refcnt);
+}
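/* The take/get pair above relies on refcount_inc_not_zero(): under RCU,
 * a lookup may race with the last put, so a reference is taken only if
 * the count is still non-zero. A compilable userspace analogue using
 * C11 atomics (illustrative, not the kernel implementation):
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool sketch_refcount_inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		/* CAS: bump the count only if it is still 'old' (> 0). */
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;	/* object is already on its way to being freed */
}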
+
static struct mlx5e_encap_entry *
mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
uintptr_t hash_key)
@@ -3331,6 +3494,24 @@ mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
return NULL;
}
+static struct mlx5e_decap_entry *
+mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
+ uintptr_t hash_key)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_decap_key r_key;
+ struct mlx5e_decap_entry *e;
+
+ hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
+ hlist, hash_key) {
+ r_key = e->key;
+ if (!cmp_decap_info(&r_key, key) &&
+ mlx5e_decap_take(e))
+ return e;
+ }
+ return NULL;
+}
+
static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
{
size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
@@ -3476,6 +3657,84 @@ out_err_init:
return err;
}
+static int mlx5e_attach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_decap_entry *d;
+ struct mlx5e_decap_key key;
+ uintptr_t hash_key;
+ int err = 0;
+
+ parse_attr = attr->parse_attr;
+ if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "encap header larger than max supported");
+ return -EOPNOTSUPP;
+ }
+
+ key.key = parse_attr->eth;
+ hash_key = hash_decap_info(&key);
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ d = mlx5e_decap_get(priv, &key, hash_key);
+ if (d) {
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ wait_for_completion(&d->res_ready);
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ if (d->compl_result) {
+ err = -EREMOTEIO;
+ goto out_free;
+ }
+ goto found;
+ }
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ d->key = key;
+ refcount_set(&d->refcnt, 1);
+ init_completion(&d->res_ready);
+ INIT_LIST_HEAD(&d->flows);
+ hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
+ sizeof(parse_attr->eth),
+ &parse_attr->eth,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(d->pkt_reformat)) {
+ err = PTR_ERR(d->pkt_reformat);
+ d->compl_result = err;
+ }
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ complete_all(&d->res_ready);
+ if (err)
+ goto out_free;
+
+found:
+ flow->decap_reformat = d;
+ attr->decap_pkt_reformat = d->pkt_reformat;
+ list_add(&flow->l3_to_l2_reformat, &d->flows);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return 0;
+
+out_free:
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ mlx5e_decap_put(priv, d);
+ return err;
+
+out_err:
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return err;
+}
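/* mlx5e_attach_decap() above uses a common pattern: publish a
 * placeholder entry under the table lock, drop the lock for the slow
 * mlx5_packet_reformat_alloc() call, then complete_all() so concurrent
 * requesters that found the placeholder wait instead of allocating a
 * duplicate HW object. A minimal userspace analogue with pthreads
 * (names are illustrative):
 */
#include <pthread.h>
#include <stdbool.h>

struct sketch_entry {
	pthread_mutex_t lock;		/* stands in for decap_tbl_lock */
	pthread_cond_t	res_ready;	/* stands in for struct completion */
	bool		done;
	int		compl_result;
};

static void sketch_complete_all(struct sketch_entry *e, int result)
{
	pthread_mutex_lock(&e->lock);
	e->compl_result = result;
	e->done = true;
	pthread_cond_broadcast(&e->res_ready);	/* complete_all() */
	pthread_mutex_unlock(&e->lock);
}

static int sketch_wait_for_completion(struct sketch_entry *e)
{
	pthread_mutex_lock(&e->lock);
	while (!e->done)			/* wait_for_completion() */
		pthread_cond_wait(&e->res_ready, &e->lock);
	pthread_mutex_unlock(&e->lock);
	return e->compl_result;
}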
+
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
@@ -3529,6 +3788,28 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
return 0;
}
+static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
+ struct net_device *out_dev)
+{
+ struct net_device *fdb_out_dev = out_dev;
+ struct net_device *uplink_upper;
+
+ rcu_read_lock();
+ uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
+ if (uplink_upper && netif_is_lag_master(uplink_upper) &&
+ uplink_upper == out_dev) {
+ fdb_out_dev = uplink_dev;
+ } else if (netif_is_lag_master(out_dev)) {
+ fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
+ if (fdb_out_dev &&
+ (!mlx5e_eswitch_rep(fdb_out_dev) ||
+ !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
+ fdb_out_dev = NULL;
+ }
+ rcu_read_unlock();
+ return fdb_out_dev;
+}
+
static int add_vlan_push_action(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr,
struct net_device **out_dev,
@@ -3575,14 +3856,37 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
return err;
}
+static bool same_hw_reps(struct mlx5e_priv *priv,
+ struct net_device *peer_netdev)
+{
+ struct mlx5e_priv *peer_priv;
+
+ peer_priv = netdev_priv(peer_netdev);
+
+ return mlx5e_eswitch_rep(priv->netdev) &&
+ mlx5e_eswitch_rep(peer_netdev) &&
+ same_hw_devs(priv, peer_priv);
+}
+
+static bool is_lag_dev(struct mlx5e_priv *priv,
+ struct net_device *peer_netdev)
+{
+ return ((mlx5_lag_is_sriov(priv->mdev) ||
+ mlx5_lag_is_multipath(priv->mdev)) &&
+ same_hw_reps(priv, peer_netdev));
+}
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev)
{
- if (is_merged_eswitch_dev(priv, out_dev))
+ if (is_merged_eswitch_vfs(priv, out_dev))
+ return true;
+
+ if (is_lag_dev(priv, out_dev))
return true;
return mlx5e_eswitch_rep(out_dev) &&
- same_hw_devs(priv, netdev_priv(out_dev));
+ same_port_devs(priv, netdev_priv(out_dev));
}
static bool is_duplicated_output_device(struct net_device *dev,
@@ -3687,7 +3991,8 @@ static int verify_uplink_forwarding(struct mlx5e_priv *priv,
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ struct net_device *filter_dev)
{
struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -3701,6 +4006,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
bool encap = false, decap = false;
u32 action = attr->action;
int err, i, if_count = 0;
+ bool mpls_push = false;
if (!flow_action_has_entries(flow_action))
return -EINVAL;
@@ -3715,15 +4021,48 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
+ case FLOW_ACTION_MPLS_PUSH:
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ reformat_l2_to_l3_tunnel) ||
+ act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mpls push is supported only for mpls_uc protocol");
+ return -EOPNOTSUPP;
+ }
+ mpls_push = true;
+ break;
+ case FLOW_ACTION_MPLS_POP:
+ /* We only support MPLS pop if it is the first action
+ * and the filter net device is bareudp. Subsequent
+ * actions can be pedit, and the last one can be a
+ * mirred egress redirect.
+ */
+ if (i) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mpls pop supported only as first action");
+ return -EOPNOTSUPP;
+ }
+ if (!netif_is_bareudp(filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mpls pop supported only on bareudp devices");
+ return -EOPNOTSUPP;
+ }
+
+ parse_attr->eth.h_proto = act->mpls_pop.proto;
+ action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_flag_set(flow, L3_TO_L2_DECAP);
+ break;
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
- hdrs, extack);
+ parse_attr, hdrs, flow, extack);
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- attr->split_count = attr->out_count;
+ if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ attr->split_count = attr->out_count;
+ }
break;
case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
@@ -3745,6 +4084,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EINVAL;
}
+ if (mpls_push && !netif_is_bareudp(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mpls is supported only through a bareudp device");
+ return -EOPNOTSUPP;
+ }
+
if (ft_flow && out_dev == priv->netdev) {
/* Ignore forward to self rules generated
* by adding both mlx5 devs to the flow table
@@ -3780,7 +4125,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- struct net_device *uplink_upper;
if (is_duplicated_output_device(priv->netdev,
out_dev,
@@ -3792,14 +4136,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
ifindexes[if_count] = out_dev->ifindex;
if_count++;
- rcu_read_lock();
- uplink_upper =
- netdev_master_upper_dev_get_rcu(uplink_dev);
- if (uplink_upper &&
- netif_is_lag_master(uplink_upper) &&
- uplink_upper == out_dev)
- out_dev = uplink_dev;
- rcu_read_unlock();
+ out_dev = get_fdb_out_dev(uplink_dev, out_dev);
+ if (!out_dev)
+ return -ENODEV;
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr,
@@ -3823,10 +4162,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
- netdev_warn(priv->netdev,
- "devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name,
- out_dev->name);
return -EOPNOTSUPP;
}
@@ -4075,6 +4410,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->mod_hdr);
INIT_LIST_HEAD(&flow->hairpin);
+ INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
@@ -4144,7 +4480,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
+ err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
if (err)
goto err_free;
@@ -4330,11 +4666,21 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
return err;
}
+static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
+ struct mlx5e_rep_priv *rpriv)
+{
+ /* An offloaded flow rule is allowed to be duplicated on a non-uplink
+ * representor that shares a tc block with other slaves of a lag device.
+ */
+ return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK;
+}
+
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
struct netlink_ext_ack *extack = f->common.extack;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5e_tc_flow *flow;
int err = 0;
@@ -4342,6 +4688,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
rcu_read_unlock();
if (flow) {
+ /* The same flow rule was already offloaded to a non-uplink representor
+ * sharing the tc block; just return 0.
+ */
+ if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
+ goto out;
+
NL_SET_ERR_MSG_MOD(extack,
"flow cookie already exists, ignoring");
netdev_warn_once(priv->netdev,
@@ -4356,6 +4708,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
if (err)
goto out;
+ /* The flow rule is offloaded to a non-uplink representor sharing the
+ * tc block; record the flow's owner dev.
+ */
+ if (is_flow_rule_duplicate_allowed(dev, rpriv))
+ flow->orig_dev = dev;
+
err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err)
goto err_free;
@@ -4588,7 +4946,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
rpriv->prev_vf_vport_stats = cur_stats;
- flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
+ flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
FLOW_ACTION_HW_STATS_DELAYED);
}
@@ -4707,7 +5065,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
- const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
+ const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *priv;
struct mapping_ctx *mapping;
@@ -4796,146 +5154,35 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
mutex_unlock(&rpriv->unready_flows_lock);
}
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
-static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv,
- u32 tunnel_id)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct flow_dissector_key_enc_opts enc_opts = {};
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct metadata_dst *tun_dst;
- struct tunnel_match_key key;
- u32 tun_id, enc_opts_id;
- struct net_device *dev;
- int err;
-
- enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
- tun_id = tunnel_id >> ENC_OPTS_BITS;
-
- if (!tun_id)
- return true;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
-
- err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
- if (err) {
- WARN_ON_ONCE(true);
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel for tun_id: %d, err: %d\n",
- tun_id, err);
- return false;
- }
-
- if (enc_opts_id) {
- err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
- enc_opts_id, &enc_opts);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
- enc_opts_id, err);
- return false;
- }
- }
-
- tun_dst = tun_rx_dst(enc_opts.len);
- if (!tun_dst) {
- WARN_ON_ONCE(true);
- return false;
- }
-
- ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- key.enc_ipv4.src, key.enc_ipv4.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- 0, /* label */
- key.enc_tp.src, key.enc_tp.dst,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- TUNNEL_KEY);
-
- if (enc_opts.len)
- ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
- enc_opts.len, enc_opts.dst_opt_type);
-
- skb_dst_set(skb, (struct dst_entry *)tun_dst);
- dev = dev_get_by_index(&init_net, key.filter_ifindex);
- if (!dev) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel device with ifindex: %d\n",
- key.filter_ifindex);
- return false;
+static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
+ struct flow_cls_offload *cls_flower,
+ unsigned long flags)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+ flags);
+ case FLOW_CLS_DESTROY:
+ return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+ flags);
+ case FLOW_CLS_STATS:
+ return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+ flags);
+ default:
+ return -EOPNOTSUPP;
}
-
- /* Set tun_dev so we do dev_put() after datapath */
- tc_priv->tun_dev = dev;
-
- skb->dev = dev;
-
- return true;
}
-#endif /* CONFIG_NET_TC_SKB_EXT */
-bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv)
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct tc_skb_ext *tc_skb_ext;
- struct mlx5_eswitch *esw;
- struct mlx5e_priv *priv;
- int tunnel_moffset;
- int err;
-
- reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
- if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
- reg_c0 = 0;
- reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);
-
- if (!reg_c0)
- return true;
-
- priv = netdev_priv(skb->dev);
- esw = priv->mdev->priv.eswitch;
-
- err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find chain for chain tag: %d, err: %d\n",
- reg_c0, err);
- return false;
- }
-
- if (chain) {
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
- if (!tc_skb_ext) {
- WARN_ON(1);
- return false;
- }
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
+ struct mlx5e_priv *priv = cb_priv;
- tc_skb_ext->chain = chain;
-
- tuple_id = reg_c1 & TUPLE_ID_MAX;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
- return false;
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
+ default:
+ return -EOPNOTSUPP;
}
-
- tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
- tunnel_id = reg_c1 >> (8 * tunnel_moffset);
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
-#endif /* CONFIG_NET_TC_SKB_EXT */
-
- return true;
-}
-
-void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
-{
- if (tc_priv->tun_dev)
- dev_put(tc_priv->tun_dev);
}
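/* For context: a block callback like mlx5e_setup_tc_block_cb() is
 * normally wired up from the driver's ndo_setup_tc() through the
 * mainline helper flow_block_cb_setup_simple(). A minimal sketch,
 * simplified from the mlx5 driver (names prefixed sketch_ are
 * illustrative):
 */
static LIST_HEAD(sketch_block_cb_list);

static int sketch_setup_tc(struct net_device *dev, enum tc_setup_type type,
			   void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &sketch_block_cb_list,
						  mlx5e_setup_tc_block_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}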
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index abdcfa4c4e0e..5c330b0cae21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -34,11 +34,41 @@
#define __MLX5_EN_TC_H__
#include <net/pkt_cls.h>
+#include "en.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
#ifdef CONFIG_MLX5_ESWITCH
+struct tunnel_match_key {
+ struct flow_dissector_key_control enc_control;
+ struct flow_dissector_key_keyid enc_key_id;
+ struct flow_dissector_key_ports enc_tp;
+ struct flow_dissector_key_ip enc_ip;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
+
+ int filter_ifindex;
+};
+
+struct tunnel_match_enc_opts {
+ struct flow_dissector_key_enc_opts key;
+ struct flow_dissector_key_enc_opts mask;
+};
+
+/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
+ * Upper TUNNEL_INFO_BITS for general tunnel info.
+ * Lower ENC_OPTS_BITS bits for enc_opts.
+ */
+#define TUNNEL_INFO_BITS 12
+#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
+#define ENC_OPTS_BITS 12
+#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
+#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
+#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
+
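/* Worked example of the packing above (values are arbitrary): the
 * enc_opts id occupies the low ENC_OPTS_BITS of the mapped tunnel_id,
 * and the tunnel info id sits in the bits above it, mirroring how the
 * restore path unpacks the register value.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_ENC_OPTS_BITS 12			/* ENC_OPTS_BITS */
#define SK_ENC_OPTS_MASK ((1u << SK_ENC_OPTS_BITS) - 1)

int main(void)
{
	uint32_t tun_id = 5, enc_opts_id = 3;
	uint32_t tunnel_id = (tun_id << SK_ENC_OPTS_BITS) | enc_opts_id;

	printf("tun_id=%u enc_opts_id=%u\n",
	       tunnel_id >> SK_ENC_OPTS_BITS,	/* -> 5 */
	       tunnel_id & SK_ENC_OPTS_MASK);	/* -> 3 */
	return 0;
}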
enum {
MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLAG_EGRESS_BIT,
@@ -50,9 +80,6 @@ enum {
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
-int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
-void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
-
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
@@ -119,11 +146,6 @@ struct mlx5e_tc_update_priv {
struct net_device *tun_dev;
};
-bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv);
-
-void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
-
struct mlx5e_tc_mod_hdr_acts {
int num_actions;
int max_actions;
@@ -148,6 +170,26 @@ void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
struct mlx5e_tc_flow;
u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
+void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
+ struct flow_match_basic *match, bool outer,
+ void *headers_c, void *headers_v);
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
+void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
+static inline int
+mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
@@ -156,6 +198,10 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
{
return 0;
}
+
+static inline int
+mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{ return -EOPNOTSUPP; }
#endif
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index fd6b2a1898c5..6d406063aca4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -265,8 +265,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
@@ -324,7 +324,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
#endif
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
#ifdef CONFIG_MLX5_EN_IPSEC
wqe->eth = cur_eth;
#endif
@@ -372,47 +373,38 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
num_dma, wi, cseg, xmit_more);
- return NETDEV_TX_OK;
+ return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5e_tx_wqe *wqe;
struct mlx5e_txqsq *sq;
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
- wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
- /* might send skbs and update wqe and pi */
- skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
- if (unlikely(!skb))
- return NETDEV_TX_OK;
+ /* May send SKBs and WQEs. */
+ if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
+ goto out;
- return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
-}
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
- struct mlx5_err_cqe *err_cqe)
-{
- struct mlx5_cqwq *wq = &sq->cq.wq;
- u32 ci;
+ /* May update the WQE, but may not post other WQEs. */
+ if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
+ goto out;
- ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
+ mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
- netdev_err(sq->channel->netdev,
- "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
- sq->cq.mcq.cqn, ci, sq->sqn,
- get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
- err_cqe->syndrome, err_cqe->vendor_err_synd);
- mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
+out:
+ return NETDEV_TX_OK;
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
@@ -501,7 +493,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
&sq->state)) {
- mlx5e_dump_error_cqe(sq,
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
queue_work(cq->channel->priv->wq,
@@ -537,10 +529,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
+ u32 dma_fifo_cc, nbytes = 0;
+ u16 ci, sqcc, npkts = 0;
struct sk_buff *skb;
- u32 dma_fifo_cc;
- u16 sqcc;
- u16 ci;
int i;
sqcc = sq->cc;
@@ -565,11 +556,15 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
}
dev_kfree_skb_any(skb);
+ npkts++;
+ nbytes += wi->num_bytes;
sqcc += wi->num_wqebbs;
}
sq->dma_fifo_cc = dma_fifo_cc;
sq->cc = sqcc;
+
+ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}
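/* The npkts/nbytes accounting added above closes the BQL loop: bytes
 * reported at xmit time via netdev_tx_sent_queue() must eventually be
 * returned via netdev_tx_completed_queue(), including when pending
 * descriptors are reclaimed on SQ teardown as done here. Sketch of the
 * send-side half (illustrative):
 */
static void sketch_tx_sent_account(struct netdev_queue *txq,
				   struct sk_buff *skb)
{
	/* Paired with netdev_tx_completed_queue(txq, npkts, nbytes)
	 * once the corresponding CQEs (or a flush) complete.
	 */
	netdev_tx_sent_queue(txq, skb->len);
}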
#ifdef CONFIG_MLX5_CORE_IPOIB
@@ -582,11 +577,9 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey,
- bool xmit_more)
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5i_tx_wqe *wqe;
struct mlx5_wqe_datagram_seg *datagram;
@@ -596,9 +589,9 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, pi, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
u8 num_wqebbs, opcode;
+ u16 headlen, ihs, pi;
u32 num_bytes;
int num_dma;
__be16 mss;
@@ -634,14 +627,8 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < num_wqebbs)) {
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- }
-
- mlx5i_sq_fetch_wqe(sq, &wqe, pi);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
+ wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
/* fill wqe */
wi = &sq->db.wqe_info[pi];
@@ -669,12 +656,10 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
num_dma, wi, cseg, xmit_more);
- return NETDEV_TX_OK;
+ return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index acb20215a33b..8480278f2ee2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,11 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
struct mlx5e_tx_wqe *nopwqe;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
- sq->db.ico_wqe[pi].num_wqebbs = 1;
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_NOP,
+ .num_wqebbs = 1,
+ };
+
nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index cccea3a8eddd..31ef9f8420c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -36,7 +36,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
-#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
@@ -102,12 +101,11 @@ struct mlx5_eq_table {
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
- u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};
MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
MLX5_SET(destroy_eq_in, in, eq_number, eqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_eq, in);
}
/* caller must eventually call mlx5_cq_put on the returned cq */
@@ -611,11 +609,13 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
+ mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
if (err)
goto err1;
mlx5_cmd_use_events(dev);
+ mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
.irq_index = 0,
@@ -645,6 +645,7 @@ err2:
mlx5_cmd_use_polling(dev);
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
+ mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
new file mode 100644
index 000000000000..d46f8b225ebe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+#include "lgcy.h"
+
+static void esw_acl_egress_lgcy_rules_destroy(struct mlx5_vport *vport)
+{
+ esw_acl_egress_vlan_destroy(vport);
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
+ mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
+ vport->egress.legacy.drop_rule = NULL;
+ }
+}
+
+static int esw_acl_egress_lgcy_groups_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_group *drop_grp;
+ u32 *flow_group_in;
+ int err = 0;
+
+ err = esw_acl_egress_vlan_grp_create(esw, vport);
+ if (err)
+ return err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ drop_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
+ if (IS_ERR(drop_grp)) {
+ err = PTR_ERR(drop_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto drop_grp_err;
+ }
+
+ vport->egress.legacy.drop_grp = drop_grp;
+ kvfree(flow_group_in);
+ return 0;
+
+drop_grp_err:
+ kvfree(flow_group_in);
+alloc_err:
+ esw_acl_egress_vlan_grp_destroy(vport);
+ return err;
+}
+
+static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_grp)) {
+ mlx5_destroy_flow_group(vport->egress.legacy.drop_grp);
+ vport->egress.legacy.drop_grp = NULL;
+ }
+ esw_acl_egress_vlan_grp_destroy(vport);
+}
+
+int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_destination drop_ctr_dst = {};
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_fc *drop_counter = NULL;
+ struct mlx5_flow_act flow_act = {};
+ /* The egress acl table contains 2 rules:
+ * 1) Allow traffic with vlan_tag=vst_vlan_id.
+ * 2) Drop all other traffic.
+ */
+ int table_size = 2;
+ int dest_num = 0;
+ int err = 0;
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+ drop_counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(drop_counter))
+ esw_warn(esw->dev,
+ "vport[%d] configure egress drop rule counter err(%ld)\n",
+ vport->vport, PTR_ERR(drop_counter));
+ vport->egress.legacy.drop_counter = drop_counter;
+ }
+
+ esw_acl_egress_lgcy_rules_destroy(vport);
+
+ if (!vport->info.vlan && !vport->info.qos) {
+ esw_acl_egress_lgcy_cleanup(esw, vport);
+ return 0;
+ }
+
+ if (!IS_ERR_OR_NULL(vport->egress.acl))
+ return 0;
+
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ table_size);
+ if (IS_ERR_OR_NULL(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ goto out;
+ }
+
+ err = esw_acl_egress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
+
+ esw_debug(esw->dev,
+ "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->info.vlan, vport->info.qos);
+
+ /* Allowed vlan rule */
+ err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+ if (err)
+ goto out;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach egress drop flow counter */
+ if (!IS_ERR_OR_NULL(drop_counter)) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
+ vport->egress.legacy.drop_rule =
+ mlx5_add_flow_rules(vport->egress.acl, NULL,
+ &flow_act, dst, dest_num);
+ if (IS_ERR(vport->egress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->egress.legacy.drop_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress drop rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.legacy.drop_rule = NULL;
+ goto out;
+ }
+
+ return err;
+
+out:
+ esw_acl_egress_lgcy_cleanup(esw, vport);
+ return err;
+}
+
+void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->egress.acl))
+ goto clean_drop_counter;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
+
+ esw_acl_egress_lgcy_rules_destroy(vport);
+ esw_acl_egress_lgcy_groups_destroy(vport);
+ esw_acl_egress_table_destroy(vport);
+
+clean_drop_counter:
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) {
+ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+ vport->egress.legacy.drop_counter = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
new file mode 100644
index 000000000000..07b2acd7e6b3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+#include "ofld.h"
+
+static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
+{
+ if (!vport->egress.offloads.fwd_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->egress.offloads.fwd_rule);
+ vport->egress.offloads.fwd_rule = NULL;
+}
+
+static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct mlx5_flow_destination *fwd_dest)
+{
+ struct mlx5_flow_act flow_act = {};
+ int err = 0;
+
+ esw_debug(esw->dev, "vport(%d) configure egress acl rule fwd2vport(%d)\n",
+ vport->vport, fwd_dest->vport.num);
+
+ /* Delete the old egress forward-to-vport rule if any */
+ esw_acl_egress_ofld_fwd2vport_destroy(vport);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ vport->egress.offloads.fwd_rule =
+ mlx5_add_flow_rules(vport->egress.acl, NULL,
+ &flow_act, fwd_dest, 1);
+ if (IS_ERR(vport->egress.offloads.fwd_rule)) {
+ err = PTR_ERR(vport->egress.offloads.fwd_rule);
+ esw_warn(esw->dev,
+ "vport(%d) failed to add fwd2vport acl rule err(%d)\n",
+ vport->vport, err);
+ vport->egress.offloads.fwd_rule = NULL;
+ }
+
+ return err;
+}
+
+static int esw_acl_egress_ofld_rules_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct mlx5_flow_destination *fwd_dest)
+{
+ int err = 0;
+ int action;
+
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
+ /* For prio tag mode, there is only 1 FTE:
+ * 1) prio tag packets - pop the prio tag VLAN, then allow.
+ * Unmatched traffic is allowed by default.
+ */
+ esw_debug(esw->dev,
+ "vport[%d] configure prio tag egress rules\n", vport->vport);
+
+ action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ action |= fwd_dest ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ /* prio tag vlan rule - pop it so vport receives untagged packets */
+ err = esw_egress_acl_vlan_create(esw, vport, fwd_dest, 0, action);
+ if (err)
+ goto prio_err;
+ }
+
+ if (fwd_dest) {
+ err = esw_acl_egress_ofld_fwd2vport_create(esw, vport, fwd_dest);
+ if (err)
+ goto fwd_err;
+ }
+
+ return 0;
+
+fwd_err:
+ esw_acl_egress_vlan_destroy(vport);
+prio_err:
+ return err;
+}
+
+static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport)
+{
+ esw_acl_egress_vlan_destroy(vport);
+ esw_acl_egress_ofld_fwd2vport_destroy(vport);
+}
+
+static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fwd_grp;
+ u32 *flow_group_in;
+ u32 flow_index = 0;
+ int ret = 0;
+
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
+ ret = esw_acl_egress_vlan_grp_create(esw, vport);
+ if (ret)
+ return ret;
+
+ flow_index++;
+ }
+
+ if (!mlx5_esw_acl_egress_fwd2vport_supported(esw))
+ goto out;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ ret = -ENOMEM;
+ goto fwd_grp_err;
+ }
+
+ /* This group holds 1 FTE that forwards all packets to the other
+ * vport when vport bonding is supported.
+ */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+ fwd_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
+ if (IS_ERR(fwd_grp)) {
+ ret = PTR_ERR(fwd_grp);
+ esw_warn(esw->dev,
+ "Failed to create vport[%d] egress fwd2vport flow group, err(%d)\n",
+ vport->vport, ret);
+ kvfree(flow_group_in);
+ goto fwd_grp_err;
+ }
+ vport->egress.offloads.fwd_grp = fwd_grp;
+ kvfree(flow_group_in);
+ return 0;
+
+fwd_grp_err:
+ esw_acl_egress_vlan_grp_destroy(vport);
+out:
+ return ret;
+}
+
+static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.offloads.fwd_grp)) {
+ mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp);
+ vport->egress.offloads.fwd_grp = NULL;
+ }
+ esw_acl_egress_vlan_grp_destroy(vport);
+}
+
+int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ int table_size = 0;
+ int err;
+
+ if (!mlx5_esw_acl_egress_fwd2vport_supported(esw) &&
+ !MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ return 0;
+
+ esw_acl_egress_ofld_rules_destroy(vport);
+
+ if (mlx5_esw_acl_egress_fwd2vport_supported(esw))
+ table_size++;
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ table_size++;
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
+ if (IS_ERR_OR_NULL(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ return err;
+ }
+
+ err = esw_acl_egress_ofld_groups_create(esw, vport);
+ if (err)
+ goto group_err;
+
+ esw_debug(esw->dev, "vport[%d] configure egress rules\n", vport->vport);
+
+ err = esw_acl_egress_ofld_rules_create(esw, vport, NULL);
+ if (err)
+ goto rules_err;
+
+ return 0;
+
+rules_err:
+ esw_acl_egress_ofld_groups_destroy(vport);
+group_err:
+ esw_acl_egress_table_destroy(vport);
+ return err;
+}
+
+void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport)
+{
+ esw_acl_egress_ofld_rules_destroy(vport);
+ esw_acl_egress_ofld_groups_destroy(vport);
+ esw_acl_egress_table_destroy(vport);
+}
+
+int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
+ u16 passive_vport_num)
+{
+ struct mlx5_vport *passive_vport = mlx5_eswitch_get_vport(esw, passive_vport_num);
+ struct mlx5_vport *active_vport = mlx5_eswitch_get_vport(esw, active_vport_num);
+ struct mlx5_flow_destination fwd_dest = {};
+
+ if (IS_ERR(active_vport))
+ return PTR_ERR(active_vport);
+ if (IS_ERR(passive_vport))
+ return PTR_ERR(passive_vport);
+
+ /* Cleanup and recreate rules WITHOUT fwd2vport of active vport */
+ esw_acl_egress_ofld_rules_destroy(active_vport);
+ esw_acl_egress_ofld_rules_create(esw, active_vport, NULL);
+
+ /* Cleanup and recreate all rules + fwd2vport rule of passive vport to forward */
+ esw_acl_egress_ofld_rules_destroy(passive_vport);
+ fwd_dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ fwd_dest.vport.num = active_vport_num;
+ fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ fwd_dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+
+ return esw_acl_egress_ofld_rules_create(esw, passive_vport, &fwd_dest);
+}
+
+int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ esw_acl_egress_ofld_rules_destroy(vport);
+ return esw_acl_egress_ofld_rules_create(esw, vport, NULL);
+}
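/* Usage sketch for the bond/unbond pair above (a hypothetical caller,
 * e.g. a LAG failover handler): while bonded, the passive vport's
 * egress traffic is forwarded to the active vport; unbond restores the
 * standalone rules.
 */
static int sketch_lag_failover(struct mlx5_eswitch *esw,
			       u16 active_vport, u16 passive_vport)
{
	int err;

	err = mlx5_esw_acl_egress_vport_bond(esw, active_vport,
					     passive_vport);
	if (err)
		return err;

	/* Later, when the bond is torn down: */
	return mlx5_esw_acl_egress_vport_unbond(esw, passive_vport);
}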
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
new file mode 100644
index 000000000000..22f4c1c28006
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+
+struct mlx5_flow_table *
+esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ int acl_supported;
+ int vport_index;
+ int err;
+
+ acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ?
+ MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) :
+ MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support);
+
+ if (!acl_supported)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
+ ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
+
+ vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
+ vport_num);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, size, 0, vport_num);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "vport[%d] create %s ACL table, err(%d)\n", vport_num,
+ ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress", err);
+ }
+ return acl;
+}
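/* Usage sketch (mirrors the legacy egress setup earlier in this patch):
 * callers pick the ACL namespace and table size, and must treat the
 * return value as an ERR_PTR.
 */
static int sketch_create_egress_acl(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	vport->egress.acl = esw_acl_table_create(esw, vport->vport,
						 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						 2 /* allow + drop */);
	if (IS_ERR(vport->egress.acl)) {
		int err = PTR_ERR(vport->egress.acl);

		vport->egress.acl = NULL;
		return err;
	}
	return 0;
}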
+
+int esw_egress_acl_vlan_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct mlx5_flow_destination *fwd_dest,
+ u16 vlan_id, u32 flow_action)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ if (vport->egress.allowed_vlan)
+ return -EEXIST;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = flow_action;
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, fwd_dest, 0);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ }
+
+ kvfree(spec);
+ return err;
+}
+
+void esw_acl_egress_vlan_destroy(struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+ mlx5_del_flow_rules(vport->egress.allowed_vlan);
+ vport->egress.allowed_vlan = NULL;
+ }
+}
+
+int esw_acl_egress_vlan_grp_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *vlan_grp;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int ret = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ vlan_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
+ if (IS_ERR(vlan_grp)) {
+ ret = PTR_ERR(vlan_grp);
+ esw_warn(esw->dev,
+ "Failed to create E-Switch vport[%d] egress pop vlans flow group, err(%d)\n",
+ vport->vport, ret);
+ goto out;
+ }
+ vport->egress.vlan_grp = vlan_grp;
+
+out:
+ kvfree(flow_group_in);
+ return ret;
+}
+
+void esw_acl_egress_vlan_grp_destroy(struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.vlan_grp)) {
+ mlx5_destroy_flow_group(vport->egress.vlan_grp);
+ vport->egress.vlan_grp = NULL;
+ }
+}
+
+void esw_acl_egress_table_destroy(struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ mlx5_destroy_flow_table(vport->egress.acl);
+ vport->egress.acl = NULL;
+}
+
+void esw_acl_ingress_table_destroy(struct mlx5_vport *vport)
+{
+ if (!vport->ingress.acl)
+ return;
+
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+}
+
+void esw_acl_ingress_allow_rule_destroy(struct mlx5_vport *vport)
+{
+ if (!vport->ingress.allow_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->ingress.allow_rule);
+ vport->ingress.allow_rule = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
new file mode 100644
index 000000000000..8dc4cab66a71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#ifndef __MLX5_ESWITCH_ACL_HELPER_H__
+#define __MLX5_ESWITCH_ACL_HELPER_H__
+
+#include "eswitch.h"
+
+/* General acl helper functions */
+struct mlx5_flow_table *
+esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size);
+
+/* Egress acl helper functions */
+void esw_acl_egress_table_destroy(struct mlx5_vport *vport);
+int esw_egress_acl_vlan_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ struct mlx5_flow_destination *fwd_dest,
+ u16 vlan_id, u32 flow_action);
+void esw_acl_egress_vlan_destroy(struct mlx5_vport *vport);
+int esw_acl_egress_vlan_grp_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_egress_vlan_grp_destroy(struct mlx5_vport *vport);
+
+/* Ingress acl helper functions */
+void esw_acl_ingress_table_destroy(struct mlx5_vport *vport);
+void esw_acl_ingress_allow_rule_destroy(struct mlx5_vport *vport);
+
+#endif /* __MLX5_ESWITCH_ACL_HELPER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
new file mode 100644
index 000000000000..9bda4fe2eafa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+#include "lgcy.h"
+
+static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport)
+{
+ if (vport->ingress.legacy.drop_rule) {
+ mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
+ vport->ingress.legacy.drop_rule = NULL;
+ }
+ esw_acl_ingress_allow_rule_destroy(vport);
+}
+
+static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto spoof_err;
+ }
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
+ vport->vport, err);
+ goto untagged_err;
+ }
+ vport->ingress.legacy.allow_untagged_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto allow_spoof_err;
+ }
+ vport->ingress.legacy.allow_spoofchk_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto drop_err;
+ }
+ vport->ingress.legacy.drop_grp = g;
+ kvfree(flow_group_in);
+ return 0;
+
+drop_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+ }
+allow_spoof_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+untagged_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+spoof_err:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
+{
+ if (vport->ingress.legacy.allow_spoofchk_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+ if (vport->ingress.legacy.drop_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
+ vport->ingress.legacy.drop_grp = NULL;
+ }
+}
+
+int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_destination drop_ctr_dst = {};
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec = NULL;
+ struct mlx5_fc *counter = NULL;
+ /* The ingress acl table contains 4 groups
+ * (2 rules are active at the same time:
+ * 1 allow rule from one of the first 3 groups and
+ * 1 drop rule from the last group):
+ * 1) Allow untagged traffic with smac=original mac.
+ * 2) Allow untagged traffic.
+ * 3) Allow traffic with smac=original mac.
+ * 4) Drop all other traffic.
+ */
+ int table_size = 4;
+ int dest_num = 0;
+ int err = 0;
+ u8 *smac_v;
+
+ esw_acl_ingress_lgcy_rules_destroy(vport);
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+ counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(counter)) {
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress drop rule counter failed\n",
+ vport->vport);
+ /* Don't store or attach an ERR_PTR; the drop rule
+ * simply goes uncounted.
+ */
+ counter = NULL;
+ }
+ vport->ingress.legacy.drop_counter = counter;
+ }
+
+ if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
+ return 0;
+ }
+
+ if (!vport->ingress.acl) {
+ vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ table_size);
+ if (IS_ERR_OR_NULL(vport->ingress.acl)) {
+ /* PTR_ERR(NULL) is 0; don't return success for a missing table */
+ err = vport->ingress.acl ? PTR_ERR(vport->ingress.acl) : -ENOMEM;
+ vport->ingress.acl = NULL;
+ return err;
+ }
+
+ err = esw_acl_ingress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
+ }
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->info.vlan, vport->info.qos);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (vport->info.vlan || vport->info.qos)
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+
+ if (vport->info.spoofchk) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.smac_15_0);
+ smac_v = MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, vport->info.mac);
+ }
+
+ /* Create ingress allow rule from the vlan/smac criteria built above */
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ goto out;
+ }
+
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ /* Attach drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter_id = mlx5_fc_id(counter);
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
+ vport->ingress.legacy.drop_rule =
+ mlx5_add_flow_rules(vport->ingress.acl, NULL,
+ &flow_act, dst, dest_num);
+ if (IS_ERR(vport->ingress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->ingress.legacy.drop_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress drop rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.legacy.drop_rule = NULL;
+ goto out;
+ }
+ kvfree(spec);
+ return 0;
+
+out:
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
+ kvfree(spec);
+ return err;
+}
+
+void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->ingress.acl))
+ goto clean_drop_counter;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
+
+ esw_acl_ingress_lgcy_rules_destroy(vport);
+ esw_acl_ingress_lgcy_groups_destroy(vport);
+ esw_acl_ingress_table_destroy(vport);
+
+clean_drop_counter:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_counter)) {
+ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+ vport->ingress.legacy.drop_counter = NULL;
+ }
+}
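+
+/* Usage sketch (illustrative; mirrors the eswitch.c legacy-mode callers
+ * updated later in this patch):
+ *
+ *     err = esw_acl_ingress_lgcy_setup(esw, vport);
+ *     if (err)
+ *             goto ingress_err;
+ *     err = esw_acl_egress_lgcy_setup(esw, vport);
+ *     if (err)
+ *             esw_acl_ingress_lgcy_cleanup(esw, vport);
+ *
+ * setup() is safe to call repeatedly: it drops any stale rules first and
+ * rebuilds them from the current vport->info (vlan, qos, spoofchk, mac).
+ */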
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
new file mode 100644
index 000000000000..4e55d7225a26
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+#include "ofld.h"
+
+static bool
+esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
+{
+ return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+ mlx5_eswitch_is_vf_vport(esw, vport->vport));
+}
+
+static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ /* For prio tag mode, there is only 1 FTE:
+ * 1) Untagged packets - push prio tag VLAN and modify metadata if
+ * required, allow
+ * Unmatched traffic is allowed by default
+ */
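+ /* A prio tag is an 802.1Q header with VID 0: pushing it (ethtype 0x8100,
+ * vid 0, prio 0, as set below) keeps the frame logically untagged while
+ * letting the fabric carry PCP priority bits.
+ */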
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Untagged packets - push prio tag VLAN, allow */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ flow_act.vlan[0].ethtype = ETH_P_8021Q;
+ flow_act.vlan[0].vid = 0;
+ flow_act.vlan[0].prio = 0;
+
+ if (vport->ingress.offloads.modify_metadata_rule) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ }
+
+ vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress untagged allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ }
+
+ kvfree(spec);
+ return err;
+}
+
+static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_flow_act flow_act = {};
+ int err = 0;
+ u32 key;
+
+ key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
+ key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+ MLX5_SET(set_action_in, action, data, key);
+ MLX5_SET(set_action_in, action, offset,
+ ESW_SOURCE_PORT_METADATA_OFFSET);
+ MLX5_SET(set_action_in, action, length,
+ ESW_SOURCE_PORT_METADATA_BITS);
+
+ vport->ingress.offloads.modify_metadata =
+ mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ 1, action);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata);
+ esw_warn(esw->dev,
+ "failed to alloc modify header for vport %d ingress acl (%d)\n",
+ vport->vport, err);
+ return err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ vport->ingress.offloads.modify_metadata_rule =
+ mlx5_add_flow_rules(vport->ingress.acl,
+ NULL, &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
+ esw_warn(esw->dev,
+ "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
+ vport->vport, err);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
+ vport->ingress.offloads.modify_metadata_rule = NULL;
+ }
+ return err;
+}
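+
+/* Illustrative sketch of what the modify header above programs (not extra
+ * code): a single SET action on reg_c_0, so every packet admitted by this
+ * vport's ingress ACL is stamped with the vport's source-port metadata:
+ *
+ * reg_c_0[OFFSET + BITS - 1 : OFFSET] = key
+ *
+ * where OFFSET/BITS are ESW_SOURCE_PORT_METADATA_{OFFSET,BITS} and key is
+ * mlx5_eswitch_get_vport_metadata_for_match() shifted down by OFFSET.
+ */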
+
+static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!vport->ingress.offloads.modify_metadata_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
+ vport->ingress.offloads.modify_metadata_rule = NULL;
+}
+
+static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int err;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ err = esw_acl_ingress_mod_metadata_create(esw, vport);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport(%d) create ingress modify metadata, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+ }
+
+ if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
+ err = esw_acl_ingress_prio_tag_create(esw, vport);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport(%d) create ingress prio tag rule, err(%d)\n",
+ vport->vport, err);
+ goto prio_tag_err;
+ }
+ }
+
+ return 0;
+
+prio_tag_err:
+ esw_acl_ingress_mod_metadata_destroy(esw, vport);
+ return err;
+}
+
+static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ esw_acl_ingress_allow_rule_destroy(vport);
+ esw_acl_ingress_mod_metadata_destroy(esw, vport);
+}
+
+static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+ u32 flow_index = 0;
+ int ret = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
+ /* This group holds the FTE that matches untagged packets when
+ * prio_tag is enabled.
+ */
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in, match_criteria);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
+ esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
+ vport->vport, ret);
+ goto prio_tag_err;
+ }
+ vport->ingress.offloads.metadata_prio_tag_grp = g;
+ flow_index++;
+ }
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ /* This group holds a match-all FTE that adds metadata: it catches
+ * tagged packets if prio-tag is enabled (untagged ones already hit
+ * the group above), or all traffic if prio-tag is disabled.
+ */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
+ esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+ vport->vport, ret);
+ goto metadata_err;
+ }
+ vport->ingress.offloads.metadata_allmatch_grp = g;
+ }
+
+ kvfree(flow_group_in);
+ return 0;
+
+metadata_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+ vport->ingress.offloads.metadata_prio_tag_grp = NULL;
+ }
+prio_tag_err:
+ kvfree(flow_group_in);
+ return ret;
+}
+
+static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
+{
+ if (vport->ingress.offloads.metadata_allmatch_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
+ vport->ingress.offloads.metadata_allmatch_grp = NULL;
+ }
+
+ if (vport->ingress.offloads.metadata_prio_tag_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+ vport->ingress.offloads.metadata_prio_tag_grp = NULL;
+ }
+}
+
+int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int num_ftes = 0;
+ int err;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ !esw_acl_ingress_prio_tag_enabled(esw, vport))
+ return 0;
+
+ esw_acl_ingress_allow_rule_destroy(vport);
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+ num_ftes++;
+ if (esw_acl_ingress_prio_tag_enabled(esw, vport))
+ num_ftes++;
+
+ vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ num_ftes);
+ if (IS_ERR_OR_NULL(vport->ingress.acl)) {
+ /* PTR_ERR(NULL) is 0; don't return success for a missing table */
+ err = vport->ingress.acl ? PTR_ERR(vport->ingress.acl) : -ENOMEM;
+ vport->ingress.acl = NULL;
+ return err;
+ }
+
+ err = esw_acl_ingress_ofld_groups_create(esw, vport);
+ if (err)
+ goto group_err;
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules\n", vport->vport);
+
+ err = esw_acl_ingress_ofld_rules_create(esw, vport);
+ if (err)
+ goto rules_err;
+
+ return 0;
+
+rules_err:
+ esw_acl_ingress_ofld_groups_destroy(vport);
+group_err:
+ esw_acl_ingress_table_destroy(vport);
+ return err;
+}
+
+void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ esw_acl_ingress_ofld_rules_destroy(esw, vport);
+ esw_acl_ingress_ofld_groups_destroy(vport);
+ esw_acl_ingress_table_destroy(vport);
+}
+
+/* Caller must hold rtnl_lock */
+int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 metadata)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+ int err;
+
+ if (WARN_ON_ONCE(IS_ERR(vport))) {
+ esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+ /* vport is an ERR_PTR here; bail out before the "out" label
+ * dereferences it.
+ */
+ return PTR_ERR(vport);
+ }
+
+ esw_acl_ingress_ofld_rules_destroy(esw, vport);
+
+ vport->metadata = metadata ? metadata : vport->default_metadata;
+
+ /* Recreate ingress acl rules with vport->metadata */
+ err = esw_acl_ingress_ofld_rules_create(esw, vport);
+ if (err)
+ goto out;
+
+ return 0;
+
+out:
+ vport->metadata = vport->default_metadata;
+ return err;
+}
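+
+/* Usage sketch (hypothetical caller, under rtnl_lock): on bond failover a
+ * passive slave's vport can be re-stamped with the active vport's metadata,
+ * and restored by passing metadata == 0, which falls back to
+ * vport->default_metadata:
+ *
+ *     mlx5_esw_acl_ingress_vport_bond_update(esw, vport_num, active_md);
+ *     ...
+ *     mlx5_esw_acl_ingress_vport_bond_update(esw, vport_num, 0);
+ */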
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h
new file mode 100644
index 000000000000..44c152da3d83
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#ifndef __MLX5_ESWITCH_ACL_LGCY_H__
+#define __MLX5_ESWITCH_ACL_LGCY_H__
+
+#include "eswitch.h"
+
+/* Eswitch acl egress external APIs */
+int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+/* Eswitch acl ingress external APIs */
+int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+#endif /* __MLX5_ESWITCH_ACL_LGCY_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
new file mode 100644
index 000000000000..c57869b93d60
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#ifndef __MLX5_ESWITCH_ACL_OFLD_H__
+#define __MLX5_ESWITCH_ACL_OFLD_H__
+
+#include "eswitch.h"
+
+/* Eswitch acl egress external APIs */
+int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
+int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
+ u16 passive_vport_num);
+int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num);
+
+static inline bool mlx5_esw_acl_egress_fwd2vport_supported(struct mlx5_eswitch *esw)
+{
+ return esw && esw->mode == MLX5_ESWITCH_OFFLOADS &&
+ mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ MLX5_CAP_ESW_FLOWTABLE(esw->dev, egress_acl_forward_to_vport);
+}
+
+/* Eswitch acl ingress external APIs */
+int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 metadata);
+
+#endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
index 029001040737..d5bf908dfecd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
@@ -274,7 +274,7 @@ mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
static int
create_fdb_chain_restore(struct fdb_chain *fdb_chain)
{
- char modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)];
+ char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
struct mlx5_eswitch *esw = fdb_chain->esw;
struct mlx5_modify_hdr *mod_hdr;
u32 index;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
index f8c4239846ea..7679ac359e31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
@@ -6,6 +6,8 @@
#include "eswitch.h"
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
bool
mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
bool
@@ -46,4 +48,21 @@ void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
int
mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level) {}
+
+static inline struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) { return ERR_PTR(-EOPNOTSUPP); }
+
+static inline int mlx5_esw_chains_create(struct mlx5_eswitch *esw) { return 0; }
+static inline void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#endif /* __ML5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7f618a443bfd..1116ab9bea6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+#include "esw/acl/lgcy.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
@@ -84,8 +85,7 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
- int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
- int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
void *nic_vport_ctx;
MLX5_SET(modify_nic_vport_context_in, in,
@@ -108,40 +108,24 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}
/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *in, int inlen)
+ bool other_vport, void *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
-
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
-
- MLX5_SET(query_esw_vport_context_in, in, opcode,
- MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
- MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
@@ -170,8 +154,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in,
field_select.vport_cvlan_insert, 1);
- return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
- in, sizeof(in));
+ return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}
/* E-Switch FDB */
@@ -954,512 +937,6 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
-int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_group *vlan_grp = NULL;
- struct mlx5_flow_group *drop_grp = NULL;
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *acl;
- void *match_criteria;
- u32 *flow_group_in;
- /* The egress acl table contains 2 rules:
- * 1)Allow traffic with vlan_tag=vst_vlan_id
- * 2)Drop all other traffic.
- */
- int table_size = 2;
- int err = 0;
-
- if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- return -EOPNOTSUPP;
-
- if (!IS_ERR_OR_NULL(vport->egress.acl))
- return 0;
-
- esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
- vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
-
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- mlx5_eswitch_vport_num_to_index(esw, vport->vport));
- if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
- return -EOPNOTSUPP;
- }
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
- return -ENOMEM;
-
- acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR(acl)) {
- err = PTR_ERR(acl);
- esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
- vport->vport, err);
- goto out;
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
- vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR(vlan_grp)) {
- err = PTR_ERR(vlan_grp);
- esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
- vport->vport, err);
- goto out;
- }
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
- drop_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR(drop_grp)) {
- err = PTR_ERR(drop_grp);
- esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
- vport->vport, err);
- goto out;
- }
-
- vport->egress.acl = acl;
- vport->egress.drop_grp = drop_grp;
- vport->egress.allowed_vlans_grp = vlan_grp;
-out:
- kvfree(flow_group_in);
- if (err && !IS_ERR_OR_NULL(vlan_grp))
- mlx5_destroy_flow_group(vlan_grp);
- if (err && !IS_ERR_OR_NULL(acl))
- mlx5_destroy_flow_table(acl);
- return err;
-}
-
-void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
- mlx5_del_flow_rules(vport->egress.allowed_vlan);
- vport->egress.allowed_vlan = NULL;
- }
-
- if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
- mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
- vport->egress.legacy.drop_rule = NULL;
- }
-}
-
-void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (IS_ERR_OR_NULL(vport->egress.acl))
- return;
-
- esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
-
- esw_vport_cleanup_egress_rules(esw, vport);
- mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
- mlx5_destroy_flow_group(vport->egress.drop_grp);
- mlx5_destroy_flow_table(vport->egress.acl);
- vport->egress.allowed_vlans_grp = NULL;
- vport->egress.drop_grp = NULL;
- vport->egress.acl = NULL;
-}
-
-static int
-esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_group *g;
- void *match_criteria;
- u32 *flow_group_in;
- int err;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
- return -ENOMEM;
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
- vport->vport, err);
- goto spoof_err;
- }
- vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
- vport->vport, err);
- goto untagged_err;
- }
- vport->ingress.legacy.allow_untagged_only_grp = g;
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
- vport->vport, err);
- goto allow_spoof_err;
- }
- vport->ingress.legacy.allow_spoofchk_only_grp = g;
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
- vport->vport, err);
- goto drop_err;
- }
- vport->ingress.legacy.drop_grp = g;
- kvfree(flow_group_in);
- return 0;
-
-drop_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
- vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
- }
-allow_spoof_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
- vport->ingress.legacy.allow_untagged_only_grp = NULL;
- }
-untagged_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
- vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
- }
-spoof_err:
- kvfree(flow_group_in);
- return err;
-}
-
-int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport, int table_size)
-{
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *acl;
- int vport_index;
- int err;
-
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- return -EOPNOTSUPP;
-
- esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
- vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
- vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport_index);
- if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
- vport->vport);
- return -EOPNOTSUPP;
- }
-
- acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR(acl)) {
- err = PTR_ERR(acl);
- esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
- vport->vport, err);
- return err;
- }
- vport->ingress.acl = acl;
- return 0;
-}
-
-void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
-{
- if (!vport->ingress.acl)
- return;
-
- mlx5_destroy_flow_table(vport->ingress.acl);
- vport->ingress.acl = NULL;
-}
-
-void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (vport->ingress.legacy.drop_rule) {
- mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
- vport->ingress.legacy.drop_rule = NULL;
- }
-
- if (vport->ingress.allow_rule) {
- mlx5_del_flow_rules(vport->ingress.allow_rule);
- vport->ingress.allow_rule = NULL;
- }
-}
-
-static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (!vport->ingress.acl)
- return;
-
- esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
-
- esw_vport_cleanup_ingress_rules(esw, vport);
- if (vport->ingress.legacy.allow_spoofchk_only_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
- vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
- }
- if (vport->ingress.legacy.allow_untagged_only_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
- vport->ingress.legacy.allow_untagged_only_grp = NULL;
- }
- if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
- vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
- }
- if (vport->ingress.legacy.drop_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
- vport->ingress.legacy.drop_grp = NULL;
- }
- esw_vport_destroy_ingress_acl_table(vport);
-}
-
-static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
- struct mlx5_flow_destination drop_ctr_dst = {0};
- struct mlx5_flow_destination *dst = NULL;
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec = NULL;
- int dest_num = 0;
- int err = 0;
- u8 *smac_v;
-
- /* The ingress acl table contains 4 groups
- * (2 active rules at the same time -
- * 1 allow rule from one of the first 3 groups.
- * 1 drop rule from the last group):
- * 1)Allow untagged traffic with smac=original mac.
- * 2)Allow untagged traffic.
- * 3)Allow traffic with smac=original mac.
- * 4)Drop all other traffic.
- */
- int table_size = 4;
-
- esw_vport_cleanup_ingress_rules(esw, vport);
-
- if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
- esw_vport_disable_legacy_ingress_acl(esw, vport);
- return 0;
- }
-
- if (!vport->ingress.acl) {
- err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
- if (err) {
- esw_warn(esw->dev,
- "vport[%d] enable ingress acl err (%d)\n",
- err, vport->vport);
- return err;
- }
-
- err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
- if (err)
- goto out;
- }
-
- esw_debug(esw->dev,
- "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->info.vlan, vport->info.qos);
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out;
- }
-
- if (vport->info.vlan || vport->info.qos)
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
-
- if (vport->info.spoofchk) {
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
- smac_v = MLX5_ADDR_OF(fte_match_param,
- spec->match_value,
- outer_headers.smac_47_16);
- ether_addr_copy(smac_v, vport->info.mac);
- }
-
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- vport->ingress.allow_rule =
- mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.allow_rule)) {
- err = PTR_ERR(vport->ingress.allow_rule);
- esw_warn(esw->dev,
- "vport[%d] configure ingress allow rule, err(%d)\n",
- vport->vport, err);
- vport->ingress.allow_rule = NULL;
- goto out;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
-
- /* Attach drop flow counter */
- if (counter) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
- dst = &drop_ctr_dst;
- dest_num++;
- }
- vport->ingress.legacy.drop_rule =
- mlx5_add_flow_rules(vport->ingress.acl, NULL,
- &flow_act, dst, dest_num);
- if (IS_ERR(vport->ingress.legacy.drop_rule)) {
- err = PTR_ERR(vport->ingress.legacy.drop_rule);
- esw_warn(esw->dev,
- "vport[%d] configure ingress drop rule, err(%d)\n",
- vport->vport, err);
- vport->ingress.legacy.drop_rule = NULL;
- goto out;
- }
- kvfree(spec);
- return 0;
-
-out:
- esw_vport_disable_legacy_ingress_acl(esw, vport);
- kvfree(spec);
- return err;
-}
-
-int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- u16 vlan_id, u32 flow_action)
-{
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_spec *spec;
- int err = 0;
-
- if (vport->egress.allowed_vlan)
- return -EEXIST;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
-
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = flow_action;
- vport->egress.allowed_vlan =
- mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->egress.allowed_vlan)) {
- err = PTR_ERR(vport->egress.allowed_vlan);
- esw_warn(esw->dev,
- "vport[%d] configure egress vlan rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.allowed_vlan = NULL;
- }
-
- kvfree(spec);
- return err;
-}
-
-static int esw_vport_egress_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
- struct mlx5_flow_destination drop_ctr_dst = {0};
- struct mlx5_flow_destination *dst = NULL;
- struct mlx5_flow_act flow_act = {0};
- int dest_num = 0;
- int err = 0;
-
- esw_vport_cleanup_egress_rules(esw, vport);
-
- if (!vport->info.vlan && !vport->info.qos) {
- esw_vport_disable_egress_acl(esw, vport);
- return 0;
- }
-
- err = esw_vport_enable_egress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable egress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
- }
-
- esw_debug(esw->dev,
- "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->info.vlan, vport->info.qos);
-
- /* Allowed vlan rule */
- err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
- MLX5_FLOW_CONTEXT_ACTION_ALLOW);
- if (err)
- return err;
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
-
- /* Attach egress drop flow counter */
- if (counter) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
- dst = &drop_ctr_dst;
- dest_num++;
- }
- vport->egress.legacy.drop_rule =
- mlx5_add_flow_rules(vport->egress.acl, NULL,
- &flow_act, dst, dest_num);
- if (IS_ERR(vport->egress.legacy.drop_rule)) {
- err = PTR_ERR(vport->egress.legacy.drop_rule);
- esw_warn(esw->dev,
- "vport[%d] configure egress drop rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.legacy.drop_rule = NULL;
- }
-
- return err;
-}
-
static bool element_type_supported(struct mlx5_eswitch *esw, int type)
{
const struct mlx5_core_dev *dev = esw->dev;
@@ -1671,44 +1148,19 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
if (mlx5_esw_is_manager_vport(esw, vport->vport))
return 0;
- if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
- vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
- if (IS_ERR(vport->ingress.legacy.drop_counter)) {
- esw_warn(esw->dev,
- "vport[%d] configure ingress drop rule counter failed\n",
- vport->vport);
- vport->ingress.legacy.drop_counter = NULL;
- }
- }
-
- ret = esw_vport_ingress_config(esw, vport);
+ ret = esw_acl_ingress_lgcy_setup(esw, vport);
if (ret)
goto ingress_err;
- if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
- vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
- if (IS_ERR(vport->egress.legacy.drop_counter)) {
- esw_warn(esw->dev,
- "vport[%d] configure egress drop rule counter failed\n",
- vport->vport);
- vport->egress.legacy.drop_counter = NULL;
- }
- }
-
- ret = esw_vport_egress_config(esw, vport);
+ ret = esw_acl_egress_lgcy_setup(esw, vport);
if (ret)
goto egress_err;
return 0;
egress_err:
- esw_vport_disable_legacy_ingress_acl(esw, vport);
- mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
- vport->egress.legacy.drop_counter = NULL;
-
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
- mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
- vport->ingress.legacy.drop_counter = NULL;
return ret;
}
@@ -1728,13 +1180,8 @@ static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
if (mlx5_esw_is_manager_vport(esw, vport->vport))
return;
- esw_vport_disable_egress_acl(esw, vport);
- mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
- vport->egress.legacy.drop_counter = NULL;
-
- esw_vport_disable_legacy_ingress_acl(esw, vport);
- mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
- vport->ingress.legacy.drop_counter = NULL;
+ esw_acl_egress_lgcy_cleanup(esw, vport);
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
}
static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
@@ -1901,7 +1348,7 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
MLX5_SET(query_esw_functions_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
if (!err)
return out;
@@ -2280,7 +1727,10 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
hash_init(esw->offloads.encap_tbl);
mutex_init(&esw->offloads.mod_hdr.lock);
hash_init(esw->offloads.mod_hdr.hlist);
+ mutex_init(&esw->offloads.decap_tbl_lock);
+ hash_init(esw->offloads.decap_tbl);
atomic64_set(&esw->offloads.num_flows, 0);
+ ida_init(&esw->offloads.vport_metadata_ida);
mutex_init(&esw->state_lock);
mutex_init(&esw->mode_lock);
@@ -2319,8 +1769,10 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_offloads_cleanup_reps(esw);
mutex_destroy(&esw->mode_lock);
mutex_destroy(&esw->state_lock);
+ ida_destroy(&esw->offloads.vport_metadata_ida);
mutex_destroy(&esw->offloads.mod_hdr.lock);
mutex_destroy(&esw->offloads.encap_tbl_lock);
+ mutex_destroy(&esw->offloads.decap_tbl_lock);
kfree(esw->vports);
kfree(esw);
}
@@ -2363,7 +1815,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
ether_addr_copy(evport->info.mac, mac);
evport->info.node_guid = node_guid;
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
- err = esw_vport_ingress_config(esw, evport);
+ err = esw_acl_ingress_lgcy_setup(esw, evport);
unlock:
mutex_unlock(&esw->state_lock);
@@ -2445,10 +1897,10 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
evport->info.vlan = vlan;
evport->info.qos = qos;
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
- err = esw_vport_ingress_config(esw, evport);
+ err = esw_acl_ingress_lgcy_setup(esw, evport);
if (err)
return err;
- err = esw_vport_egress_config(esw, evport);
+ err = esw_acl_egress_lgcy_setup(esw, evport);
}
return err;
@@ -2490,7 +1942,7 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
"Spoofchk in set while MAC is invalid, vport(%d)\n",
evport->vport);
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
- err = esw_vport_ingress_config(esw, evport);
+ err = esw_acl_ingress_lgcy_setup(esw, evport);
if (err)
evport->info.spoofchk = pschk;
mutex_unlock(&esw->state_lock);
@@ -2749,7 +2201,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
if (!vport->enabled)
goto unlock;
- if (vport->egress.legacy.drop_counter)
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
&stats->rx_dropped, &bytes);
@@ -2783,8 +2235,8 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
- struct mlx5_vport_drop_stats stats = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
+ struct mlx5_vport_drop_stats stats = {};
int err = 0;
u32 *out;
@@ -2801,7 +2253,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
- err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
if (err)
goto free_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index c1848b57f61c..a5175e98c0b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -99,13 +99,19 @@ struct vport_ingress {
struct vport_egress {
struct mlx5_flow_table *acl;
- struct mlx5_flow_group *allowed_vlans_grp;
- struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
- struct {
- struct mlx5_flow_handle *drop_rule;
- struct mlx5_fc *drop_counter;
- } legacy;
+ struct mlx5_flow_group *vlan_grp;
+ union {
+ struct {
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+ } legacy;
+ struct {
+ struct mlx5_flow_group *fwd_grp;
+ struct mlx5_flow_handle *fwd_rule;
+ } offloads;
+ };
};
struct mlx5_vport_drop_stats {
@@ -143,6 +149,8 @@ struct mlx5_vport {
struct vport_ingress ingress;
struct vport_egress egress;
+ u32 default_metadata;
+ u32 metadata;
struct mlx5_vport_info info;
@@ -209,6 +217,8 @@ struct mlx5_esw_offload {
struct mutex peer_mutex;
struct mutex encap_tbl_lock; /* protects encap_tbl */
DECLARE_HASHTABLE(encap_tbl, 8);
+ struct mutex decap_tbl_lock; /* protects decap_tbl */
+ DECLARE_HASHTABLE(decap_tbl, 8);
struct mod_hdr_tbl mod_hdr;
DECLARE_HASHTABLE(termtbl_tbl, 8);
struct mutex termtbl_mutex; /* protects termtbl hash */
@@ -216,6 +226,7 @@ struct mlx5_esw_offload {
u8 inline_mode;
atomic64_t num_flows;
enum devlink_eswitch_encap_mode encap;
+ struct ida vport_metadata_ida;
};
/* E-Switch MC FDB table hash node */
@@ -283,18 +294,10 @@ void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
-void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- int table_size);
-void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
-void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
+
+u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
+void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
+
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
u32 rate_mbps);
@@ -329,11 +332,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *in, int inlen);
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *out, int outlen);
+ bool other_vport, void *in);
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
@@ -436,6 +435,7 @@ struct mlx5_esw_flow_attr {
struct mlx5_flow_table *fdb;
struct mlx5_flow_table *dest_ft;
struct mlx5_ct_attr ct_attr;
+ struct mlx5_pkt_reformat *decap_pkt_reformat;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
@@ -459,10 +459,6 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
-int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- u16 vlan_id, u32 flow_action);
-
static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
return esw->qos.enabled;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 5d9def18ae3a..060354bb211a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -31,12 +31,14 @@
*/
#include <linux/etherdevice.h>
+#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "esw/acl/ofld.h"
#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
@@ -234,13 +236,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
-static bool
-esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
- const struct mlx5_vport *vport)
-{
- return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
- mlx5_eswitch_is_vf_vport(esw, vport->vport));
-}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@@ -366,6 +361,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
}
}
+
+ if (attr->decap_pkt_reformat)
+ flow_act.pkt_reformat = attr->decap_pkt_reformat;
+
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[i].counter_id = mlx5_fc_id(attr->counter);
@@ -784,7 +783,8 @@ static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
+ u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
u8 curr, wanted;
int err;
@@ -792,8 +792,9 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
- err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
- out, sizeof(out));
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
if (err)
return err;
@@ -808,14 +809,12 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
else
curr &= ~wanted;
- MLX5_SET(modify_esw_vport_context_in, in,
+ MLX5_SET(modify_esw_vport_context_in, min,
esw_vport_context.fdb_to_vport_reg_c_id, curr);
-
- MLX5_SET(modify_esw_vport_context_in, in,
+ MLX5_SET(modify_esw_vport_context_in, min,
field_select.fdb_to_vport_reg_c_id, 1);
- err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
- sizeof(in));
+ err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
if (!err) {
if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
@@ -1468,7 +1467,7 @@ query_vports:
out:
*mode = mlx5_mode;
return 0;
-}
+}
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
@@ -1484,7 +1483,7 @@ static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
- u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+ u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
@@ -1727,7 +1726,9 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
mlx5e_tc_clean_fdb_peer_flows(esw);
+#endif
esw_del_fdb_peer_miss_rules(esw);
}
@@ -1845,279 +1846,6 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
-static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
- int err = 0;
-
- /* For prio tag mode, there is only 1 FTEs:
- * 1) Untagged packets - push prio tag VLAN and modify metadata if
- * required, allow
- * Unmatched traffic is allowed by default
- */
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- /* Untagged packets - push prio tag VLAN, allow */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
- MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- flow_act.vlan[0].ethtype = ETH_P_8021Q;
- flow_act.vlan[0].vid = 0;
- flow_act.vlan[0].prio = 0;
-
- if (vport->ingress.offloads.modify_metadata_rule) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
- }
-
- vport->ingress.allow_rule =
- mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.allow_rule)) {
- err = PTR_ERR(vport->ingress.allow_rule);
- esw_warn(esw->dev,
- "vport[%d] configure ingress untagged allow rule, err(%d)\n",
- vport->vport, err);
- vport->ingress.allow_rule = NULL;
- }
-
- kvfree(spec);
- return err;
-}
-
-static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
- struct mlx5_flow_act flow_act = {};
- int err = 0;
- u32 key;
-
- key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
- key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
-
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, action, field,
- MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
- MLX5_SET(set_action_in, action, data, key);
- MLX5_SET(set_action_in, action, offset,
- ESW_SOURCE_PORT_METADATA_OFFSET);
- MLX5_SET(set_action_in, action, length,
- ESW_SOURCE_PORT_METADATA_BITS);
-
- vport->ingress.offloads.modify_metadata =
- mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- 1, action);
- if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
- err = PTR_ERR(vport->ingress.offloads.modify_metadata);
- esw_warn(esw->dev,
- "failed to alloc modify header for vport %d ingress acl (%d)\n",
- vport->vport, err);
- return err;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
- vport->ingress.offloads.modify_metadata_rule =
- mlx5_add_flow_rules(vport->ingress.acl,
- NULL, &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
- err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
- esw_warn(esw->dev,
- "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
- vport->vport, err);
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
- vport->ingress.offloads.modify_metadata_rule = NULL;
- }
- return err;
-}
-
-static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (vport->ingress.offloads.modify_metadata_rule) {
- mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
-
- vport->ingress.offloads.modify_metadata_rule = NULL;
- }
-}
-
-static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_group *g;
- void *match_criteria;
- u32 *flow_group_in;
- u32 flow_index = 0;
- int ret = 0;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
- return -ENOMEM;
-
- if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
- /* This group is to hold FTE to match untagged packets when prio_tag
- * is enabled.
- */
- memset(flow_group_in, 0, inlen);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- flow_group_in, match_criteria);
- MLX5_SET(create_flow_group_in, flow_group_in,
- match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- ret = PTR_ERR(g);
- esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
- vport->vport, ret);
- goto prio_tag_err;
- }
- vport->ingress.offloads.metadata_prio_tag_grp = g;
- flow_index++;
- }
-
- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- /* This group holds an FTE with no matches for add metadata for
- * tagged packets, if prio-tag is enabled (as a fallthrough),
- * or all traffic in case prio-tag is disabled.
- */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- ret = PTR_ERR(g);
- esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
- vport->vport, ret);
- goto metadata_err;
- }
- vport->ingress.offloads.metadata_allmatch_grp = g;
- }
-
- kvfree(flow_group_in);
- return 0;
-
-metadata_err:
- if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
- mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
- vport->ingress.offloads.metadata_prio_tag_grp = NULL;
- }
-prio_tag_err:
- kvfree(flow_group_in);
- return ret;
-}
-
-static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
-{
- if (vport->ingress.offloads.metadata_allmatch_grp) {
- mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
- vport->ingress.offloads.metadata_allmatch_grp = NULL;
- }
-
- if (vport->ingress.offloads.metadata_prio_tag_grp) {
- mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
- vport->ingress.offloads.metadata_prio_tag_grp = NULL;
- }
-}
-
-static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int num_ftes = 0;
- int err;
-
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
- !esw_check_ingress_prio_tag_enabled(esw, vport))
- return 0;
-
- esw_vport_cleanup_ingress_rules(esw, vport);
-
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- num_ftes++;
- if (esw_check_ingress_prio_tag_enabled(esw, vport))
- num_ftes++;
-
- err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
- if (err) {
- esw_warn(esw->dev,
- "failed to enable ingress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
- }
-
- err = esw_vport_create_ingress_acl_group(esw, vport);
- if (err)
- goto group_err;
-
- esw_debug(esw->dev,
- "vport[%d] configure ingress rules\n", vport->vport);
-
- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
- if (err)
- goto metadata_err;
- }
-
- if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
- err = esw_vport_ingress_prio_tag_config(esw, vport);
- if (err)
- goto prio_tag_err;
- }
- return 0;
-
-prio_tag_err:
- esw_vport_del_ingress_acl_modify_metadata(esw, vport);
-metadata_err:
- esw_vport_destroy_ingress_acl_group(vport);
-group_err:
- esw_vport_destroy_ingress_acl_table(vport);
- return err;
-}
-
-static int esw_vport_egress_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int err;
-
- if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
- return 0;
-
- esw_vport_cleanup_egress_rules(esw, vport);
-
- err = esw_vport_enable_egress_acl(esw, vport);
- if (err)
- return err;
-
- /* For prio tag mode, there is only 1 FTEs:
- * 1) prio tag packets - pop the prio tag VLAN, allow
- * Unmatched traffic is allowed by default
- */
- esw_debug(esw->dev,
- "vport[%d] configure prio tag egress rules\n", vport->vport);
-
- /* prio tag vlan rule - pop it so VF receives untagged packets */
- err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
- MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
- MLX5_FLOW_CONTEXT_ACTION_ALLOW);
- if (err)
- esw_vport_disable_egress_acl(esw, vport);
-
- return err;
-}
-
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
@@ -2150,25 +1878,83 @@ static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
esw_check_vport_match_metadata_supported(esw);
}
+u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
+{
+ u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
+ u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
+ u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ u32 start;
+ u32 end;
+ int id;
+
+ /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
+ WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
+
+ /* Trim vhca_id to ESW_VHCA_ID_BITS */
+ vhca_id &= vhca_id_mask;
+
+ start = (vhca_id << ESW_VPORT_BITS);
+ end = start + num_vports;
+ if (!vhca_id)
+ start += 1; /* zero is reserved/invalid metadata */
+ id = ida_alloc_range(&esw->offloads.vport_metadata_ida, start, end, GFP_KERNEL);
+
+ return (id < 0) ? 0 : id;
+}
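
The allocator above carves one disjoint metadata range per function out of a single IDA: the (trimmed) vhca_id selects the high bits, the per-vport index fills the low bits, and value 0 is held back as "invalid". A minimal user-space sketch of the same range arithmetic — the 8+8 bit split is an assumption matching the driver's ESW_VHCA_ID_BITS/ESW_VPORT_BITS defaults of this era:

	#include <stdint.h>
	#include <stdio.h>

	#define VHCA_ID_BITS 8
	#define VPORT_BITS   8

	static void metadata_range(uint32_t vhca_id, uint32_t *start, uint32_t *end)
	{
		uint32_t num_vports = ((1u << VPORT_BITS) - 1) - 1; /* GENMASK() - 1 */

		vhca_id &= (1u << VHCA_ID_BITS) - 1;	/* trim to VHCA_ID_BITS */
		*start = vhca_id << VPORT_BITS;
		*end = *start + num_vports;
		if (!vhca_id)
			*start += 1;			/* 0 is reserved/invalid */
	}

	int main(void)
	{
		uint32_t s, e;

		metadata_range(0, &s, &e);
		printf("vhca 0 -> [%u, %u]\n", s, e);	/* [1, 254]    */
		metadata_range(3, &s, &e);
		printf("vhca 3 -> [%u, %u]\n", s, e);	/* [768, 1022] */
		return 0;
	}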
+
+void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
+{
+ ida_free(&esw->offloads.vport_metadata_ida, metadata);
+}
+
+static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ return 0;
+
+ vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+ vport->metadata = vport->default_metadata;
+ return vport->metadata ? 0 : -ENOSPC;
+}
+
+static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata)
+ return;
+
+ WARN_ON(vport->metadata != vport->default_metadata);
+ mlx5_esw_match_metadata_free(esw, vport->default_metadata);
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int err;
- err = esw_vport_ingress_config(esw, vport);
+ err = esw_offloads_vport_metadata_setup(esw, vport);
if (err)
- return err;
+ goto metadata_err;
+
+ err = esw_acl_ingress_ofld_setup(esw, vport);
+ if (err)
+ goto ingress_err;
if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
- err = esw_vport_egress_config(esw, vport);
- if (err) {
- esw_vport_cleanup_ingress_rules(esw, vport);
- esw_vport_del_ingress_acl_modify_metadata(esw, vport);
- esw_vport_destroy_ingress_acl_group(vport);
- esw_vport_destroy_ingress_acl_table(vport);
- }
+ err = esw_acl_egress_ofld_setup(esw, vport);
+ if (err)
+ goto egress_err;
}
+
+ return 0;
+
+egress_err:
+ esw_acl_ingress_ofld_cleanup(esw, vport);
+ingress_err:
+ esw_offloads_vport_metadata_cleanup(esw, vport);
+metadata_err:
return err;
}
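
The rewritten create path above is the standard kernel goto-unwind idiom: each acquired resource gets a label, and a failure at step N jumps to the label that releases steps N-1..1 in reverse order, so there is exactly one cleanup sequence to keep correct. The generic shape, with illustrative names:

	int setup_abc(struct ctx *c)
	{
		int err;

		err = setup_a(c);
		if (err)
			goto a_err;	/* nothing acquired yet */
		err = setup_b(c);
		if (err)
			goto b_err;	/* undo A only */
		err = setup_c(c);
		if (err)
			goto c_err;	/* undo B, then A */
		return 0;

	c_err:
		cleanup_b(c);
	b_err:
		cleanup_a(c);
	a_err:
		return err;
	}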
@@ -2176,11 +1962,9 @@ void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_cleanup_ingress_rules(esw, vport);
- esw_vport_del_ingress_acl_modify_metadata(esw, vport);
- esw_vport_destroy_ingress_acl_group(vport);
- esw_vport_destroy_ingress_acl_table(vport);
+ esw_acl_egress_ofld_cleanup(vport);
+ esw_acl_ingress_ofld_cleanup(esw, vport);
+ esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
@@ -2846,38 +2630,11 @@ EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num)
{
- u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
- u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
- u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
- u32 val;
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
- /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
- WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
-
- /* Trim vhca_id to ESW_VHCA_ID_BITS */
- vhca_id &= vhca_id_mask;
-
- /* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they
- * don't overlap with VF numbers, and themselves, after trimming.
- */
- WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
- vport_num_mask - 1);
- WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
- vport_num_mask - 1);
- WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
- (MLX5_VPORT_ECPF & vport_num_mask));
-
- /* Make sure that the VF vport_num fits ESW_VPORT_BITS and don't
- * overlap with pf and ecpf.
- */
- if (vport_num != MLX5_VPORT_UPLINK &&
- vport_num != MLX5_VPORT_ECPF)
- WARN_ON_ONCE(vport_num >= vport_num_mask - 1);
-
- /* We can now trim vport_num to ESW_VPORT_BITS */
- vport_num &= vport_num_mask;
+ if (WARN_ON_ONCE(IS_ERR(vport)))
+ return 0;
- val = (vhca_id << ESW_VPORT_BITS) | vport_num;
- return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
+ return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
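
With the per-vport value now stored on the vport struct, the match helper reduces to a lookup plus a left shift that parks the metadata in the top ESW_SOURCE_PORT_METADATA_BITS of the 32-bit register, leaving the low bits free for other users. A one-line sketch, assuming the 16-bit metadata width implied above:

	#define SOURCE_PORT_METADATA_BITS 16	/* assumed: VHCA_ID_BITS + VPORT_BITS */

	static uint32_t metadata_for_match(uint32_t metadata)
	{
		return metadata << (32 - SOURCE_PORT_METADATA_BITS);
	}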
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 8bcf3426b9c6..3ce17c3d7a00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -346,8 +346,10 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
events->dev = dev;
dev->priv.events = events;
events->wq = create_singlethread_workqueue("mlx5_events");
- if (!events->wq)
+ if (!events->wq) {
+ kfree(events);
return -ENOMEM;
+ }
INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
return 0;
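
This small change plugs a leak on the error path: once create_singlethread_workqueue() fails, nothing else holds a reference to the freshly allocated events struct, so it must be freed right there. The general alloc-then-dependent-resource shape, with an illustrative struct:

	struct thing *thing_create(void)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			return NULL;

		t->wq = create_singlethread_workqueue("thing_wq");
		if (!t->wq) {
			kfree(t);	/* undo the first allocation */
			return NULL;
		}
		return t;
	}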
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index c0fd2212e890..9a37077152aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -31,7 +31,6 @@
*/
#include <linux/etherdevice.h>
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
@@ -143,15 +142,15 @@ int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc,
u32 *fpga_qpn)
{
- u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)];
+ u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {};
int ret;
MLX5_SET(fpga_create_qp_in, in, opcode, MLX5_CMD_OP_FPGA_CREATE_QP);
memcpy(MLX5_ADDR_OF(fpga_create_qp_in, in, fpga_qpc), fpga_qpc,
MLX5_FLD_SZ_BYTES(fpga_create_qp_in, fpga_qpc));
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, fpga_create_qp, in, out);
if (ret)
return ret;
@@ -165,8 +164,7 @@ int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
enum mlx5_fpga_qpc_field_select fields,
void *fpga_qpc)
{
- u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_modify_qp_out)];
+ u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {};
MLX5_SET(fpga_modify_qp_in, in, opcode, MLX5_CMD_OP_FPGA_MODIFY_QP);
MLX5_SET(fpga_modify_qp_in, in, field_select, fields);
@@ -174,20 +172,20 @@ int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
memcpy(MLX5_ADDR_OF(fpga_modify_qp_in, in, fpga_qpc), fpga_qpc,
MLX5_FLD_SZ_BYTES(fpga_modify_qp_in, fpga_qpc));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, fpga_modify_qp, in);
}
int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
u32 fpga_qpn, void *fpga_qpc)
{
- u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)];
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {};
int ret;
MLX5_SET(fpga_query_qp_in, in, opcode, MLX5_CMD_OP_FPGA_QUERY_QP);
MLX5_SET(fpga_query_qp_in, in, fpga_qpn, fpga_qpn);
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, fpga_query_qp, in, out);
if (ret)
return ret;
@@ -198,20 +196,19 @@ int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn)
{
- u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_destroy_qp_out)];
+ u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {};
MLX5_SET(fpga_destroy_qp_in, in, opcode, MLX5_CMD_OP_FPGA_DESTROY_QP);
MLX5_SET(fpga_destroy_qp_in, in, fpga_qpn, fpga_qpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, fpga_destroy_qp, in);
}
int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
bool clear, struct mlx5_fpga_qp_counters *data)
{
- u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)];
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {};
int ret;
MLX5_SET(fpga_query_qp_counters_in, in, opcode,
@@ -219,7 +216,7 @@ int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
MLX5_SET(fpga_query_qp_counters_in, in, clear, clear);
MLX5_SET(fpga_query_qp_counters_in, in, fpga_qpn, fpga_qpn);
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, fpga_query_qp_counters, in, out);
if (ret)
return ret;
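
Every conversion in this file follows the same recipe: delete the hand-rolled out[] buffer and the explicit sizeof arguments, and let mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() derive both mailbox sizes from the command name. Roughly how those wrappers expand — simplified from the mlx5 headers of this era, so treat it as a sketch rather than the exact definition:

	#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                 \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in),    \
			      out, MLX5_ST_SZ_BYTES(ifc_cmd##_out))

	#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                         \
	({                                                                 \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};               \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);               \
	})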
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 61021133029e..182d3ac3e73f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -165,7 +165,7 @@ static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
MLX5_OPCODE_SEND);
- ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));
+ ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.qpn << 8));
conn->qp.sq.pc++;
conn->qp.sq.bufs[ix] = buf;
@@ -362,23 +362,6 @@ static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
}
-static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
- enum mlx5_event event)
-{
- struct mlx5_fpga_conn *conn;
-
- conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
- mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
-}
-
-static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
-{
- struct mlx5_fpga_conn *conn;
-
- conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
- mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
-}
-
static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
unsigned int budget)
{
@@ -493,7 +476,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
*conn->cq.mcq.arm_db = 0;
conn->cq.mcq.vector = 0;
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
- conn->cq.mcq.event = mlx5_fpga_conn_cq_event;
conn->cq.mcq.irqn = irqn;
conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
@@ -534,8 +516,9 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
unsigned int tx_size, unsigned int rx_size)
{
struct mlx5_fpga_device *fdev = conn->fdev;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
+ u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
void *in = NULL, *qpc;
int err, inlen;
@@ -600,12 +583,13 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
- err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)
goto err_sq_bufs;
- conn->qp.mqp.event = mlx5_fpga_conn_event;
- mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);
+ conn->qp.qpn = MLX5_GET(create_qp_out, out, qpn);
+ mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.qpn);
goto out;
@@ -658,7 +642,13 @@ static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
{
- mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
+ struct mlx5_core_dev *dev = conn->fdev->mdev;
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, conn->qp.qpn);
+ mlx5_cmd_exec_in(dev, destroy_qp, in);
+
mlx5_fpga_conn_free_recv_bufs(conn);
mlx5_fpga_conn_flush_send_bufs(conn);
kvfree(conn->qp.sq.bufs);
@@ -666,30 +656,29 @@ static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
mlx5_wq_destroy(&conn->qp.wq_ctrl);
}
-static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
{
struct mlx5_core_dev *mdev = conn->fdev->mdev;
+ u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};
+
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.qpn);
- mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);
+ MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
+ MLX5_SET(qp_2rst_in, in, qpn, conn->qp.qpn);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
- &conn->qp.mqp);
+ return mlx5_cmd_exec_in(mdev, qp_2rst, in);
}
-static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
{
+ u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
struct mlx5_fpga_device *fdev = conn->fdev;
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 *qpc = NULL;
- int err;
+ u32 *qpc;
- mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.qpn);
- qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
- if (!qpc) {
- err = -ENOMEM;
- goto out;
- }
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
@@ -700,32 +689,22 @@ static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
- &conn->qp.mqp);
- if (err) {
- mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
- goto out;
- }
+ MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, in, qpn, conn->qp.qpn);
-out:
- kfree(qpc);
- return err;
+ return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}
-static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
{
+ u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
struct mlx5_fpga_device *fdev = conn->fdev;
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 *qpc = NULL;
- int err;
+ u32 *qpc;
mlx5_fpga_dbg(conn->fdev, "QP RTR\n");
- qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
- if (!qpc) {
- err = -ENOMEM;
- goto out;
- }
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
@@ -745,33 +724,22 @@ static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
- &conn->qp.mqp);
- if (err) {
- mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
- goto out;
- }
+ MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, in, qpn, conn->qp.qpn);
-out:
- kfree(qpc);
- return err;
+ return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}
-static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
{
struct mlx5_fpga_device *fdev = conn->fdev;
+ u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 *qpc = NULL;
- u32 opt_mask;
- int err;
+ u32 *qpc;
mlx5_fpga_dbg(conn->fdev, "QP RTS\n");
- qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
- if (!qpc) {
- err = -ENOMEM;
- goto out;
- }
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
@@ -781,17 +749,11 @@ static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, retry_count, 7);
MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
- opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
- &conn->qp.mqp);
- if (err) {
- mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
- goto out;
- }
+ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, conn->qp.qpn);
+ MLX5_SET(rtr2rts_qp_in, in, opt_param_mask, MLX5_QP_OPTPAR_RNR_TIMEOUT);
-out:
- kfree(qpc);
- return err;
+ return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}
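
The four helpers above walk the standard InfiniBand QP state ladder — RESET -> INIT -> RTR (ready to receive) -> RTS (ready to send) — one firmware command per transition, each carrying the opcode and QP number plus an inline qpc where the transition needs attributes. Chained together (a condensed sketch; the driver's real connect path interleaves these calls with other setup):

	static int fpga_conn_to_rts(struct mlx5_fpga_conn *conn)
	{
		int err;

		err = mlx5_fpga_conn_reset_qp(conn);	/* any state -> RST */
		if (err)
			return err;
		err = mlx5_fpga_conn_init_qp(conn);	/* RST -> INIT */
		if (err)
			return err;
		err = mlx5_fpga_conn_rtr_qp(conn);	/* INIT -> RTR */
		if (err)
			return err;
		return mlx5_fpga_conn_rts_qp(conn);	/* RTR -> RTS */
	}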
static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
@@ -931,7 +893,7 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
- MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.qpn);
MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);
@@ -972,19 +934,11 @@ out:
void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
{
- struct mlx5_fpga_device *fdev = conn->fdev;
- struct mlx5_core_dev *mdev = fdev->mdev;
- int err = 0;
-
conn->qp.active = false;
tasklet_disable(&conn->cq.tasklet);
synchronize_irq(conn->cq.mcq.irqn);
mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
- &conn->qp.mqp);
- if (err)
- mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
mlx5_fpga_conn_destroy_qp(conn);
mlx5_fpga_conn_destroy_cq(conn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
index 634ae10e287b..5116e869a6e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
@@ -65,7 +65,7 @@ struct mlx5_fpga_conn {
int sgid_index;
struct mlx5_wq_qp wq;
struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5_core_qp mqp;
+ u32 qpn;
struct {
spinlock_t lock; /* Protects all SQ state */
unsigned int pc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index b794888fa3ba..b463787d6ca1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -65,6 +65,7 @@ struct mlx5_fpga_esp_xfrm;
struct mlx5_fpga_ipsec_sa_ctx {
struct rhash_head hash;
struct mlx5_ifc_fpga_ipsec_sa hw_sa;
+ u32 sa_handle;
struct mlx5_core_dev *dev;
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
};
@@ -119,6 +120,8 @@ struct mlx5_fpga_ipsec {
*/
struct rb_root rules_rb;
struct mutex rules_rb_lock; /* rules lock */
+
+ struct ida halloc;
};
static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
@@ -602,7 +605,7 @@ static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
const u32 *match_c,
const u32 *match_v)
{
- u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
+ u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
bool ipv6_flow;
ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
@@ -666,7 +669,8 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
+ const __be32 spi, bool is_ipv6,
+ u32 *sa_handle)
{
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
@@ -704,6 +708,17 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
goto exists;
}
+ if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
+ err = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL);
+ if (err < 0) {
+ context = ERR_PTR(err);
+ goto exists;
+ }
+
+ sa_ctx->sa_handle = err;
+ if (sa_handle)
+ *sa_handle = sa_ctx->sa_handle;
+ }
/* This is unbounded fpga_xfrm, try to add to hash */
mutex_lock(&fipsec->sa_hash_lock);
@@ -744,7 +759,8 @@ delete_hash:
rhash_sa));
unlock_hash:
mutex_unlock(&fipsec->sa_hash_lock);
-
+ if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
exists:
mutex_unlock(&fpga_xfrm->lock);
kfree(sa_ctx);
@@ -816,7 +832,7 @@ mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
/* create */
return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
saddr, daddr,
- spi, is_ipv6);
+ spi, is_ipv6, NULL);
}
static void
@@ -836,6 +852,10 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
return;
}
+ if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
+ MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+
mutex_lock(&fipsec->sa_hash_lock);
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
rhash_sa));
@@ -1299,6 +1319,8 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
goto err_destroy_hash;
}
+ ida_init(&fdev->ipsec->halloc);
+
return 0;
err_destroy_hash:
@@ -1331,6 +1353,7 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
if (!mlx5_fpga_is_ipsec_device(mdev))
return;
+ ida_destroy(&fdev->ipsec->halloc);
destroy_rules_rb(&fdev->ipsec->rules_rb);
rhashtable_destroy(&fdev->ipsec->sa_hash);
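
The new halloc IDA hands every decrypt SA a unique non-zero handle: ida_init() at init time, ida_simple_get() when the SA context is created, ida_simple_remove() on both the error path and release, and ida_destroy() at cleanup. The lifecycle in isolation:

	static struct ida halloc;	/* ida_init(&halloc) at init time */

	static int sa_handle_alloc(void)
	{
		/* allocate from [1, INT_MAX); 0 stays free as "invalid" */
		return ida_simple_get(&halloc, 1, 0, GFP_KERNEL);
	}

	static void sa_handle_free(int handle)
	{
		ida_simple_remove(&halloc, handle);
	}

	/* ida_destroy(&halloc) at teardown */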
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
index 382985e65b48..9ba637f0f0f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -37,6 +37,7 @@
#include "accel/ipsec.h"
#include "fs_cmd.h"
+#ifdef CONFIG_MLX5_FPGA_IPSEC
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
@@ -46,7 +47,8 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
- const __be32 spi, bool is_ipv6);
+ const __be32 spi, bool is_ipv6,
+ u32 *sa_handle);
void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
@@ -63,5 +65,17 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
+#else
+static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
-#endif /* __MLX5_FPGA_SADB_H__ */
+static inline const struct mlx5_flow_cmds *
+mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
+{
+ return mlx5_fs_cmd_get_default(type);
+}
+
+#endif /* CONFIG_MLX5_FPGA_IPSEC */
+#endif /* __MLX5_FPGA_IPSEC_H__ */
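
The header now uses the usual Kconfig stub idiom: real prototypes when CONFIG_MLX5_FPGA_IPSEC is set, static inline no-ops otherwise, so callers compile unconditionally and the dead calls are type-checked and then discarded. The general form, with illustrative names:

	#ifdef CONFIG_FOO
	u32 foo_device_caps(struct device *dev);
	#else
	static inline u32 foo_device_caps(struct device *dev)
	{
		return 0;	/* feature compiled out: no capabilities */
	}
	#endif /* CONFIG_FOO */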
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 22a2ef111514..29b7339ebfa3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -194,8 +194,8 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
MLX5_GET(tls_flow, flow, direction_sx));
}
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
- u64 rcd_sn)
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn)
{
struct mlx5_fpga_dma_buf *buf;
int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
index 3b2e37bf76fe..5714cf391d1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
@@ -68,7 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
return mdev->fpga->tls->caps;
}
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
- u64 rcd_sn);
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn);
#endif /* __MLX5_FPGA_TLS_H__ */
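
Switching the prototype from u32/u64 to __be32/__be64 changes no generated code; it documents that the handle and record sequence number already travel in big-endian form, letting sparse (make C=1) flag any use that mixes them with CPU-endian values without a conversion. For instance:

	static void fill_wire_field(__be32 *wire, u32 cpu_val)
	{
		*wire = cpu_to_be32(cpu_val);	/* OK: explicit conversion */
		/* '*wire = cpu_val;' would draw a sparse warning:
		 * incorrect type in assignment (different base types)
		 */
	}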
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 90048697b2ff..465a1076a477 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -155,8 +155,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
@@ -167,13 +166,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
- if (disconnect) {
+ if (disconnect)
MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
- MLX5_SET(set_flow_table_root_in, in, table_id, 0);
- } else {
- MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
+ else
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
- }
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
if (ft->vport) {
@@ -181,7 +177,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
@@ -192,8 +188,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
- u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
int err;
@@ -239,7 +235,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
break;
}
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
if (!err)
ft->id = MLX5_GET(create_flow_table_out, out,
table_id);
@@ -249,8 +245,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_table_in, in, opcode,
@@ -262,15 +257,14 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
}
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(modify_flow_table_in, in, opcode,
@@ -310,7 +304,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
}
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
@@ -318,8 +312,7 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
u32 *in,
struct mlx5_flow_group *fg)
{
- u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
struct mlx5_core_dev *dev = ns->dev;
int err;
@@ -332,7 +325,7 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
fg->id = MLX5_GET(create_flow_group_out, out,
group_id);
@@ -343,8 +336,7 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_group_in, in, opcode,
@@ -357,7 +349,7 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
@@ -600,8 +592,7 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
@@ -613,22 +604,22 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
MLX5_SET(delete_fte_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, delete_fte, in);
}
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
u32 *id)
{
- u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
int err;
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
if (!err)
*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
return err;
@@ -641,21 +632,20 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};
MLX5_SET(dealloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes)
{
u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+ MLX5_ST_SZ_BYTES(traffic_counter)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
void *stats;
int err = 0;
@@ -683,11 +673,10 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out)
{
int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
- MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
@@ -700,7 +689,7 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
- u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
struct mlx5_core_dev *dev = ns->dev;
void *packet_reformat_context_in;
int max_encap_size;
@@ -732,7 +721,6 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
reformat_data);
inlen = reformat - (void *)in + size;
- memset(in, 0, inlen);
MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
@@ -741,7 +729,6 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
reformat_type, reformat_type);
memcpy(reformat, reformat_data, size);
- memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
@@ -753,17 +740,15 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat *pkt_reformat)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
- memset(in, 0, sizeof(in));
MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
pkt_reformat->id);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
@@ -771,7 +756,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
void *modify_actions,
struct mlx5_modify_hdr *modify_hdr)
{
- u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
+ u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
int max_actions, actions_size, inlen, err;
struct mlx5_core_dev *dev = ns->dev;
void *actions_in;
@@ -796,6 +781,10 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
table_type = FS_FT_ESW_INGRESS_ACL;
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
+ table_type = FS_FT_RDMA_TX;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -806,7 +795,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
return -EOPNOTSUPP;
}
- actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
+ actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
in = kzalloc(inlen, GFP_KERNEL);
@@ -821,7 +810,6 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
memcpy(actions_in, modify_actions, actions_size);
- memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
@@ -832,17 +820,15 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
+ u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
- memset(in, 0, sizeof(in));
MLX5_SET(dealloc_modify_header_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
modify_hdr->id);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d5defe09339a..13e2fb79c21a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -254,7 +254,7 @@ static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
-/* Delete rule (destination) is special case that
+/* Deleting a rule (destination) is a special case that
* requires locking the FTE for the whole deletion process.
*/
static void del_sw_hw_rule(struct fs_node *node);
@@ -344,17 +344,12 @@ static void tree_put_node(struct fs_node *node, bool locked)
if (node->del_hw_func)
node->del_hw_func(node);
if (parent_node) {
- /* Only root namespace doesn't have parent and we just
- * need to free its node.
- */
down_write_ref_node(parent_node, locked);
list_del_init(&node->list);
- if (node->del_sw_func)
- node->del_sw_func(node);
- up_write_ref_node(parent_node, locked);
- } else {
- kfree(node);
}
+ node->del_sw_func(node);
+ if (parent_node)
+ up_write_ref_node(parent_node, locked);
node = NULL;
}
if (!node && parent_node)
@@ -384,6 +379,12 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
return NULL;
}
+static bool is_fwd_next_action(u32 action)
+{
+ return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
+}
+
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
int i;
@@ -468,8 +469,10 @@ static void del_sw_flow_table(struct fs_node *node)
fs_get_obj(ft, node);
rhltable_destroy(&ft->fgs_hash);
- fs_get_obj(prio, ft->node.parent);
- prio->num_ft--;
+ if (ft->node.parent) {
+ fs_get_obj(prio, ft->node.parent);
+ prio->num_ft--;
+ }
kfree(ft);
}
@@ -502,7 +505,7 @@ static void del_sw_hw_rule(struct fs_node *node)
fs_get_obj(rule, node);
fs_get_obj(fte, rule->node.parent);
trace_mlx5_fs_del_rule(rule);
- if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+ if (is_fwd_next_action(rule->sw_action)) {
mutex_lock(&rule->dest_attr.ft->lock);
list_del(&rule->next_ft);
mutex_unlock(&rule->dest_attr.ft->lock);
@@ -826,6 +829,36 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
return find_closest_ft(prio, true);
}
+static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
+ struct mlx5_flow_namespace *ns)
+{
+ struct mlx5_flow_namespace *root_ns = &root->ns;
+ struct fs_prio *iter_prio;
+ struct fs_prio *prio;
+
+ fs_get_obj(prio, ns->node.parent);
+ list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
+ if (iter_prio == prio &&
+ !list_is_last(&prio->node.children, &iter_prio->node.list))
+ return list_next_entry(iter_prio, node.list);
+ }
+ return NULL;
+}
+
+static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
+ struct mlx5_flow_act *flow_act)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ struct fs_prio *prio;
+
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
+ prio = find_fwd_ns_prio(root, ft->ns);
+ else
+ fs_get_obj(prio, ft->node.parent);
+
+ return (prio) ? find_next_chained_ft(prio) : NULL;
+}
+
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
struct fs_prio *prio,
struct mlx5_flow_table *ft)
@@ -976,6 +1009,10 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
mutex_unlock(&old_next_ft->lock);
list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
+ if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
+ iter->ft->ns == new_next_ft->ns)
+ continue;
+
err = _mlx5_modify_rule_destination(iter, &dest);
if (err)
pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
@@ -1077,6 +1114,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
next_ft = unmanaged ? ft_attr->next_ft :
find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
+ ft->ns = ns;
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
@@ -1755,11 +1793,13 @@ skip_search:
list_for_each_entry(iter, match_head, list) {
g = iter->g;
- if (!g->node.active)
- continue;
-
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ if (!g->node.active) {
+ up_write_ref_node(&g->node, false);
+ continue;
+ }
+
err = insert_fte(g, fte);
if (err) {
up_write_ref_node(&g->node, false);
@@ -1899,48 +1939,59 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
static const struct mlx5_flow_spec zero_spec = {};
- struct mlx5_flow_destination gen_dest = {};
+ struct mlx5_flow_destination *gen_dest = NULL;
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = flow_act->action;
- struct fs_prio *prio;
+ int i;
if (!spec)
spec = &zero_spec;
- fs_get_obj(prio, ft->node.parent);
- if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
- if (!fwd_next_prio_supported(ft))
- return ERR_PTR(-EOPNOTSUPP);
- if (num_dest)
- return ERR_PTR(-EINVAL);
- mutex_lock(&root->chain_lock);
- next_ft = find_next_chained_ft(prio);
- if (next_ft) {
- gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- gen_dest.ft = next_ft;
- dest = &gen_dest;
- num_dest = 1;
- flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- } else {
- mutex_unlock(&root->chain_lock);
- return ERR_PTR(-EOPNOTSUPP);
- }
- }
+ if (!is_fwd_next_action(sw_action))
+ return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
- handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
+ if (!fwd_next_prio_supported(ft))
+ return ERR_PTR(-EOPNOTSUPP);
- if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
- if (!IS_ERR_OR_NULL(handle) &&
- (list_empty(&handle->rule[0]->next_ft))) {
- mutex_lock(&next_ft->lock);
- list_add(&handle->rule[0]->next_ft,
- &next_ft->fwd_rules);
- mutex_unlock(&next_ft->lock);
- handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
- }
- mutex_unlock(&root->chain_lock);
- }
+ mutex_lock(&root->chain_lock);
+ next_ft = find_next_fwd_ft(ft, flow_act);
+ if (!next_ft) {
+ handle = ERR_PTR(-EOPNOTSUPP);
+ goto unlock;
+ }
+
+ gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
+ GFP_KERNEL);
+ if (!gen_dest) {
+ handle = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+ for (i = 0; i < num_dest; i++)
+ gen_dest[i] = dest[i];
+ gen_dest[i].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ gen_dest[i].ft = next_ft;
+ dest = gen_dest;
+ num_dest++;
+ flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
+ if (IS_ERR(handle))
+ goto unlock;
+
+ if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
+ mutex_lock(&next_ft->lock);
+ list_add(&handle->rule[num_dest - 1]->next_ft,
+ &next_ft->fwd_rules);
+ mutex_unlock(&next_ft->lock);
+ handle->rule[num_dest - 1]->sw_action = sw_action;
+ handle->rule[num_dest - 1]->ft = ft;
+ }
+unlock:
+ mutex_unlock(&root->chain_lock);
+ kfree(gen_dest);
return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
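
The rewritten mlx5_add_flow_rules() handles both FWD_NEXT_* flags by cloning the caller's destination array with one spare slot, appending the resolved next flow table as the last destination, and replacing the FWD_NEXT_* action bits with plain FWD_DEST before calling the internal add path. The copy-and-append step in isolation (helper name is illustrative):

	static struct mlx5_flow_destination *
	append_ft_dest(const struct mlx5_flow_destination *dest, int num_dest,
		       struct mlx5_flow_table *next_ft)
	{
		struct mlx5_flow_destination *gen_dest;
		int i;

		gen_dest = kcalloc(num_dest + 1, sizeof(*gen_dest), GFP_KERNEL);
		if (!gen_dest)
			return NULL;
		for (i = 0; i < num_dest; i++)
			gen_dest[i] = dest[i];		/* caller's entries */
		gen_dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		gen_dest[i].ft = next_ft;		/* appended forward dest */
		return gen_dest;
	}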
@@ -2351,6 +2402,17 @@ static int init_root_tree(struct mlx5_flow_steering *steering,
return 0;
}
+static void del_sw_root_ns(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_namespace *ns;
+
+ fs_get_obj(ns, node);
+ root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
+ mutex_destroy(&root_ns->chain_lock);
+ kfree(node);
+}
+
static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
enum fs_flow_table_type table_type)
@@ -2359,7 +2421,7 @@ static struct mlx5_flow_root_namespace
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
- if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
(table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
@@ -2377,7 +2439,7 @@ static struct mlx5_flow_root_namespace
ns = &root_ns->ns;
fs_init_namespace(ns);
mutex_init(&root_ns->chain_lock);
- tree_init_node(&ns->node, NULL, NULL);
+ tree_init_node(&ns->node, NULL, del_sw_root_ns);
tree_add_node(&ns->node, NULL);
return root_ns;
@@ -2943,7 +3005,8 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 508108c58dae..825b662f809b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -138,6 +138,7 @@ struct fs_node {
struct mlx5_flow_rule {
struct fs_node node;
+ struct mlx5_flow_table *ft;
struct mlx5_flow_destination dest_attr;
/* next_ft should be accessed under chain_lock and only if
* destination type is FWD_NEXT_FT.
@@ -175,6 +176,7 @@ struct mlx5_flow_table {
u32 flags;
struct rhltable fgs_hash;
enum mlx5_flow_table_miss_action def_miss_action;
+ struct mlx5_flow_namespace *ns;
};
struct mlx5_ft_underlay_qp {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 90e3d0233101..a5fbe7343508 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -31,7 +31,6 @@
*/
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/eswitch.h>
#include <linux/module.h>
#include "mlx5_core.h"
@@ -68,26 +67,19 @@ enum {
MCQI_FW_STORED_VERSION = 1,
};
-static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
- int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
-
- MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- err = mlx5_cmd_query_adapter(dev, out, outlen);
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+ err = mlx5_cmd_exec_inout(dev, query_adapter, in, out);
if (err)
goto out;
@@ -106,13 +98,15 @@ int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- err = mlx5_cmd_query_adapter(mdev, out, outlen);
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+ err = mlx5_cmd_exec_inout(mdev, query_adapter, in, out);
if (err)
goto out;
@@ -260,8 +254,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
- u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
int i;
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
@@ -272,16 +265,15 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
sw_owner_id[i]);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, init_hca, in);
}
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
- u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, teardown_hca, in);
}
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
@@ -316,8 +308,8 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
- u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
int state;
int ret;
@@ -330,7 +322,7 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
MLX5_SET(teardown_hca_in, in, profile,
MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, teardown_hca, in, out);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f99e1752d4e5..c0cfbab15fe9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -36,7 +36,6 @@
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 673aaa815f57..690b822c6152 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -160,45 +160,54 @@ int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5i_priv *ipriv = priv->ppriv;
- struct mlx5_core_qp *qp = &ipriv->qp;
- struct mlx5_qp_context *context;
int ret;
- /* QP states */
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
+ {
+ u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+ u32 *qpc;
- context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
- context->pri_path.port = 1;
- context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
- context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
- goto err_qp_modify_to_err;
- }
- memset(context, 0, sizeof(*context));
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
+ ipriv->pkey_index);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
- goto err_qp_modify_to_err;
+ MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
+ ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
+ if (ret)
+ goto err_qp_modify_to_err;
}
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
- goto err_qp_modify_to_err;
+ {
+ u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
+
+ MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
+ ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
+ if (ret)
+ goto err_qp_modify_to_err;
+ }
+ {
+ u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
+
+ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
+ ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
+ if (ret)
+ goto err_qp_modify_to_err;
}
-
- kfree(context);
return 0;
err_qp_modify_to_err:
- mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp);
- kfree(context);
+ {
+ u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};
+
+ MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
+ MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
+ mlx5_cmd_exec_in(mdev, qp_2err, in);
+ }
return ret;
}
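
Each transition in the rewritten mlx5i_init_underlay_qp() lives in its own braced block, so the on-stack in[] mailbox for one firmware command goes out of scope before the next is declared and the compiler can reuse the stack slot instead of keeping four buffers live at once. The shape, with illustrative commands:

	int walk_states(void)
	{
		int ret;

		{
			u32 in[64] = {};	/* mailbox for command 1 */
			ret = do_cmd1(in);
			if (ret)
				return ret;
		}				/* 'in' dead; slot reusable */
		{
			u32 in[64] = {};	/* mailbox for command 2 */
			ret = do_cmd2(in);
		}
		return ret;
	}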
@@ -206,30 +215,24 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_qp_context context;
- int err;
+ u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
- &ipriv->qp);
- if (err)
- mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
+ MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
+ MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
+ mlx5_cmd_exec_in(mdev, qp_2rst, in);
}
#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
-int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
- u32 *in = NULL;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
+ struct mlx5i_priv *ipriv = priv->ppriv;
void *addr_path;
int ret = 0;
- int inlen;
void *qpc;
- inlen = MLX5_ST_SZ_BYTES(create_qp_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
@@ -240,20 +243,28 @@ int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp
MLX5_SET(ads, addr_path, vhca_port_num, 1);
MLX5_SET(ads, addr_path, grh, 1);
- ret = mlx5_core_create_qp(mdev, qp, in, inlen);
- if (ret) {
- mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret);
- goto out;
- }
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
+ if (ret)
+ return ret;
-out:
- kvfree(in);
- return ret;
+ ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);
+
+ return 0;
}
-void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
{
- mlx5_core_destroy_qp(mdev, qp);
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qpn);
+ mlx5_cmd_exec_in(mdev, destroy_qp, in);
+}
+
+int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
+{
+ return mlx5e_refresh_tirs(priv, true, true);
}
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
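
mlx5i_update_nic_rx() exists so the IPoIB profiles can refresh their TIRs with self-loopback enabled for both unicast and multicast traffic, letting parent and child (pkey) interfaces on the same underlay port reach each other; the Ethernet profile keeps loopback disabled. A sketch of the contrast, assuming the two-flag mlx5e_refresh_tirs() signature used above and that the Ethernet counterpart keeps its disabled flags:

	/* Presumed Ethernet counterpart in en_main.c: no self-loopback. */
	int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
	{
		return mlx5e_refresh_tirs(priv, false, false);
	}

	/* IPoIB profile (above): unicast + multicast loopback enabled. */
	int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
	{
		return mlx5e_refresh_tirs(priv, true, true);
	}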
@@ -273,13 +284,13 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
struct mlx5i_priv *ipriv = priv->ppriv;
int err;
- err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+ err = mlx5i_create_underlay_qp(priv);
if (err) {
mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
return err;
}
- err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -288,7 +299,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return 0;
err_destroy_underlay_qp:
- mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
return err;
}
@@ -297,7 +308,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
struct mlx5i_priv *ipriv = priv->ppriv;
mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
- mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
@@ -396,7 +407,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv, true);
+ mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
@@ -412,7 +423,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
- mlx5e_destroy_indirect_tirs(priv, true);
+ mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
@@ -450,7 +461,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.cleanup_rx = mlx5i_cleanup_rx,
.enable = NULL, /* mlx5i_enable */
.disable = NULL, /* mlx5i_disable */
- .update_rx = mlx5e_update_nic_rx,
+ .update_rx = mlx5i_update_nic_rx,
.update_stats = NULL, /* mlx5i_update_stats */
.update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
@@ -500,12 +511,12 @@ int mlx5i_dev_init(struct net_device *dev)
struct mlx5i_priv *ipriv = priv->ppriv;
/* Set dev address using underlay QP */
- dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
- dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
- dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+ dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
+ dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff;
+ dev->dev_addr[3] = (ipriv->qpn) & 0xff;
/* Add QPN to net-device mapping to HT */
- mlx5i_pkey_add_qpn(dev ,ipriv->qp.qpn);
+ mlx5i_pkey_add_qpn(dev, ipriv->qpn);
return 0;
}
@@ -532,7 +543,7 @@ void mlx5i_dev_cleanup(struct net_device *dev)
mlx5i_uninit_underlay_qp(priv);
/* Delete QPN to net-device mapping from HT */
- mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
+ mlx5i_pkey_del_qpn(dev, ipriv->qpn);
}
static int mlx5i_open(struct net_device *netdev)
@@ -552,7 +563,7 @@ static int mlx5i_open(struct net_device *netdev)
goto err_clear_state_opened_flag;
}
- err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
if (err) {
mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
goto err_reset_qp;
@@ -569,7 +580,7 @@ static int mlx5i_open(struct net_device *netdev)
return 0;
err_remove_fs_underlay_qp:
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
@@ -595,7 +606,7 @@ static int mlx5i_close(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &epriv->state);
netif_carrier_off(epriv->netdev);
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
mlx5e_deactivate_priv_channels(epriv);
mlx5e_close_channels(&epriv->channels);
mlx5i_uninit_underlay_qp(epriv);
@@ -614,11 +625,12 @@ static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
struct mlx5i_priv *ipriv = epriv->ppriv;
int err;
- mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
- err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
+ mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+ gid->raw);
+ err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
if (err)
mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
- ipriv->qp.qpn, gid->raw);
+ ipriv->qpn, gid->raw);
if (set_qkey) {
mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
@@ -637,12 +649,13 @@ static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
struct mlx5i_priv *ipriv = epriv->ppriv;
int err;
- mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
+ mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+ gid->raw);
- err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
+ err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
if (err)
mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
- ipriv->qp.qpn, gid->raw);
+ ipriv->qpn, gid->raw);
return err;
}
@@ -655,7 +668,9 @@ static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
struct mlx5_ib_ah *mah = to_mah(address);
struct mlx5i_priv *ipriv = epriv->ppriv;
- return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+ mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+
+ return NETDEV_TX_OK;
}
static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
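
mlx5i_sq_xmit() now returns void, so the ndo always reports NETDEV_TX_OK: a failure inside the SQ path is expected to consume the skb and account the drop rather than bounce the packet back to the stack. A rough sketch of that contract (illustrative only, not the real xmit routine; the error condition is a stand-in):

	/* Sketch: the SQ xmit routine owns the skb unconditionally. */
	static void sketch_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
	{
		bool mapping_failed = false;  /* stand-in for real DMA-map errors */

		/* ... build the WQE, dma_map the fragments ... */
		if (unlikely(mapping_failed)) {
			sq->stats->dropped++;
			dev_kfree_skb_any(skb);
			return;         /* nothing useful to tell the caller */
		}
		/* post the WQE and ring the doorbell; completion frees skb later */
	}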
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index de7e01a027bb..79071a15c4ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -51,7 +51,7 @@ extern const struct ethtool_ops mlx5i_pkey_ethtool_ops;
/* ipoib rdma netdev's private data structure */
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
- struct mlx5_core_qp qp;
+ u32 qpn;
bool sub_interface;
u32 qkey;
u16 pkey_index;
@@ -62,8 +62,8 @@ struct mlx5i_priv {
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
/* Underlay QP create/destroy functions */
-int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
-void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
+int mlx5i_create_underlay_qp(struct mlx5e_priv *priv);
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn);
/* Underlay QP state modification init/uninit functions */
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv);
@@ -92,6 +92,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
void *ppriv);
void mlx5i_cleanup(struct mlx5e_priv *priv);
+int mlx5i_update_nic_rx(struct mlx5e_priv *priv);
+
/* Get child interface nic profile */
const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
@@ -110,19 +112,11 @@ struct mlx5i_tx_wqe {
struct mlx5_wqe_data_seg data[];
};
-static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
- struct mlx5i_tx_wqe **wqe,
- u16 pi)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memset(*wqe, 0, sizeof(**wqe));
-}
+#define MLX5I_SQ_FETCH_WQE(sq, pi) \
+ ((struct mlx5i_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5i_tx_wqe)))
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey,
- bool xmit_more);
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
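
The MLX5I_SQ_FETCH_WQE macro replaces the per-protocol inline with a common, size-aware helper: passing sizeof(struct mlx5i_tx_wqe) lets one function fetch and zero any WQE layout. The shared helper presumably has this shape (sketch following the en.h convention):

	static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi,
					    size_t wqe_size)
	{
		void *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

		/* zero exactly one WQE of the caller's layout */
		memset(wqe, 0, wqe_size);

		return wqe;
	}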
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 96e64187c089..f70367018862 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -204,13 +204,13 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_release_lock;
}
- err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
if (err) {
mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n", err);
goto err_unint_underlay_qp;
}
- err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]);
+ err = mlx5i_create_tis(mdev, ipriv->qpn, &epriv->tisn[0][0]);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
@@ -230,7 +230,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
err_clear_state_opened_flag:
mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
err_remove_rx_uderlay_qp:
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_unint_underlay_qp:
mlx5i_uninit_underlay_qp(epriv);
err_release_lock:
@@ -253,7 +253,7 @@ static int mlx5i_pkey_close(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
mlx5i_uninit_underlay_qp(priv);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -307,23 +307,20 @@ static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv)
static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv)
{
- struct mlx5i_priv *ipriv = priv->ppriv;
int err;
- err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
- if (err) {
+ err = mlx5i_create_underlay_qp(priv);
+ if (err)
mlx5_core_warn(priv->mdev, "create child underlay QP failed, %d\n", err);
- return err;
- }
- return 0;
+ return err;
}
static void mlx5i_pkey_cleanup_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
- mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
static int mlx5i_pkey_init_rx(struct mlx5e_priv *priv)
@@ -350,7 +347,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.cleanup_rx = mlx5i_pkey_cleanup_rx,
.enable = NULL,
.disable = NULL,
- .update_rx = mlx5e_update_nic_rx,
+ .update_rx = mlx5i_update_nic_rx,
.update_stats = NULL,
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
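
With struct mlx5_core_qp gone from ipriv, both the parent and the pkey child interface manage the underlay QP purely by number: mlx5i_create_underlay_qp() fills ipriv->qpn and everything downstream keys off it. The lifecycle, condensed from the hunks above (names as used in this patch):

	struct mlx5i_priv *ipriv = priv->ppriv;
	int err;

	err = mlx5i_create_underlay_qp(priv);   /* stores the number in ipriv->qpn */
	if (err)
		return err;

	err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
	if (err)
		mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);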
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 93052b07c76c..874c70e8cc54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -42,13 +42,12 @@
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
*/
-static DEFINE_MUTEX(lag_mutex);
+static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2)
{
- u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
@@ -56,14 +55,13 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, create_lag, in);
}
static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2)
{
- u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
@@ -72,52 +70,29 @@ static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};
-
- MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_lag, in);
}
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};
MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};
MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
- bool reset, void *out, int out_size)
-{
- u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
-
- MLX5_SET(query_cong_statistics_in, in, opcode,
- MLX5_CMD_OP_QUERY_CONG_STATISTICS);
- MLX5_SET(query_cong_statistics_in, in, clear, reset);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev)
{
@@ -232,12 +207,14 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
int err;
ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
- err = mlx5_cmd_destroy_lag(dev0);
+ MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
+ err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
if (err) {
if (roce_lag) {
mlx5_core_err(dev0,
@@ -297,9 +274,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!dev0 || !dev1)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
tracker = ldev->tracker;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -481,9 +458,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev->tracker = tracker;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
if (changed)
mlx5_queue_bond_work(ldev, 0);
@@ -525,7 +502,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
if (fn >= MLX5_MAX_PORTS)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -533,7 +510,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
dev->priv.lag = ldev;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
}
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -548,11 +525,11 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
if (i == MLX5_MAX_PORTS)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
}
/* Must be called with intf_mutex held */
@@ -630,10 +607,10 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -644,10 +621,10 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -658,10 +635,10 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -687,7 +664,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -704,12 +681,36 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ struct net_device *slave)
+{
+ struct mlx5_lag *ldev;
+ u8 port = 0;
+
+ spin_lock(&lag_lock);
+ ldev = mlx5_lag_dev_get(dev);
+ if (!(ldev && __mlx5_lag_is_roce(ldev)))
+ goto unlock;
+
+ if (ldev->pf[MLX5_LAG_P1].netdev == slave)
+ port = MLX5_LAG_P1;
+ else
+ port = MLX5_LAG_P2;
+
+ port = ldev->v2p_map[port];
+
+unlock:
+ spin_unlock(&lag_lock);
+ return port;
+}
+EXPORT_SYMBOL(mlx5_lag_get_slave_port);
+
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
@@ -746,7 +747,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
@@ -756,18 +757,23 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
+ spin_unlock(&lag_lock);
for (i = 0; i < num_ports; ++i) {
- ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
+ u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
+
+ MLX5_SET(query_cong_statistics_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS);
+ ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
+ out);
if (ret)
- goto unlock;
+ goto free;
for (j = 0; j < num_counters; ++j)
values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
}
-unlock:
- mutex_unlock(&lag_mutex);
+free:
kvfree(out);
return ret;
}
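
The lag_mutex to lag_lock conversion means mlx5_lag_is_roce()/is_active()/is_sriov() may now be called from atomic context, but it also forbids sleeping while the lock is held, and firmware commands can sleep. That is why mlx5_lag_query_cong_counters() snapshots the mdev[] array under the spinlock and drops it before issuing QUERY_CONG_STATISTICS. The pattern in outline (mirrors the hunk above):

	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
	struct mlx5_lag *ldev;
	int num_ports, i;

	spin_lock(&lag_lock);           /* short, non-sleeping section */
	ldev = mlx5_lag_dev_get(dev);
	if (ldev && __mlx5_lag_is_roce(ldev)) {
		num_ports = MLX5_MAX_PORTS;
		mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
		mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
	} else {
		num_ports = 1;
		mdev[MLX5_LAG_P1] = dev;
	}
	spin_unlock(&lag_lock);         /* commands below may sleep */

	for (i = 0; i < num_ports; i++) {
		/* mlx5_cmd_exec_inout(...): sleeping firmware command */
	}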
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 43f97601b500..ef0706d15a5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -32,6 +32,7 @@
#include <linux/clocksource.h>
#include <linux/highmem.h>
+#include <linux/ptp_clock_kernel.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
@@ -66,6 +67,26 @@ enum {
MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
};
+static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
+ struct ptp_system_timestamp *sts)
+{
+ u32 timer_h, timer_h1, timer_l;
+
+ timer_h = ioread32be(&dev->iseg->internal_timer_h);
+ ptp_read_system_prets(sts);
+ timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ ptp_read_system_postts(sts);
+ timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
+ if (timer_h != timer_h1) {
+ /* wrap around */
+ ptp_read_system_prets(sts);
+ timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64)timer_l | (u64)timer_h1 << 32;
+}
+
static u64 read_internal_timer(const struct cyclecounter *cc)
{
struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
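
mlx5_read_internal_timer() moves next to its only remaining user. Two details worth noting: the high word is read twice to detect a low-word wraparound between reads, and ptp_read_system_prets()/postts() bracket only the low-word read so the PTP core gets the tightest possible system-clock window around the device timestamp. A sketch of how a gettimex64-style callback would consume it (names and the mdev back-pointer are assumptions for illustration):

	static int sketch_gettimex(struct mlx5_clock *clock, struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
	{
		struct mlx5_core_dev *mdev = clock->mdev;  /* assumed back-pointer */
		u64 cycles, ns;

		/* sts now holds system timestamps taken around the device read */
		cycles = mlx5_read_internal_timer(mdev, sts);
		ns = timecounter_cyc2time(&clock->tc, cycles);
		*ts = ns_to_timespec64(ns);
		return 0;
	}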
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 6cbccba56f70..3d5e57ff558c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -90,7 +90,8 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
}
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
- u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id)
+ u64 length, u32 log_alignment, u16 uid,
+ phys_addr_t *addr, u32 *obj_id)
{
u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
@@ -99,6 +100,7 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
unsigned long *block_map;
u64 icm_start_addr;
u32 log_icm_size;
+ u64 align_mask;
u32 max_blocks;
u64 block_idx;
void *sw_icm;
@@ -136,11 +138,14 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
return -EOPNOTSUPP;
max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ if (log_alignment < MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+ log_alignment = MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+ align_mask = BIT(log_alignment - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) - 1;
+
spin_lock(&dm->lock);
- block_idx = bitmap_find_next_zero_area(block_map,
- max_blocks,
- 0,
- num_blocks, 0);
+ block_idx = bitmap_find_next_zero_area(block_map, max_blocks, 0,
+ num_blocks, align_mask);
if (block_idx < max_blocks)
bitmap_set(block_map,
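
The new log_alignment parameter is clamped to the ICM block size and converted into the align_mask that bitmap_find_next_zero_area() understands: a returned run always starts at an index with (block_idx & align_mask) == 0. A worked example under assumed sizes:

	/* Worked example (assumed sizes): with 4 KiB ICM blocks
	 * (MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) == 12) and a caller asking
	 * for 64 KiB alignment (log_alignment == 16):
	 *
	 *     align_mask = BIT(16 - 12) - 1 = 0xf
	 *
	 * bitmap_find_next_zero_area() rounds candidates up so
	 * (block_idx & 0xf) == 0, i.e. every run starts on a
	 * 16-block = 64 KiB boundary. A log_alignment at or below the
	 * block size degenerates to align_mask == 0, the old
	 * unaligned behaviour.
	 */
	u64 align_mask = BIT(log_alignment - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) - 1;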
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 4be4d2d36218..4aaca7400fb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -27,7 +27,6 @@ struct mlx5_eq {
__be32 __iomem *doorbell;
u32 cons_index;
struct mlx5_frag_buf buf;
- int size;
unsigned int vecidx;
unsigned int irqn;
u8 eqn;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index 7722a3f9bb68..a68738c8f4bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -124,8 +124,7 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
const u8 *mac, bool vlan, u16 vlan_id, u8 port_num)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
- u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {};
void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
char *addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, in_addr,
source_l3_address);
@@ -153,6 +152,6 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_roce_address, in);
}
EXPORT_SYMBOL(mlx5_core_roce_gid_set);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 3118e8d66407..fd8449ff9e17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -40,8 +40,7 @@
/* HW L2 Table (MPFS) management */
static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac)
{
- u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {};
u8 *in_mac_addr;
MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
@@ -50,17 +49,16 @@ static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac)
in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
ether_addr_copy(&in_mac_addr[2], mac);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_l2_table_entry, in);
}
static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
- u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {};
MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, delete_l2_table_entry, in);
}
/* UC L2 table hash node */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
index 48b5c847b642..e042e0924079 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -4,7 +4,6 @@
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/port_tun.h"
@@ -145,11 +144,11 @@ static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy,
int reformat_type)
{
- /* the default is error for unknown (non VXLAN/GRE tunnel types) */
int err = -EOPNOTSUPP;
mutex_lock(&tun_entropy->lock);
- if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN &&
+ if ((reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN ||
+ reformat_type == MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL) &&
tun_entropy->enabled) {
/* in case entropy calculation is enabled for all tunneling
* types, it is ok for VXLAN, so approve.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 148b55c3db7a..82c766a95165 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -60,24 +60,22 @@ static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {};
MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, add_vxlan_udp_dport, in);
}
static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {};
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in);
}
static struct mlx5_vxlan_port*
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7af4210c1b96..df46b1fce3a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -177,6 +177,11 @@ static struct mlx5_profile profile[] = {
#define FW_PRE_INIT_TIMEOUT_MILI 120000
#define FW_INIT_WARN_MESSAGE_INTERVAL 20000
+static int fw_initializing(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->initializing) >> 31;
+}
+
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
u32 warn_time_mili)
{
@@ -206,8 +211,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
driver_version);
- u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
- u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
+ u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
int remaining_size = driver_ver_sz;
char *string;
@@ -234,7 +238,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
MLX5_SET(set_driver_version_in, in, opcode,
MLX5_CMD_OP_SET_DRIVER_VERSION);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, set_driver_version, in);
}
static int set_dma_caps(struct pci_dev *pdev)
@@ -366,7 +370,7 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
if (err) {
mlx5_core_warn(dev,
"QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
@@ -407,30 +411,25 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
-static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
+static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
- u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
-
MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
- return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}
-static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
+static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
{
- void *set_ctx;
void *set_hca_cap;
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
int req_endianness;
int err;
- if (MLX5_CAP_GEN(dev, atomic)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
- if (err)
- return err;
- } else {
+ if (!MLX5_CAP_GEN(dev, atomic))
return 0;
- }
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
+ if (err)
+ return err;
req_endianness =
MLX5_CAP_ATOMIC(dev,
@@ -439,27 +438,18 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
return 0;
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
- if (!set_ctx)
- return -ENOMEM;
-
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
/* Set requestor to host endianness */
MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
- err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
-
- kfree(set_ctx);
- return err;
+ return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
}
-static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
+static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
- void *set_ctx;
- int set_sz;
bool do_set = false;
int err;
@@ -471,11 +461,6 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
if (err)
return err;
- set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
- if (!set_ctx)
- return -ENOMEM;
-
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
MLX5_ST_SZ_BYTES(odp_cap));
@@ -504,30 +489,21 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
- if (do_set)
- err = set_caps(dev, set_ctx, set_sz,
- MLX5_SET_HCA_CAP_OP_MOD_ODP);
-
- kfree(set_ctx);
+ if (!do_set)
+ return 0;
- return err;
+ return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}
-static int handle_hca_cap(struct mlx5_core_dev *dev)
+static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
- void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
- int err = -ENOMEM;
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
void *set_hca_cap;
-
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
- if (!set_ctx)
- goto query_ex;
+ int err;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
if (err)
- goto query_ex;
+ return err;
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
@@ -578,37 +554,76 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
num_vhca_ports,
MLX5_CAP_GEN_MAX(dev, num_vhca_ports));
- err = set_caps(dev, set_ctx, set_sz,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+ if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);
-query_ex:
- kfree(set_ctx);
+ return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+}
+
+static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, roce))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
+ !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+ memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE],
+ MLX5_ST_SZ_BYTES(roce_cap));
+ MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);
+
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
return err;
}
static int set_hca_cap(struct mlx5_core_dev *dev)
{
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_ctx;
int err;
- err = handle_hca_cap(dev);
+ set_ctx = kzalloc(set_sz, GFP_KERNEL);
+ if (!set_ctx)
+ return -ENOMEM;
+
+ err = handle_hca_cap(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap failed\n");
goto out;
}
- err = handle_hca_cap_atomic(dev);
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_atomic(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
goto out;
}
- err = handle_hca_cap_odp(dev);
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_odp(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
goto out;
}
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_roce(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_roce failed\n");
+ goto out;
+ }
+
out:
+ kfree(set_ctx);
return err;
}
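
set_hca_cap() now allocates one SET_HCA_CAP inbox and lends it to each handler in turn; the memset() between calls matters because every handler memcpy()s a different capability layout into the same MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability) region, and stale bytes would otherwise reach firmware. A skeleton of a handler in this scheme (illustrative; MLX5_CAP_FOO, foo_cap and the op_mod constant are hypothetical placeholders):

	static int handle_hca_cap_foo(struct mlx5_core_dev *dev, void *set_ctx)
	{
		void *set_hca_cap;
		int err;

		err = mlx5_core_get_caps(dev, MLX5_CAP_FOO);  /* hypothetical type */
		if (err)
			return err;

		/* bail out early here when nothing needs changing */

		set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
		memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_FOO],
		       MLX5_ST_SZ_BYTES(foo_cap));            /* hypothetical layout */

		return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_FOO);
	}

The op_mod shift in set_caps() (opmod << 1) appears to reserve the low bit, matching the cur/max selector used on the query side; that reading is an inference from the query/set symmetry, not spelled out in this patch.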
@@ -642,58 +657,35 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
- return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, enable_hca, in);
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
- struct ptp_system_timestamp *sts)
-{
- u32 timer_h, timer_h1, timer_l;
-
- timer_h = ioread32be(&dev->iseg->internal_timer_h);
- ptp_read_system_prets(sts);
- timer_l = ioread32be(&dev->iseg->internal_timer_l);
- ptp_read_system_postts(sts);
- timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
- if (timer_h != timer_h1) {
- /* wrap around */
- ptp_read_system_prets(sts);
- timer_l = ioread32be(&dev->iseg->internal_timer_l);
- ptp_read_system_postts(sts);
- }
-
- return (u64)timer_l | (u64)timer_h1 << 32;
+ return mlx5_cmd_exec_in(dev, disable_hca, in);
}
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
- u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
- u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
u32 sup_issi;
int err;
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
- err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
- query_out, sizeof(query_out));
+ err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
if (err) {
u32 syndrome;
u8 status;
@@ -713,13 +705,11 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
if (sup_issi & (1 << 1)) {
- u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
- u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};
MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
MLX5_SET(set_issi_in, set_in, current_issi, 1);
- err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
- set_out, sizeof(set_out));
+ err = mlx5_cmd_exec_in(dev, set_issi, set_in);
if (err) {
mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
err);
@@ -782,7 +772,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
}
mlx5_pci_vsc_init(dev);
-
+ dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
return 0;
err_clr_master:
@@ -836,8 +826,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_cq_debugfs_init(dev);
- mlx5_init_qp_table(dev);
-
mlx5_init_reserved_gids(dev);
mlx5_init_clock(dev);
@@ -896,7 +884,6 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
- mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
err_eq_cleanup:
@@ -924,7 +911,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
- mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
@@ -965,6 +951,8 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
goto err_cmd_cleanup;
}
+ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
+
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
@@ -1026,6 +1014,7 @@ reclaim_boot_pages:
err_disable_hca:
mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
+ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
return err;
@@ -1043,6 +1032,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
}
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
+ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
return 0;
@@ -1180,7 +1170,6 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
{
int err = 0;
- dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "interface is up, NOP\n");
@@ -1191,7 +1180,7 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
err = mlx5_function_setup(dev, boot);
if (err)
- goto out;
+ goto err_function;
if (boot) {
err = mlx5_init_once(dev);
@@ -1217,10 +1206,9 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
mlx5_register_device(dev);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-out:
- mutex_unlock(&dev->intf_state_mutex);
- return err;
+ mutex_unlock(&dev->intf_state_mutex);
+ return 0;
err_devlink_reg:
mlx5_unload(dev);
@@ -1229,18 +1217,17 @@ err_load:
mlx5_cleanup_once(dev);
function_teardown:
mlx5_function_teardown(dev, boot);
+err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+out:
mutex_unlock(&dev->intf_state_mutex);
-
return err;
}
void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
- if (cleanup) {
+ if (cleanup)
mlx5_unregister_device(dev);
- mlx5_drain_health_wq(dev);
- }
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1290,7 +1277,7 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mlx5_debugfs_root);
if (!priv->dbg_root) {
dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n");
- return -ENOMEM;
+ goto err_dbg_root;
}
err = mlx5_health_init(dev);
@@ -1307,15 +1294,27 @@ err_pagealloc_init:
mlx5_health_cleanup(dev);
err_health_init:
debugfs_remove(dev->priv.dbg_root);
-
+err_dbg_root:
+ mutex_destroy(&priv->pgdir_mutex);
+ mutex_destroy(&priv->alloc_mutex);
+ mutex_destroy(&priv->bfregs.wc_head.lock);
+ mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->intf_state_mutex);
return err;
}
static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
+ struct mlx5_priv *priv = &dev->priv;
+
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
debugfs_remove_recursive(dev->priv.dbg_root);
+ mutex_destroy(&priv->pgdir_mutex);
+ mutex_destroy(&priv->alloc_mutex);
+ mutex_destroy(&priv->bfregs.wc_head.lock);
+ mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->intf_state_mutex);
}
#define MLX5_IB_MOD "mlx5_ib"
@@ -1383,6 +1382,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_crdump_disable(dev);
mlx5_devlink_unregister(devlink);
+ mlx5_drain_health_wq(dev);
mlx5_unload_one(dev, true);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
@@ -1544,6 +1544,22 @@ static void shutdown(struct pci_dev *pdev)
mlx5_pci_disable_device(dev);
}
+static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+ mlx5_unload_one(dev, false);
+
+ return 0;
+}
+
+static int mlx5_resume(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+ return mlx5_load_one(dev, false);
+}
+
static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
@@ -1587,6 +1603,8 @@ static struct pci_driver mlx5_core_driver = {
.id_table = mlx5_core_pci_table,
.probe = init_one,
.remove = remove_one,
+ .suspend = mlx5_suspend,
+ .resume = mlx5_resume,
.shutdown = shutdown,
.err_handler = &mlx5_err_handler,
.sriov_configure = mlx5_core_sriov_configure,
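
The new suspend/resume callbacks reuse the regular load/unload paths with boot=false, so state initialized once at probe via mlx5_init_once() survives a suspend cycle while everything tied to live firmware state is torn down and rebuilt. These are the legacy struct pci_driver power-management hooks; the effective ordering across a cycle, as a sketch (the surrounding PCI core steps are a general assumption, not shown in this patch):

	mlx5_suspend(pdev, state);    /* -> mlx5_unload_one(dev, false) */
	/* ... PCI core saves config state, device enters a low-power state ... */
	/* ... on wakeup the core restores state and power ... */
	mlx5_resume(pdev);            /* -> mlx5_load_one(dev, false)   */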
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index ba2b09cc192f..e019d68062d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -33,34 +33,31 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
void *gid;
MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
void *gid;
MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a8fb43a85d1d..fc1649dac11b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -38,7 +38,6 @@
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
@@ -141,8 +140,6 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
- struct ptp_system_timestamp *sts);
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 366f2cbfc6db..9eb51f06d3ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -33,14 +33,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen)
{
- u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+ u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
u32 mkey_index;
void *mkc;
int err;
@@ -66,19 +65,18 @@ EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey)
{
- u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_mkey, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {};
memset(out, 0, outlen);
MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
@@ -100,8 +98,8 @@ static inline u32 mlx5_get_psv(u32 *out, int psv_index)
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index)
{
- u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {};
int i, err;
if (npsvs > MLX5_MAX_PSVS)
@@ -111,7 +109,7 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
MLX5_SET(create_psv_in, in, pd, pdn);
MLX5_SET(create_psv_in, in, num_psv, npsvs);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_psv, in, out);
if (err)
return err;
@@ -124,11 +122,10 @@ EXPORT_SYMBOL(mlx5_core_create_psv);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
- u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {};
MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
MLX5_SET(destroy_psv_in, in, psvn, psv_num);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_psv, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 91bd258ecf1b..5ddd18639a1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"
@@ -51,6 +50,7 @@ struct mlx5_pages_req {
u8 ec_function;
s32 npages;
struct work_struct work;
+ u8 release_all;
};
struct fw_page {
@@ -136,8 +136,8 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s32 *npages, int boot)
{
- u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
int err;
MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
@@ -146,7 +146,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
if (err)
return err;
@@ -156,15 +156,21 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
{
- struct fw_page *fp;
+ struct fw_page *fp = NULL;
+ struct fw_page *iter;
unsigned n;
- if (list_empty(&dev->priv.free_list))
+ list_for_each_entry(iter, &dev->priv.free_list, list) {
+ if (iter->func_id != func_id)
+ continue;
+ fp = iter;
+ }
+
+ if (list_empty(&dev->priv.free_list) || !fp)
return -ENOMEM;
- fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
if (n >= MLX5_NUM_4K_IN_PAGE) {
mlx5_core_warn(dev, "alloc 4k bug\n");
@@ -182,6 +188,18 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
+ bool in_free_list)
+{
+ rb_erase(&fwp->rb_node, &dev->priv.page_root);
+ if (in_free_list)
+ list_del(&fwp->list);
+ dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(fwp->page);
+ kfree(fwp);
+}
+
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
struct fw_page *fwp;
@@ -189,24 +207,16 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
if (!fwp) {
- mlx5_core_warn(dev, "page not found\n");
+ mlx5_core_warn_rl(dev, "page not found\n");
return;
}
-
n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
fwp->free_count++;
set_bit(n, &fwp->bitmask);
- if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
- rb_erase(&fwp->rb_node, &dev->priv.page_root);
- if (fwp->free_count != 1)
- list_del(&fwp->list);
- dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(fwp->page);
- kfree(fwp);
- } else if (fwp->free_count == 1) {
+ if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
+ free_fwp(dev, fwp, fwp->free_count != 1);
+ else if (fwp->free_count == 1)
list_add(&fwp->list, &dev->priv.free_list);
- }
}
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
@@ -257,8 +267,7 @@ err_mapping:
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
- u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int err;
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
@@ -266,7 +275,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, manage_pages, in);
if (err)
mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
func_id, err);
@@ -292,7 +301,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
for (i = 0; i < npages; i++) {
retry:
- err = alloc_4k(dev, &addr);
+ err = alloc_4k(dev, &addr, func_id);
if (err) {
if (err == -ENOMEM)
err = alloc_system_page(dev, func_id);
@@ -339,6 +348,33 @@ out_free:
return err;
}
+static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+ bool ec_function)
+{
+ struct rb_node *p;
+ int npages = 0;
+
+ p = rb_first(&dev->priv.page_root);
+ while (p) {
+ struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
+
+ p = rb_next(p);
+ if (fwp->func_id != func_id)
+ continue;
+ npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
+ free_fwp(dev, fwp, fwp->free_count);
+ }
+
+ dev->priv.fw_pages -= npages;
+ if (func_id)
+ dev->priv.vfs_pages -= npages;
+ else if (mlx5_core_is_ecpf(dev) && !ec_function)
+ dev->priv.peer_pf_pages -= npages;
+
+ mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+ npages, ec_function, func_id);
+}
+
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 *in, int in_size, u32 *out, int out_size)
{
@@ -374,7 +410,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed, bool ec_function)
{
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
- u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
u32 *out;
int err;
@@ -432,7 +468,9 @@ static void pages_work_handler(struct work_struct *work)
struct mlx5_core_dev *dev = req->dev;
int err = 0;
- if (req->npages < 0)
+ if (req->release_all)
+ release_all_pages(dev, req->func_id, req->ec_function);
+ else if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
req->ec_function);
else if (req->npages > 0)
@@ -447,6 +485,7 @@ static void pages_work_handler(struct work_struct *work)
enum {
EC_FUNCTION_MASK = 0x8000,
+ RELEASE_ALL_PAGES_MASK = 0x4000,
};
static int req_pages_handler(struct notifier_block *nb,
@@ -457,6 +496,7 @@ static int req_pages_handler(struct notifier_block *nb,
struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
bool ec_function;
+ bool release_all;
u16 func_id;
s32 npages;
@@ -467,8 +507,10 @@ static int req_pages_handler(struct notifier_block *nb,
func_id = be16_to_cpu(eqe->data.req_pages.func_id);
npages = be32_to_cpu(eqe->data.req_pages.num_pages);
ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
- mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
- func_id, npages);
+ release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
+ RELEASE_ALL_PAGES_MASK;
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
+ func_id, npages, release_all);
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
mlx5_core_warn(dev, "failed to allocate pages request\n");
@@ -479,6 +521,7 @@ static int req_pages_handler(struct notifier_block *nb,
req->func_id = func_id;
req->npages = npages;
req->ec_function = ec_function;
+ req->release_all = release_all;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
return NOTIFY_OK;
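
RELEASE_ALL_PAGES_MASK piggybacks on the same big-endian halfword as EC_FUNCTION_MASK in the pages-request EQE, so one field now carries the function id plus two flag bits. Decoding, spelled out (mirrors the handler above):

	/* The 16-bit eqe->data.req_pages.ec_function field multiplexes:
	 *   bit 15  EC_FUNCTION_MASK (0x8000)        - request targets the
	 *           embedded CPU physical function
	 *   bit 14  RELEASE_ALL_PAGES_MASK (0x4000)  - firmware asks the
	 *           driver to free every page of this function at once
	 *           instead of reclaiming them via MANAGE_PAGES
	 */
	u16 field = be16_to_cpu(eqe->data.req_pages.ec_function);
	bool ec_function = field & EC_FUNCTION_MASK;
	bool release_all = field & RELEASE_ALL_PAGES_MASK;

The release-all path then walks the page tree under the same work item as ordinary reclaim, so ordering against give/reclaim requests is preserved by the single pg_wq workqueue.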
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index bd830d8d6c5f..aabc53ad8bdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -33,17 +33,16 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
- u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
int err;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_pd, in, out);
if (!err)
*pdn = MLX5_GET(alloc_pd_out, out, pd);
return err;
@@ -52,11 +51,10 @@ EXPORT_SYMBOL(mlx5_core_alloc_pd);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_pd, in);
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index cc262b30aed5..9f829e68fc73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -763,24 +763,23 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {};
MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {};
int err;
MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_wol_rol, in, out);
if (!err)
*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index f3b29d9ade1f..99039c47ef33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -33,15 +33,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
/* Scheduling element fw management */
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 *element_id)
{
- u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
void *schedc;
int err;
@@ -53,7 +52,7 @@ int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
hierarchy);
memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out);
if (err)
return err;
@@ -66,8 +65,7 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 element_id,
u32 modify_bitmask)
{
- u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {};
void *schedc;
schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
@@ -82,14 +80,13 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
hierarchy);
memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_scheduling_element, in);
}
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id)
{
- u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {};
MLX5_SET(destroy_scheduling_element_in, in, opcode,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
@@ -98,7 +95,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
hierarchy);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in);
}
static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
@@ -145,8 +142,7 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
struct mlx5_rl_entry *entry, bool set)
{
- u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
void *pp_context;
pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
@@ -156,7 +152,7 @@ static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
if (set)
memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_pp_rate_limit, in);
}
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 554811de4c9d..df1363a34a42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1662,7 +1662,7 @@ dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
}
static bool
-dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
+dr_action_modify_check_is_ttl_modify(const void *sw_action)
{
u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
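Switching dr_action_modify_check_is_ttl_modify() to take const void * matches how the buffer is actually consumed: the MLX5_GET()/MLX5_SET() accessors walk the command payload as big-endian dwords at PRM bit offsets and never dereference a u64, so the old const u64 * promised an alignment and element type the macros never use. A simplified dword-offset accessor to illustrate the contract (names and offsets here are illustrative):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The buffer is opaque bytes holding big-endian 32-bit words;
     * 'const void *' is the honest parameter type for that. */
    static uint32_t get_field(const void *buf, size_t dword_off)
    {
        const uint32_t *words = buf;

        return ntohl(words[dword_off]);
    }

    int main(void)
    {
        uint32_t action[2] = { htonl(0x5c000000), 0 };

        printf("field=0x%x\n", get_field(action, 0));
        return 0;
    }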
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 461b39376daf..6bd34b293007 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -18,7 +18,7 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
if (err)
return err;
@@ -51,7 +51,7 @@ int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
HCA_CAP_OPMOD_GET_CUR);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
if (err) {
kfree(out);
return err;
@@ -141,7 +141,7 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(query_flow_table_in, in, table_type, type);
MLX5_SET(query_flow_table_in, in, table_id, table_id);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
if (err)
return err;
@@ -158,12 +158,11 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
{
- u32 out[MLX5_ST_SZ_DW(sync_steering_out)] = {};
u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, sync_steering, in);
}
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
@@ -214,14 +213,13 @@ int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
u32 table_type,
u32 table_id)
{
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {};
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, table_type);
MLX5_SET(delete_fte_in, in, table_id, table_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, delete_fte, in);
}
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
@@ -263,7 +261,6 @@ out:
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
u32 modify_header_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
MLX5_SET(dealloc_modify_header_context_in, in, opcode,
@@ -271,7 +268,7 @@ int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
modify_header_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
}
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
@@ -292,7 +289,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_group_in, in, table_type, table_type);
MLX5_SET(create_flow_group_in, in, table_id, table_id);
- err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
if (err)
goto out;
@@ -309,14 +306,14 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
u32 group_id)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {};
- MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
}
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
@@ -360,7 +357,7 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
attr->reformat_en);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
if (err)
return err;
@@ -379,7 +376,6 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
u32 table_id,
u32 table_type)
{
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {};
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
MLX5_SET(destroy_flow_table_in, in, opcode,
@@ -387,7 +383,7 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
@@ -434,7 +430,6 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
u32 reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
@@ -442,7 +437,7 @@ void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
reformat_id);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
@@ -458,7 +453,7 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
MLX5_SET(query_roce_address_in, in, roce_address_index, index);
MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 48b6358b6845..890767a2a7cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -297,7 +297,8 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
dmn->mdev = mdev;
dmn->type = type;
refcount_set(&dmn->refcount, 1);
- mutex_init(&dmn->mutex);
+ mutex_init(&dmn->info.rx.mutex);
+ mutex_init(&dmn->info.tx.mutex);
if (dr_domain_caps_init(mdev, dmn)) {
mlx5dr_err(dmn, "Failed init domain, no caps\n");
@@ -345,9 +346,9 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
int ret = 0;
if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
- mutex_lock(&dmn->mutex);
+ mlx5dr_domain_lock(dmn);
ret = mlx5dr_send_ring_force_drain(dmn);
- mutex_unlock(&dmn->mutex);
+ mlx5dr_domain_unlock(dmn);
if (ret) {
mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
flags, ret);
@@ -371,7 +372,8 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
dr_domain_uninit_cache(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
- mutex_destroy(&dmn->mutex);
+ mutex_destroy(&dmn->info.tx.mutex);
+ mutex_destroy(&dmn->info.rx.mutex);
kfree(dmn);
return 0;
}
@@ -379,7 +381,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn)
{
- mutex_lock(&dmn->mutex);
+ mlx5dr_domain_lock(dmn);
if (dmn->peer_dmn)
refcount_dec(&dmn->peer_dmn->refcount);
@@ -389,5 +391,5 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
if (dmn->peer_dmn)
refcount_inc(&dmn->peer_dmn->refcount);
- mutex_unlock(&dmn->mutex);
+ mlx5dr_domain_unlock(dmn);
}
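The single domain mutex becomes one mutex per direction, held in dmn->info.rx and dmn->info.tx; whole-domain operations acquire both in a fixed order (RX first) and release in reverse, per the mlx5dr_domain_lock()/mlx5dr_domain_unlock() helpers added to dr_types.h below, so they can never deadlock against each other while per-direction paths contend only on their own side. A userspace analogue of the ordering, with pthreads standing in for the kernel mutexes:

    #include <pthread.h>

    struct dir_half { pthread_mutex_t mutex; };
    struct domain { struct dir_half rx, tx; };

    static void domain_lock(struct domain *d)
    {
        pthread_mutex_lock(&d->rx.mutex);  /* rx always first... */
        pthread_mutex_lock(&d->tx.mutex);
    }

    static void domain_unlock(struct domain *d)
    {
        pthread_mutex_unlock(&d->tx.mutex); /* ...release in reverse */
        pthread_mutex_unlock(&d->rx.mutex);
    }

    int main(void)
    {
        struct domain d = {
            .rx.mutex = PTHREAD_MUTEX_INITIALIZER,
            .tx.mutex = PTHREAD_MUTEX_INITIALIZER,
        };

        domain_lock(&d);   /* whole-domain critical section */
        domain_unlock(&d);
        return 0;
    }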
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 30d2d7376f56..cc33515b9aba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -95,13 +95,12 @@ static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
}
static struct mlx5dr_icm_mr *
-dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
- enum mlx5_sw_icm_type type,
- size_t align_base)
+dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
struct mlx5_core_dev *mdev = pool->dmn->mdev;
+ enum mlx5_sw_icm_type dm_type;
struct mlx5dr_icm_mr *icm_mr;
- size_t align_diff;
+ size_t log_align_base;
int err;
icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
@@ -111,14 +110,22 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
icm_mr->pool = pool;
INIT_LIST_HEAD(&icm_mr->mr_list);
- icm_mr->dm.type = type;
-
- /* 2^log_biggest_table * entry-size * double-for-alignment */
icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
- pool->icm_type) * 2;
+ pool->icm_type);
+
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ dm_type = MLX5_SW_ICM_TYPE_STEERING;
+ log_align_base = ilog2(icm_mr->dm.length);
+ } else {
+ dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+ /* Align base is 64B */
+ log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
+ }
+ icm_mr->dm.type = dm_type;
- err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
- &icm_mr->dm.addr, &icm_mr->dm.obj_id);
+ err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
+ log_align_base, 0, &icm_mr->dm.addr,
+ &icm_mr->dm.obj_id);
if (err) {
mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
goto free_icm_mr;
@@ -137,15 +144,18 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
icm_mr->icm_start_addr = icm_mr->dm.addr;
- /* align_base is always a power of 2 */
- align_diff = icm_mr->icm_start_addr & (align_base - 1);
- if (align_diff)
- icm_mr->used_length = align_base - align_diff;
+ if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
+ mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",
+ log_align_base);
+ goto free_mkey;
+ }
list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);
return icm_mr;
+free_mkey:
+ mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
free_dm:
mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
icm_mr->dm.addr, icm_mr->dm.obj_id);
@@ -200,24 +210,11 @@ static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
struct mlx5dr_icm_pool *pool = bucket->pool;
struct mlx5dr_icm_mr *icm_mr = NULL;
struct mlx5dr_icm_chunk *chunk;
- enum mlx5_sw_icm_type dm_type;
- size_t align_base;
int i, err = 0;
mr_req_size = bucket->num_of_entries * bucket->entry_size;
mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
pool->icm_type);
-
- if (pool->icm_type == DR_ICM_TYPE_STE) {
- dm_type = MLX5_SW_ICM_TYPE_STEERING;
- /* Align base is the biggest chunk size / row size */
- align_base = mr_row_size;
- } else {
- dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
- /* Align base is 64B */
- align_base = DR_ICM_MODIFY_HDR_ALIGN_BASE;
- }
-
mutex_lock(&pool->mr_mutex);
if (!list_empty(&pool->icm_mr_list)) {
icm_mr = list_last_entry(&pool->icm_mr_list,
@@ -228,7 +225,7 @@ static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
}
if (!icm_mr || mr_free_size < mr_row_size) {
- icm_mr = dr_icm_pool_mr_create(pool, dm_type, align_base);
+ icm_mr = dr_icm_pool_mr_create(pool);
if (!icm_mr) {
err = -ENOMEM;
goto out_err;
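dr_icm_pool_mr_create() used to over-allocate the ICM region by a factor of two and align the start address by hand; it now passes log_align_base to mlx5_dm_sw_icm_alloc() so the device-memory allocator returns an aligned block, and merely verifies the result, failing rather than compensating on a misaligned address. The verification reduces to a power-of-two mask, as this standalone sketch shows:

    #include <assert.h>
    #include <stdint.h>

    /* BIT(log_align_base) - 1 masks exactly the low bits that must
     * be zero for 2^log_align_base alignment. */
    static int addr_is_aligned(uint64_t addr, unsigned int log_align_base)
    {
        return (addr & ((1ULL << log_align_base) - 1)) == 0;
    }

    int main(void)
    {
        assert(addr_is_aligned(0x40000, 18));   /* 256 KiB aligned */
        assert(!addr_is_aligned(0x40040, 18));  /* low bit set: reject */
        return 0;
    }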
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index a95938874798..31abcbb95ca2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -690,7 +690,7 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
refcount_set(&matcher->refcount, 1);
INIT_LIST_HEAD(&matcher->matcher_list);
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
ret = dr_matcher_init(matcher, mask);
if (ret)
@@ -700,14 +700,14 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
if (ret)
goto matcher_uninit;
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
return matcher;
matcher_uninit:
dr_matcher_uninit(matcher);
free_matcher:
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
kfree(matcher);
dec_ref:
refcount_dec(&tbl->refcount);
@@ -791,13 +791,13 @@ int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
if (refcount_read(&matcher->refcount) > 1)
return -EBUSY;
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
dr_matcher_remove_from_tbl(matcher);
dr_matcher_uninit(matcher);
refcount_dec(&matcher->tbl->refcount);
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
kfree(matcher);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index cce3ee7a6614..cd708dcc2e3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -938,7 +938,10 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule)
{
+ mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
dr_rule_clean_rule_members(rule, nic_rule);
+ mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
+
return 0;
}
@@ -1039,18 +1042,18 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
return 0;
+ hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
+ if (!hw_ste_arr)
+ return -ENOMEM;
+
+ mlx5dr_domain_nic_lock(nic_dmn);
+
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
- goto out_err;
-
- hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
- if (!hw_ste_arr) {
- ret = -ENOMEM;
- goto out_err;
- }
+ goto free_hw_ste;
/* Set the tag values inside the ste array */
ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
@@ -1115,6 +1118,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (htbl)
mlx5dr_htbl_put(htbl);
+ mlx5dr_domain_nic_unlock(nic_dmn);
+
kfree(hw_ste_arr);
return 0;
@@ -1129,8 +1134,8 @@ free_rule:
kfree(ste_info);
}
free_hw_ste:
+ mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
-out_err:
return ret;
}
@@ -1232,31 +1237,23 @@ struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_rule *rule;
- mutex_lock(&matcher->tbl->dmn->mutex);
refcount_inc(&matcher->refcount);
rule = dr_rule_create_rule(matcher, value, num_actions, actions);
if (!rule)
refcount_dec(&matcher->refcount);
- mutex_unlock(&matcher->tbl->dmn->mutex);
-
return rule;
}
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
struct mlx5dr_matcher *matcher = rule->matcher;
- struct mlx5dr_table *tbl = rule->matcher->tbl;
int ret;
- mutex_lock(&tbl->dmn->mutex);
-
ret = dr_rule_destroy_rule(rule);
-
- mutex_unlock(&tbl->dmn->mutex);
-
if (!ret)
refcount_dec(&matcher->refcount);
+
return ret;
}
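Rule create and destroy now take the per-direction NIC lock instead of the domain-wide mutex (note mlx5dr_rule_create() and mlx5dr_rule_destroy() dropping the mutex entirely), and the hw_ste_arr allocation is hoisted above the lock so the GFP_KERNEL sleep happens outside the critical section; the free_hw_ste error path unlocks before freeing. A simplified userspace analogue of that ordering, assuming pthreads and calloc in place of the kernel primitives:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t nic_lock = PTHREAD_MUTEX_INITIALIZER;

    static int create_rule(size_t ste_arr_bytes)
    {
        void *hw_ste_arr = calloc(1, ste_arr_bytes); /* may sleep: no lock yet */
        int ret = 0;

        if (!hw_ste_arr)
            return -1;      /* -ENOMEM before any lock is held */

        pthread_mutex_lock(&nic_lock);
        /* build the STE chain, insert into the per-direction tables ... */
        pthread_mutex_unlock(&nic_lock);  /* always unlock before the free */

        free(hw_ste_arr);
        return ret;
    }

    int main(void)
    {
        return create_rule(4096);
    }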
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 18719acb7e54..f421013b0b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -100,14 +100,10 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
return err == CQ_POLL_ERR ? err : npolled;
}
-static void dr_qp_event(struct mlx5_core_qp *mqp, int event)
-{
- pr_info("DR QP event %u on QP #%u\n", event, mqp->qpn);
-}
-
static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
struct dr_qp_init_attr *attr)
{
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
struct mlx5_wq_param wqp;
struct mlx5dr_qp *dr_qp;
@@ -180,14 +176,12 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
(__be64 *)MLX5_ADDR_OF(create_qp_in,
in, pas));
- err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn);
kfree(in);
-
- if (err) {
- mlx5_core_warn(mdev, " Can't create QP\n");
+ if (err)
goto err_in;
- }
- dr_qp->mqp.event = dr_qp_event;
dr_qp->uar = attr->uar;
return dr_qp;
@@ -204,7 +198,12 @@ err_wq:
static void dr_destroy_qp(struct mlx5_core_dev *mdev,
struct mlx5dr_qp *dr_qp)
{
- mlx5_core_destroy_qp(mdev, &dr_qp->mqp);
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, dr_qp->qpn);
+ mlx5_cmd_exec_in(mdev, destroy_qp, in);
+
kfree(dr_qp->sq.wqe_head);
mlx5_wq_destroy(&dr_qp->wq_ctrl);
kfree(dr_qp);
@@ -242,7 +241,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
MLX5_WQE_CTRL_CQ_UPDATE : 0;
wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
opcode);
- wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->mqp.qpn << 8);
+ wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
wq_raddr = (void *)(wq_ctrl + 1);
wq_raddr->raddr = cpu_to_be64(remote_addr);
wq_raddr->rkey = cpu_to_be32(rkey);
@@ -358,9 +357,11 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
u32 buff_offset;
int ret;
+ spin_lock(&send_ring->lock);
+
ret = dr_handle_pending_wc(dmn, send_ring);
if (ret)
- return ret;
+ goto out_unlock;
if (send_info->write.length > dmn->info.max_inline_size) {
buff_offset = (send_ring->tx_head &
@@ -378,7 +379,9 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
dr_fill_data_segs(send_ring, send_info);
dr_post_send(send_ring->qp, send_info);
- return 0;
+out_unlock:
+ spin_unlock(&send_ring->lock);
+ return ret;
}
static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
@@ -564,9 +567,7 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
send_info.remote_addr = action->rewrite.chunk->mr_addr;
send_info.rkey = action->rewrite.chunk->rkey;
- mutex_lock(&dmn->mutex);
ret = dr_postsend_icm_data(dmn, &send_info);
- mutex_unlock(&dmn->mutex);
return ret;
}
@@ -585,8 +586,10 @@ static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
MLX5_SET(qpc, qpc, rre, 1);
MLX5_SET(qpc, qpc, rwe, 1);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
- &dr_qp->mqp);
+ MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, in, qpn, dr_qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}
static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
@@ -598,14 +601,15 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
- MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->mqp.qpn);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
- MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc,
- &dr_qp->mqp);
+ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}
static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
@@ -617,7 +621,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
- MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->mqp.qpn);
+ MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
MLX5_SET(qpc, qpc, mtu, attr->mtu);
MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1);
@@ -636,8 +640,10 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
MLX5_SET(qpc, qpc, min_rnr_nak, 1);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
- &dr_qp->mqp);
+ MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}
static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
@@ -663,7 +669,7 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return ret;
rtr_attr.mtu = mtu;
- rtr_attr.qp_num = dr_qp->mqp.qpn;
+ rtr_attr.qp_num = dr_qp->qpn;
rtr_attr.min_rnr_timer = 12;
rtr_attr.port_num = port;
rtr_attr.sgid_index = gid_index;
@@ -689,12 +695,6 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return 0;
}
-static void dr_cq_event(struct mlx5_core_cq *mcq,
- enum mlx5_event event)
-{
- pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
-}
-
static void dr_cq_complete(struct mlx5_core_cq *mcq,
struct mlx5_eqe *eqe)
{
@@ -761,7 +761,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
- cq->mcq.event = dr_cq_event;
cq->mcq.comp = dr_cq_complete;
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
@@ -889,6 +888,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
init_attr.pdn = dmn->pdn;
init_attr.uar = dmn->uar;
init_attr.max_send_wr = QUEUE_SIZE;
+ spin_lock_init(&dmn->send_ring->lock);
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
if (!dmn->send_ring->qp) {
@@ -993,7 +993,9 @@ int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
return ret;
}
+ spin_lock(&send_ring->lock);
ret = dr_handle_pending_wc(dmn, send_ring);
+ spin_unlock(&send_ring->lock);
return ret;
}
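With rule insertion no longer serialized by the domain mutex, the send ring gets its own spinlock: dr_postsend_icm_data() takes it around draining pending completions and posting, and exits through a single unlock label, so callers such as mlx5dr_send_postsend_action() need no outer lock. (The same hunk drops the mlx5_core_qp wrapper together with its QP/CQ event print stubs; the ring only ever needed the raw QP number.) A sketch of the lock discipline, with pthread spinlocks standing in for the kernel's:

    #include <pthread.h>

    static pthread_spinlock_t ring_lock;

    static int postsend(int (*drain)(void), void (*post)(void))
    {
        int ret;

        pthread_spin_lock(&ring_lock);
        ret = drain();            /* dr_handle_pending_wc() */
        if (ret)
            goto out_unlock;
        post();                   /* fill data segs + ring doorbell */
    out_unlock:
        pthread_spin_unlock(&ring_lock);
        return ret;
    }

    static int drain_ok(void) { return 0; }
    static void post_noop(void) { }

    int main(void)
    {
        pthread_spin_init(&ring_lock, PTHREAD_PROCESS_PRIVATE);
        return postsend(drain_ok, post_noop);
    }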
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index c0e3a1e7389d..00c2f598f034 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -112,7 +112,7 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
{
u32 crc = crc32(0, input_data, length);
- return htonl(crc);
+ return (__force u32)htonl(crc);
}
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
@@ -869,7 +869,7 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
{
- u32 raw_ip[4];
+ __be32 raw_ip[4];
spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);
@@ -961,7 +961,6 @@ static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
- spec->metadata_reg_b = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_b);
}
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
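The remaining dr_ste.c changes are sparse-annotation fixes (raw_ip really holds big-endian words, and the CRC-based hash index deliberately keeps htonl()'s bit pattern in a plain u32, hence the __force cast) plus the removal of the unsupported metadata_reg_b match field. Dropping that u32 folds its four bytes into the reserved tail of mlx5dr_match_misc2, growing reserved_auto2 from 8 to 12 bytes so the layout size is unchanged, which this standalone check confirms:

    #include <assert.h>
    #include <stdint.h>

    /* Tail of mlx5dr_match_misc2 before and after the hunk in
     * dr_types.h below: same total size, field turned reserved. */
    struct misc2_tail_old { uint32_t metadata_reg_b; uint8_t reserved[8]; };
    struct misc2_tail_new { uint8_t reserved[12]; };

    int main(void)
    {
        assert(sizeof(struct misc2_tail_old) ==
               sizeof(struct misc2_tail_new));
        return 0;
    }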
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index c2fe48d7b75a..b599b6beb5b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -14,7 +14,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
if (action && action->action_type != DR_ACTION_TYP_FT)
return -EOPNOTSUPP;
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
if (!list_empty(&tbl->matcher_list))
last_matcher = list_last_entry(&tbl->matcher_list,
@@ -78,7 +78,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
refcount_inc(&action->refcount);
out:
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
return ret;
}
@@ -95,7 +95,7 @@ static void dr_table_uninit_fdb(struct mlx5dr_table *tbl)
static void dr_table_uninit(struct mlx5dr_table *tbl)
{
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
switch (tbl->dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
@@ -112,7 +112,7 @@ static void dr_table_uninit(struct mlx5dr_table *tbl)
break;
}
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
}
static int dr_table_init_nic(struct mlx5dr_domain *dmn,
@@ -177,7 +177,7 @@ static int dr_table_init(struct mlx5dr_table *tbl)
INIT_LIST_HEAD(&tbl->matcher_list);
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
switch (tbl->dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
@@ -201,7 +201,7 @@ static int dr_table_init(struct mlx5dr_table *tbl)
break;
}
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 3fa739951b34..0883956c58c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -554,8 +554,7 @@ struct mlx5dr_match_misc2 {
u32 metadata_reg_c_1; /* metadata_reg_c_1 */
u32 metadata_reg_c_0; /* metadata_reg_c_0 */
u32 metadata_reg_a; /* metadata_reg_a */
- u32 metadata_reg_b; /* metadata_reg_b */
- u8 reserved_auto2[8];
+ u8 reserved_auto2[12];
};
struct mlx5dr_match_misc3 {
@@ -636,6 +635,7 @@ struct mlx5dr_domain_rx_tx {
u64 drop_icm_addr;
u64 default_icm_addr;
enum mlx5dr_ste_entry_type ste_type;
+ struct mutex mutex; /* protect rx/tx domain */
};
struct mlx5dr_domain_info {
@@ -660,7 +660,6 @@ struct mlx5dr_domain {
struct mlx5_uars_page *uar;
enum mlx5dr_domain_type type;
refcount_t refcount;
- struct mutex mutex; /* protect domain */
struct mlx5dr_icm_pool *ste_icm_pool;
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_ring *send_ring;
@@ -814,6 +813,28 @@ struct mlx5dr_icm_chunk {
struct list_head *miss_list;
};
+static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn)
+{
+ mutex_lock(&nic_dmn->mutex);
+}
+
+static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn)
+{
+ mutex_unlock(&nic_dmn->mutex);
+}
+
+static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn)
+{
+ mlx5dr_domain_nic_lock(&dmn->info.rx);
+ mlx5dr_domain_nic_lock(&dmn->info.tx);
+}
+
+static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
+{
+ mlx5dr_domain_nic_unlock(&dmn->info.tx);
+ mlx5dr_domain_nic_unlock(&dmn->info.rx);
+}
+
static inline int
mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
@@ -990,7 +1011,7 @@ struct mlx5dr_qp {
struct mlx5_wq_qp wq;
struct mlx5_uars_page *uar;
struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5_core_qp mqp;
+ u32 qpn;
struct {
unsigned int pc;
unsigned int cc;
@@ -1043,6 +1064,7 @@ struct mlx5dr_send_ring {
struct ib_wc wc[MAX_SEND_CQE];
u8 sync_buff[MIN_READ_SYNC];
struct mlx5dr_mr *sync_mr;
+ spinlock_t lock; /* Protect the data path of the send ring */
};
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
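struct mlx5dr_qp now carries the bare qpn instead of an embedded struct mlx5_core_qp; with the event callback gone, the number is all dr_rdma_segments() needs to build the WQE ctrl segment, where the data-segment count sits in the low byte and the QP number in the upper 24 bits (the real code then converts the word with cpu_to_be32()). A standalone check of the packing:

    #include <assert.h>
    #include <stdint.h>

    /* size | qpn << 8, as in the qpn_ds assignment above. */
    static uint32_t qpn_ds(uint32_t qpn, uint8_t ds_count)
    {
        return ds_count | (qpn << 8);
    }

    int main(void)
    {
        assert(qpn_ds(0x1234, 3) == 0x00123403);
        return 0;
    }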
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 3b3f5b9d4f95..8887b2440c7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -576,7 +576,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action;
size_t actions_sz;
- actions_sz = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) *
+ actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) *
num_actions;
action = mlx5dr_action_create_modify_header(dr_domain, 0,
actions_sz,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index b1068500f1df..01cc00ad8acf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -36,14 +36,14 @@
int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
{
- u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
int err;
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@@ -54,19 +54,18 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}
EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
- u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {};
int err;
MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
@@ -78,44 +77,39 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
}
EXPORT_SYMBOL(mlx5_core_create_rq);
-int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
-
MLX5_SET(modify_rq_in, in, rqn, rqn);
MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_rq, in);
}
EXPORT_SYMBOL(mlx5_core_modify_rq);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_rq, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
- int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
+ u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {};
MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
MLX5_SET(query_rq_in, in, rqn, rqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_inout(dev, query_rq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_rq);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
- u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
int err;
MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
@@ -126,34 +120,30 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
return err;
}
-int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
-
MLX5_SET(modify_sq_in, in, sqn, sqn);
MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_sq, in);
}
EXPORT_SYMBOL(mlx5_core_modify_sq);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_sq, in);
}
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {0};
- int outlen = MLX5_ST_SZ_BYTES(query_sq_out);
+ u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {};
MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
MLX5_SET(query_sq_in, in, sqn, sqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_inout(dev, query_sq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_sq);
@@ -182,24 +172,13 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
-int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
- u32 *in, int inlen,
- u32 *out, int outlen)
-{
- MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
-
- return mlx5_cmd_exec(dev, in, inlen, out, outlen);
-}
-EXPORT_SYMBOL(mlx5_core_create_tir_out);
-
-int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tirn)
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;
- err = mlx5_core_create_tir_out(dev, in, inlen,
- out, sizeof(out));
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev, create_tir, in, out);
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
@@ -207,35 +186,30 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
}
EXPORT_SYMBOL(mlx5_core_create_tir);
-int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
- int inlen)
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
-
MLX5_SET(modify_tir_in, in, tirn, tirn);
MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_tir, in);
}
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tir, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_tir);
-int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tisn)
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn)
{
- u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_tis, in, out);
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
@@ -243,33 +217,29 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
}
EXPORT_SYMBOL(mlx5_core_create_tis);
-int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
- int inlen)
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_tis_out)] = {0};
-
MLX5_SET(modify_tis_in, in, tisn, tisn);
MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_tis, in);
}
EXPORT_SYMBOL(mlx5_core_modify_tis);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tis, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_tis);
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
@@ -284,7 +254,7 @@ EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {};
MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
@@ -293,12 +263,11 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_rqt, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
@@ -383,7 +352,7 @@ static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
int curr_state, int next_state,
u16 peer_vhca, u32 peer_sq)
{
- u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
void *rqc;
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
@@ -396,8 +365,7 @@ static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
- return mlx5_core_modify_rq(func_mdev, rqn,
- in, MLX5_ST_SZ_BYTES(modify_rq_in));
+ return mlx5_core_modify_rq(func_mdev, rqn, in);
}
static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
@@ -417,8 +385,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
- return mlx5_core_modify_sq(peer_mdev, sqn,
- in, MLX5_ST_SZ_BYTES(modify_sq_in));
+ return mlx5_core_modify_sq(peer_mdev, sqn, in);
}
static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 0d006224d7b0..da481a7c12f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -34,17 +34,16 @@
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
- u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
int err;
MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
if (!err)
*uarn = MLX5_GET(alloc_uar_out, out, uar);
return err;
@@ -53,12 +52,11 @@ EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
MLX5_SET(dealloc_uar_in, in, uar, uarn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 23f879da9104..c107d92dc118 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -40,10 +40,11 @@
/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);
-static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport, u32 *out, int outlen)
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
- u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
+ int err;
MLX5_SET(query_vport_state_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_STATE);
@@ -52,14 +53,9 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
if (vport)
MLX5_SET(query_vport_state_in, in, other_vport, 1);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
-{
- u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
-
- _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
+ if (err)
+ return 0;
return MLX5_GET(query_vport_state_out, out, state);
}
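Folding _mlx5_query_vport_state() into mlx5_query_vport_state() also changes the error behavior: a failed command now yields state 0 rather than whatever the stack held, and 0 is the PRM's "down" encoding (stated here as an assumption), so callers that only test for link-up get a conservative answer instead of garbage. Minimal sketch of that fallback:

    #include <stdint.h>

    enum { VPORT_STATE_DOWN = 0 }; /* assumed PRM encoding */

    /* On any command error, report the link as down. */
    static uint8_t query_vport_state(int err, uint8_t hw_state)
    {
        if (err)
            return VPORT_STATE_DOWN;
        return hw_state;
    }

    int main(void)
    {
        return query_vport_state(-5 /* e.g. -EIO */, 1); /* -> 0, down */
    }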
@@ -67,8 +63,7 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 other_vport, u8 state)
{
- u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
MLX5_SET(modify_vport_state_in, in, opcode,
MLX5_CMD_OP_MODIFY_VPORT_STATE);
@@ -77,13 +72,13 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
MLX5_SET(modify_vport_state_in, in, admin_state, state);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
- u32 *out, int outlen)
+ u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
@@ -91,26 +86,16 @@ static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
- int inlen)
-{
- u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
-
- MLX5_SET(modify_nic_vport_context_in, in, opcode,
- MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline)
{
- u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
int err;
- err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
+ err = mlx5_query_nic_vport_context(mdev, vport, out);
if (!err)
*min_inline = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.min_wqe_inline_mode);
@@ -139,8 +124,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline)
{
- u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
- int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
void *nic_vport_ctx;
MLX5_SET(modify_nic_vport_context_in, in,
@@ -152,23 +136,20 @@ int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
in, nic_vport_context);
MLX5_SET(nic_vport_context, nic_vport_ctx,
min_wqe_inline_mode, min_inline);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- return mlx5_modify_nic_vport_context(mdev, in, inlen);
+ return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
}
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, bool other, u8 *addr)
{
- int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
u8 *out_addr;
- u32 *out;
int err;
- out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
nic_vport_context.permanent_address);
@@ -177,11 +158,10 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(query_nic_vport_context_in, in, other_vport, other);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
if (!err)
ether_addr_copy(addr, &out_addr[2]);
- kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
@@ -216,8 +196,10 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
permanent_address);
ether_addr_copy(&perm_mac[2], addr);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -235,7 +217,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
if (!err)
*mtu = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.mtu);
@@ -257,8 +239,10 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
return err;
@@ -292,7 +276,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
req_list_size = max_list_size;
}
- out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
+ out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
out = kzalloc(out_sz, GFP_KERNEL);
@@ -332,7 +316,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int list_size)
{
- u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
void *nic_vport_ctx;
int max_list_size;
int in_sz;
@@ -350,7 +334,6 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- memset(out, 0, sizeof(out));
in = kzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -442,7 +425,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ mlx5_query_nic_vport_context(mdev, 0, out);
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.system_image_guid);
@@ -462,7 +445,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ mlx5_query_nic_vport_context(mdev, 0, out);
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
@@ -498,8 +481,10 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -516,7 +501,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ mlx5_query_nic_vport_context(mdev, 0, out);
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
@@ -664,7 +649,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep)
{
int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
- int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
+ int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
int is_group_manager;
void *out;
void *ctx;
@@ -691,7 +676,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
if (MLX5_CAP_GEN(dev, num_ports) == 2)
MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
if (err)
goto ex;
@@ -788,7 +773,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
+ err = mlx5_query_nic_vport_context(mdev, vport, out);
if (err)
goto out;
@@ -825,8 +810,10 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
nic_vport_context.promisc_mc, promisc_mc);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.promisc_all, promisc_all);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -865,8 +852,10 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
MLX5_SET(modify_nic_vport_context_in, in,
field_select.disable_uc_local_lb, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
if (!err)
mlx5_core_dbg(mdev, "%s local_lb\n",
@@ -888,7 +877,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;
@@ -925,8 +914,10 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
state);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -965,16 +956,15 @@ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
mutex_unlock(&mlx5_roce_en_lock);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
+EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
- int vf, u8 port_num, void *out,
- size_t out_sz)
+ int vf, u8 port_num, void *out)
{
- int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
- int is_group_manager;
- void *in;
- int err;
+ int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
+ int is_group_manager;
+ void *in;
+ int err;
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
in = kvzalloc(in_sz, GFP_KERNEL);
@@ -997,7 +987,7 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
if (MLX5_CAP_GEN(dev, num_ports) == 2)
MLX5_SET(query_vport_counter_in, in, port_num, port_num);
- err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
free:
kvfree(in);
return err;
@@ -1008,8 +998,8 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
u8 other_vport, u64 *rx_discard_vport_down,
u64 *tx_discard_vport_down)
{
- u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
int err;
MLX5_SET(query_vnic_env_in, in, opcode,
@@ -1018,7 +1008,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
MLX5_SET(query_vnic_env_in, in, vport_number, vport);
MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
if (err)
return err;
@@ -1035,11 +1025,10 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *req)
{
int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
- u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
int is_group_manager;
+ void *ctx;
void *in;
int err;
- void *ctx;
mlx5_core_dbg(dev, "vf %d\n", vf);
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
@@ -1047,7 +1036,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
if (!in)
return -ENOMEM;
- memset(out, 0, sizeof(out));
MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
if (other_vport) {
if (is_group_manager) {
@@ -1074,7 +1062,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
req->cap_mask1_perm);
- err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
ex:
kfree(in);
return err;
@@ -1103,8 +1091,10 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria,
MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
if (err)
mlx5_nic_vport_disable_roce(port_mdev);
@@ -1129,8 +1119,10 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
nic_vport_context.affiliated_vhca_id, 0);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria, 0);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
if (!err)
mlx5_nic_vport_disable_roce(port_mdev);
@@ -1170,4 +1162,4 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
}
-EXPORT_SYMBOL(mlx5_eswitch_get_total_vports);
+EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
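The conversions above replace open-coded mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz) calls with mlx5_cmd_exec_in()/mlx5_cmd_exec_inout(), which derive both buffer sizes from the command name at compile time. A minimal standalone model of that pattern, with stub sizes and a stub executor in place of the real mlx5 ifc layouts (illustrative assumptions, not the kernel definitions):

/* Model of the typed command-exec wrappers; the stub sizes below stand in
 * for the real mlx5 ifc struct layouts (assumption, for illustration). */
#include <stdio.h>
#include <string.h>

#define ST_SZ_BYTES_query_vnic_env_in   16
#define ST_SZ_BYTES_query_vnic_env_out  32
#define MLX5_ST_SZ_BYTES(typ) ST_SZ_BYTES_##typ

static int cmd_exec(void *dev, void *in, int in_sz, void *out, int out_sz)
{
	printf("exec: in_sz=%d out_sz=%d\n", in_sz, out_sz);
	memset(out, 0, out_sz);
	return 0;
}

/* Size bookkeeping happens once, inside the macro, keyed by command name,
 * so callers can no longer pass a stale or mismatched sizeof(). */
#define cmd_exec_inout(dev, cmd, in, out) \
	cmd_exec(dev, in, MLX5_ST_SZ_BYTES(cmd##_in), \
		 out, MLX5_ST_SZ_BYTES(cmd##_out))

int main(void)
{
	char in[MLX5_ST_SZ_BYTES(query_vnic_env_in)] = {0};
	char out[MLX5_ST_SZ_BYTES(query_vnic_env_out)];

	return cmd_exec_inout(NULL, query_vnic_env, in, out);
}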
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 046a0cb82ed8..7a04c626a2aa 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -76,7 +76,7 @@ static int mlxfw_fsm_state_err(struct mlxfw_dev *mlxfw_dev,
case MLXFW_FSM_STATE_ERR_MAX:
MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown error", err);
break;
- };
+ }
return mlxfw_fsm_state_errno[fsm_state_err];
};
@@ -159,7 +159,7 @@ mlxfw_fsm_reactivate_err(struct mlxfw_dev *mlxfw_dev,
case MLXFW_FSM_REACTIVATE_STATUS_MAX:
MLXFW_REACT_ERR("unexpected error", err);
break;
- };
+ }
return -EREMOTEIO;
};
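Both hunks above remove a semicolon after the closing brace of a switch statement; the brace alone ends the statement, so the extra `;` is an empty statement that newer compilers warn about. The fixed form in a reduced example:

int classify(int x)
{
	switch (x) {
	case 0:
		return 1;
	default:
		break;
	}	/* no semicolon: the brace already ends the statement */
	return 0;
}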
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0e86a581d45b..4aeabb35c943 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -21,6 +21,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_acl_atcam.o spectrum_acl_erp.o \
spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
spectrum_acl_bloom_filter.o spectrum_acl.o \
+ spectrum_flow.o spectrum_matchall.o \
spectrum_flower.o spectrum_cnt.o \
spectrum_fid.o spectrum_ipip.o \
spectrum_acl_flex_actions.o \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 9b39b8e70519..fcb88d4271bf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3203,7 +3203,7 @@ MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8,
* Size of entries to be deleted. The unit is 1 entry, regardless of entry type.
* Access: OP
*/
-MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11,
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 13,
MLXSW_REG_IEDR_REC_LEN, 0x00, false);
/* reg_iedr_rec_index_start
@@ -5526,40 +5526,37 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
- MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_MC_SNOOPING,
MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
-
- __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
- MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
-};
-
-enum mlxsw_reg_htgt_discard_trap_group {
- MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_BFD,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_EXCEPTIONS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
+
+ __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
};
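The regrouped enum keeps every valid trap group, including the discard groups that previously lived in a separate enum, ahead of the __MLXSW_REG_HTGT_TRAP_GROUP_MAX sentinel, so any table sized by the sentinel now covers them too. The sizing idiom in isolation (generic names, not the mlxsw ones):

#include <stdio.h>

enum trap_group {
	TRAP_GROUP_STP,
	TRAP_GROUP_LACP,
	TRAP_GROUP_LLDP,

	__TRAP_GROUP_MAX,
	TRAP_GROUP_MAX = __TRAP_GROUP_MAX - 1	/* last valid value */
};

/* One slot per group; resizes automatically as entries are added above. */
static int policer_rate[__TRAP_GROUP_MAX];

int main(void)
{
	for (int i = 0; i < __TRAP_GROUP_MAX; i++)
		policer_rate[i] = 1024;
	printf("groups: %d (max id %d)\n", __TRAP_GROUP_MAX, TRAP_GROUP_MAX);
	return 0;
}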
/* reg_htgt_trap_group
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 24ca8d5bc564..5ffa32b75e5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -25,9 +25,7 @@
#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
-#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
-#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>
#include "spectrum.h"
@@ -582,16 +580,6 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool enable, u32 rate)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char mpsc_pl[MLXSW_REG_MPSC_LEN];
-
- mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
-}
-
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up)
{
@@ -1362,412 +1350,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
-static struct mlxsw_sp_port_mall_tc_entry *
-mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
- unsigned long cookie) {
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
-
- list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
- if (mall_tc_entry->cookie == cookie)
- return mall_tc_entry;
-
- return NULL;
-}
-
-static int
-mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
- const struct flow_action_entry *act,
- bool ingress)
-{
- enum mlxsw_sp_span_type span_type;
-
- if (!act->dev) {
- netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
- return -EINVAL;
- }
-
- mirror->ingress = ingress;
- span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
- true, &mirror->span_id);
-}
-
-static void
-mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
-{
- enum mlxsw_sp_span_type span_type;
-
- span_type = mirror->ingress ?
- MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
- span_type, true);
-}
-
-static int
-mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *cls,
- const struct flow_action_entry *act,
- bool ingress)
-{
- int err;
-
- if (!mlxsw_sp_port->sample)
- return -EOPNOTSUPP;
- if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
- netdev_err(mlxsw_sp_port->dev, "sample already active\n");
- return -EEXIST;
- }
- if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
- netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
- return -EOPNOTSUPP;
- }
-
- rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
- act->sample.psample_group);
- mlxsw_sp_port->sample->truncate = act->sample.truncate;
- mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
- mlxsw_sp_port->sample->rate = act->sample.rate;
-
- err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
- if (err)
- goto err_port_sample_set;
- return 0;
-
-err_port_sample_set:
- RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
- return err;
-}
-
-static void
-mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- if (!mlxsw_sp_port->sample)
- return;
-
- mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
- RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
-}
-
-static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f,
- bool ingress)
-{
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
- __be16 protocol = f->common.protocol;
- struct flow_action_entry *act;
- int err;
-
- if (!flow_offload_has_one_action(&f->rule->action)) {
- netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
- return -EOPNOTSUPP;
- }
-
- mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
- if (!mall_tc_entry)
- return -ENOMEM;
- mall_tc_entry->cookie = f->cookie;
-
- act = &f->rule->action.entries[0];
-
- if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
- struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
-
- mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
- mirror = &mall_tc_entry->mirror;
- err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
- mirror, act,
- ingress);
- } else if (act->id == FLOW_ACTION_SAMPLE &&
- protocol == htons(ETH_P_ALL)) {
- mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
- err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
- act, ingress);
- } else {
- err = -EOPNOTSUPP;
- }
-
- if (err)
- goto err_add_action;
-
- list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
- return 0;
-
-err_add_action:
- kfree(mall_tc_entry);
- return err;
-}
-
-static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f)
-{
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
-
- mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
- f->cookie);
- if (!mall_tc_entry) {
- netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
- return;
- }
- list_del(&mall_tc_entry->list);
-
- switch (mall_tc_entry->type) {
- case MLXSW_SP_PORT_MALL_MIRROR:
- mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
- &mall_tc_entry->mirror);
- break;
- case MLXSW_SP_PORT_MALL_SAMPLE:
- mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
- break;
- default:
- WARN_ON(1);
- }
-
- kfree(mall_tc_entry);
-}
-
-static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f,
- bool ingress)
-{
- switch (f->command) {
- case TC_CLSMATCHALL_REPLACE:
- return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
- ingress);
- case TC_CLSMATCHALL_DESTROY:
- mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
- struct flow_cls_offload *f)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
-
- switch (f->command) {
- case FLOW_CLS_REPLACE:
- return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
- case FLOW_CLS_DESTROY:
- mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
- return 0;
- case FLOW_CLS_STATS:
- return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
- case FLOW_CLS_TMPLT_CREATE:
- return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
- case FLOW_CLS_TMPLT_DESTROY:
- mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
- void *type_data,
- void *cb_priv, bool ingress)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSMATCHALL:
- if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
- type_data))
- return -EOPNOTSUPP;
-
- return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
- ingress);
- case TC_SETUP_CLSFLOWER:
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
- void *type_data,
- void *cb_priv)
-{
- return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
- cb_priv, true);
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
- void *type_data,
- void *cb_priv)
-{
- return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
- cb_priv, false);
-}
-
-static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
- void *type_data, void *cb_priv)
-{
- struct mlxsw_sp_acl_block *acl_block = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSMATCHALL:
- return 0;
- case TC_SETUP_CLSFLOWER:
- if (mlxsw_sp_acl_block_disabled(acl_block))
- return -EOPNOTSUPP;
-
- return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
-{
- struct mlxsw_sp_acl_block *acl_block = cb_priv;
-
- mlxsw_sp_acl_block_destroy(acl_block);
-}
-
-static LIST_HEAD(mlxsw_sp_block_cb_list);
-
-static int
-mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f, bool ingress)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_acl_block *acl_block;
- struct flow_block_cb *block_cb;
- bool register_block = false;
- int err;
-
- block_cb = flow_block_cb_lookup(f->block,
- mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp);
- if (!block_cb) {
- acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
- if (!acl_block)
- return -ENOMEM;
- block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp, acl_block,
- mlxsw_sp_tc_block_flower_release);
- if (IS_ERR(block_cb)) {
- mlxsw_sp_acl_block_destroy(acl_block);
- err = PTR_ERR(block_cb);
- goto err_cb_register;
- }
- register_block = true;
- } else {
- acl_block = flow_block_cb_priv(block_cb);
- }
- flow_block_cb_incref(block_cb);
- err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
- mlxsw_sp_port, ingress, f->extack);
- if (err)
- goto err_block_bind;
-
- if (ingress)
- mlxsw_sp_port->ing_acl_block = acl_block;
- else
- mlxsw_sp_port->eg_acl_block = acl_block;
-
- if (register_block) {
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
- }
-
- return 0;
-
-err_block_bind:
- if (!flow_block_cb_decref(block_cb))
- flow_block_cb_free(block_cb);
-err_cb_register:
- return err;
-}
-
-static void
-mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f, bool ingress)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_acl_block *acl_block;
- struct flow_block_cb *block_cb;
- int err;
-
- block_cb = flow_block_cb_lookup(f->block,
- mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp);
- if (!block_cb)
- return;
-
- if (ingress)
- mlxsw_sp_port->ing_acl_block = NULL;
- else
- mlxsw_sp_port->eg_acl_block = NULL;
-
- acl_block = flow_block_cb_priv(block_cb);
- err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
- mlxsw_sp_port, ingress);
- if (!err && !flow_block_cb_decref(block_cb)) {
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- }
-}
-
-static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f)
-{
- struct flow_block_cb *block_cb;
- flow_setup_cb_t *cb;
- bool ingress;
- int err;
-
- if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
- cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
- ingress = true;
- } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
- cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
- ingress = false;
- } else {
- return -EOPNOTSUPP;
- }
-
- f->driver_block_list = &mlxsw_sp_block_cb_list;
-
- switch (f->command) {
- case FLOW_BLOCK_BIND:
- if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
- &mlxsw_sp_block_cb_list))
- return -EBUSY;
-
- block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
- mlxsw_sp_port, NULL);
- if (IS_ERR(block_cb))
- return PTR_ERR(block_cb);
- err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
- ingress);
- if (err) {
- flow_block_cb_free(block_cb);
- return err;
- }
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
- return 0;
- case FLOW_BLOCK_UNBIND:
- mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
- f, ingress);
- block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
- if (!block_cb)
- return -ENOENT;
-
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -1791,23 +1373,21 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
if (!enable) {
- if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
- mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
- !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+ if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
+ mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
- mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
- mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+ mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
+ mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
} else {
- mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
- mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
+ mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
+ mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
}
return 0;
}
@@ -3695,7 +3275,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
- INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats =
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
@@ -3704,13 +3283,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
- mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
- GFP_KERNEL);
- if (!mlxsw_sp_port->sample) {
- err = -ENOMEM;
- goto err_alloc_sample;
- }
-
INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
&update_stats_cache);
@@ -3897,8 +3469,6 @@ err_dev_addr_init:
err_port_swid_set:
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
- kfree(mlxsw_sp_port->sample);
-err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
free_netdev(dev);
@@ -3926,7 +3496,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
- kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
@@ -3986,6 +3555,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
kfree(mlxsw_sp->ports);
+ mlxsw_sp->ports = NULL;
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
@@ -4022,6 +3592,7 @@ err_port_create:
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
kfree(mlxsw_sp->ports);
+ mlxsw_sp->ports = NULL;
return err;
}
@@ -4143,6 +3714,14 @@ static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}
+static struct mlxsw_sp_port *
+mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
+ return mlxsw_sp->ports[local_port];
+ return NULL;
+}
+
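Paired with the two new `mlxsw_sp->ports = NULL;` assignments in mlxsw_sp_ports_remove() and the create error path, this helper lets the split/unsplit handlers run safely while the port array is being torn down. The guard reduced to its essentials (illustrative names, not the driver's):

#include <stddef.h>
#include <stdlib.h>

struct port;

struct switch_dev {
	struct port **ports;	/* NULL once the array has been freed */
};

static struct port *port_get(struct switch_dev *sw, unsigned int idx)
{
	/* Check the array pointer first: removal frees it and NULLs it. */
	if (sw->ports && sw->ports[idx])
		return sw->ports[idx];
	return NULL;
}

static void ports_remove(struct switch_dev *sw)
{
	free(sw->ports);
	sw->ports = NULL;	/* make late lookups fail safely */
}

int main(void)
{
	struct switch_dev sw = { .ports = calloc(8, sizeof(*sw.ports)) };

	(void)port_get(&sw, 3);	/* NULL: slot empty */
	ports_remove(&sw);
	(void)port_get(&sw, 3);	/* NULL: array gone, no dereference */
	return 0;
}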
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count,
struct netlink_ext_ack *extack)
@@ -4156,7 +3735,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
int i;
int err;
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
local_port);
@@ -4251,7 +3830,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
int offset;
int i;
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
local_port);
@@ -4408,12 +3987,17 @@ static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
-static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
- void *priv)
+void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port)
+{
+ mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
+}
+
+void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port)
{
- struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- struct psample_group *psample_group;
+ struct mlxsw_sp_port_sample *sample;
u32 size;
if (unlikely(!mlxsw_sp_port)) {
@@ -4421,36 +4005,20 @@ static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
local_port);
goto out;
}
- if (unlikely(!mlxsw_sp_port->sample)) {
- dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
- local_port);
- goto out;
- }
-
- size = mlxsw_sp_port->sample->truncate ?
- mlxsw_sp_port->sample->trunc_size : skb->len;
rcu_read_lock();
- psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
- if (!psample_group)
+ sample = rcu_dereference(mlxsw_sp_port->sample);
+ if (!sample)
goto out_unlock;
- psample_sample_packet(psample_group, skb, size,
- mlxsw_sp_port->dev->ifindex, 0,
- mlxsw_sp_port->sample->rate);
+ size = sample->truncate ? sample->trunc_size : skb->len;
+ psample_sample_packet(sample->psample_group, skb, size,
+ mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
rcu_read_unlock();
out:
consume_skb(skb);
}
-static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
- void *priv)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
-
- mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
-}
-
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
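mlxsw_sp_sample_receive() now reads the per-port sample state through a single rcu_dereference() and tolerates a NULL result, matching the __rcu annotation added to the sample pointer in spectrum.h. A runnable model of that publish/read discipline, using C11 acquire/release atomics in place of rcu_assign_pointer()/rcu_dereference() (an analogy only; real RCU additionally defers the free past a grace period):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sample_state {
	unsigned int rate;
	unsigned int trunc_size;
	int truncate;
};

static _Atomic(struct sample_state *) port_sample;

/* Control path: initialize the struct fully, then publish it with
 * release semantics so readers never see half-written fields. */
static void sample_enable(unsigned int rate)
{
	struct sample_state *s = calloc(1, sizeof(*s));

	s->rate = rate;
	atomic_store_explicit(&port_sample, s, memory_order_release);
}

/* Hot path: one acquire load; a NULL read simply means sampling is off. */
static void sample_receive(unsigned int pkt_len)
{
	struct sample_state *s =
		atomic_load_explicit(&port_sample, memory_order_acquire);

	if (!s)
		return;
	printf("sampled %u bytes at rate %u\n",
	       s->truncate ? s->trunc_size : pkt_len, s->rate);
}

int main(void)
{
	sample_receive(64);	/* disabled: no output */
	sample_enable(100);
	sample_receive(64);
	return 0;
}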
@@ -4470,58 +4038,13 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* Events */
MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
/* L2 traps */
- MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
- MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
- MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
- false, SP_LLDP, DISCARD),
- MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
- MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
- MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
- MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
- MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
- false),
- MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
- false),
- MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
- false),
- MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
- false),
+ MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
/* L3 traps */
- MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
- MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
false),
- MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
- false),
- MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
- MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
- MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
- MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
- MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
- MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
- MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
- false),
- MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
- false),
- MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
- false),
- MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
- false),
- MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
false),
- MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
- MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
@@ -4530,23 +4053,11 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
ROUTER_EXP, false),
- /* PKT Sample trap */
- MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
- false, SP_IP2ME, DISCARD),
- /* ACL trap */
- MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
/* Multicast Router Traps */
- MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
- MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
- MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
- MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
- /* PTP traps */
- MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
- false, SP_PTP0, DISCARD),
- MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
+ MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
@@ -4575,46 +4086,12 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
for (i = 0; i < max_cpu_policers; i++) {
is_bytes = false;
switch (i) {
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
- rate = 128;
- burst_size = 7;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
- rate = 16 * 1024;
- burst_size = 10;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
rate = 1024;
burst_size = 7;
break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
- rate = 1024;
- burst_size = 7;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
- rate = 24 * 1024;
- burst_size = 12;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
- rate = 19 * 1024;
- burst_size = 12;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
- rate = 360;
- burst_size = 7;
- break;
default:
continue;
}
@@ -4649,43 +4126,12 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
for (i = 0; i < max_trap_groups; i++) {
policer_id = i;
switch (i) {
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
- priority = 5;
- tc = 5;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
- priority = 4;
- tc = 4;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
- priority = 3;
- tc = 3;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
- priority = 2;
- tc = 2;
- break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
priority = 1;
tc = 1;
break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
- priority = 0;
- tc = 1;
- break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
tc = MLXSW_REG_HTGT_DEFAULT_TC;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ca56e72cb4b7..6f96ca50c9ba 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -109,25 +109,6 @@ struct mlxsw_sp_mid {
unsigned long *ports_in_mid; /* bits array */
};
-enum mlxsw_sp_port_mall_action_type {
- MLXSW_SP_PORT_MALL_MIRROR,
- MLXSW_SP_PORT_MALL_SAMPLE,
-};
-
-struct mlxsw_sp_port_mall_mirror_tc_entry {
- int span_id;
- bool ingress;
-};
-
-struct mlxsw_sp_port_mall_tc_entry {
- struct list_head list;
- unsigned long cookie;
- enum mlxsw_sp_port_mall_action_type type;
- union {
- struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
- };
-};
-
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
@@ -211,7 +192,7 @@ struct mlxsw_sp_port_pcpu_stats {
};
struct mlxsw_sp_port_sample {
- struct psample_group __rcu *psample_group;
+ struct psample_group *psample_group;
u32 trunc_size;
u32 rate;
bool truncate;
@@ -274,21 +255,19 @@ struct mlxsw_sp_port {
* the same localport can have
* different mapping.
*/
- /* TC handles */
- struct list_head mall_tc_list;
struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
struct rtnl_link_stats64 stats;
struct mlxsw_sp_port_xstats xstats;
struct delayed_work update_dw;
} periodic_hw_stats;
- struct mlxsw_sp_port_sample *sample;
+ struct mlxsw_sp_port_sample __rcu *sample;
struct list_head vlans_list;
struct mlxsw_sp_port_vlan *default_vlan;
struct mlxsw_sp_qdisc_state *qdisc;
unsigned acl_rule_count;
- struct mlxsw_sp_acl_block *ing_acl_block;
- struct mlxsw_sp_acl_block *eg_acl_block;
+ struct mlxsw_sp_flow_block *ing_flow_block;
+ struct mlxsw_sp_flow_block *eg_flow_block;
struct {
struct delayed_work shaper_dw;
struct hwtstamp_config hwtstamp_config;
@@ -472,6 +451,10 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u8 local_port, void *priv);
+void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port);
+void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port);
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
@@ -654,17 +637,14 @@ struct mlxsw_sp_acl_rule_info {
unsigned int counter_index;
};
-struct mlxsw_sp_acl_block;
-struct mlxsw_sp_acl_ruleset;
-
-/* spectrum_acl.c */
-enum mlxsw_sp_acl_profile {
- MLXSW_SP_ACL_PROFILE_FLOWER,
- MLXSW_SP_ACL_PROFILE_MR,
-};
-
-struct mlxsw_sp_acl_block {
+/* spectrum_flow.c */
+struct mlxsw_sp_flow_block {
struct list_head binding_list;
+ struct {
+ struct list_head list;
+ unsigned int min_prio;
+ unsigned int max_prio;
+ } mall;
struct mlxsw_sp_acl_ruleset *ruleset_zero;
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
@@ -676,40 +656,100 @@ struct mlxsw_sp_acl_block {
struct net *net;
};
+struct mlxsw_sp_flow_block_binding {
+ struct list_head list;
+ struct net_device *dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ bool ingress;
+};
+
+static inline struct mlxsw_sp *
+mlxsw_sp_flow_block_mlxsw_sp(struct mlxsw_sp_flow_block *block)
+{
+ return block->mlxsw_sp;
+}
+
+static inline unsigned int
+mlxsw_sp_flow_block_rule_count(const struct mlxsw_sp_flow_block *block)
+{
+ return block ? block->rule_count : 0;
+}
+
+static inline void
+mlxsw_sp_flow_block_disable_inc(struct mlxsw_sp_flow_block *block)
+{
+ if (block)
+ block->disable_count++;
+}
+
+static inline void
+mlxsw_sp_flow_block_disable_dec(struct mlxsw_sp_flow_block *block)
+{
+ if (block)
+ block->disable_count--;
+}
+
+static inline bool
+mlxsw_sp_flow_block_disabled(const struct mlxsw_sp_flow_block *block)
+{
+ return block->disable_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_egress_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->egress_binding_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_ingress_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ingress_binding_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_mixed_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ingress_binding_count && block->egress_binding_count;
+}
+
+struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net);
+void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block);
+int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f);
+
+/* spectrum_acl.c */
+struct mlxsw_sp_acl_ruleset;
+
+enum mlxsw_sp_acl_profile {
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+ MLXSW_SP_ACL_PROFILE_MR,
+};
+
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
-struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
-unsigned int
-mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block);
-void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
-void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block);
-struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
- struct net *net);
-void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
-int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress,
- struct netlink_ext_ack *extack);
-int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
-bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block);
+
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding);
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile,
struct mlxsw_afk_element_usage *tmplt_elusage);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
@@ -736,7 +776,7 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct net_device *out_dev,
struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
@@ -857,22 +897,39 @@ extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
+/* spectrum_matchall.c */
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
+
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
/* spectrum_qdisc.c */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
index e31ec75ac035..a11d911302f1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -9,7 +9,7 @@
struct mlxsw_sp2_mr_tcam {
struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_acl_block *acl_block;
+ struct mlxsw_sp_flow_block *flow_block;
struct mlxsw_sp_acl_ruleset *ruleset4;
struct mlxsw_sp_acl_ruleset *ruleset6;
};
@@ -61,7 +61,7 @@ static int mlxsw_sp2_mr_tcam_ipv4_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
mlxsw_sp2_mr_tcam_usage_ipv4,
ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv4));
mr_tcam->ruleset4 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
- mr_tcam->acl_block,
+ mr_tcam->flow_block,
MLXSW_SP_L3_PROTO_IPV4,
MLXSW_SP_ACL_PROFILE_MR,
&elusage);
@@ -111,7 +111,7 @@ static int mlxsw_sp2_mr_tcam_ipv6_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
mlxsw_sp2_mr_tcam_usage_ipv6,
ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv6));
mr_tcam->ruleset6 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
- mr_tcam->acl_block,
+ mr_tcam->flow_block,
MLXSW_SP_L3_PROTO_IPV6,
MLXSW_SP_ACL_PROFILE_MR,
&elusage);
@@ -289,8 +289,8 @@ static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
int err;
mr_tcam->mlxsw_sp = mlxsw_sp;
- mr_tcam->acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, NULL);
- if (!mr_tcam->acl_block)
+ mr_tcam->flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, NULL);
+ if (!mr_tcam->flow_block)
return -ENOMEM;
err = mlxsw_sp2_mr_tcam_ipv4_init(mr_tcam);
@@ -306,7 +306,7 @@ static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
err_ipv6_init:
mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
err_ipv4_init:
- mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
+ mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
return err;
}
@@ -316,7 +316,7 @@ static void mlxsw_sp2_mr_tcam_fini(void *priv)
mlxsw_sp2_mr_tcam_ipv6_fini(mr_tcam);
mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
- mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
+ mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
}
const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 01cff711bbd2..47da9ee0045d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -40,15 +40,8 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
-struct mlxsw_sp_acl_block_binding {
- struct list_head list;
- struct net_device *dev;
- struct mlxsw_sp_port *mlxsw_sp_port;
- bool ingress;
-};
-
struct mlxsw_sp_acl_ruleset_ht_key {
- struct mlxsw_sp_acl_block *block;
+ struct mlxsw_sp_flow_block *block;
u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops;
};
@@ -58,6 +51,8 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
unsigned int ref_count;
+ unsigned int min_prio;
+ unsigned int max_prio;
unsigned long priv[];
/* priv has to be always the last item */
};
@@ -94,49 +89,6 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp->acl->dummy_fid;
}
-struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
-{
- return block->mlxsw_sp;
-}
-
-unsigned int
-mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block)
-{
- return block ? block->rule_count : 0;
-}
-
-void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
-{
- if (block)
- block->disable_count++;
-}
-
-void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
-{
- if (block)
- block->disable_count--;
-}
-
-bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block)
-{
- return block->disable_count;
-}
-
-bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->egress_binding_count;
-}
-
-bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->ingress_binding_count;
-}
-
-bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->ingress_binding_count && block->egress_binding_count;
-}
-
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
@@ -144,10 +96,9 @@ mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
return ruleset->ref_count == 2;
}
-static int
-mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_acl_block_binding *binding)
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding)
{
struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -156,10 +107,9 @@ mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress);
}
-static void
-mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_acl_block_binding *binding)
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding)
{
struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -168,18 +118,12 @@ mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress);
}
-static bool
-mlxsw_sp_acl_ruleset_block_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->ruleset_zero;
-}
-
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- struct mlxsw_sp_acl_block *block)
+ struct mlxsw_sp_flow_block *block)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_flow_block_binding *binding;
int err;
block->ruleset_zero = ruleset;
@@ -202,122 +146,18 @@ rollback:
static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- struct mlxsw_sp_acl_block *block)
+ struct mlxsw_sp_flow_block *block)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_flow_block_binding *binding;
list_for_each_entry(binding, &block->binding_list, list)
mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
block->ruleset_zero = NULL;
}
-struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
- struct net *net)
-{
- struct mlxsw_sp_acl_block *block;
-
- block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (!block)
- return NULL;
- INIT_LIST_HEAD(&block->binding_list);
- block->mlxsw_sp = mlxsw_sp;
- block->net = net;
- return block;
-}
-
-void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
-{
- WARN_ON(!list_empty(&block->binding_list));
- kfree(block);
-}
-
-static struct mlxsw_sp_acl_block_binding *
-mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
-{
- struct mlxsw_sp_acl_block_binding *binding;
-
- list_for_each_entry(binding, &block->binding_list, list)
- if (binding->mlxsw_sp_port == mlxsw_sp_port &&
- binding->ingress == ingress)
- return binding;
- return NULL;
-}
-
-int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_acl_block_binding *binding;
- int err;
-
- if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
- return -EEXIST;
-
- if (ingress && block->ingress_blocker_rule_count) {
- NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
- return -EOPNOTSUPP;
- }
-
- if (!ingress && block->egress_blocker_rule_count) {
- NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
- return -EOPNOTSUPP;
- }
-
- binding = kzalloc(sizeof(*binding), GFP_KERNEL);
- if (!binding)
- return -ENOMEM;
- binding->mlxsw_sp_port = mlxsw_sp_port;
- binding->ingress = ingress;
-
- if (mlxsw_sp_acl_ruleset_block_bound(block)) {
- err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
- if (err)
- goto err_ruleset_bind;
- }
-
- if (ingress)
- block->ingress_binding_count++;
- else
- block->egress_binding_count++;
- list_add(&binding->list, &block->binding_list);
- return 0;
-
-err_ruleset_bind:
- kfree(binding);
- return err;
-}
-
-int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress)
-{
- struct mlxsw_sp_acl_block_binding *binding;
-
- binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
- if (!binding)
- return -ENOENT;
-
- list_del(&binding->list);
-
- if (ingress)
- block->ingress_binding_count--;
- else
- block->egress_binding_count--;
-
- if (mlxsw_sp_acl_ruleset_block_bound(block))
- mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
-
- kfree(binding);
- return 0;
-}
-
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops,
struct mlxsw_afk_element_usage *tmplt_elusage)
{
@@ -340,7 +180,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_init;
err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
- tmplt_elusage);
+ tmplt_elusage, &ruleset->min_prio,
+ &ruleset->max_prio);
if (err)
goto err_ops_ruleset_add;
@@ -388,7 +229,7 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
@@ -403,7 +244,7 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
@@ -421,7 +262,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile,
struct mlxsw_afk_element_usage *tmplt_elusage)
{
@@ -455,6 +296,14 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
return ops->ruleset_group_id(ruleset->priv);
}
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ *p_min_prio = ruleset->min_prio;
+ *p_max_prio = ruleset->max_prio;
+}
+
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
struct mlxsw_afa_block *afa_block)
@@ -584,11 +433,11 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct net_device *out_dev,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_flow_block_binding *binding;
struct mlxsw_sp_port *in_port;
if (!list_is_singular(&block->binding_list)) {
@@ -596,7 +445,7 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
binding = list_first_entry(&block->binding_list,
- struct mlxsw_sp_acl_block_binding, list);
+ struct mlxsw_sp_flow_block_binding, list);
in_port = binding->mlxsw_sp_port;
return mlxsw_afa_block_append_mirror(rulei->act_block,
@@ -818,7 +667,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
int err;
err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
@@ -862,18 +711,17 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
- ruleset->ht_key.block->rule_count--;
+ block->rule_count--;
mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
mutex_unlock(&mlxsw_sp->acl->rules_lock);
if (!ruleset->ht_key.chain_index &&
mlxsw_sp_acl_ruleset_is_singular(ruleset))
- mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
- ruleset->ht_key.block);
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index e47d1d286e93..73d56012654b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -136,28 +136,35 @@ mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
- struct mlxsw_sp_port *in_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
- enum mlxsw_sp_span_type type;
+ int err;
- type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- in_port = mlxsw_sp->ports[local_in_port];
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, out_dev, p_span_id);
+ if (err)
+ return err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
+ if (err)
+ goto err_analyzed_port_get;
- return mlxsw_sp_span_mirror_add(in_port, out_dev, type,
- false, p_span_id);
+ return 0;
+
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id);
+ return err;
}
static void
mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
{
+ struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_port *in_port;
- enum mlxsw_sp_span_type type;
-
- type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- in_port = mlxsw_sp->ports[local_in_port];
- mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
+ mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
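The rewritten mirror_add() follows the usual two-step acquire with reverse-order rollback: get the SPAN agent, then the analyzed port, and release the agent via the error label if the second step fails. The control-flow shape, compilable with stubbed helpers (made-up names):

#include <errno.h>

static int agent_get(int *id) { *id = 1; return 0; }
static void agent_put(int id) { (void)id; }
static int analyzed_port_get(void) { return -ENODEV; }

static int mirror_add(int *p_span_id)
{
	int err;

	err = agent_get(p_span_id);
	if (err)
		return err;

	err = analyzed_port_get();
	if (err)
		goto err_analyzed_port_get;

	return 0;

err_analyzed_port_get:
	agent_put(*p_span_id);	/* roll back the first acquisition */
	return err;
}

int main(void) { int id; return mirror_add(&id) ? 1 : 0; }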
const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index a6e30e020b5c..5c020403342f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -179,6 +179,8 @@ struct mlxsw_sp_acl_tcam_vgroup {
bool tmplt_elusage_set;
struct mlxsw_afk_element_usage tmplt_elusage;
bool vregion_rehash_enabled;
+ unsigned int *p_min_prio;
+ unsigned int *p_max_prio;
};
struct mlxsw_sp_acl_tcam_rehash_ctx {
@@ -316,13 +318,17 @@ mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_pattern *patterns,
unsigned int patterns_count,
struct mlxsw_afk_element_usage *tmplt_elusage,
- bool vregion_rehash_enabled)
+ bool vregion_rehash_enabled,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
int err;
vgroup->patterns = patterns;
vgroup->patterns_count = patterns_count;
vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
+ vgroup->p_min_prio = p_min_prio;
+ vgroup->p_max_prio = p_max_prio;
if (tmplt_elusage) {
vgroup->tmplt_elusage_set = true;
@@ -416,6 +422,21 @@ mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
return vchunk->priority;
}
+static void
+mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+
+ if (list_empty(&vgroup->vregion_list))
+ return;
+ vregion = list_first_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
+ vregion = list_last_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
+}
+
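
mlxsw_sp_acl_tcam_vgroup_prio_update() can read just the two ends of vregion_list because the list is maintained in priority order when vregions are attached elsewhere in this file. A small sketch of why a sorted container makes min/max tracking O(1); the array below is a stand-in for the kernel list, not driver code.

#include <stdio.h>

int main(void)
{
	/* priorities kept sorted on insert, as the vregion list is */
	unsigned int prios[] = { 10, 20, 40 };
	size_t n = sizeof(prios) / sizeof(prios[0]);

	unsigned int min_prio = prios[0];	/* first entry */
	unsigned int max_prio = prios[n - 1];	/* last entry */

	printf("min=%u max=%u\n", min_prio, max_prio);	/* min=10 max=40 */
	return 0;
}
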
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
@@ -1035,6 +1056,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
}
list_add_tail(&vchunk->list, pos);
mutex_unlock(&vregion->lock);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
return vchunk;
@@ -1066,6 +1088,7 @@ mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_vchunk_ht_params);
mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
kfree(vchunk);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}
static struct mlxsw_sp_acl_tcam_vchunk *
@@ -1582,14 +1605,17 @@ static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage)
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage, true);
+ tmplt_elusage, true,
+ p_min_prio, p_max_prio);
}
static void
@@ -1698,7 +1724,9 @@ static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage)
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
int err;
@@ -1706,7 +1734,8 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage, false);
+ tmplt_elusage, false,
+ p_min_prio, p_max_prio);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 96437992b102..a41df10ade9b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -42,7 +42,8 @@ struct mlxsw_sp_acl_profile_ops {
size_t ruleset_priv_size;
int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage);
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 968f0902e4fe..21bfb2f6a6f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -614,7 +614,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
- MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
new file mode 100644
index 000000000000..47b66f347ff1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <net/net_namespace.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_flow_block *
+mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net)
+{
+ struct mlxsw_sp_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->mall.list);
+ block->mlxsw_sp = mlxsw_sp;
+ block->net = net;
+ return block;
+}
+
+void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block)
+{
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct mlxsw_sp_flow_block_binding *
+mlxsw_sp_flow_block_lookup(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+ binding->ingress == ingress)
+ return binding;
+ return NULL;
+}
+
+static bool
+mlxsw_sp_flow_block_ruleset_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress)))
+ return -EEXIST;
+
+ if (ingress && block->ingress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress && block->egress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port);
+ if (err)
+ return err;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding) {
+ err = -ENOMEM;
+ goto err_binding_alloc;
+ }
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->ingress = ingress;
+
+ if (mlxsw_sp_flow_block_ruleset_bound(block)) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ if (ingress)
+ block->ingress_binding_count++;
+ else
+ block->egress_binding_count++;
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+err_binding_alloc:
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
+ return err;
+}
+
+static int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+
+ binding = mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (ingress)
+ block->ingress_binding_count--;
+ else
+ block->egress_binding_count--;
+
+ if (mlxsw_sp_flow_block_ruleset_bound(block))
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+ kfree(binding);
+
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
+ return 0;
+}
+
+static int mlxsw_sp_flow_block_mall_cb(struct mlxsw_sp_flow_block *flow_block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlxsw_sp_mall_replace(mlxsw_sp, flow_block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ mlxsw_sp_mall_destroy(flow_block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_flow_block_flower_cb(struct mlxsw_sp_flow_block *flow_block,
+ struct flow_cls_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return mlxsw_sp_flower_replace(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_DESTROY:
+ mlxsw_sp_flower_destroy(mlxsw_sp, flow_block, f);
+ return 0;
+ case FLOW_CLS_STATS:
+ return mlxsw_sp_flower_stats(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_TMPLT_CREATE:
+ return mlxsw_sp_flower_tmplt_create(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_TMPLT_DESTROY:
+ mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, flow_block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_flow_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_flow_block *flow_block = cb_priv;
+
+ if (mlxsw_sp_flow_block_disabled(flow_block))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return mlxsw_sp_flow_block_mall_cb(flow_block, type_data);
+ case TC_SETUP_CLSFLOWER:
+ return mlxsw_sp_flow_block_flower_cb(flow_block, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlxsw_sp_tc_block_release(void *cb_priv)
+{
+ struct mlxsw_sp_flow_block *flow_block = cb_priv;
+
+ mlxsw_sp_flow_block_destroy(flow_block);
+}
+
+static LIST_HEAD(mlxsw_sp_block_cb_list);
+
+static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_flow_block *flow_block;
+ struct flow_block_cb *block_cb;
+ bool register_block = false;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
+ mlxsw_sp);
+ if (!block_cb) {
+ flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, f->net);
+ if (!flow_block)
+ return -ENOMEM;
+ block_cb = flow_block_cb_alloc(mlxsw_sp_flow_block_cb,
+ mlxsw_sp, flow_block,
+ mlxsw_sp_tc_block_release);
+ if (IS_ERR(block_cb)) {
+ mlxsw_sp_flow_block_destroy(flow_block);
+ err = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ register_block = true;
+ } else {
+ flow_block = flow_block_cb_priv(block_cb);
+ }
+ flow_block_cb_incref(block_cb);
+ err = mlxsw_sp_flow_block_bind(mlxsw_sp, flow_block,
+ mlxsw_sp_port, ingress, f->extack);
+ if (err)
+ goto err_block_bind;
+
+ if (ingress)
+ mlxsw_sp_port->ing_flow_block = flow_block;
+ else
+ mlxsw_sp_port->eg_flow_block = flow_block;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
+ }
+
+ return 0;
+
+err_block_bind:
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
+err_cb_register:
+ return err;
+}
+
+static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_flow_block *flow_block;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
+ mlxsw_sp);
+ if (!block_cb)
+ return;
+
+ if (ingress)
+ mlxsw_sp_port->ing_flow_block = NULL;
+ else
+ mlxsw_sp_port->eg_flow_block = NULL;
+
+ flow_block = flow_block_cb_priv(block_cb);
+ err = mlxsw_sp_flow_block_unbind(mlxsw_sp, flow_block,
+ mlxsw_sp_port, ingress);
+ if (!err && !flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
+
+int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ bool ingress;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ ingress = true;
+ else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ ingress = false;
+ else
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &mlxsw_sp_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return mlxsw_sp_setup_tc_block_bind(mlxsw_sp_port, f, ingress);
+ case FLOW_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_unbind(mlxsw_sp_port, f, ingress);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
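
Note the sharing scheme in the bind/unbind pair above: the first port to bind a tc block allocates the mlxsw_sp_flow_block and registers one callback, later binders only take a reference via flow_block_cb_incref(), and the release callback frees everything once the last reference drops. A user-space model of that scheme, with the kernel plumbing stripped away; struct block_cb and bind_port()/unbind_port() are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

struct block_cb {
	int refcnt;
};

static struct block_cb *cb;	/* one shared callback, as in the driver */

static int bind_port(void)
{
	if (!cb) {
		cb = calloc(1, sizeof(*cb));	/* first binder allocates */
		if (!cb)
			return -1;
	}
	cb->refcnt++;
	return 0;
}

static void unbind_port(void)
{
	if (!cb)
		return;
	if (--cb->refcnt == 0) {	/* last reference: free */
		free(cb);
		cb = NULL;
	}
}

int main(void)
{
	bind_port();	/* first port: allocates */
	bind_port();	/* second port: refcount only */
	unbind_port();	/* callback survives */
	unbind_port();	/* last ref: freed */
	printf("cb=%p\n", (void *)cb);	/* cb=(nil) */
	return 0;
}
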
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 890b078851c9..51e1b3930c56 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -15,7 +15,7 @@
#include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
@@ -30,14 +30,14 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
act = flow_action_first_entry_get(flow_action);
- if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY ||
- act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) {
+ if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+ /* Nothing to do */
+ } else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
/* Count action is inserted first */
err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
if (err)
return err;
- } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED &&
- act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) {
+ } else {
NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
return -EOPNOTSUPP;
}
@@ -54,11 +54,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
case FLOW_ACTION_DROP: {
bool ingress;
- if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
+ if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
return -EOPNOTSUPP;
}
- ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
+ ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
act->cookie, extack);
if (err) {
@@ -107,7 +107,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid;
u16 fid_index;
- if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ if (mlxsw_sp_flow_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
return -EOPNOTSUPP;
}
@@ -191,7 +191,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f,
- struct mlxsw_sp_acl_block *block)
+ struct mlxsw_sp_flow_block *block)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -372,7 +372,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f)
{
@@ -461,7 +461,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
- if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ if (mlxsw_sp_flow_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
@@ -505,8 +505,36 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
f->common.extack);
}
+static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+ unsigned int mall_min_prio;
+ unsigned int mall_max_prio;
+ int err;
+
+ err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
+ &mall_min_prio, &mall_max_prio);
+ if (err) {
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+ if (ingress && f->common.prio <= mall_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (!ingress && f->common.prio >= mall_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
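
The check above encodes the ordering contract between the two classifiers: a lower tc priority value means "matched earlier", and since the hardware runs matchall mirroring before flower ACLs on ingress, a new flower rule may not sort in front of the lowest-priority matchall rule (the egress condition is the mirror image). A worked example with made-up priorities; flower_prio_ok() is an illustrative reduction of the kernel check, not a driver function.

#include <stdbool.h>
#include <stdio.h>

static bool flower_prio_ok(bool ingress, unsigned int prio,
			   unsigned int mall_min, unsigned int mall_max)
{
	if (ingress)
		return prio > mall_min;	/* must sort behind matchall */
	return prio < mall_max;		/* must sort in front of it */
}

int main(void)
{
	/* existing matchall rules at priorities 2..4 */
	printf("%d\n", flower_prio_ok(true, 5, 2, 4));	/* 1: allowed */
	printf("%d\n", flower_prio_ok(true, 2, 2, 4));	/* 0: rejected */
	return 0;
}
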
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_rule_info *rulei;
@@ -514,6 +542,10 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule;
int err;
+ err = mlxsw_sp_flower_mall_prio_check(block, f);
+ if (err)
+ return err;
+
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
@@ -553,7 +585,7 @@ err_rule_create:
}
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -575,7 +607,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
}
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
@@ -612,7 +644,7 @@ err_rule_get_stats:
}
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -633,7 +665,7 @@ int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
}
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -647,3 +679,23 @@ void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
+
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
+ chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (IS_ERR(ruleset))
+ /* In case there are no flower rules, the caller
+ * receives -ENOENT to indicate there is no need
+ * to check the priorities.
+ */
+ return PTR_ERR(ruleset);
+ mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
new file mode 100644
index 000000000000..f1a44a8eda55
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+
+#include "spectrum.h"
+#include "spectrum_span.h"
+#include "reg.h"
+
+enum mlxsw_sp_mall_action_type {
+ MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
+ MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
+};
+
+struct mlxsw_sp_mall_mirror_entry {
+ const struct net_device *to_dev;
+ int span_id;
+};
+
+struct mlxsw_sp_mall_entry {
+ struct list_head list;
+ unsigned long cookie;
+ unsigned int priority;
+ enum mlxsw_sp_mall_action_type type;
+ bool ingress;
+ union {
+ struct mlxsw_sp_mall_mirror_entry mirror;
+ struct mlxsw_sp_port_sample sample;
+ };
+ struct rcu_head rcu;
+};
+
+static struct mlxsw_sp_mall_entry *
+mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list)
+ if (mall_entry->cookie == cookie)
+ return mall_entry;
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_trigger_parms parms;
+ enum mlxsw_sp_span_trigger trigger;
+ int err;
+
+ if (!mall_entry->mirror.to_dev) {
+ netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev,
+ &mall_entry->mirror.span_id);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
+ mall_entry->ingress);
+ if (err)
+ goto err_analyzed_port_get;
+
+ trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ parms.span_id = mall_entry->mirror.span_id;
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
+ &parms);
+ if (err)
+ goto err_agent_bind;
+
+ return 0;
+
+err_agent_bind:
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+ return err;
+}
+
+static void
+mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_trigger_parms parms;
+ enum mlxsw_sp_span_trigger trigger;
+
+ trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ parms.span_id = mall_entry->mirror.span_id;
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+}
+
+static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable, u32 rate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char mpsc_pl[MLXSW_REG_MPSC_LEN];
+
+ mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
+}
+
+static int
+mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ int err;
+
+ if (rtnl_dereference(mlxsw_sp_port->sample)) {
+ netdev_err(mlxsw_sp_port->dev, "sample already active\n");
+ return -EEXIST;
+ }
+ rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);
+
+ err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
+ mall_entry->sample.rate);
+ if (err)
+ goto err_port_sample_set;
+ return 0;
+
+err_port_sample_set:
+ RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+ return err;
+}
+
+static void
+mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ if (!mlxsw_sp_port->sample)
+ return;
+
+ mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
+ RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+}
+
+static int
+mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
+ case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
+ return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+static void
+mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
+ break;
+ case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
+ mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ if (list_empty(&block->mall.list))
+ return;
+ block->mall.min_prio = UINT_MAX;
+ block->mall.max_prio = 0;
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
+ if (mall_entry->priority < block->mall.min_prio)
+ block->mall.min_prio = mall_entry->priority;
+ if (mall_entry->priority > block->mall.max_prio)
+ block->mall.max_prio = mall_entry->priority;
+ }
+}
+
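
Unlike the flower vgroup list, block->mall.list is kept in insertion order, so the minimum and maximum priorities have to be recomputed with a full scan whenever an entry is added or removed. The scan reduced to its essentials, with an array standing in for the list:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* matchall entries arrive in insertion order, not sorted */
	unsigned int prios[] = { 30, 10, 20 };
	size_t i, n = sizeof(prios) / sizeof(prios[0]);
	unsigned int min_prio = UINT_MAX;
	unsigned int max_prio = 0;

	for (i = 0; i < n; i++) {
		if (prios[i] < min_prio)
			min_prio = prios[i];
		if (prios[i] > max_prio)
			max_prio = prios[i];
	}
	printf("min=%u max=%u\n", min_prio, max_prio);	/* min=10 max=30 */
	return 0;
}
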
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_mall_entry *mall_entry;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ unsigned int flower_min_prio;
+ unsigned int flower_max_prio;
+ bool flower_prio_valid;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (f->common.chain_index) {
+ NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
+ NL_SET_ERR_MSG(f->common.extack, "Blocks bound to both ingress and egress are not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
+ &flower_min_prio, &flower_max_prio);
+ if (err) {
+ if (err != -ENOENT) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+ flower_prio_valid = false;
+ /* No flower filters are installed in the specified chain. */
+ } else {
+ flower_prio_valid = true;
+ }
+
+ mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+ if (!mall_entry)
+ return -ENOMEM;
+ mall_entry->cookie = f->cookie;
+ mall_entry->priority = f->common.prio;
+ mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+
+ act = &f->rule->action.entries[0];
+
+ if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
+ if (flower_prio_valid && mall_entry->ingress &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid && !mall_entry->ingress &&
+ mall_entry->priority <= flower_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
+ mall_entry->mirror.to_dev = act->dev;
+ } else if (act->id == FLOW_ACTION_SAMPLE &&
+ protocol == htons(ETH_P_ALL)) {
+ if (!mall_entry->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
+ NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
+ mall_entry->sample.psample_group = act->sample.psample_group;
+ mall_entry->sample.truncate = act->sample.truncate;
+ mall_entry->sample.trunc_size = act->sample.trunc_size;
+ mall_entry->sample.rate = act->sample.rate;
+ } else {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
+ mall_entry);
+ if (err)
+ goto rollback;
+ }
+
+ block->rule_count++;
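+ /* An ingress matchall rule prevents the shared block from also
+  * being bound to egress, and vice versa, hence the crossed
+  * blocker counters checked in mlxsw_sp_flow_block_bind().
+  */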
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count++;
+ else
+ block->ingress_blocker_rule_count++;
+ list_add_tail(&mall_entry->list, &block->mall.list);
+ mlxsw_sp_mall_prio_update(block);
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
+errout:
+ kfree(mall_entry);
+ return err;
+}
+
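
The rollback label above relies on list_for_each_entry_continue_reverse() to undo exactly the bindings that were already programmed, and no others. The same partial-rollback idiom in a self-contained form; apply()/revert() are hypothetical stand-ins for the per-binding add/del calls.

#include <stdio.h>

static int apply(int i)  { return i == 2 ? -1 : 0; }	/* fail on the 3rd */
static void revert(int i) { printf("revert %d\n", i); }

static int apply_all(int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = apply(i);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	while (--i >= 0)	/* undo only what succeeded */
		revert(i);
	return err;
}

int main(void)
{
	return apply_all(4) ? 1 : 0;	/* prints "revert 1", "revert 0" */
}
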
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
+ if (!mall_entry) {
+ NL_SET_ERR_MSG(f->common.extack, "Entry not found");
+ return;
+ }
+
+ list_del(&mall_entry->list);
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count--;
+ else
+ block->ingress_blocker_rule_count--;
+ block->rule_count--;
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
+ kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
+ mlxsw_sp_mall_prio_update(block);
+}
+
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+ int err;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
+ err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
+ list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+ return err;
+}
+
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+}
+
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio)
+{
+ if (chain_index || list_empty(&block->mall.list))
+ /* In case there are no matchall rules, the caller
+ * receives -ENOENT to indicate there is no need
+ * to check the priorities.
+ */
+ return -ENOENT;
+ *p_min_prio = block->mall.min_prio;
+ *p_max_prio = block->mall.max_prio;
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index d5bca1be3ef5..770de0222e7b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2999,6 +2999,7 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
+ val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
}
return jhash(&val, sizeof(val), seed);
default:
@@ -3012,11 +3013,14 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
{
unsigned int val = fib6_entry->nrt6;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
- struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+ struct net_device *dev = fib6_nh->fib_nh_dev;
+ struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
+
val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
+ val ^= jhash(gw, sizeof(*gw), seed);
}
return jhash(&val, sizeof(val), seed);
@@ -4999,9 +5003,11 @@ static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
/* Packets with link-local destination IP arriving to the router
* are trapped to the CPU, so no need to program specific routes
- * for them.
+ * for them. Only allow prefix routes (usually one fe80::/64) so
+ * that packets are trapped for the right reason.
*/
- if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
+ if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
+ (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
return true;
/* Multicast routes aren't supported, so ignore them. Neighbour
@@ -7568,7 +7574,7 @@ static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
- struct net_device *br_dev = rif->dev;
+ struct net_device *br_dev;
u16 vid;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 9fb2e9d93929..304eb8c3d8bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -3,6 +3,8 @@
#include <linux/if_bridge.h>
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
@@ -19,9 +21,27 @@
struct mlxsw_sp_span {
struct work_struct work;
struct mlxsw_sp *mlxsw_sp;
+ struct list_head analyzed_ports_list;
+ struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
+ struct list_head trigger_entries_list;
atomic_t active_entries_count;
int entries_count;
- struct mlxsw_sp_span_entry entries[0];
+ struct mlxsw_sp_span_entry entries[];
+};
+
+struct mlxsw_sp_span_analyzed_port {
+ struct list_head list; /* Member of analyzed_ports_list */
+ refcount_t ref_count;
+ u8 local_port;
+ bool ingress;
+};
+
+struct mlxsw_sp_span_trigger_entry {
+ struct list_head list; /* Member of trigger_entries_list */
+ refcount_t ref_count;
+ u8 local_port;
+ enum mlxsw_sp_span_trigger trigger;
+ struct mlxsw_sp_span_trigger_parms parms;
};
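
The entries[0] to entries[] change in the hunk above swaps a GCC zero-length-array extension for a C99 flexible array member; the allocation pattern is unchanged, one block sized for the header plus n trailing entries. A minimal example of that pattern, with struct span reduced to the relevant fields:

#include <stdio.h>
#include <stdlib.h>

struct span {
	int entries_count;
	int entries[];		/* flexible array member */
};

int main(void)
{
	int n = 4;
	struct span *s = malloc(sizeof(*s) + n * sizeof(s->entries[0]));

	if (!s)
		return 1;
	s->entries_count = n;
	s->entries[n - 1] = 42;
	printf("%d\n", s->entries[n - 1]);
	free(s);
	return 0;
}
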
static void mlxsw_sp_span_respin_work(struct work_struct *work);
@@ -48,15 +68,14 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
span->entries_count = entries_count;
atomic_set(&span->active_entries_count, 0);
+ mutex_init(&span->analyzed_ports_lock);
+ INIT_LIST_HEAD(&span->analyzed_ports_list);
+ INIT_LIST_HEAD(&span->trigger_entries_list);
span->mlxsw_sp = mlxsw_sp;
mlxsw_sp->span = span;
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
-
- INIT_LIST_HEAD(&curr->bound_ports_list);
- curr->id = i;
- }
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++)
+ mlxsw_sp->span->entries[i].id = i;
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
mlxsw_sp_span_occ_get, mlxsw_sp);
@@ -68,16 +87,13 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- int i;
cancel_work_sync(&mlxsw_sp->span->work);
devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
-
- WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
- }
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
+ mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
kfree(mlxsw_sp->span);
}
@@ -130,7 +146,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
.can_handle = mlxsw_sp_port_dev_check,
- .parms = mlxsw_sp_span_entry_phys_parms,
+ .parms_set = mlxsw_sp_span_entry_phys_parms,
.configure = mlxsw_sp_span_entry_phys_configure,
.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
@@ -418,7 +434,7 @@ mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
.can_handle = netif_is_gretap,
- .parms = mlxsw_sp_span_entry_gretap4_parms,
+ .parms_set = mlxsw_sp_span_entry_gretap4_parms,
.configure = mlxsw_sp_span_entry_gretap4_configure,
.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
@@ -519,7 +535,7 @@ mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
.can_handle = netif_is_ip6gretap,
- .parms = mlxsw_sp_span_entry_gretap6_parms,
+ .parms_set = mlxsw_sp_span_entry_gretap6_parms,
.configure = mlxsw_sp_span_entry_gretap6_configure,
.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
@@ -575,7 +591,7 @@ mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
.can_handle = mlxsw_sp_span_vlan_can_handle,
- .parms = mlxsw_sp_span_entry_vlan_parms,
+ .parms_set = mlxsw_sp_span_entry_vlan_parms,
.configure = mlxsw_sp_span_entry_vlan_configure,
.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};
@@ -612,7 +628,7 @@ mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
}
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
- .parms = mlxsw_sp_span_entry_nop_parms,
+ .parms_set = mlxsw_sp_span_entry_nop_parms,
.configure = mlxsw_sp_span_entry_nop_configure,
.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
@@ -622,18 +638,27 @@ mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry,
struct mlxsw_sp_span_parms sparms)
{
- if (sparms.dest_port) {
- if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
- netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
- sparms.dest_port->dev->name);
- sparms.dest_port = NULL;
- } else if (span_entry->ops->configure(span_entry, sparms)) {
- netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
- sparms.dest_port->dev->name);
- sparms.dest_port = NULL;
- }
+ int err;
+
+ if (!sparms.dest_port)
+ goto set_parms;
+
+ if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
+ netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ goto set_parms;
+ }
+
+ err = span_entry->ops->configure(span_entry, sparms);
+ if (err) {
+ netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ goto set_parms;
}
+set_parms:
span_entry->parms = sparms;
}
@@ -655,7 +680,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
/* find a free entry to use */
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- if (!mlxsw_sp->span->entries[i].ref_count) {
+ if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
span_entry = &mlxsw_sp->span->entries[i];
break;
}
@@ -665,7 +690,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
atomic_inc(&mlxsw_sp->span->active_entries_count);
span_entry->ops = ops;
- span_entry->ref_count = 1;
+ refcount_set(&span_entry->ref_count, 1);
span_entry->to_dev = to_dev;
mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
@@ -688,7 +713,7 @@ mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
- if (curr->ref_count && curr->to_dev == to_dev)
+ if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
return curr;
}
return NULL;
@@ -709,7 +734,7 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
- if (curr->ref_count && curr->id == span_id)
+ if (refcount_read(&curr->ref_count) && curr->id == span_id)
return curr;
}
return NULL;
@@ -726,7 +751,7 @@ mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
if (span_entry) {
/* Already exists, just take a reference */
- span_entry->ref_count++;
+ refcount_inc(&span_entry->ref_count);
return span_entry;
}
@@ -736,32 +761,13 @@ mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry)
{
- WARN_ON(!span_entry->ref_count);
- if (--span_entry->ref_count == 0)
+ if (refcount_dec_and_test(&span_entry->ref_count))
mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
return 0;
}
-static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- struct mlxsw_sp_span_inspected_port *p;
- int i;
-
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
-
- list_for_each_entry(p, &curr->bound_ports_list, list)
- if (p->local_port == port->local_port &&
- p->type == MLXSW_SP_SPAN_EGRESS)
- return true;
- }
-
- return false;
-}
-
static int
-mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sbib_pl[MLXSW_REG_SBIB_LEN];
@@ -780,20 +786,54 @@ mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
+static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port)
+{
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+
+ mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+}
+
+static struct mlxsw_sp_span_analyzed_port *
+mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
+ bool ingress)
+{
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+
+ list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
+ if (analyzed_port->local_port == local_port &&
+ analyzed_port->ingress == ingress)
+ return analyzed_port;
+ }
+
+ return NULL;
+}
+
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ int err = 0;
+
/* If port is egress mirrored, the shared buffer size should be
* updated according to the mtu value
*/
- if (mlxsw_sp_span_is_egress_mirror(port))
- return mlxsw_sp_span_port_buffsize_update(port, mtu);
- return 0;
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
+ false))
+ err = mlxsw_sp_span_port_buffer_update(port, mtu);
+
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ return err;
}
void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
span.speed_update_dw);
@@ -801,237 +841,368 @@ void mlxsw_sp_span_speed_update_work(struct work_struct *work)
/* If port is egress mirrored, the shared buffer size should be
* updated according to the speed value.
*/
- if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
- mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
- mlxsw_sp_port->dev->mtu);
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ mlxsw_sp_port->local_port, false))
+ mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
+ mlxsw_sp_port->dev->mtu);
+
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
-static struct mlxsw_sp_span_inspected_port *
-mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- struct mlxsw_sp_port *port,
- bool bind)
+static const struct mlxsw_sp_span_entry_ops *
+mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
{
- struct mlxsw_sp_span_inspected_port *p;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
+ if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
+ return mlxsw_sp_span_entry_types[i];
- list_for_each_entry(p, &span_entry->bound_ports_list, list)
- if (type == p->type &&
- port->local_port == p->local_port &&
- bind == p->bound)
- return p;
return NULL;
}
-static int
-mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
- int pa_id = span_entry->id;
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp *mlxsw_sp;
+ int i, err;
+
+ span = container_of(work, struct mlxsw_sp_span, work);
+ mlxsw_sp = span->mlxsw_sp;
+
+ rtnl_lock();
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
+ struct mlxsw_sp_span_parms sparms = {NULL};
+
+ if (!refcount_read(&curr->ref_count))
+ continue;
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+ err = curr->ops->parms_set(curr->to_dev, &sparms);
+ if (err)
+ continue;
+
+ if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
+ mlxsw_sp_span_entry_deconfigure(curr);
+ mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
+ }
+ }
+ rtnl_unlock();
}
-static int
-mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int i;
+ if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ return;
+ mlxsw_core_schedule_work(&mlxsw_sp->span->work);
+}
+
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev, int *p_span_id)
+{
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp_span_parms sparms;
int err;
- /* A given (source port, direction) can only be bound to one analyzer,
- * so if a binding is requested, check for conflicts.
- */
- if (bind)
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr =
- &mlxsw_sp->span->entries[i];
-
- if (mlxsw_sp_span_entry_bound_port_find(curr, type,
- port, bind))
- return -EEXIST;
- }
+ ASSERT_RTNL();
- /* if it is an egress SPAN, bind a shared buffer to it */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
- if (err)
- return err;
+ ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
+ if (!ops) {
+ dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
+ return -EOPNOTSUPP;
}
- if (bind) {
- err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- true);
- if (err)
- goto err_port_bind;
- }
+ memset(&sparms, 0, sizeof(sparms));
+ err = ops->parms_set(to_dev, &sparms);
+ if (err)
+ return err;
- inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
- if (!inspected_port) {
- err = -ENOMEM;
- goto err_inspected_port_alloc;
- }
- inspected_port->local_port = port->local_port;
- inspected_port->type = type;
- inspected_port->bound = bind;
- list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
+ span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
+ if (!span_entry)
+ return -ENOBUFS;
+
+ *p_span_id = span_entry->id;
return 0;
+}
-err_inspected_port_alloc:
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
-err_port_bind:
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ ASSERT_RTNL();
+
+ span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
+ if (WARN_ON_ONCE(!span_entry))
+ return;
+
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+}
+
+static struct mlxsw_sp_span_analyzed_port *
+mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ int err;
+
+ analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
+ if (!analyzed_port)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&analyzed_port->ref_count, 1);
+ analyzed_port->local_port = mlxsw_sp_port->local_port;
+ analyzed_port->ingress = ingress;
+ list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);
+
+ /* An egress mirror buffer should be allocated on the egress port which
+ * does the mirroring.
+ */
+ if (!ingress) {
+ u16 mtu = mlxsw_sp_port->dev->mtu;
+
+ err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
+ if (err)
+ goto err_buffer_update;
}
- return err;
+
+ return analyzed_port;
+
+err_buffer_update:
+ list_del(&analyzed_port->list);
+ kfree(analyzed_port);
+ return ERR_PTR(err);
}
static void
-mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
+mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_analyzed_port *
+ analyzed_port)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
+ struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
- inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
- port, bind);
- if (!inspected_port)
- return;
+ /* Remove egress mirror buffer now that port is no longer analyzed
+ * at egress.
+ */
+ if (!analyzed_port->ingress)
+ mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
+ analyzed_port->local_port);
+
+ list_del(&analyzed_port->list);
+ kfree(analyzed_port);
+}
+
+int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
- /* remove the SBIB buffer if it was egress SPAN */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ local_port, ingress);
+ if (analyzed_port) {
+ refcount_inc(&analyzed_port->ref_count);
+ goto out_unlock;
}
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+ analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
+ mlxsw_sp_port,
+ ingress);
+ if (IS_ERR(analyzed_port))
+ err = PTR_ERR(analyzed_port);
- list_del(&inspected_port->list);
- kfree(inspected_port);
+out_unlock:
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
+ return err;
}
-static const struct mlxsw_sp_span_entry_ops *
-mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *to_dev)
+void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
- size_t i;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ u8 local_port = mlxsw_sp_port->local_port;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
- if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
- return mlxsw_sp_span_entry_types[i];
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
- return NULL;
+ analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ local_port, ingress);
+ if (WARN_ON_ONCE(!analyzed_port))
+ goto out_unlock;
+
+ if (!refcount_dec_and_test(&analyzed_port->ref_count))
+ goto out_unlock;
+
+ mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- const struct net_device *to_dev,
- enum mlxsw_sp_span_type type, bool bind,
- int *p_span_id)
+static int
+__mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry, bool enable)
{
- struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
- const struct mlxsw_sp_span_entry_ops *ops;
- struct mlxsw_sp_span_parms sparms = {NULL};
- struct mlxsw_sp_span_entry *span_entry;
- int err;
-
- ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
- if (!ops) {
- netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
- return -EOPNOTSUPP;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ enum mlxsw_reg_mpar_i_e i_e;
+
+ switch (trigger_entry->trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
+ break;
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
}
- err = ops->parms(to_dev, &sparms);
- if (err)
- return err;
+ mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
+ trigger_entry->parms.span_id);
+ return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
- span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
- if (!span_entry)
- return -ENOBUFS;
+static int
+mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true);
+}
- netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
- span_entry->id);
+static void
+mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false);
+}
- err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
+static struct mlxsw_sp_span_trigger_entry *
+mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms
+ *parms)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+ int err;
+
+ trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
+ if (!trigger_entry)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&trigger_entry->ref_count, 1);
+ trigger_entry->local_port = mlxsw_sp_port->local_port;
+ trigger_entry->trigger = trigger;
+ memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
+ list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
+
+ err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry);
if (err)
- goto err_port_bind;
+ goto err_trigger_entry_bind;
- *p_span_id = span_entry->id;
- return 0;
+ return trigger_entry;
-err_port_bind:
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
- return err;
+err_trigger_entry_bind:
+ list_del(&trigger_entry->list);
+ kfree(trigger_entry);
+ return ERR_PTR(err);
}
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
- enum mlxsw_sp_span_type type, bool bind)
+static void
+mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
{
- struct mlxsw_sp_span_entry *span_entry;
+ mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry);
+ list_del(&trigger_entry->list);
+ kfree(trigger_entry);
+}
- span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
- if (!span_entry) {
- netdev_err(from->dev, "no span entry found\n");
- return;
+static struct mlxsw_sp_span_trigger_entry *
+mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
+ if (trigger_entry->trigger == trigger &&
+ trigger_entry->local_port == mlxsw_sp_port->local_port)
+ return trigger_entry;
}
- netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
- span_entry->id);
- mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
+ return NULL;
}
-static void mlxsw_sp_span_respin_work(struct work_struct *work)
+int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms)
{
- struct mlxsw_sp_span *span;
- struct mlxsw_sp *mlxsw_sp;
- int i, err;
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+ int err = 0;
- span = container_of(work, struct mlxsw_sp_span, work);
- mlxsw_sp = span->mlxsw_sp;
+ ASSERT_RTNL();
- rtnl_lock();
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
- struct mlxsw_sp_span_parms sparms = {NULL};
+ if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
+ return -EINVAL;
- if (!curr->ref_count)
- continue;
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (trigger_entry) {
+ if (trigger_entry->parms.span_id != parms->span_id)
+ return -EINVAL;
+ refcount_inc(&trigger_entry->ref_count);
+ goto out;
+ }
- err = curr->ops->parms(curr->to_dev, &sparms);
- if (err)
- continue;
+ trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port,
+ parms);
+ if (IS_ERR(trigger_entry))
+ err = PTR_ERR(trigger_entry);
- if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
- mlxsw_sp_span_entry_deconfigure(curr);
- mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
- }
- }
- rtnl_unlock();
+out:
+ return err;
}
-void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms)
{
- if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ ASSERT_RTNL();
+
+ if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
+ parms->span_id)))
return;
- mlxsw_core_schedule_work(&mlxsw_sp->span->work);
+
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (WARN_ON_ONCE(!trigger_entry))
+ return;
+
+ if (!refcount_dec_and_test(&trigger_entry->ref_count))
+ return;
+
+ mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 59724335525f..9f6dd2d0f4e6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -6,26 +6,13 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/refcount.h>
#include "spectrum_router.h"
struct mlxsw_sp;
struct mlxsw_sp_port;
-enum mlxsw_sp_span_type {
- MLXSW_SP_SPAN_EGRESS,
- MLXSW_SP_SPAN_INGRESS
-};
-
-struct mlxsw_sp_span_inspected_port {
- struct list_head list;
- enum mlxsw_sp_span_type type;
- u8 local_port;
-
- /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
- bool bound;
-};
-
struct mlxsw_sp_span_parms {
struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
unsigned int ttl;
@@ -36,21 +23,29 @@ struct mlxsw_sp_span_parms {
u16 vid;
};
+enum mlxsw_sp_span_trigger {
+ MLXSW_SP_SPAN_TRIGGER_INGRESS,
+ MLXSW_SP_SPAN_TRIGGER_EGRESS,
+};
+
+struct mlxsw_sp_span_trigger_parms {
+ int span_id;
+};
+
struct mlxsw_sp_span_entry_ops;
struct mlxsw_sp_span_entry {
const struct net_device *to_dev;
const struct mlxsw_sp_span_entry_ops *ops;
struct mlxsw_sp_span_parms parms;
- struct list_head bound_ports_list;
- int ref_count;
+ refcount_t ref_count;
int id;
};
struct mlxsw_sp_span_entry_ops {
bool (*can_handle)(const struct net_device *to_dev);
- int (*parms)(const struct net_device *to_dev,
- struct mlxsw_sp_span_parms *sparmsp);
+ int (*parms_set)(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp);
int (*configure)(struct mlxsw_sp_span_entry *span_entry,
struct mlxsw_sp_span_parms sparms);
void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry);
@@ -60,12 +55,6 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- const struct net_device *to_dev,
- enum mlxsw_sp_span_type type,
- bool bind, int *p_span_id);
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
- enum mlxsw_sp_span_type type, bool bind);
struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev);
@@ -76,4 +65,21 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
void mlxsw_sp_span_speed_update_work(struct work_struct *work);
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev, int *p_span_id);
+void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id);
+int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms);
+void
+mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms);
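+
+/* Typical call order for setting up port mirroring (a sketch of the intended
+ * usage; error handling omitted):
+ *
+ *	mlxsw_sp_span_agent_get(mlxsw_sp, to_dev, &span_id);
+ *	mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
+ *	parms.span_id = span_id;
+ *	mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
+ *
+ * Teardown is the reverse: unbind the agent, put the analyzed port, then put
+ * the agent.
+ */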
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index fbf714d027d8..157a42c63066 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -12,6 +12,24 @@
#include "spectrum.h"
#include "spectrum_trap.h"
+struct mlxsw_sp_trap_policer_item {
+ struct devlink_trap_policer policer;
+ u16 hw_id;
+};
+
+struct mlxsw_sp_trap_group_item {
+ struct devlink_trap_group group;
+ u16 hw_group_id;
+ u8 priority;
+};
+
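+/* Maximum number of hardware listeners a single devlink trap may need;
+ * unused slots in listeners_arr are left zero-initialized and skipped at
+ * (un)registration time.
+ */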
+#define MLXSW_SP_TRAP_LISTENERS_MAX 3
+
+struct mlxsw_sp_trap_item {
+ struct devlink_trap trap;
+ struct mlxsw_listener listeners_arr[MLXSW_SP_TRAP_LISTENERS_MAX];
+};
+
/* All driver-specific traps must be documented in
* Documentation/networking/devlink/mlxsw.rst
*/
@@ -107,8 +125,8 @@ static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
-static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
- void *trap_ctx)
+static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
{
struct devlink_port *in_devlink_port;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -121,7 +139,7 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
if (err)
- return;
+ return err;
devlink = priv_to_devlink(mlxsw_sp->core);
in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
@@ -129,10 +147,71 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
skb_push(skb, ETH_HLEN);
devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, NULL);
skb_pull(skb, ETH_HLEN);
- skb->offload_fwd_mark = 1;
+
+ return 0;
+}
+
+static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
netif_receive_skb(skb);
}
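+
+/* Listener variants that set the offload forwarding mark(s), telling the
+ * bridge / router code that the packet was already forwarded in hardware.
+ */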
+static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ skb->offload_fwd_mark = 1;
+ mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+}
+
+static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ skb->offload_l3_fwd_mark = 1;
+ skb->offload_fwd_mark = 1;
+ mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+}
+
+static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ /* The PTP handler expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_ptp_receive(mlxsw_sp, skb, local_port);
+}
+
+static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ /* The sample handler expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_sample_receive(mlxsw_sp, skb, local_port);
+}
+
#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
@@ -154,6 +233,11 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA)
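+/* Generic control traps; _action is either TRAP or MIRROR. */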
+#define MLXSW_SP_TRAP_CONTROL(_id, _group_id, _action) \
+ DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ MLXSW_SP_TRAP_METADATA)
+
#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
MLXSW_RXL_DIS(mlxsw_sp_rx_drop_listener, DISCARD_##_id, \
TRAP_EXCEPTION_TO_CPU, false, SP_##_group_id, \
@@ -165,9 +249,21 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
SET_FW_DEFAULT, SP_##_dis_group_id)
#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
- MLXSW_RXL(mlxsw_sp_rx_exception_listener, _id, \
+ MLXSW_RXL(mlxsw_sp_rx_mark_listener, _id, \
_action, false, SP_##_group_id, SET_FW_DEFAULT)
+#define MLXSW_SP_RXL_NO_MARK(_id, _group_id, _action, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_no_mark_listener, _id, _action, \
+ _is_ctrl, SP_##_group_id, DISCARD)
+
+#define MLXSW_SP_RXL_MARK(_id, _group_id, _action, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_mark_listener, _id, _action, _is_ctrl, \
+ SP_##_group_id, DISCARD)
+
+#define MLXSW_SP_RXL_L3_MARK(_id, _group_id, _action, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_l3_mark_listener, _id, _action, _is_ctrl, \
+ SP_##_group_id, DISCARD)
+
#define MLXSW_SP_TRAP_POLICER(_id, _rate, _burst) \
DEVLINK_TRAP_POLICER(_id, _rate, _burst, \
MLXSW_REG_QPCR_HIGHEST_CIR, \
@@ -176,149 +272,753 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
1 << MLXSW_REG_QPCR_LOWEST_CBS)
/* Ordered by policer identifier */
-static const struct devlink_trap_policer mlxsw_sp_trap_policers_arr[] = {
- MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 128),
+static const struct mlxsw_sp_trap_policer_item
+mlxsw_sp_trap_policer_items_arr[] = {
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(2, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(3, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(4, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(5, 16 * 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(6, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(7, 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(8, 20 * 1024, 1024),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(9, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(10, 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(11, 360, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(12, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(13, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(14, 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(15, 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(16, 24 * 1024, 4096),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(17, 19 * 1024, 4096),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
+ },
};
-static const struct devlink_trap_group mlxsw_sp_trap_groups_arr[] = {
- DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1),
- DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
- DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS, 1),
- DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 1),
+static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_EXCEPTIONS,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(STP, 2),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(LACP, 3),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(LLDP, 4),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(MC_SNOOPING, 5),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_MC_SNOOPING,
+ .priority = 3,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(DHCP, 6),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(NEIGH_DISCOVERY, 7),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(BFD, 8),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BFD,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(OSPF, 9),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(BGP, 10),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP,
+ .priority = 4,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(VRRP, 11),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(PIM, 12),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(UC_LB, 13),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(LOCAL_DELIVERY, 14),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(PTP_EVENT, 16),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(PTP_GENERAL, 17),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 18),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING,
+ .priority = 4,
+ },
};
-static const struct devlink_trap mlxsw_sp_traps_arr[] = {
- MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
- MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
- MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
- MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
- MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
- MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
- MLXSW_SP_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
- MLXSW_SP_TRAP_DROP(NON_IP_PACKET, L3_DROPS),
- MLXSW_SP_TRAP_DROP(UC_DIP_MC_DMAC, L3_DROPS),
- MLXSW_SP_TRAP_DROP(DIP_LB, L3_DROPS),
- MLXSW_SP_TRAP_DROP(SIP_MC, L3_DROPS),
- MLXSW_SP_TRAP_DROP(SIP_LB, L3_DROPS),
- MLXSW_SP_TRAP_DROP(CORRUPTED_IP_HDR, L3_DROPS),
- MLXSW_SP_TRAP_DROP(IPV4_SIP_BC, L3_DROPS),
- MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_RESERVED_SCOPE, L3_DROPS),
- MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(MTU_ERROR, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(RPF, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(REJECT_ROUTE, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS, L3_DROPS),
- MLXSW_SP_TRAP_DRIVER_DROP(IRIF_DISABLED, L3_DROPS),
- MLXSW_SP_TRAP_DRIVER_DROP(ERIF_DISABLED, L3_DROPS),
- MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
- MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
- MLXSW_SP_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP, ACL_DROPS,
- DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
- MLXSW_SP_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP, ACL_DROPS,
- DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
+ {
+ .trap = MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW,
+ L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_STP, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER2, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(NON_IP_PACKET, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_NON_IP_PACKET,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(UC_DIP_MC_DMAC, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_UC_DIP_MC_DMAC,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(DIP_LB, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_DIP_LB, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(SIP_MC, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_MC, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(SIP_LB, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_LB, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(CORRUPTED_IP_HDR, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_CORRUPTED_IP_HDR,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(IPV4_SIP_BC, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_RESERVED_SCOPE,
+ L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(MTU_ERROR, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(MTUERROR, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(TTLERROR, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(RPF, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(RPF, L3_EXCEPTIONS, TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(REJECT_ROUTE, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH,
+ L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, L3_EXCEPTIONS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS,
+ L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4,
+ L3_EXCEPTIONS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS,
+ L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6,
+ L3_EXCEPTIONS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DRIVER_DROP(IRIF_DISABLED, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DRIVER_DROP(ERIF_DISABLED, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR,
+ TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP,
+ ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ .listeners_arr = {
+ MLXSW_SP_RXL_ACL_DISCARD(INGRESS_ACL, ACL_DISCARDS,
+ DUMMY),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP,
+ ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ .listeners_arr = {
+ MLXSW_SP_RXL_ACL_DISCARD(EGRESS_ACL, ACL_DISCARDS,
+ DUMMY),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(STP, STP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(STP, STP, TRAP_TO_CPU, true),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(LACP, LACP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(LACP, LACP, TRAP_TO_CPU, true),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU,
+ false, SP_LLDP, DISCARD),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_QUERY, MC_SNOOPING, MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IGMP_QUERY, MC_SNOOPING,
+ MIRROR_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V1_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V2_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V3_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V2_LEAVE, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_QUERY, MC_SNOOPING, MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY,
+ MC_SNOOPING, MIRROR_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_V1_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT,
+ MC_SNOOPING, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_V2_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT,
+ MC_SNOOPING, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_V1_DONE, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE,
+ MC_SNOOPING, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_DHCP, DHCP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_DHCP, DHCP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_DHCP, DHCP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_DHCP, DHCP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ARPBC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ARPUC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(ARP_OVERLAY, NEIGH_DISCOVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_NEIGH_SOLICIT,
+ NEIGH_DISCOVERY, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION,
+ NEIGH_DISCOVERY, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_NEIGH_ADVERT,
+ NEIGH_DISCOVERY, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISEMENT,
+ NEIGH_DISCOVERY, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_BFD, BFD, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_BFD, BFD, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_BFD, BFD, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_BFD, BFD, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_OSPF, OSPF, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_OSPF, OSPF, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_OSPF, OSPF, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_OSPF, OSPF, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_BGP, BGP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_BGP, BGP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_BGP, BGP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_BGP, BGP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_VRRP, VRRP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, VRRP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_VRRP, VRRP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, VRRP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_PIM, PIM, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_PIM, PIM, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_PIM, PIM, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_PIM, PIM, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(UC_LB, UC_LB, MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_L3_MARK(LBERROR, LBERROR, MIRROR_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(LOCAL_ROUTE, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IP2ME, IP2ME, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_UC_DIP_LINK_LOCAL_SCOPE,
+ LOCAL_DELIVERY, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, IP2ME,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_ROUTER_ALERT, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, IP2ME, TRAP_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_ROUTER_ALERT, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, IP2ME, TRAP_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_DIP_ALL_NODES, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_DIP_ALL_ROUTERS, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_ROUTER_SOLICIT, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_ROUTER_ADVERT, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISEMENT, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_REDIRECT, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(PTP_EVENT, PTP_EVENT, TRAP),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_ptp_listener, PTP0, TRAP_TO_CPU,
+ false, SP_PTP0, DISCARD),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(PTP_GENERAL, PTP_GENERAL, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(PTP1, PTP1, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE,
+ MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_TRAP, ACL_TRAP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(ACL0, FLOW_LOGGING, TRAP_TO_CPU,
+ false),
+ },
+ },
};
-static const struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
- MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_SWITCH_STP, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ROUTER2, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_NON_IP_PACKET, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_UC_DIP_MC_DMAC, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_DIP_LB, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_MC, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_LB, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_CORRUPTED_IP_HDR, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DISCARDS),
- MLXSW_SP_RXL_EXCEPTION(MTUERROR, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(TTLERROR, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(RPF, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, L3_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, L3_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, L3_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
- MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, TUNNEL_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR, TUNNEL_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
- MLXSW_SP_RXL_ACL_DISCARD(INGRESS_ACL, ACL_DISCARDS, DUMMY),
- MLXSW_SP_RXL_ACL_DISCARD(EGRESS_ACL, ACL_DISCARDS, DUMMY),
-};
+static struct mlxsw_sp_trap_policer_item *
+mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
+{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
-/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
- * be mapped to the same devlink trap. Order is according to
- * 'mlxsw_sp_listeners_arr'.
- */
-static const u16 mlxsw_sp_listener_devlink_map[] = {
- DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
- DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH,
- DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
- DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER,
- DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
- DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
- DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER,
- DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
- DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
- DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
- DEVLINK_TRAP_GENERIC_ID_DIP_LB,
- DEVLINK_TRAP_GENERIC_ID_SIP_MC,
- DEVLINK_TRAP_GENERIC_ID_SIP_LB,
- DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
- DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
- DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
- DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
- DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
- DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
- DEVLINK_TRAP_GENERIC_ID_RPF,
- DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
- DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
- DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
- DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
- DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
- DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
- DEVLINK_MLXSW_TRAP_ID_IRIF_DISABLED,
- DEVLINK_MLXSW_TRAP_ID_ERIF_DISABLED,
- DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
- DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
- DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
- DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
- DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
- DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP,
- DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP,
-};
+ for (i = 0; i < trap->policers_count; i++) {
+ if (trap->policer_items_arr[i].policer.id == id)
+ return &trap->policer_items_arr[i];
+ }
-#define MLXSW_SP_THIN_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1)
+ return NULL;
+}
-static struct mlxsw_sp_trap_policer_item *
-mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
+static struct mlxsw_sp_trap_group_item *
+mlxsw_sp_trap_group_item_lookup(struct mlxsw_sp *mlxsw_sp, u16 id)
+{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = 0; i < trap->groups_count; i++) {
+ if (trap->group_items_arr[i].group.id == id)
+ return &trap->group_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_trap_item *
+mlxsw_sp_trap_item_lookup(struct mlxsw_sp *mlxsw_sp, u16 id)
{
- struct mlxsw_sp_trap_policer_item *policer_item;
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
- list_for_each_entry(policer_item, &trap->policer_item_list, list) {
- if (policer_item->id == id)
- return policer_item;
+ for (i = 0; i < trap->traps_count; i++) {
+ if (trap->trap_items_arr[i].trap.id == id)
+ return &trap->trap_items_arr[i];
}
return NULL;
@@ -326,14 +1026,21 @@ mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
static int mlxsw_sp_trap_cpu_policers_set(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ u16 hw_id;
 /* The purpose of the "thin" policer is to drop as many packets
 * as possible. The dummy trap group uses it.
 */
- __set_bit(MLXSW_SP_THIN_POLICER_ID, mlxsw_sp->trap->policers_usage);
- mlxsw_reg_qpcr_pack(qpcr_pl, MLXSW_SP_THIN_POLICER_ID,
- MLXSW_REG_QPCR_IR_UNITS_M, false, 1, 4);
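+	/* Allocate the thin policer ID dynamically from the shared policer
+	 * ID space instead of hard coding one.
+	 */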
+ hw_id = find_first_zero_bit(trap->policers_usage, trap->max_policers);
+ if (WARN_ON(hw_id == trap->max_policers))
+ return -ENOBUFS;
+
+ __set_bit(hw_id, trap->policers_usage);
+ trap->thin_policer_hw_id = hw_id;
+ mlxsw_reg_qpcr_pack(qpcr_pl, hw_id, MLXSW_REG_QPCR_IR_UNITS_M,
+ false, 1, 4);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
}
@@ -342,82 +1049,214 @@ static int mlxsw_sp_trap_dummy_group_init(struct mlxsw_sp *mlxsw_sp)
char htgt_pl[MLXSW_REG_HTGT_LEN];
mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
- MLXSW_SP_THIN_POLICER_ID, 0, 1);
+ mlxsw_sp->trap->thin_policer_hw_id, 0, 1);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
}
-static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_trap_policer_items_arr_init(struct mlxsw_sp *mlxsw_sp)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ size_t elem_size = sizeof(struct mlxsw_sp_trap_policer_item);
+ u64 arr_size = ARRAY_SIZE(mlxsw_sp_trap_policer_items_arr);
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
u64 free_policers = 0;
- u32 last_id = 0;
- int err, i;
+ u32 last_id;
+ int i;
for_each_clear_bit(i, trap->policers_usage, trap->max_policers)
free_policers++;
- if (ARRAY_SIZE(mlxsw_sp_trap_policers_arr) > free_policers) {
+ if (arr_size > free_policers) {
dev_err(mlxsw_sp->bus_info->dev, "Exceeded number of supported packet trap policers\n");
return -ENOBUFS;
}
- trap->policers_arr = kcalloc(free_policers,
- sizeof(struct devlink_trap_policer),
- GFP_KERNEL);
- if (!trap->policers_arr)
+ trap->policer_items_arr = kcalloc(free_policers, elem_size, GFP_KERNEL);
+ if (!trap->policer_items_arr)
return -ENOMEM;
trap->policers_count = free_policers;
- for (i = 0; i < free_policers; i++) {
- const struct devlink_trap_policer *policer;
-
- if (i < ARRAY_SIZE(mlxsw_sp_trap_policers_arr)) {
- policer = &mlxsw_sp_trap_policers_arr[i];
- trap->policers_arr[i] = *policer;
- last_id = policer->id;
- } else {
- /* Use parameters set for first policer and override
- * relevant ones.
- */
- policer = &mlxsw_sp_trap_policers_arr[0];
- trap->policers_arr[i] = *policer;
- trap->policers_arr[i].id = ++last_id;
- trap->policers_arr[i].init_rate = 1;
- trap->policers_arr[i].init_burst = 16;
- }
+ /* Initialize the policer items array with the pre-defined policers. */
+ memcpy(trap->policer_items_arr, mlxsw_sp_trap_policer_items_arr,
+ elem_size * arr_size);
+
+ /* Initialize the rest of the array with the remaining available
+ * policers.
+ */
+ last_id = mlxsw_sp_trap_policer_items_arr[arr_size - 1].policer.id;
+ for (i = arr_size; i < trap->policers_count; i++) {
+ const struct mlxsw_sp_trap_policer_item *policer_item;
+
+ /* Use the parameters of the first policer and override the
+ * relevant ones.
+ */
+ policer_item = &mlxsw_sp_trap_policer_items_arr[0];
+ trap->policer_items_arr[i] = *policer_item;
+ trap->policer_items_arr[i].policer.id = ++last_id;
+ trap->policer_items_arr[i].policer.init_rate = 1;
+ trap->policer_items_arr[i].policer.init_burst = 16;
}
- INIT_LIST_HEAD(&trap->policer_item_list);
+ return 0;
+}
+
+static void mlxsw_sp_trap_policer_items_arr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->trap->policer_items_arr);
+}
+
+static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ const struct mlxsw_sp_trap_policer_item *policer_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int err, i;
- err = devlink_trap_policers_register(devlink, trap->policers_arr,
- trap->policers_count);
+ err = mlxsw_sp_trap_policer_items_arr_init(mlxsw_sp);
if (err)
- goto err_trap_policers_register;
+ return err;
+
+ for (i = 0; i < trap->policers_count; i++) {
+ policer_item = &trap->policer_items_arr[i];
+ err = devlink_trap_policers_register(devlink,
+ &policer_item->policer, 1);
+ if (err)
+ goto err_trap_policer_register;
+ }
return 0;
-err_trap_policers_register:
- kfree(trap->policers_arr);
+err_trap_policer_register:
+ for (i--; i >= 0; i--) {
+ policer_item = &trap->policer_items_arr[i];
+ devlink_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
+ }
+ mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
return err;
}
static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ const struct mlxsw_sp_trap_policer_item *policer_item;
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
- devlink_trap_policers_unregister(devlink, trap->policers_arr,
- trap->policers_count);
- WARN_ON(!list_empty(&trap->policer_item_list));
- kfree(trap->policers_arr);
+ for (i = trap->policers_count - 1; i >= 0; i--) {
+ policer_item = &trap->policer_items_arr[i];
+ devlink_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
+ }
+ mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
}
-int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
{
- size_t groups_count = ARRAY_SIZE(mlxsw_sp_trap_groups_arr);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ const struct mlxsw_sp_trap_group_item *group_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int err, i;
+
+ trap->group_items_arr = kmemdup(mlxsw_sp_trap_group_items_arr,
+ sizeof(mlxsw_sp_trap_group_items_arr),
+ GFP_KERNEL);
+ if (!trap->group_items_arr)
+ return -ENOMEM;
+
+ trap->groups_count = ARRAY_SIZE(mlxsw_sp_trap_group_items_arr);
+
+ for (i = 0; i < trap->groups_count; i++) {
+ group_item = &trap->group_items_arr[i];
+ err = devlink_trap_groups_register(devlink, &group_item->group,
+ 1);
+ if (err)
+ goto err_trap_group_register;
+ }
+
+ return 0;
+
+err_trap_group_register:
+ for (i--; i >= 0; i--) {
+ group_item = &trap->group_items_arr[i];
+ devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ }
+ kfree(trap->group_items_arr);
+ return err;
+}
+
+static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = trap->groups_count - 1; i >= 0; i--) {
+ const struct mlxsw_sp_trap_group_item *group_item;
+
+ group_item = &trap->group_items_arr[i];
+ devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ }
+ kfree(trap->group_items_arr);
+}
+
+static bool
+mlxsw_sp_trap_listener_is_valid(const struct mlxsw_listener *listener)
+{
+ return listener->trap_id != 0;
+}
+
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ const struct mlxsw_sp_trap_item *trap_item;
+ int err, i;
+
+ trap->trap_items_arr = kmemdup(mlxsw_sp_trap_items_arr,
+ sizeof(mlxsw_sp_trap_items_arr),
+ GFP_KERNEL);
+ if (!trap->trap_items_arr)
+ return -ENOMEM;
+
+ trap->traps_count = ARRAY_SIZE(mlxsw_sp_trap_items_arr);
+
+ for (i = 0; i < trap->traps_count; i++) {
+ trap_item = &trap->trap_items_arr[i];
+ err = devlink_traps_register(devlink, &trap_item->trap, 1,
+ mlxsw_sp);
+ if (err)
+ goto err_trap_register;
+ }
+
+ return 0;
+
+err_trap_register:
+ for (i--; i >= 0; i--) {
+ trap_item = &trap->trap_items_arr[i];
+ devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ }
+ kfree(trap->trap_items_arr);
+ return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = trap->traps_count - 1; i >= 0; i--) {
+ const struct mlxsw_sp_trap_item *trap_item;
+
+ trap_item = &trap->trap_items_arr[i];
+ devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ }
+ kfree(trap->trap_items_arr);
+}
+
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
int err;
err = mlxsw_sp_trap_cpu_policers_set(mlxsw_sp);
@@ -428,59 +1267,52 @@ int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- if (WARN_ON(ARRAY_SIZE(mlxsw_sp_listener_devlink_map) !=
- ARRAY_SIZE(mlxsw_sp_listeners_arr)))
- return -EINVAL;
-
err = mlxsw_sp_trap_policers_init(mlxsw_sp);
if (err)
return err;
- err = devlink_trap_groups_register(devlink, mlxsw_sp_trap_groups_arr,
- groups_count);
+ err = mlxsw_sp_trap_groups_init(mlxsw_sp);
if (err)
- goto err_trap_groups_register;
+ goto err_trap_groups_init;
- err = devlink_traps_register(devlink, mlxsw_sp_traps_arr,
- ARRAY_SIZE(mlxsw_sp_traps_arr), mlxsw_sp);
+ err = mlxsw_sp_traps_init(mlxsw_sp);
if (err)
- goto err_traps_register;
+ goto err_traps_init;
return 0;
-err_traps_register:
- devlink_trap_groups_unregister(devlink, mlxsw_sp_trap_groups_arr,
- groups_count);
-err_trap_groups_register:
+err_traps_init:
+ mlxsw_sp_trap_groups_fini(mlxsw_sp);
+err_trap_groups_init:
mlxsw_sp_trap_policers_fini(mlxsw_sp);
return err;
}
void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
- size_t groups_count = ARRAY_SIZE(mlxsw_sp_trap_groups_arr);
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
-
- devlink_traps_unregister(devlink, mlxsw_sp_traps_arr,
- ARRAY_SIZE(mlxsw_sp_traps_arr));
- devlink_trap_groups_unregister(devlink, mlxsw_sp_trap_groups_arr,
- groups_count);
+ mlxsw_sp_traps_fini(mlxsw_sp);
+ mlxsw_sp_trap_groups_fini(mlxsw_sp);
mlxsw_sp_trap_policers_fini(mlxsw_sp);
}
int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap, void *trap_ctx)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp_trap_item *trap_item;
int i;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ trap_item = mlxsw_sp_trap_item_lookup(mlxsw_sp, trap->id);
+ if (WARN_ON(!trap_item))
+ return -EINVAL;
+
+ for (i = 0; i < MLXSW_SP_TRAP_LISTENERS_MAX; i++) {
const struct mlxsw_listener *listener;
int err;
- if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ listener = &trap_item->listeners_arr[i];
+ if (!mlxsw_sp_trap_listener_is_valid(listener))
continue;
- listener = &mlxsw_sp_listeners_arr[i];
-
err = mlxsw_core_trap_register(mlxsw_core, listener, trap_ctx);
if (err)
return err;
@@ -492,15 +1324,20 @@ int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap, void *trap_ctx)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp_trap_item *trap_item;
int i;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ trap_item = mlxsw_sp_trap_item_lookup(mlxsw_sp, trap->id);
+ if (WARN_ON(!trap_item))
+ return;
+
+ for (i = MLXSW_SP_TRAP_LISTENERS_MAX - 1; i >= 0; i--) {
const struct mlxsw_listener *listener;
- if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ listener = &trap_item->listeners_arr[i];
+ if (!mlxsw_sp_trap_listener_is_valid(listener))
continue;
- listener = &mlxsw_sp_listeners_arr[i];
-
mlxsw_core_trap_unregister(mlxsw_core, listener, trap_ctx);
}
}
@@ -509,16 +1346,23 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap,
enum devlink_trap_action action)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp_trap_item *trap_item;
int i;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ trap_item = mlxsw_sp_trap_item_lookup(mlxsw_sp, trap->id);
+ if (WARN_ON(!trap_item))
+ return -EINVAL;
+
+ for (i = 0; i < MLXSW_SP_TRAP_LISTENERS_MAX; i++) {
const struct mlxsw_listener *listener;
bool enabled;
int err;
- if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ listener = &trap_item->listeners_arr[i];
+ if (!mlxsw_sp_trap_listener_is_valid(listener))
continue;
- listener = &mlxsw_sp_listeners_arr[i];
+
switch (action) {
case DEVLINK_TRAP_ACTION_DROP:
enabled = false;
@@ -544,33 +1388,12 @@ __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u16 hw_policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
+ const struct mlxsw_sp_trap_group_item *group_item;
char htgt_pl[MLXSW_REG_HTGT_LEN];
- u8 priority, tc, group_id;
-
- switch (group->id) {
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
- group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS;
- priority = 0;
- tc = 1;
- break;
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:
- group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS;
- priority = 0;
- tc = 1;
- break;
- case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
- group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS;
- priority = 0;
- tc = 1;
- break;
- case DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS:
- group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS;
- priority = 0;
- tc = 1;
- break;
- default:
+
+ group_item = mlxsw_sp_trap_group_item_lookup(mlxsw_sp, group->id);
+ if (WARN_ON(!group_item))
return -EINVAL;
- }
if (policer_id) {
struct mlxsw_sp_trap_policer_item *policer_item;
@@ -582,7 +1405,8 @@ __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
hw_policer_id = policer_item->hw_id;
}
- mlxsw_reg_htgt_pack(htgt_pl, group_id, hw_policer_id, priority, tc);
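+	/* The traffic class (TC) is set to the same value as the priority. */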
+ mlxsw_reg_htgt_pack(htgt_pl, group_item->hw_group_id, hw_policer_id,
+ group_item->priority, group_item->priority);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
@@ -602,10 +1426,10 @@ int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id);
}
-static struct mlxsw_sp_trap_policer_item *
-mlxsw_sp_trap_policer_item_init(struct mlxsw_sp *mlxsw_sp, u32 id)
+static int
+mlxsw_sp_trap_policer_item_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_trap_policer_item *policer_item)
{
- struct mlxsw_sp_trap_policer_item *policer_item;
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
u16 hw_id;
@@ -615,27 +1439,19 @@ mlxsw_sp_trap_policer_item_init(struct mlxsw_sp *mlxsw_sp, u32 id)
*/
hw_id = find_first_zero_bit(trap->policers_usage, trap->max_policers);
if (WARN_ON(hw_id == trap->max_policers))
- return ERR_PTR(-ENOBUFS);
-
- policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
- if (!policer_item)
- return ERR_PTR(-ENOMEM);
+ return -ENOBUFS;
__set_bit(hw_id, trap->policers_usage);
policer_item->hw_id = hw_id;
- policer_item->id = id;
- list_add_tail(&policer_item->list, &trap->policer_item_list);
- return policer_item;
+ return 0;
}
static void
mlxsw_sp_trap_policer_item_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_trap_policer_item *policer_item)
{
- list_del(&policer_item->list);
__clear_bit(policer_item->hw_id, mlxsw_sp->trap->policers_usage);
- kfree(policer_item);
}
static int mlxsw_sp_trap_policer_bs(u64 burst, u8 *p_burst_size,
@@ -678,9 +1494,13 @@ int mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp_trap_policer_item *policer_item;
int err;
- policer_item = mlxsw_sp_trap_policer_item_init(mlxsw_sp, policer->id);
- if (IS_ERR(policer_item))
- return PTR_ERR(policer_item);
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+
+ err = mlxsw_sp_trap_policer_item_init(mlxsw_sp, policer_item);
+ if (err)
+ return err;
err = __mlxsw_sp_trap_policer_set(mlxsw_sp, policer_item->hw_id,
policer->init_rate,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
index 8c54897ba173..13ac412f4d53 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
@@ -8,17 +8,19 @@
#include <net/devlink.h>
struct mlxsw_sp_trap {
- struct devlink_trap_policer *policers_arr; /* Registered policers */
+ struct mlxsw_sp_trap_policer_item *policer_items_arr;
u64 policers_count; /* Number of registered policers */
- struct list_head policer_item_list;
+
+ struct mlxsw_sp_trap_group_item *group_items_arr;
+ u64 groups_count; /* Number of registered groups */
+
+ struct mlxsw_sp_trap_item *trap_items_arr;
+ u64 traps_count; /* Number of registered traps */
+
+ u16 thin_policer_hw_id;
+
u64 max_policers;
unsigned long policers_usage[]; /* Usage bitmap */
};
-struct mlxsw_sp_trap_policer_item {
- u16 hw_id;
- u32 id;
- struct list_head list; /* Member of policer_item_list */
-};
-
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 90535820b559..b438f5576e18 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1259,6 +1259,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
if (mlxsw_sx_port_created(mlxsw_sx, i))
mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
+ mlxsw_sx->ports = NULL;
}
static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
@@ -1293,6 +1294,7 @@ err_port_module_info_get:
if (mlxsw_sx_port_created(mlxsw_sx, i))
mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
+ mlxsw_sx->ports = NULL;
return err;
}
@@ -1376,6 +1378,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
u8 module, width;
int err;
+ if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
+ local_port);
+ return -EINVAL;
+ }
+
if (new_type == DEVLINK_PORT_TYPE_AUTO)
return -EOPNOTSUPP;
@@ -1396,6 +1404,11 @@ err_port_module_info_get:
return err;
}
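+/* Trap group IDs local to the SwitchX-2 driver. */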
+enum {
+ MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX = 1,
+ MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL = 2,
+};
+
#define MLXSW_SX_RXL(_trap_id) \
MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \
false, SX2_RX, FORWARD)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index eaa521b7561b..28e60697d14e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -55,16 +55,19 @@ enum {
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
- MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISMENT = 0x8B,
+ MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISEMENT = 0x8B,
MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_SOLICITATION = 0x8C,
- MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISMENT = 0x8D,
+ MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISEMENT = 0x8D,
MLXSW_TRAP_ID_L3_IPV6_REDIRECTION = 0x8E,
+ MLXSW_TRAP_ID_IPV4_DHCP = 0x8F,
MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
+ MLXSW_TRAP_ID_IPV4_BFD = 0xD0,
+ MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index b9c4d48e28e4..09f35209d43d 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -38,6 +38,8 @@ config KS8851_MLL
tristate "Micrel KS8851 MLL"
depends on HAS_IOMEM
select MII
+ select CRC32
+ select EEPROM_93CX6
---help---
This platform driver is for Micrel KS8851 Address/data bus
multiplexed network chip.
diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
index 6d8ac5527aef..5cc00d22c708 100644
--- a/drivers/net/ethernet/micrel/Makefile
+++ b/drivers/net/ethernet/micrel/Makefile
@@ -5,5 +5,7 @@
obj-$(CONFIG_KS8842) += ks8842.o
obj-$(CONFIG_KS8851) += ks8851.o
+ks8851-objs = ks8851_common.o ks8851_spi.o
obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
+ks8851_mll-objs = ks8851_common.o ks8851_par.o
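+# Both ks8851 (SPI) and ks8851_mll (parallel bus) link the shared
+# ks8851_common core together with a bus-specific front end.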
obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 8f834aef8e32..2b319e451121 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -7,6 +7,11 @@
* KS8851 register definitions
*/
+#ifndef __KS8851_H__
+#define __KS8851_H__
+
+#include <linux/eeprom_93cx6.h>
+
#define KS_CCR 0x08
#define CCR_LE (1 << 10) /* KSZ8851-16MLL */
#define CCR_EEPROM (1 << 9)
@@ -19,7 +24,7 @@
#define CCR_32PIN (1 << 0) /* KSZ8851SNL */
/* MAC address registers */
 #define KS_MAR(_m) (0x15 - (_m))
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14
@@ -300,3 +305,147 @@
#define TXFR_TXIC (1 << 15)
#define TXFR_TXFID_MASK (0x3f << 0)
#define TXFR_TXFID_SHIFT (0)
+
+/**
+ * struct ks8851_rxctrl - KS8851 driver rx control
+ * @mchash: Multicast hash-table data.
+ * @rxcr1: KS_RXCR1 register setting
+ * @rxcr2: KS_RXCR2 register setting
+ *
+ * Representation of the settings needed to control the receive filtering,
+ * such as the multicast hash-filter and the receive register settings. It
+ * is used to work out whether the receive settings have changed and to
+ * issue the new settings to the worker that sends the necessary commands
+ * to the chip.
+ */
+struct ks8851_rxctrl {
+ u16 mchash[4];
+ u16 rxcr1;
+ u16 rxcr2;
+};
+
+/**
+ * union ks8851_tx_hdr - tx header data
+ * @txb: The header as bytes
+ * @txw: The header as 16bit, little-endian words
+ *
+ * A dual representation of the tx header data to allow
+ * access to individual bytes, and to allow 16bit accesses
+ * with 16bit alignment.
+ */
+union ks8851_tx_hdr {
+ u8 txb[6];
+ __le16 txw[3];
+};
+
+/**
+ * struct ks8851_net - KS8851 driver private data
+ * @netdev: The network device we're bound to
+ * @statelock: Lock on this structure for tx list.
+ * @mii: The MII state information for the mii calls.
+ * @rxctrl: RX settings for @rxctrl_work.
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @txq: Queue of packets for transmission.
+ * @txh: Space for generating packet TX header in DMA-able data
+ * @rxd: Space for receiving SPI data, in DMA-able space.
+ * @txd: Space for transmitting SPI data, in DMA-able space.
+ * @msg_enable: The message flags controlling driver output (see ethtool).
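+ * @tx_space: Free space in the hardware TX buffer (in bytes).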
+ * @fid: Incrementing frame id tag.
+ * @rc_ier: Cached copy of KS_IER.
+ * @rc_ccr: Cached copy of KS_CCR.
+ * @rc_rxqcr: Cached copy of KS_RXQCR.
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg: Optional regulator supplying the chip
+ * @vdd_io: Optional digital power supply for IO
+ * @gpio: Optional reset_n gpio
+ * @lock: Bus access lock callback
+ * @unlock: Bus access unlock callback
+ * @rdreg16: 16bit register read callback
+ * @wrreg16: 16bit register write callback
+ * @rdfifo: FIFO read callback
+ * @wrfifo: FIFO write callback
+ * @start_xmit: start_xmit() implementation callback
+ * @rx_skb: rx_skb() implementation callback
+ * @flush_tx_work: flush_tx_work() implementation callback
+ *
+ * The @statelock is used to protect information in the structure which may
+ * need to be accessed via several sources, such as the network driver layer
+ * or one of the work queues.
+ *
+ * We align the buffers we may use for rx/tx to ensure that if the SPI driver
+ * wants to DMA map them, it will not have any problems with data the driver
+ * modifies.
+ */
+struct ks8851_net {
+ struct net_device *netdev;
+ spinlock_t statelock;
+
+ union ks8851_tx_hdr txh ____cacheline_aligned;
+ u8 rxd[8];
+ u8 txd[8];
+
+ u32 msg_enable ____cacheline_aligned;
+ u16 tx_space;
+ u8 fid;
+
+ u16 rc_ier;
+ u16 rc_rxqcr;
+ u16 rc_ccr;
+
+ struct mii_if_info mii;
+ struct ks8851_rxctrl rxctrl;
+
+ struct work_struct rxctrl_work;
+
+ struct sk_buff_head txq;
+
+ struct eeprom_93cx6 eeprom;
+ struct regulator *vdd_reg;
+ struct regulator *vdd_io;
+ int gpio;
+
+ void (*lock)(struct ks8851_net *ks,
+ unsigned long *flags);
+ void (*unlock)(struct ks8851_net *ks,
+ unsigned long *flags);
+ unsigned int (*rdreg16)(struct ks8851_net *ks,
+ unsigned int reg);
+ void (*wrreg16)(struct ks8851_net *ks,
+ unsigned int reg, unsigned int val);
+ void (*rdfifo)(struct ks8851_net *ks, u8 *buff,
+ unsigned int len);
+ void (*wrfifo)(struct ks8851_net *ks,
+ struct sk_buff *txp, bool irq);
+ netdev_tx_t (*start_xmit)(struct sk_buff *skb,
+ struct net_device *dev);
+ void (*rx_skb)(struct ks8851_net *ks,
+ struct sk_buff *skb);
+ void (*flush_tx_work)(struct ks8851_net *ks);
+};
+
+int ks8851_probe_common(struct net_device *netdev, struct device *dev,
+ int msg_en);
+int ks8851_remove_common(struct device *dev);
+int ks8851_suspend(struct device *dev);
+int ks8851_resume(struct device *dev);
+
+static __maybe_unused SIMPLE_DEV_PM_OPS(ks8851_pm_ops,
+ ks8851_suspend, ks8851_resume);
+
+/**
+ * ks8851_done_tx - update and then free skbuff after transmitting
+ * @ks: The device state
+ * @txb: The buffer transmitted
+ */
+static void __maybe_unused ks8851_done_tx(struct ks8851_net *ks,
+ struct sk_buff *txb)
+{
+ struct net_device *dev = ks->netdev;
+
+ dev->stats.tx_bytes += txb->len;
+ dev->stats.tx_packets++;
+
+ dev_kfree_skb(txb);
+}
+
+#endif /* __KS8851_H__ */
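With the register and FIFO operations abstracted behind the callbacks above, each bus front-end only has to fill them in and call ks8851_probe_common(). The SPI and parallel front-ends themselves are not part of this hunk; the following is a minimal sketch of the expected wiring, where the my_*() helpers are illustrative assumptions rather than functions from this patch:

#include <linux/etherdevice.h>
#include "ks8851.h"

static int my_bus_probe(struct device *dev, int irq, int msg_en)
{
	struct net_device *netdev;
	struct ks8851_net *ks;

	netdev = devm_alloc_etherdev(dev, sizeof(struct ks8851_net));
	if (!netdev)
		return -ENOMEM;

	ks = netdev_priv(netdev);

	/* Bus-specific implementations; all assumed to exist elsewhere. */
	ks->lock = my_lock;
	ks->unlock = my_unlock;
	ks->rdreg16 = my_rdreg16;
	ks->wrreg16 = my_wrreg16;
	ks->rdfifo = my_rdfifo;
	ks->wrfifo = my_wrfifo;
	ks->start_xmit = my_start_xmit;
	ks->rx_skb = my_rx_skb;
	ks->flush_tx_work = my_flush_tx_work;	/* optional, may be NULL */

	netdev->irq = irq;

	return ks8851_probe_common(netdev, dev, msg_en);
}

ks8851_probe_common() then performs the soft reset, chip-ID check, MAC setup and register_netdev() shown in the common-code hunks below.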
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 33305c9c5a62..d65872172229 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -19,10 +19,8 @@
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
-#include <linux/eeprom_93cx6.h>
#include <linux/regulator/consumer.h>
-#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
@@ -30,255 +28,41 @@
#include "ks8851.h"
/**
- * struct ks8851_rxctrl - KS8851 driver rx control
- * @mchash: Multicast hash-table data.
- * @rxcr1: KS_RXCR1 register setting
- * @rxcr2: KS_RXCR2 register setting
- *
- * Representation of the settings needed to control the receive filtering
- * such as the multicast hash-filter and the receive register settings. This
- * is used to make the job of working out if the receive settings change and
- * then issuing the new settings to the worker that will send the necessary
- * commands.
- */
-struct ks8851_rxctrl {
- u16 mchash[4];
- u16 rxcr1;
- u16 rxcr2;
-};
-
-/**
- * union ks8851_tx_hdr - tx header data
- * @txb: The header as bytes
- * @txw: The header as 16bit, little-endian words
- *
- * A dual representation of the tx header data to allow
- * access to individual bytes, and to allow 16bit accesses
- * with 16bit alignment.
- */
-union ks8851_tx_hdr {
- u8 txb[6];
- __le16 txw[3];
-};
-
-/**
- * struct ks8851_net - KS8851 driver private data
- * @netdev: The network device we're bound to
- * @spidev: The spi device we're bound to.
- * @lock: Lock to ensure that the device is not accessed when busy.
- * @statelock: Lock on this structure for tx list.
- * @mii: The MII state information for the mii calls.
- * @rxctrl: RX settings for @rxctrl_work.
- * @tx_work: Work queue for tx packets
- * @rxctrl_work: Work queue for updating RX mode and multicast lists
- * @txq: Queue of packets for transmission.
- * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1.
- * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2.
- * @txh: Space for generating packet TX header in DMA-able data
- * @rxd: Space for receiving SPI data, in DMA-able space.
- * @txd: Space for transmitting SPI data, in DMA-able space.
- * @msg_enable: The message flags controlling driver output (see ethtool).
- * @fid: Incrementing frame id tag.
- * @rc_ier: Cached copy of KS_IER.
- * @rc_ccr: Cached copy of KS_CCR.
- * @rc_rxqcr: Cached copy of KS_RXQCR.
- * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
- * @vdd_reg: Optional regulator supplying the chip
- * @vdd_io: Optional digital power supply for IO
- * @gpio: Optional reset_n gpio
- *
- * The @lock ensures that the chip is protected when certain operations are
- * in progress. When the read or write packet transfer is in progress, most
- * of the chip registers are not accessible until the transfer is finished and
- * the DMA has been de-asserted.
- *
- * The @statelock is used to protect information in the structure which may
- * need to be accessed via several sources, such as the network driver layer
- * or one of the work queues.
- *
- * We align the buffers we may use for rx/tx to ensure that if the SPI driver
- * wants to DMA map them, it will not have any problems with data the driver
- * modifies.
- */
-struct ks8851_net {
- struct net_device *netdev;
- struct spi_device *spidev;
- struct mutex lock;
- spinlock_t statelock;
-
- union ks8851_tx_hdr txh ____cacheline_aligned;
- u8 rxd[8];
- u8 txd[8];
-
- u32 msg_enable ____cacheline_aligned;
- u16 tx_space;
- u8 fid;
-
- u16 rc_ier;
- u16 rc_rxqcr;
- u16 rc_ccr;
-
- struct mii_if_info mii;
- struct ks8851_rxctrl rxctrl;
-
- struct work_struct tx_work;
- struct work_struct rxctrl_work;
-
- struct sk_buff_head txq;
-
- struct spi_message spi_msg1;
- struct spi_message spi_msg2;
- struct spi_transfer spi_xfer1;
- struct spi_transfer spi_xfer2[2];
-
- struct eeprom_93cx6 eeprom;
- struct regulator *vdd_reg;
- struct regulator *vdd_io;
- int gpio;
-};
-
-static int msg_enable;
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD (0x00)
-#define KS_SPIOP_WR (0x40)
-#define KS_SPIOP_RXFIFO (0x80)
-#define KS_SPIOP_TXFIFO (0xC0)
-
-/* shift for byte-enable data */
-#define BYTE_EN(_x) ((_x) << 2)
-
-/* turn register number and byte-enable mask into data for start of packet */
-#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg) << (8+2) | (_reg) >> 6)
-
-/* SPI register read/write calls.
+ * ks8851_lock - register access lock
+ * @ks: The chip state
+ * @flags: Spinlock flags
*
- * All these calls issue SPI transactions to access the chip's registers. They
- * all require that the necessary lock is held to prevent accesses when the
- * chip is busy transferring packet data (RX/TX FIFO accesses).
+ * Claim chip register access lock
*/
+static void ks8851_lock(struct ks8851_net *ks, unsigned long *flags)
+{
+ ks->lock(ks, flags);
+}
/**
- * ks8851_wrreg16 - write 16bit register value to chip
+ * ks8851_unlock - register access unlock
* @ks: The chip state
- * @reg: The register address
- * @val: The value to write
+ * @flags: Spinlock flags
*
- * Issue a write to put the value @val into the register specified in @reg.
+ * Release chip register access lock
*/
-static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
+static void ks8851_unlock(struct ks8851_net *ks, unsigned long *flags)
{
- struct spi_transfer *xfer = &ks->spi_xfer1;
- struct spi_message *msg = &ks->spi_msg1;
- __le16 txb[2];
- int ret;
-
- txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR);
- txb[1] = cpu_to_le16(val);
-
- xfer->tx_buf = txb;
- xfer->rx_buf = NULL;
- xfer->len = 4;
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "spi_sync() failed\n");
+ ks->unlock(ks, flags);
}
/**
- * ks8851_wrreg8 - write 8bit register value to chip
+ * ks8851_wrreg16 - write 16bit register value to chip
* @ks: The chip state
* @reg: The register address
* @val: The value to write
*
* Issue a write to put the value @val into the register specified in @reg.
*/
-static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
-{
- struct spi_transfer *xfer = &ks->spi_xfer1;
- struct spi_message *msg = &ks->spi_msg1;
- __le16 txb[2];
- int ret;
- int bit;
-
- bit = 1 << (reg & 3);
-
- txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
- txb[1] = val;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = NULL;
- xfer->len = 3;
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "spi_sync() failed\n");
-}
-
-/**
- * ks8851_rdreg - issue read register command and return the data
- * @ks: The device state
- * @op: The register address and byte enables in message format.
- * @rxb: The RX buffer to return the result into
- * @rxl: The length of data expected.
- *
- * This is the low level read call that issues the necessary spi message(s)
- * to read data from the register specified in @op.
- */
-static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
- u8 *rxb, unsigned rxl)
-{
- struct spi_transfer *xfer;
- struct spi_message *msg;
- __le16 *txb = (__le16 *)ks->txd;
- u8 *trx = ks->rxd;
- int ret;
-
- txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
-
- if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
- msg = &ks->spi_msg2;
- xfer = ks->spi_xfer2;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = NULL;
- xfer->len = 2;
-
- xfer++;
- xfer->tx_buf = NULL;
- xfer->rx_buf = trx;
- xfer->len = rxl;
- } else {
- msg = &ks->spi_msg1;
- xfer = &ks->spi_xfer1;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = trx;
- xfer->len = rxl + 2;
- }
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "read: spi_sync() failed\n");
- else if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
- memcpy(rxb, trx, rxl);
- else
- memcpy(rxb, trx + 2, rxl);
-}
-
-/**
- * ks8851_rdreg8 - read 8 bit register from device
- * @ks: The chip information
- * @reg: The register address
- *
- * Read a 8bit register from the chip, returning the result
-*/
-static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg)
+static void ks8851_wrreg16(struct ks8851_net *ks, unsigned int reg,
+ unsigned int val)
{
- u8 rxb[1];
-
- ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1);
- return rxb[0];
+ ks->wrreg16(ks, reg, val);
}
/**
@@ -287,32 +71,11 @@ static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg)
* @reg: The register address
*
* Read a 16bit register from the chip, returning the result
-*/
-static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg)
-{
- __le16 rx = 0;
-
- ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2);
- return le16_to_cpu(rx);
-}
-
-/**
- * ks8851_rdreg32 - read 32 bit register from device
- * @ks: The chip information
- * @reg: The register address
- *
- * Read a 32bit register from the chip.
- *
- * Note, this read requires the address be aligned to 4 bytes.
-*/
-static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg)
+ */
+static unsigned int ks8851_rdreg16(struct ks8851_net *ks,
+ unsigned int reg)
{
- __le32 rx = 0;
-
- WARN_ON(reg & 3);
-
- ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4);
- return le32_to_cpu(rx);
+ return ks->rdreg16(ks, reg);
}
/**
@@ -368,21 +131,27 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
static int ks8851_write_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
+ u16 val;
int i;
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
/*
* Wake up chip in case it was powered off when stopped; otherwise,
* the first write to the MAC address does not take effect.
*/
ks8851_set_powermode(ks, PMECR_PM_NORMAL);
- for (i = 0; i < ETH_ALEN; i++)
- ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ val = (dev->dev_addr[i] << 8) | dev->dev_addr[i + 1];
+ ks8851_wrreg16(ks, KS_MAR(i), val);
+ }
+
if (!netif_running(dev))
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
return 0;
}
@@ -396,19 +165,25 @@ static int ks8851_write_mac_addr(struct net_device *dev)
static void ks8851_read_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
+ u16 reg;
int i;
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i));
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ reg = ks8851_rdreg16(ks, KS_MAR(i));
+ dev->dev_addr[i] = reg >> 8;
+ dev->dev_addr[i + 1] = reg & 0xff;
+ }
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
}
/**
* ks8851_init_mac - initialise the mac address
* @ks: The device structure
+ * @np: The device node pointer
*
* Get or create the initial mac address for the device and then set that
* into the station address register. A mac address supplied in the device
@@ -416,12 +191,12 @@ static void ks8851_read_mac_addr(struct net_device *dev)
* we try that. If no valid mac address is found we use eth_random_addr()
* to create a new one.
*/
-static void ks8851_init_mac(struct ks8851_net *ks)
+static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
{
struct net_device *dev = ks->netdev;
const u8 *mac_addr;
- mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
+ mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr)) {
ether_addr_copy(dev->dev_addr, mac_addr);
ks8851_write_mac_addr(dev);
@@ -442,48 +217,12 @@ static void ks8851_init_mac(struct ks8851_net *ks)
}
/**
- * ks8851_rdfifo - read data from the receive fifo
- * @ks: The device state.
- * @buff: The buffer address
- * @len: The length of the data to read
- *
- * Issue an RXQ FIFO read command and read the @len amount of data from
- * the FIFO into the buffer specified by @buff.
- */
-static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
-{
- struct spi_transfer *xfer = ks->spi_xfer2;
- struct spi_message *msg = &ks->spi_msg2;
- u8 txb[1];
- int ret;
-
- netif_dbg(ks, rx_status, ks->netdev,
- "%s: %d@%p\n", __func__, len, buff);
-
- /* set the operation we're issuing */
- txb[0] = KS_SPIOP_RXFIFO;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = NULL;
- xfer->len = 1;
-
- xfer++;
- xfer->rx_buf = buff;
- xfer->tx_buf = NULL;
- xfer->len = len;
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
-}
-
-/**
* ks8851_dbg_dumpkkt - dump initial packet contents to debug
* @ks: The device state
* @rxpkt: The data for the received packet
*
* Dump the initial data from the packet to dev_dbg().
-*/
+ */
static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
{
netdev_dbg(ks->netdev,
@@ -494,6 +233,16 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
}
/**
+ * ks8851_rx_skb - receive skbuff
+ * @ks: The device state.
+ * @skb: The skbuff
+ */
+static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
+{
+ ks->rx_skb(ks, skb);
+}
+
+/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
*
@@ -507,10 +256,9 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
unsigned rxfc;
unsigned rxlen;
unsigned rxstat;
- u32 rxh;
u8 *rxpkt;
- rxfc = ks8851_rdreg8(ks, KS_RXFC);
+ rxfc = (ks8851_rdreg16(ks, KS_RXFCTR) >> 8) & 0xff;
netif_dbg(ks, rx_status, ks->netdev,
"%s: %d packets\n", __func__, rxfc);
@@ -526,9 +274,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
*/
for (; rxfc != 0; rxfc--) {
- rxh = ks8851_rdreg32(ks, KS_RXFHSR);
- rxstat = rxh & 0xffff;
- rxlen = (rxh >> 16) & 0xfff;
+ rxstat = ks8851_rdreg16(ks, KS_RXFHSR);
+ rxlen = ks8851_rdreg16(ks, KS_RXFHBCR) & RXFHBCR_CNT_MASK;
netif_dbg(ks, rx_status, ks->netdev,
"rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
@@ -557,13 +304,13 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxpkt = skb_put(skb, rxlen) - 8;
- ks8851_rdfifo(ks, rxpkt, rxalign + 8);
+ ks->rdfifo(ks, rxpkt, rxalign + 8);
if (netif_msg_pktdata(ks))
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- netif_rx_ni(skb);
+ ks8851_rx_skb(ks, skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -590,10 +337,11 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
static irqreturn_t ks8851_irq(int irq, void *_ks)
{
struct ks8851_net *ks = _ks;
- unsigned status;
unsigned handled = 0;
+ unsigned long flags;
+ unsigned int status;
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
status = ks8851_rdreg16(ks, KS_ISR);
@@ -631,7 +379,7 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
handled |= IRQ_RXI;
if (status & IRQ_SPIBEI) {
- dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__);
+ netdev_err(ks->netdev, "%s: spi bus error\n", __func__);
handled |= IRQ_SPIBEI;
}
@@ -662,7 +410,7 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
}
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
@@ -674,108 +422,13 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
}
/**
- * calc_txlen - calculate size of message to send packet
- * @len: Length of data
- *
- * Returns the size of the TXFIFO message needed to send
- * this packet.
- */
-static inline unsigned calc_txlen(unsigned len)
-{
- return ALIGN(len + 4, 4);
-}
-
-/**
- * ks8851_wrpkt - write packet to TX FIFO
- * @ks: The device state.
- * @txp: The sk_buff to transmit.
- * @irq: IRQ on completion of the packet.
- *
- * Send the @txp to the chip. This means creating the relevant packet header
- * specifying the length of the packet and the other information the chip
- * needs, such as IRQ on completion. Send the header and the packet data to
- * the device.
- */
-static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
-{
- struct spi_transfer *xfer = ks->spi_xfer2;
- struct spi_message *msg = &ks->spi_msg2;
- unsigned fid = 0;
- int ret;
-
- netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
- __func__, txp, txp->len, txp->data, irq);
-
- fid = ks->fid++;
- fid &= TXFR_TXFID_MASK;
-
- if (irq)
- fid |= TXFR_TXIC; /* irq on completion */
-
- /* start header at txb[1] to align txw entries */
- ks->txh.txb[1] = KS_SPIOP_TXFIFO;
- ks->txh.txw[1] = cpu_to_le16(fid);
- ks->txh.txw[2] = cpu_to_le16(txp->len);
-
- xfer->tx_buf = &ks->txh.txb[1];
- xfer->rx_buf = NULL;
- xfer->len = 5;
-
- xfer++;
- xfer->tx_buf = txp->data;
- xfer->rx_buf = NULL;
- xfer->len = ALIGN(txp->len, 4);
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
-}
-
-/**
- * ks8851_done_tx - update and then free skbuff after transmitting
+ * ks8851_flush_tx_work - flush outstanding TX work
* @ks: The device state
- * @txb: The buffer transmitted
- */
-static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb)
-{
- struct net_device *dev = ks->netdev;
-
- dev->stats.tx_bytes += txb->len;
- dev->stats.tx_packets++;
-
- dev_kfree_skb(txb);
-}
-
-/**
- * ks8851_tx_work - process tx packet(s)
- * @work: The work structure that was scheduled.
- *
- * This is called when a number of packets have been scheduled for
- * transmission and need to be sent to the device.
*/
-static void ks8851_tx_work(struct work_struct *work)
+static void ks8851_flush_tx_work(struct ks8851_net *ks)
{
- struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work);
- struct sk_buff *txb;
- bool last = skb_queue_empty(&ks->txq);
-
- mutex_lock(&ks->lock);
-
- while (!last) {
- txb = skb_dequeue(&ks->txq);
- last = skb_queue_empty(&ks->txq);
-
- if (txb != NULL) {
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
- ks8851_wrpkt(ks, txb, last);
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
- ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
-
- ks8851_done_tx(ks, txb);
- }
- }
-
- mutex_unlock(&ks->lock);
+ if (ks->flush_tx_work)
+ ks->flush_tx_work(ks);
}
/**
@@ -788,6 +441,7 @@ static void ks8851_tx_work(struct work_struct *work)
static int ks8851_net_open(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
int ret;
ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
@@ -800,7 +454,7 @@ static int ks8851_net_open(struct net_device *dev)
/* lock the card, even if we may not actually be doing anything
* else at the moment */
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
netif_dbg(ks, ifup, ks->netdev, "opening\n");
@@ -844,23 +498,14 @@ static int ks8851_net_open(struct net_device *dev)
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
/* clear then enable interrupts */
-
-#define STD_IRQ (IRQ_LCI | /* Link Change */ \
- IRQ_TXI | /* TX done */ \
- IRQ_RXI | /* RX done */ \
- IRQ_SPIBEI | /* SPI bus error */ \
- IRQ_TXPSI | /* TX process stop */ \
- IRQ_RXPSI) /* RX process stop */
-
- ks->rc_ier = STD_IRQ;
- ks8851_wrreg16(ks, KS_ISR, STD_IRQ);
- ks8851_wrreg16(ks, KS_IER, STD_IRQ);
+ ks8851_wrreg16(ks, KS_ISR, ks->rc_ier);
+ ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
netif_start_queue(ks->netdev);
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
mii_check_link(&ks->mii);
return 0;
}
@@ -876,22 +521,23 @@ static int ks8851_net_open(struct net_device *dev)
static int ks8851_net_stop(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
netif_info(ks, ifdown, dev, "shutting down\n");
netif_stop_queue(dev);
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
/* turn off the IRQs and ack any outstanding */
ks8851_wrreg16(ks, KS_IER, 0x0000);
ks8851_wrreg16(ks, KS_ISR, 0xffff);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
/* stop any outstanding work */
- flush_work(&ks->tx_work);
+ ks8851_flush_tx_work(ks);
flush_work(&ks->rxctrl_work);
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
/* shutdown RX process */
ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
@@ -900,7 +546,7 @@ static int ks8851_net_stop(struct net_device *dev)
/* set powermode to soft power down to save power */
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
/* ensure any queued tx buffers are dumped */
while (!skb_queue_empty(&ks->txq)) {
@@ -934,26 +580,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned needed = calc_txlen(skb->len);
- netdev_tx_t ret = NETDEV_TX_OK;
-
- netif_dbg(ks, tx_queued, ks->netdev,
- "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
- spin_lock(&ks->statelock);
-
- if (needed > ks->tx_space) {
- netif_stop_queue(dev);
- ret = NETDEV_TX_BUSY;
- } else {
- ks->tx_space -= needed;
- skb_queue_tail(&ks->txq, skb);
- }
-
- spin_unlock(&ks->statelock);
- schedule_work(&ks->tx_work);
-
- return ret;
+ return ks->start_xmit(skb, dev);
}
/**
@@ -972,13 +600,14 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
static void ks8851_rxctrl_work(struct work_struct *work)
{
struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
+ unsigned long flags;
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
/* need to shutdown RXQ before modifying filter parameters */
ks8851_wrreg16(ks, KS_RXCR1, 0x00);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
}
static void ks8851_set_rx_mode(struct net_device *dev)
@@ -1160,11 +789,6 @@ static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
*/
static int ks8851_eeprom_claim(struct ks8851_net *ks)
{
- if (!(ks->rc_ccr & CCR_EEPROM))
- return -ENOENT;
-
- mutex_lock(&ks->lock);
-
/* start with clock low, cs high */
ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
return 0;
@@ -1181,7 +805,6 @@ static void ks8851_eeprom_release(struct ks8851_net *ks)
unsigned val = ks8851_rdreg16(ks, KS_EEPCR);
ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
- mutex_unlock(&ks->lock);
}
#define KS_EEPROM_MAGIC (0x00008851)
@@ -1191,6 +814,7 @@ static int ks8851_set_eeprom(struct net_device *dev,
{
struct ks8851_net *ks = netdev_priv(dev);
int offset = ee->offset;
+ unsigned long flags;
int len = ee->len;
u16 tmp;
@@ -1201,9 +825,13 @@ static int ks8851_set_eeprom(struct net_device *dev,
if (ee->magic != KS_EEPROM_MAGIC)
return -EINVAL;
- if (ks8851_eeprom_claim(ks))
+ if (!(ks->rc_ccr & CCR_EEPROM))
return -ENOENT;
+ ks8851_lock(ks, &flags);
+
+ ks8851_eeprom_claim(ks);
+
eeprom_93cx6_wren(&ks->eeprom, true);
/* ethtool currently only supports writing bytes, which means
@@ -1223,6 +851,7 @@ static int ks8851_set_eeprom(struct net_device *dev,
eeprom_93cx6_wren(&ks->eeprom, false);
ks8851_eeprom_release(ks);
+ ks8851_unlock(ks, &flags);
return 0;
}
@@ -1232,19 +861,25 @@ static int ks8851_get_eeprom(struct net_device *dev,
{
struct ks8851_net *ks = netdev_priv(dev);
int offset = ee->offset;
+ unsigned long flags;
int len = ee->len;
/* must be 2 byte aligned */
if (len & 1 || offset & 1)
return -EINVAL;
- if (ks8851_eeprom_claim(ks))
+ if (!(ks->rc_ccr & CCR_EEPROM))
return -ENOENT;
+ ks8851_lock(ks, &flags);
+
+ ks8851_eeprom_claim(ks);
+
ee->magic = KS_EEPROM_MAGIC;
eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
ks8851_eeprom_release(ks);
+ ks8851_unlock(ks, &flags);
return 0;
}
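Note the new division of labour: the CCR_EEPROM capability check and the register lock move out of ks8851_eeprom_claim() and into the ethtool entry points, leaving claim/release to toggle only EEPCR. A condensed sketch of the resulting call pattern, distilled from the two hunks above (example_eeprom_read() is a hypothetical helper, not part of the patch):

static int example_eeprom_read(struct ks8851_net *ks, int word, __le16 *out)
{
	unsigned long flags;

	/* capability check first, without taking any lock */
	if (!(ks->rc_ccr & CCR_EEPROM))
		return -ENOENT;

	ks8851_lock(ks, &flags);	/* callers take the bus lock... */
	ks8851_eeprom_claim(ks);	/* ...then claim the EEPROM pins */

	eeprom_93cx6_multiread(&ks->eeprom, word, out, 1);

	ks8851_eeprom_release(ks);
	ks8851_unlock(ks, &flags);

	return 0;
}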
@@ -1318,6 +953,7 @@ static int ks8851_phy_reg(int reg)
static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
int ksreg;
int result;
@@ -1325,9 +961,9 @@ static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
if (!ksreg)
return 0x0; /* no error return allowed, so use zero */
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
result = ks8851_rdreg16(ks, ksreg);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
return result;
}
@@ -1336,13 +972,14 @@ static void ks8851_phy_write(struct net_device *dev,
int phy, int reg, int value)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
int ksreg;
ksreg = ks8851_phy_reg(reg);
if (ksreg) {
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
ks8851_wrreg16(ks, ksreg, value);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
}
}
@@ -1382,7 +1019,7 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
#ifdef CONFIG_PM_SLEEP
-static int ks8851_suspend(struct device *dev)
+int ks8851_suspend(struct device *dev)
{
struct ks8851_net *ks = dev_get_drvdata(dev);
struct net_device *netdev = ks->netdev;
@@ -1395,7 +1032,7 @@ static int ks8851_suspend(struct device *dev)
return 0;
}
-static int ks8851_resume(struct device *dev)
+int ks8851_resume(struct device *dev)
{
struct ks8851_net *ks = dev_get_drvdata(dev);
struct net_device *netdev = ks->netdev;
@@ -1409,46 +1046,32 @@ static int ks8851_resume(struct device *dev)
}
#endif
-static SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_suspend, ks8851_resume);
-
-static int ks8851_probe(struct spi_device *spi)
+int ks8851_probe_common(struct net_device *netdev, struct device *dev,
+ int msg_en)
{
- struct net_device *ndev;
- struct ks8851_net *ks;
- int ret;
+ struct ks8851_net *ks = netdev_priv(netdev);
unsigned cider;
int gpio;
+ int ret;
- ndev = alloc_etherdev(sizeof(struct ks8851_net));
- if (!ndev)
- return -ENOMEM;
-
- spi->bits_per_word = 8;
-
- ks = netdev_priv(ndev);
-
- ks->netdev = ndev;
- ks->spidev = spi;
+ ks->netdev = netdev;
ks->tx_space = 6144;
- gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
- 0, NULL);
- if (gpio == -EPROBE_DEFER) {
- ret = gpio;
- goto err_gpio;
- }
+ gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, NULL);
+ if (gpio == -EPROBE_DEFER)
+ return gpio;
ks->gpio = gpio;
if (gpio_is_valid(gpio)) {
- ret = devm_gpio_request_one(&spi->dev, gpio,
+ ret = devm_gpio_request_one(dev, gpio,
GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
if (ret) {
- dev_err(&spi->dev, "reset gpio request failed\n");
- goto err_gpio;
+ dev_err(dev, "reset gpio request failed\n");
+ return ret;
}
}
- ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io");
+ ks->vdd_io = devm_regulator_get(dev, "vdd-io");
if (IS_ERR(ks->vdd_io)) {
ret = PTR_ERR(ks->vdd_io);
goto err_reg_io;
@@ -1456,12 +1079,11 @@ static int ks8851_probe(struct spi_device *spi)
ret = regulator_enable(ks->vdd_io);
if (ret) {
- dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n",
- ret);
+ dev_err(dev, "regulator vdd_io enable fail: %d\n", ret);
goto err_reg_io;
}
- ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd");
+ ks->vdd_reg = devm_regulator_get(dev, "vdd");
if (IS_ERR(ks->vdd_reg)) {
ret = PTR_ERR(ks->vdd_reg);
goto err_reg;
@@ -1469,8 +1091,7 @@ static int ks8851_probe(struct spi_device *spi)
ret = regulator_enable(ks->vdd_reg);
if (ret) {
- dev_err(&spi->dev, "regulator vdd enable fail: %d\n",
- ret);
+ dev_err(dev, "regulator vdd enable fail: %d\n", ret);
goto err_reg;
}
@@ -1479,54 +1100,41 @@ static int ks8851_probe(struct spi_device *spi)
gpio_set_value(gpio, 1);
}
- mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
- INIT_WORK(&ks->tx_work, ks8851_tx_work);
INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
- /* initialise pre-made spi transfer messages */
-
- spi_message_init(&ks->spi_msg1);
- spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1);
-
- spi_message_init(&ks->spi_msg2);
- spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
- spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
-
/* setup EEPROM state */
-
ks->eeprom.data = ks;
ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
ks->eeprom.register_read = ks8851_eeprom_regread;
ks->eeprom.register_write = ks8851_eeprom_regwrite;
/* setup mii state */
- ks->mii.dev = ndev;
+ ks->mii.dev = netdev;
ks->mii.phy_id = 1,
ks->mii.phy_id_mask = 1;
ks->mii.reg_num_mask = 0xf;
ks->mii.mdio_read = ks8851_phy_read;
ks->mii.mdio_write = ks8851_phy_write;
- dev_info(&spi->dev, "message enable is %d\n", msg_enable);
+ dev_info(dev, "message enable is %d\n", msg_en);
/* set the default message enable */
- ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
- NETIF_MSG_PROBE |
- NETIF_MSG_LINK));
+ ks->msg_enable = netif_msg_init(msg_en, NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK);
skb_queue_head_init(&ks->txq);
- ndev->ethtool_ops = &ks8851_ethtool_ops;
- SET_NETDEV_DEV(ndev, &spi->dev);
+ netdev->ethtool_ops = &ks8851_ethtool_ops;
+ SET_NETDEV_DEV(netdev, dev);
- spi_set_drvdata(spi, ks);
+ dev_set_drvdata(dev, ks);
netif_carrier_off(ks->netdev);
- ndev->if_port = IF_PORT_100BASET;
- ndev->netdev_ops = &ks8851_netdev_ops;
- ndev->irq = spi->irq;
+ netdev->if_port = IF_PORT_100BASET;
+ netdev->netdev_ops = &ks8851_netdev_ops;
/* issue a global soft reset to reset the device. */
ks8851_soft_reset(ks, GRR_GSR);
@@ -1534,7 +1142,7 @@ static int ks8851_probe(struct spi_device *spi)
/* simple check for a valid chip being connected to the bus */
cider = ks8851_rdreg16(ks, KS_CIDER);
if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
- dev_err(&spi->dev, "failed to read device ID\n");
+ dev_err(dev, "failed to read device ID\n");
ret = -ENODEV;
goto err_id;
}
@@ -1543,16 +1151,16 @@ static int ks8851_probe(struct spi_device *spi)
ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
ks8851_read_selftest(ks);
- ks8851_init_mac(ks);
+ ks8851_init_mac(ks, dev->of_node);
- ret = register_netdev(ndev);
+ ret = register_netdev(netdev);
if (ret) {
- dev_err(&spi->dev, "failed to register network device\n");
+ dev_err(dev, "failed to register network device\n");
goto err_netdev;
}
- netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
- CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
+ netdev_info(netdev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
+ CIDER_REV_GET(cider), netdev->dev_addr, netdev->irq,
ks->rc_ccr & CCR_EEPROM ? "has" : "no");
return 0;
@@ -1565,49 +1173,21 @@ err_id:
err_reg:
regulator_disable(ks->vdd_io);
err_reg_io:
-err_gpio:
- free_netdev(ndev);
return ret;
}
-static int ks8851_remove(struct spi_device *spi)
+int ks8851_remove_common(struct device *dev)
{
- struct ks8851_net *priv = spi_get_drvdata(spi);
+ struct ks8851_net *priv = dev_get_drvdata(dev);
if (netif_msg_drv(priv))
- dev_info(&spi->dev, "remove\n");
+ dev_info(dev, "remove\n");
unregister_netdev(priv->netdev);
if (gpio_is_valid(priv->gpio))
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
regulator_disable(priv->vdd_io);
- free_netdev(priv->netdev);
return 0;
}
-
-static const struct of_device_id ks8851_match_table[] = {
- { .compatible = "micrel,ks8851" },
- { }
-};
-MODULE_DEVICE_TABLE(of, ks8851_match_table);
-
-static struct spi_driver ks8851_driver = {
- .driver = {
- .name = "ks8851",
- .of_match_table = ks8851_match_table,
- .pm = &ks8851_pm_ops,
- },
- .probe = ks8851_probe,
- .remove = ks8851_remove,
-};
-module_spi_driver(ks8851_driver);
-
-MODULE_DESCRIPTION("KS8851 Network driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL");
-
-module_param_named(message, msg_enable, int, 0);
-MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
-MODULE_ALIAS("spi:ks8851");
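The module boilerplate deleted above presumably moves into the new ks8851_spi.c front-end, whose diff is not shown here. A hypothetical skeleton of the relocated registration, reusing the shared ks8851_pm_ops from ks8851.h (the ks8851_probe_spi()/ks8851_remove_spi() names are assumptions, not taken from this patch):

static const struct of_device_id ks8851_match_table[] = {
	{ .compatible = "micrel,ks8851" },
	{ }
};
MODULE_DEVICE_TABLE(of, ks8851_match_table);

static struct spi_driver ks8851_driver = {
	.driver = {
		.name = "ks8851",
		.of_match_table = ks8851_match_table,
		.pm = &ks8851_pm_ops,
	},
	.probe = ks8851_probe_spi,	/* assumed to wrap ks8851_probe_common() */
	.remove = ks8851_remove_spi,	/* assumed to wrap ks8851_remove_common() */
};
module_spi_driver(ks8851_driver);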
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
deleted file mode 100644
index 45cc840d8e2e..000000000000
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ /dev/null
@@ -1,1393 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**
- * drivers/net/ethernet/micrel/ks8851_mll.c
- * Copyright (c) 2009 Micrel Inc.
- */
-
-/* Supports:
- * KS8851 16bit MLL chip from Micrel Inc.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/cache.h>
-#include <linux/crc32.h>
-#include <linux/crc32poly.h>
-#include <linux/mii.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/ks8851_mll.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_net.h>
-
-#include "ks8851.h"
-
-#define DRV_NAME "ks8851_mll"
-
-static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
-#define MAX_RECV_FRAMES 255
-#define MAX_BUF_SIZE 2048
-#define TX_BUF_SIZE 2000
-#define RX_BUF_SIZE 2000
-
-#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
- RXCR1_RXMAFMA | RXCR1_RXPAFMA)
-#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
-
-#define ENUM_BUS_NONE 0
-#define ENUM_BUS_8BIT 1
-#define ENUM_BUS_16BIT 2
-#define ENUM_BUS_32BIT 3
-
-#define MAX_MCAST_LST 32
-#define HW_MCAST_SIZE 8
-
-/**
- * union ks_tx_hdr - tx header data
- * @txb: The header as bytes
- * @txw: The header as 16bit, little-endian words
- *
- * A dual representation of the tx header data to allow
- * access to individual bytes, and to allow 16bit accesses
- * with 16bit alignment.
- */
-union ks_tx_hdr {
- u8 txb[4];
- __le16 txw[2];
-};
-
-/**
- * struct ks_net - KS8851 driver private data
- * @netdev : The network device we're bound to
- * @hw_addr : start address of data register.
- * @hw_addr_cmd : start address of command register.
- * @txh : temporary buffer to save status/length.
- * @lock : Lock to ensure that the device is not accessed when busy.
- * @pdev : Pointer to platform device.
- * @mii : The MII state information for the mii calls.
- * @frame_head_info : frame header information for multi-pkt rx.
- * @statelock : Lock on this structure for tx list.
- * @msg_enable : The message flags controlling driver output (see ethtool).
- * @frame_cnt : number of frames received.
- * @bus_width : i/o bus width.
- * @rc_rxqcr : Cached copy of KS_RXQCR.
- * @rc_txcr : Cached copy of KS_TXCR.
- * @rc_ier : Cached copy of KS_IER.
- * @sharedbus : Multiplex (addr and data bus) mode indicator.
- * @cmd_reg_cache : command register cached.
- * @cmd_reg_cache_int : command register cached. Used in the irq handler.
- * @promiscuous : promiscuous mode indicator.
- * @all_mcast : multicast indicator.
- * @mcast_lst_size : size of multicast list.
- * @mcast_lst : multicast list.
- * @mcast_bits : multicast enabled.
- * @mac_addr : MAC address assigned to this device.
- * @fid : frame id.
- * @extra_byte : number of extra bytes prepended to the rx pkt.
- * @enabled : indicator that this device works.
- *
- * The @lock ensures that the chip is protected when certain operations are
- * in progress. When the read or write packet transfer is in progress, most
- * of the chip registers are not accessible until the transfer is finished and
- * the DMA has been de-asserted.
- *
- * The @statelock is used to protect information in the structure which may
- * need to be accessed via several sources, such as the network driver layer
- * or one of the work queues.
- *
- */
-
-/* Receive multiplex framer header info */
-struct type_frame_head {
- u16 sts; /* Frame status */
- u16 len; /* Byte count */
-};
-
-struct ks_net {
- struct net_device *netdev;
- void __iomem *hw_addr;
- void __iomem *hw_addr_cmd;
- union ks_tx_hdr txh ____cacheline_aligned;
- struct mutex lock; /* spinlock to be interrupt safe */
- struct platform_device *pdev;
- struct mii_if_info mii;
- struct type_frame_head *frame_head_info;
- spinlock_t statelock;
- u32 msg_enable;
- u32 frame_cnt;
- int bus_width;
-
- u16 rc_rxqcr;
- u16 rc_txcr;
- u16 rc_ier;
- u16 sharedbus;
- u16 cmd_reg_cache;
- u16 cmd_reg_cache_int;
- u16 promiscuous;
- u16 all_mcast;
- u16 mcast_lst_size;
- u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
- u8 mcast_bits[HW_MCAST_SIZE];
- u8 mac_addr[6];
- u8 fid;
- u8 extra_byte;
- u8 enabled;
-};
-
-static int msg_enable;
-
-#define BE3 0x8000 /* Byte Enable 3 */
-#define BE2 0x4000 /* Byte Enable 2 */
-#define BE1 0x2000 /* Byte Enable 1 */
-#define BE0 0x1000 /* Byte Enable 0 */
-
-/* register read/write calls.
- *
- * All these calls issue transactions to access the chip's registers. They
- * all require that the necessary lock is held to prevent accesses when the
- * chip is busy transferring packet data (RX/TX FIFO accesses).
- */
-
-/**
- * ks_check_endian - Check whether endianness of the bus is correct
- * @ks : The chip information
- *
- * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit
- * bus. To maintain optimum performance, the bus endianness should be set
- * such that it matches the endianness of the CPU.
- */
-
-static int ks_check_endian(struct ks_net *ks)
-{
- u16 cider;
-
- /*
- * Read CIDER register first, however read it the "wrong" way around.
- * If the endian strap on the KS8851-16MLL is incorrect and the chip
- * is operating in different endianness than the CPU, then the meaning
- * of BE[3:0] byte-enable bits is also swapped such that:
- * BE[3,2,1,0] becomes BE[1,0,3,2]
- *
- * Luckily for us, the byte-enable bits are the top four MSbits of
- * the address register and the CIDER register is at offset 0xc0.
- * Hence, by reading address 0xc0c0, which is not impacted by endian
- * swapping, we assert either BE[3:2] or BE[1:0] while reading the
- * CIDER register.
- *
- * If the bus configuration is correct, reading 0xc0c0 asserts
- * BE[3:2] and this read returns 0x0000, because to read register
- * with bottom two LSbits of address set to 0, BE[1:0] must be
- * asserted.
- *
- * If the bus configuration is NOT correct, reading 0xc0c0 asserts
- * BE[1:0] and this read returns non-zero 0x8872 value.
- */
- iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd);
- cider = ioread16(ks->hw_addr);
- if (!cider)
- return 0;
-
- netdev_err(ks->netdev, "incorrect EESK endian strap setting\n");
-
- return -EINVAL;
-}
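The probe above works because the command word for CIDER, 0xc0c0, is the one pattern the byte-enable swap cannot disturb: the register offset occupies the low byte, BE3|BE2 occupies the top nibble, and both halves come out as 0xc0. A standalone illustration of the encoding (not driver code):

#include <stdio.h>

#define BE3		0x8000	/* Byte Enable 3 */
#define BE2		0x4000	/* Byte Enable 2 */
#define KS_CIDER	0xc0	/* Chip ID register offset */

int main(void)
{
	/* BE3 | BE2 | 0xc0 == 0xc0c0: the same bit pattern whether or not
	 * the bus strap swaps the byte-enable pairs, which is what lets
	 * ks_check_endian() read CIDER "the wrong way around" and use the
	 * result alone to detect the strap setting. */
	printf("command word: 0x%04x\n", BE3 | BE2 | KS_CIDER);
	return 0;
}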
-
-/**
- * ks_rdreg16 - read 16 bit register from device
- * @ks : The chip information
- * @offset: The register address
- *
- * Read a 16bit register from the chip, returning the result
- */
-
-static u16 ks_rdreg16(struct ks_net *ks, int offset)
-{
- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
- return ioread16(ks->hw_addr);
-}
-
-/**
- * ks_wrreg16 - write 16bit register value to chip
- * @ks: The chip information
- * @offset: The register address
- * @value: The value to write
- *
- */
-
-static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
-{
- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
- iowrite16(value, ks->hw_addr);
-}
-
-/**
- * ks_inblk - read a block of data from QMU. This is called after pseudo-DMA mode is enabled.
- * @ks: The chip state
- * @wptr: buffer address to save data
- * @len: length in byte to read
- *
- */
-static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
-{
- len >>= 1;
- while (len--)
- *wptr++ = (u16)ioread16(ks->hw_addr);
-}
-
-/**
- * ks_outblk - write data to QMU. This is called after pseudo-DMA mode is enabled.
- * @ks: The chip information
- * @wptr: buffer address
- * @len: length in byte to write
- *
- */
-static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
-{
- len >>= 1;
- while (len--)
- iowrite16(*wptr++, ks->hw_addr);
-}
-
-static void ks_disable_int(struct ks_net *ks)
-{
- ks_wrreg16(ks, KS_IER, 0x0000);
-} /* ks_disable_int */
-
-static void ks_enable_int(struct ks_net *ks)
-{
- ks_wrreg16(ks, KS_IER, ks->rc_ier);
-} /* ks_enable_int */
-
-/**
- * ks_tx_fifo_space - return the available hardware buffer size.
- * @ks: The chip information
- *
- */
-static inline u16 ks_tx_fifo_space(struct ks_net *ks)
-{
- return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
-}
-
-/**
- * ks_save_cmd_reg - save the command register from the cache.
- * @ks: The chip information
- *
- */
-static inline void ks_save_cmd_reg(struct ks_net *ks)
-{
- /* ks8851 MLL has a bug reading back the command register,
- * so rely on software to save the content of the command register.
- */
- ks->cmd_reg_cache_int = ks->cmd_reg_cache;
-}
-
-/**
- * ks_restore_cmd_reg - restore the command register from the cache and
- * write to hardware register.
- * @ks: The chip information
- *
- */
-static inline void ks_restore_cmd_reg(struct ks_net *ks)
-{
- ks->cmd_reg_cache = ks->cmd_reg_cache_int;
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
-}
-
-/**
- * ks_set_powermode - set power mode of the device
- * @ks: The chip information
- * @pwrmode: The power mode value to write to KS_PMECR.
- *
- * Change the power mode of the chip.
- */
-static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
-{
- unsigned pmecr;
-
- netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
-
- ks_rdreg16(ks, KS_GRR);
- pmecr = ks_rdreg16(ks, KS_PMECR);
- pmecr &= ~PMECR_PM_MASK;
- pmecr |= pwrmode;
-
- ks_wrreg16(ks, KS_PMECR, pmecr);
-}
-
-/**
- * ks_read_config - read chip configuration of bus width.
- * @ks: The chip information
- *
- */
-static void ks_read_config(struct ks_net *ks)
-{
- u16 reg_data = 0;
-
- /* Regardless of bus width, 8 bit read should always work.*/
- reg_data = ks_rdreg16(ks, KS_CCR);
-
- /* addr/data bus are multiplexed */
- ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
-
- /* There is garbage data when reading from the QMU,
- * depending on the bus width.
- */
-
- if (reg_data & CCR_8BIT) {
- ks->bus_width = ENUM_BUS_8BIT;
- ks->extra_byte = 1;
- } else if (reg_data & CCR_16BIT) {
- ks->bus_width = ENUM_BUS_16BIT;
- ks->extra_byte = 2;
- } else {
- ks->bus_width = ENUM_BUS_32BIT;
- ks->extra_byte = 4;
- }
-}
-
-/**
- * ks_soft_reset - issue one of the soft reset to the device
- * @ks: The device state.
- * @op: The bit(s) to set in the GRR
- *
- * Issue the relevant soft-reset command to the device's GRR register
- * specified by @op.
- *
- * Note, the delays are in there as a caution to ensure that the reset
- * has time to take effect and then complete. Since the datasheet does
- * not currently specify the exact sequence, we have chosen something
- * that seems to work with our device.
- */
-static void ks_soft_reset(struct ks_net *ks, unsigned op)
-{
- /* Disable interrupt first */
- ks_wrreg16(ks, KS_IER, 0x0000);
- ks_wrreg16(ks, KS_GRR, op);
- mdelay(10); /* wait a short time to effect reset */
- ks_wrreg16(ks, KS_GRR, 0);
- mdelay(1); /* wait for condition to clear */
-}
-
-
-static void ks_enable_qmu(struct ks_net *ks)
-{
- u16 w;
-
- w = ks_rdreg16(ks, KS_TXCR);
- /* Enables QMU Transmit (TXCR). */
- ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
-
- /*
- * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
- * Enable
- */
-
- w = ks_rdreg16(ks, KS_RXQCR);
- ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
-
- /* Enables QMU Receive (RXCR1). */
- w = ks_rdreg16(ks, KS_RXCR1);
- ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
- ks->enabled = true;
-} /* ks_enable_qmu */
-
-static void ks_disable_qmu(struct ks_net *ks)
-{
- u16 w;
-
- w = ks_rdreg16(ks, KS_TXCR);
-
- /* Disables QMU Transmit (TXCR). */
- w &= ~TXCR_TXE;
- ks_wrreg16(ks, KS_TXCR, w);
-
- /* Disables QMU Receive (RXCR1). */
- w = ks_rdreg16(ks, KS_RXCR1);
- w &= ~RXCR1_RXE ;
- ks_wrreg16(ks, KS_RXCR1, w);
-
- ks->enabled = false;
-
-} /* ks_disable_qmu */
-
-/**
- * ks_read_qmu - read 1 pkt data from the QMU.
- * @ks: The chip information
- * @buf: buffer address to save 1 pkt
- * @len: Pkt length
- * Here is the sequence to read 1 pkt:
- * 1. set pseudo-DMA mode
- * 2. read prepend data
- * 3. read pkt data
- * 4. reset pseudo-DMA mode
- */
-static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
-{
- u32 r = ks->extra_byte & 0x1 ;
- u32 w = ks->extra_byte - r;
-
- /* 1. set pseudo-DMA mode */
- ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
-
- /* 2. read prepend data */
- /**
- * read 4 + extra bytes and discard them.
- * extra bytes for dummy, 2 for status, 2 for len
- */
-
- /* use likely(r) for 8 bit access for performance */
- if (unlikely(r))
- ioread8(ks->hw_addr);
- ks_inblk(ks, buf, w + 2 + 2);
-
- /* 3. read pkt data */
- ks_inblk(ks, buf, ALIGN(len, 4));
-
- /* 4. reset pseudo-DMA mode */
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
-}
-
-/**
- * ks_rcv - read multiple pkts data from the QMU.
- * @ks: The chip information
- * @netdev: The network device being opened.
- *
- * Read all of the header information before reading the pkt content.
- * It is not allowed to read only part of the pkts in the QMU after
- * issuing the interrupt ack.
- */
-static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
-{
- u32 i;
- struct type_frame_head *frame_hdr = ks->frame_head_info;
- struct sk_buff *skb;
-
- ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
-
- /* read all header information */
- for (i = 0; i < ks->frame_cnt; i++) {
- /* Checking Received packet status */
- frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
- /* Get packet len from hardware */
- frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
- frame_hdr++;
- }
-
- frame_hdr = ks->frame_head_info;
- while (ks->frame_cnt--) {
- if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
- frame_hdr->len >= RX_BUF_SIZE ||
- frame_hdr->len <= 0)) {
-
- /* discard an invalid packet */
- ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
- netdev->stats.rx_dropped++;
- if (!(frame_hdr->sts & RXFSHR_RXFV))
- netdev->stats.rx_frame_errors++;
- else
- netdev->stats.rx_length_errors++;
- frame_hdr++;
- continue;
- }
-
- skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
- if (likely(skb)) {
- skb_reserve(skb, 2);
- /* read data block including CRC 4 bytes */
- ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
- skb_put(skb, frame_hdr->len - 4);
- skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
- /* exclude CRC size */
- netdev->stats.rx_bytes += frame_hdr->len - 4;
- netdev->stats.rx_packets++;
- } else {
- ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
- netdev->stats.rx_dropped++;
- }
- frame_hdr++;
- }
-}
-
-/**
- * ks_update_link_status - link status update.
- * @netdev: The network device being opened.
- * @ks: The chip information
- *
- */
-
-static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
-{
- /* check the status of the link */
- u32 link_up_status;
- if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
- netif_carrier_on(netdev);
- link_up_status = true;
- } else {
- netif_carrier_off(netdev);
- link_up_status = false;
- }
- netif_dbg(ks, link, ks->netdev,
- "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
-}
-
-/**
- * ks_irq - device interrupt handler
- * @irq: Interrupt number passed from the IRQ handler.
- * @pw: The private word passed to register_irq(), our struct ks_net.
- *
- * This is the handler invoked to find out what happened
- *
- * Read the interrupt status, work out what needs to be done and then clear
- * any of the interrupts that are not needed.
- */
-
-static irqreturn_t ks_irq(int irq, void *pw)
-{
- struct net_device *netdev = pw;
- struct ks_net *ks = netdev_priv(netdev);
- unsigned long flags;
- u16 status;
-
- spin_lock_irqsave(&ks->statelock, flags);
- /*this should be the first in IRQ handler */
- ks_save_cmd_reg(ks);
-
- status = ks_rdreg16(ks, KS_ISR);
- if (unlikely(!status)) {
- ks_restore_cmd_reg(ks);
- spin_unlock_irqrestore(&ks->statelock, flags);
- return IRQ_NONE;
- }
-
- ks_wrreg16(ks, KS_ISR, status);
-
- if (likely(status & IRQ_RXI))
- ks_rcv(ks, netdev);
-
- if (unlikely(status & IRQ_LCI))
- ks_update_link_status(netdev, ks);
-
- if (unlikely(status & IRQ_TXI))
- netif_wake_queue(netdev);
-
- if (unlikely(status & IRQ_LDI)) {
-
- u16 pmecr = ks_rdreg16(ks, KS_PMECR);
- pmecr &= ~PMECR_WKEVT_MASK;
- ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
- }
-
- if (unlikely(status & IRQ_RXOI))
- ks->netdev->stats.rx_over_errors++;
- /* this should be the last in IRQ handler*/
- ks_restore_cmd_reg(ks);
- spin_unlock_irqrestore(&ks->statelock, flags);
- return IRQ_HANDLED;
-}
-
-
-/**
- * ks_net_open - open network device
- * @netdev: The network device being opened.
- *
- * Called when the network device is marked active, such as a user executing
- * 'ifconfig up' on the device.
- */
-static int ks_net_open(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- int err;
-
-#define KS_INT_FLAGS IRQF_TRIGGER_LOW
- /* lock the card, even if we may not actually do anything
- * else at the moment.
- */
-
- netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
-
- /* reset the HW */
- err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
-
- if (err) {
- pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
- return err;
- }
-
- /* wake up powermode to normal mode */
- ks_set_powermode(ks, PMECR_PM_NORMAL);
- mdelay(1); /* wait for normal mode to take effect */
-
- ks_wrreg16(ks, KS_ISR, 0xffff);
- ks_enable_int(ks);
- ks_enable_qmu(ks);
- netif_start_queue(ks->netdev);
-
- netif_dbg(ks, ifup, ks->netdev, "network device up\n");
-
- return 0;
-}
-
-/**
- * ks_net_stop - close network device
- * @netdev: The device being closed.
- *
- * Called to close down a network device which has been active. Cancel any
- * work, shut down the RX and TX process and then place the chip into a low
- * power state whilst it is not being used.
- */
-static int ks_net_stop(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
-
- netif_info(ks, ifdown, netdev, "shutting down\n");
-
- netif_stop_queue(netdev);
-
- mutex_lock(&ks->lock);
-
- /* turn off the IRQs and ack any outstanding */
- ks_wrreg16(ks, KS_IER, 0x0000);
- ks_wrreg16(ks, KS_ISR, 0xffff);
-
- /* shutdown RX/TX QMU */
- ks_disable_qmu(ks);
- ks_disable_int(ks);
-
- /* set powermode to soft power down to save power */
- ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
- free_irq(netdev->irq, netdev);
- mutex_unlock(&ks->lock);
- return 0;
-}
-
-
-/**
- * ks_write_qmu - write 1 pkt data to the QMU.
- * @ks: The chip information
- * @pdata: buffer address to save 1 pkt
- * @len: Pkt length in byte
- * Here is the sequence to write 1 pkt:
- * 1. set pseudo-DMA mode
- * 2. write status/length
- * 3. write pkt data
- * 4. reset pseudo-DMA mode
- * 5. enqueue the pkt (move it from the TX buffer into the TXQ)
- * 6. wait until TXQCR_METFE is auto-cleared
- */
-static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
-{
- /* start header at txb[0] to align txw entries */
- ks->txh.txw[0] = 0;
- ks->txh.txw[1] = cpu_to_le16(len);
-
- /* 1. set pseudo-DMA mode */
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
- /* 2. write status/length info */
- ks_outblk(ks, ks->txh.txw, 4);
- /* 3. write pkt data */
- ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
- /* 4. reset pseudo-DMA mode */
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
- /* 5. Enqueue Tx (move the pkt from the TX buffer into the TXQ) */
- ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
- /* 6. wait until TXQCR_METFE is auto-cleared */
- while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
- ;
-}
-
-/**
- * ks_start_xmit - transmit packet
- * @skb : The buffer to transmit
- * @netdev : The device used to transmit the packet.
- *
- * Called by the network layer to transmit the @skb.
- * spin_lock_irqsave is required because tx and rx should be mutually
- * exclusive. So while tx is in progress, prevent the IRQ interrupt from
- * happening.
- */
-static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- netdev_tx_t retv = NETDEV_TX_OK;
- struct ks_net *ks = netdev_priv(netdev);
- unsigned long flags;
-
- spin_lock_irqsave(&ks->statelock, flags);
-
- /* Extra space is required:
- * 4 bytes for alignment, 4 for status/length, 4 for CRC
- */
-
- if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
- ks_write_qmu(ks, skb->data, skb->len);
- /* add tx statistics */
- netdev->stats.tx_bytes += skb->len;
- netdev->stats.tx_packets++;
- dev_kfree_skb(skb);
- } else
- retv = NETDEV_TX_BUSY;
- spin_unlock_irqrestore(&ks->statelock, flags);
- return retv;
-}
-
-/**
- * ks_start_rx - ready to serve pkts
- * @ks : The chip information
- *
- */
-static void ks_start_rx(struct ks_net *ks)
-{
- u16 cntl;
-
- /* Enables QMU Receive (RXCR1). */
- cntl = ks_rdreg16(ks, KS_RXCR1);
- cntl |= RXCR1_RXE ;
- ks_wrreg16(ks, KS_RXCR1, cntl);
-} /* ks_start_rx */
-
-/**
- * ks_stop_rx - stop to serve pkts
- * @ks : The chip information
- *
- */
-static void ks_stop_rx(struct ks_net *ks)
-{
- u16 cntl;
-
- /* Disables QMU Receive (RXCR1). */
- cntl = ks_rdreg16(ks, KS_RXCR1);
- cntl &= ~RXCR1_RXE ;
- ks_wrreg16(ks, KS_RXCR1, cntl);
-
-} /* ks_stop_rx */
-
-static unsigned long const ethernet_polynomial = CRC32_POLY_BE;
-
-static unsigned long ether_gen_crc(int length, u8 *data)
-{
- long crc = -1;
- while (--length >= 0) {
- u8 current_octet = *data++;
- int bit;
-
- for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
- crc = (crc << 1) ^
- ((crc < 0) ^ (current_octet & 1) ?
- ethernet_polynomial : 0);
- }
- }
- return (unsigned long)crc;
-} /* ether_gen_crc */
-
-/**
-* ks_set_grpaddr - set multicast information
-* @ks : The chip information
-*/
-
-static void ks_set_grpaddr(struct ks_net *ks)
-{
- u8 i;
- u32 index, position, value;
-
- memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
-
- for (i = 0; i < ks->mcast_lst_size; i++) {
- position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
- index = position >> 3;
- value = 1 << (position & 7);
- ks->mcast_bits[index] |= (u8)value;
- }
-
- for (i = 0; i < HW_MCAST_SIZE; i++) {
- if (i & 1) {
- ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
- (ks->mcast_bits[i] << 8) |
- ks->mcast_bits[i - 1]);
- }
- }
-} /* ks_set_grpaddr */
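The placement arithmetic in the deleted ks_set_grpaddr() takes the top six bits of the big-endian CRC and spreads them across the eight-byte mcast_bits[] bitmap. A standalone worked example of that mapping (illustration only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int position = 42;	/* example 6-bit hash, (crc >> 26) & 0x3f */
	unsigned int index = position >> 3;		/* byte 5 of mcast_bits[] */
	unsigned int value = 1u << (position & 7);	/* bit 2 -> 0x04 */

	printf("mcast_bits[%u] |= 0x%02x\n", index, value);
	return 0;
}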
-
-/**
-* ks_clear_mcast - clear multicast information
-*
-* @ks : The chip information
-* This routine removes all mcast addresses set in the hardware.
-*/
-
-static void ks_clear_mcast(struct ks_net *ks)
-{
- u16 i, mcast_size;
- for (i = 0; i < HW_MCAST_SIZE; i++)
- ks->mcast_bits[i] = 0;
-
- mcast_size = HW_MCAST_SIZE >> 2;
- for (i = 0; i < mcast_size; i++)
- ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
-}
-
-static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
-{
- u16 cntl;
- ks->promiscuous = promiscuous_mode;
- ks_stop_rx(ks); /* Stop receiving for reconfiguration */
- cntl = ks_rdreg16(ks, KS_RXCR1);
-
- cntl &= ~RXCR1_FILTER_MASK;
- if (promiscuous_mode)
- /* Enable Promiscuous mode */
- cntl |= RXCR1_RXAE | RXCR1_RXINVF;
- else
- /* Disable Promiscuous mode (default normal mode) */
- cntl |= RXCR1_RXPAFMA;
-
- ks_wrreg16(ks, KS_RXCR1, cntl);
-
- if (ks->enabled)
- ks_start_rx(ks);
-
-} /* ks_set_promis */
-
-static void ks_set_mcast(struct ks_net *ks, u16 mcast)
-{
- u16 cntl;
-
- ks->all_mcast = mcast;
- ks_stop_rx(ks); /* Stop receiving for reconfiguration */
- cntl = ks_rdreg16(ks, KS_RXCR1);
- cntl &= ~RXCR1_FILTER_MASK;
- if (mcast)
- /* Enable "Perfect with Multicast address passed mode" */
- cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
- else
- /**
- * Disable "Perfect with Multicast address passed
- * mode" (normal mode).
- */
- cntl |= RXCR1_RXPAFMA;
-
- ks_wrreg16(ks, KS_RXCR1, cntl);
-
- if (ks->enabled)
- ks_start_rx(ks);
-} /* ks_set_mcast */
-
-static void ks_set_rx_mode(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- struct netdev_hw_addr *ha;
-
- /* Turn on/off promiscuous mode. */
- if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
- ks_set_promis(ks,
- (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
- /* Turn on/off all mcast mode. */
- else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
- ks_set_mcast(ks,
- (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
- else
- ks_set_promis(ks, false);
-
- if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
- if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
- int i = 0;
-
- netdev_for_each_mc_addr(ha, netdev) {
- if (i >= MAX_MCAST_LST)
- break;
- memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
- }
- ks->mcast_lst_size = (u8)i;
- ks_set_grpaddr(ks);
- } else {
- /**
- * List too big to support so
- * turn on all mcast mode.
- */
- ks->mcast_lst_size = MAX_MCAST_LST;
- ks_set_mcast(ks, true);
- }
- } else {
- ks->mcast_lst_size = 0;
- ks_clear_mcast(ks);
- }
-} /* ks_set_rx_mode */
-
-static void ks_set_mac(struct ks_net *ks, u8 *data)
-{
- u16 *pw = (u16 *)data;
- u16 w, u;
-
- ks_stop_rx(ks); /* Stop receiving for reconfiguration */
-
- u = *pw++;
- w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
- ks_wrreg16(ks, KS_MARH, w);
-
- u = *pw++;
- w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
- ks_wrreg16(ks, KS_MARM, w);
-
- u = *pw;
- w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
- ks_wrreg16(ks, KS_MARL, w);
-
- memcpy(ks->mac_addr, data, ETH_ALEN);
-
- if (ks->enabled)
- ks_start_rx(ks);
-}
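The open-coded swap in the deleted ks_set_mac() above is exactly swab16(): each 16-bit word of the MAC address is byte-swapped before being written to KS_MARH/KS_MARM/KS_MARL. A minimal sketch (illustration only, not part of this patch):

#include <linux/swab.h>

static inline u16 ks_mac_word_swap(u16 u)
{
	/* equivalent to ((u & 0xFF) << 8) | ((u >> 8) & 0xFF) */
	return swab16(u);
}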
-
-static int ks_set_mac_address(struct net_device *netdev, void *paddr)
-{
- struct ks_net *ks = netdev_priv(netdev);
- struct sockaddr *addr = paddr;
- u8 *da;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- da = (u8 *)netdev->dev_addr;
-
- ks_set_mac(ks, da);
- return 0;
-}
-
-static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
-{
- struct ks_net *ks = netdev_priv(netdev);
-
- if (!netif_running(netdev))
- return -EINVAL;
-
- return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
-}
-
-static const struct net_device_ops ks_netdev_ops = {
- .ndo_open = ks_net_open,
- .ndo_stop = ks_net_stop,
- .ndo_do_ioctl = ks_net_ioctl,
- .ndo_start_xmit = ks_start_xmit,
- .ndo_set_mac_address = ks_set_mac_address,
- .ndo_set_rx_mode = ks_set_rx_mode,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* ethtool support */
-
-static void ks_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *di)
-{
- strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
- strlcpy(di->version, "1.00", sizeof(di->version));
- strlcpy(di->bus_info, dev_name(netdev->dev.parent),
- sizeof(di->bus_info));
-}
-
-static u32 ks_get_msglevel(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- return ks->msg_enable;
-}
-
-static void ks_set_msglevel(struct net_device *netdev, u32 to)
-{
- struct ks_net *ks = netdev_priv(netdev);
- ks->msg_enable = to;
-}
-
-static int ks_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct ks_net *ks = netdev_priv(netdev);
-
- mii_ethtool_get_link_ksettings(&ks->mii, cmd);
-
- return 0;
-}
-
-static int ks_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct ks_net *ks = netdev_priv(netdev);
- return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
-}
-
-static u32 ks_get_link(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- return mii_link_ok(&ks->mii);
-}
-
-static int ks_nway_reset(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- return mii_nway_restart(&ks->mii);
-}
-
-static const struct ethtool_ops ks_ethtool_ops = {
- .get_drvinfo = ks_get_drvinfo,
- .get_msglevel = ks_get_msglevel,
- .set_msglevel = ks_set_msglevel,
- .get_link = ks_get_link,
- .nway_reset = ks_nway_reset,
- .get_link_ksettings = ks_get_link_ksettings,
- .set_link_ksettings = ks_set_link_ksettings,
-};
-
-/* MII interface controls */
-
-/**
- * ks_phy_reg - convert MII register into a KS8851 register
- * @reg: MII register number.
- *
- * Return the KS8851 register number for the corresponding MII PHY register
- * if possible. Return zero if the MII register has no direct mapping to the
- * KS8851 register set.
- */
-static int ks_phy_reg(int reg)
-{
- switch (reg) {
- case MII_BMCR:
- return KS_P1MBCR;
- case MII_BMSR:
- return KS_P1MBSR;
- case MII_PHYSID1:
- return KS_PHY1ILR;
- case MII_PHYSID2:
- return KS_PHY1IHR;
- case MII_ADVERTISE:
- return KS_P1ANAR;
- case MII_LPA:
- return KS_P1ANLPR;
- }
-
- return 0x0;
-}
-
-/**
- * ks_phy_read - MII interface PHY register read.
- * @netdev: The network device the PHY is on.
- * @phy_addr: Address of PHY (ignored as we only have one)
- * @reg: The register to read.
- *
- * This call reads data from the PHY register specified in @reg. Since the
- * device does not support all the MII registers, the non-existent values
- * are always returned as zero.
- *
- * We return zero for unsupported registers as the MII code does not check
- * the value returned for any error status, and simply returns it to the
- * caller. The mii-tool that the driver was tested with interprets any
- * negative error as real PHY capability bits, thus displaying incorrect
- * data to the user.
- */
-static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
-{
- struct ks_net *ks = netdev_priv(netdev);
- int ksreg;
- int result;
-
- ksreg = ks_phy_reg(reg);
- if (!ksreg)
- return 0x0; /* no error return allowed, so use zero */
-
- mutex_lock(&ks->lock);
- result = ks_rdreg16(ks, ksreg);
- mutex_unlock(&ks->lock);
-
- return result;
-}
-
-static void ks_phy_write(struct net_device *netdev,
- int phy, int reg, int value)
-{
- struct ks_net *ks = netdev_priv(netdev);
- int ksreg;
-
- ksreg = ks_phy_reg(reg);
- if (ksreg) {
- mutex_lock(&ks->lock);
- ks_wrreg16(ks, ksreg, value);
- mutex_unlock(&ks->lock);
- }
-}
-
-/**
- * ks_read_selftest - read the selftest memory info.
- * @ks: The device state
- *
- * Read and check the TX/RX memory selftest information.
- */
-static int ks_read_selftest(struct ks_net *ks)
-{
- unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
- int ret = 0;
- unsigned rd;
-
- rd = ks_rdreg16(ks, KS_MBIR);
-
- if ((rd & both_done) != both_done) {
- netdev_warn(ks->netdev, "Memory selftest not finished\n");
- return 0;
- }
-
- if (rd & MBIR_TXMBFA) {
- netdev_err(ks->netdev, "TX memory selftest fails\n");
- ret |= 1;
- }
-
- if (rd & MBIR_RXMBFA) {
- netdev_err(ks->netdev, "RX memory selftest fails\n");
- ret |= 2;
- }
-
- netdev_info(ks->netdev, "the selftest passes\n");
- return ret;
-}
-
-static void ks_setup(struct ks_net *ks)
-{
- u16 w;
-
- /**
- * Configure QMU Transmit
- */
-
- /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
- ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
-
- /* Setup Receive Frame Data Pointer Auto-Increment */
- ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
-
- /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
- ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
-
- /* Setup RxQ Command Control (RXQCR) */
- ks->rc_rxqcr = RXQCR_CMD_CNTL;
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
-
- /**
- * Force half-duplex mode (the default is full duplex) because,
- * if auto-negotiation fails, most switches fall back to
- * half-duplex.
- */
-
- w = ks_rdreg16(ks, KS_P1MBCR);
- w &= ~BMCR_FULLDPLX;
- ks_wrreg16(ks, KS_P1MBCR, w);
-
- w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
- ks_wrreg16(ks, KS_TXCR, w);
-
- w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
-
- if (ks->promiscuous) /* bPromiscuous */
- w |= (RXCR1_RXAE | RXCR1_RXINVF);
- else if (ks->all_mcast) /* Multicast address passed mode */
- w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
- else /* Normal mode */
- w |= RXCR1_RXPAFMA;
-
- ks_wrreg16(ks, KS_RXCR1, w);
-} /* ks_setup */
-
-
-static void ks_setup_int(struct ks_net *ks)
-{
- ks->rc_ier = 0x00;
- /* Clear the interrupts status of the hardware. */
- ks_wrreg16(ks, KS_ISR, 0xffff);
-
- /* Enables the interrupts of the hardware. */
- ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
-} /* ks_setup_int */
-
-static int ks_hw_init(struct ks_net *ks)
-{
-#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
- ks->promiscuous = 0;
- ks->all_mcast = 0;
- ks->mcast_lst_size = 0;
-
- ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
- GFP_KERNEL);
- if (!ks->frame_head_info)
- return false;
-
- ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
- return true;
-}
-
-#if defined(CONFIG_OF)
-static const struct of_device_id ks8851_ml_dt_ids[] = {
- { .compatible = "micrel,ks8851-mll" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
-#endif
-
-static int ks8851_probe(struct platform_device *pdev)
-{
- int err;
- struct net_device *netdev;
- struct ks_net *ks;
- u16 id, data;
- const char *mac;
-
- netdev = alloc_etherdev(sizeof(struct ks_net));
- if (!netdev)
- return -ENOMEM;
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- ks = netdev_priv(netdev);
- ks->netdev = netdev;
-
- ks->hw_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(ks->hw_addr)) {
- err = PTR_ERR(ks->hw_addr);
- goto err_free;
- }
-
- ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(ks->hw_addr_cmd)) {
- err = PTR_ERR(ks->hw_addr_cmd);
- goto err_free;
- }
-
- err = ks_check_endian(ks);
- if (err)
- goto err_free;
-
- netdev->irq = platform_get_irq(pdev, 0);
-
- if ((int)netdev->irq < 0) {
- err = netdev->irq;
- goto err_free;
- }
-
- ks->pdev = pdev;
-
- mutex_init(&ks->lock);
- spin_lock_init(&ks->statelock);
-
- netdev->netdev_ops = &ks_netdev_ops;
- netdev->ethtool_ops = &ks_ethtool_ops;
-
- /* setup mii state */
- ks->mii.dev = netdev;
-	ks->mii.phy_id = 1;
- ks->mii.phy_id_mask = 1;
- ks->mii.reg_num_mask = 0xf;
- ks->mii.mdio_read = ks_phy_read;
- ks->mii.mdio_write = ks_phy_write;
-
- netdev_info(netdev, "message enable is %d\n", msg_enable);
- /* set the default message enable */
- ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
- NETIF_MSG_PROBE |
- NETIF_MSG_LINK));
- ks_read_config(ks);
-
- /* simple check for a valid chip being connected to the bus */
- if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
- netdev_err(netdev, "failed to read device ID\n");
- err = -ENODEV;
- goto err_free;
- }
-
- if (ks_read_selftest(ks)) {
- netdev_err(netdev, "failed to read device ID\n");
- err = -ENODEV;
- goto err_free;
- }
-
- err = register_netdev(netdev);
- if (err)
- goto err_free;
-
- platform_set_drvdata(pdev, netdev);
-
- ks_soft_reset(ks, GRR_GSR);
- ks_hw_init(ks);
- ks_disable_qmu(ks);
- ks_setup(ks);
- ks_setup_int(ks);
-
- data = ks_rdreg16(ks, KS_OBCR);
- ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
-
- /* overwriting the default MAC address */
- if (pdev->dev.of_node) {
- mac = of_get_mac_address(pdev->dev.of_node);
- if (!IS_ERR(mac))
- ether_addr_copy(ks->mac_addr, mac);
- } else {
- struct ks8851_mll_platform_data *pdata;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- netdev_err(netdev, "No platform data\n");
- err = -ENODEV;
- goto err_pdata;
- }
- memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
- }
- if (!is_valid_ether_addr(ks->mac_addr)) {
- /* Use random MAC address if none passed */
- eth_random_addr(ks->mac_addr);
- netdev_info(netdev, "Using random mac address\n");
- }
- netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
-
- memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
-
- ks_set_mac(ks, netdev->dev_addr);
-
- id = ks_rdreg16(ks, KS_CIDER);
-
- netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
- (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
- return 0;
-
-err_pdata:
- unregister_netdev(netdev);
-err_free:
- free_netdev(netdev);
- return err;
-}
-
-static int ks8851_remove(struct platform_device *pdev)
-{
- struct net_device *netdev = platform_get_drvdata(pdev);
-
- unregister_netdev(netdev);
- free_netdev(netdev);
- return 0;
-
-}
-
-static struct platform_driver ks8851_platform_driver = {
- .driver = {
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(ks8851_ml_dt_ids),
- },
- .probe = ks8851_probe,
- .remove = ks8851_remove,
-};
-
-module_platform_driver(ks8851_platform_driver);
-
-MODULE_DESCRIPTION("KS8851 MLL Network driver");
-MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
-MODULE_LICENSE("GPL");
-module_param_named(message, msg_enable, int, 0);
-MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
-
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
new file mode 100644
index 000000000000..3bab0cb2b1a5
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* drivers/net/ethernet/micrel/ks8851.c
+ *
+ * Copyright 2009 Simtec Electronics
+ * http://www.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+
+#include "ks8851.h"
+
+static int msg_enable;
+
+#define BE3 0x8000 /* Byte Enable 3 */
+#define BE2 0x4000 /* Byte Enable 2 */
+#define BE1 0x2000 /* Byte Enable 1 */
+#define BE0 0x1000 /* Byte Enable 0 */
+
+/**
+ * struct ks8851_net_par - KS8851 Parallel driver private data
+ * @ks8851: KS8851 driver common private data
+ * @lock: Lock to ensure that the device is not accessed when busy.
+ * @hw_addr: start address of the data register.
+ * @hw_addr_cmd: start address of the command register.
+ * @cmd_reg_cache: cached command register value.
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished
+ * and the DMA has been de-asserted.
+ */
+struct ks8851_net_par {
+ struct ks8851_net ks8851;
+ spinlock_t lock;
+ void __iomem *hw_addr;
+ void __iomem *hw_addr_cmd;
+ u16 cmd_reg_cache;
+};
+
+#define to_ks8851_par(ks) container_of((ks), struct ks8851_net_par, ks8851)
+
+/**
+ * ks8851_lock_par - register access lock
+ * @ks: The chip state
+ * @flags: Spinlock flags
+ *
+ * Claim chip register access lock
+ */
+static void ks8851_lock_par(struct ks8851_net *ks, unsigned long *flags)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ spin_lock_irqsave(&ksp->lock, *flags);
+}
+
+/**
+ * ks8851_unlock_par - register access unlock
+ * @ks: The chip state
+ * @flags: Spinlock flags
+ *
+ * Release chip register access lock
+ */
+static void ks8851_unlock_par(struct ks8851_net *ks, unsigned long *flags)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ spin_unlock_irqrestore(&ksp->lock, *flags);
+}
+
+/**
+ * ks_check_endian - Check whether endianness of the bus is correct
+ * @ks: The chip information
+ *
+ * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit
+ * bus. To maintain optimum performance, the bus endianness should be set
+ * such that it matches the endianness of the CPU.
+ */
+static int ks_check_endian(struct ks8851_net *ks)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+ u16 cider;
+
+ /*
+	 * Read the CIDER register first, but read it the "wrong" way around.
+	 * If the endian strap on the KS8851-16MLL is incorrect and the chip
+	 * is operating in a different endianness than the CPU, the meaning
+ * of BE[3:0] byte-enable bits is also swapped such that:
+ * BE[3,2,1,0] becomes BE[1,0,3,2]
+ *
+ * Luckily for us, the byte-enable bits are the top four MSbits of
+ * the address register and the CIDER register is at offset 0xc0.
+ * Hence, by reading address 0xc0c0, which is not impacted by endian
+ * swapping, we assert either BE[3:2] or BE[1:0] while reading the
+ * CIDER register.
+ *
+ * If the bus configuration is correct, reading 0xc0c0 asserts
+ * BE[3:2] and this read returns 0x0000, because to read register
+ * with bottom two LSbits of address set to 0, BE[1:0] must be
+ * asserted.
+ *
+ * If the bus configuration is NOT correct, reading 0xc0c0 asserts
+ * BE[1:0] and this read returns non-zero 0x8872 value.
+ */
+ iowrite16(BE3 | BE2 | KS_CIDER, ksp->hw_addr_cmd);
+ cider = ioread16(ksp->hw_addr);
+ if (!cider)
+ return 0;
+
+ netdev_err(ks->netdev, "incorrect EESK endian strap setting\n");
+
+ return -EINVAL;
+}
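The check works because 0xc0c0 is invariant under byte swapping, so the command reaches the CIDER register no matter how the strap is set. A compile-time sketch of that property, assuming the BE3/BE2 and KS_CIDER (0xc0) definitions used above (illustration only, not part of this patch):

#include <linux/build_bug.h>

#define KS_ENDIAN_CHECK_CMD	(BE3 | BE2 | KS_CIDER)

static_assert(KS_ENDIAN_CHECK_CMD == 0xc0c0);
/* swapping the two bytes yields the same command word */
static_assert((((KS_ENDIAN_CHECK_CMD & 0xff) << 8) |
	       (KS_ENDIAN_CHECK_CMD >> 8)) == 0xc0c0);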
+
+/**
+ * ks8851_wrreg16_par - write 16bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg16_par(struct ks8851_net *ks, unsigned int reg,
+ unsigned int val)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ ksp->cmd_reg_cache = (u16)reg | ((BE1 | BE0) << (reg & 0x02));
+ iowrite16(ksp->cmd_reg_cache, ksp->hw_addr_cmd);
+ iowrite16(val, ksp->hw_addr);
+}
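The (BE1 | BE0) << (reg & 0x02) expression moves the byte enables to the 16-bit lane that holds the register: even-word offsets use BE[1:0], odd-word offsets use BE[3:2]. A compile-time restatement using the BE* definitions above (illustration only, not part of this patch):

#include <linux/build_bug.h>

static_assert(((BE1 | BE0) << 0) == (BE1 | BE0));	/* reg & 0x02 == 0 */
static_assert(((BE1 | BE0) << 2) == (BE3 | BE2));	/* reg & 0x02 == 2 */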
+
+/**
+ * ks8851_rdreg16_par - read 16 bit register from chip
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 16bit register from the chip, returning the result
+ */
+static unsigned int ks8851_rdreg16_par(struct ks8851_net *ks, unsigned int reg)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ ksp->cmd_reg_cache = (u16)reg | ((BE1 | BE0) << (reg & 0x02));
+ iowrite16(ksp->cmd_reg_cache, ksp->hw_addr_cmd);
+ return ioread16(ksp->hw_addr);
+}
+
+/**
+ * ks8851_rdfifo_par - read data from the receive fifo
+ * @ks: The device state.
+ * @buff: The buffer address
+ * @len: The length of the data to read
+ *
+ * Issue an RXQ FIFO read command and read the @len amount of data from
+ * the FIFO into the buffer specified by @buff.
+ */
+static void ks8851_rdfifo_par(struct ks8851_net *ks, u8 *buff, unsigned int len)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d@%p\n", __func__, len, buff);
+
+ ioread16_rep(ksp->hw_addr, (u16 *)buff + 1, len / 2);
+}
+
+/**
+ * ks8851_wrfifo_par - write packet to TX FIFO
+ * @ks: The device state.
+ * @txp: The sk_buff to transmit.
+ * @irq: IRQ on completion of the packet.
+ *
+ * Send the @txp to the chip. This means creating the relevant packet header
+ * specifying the length of the packet and the other information the chip
+ * needs, such as IRQ on completion. Send the header and the packet data to
+ * the device.
+ */
+static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
+ bool irq)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+ unsigned int len = ALIGN(txp->len, 4);
+ unsigned int fid = 0;
+
+ netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
+ __func__, txp, txp->len, txp->data, irq);
+
+ fid = ks->fid++;
+ fid &= TXFR_TXFID_MASK;
+
+ if (irq)
+ fid |= TXFR_TXIC; /* irq on completion */
+
+ iowrite16(fid, ksp->hw_addr);
+ iowrite16(txp->len, ksp->hw_addr);
+
+ iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
+}
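The two iowrite16() calls above emit a 4-byte frame header (a control word carrying the frame ID and the optional completion-IRQ bit, then the length), followed by the payload padded to a 4-byte boundary. A hypothetical struct mirroring that layout (illustration only; the driver writes the words directly):

#include <linux/build_bug.h>
#include <linux/types.h>

struct ks8851_tx_hdr_example {	/* hypothetical name, not in the driver */
	u16 ctrl;	/* frame ID (TXFR_TXFID_MASK) | TXFR_TXIC */
	u16 len;	/* payload length, before 4-byte padding */
};

static_assert(sizeof(struct ks8851_tx_hdr_example) == 4);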
+
+/**
+ * ks8851_rx_skb_par - receive skbuff
+ * @ks: The device state.
+ * @skb: The skbuff
+ */
+static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
+{
+ netif_rx(skb);
+}
+
+static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
+{
+ return ks8851_rdreg16_par(ks, KS_TXQCR);
+}
+
+/**
+ * ks8851_start_xmit_par - transmit packet
+ * @skb: The buffer to transmit
+ * @dev: The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb. Queue the packet for
+ * the device and schedule the necessary work to transmit the packet when
+ * it is free.
+ *
+ * We do this to firstly avoid sleeping with the network device locked,
+ * and secondly so we can round up more than one packet to transmit which
+ * means we can try and avoid generating too many transmit done interrupts.
+ */
+static netdev_tx_t ks8851_start_xmit_par(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ netdev_tx_t ret = NETDEV_TX_OK;
+ unsigned long flags;
+ unsigned int txqcr;
+ u16 txmir;
+ int err;
+
+ netif_dbg(ks, tx_queued, ks->netdev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
+
+ ks8851_lock_par(ks, &flags);
+
+ txmir = ks8851_rdreg16_par(ks, KS_TXMIR) & 0x1fff;
+
+ if (likely(txmir >= skb->len + 12)) {
+ ks8851_wrreg16_par(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+ ks8851_wrfifo_par(ks, skb, false);
+ ks8851_wrreg16_par(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks8851_wrreg16_par(ks, KS_TXQCR, TXQCR_METFE);
+
+ err = readx_poll_timeout_atomic(ks8851_rdreg16_par_txqcr, ks,
+ txqcr, !(txqcr & TXQCR_METFE),
+ 5, 1000000);
+ if (err)
+ ret = NETDEV_TX_BUSY;
+
+ ks8851_done_tx(ks, skb);
+ } else {
+ ret = NETDEV_TX_BUSY;
+ }
+
+ ks8851_unlock_par(ks, &flags);
+
+ return ret;
+}
+
+static int ks8851_probe_par(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ks8851_net_par *ksp;
+ struct net_device *netdev;
+ struct ks8851_net *ks;
+ int ret;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(struct ks8851_net_par));
+ if (!netdev)
+ return -ENOMEM;
+
+ ks = netdev_priv(netdev);
+
+ ks->lock = ks8851_lock_par;
+ ks->unlock = ks8851_unlock_par;
+ ks->rdreg16 = ks8851_rdreg16_par;
+ ks->wrreg16 = ks8851_wrreg16_par;
+ ks->rdfifo = ks8851_rdfifo_par;
+ ks->wrfifo = ks8851_wrfifo_par;
+ ks->start_xmit = ks8851_start_xmit_par;
+ ks->rx_skb = ks8851_rx_skb_par;
+
+#define STD_IRQ (IRQ_LCI | /* Link Change */ \
+ IRQ_RXI | /* RX done */ \
+ IRQ_RXPSI) /* RX process stop */
+ ks->rc_ier = STD_IRQ;
+
+ ksp = to_ks8851_par(ks);
+ spin_lock_init(&ksp->lock);
+
+ ksp->hw_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ksp->hw_addr))
+ return PTR_ERR(ksp->hw_addr);
+
+ ksp->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ksp->hw_addr_cmd))
+ return PTR_ERR(ksp->hw_addr_cmd);
+
+ ret = ks_check_endian(ks);
+ if (ret)
+ return ret;
+
+ netdev->irq = platform_get_irq(pdev, 0);
+
+ return ks8851_probe_common(netdev, dev, msg_enable);
+}
+
+static int ks8851_remove_par(struct platform_device *pdev)
+{
+ return ks8851_remove_common(&pdev->dev);
+}
+
+static const struct of_device_id ks8851_match_table[] = {
+ { .compatible = "micrel,ks8851-mll" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ks8851_match_table);
+
+static struct platform_driver ks8851_driver = {
+ .driver = {
+ .name = "ks8851",
+ .of_match_table = ks8851_match_table,
+ .pm = &ks8851_pm_ops,
+ },
+ .probe = ks8851_probe_par,
+ .remove = ks8851_remove_par,
+};
+module_platform_driver(ks8851_driver);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
new file mode 100644
index 000000000000..4ec7f1615977
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* drivers/net/ethernet/micrel/ks8851.c
+ *
+ * Copyright 2009 Simtec Electronics
+ * http://www.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include "ks8851.h"
+
+static int msg_enable;
+
+/**
+ * struct ks8851_net_spi - KS8851 SPI driver private data
+ * @lock: Lock to ensure that the device is not accessed when busy.
+ * @tx_work: Work queue for tx packets
+ * @ks8851: KS8851 driver common private data
+ * @spidev: The spi device we're bound to.
+ * @spi_msg1: pre-setup SPI message with a single transfer, @spi_xfer1.
+ * @spi_msg2: pre-setup SPI message with two transfers, @spi_xfer2.
+ * @spi_xfer1: @spi_msg1 SPI transfer structure
+ * @spi_xfer2: @spi_msg2 SPI transfer structure
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished
+ * and the DMA has been de-asserted.
+ */
+struct ks8851_net_spi {
+ struct ks8851_net ks8851;
+ struct mutex lock;
+ struct work_struct tx_work;
+ struct spi_device *spidev;
+ struct spi_message spi_msg1;
+ struct spi_message spi_msg2;
+ struct spi_transfer spi_xfer1;
+ struct spi_transfer spi_xfer2[2];
+};
+
+#define to_ks8851_spi(ks) container_of((ks), struct ks8851_net_spi, ks8851)
+
+/* SPI frame opcodes */
+#define KS_SPIOP_RD 0x00
+#define KS_SPIOP_WR 0x40
+#define KS_SPIOP_RXFIFO 0x80
+#define KS_SPIOP_TXFIFO 0xC0
+
+/* shift for byte-enable data */
+#define BYTE_EN(_x) ((_x) << 2)
+
+/* turn register number and byte-enable mask into data for start of packet */
+#define MK_OP(_byteen, _reg) \
+ (BYTE_EN(_byteen) | (_reg) << (8 + 2) | (_reg) >> 6)
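MK_OP() folds the byte enables and the 8-bit register offset into the 16-bit command word: offset bits [7:6] land in the low byte (sent first on the wire, alongside the opcode and byte enables) and bits [5:0] in the high byte. Two worked values, matching the reg & 2 ? 0xC : 0x3 byte-enable selection used by the register accessors below (illustration only, not part of this patch):

#include <linux/build_bug.h>

static_assert(MK_OP(0x3, 0x10) == 0x400c);	/* even-word register 0x10 */
static_assert(MK_OP(0xc, 0x12) == 0x4830);	/* odd-word register 0x12 */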
+
+/**
+ * ks8851_lock_spi - register access lock
+ * @ks: The chip state
+ * @flags: Spinlock flags
+ *
+ * Claim chip register access lock
+ */
+static void ks8851_lock_spi(struct ks8851_net *ks, unsigned long *flags)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+
+ mutex_lock(&kss->lock);
+}
+
+/**
+ * ks8851_unlock_spi - register access unlock
+ * @ks: The chip state
+ * @flags: Spinlock flags
+ *
+ * Release chip register access lock
+ */
+static void ks8851_unlock_spi(struct ks8851_net *ks, unsigned long *flags)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+
+ mutex_unlock(&kss->lock);
+}
+
+/* SPI register read/write calls.
+ *
+ * All these calls issue SPI transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+/**
+ * ks8851_wrreg16_spi - write 16bit register value to chip via SPI
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg16_spi(struct ks8851_net *ks, unsigned int reg,
+ unsigned int val)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+ struct spi_transfer *xfer = &kss->spi_xfer1;
+ struct spi_message *msg = &kss->spi_msg1;
+ __le16 txb[2];
+ int ret;
+
+ txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR);
+ txb[1] = cpu_to_le16(val);
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 4;
+
+ ret = spi_sync(kss->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "spi_sync() failed\n");
+}
+
+/**
+ * ks8851_rdreg - issue read register command and return the data
+ * @ks: The device state
+ * @op: The register address and byte enables in message format.
+ * @rxb: The RX buffer to return the result into
+ * @rxl: The length of data expected.
+ *
+ * This is the low level read call that issues the necessary spi message(s)
+ * to read data from the register specified in @op.
+ */
+static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
+ u8 *rxb, unsigned int rxl)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+ struct spi_transfer *xfer;
+ struct spi_message *msg;
+ __le16 *txb = (__le16 *)ks->txd;
+ u8 *trx = ks->rxd;
+ int ret;
+
+ txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
+
+ if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
+ msg = &kss->spi_msg2;
+ xfer = kss->spi_xfer2;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 2;
+
+ xfer++;
+ xfer->tx_buf = NULL;
+ xfer->rx_buf = trx;
+ xfer->len = rxl;
+ } else {
+ msg = &kss->spi_msg1;
+ xfer = &kss->spi_xfer1;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = trx;
+ xfer->len = rxl + 2;
+ }
+
+ ret = spi_sync(kss->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "read: spi_sync() failed\n");
+ else if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
+ memcpy(rxb, trx, rxl);
+ else
+ memcpy(rxb, trx + 2, rxl);
+}
+
+/**
+ * ks8851_rdreg16_spi - read 16 bit register from device via SPI
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 16bit register from the chip, returning the result
+ */
+static unsigned int ks8851_rdreg16_spi(struct ks8851_net *ks, unsigned int reg)
+{
+ __le16 rx = 0;
+
+ ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2);
+ return le16_to_cpu(rx);
+}
+
+/**
+ * ks8851_rdfifo_spi - read data from the receive fifo via SPI
+ * @ks: The device state.
+ * @buff: The buffer address
+ * @len: The length of the data to read
+ *
+ * Issue an RXQ FIFO read command and read the @len amount of data from
+ * the FIFO into the buffer specified by @buff.
+ */
+static void ks8851_rdfifo_spi(struct ks8851_net *ks, u8 *buff, unsigned int len)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+ struct spi_transfer *xfer = kss->spi_xfer2;
+ struct spi_message *msg = &kss->spi_msg2;
+ u8 txb[1];
+ int ret;
+
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d@%p\n", __func__, len, buff);
+
+ /* set the operation we're issuing */
+ txb[0] = KS_SPIOP_RXFIFO;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 1;
+
+ xfer++;
+ xfer->rx_buf = buff;
+ xfer->tx_buf = NULL;
+ xfer->len = len;
+
+ ret = spi_sync(kss->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
+}
+
+/**
+ * ks8851_wrfifo_spi - write packet to TX FIFO via SPI
+ * @ks: The device state.
+ * @txp: The sk_buff to transmit.
+ * @irq: IRQ on completion of the packet.
+ *
+ * Send the @txp to the chip. This means creating the relevant packet header
+ * specifying the length of the packet and the other information the chip
+ * needs, such as IRQ on completion. Send the header and the packet data to
+ * the device.
+ */
+static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
+ bool irq)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+ struct spi_transfer *xfer = kss->spi_xfer2;
+ struct spi_message *msg = &kss->spi_msg2;
+ unsigned int fid = 0;
+ int ret;
+
+ netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
+ __func__, txp, txp->len, txp->data, irq);
+
+ fid = ks->fid++;
+ fid &= TXFR_TXFID_MASK;
+
+ if (irq)
+ fid |= TXFR_TXIC; /* irq on completion */
+
+ /* start header at txb[1] to align txw entries */
+ ks->txh.txb[1] = KS_SPIOP_TXFIFO;
+ ks->txh.txw[1] = cpu_to_le16(fid);
+ ks->txh.txw[2] = cpu_to_le16(txp->len);
+
+ xfer->tx_buf = &ks->txh.txb[1];
+ xfer->rx_buf = NULL;
+ xfer->len = 5;
+
+ xfer++;
+ xfer->tx_buf = txp->data;
+ xfer->rx_buf = NULL;
+ xfer->len = ALIGN(txp->len, 4);
+
+ ret = spi_sync(kss->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
+}
+
+/**
+ * ks8851_rx_skb_spi - receive skbuff
+ * @ks: The device state
+ * @skb: The skbuff
+ */
+static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
+{
+ netif_rx_ni(skb);
+}
+
+/**
+ * ks8851_tx_work - process tx packet(s)
+ * @work: The work structure that was scheduled.
+ *
+ * This is called when a number of packets have been scheduled for
+ * transmission and need to be sent to the device.
+ */
+static void ks8851_tx_work(struct work_struct *work)
+{
+ struct ks8851_net_spi *kss;
+ struct ks8851_net *ks;
+ unsigned long flags;
+ struct sk_buff *txb;
+ bool last;
+
+ kss = container_of(work, struct ks8851_net_spi, tx_work);
+ ks = &kss->ks8851;
+ last = skb_queue_empty(&ks->txq);
+
+ ks8851_lock_spi(ks, &flags);
+
+ while (!last) {
+ txb = skb_dequeue(&ks->txq);
+ last = skb_queue_empty(&ks->txq);
+
+ if (txb) {
+ ks8851_wrreg16_spi(ks, KS_RXQCR,
+ ks->rc_rxqcr | RXQCR_SDA);
+ ks8851_wrfifo_spi(ks, txb, last);
+ ks8851_wrreg16_spi(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks8851_wrreg16_spi(ks, KS_TXQCR, TXQCR_METFE);
+
+ ks8851_done_tx(ks, txb);
+ }
+ }
+
+ ks8851_unlock_spi(ks, &flags);
+}
+
+/**
+ * ks8851_flush_tx_work_spi - flush outstanding TX work
+ * @ks: The device state
+ */
+static void ks8851_flush_tx_work_spi(struct ks8851_net *ks)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+
+ flush_work(&kss->tx_work);
+}
+
+/**
+ * calc_txlen - calculate size of message to send packet
+ * @len: Length of data
+ *
+ * Returns the size of the TXFIFO message needed to send
+ * this packet.
+ */
+static unsigned int calc_txlen(unsigned int len)
+{
+ return ALIGN(len + 4, 4);
+}
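calc_txlen() accounts for the 4-byte TX FIFO header and rounds the total up to a 4-byte boundary. The same arithmetic restated as a compile-time check (illustration only; CALC_TXLEN_EXAMPLE is a hypothetical macro, not in the driver):

#include <linux/build_bug.h>
#include <linux/kernel.h>	/* ALIGN() */

#define CALC_TXLEN_EXAMPLE(len)	ALIGN((len) + 4, 4)

static_assert(CALC_TXLEN_EXAMPLE(60) == 64);	/* minimum Ethernet frame */
static_assert(CALC_TXLEN_EXAMPLE(61) == 68);	/* rounds up to 4 bytes */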
+
+/**
+ * ks8851_start_xmit_spi - transmit packet using SPI
+ * @skb: The buffer to transmit
+ * @dev: The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb. Queue the packet for
+ * the device and schedule the necessary work to transmit the packet when
+ * it is free.
+ *
+ * We do this to firstly avoid sleeping with the network device locked,
+ * and secondly so we can round up more than one packet to transmit which
+ * means we can try and avoid generating too many transmit done interrupts.
+ */
+static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ unsigned int needed = calc_txlen(skb->len);
+ struct ks8851_net *ks = netdev_priv(dev);
+ netdev_tx_t ret = NETDEV_TX_OK;
+ struct ks8851_net_spi *kss;
+
+ kss = to_ks8851_spi(ks);
+
+ netif_dbg(ks, tx_queued, ks->netdev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
+
+ spin_lock(&ks->statelock);
+
+ if (needed > ks->tx_space) {
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ } else {
+ ks->tx_space -= needed;
+ skb_queue_tail(&ks->txq, skb);
+ }
+
+ spin_unlock(&ks->statelock);
+ schedule_work(&kss->tx_work);
+
+ return ret;
+}
+
+static int ks8851_probe_spi(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ks8851_net_spi *kss;
+ struct net_device *netdev;
+ struct ks8851_net *ks;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(struct ks8851_net_spi));
+ if (!netdev)
+ return -ENOMEM;
+
+ spi->bits_per_word = 8;
+
+ ks = netdev_priv(netdev);
+
+ ks->lock = ks8851_lock_spi;
+ ks->unlock = ks8851_unlock_spi;
+ ks->rdreg16 = ks8851_rdreg16_spi;
+ ks->wrreg16 = ks8851_wrreg16_spi;
+ ks->rdfifo = ks8851_rdfifo_spi;
+ ks->wrfifo = ks8851_wrfifo_spi;
+ ks->start_xmit = ks8851_start_xmit_spi;
+ ks->rx_skb = ks8851_rx_skb_spi;
+ ks->flush_tx_work = ks8851_flush_tx_work_spi;
+
+#define STD_IRQ (IRQ_LCI | /* Link Change */ \
+ IRQ_TXI | /* TX done */ \
+ IRQ_RXI | /* RX done */ \
+ IRQ_SPIBEI | /* SPI bus error */ \
+ IRQ_TXPSI | /* TX process stop */ \
+ IRQ_RXPSI) /* RX process stop */
+ ks->rc_ier = STD_IRQ;
+
+ kss = to_ks8851_spi(ks);
+
+ kss->spidev = spi;
+ mutex_init(&kss->lock);
+ INIT_WORK(&kss->tx_work, ks8851_tx_work);
+
+ /* initialise pre-made spi transfer messages */
+ spi_message_init(&kss->spi_msg1);
+ spi_message_add_tail(&kss->spi_xfer1, &kss->spi_msg1);
+
+ spi_message_init(&kss->spi_msg2);
+ spi_message_add_tail(&kss->spi_xfer2[0], &kss->spi_msg2);
+ spi_message_add_tail(&kss->spi_xfer2[1], &kss->spi_msg2);
+
+ netdev->irq = spi->irq;
+
+ return ks8851_probe_common(netdev, dev, msg_enable);
+}
+
+static int ks8851_remove_spi(struct spi_device *spi)
+{
+ return ks8851_remove_common(&spi->dev);
+}
+
+static const struct of_device_id ks8851_match_table[] = {
+ { .compatible = "micrel,ks8851" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ks8851_match_table);
+
+static struct spi_driver ks8851_driver = {
+ .driver = {
+ .name = "ks8851",
+ .of_match_table = ks8851_match_table,
+ .pm = &ks8851_pm_ops,
+ },
+ .probe = ks8851_probe_spi,
+ .remove = ks8851_remove_spi,
+};
+module_spi_driver(ks8851_driver);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+MODULE_ALIAS("spi:ks8851");
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 1f496fac7033..5bd7fb917b7a 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -17,11 +17,6 @@
#include "encx24j600_hw.h"
-static inline bool is_bits_set(int value, int mask)
-{
- return (value & mask) == mask;
-}
-
static int encx24j600_switch_bank(struct encx24j600_context *ctx,
int bank)
{
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 39925e4bf2ec..2c0dcd7acf3f 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -604,9 +604,8 @@ static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
}
}
-static int encx24j600_hw_init(struct encx24j600_priv *priv)
+static void encx24j600_hw_init(struct encx24j600_priv *priv)
{
- int ret = 0;
u16 macon2;
priv->hw_enabled = false;
@@ -649,8 +648,6 @@ static int encx24j600_hw_init(struct encx24j600_priv *priv)
if (netif_msg_hw(priv))
encx24j600_dump_config(priv, "Hw is initialized");
-
- return ret;
}
static void encx24j600_hw_enable(struct encx24j600_priv *priv)
@@ -1042,12 +1039,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
}
/* Initialize the device HW to the consistent state */
- if (encx24j600_hw_init(priv)) {
- netif_err(priv, probe, ndev,
- DRV_NAME ": HW initialization error\n");
- ret = -EIO;
- goto out_free;
- }
+ encx24j600_hw_init(priv);
kthread_init_worker(&priv->kworker);
kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
@@ -1070,7 +1062,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
if (unlikely(ret)) {
netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n",
ret);
- goto out_free;
+ goto out_stop;
}
eidled = encx24j600_read_reg(priv, EIDLED);
@@ -1088,6 +1080,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
out_unregister:
unregister_netdev(priv->ndev);
+out_stop:
+ kthread_stop(priv->kworker_task);
out_free:
free_netdev(ndev);
@@ -1100,6 +1094,7 @@ static int encx24j600_spi_remove(struct spi_device *spi)
struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
unregister_netdev(priv->ndev);
+ kthread_stop(priv->kworker_task);
free_netdev(priv->ndev);
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 3a0b289d9771..c533d06fbe3a 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -2,11 +2,11 @@
/* Copyright (C) 2018 Microchip Technology Inc. */
#include <linux/netdevice.h>
-#include "lan743x_main.h"
-#include "lan743x_ethtool.h"
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include <linux/phy.h>
+#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
/* eeprom */
#define LAN743X_EEPROM_MAGIC (0x74A5)
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index a43140f7b5eb..36624e3c633b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -8,7 +8,10 @@
#include <linux/crc32.h>
#include <linux/microchipphy.h>
#include <linux/net_tstamp.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
#include <linux/crc16.h>
@@ -798,9 +801,9 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
netdev = adapter->netdev;
- /* setup auto duplex, and speed detection */
+ /* disable auto duplex, and speed detection. Phylib does that */
data = lan743x_csr_read(adapter, MAC_CR);
- data |= MAC_CR_ADD_ | MAC_CR_ASD_;
+ data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
data |= MAC_CR_CNTR_RST_;
lan743x_csr_write(adapter, MAC_CR, data);
@@ -946,6 +949,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
+ u32 data;
phy_print_status(phydev);
if (phydev->state == PHY_RUNNING) {
@@ -953,6 +957,39 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
int remote_advertisement = 0;
int local_advertisement = 0;
+ data = lan743x_csr_read(adapter, MAC_CR);
+
+ /* set interface mode */
+ if (phy_interface_mode_is_rgmii(adapter->phy_mode))
+ /* RGMII */
+ data &= ~MAC_CR_MII_EN_;
+ else
+ /* GMII */
+ data |= MAC_CR_MII_EN_;
+
+ /* set duplex mode */
+ if (phydev->duplex)
+ data |= MAC_CR_DPX_;
+ else
+ data &= ~MAC_CR_DPX_;
+
+ /* set bus speed */
+ switch (phydev->speed) {
+ case SPEED_10:
+ data &= ~MAC_CR_CFG_H_;
+ data &= ~MAC_CR_CFG_L_;
+ break;
+ case SPEED_100:
+ data &= ~MAC_CR_CFG_H_;
+ data |= MAC_CR_CFG_L_;
+ break;
+ case SPEED_1000:
+ data |= MAC_CR_CFG_H_;
+ data |= MAC_CR_CFG_L_;
+ break;
+ }
+ lan743x_csr_write(adapter, MAC_CR, data);
+
memset(&ksettings, 0, sizeof(ksettings));
phy_ethtool_get_link_ksettings(netdev, &ksettings);
local_advertisement =
@@ -980,20 +1017,44 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
struct lan743x_phy *phy = &adapter->phy;
+ struct device_node *phynode;
struct phy_device *phydev;
struct net_device *netdev;
int ret = -EIO;
netdev = adapter->netdev;
- phydev = phy_find_first(adapter->mdiobus);
- if (!phydev)
- goto return_error;
+ phynode = of_node_get(adapter->pdev->dev.of_node);
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+
+ if (phynode) {
+ of_get_phy_mode(phynode, &adapter->phy_mode);
+
+ if (of_phy_is_fixed_link(phynode)) {
+ ret = of_phy_register_fixed_link(phynode);
+ if (ret) {
+ netdev_err(netdev,
+ "cannot register fixed PHY\n");
+ of_node_put(phynode);
+ goto return_error;
+ }
+ }
+ phydev = of_phy_connect(netdev, phynode,
+ lan743x_phy_link_status_change, 0,
+ adapter->phy_mode);
+ of_node_put(phynode);
+ if (!phydev)
+ goto return_error;
+ } else {
+ phydev = phy_find_first(adapter->mdiobus);
+ if (!phydev)
+ goto return_error;
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- PHY_INTERFACE_MODE_GMII);
- if (ret)
- goto return_error;
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ adapter->phy_mode);
+ if (ret)
+ goto return_error;
+ }
/* MAC doesn't support 1000T Half */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 3b02eeae5f45..c61a40411317 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -4,6 +4,7 @@
#ifndef _LAN743X_H
#define _LAN743X_H
+#include <linux/phy.h>
#include "lan743x_ptp.h"
#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
@@ -104,10 +105,14 @@
((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_)
#define MAC_CR (0x100)
+#define MAC_CR_MII_EN_ BIT(19)
#define MAC_CR_EEE_EN_ BIT(17)
#define MAC_CR_ADD_ BIT(12)
#define MAC_CR_ASD_ BIT(11)
#define MAC_CR_CNTR_RST_ BIT(5)
+#define MAC_CR_DPX_ BIT(3)
+#define MAC_CR_CFG_H_ BIT(2)
+#define MAC_CR_CFG_L_ BIT(1)
#define MAC_CR_RST_ BIT(0)
#define MAC_RX (0x104)
@@ -698,6 +703,7 @@ struct lan743x_rx {
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
+ phy_interface_t phy_mode;
int msg_enable;
#ifdef CONFIG_PM
u32 wolopts;
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 9399f6a98748..ab6d719d40f0 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -2,12 +2,12 @@
/* Copyright (C) 2018 Microchip Technology Inc. */
#include <linux/netdevice.h>
-#include "lan743x_main.h"
#include <linux/ptp_clock_kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
+#include "lan743x_main.h"
#include "lan743x_ptp.h"
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index f70bb81e1ed6..49fd843c4c8a 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -331,14 +331,15 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
void *desc;
unsigned int len;
unsigned int tx_head;
u32 txdes1;
- int ret = NETDEV_TX_BUSY;
+ netdev_tx_t ret = NETDEV_TX_BUSY;
spin_lock_irq(&priv->txlock);
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 9a36c26095c8..91b33b55054e 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0 OR MIT)
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o
mscc_ocelot_common-y := ocelot.o ocelot_io.o
-mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o
+mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o ocelot_ptp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 02350c3d9d01..9cfe1fd98c30 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
#include <linux/iopoll.h>
#include <net/arp.h>
@@ -1205,18 +1204,19 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->chip_port;
- /* The function is only used for PTP operations for now */
- if (!ocelot->ptp)
- return -EOPNOTSUPP;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(ocelot, port, ifr);
- case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(ocelot, port, ifr);
- default:
- return -EOPNOTSUPP;
+ /* If the attached PHY device isn't capable of timestamping operations,
+ * use our own (when possible).
+ */
+ if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return ocelot_hwstamp_set(ocelot, port, ifr);
+ case SIOCGHWTSTAMP:
+ return ocelot_hwstamp_get(ocelot, port, ifr);
+ }
}
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static const struct net_device_ops ocelot_port_netdev_ops = {
@@ -1348,6 +1348,12 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
{
info->phc_index = ocelot->ptp_clock ?
ptp_clock_index(ocelot->ptp_clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
@@ -1467,7 +1473,7 @@ static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
- u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
ocelot_set_ageing_time(ocelot, ageing_time);
}
@@ -1996,200 +2002,6 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
-int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- unsigned long flags;
- time64_t s;
- u32 val;
- s64 ns;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
- s <<= 32;
- s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
- ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
-
- /* Deal with negative values */
- if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
- s--;
- ns &= 0xf;
- ns += 999999984;
- }
-
- set_normalized_timespec64(ts, s, ns);
- return 0;
-}
-EXPORT_SYMBOL(ocelot_ptp_gettime64);
-
-static int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
- TOD_ACC_PIN);
- ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
- TOD_ACC_PIN);
- ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- return 0;
-}
-
-static int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
- ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
- ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- } else {
- /* Fall back using ocelot_ptp_settime64 which is not exact. */
- struct timespec64 ts;
- u64 now;
-
- ocelot_ptp_gettime64(ptp, &ts);
-
- now = ktime_to_ns(timespec64_to_ktime(ts));
- ts = ns_to_timespec64(now + delta);
-
- ocelot_ptp_settime64(ptp, &ts);
- }
- return 0;
-}
-
-static int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
-{
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- u32 unit = 0, direction = 0;
- unsigned long flags;
- u64 adj = 0;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- if (!scaled_ppm)
- goto disable_adj;
-
- if (scaled_ppm < 0) {
- direction = PTP_CFG_CLK_ADJ_CFG_DIR;
- scaled_ppm = -scaled_ppm;
- }
-
- adj = PSEC_PER_SEC << 16;
- do_div(adj, scaled_ppm);
- do_div(adj, 1000);
-
- /* If the adjustment value is too large, use ns instead */
- if (adj >= (1L << 30)) {
- unit = PTP_CFG_CLK_ADJ_FREQ_NS;
- do_div(adj, 1000);
- }
-
- /* Still too big */
- if (adj >= (1L << 30))
- goto disable_adj;
-
- ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
- ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
- PTP_CLK_CFG_ADJ_CFG);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- return 0;
-
-disable_adj:
- ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- return 0;
-}
-
-static struct ptp_clock_info ocelot_ptp_clock_info = {
- .owner = THIS_MODULE,
- .name = "ocelot ptp",
- .max_adj = 0x7fffffff,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = 0,
- .n_pins = 0,
- .pps = 0,
- .gettime64 = ocelot_ptp_gettime64,
- .settime64 = ocelot_ptp_settime64,
- .adjtime = ocelot_ptp_adjtime,
- .adjfine = ocelot_ptp_adjfine,
-};
-
-static int ocelot_init_timestamp(struct ocelot *ocelot)
-{
- struct ptp_clock *ptp_clock;
-
- ocelot->ptp_info = ocelot_ptp_clock_info;
- ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
- if (IS_ERR(ptp_clock))
- return PTR_ERR(ptp_clock);
- /* Check if PHC support is missing at the configuration level */
- if (!ptp_clock)
- return 0;
-
- ocelot->ptp_clock = ptp_clock;
-
- ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
- ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
- ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
-
- ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
-
- /* There is no device reconfiguration, PTP Rx stamping is always
- * enabled.
- */
- ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-
- return 0;
-}
-
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
* In the special case that it's the NPI port that we're configuring, the
@@ -2535,15 +2347,6 @@ int ocelot_init(struct ocelot *ocelot)
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
- if (ocelot->ptp) {
- ret = ocelot_init_timestamp(ocelot);
- if (ret) {
- dev_err(ocelot->dev,
- "Timestamp initialization failed\n");
- return ret;
- }
- }
-
return 0;
}
EXPORT_SYMBOL(ocelot_init);
@@ -2556,8 +2359,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
- if (ocelot->ptp_clock)
- ptp_clock_unregister(ocelot->ptp_clock);
for (i = 0; i < ocelot->num_phys_ports; i++) {
port = ocelot->ports[i];
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 641af929497f..f0a15aa187f2 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -15,18 +15,17 @@
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include "ocelot_rew.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
-#include "ocelot_ptp.h"
#define OCELOT_BUFFER_CELL_SZ 60
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 3bd286044480..dfd82a3baab2 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -706,13 +706,124 @@ ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
return NULL;
}
+/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
+ * on destination and source MAC addresses, but only on higher-level protocol
+ * information. The only frame types to match on keys containing MAC addresses
+ * in this case are non-SNAP, non-ARP, non-IP and non-OAM frames.
+ *
+ * If @on=true, then the above frame types (SNAP, ARP, IP and OAM) will match
+ * on MAC_ETYPE keys such as destination and source MAC on this ingress port.
+ * However the setting has the side effect of making these frames not matching
+ * on any _other_ keys than MAC_ETYPE ones.
+ */
+static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port,
+ bool on)
+{
+ u32 val = 0;
+
+ if (on)
+ val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(3);
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M,
+ ANA_PORT_VCAP_S2_CFG, port);
+}
+
+static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace)
+{
+ u16 proto, mask;
+
+ if (ace->type != OCELOT_ACE_TYPE_ETYPE)
+ return false;
+
+ proto = ntohs(*(u16 *)ace->frame.etype.etype.value);
+ mask = ntohs(*(u16 *)ace->frame.etype.etype.mask);
+
+ /* ETH_P_ALL match, so all protocols below are included */
+ if (mask == 0)
+ return true;
+ if (proto == ETH_P_ARP)
+ return true;
+ if (proto == ETH_P_IP)
+ return true;
+ if (proto == ETH_P_IPV6)
+ return true;
+
+ return false;
+}
+
+static bool ocelot_ace_is_problematic_non_mac_etype(struct ocelot_ace_rule *ace)
+{
+ if (ace->type == OCELOT_ACE_TYPE_SNAP)
+ return true;
+ if (ace->type == OCELOT_ACE_TYPE_ARP)
+ return true;
+ if (ace->type == OCELOT_ACE_TYPE_IPV4)
+ return true;
+ if (ace->type == OCELOT_ACE_TYPE_IPV6)
+ return true;
+ return false;
+}
+
+static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot,
+ struct ocelot_ace_rule *ace)
+{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
+ struct ocelot_ace_rule *tmp;
+ unsigned long port;
+ int i;
+
+ if (ocelot_ace_is_problematic_mac_etype(ace)) {
+ /* Search for any non-MAC_ETYPE rules on the port */
+ for (i = 0; i < block->count; i++) {
+ tmp = ocelot_ace_rule_get_rule_index(block, i);
+ if (tmp->ingress_port_mask & ace->ingress_port_mask &&
+ ocelot_ace_is_problematic_non_mac_etype(tmp))
+ return false;
+ }
+
+ for_each_set_bit(port, &ace->ingress_port_mask,
+ ocelot->num_phys_ports)
+ ocelot_match_all_as_mac_etype(ocelot, port, true);
+ } else if (ocelot_ace_is_problematic_non_mac_etype(ace)) {
+ /* Search for any MAC_ETYPE rules on the port */
+ for (i = 0; i < block->count; i++) {
+ tmp = ocelot_ace_rule_get_rule_index(block, i);
+ if (tmp->ingress_port_mask & ace->ingress_port_mask &&
+ ocelot_ace_is_problematic_mac_etype(tmp))
+ return false;
+ }
+
+ for_each_set_bit(port, &ace->ingress_port_mask,
+ ocelot->num_phys_ports)
+ ocelot_match_all_as_mac_etype(ocelot, port, false);
+ }
+
+ return true;
+}
+
int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
- struct ocelot_ace_rule *rule)
+ struct ocelot_ace_rule *rule,
+ struct netlink_ext_ack *extack)
{
struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule *ace;
int i, index;
+ if (!ocelot_exclusive_mac_etype_ace_rules(ocelot, rule)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules");
+ return -EBUSY;
+ }
+
/* Add rule to the linked list */
ocelot_ace_rule_add(ocelot, block, rule);
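
For reference, here is a minimal userspace model of the exclusivity check added above. This is an illustrative sketch only: struct rule and rules_compatible() are simplified stand-ins for struct ocelot_ace_rule and ocelot_exclusive_mac_etype_ace_rules(), not driver code.

	#include <stdbool.h>
	#include <stdio.h>

	enum rule_kind { KIND_MAC_ETYPE, KIND_NON_MAC_ETYPE, KIND_OTHER };

	struct rule {
		enum rule_kind kind;
		unsigned long ingress_port_mask;	/* one bit per ingress port */
	};

	/* Model: a new rule is accepted only if no already-installed rule of
	 * the conflicting kind shares an ingress port with it.
	 */
	static bool rules_compatible(const struct rule *installed, int count,
				     const struct rule *new_rule)
	{
		enum rule_kind conflict;
		int i;

		if (new_rule->kind == KIND_MAC_ETYPE)
			conflict = KIND_NON_MAC_ETYPE;
		else if (new_rule->kind == KIND_NON_MAC_ETYPE)
			conflict = KIND_MAC_ETYPE;
		else
			return true;

		for (i = 0; i < count; i++)
			if ((installed[i].ingress_port_mask &
			     new_rule->ingress_port_mask) &&
			    installed[i].kind == conflict)
				return false;	/* driver: -EBUSY + extack */
		return true;
	}

	int main(void)
	{
		struct rule installed[] = {
			{ KIND_NON_MAC_ETYPE, 0x3 },	/* IPv4 rule, ports 0-1 */
		};
		struct rule mac_rule = { KIND_MAC_ETYPE, 0x2 };	/* port 1 */

		printf("accepted: %d\n",
		       rules_compatible(installed, 1, &mac_rule));	/* 0 */
		return 0;
	}

With a shared ingress port the model rejects the mix, mirroring the -EBUSY and the "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules" extack message in the hunk.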
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index 29d22c566786..099e177f2617 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -194,7 +194,7 @@ struct ocelot_ace_rule {
enum ocelot_ace_action action;
struct ocelot_ace_stats stats;
- u16 ingress_port_mask;
+ unsigned long ingress_port_mask;
enum ocelot_vcap_bit dmac_mc;
enum ocelot_vcap_bit dmac_bc;
@@ -215,7 +215,8 @@ struct ocelot_ace_rule {
};
int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
- struct ocelot_ace_rule *rule);
+ struct ocelot_ace_rule *rule,
+ struct netlink_ext_ack *extack);
int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
struct ocelot_ace_rule *rule);
int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 0ac9fbf77a01..4a15d2ff8b70 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -189,7 +189,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
skb->offload_fwd_mark = 1;
skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
dev->stats.rx_bytes += len;
dev->stats.rx_packets++;
} while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp));
@@ -366,6 +367,23 @@ static const struct vcap_props vsc7514_vcap_props[] = {
},
};
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ocelot ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -469,6 +487,15 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->vcap = vsc7514_vcap_props;
ocelot_init(ocelot);
+ if (ocelot->ptp) {
+ err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+ if (err) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ ocelot->ptp = 0;
+ }
+ }
+
/* No NPI port */
ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
OCELOT_TAG_PREFIX_NONE);
@@ -574,6 +601,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 341923311fec..5ce172e22b43 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -51,6 +51,8 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
+ u16 proto = ntohs(f->common.protocol);
+ bool match_protocol = true;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -71,7 +73,6 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
- u16 proto = ntohs(f->common.protocol);
	/* The hw supports MAC matches only for the MAC_ETYPE key,
	 * therefore if other matches (port, tcp flags, etc.) are added
@@ -86,11 +87,6 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
BIT(FLOW_DISSECTOR_KEY_CONTROL)))
return -EOPNOTSUPP;
- if (proto == ETH_P_IP ||
- proto == ETH_P_IPV6 ||
- proto == ETH_P_ARP)
- return -EOPNOTSUPP;
-
flow_rule_match_eth_addrs(rule, &match);
ace->type = OCELOT_ACE_TYPE_ETYPE;
ether_addr_copy(ace->frame.etype.dmac.value,
@@ -114,6 +110,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
match.key->ip_proto;
ace->frame.ipv4.proto.mask[0] =
match.mask->ip_proto;
+ match_protocol = false;
}
if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
ace->type = OCELOT_ACE_TYPE_IPV6;
@@ -121,11 +118,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
match.key->ip_proto;
ace->frame.ipv6.proto.mask[0] =
match.mask->ip_proto;
+ match_protocol = false;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) &&
- ntohs(f->common.protocol) == ETH_P_IP) {
+ proto == ETH_P_IP) {
struct flow_match_ipv4_addrs match;
u8 *tmp;
@@ -141,10 +139,11 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
tmp = &ace->frame.ipv4.dip.mask.addr[0];
memcpy(tmp, &match.mask->dst, 4);
+ match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) &&
- ntohs(f->common.protocol) == ETH_P_IPV6) {
+ proto == ETH_P_IPV6) {
return -EOPNOTSUPP;
}
@@ -156,6 +155,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
ace->frame.ipv4.sport.mask = ntohs(match.mask->src);
ace->frame.ipv4.dport.value = ntohs(match.key->dst);
ace->frame.ipv4.dport.mask = ntohs(match.mask->dst);
+ match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -167,9 +167,20 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
ace->vlan.vid.mask = match.mask->vlan_id;
ace->vlan.pcp.value[0] = match.key->vlan_priority;
ace->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ match_protocol = false;
}
finished_key_parsing:
+ if (match_protocol && proto != ETH_P_ALL) {
+ /* TODO: support SNAP, LLC etc */
+ if (proto < ETH_P_802_3_MIN)
+ return -EOPNOTSUPP;
+ ace->type = OCELOT_ACE_TYPE_ETYPE;
+ *(u16 *)ace->frame.etype.etype.value = htons(proto);
+ *(u16 *)ace->frame.etype.etype.mask = 0xffff;
+ }
+ /* else, a rule of type OCELOT_ACE_TYPE_ANY is implicitly added */
+
ace->prio = f->common.prio;
ace->id = f->cookie;
return ocelot_flower_parse_action(f, ace);
@@ -205,7 +216,7 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
return ret;
}
- return ocelot_ace_rule_offload_add(ocelot, ace);
+ return ocelot_ace_rule_offload_add(ocelot, ace, f->common.extack);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
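
The match_protocol fallback above can be summarized as: if no dissector key consumed the EtherType, and the filter protocol is a real EtherType (at least ETH_P_802_3_MIN) rather than ETH_P_ALL, synthesize a MAC_ETYPE match on that protocol. A small userspace sketch of just that decision (finish_parse() and struct etype_key are invented stand-ins; -95 stands in for -EOPNOTSUPP):

	#include <stdint.h>
	#include <stdio.h>

	#define ETH_P_ALL	0x0003
	#define ETH_P_802_3_MIN	0x0600

	struct etype_key {
		uint16_t value;
		uint16_t mask;
		int valid;
	};

	static int finish_parse(int match_protocol, uint16_t proto,
				struct etype_key *key)
	{
		if (match_protocol && proto != ETH_P_ALL) {
			if (proto < ETH_P_802_3_MIN)	/* SNAP, LLC, ... */
				return -95;
			key->value = proto;		/* htons() in the driver */
			key->mask = 0xffff;
			key->valid = 1;
		}
		/* else: an OCELOT_ACE_TYPE_ANY rule is implied */
		return 0;
	}

	int main(void)
	{
		struct etype_key key = { 0 };

		printf("0x88f7: %d\n", finish_parse(1, 0x88f7, &key)); /* PTP: ok */
		printf("0x0100: %d\n", finish_parse(1, 0x0100, &key)); /* LLC: -95 */
		return 0;
	}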
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
new file mode 100644
index 000000000000..a3088a1676ed
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot PTP clock driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright 2020 NXP
+ */
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
+
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ time64_t s;
+ u32 val;
+ s64 ns;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
+ s <<= 32;
+ s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_gettime64);
+
+int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_settime64);
+
+int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ struct ocelot *ocelot = container_of(ptp, struct ocelot,
+ ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+ PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+ PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ } else {
+		/* Fall back to using ocelot_ptp_settime64, which is not exact. */
+ struct timespec64 ts;
+ u64 now;
+
+ ocelot_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ ocelot_ptp_settime64(ptp, &ts);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjtime);
+
+int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ u32 unit = 0, direction = 0;
+ unsigned long flags;
+ u64 adj = 0;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ if (!scaled_ppm)
+ goto disable_adj;
+
+ if (scaled_ppm < 0) {
+ direction = PTP_CFG_CLK_ADJ_CFG_DIR;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ adj = PSEC_PER_SEC << 16;
+ do_div(adj, scaled_ppm);
+ do_div(adj, 1000);
+
+ /* If the adjustment value is too large, use ns instead */
+ if (adj >= (1L << 30)) {
+ unit = PTP_CFG_CLK_ADJ_FREQ_NS;
+ do_div(adj, 1000);
+ }
+
+ /* Still too big */
+ if (adj >= (1L << 30))
+ goto disable_adj;
+
+ ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
+ ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
+ PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+
+disable_adj:
+ ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjfine);
+
+int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_verify);
+
+int ocelot_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ struct timespec64 ts_start, ts_period;
+ enum ocelot_ptp_pins ptp_pin;
+ unsigned long flags;
+ bool pps = false;
+ int pin = -1;
+ u32 val;
+ s64 ns;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin == 0)
+ ptp_pin = PTP_PIN_0;
+ else if (pin == 1)
+ ptp_pin = PTP_PIN_1;
+ else if (pin == 2)
+ ptp_pin = PTP_PIN_2;
+ else if (pin == 3)
+ ptp_pin = PTP_PIN_3;
+ else
+ return -EBUSY;
+
+ ts_start.tv_sec = rq->perout.start.sec;
+ ts_start.tv_nsec = rq->perout.start.nsec;
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
+ if (ts_period.tv_sec == 1 && ts_period.tv_nsec == 0)
+ pps = true;
+
+ if (ts_start.tv_sec || (ts_start.tv_nsec && !pps)) {
+ dev_warn(ocelot->dev,
+ "Absolute start time not supported!\n");
+ dev_warn(ocelot->dev,
+				 "Nsec is accepted only for PPS phase adjustment; otherwise the start time must be 0 0.\n");
+ return -EINVAL;
+ }
+
+ /* Handle turning off */
+ if (!on) {
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ }
+
+ /* Handle PPS request */
+ if (pps) {
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+			/* The pulse is generated perout.start.nsec after the
+			 * TOD seconds counter has incremented.
+			 * The pulse width is set to 1 us.
+ */
+ ocelot_write_rix(ocelot, ts_start.tv_nsec,
+ PTP_PIN_WF_LOW_PERIOD, ptp_pin);
+ ocelot_write_rix(ocelot, 1000,
+ PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
+ val |= PTP_PIN_CFG_SYNC;
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ }
+
+ /* Handle periodic clock */
+ ns = timespec64_to_ns(&ts_period);
+ ns = ns >> 1;
+ if (ns > 0x3fffffff || ns <= 0x6)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ ocelot_write_rix(ocelot, ns, PTP_PIN_WF_LOW_PERIOD, ptp_pin);
+ ocelot_write_rix(ocelot, ns, PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_enable);
+
+int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info)
+{
+ struct ptp_clock *ptp_clock;
+ int i;
+
+ ocelot->ptp_info = *info;
+
+ for (i = 0; i < OCELOT_PTP_PINS_NUM; i++) {
+ struct ptp_pin_desc *p = &ocelot->ptp_pins[i];
+
+ snprintf(p->name, sizeof(p->name), "switch_1588_dat%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+
+ ocelot->ptp_info.pin_config = &ocelot->ptp_pins[0];
+
+ ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
+ if (IS_ERR(ptp_clock))
+ return PTR_ERR(ptp_clock);
+ /* Check if PHC support is missing at the configuration level */
+ if (!ptp_clock)
+ return 0;
+
+ ocelot->ptp_clock = ptp_clock;
+
+ ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
+
+ ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
+
+	/* There is no device reconfiguration; PTP Rx stamping is always
+	 * enabled.
+ */
+ ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_init_timestamp);
+
+int ocelot_deinit_timestamp(struct ocelot *ocelot)
+{
+ if (ocelot->ptp_clock)
+ ptp_clock_unregister(ocelot->ptp_clock);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_deinit_timestamp);
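
The arithmetic in ocelot_ptp_adjfine() above deserves a worked example: scaled_ppm is ppm scaled by 2^16, and the register holds the interval between single-unit corrections, tried first in picoseconds and demoted to nanoseconds when it would not fit in 30 bits. A standalone model of the computation, assuming plain 64-bit division in place of do_div():

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PSEC_PER_SEC	1000000000000ULL
	#define ADJ_FREQ_NS_BIT	(1UL << 30)	/* PTP_CFG_CLK_ADJ_FREQ_NS */

	/* Returns the PTP_CLK_CFG_ADJ_FREQ register value, or 0 to disable. */
	static uint64_t adjfine_reg(long scaled_ppm)
	{
		uint64_t adj;

		if (!scaled_ppm)
			return 0;
		if (scaled_ppm < 0)
			scaled_ppm = -scaled_ppm;	/* direction bit is set elsewhere */

		adj = (PSEC_PER_SEC << 16) / (uint64_t)scaled_ppm / 1000;
		if (adj >= (1UL << 30)) {		/* too large in ps: use ns */
			adj /= 1000;
			if (adj >= (1UL << 30))
				return 0;		/* still too big: disable */
			return ADJ_FREQ_NS_BIT | adj;
		}
		return adj;
	}

	int main(void)
	{
		/* 1 ppm == 65536 scaled_ppm: one correction every 1e9 ps */
		printf("%" PRIu64 "\n", adjfine_reg(65536));
		/* ~0.01 ppm: the ps interval overflows, falls back to ns units */
		printf("%#" PRIx64 "\n", adjfine_reg(655));
		return 0;
	}

For 1 ppm (scaled_ppm = 65536) this yields one correction every 10^9 ps, which still fits the 30-bit picosecond field.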
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.h b/drivers/net/ethernet/mscc/ocelot_ptp.h
deleted file mode 100644
index 9ede14a12573..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_ptp.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * License: Dual MIT/GPL
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_PTP_H_
-#define _MSCC_OCELOT_PTP_H_
-
-#define PTP_PIN_CFG_RSZ 0x20
-#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
-#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
-#define PTP_PIN_TOD_NSEC_RSZ PTP_PIN_CFG_RSZ
-
-#define PTP_PIN_CFG_DOM BIT(0)
-#define PTP_PIN_CFG_SYNC BIT(2)
-#define PTP_PIN_CFG_ACTION(x) ((x) << 3)
-#define PTP_PIN_CFG_ACTION_MASK PTP_PIN_CFG_ACTION(0x7)
-
-enum {
- PTP_PIN_ACTION_IDLE = 0,
- PTP_PIN_ACTION_LOAD,
- PTP_PIN_ACTION_SAVE,
- PTP_PIN_ACTION_CLOCK,
- PTP_PIN_ACTION_DELTA,
- PTP_PIN_ACTION_NOSYNC,
- PTP_PIN_ACTION_SYNC,
-};
-
-#define PTP_CFG_MISC_PTP_EN BIT(2)
-
-#define PSEC_PER_SEC 1000000000000LL
-
-#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0)
-#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1)
-
-#define PTP_CFG_CLK_ADJ_FREQ_NS BIT(30)
-
-#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index 7d4fd1b6adda..81d81ff75646 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -239,6 +239,8 @@ static const u32 ocelot_ptp_regmap[] = {
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
REG(PTP_CFG_MISC, 0x0000a0),
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index d326e231f0ad..b7baf7624e18 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -48,7 +48,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
NL_SET_ERR_MSG_MOD(extack,
- "Only one policer per port is supported\n");
+ "Only one policer per port is supported");
return -EEXIST;
}
@@ -59,7 +59,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
err = ocelot_port_policer_add(ocelot, port, &pol);
if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
return err;
}
@@ -73,7 +73,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
err = ocelot_port_policer_del(ocelot, port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "Could not delete policer\n");
+ "Could not delete policer");
return err;
}
priv->tc.police_id = 0;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 2616fd735aab..e1e1f4e3639e 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1174,18 +1174,6 @@ myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
mb();
}
-static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
-{
- struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
-
- if ((skb->protocol == htons(ETH_P_8021Q)) &&
- (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
- vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
- skb->csum = hw_csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
-}
-
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
int bytes, int watchdog)
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 5e630f3a0189..a82a37094579 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -27,7 +27,7 @@ config S2IO
on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/neterion/s2io.txt>.
+ <file:Documentation/networking/device_drivers/neterion/s2io.rst>.
To compile this driver as a module, choose M here. The module
will be called s2io.
@@ -42,7 +42,7 @@ config VXGE
labeled as either one, depending on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/neterion/vxge.txt>.
+ <file:Documentation/networking/device_drivers/neterion/vxge.rst>.
To compile this driver as a module, choose M here. The module
will be called vxge.
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 354efffac0f9..bdbf0726145e 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -333,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
goto err_free_alink;
alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
- if (!alink->prio_map)
+ if (!alink->prio_map) {
+ err = -ENOMEM;
goto err_free_alink;
+ }
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 1c76e1592ca2..ff844e5cc41f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -209,7 +209,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
NFP_FL_OUT_FLAGS_USE_TUN);
output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
} else if (netif_is_lag_master(out_dev) &&
- priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
int gid;
output->flags = cpu_to_be16(tmp_flags);
@@ -956,7 +956,7 @@ nfp_flower_output_action(struct nfp_app *app,
*a_len += sizeof(struct nfp_fl_output);
- if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
/* nfp_fl_pre_lag returns -err or size of prelag action added.
* This will be 0 if it is not egressing to a lag dev.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index a595ddb92bff..a050cb898782 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -264,7 +264,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_cmsg_portmod_rx(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) {
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE) {
nfp_flower_cmsg_merge_hint_rx(app, skb);
break;
}
@@ -285,7 +285,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_stats_rlim_reply(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
break;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index d8ad9346a26a..c39327677a7d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -665,6 +665,77 @@ err_clear_nn:
return err;
}
+static void nfp_flower_wait_host_bit(struct nfp_app *app)
+{
+ unsigned long err_at;
+ u64 feat;
+ int err;
+
+ /* Wait for HOST_ACK flag bit to propagate */
+ err_at = jiffies + msecs_to_jiffies(100);
+ do {
+ feat = nfp_rtsym_read_le(app->pf->rtbl,
+ "_abi_flower_combined_features_global",
+ &err);
+ if (time_is_before_eq_jiffies(err_at)) {
+ nfp_warn(app->cpp,
+ "HOST_ACK bit not propagated in FW.\n");
+ break;
+ }
+ usleep_range(1000, 2000);
+ } while (!err && !(feat & NFP_FL_FEATS_HOST_ACK));
+
+ if (err)
+ nfp_warn(app->cpp,
+ "Could not read global features entry from FW\n");
+}
+
+static int nfp_flower_sync_feature_bits(struct nfp_app *app)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ int err;
+
+ /* Tell the firmware of the host supported features. */
+ err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_host_mask",
+ app_priv->flower_ext_feats |
+ NFP_FL_FEATS_HOST_ACK);
+ if (!err)
+ nfp_flower_wait_host_bit(app);
+ else if (err != -ENOENT)
+ return err;
+
+ /* Tell the firmware that the driver supports lag. */
+ err = nfp_rtsym_write_le(app->pf->rtbl,
+ "_abi_flower_balance_sync_enable", 1);
+ if (!err) {
+ app_priv->flower_en_feats |= NFP_FL_ENABLE_LAG;
+ nfp_flower_lag_init(&app_priv->nfp_lag);
+ } else if (err == -ENOENT) {
+ nfp_warn(app->cpp, "LAG not supported by FW.\n");
+ } else {
+ return err;
+ }
+
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
+ /* Tell the firmware that the driver supports flow merging. */
+ err = nfp_rtsym_write_le(app->pf->rtbl,
+ "_abi_flower_merge_hint_enable", 1);
+ if (!err) {
+ app_priv->flower_en_feats |= NFP_FL_ENABLE_FLOW_MERGE;
+ nfp_flower_internal_port_init(app_priv);
+ } else if (err == -ENOENT) {
+ nfp_warn(app->cpp,
+ "Flow merge not supported by FW.\n");
+ } else {
+ return err;
+ }
+ } else {
+ nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
+ }
+
+ return 0;
+}
+
static int nfp_flower_init(struct nfp_app *app)
{
u64 version, features, ctx_count, num_mems;
@@ -753,35 +824,15 @@ static int nfp_flower_init(struct nfp_app *app)
if (err)
app_priv->flower_ext_feats = 0;
else
- app_priv->flower_ext_feats = features;
+ app_priv->flower_ext_feats = features & NFP_FL_FEATS_HOST;
- /* Tell the firmware that the driver supports lag. */
- err = nfp_rtsym_write_le(app->pf->rtbl,
- "_abi_flower_balance_sync_enable", 1);
- if (!err) {
- app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
- nfp_flower_lag_init(&app_priv->nfp_lag);
- } else if (err == -ENOENT) {
- nfp_warn(app->cpp, "LAG not supported by FW.\n");
- } else {
- goto err_cleanup_metadata;
- }
+ err = nfp_flower_sync_feature_bits(app);
+ if (err)
+ goto err_cleanup;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
- /* Tell the firmware that the driver supports flow merging. */
- err = nfp_rtsym_write_le(app->pf->rtbl,
- "_abi_flower_merge_hint_enable", 1);
- if (!err) {
- app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
- nfp_flower_internal_port_init(app_priv);
- } else if (err == -ENOENT) {
- nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
- } else {
- goto err_lag_clean;
- }
- } else {
- nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
- }
+ err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
+ if (err)
+ goto err_cleanup;
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_init(app);
@@ -792,10 +843,9 @@ static int nfp_flower_init(struct nfp_app *app)
return 0;
-err_lag_clean:
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+err_cleanup:
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);
-err_cleanup_metadata:
nfp_flower_metadata_cleanup(app);
err_free_app_priv:
vfree(app->priv);
@@ -810,13 +860,16 @@ static void nfp_flower_clean(struct nfp_app *app)
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
+ flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
+ nfp_flower_setup_indr_block_cb);
+
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_cleanup(app);
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE)
nfp_flower_internal_port_cleanup(app_priv);
nfp_flower_metadata_cleanup(app);
@@ -886,7 +939,7 @@ static int nfp_flower_start(struct nfp_app *app)
struct nfp_flower_priv *app_priv = app->priv;
int err;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
err = nfp_flower_lag_reset(&app_priv->nfp_lag);
if (err)
return err;
@@ -907,16 +960,12 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *app_priv = app->priv;
int ret;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
if (ret & NOTIFY_STOP_MASK)
return ret;
}
- ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
- if (ret & NOTIFY_STOP_MASK)
- return ret;
-
ret = nfp_flower_internal_port_event_handler(app, netdev, event);
if (ret & NOTIFY_STOP_MASK)
return ret;
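
nfp_flower_sync_feature_bits() above makes feature negotiation a handshake: the driver writes its supported-feature mask plus NFP_FL_FEATS_HOST_ACK to a firmware symbol, then polls the combined-features symbol for up to 100 ms until the firmware reflects the ack bit back. A userspace model of that polling contract (fake_fw_read() is an invented stand-in for the nfp_rtsym_read_le() call):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FEATS_HOST_ACK	(1ULL << 31)

	static uint64_t fw_combined_features;	/* stands in for the FW rtsym */

	/* Fake firmware: reflects the ack bit on the third poll. */
	static uint64_t fake_fw_read(void)
	{
		static int polls;

		if (++polls >= 3)
			fw_combined_features |= FEATS_HOST_ACK;
		return fw_combined_features;
	}

	static bool wait_host_bit(int max_polls)
	{
		int i;

		for (i = 0; i < max_polls; i++)	/* kernel: 100 ms of 1-2 ms sleeps */
			if (fake_fw_read() & FEATS_HOST_ACK)
				return true;
		return false;	/* kernel: warn "HOST_ACK bit not propagated" */
	}

	int main(void)
	{
		/* the driver writes host_mask | FEATS_HOST_ACK first, then waits */
		printf("acked: %d\n", wait_host_bit(50));
		return 0;
	}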
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index d55d0d33bc45..6c3dc3baf387 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -44,8 +44,20 @@ struct nfp_app;
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
-#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
-#define NFP_FL_FEATS_LAG BIT(31)
+#define NFP_FL_FEATS_HOST_ACK BIT(31)
+
+#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
+#define NFP_FL_ENABLE_LAG BIT(1)
+
+#define NFP_FL_FEATS_HOST \
+ (NFP_FL_FEATS_GENEVE | \
+ NFP_FL_NBI_MTU_SETTING | \
+ NFP_FL_FEATS_GENEVE_OPT | \
+ NFP_FL_FEATS_VLAN_PCP | \
+ NFP_FL_FEATS_VF_RLIM | \
+ NFP_FL_FEATS_FLOW_MOD | \
+ NFP_FL_FEATS_PRE_TUN_RULES | \
+ NFP_FL_FEATS_IPV6_TUN)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -145,6 +157,7 @@ struct nfp_fl_internal_ports {
* @mask_id_seed: Seed used for mask hash table
* @flower_version: HW version of flower
* @flower_ext_feats: Bitmap of extra features the HW supports
+ * @flower_en_feats: Bitmap of features enabled by HW
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
@@ -180,6 +193,7 @@ struct nfp_flower_priv {
u32 mask_id_seed;
u64 flower_version;
u64 flower_ext_feats;
+ u8 flower_en_feats;
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -346,7 +360,7 @@ nfp_flower_internal_port_can_offload(struct nfp_app *app,
{
struct nfp_flower_priv *app_priv = app->priv;
- if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE))
+ if (!(app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE))
return false;
if (!netdev->rtnl_link_ops)
return false;
@@ -444,9 +458,10 @@ void nfp_flower_qos_cleanup(struct nfp_app *app);
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_matchall_offload *flow);
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
- struct net_device *netdev,
- unsigned long event);
+int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data);
+int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 546bc01d507d..f7f01e2e3dce 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -74,9 +74,10 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
-static void
+static int
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
- struct nfp_flower_mac_mpls *msk, struct flow_rule *rule)
+ struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
+ struct netlink_ext_ack *extack)
{
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -97,14 +98,28 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
u32 t_mpls;
flow_rule_match_mpls(rule, &match);
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
+
+ /* Only support matching the first LSE */
+ if (match.mask->used_lses != 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: invalid LSE depth for MPLS match offload");
+ return -EOPNOTSUPP;
+ }
+
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+ match.key->ls[0].mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+ match.key->ls[0].mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+ match.key->ls[0].mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
ext->mpls_lse = cpu_to_be32(t_mpls);
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+ match.mask->ls[0].mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+ match.mask->ls[0].mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+ match.mask->ls[0].mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
msk->mpls_lse = cpu_to_be32(t_mpls);
} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -121,6 +136,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
}
}
+
+ return 0;
}
static void
@@ -461,9 +478,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_in_port);
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
- nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
- (struct nfp_flower_mac_mpls *)msk,
- rule);
+ err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
+ (struct nfp_flower_mac_mpls *)msk,
+ rule, extack);
+ if (err)
+ return err;
+
ext += sizeof(struct nfp_flower_mac_mpls);
msk += sizeof(struct nfp_flower_mac_mpls);
}
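
The MPLS rework above offloads only the first label stack entry (match.mask->used_lses must be 1) and packs it with FIELD_PREP(). A standard MPLS LSE is laid out as Label[31:12], TC[11:9], BOS[8], TTL[7:0]; assuming the NFP_FLOWER_MASK_MPLS_* masks follow that layout (the mask values are not shown in this hunk), the packing reduces to:

	#include <stdint.h>
	#include <stdio.h>

	#define MPLS_LB_SHIFT	12	/* Label[31:12] */
	#define MPLS_TC_SHIFT	9	/* TC[11:9] */
	#define MPLS_BOS_SHIFT	8	/* Bottom-of-stack[8] */
	#define MPLS_Q_BIT	(1u << 0)	/* "MPLS present" marker, assumed BIT(0) */

	static uint32_t pack_mpls_lse(uint32_t label, uint8_t tc, uint8_t bos)
	{
		return (label << MPLS_LB_SHIFT) |
		       ((uint32_t)tc << MPLS_TC_SHIFT) |
		       ((uint32_t)bos << MPLS_BOS_SHIFT) |
		       MPLS_Q_BIT;	/* driver applies cpu_to_be32() afterwards */
	}

	int main(void)
	{
		/* label 100, TC 5, bottom of stack */
		printf("%#010x\n", pack_mpls_lse(100, 5, 1));
		return 0;
	}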
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index c694dbc239d0..695d24b9dd92 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1440,7 +1440,8 @@ __nfp_flower_update_merge_stats(struct nfp_app *app,
ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
priv->stats[ctx_id].pkts += pkts;
priv->stats[ctx_id].bytes += bytes;
- max_t(u64, priv->stats[ctx_id].used, used);
+ priv->stats[ctx_id].used = max_t(u64, used,
+ priv->stats[ctx_id].used);
}
}
@@ -1618,8 +1619,8 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
return NULL;
}
-static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
struct flow_cls_offload *flower = type_data;
@@ -1707,10 +1708,13 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
return 0;
}
-static int
+int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
enum tc_setup_type type, void *type_data)
{
+ if (!nfp_fl_is_netdev_to_offload(netdev))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_BLOCK:
return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
@@ -1719,29 +1723,3 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
return -EOPNOTSUPP;
}
}
-
-int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
- struct net_device *netdev,
- unsigned long event)
-{
- int err;
-
- if (!nfp_fl_is_netdev_to_offload(netdev))
- return NOTIFY_OK;
-
- if (event == NETDEV_REGISTER) {
- err = __flow_indr_block_cb_register(netdev, app,
- nfp_flower_indr_setup_tc_cb,
- app);
- if (err)
- nfp_flower_cmsg_warn(app,
- "Indirect block reg failed - %s\n",
- netdev->name);
- } else if (event == NETDEV_UNREGISTER) {
- __flow_indr_block_cb_unregister(netdev,
- nfp_flower_indr_setup_tc_cb,
- app);
- }
-
- return NOTIFY_OK;
-}
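
The one-line fix in __nfp_flower_update_merge_stats() above corrects a classic mistake: max_t() only computes a value, so calling it for its side effect is a no-op and the stats "used" timestamp never advanced. A tiny demonstration, with max_t modelled as a plain macro:

	#include <stdint.h>
	#include <stdio.h>

	#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		uint64_t used = 100, new_used = 250;

		max_t(uint64_t, used, new_used);	/* result discarded: the bug */
		printf("buggy: %llu\n", (unsigned long long)used);	/* 100 */

		used = max_t(uint64_t, used, new_used);	/* fixed: assign it */
		printf("fixed: %llu\n", (unsigned long long)used);	/* 250 */
		return 0;
	}

The buggy call typically draws a -Wunused-value warning, which is how bugs of this shape are often caught.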
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 4d282fc56009..7ff2ccbd43b0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -14,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/firmware.h>
-#include <linux/vermagic.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
@@ -31,7 +30,6 @@
#include "nfp_net.h"
static const char nfp_driver_name[] = "nfp";
-const char nfp_driver_version[] = VERMAGIC_STRING;
static const struct pci_device_id nfp_pci_device_ids[] = {
{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
@@ -920,4 +918,3 @@ MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw");
MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
-MODULE_VERSION(UTS_RELEASE);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9bfb3b077bc1..0e0cc3d58bdc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1741,10 +1741,15 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
unsigned int pkt_len, bool *completed)
{
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
int wr_idx;
+	/* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
+ if (pkt_len + dma_off > dma_map_sz)
+ return false;
+
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
if (!*completed) {
nfp_net_xdp_complete(tx_ring);
@@ -1817,6 +1822,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
xdp.rxq = &rx_ring->xdp_rxq;
tx_ring = r_vec->xdp_ring;
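
The check added to nfp_net_tx_xdp_buf() above guards against BPF programs that grow a frame with bpf_xdp_adjust_tail(): only fl_bufsz minus the non-data overhead is DMA-mapped, and nothing past it may be transmitted. A sketch of the predicate under made-up buffer-layout numbers (not the driver's real sizes):

	#include <stdbool.h>
	#include <stdio.h>

	/* Reject frames that xdp_adjust_tail grew past the DMA-mapped area. */
	static bool xdp_tx_fits(unsigned int fl_bufsz, unsigned int non_data,
				unsigned int dma_off, unsigned int pkt_len)
	{
		unsigned int dma_map_sz = fl_bufsz - non_data;

		return pkt_len + dma_off <= dma_map_sz;
	}

	int main(void)
	{
		printf("%d\n", xdp_tx_fits(2048, 256, 128, 1500));	/* 1: fits */
		printf("%d\n", xdp_tx_fits(2048, 256, 128, 1700));	/* 0: reject */
		return 0;
	}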
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2779f1526d1e..6eb9fb9a1814 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -203,8 +203,6 @@ nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
char nsp_version[ETHTOOL_FWVERS_LEN] = {};
strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
-
nfp_net_get_nspinfo(app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%s %s %s %s", vnic_version, nsp_version,
@@ -1440,8 +1438,7 @@ static int nfp_net_set_channels(struct net_device *netdev,
unsigned int total_rx, total_tx;
/* Reject unsupported */
- if (!channel->combined_count ||
- channel->other_count != NFP_NET_NON_Q_VECTORS ||
+ if (channel->other_count != NFP_NET_NON_Q_VECTORS ||
(channel->rx_count && channel->tx_count))
return -EINVAL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 79d72c88bbef..b3cabc274121 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -299,6 +299,20 @@ static void nfp_repr_clean(struct nfp_repr *repr)
nfp_port_free(repr->port);
}
+static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
+
+static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
+}
+
+static void nfp_repr_set_lockdep_class(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
+}
+
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev)
@@ -308,6 +322,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 repr_cap = nn->tlv_caps.repr_cap;
int err;
+ nfp_repr_set_lockdep_class(netdev);
+
repr->port = port;
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
if (!repr->dst)
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 2fdd0753b3af..d2708a57f2ff 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -502,7 +502,8 @@ static int nixge_check_tx_bd_space(struct nixge_priv *priv,
return 0;
}
-static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct nixge_hw_dma_bd *cur_p;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index d20cf03a3ea0..d3cbb4215f5c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -823,7 +823,8 @@ static int lpc_mii_init(struct netdata_local *pldat)
if (err)
goto err_out_unregister_bus;
- if (lpc_mii_probe(pldat->ndev) != 0)
+ err = lpc_mii_probe(pldat->ndev);
+ if (err)
goto err_out_unregister_bus;
return 0;
@@ -1029,7 +1030,8 @@ static int lpc_eth_close(struct net_device *ndev)
return 0;
}
-static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
u32 len, txidx;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index f4ae40ae1e53..d83eff0ae0ac 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -388,6 +388,19 @@ int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
}
/* LIF commands */
+void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
+ u16 lif_type, u8 qtype, u8 qver)
+{
+ union ionic_dev_cmd cmd = {
+ .q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
+ .q_identify.lif_type = lif_type,
+ .q_identify.type = qtype,
+ .q_identify.ver = qver,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
union ionic_dev_cmd cmd = {
@@ -431,6 +444,7 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
.q_init.opcode = IONIC_CMD_Q_INIT,
.q_init.lif_index = cpu_to_le16(lif_index),
.q_init.type = q->type,
+ .q_init.ver = qcq->q.lif->qtype_info[q->type].version,
.q_init.index = cpu_to_le32(q->index),
.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_ENA),
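
ionic_dev_cmd_queue_identify() above follows the ionic convention of building a 64-byte union command with designated initializers and handing it to ionic_dev_cmd_go(). A compact model of that pattern (the field layout and sizes here are illustrative, not the real ionic_if.h layout):

	#include <stdint.h>
	#include <stdio.h>

	struct q_identify_cmd {
		uint8_t  opcode;
		uint8_t  rsvd;
		uint16_t lif_type;
		uint8_t  type;
		uint8_t  ver;
	};

	union dev_cmd {
		struct q_identify_cmd q_identify;
		uint32_t words[16];	/* 64-byte device register window */
	};

	static void dev_cmd_go(const union dev_cmd *cmd)
	{
		/* the driver copies words[] into the device command registers */
		printf("opcode %u type %u ver %u\n",
		       (unsigned)cmd->q_identify.opcode,
		       (unsigned)cmd->q_identify.type,
		       (unsigned)cmd->q_identify.ver);
	}

	int main(void)
	{
		union dev_cmd cmd = {
			.q_identify.opcode = 39,	/* IONIC_CMD_Q_IDENTIFY */
			.q_identify.type = 3,		/* e.g. a TX queue type */
			.q_identify.ver = 0,
		};

		dev_cmd_go(&cmd);
		return 0;
	}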
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 587398b01997..525434f10025 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,7 +12,8 @@
#define IONIC_MIN_MTU ETH_MIN_MTU
#define IONIC_MAX_MTU 9194
-#define IONIC_MAX_TXRX_DESC 16384
+#define IONIC_MAX_TX_DESC 8192
+#define IONIC_MAX_RX_DESC 16384
#define IONIC_MIN_TXRX_DESC 16
#define IONIC_DEF_TXRX_DESC 4096
#define IONIC_LIFS_MAX 1024
@@ -83,6 +84,8 @@ static_assert(sizeof(struct ionic_q_init_cmd) == 64);
static_assert(sizeof(struct ionic_q_init_comp) == 16);
static_assert(sizeof(struct ionic_q_control_cmd) == 64);
static_assert(sizeof(ionic_q_control_comp) == 16);
+static_assert(sizeof(struct ionic_q_identify_cmd) == 64);
+static_assert(sizeof(struct ionic_q_identify_comp) == 16);
static_assert(sizeof(struct ionic_rx_mode_set_cmd) == 64);
static_assert(sizeof(ionic_rx_mode_set_comp) == 16);
@@ -179,7 +182,7 @@ struct ionic_desc_info {
void *cb_arg;
};
-#define QUEUE_NAME_MAX_SZ 32
+#define IONIC_QUEUE_NAME_MAX_SZ 32
struct ionic_queue {
u64 dbell_count;
@@ -204,14 +207,14 @@ struct ionic_queue {
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
- char name[QUEUE_NAME_MAX_SZ];
+ char name[IONIC_QUEUE_NAME_MAX_SZ];
};
-#define INTR_INDEX_NOT_ASSIGNED -1
-#define INTR_NAME_MAX_SZ 32
+#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
+#define IONIC_INTR_NAME_MAX_SZ 32
struct ionic_intr_info {
- char name[INTR_NAME_MAX_SZ];
+ char name[IONIC_INTR_NAME_MAX_SZ];
unsigned int index;
unsigned int vector;
u64 rearm_count;
@@ -283,6 +286,8 @@ void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
+void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
+ u16 lif_type, u8 qtype, u8 qver);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
dma_addr_t addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 6996229facfd..f7e3ce3de04d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -12,10 +12,11 @@
#include "ionic_stats.h"
static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define PRIV_F_SW_DBG_STATS BIT(0)
+#define IONIC_PRIV_F_SW_DBG_STATS BIT(0)
"sw-dbg-stats",
};
-#define PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
+
+#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
@@ -58,7 +59,7 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset)
count = ionic_get_stats_count(lif);
break;
case ETH_SS_PRIV_FLAGS:
- count = PRIV_FLAGS_COUNT;
+ count = IONIC_PRIV_FLAGS_COUNT;
break;
}
return count;
@@ -75,7 +76,7 @@ static void ionic_get_strings(struct net_device *netdev,
break;
case ETH_SS_PRIV_FLAGS:
memcpy(buf, ionic_priv_flags_strings,
- PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
+ IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
break;
}
}
@@ -159,6 +160,8 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
break;
+ case IONIC_XCVR_PID_QSFP_100G_CWDM4:
+ case IONIC_XCVR_PID_QSFP_100G_PSM4:
case IONIC_XCVR_PID_QSFP_100G_LR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
@@ -178,6 +181,7 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
break;
case IONIC_XCVR_PID_SFP_25GBASE_SR:
case IONIC_XCVR_PID_SFP_25GBASE_AOC:
+ case IONIC_XCVR_PID_SFP_25GBASE_ACC:
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
@@ -458,9 +462,9 @@ static void ionic_get_ringparam(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
- ring->tx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->tx_max_pending = IONIC_MAX_TX_DESC;
ring->tx_pending = lif->ntxq_descs;
- ring->rx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->rx_max_pending = IONIC_MAX_RX_DESC;
ring->rx_pending = lif->nrxq_descs;
}
@@ -554,7 +558,7 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
u32 priv_flags = 0;
if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- priv_flags |= PRIV_F_SW_DBG_STATS;
+ priv_flags |= IONIC_PRIV_F_SW_DBG_STATS;
return priv_flags;
}
@@ -564,7 +568,7 @@ static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
struct ionic_lif *lif = netdev_priv(netdev);
clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
- if (priv_flags & PRIV_F_SW_DBG_STATS)
+ if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS)
set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
return 0;
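
The renamed ionic ethtool defines above implement the usual private-flags pattern: one bit in the ethtool u32 maps to one bit of driver state. A minimal model of the get/set pair, with the kernel's test_bit()/set_bit() state replaced by a plain bitmask:

	#include <stdint.h>
	#include <stdio.h>

	#define IONIC_PRIV_F_SW_DBG_STATS	(1u << 0)	/* ethtool flag bit */
	#define LIF_F_SW_DEBUG_STATS		(1u << 0)	/* driver state bit */

	static uint32_t get_priv_flags(uint32_t lif_state)
	{
		uint32_t priv_flags = 0;

		if (lif_state & LIF_F_SW_DEBUG_STATS)
			priv_flags |= IONIC_PRIV_F_SW_DBG_STATS;
		return priv_flags;
	}

	static void set_priv_flags(uint32_t *lif_state, uint32_t priv_flags)
	{
		*lif_state &= ~LIF_F_SW_DEBUG_STATS;	/* clear, then re-apply */
		if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS)
			*lif_state |= LIF_F_SW_DEBUG_STATS;
	}

	int main(void)
	{
		uint32_t state = 0;

		set_priv_flags(&state, IONIC_PRIV_F_SW_DBG_STATS);
		printf("flags: %#x\n", get_priv_flags(state));	/* 0x1 */
		return 0;
	}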
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index ceeb7629e7a0..7e22ba4ed915 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
-/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */
+/* Copyright (c) 2017-2020 Pensando Systems, Inc. All rights reserved. */
#ifndef _IONIC_IF_H_
#define _IONIC_IF_H_
@@ -9,7 +9,7 @@
#define IONIC_IFNAMSIZ 16
/**
- * Commands
+ * enum ionic_cmd_opcode - Device commands
*/
enum ionic_cmd_opcode {
IONIC_CMD_NOP = 0,
@@ -40,6 +40,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_RX_FILTER_DEL = 32,
/* Queue commands */
+ IONIC_CMD_Q_IDENTIFY = 39,
IONIC_CMD_Q_INIT = 40,
IONIC_CMD_Q_CONTROL = 41,
@@ -57,6 +58,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
IONIC_CMD_QOS_CLASS_RESET = 242,
+ IONIC_CMD_QOS_CLASS_UPDATE = 243,
/* Firmware commands */
IONIC_CMD_FW_DOWNLOAD = 254,
@@ -64,7 +66,7 @@ enum ionic_cmd_opcode {
};
/**
- * Command Return codes
+ * enum ionic_status_code - Device command return codes
*/
enum ionic_status_code {
IONIC_RC_SUCCESS = 0, /* Success */
@@ -97,6 +99,7 @@ enum ionic_notifyq_opcode {
IONIC_EVENT_RESET = 2,
IONIC_EVENT_HEARTBEAT = 3,
IONIC_EVENT_LOG = 4,
+ IONIC_EVENT_XCVR = 5,
};
/**
@@ -114,12 +117,11 @@ struct ionic_admin_cmd {
/**
* struct ionic_admin_comp - General admin command completion format
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @cmd_data: Command-specific bytes.
- * @color: Color bit. (Always 0 for commands issued to the
- * Device Cmd Registers.)
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @cmd_data: Command-specific bytes
+ * @color: Color bit (Always 0 for commands issued to the
+ * Device Cmd Registers)
*/
struct ionic_admin_comp {
u8 status;
@@ -146,7 +148,7 @@ struct ionic_nop_cmd {
/**
* struct ionic_nop_comp - NOP command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_nop_comp {
u8 status;
@@ -156,7 +158,7 @@ struct ionic_nop_comp {
/**
* struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
- * @type: device type
+ * @type: Device type
*/
struct ionic_dev_init_cmd {
u8 opcode;
@@ -166,7 +168,7 @@ struct ionic_dev_init_cmd {
/**
* struct init_comp - Device init command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_dev_init_comp {
u8 status;
@@ -184,7 +186,7 @@ struct ionic_dev_reset_cmd {
/**
* struct reset_comp - Reset command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_dev_reset_comp {
u8 status;
@@ -205,8 +207,8 @@ struct ionic_dev_identify_cmd {
};
/**
- * struct dev_identify_comp - Driver/device identify command completion
- * @status: The status of the command (enum status_code)
+ * struct ionic_dev_identify_comp - Driver/device identify command completion
+ * @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
*/
struct ionic_dev_identify_comp {
@@ -225,8 +227,8 @@ enum ionic_os_type {
};
/**
- * union drv_identity - driver identity information
- * @os_type: OS type (see enum os_type)
+ * union ionic_drv_identity - driver identity information
+ * @os_type: OS type (see enum ionic_os_type)
* @os_dist: OS distribution, numeric format
* @os_dist_str: OS distribution, string format
* @kernel_ver: Kernel version, numeric format
@@ -242,26 +244,26 @@ union ionic_drv_identity {
char kernel_ver_str[32];
char driver_ver_str[32];
};
- __le32 words[512];
+ __le32 words[478];
};
/**
- * union dev_identity - device identity information
+ * union ionic_dev_identity - device identity information
* @version: Version of device identify
* @type: Identify type (0 for now)
* @nports: Number of ports provisioned
* @nlifs: Number of LIFs provisioned
* @nintrs: Number of interrupts provisioned
* @ndbpgs_per_lif: Number of doorbell pages per LIF
- * @intr_coal_mult: Interrupt coalescing multiplication factor.
+ * @intr_coal_mult: Interrupt coalescing multiplication factor
* Scale user-supplied interrupt coalescing
* value in usecs to device units using:
* device units = usecs * mult / div
- * @intr_coal_div: Interrupt coalescing division factor.
+ * @intr_coal_div: Interrupt coalescing division factor
* Scale user-supplied interrupt coalescing
* value in usecs to device units using:
* device units = usecs * mult / div
- *
+ * @eq_count: Number of shared event queues
*/
union ionic_dev_identity {
struct {
@@ -275,8 +277,9 @@ union ionic_dev_identity {
__le32 ndbpgs_per_lif;
__le32 intr_coal_mult;
__le32 intr_coal_div;
+ __le32 eq_count;
};
- __le32 words[512];
+ __le32 words[478];
};
enum ionic_lif_type {
@@ -286,10 +289,10 @@ enum ionic_lif_type {
};
/**
- * struct ionic_lif_identify_cmd - lif identify command
+ * struct ionic_lif_identify_cmd - LIF identify command
* @opcode: opcode
- * @type: lif type (enum lif_type)
- * @ver: version of identify returned by device
+ * @type: LIF type (enum ionic_lif_type)
+ * @ver: Version of identify returned by device
*/
struct ionic_lif_identify_cmd {
u8 opcode;
@@ -299,9 +302,9 @@ struct ionic_lif_identify_cmd {
};
/**
- * struct ionic_lif_identify_comp - lif identify command completion
- * @status: status of the command (enum status_code)
- * @ver: version of identify returned by device
+ * struct ionic_lif_identify_comp - LIF identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver: Version of identify returned by device
*/
struct ionic_lif_identify_comp {
u8 status;
@@ -309,13 +312,24 @@ struct ionic_lif_identify_comp {
u8 rsvd2[14];
};
+/**
+ * enum ionic_lif_capability - LIF capabilities
+ * @IONIC_LIF_CAP_ETH: LIF supports Ethernet
+ * @IONIC_LIF_CAP_RDMA: LIF supports RDMA
+ */
enum ionic_lif_capability {
IONIC_LIF_CAP_ETH = BIT(0),
IONIC_LIF_CAP_RDMA = BIT(1),
};
/**
- * Logical Queue Types
+ * enum ionic_logical_qtype - Logical Queue Types
+ * @IONIC_QTYPE_ADMINQ: Administrative Queue
+ * @IONIC_QTYPE_NOTIFYQ: Notify Queue
+ * @IONIC_QTYPE_RXQ: Receive Queue
+ * @IONIC_QTYPE_TXQ: Transmit Queue
+ * @IONIC_QTYPE_EQ: Event Queue
+ * @IONIC_QTYPE_MAX: Max queue type supported
*/
enum ionic_logical_qtype {
IONIC_QTYPE_ADMINQ = 0,
@@ -327,10 +341,10 @@ enum ionic_logical_qtype {
};
/**
- * struct ionic_lif_logical_qtype - Descriptor of logical to hardware queue type.
- * @qtype: Hardware Queue Type.
- * @qid_count: Number of Queue IDs of the logical type.
- * @qid_base: Minimum Queue ID of the logical type.
+ * struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type
+ * @qtype: Hardware Queue Type
+ * @qid_count: Number of Queue IDs of the logical type
+ * @qid_base: Minimum Queue ID of the logical type
*/
struct ionic_lif_logical_qtype {
u8 qtype;
@@ -339,6 +353,12 @@ struct ionic_lif_logical_qtype {
__le32 qid_base;
};
+/**
+ * enum ionic_lif_state - LIF state
+ * @IONIC_LIF_DISABLE: LIF disabled
+ * @IONIC_LIF_ENABLE: LIF enabled
+ * @IONIC_LIF_HANG_RESET: LIF hung, being reset
+ */
enum ionic_lif_state {
IONIC_LIF_DISABLE = 0,
IONIC_LIF_ENABLE = 1,
@@ -346,13 +366,13 @@ enum ionic_lif_state {
};
/**
- * LIF configuration
- * @state: lif state (enum lif_state)
- * @name: lif name
- * @mtu: mtu
- * @mac: station mac address
- * @features: features (enum ionic_eth_hw_features)
- * @queue_count: queue counts per queue-type
+ * union ionic_lif_config - LIF configuration
+ * @state: LIF state (enum ionic_lif_state)
+ * @name: LIF name
+ * @mtu: MTU
+ * @mac: Station MAC address
+ * @features: Features (enum ionic_eth_hw_features)
+ * @queue_count: Queue counts per queue-type
*/
union ionic_lif_config {
struct {
@@ -369,37 +389,36 @@ union ionic_lif_config {
};
/**
- * struct ionic_lif_identity - lif identity information (type-specific)
+ * struct ionic_lif_identity - LIF identity information (type-specific)
*
- * @capabilities LIF capabilities
+ * @capabilities: LIF capabilities
*
- * Ethernet:
- * @version: Ethernet identify structure version.
- * @features: Ethernet features supported on this lif type.
- * @max_ucast_filters: Number of perfect unicast addresses supported.
- * @max_mcast_filters: Number of perfect multicast addresses supported.
- * @min_frame_size: Minimum size of frames to be sent
- * @max_frame_size: Maximim size of frames to be sent
- * @config: LIF config struct with features, mtu, mac, q counts
+ * @eth: Ethernet identify structure
+ * @version: Ethernet identify structure version
+ * @max_ucast_filters: Number of perfect unicast addresses supported
+ * @max_mcast_filters: Number of perfect multicast addresses supported
+ * @min_frame_size: Minimum size of frames to be sent
+ * @max_frame_size: Maximum size of frames to be sent
+ * @config: LIF config struct with features, mtu, mac, q counts
*
- * RDMA:
- * @version: RDMA version of opcodes and queue descriptors.
- * @qp_opcodes: Number of rdma queue pair opcodes supported.
- * @admin_opcodes: Number of rdma admin opcodes supported.
- * @npts_per_lif: Page table size per lif
- * @nmrs_per_lif: Number of memory regions per lif
- * @nahs_per_lif: Number of address handles per lif
- * @max_stride: Max work request stride.
- * @cl_stride: Cache line stride.
- * @pte_stride: Page table entry stride.
- * @rrq_stride: Remote RQ work request stride.
- * @rsq_stride: Remote SQ work request stride.
+ * @rdma: RDMA identify structure
+ * @version: RDMA version of opcodes and queue descriptors
+ * @qp_opcodes: Number of RDMA queue pair opcodes supported
+ * @admin_opcodes: Number of RDMA admin opcodes supported
+ * @npts_per_lif: Page table size per LIF
+ * @nmrs_per_lif: Number of memory regions per LIF
+ * @nahs_per_lif: Number of address handles per LIF
+ * @max_stride: Max work request stride
+ * @cl_stride: Cache line stride
+ * @pte_stride: Page table entry stride
+ * @rrq_stride: Remote RQ work request stride
+ * @rsq_stride: Remote SQ work request stride
* @dcqcn_profiles: Number of DCQCN profiles
- * @aq_qtype: RDMA Admin Qtype.
- * @sq_qtype: RDMA Send Qtype.
- * @rq_qtype: RDMA Receive Qtype.
- * @cq_qtype: RDMA Completion Qtype.
- * @eq_qtype: RDMA Event Qtype.
+ * @aq_qtype: RDMA Admin Qtype
+ * @sq_qtype: RDMA Send Qtype
+ * @rq_qtype: RDMA Receive Qtype
+ * @cq_qtype: RDMA Completion Qtype
+ * @eq_qtype: RDMA Event Qtype
*/
union ionic_lif_identity {
struct {
@@ -439,15 +458,15 @@ union ionic_lif_identity {
struct ionic_lif_logical_qtype eq_qtype;
} __packed rdma;
} __packed;
- __le32 words[512];
+ __le32 words[478];
};
/**
* struct ionic_lif_init_cmd - LIF init command
- * @opcode: opcode
- * @type: LIF type (enum lif_type)
+ * @opcode: Opcode
+ * @type: LIF type (enum ionic_lif_type)
* @index: LIF index
- * @info_pa: destination address for lif info (struct ionic_lif_info)
+ * @info_pa: Destination address for LIF info (struct ionic_lif_info)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -460,7 +479,8 @@ struct ionic_lif_init_cmd {
/**
* struct ionic_lif_init_comp - LIF init command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
+ * @hw_index: Hardware index of the initialized LIF
*/
struct ionic_lif_init_comp {
u8 status;
@@ -469,14 +489,74 @@ struct ionic_lif_init_comp {
u8 rsvd2[12];
};
+/**
+ * struct ionic_q_identify_cmd - queue identify command
+ * @opcode: opcode
+ * @lif_type: LIF type (enum ionic_lif_type)
+ * @type: Logical queue type (enum ionic_logical_qtype)
+ * @ver: Highest queue type version that the driver supports
+ */
+struct ionic_q_identify_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_type;
+ u8 type;
+ u8 ver;
+ u8 rsvd2[58];
+};
+
+/**
+ * struct ionic_q_identify_comp - queue identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @ver: Queue type version that can be used with FW
+ */
+struct ionic_q_identify_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 ver;
+ u8 rsvd2[11];
+};
+
+/**
+ * union ionic_q_identity - queue identity information
+ * @version: Queue type version that can be used with FW
+ * @supported: Bitfield of queue versions, first bit = ver 0
+ * @features: Queue features
+ * @desc_sz: Descriptor size
+ * @comp_sz: Completion descriptor size
+ * @sg_desc_sz: Scatter/Gather descriptor size
+ * @max_sg_elems: Maximum number of Scatter/Gather elements
+ * @sg_desc_stride: Number of Scatter/Gather elements per descriptor
+ */
+union ionic_q_identity {
+ struct {
+ u8 version;
+ u8 supported;
+ u8 rsvd[6];
+#define IONIC_QIDENT_F_CQ 0x01 /* queue has completion ring */
+#define IONIC_QIDENT_F_SG 0x02 /* queue has scatter/gather ring */
+#define IONIC_QIDENT_F_EQ 0x04 /* queue can use event queue */
+#define IONIC_QIDENT_F_CMB 0x08 /* queue is in cmb bar */
+ __le64 features;
+ __le16 desc_sz;
+ __le16 comp_sz;
+ __le16 sg_desc_sz;
+ __le16 max_sg_elems;
+ __le16 sg_desc_stride;
+ };
+ __le32 words[478];
+};
+
/**
* struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
* @type: Logical queue type
- * @ver: Queue version (defines opcode/descriptor scope)
+ * @ver: Queue type version
* @lif_index: LIF index
- * @index: (lif, qtype) relative admin queue index
- * @intr_index: Interrupt control register index
+ * @index: (LIF, qtype) relative admin queue index
+ * @intr_index: Interrupt control register index, or Event queue index
* @pid: Process ID
* @flags:
* IRQ: Interrupt requested on completion
@@ -494,12 +574,11 @@ struct ionic_lif_init_comp {
* descriptors. Values of ring_size <2 and >16 are
* reserved.
* EQ: Enable the Event Queue
- * @cos: Class of service for this queue.
+ * @cos: Class of service for this queue
* @ring_size: Queue ring size, encoded as a log2(size)
* @ring_base: Queue ring base address
* @cq_ring_base: Completion queue ring base address
* @sg_ring_base: Scatter/Gather ring base address
- * @eq_index: Event queue index
*/
struct ionic_q_init_cmd {
u8 opcode;
@@ -516,29 +595,27 @@ struct ionic_q_init_cmd {
#define IONIC_QINIT_F_ENA 0x02 /* Enable the queue */
#define IONIC_QINIT_F_SG 0x04 /* Enable scatter/gather on the queue */
#define IONIC_QINIT_F_EQ 0x08 /* Enable event queue */
-#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
+#define IONIC_QINIT_F_CMB 0x10 /* Enable cmb-based queue */
+#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
u8 cos;
u8 ring_size;
__le64 ring_base;
__le64 cq_ring_base;
__le64 sg_ring_base;
- __le32 eq_index;
- u8 rsvd2[16];
+ u8 rsvd2[20];
} __packed;
/**
* struct ionic_q_init_comp - Queue init command completion
- * @status: The status of the command (enum status_code)
- * @ver: Queue version (defines opcode/descriptor scope)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @hw_index: Hardware Queue ID
* @hw_type: Hardware Queue type
* @color: Color
*/
struct ionic_q_init_comp {
u8 status;
- u8 ver;
+ u8 rsvd;
__le16 comp_index;
__le32 hw_index;
u8 hw_type;
@@ -559,10 +636,9 @@ enum ionic_txq_desc_opcode {
/**
* struct ionic_txq_desc - Ethernet Tx queue descriptor format
- * @opcode: Tx operation, see TXQ_DESC_OPCODE_*:
+ * @cmd: Tx operation, see IONIC_TXQ_DESC_OPCODE_*:
*
* IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
- *
* Non-offload send. No segmentation,
* fragmentation or checksum calc/insertion is
* performed by device; packet is prepared
@@ -570,7 +646,6 @@ enum ionic_txq_desc_opcode {
* no further manipulation from device.
*
* IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL:
- *
* Offload 16-bit L4 checksum
* calculation/insertion. The device will
* calculate the L4 checksum value and
@@ -579,14 +654,16 @@ enum ionic_txq_desc_opcode {
* is calculated starting at @csum_start bytes
* into the packet to the end of the packet.
* The checksum insertion position is given
- * in @csum_offset. This feature is only
- * applicable to protocols such as TCP, UDP
- * and ICMP where a standard (i.e. the
- * 'IP-style' checksum) one's complement
- * 16-bit checksum is used, using an IP
- * pseudo-header to seed the calculation.
- * Software will preload the L4 checksum
- * field with the IP pseudo-header checksum.
+ * in @csum_offset, which is the offset from
+ * @csum_start to the checksum field in the L4
+ * header. This feature is only applicable to
+ * protocols such as TCP, UDP and ICMP where a
+ * standard (i.e. the 'IP-style' checksum)
+ * one's complement 16-bit checksum is used,
+ * using an IP pseudo-header to seed the
+ * calculation. Software will preload the L4
+ * checksum field with the IP pseudo-header
+ * checksum.
*
* For tunnel encapsulation, @csum_start and
* @csum_offset refer to the inner L4
@@ -602,7 +679,6 @@ enum ionic_txq_desc_opcode {
* for more info).
*
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
- *
* Offload 16-bit checksum computation to hardware.
* If @csum_l3 is set then the packet's L3 checksum is
 * updated. Similarly, if @csum_l4 is set then the L4
@@ -610,7 +686,6 @@ enum ionic_txq_desc_opcode {
* checksums are also updated.
*
* IONIC_TXQ_DESC_OPCODE_TSO:
- *
 * Device performs TCP segmentation offload
* (TSO). @hdr_len is the number of bytes
* to the end of TCP header (the offset to
@@ -637,40 +712,41 @@ enum ionic_txq_desc_opcode {
* clear CWR in remaining segments.
* @flags:
* vlan:
- * Insert an L2 VLAN header using @vlan_tci.
+ * Insert an L2 VLAN header using @vlan_tci
* encap:
- * Calculate encap header checksum.
+ * Calculate encap header checksum
* csum_l3:
- * Compute L3 header checksum.
+ * Compute L3 header checksum
* csum_l4:
- * Compute L4 header checksum.
+ * Compute L4 header checksum
* tso_sot:
* TSO start
* tso_eot:
* TSO end
* @num_sg_elems: Number of scatter-gather elements in SG
* descriptor
- * @addr: First data buffer's DMA address.
- * (Subsequent data buffers are on txq_sg_desc).
+ * @addr: First data buffer's DMA address
+ * (Subsequent data buffers are on txq_sg_desc)
* @len: First data buffer's length, in bytes
* @vlan_tci: VLAN tag to insert in the packet (if requested
* by @V-bit). Includes .1p and .1q tags
* @hdr_len: Length of packet headers, including
- * encapsulating outer header, if applicable.
- * Valid for opcodes TXQ_DESC_OPCODE_CALC_CSUM and
- * TXQ_DESC_OPCODE_TSO. Should be set to zero for
+ * encapsulating outer header, if applicable.
+ * Valid for opcodes IONIC_TXQ_DESC_OPCODE_CALC_CSUM and
+ * IONIC_TXQ_DESC_OPCODE_TSO. Should be set to zero for
* all other modes. For
- * TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
+ * IONIC_TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
* of headers up to inner-most L4 header. For
- * TXQ_DESC_OPCODE_TSO, @hdr_len is up to
+ * IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to
* inner-most L4 payload, so inclusive of
* inner-most L4 header.
- * @mss: Desired MSS value for TSO. Only applicable for
- * TXQ_DESC_OPCODE_TSO.
- * @csum_start: Offset into inner-most L3 header of checksum
- * @csum_offset: Offset into inner-most L4 header of checksum
+ * @mss: Desired MSS value for TSO; only applicable for
+ * IONIC_TXQ_DESC_OPCODE_TSO
+ * @csum_start: Offset from packet to first byte checked in L4 checksum
+ * @csum_offset: Offset from csum_start to L4 checksum field
*/
 */
-
+struct ionic_txq_desc {
+ __le64 cmd;
#define IONIC_TXQ_DESC_OPCODE_MASK 0xf
#define IONIC_TXQ_DESC_OPCODE_SHIFT 4
#define IONIC_TXQ_DESC_FLAGS_MASK 0xf
@@ -692,8 +768,6 @@ enum ionic_txq_desc_opcode {
#define IONIC_TXQ_DESC_FLAG_TSO_SOT 0x4
#define IONIC_TXQ_DESC_FLAG_TSO_EOT 0x8
-struct ionic_txq_desc {
- __le64 cmd;
__le16 len;
union {
__le16 vlan_tci;
@@ -733,28 +807,38 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
*addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK;
};
-#define IONIC_TX_MAX_SG_ELEMS 8
-#define IONIC_RX_MAX_SG_ELEMS 8
-
/**
- * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
+ * struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
+struct ionic_txq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+};
+
+/**
+ * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
struct ionic_txq_sg_desc {
- struct ionic_txq_sg_elem {
- __le64 addr;
- __le16 len;
- __le16 rsvd[3];
- } elems[IONIC_TX_MAX_SG_ELEMS];
+#define IONIC_TX_MAX_SG_ELEMS 8
+#define IONIC_TX_SG_DESC_STRIDE 8
+ struct ionic_txq_sg_elem elems[IONIC_TX_MAX_SG_ELEMS];
+};
+
+struct ionic_txq_sg_desc_v1 {
+#define IONIC_TX_MAX_SG_ELEMS_V1 15
+#define IONIC_TX_SG_DESC_STRIDE_V1 16
+ struct ionic_txq_sg_elem elems[IONIC_TX_SG_DESC_STRIDE_V1];
};
/**
* struct ionic_txq_comp - Ethernet transmit queue completion descriptor
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @color: Color bit.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @color: Color bit
*/
struct ionic_txq_comp {
u8 status;
@@ -771,16 +855,15 @@ enum ionic_rxq_desc_opcode {
/**
* struct ionic_rxq_desc - Ethernet Rx queue descriptor format
- * @opcode: Rx operation, see RXQ_DESC_OPCODE_*:
- *
- * RXQ_DESC_OPCODE_SIMPLE:
+ * @opcode: Rx operation, see IONIC_RXQ_DESC_OPCODE_*:
*
+ * IONIC_RXQ_DESC_OPCODE_SIMPLE:
* Receive full packet into data buffer
* starting at @addr. Results of
* receive, including actual bytes received,
* are recorded in Rx completion descriptor.
*
- * @len: Data buffer's length, in bytes.
+ * @len: Data buffer's length, in bytes
* @addr: Data buffer's DMA address
*/
struct ionic_rxq_desc {
@@ -791,26 +874,33 @@ struct ionic_rxq_desc {
};
/**
- * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
+ * struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
+struct ionic_rxq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+};
+
+/**
+ * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
struct ionic_rxq_sg_desc {
- struct ionic_rxq_sg_elem {
- __le64 addr;
- __le16 len;
- __le16 rsvd[3];
- } elems[IONIC_RX_MAX_SG_ELEMS];
+#define IONIC_RX_MAX_SG_ELEMS 8
+#define IONIC_RX_SG_DESC_STRIDE 8
+ struct ionic_rxq_sg_elem elems[IONIC_RX_SG_DESC_STRIDE];
};
/**
* struct ionic_rxq_comp - Ethernet receive queue completion descriptor
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @num_sg_elems: Number of SG elements used by this descriptor
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @rss_hash: 32-bit RSS hash
- * @csum: 16-bit sum of the packet's L2 payload.
+ * @csum: 16-bit sum of the packet's L2 payload
* If the packet's L2 payload is odd length, an extra
* zero-value byte is included in the @csum calculation but
* not included in @len.
@@ -818,33 +908,51 @@ struct ionic_rxq_sg_desc {
* set. Includes .1p and .1q tags.
* @len: Received packet length, in bytes. Excludes FCS.
 * @csum_calc: L2 payload checksum is computed or not
- * @csum_tcp_ok: The TCP checksum calculated by the device
- * matched the checksum in the receive packet's
- * TCP header
- * @csum_tcp_bad: The TCP checksum calculated by the device did
- * not match the checksum in the receive packet's
- * TCP header.
- * @csum_udp_ok: The UDP checksum calculated by the device
- * matched the checksum in the receive packet's
- * UDP header
- * @csum_udp_bad: The UDP checksum calculated by the device did
- * not match the checksum in the receive packet's
- * UDP header.
- * @csum_ip_ok: The IPv4 checksum calculated by the device
- * matched the checksum in the receive packet's
- * first IPv4 header. If the receive packet
- * contains both a tunnel IPv4 header and a
- * transport IPv4 header, the device validates the
- * checksum for the both IPv4 headers.
- * @csum_ip_bad: The IPv4 checksum calculated by the device did
- * not match the checksum in the receive packet's
- * first IPv4 header. If the receive packet
- * contains both a tunnel IPv4 header and a
- * transport IPv4 header, the device validates the
- * checksum for both IP headers.
- * @VLAN: VLAN header was stripped and placed in @vlan_tci.
- * @pkt_type: Packet type
- * @color: Color bit.
+ * @csum_flags: See IONIC_RXQ_COMP_CSUM_F_*:
+ *
+ * IONIC_RXQ_COMP_CSUM_F_TCP_OK:
+ * The TCP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * TCP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_TCP_BAD:
+ * The TCP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * TCP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_UDP_OK:
+ * The UDP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * UDP header
+ *
+ * IONIC_RXQ_COMP_CSUM_F_UDP_BAD:
+ * The UDP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * UDP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_IP_OK:
+ * The IPv4 checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for the both IPv4 headers.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_IP_BAD:
+ * The IPv4 checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for both IP headers.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_VLAN:
+ * The VLAN header was stripped and placed in @vlan_tci.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_CALC:
+ * The checksum was calculated by the device.
+ *
+ * @pkt_type_color: Packet type and color bit; see IONIC_RXQ_COMP_PKT_TYPE_MASK
*/
struct ionic_rxq_comp {
u8 status;
@@ -891,8 +999,8 @@ enum ionic_eth_hw_features {
IONIC_ETH_HW_TSO_ECN = BIT(10),
IONIC_ETH_HW_TSO_GRE = BIT(11),
IONIC_ETH_HW_TSO_GRE_CSUM = BIT(12),
- IONIC_ETH_HW_TSO_IPXIP4 = BIT(13),
- IONIC_ETH_HW_TSO_IPXIP6 = BIT(14),
+ IONIC_ETH_HW_TSO_IPXIP4 = BIT(13),
+ IONIC_ETH_HW_TSO_IPXIP6 = BIT(14),
IONIC_ETH_HW_TSO_UDP = BIT(15),
IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16),
};
@@ -923,7 +1031,10 @@ enum q_control_oper {
};
/**
- * Physical connection type
+ * enum ionic_phy_type - Physical connection type
+ * @IONIC_PHY_TYPE_NONE: No PHY installed
+ * @IONIC_PHY_TYPE_COPPER: Copper PHY
+ * @IONIC_PHY_TYPE_FIBER: Fiber PHY
*/
enum ionic_phy_type {
IONIC_PHY_TYPE_NONE = 0,
@@ -932,18 +1043,23 @@ enum ionic_phy_type {
};
/**
- * Transceiver status
+ * enum ionic_xcvr_state - Transceiver status
+ * @IONIC_XCVR_STATE_REMOVED: Transceiver removed
+ * @IONIC_XCVR_STATE_INSERTED: Transceiver inserted
+ * @IONIC_XCVR_STATE_PENDING: Transceiver pending
+ * @IONIC_XCVR_STATE_SPROM_READ: Transceiver data read
+ * @IONIC_XCVR_STATE_SPROM_READ_ERR: Transceiver data read error
*/
enum ionic_xcvr_state {
IONIC_XCVR_STATE_REMOVED = 0,
IONIC_XCVR_STATE_INSERTED = 1,
IONIC_XCVR_STATE_PENDING = 2,
IONIC_XCVR_STATE_SPROM_READ = 3,
- IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
+ IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
};
/**
- * Supported link modes
+ * enum ionic_xcvr_pid - Supported link modes
*/
enum ionic_xcvr_pid {
IONIC_XCVR_PID_UNKNOWN = 0,
@@ -977,64 +1093,83 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_10GBASE_CU = 68,
IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69,
IONIC_XCVR_PID_QSFP_100G_PSM4 = 70,
+ IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
};
/**
- * Port types
+ * enum ionic_port_type - Port types
+ * @IONIC_PORT_TYPE_NONE: Port type not configured
+ * @IONIC_PORT_TYPE_ETH: Port carries ethernet traffic (inband)
+ * @IONIC_PORT_TYPE_MGMT: Port carries mgmt traffic (out-of-band)
*/
enum ionic_port_type {
- IONIC_PORT_TYPE_NONE = 0, /* port type not configured */
- IONIC_PORT_TYPE_ETH = 1, /* port carries ethernet traffic (inband) */
- IONIC_PORT_TYPE_MGMT = 2, /* port carries mgmt traffic (out-of-band) */
+ IONIC_PORT_TYPE_NONE = 0,
+ IONIC_PORT_TYPE_ETH = 1,
+ IONIC_PORT_TYPE_MGMT = 2,
};
/**
- * Port config state
+ * enum ionic_port_admin_state - Port config state
+ * @IONIC_PORT_ADMIN_STATE_NONE: Port admin state not configured
+ * @IONIC_PORT_ADMIN_STATE_DOWN: Port admin disabled
+ * @IONIC_PORT_ADMIN_STATE_UP: Port admin enabled
*/
enum ionic_port_admin_state {
- IONIC_PORT_ADMIN_STATE_NONE = 0, /* port admin state not configured */
- IONIC_PORT_ADMIN_STATE_DOWN = 1, /* port is admin disabled */
- IONIC_PORT_ADMIN_STATE_UP = 2, /* port is admin enabled */
+ IONIC_PORT_ADMIN_STATE_NONE = 0,
+ IONIC_PORT_ADMIN_STATE_DOWN = 1,
+ IONIC_PORT_ADMIN_STATE_UP = 2,
};
/**
- * Port operational status
+ * enum ionic_port_oper_status - Port operational status
+ * @IONIC_PORT_OPER_STATUS_NONE: Port disabled
+ * @IONIC_PORT_OPER_STATUS_UP: Port link status up
+ * @IONIC_PORT_OPER_STATUS_DOWN: Port link status down
*/
enum ionic_port_oper_status {
- IONIC_PORT_OPER_STATUS_NONE = 0, /* port is disabled */
- IONIC_PORT_OPER_STATUS_UP = 1, /* port is linked up */
- IONIC_PORT_OPER_STATUS_DOWN = 2, /* port link status is down */
+ IONIC_PORT_OPER_STATUS_NONE = 0,
+ IONIC_PORT_OPER_STATUS_UP = 1,
+ IONIC_PORT_OPER_STATUS_DOWN = 2,
};
/**
- * Ethernet Forward error correction (fec) modes
+ * enum ionic_port_fec_type - Ethernet Forward error correction (FEC) modes
+ * @IONIC_PORT_FEC_TYPE_NONE: FEC Disabled
+ * @IONIC_PORT_FEC_TYPE_FC: FireCode FEC
+ * @IONIC_PORT_FEC_TYPE_RS: ReedSolomon FEC
*/
enum ionic_port_fec_type {
- IONIC_PORT_FEC_TYPE_NONE = 0, /* Disabled */
- IONIC_PORT_FEC_TYPE_FC = 1, /* FireCode */
- IONIC_PORT_FEC_TYPE_RS = 2, /* ReedSolomon */
+ IONIC_PORT_FEC_TYPE_NONE = 0,
+ IONIC_PORT_FEC_TYPE_FC = 1,
+ IONIC_PORT_FEC_TYPE_RS = 2,
};
/**
- * Ethernet pause (flow control) modes
+ * enum ionic_port_pause_type - Ethernet pause (flow control) modes
+ * @IONIC_PORT_PAUSE_TYPE_NONE: Disable Pause
+ * @IONIC_PORT_PAUSE_TYPE_LINK: Link level pause
+ * @IONIC_PORT_PAUSE_TYPE_PFC: Priority-Flow Control
*/
enum ionic_port_pause_type {
- IONIC_PORT_PAUSE_TYPE_NONE = 0, /* Disable Pause */
- IONIC_PORT_PAUSE_TYPE_LINK = 1, /* Link level pause */
- IONIC_PORT_PAUSE_TYPE_PFC = 2, /* Priority-Flow control */
+ IONIC_PORT_PAUSE_TYPE_NONE = 0,
+ IONIC_PORT_PAUSE_TYPE_LINK = 1,
+ IONIC_PORT_PAUSE_TYPE_PFC = 2,
};
/**
- * Loopback modes
+ * enum ionic_port_loopback_mode - Loopback modes
+ * @IONIC_PORT_LOOPBACK_MODE_NONE: Disable loopback
+ * @IONIC_PORT_LOOPBACK_MODE_MAC: MAC loopback
+ * @IONIC_PORT_LOOPBACK_MODE_PHY: PHY/SerDes loopback
*/
enum ionic_port_loopback_mode {
- IONIC_PORT_LOOPBACK_MODE_NONE = 0, /* Disable loopback */
- IONIC_PORT_LOOPBACK_MODE_MAC = 1, /* MAC loopback */
- IONIC_PORT_LOOPBACK_MODE_PHY = 2, /* PHY/Serdes loopback */
+ IONIC_PORT_LOOPBACK_MODE_NONE = 0,
+ IONIC_PORT_LOOPBACK_MODE_MAC = 1,
+ IONIC_PORT_LOOPBACK_MODE_PHY = 2,
};
/**
- * Transceiver Status information
+ * struct ionic_xcvr_status - Transceiver Status information
* @state: Transceiver status (enum ionic_xcvr_state)
* @phy: Physical connection type (enum ionic_phy_type)
* @pid: Transceiver link mode (enum pid)
@@ -1048,7 +1183,7 @@ struct ionic_xcvr_status {
};
/**
- * Port configuration
+ * union ionic_port_config - Port configuration
* @speed: port speed (in Mbps)
* @mtu: mtu
* @state: port admin state (enum port_admin_state)
@@ -1081,17 +1216,21 @@ union ionic_port_config {
};
/**
- * Port Status information
+ * struct ionic_port_status - Port Status information
* @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
+ * @link_down_count: number of times link went from up to down
+ * @fec_type: fec type (enum ionic_port_fec_type)
 * @xcvr: transceiver status
*/
struct ionic_port_status {
__le32 id;
__le32 speed;
u8 status;
- u8 rsvd[51];
+ __le16 link_down_count;
+ u8 fec_type;
+ u8 rsvd[48];
struct ionic_xcvr_status xcvr;
} __packed;
@@ -1110,7 +1249,7 @@ struct ionic_port_identify_cmd {
/**
* struct ionic_port_identify_comp - Port identify command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
*/
struct ionic_port_identify_comp {
@@ -1135,7 +1274,7 @@ struct ionic_port_init_cmd {
/**
* struct ionic_port_init_comp - Port initialization command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_port_init_comp {
u8 status;
@@ -1155,7 +1294,7 @@ struct ionic_port_reset_cmd {
/**
* struct ionic_port_reset_comp - Port reset command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_port_reset_comp {
u8 status;
@@ -1163,15 +1302,23 @@ struct ionic_port_reset_comp {
};
/**
- * enum stats_ctl_cmd - List of commands for stats control
+ * enum ionic_stats_ctl_cmd - List of commands for stats control
+ * @IONIC_STATS_CTL_RESET: Reset statistics
*/
enum ionic_stats_ctl_cmd {
IONIC_STATS_CTL_RESET = 0,
};
-
/**
* enum ionic_port_attr - List of device attributes
+ * @IONIC_PORT_ATTR_STATE: Port state attribute
+ * @IONIC_PORT_ATTR_SPEED: Port speed attribute
+ * @IONIC_PORT_ATTR_MTU: Port MTU attribute
+ * @IONIC_PORT_ATTR_AUTONEG: Port autonegotiation attribute
+ * @IONIC_PORT_ATTR_FEC: Port FEC attribute
+ * @IONIC_PORT_ATTR_PAUSE: Port pause attribute
+ * @IONIC_PORT_ATTR_LOOPBACK: Port loopback attribute
+ * @IONIC_PORT_ATTR_STATS_CTRL: Port statistics control attribute
*/
enum ionic_port_attr {
IONIC_PORT_ATTR_STATE = 0,
@@ -1186,9 +1333,17 @@ enum ionic_port_attr {
/**
* struct ionic_port_setattr_cmd - Set port attributes on the NIC
- * @opcode: Opcode
- * @index: port index
- * @attr: Attribute type (enum ionic_port_attr)
+ * @opcode: Opcode
+ * @index: Port index
+ * @attr: Attribute type (enum ionic_port_attr)
+ * @state: Port state
+ * @speed: Port speed
+ * @mtu: Port MTU
+ * @an_enable: Port autonegotiation setting
+ * @fec_type: Port FEC type setting
+ * @pause_type: Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @stats_ctl: Port stats setting
*/
struct ionic_port_setattr_cmd {
u8 opcode;
@@ -1203,14 +1358,14 @@ struct ionic_port_setattr_cmd {
u8 fec_type;
u8 pause_type;
u8 loopback_mode;
- u8 stats_ctl;
+ u8 stats_ctl;
u8 rsvd2[60];
};
};
/**
* struct ionic_port_setattr_comp - Port set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @color: Color bit
*/
struct ionic_port_setattr_comp {
@@ -1234,8 +1389,15 @@ struct ionic_port_getattr_cmd {
/**
* struct ionic_port_getattr_comp - Port get attr command completion
- * @status: The status of the command (enum status_code)
- * @color: Color bit
+ * @status: Status of the command (enum ionic_status_code)
+ * @state: Port state
+ * @speed: Port speed
+ * @mtu: Port MTU
+ * @an_enable: Port autonegotiation setting
+ * @fec_type: Port FEC type setting
+ * @pause_type: Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @color: Color bit
*/
struct ionic_port_getattr_comp {
u8 status;
@@ -1254,12 +1416,12 @@ struct ionic_port_getattr_comp {
};
/**
- * struct ionic_lif_status - Lif status register
+ * struct ionic_lif_status - LIF status register
* @eid: most recent NotifyQ event id
- * @port_num: port the lif is connected to
+ * @port_num: port the LIF is connected to
* @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
- * @link_down_count: number of times link status changes
+ * @link_down_count: number of times link went from up to down
*/
struct ionic_lif_status {
__le64 eid;
@@ -1293,6 +1455,9 @@ enum ionic_dev_state {
/**
* enum ionic_dev_attr - List of device attributes
+ * @IONIC_DEV_ATTR_STATE: Device state attribute
+ * @IONIC_DEV_ATTR_NAME: Device name attribute
+ * @IONIC_DEV_ATTR_FEATURES: Device feature attributes
*/
enum ionic_dev_attr {
IONIC_DEV_ATTR_STATE = 0,
@@ -1322,7 +1487,7 @@ struct ionic_dev_setattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @features: Device features
* @color: Color bit
*/
@@ -1349,7 +1514,7 @@ struct ionic_dev_getattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @features: Device features
* @color: Color bit
*/
@@ -1379,6 +1544,13 @@ enum ionic_rss_hash_types {
/**
* enum ionic_lif_attr - List of LIF attributes
+ * @IONIC_LIF_ATTR_STATE: LIF state attribute
+ * @IONIC_LIF_ATTR_NAME: LIF name attribute
+ * @IONIC_LIF_ATTR_MTU: LIF MTU attribute
+ * @IONIC_LIF_ATTR_MAC: LIF MAC attribute
+ * @IONIC_LIF_ATTR_FEATURES: LIF features attribute
+ * @IONIC_LIF_ATTR_RSS: LIF RSS attribute
+ * @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1393,18 +1565,18 @@ enum ionic_lif_attr {
/**
* struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC
* @opcode: Opcode
- * @type: Attribute type (enum ionic_lif_attr)
+ * @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
- * @state: lif state (enum lif_state)
+ * @state: LIF state (enum ionic_lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
- * @types: The hash types to enable (see rss_hash_types).
- * @key: The hash secret key.
- * @addr: Address for the indirection table shared memory.
- * @stats_ctl: stats control commands (enum stats_ctl_cmd)
+ * @types: The hash types to enable (see rss_hash_types)
+ * @key: The hash secret key
+ * @addr: Address for the indirection table shared memory
+ * @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd)
*/
struct ionic_lif_setattr_cmd {
u8 opcode;
@@ -1422,16 +1594,15 @@ struct ionic_lif_setattr_cmd {
u8 rsvd[6];
__le64 addr;
} rss;
- u8 stats_ctl;
+ u8 stats_ctl;
u8 rsvd[60];
} __packed;
};
/**
* struct ionic_lif_setattr_comp - LIF set attr command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @features: features (enum ionic_eth_hw_features)
* @color: Color bit
*/
@@ -1461,10 +1632,9 @@ struct ionic_lif_getattr_cmd {
/**
* struct ionic_lif_getattr_comp - LIF get attr command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @state: lif state (enum lif_state)
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @state: LIF state (enum ionic_lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
@@ -1486,11 +1656,12 @@ struct ionic_lif_getattr_comp {
};
enum ionic_rx_mode {
- IONIC_RX_MODE_F_UNICAST = BIT(0),
- IONIC_RX_MODE_F_MULTICAST = BIT(1),
- IONIC_RX_MODE_F_BROADCAST = BIT(2),
- IONIC_RX_MODE_F_PROMISC = BIT(3),
- IONIC_RX_MODE_F_ALLMULTI = BIT(4),
+ IONIC_RX_MODE_F_UNICAST = BIT(0),
+ IONIC_RX_MODE_F_MULTICAST = BIT(1),
+ IONIC_RX_MODE_F_BROADCAST = BIT(2),
+ IONIC_RX_MODE_F_PROMISC = BIT(3),
+ IONIC_RX_MODE_F_ALLMULTI = BIT(4),
+ IONIC_RX_MODE_F_RDMA_SNIFFER = BIT(5),
};
/**
@@ -1498,11 +1669,12 @@ enum ionic_rx_mode {
* @opcode: opcode
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
- * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets.
- * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets.
- * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets.
- * IONIC_RX_MODE_F_PROMISC: Accept any packets.
- * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets.
+ * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets
+ * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets
+ * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets
+ * IONIC_RX_MODE_F_PROMISC: Accept any packets
+ * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets
+ * IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets
*/
struct ionic_rx_mode_set_cmd {
u8 opcode;
@@ -1526,9 +1698,14 @@ enum ionic_rx_filter_match_type {
* @qtype: Queue type
* @lif_index: LIF index
* @qid: Queue ID
- * @match: Rx filter match type. (See IONIC_RX_FILTER_MATCH_xxx)
- * @vlan: VLAN ID
- * @addr: MAC address (network-byte order)
+ * @match: Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx)
+ * @vlan: VLAN filter
+ * @vlan: VLAN ID
+ * @mac: MAC filter
+ * @addr: MAC address (network-byte order)
+ * @mac_vlan: MACVLAN filter
+ * @vlan: VLAN ID
+ * @addr: MAC address (network-byte order)
*/
struct ionic_rx_filter_add_cmd {
u8 opcode;
@@ -1553,11 +1730,10 @@ struct ionic_rx_filter_add_cmd {
/**
* struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @filter_id: Filter ID
- * @color: Color bit.
+ * @color: Color bit
*/
struct ionic_rx_filter_add_comp {
u8 status;
@@ -1584,63 +1760,6 @@ struct ionic_rx_filter_del_cmd {
typedef struct ionic_admin_comp ionic_rx_filter_del_comp;
-/**
- * struct ionic_qos_identify_cmd - QoS identify command
- * @opcode: opcode
- * @ver: Highest version of identify supported by driver
- *
- */
-struct ionic_qos_identify_cmd {
- u8 opcode;
- u8 ver;
- u8 rsvd[62];
-};
-
-/**
- * struct ionic_qos_identify_comp - QoS identify command completion
- * @status: The status of the command (enum status_code)
- * @ver: Version of identify returned by device
- */
-struct ionic_qos_identify_comp {
- u8 status;
- u8 ver;
- u8 rsvd[14];
-};
-
-#define IONIC_QOS_CLASS_MAX 7
-#define IONIC_QOS_CLASS_NAME_SZ 32
-#define IONIC_QOS_DSCP_MAX_VALUES 64
-
-/**
- * enum ionic_qos_class
- */
-enum ionic_qos_class {
- IONIC_QOS_CLASS_DEFAULT = 0,
- IONIC_QOS_CLASS_USER_DEFINED_1 = 1,
- IONIC_QOS_CLASS_USER_DEFINED_2 = 2,
- IONIC_QOS_CLASS_USER_DEFINED_3 = 3,
- IONIC_QOS_CLASS_USER_DEFINED_4 = 4,
- IONIC_QOS_CLASS_USER_DEFINED_5 = 5,
- IONIC_QOS_CLASS_USER_DEFINED_6 = 6,
-};
-
-/**
- * enum ionic_qos_class_type - Traffic classification criteria
- */
-enum ionic_qos_class_type {
- IONIC_QOS_CLASS_TYPE_NONE = 0,
- IONIC_QOS_CLASS_TYPE_PCP = 1, /* Dot1Q pcp */
- IONIC_QOS_CLASS_TYPE_DSCP = 2, /* IP dscp */
-};
-
-/**
- * enum ionic_qos_sched_type - Qos class scheduling type
- */
-enum ionic_qos_sched_type {
- IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */
- IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */
-};
-
enum ionic_vf_attr {
IONIC_VF_ATTR_SPOOFCHK = 1,
IONIC_VF_ATTR_TRUST = 2,
@@ -1652,26 +1771,29 @@ enum ionic_vf_attr {
};
/**
- * VF link status
+ * enum ionic_vf_link_status - Virtual Function link status
+ * @IONIC_VF_LINK_STATUS_AUTO: Use link state of the uplink
+ * @IONIC_VF_LINK_STATUS_UP: Link always up
+ * @IONIC_VF_LINK_STATUS_DOWN: Link always down
*/
enum ionic_vf_link_status {
- IONIC_VF_LINK_STATUS_AUTO = 0, /* link state of the uplink */
- IONIC_VF_LINK_STATUS_UP = 1, /* link is always up */
- IONIC_VF_LINK_STATUS_DOWN = 2, /* link is always down */
+ IONIC_VF_LINK_STATUS_AUTO = 0,
+ IONIC_VF_LINK_STATUS_UP = 1,
+ IONIC_VF_LINK_STATUS_DOWN = 2,
};
/**
* struct ionic_vf_setattr_cmd - Set VF attributes on the NIC
* @opcode: Opcode
- * @index: VF index
* @attr: Attribute type (enum ionic_vf_attr)
- * macaddr mac address
- * vlanid vlan ID
- * maxrate max Tx rate in Mbps
- * spoofchk enable address spoof checking
- * trust enable VF trust
- * linkstate set link up or down
- * stats_pa set DMA address for VF stats
+ * @vf_index: VF index
+ * @macaddr: mac address
+ * @vlanid: vlan ID
+ * @maxrate: max Tx rate in Mbps
+ * @spoofchk: enable address spoof checking
+ * @trust: enable VF trust
+ * @linkstate: set link up or down
+ * @stats_pa: set DMA address for VF stats
*/
struct ionic_vf_setattr_cmd {
u8 opcode;
@@ -1701,8 +1823,8 @@ struct ionic_vf_setattr_comp {
/**
* struct ionic_vf_getattr_cmd - Get VF attributes from the NIC
* @opcode: Opcode
- * @index: VF index
* @attr: Attribute type (enum ionic_vf_attr)
+ * @vf_index: VF index
*/
struct ionic_vf_getattr_cmd {
u8 opcode;
@@ -1729,19 +1851,85 @@ struct ionic_vf_getattr_comp {
};
/**
- * union ionic_qos_config - Qos configuration structure
+ * struct ionic_qos_identify_cmd - QoS identify command
+ * @opcode: opcode
+ * @ver: Highest version of identify supported by driver
+ *
+ */
+struct ionic_qos_identify_cmd {
+ u8 opcode;
+ u8 ver;
+ u8 rsvd[62];
+};
+
+/**
+ * struct ionic_qos_identify_comp - QoS identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver: Version of identify returned by device
+ */
+struct ionic_qos_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd[14];
+};
+
+#define IONIC_QOS_TC_MAX 8
+/* Capri max supported, should be renamed. */
+#define IONIC_QOS_CLASS_MAX 7
+#define IONIC_QOS_PCP_MAX 8
+#define IONIC_QOS_CLASS_NAME_SZ 32
+#define IONIC_QOS_DSCP_MAX 64
+#define IONIC_QOS_ALL_PCP 0xFF
+
+/**
+ * enum ionic_qos_class
+ */
+enum ionic_qos_class {
+ IONIC_QOS_CLASS_DEFAULT = 0,
+ IONIC_QOS_CLASS_USER_DEFINED_1 = 1,
+ IONIC_QOS_CLASS_USER_DEFINED_2 = 2,
+ IONIC_QOS_CLASS_USER_DEFINED_3 = 3,
+ IONIC_QOS_CLASS_USER_DEFINED_4 = 4,
+ IONIC_QOS_CLASS_USER_DEFINED_5 = 5,
+ IONIC_QOS_CLASS_USER_DEFINED_6 = 6,
+};
+
+/**
+ * enum ionic_qos_class_type - Traffic classification criteria
+ * @IONIC_QOS_CLASS_TYPE_NONE: No QoS
+ * @IONIC_QOS_CLASS_TYPE_PCP: Dot1Q PCP
+ * @IONIC_QOS_CLASS_TYPE_DSCP: IP DSCP
+ */
+enum ionic_qos_class_type {
+ IONIC_QOS_CLASS_TYPE_NONE = 0,
+ IONIC_QOS_CLASS_TYPE_PCP = 1,
+ IONIC_QOS_CLASS_TYPE_DSCP = 2,
+};
+
+/**
+ * enum ionic_qos_sched_type - QoS class scheduling type
+ * @IONIC_QOS_SCHED_TYPE_STRICT: Strict priority
+ * @IONIC_QOS_SCHED_TYPE_DWRR: Deficit weighted round-robin
+ */
+enum ionic_qos_sched_type {
+ IONIC_QOS_SCHED_TYPE_STRICT = 0,
+ IONIC_QOS_SCHED_TYPE_DWRR = 1,
+};
+
+/**
+ * union ionic_qos_config - QoS configuration structure
* @flags: Configuration flags
* IONIC_QOS_CONFIG_F_ENABLE enable
- * IONIC_QOS_CONFIG_F_DROP drop/nodrop
+ * IONIC_QOS_CONFIG_F_NO_DROP drop/nodrop
* IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite
* IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite
- * @sched_type: Qos class scheduling type (enum ionic_qos_sched_type)
- * @class_type: Qos class type (enum ionic_qos_class_type)
- * @pause_type: Qos pause type (enum ionic_qos_pause_type)
- * @name: Qos class name
+ * @sched_type: QoS class scheduling type (enum ionic_qos_sched_type)
+ * @class_type: QoS class type (enum ionic_qos_class_type)
+ * @pause_type: QoS pause type (enum ionic_qos_pause_type)
+ * @name: QoS class name
* @mtu: MTU of the class
- * @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP)
- * @dwrr_weight: Qos class scheduling weight
+ * @pfc_cos: Priority-Flow Control class of service
+ * @dwrr_weight: QoS class scheduling weight
* @strict_rlmt: Rate limit for strict priority scheduling
* @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP)
* @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP)
@@ -1752,7 +1940,8 @@ struct ionic_vf_getattr_comp {
union ionic_qos_config {
struct {
#define IONIC_QOS_CONFIG_F_ENABLE BIT(0)
-#define IONIC_QOS_CONFIG_F_DROP BIT(1)
+#define IONIC_QOS_CONFIG_F_NO_DROP BIT(1)
+/* Used to rewrite PCP or DSCP value. */
#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2)
#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3)
u8 flags;
@@ -1769,6 +1958,7 @@ union ionic_qos_config {
__le64 strict_rlmt;
};
/* marking */
+ /* Used to rewrite PCP or DSCP value. */
union {
u8 rw_dot1q_pcp;
u8 rw_ip_dscp;
@@ -1778,7 +1968,7 @@ union ionic_qos_config {
u8 dot1q_pcp;
struct {
u8 ndscp;
- u8 ip_dscp[IONIC_QOS_DSCP_MAX_VALUES];
+ u8 ip_dscp[IONIC_QOS_DSCP_MAX];
};
};
};
@@ -1797,15 +1987,15 @@ union ionic_qos_identity {
u8 version;
u8 type;
u8 rsvd[62];
- union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
+ union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
};
- __le32 words[512];
+ __le32 words[478];
};
/**
- * struct qos_init_cmd - QoS config init command
+ * struct ionic_qos_init_cmd - QoS config init command
* @opcode: Opcode
- * @group: Qos class id
+ * @group: QoS class id
* @info_pa: destination address for qos info
*/
struct ionic_qos_init_cmd {
@@ -1819,8 +2009,9 @@ struct ionic_qos_init_cmd {
typedef struct ionic_admin_comp ionic_qos_init_comp;
/**
- * struct ionic_qos_reset_cmd - Qos config reset command
+ * struct ionic_qos_reset_cmd - QoS config reset command
* @opcode: Opcode
+ * @group: QoS class id
*/
struct ionic_qos_reset_cmd {
u8 opcode;
@@ -1847,10 +2038,16 @@ struct ionic_fw_download_cmd {
typedef struct ionic_admin_comp ionic_fw_download_comp;
+/**
+ * enum ionic_fw_control_oper - FW control operations
+ * @IONIC_FW_RESET: Reset firmware
+ * @IONIC_FW_INSTALL: Install firmware
+ * @IONIC_FW_ACTIVATE: Activate firmware
+ */
enum ionic_fw_control_oper {
- IONIC_FW_RESET = 0, /* Reset firmware */
- IONIC_FW_INSTALL = 1, /* Install firmware */
- IONIC_FW_ACTIVATE = 2, /* Activate firmware */
+ IONIC_FW_RESET = 0,
+ IONIC_FW_INSTALL = 1,
+ IONIC_FW_ACTIVATE = 2,
};
/**
@@ -1869,8 +2066,10 @@ struct ionic_fw_control_cmd {
/**
 * struct ionic_fw_control_comp - Firmware control completion
- * @opcode: opcode
- * @slot: slot where the firmware was installed
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @slot: Slot where the firmware was installed
+ * @color: Color bit
*/
struct ionic_fw_control_comp {
u8 status;
@@ -1888,11 +2087,11 @@ struct ionic_fw_control_comp {
/**
* struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
- * @lif_index: lif index
+ * @lif_index: LIF index
*
- * There is no rdma specific dev command completion struct. Completion uses
+ * There is no RDMA specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
- * Nonzero status means the LIF does not support rdma.
+ * Nonzero status means the LIF does not support RDMA.
**/
struct ionic_rdma_reset_cmd {
u8 opcode;
@@ -1904,30 +2103,29 @@ struct ionic_rdma_reset_cmd {
/**
* struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 52, 53
- * @lif_index lif index
- * @qid_ver: (qid | (rdma version << 24))
+ * @lif_index: LIF index
+ * @qid_ver: (qid | (RDMA version << 24))
* @cid: intr, eq_id, or cq_id
* @dbid: doorbell page id
* @depth_log2: log base two of queue depth
* @stride_log2: log base two of queue stride
* @dma_addr: address of the queue memory
- * @xxx_table_index: temporary, but should not need pgtbl for contig. queues.
*
- * The same command struct is used to create an rdma event queue, completion
- * queue, or rdma admin queue. The cid is an interrupt number for an event
+ * The same command struct is used to create an RDMA event queue, completion
+ * queue, or RDMA admin queue. The cid is an interrupt number for an event
* queue, an event queue id for a completion queue, or a completion queue id
- * for an rdma admin queue.
+ * for an RDMA admin queue.
*
* The queue created via a dev command must be contiguous in dma space.
*
* The dev commands are intended only to be used during driver initialization,
- * to create queues supporting the rdma admin queue. Other queues, and other
- * types of rdma resources like memory regions, will be created and registered
- * via the rdma admin queue, and will support a more complete interface
+ * to create queues supporting the RDMA admin queue. Other queues, and other
+ * types of RDMA resources like memory regions, will be created and registered
+ * via the RDMA admin queue, and will support a more complete interface
* providing scatter gather lists for larger, scattered queue buffers and
* memory registration.
*
- * There is no rdma specific dev command completion struct. Completion uses
+ * There is no RDMA specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
**/
struct ionic_rdma_queue_cmd {
@@ -1940,8 +2138,7 @@ struct ionic_rdma_queue_cmd {
u8 depth_log2;
u8 stride_log2;
__le64 dma_addr;
- u8 rsvd2[36];
- __le32 xxx_table_index;
+ u8 rsvd2[40];
};
/******************************************************************
@@ -1949,7 +2146,7 @@ struct ionic_rdma_queue_cmd {
******************************************************************/
/**
- * struct ionic_notifyq_event
+ * struct ionic_notifyq_event - Generic event reporting structure
* @eid: event number
* @ecode: event code
* @data: unspecified data about the event
@@ -1964,9 +2161,9 @@ struct ionic_notifyq_event {
};
/**
- * struct ionic_link_change_event
+ * struct ionic_link_change_event - Link change event notification
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_LINK_CHANGE
+ * @ecode: event code = IONIC_EVENT_LINK_CHANGE
* @link_status: link up or down, with error bits (enum port_status)
* @link_speed: speed of the network link
*
@@ -1981,9 +2178,9 @@ struct ionic_link_change_event {
};
/**
- * struct ionic_reset_event
+ * struct ionic_reset_event - Reset event notification
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_RESET
+ * @ecode: event code = IONIC_EVENT_RESET
* @reset_code: reset type
* @state: 0=pending, 1=complete, 2=error
*
@@ -1999,11 +2196,9 @@ struct ionic_reset_event {
};
/**
- * struct ionic_heartbeat_event
+ * struct ionic_heartbeat_event - Sent periodically by NIC to indicate health
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_HEARTBEAT
- *
- * Sent periodically by the NIC to indicate continued health
+ * @ecode: event code = IONIC_EVENT_HEARTBEAT
*/
struct ionic_heartbeat_event {
__le64 eid;
@@ -2012,12 +2207,10 @@ struct ionic_heartbeat_event {
};
/**
- * struct ionic_log_event
+ * struct ionic_log_event - Sent to notify the driver of an internal error
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_LOG
+ * @ecode: event code = IONIC_EVENT_LOG
* @data: log data
- *
- * Sent to notify the driver of an internal error.
*/
struct ionic_log_event {
__le64 eid;
@@ -2026,7 +2219,18 @@ struct ionic_log_event {
};
/**
- * struct ionic_port_stats
+ * struct ionic_xcvr_event - Transceiver change event
+ * @eid: event number
+ * @ecode: event code = IONIC_EVENT_XCVR
+ */
+struct ionic_xcvr_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 rsvd[54];
+};
+
+/**
+ * struct ionic_port_stats - Port statistics structure
*/
struct ionic_port_stats {
__le64 frames_rx_ok;
@@ -2131,28 +2335,61 @@ struct ionic_mgmt_port_stats {
__le64 frames_rx_multicast;
__le64 frames_rx_broadcast;
__le64 frames_rx_pause;
- __le64 frames_rx_bad_length0;
- __le64 frames_rx_undersized1;
- __le64 frames_rx_oversized2;
- __le64 frames_rx_fragments3;
- __le64 frames_rx_jabber4;
- __le64 frames_rx_64b5;
- __le64 frames_rx_65b_127b6;
- __le64 frames_rx_128b_255b7;
- __le64 frames_rx_256b_511b8;
- __le64 frames_rx_512b_1023b9;
- __le64 frames_rx_1024b_1518b0;
- __le64 frames_rx_gt_1518b1;
- __le64 frames_rx_fifo_full2;
- __le64 frames_tx_ok3;
- __le64 frames_tx_all4;
- __le64 frames_tx_bad5;
- __le64 octets_tx_ok6;
- __le64 octets_tx_total7;
- __le64 frames_tx_unicast8;
- __le64 frames_tx_multicast9;
- __le64 frames_tx_broadcast0;
- __le64 frames_tx_pause1;
+ __le64 frames_rx_bad_length;
+ __le64 frames_rx_undersized;
+ __le64 frames_rx_oversized;
+ __le64 frames_rx_fragments;
+ __le64 frames_rx_jabber;
+ __le64 frames_rx_64b;
+ __le64 frames_rx_65b_127b;
+ __le64 frames_rx_128b_255b;
+ __le64 frames_rx_256b_511b;
+ __le64 frames_rx_512b_1023b;
+ __le64 frames_rx_1024b_1518b;
+ __le64 frames_rx_gt_1518b;
+ __le64 frames_rx_fifo_full;
+ __le64 frames_tx_ok;
+ __le64 frames_tx_all;
+ __le64 frames_tx_bad;
+ __le64 octets_tx_ok;
+ __le64 octets_tx_total;
+ __le64 frames_tx_unicast;
+ __le64 frames_tx_multicast;
+ __le64 frames_tx_broadcast;
+ __le64 frames_tx_pause;
+};
+
+enum ionic_pb_buffer_drop_stats {
+ IONIC_BUFFER_INTRINSIC_DROP = 0,
+ IONIC_BUFFER_DISCARDED,
+ IONIC_BUFFER_ADMITTED,
+ IONIC_BUFFER_OUT_OF_CELLS_DROP,
+ IONIC_BUFFER_OUT_OF_CELLS_DROP_2,
+ IONIC_BUFFER_OUT_OF_CREDIT_DROP,
+ IONIC_BUFFER_TRUNCATION_DROP,
+ IONIC_BUFFER_PORT_DISABLED_DROP,
+ IONIC_BUFFER_COPY_TO_CPU_TAIL_DROP,
+ IONIC_BUFFER_SPAN_TAIL_DROP,
+ IONIC_BUFFER_MIN_SIZE_VIOLATION_DROP,
+ IONIC_BUFFER_ENQUEUE_ERROR_DROP,
+ IONIC_BUFFER_INVALID_PORT_DROP,
+ IONIC_BUFFER_INVALID_OUTPUT_QUEUE_DROP,
+ IONIC_BUFFER_DROP_MAX,
+};
+
+/**
+ * struct ionic_port_pb_stats - packet buffers system stats
+ * uses ionic_pb_buffer_drop_stats for drop_counts[]
+ */
+struct ionic_port_pb_stats {
+ __le64 sop_count_in;
+ __le64 eop_count_in;
+ __le64 sop_count_out;
+ __le64 eop_count_out;
+ __le64 drop_counts[IONIC_BUFFER_DROP_MAX];
+ __le64 input_queue_buffer_occupancy[IONIC_QOS_TC_MAX];
+ __le64 input_queue_port_monitor[IONIC_QOS_TC_MAX];
+ __le64 output_queue_port_monitor[IONIC_QOS_TC_MAX];
};
/**
@@ -2184,22 +2421,31 @@ union ionic_port_identity {
u8 rsvd2[44];
union ionic_port_config config;
};
- __le32 words[512];
+ __le32 words[478];
};
/**
* struct ionic_port_info - port info structure
- * @port_status: port status
- * @port_stats: port stats
+ * @config: Port configuration data
+ * @status: Port status data
+ * @stats: Port statistics data
+ * @mgmt_stats: Port management statistics data
+ * @pb_stats: uplink packet buffer (pb) drop stats
*/
struct ionic_port_info {
union ionic_port_config config;
struct ionic_port_status status;
- struct ionic_port_stats stats;
+ union {
+ struct ionic_port_stats stats;
+ struct ionic_mgmt_port_stats mgmt_stats;
+ };
+ /* room for pb_stats to start at 2k offset */
+ u8 rsvd[760];
+ struct ionic_port_pb_stats pb_stats;
};
/**
- * struct ionic_lif_stats
+ * struct ionic_lif_stats - LIF statistics structure
*/
struct ionic_lif_stats {
/* RX */
@@ -2252,7 +2498,7 @@ struct ionic_lif_stats {
__le64 tx_queue_error;
__le64 tx_desc_fetch_error;
__le64 tx_desc_data_error;
- __le64 rsvd9;
+ __le64 tx_queue_empty;
__le64 rsvd10;
__le64 rsvd11;
__le64 rsvd12;
@@ -2353,7 +2599,10 @@ struct ionic_lif_stats {
};
/**
- * struct ionic_lif_info - lif info structure
+ * struct ionic_lif_info - LIF info structure
+ * @config: LIF configuration structure
+ * @status: LIF status structure
+ * @stats: LIF statistics structure
*/
struct ionic_lif_info {
union ionic_lif_config config;
@@ -2389,7 +2638,9 @@ union ionic_dev_cmd {
struct ionic_qos_init_cmd qos_init;
struct ionic_qos_reset_cmd qos_reset;
+ struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
+ struct ionic_q_control_cmd q_control;
};
union ionic_dev_cmd_comp {
@@ -2421,19 +2672,20 @@ union ionic_dev_cmd_comp {
ionic_qos_init_comp qos_init;
ionic_qos_reset_comp qos_reset;
+ struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
};
/**
- * union dev_info - Device info register format (read-only)
- * @signature: Signature value of 0x44455649 ('DEVI').
- * @version: Current version of info.
- * @asic_type: Asic type.
- * @asic_rev: Asic revision.
- * @fw_status: Firmware status.
- * @fw_heartbeat: Firmware heartbeat counter.
- * @serial_num: Serial number.
- * @fw_version: Firmware version.
+ * union ionic_dev_info_regs - Device info register format (read-only)
+ * @signature: Signature value of 0x44455649 ('DEVI')
+ * @version: Current version of info
+ * @asic_type: Asic type
+ * @asic_rev: Asic revision
+ * @fw_status: Firmware status
+ * @fw_heartbeat: Firmware heartbeat counter
+ * @serial_num: Serial number
+ * @fw_version: Firmware version
*/
union ionic_dev_info_regs {
#define IONIC_DEVINFO_FWVERS_BUFLEN 32
@@ -2454,10 +2706,10 @@ union ionic_dev_info_regs {
/**
* union ionic_dev_cmd_regs - Device command register format (read-write)
- * @doorbell: Device Cmd Doorbell, write-only.
+ * @doorbell: Device Cmd Doorbell, write-only
* Write a 1 to signal device to process cmd,
* poll done for completion.
- * @done: Done indicator, bit 0 == 1 when command is complete.
+ * @done: Done indicator, bit 0 == 1 when command is complete
* @cmd: Opcode-specific command bytes
* @comp: Opcode-specific response bytes
* @data: Opcode-specific side-data
@@ -2475,7 +2727,7 @@ union ionic_dev_cmd_regs {
};
/**
- * union ionic_dev_regs - Device register format in for bar 0 page 0
+ * union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
*/
@@ -2490,6 +2742,7 @@ union ionic_dev_regs {
union ionic_adminq_cmd {
struct ionic_admin_cmd cmd;
struct ionic_nop_cmd nop;
+ struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
struct ionic_q_control_cmd q_control;
struct ionic_lif_setattr_cmd lif_setattr;
@@ -2506,6 +2759,7 @@ union ionic_adminq_cmd {
union ionic_adminq_comp {
struct ionic_admin_comp comp;
struct ionic_nop_comp nop;
+ struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
struct ionic_lif_setattr_comp lif_setattr;
struct ionic_lif_getattr_comp lif_getattr;
@@ -2531,14 +2785,14 @@ union ionic_adminq_comp {
/**
* struct ionic_doorbell - Doorbell register layout
* @p_index: Producer index
- * @ring: Selects the specific ring of the queue to update.
+ * @ring: Selects the specific ring of the queue to update
* Type-specific meaning:
- * ring=0: Default producer/consumer queue.
+ * ring=0: Default producer/consumer queue
* ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs
* send events to EQs when armed. EQs send
* interrupts when armed.
- * @qid: The queue id selects the queue destination for the
- * producer index and flags.
+ * @qid_lo: Queue destination for the producer index and flags (low bits)
+ * @qid_hi: Queue destination for the producer index and flags (high bits)
*/
struct ionic_doorbell {
__le16 p_index;
@@ -2571,6 +2825,7 @@ struct ionic_identity {
union ionic_lif_identity lif;
union ionic_port_identity port;
union ionic_qos_identity qos;
+ union ionic_q_identity txq;
};
#endif /* _IONIC_IF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index d5293bfded29..7321a92f8395 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -17,6 +17,16 @@
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"
+/* queuetype support level */
+static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
+ [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
+ [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
+ [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
+ [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
+ * 1 = ... with Tx SG version 1
+ */
+};
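One way to keep a version table like this honest is a compile-time coverage check; below is a hedged standalone sketch using C11 _Static_assert (the kernel itself would reach for BUILD_BUG_ON, and the enum values here are placeholders mirroring the queue types, not the real definitions):

/* Placeholder mirror of the queue type enum, for illustration only */
enum qtype { QT_ADMINQ, QT_NOTIFYQ, QT_RXQ, QT_TXQ, QT_MAX };

static const unsigned char qtype_versions[QT_MAX] = {
	[QT_ADMINQ]  = 0,
	[QT_NOTIFYQ] = 0,
	[QT_RXQ]     = 0,
	[QT_TXQ]     = 1,
};

/* Fails to compile if a queue type is added without a version entry */
_Static_assert(sizeof(qtype_versions) == QT_MAX,
	       "qtype_versions must cover every queue type");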
+
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
@@ -27,6 +37,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
+static void ionic_lif_queue_identify(struct ionic_lif *lif);
static void ionic_lif_deferred_work(struct work_struct *work)
{
@@ -186,10 +197,10 @@ static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
return 0;
}
-static void ionic_intr_free(struct ionic_lif *lif, int index)
+static void ionic_intr_free(struct ionic *ionic, int index)
{
- if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
- clear_bit(index, lif->ionic->intrs);
+ if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
+ clear_bit(index, ionic->intrs);
}
static int ionic_qcq_enable(struct ionic_qcq *qcq)
@@ -299,7 +310,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
- ionic_intr_free(lif, qcq->intr.index);
+ ionic_intr_free(lif->ionic, qcq->intr.index);
}
devm_kfree(dev, qcq->cq.info);
@@ -345,7 +356,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
struct ionic_qcq *n_qcq)
{
if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
- ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
+ ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
n_qcq->flags &= ~IONIC_QCQ_F_INTR;
}
@@ -444,7 +455,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
cpumask_set_cpu(new->intr.cpu,
&new->intr.affinity_mask);
} else {
- new->intr.index = INTR_INDEX_NOT_ASSIGNED;
+ new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
@@ -497,7 +508,7 @@ err_out_free_irq:
devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
if (flags & IONIC_QCQ_F_INTR)
- ionic_intr_free(lif, new->intr.index);
+ ionic_intr_free(lif->ionic, new->intr.index);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
@@ -597,6 +608,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
@@ -614,6 +626,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
+ dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
q->tail = q->info;
q->head = q->tail;
@@ -646,6 +660,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
@@ -663,6 +678,8 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
+ dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
q->tail = q->info;
q->head = q->tail;
@@ -726,7 +743,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
}
break;
default:
- netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
+ netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
comp->event.ecode, eid);
break;
}
@@ -775,8 +792,8 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
return max(n_work, a_work);
}
-static void ionic_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *ns)
+void ionic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *ns)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_lif_stats *ls;
@@ -1509,17 +1526,25 @@ static void ionic_txrx_free(struct ionic_lif *lif)
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
+ unsigned int sg_desc_sz;
unsigned int flags;
unsigned int i;
int err = 0;
+ if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
+ lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
+ sizeof(struct ionic_txq_sg_desc_v1))
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
+ else
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

+
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
lif->ntxq_descs,
sizeof(struct ionic_txq_desc),
sizeof(struct ionic_txq_comp),
- sizeof(struct ionic_txq_sg_desc),
+ sg_desc_sz,
lif->kern_pid, &lif->txqcqs[i].qcq);
if (err)
goto err_out;
@@ -1682,7 +1707,7 @@ int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ if (!netif_device_present(netdev))
return 0;
ionic_stop_queues(lif);
@@ -1699,6 +1724,9 @@ static int ionic_get_vf_config(struct net_device *netdev,
struct ionic *ionic = lif->ionic;
int ret = 0;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_read(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1726,6 +1754,9 @@ static int ionic_get_vf_stats(struct net_device *netdev, int vf,
struct ionic_lif_stats *vs;
int ret = 0;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_read(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1761,6 +1792,9 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
return -EINVAL;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1792,6 +1826,9 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1818,6 +1855,9 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
if (tx_min)
return -EINVAL;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1840,6 +1880,9 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
u8 data = set; /* convert to u8 for config */
int ret;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1862,6 +1905,9 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
u8 data = set; /* convert to u8 for config */
int ret;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1898,6 +1944,9 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
return -EINVAL;
}
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -2065,9 +2114,17 @@ int ionic_lifs_alloc(struct ionic *ionic)
/* only build the first lif, others are for later features */
set_bit(0, ionic->lifbits);
+
lif = ionic_lif_alloc(ionic, 0);
+ if (IS_ERR_OR_NULL(lif)) {
+ clear_bit(0, ionic->lifbits);
+ return -ENOMEM;
+ }
+
+ lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
+ ionic_lif_queue_identify(lif);
- return PTR_ERR_OR_ZERO(lif);
+ return 0;
}
static void ionic_lif_reset(struct ionic_lif *lif)
@@ -2118,6 +2175,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
dev_info(ionic->dev, "FW Up: restarting LIFs\n");
ionic_init_devinfo(ionic);
+ ionic_port_init(ionic);
err = ionic_qcqs_alloc(lif);
if (err)
goto err_out;
@@ -2291,6 +2349,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_ENA),
@@ -2348,7 +2407,17 @@ static int ionic_station_set(struct ionic_lif *lif)
if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
return 0;
- if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) {
+ if (!is_zero_ether_addr(netdev->dev_addr)) {
+ /* If the netdev mac is non-zero and doesn't match the default
+ * device address, it was set by something earlier and we're
+ * likely here again after a fw-upgrade reset. We need to be
+ * sure the netdev mac is in our filter list.
+ */
+ if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
+ netdev->dev_addr))
+ ionic_lif_addr(lif, netdev->dev_addr, true);
+ } else {
+ /* Update the netdev mac with the device's mac */
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
@@ -2358,12 +2427,6 @@ static int ionic_station_set(struct ionic_lif *lif)
return 0;
}
- if (!is_zero_ether_addr(netdev->dev_addr)) {
- netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
- netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, false);
- }
-
eth_commit_mac_addr_change(netdev, &addr);
}
@@ -2573,6 +2636,80 @@ void ionic_lifs_unregister(struct ionic *ionic)
unregister_netdev(ionic->master_lif->netdev);
}
+static void ionic_lif_queue_identify(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ union ionic_q_identity *q_ident;
+ struct ionic_dev *idev;
+ int qtype;
+ int err;
+
+ idev = &lif->ionic->idev;
+ q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
+
+ for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
+ struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
+
+ /* skip the queue types we don't know about */
+ switch (qtype) {
+ case IONIC_QTYPE_ADMINQ:
+ case IONIC_QTYPE_NOTIFYQ:
+ case IONIC_QTYPE_RXQ:
+ case IONIC_QTYPE_TXQ:
+ break;
+ default:
+ continue;
+ }
+
+ memset(qti, 0, sizeof(*qti));
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
+ ionic_qtype_versions[qtype]);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ qti->version = q_ident->version;
+ qti->supported = q_ident->supported;
+ qti->features = le64_to_cpu(q_ident->features);
+ qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
+ qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
+ qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
+ qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
+ qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
+ }
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err == -EINVAL) {
+ dev_err(ionic->dev, "qtype %d not supported\n", qtype);
+ continue;
+ } else if (err == -EIO) {
+ dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
+ return;
+ } else if (err) {
+ dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
+ qtype, err);
+ return;
+ }
+
+ dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
+ qtype, qti->version);
+ dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
+ qtype, qti->supported);
+ dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
+ qtype, qti->features);
+ dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
+ qtype, qti->desc_sz);
+ dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
+ qtype, qti->comp_sz);
+ dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
+ qtype, qti->sg_desc_sz);
+ dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
+ qtype, qti->max_sg_elems);
+ dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
+ qtype, qti->sg_desc_stride);
+ }
+}
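Callers consume the negotiated results much as ionic_txrx_alloc does earlier in this patch; the following is a hedged standalone sketch of the same version/size check, with simplified stand-in struct names and illustrative descriptor sizes:

#include <stddef.h>
#include <stdint.h>

struct qtype_info { uint8_t version; uint16_t sg_desc_sz; };
struct sg_desc_v0 { uint64_t elems[8]; };	/* illustrative sizes */
struct sg_desc_v1 { uint64_t elems[16]; };

/* Pick the SG descriptor size the device agreed to, falling back to v0 */
static size_t pick_sg_desc_sz(const struct qtype_info *qti)
{
	if (qti->version >= 1 && qti->sg_desc_sz == sizeof(struct sg_desc_v1))
		return sizeof(struct sg_desc_v1);
	return sizeof(struct sg_desc_v0);
}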
+
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lid)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 5d4ffda5c05f..c3428034a17b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -20,11 +20,13 @@ struct ionic_tx_stats {
u64 bytes;
u64 clean;
u64 linearize;
- u64 no_csum;
+ u64 csum_none;
u64 csum;
u64 crc32_csum;
u64 tso;
+ u64 tso_bytes;
u64 frags;
+ u64 vlan_inserted;
u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
};
@@ -38,6 +40,7 @@ struct ionic_rx_stats {
u64 csum_error;
u64 buffers_posted;
u64 dropped;
+ u64 vlan_stripped;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -114,11 +117,17 @@ struct ionic_lif_sw_stats {
u64 rx_packets;
u64 rx_bytes;
u64 tx_tso;
- u64 tx_no_csum;
+ u64 tx_tso_bytes;
+ u64 tx_csum_none;
u64 tx_csum;
u64 rx_csum_none;
u64 rx_csum_complete;
u64 rx_csum_error;
+ u64 hw_tx_dropped;
+ u64 hw_rx_dropped;
+ u64 hw_rx_over_errors;
+ u64 hw_rx_missed_errors;
+ u64 hw_tx_aborted_errors;
};
enum ionic_lif_state_flags {
@@ -133,6 +142,17 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_STATE_SIZE
};
+struct ionic_qtype_info {
+ u8 version;
+ u8 supported;
+ u64 features;
+ u16 desc_sz;
+ u16 comp_sz;
+ u16 sg_desc_sz;
+ u16 max_sg_elems;
+ u16 sg_desc_stride;
+};
+
#define IONIC_LIF_NAME_MAX_SZ 32
struct ionic_lif {
char name[IONIC_LIF_NAME_MAX_SZ];
@@ -161,11 +181,13 @@ struct ionic_lif {
bool mc_overflow;
unsigned int nmcast;
bool uc_overflow;
+ u16 lif_type;
unsigned int nucast;
struct ionic_lif_info *info;
dma_addr_t info_pa;
u32 info_sz;
+ struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
u16 rss_types;
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
@@ -227,6 +249,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
}
void ionic_link_status_check_request(struct ionic_lif *lif);
+void ionic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *ns);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
int ionic_lifs_alloc(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 588c62e9add7..df5b9bcc3aba 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/utsname.h>
-#include <linux/vermagic.h>
+#include <generated/utsrelease.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -152,6 +152,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_RX_FILTER_ADD";
case IONIC_CMD_RX_FILTER_DEL:
return "IONIC_CMD_RX_FILTER_DEL";
+ case IONIC_CMD_Q_IDENTIFY:
+ return "IONIC_CMD_Q_IDENTIFY";
case IONIC_CMD_Q_INIT:
return "IONIC_CMD_Q_INIT";
case IONIC_CMD_Q_CONTROL:
@@ -356,7 +358,7 @@ try_again:
done = ionic_dev_cmd_done(idev);
if (done)
break;
- msleep(20);
+ msleep(5);
hb = ionic_heartbeat_check(ionic);
} while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
@@ -413,6 +415,7 @@ int ionic_setup(struct ionic *ionic)
err = ionic_dev_setup(ionic);
if (err)
return err;
+ ionic_reset(ionic);
return 0;
}
@@ -509,16 +512,16 @@ int ionic_port_init(struct ionic *ionic)
size_t sz;
int err;
- if (idev->port_info)
- return 0;
-
- idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
- idev->port_info = dma_alloc_coherent(ionic->dev, idev->port_info_sz,
- &idev->port_info_pa,
- GFP_KERNEL);
if (!idev->port_info) {
- dev_err(ionic->dev, "Failed to allocate port info, aborting\n");
- return -ENOMEM;
+ idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
+ idev->port_info = dma_alloc_coherent(ionic->dev,
+ idev->port_info_sz,
+ &idev->port_info_pa,
+ GFP_KERNEL);
+ if (!idev->port_info) {
+ dev_err(ionic->dev, "Failed to allocate port info\n");
+ return -ENOMEM;
+ }
}
sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data));
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 8f2a8fb029f1..2a1885da58a6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -15,11 +15,109 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
IONIC_LIF_STAT_DESC(rx_packets),
IONIC_LIF_STAT_DESC(rx_bytes),
IONIC_LIF_STAT_DESC(tx_tso),
- IONIC_LIF_STAT_DESC(tx_no_csum),
+ IONIC_LIF_STAT_DESC(tx_tso_bytes),
+ IONIC_LIF_STAT_DESC(tx_csum_none),
IONIC_LIF_STAT_DESC(tx_csum),
IONIC_LIF_STAT_DESC(rx_csum_none),
IONIC_LIF_STAT_DESC(rx_csum_complete),
IONIC_LIF_STAT_DESC(rx_csum_error),
+ IONIC_LIF_STAT_DESC(hw_tx_dropped),
+ IONIC_LIF_STAT_DESC(hw_rx_dropped),
+ IONIC_LIF_STAT_DESC(hw_rx_over_errors),
+ IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
+ IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
+};
+
+static const struct ionic_stat_desc ionic_port_stats_desc[] = {
+ IONIC_PORT_STAT_DESC(frames_rx_ok),
+ IONIC_PORT_STAT_DESC(frames_rx_all),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_fcs),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_all),
+ IONIC_PORT_STAT_DESC(octets_rx_ok),
+ IONIC_PORT_STAT_DESC(octets_rx_all),
+ IONIC_PORT_STAT_DESC(frames_rx_unicast),
+ IONIC_PORT_STAT_DESC(frames_rx_multicast),
+ IONIC_PORT_STAT_DESC(frames_rx_broadcast),
+ IONIC_PORT_STAT_DESC(frames_rx_pause),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_length),
+ IONIC_PORT_STAT_DESC(frames_rx_undersized),
+ IONIC_PORT_STAT_DESC(frames_rx_oversized),
+ IONIC_PORT_STAT_DESC(frames_rx_fragments),
+ IONIC_PORT_STAT_DESC(frames_rx_jabber),
+ IONIC_PORT_STAT_DESC(frames_rx_pripause),
+ IONIC_PORT_STAT_DESC(frames_rx_stomped_crc),
+ IONIC_PORT_STAT_DESC(frames_rx_too_long),
+ IONIC_PORT_STAT_DESC(frames_rx_vlan_good),
+ IONIC_PORT_STAT_DESC(frames_rx_dropped),
+ IONIC_PORT_STAT_DESC(frames_rx_less_than_64b),
+ IONIC_PORT_STAT_DESC(frames_rx_64b),
+ IONIC_PORT_STAT_DESC(frames_rx_65b_127b),
+ IONIC_PORT_STAT_DESC(frames_rx_128b_255b),
+ IONIC_PORT_STAT_DESC(frames_rx_256b_511b),
+ IONIC_PORT_STAT_DESC(frames_rx_512b_1023b),
+ IONIC_PORT_STAT_DESC(frames_rx_1024b_1518b),
+ IONIC_PORT_STAT_DESC(frames_rx_1519b_2047b),
+ IONIC_PORT_STAT_DESC(frames_rx_2048b_4095b),
+ IONIC_PORT_STAT_DESC(frames_rx_4096b_8191b),
+ IONIC_PORT_STAT_DESC(frames_rx_8192b_9215b),
+ IONIC_PORT_STAT_DESC(frames_rx_other),
+ IONIC_PORT_STAT_DESC(frames_tx_ok),
+ IONIC_PORT_STAT_DESC(frames_tx_all),
+ IONIC_PORT_STAT_DESC(frames_tx_bad),
+ IONIC_PORT_STAT_DESC(octets_tx_ok),
+ IONIC_PORT_STAT_DESC(octets_tx_total),
+ IONIC_PORT_STAT_DESC(frames_tx_unicast),
+ IONIC_PORT_STAT_DESC(frames_tx_multicast),
+ IONIC_PORT_STAT_DESC(frames_tx_broadcast),
+ IONIC_PORT_STAT_DESC(frames_tx_pause),
+ IONIC_PORT_STAT_DESC(frames_tx_pripause),
+ IONIC_PORT_STAT_DESC(frames_tx_vlan),
+ IONIC_PORT_STAT_DESC(frames_tx_less_than_64b),
+ IONIC_PORT_STAT_DESC(frames_tx_64b),
+ IONIC_PORT_STAT_DESC(frames_tx_65b_127b),
+ IONIC_PORT_STAT_DESC(frames_tx_128b_255b),
+ IONIC_PORT_STAT_DESC(frames_tx_256b_511b),
+ IONIC_PORT_STAT_DESC(frames_tx_512b_1023b),
+ IONIC_PORT_STAT_DESC(frames_tx_1024b_1518b),
+ IONIC_PORT_STAT_DESC(frames_tx_1519b_2047b),
+ IONIC_PORT_STAT_DESC(frames_tx_2048b_4095b),
+ IONIC_PORT_STAT_DESC(frames_tx_4096b_8191b),
+ IONIC_PORT_STAT_DESC(frames_tx_8192b_9215b),
+ IONIC_PORT_STAT_DESC(frames_tx_other),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_0),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_1),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_2),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_3),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_4),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_5),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_6),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_7),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_0),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_1),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_2),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_3),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_4),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_5),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_6),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_7),
+ IONIC_PORT_STAT_DESC(tx_pripause_0_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_1_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_2_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_3_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_4_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_5_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_6_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_7_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_0_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_1_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_2_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_3_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_4_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_5_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_6_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_7_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pause_1us_count),
+ IONIC_PORT_STAT_DESC(frames_tx_truncated),
};
static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
@@ -29,6 +127,11 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
IONIC_TX_STAT_DESC(dma_map_err),
IONIC_TX_STAT_DESC(linearize),
IONIC_TX_STAT_DESC(frags),
+ IONIC_TX_STAT_DESC(tso),
+ IONIC_TX_STAT_DESC(tso_bytes),
+ IONIC_TX_STAT_DESC(csum_none),
+ IONIC_TX_STAT_DESC(csum),
+ IONIC_TX_STAT_DESC(vlan_inserted),
};
static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
@@ -40,6 +143,7 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(csum_complete),
IONIC_RX_STAT_DESC(csum_error),
IONIC_RX_STAT_DESC(dropped),
+ IONIC_RX_STAT_DESC(vlan_stripped),
};
static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
@@ -62,6 +166,7 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
+#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)
#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
@@ -76,6 +181,7 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
{
struct ionic_tx_stats *tstats;
struct ionic_rx_stats *rstats;
+ struct rtnl_link_stats64 ns;
struct ionic_qcq *txqcq;
struct ionic_qcq *rxqcq;
int q_num;
@@ -89,7 +195,8 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
stats->tx_packets += tstats->pkts;
stats->tx_bytes += tstats->bytes;
stats->tx_tso += tstats->tso;
- stats->tx_no_csum += tstats->no_csum;
+ stats->tx_tso_bytes += tstats->tso_bytes;
+ stats->tx_csum_none += tstats->csum_none;
stats->tx_csum += tstats->csum;
}
@@ -103,6 +210,13 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
stats->rx_csum_error += rstats->csum_error;
}
}
+
+ ionic_get_stats64(lif->netdev, &ns);
+ stats->hw_tx_dropped = ns.tx_dropped;
+ stats->hw_rx_dropped = ns.rx_dropped;
+ stats->hw_rx_over_errors = ns.rx_over_errors;
+ stats->hw_rx_missed_errors = ns.rx_missed_errors;
+ stats->hw_tx_aborted_errors = ns.tx_aborted_errors;
}
static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
@@ -118,6 +232,9 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
+ /* port stats */
+ total += IONIC_NUM_PORT_STATS;
+
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
@@ -144,6 +261,13 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
snprintf(*buf, ETH_GSTRING_LEN, ionic_lif_stats_desc[i].name);
*buf += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ ionic_port_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s",
@@ -225,6 +349,7 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
{
+ struct ionic_port_stats *port_stats;
struct ionic_lif_sw_stats lif_stats;
struct ionic_qcq *txqcq, *rxqcq;
struct ionic_tx_stats *txstats;
@@ -238,6 +363,13 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
+ port_stats = &lif->ionic->idev.port_info->stats;
+ for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
+ **buf = IONIC_READ_STAT_LE64(port_stats,
+ &ionic_port_stats_desc[i]);
+ (*buf)++;
+ }
+
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
txstats = &lif_to_txstats(lif, q_num);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
index d2c1122a2c6e..3f543512616e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
@@ -11,6 +11,9 @@
.offset = IONIC_STAT_TO_OFFSET(type, stat_name) \
}
+#define IONIC_PORT_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_port_stats, stat_name)
+
#define IONIC_LIF_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_lif_sw_stats, stat_name)
@@ -45,6 +48,9 @@ extern const int ionic_num_stats_grps;
#define IONIC_READ_STAT64(base_ptr, desc_ptr) \
(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
+ __le64_to_cpu(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+
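The descriptor macros above boil down to reading a u64 at a recorded byte offset from a stats base pointer; a hedged standalone rendering of the same idea, with illustrative names:

#include <stdint.h>
#include <stddef.h>

struct stat_desc { const char *name; size_t offset; };

struct tx_stats { uint64_t pkts; uint64_t bytes; };

static const struct stat_desc tx_descs[] = {
	{ "pkts",  offsetof(struct tx_stats, pkts)  },
	{ "bytes", offsetof(struct tx_stats, bytes) },
};

/* Generic reader: interpret base + offset as a u64, as the macros do */
static uint64_t read_stat(const void *base, const struct stat_desc *d)
{
	return *(const uint64_t *)((const uint8_t *)base + d->offset);
}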
struct ionic_stat_desc {
char name[ETH_GSTRING_LEN];
u64 offset;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index d233b6e77b1e..b7f900c11834 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,8 +10,10 @@
#include "ionic_lif.h"
#include "ionic_txrx.h"
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg);
+static void ionic_rx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
@@ -140,8 +142,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
return skb;
}
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_rx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg)
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
struct ionic_qcq *qcq = q_to_qcq(q);
@@ -210,10 +214,11 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
(comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
stats->csum_error++;
- if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(comp->vlan_tci));
+ if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(comp->vlan_tci));
+ stats->vlan_stripped++;
}
if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
@@ -475,7 +480,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
return work_done;
}
-static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+ void *data, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->lif->ionic->dev;
@@ -491,7 +497,8 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t
return dma_addr;
}
-static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+ const skb_frag_t *frag,
size_t offset, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
@@ -507,8 +514,10 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *fra
return dma_addr;
}
-static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_tx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg)
{
struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
struct ionic_txq_sg_elem *elem = sg_desc->elems;
@@ -852,6 +861,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts += total_pkts;
stats->bytes += total_bytes;
stats->tso++;
+ stats->tso_bytes += total_bytes;
return 0;
@@ -890,9 +900,12 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
flags, skb_shinfo(skb)->nr_frags, dma_addr);
desc->cmd = cpu_to_le64(cmd);
desc->len = cpu_to_le16(skb_headlen(skb));
- desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
+ if (has_vlan) {
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ stats->vlan_inserted++;
+ }
if (skb->csum_not_inet)
stats->crc32_csum++;
@@ -927,9 +940,12 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
flags, skb_shinfo(skb)->nr_frags, dma_addr);
desc->cmd = cpu_to_le64(cmd);
desc->len = cpu_to_le16(skb_headlen(skb));
- desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ if (has_vlan) {
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ stats->vlan_inserted++;
+ }
- stats->no_csum++;
+ stats->csum_none++;
return 0;
}
@@ -989,6 +1005,7 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
+ int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
int err;
@@ -997,7 +1014,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
return (skb->len / skb_shinfo(skb)->gso_size) + 1;
/* If non-TSO, just need 1 desc and nr_frags sg elems */
- if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
+ if (skb_shinfo(skb)->nr_frags <= sg_elems)
return 1;
/* Too many frags, so linearize */
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fa41bf08a589..a49743d56b9c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -740,12 +740,6 @@ struct qed_dbg_feature {
u32 dumped_dwords;
};
-struct qed_dbg_params {
- struct qed_dbg_feature features[DBG_FEATURE_NUM];
- u8 engine_for_debug;
- bool print_data;
-};
-
struct qed_dev {
u32 dp_module;
u8 dp_level;
@@ -844,6 +838,9 @@ struct qed_dev {
/* Recovery */
bool recov_in_prog;
+ /* Indicates whether attentions should be prevented from being reasserted */
+ bool attn_clr_en;
+
/* LLH info */
u8 ppfid_bitmap;
struct qed_llh_info *p_llh_info;
@@ -872,17 +869,18 @@ struct qed_dev {
} protocol_ops;
void *ops_cookie;
- struct qed_dbg_params dbg_params;
-
#ifdef CONFIG_QED_LL2
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
bool disable_ilt_dump;
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
+ bool print_dbg_data;
+
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
@@ -1016,10 +1014,13 @@ int qed_device_num_ports(struct qed_dev *cdev);
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type);
void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 1a636bad717d..7b76667acaba 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -110,6 +110,7 @@ struct src_ent {
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
+#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))
#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
@@ -293,18 +294,40 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL;
}
-static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
+ u32 num_srqs, u32 num_xrc_srqs)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
+ p_mgr->xrc_srq_count = num_xrc_srqs;
}
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];
+
+ return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+}
+
+static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
+{
+ u32 page_size;
+
+ page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
+ return page_size / XRC_SRQ_CXT_SIZE;
+}
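The helper above is plain integer division: with a hypothetical 4 KiB ILT page and a 64-byte XRC SRQ context it yields 4096 / 64 = 64 contexts per page. A hedged sketch of the same arithmetic:

#include <stdint.h>

/* Contexts that fit in one ILT page: page_size / context_size */
static uint32_t elems_per_page(uint32_t page_size, uint32_t elem_size)
{
	return page_size / elem_size;	/* e.g. 4096 / 64 == 64 */
}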
+
+u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ u32 total_srqs;
+
+ total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;
- return p_mgr->srq_count;
+ return total_srqs;
}
/* set the iids count per protocol */
@@ -692,7 +715,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
}
/* TSDM (SRQ CONTEXT) */
- total = qed_cxt_get_srq_count(p_hwfn);
+ total = qed_cxt_get_total_srq_count(p_hwfn);
if (total) {
p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
@@ -1962,11 +1985,9 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
struct qed_rdma_pf_params *p_params,
u32 num_tasks)
{
- u32 num_cons, num_qps, num_srqs;
+ u32 num_cons, num_qps;
enum protocol_type proto;
- num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
-
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
DP_NOTICE(p_hwfn,
"Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
@@ -1989,6 +2010,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
}
if (num_cons && num_tasks) {
+ u32 num_srqs, num_xrc_srqs;
+
qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
/* Deliberately passing ROCE for the task id. This is because
@@ -1997,7 +2020,13 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
QED_CXT_ROCE_TID_SEG, 1,
num_tasks, false);
- qed_cxt_set_srq_count(p_hwfn, num_srqs);
+
+ num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
+
+ /* XRC SRQs populate a single ILT page */
+ num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);
+
+ qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
} else {
DP_INFO(p_hwfn->cdev,
"RDMA personality used without setting params!\n");
@@ -2163,10 +2192,17 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
p_blk = &p_cli->pf_blks[CDUC_BLK];
break;
case QED_ELEM_SRQ:
+ /* The first ILT page is not used for regular SRQs. Skip it. */
+ iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
elem_size = SRQ_CXT_SIZE;
p_blk = &p_cli->pf_blks[SRQ_BLK];
break;
+ case QED_ELEM_XRC_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = XRC_SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
case QED_ELEM_TASK:
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
@@ -2386,8 +2422,12 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
return rc;
/* Free TSDM CXT */
- rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
- qed_cxt_get_srq_count(p_hwfn));
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
+ p_hwfn->p_cxt_mngr->xrc_srq_count);
+
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
+ p_hwfn->p_cxt_mngr->xrc_srq_count,
+ p_hwfn->p_cxt_mngr->srq_count);
return rc;
}
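The free path above mirrors the allocation-side layout: XRC SRQ contexts occupy the leading ILT elements, so a regular SRQ id is shifted past them before indexing. A hedged sketch of that mapping, with illustrative names:

#include <stdint.h>

/* Map a queue-visible SRQ id to its ILT element index, given that the
 * first xrc_srq_count elements are reserved for XRC SRQ contexts.
 */
static uint32_t srq_iid_to_ilt_elem(uint32_t srq_id, uint32_t xrc_srq_count)
{
	return srq_id + xrc_srq_count;
}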
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c4e815f6cabd..ce08ae8d8498 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -82,7 +82,8 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
enum qed_cxt_elem_type {
QED_ELEM_CXT,
QED_ELEM_SRQ,
- QED_ELEM_TASK
+ QED_ELEM_TASK,
+ QED_ELEM_XRC_SRQ,
};
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
@@ -235,7 +236,6 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type);
u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
enum protocol_type type);
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn);
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
@@ -358,6 +358,7 @@ struct qed_cxt_mngr {
/* total number of SRQ's for this hwfn */
u32 srq_count;
+ u32 xrc_srq_count;
/* Maximal number of L2 steering filters */
u32 arfs_count;
@@ -372,4 +373,9 @@ u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
+u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client);
+
+u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index f4eebaabb6d0..57a0dab88431 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7453,7 +7453,7 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
- &p_hwfn->cdev->dbg_params.features[feature_idx];
+ &p_hwfn->cdev->dbg_features[feature_idx];
u32 text_size_bytes, null_char_pos, i;
enum dbg_status rc;
char *text_buf;
@@ -7502,7 +7502,7 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
text_buf[i] = '\n';
/* Dump printable feature to log */
- if (p_hwfn->cdev->dbg_params.print_data)
+ if (p_hwfn->cdev->print_dbg_data)
qed_dbg_print_feature(text_buf, text_size_bytes);
/* Free the old dump_buf and point the dump_buf to the newly allocated
@@ -7523,7 +7523,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
- &p_hwfn->cdev->dbg_params.features[feature_idx];
+ &p_hwfn->cdev->dbg_features[feature_idx];
u32 buf_size_dwords;
enum dbg_status rc;
@@ -7648,7 +7648,7 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
enum qed_nvm_images image_id)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
u32 len_rounded, i;
__be32 val;
int rc;
@@ -7780,7 +7780,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
int grc_params[MAX_DBG_GRC_PARAMS], i;
u32 offset = 0, feature_size;
@@ -8000,7 +8000,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
u8 cur_engine, org_engine;
@@ -8059,9 +8059,9 @@ int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature =
- &cdev->dbg_params.features[feature];
+ &cdev->dbg_features[feature];
enum dbg_status dbg_rc;
struct qed_ptt *p_ptt;
int rc = 0;
@@ -8084,7 +8084,7 @@ int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
DP_VERBOSE(cdev, QED_MSG_DEBUG,
"copying debugfs feature to external buffer\n");
memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
- *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
+ *num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
4;
out:
@@ -8095,7 +8095,7 @@ out:
int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 buf_size_dwords;
@@ -8120,14 +8120,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
- return cdev->dbg_params.engine_for_debug;
+ return cdev->engine_for_debug;
}
void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
{
DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
engine_number);
- cdev->dbg_params.engine_for_debug = engine_number;
+ cdev->engine_for_debug = engine_number;
}
void qed_dbg_pf_init(struct qed_dev *cdev)
@@ -8146,7 +8146,7 @@ void qed_dbg_pf_init(struct qed_dev *cdev)
}
/* Set the hwfn to be 0 as default */
- cdev->dbg_params.engine_for_debug = 0;
+ cdev->engine_for_debug = 0;
}
void qed_dbg_pf_exit(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 38a65b984e47..1eebf30fa798 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1972,7 +1972,7 @@ static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
return 0;
if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
- p_hwfn->hw_info.multi_tc_roce_en = 0;
+ p_hwfn->hw_info.multi_tc_roce_en = false;
DP_NOTICE(p_hwfn,
"multi-tc roce was disabled to reduce requested amount of pqs\n");
if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
@@ -2269,6 +2269,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
/* EQ */
n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+ u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
enum protocol_type rdma_proto;
if (QED_IS_ROCE_PERSONALITY(p_hwfn))
@@ -2279,7 +2280,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
rdma_proto,
NULL) * 2;
- n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+ /* EQ should be able to get events from all SRQs
+ * at the same time
+ */
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
@@ -3085,7 +3089,9 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->rel_pf_id, false);
if (rc) {
- DP_NOTICE(p_hwfn, "Final cleanup failed\n");
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt,
+ QED_HW_ERR_RAMROD_FAIL,
+ "Final cleanup failed\n");
goto load_err;
}
}
@@ -4392,7 +4398,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
}
if (QED_IS_ROCE_PERSONALITY(p_hwfn))
- p_hwfn->hw_info.multi_tc_roce_en = 1;
+ p_hwfn->hw_info.multi_tc_roce_en = true;
p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
p_hwfn->hw_info.num_active_tc = 1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 4597015b8bff..f00460d00cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12400,6 +12400,13 @@ struct load_rsp_stc {
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
@@ -12488,10 +12495,14 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+/* Send crash dump commands with param[3:0] - opcode */
+#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
+#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000
+
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
@@ -12517,6 +12528,21 @@ struct public_drv_mb {
#define RESOURCE_DUMP 0
+/* DRV_MSG_CODE_MDUMP_CMD parameters */
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
+
+#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
+#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b
+#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c
+
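The mailbox parameter carries the mdump sub-opcode in its low nibble, per MDUMP_DRV_PARAM_OPCODE_MASK above; a hedged sketch of composing it (the helper is purely illustrative, not a driver API):

#include <stdint.h>

#define MDUMP_OPCODE_MASK 0x0000000f

/* Build the DRV_MSG_CODE_MDUMP_CMD mailbox param from a sub-opcode */
static uint32_t mdump_param(uint32_t opcode)
{
	return opcode & MDUMP_OPCODE_MASK;	/* param[3:0] = opcode */
}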
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
@@ -12626,6 +12652,17 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xFF
+
+/* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000
+
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF
#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
@@ -12678,6 +12715,14 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000
+#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000
+
+#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
+
u32 fw_mb_param;
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
@@ -12742,9 +12787,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_GET_FCOE_STATS,
MFW_DRV_MSG_GET_ISCSI_STATS,
MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_BW_UPDATE10,
+ MFW_DRV_MSG_FAILURE_DETECTED,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
- MFW_DRV_MSG_BW_UPDATE11,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
MFW_DRV_MSG_RESERVED,
MFW_DRV_MSG_GET_TLV_REQ,
MFW_DRV_MSG_OEM_CFG_UPDATE,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 4ab8cfaf63d1..5fa251489536 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -762,9 +762,10 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
dst_type,
length_cur);
if (qed_status) {
- DP_NOTICE(p_hwfn,
- "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
- qed_status, src_addr, dst_addr, length_cur);
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
+ "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+ qed_status, src_addr,
+ dst_addr, length_cur);
break;
}
}
@@ -837,6 +838,41 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type, char *fmt, ...)
+{
+ char buf[QED_HW_ERR_MAX_STR_SIZE];
+ va_list vl;
+ int len;
+
+ if (fmt) {
+ va_start(vl, fmt);
+ len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl);
+ va_end(vl);
+
+ if (len > QED_HW_ERR_MAX_STR_SIZE - 1)
+ len = QED_HW_ERR_MAX_STR_SIZE - 1;
+
+ DP_NOTICE(p_hwfn, "%s", buf);
+ }
+
+ /* Fan failure cannot be masked by handling of another HW error */
+ if (p_hwfn->cdev->recov_in_prog &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_DRV,
+ "Recovery is in progress. Avoid notifying about HW error %d.\n",
+ err_type);
+ return;
+ }
+
+ qed_hw_error_occurred(p_hwfn, err_type);
+
+ if (fmt)
+ qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len);
+}
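A hedged usage sketch of the new notifier: callers pass a printable reason, or NULL (as the MFW-response-failure path in this patch does) to skip the log string and the raw debug-data send while still notifying the upper layer. The wrapper below is illustrative only; p_hwfn and p_ptt come from the caller's context:

/* Illustrative caller: report a fatal condition with a formatted reason */
static void example_report(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			   int cmd, int rc)
{
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
			  "ramrod 0x%x failed with rc %d\n", cmd, rc);
}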
+
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 505e94db939d..f5b109b04b66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -315,4 +315,19 @@ int qed_init_fw_data(struct qed_dev *cdev,
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase);
+#define QED_HW_ERR_MAX_STR_SIZE 256
+
+/**
+ * @brief qed_hw_err_notify - Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param err_type
+ * @param fmt - debug data buffer to send to the MFW
+ * @param ... - buffer format args
+ */
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type, char *fmt, ...);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 9f5113639eaf..b7b974f0ef21 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -96,6 +96,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT BIT(23)
+#define ATTENTION_CLEAR_ENABLE BIT(28)
unsigned int flags;
/* Callback to call if attention will be triggered */
@@ -363,6 +364,21 @@ static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}
+static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
+{
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
+ "FW assertion!\n");
+
+ return -EINVAL;
+}
+
+static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
+{
+ DP_INFO(p_hwfn, "General attention 35!\n");
+
+ return 0;
+}
+
#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
@@ -605,13 +621,15 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
{ /* After Invert 4 */
- {"General Attention 32", ATTENTION_SINGLE,
- NULL, MAX_BLOCK_ID},
+ {"General Attention 32", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
+ MAX_BLOCK_ID},
{"General Attention %d",
(2 << ATTENTION_LENGTH_SHIFT) |
(33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
- {"General Attention 35", ATTENTION_SINGLE,
- NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
+ MAX_BLOCK_ID},
{"NWS Parity",
ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
@@ -927,9 +945,12 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
qed_int_attn_print(p_hwfn, p_aeu->block_index,
ATTN_TYPE_INTERRUPT, !b_fatal);
-
- /* If the attention is benign, no need to prevent it */
- if (!rc)
+ /* Reach assertion if attention is fatal */
+ if (b_fatal)
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
+ "`%s': Fatal attention\n",
+ p_bit_name);
+ else /* If the attention is benign, no need to prevent it */
goto out;
/* Prevent this Attention from being asserted in the future */
@@ -2349,6 +2370,11 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev)
cdev->hwfns[i].b_int_requested = false;
}
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
+{
+ cdev->attn_clr_en = clr_enable;
+}
+
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 timer_res, u16 sb_id, bool tx)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 9ad568d93ae6..e09db3386367 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -191,6 +191,17 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
+ * @brief qed_int_attn_clr_enable - sets whether the general behavior
+ * prevents attentions from being reasserted, or follows the attributes
+ * of the specific attention.
+ *
+ * @param cdev
+ * @param clr_enable
+ *
+ */
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
+
+/**
* @brief - Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 037e5978787e..4afd8572ada6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2331,7 +2331,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
cdev->ll2->cb_cookie = cookie;
}
-struct qed_ll2_cbs ll2_cbs = {
+static struct qed_ll2_cbs ll2_cbs = {
.rx_comp_cb = &qed_ll2b_complete_rx_packet,
.rx_release_cb = &qed_ll2b_release_rx_packet,
.tx_comp_cb = &qed_ll2b_complete_tx_packet,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 96356e897c80..11367a248d55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -49,6 +49,7 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
+#include <linux/aer.h>
#include "qed.h"
#include "qed_sriov.h"
@@ -129,6 +130,8 @@ static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
+ pci_disable_pcie_error_reporting(pdev);
+
if (cdev->doorbells && cdev->db_size)
iounmap(cdev->doorbells);
if (cdev->regview)
@@ -231,6 +234,12 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
return -ENOMEM;
}
+ /* AER (Advanced Error Reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc)
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Failed to configure PCIe AER [%d]\n", rc);
+
return 0;
err2:
@@ -1940,6 +1949,15 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
op->link_update(cookie, &if_link);
}
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
+{
+ void *cookie = hwfn->cdev->ops_cookie;
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+
+ if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
+ op->bw_update(cookie);
+}
+
static int qed_drain(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
@@ -2459,6 +2477,39 @@ void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
ops->schedule_recovery_handler(cookie);
}
+char *qed_hw_err_type_descr[] = {
+ [QED_HW_ERR_FAN_FAIL] = "Fan Failure",
+ [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
+ [QED_HW_ERR_HW_ATTN] = "HW Attention",
+ [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
+ [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
+ [QED_HW_ERR_FW_ASSERT] = "FW Assertion",
+ [QED_HW_ERR_LAST] = "Unknown",
+};
+
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type)
+{
+ struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+ char *err_str;
+
+ if (err_type > QED_HW_ERR_LAST)
+ err_type = QED_HW_ERR_LAST;
+ err_str = qed_hw_err_type_descr[err_type];
+
+ DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
+
+ /* Call the HW error handler of the protocol driver.
+ * If it is not available, fall back to minimal handling and prevent
+ * HW attentions from being reasserted.
+ */
+ if (ops && ops->schedule_hw_err_handler)
+ ops->schedule_hw_err_handler(cookie, err_type);
+ else
+ qed_int_attn_clr_enable(p_hwfn->cdev, true);
+}
+
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
void *handle)
{
@@ -2680,6 +2731,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.set_led = &qed_set_led,
.recovery_process = &qed_recovery_process,
.recovery_prolog = &qed_recovery_prolog,
+ .attn_clr_enable = &qed_int_attn_clr_enable,
.update_drv_state = &qed_update_drv_state,
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 280527cc0578..9624616806e7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -575,6 +575,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
qed_mcp_cmd_set_blocking(p_hwfn, true);
+ qed_hw_err_notify(p_hwfn, p_ptt,
+ QED_HW_ERR_MFW_RESP_FAIL, NULL);
return -EAGAIN;
}
@@ -1704,6 +1706,127 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
&resp, &param);
}
+static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ /* A single notification should be sent to the upper driver in CMT mode */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
+ "Fan failure was detected on the network interface card and it's going to be shut down.\n");
+}
+
+struct qed_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
+static int
+qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mdump_cmd_params *p_mdump_cmd_params)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_INFO(p_hwfn,
+ "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = -EOPNOTSUPP;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+ int rc;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = p_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
+
+ rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct mdump_retain_data_stc mdump_retain;
+ int rc;
+
+ /* In CMT mode - no need for more than a single acknowledgment to the
+ * MFW, and no more than a single notification to the upper driver.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == 0 && mdump_retain.valid)
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch,
+ mdump_retain.pf, mdump_retain.status);
+ else
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device\n");
+
+ DP_NOTICE(p_hwfn,
+ "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
+ qed_mcp_mdump_ack(p_hwfn, p_ptt);
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
+}
+
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct public_func shmem_info;
@@ -1850,6 +1973,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_S_TAG_UPDATE:
qed_mcp_update_stag(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_FAILURE_DETECTED:
+ qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+ qed_mcp_handle_critical_error(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_GET_TLV_REQ:
qed_mfw_tlv_req(p_hwfn);
break;
@@ -3819,3 +3948,127 @@ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
DRV_MSG_CODE_SET_NVM_CFG_OPTION,
mb_param, &resp, &param, len, (u32 *)p_buf);
}
+
+#define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN
+#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32)
+#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
+ (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
+
+static int
+__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
+ DP_ERR(p_hwfn,
+ "Debug data size is %d while it should not exceed %d\n",
+ size, QED_MCP_DBG_DATA_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
+ SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
+ mb_params.p_data_src = p_buf;
+ mb_params.data_src_size = size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
+ DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
+ return -EBUSY;
+ } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send debug data to the MFW [resp 0x%08x]\n",
+ mb_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_mcp_dbg_data_type {
+ QED_MCP_DBG_DATA_TYPE_RAW,
+};
+
+/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
+#define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0
+#define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff
+#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12
+#define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000
+#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
+#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
+#define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28
+#define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000
+
+#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1
+#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
+
+static int
+qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
+{
+ u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
+ u32 tmp_size = size, *p_header, *p_payload;
+ u8 flags = 0;
+ u16 seq;
+ int rc;
+
+ p_header = (u32 *)raw_data;
+ p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
+
+ seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
+
+ /* First chunk is marked as 'first' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+
+ *p_header = 0;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
+
+ while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
+ memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
+ rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ QED_MCP_DBG_DATA_MAX_SIZE);
+ if (rc)
+ return rc;
+
+ /* Clear the 'first' marking after sending the first chunk */
+ if (p_tmp_buf == p_buf) {
+ flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
+ flags);
+ }
+
+ p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ }
+
+ /* Last chunk is marked as 'last' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ memcpy(p_payload, p_tmp_buf, tmp_size);
+
+ /* Casting the remaining size to u8 is ok since at this point it is <= 32 */
+ return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
+ tmp_size));
+}
+
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
+{
+ return qed_mcp_send_debug_data(p_hwfn, p_ptt,
+ QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
+}
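For reference, a compact sketch of the header packing implemented above, using explicit shifts in place of SET_MFW_FIELD (the helper name is illustrative, not driver API):

/* Illustrative only: layout per the defines above,
 * [31:28] PFID, [27:20] flags, [19:12] type, [11:0] serial number.
 */
static u32 qed_dbg_hdr_sketch(u8 pf, u8 flags, u8 type, u16 sn)
{
	return ((u32)(pf & 0xf) << 28) | ((u32)flags << 20) |
	       ((u32)type << 12) | (sn & 0xfff);
}

With QED_MCP_DBG_DATA_MAX_SIZE at 32 bytes (as the final-cast comment above implies), the per-chunk payload is 28 bytes, so a 100-byte buffer goes out as four mailbox messages: three full 28-byte chunks and a 16-byte tail, with FLAGS_FIRST set only in the first header and FLAGS_LAST only in the last.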
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 9c4c2763de8d..5750b4c5ef63 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -685,6 +685,18 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
*/
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+/**
+ * @brief Send raw debug data to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_buf - raw debug data buffer
+ * @param size - buffer size
+ */
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -731,6 +743,9 @@ struct qed_mcp_info {
/* Capabilities negotiated with the MFW */
u32 capabilities;
+
+ /* S/N for debug data mailbox commands */
+ atomic_t dbg_data_seq;
};
struct qed_mcp_mb_params {
@@ -1001,6 +1016,19 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
+/**
+ * @brief - Gets the mdump retained data from the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_retain
+ *
+ * @return 0 upon success.
+ */
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain);
+
/**
* @brief - Sets the MFW's max value for the given resource
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 38b1f402f7ed..19c0c8864da1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -212,13 +212,22 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
goto free_rdma_port;
}
+ /* Allocate bit map for XRC Domains */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
+ QED_RDMA_MAX_XRCDS, "XRCD");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate xrcd_map,rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
/* Allocate DPI bitmap */
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
p_hwfn->dpi_count, "DPI");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate DPI bitmap, rc = %d\n", rc);
- goto free_pd_map;
+ goto free_xrcd_map;
}
/* Allocate bitmap for cq's. The maximum number of CQs is bound to
@@ -271,14 +280,27 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
goto free_cid_map;
}
+ /* The first SRQ follows the last XRC SRQ. This means that the
+ * SRQ IDs start from an offset equal to max_xrc_srqs.
+ */
+ p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
+ rc = qed_rdma_bmap_alloc(p_hwfn,
+ &p_rdma_info->xrc_srq_map,
+ p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
+ goto free_real_cid_map;
+ }
+
/* Allocate bitmap for srqs */
- p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
+ p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
p_rdma_info->num_srqs, "SRQ");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate srq bitmap, rc = %d\n", rc);
- goto free_real_cid_map;
+ goto free_xrc_srq_map;
}
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
@@ -292,6 +314,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
free_srq_map:
kfree(p_rdma_info->srq_map.bitmap);
+free_xrc_srq_map:
+ kfree(p_rdma_info->xrc_srq_map.bitmap);
free_real_cid_map:
kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
@@ -304,6 +328,8 @@ free_cq_map:
kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
kfree(p_rdma_info->dpi_map.bitmap);
+free_xrcd_map:
+ kfree(p_rdma_info->xrcd_map.bitmap);
free_pd_map:
kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
@@ -377,6 +403,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
kfree(p_rdma_info->port);
kfree(p_rdma_info->dev);
@@ -499,7 +526,6 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
dev->max_mw = 0;
- dev->max_fmr = QED_RDMA_MAX_FMR;
dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
dev->max_pkey = QED_RDMA_MAX_P_KEY;
@@ -612,7 +638,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
QED_RDMA_CNQ_RAM);
p_params_header->num_cnqs = params->desired_cnq;
-
+ p_params_header->first_reg_srq_id =
+ cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
+ p_params_header->reg_srq_base_addr =
+ cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
p_params_header->cq_ring_mode = 1;
else
@@ -983,6 +1012,41 @@ static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
+static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->xrcd_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n");
+ return rc;
+ }
+
+ *xrcd_id = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
+ return rc;
+}
+
+static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
@@ -1306,11 +1370,14 @@ qed_rdma_create_qp(void *rdma_cxt,
qp->resp_offloaded = false;
qp->e2e_flow_control_en = qp->use_srq ? false : true;
qp->stats_queue = in_params->stats_queue;
+ qp->qp_type = in_params->qp_type;
+ qp->xrcd_id = in_params->xrcd_id;
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
qp->qpid = qp->icid;
} else {
+ qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE);
rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
qp->qpid = ((0xFF << 16) | qp->icid);
}
@@ -1418,6 +1485,18 @@ static int qed_rdma_modify_qp(void *rdma_cxt,
qp->cur_state);
}
+ switch (qp->qp_type) {
+ case QED_RDMA_QP_TYPE_XRC_INI:
+ qp->has_req = 1;
+ break;
+ case QED_RDMA_QP_TYPE_XRC_TGT:
+ qp->has_resp = 1;
+ break;
+ default:
+ qp->has_req = 1;
+ qp->has_resp = 1;
+ }
+
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
enum qed_iwarp_qp_state new_state =
qed_roce2iwarp_state(qp->cur_state);
@@ -1657,6 +1736,15 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
return QED_AFFIN_HWFN(cdev);
}
+static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn,
+ bool is_xrc)
+{
+ if (is_xrc)
+ return &p_hwfn->p_rdma_info->xrc_srq_map;
+
+ return &p_hwfn->p_rdma_info->srq_map;
+}
+
static int qed_rdma_modify_srq(void *rdma_cxt,
struct qed_rdma_modify_srq_in_params *in_params)
{
@@ -1686,8 +1774,8 @@ static int qed_rdma_modify_srq(void *rdma_cxt,
if (rc)
return rc;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x",
- in_params->srq_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
+ in_params->srq_id, in_params->is_xrc);
return rc;
}
@@ -1702,6 +1790,7 @@ qed_rdma_destroy_srq(void *rdma_cxt,
struct qed_spq_entry *p_ent;
struct qed_bmap *bmap;
u16 opaque_fid;
+ u16 offset;
int rc;
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1723,14 +1812,16 @@ qed_rdma_destroy_srq(void *rdma_cxt,
if (rc)
return rc;
- bmap = &p_hwfn->p_rdma_info->srq_map;
+ bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
+ offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
+ qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x",
- in_params->srq_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
+ in_params->srq_id, in_params->is_xrc);
return rc;
}
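A note on the ID scheme used in the destroy path above and the create path below: XRC SRQs occupy the low range of wire SRQ IDs and use their bitmap index directly, while regular SRQ IDs are biased by srq_id_offset (which equals xrc_srq_count), so the two ranges cannot collide. A minimal sketch of the mapping, with illustrative helper names:

/* Sketch: wire SRQ ID <-> per-map bitmap index, per the offset rule. */
static u16 srq_wire_id(bool is_xrc, u16 bmap_id, u16 srq_id_offset)
{
	return is_xrc ? bmap_id : bmap_id + srq_id_offset;
}

static u16 srq_bmap_id(bool is_xrc, u16 wire_id, u16 srq_id_offset)
{
	return is_xrc ? wire_id : wire_id - srq_id_offset;
}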
@@ -1748,24 +1839,26 @@ qed_rdma_create_srq(void *rdma_cxt,
u16 opaque_fid, srq_id;
struct qed_bmap *bmap;
u32 returned_id;
+ u16 offset;
int rc;
- bmap = &p_hwfn->p_rdma_info->srq_map;
+ bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
if (rc) {
- DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
+ DP_NOTICE(p_hwfn,
+ "failed to allocate xrc/srq id (is_xrc=%u)\n",
+ in_params->is_xrc);
return rc;
}
- elem_type = QED_ELEM_SRQ;
+ elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ);
rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
if (rc)
goto err;
- /* returned id is no greater than u16 */
- srq_id = (u16)returned_id;
+
opaque_fid = p_hwfn->hw_info.opaque_fid;
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1782,20 +1875,34 @@ qed_rdma_create_srq(void *rdma_cxt,
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
- p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
p_ramrod->page_size = cpu_to_le16(in_params->page_size);
DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
+ offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
+ srq_id = (u16)returned_id + offset;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
+ if (in_params->is_xrc) {
+ SET_FIELD(p_ramrod->flags,
+ RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
+ SET_FIELD(p_ramrod->flags,
+ RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
+ in_params->reserved_key_en);
+ p_ramrod->xrc_srq_cq_cid =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ in_params->cq_cid);
+ p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id);
+ }
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (rc)
goto err;
out_params->srq_id = srq_id;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "SRQ created Id = %x\n", out_params->srq_id);
-
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "XRC/SRQ created Id = %x (is_xrc=%u)\n",
+ out_params->srq_id, in_params->is_xrc);
return rc;
err:
@@ -1961,6 +2068,8 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
.rdma_alloc_pd = &qed_rdma_alloc_pd,
.rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd,
+ .rdma_dealloc_xrcd = &qed_rdma_free_xrcd,
.rdma_create_cq = &qed_rdma_create_cq,
.rdma_destroy_cq = &qed_rdma_destroy_cq,
.rdma_create_qp = &qed_rdma_create_qp,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 3689fe3e5935..1e69d5bb0a70 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -45,7 +45,6 @@
#include "qed_iwarp.h"
#include "qed_roce.h"
-#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
#define QED_RDMA_MAX_P_KEY (1)
#define QED_RDMA_MAX_WQE (0x7FFF)
#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
@@ -63,6 +62,11 @@
#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC
+ * SRQs is much smaller so there's no need to have that many domains.
+ */
+#define QED_RDMA_MAX_XRCDS (roundup_pow_of_two(RDMA_MAX_XRC_SRQS))
+
enum qed_rdma_toggle_bit {
QED_RDMA_TOGGLE_BIT_CLEAR = 0,
QED_RDMA_TOGGLE_BIT_SET = 1
@@ -81,9 +85,11 @@ struct qed_rdma_info {
struct qed_bmap cq_map;
struct qed_bmap pd_map;
+ struct qed_bmap xrcd_map;
struct qed_bmap tid_map;
struct qed_bmap qp_map;
struct qed_bmap srq_map;
+ struct qed_bmap xrc_srq_map;
struct qed_bmap cid_map;
struct qed_bmap tcp_cid_map;
struct qed_bmap real_cid_map;
@@ -111,6 +117,7 @@ struct qed_rdma_qp {
u32 qpid;
u16 icid;
enum qed_roce_qp_state cur_state;
+ enum qed_rdma_qp_type qp_type;
enum qed_iwarp_qp_state iwarp_state;
bool use_srq;
bool signal_all;
@@ -153,18 +160,21 @@ struct qed_rdma_qp {
dma_addr_t orq_phys_addr;
u8 orq_num_pages;
bool req_offloaded;
+ bool has_req;
/* responder */
u8 max_rd_atomic_resp;
u32 rq_psn;
u16 rq_cq_id;
u16 rq_num_pages;
+ u16 xrcd_id;
dma_addr_t rq_pbl_ptr;
void *irq;
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
u32 cq_prod;
+ bool has_resp;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
@@ -172,8 +182,17 @@ struct qed_rdma_qp {
void *shared_queue;
dma_addr_t shared_queue_phys_addr;
struct qed_iwarp_ep *ep;
+ u8 edpm_mode;
};
+static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp)
+{
+ if (qp->qp_type == QED_RDMA_QP_TYPE_XRC_TGT ||
+ qp->qp_type == QED_RDMA_QP_TYPE_XRC_INI)
+ return true;
+
+ return false;
+}
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 37e70562a964..4566815f7b87 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -254,6 +254,9 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
int rc;
u8 tc;
+ if (!qp->has_resp)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
/* Allocate DMA-able memory for IRQ */
@@ -315,6 +318,10 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
qp->min_rnr_nak_timer);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
+ qed_rdma_is_xrc_qp(qp));
+
p_ramrod->max_ird = qp->max_rd_atomic_resp;
p_ramrod->traffic_class = qp->traffic_class_tos;
p_ramrod->hop_limit = qp->hop_limit_ttl;
@@ -335,6 +342,7 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
+ p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);
tc = qed_roce_get_qp_tc(p_hwfn, qp);
regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
@@ -395,6 +403,9 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
int rc;
u8 tc;
+ if (!qp->has_req)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
/* Allocate DMA-able memory for ORQ */
@@ -444,6 +455,13 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
qp->rnr_retry_cnt);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
+ qed_rdma_is_xrc_qp(qp));
+
+ SET_FIELD(p_ramrod->flags2,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode);
+
p_ramrod->max_ord = qp->max_rd_atomic_req;
p_ramrod->traffic_class = qp->traffic_class_tos;
p_ramrod->hop_limit = qp->hop_limit_ttl;
@@ -517,6 +535,9 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent;
int rc;
+ if (!qp->has_resp)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (move_to_err && !qp->resp_offloaded)
@@ -611,6 +632,9 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent;
int rc;
+ if (!qp->has_req)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (move_to_err && !(qp->req_offloaded))
@@ -705,6 +729,11 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
dma_addr_t ramrod_res_phys;
int rc;
+ if (!qp->has_resp) {
+ *cq_prod = 0;
+ return 0;
+ }
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
*cq_prod = qp->cq_prod;
@@ -736,9 +765,9 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
- p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
- &ramrod_res_phys, GFP_KERNEL);
+ p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
if (!p_ramrod_res) {
rc = -ENOMEM;
@@ -785,6 +814,9 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
dma_addr_t ramrod_res_phys;
int rc = -ENOMEM;
+ if (!qp->has_req)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (!qp->req_offloaded)
@@ -872,10 +904,10 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
}
/* Send a query responder ramrod to FW to get RQ-PSN and state */
- p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(*p_resp_ramrod_res),
- &resp_ramrod_res_phys, GFP_KERNEL);
+ p_resp_ramrod_res =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_resp_ramrod_res),
+ &resp_ramrod_res_phys, GFP_KERNEL);
if (!p_resp_ramrod_res) {
DP_NOTICE(p_hwfn,
"qed query qp failed: cannot allocate memory (ramrod)\n");
@@ -920,8 +952,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
}
/* Send a query requester ramrod to FW to get SQ-PSN and state */
- p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(*p_req_ramrod_res),
&req_ramrod_res_phys,
GFP_KERNEL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index f5f3c03b9dd2..790c28d696a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -160,12 +160,16 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
return 0;
}
err:
- DP_NOTICE(p_hwfn,
- "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
- le32_to_cpu(p_ent->elem.hdr.cid),
- p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
- le16_to_cpu(p_ent->elem.hdr.echo));
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
+ "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ le32_to_cpu(p_ent->elem.hdr.cid),
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ le16_to_cpu(p_ent->elem.hdr.echo));
+ qed_ptt_release(p_hwfn, p_ptt);
return -EBUSY;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 368e88565783..aabeaf03135e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -32,6 +32,7 @@
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
+#include <linux/crash_dump.h>
#include <linux/types.h>
#include "qed_vf.h"
@@ -40,9 +41,12 @@
#define QED_VF_ARRAY_LENGTH (3)
#ifdef CONFIG_QED_SRIOV
-#define IS_VF(cdev) ((cdev)->b_is_vf)
-#define IS_PF(cdev) (!((cdev)->b_is_vf))
-#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
+#define IS_VF(cdev) (is_kdump_kernel() ? \
+ (0) : ((cdev)->b_is_vf))
+#define IS_PF(cdev) (is_kdump_kernel() ? \
+ (1) : !((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (is_kdump_kernel() ? \
+ (0) : !!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev) (0)
#define IS_PF(cdev) (1)
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 234c6f30effb..8857da1208d7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -278,6 +278,14 @@ struct qede_dev {
struct qede_rdma_dev rdma_info;
struct bpf_prog *xdp_prog;
+
+ unsigned long err_flags;
+#define QEDE_ERR_IS_HANDLED 31
+#define QEDE_ERR_ATTN_CLR_EN 0
+#define QEDE_ERR_GET_DBG_INFO 1
+#define QEDE_ERR_IS_RECOVERABLE 2
+#define QEDE_ERR_WARN 3
+
struct qede_dump_info dump_info;
};
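err_flags splits into a single guard bit (QEDE_ERR_IS_HANDLED, bit 31) and low-order work bits that describe what the handler should do. Condensed from the Tx-timeout and error-scheduling paths added later in this patch, the guard pattern is roughly:

/* Condensed sketch, not verbatim driver code. */
if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags))
	return;	/* another HW error is already being handled */

set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);	/* sleepable handler clears IS_HANDLED when done */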
@@ -485,11 +493,15 @@ struct qede_fastpath {
#define QEDE_SP_RECOVERY 0
#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RSVD1 2
+#define QEDE_SP_RSVD2 3
+#define QEDE_SP_HW_ERR 4
+#define QEDE_SP_ARFS_CONFIG 5
+#define QEDE_SP_AER 7
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-#define QEDE_SP_ARFS_CONFIG 4
#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
#endif
@@ -521,7 +533,6 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
-void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq, int *len);
@@ -574,12 +585,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_KDUMP_MIN 63
#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_KDUMP_MIN 63
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
#define QEDE_MIN_PKT_LEN 64
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 812c7766e096..24cc68391ac4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -190,12 +190,14 @@ static const struct {
enum {
QEDE_PRI_FLAG_CMT,
QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
+ QEDE_PRI_FLAG_RECOVER_ON_ERROR,
QEDE_PRI_FLAG_LEN,
};
static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Coupled-Function",
"SmartAN capable",
+ "Recover on error",
};
enum qede_ethtool_tests {
@@ -417,9 +419,30 @@ static u32 qede_get_priv_flags(struct net_device *dev)
if (edev->dev_info.common.smart_an)
flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT);
+ if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
+ flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
+
return flags;
}
+static int qede_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 cflags = qede_get_priv_flags(dev);
+ u32 dflags = flags ^ cflags;
+
+ /* can only change RECOVER_ON_ERROR flag */
+ if (dflags & ~BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ return -EINVAL;
+
+ if (flags & BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ set_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+ else
+ clear_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+
+ return 0;
+}
+
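Once qede_set_priv_flags is wired into qede_ethtool_ops further down, the flag becomes togglable from userspace; assuming the standard ethtool syntax for multi-word private flags, that is roughly: ethtool --set-priv-flags <ifname> 'Recover on error' on. The XOR against the current flags makes any attempt to change a bit other than RECOVER_ON_ERROR fail with -EINVAL.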
struct qede_link_mode_mapping {
u32 qed_link_mode;
u32 ethtool_link_mode;
@@ -2098,6 +2121,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_phys_id = qede_set_phys_id,
.get_ethtool_stats = qede_get_ethtool_stats,
.get_priv_flags = qede_get_priv_flags,
+ .set_priv_flags = qede_set_priv_flags,
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index c6c20776b474..7598ebe0962a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1066,6 +1066,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.rxq = &rxq->xdp_rxq;
+ xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 34fa3917eb33..b2d154258b07 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
@@ -60,6 +61,7 @@
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
+#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"
@@ -124,6 +126,8 @@ static const struct pci_device_id qede_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
#define TX_TIMEOUT (5 * HZ)
@@ -135,10 +139,12 @@ static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
struct qed_generic_tlvs *data);
-
+static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -203,6 +209,10 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
}
#endif
+static const struct pci_error_handlers qede_err_handler = {
+ .error_detected = qede_io_error_detected,
+};
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
@@ -212,6 +222,7 @@ static struct pci_driver qede_pci_driver = {
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
+ .err_handler = &qede_err_handler,
};
static struct qed_eth_cb_ops qede_ll_ops = {
@@ -221,6 +232,7 @@ static struct qed_eth_cb_ops qede_ll_ops = {
#endif
.link_update = qede_link_update,
.schedule_recovery_handler = qede_schedule_recovery_handler,
+ .schedule_hw_err_handler = qede_schedule_hw_err_handler,
.get_generic_tlv_data = qede_get_generic_tlv_data,
.get_protocol_tlv_data = qede_get_eth_tlv_data,
},
@@ -527,6 +539,51 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
+static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ DP_NOTICE(edev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl),
+ jiffies);
+}
+
+static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_tx_queue *txq;
+ int cos;
+
+ netif_carrier_off(dev);
+ DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
+
+ if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
+ return;
+
+ for_each_cos_in_txq(edev, cos) {
+ txq = &edev->fp_array[txqueue].txq[cos];
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, txq);
+ }
+
+ if (IS_VF(edev))
+ return;
+
+ if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) {
+ DP_INFO(edev,
+ "Avoid handling a Tx timeout while another HW error is being handled\n");
+ return;
+ }
+
+ set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -614,6 +671,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
.ndo_do_ioctl = qede_ioctl,
+ .ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
.ndo_set_vf_vlan = qede_set_vf_vlan,
@@ -707,8 +765,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->dp_module = dp_module;
edev->dp_level = dp_level;
edev->ops = qed_ops;
- edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
- edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+ if (is_kdump_kernel()) {
+ edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
+ edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
+ } else {
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ }
DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
info->num_queues, info->num_queues);
@@ -974,7 +1038,8 @@ static void qede_sp_task(struct work_struct *work)
/* SRIOV must be disabled outside the lock to avoid a deadlock.
* The recovery of the active VFs is currently not supported.
*/
- qede_sriov_configure(edev->pdev, 0);
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
#endif
qede_lock(edev);
qede_recovery_handler(edev);
@@ -993,7 +1058,20 @@ static void qede_sp_task(struct work_struct *work)
qede_process_arfs_filters(edev, false);
}
#endif
+ if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
+ qede_generic_hw_err_handler(edev);
__qede_unlock(edev);
+
+ if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ edev->ops->common->recovery_process(edev->cdev);
+ }
}
static void qede_update_pf_params(struct qed_dev *cdev)
@@ -1187,7 +1265,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case QEDE_PRIVATE_VF:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a VF\n");
- is_vf = true;
+ is_vf = is_kdump_kernel() ? false : true;
break;
default:
if (debug & QED_LOG_VERBOSE_MASK)
@@ -1398,7 +1476,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
if (rxq->rx_buf_size + size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE - size;
- /* Segment size to spilt a page in multiple equal parts ,
+ /* Segment size to split a page in multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
*/
if (!edev->xdp_prog) {
@@ -1694,7 +1772,7 @@ static void qede_init_fp(struct qede_dev *edev)
txq->ndev_txq_id = ndev_tx_id;
if (edev->dev_info.is_legacy)
- txq->is_legacy = 1;
+ txq->is_legacy = true;
txq->dev = &edev->pdev->dev;
}
@@ -2482,6 +2560,100 @@ err:
qede_recovery_failed(edev);
}
+static void qede_atomic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Get a call trace of the flow that led to the error */
+ WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
+
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
+ edev->ops->common->attn_clr_enable(cdev, true);
+
+ DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
+}
+
+static void qede_generic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Trigger a recovery process.
+ * This is placed in the sleep requiring section just to make
+ * sure it is the last one, and that all the other operations
+ * were completed.
+ */
+ if (test_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags))
+ edev->ops->common->recovery_process(cdev);
+
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+
+ DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
+}
+
+static void qede_set_hw_err_flags(struct qede_dev *edev,
+ enum qed_hw_err_type err_type)
+{
+ unsigned long err_flags = 0;
+
+ switch (err_type) {
+ case QED_HW_ERR_DMAE_FAIL:
+ set_bit(QEDE_ERR_WARN, &err_flags);
+ fallthrough;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
+ set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
+ break;
+
+ default:
+ DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
+ break;
+ }
+
+ edev->err_flags |= err_flags;
+}
+
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qede_dev *edev = dev;
+
+ /* Fan failure cannot be masked by handling of another HW error or by a
+ * concurrent recovery process.
+ */
+ if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_INFO(edev,
+ "Avoid scheduling an error handling while another HW error is being handled\n");
+ return;
+ }
+
+ if (err_type >= QED_HW_ERR_LAST) {
+ DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+ return;
+ }
+
+ qede_set_hw_err_flags(edev, err_type);
+ qede_atomic_hw_err_handler(edev);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a error handler [err_type %d]\n", err_type);
+}
+
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
@@ -2579,3 +2751,49 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
etlv->num_txqs_full_set = true;
etlv->num_rxqs_full_set = true;
}
+
+/**
+ * qede_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev)
+ return PCI_ERS_RESULT_NONE;
+
+ DP_NOTICE(edev, "IO error detected [%d]\n", state);
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev, "Device already in the recovery state\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* PF handles the recovery of its VFs */
+ if (IS_VF(edev)) {
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "VF recovery is handled by its PF\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ set_bit(QEDE_SP_AER, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ __qede_unlock(edev);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 134611aa2c9a..d838774af5a6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1880,12 +1880,6 @@ static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
}
-static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
- ulong off, u32 data)
-{
- return adapter->ahw->hw_ops->write_reg(adapter, off, data);
-}
-
static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
u8 *mac, u8 function)
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 2a533280b124..29b9c728a65e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
ahw->diag_cnt = 0;
ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
if (ret)
- goto fail_diag_irq;
+ goto fail_mbx_args;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
intrpt_id = ahw->intr_tbl[0].id;
@@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
done:
qlcnic_free_mbx_args(&cmd);
+
+fail_mbx_args:
qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_irq:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f7c2f32237cb..7adbb03cb931 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1582,10 +1582,10 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
!adapter->fdb_mac_learn) {
qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = 1;
+ adapter->drv_mac_learn = true;
adapter->rx_mac_learn = true;
} else {
- adapter->drv_mac_learn = 0;
+ adapter->drv_mac_learn = false;
adapter->rx_mac_learn = false;
}
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 251d4ac4af02..117188e3c7de 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1431,8 +1431,9 @@ error:
}
/* Transmit the packet using specified transmit queue */
-int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
- struct sk_buff *skb)
+netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb)
{
struct emac_tpd tpd;
u32 prod_idx;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index ae08bdd9046c..920123eb8ace 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -227,8 +227,9 @@ void emac_mac_stop(struct emac_adapter *adpt);
void emac_mac_mode_config(struct emac_adapter *adpt);
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
int *num_pkts, int max_pkts);
-int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
- struct sk_buff *skb);
+netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb);
void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q);
void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
struct emac_adapter *adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 18b0c7a2d6dc..20b1b43a0e39 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -115,7 +115,8 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget)
}
/* Transmit the packet */
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 60d342f82fb3..e291e6ac40cb 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -2054,10 +2054,9 @@ static void cp_remove_one (struct pci_dev *pdev)
free_netdev(dev);
}
-#ifdef CONFIG_PM
-static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused cp_suspend(struct device *device)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
@@ -2075,16 +2074,14 @@ static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
spin_unlock_irqrestore (&cp->lock, flags);
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ device_set_wakeup_enable(device, cp->wol_enabled);
return 0;
}
-static int cp_resume (struct pci_dev *pdev)
+static int __maybe_unused cp_resume(struct device *device)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
@@ -2093,10 +2090,6 @@ static int cp_resume (struct pci_dev *pdev)
netif_device_attach (dev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D0, 0);
-
/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
cp_init_rings_index (cp);
cp_init_hw (cp);
@@ -2111,7 +2104,6 @@ static int cp_resume (struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
static const struct pci_device_id cp_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
@@ -2120,15 +2112,14 @@ static const struct pci_device_id cp_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
+static SIMPLE_DEV_PM_OPS(cp_pm_ops, cp_suspend, cp_resume);
+
static struct pci_driver cp_driver = {
.name = DRV_NAME,
.id_table = cp_pci_tbl,
.probe = cp_init_one,
.remove = cp_remove_one,
-#ifdef CONFIG_PM
- .resume = cp_resume,
- .suspend = cp_suspend,
-#endif
+ .driver.pm = &cp_pm_ops,
};
module_pci_driver(cp_driver);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 5caeb8368eab..227139d42227 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2603,17 +2603,13 @@ static void rtl8139_set_rx_mode (struct net_device *dev)
spin_unlock_irqrestore (&tp->lock, flags);
}
-#ifdef CONFIG_PM
-
-static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused rtl8139_suspend(struct device *device)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8139_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
- pci_save_state (pdev);
-
if (!netif_running (dev))
return 0;
@@ -2631,38 +2627,30 @@ static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state)
spin_unlock_irqrestore (&tp->lock, flags);
- pci_set_power_state (pdev, PCI_D3hot);
-
return 0;
}
-
-static int rtl8139_resume (struct pci_dev *pdev)
+static int __maybe_unused rtl8139_resume(struct device *device)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = dev_get_drvdata(device);
- pci_restore_state (pdev);
if (!netif_running (dev))
return 0;
- pci_set_power_state (pdev, PCI_D0);
+
rtl8139_init_ring (dev);
rtl8139_hw_start (dev);
netif_device_attach (dev);
return 0;
}
-#endif /* CONFIG_PM */
-
+static SIMPLE_DEV_PM_OPS(rtl8139_pm_ops, rtl8139_suspend, rtl8139_resume);
static struct pci_driver rtl8139_pci_driver = {
.name = DRV_NAME,
.id_table = rtl8139_pci_tbl,
.probe = rtl8139_init_one,
.remove = rtl8139_remove_one,
-#ifdef CONFIG_PM
- .suspend = rtl8139_suspend,
- .resume = rtl8139_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = &rtl8139_pm_ops,
};
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index bf5bf05970a2..4d2ec9742cee 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -19,7 +18,6 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
-#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
@@ -27,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
+#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
@@ -58,9 +57,6 @@
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
-#define R8169_MSG_DEFAULT \
- (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
#define MC_FILTER_LIMIT 32
@@ -75,6 +71,8 @@
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+#define OCP_STD_PHY_BASE 0xa400
+
#define RTL_CFG_NO_GBIT 1
/* write/read MMIO register */
@@ -85,10 +83,10 @@
#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg))
#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
-#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
-#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
-#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
-#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
+#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
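Beyond the notational cleanup, the values shift slightly: the reserved overhead was ETH_HLEN + 2 = 16 bytes and is now VLAN_ETH_HLEN + ETH_FCS_LEN = 18 + 4 = 22 bytes, so for instance JUMBO_9K moves from 9 * 1024 - 16 = 9200 to 9 * 1024 - 22 = 9194.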
static const struct {
const char *name;
@@ -176,10 +174,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
-static struct {
- u32 msg_enable;
-} debug = { -1 };
-
enum rtl_registers {
MAC0 = 0, /* Ethernet hardware address. */
MAC4 = 4,
@@ -227,10 +221,13 @@ enum rtl_registers {
CPlusCmd = 0xe0,
IntrMitigate = 0xe2,
-#define RTL_COALESCE_MASK 0x0f
-#define RTL_COALESCE_SHIFT 4
-#define RTL_COALESCE_T_MAX (RTL_COALESCE_MASK)
-#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_MASK << 2)
+#define RTL_COALESCE_TX_USECS GENMASK(15, 12)
+#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8)
+#define RTL_COALESCE_RX_USECS GENMASK(7, 4)
+#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0)
+
+#define RTL_COALESCE_T_MAX 0x0fU
+#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4)
RxDescAddrLow = 0xe4,
RxDescAddrHigh = 0xe8,
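These GENMASK-based definitions pair with the <linux/bitfield.h> include added earlier in this patch; a minimal sketch of composing and extracting an IntrMitigate value with FIELD_PREP/FIELD_GET follows (function names and usage are illustrative):

static u16 intr_mitigate_pack(u16 tx_us, u16 tx_fr, u16 rx_us, u16 rx_fr)
{
	/* each field is 4 bits wide; callers pass values <= RTL_COALESCE_T_MAX */
	return FIELD_PREP(RTL_COALESCE_TX_USECS, tx_us) |
	       FIELD_PREP(RTL_COALESCE_TX_FRAMES, tx_fr) |
	       FIELD_PREP(RTL_COALESCE_RX_USECS, rx_us) |
	       FIELD_PREP(RTL_COALESCE_RX_FRAMES, rx_fr);
}

static u16 intr_mitigate_rx_frames(u16 reg)
{
	return FIELD_GET(RTL_COALESCE_RX_FRAMES, reg);
}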
@@ -386,10 +383,12 @@ enum rtl_register_content {
/* rx_mode_bits */
AcceptErr = 0x20,
AcceptRunt = 0x10,
+#define RX_CONFIG_ACCEPT_ERR_MASK 0x30
AcceptBroadcast = 0x08,
AcceptMulticast = 0x04,
AcceptMyPhys = 0x02,
AcceptAllPhys = 0x01,
+#define RX_CONFIG_ACCEPT_OK_MASK 0x0f
#define RX_CONFIG_ACCEPT_MASK 0x3f
/* TxConfigBits */
@@ -596,7 +595,6 @@ struct rtl8169_private {
struct net_device *dev;
struct phy_device *phydev;
struct napi_struct napi;
- u32 msg_enable;
enum mac_version mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
@@ -638,8 +636,6 @@ typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
-module_param_named(debug, debug.msg_enable, int, 0);
-MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_SOFTDEP("pre: realtek");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_8168D_1);
@@ -727,53 +723,35 @@ struct rtl_cond {
const char *msg;
};
-static void rtl_udelay(unsigned int d)
-{
- udelay(d);
-}
-
static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
- void (*delay)(unsigned int), unsigned int d, int n,
- bool high)
+ unsigned long usecs, int n, bool high)
{
int i;
for (i = 0; i < n; i++) {
if (c->check(tp) == high)
return true;
- delay(d);
+ fsleep(usecs);
}
- netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
- c->msg, !high, n, d);
- return false;
-}
-static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
-{
- return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
-}
-
-static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
-{
- return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
+ if (net_ratelimit())
+ netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n",
+ c->msg, !high, n, usecs);
+ return false;
}
-static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
+static bool rtl_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
{
- return rtl_loop_wait(tp, c, msleep, d, n, true);
+ return rtl_loop_wait(tp, c, d, n, true);
}
-static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
+static bool rtl_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
{
- return rtl_loop_wait(tp, c, msleep, d, n, false);
+ return rtl_loop_wait(tp, c, d, n, false);
}
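The refactor collapses the four udelay/msleep wrapper variants into one fsleep()-based loop. A self-contained sketch of the pattern, with usleep() standing in for fsleep() and a hypothetical check callback in place of the driver's rtl_cond machinery:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* poll a condition up to n times, sleeping usecs between attempts */
static bool poll_cond(bool (*check)(void *), void *arg,
		      unsigned long usecs, int n, bool high)
{
	int i;

	for (i = 0; i < n; i++) {
		if (check(arg) == high)
			return true;
		usleep(usecs);	/* the driver uses fsleep(usecs) here */
	}
	return false;	/* timeout; the driver logs a rate-limited error */
}

static bool ready(void *arg)
{
	(void)arg;
	return true;
}

int main(void)
{
	printf("ready: %d\n", poll_cond(ready, NULL, 25, 10, true));
	return 0;
}

fsleep() picks udelay(), usleep_range(), or msleep() based on the requested delay, which is why the former msleep callers below now simply pass the delay in microseconds (10 ms becomes 10000).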
#define DECLARE_RTL_COND(name) \
@@ -789,7 +767,8 @@ static bool name ## _check(struct rtl8169_private *tp)
static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
{
if (reg & 0xffff0001) {
- netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
+ if (net_ratelimit())
+ netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg);
return true;
}
return false;
@@ -807,7 +786,7 @@ static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
- rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
+ rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
@@ -817,7 +796,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
RTL_W32(tp, GPHY_OCP, reg << 15);
- return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
+ return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
}
@@ -847,8 +826,6 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
}
-#define OCP_STD_PHY_BASE 0xa400
-
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
if (reg == 0x1f) {
@@ -897,7 +874,7 @@ static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
- rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
+ rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
/*
* According to hardware specs a 20us delay is required after write
* complete indication, but before sending next command.
@@ -911,7 +888,7 @@ static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
- value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
+ value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;
/*
@@ -934,7 +911,7 @@ static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
RTL_W32(tp, EPHY_RXER_NUM, 0);
- rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
+ rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
@@ -951,7 +928,7 @@ static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
RTL_W32(tp, EPHY_RXER_NUM, 0);
- return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
}
@@ -1037,7 +1014,7 @@ static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
+ rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
udelay(10);
}
@@ -1046,10 +1023,17 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
+static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
+{
+ /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
+ *cmd |= 0x7f0 << 18;
+}
+
DECLARE_RTL_COND(rtl_eriar_cond)
{
return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
@@ -1058,11 +1042,14 @@ DECLARE_RTL_COND(rtl_eriar_cond)
static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
u32 val, int type)
{
+ u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
+
BUG_ON((addr & 3) || (mask == 0));
RTL_W32(tp, ERIDR, val);
- RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
- rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
+ rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
@@ -1073,9 +1060,12 @@ static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
- RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
+ u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
+
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
- return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
RTL_R32(tp, ERIDR) : ~0;
}
@@ -1084,35 +1074,31 @@ static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
}
-static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
- u32 m)
+static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
{
- u32 val;
+ u32 val = rtl_eri_read(tp, addr);
- val = rtl_eri_read(tp, addr);
- rtl_eri_write(tp, addr, mask, (val & ~m) | p);
+ rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
}
-static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask,
- u32 p)
+static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
{
- rtl_w0w1_eri(tp, addr, mask, p, 0);
+ rtl_w0w1_eri(tp, addr, p, 0);
}
-static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
- u32 m)
+static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
{
- rtl_w0w1_eri(tp, addr, mask, 0, m);
+ rtl_w0w1_eri(tp, addr, 0, m);
}
-static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
+static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg)
{
- RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
+ RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff));
+ return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
RTL_R32(tp, OCPDR) : ~0;
}
-static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
+static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg)
{
return _rtl_eri_read(tp, reg, ERIAR_OOB);
}
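rtl_w0w1_eri() is a read-modify-write over an ERI register, now always performed with ERIAR_MASK_1111. A userspace sketch of the core idiom:

#include <stdio.h>
#include <stdint.h>

/* clear the m-bits, then set the p-bits */
static uint32_t w0w1(uint32_t val, uint32_t p, uint32_t m)
{
	return (val & ~m) | p;
}

int main(void)
{
	uint32_t reg = 0x0000ff00;	/* pretend ERI register content */

	/* like rtl_w0w1_eri(tp, 0x0d4, 0x0c00, 0xff00) */
	reg = w0w1(reg, 0x0c00, 0xff00);
	printf("reg = 0x%08x\n", reg);	/* 0x00000c00 */
	return 0;
}

rtl_eri_set_bits() and rtl_eri_clear_bits() are this with m = 0 or p = 0 respectively, which is why the mask argument could be dropped once all callers used the full mask.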
@@ -1122,7 +1108,7 @@ static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
{
RTL_W32(tp, OCPDR, data);
RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
+ rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
@@ -1154,12 +1140,12 @@ DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
reg = rtl8168_get_ocp_reg(tp);
- return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
+ return r8168dp_ocp_read(tp, reg) & 0x00000800;
}
DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
{
- return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
+ return r8168ep_ocp_read(tp, 0x124) & 0x00000001;
}
DECLARE_RTL_COND(rtl_ocp_tx_cond)
@@ -1170,7 +1156,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
{
RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
- rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
+ rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000);
RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
@@ -1178,15 +1164,14 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
+ rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
- r8168ep_ocp_write(tp, 0x01, 0x30,
- r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
- rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
+ r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
+ rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1209,16 +1194,15 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
+ rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
- r8168ep_ocp_write(tp, 0x01, 0x30,
- r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
+ r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
+ rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -1242,12 +1226,12 @@ static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
+ return !!(r8168dp_ocp_read(tp, reg) & 0x00008000);
}
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
+ return r8168ep_ocp_read(tp, 0x128) & 0x00000001;
}
static bool r8168_check_dash(struct rtl8169_private *tp)
@@ -1266,8 +1250,8 @@ static bool r8168_check_dash(struct rtl8169_private *tp)
static void rtl_reset_packet_filter(struct rtl8169_private *tp)
{
- rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
- rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
+ rtl_eri_clear_bits(tp, 0xdc, BIT(0));
+ rtl_eri_set_bits(tp, 0xdc, BIT(0));
}
DECLARE_RTL_COND(rtl_efusear_cond)
@@ -1279,7 +1263,7 @@ u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
- return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
+ return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
@@ -1394,11 +1378,9 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
if (rtl_is_8168evl_up(tp)) {
tmp--;
if (wolopts & WAKE_MAGIC)
- rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
- MagicPacket_v2);
+ rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2);
else
- rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
- MagicPacket_v2);
+ rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2);
} else if (rtl_is_8125(tp)) {
tmp--;
if (wolopts & WAKE_MAGIC)
@@ -1423,7 +1405,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_52:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_61:
options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
@@ -1497,19 +1479,15 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
return features;
}
-static int rtl8169_set_features(struct net_device *dev,
- netdev_features_t features)
+static void rtl_set_rx_config_features(struct rtl8169_private *tp,
+ netdev_features_t features)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- u32 rx_config;
+ u32 rx_config = RTL_R32(tp, RxConfig);
- rtl_lock_work(tp);
-
- rx_config = RTL_R32(tp, RxConfig);
if (features & NETIF_F_RXALL)
- rx_config |= (AcceptErr | AcceptRunt);
+ rx_config |= RX_CONFIG_ACCEPT_ERR_MASK;
else
- rx_config &= ~(AcceptErr | AcceptRunt);
+ rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK;
if (rtl_is_8125(tp)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -1519,6 +1497,16 @@ static int rtl8169_set_features(struct net_device *dev,
}
RTL_W32(tp, RxConfig, rx_config);
+}
+
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl_lock_work(tp);
+
+ rtl_set_rx_config_features(tp, features);
if (features & NETIF_F_RXCSUM)
tp->cp_cmd |= RxChkSum;
@@ -1568,20 +1556,6 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
rtl_unlock_work(tp);
}
-static u32 rtl8169_get_msglevel(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return tp->msg_enable;
-}
-
-static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- tp->msg_enable = value;
-}
-
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
"tx_packets",
"rx_packets",
@@ -1613,7 +1587,7 @@ DECLARE_RTL_COND(rtl_counters_cond)
return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
}
-static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
+static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
{
dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
@@ -1624,22 +1598,20 @@ static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
RTL_W32(tp, CounterAddrLow, cmd);
RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
- return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+ rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
-static bool rtl8169_reset_counters(struct rtl8169_private *tp)
+static void rtl8169_reset_counters(struct rtl8169_private *tp)
{
/*
* Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
* tally counters.
*/
- if (tp->mac_version < RTL_GIGA_MAC_VER_19)
- return true;
-
- return rtl8169_do_counters(tp, CounterReset);
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_19)
+ rtl8169_do_counters(tp, CounterReset);
}
-static bool rtl8169_update_counters(struct rtl8169_private *tp)
+static void rtl8169_update_counters(struct rtl8169_private *tp)
{
u8 val = RTL_R8(tp, ChipCmd);
@@ -1647,16 +1619,13 @@ static bool rtl8169_update_counters(struct rtl8169_private *tp)
* Some chips are unable to dump tally counters when the receiver
* is disabled. If 0xff, the chip may be in a PCI power-save state.
*/
- if (!(val & CmdRxEnb) || val == 0xff)
- return true;
-
- return rtl8169_do_counters(tp, CounterDump);
+ if (val & CmdRxEnb && val != 0xff)
+ rtl8169_do_counters(tp, CounterDump);
}
-static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
+static void rtl8169_init_counter_offsets(struct rtl8169_private *tp)
{
struct rtl8169_counters *counters = tp->counters;
- bool ret = false;
/*
* rtl8169_init_counter_offsets is called from rtl_open. On chip
@@ -1674,22 +1643,16 @@ static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
*/
if (tp->tc_offset.inited)
- return true;
-
- /* If both, reset and update fail, propagate to caller. */
- if (rtl8169_reset_counters(tp))
- ret = true;
+ return;
- if (rtl8169_update_counters(tp))
- ret = true;
+ rtl8169_reset_counters(tp);
+ rtl8169_update_counters(tp);
tp->tc_offset.tx_errors = counters->tx_errors;
tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
tp->tc_offset.tx_aborted = counters->tx_aborted;
tp->tc_offset.rx_missed = counters->rx_missed;
tp->tc_offset.inited = true;
-
- return ret;
}
static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -1760,46 +1723,34 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
* 1 1 160us 81.92us 1.31ms
*/
-/* rx/tx scale factors for one particular CPlusCmd[0:1] value */
-struct rtl_coalesce_scale {
- /* Rx / Tx */
- u32 nsecs[2];
-};
-
/* rx/tx scale factors for all CPlusCmd[0:1] cases */
struct rtl_coalesce_info {
u32 speed;
- struct rtl_coalesce_scale scalev[4]; /* each CPlusCmd[0:1] case */
+ u32 scale_nsecs[4];
};
-/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */
-#define rxtx_x1822(r, t) { \
- {{(r), (t)}}, \
- {{(r)*8, (t)*8}}, \
- {{(r)*8*2, (t)*8*2}}, \
- {{(r)*8*2*2, (t)*8*2*2}}, \
-}
+/* produce array with base delay *1, *8, *8*2, *8*2*2 */
+#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) }
+
static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
- /* speed delays: rx00 tx00 */
- { SPEED_10, rxtx_x1822(40960, 40960) },
- { SPEED_100, rxtx_x1822( 2560, 2560) },
- { SPEED_1000, rxtx_x1822( 320, 320) },
+ { SPEED_10, COALESCE_DELAY(40960) },
+ { SPEED_100, COALESCE_DELAY(2560) },
+ { SPEED_1000, COALESCE_DELAY(320) },
{ 0 },
};
static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
- /* speed delays: rx00 tx00 */
- { SPEED_10, rxtx_x1822(40960, 40960) },
- { SPEED_100, rxtx_x1822( 2560, 2560) },
- { SPEED_1000, rxtx_x1822( 5000, 5000) },
+ { SPEED_10, COALESCE_DELAY(40960) },
+ { SPEED_100, COALESCE_DELAY(2560) },
+ { SPEED_1000, COALESCE_DELAY(5000) },
{ 0 },
};
-#undef rxtx_x1822
+#undef COALESCE_DELAY
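COALESCE_DELAY(d) flattens the old rx/tx pair-of-pairs into a single array of four nanosecond scales, one per CPlusCmd[0:1] setting. A sketch of what it expands to:

#include <stdio.h>

#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) }

int main(void)
{
	/* SPEED_1000 on the 8169 family: base delay 320 ns */
	unsigned int scale_nsecs[4] = COALESCE_DELAY(320);
	int i;

	for (i = 0; i < 4; i++)
		printf("CPlusCmd[0:1]=%d -> %u ns/unit\n", i, scale_nsecs[i]);
	return 0;
}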
/* get rx/tx scale vector corresponding to current speed */
-static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
+static const struct rtl_coalesce_info *
+rtl_coalesce_info(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
const struct rtl_coalesce_info *ci;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
@@ -1819,16 +1770,8 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct rtl8169_private *tp = netdev_priv(dev);
const struct rtl_coalesce_info *ci;
- const struct rtl_coalesce_scale *scale;
- struct {
- u32 *max_frames;
- u32 *usecs;
- } coal_settings [] = {
- { &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs },
- { &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs }
- }, *p = coal_settings;
- int i;
- u16 w;
+ u32 scale, c_us, c_fr;
+ u16 intrmit;
if (rtl_is_8125(tp))
return -EOPNOTSUPP;
@@ -1836,111 +1779,111 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
memset(ec, 0, sizeof(*ec));
/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
- ci = rtl_coalesce_info(dev);
+ ci = rtl_coalesce_info(tp);
if (IS_ERR(ci))
return PTR_ERR(ci);
- scale = &ci->scalev[tp->cp_cmd & INTT_MASK];
+ scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK];
- /* read IntrMitigate and adjust according to scale */
- for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
- *p->max_frames = (w & RTL_COALESCE_MASK) << 2;
- w >>= RTL_COALESCE_SHIFT;
- *p->usecs = w & RTL_COALESCE_MASK;
- }
+ intrmit = RTL_R16(tp, IntrMitigate);
- for (i = 0; i < 2; i++) {
- p = coal_settings + i;
- *p->usecs = (*p->usecs * scale->nsecs[i]) / 1000;
+ c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit);
+ ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
- /*
- * ethtool_coalesce says it is illegal to set both usecs and
- * max_frames to 0.
- */
- if (!*p->usecs && !*p->max_frames)
- *p->max_frames = 1;
- }
+ c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit);
+ /* ethtool_coalesce states usecs and max_frames must not both be 0 */
+ ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
+
+ c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit);
+ ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
+
+ c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit);
+ ec->rx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
return 0;
}
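Decoding then scales the raw 4-bit timer units to microseconds. A sketch of the arithmetic, with the unit and scale values assumed purely for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int units = 10;	/* 4-bit field from IntrMitigate */
	unsigned int scale = 2560;	/* ns per unit (SPEED_100, CPlusCmd[0:1] = 0) */

	/* 10 * 2560 ns = 25600 ns -> rounded up to 26 us */
	printf("usecs = %u\n", DIV_ROUND_UP(units * scale, 1000));
	return 0;
}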
-/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */
-static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale(
- struct net_device *dev, u32 nsec, u16 *cp01)
+/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */
+static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec,
+ u16 *cp01)
{
const struct rtl_coalesce_info *ci;
u16 i;
- ci = rtl_coalesce_info(dev);
+ ci = rtl_coalesce_info(tp);
if (IS_ERR(ci))
- return ERR_CAST(ci);
+ return PTR_ERR(ci);
for (i = 0; i < 4; i++) {
- u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0],
- ci->scalev[i].nsecs[1]);
- if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) {
+ if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) {
*cp01 = i;
- return &ci->scalev[i];
+ return ci->scale_nsecs[i];
}
}
- return ERR_PTR(-EINVAL);
+ return -ERANGE;
}
static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct rtl8169_private *tp = netdev_priv(dev);
- const struct rtl_coalesce_scale *scale;
- struct {
- u32 frames;
- u32 usecs;
- } coal_settings [] = {
- { ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs },
- { ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs }
- }, *p = coal_settings;
- u16 w = 0, cp01;
- int i;
+ u32 tx_fr = ec->tx_max_coalesced_frames;
+ u32 rx_fr = ec->rx_max_coalesced_frames;
+ u32 coal_usec_max, units;
+ u16 w = 0, cp01 = 0;
+ int scale;
if (rtl_is_8125(tp))
return -EOPNOTSUPP;
- scale = rtl_coalesce_choose_scale(dev,
- max(p[0].usecs, p[1].usecs) * 1000, &cp01);
- if (IS_ERR(scale))
- return PTR_ERR(scale);
+ if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX)
+ return -ERANGE;
- for (i = 0; i < 2; i++, p++) {
- u32 units;
+ coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs);
+ scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01);
+ if (scale < 0)
+ return scale;
- /*
- * accept max_frames=1 we returned in rtl_get_coalesce.
- * accept it not only when usecs=0 because of e.g. the following scenario:
- *
- * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
- * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
- * - then user does `ethtool -C eth0 rx-usecs 100`
- *
- * since ethtool sends to kernel whole ethtool_coalesce
- * settings, if we do not handle rx_usecs=!0, rx_frames=1
- * we'll reject it below in `frames % 4 != 0`.
- */
- if (p->frames == 1) {
- p->frames = 0;
- }
+ /* Accept the max_frames=1 we returned in rtl_get_coalesce. Accept it
+ * not only when usecs=0 because of e.g. the following scenario:
+ *
+ * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
+ * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
+ * - then user does `ethtool -C eth0 rx-usecs 100`
+ *
+ * Since ethtool sends the whole ethtool_coalesce struct to the kernel,
+ * rx_frames has to be set to 0 if we want it ignored.
+ */
+ if (rx_fr == 1)
+ rx_fr = 0;
+ if (tx_fr == 1)
+ tx_fr = 0;
+
+ /* HW requires time limit to be set if frame limit is set */
+ if ((tx_fr && !ec->tx_coalesce_usecs) ||
+ (rx_fr && !ec->rx_coalesce_usecs))
+ return -EINVAL;
- units = p->usecs * 1000 / scale->nsecs[i];
- if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4)
- return -EINVAL;
+ w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4));
+ w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4));
- w <<= RTL_COALESCE_SHIFT;
- w |= units;
- w <<= RTL_COALESCE_SHIFT;
- w |= p->frames >> 2;
- }
+ units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale);
+ w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units);
+ units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale);
+ w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units);
rtl_lock_work(tp);
- RTL_W16(tp, IntrMitigate, swab16(w));
+ RTL_W16(tp, IntrMitigate, w);
+
+ /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */
+ if (rtl_is_8168evl_up(tp)) {
+ if (!rx_fr && !tx_fr)
+ /* disable packet counter */
+ tp->cp_cmd |= PktCntrDisable;
+ else
+ tp->cp_cmd &= ~PktCntrDisable;
+ }
tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
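rtl_set_coalesce() packs the four fields back into the 16-bit IntrMitigate value, with frame counts in units of four. A userspace sketch of the encoding; the helper macros are minimal stand-ins for linux/bitfield.h and the inputs are example values:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		(((1U << ((h) - (l) + 1)) - 1) << (l))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define RTL_COALESCE_TX_USECS	GENMASK(15, 12)
#define RTL_COALESCE_TX_FRAMES	GENMASK(11, 8)
#define RTL_COALESCE_RX_USECS	GENMASK(7, 4)
#define RTL_COALESCE_RX_FRAMES	GENMASK(3, 0)

int main(void)
{
	unsigned int tx_fr = 16, rx_fr = 8;	/* frames; hardware counts in 4s */
	unsigned int tx_units = 5, rx_units = 10;
	uint16_t w = 0;

	w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4));
	w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4));
	w |= FIELD_PREP(RTL_COALESCE_TX_USECS, tx_units);
	w |= FIELD_PREP(RTL_COALESCE_RX_USECS, rx_units);

	printf("IntrMitigate = 0x%04x\n", (unsigned)w);	/* 0x54a2 */
	return 0;
}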
@@ -1989,12 +1932,6 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
goto out;
}
- if (dev->phydev->autoneg == AUTONEG_DISABLE ||
- dev->phydev->duplex != DUPLEX_FULL) {
- ret = -EPROTONOSUPPORT;
- goto out;
- }
-
ret = phy_ethtool_set_eee(tp->phydev, data);
if (!ret)
@@ -2013,8 +1950,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_coalesce = rtl_get_coalesce,
.set_coalesce = rtl_set_coalesce,
- .get_msglevel = rtl8169_get_msglevel,
- .set_msglevel = rtl8169_set_msglevel,
.get_regs = rtl8169_get_regs,
.get_wol = rtl8169_get_wol,
.set_wol = rtl8169_set_wol,
@@ -2127,6 +2062,8 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
+ /* RTL8401, reportedly works if treated as RTL8101e */
+ { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 },
{ 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
{ 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
@@ -2187,7 +2124,7 @@ static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
if (tp->mac_version != RTL_GIGA_MAC_VER_38)
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
- rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
+ rtl_eri_set_bits(tp, 0x1b0, 0x0003);
}
static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
@@ -2351,7 +2288,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
- rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
+ rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
default:
@@ -2384,15 +2321,13 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
+ rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
break;
default:
break;
}
phy_resume(tp->phydev);
- /* give MAC/PHY some time to resume */
- msleep(20);
}
static void rtl_init_rxcfg(struct rtl8169_private *tp)
@@ -2411,8 +2346,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
- RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_VLAN_8125 |
- RX_DMA_BURST);
+ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
default:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
@@ -2526,7 +2460,7 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
{
RTL_W8(tp, ChipCmd, CmdReset);
- rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
+ rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
static void rtl_request_firmware(struct rtl8169_private *tp)
@@ -2538,10 +2472,8 @@ static void rtl_request_firmware(struct rtl8169_private *tp)
return;
rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
- if (!rtl_fw) {
- netif_warn(tp, ifup, tp->dev, "Unable to load firmware, out of memory\n");
+ if (!rtl_fw)
return;
- }
rtl_fw->phy_write = rtl_writephy;
rtl_fw->phy_read = rtl_readphy;
@@ -2571,31 +2503,31 @@ DECLARE_RTL_COND(rtl_txcfg_empty_cond)
return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
}
-static void rtl8169_hw_reset(struct rtl8169_private *tp)
+DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
- /* Disable interrupts */
- rtl8169_irq_mask_and_ack(tp);
-
- rtl_rx_close(tp);
+ return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+}
+static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
+{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
- break;
- case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
- RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
- rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
+ rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
+ rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
+ break;
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
default:
- RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
- udelay(100);
break;
}
+}
- rtl_hw_reset(tp);
+static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
+{
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
+ fsleep(2000);
+ rtl_wait_txrx_fifo_empty(tp);
}
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
@@ -2628,7 +2560,7 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
-static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
+static void rtl8169_set_magic_reg(struct rtl8169_private *tp)
{
u32 val;
@@ -2654,8 +2586,6 @@ static void rtl_set_rx_mode(struct net_device *dev)
u32 tmp;
if (dev->flags & IFF_PROMISC) {
- /* Unconditionally log net taps. */
- netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
rx_mode |= AcceptAllPhys;
} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
dev->flags & IFF_ALLMULTI ||
@@ -2668,7 +2598,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, dev) {
- u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ u32 bit_nr = eth_hw_addr_crc(ha) >> 26;
mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
}
@@ -2679,14 +2609,11 @@ static void rtl_set_rx_mode(struct net_device *dev)
}
}
- if (dev->features & NETIF_F_RXALL)
- rx_mode |= (AcceptErr | AcceptRunt);
-
RTL_W32(tp, MAR0 + 4, mc_filter[1]);
RTL_W32(tp, MAR0 + 0, mc_filter[0]);
tmp = RTL_R32(tp, RxConfig);
- RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode);
+ RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode);
}
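The multicast filter hashes each address into one of 64 bins: the top six bits of the Ethernet CRC select a bit spread across the two 32-bit MAR registers. A self-contained sketch, with the CRC loop modeled on the kernel's ether_crc() that eth_hw_addr_crc() wraps:

#include <stdio.h>
#include <stdint.h>

/* bitwise CRC-32, modeled on the kernel's ether_crc() */
static uint32_t ether_crc(int len, const uint8_t *data)
{
	uint32_t crc = ~0u;
	int i, bit;

	for (i = 0; i < len; i++) {
		uint8_t byte = data[i];

		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (byte & 1)) ? 0x04c11db7u : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[2] = { 0, 0 };
	uint32_t bit_nr = ether_crc(6, mac) >> 26;	/* top 6 bits: 0..63 */

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	printf("bit %u -> MAR%c\n", (unsigned)bit_nr, (bit_nr >> 5) ? '4' : '0');
	return 0;
}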
DECLARE_RTL_COND(rtl_csiar_cond)
@@ -2702,7 +2629,7 @@ static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE | func << 16);
- rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
+ rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
@@ -2712,7 +2639,7 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
CSIAR_BYTE_ENABLE);
- return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
RTL_R32(tp, CSIDR) : ~0;
}
@@ -2969,12 +2896,14 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_2);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
+ rtl_eri_set_bits(tp, 0x0d4, 0x1f00);
+ rtl_eri_set_bits(tp, 0x1d0, BIT(1));
+ rtl_reset_packet_filter(tp);
+ rtl_eri_set_bits(tp, 0x1b0, BIT(4));
rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
- rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
rtl_disable_clock_request(tp);
@@ -2994,11 +2923,11 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
rtl_reset_packet_filter(tp);
- rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
- rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4));
+ rtl_eri_set_bits(tp, 0x1b0, BIT(4));
+ rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1));
rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
@@ -3027,7 +2956,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168f_1);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
+ rtl_eri_set_bits(tp, 0x0d4, 0x1f00);
}
static void rtl_hw_start_8411(struct rtl8169_private *tp)
@@ -3045,7 +2974,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168f_1);
- rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00);
+ rtl_eri_set_bits(tp, 0x0d4, 0x0c00);
}
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
@@ -3062,11 +2991,12 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_set_bits(tp, 0x0d4, 0x1f80);
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
- rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+ rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
+ rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
}
@@ -3292,9 +3222,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
- rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
-
- rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00);
+ rtl_eri_set_bits(tp, 0xd4, 0x1f00);
+ rtl_eri_set_bits(tp, 0xdc, 0x001c);
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
@@ -3310,7 +3239,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
- rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+ rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
@@ -3347,7 +3276,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
- rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
+ rtl_eri_set_bits(tp, 0xd4, 0x1f80);
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
@@ -3358,7 +3287,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
+ rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
@@ -3450,7 +3379,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
- rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f90);
+ rtl_eri_set_bits(tp, 0xd4, 0x1f90);
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
@@ -3466,7 +3395,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
- rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+ rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
@@ -3591,7 +3520,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
+ rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00);
/* disable EEE */
rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
@@ -3667,7 +3596,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe098, 0xc302);
- rtl_udelay_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
+ rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
rtl8125_config_eee_mac(tp);
@@ -3834,7 +3763,7 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl8169_set_magic_reg(tp, tp->mac_version);
+ rtl8169_set_magic_reg(tp);
/* disable interrupt coalescing */
RTL_W16(tp, IntrMitigate, 0x0000);
@@ -3844,7 +3773,6 @@ static void rtl_hw_start(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
- tp->cp_cmd &= CPCMD_MASK;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
@@ -3866,6 +3794,7 @@ static void rtl_hw_start(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
+ rtl_set_rx_config_features(tp, tp->dev->features);
rtl_set_rx_mode(tp->dev);
rtl_irq_enable(tp);
}
@@ -3881,21 +3810,14 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
-{
- desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
- desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
-}
-
-static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
+static void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
desc->opts2 = 0;
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
-
- desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
+ WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE));
}
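The handoff pairs dma_wmb() with WRITE_ONCE() so the descriptor payload is published before ownership flips to the NIC. A userspace C11 sketch that models the pairing as a release store; this is an approximation of the ordering guarantee, not the kernel's exact barrier semantics:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_OWN	0x80000000u

struct desc {
	uint32_t opts2;
	_Atomic uint32_t opts1;	/* DescOwn lives in here */
};

static void mark_to_asic(struct desc *d, uint32_t size)
{
	d->opts2 = 0;
	/* release: opts2 must be visible before ownership flips */
	atomic_store_explicit(&d->opts1, DESC_OWN | size,
			      memory_order_release);
}

int main(void)
{
	struct desc d = { 0 };

	mark_to_asic(&d, 0x1ff8);
	printf("opts1 = 0x%08x\n", (unsigned)atomic_load(&d.opts1));
	return 0;
}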
static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
@@ -3912,8 +3834,7 @@ static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
+ netdev_err(tp->dev, "Failed to map RX DMA!\n");
__free_pages(data, get_order(R8169_RX_BUF_SIZE));
return NULL;
}
@@ -3934,15 +3855,11 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
__free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
tp->Rx_databuff[i] = NULL;
- rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
+ tp->RxDescArray[i].addr = 0;
+ tp->RxDescArray[i].opts1 = 0;
}
}
-static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
-{
- desc->opts1 |= cpu_to_le32(RingEnd);
-}
-
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
unsigned int i;
@@ -3958,7 +3875,8 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
tp->Rx_databuff[i] = data;
}
- rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
+ /* mark as last descriptor in the ring */
+ tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd);
return 0;
}
@@ -4007,10 +3925,45 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
- tp->cur_tx = tp->dirty_tx = 0;
netdev_reset_queue(tp->dev);
}
+static void rtl8169_hw_reset(struct rtl8169_private *tp)
+{
+ /* Give a racing hard_start_xmit a few cycles to complete. */
+ synchronize_rcu();
+
+ /* Disable interrupts */
+ rtl8169_irq_mask_and_ack(tp);
+
+ rtl_rx_close(tp);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
+ break;
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
+ rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
+ rtl_enable_rxdvgate(tp);
+ fsleep(2000);
+ break;
+ default:
+ RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
+ fsleep(100);
+ break;
+ }
+
+ rtl_hw_reset(tp);
+
+ rtl8169_tx_clear(tp);
+ rtl8169_init_ring_indexes(tp);
+}
+
static void rtl_reset_work(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
@@ -4018,16 +3971,12 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_disable(&tp->napi);
netif_stop_queue(dev);
- synchronize_rcu();
rtl8169_hw_reset(tp);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i);
- rtl8169_tx_clear(tp);
- rtl8169_init_ring_indexes(tp);
-
napi_enable(&tp->napi);
rtl_hw_start(tp);
netif_wake_queue(dev);
@@ -4053,7 +4002,7 @@ static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
ret = dma_mapping_error(d, mapping);
if (unlikely(ret)) {
if (net_ratelimit())
- netif_err(tp, drv, tp->dev, "Failed to map TX data!\n");
+ netdev_err(tp->dev, "Failed to map TX data!\n");
return ret;
}
@@ -4124,25 +4073,20 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
u32 transport_offset = (u32)skb_transport_offset(skb);
- u32 mss = skb_shinfo(skb)->gso_size;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 mss = shinfo->gso_size;
if (mss) {
- switch (vlan_get_protocol(skb)) {
- case htons(ETH_P_IP):
+ if (shinfo->gso_type & SKB_GSO_TCPV4) {
opts[0] |= TD1_GTSENV4;
- break;
-
- case htons(ETH_P_IPV6):
+ } else if (shinfo->gso_type & SKB_GSO_TCPV6) {
if (skb_cow_head(skb, 0))
return false;
tcp_v6_gso_csum_prep(skb);
opts[0] |= TD1_GTSENV6;
- break;
-
- default:
+ } else {
WARN_ON_ONCE(1);
- break;
}
opts[0] |= transport_offset << GTTCPHO_SHIFT;
@@ -4224,7 +4168,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first = tp->TxDescArray + entry;
if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
- netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
+ if (net_ratelimit())
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
@@ -4262,8 +4207,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);
- /* Force all memory writes to complete before notifying device */
- wmb();
+ /* rtl_tx needs to see descriptor changes before the updated tp->cur_tx */
+ smp_wmb();
tp->cur_tx += frags + 1;
@@ -4308,6 +4253,37 @@ err_stop_0:
return NETDEV_TX_BUSY;
}
+static unsigned int rtl_last_frag_len(struct sk_buff *skb)
+{
+ struct skb_shared_info *info = skb_shinfo(skb);
+ unsigned int nr_frags = info->nr_frags;
+
+ if (!nr_frags)
+ return UINT_MAX;
+
+ return skb_frag_size(info->frags + nr_frags - 1);
+}
+
+/* Workaround for hw issues with TSO on RTL8168evl */
+static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ /* IPv4 header has options field */
+ if (vlan_get_protocol(skb) == htons(ETH_P_IP) &&
+ ip_hdrlen(skb) > sizeof(struct iphdr))
+ features &= ~NETIF_F_ALL_TSO;
+
+ /* IPv4 TCP header has options field */
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 &&
+ tcp_hdrlen(skb) > sizeof(struct tcphdr))
+ features &= ~NETIF_F_ALL_TSO;
+
+ else if (rtl_last_frag_len(skb) <= 6)
+ features &= ~NETIF_F_ALL_TSO;
+
+ return features;
+}
+
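The workaround clears TSO for any skb matching one of three conditions. A sketch of that shape, using hypothetical predicate flags in place of the real IP/TCP header and last-fragment checks shown above:

#include <stdbool.h>
#include <stdio.h>

#define NETIF_F_ALL_TSO	0x30u	/* placeholder feature bits */

/* any of the three conditions disables TSO for this packet */
static unsigned int fix_tso(unsigned int features, bool ip_has_opts,
			    bool tcp_has_opts, unsigned int last_frag_len)
{
	if (ip_has_opts || tcp_has_opts || last_frag_len <= 6)
		features &= ~NETIF_F_ALL_TSO;	/* fall back to software GSO */
	return features;
}

int main(void)
{
	/* a 4-byte final fragment trips the hardware bug */
	printf("features = 0x%02x\n", fix_tso(0xffu, false, false, 4));
	return 0;
}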
static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
@@ -4316,6 +4292,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
struct rtl8169_private *tp = netdev_priv(dev);
if (skb_is_gso(skb)) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34)
+ features = rtl8168evl_fix_tso(skb, features);
+
if (transport_offset > GTTCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
@@ -4352,9 +4331,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_status_errs = pci_status_get_and_clear_errors(pdev);
- netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
- pci_cmd, pci_status_errs);
-
+ if (net_ratelimit())
+ netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
+ pci_cmd, pci_status_errs);
/*
* The recovery sequence below admits a very elaborated explanation:
* - it seems to work;
@@ -4450,15 +4429,17 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
- unsigned int cur_rx, rx_left;
- unsigned int count;
+ unsigned int cur_rx, rx_left, count;
+ struct device *d = tp_to_dev(tp);
cur_rx = tp->cur_rx;
for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
- unsigned int entry = cur_rx % NUM_RX_DESC;
- const void *rx_buf = page_address(tp->Rx_databuff[entry]);
+ unsigned int pkt_size, entry = cur_rx % NUM_RX_DESC;
struct RxDesc *desc = tp->RxDescArray + entry;
+ struct sk_buff *skb;
+ const void *rx_buf;
+ dma_addr_t addr;
u32 status;
status = le32_to_cpu(desc->opts1);
@@ -4472,69 +4453,65 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
dma_rmb();
if (unlikely(status & RxRES)) {
- netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
- status);
+ if (net_ratelimit())
+ netdev_warn(dev, "Rx ERROR. status = %08x\n",
+ status);
dev->stats.rx_errors++;
if (status & (RxRWT | RxRUNT))
dev->stats.rx_length_errors++;
if (status & RxCRC)
dev->stats.rx_crc_errors++;
- if (status & (RxRUNT | RxCRC) && !(status & RxRWT) &&
- dev->features & NETIF_F_RXALL) {
- goto process_pkt;
- }
- } else {
- unsigned int pkt_size;
- struct sk_buff *skb;
-
-process_pkt:
- pkt_size = status & GENMASK(13, 0);
- if (likely(!(dev->features & NETIF_F_RXFCS)))
- pkt_size -= ETH_FCS_LEN;
- /*
- * The driver does not support incoming fragmented
- * frames. They are seen as a symptom of over-mtu
- * sized frames.
- */
- if (unlikely(rtl8169_fragmented_frame(status))) {
- dev->stats.rx_dropped++;
- dev->stats.rx_length_errors++;
- goto release_descriptor;
- }
- skb = napi_alloc_skb(&tp->napi, pkt_size);
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ if (!(dev->features & NETIF_F_RXALL))
goto release_descriptor;
- }
+ else if (status & RxRWT || !(status & (RxRUNT | RxCRC)))
+ goto release_descriptor;
+ }
- dma_sync_single_for_cpu(tp_to_dev(tp),
- le64_to_cpu(desc->addr),
- pkt_size, DMA_FROM_DEVICE);
- prefetch(rx_buf);
- skb_copy_to_linear_data(skb, rx_buf, pkt_size);
- skb->tail += pkt_size;
- skb->len = pkt_size;
+ pkt_size = status & GENMASK(13, 0);
+ if (likely(!(dev->features & NETIF_F_RXFCS)))
+ pkt_size -= ETH_FCS_LEN;
- dma_sync_single_for_device(tp_to_dev(tp),
- le64_to_cpu(desc->addr),
- pkt_size, DMA_FROM_DEVICE);
+ /* The driver does not support incoming fragmented frames.
+ * They are seen as a symptom of over-mtu sized frames.
+ */
+ if (unlikely(rtl8169_fragmented_frame(status))) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_length_errors++;
+ goto release_descriptor;
+ }
- rtl8169_rx_csum(skb, status);
- skb->protocol = eth_type_trans(skb, dev);
+ skb = napi_alloc_skb(&tp->napi, pkt_size);
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ goto release_descriptor;
+ }
- rtl8169_rx_vlan_tag(desc, skb);
+ addr = le64_to_cpu(desc->addr);
+ rx_buf = page_address(tp->Rx_databuff[entry]);
- if (skb->pkt_type == PACKET_MULTICAST)
- dev->stats.multicast++;
+ dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
+ prefetch(rx_buf);
+ skb_copy_to_linear_data(skb, rx_buf, pkt_size);
+ skb->tail += pkt_size;
+ skb->len = pkt_size;
+ dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
- napi_gro_receive(&tp->napi, skb);
+ rtl8169_rx_csum(skb, status);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ rtl8169_rx_vlan_tag(desc, skb);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
+
+ napi_gro_receive(&tp->napi, skb);
+
+ u64_stats_update_begin(&tp->rx_stats.syncp);
+ tp->rx_stats.packets++;
+ tp->rx_stats.bytes += pkt_size;
+ u64_stats_update_end(&tp->rx_stats.syncp);
- u64_stats_update_begin(&tp->rx_stats.syncp);
- tp->rx_stats.packets++;
- tp->rx_stats.bytes += pkt_size;
- u64_stats_update_end(&tp->rx_stats.syncp);
- }
release_descriptor:
rtl8169_mark_to_asic(desc);
}
@@ -4650,25 +4627,21 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
return 0;
}
-static void rtl8169_down(struct net_device *dev)
+static void rtl8169_down(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
+ rtl_lock_work(tp);
- phy_stop(tp->phydev);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+ phy_stop(tp->phydev);
napi_disable(&tp->napi);
- netif_stop_queue(dev);
rtl8169_hw_reset(tp);
- /* Give a racing hard_start_xmit a few cycles to complete. */
- synchronize_rcu();
-
- rtl8169_tx_clear(tp);
-
- rtl8169_rx_clear(tp);
-
rtl_pll_power_down(tp);
+
+ rtl_unlock_work(tp);
}
static int rtl8169_close(struct net_device *dev)
@@ -4681,12 +4654,9 @@ static int rtl8169_close(struct net_device *dev)
/* Update counters before going down */
rtl8169_update_counters(tp);
- rtl_lock_work(tp);
- /* Clear all task flags */
- bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
-
- rtl8169_down(dev);
- rtl_unlock_work(tp);
+ netif_stop_queue(dev);
+ rtl8169_down(tp);
+ rtl8169_rx_clear(tp);
cancel_work_sync(&tp->wk.work);
@@ -4764,8 +4734,7 @@ static int rtl_open(struct net_device *dev)
rtl_hw_start(tp);
- if (!rtl8169_init_counter_offsets(tp))
- netif_warn(tp, hw, dev, "counter reset/update failed\n");
+ rtl8169_init_counter_offsets(tp);
phy_start(tp->phydev);
netif_start_queue(dev);
@@ -4841,44 +4810,30 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
pm_runtime_put_noidle(&pdev->dev);
}
-static void rtl8169_net_suspend(struct net_device *dev)
+static void rtl8169_net_suspend(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (!netif_running(dev))
+ if (!netif_running(tp->dev))
return;
- phy_stop(tp->phydev);
- netif_device_detach(dev);
-
- rtl_lock_work(tp);
- napi_disable(&tp->napi);
- /* Clear all task flags */
- bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
-
- rtl_unlock_work(tp);
-
- rtl_pll_power_down(tp);
+ netif_device_detach(tp->dev);
+ rtl8169_down(tp);
}
#ifdef CONFIG_PM
-static int rtl8169_suspend(struct device *device)
+static int __maybe_unused rtl8169_suspend(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
- rtl8169_net_suspend(dev);
+ rtl8169_net_suspend(tp);
clk_disable_unprepare(tp->clk);
return 0;
}
-static void __rtl8169_resume(struct net_device *dev)
+static void __rtl8169_resume(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- netif_device_attach(dev);
+ netif_device_attach(tp->dev);
rtl_pll_power_up(tp);
rtl8169_init_phy(tp);
@@ -4892,34 +4847,32 @@ static void __rtl8169_resume(struct net_device *dev)
rtl_unlock_work(tp);
}
-static int rtl8169_resume(struct device *device)
+static int __maybe_unused rtl8169_resume(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
- rtl_rar_set(tp, dev->dev_addr);
+ rtl_rar_set(tp, tp->dev->dev_addr);
clk_prepare_enable(tp->clk);
- if (netif_running(dev))
- __rtl8169_resume(dev);
+ if (netif_running(tp->dev))
+ __rtl8169_resume(tp);
return 0;
}
static int rtl8169_runtime_suspend(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
if (!tp->TxDescArray)
return 0;
rtl_lock_work(tp);
- __rtl8169_set_wol(tp, WAKE_ANY);
+ __rtl8169_set_wol(tp, WAKE_PHY);
rtl_unlock_work(tp);
- rtl8169_net_suspend(dev);
+ rtl8169_net_suspend(tp);
/* Update counters before entering runtime suspend */
rtl8169_update_counters(tp);
@@ -4929,10 +4882,9 @@ static int rtl8169_runtime_suspend(struct device *device)
static int rtl8169_runtime_resume(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
- rtl_rar_set(tp, dev->dev_addr);
+ rtl_rar_set(tp, tp->dev->dev_addr);
if (!tp->TxDescArray)
return 0;
@@ -4941,40 +4893,28 @@ static int rtl8169_runtime_resume(struct device *device)
__rtl8169_set_wol(tp, tp->saved_wolopts);
rtl_unlock_work(tp);
- __rtl8169_resume(dev);
+ __rtl8169_resume(tp);
return 0;
}
static int rtl8169_runtime_idle(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
- if (!netif_running(dev) || !netif_carrier_ok(dev))
+ if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
pm_schedule_suspend(device, 10000);
return -EBUSY;
}
static const struct dev_pm_ops rtl8169_pm_ops = {
- .suspend = rtl8169_suspend,
- .resume = rtl8169_resume,
- .freeze = rtl8169_suspend,
- .thaw = rtl8169_resume,
- .poweroff = rtl8169_suspend,
- .restore = rtl8169_resume,
- .runtime_suspend = rtl8169_runtime_suspend,
- .runtime_resume = rtl8169_runtime_resume,
- .runtime_idle = rtl8169_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
+ SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
+ rtl8169_runtime_idle)
};
-#define RTL8169_PM_OPS (&rtl8169_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define RTL8169_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
+#endif /* CONFIG_PM */
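SET_SYSTEM_SLEEP_PM_OPS() wires one suspend/resume pair into all six system-sleep slots when CONFIG_PM_SLEEP is set, replacing the open-coded initializer removed above. A userspace model of the pattern; the macro here is a simplified stand-in for the include/linux/pm.h one:

#include <stdio.h>

struct pm_ops {
	int (*suspend)(void);
	int (*resume)(void);
	int (*freeze)(void);
	int (*thaw)(void);
	int (*poweroff)(void);
	int (*restore)(void);
};

/* simplified model of SET_SYSTEM_SLEEP_PM_OPS() */
#define SET_SLEEP_PM_OPS(s, r) \
	.suspend = (s), .resume = (r), .freeze = (s), \
	.thaw = (r), .poweroff = (s), .restore = (r),

static int my_suspend(void) { puts("suspend"); return 0; }
static int my_resume(void)  { puts("resume");  return 0; }

static const struct pm_ops ops = {
	SET_SLEEP_PM_OPS(my_suspend, my_resume)
};

int main(void)
{
	ops.suspend();
	return ops.resume();
}

With the kernel macros, the callbacks compile out entirely when the corresponding config options are off, so the old RTL8169_PM_OPS/NULL fallback dance is no longer needed.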
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
@@ -4995,13 +4935,12 @@ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
static void rtl_shutdown(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = pci_get_drvdata(pdev);
- rtl8169_net_suspend(dev);
+ rtl8169_net_suspend(tp);
/* Restore original MAC address */
- rtl_rar_set(tp, dev->perm_addr);
+ rtl_rar_set(tp, tp->dev->perm_addr);
rtl8169_hw_reset(tp);
@@ -5018,24 +4957,20 @@ static void rtl_shutdown(struct pci_dev *pdev)
static void rtl_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = pci_get_drvdata(pdev);
- if (r8168_check_dash(tp))
- rtl8168_driver_stop(tp);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
- netif_napi_del(&tp->napi);
+ unregister_netdev(tp->dev);
- unregister_netdev(dev);
- mdiobus_unregister(tp->phydev->mdio.bus);
+ if (r8168_check_dash(tp))
+ rtl8168_driver_stop(tp);
rtl_release_firmware(tp);
- if (pci_dev_run_wake(pdev))
- pm_runtime_get_noresume(&pdev->dev);
-
/* restore original MAC address */
- rtl_rar_set(tp, dev->perm_addr);
+ rtl_rar_set(tp, tp->dev->perm_addr);
}
static const struct net_device_ops rtl_netdev_ops = {
@@ -5117,9 +5052,9 @@ DECLARE_RTL_COND(rtl_link_list_ready_cond)
return RTL_R8(tp, MCU) & LINK_LIST_RDY;
}
-DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp)
{
- return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+ rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
@@ -5164,20 +5099,19 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
- ret = mdiobus_register(new_bus);
+ ret = devm_mdiobus_register(new_bus);
if (ret)
return ret;
tp->phydev = mdiobus_get_phy(new_bus, 0);
if (!tp->phydev) {
- mdiobus_unregister(new_bus);
return -ENODEV;
} else if (!tp->phydev->drv) {
/* Most chip versions fail with the genphy driver.
* Therefore ensure that the dedicated PHY driver is loaded.
*/
- dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
- mdiobus_unregister(new_bus);
+ dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n",
+ tp->phydev->phy_id);
return -EUNATCH;
}
@@ -5189,53 +5123,34 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
- tp->ocp_base = OCP_STD_PHY_BASE;
-
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
- return;
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
- return;
+ rtl_enable_rxdvgate(tp);
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
+ r8168g_wait_ll_share_fifo_ready(tp);
r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
-
- rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+ r8168g_wait_ll_share_fifo_ready(tp);
}
static void rtl_hw_init_8125(struct rtl8169_private *tp)
{
- tp->ocp_base = OCP_STD_PHY_BASE;
-
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
- return;
+ rtl_enable_rxdvgate(tp);
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
+ r8168g_wait_ll_share_fifo_ready(tp);
r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
-
- rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+ r8168g_wait_ll_share_fifo_ready(tp);
}
static void rtl_hw_initialize(struct rtl8169_private *tp)
@@ -5350,9 +5265,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
tp->pci_dev = pdev;
- tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
tp->eee_adv = -1;
+ tp->ocp_base = OCP_STD_PHY_BASE;
/* Get the *optional* external "ether_clk" used on some boards */
rc = rtl_get_ether_clk(tp);
@@ -5408,7 +5323,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mac_version = chipset;
- tp->cp_cmd = RTL_R16(tp, CPlusCmd);
+ tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
@@ -5443,14 +5358,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_HIGHDMA;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- tp->cp_cmd |= RxChkSum;
- /* RTL8125 uses register RxConfig for VLAN offloading config */
- if (!rtl_is_8125(tp))
- tp->cp_cmd |= RxVlan;
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
@@ -5482,6 +5392,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ /* configure chip for default features */
+ rtl8169_set_features(dev, dev->features);
+
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
dev->max_mtu = jumbo_max;
@@ -5496,7 +5409,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!tp->counters)
return -ENOMEM;
- pci_set_drvdata(pdev, dev);
+ pci_set_drvdata(pdev, tp);
rc = r8169_mdio_register(tp);
if (rc)
@@ -5507,17 +5420,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = register_netdev(dev);
if (rc)
- goto err_mdio_unregister;
+ return rc;
- netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid,
- pci_irq_vector(pdev, 0));
+ netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
+ rtl_chip_infos[chipset].name, dev->dev_addr, xid,
+ pci_irq_vector(pdev, 0));
if (jumbo_max)
- netif_info(tp, probe, dev,
- "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
- jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
- "ok" : "ko");
+ netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
+ jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
+ "ok" : "ko");
if (r8168_check_dash(tp))
rtl8168_driver_start(tp);
@@ -5526,10 +5438,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pm_runtime_put_sync(&pdev->dev);
return 0;
-
-err_mdio_unregister:
- mdiobus_unregister(tp->phydev->mdio.bus);
- return rc;
}
static struct pci_driver rtl8169_pci_driver = {
@@ -5538,7 +5446,9 @@ static struct pci_driver rtl8169_pci_driver = {
.probe = rtl_init_one,
.remove = rtl_remove_one,
.shutdown = rtl_shutdown,
- .driver.pm = RTL8169_PM_OPS,
+#ifdef CONFIG_PM
+ .driver.pm = &rtl8169_pm_ops,
+#endif
};
module_pci_driver(rtl8169_pci_driver);
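A minimal sketch of the CONFIG_PM pattern the hunk above switches to, with illustrative example_* names; only the rtl8169_pm_ops reference and the .driver.pm field come from the patch itself:

	#ifdef CONFIG_PM
	static int example_suspend(struct device *dev) { return 0; }
	static int example_resume(struct device *dev) { return 0; }

	static const struct dev_pm_ops example_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	};
	#endif

	static struct pci_driver example_driver = {
		.name = "example",
	#ifdef CONFIG_PM
		.driver.pm = &example_pm_ops,
	#endif
	};

This keeps the dev_pm_ops table and its callbacks out of the build entirely when CONFIG_PM is disabled, instead of referencing an empty ops table.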
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 067ad25553b9..a442bcf64b9c 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1014,6 +1014,7 @@ static int ravb_phy_init(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
struct phy_device *phydev;
struct device_node *pn;
+ phy_interface_t iface;
int err;
priv->link = 0;
@@ -1032,8 +1033,13 @@ static int ravb_phy_init(struct net_device *ndev)
}
pn = of_node_get(np);
}
- phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
- priv->phy_interface);
+
+ iface = priv->phy_interface;
+ if (priv->chip_id != RCAR_GEN2 && phy_interface_mode_is_rgmii(iface)) {
+ /* ravb_set_delay_mode() takes care of internal delay mode */
+ iface = PHY_INTERFACE_MODE_RGMII;
+ }
+ phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
of_node_put(pn);
if (!phydev) {
netdev_err(ndev, "failed to connect PHY\n");
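A condensed view of the change above, using the same kernel helpers: when the SoC is not R-Car Gen2 and the MAC applies the internal delays itself, the RGMII delay variants are masked down to plain RGMII before the PHY is attached. A minimal sketch:

	phy_interface_t iface = priv->phy_interface;

	/* RGMII, RGMII_ID, RGMII_RXID and RGMII_TXID all match here */
	if (phy_interface_mode_is_rgmii(iface))
		iface = PHY_INTERFACE_MODE_RGMII; /* delays handled by the MAC */

so the PHY does not add a second set of RX/TX clock delays on top of the MAC's.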
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8ed73f44405d..f45331ed90b0 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2472,7 +2472,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
}
/* Packet transmit function */
-static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
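The return type change matters because the networking core interprets ndo_start_xmit results as netdev_tx_t, not plain int. A minimal sketch of the expected contract, with a hypothetical ring_full() helper:

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *ndev)
	{
		if (ring_full(ndev)) {		/* hypothetical helper */
			netif_stop_queue(ndev);
			return NETDEV_TX_BUSY;	/* core requeues the skb */
		}
		/* map and queue the packet here */
		return NETDEV_TX_OK;
	}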
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 128ee7cda1ed..65c98837ec45 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -610,12 +610,9 @@ static int ether3_rx(struct net_device *dev, unsigned int maxcnt)
ether3_readbuffer(dev, addrs+2, 12);
if (next_ptr < RX_START || next_ptr >= RX_END) {
- int i;
printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head);
printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8);
- for (i = 2; i < 14; i++)
- printk("%02X ", addrs[i]);
- printk("\n");
+ printk("%pM %pM\n", addrs + 2, addrs + 8);
next_ptr = priv(dev)->rx_head;
break;
}
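The twelve bytes read into addrs+2 are two MAC addresses, so the %pM printk extension replaces the manual hex loop. A minimal, self-contained sketch of the specifier:

	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printk("%pM\n", mac);	/* prints "00:11:22:33:44:55" */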
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 3f16bd807c6e..4b0e3695a71a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -553,7 +553,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
/* In case we're recovering from a crash (kexec), we want to
* cancel any outstanding request by the previous user of this
@@ -1281,13 +1281,13 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_check_datapath_caps = false;
}
- if (nic_data->must_realloc_vis) {
+ if (efx->must_realloc_vis) {
/* We cannot let the number of VIs change now */
rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
nic_data->n_allocated_vis);
if (rc)
return rc;
- nic_data->must_realloc_vis = false;
+ efx->must_realloc_vis = false;
}
if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
@@ -1326,16 +1326,15 @@ static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
#endif
/* All our allocations have been reset */
- nic_data->must_realloc_vis = true;
- nic_data->must_restore_rss_contexts = true;
- nic_data->must_restore_filters = true;
+ efx->must_realloc_vis = true;
+ efx_mcdi_filter_table_reset_mc_allocations(efx);
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
if (nic_data->vf)
for (i = 0; i < efx->vf_count; i++)
@@ -1820,6 +1819,7 @@ static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
}
static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+ __must_hold(&efx->stats_lock)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -2389,6 +2389,86 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
+static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int enabled, implemented;
+ bool want_workaround_26807;
+ int rc;
+
+ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+ if (rc == -ENOSYS) {
+ /* GET_WORKAROUNDS was implemented before this workaround,
+ * thus it must be unavailable in this firmware.
+ */
+ nic_data->workaround_26807 = false;
+ return 0;
+ }
+ if (rc)
+ return rc;
+ want_workaround_26807 =
+ implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807;
+ nic_data->workaround_26807 =
+ !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
+
+ if (want_workaround_26807 && !nic_data->workaround_26807) {
+ unsigned int flags;
+
+ rc = efx_mcdi_set_workaround(efx,
+ MC_CMD_WORKAROUND_BUG26807,
+ true, &flags);
+ if (!rc) {
+ if (flags &
+ 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+ netif_info(efx, drv, efx->net_dev,
+ "other functions on NIC have been reset\n");
+
+ /* With MCFW v4.6.x and earlier, the
+ * boot count will have incremented,
+ * so re-read the warm_boot_count
+ * value now to ensure this function
+ * doesn't think it has changed next
+ * time it checks.
+ */
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0) {
+ nic_data->warm_boot_count = rc;
+ rc = 0;
+ }
+ }
+ nic_data->workaround_26807 = true;
+ } else if (rc == -EPERM) {
+ rc = 0;
+ }
+ }
+ return rc;
+}
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc = efx_ef10_probe_multicast_chaining(efx);
+ struct efx_mcdi_filter_vlan *vlan;
+
+ if (rc)
+ return rc;
+ rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
+
+ if (rc)
+ return rc;
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+ return 0;
+
+fail_add_vlan:
+ efx_mcdi_filter_table_remove(efx);
+ return rc;
+}
+
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -2464,75 +2544,14 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
- unsigned int enabled, implemented;
bool use_v2, cut_thru;
- int rc;
nic_data = efx->nic_data;
use_v2 = nic_data->datapath_caps2 &
1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
cut_thru = !(nic_data->datapath_caps &
1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
- rc = efx_mcdi_ev_init(channel, cut_thru, use_v2);
-
- /* IRQ return is ignored */
- if (channel->channel || rc)
- return rc;
-
- /* Successfully created event queue on channel 0 */
- rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
- if (rc == -ENOSYS) {
- /* GET_WORKAROUNDS was implemented before this workaround,
- * thus it must be unavailable in this firmware.
- */
- nic_data->workaround_26807 = false;
- rc = 0;
- } else if (rc) {
- goto fail;
- } else {
- nic_data->workaround_26807 =
- !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
-
- if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
- !nic_data->workaround_26807) {
- unsigned int flags;
-
- rc = efx_mcdi_set_workaround(efx,
- MC_CMD_WORKAROUND_BUG26807,
- true, &flags);
-
- if (!rc) {
- if (flags &
- 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
- netif_info(efx, drv, efx->net_dev,
- "other functions on NIC have been reset\n");
-
- /* With MCFW v4.6.x and earlier, the
- * boot count will have incremented,
- * so re-read the warm_boot_count
- * value now to ensure this function
- * doesn't think it has changed next
- * time it checks.
- */
- rc = efx_ef10_get_warm_boot_count(efx);
- if (rc >= 0) {
- nic_data->warm_boot_count = rc;
- rc = 0;
- }
- }
- nic_data->workaround_26807 = true;
- } else if (rc == -EPERM) {
- rc = 0;
- }
- }
- }
-
- if (!rc)
- return 0;
-
-fail:
- efx_mcdi_ev_fini(channel);
- return rc;
+ return efx_mcdi_ev_init(channel, cut_thru, use_v2);
}
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
@@ -3100,16 +3119,15 @@ void efx_ef10_handle_drain_event(struct efx_nic *efx)
static int efx_ef10_fini_dmaq(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
int pending;
/* If the MC has just rebooted, the TX/RX queues will have already been
* torn down, but efx->active_queues needs to be set to zero.
*/
- if (nic_data->must_realloc_vis) {
+ if (efx->must_realloc_vis) {
atomic_set(&efx->active_queues, 0);
return 0;
}
@@ -3158,22 +3176,22 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
- rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc)
goto restore_filters;
ether_addr_copy(mac_old, nic_data->vport_mac);
- rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ rc = efx_ef10_vport_del_mac(efx, efx->vport_id,
nic_data->vport_mac);
if (rc)
goto restore_vadaptor;
- rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
+ rc = efx_ef10_vport_add_mac(efx, efx->vport_id,
efx->net_dev->dev_addr);
if (!rc) {
ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
} else {
- rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
+ rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old);
if (rc2) {
/* Failed to add original MAC, so clear vport_mac */
eth_zero_addr(nic_data->vport_mac);
@@ -3182,12 +3200,12 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
}
restore_vadaptor:
- rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
if (rc2)
goto reset_nic;
restore_filters:
down_write(&efx->filter_sem);
- rc2 = efx_mcdi_filter_table_probe(efx);
+ rc2 = efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -3211,7 +3229,6 @@ reset_nic:
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
bool was_enabled = efx->port_enabled;
int rc;
@@ -3225,11 +3242,11 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
+ efx->vport_id);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
sizeof(inbuf), NULL, 0, NULL);
- efx_mcdi_filter_table_probe(efx);
+ efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -3239,6 +3256,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
#ifdef CONFIG_SFC_SRIOV
if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
if (rc == -EPERM) {
@@ -3961,6 +3979,35 @@ out_unlock:
return rc;
}
+/* EF10 may have multiple datapath firmware variants within a
+ * single version. Report which variants are running.
+ */
+static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf,
+ size_t len)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return scnprintf(buf, len, " rx%x tx%x",
+ nic_data->rx_dpcpu_fw_id,
+ nic_data->tx_dpcpu_fw_id);
+}
+
+static unsigned int ef10_check_caps(const struct efx_nic *efx,
+ u8 flag,
+ u32 offset)
+{
+ const struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ switch (offset) {
+ case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST):
+ return nic_data->datapath_caps & BIT_ULL(flag);
+ case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST):
+ return nic_data->datapath_caps2 & BIT_ULL(flag);
+ default:
+ return 0;
+ }
+}
+
#define EF10_OFFLOAD_FEATURES \
(NETIF_F_IP_CSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
@@ -4027,7 +4074,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
@@ -4073,6 +4120,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -4139,7 +4188,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
@@ -4208,4 +4257,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
};
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 4580b30caae1..21fa6c0e8873 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -232,15 +232,14 @@ fail:
static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 port_flags;
int rc;
- rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
if (rc)
goto fail_vadaptor_alloc;
- rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id,
+ rc = efx_ef10_vadaptor_query(efx, efx->vport_id,
&port_flags, NULL, NULL);
if (rc)
goto fail_vadaptor_query;
@@ -281,11 +280,11 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
- EFX_EF10_NO_VLAN, &nic_data->vport_id);
+ EFX_EF10_NO_VLAN, &efx->vport_id);
if (rc)
goto fail2;
- rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+ rc = efx_ef10_vport_add_mac(efx, efx->vport_id, net_dev->dev_addr);
if (rc)
goto fail3;
ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
@@ -296,11 +295,11 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
return 0;
fail4:
- efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+ efx_ef10_vport_del_mac(efx, efx->vport_id, nic_data->vport_mac);
eth_zero_addr(nic_data->vport_mac);
fail3:
- efx_ef10_vport_free(efx, nic_data->vport_id);
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx_ef10_vport_free(efx, efx->vport_id);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
fail2:
efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
fail1:
@@ -355,22 +354,22 @@ void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
efx_ef10_sriov_free_vf_vswitching(efx);
- efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ efx_ef10_vadaptor_free(efx, efx->vport_id);
- if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+ if (efx->vport_id == EVB_PORT_ID_ASSIGNED)
return; /* No vswitch was ever created */
if (!is_zero_ether_addr(nic_data->vport_mac)) {
- efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ efx_ef10_vport_del_mac(efx, efx->vport_id,
efx->net_dev->dev_addr);
eth_zero_addr(nic_data->vport_mac);
}
- efx_ef10_vport_free(efx, nic_data->vport_id);
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx_ef10_vport_free(efx, efx->vport_id);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
/* Only free the vswitch if no VFs are assigned */
if (!pci_vfs_assigned(efx->pci_dev))
- efx_ef10_vswitch_free(efx, nic_data->vport_id);
+ efx_ef10_vswitch_free(efx, efx->vport_id);
}
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 15c731d04065..a8cc3881edce 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1425,23 +1425,16 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
le16_to_cpu(ver_words[2]),
le16_to_cpu(ver_words[3]));
- /* EF10 may have multiple datapath firmware variants within a
- * single version. Report which variants are running.
- */
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- offset += scnprintf(buf + offset, len - offset, " rx%x tx%x",
- nic_data->rx_dpcpu_fw_id,
- nic_data->tx_dpcpu_fw_id);
+ if (efx->type->print_additional_fwver)
+ offset += efx->type->print_additional_fwver(efx, buf + offset,
+ len - offset);
- /* It's theoretically possible for the string to exceed 31
- * characters, though in practice the first three version
- * components are short enough that this doesn't happen.
- */
- if (WARN_ON(offset >= len))
- buf[0] = 0;
- }
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
return;
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 54a45010b576..b107e4c00285 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -326,6 +326,18 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+#define MCDI_CAPABILITY(field) \
+ MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _LBN
+
+#define MCDI_CAPABILITY_OFST(field) \
+ MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _OFST
+
+/* field is FLAGS1 or FLAGS2 */
+#define efx_has_cap(efx, flag, field) \
+ efx->type->check_caps(efx, \
+ MCDI_CAPABILITY(flag), \
+ MCDI_CAPABILITY_OFST(field))
+
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities);
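A usage sketch for the new helper; both call sites below appear verbatim in later hunks of this series:

	if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
		efx_filter_set_encap_type(&spec, encap_type);

	if (efx_has_cap(efx, RX_RSS_LIMITED, FLAGS1))
		return -EOPNOTSUPP;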
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 4310ae5bd898..455a62814fb9 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -186,7 +186,6 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
struct efx_rss_context *ctx,
bool replacing)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 flags = spec->flags;
memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
@@ -211,7 +210,7 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
@@ -332,7 +331,6 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bool replace_equal)
{
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct efx_mcdi_filter_table *table;
struct efx_filter_spec *saved_spec;
struct efx_rss_context *ctx = NULL;
@@ -461,7 +459,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle,
ctx, replacing);
- if (rc == -EINVAL && nic_data->must_realloc_vis)
+ if (rc == -EINVAL && efx->must_realloc_vis)
/* The MC rebooted under us, causing it to reject our filter
* insertion as pointing to an invalid VI (spec->dmaq_id).
*/
@@ -813,7 +811,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
enum efx_encap_type encap_type,
bool multicast, bool rollback)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
@@ -830,8 +828,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
efx_filter_set_uc_def(&spec);
if (encap_type) {
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
efx_filter_set_encap_type(&spec, encap_type);
else
/*
@@ -899,7 +896,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
*id = efx_mcdi_filter_get_unsafe_id(rc);
- if (!nic_data->workaround_26807 && !encap_type) {
+ if (!table->mc_chaining && !encap_type) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags, 0);
@@ -965,7 +962,6 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
struct efx_mcdi_filter_vlan *vlan)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
/*
* Do not install unspecified VID if VLAN filtering is enabled.
@@ -1012,11 +1008,10 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
* If changing promiscuous state with cascaded multicast filters, remove
* old filters first, so that packets are dropped rather than duplicated
*/
- if (nic_data->workaround_26807 &&
- table->mc_promisc_last != table->mc_promisc)
+ if (table->mc_chaining && table->mc_promisc_last != table->mc_promisc)
efx_mcdi_filter_remove_old(efx);
if (table->mc_promisc) {
- if (nic_data->workaround_26807) {
+ if (table->mc_chaining) {
/*
* If we failed to insert promiscuous filters, rollback
* and fall back to individual multicast filters
@@ -1051,7 +1046,7 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
*/
if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
- if (nic_data->workaround_26807)
+ if (table->mc_chaining)
efx_mcdi_filter_remove_old(efx);
if (efx_mcdi_filter_insert_def(efx, vlan,
EFX_ENCAP_TYPE_NONE,
@@ -1288,12 +1283,10 @@ efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
return 0;
}
-int efx_mcdi_filter_table_probe(struct efx_nic *efx)
+int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
struct efx_mcdi_filter_table *table;
- struct efx_mcdi_filter_vlan *vlan;
int rc;
if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
@@ -1306,12 +1299,12 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx)
if (!table)
return -ENOMEM;
+ table->mc_chaining = multicast_chaining;
table->rx_match_count = 0;
rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
if (rc)
goto fail;
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
if (rc)
goto fail;
@@ -1342,22 +1335,22 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx)
efx->filter_state = table;
- list_for_each_entry(vlan, &nic_data->vlan_list, list) {
- rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
- if (rc)
- goto fail_add_vlan;
- }
-
return 0;
-
-fail_add_vlan:
- efx_mcdi_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
fail:
kfree(table);
return rc;
}
+void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ if (table) {
+ table->must_restore_filters = true;
+ table->must_restore_rss_contexts = true;
+ }
+}
+
/*
* Caller must hold efx->filter_sem for read if race against
* efx_mcdi_filter_table_remove() is possible
@@ -1365,7 +1358,6 @@ fail:
void efx_mcdi_filter_table_restore(struct efx_nic *efx)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int invalid_filters = 0, failed = 0;
struct efx_mcdi_filter_vlan *vlan;
struct efx_filter_spec *spec;
@@ -1377,10 +1369,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
WARN_ON(!rwsem_is_locked(&efx->filter_sem));
- if (!nic_data->must_restore_filters)
- return;
-
- if (!table)
+ if (!table || !table->must_restore_filters)
return;
down_write(&table->lock);
@@ -1456,7 +1445,7 @@ not_restored:
netif_err(efx, hw, efx->net_dev,
"unable to restore %u filters\n", failed);
else
- nic_data->must_restore_filters = false;
+ table->must_restore_filters = false;
}
void efx_mcdi_filter_table_remove(struct efx_nic *efx)
@@ -1921,7 +1910,6 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
u32 alloc_type = exclusive ?
@@ -1939,12 +1927,11 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
return 0;
}
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+ if (efx_has_cap(efx, RX_RSS_LIMITED, FLAGS1))
return -EOPNOTSUPP;
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
+ efx->vport_id);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
@@ -1961,8 +1948,7 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
if (context_size)
*context_size = rss_spread;
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
+ if (efx_has_cap(efx, ADDITIONAL_RSS_MODES, FLAGS1))
efx_mcdi_set_rss_context_flags(efx, ctx);
return 0;
@@ -2030,14 +2016,14 @@ void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
unsigned *context_size)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
context_size);
if (rc != 0)
return rc;
- nic_data->rx_rss_context_exclusive = false;
+ table->rx_rss_context_exclusive = false;
efx_set_default_rx_indir_table(efx, &efx->rss_context);
return 0;
}
@@ -2046,12 +2032,12 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
const u32 *rx_indir_table,
const u8 *key)
{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
u32 old_rx_rss_context = efx->rss_context.context_id;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
- !nic_data->rx_rss_context_exclusive) {
+ !table->rx_rss_context_exclusive) {
rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
NULL);
if (rc == -EOPNOTSUPP)
@@ -2068,7 +2054,7 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
if (efx->rss_context.context_id != old_rx_rss_context &&
old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
- nic_data->rx_rss_context_exclusive = true;
+ table->rx_rss_context_exclusive = true;
if (rx_indir_table != efx->rss_context.rx_indir_table)
memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
sizeof(efx->rss_context.rx_indir_table));
@@ -2182,13 +2168,13 @@ int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
struct efx_rss_context *ctx;
int rc;
WARN_ON(!mutex_is_locked(&efx->rss_lock));
- if (!nic_data->must_restore_rss_contexts)
+ if (!table->must_restore_rss_contexts)
return;
list_for_each_entry(ctx, &efx->rss_context.list, list) {
@@ -2204,7 +2190,7 @@ void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
"; RSS filters may fail to be applied\n",
ctx->user_id, rc);
}
- nic_data->must_restore_rss_contexts = false;
+ table->must_restore_rss_contexts = false;
}
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index 1837f4f5d661..03a8bf74c733 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -55,6 +55,8 @@ struct efx_mcdi_filter_table {
u32 rx_match_mcdi_flags[
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
unsigned int rx_match_count;
+ /* Our RSS context is exclusive (as opposed to shared) */
+ bool rx_rss_context_exclusive;
struct rw_semaphore lock; /* Protects entries */
struct {
@@ -75,14 +77,27 @@ struct efx_mcdi_filter_table {
/* Whether in multicast promiscuous mode when last changed */
bool mc_promisc_last;
bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
+ /* RSS contexts have yet to be restored after MC reboot */
+ bool must_restore_rss_contexts;
+ /* filters have yet to be restored after MC reboot */
+ bool must_restore_filters;
+ /* Multicast filter chaining allows less-specific filters to receive
+ * multicast packets that matched more-specific filters. Early EF10
+ * firmware didn't support this (SF bug 26807); if mc_chaining == false
+ * we still subscribe to the dev_mc_list even when mc_promisc is set,
+ * to prevent another VI stealing the traffic.
+ */
+ bool mc_chaining;
bool vlan_filter;
struct list_head vlan_list;
};
-int efx_mcdi_filter_table_probe(struct efx_nic *efx);
+int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining);
void efx_mcdi_filter_table_remove(struct efx_nic *efx);
void efx_mcdi_filter_table_restore(struct efx_nic *efx);
+void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx);
+
/*
* The filter table(s) are managed by firmware and we have write-only
* access. When removing filters we must identify them to the
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
index dcfe78b0fa5a..962d8395d958 100644
--- a/drivers/net/ethernet/sfc/mcdi_functions.c
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -168,21 +168,18 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- struct efx_ef10_nic_data *nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc, i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
- nic_data = efx->nic_data;
-
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
@@ -276,7 +273,6 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc;
@@ -295,7 +291,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ab5227b13ae6..b807871d8f69 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -722,11 +722,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
- }
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b084e623b5f4..1afb58feb9ab 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -887,8 +887,10 @@ struct efx_async_filter_insertion {
* @rss_context: Main RSS context. Its @list member is the head of the list of
* RSS contexts created by user requests
* @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
* @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
* acknowledge but do nothing else.
* @irq_status: Interrupt status buffer
@@ -1044,10 +1046,12 @@ struct efx_nic {
bool rx_scatter;
struct efx_rss_context rss_context;
struct mutex rss_lock;
+ u32 vport_id;
unsigned int_error_count;
unsigned long int_error_expire;
+ bool must_realloc_vis;
bool irq_soft_enabled;
struct efx_buffer irq_status;
unsigned irq_zero_count;
@@ -1292,6 +1296,7 @@ struct efx_udp_tunnel {
* @udp_tnl_add_port: Add a UDP tunnel port
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @udp_tnl_del_port: Remove a UDP tunnel port
+ * @print_additional_fwver: Dump NIC-specific additional FW version info
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1352,6 +1357,9 @@ struct efx_nic_type {
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
int (*set_wol)(struct efx_nic *efx, u32 type);
void (*resume_wol)(struct efx_nic *efx);
+ unsigned int (*check_caps)(const struct efx_nic *efx,
+ u8 flag,
+ u32 offset);
int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
void (*mcdi_request)(struct efx_nic *efx,
@@ -1462,6 +1470,8 @@ struct efx_nic_type {
int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
+ size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
+ size_t len);
int revision;
unsigned int txd_ptr_tbl_base;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 6670fda8f35a..8f73c5d996eb 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -360,10 +360,6 @@ enum {
* @warm_boot_count: Last seen MC warm boot count
* @vi_base: Absolute index of first VI in this function
* @n_allocated_vis: Number of VIs allocated to this function
- * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
- * @must_restore_rss_contexts: Flag: RSS contexts have yet to be restored after
- * MC reboot
- * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
* @n_piobufs: Number of PIO buffers allocated to this function
* @wc_membase: Base address of write-combining mapping of the memory BAR
* @pio_write_base: Base address for writing PIO buffers
@@ -372,7 +368,6 @@ enum {
* @piobuf_size: size of a single PIO buffer
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
- * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @workaround_26807: Flag: firmware supports workaround for bug 26807
@@ -385,7 +380,6 @@ enum {
* %MC_CMD_GET_CAPABILITIES response)
* @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
- * @vport_id: The function's vport ID, only relevant for PFs
* @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
* @pf_index: The number for this PF, or the parent PF if this is a VF
#ifdef CONFIG_SFC_SRIOV
@@ -404,16 +398,12 @@ struct efx_ef10_nic_data {
u16 warm_boot_count;
unsigned int vi_base;
unsigned int n_allocated_vis;
- bool must_realloc_vis;
- bool must_restore_rss_contexts;
- bool must_restore_filters;
unsigned int n_piobufs;
void __iomem *wc_membase, *pio_write_base;
unsigned int pio_write_vi_base;
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
u16 piobuf_size;
bool must_restore_piobufs;
- bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool workaround_26807;
@@ -423,7 +413,6 @@ struct efx_ef10_nic_data {
u32 datapath_caps2;
unsigned int rx_dpcpu_fw_id;
unsigned int tx_dpcpu_fw_id;
- unsigned int vport_id;
bool must_probe_vswitching;
unsigned int pf_index;
u8 port_id[ETH_ALEN];
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 59b4f16896a8..04c7283d205e 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -352,12 +352,7 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- return ((efx_nic_rev(efx) >= EFX_REV_HUNT_A0) &&
- (nic_data->datapath_caps2 &
- (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN)
- ));
+ return efx_has_cap(efx, TX_MAC_TIMESTAMPING, FLAGS2);
}
/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 260352d97d9d..c01916cff507 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -308,6 +308,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + rx_buf->len;
xdp.rxq = &rx_queue->xdp_rxq_info;
+ xdp.frame_sz = efx->rx_page_buf_step;
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
rcu_read_unlock();
@@ -328,7 +329,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
case XDP_TX:
/* Buffer ownership passes to tx on success. */
- xdpf = convert_to_xdp_frame(&xdp);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
if (unlikely(err != 1)) {
efx_free_rx_buffers(rx_queue, rx_buf, 1);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index baa464161626..891e9fb6abec 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -948,6 +948,13 @@ fail:
#endif /* CONFIG_SFC_MTD */
+static unsigned int siena_check_caps(const struct efx_nic *efx,
+ u8 flag, u32 offset)
+{
+ /* Siena did not support MC_CMD_GET_CAPABILITIES */
+ return 0;
+}
+
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
@@ -1086,4 +1093,5 @@ const struct efx_nic_type siena_a0_nic_type = {
1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
.rx_hash_key_size = 16,
+ .check_caps = siena_check_caps,
};
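Because siena_check_caps() unconditionally returns 0, every efx_has_cap() query evaluates false on Siena. For example, the ptp.c hunk above reduces to:

	/* check_caps is siena_check_caps() here, so this is always false */
	return efx_has_cap(efx, TX_MAC_TIMESTAMPING, FLAGS2);

which preserves the old behaviour of MAC TX timestamps being an EF10-only feature.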
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 7305e8e86c51..6646eba9f57f 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -848,14 +848,14 @@ static int ioc3eth_probe(struct platform_device *pdev)
ip = netdev_priv(dev);
ip->dma_dev = pdev->dev.parent;
ip->regs = devm_platform_ioremap_resource(pdev, 0);
- if (!ip->regs) {
- err = -ENOMEM;
+ if (IS_ERR(ip->regs)) {
+ err = PTR_ERR(ip->regs);
goto out_free;
}
ip->ssram = devm_platform_ioremap_resource(pdev, 1);
- if (!ip->ssram) {
- err = -ENOMEM;
+ if (IS_ERR(ip->ssram)) {
+ err = PTR_ERR(ip->ssram);
goto out_free;
}
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 9e1c3752b200..4d2d91ec8b41 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -28,7 +28,7 @@ config SMC9194
option if you have a DELL laptop with the docking station, or
another SMC9192/9194 based chipset. Say Y if you want it compiled
into the kernel, and read the file
- <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.rst>.
To compile this driver as a module, choose M here. The module
will be called smc9194.
@@ -44,7 +44,7 @@ config SMC91X
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
compiled into the kernel, and read the file
- <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.rst>.
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 49a6a9167af4..fc168f85e7af 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2493,20 +2493,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_init(dev);
if (retval < 0)
- goto out_disable_resources;
+ goto out_init_fail;
netif_carrier_off(dev);
retval = smsc911x_mii_init(pdev, dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
- goto out_disable_resources;
+ goto out_init_fail;
}
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_disable_resources;
+ goto out_init_fail;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
@@ -2547,9 +2547,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
return 0;
-out_disable_resources:
+out_init_fail:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+out_disable_resources:
(void)smsc911x_disable_resources(pdev);
out_enable_resources_fail:
smsc911x_free_resources(pdev);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index a5a0fb60193a..328bc38848bb 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -867,7 +867,7 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
{
struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
u32 ret;
if (unlikely(!xdpf))
@@ -884,23 +884,28 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
struct xdp_buff *xdp)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- unsigned int len = xdp->data_end - xdp->data;
+ unsigned int sync, len = xdp->data_end - xdp->data;
u32 ret = NETSEC_XDP_PASS;
+ struct page *page;
int err;
u32 act;
act = bpf_prog_run_xdp(prog, xdp);
+ /* Due to xdp_adjust_tail: the DMA sync for_device must cover the max length the CPU touched */
+ sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
+ sync = max(sync, len);
+
switch (act) {
case XDP_PASS:
ret = NETSEC_XDP_PASS;
break;
case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp);
- if (ret != NETSEC_XDP_TX)
- page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ if (ret != NETSEC_XDP_TX) {
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(dring->page_pool, page, sync, true);
+ }
break;
case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -908,9 +913,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR;
} else {
ret = NETSEC_XDP_CONSUMED;
- page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(dring->page_pool, page, sync, true);
}
break;
default:
@@ -921,8 +925,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
- page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data), len, true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(dring->page_pool, page, sync, true);
break;
}
@@ -936,10 +940,14 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_rx_pkt_info rx_info;
enum dma_data_direction dma_dir;
struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
u16 xdp_xmit = 0;
u32 xdp_act = 0;
int done = 0;
+ xdp.rxq = &dring->xdp_rxq;
+ xdp.frame_sz = PAGE_SIZE;
+
rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog);
dma_dir = page_pool_get_dma_dir(dring->page_pool);
@@ -953,7 +961,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
- struct xdp_buff xdp;
void *buf_addr;
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
@@ -1002,7 +1009,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + pkt_len;
- xdp.rxq = &dring->xdp_rxq;
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
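xdp.rxq and xdp.frame_sz are invariant for the whole NAPI poll, so the hunk hoists them out of the per-packet loop. A minimal sketch of the resulting shape, with the loop bookkeeping elided:

	struct xdp_buff xdp;

	/* invariant across the whole poll: set once */
	xdp.rxq = &dring->xdp_rxq;
	xdp.frame_sz = PAGE_SIZE;

	while (done < budget) {
		/* per packet: only the data pointers change */
		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + pkt_len;
		/* run the XDP program, count the packet, advance */
	}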
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 67ddf782d98a..f2638446b62e 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1394,7 +1394,7 @@ static int ave_stop(struct net_device *ndev)
return 0;
}
-static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ave_private *priv = netdev_priv(ndev);
u32 proc_idx, done_idx, ndesc, cmdsts;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index b46f8d2ae6d7..36bd2e18f23b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -196,6 +196,19 @@ config DWMAC_SUN8I
This selects Allwinner SoC glue layer support for the
stmmac device driver. This driver is used for H3/A83T/A64
EMAC ethernet controller.
+
+config DWMAC_IMX8
+ tristate "NXP IMX8 DWMAC support"
+ default ARCH_MXC
+ depends on OF && (ARCH_MXC || COMPILE_TEST)
+ select MFD_SYSCON
+ ---help---
+ Support for the ethernet controller on NXP i.MX8 SoCs.
+
+ This selects the NXP SoC glue layer support for the stmmac
+ device driver. This driver is used for i.MX8 series SoCs such
+ as the i.MX8MP/i.MX8DXL GMAC ethernet controllers.
+
endif
config DWMAC_INTEL
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 5a6f265bc540..295615ab36a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -27,9 +27,10 @@ obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
+obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
-obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
-obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
+obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
+obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 6208a68a331d..127f75862962 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -473,6 +473,7 @@ struct mac_device_info {
unsigned int xlgmac;
unsigned int num_vlan;
u32 vlan_filter[32];
+ unsigned int promisc;
};
struct stmmac_rx_routing {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
new file mode 100644
index 000000000000..5010af7dab4a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwmac-imx.c - DWMAC Specific Glue layer for NXP imx8
+ *
+ * Copyright 2020 NXP
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define GPR_ENET_QOS_INTF_MODE_MASK GENMASK(21, 16)
+#define GPR_ENET_QOS_INTF_SEL_MII (0x0 << 16)
+#define GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 16)
+#define GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 16)
+#define GPR_ENET_QOS_CLK_GEN_EN (0x1 << 19)
+#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20)
+#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
+
+struct imx_dwmac_ops {
+ u32 addr_width;
+ bool mac_rgmii_txclk_auto_adj;
+
+ int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
+};
+
+struct imx_priv_data {
+ struct device *dev;
+ struct clk *clk_tx;
+ struct clk *clk_mem;
+ struct regmap *intf_regmap;
+ u32 intf_reg_off;
+ bool rmii_refclk_ext;
+
+ const struct imx_dwmac_ops *ops;
+ struct plat_stmmacenet_data *plat_dat;
+};
+
+static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct imx_priv_data *dwmac = plat_dat->bsp_priv;
+ int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = GPR_ENET_QOS_INTF_SEL_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = GPR_ENET_QOS_INTF_SEL_RMII;
+ val |= (dwmac->rmii_refclk_ext ? 0 : GPR_ENET_QOS_CLK_TX_CLK_SEL);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = GPR_ENET_QOS_INTF_SEL_RGMII |
+ GPR_ENET_QOS_RGMII_EN;
+ break;
+ default:
+ pr_debug("imx dwmac doesn't support %d interface\n",
+ plat_dat->interface);
+ return -EINVAL;
+ }
+
+ val |= GPR_ENET_QOS_CLK_GEN_EN;
+ return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
+ GPR_ENET_QOS_INTF_MODE_MASK, val);
+};
+
+static int
+imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ int ret = 0;
+
+ /* TBD: depends on imx8dxl scu interfaces to be upstreamed */
+ return ret;
+}
+
+static int imx_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct imx_priv_data *dwmac = priv;
+ int ret;
+
+ plat_dat = dwmac->plat_dat;
+
+ ret = clk_prepare_enable(dwmac->clk_mem);
+ if (ret) {
+ dev_err(&pdev->dev, "mem clock enable failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwmac->clk_tx);
+ if (ret) {
+ dev_err(&pdev->dev, "tx clock enable failed\n");
+ goto clk_tx_en_failed;
+ }
+
+ if (dwmac->ops->set_intf_mode) {
+ ret = dwmac->ops->set_intf_mode(plat_dat);
+ if (ret)
+ goto intf_mode_failed;
+ }
+
+ return 0;
+
+intf_mode_failed:
+ clk_disable_unprepare(dwmac->clk_tx);
+clk_tx_en_failed:
+ clk_disable_unprepare(dwmac->clk_mem);
+ return ret;
+}
+
+static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct imx_priv_data *dwmac = priv;
+
+ if (dwmac->clk_tx)
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_mem);
+}
+
+static void imx_dwmac_fix_speed(void *priv, unsigned int speed)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct imx_priv_data *dwmac = priv;
+ unsigned long rate;
+ int err;
+
+ plat_dat = dwmac->plat_dat;
+
+ if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
+ (plat_dat->interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->interface == PHY_INTERFACE_MODE_MII))
+ return;
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ default:
+ dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ return;
+ }
+
+ err = clk_set_rate(dwmac->clk_tx, rate);
+ if (err < 0)
+ dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
+}
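+
+/* Worked example (illustrative): at SPEED_1000 the call above amounts to
+ * clk_set_rate(dwmac->clk_tx, 125000000), the standard 125MHz RGMII TX
+ * clock; 100/10 Mbit/s scale it down to 25MHz and 2.5MHz respectively.
+ */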
+
+static int
+imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int err = 0;
+
+	dwmac->rmii_refclk_ext = of_property_read_bool(np, "snps,rmii_refclk_ext");
+
+ dwmac->clk_tx = devm_clk_get(dev, "tx");
+ if (IS_ERR(dwmac->clk_tx)) {
+ dev_err(dev, "failed to get tx clock\n");
+ return PTR_ERR(dwmac->clk_tx);
+ }
+
+ dwmac->clk_mem = NULL;
+ if (of_machine_is_compatible("fsl,imx8dxl")) {
+ dwmac->clk_mem = devm_clk_get(dev, "mem");
+ if (IS_ERR(dwmac->clk_mem)) {
+ dev_err(dev, "failed to get mem clock\n");
+ return PTR_ERR(dwmac->clk_mem);
+ }
+ }
+
+ if (of_machine_is_compatible("fsl,imx8mp")) {
+		/* The binding doc describes the "intf_mode" property:
+		 * it is required by the i.MX8MP and
+		 * optional for the i.MX8DXL.
+		 */
+ dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
+ if (IS_ERR(dwmac->intf_regmap))
+ return PTR_ERR(dwmac->intf_regmap);
+
+ err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off);
+ if (err) {
+ dev_err(dev, "Can't get intf mode reg offset (%d)\n", err);
+ return err;
+ }
+ }
+
+ return err;
+}
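+
+/* Hypothetical DT fragment this parser would consume; the node name, unit
+ * address and the phandle/offset in "intf_mode" are illustrative only:
+ *
+ *	eqos: ethernet@30bf0000 {
+ *		compatible = "nxp,imx8mp-dwmac-eqos";
+ *		clock-names = "tx";
+ *		intf_mode = <&gpr 0x4>;
+ *	};
+ */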
+
+static int imx_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct imx_priv_data *dwmac;
+ const struct imx_dwmac_ops *data;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+	if (!dwmac)
+		return -ENOMEM;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ ret = -EINVAL;
+ goto err_match_data;
+ }
+
+ dwmac->ops = data;
+ dwmac->dev = &pdev->dev;
+
+ ret = imx_dwmac_parse_dt(dwmac, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse OF data\n");
+ goto err_parse_dt;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(dwmac->ops->addr_width));
+ if (ret) {
+ dev_err(&pdev->dev, "DMA mask set failed\n");
+ goto err_dma_mask;
+ }
+
+ plat_dat->init = imx_dwmac_init;
+ plat_dat->exit = imx_dwmac_exit;
+ plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
+ plat_dat->bsp_priv = dwmac;
+ dwmac->plat_dat = plat_dat;
+
+ ret = imx_dwmac_init(pdev, dwmac);
+ if (ret)
+ goto err_dwmac_init;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_drv_probe;
+
+ return 0;
+
+err_drv_probe:
+	imx_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_dwmac_init:
+err_dma_mask:
+err_parse_dt:
+err_match_data:
+ stmmac_remove_config_dt(pdev, plat_dat);
+ return ret;
+}
+
+static const struct imx_dwmac_ops imx8mp_dwmac_data = {
+ .addr_width = 34,
+ .mac_rgmii_txclk_auto_adj = false,
+ .set_intf_mode = imx8mp_set_intf_mode,
+};
+
+static const struct imx_dwmac_ops imx8dxl_dwmac_data = {
+ .addr_width = 32,
+ .mac_rgmii_txclk_auto_adj = true,
+ .set_intf_mode = imx8dxl_set_intf_mode,
+};
+
+static const struct of_device_id imx_dwmac_match[] = {
+ { .compatible = "nxp,imx8mp-dwmac-eqos", .data = &imx8mp_dwmac_data },
+ { .compatible = "nxp,imx8dxl-dwmac-eqos", .data = &imx8dxl_dwmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx_dwmac_match);
+
+static struct platform_driver imx_dwmac_driver = {
+ .probe = imx_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "imx-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = imx_dwmac_match,
+ },
+};
+module_platform_driver(imx_dwmac_driver);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP i.MX8 DWMAC specific glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 2e4aaedb93f5..2ac9dfb3462c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -83,13 +83,9 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
/* assert clk_req */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data |= SERDES_PLL_CLK;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for clk_ack assertion */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -103,13 +99,9 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
}
/* assert lane reset */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data |= SERDES_RST;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for assert lane reset reflection */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -123,14 +115,12 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
}
/* move power state to P0 */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_PWR_ST_MASK;
data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* Check for P0 state */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -159,14 +149,12 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
/* move power state to P3 */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_PWR_ST_MASK;
data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* Check for P3 state */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -180,13 +168,9 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
/* de-assert clk_req */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_PLL_CLK;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for clk_ack de-assert */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -200,13 +184,9 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
/* de-assert lane reset */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_RST;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for de-assert lane reset reflection */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -252,6 +232,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
static int intel_mgbe_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ int ret;
int i;
plat->clk_csr = 5;
@@ -324,7 +305,12 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
plat->stmmac_clk = NULL;
}
- clk_prepare_enable(plat->stmmac_clk);
+
+ ret = clk_prepare_enable(plat->stmmac_clk);
+ if (ret) {
+ clk_unregister_fixed_rate(plat->stmmac_clk);
+ return ret;
+ }
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -341,16 +327,11 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
static int ehl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- int ret;
-
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
- return 0;
+ return intel_mgbe_common_data(pdev, plat);
}
static int ehl_sgmii_data(struct pci_dev *pdev,
@@ -366,7 +347,7 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
return ehl_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_sgmii1g_info = {
.setup = ehl_sgmii_data,
};
@@ -380,7 +361,7 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
return ehl_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_rgmii1g_info = {
.setup = ehl_rgmii_data,
};
@@ -399,7 +380,7 @@ static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
return ehl_pse0_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse0_rgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
.setup = ehl_pse0_rgmii1g_data,
};
@@ -412,7 +393,7 @@ static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
return ehl_pse0_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse0_sgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
.setup = ehl_pse0_sgmii1g_data,
};
@@ -431,7 +412,7 @@ static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
return ehl_pse1_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse1_rgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
.setup = ehl_pse1_rgmii1g_data,
};
@@ -444,23 +425,18 @@ static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
return ehl_pse1_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse1_sgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
.setup = ehl_pse1_sgmii1g_data,
};
static int tgl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- int ret;
-
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
- return 0;
+ return intel_mgbe_common_data(pdev, plat);
}
static int tgl_sgmii_data(struct pci_dev *pdev,
@@ -474,7 +450,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
return tgl_common_data(pdev, plat);
}
-static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
+static struct stmmac_pci_info tgl_sgmii1g_info = {
.setup = tgl_sgmii_data,
};
@@ -577,7 +553,7 @@ static int quark_default_data(struct pci_dev *pdev,
return 0;
}
-static const struct stmmac_pci_info quark_pci_info = {
+static const struct stmmac_pci_info quark_info = {
.setup = quark_default_data,
};
@@ -600,11 +576,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
struct intel_priv_data *intel_priv;
struct plat_stmmacenet_data *plat;
struct stmmac_resources res;
- int i;
int ret;
- intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv),
- GFP_KERNEL);
+ intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
if (!intel_priv)
return -ENOMEM;
@@ -631,15 +605,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
return ret;
}
- /* Get the base address of device */
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
- if (ret)
- return ret;
- break;
- }
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
pci_set_master(pdev);
@@ -650,14 +618,23 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- pci_enable_msi(pdev);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[i];
- res.wol_irq = pdev->irq;
- res.irq = pdev->irq;
+ res.addr = pcim_iomap_table(pdev)[0];
+ res.wol_irq = pci_irq_vector(pdev, 0);
+ res.irq = pci_irq_vector(pdev, 0);
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+ if (ret) {
+ pci_free_irq_vectors(pdev);
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_unregister_fixed_rate(plat->stmmac_clk);
+ }
- return stmmac_dvr_probe(&pdev->dev, plat, &res);
+ return ret;
}
/**
@@ -671,19 +648,15 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
{
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
- int i;
stmmac_dvr_remove(&pdev->dev);
- if (priv->plat->stmmac_clk)
- clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+ pci_free_irq_vectors(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+
+ pcim_iounmap_regions(pdev, BIT(0));
pci_disable_device(pdev);
}
@@ -742,26 +715,19 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
- { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID,
- &ehl_pse0_rgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID,
- &ehl_pse0_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID,
- &ehl_pse0_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID,
- &ehl_pse1_rgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID,
- &ehl_pse1_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID,
- &ehl_pse1_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
{}
};
-
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
static struct pci_driver intel_eth_pci_driver = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 6ae13dc19510..02102c781a8c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -319,6 +319,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
/* Enable PTP clock */
regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+ break;
+ default:
+ /* We don't get here; the switch above will have errored out */
+ unreachable();
+ }
regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index a3934ca6a043..234e8b6816ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -32,7 +33,10 @@
#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
-#define PRG_ETH0_TXDLY_SHIFT 5
+/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns is exactly one
+ * cycle of the 125MHz RGMII TX clock):
+ * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
+ */
#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5)
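+
+/* Worked example (illustrative): tx_delay_ns = 2 yields
+ * FIELD_PREP(PRG_ETH0_TXDLY_MASK, 2 >> 1) = 0x1 << 5 = 0x20.
+ */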
/* divider for the result of m250_sel */
@@ -44,6 +48,27 @@
#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
+/* Bypass (= 0, the signal from the GPIO input directly connects to the
+ * internal sampling) or enable (= 1) the internal logic for RXEN and RXD[3:0]
+ * timing tuning.
+ */
+#define PRG_ETH0_ADJ_ENABLE BIT(13)
+/* Controls whether the RXEN and RXD[3:0] signals should be aligned with the
+ * input RX rising/falling edge and sent to the Ethernet internals. This sets
+ * the delay and skew automatically (internally).
+ */
+#define PRG_ETH0_ADJ_SETUP BIT(14)
+/* An internal counter based on the "timing-adjustment" clock. The counter is
+ * cleared on both the rising and falling edges of RX_CLK. This selects the
+ * delay (= the counter value) when to start sampling RXEN and RXD[3:0].
+ */
+#define PRG_ETH0_ADJ_DELAY GENMASK(19, 15)
+/* Adjusts the skew between each bit of RXEN and RXD[3:0]. If a signal has a
+ * large input delay, the bit for that signal (RXEN = bit 0, RXD[3] = bit 1,
+ * ...) can be configured to be 1 to compensate for a delay of about 1ns.
+ */
+#define PRG_ETH0_ADJ_SKEW GENMASK(24, 20)
+
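+/* Illustrative example: "amlogic,rx-delay-ns = <2>" maps to setting
+ * PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP (see meson8b_init_prg_eth()).
+ */
+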
#define MUX_CLK_NUM_PARENTS 2
struct meson8b_dwmac;
@@ -60,6 +85,8 @@ struct meson8b_dwmac {
phy_interface_t phy_mode;
struct clk *rgmii_tx_clk;
u32 tx_delay_ns;
+ u32 rx_delay_ns;
+ struct clk *timing_adj_clk;
};
struct meson8b_dwmac_clk_configs {
@@ -240,30 +267,82 @@ static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
return 0;
}
+static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
+ struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+	/* register cleanup for the clock that was just enabled; using the
+	 * "clk" argument (not dwmac->rgmii_tx_clk) keeps the helper correct
+	 * for the timing-adjustment clock as well
+	 */
+	return devm_add_action_or_reset(dwmac->dev,
+					(void(*)(void *))clk_disable_unprepare,
+					clk);
+}
+
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
+ u32 tx_dly_config, rx_dly_config, delay_config;
int ret;
- u8 tx_dly_val = 0;
+
+ tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK,
+ dwmac->tx_delay_ns >> 1);
+
+ if (dwmac->rx_delay_ns == 2)
+ rx_dly_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
+ else
+ rx_dly_config = 0;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ delay_config = tx_dly_config | rx_dly_config;
+ break;
case PHY_INTERFACE_MODE_RGMII_RXID:
- /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where
- * 8ns are exactly one cycle of the 125MHz RGMII TX clock):
- * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
- */
- tx_dly_val = dwmac->tx_delay_ns >> 1;
- /* fall through */
-
- case PHY_INTERFACE_MODE_RGMII_ID:
+ delay_config = tx_dly_config;
+ break;
case PHY_INTERFACE_MODE_RGMII_TXID:
+ delay_config = rx_dly_config;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_config = 0;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+	}
+
+ if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
+ if (!dwmac->timing_adj_clk) {
+ dev_err(dwmac->dev,
+ "The timing-adjustment clock is mandatory for the RX delay re-timing\n");
+ return -EINVAL;
+ }
+
+ /* The timing adjustment logic is driven by a separate clock */
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->timing_adj_clk);
+ if (ret) {
+ dev_err(dwmac->dev,
+ "Failed to enable the timing-adjustment clock\n");
+ return ret;
+ }
+ }
+
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK |
+ PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP |
+ PRG_ETH0_ADJ_DELAY | PRG_ETH0_ADJ_SKEW,
+ delay_config);
+
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) {
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK, 0);
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
-
/* Configure the 125MHz RGMII TX clock, the IP block changes
* the output automatically (= without us having to configure
* a register) based on the line-speed (125MHz for Gbit speeds,
@@ -276,34 +355,18 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return ret;
}
- ret = clk_prepare_enable(dwmac->rgmii_tx_clk);
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->rgmii_tx_clk);
if (ret) {
dev_err(dwmac->dev,
"failed to enable the RGMII TX clock\n");
return ret;
}
-
- devm_add_action_or_reset(dwmac->dev,
- (void(*)(void *))clk_disable_unprepare,
- dwmac->rgmii_tx_clk);
- break;
-
- case PHY_INTERFACE_MODE_RMII:
+ } else {
/* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK,
PRG_ETH0_INVERTED_RMII_CLK);
-
- /* TX clock delay cannot be configured in RMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- 0);
-
- break;
-
- default:
- dev_err(dwmac->dev, "unsupported phy-mode %s\n",
- phy_modes(dwmac->phy_mode));
- return -EINVAL;
}
/* enable TX_CLK and PHY_REF_CLK generator */
@@ -358,6 +421,25 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
+ /* use 0ns as fallback since this is what most boards actually use */
+ if (of_property_read_u32(pdev->dev.of_node, "amlogic,rx-delay-ns",
+ &dwmac->rx_delay_ns))
+ dwmac->rx_delay_ns = 0;
+
+ if (dwmac->rx_delay_ns != 0 && dwmac->rx_delay_ns != 2) {
+ dev_err(&pdev->dev,
+			"The only allowed RX delay values are: 0ns, 2ns\n");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
+ "timing-adjustment");
+ if (IS_ERR(dwmac->timing_adj_clk)) {
+ ret = PTR_ERR(dwmac->timing_adj_clk);
+ goto err_remove_config_dt;
+ }
+
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e0a5fe83d8e0..bfc4a92f1d92 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -75,6 +75,11 @@ struct ethqos_emac_por {
unsigned int value;
};
+struct ethqos_emac_driver_data {
+ const struct ethqos_emac_por *por;
+ unsigned int num_por;
+};
+
struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
@@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
{ .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
};
+static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
+ .por = emac_v2_3_0_por,
+ .num_por = ARRAY_SIZE(emac_v2_3_0_por),
+};
+
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
unsigned int val;
@@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
+ const struct ethqos_emac_driver_data *data;
struct qcom_ethqos *ethqos;
struct resource *res;
int ret;
@@ -471,7 +482,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
goto err_mem;
}
- ethqos->por = of_device_get_match_data(&pdev->dev);
+ data = of_device_get_match_data(&pdev->dev);
+ ethqos->por = data->por;
+ ethqos->num_por = data->num_por;
ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
if (IS_ERR(ethqos->rgmii_clk)) {
@@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
}
static const struct of_device_id qcom_ethqos_match[] = {
- { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
+ { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
{ }
};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index b2dc99289687..5d4df4c5254e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -29,6 +29,11 @@
#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16)
#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17)
+/* Clock fed to the PHY */
+#define ETH_CK_F_25M 25000000
+#define ETH_CK_F_50M 50000000
+#define ETH_CK_F_125M 125000000
+
/* Ethernet PHY interface selection in register SYSCFG Configuration
*------------------------------------------
* src |BIT(23)| BIT(22)| BIT(21)|BIT(20)|
@@ -58,33 +63,20 @@
*| | | 25MHz | 50MHz | |
* ---------------------------------------------------------------------------
*| MII | - | eth-ck | n/a | n/a |
- *| | | | | |
+ *| | | st,ext-phyclk | | |
* ---------------------------------------------------------------------------
*| GMII | - | eth-ck | n/a | n/a |
- *| | | | | |
+ *| | | st,ext-phyclk | | |
* ---------------------------------------------------------------------------
- *| RGMII | - | eth-ck | n/a | eth-ck (no pin) |
- *| | | | | st,eth-clk-sel |
+ *| RGMII | - | eth-ck | n/a | eth-ck |
+ *| | | st,ext-phyclk | | st,eth-clk-sel or|
+ *| | | | | st,ext-phyclk |
* ---------------------------------------------------------------------------
*| RMII | - | eth-ck | eth-ck | n/a |
- *| | | | st,eth-ref-clk-sel | |
+ *| | | st,ext-phyclk | st,eth-ref-clk-sel | |
+ *| | | | or st,ext-phyclk | |
* ---------------------------------------------------------------------------
*
- * BIT(17) : set this bit in RMII mode when you have PHY without crystal 50MHz
- * BIT(16) : set this bit in GMII/RGMII PHY when you do not want use 125Mhz
- * from PHY
- *-----------------------------------------------------
- * src | BIT(17) | BIT(16) |
- *-----------------------------------------------------
- * MII | n/a | n/a |
- *-----------------------------------------------------
- * GMII | n/a | st,eth-clk-sel |
- *-----------------------------------------------------
- * RGMII | n/a | st,eth-clk-sel |
- *-----------------------------------------------------
- * RMII | st,eth-ref-clk-sel | n/a |
- *-----------------------------------------------------
- *
*/
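+/* Illustrative decision sketch, mirroring stm32mp1_set_mode() below: in RMII
+ * mode with a 50MHz eth-ck and "st,ext-phyclk" set, the PHY reference clock
+ * is taken from eth-ck:
+ *
+ *	if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) &&
+ *	    (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk))
+ *		val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
+ */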
struct stm32_dwmac {
@@ -93,6 +85,8 @@ struct stm32_dwmac {
struct clk *clk_eth_ck;
struct clk *clk_ethstp;
struct clk *syscfg_clk;
+ int ext_phyclk;
+ int enable_eth_ck;
int eth_clk_sel_reg;
int eth_ref_clk_sel_reg;
int irq_pwr_wakeup;
@@ -155,14 +149,17 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(dwmac->clk_eth_ck);
- if (ret) {
- clk_disable_unprepare(dwmac->syscfg_clk);
- return ret;
+ if (dwmac->enable_eth_ck) {
+ ret = clk_prepare_enable(dwmac->clk_eth_ck);
+ if (ret) {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ return ret;
+ }
}
} else {
clk_disable_unprepare(dwmac->syscfg_clk);
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ if (dwmac->enable_eth_ck)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
}
return ret;
}
@@ -170,24 +167,34 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- u32 reg = dwmac->mode_reg;
+ u32 reg = dwmac->mode_reg, clk_rate;
int val;
+ clk_rate = clk_get_rate(dwmac->clk_eth_ck);
+ dwmac->enable_eth_ck = false;
switch (plat_dat->interface) {
case PHY_INTERFACE_MODE_MII:
+ if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
+ dwmac->enable_eth_ck = true;
val = SYSCFG_PMCR_ETH_SEL_MII;
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
break;
case PHY_INTERFACE_MODE_GMII:
val = SYSCFG_PMCR_ETH_SEL_GMII;
- if (dwmac->eth_clk_sel_reg)
+ if (clk_rate == ETH_CK_F_25M &&
+ (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ }
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n");
break;
case PHY_INTERFACE_MODE_RMII:
val = SYSCFG_PMCR_ETH_SEL_RMII;
- if (dwmac->eth_ref_clk_sel_reg)
+ if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) &&
+ (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
+ }
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -195,8 +202,11 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
val = SYSCFG_PMCR_ETH_SEL_RGMII;
- if (dwmac->eth_clk_sel_reg)
+ if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) &&
+ (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ }
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n");
break;
default:
@@ -294,6 +304,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
struct device_node *np = dev->of_node;
int err = 0;
+	/* The Ethernet PHY has no crystal */
+ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+
/* Gigabit Ethernet 125MHz clock selection. */
dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel");
@@ -431,7 +444,8 @@ static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->syscfg_clk);
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ if (dwmac->enable_eth_ck)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 28cac28253b8..61f3249bd724 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -90,6 +90,7 @@
#define GMAC_VLAN_CSVL BIT(19)
#define GMAC_VLAN_VLC GENMASK(17, 16)
#define GMAC_VLAN_VLC_SHIFT 16
+#define GMAC_VLAN_VLHT GENMASK(15, 0)
/* MAC VLAN Tag */
#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 39692d15d80c..ecd834e0e121 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -450,6 +450,12 @@ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
if (vid > 4095)
return -EINVAL;
+ if (hw->promisc) {
+ netdev_err(dev,
+ "Adding VLAN in promisc mode not supported\n");
+ return -EPERM;
+ }
+
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
/* For single VLAN filter, VID 0 means VLAN promiscuous */
@@ -499,6 +505,12 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
{
int i, ret = 0;
+ if (hw->promisc) {
+ netdev_err(dev,
+ "Deleting VLAN in promisc mode not supported\n");
+ return -EPERM;
+ }
+
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
@@ -523,9 +535,45 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
return ret;
}
+static void dwmac4_vlan_promisc_enable(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ dwmac4_write_single_vlan(dev, 0);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
+ dwmac4_write_vlan_filter(dev, hw, i, val);
+ }
+ }
+
+ hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
+ if (hash & GMAC_VLAN_VLHT) {
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+ if (value & GMAC_VLAN_VTHM) {
+ value &= ~GMAC_VLAN_VTHM;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+ }
+ }
+}
+
static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
struct mac_device_info *hw)
{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
u32 val;
int i;
@@ -542,6 +590,13 @@ static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
dwmac4_write_vlan_filter(dev, hw, i, val);
}
}
+
+ hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
+ if (hash & GMAC_VLAN_VLHT) {
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+ value |= GMAC_VLAN_VTHM;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+ }
}
static void dwmac4_set_filter(struct mac_device_info *hw,
@@ -624,6 +679,18 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
value |= GMAC_PACKET_FILTER_VTFE;
writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ if (dev->flags & IFF_PROMISC) {
+ if (!hw->promisc) {
+ hw->promisc = 1;
+ dwmac4_vlan_promisc_enable(dev, hw);
+ }
+ } else {
+ if (hw->promisc) {
+ hw->promisc = 0;
+ dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
+ }
+ }
}
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a999d6b33a64..73677c3b33b6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -630,7 +630,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
- ts_event_en = PTP_TCR_TSEVNTENA;
+ if (priv->synopsys_id != DWMAC_CORE_5_10)
+ ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -3543,15 +3544,6 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
}
}
-
-static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
-{
- if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
- return 0;
-
- return 1;
-}
-
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
@@ -5190,8 +5182,6 @@ int stmmac_resume(struct device *dev)
return ret;
}
- netif_device_attach(ndev);
-
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -5218,6 +5208,8 @@ int stmmac_resume(struct device *dev)
phylink_mac_change(priv->phylink, true);
+ netif_device_attach(ndev);
+
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 3fb21f7ac9fb..272cb47af9f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -217,15 +217,10 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- struct net_device *ndev = dev_get_drvdata(&pdev->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
int i;
stmmac_dvr_remove(&pdev->dev);
- if (priv->plat->stmmac_clk)
- clk_unregister_fixed_rate(priv->plat->stmmac_clk);
-
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index bcda49dcf619..f32317fa75c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -507,7 +507,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a") ||
- of_device_is_compatible(np, "snps,dwmac-4.20a")) {
+ of_device_is_compatible(np, "snps,dwmac-4.20a") ||
+ of_device_is_compatible(np, "snps,dwmac-5.10a")) {
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index e6d1aa882fa5..debd3c3fa6fb 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -237,12 +237,6 @@ static inline void cas_lock_tx(struct cas *cp)
spin_lock_nested(&cp->tx_lock[i], i);
}
-static inline void cas_lock_all(struct cas *cp)
-{
- spin_lock_irq(&cp->lock);
- cas_lock_tx(cp);
-}
-
/* WTZ: QA was finding deadlock problems with the previous
* versions after long test runs with multiple cards per machine.
* See if replacing cas_lock_all with safer versions helps. The
@@ -266,12 +260,6 @@ static inline void cas_unlock_tx(struct cas *cp)
spin_unlock(&cp->tx_lock[i - 1]);
}
-static inline void cas_unlock_all(struct cas *cp)
-{
- cas_unlock_tx(cp);
- spin_unlock_irq(&cp->lock);
-}
-
#define cas_unlock_all_restore(cp, flags) \
do { \
struct cas *xxxcp = (cp); \
@@ -4963,7 +4951,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
cas_cacheline_size)) {
dev_err(&pdev->dev, "Could not set PCI cache "
"line size\n");
- goto err_write_cacheline;
+ goto err_out_free_res;
}
}
#endif
@@ -5059,7 +5047,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cp->cas_flags & CAS_FLAG_SATURN)
cas_saturn_firmware_init(cp);
- cp->init_block = (struct cas_init_block *)
+ cp->init_block =
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
@@ -5136,7 +5124,6 @@ err_out_iounmap:
err_out_free_res:
pci_release_regions(pdev);
-err_write_cacheline:
/* Try to restore it in case the error occurred after we
* set it.
*/
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 40a2ce0ca808..e28727297563 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1362,18 +1362,6 @@ static void print_rxfd(struct rxf_desc *rxfd)
 * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
*/
-/*************************************************************************
- * Tx DB *
- *************************************************************************/
-static inline int bdx_tx_db_size(struct txdb *db)
-{
- int taken = db->wptr - db->rptr;
- if (taken < 0)
- taken = db->size + 1 + taken; /* (size + 1) equals memsz */
-
- return db->size - taken;
-}
-
/**
* __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
* @db: tx data base
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 8e348780efb6..182d10f171f6 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,6 +49,7 @@ config TI_CPSW_PHY_SEL
config TI_CPSW
tristate "TI CPSW Switch Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on TI_CPTS || !TI_CPTS
select TI_DAVINCI_MDIO
select MFD_SYSCON
select PAGE_POOL
@@ -64,6 +65,7 @@ config TI_CPSW_SWITCHDEV
tristate "TI CPSW Switch Support with switchdev"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
depends on NET_SWITCHDEV
+ depends on TI_CPTS || !TI_CPTS
select PAGE_POOL
select TI_DAVINCI_MDIO
select MFD_SYSCON
@@ -77,28 +79,22 @@ config TI_CPSW_SWITCHDEV
will be called cpsw_new.
config TI_CPTS
- bool "TI Common Platform Time Sync (CPTS) Support"
- depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST
+ tristate "TI Common Platform Time Sync (CPTS) Support"
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
depends on COMMON_CLK
- depends on POSIX_TIMERS
+ depends on PTP_1588_CLOCK
---help---
This driver supports the Common Platform Time Sync unit of
the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
driver offers a PTP Hardware Clock.
-config TI_CPTS_MOD
- tristate
- depends on TI_CPTS
- depends on PTP_1588_CLOCK
- default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
- default m
-
config TI_K3_AM65_CPSW_NUSS
tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select TI_DAVINCI_MDIO
imply PHY_TI_GMII_SEL
+ depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
This driver supports TI K3 AM654/J721E CPSW2G Ethernet SubSystem.
The two-port Gigabit Ethernet MAC (MCU_CPSW0) subsystem provides
@@ -109,11 +105,34 @@ config TI_K3_AM65_CPSW_NUSS
To compile this driver as a module, choose M here: the module
will be called ti-am65-cpsw-nuss.
+config TI_K3_AM65_CPTS
+ tristate "TI K3 AM65x CPTS"
+ depends on ARCH_K3 && OF
+ depends on PTP_1588_CLOCK
+ help
+	  Say y here to support the TI K3 AM65x CPTS with 1588 features such as
+	  a PTP hardware clock for each CPTS device and network packet
+	  timestamping where applicable.
+	  Depending on the integration, CPTS blocks enable compliance with
+	  the IEEE 1588-2008 standard for a precision clock synchronization
+	  protocol, Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn)
+	  and PCIe Subsystem Precision Time Measurement (PTM).
+
+config TI_AM65_CPSW_TAS
+ bool "Enable TAS offload in AM65 CPSW"
+ depends on TI_K3_AM65_CPSW_NUSS && NET_SCH_TAPRIO && TI_K3_AM65_CPTS
+ help
+	  Say y here to support Time Aware Shaper (TAS) offload in AM65 CPSW.
+ AM65 CPSW hardware supports Enhanced Scheduled Traffic (EST)
+ defined in IEEE 802.1Q 2018. The EST scheduler runs on CPTS and the
+ TAS/EST schedule is updated in the Fetch RAM memory of the CPSW.
+
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
select TI_DAVINCI_MDIO
depends on OF
depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
+ depends on TI_CPTS || !TI_CPTS
---help---
This driver supports TI's Keystone NETCP Core.
@@ -137,7 +156,7 @@ config TLAN
Devices currently supported by this driver are Compaq Netelligent,
Compaq NetFlex and Olicom cards. Please read the file
- <file:Documentation/networking/device_drivers/ti/tlan.txt>
+ <file:Documentation/networking/device_drivers/ti/tlan.rst>
for more details.
To compile this driver as a module, choose M here. The module
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 53792190e9c2..6e779292545d 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o
ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
-obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
+obj-$(CONFIG_TI_CPTS) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o
@@ -25,4 +25,5 @@ obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
-ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index c3502aa15ea0..8c4690f3ebcb 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -12,6 +12,7 @@
#include "am65-cpsw-nuss.h"
#include "cpsw_ale.h"
+#include "am65-cpts.h"
#define AM65_CPSW_REGDUMP_VER 0x1
@@ -694,6 +695,27 @@ static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
hw_stats[i].offset);
}
+static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return ethtool_op_get_ts_info(ndev, info);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = am65_cpts_phc_index(common->cpts);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
@@ -708,9 +730,17 @@ static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int rrobin;
+
+ rrobin = !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+
+ if (common->est_enabled && rrobin) {
+ netdev_err(ndev,
+ "p0-rx-ptype-rrobin flag conflicts with QOS\n");
+ return -EINVAL;
+ }
- common->pf_p0_rx_ptype_rrobin =
- !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+ common->pf_p0_rx_ptype_rrobin = rrobin;
am65_cpsw_nuss_set_p0_ptype(common);
return 0;
@@ -730,7 +760,7 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_sset_count = am65_cpsw_get_sset_count,
.get_strings = am65_cpsw_get_strings,
.get_ethtool_stats = am65_cpsw_get_ethtool_stats,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = am65_cpsw_get_ethtool_ts_info,
.get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
.set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2517ffba8178..87a4775ed53a 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -30,18 +30,21 @@
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "k3-cppi-desc-pool.h"
+#include "am65-cpts.h"
#define AM65_CPSW_SS_BASE 0x0
#define AM65_CPSW_SGMII_BASE 0x100
#define AM65_CPSW_XGMII_BASE 0x2100
#define AM65_CPSW_CPSW_NU_BASE 0x20000
#define AM65_CPSW_NU_PORTS_BASE 0x1000
+#define AM65_CPSW_NU_FRAM_BASE 0x12000
#define AM65_CPSW_NU_STATS_BASE 0x1a000
#define AM65_CPSW_NU_ALE_BASE 0x1e000
#define AM65_CPSW_NU_CPTS_BASE 0x1d000
#define AM65_CPSW_NU_PORTS_OFFSET 0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200
+#define AM65_CPSW_NU_FRAM_PORT_OFFSET 0x200
#define AM65_CPSW_MAX_PORTS 8
@@ -187,9 +190,11 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+ am65_cpsw_qos_link_up(ndev, phy->speed);
netif_tx_wake_all_queues(ndev);
} else {
int tmo;
+
/* disable forwarding */
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
@@ -203,6 +208,7 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
cpsw_sl_ctl_reset(port->slave.mac_sl);
+ am65_cpsw_qos_link_down(ndev);
netif_tx_stop_all_queues(ndev);
}
@@ -668,6 +674,18 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ ns = ((u64)psdata[1] << 32) | psdata[0];
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
@@ -745,6 +763,9 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
skb->dev = ndev;
psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* add RX timestamp */
+ if (port->rx_ts_enabled)
+ am65_cpsw_nuss_rx_ts(skb, psdata);
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
@@ -904,6 +925,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
ndev = skb->dev;
+ am65_cpts_tx_timestamp(common->cpts, skb);
+
ndev_priv = netdev_priv(ndev);
stats = this_cpu_ptr(ndev_priv->stats);
u64_stats_update_begin(&stats->syncp);
@@ -995,6 +1018,10 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
/* padding enabled in hw */
pkt_len = skb_headlen(skb);
+ /* SKB TX timestamp */
+ if (port->tx_ts_enabled)
+ am65_cpts_prep_tx_timestamp(common->cpts, skb);
+
q_idx = skb_get_queue_mapping(skb);
dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
@@ -1158,6 +1185,111 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
return 0;
}
+static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* TX HW timestamp */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ port->rx_ts_enabled = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ port->rx_ts_enabled = true;
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
+
+ /* cfg TX timestamp */
+ seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
+ AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
+
+ ts_vlan_ltype = ETH_P_8021Q;
+
+ ts_ctrl_ltype2 = ETH_P_1588 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
+
+ ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
+ AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
+
+ if (port->tx_ts_enabled)
+ ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
+ AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
+
+ writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
+ writel(ts_vlan_ltype, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
+ writel(ts_ctrl_ltype2, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
+ writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
+
+ /* en/dis RX timestamp */
+ am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = port->tx_ts_enabled ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = port->rx_ts_enabled ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
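+/* Illustrative userspace usage of the ioctls above (hypothetical socket fd
+ * and interface name; error handling omitted):
+ *
+ *	struct hwtstamp_config cfg = {
+ *		.tx_type = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_ALL,
+ *	};
+ *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ */
+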
static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
struct ifreq *req, int cmd)
{
@@ -1166,6 +1298,13 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
if (!netif_running(ndev))
return -EINVAL;
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_set(ndev, req);
+ case SIOCGHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_get(ndev, req);
+ }
+
if (!port->slave.phy)
return -EOPNOTSUPP;
@@ -1244,6 +1383,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
+ .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
};
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
@@ -1531,6 +1671,40 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
return 0;
}
+static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct device_node *node;
+ struct am65_cpts *cpts;
+ void __iomem *reg_base;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return 0;
+
+ node = of_get_child_by_name(dev->of_node, "cpts");
+ if (!node) {
+ dev_err(dev, "%s cpts not found\n", __func__);
+ return -ENOENT;
+ }
+
+ reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
+ cpts = am65_cpts_create(dev, reg_base, node);
+ if (IS_ERR(cpts)) {
+ int ret = PTR_ERR(cpts);
+
+ if (ret == -EOPNOTSUPP) {
+ dev_info(dev, "cpts disabled\n");
+ return 0;
+ }
+
+ dev_err(dev, "cpts create err %d\n", ret);
+ return ret;
+ }
+ common->cpts = cpts;
+
+ return 0;
+}
+
static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
{
struct device_node *node, *port_np;
@@ -1571,6 +1745,9 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
(AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
port->name = of_get_property(port_np, "label", NULL);
+ port->fetch_ram_base =
+ common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
+ (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
port->disabled = !of_device_is_available(port_np);
if (port->disabled)
@@ -1863,10 +2040,21 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return ret;
}
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- /* We do not want to force this, as in some cases may not have child */
- if (ret)
- dev_warn(dev, "populating child nodes err:%d\n", ret);
+ node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!node) {
+ dev_warn(dev, "MDIO node not found\n");
+ } else if (of_device_is_available(node)) {
+ struct platform_device *mdio_pdev;
+
+ mdio_pdev = of_platform_device_create(node, NULL, dev);
+ if (!mdio_pdev) {
+ ret = -ENODEV;
+ goto err_pm_clear;
+ }
+
+ common->mdio_dev = &mdio_pdev->dev;
+ }
+ of_node_put(node);
am65_cpsw_nuss_get_ver(common);
@@ -1895,11 +2083,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
ale_params.nu_switch_ale = true;
common->ale = cpsw_ale_create(&ale_params);
- if (!common->ale) {
+ if (IS_ERR(common->ale)) {
dev_err(dev, "error initializing ale engine\n");
+ ret = PTR_ERR(common->ale);
goto err_of_clear;
}
+ ret = am65_cpsw_init_cpts(common);
+ if (ret)
+ goto err_of_clear;
+
/* init ports */
for (i = 0; i < common->port_num; i++)
am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
@@ -1918,7 +2111,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return 0;
err_of_clear:
- of_platform_depopulate(dev);
+ of_platform_device_destroy(common->mdio_dev, NULL);
+err_pm_clear:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
@@ -1943,7 +2137,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
- of_platform_depopulate(dev);
+ of_platform_device_destroy(common->mdio_dev, NULL);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 41ae5b4c7931..9faf4fb1409b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -9,6 +9,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include "am65-cpsw-qos.h"
+
+struct am65_cpts;
#define HOST_PORT_NUM 0
@@ -35,8 +40,12 @@ struct am65_cpsw_port {
u32 port_id;
void __iomem *port_base;
void __iomem *stat_base;
+ void __iomem *fetch_ram_base;
bool disabled;
struct am65_cpsw_slave_data slave;
+ bool tx_ts_enabled;
+ bool rx_ts_enabled;
+ struct am65_cpsw_qos qos;
};
struct am65_cpsw_host {
@@ -72,6 +81,7 @@ struct am65_cpsw_pdata {
struct am65_cpsw_common {
struct device *dev;
+ struct device *mdio_dev;
const struct am65_cpsw_pdata *pdata;
void __iomem *ss_base;
@@ -96,8 +106,9 @@ struct am65_cpsw_common {
u32 nuss_ver;
u32 cpsw_ver;
-
bool pf_p0_rx_ptype_rrobin;
+ struct am65_cpts *cpts;
+ int est_enabled;
};
struct am65_cpsw_ndev_stats {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
new file mode 100644
index 000000000000..32eac04468bb
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet QoS submodule
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * The quality of service module includes:
+ * Enhanced Scheduled Traffic (EST - P802.1Qbv/D2.2)
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/time.h>
+
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-qos.h"
+#include "am65-cpts.h"
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
+#define AM65_CPSW_PN_REG_EST_CTL 0x060
+
+/* AM65_CPSW_REG_CTL register fields */
+#define AM65_CPSW_CTL_EST_EN BIT(18)
+
+/* AM65_CPSW_PN_REG_CTL register fields */
+#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
+
+/* AM65_CPSW_PN_REG_EST_CTL register fields */
+#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
+#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
+#define AM65_CPSW_PN_EST_TS_EN BIT(2)
+#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
+#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
+#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
+
+/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
+#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
+#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
+#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
+#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
+#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
+
+/* EST FETCH COMMAND RAM */
+#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
+#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
+#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
+#define AM65_CPSW_FETCH_CNT_OFFSET 8
+#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
+#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
+
+enum timer_act {
+ TACT_PROG, /* need to program the timer */
+ TACT_NEED_STOP, /* need to stop the timer first */
+ TACT_SKIP_PROG, /* only the buffer can be updated */
+};
+
+static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
+{
+ return port->qos.est_oper || port->qos.est_admin;
+}
+
+static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
+{
+ u32 val;
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+
+ if (enable)
+ val |= AM65_CPSW_CTL_EST_EN;
+ else
+ val &= ~AM65_CPSW_CTL_EST_EN;
+
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+ common->est_enabled = enable;
+}
+
+static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (enable)
+ val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
+ else
+ val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
+}
+
+/* target new EST RAM buffer, actual toggle happens after cycle completion */
+static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
+ int buf_num)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ if (buf_num)
+ val |= AM65_CPSW_PN_EST_BUFSEL;
+ else
+ val &= ~AM65_CPSW_PN_EST_BUFSEL;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+}
+
+/* am65_cpsw_port_est_is_swapped() - Indicate whether h/w has transitioned
+ * admin -> oper or not
+ *
+ * Return true if the transition has already happened, i.e. oper is equal to
+ * admin and the buf numbers match (est_oper->buf matches est_admin->buf).
+ * Return false if still before the transition, i.e. oper is not equal to
+ * admin (a previous admin command is waiting to be transitioned to oper
+ * state, and est_oper->buf does not match est_admin->buf).
+ */
+static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
+ int *admin)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
+ *oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ *admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);
+
+ return *admin == *oper;
+}
+
+/* am65_cpsw_port_est_get_free_buf_num() - Get a free buffer number for
+ * admin to program the new schedule into.
+ *
+ * The logic is as follows:
+ * If oper is the same as admin, return the other buffer (!oper) as the admin
+ * buffer. If oper is not the same, the driver lets the current oper continue,
+ * as it is in the process of transitioning from admin -> oper. So keep the
+ * oper by selecting the same oper buffer, by writing to the EST_BUFSEL bit in
+ * the EST CTL register. In the second iteration they will match and the code
+ * returns. The actual buffer to write commands into is selected later, just
+ * before the schedule is updated.
+ */
+static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
+{
+ int oper, admin;
+ int roll = 2;
+
+ while (roll--) {
+ if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return !oper;
+
+ /* not swapped yet, so hold off the transition by targeting the
+ * same oper buf: in-flight memory must not be touched.
+ */
+ am65_cpsw_port_est_assign_buf_num(ndev, oper);
+
+ dev_info(&ndev->dev,
+ "Prev. EST admin cycle is in transit %d -> %d\n",
+ oper, admin);
+ }
+
+ return admin;
+}
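+
+/* Example walk of am65_cpsw_port_est_get_free_buf_num() with hypothetical
+ * register states (illustrative only, not taken from the TRM):
+ *
+ * iteration 1: oper = 0 (FIFO_STATUS.EST_BUFACT), admin = 1 (EST_CTL.BUFSEL)
+ *	-> not swapped yet; re-target admin at buf 0 so the in-flight
+ *	   transition is held off, then retry
+ * iteration 2: oper = 0, admin = 0 -> swapped; return !oper = 1 as the
+ *	free buffer for the new admin schedule
+ */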
+
+static void am65_cpsw_admin_to_oper(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (port->qos.est_oper)
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = port->qos.est_admin;
+ port->qos.est_admin = NULL;
+}
+
+static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ val &= ~AM65_CPSW_PN_EST_ONEBUF;
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+
+ est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);
+
+ /* a rolled buf num means the buffers swapped while configuring */
+ if (port->qos.est_oper && port->qos.est_admin &&
+ est_new->buf == port->qos.est_oper->buf)
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+static void am65_cpsw_est_set(struct net_device *ndev, int enable)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ int common_enable = 0;
+ int i;
+
+ am65_cpsw_port_est_enable(port, enable);
+
+ for (i = 0; i < common->port_num; i++)
+ common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);
+
+ common_enable |= enable;
+ am65_cpsw_est_enable(common, common_enable);
+}
+
+/* This update is supposed to be called from any routine before it reads the
+ * real state of the admin -> oper transition, in particular from any generic
+ * routine that reports the real state back to the Taprio Qdisc.
+ */
+static void am65_cpsw_est_update_state(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int oper, admin;
+
+ if (!port->qos.est_admin)
+ return;
+
+ if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return;
+
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+/* The fetch command count is a number of bytes in Gigabit mode, or nibbles in
+ * 10/100Mb mode. So, given the link speed and a time in ns, convert the ns to
+ * the number of bytes/nibbles that can be sent while transmitting at that
+ * speed.
+ */
+static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
+{
+ u64 temp;
+
+ temp = ns * link_speed;
+ if (link_speed < SPEED_1000)
+ temp <<= 1;
+
+ return DIV_ROUND_UP(temp, 8 * 1000);
+}
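+
+/* Worked example for am65_est_cmd_ns_to_cnt() (illustrative numbers only):
+ * at SPEED_1000 a 12336 ns window gives 12336 * 1000 / 8000 = 1542 bytes;
+ * at SPEED_100 the same window is counted in nibbles, so
+ * 12336 * 100 * 2 / 8000 = 308.4, rounded up to 309 nibbles.
+ */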
+
+static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
+ int fetch_cnt,
+ int fetch_allow)
+{
+ u32 prio_mask, cmd_fetch_cnt, cmd;
+
+ do {
+ if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
+ fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
+ cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
+ } else {
+ cmd_fetch_cnt = fetch_cnt;
+ /* fetch count can't be less than 16? */
+ if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
+ cmd_fetch_cnt = 16;
+
+ fetch_cnt = 0;
+ }
+
+ prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
+ cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;
+
+ writel(cmd, addr);
+ addr += 4;
+ } while (fetch_cnt);
+
+ return addr;
+}
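+
+/* Worked example for am65_cpsw_est_set_sched_cmds() (hypothetical values):
+ * with fetch_cnt = 20000 and AM65_CPSW_FETCH_CNT_MAX = 0x3fff (16383), the
+ * interval is split into two commands: the first carries cnt 16383, the
+ * second the remaining 3617 (>= 16, so no clamping), each OR-ed with the
+ * fetch_allow priority mask in bits 7:0.
+ */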
+
+static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio,
+ int link_speed)
+{
+ int i, cmd_cnt, cmd_sum = 0;
+ u32 fetch_cnt;
+
+ for (i = 0; i < taprio->num_entries; i++) {
+ if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
+ dev_err(&ndev->dev, "Only SET command is supported");
+ return -EINVAL;
+ }
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
+ link_speed);
+
+ cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
+ if (!cmd_cnt)
+ cmd_cnt++;
+
+ cmd_sum += cmd_cnt;
+
+ if (!fetch_cnt)
+ break;
+ }
+
+ return cmd_sum;
+}
+
+static int am65_cpsw_est_check_scheds(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int cmd_num;
+
+ cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
+ port->qos.link_speed);
+ if (cmd_num < 0)
+ return cmd_num;
+
+ if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
+ dev_err(&ndev->dev, "No fetch RAM");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
+ void __iomem *ram_addr, *max_ram_addr;
+ struct tc_taprio_sched_entry *entry;
+ int i, ram_size;
+
+ ram_addr = port->fetch_ram_base;
+ ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
+ ram_addr += est_new->buf * ram_size;
+
+ max_ram_addr = ram_size + ram_addr;
+ for (i = 0; i < est_new->taprio.num_entries; i++) {
+ entry = &est_new->taprio.entries[i];
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
+ port->qos.link_speed);
+ fetch_allow = entry->gate_mask;
+ if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
+ dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
+ fetch_allow);
+
+ ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
+ fetch_allow);
+
+ if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
+ dev_info(&ndev->dev,
+ "next scheds after %d have no impact", i + 1);
+ break;
+ }
+
+ all_fetch_allow |= fetch_allow;
+ }
+
+ /* end cmd: enable the non-timed queues in case traffic runs over the cycle time */
+ if (ram_addr < max_ram_addr)
+ writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
+}
+
+/* Enable the ESTf periodic output, set the cycle start time and interval. */
+static int am65_cpsw_timer_set(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ struct am65_cpts *cpts = common->cpts;
+ struct am65_cpts_estf_cfg cfg;
+
+ cfg.ns_period = est_new->taprio.cycle_time;
+ cfg.ns_start = est_new->taprio.base_time;
+
+ return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
+}
+
+static void am65_cpsw_timer_stop(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+
+ am65_cpts_estf_disable(cpts, port->port_id - 1);
+}
+
+static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+ u64 cur_time;
+ s64 diff;
+
+ if (!port->qos.est_oper)
+ return TACT_PROG;
+
+ taprio_new = &est_new->taprio;
+ taprio_oper = &port->qos.est_oper->taprio;
+
+ if (taprio_new->cycle_time != taprio_oper->cycle_time)
+ return TACT_NEED_STOP;
+
+ /* in order to avoid a timer reset, get base_time from the oper taprio */
+ if (!taprio_new->base_time && taprio_oper)
+ taprio_new->base_time = taprio_oper->base_time;
+
+ if (taprio_new->base_time == taprio_oper->base_time)
+ return TACT_SKIP_PROG;
+
+ /* base times are cycle synchronized */
+ diff = taprio_new->base_time - taprio_oper->base_time;
+ diff = diff < 0 ? -diff : diff;
+ if (diff % taprio_new->cycle_time)
+ return TACT_NEED_STOP;
+
+ cur_time = am65_cpts_ns_gettime(cpts);
+ if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
+ return TACT_SKIP_PROG;
+
+ /* TODO: Admin schedule at future time is not currently supported */
+ return TACT_NEED_STOP;
+}
+
+static void am65_cpsw_stop_est(struct net_device *ndev)
+{
+ am65_cpsw_est_set(ndev, 0);
+ am65_cpsw_timer_stop(ndev);
+}
+
+static void am65_cpsw_purge_est(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ am65_cpsw_stop_est(ndev);
+
+ if (port->qos.est_admin)
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+
+ if (port->qos.est_oper)
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = NULL;
+ port->qos.est_admin = NULL;
+}
+
+static int am65_cpsw_configure_taprio(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpts *cpts = common->cpts;
+ int ret = 0, tact = TACT_PROG;
+
+ am65_cpsw_est_update_state(ndev);
+
+ if (!est_new->taprio.enable) {
+ am65_cpsw_stop_est(ndev);
+ return ret;
+ }
+
+ ret = am65_cpsw_est_check_scheds(ndev, est_new);
+ if (ret < 0)
+ return ret;
+
+ tact = am65_cpsw_timer_act(ndev, est_new);
+ if (tact == TACT_NEED_STOP) {
+ dev_err(&ndev->dev,
+ "Can't toggle estf timer, stop taprio first");
+ return -EINVAL;
+ }
+
+ if (tact == TACT_PROG)
+ am65_cpsw_timer_stop(ndev);
+
+ if (!est_new->taprio.base_time)
+ est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
+
+ am65_cpsw_port_est_get_buf_num(ndev, est_new);
+ am65_cpsw_est_set_sched_list(ndev, est_new);
+ am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
+
+ am65_cpsw_est_set(ndev, est_new->taprio.enable);
+
+ if (tact == TACT_PROG) {
+ ret = am65_cpsw_timer_set(ndev, est_new);
+ if (ret) {
+ dev_err(&ndev->dev, "Failed to set cycle time");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
+ struct tc_taprio_qopt_offload *to)
+{
+ int i;
+
+ *to = *from;
+ for (i = 0; i < from->num_entries; i++)
+ to->entries[i] = from->entries[i];
+}
+
+static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct am65_cpsw_est *est_new;
+ size_t size;
+ int ret = 0;
+
+ if (taprio->cycle_time_extension) {
+ dev_err(&ndev->dev, "Failed to set cycle time extension");
+ return -EOPNOTSUPP;
+ }
+
+ size = sizeof(struct tc_taprio_sched_entry) * taprio->num_entries +
+ sizeof(struct am65_cpsw_est);
+
+ est_new = devm_kzalloc(&ndev->dev, size, GFP_KERNEL);
+ if (!est_new)
+ return -ENOMEM;
+
+ am65_cpsw_cp_taprio(taprio, &est_new->taprio);
+ ret = am65_cpsw_configure_taprio(ndev, est_new);
+ if (!ret) {
+ if (taprio->enable) {
+ if (port->qos.est_admin)
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+
+ port->qos.est_admin = est_new;
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ am65_cpsw_purge_est(ndev);
+ }
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ ktime_t cur_time;
+ s64 delta;
+
+ port->qos.link_speed = link_speed;
+ if (!am65_cpsw_port_est_enabled(port))
+ return;
+
+ if (port->qos.link_down_time) {
+ cur_time = ktime_get();
+ delta = ktime_us_delta(cur_time, port->qos.link_down_time);
+ if (delta > USEC_PER_SEC) {
+ dev_err(&ndev->dev,
+ "Link has been lost for too long, stopping TAS");
+ goto purge_est;
+ }
+ }
+
+ return;
+
+purge_est:
+ am65_cpsw_purge_est(ndev);
+}
+
+static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return -ENODEV;
+
+ if (!netif_running(ndev)) {
+ dev_err(&ndev->dev, "interface is down, link speed unknown\n");
+ return -ENETDOWN;
+ }
+
+ if (common->pf_p0_rx_ptype_rrobin) {
+ dev_err(&ndev->dev,
+ "p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
+ return -EINVAL;
+ }
+
+ if (port->qos.link_speed == SPEED_UNKNOWN)
+ return -ENOLINK;
+
+ return am65_cpsw_set_taprio(ndev, type_data);
+}
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return am65_cpsw_setup_taprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ am65_cpsw_est_link_up(ndev, link_speed);
+ port->qos.link_down_time = 0;
+}
+
+void am65_cpsw_qos_link_down(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ if (!port->qos.link_down_time)
+ port->qos.link_down_time = ktime_get();
+
+ port->qos.link_speed = SPEED_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
new file mode 100644
index 000000000000..e8f1b6b59e93
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef AM65_CPSW_QOS_H_
+#define AM65_CPSW_QOS_H_
+
+#include <linux/netdevice.h>
+#include <net/pkt_sched.h>
+
+struct am65_cpsw_est {
+ int buf;
+ /* has to be the last one */
+ struct tc_taprio_qopt_offload taprio;
+};
+
+struct am65_cpsw_qos {
+ struct am65_cpsw_est *est_admin;
+ struct am65_cpsw_est *est_oper;
+ ktime_t link_down_time;
+ int link_speed;
+};
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed);
+void am65_cpsw_qos_link_down(struct net_device *ndev);
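+
+/* Usage sketch (hypothetical caller, not part of this patch): a MAC driver
+ * would typically wire these up as
+ *
+ *	.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
+ *
+ * and, from its PHY link handler:
+ *
+ *	if (phy->link)
+ *		am65_cpsw_qos_link_up(ndev, phy->speed);
+ *	else
+ *		am65_cpsw_qos_link_down(ndev);
+ */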
+
+#endif /* AM65_CPSW_QOS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
new file mode 100644
index 000000000000..c59a289e428c
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI K3 AM65x Common Platform Time Sync
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "am65-cpts.h"
+
+struct am65_genf_regs {
+ u32 comp_lo; /* Comparison Low Value 0:31 */
+ u32 comp_hi; /* Comparison High Value 32:63 */
+ u32 control; /* control */
+ u32 length; /* Length */
+ u32 ppm_low; /* PPM Load Low Value 0:31 */
+ u32 ppm_hi; /* PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Nudge value */
+} __aligned(32) __packed;
+
+#define AM65_CPTS_GENF_MAX_NUM 9
+#define AM65_CPTS_ESTF_MAX_NUM 8
+
+struct am65_cpts_regs {
+ u32 idver; /* Identification and version */
+ u32 control; /* Time sync control */
+ u32 rftclk_sel; /* Reference Clock Select Register */
+ u32 ts_push; /* Time stamp event push */
+ u32 ts_load_val_lo; /* Time Stamp Load Low Value 0:31 */
+ u32 ts_load_en; /* Time stamp load enable */
+ u32 ts_comp_lo; /* Time Stamp Comparison Low Value 0:31 */
+ u32 ts_comp_length; /* Time Stamp Comparison Length */
+ u32 intstat_raw; /* Time sync interrupt status raw */
+ u32 intstat_masked; /* Time sync interrupt status masked */
+ u32 int_enable; /* Time sync interrupt enable */
+ u32 ts_comp_nudge; /* Time Stamp Comparison Nudge Value */
+ u32 event_pop; /* Event interrupt pop */
+ u32 event_0; /* Event Time Stamp lo 0:31 */
+ u32 event_1; /* Event Type Fields */
+ u32 event_2; /* Event Type Fields domain */
+ u32 event_3; /* Event Time Stamp hi 32:63 */
+ u32 ts_load_val_hi; /* Time Stamp Load High Value 32:63 */
+ u32 ts_comp_hi; /* Time Stamp Comparison High Value 32:63 */
+ u32 ts_add_val; /* Time Stamp Add value */
+ u32 ts_ppm_low; /* Time Stamp PPM Load Low Value 0:31 */
+ u32 ts_ppm_hi; /* Time Stamp PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Time Stamp Nudge value */
+ u32 reserv[33];
+ struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
+ struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
+};
+
+/* CONTROL_REG */
+#define AM65_CPTS_CONTROL_EN BIT(0)
+#define AM65_CPTS_CONTROL_INT_TEST BIT(1)
+#define AM65_CPTS_CONTROL_TS_COMP_POLARITY BIT(2)
+#define AM65_CPTS_CONTROL_TSTAMP_EN BIT(3)
+#define AM65_CPTS_CONTROL_SEQUENCE_EN BIT(4)
+#define AM65_CPTS_CONTROL_64MODE BIT(5)
+#define AM65_CPTS_CONTROL_TS_COMP_TOG BIT(6)
+#define AM65_CPTS_CONTROL_TS_PPM_DIR BIT(7)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN BIT(8)
+#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN BIT(9)
+#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN BIT(10)
+#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN BIT(11)
+#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN BIT(12)
+#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN BIT(13)
+#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN BIT(14)
+#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
+
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
+
+/* RFTCLK_SEL_REG */
+#define AM65_CPTS_RFTCLK_SEL_MASK (0x1F)
+
+/* TS_PUSH_REG */
+#define AM65_CPTS_TS_PUSH BIT(0)
+
+/* TS_LOAD_EN_REG */
+#define AM65_CPTS_TS_LOAD_EN BIT(0)
+
+/* INTSTAT_RAW_REG */
+#define AM65_CPTS_INTSTAT_RAW_TS_PEND BIT(0)
+
+/* INTSTAT_MASKED_REG */
+#define AM65_CPTS_INTSTAT_MASKED_TS_PEND BIT(0)
+
+/* INT_ENABLE_REG */
+#define AM65_CPTS_INT_ENABLE_TS_PEND_EN BIT(0)
+
+/* TS_COMP_NUDGE_REG */
+#define AM65_CPTS_TS_COMP_NUDGE_MASK (0xFF)
+
+/* EVENT_POP_REG */
+#define AM65_CPTS_EVENT_POP BIT(0)
+
+/* EVENT_1_REG */
+#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK GENMASK(15, 0)
+
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK GENMASK(19, 16)
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT (16)
+
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK GENMASK(23, 20)
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT (20)
+
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK GENMASK(28, 24)
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT (24)
+
+/* EVENT_2_REG */
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK (0xFF)
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT (0)
+
+enum {
+ AM65_CPTS_EV_PUSH, /* Time Stamp Push Event */
+ AM65_CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+ AM65_CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+ AM65_CPTS_EV_HW, /* Hardware Time Stamp Push Event */
+ AM65_CPTS_EV_RX, /* Ethernet Receive Event */
+ AM65_CPTS_EV_TX, /* Ethernet Transmit Event */
+ AM65_CPTS_EV_TS_COMP, /* Time Stamp Compare Event */
+ AM65_CPTS_EV_HOST, /* Host Transmit Event */
+};
+
+struct am65_cpts_event {
+ struct list_head list;
+ unsigned long tmo;
+ u32 event1;
+ u32 event2;
+ u64 timestamp;
+};
+
+#define AM65_CPTS_FIFO_DEPTH (16)
+#define AM65_CPTS_MAX_EVENTS (32)
+#define AM65_CPTS_EVENT_RX_TX_TIMEOUT (20) /* ms */
+#define AM65_CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define AM65_CPTS_MIN_PPM 0x400
+
+struct am65_cpts {
+ struct device *dev;
+ struct am65_cpts_regs __iomem *reg;
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ int phc_index;
+ struct clk_hw *clk_mux_hw;
+ struct device_node *clk_mux_np;
+ struct clk *refclk;
+ u32 refclk_freq;
+ struct list_head events;
+ struct list_head pool;
+ struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
+ spinlock_t lock; /* protects the event lists */
+ u32 ext_ts_inputs;
+ u32 genf_num;
+ u32 ts_add_val;
+ int irq;
+ struct mutex ptp_clk_lock; /* PHC access sync */
+ u64 timestamp;
+ u32 genf_enable;
+ u32 hw_ts_enable;
+ struct sk_buff_head txq;
+};
+
+struct am65_cpts_skb_cb_data {
+ unsigned long tmo;
+ u32 skb_mtype_seqid;
+};
+
+#define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
+#define am65_cpts_read32(c, r) readl(&(c)->reg->r)
+
+static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
+{
+ u32 val;
+
+ val = upper_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_hi);
+ val = lower_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_lo);
+
+ am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
+}
+
+static void am65_cpts_set_add_val(struct am65_cpts *cpts)
+{
+ /* select coefficient according to the rate */
+ cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;
+
+ am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
+}
+
+static void am65_cpts_disable(struct am65_cpts *cpts)
+{
+ am65_cpts_write32(cpts, 0, control);
+ am65_cpts_write32(cpts, 0, int_enable);
+}
+
+static int am65_cpts_event_get_port(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
+ AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
+}
+
+static int am65_cpts_event_get_type(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
+}
+
+static int am65_cpts_purge_events(struct am65_cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
+ return removed ? 0 : -1;
+}
+
+static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ u32 r = am65_cpts_read32(cpts, intstat_raw);
+
+ if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
+ event->timestamp = am65_cpts_read32(cpts, event_0);
+ event->event1 = am65_cpts_read32(cpts, event_1);
+ event->event2 = am65_cpts_read32(cpts, event_2);
+ event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
+ am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
+ return false;
+ }
+ return true;
+}
+
+static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+{
+ struct ptp_clock_event pevent;
+ struct am65_cpts_event *event;
+ bool schedule = false;
+ int i, type, ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
+ event = list_first_entry_or_null(&cpts->pool,
+ struct am65_cpts_event, list);
+
+ if (!event) {
+ if (am65_cpts_purge_events(cpts)) {
+ dev_err(cpts->dev, "cpts: event pool empty\n");
+ ret = -1;
+ goto out;
+ }
+ continue;
+ }
+
+ if (am65_cpts_fifo_pop_event(cpts, event))
+ break;
+
+ type = am65_cpts_event_get_type(event);
+ switch (type) {
+ case AM65_CPTS_EV_PUSH:
+ cpts->timestamp = event->timestamp;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
+ cpts->timestamp);
+ break;
+ case AM65_CPTS_EV_RX:
+ case AM65_CPTS_EV_TX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_del_init(&event->list);
+ list_add_tail(&event->list, &cpts->events);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ schedule = true;
+ break;
+ case AM65_CPTS_EV_HW:
+ pevent.index = am65_cpts_event_get_port(event) - 1;
+ pevent.timestamp = event->timestamp;
+ pevent.type = PTP_CLOCK_EXTTS;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
+ pevent.index, event->timestamp);
+
+ ptp_clock_event(cpts->ptp_clock, &pevent);
+ break;
+ case AM65_CPTS_EV_HOST:
+ break;
+ case AM65_CPTS_EV_ROLL:
+ case AM65_CPTS_EV_HALF:
+ case AM65_CPTS_EV_TS_COMP:
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
+ type,
+ event->event1, event->event2,
+ event->timestamp);
+ break;
+ default:
+ dev_err(cpts->dev, "cpts: unknown event type\n");
+ ret = -1;
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (schedule)
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+
+ return ret;
+}
+
+static u64 am65_cpts_gettime(struct am65_cpts *cpts,
+ struct ptp_system_timestamp *sts)
+{
+ unsigned long flags;
+ u64 val = 0;
+
+ /* temporarily disable the cpts interrupt to avoid an unintended
+ * double read. An interrupt can be in-flight - that's Ok.
+ */
+ am65_cpts_write32(cpts, 0, int_enable);
+
+ /* use spin_lock_irqsave() here as it has to run very fast */
+ spin_lock_irqsave(&cpts->lock, flags);
+ ptp_read_system_prets(sts);
+ am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
+ am65_cpts_read32(cpts, ts_push);
+ ptp_read_system_postts(sts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ am65_cpts_fifo_read(cpts);
+
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ val = cpts->timestamp;
+
+ return val;
+}
+
+static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
+{
+ struct am65_cpts *cpts = dev_id;
+
+ if (am65_cpts_fifo_read(cpts))
+ dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");
+
+ return IRQ_HANDLED;
+}
+
+/* PTP clock operations */
+static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ int neg_adj = 0;
+ u64 adj_period;
+ u32 val;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ /* base freq = 1GHz = 1 000 000 000
+ * ppb_norm = ppb * base_freq / clock_freq;
+ * ppm_norm = ppb_norm / 1000
+ * adj_period = 1 000 000 / ppm_norm
+ * adj_period = 1 000 000 000 / ppb_norm
+ * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
+ * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
+ * adj_period = clock_freq / ppb
+ */
+ adj_period = div_u64(cpts->refclk_freq, ppb);
+
+ mutex_lock(&cpts->ptp_clk_lock);
+
+ val = am65_cpts_read32(cpts, control);
+ if (neg_adj)
+ val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
+ else
+ val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
+ am65_cpts_write32(cpts, val, control);
+
+ val = upper_32_bits(adj_period) & 0x3FF;
+ am65_cpts_write32(cpts, val, ts_ppm_hi);
+ val = lower_32_bits(adj_period);
+ am65_cpts_write32(cpts, val, ts_ppm_low);
+
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
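+
+/* Numeric example of the adj_period derivation above (assumed values, not
+ * from the TRM): with refclk_freq = 500 MHz and ppb = 100, adj_period =
+ * 500000000 / 100 = 5000000, i.e. the counter gets a one-LSB nudge every
+ * 5000000 refclk cycles (every 10 ms).
+ */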
+
+static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ s64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ ns += delta;
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, sts);
+ mutex_unlock(&cpts->ptp_clk_lock);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ u64 ns;
+
+ /* reuse ptp_clk_lock as it serializes ts push */
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return ns;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);
+
+static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
+{
+ u32 v;
+
+ v = am65_cpts_read32(cpts, control);
+ if (on) {
+ v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable |= BIT(index);
+ } else {
+ v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable &= ~BIT(index);
+ }
+ am65_cpts_write32(cpts, v, control);
+}
+
+static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
+{
+ if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_extts_enable_hw(cpts, index, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
+ __func__, index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg)
+{
+ u64 cycles;
+ u32 val;
+
+ cycles = cfg->ns_period * cpts->refclk_freq;
+ cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
+ if (cycles > U32_MAX)
+ return -EINVAL;
+
+ /* according to the TRM, the length register should be zeroed first */
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ val = upper_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_hi);
+ val = lower_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
+
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);
+
+static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
+ u64 ns_period, ns_start, cycles;
+ struct timespec64 ts;
+ u32 val;
+
+ if (on) {
+ ts.tv_sec = req->period.sec;
+ ts.tv_nsec = req->period.nsec;
+ ns_period = timespec64_to_ns(&ts);
+
+ cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;
+
+ ts.tv_sec = req->start.sec;
+ ts.tv_nsec = req->start.nsec;
+ ns_start = timespec64_to_ns(&ts);
+
+ val = upper_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
+ val = lower_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, genf[req->index].length);
+
+ cpts->genf_enable |= BIT(req->index);
+ } else {
+ am65_cpts_write32(cpts, 0, genf[req->index].length);
+
+ cpts->genf_enable &= ~BIT(req->index);
+ }
+}
+
+static int am65_cpts_perout_enable(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
+ if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_perout_enable_hw(cpts, req, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
+ __func__, req->index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return am65_cpts_extts_enable(cpts, rq->extts.index, on);
+ case PTP_CLK_REQ_PEROUT:
+ return am65_cpts_perout_enable(cpts, &rq->perout, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp);
+
+static struct ptp_clock_info am65_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "CTPS timer",
+ .adjfreq = am65_cpts_ptp_adjfreq,
+ .adjtime = am65_cpts_ptp_adjtime,
+ .gettimex64 = am65_cpts_ptp_gettimex,
+ .settime64 = am65_cpts_ptp_settime,
+ .enable = am65_cpts_ptp_enable,
+ .do_aux_work = am65_cpts_ts_work,
+};
+
+static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ struct sk_buff_head txq_list;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ bool found = false;
+ u32 mtype_seqid;
+
+ mtype_seqid = event->event1 &
+ (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
+ AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
+ AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ __skb_queue_head_init(&txq_list);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice_init(&cpts->txq, &txq_list);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ /* no need to grab txq.lock here: the walk is done on the local txq_list spliced out above */
+ skb_queue_walk_safe(&txq_list, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ struct am65_cpts_skb_cb_data *skb_cb =
+ (struct am65_cpts_skb_cb_data *)skb->cb;
+
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ u64 ns = event->timestamp;
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev,
+ "match tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
+ /* time out and drop any skbs that have expired (over 100 ms old) */
+ dev_dbg(cpts->dev,
+ "expiring tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice(&txq_list, &cpts->txq);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return found;
+}
+
+static void am65_cpts_find_ts(struct am65_cpts *cpts)
+{
+ struct am65_cpts_event *event;
+ struct list_head *this, *next;
+ LIST_HEAD(events_free);
+ unsigned long flags;
+ LIST_HEAD(events);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_init(&cpts->events, &events);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ list_for_each_safe(this, next, &events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (am65_cpts_match_tx_ts(cpts, event) ||
+ time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &events_free);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events_free, &cpts->pool);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ unsigned long flags;
+ long delay = -1;
+
+ am65_cpts_find_ts(cpts);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return delay;
+}
+
+/**
+ * am65_cpts_rx_enable - enable rx timestamping
+ * @cpts: cpts handle
+ * @en: enable
+ *
+ * This function enables rx packet timestamping. The CPTS can timestamp all
+ * rx packets.
+ */
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+ u32 val;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ val = am65_cpts_read32(cpts, control);
+ if (en)
+ val |= AM65_CPTS_CONTROL_TSTAMP_EN;
+ else
+ val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
+ am65_cpts_write32(cpts, val, control);
+ mutex_unlock(&cpts->ptp_clk_lock);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
+
+static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
+{
+ unsigned int ptp_class = ptp_classify_raw(skb);
+ u8 *msgtype, *data = skb->data;
+ unsigned int offset = 0;
+ __be16 *seqid;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return 0;
+
+ if (ptp_class & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (ptp_class & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return 0;
+ }
+
+ if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ return 0;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ msgtype = data + offset + OFF_PTP_CONTROL;
+ else
+ msgtype = data + offset;
+
+ seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ *mtype_seqid = (*msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
+ AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
+ *mtype_seqid |= (ntohs(*seqid) & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ return 1;
+}
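+
+/* Illustrative encoding (values assumed): a PTP Sync message (msgtype 0x0)
+ * with seqid 0x1234 yields mtype_seqid = (0x0 << 16) | 0x1234 = 0x00001234;
+ * the tx path then ORs in the event type, AM65_CPTS_EV_TX (5) << 20, giving
+ * a match key of 0x00501234.
+ */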
+
+/**
+ * am65_cpts_tx_timestamp - save tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function saves the tx packet for timestamping if the packet can be
+ * timestamped. The further processing is done from the PTP auxiliary worker.
+ */
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ return;
+
+ /* add frame to queue for processing later.
+ * The periodic FIFO check will handle this.
+ */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+ skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);
+
+/**
+ * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function should be called from .xmit().
+ * It checks if the packet can be timestamped, fills the internal cpts data
+ * in skb->cb and marks the packet as SKBTX_IN_PROGRESS.
+ */
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+ int ret;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return;
+
+ ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
+ skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);
+
+int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return cpts->phc_index;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_phc_index);
+
+static void cpts_free_clk_mux(void *data)
+{
+ struct am65_cpts *cpts = data;
+
+ of_clk_del_provider(cpts->clk_mux_np);
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+ of_node_put(cpts->clk_mux_np);
+}
+
+static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
+ struct device_node *node)
+{
+ unsigned int num_parents;
+ const char **parent_names;
+ char *clk_mux_name;
+ void __iomem *reg;
+ int ret = -EINVAL;
+
+ cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
+ if (!cpts->clk_mux_np)
+ return 0;
+
+ num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
+ cpts->clk_mux_np);
+ goto mux_fail;
+ }
+
+ parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
+ GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(cpts->dev), cpts->clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ reg = &cpts->reg->rftclk_sel;
+ /* dev must be NULL to avoid recursively incrementing
+ * the module refcnt
+ */
+ cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
+ parent_names, num_parents,
+ 0, reg, 0, 5, 0, NULL);
+ if (IS_ERR(cpts->clk_mux_hw)) {
+ ret = PTR_ERR(cpts->clk_mux_hw);
+ goto mux_fail;
+ }
+
+ ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
+ cpts->clk_mux_hw);
+ if (ret)
+ goto clk_hw_register;
+
+ ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
+ if (ret)
+ dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);
+
+ return ret;
+
+clk_hw_register:
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+mux_fail:
+ of_node_put(cpts->clk_mux_np);
+ return ret;
+}
+
+static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
+{
+ u32 prop[2];
+
+ if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
+ cpts->ext_ts_inputs = prop[0];
+
+ if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
+ cpts->genf_num = prop[0];
+
+ return cpts_of_mux_clk_setup(cpts, node);
+}
+
+static void am65_cpts_release(void *data)
+{
+ struct am65_cpts *cpts = data;
+
+ ptp_clock_unregister(cpts->ptp_clock);
+ am65_cpts_disable(cpts);
+ clk_disable_unprepare(cpts->refclk);
+}
+
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node)
+{
+ struct am65_cpts *cpts;
+ int ret, i;
+
+ cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+ if (!cpts)
+ return ERR_PTR(-ENOMEM);
+
+ cpts->dev = dev;
+ cpts->reg = (struct am65_cpts_regs __iomem *)regs;
+
+ cpts->irq = of_irq_get_byname(node, "cpts");
+ if (cpts->irq <= 0) {
+ ret = cpts->irq ?: -ENXIO;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ number (err = %d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = am65_cpts_of_parse(cpts, node);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_init(&cpts->ptp_clk_lock);
+ INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->pool);
+ spin_lock_init(&cpts->lock);
+ skb_queue_head_init(&cpts->txq);
+
+ for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
+ list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+ cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
+ if (IS_ERR(cpts->refclk)) {
+ ret = PTR_ERR(cpts->refclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get refclk %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(cpts->refclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable refclk %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ cpts->refclk_freq = clk_get_rate(cpts->refclk);
+
+ am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
+ cpts->ptp_info = am65_ptp_info;
+
+ if (cpts->ext_ts_inputs)
+ cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
+ if (cpts->genf_num)
+ cpts->ptp_info.n_per_out = cpts->genf_num;
+
+ am65_cpts_set_add_val(cpts);
+
+ am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN | AM65_CPTS_CONTROL_64MODE,
+ control);
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ /* set time to the current system time */
+ am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));
+
+ cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
+ if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
+ dev_err(dev, "Failed to register ptp clk %ld\n",
+ PTR_ERR(cpts->ptp_clock));
+ if (!cpts->ptp_clock)
+ ret = -ENODEV;
+ goto refclk_disable;
+ }
+ cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
+
+ ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
+ if (ret) {
+ dev_err(dev, "failed to add ptpclk reset action %d", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
+ am65_cpts_interrupt,
+ IRQF_ONESHOT, dev_name(dev), cpts);
+ if (ret < 0) {
+ dev_err(cpts->dev, "error attaching irq %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+ am65_cpts_read32(cpts, idver),
+ cpts->refclk_freq, cpts->ts_add_val);
+
+ return cpts;
+
+refclk_disable:
+ clk_disable_unprepare(cpts->refclk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_create);
+
+static int am65_cpts_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct am65_cpts *cpts;
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpts");
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ cpts = am65_cpts_create(dev, base, node);
+ return PTR_ERR_OR_ZERO(cpts);
+}
+
+static const struct of_device_id am65_cpts_of_match[] = {
+ { .compatible = "ti,am65-cpts", },
+ { .compatible = "ti,j721e-cpts", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, am65_cpts_of_match);
+
+static struct platform_driver am65_cpts_driver = {
+ .probe = am65_cpts_probe,
+ .driver = {
+ .name = "am65-cpts",
+ .of_match_table = am65_cpts_of_match,
+ },
+};
+module_platform_driver(am65_cpts_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
new file mode 100644
index 000000000000..cf9fbc28fd03
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* TI K3 AM65 CPTS driver interface
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_CPTS_H_
+#define K3_CPTS_H_
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct am65_cpts;
+
+struct am65_cpts_estf_cfg {
+ u64 ns_period;
+ u64 ns_start;
+};
+
+#if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node);
+int am65_cpts_phc_index(struct am65_cpts *cpts);
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg);
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
+#else
+static inline struct am65_cpts *am65_cpts_create(struct device *dev,
+ void __iomem *regs,
+ struct device_node *node)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return -1;
+}
+
+static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+}
+
+static inline u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ return 0;
+}
+
+static inline int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg)
+{
+ return 0;
+}
+
+static inline void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+}
+#endif
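+
+/* Usage sketch for a consumer of this API (hypothetical call flow, not part
+ * of this patch):
+ *
+ *	cpts = am65_cpts_create(dev, regs, node);	(at probe time)
+ *	am65_cpts_rx_enable(cpts, true);		(timestamp all rx pkts)
+ *	am65_cpts_prep_tx_timestamp(cpts, skb);		(from .xmit, before h/w)
+ *	am65_cpts_tx_timestamp(cpts, skb);		(park skb until the
+ *							 event FIFO matches it)
+ */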
+
+#endif /* K3_CPTS_H_ */
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index a530afe3ce12..c20715107075 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -532,7 +532,7 @@ fatal_error:
}
-static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int queue;
unsigned int len;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c2c5bf87da01..9b17bbbe102f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -406,6 +406,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
+ xdp.frame_sz = PAGE_SIZE;
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
@@ -1569,6 +1570,12 @@ static int cpsw_probe(struct platform_device *pdev)
return irq;
cpsw->irqs_table[1] = irq;
+ /* get misc irq */
+ irq = platform_get_irq(pdev, 3);
+ if (irq <= 0)
+ return irq;
+ cpsw->misc_irq = irq;
+
/*
* This may be required here for child devices.
*/
@@ -1703,6 +1710,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_unregister_netdev_ret;
}
+ if (!cpsw->cpts)
+ goto skip_cpts;
+
+ ret = devm_request_irq(&pdev->dev, cpsw->misc_irq, cpsw_misc_interrupt,
+ 0, dev_name(&pdev->dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching misc irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
+ }
+
+ /* Enable misc CPTS evnt_pend IRQ */
+ cpts_set_irqpoll(cpsw->cpts, false);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+skip_cpts:
cpsw_notice(priv, probe,
"initialized device (regs %pa, irq %d, pool size %d)\n",
&ss_res->start, cpsw->irqs_table[0], descs_pool_size);
@@ -1753,11 +1775,15 @@ static int cpsw_suspend(struct device *dev)
struct cpsw_common *cpsw = dev_get_drvdata(dev);
int i;
+ rtnl_lock();
+
for (i = 0; i < cpsw->data.slaves; i++)
if (cpsw->slaves[i].ndev)
if (netif_running(cpsw->slaves[i].ndev))
cpsw_ndo_stop(cpsw->slaves[i].ndev);
+ rtnl_unlock();
+
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 0374e6936091..8dc6be11b2ff 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -955,7 +955,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
if (!ale)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ale->p0_untag_vid_mask =
devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 9209e613257d..1247d35d42ef 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -348,6 +348,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
+ xdp.frame_sz = PAGE_SIZE;
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
if (ret != CPSW_XDP_PASS)
@@ -1228,7 +1229,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
data->active_slave = 0;
data->channels = CPSW_MAX_QUEUES;
data->ale_entries = CPSW_ALE_NUM_ENTRIES;
- data->dual_emac = 1;
+ data->dual_emac = true;
data->bd_ram_size = CPSW_BD_RAM_SIZE;
data->mac_control = 0;
@@ -1896,6 +1897,11 @@ static int cpsw_probe(struct platform_device *pdev)
return irq;
cpsw->irqs_table[1] = irq;
+ irq = platform_get_irq_byname(pdev, "misc");
+ if (irq <= 0)
+ return irq;
+ cpsw->misc_irq = irq;
+
platform_set_drvdata(pdev, cpsw);
/* This may be required here for child devices. */
pm_runtime_enable(dev);
@@ -1916,7 +1922,7 @@ static int cpsw_probe(struct platform_device *pdev)
soc = soc_device_match(cpsw_soc_devices);
if (soc)
- cpsw->quirk_irq = 1;
+ cpsw->quirk_irq = true;
cpsw->rx_packet_max = rx_packet_max;
cpsw->descs_pool_size = descs_pool_size;
@@ -1975,6 +1981,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_unregister_netdev;
}
+ if (!cpsw->cpts)
+ goto skip_cpts;
+
+ ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
+ 0, dev_name(&pdev->dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching misc irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ /* Enable misc CPTS evnt_pend IRQ */
+ cpts_set_irqpoll(cpsw->cpts, false);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+skip_cpts:
ret = cpsw_register_notifiers(cpsw);
if (ret)
goto clean_unregister_netdev;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 97a058ca60ac..a399f3659346 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -28,6 +28,8 @@
#include "cpsw_sl.h"
#include "davinci_cpdma.h"
+#define CPTS_N_ETX_TS 4
+
int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -112,6 +114,18 @@ irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ writel(0, &cpsw->wr_regs->misc_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
+ cpts_misc_interrupt(cpsw->cpts);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+ return IRQ_HANDLED;
+}
+
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
@@ -490,9 +504,9 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
cpsw->ale = cpsw_ale_create(&ale_params);
- if (!cpsw->ale) {
+ if (IS_ERR(cpsw->ale)) {
dev_err(dev, "error initializing ale engine\n");
- return -ENODEV;
+ return PTR_ERR(cpsw->ale);
}
dma_params.dev = dev;
@@ -522,7 +536,8 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
if (!cpts_node)
cpts_node = cpsw->dev->of_node;
- cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node);
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
+ CPTS_N_ETX_TS);
if (IS_ERR(cpsw->cpts)) {
ret = PTR_ERR(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
@@ -1340,7 +1355,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
ret = CPSW_XDP_PASS;
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto drop;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index b8d7b924ee3d..bf4e179b4ca4 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -350,6 +350,7 @@ struct cpsw_common {
bool rx_irq_disabled;
bool tx_irq_disabled;
u32 irqs_table[IRQ_NUM];
+ int misc_irq;
struct cpts *cpts;
struct devlink *devlink;
int rx_ch_num, tx_ch_num;
@@ -442,6 +443,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
struct page *page, int port);
irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
+irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id);
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget);
int cpsw_tx_poll(struct napi_struct *napi_tx, int budget);
int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 729ce09dded9..7c55d395de2c 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -21,16 +21,21 @@
#include "cpts.h"
#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define CPTS_SKB_RX_TX_TMO 100 /* ms */
+#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */
struct cpts_skb_cb_data {
+ u32 skb_mtype_seqid;
unsigned long tmo;
};
#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
-static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
- u16 ts_seqid, u8 ts_msgtype);
+static int cpts_event_port(struct cpts_event *event)
+{
+ return (event->high >> PORT_NUMBER_SHIFT) & PORT_NUMBER_MASK;
+}
static int event_expired(struct cpts_event *event)
{
@@ -71,7 +76,7 @@ static int cpts_purge_events(struct cpts *cpts)
}
if (removed)
- pr_debug("cpts: event pool cleaned up %d\n", removed);
+ dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
return removed ? 0 : -1;
}
@@ -94,132 +99,126 @@ static void cpts_purge_txq(struct cpts *cpts)
dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}
-static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
-{
- struct sk_buff *skb, *tmp;
- u16 seqid;
- u8 mtype;
- bool found = false;
-
- mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
- seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
-
- /* no need to grab txq.lock as access is always done under cpts->lock */
- skb_queue_walk_safe(&cpts->txq, skb, tmp) {
- struct skb_shared_hwtstamps ssh;
- unsigned int class = ptp_classify_raw(skb);
- struct cpts_skb_cb_data *skb_cb =
- (struct cpts_skb_cb_data *)skb->cb;
-
- if (cpts_match(skb, class, seqid, mtype)) {
- u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
-
- memset(&ssh, 0, sizeof(ssh));
- ssh.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &ssh);
- found = true;
- __skb_unlink(skb, &cpts->txq);
- dev_consume_skb_any(skb);
- dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
- mtype, seqid);
- break;
- }
-
- if (time_after(jiffies, skb_cb->tmo)) {
- /* timeout any expired skbs over 1s */
- dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
- __skb_unlink(skb, &cpts->txq);
- dev_consume_skb_any(skb);
- }
- }
-
- return found;
-}
-
/*
* Returns zero if matching event type was found.
*/
static int cpts_fifo_read(struct cpts *cpts, int match)
{
+ struct ptp_clock_event pevent;
+ bool need_schedule = false;
+ struct cpts_event *event;
+ unsigned long flags;
int i, type = -1;
u32 hi, lo;
- struct cpts_event *event;
+
+ spin_lock_irqsave(&cpts->lock, flags);
for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
if (cpts_fifo_pop(cpts, &hi, &lo))
break;
if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
- pr_err("cpts: event pool empty\n");
- return -1;
+ dev_warn(cpts->dev, "cpts: event pool empty\n");
+ break;
}
event = list_first_entry(&cpts->pool, struct cpts_event, list);
- event->tmo = jiffies + 2;
event->high = hi;
event->low = lo;
+ event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
type = event_type(event);
+
+ dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
+ type, event->high, event->low);
switch (type) {
- case CPTS_EV_TX:
- if (cpts_match_tx_ts(cpts, event)) {
- /* if the new event matches an existing skb,
- * then don't queue it
- */
- break;
- }
- /* fall through */
case CPTS_EV_PUSH:
+ WRITE_ONCE(cpts->cur_timestamp, lo);
+ timecounter_read(&cpts->tc);
+ if (cpts->mult_new) {
+ cpts->cc.mult = cpts->mult_new;
+ cpts->mult_new = 0;
+ }
+ if (!cpts->irq_poll)
+ complete(&cpts->ts_push_complete);
+ break;
+ case CPTS_EV_TX:
case CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);
+
list_del_init(&event->list);
list_add_tail(&event->list, &cpts->events);
+ need_schedule = true;
break;
case CPTS_EV_ROLL:
case CPTS_EV_HALF:
+ break;
case CPTS_EV_HW:
+ pevent.timestamp = event->timestamp;
+ pevent.type = PTP_CLOCK_EXTTS;
+ pevent.index = cpts_event_port(event) - 1;
+ ptp_clock_event(cpts->clock, &pevent);
break;
default:
- pr_err("cpts: unknown event type\n");
+ dev_err(cpts->dev, "cpts: unknown event type\n");
break;
}
if (type == match)
break;
}
+
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (!cpts->irq_poll && need_schedule)
+ ptp_schedule_worker(cpts->clock, 0);
+
return type == match ? 0 : -1;
}
+void cpts_misc_interrupt(struct cpts *cpts)
+{
+ cpts_fifo_read(cpts, -1);
+}
+EXPORT_SYMBOL_GPL(cpts_misc_interrupt);
+
static u64 cpts_systim_read(const struct cyclecounter *cc)
{
- u64 val = 0;
- struct cpts_event *event;
- struct list_head *this, *next;
struct cpts *cpts = container_of(cc, struct cpts, cc);
+ return READ_ONCE(cpts->cur_timestamp);
+}
+
+static void cpts_update_cur_time(struct cpts *cpts, int match,
+ struct ptp_system_timestamp *sts)
+{
+ unsigned long flags;
+
+ reinit_completion(&cpts->ts_push_complete);
+
+ /* use spin_lock_irqsave() here as it has to run very fast */
+ spin_lock_irqsave(&cpts->lock, flags);
+ ptp_read_system_prets(sts);
cpts_write32(cpts, TS_PUSH, ts_push);
- if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
- pr_err("cpts: unable to obtain a time stamp\n");
+ cpts_read32(cpts, ts_push);
+ ptp_read_system_postts(sts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
- list_for_each_safe(this, next, &cpts->events) {
- event = list_entry(this, struct cpts_event, list);
- if (event_type(event) == CPTS_EV_PUSH) {
- list_del_init(&event->list);
- list_add(&event->list, &cpts->pool);
- val = event->low;
- break;
- }
- }
+ if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
+ dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");
- return val;
+ if (!cpts->irq_poll &&
+ !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
+ dev_err(cpts->dev, "cpts: obtain a time stamp timeout\n");
}
/* PTP clock operations */
static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
- u64 adj;
- u32 diff, mult;
- int neg_adj = 0;
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
+ int neg_adj = 0;
+ u32 diff, mult;
+ u64 adj;
if (ppb < 0) {
neg_adj = 1;
@@ -230,38 +229,40 @@ static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
adj *= ppb;
diff = div_u64(adj, 1000000000ULL);
- spin_lock_irqsave(&cpts->lock, flags);
+ mutex_lock(&cpts->ptp_clk_mutex);
- timecounter_read(&cpts->tc);
+ cpts->mult_new = neg_adj ? mult - diff : mult + diff;
- cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
-
- spin_unlock_irqrestore(&cpts->lock, flags);
+ cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);
+ mutex_unlock(&cpts->ptp_clk_mutex);
return 0;
}
static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
- spin_lock_irqsave(&cpts->lock, flags);
+ mutex_lock(&cpts->ptp_clk_mutex);
timecounter_adjtime(&cpts->tc, delta);
- spin_unlock_irqrestore(&cpts->lock, flags);
+ mutex_unlock(&cpts->ptp_clk_mutex);
return 0;
}
-static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
- u64 ns;
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);
- spin_lock_irqsave(&cpts->lock, flags);
ns = timecounter_read(&cpts->tc);
- spin_unlock_irqrestore(&cpts->lock, flags);
+ mutex_unlock(&cpts->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
@@ -271,15 +272,38 @@ static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
static int cpts_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- u64 ns;
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
+ u64 ns;
ns = timespec64_to_ns(ts);
- spin_lock_irqsave(&cpts->lock, flags);
+ mutex_lock(&cpts->ptp_clk_mutex);
timecounter_init(&cpts->tc, &cpts->cc, ns);
- spin_unlock_irqrestore(&cpts->lock, flags);
+ mutex_unlock(&cpts->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
+{
+ u32 v;
+
+ if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ v = cpts_read32(cpts, control);
+ if (on) {
+ v |= BIT(8 + index);
+ cpts->hw_ts_enable |= BIT(index);
+ } else {
+ v &= ~BIT(8 + index);
+ cpts->hw_ts_enable &= ~BIT(index);
+ }
+ cpts_write32(cpts, v, control);
+
+ mutex_unlock(&cpts->ptp_clk_mutex);
return 0;
}
@@ -287,28 +311,120 @@ static int cpts_ptp_settime(struct ptp_clock_info *ptp,
static int cpts_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return cpts_extts_enable(cpts, rq->extts.index, on);
+ default:
+ break;
+ }
+
return -EOPNOTSUPP;
}
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+ struct sk_buff_head txq_list;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ bool found = false;
+ u32 mtype_seqid;
+
+ mtype_seqid = event->high &
+ ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
+ (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
+ (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
+
+ __skb_queue_head_init(&txq_list);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice_init(&cpts->txq, &txq_list);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ skb_queue_walk_safe(&txq_list, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(event->timestamp);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
+ /* timeout skbs that outlived CPTS_SKB_RX_TX_TMO */
+ dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice(&txq_list, &cpts->txq);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return found;
+}
+
+static void cpts_process_events(struct cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct cpts_event *event;
+ LIST_HEAD(events_free);
+ unsigned long flags;
+ LIST_HEAD(events);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_init(&cpts->events, &events);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ list_for_each_safe(this, next, &events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (cpts_match_tx_ts(cpts, event) ||
+ time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &events_free);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events_free, &cpts->pool);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
struct cpts *cpts = container_of(ptp, struct cpts, info);
unsigned long delay = cpts->ov_check_period;
- struct timespec64 ts;
unsigned long flags;
+ u64 ns;
- spin_lock_irqsave(&cpts->lock, flags);
- ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ cpts_update_cur_time(cpts, -1, NULL);
+ ns = timecounter_read(&cpts->tc);
+
+ cpts_process_events(cpts);
+ spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq)) {
cpts_purge_txq(cpts);
if (!skb_queue_empty(&cpts->txq))
delay = CPTS_SKB_TX_WORK_TIMEOUT;
}
- spin_unlock_irqrestore(&cpts->lock, flags);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
- pr_debug("cpts overflow check at %lld.%09ld\n",
- (long long)ts.tv_sec, ts.tv_nsec);
+ dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
+ mutex_unlock(&cpts->ptp_clk_mutex);
return (long)delay;
}
@@ -321,18 +437,21 @@ static const struct ptp_clock_info cpts_info = {
.pps = 0,
.adjfreq = cpts_ptp_adjfreq,
.adjtime = cpts_ptp_adjtime,
- .gettime64 = cpts_ptp_gettime,
+ .gettimex64 = cpts_ptp_gettimeex,
.settime64 = cpts_ptp_settime,
.enable = cpts_ptp_enable,
.do_aux_work = cpts_overflow_check,
};
-static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
- u16 ts_seqid, u8 ts_msgtype)
+static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
- u16 *seqid;
- unsigned int offset = 0;
+ unsigned int ptp_class = ptp_classify_raw(skb);
u8 *msgtype, *data = skb->data;
+ unsigned int offset = 0;
+ u16 *seqid;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return 0;
if (ptp_class & PTP_CLASS_VLAN)
offset += VLAN_HLEN;
@@ -360,25 +479,23 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
msgtype = data + offset;
seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ *mtype_seqid = (*msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
+ *mtype_seqid |= (ntohs(*seqid) & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
- return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
+ return 1;
}
-static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
+static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
+ int ev_type, u32 skb_mtype_seqid)
{
- u64 ns = 0;
- struct cpts_event *event;
struct list_head *this, *next;
- unsigned int class = ptp_classify_raw(skb);
+ struct cpts_event *event;
unsigned long flags;
- u16 seqid;
- u8 mtype;
-
- if (class == PTP_CLASS_NONE)
- return 0;
+ u32 mtype_seqid;
+ u64 ns = 0;
- spin_lock_irqsave(&cpts->lock, flags);
cpts_fifo_read(cpts, -1);
+ spin_lock_irqsave(&cpts->lock, flags);
list_for_each_safe(this, next, &cpts->events) {
event = list_entry(this, struct cpts_event, list);
if (event_expired(event)) {
@@ -386,29 +503,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
list_add(&event->list, &cpts->pool);
continue;
}
- mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
- seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
- if (ev_type == event_type(event) &&
- cpts_match(skb, class, seqid, mtype)) {
- ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+ mtype_seqid = event->high &
+ ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
+ (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
+ (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
+
+ if (mtype_seqid == skb_mtype_seqid) {
+ ns = event->timestamp;
list_del_init(&event->list);
list_add(&event->list, &cpts->pool);
break;
}
}
-
- if (ev_type == CPTS_EV_TX && !ns) {
- struct cpts_skb_cb_data *skb_cb =
- (struct cpts_skb_cb_data *)skb->cb;
- /* Not found, add frame to queue for processing later.
- * The periodic FIFO check will handle this.
- */
- skb_get(skb);
- /* get the timestamp for timeouts */
- skb_cb->tmo = jiffies + msecs_to_jiffies(100);
- __skb_queue_tail(&cpts->txq, skb);
- ptp_schedule_worker(cpts->clock, 0);
- }
spin_unlock_irqrestore(&cpts->lock, flags);
return ns;
@@ -416,10 +523,21 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
- u64 ns;
+ struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
struct skb_shared_hwtstamps *ssh;
+ int ret;
+ u64 ns;
- ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
+ ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
+
+ skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
+ __func__, skb_cb->skb_mtype_seqid);
+
+ ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
if (!ns)
return;
ssh = skb_hwtstamps(skb);
@@ -430,17 +548,27 @@ EXPORT_SYMBOL_GPL(cpts_rx_timestamp);
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
- u64 ns;
- struct skb_shared_hwtstamps ssh;
+ struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ int ret;
if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
return;
- ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
- if (!ns)
+
+ ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
return;
- memset(&ssh, 0, sizeof(ssh));
- ssh.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &ssh);
+
+ skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
+ __func__, skb_cb->skb_mtype_seqid);
+
+ /* Always defer TX TS processing to PTP worker */
+ skb_get(skb);
+ /* record expiry time so the worker can drop stale skbs */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
+ skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->clock, 0);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);
@@ -632,7 +760,7 @@ of_error:
}
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
- struct device_node *node)
+ struct device_node *node, u32 n_ext_ts)
{
struct cpts *cpts;
int ret;
@@ -643,7 +771,10 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->dev = dev;
cpts->reg = (struct cpsw_cpts __iomem *)regs;
+ cpts->irq_poll = true;
spin_lock_init(&cpts->lock);
+ mutex_init(&cpts->ptp_clk_mutex);
+ init_completion(&cpts->ts_push_complete);
ret = cpts_of_parse(cpts, node);
if (ret)
@@ -668,6 +799,9 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ if (n_ext_ts)
+ cpts->info.n_ext_ts = n_ext_ts;
+
cpts_calc_mult_shift(cpts);
/* save cc.mult original value as it can be modified
* by cpts_ptp_adjfreq().
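Editor's note: the cpts.c rework above replaces per-field message-type/sequence-id comparisons with a single packed u32 key (skb_mtype_seqid), so a FIFO event can be matched to a queued skb with one compare. The standalone model below uses the shift/mask layout visible in the hunks; the CPTS_EV_TX enum value is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

#define EVENT_TYPE_SHIFT	20
#define EVENT_TYPE_MASK		0xf
#define MESSAGE_TYPE_SHIFT	16
#define MESSAGE_TYPE_MASK	0xf
#define SEQUENCE_ID_SHIFT	0
#define SEQUENCE_ID_MASK	0xffff
#define CPTS_EV_TX		5	/* assumed enum value; illustrative */

static uint32_t pack_key(uint8_t mtype, uint16_t seqid, uint8_t ev)
{
	return ((uint32_t)(ev & EVENT_TYPE_MASK) << EVENT_TYPE_SHIFT) |
	       ((uint32_t)(mtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT) |
	       ((uint32_t)(seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT);
}

int main(void)
{
	/* key stored in skb->cb by cpts_skb_get_mtype_seqid() + EV bits */
	uint32_t skb_key = pack_key(0x0, 0x1234, CPTS_EV_TX);
	/* pretend the FIFO event's high word carries the same fields */
	uint32_t ev_high = skb_key;
	uint32_t ev_key = ev_high &
		((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
		 (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
		 ((uint32_t)EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

	printf("match: %s\n", ev_key == skb_key ? "yes" : "no");
	return 0;
}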
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index bb997c11ee15..07222f651d2e 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -94,6 +94,7 @@ struct cpts_event {
unsigned long tmo;
u32 high;
u32 low;
+ u64 timestamp;
};
struct cpts {
@@ -103,7 +104,7 @@ struct cpts {
int rx_enable;
struct ptp_clock_info info;
struct ptp_clock *clock;
- spinlock_t lock; /* protects time registers */
+ spinlock_t lock; /* protects fifo/events */
u32 cc_mult; /* for the nominal frequency */
struct cyclecounter cc;
struct timecounter tc;
@@ -114,6 +115,12 @@ struct cpts {
struct cpts_event pool_data[CPTS_MAX_EVENTS];
unsigned long ov_check_period;
struct sk_buff_head txq;
+ u64 cur_timestamp;
+ u32 mult_new;
+ struct mutex ptp_clk_mutex; /* sync PTP interface and worker */
+ bool irq_poll;
+ struct completion ts_push_complete;
+ u32 hw_ts_enable;
};
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
@@ -121,8 +128,9 @@ void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
int cpts_register(struct cpts *cpts);
void cpts_unregister(struct cpts *cpts);
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
- struct device_node *node);
+ struct device_node *node, u32 n_ext_ts);
void cpts_release(struct cpts *cpts);
+void cpts_misc_interrupt(struct cpts *cpts);
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
@@ -134,6 +142,11 @@ static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
return true;
}
+static inline void cpts_set_irqpoll(struct cpts *cpts, bool en)
+{
+ cpts->irq_poll = en;
+}
+
#else
struct cpts;
@@ -146,7 +159,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
static inline
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
- struct device_node *node)
+ struct device_node *node, u32 n_ext_ts)
{
return NULL;
}
@@ -169,6 +182,14 @@ static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
return false;
}
+
+static inline void cpts_misc_interrupt(struct cpts *cpts)
+{
+}
+
+static inline void cpts_set_irqpoll(struct cpts *cpts, bool en)
+{
+}
#endif
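Editor's note: the cpts.h additions above introduce the irq_poll flag and a ts_push_complete completion: in polling mode the TS push is read back inline, while in IRQ mode the caller blocks until the misc interrupt drains the FIFO and completes it. A toy pthread model of that handshake, assuming nothing beyond what the hunks show; the condition variable stands in for the kernel completion API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool push_done;

static void *misc_irq_thread(void *arg)
{
	(void)arg;
	/* stands in for cpts_misc_interrupt() -> cpts_fifo_read() */
	pthread_mutex_lock(&lock);
	push_done = true;		/* complete(&cpts->ts_push_complete) */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	bool irq_poll = false;		/* cpts_set_irqpoll(cpts, false) */
	pthread_t t;

	if (irq_poll) {
		/* polling mode: drain the FIFO inline, nothing to wait for */
		puts("fifo drained inline");
		return 0;
	}

	pthread_create(&t, NULL, misc_irq_thread, NULL);

	pthread_mutex_lock(&lock);	/* wait_for_completion_timeout(...) */
	while (!push_done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	puts("push completed by IRQ");
	return 0;
}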
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 38b7f6d35759..702fdc393da0 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -397,6 +397,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
data->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!data->regs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
index ad7cfc1316ce..38cc12f9f133 100644
--- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -64,8 +64,8 @@ k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
return ERR_PTR(-ENOMEM);
pool->gen_pool = gen_pool_create(ilog2(pool->desc_size), -1);
- if (IS_ERR(pool->gen_pool)) {
- ret = PTR_ERR(pool->gen_pool);
+ if (!pool->gen_pool) {
+ ret = -ENOMEM;
dev_err(pool->dev, "pool create failed %d\n", ret);
kfree_const(pool_name);
goto gen_pool_create_fail;
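Editor's note: the fix above matters because gen_pool_create() reports failure with NULL, not an ERR_PTR-encoded pointer, so wrapping it in IS_ERR() never fires and a NULL pool leaks through. A two-line demonstration, reusing the simplified IS_ERR() stand-in from earlier:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

int main(void)
{
	void *pool = NULL;	/* failed NULL-returning allocator */

	printf("IS_ERR(NULL) = %d\n", IS_ERR(pool));	/* 0: the old bug */
	printf("!pool        = %d\n", !pool);		/* 1: the fix */
	return 0;
}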
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index fb36115e9c51..28093923a7fb 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3704,9 +3704,9 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ale_params.nu_switch_ale = true;
}
gbe_dev->ale = cpsw_ale_create(&ale_params);
- if (!gbe_dev->ale) {
+ if (IS_ERR(gbe_dev->ale)) {
dev_err(gbe_dev->dev, "error initializing ale engine\n");
- ret = -ENODEV;
+ ret = PTR_ERR(gbe_dev->ale);
goto free_sec_ports;
} else {
dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
@@ -3716,7 +3716,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (!cpts_node)
cpts_node = of_node_get(node);
- gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, cpts_node);
+ gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
+ cpts_node, 0);
of_node_put(cpts_node);
if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
ret = PTR_ERR(gbe_dev->cpts);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index ad465202980a..857709828058 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -70,7 +70,7 @@ MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");
/* Turn on debugging.
- * See Documentation/networking/device_drivers/ti/tlan.txt for details
+ * See Documentation/networking/device_drivers/ti/tlan.rst for details
*/
static int debug;
module_param(debug, int, 0);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 070dd6fa9401..d9a5722f561b 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -382,8 +382,6 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
if (!descr->skb) {
descr->buf_addr = 0; /* tell DMAC don't touch memory */
- dev_info(ctodev(card),
- "%s:allocate skb failed !!\n", __func__);
return -ENOMEM;
}
descr->buf_size = cpu_to_be32(bufsize);
@@ -1150,7 +1148,7 @@ static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
* gelic_net_poll_controller - artificial interrupt for netconsole etc.
* @netdev: interface device structure
*
- * see Documentation/networking/netconsole.txt
+ * see Documentation/networking/netconsole.rst
*/
void gelic_net_poll_controller(struct net_device *netdev)
{
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 6576271642c1..3902b3aeb0c2 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1615,7 +1615,7 @@ spider_net_interrupt(int irq, void *ptr)
* spider_net_poll_controller - artificial interrupt for netconsole etc.
* @netdev: interface device structure
*
- * see Documentation/networking/netconsole.txt
+ * see Documentation/networking/netconsole.rst
*/
static void
spider_net_poll_controller(struct net_device *netdev)
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index a962097b58c6..6cff5f7d57c4 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
+ depends on PCI || ARCH_VT8500 || COMPILE_TEST
depends on HAS_DMA
select CRC32
select MII
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 3e313e71ae36..929244064abd 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1410,9 +1410,9 @@ static int temac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(lp->regs)) {
+ if (!lp->regs) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
- return PTR_ERR(lp->regs);
+ return -ENOMEM;
}
/* Select register access functions with the specified
@@ -1505,10 +1505,10 @@ static int temac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(lp->sdma_regs)) {
+ if (!lp->sdma_regs) {
dev_err(&pdev->dev,
"could not map DMA registers\n");
- return PTR_ERR(lp->sdma_regs);
+ return -ENOMEM;
}
if (pdata->dma_little_endian) {
lp->dma_in = temac_dma_in32_le;
diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
index 3b412a56f2cb..da4f58eed08f 100644
--- a/drivers/net/fddi/Kconfig
+++ b/drivers/net/fddi/Kconfig
@@ -77,7 +77,7 @@ config SKFP
- Netelligent 100 FDDI SAS UTP
- Netelligent 100 FDDI SAS Fibre MIC
- Read <file:Documentation/networking/skfp.txt> for information about
+ Read <file:Documentation/networking/skfp.rst> for information about
the driver.
Questions concerning this driver can be addressed to:
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 8e05b5c31a77..f4500f04147d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -30,7 +30,7 @@ config 6PACK
Note that this driver is still experimental and might cause
problems. For details about the features and the usage of the
- driver, read <file:Documentation/networking/6pack.txt>.
+ driver, read <file:Documentation/networking/6pack.rst>.
To compile this driver as a module, choose M here: the module
will be called 6pack.
@@ -84,7 +84,7 @@ config SCC
---help---
These cards are used to connect your Linux box to an amateur radio
in order to communicate with other computers. If you want to use
- this, read <file:Documentation/networking/z8530drv.txt> and the
+ this, read <file:Documentation/networking/z8530drv.rst> and the
AX25-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. Also make sure to say Y
to "Amateur Radio AX.25 Level 2" support.
@@ -98,7 +98,7 @@ config SCC_DELAY
help
Say Y here if you experience problems with the SCC driver not
working properly; please read
- <file:Documentation/networking/z8530drv.txt> for details.
+ <file:Documentation/networking/z8530drv.rst> for details.
If unsure, say N.
@@ -127,7 +127,7 @@ config BAYCOM_SER_FDX
your serial interface chip. To configure the driver, use the sethdlc
utility available in the standard ax25 utilities package. For
information on the modems, see <http://www.baycom.de/> and
- <file:Documentation/networking/baycom.txt>.
+ <file:Documentation/networking/baycom.rst>.
To compile this driver as a module, choose M here: the module
will be called baycom_ser_fdx. This is recommended.
@@ -145,7 +145,7 @@ config BAYCOM_SER_HDX
the driver, use the sethdlc utility available in the standard ax25
utilities package. For information on the modems, see
<http://www.baycom.de/> and
- <file:Documentation/networking/baycom.txt>.
+ <file:Documentation/networking/baycom.rst>.
To compile this driver as a module, choose M here: the module
will be called baycom_ser_hdx. This is recommended.
@@ -160,7 +160,7 @@ config BAYCOM_PAR
par96 designs. To configure the driver, use the sethdlc utility
available in the standard ax25 utilities package. For information on
the modems, see <http://www.baycom.de/> and the file
- <file:Documentation/networking/baycom.txt>.
+ <file:Documentation/networking/baycom.rst>.
To compile this driver as a module, choose M here: the module
will be called baycom_par. This is recommended.
@@ -175,7 +175,7 @@ config BAYCOM_EPP
designs. To configure the driver, use the sethdlc utility available
in the standard ax25 utilities package. For information on the
modems, see <http://www.baycom.de/> and the file
- <file:Documentation/networking/baycom.txt>.
+ <file:Documentation/networking/baycom.rst>.
To compile this driver as a module, choose M here: the module
will be called baycom_epp. This is recommended.
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index fbea6f232819..60dcaf2a04a9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -107,6 +107,25 @@ struct bpqdev {
static LIST_HEAD(bpq_devices);
+/*
+ * bpqether network devices are paired with ethernet devices below them, so
+ * form a special "super class" of normal ethernet devices; split their locks
+ * off into a separate class since they always nest.
+ */
+static struct lock_class_key bpq_netdev_xmit_lock_key;
+
+static void bpq_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
+}
+
+static void bpq_set_lockdep_class(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
+}
+
/* ------------------------------------------------------------------------ */
@@ -127,7 +146,8 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
{
struct bpqdev *bpq;
- list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
+ list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list,
+ lockdep_rtnl_is_held()) {
if (bpq->ethdev == dev)
return bpq->axdev;
}
@@ -477,6 +497,7 @@ static int bpq_new_device(struct net_device *edev)
err = register_netdevice(ndev);
if (err)
goto error;
+ bpq_set_lockdep_class(ndev);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6c03932d8a6b..33fdd55c6122 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -7,7 +7,7 @@
* ------------------
*
* You can find a subset of the documentation in
- * Documentation/networking/z8530drv.txt.
+ * Documentation/networking/z8530drv.rst.
*/
/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index ca68aa1df801..41f5cf0bb997 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -636,9 +636,12 @@ void netvsc_device_remove(struct hv_device *device)
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
- /* And disassociate NAPI context from device */
- for (i = 0; i < net_device->num_chn; i++)
+ /* Disable NAPI and disassociate its context from the device. */
+ for (i = 0; i < net_device->num_chn; i++) {
+ /* See also vmbus_reset_channel_cb(). */
+ napi_disable(&net_device->chan_table[i].napi);
netif_napi_del(&net_device->chan_table[i].napi);
+ }
/*
* At this point, no one should be accessing net_device
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index b86611041db6..8e4141552423 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -49,7 +49,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
xdp_set_data_meta_invalid(xdp);
xdp->data_end = xdp->data + len;
xdp->rxq = &nvchan->xdp_rxq;
- xdp->handle = 0;
+ xdp->frame_sz = PAGE_SIZE;
memcpy(xdp->data, data, len);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ebcfbae05690..6267f706e8ee 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -795,7 +795,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
if (xbuf) {
unsigned int hdroom = xdp->data - xdp->data_hard_start;
unsigned int xlen = xdp->data_end - xdp->data;
- unsigned int frag_size = netvsc_xdp_fraglen(hdroom + xlen);
+ unsigned int frag_size = xdp->frame_sz;
skb = build_skb(xbuf, frag_size);
@@ -2457,6 +2457,8 @@ static int netvsc_probe(struct hv_device *dev,
NETIF_F_HW_VLAN_CTAG_RX;
net->vlan_features = net->features;
+ netdev_lockdep_set_classes(net);
+
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index b671bea0aa7c..55226b264e3c 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -238,11 +238,6 @@ static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
-static void gsi_isr_ieob_clear(struct gsi *gsi, u32 mask)
-{
- iowrite32(mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
-}
-
static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
u32 val;
@@ -415,13 +410,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
evt_ring->state);
}
-/* Return the hardware's notion of the current state of a channel */
-static enum gsi_channel_state
-gsi_channel_state(struct gsi *gsi, u32 channel_id)
+/* Fetch the current state of a channel from hardware */
+static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
+ u32 channel_id = gsi_channel_id(channel);
+ void *virt = channel->gsi->virt;
u32 val;
- val = ioread32(gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+ val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
return u32_get_bits(val, CHSTATE_FMASK);
}
@@ -432,16 +428,18 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
+ struct gsi *gsi = channel->gsi;
u32 val;
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- if (gsi_command(channel->gsi, GSI_CH_CMD_OFFSET, val, completion))
+ if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
return 0; /* Success! */
- dev_err(channel->gsi->dev, "GSI command %u to channel %u timed out "
- "(state is %u)\n", opcode, channel_id, channel->state);
+ dev_err(gsi->dev,
+ "GSI command %u to channel %u timed out (state is %u)\n",
+ opcode, channel_id, gsi_channel_state(channel));
return -ETIMEDOUT;
}
@@ -450,18 +448,21 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ enum gsi_channel_state state;
int ret;
/* Get initial channel state */
- channel->state = gsi_channel_state(gsi, channel_id);
-
- if (channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
return -EINVAL;
ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
- if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
- channel->state);
+ state);
ret = -EIO;
}
@@ -471,18 +472,21 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
- enum gsi_channel_state state = channel->state;
+ enum gsi_channel_state state;
int ret;
+ state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
state != GSI_CHANNEL_STATE_STOPPED)
return -EINVAL;
ret = gsi_channel_command(channel, GSI_CH_START);
- if (!ret && channel->state != GSI_CHANNEL_STATE_STARTED) {
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
dev_err(channel->gsi->dev,
- "bad channel state (%u) after start\n",
- channel->state);
+ "bad channel state (%u) after start\n", state);
ret = -EIO;
}
@@ -492,23 +496,27 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
- enum gsi_channel_state state = channel->state;
+ enum gsi_channel_state state;
int ret;
+ state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STARTED &&
state != GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EINVAL;
ret = gsi_channel_command(channel, GSI_CH_STOP);
- if (ret || channel->state == GSI_CHANNEL_STATE_STOPPED)
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (ret || state == GSI_CHANNEL_STATE_STOPPED)
return ret;
/* We may have to try again if stop is in progress */
- if (channel->state == GSI_CHANNEL_STATE_STOP_IN_PROC)
+ if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EAGAIN;
- dev_err(channel->gsi->dev, "bad channel state (%u) after stop\n",
- channel->state);
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) after stop\n", state);
return -EIO;
}
@@ -516,41 +524,49 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
+ enum gsi_channel_state state;
int ret;
msleep(1); /* A short delay is required before a RESET command */
- if (channel->state != GSI_CHANNEL_STATE_STOPPED &&
- channel->state != GSI_CHANNEL_STATE_ERROR) {
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_STOPPED &&
+ state != GSI_CHANNEL_STATE_ERROR) {
dev_err(channel->gsi->dev,
- "bad channel state (%u) before reset\n",
- channel->state);
+ "bad channel state (%u) before reset\n", state);
return;
}
ret = gsi_channel_command(channel, GSI_CH_RESET);
- if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED)
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(channel->gsi->dev,
- "bad channel state (%u) after reset\n",
- channel->state);
+ "bad channel state (%u) after reset\n", state);
}
/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ enum gsi_channel_state state;
int ret;
- if (channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad channel state (%u) before dealloc\n",
- channel->state);
+ state = gsi_channel_state(channel);
+ if (state != GSI_CHANNEL_STATE_ALLOCATED) {
+ dev_err(gsi->dev,
+ "bad channel state (%u) before dealloc\n", state);
return;
}
ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
- if (!ret && channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "bad channel state (%u) after dealloc\n",
- channel->state);
+
+ /* Channel state will normally have been updated */
+ state = gsi_channel_state(channel);
+ if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ dev_err(gsi->dev,
+ "bad channel state (%u) after dealloc\n", state);
}
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
@@ -756,7 +772,6 @@ static void gsi_channel_deprogram(struct gsi_channel *channel)
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- u32 evt_ring_id = channel->evt_ring_id;
int ret;
mutex_lock(&gsi->mutex);
@@ -765,9 +780,6 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
mutex_unlock(&gsi->mutex);
- /* Clear the channel's event ring interrupt in case it's pending */
- gsi_isr_ieob_clear(gsi, BIT(evt_ring_id));
-
gsi_channel_thaw(channel);
return ret;
@@ -777,6 +789,7 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ enum gsi_channel_state state;
u32 retries;
int ret;
@@ -786,7 +799,8 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
* STOP command timed out. We won't stop a channel if stopping it
* was successful previously (so we still want the freeze above).
*/
- if (channel->state == GSI_CHANNEL_STATE_STOPPED)
+ state = gsi_channel_state(channel);
+ if (state == GSI_CHANNEL_STATE_STOPPED)
return 0;
/* RX channels might require a little time to enter STOPPED state */
@@ -811,18 +825,18 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
}
/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable)
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
mutex_lock(&gsi->mutex);
- /* Due to a hardware quirk we need to reset RX channels twice. */
gsi_channel_reset_command(channel);
- if (!channel->toward_ipa)
+ /* Due to a hardware quirk we may need to reset RX channels twice. */
+ if (legacy && !channel->toward_ipa)
gsi_channel_reset_command(channel);
- gsi_channel_program(channel, db_enable);
+ gsi_channel_program(channel, legacy);
gsi_channel_trans_cancel_pending(channel);
mutex_unlock(&gsi->mutex);
@@ -940,7 +954,6 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
channel_mask ^= BIT(channel_id);
channel = &gsi->channel[channel_id];
- channel->state = gsi_channel_state(gsi, channel_id);
complete(&channel->completion);
}
@@ -1071,7 +1084,7 @@ static void gsi_isr_ieob(struct gsi *gsi)
u32 event_mask;
event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
- gsi_isr_ieob_clear(gsi, event_mask);
+ iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
@@ -1345,7 +1358,7 @@ static void gsi_channel_update(struct gsi_channel *channel)
* gsi_channel_poll_one() - Return a single completed transaction on a channel
* @channel: Channel to be polled
*
- * @Return: Transaction pointer, or null if none are available
+ * @Return: Transaction pointer, or null if none are available
*
* This function returns the first entry on a channel's completed transaction
* list. If that list is empty, the hardware is consulted to determine
@@ -1392,6 +1405,7 @@ static int gsi_channel_poll(struct napi_struct *napi, int budget)
while (count < budget) {
struct gsi_trans *trans;
+ count++;
trans = gsi_channel_poll_one(channel);
if (!trans)
break;
@@ -1434,7 +1448,7 @@ static void gsi_evt_ring_teardown(struct gsi *gsi)
/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
- bool db_enable)
+ bool legacy)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
@@ -1453,7 +1467,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
if (ret)
goto err_evt_ring_de_alloc;
- gsi_channel_program(channel, db_enable);
+ gsi_channel_program(channel, legacy);
if (channel->toward_ipa)
netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
@@ -1530,7 +1544,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
}
/* Setup function for channels */
-static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
+static int gsi_channel_setup(struct gsi *gsi, bool legacy)
{
u32 channel_id = 0;
u32 mask;
@@ -1542,7 +1556,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
mutex_lock(&gsi->mutex);
do {
- ret = gsi_channel_setup_one(gsi, channel_id, db_enable);
+ ret = gsi_channel_setup_one(gsi, channel_id, legacy);
if (ret)
goto err_unwind;
} while (++channel_id < gsi->channel_count);
@@ -1628,7 +1642,7 @@ static void gsi_channel_teardown(struct gsi *gsi)
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
-int gsi_setup(struct gsi *gsi, bool db_enable)
+int gsi_setup(struct gsi *gsi, bool legacy)
{
u32 val;
@@ -1671,7 +1685,7 @@ int gsi_setup(struct gsi *gsi, bool db_enable)
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
- return gsi_channel_setup(gsi, db_enable);
+ return gsi_channel_setup(gsi, legacy);
}
/* Inverse of gsi_setup() */
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 0698ff1ae7a6..90a02194e7ad 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -113,8 +113,7 @@ struct gsi_channel {
u16 tre_count;
u16 event_count;
- struct completion completion; /* signals channel state changes */
- enum gsi_channel_state state;
+ struct completion completion; /* signals channel command completion */
struct gsi_ring tre_ring;
u32 evt_ring_id;
@@ -166,14 +165,14 @@ struct gsi {
/**
* gsi_setup() - Set up the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
- * @db_enable: Whether to use the GSI doorbell engine
+ * @legacy: Set up for legacy hardware
*
* @Return: 0 if successful, or a negative error code
*
* Performs initialization that must wait until the GSI hardware is
* ready (including firmware loaded).
*/
-int gsi_setup(struct gsi *gsi, bool db_enable);
+int gsi_setup(struct gsi *gsi, bool legacy);
/**
* gsi_teardown() - Tear down GSI subsystem
@@ -221,15 +220,15 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to be reset
- * @db_enable: Whether doorbell engine should be enabled
+ * @legacy: Legacy behavior
*
- * Reset a channel and reconfigure it. The @db_enable flag indicates
- * whether the doorbell engine will be enabled following reconfiguration.
+ * Reset a channel and reconfigure it. The @legacy flag indicates
+ * that some steps should be done differently for legacy hardware.
*
* GSI hardware relinquishes ownership of all pending receive buffer
* transactions and they will complete with their cancelled flag set.
*/
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable);
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy);
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
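Editor's note: the gsi.c/gsi.h hunks above delete the cached channel->state field and re-read the hardware register on every use, because the cached copy can go stale when the command-completion interrupt changes state behind the caller's back. A toy model of that staleness, with a plain variable standing in for the CNTXT_0 register:

#include <stdio.h>

enum ch_state { NOT_ALLOCATED, ALLOCATED, STARTED };

static enum ch_state hw_reg = NOT_ALLOCATED;	/* stands in for CNTXT_0 */

static enum ch_state read_hw_state(void)
{
	return hw_reg;	/* gsi_channel_state(): always consult hardware */
}

int main(void)
{
	enum ch_state cached = read_hw_state();	/* old style: cache once */

	hw_reg = ALLOCATED;	/* "interrupt" completes the ALLOC command */

	/* cached still says NOT_ALLOCATED; only the fresh read is right */
	printf("cached=%d fresh=%d\n", cached, read_hw_state());
	return 0;
}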
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 2fd21d75367d..bdbfeed359db 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -399,13 +399,14 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
/* assert(which < trans->tre_count); */
/* Set the page information for the buffer. We also need to fill in
- * the DMA address for the buffer (something dma_map_sg() normally
- * does).
+ * the DMA address and length for the buffer (something dma_map_sg()
+ * normally does).
*/
sg = &trans->sgl[which];
sg_set_buf(sg, buf, size);
sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = sg->length;
info = &trans->info[which];
info->opcode = opcode;
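Editor's note: the one-line gsi_trans.c fix above completes a hand-rolled mapping: code that bypasses dma_map_sg() must fill in the DMA length as well as the DMA address, or consumers see a zero-length entry. The model below loosely mirrors struct scatterlist field names for illustration only.

#include <stdint.h>
#include <stdio.h>

struct sg_model {
	unsigned int length;		/* CPU-side buffer length */
	uint64_t dma_address;		/* normally set by dma_map_sg() */
	unsigned int dma_length;	/* normally set by dma_map_sg() */
};

int main(void)
{
	struct sg_model sg = { .length = 64 };

	sg.dma_address = 0x1000;	/* hand-mapped, as in gsi_trans_cmd_add() */
	sg.dma_length = sg.length;	/* the line the patch adds */

	printf("dma %#llx len %u\n",
	       (unsigned long long)sg.dma_address, sg.dma_length);
	return 0;
}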
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 23fb29889e5a..b10a85392952 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -47,6 +47,10 @@ struct ipa_interrupt;
* @mem_offset: Offset from @mem_virt used for access to IPA memory
* @mem_size: Total size (bytes) of memory at @mem_virt
* @mem: Array of IPA-local memory region descriptors
+ * @imem_iova: I/O virtual address of IPA region in IMEM
+ * @imem_size: Size of IMEM region
+ * @smem_iova: I/O virtual address of IPA region in SMEM
+ * @smem_size: Size of SMEM region
* @zero_addr: DMA address of preallocated zero-filled memory
* @zero_virt: Virtual address of preallocated zero-filled memory
* @zero_size: Size (bytes) of preallocated zero-filled memory
@@ -88,6 +92,12 @@ struct ipa {
u32 mem_size;
const struct ipa_mem *mem;
+ unsigned long imem_iova;
+ size_t imem_size;
+
+ unsigned long smem_iova;
+ size_t smem_size;
+
dma_addr_t zero_addr;
void *zero_virt;
size_t zero_size;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 374491ea11cf..c5204fd58ac4 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -66,8 +66,8 @@ ipa_interconnect_init_one(struct device *dev, const char *name)
path = of_icc_get(dev, name);
if (IS_ERR(path))
- dev_err(dev, "error %ld getting memory interconnect\n",
- PTR_ERR(path));
+ dev_err(dev, "error %ld getting %s interconnect\n",
+ PTR_ERR(path), name);
return path;
}
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index d226b858742d..c9ab865e7290 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -103,28 +103,6 @@ struct ipa_cmd_ip_packet_init {
/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
-/* IPA_CMD_DMA_TASK_32B_ADDR */
-
-/* This opcode gets modified with a DMA operation count */
-
-#define DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK GENMASK(15, 8)
-
-struct ipa_cmd_hw_dma_task_32b_addr {
- __le16 flags;
- __le16 size;
- __le32 addr;
- __le16 packet_size;
- u8 reserved[6];
-};
-
-/* Field masks for ipa_cmd_hw_dma_task_32b_addr flags field */
-#define DMA_TASK_32B_ADDR_FLAGS_SW_RSVD_FMASK GENMASK(10, 0)
-#define DMA_TASK_32B_ADDR_FLAGS_CMPLT_FMASK GENMASK(11, 11)
-#define DMA_TASK_32B_ADDR_FLAGS_EOF_FMASK GENMASK(12, 12)
-#define DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK GENMASK(13, 13)
-#define DMA_TASK_32B_ADDR_FLAGS_LOCK_FMASK GENMASK(14, 14)
-#define DMA_TASK_32B_ADDR_FLAGS_UNLOCK_FMASK GENMASK(15, 15)
-
/* IPA_CMD_DMA_SHARED_MEM */
/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
@@ -163,7 +141,6 @@ union ipa_cmd_payload {
struct ipa_cmd_hw_hdr_init_local hdr_init_local;
struct ipa_cmd_register_write register_write;
struct ipa_cmd_ip_packet_init ip_packet_init;
- struct ipa_cmd_hw_dma_task_32b_addr dma_task_32b_addr;
struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};
@@ -508,42 +485,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
direction, opcode);
}
-/* Use a 32-bit DMA command to zero a block of memory */
-void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
- dma_addr_t addr, bool toward_ipa)
-{
- struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- enum ipa_cmd_opcode opcode = IPA_CMD_DMA_TASK_32B_ADDR;
- struct ipa_cmd_hw_dma_task_32b_addr *payload;
- union ipa_cmd_payload *cmd_payload;
- enum dma_data_direction direction;
- dma_addr_t payload_addr;
- u16 flags;
-
- /* assert(addr <= U32_MAX); */
- addr &= GENMASK_ULL(31, 0);
-
- /* The opcode encodes the number of DMA operations in the high byte */
- opcode |= u16_encode_bits(1, DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK);
-
- direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- /* complete: 0 = don't interrupt; eof: 0 = don't assert eot */
- flags = DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK;
- /* lock: 0 = don't lock endpoint; unlock: 0 = don't unlock */
-
- cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
- payload = &cmd_payload->dma_task_32b_addr;
-
- payload->flags = cpu_to_le16(flags);
- payload->size = cpu_to_le16(size);
- payload->addr = cpu_to_le32((u32)addr);
- payload->packet_size = cpu_to_le16(size);
-
- gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
-}
-
/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
dma_addr_t addr, bool toward_ipa)
@@ -628,23 +569,15 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
void ipa_cmd_tag_process_add(struct gsi_trans *trans)
{
- ipa_cmd_register_write_add(trans, 0, 0, 0, true);
-#if 1
- /* Reference these functions to avoid a compile error */
- (void)ipa_cmd_ip_packet_init_add;
- (void)ipa_cmd_ip_tag_status_add;
- (void) ipa_cmd_transfer_add;
-#else
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- struct gsi_endpoint *endpoint;
+ struct ipa_endpoint *endpoint;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
- ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
+ ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+ ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
-
ipa_cmd_transfer_add(trans, 4);
-#endif
}
/* Returns the number of commands required for the tag process */
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 4917525b3a47..e440aa69c8b5 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -35,7 +35,6 @@ enum ipa_cmd_opcode {
IPA_CMD_HDR_INIT_LOCAL = 9,
IPA_CMD_REGISTER_WRITE = 12,
IPA_CMD_IP_PACKET_INIT = 16,
- IPA_CMD_DMA_TASK_32B_ADDR = 17,
IPA_CMD_DMA_SHARED_MEM = 19,
IPA_CMD_IP_PACKET_TAG_STATUS = 20,
};
@@ -148,16 +147,6 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
u32 mask, bool clear_full);
/**
- * ipa_cmd_dma_task_32b_addr_add() - Add a 32-bit DMA command to a transaction
- * @trans: GSi transaction
- * @size: Number of bytes to be memory to be transferred
- * @addr: DMA address of buffer to be read into or written from
- * @toward_ipa: true means write to IPA memory; false means read
- */
-void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
- dma_addr_t addr, bool toward_ipa);
-
-/**
* ipa_cmd_dma_shared_mem_add() - Add a DMA memory command to a transaction
* @trans: GSI transaction
* @offset: Offset of IPA memory to be read or written
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index 042b5fc3c135..43faa35ae726 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -193,7 +193,7 @@ static const struct ipa_resource_data ipa_resource_data = {
};
/* IPA-resident memory region configuration for the SC7180 SoC. */
-static const struct ipa_mem ipa_mem_data[] = {
+static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
.offset = 0x0000,
.size = 0x0080,
@@ -296,12 +296,20 @@ static const struct ipa_mem ipa_mem_data[] = {
},
};
+static struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146a8000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00002000,
+};
+
/* Configuration data for the SC7180 SoC. */
const struct ipa_data ipa_data_sc7180 = {
.version = IPA_VERSION_4_2,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
- .mem_count = ARRAY_SIZE(ipa_mem_data),
- .mem_data = ipa_mem_data,
+ .mem_data = &ipa_mem_data,
};
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index 0d9c36e1e806..52d4b84e0dac 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -74,7 +74,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.tx = {
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
- .delay = true,
},
},
},
@@ -235,7 +234,7 @@ static const struct ipa_resource_data ipa_resource_data = {
};
/* IPA-resident memory region configuration for the SDM845 SoC. */
-static const struct ipa_mem ipa_mem_data[] = {
+static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
.offset = 0x0000,
.size = 0x0080,
@@ -318,12 +317,20 @@ static const struct ipa_mem ipa_mem_data[] = {
},
};
+static struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146bd000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00002000,
+};
+
/* Configuration data for the SDM845 SoC. */
const struct ipa_data ipa_data_sdm845 = {
.version = IPA_VERSION_3_5_1,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
- .mem_count = ARRAY_SIZE(ipa_mem_data),
- .mem_data = ipa_mem_data,
+ .mem_data = &ipa_mem_data,
};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 7110de2de817..7fc1058a5ca9 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -80,18 +80,12 @@ struct gsi_channel_data {
/**
* struct ipa_endpoint_tx_data - configuration data for TX endpoints
* @status_endpoint: endpoint to which status elements are sent
- * @delay: whether endpoint starts in delay mode
- *
- * Delay mode prevents a TX endpoint from transmitting anything, even if
- * commands have been presented to the hardware. Once the endpoint exits
- * delay mode, queued transfer commands are sent.
*
* The @status_endpoint is only valid if the endpoint's @status_enable
* flag is set.
*/
struct ipa_endpoint_tx_data {
enum ipa_endpoint_name status_endpoint;
- bool delay;
};
/**
@@ -245,15 +239,21 @@ struct ipa_resource_data {
};
/**
- * struct ipa_mem - IPA-local memory region description
- * @offset: offset in IPA memory space to base of the region
- * @size: size in bytes base of the region
- * @canary_count: number of 32-bit "canary" values that precede region
+ * struct ipa_mem - description of IPA memory regions
+ * @local_count: number of regions defined in the local[] array
+ * @local: array of IPA-local memory region descriptors
+ * @imem_addr: physical address of IPA region within IMEM
+ * @imem_size: size in bytes of IPA IMEM region
+ * @smem_id: item identifier for IPA region within SMEM memory
+ * @smem_size: size in bytes of the IPA SMEM region
*/
struct ipa_mem_data {
- u32 offset;
- u16 size;
- u16 canary_count;
+ u32 local_count;
+ const struct ipa_mem *local;
+ u32 imem_addr;
+ u32 imem_size;
+ u32 smem_id;
+ u32 smem_size;
};
/**
@@ -270,8 +270,7 @@ struct ipa_data {
u32 endpoint_count; /* # entries in endpoint_data[] */
const struct ipa_gsi_endpoint_data *endpoint_data;
const struct ipa_resource_data *resource_data;
- u32 mem_count; /* # entries in mem_data[] */
- const struct ipa_mem *mem_data;
+ const struct ipa_mem_data *mem_data;
};
extern const struct ipa_data ipa_data_sdm845;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index a21534f1462f..66649a806dd1 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -32,14 +32,9 @@
/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
-#define IPA_ENDPOINT_STOP_RX_RETRIES 10
-#define IPA_ENDPOINT_STOP_RX_SIZE 1 /* bytes */
-
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */
-#define ENDPOINT_STOP_DMA_TIMEOUT 15 /* milliseconds */
-
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
IPA_STATUS_OPCODE_PACKET = 0x01,
@@ -284,25 +279,52 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
/* suspend_delay represents suspend for RX, delay for TX endpoints.
* Note that suspend is not supported starting with IPA v4.0.
*/
-static int
+static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ bool state;
u32 mask;
u32 val;
- /* assert(ipa->version == IPA_VERSION_3_5_1 */
+ /* Suspend is not supported for IPA v4.0+. Delay doesn't work
+ * correctly on IPA v4.2.
+ *
+ * if (endpoint->toward_ipa)
+ * assert(ipa->version != IPA_VERSION_4_2);
+ * else
+ * assert(ipa->version == IPA_VERSION_3_5_1);
+ */
mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
val = ioread32(ipa->reg_virt + offset);
- if (suspend_delay == !!(val & mask))
- return -EALREADY; /* Already set to desired state */
+ /* Don't bother if it's already in the requested state */
+ state = !!(val & mask);
+ if (suspend_delay != state) {
+ val ^= mask;
+ iowrite32(val, ipa->reg_virt + offset);
+ }
- val ^= mask;
- iowrite32(val, ipa->reg_virt + offset);
+ return state;
+}
- return 0;
+/* We currently don't care what the previous state was for delay mode */
+static void
+ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
+{
+ /* assert(endpoint->toward_ipa); */
+
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
+}
+
+/* Returns previous suspend state (true means it was enabled) */
+static bool
+ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
+{
+ /* assert(!endpoint->toward_ipa); */
+
+ return ipa_endpoint_init_ctrl(endpoint, enable);
}
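A minimal sketch of how a caller might use the two wrappers above; the helper below is illustrative, and only ipa_endpoint_program_delay() and ipa_endpoint_program_suspend() come from this patch (suspend being meaningful only on IPA v3.5.1):

/* Hedged sketch; everything except the two program_* helpers is illustrative */
static void example_pause_endpoint(struct ipa_endpoint *endpoint, bool pause)
{
	if (endpoint->toward_ipa) {
		/* TX: the previous delay state is irrelevant */
		ipa_endpoint_program_delay(endpoint, pause);
	} else {
		/* RX: the previous suspend state is returned so a caller
		 * can restore it later (see ipa_endpoint_reset_rx_aggr())
		 */
		bool was_suspended;

		was_suspended = ipa_endpoint_program_suspend(endpoint, pause);
		(void)was_suspended;
	}
}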
/* Enable or disable delay or suspend mode on all modem endpoints */
@@ -311,7 +333,7 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
bool support_suspend;
u32 endpoint_id;
- /* DELAY mode doesn't work right on IPA v4.2 */
+ /* DELAY mode doesn't work correctly on IPA v4.2 */
if (ipa->version == IPA_VERSION_4_2)
return;
@@ -325,8 +347,10 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
continue;
/* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
- if (endpoint->toward_ipa || support_suspend)
- (void)ipa_endpoint_init_ctrl(endpoint, enable);
+ if (endpoint->toward_ipa)
+ ipa_endpoint_program_delay(endpoint, enable);
+ else if (support_suspend)
+ (void)ipa_endpoint_program_suspend(endpoint, enable);
}
}
@@ -340,7 +364,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
/* We need one command per modem TX endpoint. We can get an upper
* bound on that by assuming all initialized endpoints are modem->IPA.
* That won't happen, and we could be more precise, but this is fine
- * for now. We need to end the transactio with a "tag process."
+ * for now. We need to end the transaction with a "tag process."
*/
count = hweight32(initialized) + ipa_cmd_tag_process_count();
trans = ipa_cmd_trans_alloc(ipa, count);
@@ -1133,10 +1157,10 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
struct device *dev = &endpoint->ipa->pdev->dev;
struct ipa *ipa = endpoint->ipa;
- bool endpoint_suspended = false;
struct gsi *gsi = &ipa->gsi;
+ bool suspended = false;
dma_addr_t addr;
- bool db_enable;
+ bool legacy;
u32 retries;
u32 len = 1;
void *virt;
@@ -1164,8 +1188,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
/* Make sure the channel isn't suspended */
if (endpoint->ipa->version == IPA_VERSION_3_5_1)
- if (!ipa_endpoint_init_ctrl(endpoint, false))
- endpoint_suspended = true;
+ suspended = ipa_endpoint_program_suspend(endpoint, false);
/* Start channel and do a 1 byte read */
ret = gsi_channel_start(gsi, endpoint->channel_id);
@@ -1191,7 +1214,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
gsi_trans_read_byte_done(gsi, endpoint->channel_id);
- ret = ipa_endpoint_stop(endpoint);
+ ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
goto out_suspend_again;
@@ -1200,18 +1223,18 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
* complete the channel reset sequence. Finish by suspending the
* channel again (if necessary).
*/
- db_enable = ipa->version == IPA_VERSION_3_5_1;
- gsi_channel_reset(gsi, endpoint->channel_id, db_enable);
+ legacy = ipa->version == IPA_VERSION_3_5_1;
+ gsi_channel_reset(gsi, endpoint->channel_id, legacy);
msleep(1);
goto out_suspend_again;
err_endpoint_stop:
- ipa_endpoint_stop(endpoint);
+ (void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
- if (endpoint_suspended)
- (void)ipa_endpoint_init_ctrl(endpoint, true);
+ if (suspended)
+ (void)ipa_endpoint_program_suspend(endpoint, true);
dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
kfree(virt);
@@ -1223,8 +1246,8 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
u32 channel_id = endpoint->channel_id;
struct ipa *ipa = endpoint->ipa;
- bool db_enable;
bool special;
+ bool legacy;
int ret = 0;
/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
@@ -1233,12 +1256,12 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
*
* IPA v3.5.1 enables the doorbell engine. Newer versions do not.
*/
- db_enable = ipa->version == IPA_VERSION_3_5_1;
+ legacy = ipa->version == IPA_VERSION_3_5_1;
special = !endpoint->toward_ipa && endpoint->data->aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
- gsi_channel_reset(&ipa->gsi, channel_id, db_enable);
+ gsi_channel_reset(&ipa->gsi, channel_id, legacy);
if (ret)
dev_err(&ipa->pdev->dev,
@@ -1246,94 +1269,18 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
ret, endpoint->channel_id, endpoint->endpoint_id);
}
-static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
-{
- u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
- struct gsi_trans *trans;
- dma_addr_t addr;
- int ret;
-
- trans = ipa_cmd_trans_alloc(ipa, 1);
- if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for RX endpoint STOP workaround\n");
- return -EBUSY;
- }
-
- /* Read into the highest part of the zero memory area */
- addr = ipa->zero_addr + ipa->zero_size - size;
-
- ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);
-
- ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
- if (ret)
- gsi_trans_free(trans);
-
- return ret;
-}
-
-/**
- * ipa_endpoint_stop() - Stops a GSI channel in IPA
- * @client: Client whose endpoint should be stopped
- *
- * This function implements the sequence to stop a GSI channel
- * in IPA. This function returns when the channel is is STOP state.
- *
- * Return value: 0 on success, negative otherwise
- */
-int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
-{
- u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES;
- int ret;
-
- do {
- struct ipa *ipa = endpoint->ipa;
- struct gsi *gsi = &ipa->gsi;
-
- ret = gsi_channel_stop(gsi, endpoint->channel_id);
- if (ret != -EAGAIN || endpoint->toward_ipa)
- break;
-
- /* For IPA v3.5.1, send a DMA read task and check again */
- if (ipa->version == IPA_VERSION_3_5_1) {
- ret = ipa_endpoint_stop_rx_dma(ipa);
- if (ret)
- break;
- }
-
- msleep(1);
- } while (retries--);
-
- return retries ? ret : -EIO;
-}
-
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
- int ret;
-
if (endpoint->toward_ipa) {
- bool delay_mode = endpoint->data->tx.delay;
-
- ret = ipa_endpoint_init_ctrl(endpoint, delay_mode);
- /* Endpoint is expected to not be in delay mode */
- if (!ret != delay_mode) {
- dev_warn(dev,
- "TX endpoint %u was %sin delay mode\n",
- endpoint->endpoint_id,
- delay_mode ? "already " : "");
- }
+ if (endpoint->ipa->version != IPA_VERSION_4_2)
+ ipa_endpoint_program_delay(endpoint, false);
ipa_endpoint_init_hdr_ext(endpoint);
ipa_endpoint_init_aggr(endpoint);
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_seq(endpoint);
} else {
- if (endpoint->ipa->version == IPA_VERSION_3_5_1) {
- if (!ipa_endpoint_init_ctrl(endpoint, false))
- dev_warn(dev,
- "RX endpoint %u was suspended\n",
- endpoint->endpoint_id);
- }
+ if (endpoint->ipa->version == IPA_VERSION_3_5_1)
+ (void)ipa_endpoint_program_suspend(endpoint, false);
ipa_endpoint_init_hdr_ext(endpoint);
ipa_endpoint_init_aggr(endpoint);
}
@@ -1374,12 +1321,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
int ret;
- if (!(endpoint->ipa->enabled & mask))
+ if (!(ipa->enabled & mask))
return;
- endpoint->ipa->enabled ^= mask;
+ ipa->enabled ^= mask;
if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint);
@@ -1388,7 +1336,7 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
}
/* Note that if stop fails, the channel's state is not well-defined */
- ret = ipa_endpoint_stop(endpoint);
+ ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
dev_err(&ipa->pdev->dev,
"error %d attempting to stop endpoint %u\n", ret,
@@ -1445,7 +1393,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
* aggregation frame, then simulating the arrival of such
* an interrupt.
*/
- WARN_ON(ipa_endpoint_init_ctrl(endpoint, true));
+ (void)ipa_endpoint_program_suspend(endpoint, true);
ipa_endpoint_suspend_aggr(endpoint);
}
@@ -1468,7 +1416,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
/* IPA v3.5.1 doesn't use channel start for resume */
start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
if (!endpoint->toward_ipa && !start_channel)
- WARN_ON(ipa_endpoint_init_ctrl(endpoint, false));
+ (void)ipa_endpoint_program_suspend(endpoint, false);
ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
if (ret)
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 4b336a1f759d..58a245de488e 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -41,7 +41,6 @@ enum ipa_endpoint_name {
/**
* struct ipa_endpoint - IPA endpoint information
- * @client: Client associated with the endpoint
* @channel_id: EP's GSI channel
* @evt_ring_id: EP's GSI channel event ring
*/
@@ -76,8 +75,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
-int ipa_endpoint_stop(struct ipa_endpoint *endpoint);
-
void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint);
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 28998dcce3d2..76d5108b8403 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -108,7 +108,7 @@ int ipa_setup(struct ipa *ipa)
struct ipa_endpoint *command_endpoint;
int ret;
- /* IPA v4.0 and above don't use the doorbell engine. */
+ /* Setup for IPA v3.5.1 has some slight differences */
ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
if (ret)
return ret;
@@ -778,7 +778,7 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_kfree_ipa;
- ret = ipa_mem_init(ipa, data->mem_count, data->mem_data);
+ ret = ipa_mem_init(ipa, data->mem_data);
if (ret)
goto err_reg_exit;
@@ -933,8 +933,8 @@ static int ipa_resume(struct device *dev)
}
static const struct dev_pm_ops ipa_pm_ops = {
- .suspend_noirq = ipa_suspend,
- .resume_noirq = ipa_resume,
+ .suspend = ipa_suspend,
+ .resume = ipa_resume,
};
static struct platform_driver ipa_driver = {
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 42d2c29d9f0c..3ef814119aab 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -8,19 +8,24 @@
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
#include <linux/io.h>
+#include <linux/soc/qcom/smem.h>
#include "ipa.h"
#include "ipa_reg.h"
+#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
-#include "ipa_data.h"
#include "ipa_table.h"
#include "gsi_trans.h"
/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
+/* SMEM host id representing the modem. */
+#define QCOM_SMEM_HOST_MODEM 1
+
/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
@@ -265,16 +270,194 @@ int ipa_mem_zero_modem(struct ipa *ipa)
return 0;
}
+/**
+ * ipa_imem_init() - Initialize IMEM memory used by the IPA
+ * @ipa: IPA pointer
+ * @addr: Physical address of the IPA region in IMEM
+ * @size: Size (bytes) of the IPA region in IMEM
+ *
+ * IMEM is a block of shared memory separate from system DRAM, and
+ * a portion of this memory is available for the IPA to use. The
+ * modem accesses this memory directly, but the IPA accesses it
+ * via the IOMMU, using the AP's credentials.
+ *
+ * If this region exists (size > 0) we map it for read/write access
+ * through the IOMMU using the IPA device.
+ *
+ * Note: @addr and @size are not guaranteed to be page-aligned.
+ */
+static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct iommu_domain *domain;
+ unsigned long iova;
+ phys_addr_t phys;
+ int ret;
+
+ if (!size)
+ return 0; /* IMEM memory not used */
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain) {
+ dev_err(dev, "no IOMMU domain found for IMEM\n");
+ return -EINVAL;
+ }
+
+ /* Align the address down and the size up to page boundaries */
+ phys = addr & PAGE_MASK;
+ size = PAGE_ALIGN(size + addr - phys);
+ iova = phys; /* We just want a direct mapping */
+
+ ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ return ret;
+
+ ipa->imem_iova = iova;
+ ipa->imem_size = size;
+
+ return 0;
+}
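The align-down/align-up arithmetic above deserves a concrete example. A self-contained userspace sketch, with illustrative addr/size values and local stand-ins for the kernel's PAGE_MASK/PAGE_ALIGN:

/* Worked example of the page-alignment arithmetic in ipa_imem_init() */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long addr = 0x146a8200;	/* not page-aligned */
	unsigned long size = 0x00002000;	/* 8 KiB */
	unsigned long phys, mapped;

	phys = addr & PAGE_MASK;		 /* align start down: 0x146a8000 */
	mapped = PAGE_ALIGN(size + addr - phys); /* grow size to cover the tail */

	/* prints "phys=0x146a8000 mapped=0x3000" (three pages) */
	printf("phys=%#lx mapped=%#lx\n", phys, mapped);
	return 0;
}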
+
+static void ipa_imem_exit(struct ipa *ipa)
+{
+ struct iommu_domain *domain;
+ struct device *dev;
+
+ if (!ipa->imem_size)
+ return;
+
+ dev = &ipa->pdev->dev;
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain) {
+ size_t size;
+
+ size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
+ if (size != ipa->imem_size)
+ dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n",
+ size, ipa->imem_size);
+ } else {
+ dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
+ }
+
+ ipa->imem_size = 0;
+ ipa->imem_iova = 0;
+}
+
+/**
+ * ipa_smem_init() - Initialize SMEM memory used by the IPA
+ * @ipa: IPA pointer
+ * @item: Item ID of SMEM memory
+ * @size: Size (bytes) of SMEM memory region
+ *
+ * SMEM is a managed block of shared DRAM, from which numbered "items"
+ * can be allocated. One item is designated for use by the IPA.
+ *
+ * The modem accesses SMEM memory directly, but the IPA accesses it
+ * via the IOMMU, using the AP's credentials.
+ *
+ * If size provided is non-zero, we allocate it and map it for
+ * access through the IOMMU.
+ *
+ * Note: @size and the item address are not guaranteed to be page-aligned.
+ */
+static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct iommu_domain *domain;
+ unsigned long iova;
+ phys_addr_t phys;
+ phys_addr_t addr;
+ size_t actual;
+ void *virt;
+ int ret;
+
+ if (!size)
+ return 0; /* SMEM memory not used */
+
+ /* SMEM is memory shared between the AP and another system entity
+ * (in this case, the modem). An allocation from SMEM is persistent
+ * until the AP reboots; there is no way to free an allocated SMEM
+ * region. Allocation only reserves the space; to use it you need
+ * to "get" a pointer it (this implies no reference counting).
+ * The item might have already been allocated, in which case we
+ * use it unless the size isn't what we expect.
+ */
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
+ if (ret && ret != -EEXIST) {
+ dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
+ ret, size, item);
+ return ret;
+ }
+
+ /* Now get the address of the SMEM memory region */
+ virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
+ if (IS_ERR(virt)) {
+ ret = PTR_ERR(virt);
+ dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
+ return ret;
+ }
+
+ /* In case the region was already allocated, verify the size */
+ if (ret && actual != size) {
+ dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
+ item, actual, size);
+ return -EINVAL;
+ }
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain) {
+ dev_err(dev, "no IOMMU domain found for SMEM\n");
+ return -EINVAL;
+ }
+
+ /* Align the address down and the size up to a page boundary */
+ addr = qcom_smem_virt_to_phys(virt);
+ phys = addr & PAGE_MASK;
+ size = PAGE_ALIGN(size + addr - phys);
+ iova = phys; /* We just want a direct mapping */
+
+ ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ return ret;
+
+ ipa->smem_iova = iova;
+ ipa->smem_size = size;
+
+ return 0;
+}
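A hedged sketch distilling the allocate-or-reuse SMEM idiom used above. qcom_smem_alloc() and qcom_smem_get() are the real SMEM APIs from linux/soc/qcom/smem.h; the wrapper itself and its name are illustrative:

/* Illustrative wrapper around the persistent-allocation SMEM pattern */
#include <linux/err.h>
#include <linux/types.h>
#include <linux/soc/qcom/smem.h>

#define EXAMPLE_SMEM_HOST_MODEM	1	/* same value as QCOM_SMEM_HOST_MODEM above */

static void *example_smem_get_region(u32 item, size_t size)
{
	size_t actual;
	void *virt;
	int ret;

	/* Allocation is persistent; -EEXIST only means the item was
	 * already reserved (possibly by an earlier boot stage).
	 */
	ret = qcom_smem_alloc(EXAMPLE_SMEM_HOST_MODEM, item, size);
	if (ret && ret != -EEXIST)
		return ERR_PTR(ret);

	virt = qcom_smem_get(EXAMPLE_SMEM_HOST_MODEM, item, &actual);
	if (IS_ERR(virt))
		return virt;

	/* A pre-existing item is only usable if its size matches */
	return actual == size ? virt : ERR_PTR(-EINVAL);
}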
+
+static void ipa_smem_exit(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain) {
+ size_t size;
+
+ size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
+ if (size != ipa->smem_size)
+ dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n",
+ size, ipa->smem_size);
+
+ } else {
+ dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
+ }
+
+ ipa->smem_size = 0;
+ ipa->smem_iova = 0;
+}
+
/* Perform memory region-related initialization */
-int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem)
+int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
struct device *dev = &ipa->pdev->dev;
struct resource *res;
int ret;
- if (count > IPA_MEM_COUNT) {
+ if (mem_data->local_count > IPA_MEM_COUNT) {
dev_err(dev, "to many memory regions (%u > %u)\n",
- count, IPA_MEM_COUNT);
+ mem_data->local_count, IPA_MEM_COUNT);
return -EINVAL;
}
@@ -302,13 +485,30 @@ int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem)
ipa->mem_size = resource_size(res);
/* The ipa->mem[] array is indexed by enum ipa_mem_id values */
- ipa->mem = mem;
+ ipa->mem = mem_data->local;
+
+ ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
+ if (ret)
+ goto err_unmap;
+
+ ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
+ if (ret)
+ goto err_imem_exit;
return 0;
+
+err_imem_exit:
+ ipa_imem_exit(ipa);
+err_unmap:
+ memunmap(ipa->mem_virt);
+
+ return ret;
}
/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
+ ipa_smem_exit(ipa);
+ ipa_imem_exit(ipa);
memunmap(ipa->mem_virt);
}
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 065cb499ebe5..f99180f84f0d 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -7,6 +7,7 @@
#define _IPA_MEM_H_
struct ipa;
+struct ipa_mem_data;
/**
* DOC: IPA Local Memory
@@ -84,7 +85,7 @@ void ipa_mem_teardown(struct ipa *ipa);
int ipa_mem_zero_modem(struct ipa *ipa);
-int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem);
+int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data);
void ipa_mem_exit(struct ipa *ipa);
#endif /* _IPA_MEM_H_ */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 4d33aa7ebfbb..a5f7a79a1923 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -53,7 +53,7 @@
* @clock_on: Whether IPA clock is on
* @notified: Whether modem has been notified of clock state
* @disabled: Whether setup ready interrupt handling is disabled
- * @mutex mutex: Motex protecting ready interrupt/shutdown interlock
+ * @mutex: Mutex protecting ready-interrupt/shutdown interlock
* @panic_notifier: Panic notifier structure
*/
struct ipa_smp2p {
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f195f278a83a..15e87c097b0b 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -131,6 +131,8 @@ static int ipvlan_init(struct net_device *dev)
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
+ netdev_lockdep_set_classes(dev);
+
ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
return -ENOMEM;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d0d31cb99180..20b53e255f68 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -4049,6 +4049,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
return err;
+ netdev_lockdep_set_classes(dev);
+
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
goto unregister;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0482adc9916b..563aed5b3d9f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -123,7 +123,8 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
struct macvlan_dev *vlan;
u32 idx = macvlan_eth_hash(addr);
- hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist) {
+ hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist,
+ lockdep_rtnl_is_held()) {
if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
return vlan;
}
@@ -447,6 +448,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
int ret;
rx_handler_result_t handle_res;
+ /* Packets from dev_loopback_xmit() do not have L2 header, bail out */
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+ return RX_HANDLER_PASS;
+
port = macvlan_port_get_rcu(skb->dev);
if (is_multicast_ether_addr(eth->h_dest)) {
unsigned int hash;
@@ -541,12 +546,11 @@ xmit_world:
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
- if (vlan->netpoll)
- netpoll_send_skb(vlan->netpoll, skb);
+ return netpoll_send_skb(vlan->netpoll, skb);
#else
BUG();
-#endif
return NETDEV_TX_OK;
+#endif
}
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
@@ -889,6 +893,8 @@ static int macvlan_init(struct net_device *dev)
dev->gso_max_segs = lowerdev->gso_max_segs;
dev->hard_header_len = lowerdev->hard_header_len;
+ netdev_lockdep_set_classes(dev);
+
vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index b16a1221d19b..fb182bec8f06 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -61,7 +61,8 @@ static int net_failover_open(struct net_device *dev)
return 0;
err_standby_open:
- dev_close(primary_dev);
+ if (primary_dev)
+ dev_close(primary_dev);
err_primary_open:
netif_tx_disable(dev);
return err;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 68668a22b9dd..ec6b6f7818ac 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -431,6 +431,10 @@ enum {
DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
+#define NSIM_TRAP_CONTROL(_id, _group_id, _action) \
+ DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ NSIM_TRAP_METADATA)
#define NSIM_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, NSIM_TRAP_ID_##_id, \
NSIM_TRAP_NAME_##_id, \
@@ -458,8 +462,10 @@ static const struct devlink_trap_policer nsim_trap_policers_arr[] = {
static const struct devlink_trap_group nsim_trap_groups_arr[] = {
DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 1),
DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 2),
DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 3),
+ DEVLINK_TRAP_GROUP_GENERIC(MC_SNOOPING, 3),
};
static const struct devlink_trap nsim_traps_arr[] = {
@@ -471,12 +477,14 @@ static const struct devlink_trap nsim_traps_arr[] = {
NSIM_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
NSIM_TRAP_DRIVER_EXCEPTION(FID_MISS, L2_DROPS),
NSIM_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
- NSIM_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
+ NSIM_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS),
NSIM_TRAP_DROP(TAIL_DROP, BUFFER_DROPS),
NSIM_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP, ACL_DROPS,
DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
NSIM_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP, ACL_DROPS,
DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ NSIM_TRAP_CONTROL(IGMP_QUERY, MC_SNOOPING, MIRROR),
+ NSIM_TRAP_CONTROL(IGMP_V1_REPORT, MC_SNOOPING, TRAP),
};
#define NSIM_TRAP_L4_DATA_LEN 100
@@ -858,8 +866,7 @@ nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
return -EINVAL;
cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1];
- *p_drops = *cnt;
- *cnt += jiffies % 64;
+ *p_drops = (*cnt)++;
return 0;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3fa33d27eeba..047c27087b10 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -157,6 +157,13 @@ config MDIO_I2C
This is library mode.
+config MDIO_IPQ4019
+ tristate "Qualcomm IPQ4019 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This driver supports the MDIO interface found in Qualcomm
+ IPQ40xx series SoCs.
+
config MDIO_IPQ8064
tristate "Qualcomm IPQ8064 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
@@ -177,7 +184,8 @@ config MDIO_MSCC_MIIM
depends on HAS_IOMEM
help
This driver supports the MIIM (MDIO) interface found in the network
- switches of the Microsemi SoCs
+ switches of the Microsemi SoCs; it is recommended to switch on
+ CONFIG_HIGH_RES_TIMERS.
config MDIO_MVUSB
tristate "Marvell USB to MDIO Adapter"
@@ -346,6 +354,17 @@ config BROADCOM_PHY
Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
BCM5481, BCM54810 and BCM5482 PHYs.
+config BCM54140_PHY
+ tristate "Broadcom BCM54140 PHY"
+ depends on PHYLIB
+ depends on HWMON || HWMON=n
+ select BCM_NET_PHYLIB
+ help
+ Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
+
+ This driver also supports the hardware monitoring of this PHY and
+ exposes voltage and temperature sensors.
+
config BCM84881_PHY
tristate "Broadcom BCM84881 PHY"
depends on PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 2f5c7093a65b..dc9e53b511d6 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
+obj-$(CONFIG_MDIO_IPQ4019) += mdio-ipq4019.o
obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
@@ -68,6 +69,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+obj-$(CONFIG_BCM54140_PHY) += bcm54140.o
obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 31f731e6df72..97cbe593f0ea 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
#include <linux/of_gpio.h>
#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
@@ -43,6 +44,19 @@
#define AT803X_INTR_STATUS 0x13
#define AT803X_SMART_SPEED 0x14
+#define AT803X_SMART_SPEED_ENABLE BIT(5)
+#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
+#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
+#define AT803X_CDT 0x16
+#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
+#define AT803X_CDT_ENABLE_TEST BIT(0)
+#define AT803X_CDT_STATUS 0x1c
+#define AT803X_CDT_STATUS_STAT_NORMAL 0
+#define AT803X_CDT_STATUS_STAT_SHORT 1
+#define AT803X_CDT_STATUS_STAT_OPEN 2
+#define AT803X_CDT_STATUS_STAT_FAIL 3
+#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
+#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
#define AT803X_LED_CONTROL 0x18
#define AT803X_DEVICE_ADDR 0x03
@@ -103,11 +117,16 @@
#define AT803X_CLK_OUT_STRENGTH_HALF 1
#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
+#define AT803X_DEFAULT_DOWNSHIFT 5
+#define AT803X_MIN_DOWNSHIFT 2
+#define AT803X_MAX_DOWNSHIFT 9
+
#define ATH9331_PHY_ID 0x004dd041
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
+#define ATH8032_PHY_ID 0x004dd023
#define ATH8035_PHY_ID 0x004dd072
-#define AT803X_PHY_ID_MASK 0xffffffef
+#define AT8030_PHY_ID_MASK 0xffffffef
MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
@@ -712,15 +731,257 @@ static int at803x_read_status(struct phy_device *phydev)
return 0;
}
+static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+{
+ int val;
+
+ val = phy_read(phydev, AT803X_SMART_SPEED);
+ if (val < 0)
+ return val;
+
+ if (val & AT803X_SMART_SPEED_ENABLE)
+ *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
+ else
+ *d = DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, set;
+ int ret;
+
+ switch (cnt) {
+ case DOWNSHIFT_DEV_DEFAULT_COUNT:
+ cnt = AT803X_DEFAULT_DOWNSHIFT;
+ fallthrough;
+ case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
+ set = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER |
+ FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
+ mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
+ break;
+ case DOWNSHIFT_DEV_DISABLE:
+ set = 0;
+ mask = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
+
+ /* After changing the smart speed settings, we need to perform a
+ * software reset; use phy_init_hw() to make sure we reapply any
+ * values which might have been lost during the software reset.
+ */
+ if (ret == 1)
+ ret = phy_init_hw(phydev);
+
+ return ret;
+}
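The ethtool downshift count and the SMART_SPEED retry-limit field differ by two. A standalone round-trip check of that mapping, using minimal FIELD_GET/FIELD_PREP stand-ins (all values illustrative):

/* Round-trip check: count 2..9 maps to field values 0..7 and back */
#include <assert.h>

#define RETRY_LIMIT_SHIFT	2
#define RETRY_LIMIT_MASK	(0x7 << RETRY_LIMIT_SHIFT)	/* GENMASK(4, 2) */

static unsigned int encode(unsigned int cnt)
{
	return ((cnt - 2) << RETRY_LIMIT_SHIFT) & RETRY_LIMIT_MASK;
}

static unsigned int decode(unsigned int val)
{
	return ((val & RETRY_LIMIT_MASK) >> RETRY_LIMIT_SHIFT) + 2;
}

int main(void)
{
	unsigned int cnt;

	for (cnt = 2; cnt <= 9; cnt++)
		assert(decode(encode(cnt)) == cnt);
	return 0;
}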
+
+static int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int at803x_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case AT803X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool at803x_cdt_test_failed(u16 status)
+{
+ return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
+ AT803X_CDT_STATUS_STAT_FAIL;
+}
+
+static bool at803x_cdt_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int at803x_cdt_fault_length(u16 status)
+{
+ int dt;
+
+ /* According to the datasheet the distance to the fault is
+ * DELTA_TIME * 0.824 meters.
+ *
+ * The author suspects the correct formula is:
+ *
+ * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
+ *
+ * where c is the speed of light, VF is the velocity factor of
+ * the twisted pair cable, 125MHz the counter frequency and
+ * we need to divide by 2 because the hardware will measure the
+ * round trip time to the fault and back to the PHY.
+ *
+ * With a VF of 0.69 we get the factor 0.824 mentioned in the
+ * datasheet.
+ */
+ dt = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, status);
+
+ return (dt * 824) / 10;
+}
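Since the fault length is reported to ethtool in centimetres, the (dt * 824) / 10 scaling can be sanity-checked in isolation (the dt value below is illustrative):

/* Standalone sanity check: one DELTA_TIME unit is ~0.824 m */
#include <stdio.h>

int main(void)
{
	int dt = 50;			/* raw DELTA_TIME from AT803X_CDT_STATUS */
	int cm = (dt * 824) / 10;	/* same integer scaling as the driver */

	/* prints "fault at ~41.20 m" */
	printf("fault at ~%d.%02d m\n", cm / 100, cm % 100);
	return 0;
}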
+
+static int at803x_cdt_start(struct phy_device *phydev, int pair)
+{
+ u16 cdt;
+
+ cdt = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+ AT803X_CDT_ENABLE_TEST;
+
+ return phy_write(phydev, AT803X_CDT, cdt);
+}
+
+static int at803x_cdt_wait_for_completion(struct phy_device *phydev)
+{
+ int val, ret;
+
+ /* One test run takes about 25ms */
+ ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
+ !(val & AT803X_CDT_ENABLE_TEST),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+ int ret, val;
+
+ ret = at803x_cdt_start(phydev, pair);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, AT803X_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ if (at803x_cdt_test_failed(val))
+ return 0;
+
+ ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ at803x_cable_test_result_trans(val));
+
+ if (at803x_cdt_fault_length_valid(val))
+ ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+ at803x_cdt_fault_length(val));
+
+ return 1;
+}
+
+static int at803x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ unsigned long pair_mask;
+ int retries = 20;
+ int pair, ret;
+
+ if (phydev->phy_id == ATH9331_PHY_ID ||
+ phydev->phy_id == ATH8032_PHY_ID)
+ pair_mask = 0x3;
+ else
+ pair_mask = 0xf;
+
+ *finished = false;
+
+ /* According to the datasheet the CDT can be performed when
+ * there is no link partner or when the link partner is
+ * auto-negotiating. Starting the test will restart the AN
+ * automatically. It seems that by doing this repeatedly we will
+ * get a slot where our link partner won't disturb our
+ * measurement.
+ */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = at803x_cable_test_one_pair(phydev, pair);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ clear_bit(pair, &pair_mask);
+ }
+ if (pair_mask)
+ msleep(250);
+ }
+
+ *finished = true;
+
+ return 0;
+}
+
+static int at803x_cable_test_start(struct phy_device *phydev)
+{
+ /* Enable auto-negotiation, but advertise no capabilities, so no
+ * link will be established. A restart of the auto-negotiation is not
+ * required, because the cable test will automatically break the link.
+ */
+ phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+ phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+ if (phydev->phy_id != ATH9331_PHY_ID &&
+ phydev->phy_id != ATH8032_PHY_ID)
+ phy_write(phydev, MII_CTRL1000, 0);
+
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* Qualcomm Atheros AR8035 */
- .phy_id = ATH8035_PHY_ID,
+ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
.name = "Qualcomm Atheros AR8035",
- .phy_id_mask = AT803X_PHY_ID_MASK,
+ .flags = PHY_POLL_CABLE_TEST,
.probe = at803x_probe,
.remove = at803x_remove,
.config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
@@ -729,11 +990,15 @@ static struct phy_driver at803x_driver[] = {
.read_status = at803x_read_status,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
}, {
/* Qualcomm Atheros AR8030 */
.phy_id = ATH8030_PHY_ID,
.name = "Qualcomm Atheros AR8030",
- .phy_id_mask = AT803X_PHY_ID_MASK,
+ .phy_id_mask = AT8030_PHY_ID_MASK,
.probe = at803x_probe,
.remove = at803x_remove,
.config_init = at803x_config_init,
@@ -747,12 +1012,13 @@ static struct phy_driver at803x_driver[] = {
.config_intr = at803x_config_intr,
}, {
/* Qualcomm Atheros AR8031/AR8033 */
- .phy_id = ATH8031_PHY_ID,
+ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
.name = "Qualcomm Atheros AR8031/AR8033",
- .phy_id_mask = AT803X_PHY_ID_MASK,
+ .flags = PHY_POLL_CABLE_TEST,
.probe = at803x_probe,
.remove = at803x_remove,
.config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
@@ -762,23 +1028,49 @@ static struct phy_driver at803x_driver[] = {
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8032 */
+ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+ .name = "Qualcomm Atheros AR8032",
+ .probe = at803x_probe,
+ .remove = at803x_remove,
+ .flags = PHY_POLL_CABLE_TEST,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .ack_interrupt = at803x_ack_interrupt,
+ .config_intr = at803x_config_intr,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
}, {
/* ATHEROS AR9331 */
PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
.name = "Qualcomm Atheros AR9331 built-in PHY",
.suspend = at803x_suspend,
.resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
/* PHY_BASIC_FEATURES */
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
} };
module_phy_driver(at803x_driver);
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
- { ATH8030_PHY_ID, AT803X_PHY_ID_MASK },
- { ATH8031_PHY_ID, AT803X_PHY_ID_MASK },
- { ATH8035_PHY_ID, AT803X_PHY_ID_MASK },
+ { ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index e77b274a09fd..ef6825b30323 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -4,45 +4,103 @@
*/
#include "bcm-phy-lib.h"
+#include <linux/bitfield.h>
#include <linux/brcmphy.h>
#include <linux/export.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#define MII_BCM_CHANNEL_WIDTH 0x2000
#define BCM_CL45VEN_EEE_ADV 0x3c
-int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
+int __bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
{
int rc;
- rc = phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+ rc = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
if (rc < 0)
return rc;
- return phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
+ return __phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_write_exp);
+
+int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
+{
+ int rc;
+
+ phy_lock_mdio_bus(phydev);
+ rc = __bcm_phy_write_exp(phydev, reg, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_write_exp);
-int bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
+int __bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
{
int val;
- val = phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+ val = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
if (val < 0)
return val;
- val = phy_read(phydev, MII_BCM54XX_EXP_DATA);
+ val = __phy_read(phydev, MII_BCM54XX_EXP_DATA);
/* Restore default value. It's O.K. if this write fails. */
- phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
+ __phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
return val;
}
+EXPORT_SYMBOL_GPL(__bcm_phy_read_exp);
+
+int bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
+{
+ int rc;
+
+ phy_lock_mdio_bus(phydev);
+ rc = __bcm_phy_read_exp(phydev, reg);
+ phy_unlock_mdio_bus(phydev);
+
+ return rc;
+}
EXPORT_SYMBOL_GPL(bcm_phy_read_exp);
+int __bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set)
+{
+ int new, ret;
+
+ ret = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_read(phydev, MII_BCM54XX_EXP_DATA);
+ if (ret < 0)
+ return ret;
+
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ return __phy_write(phydev, MII_BCM54XX_EXP_DATA, new);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_modify_exp);
+
+int bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __bcm_phy_modify_exp(phydev, reg, mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_modify_exp);
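A hedged sketch of a caller using the new locked read-modify-write helper; the register offset and bit below are hypothetical, not a real expansion-register layout:

/* Illustrative caller of bcm_phy_modify_exp() */
#define EXAMPLE_EXP_REG		0x42	/* hypothetical expansion register */
#define EXAMPLE_FEATURE_EN	BIT(3)	/* hypothetical enable bit */

static int example_enable_feature(struct phy_device *phydev, bool enable)
{
	/* One SEL+DATA sequence under the MDIO bus lock, so a concurrent
	 * access to another expansion register cannot tear it.
	 */
	return bcm_phy_modify_exp(phydev, EXAMPLE_EXP_REG,
				  EXAMPLE_FEATURE_EN,
				  enable ? EXAMPLE_FEATURE_EN : 0);
}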
+
int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
{
/* The register must be written to both the Shadow Register Select and
@@ -155,6 +213,86 @@ int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
}
EXPORT_SYMBOL_GPL(bcm_phy_write_shadow);
+int __bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb)
+{
+ int val;
+
+ val = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (val < 0)
+ return val;
+
+ return __phy_read(phydev, MII_BCM54XX_RDB_DATA);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_read_rdb);
+
+int bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __bcm_phy_read_rdb(phydev, rdb);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_read_rdb);
+
+int __bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (ret < 0)
+ return ret;
+
+ return __phy_write(phydev, MII_BCM54XX_RDB_DATA, val);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_write_rdb);
+
+int bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __bcm_phy_write_rdb(phydev, rdb, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_write_rdb);
+
+int __bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask, u16 set)
+{
+ int new, ret;
+
+ ret = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_read(phydev, MII_BCM54XX_RDB_DATA);
+ if (ret < 0)
+ return ret;
+
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ return __phy_write(phydev, MII_BCM54XX_RDB_DATA, new);
+}
+EXPORT_SYMBOL_GPL(__bcm_phy_modify_rdb);
+
+int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __bcm_phy_modify_rdb(phydev, rdb, mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_modify_rdb);
+
int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down)
{
int val;
@@ -445,6 +583,191 @@ int bcm_phy_enable_jumbo(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(bcm_phy_enable_jumbo);
+static int __bcm_phy_enable_rdb_access(struct phy_device *phydev)
+{
+ return __bcm_phy_write_exp(phydev, BCM54XX_EXP_REG7E, 0);
+}
+
+static int __bcm_phy_enable_legacy_access(struct phy_device *phydev)
+{
+ return __bcm_phy_write_rdb(phydev, BCM54XX_RDB_REG0087,
+ BCM54XX_ACCESS_MODE_LEGACY_EN);
+}
+
+static int _bcm_phy_cable_test_start(struct phy_device *phydev, bool is_rdb)
+{
+ u16 mask, set;
+ int ret;
+
+ /* Auto-negotiation must be enabled for cable diagnostics to work, but
+ * don't advertise any capabilities.
+ */
+ phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+ phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+ phy_write(phydev, MII_CTRL1000, 0);
+
+ phy_lock_mdio_bus(phydev);
+ if (is_rdb) {
+ ret = __bcm_phy_enable_legacy_access(phydev);
+ if (ret)
+ goto out;
+ }
+
+ mask = BCM54XX_ECD_CTRL_CROSS_SHORT_DIS | BCM54XX_ECD_CTRL_UNIT_MASK;
+ set = BCM54XX_ECD_CTRL_RUN | BCM54XX_ECD_CTRL_BREAK_LINK |
+ FIELD_PREP(BCM54XX_ECD_CTRL_UNIT_MASK,
+ BCM54XX_ECD_CTRL_UNIT_CM);
+
+ ret = __bcm_phy_modify_exp(phydev, BCM54XX_EXP_ECD_CTRL, mask, set);
+
+out:
+ /* re-enable the RDB access even if there was an error */
+ if (is_rdb)
+ ret = __bcm_phy_enable_rdb_access(phydev) ? : ret;
+
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+static int bcm_phy_cable_test_report_trans(int result)
+{
+ switch (result) {
+ case BCM54XX_ECD_FAULT_TYPE_OK:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case BCM54XX_ECD_FAULT_TYPE_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case BCM54XX_ECD_FAULT_TYPE_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ case BCM54XX_ECD_FAULT_TYPE_INVALID:
+ case BCM54XX_ECD_FAULT_TYPE_BUSY:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool bcm_phy_distance_valid(int result)
+{
+ switch (result) {
+ case BCM54XX_ECD_FAULT_TYPE_OPEN:
+ case BCM54XX_ECD_FAULT_TYPE_SAME_SHORT:
+ case BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int bcm_phy_report_length(struct phy_device *phydev, int pair)
+{
+ int val;
+
+ val = __bcm_phy_read_exp(phydev,
+ BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS + pair);
+ if (val < 0)
+ return val;
+
+ if (val == BCM54XX_ECD_LENGTH_RESULTS_INVALID)
+ return 0;
+
+ ethnl_cable_test_fault_length(phydev, pair, val);
+
+ return 0;
+}
+
+static int _bcm_phy_cable_test_get_status(struct phy_device *phydev,
+ bool *finished, bool is_rdb)
+{
+ int pair_a, pair_b, pair_c, pair_d, ret;
+
+ *finished = false;
+
+ phy_lock_mdio_bus(phydev);
+
+ if (is_rdb) {
+ ret = __bcm_phy_enable_legacy_access(phydev);
+ if (ret)
+ goto out;
+ }
+
+ ret = __bcm_phy_read_exp(phydev, BCM54XX_EXP_ECD_CTRL);
+ if (ret < 0)
+ goto out;
+
+ if (ret & BCM54XX_ECD_CTRL_IN_PROGRESS) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = __bcm_phy_read_exp(phydev, BCM54XX_EXP_ECD_FAULT_TYPE);
+ if (ret < 0)
+ goto out;
+
+ pair_a = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK, ret);
+ pair_b = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK, ret);
+ pair_c = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK, ret);
+ pair_d = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK, ret);
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ bcm_phy_cable_test_report_trans(pair_a));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
+ bcm_phy_cable_test_report_trans(pair_b));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
+ bcm_phy_cable_test_report_trans(pair_c));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
+ bcm_phy_cable_test_report_trans(pair_d));
+
+ if (bcm_phy_distance_valid(pair_a))
+ bcm_phy_report_length(phydev, 0);
+ if (bcm_phy_distance_valid(pair_b))
+ bcm_phy_report_length(phydev, 1);
+ if (bcm_phy_distance_valid(pair_c))
+ bcm_phy_report_length(phydev, 2);
+ if (bcm_phy_distance_valid(pair_d))
+ bcm_phy_report_length(phydev, 3);
+
+ ret = 0;
+ *finished = true;
+out:
+ /* re-enable the RDB access even if there was an error */
+ if (is_rdb)
+ ret = __bcm_phy_enable_rdb_access(phydev) ? : ret;
+
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+int bcm_phy_cable_test_start(struct phy_device *phydev)
+{
+ return _bcm_phy_cable_test_start(phydev, false);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_cable_test_start);
+
+int bcm_phy_cable_test_get_status(struct phy_device *phydev, bool *finished)
+{
+ return _bcm_phy_cable_test_get_status(phydev, finished, false);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_cable_test_get_status);
+
+/* We assume that all PHYs which support RDB access can be switched to legacy
+ * mode. If, in the future, this is not true anymore, we have to re-implement
+ * this with RDB access.
+ */
+int bcm_phy_cable_test_start_rdb(struct phy_device *phydev)
+{
+ return _bcm_phy_cable_test_start(phydev, true);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_cable_test_start_rdb);
+
+int bcm_phy_cable_test_get_status_rdb(struct phy_device *phydev,
+ bool *finished)
+{
+ return _bcm_phy_cable_test_get_status(phydev, finished, true);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_cable_test_get_status_rdb);
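A hedged sketch of how a PHY driver might wire the RDB variants exported above into its struct phy_driver; the driver name and PHY ID are illustrative:

/* Illustrative phy_driver entry using the RDB cable-test helpers */
static struct phy_driver example_rdb_phy_driver[] = {
{
	PHY_ID_MATCH_EXACT(0x002a0000),		/* hypothetical PHY ID */
	.name			= "Example RDB-mode PHY",
	.flags			= PHY_POLL_CABLE_TEST,
	.cable_test_start	= bcm_phy_cable_test_start_rdb,
	.cable_test_get_status	= bcm_phy_cable_test_get_status_rdb,
} };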
+
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 129df819be8c..237a8503c9b4 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -27,8 +27,12 @@
#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
+int __bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
+int __bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+int __bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set);
int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+int bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set);
static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
u16 reg, u16 val)
@@ -48,6 +52,15 @@ int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
u16 val);
int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow);
+int __bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val);
+int bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val);
+int __bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb);
+int bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb);
+int __bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask,
+ u16 set);
+int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask,
+ u16 set);
+
int bcm_phy_ack_intr(struct phy_device *phydev);
int bcm_phy_config_intr(struct phy_device *phydev);
@@ -67,4 +80,10 @@ void bcm_phy_r_rc_cal_reset(struct phy_device *phydev);
int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev);
int bcm_phy_enable_jumbo(struct phy_device *phydev);
+int bcm_phy_cable_test_get_status_rdb(struct phy_device *phydev,
+ bool *finished);
+int bcm_phy_cable_test_start_rdb(struct phy_device *phydev);
+int bcm_phy_cable_test_start(struct phy_device *phydev);
+int bcm_phy_cable_test_get_status(struct phy_device *phydev, bool *finished);
+
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
new file mode 100644
index 000000000000..8998e68bb26b
--- /dev/null
+++ b/drivers/net/phy/bcm54140.c
@@ -0,0 +1,860 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Broadcom BCM54140 Quad SGMII/QSGMII Copper/Fiber Gigabit PHY
+ *
+ * Copyright (c) 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/brcmphy.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#include "bcm-phy-lib.h"
+
+/* RDB per-port registers
+ */
+#define BCM54140_RDB_ISR 0x00a /* interrupt status */
+#define BCM54140_RDB_IMR 0x00b /* interrupt mask */
+#define BCM54140_RDB_INT_LINK BIT(1) /* link status changed */
+#define BCM54140_RDB_INT_SPEED BIT(2) /* link speed change */
+#define BCM54140_RDB_INT_DUPLEX BIT(3) /* duplex mode changed */
+#define BCM54140_RDB_SPARE1 0x012 /* spare control 1 */
+#define BCM54140_RDB_SPARE1_LSLM BIT(2) /* link speed LED mode */
+#define BCM54140_RDB_SPARE2 0x014 /* spare control 2 */
+#define BCM54140_RDB_SPARE2_WS_RTRY_DIS BIT(8) /* wirespeed retry disable */
+#define BCM54140_RDB_SPARE2_WS_RTRY_LIMIT GENMASK(4, 2) /* retry limit */
+#define BCM54140_RDB_SPARE3 0x015 /* spare control 3 */
+#define BCM54140_RDB_SPARE3_BIT0 BIT(0)
+#define BCM54140_RDB_LED_CTRL 0x019 /* LED control */
+#define BCM54140_RDB_LED_CTRL_ACTLINK0 BIT(4)
+#define BCM54140_RDB_LED_CTRL_ACTLINK1 BIT(8)
+#define BCM54140_RDB_C_APWR 0x01a /* auto power down control */
+#define BCM54140_RDB_C_APWR_SINGLE_PULSE BIT(8) /* single pulse */
+#define BCM54140_RDB_C_APWR_APD_MODE_DIS 0 /* APD disable */
+#define BCM54140_RDB_C_APWR_APD_MODE_EN 1 /* APD enable */
+#define BCM54140_RDB_C_APWR_APD_MODE_DIS2 2 /* APD disable */
+#define BCM54140_RDB_C_APWR_APD_MODE_EN_ANEG 3 /* APD enable w/ aneg */
+#define BCM54140_RDB_C_APWR_APD_MODE_MASK GENMASK(6, 5)
+#define BCM54140_RDB_C_APWR_SLP_TIM_MASK BIT(4) /* sleep timer */
+#define BCM54140_RDB_C_APWR_SLP_TIM_2_7 0 /* 2.7s */
+#define BCM54140_RDB_C_APWR_SLP_TIM_5_4 1 /* 5.4s */
+#define BCM54140_RDB_C_PWR 0x02a /* copper power control */
+#define BCM54140_RDB_C_PWR_ISOLATE BIT(5) /* super isolate mode */
+#define BCM54140_RDB_C_MISC_CTRL 0x02f /* misc copper control */
+#define BCM54140_RDB_C_MISC_CTRL_WS_EN BIT(4) /* wirespeed enable */
+
+/* RDB global registers
+ */
+#define BCM54140_RDB_TOP_IMR 0x82d /* interrupt mask */
+#define BCM54140_RDB_TOP_IMR_PORT0 BIT(4)
+#define BCM54140_RDB_TOP_IMR_PORT1 BIT(5)
+#define BCM54140_RDB_TOP_IMR_PORT2 BIT(6)
+#define BCM54140_RDB_TOP_IMR_PORT3 BIT(7)
+#define BCM54140_RDB_MON_CTRL 0x831 /* monitor control */
+#define BCM54140_RDB_MON_CTRL_V_MODE BIT(3) /* voltage mode */
+#define BCM54140_RDB_MON_CTRL_SEL_MASK GENMASK(2, 1)
+#define BCM54140_RDB_MON_CTRL_SEL_TEMP 0 /* measure temperature */
+#define BCM54140_RDB_MON_CTRL_SEL_1V0 1 /* measure AVDDL 1.0V */
+#define BCM54140_RDB_MON_CTRL_SEL_3V3 2 /* measure AVDDH 3.3V */
+#define BCM54140_RDB_MON_CTRL_SEL_RR 3 /* measure all round-robin */
+#define BCM54140_RDB_MON_CTRL_PWR_DOWN BIT(0) /* power-down monitor */
+#define BCM54140_RDB_MON_TEMP_VAL 0x832 /* temperature value */
+#define BCM54140_RDB_MON_TEMP_MAX 0x833 /* temperature high thresh */
+#define BCM54140_RDB_MON_TEMP_MIN 0x834 /* temperature low thresh */
+#define BCM54140_RDB_MON_TEMP_DATA_MASK GENMASK(9, 0)
+#define BCM54140_RDB_MON_1V0_VAL 0x835 /* AVDDL 1.0V value */
+#define BCM54140_RDB_MON_1V0_MAX 0x836 /* AVDDL 1.0V high thresh */
+#define BCM54140_RDB_MON_1V0_MIN 0x837 /* AVDDL 1.0V low thresh */
+#define BCM54140_RDB_MON_1V0_DATA_MASK GENMASK(10, 0)
+#define BCM54140_RDB_MON_3V3_VAL 0x838 /* AVDDH 3.3V value */
+#define BCM54140_RDB_MON_3V3_MAX 0x839 /* AVDDH 3.3V high thresh */
+#define BCM54140_RDB_MON_3V3_MIN 0x83a /* AVDDH 3.3V low thresh */
+#define BCM54140_RDB_MON_3V3_DATA_MASK GENMASK(11, 0)
+#define BCM54140_RDB_MON_ISR 0x83b /* interrupt status */
+#define BCM54140_RDB_MON_ISR_3V3 BIT(2) /* AVDDH 3.3V alarm */
+#define BCM54140_RDB_MON_ISR_1V0 BIT(1) /* AVDDL 1.0V alarm */
+#define BCM54140_RDB_MON_ISR_TEMP BIT(0) /* temperature alarm */
+
+/* According to the datasheet the formula is:
+ * T = 413.35 - (0.49055 * bits[9:0])
+ */
+#define BCM54140_HWMON_TO_TEMP(v) (413350L - (v) * 491)
+#define BCM54140_HWMON_FROM_TEMP(v) DIV_ROUND_CLOSEST_ULL(413350L - (v), 491)
+
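A standalone check of the temperature conversion above: the datasheet slope 0.49055 degC/LSB is carried as 491 mdegC/LSB by the macro, and the raw register value below is illustrative:

/* Worked example of BCM54140_HWMON_TO_TEMP() */
#include <stdio.h>

int main(void)
{
	long raw = 700;				/* bits[9:0] of MON_TEMP_VAL */
	long mdegc = 413350L - raw * 491;	/* BCM54140_HWMON_TO_TEMP(raw) */

	/* prints "69650 mdegC (~69.65 degC)" */
	printf("%ld mdegC (~%ld.%02ld degC)\n",
	       mdegc, mdegc / 1000, (mdegc % 1000) / 10);
	return 0;
}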
+/* According to the datasheet the formula is:
+ * U = bits[11:0] / 1024 * 220 / 0.2
+ *
+ * Normalized:
+ * U = bits[11:0] / 4096 * 2514
+ */
+#define BCM54140_HWMON_TO_IN_1V0(v) ((v) * 2514 >> 11)
+#define BCM54140_HWMON_FROM_IN_1V0(v) DIV_ROUND_CLOSEST_ULL(((v) << 11), 2514)
+
+/* According to the datasheet the formula is:
+ * U = bits[10:0] / 1024 * 880 / 0.7
+ *
+ * Normalized:
+ * U = bits[10:0] / 2048 * 4400
+ */
+#define BCM54140_HWMON_TO_IN_3V3(v) ((v) * 4400 >> 12)
+#define BCM54140_HWMON_FROM_IN_3V3(v) DIV_ROUND_CLOSEST_ULL(((v) << 12), 4400)
+
+#define BCM54140_HWMON_TO_IN(ch, v) ((ch) ? BCM54140_HWMON_TO_IN_3V3(v) \
+ : BCM54140_HWMON_TO_IN_1V0(v))
+#define BCM54140_HWMON_FROM_IN(ch, v) ((ch) ? BCM54140_HWMON_FROM_IN_3V3(v) \
+ : BCM54140_HWMON_FROM_IN_1V0(v))
+#define BCM54140_HWMON_IN_MASK(ch) ((ch) ? BCM54140_RDB_MON_3V3_DATA_MASK \
+ : BCM54140_RDB_MON_1V0_DATA_MASK)
+#define BCM54140_HWMON_IN_VAL_REG(ch) ((ch) ? BCM54140_RDB_MON_3V3_VAL \
+ : BCM54140_RDB_MON_1V0_VAL)
+#define BCM54140_HWMON_IN_MIN_REG(ch) ((ch) ? BCM54140_RDB_MON_3V3_MIN \
+ : BCM54140_RDB_MON_1V0_MIN)
+#define BCM54140_HWMON_IN_MAX_REG(ch) ((ch) ? BCM54140_RDB_MON_3V3_MAX \
+ : BCM54140_RDB_MON_1V0_MAX)
+#define BCM54140_HWMON_IN_ALARM_BIT(ch) ((ch) ? BCM54140_RDB_MON_ISR_3V3 \
+ : BCM54140_RDB_MON_ISR_1V0)
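+
+/* Worked example (hypothetical raw readings): on channel 0 (AVDDL,
+ * 1.0V) a value of 834 gives 834 * 2514 >> 11 = 1023 mV; on channel 1
+ * (AVDDH, 3.3V) a value of 3072 gives 3072 * 4400 >> 12 = 3300 mV.
+ */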
+
+/* This PHY has two different PHY IDs depending on its MODE_SEL pin.
+ * This pin chooses between 4x SGMII and QSGMII mode:
+ * AE02_5009 4x SGMII
+ * AE02_5019 QSGMII
+ */
+#define BCM54140_PHY_ID_MASK 0xffffffe8
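+/* Note that 0xffffffe8 ignores bit 4 (which differs between the two
+ * IDs above) and bits [2:0] (the silicon revision), so a single driver
+ * entry matches both modes and all revisions.
+ */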
+
+#define BCM54140_PHY_ID_REV(phy_id) ((phy_id) & 0x7)
+#define BCM54140_REV_B0 1
+
+#define BCM54140_DEFAULT_DOWNSHIFT 5
+#define BCM54140_MAX_DOWNSHIFT 9
+
+struct bcm54140_priv {
+ int port;
+ int base_addr;
+#if IS_ENABLED(CONFIG_HWMON)
+ /* protect the alarm bits */
+ struct mutex alarm_lock;
+ u16 alarm;
+#endif
+};
+
+#if IS_ENABLED(CONFIG_HWMON)
+static umode_t bcm54140_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_min:
+ case hwmon_in_max:
+ return 0644;
+ case hwmon_in_label:
+ case hwmon_in_input:
+ case hwmon_in_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_input:
+ case hwmon_temp_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
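+/* Reading the monitor interrupt status register apparently clears it,
+ * so all pending alarm bits are latched into priv->alarm; that way one
+ * sysfs read doesn't silently discard the other channels' alarms. The
+ * requested bit is reported and then dropped from the latch.
+ */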
+static int bcm54140_hwmon_read_alarm(struct device *dev, unsigned int bit,
+ long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ struct bcm54140_priv *priv = phydev->priv;
+ int tmp, ret = 0;
+
+ mutex_lock(&priv->alarm_lock);
+
+ /* latch any alarm bits */
+ tmp = bcm_phy_read_rdb(phydev, BCM54140_RDB_MON_ISR);
+ if (tmp < 0) {
+ ret = tmp;
+ goto out;
+ }
+ priv->alarm |= tmp;
+
+ *val = !!(priv->alarm & bit);
+ priv->alarm &= ~bit;
+
+out:
+ mutex_unlock(&priv->alarm_lock);
+ return ret;
+}
+
+static int bcm54140_hwmon_read_temp(struct device *dev, u32 attr, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ u16 reg;
+ int tmp;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = BCM54140_RDB_MON_TEMP_VAL;
+ break;
+ case hwmon_temp_min:
+ reg = BCM54140_RDB_MON_TEMP_MIN;
+ break;
+ case hwmon_temp_max:
+ reg = BCM54140_RDB_MON_TEMP_MAX;
+ break;
+ case hwmon_temp_alarm:
+ return bcm54140_hwmon_read_alarm(dev,
+ BCM54140_RDB_MON_ISR_TEMP,
+ val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ tmp = bcm_phy_read_rdb(phydev, reg);
+ if (tmp < 0)
+ return tmp;
+
+ *val = BCM54140_HWMON_TO_TEMP(tmp & BCM54140_RDB_MON_TEMP_DATA_MASK);
+
+ return 0;
+}
+
+static int bcm54140_hwmon_read_in(struct device *dev, u32 attr,
+ int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ u16 bit, reg;
+ int tmp;
+
+ switch (attr) {
+ case hwmon_in_input:
+ reg = BCM54140_HWMON_IN_VAL_REG(channel);
+ break;
+ case hwmon_in_min:
+ reg = BCM54140_HWMON_IN_MIN_REG(channel);
+ break;
+ case hwmon_in_max:
+ reg = BCM54140_HWMON_IN_MAX_REG(channel);
+ break;
+ case hwmon_in_alarm:
+ bit = BCM54140_HWMON_IN_ALARM_BIT(channel);
+ return bcm54140_hwmon_read_alarm(dev, bit, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ tmp = bcm_phy_read_rdb(phydev, reg);
+ if (tmp < 0)
+ return tmp;
+
+ tmp &= BCM54140_HWMON_IN_MASK(channel);
+ *val = BCM54140_HWMON_TO_IN(channel, tmp);
+
+ return 0;
+}
+
+static int bcm54140_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return bcm54140_hwmon_read_temp(dev, attr, val);
+ case hwmon_in:
+ return bcm54140_hwmon_read_in(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const char *const bcm54140_hwmon_in_labels[] = {
+ "AVDDL",
+ "AVDDH",
+};
+
+static int bcm54140_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = bcm54140_hwmon_in_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bcm54140_hwmon_write_temp(struct device *dev, u32 attr,
+ int channel, long val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ u16 mask = BCM54140_RDB_MON_TEMP_DATA_MASK;
+ u16 reg;
+
+ val = clamp_val(val, BCM54140_HWMON_TO_TEMP(mask),
+ BCM54140_HWMON_TO_TEMP(0));
+
+ switch (attr) {
+ case hwmon_temp_min:
+ reg = BCM54140_RDB_MON_TEMP_MIN;
+ break;
+ case hwmon_temp_max:
+ reg = BCM54140_RDB_MON_TEMP_MAX;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return bcm_phy_modify_rdb(phydev, reg, mask,
+ BCM54140_HWMON_FROM_TEMP(val));
+}
+
+static int bcm54140_hwmon_write_in(struct device *dev, u32 attr,
+ int channel, long val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ u16 mask = BCM54140_HWMON_IN_MASK(channel);
+ u16 reg;
+
+ val = clamp_val(val, 0, BCM54140_HWMON_TO_IN(channel, mask));
+
+ switch (attr) {
+ case hwmon_in_min:
+ reg = BCM54140_HWMON_IN_MIN_REG(channel);
+ break;
+ case hwmon_in_max:
+ reg = BCM54140_HWMON_IN_MAX_REG(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return bcm_phy_modify_rdb(phydev, reg, mask,
+ BCM54140_HWMON_FROM_IN(channel, val));
+}
+
+static int bcm54140_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return bcm54140_hwmon_write_temp(dev, attr, channel, val);
+ case hwmon_in:
+ return bcm54140_hwmon_write_in(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *bcm54140_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_ALARM),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM | HWMON_I_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops bcm54140_hwmon_ops = {
+ .is_visible = bcm54140_hwmon_is_visible,
+ .read = bcm54140_hwmon_read,
+ .read_string = bcm54140_hwmon_read_string,
+ .write = bcm54140_hwmon_write,
+};
+
+static const struct hwmon_chip_info bcm54140_chip_info = {
+ .ops = &bcm54140_hwmon_ops,
+ .info = bcm54140_hwmon_info,
+};
+
+static int bcm54140_enable_monitoring(struct phy_device *phydev)
+{
+ u16 mask, set;
+
+ /* 3.3V voltage mode */
+ set = BCM54140_RDB_MON_CTRL_V_MODE;
+
+ /* select round-robin */
+ mask = BCM54140_RDB_MON_CTRL_SEL_MASK;
+ set |= FIELD_PREP(BCM54140_RDB_MON_CTRL_SEL_MASK,
+ BCM54140_RDB_MON_CTRL_SEL_RR);
+
+ /* remove power-down bit */
+ mask |= BCM54140_RDB_MON_CTRL_PWR_DOWN;
+
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_MON_CTRL, mask, set);
+}
+
+static int bcm54140_probe_once(struct phy_device *phydev)
+{
+ struct device *hwmon;
+ int ret;
+
+ /* enable hardware monitoring */
+ ret = bcm54140_enable_monitoring(phydev);
+ if (ret)
+ return ret;
+
+ hwmon = devm_hwmon_device_register_with_info(&phydev->mdio.dev,
+ "BCM54140", phydev,
+ &bcm54140_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+#endif
+
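+/* RDB registers are accessed indirectly: the register number is first
+ * written to MII_BCM54XX_RDB_ADDR and the data is then transferred
+ * through MII_BCM54XX_RDB_DATA. These helpers perform that sequence on
+ * the base PHY of the package, holding the MDIO bus lock so the
+ * address/data pair cannot be interleaved with other accesses.
+ */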
+static int bcm54140_base_read_rdb(struct phy_device *phydev, u16 rdb)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (ret < 0)
+ goto out;
+
+ ret = __phy_package_read(phydev, MII_BCM54XX_RDB_DATA);
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
+
+static int bcm54140_base_write_rdb(struct phy_device *phydev,
+ u16 rdb, u16 val)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ if (ret < 0)
+ goto out;
+
+ ret = __phy_package_write(phydev, MII_BCM54XX_RDB_DATA, val);
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
+
+/* Under some circumstances a core PLL may not lock, which then prevents
+ * successful link establishment. Restart the PLL after the voltages are
+ * stable to work around this issue.
+ */
+static int bcm54140_b0_workaround(struct phy_device *phydev)
+{
+ int spare3;
+ int ret;
+
+ spare3 = bcm_phy_read_rdb(phydev, BCM54140_RDB_SPARE3);
+ if (spare3 < 0)
+ return spare3;
+
+ spare3 &= ~BCM54140_RDB_SPARE3_BIT0;
+
+ ret = bcm_phy_write_rdb(phydev, BCM54140_RDB_SPARE3, spare3);
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
+ if (ret)
+ return ret;
+
+ spare3 |= BCM54140_RDB_SPARE3_BIT0;
+
+ return bcm_phy_write_rdb(phydev, BCM54140_RDB_SPARE3, spare3);
+}
+
+/* The BCM54140 is a quad PHY where only the first port has access to
+ * the global registers. Thus we need to find out the PHY address of
+ * that first port.
+ */
+static int bcm54140_get_base_addr_and_port(struct phy_device *phydev)
+{
+ struct bcm54140_priv *priv = phydev->priv;
+ struct mii_bus *bus = phydev->mdio.bus;
+ int addr, min_addr, max_addr;
+ int step = 1;
+ u32 phy_id;
+ int tmp;
+
+ min_addr = phydev->mdio.addr;
+ max_addr = phydev->mdio.addr;
+ addr = phydev->mdio.addr;
+
+ /* We scan forwards and backwards for PHYs which have the same
+ * phy_id as we do. Step 1 scans forward, step 2 backwards. Once
+ * we are finished, min_addr and max_addr describe the range of
+ * PHY addresses occupied by this type of PHY. There is one
+ * caveat: there may be many PHYs of the same type on a bus, but
+ * we know that each package takes exactly four consecutive
+ * addresses. Therefore we can deduce our offset from the base
+ * address of this quad PHY.
+ */
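+ /* For example (hypothetical layout): a package strapped to
+ * addresses 4..7 gives a phydev at address 6 min_addr = 4 and
+ * max_addr = 7, hence port = (6 - 4) % 4 = 2 and base_addr = 4.
+ */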
+
+ while (1) {
+ if (step == 3) {
+ break;
+ } else if (step == 1) {
+ max_addr = addr;
+ addr++;
+ } else {
+ min_addr = addr;
+ addr--;
+ }
+
+ if (addr < 0 || addr >= PHY_MAX_ADDR) {
+ addr = phydev->mdio.addr;
+ step++;
+ continue;
+ }
+
+ /* read the PHY id */
+ tmp = mdiobus_read(bus, addr, MII_PHYSID1);
+ if (tmp < 0)
+ return tmp;
+ phy_id = tmp << 16;
+ tmp = mdiobus_read(bus, addr, MII_PHYSID2);
+ if (tmp < 0)
+ return tmp;
+ phy_id |= tmp;
+
+ /* see if it is still the same PHY */
+ if ((phy_id & phydev->drv->phy_id_mask) !=
+ (phydev->drv->phy_id & phydev->drv->phy_id_mask)) {
+ addr = phydev->mdio.addr;
+ step++;
+ }
+ }
+
+ /* The range we get should be a multiple of four. Note that both
+ * min_addr and max_addr are inclusive, so we have to add one
+ * when taking their difference.
+ */
+ if ((max_addr - min_addr + 1) % 4) {
+ dev_err(&phydev->mdio.dev,
+ "Detected Quad PHY IDs %d..%d doesn't make sense.\n",
+ min_addr, max_addr);
+ return -EINVAL;
+ }
+
+ priv->port = (phydev->mdio.addr - min_addr) % 4;
+ priv->base_addr = phydev->mdio.addr - priv->port;
+
+ return 0;
+}
+
+static int bcm54140_probe(struct phy_device *phydev)
+{
+ struct bcm54140_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ ret = bcm54140_get_base_addr_and_port(phydev);
+ if (ret)
+ return ret;
+
+ devm_phy_package_join(&phydev->mdio.dev, phydev, priv->base_addr, 0);
+
+#if IS_ENABLED(CONFIG_HWMON)
+ mutex_init(&priv->alarm_lock);
+
+ if (phy_package_init_once(phydev)) {
+ ret = bcm54140_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+#endif
+
+ phydev_dbg(phydev, "probed (port %d, base PHY address %d)\n",
+ priv->port, priv->base_addr);
+
+ return 0;
+}
+
+static int bcm54140_config_init(struct phy_device *phydev)
+{
+ u16 reg = 0xffff;
+ int ret;
+
+ /* Apply hardware errata */
+ if (BCM54140_PHY_ID_REV(phydev->phy_id) == BCM54140_REV_B0) {
+ ret = bcm54140_b0_workaround(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* Unmask events we are interested in. */
+ reg &= ~(BCM54140_RDB_INT_DUPLEX |
+ BCM54140_RDB_INT_SPEED |
+ BCM54140_RDB_INT_LINK);
+ ret = bcm_phy_write_rdb(phydev, BCM54140_RDB_IMR, reg);
+ if (ret)
+ return ret;
+
+ /* LED1=LINKSPD[1], LED2=LINKSPD[2], LED3=LINK/ACTIVITY */
+ ret = bcm_phy_modify_rdb(phydev, BCM54140_RDB_SPARE1,
+ 0, BCM54140_RDB_SPARE1_LSLM);
+ if (ret)
+ return ret;
+
+ ret = bcm_phy_modify_rdb(phydev, BCM54140_RDB_LED_CTRL,
+ 0, BCM54140_RDB_LED_CTRL_ACTLINK0);
+ if (ret)
+ return ret;
+
+ /* disable super isolate mode */
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_PWR,
+ BCM54140_RDB_C_PWR_ISOLATE, 0);
+}
+
+static int bcm54140_did_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+
+ return (ret < 0) ? 0 : ret;
+}
+
+static int bcm54140_ack_intr(struct phy_device *phydev)
+{
+ int reg;
+
+ /* clear pending interrupts */
+ reg = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+ if (reg < 0)
+ return reg;
+
+ return 0;
+}
+
+static int bcm54140_config_intr(struct phy_device *phydev)
+{
+ struct bcm54140_priv *priv = phydev->priv;
+ static const u16 port_to_imr_bit[] = {
+ BCM54140_RDB_TOP_IMR_PORT0, BCM54140_RDB_TOP_IMR_PORT1,
+ BCM54140_RDB_TOP_IMR_PORT2, BCM54140_RDB_TOP_IMR_PORT3,
+ };
+ int reg;
+
+ if (priv->port >= ARRAY_SIZE(port_to_imr_bit))
+ return -EINVAL;
+
+ reg = bcm54140_base_read_rdb(phydev, BCM54140_RDB_TOP_IMR);
+ if (reg < 0)
+ return reg;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ reg &= ~port_to_imr_bit[priv->port];
+ else
+ reg |= port_to_imr_bit[priv->port];
+
+ return bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+}
+
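+/* Downshift is encoded in two registers (see the accessors below):
+ * WS_EN cleared means downshift is disabled; WS_RTRY_DIS set means
+ * downshift after a single failed attempt; otherwise the count is
+ * WS_RTRY_LIMIT + 2, e.g. the default count of 5 is stored as
+ * WS_RTRY_LIMIT = 3.
+ */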
+static int bcm54140_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val;
+
+ val = bcm_phy_read_rdb(phydev, BCM54140_RDB_C_MISC_CTRL);
+ if (val < 0)
+ return val;
+
+ if (!(val & BCM54140_RDB_C_MISC_CTRL_WS_EN)) {
+ *data = DOWNSHIFT_DEV_DISABLE;
+ return 0;
+ }
+
+ val = bcm_phy_read_rdb(phydev, BCM54140_RDB_SPARE2);
+ if (val < 0)
+ return val;
+
+ if (val & BCM54140_RDB_SPARE2_WS_RTRY_DIS)
+ *data = 1;
+ else
+ *data = FIELD_GET(BCM54140_RDB_SPARE2_WS_RTRY_LIMIT, val) + 2;
+
+ return 0;
+}
+
+static int bcm54140_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, set;
+ int ret;
+
+ if (cnt > BCM54140_MAX_DOWNSHIFT && cnt != DOWNSHIFT_DEV_DEFAULT_COUNT)
+ return -EINVAL;
+
+ if (!cnt)
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_MISC_CTRL,
+ BCM54140_RDB_C_MISC_CTRL_WS_EN, 0);
+
+ if (cnt == DOWNSHIFT_DEV_DEFAULT_COUNT)
+ cnt = BCM54140_DEFAULT_DOWNSHIFT;
+
+ if (cnt == 1) {
+ mask = 0;
+ set = BCM54140_RDB_SPARE2_WS_RTRY_DIS;
+ } else {
+ mask = BCM54140_RDB_SPARE2_WS_RTRY_DIS;
+ mask |= BCM54140_RDB_SPARE2_WS_RTRY_LIMIT;
+ set = FIELD_PREP(BCM54140_RDB_SPARE2_WS_RTRY_LIMIT, cnt - 2);
+ }
+ ret = bcm_phy_modify_rdb(phydev, BCM54140_RDB_SPARE2,
+ mask, set);
+ if (ret)
+ return ret;
+
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_MISC_CTRL,
+ 0, BCM54140_RDB_C_MISC_CTRL_WS_EN);
+}
+
+static int bcm54140_get_edpd(struct phy_device *phydev, u16 *tx_interval)
+{
+ int val;
+
+ val = bcm_phy_read_rdb(phydev, BCM54140_RDB_C_APWR);
+ if (val < 0)
+ return val;
+
+ switch (FIELD_GET(BCM54140_RDB_C_APWR_APD_MODE_MASK, val)) {
+ case BCM54140_RDB_C_APWR_APD_MODE_DIS:
+ case BCM54140_RDB_C_APWR_APD_MODE_DIS2:
+ *tx_interval = ETHTOOL_PHY_EDPD_DISABLE;
+ break;
+ case BCM54140_RDB_C_APWR_APD_MODE_EN:
+ case BCM54140_RDB_C_APWR_APD_MODE_EN_ANEG:
+ switch (FIELD_GET(BCM54140_RDB_C_APWR_SLP_TIM_MASK, val)) {
+ case BCM54140_RDB_C_APWR_SLP_TIM_2_7:
+ *tx_interval = 2700;
+ break;
+ case BCM54140_RDB_C_APWR_SLP_TIM_5_4:
+ *tx_interval = 5400;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int bcm54140_set_edpd(struct phy_device *phydev, u16 tx_interval)
+{
+ u16 mask, set;
+
+ mask = BCM54140_RDB_C_APWR_APD_MODE_MASK;
+ if (tx_interval == ETHTOOL_PHY_EDPD_DISABLE)
+ set = FIELD_PREP(BCM54140_RDB_C_APWR_APD_MODE_MASK,
+ BCM54140_RDB_C_APWR_APD_MODE_DIS);
+ else
+ set = FIELD_PREP(BCM54140_RDB_C_APWR_APD_MODE_MASK,
+ BCM54140_RDB_C_APWR_APD_MODE_EN_ANEG);
+
+ /* enable single pulse mode */
+ set |= BCM54140_RDB_C_APWR_SINGLE_PULSE;
+
+ /* set sleep timer */
+ mask |= BCM54140_RDB_C_APWR_SLP_TIM_MASK;
+ switch (tx_interval) {
+ case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS:
+ case ETHTOOL_PHY_EDPD_DISABLE:
+ case 2700:
+ set |= BCM54140_RDB_C_APWR_SLP_TIM_2_7;
+ break;
+ case 5400:
+ set |= BCM54140_RDB_C_APWR_SLP_TIM_5_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_APWR, mask, set);
+}
+
+static int bcm54140_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return bcm54140_get_downshift(phydev, data);
+ case ETHTOOL_PHY_EDPD:
+ return bcm54140_get_edpd(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bcm54140_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return bcm54140_set_downshift(phydev, *(const u8 *)data);
+ case ETHTOOL_PHY_EDPD:
+ return bcm54140_set_edpd(phydev, *(const u16 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct phy_driver bcm54140_drivers[] = {
+ {
+ .phy_id = PHY_ID_BCM54140,
+ .phy_id_mask = BCM54140_PHY_ID_MASK,
+ .name = "Broadcom BCM54140",
+ .flags = PHY_POLL_CABLE_TEST,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = bcm54140_config_init,
+ .did_interrupt = bcm54140_did_interrupt,
+ .ack_interrupt = bcm54140_ack_intr,
+ .config_intr = bcm54140_config_intr,
+ .probe = bcm54140_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = bcm54140_get_tunable,
+ .set_tunable = bcm54140_set_tunable,
+ .cable_test_start = bcm_phy_cable_test_start_rdb,
+ .cable_test_get_status = bcm_phy_cable_test_get_status_rdb,
+ },
+};
+module_phy_driver(bcm54140_drivers);
+
+static struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
+ { PHY_ID_BCM54140, BCM54140_PHY_ID_MASK },
+ { }
+};
+
+MODULE_AUTHOR("Michael Walle");
+MODULE_DESCRIPTION("Broadcom BCM54140 PHY driver");
+MODULE_DEVICE_TABLE(mdio, bcm54140_tbl);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index f6dce6850850..df360e1c5069 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -55,7 +55,7 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
u16 mask = be32_to_cpup(paddr++);
u16 val_bits = be32_to_cpup(paddr++);
int val;
- u32 regnum = MII_ADDR_C45 | (devid << 16) | reg;
+ u32 regnum = mdiobus_c45_addr(devid, reg);
val = 0;
if (mask) {
val = phy_read(phydev, regnum);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index ae4873f2f86e..cd271de9609b 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -195,7 +195,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810)
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810 &&
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811)
return;
val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
@@ -214,8 +215,10 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
clk125en = false;
} else {
if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
- /* Here, bit 0 _enables_ CLK125 when set */
- val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
+ if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811) {
+ /* Here, bit 0 _enables_ CLK125 when set */
+ val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
+ }
clk125en = false;
}
}
@@ -225,8 +228,13 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
else
val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
- if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY)
- val |= BCM54XX_SHD_SCR3_TRDDAPD;
+ if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
+ if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
+ BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
+ val |= BCM54810_SHD_SCR3_TRDDAPD;
+ else
+ val |= BCM54XX_SHD_SCR3_TRDDAPD;
+ }
if (orig != val)
bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
@@ -327,6 +335,32 @@ static int bcm54xx_resume(struct phy_device *phydev)
return bcm54xx_config_init(phydev);
}
+static int bcm54811_config_init(struct phy_device *phydev)
+{
+ int err, reg;
+
+ /* Disable BroadR-Reach function. */
+ reg = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
+ reg &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
+ err = bcm_phy_write_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
+ reg);
+ if (err < 0)
+ return err;
+
+ err = bcm54xx_config_init(phydev);
+
+ /* Enable CLK125 MUX on LED4 if ref clock is enabled. */
+ if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
+ reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0);
+ err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0,
+ BCM54612E_LED4_CLK125OUT_EN | reg);
+ if (err < 0)
+ return err;
+ }
+
+ return err;
+}
+
static int bcm5482_config_init(struct phy_device *phydev)
{
int err, reg;
@@ -723,6 +757,17 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = genphy_suspend,
.resume = bcm54xx_resume,
}, {
+ .phy_id = PHY_ID_BCM54811,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54811",
+ /* PHY_GBIT_FEATURES */
+ .config_init = bcm54811_config_init,
+ .config_aneg = bcm5481_config_aneg,
+ .ack_interrupt = bcm_phy_ack_intr,
+ .config_intr = bcm_phy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = bcm54xx_resume,
+}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
@@ -782,6 +827,19 @@ static struct phy_driver broadcom_drivers[] = {
.get_stats = bcm53xx_phy_get_stats,
.probe = bcm53xx_phy_probe,
}, {
+ .phy_id = PHY_ID_BCM53125,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM53125",
+ .flags = PHY_IS_INTERNAL,
+ /* PHY_GBIT_FEATURES */
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm53xx_phy_get_stats,
+ .probe = bcm53xx_phy_probe,
+ .config_init = bcm54xx_config_init,
+ .ack_interrupt = bcm_phy_ack_intr,
+ .config_intr = bcm_phy_config_intr,
+}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
@@ -803,6 +861,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5464, 0xfffffff0 },
{ PHY_ID_BCM5481, 0xfffffff0 },
{ PHY_ID_BCM54810, 0xfffffff0 },
+ { PHY_ID_BCM54811, 0xfffffff0 },
{ PHY_ID_BCM5482, 0xfffffff0 },
{ PHY_ID_BCM50610, 0xfffffff0 },
{ PHY_ID_BCM50610M, 0xfffffff0 },
@@ -810,6 +869,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCMAC131, 0xfffffff0 },
{ PHY_ID_BCM5241, 0xfffffff0 },
{ PHY_ID_BCM5395, 0xfffffff0 },
+ { PHY_ID_BCM53125, 0xfffffff0 },
{ PHY_ID_BCM89610, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 856cdc36aacd..40514a94e6ff 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -17,8 +17,7 @@
static int cortina_read_reg(struct phy_device *phydev, u16 regnum)
{
- return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
- MII_ADDR_C45 | regnum);
+ return mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr, 0, regnum);
}
static int cortina_read_status(struct phy_device *phydev)
@@ -82,7 +81,6 @@ static struct phy_driver cortina_driver[] = {
.features = PHY_10GBIT_FEATURES,
.config_aneg = gen10g_config_aneg,
.read_status = cortina_read_status,
- .soft_reset = genphy_no_soft_reset,
.probe = cortina_probe,
},
};
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index b55e3c0403ed..4017ae1692d8 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -365,7 +365,7 @@ static int dp83867_get_downshift(struct phy_device *phydev, u8 *data)
break;
default:
return -EINVAL;
- };
+ }
*data = enable ? count : DOWNSHIFT_DEV_DISABLE;
@@ -400,7 +400,7 @@ static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
phydev_err(phydev,
"Downshift count must be 1, 2, 4 or 8\n");
return -EINVAL;
- };
+ }
val = DP83867_DOWNSHIFT_EN;
val |= FIELD_PREP(DP83867_DOWNSHIFT_ATTEMPT_MASK, count);
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 7996a4aea8d2..cfb22a21a2e6 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -65,7 +65,9 @@
#define DP83869_RGMII_RX_CLK_DELAY_EN BIT(0)
/* STRAP_STS1 bits */
+#define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0)
#define DP83869_STRAP_STS1_RESERVED BIT(11)
+#define DP83869_STRAP_MIRROR_ENABLED BIT(12)
/* PHYCTRL bits */
#define DP83869_RX_FIFO_SHIFT 12
@@ -160,6 +162,20 @@ static int dp83869_config_port_mirroring(struct phy_device *phydev)
DP83869_CFG3_PORT_MIRROR_EN);
}
+static int dp83869_set_strapped_mode(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ int val;
+
+ val = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_STRAP_STS1);
+ if (val < 0)
+ return val;
+
+ dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK;
+
+ return 0;
+}
+
#ifdef CONFIG_OF_MDIO
static int dp83869_of_init(struct phy_device *phydev)
{
@@ -184,6 +200,10 @@ static int dp83869_of_init(struct phy_device *phydev)
if (dp83869->mode < DP83869_RGMII_COPPER_ETHERNET ||
dp83869->mode > DP83869_SGMII_COPPER_ETHERNET)
return -EINVAL;
+ } else {
+ ret = dp83869_set_strapped_mode(phydev);
+ if (ret)
+ return ret;
}
if (of_property_read_bool(of_node, "ti,max-output-impedance"))
@@ -191,10 +211,18 @@ static int dp83869_of_init(struct phy_device *phydev)
else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
dp83869->io_impedance = DP83869_IO_MUX_CFG_IO_IMPEDANCE_MIN;
- if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
+ if (of_property_read_bool(of_node, "enet-phy-lane-swap")) {
dp83869->port_mirroring = DP83869_PORT_MIRRORING_EN;
- else
- dp83869->port_mirroring = DP83869_PORT_MIRRORING_DIS;
+ } else {
+ /* If the lane swap is not in the DT then check the straps */
+ ret = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_STRAP_STS1);
+ if (ret < 0)
+ return ret;
+ if (ret & DP83869_STRAP_MIRROR_ENABLED)
+ dp83869->port_mirroring = DP83869_PORT_MIRRORING_EN;
+ else
+ dp83869->port_mirroring = DP83869_PORT_MIRRORING_DIS;
+ }
if (of_property_read_u32(of_node, "rx-fifo-depth",
&dp83869->rx_fifo_depth))
@@ -209,7 +237,7 @@ static int dp83869_of_init(struct phy_device *phydev)
#else
static int dp83869_of_init(struct phy_device *phydev)
{
- return 0;
+ return dp83869_set_strapped_mode(phydev);
}
#endif /* CONFIG_OF_MDIO */
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 7fc8e10c5f33..4ea226566cec 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/phy.h>
#include <linux/marvell_phy.h>
#include <linux/bitfield.h>
@@ -41,7 +42,9 @@
#define MII_MARVELL_FIBER_PAGE 0x01
#define MII_MARVELL_MSCR_PAGE 0x02
#define MII_MARVELL_LED_PAGE 0x03
+#define MII_MARVELL_VCT5_PAGE 0x05
#define MII_MARVELL_MISC_TEST_PAGE 0x06
+#define MII_MARVELL_VCT7_PAGE 0x07
#define MII_MARVELL_WOL_PAGE 0x11
#define MII_M1011_IEVENT 0x13
@@ -162,6 +165,90 @@
#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
+#define MII_VCT5_TX_RX_MDI0_COUPLING 0x10
+#define MII_VCT5_TX_RX_MDI1_COUPLING 0x11
+#define MII_VCT5_TX_RX_MDI2_COUPLING 0x12
+#define MII_VCT5_TX_RX_MDI3_COUPLING 0x13
+#define MII_VCT5_TX_RX_AMPLITUDE_MASK 0x7f00
+#define MII_VCT5_TX_RX_AMPLITUDE_SHIFT 8
+#define MII_VCT5_TX_RX_COUPLING_POSITIVE_REFLECTION BIT(15)
+
+#define MII_VCT5_CTRL 0x17
+#define MII_VCT5_CTRL_ENABLE BIT(15)
+#define MII_VCT5_CTRL_COMPLETE BIT(14)
+#define MII_VCT5_CTRL_TX_SAME_CHANNEL (0x0 << 11)
+#define MII_VCT5_CTRL_TX0_CHANNEL (0x4 << 11)
+#define MII_VCT5_CTRL_TX1_CHANNEL (0x5 << 11)
+#define MII_VCT5_CTRL_TX2_CHANNEL (0x6 << 11)
+#define MII_VCT5_CTRL_TX3_CHANNEL (0x7 << 11)
+#define MII_VCT5_CTRL_SAMPLES_2 (0x0 << 8)
+#define MII_VCT5_CTRL_SAMPLES_4 (0x1 << 8)
+#define MII_VCT5_CTRL_SAMPLES_8 (0x2 << 8)
+#define MII_VCT5_CTRL_SAMPLES_16 (0x3 << 8)
+#define MII_VCT5_CTRL_SAMPLES_32 (0x4 << 8)
+#define MII_VCT5_CTRL_SAMPLES_64 (0x5 << 8)
+#define MII_VCT5_CTRL_SAMPLES_128 (0x6 << 8)
+#define MII_VCT5_CTRL_SAMPLES_DEFAULT (0x6 << 8)
+#define MII_VCT5_CTRL_SAMPLES_256 (0x7 << 8)
+#define MII_VCT5_CTRL_SAMPLES_SHIFT 8
+#define MII_VCT5_CTRL_MODE_MAXIMUM_PEEK (0x0 << 6)
+#define MII_VCT5_CTRL_MODE_FIRST_LAST_PEEK (0x1 << 6)
+#define MII_VCT5_CTRL_MODE_OFFSET (0x2 << 6)
+#define MII_VCT5_CTRL_SAMPLE_POINT (0x3 << 6)
+#define MII_VCT5_CTRL_PEEK_HYST_DEFAULT 3
+
+#define MII_VCT5_SAMPLE_POINT_DISTANCE 0x18
+#define MII_VCT5_SAMPLE_POINT_DISTANCE_MAX 511
+#define MII_VCT5_TX_PULSE_CTRL 0x1c
+#define MII_VCT5_TX_PULSE_CTRL_DONT_WAIT_LINK_DOWN BIT(12)
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_128nS (0x0 << 10)
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_96nS (0x1 << 10)
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_64nS (0x2 << 10)
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_32nS (0x3 << 10)
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_SHIFT 10
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_1000mV (0x0 << 8)
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_750mV (0x1 << 8)
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_500mV (0x2 << 8)
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_250mV (0x3 << 8)
+#define MII_VCT5_TX_PULSE_CTRL_PULSE_AMPLITUDE_SHIFT 8
+#define MII_VCT5_TX_PULSE_CTRL_MAX_AMP BIT(7)
+#define MII_VCT5_TX_PULSE_CTRL_GT_140m_46_86mV (0x6 << 0)
+
+/* For TDR measurements on cables shorter than 11 meters, a short pulse
+ * should be used.
+ */
+#define TDR_SHORT_CABLE_LENGTH 11
+
+#define MII_VCT7_PAIR_0_DISTANCE 0x10
+#define MII_VCT7_PAIR_1_DISTANCE 0x11
+#define MII_VCT7_PAIR_2_DISTANCE 0x12
+#define MII_VCT7_PAIR_3_DISTANCE 0x13
+
+#define MII_VCT7_RESULTS 0x14
+#define MII_VCT7_RESULTS_PAIR3_MASK 0xf000
+#define MII_VCT7_RESULTS_PAIR2_MASK 0x0f00
+#define MII_VCT7_RESULTS_PAIR1_MASK 0x00f0
+#define MII_VCT7_RESULTS_PAIR0_MASK 0x000f
+#define MII_VCT7_RESULTS_PAIR3_SHIFT 12
+#define MII_VCT7_RESULTS_PAIR2_SHIFT 8
+#define MII_VCT7_RESULTS_PAIR1_SHIFT 4
+#define MII_VCT7_RESULTS_PAIR0_SHIFT 0
+#define MII_VCT7_RESULTS_INVALID 0
+#define MII_VCT7_RESULTS_OK 1
+#define MII_VCT7_RESULTS_OPEN 2
+#define MII_VCT7_RESULTS_SAME_SHORT 3
+#define MII_VCT7_RESULTS_CROSS_SHORT 4
+#define MII_VCT7_RESULTS_BUSY 9
+
+#define MII_VCT7_CTRL 0x15
+#define MII_VCT7_CTRL_RUN_NOW BIT(15)
+#define MII_VCT7_CTRL_RUN_ANEG BIT(14)
+#define MII_VCT7_CTRL_DISABLE_CROSS BIT(13)
+#define MII_VCT7_CTRL_RUN_AFTER_BREAK_LINK BIT(12)
+#define MII_VCT7_CTRL_IN_PROGRESS BIT(11)
+#define MII_VCT7_CTRL_METERS BIT(10)
+#define MII_VCT7_CTRL_CENTIMETERS 0
+
#define LPA_PAUSE_FIBER 0x180
#define LPA_PAUSE_ASYM_FIBER 0x100
@@ -188,6 +275,11 @@ struct marvell_priv {
u64 stats[ARRAY_SIZE(marvell_hw_stats)];
char *hwmon_name;
struct device *hwmon_dev;
+ bool cable_test_tdr;
+ u32 first;
+ u32 last;
+ u32 step;
+ s8 pair;
};
static int marvell_read_page(struct phy_device *phydev)
@@ -1658,6 +1750,382 @@ static void marvell_get_stats(struct phy_device *phydev,
data[i] = marvell_get_stat(phydev, i);
}
+static int marvell_vct5_wait_complete(struct phy_device *phydev)
+{
+ int i;
+ int val;
+
+ for (i = 0; i < 32; i++) {
+ val = __phy_read(phydev, MII_VCT5_CTRL);
+ if (val < 0)
+ return val;
+
+ if (val & MII_VCT5_CTRL_COMPLETE)
+ return 0;
+ }
+
+ phydev_err(phydev, "Timeout while waiting for cable test to finish\n");
+ return -ETIMEDOUT;
+}
+
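+/* The coupling registers report a 7-bit amplitude magnitude plus a
+ * sign bit; scale it to millivolts of the 1000 mV TDR pulse (the value
+ * advertised via ethnl_cable_test_pulse() below): mV = 1000 * val / 128.
+ */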
+static int marvell_vct5_amplitude(struct phy_device *phydev, int pair)
+{
+ int amplitude;
+ int val;
+ int reg;
+
+ reg = MII_VCT5_TX_RX_MDI0_COUPLING + pair;
+ val = __phy_read(phydev, reg);
+
+ if (val < 0)
+ return 0;
+
+ amplitude = (val & MII_VCT5_TX_RX_AMPLITUDE_MASK) >>
+ MII_VCT5_TX_RX_AMPLITUDE_SHIFT;
+
+ if (!(val & MII_VCT5_TX_RX_COUPLING_POSITIVE_REFLECTION))
+ amplitude = -amplitude;
+
+ return 1000 * amplitude / 128;
+}
+
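+/* One VCT5 sample point corresponds to 805/10 = 80.5 cm of cable, so
+ * these helpers convert between the register unit and centimeters
+ * (truncating towards zero).
+ */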
+static u32 marvell_vct5_distance2cm(int distance)
+{
+ return distance * 805 / 10;
+}
+
+static u32 marvell_vct5_cm2distance(int cm)
+{
+ return cm * 10 / 805;
+}
+
+static int marvell_vct5_amplitude_distance(struct phy_device *phydev,
+ int distance, int pair)
+{
+ u16 reg;
+ int err;
+ int mV;
+ int i;
+
+ err = __phy_write(phydev, MII_VCT5_SAMPLE_POINT_DISTANCE,
+ distance);
+ if (err)
+ return err;
+
+ reg = MII_VCT5_CTRL_ENABLE |
+ MII_VCT5_CTRL_TX_SAME_CHANNEL |
+ MII_VCT5_CTRL_SAMPLES_DEFAULT |
+ MII_VCT5_CTRL_SAMPLE_POINT |
+ MII_VCT5_CTRL_PEEK_HYST_DEFAULT;
+ err = __phy_write(phydev, MII_VCT5_CTRL, reg);
+ if (err)
+ return err;
+
+ err = marvell_vct5_wait_complete(phydev);
+ if (err)
+ return err;
+
+ for (i = 0; i < 4; i++) {
+ if (pair != PHY_PAIR_ALL && i != pair)
+ continue;
+
+ mV = marvell_vct5_amplitude(phydev, i);
+ ethnl_cable_test_amplitude(phydev, i, mV);
+ }
+
+ return 0;
+}
+
+static int marvell_vct5_amplitude_graph(struct phy_device *phydev)
+{
+ struct marvell_priv *priv = phydev->priv;
+ int distance;
+ u16 width;
+ int page;
+ int err;
+ u16 reg;
+
+ if (priv->first <= TDR_SHORT_CABLE_LENGTH)
+ width = MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_32nS;
+ else
+ width = MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_128nS;
+
+ reg = MII_VCT5_TX_PULSE_CTRL_GT_140m_46_86mV |
+ MII_VCT5_TX_PULSE_CTRL_DONT_WAIT_LINK_DOWN |
+ MII_VCT5_TX_PULSE_CTRL_MAX_AMP | width;
+
+ err = phy_write_paged(phydev, MII_MARVELL_VCT5_PAGE,
+ MII_VCT5_TX_PULSE_CTRL, reg);
+ if (err)
+ return err;
+
+ /* Reading the TDR data is very MDIO heavy. We need to optimize
+ * access to keep the time to a minimum. So lock the bus once,
+ * and don't release it until complete. We can then avoid having
+ * to change the page for every access, greatly speeding things
+ * up.
+ */
+ page = phy_select_page(phydev, MII_MARVELL_VCT5_PAGE);
+ if (page < 0)
+ goto restore_page;
+
+ for (distance = priv->first;
+ distance <= priv->last;
+ distance += priv->step) {
+ err = marvell_vct5_amplitude_distance(phydev, distance,
+ priv->pair);
+ if (err)
+ goto restore_page;
+
+ if (distance > TDR_SHORT_CABLE_LENGTH &&
+ width == MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_32nS) {
+ width = MII_VCT5_TX_PULSE_CTRL_PULSE_WIDTH_128nS;
+ reg = MII_VCT5_TX_PULSE_CTRL_GT_140m_46_86mV |
+ MII_VCT5_TX_PULSE_CTRL_DONT_WAIT_LINK_DOWN |
+ MII_VCT5_TX_PULSE_CTRL_MAX_AMP | width;
+ err = __phy_write(phydev, MII_VCT5_TX_PULSE_CTRL, reg);
+ if (err)
+ goto restore_page;
+ }
+ }
+
+restore_page:
+ return phy_restore_page(phydev, page, err);
+}
+
+static int marvell_cable_test_start_common(struct phy_device *phydev)
+{
+ int bmcr, bmsr, ret;
+
+ /* If auto-negotiation is enabled, but not complete, the cable
+ * test never completes. So disable auto-neg.
+ */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ bmsr = phy_read(phydev, MII_BMSR);
+
+ if (bmsr < 0)
+ return bmsr;
+
+ if (bmcr & BMCR_ANENABLE) {
+ ret = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (ret < 0)
+ return ret;
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* If the link is up, allow it some time to go down */
+ if (bmsr & BMSR_LSTATUS)
+ msleep(1500);
+
+ return 0;
+}
+
+static int marvell_vct7_cable_test_start(struct phy_device *phydev)
+{
+ struct marvell_priv *priv = phydev->priv;
+ int ret;
+
+ ret = marvell_cable_test_start_common(phydev);
+ if (ret)
+ return ret;
+
+ priv->cable_test_tdr = false;
+
+ /* Reset the VCT5 API control to defaults, otherwise
+ * VCT7 does not work correctly.
+ */
+ ret = phy_write_paged(phydev, MII_MARVELL_VCT5_PAGE,
+ MII_VCT5_CTRL,
+ MII_VCT5_CTRL_TX_SAME_CHANNEL |
+ MII_VCT5_CTRL_SAMPLES_DEFAULT |
+ MII_VCT5_CTRL_MODE_MAXIMUM_PEEK |
+ MII_VCT5_CTRL_PEEK_HYST_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = phy_write_paged(phydev, MII_MARVELL_VCT5_PAGE,
+ MII_VCT5_SAMPLE_POINT_DISTANCE, 0);
+ if (ret)
+ return ret;
+
+ return phy_write_paged(phydev, MII_MARVELL_VCT7_PAGE,
+ MII_VCT7_CTRL,
+ MII_VCT7_CTRL_RUN_NOW |
+ MII_VCT7_CTRL_CENTIMETERS);
+}
+
+static int marvell_vct5_cable_test_tdr_start(struct phy_device *phydev,
+ const struct phy_tdr_config *cfg)
+{
+ struct marvell_priv *priv = phydev->priv;
+ int ret;
+
+ priv->cable_test_tdr = true;
+ priv->first = marvell_vct5_cm2distance(cfg->first);
+ priv->last = marvell_vct5_cm2distance(cfg->last);
+ priv->step = marvell_vct5_cm2distance(cfg->step);
+ priv->pair = cfg->pair;
+
+ if (priv->first > MII_VCT5_SAMPLE_POINT_DISTANCE_MAX)
+ return -EINVAL;
+
+ if (priv->last > MII_VCT5_SAMPLE_POINT_DISTANCE_MAX)
+ return -EINVAL;
+
+ /* Disable VCT7 */
+ ret = phy_write_paged(phydev, MII_MARVELL_VCT7_PAGE,
+ MII_VCT7_CTRL, 0);
+ if (ret)
+ return ret;
+
+ ret = marvell_cable_test_start_common(phydev);
+ if (ret)
+ return ret;
+
+ ret = ethnl_cable_test_pulse(phydev, 1000);
+ if (ret)
+ return ret;
+
+ return ethnl_cable_test_step(phydev,
+ marvell_vct5_distance2cm(priv->first),
+ marvell_vct5_distance2cm(priv->last),
+ marvell_vct5_distance2cm(priv->step));
+}
+
+static int marvell_vct7_distance_to_length(int distance, bool meter)
+{
+ if (meter)
+ distance *= 100;
+
+ return distance;
+}
+
+static bool marvell_vct7_distance_valid(int result)
+{
+ switch (result) {
+ case MII_VCT7_RESULTS_OPEN:
+ case MII_VCT7_RESULTS_SAME_SHORT:
+ case MII_VCT7_RESULTS_CROSS_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int marvell_vct7_report_length(struct phy_device *phydev,
+ int pair, bool meter)
+{
+ int length;
+ int ret;
+
+ ret = phy_read_paged(phydev, MII_MARVELL_VCT7_PAGE,
+ MII_VCT7_PAIR_0_DISTANCE + pair);
+ if (ret < 0)
+ return ret;
+
+ length = marvell_vct7_distance_to_length(ret, meter);
+
+ ethnl_cable_test_fault_length(phydev, pair, length);
+
+ return 0;
+}
+
+static int marvell_vct7_cable_test_report_trans(int result)
+{
+ switch (result) {
+ case MII_VCT7_RESULTS_OK:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case MII_VCT7_RESULTS_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case MII_VCT7_RESULTS_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case MII_VCT7_RESULTS_CROSS_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int marvell_vct7_cable_test_report(struct phy_device *phydev)
+{
+ int pair0, pair1, pair2, pair3;
+ bool meter;
+ int ret;
+
+ ret = phy_read_paged(phydev, MII_MARVELL_VCT7_PAGE,
+ MII_VCT7_RESULTS);
+ if (ret < 0)
+ return ret;
+
+ pair3 = (ret & MII_VCT7_RESULTS_PAIR3_MASK) >>
+ MII_VCT7_RESULTS_PAIR3_SHIFT;
+ pair2 = (ret & MII_VCT7_RESULTS_PAIR2_MASK) >>
+ MII_VCT7_RESULTS_PAIR2_SHIFT;
+ pair1 = (ret & MII_VCT7_RESULTS_PAIR1_MASK) >>
+ MII_VCT7_RESULTS_PAIR1_SHIFT;
+ pair0 = (ret & MII_VCT7_RESULTS_PAIR0_MASK) >>
+ MII_VCT7_RESULTS_PAIR0_SHIFT;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ marvell_vct7_cable_test_report_trans(pair0));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
+ marvell_vct7_cable_test_report_trans(pair1));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
+ marvell_vct7_cable_test_report_trans(pair2));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
+ marvell_vct7_cable_test_report_trans(pair3));
+
+ ret = phy_read_paged(phydev, MII_MARVELL_VCT7_PAGE, MII_VCT7_CTRL);
+ if (ret < 0)
+ return ret;
+
+ meter = ret & MII_VCT7_CTRL_METERS;
+
+ if (marvell_vct7_distance_valid(pair0))
+ marvell_vct7_report_length(phydev, 0, meter);
+ if (marvell_vct7_distance_valid(pair1))
+ marvell_vct7_report_length(phydev, 1, meter);
+ if (marvell_vct7_distance_valid(pair2))
+ marvell_vct7_report_length(phydev, 2, meter);
+ if (marvell_vct7_distance_valid(pair3))
+ marvell_vct7_report_length(phydev, 3, meter);
+
+ return 0;
+}
+
+static int marvell_vct7_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ struct marvell_priv *priv = phydev->priv;
+ int ret;
+
+ if (priv->cable_test_tdr) {
+ ret = marvell_vct5_amplitude_graph(phydev);
+ *finished = true;
+ return ret;
+ }
+
+ *finished = false;
+
+ ret = phy_read_paged(phydev, MII_MARVELL_VCT7_PAGE,
+ MII_VCT7_CTRL);
+
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MII_VCT7_CTRL_IN_PROGRESS)) {
+ *finished = true;
+
+ return marvell_vct7_cable_test_report(phydev);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_HWMON
static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
{
@@ -2353,6 +2821,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1510",
.features = PHY_GBIT_FIBRE_FEATURES,
+ .flags = PHY_POLL_CABLE_TEST,
.probe = &m88e1510_probe,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2372,12 +2841,16 @@ static struct phy_driver marvell_drivers[] = {
.set_loopback = genphy_loopback,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E1540,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1540",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.probe = m88e1510_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2394,6 +2867,9 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
@@ -2401,6 +2877,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2416,6 +2893,9 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E3016,
@@ -2442,6 +2922,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6390",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e6390_config_aneg,
@@ -2458,6 +2939,9 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
},
};
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1f1a01c98e44..d4c2e62b2439 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -753,7 +753,6 @@ static struct phy_driver mv3310_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x3310",
.get_features = mv3310_get_features,
- .soft_reset = genphy_no_soft_reset,
.config_init = mv3310_config_init,
.probe = mv3310_probe,
.suspend = mv3310_suspend,
@@ -771,7 +770,6 @@ static struct phy_driver mv3310_drivers[] = {
.probe = mv3310_probe,
.suspend = mv3310_suspend,
.resume = mv3310_resume,
- .soft_reset = genphy_no_soft_reset,
.config_init = mv3310_config_init,
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index f1ded03f0229..77fc970cdfde 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -159,7 +159,7 @@ static int iproc_mdio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- dev_info(&pdev->dev, "Broadcom iProc MDIO bus at 0x%p\n", priv->base);
+ dev_info(&pdev->dev, "Broadcom iProc MDIO bus registered\n");
return 0;
@@ -179,7 +179,7 @@ static int iproc_mdio_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-int iproc_mdio_resume(struct device *dev)
+static int iproc_mdio_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
diff --git a/drivers/net/phy/mdio-ipq4019.c b/drivers/net/phy/mdio-ipq4019.c
new file mode 100644
index 000000000000..1ce81ff2f41d
--- /dev/null
+++ b/drivers/net/phy/mdio-ipq4019.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2020 Sartura Ltd. */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#define MDIO_ADDR_REG 0x44
+#define MDIO_DATA_WRITE_REG 0x48
+#define MDIO_DATA_READ_REG 0x4c
+#define MDIO_CMD_REG 0x50
+#define MDIO_CMD_ACCESS_BUSY BIT(16)
+#define MDIO_CMD_ACCESS_START BIT(8)
+#define MDIO_CMD_ACCESS_CODE_READ 0
+#define MDIO_CMD_ACCESS_CODE_WRITE 1
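+/* As used below, (mii_id << 8) | regnum packs the 5-bit PHY address
+ * into bits [12:8] of MDIO_ADDR_REG and the 5-bit register number into
+ * bits [4:0].
+ */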
+
+#define ipq4019_MDIO_TIMEOUT 10000
+#define ipq4019_MDIO_SLEEP 10
+
+struct ipq4019_mdio_data {
+ void __iomem *membase;
+};
+
+static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int busy;
+
+ return readl_poll_timeout(priv->membase + MDIO_CMD_REG, busy,
+ (busy & MDIO_CMD_ACCESS_BUSY) == 0,
+ ipq4019_MDIO_SLEEP, ipq4019_MDIO_TIMEOUT);
+}
+
+static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int cmd;
+
+ /* Reject clause 45 */
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+
+ /* issue read command */
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ /* Wait read complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ /* Read and return data */
+ return readl(priv->membase + MDIO_DATA_READ_REG);
+}
+
+static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int cmd;
+
+ /* Reject clause 45 */
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+
+ /* issue write data */
+ writel(value, priv->membase + MDIO_DATA_WRITE_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
+ /* issue write command */
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ /* Wait write complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ipq4019_mdio_probe(struct platform_device *pdev)
+{
+ struct ipq4019_mdio_data *priv;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
+ if (!bus)
+ return -ENOMEM;
+
+ priv = bus->priv;
+
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->membase))
+ return PTR_ERR(priv->membase);
+
+ bus->name = "ipq4019_mdio";
+ bus->read = ipq4019_mdio_read;
+ bus->write = ipq4019_mdio_write;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
+
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+}
+
+static int ipq4019_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+
+ return 0;
+}
+
+static const struct of_device_id ipq4019_mdio_dt_ids[] = {
+ { .compatible = "qcom,ipq4019-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids);
+
+static struct platform_driver ipq4019_mdio_driver = {
+ .probe = ipq4019_mdio_probe,
+ .remove = ipq4019_mdio_remove,
+ .driver = {
+ .name = "ipq4019-mdio",
+ .of_match_table = ipq4019_mdio_dt_ids,
+ },
+};
+
+module_platform_driver(ipq4019_mdio_driver);
+
+MODULE_DESCRIPTION("ipq4019 MDIO interface driver");
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
index 2d16fc4173c1..b72c6d185175 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/phy/mdio-moxart.c
@@ -12,7 +12,6 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
#define REG_PHY_CTRL 0
#define REG_PHY_WRITE_DATA 4
diff --git a/drivers/net/phy/mdio-mscc-miim.c b/drivers/net/phy/mdio-mscc-miim.c
index badbc99bedd3..11f583fd4611 100644
--- a/drivers/net/phy/mdio-mscc-miim.c
+++ b/drivers/net/phy/mdio-mscc-miim.c
@@ -16,6 +16,7 @@
#include <linux/of_mdio.h>
#define MSCC_MIIM_REG_STATUS 0x0
+#define MSCC_MIIM_STATUS_STAT_PENDING BIT(2)
#define MSCC_MIIM_STATUS_STAT_BUSY BIT(3)
#define MSCC_MIIM_REG_CMD 0x8
#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
@@ -38,17 +39,35 @@ struct mscc_miim_dev {
void __iomem *phy_regs;
};
+/* When high-resolution timers aren't built in, we can't use
+ * usleep_range() as we would sleep way too long. Use udelay() instead.
+ */
+#define mscc_readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+({ \
+ if (!IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) \
+ readl_poll_timeout_atomic(addr, val, cond, delay_us, \
+ timeout_us); \
+ readl_poll_timeout(addr, val, cond, delay_us, timeout_us); \
+})
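+
+/* Note: when CONFIG_HIGH_RES_TIMERS is disabled, the atomic (udelay
+ * based) poll runs to completion first; the sleeping poll that follows
+ * re-checks the condition before its first sleep, so in the common
+ * case it returns immediately. Either way the macro's value is that of
+ * the final readl_poll_timeout().
+ */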
+
static int mscc_miim_wait_ready(struct mii_bus *bus)
{
struct mscc_miim_dev *miim = bus->priv;
u32 val;
- readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val,
- !(val & MSCC_MIIM_STATUS_STAT_BUSY), 100, 250000);
- if (val & MSCC_MIIM_STATUS_STAT_BUSY)
- return -ETIMEDOUT;
+ return mscc_readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val,
+ !(val & MSCC_MIIM_STATUS_STAT_BUSY), 50,
+ 10000);
+}
- return 0;
+static int mscc_miim_wait_pending(struct mii_bus *bus)
+{
+ struct mscc_miim_dev *miim = bus->priv;
+ u32 val;
+
+ return mscc_readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val,
+ !(val & MSCC_MIIM_STATUS_STAT_PENDING),
+ 50, 10000);
}
static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -57,7 +76,7 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
u32 val;
int ret;
- ret = mscc_miim_wait_ready(bus);
+ ret = mscc_miim_wait_pending(bus);
if (ret)
goto out;
@@ -86,7 +105,7 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
struct mscc_miim_dev *miim = bus->priv;
int ret;
- ret = mscc_miim_wait_ready(bus);
+ ret = mscc_miim_wait_pending(bus);
if (ret < 0)
goto out;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 7a4eb3f2cb74..255fdfcc13a6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -42,14 +42,11 @@
static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
{
- int error;
-
/* Deassert the optional reset signal */
mdiodev->reset_gpio = gpiod_get_optional(&mdiodev->dev,
"reset", GPIOD_OUT_LOW);
- error = PTR_ERR_OR_ZERO(mdiodev->reset_gpio);
- if (error)
- return error;
+ if (IS_ERR(mdiodev->reset_gpio))
+ return PTR_ERR(mdiodev->reset_gpio);
if (mdiodev->reset_gpio)
gpiod_set_consumer_name(mdiodev->reset_gpio, "PHY reset");
@@ -170,7 +167,12 @@ EXPORT_SYMBOL(mdiobus_alloc_size);
static void _devm_mdiobus_free(struct device *dev, void *res)
{
- mdiobus_free(*(struct mii_bus **)res);
+ struct mii_bus *bus = *(struct mii_bus **)res;
+
+ if (bus->is_managed_registered && bus->state == MDIOBUS_REGISTERED)
+ mdiobus_unregister(bus);
+
+ mdiobus_free(bus);
}
static int devm_mdiobus_match(struct device *dev, void *res, void *data)
@@ -210,6 +212,7 @@ struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
if (bus) {
*ptr = bus;
devres_add(dev, ptr);
+ bus->is_managed = 1;
} else {
devres_free(ptr);
}
@@ -611,6 +614,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
}
mutex_init(&bus->mdio_lock);
+ mutex_init(&bus->shared_lock);
/* de-assert bus level PHY GPIO reset */
gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_LOW);
@@ -627,8 +631,11 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
gpiod_set_value_cansleep(gpiod, 0);
}
- if (bus->reset)
- bus->reset(bus);
+ if (bus->reset) {
+ err = bus->reset(bus);
+ if (err)
+ goto error_reset_gpiod;
+ }
for (i = 0; i < PHY_MAX_ADDR; i++) {
if ((bus->phy_mask & (1 << i)) == 0) {
@@ -657,7 +664,7 @@ error:
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
}
-
+error_reset_gpiod:
/* Put PHYs in RESET to save power */
if (bus->reset_gpiod)
gpiod_set_value_cansleep(bus->reset_gpiod, 1);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3a4d83fa52dc..3fe552675dd2 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,6 +19,7 @@
* ksz9477
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
@@ -490,9 +491,50 @@ static int ksz9021_config_init(struct phy_device *phydev)
/* MMD Address 0x2 */
#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
+#define MII_KSZ9031RN_RX_CTL_M GENMASK(7, 4)
+#define MII_KSZ9031RN_TX_CTL_M GENMASK(3, 0)
+
#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
+#define MII_KSZ9031RN_RXD3 GENMASK(15, 12)
+#define MII_KSZ9031RN_RXD2 GENMASK(11, 8)
+#define MII_KSZ9031RN_RXD1 GENMASK(7, 4)
+#define MII_KSZ9031RN_RXD0 GENMASK(3, 0)
+
#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
+#define MII_KSZ9031RN_TXD3 GENMASK(15, 12)
+#define MII_KSZ9031RN_TXD2 GENMASK(11, 8)
+#define MII_KSZ9031RN_TXD1 GENMASK(7, 4)
+#define MII_KSZ9031RN_TXD0 GENMASK(3, 0)
+
#define MII_KSZ9031RN_CLK_PAD_SKEW 8
+#define MII_KSZ9031RN_GTX_CLK GENMASK(9, 5)
+#define MII_KSZ9031RN_RX_CLK GENMASK(4, 0)
+
+/* KSZ9031 has internal RGMII_IDRX = 1.2ns and RGMII_IDTX = 0ns. To
+ * provide different RGMII options we need to configure a delay offset
+ * for each pad relative to the built-in delay.
+ */
+/* keep rx as "No delay adjustment" and set rx_clk to +0.60ns to get delays of
+ * 1.80ns
+ */
+#define RX_ID 0x7
+#define RX_CLK_ID 0x19
+
+/* set rx to +0.30ns and rx_clk to -0.90ns to compensate for the
+ * internal 1.2ns delay.
+ */
+#define RX_ND 0xc
+#define RX_CLK_ND 0x0
+
+/* set tx to -0.42ns and tx_clk to +0.96ns to get 1.38ns delay */
+#define TX_ID 0x0
+#define TX_CLK_ID 0x1f
+
+/* set tx and tx_clk to "No delay adjustment" to keep 0ns delay */
+#define TX_ND 0x7
+#define TX_CLK_ND 0xf
/* MMD Address 0x1C */
#define MII_KSZ9031RN_EDPD 0x23
@@ -501,7 +543,8 @@ static int ksz9021_config_init(struct phy_device *phydev)
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg, size_t field_sz,
- const char *field[], u8 numfields)
+ const char *field[], u8 numfields,
+ bool *update)
{
int val[4] = {-1, -2, -3, -4};
int matches = 0;
@@ -517,6 +560,8 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
if (!matches)
return 0;
+ *update |= true;
+
if (matches < numfields)
newval = phy_read_mmd(phydev, 2, reg);
else
@@ -565,6 +610,67 @@ static int ksz9031_enable_edpd(struct phy_device *phydev)
reg | MII_KSZ9031RN_EDPD_ENABLE);
}
+static int ksz9031_config_rgmii_delay(struct phy_device *phydev)
+{
+ u16 rx, tx, rx_clk, tx_clk;
+ int ret;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ tx = TX_ND;
+ tx_clk = TX_CLK_ND;
+ rx = RX_ND;
+ rx_clk = RX_CLK_ND;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ tx = TX_ID;
+ tx_clk = TX_CLK_ID;
+ rx = RX_ID;
+ rx_clk = RX_CLK_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ tx = TX_ND;
+ tx_clk = TX_CLK_ND;
+ rx = RX_ID;
+ rx_clk = RX_CLK_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ tx = TX_ID;
+ tx_clk = TX_CLK_ID;
+ rx = RX_ND;
+ rx_clk = RX_CLK_ND;
+ break;
+ default:
+ return 0;
+ }
+
+ ret = phy_write_mmd(phydev, 2, MII_KSZ9031RN_CONTROL_PAD_SKEW,
+ FIELD_PREP(MII_KSZ9031RN_RX_CTL_M, rx) |
+ FIELD_PREP(MII_KSZ9031RN_TX_CTL_M, tx));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, 2, MII_KSZ9031RN_RX_DATA_PAD_SKEW,
+ FIELD_PREP(MII_KSZ9031RN_RXD3, rx) |
+ FIELD_PREP(MII_KSZ9031RN_RXD2, rx) |
+ FIELD_PREP(MII_KSZ9031RN_RXD1, rx) |
+ FIELD_PREP(MII_KSZ9031RN_RXD0, rx));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, 2, MII_KSZ9031RN_TX_DATA_PAD_SKEW,
+ FIELD_PREP(MII_KSZ9031RN_TXD3, tx) |
+ FIELD_PREP(MII_KSZ9031RN_TXD2, tx) |
+ FIELD_PREP(MII_KSZ9031RN_TXD1, tx) |
+ FIELD_PREP(MII_KSZ9031RN_TXD0, tx));
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, 2, MII_KSZ9031RN_CLK_PAD_SKEW,
+ FIELD_PREP(MII_KSZ9031RN_GTX_CLK, tx_clk) |
+ FIELD_PREP(MII_KSZ9031RN_RX_CLK, rx_clk));
+}
+
static int ksz9031_config_init(struct phy_device *phydev)
{
const struct device *dev = &phydev->mdio.dev;
@@ -597,21 +703,33 @@ static int ksz9031_config_init(struct phy_device *phydev)
} while (!of_node && dev_walker);
if (of_node) {
+ bool update = false;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ result = ksz9031_config_rgmii_delay(phydev);
+ if (result < 0)
+ return result;
+ }
+
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_CLK_PAD_SKEW, 5,
- clk_skews, 2);
+ clk_skews, 2, &update);
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
- control_skews, 2);
+ control_skews, 2, &update);
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
- rx_data_skews, 4);
+ rx_data_skews, 4, &update);
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
- tx_data_skews, 4);
+ tx_data_skews, 4, &update);
+
+ if (update && phydev->interface != PHY_INTERFACE_MODE_RGMII)
+ phydev_warn(phydev,
+ "*-skew-ps values should be used only with phy-mode = \"rgmii\"\n");
/* Silicon Errata Sheet (DS80000691D or DS80000692D):
* When the device links in the 1000BASE-T slave mode only,
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 030bf8b600df..f828c917b9f7 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -353,7 +353,8 @@ struct vsc8531_private {
const struct vsc85xx_hw_stat *hw_stats;
u64 *stats;
int nstats;
- bool pkg_init;
+ /* PHY address within the package. */
+ u8 addr;
/* For multiple port PHYs; the MDIO address of the base PHY in the
* package.
*/
diff --git a/drivers/net/phy/mscc/mscc_mac.h b/drivers/net/phy/mscc/mscc_mac.h
index fcb5ba5e5d03..59b6837c60b3 100644
--- a/drivers/net/phy/mscc/mscc_mac.h
+++ b/drivers/net/phy/mscc/mscc_mac.h
@@ -152,8 +152,8 @@
#define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE BIT(0)
#define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN BIT(4)
-#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2
-#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x)
-#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0)
+#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2
+#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x)
+#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0)
#endif /* _MSCC_PHY_LINE_MAC_H_ */
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index e99e2cd72a0c..b4d3dc4068e2 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -316,6 +316,8 @@ static void vsc8584_macsec_mac_init(struct phy_device *phydev,
/* Must be called with mdio_lock taken */
static int __vsc8584_macsec_init(struct phy_device *phydev)
{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank proc_bank;
u32 val;
vsc8584_macsec_block_init(phydev, MACSEC_INGR);
@@ -351,12 +353,14 @@ static int __vsc8584_macsec_init(struct phy_device *phydev)
val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
- val = vsc8584_macsec_phy_read(phydev, IP_1588,
- MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL);
- val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
- val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
- vsc8584_macsec_phy_write(phydev, IP_1588,
- MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
+ proc_bank = (priv->addr < 2) ? PROC_0 : PROC_2;
+
+ val = vsc8584_macsec_phy_read(phydev, proc_bank,
+ MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL);
+ val &= ~MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
+ val |= MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
+ vsc8584_macsec_phy_write(phydev, proc_bank,
+ MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
return 0;
}
diff --git a/drivers/net/phy/mscc/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h
index d0783944d106..d751f2946b79 100644
--- a/drivers/net/phy/mscc/mscc_macsec.h
+++ b/drivers/net/phy/mscc/mscc_macsec.h
@@ -64,7 +64,8 @@ enum macsec_bank {
FC_BUFFER = 0x04,
HOST_MAC = 0x05,
LINE_MAC = 0x06,
- IP_1588 = 0x0e,
+ PROC_0 = 0x0e,
+ PROC_2 = 0x0f,
MACSEC_INGR = 0x38,
MACSEC_EGR = 0x3c,
};
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index acddef79f4e8..7ed0285206d0 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -691,27 +691,23 @@ out_unlock:
/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
{
- struct vsc8531_private *priv = phydev->priv;
-
if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
dump_stack();
}
- return __mdiobus_write(phydev->mdio.bus, priv->base_addr, regnum, val);
+ return __phy_package_write(phydev, regnum, val);
}
/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_base_read(struct phy_device *phydev, u32 regnum)
{
- struct vsc8531_private *priv = phydev->priv;
-
if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
dump_stack();
}
- return __mdiobus_read(phydev->mdio.bus, priv->base_addr, regnum);
+ return __phy_package_read(phydev, regnum);
}
/* bus->mdio_lock should be locked when using this function */
@@ -1287,66 +1283,40 @@ out:
return ret;
}
-/* Check if one PHY has already done the init of the parts common to all PHYs
- * in the Quad PHY package.
- */
-static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed)
+static void vsc8584_get_base_addr(struct phy_device *phydev)
{
- struct mdio_device **map = phydev->mdio.bus->mdio_map;
- struct vsc8531_private *vsc8531;
- struct phy_device *phy;
- int i, addr;
-
- /* VSC8584 is a Quad PHY */
- for (i = 0; i < 4; i++) {
- vsc8531 = phydev->priv;
-
- if (reversed)
- addr = vsc8531->base_addr - i;
- else
- addr = vsc8531->base_addr + i;
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u16 val, addr;
- if (!map[addr])
- continue;
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
- phy = container_of(map[addr], struct phy_device, mdio);
+ addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4);
+ addr >>= PHY_CNTL_4_ADDR_POS;
- if ((phy->phy_id & phydev->drv->phy_id_mask) !=
- (phydev->drv->phy_id & phydev->drv->phy_id_mask))
- continue;
+ val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
- vsc8531 = phy->priv;
+ __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
- if (vsc8531 && vsc8531->pkg_init)
- return true;
- }
+ if (val & PHY_ADDR_REVERSED)
+ vsc8531->base_addr = phydev->mdio.addr + addr;
+ else
+ vsc8531->base_addr = phydev->mdio.addr - addr;
- return false;
+ vsc8531->addr = addr;
}
static int vsc8584_config_init(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
- u16 addr, val;
int ret, i;
+ u16 val;
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->mdio.bus->mdio_lock);
- __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
- MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
- addr = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
- MSCC_PHY_EXT_PHY_CNTL_4);
- addr >>= PHY_CNTL_4_ADDR_POS;
-
- val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
- MSCC_PHY_ACTIPHY_CNTL);
- if (val & PHY_ADDR_REVERSED)
- vsc8531->base_addr = phydev->mdio.addr + addr;
- else
- vsc8531->base_addr = phydev->mdio.addr - addr;
-
/* Some parts of the init sequence are identical for every PHY in the
* package. Some parts are modifying the GPIO register bank which is a
* set of registers that are affecting all PHYs, a few resetting the
@@ -1360,7 +1330,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
* do the correct init sequence for all PHYs that are package-critical
* in this pre-init function.
*/
- if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0)) {
+ if (phy_package_init_once(phydev)) {
/* The following switch statement assumes that the lowest
* nibble of the phy_id_mask is always 0. This works because
* the lowest nibble of the PHY_ID's below are also 0.
@@ -1389,8 +1359,6 @@ static int vsc8584_config_init(struct phy_device *phydev)
goto err;
}
- vsc8531->pkg_init = true;
-
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
@@ -1428,7 +1396,8 @@ static int vsc8584_config_init(struct phy_device *phydev)
/* Disable SerDes for 100Base-FX */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
- PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_FIBER_PORT(vsc8531->base_addr) |
+ PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
if (ret)
@@ -1436,7 +1405,8 @@ static int vsc8584_config_init(struct phy_device *phydev)
/* Disable SerDes for 1000Base-X */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
- PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_FIBER_PORT(vsc8531->base_addr) |
+ PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
if (ret)
@@ -1751,26 +1721,14 @@ static int vsc8514_config_init(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
unsigned long deadline;
- u16 val, addr;
int ret, i;
+ u16 val;
u32 reg;
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->mdio.bus->mdio_lock);
- __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
-
- addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4);
- addr >>= PHY_CNTL_4_ADDR_POS;
-
- val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
-
- if (val & PHY_ADDR_REVERSED)
- vsc8531->base_addr = phydev->mdio.addr + addr;
- else
- vsc8531->base_addr = phydev->mdio.addr - addr;
-
/* Some parts of the init sequence are identical for every PHY in the
* package. Some parts are modifying the GPIO register bank which is a
* set of registers that are affecting all PHYs, a few resetting the
@@ -1782,11 +1740,9 @@ static int vsc8514_config_init(struct phy_device *phydev)
* do the correct init sequence for all PHYs that are package-critical
* in this pre-init function.
*/
- if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0))
+ if (phy_package_init_once(phydev))
vsc8514_config_pre_init(phydev);
- vsc8531->pkg_init = true;
-
phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
MSCC_PHY_PAGE_EXTENDED_GPIO);
@@ -1992,6 +1948,10 @@ static int vsc8514_probe(struct phy_device *phydev)
phydev->priv = vsc8531;
+ vsc8584_get_base_addr(phydev);
+ devm_phy_package_join(&phydev->mdio.dev, phydev,
+ vsc8531->base_addr, 0);
+
vsc8531->nleds = 4;
vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
vsc8531->hw_stats = vsc85xx_hw_stats;
@@ -2017,6 +1977,10 @@ static int vsc8574_probe(struct phy_device *phydev)
phydev->priv = vsc8531;
+ vsc8584_get_base_addr(phydev);
+ devm_phy_package_join(&phydev->mdio.dev, phydev,
+ vsc8531->base_addr, 0);
+
vsc8531->nleds = 4;
vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
vsc8531->hw_stats = vsc8584_hw_stats;
@@ -2047,6 +2011,10 @@ static int vsc8584_probe(struct phy_device *phydev)
phydev->priv = vsc8531;
+ vsc8584_get_base_addr(phydev);
+ devm_phy_package_join(&phydev->mdio.dev, phydev,
+ vsc8531->base_addr, 0);
+
vsc8531->nleds = 4;
vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
vsc8531->hw_stats = vsc8584_hw_stats;
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 47caae770ffc..a72fa0d2e7c7 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -5,16 +5,21 @@
*/
#include <linux/delay.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
+#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/hwmon.h>
#include <linux/bitfield.h>
+#include <linux/of_mdio.h>
+#include <linux/of_irq.h>
#define PHY_ID_MASK 0xfffffff0
#define PHY_ID_TJA1100 0x0180dc40
#define PHY_ID_TJA1101 0x0180dd00
+#define PHY_ID_TJA1102 0x0180dc80
#define MII_ECTRL 17
#define MII_ECTRL_LINK_CONTROL BIT(15)
@@ -22,10 +27,12 @@
#define MII_ECTRL_POWER_MODE_NO_CHANGE (0x0 << 11)
#define MII_ECTRL_POWER_MODE_NORMAL (0x3 << 11)
#define MII_ECTRL_POWER_MODE_STANDBY (0xc << 11)
+#define MII_ECTRL_CABLE_TEST BIT(5)
#define MII_ECTRL_CONFIG_EN BIT(2)
#define MII_ECTRL_WAKE_REQUEST BIT(0)
#define MII_CFG1 18
+#define MII_CFG1_MASTER_SLAVE BIT(15)
#define MII_CFG1_AUTO_OP BIT(14)
#define MII_CFG1_SLEEP_CONFIRM BIT(6)
#define MII_CFG1_LED_MODE_MASK GENMASK(5, 4)
@@ -40,18 +47,31 @@
#define MII_INTSRC_TEMP_ERR BIT(1)
#define MII_INTSRC_UV_ERR BIT(3)
+#define MII_INTEN 22
+#define MII_INTEN_LINK_FAIL BIT(10)
+#define MII_INTEN_LINK_UP BIT(9)
+
#define MII_COMMSTAT 23
#define MII_COMMSTAT_LINK_UP BIT(15)
+#define MII_COMMSTAT_SQI_STATE GENMASK(7, 5)
+#define MII_COMMSTAT_SQI_MAX 7
#define MII_GENSTAT 24
#define MII_GENSTAT_PLL_LOCKED BIT(14)
+#define MII_EXTSTAT 25
+#define MII_EXTSTAT_SHORT_DETECT BIT(8)
+#define MII_EXTSTAT_OPEN_DETECT BIT(7)
+#define MII_EXTSTAT_POLARITY_DETECT BIT(6)
+
#define MII_COMMCFG 27
#define MII_COMMCFG_AUTO_OP BIT(15)
struct tja11xx_priv {
char *hwmon_name;
struct device *hwmon_dev;
+ struct phy_device *phydev;
+ struct work_struct phy_register_work;
};
struct tja11xx_phy_stats {
@@ -100,6 +120,11 @@ static int tja11xx_enable_link_control(struct phy_device *phydev)
return phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_LINK_CONTROL);
}
+static int tja11xx_disable_link_control(struct phy_device *phydev)
+{
+ return phy_clear_bits(phydev, MII_ECTRL, MII_ECTRL_LINK_CONTROL);
+}
+
static int tja11xx_wakeup(struct phy_device *phydev)
{
int ret;
@@ -157,6 +182,70 @@ static int tja11xx_soft_reset(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
+static int tja11xx_config_aneg_cable_test(struct phy_device *phydev)
+{
+ bool finished = false;
+ int ret;
+
+ if (phydev->link)
+ return 0;
+
+ if (!phydev->drv->cable_test_start ||
+ !phydev->drv->cable_test_get_status)
+ return 0;
+
+ ret = ethnl_cable_test_alloc(phydev, ETHTOOL_MSG_CABLE_TEST_NTF);
+ if (ret)
+ return ret;
+
+ ret = phydev->drv->cable_test_start(phydev);
+ if (ret)
+ return ret;
+
+ /* According to the documentation, this test takes 100 usec */
+ usleep_range(100, 200);
+
+ ret = phydev->drv->cable_test_get_status(phydev, &finished);
+ if (ret)
+ return ret;
+
+ if (finished)
+ ethnl_cable_test_finished(phydev);
+
+ return 0;
+}
+
+static int tja11xx_config_aneg(struct phy_device *phydev)
+{
+ int ret, changed = 0;
+ u16 ctl = 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= MII_CFG1_MASTER_SLAVE;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ goto do_test;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -ENOTSUPP;
+ }
+
+ changed = phy_modify_changed(phydev, MII_CFG1, MII_CFG1_MASTER_SLAVE, ctl);
+ if (changed < 0)
+ return changed;
+
+do_test:
+ ret = tja11xx_config_aneg_cable_test(phydev);
+ if (ret)
+ return ret;
+
+ return __genphy_config_aneg(phydev, changed);
+}
+
static int tja11xx_config_init(struct phy_device *phydev)
{
int ret;
@@ -180,6 +269,7 @@ static int tja11xx_config_init(struct phy_device *phydev)
return ret;
break;
case PHY_ID_TJA1101:
+ case PHY_ID_TJA1102:
ret = phy_set_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
if (ret)
return ret;
@@ -213,10 +303,22 @@ static int tja11xx_read_status(struct phy_device *phydev)
{
int ret;
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
+
ret = genphy_update_link(phydev);
if (ret)
return ret;
+ ret = phy_read(phydev, MII_CFG1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MII_CFG1_MASTER_SLAVE)
+ phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
+
if (phydev->link) {
ret = phy_read(phydev, MII_COMMSTAT);
if (ret < 0)
@@ -229,6 +331,22 @@ static int tja11xx_read_status(struct phy_device *phydev)
return 0;
}
+static int tja11xx_get_sqi(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_COMMSTAT);
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(MII_COMMSTAT_SQI_STATE, ret);
+}
+
+static int tja11xx_get_sqi_max(struct phy_device *phydev)
+{
+ return MII_COMMSTAT_SQI_MAX;
+}
+
static int tja11xx_get_sset_count(struct phy_device *phydev)
{
return ARRAY_SIZE(tja11xx_hw_stats);
@@ -317,16 +435,12 @@ static const struct hwmon_chip_info tja11xx_hwmon_chip_info = {
.info = tja11xx_hwmon_info,
};
-static int tja11xx_probe(struct phy_device *phydev)
+static int tja11xx_hwmon_register(struct phy_device *phydev,
+ struct tja11xx_priv *priv)
{
struct device *dev = &phydev->mdio.dev;
- struct tja11xx_priv *priv;
int i;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
priv->hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
if (!priv->hwmon_name)
return -ENOMEM;
@@ -344,6 +458,239 @@ static int tja11xx_probe(struct phy_device *phydev)
return PTR_ERR_OR_ZERO(priv->hwmon_dev);
}
+static int tja11xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct tja11xx_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phydev = phydev;
+
+ return tja11xx_hwmon_register(phydev, priv);
+}
+
+static void tja1102_p1_register(struct work_struct *work)
+{
+ struct tja11xx_priv *priv = container_of(work, struct tja11xx_priv,
+ phy_register_work);
+ struct phy_device *phydev_phy0 = priv->phydev;
+ struct mii_bus *bus = phydev_phy0->mdio.bus;
+ struct device *dev = &phydev_phy0->mdio.dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int ret;
+
+ for_each_available_child_of_node(np, child) {
+ struct phy_device *phy;
+ int addr;
+
+ addr = of_mdio_parse_addr(dev, child);
+ if (addr < 0) {
+ dev_err(dev, "Can't parse addr\n");
+ continue;
+ } else if (addr != phydev_phy0->mdio.addr + 1) {
+ /* Currently we care only about the dual-PHY chip TJA1102.
+ * If NXP ever decides to ship chips with more PHYs, this
+ * logic should be reworked.
+ */
+ dev_err(dev, "Unexpected address. Should be: %i\n",
+ phydev_phy0->mdio.addr + 1);
+ continue;
+ }
+
+ if (mdiobus_is_registered_device(bus, addr)) {
+ dev_err(dev, "device is already registered\n");
+ continue;
+ }
+
+ /* Real PHY ID of Port 1 is 0 */
+ phy = phy_device_create(bus, addr, PHY_ID_TJA1102, false, NULL);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "Can't create PHY device for Port 1: %i\n",
+ addr);
+ continue;
+ }
+
+ /* Overwrite the parent device. phy_device_create() sets the
+ * parent to the mii_bus->dev, which is not correct in this case.
+ */
+ phy->mdio.dev.parent = dev;
+
+ ret = of_mdiobus_phy_device_register(bus, phy, child, addr);
+ if (ret) {
+ /* All resources needed for Port 1 should already be
+ * available via Port 0. Both ports use the same
+ * interrupt line, so -EPROBE_DEFER would make no sense
+ * here.
+ */
+ dev_err(dev, "Can't register Port 1. Unexpected error: %i\n",
+ ret);
+ phy_device_free(phy);
+ }
+ }
+}
+
+static int tja1102_p0_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct tja11xx_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phydev = phydev;
+ INIT_WORK(&priv->phy_register_work, tja1102_p1_register);
+
+ ret = tja11xx_hwmon_register(phydev, priv);
+ if (ret)
+ return ret;
+
+ schedule_work(&priv->phy_register_work);
+
+ return 0;
+}
+
+static int tja1102_match_phy_device(struct phy_device *phydev, bool port0)
+{
+ int ret;
+
+ if ((phydev->phy_id & PHY_ID_MASK) != PHY_ID_TJA1102)
+ return 0;
+
+ ret = phy_read(phydev, MII_PHYSID2);
+ if (ret < 0)
+ return ret;
+
+ /* TJA1102 Port 1 has phyid 0 and doesn't support temperature
+ * and undervoltage alarms.
+ */
+ if (port0)
+ return ret ? 1 : 0;
+
+ return !ret;
+}
+
+static int tja1102_p0_match_phy_device(struct phy_device *phydev)
+{
+ return tja1102_match_phy_device(phydev, true);
+}
+
+static int tja1102_p1_match_phy_device(struct phy_device *phydev)
+{
+ return tja1102_match_phy_device(phydev, false);
+}
+
+static int tja11xx_ack_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_INTSRC);
+
+ return (ret < 0) ? ret : 0;
+}
+
+static int tja11xx_config_intr(struct phy_device *phydev)
+{
+ int value = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP;
+
+ return phy_write(phydev, MII_INTEN, value);
+}
+
+static int tja11xx_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_clear_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
+ if (ret)
+ return ret;
+
+ ret = tja11xx_wakeup(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = tja11xx_disable_link_control(phydev);
+ if (ret < 0)
+ return ret;
+
+ return phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_CABLE_TEST);
+}
+
+/*
+ * | BI_DA+ | BI_DA- | Result
+ * | open | open | open
+ * | + short to - | - short to + | short
+ * | short to Vdd | open | open
+ * | open | short to Vdd | open
+ * | short to Vdd | short to Vdd | short
+ * | short to GND | open | open
+ * | open | short to GND | open
+ * | short to GND | short to GND | short
+ * | connected to active link partner (master) | short and open
+ */
+static int tja11xx_cable_test_report_trans(u32 result)
+{
+ u32 mask = MII_EXTSTAT_SHORT_DETECT | MII_EXTSTAT_OPEN_DETECT;
+
+ if ((result & mask) == mask) {
+ /* connected to active link partner (master) */
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ } else if ((result & mask) == 0) {
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ } else if (result & MII_EXTSTAT_SHORT_DETECT) {
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ } else if (result & MII_EXTSTAT_OPEN_DETECT) {
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ } else {
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int tja11xx_cable_test_report(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_EXTSTAT);
+ if (ret < 0)
+ return ret;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ tja11xx_cable_test_report_trans(ret));
+
+ return 0;
+}
+
+static int tja11xx_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret;
+
+ *finished = false;
+
+ ret = phy_read(phydev, MII_ECTRL);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MII_ECTRL_CABLE_TEST)) {
+ *finished = true;
+
+ ret = phy_set_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
+ if (ret)
+ return ret;
+
+ return tja11xx_cable_test_report(phydev);
+ }
+
+ return 0;
+}
+
static struct phy_driver tja11xx_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_TJA1100),
@@ -351,8 +698,11 @@ static struct phy_driver tja11xx_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
.probe = tja11xx_probe,
.soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
.config_init = tja11xx_config_init,
.read_status = tja11xx_read_status,
+ .get_sqi = tja11xx_get_sqi,
+ .get_sqi_max = tja11xx_get_sqi_max,
.suspend = genphy_suspend,
.resume = genphy_resume,
.set_loopback = genphy_loopback,
@@ -366,8 +716,53 @@ static struct phy_driver tja11xx_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
.probe = tja11xx_probe,
.soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .get_sqi = tja11xx_get_sqi,
+ .get_sqi_max = tja11xx_get_sqi_max,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ }, {
+ .name = "NXP TJA1102 Port 0",
+ .features = PHY_BASIC_T1_FEATURES,
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = tja1102_p0_probe,
+ .soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .get_sqi = tja11xx_get_sqi,
+ .get_sqi_max = tja11xx_get_sqi_max,
+ .match_phy_device = tja1102_p0_match_phy_device,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ .ack_interrupt = tja11xx_ack_interrupt,
+ .config_intr = tja11xx_config_intr,
+ .cable_test_start = tja11xx_cable_test_start,
+ .cable_test_get_status = tja11xx_cable_test_get_status,
+ }, {
+ .name = "NXP TJA1102 Port 1",
+ .features = PHY_BASIC_T1_FEATURES,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* currently no probe for Port 1 is needed */
+ .soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
.config_init = tja11xx_config_init,
.read_status = tja11xx_read_status,
+ .get_sqi = tja11xx_get_sqi,
+ .get_sqi_max = tja11xx_get_sqi_max,
+ .match_phy_device = tja1102_p1_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,
.set_loopback = genphy_loopback,
@@ -375,6 +770,10 @@ static struct phy_driver tja11xx_driver[] = {
.get_sset_count = tja11xx_get_sset_count,
.get_strings = tja11xx_get_strings,
.get_stats = tja11xx_get_stats,
+ .ack_interrupt = tja11xx_ack_interrupt,
+ .config_intr = tja11xx_config_intr,
+ .cable_test_start = tja11xx_cable_test_start,
+ .cable_test_get_status = tja11xx_cable_test_get_status,
}
};
@@ -383,6 +782,7 @@ module_phy_driver(tja11xx_driver);
static struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA1102) },
{ }
};
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 67ba47ae5284..defe09d94422 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -564,6 +564,5 @@ struct phy_driver genphy_c45_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic Clause 45 PHY",
- .soft_reset = genphy_no_soft_reset,
.read_status = genphy_c45_read_status,
};
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 66b8c61ca74c..46bd68e9ecfa 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -428,9 +428,8 @@ int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
if (phydev->drv && phydev->drv->read_mmd) {
val = phydev->drv->read_mmd(phydev, devad, regnum);
} else if (phydev->is_c45) {
- u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
-
- val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
+ val = __mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr,
+ devad, regnum);
} else {
struct mii_bus *bus = phydev->mdio.bus;
int phy_addr = phydev->mdio.addr;
@@ -485,10 +484,8 @@ int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
if (phydev->drv && phydev->drv->write_mmd) {
ret = phydev->drv->write_mmd(phydev, devad, regnum, val);
} else if (phydev->is_c45) {
- u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
-
- ret = __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
- addr, val);
+ ret = __mdiobus_c45_write(phydev->mdio.bus, phydev->mdio.addr,
+ devad, regnum, val);
} else {
struct mii_bus *bus = phydev->mdio.bus;
int phy_addr = phydev->mdio.addr;
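The conversions in this hunk replace the open-coded Clause 45 register
encoding with the mdiobus_c45_read()/mdiobus_c45_write() helpers. As
implied by the expressions being removed, the address helper simply packs
the device address and register number into one u32; a sketch matching
that form (the actual definition lives in include/linux/mdio.h):

	static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
	{
		return MII_ADDR_C45 | devad << 16 | regnum;
	}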
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 72c69a9c8a98..1de3938628f4 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -15,12 +15,14 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/netlink.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
#include <linux/sfp.h>
@@ -29,6 +31,9 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <net/sock.h>
#define PHY_STATE_TIME HZ
@@ -44,6 +49,7 @@ static const char *phy_state_to_str(enum phy_state st)
PHY_STATE_STR(UP)
PHY_STATE_STR(RUNNING)
PHY_STATE_STR(NOLINK)
+ PHY_STATE_STR(CABLETEST)
PHY_STATE_STR(HALTED)
}
@@ -52,13 +58,13 @@ static const char *phy_state_to_str(enum phy_state st)
static void phy_link_up(struct phy_device *phydev)
{
- phydev->phy_link_change(phydev, true, true);
+ phydev->phy_link_change(phydev, true);
phy_led_trigger_change_speed(phydev);
}
-static void phy_link_down(struct phy_device *phydev, bool do_carrier)
+static void phy_link_down(struct phy_device *phydev)
{
- phydev->phy_link_change(phydev, false, do_carrier);
+ phydev->phy_link_change(phydev, false);
phy_led_trigger_change_speed(phydev);
}
@@ -295,7 +301,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
phydev->advertising, autoneg == AUTONEG_ENABLE);
phydev->duplex = duplex;
-
+ phydev->master_slave_set = cmd->base.master_slave_cfg;
phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
/* Restart the PHY */
@@ -314,6 +320,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.speed = phydev->speed;
cmd->base.duplex = phydev->duplex;
+ cmd->base.master_slave_cfg = phydev->master_slave_get;
+ cmd->base.master_slave_state = phydev->master_slave_state;
if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
cmd->base.port = PORT_BNC;
else
@@ -353,7 +361,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- devad = MII_ADDR_C45 | devad << 16 | mii_data->reg_num;
+ devad = mdiobus_c45_addr(devad, mii_data->reg_num);
} else {
prtad = mii_data->phy_id;
devad = mii_data->reg_num;
@@ -366,7 +374,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
prtad = mdio_phy_id_prtad(mii_data->phy_id);
devad = mdio_phy_id_devad(mii_data->phy_id);
- devad = MII_ADDR_C45 | devad << 16 | mii_data->reg_num;
+ devad = mdiobus_c45_addr(devad, mii_data->reg_num);
} else {
prtad = mii_data->phy_id;
devad = mii_data->reg_num;
@@ -470,6 +478,144 @@ static void phy_trigger_machine(struct phy_device *phydev)
phy_queue_state_machine(phydev, 0);
}
+static void phy_abort_cable_test(struct phy_device *phydev)
+{
+ int err;
+
+ ethnl_cable_test_finished(phydev);
+
+ err = phy_init_hw(phydev);
+ if (err)
+ phydev_err(phydev, "Error while aborting cable test\n");
+}
+
+int phy_start_cable_test(struct phy_device *phydev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = phydev->attached_dev;
+ int err = -ENOMEM;
+
+ if (!(phydev->drv &&
+ phydev->drv->cable_test_start &&
+ phydev->drv->cable_test_get_status)) {
+ NL_SET_ERR_MSG(extack,
+ "PHY driver does not support cable testing");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&phydev->lock);
+ if (phydev->state == PHY_CABLETEST) {
+ NL_SET_ERR_MSG(extack,
+ "PHY already performing a test");
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (phydev->state < PHY_UP ||
+ phydev->state > PHY_CABLETEST) {
+ NL_SET_ERR_MSG(extack,
+ "PHY not configured. Try setting interface up");
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = ethnl_cable_test_alloc(phydev, ETHTOOL_MSG_CABLE_TEST_NTF);
+ if (err)
+ goto out;
+
+ /* Mark the carrier down until the test is complete */
+ phy_link_down(phydev);
+
+ netif_testing_on(dev);
+ err = phydev->drv->cable_test_start(phydev);
+ if (err) {
+ netif_testing_off(dev);
+ phy_link_up(phydev);
+ goto out_free;
+ }
+
+ phydev->state = PHY_CABLETEST;
+
+ if (phy_polling_mode(phydev))
+ phy_trigger_machine(phydev);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+
+out_free:
+ ethnl_cable_test_free(phydev);
+out:
+ mutex_unlock(&phydev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_start_cable_test);
+
+int phy_start_cable_test_tdr(struct phy_device *phydev,
+ struct netlink_ext_ack *extack,
+ const struct phy_tdr_config *config)
+{
+ struct net_device *dev = phydev->attached_dev;
+ int err = -ENOMEM;
+
+ if (!(phydev->drv &&
+ phydev->drv->cable_test_tdr_start &&
+ phydev->drv->cable_test_get_status)) {
+ NL_SET_ERR_MSG(extack,
+ "PHY driver does not support cable test TDR");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&phydev->lock);
+ if (phydev->state == PHY_CABLETEST) {
+ NL_SET_ERR_MSG(extack,
+ "PHY already performing a test");
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (phydev->state < PHY_UP ||
+ phydev->state > PHY_CABLETEST) {
+ NL_SET_ERR_MSG(extack,
+ "PHY not configured. Try setting interface up");
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = ethnl_cable_test_alloc(phydev, ETHTOOL_MSG_CABLE_TEST_TDR_NTF);
+ if (err)
+ goto out;
+
+ /* Mark the carrier down until the test is complete */
+ phy_link_down(phydev);
+
+ netif_testing_on(dev);
+ err = phydev->drv->cable_test_tdr_start(phydev, config);
+ if (err) {
+ netif_testing_off(dev);
+ phy_link_up(phydev);
+ goto out_free;
+ }
+
+ phydev->state = PHY_CABLETEST;
+
+ if (phy_polling_mode(phydev))
+ phy_trigger_machine(phydev);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+
+out_free:
+ ethnl_cable_test_free(phydev);
+out:
+ mutex_unlock(&phydev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_start_cable_test_tdr);
+
static int phy_config_aneg(struct phy_device *phydev)
{
if (phydev->drv->config_aneg)
@@ -513,7 +659,7 @@ static int phy_check_link_status(struct phy_device *phydev)
phy_link_up(phydev);
} else if (!phydev->link && phydev->state != PHY_NOLINK) {
phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
+ phy_link_down(phydev);
}
return 0;
@@ -800,6 +946,8 @@ EXPORT_SYMBOL(phy_free_interrupt);
*/
void phy_stop(struct phy_device *phydev)
{
+ struct net_device *dev = phydev->attached_dev;
+
if (!phy_is_started(phydev)) {
WARN(1, "called from state %s\n",
phy_state_to_str(phydev->state));
@@ -808,6 +956,11 @@ void phy_stop(struct phy_device *phydev)
mutex_lock(&phydev->lock);
+ if (phydev->state == PHY_CABLETEST) {
+ phy_abort_cable_test(phydev);
+ netif_testing_off(dev);
+ }
+
if (phydev->sfp_bus)
sfp_upstream_stop(phydev->sfp_bus);
@@ -868,8 +1021,10 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
+ struct net_device *dev = phydev->attached_dev;
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
+ bool finished = false;
int err = 0;
mutex_lock(&phydev->lock);
@@ -888,10 +1043,27 @@ void phy_state_machine(struct work_struct *work)
case PHY_RUNNING:
err = phy_check_link_status(phydev);
break;
+ case PHY_CABLETEST:
+ err = phydev->drv->cable_test_get_status(phydev, &finished);
+ if (err) {
+ phy_abort_cable_test(phydev);
+ netif_testing_off(dev);
+ needs_aneg = true;
+ phydev->state = PHY_UP;
+ break;
+ }
+
+ if (finished) {
+ ethnl_cable_test_finished(phydev);
+ netif_testing_off(dev);
+ needs_aneg = true;
+ phydev->state = PHY_UP;
+ }
+ break;
case PHY_HALTED:
if (phydev->link) {
phydev->link = 0;
- phy_link_down(phydev, true);
+ phy_link_down(phydev);
}
do_suspend = true;
break;
@@ -1132,9 +1304,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
/* Restart autonegotiation so the new modes get sent to the
* link partner.
*/
- ret = phy_restart_aneg(phydev);
- if (ret < 0)
- return ret;
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = phy_restart_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ac2784192472..04946de74fa0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -675,16 +675,14 @@ EXPORT_SYMBOL(phy_device_create);
static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
u32 *devices_in_package)
{
- int phy_reg, reg_addr;
+ int phy_reg;
- reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS2;
- phy_reg = mdiobus_read(bus, addr, reg_addr);
+ phy_reg = mdiobus_c45_read(bus, addr, dev_addr, MDIO_DEVS2);
if (phy_reg < 0)
return -EIO;
*devices_in_package = phy_reg << 16;
- reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1;
- phy_reg = mdiobus_read(bus, addr, reg_addr);
+ phy_reg = mdiobus_c45_read(bus, addr, dev_addr, MDIO_DEVS1);
if (phy_reg < 0)
return -EIO;
*devices_in_package |= phy_reg;
@@ -709,11 +707,11 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
*
*/
static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
- struct phy_c45_device_ids *c45_ids) {
- int phy_reg;
- int i, reg_addr;
+ struct phy_c45_device_ids *c45_ids)
+{
const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
u32 *devs = &c45_ids->devices_in_package;
+ int i, phy_reg;
/* Find first non-zero Devices In package. Device zero is reserved
* for 802.3 c45 compliant PHYs, so don't probe it first.
@@ -747,14 +745,12 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
if (!(c45_ids->devices_in_package & (1 << i)))
continue;
- reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID1;
- phy_reg = mdiobus_read(bus, addr, reg_addr);
+ phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID1);
if (phy_reg < 0)
return -EIO;
c45_ids->device_ids[i] = phy_reg << 16;
- reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2;
- phy_reg = mdiobus_read(bus, addr, reg_addr);
+ phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID2);
if (phy_reg < 0)
return -EIO;
c45_ids->device_ids[i] |= phy_reg;
@@ -916,16 +912,14 @@ struct phy_device *phy_find_first(struct mii_bus *bus)
}
EXPORT_SYMBOL(phy_find_first);
-static void phy_link_change(struct phy_device *phydev, bool up, bool do_carrier)
+static void phy_link_change(struct phy_device *phydev, bool up)
{
struct net_device *netdev = phydev->attached_dev;
- if (do_carrier) {
- if (up)
- netif_carrier_on(netdev);
- else
- netif_carrier_off(netdev);
- }
+ if (up)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
phydev->adjust_link(netdev);
if (phydev->mii_ts && phydev->mii_ts->link_state)
phydev->mii_ts->link_state(phydev->mii_ts, phydev);
@@ -1082,8 +1076,12 @@ int phy_init_hw(struct phy_device *phydev)
if (!phydev->drv)
return 0;
- if (phydev->drv->soft_reset)
+ if (phydev->drv->soft_reset) {
ret = phydev->drv->soft_reset(phydev);
+ /* see comment in genphy_soft_reset for an explanation */
+ if (!ret)
+ phydev->suspended = 0;
+ }
if (ret < 0)
return ret;
@@ -1233,7 +1231,7 @@ int phy_sfp_probe(struct phy_device *phydev,
const struct sfp_upstream_ops *ops)
{
struct sfp_bus *bus;
- int ret;
+ int ret = 0;
if (phydev->mdio.dev.fwnode) {
bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode);
@@ -1245,7 +1243,7 @@ int phy_sfp_probe(struct phy_device *phydev,
ret = sfp_bus_add_upstream(bus, phydev, ops);
sfp_bus_put(bus);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL(phy_sfp_probe);
@@ -1458,6 +1456,144 @@ bool phy_driver_is_genphy_10g(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
/**
+ * phy_package_join - join a common PHY group
+ * @phydev: target phy_device struct
+ * @addr: cookie and PHY address for global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This joins a PHY group and provides a shared storage for all phydevs in
+ * this group. This is intended to be used for packages which contain
+ * more than one PHY, for example a quad PHY transceiver.
+ *
+ * The addr parameter serves as a cookie which has to have the same value
+ * for all members of one group and as a PHY address to access generic
+ * registers of a PHY package. Usually, one of the PHY addresses of the
+ * different PHYs in the package provides access to these global registers.
+ * The address which is given here, will be used in the phy_package_read()
+ * and phy_package_write() convenience functions. If your PHY doesn't have
+ * global registers you can just pick any of the PHY addresses.
+ *
+ * This will set the shared pointer of the phydev to the shared storage.
+ * If this is the first call for this cookie, the shared storage will be
+ * allocated. If priv_size is non-zero, the given number of bytes will be
+ * allocated for the priv member.
+ *
+ * Returns < 0 on error, 0 on success. In particular, calling
+ * phy_package_join() with the same cookie but a different priv_size is
+ * an error.
+ */
+int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ struct phy_package_shared *shared;
+ int ret;
+
+ if (addr < 0 || addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mutex_lock(&bus->shared_lock);
+ shared = bus->shared[addr];
+ if (!shared) {
+ ret = -ENOMEM;
+ shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+ if (!shared)
+ goto err_unlock;
+ if (priv_size) {
+ shared->priv = kzalloc(priv_size, GFP_KERNEL);
+ if (!shared->priv)
+ goto err_free;
+ shared->priv_size = priv_size;
+ }
+ shared->addr = addr;
+ refcount_set(&shared->refcnt, 1);
+ bus->shared[addr] = shared;
+ } else {
+ ret = -EINVAL;
+ if (priv_size && priv_size != shared->priv_size)
+ goto err_unlock;
+ refcount_inc(&shared->refcnt);
+ }
+ mutex_unlock(&bus->shared_lock);
+
+ phydev->shared = shared;
+
+ return 0;
+
+err_free:
+ kfree(shared);
+err_unlock:
+ mutex_unlock(&bus->shared_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_package_join);
+
+/**
+ * phy_package_leave - leave a common PHY group
+ * @phydev: target phy_device struct
+ *
+ * This leaves a PHY group created by phy_package_join(). If this phydev
+ * was the last user of the shared data between the group, this data is
+ * freed. Resets the phydev->shared pointer to NULL.
+ */
+void phy_package_leave(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct mii_bus *bus = phydev->mdio.bus;
+
+ if (!shared)
+ return;
+
+ if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
+ bus->shared[shared->addr] = NULL;
+ mutex_unlock(&bus->shared_lock);
+ kfree(shared->priv);
+ kfree(shared);
+ }
+
+ phydev->shared = NULL;
+}
+EXPORT_SYMBOL_GPL(phy_package_leave);
+
+static void devm_phy_package_leave(struct device *dev, void *res)
+{
+ phy_package_leave(*(struct phy_device **)res);
+}
+
+/**
+ * devm_phy_package_join - resource managed phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @addr: cookie and PHY address for global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed phy_package_join(). For the shared storage fetched by this
+ * function, phy_package_leave() is automatically called on driver
+ * detach. See phy_package_join() for more information.
+ */
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int addr, size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = phy_package_join(phydev, addr, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_phy_package_join);
+
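A minimal sketch of how a multi-PHY driver might use this API; the
my_phy_* names and the aligned-address layout are assumptions for
illustration, while phy_package_init_once() is the helper the mscc
driver above relies on:

	static int my_phy_probe(struct phy_device *phydev)
	{
		/* Assume a quad PHY whose package base address is the
		 * PHY address rounded down to a multiple of four.
		 */
		int base = phydev->mdio.addr & ~3;

		return devm_phy_package_join(&phydev->mdio.dev, phydev,
					     base, 0);
	}

	static int my_phy_config_init(struct phy_device *phydev)
	{
		/* Only the first PHY of the package runs the common
		 * one-time initialization.
		 */
		if (phy_package_init_once(phydev))
			return my_phy_common_init(phydev);

		return 0;
	}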
+/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
*
@@ -1524,6 +1660,9 @@ int phy_suspend(struct phy_device *phydev)
struct phy_driver *phydrv = phydev->drv;
int ret;
+ if (phydev->suspended)
+ return 0;
+
/* If the device has WOL enabled, we cannot suspend the PHY */
phy_ethtool_get_wol(phydev, &wol);
if (wol.wolopts || (netdev && netdev->wol_enabled))
@@ -1768,6 +1907,90 @@ int genphy_setup_forced(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_setup_forced);
+static int genphy_setup_master_slave(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+
+ if (!phydev->is_gigabit_capable)
+ return 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ ctl |= CTL1000_PREFER_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ break;
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= CTL1000_AS_MASTER;
+ /* fallthrough */
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ ctl |= CTL1000_ENABLE_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return phy_modify_changed(phydev, MII_CTRL1000,
+ (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER |
+ CTL1000_PREFER_MASTER), ctl);
+}
+
+static int genphy_read_master_slave(struct phy_device *phydev)
+{
+ int cfg, state;
+ int val;
+
+ if (!phydev->is_gigabit_capable) {
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+
+ val = phy_read(phydev, MII_CTRL1000);
+ if (val < 0)
+ return val;
+
+ if (val & CTL1000_ENABLE_MASTER) {
+ if (val & CTL1000_AS_MASTER)
+ cfg = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ cfg = MASTER_SLAVE_CFG_SLAVE_FORCE;
+ } else {
+ if (val & CTL1000_PREFER_MASTER)
+ cfg = MASTER_SLAVE_CFG_MASTER_PREFERRED;
+ else
+ cfg = MASTER_SLAVE_CFG_SLAVE_PREFERRED;
+ }
+
+ val = phy_read(phydev, MII_STAT1000);
+ if (val < 0)
+ return val;
+
+ if (val & LPA_1000MSFAIL) {
+ state = MASTER_SLAVE_STATE_ERR;
+ } else if (phydev->link) {
+ /* these bits are valid only for an active link */
+ if (val & LPA_1000MSRES)
+ state = MASTER_SLAVE_STATE_MASTER;
+ else
+ state = MASTER_SLAVE_STATE_SLAVE;
+ } else {
+ state = MASTER_SLAVE_STATE_UNKNOWN;
+ }
+
+ phydev->master_slave_get = cfg;
+ phydev->master_slave_state = state;
+
+ return 0;
+}
+
/**
* genphy_restart_aneg - Enable and Restart Autonegotiation
* @phydev: target phy_device struct
@@ -1826,6 +2049,12 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
if (genphy_config_eee_advert(phydev))
changed = true;
+ err = genphy_setup_master_slave(phydev);
+ if (err < 0)
+ return err;
+ else if (err)
+ changed = true;
+
if (AUTONEG_ENABLE != phydev->autoneg)
return genphy_setup_forced(phydev);
@@ -2060,6 +2289,10 @@ int genphy_read_status(struct phy_device *phydev)
phydev->pause = 0;
phydev->asym_pause = 0;
+ err = genphy_read_master_slave(phydev);
+ if (err < 0)
+ return err;
+
err = genphy_read_lpa(phydev);
if (err < 0)
return err;
@@ -2154,6 +2387,12 @@ int genphy_soft_reset(struct phy_device *phydev)
if (ret < 0)
return ret;
+ /* Clause 22 states that setting bit BMCR_RESET sets control registers
+ * to their default value. Therefore the POWER DOWN bit is supposed to
+ * be cleared after soft reset.
+ */
+ phydev->suspended = 0;
+
ret = phy_poll_reset(phydev);
if (ret)
return ret;
@@ -2627,7 +2866,6 @@ static struct phy_driver genphy_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
- .soft_reset = genphy_no_soft_reset,
.get_features = genphy_read_abilities,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 34ca12aec61b..0ab65fb75258 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -480,8 +480,8 @@ static void phylink_get_fixed_state(struct phylink *pl,
struct phylink_link_state *state)
{
*state = pl->link_config;
- if (pl->get_fixed_state)
- pl->get_fixed_state(pl->netdev, state);
+ if (pl->config->get_fixed_state)
+ pl->config->get_fixed_state(pl->config, state);
else if (pl->link_gpio)
state->link = !!gpiod_get_value_cansleep(pl->link_gpio);
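With phylink_fixed_state_cb() removed further down, MAC drivers now hook
the fixed-link state through struct phylink_config instead. A minimal
sketch of the replacement pattern, using the two members this patch
introduces (the my_mac_* names are hypothetical):

	static void my_mac_get_fixed_state(struct phylink_config *config,
					   struct phylink_link_state *state)
	{
		/* Read the link state from an out-of-band MMIO register. */
		state->link = my_mac_read_link_bit(config);
	}

	/* Before calling phylink_create(): */
	priv->phylink_config.get_fixed_state = my_mac_get_fixed_state;
	priv->phylink_config.poll_fixed_state = true;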
@@ -803,8 +803,7 @@ void phylink_destroy(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_destroy);
-static void phylink_phy_change(struct phy_device *phydev, bool up,
- bool do_carrier)
+static void phylink_phy_change(struct phy_device *phydev, bool up)
{
struct phylink *pl = phydev->phylink;
bool tx_pause, rx_pause;
@@ -1045,32 +1044,6 @@ void phylink_disconnect_phy(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
/**
- * phylink_fixed_state_cb() - allow setting a fixed link callback
- * @pl: a pointer to a &struct phylink returned from phylink_create()
- * @cb: callback to execute to determine the fixed link state.
- *
- * The MAC driver should call this driver when the state of its link
- * can be determined through e.g: an out of band MMIO register.
- */
-int phylink_fixed_state_cb(struct phylink *pl,
- void (*cb)(struct net_device *dev,
- struct phylink_link_state *state))
-{
- /* It does not make sense to let the link be overriden unless we use
- * MLO_AN_FIXED
- */
- if (pl->cfg_link_an_mode != MLO_AN_FIXED)
- return -EINVAL;
-
- mutex_lock(&pl->state_mutex);
- pl->get_fixed_state = cb;
- mutex_unlock(&pl->state_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(phylink_fixed_state_cb);
-
-/**
* phylink_mac_change() - notify phylink of a change in MAC state
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @up: indicates whether the link is currently up.
@@ -1106,6 +1079,8 @@ static irqreturn_t phylink_link_handler(int irq, void *data)
*/
void phylink_start(struct phylink *pl)
{
+ bool poll = false;
+
ASSERT_RTNL();
phylink_info(pl, "configuring for %s/%s link mode\n",
@@ -1142,10 +1117,18 @@ void phylink_start(struct phylink *pl)
irq = 0;
}
if (irq <= 0)
- mod_timer(&pl->link_poll, jiffies + HZ);
+ poll = true;
+ }
+
+ switch (pl->cfg_link_an_mode) {
+ case MLO_AN_FIXED:
+ poll |= pl->config->poll_fixed_state;
+ break;
+ case MLO_AN_INBAND:
+ poll |= pl->config->pcs_poll;
+ break;
}
- if ((pl->cfg_link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) ||
- pl->config->pcs_poll)
+ if (poll)
mod_timer(&pl->link_poll, jiffies + HZ);
if (pl->phydev)
phy_start(pl->phydev);
@@ -1648,7 +1631,7 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id,
if (mdio_phy_id_is_c45(phy_id)) {
prtad = mdio_phy_id_prtad(phy_id);
devad = mdio_phy_id_devad(phy_id);
- devad = MII_ADDR_C45 | devad << 16 | reg;
+ devad = mdiobus_c45_addr(devad, reg);
} else if (phydev->is_c45) {
switch (reg) {
case MII_BMCR:
@@ -1671,7 +1654,7 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id,
return -EINVAL;
}
prtad = phy_id;
- devad = MII_ADDR_C45 | devad << 16 | reg;
+ devad = mdiobus_c45_addr(devad, reg);
} else {
prtad = phy_id;
devad = reg;
@@ -1688,7 +1671,7 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id,
if (mdio_phy_id_is_c45(phy_id)) {
prtad = mdio_phy_id_prtad(phy_id);
devad = mdio_phy_id_devad(phy_id);
- devad = MII_ADDR_C45 | devad << 16 | reg;
+ devad = mdiobus_c45_addr(devad, reg);
} else if (phydev->is_c45) {
switch (reg) {
case MII_BMCR:
@@ -1711,7 +1694,7 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id,
return -EINVAL;
}
prtad = phy_id;
- devad = MII_ADDR_C45 | devad << 16 | reg;
+ devad = mdiobus_c45_addr(devad, reg);
} else {
prtad = phy_id;
devad = reg;
@@ -2309,7 +2292,6 @@ void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs)
}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_an_restart);
-#define C45_ADDR(d,a) (MII_ADDR_C45 | (d) << 16 | (a))
void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state)
{
@@ -2317,7 +2299,7 @@ void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
int addr = pcs->addr;
int stat;
- stat = mdiobus_read(bus, addr, C45_ADDR(MDIO_MMD_PCS, MDIO_STAT1));
+ stat = mdiobus_c45_read(bus, addr, MDIO_MMD_PCS, MDIO_STAT1);
if (stat < 0) {
state->link = false;
return;
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 2d99e9de6ee1..c7229d022a27 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -11,6 +11,7 @@
#include <linux/bitops.h>
#include <linux/phy.h>
#include <linux/module.h>
+#include <linux/delay.h>
#define RTL821x_PHYSR 0x11
#define RTL821x_PHYSR_DUPLEX BIT(13)
@@ -526,6 +527,16 @@ static int rtl8125_match_phy_device(struct phy_device *phydev)
rtlgen_supports_2_5gbps(phydev);
}
+static int rtlgen_resume(struct phy_device *phydev)
+{
+ int ret = genphy_resume(phydev);
+
+ /* Internal PHYs from RTL8168h up may not be instantly ready */
+ msleep(20);
+
+ return ret;
+}
+
static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
@@ -609,7 +620,7 @@ static struct phy_driver realtek_drvs[] = {
.match_phy_device = rtlgen_match_phy_device,
.read_status = rtlgen_read_status,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.read_mmd = rtlgen_read_mmd,
@@ -621,7 +632,7 @@ static struct phy_driver realtek_drvs[] = {
.config_aneg = rtl8125_config_aneg,
.read_status = rtl8125_read_status,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.read_mmd = rtl8125_read_mmd,
diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
index 53c214a22b95..59f1ba4d49bc 100644
--- a/drivers/net/phy/swphy.c
+++ b/drivers/net/phy/swphy.c
@@ -2,7 +2,7 @@
/*
* Software PHY emulation
*
- * Code taken from fixed_phy.c by Russell King <rmk+kernel@arm.linux.org.uk>
+ * Code taken from fixed_phy.c by Russell King.
*
* Author: Vitaly Bordug <vbordug@ru.mvista.com>
* Anton Vorontsov <avorontsov@ru.mvista.com>
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index beb054b931ee..8057ea8dbc21 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -78,7 +78,6 @@ static struct phy_driver teranetics_driver[] = {
.phy_id_mask = 0xffffffff,
.name = "Teranetics TN2020",
.features = PHY_10GBIT_FEATURES,
- .soft_reset = genphy_no_soft_reset,
.aneg_done = teranetics_aneg_done,
.config_aneg = gen10g_config_aneg,
.read_status = teranetics_read_status,
diff --git a/drivers/net/plip/Kconfig b/drivers/net/plip/Kconfig
index b41035be2d51..e03556d1d0c2 100644
--- a/drivers/net/plip/Kconfig
+++ b/drivers/net/plip/Kconfig
@@ -21,7 +21,7 @@ config PLIP
bits at a time (mode 0) or with special PLIP cables, to be used on
bidirectional parallel ports only, which can transmit 8 bits at a
time (mode 1); you can find the wiring of these cables in
- <file:Documentation/networking/PLIP.txt>. The cables can be up to
+ <file:Documentation/networking/plip.rst>. The cables can be up to
15m long. Mode 0 works also if one of the machines runs DOS/Windows
and has some PLIP software installed, e.g. the Crynwr PLIP packet
driver (<http://oak.oakland.edu/simtel.net/msdos/pktdrvr-pre.html>)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 22cc2cb9d878..7d005896a0f9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1410,6 +1410,8 @@ static int ppp_dev_init(struct net_device *dev)
{
struct ppp *ppp;
+ netdev_lockdep_set_classes(dev);
+
ppp = netdev_priv(dev);
/* Let the netdevice take a reference on the ppp file. This ensures
* that ppp_destroy_interface() won't run before the device gets
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index d760a36db28c..beedaad08255 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -490,6 +490,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb)
goto out;
+ if (skb->pkt_type != PACKET_HOST)
+ goto abort;
+
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto abort;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 8eeb38c6199e..2056d6ad04b5 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -166,7 +166,8 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
return 0;
}
-static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t rionet_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
int i;
struct rionet_private *rnet = netdev_priv(ndev);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 04845a4017f9..8c1e02752ff6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1647,6 +1647,7 @@ static int team_init(struct net_device *dev)
lockdep_register_key(&team->team_lock_key);
__mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
+ netdev_lockdep_set_classes(dev);
return 0;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 44889eba1dbc..858b012074bd 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1295,7 +1295,7 @@ resample:
static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
- struct xdp_frame *frame = convert_to_xdp_frame(xdp);
+ struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
if (unlikely(!frame))
return -EOVERFLOW;
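
convert_to_xdp_frame() is renamed to xdp_convert_buff_to_frame() here and in the other drivers below, pairing it with its inverse helper. A rough sketch of the round trip between the two representations:

    /* sketch: xdp_buff is the live per-packet view; xdp_frame is the
     * self-contained form kept in the packet's headroom for queueing */
    struct xdp_frame *xdpf = xdp_convert_buff_to_frame(&xdp); /* buff -> frame */

    if (xdpf)
            xdp_convert_frame_to_buff(xdpf, &xdp);            /* frame -> buff */
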
@@ -1671,6 +1671,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
xdp.rxq = &tfile->xdp_rxq;
+ xdp.frame_sz = buflen;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act == XDP_REDIRECT || act == XDP_TX) {
@@ -1871,8 +1872,11 @@ drop:
skb->dev = tun->dev;
break;
case IFF_TAP:
- if (!frags)
- skb->protocol = eth_type_trans(skb, tun->dev);
+ if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
+ err = -ENOMEM;
+ goto drop;
+ }
+ skb->protocol = eth_type_trans(skb, tun->dev);
break;
}
@@ -1929,9 +1933,12 @@ drop:
}
if (frags) {
+ u32 headlen;
+
/* Exercise flow dissector code path. */
- u32 headlen = eth_get_headlen(tun->dev, skb->data,
- skb_headlen(skb));
+ skb_push(skb, ETH_HLEN);
+ headlen = eth_get_headlen(tun->dev, skb->data,
+ skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
@@ -2411,6 +2418,7 @@ static int tun_xdp_one(struct tun_struct *tun,
}
xdp_set_data_meta_invalid(xdp);
xdp->rxq = &tfile->xdp_rxq;
+ xdp->frame_sz = buflen;
act = bpf_prog_run_xdp(xdp_prog, xdp);
err = tun_xdp_act(tun, xdp_prog, xdp, act);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 93044cf1417a..950711448f39 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -31,6 +31,7 @@
#define AX_ACCESS_PHY 0x02
#define AX_ACCESS_EEPROM 0x04
#define AX_ACCESS_EFUS 0x05
+#define AX_RELOAD_EEPROM_EFUSE 0x06
#define AX_PAUSE_WATERLVL_HIGH 0x54
#define AX_PAUSE_WATERLVL_LOW 0x55
@@ -611,6 +612,81 @@ ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
return 0;
}
+static int
+ax88179_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret;
+ int i;
+
+ netdev_dbg(net, "write EEPROM len %d, offset %d, magic 0x%x\n",
+ eeprom->len, eeprom->offset, eeprom->magic);
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != AX88179_EEPROM_MAGIC)
+ return -EINVAL;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ /* align data to 16 bit boundaries, read the missing data from
+ * the EEPROM */
+ if (eeprom->offset & 1) {
+ ret = ax88179_read_cmd(dev, AX_ACCESS_EEPROM, first_word, 1, 2,
+ &eeprom_buff[0]);
+ if (ret < 0) {
+ netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", first_word);
+ goto free;
+ }
+ }
+
+ if ((eeprom->offset + eeprom->len) & 1) {
+ ret = ax88179_read_cmd(dev, AX_ACCESS_EEPROM, last_word, 1, 2,
+ &eeprom_buff[last_word - first_word]);
+ if (ret < 0) {
+ netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", last_word);
+ goto free;
+ }
+ }
+
+ memcpy((u8 *)eeprom_buff + (eeprom->offset & 1), data, eeprom->len);
+
+ for (i = first_word; i <= last_word; i++) {
+ netdev_dbg(net, "write to EEPROM at offset 0x%02x, data 0x%04x\n",
+ i, eeprom_buff[i - first_word]);
+ ret = ax88179_write_cmd(dev, AX_ACCESS_EEPROM, i, 1, 2,
+ &eeprom_buff[i - first_word]);
+ if (ret < 0) {
+ netdev_err(net, "Failed to write EEPROM at offset 0x%02x.\n", i);
+ goto free;
+ }
+ msleep(20);
+ }
+
+ /* reload EEPROM data */
+ ret = ax88179_write_cmd(dev, AX_RELOAD_EEPROM_EFUSE, 0x0000, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_err(net, "Failed to reload EEPROM data\n");
+ goto free;
+ }
+
+ ret = 0;
+free:
+ kfree(eeprom_buff);
+ return ret;
+}
+
static int ax88179_get_link_ksettings(struct net_device *net,
struct ethtool_link_ksettings *cmd)
{
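
The new set_eeprom path is a classic read-modify-write over a 16-bit-word EEPROM: the byte range is rounded out to whole words, partial words at either end are pre-read, the caller's bytes are overlaid, and whole words are written back. Just the span calculation, as a self-contained sketch:

    /* sketch: map a byte range [offset, offset + len) onto 16-bit words;
     * unaligned first/last words need a pre-read before being rewritten */
    static void eeprom_word_span(int offset, int len,
                                 int *first_word, int *last_word)
    {
            *first_word = offset >> 1;            /* word of first byte */
            *last_word = (offset + len - 1) >> 1; /* word of last byte */
    }
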
@@ -784,7 +860,7 @@ static int ax88179_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = (struct ax88179_data *)dev->data;
- int ret = -EOPNOTSUPP;
+ int ret;
priv->eee_enabled = edata->eee_enabled;
if (!priv->eee_enabled) {
@@ -822,6 +898,7 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
.set_wol = ax88179_set_wol,
.get_eeprom_len = ax88179_get_eeprom_len,
.get_eeprom = ax88179_get_eeprom,
+ .set_eeprom = ax88179_set_eeprom,
.get_eee = ax88179_get_eee,
.set_eee = ax88179_set_eee,
.nway_reset = usbnet_nway_reset,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 0cdb2ce47645..a657943c9f01 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -815,14 +815,21 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
-/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
- /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 417e42c9fd03..bb8c34d746ab 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2659,7 +2659,7 @@ static struct hso_device *hso_create_bulk_serial_device(
if (!
(serial->out_endp =
hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) {
- dev_err(&interface->dev, "Failed to find BULK IN ep\n");
+ dev_err(&interface->dev, "Failed to find BULK OUT ep\n");
goto exit2;
}
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 099d84827004..a87f0dabcdb7 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -67,7 +67,7 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
{
struct cdc_ncm_ctx *ctx;
struct usb_driver *subdriver = ERR_PTR(-ENODEV);
- int ret = -ENODEV;
+ int ret;
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
int drvflags = 0;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 4bb8552a00d3..31b1d4b959f6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -719,7 +719,7 @@ static int qmi_wwan_change_dtr(struct usbnet *dev, bool on)
static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int status = -1;
+ int status;
u8 *buf = intf->cur_altsetting->extra;
int len = intf->cur_altsetting->extralen;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
@@ -1324,6 +1324,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 8f8d9883d363..7d39f998535d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1504,15 +1504,19 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
sa->sa_family = dev->type;
- if (tp->version == RTL_VER_01) {
- ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
- } else {
- /* if device doesn't support MAC pass through this will
- * be expected to be non-zero
- */
- ret = vendor_mac_passthru_addr_read(tp, sa);
- if (ret < 0)
- ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa->sa_data);
+ ret = eth_platform_get_mac_address(&dev->dev, sa->sa_data);
+ if (ret < 0) {
+ if (tp->version == RTL_VER_01) {
+ ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
+ } else {
+ /* if device doesn't support MAC pass through this will
+ * be expected to be non-zero
+ */
+ ret = vendor_mac_passthru_addr_read(tp, sa);
+ if (ret < 0)
+ ret = pla_ocp_read(tp, PLA_BACKUP, 8,
+ sa->sa_data);
+ }
}
if (ret < 0) {
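
The reshuffle above gives a firmware-provided address first claim: eth_platform_get_mac_address() consults device-tree/ACPI properties, and only when that fails does the driver fall back to its own registers. The resulting precedence, sketched with a hypothetical helper:

    /* sketch of the lookup order after this change; read_mac_from_adapter
     * is a hypothetical stand-in for the IDR/passthru/backup reads */
    ret = eth_platform_get_mac_address(&dev->dev, sa->sa_data);
    if (ret < 0)
            ret = read_mac_from_adapter(tp, sa);
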
@@ -6880,6 +6884,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 389d19dd7909..0abd257b634c 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -354,11 +354,6 @@ static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
}
-static inline int sierra_net_is_valid_addrlen(u8 len)
-{
- return len == sizeof(struct in_addr);
-}
-
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
struct lsi_umts *lsi = (struct lsi_umts *)data;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index aece0e5eec8c..b594f03eeddb 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -405,10 +405,6 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
{
struct sk_buff *skb;
- if (!buflen) {
- buflen = SKB_DATA_ALIGN(headroom + len) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- }
skb = build_skb(head, buflen);
if (!skb)
return NULL;
@@ -545,7 +541,7 @@ out:
static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
struct veth_xdp_tx_bq *bq)
{
- struct xdp_frame *frame = convert_to_xdp_frame(xdp);
+ struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
if (unlikely(!frame))
return -EOVERFLOW;
@@ -564,23 +560,22 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
struct veth_stats *stats)
{
void *hard_start = frame->data - frame->headroom;
- void *head = hard_start - sizeof(struct xdp_frame);
int len = frame->len, delta = 0;
struct xdp_frame orig_frame;
struct bpf_prog *xdp_prog;
unsigned int headroom;
struct sk_buff *skb;
+ /* bpf_xdp_adjust_head() ensures BPF cannot access the xdp_frame area */
+ hard_start -= sizeof(struct xdp_frame);
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (likely(xdp_prog)) {
struct xdp_buff xdp;
u32 act;
- xdp.data_hard_start = hard_start;
- xdp.data = frame->data;
- xdp.data_end = frame->data + frame->len;
- xdp.data_meta = frame->data - frame->metasize;
+ xdp_convert_frame_to_buff(frame, &xdp);
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -592,7 +587,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
break;
case XDP_TX:
orig_frame = *frame;
- xdp.data_hard_start = head;
xdp.rxq->mem = frame->mem;
if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
@@ -605,7 +599,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
case XDP_REDIRECT:
orig_frame = *frame;
- xdp.data_hard_start = head;
xdp.rxq->mem = frame->mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
frame = &orig_frame;
@@ -629,7 +622,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
rcu_read_unlock();
headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
- skb = veth_build_skb(head, headroom, len, 0);
+ skb = veth_build_skb(hard_start, headroom, len, frame->frame_sz);
if (!skb) {
xdp_return_frame(frame);
stats->rx_drops++;
@@ -695,9 +688,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
goto drop;
}
- nskb = veth_build_skb(head,
- VETH_XDP_HEADROOM + mac_len, skb->len,
- PAGE_SIZE);
+ nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
+ skb->len, PAGE_SIZE);
if (!nskb) {
page_frag_free(head);
goto drop;
@@ -715,6 +707,11 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
xdp.data_end = xdp.data + pktlen;
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
+
+ /* SKB "head" area always has tailroom for skb_shared_info */
+ xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
+ xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
orig_data = xdp.data;
orig_data_end = xdp.data_end;
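
frame_sz is the total buffer the XDP program may grow into. For an skb, the usable area runs from data_hard_start to skb_end_pointer(), and the head allocation always reserves an aligned skb_shared_info beyond the end pointer, hence the two-step sum. Roughly:

    /*  data_hard_start                     skb_end_pointer(skb)
     *  |<- headroom ->|<- data ->|<- tailroom ->|<- shared_info ->|
     *  |<-------------------- xdp.frame_sz --------------------->|
     */
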
@@ -758,6 +755,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
}
rcu_read_unlock();
+ /* check if bpf_xdp_adjust_head was used */
delta = orig_data - xdp.data;
off = mac_len + delta;
if (off > 0)
@@ -765,9 +763,11 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
else if (off < 0)
__skb_pull(skb, -off);
skb->mac_header -= delta;
+
+ /* check if bpf_xdp_adjust_tail was used */
off = xdp.data_end - orig_data_end;
if (off != 0)
- __skb_put(skb, off);
+ __skb_put(skb, off); /* positive on grow, negative on shrink */
skb->protocol = eth_type_trans(skb, rq->dev);
metalen = xdp.data - xdp.data_meta;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 11f722460513..ba38765dc490 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -689,6 +689,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data_end = xdp.data + len;
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = buflen;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -702,7 +703,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
break;
case XDP_TX:
stats->xdp_tx++;
- xdpf = convert_to_xdp_frame(&xdp);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
@@ -797,10 +798,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
int offset = buf - page_address(page);
struct sk_buff *head_skb, *curr_skb;
struct bpf_prog *xdp_prog;
- unsigned int truesize;
+ unsigned int truesize = mergeable_ctx_to_truesize(ctx);
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
- int err;
unsigned int metasize = 0;
+ unsigned int frame_sz;
+ int err;
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
@@ -821,6 +823,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
+ /* Buffers with headroom use PAGE_SIZE as alloc size,
+ * see add_recvbuf_mergeable() + get_mergeable_buf_len()
+ */
+ frame_sz = headroom ? PAGE_SIZE : truesize;
+
/* This happens when rx buffer size is underestimated
* or headroom is not enough because of the buffer
* was refilled before XDP is set. This should only
@@ -834,6 +841,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
page, offset,
VIRTIO_XDP_HEADROOM,
&len);
+ frame_sz = PAGE_SIZE;
+
if (!xdp_page)
goto err_xdp;
offset = VIRTIO_XDP_HEADROOM;
@@ -850,6 +859,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.data_end = xdp.data + (len - vi->hdr_len);
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = frame_sz - vi->hdr_len;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -882,7 +892,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
break;
case XDP_TX:
stats->xdp_tx++;
- xdpf = convert_to_xdp_frame(&xdp);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
@@ -924,7 +934,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();
- truesize = mergeable_ctx_to_truesize(ctx);
if (unlikely(len > truesize)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)ctx);
@@ -1243,9 +1252,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
break;
} while (rq->vq->num_free);
if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
- u64_stats_update_begin(&rq->stats.syncp);
+ unsigned long flags;
+
+ flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
rq->stats.kicks++;
- u64_stats_update_end(&rq->stats.syncp);
+ u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
}
return !oom;
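
try_fill_recv() can be reached from contexts where interrupts are live, so the stats writer switches to the irqsave variants, which protect the u64_stats seqcount on 32-bit and compile down to the plain forms where no protection is needed. The general writer pattern (st is a hypothetical per-cpu stats struct):

    unsigned long flags;

    /* sketch: interrupt-safe u64_stats writer */
    flags = u64_stats_update_begin_irqsave(&st->syncp);
    st->packets++;
    u64_stats_update_end_irqrestore(&st->syncp, flags);
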
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 8cdbb63d1bb0..c5a167a1c85c 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
-# Copyright (C) 2007-2016, VMware, Inc. All Rights Reserved.
+# Copyright (C) 2007-2020, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index db9f1fde3aac..8c014c98471c 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -92,5 +92,8 @@ enum {
UPT1_F_RSS = cpu_to_le64(0x0002),
UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */
UPT1_F_LRO = cpu_to_le64(0x0008),
+ UPT1_F_RXINNEROFLD = cpu_to_le64(0x00010), /* Geneve/Vxlan rx csum
+ * offloading
+ */
};
#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index c3a31646189f..a8d5ebd47c71 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -82,6 +82,7 @@ enum {
VMXNET3_CMD_RESERVED3,
VMXNET3_CMD_SET_COALESCE,
VMXNET3_CMD_REGISTER_MEMREGS,
+ VMXNET3_CMD_SET_RSS_FIELDS,
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -96,19 +97,20 @@ enum {
VMXNET3_CMD_GET_RESERVED1,
VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
VMXNET3_CMD_GET_COALESCE,
+ VMXNET3_CMD_GET_RSS_FIELDS,
};
/*
* Little Endian layout of bitfields -
* Byte 0 : 7.....len.....0
- * Byte 1 : rsvd gen 13.len.8
+ * Byte 1 : oco gen 13.len.8
* Byte 2 : 5.msscof.0 ext1 dtype
* Byte 3 : 13...msscof...6
*
* Big Endian layout of bitfields -
* Byte 0: 13...msscof...6
* Byte 1 : 5.msscof.0 ext1 dtype
- * Byte 2 : rsvd gen 13.len.8
+ * Byte 2 : oco gen 13.len.8
* Byte 3 : 7.....len.....0
*
* Thus, le32_to_cpu on the dword will allow the big endian driver to read
@@ -123,13 +125,13 @@ struct Vmxnet3_TxDesc {
u32 msscof:14; /* MSS, checksum offset, flags */
u32 ext1:1;
u32 dtype:1; /* descriptor type */
- u32 rsvd:1;
+ u32 oco:1;
u32 gen:1; /* generation bit */
u32 len:14;
#else
u32 len:14;
u32 gen:1; /* generation bit */
- u32 rsvd:1;
+ u32 oco:1;
u32 dtype:1; /* descriptor type */
u32 ext1:1;
u32 msscof:14; /* MSS, checksum offset, flags */
@@ -155,9 +157,10 @@ struct Vmxnet3_TxDesc {
};
/* TxDesc.OM values */
-#define VMXNET3_OM_NONE 0
-#define VMXNET3_OM_CSUM 2
-#define VMXNET3_OM_TSO 3
+#define VMXNET3_OM_NONE 0
+#define VMXNET3_OM_ENCAP 1
+#define VMXNET3_OM_CSUM 2
+#define VMXNET3_OM_TSO 3
/* fields in TxDesc we access w/o using bit fields */
#define VMXNET3_TXD_EOP_SHIFT 12
@@ -224,6 +227,8 @@ struct Vmxnet3_RxDesc {
#define VMXNET3_RXD_BTYPE_SHIFT 14
#define VMXNET3_RXD_GEN_SHIFT 31
+#define VMXNET3_RCD_HDR_INNER_SHIFT 13
+
struct Vmxnet3_RxCompDesc {
#ifdef __BIG_ENDIAN_BITFIELD
u32 ext2:1;
@@ -685,12 +690,22 @@ struct Vmxnet3_MemRegs {
struct Vmxnet3_MemoryRegion memRegs[1];
};
+enum Vmxnet3_RSSField {
+ VMXNET3_RSS_FIELDS_TCPIP4 = 0x0001,
+ VMXNET3_RSS_FIELDS_TCPIP6 = 0x0002,
+ VMXNET3_RSS_FIELDS_UDPIP4 = 0x0004,
+ VMXNET3_RSS_FIELDS_UDPIP6 = 0x0008,
+ VMXNET3_RSS_FIELDS_ESPIP4 = 0x0010,
+ VMXNET3_RSS_FIELDS_ESPIP6 = 0x0020,
+};
+
/* If the command data <= 16 bytes, use the shared memory directly.
* otherwise, use variable length configuration descriptor.
*/
union Vmxnet3_CmdInfo {
struct Vmxnet3_VariableLenConfDesc varConf;
struct Vmxnet3_SetPolling setPolling;
+ enum Vmxnet3_RSSField setRssFields;
__le64 data[2];
};
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 722cb054a5cd..ca395f9679d0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -842,21 +842,46 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
u8 protocol = 0;
if (ctx->mss) { /* TSO */
- ctx->eth_ip_hdr_size = skb_transport_offset(skb);
- ctx->l4_hdr_size = tcp_hdrlen(skb);
- ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
+ if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+ ctx->l4_offset = skb_inner_transport_offset(skb);
+ ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
+ ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+ } else {
+ ctx->l4_offset = skb_transport_offset(skb);
+ ctx->l4_hdr_size = tcp_hdrlen(skb);
+ ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+ }
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
+ /* For encap packets, skb_checksum_start_offset refers
+ * to the inner L4 offset, so the same code works for
+ * both the encap and non-encap cases
+ */
+ ctx->l4_offset = skb_checksum_start_offset(skb);
+
+ if (VMXNET3_VERSION_GE_4(adapter) &&
+ skb->encapsulation) {
+ struct iphdr *iph = inner_ip_hdr(skb);
- if (ctx->ipv4) {
- const struct iphdr *iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ protocol = iph->protocol;
+ } else {
+ const struct ipv6hdr *ipv6h;
+
+ ipv6h = inner_ipv6_hdr(skb);
+ protocol = ipv6h->nexthdr;
+ }
+ } else {
+ if (ctx->ipv4) {
+ const struct iphdr *iph = ip_hdr(skb);
- protocol = iph->protocol;
- } else if (ctx->ipv6) {
- const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ protocol = iph->protocol;
+ } else if (ctx->ipv6) {
+ const struct ipv6hdr *ipv6h;
- protocol = ipv6h->nexthdr;
+ ipv6h = ipv6_hdr(skb);
+ protocol = ipv6h->nexthdr;
+ }
}
switch (protocol) {
@@ -871,10 +896,10 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
break;
}
- ctx->copy_size = min(ctx->eth_ip_hdr_size +
+ ctx->copy_size = min(ctx->l4_offset +
ctx->l4_hdr_size, skb->len);
} else {
- ctx->eth_ip_hdr_size = 0;
+ ctx->l4_offset = 0;
ctx->l4_hdr_size = 0;
/* copy as much as allowed */
ctx->copy_size = min_t(unsigned int,
@@ -930,6 +955,25 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
static void
+vmxnet3_prepare_inner_tso(struct sk_buff *skb,
+ struct vmxnet3_tx_ctx *ctx)
+{
+ struct tcphdr *tcph = inner_tcp_hdr(skb);
+ struct iphdr *iph = inner_ip_hdr(skb);
+
+ if (iph->version == 4) {
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ } else {
+ struct ipv6hdr *iph = inner_ipv6_hdr(skb);
+
+ tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ }
+}
+
+static void
vmxnet3_prepare_tso(struct sk_buff *skb,
struct vmxnet3_tx_ctx *ctx)
{
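
Both prepare helpers follow the usual TSO contract: zero the IPv4 header checksum and seed the TCP checksum with a pseudo-header sum computed over length 0, since the NIC accounts for each segment's payload length as it slices the super-frame; the inner variant simply applies this to the inner headers. In isolation:

    /* sketch: pseudo-header seed for an IPv4 TCP TSO frame; the length
     * argument is 0 because the device fills in per-segment lengths */
    iph->check = 0;
    tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                                     IPPROTO_TCP, 0);
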
@@ -987,6 +1031,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
/* Use temporary descriptor to avoid touching bits multiple times */
union Vmxnet3_GenericDesc tempTxDesc;
#endif
+ struct udphdr *udph;
count = txd_estimate(skb);
@@ -1003,7 +1048,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
tq->stats.copy_skb_header++;
}
- vmxnet3_prepare_tso(skb, &ctx);
+ if (skb->encapsulation) {
+ vmxnet3_prepare_inner_tso(skb, &ctx);
+ } else {
+ vmxnet3_prepare_tso(skb, &ctx);
+ }
} else {
if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
@@ -1026,14 +1075,14 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
BUG_ON(ret <= 0 && ctx.copy_size != 0);
/* hdrs parsed, check against other limits */
if (ctx.mss) {
- if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
+ if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
VMXNET3_MAX_TX_BUF_SIZE)) {
tq->stats.drop_oversized_hdr++;
goto drop_pkt;
}
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (unlikely(ctx.eth_ip_hdr_size +
+ if (unlikely(ctx.l4_offset +
skb->csum_offset >
VMXNET3_MAX_CSUM_OFFSET)) {
tq->stats.drop_oversized_hdr++;
@@ -1080,16 +1129,34 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
#endif
tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
if (ctx.mss) {
- gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
- gdesc->txd.om = VMXNET3_OM_TSO;
- gdesc->txd.msscof = ctx.mss;
+ if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+ gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+ gdesc->txd.om = VMXNET3_OM_ENCAP;
+ gdesc->txd.msscof = ctx.mss;
+
+ udph = udp_hdr(skb);
+ if (udph->check)
+ gdesc->txd.oco = 1;
+ } else {
+ gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+ gdesc->txd.om = VMXNET3_OM_TSO;
+ gdesc->txd.msscof = ctx.mss;
+ }
num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- gdesc->txd.hlen = ctx.eth_ip_hdr_size;
- gdesc->txd.om = VMXNET3_OM_CSUM;
- gdesc->txd.msscof = ctx.eth_ip_hdr_size +
- skb->csum_offset;
+ if (VMXNET3_VERSION_GE_4(adapter) &&
+ skb->encapsulation) {
+ gdesc->txd.hlen = ctx.l4_offset +
+ ctx.l4_hdr_size;
+ gdesc->txd.om = VMXNET3_OM_ENCAP;
+ gdesc->txd.msscof = 0; /* Reserved */
+ } else {
+ gdesc->txd.hlen = ctx.l4_offset;
+ gdesc->txd.om = VMXNET3_OM_CSUM;
+ gdesc->txd.msscof = ctx.l4_offset +
+ skb->csum_offset;
+ }
} else {
gdesc->txd.om = 0;
gdesc->txd.msscof = 0;
@@ -1168,13 +1235,21 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
(le32_to_cpu(gdesc->dword[3]) &
VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
- BUG_ON(gdesc->rcd.frg);
+ WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+ !(le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+ WARN_ON_ONCE(gdesc->rcd.frg &&
+ !(le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
(1 << VMXNET3_RCD_TUC_SHIFT))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
- BUG_ON(gdesc->rcd.frg);
+ WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+ !(le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+ WARN_ON_ONCE(gdesc->rcd.frg &&
+ !(le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
} else {
if (gdesc->rcd.csum) {
skb->csum = htons(gdesc->rcd.csum);
@@ -2429,6 +2504,10 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+ if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM))
+ devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
+
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
devRead->misc.queueDescLen = cpu_to_le32(
@@ -2554,6 +2633,39 @@ vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
+static void
+vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_4(adapter))
+ return;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+
+ if (adapter->default_rss_fields) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_RSS_FIELDS);
+ adapter->rss_fields =
+ VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ } else {
+ cmdInfo->setRssFields = adapter->rss_fields;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_RSS_FIELDS);
+ /* Not all requested RSS may get applied, so get and
+ * cache what was actually applied.
+ */
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_RSS_FIELDS);
+ adapter->rss_fields =
+ VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ }
+
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
+
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
@@ -2603,6 +2715,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
}
vmxnet3_init_coalesce(adapter);
+ vmxnet3_init_rssfields(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
VMXNET3_WRITE_BAR0_REG(adapter,
@@ -3039,6 +3152,18 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_LRO;
+
+ if (VMXNET3_VERSION_GE_4(adapter)) {
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+
if (dma64)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->hw_features &
@@ -3382,7 +3507,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
- if (ver & (1 << VMXNET3_REV_3)) {
+ if (ver & (1 << VMXNET3_REV_4)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_4);
+ adapter->version = VMXNET3_REV_4 + 1;
+ } else if (ver & (1 << VMXNET3_REV_3)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_3);
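
Revision probing stays highest-first: VRRS advertises a bitmask of revisions the device supports, and the driver acks the best one it understands by writing that single bit back. The same ladder expressed as a loop, as a sketch rather than the driver's actual structure:

    /* sketch: negotiate the highest mutually supported revision */
    u32 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
    int rev;

    for (rev = VMXNET3_REV_4; rev >= VMXNET3_REV_1; rev--) {
            if (ver & (1 << rev)) {
                    VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS,
                                           1 << rev);
                    adapter->version = rev + 1; /* stored one-based */
                    break;
            }
    }
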
@@ -3430,6 +3560,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->default_coal_mode = true;
}
+ if (VMXNET3_VERSION_GE_4(adapter)) {
+ adapter->default_rss_fields = true;
+ adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
+ }
+
SET_NETDEV_DEV(netdev, &pdev->dev);
vmxnet3_declare_features(adapter, dma64);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 6528940ce5f3..def27afa1c69 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -267,14 +267,43 @@ netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
return features;
}
+static void vmxnet3_enable_encap_offloads(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (VMXNET3_VERSION_GE_4(adapter)) {
+ netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+}
+
+static void vmxnet3_disable_encap_offloads(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (VMXNET3_VERSION_GE_4(adapter)) {
+ netdev->hw_enc_features &= ~(NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+ }
+}
+
int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
netdev_features_t changed = features ^ netdev->features;
+ netdev_features_t tun_offload_mask = NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ u8 udp_tun_enabled = (netdev->features & tun_offload_mask) != 0;
if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
- NETIF_F_HW_VLAN_CTAG_RX)) {
+ NETIF_F_HW_VLAN_CTAG_RX | tun_offload_mask)) {
if (features & NETIF_F_RXCSUM)
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXCSUM;
@@ -297,6 +326,17 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXVLAN;
+ if ((features & tun_offload_mask) != 0 && !udp_tun_enabled) {
+ vmxnet3_enable_encap_offloads(netdev);
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXINNEROFLD;
+ } else if ((features & tun_offload_mask) == 0 &&
+ udp_tun_enabled) {
+ vmxnet3_disable_encap_offloads(netdev);
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXINNEROFLD;
+ }
+
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
@@ -665,18 +705,244 @@ out:
return err;
}
+static int
+vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
+ struct ethtool_rxnfc *info)
+{
+ enum Vmxnet3_RSSField rss_fields;
+
+ if (netif_running(adapter->netdev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_RSS_FIELDS);
+ rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ } else {
+ rss_fields = adapter->rss_fields;
+ }
+
+ info->data = 0;
+
+ /* Report default options for RSS on vmxnet3 */
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP4)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP6)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+vmxnet3_set_rss_hash_opt(struct net_device *netdev,
+ struct vmxnet3_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ enum Vmxnet3_RSSField rss_fields = adapter->rss_fields;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (rss_fields != adapter->rss_fields) {
+ adapter->default_rss_fields = false;
+ if (netif_running(netdev)) {
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ cmdInfo->setRssFields = rss_fields;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_RSS_FIELDS);
+
+ /* Not all requested RSS may get applied, so get and
+ * cache what was actually applied.
+ */
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_RSS_FIELDS);
+ adapter->rss_fields =
+ VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ } else {
+ /* When the device is activated, we will try to apply
+ * these rules and cache the applied value later.
+ */
+ adapter->rss_fields = rss_fields;
+ }
+ }
+ return 0;
+}
static int
vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
u32 *rules)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
info->data = adapter->num_rx_queues;
- return 0;
+ break;
+ case ETHTOOL_GRXFH:
+ if (!VMXNET3_VERSION_GE_4(adapter)) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+#ifdef VMXNET3_RSS
+ if (!adapter->rss) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+#endif
+ err = vmxnet3_get_rss_hash_opts(adapter, info);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int
+vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (!VMXNET3_VERSION_GE_4(adapter)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+#ifdef VMXNET3_RSS
+ if (!adapter->rss) {
+ err = -EOPNOTSUPP;
+ goto done;
}
- return -EOPNOTSUPP;
+#endif
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ err = vmxnet3_set_rss_hash_opt(netdev, adapter, info);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+done:
+ return err;
}
#ifdef VMXNET3_RSS
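
With ETHTOOL_GRXFH/SRXFH wired up, the per-flow-type hash tuple becomes user-tunable on v4 devices. Assuming an interface named ens192, querying and then enabling 4-tuple hashing for UDP/IPv4 would look like:

    ethtool -n ens192 rx-flow-hash udp4        # show current hash fields
    ethtool -N ens192 rx-flow-hash udp4 sdfn   # hash src/dst IP + src/dst port
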
@@ -700,6 +966,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!p)
return 0;
+ if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
+ return 0;
while (n--)
p[n] = rssConf->indTable[n];
return 0;
@@ -887,6 +1155,7 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_ringparam = vmxnet3_get_ringparam,
.set_ringparam = vmxnet3_set_ringparam,
.get_rxnfc = vmxnet3_get_rxnfc,
+ .set_rxnfc = vmxnet3_set_rxnfc,
#ifdef VMXNET3_RSS
.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
.get_rxfh = vmxnet3_get_rss,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 1cc1cd4aaa59..5d2b062215a2 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -69,18 +69,19 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.17.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.5.0.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
-#define VMXNET3_DRIVER_VERSION_NUM 0x01041100
+#define VMXNET3_DRIVER_VERSION_NUM 0x01050000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
+#define VMXNET3_REV_4 3 /* Vmxnet3 Rev. 4 */
#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
@@ -218,10 +219,16 @@ struct vmxnet3_tx_ctx {
bool ipv4;
bool ipv6;
u16 mss;
- u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
- * offloading
+ u32 l4_offset; /* only valid for pkts requesting tso or csum
+ * offloading. For encap offload, it refers to
+ * the inner L4 offset, i.e. it covers the outer
+ * headers, the encap header and the inner eth and
+ * ip headers
+ */
+
+ u32 l4_hdr_size; /* only valid if mss != 0
+ * Refers to inner L4 hdr size for encap
+ * offload
*/
- u32 l4_hdr_size; /* only valid if mss != 0 */
u32 copy_size; /* # of bytes copied into the data ring */
union Vmxnet3_GenericDesc *sop_txd;
union Vmxnet3_GenericDesc *eop_txd;
@@ -376,6 +383,8 @@ struct vmxnet3_adapter {
u16 rxdata_desc_size;
bool rxdataring_enabled;
+ bool default_rss_fields;
+ enum Vmxnet3_RSSField rss_fields;
struct work_struct work;
@@ -412,6 +421,8 @@ struct vmxnet3_adapter {
(adapter->version >= VMXNET3_REV_2 + 1)
#define VMXNET3_VERSION_GE_3(adapter) \
(adapter->version >= VMXNET3_REV_3 + 1)
+#define VMXNET3_VERSION_GE_4(adapter) \
+ (adapter->version >= VMXNET3_REV_4 + 1)
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
@@ -435,6 +446,8 @@ struct vmxnet3_adapter {
#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs)
#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate)
+#define VMXNET3_RSS_FIELDS_DEFAULT (VMXNET3_RSS_FIELDS_TCPIP4 | \
+ VMXNET3_RSS_FIELDS_TCPIP6)
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 56f8aab46f89..43928a1c2f2a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -867,6 +867,7 @@ static int vrf_dev_init(struct net_device *dev)
/* similarly, oper state is irrelevant; set to up to avoid confusion */
dev->operstate = IF_OPER_UP;
+ netdev_lockdep_set_classes(dev);
return 0;
out_rth:
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a5b415fed11e..5bb448ae6c9c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -26,6 +26,7 @@
#include <net/netns/generic.h>
#include <net/tun_proto.h>
#include <net/vxlan.h>
+#include <net/nexthop.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
@@ -78,6 +79,9 @@ struct vxlan_fdb {
u16 state; /* see ndm_state */
__be32 vni;
u16 flags; /* see ndm_flags and below */
+ struct list_head nh_list;
+ struct nexthop __rcu *nh;
+ struct vxlan_dev __rcu *vdev;
};
#define NTF_VXLAN_ADDED_BY_USER 0x100
@@ -174,11 +178,15 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port)
*/
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
+ if (rcu_access_pointer(fdb->nh))
+ return NULL;
return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}
static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
+ if (rcu_access_pointer(fdb->nh))
+ return NULL;
return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
@@ -251,9 +259,12 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
{
unsigned long now = jiffies;
struct nda_cacheinfo ci;
+ bool send_ip, send_eth;
struct nlmsghdr *nlh;
+ struct nexthop *nh;
struct ndmsg *ndm;
- bool send_ip, send_eth;
+ int nh_family;
+ u32 nh_id;
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
if (nlh == NULL)
@@ -264,16 +275,28 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
send_eth = send_ip = true;
+ rcu_read_lock();
+ nh = rcu_dereference(fdb->nh);
+ if (nh) {
+ nh_family = nexthop_get_family(nh);
+ nh_id = nh->id;
+ }
+ rcu_read_unlock();
+
if (type == RTM_GETNEIGH) {
- send_ip = !vxlan_addr_any(&rdst->remote_ip);
+ if (rdst) {
+ send_ip = !vxlan_addr_any(&rdst->remote_ip);
+ ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
+ } else if (nh) {
+ ndm->ndm_family = nh_family;
+ }
send_eth = !is_zero_ether_addr(fdb->eth_addr);
- ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
} else
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_state = fdb->state;
ndm->ndm_ifindex = vxlan->dev->ifindex;
ndm->ndm_flags = fdb->flags;
- if (rdst->offloaded)
+ if (rdst && rdst->offloaded)
ndm->ndm_flags |= NTF_OFFLOADED;
ndm->ndm_type = RTN_UNICAST;
@@ -284,23 +307,30 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
+ if (nh) {
+ if (nla_put_u32(skb, NDA_NH_ID, nh_id))
+ goto nla_put_failure;
+ } else if (rdst) {
+ if (send_ip && vxlan_nla_put_addr(skb, NDA_DST,
+ &rdst->remote_ip))
+ goto nla_put_failure;
+
+ if (rdst->remote_port &&
+ rdst->remote_port != vxlan->cfg.dst_port &&
+ nla_put_be16(skb, NDA_PORT, rdst->remote_port))
+ goto nla_put_failure;
+ if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
+ nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
+ goto nla_put_failure;
+ if (rdst->remote_ifindex &&
+ nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
+ goto nla_put_failure;
+ }
- if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
- goto nla_put_failure;
-
- if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
- nla_put_be16(skb, NDA_PORT, rdst->remote_port))
- goto nla_put_failure;
- if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
- nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
- goto nla_put_failure;
if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
nla_put_u32(skb, NDA_SRC_VNI,
be32_to_cpu(fdb->vni)))
goto nla_put_failure;
- if (rdst->remote_ifindex &&
- nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
- goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
ci.ndm_confirmed = 0;
@@ -401,7 +431,7 @@ static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
{
int err;
- if (swdev_notify) {
+ if (swdev_notify && rd) {
switch (type) {
case RTM_NEWNEIGH:
err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
@@ -793,8 +823,9 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
-static struct vxlan_fdb *vxlan_fdb_alloc(const u8 *mac, __u16 state,
- __be32 src_vni, __u16 ndm_flags)
+static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac,
+ __u16 state, __be32 src_vni,
+ __u16 ndm_flags)
{
struct vxlan_fdb *f;
@@ -805,6 +836,9 @@ static struct vxlan_fdb *vxlan_fdb_alloc(const u8 *mac, __u16 state,
f->flags = ndm_flags;
f->updated = f->used = jiffies;
f->vni = src_vni;
+ f->nh = NULL;
+ RCU_INIT_POINTER(f->vdev, vxlan);
+ INIT_LIST_HEAD(&f->nh_list);
INIT_LIST_HEAD(&f->remotes);
memcpy(f->eth_addr, mac, ETH_ALEN);
@@ -819,11 +853,78 @@ static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
vxlan_fdb_head(vxlan, mac, src_vni));
}
+static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+ u32 nhid, struct netlink_ext_ack *extack)
+{
+ struct nexthop *old_nh = rtnl_dereference(fdb->nh);
+ struct nh_group *nhg;
+ struct nexthop *nh;
+ int err = -EINVAL;
+
+ if (old_nh && old_nh->id == nhid)
+ return 0;
+
+ nh = nexthop_find_by_id(vxlan->net, nhid);
+ if (!nh) {
+ NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
+ goto err_inval;
+ }
+
+ if (nh) {
+ if (!nexthop_get(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+ nh = NULL;
+ goto err_inval;
+ }
+ if (!nh->is_fdb_nh) {
+ NL_SET_ERR_MSG(extack, "Nexthop is not an fdb nexthop");
+ goto err_inval;
+ }
+
+ nhg = rtnl_dereference(nh->nh_grp);
+ if (!nh->is_group || !nhg->mpath) {
+ NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
+ goto err_inval;
+ }
+
+ /* check nexthop group family */
+ switch (vxlan->default_dst.remote_ip.sa.sa_family) {
+ case AF_INET:
+ if (!nhg->has_v4) {
+ err = -EAFNOSUPPORT;
+ NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
+ goto err_inval;
+ }
+ break;
+ case AF_INET6:
+ if (nhg->has_v4) {
+ err = -EAFNOSUPPORT;
+ NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
+ goto err_inval;
+ }
+ }
+ }
+
+ if (old_nh) {
+ list_del_rcu(&fdb->nh_list);
+ nexthop_put(old_nh);
+ }
+ rcu_assign_pointer(fdb->nh, nh);
+ list_add_tail_rcu(&fdb->nh_list, &nh->fdb_list);
+ return 1;
+
+err_inval:
+ if (nh)
+ nexthop_put(nh);
+ return err;
+}
+
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni,
__be32 vni, __u32 ifindex, __u16 ndm_flags,
- struct vxlan_fdb **fdb)
+ u32 nhid, struct vxlan_fdb **fdb,
+ struct netlink_ext_ack *extack)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
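
An FDB entry may now point at a shared ECMP nexthop group instead of carrying its own remote list. Assuming an iproute2 with FDB-nexthop support and a device named vxlan0, the intended usage is roughly:

    ip nexthop add id 10 via 10.0.0.2 fdb
    ip nexthop add id 11 via 10.0.0.3 fdb
    ip nexthop add id 20 group 10/11 fdb
    bridge fdb add 02:02:00:00:00:01 dev vxlan0 self static nhid 20
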
@@ -834,24 +935,37 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return -ENOSPC;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
- f = vxlan_fdb_alloc(mac, state, src_vni, ndm_flags);
+ f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
if (!f)
return -ENOMEM;
- rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
- if (rc < 0) {
- kfree(f);
- return rc;
- }
+ if (nhid)
+ rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack);
+ else
+ rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+ if (rc < 0)
+ goto errout;
*fdb = f;
return 0;
+
+errout:
+ kfree(f);
+ return rc;
}
static void __vxlan_fdb_free(struct vxlan_fdb *f)
{
struct vxlan_rdst *rd, *nd;
+ struct nexthop *nh;
+
+ nh = rcu_dereference_raw(f->nh);
+ if (nh) {
+ rcu_assign_pointer(f->nh, NULL);
+ rcu_assign_pointer(f->vdev, NULL);
+ nexthop_put(nh);
+ }
list_for_each_entry_safe(rd, nd, &f->remotes, list) {
dst_cache_destroy(&rd->dst_cache);
@@ -875,12 +989,18 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr);
--vxlan->addrcnt;
- if (do_notify)
- list_for_each_entry(rd, &f->remotes, list)
- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
+ if (do_notify) {
+ if (rcu_access_pointer(f->nh))
+ vxlan_fdb_notify(vxlan, f, NULL, RTM_DELNEIGH,
swdev_notify, NULL);
+ else
+ list_for_each_entry(rd, &f->remotes, list)
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
+ swdev_notify, NULL);
+ }
hlist_del_rcu(&f->hlist);
+ list_del_rcu(&f->nh_list);
call_rcu(&f->rcu, vxlan_fdb_free);
}
@@ -897,7 +1017,7 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
__u16 state, __u16 flags,
__be16 port, __be32 vni,
__u32 ifindex, __u16 ndm_flags,
- struct vxlan_fdb *f,
+ struct vxlan_fdb *f, u32 nhid,
bool swdev_notify,
struct netlink_ext_ack *extack)
{
@@ -908,6 +1028,18 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
int rc = 0;
int err;
+ if (nhid && !rcu_access_pointer(f->nh)) {
+ NL_SET_ERR_MSG(extack,
+ "Cannot replace an existing non nexthop fdb with a nexthop");
+ return -EOPNOTSUPP;
+ }
+
+ if (nhid && (flags & NLM_F_APPEND)) {
+ NL_SET_ERR_MSG(extack,
+ "Cannot append to a nexthop fdb");
+ return -EOPNOTSUPP;
+ }
+
/* Do not allow an externally learned entry to take over an entry added
* by the user.
*/
@@ -929,10 +1061,17 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
/* Only change unicasts */
if (!(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
- rc = vxlan_fdb_replace(f, ip, port, vni,
- ifindex, &oldrd);
+ if (nhid) {
+ rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = vxlan_fdb_replace(f, ip, port, vni,
+ ifindex, &oldrd);
+ }
notify |= rc;
} else {
+ NL_SET_ERR_MSG(extack, "Cannot replace non-unicast fdb entries");
return -EOPNOTSUPP;
}
}
@@ -962,6 +1101,8 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
return 0;
err_notify:
+ if (nhid)
+ return err;
if ((flags & NLM_F_REPLACE) && rc)
*rd = oldrd;
else if ((flags & NLM_F_APPEND) && rc) {
@@ -975,7 +1116,7 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 src_vni, __be32 vni,
- __u32 ifindex, __u16 ndm_flags,
+ __u32 ifindex, __u16 ndm_flags, u32 nhid,
bool swdev_notify,
struct netlink_ext_ack *extack)
{
@@ -990,7 +1131,7 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
- vni, ifindex, fdb_flags, &f);
+ vni, ifindex, fdb_flags, nhid, &f, extack);
if (rc < 0)
return rc;
@@ -1012,7 +1153,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 src_vni, __be32 vni,
- __u32 ifindex, __u16 ndm_flags,
+ __u32 ifindex, __u16 ndm_flags, u32 nhid,
bool swdev_notify,
struct netlink_ext_ack *extack)
{
@@ -1028,14 +1169,15 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
return vxlan_fdb_update_existing(vxlan, ip, state, flags, port,
vni, ifindex, ndm_flags, f,
- swdev_notify, extack);
+ nhid, swdev_notify, extack);
} else {
if (!(flags & NLM_F_CREATE))
return -ENOENT;
return vxlan_fdb_update_create(vxlan, mac, ip, state, flags,
port, src_vni, vni, ifindex,
- ndm_flags, swdev_notify, extack);
+ ndm_flags, nhid, swdev_notify,
+ extack);
}
}
@@ -1049,11 +1191,15 @@ static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
- __be32 *vni, u32 *ifindex)
+ __be32 *vni, u32 *ifindex, u32 *nhid)
{
struct net *net = dev_net(vxlan->dev);
int err;
+ if (tb[NDA_NH_ID] && (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] ||
+ tb[NDA_PORT]))
+ return -EINVAL;
+
if (tb[NDA_DST]) {
err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
if (err)
@@ -1109,6 +1255,11 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
*ifindex = 0;
}
+ if (tb[NDA_NH_ID])
+ *nhid = nla_get_u32(tb[NDA_NH_ID]);
+ else
+ *nhid = 0;
+
return 0;
}
@@ -1123,7 +1274,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
union vxlan_addr ip;
__be16 port;
__be32 src_vni, vni;
- u32 ifindex;
+ u32 ifindex, nhid;
u32 hash_index;
int err;
@@ -1133,10 +1284,11 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EINVAL;
}
- if (tb[NDA_DST] == NULL)
+ if (!tb || (!tb[NDA_DST] && !tb[NDA_NH_ID]))
return -EINVAL;
- err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
+ err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex,
+ &nhid);
if (err)
return err;
@@ -1148,7 +1300,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
- true, extack);
+ nhid, true, extack);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
@@ -1159,8 +1311,8 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
__be16 port, __be32 src_vni, __be32 vni,
u32 ifindex, bool swdev_notify)
{
- struct vxlan_fdb *f;
struct vxlan_rdst *rd = NULL;
+ struct vxlan_fdb *f;
int err = -ENOENT;
f = vxlan_find_mac(vxlan, addr, src_vni);
@@ -1195,12 +1347,13 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct vxlan_dev *vxlan = netdev_priv(dev);
union vxlan_addr ip;
__be32 src_vni, vni;
- __be16 port;
- u32 ifindex;
+ u32 ifindex, nhid;
u32 hash_index;
+ __be16 port;
int err;
- err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
+ err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex,
+ &nhid);
if (err)
return err;
@@ -1228,6 +1381,17 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
+ if (rcu_access_pointer(f->nh)) {
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI, NULL);
+ if (err < 0)
+ goto out;
+ continue;
+ }
+
list_for_each_entry_rcu(rd, &f->remotes, list) {
if (*idx < cb->args[2])
goto skip;
@@ -1311,6 +1475,10 @@ static bool vxlan_snoop(struct net_device *dev,
if (f->state & (NUD_PERMANENT | NUD_NOARP))
return true;
+ /* Don't let a learnt entry override an fdb entry that uses a nexthop */
+ if (rcu_access_pointer(f->nh))
+ return true;
+
if (net_ratelimit())
netdev_info(dev,
"%pM migrated from %pIS to %pIS\n",
@@ -1333,7 +1501,7 @@ static bool vxlan_snoop(struct net_device *dev,
vxlan->cfg.dst_port,
vni,
vxlan->default_dst.remote_vni,
- ifindex, NTF_SELF, true, NULL);
+ ifindex, NTF_SELF, 0, true, NULL);
spin_unlock(&vxlan->hash_lock[hash_index]);
}
@@ -1924,6 +2092,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
ns_olen = request->len - skb_network_offset(request) -
sizeof(struct ipv6hdr) - sizeof(*ns);
for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+ if (!ns->opt[i + 1]) {
+ kfree_skb(reply);
+ return NULL;
+ }
if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
break;
@@ -2616,6 +2788,38 @@ tx_error:
kfree_skb(skb);
}
+static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
+ struct vxlan_fdb *f, __be32 vni, bool did_rsc)
+{
+ struct vxlan_rdst nh_rdst;
+ struct nexthop *nh;
+ bool do_xmit;
+ u32 hash;
+
+ memset(&nh_rdst, 0, sizeof(struct vxlan_rdst));
+ hash = skb_get_hash(skb);
+
+ rcu_read_lock();
+ nh = rcu_dereference(f->nh);
+ if (!nh) {
+ rcu_read_unlock();
+ goto drop;
+ }
+ do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst);
+ rcu_read_unlock();
+
+ if (likely(do_xmit))
+ vxlan_xmit_one(skb, dev, vni, &nh_rdst, did_rsc);
+ else
+ goto drop;
+
+ return;
+
+drop:
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+}
+
/* Transmit local packets over Vxlan
*
* Outer IP header inherits ECN and DF from inner header.
@@ -2692,22 +2896,27 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- list_for_each_entry_rcu(rdst, &f->remotes, list) {
- struct sk_buff *skb1;
+ if (rcu_access_pointer(f->nh)) {
+ vxlan_xmit_nh(skb, dev, f,
+ (vni ? : vxlan->default_dst.remote_vni), did_rsc);
+ } else {
+ list_for_each_entry_rcu(rdst, &f->remotes, list) {
+ struct sk_buff *skb1;
- if (!fdst) {
- fdst = rdst;
- continue;
+ if (!fdst) {
+ fdst = rdst;
+ continue;
+ }
+ skb1 = skb_clone(skb, GFP_ATOMIC);
+ if (skb1)
+ vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
}
- skb1 = skb_clone(skb, GFP_ATOMIC);
- if (skb1)
- vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
+ if (fdst)
+ vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
+ else
+ kfree_skb(skb);
}
- if (fdst)
- vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
- else
- kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -3615,7 +3824,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
dst->remote_vni,
dst->remote_vni,
dst->remote_ifindex,
- NTF_SELF, &f);
+ NTF_SELF, 0, &f, extack);
if (err)
return err;
}
@@ -4013,7 +4222,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
vxlan->cfg.dst_port,
conf.vni, conf.vni,
conf.remote_ifindex,
- NTF_SELF, true, extack);
+ NTF_SELF, 0, true, extack);
if (err) {
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
netdev_adjacent_change_abort(dst->remote_dev,
@@ -4335,7 +4544,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
fdb_info->remote_vni,
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
- false, extack);
+ 0, false, extack);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
@@ -4410,6 +4619,43 @@ static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
.notifier_call = vxlan_switchdev_event,
};
+static void vxlan_fdb_nh_flush(struct nexthop *nh)
+{
+ struct vxlan_fdb *fdb;
+ struct vxlan_dev *vxlan;
+ u32 hash_index;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(fdb, &nh->fdb_list, nh_list) {
+ vxlan = rcu_dereference(fdb->vdev);
+ WARN_ON(!vxlan);
+ hash_index = fdb_head_index(vxlan, fdb->eth_addr,
+ vxlan->default_dst.remote_vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ if (!hlist_unhashed(&fdb->hlist))
+ vxlan_fdb_destroy(vxlan, fdb, false, false);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ }
+ rcu_read_unlock();
+}
+
+static int vxlan_nexthop_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct nexthop *nh = ptr;
+
+ if (!nh || event != NEXTHOP_EVENT_DEL)
+ return NOTIFY_DONE;
+
+ vxlan_fdb_nh_flush(nh);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vxlan_nexthop_notifier_block __read_mostly = {
+ .notifier_call = vxlan_nexthop_event,
+};
+
static __net_init int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -4421,7 +4667,7 @@ static __net_init int vxlan_init_net(struct net *net)
for (h = 0; h < PORT_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vn->sock_list[h]);
- return 0;
+ return register_nexthop_notifier(net, &vxlan_nexthop_notifier_block);
}
static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
@@ -4454,6 +4700,8 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
+ unregister_nexthop_notifier(net, &vxlan_nexthop_notifier_block);
+ list_for_each_entry(net, net_list, exit_list)
vxlan_destroy_tunnels(net, &list);
unregister_netdevice_many(&list);
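The nexthop hunks above replace the per-FDB remote list with a group lookup keyed by the skb flow hash (vxlan_fdb_nh_path_select() itself is not part of this diff). A minimal standalone sketch of that selection idea, using a made-up group layout rather than the kernel's struct nexthop:

/* Illustrative only: hash-based selection over a nexthop group, analogous
 * in spirit to vxlan_xmit_nh() -> vxlan_fdb_nh_path_select(). The group
 * layout and names here are hypothetical, not the kernel's nexthop API. */
#include <stdint.h>
#include <stdio.h>

struct demo_remote { uint32_t ip; uint16_t port; };

struct demo_nh_group {
	int num_paths;
	struct demo_remote paths[4];
};

static const struct demo_remote *demo_path_select(const struct demo_nh_group *grp,
						  uint32_t flow_hash)
{
	/* Same flow hash -> same member, so one flow stays on one path. */
	return &grp->paths[flow_hash % grp->num_paths];
}

int main(void)
{
	struct demo_nh_group grp = {
		.num_paths = 2,
		.paths = { { 0x0a000001, 4789 }, { 0x0a000002, 4789 } },
	};
	uint32_t hashes[] = { 0x1234, 0x1235, 0x1234 };

	for (int i = 0; i < 3; i++)
		printf("hash %#x -> remote %#x\n", (unsigned)hashes[i],
		       (unsigned)demo_path_select(&grp, hashes[i])->ip);
	return 0;
}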
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index dbc0e3f7a3e2..3e21726c36e8 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -336,7 +336,7 @@ config DLCI
To use frame relay, you need supporting hardware (called FRAD) and
certain programs from the net-tools package as explained in
- <file:Documentation/networking/framerelay.txt>.
+ <file:Documentation/networking/framerelay.rst>.
To compile this driver as a module, choose M here: the
module will be called dlci.
@@ -361,7 +361,7 @@ config SDLA
These are multi-protocol cards, but only Frame Relay is supported
by the driver at this time. Please read
- <file:Documentation/networking/framerelay.txt>.
+ <file:Documentation/networking/framerelay.rst>.
To compile this driver as a module, choose M here: the
module will be called sdla.
diff --git a/drivers/net/wireguard/messages.h b/drivers/net/wireguard/messages.h
index b8a7b9ce32ba..208da72673fc 100644
--- a/drivers/net/wireguard/messages.h
+++ b/drivers/net/wireguard/messages.h
@@ -32,7 +32,7 @@ enum cookie_values {
};
enum counter_values {
- COUNTER_BITS_TOTAL = 2048,
+ COUNTER_BITS_TOTAL = 8192,
COUNTER_REDUNDANT_BITS = BITS_PER_LONG,
COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS
};
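For reference, on a 64-bit build the usable window implied by these constants grows from 2048 - 64 = 1984 to 8192 - 64 = 8128 nonces. A small compile-time sketch of that arithmetic, assuming BITS_PER_LONG is 64:

/* Sketch: the usable replay window after this change, on a 64-bit build.
 * Constants mirror the hunk above; BITS_PER_LONG of 64 is an assumption. */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG          64
#define COUNTER_BITS_TOTAL     8192
#define COUNTER_REDUNDANT_BITS BITS_PER_LONG
#define COUNTER_WINDOW_SIZE    (COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS)

static_assert(COUNTER_WINDOW_SIZE == 8128, "window grew from 1984 to 8128");

int main(void)
{
	printf("replay window: %d nonces (was %d)\n",
	       COUNTER_WINDOW_SIZE, 2048 - BITS_PER_LONG);
	return 0;
}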
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index 708dc61c974f..626433690abb 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -104,6 +104,7 @@ static struct noise_keypair *keypair_create(struct wg_peer *peer)
if (unlikely(!keypair))
return NULL;
+ spin_lock_init(&keypair->receiving_counter.lock);
keypair->internal_id = atomic64_inc_return(&keypair_counter);
keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
keypair->entry.peer = peer;
@@ -358,25 +359,16 @@ out:
memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
}
-static void symmetric_key_init(struct noise_symmetric_key *key)
-{
- spin_lock_init(&key->counter.receive.lock);
- atomic64_set(&key->counter.counter, 0);
- memset(key->counter.receive.backtrack, 0,
- sizeof(key->counter.receive.backtrack));
- key->birthdate = ktime_get_coarse_boottime_ns();
- key->is_valid = true;
-}
-
static void derive_keys(struct noise_symmetric_key *first_dst,
struct noise_symmetric_key *second_dst,
const u8 chaining_key[NOISE_HASH_LEN])
{
+ u64 birthdate = ktime_get_coarse_boottime_ns();
kdf(first_dst->key, second_dst->key, NULL, NULL,
NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
chaining_key);
- symmetric_key_init(first_dst);
- symmetric_key_init(second_dst);
+ first_dst->birthdate = second_dst->birthdate = birthdate;
+ first_dst->is_valid = second_dst->is_valid = true;
}
static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
@@ -715,6 +707,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
u8 e[NOISE_PUBLIC_KEY_LEN];
u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
u8 static_private[NOISE_PUBLIC_KEY_LEN];
+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];
down_read(&wg->static_identity.lock);
@@ -733,6 +726,8 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
memcpy(ephemeral_private, handshake->ephemeral_private,
NOISE_PUBLIC_KEY_LEN);
+ memcpy(preshared_key, handshake->preshared_key,
+ NOISE_SYMMETRIC_KEY_LEN);
up_read(&handshake->lock);
if (state != HANDSHAKE_CREATED_INITIATION)
@@ -750,7 +745,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
goto fail;
/* psk */
- mix_psk(chaining_key, hash, key, handshake->preshared_key);
+ mix_psk(chaining_key, hash, key, preshared_key);
/* {} */
if (!message_decrypt(NULL, src->encrypted_nothing,
@@ -783,6 +778,7 @@ out:
memzero_explicit(chaining_key, NOISE_HASH_LEN);
memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
+ memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN);
up_read(&wg->static_identity.lock);
return ret_peer;
}
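The consume-response change above snapshots the preshared key while the handshake read lock is held, instead of dereferencing handshake->preshared_key later without it. A generic userspace sketch of the copy-under-lock pattern, with pthreads standing in for the kernel rwsem and hypothetical names:

/* Pattern sketch: snapshot shared state under a read lock, use the private
 * copy afterwards, then scrub it. Not kernel code; pthreads stand in for
 * the rwsem, and plain memset stands in for memzero_explicit(). */
#include <pthread.h>
#include <string.h>

#define KEY_LEN 32

struct demo_handshake {
	pthread_rwlock_t lock;
	unsigned char preshared_key[KEY_LEN];
};

static void demo_consume(struct demo_handshake *hs)
{
	unsigned char psk[KEY_LEN];

	pthread_rwlock_rdlock(&hs->lock);
	memcpy(psk, hs->preshared_key, KEY_LEN);  /* copy while locked */
	pthread_rwlock_unlock(&hs->lock);

	/* ... long-running crypto uses the private copy, not hs->... */

	memset(psk, 0, KEY_LEN);  /* kernel uses memzero_explicit() */
}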
diff --git a/drivers/net/wireguard/noise.h b/drivers/net/wireguard/noise.h
index f532d59d3f19..c527253dba80 100644
--- a/drivers/net/wireguard/noise.h
+++ b/drivers/net/wireguard/noise.h
@@ -15,18 +15,14 @@
#include <linux/mutex.h>
#include <linux/kref.h>
-union noise_counter {
- struct {
- u64 counter;
- unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
- spinlock_t lock;
- } receive;
- atomic64_t counter;
+struct noise_replay_counter {
+ u64 counter;
+ spinlock_t lock;
+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
};
struct noise_symmetric_key {
u8 key[NOISE_SYMMETRIC_KEY_LEN];
- union noise_counter counter;
u64 birthdate;
bool is_valid;
};
@@ -34,7 +30,9 @@ struct noise_symmetric_key {
struct noise_keypair {
struct index_hashtable_entry entry;
struct noise_symmetric_key sending;
+ atomic64_t sending_counter;
struct noise_symmetric_key receiving;
+ struct noise_replay_counter receiving_counter;
__le32 remote_index;
bool i_am_the_initiator;
struct kref refcount;
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 3432232afe06..c58df439dbbe 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -87,12 +87,20 @@ static inline bool wg_check_packet_protocol(struct sk_buff *skb)
return real_protocol && skb->protocol == real_protocol;
}
-static inline void wg_reset_packet(struct sk_buff *skb)
+static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
+ u8 l4_hash = skb->l4_hash;
+ u8 sw_hash = skb->sw_hash;
+ u32 hash = skb->hash;
skb_scrub_packet(skb, true);
memset(&skb->headers_start, 0,
offsetof(struct sk_buff, headers_end) -
offsetof(struct sk_buff, headers_start));
+ if (encapsulating) {
+ skb->l4_hash = l4_hash;
+ skb->sw_hash = sw_hash;
+ skb->hash = hash;
+ }
skb->queue_mapping = 0;
skb->nohdr = 0;
skb->peeked = 0;
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 3bb5b9ae7cd1..91438144e4f7 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -245,20 +245,20 @@ static void keep_key_fresh(struct wg_peer *peer)
}
}
-static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
+static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
struct scatterlist sg[MAX_SKB_FRAGS + 8];
struct sk_buff *trailer;
unsigned int offset;
int num_frags;
- if (unlikely(!key))
+ if (unlikely(!keypair))
return false;
- if (unlikely(!READ_ONCE(key->is_valid) ||
- wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) ||
- key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
- WRITE_ONCE(key->is_valid, false);
+ if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
+ wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
+ keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
+ WRITE_ONCE(keypair->receiving.is_valid, false);
return false;
}
@@ -283,7 +283,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
PACKET_CB(skb)->nonce,
- key->key))
+ keypair->receiving.key))
return false;
/* Another ugly situation of pushing and pulling the header so as to
@@ -298,41 +298,41 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
}
/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
-static bool counter_validate(union noise_counter *counter, u64 their_counter)
+static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter)
{
unsigned long index, index_current, top, i;
bool ret = false;
- spin_lock_bh(&counter->receive.lock);
+ spin_lock_bh(&counter->lock);
- if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 ||
+ if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 ||
their_counter >= REJECT_AFTER_MESSAGES))
goto out;
++their_counter;
if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
- counter->receive.counter))
+ counter->counter))
goto out;
index = their_counter >> ilog2(BITS_PER_LONG);
- if (likely(their_counter > counter->receive.counter)) {
- index_current = counter->receive.counter >> ilog2(BITS_PER_LONG);
+ if (likely(their_counter > counter->counter)) {
+ index_current = counter->counter >> ilog2(BITS_PER_LONG);
top = min_t(unsigned long, index - index_current,
COUNTER_BITS_TOTAL / BITS_PER_LONG);
for (i = 1; i <= top; ++i)
- counter->receive.backtrack[(i + index_current) &
+ counter->backtrack[(i + index_current) &
((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
- counter->receive.counter = their_counter;
+ counter->counter = their_counter;
}
index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
- &counter->receive.backtrack[index]);
+ &counter->backtrack[index]);
out:
- spin_unlock_bh(&counter->receive.lock);
+ spin_unlock_bh(&counter->lock);
return ret;
}
@@ -472,19 +472,19 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
if (unlikely(state != PACKET_STATE_CRYPTED))
goto next;
- if (unlikely(!counter_validate(&keypair->receiving.counter,
+ if (unlikely(!counter_validate(&keypair->receiving_counter,
PACKET_CB(skb)->nonce))) {
net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
peer->device->dev->name,
PACKET_CB(skb)->nonce,
- keypair->receiving.counter.receive.counter);
+ keypair->receiving_counter.counter);
goto next;
}
if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
goto next;
- wg_reset_packet(skb);
+ wg_reset_packet(skb, false);
wg_packet_consume_data_done(peer, skb, &endpoint);
free = false;
@@ -511,8 +511,8 @@ void wg_packet_decrypt_worker(struct work_struct *work)
struct sk_buff *skb;
while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
- enum packet_state state = likely(decrypt_packet(skb,
- &PACKET_CB(skb)->keypair->receiving)) ?
+ enum packet_state state =
+ likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
wg_queue_enqueue_per_peer_napi(skb, state);
if (need_resched())
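A standalone sketch of the RFC6479 window that counter_validate() implements: a ring of bitmap words indexed by counter / BITS_PER_LONG, so advancing the window means zeroing whole words rather than shifting the bitmap. Locking and the REJECT_AFTER_MESSAGES bounds are omitted here:

/* Standalone sketch of the sliding-window replay check above. The kernel
 * version uses unsigned long words and a spinlock; this uses uint64_t and
 * no locking, purely to show the ring-of-words mechanics. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG 64
#define WINDOW_BITS   8192
#define WINDOW_WORDS  (WINDOW_BITS / BITS_PER_LONG)

struct replay {
	uint64_t counter;                 /* highest accepted counter + 1 */
	uint64_t backtrack[WINDOW_WORDS];
};

static bool replay_check(struct replay *r, uint64_t their)
{
	uint64_t index, bit;

	++their;
	if (their + (WINDOW_BITS - BITS_PER_LONG) < r->counter)
		return false;             /* too far behind the window */

	index = their / BITS_PER_LONG;
	if (their > r->counter) {         /* window slides forward */
		uint64_t cur = r->counter / BITS_PER_LONG;
		uint64_t top = index - cur;

		if (top > WINDOW_WORDS)
			top = WINDOW_WORDS;
		for (uint64_t i = 1; i <= top; ++i)
			r->backtrack[(i + cur) % WINDOW_WORDS] = 0;
		r->counter = their;
	}
	index %= WINDOW_WORDS;
	bit = 1ULL << (their % BITS_PER_LONG);
	if (r->backtrack[index] & bit)
		return false;             /* replayed */
	r->backtrack[index] |= bit;
	return true;
}

int main(void)
{
	struct replay r;

	memset(&r, 0, sizeof(r));
	printf("%d %d %d\n", replay_check(&r, 0),   /* 1: fresh     */
			     replay_check(&r, 0),   /* 0: replay    */
			     replay_check(&r, 5));  /* 1: in window */
	return 0;
}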
diff --git a/drivers/net/wireguard/selftest/counter.c b/drivers/net/wireguard/selftest/counter.c
index f4fbb9072ed7..ec3c156bf91b 100644
--- a/drivers/net/wireguard/selftest/counter.c
+++ b/drivers/net/wireguard/selftest/counter.c
@@ -6,18 +6,24 @@
#ifdef DEBUG
bool __init wg_packet_counter_selftest(void)
{
+ struct noise_replay_counter *counter;
unsigned int test_num = 0, i;
- union noise_counter counter;
bool success = true;
-#define T_INIT do { \
- memset(&counter, 0, sizeof(union noise_counter)); \
- spin_lock_init(&counter.receive.lock); \
+ counter = kmalloc(sizeof(*counter), GFP_KERNEL);
+ if (unlikely(!counter)) {
+ pr_err("nonce counter self-test malloc: FAIL\n");
+ return false;
+ }
+
+#define T_INIT do { \
+ memset(counter, 0, sizeof(*counter)); \
+ spin_lock_init(&counter->lock); \
} while (0)
#define T_LIM (COUNTER_WINDOW_SIZE + 1)
#define T(n, v) do { \
++test_num; \
- if (counter_validate(&counter, n) != (v)) { \
+ if (counter_validate(counter, n) != (v)) { \
pr_err("nonce counter self-test %u: FAIL\n", \
test_num); \
success = false; \
@@ -99,6 +105,7 @@ bool __init wg_packet_counter_selftest(void)
if (success)
pr_info("nonce counter self-tests: pass\n");
+ kfree(counter);
return success;
}
#endif
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 6687db699803..f74b9341ab0f 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -129,7 +129,7 @@ static void keep_key_fresh(struct wg_peer *peer)
rcu_read_lock_bh();
keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
send = keypair && READ_ONCE(keypair->sending.is_valid) &&
- (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES ||
+ (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES ||
(keypair->i_am_the_initiator &&
wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
rcu_read_unlock_bh();
@@ -167,6 +167,11 @@ static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
struct sk_buff *trailer;
int num_frags;
+ /* Force hash calculation before encryption so that flow analysis is
+ * consistent over the inner packet.
+ */
+ skb_get_hash(skb);
+
/* Calculate lengths. */
padding_len = calculate_skb_padding(skb);
trailer_len = padding_len + noise_encrypted_len(0);
@@ -295,7 +300,7 @@ void wg_packet_encrypt_worker(struct work_struct *work)
skb_list_walk_safe(first, skb, next) {
if (likely(encrypt_packet(skb,
PACKET_CB(first)->keypair))) {
- wg_reset_packet(skb);
+ wg_reset_packet(skb, true);
} else {
state = PACKET_STATE_DEAD;
break;
@@ -344,7 +349,6 @@ void wg_packet_purge_staged_packets(struct wg_peer *peer)
void wg_packet_send_staged_packets(struct wg_peer *peer)
{
- struct noise_symmetric_key *key;
struct noise_keypair *keypair;
struct sk_buff_head packets;
struct sk_buff *skb;
@@ -364,10 +368,9 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
rcu_read_unlock_bh();
if (unlikely(!keypair))
goto out_nokey;
- key = &keypair->sending;
- if (unlikely(!READ_ONCE(key->is_valid)))
+ if (unlikely(!READ_ONCE(keypair->sending.is_valid)))
goto out_nokey;
- if (unlikely(wg_birthdate_has_expired(key->birthdate,
+ if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
REJECT_AFTER_TIME)))
goto out_invalid;
@@ -382,7 +385,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
*/
PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
PACKET_CB(skb)->nonce =
- atomic64_inc_return(&key->counter.counter) - 1;
+ atomic64_inc_return(&keypair->sending_counter) - 1;
if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
goto out_invalid;
}
@@ -394,7 +397,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
return;
out_invalid:
- WRITE_ONCE(key->is_valid, false);
+ WRITE_ONCE(keypair->sending.is_valid, false);
out_nokey:
wg_noise_keypair_put(keypair, false);
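Nonce allocation above is a single atomic fetch-and-add per packet, so concurrent encryption workers still get unique, strictly increasing nonces from the shared sending_counter. A C11-atomics sketch of the same idea (the exhaustion bound below is illustrative, not WireGuard's exact constant):

/* Sketch: per-keypair nonce allocation as in wg_packet_send_staged_packets().
 * atomic64_inc_return(&ctr) - 1 is a fetch-and-add: each packet gets a
 * unique nonce, and the key is invalidated once the bound is reached. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define REJECT_AFTER_MESSAGES (UINT64_MAX - 8192)  /* illustrative bound */

static _Atomic uint64_t sending_counter;

static int next_nonce(uint64_t *nonce)
{
	*nonce = atomic_fetch_add(&sending_counter, 1);
	return *nonce < REJECT_AFTER_MESSAGES ? 0 : -1;  /* key exhausted */
}

int main(void)
{
	uint64_t n;

	for (int i = 0; i < 3; i++)
		if (!next_nonce(&n))
			printf("nonce %llu\n", (unsigned long long)n);
	return 0;
}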
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 1c98d781ae49..15b0ad171f4c 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -57,7 +57,7 @@ config PCMCIA_RAYCS
---help---
Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
(PC-card) wireless Ethernet networking card to your computer.
- Please read the file <file:Documentation/networking/ray_cs.txt> for
+ Please read the file <file:Documentation/networking/ray_cs.rst> for
details.
To compile this driver as a module, choose M here: the module will be
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 6b3ff02a373d..b99fd0eff994 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -28,11 +28,10 @@ config ATH10K_AHB
This module adds support for AHB bus
config ATH10K_SDIO
- tristate "Atheros ath10k SDIO support (EXPERIMENTAL)"
+ tristate "Atheros ath10k SDIO support"
depends on ATH10K && MMC
---help---
- This module adds experimental support for SDIO/MMC bus. Currently
- work in progress and will not fully work.
+ This module adds support for SDIO/MMC bus.
config ATH10K_USB
tristate "Atheros ath10k USB support (EXPERIMENTAL)"
@@ -42,7 +41,7 @@ config ATH10K_USB
work in progress and will not fully work.
config ATH10K_SNOC
- tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
+ tristate "Qualcomm ath10k SNOC support"
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
select QCOM_QMI_HELPERS
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index ea908107581d..5b6db6e66f65 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -380,6 +380,7 @@ static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 l
NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device\n");
+ kfree(cmd);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index a7478c240f78..75df79d43120 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -110,7 +110,7 @@ struct ath10k_ce_ring {
struct ce_desc_64 *shadow_base;
/* keep last */
- void *per_transfer_context[0];
+ void *per_transfer_context[];
};
struct ath10k_ce_pipe {
@@ -419,7 +419,7 @@ struct ce_pipe_config {
#define PIPEDIR_INOUT 3 /* bidirectional */
/* Establish a mapping between a service/direction and a pipe. */
-struct service_to_pipe {
+struct ce_service_to_pipe {
__le32 service_id;
__le32 pipedir;
__le32 pipenum;
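The [0] -> [] changes here and in the headers below convert GNU zero-length arrays to C99 flexible array members; sizeof and the allocation math are unchanged. A short sketch of how such a struct is sized and allocated:

/* Sketch: why the [0] -> [] conversions are size-neutral. A C99 flexible
 * array member contributes nothing to sizeof; the allocation still has to
 * add room for n trailing elements (what the kernel's struct_size() does). */
#include <stdio.h>
#include <stdlib.h>

struct demo_ring {
	unsigned int nentries;
	void *per_transfer_context[];   /* flexible array member */
};

int main(void)
{
	unsigned int n = 8;
	struct demo_ring *r =
		malloc(sizeof(*r) + n * sizeof(r->per_transfer_context[0]));

	if (!r)
		return 1;
	r->nentries = n;
	printf("header %zu bytes, total %zu bytes\n", sizeof(*r),
	       sizeof(*r) + n * sizeof(r->per_transfer_context[0]));
	free(r);
	return 0;
}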
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index f26cc6989dad..22b6937ac225 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -190,6 +190,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin_workaround = true,
.tx_stats_over_pktlog = false,
.bmi_large_size_download = true,
+ .supports_peer_stats_info = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -723,15 +724,12 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (ret)
return ret;
- /* Data transfer is not initiated, when reduced Tx completion
- * is used for SDIO. disable it until fixed
- */
- param &= ~HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
+ param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
- /* Alternate credit size of 1544 as used by SDIO firmware is
- * not big enough for mac80211 / native wifi frames. disable it
- */
- param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL)
+ param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+ else
+ param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
if (mode == ATH10K_FIRMWARE_MODE_UTF)
param &= ~HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
@@ -2717,7 +2715,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
- status = ath10k_hif_swap_mailbox(ar);
+ status = ath10k_hif_start_post(ar);
if (status) {
ath10k_err(ar, "failed to swap mailbox: %d\n", status);
goto err_hif_stop;
@@ -3280,6 +3278,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->thermal.wmi_sync);
init_completion(&ar->bss_survey_done);
init_completion(&ar->peer_delete_done);
+ init_completion(&ar->peer_stats_info_complete);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
@@ -3291,6 +3290,11 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
if (!ar->workqueue_aux)
goto err_free_wq;
+ ar->workqueue_tx_complete =
+ create_singlethread_workqueue("ath10k_tx_complete_wq");
+ if (!ar->workqueue_tx_complete)
+ goto err_free_aux_wq;
+
mutex_init(&ar->conf_mutex);
mutex_init(&ar->dump_mutex);
spin_lock_init(&ar->data_lock);
@@ -3318,7 +3322,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ret = ath10k_coredump_create(ar);
if (ret)
- goto err_free_aux_wq;
+ goto err_free_tx_complete;
ret = ath10k_debug_create(ar);
if (ret)
@@ -3328,12 +3332,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
err_free_coredump:
ath10k_coredump_destroy(ar);
-
+err_free_tx_complete:
+ destroy_workqueue(ar->workqueue_tx_complete);
err_free_aux_wq:
destroy_workqueue(ar->workqueue_aux);
err_free_wq:
destroy_workqueue(ar->workqueue);
-
err_free_mac:
ath10k_mac_destroy(ar);
@@ -3349,6 +3353,9 @@ void ath10k_core_destroy(struct ath10k *ar)
flush_workqueue(ar->workqueue_aux);
destroy_workqueue(ar->workqueue_aux);
+ flush_workqueue(ar->workqueue_tx_complete);
+ destroy_workqueue(ar->workqueue_tx_complete);
+
ath10k_debug_destroy(ar);
ath10k_coredump_destroy(ar);
ath10k_htt_tx_destroy(&ar->htt);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index bd8ef576c590..5c18f6c20462 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -149,6 +149,26 @@ static inline u32 host_interest_item_address(u32 item_offset)
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
}
+enum ath10k_phy_mode {
+ ATH10K_PHY_MODE_LEGACY = 0,
+ ATH10K_PHY_MODE_HT = 1,
+ ATH10K_PHY_MODE_VHT = 2,
+};
+
+/* Data rate 100KBPS based on IE Index */
+struct ath10k_index_ht_data_rate_type {
+ u8 beacon_rate_index;
+ u16 supported_rate[4];
+};
+
+/* Data rate 100KBPS based on IE Index */
+struct ath10k_index_vht_data_rate_type {
+ u8 beacon_rate_index;
+ u16 supported_VHT80_rate[2];
+ u16 supported_VHT40_rate[2];
+ u16 supported_VHT20_rate[2];
+};
+
struct ath10k_bmi {
bool done_sent;
};
@@ -500,8 +520,14 @@ struct ath10k_sta {
u16 peer_id;
struct rate_info txrate;
struct ieee80211_tx_info tx_info;
+ u32 tx_retries;
+ u32 tx_failed;
u32 last_tx_bitrate;
+ u32 rx_rate_code;
+ u32 rx_bitrate_kbps;
+ u32 tx_rate_code;
+ u32 tx_bitrate_kbps;
struct work_struct update_wk;
u64 rx_duration;
struct ath10k_htt_tx_stats *tx_stats;
@@ -949,6 +975,11 @@ struct ath10k {
struct ieee80211_hw *hw;
struct ieee80211_ops *ops;
struct device *dev;
+ struct msa_region {
+ dma_addr_t paddr;
+ u32 mem_size;
+ void *vaddr;
+ } msa;
u8 mac_addr[ETH_ALEN];
enum ath10k_hw_rev hw_rev;
@@ -1087,11 +1118,12 @@ struct ath10k {
int last_wmi_vdev_start_status;
struct completion vdev_setup_done;
struct completion vdev_delete_done;
+ struct completion peer_stats_info_complete;
struct workqueue_struct *workqueue;
/* Auxiliary workqueue */
struct workqueue_struct *workqueue_aux;
-
+ struct workqueue_struct *workqueue_tx_complete;
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
@@ -1132,6 +1164,8 @@ struct ath10k {
struct work_struct register_work;
struct work_struct restart_work;
+ struct work_struct bundle_tx_work;
+ struct work_struct tx_complete_work;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock
@@ -1228,7 +1262,7 @@ struct ath10k {
int coex_gpio_pin;
/* must be last */
- u8 drv_priv[0] __aligned(sizeof(void *));
+ u8 drv_priv[] __aligned(sizeof(void *));
};
static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 8bf03e8c1d3a..e760ce1a5f1e 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -88,7 +88,7 @@ struct ath10k_dump_file_data {
u8 unused[128];
/* struct ath10k_tlv_dump_data + more */
- u8 data[0];
+ u8 data[];
} __packed;
struct ath10k_dump_ram_data_hdr {
@@ -100,7 +100,7 @@ struct ath10k_dump_ram_data_hdr {
/* length of payload data, not including this header */
__le32 length;
- u8 data[0];
+ u8 data[];
};
/* magic number to fill the holes not copied due to sections in regions */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index f811e6940fb0..e8250a665433 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -349,7 +349,7 @@ free:
spin_unlock_bh(&ar->data_lock);
}
-static int ath10k_debug_fw_stats_request(struct ath10k *ar)
+int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
unsigned long timeout, time_left;
int ret;
@@ -778,7 +778,7 @@ static ssize_t ath10k_mem_value_read(struct file *file,
ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
if (ret) {
- ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n",
+ ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
(u32)(*ppos), ret);
goto exit;
}
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 82f7eb8583d9..997c1c80aba7 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -65,7 +65,7 @@ struct ath10k_pktlog_hdr {
__le16 log_type; /* Type of log information following this header */

__le16 size; /* Size of variable length log information in bytes */
__le32 timestamp;
- u8 payload[0];
+ u8 payload[];
} __packed;
/* FIXME: How to calculate the buffer size sanely? */
@@ -125,6 +125,9 @@ static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
{
return ar->debug.enable_extd_tx_stats;
}
+
+int ath10k_debug_fw_stats_request(struct ath10k *ar);
+
#else
static inline int ath10k_debug_start(struct ath10k *ar)
@@ -192,6 +195,11 @@ static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
return 0;
}
+static inline int ath10k_debug_fw_stats_request(struct ath10k *ar)
+{
+ return 0;
+}
+
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 496ee34a4d78..9e45fd9073a6 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -54,7 +54,9 @@ struct ath10k_hif_ops {
*/
void (*stop)(struct ath10k *ar);
- int (*swap_mailbox)(struct ath10k *ar);
+ int (*start_post)(struct ath10k *ar);
+
+ int (*get_htt_tx_complete)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
@@ -137,10 +139,17 @@ static inline void ath10k_hif_stop(struct ath10k *ar)
return ar->hif.ops->stop(ar);
}
-static inline int ath10k_hif_swap_mailbox(struct ath10k *ar)
+static inline int ath10k_hif_start_post(struct ath10k *ar)
+{
+ if (ar->hif.ops->start_post)
+ return ar->hif.ops->start_post(ar);
+ return 0;
+}
+
+static inline int ath10k_hif_get_htt_tx_complete(struct ath10k *ar)
{
- if (ar->hif.ops->swap_mailbox)
- return ar->hif.ops->swap_mailbox(ar);
+ if (ar->hif.ops->get_htt_tx_complete)
+ return ar->hif.ops->get_htt_tx_complete(ar);
return 0;
}
@@ -161,7 +170,8 @@ static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
u8 pipe_id, int force)
{
- ar->hif.ops->send_complete_check(ar, pipe_id, force);
+ if (ar->hif.ops->send_complete_check)
+ ar->hif.ops->send_complete_check(ar, pipe_id, force);
}
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
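The new start_post and get_htt_tx_complete wrappers follow the same optional-op guard now applied to send_complete_check: a bus backend that doesn't implement the hook leaves it NULL, and the inline wrapper degrades to a successful no-op. A minimal sketch of the pattern with illustrative names:

/* Sketch of the optional-op guard used by the HIF wrappers above. The
 * struct and names are made up for illustration. */
#include <stdio.h>

struct demo_hif_ops {
	int (*start_post)(void *ar);    /* optional hook */
};

static inline int demo_hif_start_post(struct demo_hif_ops *ops, void *ar)
{
	if (ops->start_post)
		return ops->start_post(ar);
	return 0;                       /* op absent: succeed as a no-op */
}

int main(void)
{
	struct demo_hif_ops pci_ops = { .start_post = NULL };

	printf("pci start_post -> %d\n", demo_hif_start_post(&pci_ops, NULL));
	return 0;
}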
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 2248d6c022f4..31df6dd04bf6 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -51,10 +51,12 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k *ar = ep->htc->ar;
+ struct ath10k_htc_hdr *hdr;
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
+ hdr = (struct ath10k_htc_hdr *)skb->data;
ath10k_htc_restore_tx_skb(ep->htc, skb);
if (!ep->ep_ops.ep_tx_complete) {
@@ -63,6 +65,11 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
return;
}
+ if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
@@ -78,7 +85,7 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
- if (ep->tx_credit_flow_enabled)
+ if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
@@ -86,6 +93,63 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
+static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
+ unsigned int len,
+ bool consume)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ enum ath10k_htc_ep_id eid = ep->eid;
+ int credits, ret = 0;
+
+ if (!ep->tx_credit_flow_enabled)
+ return 0;
+
+ credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+
+ if (ep->tx_credits < credits) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d consume %d\n",
+ eid, credits, ep->tx_credits, consume);
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ if (consume) {
+ ep->tx_credits -= credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits total %d\n",
+ eid, credits, ep->tx_credits);
+ }
+
+unlock:
+ spin_unlock_bh(&htc->tx_lock);
+ return ret;
+}
+
+static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ enum ath10k_htc_ep_id eid = ep->eid;
+ int credits;
+
+ if (!ep->tx_credit_flow_enabled)
+ return;
+
+ credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+}
+
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
@@ -95,8 +159,8 @@ int ath10k_htc_send(struct ath10k_htc *htc,
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
struct device *dev = htc->ar->dev;
- int credits = 0;
int ret;
+ unsigned int skb_len;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@@ -108,23 +172,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
skb_push(skb, sizeof(struct ath10k_htc_hdr));
- if (ep->tx_credit_flow_enabled) {
- credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
- spin_lock_bh(&htc->tx_lock);
- if (ep->tx_credits < credits) {
- ath10k_dbg(ar, ATH10K_DBG_HTC,
- "htc insufficient credits ep %d required %d available %d\n",
- eid, credits, ep->tx_credits);
- spin_unlock_bh(&htc->tx_lock);
- ret = -EAGAIN;
- goto err_pull;
- }
- ep->tx_credits -= credits;
- ath10k_dbg(ar, ATH10K_DBG_HTC,
- "htc ep %d consumed %d credits (total %d)\n",
- eid, credits, ep->tx_credits);
- spin_unlock_bh(&htc->tx_lock);
- }
+ skb_len = skb->len;
+ ret = ath10k_htc_consume_credit(ep, skb_len, true);
+ if (ret)
+ goto err_pull;
ath10k_htc_prepare_tx_skb(ep, skb);
@@ -155,17 +206,7 @@ err_unmap:
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
- if (ep->tx_credit_flow_enabled) {
- spin_lock_bh(&htc->tx_lock);
- ep->tx_credits += credits;
- ath10k_dbg(ar, ATH10K_DBG_HTC,
- "htc ep %d reverted %d credits back (total %d)\n",
- eid, credits, ep->tx_credits);
- spin_unlock_bh(&htc->tx_lock);
-
- if (ep->ep_ops.ep_tx_credits)
- ep->ep_ops.ep_tx_credits(htc->ar);
- }
+ ath10k_htc_release_credit(ep, skb_len);
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
@@ -581,6 +622,278 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
return allocation;
}
+static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
+ struct sk_buff *bundle_skb,
+ struct sk_buff_head *tx_save_head)
+{
+ struct ath10k_hif_sg_item sg_item;
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ struct sk_buff *skb;
+ int ret, cn = 0;
+ unsigned int skb_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
+ skb_len = bundle_skb->len;
+ ret = ath10k_htc_consume_credit(ep, skb_len, true);
+
+ if (!ret) {
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = bundle_skb;
+ sg_item.vaddr = bundle_skb->data;
+ sg_item.len = bundle_skb->len;
+
+ ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
+ if (ret)
+ ath10k_htc_release_credit(ep, skb_len);
+ }
+
+ if (ret)
+ dev_kfree_skb_any(bundle_skb);
+
+ for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
+ if (ret) {
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ skb_queue_head(&ep->tx_req_head, skb);
+ } else {
+ skb_queue_tail(&ep->tx_complete_head, skb);
+ }
+ }
+
+ if (!ret)
+ queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "bundle tx status %d eid %d req count %d count %d len %d\n",
+ ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
+ return ret;
+}
+
+static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ int ret;
+
+ ret = ath10k_htc_send(htc, ep->eid, skb);
+
+ if (ret)
+ skb_queue_head(&ep->tx_req_head, skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
+ ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
+}
+
+static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct sk_buff *bundle_skb, *skb;
+ struct sk_buff_head tx_save_head;
+ struct ath10k_htc_hdr *hdr;
+ u8 *bundle_buf;
+ int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;
+
+ if (htc->ar->state == ATH10K_STATE_WEDGED)
+ return -ECOMM;
+
+ if (ep->tx_credit_flow_enabled &&
+ ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
+ return 0;
+
+ bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+ bundle_skb = dev_alloc_skb(bundles_left);
+
+ if (!bundle_skb)
+ return -ENOMEM;
+
+ bundle_buf = bundle_skb->data;
+ skb_queue_head_init(&tx_save_head);
+
+ while (true) {
+ skb = skb_dequeue(&ep->tx_req_head);
+ if (!skb)
+ break;
+
+ credit_pad = 0;
+ trans_len = skb->len + sizeof(*hdr);
+ credit_remainder = trans_len % ep->tx_credit_size;
+
+ if (credit_remainder != 0) {
+ credit_pad = ep->tx_credit_size - credit_remainder;
+ trans_len += credit_pad;
+ }
+
+ ret = ath10k_htc_consume_credit(ep,
+ bundle_buf + trans_len - bundle_skb->data,
+ false);
+ if (ret) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ break;
+ }
+
+ if (bundles_left < trans_len) {
+ bundle_skb->len = bundle_buf - bundle_skb->data;
+ ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+
+ if (ret) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return ret;
+ }
+
+ if (skb_queue_len(&ep->tx_req_head) == 0) {
+ ath10k_htc_send_one_skb(ep, skb);
+ return ret;
+ }
+
+ if (ep->tx_credit_flow_enabled &&
+ ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return 0;
+ }
+
+ bundles_left =
+ ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+ bundle_skb = dev_alloc_skb(bundles_left);
+
+ if (!bundle_skb) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return -ENOMEM;
+ }
+ bundle_buf = bundle_skb->data;
+ skb_queue_head_init(&tx_save_head);
+ }
+
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ memcpy(bundle_buf, skb->data, skb->len);
+ hdr = (struct ath10k_htc_hdr *)bundle_buf;
+ hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
+ hdr->pad_len = __cpu_to_le16(credit_pad);
+ bundle_buf += trans_len;
+ bundles_left -= trans_len;
+ skb_queue_tail(&tx_save_head, skb);
+ }
+
+ if (bundle_buf != bundle_skb->data) {
+ bundle_skb->len = bundle_buf - bundle_skb->data;
+ ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+ } else {
+ dev_kfree_skb_any(bundle_skb);
+ }
+
+ return ret;
+}
+
+static void ath10k_htc_bundle_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+
+ if (!ep->bundle_tx)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
+ ep->eid, skb_queue_len(&ep->tx_req_head));
+
+ if (skb_queue_len(&ep->tx_req_head) >=
+ ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
+ ath10k_htc_send_bundle_skbs(ep);
+ } else {
+ skb = skb_dequeue(&ep->tx_req_head);
+
+ if (!skb)
+ continue;
+ ath10k_htc_send_one_skb(ep, skb);
+ }
+ }
+}
+
+static void ath10k_htc_tx_complete_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
+ struct ath10k_htc_ep *ep;
+ enum ath10k_htc_ep_id eid;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+ eid = ep->eid;
+ if (ep->bundle_tx && eid == ar->htt.eid) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
+ ep->eid, skb_queue_len(&ep->tx_complete_head));
+
+ while (true) {
+ skb = skb_dequeue(&ep->tx_complete_head);
+ if (!skb)
+ break;
+ ath10k_htc_notify_tx_completion(ep, skb);
+ }
+ }
+ }
+}
+
+int ath10k_htc_send_hl(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k *ar = htc->ar;
+
+ if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
+ eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
+
+ if (ep->bundle_tx) {
+ skb_queue_tail(&ep->tx_req_head, skb);
+ queue_work(ar->workqueue, &ar->bundle_tx_work);
+ return 0;
+ } else {
+ return ath10k_htc_send(htc, eid, skb);
+ }
+}
+
+void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
+{
+ if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
+ !ep->bundle_tx) {
+ ep->bundle_tx = true;
+ skb_queue_head_init(&ep->tx_req_head);
+ skb_queue_head_init(&ep->tx_complete_head);
+ }
+}
+
+void ath10k_htc_stop_hl(struct ath10k *ar)
+{
+ struct ath10k_htc_ep *ep;
+ int i;
+
+ cancel_work_sync(&ar->bundle_tx_work);
+ cancel_work_sync(&ar->tx_complete_work);
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+
+ if (!ep->bundle_tx)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
+ ep->eid, skb_queue_len(&ep->tx_req_head));
+
+ skb_queue_purge(&ep->tx_req_head);
+ }
+}
+
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
@@ -649,17 +962,34 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
*/
if (htc->control_resp_len >=
sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
+ htc->alt_data_credit_size =
+ __le16_to_cpu(msg->ready_ext.reserved) &
+ ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
htc->max_msgs_per_htc_bundle =
min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
ath10k_dbg(ar, ATH10K_DBG_HTC,
- "Extended ready message. RX bundle size: %d\n",
- htc->max_msgs_per_htc_bundle);
+ "Extended ready message RX bundle size %d alt size %d\n",
+ htc->max_msgs_per_htc_bundle,
+ htc->alt_data_credit_size);
}
+ INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
+ INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);
+
return 0;
}
+void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ bool enable)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];
+
+ ep->tx_credit_flow_enabled = enable;
+}
+
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp)
@@ -791,6 +1121,11 @@ setup:
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
ep->tx_credits = tx_alloc;
+ ep->tx_credit_size = htc->target_credit_size;
+
+ if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
+ htc->alt_data_credit_size != 0)
+ ep->tx_credit_size = htc->alt_data_credit_size;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 065c82d9d689..0d180faf3b77 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -83,8 +83,14 @@ struct ath10k_htc_hdr {
u8 seq_no; /* for tx */
u8 control_byte1;
} __packed;
- u8 pad0;
- u8 pad1;
+ union {
+ __le16 pad_len;
+ struct {
+ u8 pad0;
+ u8 pad1;
+ } __packed;
+ } __packed;
+
} __packed __aligned(4);
enum ath10k_ath10k_htc_msg_id {
@@ -113,6 +119,8 @@ enum ath10k_htc_conn_flags {
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
};
+#define ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK 0xFFF
+
enum ath10k_htc_conn_svc_status {
ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
@@ -121,6 +129,10 @@ enum ath10k_htc_conn_svc_status {
ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
};
+#define ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE 32
+#define ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE 2
+#define ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE 2
+
enum ath10k_htc_setup_complete_flags {
ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN = 1
};
@@ -145,8 +157,14 @@ struct ath10k_htc_ready_extended {
struct ath10k_htc_ready base;
u8 htc_version; /* @enum ath10k_htc_version */
u8 max_msgs_per_htc_bundle;
- u8 pad0;
- u8 pad1;
+ union {
+ __le16 reserved;
+ struct {
+ u8 pad0;
+ u8 pad1;
+ } __packed;
+ } __packed;
+
} __packed;
struct ath10k_htc_conn_svc {
@@ -353,7 +371,12 @@ struct ath10k_htc_ep {
u8 seq_no; /* for debugging */
int tx_credits;
+ int tx_credit_size;
bool tx_credit_flow_enabled;
+ bool bundle_tx;
+ struct sk_buff_head tx_req_head;
+ struct sk_buff_head tx_complete_head;
+
};
struct ath10k_htc_svc_tx_credits {
@@ -378,16 +401,25 @@ struct ath10k_htc {
int total_transmit_credits;
int target_credit_size;
u8 max_msgs_per_htc_bundle;
+ int alt_data_credit_size;
};
int ath10k_htc_init(struct ath10k *ar);
int ath10k_htc_wait_target(struct ath10k_htc *htc);
+void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep);
int ath10k_htc_start(struct ath10k_htc *htc);
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp);
+void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ bool enable);
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
+void ath10k_htc_stop_hl(struct ath10k *ar);
+
+int ath10k_htc_send_hl(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+ struct sk_buff *packet);
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 7b75200ceae5..127b4e4980ef 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -10,6 +10,7 @@
#include "htt.h"
#include "core.h"
#include "debug.h"
+#include "hif.h"
static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
[HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
@@ -134,6 +135,8 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
+ struct ath10k *ar = htt->ar;
+ struct ath10k_htc_ep *ep;
int status;
memset(&conn_req, 0, sizeof(conn_req));
@@ -141,6 +144,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
+ conn_req.ep_ops.ep_tx_credits = ath10k_htt_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@@ -153,6 +157,15 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
htt->eid = conn_resp.eid;
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ ep = &ar->htc.endpoint[htt->eid];
+ ath10k_htc_setup_tx_req(ep);
+ }
+
+ htt->disable_tx_comp = ath10k_hif_get_htt_tx_complete(htt->ar);
+ if (htt->disable_tx_comp)
+ ath10k_htc_change_tx_credit_flow(&htt->ar->htc, htt->eid, true);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 4a12564fc30e..cad59494f175 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -150,9 +150,19 @@ enum htt_data_tx_desc_flags1 {
HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
- HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15
+ HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE = 1 << 15
};
+#define HTT_TX_CREDIT_DELTA_ABS_M 0xffff0000
+#define HTT_TX_CREDIT_DELTA_ABS_S 16
+#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
+ (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)
+
+#define HTT_TX_CREDIT_SIGN_BIT_M 0x00000100
+#define HTT_TX_CREDIT_SIGN_BIT_S 8
+#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
+ (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)
+
enum htt_data_tx_ext_tid {
HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
HTT_DATA_TX_EXT_TID_MGMT = 17,
@@ -279,12 +289,12 @@ struct htt_rx_ring_setup_hdr {
struct htt_rx_ring_setup_32 {
struct htt_rx_ring_setup_hdr hdr;
- struct htt_rx_ring_setup_ring32 rings[0];
+ struct htt_rx_ring_setup_ring32 rings[];
} __packed;
struct htt_rx_ring_setup_64 {
struct htt_rx_ring_setup_hdr hdr;
- struct htt_rx_ring_setup_ring64 rings[0];
+ struct htt_rx_ring_setup_ring64 rings[];
} __packed;
/*
@@ -722,7 +732,7 @@ struct htt_rx_indication {
* %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
* and has %num_mpdu_ranges elements.
*/
- struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+ struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;
/* High latency version of the RX indication */
@@ -731,7 +741,7 @@ struct htt_rx_indication_hl {
struct htt_rx_indication_ppdu ppdu;
struct htt_rx_indication_prefix prefix;
struct fw_rx_desc_hl fw_desc;
- struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+ struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;
struct htt_hl_rx_desc {
@@ -898,7 +908,7 @@ struct htt_append_retries {
struct htt_data_tx_completion_ext {
struct htt_append_retries a_retries;
__le32 t_stamp;
- __le16 msdus_rssi[0];
+ __le16 msdus_rssi[];
} __packed;
/**
@@ -982,7 +992,7 @@ struct htt_data_tx_completion {
} __packed;
u8 num_msdus;
u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
- __le16 msdus[0]; /* variable length based on %num_msdus */
+ __le16 msdus[]; /* variable length based on %num_msdus */
} __packed;
#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0)
@@ -997,7 +1007,7 @@ struct htt_data_tx_ppdu_dur {
struct htt_data_tx_compl_ppdu_dur {
__le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */
- struct htt_data_tx_ppdu_dur ppdu_dur[0];
+ struct htt_data_tx_ppdu_dur ppdu_dur[];
} __packed;
struct htt_tx_compl_ind_base {
@@ -1023,7 +1033,7 @@ struct htt_rc_update {
u8 addr[6];
u8 num_elems;
u8 rsvd0;
- struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
+ struct htt_rc_tx_done_params params[]; /* variable length %num_elems */
} __packed;
/* see htt_rx_indication for similar fields and descriptions */
@@ -1040,7 +1050,7 @@ struct htt_rx_fragment_indication {
__le16 fw_rx_desc_bytes;
__le16 rsvd0;
- u8 fw_msdu_rx_desc[0];
+ u8 fw_msdu_rx_desc[];
} __packed;
#define ATH10K_IEEE80211_EXTIV BIT(5)
@@ -1065,7 +1075,7 @@ struct htt_rx_pn_ind {
u8 seqno_end;
u8 pn_ie_count;
u8 reserved;
- u8 pn_ies[0];
+ u8 pn_ies[];
} __packed;
struct htt_rx_offload_msdu {
@@ -1074,7 +1084,7 @@ struct htt_rx_offload_msdu {
u8 vdev_id;
u8 tid;
u8 fw_desc;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct htt_rx_offload_ind {
@@ -1157,7 +1167,7 @@ struct htt_rx_test {
* a) num_ints * sizeof(__le32)
* b) num_chars * sizeof(u8) aligned to 4bytes
*/
- u8 payload[0];
+ u8 payload[];
} __packed;
static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
@@ -1191,7 +1201,7 @@ static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
*/
struct htt_pktlog_msg {
u8 pad[3];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct htt_dbg_stats_rx_reorder_stats {
@@ -1480,7 +1490,7 @@ struct htt_stats_conf_item {
} __packed;
u8 pad;
__le16 length;
- u8 payload[0]; /* roundup(length, 4) long */
+ u8 payload[]; /* roundup(length, 4) long */
} __packed;
struct htt_stats_conf {
@@ -1489,7 +1499,7 @@ struct htt_stats_conf {
__le32 cookie_msb;
/* each item has variable length! */
- struct htt_stats_conf_item items[0];
+ struct htt_stats_conf_item items[];
} __packed;
static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
@@ -1663,8 +1673,8 @@ struct htt_tx_fetch_ind {
__le32 token;
__le16 num_resp_ids;
__le16 num_records;
- struct htt_tx_fetch_record records[0];
__le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
+ struct htt_tx_fetch_record records[];
} __packed;
static inline void *
@@ -1679,13 +1689,13 @@ struct htt_tx_fetch_resp {
__le16 fetch_seq_num;
__le16 num_records;
__le32 token;
- struct htt_tx_fetch_record records[0];
+ struct htt_tx_fetch_record records[];
} __packed;
struct htt_tx_fetch_confirm {
u8 pad0;
__le16 num_resp_ids;
- __le32 resp_ids[0];
+ __le32 resp_ids[];
} __packed;
enum htt_tx_mode_switch_mode {
@@ -1717,7 +1727,7 @@ struct htt_tx_mode_switch_ind {
__le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
__le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
u8 pad1[2];
- struct htt_tx_mode_switch_record records[0];
+ struct htt_tx_mode_switch_record records[];
} __packed;
struct htt_channel_change {
@@ -1747,7 +1757,7 @@ struct htt_peer_tx_stats {
u8 num_ppdu;
u8 ppdu_len;
u8 version;
- u8 payload[0];
+ u8 payload[];
} __packed;
#define ATH10K_10_2_TX_STATS_OFFSET 136
@@ -2021,6 +2031,10 @@ struct ath10k_htt {
bool tx_mem_allocated;
const struct ath10k_htt_tx_ops *tx_ops;
const struct ath10k_htt_rx_ops *rx_ops;
+ bool disable_tx_comp;
+ bool bundle_tx;
+ struct sk_buff_head tx_req_head;
+ struct sk_buff_head tx_complete_head;
};
struct ath10k_htt_tx_ops {
@@ -2035,6 +2049,7 @@ struct ath10k_htt_tx_ops {
int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
+ void (*htt_flush_tx)(struct ath10k_htt *htt);
};
static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
@@ -2074,6 +2089,12 @@ static inline int ath10k_htt_tx(struct ath10k_htt *htt,
return htt->tx_ops->htt_tx(htt, txmode, msdu);
}
+static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
+{
+ if (htt->tx_ops->htt_flush_tx)
+ htt->tx_ops->htt_flush_tx(htt);
+}
+
static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
{
if (!htt->tx_ops->htt_alloc_txbuff)
@@ -2185,7 +2206,7 @@ struct htt_rx_desc {
struct rx_ppdu_end ppdu_end;
} __packed;
u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
};
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
@@ -2267,6 +2288,7 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
__le16 fetch_seq_num,
struct htt_tx_fetch_record *records,
size_t num_records);
+void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);
void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index f883f2a724dd..d787cbead56a 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -3574,6 +3574,13 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
}
+ if (ar->htt.disable_tx_comp) {
+ arsta->tx_retries += peer_stats->retry_pkts;
+ arsta->tx_failed += peer_stats->failed_pkts;
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d tx failed %d\n",
+ arsta->tx_retries, arsta->tx_failed);
+ }
+
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
rate_idx);
@@ -3789,6 +3796,9 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
}
case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
struct htt_tx_done tx_done = {};
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
@@ -3814,6 +3824,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
+ if (htt->disable_tx_comp) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits++;
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
status = ath10k_txrx_tx_unref(htt, &tx_done);
if (!status) {
spin_lock_bh(&htt->tx_lock);
@@ -3888,8 +3904,32 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
- case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
+ case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
+ u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
+ int htt_credit_delta;
+
+ htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
+ if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
+ htt_credit_delta = -htt_credit_delta;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt credit update delta %d\n",
+ htt_credit_delta);
+
+ if (htt->disable_tx_comp) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += htt_credit_delta;
+ spin_unlock_bh(&htc->tx_lock);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt credit total %d\n",
+ ep->tx_credits);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ }
break;
+ }
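The credit-update word encodes the delta as sign-and-magnitude rather than two's complement, hence the explicit negation above. A stand-alone sketch of the decode, with illustrative mask values (the authoritative HTT_TX_CREDIT_* definitions live in htt.h):

/* illustrative field layout; see HTT_TX_CREDIT_* in htt.h for the real one */
#define CREDIT_SIGN_BIT		0x00000100
#define CREDIT_DELTA_ABS_MASK	0xffff0000
#define CREDIT_DELTA_ABS_LSB	16

static int decode_credit_delta(u32 msg_word)
{
	int delta = (msg_word & CREDIT_DELTA_ABS_MASK) >> CREDIT_DELTA_ABS_LSB;

	return (msg_word & CREDIT_SIGN_BIT) ? -delta : delta;
}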
case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
u32 freq = __le32_to_cpu(resp->chan_change.freq);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index e9d12ea708b6..4fd10ac3a941 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -529,9 +529,15 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
htt->tx_mem_allocated = false;
}
-void ath10k_htt_tx_stop(struct ath10k_htt *htt)
+static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
+ ath10k_htc_stop_hl(htt->ar);
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+}
+
+void ath10k_htt_tx_stop(struct ath10k_htt *htt)
+{
+ ath10k_htt_flush_tx_queue(htt);
idr_destroy(&htt->pending_tx);
}
@@ -541,9 +547,46 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
ath10k_htt_tx_destroy(htt);
}
+void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
+{
+ queue_work(ar->workqueue, &ar->bundle_tx_work);
+}
+
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_tx_done tx_done = {0};
+ struct htt_cmd_hdr *htt_hdr;
+ struct htt_data_tx_desc *desc_hdr = NULL;
+ u16 flags1 = 0;
+ u8 msg_type = 0;
+
+ if (htt->disable_tx_comp) {
+ htt_hdr = (struct htt_cmd_hdr *)skb->data;
+ msg_type = htt_hdr->msg_type;
+
+ if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
+ desc_hdr = (struct htt_data_tx_desc *)
+ (skb->data + sizeof(*htt_hdr));
+ flags1 = __le16_to_cpu(desc_hdr->flags1);
+ }
+ }
+
dev_kfree_skb_any(skb);
+
+ if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx complete msdu id:%u ,flags1:%x\n",
+ __le16_to_cpu(desc_hdr->id), flags1);
+
+ if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
+ return;
+
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
+ tx_done.msdu_id = __le16_to_cpu(desc_hdr->id);
+ ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}
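With tx completions disabled, this HTC send-done callback becomes the data-path completion point; frames that still get a real firmware completion are tagged with FLAGS1_TX_COMPLETE and skipped here. The resulting flow, summarized as comments:

/* disable_tx_comp flow in ath10k_htt_htc_tx_complete():
 *  - completions enabled, or not a TX_FRM  -> just free the skb
 *  - TX_FRM with FLAGS1_TX_COMPLETE set    -> firmware will still report
 *    this frame (e.g. mgmt tx), so the msdu id stays pending
 *  - TX_FRM without the flag               -> synthesize
 *    HTT_TX_COMPL_STATE_ACK and unref the msdu immediately
 */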
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -1279,6 +1322,9 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ if (htt->disable_tx_comp)
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
break;
}
@@ -1344,7 +1390,7 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
*/
tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
- res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);
+ res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);
out:
return res;
@@ -1784,6 +1830,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
.htt_tx = ath10k_htt_tx_hl,
.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
+ .htt_flush_tx = ath10k_htt_flush_tx_queue,
};
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 970c736ac6bb..f16edcb9f326 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -165,7 +165,7 @@ enum qca9377_chip_id_rev {
struct ath10k_fw_ie {
__le32 id;
__le32 len;
- u8 data[0];
+ u8 data[];
};
enum ath10k_fw_ie_type {
@@ -623,6 +623,9 @@ struct ath10k_hw_params {
/* tx stats support over pktlog */
bool tx_stats_over_pktlog;
+
+ /* provides bitrates for sta_statistics using WMI_TLV_PEER_STATS_INFO_EVENTID */
+ bool supports_peer_stats_info;
};
struct htt_rx_desc;
@@ -765,7 +768,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
-#define TARGET_TLV_NUM_MSDU_DESC_HL 64
+#define TARGET_TLV_NUM_MSDU_DESC_HL 1024
#define TARGET_TLV_NUM_WOW_PATTERNS 22
#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 2d03b8dd3b8c..919d15584d4a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2505,6 +2505,30 @@ ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
return tx_mcs_set;
}
+static u32 get_160mhz_nss_from_maxrate(int rate)
+{
+ u32 nss;
+
+ switch (rate) {
+ case 780:
+ nss = 1;
+ break;
+ case 1560:
+ nss = 2;
+ break;
+ case 2106:
+ nss = 3; /* MCS9 is not supported for 3 streams per spec */
+ break;
+ case 3120:
+ nss = 4;
+ break;
+ default:
+ nss = 1;
+ }
+
+ return nss;
+}
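Each case is the peer's advertised 160 MHz long-GI maximum in Mbps: the per-stream ceiling times the stream count, where three streams top out at MCS8 (702 Mbps) while the others reach MCS9 (780 Mbps):

/* 1 x 780 = 780, 2 x 780 = 1560, 3 x 702 = 2106, 4 x 780 = 3120;
 * unrecognized rates conservatively fall back to one stream.
 */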
+
static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2512,6 +2536,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u16 *vht_mcs_mask;
@@ -2578,22 +2603,38 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ /* Configure the bandwidth-to-NSS mapping for the firmware,
+ * matching the chip's tx chain configuration at 160 MHz bandwidth.
+ */
+ if (arg->peer_phymode == MODE_11AC_VHT160 ||
+ arg->peer_phymode == MODE_11AC_VHT80_80) {
+ u32 rx_nss;
+ u32 max_rate;
- if (arg->peer_vht_rates.rx_max_rate &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
- switch (arg->peer_vht_rates.rx_max_rate) {
- case 1560:
- /* Must be 2x2 at 160Mhz is all it can do. */
- arg->peer_bw_rxnss_override = 2;
- break;
- case 780:
- /* Can only do 1x1 at 160Mhz (Long Guard Interval) */
- arg->peer_bw_rxnss_override = 1;
- break;
+ max_rate = arg->peer_vht_rates.rx_max_rate;
+ rx_nss = get_160mhz_nss_from_maxrate(max_rate);
+
+ if (rx_nss == 0)
+ rx_nss = arg->peer_num_spatial_streams;
+ else
+ rx_nss = min(arg->peer_num_spatial_streams, rx_nss);
+
+ max_rate = hw->vht160_mcs_tx_highest;
+ rx_nss = min(rx_nss, get_160mhz_nss_from_maxrate(max_rate));
+
+ arg->peer_bw_rxnss_override =
+ FIELD_PREP(WMI_PEER_NSS_MAP_ENABLE, 1) |
+ FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, (rx_nss - 1));
+
+ if (arg->peer_phymode == MODE_11AC_VHT80_80) {
+ arg->peer_bw_rxnss_override |=
+ FIELD_PREP(WMI_PEER_NSS_80_80MHZ_MASK, (rx_nss - 1));
}
}
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vht peer %pM max_mpdu %d flags 0x%x peer_rx_nss_override 0x%x\n",
+ sta->addr, arg->peer_max_mpdu,
+ arg->peer_flags, arg->peer_bw_rxnss_override);
}
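For a two-stream VHT160 peer the override word packs the enable bit plus (rx_nss - 1); a worked example with an illustrative layout (the authoritative WMI_PEER_NSS_* masks are in wmi.h):

/* illustrative layout only; see WMI_PEER_NSS_* in wmi.h */
#define NSS_MAP_ENABLE	BIT(31)
#define NSS_160MHZ_MASK	GENMASK(2, 0)

/* rx_nss = 2:
 *   FIELD_PREP(NSS_MAP_ENABLE, 1) | FIELD_PREP(NSS_160MHZ_MASK, 2 - 1)
 *   = 0x80000000 | 0x1 = 0x80000001
 */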
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -2745,9 +2786,9 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
- ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
return 0;
}
@@ -2918,6 +2959,11 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
arvif->aid = bss_conf->aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->peer_stats_info_enable, 1);
+ if (ret)
+ ath10k_warn(ar, "failed to enable peer stats info: %d\n", ret);
+
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
if (ret) {
ath10k_warn(ar, "failed to set vdev %d up: %d\n",
@@ -3921,6 +3967,9 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
if (ret) {
ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
ret);
+ /* remove this msdu from idr tracking */
+ ath10k_wmi_cleanup_mgmt_tx_send(ar, skb);
+
dma_unmap_single(ar->dev, paddr, skb->len,
DMA_TO_DEVICE);
ieee80211_free_txskb(ar->hw, skb);
@@ -4488,17 +4537,18 @@ static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
-static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
+static bool ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
{
/* It is not clear that allowing gaps in chainmask
* is helpful. Probably it will not do what user
* is hoping for, so warn in that case.
*/
if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
- return;
+ return true;
- ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
+ ath10k_warn(ar, "mac %s antenna chainmask is invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
dbg, cm);
+ return false;
}
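The accepted values are exactly the gap-free masks starting at bit 0, so an equivalent branch-free test exists; a sketch (ignoring the four-chain upper bound the list implies):

static bool chainmask_is_gap_free(u32 cm)
{
	/* 0, 1, 3, 7, 15: cm + 1 is a power of two, so cm & (cm + 1) == 0 */
	return (cm & (cm + 1)) == 0;
}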
static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
@@ -4563,13 +4613,6 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
vht_cap.cap |= val;
}
- /* Currently the firmware seems to be buggy, don't enable 80+80
- * mode until that's resolved.
- */
- if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
- (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
- vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
-
mcs_map = 0;
for (i = 0; i < 8; i++) {
if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
@@ -4688,11 +4731,15 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
{
int ret;
+ bool is_valid_tx_chain_mask, is_valid_rx_chain_mask;
lockdep_assert_held(&ar->conf_mutex);
- ath10k_check_chain_mask(ar, tx_ant, "tx");
- ath10k_check_chain_mask(ar, rx_ant, "rx");
+ is_valid_tx_chain_mask = ath10k_check_chain_mask(ar, tx_ant, "tx");
+ is_valid_rx_chain_mask = ath10k_check_chain_mask(ar, rx_ant, "rx");
+
+ if (!is_valid_tx_chain_mask || !is_valid_rx_chain_mask)
+ return -EINVAL;
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
@@ -7190,6 +7237,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ath10k_wmi_peer_flush(ar, arvif->vdev_id,
arvif->bssid, bitmap);
}
+ ath10k_htt_flush_tx(&ar->htt);
}
return;
}
@@ -8260,6 +8308,215 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
peer->removed = true;
}
+/* HT MCS parameters with Nss = 1 */
+static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss1[] = {
+ /* MCS L20 L40 S20 S40 */
+ {0, { 65, 135, 72, 150} },
+ {1, { 130, 270, 144, 300} },
+ {2, { 195, 405, 217, 450} },
+ {3, { 260, 540, 289, 600} },
+ {4, { 390, 810, 433, 900} },
+ {5, { 520, 1080, 578, 1200} },
+ {6, { 585, 1215, 650, 1350} },
+ {7, { 650, 1350, 722, 1500} }
+};
+
+/* HT MCS parameters with Nss = 2 */
+static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss2[] = {
+ /* MCS L20 L40 S20 S40 */
+ {0, {130, 270, 144, 300} },
+ {1, {260, 540, 289, 600} },
+ {2, {390, 810, 433, 900} },
+ {3, {520, 1080, 578, 1200} },
+ {4, {780, 1620, 867, 1800} },
+ {5, {1040, 2160, 1156, 2400} },
+ {6, {1170, 2430, 1300, 2700} },
+ {7, {1300, 2700, 1444, 3000} }
+};
+
+/* VHT MCS parameters with Nss = 1 */
+static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[] = {
+ /* MCS L80 S80 L40 S40 L20 S20 */
+ {0, {293, 325}, {135, 150}, {65, 72} },
+ {1, {585, 650}, {270, 300}, {130, 144} },
+ {2, {878, 975}, {405, 450}, {195, 217} },
+ {3, {1170, 1300}, {540, 600}, {260, 289} },
+ {4, {1755, 1950}, {810, 900}, {390, 433} },
+ {5, {2340, 2600}, {1080, 1200}, {520, 578} },
+ {6, {2633, 2925}, {1215, 1350}, {585, 650} },
+ {7, {2925, 3250}, {1350, 1500}, {650, 722} },
+ {8, {3510, 3900}, {1620, 1800}, {780, 867} },
+ {9, {3900, 4333}, {1800, 2000}, {780, 867} }
+};
+
+/* VHT MCS parameters with Nss = 2 */
+static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[] = {
+ /* MCS L80 S80 L40 S40 L20 S20 */
+ {0, {585, 650}, {270, 300}, {130, 144} },
+ {1, {1170, 1300}, {540, 600}, {260, 289} },
+ {2, {1755, 1950}, {810, 900}, {390, 433} },
+ {3, {2340, 2600}, {1080, 1200}, {520, 578} },
+ {4, {3510, 3900}, {1620, 1800}, {780, 867} },
+ {5, {4680, 5200}, {2160, 2400}, {1040, 1156} },
+ {6, {5265, 5850}, {2430, 2700}, {1170, 1300} },
+ {7, {5850, 6500}, {2700, 3000}, {1300, 1444} },
+ {8, {7020, 7800}, {3240, 3600}, {1560, 1733} },
+ {9, {7800, 8667}, {3600, 4000}, {1560, 1733} }
+};
+
+static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
+ u8 *flags, u8 *bw)
+{
+ struct ath10k_index_ht_data_rate_type *mcs_rate;
+
+ mcs_rate = (struct ath10k_index_ht_data_rate_type *)
+ ((nss == 1) ? &supported_ht_mcs_rate_nss1 :
+ &supported_ht_mcs_rate_nss2);
+
+ if (rate == mcs_rate[mcs].supported_rate[0]) {
+ *bw = RATE_INFO_BW_20;
+ } else if (rate == mcs_rate[mcs].supported_rate[1]) {
+ *bw |= RATE_INFO_BW_40;
+ } else if (rate == mcs_rate[mcs].supported_rate[2]) {
+ *bw |= RATE_INFO_BW_20;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate == mcs_rate[mcs].supported_rate[3]) {
+ *bw |= RATE_INFO_BW_40;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else {
+ ath10k_warn(ar, "invalid ht params rate %d 100kbps nss %d mcs %d",
+ rate, nss, mcs);
+ }
+}
+
+static void ath10k_mac_get_rate_flags_vht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
+ u8 *flags, u8 *bw)
+{
+ struct ath10k_index_vht_data_rate_type *mcs_rate;
+
+ mcs_rate = (struct ath10k_index_vht_data_rate_type *)
+ ((nss == 1) ? &supported_vht_mcs_rate_nss1 :
+ &supported_vht_mcs_rate_nss2);
+
+ if (rate == mcs_rate[mcs].supported_VHT80_rate[0]) {
+ *bw = RATE_INFO_BW_80;
+ } else if (rate == mcs_rate[mcs].supported_VHT80_rate[1]) {
+ *bw = RATE_INFO_BW_80;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate == mcs_rate[mcs].supported_VHT40_rate[0]) {
+ *bw = RATE_INFO_BW_40;
+ } else if (rate == mcs_rate[mcs].supported_VHT40_rate[1]) {
+ *bw = RATE_INFO_BW_40;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate == mcs_rate[mcs].supported_VHT20_rate[0]) {
+ *bw = RATE_INFO_BW_20;
+ } else if (rate == mcs_rate[mcs].supported_VHT20_rate[1]) {
+ *bw = RATE_INFO_BW_20;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else {
+ ath10k_warn(ar, "invalid vht params rate %d 100kbps nss %d mcs %d",
+ rate, nss, mcs);
+ }
+}
+
+static void ath10k_mac_get_rate_flags(struct ath10k *ar, u32 rate,
+ enum ath10k_phy_mode mode, u8 nss, u8 mcs,
+ u8 *flags, u8 *bw)
+{
+ if (mode == ATH10K_PHY_MODE_HT) {
+ *flags = RATE_INFO_FLAGS_MCS;
+ ath10k_mac_get_rate_flags_ht(ar, rate, nss, mcs, flags, bw);
+ } else if (mode == ATH10K_PHY_MODE_VHT) {
+ *flags = RATE_INFO_FLAGS_VHT_MCS;
+ ath10k_mac_get_rate_flags_vht(ar, rate, nss, mcs, flags, bw);
+ }
+}
+
+static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code,
+ u32 bitrate_kbps, struct rate_info *rate)
+{
+ enum ath10k_phy_mode mode = ATH10K_PHY_MODE_LEGACY;
+ enum wmi_rate_preamble preamble = WMI_TLV_GET_HW_RC_PREAM_V1(rate_code);
+ u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rate_code) + 1;
+ u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code);
+ u8 flags = 0, bw = 0;
+
+ if (preamble == WMI_RATE_PREAMBLE_HT)
+ mode = ATH10K_PHY_MODE_HT;
+ else if (preamble == WMI_RATE_PREAMBLE_VHT)
+ mode = ATH10K_PHY_MODE_VHT;
+
+ ath10k_mac_get_rate_flags(ar, bitrate_kbps / 100, mode, nss, mcs, &flags, &bw);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac parse bitrate preamble %d mode %d nss %d mcs %d flags %x bw %d\n",
+ preamble, mode, nss, mcs, flags, bw);
+
+ rate->flags = flags;
+ rate->bw = bw;
+ rate->legacy = bitrate_kbps / 100;
+ rate->nss = nss;
+ rate->mcs = mcs;
+}
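The MCS tables above store rates in units of 100 kbps, which is why the lookup divides the firmware-reported kbps value by 100. A worked trace through the HT path:

/* firmware reports 150000 kbps with HT preamble, nss = 1, mcs = 7:
 *   150000 / 100 = 1500 == supported_ht_mcs_rate_nss1[7].supported_rate[3]
 *   -> *bw |= RATE_INFO_BW_40, *flags |= RATE_INFO_FLAGS_SHORT_GI
 */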
+
+static void ath10k_mac_sta_get_peer_stats_info(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_peer *peer;
+ unsigned long time_left;
+ int ret;
+
+ if (!(ar->hw_params.supports_peer_stats_info &&
+ arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA))
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arsta->arvif->vdev_id, sta->addr);
+ spin_unlock_bh(&ar->data_lock);
+ if (!peer)
+ return;
+
+ reinit_completion(&ar->peer_stats_info_complete);
+
+ ret = ath10k_wmi_request_peer_stats_info(ar,
+ arsta->arvif->vdev_id,
+ WMI_REQUEST_ONE_PEER_STATS_INFO,
+ arsta->arvif->bssid,
+ 0);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "could not request peer stats info: %d\n", ret);
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->peer_stats_info_complete, 3 * HZ);
+ if (time_left == 0) {
+ ath10k_warn(ar, "timed out waiting peer stats info\n");
+ return;
+ }
+
+ if (arsta->rx_rate_code != 0 && arsta->rx_bitrate_kbps != 0) {
+ ath10k_mac_parse_bitrate(ar, arsta->rx_rate_code,
+ arsta->rx_bitrate_kbps,
+ &sinfo->rxrate);
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ arsta->rx_rate_code = 0;
+ arsta->rx_bitrate_kbps = 0;
+ }
+
+ if (arsta->tx_rate_code != 0 && arsta->tx_bitrate_kbps != 0) {
+ ath10k_mac_parse_bitrate(ar, arsta->tx_rate_code,
+ arsta->tx_bitrate_kbps,
+ &sinfo->txrate);
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ arsta->tx_rate_code = 0;
+ arsta->tx_bitrate_kbps = 0;
+ }
+}
+
static void ath10k_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -8271,6 +8528,8 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
if (!ath10k_peer_stats_enabled(ar))
return;
+ ath10k_debug_fw_stats_request(ar);
+
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -8286,6 +8545,15 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
}
sinfo->txrate.flags = arsta->txrate.flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ if (ar->htt.disable_tx_comp) {
+ sinfo->tx_retries = arsta->tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ sinfo->tx_failed = arsta->tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+ }
+
+ ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
}
static const struct ieee80211_ops ath10k_ops = {
@@ -8625,7 +8893,9 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80),
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160),
#endif
},
};
@@ -8643,7 +8913,9 @@ ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80),
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160),
#endif
},
};
@@ -8919,7 +9191,6 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
- ar->hw->wiphy->max_sched_scan_reqs = 1;
ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ded7a220a4aa..1d941d53fdc9 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -116,7 +116,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
-static struct ce_attr host_ce_config_wlan[] = {
+static const struct ce_attr pci_host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
@@ -222,7 +222,7 @@ static struct ce_attr host_ce_config_wlan[] = {
};
/* Target firmware's Copy Engine configuration. */
-static struct ce_pipe_config target_ce_config_wlan[] = {
+static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
@@ -335,7 +335,7 @@ static struct ce_pipe_config target_ce_config_wlan[] = {
* This table is derived from the CE_PCI TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
-static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@@ -1787,6 +1787,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
if (!force) {
@@ -1804,7 +1806,7 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
* If at least 50% of the total resources are still available,
* don't bother checking again yet.
*/
- if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+ if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
return;
}
ath10k_ce_per_engine_service(ar, pipe);
@@ -1820,14 +1822,15 @@ static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
- const struct service_to_pipe *entry;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ const struct ce_service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
- for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
- entry = &target_service_to_ce_map_wlan[i];
+ for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
+ entry = &ar_pci->serv_to_pipe[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
@@ -2074,6 +2077,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_sync(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
+ cancel_work_sync(&ar_pci->dump_work);
/* Most likely the device has HTT Rx ring configured. The only way to
* prevent the device from accessing (and possible corrupting) host
@@ -2315,6 +2319,7 @@ static int ath10k_bus_get_num_banks(struct ath10k *ar)
int ath10k_pci_init_config(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 interconnect_targ_addr;
u32 pcie_state_targ_addr = 0;
u32 pipe_cfg_targ_addr = 0;
@@ -2360,7 +2365,7 @@ int ath10k_pci_init_config(struct ath10k *ar)
}
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
- target_ce_config_wlan,
+ ar_pci->pipe_config,
sizeof(struct ce_pipe_config) *
NUM_TARGET_CE_CONFIG_WLAN);
@@ -2385,8 +2390,8 @@ int ath10k_pci_init_config(struct ath10k *ar)
}
ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
- target_service_to_ce_map_wlan,
- sizeof(target_service_to_ce_map_wlan));
+ ar_pci->serv_to_pipe,
+ sizeof(pci_target_service_to_ce_map_wlan));
if (ret != 0) {
ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
return ret;
@@ -2458,23 +2463,24 @@ static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
struct ce_attr *attr;
struct ce_pipe_config *config;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
/* For QCA6174 we're overriding the Copy Engine 5 configuration,
* since it is currently used for other feature.
*/
/* Override Host's Copy Engine 5 configuration */
- attr = &host_ce_config_wlan[5];
+ attr = &ar_pci->attr[5];
attr->src_sz_max = 0;
attr->dest_nentries = 0;
/* Override Target firmware's Copy Engine configuration */
- config = &target_ce_config_wlan[5];
+ config = &ar_pci->pipe_config[5];
config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
config->nbytes_max = __cpu_to_le32(2048);
/* Map from service/endpoint to Copy Engine */
- target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
+ ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
}
int ath10k_pci_alloc_pipes(struct ath10k *ar)
@@ -2490,7 +2496,7 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar)
pipe->pipe_num = i;
pipe->hif_ce_state = ar;
- ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
i, ret);
@@ -2503,7 +2509,7 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar)
continue;
}
- pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
+ pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
}
return 0;
@@ -2519,10 +2525,11 @@ void ath10k_pci_free_pipes(struct ath10k *ar)
int ath10k_pci_init_pipes(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i, ret;
for (i = 0; i < CE_COUNT; i++) {
- ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+ ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
if (ret) {
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
i, ret);
@@ -3594,6 +3601,30 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
+ ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
+ sizeof(pci_host_ce_config_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->attr) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
+ sizeof(pci_target_ce_config_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->pipe_config) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
+ sizeof(pci_target_service_to_ce_map_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->serv_to_pipe) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
ret = ath10k_pci_setup_resource(ar);
if (ret) {
ath10k_err(ar, "failed to setup resource: %d\n", ret);
@@ -3689,6 +3720,11 @@ err_free_pipes:
err_core_destroy:
ath10k_core_destroy(ar);
+err_free:
+ kfree(ar_pci->attr);
+ kfree(ar_pci->pipe_config);
+ kfree(ar_pci->serv_to_pipe);
+
return ret;
}
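Duplicating the CE tables per device (instead of pointing at the file-scope statics) is what makes ath10k_pci_override_ce_config() safe when more than one adapter is probed: each instance mutates only its own copy. The generic shape of the pattern, as a sketch:

/* per-instance copy of a shared template:
 *   copy = kmemdup(template, sizeof(template), GFP_KERNEL);
 *   if (!copy)
 *           return -ENOMEM;
 * the template is never written again; the copy is kfree()d on the
 * remove and error paths, as above.
 */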
@@ -3714,6 +3750,9 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
+ kfree(ar_pci->attr);
+ kfree(ar_pci->pipe_config);
+ kfree(ar_pci->serv_to_pipe);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 4455ed6c5275..862d0901c5b8 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -178,11 +178,16 @@ struct ath10k_pci {
*/
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+ struct ce_attr *attr;
+ struct ce_pipe_config *pipe_config;
+ struct ce_service_to_pipe *serv_to_pipe;
+
/* Keep this entry in the last, memory for struct ath10k_ahb is
* allocated (ahb support enabled case) in the continuation of
* this struct.
*/
- struct ath10k_ahb ahb[0];
+ struct ath10k_ahb ahb[];
+
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 85dce43c5439..5468a41e928e 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -122,8 +122,8 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
int ret;
int i;
- req.msa_addr = qmi->msa_pa;
- req.size = qmi->msa_mem_size;
+ req.msa_addr = ar->msa.paddr;
+ req.size = ar->msa.mem_size;
ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
wlfw_msa_info_resp_msg_v01_ei, &resp);
@@ -157,12 +157,12 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
goto out;
}
- max_mapped_addr = qmi->msa_pa + qmi->msa_mem_size;
+ max_mapped_addr = ar->msa.paddr + ar->msa.mem_size;
qmi->nr_mem_region = resp.mem_region_info_len;
for (i = 0; i < resp.mem_region_info_len; i++) {
- if (resp.mem_region_info[i].size > qmi->msa_mem_size ||
+ if (resp.mem_region_info[i].size > ar->msa.mem_size ||
resp.mem_region_info[i].region_addr > max_mapped_addr ||
- resp.mem_region_info[i].region_addr < qmi->msa_pa ||
+ resp.mem_region_info[i].region_addr < ar->msa.paddr ||
resp.mem_region_info[i].size +
resp.mem_region_info[i].region_addr > max_mapped_addr) {
ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
@@ -961,7 +961,16 @@ static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
qmi->fw_ready = false;
- ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, NULL);
+
+ /*
+ * The del_server event is to be processed only if coming from
+ * the qmi server. The qmi infrastructure sends del_server, when
+ * any client releases the qmi handle. In this case do not process
+ * this del_server event.
+ */
+ if (qmi->state == ATH10K_QMI_STATE_INIT_DONE)
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT,
+ NULL);
}
static struct qmi_ops ath10k_qmi_ops = {
@@ -1006,54 +1015,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
spin_unlock(&qmi->event_lock);
}
-static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
-{
- struct ath10k *ar = qmi->ar;
- struct device *dev = ar->dev;
- struct device_node *node;
- struct resource r;
- int ret;
-
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node) {
- ret = of_address_to_resource(node, 0, &r);
- if (ret) {
- dev_err(dev, "failed to resolve msa fixed region\n");
- return ret;
- }
- of_node_put(node);
-
- qmi->msa_pa = r.start;
- qmi->msa_mem_size = resource_size(&r);
- qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
- MEMREMAP_WT);
- if (IS_ERR(qmi->msa_va)) {
- dev_err(dev, "failed to map memory region: %pa\n", &r.start);
- return PTR_ERR(qmi->msa_va);
- }
- } else {
- qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
- &qmi->msa_pa, GFP_KERNEL);
- if (!qmi->msa_va) {
- ath10k_err(ar, "failed to allocate dma memory for msa region\n");
- return -ENOMEM;
- }
- qmi->msa_mem_size = msa_size;
- }
-
- if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
- qmi->msa_fixed_perm = true;
-
- ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
- &qmi->msa_pa,
- qmi->msa_va);
-
- return 0;
-}
-
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *dev = ar->dev;
struct ath10k_qmi *qmi;
int ret;
@@ -1064,9 +1029,8 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
qmi->ar = ar;
ar_snoc->qmi = qmi;
- ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
- if (ret)
- goto err;
+ if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
+ qmi->msa_fixed_perm = true;
ret = qmi_handle_init(&qmi->qmi_hdl,
WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
@@ -1091,6 +1055,7 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
if (ret)
goto err_qmi_lookup;
+ qmi->state = ATH10K_QMI_STATE_INIT_DONE;
return 0;
err_qmi_lookup:
@@ -1109,6 +1074,7 @@ int ath10k_qmi_deinit(struct ath10k *ar)
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct ath10k_qmi *qmi = ar_snoc->qmi;
+ qmi->state = ATH10K_QMI_STATE_DEINIT;
qmi_handle_release(&qmi->qmi_hdl);
cancel_work_sync(&qmi->event_work);
destroy_workqueue(qmi->event_wq);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index dc257375f161..89464239fe96 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -83,6 +83,11 @@ struct ath10k_qmi_driver_event {
void *data;
};
+enum ath10k_qmi_state {
+ ATH10K_QMI_STATE_INIT_DONE,
+ ATH10K_QMI_STATE_DEINIT,
+};
+
struct ath10k_qmi {
struct ath10k *ar;
struct qmi_handle qmi_hdl;
@@ -93,9 +98,6 @@ struct ath10k_qmi {
spinlock_t event_lock; /* spinlock for qmi event list */
u32 nr_mem_region;
struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
- dma_addr_t msa_pa;
- u32 msa_mem_size;
- void *msa_va;
struct ath10k_qmi_chip_info chip_info;
struct ath10k_qmi_board_info board_info;
struct ath10k_qmi_soc_info soc_info;
@@ -105,6 +107,7 @@ struct ath10k_qmi {
char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
bool msa_fixed_perm;
+ enum ath10k_qmi_state state;
};
int ath10k_qmi_wlan_enable(struct ath10k *ar,
@@ -112,7 +115,6 @@ int ath10k_qmi_wlan_enable(struct ath10k *ar,
enum wlfw_driver_mode_enum_v01 mode,
const char *version);
int ath10k_qmi_wlan_disable(struct ath10k *ar);
-int ath10k_qmi_register_service_notifier(struct notifier_block *nb);
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size);
int ath10k_qmi_deinit(struct ath10k *ar);
int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode);
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 1f709b65c29b..63f882c690bf 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -542,7 +542,7 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
int pkt_cnt = 0;
if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
- ath10k_warn(ar, "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
+ ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
ret = -ENOMEM;
goto err;
@@ -1083,10 +1083,10 @@ static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
- dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device);
- dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device);
+ dev_id_base = (device & 0x0F00);
+ dev_id_chiprev = (device & 0x00FF);
switch (dev_id_base) {
- case QCA_MANUFACTURER_ID_AR6005_BASE:
+ case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
if (dev_id_chiprev < 4)
mbox_info->ext_info[0].htc_ext_sz =
ATH10K_HIF_MBOX0_EXT_WIDTH;
@@ -1097,7 +1097,7 @@ static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
mbox_info->ext_info[0].htc_ext_sz =
ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
break;
- case QCA_MANUFACTURER_ID_QCA9377_BASE:
+ case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
mbox_info->ext_info[0].htc_ext_sz =
ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
break;
@@ -1361,23 +1361,117 @@ static void ath10k_rx_indication_async_work(struct work_struct *work)
napi_schedule(&ar->napi);
}
+static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
+{
+ struct ath10k *ar = ar_sdio->ar;
+ unsigned char rtc_state = 0;
+ int ret = 0;
+
+ rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
+ return ret;
+ }
+
+ *state = rtc_state & 0x3;
+
+ return ret;
+}
+
+static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 val;
+ int retry = ATH10K_CIS_READ_RETRY, ret = 0;
+ unsigned char rtc_state = 0;
+
+ sdio_claim_host(ar_sdio->func);
+
+ ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
+ ret);
+ goto release;
+ }
+
+ if (enable_sleep) {
+ val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
+ ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
+ } else {
+ val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
+ ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
+ }
+
+ ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
+ ret);
+ }
+
+ if (!enable_sleep) {
+ do {
+ udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
+ ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
+ rtc_state);
+
+ if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
+ break;
+
+ udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
+ retry--;
+ } while (retry > 0);
+ }
+
+release:
+ sdio_release_host(ar_sdio->func);
+
+ return ret;
+}
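When waking the mailbox, the loop polls the RTC state, pacing each read by a short RTC-cycle delay plus a crystal-settle delay. Worst-case wait before giving up, from the constants in sdio.h:

/* ATH10K_CIS_READ_RETRY x (READ_WAIT_4_RTC_CYCLE + XTAL_SETTLE)
 *   = 10 x (125 us + 1500 us) = 16.25 ms
 */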
+
+static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
+{
+ struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
+
+ ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+}
+
static void ath10k_sdio_write_async_work(struct work_struct *work)
{
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
wr_async_work);
struct ath10k *ar = ar_sdio->ar;
struct ath10k_sdio_bus_request *req, *tmp_req;
+ struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list);
spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ if (req->address >= mbox_info->htc_addr &&
+ ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
+ ath10k_sdio_set_mbox_sleep(ar, false);
+ mod_timer(&ar_sdio->sleep_timer, jiffies +
+ msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
+ }
+
__ath10k_sdio_write_async(ar, req);
spin_lock_bh(&ar_sdio->wr_async_lock);
}
spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
+ ath10k_sdio_set_mbox_sleep(ar, true);
}
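Together with the timer handler above, this worker implements a small mailbox sleep state machine; the transitions, summarized:

/* AWAKE --(sleep_timer expires)---------> REQUEST_TO_SLEEP
 * REQUEST_TO_SLEEP --(worker drains q)--> SLEEP  (set_mbox_sleep(true))
 * SLEEP --(write targets mbox range)----> AWAKE  (set_mbox_sleep(false),
 *         timer re-armed for ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS)
 */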
static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
@@ -1444,7 +1538,7 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
/* sdio HIF functions */
-static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
+static int ath10k_sdio_disable_intrs(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@@ -1500,7 +1594,7 @@ static int ath10k_sdio_hif_power_up(struct ath10k *ar,
ar_sdio->is_disabled = false;
- ret = ath10k_sdio_hif_disable_intrs(ar);
+ ret = ath10k_sdio_disable_intrs(ar);
if (ret)
return ret;
@@ -1517,6 +1611,9 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
+ del_timer_sync(&ar_sdio->sleep_timer);
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
/* Disable the card */
sdio_claim_host(ar_sdio->func);
@@ -1569,7 +1666,7 @@ static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
return 0;
}
-static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
+static int ath10k_sdio_enable_intrs(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@@ -1617,33 +1714,6 @@ static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
return ret;
}
-static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
-{
- u32 val;
- int ret;
-
- ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
- if (ret) {
- ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
- ret);
- return ret;
- }
-
- if (enable_sleep)
- val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
- else
- val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
-
- ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
- if (ret) {
- ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
- ret);
- return ret;
- }
-
- return 0;
-}
-
/* HIF diagnostics */
static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
@@ -1679,8 +1749,8 @@ out:
return ret;
}
-static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
- u32 *value)
+static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
+ u32 *value)
{
__le32 *val;
int ret;
@@ -1725,7 +1795,7 @@ static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
return 0;
}
-static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
+static int ath10k_sdio_hif_start_post(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
u32 addr, val;
@@ -1733,7 +1803,7 @@ static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
- ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
+ ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) {
ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
return ret;
@@ -1749,9 +1819,33 @@ static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
ar_sdio->swap_mbox = false;
}
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
return 0;
}
+static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
+{
+ u32 addr, val;
+ int ret;
+
+ addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+ ret = ath10k_sdio_diag_read32(ar, addr, &val);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
+ return ret;
+ }
+
+ ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
+ ret ? " " : " not ");
+
+ return ret;
+}
+
/* HIF start/stop */
static int ath10k_sdio_hif_start(struct ath10k *ar)
@@ -1766,7 +1860,7 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
* request before interrupts are disabled.
*/
msleep(20);
- ret = ath10k_sdio_hif_disable_intrs(ar);
+ ret = ath10k_sdio_disable_intrs(ar);
if (ret)
return ret;
@@ -1788,19 +1882,19 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
sdio_release_host(ar_sdio->func);
- ret = ath10k_sdio_hif_enable_intrs(ar);
+ ret = ath10k_sdio_enable_intrs(ar);
if (ret)
ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
/* Enable sleep and then disable it again */
- ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
+ ret = ath10k_sdio_set_mbox_sleep(ar, true);
if (ret)
return ret;
/* Wait for 20ms for the written value to take effect */
msleep(20);
- ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
+ ret = ath10k_sdio_set_mbox_sleep(ar, false);
if (ret)
return ret;
@@ -2007,17 +2101,6 @@ static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
*dl_pipe = 0;
}
-/* This op is currently only used by htc_wait_target if the HTC ready
- * message times out. It is not applicable for SDIO since there is nothing
- * we can do if the HTC ready message does not arrive in time.
- * TODO: Make this op non mandatory by introducing a NULL check in the
- * hif op wrapper.
- */
-static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
- u8 pipe, int force)
-{
-}
-
static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
.tx_sg = ath10k_sdio_hif_tx_sg,
.diag_read = ath10k_sdio_hif_diag_read,
@@ -2025,10 +2108,10 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
.exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,
.start = ath10k_sdio_hif_start,
.stop = ath10k_sdio_hif_stop,
- .swap_mailbox = ath10k_sdio_hif_swap_mailbox,
+ .start_post = ath10k_sdio_hif_start_post,
+ .get_htt_tx_complete = ath10k_sdio_get_htt_tx_complete,
.map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,
.get_default_pipe = ath10k_sdio_hif_get_default_pipe,
- .send_complete_check = ath10k_sdio_hif_send_complete_check,
.power_up = ath10k_sdio_hif_power_up,
.power_down = ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
@@ -2053,6 +2136,8 @@ static int ath10k_sdio_pm_suspend(struct device *device)
if (!device_may_wakeup(ar->dev))
return 0;
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
pm_flag = MMC_PM_KEEP_POWER;
ret = sdio_set_host_pm_flags(func, pm_flag);
@@ -2185,19 +2270,16 @@ static int ath10k_sdio_probe(struct sdio_func *func,
skb_queue_head_init(&ar_sdio->rx_head);
INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
- dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
- switch (dev_id_base) {
- case QCA_MANUFACTURER_ID_AR6005_BASE:
- case QCA_MANUFACTURER_ID_QCA9377_BASE:
- ar->dev_id = QCA9377_1_0_DEVICE_ID;
- break;
- default:
+ dev_id_base = (id->device & 0x0F00);
+ if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
+ dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
ret = -ENODEV;
ath10k_err(ar, "unsupported device id %u (0x%x)\n",
dev_id_base, id->device);
goto err_free_wq;
}
+ ar->dev_id = QCA9377_1_0_DEVICE_ID;
ar->id.vendor = id->vendor;
ar->id.device = id->device;
@@ -2216,6 +2298,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq;
}
+ timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);
+
return 0;
err_free_wq:
@@ -2246,10 +2330,8 @@ static void ath10k_sdio_remove(struct sdio_func *func)
}
static const struct sdio_device_id ath10k_sdio_devices[] = {
- {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
- (QCA_SDIO_ID_AR6005_BASE | 0xA))},
- {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
- (QCA_SDIO_ID_QCA9377_BASE | 0x1))},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
{},
};
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index 33195f49acab..b6ac927628b1 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -10,14 +10,6 @@
#define ATH10K_HIF_MBOX_BLOCK_SIZE 256
-#define QCA_MANUFACTURER_ID_BASE GENMASK(11, 8)
-#define QCA_MANUFACTURER_ID_AR6005_BASE 0x5
-#define QCA_MANUFACTURER_ID_QCA9377_BASE 0x7
-#define QCA_SDIO_ID_AR6005_BASE 0x500
-#define QCA_SDIO_ID_QCA9377_BASE 0x700
-#define QCA_MANUFACTURER_ID_REV_MASK 0x00FF
-#define QCA_MANUFACTURER_CODE 0x271 /* Qualcomm/Atheros */
-
#define ATH10K_SDIO_MAX_BUFFER_SIZE 4096 /*Unsure of this constant*/
/* Mailbox address in SDIO address space */
@@ -37,7 +29,7 @@
(ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))
#define ATH10K_HIF_MBOX_NUM_MAX 4
-#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 64
+#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 1024
#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)
@@ -98,6 +90,21 @@
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
+enum sdio_mbox_state {
+ SDIO_MBOX_UNKNOWN_STATE = 0,
+ SDIO_MBOX_REQUEST_TO_SLEEP_STATE = 1,
+ SDIO_MBOX_SLEEP_STATE = 2,
+ SDIO_MBOX_AWAKE_STATE = 3,
+};
+
+#define ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US 125
+#define ATH10K_CIS_RTC_STATE_ADDR 0x1138
+#define ATH10K_CIS_RTC_STATE_ON 0x01
+#define ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US 1500
+#define ATH10K_CIS_READ_RETRY 10
+#define ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS 50
+
+/* TODO: remove this and use skb->cb instead, much cleaner approach */
struct ath10k_sdio_bus_request {
struct list_head list;
@@ -217,6 +224,8 @@ struct ath10k_sdio {
spinlock_t wr_async_lock;
struct work_struct async_work_rx;
+ struct timer_list sleep_timer;
+ enum sdio_mbox_state mbox_state;
};
static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 21081b4a27d7..354d49b1cd45 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -11,6 +11,8 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#include <linux/of_address.h>
+#include <linux/iommu.h>
#include "ce.h"
#include "coredump.h"
@@ -356,7 +358,7 @@ static struct ce_pipe_config target_ce_config_wlan[] = {
},
};
-static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@@ -769,7 +771,7 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
- const struct service_to_pipe *entry;
+ const struct ce_service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
@@ -1393,7 +1395,6 @@ static int ath10k_hw_power_off(struct ath10k *ar)
static void ath10k_msa_dump_memory(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
{
- struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
const struct ath10k_hw_mem_layout *mem_layout;
const struct ath10k_mem_region *current_region;
struct ath10k_dump_ram_data_hdr *hdr;
@@ -1419,15 +1420,15 @@ static void ath10k_msa_dump_memory(struct ath10k *ar,
buf_len -= sizeof(*hdr);
hdr->region_type = cpu_to_le32(current_region->type);
- hdr->start = cpu_to_le32((unsigned long)ar_snoc->qmi->msa_va);
- hdr->length = cpu_to_le32(ar_snoc->qmi->msa_mem_size);
+ hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
+ hdr->length = cpu_to_le32(ar->msa.mem_size);
- if (current_region->len < ar_snoc->qmi->msa_mem_size) {
- memcpy(buf, ar_snoc->qmi->msa_va, current_region->len);
+ if (current_region->len < ar->msa.mem_size) {
+ memcpy(buf, ar->msa.vaddr, current_region->len);
ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
- current_region->len, ar_snoc->qmi->msa_mem_size);
+ current_region->len, ar->msa.mem_size);
} else {
- memcpy(buf, ar_snoc->qmi->msa_va, ar_snoc->qmi->msa_mem_size);
+ memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
}
}
@@ -1455,6 +1456,155 @@ void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
mutex_unlock(&ar->dump_mutex);
}
+static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
+{
+ struct device *dev = ar->dev;
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node) {
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret) {
+ dev_err(dev, "failed to resolve msa fixed region\n");
+ return ret;
+ }
+ of_node_put(node);
+
+ ar->msa.paddr = r.start;
+ ar->msa.mem_size = resource_size(&r);
+ ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
+ ar->msa.mem_size,
+ MEMREMAP_WT);
+ if (IS_ERR(ar->msa.vaddr)) {
+ dev_err(dev, "failed to map memory region: %pa\n",
+ &r.start);
+ return PTR_ERR(ar->msa.vaddr);
+ }
+ } else {
+ ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
+ &ar->msa.paddr,
+ GFP_KERNEL);
+ if (!ar->msa.vaddr) {
+ ath10k_err(ar, "failed to allocate dma memory for msa region\n");
+ return -ENOMEM;
+ }
+ ar->msa.mem_size = msa_size;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n",
+ &ar->msa.paddr,
+ ar->msa.vaddr);
+
+ return 0;
+}
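The fixed-region branch is taken when the wifi node carries a memory-region phandle; otherwise the MSA falls back to a managed coherent allocation. An illustrative, hypothetical DT fragment for the fixed case (node name and address invented):

/* wifi@18800000 {
 *	compatible = "qcom,wcn3990-wifi";
 *	memory-region = <&wlan_msa_mem>;	// points at a reserved-memory node
 * };
 */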
+
+static int ath10k_fw_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *host_dev = &ar_snoc->dev->dev;
+ struct platform_device_info info;
+ struct iommu_domain *iommu_dom;
+ struct platform_device *pdev;
+ struct device_node *node;
+ int ret;
+
+ node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
+ if (!node) {
+ ar_snoc->use_tz = true;
+ return 0;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.fwnode = &node->fwnode;
+ info.parent = host_dev;
+ info.name = node->name;
+ info.dma_mask = DMA_BIT_MASK(32);
+
+ pdev = platform_device_register_full(&info);
+ if (IS_ERR(pdev)) {
+ of_node_put(node);
+ return PTR_ERR(pdev);
+ }
+
+ pdev->dev.of_node = node;
+
+ ret = of_dma_configure(&pdev->dev, node, true);
+ if (ret) {
+ ath10k_err(ar, "dma configure fail: %d\n", ret);
+ goto err_unregister;
+ }
+
+ ar_snoc->fw.dev = &pdev->dev;
+
+ iommu_dom = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu_dom) {
+ ath10k_err(ar, "failed to allocate iommu domain\n");
+ ret = -ENOMEM;
+ goto err_unregister;
+ }
+
+ ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
+ if (ret) {
+ ath10k_err(ar, "could not attach device: %d\n", ret);
+ goto err_iommu_free;
+ }
+
+ ar_snoc->fw.iommu_domain = iommu_dom;
+ ar_snoc->fw.fw_start_addr = ar->msa.paddr;
+
+ ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
+ ar->msa.paddr, ar->msa.mem_size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ ath10k_err(ar, "failed to map firmware region: %d\n", ret);
+ goto err_iommu_detach;
+ }
+
+ of_node_put(node);
+
+ return 0;
+
+err_iommu_detach:
+ iommu_detach_device(iommu_dom, ar_snoc->fw.dev);
+
+err_iommu_free:
+ iommu_domain_free(iommu_dom);
+
+err_unregister:
+ platform_device_unregister(pdev);
+ of_node_put(node);
+
+ return ret;
+}
+
+static int ath10k_fw_deinit(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
+ struct iommu_domain *iommu;
+ size_t unmapped_size;
+
+ if (ar_snoc->use_tz)
+ return 0;
+
+ iommu = ar_snoc->fw.iommu_domain;
+
+ unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
+ mapped_size);
+ if (unmapped_size != mapped_size)
+ ath10k_err(ar, "failed to unmap firmware: %zu\n",
+ unmapped_size);
+
+ iommu_detach_device(iommu, ar_snoc->fw.dev);
+ iommu_domain_free(iommu);
+
+ platform_device_unregister(to_platform_device(ar_snoc->fw.dev));
+
+ return 0;
+}
+
static const struct of_device_id ath10k_snoc_dt_match[] = {
{ .compatible = "qcom,wcn3990-wifi",
.data = &drv_priv,
@@ -1557,16 +1707,31 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq;
}
+ ret = ath10k_setup_msa_resources(ar, msa_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
+ goto err_power_off;
+ }
+
+ ret = ath10k_fw_init(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
+ goto err_power_off;
+ }
+
ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
- goto err_power_off;
+ goto err_fw_deinit;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
return 0;
+err_fw_deinit:
+ ath10k_fw_deinit(ar);
+
err_power_off:
ath10k_hw_power_off(ar);
@@ -1598,6 +1763,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_core_unregister(ar);
ath10k_hw_power_off(ar);
+ ath10k_fw_deinit(ar);
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
ath10k_qmi_deinit(ar);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index c05df45a3945..a3dd06f6ac62 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -55,6 +55,13 @@ struct regulator_bulk_data;
struct ath10k_snoc {
struct platform_device *dev;
struct ath10k *ar;
+ unsigned int use_tz;
+ struct ath10k_firmware {
+ struct device *dev;
+ dma_addr_t fw_start_addr;
+ struct iommu_domain *iommu_domain;
+ size_t mapped_mem_size;
+ } fw;
void __iomem *mem;
dma_addr_t mem_pa;
struct ath10k_snoc_target_info target_info;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 39abf8b12903..f46b9083bbf1 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -84,9 +84,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
+ rcu_read_lock();
if (txq && txq->sta && skb_cb->airtime_est)
ieee80211_sta_register_airtime(txq->sta, txq->tid,
skb_cb->airtime_est, 0);
+ rcu_read_unlock();
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 1e0343081be9..b7daf344d012 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -693,17 +693,6 @@ static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id,
return 0;
}
-/* This op is currently only used by htc_wait_target if the HTC ready
- * message times out. It is not applicable for USB since there is nothing
- * we can do if the HTC ready message does not arrive in time.
- * TODO: Make this op non mandatory by introducing a NULL check in the
- * hif op wrapper.
- */
-static void ath10k_usb_hif_send_complete_check(struct ath10k *ar,
- u8 pipe, int force)
-{
-}
-
static int ath10k_usb_hif_power_up(struct ath10k *ar,
enum ath10k_firmware_mode fw_mode)
{
@@ -737,7 +726,6 @@ static const struct ath10k_hif_ops ath10k_usb_hif_ops = {
.stop = ath10k_usb_hif_stop,
.map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe,
.get_default_pipe = ath10k_usb_hif_get_default_pipe,
- .send_complete_check = ath10k_usb_hif_send_complete_check,
.get_free_queue_number = ath10k_usb_hif_get_free_queue_number,
.power_up = ath10k_usb_hif_power_up,
.power_down = ath10k_usb_hif_power_down,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 1491c25518bb..0dd484f85082 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -126,6 +126,13 @@ struct wmi_ops {
struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
+ struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_peer_stats_info_request_type type,
+ u8 *addr,
+ u32 reset);
struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
enum wmi_force_fw_hang_type type,
u32 delay_ms);
@@ -133,6 +140,7 @@ struct wmi_ops {
struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
struct sk_buff *skb,
dma_addr_t paddr);
+ int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
@@ -442,6 +450,15 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
}
static inline int
+ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
+{
+ if (!ar->wmi.ops->cleanup_mgmt_tx_send)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
+}
+
+static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
dma_addr_t paddr)
{
@@ -1065,6 +1082,29 @@ ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
}
static inline int
+ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_peer_stats_info_request_type type,
+ u8 *addr,
+ u32 reset)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_request_peer_stats_info)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
+ vdev_id,
+ type,
+ addr,
+ reset);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
+}
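A caller typically pairs this wrapper with the completion signalled by
the TLV event handler added further below. A sketch under that
assumption (the helper and its timeout are illustrative, not from the
patch):

	static int request_one_peer_stats(struct ath10k *ar, u32 vdev_id,
					  u8 *peer_addr)
	{
		unsigned long time_left;
		int ret;

		reinit_completion(&ar->peer_stats_info_complete);

		ret = ath10k_wmi_request_peer_stats_info(ar, vdev_id,
							 WMI_REQUEST_ONE_PEER_STATS_INFO,
							 peer_addr, 0);
		if (ret)
			return ret;

		/* Completed by ath10k_wmi_tlv_event_peer_stats_info(). */
		time_left = wait_for_completion_timeout(&ar->peer_stats_info_complete,
							3 * HZ);
		return time_left ? 0 : -ETIMEDOUT;
	}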
+
+static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms)
{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 4e68debda9bf..932266d1111b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -219,6 +219,91 @@ static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
complete(&ar->vdev_delete_done);
}
+static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_tlv_peer_stats_info *stat = ptr;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
+ return -EPROTO;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
+ stat->peer_macaddr.addr,
+ __le32_to_cpu(stat->last_rx_rate_code),
+ __le32_to_cpu(stat->last_rx_bitrate_kbps));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
+ __le32_to_cpu(stat->last_tx_rate_code),
+ __le32_to_cpu(stat->last_tx_bitrate_kbps));
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
+ if (!sta) {
+ ath10k_warn(ar, "not found station for peer stats\n");
+ return -EINVAL;
+ }
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
+ arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
+ arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
+ arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_peer_stats_info_ev *ev;
+ const void *data;
+ u32 num_peer_stats;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ num_peer_stats = __le32_to_cpu(ev->num_peers);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
+ __le32_to_cpu(ev->vdev_id),
+ num_peer_stats,
+ __le32_to_cpu(ev->more_data));
+
+ ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
+ ath10k_wmi_tlv_parse_peer_stats_info, NULL);
+ if (ret)
+ ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
+ ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
+ complete(&ar->peer_stats_info_complete);
+}
+
static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -576,6 +661,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb);
break;
+ case WMI_TLV_PEER_STATS_INFO_EVENTID:
+ ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
+ break;
case WMI_TLV_VDEV_START_RESP_EVENTID:
ath10k_wmi_event_vdev_start_resp(ar, skb);
break;
@@ -2123,7 +2211,7 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
tlv->len = __cpu_to_le16(sizeof(*ch));
ch = (void *)tlv->value;
- ath10k_wmi_put_wmi_channel(ch, &arg->channel);
+ ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);
ptr += sizeof(*tlv);
ptr += sizeof(*ch);
@@ -2763,7 +2851,7 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
tlv->len = __cpu_to_le16(sizeof(*ci));
ci = (void *)tlv->value;
- ath10k_wmi_put_wmi_channel(ci, ch);
+ ath10k_wmi_put_wmi_channel(ar, ci, ch);
chans += sizeof(*tlv);
chans += sizeof(*ci);
@@ -2897,6 +2985,48 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
return skb;
}
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_peer_stats_info_request_type type,
+ u8 *addr,
+ u32 reset)
+{
+ struct wmi_tlv_request_peer_stats_info *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->request_type = __cpu_to_le32(type);
+
+ if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+
+ cmd->reset_after_request = __cpu_to_le32(reset);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
+ return skb;
+}
+
+static int
+ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
+ struct sk_buff *msdu)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_wmi *wmi = &ar->wmi;
+
+ idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
+
+ return 0;
+}
+
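The new cleanup op exists so a caller can release the msdu_id that
gen_mgmt_tx_send reserved in the mgmt_pending_tx idr when the
subsequent send fails; otherwise the id would leak and the pending-tx
pool would eventually fill up. Roughly how the tx path pairs the two
(error handling trimmed to a sketch):

	ret = ath10k_wmi_mgmt_tx_send(ar, msdu, paddr);
	if (ret) {
		/* Send failed: drop the idr entry reserved for this
		 * frame so the id space is not exhausted by failures.
		 */
		ath10k_wmi_cleanup_mgmt_tx_send(ar, msdu);
		return ret;
	}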
static int
ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
dma_addr_t paddr)
@@ -2971,6 +3101,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if (desc_id < 0)
goto err_free_skb;
+ cb->msdu_id = desc_id;
+
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
@@ -3450,7 +3582,7 @@ ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
tlv->len = __cpu_to_le16(sizeof(*chan));
chan = (void *)tlv->value;
- ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+ ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
ptr += sizeof(*tlv);
ptr += sizeof(*chan);
@@ -4113,6 +4245,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
+ .request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
.network_list_offload_config_cmdid =
WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
@@ -4269,6 +4402,7 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+ .peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
};
static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
@@ -4416,9 +4550,11 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
+ .gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
/* .gen_mgmt_tx = not implemented; HTT is used */
.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
+ .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 4972dc12991c..e77b97ca5c7f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -198,6 +198,12 @@ enum wmi_tlv_cmd_id {
WMI_TLV_REQUEST_LINK_STATS_CMDID,
WMI_TLV_START_LINK_STATS_CMDID,
WMI_TLV_CLEAR_LINK_STATS_CMDID,
+ WMI_TLV_CGET_FW_MEM_DUMP_CMDID,
+ WMI_TLV_CDEBUG_MESG_FLUSH_CMDID,
+ WMI_TLV_CDIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_TLV_CREQUEST_WLAN_STATS_CMDID,
+ WMI_TLV_CREQUEST_RCPI_CMDID,
+ WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
@@ -338,6 +344,13 @@ enum wmi_tlv_event_id {
WMI_TLV_IFACE_LINK_STATS_EVENTID,
WMI_TLV_PEER_LINK_STATS_EVENTID,
WMI_TLV_RADIO_LINK_STATS_EVENTID,
+ WMI_TLV_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_TLV_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_TLV_INST_RSSI_STATS_EVENTID,
+ WMI_TLV_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_TLV_REPORT_STATS_EVENTID,
+ WMI_TLV_UPDATE_RCPI_EVENTID,
+ WMI_TLV_PEER_STATS_INFO_EVENTID,
WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
WMI_TLV_APFIND_EVENTID,
@@ -451,6 +464,7 @@ enum wmi_tlv_pdev_param {
WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
+ WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE = 0x8b,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
};
@@ -1623,7 +1637,7 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
struct wmi_tlv {
__le16 len;
__le16 tag;
- u8 value[0];
+ u8 value[];
} __packed;
struct ath10k_mgmt_tx_pkt_addr {
@@ -2023,7 +2037,7 @@ struct wmi_tlv_bcn_tx_status_ev {
struct wmi_tlv_bcn_prb_info {
__le32 caps;
__le32 erp;
- u8 ies[0];
+ u8 ies[];
} __packed;
struct wmi_tlv_bcn_tmpl_cmd {
@@ -2054,7 +2068,7 @@ struct wmi_tlv_diag_item {
__le16 len;
__le32 timestamp;
__le32 code;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct wmi_tlv_diag_data_ev {
@@ -2081,6 +2095,94 @@ struct wmi_tlv_stats_ev {
__le32 num_peer_stats_extd;
} __packed;
+struct wmi_tlv_peer_stats_info_ev {
+ __le32 vdev_id;
+ __le32 num_peers;
+ __le32 more_data;
+} __packed;
+
+#define WMI_TLV_MAX_CHAINS 8
+
+struct wmi_tlv_peer_stats_info {
+ struct wmi_mac_addr peer_macaddr;
+ struct {
+ /* lower 32 bits of the tx_bytes value */
+ __le32 low_32;
+ /* upper 32 bits of the tx_bytes value */
+ __le32 high_32;
+ } __packed tx_bytes;
+ struct {
+ /* lower 32 bits of the tx_packets value */
+ __le32 low_32;
+ /* upper 32 bits of the tx_packets value */
+ __le32 high_32;
+ } __packed tx_packets;
+ struct {
+ /* lower 32 bits of the rx_bytes value */
+ __le32 low_32;
+ /* upper 32 bits of the rx_bytes value */
+ __le32 high_32;
+ } __packed rx_bytes;
+ struct {
+ /* lower 32 bits of the rx_packets value */
+ __le32 low_32;
+ /* upper 32 bits of the rx_packets value */
+ __le32 high_32;
+ } __packed rx_packets;
+ __le32 tx_retries;
+ __le32 tx_failed;
+
+ /* Rate information: the output of WMI_ASSEMBLE_RATECODE_V1
+ * (in the format 0x1000RRRR).
+ * The rate code is a 4-byte field which, for a given rate,
+ * NSS and preamble, is laid out as follows:
+ *
+ * b'31-b'29 unused / reserved
+ * b'28 indicates the version of the rate code (1 = RATECODE_V1)
+ * b'27-b'11 unused / reserved
+ * b'10-b'8 indicate the preamble (0 OFDM, 1 CCK, 2 HT, 3 VHT)
+ * b'7-b'5 indicate the NSS (0 - 1x1, 1 - 2x2, 2 - 3x3, 3 - 4x4)
+ * b'4-b'0 indicate the rate, encoded as follows:
+ * OFDM : 0: OFDM 48 Mbps
+ * 1: OFDM 24 Mbps
+ * 2: OFDM 12 Mbps
+ * 3: OFDM 6 Mbps
+ * 4: OFDM 54 Mbps
+ * 5: OFDM 36 Mbps
+ * 6: OFDM 18 Mbps
+ * 7: OFDM 9 Mbps
+ * CCK (pream == 1)
+ * 0: CCK 11 Mbps Long
+ * 1: CCK 5.5 Mbps Long
+ * 2: CCK 2 Mbps Long
+ * 3: CCK 1 Mbps Long
+ * 4: CCK 11 Mbps Short
+ * 5: CCK 5.5 Mbps Short
+ * 6: CCK 2 Mbps Short
+ * HT/VHT (pream == 2/3)
+ * 0..7: MCS0..MCS7 (HT)
+ * 0..9: MCS0..MCS9 (11AC VHT)
+ * 0..11: MCS0..MCS11 (11AX HE)
+ * rate-code of the last transmission
+ */
+ __le32 last_tx_rate_code;
+ __le32 last_rx_rate_code;
+ __le32 last_tx_bitrate_kbps;
+ __le32 last_rx_bitrate_kbps;
+ __le32 peer_rssi;
+ __le32 tx_succeed;
+ __le32 peer_rssi_per_chain[WMI_TLV_MAX_CHAINS];
+} __packed;
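The firmware splits the 64-bit byte and packet counters into low/high
halves on the wire; a consumer reassembles them along these lines (the
helper name is illustrative):

	static u64 wmi_tlv_stat64(__le32 low_32, __le32 high_32)
	{
		return ((u64)__le32_to_cpu(high_32) << 32) |
		       __le32_to_cpu(low_32);
	}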
+
+#define HW_RATECODE_PREAM_V1_MASK GENMASK(10, 8)
+#define WMI_TLV_GET_HW_RC_PREAM_V1(rc) FIELD_GET(HW_RATECODE_PREAM_V1_MASK, rc)
+
+#define HW_RATECODE_NSS_V1_MASK GENMASK(7, 5)
+#define WMI_TLV_GET_HW_RC_NSS_V1(rc) FIELD_GET(HW_RATECODE_NSS_V1_MASK, rc)
+
+#define HW_RATECODE_RATE_V1_MASK GENMASK(4, 0)
+#define WMI_TLV_GET_HW_RC_RATE_V1(rc) FIELD_GET(HW_RATECODE_RATE_V1_MASK, rc)
+
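The three accessors above decode a V1 rate code according to the bit
layout documented in the struct comment. A worked example (the
rate-code value is made up for illustration):

	u32 rc = 0x10000329;	/* bit 28 set: RATECODE_V1 */

	u8 pream = WMI_TLV_GET_HW_RC_PREAM_V1(rc); /* (0x329 >> 8) & 0x7 = 3 -> VHT */
	u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rc);	   /* (0x329 >> 5) & 0x7 = 1 -> 2x2 */
	u8 rate = WMI_TLV_GET_HW_RC_RATE_V1(rc);   /* 0x329 & 0x1f = 9 -> VHT MCS9 */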
struct wmi_tlv_p2p_noa_ev {
__le32 vdev_id;
} __packed;
@@ -2097,6 +2199,14 @@ struct wmi_tlv_wow_add_del_event_cmd {
__le32 event_bitmap;
} __packed;
+struct wmi_tlv_request_peer_stats_info {
+ __le32 request_type;
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ __le32 reset_after_request;
+} __packed;
+
/* Command to set/unset chip in quiet mode */
struct wmi_tlv_set_quiet_cmd {
__le32 vdev_id;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2ea77bb880b1..a81a1ab2de19 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1694,10 +1694,11 @@ static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
.bw160 = WMI_10_2_PEER_160MHZ,
};
-void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
const struct wmi_channel_arg *arg)
{
u32 flags = 0;
+ struct ieee80211_channel *chan = NULL;
memset(ch, 0, sizeof(*ch));
@@ -1714,12 +1715,39 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
if (arg->chan_radar)
flags |= WMI_CHAN_FLAG_DFS;
+ ch->band_center_freq2 = 0;
ch->mhz = __cpu_to_le32(arg->freq);
ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
- if (arg->mode == MODE_11AC_VHT80_80)
+ if (arg->mode == MODE_11AC_VHT80_80) {
ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
- else
- ch->band_center_freq2 = 0;
+ chan = ieee80211_get_channel(ar->hw->wiphy,
+ arg->band_center_freq2 - 10);
+ }
+
+ if (arg->mode == MODE_11AC_VHT160) {
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+
+ if (arg->freq > arg->band_center_freq1) {
+ band_center_freq1 = arg->band_center_freq1 + 40;
+ band_center_freq2 = arg->band_center_freq1 - 40;
+ } else {
+ band_center_freq1 = arg->band_center_freq1 - 40;
+ band_center_freq2 = arg->band_center_freq1 + 40;
+ }
+
+ ch->band_center_freq1 =
+ __cpu_to_le32(band_center_freq1);
+ /* Minus 10 to get a defined 5G channel frequency */
+ chan = ieee80211_get_channel(ar->hw->wiphy,
+ band_center_freq2 - 10);
+ /* The center frequency of the entire VHT160 */
+ ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
+ }
+
+ if (chan && chan->flags & IEEE80211_CHAN_RADAR)
+ flags |= WMI_CHAN_FLAG_DFS_CFREQ2;
+
ch->min_power = arg->min_power;
ch->max_power = arg->max_power;
ch->reg_power = arg->max_reg_power;
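To make the VHT160 conversion above concrete: for 160 MHz operation on
channels 36-64 the caller passes band_center_freq1 = 5250 MHz, the
centre of the whole block. With the primary channel at 5180 MHz
(freq < centre) the code programs band_center_freq1 = 5250 - 40 =
5210 MHz (centre of the primary 80 MHz half) and sets
ch->band_center_freq2 = 5250 MHz, while the DFS check looks up
5250 + 40 - 10 = 5280 MHz, a defined 20 MHz channel inside the
secondary half. The numbers are a worked example, not taken from the
patch.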
@@ -7165,7 +7193,7 @@ ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
}
- ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
+ ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
@@ -7537,7 +7565,7 @@ ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
ch = &arg->channels[i];
ci = &cmd->chan_info[i];
- ath10k_wmi_put_wmi_channel(ci, ch);
+ ath10k_wmi_put_wmi_channel(ar, ci, ch);
}
return skb;
@@ -7628,12 +7656,8 @@ ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
- if (arg->peer_bw_rxnss_override)
- cmd->peer_bw_rxnss_override =
- __cpu_to_le32((arg->peer_bw_rxnss_override - 1) |
- BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET));
- else
- cmd->peer_bw_rxnss_override = 0;
+ cmd->peer_bw_rxnss_override =
+ __cpu_to_le32(arg->peer_bw_rxnss_override);
}
static int
@@ -8312,7 +8336,7 @@ ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to stack", pdev->loc_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Oversized AMSUs", pdev->oversize_amsdu);
+ "Oversized AMSDUs", pdev->oversize_amsdu);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors", pdev->phy_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
@@ -8945,7 +8969,7 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
for (i = 0; i < cap->peer_chan_len; i++) {
chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
- ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+ ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
}
ath10k_dbg(ar, ATH10K_DBG_WMI,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 6df415778374..511144b36231 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -940,6 +940,7 @@ struct wmi_cmd_map {
u32 vdev_spectral_scan_configure_cmdid;
u32 vdev_spectral_scan_enable_cmdid;
u32 request_stats_cmdid;
+ u32 request_peer_stats_info_cmdid;
u32 set_arp_ns_offload_cmdid;
u32 network_list_offload_config_cmdid;
u32 gtk_offload_cmdid;
@@ -2094,7 +2095,8 @@ enum wmi_channel_change_cause {
/* Indicate reason for channel switch */
#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
-
+/* DFS required on channel for 2nd segment of VHT160 and VHT80+80 */
+#define WMI_CHAN_FLAG_DFS_CFREQ2 (1 << 15)
#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */
/* HT Capabilities*/
@@ -2290,7 +2292,7 @@ struct wmi_service_ready_event {
* where FW can access this memory directly (or) by DMA.
*/
__le32 num_mem_reqs;
- struct wlan_host_mem_req mem_reqs[0];
+ struct wlan_host_mem_req mem_reqs[];
} __packed;
/* This is the definition from 10.X firmware branch */
@@ -2329,7 +2331,7 @@ struct wmi_10x_service_ready_event {
*/
__le32 num_mem_reqs;
- struct wlan_host_mem_req mem_reqs[0];
+ struct wlan_host_mem_req mem_reqs[];
} __packed;
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
@@ -3084,19 +3086,19 @@ struct wmi_chan_list_entry {
struct wmi_chan_list {
__le32 tag; /* WMI_CHAN_LIST_TAG */
__le32 num_chan;
- struct wmi_chan_list_entry channel_list[0];
+ struct wmi_chan_list_entry channel_list[];
} __packed;
struct wmi_bssid_list {
__le32 tag; /* WMI_BSSID_LIST_TAG */
__le32 num_bssid;
- struct wmi_mac_addr bssid_list[0];
+ struct wmi_mac_addr bssid_list[];
} __packed;
struct wmi_ie_data {
__le32 tag; /* WMI_IE_TAG */
__le32 ie_len;
- u8 ie_data[0];
+ u8 ie_data[];
} __packed;
struct wmi_ssid {
@@ -3107,7 +3109,7 @@ struct wmi_ssid {
struct wmi_ssid_list {
__le32 tag; /* WMI_SSID_LIST_TAG */
__le32 num_ssids;
- struct wmi_ssid ssids[0];
+ struct wmi_ssid ssids[];
} __packed;
/* prefix used by scan requestor ids on the host */
@@ -3309,7 +3311,7 @@ struct wmi_stop_scan_arg {
struct wmi_scan_chan_list_cmd {
__le32 num_scan_chans;
- struct wmi_channel chan_info[0];
+ struct wmi_channel chan_info[];
} __packed;
struct wmi_scan_chan_list_arg {
@@ -3393,12 +3395,12 @@ struct wmi_mgmt_rx_hdr_v2 {
struct wmi_mgmt_rx_event_v1 {
struct wmi_mgmt_rx_hdr_v1 hdr;
- u8 buf[0];
+ u8 buf[];
} __packed;
struct wmi_mgmt_rx_event_v2 {
struct wmi_mgmt_rx_hdr_v2 hdr;
- u8 buf[0];
+ u8 buf[];
} __packed;
struct wmi_10_4_mgmt_rx_hdr {
@@ -3413,7 +3415,7 @@ struct wmi_10_4_mgmt_rx_hdr {
struct wmi_10_4_mgmt_rx_event {
struct wmi_10_4_mgmt_rx_hdr hdr;
- u8 buf[0];
+ u8 buf[];
} __packed;
struct wmi_mgmt_rx_ext_info {
@@ -3453,14 +3455,14 @@ struct wmi_phyerr {
__le32 rssi_chains[4];
__le16 nf_chains[4];
__le32 buf_len;
- u8 buf[0];
+ u8 buf[];
} __packed;
struct wmi_phyerr_event {
__le32 num_phyerrs;
__le32 tsf_l32;
__le32 tsf_u32;
- struct wmi_phyerr phyerrs[0];
+ struct wmi_phyerr phyerrs[];
} __packed;
struct wmi_10_4_phyerr_event {
@@ -3477,7 +3479,7 @@ struct wmi_10_4_phyerr_event {
__le32 phy_err_mask[2];
__le32 tsf_timestamp;
__le32 buf_len;
- u8 buf[0];
+ u8 buf[];
} __packed;
struct wmi_radar_found_info {
@@ -3590,7 +3592,7 @@ struct wmi_mgmt_tx_hdr {
struct wmi_mgmt_tx_cmd {
struct wmi_mgmt_tx_hdr hdr;
- u8 buf[0];
+ u8 buf[];
} __packed;
struct wmi_echo_event {
@@ -3797,6 +3799,7 @@ struct wmi_pdev_param_map {
u32 enable_btcoex;
u32 rfkill_config;
u32 rfkill_enable;
+ u32 peer_stats_info_enable;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -4577,6 +4580,13 @@ struct wmi_request_stats_cmd {
struct wlan_inst_rssi_args inst_rssi_args;
} __packed;
+enum wmi_peer_stats_info_request_type {
+ /* request stats of one specified peer */
+ WMI_REQUEST_ONE_PEER_STATS_INFO = 0x01,
+ /* request stats of all peers belong to specified VDEV */
+ WMI_REQUEST_VDEV_ALL_PEER_STATS_INFO = 0x02,
+};
+
/* Suspend option */
enum {
/* suspend */
@@ -4618,7 +4628,7 @@ struct wmi_stats_event {
* By having a zero sized array, the pointer to data area
* becomes available without increasing the struct size
*/
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_10_2_stats_event {
@@ -4628,7 +4638,7 @@ struct wmi_10_2_stats_event {
__le32 num_vdev_stats;
__le32 num_peer_stats;
__le32 num_bcnflt_stats;
- u8 data[0];
+ u8 data[];
} __packed;
/*
@@ -5023,7 +5033,7 @@ struct wmi_vdev_install_key_cmd {
__le32 key_rxmic_len;
/* contains key followed by tx mic followed by rx mic */
- u8 key_data[0];
+ u8 key_data[];
} __packed;
struct wmi_vdev_install_key_arg {
@@ -5693,7 +5703,7 @@ struct wmi_bcn_tx_hdr {
struct wmi_bcn_tx_cmd {
struct wmi_bcn_tx_hdr hdr;
- u8 *bcn[0];
+ u8 *bcn[];
} __packed;
struct wmi_bcn_tx_arg {
@@ -6110,7 +6120,7 @@ struct wmi_bcn_info {
struct wmi_host_swba_event {
__le32 vdev_map;
- struct wmi_bcn_info bcn_info[0];
+ struct wmi_bcn_info bcn_info[];
} __packed;
struct wmi_10_2_4_bcn_info {
@@ -6120,7 +6130,7 @@ struct wmi_10_2_4_bcn_info {
struct wmi_10_2_4_host_swba_event {
__le32 vdev_map;
- struct wmi_10_2_4_bcn_info bcn_info[0];
+ struct wmi_10_2_4_bcn_info bcn_info[];
} __packed;
/* 16 words = 512 client + 1 word = for guard */
@@ -6161,7 +6171,7 @@ struct wmi_10_4_bcn_info {
struct wmi_10_4_host_swba_event {
__le32 vdev_map;
- struct wmi_10_4_bcn_info bcn_info[0];
+ struct wmi_10_4_bcn_info bcn_info[];
} __packed;
#define WMI_MAX_AP_VDEV 16
@@ -6508,7 +6518,10 @@ struct wmi_10_2_peer_assoc_complete_cmd {
__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
} __packed;
-#define PEER_BW_RXNSS_OVERRIDE_OFFSET 31
+/* NSS Mapping to FW */
+#define WMI_PEER_NSS_MAP_ENABLE BIT(31)
+#define WMI_PEER_NSS_160MHZ_MASK GENMASK(2, 0)
+#define WMI_PEER_NSS_80_80MHZ_MASK GENMASK(5, 3)
struct wmi_10_4_peer_assoc_complete_cmd {
struct wmi_10_2_peer_assoc_complete_cmd cmd;
@@ -7348,7 +7361,7 @@ void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
const struct wmi_start_scan_arg *arg);
void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
const struct wmi_wmm_params_arg *arg);
-void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
const struct wmi_channel_arg *arg);
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 59342d2797ca..30092841ac46 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include "ahb.h"
#include "debug.h"
+#include "hif.h"
#include <linux/remoteproc.h>
static const struct of_device_id ath11k_ahb_of_match[] = {
@@ -434,6 +435,16 @@ enum ext_irq_num {
tcl2host_status_ring,
};
+static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
+{
+ return ioread32(ab->mem + offset);
+}
+
+static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ iowrite32(value, ab->mem + offset);
+}
+
static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
{
int i;
@@ -575,7 +586,7 @@ static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
}
}
-int ath11k_ahb_start(struct ath11k_base *ab)
+static int ath11k_ahb_start(struct ath11k_base *ab)
{
ath11k_ahb_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
@@ -583,7 +594,7 @@ int ath11k_ahb_start(struct ath11k_base *ab)
return 0;
}
-void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
+static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
{
int i;
@@ -595,13 +606,13 @@ void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
}
}
-void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
__ath11k_ahb_ext_irq_disable(ab);
ath11k_ahb_sync_ext_irqs(ab);
}
-void ath11k_ahb_stop(struct ath11k_base *ab)
+static void ath11k_ahb_stop(struct ath11k_base *ab)
{
if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath11k_ahb_ce_irqs_disable(ab);
@@ -611,7 +622,7 @@ void ath11k_ahb_stop(struct ath11k_base *ab)
ath11k_ce_cleanup_pipes(ab);
}
-int ath11k_ahb_power_up(struct ath11k_base *ab)
+static int ath11k_ahb_power_up(struct ath11k_base *ab)
{
int ret;
@@ -622,7 +633,7 @@ int ath11k_ahb_power_up(struct ath11k_base *ab)
return ret;
}
-void ath11k_ahb_power_down(struct ath11k_base *ab)
+static void ath11k_ahb_power_down(struct ath11k_base *ab)
{
rproc_shutdown(ab->tgt_rproc);
}
@@ -788,7 +799,7 @@ static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
irq = platform_get_irq_byname(ab->pdev,
irq_name[irq_idx]);
ab->irq_num[irq_idx] = irq;
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
IRQF_TRIGGER_RISING,
irq_name[irq_idx], irq_grp);
@@ -834,8 +845,8 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
return ret;
}
-int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
- u8 *ul_pipe, u8 *dl_pipe)
+static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
@@ -877,6 +888,18 @@ int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
return 0;
}
+static const struct ath11k_hif_ops ath11k_ahb_hif_ops = {
+ .start = ath11k_ahb_start,
+ .stop = ath11k_ahb_stop,
+ .read32 = ath11k_ahb_read32,
+ .write32 = ath11k_ahb_write32,
+ .irq_enable = ath11k_ahb_ext_irq_enable,
+ .irq_disable = ath11k_ahb_ext_irq_disable,
+ .map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
+ .power_down = ath11k_ahb_power_down,
+ .power_up = ath11k_ahb_power_up,
+};
+
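With the ops table registered in ab->hif.ops, core code becomes
bus-agnostic; the ath11k_hif_* calls that replace direct AHB calls in
core.c below are thin dispatchers. hif.h itself is not shown in this
diff, but the wrappers it provides look along these lines (sketch):

	static inline int ath11k_hif_start(struct ath11k_base *ab)
	{
		return ab->hif.ops->start(ab);
	}

	static inline void ath11k_hif_stop(struct ath11k_base *ab)
	{
		ab->hif.ops->stop(ab);
	}

	static inline u32 ath11k_hif_read32(struct ath11k_base *ab, u32 address)
	{
		return ab->hif.ops->read32(ab, address);
	}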
static int ath11k_ahb_probe(struct platform_device *pdev)
{
struct ath11k_base *ab;
@@ -891,13 +914,7 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
return -EINVAL;
}
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- dev_err(&pdev->dev, "failed to get IO memory resource\n");
- return -ENXIO;
- }
-
- mem = devm_ioremap_resource(&pdev->dev, mem_res);
+ mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(mem)) {
dev_err(&pdev->dev, "ioremap error\n");
return PTR_ERR(mem);
@@ -909,12 +926,13 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
return ret;
}
- ab = ath11k_core_alloc(&pdev->dev);
+ ab = ath11k_core_alloc(&pdev->dev, 0, ATH11K_BUS_AHB);
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
}
+ ab->hif.ops = &ath11k_ahb_hif_ops;
ab->pdev = pdev;
ab->hw_rev = (enum ath11k_hw_rev)of_id->data;
ab->mem = mem;
@@ -993,12 +1011,17 @@ static struct platform_driver ath11k_ahb_driver = {
.remove = ath11k_ahb_remove,
};
-int ath11k_ahb_init(void)
+static int ath11k_ahb_init(void)
{
return platform_driver_register(&ath11k_ahb_driver);
}
+module_init(ath11k_ahb_init);
-void ath11k_ahb_exit(void)
+static void ath11k_ahb_exit(void)
{
platform_driver_unregister(&ath11k_ahb_driver);
}
+module_exit(ath11k_ahb_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
index 93f46dfe22df..6c7b26ac6545 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.h
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -10,26 +10,4 @@
#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
struct ath11k_base;
-static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
-{
- return ioread32(ab->mem + offset);
-}
-
-static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
-{
- iowrite32(value, ab->mem + offset);
-}
-
-void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab);
-void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab);
-int ath11k_ahb_start(struct ath11k_base *ab);
-void ath11k_ahb_stop(struct ath11k_base *ab);
-int ath11k_ahb_power_up(struct ath11k_base *ab);
-void ath11k_ahb_power_down(struct ath11k_base *ab);
-int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
- u8 *ul_pipe, u8 *dl_pipe);
-
-int ath11k_ahb_init(void);
-void ath11k_ahb_exit(void);
-
#endif
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index bf5657d2ae18..02501cc154fe 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -7,11 +7,11 @@
#include <linux/slab.h>
#include <linux/remoteproc.h>
#include <linux/firmware.h>
-#include "ahb.h"
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
+#include "hif.h"
unsigned int ath11k_debug_mask;
module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
@@ -41,6 +41,7 @@ u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx)
return ATH11K_INVALID_HW_MAC_ID;
}
}
+EXPORT_SYMBOL(ath11k_core_get_hw_mac_id);
static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
@@ -324,7 +325,7 @@ static void ath11k_core_stop(struct ath11k_base *ab)
{
if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath11k_qmi_firmware_stop(ab);
- ath11k_ahb_stop(ab);
+ ath11k_hif_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
@@ -347,7 +348,7 @@ static int ath11k_core_soc_create(struct ath11k_base *ab)
goto err_qmi_deinit;
}
- ret = ath11k_ahb_power_up(ab);
+ ret = ath11k_hif_power_up(ab);
if (ret) {
ath11k_err(ab, "failed to power up :%d\n", ret);
goto err_debugfs_reg;
@@ -415,7 +416,7 @@ static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
- ath11k_ahb_ext_irq_disable(ab);
+ ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_debug_pdev_destroy(ab);
}
@@ -443,7 +444,7 @@ static int ath11k_core_start(struct ath11k_base *ab,
goto err_wmi_detach;
}
- ret = ath11k_ahb_start(ab);
+ ret = ath11k_hif_start(ab);
if (ret) {
ath11k_err(ab, "failed to start HIF: %d\n", ret);
goto err_wmi_detach;
@@ -522,7 +523,7 @@ err_reo_cleanup:
err_mac_destroy:
ath11k_mac_destroy(ab);
err_hif_stop:
- ath11k_ahb_stop(ab);
+ ath11k_hif_stop(ab);
err_wmi_detach:
ath11k_wmi_detach(ab);
err_firmware_stop:
@@ -559,7 +560,7 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
ath11k_err(ab, "failed to create pdev core: %d\n", ret);
goto err_core_stop;
}
- ath11k_ahb_ext_irq_enable(ab);
+ ath11k_hif_irq_enable(ab);
mutex_unlock(&ab->core_lock);
return 0;
@@ -579,9 +580,9 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
mutex_lock(&ab->core_lock);
ath11k_thermal_unregister(ab);
- ath11k_ahb_ext_irq_disable(ab);
+ ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
- ath11k_ahb_stop(ab);
+ ath11k_hif_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
@@ -744,7 +745,7 @@ void ath11k_core_deinit(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
- ath11k_ahb_power_down(ab);
+ ath11k_hif_power_down(ab);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
}
@@ -754,11 +755,12 @@ void ath11k_core_free(struct ath11k_base *ab)
kfree(ab);
}
-struct ath11k_base *ath11k_core_alloc(struct device *dev)
+struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath11k_bus bus)
{
struct ath11k_base *ab;
- ab = kzalloc(sizeof(*ab), GFP_KERNEL);
+ ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
if (!ab)
return NULL;
@@ -784,24 +786,3 @@ err_sc_free:
kfree(ab);
return NULL;
}
-
-static int __init ath11k_init(void)
-{
- int ret;
-
- ret = ath11k_ahb_init();
- if (ret)
- printk(KERN_ERR "failed to register ath11k ahb driver: %d\n",
- ret);
- return ret;
-}
-module_init(ath11k_init);
-
-static void __exit ath11k_exit(void)
-{
- ath11k_ahb_exit();
-}
-module_exit(ath11k_exit);
-
-MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 6e7b8ecd09a6..e04f0e711779 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -60,9 +60,14 @@ static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
WME_AC_VO);
}
+enum ath11k_skb_flags {
+ ATH11K_SKB_HW_80211_ENCAP = BIT(0),
+};
+
struct ath11k_skb_cb {
dma_addr_t paddr;
u8 eid;
+ u8 flags;
struct ath11k *ar;
struct ieee80211_vif *vif;
} __packed;
@@ -341,6 +346,11 @@ struct ath11k_sta {
u8 rssi_comb;
struct ath11k_htt_tx_stats *tx_stats;
struct ath11k_rx_peer_stats *rx_stats;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* protected by conf_mutex */
+ bool aggr_mode;
+#endif
};
#define ATH11K_NUM_CHANS 41
@@ -387,6 +397,7 @@ struct ath11k_debug {
u32 pktlog_mode;
u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN];
+ u32 rx_filter;
};
struct ath11k_per_peer_tx_stats {
@@ -596,7 +607,9 @@ struct ath11k_base {
void __iomem *mem;
unsigned long mem_len;
- const struct ath11k_hif_ops *hif_ops;
+ struct {
+ const struct ath11k_hif_ops *ops;
+ } hif;
struct ath11k_ce ce;
struct timer_list rx_replenish_retry;
@@ -650,6 +663,13 @@ struct ath11k_base {
/* protected by data_lock */
u32 fw_crash_counter;
} stats;
+ u32 pktlog_defs_checksum;
+
+ /* Round-robin based TCL ring selector */
+ atomic_t tcl_ring_selector;
+
+ /* must be last */
+ u8 drv_priv[0] __aligned(sizeof(void *));
};
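The zero-length drv_priv tail plus the new priv_size argument to
ath11k_core_alloc() let each bus glue embed its private state in the
same allocation. The AHB probe in this patch still passes priv_size = 0;
a bus that does need private data would use the tail roughly as follows
(struct and field names are hypothetical):

	struct ath11k_bus_priv {
		void __iomem *bar0;
		int irq;
	};

	ab = ath11k_core_alloc(dev, sizeof(struct ath11k_bus_priv),
			       ATH11K_BUS_AHB);
	if (!ab)
		return -ENOMEM;

	/* drv_priv sits at the tail of *ab, aligned for pointers. */
	bp = (struct ath11k_bus_priv *)ab->drv_priv;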
struct ath11k_fw_stats_pdev {
@@ -786,7 +806,8 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);
void ath11k_core_deinit(struct ath11k_base *ath11k);
-struct ath11k_base *ath11k_core_alloc(struct device *dev);
+struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath11k_bus bus);
void ath11k_core_free(struct ath11k_base *ath11k);
int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
struct ath11k_board_data *bd);
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
index 8d485171b0b3..3fd6b5af073b 100644
--- a/drivers/net/wireless/ath/ath11k/debug.c
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -195,7 +195,7 @@ void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
total_vdevs_started += ar->num_started_vdevs;
}
- is_end = ((++num_vdev) == total_vdevs_started ? true : false);
+ is_end = ((++num_vdev) == total_vdevs_started);
list_splice_tail_init(&stats.vdevs,
&ar->debug.fw_stats.vdevs);
@@ -215,7 +215,7 @@ void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
- is_end = ((++num_bcn) == ar->num_started_vdevs ? true : false);
+ is_end = ((++num_bcn) == ar->num_started_vdevs);
list_splice_tail_init(&stats.bcn,
&ar->debug.fw_stats.bcn);
@@ -698,6 +698,8 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
tlv_filter = ath11k_mac_mon_status_filter_default;
}
+ ar->debug.rx_filter = tlv_filter.rx_filter;
+
ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
@@ -803,6 +805,9 @@ static const struct file_operations fops_soc_rx_stats = {
int ath11k_debug_pdev_create(struct ath11k_base *ab)
{
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index 97e7306c506d..c30085406bfb 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -67,7 +67,7 @@ struct debug_htt_stats_req {
u8 peer_addr[ETH_ALEN];
struct completion cmpln;
u32 buf_len;
- u8 buf[0];
+ u8 buf[];
};
struct ath_pktlog_hdr {
@@ -77,9 +77,11 @@ struct ath_pktlog_hdr {
u16 size;
u32 timestamp;
u32 type_specific_data;
- u8 payload[0];
+ u8 payload[];
};
+#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
+
#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
@@ -112,6 +114,12 @@ enum ath11k_pktlog_enum {
ATH11K_PKTLOG_TYPE_LITE_RX = 24,
};
+enum ath11k_dbg_aggr_mode {
+ ATH11K_DBG_AGGR_MODE_AUTO,
+ ATH11K_DBG_AGGR_MODE_MANUAL,
+ ATH11K_DBG_AGGR_MODE_MAX,
+};
+
__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
@@ -182,6 +190,11 @@ static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
return ar->debug.extd_rx_stats;
}
+static inline int ath11k_debug_rx_filter(struct ath11k *ar)
+{
+ return ar->debug.rx_filter;
+}
+
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
void
@@ -263,6 +276,11 @@ static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr
return false;
}
+static inline int ath11k_debug_rx_filter(struct ath11k *ar)
+{
+ return 0;
+}
+
static inline void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
struct ath11k_per_peer_tx_stats *peer_stats,
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
index 5db0c27de475..6b532dc99c98 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
@@ -4306,6 +4306,7 @@ void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
u32 len;
u64 cookie;
int ret;
+ bool send_completion = false;
u8 pdev_id;
msg = (struct ath11k_htt_extd_stats_msg *)skb->data;
@@ -4330,11 +4331,11 @@ void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
return;
spin_lock_bh(&ar->debug.htt_stats.lock);
- if (stats_req->done) {
- spin_unlock_bh(&ar->debug.htt_stats.lock);
- return;
- }
- stats_req->done = true;
+
+ stats_req->done = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_DONE, msg->info1);
+ if (stats_req->done)
+ send_completion = true;
+
spin_unlock_bh(&ar->debug.htt_stats.lock);
len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
@@ -4344,7 +4345,8 @@ void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
if (ret)
ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
- complete(&stats_req->cmpln);
+ if (send_completion)
+ complete(&stats_req->cmpln);
}
static ssize_t ath11k_read_htt_stats_type(struct file *file,
@@ -4497,28 +4499,54 @@ static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
return -EPERM;
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ if (ar->debug.htt_stats.stats_req) {
+ ret = -EAGAIN;
+ goto err_unlock;
+ }
+
stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
- if (!stats_req)
- return -ENOMEM;
+ if (!stats_req) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
- mutex_lock(&ar->conf_mutex);
ar->debug.htt_stats.stats_req = stats_req;
stats_req->type = type;
+
ret = ath11k_dbg_htt_stats_req(ar);
- mutex_unlock(&ar->conf_mutex);
if (ret < 0)
goto out;
file->private_data = stats_req;
+
+ mutex_unlock(&ar->conf_mutex);
+
return 0;
out:
vfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
return ret;
}
static int ath11k_release_htt_stats(struct inode *inode, struct file *file)
{
+ struct ath11k *ar = inode->i_private;
+
+ mutex_lock(&ar->conf_mutex);
vfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
index 23a6baa9e95a..682a6ff222bd 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
@@ -239,7 +239,7 @@ struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
*/
struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
u32 hist_bin_size;
- u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
+ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
};
/* == SOC ERROR STATS == */
@@ -550,7 +550,7 @@ struct htt_tx_hwq_stats_cmn_tlv {
struct htt_tx_hwq_difs_latency_stats_tlv_v {
u32 hist_intvl;
/* histogram of ppdu post to hwsch - > cmd status received */
- u32 difs_latency_hist[0]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
+ u32 difs_latency_hist[]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
};
/* NOTE: Variable length TLV, use length spec to infer array size */
@@ -586,7 +586,7 @@ struct htt_tx_hwq_fes_result_stats_tlv_v {
struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
u32 hist_bin_size;
/* Histogram of number of mpdus on tried mpdu */
- u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
+ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
};
/* NOTE: Variable length TLV, use length spec to infer array size
@@ -1584,7 +1584,7 @@ struct htt_pdev_stats_twt_session_tlv {
struct htt_pdev_stats_twt_sessions_tlv {
u32 pdev_id;
u32 num_sessions;
- struct htt_pdev_stats_twt_session_tlv twt_session[0];
+ struct htt_pdev_stats_twt_session_tlv twt_session[];
};
enum htt_rx_reo_resource_sample_id_enum {
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 389dac219238..7308ed254232 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -8,6 +8,8 @@
#include "core.h"
#include "peer.h"
#include "debug.h"
+#include "dp_tx.h"
+#include "debug_htt_stats.h"
void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
@@ -435,13 +437,22 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
return 0;
out:
vfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
return ret;
}
static int
ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
{
+ struct ieee80211_sta *sta = inode->i_private;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
vfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
return 0;
}
@@ -533,6 +544,282 @@ static const struct file_operations fops_peer_pktlog = {
.llseek = default_llseek,
};
+static ssize_t ath11k_dbg_sta_write_delba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, initiator, reason;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
+ if (ret != 3)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, initiator, reason);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, initiator,
+ reason);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_delba = {
+ .write = ath11k_dbg_sta_write_delba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, status;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &status);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, status);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, status);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba_resp = {
+ .write = ath11k_dbg_sta_write_addba_resp,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_addba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, buf_size;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &buf_size);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, buf_size);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, buf_size);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba = {
+ .write = ath11k_dbg_sta_write_addba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[64];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len,
+ "aggregation mode: %s\n\n%s\n%s\n",
+ (arsta->aggr_mode == ATH11K_DBG_AGGR_MODE_AUTO) ?
+ "auto" : "manual", "auto = 0", "manual = 1");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 aggr_mode;
+ int ret;
+
+ if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+ return -EINVAL;
+
+ if (aggr_mode >= ATH11K_DBG_AGGR_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ aggr_mode == arsta->aggr_mode) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear addba session ret: %d\n",
+ ret);
+ goto out;
+ }
+
+ arsta->aggr_mode = aggr_mode;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_aggr_mode = {
+ .read = ath11k_dbg_sta_read_aggr_mode,
+ .write = ath11k_dbg_sta_write_aggr_mode,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+ath11k_write_htt_peer_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ int ret;
+ u8 type;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (!type)
+ return ret;
+
+ mutex_lock(&ar->conf_mutex);
+ cfg_params.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params.cfg0 |= FIELD_PREP(GENMASK(15, 1),
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+
+ cfg_params.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(7, 0), sta->addr[0]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(15, 8), sta->addr[1]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(23, 16), sta->addr[2]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(31, 24), sta->addr[3]);
+
+ cfg_params.cfg3 |= FIELD_PREP(GENMASK(7, 0), sta->addr[4]);
+ cfg_params.cfg3 |= FIELD_PREP(GENMASK(15, 8), sta->addr[5]);
+
+ cfg_params.cfg3 |= ATH11K_HTT_PEER_STATS_RESET;
+
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt peer stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_peer_stats_reset = {
+ .write = ath11k_write_htt_peer_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@@ -550,4 +837,14 @@ void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
debugfs_create_file("peer_pktlog", 0644, dir, sta,
&fops_peer_pktlog);
+
+ debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
+ debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
+ debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
+ debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
+
+ if (test_bit(WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET,
+ ar->ab->wmi_ab.svc_map))
+ debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
+ &fops_htt_peer_stats_reset);
}
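Taken together these knobs let BA sessions be driven by hand: write 1
to aggr_mode to switch the station to manual mode (which also clears
any existing sessions), then write "<tid> <buf_size>" to addba,
"<tid> <status>" to addba_resp, or "<tid> <initiator> <reason>" to
delba. The writes are silently accepted but ignored unless the radio
is up and manual mode is active; the command formats above are
illustrative usage notes, not part of the patch.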
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 50350f77b309..9ae743e528af 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -701,6 +701,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
done:
return tot_work_done;
}
+EXPORT_SYMBOL(ath11k_dp_service_srng);
void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
@@ -880,6 +881,8 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
spin_lock_init(&dp->reo_cmd_lock);
+ dp->reo_cmd_cache_flush_count = 0;
+
ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
@@ -909,8 +912,10 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
- if (!dp->tx_ring[i].tx_status)
+ if (!dp->tx_ring[i].tx_status) {
+ ret = -ENOMEM;
goto fail_cmn_srng_cleanup;
+ }
}
for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 551f9c9fb847..058a5c1d86ff 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -36,6 +36,7 @@ struct dp_rx_tid {
struct ath11k_base *ab;
};
+#define DP_REO_DESC_FREE_THRESHOLD 64
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
struct dp_reo_cache_flush_elem {
@@ -169,8 +170,8 @@ struct ath11k_pdev_dp {
#define DP_WBM_RELEASE_RING_SIZE 64
#define DP_TCL_DATA_RING_SIZE 512
-#define DP_TX_COMP_RING_SIZE 8192
-#define DP_TX_IDR_SIZE (DP_TX_COMP_RING_SIZE << 1)
+#define DP_TX_COMP_RING_SIZE 32768
+#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
#define DP_TCL_CMD_RING_SIZE 32
#define DP_TCL_STATUS_RING_SIZE 32
#define DP_REO_DST_RING_MAX 4
@@ -222,7 +223,13 @@ struct ath11k_dp {
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list;
struct list_head reo_cmd_cache_flush_list;
- /* protects access to reo_cmd_list and reo_cmd_cache_flush_list */
+ u32 reo_cmd_cache_flush_count;
+ /* protects access to the fields below:
+ * - reo_cmd_list
+ * - reo_cmd_cache_flush_list
+ * - reo_cmd_cache_flush_count
+ */
spinlock_t reo_cmd_lock;
};
@@ -1510,6 +1517,7 @@ struct htt_ext_stats_cfg_params {
* 4 bytes.
*/
+#define HTT_T2H_EXT_STATS_INFO1_DONE BIT(11)
#define HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
struct ath11k_htt_extd_stats_msg {
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index f74a0e74bf3e..a54610d75c40 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -252,7 +252,7 @@ static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
__le32_to_cpu(rx_desc->mpdu_start_tag));
- return tlv_tag == HAL_RX_MPDU_START ? true : false;
+ return tlv_tag == HAL_RX_MPDU_START;
}
static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
@@ -565,6 +565,7 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
+ dp->reo_cmd_cache_flush_count--;
dma_unmap_single(ab->dev, cmd_cache->data.paddr,
cmd_cache->data.size, DMA_BIDIRECTIONAL);
kfree(cmd_cache->data.vaddr);
@@ -651,15 +652,18 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ dp->reo_cmd_cache_flush_count++;
spin_unlock_bh(&dp->reo_cmd_lock);
/* Flush and invalidate aged REO desc from HW cache */
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
list) {
- if (time_after(jiffies, elem->ts +
+ if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
+ time_after(jiffies, elem->ts +
msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
spin_unlock_bh(&dp->reo_cmd_lock);
ath11k_dp_reo_cache_flush(ab, &elem->data);
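The hunk above turns a pure timeout policy into a threshold-or-timeout one: aged entries are still reclaimed after DP_REO_DESC_FREE_TIMEOUT_MS, but a backlog beyond DP_REO_DESC_FREE_THRESHOLD now forces early flushes as well. A minimal userspace sketch of that reclaim rule, with illustrative names and a 1 s timeout standing in for the real constants:

#include <stdbool.h>
#include <time.h>

#define FREE_THRESHOLD 64	/* stands in for DP_REO_DESC_FREE_THRESHOLD */
#define FREE_TIMEOUT_SEC 1	/* stands in for DP_REO_DESC_FREE_TIMEOUT_MS */

struct cache_elem {
	time_t ts;		/* time the entry was queued */
};

/* Reclaim under backlog pressure, or once the entry has gone stale. */
static bool should_flush(unsigned int count, const struct cache_elem *e,
			 time_t now)
{
	return count > FREE_THRESHOLD || now > e->ts + FREE_TIMEOUT_SEC;
}

int main(void)
{
	struct cache_elem e = { .ts = time(NULL) };

	/* 65 queued entries exceed the threshold, so flush immediately. */
	return should_flush(65, &e, time(NULL)) ? 0 : 1;
}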
@@ -892,7 +896,7 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
if (!vaddr) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
@@ -1491,7 +1495,8 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
return;
}
- trace_ath11k_htt_pktlog(ar, data->payload, hdr->size);
+ trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
+ ar->ab->pktlog_defs_checksum);
}
static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
@@ -2265,6 +2270,7 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
struct ieee80211_hdr *hdr;
struct sk_buff *last_buf;
u8 l3_pad_bytes;
+ u8 *hdr_status;
u16 msdu_len;
int ret;
@@ -2293,8 +2299,13 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
skb_pull(msdu, HAL_RX_DESC_SIZE);
} else if (!rxcb->is_continuation) {
if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
ret = -EINVAL;
ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+ sizeof(struct ieee80211_hdr));
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(struct hal_rx_desc));
goto free_out;
}
skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
@@ -2402,12 +2413,12 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
try_again:
while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
- struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
- desc->buf_addr_info.info1);
+ desc.buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@@ -2435,7 +2446,7 @@ try_again:
total_msdu_reaped++;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
- desc->info0);
+ desc.info0);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
@@ -2443,15 +2454,15 @@ try_again:
continue;
}
- rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->mac_id = mac_id;
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
- desc->info0);
+ desc.info0);
__skb_queue_tail(&msdu_list, msdu);
@@ -2717,7 +2728,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
- continue;
+ goto move_next;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
@@ -2736,13 +2747,16 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
tlv = (struct hal_tlv_hdr *)skb->data;
if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
HAL_RX_STATUS_BUFFER_DONE) {
- ath11k_hal_srng_src_get_next_entry(ab, srng);
- continue;
+ ath11k_warn(ab, "mon status DONE not set %lx\n",
+ FIELD_GET(HAL_TLV_HDR_TAG,
+ tlv->tl));
+ dev_kfree_skb_any(skb);
+ goto move_next;
}
__skb_queue_tail(skb_list, skb);
}
-
+move_next:
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
&buf_id, GFP_ATOMIC);
@@ -2960,8 +2974,8 @@ static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer
return 0;
mic_fail:
- (ATH11K_SKB_RXCB(msdu))->is_first_msdu = 1;
- (ATH11K_SKB_RXCB(msdu))->is_last_msdu = 1;
+ (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
+ (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
@@ -3389,6 +3403,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
struct sk_buff *msdu;
struct ath11k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
+ u8 *hdr_status;
u16 msdu_len;
spin_lock_bh(&rx_ring->idr_lock);
@@ -3426,6 +3441,17 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
rx_desc = (struct hal_rx_desc *)msdu->data;
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+ sizeof(struct ieee80211_hdr));
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(struct hal_rx_desc));
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 7aac4b0eea0c..41c990aec6b7 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -9,14 +9,14 @@
#include "hw.h"
#include "peer.h"
-/* NOTE: Any of the mapped ring id value must not exceed DP_TCL_NUM_RING_MAX */
-static const u8
-ath11k_txq_tcl_ring_map[ATH11K_HW_MAX_QUEUES] = { 0x0, 0x1, 0x2, 0x2 };
-
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
- /* TODO: Determine encap type based on vif_type and configuration */
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+ return HAL_TCL_ENCAP_TYPE_ETHERNET;
+
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
@@ -40,8 +40,11 @@ static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
- if (!ieee80211_is_data_qos(hdr->frame_control))
+ if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ else if (!ieee80211_is_data_qos(hdr->frame_control))
return HAL_DESC_REO_NON_QOS_TID;
else
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
@@ -84,15 +87,31 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
u8 pool_id;
u8 hal_ring_id;
int ret;
+ u8 ring_selector = 0, ring_map = 0;
+ bool tcl_ring_retry;
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
- if (!ieee80211_is_data(hdr->frame_control))
+ if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
+ !ieee80211_is_data(hdr->frame_control))
return -ENOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
- ti.ring_id = ath11k_txq_tcl_ring_map[pool_id];
+
+ /* Select one of the 3 TCL rings in a round-robin fashion,
+ * driven by the tcl_ring_selector counter. If that ring is
+ * full/busy, we resort to the other available rings; if all
+ * rings are full, we drop the packet.
+ * TODO: add throttling logic for when all rings are full.
+ */
+ ring_selector = atomic_inc_return(&ab->tcl_ring_selector);
+
+tcl_ring_sel:
+ tcl_ring_retry = false;
+ ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
+ ring_map |= BIT(ti.ring_id);
tx_ring = &dp->tx_ring[ti.ring_id];
@@ -101,8 +120,14 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
spin_unlock_bh(&tx_ring->tx_idr_lock);
- if (ret < 0)
- return -ENOSPC;
+ if (ret < 0) {
+ if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1))
+ return -ENOSPC;
+
+ /* Check if the next ring is available */
+ ring_selector++;
+ goto tcl_ring_sel;
+ }
ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
@@ -149,7 +174,10 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
* skb_checksum_help() is needed
*/
case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ /* no need to encap */
+ break;
case HAL_TCL_ENCAP_TYPE_802_3:
+ default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
goto fail_remove_idr;
@@ -178,11 +206,21 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (!hal_tcl_desc) {
/* NOTE: It is highly unlikely we'll be running out of tcl_ring
* desc because the desc is directly enqueued onto hw queue.
- * So add tx packet throttling logic in future if required.
*/
ath11k_hal_srng_access_end(ab, tcl_ring);
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
+
+ /* Checking another ring for a free TCL descriptor only after
+ * this ring turns out to be full is cheaper than checking
+ * every ring up front for each packet tx.
+ * Restart ring selection if some rings have not been tried yet.
+ */
+ if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
+ tcl_ring_retry = true;
+ ring_selector++;
+ }
+
goto fail_unmap_dma;
}
@@ -206,6 +244,9 @@ fail_remove_idr:
FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
spin_unlock_bh(&tx_ring->tx_idr_lock);
+ if (tcl_ring_retry)
+ goto tcl_ring_sel;
+
return ret;
}
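The flow above starts from a round-robin counter, marks each tried ring in a bitmap, retries the next ring on a full IDR or TCL ring, and drops the packet only once every ring is marked. A compile-and-run model of that selection loop; the names are illustrative, not the driver's own:

#include <stdio.h>

#define NUM_RINGS 3	/* mirrors DP_TCL_NUM_RING_MAX */

/* Return the first ring not yet marked in 'tried', starting from
 * 'selector', or -1 once every ring has been attempted. */
static int pick_ring(unsigned int selector, unsigned int *tried)
{
	int i;

	for (i = 0; i < NUM_RINGS; i++) {
		int ring = (selector + i) % NUM_RINGS;

		if (*tried & (1u << ring))
			continue;
		*tried |= 1u << ring;
		return ring;
	}
	return -1;	/* all rings full: caller drops the packet */
}

int main(void)
{
	unsigned int tried = 0;

	/* First pick lands on ring 2; retries walk the remaining rings. */
	printf("%d %d %d %d\n",
	       pick_ring(2, &tried), pick_ring(2, &tried),
	       pick_ring(2, &tried), pick_ring(2, &tried));
	return 0;	/* prints: 2 0 1 -1 */
}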
@@ -543,8 +584,12 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+ /* cmd_num starts from 1; on failure, return the error code */
+ if (cmd_num < 0)
+ return cmd_num;
+
/* reo cmd ring descriptors has cmd_num starting from 1 */
- if (cmd_num <= 0)
+ if (cmd_num == 0)
return -EINVAL;
if (!cb)
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 9e40c4bdd674..d63785178afa 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -3,10 +3,10 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/dma-mapping.h>
-#include "ahb.h"
#include "hal_tx.h"
#include "debug.h"
#include "hal_desc.h"
+#include "hif.h"
static const struct hal_srng_config hw_srng_config[] = {
/* TODO: max_rings can populated by querying HW capabilities */
@@ -351,11 +351,12 @@ static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
addr = HAL_CE_DST_RING_CTRL +
srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
- val = ath11k_ahb_read32(ab, addr);
+
+ val = ath11k_hif_read32(ab, addr);
val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
srng->u.dst_ring.max_buffer_length);
- ath11k_ahb_write32(ab, addr, val);
+ ath11k_hif_write32(ab, addr, val);
}
static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
@@ -369,34 +370,34 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
- ath11k_ahb_write32(ab, reg_base +
- HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
+ ath11k_hif_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
(u32)srng->msi_addr);
val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
((u64)srng->msi_addr >>
HAL_ADDR_MSB_REG_SHIFT)) |
HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
- ath11k_ahb_write32(ab, reg_base +
+ ath11k_hif_write32(ab, reg_base +
HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val);
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET,
srng->msi_data);
}
- ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
((u64)srng->ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
(srng->entry_size * srng->num_entries));
- ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);
val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
- ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);
/* interrupt setup */
val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
@@ -406,22 +407,22 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
(srng->intr_batch_cntr_thres_entries *
srng->entry_size));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET,
val);
hp_addr = hal->rdp.paddr +
((unsigned long)srng->u.dst_ring.hp_addr -
(unsigned long)hal->rdp.vaddr);
- ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
hp_addr & HAL_ADDR_LSB_REG_MASK);
- ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
/* Initialize head and tail pointers to indicate ring is empty */
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
- ath11k_ahb_write32(ab, reg_base, 0);
- ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
+ ath11k_hif_write32(ab, reg_base, 0);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
*srng->u.dst_ring.hp_addr = 0;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
@@ -434,7 +435,7 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
val |= HAL_REO1_RING_MISC_MSI_SWAP;
val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
- ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
}
static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
@@ -448,34 +449,34 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
- ath11k_ahb_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
+ ath11k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
(u32)srng->msi_addr);
val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
((u64)srng->msi_addr >>
HAL_ADDR_MSB_REG_SHIFT)) |
HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
- ath11k_ahb_write32(ab, reg_base +
+ ath11k_hif_write32(ab, reg_base +
HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET,
val);
- ath11k_ahb_write32(ab, reg_base +
+ ath11k_hif_write32(ab, reg_base +
HAL_TCL1_RING_MSI1_DATA_OFFSET,
srng->msi_data);
}
- ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
((u64)srng->ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
(srng->entry_size * srng->num_entries));
- ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
- ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);
/* interrupt setup */
/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
@@ -488,7 +489,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
(srng->intr_batch_cntr_thres_entries *
srng->entry_size));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET,
val);
@@ -497,7 +498,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
srng->u.src_ring.low_threshold);
}
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET,
val);
@@ -505,18 +506,18 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
tp_addr = hal->rdp.paddr +
((unsigned long)srng->u.src_ring.tp_addr -
(unsigned long)hal->rdp.vaddr);
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET,
tp_addr & HAL_ADDR_LSB_REG_MASK);
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET,
tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
}
/* Initialize head and tail pointers to indicate ring is empty */
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
- ath11k_ahb_write32(ab, reg_base, 0);
- ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+ ath11k_hif_write32(ab, reg_base, 0);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
*srng->u.src_ring.tp_addr = 0;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
@@ -533,7 +534,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
- ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
}
static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
@@ -889,13 +890,13 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem,
srng->u.src_ring.hp);
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem,
srng->u.dst_ring.tp);
@@ -929,20 +930,20 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
HAL_WBM_IDLE_SCATTER_BUF_SIZE;
}
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
reg_scatter_buf_sz * nsbufs));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_RING_BASE_LSB,
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_RING_BASE_MSB,
FIELD_PREP(
@@ -953,12 +954,12 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
BASE_ADDR_MATCH_TAG_VAL));
/* Setup head and tail pointers for the idle list */
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
sbuf[nsbufs - 1].paddr));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
FIELD_PREP(
@@ -967,18 +968,18 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
(end_offset >> 2)));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
sbuf[0].paddr));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
sbuf[0].paddr));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
FIELD_PREP(
@@ -986,13 +987,13 @@ void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
0));
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
2 * tot_link_desc);
/* Enable the SRNG */
- ath11k_ahb_write32(ab,
+ ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
}
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 7722822a0456..780a3e11b609 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -599,7 +599,7 @@ struct hal_srng {
/* Interrupt mitigation - timer threshold in us */
#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
-#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
/* HW SRNG configuration table */
struct hal_srng_config {
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index 5e200380cca4..8a592814efa0 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -2,6 +2,8 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
+#include "core.h"
+
#ifndef ATH11K_HAL_DESC_H
#define ATH11K_HAL_DESC_H
@@ -477,7 +479,7 @@ enum hal_tlv_tag {
struct hal_tlv_hdr {
u32 tl;
- u8 value[0];
+ u8 value[];
} __packed;
#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
@@ -1972,7 +1974,7 @@ struct hal_rx_reo_queue {
u32 processed_total_bytes;
u32 info5;
u32 rsvd[3];
- struct hal_rx_reo_queue_ext ext_desc[0];
+ struct hal_rx_reo_queue_ext ext_desc[];
} __packed;
/* hal_rx_reo_queue
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index f277c9434a25..129c9e1efeb9 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -3,12 +3,12 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
-#include "ahb.h"
#include "debug.h"
#include "hal.h"
#include "hal_tx.h"
#include "hal_rx.h"
#include "hal_desc.h"
+#include "hif.h"
static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
u8 owner, u8 buffer_type, u32 magic)
@@ -804,34 +804,34 @@ void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map)
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 val;
- val = ath11k_ahb_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
HAL_SRNG_RING_ID_REO2SW1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
- ath11k_ahb_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
- ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0,
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1,
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2,
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3,
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
ring_hash_map));
- ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
ring_hash_map));
- ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
ring_hash_map));
- ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
ring_hash_map));
}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index e863e4abfcc1..c436191ae1e8 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -23,7 +23,7 @@ struct hal_rx_wbm_rel_info {
struct hal_rx_mon_status_tlv_hdr {
u32 hdr;
- u8 value[0];
+ u8 value[];
};
enum hal_rx_su_mu_coding {
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
index e4aa7e8a1284..81937c29ffca 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -3,9 +3,10 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
-#include "ahb.h"
+#include "hal_desc.h"
#include "hal.h"
#include "hal_tx.h"
+#include "hif.h"
#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
@@ -83,11 +84,11 @@ void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
u32 value;
int cnt = 0;
- ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG);
/* Enable read/write access */
ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
- ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
@@ -118,15 +119,15 @@ void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
}
for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
- ath11k_ahb_write32(ab, addr, *(u32 *)&hw_map_val[i]);
+ ath11k_hif_write32(ab, addr, *(u32 *)&hw_map_val[i]);
addr += 4;
}
/* Disable read/write access */
- ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG);
ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
- ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG,
ctrl_reg_val);
}
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
index ce48a61bfb66..d4760a20fdac 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -7,6 +7,7 @@
#define ATH11K_HAL_TX_H
#include "hal_desc.h"
+#include "core.h"
#define HAL_TX_ADDRX_EN 1
#define HAL_TX_ADDRY_EN 2
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
new file mode 100644
index 000000000000..165f7e51c238
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+
+struct ath11k_hif_ops {
+ u32 (*read32)(struct ath11k_base *sc, u32 address);
+ void (*write32)(struct ath11k_base *sc, u32 address, u32 data);
+ void (*irq_enable)(struct ath11k_base *sc);
+ void (*irq_disable)(struct ath11k_base *sc);
+ int (*start)(struct ath11k_base *sc);
+ void (*stop)(struct ath11k_base *sc);
+ int (*power_up)(struct ath11k_base *sc);
+ void (*power_down)(struct ath11k_base *sc);
+ int (*map_service_to_pipe)(struct ath11k_base *sc, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+};
+
+static inline int ath11k_hif_start(struct ath11k_base *sc)
+{
+ return sc->hif.ops->start(sc);
+}
+
+static inline void ath11k_hif_stop(struct ath11k_base *sc)
+{
+ sc->hif.ops->stop(sc);
+}
+
+static inline void ath11k_hif_irq_enable(struct ath11k_base *sc)
+{
+ sc->hif.ops->irq_enable(sc);
+}
+
+static inline void ath11k_hif_irq_disable(struct ath11k_base *sc)
+{
+ sc->hif.ops->irq_disable(sc);
+}
+
+static inline int ath11k_hif_power_up(struct ath11k_base *sc)
+{
+ return sc->hif.ops->power_up(sc);
+}
+
+static inline void ath11k_hif_power_down(struct ath11k_base *sc)
+{
+ sc->hif.ops->power_down(sc);
+}
+
+static inline u32 ath11k_hif_read32(struct ath11k_base *sc, u32 address)
+{
+ return sc->hif.ops->read32(sc, address);
+}
+
+static inline void ath11k_hif_write32(struct ath11k_base *sc, u32 address, u32 data)
+{
+ sc->hif.ops->write32(sc, address, data);
+}
+
+static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ return sc->hif.ops->map_service_to_pipe(sc, service_id, ul_pipe, dl_pipe);
+}
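The new hif.h routes every register access and lifecycle call through an ops table instead of hard-coded ath11k_ahb_* calls, so a PCI backend can plug in beside AHB. A self-contained sketch of the same indirection; the fake_* names are hypothetical stand-ins, not the driver's real backends:

#include <stdio.h>

struct fake_base;

struct fake_hif_ops {
	unsigned int (*read32)(struct fake_base *sc, unsigned int addr);
};

struct fake_base {
	const struct fake_hif_ops *ops;	/* filled in by the bus backend */
};

/* One bus backend's accessor; a real one would do an MMIO read. */
static unsigned int fake_ahb_read32(struct fake_base *sc, unsigned int addr)
{
	(void)sc;
	return addr ^ 0xa;
}

static const struct fake_hif_ops fake_ahb_ops = {
	.read32 = fake_ahb_read32,
};

int main(void)
{
	struct fake_base sc = { .ops = &fake_ahb_ops };

	/* Core code calls through the table, never the backend directly. */
	printf("0x%x\n", sc.ops->read32(&sc, 0x100));
	return 0;
}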
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index 8f54f58b83e6..ad13c648b679 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -5,8 +5,8 @@
#include <linux/skbuff.h>
#include <linux/ctype.h>
-#include "ahb.h"
#include "debug.h"
+#include "hif.h"
struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size)
{
@@ -672,7 +672,7 @@ setup:
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
- status = ath11k_ahb_map_service_to_pipe(htc->ab,
+ status = ath11k_hif_map_service_to_pipe(htc->ab,
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id);
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 9973477ae373..dc4434aefbbe 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -99,6 +99,11 @@ enum ath11k_hw_rate_ofdm {
ATH11K_HW_RATE_OFDM_9M,
};
+enum ath11k_bus {
+ ATH11K_BUS_AHB,
+ ATH11K_BUS_PCI,
+};
+
struct ath11k_hw_params {
const char *name;
struct {
@@ -111,7 +116,7 @@ struct ath11k_hw_params {
struct ath11k_fw_ie {
__le32 id;
__le32 len;
- u8 data[0];
+ u8 data[];
};
enum ath11k_bd_ie_board_type {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 9f8bc19cc5ae..2836a0f197ab 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -33,6 +33,12 @@
.max_power = 30, \
}
+/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
+static unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
+module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
+MODULE_PARM_DESC(frame_mode,
+ "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+
static const struct ieee80211_channel ath11k_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
@@ -1142,6 +1148,10 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
+ if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
+ IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
/* TODO: Check */
arg->tx_max_mcs_nss = 0xFF;
@@ -1168,8 +1178,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
sizeof(arg->peer_he_cap_macinfo));
memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
sizeof(arg->peer_he_cap_phyinfo));
- memcpy(&arg->peer_he_ops, &vif->bss_conf.he_operation,
- sizeof(arg->peer_he_ops));
+ arg->peer_he_ops = vif->bss_conf.he_oper.params;
/* the top most byte is used to indicate BSS color info */
arg->peer_he_ops &= 0xffffff;
@@ -3553,7 +3562,7 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar,
memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
sizeof(he_cap_elem->phy_cap_info));
- he_cap_elem->mac_cap_info[1] |=
+ he_cap_elem->mac_cap_info[1] &=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
he_cap_elem->phy_cap_info[4] &=
~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK;
@@ -3569,6 +3578,8 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar,
switch (i) {
case NL80211_IFTYPE_AP:
+ he_cap_elem->phy_cap_info[3] &=
+ ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
break;
@@ -3682,10 +3693,10 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
{
- struct ath11k *ar = ctx;
- struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu = skb;
struct ieee80211_tx_info *info;
+ struct ath11k *ar = ctx;
+ struct ath11k_base *ab = ar->ab;
spin_lock_bh(&ar->txmgmt_idr_lock);
idr_remove(&ar->txmgmt_idr, buf_id);
@@ -3725,6 +3736,7 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info;
dma_addr_t paddr;
int buf_id;
int ret;
@@ -3736,11 +3748,14 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
if (buf_id < 0)
return -ENOSPC;
- if ((ieee80211_is_action(hdr->frame_control) ||
- ieee80211_is_deauth(hdr->frame_control) ||
- ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control)) {
- skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ info = IEEE80211_SKB_CB(skb);
+ if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)) {
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ }
}
paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
@@ -3789,15 +3804,30 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
info = IEEE80211_SKB_CB(skb);
- arvif = ath11k_vif_to_arvif(info->control.vif);
-
- ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
- if (ret) {
- ath11k_warn(ar->ab, "failed to transmit management frame %d\n",
- ret);
+ if (!info->control.vif) {
+ ath11k_warn(ar->ab, "no vif found for mgmt frame, flags 0x%x\n",
+ info->control.flags);
ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+
+ arvif = ath11k_vif_to_arvif(info->control.vif);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
+ arvif->is_started) {
+ ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
+ arvif->vdev_id, ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ atomic_inc(&ar->num_pending_mgmt_tx);
+ }
} else {
- atomic_inc(&ar->num_pending_mgmt_tx);
+ ath11k_warn(ar->ab,
+ "dropping mgmt frame for vdev %d, flags 0x%x is_started %d\n",
+ arvif->vdev_id, info->control.flags,
+ arvif->is_started);
+ ieee80211_free_txskb(ar->hw, skb);
}
}
}
@@ -3837,6 +3867,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct ath11k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
@@ -3845,7 +3876,9 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
bool is_prb_rsp;
int ret;
- if (ieee80211_is_mgmt(hdr->frame_control)) {
+ if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
+ skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
@@ -3877,8 +3910,10 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
- if (enable)
+ if (enable) {
tlv_filter = ath11k_mac_mon_status_filter_default;
+ tlv_filter.rx_filter = ath11k_debug_rx_filter(ar);
+ }
ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
@@ -4124,6 +4159,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct vdev_create_params vdev_param = {0};
struct peer_create_params peer_param;
u32 param_id, param_value;
+ int hw_encap = 0;
u16 nss;
int i;
int ret;
@@ -4208,6 +4244,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
ar->num_created_vdevs++;
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
@@ -4216,7 +4254,22 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
- param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET)
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_AP:
+ hw_encap = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (ieee80211_set_hw_80211_encap(vif, hw_encap))
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
@@ -4378,6 +4431,8 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
ar->num_created_vdevs--;
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << (arvif->vdev_id);
@@ -4643,6 +4698,8 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
}
ar->num_started_vdevs++;
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
/* Enable CAC Flag in the driver by checking the channel DFS cac time,
* i.e dfs_cac_ms value which will be valid only for radar channels
@@ -4701,6 +4758,8 @@ static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
WARN_ON(ar->num_started_vdevs == 0);
ar->num_started_vdevs--;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
@@ -5891,6 +5950,9 @@ int ath11k_mac_register(struct ath11k_base *ab)
int i;
int ret;
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index f43deacc01bd..297172538620 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -17,7 +17,26 @@ struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
list_for_each_entry(peer, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
- if (memcmp(peer->addr, addr, ETH_ALEN))
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+static struct ath11k_peer *ath11k_peer_find_by_pdev_idx(struct ath11k_base *ab,
+ u8 pdev_idx, const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->pdev_idx != pdev_idx)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
@@ -34,7 +53,7 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
- if (memcmp(peer->addr, addr, ETH_ALEN))
+ if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
@@ -200,6 +219,17 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOBUFS;
}
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr);
+ if (peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_info(ar->ab,
+ "ignoring the peer %pM creation on same pdev idx %d\n",
+ param->peer_addr, ar->pdev_idx);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
ret = ath11k_wmi_send_peer_create_cmd(ar, param);
if (ret) {
ath11k_warn(ar->ab,
@@ -225,6 +255,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOENT;
}
+ peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
arvif->ast_hash = peer->ast_hash;
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index ccca1523a6ea..5d125ce8984e 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -13,6 +13,7 @@ struct ath11k_peer {
u8 addr[ETH_ALEN];
int peer_id;
u16 ast_hash;
+ u8 pdev_idx;
/* protected by ab->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index 259dddbda2c7..5a7e150c621b 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -174,9 +174,12 @@ int ath11k_thermal_register(struct ath11k_base *sc)
if (IS_ERR(cdev)) {
ath11k_err(sc, "failed to setup thermal device result: %ld\n",
PTR_ERR(cdev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_thermal_destroy;
}
+ ar->thermal.cdev = cdev;
+
ret = sysfs_create_link(&ar->hw->wiphy->dev.kobj, &cdev->device.kobj,
"cooling_device");
if (ret) {
@@ -184,7 +187,6 @@ int ath11k_thermal_register(struct ath11k_base *sc)
goto err_thermal_destroy;
}
- ar->thermal.cdev = cdev;
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index 8700a622be7b..66d0aae7816c 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -21,14 +21,16 @@ static inline void trace_ ## name(proto) {}
#define TRACE_SYSTEM ath11k
TRACE_EVENT(ath11k_htt_pktlog,
- TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len),
+ TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len,
+ u32 pktlog_checksum),
- TP_ARGS(ar, buf, buf_len),
+ TP_ARGS(ar, buf, buf_len, pktlog_checksum),
TP_STRUCT__entry(
__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__field(u16, buf_len)
+ __field(u32, pktlog_checksum)
__dynamic_array(u8, pktlog, buf_len)
),
@@ -36,14 +38,16 @@ TRACE_EVENT(ath11k_htt_pktlog,
__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
__entry->buf_len = buf_len;
+ __entry->pktlog_checksum = pktlog_checksum;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
),
TP_printk(
- "%s %s size %hu",
+ "%s %s size %hu pktlog_checksum %d",
__get_str(driver),
__get_str(device),
- __entry->buf_len
+ __entry->buf_len,
+ __entry->pktlog_checksum
)
);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index e7ce36966d6a..c2a972377687 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -87,8 +87,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
- [WMI_TAG_READY_EVENT]
- = {.min_len = sizeof(struct wmi_ready_event) },
+ [WMI_TAG_READY_EVENT] = {
+ .min_len = sizeof(struct wmi_ready_event_min) },
[WMI_TAG_SERVICE_AVAILABLE_EVENT]
= {.min_len = sizeof(struct wmi_service_available_event) },
[WMI_TAG_PEER_ASSOC_CONF_EVENT]
@@ -2368,6 +2368,146 @@ int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->initiator = initiator;
+ cmd->reasoncode = reason;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DELBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->statuscode = status;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->buffersize = buf_size;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
@@ -2779,7 +2919,7 @@ int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
if (ret) {
- ath11k_warn(ab, "Failed to send WMI_TWT_DIeABLE_CMDID");
+ ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
dev_kfree_skb(skb);
}
return ret;
@@ -3105,7 +3245,7 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
config.rx_batchmode = TARGET_RX_BATCHMODE;
config.peer_map_unmap_v2_support = 1;
- config.twt_ap_pdev_count = 2;
+ config.twt_ap_pdev_count = ab->num_radios;
config.twt_ap_sta_count = 1000;
memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
@@ -3740,8 +3880,9 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
ieee80211_tx_status_irqsafe(ar->hw, msdu);
- WARN_ON_ONCE(atomic_read(&ar->num_pending_mgmt_tx) == 0);
- atomic_dec(&ar->num_pending_mgmt_tx);
+ /* WARN when we received this event without doing any mgmt tx */
+ if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
+ WARN_ON_ONCE(1);
return 0;
}
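atomic_dec_if_positive() decrements only when the result would stay non-negative and returns the would-be new value, so a completion event that arrives with no mgmt tx pending shows up as a negative return rather than an underflowed counter. A single-threaded userspace model of that semantic (no atomics needed for the illustration):

#include <stdio.h>

/* Decrement *v only if the result stays non-negative; return the
 * would-be new value either way. */
static int dec_if_positive(int *v)
{
	int dec = *v - 1;

	if (dec >= 0)
		*v = dec;
	return dec;
}

int main(void)
{
	int pending = 1;

	printf("%d\n", dec_if_positive(&pending));	/* 0: consumed */
	printf("%d\n", dec_if_positive(&pending));	/* -1: spurious */
	return 0;
}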
@@ -4851,7 +4992,7 @@ static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_rdy_parse *rdy_parse = data;
- struct wmi_ready_event *fixed_param;
+ struct wmi_ready_event fixed_param;
struct wmi_mac_addr *addr_list;
struct ath11k_pdev *pdev;
u32 num_mac_addr;
@@ -4859,11 +5000,16 @@ static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
switch (tag) {
case WMI_TAG_READY_EVENT:
- fixed_param = (struct wmi_ready_event *)ptr;
- ab->wlan_init_status = fixed_param->status;
- rdy_parse->num_extra_mac_addr = fixed_param->num_extra_mac_addr;
-
- ether_addr_copy(ab->mac_addr, fixed_param->mac_addr.addr);
+ memset(&fixed_param, 0, sizeof(fixed_param));
+ memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
+ min_t(u16, sizeof(fixed_param), len));
+ ab->wlan_init_status = fixed_param.ready_event_min.status;
+ rdy_parse->num_extra_mac_addr =
+ fixed_param.ready_event_min.num_extra_mac_addr;
+
+ ether_addr_copy(ab->mac_addr,
+ fixed_param.ready_event_min.mac_addr.addr);
+ ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
ab->wmi_ready = true;
break;
case WMI_TAG_ARRAY_FIXED_STRUCT:
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 510f9c6bc1d7..b9f3e559ced7 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -39,7 +39,7 @@ struct wmi_cmd_hdr {
struct wmi_tlv {
u32 header;
- u8 value[0];
+ u8 value[];
} __packed;
#define WMI_TLV_LEN GENMASK(15, 0)
@@ -1976,6 +1976,43 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+ WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180,
+ WMI_TLV_SERVICE_FETCH_TX_PN = 181,
+ WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182,
+ WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183,
+ WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184,
+ WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185,
+ WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186,
+ WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187,
+ WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188,
+ WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189,
+ WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190,
+ WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191,
+ WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192,
+ WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193,
+ WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194,
+ WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195,
+ WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196,
+ WMI_TLV_SERVICE_VOW_ENABLE = 197,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198,
+ WMI_TLV_SERVICE_BROADCAST_TWT = 199,
+ WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200,
+ WMI_TLV_SERVICE_PS_TDCC = 201,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY = 202,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203,
+ WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204,
+ WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205,
+ WMI_TLV_SERVICE_WPA3_FT_FILS = 206,
+ WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207,
+ WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208,
+ WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209,
+ WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210,
+ WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
+ WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
+ WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_MAX_EXT_SERVICE
@@ -2345,7 +2382,7 @@ struct wmi_mac_addr {
} __packed;
} __packed;
-struct wmi_ready_event {
+struct wmi_ready_event_min {
struct wmi_abi_version fw_abi_vers;
struct wmi_mac_addr mac_addr;
u32 status;
@@ -2355,6 +2392,12 @@ struct wmi_ready_event {
u32 num_extra_peers;
} __packed;
+struct wmi_ready_event {
+ struct wmi_ready_event_min ready_event_min;
+ u32 max_ast_index;
+ u32 pktlog_defs_checksum;
+} __packed;
+
struct wmi_service_available_event {
u32 wmi_service_segment_offset;
u32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
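Splitting the ready event into a wmi_ready_event_min core plus a superset struct, combined with the memset-then-bounded-memcpy in the parser above, lets one code path accept both the short event from older firmware and the extended one carrying pktlog_defs_checksum. A small sketch of that copy-min pattern, with invented field names:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct evt_min { uint32_t status; };
struct evt_full { struct evt_min min; uint32_t checksum; };

static void parse(const void *payload, size_t len)
{
	struct evt_full evt;

	/* Zero first so fields absent from a short payload read as 0. */
	memset(&evt, 0, sizeof(evt));
	memcpy(&evt, payload, len < sizeof(evt) ? len : sizeof(evt));
	printf("status %u checksum %u\n", evt.min.status, evt.checksum);
}

int main(void)
{
	struct evt_min old_fw = { .status = 1 };
	struct evt_full new_fw = { .min = { .status = 1 }, .checksum = 7 };

	parse(&old_fw, sizeof(old_fw));	/* status 1 checksum 0 */
	parse(&new_fw, sizeof(new_fw));	/* status 1 checksum 7 */
	return 0;
}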
@@ -3649,6 +3692,37 @@ struct wmi_therm_throt_level_config_info {
u32 prio;
} __packed;
+struct wmi_delba_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 initiator;
+ u32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 statuscode;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 buffersize;
+} __packed;
+
+struct wmi_addba_clear_resp_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
struct wmi_pdev_pktlog_filter_info {
u32 tlv_header;
struct wmi_mac_addr peer_macaddr;
@@ -4531,6 +4605,9 @@ enum wmi_sta_ps_param_rx_wake_policy {
WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
};
+/* Do not change existing values! Used by the ath11k_frame_mode
+ * module parameter.
+ */
enum ath11k_hw_txrx_mode {
ATH11K_HW_TXRX_RAW = 0,
ATH11K_HW_TXRX_NATIVE_WIFI = 1,
@@ -4822,6 +4899,13 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
struct scan_chan_list_params *chan_list);
int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
u32 pdev_id);
+int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac);
+int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size);
+int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status);
+int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason);
int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
u32 vdev_id, u32 bcn_ctrl_op);
int
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 0624333f5430..850c608b43a3 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -501,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
/* too many PHY errors - we have to raise immunity */
- bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
+ bool ofdm_flag = as->ofdm_errors > ofdm_high;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
ath5k_ani_period_restart(as);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 37cf602d8adf..67f8f2aa7a53 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3249,22 +3249,19 @@ static int ath6kl_get_antenna(struct wiphy *wiphy,
return 0;
}
-static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- u16 frame_type, bool reg)
+static void ath6kl_update_mgmt_frame_registrations(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct mgmt_frame_regs *upd)
{
struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
- __func__, frame_type, reg);
- if (frame_type == IEEE80211_STYPE_PROBE_REQ) {
- /*
- * Note: This notification callback is not allowed to sleep, so
- * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
- * hardcode target to report Probe Request frames all the time.
- */
- vif->probe_req_report = reg;
- }
+ /*
+ * FIXME: send WMI_PROBE_REQ_REPORT_CMD here instead of hardcoding
+ * the reporting in the target all the time; this callback
+ * *is* allowed to sleep after all.
+ */
+ vif->probe_req_report =
+ upd->interface_stypes & BIT(IEEE80211_STYPE_PROBE_REQ >> 4);
}
static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
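The replacement callback receives a bitmap keyed by management-frame subtype: bits 4-7 of the frame-control field hold the subtype, so shifting the stype constant right by four gives the bit index, exactly as in the BIT(IEEE80211_STYPE_PROBE_REQ >> 4) test above. A standalone illustration (0x0040 mirrors mac80211's IEEE80211_STYPE_PROBE_REQ):

#include <stdio.h>

#define BIT(n) (1u << (n))
#define STYPE_PROBE_REQ 0x0040	/* subtype lives in frame-control bits 4-7 */

int main(void)
{
	/* Shifting right by 4 maps the subtype to a 0-15 bitmap index. */
	unsigned int stypes = BIT(STYPE_PROBE_REQ >> 4);

	printf("probe-req registration bit: 0x%x\n", stypes);	/* 0x10 */
	return 0;
}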
@@ -3464,7 +3461,8 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.remain_on_channel = ath6kl_remain_on_channel,
.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
.mgmt_tx = ath6kl_mgmt_tx,
- .mgmt_frame_register = ath6kl_mgmt_frame_register,
+ .update_mgmt_frame_registrations =
+ ath6kl_update_mgmt_frame_registrations,
.get_antenna = ath6kl_get_antenna,
.sched_scan_start = ath6kl_cfg80211_sscan_start,
.sched_scan_stop = ath6kl_cfg80211_sscan_stop,
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 0d30e762c090..77e052336eb5 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -160,7 +160,7 @@ enum ath6kl_fw_capability {
struct ath6kl_fw_ie {
__le32 id;
__le32 len;
- u8 data[0];
+ u8 data[];
};
enum ath6kl_hw_flags {
@@ -406,7 +406,7 @@ struct ath6kl_mgmt_buff {
u32 id;
bool no_cck;
size_t len;
- u8 buf[0];
+ u8 buf[];
};
struct ath6kl_sta {
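The [0] to [] conversions here and in the following files switch to C99 flexible array members with no change to the binary layout. Allocation sites typically pair such structures with struct_size() from <linux/overflow.h>; a sketch using the ath6kl_fw_ie layout above (the helper is hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>

/* sketch only: allocate a fw IE with room for data_len payload bytes */
static struct ath6kl_fw_ie *example_alloc_fw_ie(size_t data_len)
{
	struct ath6kl_fw_ie *ie;

	ie = kzalloc(struct_size(ie, data, data_len), GFP_KERNEL);
	if (ie)
		ie->len = cpu_to_le32(data_len);
	return ie;
}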
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 54337d60f288..7506cea46f58 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -30,7 +30,7 @@ struct ath6kl_fwlog_slot {
__le32 length;
/* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */
- u8 payload[0];
+ u8 payload[];
};
#define ATH6KL_FWLOG_MAX_ENTRIES 20
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index dc6bd8cd9b83..f9d3f3a5edfe 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -35,12 +35,6 @@
#define MAX_SCATTER_ENTRIES_PER_REQ 16
#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
-#define MANUFACTURER_ID_AR6003_BASE 0x300
-#define MANUFACTURER_ID_AR6004_BASE 0x400
- /* SDIO manufacturer ID and Codes */
-#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
-#define MANUFACTURER_CODE 0x271 /* Atheros */
-
/* Mailbox address in SDIO address space */
#define HIF_MBOX_BASE_ADDR 0x800
#define HIF_MBOX_WIDTH 0x800
@@ -199,7 +193,7 @@ struct hif_scatter_req {
u32 scat_q_depth;
- struct hif_scatter_item scat_list[0];
+ struct hif_scatter_item scat_list[];
};
struct ath6kl_irq_proc_registers {
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index bb50680580f3..6b51a2dceadc 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -799,8 +799,7 @@ static int ath6kl_sdio_config(struct ath6kl *ar)
sdio_claim_host(func);
- if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
- MANUFACTURER_ID_AR6003_BASE) {
+ if (ar_sdio->id->device >= SDIO_DEVICE_ID_ATHEROS_AR6003_00) {
/* enable 4-bit ASYNC interrupt on AR6003 or later */
ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
CCCR_SDIO_IRQ_MODE_REG,
@@ -1409,13 +1408,13 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
}
static const struct sdio_device_id ath6kl_sdio_devices[] = {
- {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
- {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
- {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
- {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
- {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
- {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
- {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x19))},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_00)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_01)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_00)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_01)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_02)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_18)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_19)},
{},
};
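The generic constants from <linux/mmc/sdio_ids.h> carry the same vendor/device values as the removed local defines, so the match table behaves identically. Illustrative check of one entry at probe time (hypothetical helper):

static bool example_is_ar6003_00(const struct sdio_func *func)
{
	return func->vendor == SDIO_VENDOR_ID_ATHEROS &&
	       func->device == SDIO_DEVICE_ID_ATHEROS_AR6003_00;
}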
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index fd9db8ca99d7..fd53b5f9e9b5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -19,6 +19,8 @@
#include "ar9002_phy.h"
#define AR9285_CLCAL_REDO_THRESH 1
+/* AGC and I/Q calibration time limit, in ms */
+#define AR9002_CAL_MAX_TIME 30000
enum ar9002_cal_types {
ADC_GAIN_CAL = BIT(0),
@@ -37,9 +39,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
break;
case ADC_GAIN_CAL:
case ADC_DC_CAL:
- /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
- if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
- IS_CHAN_HT20(chan)))
+		/* Run the even/odd ADC calibrations for HT40 channels only */
+ if (IS_CHAN_HT40(chan))
supported = true;
break;
}
@@ -105,6 +106,14 @@ static bool ar9002_hw_per_calibration(struct ath_hw *ah,
} else {
ar9002_hw_setup_calibration(ah, currCal);
}
+ } else if (time_after(jiffies, ah->cal_start_time +
+ msecs_to_jiffies(AR9002_CAL_MAX_TIME))) {
+ REG_CLR_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_DO_CAL);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "calibration timeout\n");
+ currCal->calState = CAL_WAITING; /* Try later */
+ iscaldone = true;
}
} else if (!(caldata->CalValid & currCal->calData->calType)) {
ath9k_hw_reset_calibration(ah, currCal);
@@ -664,8 +673,13 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
int ret;
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
- if (ah->caldata)
+ if (ah->caldata) {
nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
+		if (longcal) /* Remember it, so it is not missed */
+ set_bit(LONGCAL_PENDING, &ah->caldata->cal_flags);
+ else if (test_bit(LONGCAL_PENDING, &ah->caldata->cal_flags))
+			longcal = true; /* Re-run the postponed one */
+ }
percal_pending = (currCal &&
(currCal->calState == CAL_RUNNING ||
@@ -675,9 +689,24 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ar9002_hw_per_calibration(ah, chan, rxchainmask, currCal))
return 0;
- ah->cal_list_curr = currCal = currCal->calNext;
- if (currCal->calState == CAL_WAITING)
- ath9k_hw_reset_calibration(ah, currCal);
+	/* Look for the next waiting calibration, if any */
+ for (currCal = currCal->calNext; currCal != ah->cal_list_curr;
+ currCal = currCal->calNext) {
+ if (currCal->calState == CAL_WAITING)
+ break;
+ }
+ if (currCal->calState == CAL_WAITING) {
+ percal_pending = true;
+ ah->cal_list_curr = currCal;
+ } else {
+ percal_pending = false;
+ ah->cal_list_curr = ah->cal_list;
+ }
+ }
+
+	/* Do not start the next calibration while a long calibration is running */
+ if (percal_pending && !nfcal && !longcal) {
+ ath9k_hw_reset_calibration(ah, currCal);
return 0;
}
@@ -701,6 +730,9 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
}
if (longcal) {
+ if (ah->caldata)
+ clear_bit(LONGCAL_PENDING,
+ &ah->caldata->cal_flags);
ath9k_hw_start_nfcal(ah, false);
/* Do periodic PAOffset Cal */
ar9002_hw_pa_cal(ah, false);
@@ -858,9 +890,6 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
- if (ah->caldata)
- set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
-
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
/* Enable IQ, ADC Gain and ADC DC offset CALs */
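The timeout added in ar9002_hw_per_calibration() is the standard jiffies deadline pattern: stamp the start time when the calibration kicks off (see the calib.c hunk below) and compare with time_after(), which is wrap-safe. Reduced to its core as a sketch:

#include <linux/jiffies.h>

/* sketch: true once a calibration has been running too long */
static bool example_cal_timed_out(unsigned long cal_start_time)
{
	return time_after(jiffies,
			  cal_start_time +
			  msecs_to_jiffies(AR9002_CAL_MAX_TIME));
}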
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 695c779ae8cf..0422a33395b7 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -176,6 +176,7 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
ath9k_hw_setup_calibration(ah, currCal);
+ ah->cal_start_time = jiffies;
currCal->calState = CAL_RUNNING;
for (i = 0; i < AR5416_MAX_CHAINS; i++) {
@@ -209,14 +210,17 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
}
- if (!(ah->supp_cals & currCal->calData->calType))
- return true;
+ currCal = ah->cal_list;
+ do {
+ ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
+ currCal->calData->calType,
+ ah->curchan->chan->center_freq);
- ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
- currCal->calData->calType, ah->curchan->chan->center_freq);
+ ah->caldata->CalValid &= ~currCal->calData->calType;
+ currCal->calState = CAL_WAITING;
- ah->caldata->CalValid &= ~currCal->calData->calType;
- currCal->calState = CAL_WAITING;
+ currCal = currCal->calNext;
+ } while (currCal != ah->cal_list);
return false;
}
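The calibration entries form a circular singly linked list, so the rewritten loop uses the canonical do/while idiom that visits every node exactly once and stops on returning to the head. Generic form, with a hypothetical node type:

struct example_node {
	struct example_node *next;	/* last node points back to head */
};

static void example_visit_all(struct example_node *head,
			      void (*visit)(struct example_node *))
{
	struct example_node *cur = head;

	do {
		visit(cur);
		cur = cur->next;
	} while (cur != head);
}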
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index dd0c32379375..4ed21dad6a8e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -612,6 +612,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
hif_dev->remain_skb = nskb;
spin_unlock(&hif_dev->rx_lock);
} else {
+ if (pool_index == MAX_PKT_NUM_IN_TRANSFER) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: over RX MAX_PKT_NUM\n");
+ goto err;
+ }
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
@@ -638,9 +643,9 @@ err:
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
- struct sk_buff *skb = (struct sk_buff *) urb->context;
- struct hif_device_usb *hif_dev =
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
+ struct sk_buff *skb = rx_buf->skb;
int ret;
if (!skb)
@@ -680,14 +685,15 @@ resubmit:
return;
free:
kfree_skb(skb);
+ kfree(rx_buf);
}
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
- struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
+ struct sk_buff *skb = rx_buf->skb;
struct sk_buff *nskb;
- struct hif_device_usb *hif_dev =
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
@@ -745,6 +751,7 @@ resubmit:
return;
free:
kfree_skb(skb);
+ kfree(rx_buf);
urb->context = NULL;
}
@@ -790,7 +797,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
init_usb_anchor(&hif_dev->mgmt_submitted);
for (i = 0; i < MAX_TX_URB_NUM; i++) {
- tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
+ tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
if (!tx_buf)
goto err;
@@ -827,8 +834,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
- struct urb *urb = NULL;
+ struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
+ struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->rx_submitted);
@@ -836,6 +844,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
for (i = 0; i < MAX_RX_URB_NUM; i++) {
+ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
+ if (!rx_buf) {
+ ret = -ENOMEM;
+ goto err_rxb;
+ }
+
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@@ -850,11 +864,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
+ rx_buf->hif_dev = hif_dev;
+ rx_buf->skb = skb;
+
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
- ath9k_hif_usb_rx_cb, skb);
+ ath9k_hif_usb_rx_cb, rx_buf);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->rx_submitted);
@@ -880,6 +897,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
+ kfree(rx_buf);
+err_rxb:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
return ret;
}
@@ -891,14 +910,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
- struct urb *urb = NULL;
+ struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
+ struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->reg_in_submitted);
for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
+ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
+ if (!rx_buf) {
+ ret = -ENOMEM;
+ goto err_rxb;
+ }
+
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@@ -913,11 +939,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
+ rx_buf->hif_dev = hif_dev;
+ rx_buf->skb = skb;
+
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, skb, 1);
+ ath9k_hif_usb_reg_in_cb, rx_buf, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@@ -943,6 +972,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
+ kfree(rx_buf);
+err_rxb:
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
return ret;
}
@@ -973,7 +1004,7 @@ err:
return -ENOMEM;
}
-static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
+void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->regout_submitted);
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
@@ -1341,8 +1372,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
- ath9k_htc_hw_free(hif_dev->htc_handle);
ath9k_hif_usb_dev_deinit(hif_dev);
+		ath9k_destroy_wmi(hif_dev->htc_handle->drv_priv);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
}
usb_set_intfdata(interface, NULL);
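The hif_usb rework stops deriving the device from usb_ifnum_to_if() in the completion handlers and instead bundles device and skb into a small heap-allocated struct rx_buf handed to the URB as its context. Condensed shape of the completion side, with error handling and resubmission trimmed:

/* sketch of the new completion handlers' common pattern */
static void example_rx_complete(struct urb *urb)
{
	struct rx_buf *rx_buf = urb->context;
	struct hif_device_usb *hif_dev = rx_buf->hif_dev;
	struct sk_buff *skb = rx_buf->skb;

	if (urb->status) {
		kfree_skb(skb);
		kfree(rx_buf);	/* the bundle dies with the URB */
		return;
	}
	/* ... process skb for hif_dev, then resubmit urb ... */
}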
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 7846916aa01d..5985aa15ca93 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -86,6 +86,11 @@ struct tx_buf {
struct list_head list;
};
+struct rx_buf {
+ struct sk_buff *skb;
+ struct hif_device_usb *hif_dev;
+};
+
#define HIF_USB_TX_STOP BIT(0)
#define HIF_USB_TX_FLUSH BIT(1)
@@ -133,5 +138,6 @@ struct hif_device_usb {
int ath9k_hif_usb_init(void);
void ath9k_hif_usb_exit(void);
+void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev);
#endif /* HTC_USB_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d961095ab01f..1d6ad8d46607 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -780,6 +780,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);
}
static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
@@ -931,8 +933,9 @@ err_init:
int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
u16 devid, char *product, u32 drv_info)
{
- struct ieee80211_hw *hw;
+ struct hif_device_usb *hif_dev;
struct ath9k_htc_priv *priv;
+ struct ieee80211_hw *hw;
int ret;
hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
@@ -967,7 +970,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
return 0;
err_init:
- ath9k_deinit_wmi(priv);
+ ath9k_stop_wmi(priv);
+ hif_dev = (struct hif_device_usb *)htc_handle->hif_dev;
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+	ath9k_destroy_wmi(priv);
err_free:
ieee80211_free_hw(hw);
return ret;
@@ -982,7 +988,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED;
ath9k_deinit_device(htc_handle->drv_priv);
- ath9k_deinit_wmi(htc_handle->drv_priv);
+ ath9k_stop_wmi(htc_handle->drv_priv);
ieee80211_free_hw(htc_handle->drv_priv->hw);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 791f6633667c..2b7832b1c800 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1251,6 +1251,7 @@ out:
FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_PROBE_REQ | \
+ FIF_MCAST_ACTION | \
FIF_FCSFAIL)
static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 9cec5c216e1f..b353995bdd45 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -893,7 +893,8 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
if (priv->rxfilter & FIF_PSPOLL)
rfilt |= ATH9K_RX_FILTER_PSPOLL;
- if (priv->nvifs > 1 || priv->rxfilter & FIF_OTHER_BSS)
+ if (priv->nvifs > 1 ||
+ priv->rxfilter & (FIF_OTHER_BSS | FIF_MCAST_ACTION))
rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
return rfilt;
@@ -999,9 +1000,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* which are not PHY_ERROR (short radar pulses have a length of 3)
*/
if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
- ath_warn(common,
- "Short RX data len, dropping (dlen: %d)\n",
- rs_datalen);
+ ath_dbg(common, ANY,
+ "Short RX data len, dropping (dlen: %d)\n",
+ rs_datalen);
goto rx_next;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index d091c8ebdcf0..d2e062eaf561 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target,
if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
epid = svc_rspmsg->endpoint_id;
+ if (epid < 0 || epid >= ENDPOINT_MAX)
+ return;
+
service_id = be16_to_cpu(svc_rspmsg->service_id);
max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
endpoint = &target->endpoint[epid];
@@ -170,7 +173,6 @@ static int htc_config_pipe_credits(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC credit config timeout\n");
- kfree_skb(skb);
return -ETIMEDOUT;
}
@@ -206,7 +208,6 @@ static int htc_setup_complete(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC start timeout\n");
- kfree_skb(skb);
return -ETIMEDOUT;
}
@@ -279,7 +280,6 @@ int htc_connect_service(struct htc_target *target,
if (!time_left) {
dev_err(target->dev, "Service connection timeout for: %d\n",
service_connreq->service_id);
- kfree_skb(skb);
return -ETIMEDOUT;
}
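Two independent fixes land in htc_hst.c: the endpoint ID from the service-connect response is now range-checked before it indexes the fixed endpoint array, and the kfree_skb() calls on the timeout paths are dropped, which reads as a double-free fix since the skb is owned by the TX completion path once submitted. The validation pattern in isolation:

	/* never trust an index supplied by the device */
	if (epid < 0 || epid >= ENDPOINT_MAX)
		return;		/* malformed response: ignore it */
	endpoint = &target->endpoint[epid];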
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2e4489700a85..023599e10dd5 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -427,6 +427,7 @@ enum ath9k_cal_flags {
TXIQCAL_DONE,
TXCLCAL_DONE,
SW_PKDET_DONE,
+ LONGCAL_PENDING,
};
struct ath9k_hw_cal_data {
@@ -833,6 +834,7 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
+ unsigned long cal_start_time;
struct ath9k_cal_list iq_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 17c318902cb8..289a2444d534 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -1012,6 +1012,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);
}
int ath9k_init_device(u16 devid, struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 457e9b0d21ca..a47f6e978095 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1476,6 +1476,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_PROBE_REQ | \
+ FIF_MCAST_ACTION | \
FIF_FCSFAIL)
/* FIXME: sc->sc_full_reset ? */
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 06e660858766..0c0624a3b40d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -413,7 +413,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
- if (sc->cur_chan->nvifs > 1 || (sc->cur_chan->rxfilter & FIF_OTHER_BSS)) {
+ if (sc->cur_chan->nvifs > 1 ||
+ (sc->cur_chan->rxfilter & (FIF_OTHER_BSS | FIF_MCAST_ACTION))) {
/* This is needed for older chips */
if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
rfilt |= ATH9K_RX_FILTER_PROM;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index cdc146091194..e7a3127395be 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
return wmi;
}
-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
+void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
{
struct wmi *wmi = priv->wmi;
mutex_lock(&wmi->op_mutex);
wmi->stopped = true;
mutex_unlock(&wmi->op_mutex);
+}
+void ath9k_destroy_wmi(struct ath9k_htc_priv *priv)
+{
kfree(priv->wmi);
}
@@ -336,7 +339,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
- kfree_skb(skb);
return -ETIMEDOUT;
}
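Splitting ath9k_deinit_wmi() into stop and destroy phases lets callers order teardown so the WMI object is only freed after nothing can touch it. The error path added to ath9k_htc_probe_device() above shows the intended sequence; schematically:

	ath9k_stop_wmi(priv);                 /* 1: refuse new WMI work        */
	ath9k_hif_usb_dealloc_urbs(hif_dev);  /* 2: kill in-flight completions */
	ath9k_destroy_wmi(priv);              /* 3: only now free the object   */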
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 380175d5ecd7..d8b912206232 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -179,7 +179,6 @@ struct wmi {
};
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
enum htc_endpoint_id *wmi_ctrl_epid);
int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
@@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
void ath9k_wmi_event_tasklet(unsigned long data);
void ath9k_fatal_work(struct work_struct *work);
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
+void ath9k_stop_wmi(struct ath9k_htc_priv *priv);
+void ath9k_destroy_wmi(struct ath9k_htc_priv *priv);
#define WMI_CMD(_wmi_cmd) \
do { \
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 51934d191f33..1ab09e1c9ec5 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -338,9 +338,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
if (SUPP(CARL9170FW_WLANTX_CAB)) {
- if_comb_types |=
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ if_comb_types |= BIT(NL80211_IFTYPE_AP);
#ifdef CONFIG_MAC80211_MESH
if_comb_types |=
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index ea1d80f9a50e..56999a3b9d3b 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -127,7 +127,7 @@ struct carl9170_write_reg {
struct carl9170_write_reg_byte {
__le32 addr;
__le32 count;
- u8 val[0];
+ u8 val[];
} __packed;
#define CARL9170FW_PHY_HT_ENABLE 0x4
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index 08e0ae9c5836..555ad4975970 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -851,7 +851,7 @@ struct ar9170_stream {
__le16 length;
__le16 tag;
- u8 payload[0];
+ u8 payload[];
} __packed __aligned(4);
#define AR9170_STREAM_LEN 4
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 5914926a5c5b..816929fb5b14 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar,
ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
(vif->type != NL80211_IFTYPE_AP));
- /* While the driver supports HW offload in a single
- * P2P client configuration, it doesn't support HW
- * offload in the favourit, concurrent P2P GO+CLIENT
- * configuration. Hence, HW offload will always be
- * disabled for P2P.
+	/* The driver used to have P2P GO+CLIENT support, but
+	 * since that was dropped and we don't know whether any
+	 * gremlins still lurk in the shadows, it is best to keep
+	 * HW offload disabled for P2P.
*/
ar->disable_offload |= vif->p2p;
@@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_STATION)
break;
- /* P2P GO [master] use-case
- * Because the P2P GO station is selected dynamically
- * by all participating peers of a WIFI Direct network,
- * the driver has be able to change the main interface
- * operating mode on the fly.
- */
- if (main_vif->p2p && vif->p2p &&
- vif->type == NL80211_IFTYPE_AP) {
- old_main = main_vif;
- break;
- }
-
err = -EBUSY;
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 6ba0fd57c951..aab5a58616fc 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2240,7 +2240,7 @@ struct wcn36xx_hal_process_ptt_msg_req_msg {
struct wcn36xx_hal_msg_header header;
/* Actual FTM Command body */
- u8 ptt_msg[0];
+ u8 ptt_msg[];
} __packed;
struct wcn36xx_hal_process_ptt_msg_rsp_msg {
@@ -2249,7 +2249,7 @@ struct wcn36xx_hal_process_ptt_msg_rsp_msg {
/* FTM Command response status */
u32 ptt_msg_resp_status;
/* Actual FTM Command body */
- u8 ptt_msg[0];
+ u8 ptt_msg[];
} __packed;
struct update_edca_params_req_msg {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e49c306e0eef..702b689c06df 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1339,7 +1339,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
if (addr && ret != ETH_ALEN) {
wcn36xx_err("invalid local-mac-address\n");
ret = -EINVAL;
- goto out_wq;
+ goto out_destroy_ept;
} else if (addr) {
wcn36xx_info("mac address: %pM\n", addr);
SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
@@ -1347,7 +1347,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
ret = wcn36xx_platform_get_resources(wcn, pdev);
if (ret)
- goto out_wq;
+ goto out_destroy_ept;
wcn36xx_init_ieee80211(wcn);
ret = ieee80211_register_hw(wcn->hw);
@@ -1359,6 +1359,8 @@ static int wcn36xx_probe(struct platform_device *pdev)
out_unmap:
iounmap(wcn->ccu_base);
iounmap(wcn->dxe_base);
+out_destroy_ept:
+ rpmsg_destroy_ept(wcn->smd_channel);
out_wq:
ieee80211_free_hw(hw);
out_err:
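The wcn36xx fix is the usual goto-unwind discipline: every failure jumps to a label that releases exactly what has been set up so far, in reverse order, and the new out_destroy_ept label slots in at the point where the rpmsg endpoint was created. Generic shape, with hypothetical helpers:

static int example_probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;
	ret = step_b();		/* e.g. create an endpoint */
	if (ret)
		goto undo_a;
	ret = step_c();
	if (ret)
		goto undo_b;	/* must release b AND a */
	return 0;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
out:
	return ret;
}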
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.h b/drivers/net/wireless/ath/wcn36xx/testmode.h
index 4c6cfdb46580..09d68fab9add 100644
--- a/drivers/net/wireless/ath/wcn36xx/testmode.h
+++ b/drivers/net/wireless/ath/wcn36xx/testmode.h
@@ -20,7 +20,7 @@ struct ftm_rsp_msg {
u16 msg_id;
u16 msg_body_length;
u32 resp_status;
- u8 msg_response[0];
+ u8 msg_response[];
} __packed;
/* The request buffer of FTM which contains a byte of command and the request */
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index 540fa1607794..440614d61156 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -33,7 +33,7 @@ struct wil_fw_record_head {
*/
struct wil_fw_record_data { /* type == wil_fw_type_data */
__le32 addr;
- __le32 data[0]; /* [data_size], see above */
+ __le32 data[]; /* [data_size], see above */
} __packed;
/* fill with constant @value, @size bytes starting from @addr */
@@ -61,7 +61,7 @@ struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
/* identifies capabilities record */
struct wil_fw_record_comment_hdr hdr;
/* capabilities (variable size), see enum wmi_fw_capability */
- u8 capabilities[0];
+ u8 capabilities[];
} __packed;
/* FW VIF concurrency encoded inside a comment record
@@ -80,7 +80,7 @@ struct wil_fw_concurrency_combo {
u8 n_diff_channels; /* total number of different channels allowed */
u8 same_bi; /* for APs, 1 if all APs must have same BI */
/* keep last - concurrency limits, variable size by n_limits */
- struct wil_fw_concurrency_limit limits[0];
+ struct wil_fw_concurrency_limit limits[];
} __packed;
struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */
@@ -93,7 +93,7 @@ struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */
/* number of concurrency combinations that follow */
__le16 n_combos;
/* keep last - combinations, variable size by n_combos */
- struct wil_fw_concurrency_combo combos[0];
+ struct wil_fw_concurrency_combo combos[];
} __packed;
/* brd file info encoded inside a comment record */
@@ -108,7 +108,7 @@ struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */
/* identifies brd file record */
struct wil_fw_record_comment_hdr hdr;
__le32 version;
- struct brd_info brd_info[0];
+ struct brd_info brd_info[];
} __packed;
/* perform action
@@ -116,7 +116,7 @@ struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */
*/
struct wil_fw_record_action { /* type == wil_fw_type_action */
__le32 action; /* action to perform: reset, wait for fw ready etc. */
- __le32 data[0]; /* action specific, [data_size], see above */
+ __le32 data[]; /* action specific, [data_size], see above */
} __packed;
/* data block for struct wil_fw_record_direct_write */
@@ -179,7 +179,7 @@ struct wil_fw_record_gateway_data { /* type == wil_fw_type_gateway_data */
#define WIL_FW_GW_CTL_BUSY BIT(29) /* gateway busy performing operation */
#define WIL_FW_GW_CTL_RUN BIT(30) /* start gateway operation */
__le32 command;
- struct wil_fw_data_gw data[0]; /* total size [data_size], see above */
+ struct wil_fw_data_gw data[]; /* total size [data_size], see above */
} __packed;
/* 4-dword gateway */
@@ -201,7 +201,7 @@ struct wil_fw_record_gateway_data4 { /* type == wil_fw_type_gateway_data4 */
__le32 gateway_cmd_addr;
__le32 gateway_ctrl_address; /* same logic as for 1-dword gw */
__le32 command;
- struct wil_fw_data_gw4 data[0]; /* total size [data_size], see above */
+ struct wil_fw_data_gw4 data[]; /* total size [data_size], see above */
} __packed;
#endif /* __WIL_FW_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 23e1ed6a9d6d..c7136ce567ee 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -222,7 +222,7 @@ struct auth_no_hdr {
__le16 auth_transaction;
__le16 status_code;
/* possibly followed by Challenge text */
- u8 variable[0];
+ u8 variable[];
} __packed;
u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index e3558136e0c4..9affa4525609 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -474,7 +474,7 @@ struct wmi_start_scan_cmd {
struct {
u8 channel;
u8 reserved;
- } channel_list[0];
+ } channel_list[];
} __packed;
#define WMI_MAX_PNO_SSID_NUM (16)
@@ -530,7 +530,7 @@ struct wmi_update_ft_ies_cmd {
/* Length of the FT IEs */
__le16 ie_len;
u8 reserved[2];
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
/* WMI_SET_PROBED_SSID_CMDID */
@@ -575,7 +575,7 @@ struct wmi_set_appie_cmd {
u8 reserved;
/* Length of the IE to be added to MGMT frame */
__le16 ie_len;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
/* WMI_PXMT_RANGE_CFG_CMDID */
@@ -850,7 +850,7 @@ struct wmi_pcp_start_cmd {
struct wmi_sw_tx_req_cmd {
u8 dst_mac[WMI_MAC_LEN];
__le16 len;
- u8 payload[0];
+ u8 payload[];
} __packed;
/* WMI_SW_TX_REQ_EXT_CMDID */
@@ -861,7 +861,7 @@ struct wmi_sw_tx_req_ext_cmd {
/* Channel to use, 0xFF for currently active channel */
u8 channel;
u8 reserved[5];
- u8 payload[0];
+ u8 payload[];
} __packed;
/* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */
@@ -1423,7 +1423,7 @@ struct wmi_rf_xpm_write_cmd {
u8 verify;
u8 reserved1[3];
/* actual size=num_bytes */
- u8 data_bytes[0];
+ u8 data_bytes[];
} __packed;
/* Possible modes for temperature measurement */
@@ -1572,7 +1572,7 @@ struct wmi_tof_session_start_cmd {
u8 aoa_type;
__le16 num_of_dest;
u8 reserved[4];
- struct wmi_ftm_dest_info ftm_dest_info[0];
+ struct wmi_ftm_dest_info ftm_dest_info[];
} __packed;
/* WMI_TOF_CFG_RESPONDER_CMDID */
@@ -1766,7 +1766,7 @@ struct wmi_internal_fw_ioctl_cmd {
/* payload max size is WMI_MAX_IOCTL_PAYLOAD_SIZE
* Must be the last member of the struct
*/
- __le32 payload[0];
+ __le32 payload[];
} __packed;
/* WMI_INTERNAL_FW_IOCTL_EVENTID */
@@ -1778,7 +1778,7 @@ struct wmi_internal_fw_ioctl_event {
/* payload max size is WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE
* Must be the last member of the struct
*/
- __le32 payload[0];
+ __le32 payload[];
} __packed;
/* WMI_INTERNAL_FW_EVENT_EVENTID */
@@ -1788,7 +1788,7 @@ struct wmi_internal_fw_event_event {
/* payload max size is WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE
* Must be the last member of the struct
*/
- __le32 payload[0];
+ __le32 payload[];
} __packed;
/* WMI_SET_VRING_PRIORITY_WEIGHT_CMDID */
@@ -1818,7 +1818,7 @@ struct wmi_set_vring_priority_cmd {
*/
u8 num_of_vrings;
u8 reserved[3];
- struct wmi_vring_priority vring_priority[0];
+ struct wmi_vring_priority vring_priority[];
} __packed;
/* WMI_BF_CONTROL_CMDID - deprecated */
@@ -1910,7 +1910,7 @@ struct wmi_bf_control_ex_cmd {
u8 each_mcs_cfg_size;
u8 reserved1;
/* Configuration for each MCS */
- struct wmi_bf_control_ex_mcs each_mcs_cfg[0];
+ struct wmi_bf_control_ex_mcs each_mcs_cfg[];
} __packed;
/* WMI_LINK_STATS_CMD */
@@ -2192,7 +2192,7 @@ struct wmi_fw_ver_event {
/* FW capabilities info
* Must be the last member of the struct
*/
- __le32 fw_capabilities[0];
+ __le32 fw_capabilities[];
} __packed;
/* WMI_GET_RF_STATUS_EVENTID */
@@ -2270,7 +2270,7 @@ struct wmi_mac_addr_resp_event {
struct wmi_eapol_rx_event {
u8 src_mac[WMI_MAC_LEN];
__le16 eapol_len;
- u8 eapol[0];
+ u8 eapol[];
} __packed;
/* WMI_READY_EVENTID */
@@ -2343,7 +2343,7 @@ struct wmi_connect_event {
u8 aid;
u8 reserved2[2];
/* not in use */
- u8 assoc_info[0];
+ u8 assoc_info[];
} __packed;
/* disconnect_reason */
@@ -2376,7 +2376,7 @@ struct wmi_disconnect_event {
 	/* last assoc req may be passed to host - not in use */
u8 assoc_resp_len;
 	/* last assoc req may be passed to host - not in use */
- u8 assoc_info[0];
+ u8 assoc_info[];
} __packed;
/* WMI_SCAN_COMPLETE_EVENTID */
@@ -2400,7 +2400,7 @@ struct wmi_ft_auth_status_event {
u8 reserved[3];
u8 mac_addr[WMI_MAC_LEN];
__le16 ie_len;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
/* WMI_FT_REASSOC_STATUS_EVENTID */
@@ -2418,7 +2418,7 @@ struct wmi_ft_reassoc_status_event {
__le16 reassoc_req_ie_len;
__le16 reassoc_resp_ie_len;
u8 reserved[4];
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
/* wmi_rx_mgmt_info */
@@ -2461,7 +2461,7 @@ struct wmi_stop_sched_scan_event {
struct wmi_sched_scan_result_event {
struct wmi_rx_mgmt_info info;
- u8 payload[0];
+ u8 payload[];
} __packed;
/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
@@ -2492,7 +2492,7 @@ struct wmi_acs_passive_scan_complete_event {
__le16 filled;
u8 num_scanned_channels;
u8 reserved;
- struct scan_acs_info scan_info_list[0];
+ struct scan_acs_info scan_info_list[];
} __packed;
/* WMI_BA_STATUS_EVENTID */
@@ -2751,7 +2751,7 @@ struct wmi_rf_xpm_read_result_event {
u8 status;
u8 reserved[3];
/* requested num_bytes of data */
- u8 data_bytes[0];
+ u8 data_bytes[];
} __packed;
/* EVENT: WMI_RF_XPM_WRITE_RESULT_EVENTID */
@@ -2769,7 +2769,7 @@ struct wmi_tx_mgmt_packet_event {
/* WMI_RX_MGMT_PACKET_EVENTID */
struct wmi_rx_mgmt_packet_event {
struct wmi_rx_mgmt_info info;
- u8 payload[0];
+ u8 payload[];
} __packed;
/* WMI_ECHO_RSP_EVENTID */
@@ -2969,7 +2969,7 @@ struct wmi_rs_cfg_ex_cmd {
u8 each_mcs_cfg_size;
u8 reserved[3];
/* Configuration for each MCS */
- struct wmi_rs_cfg_ex_mcs each_mcs_cfg[0];
+ struct wmi_rs_cfg_ex_mcs each_mcs_cfg[];
} __packed;
/* WMI_RS_CFG_EX_EVENTID */
@@ -3178,7 +3178,7 @@ struct wmi_get_detailed_rs_res_ex_event {
u8 each_mcs_results_size;
u8 reserved1[3];
/* Results for each MCS */
- struct wmi_rs_results_ex_mcs each_mcs_results[0];
+ struct wmi_rs_results_ex_mcs each_mcs_results[];
} __packed;
/* BRP antenna limit mode */
@@ -3320,7 +3320,7 @@ struct wmi_set_link_monitor_cmd {
u8 rssi_hyst;
u8 reserved[12];
u8 rssi_thresholds_list_size;
- s8 rssi_thresholds_list[0];
+ s8 rssi_thresholds_list[];
} __packed;
/* wmi_link_monitor_event_type */
@@ -3637,7 +3637,7 @@ struct wmi_tof_ftm_per_dest_res_event {
 	/* Measurements are from RFs, defined by the mask */
__le32 meas_rf_mask;
u8 reserved0[3];
- struct wmi_responder_ftm_res responder_ftm_res[0];
+ struct wmi_responder_ftm_res responder_ftm_res[];
} __packed;
/* WMI_TOF_CFG_RESPONDER_EVENTID */
@@ -3669,7 +3669,7 @@ struct wmi_tof_channel_info_event {
/* data report length */
u8 len;
/* data report payload */
- u8 report[0];
+ u8 report[];
} __packed;
/* WMI_TOF_SET_TX_RX_OFFSET_EVENTID */
@@ -4085,7 +4085,7 @@ struct wmi_link_stats_event {
u8 has_next;
u8 reserved[5];
/* a stream of wmi_link_stats_record_s */
- u8 payload[0];
+ u8 payload[];
} __packed;
/* WMI_LINK_STATS_EVENT */
@@ -4094,7 +4094,7 @@ struct wmi_link_stats_record {
u8 record_type_id;
u8 reserved;
__le16 record_size;
- u8 record[0];
+ u8 record[];
} __packed;
/* WMI_LINK_STATS_TYPE_BASIC */
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 74538085cfb7..d5875836068c 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -798,7 +798,6 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast,
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
- static const u8 SNAP_RFC1024[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
struct atmel_private *priv = netdev_priv(dev);
struct ieee80211_hdr header;
unsigned long flags;
@@ -853,7 +852,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
if (priv->use_wpa)
- memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN);
+ memcpy(&header.addr4, rfc1042_header, ETH_ALEN);
header.frame_control = cpu_to_le16(frame_ctl);
/* Copy the wireless header into the card */
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 39da1a4c30ac..3ad94dad2d89 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5569,7 +5569,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
/* fill hw info */
ieee80211_hw_set(hw, RX_INCLUDES_FCS);
ieee80211_hw_set(hw, SIGNAL_DBM);
-
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT) |
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index d3c001fa8eb4..c33b4235839d 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -5507,7 +5507,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
core = (cmd & 0x3000) >> 12;
type = (cmd & 0x0F00) >> 8;
- if (phy6or5x && updated[core] == 0) {
+ if (phy6or5x && !updated[core]) {
b43_nphy_update_tx_cal_ladder(dev, core);
updated[core] = true;
}
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 69f8b46c9015..1a11c5dfb8d9 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -765,7 +765,7 @@ void b43_pio_rx(struct b43_pio_rxqueue *q)
bool stop;
while (1) {
- stop = (pio_rx_frame(q) == 0);
+ stop = !pio_rx_frame(q);
if (stop)
break;
cond_resched();
diff --git a/drivers/net/wireless/broadcom/b43/sdio.c b/drivers/net/wireless/broadcom/b43/sdio.c
index 881a7938c494..02b0cfd535ab 100644
--- a/drivers/net/wireless/broadcom/b43/sdio.c
+++ b/drivers/net/wireless/broadcom/b43/sdio.c
@@ -180,8 +180,8 @@ static void b43_sdio_remove(struct sdio_func *func)
}
static const struct sdio_device_id b43_sdio_ids[] = {
- { SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */
- { SDIO_DEVICE(0x0092, 0x0004) }, /* C-guys, Inc. EW-CG1102GC */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_NINTENDO_WII) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_CGUYS, SDIO_DEVICE_ID_CGUYS_EW_CG1102GC) },
{ },
};
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index 058745219516..55babc6d1091 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -629,19 +629,6 @@ static s8 b43_rssi_postprocess(struct b43_wldev *dev,
return (s8) tmp;
}
-//TODO
-#if 0
-static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi)
-{
- struct b43_phy *phy = &dev->phy;
- s8 ret;
-
- ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1);
-
- return ret;
-}
-#endif
-
void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
{
struct ieee80211_rx_status status;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 8b6b657c4b85..5208a39fd6f7 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3801,6 +3801,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
/* fill hw info */
ieee80211_hw_set(hw, RX_INCLUDES_FCS);
ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c
index e9b23c2e5bd4..efd63f4ce74f 100644
--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c
+++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c
@@ -558,6 +558,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
default:
b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n",
chanstat);
+ goto drop;
}
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index b684a5b6d904..46346cb3bc84 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -43,7 +43,8 @@
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
-#define SDIO_4359_FUNC2_BLOCKSIZE 256
+#define SDIO_4373_FUNC2_BLOCKSIZE 256
+#define SDIO_435X_FUNC2_BLOCKSIZE 256
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY 3000
@@ -910,13 +911,28 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
sdio_release_host(sdiodev->func1);
goto out;
}
- if (sdiodev->func2->device == SDIO_DEVICE_ID_BROADCOM_4359)
- f2_blksz = SDIO_4359_FUNC2_BLOCKSIZE;
+ switch (sdiodev->func2->device) {
+ case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
+ f2_blksz = SDIO_4373_FUNC2_BLOCKSIZE;
+ break;
+ case SDIO_DEVICE_ID_BROADCOM_4359:
+ /* fallthrough */
+ case SDIO_DEVICE_ID_BROADCOM_4354:
+ /* fallthrough */
+ case SDIO_DEVICE_ID_BROADCOM_4356:
+ f2_blksz = SDIO_435X_FUNC2_BLOCKSIZE;
+ break;
+ default:
+ break;
+ }
+
ret = sdio_set_block_size(sdiodev->func2, f2_blksz);
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
sdio_release_host(sdiodev->func1);
goto out;
+ } else {
+ brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz);
}
/* increase F2 timeout */
@@ -961,7 +977,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
- BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
@@ -970,9 +986,9 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
- BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373),
- BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_43012),
- BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_89359),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359),
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 2ba165330038..a757abd7a599 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -23,6 +23,7 @@
#include "p2p.h"
#include "btcoex.h"
#include "pno.h"
+#include "fwsignal.h"
#include "cfg80211.h"
#include "feature.h"
#include "fwil.h"
@@ -1819,6 +1820,10 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
switch (sme->crypto.akm_suites[0]) {
case WLAN_AKM_SUITE_SAE:
val = WPA3_AUTH_SAE_PSK;
+ if (sme->crypto.sae_pwd) {
+ brcmf_dbg(INFO, "using SAE offload\n");
+ profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
+ }
break;
default:
bphy_err(drvr, "invalid cipher group (%d)\n",
@@ -2104,11 +2109,6 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- if (sme->crypto.sae_pwd) {
- brcmf_dbg(INFO, "using SAE offload\n");
- profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
- }
-
if (sme->crypto.psk &&
profile->use_fwsup != BRCMF_PROFILE_FWSUP_SAE) {
if (WARN_ON(profile->use_fwsup != BRCMF_PROFILE_FWSUP_NONE)) {
@@ -2468,6 +2468,17 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (!ext_key)
key->flags = BRCMF_PRIMARY_KEY;
+ if (params->seq && params->seq_len == 6) {
+ /* rx iv */
+ u8 *ivptr;
+
+ ivptr = (u8 *)params->seq;
+ key->rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key->rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key->iv_initialized = true;
+ }
+
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key->algo = CRYPTO_ALGO_WEP1;
@@ -4449,6 +4460,11 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
mgmt_ie_len = &saved_ie->assoc_req_ie_len;
mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie);
break;
+ case BRCMF_VNDR_IE_ASSOCRSP_FLAG:
+ mgmt_ie_buf = saved_ie->assoc_res_ie;
+ mgmt_ie_len = &saved_ie->assoc_res_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->assoc_res_ie);
+ break;
default:
err = -EPERM;
bphy_err(drvr, "not suitable type\n");
@@ -4595,6 +4611,57 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
else
brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+ /* Set Assoc Response IEs to FW */
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_ASSOCRSP_FLAG,
+ beacon->assocresp_ies,
+ beacon->assocresp_ies_len);
+ if (err)
+ brcmf_err("Set Assoc Resp IE Failed\n");
+ else
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc Resp\n");
+
+ return err;
+}
+
+static s32
+brcmf_parse_configure_security(struct brcmf_if *ifp,
+ struct cfg80211_ap_settings *settings,
+ enum nl80211_iftype dev_role)
+{
+ const struct brcmf_tlv *rsn_ie;
+ const struct brcmf_vs_tlv *wpa_ie;
+ s32 err = 0;
+
+ /* find the RSN_IE */
+ rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+ settings->beacon.tail_len, WLAN_EID_RSN);
+
+ /* find the WPA_IE */
+ wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
+ settings->beacon.tail_len);
+
+ if (wpa_ie || rsn_ie) {
+ brcmf_dbg(TRACE, "WPA(2) IE is found\n");
+ if (wpa_ie) {
+ /* WPA IE */
+ err = brcmf_configure_wpaie(ifp, wpa_ie, false);
+ if (err < 0)
+ return err;
+ } else {
+ struct brcmf_vs_tlv *tmp_ie;
+
+ tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
+
+ /* RSN IE */
+ err = brcmf_configure_wpaie(ifp, tmp_ie, true);
+ if (err < 0)
+ return err;
+ }
+ } else {
+ brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
+ brcmf_configure_opensecurity(ifp);
+ }
+
return err;
}
@@ -4610,8 +4677,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
const struct brcmf_tlv *country_ie;
struct brcmf_ssid_le ssid_le;
s32 err = -EPERM;
- const struct brcmf_tlv *rsn_ie;
- const struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
@@ -4665,36 +4730,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_configure_arp_nd_offload(ifp, false);
}
- /* find the RSN_IE */
- rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
- settings->beacon.tail_len, WLAN_EID_RSN);
-
- /* find the WPA_IE */
- wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
- settings->beacon.tail_len);
-
- if ((wpa_ie != NULL || rsn_ie != NULL)) {
- brcmf_dbg(TRACE, "WPA(2) IE is found\n");
- if (wpa_ie != NULL) {
- /* WPA IE */
- err = brcmf_configure_wpaie(ifp, wpa_ie, false);
- if (err < 0)
- goto exit;
- } else {
- struct brcmf_vs_tlv *tmp_ie;
-
- tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
-
- /* RSN IE */
- err = brcmf_configure_wpaie(ifp, tmp_ie, true);
- if (err < 0)
- goto exit;
- }
- } else {
- brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
- brcmf_configure_opensecurity(ifp);
- }
-
/* Parameters shared by all radio interfaces */
if (!mbss) {
if ((supports_11d) && (is_11d != ifp->vif->is_11d)) {
@@ -4727,7 +4762,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
if ((dev_role == NL80211_IFTYPE_AP) &&
((ifp->ifidx == 0) ||
- !brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
+ (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB) &&
+ !brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN)))) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0) {
bphy_err(drvr, "BRCMF_C_DOWN error %d\n",
@@ -4775,6 +4811,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
bphy_err(drvr, "BRCMF_C_UP error (%d)\n", err);
goto exit;
}
+
+ err = brcmf_parse_configure_security(ifp, settings,
+ NL80211_IFTYPE_AP);
+ if (err < 0) {
+ bphy_err(drvr, "brcmf_parse_configure_security error\n");
+ goto exit;
+ }
+
/* On DOWN the firmware removes the WEP keys, reconfigure
* them if they were set.
*/
@@ -4807,6 +4851,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
chanspec, err);
goto exit;
}
+
+ err = brcmf_parse_configure_security(ifp, settings,
+ NL80211_IFTYPE_P2P_GO);
+ if (err < 0) {
+ brcmf_err("brcmf_parse_configure_security error\n");
+ goto exit;
+ }
+
err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
sizeof(ssid_le));
if (err < 0) {
@@ -4979,21 +5031,15 @@ brcmf_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev,
}
static void
-brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- u16 frame_type, bool reg)
+brcmf_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct mgmt_frame_regs *upd)
{
struct brcmf_cfg80211_vif *vif;
- u16 mgmt_type;
-
- brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg);
- mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
- if (reg)
- vif->mgmt_rx_reg |= BIT(mgmt_type);
- else
- vif->mgmt_rx_reg &= ~BIT(mgmt_type);
+
+ vif->mgmt_rx_reg = upd->interface_stypes;
}
@@ -5408,7 +5454,8 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.change_station = brcmf_cfg80211_change_station,
.sched_scan_start = brcmf_cfg80211_sched_scan_start,
.sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
- .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register,
+ .update_mgmt_frame_registrations =
+ brcmf_cfg80211_update_mgmt_frame_registrations,
.mgmt_tx = brcmf_cfg80211_mgmt_tx,
.remain_on_channel = brcmf_p2p_remain_on_channel,
.cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
@@ -5495,7 +5542,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
u32 event = e->event_code;
u32 status = e->status;
- if (vif->profile.use_fwsup == BRCMF_PROFILE_FWSUP_PSK &&
+ if ((vif->profile.use_fwsup == BRCMF_PROFILE_FWSUP_PSK ||
+ vif->profile.use_fwsup == BRCMF_PROFILE_FWSUP_SAE) &&
event == BRCMF_E_PSK_SUP &&
status == BRCMF_E_STATUS_FWSUP_COMPLETED)
set_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state);
@@ -5571,12 +5619,151 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
conn_info->resp_ie_len = 0;
}
+u8 brcmf_map_prio_to_prec(void *config, u8 prio)
+{
+ struct brcmf_cfg80211_info *cfg = (struct brcmf_cfg80211_info *)config;
+
+ if (!cfg)
+ return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
+ (prio ^ 2) : prio;
+
+	/* For those ACs with the ACM flag set to 1, convert their 4-level
+	 * priority to an 8-level precedence equal to BE's
+ */
+ if (prio > PRIO_8021D_EE &&
+ cfg->ac_priority[prio] == cfg->ac_priority[PRIO_8021D_BE])
+ return cfg->ac_priority[prio] * 2;
+
+ /* Conversion of 4-level priority to 8-level precedence */
+ if (prio == PRIO_8021D_BE || prio == PRIO_8021D_BK ||
+ prio == PRIO_8021D_CL || prio == PRIO_8021D_VO)
+ return cfg->ac_priority[prio] * 2;
+ else
+ return cfg->ac_priority[prio] * 2 + 1;
+}
+
+u8 brcmf_map_prio_to_aci(void *config, u8 prio)
+{
+	/* Prio here refers to the 802.1d priority in the range 0 to 7.
+	 * ACI here refers to the WLAN AC index in the range 0 to 3.
+	 * This function returns the ACI corresponding to the input prio.
+ */
+ struct brcmf_cfg80211_info *cfg = (struct brcmf_cfg80211_info *)config;
+
+ if (cfg)
+ return cfg->ac_priority[prio];
+
+ return prio;
+}
+
+static void brcmf_init_wmm_prio(u8 *priority)
+{
+ /* Initialize AC priority array to default
+ * 802.1d priority as per following table:
+ * 802.1d prio 0,3 maps to BE
+ * 802.1d prio 1,2 maps to BK
+ * 802.1d prio 4,5 maps to VI
+ * 802.1d prio 6,7 maps to VO
+ */
+ priority[0] = BRCMF_FWS_FIFO_AC_BE;
+ priority[3] = BRCMF_FWS_FIFO_AC_BE;
+ priority[1] = BRCMF_FWS_FIFO_AC_BK;
+ priority[2] = BRCMF_FWS_FIFO_AC_BK;
+ priority[4] = BRCMF_FWS_FIFO_AC_VI;
+ priority[5] = BRCMF_FWS_FIFO_AC_VI;
+ priority[6] = BRCMF_FWS_FIFO_AC_VO;
+ priority[7] = BRCMF_FWS_FIFO_AC_VO;
+}
+
+static void brcmf_wifi_prioritize_acparams(const
+ struct brcmf_cfg80211_edcf_acparam *acp, u8 *priority)
+{
+ u8 aci;
+ u8 aifsn;
+ u8 ecwmin;
+ u8 ecwmax;
+ u8 acm;
+ u8 ranking_basis[EDCF_AC_COUNT];
+ u8 aci_prio[EDCF_AC_COUNT]; /* AC_BE, AC_BK, AC_VI, AC_VO */
+ u8 index;
+
+ for (aci = 0; aci < EDCF_AC_COUNT; aci++, acp++) {
+ aifsn = acp->ACI & EDCF_AIFSN_MASK;
+ acm = (acp->ACI & EDCF_ACM_MASK) ? 1 : 0;
+ ecwmin = acp->ECW & EDCF_ECWMIN_MASK;
+ ecwmax = (acp->ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT;
+ brcmf_dbg(CONN, "ACI %d aifsn %d acm %d ecwmin %d ecwmax %d\n",
+ aci, aifsn, acm, ecwmin, ecwmax);
+ /* Default AC_VO will be the lowest ranking value */
+		/* With default parameters AC_VO ends up with the lowest ranking value */
+ /* Initialise priority starting at 0 (AC_BE) */
+ aci_prio[aci] = 0;
+
+ /* If ACM is set, STA can't use this AC as per 802.11.
+ * Change the ranking to BE
+ */
+ if (aci != AC_BE && aci != AC_BK && acm == 1)
+ ranking_basis[aci] = ranking_basis[AC_BE];
+ }
+
+ /* Ranking method which works for AC priority
+ * swapping when values for cwmin, cwmax and aifsn are varied
+ * Compare each aci_prio against each other aci_prio
+ */
+ for (aci = 0; aci < EDCF_AC_COUNT; aci++) {
+ for (index = 0; index < EDCF_AC_COUNT; index++) {
+ if (index != aci) {
+ /* Smaller ranking value has higher priority,
+ * so increment priority for each ACI which has
+ * a higher ranking value
+ */
+ if (ranking_basis[aci] < ranking_basis[index])
+ aci_prio[aci]++;
+ }
+ }
+ }
+
+ /* By now, aci_prio[] will be in range of 0 to 3.
+ * Use ACI prio to get the new priority value for
+ * each 802.1d traffic type, in this range.
+ */
+ if (!(aci_prio[AC_BE] == aci_prio[AC_BK] &&
+ aci_prio[AC_BK] == aci_prio[AC_VI] &&
+ aci_prio[AC_VI] == aci_prio[AC_VO])) {
+ /* 802.1d 0,3 maps to BE */
+ priority[0] = aci_prio[AC_BE];
+ priority[3] = aci_prio[AC_BE];
+
+ /* 802.1d 1,2 maps to BK */
+ priority[1] = aci_prio[AC_BK];
+ priority[2] = aci_prio[AC_BK];
+
+		/* 802.1d 4,5 maps to VI */
+ priority[4] = aci_prio[AC_VI];
+ priority[5] = aci_prio[AC_VI];
+
+ /* 802.1d 6,7 maps to VO */
+ priority[6] = aci_prio[AC_VO];
+ priority[7] = aci_prio[AC_VO];
+ } else {
+ /* Initialize to default priority */
+ brcmf_init_wmm_prio(priority);
+ }
+
+ brcmf_dbg(CONN, "Adj prio BE 0->%d, BK 1->%d, BK 2->%d, BE 3->%d\n",
+ priority[0], priority[1], priority[2], priority[3]);
+
+ brcmf_dbg(CONN, "Adj prio VI 4->%d, VI 5->%d, VO 6->%d, VO 7->%d\n",
+ priority[4], priority[5], priority[6], priority[7]);
+}
+
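A plausible call site for the two new helpers, translating a frame's 802.1d priority into an AC index and an 8-level precedence; the real consumer is presumably the fwsignal queueing code (note the new fwsignal.h include above), so treat this as illustration only:

	u8 prio = skb->priority & 0x7;			/* 802.1d, 0..7 */
	u8 aci  = brcmf_map_prio_to_aci(cfg, prio);	/* AC_BE..AC_VO */
	u8 prec = brcmf_map_prio_to_prec(cfg, prio);	/* 0..7 precedence */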
static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+ struct brcmf_cfg80211_edcf_acparam edcf_acparam_info[EDCF_AC_COUNT];
u32 req_len;
u32 resp_len;
s32 err = 0;
@@ -5625,6 +5812,17 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
GFP_KERNEL);
if (!conn_info->resp_ie)
conn_info->resp_ie_len = 0;
+
+ err = brcmf_fil_iovar_data_get(ifp, "wme_ac_sta",
+ edcf_acparam_info,
+ sizeof(edcf_acparam_info));
+ if (err) {
+ brcmf_err("could not get wme_ac_sta (%d)\n", err);
+ return err;
+ }
+
+ brcmf_wifi_prioritize_acparams(edcf_acparam_info,
+ cfg->ac_priority);
} else {
conn_info->resp_ie_len = 0;
conn_info->resp_ie = NULL;
@@ -6041,6 +6239,7 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
mutex_init(&cfg->usr_sync);
brcmf_init_escan(cfg);
brcmf_init_conf(cfg->conf);
+ brcmf_init_wmm_prio(cfg->ac_priority);
init_completion(&cfg->vif_disabled);
return err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 6ce48f6275a4..333fdf394f95 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -23,6 +23,23 @@
#define WL_ROAM_TRIGGER_LEVEL -75
#define WL_ROAM_DELTA 20
+/* WME Access Category Indices (ACIs) */
+#define AC_BE 0 /* Best Effort */
+#define AC_BK 1 /* Background */
+#define AC_VI 2 /* Video */
+#define AC_VO 3 /* Voice */
+#define EDCF_AC_COUNT 4
+#define MAX_8021D_PRIO 8
+
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_ACM_MASK 0x10
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_SHIFT 4
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_AIFSN_MAX 15
+#define EDCF_ECWMAX_MASK 0xf0
+
/* Keep BRCMF_ESCAN_BUF_SIZE below 64K (65536). Allocating over 64K can be
* problematic on some systems and should be avoided.
*/
@@ -153,19 +170,23 @@ enum brcmf_vif_status {
* @probe_req_ie: IE info for probe request.
* @probe_res_ie: IE info for probe response.
* @beacon_ie: IE info for beacon frame.
+ * @assoc_res_ie: IE info for association response frame.
* @probe_req_ie_len: IE info length for probe request.
* @probe_res_ie_len: IE info length for probe response.
* @beacon_ie_len: IE info length for beacon frame.
+ * @assoc_res_ie_len: IE info length for association response frame.
*/
struct vif_saved_ie {
u8 probe_req_ie[IE_MAX_LEN];
u8 probe_res_ie[IE_MAX_LEN];
u8 beacon_ie[IE_MAX_LEN];
u8 assoc_req_ie[IE_MAX_LEN];
+ u8 assoc_res_ie[IE_MAX_LEN];
u32 probe_req_ie_len;
u32 probe_res_ie_len;
u32 beacon_ie_len;
u32 assoc_req_ie_len;
+ u32 assoc_res_ie_len;
};
/**
@@ -205,6 +226,12 @@ struct brcmf_cfg80211_assoc_ielen_le {
__le32 resp_len;
};
+struct brcmf_cfg80211_edcf_acparam {
+ u8 ACI;
+ u8 ECW;
+ u16 TXOP; /* stored with the least-significant octet first */
+};
+
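
To make the record layout concrete, a standalone sketch that decodes one AC parameter record with the masks defined above; the raw byte values are hypothetical, and the contention windows derive from the exponents as 2^ECW - 1:

#include <stdio.h>
#include <stdint.h>

#define EDCF_ACI_MASK     0x60
#define EDCF_ACI_SHIFT    5
#define EDCF_ACM_MASK     0x10
#define EDCF_AIFSN_MASK   0x0f
#define EDCF_ECWMIN_MASK  0x0f
#define EDCF_ECWMAX_MASK  0xf0
#define EDCF_ECWMAX_SHIFT 4

int main(void)
{
	uint8_t ACI = 0x42;			/* ACI 2 (VI), ACM clear, AIFSN 2 */
	uint8_t ECW = 0x43;			/* ECWmax 4, ECWmin 3 */
	uint8_t txop_le[2] = { 0x5e, 0x00 };	/* TXOP, LS octet first */

	unsigned int aci   = (ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
	unsigned int acm   = !!(ACI & EDCF_ACM_MASK);
	unsigned int aifsn = ACI & EDCF_AIFSN_MASK;
	unsigned int cwmin = (1u << (ECW & EDCF_ECWMIN_MASK)) - 1;
	unsigned int cwmax = (1u << ((ECW & EDCF_ECWMAX_MASK) >>
				     EDCF_ECWMAX_SHIFT)) - 1;
	unsigned int txop  = txop_le[0] | (txop_le[1] << 8);

	/* prints: aci 2 acm 0 aifsn 2 cwmin 7 cwmax 15 txop 94 */
	printf("aci %u acm %u aifsn %u cwmin %u cwmax %u txop %u\n",
	       aci, acm, aifsn, cwmin, cwmax, txop);
	return 0;
}
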
/* dongle escan state */
enum wl_escan_state {
WL_ESCAN_STATE_IDLE,
@@ -323,6 +350,7 @@ struct brcmf_cfg80211_info {
struct brcmf_assoclist_le assoclist;
struct brcmf_cfg80211_wowl wowl;
struct brcmf_pno_info *pno;
+ u8 ac_priority[MAX_8021D_PRIO];
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 144cf4570bc3..8b5f49997c8b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -72,4 +72,8 @@ static inline void
brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {}
#endif
+u8 brcmf_map_prio_to_prec(void *cfg, u8 prio);
+
+u8 brcmf_map_prio_to_aci(void *cfg, u8 prio);
+
#endif /* BRCMFMAC_COMMON_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c
index 49db54d23e03..e44236cb210e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c
@@ -180,14 +180,8 @@ again:
int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
{
- void *address;
-
- address = commonring->buf_addr;
- address += (commonring->f_ptr * commonring->item_len);
- if (commonring->f_ptr > commonring->w_ptr) {
- address = commonring->buf_addr;
+ if (commonring->f_ptr > commonring->w_ptr)
commonring->f_ptr = 0;
- }
commonring->f_ptr = commonring->w_ptr;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 436f501be937..c88655acc78c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -579,9 +579,6 @@ static int brcmf_netdev_stop(struct net_device *ndev)
brcmf_cfg80211_down(ndev);
- if (ifp->drvr->bus_if->state == BRCMF_BUS_UP)
- brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0);
-
brcmf_net_setcarrier(ifp, false);
return 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
index 120515fe8250..eecf8a38d94a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
@@ -47,13 +47,10 @@ struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
return drvr->wiphy->debugfsdir;
}
-int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+void brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data))
{
- struct dentry *e;
-
WARN(!drvr->wiphy->debugfsdir, "wiphy not (yet) registered\n");
- e = debugfs_create_devm_seqfile(drvr->bus_if->dev, fn,
- drvr->wiphy->debugfsdir, read_fn);
- return PTR_ERR_OR_ZERO(e);
+ debugfs_create_devm_seqfile(drvr->bus_if->dev, fn,
+ drvr->wiphy->debugfsdir, read_fn);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index 9b221b509ade..4146faeed344 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -116,8 +116,8 @@ struct brcmf_bus;
struct brcmf_pub;
#ifdef DEBUG
struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
-int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
- int (*read_fn)(struct seq_file *seq, void *data));
+void brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+ int (*read_fn)(struct seq_file *seq, void *data));
int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
size_t len);
#else
@@ -126,11 +126,9 @@ static inline struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
return ERR_PTR(-ENOENT);
}
static inline
-int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
- int (*read_fn)(struct seq_file *seq, void *data))
-{
- return 0;
-}
+void brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+ int (*read_fn)(struct seq_file *seq, void *data))
+{ }
static inline
int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
size_t len)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 5da0dda0d899..0dcefbd0c000 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -285,13 +285,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
if (!err)
ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC);
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
+
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags,
drvr->settings->feature_disable);
ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
}
- brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
brcmf_feat_firmware_overrides(drvr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index 8e9d067bdfed..096f6b969dd8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -26,10 +26,10 @@
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
static const u8 brcmf_flowring_prio2fifo[] = {
- 1,
- 0,
0,
1,
+ 1,
+ 0,
2,
2,
3,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 8cc52935fd41..09701262330d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -311,28 +311,6 @@ struct brcmf_skbuff_cb {
/* How long to defer borrowing in jiffies */
#define BRCMF_FWS_BORROW_DEFER_PERIOD (HZ / 10)
-/**
- * enum brcmf_fws_fifo - fifo indices used by dongle firmware.
- *
- * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background.
- * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
- * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
- * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
- * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
- * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
- * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
- * @BRCMF_FWS_FIFO_COUNT: number of fifos.
- */
-enum brcmf_fws_fifo {
- BRCMF_FWS_FIFO_FIRST,
- BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
- BRCMF_FWS_FIFO_AC_BE,
- BRCMF_FWS_FIFO_AC_VI,
- BRCMF_FWS_FIFO_AC_VO,
- BRCMF_FWS_FIFO_BCMC,
- BRCMF_FWS_FIFO_ATIM,
- BRCMF_FWS_FIFO_COUNT
-};
/**
* enum brcmf_fws_txstatus - txstatus flag values.
@@ -2130,8 +2108,10 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
skcb->if_flags = 0;
skcb->state = BRCMF_FWS_SKBSTATE_NEW;
brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
+
+ /* mapping from 802.1d priority to firmware fifo index */
if (!multicast)
- fifo = brcmf_fws_prio2fifo[skb->priority];
+ fifo = brcmf_map_prio_to_aci(drvr->config, skb->priority);
brcmf_fws_lock(fws);
if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
@@ -2356,7 +2336,7 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
fws->drvr = drvr;
fws->fcmode = drvr->settings->fcmode;
- if ((drvr->bus_if->always_use_fws_queue == false) &&
+ if (!drvr->bus_if->always_use_fws_queue &&
(fws->fcmode == BRCMF_FWS_FCMODE_NONE)) {
fws->avoid_queueing = true;
brcmf_dbg(INFO, "FWS queueing will be avoided\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index b486d578ec96..b16a9d1c0508 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -6,6 +6,29 @@
#ifndef FWSIGNAL_H_
#define FWSIGNAL_H_
+/**
+ * enum brcmf_fws_fifo - fifo indices used by dongle firmware.
+ *
+ * @BRCMF_FWS_FIFO_FIRST: first fifo, i.e. background.
+ * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
+ * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
+ * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
+ * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
+ * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
+ * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
+ * @BRCMF_FWS_FIFO_COUNT: number of fifos.
+ */
+enum brcmf_fws_fifo {
+ BRCMF_FWS_FIFO_FIRST,
+ BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
+ BRCMF_FWS_FIFO_AC_BE,
+ BRCMF_FWS_FIFO_AC_VI,
+ BRCMF_FWS_FIFO_AC_VO,
+ BRCMF_FWS_FIFO_BCMC,
+ BRCMF_FWS_FIFO_ATIM,
+ BRCMF_FWS_FIFO_COUNT
+};
+
struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
void brcmf_fws_detach(struct brcmf_fws_info *fws);
void brcmf_fws_debugfs_create(struct brcmf_pub *drvr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 1f5deea5a288..d2795dc17c46 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -17,6 +17,7 @@
#include "fwil_types.h"
#include "p2p.h"
#include "cfg80211.h"
+#include "feature.h"
/* parameters used for p2p escan */
#define P2PAPI_SCAN_NPROBES 1
@@ -59,12 +60,13 @@
#define P2P_AF_MIN_DWELL_TIME 100
#define P2P_AF_MED_DWELL_TIME 400
#define P2P_AF_LONG_DWELL_TIME 1000
-#define P2P_AF_TX_MAX_RETRY 1
+#define P2P_AF_TX_MAX_RETRY 5
#define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000)
#define P2P_INVALID_CHANNEL -1
#define P2P_CHANNEL_SYNC_RETRY 5
#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450)
#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
+#define P2P_AF_RETRY_DELAY_TIME 40
/* WiFi P2P Public Action Frame OUI Subtypes */
#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */
@@ -92,6 +94,9 @@
#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comeback Response AF */
#define BRCMF_P2P_DISABLE_TIMEOUT msecs_to_jiffies(500)
+
+/* Mask for retry counter of custom dwell time */
+#define CUSTOM_RETRY_MASK 0xff000000
/**
* struct brcmf_p2p_disc_st_le - set discovery state in firmware.
*
@@ -457,10 +462,21 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
*/
static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
{
+ struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
bool random_addr = false;
+ bool local_admin = false;
- if (!dev_addr || is_zero_ether_addr(dev_addr))
- random_addr = true;
+ if (!dev_addr || is_zero_ether_addr(dev_addr)) {
+ /* If the primary interface address is already locally
+ * administered, create a new random address.
+ */
+ if (pri_ifp->mac_addr[0] & 0x02) {
+ random_addr = true;
+ } else {
+ dev_addr = pri_ifp->mac_addr;
+ local_admin = true;
+ }
+ }
/* Generate the P2P Device Address obtaining a random ethernet
* address with the locally administered bit set.
@@ -470,13 +486,20 @@ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
else
memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
+ if (local_admin)
+ p2p->dev_addr[0] |= 0x02;
+
/* Generate the P2P Interface Address. If the discovery and connection
* BSSCFGs need to simultaneously co-exist, then this address must be
* different from the P2P Device Address, but also locally administered.
*/
- memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
- p2p->int_addr[0] |= 0x02;
- p2p->int_addr[4] ^= 0x80;
+ memcpy(p2p->conn_int_addr, p2p->dev_addr, ETH_ALEN);
+ p2p->conn_int_addr[0] |= 0x02;
+ p2p->conn_int_addr[4] ^= 0x80;
+
+ memcpy(p2p->conn2_int_addr, p2p->dev_addr, ETH_ALEN);
+ p2p->conn2_int_addr[0] |= 0x02;
+ p2p->conn2_int_addr[4] ^= 0x90;
}
/**
@@ -1245,6 +1268,30 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
}
/**
+ * brcmf_p2p_abort_action_frame() - abort action frame.
+ *
+ * @cfg: common configuration struct.
+ *
+ */
+static s32 brcmf_p2p_abort_action_frame(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_cfg80211_vif *vif;
+ s32 err;
+ s32 int_val = 1;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe_abort", &int_val,
+ sizeof(s32));
+ if (err)
+ brcmf_err("aborting action frame failed (%d)\n", err);
+
+ return err;
+}
+
+/**
* brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete.
*
* @cfg: common configuration struct.
@@ -1255,6 +1302,7 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_p2p_info *p2p = &cfg->p2p;
struct brcmf_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+ s32 err;
if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
(test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
@@ -1263,8 +1311,13 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
/* if channel is not zero, "actframe" uses off channel scan.
* So abort scan for off channel completion.
*/
- if (p2p->af_sent_channel)
- brcmf_notify_escan_complete(cfg, ifp, true, true);
+ if (p2p->af_sent_channel) {
+ /* abort the action frame via actframe_abort; if that
+ * fails, abort the scan instead
+ */
+ err = brcmf_p2p_abort_action_frame(cfg);
+ if (err)
+ brcmf_notify_escan_complete(cfg, ifp, true,
+ true);
+ }
} else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
&p2p->status)) {
brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
@@ -1491,6 +1544,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
{
struct brcmf_pub *drvr = p2p->cfg->pub;
struct brcmf_cfg80211_vif *vif;
+ struct brcmf_p2p_action_frame *p2p_af;
s32 err = 0;
s32 timeout = 0;
@@ -1500,7 +1554,13 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ /* check if it is a p2p_presence response */
+ p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data;
+ if (p2p_af->subtype == P2P_AF_PRESENCE_RSP)
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+ else
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+
err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
sizeof(*af_params));
if (err) {
@@ -1640,6 +1700,17 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
return err;
}
+static bool brcmf_p2p_check_dwell_overflow(s32 requested_dwell,
+ unsigned long dwell_jiffies)
+{
+ if ((requested_dwell & CUSTOM_RETRY_MASK) &&
+ (jiffies_to_msecs(jiffies - dwell_jiffies) >
+ (requested_dwell & ~CUSTOM_RETRY_MASK))) {
+ brcmf_err("Action frame TX retry time over dwell time!\n");
+ return true;
+ }
+ return false;
+}
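
The helper treats a nonzero top byte of the requested dwell time (CUSTOM_RETRY_MASK) as a marker that the remaining bits carry a hard millisecond budget for retries. A standalone sketch with plain millisecond arithmetic in place of jiffies; the values are illustrative:

#include <stdio.h>

#define CUSTOM_RETRY_MASK 0xff000000

static int dwell_overflow(unsigned int requested_dwell, unsigned int elapsed_ms)
{
	/* the budget only applies when the marker byte is set */
	return (requested_dwell & CUSTOM_RETRY_MASK) &&
	       elapsed_ms > (requested_dwell & ~CUSTOM_RETRY_MASK);
}

int main(void)
{
	unsigned int requested = 0x01000000 | 200;	/* marker set, 200 ms budget */

	printf("150 ms elapsed -> %d\n", dwell_overflow(requested, 150));
	printf("250 ms elapsed -> %d\n", dwell_overflow(requested, 250));
	return 0;	/* prints 0, then 1 */
}
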
/**
* brcmf_p2p_send_action_frame() - send action frame.
*
@@ -1664,6 +1735,10 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
s32 tx_retry;
s32 extra_listen_time;
uint delta_ms;
+ unsigned long dwell_jiffies = 0;
+ bool dwell_overflow = false;
+
+ s32 requested_dwell = af_params->dwell_time;
action_frame = &af_params->action_frame;
action_frame_len = le16_to_cpu(action_frame->len);
@@ -1775,14 +1850,23 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
/* update channel */
af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
}
+ dwell_jiffies = jiffies;
+ dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
+ dwell_jiffies);
tx_retry = 0;
while (!p2p->block_gon_req_tx &&
- (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
+ (!ack) && (tx_retry < P2P_AF_TX_MAX_RETRY) &&
+ !dwell_overflow) {
+ if (af_params->channel)
+ msleep(P2P_AF_RETRY_DELAY_TIME);
+
ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
tx_retry++;
+ dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
+ dwell_jiffies);
}
- if (ack == false) {
+ if (!ack) {
bphy_err(drvr, "Failed to send Action Frame(retry %d)\n",
tx_retry);
clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
@@ -1994,7 +2078,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
if_request.type = cpu_to_le16((u16)if_type);
if_request.chspec = cpu_to_le16(chanspec);
- memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr));
+ memcpy(if_request.addr, p2p->conn_int_addr, sizeof(if_request.addr));
brcmf_cfg80211_arm_vif_event(cfg, vif);
err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
@@ -2149,6 +2233,27 @@ fail:
return ERR_PTR(err);
}
+static int brcmf_p2p_get_conn_idx(struct brcmf_cfg80211_info *cfg)
+{
+ int i;
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+
+ if (!ifp)
+ return -ENODEV;
+
+ for (i = P2PAPI_BSSCFG_CONNECTION; i < P2PAPI_BSSCFG_MAX; i++) {
+ if (!cfg->p2p.bss_idx[i].vif) {
+ if (i == P2PAPI_BSSCFG_CONNECTION2 &&
+ !(brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
+ brcmf_err("Multi p2p not supported\n");
+ return -EIO;
+ }
+ return i;
+ }
+ }
+ return -EIO;
+}
+
/**
* brcmf_p2p_add_vif() - create a new P2P virtual interface.
*
@@ -2168,7 +2273,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_vif *vif;
enum brcmf_fil_p2p_if_types iftype;
- int err;
+ int err = 0;
+ int connidx;
+ u8 *p2p_intf_addr;
if (brcmf_cfg80211_vif_event_armed(cfg))
return ERR_PTR(-EBUSY);
@@ -2194,9 +2301,21 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
return (struct wireless_dev *)vif;
brcmf_cfg80211_arm_vif_event(cfg, vif);
- err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr,
- iftype);
+ connidx = brcmf_p2p_get_conn_idx(cfg);
+
+ if (connidx == P2PAPI_BSSCFG_CONNECTION)
+ p2p_intf_addr = cfg->p2p.conn_int_addr;
+ else if (connidx == P2PAPI_BSSCFG_CONNECTION2)
+ p2p_intf_addr = cfg->p2p.conn2_int_addr;
+ else
+ err = -EINVAL;
+
+ if (!err)
+ err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp,
+ p2p_intf_addr, iftype);
+
if (err) {
+ brcmf_err("request p2p interface failed\n");
brcmf_cfg80211_arm_vif_event(cfg, NULL);
goto fail;
}
@@ -2228,7 +2347,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
goto fail;
}
- cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
+ cfg->p2p.bss_idx[connidx].vif = vif;
/* Disable firmware roaming for P2P interface */
brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
if (iftype == BRCMF_FIL_P2P_IF_GO) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index 64ab9b6a677d..d2ecee565bf2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -14,13 +14,15 @@ struct brcmf_cfg80211_info;
*
* @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg.
* @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg.
- * @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION: maps to driver's 1st P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION2: maps to driver's 2nd P2P connection bsscfg.
* @P2PAPI_BSSCFG_MAX: used for range checking.
*/
enum p2p_bss_type {
P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
- P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_CONNECTION, /* driver's 1st P2P connection bsscfg */
+ P2PAPI_BSSCFG_CONNECTION2, /* driver's 2nd P2P connection bsscfg */
P2PAPI_BSSCFG_MAX
};
@@ -119,7 +121,8 @@ struct brcmf_p2p_info {
struct brcmf_cfg80211_info *cfg;
unsigned long status;
u8 dev_addr[ETH_ALEN];
- u8 int_addr[ETH_ALEN];
+ u8 conn_int_addr[ETH_ALEN];
+ u8 conn2_int_addr[ETH_ALEN];
struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
struct timer_list listen_timer;
u8 listen_channel;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 3a08252f1a53..310d8075f5d7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -41,9 +41,22 @@
/* watermark expressed in number of words */
#define DEFAULT_F2_WATERMARK 0x8
#define CY_4373_F2_WATERMARK 0x40
+#define CY_4373_F1_MESBUSYCTRL (CY_4373_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB)
#define CY_43012_F2_WATERMARK 0x60
-#define CY_4359_F2_WATERMARK 0x40
-#define CY_4359_F1_MESBUSYCTRL (CY_4359_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB)
+#define CY_43012_MES_WATERMARK 0x50
+#define CY_43012_MESBUSYCTRL (CY_43012_MES_WATERMARK | \
+ SBSDIO_MESBUSYCTRL_ENAB)
+#define CY_4339_F2_WATERMARK 48
+#define CY_4339_MES_WATERMARK 80
+#define CY_4339_MESBUSYCTRL (CY_4339_MES_WATERMARK | \
+ SBSDIO_MESBUSYCTRL_ENAB)
+#define CY_43455_F2_WATERMARK 0x60
+#define CY_43455_MES_WATERMARK 0x50
+#define CY_43455_MESBUSYCTRL (CY_43455_MES_WATERMARK | \
+ SBSDIO_MESBUSYCTRL_ENAB)
+#define CY_435X_F2_WATERMARK 0x40
+#define CY_435X_F1_MESBUSYCTRL (CY_435X_F2_WATERMARK | \
+ SBSDIO_MESBUSYCTRL_ENAB)
#ifdef DEBUG
@@ -315,15 +328,6 @@ struct rte_console {
#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
#define BRCMF_SDIO_MAX_ACCESS_ERRORS 5
-/*
- * Conversion of 802.1D priority to precedence level
- */
-static uint prio2prec(u32 prio)
-{
- return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
- (prio^2) : prio;
-}
-
#ifdef DEBUG
/* Device console log buffer state */
struct brcmf_console {
@@ -2774,7 +2778,13 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
skb_push(pkt, bus->tx_hdrlen);
/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
- prec = prio2prec((pkt->priority & PRIOMASK));
+ /* In WLAN, the AP sets the priority via WMM parameters, which need
+ * not follow the standard 802.1d ordering. Based on the AP's WMM
+ * config, map the 802.1d priority to the corresponding precedence
+ * level.
+ */
+ prec = brcmf_map_prio_to_prec(bus_if->drvr->config,
+ (pkt->priority & PRIOMASK));
/* Check for existing queue, current flow-control,
pending event, or pending clock */
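
For contrast with the dynamic mapping, a standalone copy of the static prio2prec() that this series removes; it assumes PRIO_8021D_NONE == 0 and PRIO_8021D_BE == 2 (the values under which the xor-with-2 swap makes sense), so BE and NONE trade places and every other priority passes through unchanged:

#include <stdio.h>

#define PRIO_8021D_NONE 0	/* assumed value */
#define PRIO_8021D_BE   2	/* assumed value */

static unsigned int prio2prec(unsigned int prio)
{
	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
	       (prio ^ 2) : prio;
}

int main(void)
{
	unsigned int prio;

	for (prio = 0; prio < 8; prio++)
		printf("prio %u -> prec %u\n", prio, prio2prec(prio));
	return 0;	/* 0->2, 1->1, 2->0, 3..7 unchanged */
}
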
@@ -4187,7 +4197,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
bus->hostintmask, NULL);
switch (sdiod->func1->device) {
- case SDIO_DEVICE_ID_CYPRESS_4373:
+ case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_4373_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
@@ -4198,10 +4208,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
&err);
brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
- CY_4373_F2_WATERMARK |
- SBSDIO_MESBUSYCTRL_ENAB, &err);
+ CY_4373_F1_MESBUSYCTRL, &err);
break;
- case SDIO_DEVICE_ID_CYPRESS_43012:
+ case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_43012_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
@@ -4211,19 +4220,51 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
&err);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+ CY_43012_MESBUSYCTRL, &err);
+ break;
+ case SDIO_DEVICE_ID_BROADCOM_4339:
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 4339\n",
+ CY_4339_F2_WATERMARK);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+ CY_4339_F2_WATERMARK, &err);
+ devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
+ &err);
+ devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+ brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
+ &err);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+ CY_4339_MESBUSYCTRL, &err);
+ break;
+ case SDIO_DEVICE_ID_BROADCOM_43455:
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 43455\n",
+ CY_43455_F2_WATERMARK);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+ CY_43455_F2_WATERMARK, &err);
+ devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
+ &err);
+ devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+ brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
+ &err);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+ CY_43455_MESBUSYCTRL, &err);
break;
case SDIO_DEVICE_ID_BROADCOM_4359:
+ /* fallthrough */
+ case SDIO_DEVICE_ID_BROADCOM_4354:
+ /* fallthrough */
+ case SDIO_DEVICE_ID_BROADCOM_4356:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
- CY_4359_F2_WATERMARK);
+ CY_435X_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
- CY_4359_F2_WATERMARK, &err);
+ CY_435X_F2_WATERMARK, &err);
devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
&err);
devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
&err);
brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
- CY_4359_F1_MESBUSYCTRL, &err);
+ CY_435X_F1_MESBUSYCTRL, &err);
break;
default:
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 8e8b685cfe09..648efcbc819f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -1431,6 +1431,7 @@ int brcms_up(struct brcms_info *wl)
* precondition: perimeter lock has been acquired
*/
void brcms_down(struct brcms_info *wl)
+ __must_hold(&wl->lock)
{
uint callbacks, ret_val = 0;
@@ -1717,6 +1718,7 @@ int brcms_check_firmwares(struct brcms_info *wl)
* precondition: perimeter lock has been acquired
*/
bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
+ __must_hold(&wl->lock)
{
bool blocked = brcms_c_check_radio_disabled(wl->wlc);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 7f2c15c799d2..77494fc30c2c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -1057,7 +1057,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs->lasttxtime = 0;
*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
- if (*fatal == true)
+ if (*fatal)
return false;
n++;
}
@@ -3768,17 +3768,14 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
* Write this BSS config's MAC address to core.
* Updates RXE match engine.
*/
-static int brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
+static void brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
{
- int err = 0;
struct brcms_c_info *wlc = bsscfg->wlc;
/* enter the MAC addr into the RXE match registers */
brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, wlc->pub->cur_etheraddr);
brcms_c_ampdu_macaddr_upd(wlc);
-
- return err;
}
/* Write the BSS config's BSSID address to core (set_bssid in d11procs.tcl).
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c
index 0ab865de1491..79d4a7a4da8b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c
@@ -304,9 +304,8 @@ int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force)
* update wlc->stf->ss_opmode which represents the operational stf_ss mode
* we're using
*/
-int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
+void brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
{
- int ret_code = 0;
u8 prev_stf_ss;
u8 upd_stf_ss;
@@ -325,7 +324,7 @@ int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
PHY_TXC1_MODE_SISO : PHY_TXC1_MODE_CDD;
} else {
if (wlc->band != band)
- return ret_code;
+ return;
upd_stf_ss = (wlc->stf->txstreams == 1) ?
PHY_TXC1_MODE_SISO : band->band_stf_ss_mode;
}
@@ -333,8 +332,6 @@ int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
wlc->stf->ss_opmode = upd_stf_ss;
brcms_b_band_stf_ss_set(wlc->hw, upd_stf_ss);
}
-
- return ret_code;
}
int brcms_c_stf_attach(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
index ba9493009a33..aa4ab53bf634 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
@@ -25,7 +25,7 @@ void brcms_c_stf_detach(struct brcms_c_info *wlc);
void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
u16 *ss_algo_channel, u16 chanspec);
-int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 8363f91df7ea..827bb6d74815 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -1925,6 +1925,10 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
airo_print_err(dev->name, "%s: skb == NULL!", __func__);
return NETDEV_TX_OK;
}
+ if (skb_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
npacks = skb_queue_len (&ai->txq);
if (npacks >= MAXTXQ - 1) {
@@ -2127,6 +2131,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
airo_print_err(dev->name, "%s: skb == NULL!", __func__);
return NETDEV_TX_OK;
}
+ if (skb_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
/* Find a vacant FID */
for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ );
@@ -2201,6 +2209,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
airo_print_err(dev->name, "%s: skb == NULL!", __func__);
return NETDEV_TX_OK;
}
+ if (skb_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
/* Find a vacant FID */
for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ );
diff --git a/drivers/net/wireless/intel/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig
index ab17903ba9f8..f42b3cdce611 100644
--- a/drivers/net/wireless/intel/ipw2x00/Kconfig
+++ b/drivers/net/wireless/intel/ipw2x00/Kconfig
@@ -16,7 +16,7 @@ config IPW2100
A driver for the Intel PRO/Wireless 2100 Network
Connection 802.11b wireless network adapter.
- See <file:Documentation/networking/device_drivers/intel/ipw2100.txt>
+ See <file:Documentation/networking/device_drivers/intel/ipw2100.rst>
for information on the capabilities currently enabled in this driver
and for tips for debugging issues and problems.
@@ -78,7 +78,7 @@ config IPW2200
A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
Connection adapters.
- See <file:Documentation/networking/device_drivers/intel/ipw2200.txt>
+ See <file:Documentation/networking/device_drivers/intel/ipw2200.rst>
for information on the capabilities currently enabled in this
driver and for tips for debugging issues and problems.
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 97ea6e2035e6..624fe721e2b5 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -8352,7 +8352,7 @@ static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) {
printk(KERN_WARNING DRV_NAME ": Firmware image not compatible "
"(detected version id of %u). "
- "See Documentation/networking/device_drivers/intel/ipw2100.txt\n",
+ "See Documentation/networking/device_drivers/intel/ipw2100.rst\n",
h->version);
return 1;
}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 60b5e08dd6df..661e63bfc892 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3386,7 +3386,7 @@ struct ipw_fw {
__le32 boot_size;
__le32 ucode_size;
__le32 fw_size;
- u8 data[0];
+ u8 data[];
};
static int ipw_get_fw(struct ipw_priv *priv,
@@ -3770,10 +3770,8 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
struct pci_dev *dev = priv->pci_dev;
q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
- if (!q->txb) {
- IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
+ if (!q->txb)
return -ENOMEM;
- }
q->bd =
pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
@@ -7042,23 +7040,22 @@ static int ipw_qos_association(struct ipw_priv *priv,
* off the network from the associated setting, adjust the QoS
* setting
*/
-static int ipw_qos_association_resp(struct ipw_priv *priv,
+static void ipw_qos_association_resp(struct ipw_priv *priv,
struct libipw_network *network)
{
- int ret = 0;
unsigned long flags;
u32 size = sizeof(struct libipw_qos_parameters);
int set_qos_param = 0;
if ((priv == NULL) || (network == NULL) ||
(priv->assoc_network == NULL))
- return ret;
+ return;
if (!(priv->status & STATUS_ASSOCIATED))
- return ret;
+ return;
if ((priv->ieee->iw_mode != IW_MODE_INFRA))
- return ret;
+ return;
spin_lock_irqsave(&priv->ieee->lock, flags);
if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
@@ -7088,8 +7085,6 @@ static int ipw_qos_association_resp(struct ipw_priv *priv,
if (set_qos_param == 1)
schedule_work(&priv->qos_activate);
-
- return ret;
}
static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
@@ -10643,10 +10638,8 @@ static void ipw_bg_link_down(struct work_struct *work)
mutex_unlock(&priv->mutex);
}
-static int ipw_setup_deferred_work(struct ipw_priv *priv)
+static void ipw_setup_deferred_work(struct ipw_priv *priv)
{
- int ret = 0;
-
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
@@ -10680,8 +10673,6 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
tasklet_init(&priv->irq_tasklet,
ipw_irq_tasklet, (unsigned long)priv);
-
- return ret;
}
static void shim__set_security(struct net_device *dev,
@@ -11662,11 +11653,7 @@ static int ipw_pci_probe(struct pci_dev *pdev,
IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
- err = ipw_setup_deferred_work(priv);
- if (err) {
- IPW_ERROR("Unable to setup deferred work\n");
- goto out_iounmap;
- }
+ ipw_setup_deferred_work(priv);
ipw_sw_reset(priv, 1);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index 4346520545c4..e1ec1c96dcd8 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -448,7 +448,7 @@ struct tfd_command {
u8 index;
u8 length;
__le16 reserved;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct tfd_data {
@@ -675,7 +675,7 @@ struct ipw_rx_frame {
// is identical)
u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen
__le16 length;
- u8 data[0];
+ u8 data[];
} __packed;
struct ipw_rx_header {
@@ -1002,7 +1002,7 @@ struct ipw_cmd { /* XXX */
* Incoming parameters listed 1-st, followed by outcoming params.
* nParams=(len+3)/4+status_len
*/
- u32 param[0];
+ u32 param[];
} __packed;
#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */
@@ -1108,7 +1108,7 @@ struct ipw_fw_error { /* XXX */
u32 log_len;
struct ipw_error_elem *elem;
struct ipw_event *log;
- u8 payload[0];
+ u8 payload[];
} __packed;
#ifdef CONFIG_IPW2200_PROMISCUOUS
@@ -1153,7 +1153,7 @@ struct ipw_rt_hdr {
s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
s8 rt_dbmnoise;
u8 rt_antenna; /* antenna number */
- u8 payload[0]; /* payload... */
+ u8 payload[]; /* payload... */
} __packed;
#endif
@@ -1329,7 +1329,7 @@ struct ipw_priv {
s8 tx_power;
- /* Track time in suspend using CLOCK_BOOTIME */
+ /* Track time in suspend using CLOCK_BOOTTIME */
time64_t suspend_at;
time64_t suspend_time;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index e4a6ab4e8391..e87538a8b88b 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -334,7 +334,7 @@ struct libipw_hdr_1addr {
__le16 frame_ctl;
__le16 duration_id;
u8 addr1[ETH_ALEN];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct libipw_hdr_2addr {
@@ -342,7 +342,7 @@ struct libipw_hdr_2addr {
__le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct libipw_hdr_3addr {
@@ -352,7 +352,7 @@ struct libipw_hdr_3addr {
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct libipw_hdr_4addr {
@@ -363,7 +363,7 @@ struct libipw_hdr_4addr {
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
u8 addr4[ETH_ALEN];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct libipw_hdr_3addrqos {
@@ -380,7 +380,7 @@ struct libipw_hdr_3addrqos {
struct libipw_info_element {
u8 id;
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
/*
@@ -406,7 +406,7 @@ struct libipw_auth {
__le16 transaction;
__le16 status;
/* challenge */
- struct libipw_info_element info_element[0];
+ struct libipw_info_element info_element[];
} __packed;
struct libipw_channel_switch {
@@ -442,7 +442,7 @@ struct libipw_disassoc {
struct libipw_probe_request {
struct libipw_hdr_3addr header;
/* SSID, supported rates */
- struct libipw_info_element info_element[0];
+ struct libipw_info_element info_element[];
} __packed;
struct libipw_probe_response {
@@ -452,7 +452,7 @@ struct libipw_probe_response {
__le16 capability;
/* SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN */
- struct libipw_info_element info_element[0];
+ struct libipw_info_element info_element[];
} __packed;
/* Alias beacon for probe_response */
@@ -463,7 +463,7 @@ struct libipw_assoc_request {
__le16 capability;
__le16 listen_interval;
/* SSID, supported rates, RSN */
- struct libipw_info_element info_element[0];
+ struct libipw_info_element info_element[];
} __packed;
struct libipw_reassoc_request {
@@ -471,7 +471,7 @@ struct libipw_reassoc_request {
__le16 capability;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
- struct libipw_info_element info_element[0];
+ struct libipw_info_element info_element[];
} __packed;
struct libipw_assoc_response {
@@ -480,7 +480,7 @@ struct libipw_assoc_response {
__le16 status;
__le16 aid;
/* supported rates */
- struct libipw_info_element info_element[0];
+ struct libipw_info_element info_element[];
} __packed;
struct libipw_txb {
@@ -490,7 +490,7 @@ struct libipw_txb {
u8 reserved;
u16 frag_size;
u16 payload_size;
- struct sk_buff *fragments[0];
+ struct sk_buff *fragments[];
};
/* SWEEP TABLE ENTRIES NUMBER */
@@ -594,7 +594,7 @@ struct libipw_ibss_dfs {
struct libipw_info_element ie;
u8 owner[ETH_ALEN];
u8 recovery_interval;
- struct libipw_channel_map channel_map[0];
+ struct libipw_channel_map channel_map[];
};
struct libipw_csa {
@@ -830,7 +830,7 @@ struct libipw_device {
/* This must be the last item so that it points to the data
* allocated beyond this structure by alloc_libipw */
- u8 priv[0];
+ u8 priv[];
};
#define IEEE_A (1<<0)
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
index dd744135c956..89c6671b32bc 100644
--- a/drivers/net/wireless/intel/iwlegacy/commands.h
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -203,7 +203,7 @@ struct il_cmd_header {
__le16 sequence;
/* command or response/notification data follows immediately */
- u8 data[0];
+ u8 data[];
} __packed;
/**
@@ -1112,7 +1112,7 @@ struct il_wep_cmd {
u8 global_key_type;
u8 flags;
u8 reserved;
- struct il_wep_key key[0];
+ struct il_wep_key key[];
} __packed;
#define WEP_KEY_WEP_TYPE 1
@@ -1166,7 +1166,7 @@ struct il3945_rx_frame_stats {
u8 agc;
__le16 sig_avg;
__le16 noise_diff;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct il3945_rx_frame_hdr {
@@ -1175,7 +1175,7 @@ struct il3945_rx_frame_hdr {
u8 reserved1;
u8 rate;
__le16 len;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct il3945_rx_frame_end {
@@ -1211,7 +1211,7 @@ struct il4965_rx_non_cfg_phy {
__le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
__le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
- u8 pad[0];
+ u8 pad[];
} __packed;
/*
@@ -1409,7 +1409,7 @@ struct il3945_tx_cmd {
* length is 26 or 30 bytes, followed by payload data
*/
u8 payload[0];
- struct ieee80211_hdr hdr[0];
+ struct ieee80211_hdr hdr[];
} __packed;
/*
@@ -1511,7 +1511,7 @@ struct il_tx_cmd {
* length is 26 or 30 bytes, followed by payload data
*/
u8 payload[0];
- struct ieee80211_hdr hdr[0];
+ struct ieee80211_hdr hdr[];
} __packed;
/* TX command response is sent after *3945* transmission attempts.
@@ -2520,7 +2520,7 @@ struct il3945_scan_cmd {
* for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
- u8 data[0];
+ u8 data[];
} __packed;
struct il_scan_cmd {
@@ -2564,7 +2564,7 @@ struct il_scan_cmd {
* for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
- u8 data[0];
+ u8 data[];
} __packed;
/* Can abort will notify by complete notification with abort status. */
@@ -2664,7 +2664,7 @@ struct il3945_tx_beacon_cmd {
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
- struct ieee80211_hdr frame[0]; /* beacon frame */
+ struct ieee80211_hdr frame[]; /* beacon frame */
} __packed;
struct il_tx_beacon_cmd {
@@ -2672,7 +2672,7 @@ struct il_tx_beacon_cmd {
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
- struct ieee80211_hdr frame[0]; /* beacon frame */
+ struct ieee80211_hdr frame[]; /* beacon frame */
} __packed;
/******************************************************************************
diff --git a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
index a3b490501a70..1e8ab704dbfb 100644
--- a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
@@ -53,7 +53,7 @@ struct ieee80211_measurement_params {
struct ieee80211_info_element {
u8 id;
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct ieee80211_measurement_request {
@@ -61,7 +61,7 @@ struct ieee80211_measurement_request {
u8 token;
u8 mode;
u8 type;
- struct ieee80211_measurement_params params[0];
+ struct ieee80211_measurement_params params[];
} __packed;
struct ieee80211_measurement_report {
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 0aae3fa4128c..fbcd1405aeea 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -13,7 +13,8 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
-iwlwifi-objs += fw/notif-wait.o
+
+iwlwifi-objs += fw/img.o fw/notif-wait.o
iwlwifi-objs += fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index bc49cdd819df..efe427049a6e 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 53
+#define IWL_22000_UCODE_API_MAX 56
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -73,11 +73,8 @@
#define IWL_22000_SMEM_OFFSET 0x400000
#define IWL_22000_SMEM_LEN 0xD0000
-#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
-#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
-#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
+#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_QNJ_B_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-"
@@ -85,21 +82,18 @@
#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
-#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
-#define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-"
-#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
-#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
-#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
-#define IWL_22000_SOSNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
-
-#define IWL_22000_HR_MODULE_FIRMWARE(api) \
- IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_JF_MODULE_FIRMWARE(api) \
- IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
- IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
+#define IWL_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-"
+#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
+#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
+#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
+#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
+#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
+
+#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
+ IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QNJ_B_HR_B_MODULE_FIRMWARE(api) \
+ IWL_QNJ_B_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
@@ -112,14 +106,18 @@
IWL_QNJ_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_CC_A_MODULE_FIRMWARE(api) \
IWL_CC_A_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(api) \
- IWL_22000_SO_A_JF_B_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(api) \
- IWL_22000_SO_A_HR_B_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(api) \
- IWL_22000_SO_A_GF_A_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(api) \
- IWL_22000_TY_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \
+ IWL_SO_A_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_SO_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_SO_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_TY_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -213,7 +211,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 512, \
+ .min_256_ba_txq_size = 1024, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -229,6 +227,15 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
+const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+};
+
const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
@@ -238,9 +245,10 @@ const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.base_params = &iwl_22000_base_params,
.integrated = true,
.xtal_latency = 5000,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
-const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
+const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
@@ -248,17 +256,21 @@ const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
.integrated = true,
- .xtal_latency = 12000,
- .low_latency_xtal = true,
+ .xtal_latency = 1820,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_1820US,
};
-const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
+const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
+ .integrated = true,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
/*
@@ -324,15 +336,16 @@ const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = {
};
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
+const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
const char iwl_ax200_killer_1650x_name[] =
"Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)";
-const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
- .name = "Intel(R) Wi-Fi 6 AX101",
- .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -346,7 +359,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -357,8 +370,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
- .name = "Intel(R) Wi-Fi 6 AX101",
+const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -384,8 +396,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
- .name = "Intel(R) Wi-Fi 6 AX101",
+const struct iwl_cfg iwl_quz_a0_hr1_b0 = {
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -451,7 +462,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = {
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -464,7 +475,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -501,9 +512,8 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_B_FW_PRE,
+const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg = {
+ .fw_name_pre = IWL_QNJ_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -516,60 +526,89 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
+ .fw_name_pre = IWL_SO_A_JF_B_FW_PRE,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
- .fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
+ .name = "Intel(R) Wi-Fi 6 AX210 160MHz",
+ .fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX211 160MHz",
- .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
+ .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
+ .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
+ .trans.xtal_latency = 12000,
+ .trans.low_latency_xtal = true,
};
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
- .fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
+ .name = "Intel(R) Wi-Fi 6 AX210 160MHz",
+ .fw_name_pre = IWL_TY_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
- .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
+ .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
+ .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
+ .trans.xtal_latency = 12000,
+ .trans.low_latency_xtal = true,
};
const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
- .fw_name_pre = IWL_22000_SOSNJ_A_GF4_A_FW_PRE,
+ .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
+ .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
-MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
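The MODULE_FIRMWARE() entries above also determine the file name the driver requests from userspace: each *_FW_PRE prefix (which already ends in '-') is concatenated with the ucode API number and a ".ucode" suffix. A minimal standalone sketch of that naming convention, using a hypothetical prefix and API number rather than the real macro values:

#include <stdio.h>

int main(void)
{
	/* hypothetical values standing in for IWL_QU_B_HR_B_FW_PRE
	 * and IWL_22000_UCODE_API_MAX */
	const char *fw_pre = "iwlwifi-Qu-b0-hr-b0-";
	int api_max = 56;
	char name[64];

	snprintf(name, sizeof(name), "%s%d.ucode", fw_pre, api_max);
	printf("%s\n", name);	/* prints iwlwifi-Qu-b0-hr-b0-56.ucode */
	return 0;
}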
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 8d8380026180..4bd792c06ff6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2020 Intel Corporation. All rights reserved.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -810,7 +810,6 @@ struct iwl_priv {
u8 bt_traffic_load, last_bt_traffic_load;
bool bt_ch_announce;
bool bt_full_concurrent;
- bool bt_ant_couple_ok;
__le32 kill_ack_mask;
__le32 kill_cts_mask;
__le16 bt_valid;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 598ee7315558..b882705ff66d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -53,7 +52,7 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
/* Please keep this array *SORTED* by hex value.
@@ -1370,12 +1369,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- /* is antenna coupling more than 35dB ? */
- priv->bt_ant_couple_ok =
- (iwlwifi_mod_params.antenna_coupling >
- IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
- true : false;
-
/* bt channel inhibition enabled */
priv->bt_ch_announce = true;
IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index dac809df7f1d..4fa4eab2d7f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2,6 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2019 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -846,16 +847,6 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_scale_tbl_info *tbl;
bool full_concurrent = priv->bt_full_concurrent;
- if (priv->bt_ant_couple_ok) {
- /*
- * Is there a need to switch between
- * full concurrency and 3-wire?
- */
- if (priv->bt_ci_compliance)
- full_concurrent = true;
- else
- full_concurrent = false;
- }
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
priv->bt_full_concurrent = full_concurrent;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index e3a33388be70..dc769b580431 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -58,44 +58,121 @@
*
*****************************************************************************/
+#include <linux/uuid.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
#include "fw/runtime.h"
-void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+static const guid_t intel_wifi_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
+ 0xA5, 0xB3, 0x1F, 0x73,
+ 0x8E, 0x28, 0x5A, 0xDE);
+
+static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
+ acpi_handle *ret_handle)
{
acpi_handle root_handle;
- acpi_handle handle;
- struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
root_handle = ACPI_HANDLE(dev);
if (!root_handle) {
IWL_DEBUG_DEV_RADIO(dev,
- "Could not retrieve root port ACPI handle\n");
- return ERR_PTR(-ENOENT);
+ "ACPI: Could not retrieve root port handle\n");
+ return -ENOENT;
}
- /* Get the method's handle */
- status = acpi_get_handle(root_handle, method, &handle);
+ status = acpi_get_handle(root_handle, method, ret_handle);
if (ACPI_FAILURE(status)) {
- IWL_DEBUG_DEV_RADIO(dev, "%s method not found\n", method);
- return ERR_PTR(-ENOENT);
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: %s method not found\n", method);
+ return -ENOENT;
}
+ return 0;
+}
+
+void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_handle handle;
+ acpi_status status;
+ int ret;
+
+ ret = iwl_acpi_get_handle(dev, method, &handle);
+ if (ret)
+ return ERR_PTR(-ENOENT);
/* Call the method with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &buf);
if (ACPI_FAILURE(status)) {
- IWL_DEBUG_DEV_RADIO(dev, "%s invocation failed (0x%x)\n",
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: %s method invocation failed (status: 0x%x)\n",
method, status);
return ERR_PTR(-ENOENT);
}
-
return buf.pointer;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
+/**
+ * Generic function for evaluating a method defined in the device specific
+ * method (DSM) interface. The returned ACPI object must be freed by the
+ * calling function.
+ */
+void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+ union acpi_object *args)
+{
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_wifi_guid, rev, func,
+ args);
+ if (!obj) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM method invocation failed (rev: %d, func:%d)\n",
+ rev, func);
+ return ERR_PTR(-ENOENT);
+ }
+ return obj;
+}
+
+/**
+ * Evaluate a DSM with no arguments and a single u8 return value (inside a
+ * buffer object), verify and return that value.
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+{
+ union acpi_object *obj;
+ int ret;
+
+ obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
+ if (IS_ERR(obj))
+ return -ENOENT;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM method did not return a valid object, type=%d\n",
+ obj->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (obj->buffer.length != sizeof(u8)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM method returned invalid buffer, length=%d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = obj->buffer.pointer[0];
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM method evaluated: func=%d, ret=%d\n",
+ func, ret);
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
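A hedged usage sketch for the new DSM helpers (not part of the patch): a caller would normally evaluate DSM_FUNC_QUERY first to learn which functions the platform implements, then evaluate the one it needs. The enum values are taken from the acpi.h hunk later in this patch; interpreting the query result as a per-function bitmask follows the common ACPI _DSM convention and is an assumption here.

static bool iwl_example_indonesia_5g2_enabled(struct device *dev)
{
	int funcs, val;

	/* function 0 is assumed to return a bitmask of supported funcs */
	funcs = iwl_acpi_get_dsm_u8(dev, 0, DSM_FUNC_QUERY);
	if (funcs < 0 || !(funcs & BIT(DSM_FUNC_ENABLE_INDONESIA_5G2)))
		return false;

	val = iwl_acpi_get_dsm_u8(dev, 0, DSM_FUNC_ENABLE_INDONESIA_5G2);
	return val == 1;
}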
+
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev)
@@ -151,6 +228,82 @@ found:
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg);
+int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+ __le32 *black_list_array,
+ int *black_list_size)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret, tbl_rev, i;
+ bool enabled;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WTAS_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!wifi_pkg->package.elements[0].integer.value;
+
+ if (!enabled) {
+ *black_list_size = -1;
+ IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+ ret = 0;
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[1].integer.value >
+ APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
+ wifi_pkg->package.elements[1].integer.value);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ *black_list_size = wifi_pkg->package.elements[1].integer.value;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *black_list_size);
+ if (*black_list_size > APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
+ *black_list_size);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ for (i = 0; i < *black_list_size; i++) {
+ u32 country;
+
+ if (wifi_pkg->package.elements[2 + i].type !=
+ ACPI_TYPE_INTEGER) {
+ IWL_DEBUG_RADIO(fwrt,
+ "TAS invalid array elem %d\n", 2 + i);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ country = wifi_pkg->package.elements[2 + i].integer.value;
+ black_list_array[i] = cpu_to_le32(country);
+ IWL_DEBUG_RADIO(fwrt, "TAS black list country %d\n", country);
+ }
+
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
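For context, a minimal sketch of how the parsed WTAS table could feed the TAS_CONFIG host command that this patch adds in fw/api/nvm-reg.h; the actual command-sending path is elided because it is not shown in this hunk. Note that iwl_acpi_get_tas() reports -1 in *black_list_size when the BIOS left TAS disabled.

static void iwl_example_send_tas(struct iwl_fw_runtime *fwrt)
{
	struct iwl_tas_config_cmd cmd = {};
	int size = 0;

	if (iwl_acpi_get_tas(fwrt, cmd.black_list_array, &size))
		return;		/* table missing or malformed */

	if (size < 0)
		return;		/* TAS disabled in BIOS */

	cmd.black_list_size = cpu_to_le32(size);
	/* ... send TAS_CONFIG with &cmd via the usual host command path ... */
}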
+
int iwl_acpi_get_mcc(struct device *dev, char *mcc)
{
union acpi_object *wifi_pkg, *data;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 5590e5cc8fbb..0ada9eddb8b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -64,6 +64,7 @@
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
+#include "fw/api/nvm-reg.h"
#include "fw/img.h"
#include "iwl-trans.h"
@@ -75,6 +76,7 @@
#define ACPI_SPLC_METHOD "SPLC"
#define ACPI_ECKV_METHOD "ECKV"
#define ACPI_PPAG_METHOD "PPAG"
+#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -96,6 +98,12 @@
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
+/*
+ * 1 type, 1 enabled, 1 black list size, 16 black list array
+ */
+#define APCI_WTAS_BLACK_LIST_MAX 16
+#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
+
#define ACPI_WGDS_NUM_BANDS 2
#define ACPI_WGDS_TABLE_SIZE 3
@@ -119,12 +127,23 @@ struct iwl_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};
+enum iwl_dsm_funcs_rev_0 {
+ DSM_FUNC_QUERY = 0,
+ DSM_FUNC_DISABLE_SRD = 1,
+ DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+};
+
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
+void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+ union acpi_object *args);
+
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
+
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev);
@@ -174,6 +193,9 @@ int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table);
+int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *black_list_array,
+ int *black_list_size);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -181,6 +203,17 @@ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
return ERR_PTR(-ENOENT);
}
+static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
+ int func, union acpi_object *args)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+{
+ return -ENOENT;
+}
+
static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size,
@@ -250,5 +283,11 @@ static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
return -ENOENT;
}
+static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+ __le32 *black_list_array,
+ int *black_list_size)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
index 5e88fa2e6fb7..546fa60ed9fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -120,15 +120,48 @@ enum iwl_calib_cfg {
};
/**
+ * struct iwl_phy_specific_cfg - specific PHY filter configuration
+ *
+ * Sent as part of the phy configuration command (v3) to configure specific FW
+ * defined PHY filters that can be applied to each antenna.
+ *
+ * @filter_cfg_chain_a: filter config id for LMAC1 chain A
+ * @filter_cfg_chain_b: filter config id for LMAC1 chain B
+ * @filter_cfg_chain_c: filter config id for LMAC2 chain A
+ * @filter_cfg_chain_d: filter config id for LMAC2 chain B
+ * values: 0 - no filter; 0xffffffff - reserved; otherwise - filter id
+ */
+struct iwl_phy_specific_cfg {
+ __le32 filter_cfg_chain_a;
+ __le32 filter_cfg_chain_b;
+ __le32 filter_cfg_chain_c;
+ __le32 filter_cfg_chain_d;
+} __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1 */
+
+/**
* struct iwl_phy_cfg_cmd - Phy configuration command
+ *
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
*/
-struct iwl_phy_cfg_cmd {
+struct iwl_phy_cfg_cmd_v1 {
__le32 phy_cfg;
struct iwl_calib_ctrl calib_control;
} __packed;
+/**
+ * struct iwl_phy_cfg_cmd_v3 - Phy configuration command (v3)
+ *
+ * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
+ * @calib_control: calibration control data
+ * @phy_specific_cfg: configure predefined PHY filters
+ */
+struct iwl_phy_cfg_cmd_v3 {
+ __le32 phy_cfg;
+ struct iwl_calib_ctrl calib_control;
+ struct iwl_phy_specific_cfg phy_specific_cfg;
+} __packed; /* PHY_CONFIGURATION_CMD_API_S_VER_3 */
+
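With two layouts of the command in flight, a caller has to pick one according to the command version the firmware advertises. A hedged sketch using iwl_fw_lookup_cmd_ver() from the fw/img.c hunk at the end of this patch; IWL_ALWAYS_LONG_GROUP and PHY_CONFIGURATION_CMD are the driver's usual identifiers, assumed here rather than taken from this hunk.

static u16 iwl_example_phy_cfg_len(const struct iwl_fw *fw)
{
	u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
				       PHY_CONFIGURATION_CMD);

	return ver == 3 ? sizeof(struct iwl_phy_cfg_cmd_v3)
			: sizeof(struct iwl_phy_cfg_cmd_v1);
}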
/*
* enum iwl_dc2dc_config_id - flag ids
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 3643b6ba6385..c4562e1f8d18 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -618,7 +618,7 @@ struct iwl_wowlan_status_v6 {
* @wake_packet_bufsize: wakeup packet buffer size
* @wake_packet: wakeup packet
*/
-struct iwl_wowlan_status {
+struct iwl_wowlan_status_v7 {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
@@ -634,6 +634,43 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
+/**
+ * struct iwl_wowlan_status - WoWLAN status
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status {
+ struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 tid_tear_down;
+ u8 reserved[3];
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
+
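As the @wake_packet kernel-doc says, the trailing array can be truncated from wake_packet_length down to wake_packet_bufsize. A minimal sketch (illustration only, not part of the patch) of the defensive read this implies:

static u32 iwl_example_wake_packet_copy_len(const struct iwl_wowlan_status *s)
{
	u32 pktlen = le32_to_cpu(s->wake_packet_length);
	u32 bufsize = le32_to_cpu(s->wake_packet_bufsize);

	/* never read past what the firmware actually copied */
	return min(pktlen, bufsize);
}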
static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
{
return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index b9d7ed93311c..74ac65bd545a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -25,7 +25,7 @@
*
* BSD LICENSE
*
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -304,6 +304,7 @@ enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_SRAM_PATH,
IWL_FW_INI_LOCATION_DRAM_PATH,
IWL_FW_INI_LOCATION_NPK_PATH,
+ IWL_FW_INI_LOCATION_NUM,
}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 98e957ecbeed..94b1a1268476 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -218,6 +216,8 @@ struct iwl_shared_mem_lmac_cfg {
* @page_buff_size: size of %page_buff_addr
* @lmac_num: number of LMACs (1 or 2)
* @lmac_smem: per - LMAC smem data
+ * @rxfifo2_control_addr: start addr of RXF2C
+ * @rxfifo2_control_size: size of RXF2C
*/
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
@@ -229,8 +229,10 @@ struct iwl_shared_mem_cfg {
__le32 page_buff_addr;
__le32 page_buff_size;
__le32 lmac_num;
- struct iwl_shared_mem_lmac_cfg lmac_smem[2];
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
+ struct iwl_shared_mem_lmac_cfg lmac_smem[3];
+ __le32 rxfifo2_control_addr;
+ __le32 rxfifo2_control_size;
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_4 */
/**
* struct iwl_mfuart_load_notif - mfuart image version & status
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 0214e553d5ae..1df2e497fabf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -6,8 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,8 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -147,6 +145,7 @@ struct iwl_tof_config_cmd {
* @IWL_TOF_BW_40: 40 MHz
* @IWL_TOF_BW_80: 80 MHz
* @IWL_TOF_BW_160: 160 MHz
+ * @IWL_TOF_BW_NUM: number of tof bandwidths
*/
enum iwl_tof_bandwidth {
IWL_TOF_BW_20_LEGACY,
@@ -154,6 +153,7 @@ enum iwl_tof_bandwidth {
IWL_TOF_BW_40,
IWL_TOF_BW_80,
IWL_TOF_BW_160,
+ IWL_TOF_BW_NUM,
}; /* LOCAT_BW_TYPE_E */
/*
@@ -430,6 +430,9 @@ struct iwl_tof_range_req_ap_entry_v2 {
* @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non trigger based flow
* @IWL_INITIATOR_AP_FLAGS_TB: Use trigger based flow
* @IWL_INITIATOR_AP_FLAGS_SECURED: request secured measurement
+ * @IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK: Send LMR feedback
+ * @IWL_INITIATOR_AP_FLAGS_USE_CALIB: Use calibration values from the request
+ * instead of fw internal values.
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@@ -442,6 +445,8 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_NON_TB = BIT(9),
IWL_INITIATOR_AP_FLAGS_TB = BIT(10),
IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11),
+ IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12),
+ IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13),
};
/**
@@ -508,7 +513,7 @@ enum iwl_location_bw {
#define LOCATION_BW_POS 4
/**
- * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * struct iwl_tof_range_req_ap_entry_v4 - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @channel_num: AP Channel number
* @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
@@ -527,7 +532,7 @@ enum iwl_location_bw {
* @hltk: HLTK to be used for secured 11az measurement
* @tk: TK to be used for secured 11az measurement
*/
-struct iwl_tof_range_req_ap_entry {
+struct iwl_tof_range_req_ap_entry_v4 {
__le32 initiator_ap_flags;
u8 channel_num;
u8 format_bw;
@@ -543,6 +548,65 @@ struct iwl_tof_range_req_ap_entry {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_4 */
/**
+ * enum iwl_location_cipher - location cipher selection
+ * @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128
+ * @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128
+ * @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256
+ */
+enum iwl_location_cipher {
+ IWL_LOCATION_CIPHER_CCMP_128,
+ IWL_LOCATION_CIPHER_GCMP_128,
+ IWL_LOCATION_CIPHER_GCMP_256,
+};
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity in units of 100ms. Ignored if num_of_bursts = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ */
+struct iwl_tof_range_req_ap_entry {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_6 */
+
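A hedged sketch of how the new @calib array pairs with IWL_INITIATOR_AP_FLAGS_USE_CALIB: the firmware picks the entry matching the rx bandwidth of the FTM frame, so the driver fills one value per iwl_tof_bandwidth. The calibration value below is a placeholder, not a real number.

static void iwl_example_set_calib(struct iwl_tof_range_req_ap_entry *entry)
{
	int i;

	for (i = 0; i < IWL_TOF_BW_NUM; i++)
		entry->calib[i] = cpu_to_le16(0x1234);	/* placeholder */

	entry->initiator_ap_flags |=
		cpu_to_le32(IWL_INITIATOR_AP_FLAGS_USE_CALIB);
}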
+/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
@@ -676,7 +740,7 @@ struct iwl_tof_range_req_cmd_v7 {
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
/**
- * struct iwl_tof_range_req_cmd - start measurement cmd
+ * struct iwl_tof_range_req_cmd_v8 - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
@@ -693,7 +757,7 @@ struct iwl_tof_range_req_cmd_v7 {
* @specific_calib: The specific calib value to inject to this measurement calc
* @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v4.
*/
-struct iwl_tof_range_req_cmd {
+struct iwl_tof_range_req_cmd_v8 {
__le32 initiator_flags;
u8 request_id;
u8 num_of_ap;
@@ -704,9 +768,37 @@ struct iwl_tof_range_req_cmd {
__le32 tsf_mac_id;
__le16 common_calib;
__le16 specific_calib;
- struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v4 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry.
+ */
+struct iwl_tof_range_req_cmd {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */
+
/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 97b49843e318..fd719c37428c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 - 2019 Intel Corporation
+ * Copyright(C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 - 2019 Intel Corporation
+ * Copyright(C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,11 +75,21 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
NVM_ACCESS_COMPLETE = 0x0,
/**
+ * @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd
+ */
+ LARI_CONFIG_CHANGE = 0x1,
+
+ /**
* @NVM_GET_INFO:
* Command is &struct iwl_nvm_get_info,
* response is &struct iwl_nvm_get_info_rsp
*/
NVM_GET_INFO = 0x2,
+
+ /**
+ * @TAS_CONFIG: &struct iwl_tas_config_cmd
+ */
+ TAS_CONFIG = 0x3,
};
/**
@@ -431,4 +441,39 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
+#define IWL_TAS_BLACK_LIST_MAX 16
+/**
+ * struct iwl_tas_config_cmd - configures the TAS
+ * @black_list_size: size of relevant field in black_list_array
+ * @black_list_array: black list countries (without TAS)
+ */
+struct iwl_tas_config_cmd {
+ __le32 black_list_size;
+ __le32 black_list_array[IWL_TAS_BLACK_LIST_MAX];
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
+
+/**
+ * enum iwl_lari_configs - bit masks for the various LARI config operations
+ * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in Ukraine
+ * @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
+ * @LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK: ETSI 5.8GHz SRD disabled
+ * @LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK: enable 5.15/5.35GHz bands in
+ * Indonesia
+ */
+enum iwl_lari_config_masks {
+ LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = BIT(0),
+ LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = BIT(1),
+ LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = BIT(2),
+ LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3),
+};
+
+/**
+ * struct iwl_lari_config_change_cmd - change LARI configuration
+ * @config_bitmap: bit map of the config commands. Each bit will trigger a
+ * different predefined FW config operation
+ */
+struct iwl_lari_config_change_cmd {
+ __le32 config_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */
+
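A plausible bridge from the DSM functions declared in fw/acpi.h by this patch to the bitmap above; the exact value-to-bit mapping is an assumption here, since the wiring is not shown in this hunk.

static __le32 iwl_example_lari_bitmap(struct device *dev)
{
	u32 bitmap = 0;

	/* assumed convention: a DSM return value of 1 means "enable" */
	if (iwl_acpi_get_dsm_u8(dev, 0, DSM_FUNC_ENABLE_INDONESIA_5G2) == 1)
		bitmap |= LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK;

	if (iwl_acpi_get_dsm_u8(dev, 0, DSM_FUNC_DISABLE_SRD) == 1)
		bitmap |= LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK;

	return cpu_to_le32(bitmap);
}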
#endif /* __iwl_fw_api_nvm_reg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 88bc7733065f..b8b36a4f9eb9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -535,9 +533,9 @@ struct iwl_rx_mpdu_desc_v3 {
__le32 filter_match;
/**
- * @phy_data2: depends on info type (see @phy_data1)
+ * @phy_data3: depends on info type (see @phy_data1)
*/
- __le32 phy_data2;
+ __le32 phy_data3;
};
/* DW8 - carries rss_hash only when rpa_en == 1 */
@@ -548,9 +546,9 @@ struct iwl_rx_mpdu_desc_v3 {
__le32 rss_hash;
/**
- * @phy_data3: depends on info type (see @phy_data1)
+ * @phy_data2: depends on info type (see @phy_data1)
*/
- __le32 phy_data3;
+ __le32 phy_data2;
};
/* DW9 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 3d770f406c38..5cc33a1b7172 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -1051,20 +1051,6 @@ struct iwl_scan_req_params_v12 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
- * struct iwl_scan_req_params_v13
- * @general_params: &struct iwl_scan_general_params_v10
- * @channel_params: &struct iwl_scan_channel_params_v4
- * @periodic_params: &struct iwl_scan_periodic_parms_v1
- * @probe_params: &struct iwl_scan_probe_params_v4
- */
-struct iwl_scan_req_params_v13 {
- struct iwl_scan_general_params_v10 general_params;
- struct iwl_scan_channel_params_v4 channel_params;
- struct iwl_scan_periodic_parms_v1 periodic_params;
- struct iwl_scan_probe_params_v4 probe_params;
-} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
-
-/**
* struct iwl_scan_req_params_v14
* @general_params: &struct iwl_scan_general_params_v10
* @channel_params: &struct iwl_scan_channel_params_v6
@@ -1091,18 +1077,6 @@ struct iwl_scan_req_umac_v12 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
- * struct iwl_scan_req_umac_v13
- * @uid: scan id, &enum iwl_umac_scan_uid_offsets
- * @ooc_priority: out of channel priority - &enum iwl_scan_priority
- * @scan_params: scan parameters
- */
-struct iwl_scan_req_umac_v13 {
- __le32 uid;
- __le32 ooc_priority;
- struct iwl_scan_req_params_v13 scan_params;
-} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_13 */
-
-/**
* struct iwl_scan_req_umac_v14
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
index aadca78e9846..0c6d7b3e1324 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Deutschland GmbH
+ * Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Deutschland GmbH
+ * Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,6 +66,12 @@
#define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0)
#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1)
+#define SOC_FLAGS_LTR_APPLY_DELAY_MASK 0xc
+#define SOC_FLAGS_LTR_APPLY_DELAY_NONE 0
+#define SOC_FLAGS_LTR_APPLY_DELAY_200 1
+#define SOC_FLAGS_LTR_APPLY_DELAY_2500 2
+#define SOC_FLAGS_LTR_APPLY_DELAY_1820 3
+
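The new SOC_FLAGS_LTR_APPLY_DELAY_* values are field values for the two-bit region selected by the 0xc mask, not standalone flag bits, so they must be shifted into position. A minimal sketch, assuming the standard linux/bitfield.h helper:

#include <linux/bitfield.h>

static u32 iwl_example_soc_flags(bool discrete)
{
	u32 flags = 0;

	if (discrete)
		flags |= SOC_CONFIG_CMD_FLAGS_DISCRETE;

	/* place the 2500 delay code into the bits selected by the mask */
	flags |= FIELD_PREP(SOC_FLAGS_LTR_APPLY_DELAY_MASK,
			    SOC_FLAGS_LTR_APPLY_DELAY_2500);
	return flags;
}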
/**
* struct iwl_soc_configuration_cmd - Set device stabilization latency
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index 970e9e508ad0..c010e6febbf4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -245,32 +245,6 @@ enum iwl_sta_sleep_flag {
#define STA_KEY_LEN_WEP40 (5)
#define STA_KEY_LEN_WEP104 (13)
-/**
- * struct iwl_mvm_keyinfo - key information
- * @key_flags: type &enum iwl_sta_key_flag
- * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
- * @reserved1: reserved
- * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
- * @key_offset: key offset in the fw's key table
- * @reserved2: reserved
- * @key: 16-byte unicast decryption key
- * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
- * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
- * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
- */
-struct iwl_mvm_keyinfo {
- __le16 key_flags;
- u8 tkip_rx_tsc_byte2;
- u8 reserved1;
- __le16 tkip_rx_ttak[5];
- u8 key_offset;
- u8 reserved2;
- u8 key[16];
- __le64 tx_secur_seq_cnt;
- __le64 hw_tkip_mic_rx_key;
- __le64 hw_tkip_mic_tx_key;
-} __packed;
-
#define IWL_ADD_STA_STATUS_MASK 0xFF
#define IWL_ADD_STA_BAID_VALID_MASK 0x8000
#define IWL_ADD_STA_BAID_MASK 0x7F00
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 14ac7153a3e7..4d3687cc83a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -818,7 +818,8 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
static struct iwl_fw_error_dump_file *
iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_dump_ptrs *fw_error_dump)
+ struct iwl_fw_dump_ptrs *fw_error_dump,
+ struct iwl_fwrt_dump_data *data)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
@@ -900,15 +901,15 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
/* If we only want a monitor dump, reset the file length */
- if (fwrt->dump.monitor_only) {
+ if (data->monitor_only) {
file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
- fwrt->dump.desc)
+ data->desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
- fwrt->dump.desc->len;
+ data->desc->len;
dump_file = vzalloc(file_len);
if (!dump_file)
@@ -984,19 +985,19 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
iwl_read_radio_regs(fwrt, &dump_data);
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
- fwrt->dump.desc) {
+ data->desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
- fwrt->dump.desc->len);
+ data->desc->len);
dump_trig = (void *)dump_data->data;
- memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
- sizeof(*dump_trig) + fwrt->dump.desc->len);
+ memcpy(dump_trig, &data->desc->trig_desc,
+ sizeof(*dump_trig) + data->desc->len);
dump_data = iwl_fw_error_next_data(dump_data);
}
/* In case we only want monitor dump, skip to dump transport data */
- if (fwrt->dump.monitor_only)
+ if (data->monitor_only)
goto out;
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
@@ -1366,33 +1367,57 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
u32 fid1 = le32_to_cpu(reg->fifos.fid[0]);
u32 fid2 = le32_to_cpu(reg->fifos.fid[1]);
- u32 fifo_idx;
+ u8 fifo_idx;
if (!data)
return;
+ /* make sure only one bit is set in only one fid */
+ if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1,
+ "fid1=%x, fid2=%x\n", fid1, fid2))
+ return;
+
memset(data, 0, sizeof(*data));
- if (WARN_ON_ONCE((fid1 && fid2) || (!fid1 && !fid2)))
- return;
+ if (fid1) {
+ fifo_idx = ffs(fid1) - 1;
+ if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n",
+ fifo_idx))
+ return;
- fifo_idx = ffs(fid1) - 1;
- if (fid1 && !WARN_ON_ONCE((~BIT(fifo_idx) & fid1) ||
- fifo_idx >= MAX_NUM_LMAC)) {
data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size;
data->fifo_num = fifo_idx;
- return;
- }
+ } else {
+ u8 max_idx;
+
+ fifo_idx = ffs(fid2) - 1;
+ if (iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP,
+ SHARED_MEM_CFG_CMD, 0) <= 3)
+ max_idx = 0;
+ else
+ max_idx = 1;
+
+ if (WARN_ONCE(fifo_idx > max_idx,
+ "invalid umac fifo idx %d", fifo_idx))
+ return;
- fifo_idx = ffs(fid2) - 1;
- if (fid2 && !WARN_ON_ONCE(fifo_idx != 0)) {
- data->size = fwrt->smem_cfg.rxfifo2_size;
- data->offset = RXF_DIFF_FROM_PREV;
/* use bit 31 to distinguish between umac and lmac rxf while
* parsing the dump
*/
data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT;
- return;
+
+ switch (fifo_idx) {
+ case 0:
+ data->size = fwrt->smem_cfg.rxfifo2_size;
+ data->offset = iwl_umac_prph(fwrt->trans,
+ RXF_DIFF_FROM_PREV);
+ break;
+ case 1:
+ data->size = fwrt->smem_cfg.rxfifo2_control_size;
+ data->offset = iwl_umac_prph(fwrt->trans,
+ RXF2C_DIFF_FROM_PREV);
+ break;
+ }
}
}
@@ -1933,6 +1958,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_dump_cfg_name *cfg_name;
u32 size = sizeof(*tlv) + sizeof(*dump);
u32 num_of_cfg_names = 0;
+ u32 hw_type;
list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
size += sizeof(*cfg_name);
@@ -1961,7 +1987,26 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);
dump->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
- dump->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
+
+ /*
+ * Several HWs all have type == 0x42, so we'll override this value
+ * according to the detected HW
+ */
+ hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
+ if (hw_type == IWL_AX210_HW_TYPE) {
+ u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
+ u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
+ u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
+ u32 masked_bits = is_jacket | (is_cdb << 1);
+
+ /*
+ * The HW type depends on certain bits in this case, so add
+ * these bits to the HW type. We won't have collisions since we
+ * add these bits after the highest possible bit in the mask.
+ */
+ hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT;
+ }
+ dump->hw_type = cpu_to_le32(hw_type);
dump->rf_id_flavor =
cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id));
@@ -2094,7 +2139,11 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
u32 size = 0;
u64 regions_mask = le64_to_cpu(trigger->regions_mask);
- for (i = 0; i < 64; i++) {
+ BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
+ BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
+ ARRAY_SIZE(fwrt->trans->dbg.active_regions));
+
+ for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
u32 reg_type;
struct iwl_fw_ini_region_tlv *reg;
@@ -2172,7 +2221,20 @@ static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
return le32_to_cpu(hdr->file_len);
}
-static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt,
+ const struct iwl_fw_dump_desc *desc)
+{
+ if (desc && desc != &iwl_dump_desc_assert)
+ kfree(desc);
+
+ fwrt->dump.lmac_err_id[0] = 0;
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ fwrt->dump.lmac_err_id[1] = 0;
+ fwrt->dump.umac_err_id = 0;
+}
+
+static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
{
struct iwl_fw_dump_ptrs fw_error_dump = {};
struct iwl_fw_error_dump_file *dump_file;
@@ -2180,11 +2242,11 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
u32 file_len;
u32 dump_mask = fwrt->fw->dbg.dump_mask;
- dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump);
+ dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump, dump_data);
if (!dump_file)
- goto out;
+ return;
- if (fwrt->dump.monitor_only)
+ if (dump_data->monitor_only)
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
@@ -2213,9 +2275,6 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
vfree(fw_error_dump.fwrt_ptr);
vfree(fw_error_dump.trans_ptr);
-
-out:
- iwl_fw_free_dump_desc(fwrt);
}
static void iwl_dump_ini_list_free(struct list_head *list)
@@ -2244,7 +2303,7 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
if (!file_len)
- goto out;
+ return;
sg_dump_data = alloc_sgtable(file_len);
if (sg_dump_data) {
@@ -2261,9 +2320,6 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
GFP_KERNEL);
}
iwl_dump_ini_list_free(&dump_list);
-
-out:
- iwl_fw_error_dump_data_free(dump_data);
}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
@@ -2278,27 +2334,40 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only,
unsigned int delay)
{
+ struct iwl_fwrt_wk_data *wk_data;
+ unsigned long idx;
+
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
- iwl_fw_free_dump_desc(fwrt);
+ iwl_fw_free_dump_desc(fwrt, desc);
return 0;
}
- /* use wks[0] since dump flow prior to ini does not need to support
- * consecutive triggers collection
+ /*
+ * Check there is an available worker.
+ * ffz return value is undefined if no zero exists,
+ * so check against ~0UL first.
*/
- if (test_and_set_bit(fwrt->dump.wks[0].idx, &fwrt->dump.active_wks))
+ if (fwrt->dump.active_wks == ~0UL)
return -EBUSY;
- if (WARN_ON(fwrt->dump.desc))
- iwl_fw_free_dump_desc(fwrt);
+ idx = ffz(fwrt->dump.active_wks);
+
+ if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
+ test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
+ return -EBUSY;
+
+ wk_data = &fwrt->dump.wks[idx];
+
+ if (WARN_ON(wk_data->dump_data.desc))
+ iwl_fw_free_dump_desc(fwrt, wk_data->dump_data.desc);
+
+ wk_data->dump_data.desc = desc;
+ wk_data->dump_data.monitor_only = monitor_only;
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
- fwrt->dump.desc = desc;
- fwrt->dump.monitor_only = monitor_only;
-
- schedule_delayed_work(&fwrt->dump.wks[0].wk, usecs_to_jiffies(delay));
+ schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
return 0;
}
@@ -2307,26 +2376,40 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type)
{
- int ret;
- struct iwl_fw_dump_desc *iwl_dump_error_desc;
-
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status))
return -EIO;
- iwl_dump_error_desc = kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
- if (!iwl_dump_error_desc)
- return -ENOMEM;
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT)
+ return -EIO;
- iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
- iwl_dump_error_desc->len = 0;
+ iwl_dbg_tlv_time_point(fwrt,
+ IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT,
+ NULL);
+ } else {
+ struct iwl_fw_dump_desc *iwl_dump_error_desc;
+ int ret;
- ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
- if (ret)
- kfree(iwl_dump_error_desc);
- else
- iwl_trans_sync_nmi(fwrt->trans);
+ iwl_dump_error_desc =
+ kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
- return ret;
+ if (!iwl_dump_error_desc)
+ return -ENOMEM;
+
+ iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
+ iwl_dump_error_desc->len = 0;
+
+ ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc,
+ false, 0);
+ if (ret) {
+ kfree(iwl_dump_error_desc);
+ return ret;
+ }
+ }
+
+ iwl_trans_sync_nmi(fwrt->trans);
+
+ return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
@@ -2504,14 +2587,14 @@ IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
{
struct iwl_fw_dbg_params params = {0};
+ struct iwl_fwrt_dump_data *dump_data =
+ &fwrt->dump.wks[wk_idx].dump_data;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
- if (fwrt->ops && fwrt->ops->fw_running &&
- !fwrt->ops->fw_running(fwrt->ops_ctx)) {
- IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
- iwl_fw_free_dump_desc(fwrt);
+ if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
+ IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
goto out;
}
@@ -2527,12 +2610,19 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
if (iwl_trans_dbg_ini_valid(fwrt->trans))
iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
else
- iwl_fw_error_dump(fwrt);
+ iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
out:
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ iwl_fw_error_dump_data_free(dump_data);
+ } else {
+ iwl_fw_free_dump_desc(fwrt, dump_data->desc);
+ dump_data->desc = NULL;
+ }
+
clear_bit(wk_idx, &fwrt->dump.active_wks);
}
@@ -2690,7 +2780,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop)
{
- int ret = 0;
+ int ret __maybe_unused = 0;
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 9d3513213f5f..11558df36b94 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -98,17 +98,6 @@ struct iwl_fw_dbg_params {
extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
-static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
-{
- if (fwrt->dump.desc != &iwl_dump_desc_assert)
- kfree(fwrt->dump.desc);
- fwrt->dump.desc = NULL;
- fwrt->dump.lmac_err_id[0] = 0;
- if (fwrt->smem_cfg.num_lmacs > 1)
- fwrt->dump.lmac_err_id[1] = 0;
- fwrt->dump.umac_err_id = 0;
-}
-
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
bool monitor_only, unsigned int delay);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 89f74116569d..6e72c27f527b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,6 +62,7 @@
#include "api/commands.h"
#include "debugfs.h"
#include "dbg.h"
+#include <linux/seq_file.h>
#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
struct dbgfs_##name##_data { \
@@ -329,11 +328,108 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
+struct iwl_dbgfs_fw_info_priv {
+ struct iwl_fw_runtime *fwrt;
+};
+
+struct iwl_dbgfs_fw_info_state {
+ loff_t pos;
+};
+
+static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq,
+ void *v, loff_t *pos)
+{
+ struct iwl_dbgfs_fw_info_state *state = v;
+ struct iwl_dbgfs_fw_info_priv *priv = seq->private;
+ const struct iwl_fw *fw = priv->fwrt->fw;
+
+ *pos = ++state->pos;
+ if (*pos >= fw->ucode_capa.n_cmd_versions)
+ return NULL;
+
+ return state;
+}
+
+static void iwl_dbgfs_fw_info_seq_stop(struct seq_file *seq,
+ void *v)
+{
+ kfree(v);
+}
+
+static void *iwl_dbgfs_fw_info_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct iwl_dbgfs_fw_info_priv *priv = seq->private;
+ const struct iwl_fw *fw = priv->fwrt->fw;
+ struct iwl_dbgfs_fw_info_state *state;
+
+ if (*pos >= fw->ucode_capa.n_cmd_versions)
+ return NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ state->pos = *pos;
+ return state;
+}
+
+static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
+{
+ struct iwl_dbgfs_fw_info_state *state = v;
+ struct iwl_dbgfs_fw_info_priv *priv = seq->private;
+ const struct iwl_fw *fw = priv->fwrt->fw;
+ const struct iwl_fw_cmd_version *ver;
+ u32 cmd_id;
+
+ if (!state->pos)
+ seq_puts(seq, "fw_api_ver:\n");
+
+ ver = &fw->ucode_capa.cmd_versions[state->pos];
+
+ cmd_id = iwl_cmd_id(ver->cmd, ver->group, 0);
+
+ seq_printf(seq, " 0x%04x:\n", cmd_id);
+ seq_printf(seq, " name: %s\n",
+ iwl_get_cmd_string(priv->fwrt->trans, cmd_id));
+ seq_printf(seq, " cmd_ver: %d\n", ver->cmd_ver);
+ seq_printf(seq, " notif_ver: %d\n", ver->notif_ver);
+ return 0;
+}
+
+static const struct seq_operations iwl_dbgfs_info_seq_ops = {
+ .start = iwl_dbgfs_fw_info_seq_start,
+ .next = iwl_dbgfs_fw_info_seq_next,
+ .stop = iwl_dbgfs_fw_info_seq_stop,
+ .show = iwl_dbgfs_fw_info_seq_show,
+};
+
+static int iwl_dbgfs_fw_info_open(struct inode *inode, struct file *filp)
+{
+ struct iwl_dbgfs_fw_info_priv *priv;
+
+ priv = __seq_open_private(filp, &iwl_dbgfs_info_seq_ops,
+ sizeof(*priv));
+
+ if (!priv)
+ return -ENOMEM;
+
+ priv->fwrt = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations iwl_dbgfs_fw_info_ops = {
+ .owner = THIS_MODULE,
+ .open = iwl_dbgfs_fw_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0400);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index f008e1bbfdf4..72bfc64580ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -8,7 +8,7 @@
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -394,6 +394,15 @@ struct iwl_fw_ini_dump_cfg_name {
u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
} __packed;
+/* AX210's HW type */
+#define IWL_AX210_HW_TYPE 0x42
+/* How many bits to roll when adding to the HW type of AX210 HW */
+#define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12
+/* This prph is used to tell apart HW_TYPE == 0x42 NICs */
+#define WFPM_OTP_CFG1_ADDR 0xd03098
+#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
+#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
+
/* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
* @time_point: time point that caused the dump collection
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 35f42e529a6d..1fb45fd30ffa 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -449,6 +449,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50,
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
+ IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
/* set 2 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
new file mode 100644
index 000000000000..de8cff463dbe
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -0,0 +1,99 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include "img.h"
+
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!fw->ucode_capa.cmd_versions ||
+ !fw->ucode_capa.n_cmd_versions)
+ return IWL_FW_CMD_VER_UNKNOWN;
+
+ entry = fw->ucode_capa.cmd_versions;
+ for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->cmd_ver;
+ }
+
+ return IWL_FW_CMD_VER_UNKNOWN;
+}
+EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_ver);
+
+u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!fw->ucode_capa.cmd_versions ||
+ !fw->ucode_capa.n_cmd_versions)
+ return def;
+
+ entry = fw->ucode_capa.cmd_versions;
+ for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd) {
+ if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return def;
+ return entry->notif_ver;
+ }
+ }
+
+ return def;
+}
+EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
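The two helpers differ only in how they fail: an unknown command maps to IWL_FW_CMD_VER_UNKNOWN, while an unknown notification falls back to the caller-supplied default, which keeps old-firmware paths branch-free. A short usage sketch, mirroring the SHARED_MEM_CFG_CMD caller later in this patch (the demo_* wrapper itself is hypothetical):

	/* Sketch: gate fields that only exist from notification version 4 on,
	 * treating a missing cmd-versions TLV as version 0.
	 */
	static bool demo_has_rxfifo2_control(const struct iwl_fw *fw)
	{
		u8 api_ver = iwl_fw_lookup_notif_ver(fw, SYSTEM_GROUP,
						     SHARED_MEM_CFG_CMD, 0);

		return api_ver >= 4;
	}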
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 90ca5f929cf9..a8630bf90b63 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -313,22 +313,7 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
-static inline u8 iwl_mvm_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
-{
- const struct iwl_fw_cmd_version *entry;
- unsigned int i;
-
- if (!fw->ucode_capa.cmd_versions ||
- !fw->ucode_capa.n_cmd_versions)
- return IWL_FW_CMD_VER_UNKNOWN;
-
- entry = fw->ucode_capa.cmd_versions;
- for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
- if (entry->group == grp && entry->cmd == cmd)
- return entry->cmd_ver;
- }
-
- return IWL_FW_CMD_VER_UNKNOWN;
-}
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd);
+u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
#endif /* __iwl_fw_img_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index ba00d162ce72..b373606e1241 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -62,6 +62,9 @@
#include "dbg.h"
#include "debugfs.h"
+#include "fw/api/soc.h"
+#include "fw/api/commands.h"
+
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
@@ -95,3 +98,51 @@ void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
iwl_fw_resume_timestamp(fwrt);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
+
+/* set device type and latency */
+int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_soc_configuration_cmd cmd = {};
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ int ret;
+
+ /*
+ * In VER_1 of this command, the discrete value is considered
+ * an integer; in VER_2, it's a bitmask. Since we have only 2
+ * values in VER_1, this is backwards-compatible with VER_2,
+ * as long as we don't set any other bits.
+ */
+ if (!fwrt->trans->trans_cfg->integrated)
+ cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
+
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
+ SOC_FLAGS_LTR_APPLY_DELAY_NONE);
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US !=
+ SOC_FLAGS_LTR_APPLY_DELAY_200);
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US !=
+ SOC_FLAGS_LTR_APPLY_DELAY_2500);
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
+ SOC_FLAGS_LTR_APPLY_DELAY_1820);
+
+ if (fwrt->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
+ !WARN_ON(!fwrt->trans->trans_cfg->integrated))
+ cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
+ SOC_FLAGS_LTR_APPLY_DELAY_MASK);
+
+ if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC) >= 2 &&
+ fwrt->trans->trans_cfg->low_latency_xtal)
+ cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
+
+ cmd.latency = cpu_to_le32(fwrt->trans->trans_cfg->xtal_latency);
+
+ ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ if (ret)
+ IWL_ERR(fwrt, "Failed to set soc latency: %d\n", ret);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_set_soc_latency);
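The BUILD_BUG_ON ladder above is what makes the direct le32_encode_bits() pack legal: it pins the config-level LTR enum to the firmware flag values at compile time. A one-line worked illustration (values assumed from the enum added to iwl-config.h later in this patch):

	/* Sketch: place IWL_CFG_TRANS_LTR_DELAY_2500US (== 2) into the delay
	 * field selected by SOC_FLAGS_LTR_APPLY_DELAY_MASK.
	 */
	__le32 flags = le32_encode_bits(IWL_CFG_TRANS_LTR_DELAY_2500US,
					SOC_FLAGS_LTR_APPLY_DELAY_MASK);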
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index da0d90e2b537..b5e5e32b6152 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,7 @@ struct iwl_fwrt_shared_mem_cfg {
u32 rxfifo1_size;
} lmac[MAX_NUM_LMAC];
u32 rxfifo2_size;
+ u32 rxfifo2_control_size;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
@@ -98,8 +99,16 @@ struct iwl_fwrt_shared_mem_cfg {
* @fw_pkt: packet received from FW
*/
struct iwl_fwrt_dump_data {
- struct iwl_fw_ini_trigger_tlv *trig;
- struct iwl_rx_packet *fw_pkt;
+ union {
+ struct {
+ struct iwl_fw_ini_trigger_tlv *trig;
+ struct iwl_rx_packet *fw_pkt;
+ };
+ struct {
+ const struct iwl_fw_dump_desc *desc;
+ bool monitor_only;
+ };
+ };
};
/**
@@ -162,8 +171,6 @@ struct iwl_fw_runtime {
/* debug */
struct {
- const struct iwl_fw_dump_desc *desc;
- bool monitor_only;
struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
unsigned long active_wks;
@@ -235,5 +242,6 @@ int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type);
void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
+int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index 409b2dd854ac..700fdab14209 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,6 +69,8 @@ static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
int i, lmac;
int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
+ u8 api_ver = iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP,
+ SHARED_MEM_CFG_CMD, 0);
if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
return;
@@ -80,6 +80,12 @@ static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
+ if (api_ver >= 4 &&
+ !WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) < sizeof(*mem_cfg))) {
+ fwrt->smem_cfg.rxfifo2_control_size =
+ le32_to_cpu(mem_cfg->rxfifo2_control_size);
+ }
+
for (lmac = 0; lmac < lmac_num; lmac++) {
struct iwl_shared_mem_lmac_cfg *lmac_cfg =
&mem_cfg->lmac_smem[lmac];
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index d5d984d7ce83..244899f3f3bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -5,9 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,9 +26,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -284,6 +282,13 @@ struct iwl_pwr_tx_backoff {
u32 backoff;
};
+enum iwl_cfg_trans_ltr_delay {
+ IWL_CFG_TRANS_LTR_DELAY_NONE = 0,
+ IWL_CFG_TRANS_LTR_DELAY_200US = 1,
+ IWL_CFG_TRANS_LTR_DELAY_2500US = 2,
+ IWL_CFG_TRANS_LTR_DELAY_1820US = 3,
+};
+
/**
* struct iwl_cfg_trans - information needed to start the trans
*
@@ -304,6 +309,7 @@ struct iwl_pwr_tx_backoff {
* @mq_rx_supported: multi-queue rx support
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
+ * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
@@ -317,7 +323,8 @@ struct iwl_cfg_trans_params {
mq_rx_supported:1,
integrated:1,
low_latency_xtal:1,
- bisr_workaround:1;
+ bisr_workaround:1,
+ ltr_delay:2;
};
/**
@@ -470,12 +477,16 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_TH1 0x108
#define IWL_CFG_RF_TYPE_JF2 0x105
#define IWL_CFG_RF_TYPE_JF1 0x108
+#define IWL_CFG_RF_TYPE_HR2 0x10A
+#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_ID_TH 0x1
#define IWL_CFG_RF_ID_TH1 0x1
#define IWL_CFG_RF_ID_JF 0x3
#define IWL_CFG_RF_ID_JF1 0x6
#define IWL_CFG_RF_ID_JF1_DIV 0xA
+#define IWL_CFG_RF_ID_HR 0x7
+#define IWL_CFG_RF_ID_HR1 0x4
#define IWL_CFG_NO_160 0x0
#define IWL_CFG_160 0x1
@@ -506,9 +517,10 @@ struct iwl_dev_info {
extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
@@ -527,6 +539,8 @@ extern const char iwl9260_killer_1550_name[];
extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
+extern const char iwl_ax201_name[];
+extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
extern const char iwl_ax200_killer_1650x_name[];
@@ -601,9 +615,9 @@ extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg;
extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg;
extern const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
-extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
-extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
+extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
+extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
+extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
@@ -617,14 +631,16 @@ extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
+extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
+extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
+extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
+extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index ebea99189ca9..9d7a04833cd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018, 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018, 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,11 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
* There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
* 3: 256 bit.
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored
+ * by older firmware versions, so set IWL_PRPH_SCRATCH_RB_SIZE_4K
+ * appropriately; use the below values for this.
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
*/
enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
@@ -103,6 +108,9 @@ enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
+ IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 0xf << 20,
+ IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20,
+ IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
};
/*
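As the kernel-doc above notes, older firmware only looks at the legacy IWL_PRPH_SCRATCH_RB_SIZE_4K bit and ignores the extended field. A hedged sketch of how a caller might pick the flags (demo_* name hypothetical, not part of this hunk):

	/* Sketch: encode the receive-buffer size; older firmware falls back
	 * to whatever the legacy 4K bit says, so only set it for 4kB RBs.
	 */
	static u32 demo_rb_size_flags(unsigned int rb_size_kb)
	{
		switch (rb_size_kb) {
		case 8:
			return IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
		case 12:
			return IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K;
		default:
			return IWL_PRPH_SCRATCH_RB_SIZE_4K;
		}
	}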
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index bf2f00b89214..7987a288917b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,7 @@
*
* BSD LICENSE
*
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -165,28 +165,36 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
- u32 buf_location = le32_to_cpu(alloc->buf_location);
- u32 alloc_id = le32_to_cpu(alloc->alloc_id);
+ u32 buf_location;
+ u32 alloc_id;
- if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
- (buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
- buf_location != IWL_FW_INI_LOCATION_DRAM_PATH))
+ if (le32_to_cpu(tlv->length) != sizeof(*alloc))
return -EINVAL;
- if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
- alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) ||
- (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
- (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
- alloc_id >= IWL_FW_INI_ALLOCATION_NUM))) {
- IWL_ERR(trans,
- "WRT: Invalid allocation id %u for allocation TLV\n",
- alloc_id);
- return -EINVAL;
- }
+ buf_location = le32_to_cpu(alloc->buf_location);
+ alloc_id = le32_to_cpu(alloc->alloc_id);
+
+ if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
+ buf_location >= IWL_FW_INI_LOCATION_NUM)
+ goto err;
+
+ if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
+ alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
+ goto err;
+
+ if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH ||
+ buf_location == IWL_FW_INI_LOCATION_NPK_PATH) &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ goto err;
trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
return 0;
+err:
+ IWL_ERR(trans,
+ "WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
+ alloc_id, buf_location);
+ return -EINVAL;
}
static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index eeb750bdbda1..04f14bfdd091 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -87,7 +85,7 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1776,7 +1774,6 @@ static int __init iwl_drv_init(void)
INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
pr_info(DRV_DESCRIPTION "\n");
- pr_info(DRV_COPYRIGHT "\n");
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create the root of iwlwifi debugfs subsystem. */
@@ -1824,11 +1821,6 @@ MODULE_PARM_DESC(amsdu_size,
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
-module_param_named(antenna_coupling, iwlwifi_mod_params.antenna_coupling,
- int, 0444);
-MODULE_PARM_DESC(antenna_coupling,
- "specify antenna coupling in dB (default: 0 dB)");
-
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
MODULE_PARM_DESC(nvm_file, "NVM file name");
@@ -1872,10 +1864,6 @@ module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444);
MODULE_PARM_DESC(power_level,
"default power save level (range from 1 - 5, default: 1)");
-module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, 0444);
-MODULE_PARM_DESC(fw_monitor,
- "firmware monitor - to debug FW (default: false - needs lots of memory)");
-
module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444);
MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 2be30af7bdc3..8938a6467996 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
@@ -26,7 +26,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* All rights reserved.
*
@@ -63,8 +63,7 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
-#define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation"
-#define DRV_AUTHOR "<linuxwifi@intel.com>"
+#define DRV_AUTHOR "Intel Corporation <linuxwifi@intel.com>"
/* radio config bits (actual values from NVM definition) */
#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index bf673ce5f183..e77d8d13cb51 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -646,8 +646,7 @@ struct iwl_rb_status {
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \
- TFD_QUEUE_SIZE_BC_DUP)
+#define TFD_QUEUE_BC_SIZE_GEN3 1024
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 82e5cac23d8d..e8ce3a300857 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -5,8 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -26,8 +25,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -111,11 +109,9 @@ enum iwl_uapsd_disable {
* @power_save: enable power save, default = false
* @power_level: power level, default = 1
* @debug_level: levels are IWL_DL_*
- * @antenna_coupling: antenna coupling in dB, default = 0
* @nvm_file: specifies an external NVM file
* @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @fw_monitor: allow to use firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
* @enable_ini: enable new FW debug infrastructure (INI TLVs)
@@ -132,10 +128,8 @@ struct iwl_mod_params {
#ifdef CONFIG_IWLWIFI_DEBUG
u32 debug_level;
#endif
- int antenna_coupling;
char *nvm_file;
u32 uapsd_disable;
- bool fw_monitor;
bool disable_11ac;
/**
* @disable_11ax: disable HE capabilities, default = false
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index ccf0bc16465d..ee410417761d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -240,6 +240,7 @@ enum iwl_nvm_channel_flags {
* @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40 MHz is forbidden
* for this regulatory domain (valid only in 5 GHz).
* @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ * @REG_CAPA_11AX_DISABLED: 11ax is forbidden for this regulatory domain.
*/
enum iwl_reg_capa_flags {
REG_CAPA_BF_CCD_LOW_BAND = BIT(0),
@@ -250,6 +251,7 @@ enum iwl_reg_capa_flags {
REG_CAPA_MCS_9_ALLOWED = BIT(5),
REG_CAPA_40MHZ_FORBIDDEN = BIT(7),
REG_CAPA_DC_HIGH_ENABLED = BIT(9),
+ REG_CAPA_11AX_DISABLED = BIT(10),
};
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
@@ -1115,6 +1117,9 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags |= NL80211_RRF_NO_160MHZ;
}
+ if (cap_flags & REG_CAPA_11AX_DISABLED)
+ flags |= NL80211_RRF_NO_HE;
+
return flags;
}
@@ -1166,8 +1171,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
- band = (ch_idx < NUM_2GHZ_CHANNELS) ?
- NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ band = iwl_nl80211_band_from_channel_idx(ch_idx);
center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
band);
new_rule = false;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 1136d9784f9d..8e254c0eda13 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -326,6 +324,7 @@
#define RXF_SIZE_BYTE_CND_POS (7)
#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
#define RXF_DIFF_FROM_PREV (0x200)
+#define RXF2C_DIFF_FROM_PREV (0x4e00)
#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index bba527b339b5..a301e2484cdb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -795,6 +795,132 @@ struct iwl_trans_debug {
u32 domains_bitmap;
};
+struct iwl_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+struct iwl_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct iwl_host_cmd *source;
+ u32 flags;
+ u32 tbs;
+};
+
+/*
+ * The FH will write back to the first TB only, so we need to copy some data
+ * into the buffer regardless of whether it should be mapped or not.
+ * This indicates how big the first TB must be to include the scratch buffer
+ * and the assigned PN.
+ * Since the PN is 8 bytes at offset 12, the size is now 20.
+ * If we make it bigger, allocations will be bigger and copying slower, so
+ * that's probably not useful.
+ */
+#define IWL_FIRST_TB_SIZE 20
+#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
+
+struct iwl_pcie_txq_entry {
+ void *cmd;
+ struct sk_buff *skb;
+ /* buffer to free after command completes */
+ const void *free_buf;
+ struct iwl_cmd_meta meta;
+};
+
+struct iwl_pcie_first_tb_buf {
+ u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
+};
+
+/**
+ * struct iwl_txq - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @tfds: transmit frame descriptors (DMA memory)
+ * @first_tb_bufs: start of command headers, including scratch buffers, for
+ * the writeback -- this is DMA memory and an array holding one buffer
+ * for each command on the queue
+ * @first_tb_dma: DMA address for the first_tb_bufs start
+ * @entries: transmit entries (driver state)
+ * @lock: queue lock
+ * @stuck_timer: timer that fires if queue gets stuck
+ * @trans: pointer back to transport (for timer)
+ * @need_update: indicates need to update read/write index
+ * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
+ * @wd_timeout: queue watchdog timeout (jiffies) - per queue
+ * @frozen: tx stuck queue timer is frozen
+ * @frozen_expiry_remainder: remember how long until the timer fires
+ * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
+ * @write_ptr: first empty entry (index), i.e. the host write pointer (host_w)
+ * @read_ptr: last used entry (index), i.e. the host read pointer (host_r)
+ * @dma_addr: physical addr for BD's
+ * @n_window: safe queue window
+ * @id: queue id
+ * @low_mark: low watermark, resume queue if free space more than this
+ * @high_mark: high watermark, stop queue if free space less than this
+ *
+ * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
+ * (barring future HW changes). For the normal TX
+ * queues, n_window, which is the size of the software queue data
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlaid on the HW queue.
+ */
+struct iwl_txq {
+ void *tfds;
+ struct iwl_pcie_first_tb_buf *first_tb_bufs;
+ dma_addr_t first_tb_dma;
+ struct iwl_pcie_txq_entry *entries;
+ /* lock for syncing changes on the queue */
+ spinlock_t lock;
+ unsigned long frozen_expiry_remainder;
+ struct timer_list stuck_timer;
+ struct iwl_trans *trans;
+ bool need_update;
+ bool frozen;
+ bool ampdu;
+ int block;
+ unsigned long wd_timeout;
+ struct sk_buff_head overflow_q;
+ struct iwl_dma_ptr bc_tbl;
+
+ int write_ptr;
+ int read_ptr;
+ dma_addr_t dma_addr;
+ int n_window;
+ u32 id;
+ int low_mark;
+ int high_mark;
+
+ bool overflow_tx;
+};
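The HW/SW window described in the kernel-doc above reduces to a power-of-two mask in practice. A minimal sketch of the fold (assumption: this mirrors the existing pcie helper iwl_pcie_get_cmd_index rather than anything added by this hunk):

	/* Sketch: fold a 0..255 HW descriptor index onto the SW window;
	 * for the command queue n_window == 32, for data queues the
	 * mask is a no-op since n_window == 256.
	 */
	static inline int demo_get_cmd_index(const struct iwl_txq *q, u32 index)
	{
		return index & (q->n_window - 1);
	}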
+
+/**
+ * struct iwl_trans_txqs - transport tx queues data
+ *
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: per-queue transmit queue pointers, indexed by TVQM queue id
+ * @cmd: command queue parameters (FIFO, queue id, watchdog timeout)
+ */
+struct iwl_trans_txqs {
+ unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ struct {
+ u8 fifo;
+ u8 q_id;
+ unsigned int wdg_timeout;
+ } cmd;
+
+};
+
/**
* struct iwl_trans - transport common data
*
@@ -828,6 +954,7 @@ struct iwl_trans_debug {
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
+ * @iwl_trans_txqs: transport tx queues data.
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -875,6 +1002,7 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode;
const char *name;
+ struct iwl_trans_txqs txqs;
/* pointer to trans specific struct */
/* Ensure that this pointer will always be aligned to the size of a pointer */
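Since queue_used and queue_stopped in the new txqs member are ordinary bitmaps indexed by TVQM queue id, the usual atomic bitops apply. A hedged sketch of the stop/wake pattern (demo_* names hypothetical; the real helpers live in the pcie layer):

	/* Sketch: stop a queue exactly once; wake only if it was stopped. */
	static void demo_stop_txq(struct iwl_trans *trans, int txq_id)
	{
		if (!test_and_set_bit(txq_id, trans->txqs.queue_stopped))
			pr_debug("txq %d stopped\n", txq_id);
	}

	static void demo_wake_txq(struct iwl_trans *trans, int txq_id)
	{
		if (test_and_clear_bit(txq_id, trans->txqs.queue_stopped))
			pr_debug("txq %d woken\n", txq_id);
	}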
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 3d2abbc5c76c..5ae22cd7ecdb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
@@ -26,7 +26,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
@@ -216,8 +216,7 @@ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm)
goto send_cmd;
}
- mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
- bt_cmd.mode = cpu_to_le32(mode);
+ bt_cmd.mode = cpu_to_le32(BT_COEX_NW);
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd.enabled_modules |=
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 58df25e2fb32..b0268f44b2ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -155,5 +155,9 @@
#define IWL_MVM_USE_TWT false
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
#define IWL_MVM_USE_NSSN_SYNC 0
+#define IWL_MVM_PHY_FILTER_CHAIN_A 0
+#define IWL_MVM_PHY_FILTER_CHAIN_B 0
+#define IWL_MVM_PHY_FILTER_CHAIN_C 0
+#define IWL_MVM_PHY_FILTER_CHAIN_D 0
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 122ca7624073..2a94545d737f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -80,9 +78,6 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (iwlwifi_mod_params.swcrypto)
- return;
-
mutex_lock(&mvm->mutex);
memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
@@ -843,18 +838,16 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
return ret;
}
- if (!iwlwifi_mod_params.swcrypto) {
- /*
- * This needs to be unlocked due to lock ordering
- * constraints. Since we're in the suspend path
- * that isn't really a problem though.
- */
- mutex_unlock(&mvm->mutex);
- ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC);
- mutex_lock(&mvm->mutex);
- if (ret)
- return ret;
- }
+ /*
+ * This needs to be unlocked due to lock ordering
+ * constraints. Since we're in the suspend path
+ * that isn't really a problem though.
+ */
+ mutex_unlock(&mvm->mutex);
+ ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC);
+ mutex_lock(&mvm->mutex);
+ if (ret)
+ return ret;
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
sizeof(*wowlan_config_cmd),
@@ -1517,12 +1510,14 @@ out:
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
{
- struct iwl_wowlan_status *v7, *status;
+ struct iwl_wowlan_status_v7 *v7;
+ struct iwl_wowlan_status *status;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
};
- int ret, len, status_size;
+ int ret, len, status_size, data_size;
+ u8 notif_ver;
lockdep_assert_held(&mvm->mutex);
@@ -1532,13 +1527,12 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
return ERR_PTR(ret);
}
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
- int data_size;
status_size = sizeof(*v6);
- len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@@ -1593,23 +1587,33 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
}
v7 = (void *)cmd.resp_pkt->data;
- status_size = sizeof(*v7);
- len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ WOWLAN_GET_STATUSES, 0);
+
+ status_size = sizeof(*status);
+
+ if (notif_ver == IWL_FW_CMD_VER_UNKNOWN || notif_ver < 9)
+ status_size = sizeof(*v7);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
status = ERR_PTR(-EIO);
goto out_free_resp;
}
+ data_size = ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4);
- if (len != (status_size +
- ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) {
+ if (len != (status_size + data_size)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
status = ERR_PTR(-EIO);
goto out_free_resp;
}
- status = kmemdup(v7, len, GFP_KERNEL);
+ status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
+ if (!status)
+ goto out_free_resp;
+
+ memcpy(status, v7, status_size);
+ memcpy(status->wake_packet, (u8 *)v7 + status_size, data_size);
out_free_resp:
iwl_free_resp(&cmd);
@@ -1982,6 +1986,9 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto err;
}
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END,
+ NULL);
+
ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
if (ret)
goto err;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 3beef8d077b8..8fae7e707374 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -481,6 +479,11 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
if (kstrtou16(buf, 0, &amsdu_len))
return -EINVAL;
+ /* only change from debug set <-> debug unset */
+ if ((amsdu_len && mvmsta->orig_amsdu_len) ||
+ (!amsdu_len && !mvmsta->orig_amsdu_len))
+ return -EBUSY;
+
if (amsdu_len) {
mvmsta->orig_amsdu_len = sta->max_amsdu_len;
sta->max_amsdu_len = amsdu_len;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 9e21f5e5d364..5ca45915cf7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -164,9 +164,10 @@ static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
eth_broadcast_addr(cmd->range_req_bssid);
}
-static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_tof_range_req_cmd *cmd,
- struct cfg80211_pmsr_request *req)
+static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_cmd *cmd,
+ struct cfg80211_pmsr_request *req)
{
int i;
@@ -210,6 +211,13 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->tsf_mac_id = cpu_to_le32(0xff);
}
+static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_cmd_v8 *cmd,
+ struct cfg80211_pmsr_request *req)
+{
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
+}
+
static int
iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
@@ -382,9 +390,10 @@ iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
return 0;
}
-static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry *target)
+static int
+iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v4 *target)
{
int ret;
@@ -394,11 +403,43 @@ static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
if (ret)
return ret;
- iwl_mvm_ftm_put_target_common(mvm, peer, target);
+ iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
return 0;
}
+static int
+iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
+
+ if (vif->bss_conf.assoc &&
+ !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ target->sta_id = mvmvif->ap_sta_id;
+ } else {
+ target->sta_id = IWL_MVM_INVALID_STA;
+ }
+
+ /*
+ * TODO: Beacon interval is currently unknown, so use the common value
+ * of 100 TUs.
+ */
+ target->beacon_interval = cpu_to_le16(100);
+ return 0;
+}
+
static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
{
u32 status;
@@ -456,7 +497,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Versions 7 and 8 have the same structure except for the responders
* list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
*/
- iwl_mvm_ftm_cmd(mvm, vif, (void *)&cmd_v7, req);
+ iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);
for (i = 0; i < cmd_v7.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
@@ -472,7 +513,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
- struct iwl_tof_range_req_cmd cmd;
+ struct iwl_tof_range_req_cmd_v8 cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP,
@@ -482,7 +523,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u8 i;
int err;
- iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
+ iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);
for (i = 0; i < cmd.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
@@ -495,6 +536,33 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
+static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
@@ -508,14 +576,21 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
if (new_api) {
- u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD);
- if (cmd_ver == 8)
+ switch (cmd_ver) {
+ case 9:
+ case 10:
+ err = iwl_mvm_ftm_start_v9(mvm, vif, req);
+ break;
+ case 8:
err = iwl_mvm_ftm_start_v8(mvm, vif, req);
- else
+ break;
+ default:
err = iwl_mvm_ftm_start_v7(mvm, vif, req);
-
+ break;
+ }
} else {
err = iwl_mvm_ftm_start_v5(mvm, vif, req);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 834564198409..0b6c32098b5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -136,8 +136,8 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
- u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RESPONDER_CONFIG_CMD);
int err;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index e67c452fa92c..95a613537047 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -87,36 +87,6 @@ struct iwl_mvm_alive_data {
u32 scd_base_addr;
};
-/* set device type and latency */
-static int iwl_set_soc_latency(struct iwl_mvm *mvm)
-{
- struct iwl_soc_configuration_cmd cmd = {};
- int ret;
-
- /*
- * In VER_1 of this command, the discrete value is considered
- * an integer; In VER_2, it's a bitmask. Since we have only 2
- * values in VER_1, this is backwards-compatible with VER_2,
- * as long as we don't set any other bits.
- */
- if (!mvm->trans->trans_cfg->integrated)
- cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
-
- if (iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC) >= 2 &&
- (mvm->trans->trans_cfg->low_latency_xtal))
- cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
-
- cmd.latency = cpu_to_le32(mvm->trans->trans_cfg->xtal_latency);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SOC_CONFIGURATION_CMD,
- SYSTEM_GROUP, 0), 0,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(mvm, "Failed to set soc latency: %d\n", ret);
- return ret;
-}
-
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
@@ -550,10 +520,49 @@ error:
return ret;
}
+#ifdef CONFIG_ACPI
+static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
+ struct iwl_phy_specific_cfg *phy_filters)
+{
+ /*
+ * TODO: read specific phy config from BIOS
+ * ACPI table for this feature has not been defined yet,
+ * so for now we use hardcoded values.
+ */
+
+ if (IWL_MVM_PHY_FILTER_CHAIN_A) {
+ phy_filters->filter_cfg_chain_a =
+ cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A);
+ }
+ if (IWL_MVM_PHY_FILTER_CHAIN_B) {
+ phy_filters->filter_cfg_chain_b =
+ cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B);
+ }
+ if (IWL_MVM_PHY_FILTER_CHAIN_C) {
+ phy_filters->filter_cfg_chain_c =
+ cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C);
+ }
+ if (IWL_MVM_PHY_FILTER_CHAIN_D) {
+ phy_filters->filter_cfg_chain_d =
+ cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D);
+ }
+}
+
+#else /* CONFIG_ACPI */
+
+static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
+ struct iwl_phy_specific_cfg *phy_filters)
+{
+}
+#endif /* CONFIG_ACPI */
+
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
- struct iwl_phy_cfg_cmd phy_cfg_cmd;
+ struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+ struct iwl_phy_specific_cfg phy_filters = {};
+ u8 cmd_ver;
+ size_t cmd_size;
if (iwl_mvm_has_unified_ucode(mvm) &&
!mvm->trans->cfg->tx_with_siso_diversity)
@@ -580,11 +589,20 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.calib_control.flow_trigger =
mvm->fw->default_calib[ucode_type].flow_trigger;
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ PHY_CONFIGURATION_CMD);
+ if (cmd_ver == 3) {
+ iwl_mvm_phy_filter_init(mvm, &phy_filters);
+ memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters,
+ sizeof(struct iwl_phy_specific_cfg));
+ }
+
IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
phy_cfg_cmd.phy_cfg);
-
+ cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
+ sizeof(struct iwl_phy_cfg_cmd_v1);
return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
- sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+ cmd_size, &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
@@ -725,13 +743,12 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
union {
struct iwl_dev_tx_power_cmd v5;
struct iwl_dev_tx_power_cmd_v4 v4;
- } cmd;
-
+ } cmd = {
+ .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ };
int ret;
u16 len = 0;
- cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
-
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_REDUCE_TX_POWER))
len = sizeof(cmd.v5);
@@ -937,6 +954,78 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
return iwl_mvm_ppag_send_cmd(mvm);
}
+static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_tas_config_cmd cmd = {};
+ int list_size;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.black_list_array) <
+ APCI_WTAS_BLACK_LIST_MAX);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
+ IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
+ return;
+ }
+
+ ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.black_list_array, &list_size);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "TAS table invalid or unavailable. (%d)\n",
+ ret);
+ return;
+ }
+
+ if (list_size < 0)
+ return;
+
+ /* with TAS enabled, the list size can only be non-negative */
+ cmd.black_list_size = cpu_to_le32((u32)list_size);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ TAS_CONFIG),
+ 0, sizeof(cmd), &cmd);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
+}
+
+static bool iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
+{
+ int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0,
+ DSM_FUNC_ENABLE_INDONESIA_5G2);
+
+ IWL_DEBUG_RADIO(mvm,
+ "Evaluated DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
+ ret);
+
+ return ret == 1;
+}
+
+static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_lari_config_change_cmd cmd = {};
+
+ if (iwl_mvm_eval_dsm_indonesia_5g2(mvm))
+ cmd.config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+
+ /* apply more config masks here */
+
+ if (cmd.config_bitmap) {
+ IWL_DEBUG_RADIO(mvm,
+ "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
+ le32_to_cpu(cmd.config_bitmap));
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ 0, sizeof(cmd), &cmd);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "Failed to send LARI_CONFIG_CHANGE (%d)\n",
+ ret);
+ }
+}
#else /* CONFIG_ACPI */
inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
@@ -964,6 +1053,14 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
return 0;
}
+
+static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
+{
+}
+
+static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
+{
+}
#endif /* CONFIG_ACPI */
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
@@ -1138,7 +1235,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
- ret = iwl_set_soc_latency(mvm);
+ ret = iwl_set_soc_latency(&mvm->fwrt);
if (ret)
goto error;
}
@@ -1238,6 +1335,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ iwl_mvm_lari_cfg(mvm);
/*
* RTNL is not taken during Ct-kill, but we don't need to scan/Tx
* anyway, so don't init MCC.
@@ -1282,6 +1380,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret < 0)
goto error;
+ iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 7aa1350b093e..77916231ff7d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -475,23 +475,23 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
- /* Enable 11w if software crypto is not enabled (as the
- * firmware will interpret some mgmt packets, so enabling it
- * with software crypto isn't safe).
- */
- if (!iwlwifi_mod_params.swcrypto) {
- ieee80211_hw_set(hw, MFP_CAPABLE);
+ if (iwlwifi_mod_params.swcrypto)
+ IWL_ERR(mvm,
+ "iwlmvm doesn't allow disabling HW crypto, check swcrypto module parameter\n");
+ if (!iwlwifi_mod_params.bt_coex_active)
+ IWL_ERR(mvm,
+ "iwlmvm doesn't allow disabling BT Coex, check bt_coex_active module parameter\n");
+
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC;
+ hw->wiphy->n_cipher_suites++;
+ if (iwl_mvm_has_new_rx_api(mvm)) {
mvm->ciphers[hw->wiphy->n_cipher_suites] =
- WLAN_CIPHER_SUITE_AES_CMAC;
+ WLAN_CIPHER_SUITE_BIP_GMAC_128;
+ hw->wiphy->n_cipher_suites++;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_BIP_GMAC_256;
hw->wiphy->n_cipher_suites++;
- if (iwl_mvm_has_new_rx_api(mvm)) {
- mvm->ciphers[hw->wiphy->n_cipher_suites] =
- WLAN_CIPHER_SUITE_BIP_GMAC_128;
- hw->wiphy->n_cipher_suites++;
- mvm->ciphers[hw->wiphy->n_cipher_suites] =
- WLAN_CIPHER_SUITE_BIP_GMAC_256;
- hw->wiphy->n_cipher_suites++;
- }
}
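The cipher registration above appends suite selectors to a fixed array with a running count, gating the GMAC variants on the newer RX path. A sketch of the same pattern; the selector values follow the usual 00-0F-AC numbering but should be verified against the real headers:

	#include <stdio.h>

	#define CIPHER_AES_CMAC     0x000FAC06 /* WLAN_CIPHER_SUITE_AES_CMAC */
	#define CIPHER_BIP_GMAC_128 0x000FAC0B
	#define CIPHER_BIP_GMAC_256 0x000FAC0C

	int main(void)
	{
		unsigned int ciphers[8];
		int n = 0;
		int has_new_rx_api = 1; /* assumption for the example */

		/* MFP cipher is always advertised; the GMAC variants
		 * only when the newer RX path is present */
		ciphers[n++] = CIPHER_AES_CMAC;
		if (has_new_rx_api) {
			ciphers[n++] = CIPHER_BIP_GMAC_128;
			ciphers[n++] = CIPHER_BIP_GMAC_256;
		}
		printf("advertising %d MFP ciphers\n", n);
		return 0;
	}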
/* currently FW API supports only one optional cipher scheme */
@@ -697,10 +697,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
WIPHY_WOWLAN_RFKILL_RELEASE |
WIPHY_WOWLAN_NET_DETECT;
- if (!iwlwifi_mod_params.swcrypto)
- mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
- WIPHY_WOWLAN_GTK_REKEY_FAILURE |
- WIPHY_WOWLAN_4WAY_HANDSHAKE;
+ mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
@@ -1209,14 +1208,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
*/
flush_work(&mvm->roc_done_wk);
+ iwl_mvm_rm_aux_sta(mvm);
+
iwl_mvm_stop_device(mvm);
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
- /* the fw is stopped, the aux sta is dead: clean up driver state */
- iwl_mvm_del_aux_sta(mvm);
-
/*
* Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
* hw (as restart_complete() won't be called in this case) and mac80211
@@ -1264,7 +1262,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
- iwl_fw_free_dump_desc(&mvm->fwrt);
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
@@ -2181,6 +2178,15 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
flags |= STA_CTXT_HE_PACKET_EXT;
}
}
+
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP)
+ flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
+
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN)
+ flags |= STA_CTXT_HE_ACK_ENABLED;
+
rcu_read_unlock();
/* Mark MU EDCA as enabled, unless none detected on some AC */
@@ -2205,11 +2211,6 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
cpu_to_le16(mu_edca->mu_edca_timer);
}
- if (vif->bss_conf.multi_sta_back_32bit)
- flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
-
- if (vif->bss_conf.ack_enabled)
- flags |= STA_CTXT_HE_ACK_ENABLED;
if (vif->bss_conf.uora_exists) {
flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
@@ -3367,11 +3368,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
int ret, i;
u8 key_offset;
- if (iwlwifi_mod_params.swcrypto) {
- IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (!mvm->trans->trans_cfg->gen2) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index afcf2b98a9cb..e2f7f6ec711e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -134,12 +132,10 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
 * We will register to mac80211 to have testmode working. The NIC must not
 * be brought up after the INIT firmware has asserted. This is useful for
 * debugging the INIT firmware with proprietary tools over testmode.
- * @tfd_q_hang_detect: enabled the detection of hung transmit queues
* @power_scheme: one of enum iwl_power_scheme
*/
struct iwl_mvm_mod_params {
bool init_dbg;
- bool tfd_q_hang_detect;
int power_scheme;
};
extern struct iwl_mvm_mod_params iwlmvm_mod_params;
@@ -2149,8 +2145,8 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
- u8 ver = iwl_mvm_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
+ u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index dfe02440d474..d095ff847be9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -86,7 +84,7 @@
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
static const struct iwl_op_mode_ops iwl_mvm_ops;
@@ -94,7 +92,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
struct iwl_mvm_mod_params iwlmvm_mod_params = {
.power_scheme = IWL_POWER_SCHEME_BPS,
- .tfd_q_hang_detect = true
/* rest of fields are 0 by default */
};
@@ -104,10 +101,6 @@ MODULE_PARM_DESC(init_dbg,
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
"power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
-module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
- bool, 0444);
-MODULE_PARM_DESC(tfd_q_hang_detect,
- "TFD queues hang detection (default: true");
/*
* module init and exit functions
@@ -505,6 +498,7 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
HCMD_NAME(NVM_ACCESS_COMPLETE),
HCMD_NAME(NVM_GET_INFO),
+ HCMD_NAME(TAS_CONFIG),
};
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
@@ -612,27 +606,6 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
-static u8 iwl_mvm_lookup_notif_ver(struct iwl_mvm *mvm, u8 grp, u8 cmd, u8 def)
-{
- const struct iwl_fw_cmd_version *entry;
- unsigned int i;
-
- if (!mvm->fw->ucode_capa.cmd_versions ||
- !mvm->fw->ucode_capa.n_cmd_versions)
- return def;
-
- entry = mvm->fw->ucode_capa.cmd_versions;
- for (i = 0; i < mvm->fw->ucode_capa.n_cmd_versions; i++, entry++) {
- if (entry->group == grp && entry->cmd == cmd) {
- if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
- return def;
- return entry->notif_ver;
- }
- }
-
- return def;
-}
-
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -745,7 +718,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
mvm->cmd_ver.d0i3_resp =
- iwl_mvm_lookup_notif_ver(mvm, LEGACY_GROUP, D0I3_END_CMD, 0);
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD,
+ 0);
/* we only support version 1 */
if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
goto out_free;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 15d11fb72aca..6f4d241d47e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -369,14 +369,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
- /*
- * In debug sta->max_amsdu_len < size
- * so also check with orig_amsdu_len which holds the original
- * data before debugfs changed the value
- */
- if (WARN_ON(sta->max_amsdu_len < size &&
- mvmsta->orig_amsdu_len < size))
+ if (sta->max_amsdu_len < size) {
+ /*
+ * In debug sta->max_amsdu_len < size
+ * so also check with orig_amsdu_len which holds the
+ * original data before debugfs changed the value
+ */
+ WARN_ON(mvmsta->orig_amsdu_len < size);
goto out;
+ }
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;
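The restructured check above warns only when even the pre-debugfs limit is exceeded, since debugfs may legitimately lower the live limit below what the firmware negotiated. A sketch of that two-level validation:

	#include <assert.h>
	#include <stdio.h>

	struct sta {
		unsigned int max_amsdu_len;  /* may be lowered via debugfs */
		unsigned int orig_amsdu_len; /* value before any override */
	};

	/* returns 0 if the firmware-notified size is acceptable */
	static int check_amsdu_size(const struct sta *sta, unsigned int size)
	{
		if (sta->max_amsdu_len < size) {
			/* only a real inconsistency if even the original
			 * (pre-debugfs) limit is smaller */
			if (sta->orig_amsdu_len < size)
				fprintf(stderr,
					"unexpected A-MSDU size %u\n", size);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct sta s = { .max_amsdu_len = 4000, .orig_amsdu_len = 8000 };

		/* lowered live limit: reject quietly, no warning */
		assert(check_amsdu_size(&s, 6000) == -1);
		return 0;
	}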
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 00e7fdbaeb7f..a7264b282d79 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -1430,7 +1429,8 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
*/
if (ieee80211_get_vht_max_nss(&vht_cap,
IEEE80211_VHT_CHANWIDTH_160MHZ,
- 0, true) < sta->rx_nss)
+ 0, true,
+ sta->rx_nss) < sta->rx_nss)
return RATE_MCS_CHAN_WIDTH_80;
return RATE_MCS_CHAN_WIDTH_160;
case IEEE80211_STA_RX_BW_80:
@@ -3740,11 +3740,12 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
}
return scnprintf(buf, bufsz,
- "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s",
+ "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s",
rate, type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
(rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "",
(rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
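Decoding rate flags into a human-readable string, as rs_pretty_print_rate() does above, is a chain of bit tests feeding one format call. A sketch with made-up bit positions:

	#include <stdio.h>

	#define MCS_SGI  (1u << 13) /* hypothetical bit layout */
	#define MCS_STBC (1u << 9)
	#define MCS_LDPC (1u << 27)
	#define HE_DCM   (1u << 16)

	int main(void)
	{
		unsigned int rate = MCS_SGI | HE_DCM;
		char buf[64];

		/* snprintf stands in for the kernel's scnprintf */
		snprintf(buf, sizeof(buf), "%s%s%s%s",
			 (rate & MCS_SGI) ? "SGI " : "NGI ",
			 (rate & MCS_STBC) ? "STBC " : "",
			 (rate & MCS_LDPC) ? "LDPC " : "",
			 (rate & HE_DCM) ? "DCM " : "");
		printf("%s\n", buf);
		return 0;
	}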
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 7a6ad1ff7055..51a061b138ba 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2051,40 +2051,6 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
-static int iwl_mvm_scan_umac_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_scan_params *params, int type,
- int uid)
-{
- struct iwl_scan_req_umac_v13 *cmd = mvm->scan_cmd;
- struct iwl_scan_req_params_v13 *scan_p = &cmd->scan_params;
- int ret;
- u16 gen_flags;
- u32 bitmap_ssid = 0;
-
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
- cmd->uid = cpu_to_le32(uid);
-
- gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
- iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
- &scan_p->general_params,
- gen_flags);
-
- ret = iwl_mvm_fill_scan_sched_params(params,
- scan_p->periodic_params.schedule,
- &scan_p->periodic_params.delay);
- if (ret)
- return ret;
-
- iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
- &bitmap_ssid);
- iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
- &scan_p->channel_params, bitmap_ssid);
-
- return 0;
-}
-
static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
int uid)
@@ -2235,7 +2201,6 @@ struct iwl_scan_umac_handler {
static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
IWL_SCAN_UMAC_HANDLER(14),
- IWL_SCAN_UMAC_HANDLER(13),
IWL_SCAN_UMAC_HANDLER(12),
};
@@ -2263,8 +2228,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
- scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC);
+ scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
const struct iwl_scan_umac_handler *ver_handler =
@@ -2594,7 +2559,6 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14);
- IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
}
@@ -2604,8 +2568,8 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
- u8 scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC);
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC);
base_size = iwl_scan_req_umac_get_size(scan_ver);
if (base_size)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 56ae72debb96..fee01cbbd3ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -751,16 +749,23 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
mvm->trans->cfg->min_txq_size);
}
- queue = iwl_trans_txq_alloc(mvm->trans,
- cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
- sta_id, tid, SCD_QUEUE_CFG, size, timeout);
- if (queue < 0) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
- sta_id, tid, queue);
+ do {
+ __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
+
+ queue = iwl_trans_txq_alloc(mvm->trans, enable,
+ sta_id, tid, SCD_QUEUE_CFG,
+ size, timeout);
+
+ if (queue < 0)
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
+ size, sta_id, tid, queue);
+ size /= 2;
+ } while (queue < 0 && size >= 16);
+
+ if (queue < 0)
return queue;
- }
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);
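The new allocation loop above halves the requested queue size after each failed attempt and gives up below a floor of 16 entries. A standalone sketch of that retry strategy, with a toy allocator:

	#include <stdio.h>

	/* hypothetical allocator: pretend only <= 64 entries fit */
	static int txq_alloc(int size)
	{
		return size > 64 ? -1 : size;
	}

	int main(void)
	{
		int size = 256, queue;

		/* halve the request until it fits or drops below the floor */
		do {
			queue = txq_alloc(size);
			if (queue < 0)
				printf("alloc of %d failed, retrying\n", size);
			size /= 2;
		} while (queue < 0 && size >= 16);

		if (queue < 0)
			printf("gave up\n");
		else
			printf("allocated %d entries\n", queue);
		return 0;
	}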
@@ -1395,7 +1400,17 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
if (tid == IEEE80211_NUM_TIDS)
tid = IWL_MAX_TID_COUNT;
- iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
+ /*
+ * We can't really do much here; if this fails we can't
+ * transmit anyway, so just don't transmit the frame and
+ * let the frames back up - the function itself has already
+ * tried its best to allocate a queue.
+ */
+ if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+ list_del_init(&mvmtxq->list);
+ continue;
+ }
+
list_del_init(&mvmtxq->list);
local_bh_disable();
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
@@ -1965,9 +1980,8 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
u8 sta_id, u8 fifo)
{
- unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
- mvm->trans->trans_cfg->base_params->wd_timeout :
- IWL_WATCHDOG_DISABLED;
+ unsigned int wdg_timeout =
+ mvm->trans->trans_cfg->base_params->wd_timeout;
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
.sta_id = sta_id,
@@ -1983,9 +1997,8 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
- unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
- mvm->trans->trans_cfg->base_params->wd_timeout :
- IWL_WATCHDOG_DISABLED;
+ unsigned int wdg_timeout =
+ mvm->trans->trans_cfg->base_params->wd_timeout;
WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
@@ -2080,16 +2093,24 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
-void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
+int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
{
- iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
-}
+ int ret;
-void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
-{
lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
+ ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+
+ return ret;
+}
+
+void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
+{
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 8d70093847cb..da2d1ac01229 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -541,7 +541,7 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start);
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
-void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
+int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index a8d0d17f79fd..2f6484e0d726 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -920,11 +920,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* No need to lock amsdu_in_ampdu_allowed since it can't be modified
 * during a BA session.
*/
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
- return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
-
- if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
!(mvmsta->amsdu_enabled & BIT(tid)))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 6096276cb0d0..be57b8391850 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -588,6 +586,23 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}
+static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ u32 error;
+
+ error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);
+
+ IWL_ERR(trans, "IML/ROM dump:\n");
+
+ if (error & 0xFFFF0000)
+ IWL_ERR(trans, "IML/ROM SYSASSERT:\n");
+
+ IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error);
+ IWL_ERR(mvm, "0x%08X | IML/ROM data1\n",
+ iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS));
+}
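The IML/ROM dump above treats a non-zero upper halfword of the status register as an assert code. A sketch of that decoding, with a made-up register value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int error = 0xA5B40000; /* made-up register value */

		/* a non-zero upper halfword indicates a SYSASSERT */
		if (error & 0xFFFF0000)
			printf("SYSASSERT, code 0x%04x\n", error >> 16);
		printf("0x%08X | error/state\n", error);
		return 0;
	}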
+
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
@@ -603,6 +618,9 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_mvm_dump_iml_error_log(mvm);
+
iwl_fw_error_print_fseq_regs(&mvm->fwrt);
}
@@ -952,8 +970,7 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
vif && vif->type == NL80211_IFTYPE_AP)
return IWL_WATCHDOG_DISABLED;
- return iwlmvm_mod_params.tfd_q_hang_detect ?
- default_timeout : IWL_WATCHDOG_DISABLED;
+ return default_timeout;
}
trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 9d5b1e51b50d..1ab136600415 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,32 +84,35 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
- if (le32_to_cpu(fw_mon_cfg->buf_location) ==
- IWL_FW_INI_LOCATION_SRAM_PATH) {
+ switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
+ case IWL_FW_INI_LOCATION_SRAM_PATH:
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
-
IWL_DEBUG_FW(trans,
- "WRT: Applying SMEM buffer destination\n");
-
- goto out;
- }
-
- if (le32_to_cpu(fw_mon_cfg->buf_location) ==
- IWL_FW_INI_LOCATION_DRAM_PATH &&
- trans->dbg.fw_mon_ini[alloc_id].num_frags) {
- struct iwl_dram_data *frag =
- &trans->dbg.fw_mon_ini[alloc_id].frags[0];
-
- dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ "WRT: Applying SMEM buffer destination\n");
+ break;
+ case IWL_FW_INI_LOCATION_NPK_PATH:
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
IWL_DEBUG_FW(trans,
- "WRT: Applying DRAM destination (alloc_id=%u)\n",
- alloc_id);
+ "WRT: Applying NPK buffer destination\n");
+ break;
- dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
- dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ case IWL_FW_INI_LOCATION_DRAM_PATH:
+ if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
+ struct iwl_dram_data *frag =
+ &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
+ alloc_id,
+ trans->dbg.fw_mon_ini[alloc_id].num_frags);
+ }
+ break;
+ default:
+ IWL_ERR(trans, "WRT: Invalid buffer destination\n");
}
-
out:
if (dbg_flags)
*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
@@ -135,9 +138,17 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
case IWL_AMSDU_2K:
break;
case IWL_AMSDU_4K:
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ break;
case IWL_AMSDU_8K:
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ /* if firmware supports the ext size, tell it */
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
+ break;
case IWL_AMSDU_12K:
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ /* if firmware supports the ext size, tell it */
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K;
break;
}
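The switch above now sets the 4K base flag for every size and adds an extension bit for 8K/12K, instead of silently falling through to the 4K case. A sketch of that mapping, with hypothetical flag values:

	#include <stdio.h>

	#define RB_SIZE_4K      (1u << 0) /* hypothetical flag values */
	#define RB_SIZE_EXT_8K  (1u << 1)
	#define RB_SIZE_EXT_12K (1u << 2)

	enum amsdu { AMSDU_2K, AMSDU_4K, AMSDU_8K, AMSDU_12K };

	static unsigned int rb_flags(enum amsdu sz)
	{
		unsigned int flags = 0;

		switch (sz) {
		case AMSDU_2K:
			break;
		case AMSDU_4K:
			flags |= RB_SIZE_4K;
			break;
		case AMSDU_8K:
			/* base size plus an extension bit the firmware
			 * may honour if it supports the larger size */
			flags |= RB_SIZE_4K | RB_SIZE_EXT_8K;
			break;
		case AMSDU_12K:
			flags |= RB_SIZE_4K | RB_SIZE_EXT_12K;
			break;
		}
		return flags;
	}

	int main(void)
	{
		printf("8K flags: 0x%x\n", rb_flags(AMSDU_8K));
		return 0;
	}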
@@ -210,7 +221,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->tr_idx_arr_size =
cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index acd01d86f101..23abfbd096b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,21 @@ static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
}
+static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const struct fw_desc *sec,
+ struct iwl_dram_data *dram)
+{
+ dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, sec->len,
+ &dram->physical);
+ if (!dram->block)
+ return -ENOMEM;
+
+ dram->size = sec->len;
+ memcpy(dram->block, sec->data, sec->len);
+
+ return 0;
+}
+
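iwl_pcie_ctxt_info_alloc_dma() above is an allocate-then-copy helper; the sketch below models it with plain malloc standing in for the DMA-coherent allocator:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct desc { const void *data; size_t len; };
	struct dram { void *block; size_t size; };

	/* allocate a buffer and copy a firmware section into it */
	static int alloc_dma(const struct desc *sec, struct dram *dram)
	{
		dram->block = malloc(sec->len);
		if (!dram->block)
			return -1; /* -ENOMEM in the kernel */
		dram->size = sec->len;
		memcpy(dram->block, sec->data, sec->len);
		return 0;
	}

	int main(void)
	{
		const char payload[] = "fw section";
		struct desc sec = { payload, sizeof(payload) };
		struct dram dram;

		if (!alloc_dma(&sec, &dram)) {
			printf("copied %zu bytes\n", dram.size);
			free(dram.block);
		}
		return 0;
	}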
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -248,7 +263,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
- cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 6744c0281ffb..65d65c6baf4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,11 +27,10 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -524,8 +522,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Qu devices */
{IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
{IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
- {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+
+ {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
@@ -539,12 +539,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
@@ -585,94 +590,72 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
-/* Qu with Hr */
- IWL_DEV_INFO(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ /* QnJ with Hr */
+ IWL_DEV_INFO(0x2720, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
+
+ /* Qu with Hr */
IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
-
- IWL_DEV_INFO(0x2720, 0x0000, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0040, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0044, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0078, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x007C, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0310, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0A10, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x1080, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x1651, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x1652, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x2074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x4070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x4244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+
+ IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
@@ -771,7 +754,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_NO_160, IWL_CFG_CORES_BT,
iwl9260_2ac_cfg, iwl9260_name),
- /* Qu with Jf */
+/* Qu with Jf */
/* Qu B step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
@@ -947,6 +930,29 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
+
+/* Qu with Hr */
+ /* Qu B step */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_b0_hr1_b0, iwl_ax101_name),
+
+ /* Qu C step */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_c0_hr1_b0, iwl_ax101_name),
+
+ /* QuZ */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_quz_a0_hr1_b0, iwl_ax101_name),
+
#endif /* CONFIG_IWLMVM */
};
@@ -1044,29 +1050,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
- } else if (cfg == &iwl_ax101_cfg_qu_hr) {
- if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
- (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
- iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
- iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
- iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
- IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
- return -EINVAL;
- } else {
- IWL_ERR(iwl_trans, "Unrecognized RF ID 0x%08x\n",
- CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id));
- return -EINVAL;
- }
}
/*
@@ -1076,9 +1059,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* rest must be removed once we convert Qu with Hr as well.
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
- if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
- else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
@@ -1088,10 +1069,12 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* same thing for QuZ... */
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
- if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
- else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
+ else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr;
+ else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr;
}
#endif
@@ -1142,12 +1125,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* register transport layer debugfs here */
iwl_trans_pcie_dbgfs_register(iwl_trans);
- /* The PCI device starts with a reference taken and we are
- * supposed to release it here. But to simplify the
- * interaction with the opmode, we don't do it now, but let
- * the opmode release it when it's ready.
- */
-
return 0;
out_free_trans:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 595e6873d56e..55808ba10d27 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -189,6 +189,8 @@ struct iwl_rx_completion_desc {
* @rb_stts_dma: bus address of receive buffer status
* @lock:
* @queue: actual rx queue. Not used for multi-rx queue.
+ * @next_rb_is_fragment: indicates that the previously handled RB set the
+ * fragmented flag, so the next one is still another fragment
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
@@ -214,7 +216,7 @@ struct iwl_rxq {
u32 queue_size;
struct list_head rx_free;
struct list_head rx_used;
- bool need_update;
+ bool need_update, next_rb_is_fragment;
void *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
@@ -244,12 +246,6 @@ struct iwl_rb_allocator {
struct work_struct rx_alloc;
};
-struct iwl_dma_ptr {
- dma_addr_t dma;
- void *addr;
- size_t size;
-};
-
/**
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
@@ -288,107 +284,6 @@ static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
-struct iwl_cmd_meta {
- /* only for SYNC commands, iff the reply skb is wanted */
- struct iwl_host_cmd *source;
- u32 flags;
- u32 tbs;
-};
-
-/*
- * The FH will write back to the first TB only, so we need to copy some data
- * into the buffer regardless of whether it should be mapped or not.
- * This indicates how big the first TB must be to include the scratch buffer
- * and the assigned PN.
- * Since PN location is 8 bytes at offset 12, it's 20 now.
- * If we make it bigger then allocations will be bigger and copy slower, so
- * that's probably not useful.
- */
-#define IWL_FIRST_TB_SIZE 20
-#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
-
-struct iwl_pcie_txq_entry {
- void *cmd;
- struct sk_buff *skb;
- /* buffer to free after command completes */
- const void *free_buf;
- struct iwl_cmd_meta meta;
-};
-
-struct iwl_pcie_first_tb_buf {
- u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
-};
-
-/**
- * struct iwl_txq - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @tfds: transmit frame descriptors (DMA memory)
- * @first_tb_bufs: start of command headers, including scratch buffers, for
- * the writeback -- this is DMA memory and an array holding one buffer
- * for each command on the queue
- * @first_tb_dma: DMA address for the first_tb_bufs start
- * @entries: transmit entries (driver state)
- * @lock: queue lock
- * @stuck_timer: timer that fires if queue gets stuck
- * @trans_pcie: pointer back to transport (for timer)
- * @need_update: indicates need to update read/write index
- * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
- * @wd_timeout: queue watchdog timeout (jiffies) - per queue
- * @frozen: tx stuck queue timer is frozen
- * @frozen_expiry_remainder: remember how long until the timer fires
- * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
- * @write_ptr: 1-st empty entry (index) host_w
- * @read_ptr: last used entry (index) host_r
- * @dma_addr: physical addr for BD's
- * @n_window: safe queue window
- * @id: queue id
- * @low_mark: low watermark, resume queue if free space more than this
- * @high_mark: high watermark, stop queue if free space less than this
- *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_txq {
- void *tfds;
- struct iwl_pcie_first_tb_buf *first_tb_bufs;
- dma_addr_t first_tb_dma;
- struct iwl_pcie_txq_entry *entries;
- spinlock_t lock;
- unsigned long frozen_expiry_remainder;
- struct timer_list stuck_timer;
- struct iwl_trans_pcie *trans_pcie;
- bool need_update;
- bool frozen;
- bool ampdu;
- int block;
- unsigned long wd_timeout;
- struct sk_buff_head overflow_q;
- struct iwl_dma_ptr bc_tbl;
-
- int write_ptr;
- int read_ptr;
- dma_addr_t dma_addr;
- int n_window;
- u32 id;
- int low_mark;
- int high_mark;
-
- bool overflow_tx;
-};
-
static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
@@ -556,11 +451,9 @@ struct iwl_trans_pcie {
u32 scd_base_addr;
struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
+ struct dma_pool *bc_pool;
struct iwl_txq *txq_memory;
- struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
/* PCI bus related data */
struct pci_dev *pci_dev;
@@ -574,10 +467,7 @@ struct iwl_trans_pcie {
u8 page_offs, dev_cmd_offs;
- u8 cmd_queue;
u8 def_rx_queue;
- u8 cmd_fifo;
- unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs;
@@ -792,22 +682,6 @@ static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
return i;
}
-static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
-{
- dram->block = dma_alloc_coherent(trans->dev, sec->len,
- &dram->physical,
- GFP_KERNEL);
- if (!dram->block)
- return -ENOMEM;
-
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
-
- return 0;
-}
-
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -996,9 +870,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+ if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
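iwl_wake_queue()/iwl_stop_queue() rely on test-and-set/clear bit helpers so that only the first state transition notifies the op mode. A non-atomic userspace stand-in (the kernel's helpers are atomic):

	#include <stdio.h>

	static unsigned long stopped;

	static int test_and_set_bit(int nr, unsigned long *addr)
	{
		unsigned long mask = 1ul << nr;
		int old = !!(*addr & mask);

		*addr |= mask;
		return old;
	}

	static int test_and_clear_bit(int nr, unsigned long *addr)
	{
		unsigned long mask = 1ul << nr;
		int old = !!(*addr & mask);

		*addr &= ~mask;
		return old;
	}

	int main(void)
	{
		/* stop queue 3 once; the second call sees it already set */
		if (!test_and_set_bit(3, &stopped))
			printf("queue 3 stopped\n");
		if (!test_and_set_bit(3, &stopped))
			printf("not printed again\n");
		if (test_and_clear_bit(3, &stopped))
			printf("queue 3 woken\n");
		return 0;
	}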
@@ -1007,9 +879,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
static inline void iwl_stop_queue(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+ if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
iwl_op_mode_queue_full(trans->op_mode, txq->id);
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 8c29071cb415..24cb1b1f21f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1284,7 +1284,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
@@ -1427,7 +1427,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
- struct iwl_rxq *rxq, int i)
+ struct iwl_rxq *rxq, int i,
+ bool *join)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
@@ -1441,10 +1442,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
vid = le16_to_cpu(rxq->cd[i].rbid);
- else
+ *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ } else {
vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+ }
if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
goto out_err;
@@ -1502,6 +1505,7 @@ restart:
u32 rb_pending_alloc =
atomic_read(&trans_pcie->rba.req_pending) *
RX_CLAIM_REQ_ALLOC;
+ bool join = false;
if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
!emergency)) {
@@ -1514,11 +1518,29 @@ restart:
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
- rxb = iwl_pcie_get_rxb(trans, rxq, i);
+ rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
if (!rxb)
goto out;
- iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+ if (unlikely(join || rxq->next_rb_is_fragment)) {
+ rxq->next_rb_is_fragment = join;
+ /*
+ * We can only get a multi-RB in the following cases:
+ * - firmware issue, sending a too big notification
+ * - sniffer mode with a large A-MSDU
+ * - large MTU frames (>2k)
+ * since the multi-RB functionality is limited to newer
+ * hardware that cannot put multiple entries into a
+ * single RB.
+ *
+ * Right now, the higher layers aren't set up to deal
+ * with that, so discard all of these.
+ */
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else {
+ iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+ }
i = (i + 1) & (rxq->queue_size - 1);
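The fragment handling above carries the "fragmented" flag from one RB to the next, so the trailing buffer of a multi-RB entry (whose own flag is clear) is still discarded. A sketch of that carry logic:

	#include <stdio.h>

	int main(void)
	{
		/* 1 = this RB sets the fragmented flag (more RBs follow) */
		int frags[] = { 0, 1, 1, 0, 0 };
		int next_is_fragment = 0;
		unsigned int i;

		for (i = 0; i < sizeof(frags) / sizeof(frags[0]); i++) {
			int join = frags[i];

			if (join || next_is_fragment) {
				/* part of a multi-RB entry: recycle the
				 * buffer instead of parsing it */
				next_is_fragment = join;
				printf("RB %u: discarded (fragment)\n", i);
			} else {
				printf("RB %u: handled\n", i);
			}
		}
		return 0;
	}

Note how the final RB of a burst (join == 0 but carry set) clears the carry, so handling resumes with the next independent buffer.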
@@ -1649,9 +1671,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
}
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- if (!trans_pcie->txq[i])
+ if (!trans->txqs.txq[i])
continue;
- del_timer(&trans_pcie->txq[i]->stuck_timer);
+ del_timer(&trans->txqs.txq[i]->stuck_timer);
}
/* The STATUS_FW_ERROR bit is set in this function. This must happen
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 19a2c72081ab..97c9e9c87436 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size))
+ if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
@@ -262,8 +262,9 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
iwl_pcie_reset_ict(trans);
/* make sure all queue are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed included since FW will still use it
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index e4cbd8daa7c6..e5160d620868 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +68,7 @@
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
+#include <linux/seq_file.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
@@ -1018,21 +1017,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
return ret;
}
- /* supported for 7000 only for the moment */
- if (iwlwifi_mod_params.fw_monitor &&
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
-
- iwl_pcie_alloc_fw_monitor(trans, 0);
- if (fw_mon->size) {
- iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- fw_mon->physical >> 4);
- iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (fw_mon->physical + fw_mon->size) >> 4);
- }
- } else if (iwl_pcie_dbg_on(trans)) {
+ if (iwl_pcie_dbg_on(trans))
iwl_pcie_apply_destination(trans);
- }
iwl_enable_interrupts(trans);
@@ -1507,14 +1493,10 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- /*
- * Family IWL_DEVICE_FAMILY_AX210 and above persist mode is set by FW.
- */
- if (!reset && trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ if (!reset)
/* Enable persistence mode to avoid reset */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
- }
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
@@ -1922,9 +1904,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- trans_pcie->cmd_queue = trans_cfg->cmd_queue;
- trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
- trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+ trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
+ trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
+ trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
else
@@ -2217,11 +2199,10 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
unsigned long txqs,
bool freeze)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int queue;
for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
- struct iwl_txq *txq = trans_pcie->txq[queue];
+ struct iwl_txq *txq = trans->txqs.txq[queue];
unsigned long now;
spin_lock_bh(&txq->lock);
@@ -2269,13 +2250,12 @@ next_queue:
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans_pcie->txq[i];
+ struct iwl_txq *txq = trans->txqs.txq[i];
- if (i == trans_pcie->cmd_queue)
+ if (i == trans->txqs.cmd.q_id)
continue;
spin_lock_bh(&txq->lock);
@@ -2344,7 +2324,6 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
unsigned long now = jiffies;
bool overflow_tx;
@@ -2354,11 +2333,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
- if (!test_bit(txq_idx, trans_pcie->queue_used))
+ if (!test_bit(txq_idx, trans->txqs.queue_used))
return -EINVAL;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
- txq = trans_pcie->txq[txq_idx];
+ txq = trans->txqs.txq[txq_idx];
spin_lock_bh(&txq->lock);
overflow_tx = txq->overflow_tx ||
@@ -2406,7 +2385,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cnt;
int ret = 0;
@@ -2415,9 +2393,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) {
- if (cnt == trans_pcie->cmd_queue)
+ if (cnt == trans->txqs.cmd.q_id)
continue;
- if (!test_bit(cnt, trans_pcie->queue_used))
+ if (!test_bit(cnt, trans->txqs.queue_used))
continue;
if (!(BIT(cnt) & txq_bm))
continue;
@@ -2544,44 +2522,94 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
.llseek = generic_file_llseek, \
};
-static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+struct iwl_dbgfs_tx_queue_priv {
+ struct iwl_trans *trans;
+};
+
+struct iwl_dbgfs_tx_queue_state {
+ loff_t pos;
+};
+
+static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq;
- char *buf;
- int pos = 0;
- int cnt;
- int ret;
- size_t bufsz;
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state;
+
+ if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ return NULL;
- bufsz = sizeof(char) * 75 *
- trans->trans_cfg->base_params->num_of_queues;
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ state->pos = *pos;
+ return state;
+}
- if (!trans_pcie->txq_memory)
- return -EAGAIN;
+static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
+ void *v, loff_t *pos)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state = v;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
+ *pos = ++state->pos;
+
+ if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ return NULL;
+
+ return state;
+}
+
+static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
+{
+ kfree(v);
+}
+
+static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state = v;
+ struct iwl_trans *trans = priv->trans;
+ struct iwl_txq *txq = trans->txqs.txq[state->pos];
+
+ seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
+ (unsigned int)state->pos,
+ !!test_bit(state->pos, trans->txqs.queue_used),
+ !!test_bit(state->pos, trans->txqs.queue_stopped));
+ if (txq)
+ seq_printf(seq,
+ "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
+ txq->read_ptr, txq->write_ptr,
+ txq->need_update, txq->frozen,
+ txq->n_window, txq->ampdu);
+ else
+ seq_puts(seq, "(unallocated)");
+
+ if (state->pos == trans->txqs.cmd.q_id)
+ seq_puts(seq, " (HCMD)");
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+
+static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
+ .start = iwl_dbgfs_tx_queue_seq_start,
+ .next = iwl_dbgfs_tx_queue_seq_next,
+ .stop = iwl_dbgfs_tx_queue_seq_stop,
+ .show = iwl_dbgfs_tx_queue_seq_show,
+};
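+
+/*
+ * Note: with the seq_file interface userspace reads are paged by
+ * seq_read(), so the old fixed 75-bytes-per-queue buffer (and the
+ * -EAGAIN return when txq_memory was not yet allocated) is no longer
+ * needed; an unallocated queue now simply prints "(unallocated)".
+ */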
+
+static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv;
+
+ priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
+ sizeof(*priv));
+
+ if (!priv)
return -ENOMEM;
- for (cnt = 0;
- cnt < trans->trans_cfg->base_params->num_of_queues;
- cnt++) {
- txq = trans_pcie->txq[cnt];
- pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
- cnt, txq->read_ptr, txq->write_ptr,
- !!test_bit(cnt, trans_pcie->queue_used),
- !!test_bit(cnt, trans_pcie->queue_stopped),
- txq->need_update, txq->frozen,
- (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ priv->trans = inode->i_private;
+ return 0;
}
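+
+/* the matching seq_release_private() in iwl_dbgfs_tx_queue_ops below
+ * frees the private state allocated by __seq_open_private() here
+ */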
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
@@ -2914,9 +2942,15 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
+static const struct file_operations iwl_dbgfs_tx_queue_ops = {
+ .owner = THIS_MODULE,
+ .open = iwl_dbgfs_tx_queue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
static const struct file_operations iwl_dbgfs_monitor_data_ops = {
.read = iwl_dbgfs_monitor_data_read,
@@ -3226,7 +3260,7 @@ static struct iwl_trans_dump_data
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
@@ -3627,6 +3661,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans_pcie->sx_waitq);
+ /*
+ * For gen2 devices, we use a single allocation for each byte-count
+ * table, but they're pretty small (1k) so use a DMA pool that we
+ * allocate here.
+ */
+ if (cfg_trans->gen2) {
+ size_t bc_tbl_size;
+
+ if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_AX210)
+ bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+ else
+ bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+
+ trans_pcie->bc_pool = dmam_pool_create("iwlwifi:bc", &pdev->dev,
+ bc_tbl_size, 256, 0);
+ if (!trans_pcie->bc_pool)
+ goto out_no_pci;
+ }
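+ /* queues created later draw their byte-count tables from this pool
+ * via dma_pool_alloc() (see iwl_trans_pcie_dyn_txq_alloc_dma()) rather
+ * than a per-queue coherent allocation
+ */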
+
if (trans_pcie->msix_enabled) {
ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 9664dbc70ef1..7fc7542535d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,7 +64,6 @@
*/
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id;
/*
@@ -72,12 +71,13 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped.
*/
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
- if (!trans_pcie->txq[txq_id])
+ for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
+ if (!trans->txqs.txq[txq_id])
continue;
iwl_pcie_gen2_txq_unmap(trans, txq_id);
}
@@ -90,9 +90,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
- struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
@@ -102,7 +100,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
return;
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
+ num_tbs * sizeof(struct iwl_tfh_tb);
/*
* filled_tfd_size contains the number of filled bytes in the TFD.
* Dividing it by 64 will give the number of chunks to fetch
@@ -114,12 +112,16 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+
/* Starting from AX210, the HW expects bytes */
WARN_ON(trans_pcie->bc_table_dword);
WARN_ON(len > 0x3FFF);
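+ /* 16-bit entry: byte count in bits 0-13, fetch-chunk count in 14-15 */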
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
} else {
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+
/* Before AX210, the HW expects DW */
WARN_ON(!trans_pcie->bc_table_dword);
len = DIV_ROUND_UP(len, 4);
@@ -714,7 +716,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
u16 cmd_len;
int idx;
void *tfd;
@@ -723,7 +725,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
"queue %d out of range", txq_id))
return -EINVAL;
- if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
@@ -817,7 +819,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -929,7 +931,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -977,7 +979,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1054,7 +1056,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx;
int ret;
@@ -1173,14 +1175,14 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->cmd_queue) {
+ if (txq_id != trans->txqs.cmd.q_id) {
int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb;
@@ -1222,7 +1224,9 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
}
kfree(txq->entries);
- iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans_pcie->bc_pool, txq->bc_tbl.addr,
+ txq->bc_tbl.dma);
kfree(txq);
}
@@ -1236,7 +1240,6 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
*/
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
int i;
@@ -1244,7 +1247,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
"queue %d out of range", txq_id))
return;
- txq = trans_pcie->txq[txq_id];
+ txq = trans->txqs.txq[txq_id];
if (WARN_ON(!txq))
return;
@@ -1252,7 +1255,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_gen2_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->cmd_queue)
+ if (txq_id == trans->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
@@ -1261,27 +1264,38 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_gen2_txq_free_memory(trans, txq);
- trans_pcie->txq[txq_id] = NULL;
+ trans->txqs.txq[txq_id] = NULL;
- clear_bit(txq_id, trans_pcie->queue_used);
+ clear_bit(txq_id, trans->txqs.queue_used);
}
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
struct iwl_txq **intxq, int size,
unsigned int timeout)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t bc_tbl_size, bc_tbl_entries;
+ struct iwl_txq *txq;
int ret;
- struct iwl_txq *txq;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+ } else {
+ bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+ }
+
+ if (WARN_ON(size > bc_tbl_entries))
+ return -EINVAL;
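+ /* each TFD takes one u16 slot in the byte-count table, so the queue
+ * cannot be deeper than the table has entries
+ */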
+
txq = kzalloc(sizeof(*txq), GFP_KERNEL);
if (!txq)
return -ENOMEM;
- ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
- (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210) ?
- sizeof(struct iwl_gen3_bc_tbl) :
- sizeof(struct iwlagn_scd_bc_tbl));
- if (ret) {
+
+ txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->bc_pool, GFP_KERNEL,
+ &txq->bc_tbl.dma);
+ if (!txq->bc_tbl.addr) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
kfree(txq);
return -ENOMEM;
@@ -1312,7 +1326,6 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
struct iwl_txq *txq,
struct iwl_host_cmd *hcmd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue_cfg_rsp *rsp;
int ret, qid;
u32 wr_ptr;
@@ -1327,20 +1340,20 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
qid = le16_to_cpu(rsp->queue_number);
wr_ptr = le16_to_cpu(rsp->write_pointer);
- if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
+ if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
WARN_ONCE(1, "queue index %d unsupported", qid);
ret = -EIO;
goto error_free_resp;
}
- if (test_and_set_bit(qid, trans_pcie->queue_used)) {
+ if (test_and_set_bit(qid, trans->txqs.queue_used)) {
WARN_ONCE(1, "queue %d already used", qid);
ret = -EIO;
goto error_free_resp;
}
txq->id = qid;
- trans_pcie->txq[qid] = txq;
+ trans->txqs.txq[qid] = txq;
wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
@@ -1398,8 +1411,6 @@ error:
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
"queue %d out of range", queue))
return;
@@ -1410,7 +1421,7 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
- if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
+ if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", queue);
return;
@@ -1418,22 +1429,21 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
iwl_pcie_gen2_txq_unmap(trans, queue);
- iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
- trans_pcie->txq[queue] = NULL;
+ iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]);
+ trans->txqs.txq[queue] = NULL;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Free all TX queues */
- for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
- if (!trans_pcie->txq[i])
+ for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
+ if (!trans->txqs.txq[i])
continue;
iwl_pcie_gen2_txq_free(trans, i);
@@ -1442,35 +1452,34 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *queue;
int ret;
/* alloc and init the tx queue */
- if (!trans_pcie->txq[txq_id]) {
+ if (!trans->txqs.txq[txq_id]) {
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue) {
IWL_ERR(trans, "Not enough memory for tx queue\n");
return -ENOMEM;
}
- trans_pcie->txq[txq_id] = queue;
+ trans->txqs.txq[txq_id] = queue;
ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
}
} else {
- queue = trans_pcie->txq[txq_id];
+ queue = trans->txqs.txq[txq_id];
}
ret = iwl_pcie_txq_init(trans, queue, queue_size,
- (txq_id == trans_pcie->cmd_queue));
+ (txq_id == trans->txqs.cmd.q_id));
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
- trans_pcie->txq[txq_id]->id = txq_id;
- set_bit(txq_id, trans_pcie->queue_used);
+ trans->txqs.txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans->txqs.queue_used);
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 4582d418ba4d..5c6c3fa0d29f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -8,7 +8,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -183,8 +183,7 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
- struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
- struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ struct iwl_trans *trans = txq->trans;
spin_lock(&txq->lock);
/* check if triggered erroneously */
@@ -262,7 +261,7 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != trans_pcie->cmd_queue)
+ if (txq_id != trans->txqs.cmd.q_id)
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -280,7 +279,6 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;
@@ -293,7 +291,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 3. there is a chance that the NIC is asleep
*/
if (!trans->trans_cfg->base_params->shadow_reg_enable &&
- txq_id != trans_pcie->cmd_queue &&
+ txq_id != trans->txqs.cmd.q_id &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up nic if it's powered down ...
@@ -324,13 +322,12 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans_pcie->txq[i];
+ struct iwl_txq *txq = trans->txqs.txq[i];
- if (!test_bit(i, trans_pcie->queue_used))
+ if (!test_bit(i, trans->txqs.queue_used))
continue;
spin_lock_bh(&txq->lock);
@@ -535,7 +532,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
tfd_sz = trans_pcie->tfd_size * slots_num;
timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
- txq->trans_pcie = trans_pcie;
+ txq->trans = trans;
txq->n_window = slots_num;
@@ -661,14 +658,14 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->cmd_queue) {
+ if (txq_id != trans->txqs.cmd.q_id) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
@@ -683,7 +680,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
unsigned long flags;
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
- if (txq_id == trans_pcie->cmd_queue)
+ if (txq_id == trans->txqs.cmd.q_id)
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
@@ -712,7 +709,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;
@@ -722,7 +719,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->cmd_queue)
+ if (txq_id == trans->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
@@ -761,8 +758,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
/* make sure all queues are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -784,9 +782,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
if (trans->trans_cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
- iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
- trans_pcie->cmd_fifo,
- trans_pcie->cmd_q_wdg_timeout);
+ iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
+ trans->txqs.cmd.fifo,
+ trans->txqs.cmd.wdg_timeout);
/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
@@ -822,7 +820,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
if (trans->trans_cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -898,8 +896,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped.
*/
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* This can happen: start_hw, stop_device */
if (!trans_pcie->txq_memory)
@@ -923,7 +922,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
int txq_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Tx queues */
if (trans_pcie->txq_memory) {
@@ -931,7 +930,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
- trans_pcie->txq[txq_id] = NULL;
+ trans->txqs.txq[txq_id] = NULL;
}
}
@@ -954,10 +953,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
- bc_tbls_size *= (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210) ?
- sizeof(struct iwl_gen3_bc_tbl) :
- sizeof(struct iwlagn_scd_bc_tbl);
+ if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
+ return -EINVAL;
+
+ bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
/* It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
@@ -992,7 +991,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+ bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -1000,14 +999,14 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
- trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
- ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
+ trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id],
slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
- trans_pcie->txq[txq_id]->id = txq_id;
+ trans->txqs.txq[txq_id]->id = txq_id;
}
return 0;
@@ -1046,7 +1045,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+ bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -1054,7 +1053,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
- ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
+ ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id],
slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
@@ -1068,7 +1067,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
* Circular buffer (TFD queue in DRAM) physical base address
*/
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- trans_pcie->txq[txq_id]->dma_addr >> 8);
+ trans->txqs.txq[txq_id]->dma_addr >> 8);
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
@@ -1113,18 +1112,18 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue*/
- if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+ if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
return;
spin_lock_bh(&txq->lock);
- if (!test_bit(txq_id, trans_pcie->queue_used)) {
+ if (!test_bit(txq_id, trans->txqs.queue_used)) {
IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
txq_id, ssn);
goto out;
@@ -1176,7 +1175,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);
if (iwl_queue_space(trans, txq) > txq->low_mark &&
- test_bit(txq_id, trans_pcie->queue_stopped)) {
+ test_bit(txq_id, trans->txqs.queue_stopped)) {
struct sk_buff_head overflow_skbs;
__skb_queue_head_init(&overflow_skbs);
@@ -1229,8 +1228,7 @@ out:
/* Set wr_ptr of specific device and txq */
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock);
@@ -1290,7 +1288,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
unsigned long flags;
int nfreed = 0;
u16 r;
@@ -1302,7 +1300,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
(!iwl_queue_used(txq, idx))) {
- WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used),
+ WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
trans->trans_cfg->base_params->max_tfd_queue_size,
@@ -1364,11 +1362,11 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
int fifo = -1;
bool scd_bug = false;
- if (test_and_set_bit(txq_id, trans_pcie->queue_used))
+ if (test_and_set_bit(txq_id, trans->txqs.queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
@@ -1377,7 +1375,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
fifo = cfg->fifo;
/* Disable the scheduler prior configuring the cmd queue */
- if (txq_id == trans_pcie->cmd_queue &&
+ if (txq_id == trans->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, 0);
@@ -1385,7 +1383,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
iwl_scd_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans_pcie->cmd_queue)
+ if (txq_id != trans->txqs.cmd.q_id)
iwl_scd_txq_set_chain(trans, txq_id);
if (cfg->aggregate) {
@@ -1455,7 +1453,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
SCD_QUEUE_STTS_REG_MSK);
/* enable the scheduler for this queue (only) */
- if (txq_id == trans_pcie->cmd_queue &&
+ if (txq_id == trans->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
@@ -1474,8 +1472,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
txq->ampdu = !shared_mode;
}
@@ -1488,8 +1485,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
- trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
- trans_pcie->txq[txq_id]->frozen = false;
+ trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
+ trans->txqs.txq[txq_id]->frozen = false;
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
@@ -1497,7 +1494,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
- if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+ if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", txq_id);
return;
@@ -1511,7 +1508,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
}
iwl_pcie_txq_unmap(trans, txq_id);
- trans_pcie->txq[txq_id]->ampdu = false;
+ trans->txqs.txq[txq_id]->ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -1531,7 +1528,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -1657,7 +1654,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1665,7 +1662,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
@@ -1716,7 +1713,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1816,14 +1813,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
/* If a Tx command is being handled and it isn't in the actual
* command queue then a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans_pcie->cmd_queue,
+ if (WARN(txq_id != trans->txqs.cmd.q_id,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+ txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
@@ -1895,7 +1892,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx;
int ret;
@@ -2129,7 +2126,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
- struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(txq->trans);
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -2332,9 +2330,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
u16 wifi_seq;
bool amsdu;
- txq = trans_pcie->txq[txq_id];
+ txq = trans->txqs.txq[txq_id];
- if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 58212c532c90..aadf3dec5bf3 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -3041,6 +3041,27 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
}
}
+
+/*
+ * HostAP uses two layers of net devices, where the inner
+ * layer gets called all the time from the outer layer.
+ * This is a natural nesting, which needs a split lock type.
+ */
+static struct lock_class_key hostap_netdev_xmit_lock_key;
+
+static void prism2_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &hostap_netdev_xmit_lock_key);
+}
+
+static void prism2_set_lockdep_class(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
+}
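+
+/*
+ * Note: without the separate class, lockdep would see the nested inner
+ * device's _xmit_lock as recursive locking of one class and emit a
+ * false-positive warning for this legitimate nesting.
+ */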
+
static struct net_device *
prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
struct device *sdev)
@@ -3199,6 +3220,7 @@ while (0)
if (ret >= 0)
ret = register_netdevice(dev);
+ prism2_set_lockdep_class(dev);
rtnl_unlock();
if (ret < 0) {
printk(KERN_WARNING "%s: register netdevice failed!\n",
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index a2ee4693eaed..97c270845fd1 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -149,6 +149,7 @@ static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
}
static void *prism2_bss_list_proc_start(struct seq_file *m, loff_t *_pos)
+ __acquires(&local->lock)
{
local_info_t *local = PDE_DATA(file_inode(m->file));
spin_lock_bh(&local->lock);
@@ -162,6 +163,7 @@ static void *prism2_bss_list_proc_next(struct seq_file *m, void *v, loff_t *_pos
}
static void prism2_bss_list_proc_stop(struct seq_file *m, void *v)
+ __releases(&local->lock)
{
local_info_t *local = PDE_DATA(file_inode(m->file));
spin_unlock_bh(&local->lock);
diff --git a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
index b60048c95e0a..291ef97ed45e 100644
--- a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
@@ -278,12 +278,11 @@ static int
spectrum_cs_suspend(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- int err = 0;
/* Mark the device as stopped, to block IO until later */
orinoco_down(priv);
- return err;
+ return 0;
}
static int
diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
index b94764c88750..ff0e30c0c14c 100644
--- a/drivers/net/wireless/intersil/p54/p54usb.c
+++ b/drivers/net/wireless/intersil/p54/p54usb.c
@@ -61,6 +61,7 @@ static const struct usb_device_id p54u_table[] = {
{USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
{USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
{USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
+ {USB_DEVICE(0x124a, 0x4026)}, /* AirVasT USB wireless device */
{USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */
{USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */
{USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
diff --git a/drivers/net/wireless/intersil/prism54/isl_oid.h b/drivers/net/wireless/intersil/prism54/isl_oid.h
index 5441c1f9f2fc..1afc2ccf94ca 100644
--- a/drivers/net/wireless/intersil/prism54/isl_oid.h
+++ b/drivers/net/wireless/intersil/prism54/isl_oid.h
@@ -37,7 +37,7 @@ struct obj_mlmeex {
u16 state;
u16 code;
u16 size;
- u8 data[0];
+ u8 data[];
} __packed;
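+/* note: [] is the C99 flexible-array-member spelling of the old GNU
+ * zero-length [0] array; layout is unchanged, the trailing array is
+ * just visibly variable-length to the compiler now
+ */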
struct obj_buffer {
@@ -68,12 +68,12 @@ struct obj_bss {
struct obj_bsslist {
u32 nr;
- struct obj_bss bsslist[0];
+ struct obj_bss bsslist[];
} __packed;
struct obj_frequencies {
u16 nr;
- u16 mhz[0];
+ u16 mhz[];
} __packed;
struct obj_attachment {
@@ -81,7 +81,7 @@ struct obj_attachment {
char reserved;
short id;
short size;
- char data[0];
+ char data[];
} __packed;
/*
diff --git a/drivers/net/wireless/intersil/prism54/islpci_mgt.h b/drivers/net/wireless/intersil/prism54/islpci_mgt.h
index d6bbbac46b4a..1f87d0aea60c 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/intersil/prism54/islpci_mgt.h
@@ -99,7 +99,7 @@ struct islpci_mgmtframe {
pimfor_header_t *header; /* payload header, points into buf */
void *data; /* payload ex header, points into buf */
struct work_struct ws; /* argument for schedule_work() */
- char buf[0]; /* fragment buffer */
+ char buf[]; /* fragment buffer */
};
int
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0528d4cb4d37..1356e8cbe617 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1068,6 +1068,47 @@ static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data,
return res;
}
+static void mac80211_hwsim_config_mac_nl(struct ieee80211_hw *hw,
+ const u8 *addr, bool add)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ u32 _portid = READ_ONCE(data->wmediumd);
+ struct sk_buff *skb;
+ void *msg_head;
+
+ if (!_portid && !hwsim_virtio_enabled)
+ return;
+
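+ /* build an ADD/DEL_MAC_ADDR event and unicast it to wmediumd, or
+ * push it over virtio when that transport is active
+ */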
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
+ add ? HWSIM_CMD_ADD_MAC_ADDR :
+ HWSIM_CMD_DEL_MAC_ADDR);
+ if (!msg_head) {
+ pr_debug("mac80211_hwsim: problem with msg_head\n");
+ goto nla_put_failure;
+ }
+
+ if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
+ ETH_ALEN, data->addresses[1].addr))
+ goto nla_put_failure;
+
+ if (nla_put(skb, HWSIM_ATTR_ADDR_RECEIVER, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ genlmsg_end(skb, msg_head);
+
+ if (hwsim_virtio_enabled)
+ hwsim_tx_virtio(data, skb);
+ else
+ hwsim_unicast_netgroup(data, skb, _portid);
+ return;
+nla_put_failure:
+ nlmsg_free(skb);
+}
+
static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
{
u16 result = 0;
@@ -1545,6 +1586,9 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
vif->addr);
hwsim_set_magic(vif);
+ if (vif->type != NL80211_IFTYPE_MONITOR)
+ mac80211_hwsim_config_mac_nl(hw, vif->addr, true);
+
vif->cab_queue = 0;
vif->hw_queue[IEEE80211_AC_VO] = 0;
vif->hw_queue[IEEE80211_AC_VI] = 1;
@@ -1584,6 +1628,8 @@ static void mac80211_hwsim_remove_interface(
vif->addr);
hwsim_check_magic(vif);
hwsim_clear_magic(vif);
+ if (vif->type != NL80211_IFTYPE_MONITOR)
+ mac80211_hwsim_config_mac_nl(hw, vif->addr, false);
}
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -1781,6 +1827,8 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
data->rx_filter = 0;
if (*total_flags & FIF_ALLMULTI)
data->rx_filter |= FIF_ALLMULTI;
+ if (*total_flags & FIF_MCAST_ACTION)
+ data->rx_filter |= FIF_MCAST_ACTION;
*total_flags = data->rx_filter;
}
@@ -2104,6 +2152,8 @@ static void hw_scan_work(struct work_struct *work)
hwsim->hw_scan_vif = NULL;
hwsim->tmp_chan = NULL;
mutex_unlock(&hwsim->mutex);
+ mac80211_hwsim_config_mac_nl(hwsim->hw, hwsim->scan_addr,
+ false);
return;
}
@@ -2177,6 +2227,7 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
mutex_unlock(&hwsim->mutex);
+ mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, true);
wiphy_dbg(hw->wiphy, "hwsim hw_scan request\n");
ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0);
@@ -2220,6 +2271,7 @@ static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw,
pr_debug("hwsim sw_scan request, prepping stuff\n");
memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN);
+ mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, true);
hwsim->scanning = true;
memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
@@ -2236,6 +2288,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw,
pr_debug("hwsim sw_scan_complete\n");
hwsim->scanning = false;
+ mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, false);
eth_zero_addr(hwsim->scan_addr);
mutex_unlock(&hwsim->mutex);
@@ -2413,6 +2466,11 @@ static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw,
WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN);
}
+static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ return 1;
+}
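+
+/* mac80211 checks tx_last_beacon before answering probe requests in
+ * IBSS/mesh; hwsim beacons in software, so it can always report success
+ */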
+
#define HWSIM_COMMON_OPS \
.tx = mac80211_hwsim_tx, \
.start = mac80211_hwsim_start, \
@@ -2423,6 +2481,7 @@ static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw,
.config = mac80211_hwsim_config, \
.configure_filter = mac80211_hwsim_configure_filter, \
.bss_info_changed = mac80211_hwsim_bss_info_changed, \
+ .tx_last_beacon = mac80211_hwsim_tx_last_beacon, \
.sta_add = mac80211_hwsim_sta_add, \
.sta_remove = mac80211_hwsim_sta_remove, \
.sta_notify = mac80211_hwsim_sta_notify, \
@@ -2995,6 +3054,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_SUPPORTS_5_10_MHZ |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
@@ -3003,6 +3063,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
hw->wiphy->interface_modes = param->iftypes;
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 28ade92adcb4..9dceed77c5d6 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -75,6 +75,12 @@ enum hwsim_tx_control_flags {
* @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted
* @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses:
* %HWSIM_ATTR_RADIO_ID
+ * @HWSIM_CMD_ADD_MAC_ADDR: add a receive MAC address (given in the
+ * %HWSIM_ATTR_ADDR_RECEIVER attribute) to a device identified by
+ * %HWSIM_ATTR_ADDR_TRANSMITTER. This lets wmediumd forward frames
+ * to this receiver address for a given station.
+ * @HWSIM_CMD_DEL_MAC_ADDR: remove the MAC address again, the attributes
+ * are the same as to @HWSIM_CMD_ADD_MAC_ADDR.
* @__HWSIM_CMD_MAX: enum limit
*/
enum {
@@ -85,6 +91,8 @@ enum {
HWSIM_CMD_NEW_RADIO,
HWSIM_CMD_DEL_RADIO,
HWSIM_CMD_GET_RADIO,
+ HWSIM_CMD_ADD_MAC_ADDR,
+ HWSIM_CMD_DEL_MAC_ADDR,
__HWSIM_CMD_MAX,
};
#define HWSIM_CMD_MAX (__HWSIM_CMD_MAX - 1)
diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h
index 80878561cb90..3c193074662b 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.h
+++ b/drivers/net/wireless/marvell/libertas/cmd.h
@@ -76,7 +76,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv,
/* Events */
-int lbs_process_event(struct lbs_private *priv, u32 event);
+void lbs_process_event(struct lbs_private *priv, u32 event);
/* Actual commands */
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index b73d08381398..cb515c5584c1 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -220,9 +220,8 @@ done:
return ret;
}
-int lbs_process_event(struct lbs_private *priv, u32 event)
+void lbs_process_event(struct lbs_private *priv, u32 event)
{
- int ret = 0;
struct cmd_header cmd;
switch (event) {
@@ -351,6 +350,4 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
netdev_alert(priv->dev, "EVENT: unknown event id %d\n", event);
break;
}
-
- return ret;
}
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index acf61b93b782..44fbd0acb87a 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -65,7 +65,7 @@ static const struct sdio_device_id if_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL,
SDIO_DEVICE_ID_MARVELL_LIBERTAS) },
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL,
- SDIO_DEVICE_ID_MARVELL_8688WLAN) },
+ SDIO_DEVICE_ID_MARVELL_8688_WLAN) },
{ /* end: all zeroes */ },
};
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index 44c8a550da4c..f5b78257d551 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -828,10 +828,8 @@ static void lbs_persist_config_remove(struct net_device *dev)
* Check mesh FW version and appropriately send the mesh start
* command
*/
-int lbs_init_mesh(struct lbs_private *priv)
+void lbs_init_mesh(struct lbs_private *priv)
{
- int ret = 0;
-
/* Determine mesh_fw_ver from fwrelease and fwcapinfo */
/* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
/* 5.110.22 has the mesh command with command id 0xa3 */
@@ -870,8 +868,6 @@ int lbs_init_mesh(struct lbs_private *priv)
/* Stop meshing until interface is brought up */
lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, 1);
-
- return ret;
}
void lbs_start_mesh(struct lbs_private *priv)
diff --git a/drivers/net/wireless/marvell/libertas/mesh.h b/drivers/net/wireless/marvell/libertas/mesh.h
index 1561018f226f..d49717b20c09 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.h
+++ b/drivers/net/wireless/marvell/libertas/mesh.h
@@ -16,7 +16,7 @@
struct net_device;
-int lbs_init_mesh(struct lbs_private *priv);
+void lbs_init_mesh(struct lbs_private *priv);
void lbs_start_mesh(struct lbs_private *priv);
int lbs_deinit_mesh(struct lbs_private *priv);
diff --git a/drivers/net/wireless/marvell/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c
index 58a1fc433b73..f28aa09d1f9e 100644
--- a/drivers/net/wireless/marvell/libertas/rx.c
+++ b/drivers/net/wireless/marvell/libertas/rx.c
@@ -62,9 +62,6 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
struct rxpd *p_rx_pd;
int hdrchop;
struct ethhdr *p_ethhdr;
- static const u8 rfc1042_eth_hdr[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
- };
BUG_ON(!skb);
@@ -102,7 +99,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
sizeof(p_rx_pkt->eth803_hdr.src_addr));
if (memcmp(&p_rx_pkt->rfc1042_hdr,
- rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)) == 0) {
+ rfc1042_header, sizeof(rfc1042_header)) == 0) {
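+ /* rfc1042_header is the shared LLC/SNAP template exported by
+ * cfg80211, replacing the local rfc1042_eth_hdr copy removed above
+ */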
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type (ethertype)
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 25ac9db35dbf..bedc09215088 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -247,10 +247,10 @@ static void if_usb_disconnect(struct usb_interface *intf)
lbtf_deb_enter(LBTF_DEB_MAIN);
- if_usb_reset_device(priv);
-
- if (priv)
+ if (priv) {
+ if_usb_reset_device(priv);
lbtf_remove_card(priv);
+ }
/* Unlink and free urb */
if_usb_free(cardp);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 1566d2197906..4e4f59c17ded 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -269,17 +269,12 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
* CFG802.11 operation handler to update mgmt frame registrations.
*/
static void
-mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- u16 frame_type, bool reg)
+mwifiex_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct mgmt_frame_regs *upd)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
- u32 mask;
-
- if (reg)
- mask = priv->mgmt_frame_mask | BIT(frame_type >> 4);
- else
- mask = priv->mgmt_frame_mask & ~BIT(frame_type >> 4);
+ u32 mask = upd->interface_stypes;
if (mask != priv->mgmt_frame_mask) {
priv->mgmt_frame_mask = mask;
@@ -1496,7 +1491,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- static struct mwifiex_sta_node *node;
+ struct mwifiex_sta_node *node;
+ int i;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
priv->media_connected && idx == 0) {
@@ -1506,13 +1502,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST,
HostCmd_ACT_GEN_GET, 0, NULL, true);
- if (node && (&node->list == &priv->sta_list)) {
- node = NULL;
- return -ENOENT;
- }
-
- node = list_prepare_entry(node, &priv->sta_list, list);
- list_for_each_entry_continue(node, &priv->sta_list, list) {
+ i = 0;
+ list_for_each_entry(node, &priv->sta_list, list) {
+ if (i++ != idx)
+ continue;
ether_addr_copy(mac, node->mac_addr);
return mwifiex_dump_station_info(priv, node, sinfo);
}
@@ -4189,7 +4182,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.del_key = mwifiex_cfg80211_del_key,
.set_default_mgmt_key = mwifiex_cfg80211_set_default_mgmt_key,
.mgmt_tx = mwifiex_cfg80211_mgmt_tx,
- .mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register,
+ .update_mgmt_frame_registrations =
+ mwifiex_cfg80211_update_mgmt_frame_registrations,
.remain_on_channel = mwifiex_cfg80211_remain_on_channel,
.cancel_remain_on_channel = mwifiex_cfg80211_cancel_remain_on_channel,
.set_default_key = mwifiex_cfg80211_set_default_key,
@@ -4341,6 +4335,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
wiphy->n_iface_combinations = 1;
+ if (adapter->max_sta_conn > adapter->max_p2p_conn)
+ wiphy->max_ap_assoc_sta = adapter->max_sta_conn;
+ else
+ wiphy->max_ap_assoc_sta = adapter->max_p2p_conn;
+
/* Initialize cipher suites */
wiphy->cipher_suites = mwifiex_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
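The dump_station rework replaces a static list cursor, which leaked iteration state across calls and across devices, with a stateless walk that counts entries up to idx. A small sketch of the same pattern on a plain singly linked list (illustrative, not the kernel list API):

#include <stdio.h>
#include <stddef.h>

struct node { const char *mac; struct node *next; };

/* Stateless dump: walk the list from the head on every call and return
 * the idx-th entry, mirroring the list_for_each_entry() + counter fix. */
static const char *dump_station(struct node *head, int idx)
{
	int i = 0;

	for (struct node *n = head; n; n = n->next)
		if (i++ == idx)
			return n->mac;
	return NULL; /* the driver returns -ENOENT here */
}

int main(void)
{
	struct node c = { "cc", NULL }, b = { "bb", &c }, a = { "aa", &b };

	printf("%s %s\n", dump_station(&a, 1), dump_station(&a, 2)); /* bb cc */
	return 0;
}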
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 7e4b8cd52605..d068b9075c32 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1495,6 +1495,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_header *tlv;
struct hw_spec_api_rev *api_rev;
+ struct hw_spec_max_conn *max_conn;
u16 resp_size, api_id;
int i, left_len, parsed_len = 0;
@@ -1581,8 +1582,21 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
adapter->fw_api_ver =
api_rev->major_ver;
mwifiex_dbg(adapter, INFO,
- "Firmware api version %d\n",
- adapter->fw_api_ver);
+ "Firmware api version %d.%d\n",
+ adapter->fw_api_ver,
+ api_rev->minor_ver);
+ break;
+ case UAP_FW_API_VER_ID:
+ mwifiex_dbg(adapter, INFO,
+ "uAP api version %d.%d\n",
+ api_rev->major_ver,
+ api_rev->minor_ver);
+ break;
+ case CHANRPT_API_VER_ID:
+ mwifiex_dbg(adapter, INFO,
+ "channel report api version %d.%d\n",
+ api_rev->major_ver,
+ api_rev->minor_ver);
break;
default:
mwifiex_dbg(adapter, FATAL,
@@ -1591,6 +1605,17 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
break;
}
break;
+ case TLV_TYPE_MAX_CONN:
+ max_conn = (struct hw_spec_max_conn *)tlv;
+ adapter->max_p2p_conn = max_conn->max_p2p_conn;
+ adapter->max_sta_conn = max_conn->max_sta_conn;
+ mwifiex_dbg(adapter, INFO,
+ "max p2p connections: %u\n",
+ adapter->max_p2p_conn);
+ mwifiex_dbg(adapter, INFO,
+ "max sta connections: %u\n",
+ adapter->max_sta_conn);
+ break;
default:
mwifiex_dbg(adapter, FATAL,
"Unknown GET_HW_SPEC TLV type: %#x\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index a415d73a73e6..8047e307892e 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -220,6 +220,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_BSS_MODE (PROPRIETARY_TLV_BASE_ID + 206)
#define TLV_TYPE_RANDOM_MAC (PROPRIETARY_TLV_BASE_ID + 236)
#define TLV_TYPE_CHAN_ATTR_CFG (PROPRIETARY_TLV_BASE_ID + 237)
+#define TLV_TYPE_MAX_CONN (PROPRIETARY_TLV_BASE_ID + 279)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -1052,6 +1053,8 @@ struct host_cmd_ds_802_11_ps_mode_enh {
enum API_VER_ID {
KEY_API_VER_ID = 1,
FW_API_VER_ID = 2,
+ UAP_FW_API_VER_ID = 3,
+ CHANRPT_API_VER_ID = 4,
};
struct hw_spec_api_rev {
@@ -2386,4 +2389,11 @@ struct mwifiex_opt_sleep_confirm {
__le16 action;
__le16 resp_ctrl;
} __packed;
+
+struct hw_spec_max_conn {
+ struct mwifiex_ie_types_header header;
+ u8 max_p2p_conn;
+ u8 max_sta_conn;
+} __packed;
+
#endif /* !_MWIFIEX_FW_H_ */
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index afaffc325452..5923c5c14c8d 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1022,6 +1022,7 @@ struct mwifiex_adapter {
bool ext_scan;
u8 fw_api_ver;
u8 key_api_major_ver, key_api_minor_ver;
+ u8 max_p2p_conn, max_sta_conn;
struct memory_type_mapping *mem_type_mapping_tbl;
u8 num_mem_types;
bool scan_chan_gap_enabled;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 6a2dcb01caf4..a042965962a2 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -480,45 +480,25 @@ static void mwifiex_sdio_coredump(struct device *dev)
schedule_work(&card->work);
}
-/* Device ID for SD8786 */
-#define SDIO_DEVICE_ID_MARVELL_8786 (0x9116)
-/* Device ID for SD8787 */
-#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
-/* Device ID for SD8797 */
-#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129)
-/* Device ID for SD8897 */
-#define SDIO_DEVICE_ID_MARVELL_8897 (0x912d)
-/* Device ID for SD8887 */
-#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135)
-/* Device ID for SD8801 */
-#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
-/* Device ID for SD8977 */
-#define SDIO_DEVICE_ID_MARVELL_8977 (0x9145)
-/* Device ID for SD8987 */
-#define SDIO_DEVICE_ID_MARVELL_8987 (0x9149)
-/* Device ID for SD8997 */
-#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141)
-
-
/* WLAN IDs */
static const struct sdio_device_id mwifiex_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786_WLAN),
.driver_data = (unsigned long) &mwifiex_sdio_sd8786},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787_WLAN),
.driver_data = (unsigned long) &mwifiex_sdio_sd8787},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_WLAN),
.driver_data = (unsigned long) &mwifiex_sdio_sd8797},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897_WLAN),
.driver_data = (unsigned long) &mwifiex_sdio_sd8897},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887_WLAN),
.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801_WLAN),
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977_WLAN),
.driver_data = (unsigned long)&mwifiex_sdio_sd8977},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987_WLAN),
.driver_data = (unsigned long)&mwifiex_sdio_sd8987},
- {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997_WLAN),
.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
{},
};
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 0bd93f26bd7f..8bd355d7974e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -853,43 +853,36 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
memset(&key_material->key_param_set, 0,
sizeof(struct mwifiex_ie_type_key_param_set));
if (enc_key->is_wapi_key) {
+ struct mwifiex_ie_type_key_param_set *set;
+
mwifiex_dbg(priv->adapter, INFO, "info: Set WAPI Key\n");
- key_material->key_param_set.key_type_id =
- cpu_to_le16(KEY_TYPE_ID_WAPI);
+ set = &key_material->key_param_set;
+ set->key_type_id = cpu_to_le16(KEY_TYPE_ID_WAPI);
if (cmd_oid == KEY_INFO_ENABLED)
- key_material->key_param_set.key_info =
- cpu_to_le16(KEY_ENABLED);
+ set->key_info = cpu_to_le16(KEY_ENABLED);
else
- key_material->key_param_set.key_info =
- cpu_to_le16(!KEY_ENABLED);
+ set->key_info = cpu_to_le16(!KEY_ENABLED);
- key_material->key_param_set.key[0] = enc_key->key_index;
+ set->key[0] = enc_key->key_index;
if (!priv->sec_info.wapi_key_on)
- key_material->key_param_set.key[1] = 1;
+ set->key[1] = 1;
else
/* set to 0 when re-keying */
- key_material->key_param_set.key[1] = 0;
+ set->key[1] = 0;
if (!is_broadcast_ether_addr(enc_key->mac_addr)) {
/* WAPI pairwise key: unicast */
- key_material->key_param_set.key_info |=
- cpu_to_le16(KEY_UNICAST);
+ set->key_info |= cpu_to_le16(KEY_UNICAST);
} else { /* WAPI group key: multicast */
- key_material->key_param_set.key_info |=
- cpu_to_le16(KEY_MCAST);
+ set->key_info |= cpu_to_le16(KEY_MCAST);
priv->sec_info.wapi_key_on = true;
}
- key_material->key_param_set.type =
- cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
- key_material->key_param_set.key_len =
- cpu_to_le16(WAPI_KEY_LEN);
- memcpy(&key_material->key_param_set.key[2],
- enc_key->key_material, enc_key->key_len);
- memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
- enc_key->pn, PN_LEN);
- key_material->key_param_set.length =
- cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
+ set->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
+ set->key_len = cpu_to_le16(WAPI_KEY_LEN);
+ memcpy(&set->key[2], enc_key->key_material, enc_key->key_len);
+ memcpy(&set->key[2 + enc_key->key_len], enc_key->pn, PN_LEN);
+ set->length = cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
key_param_len = (WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN) +
sizeof(struct mwifiex_ie_types_header);
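The sta_cmd.c rewrite is purely mechanical: a local set pointer aliases key_material->key_param_set once, so every subsequent access fits on one line and behavior is unchanged. The same pattern in a self-contained sketch (struct layout simplified):

#include <stdio.h>
#include <string.h>

struct key_param_set { unsigned short type, key_len; unsigned char key[32]; };
struct key_material { struct key_param_set key_param_set; };

int main(void)
{
	struct key_material km;
	struct key_param_set *set = &km.key_param_set; /* alias once... */

	memset(&km, 0, sizeof(km));
	set->type = 0x0100;	/* ...then every access stays short */
	set->key_len = 16;
	set->key[0] = 1;
	printf("type=%#x len=%u\n", set->type, set->key_len);
	return 0;
}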
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 47fb4b3ea004..97f23f93f6e7 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -2668,7 +2668,7 @@ struct mwl8k_cmd_mac_multicast_adr {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 numaddr;
- __u8 addr[0][ETH_ALEN];
+ __u8 addr[][ETH_ALEN];
};
#define MWL8K_ENABLE_RX_DIRECTED 0x0001
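The mwl8k.c change converts a GNU zero-length array to a C99 flexible array member; the allocation math is unchanged, but the compiler can now diagnose misuse such as taking sizeof of the trailing array. A standalone allocation sketch with a simplified layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct mac_multicast_cmd {
	unsigned short action;
	unsigned short numaddr;
	unsigned char  addr[][ETH_ALEN]; /* C99 flexible array member */
};

int main(void)
{
	int n = 3;
	struct mac_multicast_cmd *cmd =
		malloc(sizeof(*cmd) + n * ETH_ALEN); /* header + n entries */

	if (!cmd)
		return 1;
	cmd->numaddr = n;
	memset(cmd->addr, 0xff, n * ETH_ALEN); /* fill with broadcast */
	printf("%zu header bytes + %u addrs\n", sizeof(*cmd), cmd->numaddr);
	free(cmd);
	return 0;
}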
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index cbc2d8a5d354..41533a0e1720 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -24,3 +24,4 @@ source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig"
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index d7a1ddc9e407..ef663b873b0b 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -26,4 +26,5 @@ mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
obj-$(CONFIG_MT7603E) += mt7603/
-obj-$(CONFIG_MT7615E) += mt7615/
+obj-$(CONFIG_MT7615_COMMON) += mt7615/
+obj-$(CONFIG_MT7915E) += mt7915/
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index f77f03530259..df25c00d9e06 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -119,7 +119,7 @@ static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
+ struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct mt76_rx_tid *tid;
u16 seqno;
@@ -147,13 +147,13 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct ieee80211_sta *sta;
struct mt76_rx_tid *tid;
bool sn_less;
- u16 seqno, head, size;
- u8 ackp, idx;
+ u16 seqno, head, size, idx;
+ u8 ackp;
__skb_queue_tail(frames, skb);
@@ -239,7 +239,7 @@ out:
}
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
- u16 ssn, u8 size)
+ u16 ssn, u16 size)
{
struct mt76_rx_tid *tid;
@@ -264,7 +264,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
- u8 size = tid->size;
+ u16 size = tid->size;
int i;
spin_lock_bh(&tid->lock);
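The agg-rx.c widening matters because 802.11ax (HE) allows block-ack windows of 256 frames, and 256 wraps to 0 in a u8. A two-line demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int ba_window = 256;		/* HE block-ack window */
	uint8_t old_size = (uint8_t)ba_window;	/* truncates to 0 */
	uint16_t new_size = (uint16_t)ba_window;

	printf("u8: %u, u16: %u\n", old_size, new_size); /* u8: 0, u16: 256 */
	return 0;
}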
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index d2202acb8dc6..3a5de1d1b121 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -46,6 +46,25 @@ int mt76_queues_read(struct seq_file *s, void *data)
}
EXPORT_SYMBOL_GPL(mt76_queues_read);
+static int mt76_rx_queues_read(struct seq_file *s, void *data)
+{
+ struct mt76_dev *dev = dev_get_drvdata(s->private);
+ int i, queued;
+
+ mt76_for_each_q_rx(dev, i) {
+ struct mt76_queue *q = &dev->q_rx[i];
+
+ if (!q->ndesc)
+ continue;
+
+ queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;
+ seq_printf(s, "%d: queued=%d head=%d tail=%d\n",
+ i, queued, q->head, q->tail);
+ }
+
+ return 0;
+}
+
void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len)
{
@@ -92,6 +111,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
debugfs_create_blob("otp", 0400, dir, &dev->otp);
debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
mt76_read_rate_txpower);
+ debugfs_create_devm_seqfile(dev->dev, "rx-queues", dir,
+ mt76_rx_queues_read);
return dir;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 75e659774e07..f4d6074fe32a 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -576,7 +576,7 @@ mt76_dma_init(struct mt76_dev *dev)
init_dummy_netdev(&dev->napi_dev);
- for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+ mt76_for_each_q_rx(dev, i) {
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
@@ -610,7 +610,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
mt76_dma_tx_cleanup(dev, i, true);
- for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+ mt76_for_each_q_rx(dev, i) {
netif_napi_del(&dev->napi[i]);
mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index f44f99184c10..907098101898 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -116,12 +116,12 @@ static void mt76_led_cleanup(struct mt76_dev *dev)
led_classdev_unregister(&dev->led_cdev);
}
-static void mt76_init_stream_cap(struct mt76_dev *dev,
+static void mt76_init_stream_cap(struct mt76_phy *phy,
struct ieee80211_supported_band *sband,
bool vht)
{
struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
- int i, nstream = hweight8(dev->phy.antenna_mask);
+ int i, nstream = hweight8(phy->antenna_mask);
struct ieee80211_sta_vht_cap *vht_cap;
u16 mcs_map = 0;
@@ -153,12 +153,12 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}
-void mt76_set_stream_caps(struct mt76_dev *dev, bool vht)
+void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
- if (dev->cap.has_2ghz)
- mt76_init_stream_cap(dev, &dev->phy.sband_2g.sband, false);
- if (dev->cap.has_5ghz)
- mt76_init_stream_cap(dev, &dev->phy.sband_5g.sband, vht);
+ if (phy->dev->cap.has_2ghz)
+ mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
+ if (phy->dev->cap.has_5ghz)
+ mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
@@ -198,9 +198,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
- mt76_init_stream_cap(dev, sband, vht);
+ mt76_init_stream_cap(&dev->phy, sband, vht);
if (!vht)
return 0;
@@ -279,7 +278,8 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_SUPPORTS_TDLS;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
@@ -294,7 +294,6 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
hw->max_tx_fragments = 16;
ieee80211_hw_set(hw, SIGNAL_DBM);
- ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
@@ -314,6 +313,8 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC);
}
@@ -677,7 +678,6 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_hw **hw,
struct ieee80211_sta **sta)
{
-
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct mt76_rx_status mstat;
@@ -689,6 +689,9 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
status->enc_flags = mstat.enc_flags;
status->encoding = mstat.encoding;
status->bw = mstat.bw;
+ status->he_ru = mstat.he_ru;
+ status->he_gi = mstat.he_gi;
+ status->he_dcm = mstat.he_dcm;
status->rate_idx = mstat.rate_idx;
status->nss = mstat.nss;
status->band = mstat.band;
@@ -725,7 +728,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
* Validate the first fragment both here and in mac80211
* All further fragments will be validated by mac80211 only.
*/
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = mt76_skb_get_hdr(skb);
if (ieee80211_is_frag(hdr) &&
!ieee80211_is_first_frag(hdr->frame_control))
return 0;
@@ -798,7 +801,7 @@ mt76_airtime_flush_ampdu(struct mt76_dev *dev)
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_wcid *wcid = status->wcid;
@@ -835,7 +838,7 @@ static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct ieee80211_sta *sta;
struct ieee80211_hw *hw;
struct mt76_wcid *wcid = status->wcid;
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index 4048f446e3ee..ade61a5334c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -6,10 +6,11 @@
#include "mt76.h"
struct sk_buff *
-mt76_mcu_msg_alloc(const void *data, int head_len,
- int data_len, int tail_len)
+mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+ int data_len)
{
- int length = head_len + data_len + tail_len;
+ const struct mt76_mcu_ops *ops = dev->mcu_ops;
+ int length = ops->headroom + data_len + ops->tailroom;
struct sk_buff *skb;
skb = alloc_skb(length, GFP_KERNEL);
@@ -17,7 +18,7 @@ mt76_mcu_msg_alloc(const void *data, int head_len,
return NULL;
memset(skb->head, 0, length);
- skb_reserve(skb, head_len);
+ skb_reserve(skb, ops->headroom);
if (data && data_len)
skb_put_data(skb, data, data_len);
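With headroom and tailroom moved into mt76_mcu_ops, callers of mt76_mcu_msg_alloc() no longer pass bus-specific lengths. A userspace sketch of the same idea, with a plain buffer standing in for the skb and all names illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Per-bus constants, looked up from ops instead of passed by every caller. */
struct mcu_ops { unsigned int headroom, tailroom; };

struct msg { unsigned char *buf; size_t len, payload_off; };

static struct msg *mcu_msg_alloc(const struct mcu_ops *ops,
				 const void *data, size_t data_len)
{
	size_t len = ops->headroom + data_len + ops->tailroom;
	struct msg *m = malloc(sizeof(*m));

	if (!m || !(m->buf = calloc(1, len))) {
		free(m);
		return NULL;
	}
	m->len = len;
	m->payload_off = ops->headroom; /* the skb_reserve() equivalent */
	if (data)
		memcpy(m->buf + m->payload_off, data, data_len);
	return m;
}

int main(void)
{
	struct mcu_ops ops = { .headroom = 16, .tailroom = 4 };
	struct msg *m = mcu_msg_alloc(&ops, "cmd", 3);

	if (!m)
		return 1;
	printf("total %zu, payload at %zu\n", m->len, m->payload_off);
	free(m->buf);
	free(m);
	return 0;
}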
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 7ead6620bb8b..26353b6bce97 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -73,7 +73,8 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
spin_lock_irqsave(&dev->mmio.irq_lock, flags);
dev->mmio.irqmask &= ~clear;
dev->mmio.irqmask |= set;
- mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+ if (addr)
+ mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 8e4759bc8f59..dfe625a53c63 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -60,6 +60,7 @@ enum mt76_txq_id {
MT_TXQ_BK = IEEE80211_AC_BK,
MT_TXQ_PSD,
MT_TXQ_MCU,
+ MT_TXQ_MCU_WA,
MT_TXQ_BEACON,
MT_TXQ_CAB,
MT_TXQ_FWDL,
@@ -69,6 +70,7 @@ enum mt76_txq_id {
enum mt76_rxq_id {
MT_RXQ_MAIN,
MT_RXQ_MCU,
+ MT_RXQ_MCU_WA,
__MT_RXQ_MAX
};
@@ -137,6 +139,9 @@ struct mt76_sw_queue {
};
struct mt76_mcu_ops {
+ u32 headroom;
+ u32 tailroom;
+
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
@@ -178,7 +183,7 @@ enum mt76_wcid_flags {
MT_WCID_FLAG_PS,
};
-#define MT76_N_WCIDS 128
+#define MT76_N_WCIDS 288
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
@@ -198,7 +203,7 @@ struct mt76_wcid {
struct ewma_signal rssi;
int inactive_count;
- u8 idx;
+ u16 idx;
u8 hw_key_idx;
u8 sta:1;
@@ -241,8 +246,8 @@ struct mt76_rx_tid {
struct delayed_work reorder_work;
u16 head;
- u8 size;
- u8 nframes;
+ u16 size;
+ u16 nframes;
u8 num;
@@ -265,7 +270,7 @@ struct mt76_rx_tid {
struct mt76_tx_cb {
unsigned long jiffies;
- u8 wcid;
+ u16 wcid;
u8 pktid;
u8 flags;
};
@@ -275,10 +280,16 @@ enum {
MT76_STATE_RUNNING,
MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
+ MT76_HW_SCANNING,
+ MT76_HW_SCHED_SCANNING,
+ MT76_RESTART,
MT76_RESET,
MT76_MCU_RESET,
MT76_REMOVED,
MT76_READING_STATS,
+ MT76_STATE_POWER_OFF,
+ MT76_STATE_SUSPEND,
+ MT76_STATE_ROC,
};
struct mt76_hw_cap {
@@ -372,6 +383,7 @@ enum mt_vendor_req {
MT_VEND_READ_CFG = 0x47,
MT_VEND_READ_EXT = 0x63,
MT_VEND_WRITE_EXT = 0x66,
+ MT_VEND_FEATURE_SET = 0x91,
};
enum mt76u_in_ep {
@@ -435,7 +447,7 @@ struct mt76_mmio {
struct mt76_rx_status {
union {
struct mt76_wcid *wcid;
- u8 wcid_idx;
+ u16 wcid_idx;
};
unsigned long reorder_time;
@@ -452,7 +464,8 @@ struct mt76_rx_status {
u16 freq;
u32 flag;
u8 enc_flags;
- u8 encoding:2, bw:3;
+ u8 encoding:2, bw:3, he_ru:3;
+ u8 he_gi:2, he_dcm:1;
u8 rate_idx;
u8 nss;
u8 band;
@@ -524,8 +537,8 @@ struct mt76_dev {
wait_queue_head_t tx_wait;
struct sk_buff_head status_list;
- unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
- unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];
+ u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
+ u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
@@ -570,6 +583,10 @@ enum mt76_phy_type {
MT_PHY_TYPE_HT,
MT_PHY_TYPE_HT_GF,
MT_PHY_TYPE_VHT,
+ MT_PHY_TYPE_HE_SU = 8,
+ MT_PHY_TYPE_HE_EXT_SU,
+ MT_PHY_TYPE_HE_TB,
+ MT_PHY_TYPE_HE_MU,
};
#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
@@ -611,7 +628,7 @@ enum mt76_phy_type {
#define mt76_hw(dev) (dev)->mphy.hw
static inline struct ieee80211_hw *
-mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
+mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
{
if (wcid <= MT76_N_WCIDS &&
mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
@@ -654,6 +671,10 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+#define mt76_for_each_q_rx(dev, i) \
+ for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
+ (dev)->q_rx[i].ndesc; i++)
+
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops);
@@ -735,6 +756,25 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
+static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
+{
+ struct mt76_rx_status mstat;
+ u8 *data = skb->data;
+
+ /* Alignment concerns */
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);
+
+ mstat = *((struct mt76_rx_status *)skb->cb);
+
+ if (mstat.flag & RX_FLAG_RADIOTAP_HE)
+ data += sizeof(struct ieee80211_radiotap_he);
+ if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
+ data += sizeof(struct ieee80211_radiotap_he_mu);
+
+ return data;
+}
+
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
int len = ieee80211_get_hdrlen_from_skb(skb);
@@ -785,10 +825,10 @@ void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
-void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
+void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
- u16 ssn, u8 size);
+ u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
@@ -911,8 +951,8 @@ int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
struct sk_buff *
-mt76_mcu_msg_alloc(const void *data, int head_len,
- int data_len, int tail_len);
+mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+ int data_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);
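Raising MT76_N_WCIDS to 288 breaks the old bitmap sizing: with 64-bit longs, 288 / BITS_PER_LONG truncates to 4 words and covers only 256 stations, which is why the arrays become u32s sized with DIV_ROUND_UP. A sketch of the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define N_WCIDS 288
#define BITS_PER_LONG 64 /* typical 64-bit kernel */

int main(void)
{
	/* Old sizing truncates: 288 / 64 = 4 longs = only 256 bits. */
	printf("truncated: %d longs (%d bits)\n",
	       N_WCIDS / BITS_PER_LONG,
	       (N_WCIDS / BITS_PER_LONG) * BITS_PER_LONG);

	/* New sizing rounds up: DIV_ROUND_UP(288, 32) = 9 u32s = 288 bits. */
	printf("rounded:   %d u32s (%d bits)\n",
	       DIV_ROUND_UP(N_WCIDS, 32), DIV_ROUND_UP(N_WCIDS, 32) * 32);
	return 0;
}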
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
index cc7c788abedd..8ce6880b2bb8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -113,7 +113,7 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
return;
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt76_queues_read);
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
index 2b6a4d8a8dc7..3ee06e2577b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: ISC
+#include <linux/of.h>
#include "mt7603.h"
#include "eeprom.h"
@@ -100,10 +101,14 @@ mt7603_apply_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
MT_EE_TX_POWER_1_START_2G,
MT_EE_TX_POWER_1_START_2G + 1,
};
+ struct device_node *np = dev->mt76.dev->of_node;
u8 *eeprom = dev->mt76.eeprom.data;
int n = ARRAY_SIZE(cal_free_bytes);
int i;
+ if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp"))
+ return;
+
if (!mt7603_has_cal_free_data(dev, efuse))
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index f641a8b56b39..94196599797e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -342,6 +342,8 @@ static const struct ieee80211_iface_limit if_limits[] = {
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP)
},
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 39b7c5d6e6cd..8060c1514396 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -51,10 +51,11 @@ void mt7603_mac_set_timing(struct mt7603_dev *dev)
int offset = 3 * dev->coverage_class;
u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+ bool is_5ghz = dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ;
int sifs;
u32 val;
- if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
+ if (is_5ghz)
sifs = 16;
else
sifs = 10;
@@ -71,7 +72,7 @@ void mt7603_mac_set_timing(struct mt7603_dev *dev)
FIELD_PREP(MT_IFS_SIFS, sifs) |
FIELD_PREP(MT_IFS_SLOT, dev->slottime));
- if (dev->slottime < 20)
+ if (dev->slottime < 20 || is_5ghz)
val = MT7603_CFEND_RATE_DEFAULT;
else
val = MT7603_CFEND_RATE_11B;
@@ -318,11 +319,16 @@ void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
int idx = msta->wcid.idx;
+ u8 ampdu_density;
u32 addr;
u32 val;
addr = mt7603_wtbl1_addr(idx);
+ ampdu_density = sta->ht_cap.ampdu_density;
+ if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
+ ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
val = mt76_rr(dev, addr + 2 * 4);
val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
@@ -467,7 +473,7 @@ mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
struct mt7603_sta *sta;
struct mt76_wcid *wcid;
- if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ if (idx >= MT7603_WTBL_SIZE)
return NULL;
wcid = rcu_dereference(dev->mt76.wcid[idx]);
@@ -1097,7 +1103,7 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
- first_idx = max_t(int, 0, last_idx - (count + 1) / MT7603_RATE_RETRY);
+ first_idx = max_t(int, 0, last_idx - (count - 1) / MT7603_RATE_RETRY);
if (fixed_rate && !probe) {
info->status.rates[0].count = count;
@@ -1232,7 +1238,7 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
if (pid == MT_PACKET_ID_NO_ACK)
return;
- if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
+ if (wcidx >= MT7603_WTBL_SIZE)
return;
rcu_read_lock();
@@ -1432,8 +1438,9 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+ mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
+ }
mt7603_dma_sched_reset(dev);
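In the wtbl hunk, the per-station MPDU density is now clamped to a hardware minimum instead of the driver advertising one fixed density for every peer (the global ampdu_density default is dropped in mac80211.c above). A sketch of the clamp; the enum value is assumed here, not taken from this patch:

#include <stdio.h>

#define HT_MPDU_DENSITY_4 5 /* assumed value of IEEE80211_HT_MPDU_DENSITY_4 */

/* Honor the peer's advertised density, but never go below what the
 * hardware can sustain. */
static unsigned int wtbl_density(unsigned int sta_density)
{
	return sta_density < HT_MPDU_DENSITY_4 ? HT_MPDU_DENSITY_4
					       : sta_density;
}

int main(void)
{
	printf("%u %u\n", wtbl_density(2), wtbl_density(7)); /* 5 7 */
	return 0;
}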
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 77985d81c447..a47a3a644ecc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -62,7 +62,7 @@ mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
struct sk_buff *skb;
int ret, seq;
- skb = mt7603_mcu_msg_alloc(data, len);
+ skb = mt76_mcu_msg_alloc(mdev, data, len);
if (!skb)
return -ENOMEM;
@@ -265,6 +265,7 @@ out:
int mt7603_mcu_init(struct mt7603_dev *dev)
{
static const struct mt76_mcu_ops mt7603_mcu_ops = {
+ .headroom = sizeof(struct mt7603_mcu_txd),
.mcu_send_msg = mt7603_mcu_msg_send,
.mcu_restart = mt7603_mcu_restart,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
index 1bba369d5c8a..30df8a3fd11a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
@@ -100,11 +100,4 @@ enum {
MCU_EXT_EVENT_BCN_UPDATE = 0x31,
};
-static inline struct sk_buff *
-mt7603_mcu_msg_alloc(const void *data, int len)
-{
- return mt76_mcu_msg_alloc(data, sizeof(struct mt7603_mcu_txd),
- len, 0);
-}
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index 68efb300c0d8..de170765e938 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -20,10 +20,8 @@ mt76_wmac_probe(struct platform_device *pdev)
return irq;
mem_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mem_base)) {
- dev_err(&pdev->dev, "Failed to get memory resource\n");
+ if (IS_ERR(mem_base))
return PTR_ERR(mem_base);
- }
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
&mt7603_drv_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
index 6afd4aea67ed..e25db1135eda 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -1,7 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
-config MT7615E
- tristate "MediaTek MT7615E (PCIe) support"
+
+config MT7615_COMMON
+ tristate
select MT76_CORE
+
+config MT7615E
+ tristate "MediaTek MT7615E and MT7663E (PCIe) support"
+ select MT7615_COMMON
depends on MAC80211
depends on PCI
help
@@ -22,3 +27,14 @@ config MT7622_WMAC
This adds support for the built-in WMAC on MT7622 SoC devices
which has the same feature set as a MT7615, but limited to
2.4 GHz only.
+
+config MT7663U
+ tristate "MediaTek MT7663U (USB) support"
+ select MT76_USB
+ select MT7615_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7663U 802.11ax 2x2:2 wireless devices.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
index 5c6a220ed7e3..99f353b8b9aa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -1,9 +1,15 @@
#SPDX-License-Identifier: ISC
+obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
obj-$(CONFIG_MT7615E) += mt7615e.o
+obj-$(CONFIG_MT7663U) += mt7663u.o
CFLAGS_trace.o := -I$(src)
-mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o mmio.o \
- debugfs.o trace.o
+mt7615-common-y := main.o init.o mcu.o eeprom.o mac.o \
+ debugfs.o trace.o
+
+mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o
mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
+
+mt7663u-y := usb.o usb_mcu.o usb_init.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index b4d0795154e3..fd3ef483a87c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -20,11 +20,15 @@ static int
mt7615_scs_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
+ struct mt7615_phy *ext_phy;
if (!mt7615_wait_for_mcu_init(dev))
return 0;
- mt7615_mac_set_scs(dev, val);
+ mt7615_mac_set_scs(&dev->phy, val);
+ ext_phy = mt7615_ext_phy(dev);
+ if (ext_phy)
+ mt7615_mac_set_scs(ext_phy, val);
return 0;
}
@@ -34,7 +38,7 @@ mt7615_scs_get(void *data, u64 *val)
{
struct mt7615_dev *dev = data;
- *val = dev->scs_en;
+ *val = dev->phy.scs_en;
return 0;
}
@@ -120,28 +124,52 @@ mt7615_reset_test_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_reset_test, NULL,
mt7615_reset_test_set, "%lld\n");
-static int
-mt7615_ampdu_stat_read(struct seq_file *file, void *data)
+static void
+mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
+ struct seq_file *file)
{
struct mt7615_dev *dev = file->private;
+ u32 reg = is_mt7663(&dev->mt76) ? MT_MIB_ARNG(0) : MT_AGG_ASRCR0;
+ bool ext_phy = phy != &dev->phy;
int bound[7], i, range;
- range = mt76_rr(dev, MT_AGG_ASRCR0);
+ if (!phy)
+ return;
+
+ range = mt76_rr(dev, reg);
for (i = 0; i < 4; i++)
bound[i] = MT_AGG_ASRCR_RANGE(range, i) + 1;
- range = mt76_rr(dev, MT_AGG_ASRCR1);
+
+ range = mt76_rr(dev, reg + 4);
for (i = 0; i < 3; i++)
bound[i + 4] = MT_AGG_ASRCR_RANGE(range, i) + 1;
+ seq_printf(file, "\nPhy %d\n", ext_phy);
+
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
seq_printf(file, "%3d -%3d | ",
bound[i], bound[i + 1]);
seq_puts(file, "\nCount: ");
+
+ range = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < ARRAY_SIZE(bound); i++)
- seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]);
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + range]);
seq_puts(file, "\n");
+ seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
+ seq_printf(file, "PER: %ld.%1ld%%\n",
+ phy->mib.aggr_per / 10, phy->mib.aggr_per % 10);
+}
+
+static int
+mt7615_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt7615_dev *dev = file->private;
+
+ mt7615_ampdu_stat_read_phy(&dev->phy, file);
+ mt7615_ampdu_stat_read_phy(mt7615_ext_phy(dev), file);
+
return 0;
}
@@ -265,10 +293,10 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
return -ENOMEM;
if (is_mt7615(&dev->mt76))
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt7615_queues_read);
else
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt76_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7615_queues_acq);
@@ -297,3 +325,4 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(mt7615_init_debugfs);
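The per-phy stats read prints the aggregation PER with one decimal place from an integer stored in tenths of a percent, avoiding floating point in the kernel. The formatting trick in isolation:

#include <stdio.h>

int main(void)
{
	long per = 123; /* packet error rate in tenths of a percent */

	/* One decimal place without floating point, as in the debugfs dump. */
	printf("PER: %ld.%1ld%%\n", per / 10, per % 10); /* PER: 12.3% */
	return 0;
}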
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index b19f208e3d54..5a124610d4af 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -94,45 +94,6 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
return 0;
}
-void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- __le32 *rxd = (__le32 *)skb->data;
- __le32 *end = (__le32 *)&skb->data[skb->len];
- enum rx_pkt_type type;
- u16 flag;
-
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
- flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
- if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
- type = PKT_TYPE_NORMAL_MCU;
-
- switch (type) {
- case PKT_TYPE_TXS:
- for (rxd++; rxd + 7 <= end; rxd += 7)
- mt7615_mac_add_txs(dev, rxd);
- dev_kfree_skb(skb);
- break;
- case PKT_TYPE_TXRX_NOTIFY:
- mt7615_mac_tx_free(dev, skb);
- break;
- case PKT_TYPE_RX_EVENT:
- mt7615_mcu_rx_event(dev, skb);
- break;
- case PKT_TYPE_NORMAL_MCU:
- case PKT_TYPE_NORMAL:
- if (!mt7615_mac_fill_rx(dev, skb)) {
- mt76_rx(&dev->mt76, q, skb);
- return;
- }
- /* fall through */
- default:
- dev_kfree_skb(skb);
- break;
- }
-}
-
static void
mt7615_tx_cleanup(struct mt7615_dev *dev)
{
@@ -160,13 +121,52 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
mt7615_tx_cleanup(dev);
+ rcu_read_lock();
mt7615_mac_sta_poll(dev);
+ rcu_read_unlock();
tasklet_schedule(&dev->mt76.tx_tasklet);
return 0;
}
+int mt7615_wait_pdma_busy(struct mt7615_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+
+ if (!is_mt7663(mdev)) {
+ u32 mask = MT_PDMA_TX_BUSY | MT_PDMA_RX_BUSY;
+ u32 reg = mt7615_reg_map(dev, MT_PDMA_BUSY);
+
+ if (!mt76_poll_msec(dev, reg, mask, 0, 1000)) {
+ dev_err(mdev->dev, "PDMA engine busy\n");
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS,
+ MT_PDMA_TX_IDX_BUSY, 0, 1000)) {
+ dev_err(mdev->dev, "PDMA engine tx busy\n");
+ return -EIO;
+ }
+
+ if (!mt76_poll_msec(dev, MT_PSE_PG_INFO,
+ MT_PSE_SRC_CNT, 0, 1000)) {
+ dev_err(mdev->dev, "PSE engine busy\n");
+ return -EIO;
+ }
+
+ if (!mt76_poll_msec(dev, MT_PDMA_BUSY_STATUS,
+ MT_PDMA_BUSY_IDX, 0, 1000)) {
+ dev_err(mdev->dev, "PDMA engine busy\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static void mt7622_dma_sched_init(struct mt7615_dev *dev)
{
u32 reg = mt7615_reg_map(dev, MT_DMASHDL_BASE);
@@ -229,8 +229,13 @@ static void mt7663_dma_sched_init(struct mt7615_dev *dev)
int mt7615_dma_init(struct mt7615_dev *dev)
{
int rx_ring_size = MT7615_RX_RING_SIZE;
+ int rx_buf_size = MT_RX_BUF_SIZE;
int ret;
+ /* Increase buffer size to receive large VHT MPDUs */
+ if (dev->mt76.cap.has_5ghz)
+ rx_buf_size *= 2;
+
mt76_dma_attach(&dev->mt76);
mt76_wr(dev, MT_WPDMA_GLO_CFG,
@@ -271,7 +276,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
/* init rx queues */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
- MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
+ MT7615_RX_MCU_RING_SIZE, rx_buf_size,
MT_RX_RING_BASE);
if (ret)
return ret;
@@ -280,7 +285,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
rx_ring_size /= 2;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
- rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
+ rx_ring_size, rx_buf_size, MT_RX_RING_BASE);
if (ret)
return ret;
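mt7615_wait_pdma_busy() above is a chain of poll-until-clear loops with a 1000 ms deadline, one per busy flag, returning -EIO on timeout. A userspace sketch of the pattern behind mt76_poll_msec(), with a reader callback faking the hardware register:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

/* Poll a "register" until (value & mask) == want or the timeout expires. */
static bool poll_msec(unsigned int (*rd)(void), unsigned int mask,
		      unsigned int want, int timeout_ms)
{
	struct timespec ts = { 0, 1000 * 1000 }; /* 1 ms */

	while (timeout_ms-- > 0) {
		if ((rd() & mask) == want)
			return true;
		nanosleep(&ts, NULL);
	}
	return false;
}

static unsigned int busy_count;
static unsigned int fake_status(void)
{
	return busy_count++ < 3 ? 0x1 : 0x0; /* busy for 3 reads, then idle */
}

int main(void)
{
	puts(poll_msec(fake_status, 0x1, 0, 1000) ? "idle" : "-EIO");
	return 0;
}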
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index dfa9a08b896d..edac37e7847b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -5,6 +5,7 @@
* Felix Fietkau <nbd@nbd.name>
*/
+#include <linux/of.h>
#include "mt7615.h"
#include "eeprom.h"
@@ -40,11 +41,11 @@ static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
return 0;
}
-static int mt7615_efuse_init(struct mt7615_dev *dev)
+static int mt7615_efuse_init(struct mt7615_dev *dev, u32 base)
{
- u32 val, base = mt7615_reg_map(dev, MT_EFUSE_BASE);
int i, len = MT7615_EEPROM_SIZE;
void *buf;
+ u32 val;
val = mt76_rr(dev, base + MT_EFUSE_BASE_CTRL);
if (val & MT_EFUSE_BASE_CTRL_EMPTY)
@@ -67,15 +68,16 @@ static int mt7615_efuse_init(struct mt7615_dev *dev)
return 0;
}
-static int mt7615_eeprom_load(struct mt7615_dev *dev)
+static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr)
{
int ret;
- ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE);
+ ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE +
+ MT7615_EEPROM_EXTRA_DATA);
if (ret < 0)
return ret;
- return mt7615_efuse_init(dev);
+ return mt7615_efuse_init(dev, addr);
}
static int mt7615_check_eeprom(struct mt76_dev *dev)
@@ -109,6 +111,12 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
return;
}
+ if (is_mt7611(&dev->mt76)) {
+ /* 5GHz only */
+ dev->mt76.cap.has_5ghz = true;
+ return;
+ }
+
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
eeprom[MT_EE_WIFI_CONF]);
switch (val) {
@@ -128,14 +136,15 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
{
u8 *eeprom = dev->mt76.eeprom.data;
- u8 tx_mask;
+ u8 tx_mask, max_nss;
mt7615_eeprom_parse_hw_band_cap(dev);
if (is_mt7663(&dev->mt76)) {
- tx_mask = 2;
+ max_nss = 2;
+ tx_mask = FIELD_GET(MT_EE_HW_CONF1_TX_MASK,
+ eeprom[MT7663_EE_HW_CONF1]);
} else {
- u8 max_nss;
u32 val;
/* read tx-rx mask from eeprom */
@@ -144,21 +153,46 @@ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
eeprom[MT_EE_NIC_CONF_0]);
- if (!tx_mask || tx_mask > max_nss)
- tx_mask = max_nss;
}
+ if (!tx_mask || tx_mask > max_nss)
+ tx_mask = max_nss;
dev->chainmask = BIT(tx_mask) - 1;
dev->mphy.antenna_mask = dev->chainmask;
dev->phy.chainmask = dev->chainmask;
}
-int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
- struct ieee80211_channel *chan,
- u8 chain_idx)
+static int mt7663_eeprom_get_target_power_index(struct mt7615_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx)
+{
+ int index, group;
+
+ if (chain_idx > 1)
+ return -EINVAL;
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ return MT7663_EE_TX0_2G_TARGET_POWER + (chain_idx << 4);
+
+ group = mt7615_get_channel_group(chan->hw_value);
+ if (chain_idx == 1)
+ index = MT7663_EE_TX1_5G_G0_TARGET_POWER;
+ else
+ index = MT7663_EE_TX0_5G_G0_TARGET_POWER;
+
+ return index + group * 3;
+}
+
+int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx)
{
int index;
+ if (is_mt7663(&dev->mt76))
+ return mt7663_eeprom_get_target_power_index(dev, chan,
+ chain_idx);
+
if (chain_idx > 3)
return -EINVAL;
@@ -197,6 +231,23 @@ int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
return index;
}
+int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev,
+ enum nl80211_band band)
+{
+ /* assume the first rate has the highest power offset */
+ if (is_mt7663(&dev->mt76)) {
+ if (band == NL80211_BAND_2GHZ)
+ return MT_EE_TX0_5G_G0_TARGET_POWER;
+ else
+ return MT7663_EE_5G_RATE_POWER;
+ }
+
+ if (band == NL80211_BAND_2GHZ)
+ return MT_EE_2G_RATE_POWER;
+ else
+ return MT_EE_5G_RATE_POWER;
+}
+
static void mt7615_apply_cal_free_data(struct mt7615_dev *dev)
{
static const u16 ical[] = {
@@ -255,30 +306,38 @@ static void mt7622_apply_cal_free_data(struct mt7615_dev *dev)
static void mt7615_cal_free_data(struct mt7615_dev *dev)
{
+ struct device_node *np = dev->mt76.dev->of_node;
+
+ if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp"))
+ return;
+
switch (mt76_chip(&dev->mt76)) {
case 0x7622:
mt7622_apply_cal_free_data(dev);
break;
case 0x7615:
+ case 0x7611:
mt7615_apply_cal_free_data(dev);
break;
}
}
-int mt7615_eeprom_init(struct mt7615_dev *dev)
+int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr)
{
int ret;
- ret = mt7615_eeprom_load(dev);
+ ret = mt7615_eeprom_load(dev, addr);
if (ret < 0)
return ret;
ret = mt7615_check_eeprom(&dev->mt76);
- if (ret && dev->mt76.otp.data)
+ if (ret && dev->mt76.otp.data) {
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
MT7615_EEPROM_SIZE);
- else
+ } else {
+ dev->flash_eeprom = true;
mt7615_cal_free_data(dev);
+ }
mt7615_eeprom_parse_hw_cap(dev);
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
@@ -288,3 +347,4 @@ int mt7615_eeprom_init(struct mt7615_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(mt7615_eeprom_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
index 8a2a64b7fcd3..40fed7adc58a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -6,6 +6,21 @@
#include "mt7615.h"
+
+#define MT7615_EEPROM_DCOC_OFFSET MT7615_EEPROM_SIZE
+#define MT7615_EEPROM_DCOC_SIZE 256
+#define MT7615_EEPROM_DCOC_COUNT 34
+
+#define MT7615_EEPROM_TXDPD_OFFSET (MT7615_EEPROM_SIZE + \
+ MT7615_EEPROM_DCOC_COUNT * \
+ MT7615_EEPROM_DCOC_SIZE)
+#define MT7615_EEPROM_TXDPD_SIZE 216
+#define MT7615_EEPROM_TXDPD_COUNT (44 + 3)
+
+#define MT7615_EEPROM_EXTRA_DATA (MT7615_EEPROM_TXDPD_OFFSET + \
+ MT7615_EEPROM_TXDPD_COUNT * \
+ MT7615_EEPROM_TXDPD_SIZE)
+
enum mt7615_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
@@ -13,23 +28,39 @@ enum mt7615_eeprom_field {
MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036,
MT_EE_WIFI_CONF = 0x03e,
+ MT_EE_CALDATA_FLASH = 0x052,
MT_EE_TX0_2G_TARGET_POWER = 0x058,
MT_EE_TX0_5G_G0_TARGET_POWER = 0x070,
+ MT7663_EE_5G_RATE_POWER = 0x089,
MT_EE_TX1_5G_G0_TARGET_POWER = 0x098,
+ MT_EE_2G_RATE_POWER = 0x0be,
+ MT_EE_5G_RATE_POWER = 0x0d5,
+ MT7663_EE_TX0_2G_TARGET_POWER = 0x0e3,
MT_EE_EXT_PA_2G_TARGET_POWER = 0x0f2,
MT_EE_EXT_PA_5G_TARGET_POWER = 0x0f3,
- MT7663_EE_TX0_2G_TARGET_POWER = 0x123,
MT_EE_TX2_5G_G0_TARGET_POWER = 0x142,
MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a,
+ MT7663_EE_HW_CONF1 = 0x1b0,
+ MT7663_EE_TX0_5G_G0_TARGET_POWER = 0x245,
+ MT7663_EE_TX1_5G_G0_TARGET_POWER = 0x2b5,
MT7615_EE_MAX = 0x3bf,
MT7622_EE_MAX = 0x3db,
MT7663_EE_MAX = 0x400,
};
+#define MT_EE_RATE_POWER_MASK GENMASK(5, 0)
+#define MT_EE_RATE_POWER_SIGN BIT(6)
+#define MT_EE_RATE_POWER_EN BIT(7)
+
+#define MT_EE_CALDATA_FLASH_TX_DPD BIT(0)
+#define MT_EE_CALDATA_FLASH_RX_CAL BIT(1)
+
#define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4)
#define MT_EE_NIC_CONF_RX_MASK GENMASK(3, 0)
+#define MT_EE_HW_CONF1_TX_MASK GENMASK(2, 0)
+
#define MT_EE_NIC_CONF_TSSI_2G BIT(5)
#define MT_EE_NIC_CONF_TSSI_5G BIT(6)
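The new MT_EE_RATE_POWER_* bits encode an optional per-band power offset in a single EEPROM byte: bit 7 enables it, bit 6 carries the sign, bits 5:0 the magnitude, and init.c applies the delta only when both flag bits are set. A decode sketch with the masks spelled out as plain constants:

#include <stdio.h>
#include <stdint.h>

#define RATE_POWER_MASK 0x3f /* GENMASK(5, 0) */
#define RATE_POWER_SIGN 0x40 /* BIT(6) */
#define RATE_POWER_EN   0x80 /* BIT(7) */

/* Mirror of the init.c logic: add the offset only when the byte is
 * flagged enabled and positive. */
static int rate_power_delta(uint8_t val)
{
	if ((val & ~RATE_POWER_MASK) == (RATE_POWER_EN | RATE_POWER_SIGN))
		return val & RATE_POWER_MASK;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       rate_power_delta(0xc5),  /* EN|SIGN|5 -> 5 */
	       rate_power_delta(0x45),  /* SIGN only -> 0 */
	       rate_power_delta(0x85)); /* EN only   -> 0 */
	return 0;
}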
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 03b1e56534d6..e2d80518e5af 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -12,17 +12,18 @@
#include "mac.h"
#include "eeprom.h"
-static void mt7615_phy_init(struct mt7615_dev *dev)
+void mt7615_phy_init(struct mt7615_dev *dev)
{
/* disable rf low power beacon mode */
mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(0), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(1), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
}
+EXPORT_SYMBOL_GPL(mt7615_phy_init);
static void
mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
{
- u32 val, mask, set;
+ u32 val;
if (!chain)
val = MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN;
@@ -62,18 +63,23 @@ mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
- mask = MT_DMA_RCFR0_MCU_RX_MGMT |
- MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
- MT_DMA_RCFR0_MCU_RX_CTL_BAR |
- MT_DMA_RCFR0_MCU_RX_BYPASS |
- MT_DMA_RCFR0_RX_DROPPED_UCAST |
- MT_DMA_RCFR0_RX_DROPPED_MCAST;
- set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
- FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
- mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set);
+ mt76_clear(dev, MT_DMA_RCFR0(chain), MT_DMA_RCFR0_MCU_RX_TDLS);
+ if (!mt7615_firmware_offload(dev)) {
+ u32 mask, set;
+
+ mask = MT_DMA_RCFR0_MCU_RX_MGMT |
+ MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
+ MT_DMA_RCFR0_MCU_RX_CTL_BAR |
+ MT_DMA_RCFR0_MCU_RX_BYPASS |
+ MT_DMA_RCFR0_RX_DROPPED_UCAST |
+ MT_DMA_RCFR0_RX_DROPPED_MCAST;
+ set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
+ FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
+ mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set);
+ }
}
-static void mt7615_mac_init(struct mt7615_dev *dev)
+void mt7615_mac_init(struct mt7615_dev *dev)
{
int i;
@@ -90,7 +96,7 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_TMAC_CTCR0_INS_DDLMT_EN);
mt7615_mcu_set_rts_thresh(&dev->phy, 0x92b);
- mt7615_mac_set_scs(dev, true);
+ mt7615_mac_set_scs(&dev->phy, true);
mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
@@ -112,67 +118,59 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
mt76_wr(dev, MT_DMA_DCR0,
FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) |
MT_DMA_DCR0_RX_VEC_DROP);
+ /* disable TDLS filtering */
+ mt76_clear(dev, MT_WF_PFCR, MT_WF_PFCR_TDLS_EN);
+ mt76_set(dev, MT_WF_MIB_SCR0, MT_MIB_SCR0_AGG_CNT_RANGE_EN);
if (is_mt7663(&dev->mt76)) {
- mt76_wr(dev, MT_CSR(0x010), 0x8208);
- mt76_wr(dev, 0x44064, 0x2000000);
mt76_wr(dev, MT_WF_AGG(0x160), 0x5c341c02);
mt76_wr(dev, MT_WF_AGG(0x164), 0x70708040);
} else {
mt7615_init_mac_chain(dev, 1);
}
}
+EXPORT_SYMBOL_GPL(mt7615_mac_init);
-bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
+void mt7615_check_offload_capability(struct mt7615_dev *dev)
{
- flush_work(&dev->mcu_work);
-
- return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
-}
-
-static void mt7615_init_work(struct work_struct *work)
-{
- struct mt7615_dev *dev = container_of(work, struct mt7615_dev, mcu_work);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
- if (mt7615_mcu_init(dev))
- return;
+ if (mt7615_firmware_offload(dev)) {
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
- mt7615_mcu_set_eeprom(dev);
- mt7615_mac_init(dev);
- mt7615_phy_init(dev);
- mt7615_mcu_del_wtbl_all(dev);
+ wiphy->max_remain_on_channel_duration = 5000;
+ wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ NL80211_FEATURE_P2P_GO_CTWIN |
+ NL80211_FEATURE_P2P_GO_OPPPS;
+ } else {
+ dev->ops->hw_scan = NULL;
+ dev->ops->cancel_hw_scan = NULL;
+ dev->ops->sched_scan_start = NULL;
+ dev->ops->sched_scan_stop = NULL;
+ dev->ops->set_rekey_data = NULL;
+ dev->ops->remain_on_channel = NULL;
+ dev->ops->cancel_remain_on_channel = NULL;
+
+ wiphy->max_sched_scan_plan_interval = 0;
+ wiphy->max_sched_scan_ie_len = 0;
+ wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ wiphy->max_sched_scan_ssids = 0;
+ wiphy->max_match_sets = 0;
+ wiphy->max_sched_scan_reqs = 0;
+ }
}
+EXPORT_SYMBOL_GPL(mt7615_check_offload_capability);
-static int mt7615_init_hardware(struct mt7615_dev *dev)
+bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
{
- int ret, idx;
-
- mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
-
- INIT_WORK(&dev->mcu_work, mt7615_init_work);
- spin_lock_init(&dev->token_lock);
- idr_init(&dev->token);
-
- ret = mt7615_eeprom_init(dev);
- if (ret < 0)
- return ret;
-
- ret = mt7615_dma_init(dev);
- if (ret)
- return ret;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
-
- /* Beacon and mgmt frames should occupy wcid 0 */
- idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
- if (idx)
- return -ENOSPC;
-
- dev->mt76.global_wcid.idx = idx;
- dev->mt76.global_wcid.hw_key_idx = -1;
- rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+ flush_work(&dev->mcu_work);
- return 0;
+ return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
}
+EXPORT_SYMBOL_GPL(mt7615_wait_for_mcu_init);
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
@@ -187,7 +185,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
}
-static struct ieee80211_rate mt7615_rates[] = {
+struct ieee80211_rate mt7615_rates[] = {
CCK_RATE(0, 10),
CCK_RATE(1, 20),
CCK_RATE(2, 55),
@@ -201,6 +199,7 @@ static struct ieee80211_rate mt7615_rates[] = {
OFDM_RATE(8, 480),
OFDM_RATE(12, 540),
};
+EXPORT_SYMBOL_GPL(mt7615_rates);
static const struct ieee80211_iface_limit if_limits[] = {
{
@@ -212,6 +211,8 @@ static const struct ieee80211_iface_limit if_limits[] = {
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_STATION)
}
};
@@ -226,68 +227,26 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
-static void
-mt7615_led_set_config(struct led_classdev *led_cdev,
- u8 delay_on, u8 delay_off)
-{
- struct mt7615_dev *dev;
- struct mt76_dev *mt76;
- u32 val, addr;
-
- mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
- dev = container_of(mt76, struct mt7615_dev, mt76);
- val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
- FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
- FIELD_PREP(MT_LED_STATUS_ON, delay_on);
-
- addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
- mt76_wr(dev, addr, val);
- addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
- mt76_wr(dev, addr, val);
-
- val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
- MT_LED_CTRL_KICK(mt76->led_pin);
- if (mt76->led_al)
- val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
- addr = mt7615_reg_map(dev, MT_LED_CTRL);
- mt76_wr(dev, addr, val);
-}
-
-static int
-mt7615_led_set_blink(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- u8 delta_on, delta_off;
-
- delta_off = max_t(u8, *delay_off / 10, 1);
- delta_on = max_t(u8, *delay_on / 10, 1);
-
- mt7615_led_set_config(led_cdev, delta_on, delta_off);
-
- return 0;
-}
-
-static void
-mt7615_led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- if (!brightness)
- mt7615_led_set_config(led_cdev, 0, 0xff);
- else
- mt7615_led_set_config(led_cdev, 0xff, 0);
-}
-
-static void
-mt7615_init_txpower(struct mt7615_dev *dev,
- struct ieee80211_supported_band *sband)
+void mt7615_init_txpower(struct mt7615_dev *dev,
+ struct ieee80211_supported_band *sband)
{
int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains;
+ int delta_idx, delta = mt76_tx_power_nss_delta(n_chains);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
- int delta = mt76_tx_power_nss_delta(n_chains);
+ u8 rate_val;
+
+ delta_idx = mt7615_eeprom_get_power_delta_index(dev, band);
+ rate_val = eep[delta_idx];
+ if ((rate_val & ~MT_EE_RATE_POWER_MASK) ==
+ (MT_EE_RATE_POWER_EN | MT_EE_RATE_POWER_SIGN))
+ delta += rate_val & MT_EE_RATE_POWER_MASK;
+
+ if (!is_mt7663(&dev->mt76) && mt7615_ext_pa_enabled(dev, band))
+ target_chains = 1;
+ else
+ target_chains = n_chains;
- target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
for (i = 0; i < sband->n_channels; i++) {
struct ieee80211_channel *chan = &sband->channels[i];
u8 target_power = 0;
@@ -296,7 +255,10 @@ mt7615_init_txpower(struct mt7615_dev *dev,
for (j = 0; j < target_chains; j++) {
int index;
- index = mt7615_eeprom_get_power_index(dev, chan, j);
+ index = mt7615_eeprom_get_target_power_index(dev, chan, j);
+ if (index < 0)
+ continue;
+
target_power = max(target_power, eep[index]);
}
@@ -306,6 +268,7 @@ mt7615_init_txpower(struct mt7615_dev *dev,
chan->orig_mpwr = target_power;
}
}
+EXPORT_SYMBOL_GPL(mt7615_init_txpower);
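The reworked txpower path folds an EEPROM per-band rate-power delta into the NSS delta, but only when the byte's enable and sign flags are both set. A standalone sketch of that decode; the bit layout below is assumed for illustration, the authoritative masks are the MT_EE_RATE_POWER_* defines in eeprom.h:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed layout: magnitude in the low bits, flags on top. */
    #define EE_RATE_POWER_MASK  0x3f  /* delta magnitude */
    #define EE_RATE_POWER_SIGN  0x40  /* positive-delta flag */
    #define EE_RATE_POWER_EN    0x80  /* "delta valid" flag */

    static int apply_rate_power_delta(int delta, uint8_t rate_val)
    {
            /* Only fold in the EEPROM delta when both EN and SIGN are
             * set, mirroring the check in mt7615_init_txpower(). */
            if ((rate_val & ~EE_RATE_POWER_MASK) ==
                (EE_RATE_POWER_EN | EE_RATE_POWER_SIGN))
                    delta += rate_val & EE_RATE_POWER_MASK;
            return delta;
    }

    int main(void)
    {
            printf("%d\n", apply_rate_power_delta(0, 0xc2)); /* EN|SIGN, +2 */
            printf("%d\n", apply_rate_power_delta(0, 0x42)); /* SIGN only: 0 */
            return 0;
    }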
static void
mt7615_regd_notifier(struct wiphy *wiphy,
@@ -345,8 +308,18 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7615_regd_notifier;
+ wiphy->max_sched_scan_plan_interval = MT7615_MAX_SCHED_SCAN_INTERVAL;
+ wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ wiphy->max_scan_ie_len = MT7615_SCAN_IE_LEN;
+ wiphy->max_sched_scan_ssids = MT7615_MAX_SCHED_SCAN_SSID;
+ wiphy->max_match_sets = MT7615_MAX_SCAN_MATCH;
+ wiphy->max_sched_scan_reqs = 1;
+ wiphy->max_scan_ssids = 4;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
if (is_mt7615(&phy->dev->mt76))
@@ -368,7 +341,7 @@ mt7615_cap_dbdc_enable(struct mt7615_dev *dev)
dev->phy.chainmask = dev->mphy.antenna_mask;
dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask;
- mt76_set_stream_caps(&dev->mt76, true);
+ mt76_set_stream_caps(&dev->mphy, true);
}
static void
@@ -381,7 +354,7 @@ mt7615_cap_dbdc_disable(struct mt7615_dev *dev)
dev->phy.chainmask = dev->chainmask;
dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask;
- mt76_set_stream_caps(&dev->mt76, true);
+ mt76_set_stream_caps(&dev->mphy, true);
}
int mt7615_register_ext_phy(struct mt7615_dev *dev)
@@ -411,6 +384,16 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
mt7615_init_wiphy(mphy->hw);
+ INIT_DELAYED_WORK(&phy->mac_work, mt7615_mac_work);
+ INIT_DELAYED_WORK(&phy->scan_work, mt7615_scan_work);
+ skb_queue_head_init(&phy->scan_event_list);
+
+ INIT_WORK(&phy->roc_work, mt7615_roc_work);
+ timer_setup(&phy->roc_timer, mt7615_roc_timer, 0);
+ init_waitqueue_head(&phy->roc_wait);
+
+ mt7615_mac_set_scs(phy, true);
+
/*
* Make the secondary PHY MAC address local without overlapping with
* the usual MAC address allocation scheme on multiple virtual interfaces
@@ -431,6 +414,7 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
return ret;
}
+EXPORT_SYMBOL_GPL(mt7615_register_ext_phy);
void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
{
@@ -444,6 +428,7 @@ void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
mt76_unregister_phy(mphy);
ieee80211_free_hw(mphy->hw);
}
+EXPORT_SYMBOL_GPL(mt7615_unregister_ext_phy);
void mt7615_init_device(struct mt7615_dev *dev)
{
@@ -452,11 +437,17 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
- INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
+ INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work);
+ INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
+ skb_queue_head_init(&dev->phy.scan_event_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
spin_lock_init(&dev->sta_poll_lock);
init_waitqueue_head(&dev->reset_wait);
+ init_waitqueue_head(&dev->phy.roc_wait);
+
INIT_WORK(&dev->reset_work, mt7615_mac_reset_work);
+ INIT_WORK(&dev->phy.roc_work, mt7615_roc_work);
+ timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0);
mt7615_init_wiphy(hw);
dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
@@ -467,62 +458,4 @@ void mt7615_init_device(struct mt7615_dev *dev)
mt7615_cap_dbdc_disable(dev);
dev->phy.dfs_state = -1;
}
-
-int mt7615_register_device(struct mt7615_dev *dev)
-{
- int ret;
-
- mt7615_init_device(dev);
-
- /* init led callbacks */
- if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
- }
-
- ret = mt7622_wmac_init(dev);
- if (ret)
- return ret;
-
- ret = mt7615_init_hardware(dev);
- if (ret)
- return ret;
-
- ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
- ARRAY_SIZE(mt7615_rates));
- if (ret)
- return ret;
-
- ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work);
- mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
- mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
-
- return mt7615_init_debugfs(dev);
-}
-
-void mt7615_unregister_device(struct mt7615_dev *dev)
-{
- struct mt76_txwi_cache *txwi;
- bool mcu_running;
- int id;
-
- mcu_running = mt7615_wait_for_mcu_init(dev);
-
- mt7615_unregister_ext_phy(dev);
- mt76_unregister_device(&dev->mt76);
- if (mcu_running)
- mt7615_mcu_exit(dev);
- mt7615_dma_cleanup(dev);
-
- spin_lock_bh(&dev->token_lock);
- idr_for_each_entry(&dev->token, txwi, id) {
- mt7615_txp_skb_unmap(&dev->mt76, txwi);
- if (txwi->skb)
- dev_kfree_skb_any(txwi->skb);
- mt76_put_txwi(&dev->mt76, txwi);
- }
- spin_unlock_bh(&dev->token_lock);
- idr_destroy(&dev->token);
-
- mt76_free_device(&dev->mt76);
-}
+EXPORT_SYMBOL_GPL(mt7615_init_device);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index a27a6d164009..9f1c6ca7a665 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -61,7 +61,7 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
struct mt7615_sta *sta;
struct mt76_wcid *wcid;
- if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ if (idx >= MT7615_WTBL_SIZE)
return NULL;
wcid = rcu_dereference(dev->mt76.wcid[idx]);
@@ -82,8 +82,10 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev)
{
int i;
- for (i = 0; i < 4; i++)
- mt76_rr(dev, MT_TX_AGG_CNT(i));
+ for (i = 0; i < 4; i++) {
+ mt76_rr(dev, MT_TX_AGG_CNT(0, i));
+ mt76_rr(dev, MT_TX_AGG_CNT(1, i));
+ }
memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
dev->mt76.phy.survey_time = ktime_get_boottime();
@@ -113,10 +115,14 @@ void mt7615_mac_set_timing(struct mt7615_phy *phy)
u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
- FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
int sifs, offset;
+ bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
- if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ)
+ if (is_5ghz)
sifs = 16;
else
sifs = 10;
@@ -149,7 +155,7 @@ void mt7615_mac_set_timing(struct mt7615_phy *phy)
FIELD_PREP(MT_IFS_SIFS, sifs) |
FIELD_PREP(MT_IFS_SLOT, phy->slottime));
- if (phy->slottime < 20)
+ if (phy->slottime < 20 || is_5ghz)
val = MT7615_CFEND_RATE_DEFAULT;
else
val = MT7615_CFEND_RATE_11B;
@@ -164,7 +170,23 @@ void mt7615_mac_set_timing(struct mt7615_phy *phy)
}
-int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
+static void
+mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
+ struct mt76_rx_status *status, u8 chfreq)
+{
+ if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
+ !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
+ !test_bit(MT76_STATE_ROC, &mphy->state)) {
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ return;
+ }
+
+ status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
+}
+
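While hardware scanning, scheduled scanning or remain-on-channel is active, the RXD channel field is authoritative, so mt7615_get_status_freq_info() derives the band and frequency from it instead of trusting the current chandef. A simplified userspace version of that mapping (band split at channel 14 as in the driver; special cases beyond channel 14 are omitted):

    #include <stdio.h>

    /* Simplified from ieee80211_channel_to_frequency(). */
    static int chan_to_freq(int ch)
    {
            if (ch == 14)         /* 2.4 GHz: Japan channel 14 is special */
                    return 2484;
            if (ch <= 14)         /* 2.4 GHz band, as in the driver's check */
                    return 2407 + ch * 5;
            return 5000 + ch * 5; /* 5 GHz band */
    }

    int main(void)
    {
            printf("ch 6  -> %d MHz\n", chan_to_freq(6));  /* 2437 */
            printf("ch 36 -> %d MHz\n", chan_to_freq(36)); /* 5180 */
            return 0;
    }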
+static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -282,11 +304,10 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->ext_phy = true;
}
- if (chfreq != phy->chfreq)
+ if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
return -EINVAL;
- status->freq = mphy->chandef.chan->center_freq;
- status->band = mphy->chandef.chan->band;
+ mt7615_get_status_freq_info(dev, mphy, status, chfreq);
if (status->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
else
@@ -408,40 +429,7 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
-
-void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
-{
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7615_dev *dev;
- struct mt7615_txp_common *txp;
- u16 token;
-
- dev = container_of(mdev, struct mt7615_dev, mt76);
- txp = mt7615_txwi_to_txp(mdev, e->txwi);
-
- if (is_mt7615(&dev->mt76))
- token = le16_to_cpu(txp->fw.token);
- else
- token = le16_to_cpu(txp->hw.msdu_id[0]) &
- ~MT_MSDU_ID_VALID;
-
- spin_lock_bh(&dev->token_lock);
- t = idr_remove(&dev->token, token);
- spin_unlock_bh(&dev->token_lock);
- e->skb = t ? t->skb : NULL;
- }
-
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->skb);
-}
+EXPORT_SYMBOL_GPL(mt7615_sta_ps);
static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
@@ -512,11 +500,12 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct ieee80211_vif *vif = info->control.vif;
struct mt76_phy *mphy = &dev->mphy;
bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
+ bool is_usb = mt76_is_usb(&dev->mt76);
int tx_count = 8;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
__le16 fc = hdr->frame_control;
+ u32 val, sz_txd = is_usb ? MT_USB_TXD_SIZE : MT_TXD_SIZE;
u16 seqno = 0;
- u32 val;
if (vif) {
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
@@ -540,7 +529,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
skb_get_queue_mapping(skb);
- p_fmt = MT_TX_TYPE_CT;
+ p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
} else if (beacon) {
if (ext_phy)
q_idx = MT_LMAC_BCN1;
@@ -552,10 +541,10 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
q_idx = MT_LMAC_ALTX1;
else
q_idx = MT_LMAC_ALTX0;
- p_fmt = MT_TX_TYPE_CT;
+ p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
}
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
txwi[0] = cpu_to_le32(val);
@@ -621,8 +610,11 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
}
if (!ieee80211_is_beacon(fc)) {
- val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
- FIELD_PREP(MT_TXD5_PID, pid);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid);
+ if (!ieee80211_hw_check(hw, SUPPORTS_PS))
+ val |= MT_TXD5_SW_POWER_MGMT;
txwi[5] = cpu_to_le32(val);
} else {
txwi[5] = 0;
@@ -648,10 +640,15 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
+ if (is_usb)
+ txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
return 0;
}
+EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);
static void
mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
@@ -666,24 +663,27 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
static void
mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
{
+ u32 last_mask;
int i;
+ last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST;
+
for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
struct mt7615_txp_ptr *ptr = &txp->ptr[i];
bool last;
u16 len;
len = le16_to_cpu(ptr->len0);
- last = len & MT_TXD_LEN_MSDU_LAST;
- len &= ~MT_TXD_LEN_MSDU_LAST;
+ last = len & last_mask;
+ len &= MT_TXD_LEN_MASK;
dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
DMA_TO_DEVICE);
if (last)
break;
len = le16_to_cpu(ptr->len1);
- last = len & MT_TXD_LEN_MSDU_LAST;
- len &= ~MT_TXD_LEN_MSDU_LAST;
+ last = len & last_mask;
+ len &= MT_TXD_LEN_MASK;
dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
DMA_TO_DEVICE);
if (last)
@@ -702,11 +702,7 @@ void mt7615_txp_skb_unmap(struct mt76_dev *dev,
else
mt7615_txp_skb_unmap_hw(dev, &txp->hw);
}
-
-static u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid)
-{
- return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE;
-}
+EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
@@ -734,22 +730,20 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
struct ieee80211_sta *sta;
struct mt7615_sta *msta;
u32 addr, tx_time[4], rx_time[4];
+ struct list_head sta_poll_list;
int i;
- rcu_read_lock();
+ INIT_LIST_HEAD(&sta_poll_list);
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
- while (true) {
+ while (!list_empty(&sta_poll_list)) {
bool clear = false;
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&dev->sta_poll_list)) {
- spin_unlock_bh(&dev->sta_poll_lock);
- break;
- }
- msta = list_first_entry(&dev->sta_poll_list,
- struct mt7615_sta, poll_list);
+ msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
+ poll_list);
list_del_init(&msta->poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
@@ -789,30 +783,22 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
rx_cur);
}
}
-
- rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll);
-void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
- struct ieee80211_tx_rate *probe_rate,
- struct ieee80211_tx_rate *rates)
+static void
+mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates,
+ struct mt7615_rate_desc *rd)
{
struct mt7615_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_tx_rate *ref;
- int wcid = sta->wcid.idx;
- u32 addr = mt7615_mac_wtbl_addr(dev, wcid);
- bool stbc = false;
+ bool rateset, stbc = false;
int n_rates = sta->n_rates;
- u8 bw, bw_prev, bw_idx = 0;
- u16 val[4];
- u16 probe_val;
- u32 w5, w27;
- bool rateset;
- int i, k;
-
- if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
- return;
+ u8 bw, bw_prev;
+ int i, j;
for (i = n_rates; i < 4; i++)
rates[i] = rates[n_rates - 1];
@@ -840,10 +826,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
- for (k = 0; k < i; k++) {
- if (rates[i].idx != rates[k].idx)
+ for (j = 0; j < i; j++) {
+ if (rates[i].idx != rates[j].idx)
continue;
- if ((rates[i].flags ^ rates[k].flags) &
+ if ((rates[i].flags ^ rates[j].flags) &
(IEEE80211_TX_RC_40_MHZ_WIDTH |
IEEE80211_TX_RC_80_MHZ_WIDTH |
IEEE80211_TX_RC_160_MHZ_WIDTH))
@@ -856,65 +842,114 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
}
}
- val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
+ rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
bw_prev = bw;
if (probe_rate) {
- probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
- stbc, &bw);
+ rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
+ stbc, &bw);
if (bw)
- bw_idx = 1;
+ rd->bw_idx = 1;
else
bw_prev = 0;
} else {
- probe_val = val[0];
+ rd->probe_val = rd->val[0];
}
- val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
+ rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
if (bw_prev) {
- bw_idx = 3;
+ rd->bw_idx = 3;
bw_prev = bw;
}
- val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
+ rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
if (bw_prev) {
- bw_idx = 5;
+ rd->bw_idx = 5;
bw_prev = bw;
}
- val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
+ rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
if (bw_prev)
- bw_idx = 7;
+ rd->bw_idx = 7;
+
+ rd->rateset = rateset;
+ rd->bw = bw;
+}
+
+static int
+mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mt7615_wtbl_desc *wd;
+
+ wd = kzalloc(sizeof(*wd), GFP_ATOMIC);
+ if (!wd)
+ return -ENOMEM;
+
+ wd->type = MT7615_WTBL_RATE_DESC;
+ wd->sta = sta;
+
+ mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
+ &wd->rate);
+ list_add_tail(&wd->node, &dev->wd_head);
+ queue_work(dev->mt76.usb.wq, &dev->wtbl_work);
+
+ return 0;
+}
+
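USB transports cannot issue synchronous WTBL register accesses from this (possibly atomic) context, hence GFP_ATOMIC and the detour above: the rate programming is captured in a descriptor and replayed from a workqueue. The sketch below shows only the shape of that hand-off; the names and the bare singly linked list are illustrative, the driver uses a list_head under a lock plus queue_work():

    #include <stdio.h>
    #include <stdlib.h>

    struct rate_desc { int wcid; struct rate_desc *next; };

    static struct rate_desc *wd_head;

    static int queue_rate_update(int wcid)
    {
            struct rate_desc *wd = calloc(1, sizeof(*wd)); /* GFP_ATOMIC in-kernel */
            if (!wd)
                    return -1;
            wd->wcid = wcid;
            wd->next = wd_head; /* driver: list_add_tail() under a lock */
            wd_head = wd;
            return 0;           /* real code then kicks the worker */
    }

    static void wtbl_worker(void)
    {
            while (wd_head) {
                    struct rate_desc *wd = wd_head;

                    wd_head = wd->next;
                    printf("apply rate update for wcid %d\n", wd->wcid);
                    free(wd);
            }
    }

    int main(void)
    {
            queue_rate_update(1);
            queue_rate_update(2);
            wtbl_worker();
            return 0;
    }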
+void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates)
+{
+ int wcid = sta->wcid.idx, n_rates = sta->n_rates;
+ struct mt7615_dev *dev = phy->dev;
+ struct mt7615_rate_desc rd;
+ u32 w5, w27, addr;
+
+ if (mt76_is_usb(&dev->mt76)) {
+ mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
+ return;
+ }
+
+ if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ return;
+ memset(&rd, 0, sizeof(struct mt7615_rate_desc));
+ mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd);
+
+ addr = mt7615_mac_wtbl_addr(dev, wcid);
w27 = mt76_rr(dev, addr + 27 * 4);
w27 &= ~MT_WTBL_W27_CC_BW_SEL;
- w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw);
+ w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw);
w5 = mt76_rr(dev, addr + 5 * 4);
w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
MT_WTBL_W5_MPDU_OK_COUNT |
MT_WTBL_W5_MPDU_FAIL_COUNT |
MT_WTBL_W5_RATE_IDX);
- w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) |
- FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7);
+ w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) |
+ FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
+ rd.bw_idx ? rd.bw_idx - 1 : 7);
mt76_wr(dev, MT_WTBL_RIUCR0, w5);
mt76_wr(dev, MT_WTBL_RIUCR1,
- FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
- FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
- FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1]));
mt76_wr(dev, MT_WTBL_RIUCR2,
- FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
- FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
- FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
- FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2]));
mt76_wr(dev, MT_WTBL_RIUCR3,
- FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
- FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
- FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3]));
mt76_wr(dev, MT_WTBL_UPDATE,
FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
@@ -924,7 +959,8 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
mt76_wr(dev, addr + 27 * 4, w27);
mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
- sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;
+ sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
+ sta->rate_set_tsf |= rd.rateset;
if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
@@ -932,59 +968,33 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}
+EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
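Note the rate_set_tsf split introduced above: the TSF snapshot is stored with bit 0 masked off, and the active rateset index (the hardware alternates between two rate-table slots) is packed into that bit so later TX-status parsing can tell which table a frame used. Packing sketch:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t pack_rate_set_tsf(uint32_t tsf, unsigned int rateset)
    {
            /* bit 0 carries the rateset slot, the rest is the TSF */
            return (tsf & ~1u) | (rateset & 1u);
    }

    int main(void)
    {
            uint32_t v = pack_rate_set_tsf(0x12345679, 0);

            printf("tsf=0x%08x rateset=%u\n", v & ~1u, v & 1u);
            return 0;
    }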
-static enum mt7615_cipher_type
-mt7615_mac_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MT_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MT_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MT_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MT_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MT_CIPHER_WAPI;
- default:
- return MT_CIPHER_NONE;
- }
-}
-
-static int
-mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key,
- enum mt7615_cipher_type cipher,
- enum set_key_cmd cmd)
+int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ u8 *key, u8 keylen,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
u8 data[32] = {};
- if (key->keylen > sizeof(data))
+ if (keylen > sizeof(data))
return -EINVAL;
mt76_rr_copy(dev, addr, data, sizeof(data));
if (cmd == SET_KEY) {
if (cipher == MT_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
- memcpy(data + 16, key->key + 24, 8);
- memcpy(data + 24, key->key + 16, 8);
+ memcpy(data + 16, key + 24, 8);
+ memcpy(data + 24, key + 16, 8);
}
if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
memmove(data + 16, data, 16);
if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
- memcpy(data, key->key, key->keylen);
+ memcpy(data, key, keylen);
else if (cipher == MT_CIPHER_BIP_CMAC_128)
- memcpy(data + 16, key->key, 16);
+ memcpy(data + 16, key, 16);
} else {
if (wcid->cipher & ~BIT(cipher)) {
if (cipher != MT_CIPHER_BIP_CMAC_128)
@@ -998,11 +1008,12 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
return 0;
}
+EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_key);
-static int
-mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher, int keyidx,
- enum set_key_cmd cmd)
+int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ int keyidx, enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
@@ -1034,11 +1045,12 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
return 0;
}
+EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_pk);
-static void
-mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher,
- enum set_key_cmd cmd)
+void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
@@ -1056,6 +1068,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
}
}
+EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_cipher);
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
@@ -1072,7 +1085,8 @@ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
- err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
+ err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
+ cipher, cmd);
if (err < 0)
goto out;
@@ -1092,136 +1106,6 @@ out:
return err;
}
-static void
-mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
- void *txp_ptr, u32 id)
-{
- struct mt7615_hw_txp *txp = txp_ptr;
- struct mt7615_txp_ptr *ptr = &txp->ptr[0];
- int nbuf = tx_info->nbuf - 1;
- int i;
-
- tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
- tx_info->nbuf = 1;
-
- txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
-
- for (i = 0; i < nbuf; i++) {
- u32 addr = tx_info->buf[i + 1].addr;
- u16 len = tx_info->buf[i + 1].len;
-
- if (i == nbuf - 1)
- len |= MT_TXD_LEN_MSDU_LAST |
- MT_TXD_LEN_AMSDU_LAST;
-
- if (i & 1) {
- ptr->buf1 = cpu_to_le32(addr);
- ptr->len1 = cpu_to_le16(len);
- ptr++;
- } else {
- ptr->buf0 = cpu_to_le32(addr);
- ptr->len0 = cpu_to_le16(len);
- }
- }
-}
-
-static void
-mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
- void *txp_ptr, u32 id)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_vif *vif = info->control.vif;
- struct mt7615_fw_txp *txp = txp_ptr;
- int nbuf = tx_info->nbuf - 1;
- int i;
-
- for (i = 0; i < nbuf; i++) {
- txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
- txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
- }
- txp->nbuf = nbuf;
-
- /* pass partial skb header to fw */
- tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
- tx_info->buf[1].len = MT_CT_PARSE_LEN;
- tx_info->nbuf = MT_CT_DMA_BUF_NUM;
-
- txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
-
- if (!key)
- txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
-
- if (ieee80211_is_mgmt(hdr->frame_control))
- txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
-
- if (vif) {
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
-
- txp->bss_idx = mvif->idx;
- }
-
- txp->token = cpu_to_le16(id);
- txp->rept_wds_wcid = 0xff;
-}
-
-int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info)
-{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- int pid, id;
- u8 *txwi = (u8 *)txwi_ptr;
- struct mt76_txwi_cache *t;
- void *txp;
-
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
-
- pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
-
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
- struct mt7615_phy *phy = &dev->phy;
-
- if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
- phy = mdev->phy2->priv;
-
- spin_lock_bh(&dev->mt76.lock);
- mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
- msta->rates);
- msta->rate_probe = true;
- spin_unlock_bh(&dev->mt76.lock);
- }
-
- t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
- t->skb = tx_info->skb;
-
- spin_lock_bh(&dev->token_lock);
- id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
- spin_unlock_bh(&dev->token_lock);
- if (id < 0)
- return id;
-
- mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
- pid, key, false);
-
- txp = txwi + MT_TXD_SIZE;
- memset(txp, 0, sizeof(struct mt7615_txp_common));
- if (is_mt7615(&dev->mt76))
- mt7615_write_fw_txp(dev, tx_info, txp, id);
- else
- mt7615_write_hw_txp(dev, tx_info, txp, id);
-
- tx_info->skb = DMA_DUMMY_DATA;
-
- return 0;
-}
-
static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
struct ieee80211_tx_info *info, __le32 *txs_data)
{
@@ -1266,7 +1150,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
- first_idx = max_t(int, 0, last_idx - (count + 1) / MT7615_RATE_RETRY);
+ first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);
if (fixed_rate && !probe) {
info->status.rates[0].count = count;
@@ -1399,7 +1283,7 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
return !!skb;
}
-void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
+static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
struct ieee80211_tx_info info = {};
struct ieee80211_sta *sta = NULL;
@@ -1419,7 +1303,7 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
if (pid == MT_PACKET_ID_NO_ACK)
return;
- if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
+ if (wcidx >= MT7615_WTBL_SIZE)
return;
rcu_read_lock();
@@ -1476,7 +1360,7 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
mt76_put_txwi(mdev, txwi);
}
-void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
u8 i, count;
@@ -1497,58 +1381,118 @@ void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
dev_kfree_skb(skb);
}
+void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+ u16 flag;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
+ type = PKT_TYPE_NORMAL_MCU;
+
+ switch (type) {
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 7 <= end; rxd += 7)
+ mt7615_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7615_mac_tx_free(dev, skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt7615_mcu_rx_event(dev, skb);
+ break;
+ case PKT_TYPE_NORMAL_MCU:
+ case PKT_TYPE_NORMAL:
+ if (!mt7615_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ /* fall through */
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb);
+
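mt7615_queue_rx_skb() becomes the shared RX entry point for all bus back ends: the packet-type bits in RXD word 0 route each buffer, and an RX_EVENT with flag 0x1 is remapped because that combination carries a normal frame delivered through the MCU path. Shape of the demux, stubbed out for userspace with illustrative names:

    #include <stdio.h>

    enum pkt_type {
            PKT_TXS, PKT_TXRX_NOTIFY, PKT_RX_EVENT,
            PKT_NORMAL, PKT_NORMAL_MCU,
    };

    static void demux(enum pkt_type type, unsigned int flag)
    {
            /* MCU-delivered data frames masquerade as events */
            if (type == PKT_RX_EVENT && flag == 0x1)
                    type = PKT_NORMAL_MCU;

            switch (type) {
            case PKT_TXS:         puts("parse tx status");  break;
            case PKT_TXRX_NOTIFY: puts("free tx tokens");   break;
            case PKT_RX_EVENT:    puts("mcu event");        break;
            case PKT_NORMAL_MCU:
            case PKT_NORMAL:      puts("hand to mac80211"); break;
            }
    }

    int main(void)
    {
            demux(PKT_RX_EVENT, 0x1); /* remapped to a normal frame */
            return 0;
    }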
static void
-mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
+mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm)
{
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
- mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
- MT_WF_PHY_PD_OFDM_MASK(ext_phy),
- MT_WF_PHY_PD_OFDM(ext_phy, 0x13c));
- mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
- MT_WF_PHY_PD_CCK_MASK(ext_phy),
- MT_WF_PHY_PD_CCK(ext_phy, 0x92));
+ if (is_mt7663(&dev->mt76)) {
+ if (ofdm)
+ mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(0),
+ MT_WF_PHY_PD_OFDM(0, val));
+ else
+ mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, val));
+ return;
+ }
+
+ if (ofdm)
+ mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(ext_phy),
+ MT_WF_PHY_PD_OFDM(ext_phy, val));
+ else
+ mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, val));
+}
+
+static void
+mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
+{
+ /* ofdm */
+ mt7615_mac_set_sensitivity(phy, 0x13c, true);
+ /* cck */
+ mt7615_mac_set_sensitivity(phy, 0x92, false);
phy->ofdm_sensitivity = -98;
phy->cck_sensitivity = -110;
phy->last_cca_adj = jiffies;
}
-void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable)
+void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
{
- struct mt7615_phy *ext_phy;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 reg, mask;
mutex_lock(&dev->mt76.mutex);
- if (dev->scs_en == enable)
+ if (phy->scs_en == enable)
goto out;
- if (is_mt7663(&dev->mt76))
- goto out;
+ if (is_mt7663(&dev->mt76)) {
+ reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy);
+ mask = MT_WF_PHY_PD_BLK(0);
+ } else {
+ reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy);
+ mask = MT_WF_PHY_PD_BLK(ext_phy);
+ }
if (enable) {
- mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(0),
- MT_WF_PHY_PD_BLK(0));
- mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(1),
- MT_WF_PHY_PD_BLK(1));
+ mt76_set(dev, reg, mask);
if (is_mt7622(&dev->mt76)) {
- mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8);
- mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7);
+ mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8);
+ mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7);
}
} else {
- mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(0),
- MT_WF_PHY_PD_BLK(0));
- mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(1),
- MT_WF_PHY_PD_BLK(1));
+ mt76_clear(dev, reg, mask);
}
- mt7615_mac_set_default_sensitivity(&dev->phy);
- ext_phy = mt7615_ext_phy(dev);
- if (ext_phy)
- mt7615_mac_set_default_sensitivity(ext_phy);
-
- dev->scs_en = enable;
+ mt7615_mac_set_default_sensitivity(phy);
+ phy->scs_en = enable;
out:
mutex_unlock(&dev->mt76.mutex);
@@ -1556,10 +1500,12 @@ out:
void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
{
- u32 rxtd;
+ u32 rxtd, reg;
if (is_mt7663(&dev->mt76))
- return;
+ reg = MT7663_WF_PHY_R0_PHYMUX_5;
+ else
+ reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
if (ext_phy)
rxtd = MT_WF_PHY_RXTD2(10);
@@ -1567,15 +1513,21 @@ void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
rxtd = MT_WF_PHY_RXTD(12);
mt76_set(dev, rxtd, BIT(18) | BIT(29));
- mt76_set(dev, MT_WF_PHY_R0_PHYMUX_5(ext_phy), 0x5 << 12);
+ mt76_set(dev, reg, 0x5 << 12);
}
void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
{
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
- u32 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
+ u32 reg;
+ if (is_mt7663(&dev->mt76))
+ reg = MT7663_WF_PHY_R0_PHYMUX_5;
+ else
+ reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
+
+ /* reset PD and MDRDY counters */
mt76_clear(dev, reg, GENMASK(22, 20));
mt76_set(dev, reg, BIT(22) | BIT(20));
}
@@ -1627,19 +1579,9 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
}
if (update) {
- u16 val;
+ u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256;
- if (ofdm) {
- val = *sensitivity * 2 + 512;
- mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
- MT_WF_PHY_PD_OFDM_MASK(ext_phy),
- MT_WF_PHY_PD_OFDM(ext_phy, val));
- } else {
- val = *sensitivity + 256;
- mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
- MT_WF_PHY_PD_CCK_MASK(ext_phy),
- MT_WF_PHY_PD_CCK(ext_phy, val));
- }
+ mt7615_mac_set_sensitivity(phy, val, ofdm);
phy->last_cca_adj = jiffies;
}
}
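The consolidated update uses a single affine map from dBm sensitivity to the PD (packet-detect) register value, and the defaults programmed by mt7615_mac_set_default_sensitivity() (0x13c for OFDM, 0x92 for CCK) fall out of the same formula at -98 and -110 dBm:

    #include <stdio.h>

    static int pd_val(int sens_dbm, int ofdm)
    {
            /* from mt7615_mac_adjust_sensitivity() */
            return ofdm ? sens_dbm * 2 + 512 : sens_dbm + 256;
    }

    int main(void)
    {
            printf("ofdm -98 dBm -> 0x%x\n", pd_val(-98, 1));  /* 0x13c */
            printf("cck -110 dBm -> 0x%x\n", pd_val(-110, 0)); /* 0x92  */
            return 0;
    }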
@@ -1653,14 +1595,20 @@ mt7615_mac_scs_check(struct mt7615_phy *phy)
u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
bool ext_phy = phy != &dev->phy;
- if (!dev->scs_en)
+ if (!phy->scs_en)
return;
- val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
+ if (is_mt7663(&dev->mt76))
+ val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
+ else
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);
- val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
+ if (is_mt7663(&dev->mt76))
+ val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
+ else
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);
@@ -1685,10 +1633,14 @@ static u8
mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
- u32 reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);
- u32 val, sum = 0, n = 0;
+ u32 reg, val, sum = 0, n = 0;
int i;
+ if (is_mt7663(&dev->mt76))
+ reg = MT7663_WF_PHY_RXTD(20);
+ else
+ reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);
+
for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
val = mt76_rr(dev, reg);
sum += val * nf_power[i];
@@ -1744,6 +1696,7 @@ void mt7615_update_channel(struct mt76_dev *mdev)
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
+EXPORT_SYMBOL_GPL(mt7615_update_channel);
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
@@ -1751,64 +1704,71 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
struct mt7615_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
bool ext_phy = phy != &dev->phy;
- int i;
+ int i, aggr;
+ u32 val, val2;
memset(mib, 0, sizeof(*mib));
mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
MT_MIB_SDR3_FCS_ERR_MASK);
+ val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
+ MT_MIB_AMPDU_MPDU_COUNT);
+ if (val) {
+ val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy),
+ MT_MIB_AMPDU_ACK_COUNT);
+ mib->aggr_per = 1000 * (val - val2) / val;
+ }
+
+ aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 4; i++) {
- u32 data, val, val2;
-
- val = mt76_get_field(dev, MT_MIB_MB_SDR1(ext_phy, i),
- MT_MIB_ACK_FAIL_COUNT_MASK);
- if (val > mib->ack_fail_cnt)
- mib->ack_fail_cnt = val;
-
- val2 = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
- data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val2);
- if (data > mib->rts_retries_cnt) {
- mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val2);
- mib->rts_retries_cnt = data;
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
+
+ val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+ if (val2 > mib->ack_fail_cnt)
+ mib->ack_fail_cnt = val2;
+
+ val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ if (val2 > mib->ba_miss_cnt)
+ mib->ba_miss_cnt = val2;
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+ val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+ if (val2 > mib->rts_retries_cnt) {
+ mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt = val2;
}
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+
+ dev->mt76.aggr_stats[aggr++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr++] += val >> 16;
}
}
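The aggr_per value computed above is a per-mille A-MPDU error rate derived from the SDR14 MPDU count and the SDR15 ACK count, with the divide skipped when the counter reads zero:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t ampdu_per(uint32_t mpdu_cnt, uint32_t ack_cnt)
    {
            if (!mpdu_cnt) /* the driver guards the divide the same way */
                    return 0;
            return 1000 * (mpdu_cnt - ack_cnt) / mpdu_cnt;
    }

    int main(void)
    {
            /* 1000 MPDUs sent, 970 ACKed -> 30 per mille (3.0%) errors */
            printf("%u\n", ampdu_per(1000, 970));
            return 0;
    }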
void mt7615_mac_work(struct work_struct *work)
{
- struct mt7615_dev *dev;
- struct mt7615_phy *ext_phy;
- int i, idx;
+ struct mt7615_phy *phy;
+ struct mt76_dev *mdev;
- dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
+ phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
mac_work.work);
+ mdev = &phy->dev->mt76;
- mutex_lock(&dev->mt76.mutex);
- mt76_update_survey(&dev->mt76);
- if (++dev->mac_work_count == 5) {
- ext_phy = mt7615_ext_phy(dev);
-
- mt7615_mac_update_mib_stats(&dev->phy);
- mt7615_mac_scs_check(&dev->phy);
- if (ext_phy) {
- mt7615_mac_update_mib_stats(ext_phy);
- mt7615_mac_scs_check(ext_phy);
- }
+ mutex_lock(&mdev->mutex);
- dev->mac_work_count = 0;
- }
+ mt76_update_survey(mdev);
+ if (++phy->mac_work_count == 5) {
+ phy->mac_work_count = 0;
- for (i = 0, idx = 0; i < 4; i++) {
- u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
-
- dev->mt76.aggr_stats[idx++] += val & 0xffff;
- dev->mt76.aggr_stats[idx++] += val >> 16;
+ mt7615_mac_update_mib_stats(phy);
+ mt7615_mac_scs_check(phy);
}
- mutex_unlock(&dev->mt76.mutex);
- mt76_tx_status_check(&dev->mt76, NULL, false);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ mutex_unlock(&mdev->mutex);
+
+ mt76_tx_status_check(mdev, NULL, false);
+ ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
MT7615_WATCHDOG_TIME);
}
@@ -1848,8 +1808,7 @@ mt7615_update_beacons(struct mt7615_dev *dev)
mt7615_update_vif_beacon, dev->mt76.phy2->hw);
}
-static void
-mt7615_dma_reset(struct mt7615_dev *dev)
+void mt7615_dma_reset(struct mt7615_dev *dev)
{
int i;
@@ -1861,36 +1820,49 @@ mt7615_dma_reset(struct mt7615_dev *dev)
for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+ mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
+ }
mt76_set(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}
+EXPORT_SYMBOL_GPL(mt7615_dma_reset);
void mt7615_mac_reset_work(struct work_struct *work)
{
+ struct mt7615_phy *phy2;
+ struct mt76_phy *ext_phy;
struct mt7615_dev *dev;
dev = container_of(work, struct mt7615_dev, reset_work);
+ ext_phy = dev->mt76.phy2;
+ phy2 = ext_phy ? ext_phy->priv : NULL;
if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
return;
ieee80211_stop_queues(mt76_hw(dev));
- if (dev->mt76.phy2)
- ieee80211_stop_queues(dev->mt76.phy2->hw);
+ if (ext_phy)
+ ieee80211_stop_queues(ext_phy->hw);
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ cancel_delayed_work_sync(&dev->phy.mac_work);
+ del_timer_sync(&dev->phy.roc_timer);
+ cancel_work_sync(&dev->phy.roc_work);
+ if (phy2) {
+ cancel_delayed_work_sync(&phy2->mac_work);
+ del_timer_sync(&phy2->roc_timer);
+ cancel_work_sync(&phy2->roc_work);
+ }
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
- if (dev->mt76.phy2)
- mt76_txq_schedule_all(dev->mt76.phy2);
+ if (ext_phy)
+ mt76_txq_schedule_all(ext_phy);
tasklet_disable(&dev->mt76.tx_tasklet);
napi_disable(&dev->mt76.napi[0]);
@@ -1924,8 +1896,8 @@ void mt7615_mac_reset_work(struct work_struct *work)
napi_schedule(&dev->mt76.napi[1]);
ieee80211_wake_queues(mt76_hw(dev));
- if (dev->mt76.phy2)
- ieee80211_wake_queues(dev->mt76.phy2->hw);
+ if (ext_phy)
+ ieee80211_wake_queues(ext_phy->hw);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
@@ -1934,8 +1906,12 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt7615_update_beacons(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
MT7615_WATCHDOG_TIME);
+ if (phy2)
+ ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work,
+ MT7615_WATCHDOG_TIME);
+
}
static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
@@ -2031,6 +2007,9 @@ int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
bool ext_phy = phy != &dev->phy;
int err;
+ if (is_mt7663(&dev->mt76))
+ return 0;
+
if (dev->mt76.region == NL80211_DFS_UNSET) {
phy->dfs_state = -1;
if (phy->rdd_state)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index e0b89257db90..f0d4b29a52a2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -167,6 +167,10 @@ enum tx_phy_bandwidth {
#define MT_TXD_SIZE (8 * 4)
+#define MT_USB_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
+#define MT_USB_HDR_SIZE 4
+#define MT_USB_TAIL_SIZE 4
+
#define MT_TXD0_P_IDX BIT(31)
#define MT_TXD0_Q_IDX GENMASK(30, 26)
#define MT_TXD0_UDP_TCP_SUM BIT(24)
@@ -252,8 +256,11 @@ enum tx_phy_bandwidth {
#define MT_MSDU_ID_VALID BIT(15)
+#define MT_TXD_LEN_MASK GENMASK(11, 0)
#define MT_TXD_LEN_MSDU_LAST BIT(14)
#define MT_TXD_LEN_AMSDU_LAST BIT(15)
+/* mt7663 */
+#define MT_TXD_LEN_LAST BIT(15)
struct mt7615_txp_ptr {
__le32 buf0;
@@ -393,6 +400,33 @@ enum mt7615_cipher_type {
MT_CIPHER_GCMP_256,
};
+static inline enum mt7615_cipher_type
+mt7615_mac_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MT_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MT_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MT_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MT_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MT_CIPHER_WAPI;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
static inline struct mt7615_txp_common *
mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
@@ -406,4 +440,9 @@ mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
return (struct mt7615_txp_common *)(txwi + MT_TXD_SIZE);
}
+static inline u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid)
+{
+ return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE;
+}
+
#endif
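Moving mt7615_mac_wtbl_addr() into mac.h works because the WTBL is a flat array of fixed-size per-station records; every "addr + N * 4" access in mac.c is simply word N of one record. A standalone arithmetic check, with made-up base and stride values standing in for MT_WTBL_BASE and MT_WTBL_ENTRY_SIZE:

    #include <stdio.h>
    #include <stdint.h>

    #define WTBL_BASE        0x30000u /* assumed example base */
    #define WTBL_ENTRY_SIZE  256u     /* assumed example stride */

    static uint32_t wtbl_addr(unsigned int wcid)
    {
            return WTBL_BASE + wcid * WTBL_ENTRY_SIZE;
    }

    int main(void)
    {
            /* word 5 of entry 3, matching the "addr + 5 * 4" accesses */
            printf("0x%x\n", wtbl_addr(3) + 5 * 4);
            return 0;
    }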
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 6586176c29af..c26f99b368d9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -4,11 +4,10 @@
* Author: Roy Luo <royluo@google.com>
* Ryder Lee <ryder.lee@mediatek.com>
* Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <linux/pci.h>
#include <linux/module.h>
#include "mt7615.h"
#include "mcu.h"
@@ -50,19 +49,17 @@ static int mt7615_start(struct ieee80211_hw *hw)
mt7615_mac_enable_nf(dev, 1);
}
+ mt7615_mcu_set_channel_domain(phy);
mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
- if (running)
- goto out;
-
- mt7615_mac_reset_counters(dev);
-
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(hw, &phy->mac_work,
MT7615_WATCHDOG_TIME);
-out:
+ if (!running)
+ mt7615_mac_reset_counters(dev);
+
mutex_unlock(&dev->mt76.mutex);
return 0;
@@ -73,9 +70,14 @@ static void mt7615_stop(struct ieee80211_hw *hw)
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ cancel_delayed_work_sync(&phy->mac_work);
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+
mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+ cancel_delayed_work_sync(&phy->scan_work);
if (phy != &dev->phy) {
mt7615_mcu_set_pm(dev, 1, 1);
@@ -83,8 +85,6 @@ static void mt7615_stop(struct ieee80211_hw *hw)
}
if (!mt7615_dev_running(dev)) {
- cancel_delayed_work_sync(&dev->mt76.mac_work);
-
mt7615_mcu_set_pm(dev, 0, 1);
mt7615_mcu_set_mac_enable(dev, 0, false);
}
@@ -157,10 +157,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
else
mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
- ret = mt7615_mcu_add_dev_info(dev, vif, true);
- if (ret)
- goto out;
-
dev->vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
phy->omac_mask |= BIT(mvif->omac_idx);
@@ -183,6 +179,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
mt76_txq_init(&dev->mt76, vif->txq);
}
+ ret = mt7615_mcu_add_dev_info(dev, vif, true);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -218,20 +215,44 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&dev->sta_poll_lock);
}
+static void mt7615_init_dfs_state(struct mt7615_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+
+ if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
+ mphy->chandef.width == chandef->width)
+ return;
+
+ phy->dfs_state = -1;
+}
+
static int mt7615_set_channel(struct mt7615_phy *phy)
{
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
int ret;
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ cancel_delayed_work_sync(&phy->mac_work);
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &phy->mt76->state);
- phy->dfs_state = -1;
+ mt7615_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
+ if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
+ mt7615_mcu_apply_rx_dcoc(phy);
+ mt7615_mcu_apply_tx_dpd(phy);
+ }
+
ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
if (ret)
goto out;
@@ -250,11 +271,41 @@ out:
mutex_unlock(&dev->mt76.mutex);
mt76_txq_schedule_all(phy->mt76);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
MT7615_WATCHDOG_TIME);
return ret;
}
+static int
+mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
+ struct mt7615_sta *msta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7615_wtbl_desc *wd;
+
+ wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+ if (!wd)
+ return -ENOMEM;
+
+ wd->type = MT7615_WTBL_KEY_DESC;
+ wd->sta = msta;
+
+ wd->key.key = kmemdup(key->key, key->keylen, GFP_KERNEL);
+ if (!wd->key.key) {
+ kfree(wd);
+ return -ENOMEM;
+ }
+ wd->key.cipher = key->cipher;
+ wd->key.keyidx = key->keyidx;
+ wd->key.keylen = key->keylen;
+ wd->key.cmd = cmd;
+
+ list_add_tail(&wd->node, &dev->wd_head);
+ queue_work(dev->mt76.usb.wq, &dev->wtbl_work);
+
+ return 0;
+}
+
static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -303,6 +354,9 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
+ if (mt76_is_usb(&dev->mt76))
+ return mt7615_queue_key_update(dev, cmd, msta, key);
+
return mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
}
@@ -408,15 +462,12 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
u32 changed)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
mutex_lock(&dev->mt76.mutex);
- if (changed & BSS_CHANGED_ASSOC)
- mt7615_mcu_add_bss_info(dev, vif, info->assoc);
-
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
- struct mt7615_phy *phy = mt7615_hw_phy(hw);
if (slottime != phy->slottime) {
phy->slottime = slottime;
@@ -425,14 +476,20 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- mt7615_mcu_add_bss_info(dev, vif, info->enable_beacon);
+ mt7615_mcu_add_bss_info(phy, vif, NULL, info->enable_beacon);
mt7615_mcu_sta_add(dev, vif, NULL, info->enable_beacon);
+
+ if (vif->p2p && info->enable_beacon)
+ mt7615_mcu_set_p2p_oppps(hw, vif);
}
if (changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED))
mt7615_mcu_add_beacon(dev, hw, vif, info->enable_beacon);
+ if (changed & BSS_CHANGED_PS)
+ mt7615_mcu_set_vif_ps(dev, vif);
+
mutex_unlock(&dev->mt76.mutex);
}
@@ -466,13 +523,19 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.idx = idx;
msta->wcid.ext_phy = mvif->band_idx;
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ struct mt7615_phy *phy;
+
+ phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy;
+ mt7615_mcu_add_bss_info(phy, vif, sta, true);
+ }
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
-
mt7615_mcu_sta_add(dev, vif, sta, true);
return 0;
}
+EXPORT_SYMBOL_GPL(mt7615_mac_sta_add);
void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -483,12 +546,20 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7615_mcu_sta_add(dev, vif, sta, false);
mt7615_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_phy *phy;
+
+ phy = mvif->band_idx ? mt7615_ext_phy(dev) : &dev->phy;
+ mt7615_mcu_add_bss_info(phy, vif, sta, false);
+ }
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
}
+EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove);
static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -694,13 +765,242 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
}
phy->chainmask = tx_ant;
- mt76_set_stream_caps(&dev->mt76, true);
+ mt76_set_stream_caps(phy->mt76, true);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
+static void mt7615_roc_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_phy *phy = priv;
+
+ mt7615_mcu_set_roc(phy, vif, NULL, 0);
+}
+
+void mt7615_roc_work(struct work_struct *work)
+{
+ struct mt7615_phy *phy;
+
+ phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
+ roc_work);
+
+ if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ return;
+
+ ieee80211_iterate_active_interfaces(phy->mt76->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_roc_iter, phy);
+ ieee80211_remain_on_channel_expired(phy->mt76->hw);
+}
+
+void mt7615_roc_timer(struct timer_list *timer)
+{
+ struct mt7615_phy *phy = from_timer(phy, timer, roc_timer);
+
+ ieee80211_queue_work(phy->mt76->hw, &phy->roc_work);
+}
+
+void mt7615_scan_work(struct work_struct *work)
+{
+ struct mt7615_phy *phy;
+
+ phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
+ scan_work.work);
+
+ while (true) {
+ struct mt7615_mcu_rxd *rxd;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&phy->dev->mt76.lock);
+ skb = __skb_dequeue(&phy->scan_event_list);
+ spin_unlock_bh(&phy->dev->mt76.lock);
+
+ if (!skb)
+ break;
+
+ rxd = (struct mt7615_mcu_rxd *)skb->data;
+ if (rxd->eid == MCU_EVENT_SCHED_SCAN_DONE) {
+ ieee80211_sched_scan_results(phy->mt76->hw);
+ } else if (test_and_clear_bit(MT76_HW_SCANNING,
+ &phy->mt76->state)) {
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
+ ieee80211_scan_completed(phy->mt76->hw, &info);
+ }
+ dev_kfree_skb(skb);
+ }
+}
+
+static int
+mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct mt76_phy *mphy = hw->priv;
+
+ return mt7615_mcu_hw_scan(mphy->priv, vif, req);
+}
+
+static void
+mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76_phy *mphy = hw->priv;
+
+ mt7615_mcu_cancel_hw_scan(mphy->priv, vif);
+}
+
+static int
+mt7615_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+
+ err = mt7615_mcu_sched_scan_req(mphy->priv, vif, req);
+ if (err < 0)
+ return err;
+
+ return mt7615_mcu_sched_scan_enable(mphy->priv, vif, true);
+}
+
+static int
+mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76_phy *mphy = hw->priv;
+
+ return mt7615_mcu_sched_scan_enable(mphy->priv, vif, false);
+}
+
+static int mt7615_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int err;
+
+ if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state))
+ return 0;
+
+ err = mt7615_mcu_set_roc(phy, vif, chan, duration);
+ if (err < 0) {
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+ return err;
+ }
+
+ if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) {
+ mt7615_mcu_set_roc(phy, vif, NULL, 0);
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ return 0;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+
+ mt7615_mcu_set_roc(phy, vif, NULL, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mt7615_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool ext_phy = phy != &dev->phy;
+ int err = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+ cancel_delayed_work_sync(&phy->scan_work);
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mt76_set(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
+
+ set_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_mcu_set_suspend_iter, phy);
+
+ if (!mt7615_dev_running(dev))
+ err = mt7615_mcu_set_hif_suspend(dev, true);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+
+static int mt7615_resume(struct ieee80211_hw *hw)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool running, ext_phy = phy != &dev->phy;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ running = mt7615_dev_running(dev);
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (!running) {
+ int err;
+
+ err = mt7615_mcu_set_hif_suspend(dev, false);
+ if (err < 0) {
+ mutex_unlock(&dev->mt76.mutex);
+ return err;
+ }
+ }
+
+ clear_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_mcu_set_suspend_iter, phy);
+
+ ieee80211_queue_delayed_work(hw, &phy->mac_work,
+ MT7615_WATCHDOG_TIME);
+ mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt7615_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_dev *mdev = &dev->mt76;
+
+ device_set_wakeup_enable(mdev->dev, enabled);
+}
+
+static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ mt7615_mcu_update_gtk_rekey(hw, vif, data);
+}
+#endif /* CONFIG_PM */
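
Note how mt7615_resume() undoes mt7615_suspend() in largely mirrored
order: the host interface comes back first and the beacon filter is
lifted last. A toy sketch of the suspend steps and their reversal (the
step names are a loose paraphrase of the code above, nothing more):

#include <stdio.h>

int main(void)
{
	static const char * const steps[] = {
		"stop mac_work, clear RUNNING",
		"drop other-BSS beacons via RFCR",
		"program per-vif suspend state",
		"suspend the host interface",
	};
	int i, n = sizeof(steps) / sizeof(steps[0]);

	for (i = 0; i < n; i++)
		printf("suspend %d: %s\n", i + 1, steps[i]);
	for (i = n - 1; i >= 0; i--)
		printf("resume  %d: undo '%s'\n", n - i, steps[i]);
	return 0;
}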
+
const struct ieee80211_ops mt7615_ops = {
.tx = mt7615_tx,
.start = mt7615_start,
@@ -730,32 +1030,19 @@ const struct ieee80211_ops mt7615_ops = {
.get_antenna = mt76_get_antenna,
.set_antenna = mt7615_set_antenna,
.set_coverage_class = mt7615_set_coverage_class,
+ .hw_scan = mt7615_hw_scan,
+ .cancel_hw_scan = mt7615_cancel_hw_scan,
+ .sched_scan_start = mt7615_start_sched_scan,
+ .sched_scan_stop = mt7615_stop_sched_scan,
+ .remain_on_channel = mt7615_remain_on_channel,
+ .cancel_remain_on_channel = mt7615_cancel_remain_on_channel,
+#ifdef CONFIG_PM
+ .suspend = mt7615_suspend,
+ .resume = mt7615_resume,
+ .set_wakeup = mt7615_set_wakeup,
+ .set_rekey_data = mt7615_set_rekey_data,
+#endif /* CONFIG_PM */
};
+EXPORT_SYMBOL_GPL(mt7615_ops);
-static int __init mt7615_init(void)
-{
- int ret;
-
- ret = pci_register_driver(&mt7615_pci_driver);
- if (ret)
- return ret;
-
- if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
- ret = platform_driver_register(&mt7622_wmac_driver);
- if (ret)
- pci_unregister_driver(&mt7615_pci_driver);
- }
-
- return ret;
-}
-
-static void __exit mt7615_exit(void)
-{
- if (IS_ENABLED(CONFIG_MT7622_WMAC))
- platform_driver_unregister(&mt7622_wmac_driver);
- pci_unregister_driver(&mt7615_pci_driver);
-}
-
-module_init(mt7615_init);
-module_exit(mt7615_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 610cfa918c7b..6e869b8c5e26 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -11,6 +11,11 @@
#include "mac.h"
#include "eeprom.h"
+static bool prefer_offload_fw = true;
+module_param(prefer_offload_fw, bool, 0644);
+MODULE_PARM_DESC(prefer_offload_fw,
+ "Prefer client mode offload firmware (MT7663)");
+
struct mt7615_patch_hdr {
char build_date[16];
char platform[4];
@@ -135,16 +140,24 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
mcu_txd->pkt_type = MCU_PKT_ID;
mcu_txd->seq = seq;
- if (cmd & MCU_FW_PREFIX) {
+ switch (cmd & ~MCU_CMD_MASK) {
+ case MCU_FW_PREFIX:
mcu_txd->set_query = MCU_Q_NA;
mcu_txd->cid = mcu_cmd;
- } else {
+ break;
+ case MCU_CE_PREFIX:
+ mcu_txd->set_query = MCU_Q_SET;
+ mcu_txd->cid = mcu_cmd;
+ break;
+ default:
mcu_txd->cid = MCU_CMD_EXT_CID;
mcu_txd->set_query = MCU_Q_SET;
mcu_txd->ext_cid = cmd;
mcu_txd->ext_cid_ack = 1;
+ break;
}
}
+EXPORT_SYMBOL_GPL(mt7615_mcu_fill_msg);
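
mt7615_mcu_fill_msg() now routes on the prefix bits kept at the top of
the command word: cmd & ~MCU_CMD_MASK strips the command id and leaves
only the prefix, and CE (connac) commands get their own set/query
treatment. A compilable sketch of the decode; the BIT(29) value for
MCU_CE_PREFIX is an assumption inferred from the prefix scheme in mcu.h:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define MCU_FW_PREFIX	BIT(31)
#define MCU_UNI_PREFIX	BIT(30)
#define MCU_CE_PREFIX	BIT(29)	/* assumed; added elsewhere in the patch */
#define MCU_CMD_MASK	(~(MCU_FW_PREFIX | MCU_UNI_PREFIX | MCU_CE_PREFIX))

static const char *route(uint32_t cmd)
{
	switch (cmd & ~MCU_CMD_MASK) {	/* keep only the prefix bits */
	case MCU_FW_PREFIX:
		return "fw cmd: cid = cmd, set_query = NA";
	case MCU_CE_PREFIX:
		return "ce cmd: cid = cmd, set_query = SET";
	default:
		return "ext cmd: cid = EXT_CID, ext_cid = cmd";
	}
}

int main(void)
{
	printf("%s\n", route(MCU_FW_PREFIX | 0x01));
	printf("%s\n", route(MCU_CE_PREFIX | 0x2f));
	printf("%s\n", route(0x10));
	return 0;
}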
static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq)
@@ -179,6 +192,19 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
skb_pull(skb, sizeof(*rxd));
ret = le32_to_cpu(*(__le32 *)skb->data);
break;
+ case MCU_UNI_CMD_DEV_INFO_UPDATE:
+ case MCU_UNI_CMD_BSS_INFO_UPDATE:
+ case MCU_UNI_CMD_STA_REC_UPDATE:
+ case MCU_UNI_CMD_HIF_CTRL:
+ case MCU_UNI_CMD_OFFLOAD:
+ case MCU_UNI_CMD_SUSPEND: {
+ struct mt7615_mcu_uni_event *event;
+
+ skb_pull(skb, sizeof(*rxd));
+ event = (struct mt7615_mcu_uni_event *)skb->data;
+ ret = le32_to_cpu(event->status);
+ break;
+ }
default:
break;
}
@@ -208,6 +234,7 @@ int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq)
return ret;
}
+EXPORT_SYMBOL_GPL(mt7615_mcu_wait_response);
static int
mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
@@ -231,18 +258,18 @@ out:
return ret;
}
-static int
-mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
- int len, bool wait_resp)
+int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
{
struct sk_buff *skb;
- skb = mt7615_mcu_msg_alloc(data, len);
+ skb = mt76_mcu_msg_alloc(mdev, data, len);
if (!skb)
return -ENOMEM;
return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp);
}
+EXPORT_SYMBOL_GPL(mt7615_mcu_msg_send);
static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
@@ -311,6 +338,110 @@ mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb)
}
static void
+mt7615_mcu_scan_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ u8 *seq_num = skb->data + sizeof(struct mt7615_mcu_rxd);
+ struct mt7615_phy *phy;
+ struct mt76_phy *mphy;
+
+ if (*seq_num & BIT(7) && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+ else
+ mphy = &dev->mt76.phy;
+
+ phy = (struct mt7615_phy *)mphy->priv;
+
+ spin_lock_bh(&dev->mt76.lock);
+ __skb_queue_tail(&phy->scan_event_list, skb);
+ spin_unlock_bh(&dev->mt76.lock);
+
+ ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
+ MT7615_HW_SCAN_TIMEOUT);
+}
+
+static void
+mt7615_mcu_roc_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_roc_tlv *event;
+ struct mt7615_phy *phy;
+ struct mt76_phy *mphy;
+ int duration;
+
+ skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
+ event = (struct mt7615_roc_tlv *)skb->data;
+
+ if (event->dbdc_band && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+ else
+ mphy = &dev->mt76.phy;
+
+ ieee80211_ready_on_channel(mphy->hw);
+
+ phy = (struct mt7615_phy *)mphy->priv;
+ phy->roc_grant = true;
+ wake_up(&phy->roc_wait);
+
+ duration = le32_to_cpu(event->max_interval);
+ mod_timer(&phy->roc_timer,
+ round_jiffies_up(jiffies + msecs_to_jiffies(duration)));
+}
+
+static void
+mt7615_mcu_beacon_loss_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_beacon_loss_event *event = priv;
+
+ if (mvif->idx != event->bss_idx)
+ return;
+
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ return;
+
+ ieee80211_beacon_loss(vif);
+}
+
+static void
+mt7615_mcu_beacon_loss_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_beacon_loss_event *event;
+ struct mt76_phy *mphy;
+ u8 band_idx = 0; /* TODO: DBDC support */
+
+ skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
+ event = (struct mt7615_beacon_loss_event *)skb->data;
+ if (band_idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+ else
+ mphy = &dev->mt76.phy;
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_mcu_beacon_loss_iter, event);
+}
+
+static void
+mt7615_mcu_bss_event(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_mcu_bss_event *event;
+ struct mt76_phy *mphy;
+ u8 band_idx = 0; /* TODO: DBDC support */
+
+ event = (struct mt7615_mcu_bss_event *)(skb->data +
+ sizeof(struct mt7615_mcu_rxd));
+
+ if (band_idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+ else
+ mphy = &dev->mt76.phy;
+
+ if (event->is_absent)
+ ieee80211_stop_queues(mphy->hw);
+ else
+ ieee80211_wake_queues(mphy->hw);
+}
+
+static void
mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
@@ -319,6 +450,19 @@ mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb)
case MCU_EVENT_EXT:
mt7615_mcu_rx_ext_event(dev, skb);
break;
+ case MCU_EVENT_BSS_BEACON_LOSS:
+ mt7615_mcu_beacon_loss_event(dev, skb);
+ break;
+ case MCU_EVENT_ROC:
+ mt7615_mcu_roc_event(dev, skb);
+ break;
+ case MCU_EVENT_SCHED_SCAN_DONE:
+ case MCU_EVENT_SCAN_DONE:
+ mt7615_mcu_scan_event(dev, skb);
+ return;
+ case MCU_EVENT_BSS_ABSENCE:
+ mt7615_mcu_bss_event(dev, skb);
+ break;
default:
break;
}
@@ -333,6 +477,11 @@ void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb)
rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
+ rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
+ rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_BSS_ABSENCE ||
+ rxd->eid == MCU_EVENT_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_ROC ||
!rxd->seq)
mt7615_mcu_rx_unsolicited_event(dev, skb);
else
@@ -493,7 +642,8 @@ mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
}
static struct sk_buff *
-mt7615_mcu_alloc_sta_req(struct mt7615_vif *mvif, struct mt7615_sta *msta)
+mt7615_mcu_alloc_sta_req(struct mt7615_dev *dev, struct mt7615_vif *mvif,
+ struct mt7615_sta *msta)
{
struct sta_req_hdr hdr = {
.bss_idx = mvif->idx,
@@ -503,7 +653,7 @@ mt7615_mcu_alloc_sta_req(struct mt7615_vif *mvif, struct mt7615_sta *msta)
};
struct sk_buff *skb;
- skb = mt7615_mcu_msg_alloc(NULL, MT7615_STA_UPDATE_MAX_SIZE);
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, MT7615_STA_UPDATE_MAX_SIZE);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -513,8 +663,8 @@ mt7615_mcu_alloc_sta_req(struct mt7615_vif *mvif, struct mt7615_sta *msta)
}
static struct wtbl_req_hdr *
-mt7615_mcu_alloc_wtbl_req(struct mt7615_sta *msta, int cmd,
- void *sta_wtbl, struct sk_buff **skb)
+mt7615_mcu_alloc_wtbl_req(struct mt7615_dev *dev, struct mt7615_sta *msta,
+ int cmd, void *sta_wtbl, struct sk_buff **skb)
{
struct tlv *sta_hdr = sta_wtbl;
struct wtbl_req_hdr hdr = {
@@ -524,7 +674,8 @@ mt7615_mcu_alloc_wtbl_req(struct mt7615_sta *msta, int cmd,
struct sk_buff *nskb = *skb;
if (!nskb) {
- nskb = mt7615_mcu_msg_alloc(NULL, MT7615_WTBL_UPDATE_BA_SIZE);
+ nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ MT7615_WTBL_UPDATE_BA_SIZE);
if (!nskb)
return ERR_PTR(-ENOMEM);
@@ -572,12 +723,12 @@ mt7615_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
static int
mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- bool enable)
+ struct ieee80211_sta *sta, bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
struct bss_info_basic *bss;
u8 wlan_idx = mvif->sta.wcid.idx;
- u32 type = NETWORK_INFRA;
struct tlv *tlv;
tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
@@ -588,20 +739,11 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
break;
case NL80211_IFTYPE_STATION:
/* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (enable) {
- struct ieee80211_sta *sta;
+ if (enable && sta) {
struct mt7615_sta *msta;
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
msta = (struct mt7615_sta *)sta->drv_priv;
wlan_idx = msta->wcid.idx;
- rcu_read_unlock();
}
break;
case NL80211_IFTYPE_ADHOC:
@@ -638,10 +780,16 @@ mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
- type = CONNECTION_INFRA_AP;
+ if (vif->p2p)
+ type = CONNECTION_P2P_GO;
+ else
+ type = CONNECTION_INFRA_AP;
break;
case NL80211_IFTYPE_STATION:
- type = CONNECTION_INFRA_STA;
+ if (vif->p2p)
+ type = CONNECTION_P2P_GC;
+ else
+ type = CONNECTION_INFRA_STA;
break;
case NL80211_IFTYPE_ADHOC:
type = CONNECTION_IBSS_ADHOC;
@@ -704,6 +852,7 @@ mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
{
struct sta_rec_basic *basic;
struct tlv *tlv;
+ int conn_type;
tlv = mt7615_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
@@ -726,13 +875,24 @@ mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GC;
+ else
+ conn_type = CONNECTION_INFRA_STA;
+ basic->conn_type = cpu_to_le32(conn_type);
+ basic->aid = cpu_to_le16(sta->aid);
break;
case NL80211_IFTYPE_STATION:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GO;
+ else
+ conn_type = CONNECTION_INFRA_AP;
+ basic->conn_type = cpu_to_le32(conn_type);
+ basic->aid = cpu_to_le16(vif->bss_conf.aid);
break;
case NL80211_IFTYPE_ADHOC:
basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ basic->aid = cpu_to_le16(sta->aid);
break;
default:
WARN_ON(1);
@@ -740,7 +900,6 @@ mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
- basic->aid = cpu_to_le16(sta->aid);
basic->qos = sta->wme;
}
@@ -815,6 +974,7 @@ mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct wtbl_generic *generic;
struct wtbl_rx *rx;
+ struct wtbl_spe *spe;
struct tlv *tlv;
tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
@@ -823,8 +983,11 @@ mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
generic = (struct wtbl_generic *)tlv;
if (sta) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
+ else
+ generic->partial_aid = cpu_to_le16(sta->aid);
memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
- generic->partial_aid = cpu_to_le16(sta->aid);
generic->muar_idx = mvif->omac_idx;
generic->qos = sta->wme;
} else {
@@ -839,6 +1002,11 @@ mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
rx->rca2 = 1;
rx->rv = 1;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
+ wtbl_tlv, sta_wtbl);
+ spe = (struct wtbl_spe *)tlv;
+ spe->spe_idx = 24;
}
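
All of these wtbl/sta messages are chains of tag-length-value records;
each record's len field covers the whole record, header included, which
is what lets the firmware skip tags it does not understand. A minimal
self-contained packing sketch; the tag value and the wtbl_spe layout
here are placeholders, not the real definitions:

#include <stdint.h>
#include <stdio.h>

struct tlv {
	uint16_t tag;
	uint16_t len;	/* stored little-endian by the driver */
} __attribute__((packed));

struct wtbl_spe {
	struct tlv hdr;
	uint8_t spe_idx;
	uint8_t pad[3];
} __attribute__((packed));

static void *add_tlv(uint8_t *buf, size_t *off, uint16_t tag, uint16_t len)
{
	struct tlv *hdr = (struct tlv *)(buf + *off);

	hdr->tag = tag;
	hdr->len = len;	/* length includes the tlv header itself */
	*off += len;
	return hdr;
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	size_t off = 0;
	struct wtbl_spe *spe;

	spe = add_tlv(buf, &off, 16 /* placeholder tag */, sizeof(*spe));
	spe->spe_idx = 24;	/* fixed index, as in the hunk above */

	printf("message is now %zu bytes\n", off);
	return 0;
}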
static void
@@ -846,11 +1014,10 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
void *sta_wtbl, void *wtbl_tlv)
{
struct tlv *tlv;
+ struct wtbl_ht *ht = NULL;
u32 flags = 0;
if (sta->ht_cap.ht_supported) {
- struct wtbl_ht *ht;
-
tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
@@ -867,6 +1034,7 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
if (sta->vht_cap.vht_supported) {
struct wtbl_vht *vht;
+ u8 af;
tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
wtbl_tlv, sta_wtbl);
@@ -874,6 +1042,13 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC,
vht->vht = 1;
+ af = (sta->vht_cap.cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ if (ht)
+ ht->af = max(ht->af, af);
+
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
flags |= MT_WTBL_W5_SHORT_GI_80;
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
@@ -908,20 +1083,21 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
}
static int
-mt7615_mcu_add_bss(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- bool enable)
+mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = phy->dev;
struct sk_buff *skb;
- skb = mt7615_mcu_alloc_sta_req(mvif, NULL);
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, NULL);
if (IS_ERR(skb))
return PTR_ERR(skb);
if (enable)
mt7615_mcu_bss_omac_tlv(skb, vif);
- mt7615_mcu_bss_basic_tlv(skb, vif, enable);
+ mt7615_mcu_bss_basic_tlv(skb, vif, sta, enable);
if (enable && mvif->omac_idx > EXT_BSSID_START)
mt7615_mcu_bss_ext_tlv(skb, mvif);
@@ -941,7 +1117,7 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
struct sk_buff *skb = NULL;
int err;
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, NULL, &skb);
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
@@ -952,7 +1128,7 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
if (err < 0)
return err;
- skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -973,7 +1149,7 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
struct sk_buff *skb;
int err;
- skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -985,7 +1161,7 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
return err;
skb = NULL;
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, NULL, &skb);
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
@@ -1007,7 +1183,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
- sskb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ sskb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
if (IS_ERR(sskb))
return PTR_ERR(sskb);
@@ -1015,8 +1191,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
if (enable && sta)
mt7615_mcu_sta_ht_tlv(sskb, sta);
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_RESET_AND_SET, NULL,
- &wskb);
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
+ NULL, &wskb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
@@ -1060,7 +1236,7 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
struct tlv *sta_wtbl;
struct sk_buff *skb;
- skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1068,7 +1244,8 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
mt7615_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
@@ -1103,7 +1280,7 @@ mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif,
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
- skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1113,7 +1290,7 @@ mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif,
sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_RESET_AND_SET,
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
sta_wtbl, &skb);
if (enable) {
mt7615_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
@@ -1148,7 +1325,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_dev *dev,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct {
- struct req_hdr {
+ struct {
u8 omac_idx;
u8 band_idx;
__le16 pad;
@@ -1160,7 +1337,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_dev *dev,
u8 pad;
u8 omac_addr[ETH_ALEN];
} __packed tlv;
- } data = {
+ } dev_req = {
.hdr = {
.omac_idx = mvif->omac_idx,
.band_idx = mvif->band_idx,
@@ -1171,11 +1348,65 @@ mt7615_mcu_uni_add_dev(struct mt7615_dev *dev,
.active = enable,
},
};
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_bss_basic_tlv basic;
+ } basic_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct mt7615_bss_basic_tlv)),
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .wmm_idx = mvif->wmm_idx,
+ .active = enable,
+ .bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx),
+ .sta_idx = cpu_to_le16(mvif->sta.wcid.idx),
+ .conn_state = 1,
+ },
+ };
+ int err, idx, cmd, len;
+ void *data;
- memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ break;
+ case NL80211_IFTYPE_STATION:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
- return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_DEV_INFO_UPDATE,
- &data, sizeof(data), true);
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ basic_req.basic.hw_bss_idx = idx;
+
+ memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+
+ cmd = enable ? MCU_UNI_CMD_DEV_INFO_UPDATE : MCU_UNI_CMD_BSS_INFO_UPDATE;
+ data = enable ? (void *)&dev_req : (void *)&basic_req;
+ len = enable ? sizeof(dev_req) : sizeof(basic_req);
+
+ err = __mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
+ if (err < 0)
+ return err;
+
+ cmd = enable ? MCU_UNI_CMD_BSS_INFO_UPDATE : MCU_UNI_CMD_DEV_INFO_UPDATE;
+ data = enable ? (void *)&basic_req : (void *)&dev_req;
+ len = enable ? sizeof(basic_req) : sizeof(dev_req);
+
+ return __mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
}
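
The enable and disable paths send the same two messages in opposite
order: the device/OMAC record has to exist before the BSS record that
references it, and must be torn down after it. A toy sketch of that
ordering swap:

#include <stdio.h>

static int send_cmd(const char *msg)
{
	printf("-> %s\n", msg);
	return 0;
}

static int update(int enable)
{
	const char *first  = enable ? "DEV_INFO_UPDATE" : "BSS_INFO_UPDATE";
	const char *second = enable ? "BSS_INFO_UPDATE" : "DEV_INFO_UPDATE";
	int err;

	err = send_cmd(first);	/* parent record first on enable */
	if (err < 0)
		return err;
	return send_cmd(second);
}

int main(void)
{
	update(1);	/* enable:  DEV then BSS */
	update(0);	/* disable: BSS then DEV */
	return 0;
}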
static int
@@ -1185,90 +1416,142 @@ mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
}
static int
-mt7615_mcu_uni_add_bss(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable)
+mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
+ struct mt7615_dev *dev = phy->dev;
struct {
- struct req_hdr {
+ struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
- struct basic_tlv {
- __le16 tag;
- __le16 len;
- u8 active;
- u8 omac_idx;
- u8 hw_bss_idx;
- u8 band_idx;
- __le32 conn_type;
- u8 conn_state;
- u8 wmm_idx;
- u8 bssid[ETH_ALEN];
- __le16 bmc_tx_wlan_idx;
- __le16 bcn_interval;
- u8 dtim_period;
- u8 phymode;
- __le16 sta_idx;
- u8 nonht_basic_phy;
- u8 pad[3];
- } __packed basic;
- } req = {
+ struct mt7615_bss_basic_tlv basic;
+ } basic_req = {
.hdr = {
.bss_idx = mvif->idx,
},
.basic = {
.tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
- .len = cpu_to_le16(sizeof(struct basic_tlv)),
+ .len = cpu_to_le16(sizeof(struct mt7615_bss_basic_tlv)),
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
.dtim_period = vif->bss_conf.dtim_period,
.omac_idx = mvif->omac_idx,
.band_idx = mvif->band_idx,
.wmm_idx = mvif->wmm_idx,
- .active = enable,
+ .active = true, /* keep the BSS entry; conn_state below drives activation */
+ .phymode = 0x38,
},
};
- u8 idx, tx_wlan_idx = 0;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct rlm_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 control_channel;
+ u8 center_chan;
+ u8 center_chan2;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 short_st;
+ u8 ht_op_info;
+ u8 sco;
+ u8 pad[3];
+ } __packed rlm;
+ } __packed rlm_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .rlm = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_RLM),
+ .len = cpu_to_le16(sizeof(struct rlm_tlv)),
+ .control_channel = chandef->chan->hw_value,
+ .center_chan = ieee80211_frequency_to_channel(freq1),
+ .center_chan2 = ieee80211_frequency_to_channel(freq2),
+ .tx_streams = hweight8(phy->mt76->antenna_mask),
+ .rx_streams = phy->chainmask,
+ .short_st = true,
+ },
+ };
+ int err, conn_type;
+ u8 idx;
idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
- req.basic.hw_bss_idx = idx;
+ basic_req.basic.hw_bss_idx = idx;
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
- req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
- tx_wlan_idx = mvif->sta.wcid.idx;
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GO;
+ else
+ conn_type = CONNECTION_INFRA_AP;
+ basic_req.basic.conn_type = cpu_to_le32(conn_type);
break;
case NL80211_IFTYPE_STATION:
- if (enable) {
- struct ieee80211_sta *sta;
- struct mt7615_sta *msta;
+ if (vif->p2p)
+ conn_type = CONNECTION_P2P_GC;
+ else
+ conn_type = CONNECTION_INFRA_STA;
+ basic_req.basic.conn_type = cpu_to_le32(conn_type);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic_req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
+ memcpy(basic_req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ basic_req.basic.bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx);
+ basic_req.basic.sta_idx = cpu_to_le16(mvif->sta.wcid.idx);
+ basic_req.basic.conn_state = !enable;
- msta = (struct mt7615_sta *)sta->drv_priv;
- tx_wlan_idx = msta->wcid.idx;
- rcu_read_unlock();
- }
- req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ err = __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &basic_req, sizeof(basic_req), true);
+ if (err < 0)
+ return err;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ rlm_req.rlm.bw = CMD_CBW_40MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ rlm_req.rlm.bw = CMD_CBW_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ rlm_req.rlm.bw = CMD_CBW_8080MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ rlm_req.rlm.bw = CMD_CBW_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ rlm_req.rlm.bw = CMD_CBW_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ rlm_req.rlm.bw = CMD_CBW_10MHZ;
break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
default:
- WARN_ON(1);
+ rlm_req.rlm.bw = CMD_CBW_20MHZ;
break;
}
- memcpy(req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN);
- req.basic.bmc_tx_wlan_idx = cpu_to_le16(tx_wlan_idx);
- req.basic.sta_idx = cpu_to_le16(tx_wlan_idx);
- req.basic.conn_state = !enable;
+ if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
+ rlm_req.rlm.sco = 1; /* SCA */
+ else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
+ rlm_req.rlm.sco = 3; /* SCB */
return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
- &req, sizeof(req), true);
+ &rlm_req, sizeof(rlm_req), true);
}
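
The trailing fix-up derives sco (secondary channel offset) from where
the control channel sits relative to the 40 MHz center: below the
center means the secondary channel is above (SCA), above means below
(SCB). A sketch of just that arithmetic, with the 1/3 encodings taken
from the comments in the hunk:

#include <stdio.h>

static int sco(int control_chan, int center_chan)
{
	if (control_chan < center_chan)
		return 1;	/* SCA: secondary above */
	if (control_chan > center_chan)
		return 3;	/* SCB: secondary below */
	return 0;		/* no secondary channel */
}

int main(void)
{
	printf("ch36 center38 -> sco %d\n", sco(36, 38));
	printf("ch40 center38 -> sco %d\n", sco(40, 38));
	printf("ch36 center36 -> sco %d\n", sco(36, 36));
	return 0;
}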
static int
@@ -1355,13 +1638,14 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
struct sk_buff *skb;
int err;
- skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
if (IS_ERR(skb))
return PTR_ERR(skb);
sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
@@ -1373,7 +1657,7 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
if (err < 0)
return err;
- skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1395,7 +1679,7 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
struct sk_buff *skb;
int err;
- skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1406,13 +1690,14 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
if (err < 0 || !enable)
return err;
- skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ skb = mt7615_mcu_alloc_sta_req(dev, mvif, msta);
if (IS_ERR(skb))
return PTR_ERR(skb);
sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
- wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
@@ -1447,8 +1732,7 @@ static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
int ret = 0, cur_len;
while (len > 0) {
- cur_len = min_t(int, 4096 - sizeof(struct mt7615_mcu_txd),
- len);
+ cur_len = min_t(int, 4096 - dev->mt76.mcu_ops->headroom, len);
ret = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_SCATTER,
data, cur_len, false);
@@ -1480,11 +1764,12 @@ static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
&req, sizeof(req), true);
}
-static int mt7615_mcu_restart(struct mt76_dev *dev)
+int mt7615_mcu_restart(struct mt76_dev *dev)
{
return __mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL,
0, true);
}
+EXPORT_SYMBOL_GPL(mt7615_mcu_restart);
static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
{
@@ -1521,24 +1806,29 @@ static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
!en * MT_INFRACFG_MISC_AP2CONN_WAKE);
}
-static int mt7615_driver_own(struct mt7615_dev *dev)
+int mt7615_driver_own(struct mt7615_dev *dev)
{
+ struct mt76_dev *mdev = &dev->mt76;
u32 addr;
- addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
mt7622_trigger_hif_int(dev, true);
+
+ addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
}
+
mt7622_trigger_hif_int(dev, false);
return 0;
}
+EXPORT_SYMBOL_GPL(mt7615_driver_own);
-static int mt7615_firmware_own(struct mt7615_dev *dev)
+int mt7615_firmware_own(struct mt7615_dev *dev)
{
u32 addr;
@@ -1547,9 +1837,8 @@ static int mt7615_firmware_own(struct mt7615_dev *dev)
mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
- if (is_mt7622(&dev->mt76) &&
- !mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
- MT_CFG_LPCR_HOST_FW_OWN,
+ if (!is_mt7615(&dev->mt76) &&
+ !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
dev_err(dev->mt76.dev, "Timeout for firmware own\n");
return -EIO;
@@ -1558,6 +1847,7 @@ static int mt7615_firmware_own(struct mt7615_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(mt7615_firmware_own);
static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
{
@@ -1576,7 +1866,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
return -EAGAIN;
}
- ret = request_firmware(&fw, name, dev->mt76.dev);
+ ret = firmware_request_nowarn(&fw, name, dev->mt76.dev);
if (ret)
goto out;
@@ -1671,6 +1961,15 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
return 0;
}
+static const struct wiphy_wowlan_support mt7615_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_NET_DETECT,
+ .n_patterns = 1,
+ .pattern_min_len = 1,
+ .pattern_max_len = MT7615_WOW_PATTEN_MAX_LEN,
+ .max_nd_match_sets = 10,
+};
+
static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
{
const struct mt7615_fw_trailer *hdr;
@@ -1848,7 +2147,7 @@ int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl)
static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
{
- u32 offset = 0, override_addr = 0, flag = 0;
+ u32 offset = 0, override_addr = 0, flag = FW_START_DLYCAL;
const struct mt7663_fw_trailer *hdr;
const struct mt7663_fw_buf *buf;
const struct firmware *fw;
@@ -1904,18 +2203,21 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
}
}
- if (is_mt7663(&dev->mt76)) {
- flag |= FW_START_DLYCAL;
- if (override_addr)
- flag |= FW_START_OVERRIDE;
+ if (override_addr)
+ flag |= FW_START_OVERRIDE;
- dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n",
- override_addr, flag);
- }
+ dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n",
+ override_addr, flag);
ret = mt7615_mcu_start_firmware(dev, override_addr, flag);
- if (ret)
+ if (ret) {
dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
out:
release_firmware(fw);
@@ -1923,11 +2225,50 @@ out:
return ret;
}
-static int mt7663_load_firmware(struct mt7615_dev *dev)
+static int
+mt7663_load_rom_patch(struct mt7615_dev *dev, const char **n9_firmware)
{
+ const char *selected_rom, *secondary_rom = MT7663_ROM_PATCH;
+ const char *primary_rom = MT7663_OFFLOAD_ROM_PATCH;
int ret;
- mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+ if (!prefer_offload_fw) {
+ secondary_rom = MT7663_OFFLOAD_ROM_PATCH;
+ primary_rom = MT7663_ROM_PATCH;
+ }
+ selected_rom = primary_rom;
+
+ ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, primary_rom);
+ if (ret) {
+ dev_info(dev->mt76.dev, "%s not found, switching to %s",
+ primary_rom, secondary_rom);
+ ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS,
+ secondary_rom);
+ if (ret) {
+ dev_err(dev->mt76.dev, "failed to load %s",
+ secondary_rom);
+ return ret;
+ }
+ selected_rom = secondary_rom;
+ }
+
+ if (!strcmp(selected_rom, MT7663_OFFLOAD_ROM_PATCH)) {
+ *n9_firmware = MT7663_OFFLOAD_FIRMWARE_N9;
+ dev->fw_ver = MT7615_FIRMWARE_V3;
+ dev->mcu_ops = &uni_update_ops;
+ } else {
+ *n9_firmware = MT7663_FIRMWARE_N9;
+ dev->fw_ver = MT7615_FIRMWARE_V2;
+ dev->mcu_ops = &sta_update_ops;
+ }
+
+ return 0;
+}
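
Patch selection is a plain two-candidate fallback steered by the
prefer_offload_fw parameter: try the preferred image, drop to the other
on failure, and remember which one won so the matching N9 firmware and
mcu_ops get picked. The same control flow in a standalone sketch with
made-up file names:

#include <stdio.h>
#include <string.h>

static int request_firmware_stub(const char *name)
{
	/* pretend only the generic image is present on disk */
	return strcmp(name, "generic.bin") ? -1 : 0;
}

int main(void)
{
	int prefer_offload = 1;	/* mirrors the module parameter */
	const char *primary = prefer_offload ? "offload.bin" : "generic.bin";
	const char *secondary = prefer_offload ? "generic.bin" : "offload.bin";
	const char *selected = primary;

	if (request_firmware_stub(primary)) {
		printf("%s not found, switching to %s\n", primary, secondary);
		if (request_firmware_stub(secondary))
			return 1;
		selected = secondary;
	}
	printf("loaded %s -> pick matching N9 image and mcu_ops\n", selected);
	return 0;
}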
+
+int __mt7663_load_firmware(struct mt7615_dev *dev)
+{
+ const char *n9_firmware;
+ int ret;
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
if (ret) {
@@ -1935,14 +2276,11 @@ static int mt7663_load_firmware(struct mt7615_dev *dev)
return -EIO;
}
- ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, MT7663_ROM_PATCH);
+ ret = mt7663_load_rom_patch(dev, &n9_firmware);
if (ret)
return ret;
- dev->fw_ver = MT7615_FIRMWARE_V3;
- dev->mcu_ops = &uni_update_ops;
-
- ret = mt7663_load_n9(dev, MT7663_FIRMWARE_N9);
+ ret = mt7663_load_n9(dev, n9_firmware);
if (ret)
return ret;
@@ -1954,16 +2292,36 @@ static int mt7663_load_firmware(struct mt7615_dev *dev)
return -EIO;
}
- mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+#ifdef CONFIG_PM
+ if (mt7615_firmware_offload(dev))
+ dev->mt76.hw->wiphy->wowlan = &mt7615_wowlan_support;
+#endif /* CONFIG_PM */
dev_dbg(dev->mt76.dev, "Firmware init done\n");
return 0;
}
+EXPORT_SYMBOL_GPL(__mt7663_load_firmware);
+
+static int mt7663_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ ret = __mt7663_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ return 0;
+}
int mt7615_mcu_init(struct mt7615_dev *dev)
{
static const struct mt76_mcu_ops mt7615_mcu_ops = {
+ .headroom = sizeof(struct mt7615_mcu_txd),
.mcu_skb_send_msg = mt7615_mcu_send_message,
.mcu_send_msg = mt7615_mcu_msg_send,
.mcu_restart = mt7615_mcu_restart,
@@ -1997,6 +2355,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(mt7615_mcu_init);
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
@@ -2004,6 +2363,7 @@ void mt7615_mcu_exit(struct mt7615_dev *dev)
mt7615_firmware_own(dev);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
+EXPORT_SYMBOL_GPL(mt7615_mcu_exit);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
{
@@ -2036,7 +2396,7 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
req_hdr.len = cpu_to_le16(eep_len);
- skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + eep_len);
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + eep_len);
if (!skb)
return -ENOMEM;
@@ -2046,6 +2406,7 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_EFUSE_BUFFER_MODE, true);
}
+EXPORT_SYMBOL_GPL(mt7615_mcu_set_eeprom);
int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable)
{
@@ -2187,6 +2548,7 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
&req, sizeof(req), true);
}
+EXPORT_SYMBOL_GPL(mt7615_mcu_del_wtbl_all);
int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
enum mt7615_rdd_cmd cmd, u8 index,
@@ -2313,6 +2675,25 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
}
}
+static u8 mt7615_mcu_chan_bw(struct cfg80211_chan_def *chandef)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ };
+
+ if (chandef->width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[chandef->width];
+}
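
This helper trades the old bandwidth switch for a lookup table. It
relies on nl80211_chan_width values being small dense integers and on
designated initializers zeroing the unlisted slots, so unknown or
out-of-range widths fall back to the 20 MHz code. A sketch with
stand-in enum values; the real CMD_CBW_* numbers are not shown in this
patch, so treat them as assumptions:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

enum {	/* mirrors the nl80211_chan_width ordering */
	W_20_NOHT, W_20, W_40, W_80, W_80P80, W_160, W_5, W_10
};

enum {	/* CMD_CBW_* stand-ins */
	CBW_20MHZ, CBW_40MHZ, CBW_80MHZ, CBW_8080MHZ,
	CBW_160MHZ, CBW_5MHZ, CBW_10MHZ
};

static uint8_t chan_bw(unsigned int width)
{
	static const uint8_t width_to_bw[] = {
		[W_40]	    = CBW_40MHZ,
		[W_80]	    = CBW_80MHZ,
		[W_80P80]   = CBW_8080MHZ,
		[W_160]	    = CBW_160MHZ,
		[W_5]	    = CBW_5MHZ,
		[W_10]	    = CBW_10MHZ,
		[W_20]	    = CBW_20MHZ,
		[W_20_NOHT] = CBW_20MHZ,
	};

	if (width >= ARRAY_SIZE(width_to_bw))
		return 0;	/* same 20 MHz fallback as the driver */
	return width_to_bw[width];
}

int main(void)
{
	printf("80 MHz  -> %d\n", chan_bw(W_80));
	printf("unknown -> %d\n", chan_bw(42));
	return 0;
}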
+
int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
{
struct mt7615_dev *dev = phy->dev;
@@ -2353,32 +2734,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
req.switch_reason = CH_SWITCH_NORMAL;
req.band_idx = phy != &dev->phy;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_40:
- req.bw = CMD_CBW_40MHZ;
- break;
- case NL80211_CHAN_WIDTH_80:
- req.bw = CMD_CBW_80MHZ;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- req.bw = CMD_CBW_8080MHZ;
- break;
- case NL80211_CHAN_WIDTH_160:
- req.bw = CMD_CBW_160MHZ;
- break;
- case NL80211_CHAN_WIDTH_5:
- req.bw = CMD_CBW_5MHZ;
- break;
- case NL80211_CHAN_WIDTH_10:
- req.bw = CMD_CBW_10MHZ;
- break;
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- default:
- req.bw = CMD_CBW_20MHZ;
- break;
- }
+ req.bw = mt7615_mcu_chan_bw(chandef);
mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
@@ -2415,3 +2771,906 @@ int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
sizeof(req), true);
}
+
+int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ u8 bss_idx;
+ u8 ps_state; /* 0: device awake
+ * 1: static power save
+ * 2: dynamic power saving
+ */
+ } req = {
+ .bss_idx = mvif->idx,
+ .ps_state = vif->bss_conf.ps ? 2 : 0,
+ };
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return -ENOTSUPP;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_PS_PROFILE,
+ &req, sizeof(req), false);
+}
+
+int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct mt7615_dev *dev = phy->dev;
+ struct mt7615_mcu_channel_domain {
+ __le32 country_code; /* regulatory_request.alpha2 */
+ u8 bw_2g; /* BW_20_40M 0
+ * BW_20M 1
+ * BW_20_40_80M 2
+ * BW_20_40_80_160M 3
+ * BW_20_40_80_8080M 4
+ */
+ u8 bw_5g;
+ __le16 pad;
+ u8 n_2ch;
+ u8 n_5ch;
+ __le16 pad2;
+ } __packed hdr = {
+ .bw_2g = 0,
+ .bw_5g = 3,
+ .n_2ch = mphy->sband_2g.sband.n_channels,
+ .n_5ch = mphy->sband_5g.sband.n_channels,
+ };
+ struct mt7615_mcu_chan {
+ __le16 hw_value;
+ __le16 pad;
+ __le32 flags;
+ } __packed;
+ int i, n_channels = hdr.n_2ch + hdr.n_5ch;
+ int len = sizeof(hdr) + n_channels * sizeof(struct mt7615_mcu_chan);
+ struct sk_buff *skb;
+
+ if (!mt7615_firmware_offload(dev))
+ return 0;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ for (i = 0; i < n_channels; i++) {
+ struct ieee80211_channel *chan;
+ struct mt7615_mcu_chan channel;
+
+ if (i < hdr.n_2ch)
+ chan = &mphy->sband_2g.sband.channels[i];
+ else
+ chan = &mphy->sband_5g.sband.channels[i - hdr.n_2ch];
+
+ channel.hw_value = cpu_to_le16(chan->hw_value);
+ channel.flags = cpu_to_le32(chan->flags);
+ channel.pad = 0;
+
+ skb_put_data(skb, &channel, sizeof(channel));
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_CMD_SET_CHAN_DOMAIN, false);
+}
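
This is the template for variable-length MCU messages: size the skb for
the header plus n_channels fixed-size records, put the header, then
append one record per channel. The same construction against a plain
byte buffer, with field widths copied from the structs above and
everything else made up for the demo:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct chan_hdr {	/* shape of mt7615_mcu_channel_domain */
	uint32_t country_code;
	uint8_t bw_2g, bw_5g;
	uint16_t pad;
	uint8_t n_2ch, n_5ch;
	uint16_t pad2;
} __attribute__((packed));

struct chan_rec {	/* shape of mt7615_mcu_chan */
	uint16_t hw_value;
	uint16_t pad;
	uint32_t flags;
} __attribute__((packed));

int main(void)
{
	static const uint16_t ch2g[] = { 1, 6, 11 };
	static const uint16_t ch5g[] = { 36, 40 };
	struct chan_hdr hdr = { .bw_5g = 3, .n_2ch = 3, .n_5ch = 2 };
	uint8_t buf[128];
	size_t off = sizeof(hdr);
	int i, n = hdr.n_2ch + hdr.n_5ch;

	memcpy(buf, &hdr, sizeof(hdr));	/* skb_put_data(skb, &hdr, ...) */
	for (i = 0; i < n; i++) {
		struct chan_rec rec = {
			.hw_value = i < hdr.n_2ch ? ch2g[i]
						  : ch5g[i - hdr.n_2ch],
		};

		memcpy(buf + off, &rec, sizeof(rec));
		off += sizeof(rec);
	}
	printf("%d channels -> %zu byte payload\n", n, off);
	return 0;
}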
+
+#define MT7615_SCAN_CHANNEL_TIME 60
+int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *sreq = &scan_req->req;
+ int n_ssids = 0, err, i, duration = MT7615_SCAN_CHANNEL_TIME;
+ int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
+ struct ieee80211_channel **scan_list = sreq->channels;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ struct mt7615_mcu_scan_channel *chan;
+ struct mt7615_hw_scan_req *req;
+ struct sk_buff *skb;
+
+ /* fall back to sw-scan */
+ if (!mt7615_firmware_offload(dev))
+ return 1;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(*req));
+ if (!skb)
+ return -ENOMEM;
+
+ set_bit(MT76_HW_SCANNING, &phy->mt76->state);
+ mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+
+ req = (struct mt7615_hw_scan_req *)skb_put(skb, sizeof(*req));
+
+ req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+ req->bss_idx = mvif->idx;
+ req->scan_type = sreq->n_ssids ? 1 : 0;
+ req->probe_req_num = sreq->n_ssids ? 2 : 0;
+ req->version = 1;
+
+ for (i = 0; i < sreq->n_ssids; i++) {
+ if (!sreq->ssids[i].ssid_len)
+ continue;
+
+ req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
+ memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
+ sreq->ssids[i].ssid_len);
+ n_ssids++;
+ }
+ req->ssid_type = n_ssids ? BIT(2) : BIT(0);
+ req->ssid_type_ext = n_ssids ? BIT(0) : 0;
+ req->ssids_num = n_ssids;
+
+ /* increase channel time for passive scan */
+ if (!sreq->n_ssids)
+ duration *= 2;
+ req->timeout_value = cpu_to_le16(sreq->n_channels * duration);
+ req->channel_min_dwell_time = cpu_to_le16(duration);
+ req->channel_dwell_time = cpu_to_le16(duration);
+
+ req->channels_num = min_t(u8, sreq->n_channels, 32);
+ req->ext_channels_num = min_t(u8, ext_channels_num, 32);
+ for (i = 0; i < req->channels_num + req->ext_channels_num; i++) {
+ if (i >= 32)
+ chan = &req->ext_channels[i - 32];
+ else
+ chan = &req->channels[i];
+
+ chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
+ chan->channel_num = scan_list[i]->hw_value;
+ }
+ req->channel_type = sreq->n_channels ? 4 : 0;
+
+ if (sreq->ie_len > 0) {
+ memcpy(req->ies, sreq->ie, sreq->ie_len);
+ req->ies_len = cpu_to_le16(sreq->ie_len);
+ }
+
+ memcpy(req->bssid, sreq->bssid, ETH_ALEN);
+ if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ get_random_mask_addr(req->random_mac, sreq->mac_addr,
+ sreq->mac_addr_mask);
+ req->scan_func = 1;
+ }
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_START_HW_SCAN,
+ false);
+ if (err < 0)
+ clear_bit(MT76_HW_SCANNING, &phy->mt76->state);
+
+ return err;
+}
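
Two encodings here deserve a note: seq_num is a 7-bit rolling counter
whose bit 7 tags the DBDC phy, the very bit mt7615_mcu_scan_event()
tests to choose phy2, and channels past the first 32 spill into
ext_channels. A sketch of the seq_num round trip:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t seq = 0x7f;	/* counter about to wrap */
	int ext_phy = 1;
	uint8_t encoded;

	seq = (seq + 1) & 0x7f;		/* rolls over to 0 */
	encoded = seq | ext_phy << 7;

	printf("counter=%d phy bit=%d -> 0x%02x\n", seq, ext_phy, encoded);
	printf("event decode picks: %s\n", encoded & 0x80 ? "phy2" : "phy0");
	return 0;
}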
+
+int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ u8 seq_num;
+ u8 is_ext_channel;
+ u8 rsv[2];
+ } __packed req = {
+ .seq_num = mvif->scan_seq_num,
+ };
+
+ if (test_and_clear_bit(MT76_HW_SCANNING, &phy->mt76->state)) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(phy->mt76->hw, &info);
+ }
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_CANCEL_HW_SCAN, &req,
+ sizeof(req), false);
+}
+
+int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *sreq)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct ieee80211_channel **scan_list = sreq->channels;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ struct mt7615_mcu_scan_channel *chan;
+ struct mt7615_sched_scan_req *req;
+ struct cfg80211_match_set *match;
+ struct cfg80211_ssid *ssid;
+ struct sk_buff *skb;
+ int i;
+
+ if (!mt7615_firmware_offload(dev))
+ return -ENOTSUPP;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(*req) + sreq->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+
+ req = (struct mt7615_sched_scan_req *)skb_put(skb, sizeof(*req));
+ req->version = 1;
+ req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+
+ if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ get_random_mask_addr(req->random_mac, sreq->mac_addr,
+ sreq->mac_addr_mask);
+ req->scan_func = 1;
+ }
+
+ req->ssids_num = sreq->n_ssids;
+ for (i = 0; i < req->ssids_num; i++) {
+ ssid = &sreq->ssids[i];
+ memcpy(req->ssids[i].ssid, ssid->ssid, ssid->ssid_len);
+ req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len);
+ }
+
+ req->match_num = sreq->n_match_sets;
+ for (i = 0; i < req->match_num; i++) {
+ match = &sreq->match_sets[i];
+ memcpy(req->match[i].ssid, match->ssid.ssid,
+ match->ssid.ssid_len);
+ req->match[i].rssi_th = cpu_to_le32(match->rssi_thold);
+ req->match[i].ssid_len = match->ssid.ssid_len;
+ }
+
+ req->channel_type = sreq->n_channels ? 4 : 0;
+ req->channels_num = min_t(u8, sreq->n_channels, 64);
+ for (i = 0; i < req->channels_num; i++) {
+ chan = &req->channels[i];
+ chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
+ chan->channel_num = scan_list[i]->hw_value;
+ }
+
+ req->intervals_num = sreq->n_scan_plans;
+ for (i = 0; i < req->intervals_num; i++)
+ req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval);
+
+ if (sreq->ie_len > 0) {
+ req->ie_len = cpu_to_le16(sreq->ie_len);
+ memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len);
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_CMD_SCHED_SCAN_REQ, false);
+}
+
+int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ u8 active; /* 0: enabled 1: disabled */
+ u8 rsv[3];
+ } __packed req = {
+ .active = !enable,
+ };
+
+ if (!mt7615_firmware_offload(dev))
+ return -ENOTSUPP;
+
+ if (enable)
+ set_bit(MT76_HW_SCHED_SCANNING, &phy->mt76->state);
+ else
+ clear_bit(MT76_HW_SCHED_SCANNING, &phy->mt76->state);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SCHED_SCAN_ENABLE,
+ &req, sizeof(req), false);
+}
+
+static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
+{
+ int i;
+
+ for (i = 0; i < n_freqs; i++)
+ if (cur == freqs[i])
+ return i;
+
+ return -1;
+}
+
+static int mt7615_dcoc_freq_idx(u16 freq, u8 bw)
+{
+ static const u16 freq_list[] = {
+ 4980, 5805, 5905, 5190,
+ 5230, 5270, 5310, 5350,
+ 5390, 5430, 5470, 5510,
+ 5550, 5590, 5630, 5670,
+ 5710, 5755, 5795, 5835,
+ 5875, 5210, 5290, 5370,
+ 5450, 5530, 5610, 5690,
+ 5775, 5855
+ };
+ static const u16 freq_bw40[] = {
+ 5190, 5230, 5270, 5310,
+ 5350, 5390, 5430, 5470,
+ 5510, 5550, 5590, 5630,
+ 5670, 5710, 5755, 5795,
+ 5835, 5875
+ };
+ int offset_2g = ARRAY_SIZE(freq_list);
+ int idx;
+
+ if (freq < 4000) {
+ if (freq < 2427)
+ return offset_2g;
+ if (freq < 2442)
+ return offset_2g + 1;
+ if (freq < 2457)
+ return offset_2g + 2;
+
+ return offset_2g + 3;
+ }
+
+ switch (bw) {
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ break;
+ default:
+ idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
+ freq + 10);
+ if (idx >= 0) {
+ freq = freq_bw40[idx];
+ break;
+ }
+
+ idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
+ freq - 10);
+ if (idx >= 0) {
+ freq = freq_bw40[idx];
+ break;
+ }
+ /* fall through */
+ case NL80211_CHAN_WIDTH_40:
+ idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
+ freq);
+ if (idx >= 0)
+ break;
+
+ return -1;
+
+ }
+
+ return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
+}
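
For 20 MHz channels the calibration data is stored per 40 MHz bin, so
the lookup probes freq + 10 and freq - 10 to land on the center of the
containing bin. A sketch of that probing with a trimmed frequency list:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const int bw40[] = { 5190, 5230, 5270, 5310 };	/* trimmed list */

static int find_idx(const int *v, int n, int f)
{
	int i;

	for (i = 0; i < n; i++)
		if (v[i] == f)
			return i;
	return -1;
}

int main(void)
{
	int freq = 5180;	/* 20 MHz channel 36 */
	int idx = find_idx(bw40, ARRAY_SIZE(bw40), freq + 10);

	if (idx < 0)
		idx = find_idx(bw40, ARRAY_SIZE(bw40), freq - 10);
	if (idx >= 0)
		printf("20 MHz @ %d reuses the 40 MHz bin at %d\n",
		       freq, bw40[idx]);
	return 0;
}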
+
+int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq2 = chandef->center_freq2;
+ int ret;
+ struct {
+ u8 direction;
+ u8 runtime_calibration;
+ u8 _rsv[2];
+
+ __le16 center_freq;
+ u8 bw;
+ u8 band;
+ u8 is_freq2;
+ u8 success;
+ u8 dbdc_en;
+
+ u8 _rsv2;
+
+ struct {
+ __le32 sx0_i_lna[4];
+ __le32 sx0_q_lna[4];
+
+ __le32 sx2_i_lna[4];
+ __le32 sx2_q_lna[4];
+ } dcoc_data[4];
+ } req = {
+ .direction = 1,
+
+ .bw = mt7615_mcu_chan_bw(chandef),
+ .band = chandef->center_freq1 > 4000,
+ .dbdc_en = !!dev->mt76.phy2,
+ };
+ u16 center_freq = chandef->center_freq1;
+ int freq_idx;
+ u8 *eep = dev->mt76.eeprom.data;
+
+ if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_RX_CAL))
+ return 0;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_160) {
+ freq2 = center_freq + 40;
+ center_freq -= 40;
+ }
+
+again:
+ req.runtime_calibration = 1;
+ freq_idx = mt7615_dcoc_freq_idx(center_freq, chandef->width);
+ if (freq_idx < 0)
+ goto out;
+
+ memcpy(req.dcoc_data, eep + MT7615_EEPROM_DCOC_OFFSET +
+ freq_idx * MT7615_EEPROM_DCOC_SIZE,
+ sizeof(req.dcoc_data));
+ req.runtime_calibration = 0;
+
+out:
+ req.center_freq = cpu_to_le16(center_freq);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RXDCOC_CAL, &req,
+ sizeof(req), true);
+
+ if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
+ chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) {
+ req.is_freq2 = true;
+ center_freq = freq2;
+ goto again;
+ }
+
+ return ret;
+}
+
+static int mt7615_dpd_freq_idx(u16 freq, u8 bw)
+{
+ static const u16 freq_list[] = {
+ 4920, 4940, 4960, 4980,
+ 5040, 5060, 5080, 5180,
+ 5200, 5220, 5240, 5260,
+ 5280, 5300, 5320, 5340,
+ 5360, 5380, 5400, 5420,
+ 5440, 5460, 5480, 5500,
+ 5520, 5540, 5560, 5580,
+ 5600, 5620, 5640, 5660,
+ 5680, 5700, 5720, 5745,
+ 5765, 5785, 5805, 5825,
+ 5845, 5865, 5885, 5905
+ };
+ int offset_2g = ARRAY_SIZE(freq_list);
+ int idx;
+
+ if (freq < 4000) {
+ if (freq < 2432)
+ return offset_2g;
+ if (freq < 2457)
+ return offset_2g + 1;
+
+ return offset_2g + 2;
+ }
+
+ if (bw != NL80211_CHAN_WIDTH_20) {
+ idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
+ freq + 10);
+ if (idx >= 0)
+ return idx;
+
+ idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
+ freq - 10);
+ if (idx >= 0)
+ return idx;
+ }
+
+ return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
+}
+
+int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq2 = chandef->center_freq2;
+ int ret;
+ struct {
+ u8 direction;
+ u8 runtime_calibration;
+ u8 _rsv[2];
+
+ __le16 center_freq;
+ u8 bw;
+ u8 band;
+ u8 is_freq2;
+ u8 success;
+ u8 dbdc_en;
+
+ u8 _rsv2;
+
+ struct {
+ struct {
+ u32 dpd_g0;
+ u8 data[32];
+ } wf0, wf1;
+
+ struct {
+ u32 dpd_g0_prim;
+ u32 dpd_g0_sec;
+ u8 data_prim[32];
+ u8 data_sec[32];
+ } wf2, wf3;
+ } dpd_data;
+ } req = {
+ .direction = 1,
+
+ .bw = mt7615_mcu_chan_bw(chandef),
+ .band = chandef->center_freq1 > 4000,
+ .dbdc_en = !!dev->mt76.phy2,
+ };
+ u16 center_freq = chandef->center_freq1;
+ int freq_idx;
+ u8 *eep = dev->mt76.eeprom.data;
+
+ if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_TX_DPD))
+ return 0;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_160) {
+ freq2 = center_freq + 40;
+ center_freq -= 40;
+ }
+
+again:
+ req.runtime_calibration = 1;
+ freq_idx = mt7615_dpd_freq_idx(center_freq, chandef->width);
+ if (freq_idx < 0)
+ goto out;
+
+ memcpy(&req.dpd_data, eep + MT7615_EEPROM_TXDPD_OFFSET +
+ freq_idx * MT7615_EEPROM_TXDPD_SIZE,
+ sizeof(req.dpd_data));
+ req.runtime_calibration = 0;
+
+out:
+ req.center_freq = cpu_to_le16(center_freq);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXDPD_CAL, &req,
+ sizeof(req), true);
+
+ if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
+ chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) {
+ req.is_freq2 = true;
+ center_freq = freq2;
+ goto again;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend)
+{
+ struct {
+ struct {
+ u8 hif_type; /* 0x0: HIF_SDIO
+ * 0x1: HIF_USB
+ * 0x2: HIF_PCIE
+ */
+ u8 pad[3];
+ } __packed hdr;
+ struct hif_suspend_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 suspend;
+ } __packed hif_suspend;
+ } req = {
+ .hif_suspend = {
+ .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
+ .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
+ .suspend = suspend,
+ },
+ };
+
+ if (mt76_is_mmio(&dev->mt76))
+ req.hdr.hif_type = 2;
+ else if (mt76_is_usb(&dev->mt76))
+ req.hdr.hif_type = 1;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL,
+ &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend);
+
+static int
+mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ u8 bss_idx;
+ u8 dtim_period;
+ __le16 aid;
+ __le16 bcn_interval;
+ __le16 atim_window;
+ u8 uapsd;
+ u8 bmc_delivered_ac;
+ u8 bmc_triggered_ac;
+ u8 pad;
+ } req = {
+ .bss_idx = mvif->idx,
+ .aid = cpu_to_le16(vif->bss_conf.aid),
+ .dtim_period = vif->bss_conf.dtim_period,
+ .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ };
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } req_hdr = {
+ .bss_idx = mvif->idx,
+ };
+ int err;
+
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !mt7615_firmware_offload(dev))
+ return -ENOTSUPP;
+
+ err = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT,
+ &req_hdr, sizeof(req_hdr), false);
+ if (err < 0 || !enable)
+ return err;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED,
+ &req, sizeof(req), false);
+}
+
+static int
+mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ bool suspend, struct cfg80211_wowlan *wowlan)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_wow_ctrl_tlv wow_ctrl_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .wow_ctrl_tlv = {
+ .tag = cpu_to_le16(UNI_SUSPEND_WOW_CTRL),
+ .len = cpu_to_le16(sizeof(struct mt7615_wow_ctrl_tlv)),
+ .cmd = suspend ? 1 : 2,
+ },
+ };
+
+ if (wowlan->magic_pkt)
+ req.wow_ctrl_tlv.trigger |= BIT(0);
+ if (wowlan->disconnect)
+ req.wow_ctrl_tlv.trigger |= BIT(2);
+ if (wowlan->nd_config) {
+ mt7615_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
+ req.wow_ctrl_tlv.trigger |= BIT(5);
+ mt7615_mcu_sched_scan_enable(phy, vif, suspend);
+ }
+
+ if (mt76_is_mmio(&dev->mt76))
+ req.wow_ctrl_tlv.wakeup_hif = 2;
+ else if (mt76_is_usb(&dev->mt76))
+ req.wow_ctrl_tlv.wakeup_hif = 1;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_set_wow_pattern(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ u8 index, bool enable,
+ struct cfg80211_pkt_pattern *pattern)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_wow_pattern_tlv *ptlv;
+ struct sk_buff *skb;
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr = {
+ .bss_idx = mvif->idx,
+ };
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(hdr) + sizeof(*ptlv));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+ ptlv = (struct mt7615_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
+ ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
+ ptlv->len = cpu_to_le16(sizeof(*ptlv));
+ ptlv->data_len = pattern->pattern_len;
+ ptlv->enable = enable;
+ ptlv->index = index;
+
+ memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
+ memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_SUSPEND, true);
+}
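
The wake-on-WLAN pattern TLV carries one mask bit per pattern byte,
hence the pattern_len / 8 in the mask copy above; a clear bit makes the
corresponding byte a wildcard. A sketch of how a match under such a
mask behaves (pure illustration of the encoding, not firmware code):

#include <stdint.h>
#include <stdio.h>

static int match(const uint8_t *pkt, const uint8_t *pat,
		 const uint8_t *mask, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!(mask[i / 8] & (1 << (i % 8))))
			continue;	/* wildcard byte */
		if (pkt[i] != pat[i])
			return 0;
	}
	return 1;
}

int main(void)
{
	uint8_t pat[]  = { 0xaa, 0x00, 0xcc };
	uint8_t mask[] = { 0x05 };	/* only bytes 0 and 2 matter */
	uint8_t pkt[]  = { 0xaa, 0x42, 0xcc };

	printf("match: %d\n", match(pkt, pat, mask, 3));
	return 0;
}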
+
+static int
+mt7615_mcu_set_suspend_mode(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ bool enable, u8 mdtim, bool wow_suspend)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_suspend_tlv suspend_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .suspend_tlv = {
+ .tag = cpu_to_le16(UNI_SUSPEND_MODE_SETTING),
+ .len = cpu_to_le16(sizeof(struct mt7615_suspend_tlv)),
+ .enable = enable,
+ .mdtim = mdtim,
+ .wow_suspend = wow_suspend,
+ },
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_set_gtk_rekey(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ bool suspend)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt7615_gtk_rekey_tlv gtk_tlv;
+ } __packed req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .gtk_tlv = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY),
+ .len = cpu_to_le16(sizeof(struct mt7615_gtk_rekey_tlv)),
+ .rekey_mode = !suspend,
+ },
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD,
+ &req, sizeof(req), true);
+}
+
+void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_phy *phy = priv;
+ bool suspend = test_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
+ struct ieee80211_hw *hw = phy->mt76->hw;
+ struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+ int i;
+
+ mt7615_mcu_set_bss_pm(phy->dev, vif, suspend);
+
+ mt7615_mcu_set_gtk_rekey(phy->dev, vif, suspend);
+
+ mt7615_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true);
+
+ for (i = 0; i < wowlan->n_patterns; i++)
+ mt7615_mcu_set_wow_pattern(phy->dev, vif, i, suspend,
+ &wowlan->patterns[i]);
+ mt7615_mcu_set_wow_ctrl(phy, vif, suspend, wowlan);
+}
+
+static void
+mt7615_mcu_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct mt7615_gtk_rekey_tlv *gtk_tlv = data;
+ u32 cipher;
+
+ if (key->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+ key->cipher != WLAN_CIPHER_SUITE_CCMP &&
+ key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ return;
+
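+	/* firmware cipher bitmap: BIT(3) selects TKIP, BIT(4) CCMP/CMAC */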
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1);
+ cipher = BIT(3);
+ } else {
+ gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2);
+ cipher = BIT(4);
+ }
+
+	/* we assume here that a single pairwise key is in use */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ gtk_tlv->pairwise_cipher = cpu_to_le32(cipher);
+ gtk_tlv->group_cipher = cpu_to_le32(cipher);
+ gtk_tlv->keyid = key->keyidx;
+ }
+}
+
+int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *key)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_gtk_rekey_tlv *gtk_tlv;
+ struct sk_buff *skb;
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr = {
+ .bss_idx = mvif->idx,
+ };
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(hdr) + sizeof(*gtk_tlv));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+ gtk_tlv = (struct mt7615_gtk_rekey_tlv *)skb_put(skb,
+ sizeof(*gtk_tlv));
+ gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
+ gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
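+	/* rekey_mode 2 + option 1: push the updated rekey material without
+	 * toggling the offload state
+	 */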
+ gtk_tlv->rekey_mode = 2;
+ gtk_tlv->option = 1;
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(hw, vif, mt7615_mcu_key_iter, gtk_tlv);
+ rcu_read_unlock();
+
+ memcpy(gtk_tlv->kek, key->kek, NL80211_KEK_LEN);
+ memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN);
+ memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_OFFLOAD, true);
+}
+#endif /* CONFIG_PM */
+
+int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = phy->dev;
+ struct mt7615_roc_tlv req = {
+ .bss_idx = mvif->idx,
+ .active = !chan,
+ .max_interval = cpu_to_le32(duration),
+ .primary_chan = chan ? chan->hw_value : 0,
+ .band = chan ? chan->band : 0,
+ .req_type = 2,
+ };
+
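+	/* the ROC event handler is expected to set roc_grant and wake
+	 * up waiters on roc_wait once the firmware grants the request
+	 */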
+ phy->roc_grant = false;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req,
+ sizeof(req), false);
+}
+
+int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ int ct_window = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct {
+ __le32 ct_win;
+ u8 bss_idx;
+ u8 rsv[3];
+ } __packed req = {
+ .ct_win = cpu_to_le32(ct_window),
+ .bss_idx = mvif->idx,
+ };
+
+ if (!mt7615_firmware_offload(dev))
+ return -ENOTSUPP;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS,
+ &req, sizeof(req), false);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index d1f7391472fc..2314d0b23af1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -81,7 +81,12 @@ enum {
MCU_EVENT_GENERIC = 0x01,
MCU_EVENT_ACCESS_REG = 0x02,
MCU_EVENT_MT_PATCH_SEM = 0x04,
+ MCU_EVENT_SCAN_DONE = 0x0d,
+ MCU_EVENT_ROC = 0x10,
+ MCU_EVENT_BSS_ABSENCE = 0x11,
+ MCU_EVENT_BSS_BEACON_LOSS = 0x13,
MCU_EVENT_CH_PRIVILEGE = 0x18,
+ MCU_EVENT_SCHED_SCAN_DONE = 0x23,
MCU_EVENT_EXT = 0xed,
MCU_EVENT_RESTART_DL = 0xef,
};
@@ -232,7 +237,9 @@ enum {
#define MCU_FW_PREFIX BIT(31)
#define MCU_UNI_PREFIX BIT(30)
-#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX)
+#define MCU_CE_PREFIX BIT(29)
+#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \
+ MCU_CE_PREFIX)
enum {
MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
@@ -265,6 +272,8 @@ enum {
MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+ MCU_EXT_CMD_RXDCOC_CAL = 0x59,
+ MCU_EXT_CMD_TXDPD_CAL = 0x60,
MCU_EXT_CMD_SET_RDD_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
};
@@ -273,6 +282,281 @@ enum {
MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01,
MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02,
MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03,
+ MCU_UNI_CMD_SUSPEND = MCU_UNI_PREFIX | 0x05,
+ MCU_UNI_CMD_OFFLOAD = MCU_UNI_PREFIX | 0x06,
+ MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07,
+};
+
+struct mt7615_mcu_uni_event {
+ u8 cid;
+ u8 pad[3];
+ __le32 status; /* 0: success, others: fail */
+} __packed;
+
+struct mt7615_beacon_loss_event {
+ u8 bss_idx;
+ u8 reason;
+ u8 pad[2];
+} __packed;
+
+struct mt7615_mcu_scan_ssid {
+ __le32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+struct mt7615_mcu_scan_channel {
+ u8 band; /* 1: 2.4GHz
+ * 2: 5.0GHz
+ * Others: Reserved
+ */
+ u8 channel_num;
+} __packed;
+
+struct mt7615_mcu_scan_match {
+ __le32 rssi_th;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 rsv[3];
+} __packed;
+
+struct mt7615_hw_scan_req {
+ u8 seq_num;
+ u8 bss_idx;
+ u8 scan_type; /* 0: PASSIVE SCAN
+ * 1: ACTIVE SCAN
+ */
+ u8 ssid_type; /* BIT(0) wildcard SSID
+ * BIT(1) P2P wildcard SSID
+ * BIT(2) specified SSID + wildcard SSID
+ * BIT(2) + ssid_type_ext BIT(0) specified SSID only
+ */
+ u8 ssids_num;
+	u8 probe_req_num;	/* Number of probe requests for each SSID */
+ u8 scan_func; /* BIT(0) Enable random MAC scan
+ * BIT(1) Disable DBDC scan type 1~3.
+			 * BIT(2) Use DBDC scan type 3 (dedicate one RF to scanning).
+ */
+ u8 version; /* 0: Not support fields after ies.
+ * 1: Support fields after ies.
+ */
+ struct mt7615_mcu_scan_ssid ssids[4];
+ __le16 probe_delay_time;
+	__le16 channel_dwell_time;	/* channel dwell interval */
+ __le16 timeout_value;
+ u8 channel_type; /* 0: Full channels
+ * 1: Only 2.4GHz channels
+ * 2: Only 5GHz channels
+ * 3: P2P social channel only (channel #1, #6 and #11)
+ * 4: Specified channels
+ * Others: Reserved
+ */
+ u8 channels_num; /* valid when channel_type is 4 */
+ /* valid when channels_num is set */
+ struct mt7615_mcu_scan_channel channels[32];
+ __le16 ies_len;
+ u8 ies[MT7615_SCAN_IE_LEN];
+ /* following fields are valid if version > 0 */
+ u8 ext_channels_num;
+ u8 ext_ssids_num;
+ __le16 channel_min_dwell_time;
+ struct mt7615_mcu_scan_channel ext_channels[32];
+ struct mt7615_mcu_scan_ssid ext_ssids[6];
+ u8 bssid[ETH_ALEN];
+ u8 random_mac[ETH_ALEN]; /* valid when BIT(1) in scan_func is set. */
+ u8 pad[63];
+ u8 ssid_type_ext;
+} __packed;
+
+#define SCAN_DONE_EVENT_MAX_CHANNEL_NUM 64
+struct mt7615_hw_scan_done {
+ u8 seq_num;
+ u8 sparse_channel_num;
+ struct mt7615_mcu_scan_channel sparse_channel;
+ u8 complete_channel_num;
+ u8 current_state;
+ u8 version;
+ u8 pad;
+ __le32 beacon_scan_num;
+ u8 pno_enabled;
+ u8 pad2[3];
+ u8 sparse_channel_valid_num;
+ u8 pad3[3];
+ u8 channel_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+	/* idle format for channel_idle_time
+	 * 0: first byte: idle time (ms), second byte: dwell time (ms)
+	 * 1: first byte: idle time (8 ms), second byte: dwell time (8 ms)
+	 * 2: dwell time (16 us)
+	 */
+ __le16 channel_idle_time[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ /* beacon and probe response count */
+ u8 beacon_probe_num[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ u8 mdrdy_count[SCAN_DONE_EVENT_MAX_CHANNEL_NUM];
+ __le32 beacon_2g_num;
+ __le32 beacon_5g_num;
+} __packed;
+
+struct mt7615_sched_scan_req {
+ u8 version;
+ u8 seq_num;
+ u8 stop_on_match;
+ u8 ssids_num;
+ u8 match_num;
+ u8 pad;
+ __le16 ie_len;
+ struct mt7615_mcu_scan_ssid ssids[MT7615_MAX_SCHED_SCAN_SSID];
+ struct mt7615_mcu_scan_match match[MT7615_MAX_SCAN_MATCH];
+ u8 channel_type;
+ u8 channels_num;
+ u8 intervals_num;
+	u8 scan_func;	/* BIT(0) enable random MAC address */
+ struct mt7615_mcu_scan_channel channels[64];
+ __le16 intervals[MT7615_MAX_SCHED_SCAN_INTERVAL];
+ u8 random_mac[ETH_ALEN]; /* valid when BIT(0) in scan_func is set */
+ u8 pad2[58];
+} __packed;
+
+struct mt7615_sched_scan_done {
+ u8 seq_num;
+ u8 status; /* 0: ssid found */
+ __le16 pad;
+} __packed;
+
+struct mt7615_mcu_bss_event {
+ u8 bss_idx;
+ u8 is_absent;
+ u8 free_quota;
+ u8 pad;
+} __packed;
+
+struct mt7615_bss_basic_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 omac_idx;
+ u8 hw_bss_idx;
+ u8 band_idx;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 wmm_idx;
+ u8 bssid[ETH_ALEN];
+ __le16 bmc_tx_wlan_idx;
+ __le16 bcn_interval;
+ u8 dtim_period;
+ u8 phymode; /* bit(0): A
+ * bit(1): B
+ * bit(2): G
+ * bit(3): GN
+ * bit(4): AN
+ * bit(5): AC
+ */
+ __le16 sta_idx;
+ u8 nonht_basic_phy;
+ u8 pad[3];
+} __packed;
+
+struct mt7615_wow_ctrl_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 cmd; /* 0x1: PM_WOWLAN_REQ_START
+ * 0x2: PM_WOWLAN_REQ_STOP
+ * 0x3: PM_WOWLAN_PARAM_CLEAR
+ */
+ u8 trigger; /* 0: NONE
+ * BIT(0): NL80211_WOWLAN_TRIG_MAGIC_PKT
+ * BIT(1): NL80211_WOWLAN_TRIG_ANY
+ * BIT(2): NL80211_WOWLAN_TRIG_DISCONNECT
+ * BIT(3): NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE
+ * BIT(4): BEACON_LOST
+ * BIT(5): NL80211_WOWLAN_TRIG_NET_DETECT
+ */
+ u8 wakeup_hif; /* 0x0: HIF_SDIO
+ * 0x1: HIF_USB
+ * 0x2: HIF_PCIE
+ * 0x3: HIF_GPIO
+ */
+ u8 pad;
+ u8 rsv[4];
+} __packed;
+
+#define MT7615_WOW_MASK_MAX_LEN 16
+#define MT7615_WOW_PATTEN_MAX_LEN 128
+struct mt7615_wow_pattern_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 index; /* pattern index */
+ u8 enable; /* 0: disable
+ * 1: enable
+ */
+ u8 data_len; /* pattern length */
+ u8 pad;
+ u8 mask[MT7615_WOW_MASK_MAX_LEN];
+ u8 pattern[MT7615_WOW_PATTEN_MAX_LEN];
+ u8 rsv[4];
+} __packed;
+
+struct mt7615_suspend_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 enable; /* 0: suspend mode disabled
+ * 1: suspend mode enabled
+ */
+ u8 mdtim; /* LP parameter */
+	u8 wow_suspend;	/* 0: update by original policy
+			 * 1: update by WoW DTIM
+			 */
+ u8 pad[5];
+} __packed;
+
+struct mt7615_gtk_rekey_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 kek[NL80211_KEK_LEN];
+ u8 kck[NL80211_KCK_LEN];
+ u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
+ u8 rekey_mode; /* 0: rekey offload enable
+ * 1: rekey offload disable
+ * 2: rekey update
+ */
+ u8 keyid;
+ u8 pad[2];
+ __le32 proto; /* WPA-RSN-WAPI-OPSN */
+ __le32 pairwise_cipher;
+ __le32 group_cipher;
+ __le32 key_mgmt; /* NONE-PSK-IEEE802.1X */
+ __le32 mgmt_group_cipher;
+ u8 option; /* 1: rekey data update without enabling offload */
+	u8 reserved[3];
+} __packed;
+
+struct mt7615_roc_tlv {
+ u8 bss_idx;
+ u8 token;
+ u8 active;
+ u8 primary_chan;
+ u8 sco;
+ u8 band;
+ u8 width; /* To support 80/160MHz bandwidth */
+ u8 freq_seg1; /* To support 80/160MHz bandwidth */
+ u8 freq_seg2; /* To support 80/160MHz bandwidth */
+ u8 req_type;
+ u8 dbdc_band;
+ u8 rsv0;
+ __le32 max_interval; /* ms */
+ u8 rsv1[8];
+} __packed;
+
+/* offload mcu commands */
+enum {
+ MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
+ MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05,
+ MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f,
+ MCU_CMD_SET_BSS_CONNECTED = MCU_CE_PREFIX | 0x16,
+ MCU_CMD_SET_BSS_ABORT = MCU_CE_PREFIX | 0x17,
+ MCU_CMD_CANCEL_HW_SCAN = MCU_CE_PREFIX | 0x1b,
+ MCU_CMD_SET_ROC = MCU_CE_PREFIX | 0x1c,
+ MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
+ MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
+ MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
};
#define MCU_CMD_ACK BIT(0)
@@ -283,10 +567,26 @@ enum {
enum {
UNI_BSS_INFO_BASIC = 0,
+ UNI_BSS_INFO_RLM = 2,
UNI_BSS_INFO_BCN_CONTENT = 7,
};
enum {
+ UNI_SUSPEND_MODE_SETTING,
+ UNI_SUSPEND_WOW_CTRL,
+ UNI_SUSPEND_WOW_GPIO_PARAM,
+ UNI_SUSPEND_WOW_WAKEUP_PORT,
+ UNI_SUSPEND_WOW_PATTERN,
+};
+
+enum {
+ UNI_OFFLOAD_OFFLOAD_ARPNS_IPV4,
+ UNI_OFFLOAD_OFFLOAD_ARPNS_IPV6,
+ UNI_OFFLOAD_OFFLOAD_GTK_REKEY,
+ UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
+};
+
+enum {
PATCH_SEM_RELEASE = 0x0,
PATCH_SEM_GET = 0x1
};
@@ -306,6 +606,11 @@ enum {
FW_STATE_CR4_RDY = 7
};
+enum {
+ FW_STATE_PWR_ON = 1,
+ FW_STATE_N9_RDY = 2,
+};
+
#define STA_TYPE_STA BIT(0)
#define STA_TYPE_AP BIT(1)
#define STA_TYPE_ADHOC BIT(2)
@@ -704,11 +1009,4 @@ enum {
CH_SWITCH_SCAN_BYPASS_DPD = 9
};
-static inline struct sk_buff *
-mt7615_mcu_msg_alloc(const void *data, int len)
-{
- return mt76_mcu_msg_alloc(data, sizeof(struct mt7615_mcu_txd),
- len, 0);
-}
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index d2eff5442824..e670393506f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -1,5 +1,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
#include "mt7615.h"
#include "regs.h"
@@ -13,12 +15,15 @@ const u32 mt7615e_reg_map[] = {
[MT_ARB_BASE] = 0x20c00,
[MT_HIF_BASE] = 0x04000,
[MT_CSR_BASE] = 0x07000,
+ [MT_PLE_BASE] = 0x08000,
+ [MT_PSE_BASE] = 0x0c000,
[MT_PHY_BASE] = 0x10000,
[MT_CFG_BASE] = 0x20200,
[MT_AGG_BASE] = 0x20a00,
[MT_TMAC_BASE] = 0x21000,
[MT_RMAC_BASE] = 0x21200,
[MT_DMA_BASE] = 0x21800,
+ [MT_PF_BASE] = 0x22000,
[MT_WTBL_BASE_ON] = 0x23000,
[MT_WTBL_BASE_OFF] = 0x23400,
[MT_LPON_BASE] = 0x24200,
@@ -37,12 +42,15 @@ const u32 mt7663e_reg_map[] = {
[MT_ARB_BASE] = 0x20c00,
[MT_HIF_BASE] = 0x04000,
[MT_CSR_BASE] = 0x07000,
+ [MT_PLE_BASE] = 0x08000,
+ [MT_PSE_BASE] = 0x0c000,
[MT_PHY_BASE] = 0x10000,
[MT_CFG_BASE] = 0x20000,
[MT_AGG_BASE] = 0x22000,
[MT_TMAC_BASE] = 0x24000,
[MT_RMAC_BASE] = 0x25000,
[MT_DMA_BASE] = 0x27000,
+ [MT_PF_BASE] = 0x28000,
[MT_WTBL_BASE_ON] = 0x29000,
[MT_WTBL_BASE_OFF] = 0x29800,
[MT_LPON_BASE] = 0x2b000,
@@ -80,30 +88,42 @@ mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
{
struct mt7615_dev *dev = dev_instance;
- u32 intr;
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
- trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+ tasklet_schedule(&dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
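+/* bottom half of the IRQ handler: ack the pending interrupt sources and
+ * keep the ones handed over to NAPI masked until their poll routines
+ * re-enable them
+ */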
+static void mt7615_irq_tasklet(unsigned long data)
+{
+ struct mt7615_dev *dev = (struct mt7615_dev *)data;
+ u32 intr, mask = 0;
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
intr &= dev->mt76.mmio.irqmask;
if (intr & MT_INT_TX_DONE_ALL) {
- mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ mask |= MT_INT_TX_DONE_ALL;
napi_schedule(&dev->mt76.tx_napi);
}
if (intr & MT_INT_RX_DONE(0)) {
- mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
+ mask |= MT_INT_RX_DONE(0);
napi_schedule(&dev->mt76.napi[0]);
}
if (intr & MT_INT_RX_DONE(1)) {
- mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
+ mask |= MT_INT_RX_DONE(1);
napi_schedule(&dev->mt76.napi[1]);
}
@@ -117,7 +137,7 @@ static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
}
}
- return IRQ_HANDLED;
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
@@ -139,18 +159,25 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
};
+ struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
int ret;
- mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7615_ops, &drv_ops);
+ ops = devm_kmemdup(pdev, &mt7615_ops, sizeof(mt7615_ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
if (!mdev)
return -ENOMEM;
dev = container_of(mdev, struct mt7615_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ tasklet_init(&dev->irq_tasklet, mt7615_irq_tasklet, (unsigned long)dev);
dev->reg_map = map;
+ dev->ops = ops;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
@@ -172,3 +199,31 @@ error:
ieee80211_free_hw(mt76_hw(dev));
return ret;
}
+
+static int __init mt7615_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7615_pci_driver);
+ if (ret)
+ return ret;
+
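+	/* the mt7622 WMAC is a platform device sharing this module, so
+	 * unwind the PCI registration if its driver fails to register
+	 */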
+ if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
+ ret = platform_driver_register(&mt7622_wmac_driver);
+ if (ret)
+ pci_unregister_driver(&mt7615_pci_driver);
+ }
+
+ return ret;
+}
+
+static void __exit mt7615_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MT7622_WMAC))
+ platform_driver_unregister(&mt7622_wmac_driver);
+ pci_unregister_driver(&mt7615_pci_driver);
+}
+
+module_init(mt7615_init);
+module_exit(mt7615_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 676ca622c35a..d6176d316bee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -12,12 +12,14 @@
#define MT7615_MAX_INTERFACES 4
#define MT7615_MAX_WMM_SETS 4
+#define MT7663_WTBL_SIZE 32
#define MT7615_WTBL_SIZE 128
-#define MT7615_WTBL_RESERVED (MT7615_WTBL_SIZE - 1)
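+/* NOTE: relies on a local "dev" pointer being in scope at the call site */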
+#define MT7615_WTBL_RESERVED (mt7615_wtbl_size(dev) - 1)
#define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \
MT7615_MAX_INTERFACES)
#define MT7615_WATCHDOG_TIME (HZ / 10)
+#define MT7615_HW_SCAN_TIMEOUT (HZ / 10)
#define MT7615_RESET_TIMEOUT (30 * HZ)
#define MT7615_RATE_RETRY 2
@@ -40,8 +42,10 @@
#define MT7615_FIRMWARE_V2 2
#define MT7615_FIRMWARE_V3 3
-#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_v3.bin"
-#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin"
+#define MT7663_OFFLOAD_ROM_PATCH "mediatek/mt7663pr2h.bin"
+#define MT7663_OFFLOAD_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin"
+#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_rebb.bin"
+#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_rebb.bin"
#define MT7615_EEPROM_SIZE 1024
#define MT7615_TOKEN_SIZE 4096
@@ -57,10 +61,16 @@
#define MT7615_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7615_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+#define MT7615_SCAN_IE_LEN 600
+#define MT7615_MAX_SCHED_SCAN_INTERVAL 10
+#define MT7615_MAX_SCHED_SCAN_SSID 10
+#define MT7615_MAX_SCAN_MATCH 16
+
struct mt7615_vif;
struct mt7615_sta;
struct mt7615_dfs_pulse;
struct mt7615_dfs_pattern;
+enum mt7615_cipher_type;
enum mt7615_hw_txq_id {
MT7615_TXQ_MAIN,
@@ -84,6 +94,39 @@ struct mt7615_rate_set {
struct ieee80211_tx_rate rates[4];
};
+struct mt7615_rate_desc {
+ bool rateset;
+ u16 probe_val;
+ u16 val[4];
+ u8 bw_idx;
+ u8 bw;
+};
+
+enum mt7615_wtbl_desc_type {
+ MT7615_WTBL_RATE_DESC,
+ MT7615_WTBL_KEY_DESC
+};
+
+struct mt7615_key_desc {
+ enum set_key_cmd cmd;
+ u32 cipher;
+ s8 keyidx;
+ u8 keylen;
+ u8 *key;
+};
+
+struct mt7615_wtbl_desc {
+ struct list_head node;
+
+ enum mt7615_wtbl_desc_type type;
+ struct mt7615_sta *sta;
+
+ union {
+ struct mt7615_rate_desc rate;
+ struct mt7615_key_desc key;
+ };
+};
+
struct mt7615_sta {
struct mt76_wcid wcid; /* must be first */
@@ -108,15 +151,18 @@ struct mt7615_vif {
u8 omac_idx;
u8 band_idx;
u8 wmm_idx;
+ u8 scan_seq_num;
struct mt7615_sta sta;
};
struct mib_stats {
- u32 ack_fail_cnt;
- u32 fcs_err_cnt;
- u32 rts_cnt;
- u32 rts_retries_cnt;
+ u16 ack_fail_cnt;
+ u16 fcs_err_cnt;
+ u16 rts_cnt;
+ u16 rts_retries_cnt;
+ u16 ba_miss_cnt;
+ unsigned long aggr_per;
};
struct mt7615_phy {
@@ -128,6 +174,8 @@ struct mt7615_phy {
u16 noise;
+ bool scs_en;
+
unsigned long last_cca_adj;
int false_cca_ofdm, false_cca_cck;
s8 ofdm_sensitivity;
@@ -146,13 +194,24 @@ struct mt7615_phy {
u32 ampdu_ref;
struct mib_stats mib;
+
+ struct delayed_work mac_work;
+ u8 mac_work_count;
+
+ struct sk_buff_head scan_event_list;
+ struct delayed_work scan_work;
+
+ struct work_struct roc_work;
+ struct timer_list roc_timer;
+ wait_queue_head_t roc_wait;
+ bool roc_grant;
};
#define mt7615_mcu_add_tx_ba(dev, ...) (dev)->mcu_ops->add_tx_ba((dev), __VA_ARGS__)
#define mt7615_mcu_add_rx_ba(dev, ...) (dev)->mcu_ops->add_rx_ba((dev), __VA_ARGS__)
#define mt7615_mcu_sta_add(dev, ...) (dev)->mcu_ops->sta_add((dev), __VA_ARGS__)
#define mt7615_mcu_add_dev_info(dev, ...) (dev)->mcu_ops->add_dev_info((dev), __VA_ARGS__)
-#define mt7615_mcu_add_bss_info(dev, ...) (dev)->mcu_ops->add_bss_info((dev), __VA_ARGS__)
+#define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__)
#define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
struct mt7615_mcu_ops {
@@ -167,8 +226,8 @@ struct mt7615_mcu_ops {
struct ieee80211_sta *sta, bool enable);
int (*add_dev_info)(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool enable);
- int (*add_bss_info)(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- bool enable);
+ int (*add_bss_info)(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
int (*add_beacon_offload)(struct mt7615_dev *dev,
struct ieee80211_hw *hw,
struct ieee80211_vif *vif, bool enable);
@@ -181,12 +240,15 @@ struct mt7615_dev {
struct mt76_phy mphy;
};
+ struct tasklet_struct irq_tasklet;
+
struct mt7615_phy phy;
u32 vif_mask;
u32 omac_mask;
u16 chainmask;
+ struct ieee80211_ops *ops;
const struct mt7615_mcu_ops *mcu_ops;
struct regmap *infracfg;
const u32 *reg_map;
@@ -208,14 +270,16 @@ struct mt7615_dev {
} radar_pattern;
u32 hw_pattern;
- u8 mac_work_count;
- bool scs_en;
bool fw_debug;
+ bool flash_eeprom;
spinlock_t token_lock;
struct idr token;
u8 fw_ver;
+
+ struct work_struct wtbl_work;
+ struct list_head wd_head;
};
enum {
@@ -289,6 +353,7 @@ mt7615_ext_phy(struct mt7615_dev *dev)
return phy->priv;
}
+extern struct ieee80211_rate mt7615_rates[12];
extern const struct ieee80211_ops mt7615_ops;
extern const u32 mt7615e_reg_map[__MT_BASE_MAX];
extern const u32 mt7663e_reg_map[__MT_BASE_MAX];
@@ -308,15 +373,19 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
int irq, const u32 *map);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+void mt7615_check_offload_capability(struct mt7615_dev *dev);
void mt7615_init_device(struct mt7615_dev *dev);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
int mt7615_register_ext_phy(struct mt7615_dev *dev);
void mt7615_unregister_ext_phy(struct mt7615_dev *dev);
-int mt7615_eeprom_init(struct mt7615_dev *dev);
-int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
- struct ieee80211_channel *chan,
- u8 chain_idx);
+int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr);
+int mt7615_eeprom_get_target_power_index(struct mt7615_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx);
+int mt7615_eeprom_get_power_delta_index(struct mt7615_dev *dev,
+ enum nl80211_band band);
+int mt7615_wait_pdma_busy(struct mt7615_dev *dev);
int mt7615_dma_init(struct mt7615_dev *dev);
void mt7615_dma_cleanup(struct mt7615_dev *dev);
int mt7615_mcu_init(struct mt7615_dev *dev);
@@ -345,7 +414,7 @@ static inline bool is_mt7622(struct mt76_dev *dev)
static inline bool is_mt7615(struct mt76_dev *dev)
{
- return mt76_chip(dev) == 0x7615;
+ return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
}
static inline bool is_mt7663(struct mt76_dev *dev)
@@ -353,21 +422,46 @@ static inline bool is_mt7663(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7663;
}
+static inline bool is_mt7611(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7611;
+}
+
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
{
- mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+ mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
+
+ tasklet_schedule(&dev->irq_tasklet);
}
-static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
+static inline bool mt7615_firmware_offload(struct mt7615_dev *dev)
{
- mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+ return dev->fw_ver > MT7615_FIRMWARE_V2;
}
+static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev)
+{
+ if (is_mt7663(&dev->mt76) && mt7615_firmware_offload(dev))
+ return MT7663_WTBL_SIZE;
+ else
+ return MT7615_WTBL_SIZE;
+}
+
+void mt7615_dma_reset(struct mt7615_dev *dev);
+void mt7615_scan_work(struct work_struct *work);
+void mt7615_roc_work(struct work_struct *work);
+void mt7615_roc_timer(struct timer_list *timer);
+void mt7615_init_txpower(struct mt7615_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt7615_phy_init(struct mt7615_dev *dev);
+void mt7615_mac_init(struct mt7615_dev *dev);
+
+int mt7615_mcu_restart(struct mt76_dev *dev);
void mt7615_update_channel(struct mt76_dev *mdev);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
void mt7615_mac_reset_counters(struct mt7615_dev *dev);
void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
-void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable);
+void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable);
void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy);
void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
@@ -375,15 +469,27 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct ieee80211_sta *sta, int pid,
struct ieee80211_key_conf *key, bool beacon);
void mt7615_mac_set_timing(struct mt7615_phy *phy);
-int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
-void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
-void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
+int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ int keyidx, enum set_key_cmd cmd);
+void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd);
+int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ u8 *key, u8 keylen,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd);
void mt7615_mac_reset_work(struct work_struct *work);
int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
+int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp);
int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable);
@@ -392,6 +498,17 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
void mt7615_mcu_exit(struct mt7615_dev *dev);
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq);
+int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy);
+int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req);
+int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif);
+int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *sreq);
+int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable);
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -417,8 +534,33 @@ int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
const struct mt7615_dfs_pattern *pattern);
int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
+int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy);
+int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy);
+int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif);
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy);
+int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration);
+int mt7615_firmware_own(struct mt7615_dev *dev);
+int mt7615_driver_own(struct mt7615_dev *dev);
+
int mt7615_init_debugfs(struct mt7615_dev *dev);
+
+int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend);
+void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif);
+int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *key);
+
+int __mt7663_load_firmware(struct mt7615_dev *dev);
+
+/* usb */
+void mt7663u_wtbl_work(struct work_struct *work);
+int mt7663u_mcu_init(struct mt7615_dev *dev);
+int mt7663u_register_device(struct mt7615_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index c8d0f893a47f..ba12f199bce0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -14,6 +14,7 @@
static const struct pci_device_id mt7615_pci_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7615) },
{ PCI_DEVICE(0x14c3, 0x7663) },
+ { PCI_DEVICE(0x14c3, 0x7611) },
{ },
};
@@ -33,13 +34,27 @@ static int mt7615_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret)
- return ret;
+ goto error;
+
+ mt76_pci_disable_aspm(pdev);
map = id->device == 0x7663 ? mt7663e_reg_map : mt7615e_reg_map;
- return mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
- pdev->irq, map);
+ ret = mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ pdev->irq, map);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ pci_free_irq_vectors(pdev);
+
+ return ret;
}
static void mt7615_pci_remove(struct pci_dev *pdev)
@@ -48,18 +63,132 @@ static void mt7615_pci_remove(struct pci_dev *pdev)
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mt7615_unregister_device(dev);
+ devm_free_irq(&pdev->dev, pdev->irq, dev);
+ pci_free_irq_vectors(pdev);
+}
+
+#ifdef CONFIG_PM
+static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ bool hif_suspend;
+ int i, err;
+
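+	/* only ask the firmware to suspend the HIF when WoW is not armed
+	 * and offload firmware is running
+	 */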
+ hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+ mt7615_firmware_offload(dev);
+ if (hif_suspend) {
+ err = mt7615_mcu_set_hif_suspend(dev, true);
+ if (err)
+ return err;
+ }
+
+ napi_disable(&mdev->tx_napi);
+ tasklet_kill(&mdev->tx_tasklet);
+
+ mt76_for_each_q_rx(mdev, i) {
+ napi_disable(&mdev->napi[i]);
+ }
+ tasklet_kill(&dev->irq_tasklet);
+
+ mt7615_dma_reset(dev);
+
+ err = mt7615_wait_pdma_busy(dev);
+ if (err)
+ goto restore;
+
+ if (is_mt7663(mdev)) {
+ mt76_set(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE);
+ if (!mt76_poll_msec(dev, MT_PDMA_SLP_PROT,
+ MT_PDMA_AXI_SLPPROT_RDY,
+ MT_PDMA_AXI_SLPPROT_RDY, 1000)) {
+ dev_err(mdev->dev, "PDMA sleep protection failed\n");
+ err = -EIO;
+ goto restore;
+ }
+ }
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+ pci_save_state(pdev);
+ err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ if (err)
+ goto restore;
+
+ err = mt7615_firmware_own(dev);
+ if (err)
+ goto restore;
+
+ return 0;
+
+restore:
+ mt76_for_each_q_rx(mdev, i) {
+ napi_enable(&mdev->napi[i]);
+ }
+ napi_enable(&mdev->tx_napi);
+ if (hif_suspend)
+ mt7615_mcu_set_hif_suspend(dev, false);
+
+ return err;
+}
+
+static int mt7615_pci_resume(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ bool pdma_reset;
+ int i, err;
+
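+	/* reclaim device ownership from the firmware before touching
+	 * any register
+	 */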
+ err = mt7615_driver_own(dev);
+ if (err < 0)
+ return err;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ pci_restore_state(pdev);
+
+ if (is_mt7663(&dev->mt76)) {
+ mt76_clear(dev, MT_PDMA_SLP_PROT, MT_PDMA_AXI_SLPPROT_ENABLE);
+ mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
+ }
+
+ pdma_reset = !mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL0) &&
+ !mt76_rr(dev, MT_WPDMA_TX_RING0_CTRL1);
+ if (pdma_reset)
+ dev_err(mdev->dev, "PDMA engine must be reinitialized\n");
+
+ mt76_for_each_q_rx(mdev, i) {
+ napi_enable(&mdev->napi[i]);
+ napi_schedule(&mdev->napi[i]);
+ }
+ napi_enable(&mdev->tx_napi);
+ napi_schedule(&mdev->tx_napi);
+
+ if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+ mt7615_firmware_offload(dev))
+ err = mt7615_mcu_set_hif_suspend(dev, false);
+
+ return err;
}
+#endif /* CONFIG_PM */
struct pci_driver mt7615_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7615_pci_device_table,
.probe = mt7615_pci_probe,
.remove = mt7615_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = mt7615_pci_suspend,
+ .resume = mt7615_pci_resume,
+#endif /* CONFIG_PM */
};
MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
MODULE_FIRMWARE(MT7615_ROM_PATCH);
+MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
new file mode 100644
index 000000000000..69cba8609edf
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include "mt7615.h"
+#include "mac.h"
+#include "eeprom.h"
+
+static void mt7615_init_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev = container_of(work, struct mt7615_dev,
+ mcu_work);
+
+ if (mt7615_mcu_init(dev))
+ return;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_del_wtbl_all(dev);
+ mt7615_check_offload_capability(dev);
+}
+
+static int mt7615_init_hardware(struct mt7615_dev *dev)
+{
+ u32 addr = mt7615_reg_map(dev, MT_EFUSE_BASE);
+ int ret, idx;
+
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ INIT_WORK(&dev->mcu_work, mt7615_init_work);
+ spin_lock_init(&dev->token_lock);
+ idr_init(&dev->token);
+
+ ret = mt7615_eeprom_init(dev, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7615_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+ return 0;
+}
+
+static void
+mt7615_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7615_dev *dev;
+ struct mt76_dev *mt76;
+ u32 val, addr;
+
+ mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
+ dev = container_of(mt76, struct mt7615_dev, mt76);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+
+ val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+ MT_LED_CTRL_KICK(mt76->led_pin);
+ if (mt76->led_al)
+ val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ addr = mt7615_reg_map(dev, MT_LED_CTRL);
+ mt76_wr(dev, addr, val);
+}
+
+static int
+mt7615_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt7615_led_set_config(led_cdev, delta_on, delta_off);
+
+ return 0;
+}
+
+static void
+mt7615_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7615_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7615_led_set_config(led_cdev, 0xff, 0);
+}
+
+int mt7615_register_device(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt7615_init_device(dev);
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
+ }
+
+ ret = mt7622_wmac_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
+ ARRAY_SIZE(mt7615_rates));
+ if (ret)
+ return ret;
+
+ ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work);
+ mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
+
+ return mt7615_init_debugfs(dev);
+}
+
+void mt7615_unregister_device(struct mt7615_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ bool mcu_running;
+ int id;
+
+ mcu_running = mt7615_wait_for_mcu_init(dev);
+
+ mt7615_unregister_ext_phy(dev);
+ mt76_unregister_device(&dev->mt76);
+ if (mcu_running)
+ mt7615_mcu_exit(dev);
+ mt7615_dma_cleanup(dev);
+
+ spin_lock_bh(&dev->token_lock);
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt7615_txp_skb_unmap(&dev->mt76, txwi);
+ if (txwi->skb)
+ dev_kfree_skb_any(txwi->skb);
+ mt76_put_txwi(&dev->mt76, txwi);
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
+
+ tasklet_disable(&dev->irq_tasklet);
+
+ mt76_free_device(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
new file mode 100644
index 000000000000..7ec91c0856f5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Roy Luo <royluo@google.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+
+#include "mt7615.h"
+#include "../dma.h"
+#include "mac.h"
+
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
+{
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_txwi_cache *t;
+ struct mt7615_dev *dev;
+ struct mt7615_txp_common *txp;
+ u16 token;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ txp = mt7615_txwi_to_txp(mdev, e->txwi);
+
+ if (is_mt7615(&dev->mt76))
+ token = le16_to_cpu(txp->fw.token);
+ else
+ token = le16_to_cpu(txp->hw.msdu_id[0]) &
+ ~MT_MSDU_ID_VALID;
+
+ spin_lock_bh(&dev->token_lock);
+ t = idr_remove(&dev->token, token);
+ spin_unlock_bh(&dev->token_lock);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->skb);
+}
+
+static void
+mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct mt7615_hw_txp *txp = txp_ptr;
+ struct mt7615_txp_ptr *ptr = &txp->ptr[0];
+ int i, nbuf = tx_info->nbuf - 1;
+ u32 last_mask;
+
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->nbuf = 1;
+
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
+
+ if (is_mt7663(&dev->mt76))
+ last_mask = MT_TXD_LEN_LAST;
+ else
+ last_mask = MT_TXD_LEN_AMSDU_LAST |
+ MT_TXD_LEN_MSDU_LAST;
+
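+	/* scatter entries are packed in pairs: even entries fill
+	 * buf0/len0, odd entries buf1/len1
+	 */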
+ for (i = 0; i < nbuf; i++) {
+ u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
+ u32 addr = tx_info->buf[i + 1].addr;
+
+ if (i == nbuf - 1)
+ len |= last_mask;
+
+ if (i & 1) {
+ ptr->buf1 = cpu_to_le32(addr);
+ ptr->len1 = cpu_to_le16(len);
+ ptr++;
+ } else {
+ ptr->buf0 = cpu_to_le32(addr);
+ ptr->len0 = cpu_to_le16(len);
+ }
+ }
+}
+
+static void
+mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7615_fw_txp *txp = txp_ptr;
+ int nbuf = tx_info->nbuf - 1;
+ int i;
+
+ for (i = 0; i < nbuf; i++) {
+ txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ }
+ txp->nbuf = nbuf;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
+ txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+
+ if (!key)
+ txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+
+ if (vif) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ txp->bss_idx = mvif->idx;
+ }
+
+ txp->token = cpu_to_le16(id);
+ txp->rept_wds_wcid = 0xff;
+}
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ int pid, id;
+ u8 *txwi = (u8 *)txwi_ptr;
+ struct mt76_txwi_cache *t;
+ void *txp;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ struct mt7615_phy *phy = &dev->phy;
+
+ if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
+ phy = mdev->phy2->priv;
+
+ spin_lock_bh(&dev->mt76.lock);
+ mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
+ msta->rates);
+ msta->rate_probe = true;
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
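+	/* allocate a token so the TX completion path can look the
+	 * txwi back up
+	 */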
+ spin_lock_bh(&dev->token_lock);
+ id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
+ spin_unlock_bh(&dev->token_lock);
+ if (id < 0)
+ return id;
+
+ mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
+ pid, key, false);
+
+ txp = txwi + MT_TXD_SIZE;
+ memset(txp, 0, sizeof(struct mt7615_txp_common));
+ if (is_mt7615(&dev->mt76))
+ mt7615_write_fw_txp(dev, tx_info, txp, id);
+ else
+ mt7615_write_hw_txp(dev, tx_info, txp, id);
+
+ tx_info->skb = DMA_DUMMY_DATA;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 1e0d95b917e1..aee433a9eff6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -12,12 +12,15 @@ enum mt7615_reg_base {
MT_ARB_BASE,
MT_HIF_BASE,
MT_CSR_BASE,
+ MT_PLE_BASE,
+ MT_PSE_BASE,
MT_PHY_BASE,
MT_CFG_BASE,
MT_AGG_BASE,
MT_TMAC_BASE,
MT_RMAC_BASE,
MT_DMA_BASE,
+ MT_PF_BASE,
MT_WTBL_BASE_ON,
MT_WTBL_BASE_OFF,
MT_LPON_BASE,
@@ -43,6 +46,7 @@ enum mt7615_reg_base {
#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
#define MT7663_TOP_MISC2_FW_STATE GENMASK(3, 1)
+#define MT_TOP_MISC2_FW_PWR_ON BIT(1)
#define MT_MCU_BASE 0x2000
#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
@@ -58,6 +62,19 @@ enum mt7615_reg_base {
#define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2])
#define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs))
+#define MT_HIF_RST MT_HIF(0x100)
+#define MT_HIF_LOGIC_RST_N BIT(4)
+
+#define MT_PDMA_SLP_PROT MT_HIF(0x154)
+#define MT_PDMA_AXI_SLPPROT_ENABLE BIT(0)
+#define MT_PDMA_AXI_SLPPROT_RDY BIT(16)
+
+#define MT_PDMA_BUSY_STATUS MT_HIF(0x168)
+#define MT_PDMA_TX_IDX_BUSY BIT(2)
+#define MT_PDMA_BUSY_IDX BIT(31)
+
+#define MT_WPDMA_TX_RING0_CTRL0 MT_HIF(0x300)
+#define MT_WPDMA_TX_RING0_CTRL1 MT_HIF(0x304)
#define MT7663_MCU_PCIE_REMAP_2_OFFSET GENMASK(15, 0)
#define MT7663_MCU_PCIE_REMAP_2_BASE GENMASK(31, 16)
@@ -65,6 +82,7 @@ enum mt7615_reg_base {
#define MT_HIF2_BASE 0xf0000
#define MT_HIF2(ofs) (MT_HIF2_BASE + (ofs))
#define MT_PCIE_IRQ_ENABLE MT_HIF2(0x188)
+#define MT_PCIE_DOORBELL_PUSH MT_HIF2(0x1484)
#define MT_CFG_LPCR_HOST MT_HIF(0x1f0)
#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
@@ -133,8 +151,7 @@ enum mt7615_reg_base {
#define MT_CSR(ofs) ((dev)->reg_map[MT_CSR_BASE] + (ofs))
#define MT_CONN_HIF_ON_LPCTL MT_CSR(0x000)
-#define MT_PLE_BASE 0x8000
-#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
+#define MT_PLE(ofs) ((dev)->reg_map[MT_PLE_BASE] + (ofs))
#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
@@ -144,6 +161,14 @@ enum mt7615_reg_base {
#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
((n) << 2))
+#define MT_PSE(ofs) ((dev)->reg_map[MT_PSE_BASE] + (ofs))
+#define MT_PSE_QUEUE_EMPTY MT_PSE(0x0b4)
+#define MT_HIF_0_EMPTY_MASK BIT(16)
+#define MT_HIF_1_EMPTY_MASK BIT(17)
+#define MT_HIF_ALL_EMPTY_MASK GENMASK(17, 16)
+#define MT_PSE_PG_INFO MT_PSE(0x194)
+#define MT_PSE_SRC_CNT GENMASK(27, 16)
+
#define MT_WF_PHY_BASE ((dev)->reg_map[MT_PHY_BASE])
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
@@ -151,29 +176,40 @@ enum mt7615_reg_base {
#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9)
#define MT_WF_PHY_R0_PHYMUX_5(_phy) MT_WF_PHY(0x0614 + ((_phy) << 9))
+#define MT7663_WF_PHY_R0_PHYMUX_5 MT_WF_PHY(0x0414)
#define MT_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x020c + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_PD_CCK GENMASK(15, 0)
+#define MT7663_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x0210 + ((_phy) << 12))
+
#define MT_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0220 + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0)
+#define MT7663_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0224 + ((_phy) << 12))
+
#define MT_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x084 : 0x229c)
#define MT_WF_PHY_PD_OFDM_MASK(_phy) ((_phy) ? GENMASK(24, 16) : \
GENMASK(28, 20))
#define MT_WF_PHY_PD_OFDM(_phy, v) ((v) << ((_phy) ? 16 : 20))
#define MT_WF_PHY_PD_BLK(_phy) ((_phy) ? BIT(25) : BIT(19))
+#define MT7663_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x2aec : 0x22f0)
+
#define MT_WF_PHY_RXTD_BASE MT_WF_PHY(0x2200)
#define MT_WF_PHY_RXTD(_n) (MT_WF_PHY_RXTD_BASE + ((_n) << 2))
+#define MT7663_WF_PHY_RXTD(_n) (MT_WF_PHY(0x25b0) + ((_n) << 2))
+
#define MT_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2314 : 0x2310)
#define MT_WF_PHY_PD_CCK_MASK(_phy) (_phy) ? GENMASK(31, 24) : \
GENMASK(8, 1)
#define MT_WF_PHY_PD_CCK(_phy, v) ((v) << ((_phy) ? 24 : 1))
+#define MT7663_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2350 : 0x234c)
+
#define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00)
#define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2))
@@ -306,10 +342,17 @@ enum mt7615_reg_base {
#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2)
#define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3)
#define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4)
+#define MT_DMA_RCFR0_MCU_RX_TDLS BIT(19)
#define MT_DMA_RCFR0_MCU_RX_BYPASS BIT(21)
#define MT_DMA_RCFR0_RX_DROPPED_UCAST GENMASK(25, 24)
#define MT_DMA_RCFR0_RX_DROPPED_MCAST GENMASK(27, 26)
+#define MT_WF_PF_BASE ((dev)->reg_map[MT_PF_BASE])
+#define MT_WF_PF(ofs) (MT_WF_PF_BASE + (ofs))
+
+#define MT_WF_PFCR MT_WF_PF(0x000)
+#define MT_WF_PFCR_TDLS_EN BIT(9)
+
#define MT_WTBL_BASE(dev) ((dev)->reg_map[MT_WTBL_BASE_ADDR])
#define MT_WTBL_ENTRY_SIZE 256
@@ -379,34 +422,44 @@ enum mt7615_reg_base {
#define MT_LPON_UTTR1 MT_LPON(0x01c)
#define MT_WF_MIB_BASE (dev->reg_map[MT_MIB_BASE])
-#define MT_WF_MIB(ofs) (MT_WF_MIB_BASE + (ofs))
+#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE + (ofs) + (_band) * 0x200)
-#define MT_MIB_M0_MISC_CR MT_WF_MIB(0x00c)
+#define MT_WF_MIB_SCR0 MT_WF_MIB(0, 0)
+#define MT_MIB_SCR0_AGG_CNT_RANGE_EN BIT(21)
-#define MT_MIB_SDR3(n) MT_WF_MIB(0x014 + ((n) << 9))
+#define MT_MIB_M0_MISC_CR(_band) MT_WF_MIB(_band, 0x00c)
+
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
-#define MT_MIB_SDR9(n) MT_WF_MIB(0x02c + ((n) << 9))
+#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c)
#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
-#define MT_MIB_SDR16(n) MT_WF_MIB(0x048 + ((n) << 9))
+#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040)
+#define MT_MIB_AMPDU_MPDU_COUNT GENMASK(23, 0)
+
+#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044)
+#define MT_MIB_AMPDU_ACK_COUNT GENMASK(23, 0)
+
+#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048)
#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
-#define MT_MIB_SDR36(n) MT_WF_MIB(0x098 + ((n) << 9))
+#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098)
#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR37(n) MT_WF_MIB(0x09c + ((n) << 9))
+#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c)
#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
-#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(0x100 + ((_band) << 9) + \
- ((n) << 4))
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
-#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(0x104 + ((_band) << 9) + \
- ((n) << 4))
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
+#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
-#define MT_TX_AGG_CNT(n) MT_WF_MIB(0xa8 + ((n) << 2))
+#define MT_MIB_ARNG(n) MT_WF_MIB(0, 0x4b8 + ((n) << 2))
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0xa8 + ((n) << 2))
#define MT_DMA_SHDL(ofs) (dev->reg_map[MT_DMA_SHDL_BASE] + (ofs))
@@ -449,6 +502,10 @@ enum mt7615_reg_base {
#define MT_LED_STATUS_ON GENMASK(23, 16)
#define MT_LED_STATUS_DURATION GENMASK(15, 0)
+#define MT_PDMA_BUSY 0x82000504
+#define MT_PDMA_TX_BUSY BIT(0)
+#define MT_PDMA_RX_BUSY BIT(1)
+
#define MT_EFUSE_BASE ((dev)->reg_map[MT_EFUSE_ADDR_BASE])
#define MT_EFUSE_BASE_CTRL 0x000
#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
@@ -470,4 +527,27 @@ enum mt7615_reg_base {
#define MT_INFRACFG_MISC 0x700
#define MT_INFRACFG_MISC_AP2CONN_WAKE BIT(1)
+#define MT_UMAC_BASE 0x7c000000
+#define MT_UMAC(ofs) (MT_UMAC_BASE + (ofs))
+#define MT_UDMA_TX_QSEL MT_UMAC(0x008)
+#define MT_FW_DL_EN BIT(3)
+
+#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c)
+#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0)
+#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8)
+
+#define MT_UDMA_WLCFG_0 MT_UMAC(0x18)
+#define MT_WL_RX_AGG_TO GENMASK(7, 0)
+#define MT_WL_RX_AGG_LMT GENMASK(15, 8)
+#define MT_WL_TX_TMOUT_FUNC_EN BIT(16)
+#define MT_WL_TX_DPH_CHK_EN BIT(17)
+#define MT_WL_RX_MPSZ_PAD0 BIT(18)
+#define MT_WL_RX_FLUSH BIT(19)
+#define MT_TICK_1US_EN BIT(20)
+#define MT_WL_RX_AGG_EN BIT(21)
+#define MT_WL_RX_EN BIT(22)
+#define MT_WL_TX_EN BIT(23)
+#define MT_WL_RX_BUSY BIT(30)
+#define MT_WL_TX_BUSY BIT(31)
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
index 43aa49706c66..9aa5183c7a56 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
@@ -36,10 +36,8 @@ static int mt7622_wmac_probe(struct platform_device *pdev)
int irq;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get device IRQ\n");
+ if (irq < 0)
return irq;
- }
mem_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mem_base)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
new file mode 100644
index 000000000000..a50077eb24d7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+
+static const u32 mt7663u_reg_map[] = {
+ [MT_TOP_CFG_BASE] = 0x80020000,
+ [MT_HW_BASE] = 0x80000000,
+ [MT_DMA_SHDL_BASE] = 0x5000a000,
+ [MT_HIF_BASE] = 0x50000000,
+ [MT_CSR_BASE] = 0x40000000,
+ [MT_EFUSE_ADDR_BASE] = 0x78011000,
+ [MT_TOP_MISC_BASE] = 0x81020000,
+ [MT_PLE_BASE] = 0x82060000,
+ [MT_PSE_BASE] = 0x82068000,
+ [MT_PHY_BASE] = 0x82070000,
+ [MT_WTBL_BASE_ADDR] = 0x820e0000,
+ [MT_CFG_BASE] = 0x820f0000,
+ [MT_AGG_BASE] = 0x820f2000,
+ [MT_ARB_BASE] = 0x820f3000,
+ [MT_TMAC_BASE] = 0x820f4000,
+ [MT_RMAC_BASE] = 0x820f5000,
+ [MT_DMA_BASE] = 0x820f7000,
+ [MT_PF_BASE] = 0x820f8000,
+ [MT_WTBL_BASE_ON] = 0x820f9000,
+ [MT_WTBL_BASE_OFF] = 0x820f9800,
+ [MT_LPON_BASE] = 0x820fb000,
+ [MT_MIB_BASE] = 0x820fd000,
+};
+
+static const struct usb_device_id mt7615_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
+ { },
+};
+
+static void mt7663u_stop(struct ieee80211_hw *hw)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mt7615_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ cancel_delayed_work_sync(&phy->scan_work);
+ cancel_delayed_work_sync(&phy->mac_work);
+ mt76u_stop_tx(&dev->mt76);
+}
+
+static void mt7663u_cleanup(struct mt7615_dev *dev)
+{
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ mt76u_queues_deinit(&dev->mt76);
+}
+
+static void
+mt7663u_mac_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ __le32 *txwi;
+ int pid;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+
+ txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
+ memset(txwi, 0, MT_USB_TXD_SIZE);
+ mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
+ skb_push(skb, MT_USB_TXD_SIZE);
+}
+
+static int
+__mt7663u_mac_set_rates(struct mt7615_dev *dev,
+ struct mt7615_wtbl_desc *wd)
+{
+ struct mt7615_rate_desc *rate = &wd->rate;
+ struct mt7615_sta *sta = wd->sta;
+ u32 w5, w27, addr, val;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ if (!sta)
+ return -EINVAL;
+
+ if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ return -ETIMEDOUT;
+
+ addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx);
+
+ w27 = mt76_rr(dev, addr + 27 * 4);
+ w27 &= ~MT_WTBL_W27_CC_BW_SEL;
+ w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw);
+
+ w5 = mt76_rr(dev, addr + 5 * 4);
+ w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
+ MT_WTBL_W5_MPDU_OK_COUNT |
+ MT_WTBL_W5_MPDU_FAIL_COUNT |
+ MT_WTBL_W5_RATE_IDX);
+ w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) |
+ FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
+ rate->bw_idx ? rate->bw_idx - 1 : 7);
+
+ mt76_wr(dev, MT_WTBL_RIUCR0, w5);
+
+ mt76_wr(dev, MT_WTBL_RIUCR1,
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR2,
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR3,
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3]));
+
+ mt76_wr(dev, MT_WTBL_UPDATE,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) |
+ MT_WTBL_UPDATE_RATE_UPDATE |
+ MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+ mt76_wr(dev, addr + 27 * 4, w27);
+
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ val = mt76_rr(dev, MT_LPON_UTTR0);
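+	/* store the TSF with the rateset index in the LSB so the TX
+	 * status path can match the rates against it
+	 */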
+ sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
+
+ if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates;
+ sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+
+ return 0;
+}
+
+static int
+__mt7663u_mac_set_key(struct mt7615_dev *dev,
+ struct mt7615_wtbl_desc *wd)
+{
+ struct mt7615_key_desc *key = &wd->key;
+ struct mt7615_sta *sta = wd->sta;
+ enum mt7615_cipher_type cipher;
+ struct mt76_wcid *wcid;
+ int err;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ if (!sta)
+ return -EINVAL;
+
+ cipher = mt7615_mac_get_cipher(key->cipher);
+ if (cipher == MT_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ wcid = &wd->sta->wcid;
+
+ mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd);
+ err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
+ cipher, key->cmd);
+ if (err < 0)
+ return err;
+
+ err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
+ key->cmd);
+ if (err < 0)
+ return err;
+
+ if (key->cmd == SET_KEY)
+ wcid->cipher |= BIT(cipher);
+ else
+ wcid->cipher &= ~BIT(cipher);
+
+ return 0;
+}
+
+void mt7663u_wtbl_work(struct work_struct *work)
+{
+ struct mt7615_wtbl_desc *wd, *wd_next;
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, wtbl_work);
+
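+ /* apply queued WTBL rate/key updates here: USB register access can
+ * sleep, so the updates are deferred to this work.
+ */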
+ list_for_each_entry_safe(wd, wd_next, &dev->wd_head, node) {
+ spin_lock_bh(&dev->mt76.lock);
+ list_del(&wd->node);
+ spin_unlock_bh(&dev->mt76.lock);
+
+ mutex_lock(&dev->mt76.mutex);
+ switch (wd->type) {
+ case MT7615_WTBL_RATE_DESC:
+ __mt7663u_mac_set_rates(dev, wd);
+ break;
+ case MT7615_WTBL_KEY_DESC:
+ __mt7663u_mac_set_key(dev, wd);
+ break;
+ }
+ mutex_unlock(&dev->mt76.mutex);
+
+ kfree(wd);
+ }
+}
+
+static void
+mt7663u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
+{
+ skb_pull(e->skb, MT_USB_HDR_SIZE + MT_USB_TXD_SIZE);
+ mt76_tx_complete_skb(mdev, e->skb);
+}
+
+static int
+mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ struct mt7615_sta *msta;
+
+ msta = container_of(wcid, struct mt7615_sta, wcid);
+ spin_lock_bh(&dev->mt76.lock);
+ mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
+ msta->rates);
+ msta->rate_probe = true;
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+ mt7663u_mac_write_txwi(dev, wcid, qid, sta, tx_info->skb);
+
+ return mt76u_skb_dma_info(tx_info->skb, tx_info->skb->len);
+}
+
+static bool mt7663u_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7615_mac_sta_poll(dev);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return false;
+}
+
+static int mt7663u_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = MT_USB_TXD_SIZE,
+ .drv_flags = MT_DRV_RX_DMA_HDR,
+ .tx_prepare_skb = mt7663u_tx_prepare_skb,
+ .tx_complete_skb = mt7663u_tx_complete_skb,
+ .tx_status_data = mt7663u_tx_status_data,
+ .rx_skb = mt7615_queue_rx_skb,
+ .sta_ps = mt7615_sta_ps,
+ .sta_add = mt7615_mac_sta_add,
+ .sta_remove = mt7615_mac_sta_remove,
+ .update_survey = mt7615_update_channel,
+ };
+ struct usb_device *udev = interface_to_usbdev(usb_intf);
+ struct ieee80211_ops *ops;
+ struct mt7615_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ops = devm_kmemdup(&usb_intf->dev, &mt7615_ops, sizeof(mt7615_ops),
+ GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ ops->stop = mt7663u_stop;
+
+ mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ dev->reg_map = mt7663u_reg_map;
+ dev->ops = ops;
+ ret = mt76u_init(mdev, usb_intf, true);
+ if (ret < 0)
+ goto error;
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ if (mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+ FW_STATE_PWR_ON << 1, 500)) {
+ dev_dbg(dev->mt76.dev, "USB device already powered on\n");
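+ /* flag the leftover fw state so mt7663u_mcu_init restarts the MCU */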
+ set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
+ goto alloc_queues;
+ }
+
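+ /* power on the device through a vendor request and wait for the
+ * firmware power-on state.
+ */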
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ goto error;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+ FW_STATE_PWR_ON << 1, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ ret = -EIO;
+ goto error;
+ }
+
+alloc_queues:
+ ret = mt76u_alloc_mcu_queue(&dev->mt76);
+ if (ret)
+ goto error;
+
+ ret = mt76u_alloc_queues(&dev->mt76);
+ if (ret)
+ goto error;
+
+ ret = mt7663u_register_device(dev);
+ if (ret)
+ goto error_freeq;
+
+ return 0;
+
+error_freeq:
+ mt76u_queues_deinit(&dev->mt76);
+error:
+ mt76u_deinit(&dev->mt76);
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ ieee80211_free_hw(mdev->hw);
+
+ return ret;
+}
+
+static void mt7663u_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt7615_dev *dev = usb_get_intfdata(usb_intf);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return;
+
+ ieee80211_unregister_hw(dev->mt76.hw);
+ mt7663u_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76u_deinit(&dev->mt76);
+ ieee80211_free_hw(dev->mt76.hw);
+}
+
+#ifdef CONFIG_PM
+static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state)
+{
+ struct mt7615_dev *dev = usb_get_intfdata(intf);
+
+ if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+ mt7615_firmware_offload(dev)) {
+ int err;
+
+ err = mt7615_mcu_set_hif_suspend(dev, true);
+ if (err < 0)
+ return err;
+ }
+
+ mt76u_stop_rx(&dev->mt76);
+
+ mt76u_stop_tx(&dev->mt76);
+ tasklet_kill(&dev->mt76.tx_tasklet);
+
+ return 0;
+}
+
+static int mt7663u_resume(struct usb_interface *intf)
+{
+ struct mt7615_dev *dev = usb_get_intfdata(intf);
+ int err;
+
+ err = mt76u_vendor_request(&dev->mt76, MT_VEND_FEATURE_SET,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x5, 0x0, NULL, 0);
+ if (err)
+ return err;
+
+ err = mt76u_resume_rx(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+ mt7615_firmware_offload(dev))
+ err = mt7615_mcu_set_hif_suspend(dev, false);
+
+ return err;
+}
+#endif /* CONFIG_PM */
+
+MODULE_DEVICE_TABLE(usb, mt7615_device_table);
+MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
+MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_ROM_PATCH);
+
+static struct usb_driver mt7663u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7615_device_table,
+ .probe = mt7663u_probe,
+ .disconnect = mt7663u_disconnect,
+#ifdef CONFIG_PM
+ .suspend = mt7663u_suspend,
+ .resume = mt7663u_resume,
+ .reset_resume = mt7663u_resume,
+#endif /* CONFIG_PM */
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7663u_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c
new file mode 100644
index 000000000000..1fbc9601391d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "regs.h"
+
+static int mt7663u_dma_sched_init(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+ /* disable refill for group 5 - group 15 and raise group 2
+ * and group 3 to high priority.
+ */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006);
+ mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16));
+
+ for (i = 0; i < 5; i++)
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff));
+
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
+
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444);
+
+ /* group priority from high to low:
+ * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0.
+ */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c);
+
+ mt76_wr(dev, MT_UDMA_WLCFG_1,
+ FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) |
+ FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1));
+
+ /* setup UDMA Rx Flush */
+ mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
+ /* hif reset */
+ mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N);
+
+ mt76_set(dev, MT_UDMA_WLCFG_0,
+ MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN |
+ MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN |
+ MT_WL_TX_TMOUT_FUNC_EN);
+ mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO,
+ FIELD_PREP(MT_WL_RX_AGG_LMT, 32) |
+ FIELD_PREP(MT_WL_RX_AGG_TO, 100));
+
+ return 0;
+}
+
+static int mt7663u_init_hardware(struct mt7615_dev *dev)
+{
+ int ret, idx;
+
+ ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7663u_dma_sched_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+ return 0;
+}
+
+static void mt7663u_init_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, mcu_work);
+ if (mt7663u_mcu_init(dev))
+ return;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_del_wtbl_all(dev);
+ mt7615_check_offload_capability(dev);
+}
+
+int mt7663u_register_device(struct mt7615_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ int err;
+
+ INIT_WORK(&dev->wtbl_work, mt7663u_wtbl_work);
+ INIT_WORK(&dev->mcu_work, mt7663u_init_work);
+ INIT_LIST_HEAD(&dev->wd_head);
+ mt7615_init_device(dev);
+
+ err = mt7663u_init_hardware(dev);
+ if (err)
+ return err;
+
+ hw->extra_tx_headroom += MT_USB_HDR_SIZE + MT_USB_TXD_SIZE;
+ /* check hw scatter-gather support in order to enable A-MSDU */
+ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1;
+
+ err = mt76_register_device(&dev->mt76, true, mt7615_rates,
+ ARRAY_SIZE(mt7615_rates));
+ if (err < 0)
+ return err;
+
+ if (!dev->mt76.usb.sg_en) {
+ struct ieee80211_sta_vht_cap *vht_cap;
+
+ /* decrease max A-MSDU size if SG is not supported */
+ vht_cap = &dev->mphy.sband_5g.sband.vht_cap;
+ vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ }
+
+ ieee80211_queue_work(hw, &dev->mcu_work);
+ mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
+
+ return mt7615_init_debugfs(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
new file mode 100644
index 000000000000..cd709fd617db
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+
+static int
+mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ int ret, seq, ep;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ mt7615_mcu_fill_msg(dev, skb, cmd, &seq);
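+ /* fw scatter frames go out on the BE data endpoint; all other
+ * commands use the in-band command endpoint.
+ */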
+ if (cmd != MCU_CMD_FW_SCATTER)
+ ep = MT_EP_OUT_INBAND_CMD;
+ else
+ ep = MT_EP_OUT_AC_BE;
+
+ ret = mt76u_skb_dma_info(skb, skb->len);
+ if (ret < 0)
+ goto out;
+
+ ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
+ 1000, ep);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out;
+
+ if (wait_resp)
+ ret = mt7615_mcu_wait_response(dev, cmd, seq);
+
+out:
+ mutex_unlock(&mdev->mcu.mutex);
+
+ return ret;
+}
+
+int mt7663u_mcu_init(struct mt7615_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7663u_mcu_ops = {
+ .headroom = MT_USB_HDR_SIZE + sizeof(struct mt7615_mcu_txd),
+ .tailroom = MT_USB_TAIL_SIZE,
+ .mcu_skb_send_msg = mt7663u_mcu_send_message,
+ .mcu_send_msg = mt7615_mcu_msg_send,
+ .mcu_restart = mt7615_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mt7663u_mcu_ops;
+
+ mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
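+ /* the fw may still be running from a previous boot: restart the
+ * MCU and redo the power-on handshake.
+ */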
+ if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
+ mt7615_mcu_restart(&dev->mt76);
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+ MT_TOP_MISC2_FW_PWR_ON, 0, 500))
+ return -EIO;
+
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+ MT_TOP_MISC2_FW_PWR_ON,
+ FW_STATE_PWR_ON << 1, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ return -EIO;
+ }
+ }
+
+ ret = __mt7663_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 57f8d56737eb..dc8bf4c6969a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -12,24 +12,6 @@
#include "initvals.h"
#include "../mt76x02_phy.h"
-static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
-{
- struct ieee80211_sta_vht_cap *vht_cap = &sband->vht_cap;
- u16 mcs_map = 0;
- int i;
-
- vht_cap->cap &= ~IEEE80211_VHT_CAP_RXLDPC;
- for (i = 0; i < 8; i++) {
- if (!i)
- mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_7 << (i * 2));
- else
- mcs_map |=
- (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
- }
- vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
- vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
-}
-
static void
mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable)
{
@@ -263,9 +245,11 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
return ret;
if (dev->mt76.cap.has_5ghz) {
- /* overwrite unsupported features */
- mt76x0_vht_cap_mask(&dev->mphy.sband_5g.sband);
- mt76x0_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ struct ieee80211_supported_band *sband;
+
+ sband = &dev->mphy.sband_5g.sband;
+ sband->vht_cap.cap &= ~IEEE80211_VHT_CAP_RXLDPC;
+ mt76x0_init_txpower(dev, sband);
}
if (dev->mt76.cap.has_2ghz)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 0b520ae08d01..f7ec3400e368 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -29,6 +29,7 @@ static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mt76.mac_work);
+ clear_bit(MT76_RESTART, &dev->mphy.state);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
0, 1000))
@@ -83,6 +84,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.set_coverage_class = mt76x02_set_coverage_class,
.set_rts_threshold = mt76x02_set_rts_threshold,
.get_antenna = mt76_get_antenna,
+ .reconfig_complete = mt76x02_reconfig_complete,
};
static int mt76x0e_register_device(struct mt76x02_dev *dev)
@@ -216,6 +218,7 @@ mt76x0e_remove(struct pci_dev *pdev)
}
static const struct pci_device_id mt76x0e_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7610) },
{ PCI_DEVICE(0x14c3, 0x7630) },
{ PCI_DEVICE(0x14c3, 0x7650) },
{ },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 23040c193ca5..4c9bbc7ce023 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -15,6 +15,7 @@
#include "mt76x02_dfs.h"
#include "mt76x02_dma.h"
+#define MT76x02_N_WCIDS 128
#define MT_CALIBRATE_INTERVAL HZ
#define MT_MAC_WORK_INTERVAL (HZ / 10)
@@ -187,6 +188,8 @@ void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed);
+void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
struct beacon_bc_data {
struct mt76x02_dev *dev;
@@ -216,6 +219,7 @@ static inline bool is_mt76x0(struct mt76x02_dev *dev)
static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612 ||
+ mt76_chip(&dev->mt76) == 0x7632 ||
mt76_chip(&dev->mt76) == 0x7662 ||
mt76_chip(&dev->mt76) == 0x7602;
}
@@ -243,7 +247,7 @@ mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx)
{
struct mt76_wcid *wcid;
- if (idx >= ARRAY_SIZE(dev->wcid))
+ if (idx >= MT76x02_N_WCIDS)
return NULL;
wcid = rcu_dereference(dev->wcid[idx]);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index 68b40d63a46d..ff448a1ad4e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -144,7 +144,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
if (!dir)
return;
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt76_queues_read);
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 8b072277ea10..e4e03beabe43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -409,6 +409,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
ba_size <<= sta->ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size - 1);
@@ -416,9 +417,11 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
ba_size = 0;
txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+ if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
+ ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
txwi_flags |= MT_TXWI_FLAGS_AMPDU |
- FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density);
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, ampdu_density);
}
if (ieee80211_is_probe_resp(hdr->frame_control) ||
@@ -558,7 +561,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
rcu_read_lock();
- if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
+ if (stat->wcid < MT76x02_N_WCIDS)
wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
if (wcid && wcid->sta) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 5664749ad6c1..267058086a90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -20,7 +20,10 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int ret;
u8 seq;
- skb = mt76x02_mcu_msg_alloc(data, len);
+ if (dev->mcu_timeout)
+ return -EIO;
+
+ skb = mt76_mcu_msg_alloc(mdev, data, len);
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
index c81a9655c4c9..5fba1266c648 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -85,12 +85,6 @@ struct mt76x02_patch_header {
u8 pad[2];
};
-static inline struct sk_buff *
-mt76x02_mcu_msg_alloc(const void *data, int len)
-{
- return mt76_mcu_msg_alloc(data, 0, len, 0);
-}
-
int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param);
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 7dcc5d342e9f..cbbe986655fe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -415,7 +415,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
rcu_read_unlock();
- for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
+ for (i = 0; i < MT76x02_N_WCIDS; i++) {
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct mt76x02_sta *msta;
@@ -489,8 +489,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+ mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
+ }
mt76x02_mac_start(dev);
@@ -520,6 +521,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
}
if (restart) {
+ set_bit(MT76_RESTART, &dev->mphy.state);
mt76x02_mcu_function_select(dev, Q_SELECT, 1);
ieee80211_restart_hw(dev->mt76.hw);
} else {
@@ -528,8 +530,23 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
}
}
+void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ clear_bit(MT76_RESTART, &dev->mphy.state);
+}
+EXPORT_SYMBOL_GPL(mt76x02_reconfig_complete);
+
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
+ if (test_bit(MT76_RESTART, &dev->mphy.state))
+ return;
+
if (mt76x02_tx_hang(dev)) {
if (++dev->tx_hang_check >= MT_TX_HANG_TH)
goto restart;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index 843b86560ed4..a30bb536fc8a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -123,7 +123,7 @@ mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
struct sk_buff *skb;
int err;
- skb = mt76_mcu_msg_alloc(data, MT_CMD_HDR_LEN, len, 8);
+ skb = mt76_mcu_msg_alloc(dev, data, len);
if (!skb)
return -ENOMEM;
@@ -291,6 +291,8 @@ EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);
void mt76x02u_init_mcu(struct mt76_dev *dev)
{
static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
+ .headroom = MT_CMD_HDR_LEN,
+ .tailroom = 8,
.mcu_send_msg = mt76x02u_mcu_send_msg,
.mcu_wr_rp = mt76x02u_mcu_wr_rp,
.mcu_rd_rp = mt76x02u_mcu_rd_rp,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index b7a120b0856d..44822a849eb1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -46,6 +46,8 @@ static const struct ieee80211_iface_limit mt76x02_if_limits[] = {
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP)
},
};
@@ -60,6 +62,8 @@ static const struct ieee80211_iface_limit mt76x02u_if_limits[] = {
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP)
},
};
@@ -245,7 +249,7 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
memset(msta, 0, sizeof(*msta));
- idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT76x02_N_WCIDS);
if (idx < 0)
return -ENOSPC;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 4a748a6f0ce2..410ffce3baff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -4,6 +4,7 @@
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <asm/unaligned.h>
#include "mt76x2.h"
#include "eeprom.h"
@@ -76,6 +77,7 @@ mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
};
+ struct device_node *np = dev->mt76.dev->of_node;
u8 *eeprom = dev->mt76.eeprom.data;
u8 prev_grp0[4] = {
eeprom[MT_EE_TX_POWER_0_START_5G],
@@ -86,6 +88,9 @@ mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
u16 val;
int i;
+ if (!np || !of_property_read_bool(np, "mediatek,eeprom-merge-otp"))
+ return;
+
if (!mt76x2_has_cal_free_data(dev, efuse))
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index c69579e5f647..f27774f57438 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -256,6 +256,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mt76.mac_work);
cancel_delayed_work_sync(&dev->wdt_work);
+ clear_bit(MT76_RESTART, &dev->mphy.state);
mt76x02_mcu_set_radio_state(dev, false);
mt76x2_mac_stop(dev, false);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 105e5b99b3f9..98f4cf398320 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -10,12 +10,9 @@ static int
mt76x2_start(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- int ret;
mt76x02_mac_start(dev);
- ret = mt76x2_phy_start(dev);
- if (ret)
- return ret;
+ mt76x2_phy_start(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
@@ -35,11 +32,9 @@ mt76x2_stop(struct ieee80211_hw *hw)
mt76x2_stop_hardware(dev);
}
-static int
+static void
mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
{
- int ret;
-
cancel_delayed_work_sync(&dev->cal_work);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
@@ -50,7 +45,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76_set_channel(&dev->mphy);
mt76x2_mac_stop(dev, true);
- ret = mt76x2_phy_set_channel(dev, chandef);
+ mt76x2_phy_set_channel(dev, chandef);
mt76x02_mac_cc_reset(dev);
mt76x02_dfs_init_params(dev);
@@ -64,15 +59,12 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt76_txq_schedule_all(&dev->mphy);
-
- return ret;
}
static int
mt76x2_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
- int ret = 0;
mutex_lock(&dev->mt76.mutex);
@@ -101,11 +93,11 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
- ret = mt76x2_set_channel(dev, &hw->conf.chandef);
+ mt76x2_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
- return ret;
+ return 0;
}
static void
@@ -127,7 +119,7 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101;
dev->mphy.antenna_mask = tx_ant;
- mt76_set_stream_caps(&dev->mt76, true);
+ mt76_set_stream_caps(&dev->mphy, true);
mt76x2_phy_set_antenna(dev);
mutex_unlock(&dev->mt76.mutex);
@@ -162,5 +154,6 @@ const struct ieee80211_ops mt76x2_ops = {
.set_antenna = mt76x2_set_antenna,
.get_antenna = mt76_get_antenna,
.set_rts_threshold = mt76x02_set_rts_threshold,
+ .reconfig_complete = mt76x02_reconfig_complete,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index eafa283ca699..3a4e41724af1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -16,8 +16,10 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
{ USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
+ { USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
{ USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
+ { USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */
{ },
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
new file mode 100644
index 000000000000..d98225da694c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: ISC
+config MT7915E
+ tristate "MediaTek MT7915E (PCIe) support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7915-based wireless PCIe devices,
+ which support concurrent dual-band operation at both 2.4GHz
+ and 5GHz, with IEEE 802.11ax 4x4:4SS, 1024-QAM, 160MHz
+ channels, OFDMA, spatial reuse and dual carrier modulation.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
new file mode 100644
index 000000000000..57fe726cc38b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7915E) += mt7915e.o
+
+mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
+ debugfs.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
new file mode 100644
index 000000000000..5278bee812f1
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7915.h"
+#include "eeprom.h"
+
+/** global debugfs **/
+
+/* test knob for system layer 1/2 error recovery */
+static int mt7915_ser_trigger_set(void *data, u64 val)
+{
+ enum {
+ SER_SET_RECOVER_L1 = 1,
+ SER_SET_RECOVER_L2,
+ SER_ENABLE = 2,
+ SER_RECOVER
+ };
+ struct mt7915_dev *dev = data;
+ int ret = 0;
+
+ switch (val) {
+ case SER_SET_RECOVER_L1:
+ case SER_SET_RECOVER_L2:
+ /* enable the requested recovery level, then trigger it */
+ ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_set_ser(dev, SER_RECOVER, val, 0);
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ser_trigger, NULL,
+ mt7915_ser_trigger_set, "%lld\n");
+
+static int
+mt7915_radar_trigger(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+
+ return mt7915_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, 1, 0, 0);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
+ mt7915_radar_trigger, "%lld\n");
+
+static int
+mt7915_dbdc_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+
+ if (val)
+ mt7915_register_ext_phy(dev);
+ else
+ mt7915_unregister_ext_phy(dev);
+
+ return 0;
+}
+
+static int
+mt7915_dbdc_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = !!mt7915_ext_phy(dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7915_dbdc_get,
+ mt7915_dbdc_set, "%lld\n");
+
+static int
+mt7915_fw_debug_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+ enum {
+ DEBUG_TXCMD = 62,
+ DEBUG_CMD_RPT_TX,
+ DEBUG_CMD_RPT_TRIG,
+ DEBUG_SPL,
+ DEBUG_RPT_RX,
+ } debug;
+
+ dev->fw_debug = !!val;
+
+ mt7915_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0);
+
+ for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++)
+ mt7915_mcu_fw_dbg_ctrl(dev, debug, dev->fw_debug);
+
+ return 0;
+}
+
+static int
+mt7915_fw_debug_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw_debug;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7915_fw_debug_get,
+ mt7915_fw_debug_set, "%lld\n");
+
+static void
+mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
+ struct seq_file *file)
+{
+ struct mt7915_dev *dev = file->private;
+ bool ext_phy = phy != &dev->phy;
+ int bound[15], range[4], i, n;
+
+ if (!phy)
+ return;
+
+ /* Tx ampdu stat */
+ for (i = 0; i < ARRAY_SIZE(range); i++)
+ range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
+
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
+
+ seq_printf(file, "\nPhy %d\n", ext_phy);
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i] + 1, bound[i + 1]);
+
+ seq_puts(file, "\nCount: ");
+ n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + n]);
+ seq_puts(file, "\n");
+
+ seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
+}
+
+static void
+mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
+{
+ struct mt7915_dev *dev = s->private;
+ bool ext_phy = phy != &dev->phy;
+ int cnt;
+
+ if (!phy)
+ return;
+
+ /* Tx Beamformer monitor */
+ seq_puts(s, "\nTx Beamformer applied PPDU counts: ");
+
+ cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy));
+ seq_printf(s, "iBF: %ld, eBF: %ld\n",
+ FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt),
+ FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt));
+
+ /* Tx Beamformer Rx feedback monitor */
+ seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
+
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
+ seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld\n",
+ FIELD_GET(MT_ETBF_RX_FB_ALL, cnt),
+ FIELD_GET(MT_ETBF_RX_FB_HE, cnt),
+ FIELD_GET(MT_ETBF_RX_FB_VHT, cnt),
+ FIELD_GET(MT_ETBF_RX_FB_HT, cnt));
+
+ /* Tx Beamformee Rx NDPA & Tx feedback report */
+ cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
+ seq_printf(s, "Tx Beamformee successful feedback frames: %ld\n",
+ FIELD_GET(MT_ETBF_TX_FB_CPL, cnt));
+ seq_printf(s, "Tx Beamformee feedback triggered counts: %ld\n",
+ FIELD_GET(MT_ETBF_TX_FB_TRI, cnt));
+
+ /* Tx SU counters */
+ cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
+ seq_printf(s, "Tx single-user successful MPDU counts: %d\n", cnt);
+
+ seq_puts(s, "\n");
+}
+
+static int
+mt7915_tx_stats_read(struct seq_file *file, void *data)
+{
+ struct mt7915_dev *dev = file->private;
+ int stat[8], i, n;
+
+ mt7915_ampdu_stat_read_phy(&dev->phy, file);
+ mt7915_txbf_stat_read_phy(&dev->phy, file);
+
+ mt7915_ampdu_stat_read_phy(mt7915_ext_phy(dev), file);
+ mt7915_txbf_stat_read_phy(mt7915_ext_phy(dev), file);
+
+ /* Tx amsdu info */
+ seq_puts(file, "Tx MSDU stat:\n");
+ for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) {
+ stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
+ n += stat[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stat); i++) {
+ seq_printf(file, "AMSDU pack count of %d MSDU in TXD: 0x%x ",
+ i + 1, stat[i]);
+ if (n != 0)
+ seq_printf(file, "(%d%%)\n", stat[i] * 100 / n);
+ else
+ seq_puts(file, "\n");
+ }
+
+ return 0;
+}
+
+static int
+mt7915_tx_stats_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7915_tx_stats_read, inode->i_private);
+}
+
+static const struct file_operations fops_tx_stats = {
+ .open = mt7915_tx_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mt7915_read_temperature(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ int temp;
+
+ /* cpu */
+ temp = mt7915_mcu_get_temperature(dev, 0);
+ seq_printf(s, "Temperature: %d\n", temp);
+
+ return 0;
+}
+
+static int
+mt7915_queues_acq(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ int j, acs = i / 4, index = i % 4;
+ u32 ctrl, val, qlen = 0;
+
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
+ ctrl = BIT(31) | BIT(15) | (acs << 8);
+
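+ /* accumulate the queue depth of every non-empty hw queue
+ * in this AC.
+ */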
+ for (j = 0; j < 32; j++) {
+ if (val & BIT(j))
+ continue;
+
+ mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
+ ctrl | (j + (index << 5)));
+ qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ }
+ seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ }
+
+ return 0;
+}
+
+static int
+mt7915_queues_read(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ static const struct {
+ char *queue;
+ int id;
+ } queue_map[] = {
+ { "WFDMA0", MT_TXQ_BE },
+ { "MCUWM", MT_TXQ_MCU },
+ { "MCUWA", MT_TXQ_MCU_WA },
+ { "MCUFWQ", MT_TXQ_FWDL },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
+ struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id];
+
+ if (!q->q)
+ continue;
+
+ seq_printf(s,
+ "%s: queued=%d head=%d tail=%d\n",
+ queue_map[i].queue, q->q->queued, q->q->head,
+ q->q->tail);
+ }
+
+ return 0;
+}
+
+static void
+mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta,
+ s8 txpower_cur, int band)
+{
+ static const char * const sku_group_name[] = {
+ "CCK", "OFDM", "HT20", "HT40",
+ "VHT20", "VHT40", "VHT80", "VHT160",
+ "RU26", "RU52", "RU106", "RU242/SU20",
+ "RU484/SU40", "RU996/SU80", "RU2x996/SU160"
+ };
+ s8 txpower[MT7915_SKU_RATE_NUM];
+ int i, idx = 0;
+
+ for (i = 0; i < MT7915_SKU_RATE_NUM; i++)
+ txpower[i] = DIV_ROUND_UP(txpower_cur + delta[i], 2);
+
+ for (i = 0; i < MAX_SKU_RATE_GROUP_NUM; i++) {
+ const struct sku_group *sku = &mt7915_sku_groups[i];
+ u32 offset = sku->offset[band];
+
+ if (!offset) {
+ idx += sku->len;
+ continue;
+ }
+
+ mt76_seq_puts_array(s, sku_group_name[i],
+ txpower + idx, sku->len);
+ idx += sku->len;
+ }
+}
+
+static int
+mt7915_read_rate_txpower(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ struct mt76_phy *mphy = &dev->mphy;
+ enum nl80211_band band = mphy->chandef.chan->band;
+ s8 *delta = dev->rate_power[band];
+ s8 txpower_base = mphy->txpower_cur - delta[MT7915_SKU_MAX_DELTA_IDX];
+
+ seq_puts(s, "Band 0:\n");
+ mt7915_puts_rate_txpower(s, delta, txpower_base, band);
+
+ if (dev->mt76.phy2) {
+ mphy = dev->mt76.phy2;
+ band = mphy->chandef.chan->band;
+ delta = dev->rate_power[band];
+ txpower_base = mphy->txpower_cur -
+ delta[MT7915_SKU_MAX_DELTA_IDX];
+
+ seq_puts(s, "Band 1:\n");
+ mt7915_puts_rate_txpower(s, delta, txpower_base, band);
+ }
+
+ return 0;
+}
+
+int mt7915_init_debugfs(struct mt7915_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return -ENOMEM;
+
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt7915_queues_read);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
+ mt7915_queues_acq);
+ debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats);
+ debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
+ debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
+ debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
+ /* test knobs */
+ debugfs_create_file("radar_trigger", 0200, dir, dev,
+ &fops_radar_trigger);
+ debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
+ mt7915_read_temperature);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
+ mt7915_read_rate_txpower);
+
+ return 0;
+}
+
+/** per-station debugfs **/
+
+/* usage: <tx mode> <ldpc> <stbc> <bw> <gi> <nss> <mcs> */
+static int mt7915_sta_fixed_rate_set(void *data, u64 rate)
+{
+ struct ieee80211_sta *sta = data;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ return mt7915_mcu_set_fixed_rate(msta->vif->dev, sta, rate);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL,
+ mt7915_sta_fixed_rate_set, "%llx\n");
+
+static int
+mt7915_sta_stats_read(struct seq_file *s, void *data)
+{
+ struct ieee80211_sta *sta = s->private;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_sta_stats *stats = &msta->stats;
+ struct rate_info *rate = &stats->prob_rate;
+ static const char * const bw[] = {
+ "BW20", "BW5", "BW10", "BW40",
+ "BW80", "BW160", "BW_HE_RU"
+ };
+
+ if (!rate->legacy && !rate->flags)
+ return 0;
+
+ seq_puts(s, "Probing rate - ");
+ if (rate->flags & RATE_INFO_FLAGS_MCS)
+ seq_puts(s, "HT ");
+ else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
+ seq_puts(s, "VHT ");
+ else if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
+ seq_puts(s, "HE ");
+ else
+ seq_printf(s, "Bitrate %d\n", rate->legacy);
+
+ if (rate->flags) {
+ seq_printf(s, "%s NSS%d MCS%d ",
+ bw[rate->bw], rate->nss, rate->mcs);
+
+ if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+ seq_puts(s, "SGI ");
+ else if (rate->he_gi)
+ seq_puts(s, "HE GI ");
+
+ if (rate->he_dcm)
+ seq_puts(s, "DCM ");
+ }
+
+ seq_printf(s, "\nPPDU PER: %ld.%1ld%%\n",
+ stats->per / 10, stats->per % 10);
+
+ return 0;
+}
+
+static int
+mt7915_sta_stats_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7915_sta_stats_read, inode->i_private);
+}
+
+static const struct file_operations fops_sta_stats = {
+ .open = mt7915_sta_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate);
+ debugfs_create_file("stats", 0400, dir, sta, &fops_sta_stats);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
new file mode 100644
index 000000000000..766185d1aa21
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7915.h"
+#include "../dma.h"
+#include "mac.h"
+
+static int
+mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc)
+{
+ struct mt76_sw_queue *q;
+ struct mt76_queue *hwq;
+ int err, i;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, MT7915_TXQ_BAND0, n_desc, 0,
+ MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
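+ /* all data tx queues share a single hardware ring */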
+ for (i = 0; i < MT_TXQ_MCU; i++) {
+ q = &dev->mt76.q_tx[i];
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
+ }
+
+ return 0;
+}
+
+static int
+mt7915_init_mcu_queue(struct mt7915_dev *dev, struct mt76_sw_queue *q,
+ int idx, int n_desc)
+{
+ struct mt76_queue *hwq;
+ int err;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
+
+ return 0;
+}
+
+void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7915_mac_tx_free(dev, skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt7915_mcu_rx_event(dev, skb);
+ break;
+ case PKT_TYPE_NORMAL:
+ if (!mt7915_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ /* fall through */
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
+static int mt7915_poll_tx(struct napi_struct *napi, int budget)
+{
+ static const u8 queue_map[] = {
+ MT_TXQ_MCU,
+ MT_TXQ_MCU_WA,
+ MT_TXQ_BE
+ };
+ struct mt7915_dev *dev;
+ int i;
+
+ dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
+
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++)
+ mt76_queue_tx_cleanup(dev, queue_map[i], false);
+
+ if (napi_complete_done(napi, 0))
+ mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
+
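+ /* clean up once more in case completions raced with the
+ * interrupt enable above.
+ */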
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++)
+ mt76_queue_tx_cleanup(dev, queue_map[i], false);
+
+ mt7915_mac_sta_poll(dev);
+
+ tasklet_schedule(&dev->mt76.tx_tasklet);
+
+ return 0;
+}
+
+void mt7915_dma_prefetch(struct mt7915_dev *dev)
+{
+#define PREFETCH(base, depth) ((base) << 16 | (depth))
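+/* pack the prefetch base address (high 16 bits) and depth (low 16 bits) */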
+
+ mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x80, 0x0));
+
+ mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL, PREFETCH(0xc0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL, PREFETCH(0x100, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL, PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL, PREFETCH(0x180, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL, PREFETCH(0x1c0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL, PREFETCH(0x200, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL, PREFETCH(0x240, 0x4));
+
+ mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL, PREFETCH(0x280, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL, PREFETCH(0x2c0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL, PREFETCH(0x300, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL, PREFETCH(0x340, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL, PREFETCH(0x380, 0x4));
+ mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL, PREFETCH(0x3c0, 0x0));
+
+ mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL, PREFETCH(0x3c0, 0x4));
+ mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL, PREFETCH(0x400, 0x4));
+ mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL, PREFETCH(0x440, 0x4));
+ mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL, PREFETCH(0x480, 0x0));
+}
+
+int mt7915_dma_init(struct mt7915_dev *dev)
+{
+ /* Increase buffer size to receive large VHT/HE MPDUs */
+ int rx_buf_size = MT_RX_BUF_SIZE * 2;
+ int ret;
+
+ mt76_dma_attach(&dev->mt76);
+
+ /* configure global settings */
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+
+ /* configure prefetch settings */
+ mt7915_dma_prefetch(dev);
+
+ /* reset dma idx */
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+
+ /* configure delay interrupt */
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+
+ /* init tx queue */
+ ret = mt7915_init_tx_queues(dev, MT7915_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* command to WM */
+ ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT7915_TXQ_MCU_WM,
+ MT7915_TX_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* command to WA */
+ ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU_WA],
+ MT7915_TXQ_MCU_WA,
+ MT7915_TX_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* firmware download */
+ ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+ MT7915_TXQ_FWDL,
+ MT7915_TX_FWDL_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* event from WM */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
+ MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
+ rx_buf_size, MT_RX_EVENT_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* event from WA */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+ MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
+ rx_buf_size, MT_RX_EVENT_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* rx data */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+ MT7915_RX_RING_SIZE, rx_buf_size,
+ MT_RX_DATA_RING_BASE);
+ if (ret)
+ return ret;
+
+ ret = mt76_init_queues(dev);
+ if (ret < 0)
+ return ret;
+
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ mt7915_poll_tx, NAPI_POLL_WEIGHT);
+ napi_enable(&dev->mt76.tx_napi);
+
+ /* wait for hif WFDMA to become idle */
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+ MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA,
+ MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_BUSY_ENA_RX_FIFO);
+
+ mt76_set(dev, MT_WFDMA0_PCIE1_BUSY_ENA,
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+ mt76_set(dev, MT_WFDMA1_PCIE1_BUSY_ENA,
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
+
+ mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+ MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+ /* set WFDMA Tx/Rx */
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+
+ /* enable interrupts for TX/RX rings */
+ mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_MCU_CMD);
+
+ return 0;
+}
+
+void mt7915_dma_cleanup(struct mt7915_dev *dev)
+{
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+
+ /* reset */
+ mt76_clear(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ tasklet_kill(&dev->mt76.tx_tasklet);
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
new file mode 100644
index 000000000000..7deba7ebd68a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7915.h"
+#include "eeprom.h"
+
+static inline bool mt7915_efuse_valid(u8 val)
+{
+ return !(val == 0xff);
+}
+
+u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
+{
+ u8 *data = dev->mt76.eeprom.data;
+
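+ /* bytes still set to 0xff have not been read yet; fetch them
+ * through the MCU.
+ */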
+ if (!mt7915_efuse_valid(data[offset]))
+ mt7915_mcu_get_eeprom(dev, offset);
+
+ return data[offset];
+}
+
+static int mt7915_eeprom_load(struct mt7915_dev *dev)
+{
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7915_EEPROM_SIZE);
+ if (ret < 0)
+ return ret;
+
+ memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
+
+ return 0;
+}
+
+static int mt7915_check_eeprom(struct mt7915_dev *dev)
+{
+ u16 val;
+ u8 *eeprom = dev->mt76.eeprom.data;
+
+ mt7915_eeprom_read(dev, 0);
+ val = get_unaligned_le16(eeprom);
+
+ switch (val) {
+ case 0x7915:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 tx_mask, max_nss = 4;
+ u32 val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF);
+
+ val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val);
+ switch (val) {
+ case MT_EE_5GHZ:
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ case MT_EE_2GHZ:
+ dev->mt76.cap.has_2ghz = true;
+ break;
+ default:
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ }
+
+ /* read tx mask from eeprom */
+ tx_mask = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK,
+ eeprom[MT_EE_WIFI_CONF]);
+ if (!tx_mask || tx_mask > max_nss)
+ tx_mask = max_nss;
+
+ dev->chainmask = BIT(tx_mask) - 1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
+}
+
+int mt7915_eeprom_init(struct mt7915_dev *dev)
+{
+ int ret;
+
+ ret = mt7915_eeprom_load(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7915_check_eeprom(dev);
+ if (ret)
+ return ret;
+
+ mt7915_eeprom_parse_hw_cap(dev);
+ memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+
+ mt76_eeprom_override(&dev->mt76);
+
+ return 0;
+}
+
+int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx)
+{
+ int index;
+ bool tssi_on;
+
+ if (chain_idx > 3)
+ return -EINVAL;
+
+ tssi_on = mt7915_tssi_enabled(dev, chan->band);
+
+ if (chan->band == NL80211_BAND_2GHZ) {
+ index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on;
+ } else {
+ int group = tssi_on ?
+ mt7915_get_channel_group(chan->hw_value) : 8;
+
+ index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group;
+ }
+
+ return mt7915_eeprom_read(dev, index);
+}
+
+static const u8 sku_cck_delta_map[] = {
+ SKU_CCK_GROUP0,
+ SKU_CCK_GROUP0,
+ SKU_CCK_GROUP1,
+ SKU_CCK_GROUP1,
+};
+
+static const u8 sku_ofdm_delta_map[] = {
+ SKU_OFDM_GROUP0,
+ SKU_OFDM_GROUP0,
+ SKU_OFDM_GROUP1,
+ SKU_OFDM_GROUP1,
+ SKU_OFDM_GROUP2,
+ SKU_OFDM_GROUP2,
+ SKU_OFDM_GROUP3,
+ SKU_OFDM_GROUP4,
+};
+
+static const u8 sku_mcs_delta_map[] = {
+ SKU_MCS_GROUP0,
+ SKU_MCS_GROUP1,
+ SKU_MCS_GROUP1,
+ SKU_MCS_GROUP2,
+ SKU_MCS_GROUP2,
+ SKU_MCS_GROUP3,
+ SKU_MCS_GROUP4,
+ SKU_MCS_GROUP5,
+ SKU_MCS_GROUP6,
+ SKU_MCS_GROUP7,
+ SKU_MCS_GROUP8,
+ SKU_MCS_GROUP9,
+};
+
+#define SKU_GROUP(_mode, _len, _ofs_2g, _ofs_5g, _map) \
+ [_mode] = { \
+ .len = _len, \
+ .offset = { \
+ _ofs_2g, \
+ _ofs_5g, \
+ }, \
+ .delta_map = _map \
+}
+
+const struct sku_group mt7915_sku_groups[] = {
+ SKU_GROUP(SKU_CCK, 4, 0x252, 0, sku_cck_delta_map),
+ SKU_GROUP(SKU_OFDM, 8, 0x254, 0x29d, sku_ofdm_delta_map),
+
+ SKU_GROUP(SKU_HT_BW20, 8, 0x259, 0x2a2, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HT_BW40, 9, 0x262, 0x2ab, sku_mcs_delta_map),
+ SKU_GROUP(SKU_VHT_BW20, 12, 0x259, 0x2a2, sku_mcs_delta_map),
+ SKU_GROUP(SKU_VHT_BW40, 12, 0x262, 0x2ab, sku_mcs_delta_map),
+ SKU_GROUP(SKU_VHT_BW80, 12, 0, 0x2b4, sku_mcs_delta_map),
+ SKU_GROUP(SKU_VHT_BW160, 12, 0, 0, sku_mcs_delta_map),
+
+ SKU_GROUP(SKU_HE_RU26, 12, 0x27f, 0x2dd, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU52, 12, 0x289, 0x2e7, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU106, 12, 0x293, 0x2f1, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU242, 12, 0x26b, 0x2bf, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU484, 12, 0x275, 0x2c9, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU996, 12, 0, 0x2d3, sku_mcs_delta_map),
+ SKU_GROUP(SKU_HE_RU2x996, 12, 0, 0, sku_mcs_delta_map),
+};
+
+static s8
+mt7915_get_sku_delta(struct mt7915_dev *dev, u32 addr)
+{
+ u32 val = mt7915_eeprom_read(dev, addr);
+ s8 delta = FIELD_GET(SKU_DELTA_VAL, val);
+
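+ /* the delta is a 6-bit magnitude with separate enable and sign bits */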
+ if (!(val & SKU_DELTA_EN))
+ return 0;
+
+ return val & SKU_DELTA_ADD ? delta : -delta;
+}
+
+static void
+mt7915_eeprom_init_sku_band(struct mt7915_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ int i, band = sband->band;
+ s8 *rate_power = dev->rate_power[band], max_delta = 0;
+ u8 idx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mt7915_sku_groups); i++) {
+ const struct sku_group *sku = &mt7915_sku_groups[i];
+ u32 offset = sku->offset[band];
+ int j;
+
+ if (!offset) {
+ idx += sku->len;
+ continue;
+ }
+
+ rate_power[idx++] = mt7915_get_sku_delta(dev, offset);
+ if (rate_power[idx - 1] > max_delta)
+ max_delta = rate_power[idx - 1];
+
+ if (i == SKU_HT_BW20 || i == SKU_VHT_BW20)
+ offset += 1;
+
+ for (j = 1; j < sku->len; j++) {
+ u32 addr = offset + sku->delta_map[j];
+
+ rate_power[idx++] = mt7915_get_sku_delta(dev, addr);
+ if (rate_power[idx - 1] > max_delta)
+ max_delta = rate_power[idx - 1];
+ }
+ }
+
+ rate_power[idx] = max_delta;
+}
+
+void mt7915_eeprom_init_sku(struct mt7915_dev *dev)
+{
+ mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_2g.sband);
+ mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_5g.sband);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
new file mode 100644
index 000000000000..4e31d6ab4fa6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_EEPROM_H
+#define __MT7915_EEPROM_H
+
+#include "mt7915.h"
+
+struct cal_data {
+ u8 count;
+ u16 offset[60];
+};
+
+enum mt7915_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_DDIE_FT_VERSION = 0x050,
+ MT_EE_WIFI_CONF = 0x190,
+ MT_EE_TX0_POWER_2G = 0x2fc,
+ MT_EE_TX0_POWER_5G = 0x34b,
+ MT_EE_ADIE_FT_VERSION = 0x9a0,
+
+ __MT_EE_MAX = 0xe00
+};
+
+#define MT_EE_WIFI_CONF_TX_MASK GENMASK(2, 0)
+#define MT_EE_WIFI_CONF_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF_TSSI0_2G BIT(0)
+#define MT_EE_WIFI_CONF_TSSI0_5G BIT(2)
+#define MT_EE_WIFI_CONF_TSSI1_5G BIT(4)
+
+enum mt7915_eeprom_band {
+ MT_EE_DUAL_BAND,
+ MT_EE_5GHZ,
+ MT_EE_2GHZ,
+ MT_EE_DBDC,
+};
+
+#define SKU_DELTA_VAL GENMASK(5, 0)
+#define SKU_DELTA_ADD BIT(6)
+#define SKU_DELTA_EN BIT(7)
+
+enum mt7915_sku_delta_group {
+ SKU_CCK_GROUP0,
+ SKU_CCK_GROUP1,
+
+ SKU_OFDM_GROUP0 = 0,
+ SKU_OFDM_GROUP1,
+ SKU_OFDM_GROUP2,
+ SKU_OFDM_GROUP3,
+ SKU_OFDM_GROUP4,
+
+ SKU_MCS_GROUP0 = 0,
+ SKU_MCS_GROUP1,
+ SKU_MCS_GROUP2,
+ SKU_MCS_GROUP3,
+ SKU_MCS_GROUP4,
+ SKU_MCS_GROUP5,
+ SKU_MCS_GROUP6,
+ SKU_MCS_GROUP7,
+ SKU_MCS_GROUP8,
+ SKU_MCS_GROUP9,
+};
+
+enum mt7915_sku_rate_group {
+ SKU_CCK,
+ SKU_OFDM,
+ SKU_HT_BW20,
+ SKU_HT_BW40,
+ SKU_VHT_BW20,
+ SKU_VHT_BW40,
+ SKU_VHT_BW80,
+ SKU_VHT_BW160,
+ SKU_HE_RU26,
+ SKU_HE_RU52,
+ SKU_HE_RU106,
+ SKU_HE_RU242,
+ SKU_HE_RU484,
+ SKU_HE_RU996,
+ SKU_HE_RU2x996,
+ MAX_SKU_RATE_GROUP_NUM,
+};
+
+struct sku_group {
+ u8 len;
+ u16 offset[2];
+ const u8 *delta_map;
+};
+
+static inline int
+mt7915_get_channel_group(int channel)
+{
+ if (channel >= 184 && channel <= 196)
+ return 0;
+ if (channel <= 48)
+ return 1;
+ if (channel <= 64)
+ return 2;
+ if (channel <= 96)
+ return 3;
+ if (channel <= 112)
+ return 4;
+ if (channel <= 128)
+ return 5;
+ if (channel <= 144)
+ return 6;
+ return 7;
+}
+
+static inline bool
+mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
+{
+ u8 *eep = dev->mt76.eeprom.data;
+
+ /* TODO: DBDC */
+ if (band == NL80211_BAND_5GHZ)
+ return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF_TSSI0_5G;
+ else
+ return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF_TSSI0_2G;
+}
+
+extern const struct sku_group mt7915_sku_groups[];
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
new file mode 100644
index 000000000000..aadf56e80bae
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include "mt7915.h"
+#include "mac.h"
+#include "eeprom.h"
+
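+/*
+ * Per-band MAC init: enable the TX duration drop limit, route
+ * management/control and dropped frames to the host (HIF) through the
+ * MDP, and enable the RMAC RX airtime MIB counters.
+ */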
+static void
+mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
+{
+ u32 mask, set;
+
+ mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
+ MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ mt76_set(dev, MT_TMAC_CTCR0(band),
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+ mask = MT_MDP_RCFR0_MCU_RX_MGMT |
+ MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR |
+ MT_MDP_RCFR0_MCU_RX_CTL_BAR;
+ set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF);
+ mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set);
+
+ mask = MT_MDP_RCFR1_MCU_RX_BYPASS |
+ MT_MDP_RCFR1_RX_DROPPED_UCAST |
+ MT_MDP_RCFR1_RX_DROPPED_MCAST;
+ set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
+ mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
+}
+
+static void mt7915_mac_init(struct mt7915_dev *dev)
+{
+ int i;
+
+ mt76_rmw_field(dev, MT_DMA_DCR0, MT_DMA_DCR0_MAX_RX_LEN, 1536);
+ mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536);
+ /* enable rx rate report */
+ mt76_set(dev, MT_DMA_DCR0, MT_DMA_DCR0_RXD_G5_EN);
+ /* disable hardware de-agg */
+ mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
+
+ for (i = 0; i < MT7915_WTBL_SIZE; i++)
+ mt7915_mac_wtbl_update(dev, i,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ mt7915_mac_init_band(dev, 0);
+ mt7915_mac_init_band(dev, 1);
+ mt7915_mcu_set_rts_thresh(&dev->phy, 0x92b);
+}
+
+static int mt7915_txbf_init(struct mt7915_dev *dev)
+{
+ int ret;
+
+	/*
+	 * TODO: DBDC support; also check whether the iBF phase calibration
+	 * data has been stored in EEPROM offsets 0x651~0x7b8, then write
+	 * 0x1111 into 0x651 and 0x651 to trigger iBF.
+	 */
+
+ /* trigger sounding packets */
+ ret = mt7915_mcu_set_txbf_sounding(dev);
+ if (ret)
+ return ret;
+
+ /* enable iBF & eBF */
+ return mt7915_mcu_set_txbf_type(dev);
+}
+
+static void
+mt7915_init_txpower_band(struct mt7915_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ int i, n_chains = hweight8(dev->mphy.antenna_mask);
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *chan = &sband->channels[i];
+ u32 target_power = 0;
+ int j;
+
+ for (j = 0; j < n_chains; j++) {
+ u32 val;
+
+ val = mt7915_eeprom_get_target_power(dev, chan, j);
+ target_power = max(target_power, val);
+ }
+
+ chan->max_power = min_t(int, chan->max_reg_power,
+ target_power / 2);
+ chan->orig_mpwr = target_power / 2;
+ }
+}
+
+static void mt7915_init_txpower(struct mt7915_dev *dev)
+{
+ mt7915_init_txpower_band(dev, &dev->mphy.sband_2g.sband);
+ mt7915_init_txpower_band(dev, &dev->mphy.sband_5g.sband);
+
+ mt7915_eeprom_init_sku(dev);
+}
+
+static void mt7915_init_work(struct work_struct *work)
+{
+ struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
+ init_work);
+
+ mt7915_mcu_set_eeprom(dev);
+ mt7915_mac_init(dev);
+ mt7915_init_txpower(dev);
+ mt7915_txbf_init(dev);
+}
+
+static int mt7915_init_hardware(struct mt7915_dev *dev)
+{
+ int ret, idx;
+
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ INIT_WORK(&dev->init_work, mt7915_init_work);
+ spin_lock_init(&dev->token_lock);
+ idr_init(&dev->token);
+
+ ret = mt7915_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ ret = mt7915_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7915_eeprom_init(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+ return 0;
+}
+
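+/*
+ * Legacy rate table: hw_value packs the PHY type into the high byte and
+ * the rate index into the low byte; CCK short-preamble entries use
+ * index + 4.
+ */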
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+static struct ieee80211_rate mt7915_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(11, 60),
+ OFDM_RATE(15, 90),
+ OFDM_RATE(10, 120),
+ OFDM_RATE(14, 180),
+ OFDM_RATE(9, 240),
+ OFDM_RATE(13, 360),
+ OFDM_RATE(8, 480),
+ OFDM_RATE(12, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = MT7915_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_STATION)
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160) |
+ BIT(NL80211_CHAN_WIDTH_80P80),
+ }
+};
+
+static void
+mt7915_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct mt7915_phy *phy = mphy->priv;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
+
+ dev->mt76.region = request->dfs_region;
+
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ mt7915_dfs_init_radar_detector(phy);
+}
+
+static void
+mt7915_init_wiphy(struct ieee80211_hw *hw)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct wiphy *wiphy = hw->wiphy;
+
+ hw->queues = 4;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+
+ phy->slottime = 9;
+
+ hw->sta_data_size = sizeof(struct mt7915_sta);
+ hw->vif_data_size = sizeof(struct mt7915_vif);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ wiphy->reg_notifier = mt7915_regd_notifier;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+
+ hw->max_tx_fragments = 4;
+}
+
+void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
+{
+ int nss = hweight8(phy->chainmask);
+ u32 *cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
+
+ *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+
+ *cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ if (nss < 2)
+ return;
+
+ *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE |
+ FIELD_PREP(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ nss - 1);
+}
+
+static void
+mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
+ int vif, int nss)
+{
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp *mcs = &he_cap->he_mcs_nss_supp;
+ u8 c;
+
+#ifdef CONFIG_MAC80211_MESH
+ if (vif == NL80211_IFTYPE_MESH_POINT)
+ return;
+#endif
+
+ elem->phy_cap_info[3] &= ~IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+ elem->phy_cap_info[4] &= ~IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+ c = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ elem->phy_cap_info[5] &= ~c;
+
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB;
+ elem->phy_cap_info[6] &= ~c;
+
+ elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
+
+ c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ elem->phy_cap_info[2] |= c;
+
+ c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ elem->phy_cap_info[4] |= c;
+
+	/* NG16 is not supported because spec D4.0 changed the subcarrier index */
+ c = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU;
+
+ if (vif == NL80211_IFTYPE_STATION)
+ c |= IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+
+ elem->phy_cap_info[6] |= c;
+
+ if (nss < 2)
+ return;
+
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+	/* number of sounding dimensions: bits 0..2 for <=80 MHz, bits 3..5 above */
+ c = (nss - 1) | (max_t(int, mcs->tx_mcs_160, 1) << 3);
+ elem->phy_cap_info[5] |= c;
+
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB;
+ elem->phy_cap_info[6] |= c;
+
+ /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+ elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
+}
+
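+/*
+ * Build the HE PPE threshold field: a 7-bit header (NSS count plus the
+ * RU index bitmask) followed by 6-bit PPET16/PPET8 pairs per (NSS, RU),
+ * bit-packed from the ppet16_ppet8_ru3_ru0 template.
+ */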
+static void
+mt7915_gen_ppe_thresh(u8 *he_ppet)
+{
+ int ru, nss, max_nss = 1, max_ru = 3;
+ u8 bit = 7, ru_bit_mask = 0x7;
+ u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
+
+ he_ppet[0] = max_nss & IEEE80211_PPE_THRES_NSS_MASK;
+ he_ppet[0] |= (ru_bit_mask <<
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
+
+ for (nss = 0; nss <= max_nss; nss++) {
+ for (ru = 0; ru < max_ru; ru++) {
+ u8 val;
+ int i;
+
+ if (!(ru_bit_mask & BIT(ru)))
+ continue;
+
+ val = (ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+ 0x3f;
+ val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+ for (i = 5; i >= 0; i--) {
+ he_ppet[bit / 8] |=
+ ((val >> i) & 0x1) << ((bit % 8));
+ bit++;
+ }
+ }
+ }
+}
+
+static int
+mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
+ struct ieee80211_sband_iftype_data *data)
+{
+ int i, idx = 0;
+ int nss = hweight8(phy->chainmask);
+ u16 mcs_map = 0;
+
+ for (i = 0; i < 8; i++) {
+ if (i < nss)
+ mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
+ else
+ mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
+ }
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp *he_mcs =
+ &he_cap->he_mcs_nss_supp;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+#endif
+ break;
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+
+ he_cap_elem->mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE;
+ he_cap_elem->mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1;
+ he_cap_elem->mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR;
+ he_cap_elem->mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED;
+ he_cap_elem->mac_cap_info[4] =
+ IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+ he_cap_elem->phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+
+ /* TODO: OFDMA */
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[4] |=
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[3] |=
+ IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] |=
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A;
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+#endif
+ }
+
+ he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map);
+
+ mt7915_set_stream_he_txbf_caps(he_cap, i, nss);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ mt7915_gen_ppe_thresh(he_cap->ppe_thres);
+ } else {
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ }
+ idx++;
+ }
+
+ return idx;
+}
+
+void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
+{
+ struct ieee80211_sband_iftype_data *data;
+ struct ieee80211_supported_band *band;
+ struct mt76_dev *mdev = &phy->dev->mt76;
+ int n;
+
+ if (mdev->cap.has_2ghz) {
+ data = phy->iftype[NL80211_BAND_2GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_2GHZ, data);
+
+ band = &phy->mt76->sband_2g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+
+ if (mdev->cap.has_5ghz) {
+ data = phy->iftype[NL80211_BAND_5GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_5GHZ, data);
+
+ band = &phy->mt76->sband_5g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+}
+
+static void
+mt7915_cap_dbdc_enable(struct mt7915_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap &=
+ ~(IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ);
+
+ if (dev->chainmask == 0xf)
+ dev->mphy.antenna_mask = dev->chainmask >> 2;
+ else
+ dev->mphy.antenna_mask = dev->chainmask >> 1;
+
+ dev->phy.chainmask = dev->mphy.antenna_mask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask;
+
+ mt76_set_stream_caps(&dev->mphy, true);
+ mt7915_set_stream_vht_txbf_caps(&dev->phy);
+ mt7915_set_stream_he_caps(&dev->phy);
+}
+
+static void
+mt7915_cap_dbdc_disable(struct mt7915_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask;
+
+ mt76_set_stream_caps(&dev->mphy, true);
+ mt7915_set_stream_vht_txbf_caps(&dev->phy);
+ mt7915_set_stream_he_caps(&dev->phy);
+}
+
+int mt7915_register_ext_phy(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy = mt7915_ext_phy(dev);
+ struct mt76_phy *mphy;
+ int ret;
+ bool bound;
+
+	/* TODO: enable DBDC */
+ bound = mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5);
+ if (!bound)
+ return -EINVAL;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return -EINVAL;
+
+ if (phy)
+ return 0;
+
+ mt7915_cap_dbdc_enable(dev);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops);
+ if (!mphy)
+ return -ENOMEM;
+
+ phy = mphy->priv;
+ phy->dev = dev;
+ phy->mt76 = mphy;
+ phy->chainmask = dev->chainmask & ~dev->phy.chainmask;
+ mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
+ mt7915_init_wiphy(mphy->hw);
+
+ INIT_DELAYED_WORK(&phy->mac_work, mt7915_mac_work);
+
+ /*
+ * Make the secondary PHY MAC address local without overlapping with
+ * the usual MAC address allocation scheme on multiple virtual interfaces
+ */
+ mphy->hw->wiphy->perm_addr[0] |= 2;
+ mphy->hw->wiphy->perm_addr[0] ^= BIT(7);
+
+ /* The second interface does not get any packets unless it has a vif */
+ ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF);
+
+ ret = mt76_register_phy(mphy);
+ if (ret)
+ ieee80211_free_hw(mphy->hw);
+
+ return ret;
+}
+
+void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy = mt7915_ext_phy(dev);
+ struct mt76_phy *mphy = dev->mt76.phy2;
+
+ if (!phy)
+ return;
+
+ mt7915_cap_dbdc_disable(dev);
+ mt76_unregister_phy(mphy);
+ ieee80211_free_hw(mphy->hw);
+}
+
+int mt7915_register_device(struct mt7915_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ int ret;
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ INIT_DELAYED_WORK(&dev->phy.mac_work, mt7915_mac_work);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+
+ init_waitqueue_head(&dev->reset_wait);
+ INIT_WORK(&dev->reset_work, mt7915_mac_reset_work);
+
+ ret = mt7915_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ mt7915_init_wiphy(hw);
+ dev->mphy.sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ dev->mphy.sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ mt7915_cap_dbdc_disable(dev);
+ dev->phy.dfs_state = -1;
+
+ ret = mt76_register_device(&dev->mt76, true, mt7915_rates,
+ ARRAY_SIZE(mt7915_rates));
+ if (ret)
+ return ret;
+
+ ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+
+ return mt7915_init_debugfs(dev);
+}
+
+void mt7915_unregister_device(struct mt7915_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ mt7915_unregister_ext_phy(dev);
+ mt76_unregister_device(&dev->mt76);
+ mt7915_mcu_exit(dev);
+ mt7915_dma_cleanup(dev);
+
+ spin_lock_bh(&dev->token_lock);
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt7915_txp_skb_unmap(&dev->mt76, txwi);
+ if (txwi->skb)
+ dev_kfree_skb_any(txwi->skb);
+ mt76_put_txwi(&dev->mt76, txwi);
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
+
+ mt76_free_device(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
new file mode 100644
index 000000000000..a264e304a3df
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -0,0 +1,1477 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7915.h"
+#include "../dma.h"
+#include "mac.h"
+
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+
+#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
+#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
+ IEEE80211_RADIOTAP_HE_##f)
+
+static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
+ [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
+ [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
+ [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
+ [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
+ [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
+ [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
+ [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
+ },
+};
+
+static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
+ },
+};
+
+static const struct mt7915_dfs_radar_spec jp_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
+ [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
+ [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
+ [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
+ },
+};
+
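+/*
+ * Resolve the RX wcid: unicast frames map straight to the indexed
+ * entry, while group-addressed frames are redirected to the wcid of
+ * the receiving station's vif.
+ */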
+static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
+ u16 idx, bool unicast)
+{
+ struct mt7915_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7915_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+}
+
+bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
+ 0, 5000);
+}
+
+static u32 mt7915_mac_wtbl_lmac_read(struct mt7915_dev *dev, u16 wcid,
+ u16 addr)
+{
+ mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
+ FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
+
+ return mt76_rr(dev, MT_WTBL_LMAC_OFFS(wcid, addr));
+}
+
+/* TODO: use txfree airtime info to avoid runtime accessing in the long run */
+void mt7915_mac_sta_poll(struct mt7915_dev *dev)
+{
+ static const u8 ac_to_tid[] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ static const u8 hw_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+ struct ieee80211_sta *sta;
+ struct mt7915_sta *msta;
+ u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
+ int i;
+
+ rcu_read_lock();
+
+ while (true) {
+ bool clear = false;
+ u16 idx;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&dev->sta_poll_list)) {
+ spin_unlock_bh(&dev->sta_poll_lock);
+ break;
+ }
+ msta = list_first_entry(&dev->sta_poll_list,
+ struct mt7915_sta, poll_list);
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ for (i = 0, idx = msta->wcid.idx; i < IEEE80211_NUM_ACS; i++) {
+ u32 tx_last = msta->airtime_ac[i];
+ u32 rx_last = msta->airtime_ac[i + IEEE80211_NUM_ACS];
+
+ msta->airtime_ac[i] =
+ mt7915_mac_wtbl_lmac_read(dev, idx, 20 + i);
+ msta->airtime_ac[i + IEEE80211_NUM_ACS] =
+ mt7915_mac_wtbl_lmac_read(dev, idx, 21 + i);
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+ rx_time[i] = msta->airtime_ac[i + IEEE80211_NUM_ACS] -
+ rx_last;
+
+ if ((tx_last | rx_last) & BIT(30))
+ clear = true;
+ }
+
+ if (clear) {
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u32 tx_cur = tx_time[i];
+ u32 rx_cur = rx_time[hw_queue_map[i]];
+ u8 tid = ac_to_tid[i];
+
+ if (!tx_cur && !rx_cur)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, tx_cur,
+ rx_cur);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
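+/*
+ * Translate the RXV RU allocation index into the radiotap HE RU size
+ * and offset: indices 0..36 are 26-tone RUs, 37..52 52-tone, 53..60
+ * 106-tone, 61..64 242-tone, 65..66 484-tone, 67 a 996-tone RU and 68
+ * a 2x996-tone RU, matching the 802.11ax RU allocation table.
+ */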
+static void
+mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
+ struct mt7915_rxv *rxv,
+ struct ieee80211_radiotap_he *he)
+{
+ u32 ru_h, ru_l;
+ u8 ru, offs = 0;
+
+ ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv->v[0]));
+ ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv->v[1]));
+ ru = (u8)(ru_l | ru_h << 4);
+
+ status->bw = RATE_INFO_BW_HE_RU;
+
+ switch (ru) {
+ case 0 ... 36:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+
+ he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+ he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+}
+
+static void
+mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
+ struct mt76_rx_status *status,
+ struct mt7915_rxv *rxv)
+{
+ /* TODO: struct ieee80211_radiotap_he_mu */
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
+ HE_BITS(DATA1_DATA_DCM_KNOWN) |
+ HE_BITS(DATA1_STBC_KNOWN) |
+ HE_BITS(DATA1_CODING_KNOWN) |
+ HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
+ HE_BITS(DATA1_DOPPLER_KNOWN) |
+ HE_BITS(DATA1_BSS_COLOR_KNOWN),
+ .data2 = HE_BITS(DATA2_GI_KNOWN) |
+ HE_BITS(DATA2_TXBF_KNOWN) |
+ HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
+ HE_BITS(DATA2_TXOP_KNOWN),
+ };
+ struct ieee80211_radiotap_he *he = NULL;
+ __le32 v2 = rxv->v[2];
+ __le32 v11 = rxv->v[11];
+ __le32 v14 = rxv->v[14];
+ u32 ltf_size = le32_get_bits(v2, MT_CRXV_HE_LTF_SIZE) + 1;
+
+ he = skb_push(skb, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+
+ he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, v14) |
+ HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, v2);
+ he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, v2) |
+ le16_encode_bits(ltf_size,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+ he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, v14) |
+ HE_PREP(DATA6_DOPPLER, DOPPLER, v14);
+
+ switch (rxv->phy) {
+ case MT_PHY_TYPE_HE_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, v14) |
+ HE_PREP(DATA3_UL_DL, UPLINK, v2);
+ he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11);
+ break;
+ case MT_PHY_TYPE_HE_EXT_SU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
+ HE_BITS(DATA1_UL_DL_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2);
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+
+ he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2);
+ he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11);
+
+ mt7915_mac_decode_he_radiotap_ru(status, rxv, he);
+ break;
+ case MT_PHY_TYPE_HE_TB:
+ he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
+
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, v11) |
+ HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, v11) |
+ HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, v11) |
+ HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, v11);
+
+ mt7915_mac_decode_he_radiotap_ru(status, rxv, he);
+ break;
+ default:
+ break;
+ }
+}
+
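+/*
+ * Parse a received frame's RXD and its optional groups (1..5) into
+ * mt76_rx_status; returns -EINVAL for frames that cannot be reported
+ * to mac80211 (bad length, unknown PHY mode, phy not running).
+ */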
+int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_phy *phy = &dev->phy;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hdr *hdr;
+ struct mt7915_rxv rxv = {};
+ __le32 *rxd = (__le32 *)skb->data;
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ u32 rxd3 = le32_to_cpu(rxd[3]);
+ bool unicast, insert_ccmp_hdr = false;
+ u8 remove_pad;
+ int i, idx;
+
+ memset(status, 0, sizeof(*status));
+
+ if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) {
+ mphy = dev->mt76.phy2;
+ if (!mphy)
+ return -EINVAL;
+
+ phy = mphy->priv;
+ status->ext_phy = true;
+ }
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return -EINVAL;
+
+ unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
+ idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
+ status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
+
+ if (status->wcid) {
+ struct mt7915_sta *msta;
+
+ msta = container_of(status->wcid, struct mt7915_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
+ !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != rxd[14]) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = rxd[14];
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
+ remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
+ rxd += 6;
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ }
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ /* RXD Group 3 - P-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
+ u32 v0, v1, v2;
+
+ memcpy(rxv.v, rxd, sizeof(rxv.v));
+
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+
+ v0 = le32_to_cpu(rxv.v[0]);
+ v1 = le32_to_cpu(rxv.v[1]);
+ v2 = le32_to_cpu(rxv.v[2]);
+
+ if (v0 & MT_PRXV_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ status->chains = mphy->antenna_mask;
+ status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
+ status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
+ status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
+ status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
+ status->signal = status->chain_signal[0];
+
+ for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
+ if (!(status->chains & BIT(i)))
+ continue;
+
+ status->signal = max(status->signal,
+ status->chain_signal[i]);
+ }
+
+ /* RXD Group 5 - C-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+ u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
+ u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
+ bool cck = false;
+
+ rxd += 18;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+
+ idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ rxv.phy = FIELD_GET(MT_CRXV_TX_MODE, v2);
+
+ switch (rxv.phy) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ /* fall through */
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss =
+ FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+ status->encoding = RX_ENC_VHT;
+ if (i > 9)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ /* fall through */
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss =
+ FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+ status->encoding = RX_ENC_HE;
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ if (idx & MT_PRXV_TX_DCM)
+ status->he_dcm = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+				if (rxv.phy == MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
+ }
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (rxv.phy < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ }
+ }
+
+ skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt76_insert_ccmp_hdr(skb, key_id);
+ }
+
+ if (status->flag & RX_FLAG_RADIOTAP_HE)
+ mt7915_mac_decode_he_radiotap(skb, status, &rxv);
+
+ hdr = mt76_skb_get_hdr(skb);
+ if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control);
+ status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ return 0;
+}
+
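+/*
+ * Fill the 8-dword TXD (txwi) for a frame: queue and packet-format
+ * selection in DW0, wlan/omac indexes in DW1, frame type and cipher
+ * hints in DW2/DW3, and a fixed legacy rate for non-data or multicast
+ * frames in DW6.
+ */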
+void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, bool beacon)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool multicast = is_multicast_ether_addr(hdr->addr1);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_phy *mphy = &dev->mphy;
+ bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
+ u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+ __le16 fc = hdr->frame_control;
+ u16 tx_count = 4, seqno = 0;
+ u32 val;
+
+ if (vif) {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ omac_idx = mvif->omac_idx;
+ wmm_idx = mvif->wmm_idx;
+ }
+
+ if (ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+ fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
+ if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
+ q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
+ skb_get_queue_mapping(skb);
+ p_fmt = MT_TX_TYPE_CT;
+ } else if (beacon) {
+ q_idx = MT_LMAC_BCN0;
+ p_fmt = MT_TX_TYPE_FW;
+ } else {
+ q_idx = MT_LMAC_ALTX0;
+ p_fmt = MT_TX_TYPE_CT;
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO,
+ ieee80211_get_hdrlen_from_skb(skb) / 2) |
+ FIELD_PREP(MT_TXD1_TID,
+ skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+ FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+ if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+ val |= MT_TXD1_TGID;
+
+ txwi[1] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD2_MULTICAST, multicast);
+ if (key) {
+ if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
+ key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ val |= MT_TXD2_BIP;
+ txwi[3] = 0;
+ } else {
+ txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+ }
+ } else {
+ txwi[3] = 0;
+ }
+ txwi[2] = cpu_to_le32(val);
+
+ txwi[4] = 0;
+ txwi[5] = 0;
+ txwi[6] = 0;
+
+ if (!ieee80211_is_data(fc) || multicast) {
+ u16 rate;
+
+ /* hardware won't add HTC for mgmt/ctrl frame */
+ txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE | MT_TXD2_HTC_VLD);
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ rate = MT7915_5G_RATE_DEFAULT;
+ else
+ rate = MT7915_2G_RATE_DEFAULT;
+
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_TX_RATE, rate);
+ txwi[6] |= cpu_to_le32(val);
+ txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
+ }
+
+ if (!ieee80211_is_beacon(fc))
+ txwi[3] |= cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
+ else
+ tx_count = 0x1f;
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
+
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ txwi[7] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
+ if (ieee80211_is_data_qos(fc)) {
+ seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ val |= MT_TXD3_SN_VALID;
+ } else if (ieee80211_is_back_req(fc)) {
+ struct ieee80211_bar *bar;
+
+ bar = (struct ieee80211_bar *)skb->data;
+ seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
+ val |= MT_TXD3_SN_VALID;
+ }
+ val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
+ txwi[3] |= cpu_to_le32(val);
+}
+
+int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
+ struct mt76_txwi_cache *t;
+ struct mt7915_txp *txp;
+ int id, i, nbuf = tx_info->nbuf - 1;
+ u8 *txwi = (u8 *)txwi_ptr;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ cb->wcid = wcid->idx;
+
+ mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
+ false);
+
+ txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ }
+ txp->nbuf = nbuf;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
+ txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+
+ if (!key)
+ txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+
+ if (vif) {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ txp->bss_idx = mvif->idx;
+ }
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ spin_lock_bh(&dev->token_lock);
+ id = idr_alloc(&dev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC);
+ spin_unlock_bh(&dev->token_lock);
+ if (id < 0)
+ return id;
+
+ txp->token = cpu_to_le16(id);
+ txp->rept_wds_wcid = 0xff;
+ tx_info->skb = DMA_DUMMY_DATA;
+
+ return 0;
+}
+
+static inline bool
+mt7915_tx_check_aggr_tid(struct mt7915_sta *msta, u8 tid)
+{
+ bool ret = false;
+
+ spin_lock_bh(&msta->ampdu_lock);
+ if (msta->ampdu_state[tid] == MT7915_AGGR_STOP)
+ ret = true;
+ spin_unlock_bh(&msta->ampdu_lock);
+
+ return ret;
+}
+
+static void
+mt7915_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct mt7915_sta *msta;
+ u16 tid;
+
+ if (!sta->ht_cap.ht_supported)
+ return;
+
+ if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ return;
+
+ if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
+ return;
+
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return;
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+ tid = ieee80211_get_tid(hdr);
+
+ if (mt7915_tx_check_aggr_tid(msta, tid)) {
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+ mt7915_set_aggr_state(msta, tid, MT7915_AGGR_PROGRESS);
+ }
+}
+
+static inline void
+mt7915_tx_status(struct ieee80211_sta *sta, struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info, struct sk_buff *skb)
+{
+ struct ieee80211_tx_status status = {
+ .sta = sta,
+ .info = info,
+ };
+
+ if (skb)
+ status.skb = skb;
+
+ if (sta) {
+ struct mt7915_sta *msta;
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+ status.rate = &msta->stats.tx_rate;
+ }
+
+ /* use status_ext to report HE rate */
+ ieee80211_tx_status_ext(hw, &status);
+}
+
+static void
+mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, u8 stat)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw;
+
+ hw = mt76_tx_status_get_hw(mdev, skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ else if (sta)
+ mt7915_tx_check_aggr(sta, skb);
+
+ if (stat)
+ ieee80211_tx_info_clear_status(info);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.tx_time = 0;
+
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ mt7915_tx_status(sta, hw, info, skb);
+ return;
+ }
+
+ if (sta || !(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ mt7915_tx_status(sta, hw, info, NULL);
+
+ dev_kfree_skb(skb);
+}
+
+void mt7915_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt7915_txp *txp;
+ int i;
+
+ txp = mt7915_txwi_to_txp(dev, t);
+ for (i = 1; i < txp->nbuf; i++)
+ dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+}
+
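+/*
+ * Handle a TX-free event from the hardware: entries either announce a
+ * new wcid pair or carry an MSDU token, which is resolved back to the
+ * txwi cache allocated in mt7915_tx_prepare_skb to complete the frame.
+ */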
+void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ u8 i, count;
+
+	/*
+	 * TODO: MT_TX_FREE_LATENCY is the MSDU time from when the TXD is
+	 * queued into the PLE to when the ACK is received or the frame is
+	 * dropped by hardware (air + hw queue time). Use it for Tx airtime
+	 * instead of accessing the WTBL.
+	 */
+ count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+ for (i = 0; i < count; i++) {
+ u32 msdu, info = le32_to_cpu(free->info[i]);
+ u8 stat;
+
+ /*
+ * 1'b1: new wcid pair.
+ * 1'b0: msdu_id with the same 'wcid pair' as above.
+ */
+ if (info & MT_TX_FREE_PAIR) {
+ struct mt7915_sta *msta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ count++;
+ idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ continue;
+
+ msta = container_of(wcid, struct mt7915_sta, wcid);
+ ieee80211_queue_work(mt76_hw(dev), &msta->stats_work);
+ continue;
+ }
+
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ stat = FIELD_GET(MT_TX_FREE_STATUS, info);
+
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, msdu);
+ spin_unlock_bh(&dev->token_lock);
+
+ if (!txwi)
+ continue;
+
+ mt7915_txp_skb_unmap(mdev, txwi);
+ if (txwi->skb) {
+ mt7915_tx_complete_status(mdev, txwi->skb, sta, stat);
+ txwi->skb = NULL;
+ }
+
+ mt76_put_txwi(mdev, txwi);
+ }
+ dev_kfree_skb(skb);
+}
+
+void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
+{
+ struct mt7915_dev *dev;
+
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_txwi_cache *t;
+ struct mt7915_txp *txp;
+
+ txp = mt7915_txwi_to_txp(mdev, e->txwi);
+
+ spin_lock_bh(&dev->token_lock);
+ t = idr_remove(&dev->token, le16_to_cpu(txp->token));
+ spin_unlock_bh(&dev->token_lock);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb) {
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
+ struct mt76_wcid *wcid;
+
+ wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);
+
+ mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0);
+ }
+}
+
+void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);
+
+ mt7915_l2_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
+ mt7915_l2_set(dev, reg, BIT(11) | BIT(9));
+}
+
+void mt7915_mac_reset_counters(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+ mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+ }
+
+ if (ext_phy) {
+ dev->mt76.phy2->survey_time = ktime_get_boottime();
+ i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
+ } else {
+ dev->mt76.phy.survey_time = ktime_get_boottime();
+ i = 0;
+ }
+ memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
+
+ /* reset airtime counters */
+ mt76_rr(dev, MT_MIB_SDR9(ext_phy));
+ mt76_rr(dev, MT_MIB_SDR36(ext_phy));
+ mt76_rr(dev, MT_MIB_SDR37(ext_phy));
+
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(ext_phy),
+ MT_WF_RMAC_MIB_RXTIME_CLR);
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
+ MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
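+/*
+ * Program CCK/OFDM slot timing. The PLCP/CCA timeouts get a 3 us
+ * per-coverage-class offset, and both phys take the larger of the two
+ * coverage classes (an inference from the max_t() handling below,
+ * suggesting the timing registers are not fully independent).
+ */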
+void mt7915_mac_set_timing(struct mt7915_phy *phy)
+{
+ s16 coverage_class = phy->coverage_class;
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 val, reg_offset;
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
+ int sifs, offset;
+ bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
+
+ if (is_5ghz)
+ sifs = 16;
+ else
+ sifs = 10;
+
+ if (ext_phy) {
+ coverage_class = max_t(s16, dev->phy.coverage_class,
+ coverage_class);
+ } else {
+ struct mt7915_phy *phy_ext = mt7915_ext_phy(dev);
+
+ if (phy_ext)
+ coverage_class = max_t(s16, phy_ext->coverage_class,
+ coverage_class);
+ }
+ mt76_set(dev, MT_ARB_SCR(ext_phy),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ udelay(1);
+
+ offset = 3 * coverage_class;
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+
+ mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
+ FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+ if (phy->slottime < 20 || is_5ghz)
+ val = MT7915_CFEND_RATE_DEFAULT;
+ else
+ val = MT7915_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_clear(dev, MT_ARB_SCR(ext_phy),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+}
+
+/*
+ * TODO: MIB counters are read-clear, and many HE features need this
+ * info, so the firmware prepares a task that reads the fields out into
+ * a shared structure. Users should switch to the event format to avoid
+ * the race condition.
+ */
+static void
+mt7915_phy_update_channel(struct mt76_phy *mphy, int idx)
+{
+ struct mt7915_dev *dev = container_of(mphy->dev, struct mt7915_dev, mt76);
+ struct mt76_channel_state *state;
+ u64 busy_time, tx_time, rx_time, obss_time;
+
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
+ MT_MIB_SDR9_BUSY_MASK);
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
+ MT_MIB_SDR36_TXTIME_MASK);
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
+ MT_MIB_SDR37_RXTIME_MASK);
+ obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
+ MT_MIB_OBSSTIME_MASK);
+
+ /* TODO: state->noise */
+ state = mphy->chan_state;
+ state->cc_busy += busy_time;
+ state->cc_tx += tx_time;
+ state->cc_rx += rx_time + obss_time;
+ state->cc_bss_rx += rx_time;
+}
+
+void mt7915_update_channel(struct mt76_dev *mdev)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_phy_update_channel(&mdev->phy, 0);
+ if (mdev->phy2)
+ mt7915_phy_update_channel(mdev->phy2, 1);
+
+ /* reset obss airtime */
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
+ if (mdev->phy2)
+ mt76_set(dev, MT_WF_RMAC_MIB_TIME0(1),
+ MT_WF_RMAC_MIB_RXTIME_CLR);
+}
+
+static bool
+mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
+{
+ bool ret;
+
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->reset_state) & state),
+ MT7915_RESET_TIMEOUT);
+
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
+
+static void
+mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = priv;
+
+ mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
+}
+
+static void
+mt7915_update_beacons(struct mt7915_dev *dev)
+{
+ ieee80211_iterate_active_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_update_vif_beacon, dev->mt76.hw);
+
+ if (!dev->mt76.phy2)
+ return;
+
+ ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_update_vif_beacon, dev->mt76.phy2->hw);
+}
+
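+/*
+ * Full WFDMA reset: stop both DMA engines, flush every tx queue and
+ * reset the rx queues, re-apply the prefetch configuration and
+ * re-enable DMA.
+ */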
+static void
+mt7915_dma_reset(struct mt7915_dev *dev)
+{
+ int i;
+
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ /* re-init prefetch settings after reset */
+ mt7915_dma_prefetch(dev);
+
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+}
+
+/* system error recovery */
+void mt7915_mac_reset_work(struct work_struct *work)
+{
+ struct mt7915_phy *phy2;
+ struct mt76_phy *ext_phy;
+ struct mt7915_dev *dev;
+
+ dev = container_of(work, struct mt7915_dev, reset_work);
+ ext_phy = dev->mt76.phy2;
+ phy2 = ext_phy ? ext_phy->priv : NULL;
+
+ if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
+ return;
+
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_stop_queues(ext_phy->hw);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->phy.mac_work);
+ if (phy2)
+ cancel_delayed_work_sync(&phy2->mac_work);
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+ if (ext_phy)
+ mt76_txq_schedule_all(ext_phy);
+
+ tasklet_disable(&dev->mt76.tx_tasklet);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.napi[2]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
+
+ if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7915_dma_reset(dev);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
+ mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ tasklet_enable(&dev->mt76.tx_tasklet);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ napi_enable(&dev->mt76.napi[2]);
+ napi_schedule(&dev->mt76.napi[2]);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_wake_queues(ext_phy->hw);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt7915_update_beacons(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
+ MT7915_WATCHDOG_TIME);
+ if (phy2)
+ ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
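+/*
+ * Snapshot the (read-clear) per-band MIB counters into phy->mib and
+ * accumulate the tx aggregation size histogram into
+ * dev->mt76.aggr_stats; the ack-fail/BA-miss/RTS-retry fields keep the
+ * worst value seen across the four counter banks.
+ */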
+static void
+mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ bool ext_phy = phy != &dev->phy;
+ int i, aggr0, aggr1;
+
+ memset(mib, 0, sizeof(*mib));
+
+ mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+
+ aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+ u32 val, val2;
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
+
+ val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+ if (val2 > mib->ack_fail_cnt)
+ mib->ack_fail_cnt = val2;
+
+ val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ if (val2 > mib->ba_miss_cnt)
+ mib->ba_miss_cnt = val2;
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+ val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+ if (val2 > mib->rts_retries_cnt) {
+ mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt = val2;
+ }
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+ val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+
+ dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr0++] += val >> 16;
+ dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
+ dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
+ }
+}
+
+void mt7915_mac_sta_stats_work(struct work_struct *work)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct mt7915_sta_stats *stats;
+ struct mt7915_sta *msta;
+ struct mt7915_dev *dev;
+
+ msta = container_of(work, struct mt7915_sta, stats_work);
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ dev = msta->vif->dev;
+ stats = &msta->stats;
+
+	/* use MT_TX_FREE_RATE to report the Tx rate on future devices */
+ if (time_after(jiffies, stats->jiffies + HZ)) {
+ mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO,
+ msta->wcid.idx);
+
+ stats->jiffies = jiffies;
+ }
+
+ if (test_and_clear_bit(IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED |
+ IEEE80211_RC_BW_CHANGED, &stats->changed))
+ mt7915_mcu_add_rate_ctrl(dev, vif, sta);
+
+ if (test_and_clear_bit(IEEE80211_RC_SMPS_CHANGED, &stats->changed))
+ mt7915_mcu_add_smps(dev, vif, sta);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+void mt7915_mac_work(struct work_struct *work)
+{
+ struct mt7915_phy *phy;
+ struct mt76_dev *mdev;
+
+ phy = (struct mt7915_phy *)container_of(work, struct mt7915_phy,
+ mac_work.work);
+ mdev = &phy->dev->mt76;
+
+ mutex_lock(&mdev->mutex);
+
+ mt76_update_survey(mdev);
+ if (++phy->mac_work_count == 5) {
+ phy->mac_work_count = 0;
+
+ mt7915_mac_update_mib_stats(phy);
+ }
+
+ mutex_unlock(&mdev->mutex);
+
+ ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
+static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+
+ if (phy->rdd_state & BIT(0))
+ mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ if (phy->rdd_state & BIT(1))
+ mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+}
+
+static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
+{
+ int err;
+
+ err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
+}
+
+static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int err;
+
+ /* start CAC */
+ err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ err = mt7915_dfs_start_rdd(dev, ext_phy);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(ext_phy);
+
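+	/* 160 and 80+80 MHz channels span both RX chains, so radar
+	 * detection must also run on the second chain.
+	 */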
+ if (chandef->width == NL80211_CHAN_WIDTH_160 ||
+ chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ err = mt7915_dfs_start_rdd(dev, 1);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(1);
+ }
+
+ return 0;
+}
+
+static int
+mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
+{
+ const struct mt7915_dfs_radar_spec *radar_specs;
+ struct mt7915_dev *dev = phy->dev;
+ int err, i;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs;
+ err = mt7915_mcu_set_fcc5_lpn(dev, 8);
+ if (err < 0)
+ return err;
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs;
+ break;
+ case NL80211_DFS_JP:
+ radar_specs = &jp_radar_specs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
+ err = mt7915_mcu_set_radar_th(dev, i,
+ &radar_specs->radar_pattern[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
+}
+
+int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int err;
+
+ if (dev->mt76.region == NL80211_DFS_UNSET) {
+ phy->dfs_state = -1;
+ if (phy->rdd_state)
+ goto stop;
+
+ return 0;
+ }
+
+ if (test_bit(MT76_SCANNING, &phy->mt76->state))
+ return 0;
+
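+	/* skip reprogramming if the DFS state of the current channel
+	 * did not change
+	 */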
+ if (phy->dfs_state == chandef->chan->dfs_state)
+ return 0;
+
+ err = mt7915_dfs_init_radar_specs(phy);
+ if (err < 0) {
+ phy->dfs_state = -1;
+ goto stop;
+ }
+
+ phy->dfs_state = chandef->chan->dfs_state;
+
+ if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
+ if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ return mt7915_dfs_start_radar_detector(phy);
+
+ return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
+ MT_RX_SEL0, 0);
+ }
+
+stop:
+ err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
+ MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ mt7915_dfs_stop_radar_detector(phy);
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
new file mode 100644
index 000000000000..b9bc8b25b031
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_MAC_H
+#define __MT7915_MAC_H
+
+#define MT_CT_PARSE_LEN 72
+#define MT_CT_DMA_BUF_NUM 2
+
+#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_TYPE GENMASK(31, 27)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
+
+enum rx_pkt_type {
+ PKT_TYPE_TXS,
+ PKT_TYPE_TXRXV,
+ PKT_TYPE_NORMAL,
+ PKT_TYPE_RX_DUP_RFB,
+ PKT_TYPE_RX_TMR,
+ PKT_TYPE_RETRIEVE,
+ PKT_TYPE_TXRX_NOTIFY,
+ PKT_TYPE_RX_EVENT,
+};
+
+/* RXD DW1 */
+#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
+#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
+#define MT_RXD1_NORMAL_GROUP_2 BIT(12)
+#define MT_RXD1_NORMAL_GROUP_3 BIT(13)
+#define MT_RXD1_NORMAL_GROUP_4 BIT(14)
+#define MT_RXD1_NORMAL_GROUP_5 BIT(15)
+#define MT_RXD1_NORMAL_SEC_MODE GENMASK(20, 16)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21)
+#define MT_RXD1_NORMAL_CM BIT(23)
+#define MT_RXD1_NORMAL_CLM BIT(24)
+#define MT_RXD1_NORMAL_ICV_ERR BIT(25)
+#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26)
+#define MT_RXD1_NORMAL_FCS_ERR BIT(27)
+#define MT_RXD1_NORMAL_BAND_IDX BIT(28)
+#define MT_RXD1_NORMAL_SPP_EN BIT(29)
+#define MT_RXD1_NORMAL_ADD_OM BIT(30)
+#define MT_RXD1_NORMAL_SEC_DONE BIT(31)
+
+/* RXD DW2 */
+#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0)
+#define MT_RXD2_NORMAL_CO_ANT BIT(6)
+#define MT_RXD2_NORMAL_BF_CQI BIT(7)
+#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8)
+#define MT_RXD2_NORMAL_HDR_TRANS BIT(13)
+#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 14)
+#define MT_RXD2_NORMAL_TID GENMASK(19, 16)
+#define MT_RXD2_NORMAL_MU_BAR BIT(21)
+#define MT_RXD2_NORMAL_SW_BIT BIT(22)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
+#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(30)
+#define MT_RXD2_NORMAL_BF_REPORT BIT(31)
+
+/* RXD DW3 */
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16)
+#define MT_RXD3_NORMAL_U2M BIT(0)
+#define MT_RXD3_NORMAL_HTC_VLD BIT(0)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(19)
+#define MT_RXD3_NORMAL_BEACON_MC BIT(20)
+#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
+#define MT_RXD3_NORMAL_AMSDU BIT(22)
+#define MT_RXD3_NORMAL_MESH BIT(23)
+#define MT_RXD3_NORMAL_MHCP BIT(24)
+#define MT_RXD3_NORMAL_NO_INFO_WB BIT(25)
+#define MT_RXD3_NORMAL_DISABLE_RX_HDR_TRANS BIT(26)
+#define MT_RXD3_NORMAL_POWER_SAVE_STAT BIT(27)
+#define MT_RXD3_NORMAL_MORE BIT(28)
+#define MT_RXD3_NORMAL_UNWANT BIT(29)
+#define MT_RXD3_NORMAL_RX_DROP BIT(30)
+#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
+
+/* RXD DW4 */
+#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0)
+#define MT_RXD4_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD4_NORMAL_CLS BIT(10)
+#define MT_RXD4_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD4_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD4_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD4_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+
+/* P-RXV */
+#define MT_PRXV_TX_RATE GENMASK(6, 0)
+#define MT_PRXV_TX_DCM BIT(4)
+#define MT_PRXV_TX_ER_SU_106T BIT(5)
+#define MT_PRXV_NSTS GENMASK(9, 7)
+#define MT_PRXV_HT_AD_CODE BIT(11)
+#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
+#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
+#define MT_PRXV_RCPI3 GENMASK(31, 24)
+#define MT_PRXV_RCPI2 GENMASK(23, 16)
+#define MT_PRXV_RCPI1 GENMASK(15, 8)
+#define MT_PRXV_RCPI0 GENMASK(7, 0)
+
+/* C-RXV */
+#define MT_CRXV_HT_STBC GENMASK(1, 0)
+#define MT_CRXV_TX_MODE GENMASK(7, 4)
+#define MT_CRXV_FRAME_MODE GENMASK(10, 8)
+#define MT_CRXV_HT_SHORT_GI GENMASK(14, 13)
+#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
+#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
+#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
+#define MT_CRXV_HE_UPLINK BIT(31)
+
+#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
+#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
+#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
+#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21)
+
+#define MT_CRXV_HE_BSS_COLOR GENMASK(5, 0)
+#define MT_CRXV_HE_TXOP_DUR GENMASK(12, 6)
+#define MT_CRXV_HE_BEAM_CHNG BIT(13)
+#define MT_CRXV_HE_DOPPLER BIT(16)
+
+struct mt7915_rxv {
+ u32 phy;
+
+	/* P-RXV: DW 0~1, C-RXV: DW 2~19 */
+ __le32 v[20];
+};
+
+enum tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+ MT_TX_TYPE_CT,
+ MT_TX_TYPE_SF,
+ MT_TX_TYPE_CMD,
+ MT_TX_TYPE_FW,
+};
+
+enum tx_pkt_queue_idx {
+ MT_LMAC_AC00,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0 = 0x10,
+ MT_LMAC_BCN0 = 0x12,
+};
+
+enum tx_port_idx {
+ MT_TX_PORT_IDX_LMAC,
+ MT_TX_PORT_IDX_MCU
+};
+
+enum tx_mcu_port_q_idx {
+ MT_TX_MCU_PORT_RX_Q0 = 0x20,
+ MT_TX_MCU_PORT_RX_Q1,
+ MT_TX_MCU_PORT_RX_Q2,
+ MT_TX_MCU_PORT_RX_Q3,
+ MT_TX_MCU_PORT_RX_FWDL = 0x3e
+};
+
+#define MT_CT_INFO_APPLY_TXD BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
+#define MT_CT_INFO_MGMT_FRAME BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
+#define MT_CT_INFO_HSR2_TX BIT(4)
+
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_TXD0_Q_IDX GENMASK(31, 25)
+#define MT_TXD0_PKT_FMT GENMASK(24, 23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_LONG_FORMAT BIT(31)
+#define MT_TXD1_TGID BIT(30)
+#define MT_TXD1_OWN_MAC GENMASK(29, 24)
+#define MT_TXD1_AMSDU BIT(23)
+#define MT_TXD1_TID GENMASK(22, 20)
+#define MT_TXD1_HDR_PAD GENMASK(19, 18)
+#define MT_TXD1_HDR_FORMAT GENMASK(17, 16)
+#define MT_TXD1_HDR_INFO GENMASK(15, 11)
+#define MT_TXD1_VTA BIT(10)
+#define MT_TXD1_WLAN_IDX GENMASK(9, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_FIXED_RATE BIT(30)
+#define MT_TXD2_POWER_OFFSET GENMASK(29, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SW_POWER_MGMT BIT(29)
+#define MT_TXD3_BA_DISABLE BIT(28)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+#define MT_TXD3_TIMING_MEASURE BIT(5)
+#define MT_TXD3_DAS BIT(4)
+#define MT_TXD3_EEOSP BIT(3)
+#define MT_TXD3_EMRD BIT(2)
+#define MT_TXD3_PROTECT_FRAME BIT(1)
+#define MT_TXD3_NO_ACK BIT(0)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_MD BIT(15)
+#define MT_TXD5_ADD_BA BIT(14)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_TX_IBF BIT(31)
+#define MT_TXD6_TX_EBF BIT(30)
+#define MT_TXD6_TX_RATE GENMASK(29, 16)
+#define MT_TXD6_SGI GENMASK(15, 14)
+#define MT_TXD6_HELTF GENMASK(13, 12)
+#define MT_TXD6_LDPC BIT(11)
+#define MT_TXD6_SPE_ID_IDX BIT(10)
+#define MT_TXD6_ANT_ID GENMASK(7, 4)
+#define MT_TXD6_DYN_BW BIT(3)
+#define MT_TXD6_FIXED_BW BIT(2)
+#define MT_TXD6_BW GENMASK(2, 0)
+
+#define MT_TXD7_TXD_LEN GENMASK(31, 30)
+#define MT_TXD7_UDP_TCP_SUM BIT(29)
+#define MT_TXD7_IP_SUM BIT(28)
+
+#define MT_TXD7_TYPE GENMASK(21, 20)
+#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+
+#define MT_TXD7_PSE_FID GENMASK(27, 16)
+#define MT_TXD7_SPE_IDX GENMASK(15, 11)
+#define MT_TXD7_HW_AMSDU BIT(10)
+#define MT_TXD7_TX_TIME GENMASK(9, 0)
+
+#define MT_TX_RATE_STBC BIT(13)
+#define MT_TX_RATE_NSS GENMASK(12, 10)
+#define MT_TX_RATE_MODE GENMASK(9, 6)
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+#define MT_TXP_MAX_BUF_NUM 6
+
+struct mt7915_txp {
+ __le16 flags;
+ __le16 token;
+ u8 bss_idx;
+ u8 rept_wds_wcid;
+ u8 rsv;
+ u8 nbuf;
+ __le32 buf[MT_TXP_MAX_BUF_NUM];
+ __le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed __aligned(4);
+
+struct mt7915_tx_free {
+ __le16 rx_byte_cnt;
+ __le16 ctrl;
+ u8 txd_cnt;
+ u8 rsv[3];
+ __le32 info[];
+} __packed __aligned(4);
+
+#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
+#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
+#define MT_TX_FREE_LATENCY GENMASK(12, 0)
+/* 0: success, others: dropped */
+#define MT_TX_FREE_STATUS GENMASK(14, 13)
+#define MT_TX_FREE_MSDU_ID GENMASK(30, 16)
+#define MT_TX_FREE_PAIR BIT(31)
+/* this field will be supported in a future hardware revision */
+#define MT_TX_FREE_RATE GENMASK(13, 0)
+
+struct mt7915_dfs_pulse {
+ u32 max_width; /* us */
+ int max_pwr; /* dbm */
+ int min_pwr; /* dbm */
+ u32 min_stgr_pri; /* us */
+ u32 max_stgr_pri; /* us */
+ u32 min_cr_pri; /* us */
+ u32 max_cr_pri; /* us */
+};
+
+struct mt7915_dfs_pattern {
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u32 min_pri;
+ u32 max_pri;
+ u8 max_pw;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+ u8 rsv[2];
+ u32 min_stgpr_diff;
+} __packed;
+
+struct mt7915_dfs_radar_spec {
+ struct mt7915_dfs_pulse pulse_th;
+ struct mt7915_dfs_pattern radar_pattern[16];
+};
+
+static inline struct mt7915_txp *
+mt7915_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ u8 *txwi;
+
+ if (!t)
+ return NULL;
+
+ txwi = mt76_get_txwi_ptr(dev, t);
+
+ return (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
new file mode 100644
index 000000000000..0575c259f245
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7915.h"
+#include "mcu.h"
+
+static bool mt7915_dev_running(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return true;
+
+ phy = mt7915_ext_phy(dev);
+
+ return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+}
+
+static int mt7915_start(struct ieee80211_hw *hw)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool running;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ running = mt7915_dev_running(dev);
+
+ if (!running) {
+ mt7915_mcu_set_pm(dev, 0, 0);
+ mt7915_mcu_set_mac(dev, 0, true, false);
+ mt7915_mcu_set_scs(dev, 0, true);
+ }
+
+ if (phy != &dev->phy) {
+ mt7915_mcu_set_pm(dev, 1, 0);
+ mt7915_mcu_set_mac(dev, 1, true, false);
+ mt7915_mcu_set_scs(dev, 1, true);
+ }
+
+ mt7915_mcu_set_sku_en(phy, true);
+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ ieee80211_queue_delayed_work(hw, &phy->mac_work,
+ MT7915_WATCHDOG_TIME);
+
+ if (!running)
+ mt7915_mac_reset_counters(phy);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt7915_stop(struct ieee80211_hw *hw)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (phy != &dev->phy) {
+ mt7915_mcu_set_pm(dev, 1, 1);
+ mt7915_mcu_set_mac(dev, 1, false, false);
+ }
+
+ if (!mt7915_dev_running(dev)) {
+ mt7915_mcu_set_pm(dev, 0, 1);
+ mt7915_mcu_set_mac(dev, 0, false, false);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static int get_omac_idx(enum nl80211_iftype type, u32 mask)
+{
+ int i;
+
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP:
+ /* ap uses hw bssid 0 and ext bssid */
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
+ if (~mask & BIT(i))
+ return i;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ /* station uses hw bssid other than 0 */
+ for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
+ if (~mask & BIT(i))
+ return i;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -1;
+}
+
+static int mt7915_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt76_txq *mtxq;
+ bool ext_phy = phy != &dev->phy;
+ int idx, ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mvif->idx = ffs(~phy->vif_mask) - 1;
+ if (mvif->idx >= MT7915_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ idx = get_omac_idx(vif->type, phy->omac_mask);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ mvif->omac_idx = idx;
+ mvif->dev = dev;
+ mvif->band_idx = ext_phy;
+
+ if (ext_phy)
+ mvif->wmm_idx = ext_phy * (MT7915_MAX_WMM_SETS / 2) +
+ mvif->idx % (MT7915_MAX_WMM_SETS / 2);
+ else
+ mvif->wmm_idx = mvif->idx % MT7915_MAX_WMM_SETS;
+
+ ret = mt7915_mcu_add_dev_info(dev, vif, true);
+ if (ret)
+ goto out;
+
+ phy->vif_mask |= BIT(mvif->idx);
+ phy->omac_mask |= BIT(mvif->omac_idx);
+
+ idx = MT7915_WTBL_RESERVED - mvif->idx;
+
+ INIT_LIST_HEAD(&mvif->sta.poll_list);
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.ext_phy = mvif->band_idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ if (vif->txq) {
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ mt76_txq_init(&dev->mt76, vif->txq);
+ }
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void mt7915_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = &mvif->sta;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ int idx = msta->wcid.idx;
+
+ /* TODO: disable beacon for the bss */
+
+ mt7915_mcu_add_dev_info(dev, vif, false);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ if (vif->txq)
+ mt76_txq_remove(&dev->mt76, vif->txq);
+
+ mutex_lock(&dev->mt76.mutex);
+ phy->vif_mask &= ~BIT(mvif->idx);
+ phy->omac_mask &= ~BIT(mvif->omac_idx);
+ mutex_unlock(&dev->mt76.mutex);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+static void mt7915_init_dfs_state(struct mt7915_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+
+ if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
+ mphy->chandef.width == chandef->width)
+ return;
+
+ phy->dfs_state = -1;
+}
+
+static int mt7915_set_channel(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int ret;
+
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+ set_bit(MT76_RESET, &phy->mt76->state);
+
+ mt7915_init_dfs_state(phy);
+ mt76_set_channel(phy->mt76);
+
+ ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
+ if (ret)
+ goto out;
+
+ mt7915_mac_set_timing(phy);
+ ret = mt7915_dfs_init_radar_detector(phy);
+ mt7915_mac_cca_stats_reset(phy);
+
+ mt7915_mac_reset_counters(phy);
+ phy->noise = 0;
+
+out:
+ clear_bit(MT76_RESET, &phy->mt76->state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76_txq_schedule_all(phy->mt76);
+ ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+ MT7915_WATCHDOG_TIME);
+
+ return ret;
+}
+
+static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ int idx = key->keyidx;
+
+	/* The hardware does not support per-STA RX GTK; fall back
+	 * to software mode for these.
+	 */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_SMS4:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid,
+ cmd == SET_KEY ? key : NULL);
+
+ return mt7915_mcu_add_key(dev, vif, msta, key, cmd);
+}
+
+static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ int ret;
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt7915_set_channel(phy);
+ if (ret)
+ return ret;
+ ieee80211_wake_queues(hw);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ ret = mt7915_mcu_set_sku(phy);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ /* no need to update right away, we'll get BSS_CHANGED_QOS */
+ mvif->wmm[queue].cw_min = params->cw_min;
+ mvif->wmm[queue].cw_max = params->cw_max;
+ mvif->wmm[queue].aifs = params->aifs;
+ mvif->wmm[queue].txop = params->txop;
+
+ return 0;
+}
+
+static void mt7915_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy != &dev->phy;
+
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
+ u32 flags = 0;
+
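+/* translate a mac80211 FIF_* filter flag into RFCR drop bits: frames
+ * are dropped only when the corresponding flag is not requested
+ */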
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ phy->rxfilter &= ~(_hw); \
+ phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+
+ if (*total_flags & FIF_CONTROL)
+ mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
+ else
+ mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
+}
+
+static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+
+	/*
+	 * In station mode, the BSSID maps the wlan entry to a peer; the
+	 * peer then references bss_info_rfch to set its bandwidth cap.
+	 */
+ if (changed & BSS_CHANGED_BSSID &&
+ vif->type == NL80211_IFTYPE_STATION) {
+ bool join = !is_zero_ether_addr(info->bssid);
+
+ mt7915_mcu_add_bss_info(phy, vif, join);
+ mt7915_mcu_add_sta(dev, vif, NULL, join);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ mt7915_mcu_add_bss_info(phy, vif, info->assoc);
+ mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7915_mac_set_timing(phy);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ mt7915_mcu_add_bss_info(phy, vif, info->enable_beacon);
+ mt7915_mcu_add_sta(dev, vif, NULL, info->enable_beacon);
+ }
+
+	/* ensure txcmd_mode is enabled only after the bss_info update */
+ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ mt7915_mcu_set_tx(dev, vif);
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
+
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7915_mcu_add_beacon(hw, vif, info->enable_beacon);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7915_mcu_add_beacon(hw, vif, true);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ int ret, idx;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
+
+ INIT_LIST_HEAD(&msta->poll_list);
+ INIT_WORK(&msta->stats_work, mt7915_mac_sta_stats_work);
+ spin_lock_init(&msta->ampdu_lock);
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.ext_phy = mvif->band_idx;
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->stats.jiffies = jiffies;
+
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ ret = mt7915_mcu_add_sta(dev, vif, sta, true);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_add_sta_adv(dev, vif, sta, true);
+}
+
+void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ mt7915_mcu_add_sta_adv(dev, vif, sta, false);
+ mt7915_mcu_add_sta(dev, vif, sta, false);
+
+ mt7915_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (!list_empty(&msta->poll_list))
+ list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+static void mt7915_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
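+	/* pick the WTBL entry for this frame: the peer's own entry when
+	 * a station is given, the vif's broadcast entry for vif frames,
+	 * and the global entry as last resort.
+	 */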
+ if (control->sta) {
+ struct mt7915_sta *sta;
+
+ sta = (struct mt7915_sta *)control->sta->drv_priv;
+ wcid = &sta->wcid;
+ }
+
+ if (vif && !control->sta) {
+ struct mt7915_vif *mvif;
+
+ mvif = (struct mt7915_vif *)vif->drv_priv;
+ wcid = &mvif->sta.wcid;
+ }
+
+ mt76_tx(mphy, control->sta, wcid, skb);
+}
+
+static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7915_mcu_set_rts_thresh(phy, val);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ struct mt76_txq *mtxq;
+ int ret = 0;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ params->buf_size);
+ mt7915_mcu_add_rx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt7915_mcu_add_rx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ mt7915_set_aggr_state(msta, tid, MT7915_AGGR_OPERATIONAL);
+ mt7915_mcu_add_tx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP);
+ mt7915_mcu_add_tx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
+ mt7915_set_aggr_state(msta, tid, MT7915_AGGR_START);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP);
+ mt7915_mcu_add_tx_ba(dev, params, false);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7915_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE);
+}
+
+static int
+mt7915_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+}
+
+static int
+mt7915_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mib_stats *mib = &phy->mib;
+
+ stats->dot11RTSSuccessCount = mib->rts_cnt;
+ stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+
+ return 0;
+}
+
+static u64
+mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf;
+ u16 n;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+ /* TSF software read */
+ mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE);
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return tsf.t64;
+}
+
+static void
+mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 timestamp)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy != &dev->phy;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+ u16 n;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+ mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
+ /* TSF software overwrite */
+ mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7915_mac_set_timing(phy);
+}
+
+static int
+mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ int max_nss = hweight8(hw->wiphy->available_antennas_tx);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ return -EINVAL;
+
+ if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+ tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ phy->mt76->antenna_mask = tx_ant;
+
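+	/* the ext phy drives the upper chains of the device: shift the
+	 * chainmask by two on a 4-chain (2x2 + 2x2) setup, by one
+	 * otherwise.
+	 */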
+ if (ext_phy) {
+ if (dev->chainmask == 0xf)
+ tx_ant <<= 2;
+ else
+ tx_ant <<= 1;
+ }
+ phy->chainmask = tx_ant;
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7915_set_stream_vht_txbf_caps(phy);
+ mt7915_set_stream_he_caps(phy);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt7915_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_sta_stats *stats = &msta->stats;
+
+ if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
+ return;
+
+ if (stats->tx_rate.legacy) {
+ sinfo->txrate.legacy = stats->tx_rate.legacy;
+ } else {
+ sinfo->txrate.mcs = stats->tx_rate.mcs;
+ sinfo->txrate.nss = stats->tx_rate.nss;
+ sinfo->txrate.bw = stats->tx_rate.bw;
+ sinfo->txrate.he_gi = stats->tx_rate.he_gi;
+ sinfo->txrate.he_dcm = stats->tx_rate.he_dcm;
+ sinfo->txrate.he_ru_alloc = stats->tx_rate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = stats->tx_rate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
+static void
+mt7915_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+	struct mt7915_dev *dev = mt7915_hw_dev(hw);
+	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+	rcu_read_lock();
+	sta = ieee80211_find_sta(vif, sta->addr);
+	if (!sta) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	/* changed is a mask of IEEE80211_RC_* flags: accumulate it for
+	 * the stats worker instead of misusing set_bit() on it.
+	 */
+	spin_lock_bh(&dev->sta_poll_lock);
+	msta->stats.changed |= changed;
+	spin_unlock_bh(&dev->sta_poll_lock);
+
+	ieee80211_queue_work(hw, &msta->stats_work);
+}
+
+const struct ieee80211_ops mt7915_ops = {
+ .tx = mt7915_tx,
+ .start = mt7915_start,
+ .stop = mt7915_stop,
+ .add_interface = mt7915_add_interface,
+ .remove_interface = mt7915_remove_interface,
+ .config = mt7915_config,
+ .conf_tx = mt7915_conf_tx,
+ .configure_filter = mt7915_configure_filter,
+ .bss_info_changed = mt7915_bss_info_changed,
+ .sta_add = mt7915_sta_add,
+ .sta_remove = mt7915_sta_remove,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .sta_rc_update = mt7915_sta_rc_update,
+ .set_key = mt7915_set_key,
+ .ampdu_action = mt7915_ampdu_action,
+ .set_rts_threshold = mt7915_set_rts_threshold,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76_sw_scan_complete,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .get_txpower = mt76_get_txpower,
+ .channel_switch_beacon = mt7915_channel_switch_beacon,
+ .get_stats = mt7915_get_stats,
+ .get_tsf = mt7915_get_tsf,
+ .set_tsf = mt7915_set_tsf,
+ .get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
+ .set_antenna = mt7915_set_antenna,
+ .set_coverage_class = mt7915_set_coverage_class,
+ .sta_statistics = mt7915_sta_statistics,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .sta_add_debugfs = mt7915_sta_add_debugfs,
+#endif
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
new file mode 100644
index 000000000000..c8c12c740c1a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -0,0 +1,3182 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include "mt7915.h"
+#include "mcu.h"
+#include "mac.h"
+#include "eeprom.h"
+
+struct mt7915_patch_hdr {
+ char build_date[16];
+ char platform[4];
+ __be32 hw_sw_ver;
+ __be32 patch_ver;
+ __be16 checksum;
+ u16 reserved;
+ struct {
+ __be32 patch_ver;
+ __be32 subsys;
+ __be32 feature;
+ __be32 n_region;
+ __be32 crc;
+ u32 reserved[11];
+ } desc;
+} __packed;
+
+struct mt7915_patch_sec {
+ __be32 type;
+ __be32 offs;
+ __be32 size;
+ union {
+ __be32 spec[13];
+ struct {
+ __be32 addr;
+ __be32 len;
+ __be32 sec_key_idx;
+ __be32 align_len;
+ u32 reserved[9];
+ } info;
+ };
+} __packed;
+
+struct mt7915_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 n_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 reserved[2];
+ char fw_ver[10];
+ char build_date[15];
+ u32 crc;
+} __packed;
+
+struct mt7915_fw_region {
+ __le32 decomp_crc;
+ __le32 decomp_len;
+ __le32 decomp_blk_sz;
+ u8 reserved[4];
+ __le32 addr;
+ __le32 len;
+ u8 feature_set;
+ u8 reserved1[15];
+} __packed;
+
+#define MCU_PATCH_ADDRESS 0x200000
+
+#define MT_STA_BFER BIT(0)
+#define MT_STA_BFEE BIT(1)
+
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
+#define PATCH_SEC_TYPE_INFO 0x2
+
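+/* the 10-bit wlan table index is split into an 8-bit low part and a
+ * 2-bit high part in MCU messages
+ */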
+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
+
+#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
+#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
+
+static enum mt7915_cipher_type
+mt7915_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MT_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MT_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MT_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MT_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MT_CIPHER_WAPI;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+static u8 mt7915_mcu_chan_bw(struct cfg80211_chan_def *chandef)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ };
+
+ if (chandef->width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[chandef->width];
+}
+
+static const struct ieee80211_sta_he_cap *
+mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband;
+ enum nl80211_band band;
+
+ band = phy->mt76->chandef.chan->band;
+ sband = phy->mt76->hw->wiphy->bands[band];
+
+ return ieee80211_get_he_iftype_cap(sband, vif->type);
+}
+
+static u8
+mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ u8 mode = 0;
+
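+	/* build the PHY mode bitmap from the peer's capabilities when a
+	 * station is given, otherwise from our own band capabilities.
+	 */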
+ if (sta) {
+ ht_cap = &sta->ht_cap;
+ vht_cap = &sta->vht_cap;
+ he_cap = &sta->he_cap;
+ } else {
+ struct ieee80211_supported_band *sband;
+ struct mt7915_phy *phy;
+ struct mt7915_vif *mvif;
+
+ mvif = (struct mt7915_vif *)vif->drv_priv;
+ phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ sband = phy->mt76->hw->wiphy->bands[band];
+
+ ht_cap = &sband->ht_cap;
+ vht_cap = &sband->vht_cap;
+ he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
+ }
+
+ if (band == NL80211_BAND_2GHZ) {
+ mode |= PHY_MODE_B | PHY_MODE_G;
+
+ if (ht_cap->ht_supported)
+ mode |= PHY_MODE_GN;
+
+ if (he_cap->has_he)
+ mode |= PHY_MODE_AX_24G;
+ } else if (band == NL80211_BAND_5GHZ) {
+ mode |= PHY_MODE_A;
+
+ if (ht_cap->ht_supported)
+ mode |= PHY_MODE_AN;
+
+ if (vht_cap->vht_supported)
+ mode |= PHY_MODE_AC;
+
+ if (he_cap->has_he)
+ mode |= PHY_MODE_AX_5G;
+ }
+
+ return mode;
+}
+
+static u8
+mt7915_mcu_get_sta_nss(u16 mcs_map)
+{
+ u8 nss;
+
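+	/* the MCS map encodes two bits per spatial stream; find the
+	 * highest stream that advertises a supported MCS set.
+	 */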
+ for (nss = 8; nss > 0; nss--) {
+ u8 nss_mcs = (mcs_map >> (2 * (nss - 1))) & 3;
+
+ if (nss_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ break;
+ }
+
+ return nss - 1;
+}
+
+static int __mt7915_mcu_msg_send(struct mt7915_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ struct mt7915_mcu_txd *mcu_txd;
+ u8 seq, pkt_fmt, qidx;
+ enum mt76_txq_id txq;
+ __le32 *txd;
+ u32 val;
+
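+	/* sequence number 0 is reserved for unsolicited firmware events
+	 * (see mt7915_mcu_rx_event), so never hand it out here.
+	 */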
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+
+ if (cmd == -MCU_CMD_FW_SCATTER) {
+ txq = MT_TXQ_FWDL;
+ goto exit;
+ }
+
+ mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
+
+	qidx = MT_TX_MCU_PORT_RX_Q0;
+	pkt_fmt = MT_TX_TYPE_CMD;
+	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+		txq = MT_TXQ_MCU_WA;
+	else
+		txq = MT_TXQ_MCU;
+
+ txd = mcu_txd->txd;
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, pkt_fmt) |
+ FIELD_PREP(MT_TXD0_Q_IDX, qidx);
+ txd[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
+ txd[1] = cpu_to_le32(val);
+
+ mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, qidx));
+ mcu_txd->pkt_type = MCU_PKT_ID;
+ mcu_txd->seq = seq;
+
+ if (cmd < 0) {
+ mcu_txd->set_query = MCU_Q_NA;
+ mcu_txd->cid = -cmd;
+ } else {
+ mcu_txd->cid = MCU_CMD_EXT_CID;
+ mcu_txd->ext_cid = cmd;
+ mcu_txd->ext_cid_ack = 1;
+
+ /* do not use Q_SET for efuse */
+ if (cmd == MCU_EXT_CMD_EFUSE_ACCESS)
+ mcu_txd->set_query = MCU_Q_QUERY;
+ else
+ mcu_txd->set_query = MCU_Q_SET;
+ }
+
+ mcu_txd->s2d_index = MCU_S2D_H2N;
+ WARN_ON(cmd == MCU_EXT_CMD_EFUSE_ACCESS &&
+ mcu_txd->set_query != MCU_Q_QUERY);
+
+exit:
+ if (wait_seq)
+ *wait_seq = seq;
+
+ return mt76_tx_queue_skb_raw(dev, txq, skb, 0);
+}
+
+static int
+mt7915_mcu_parse_eeprom(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_eeprom_info *res;
+ u8 *buf;
+
+ if (!skb)
+ return -EINVAL;
+
+ skb_pull(skb, sizeof(struct mt7915_mcu_rxd));
+
+ res = (struct mt7915_mcu_eeprom_info *)skb->data;
+ buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
+ memcpy(buf, res->data, 16);
+
+ return 0;
+}
+
+static int
+mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd,
+ struct sk_buff *skb, int seq)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+ int ret = 0;
+
+	if (seq != rxd->seq) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	switch (cmd) {
+	case -MCU_CMD_PATCH_SEM_CONTROL:
+		skb_pull(skb, sizeof(*rxd) - 4);
+		ret = *skb->data;
+		break;
+	case MCU_EXT_CMD_THERMAL_CTRL:
+		skb_pull(skb, sizeof(*rxd) + 4);
+		ret = le32_to_cpu(*(__le32 *)skb->data);
+		break;
+	case MCU_EXT_CMD_EFUSE_ACCESS:
+		ret = mt7915_mcu_parse_eeprom(dev, skb);
+		break;
+	default:
+		break;
+	}
+out:
+	/* always consume the skb, also for mismatched sequence numbers,
+	 * to avoid leaking stale responses
+	 */
+	dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static int
+mt7915_mcu_wait_response(struct mt7915_dev *dev, int cmd, int seq)
+{
+ unsigned long expires = jiffies + 20 * HZ;
+ struct sk_buff *skb;
+ int ret = 0;
+
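+	/* responses with a stale sequence number belong to an earlier
+	 * command and are dropped by mt7915_mcu_parse_response(), which
+	 * returns -EAGAIN so we keep waiting for ours.
+	 */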
+ while (true) {
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
+ if (!skb) {
+ dev_err(dev->mt76.dev, "Message %d (seq %d) timeout\n",
+ cmd, seq);
+ return -ETIMEDOUT;
+ }
+
+ ret = mt7915_mcu_parse_response(dev, cmd, skb, seq);
+ if (ret != -EAGAIN)
+ break;
+ }
+
+ return ret;
+}
+
+static int
+mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ int ret, seq;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ ret = __mt7915_mcu_msg_send(dev, skb, cmd, &seq);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt7915_mcu_wait_response(dev, cmd, seq);
+
+out:
+ mutex_unlock(&mdev->mcu.mutex);
+
+ return ret;
+}
+
+static int
+mt7915_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
+{
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(mdev, data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp);
+}
+
+static void
+mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (vif->csa_active)
+ ieee80211_csa_finish(vif);
+}
+
+static void
+mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_rdd_report *r;
+
+ r = (struct mt7915_mcu_rdd_report *)skb->data;
+
+ if (r->idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ieee80211_radar_detected(mphy->hw);
+ dev->hw_pattern++;
+}
+
+static void
+mt7915_mcu_tx_rate_cal(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
+ struct rate_info *rate, u16 r)
+{
+ struct ieee80211_supported_band *sband;
+ u16 ru_idx = le16_to_cpu(ra->ru_idx);
+ u16 flags = 0;
+
+ rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
+ rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
+
+ switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
+ case MT_PHY_TYPE_CCK:
+ case MT_PHY_TYPE_OFDM:
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate->legacy = sband->bitrates[rate->mcs].bitrate;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
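+		/* HT encodes the stream count in the MCS index, eight
+		 * MCS values per spatial stream
+		 */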
+ rate->mcs += (rate->nss - 1) * 8;
+ flags |= RATE_INFO_FLAGS_MCS;
+
+ if (ra->gi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_VHT:
+ flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+ if (ra->gi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ rate->he_gi = ra->gi;
+ rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
+
+ flags |= RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ break;
+ }
+ rate->flags = flags;
+
+ if (ru_idx) {
+ switch (ru_idx) {
+ case 1 ... 2:
+ rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 3 ... 6:
+ rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case 7 ... 14:
+ rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ default:
+ rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ }
+ rate->bw = RATE_INFO_BW_HE_RU;
+ } else {
+ u8 bw = mt7915_mcu_chan_bw(&mphy->chandef) -
+ FIELD_GET(MT_RA_RATE_BW, r);
+
+ switch (bw) {
+ case IEEE80211_STA_RX_BW_160:
+ rate->bw = RATE_INFO_BW_160;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ rate->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ rate->bw = RATE_INFO_BW_40;
+ break;
+ default:
+ rate->bw = RATE_INFO_BW_20;
+ break;
+ }
+ }
+}
+
+static void
+mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_ra_info *ra = (struct mt7915_mcu_ra_info *)skb->data;
+ u16 wcidx = le16_to_cpu(ra->wlan_idx);
+ struct mt76_wcid *wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ struct mt7915_sta *msta = container_of(wcid, struct mt7915_sta, wcid);
+ struct mt7915_sta_stats *stats = &msta->stats;
+ struct mt76_phy *mphy = &dev->mphy;
+ struct rate_info rate = {}, prob_rate = {};
+ u16 attempts = le16_to_cpu(ra->attempts);
+ u16 curr = le16_to_cpu(ra->curr_rate);
+ u16 probe = le16_to_cpu(ra->prob_up_rate);
+
+ if (msta->wcid.ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ /* current rate */
+ mt7915_mcu_tx_rate_cal(mphy, ra, &rate, curr);
+ stats->tx_rate = rate;
+
+ /* probing rate */
+ mt7915_mcu_tx_rate_cal(mphy, ra, &prob_rate, probe);
+ stats->prob_rate = prob_rate;
+
+ if (attempts) {
+ u16 success = le16_to_cpu(ra->success);
+
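+		/* packet error rate in units of 0.1% */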
+ stats->per = 1000 * (attempts - success) / attempts;
+ }
+}
+
+static void
+mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+	const char *data = (char *)&rxd[1];
+	const char *type;
+	int len = skb->len - sizeof(*rxd);
+
+ switch (rxd->s2d_index) {
+ case 0:
+ type = "WM";
+ break;
+ case 2:
+ type = "WA";
+ break;
+ default:
+ type = "unknown";
+ break;
+ }
+
+	/* the log payload is not NUL-terminated, so bound the print */
+	wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data);
+}
+
+static void
+mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+
+ switch (rxd->ext_eid) {
+ case MCU_EXT_EVENT_RDD_REPORT:
+ mt7915_mcu_rx_radar_detected(dev, skb);
+ break;
+ case MCU_EXT_EVENT_CSA_NOTIFY:
+ ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_mcu_csa_finish, dev);
+ break;
+ case MCU_EXT_EVENT_RATE_REPORT:
+ mt7915_mcu_tx_rate_report(dev, skb);
+ break;
+ case MCU_EXT_EVENT_FW_LOG_2_HOST:
+ mt7915_mcu_rx_log_message(dev, skb);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+
+ switch (rxd->eid) {
+ case MCU_EVENT_EXT:
+ mt7915_mcu_rx_ext_event(dev, skb);
+ break;
+ default:
+ break;
+ }
+ dev_kfree_skb(skb);
+}
+
+void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
+
+ if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
+ rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
+ rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
+ rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
+ rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
+ !rxd->seq)
+ mt7915_mcu_rx_unsolicited_event(dev, skb);
+ else
+ mt76_mcu_rx_event(&dev->mt76, skb);
+}
+
+static struct sk_buff *
+mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif,
+ struct mt7915_sta *msta, int len)
+{
+ struct sta_req_hdr hdr = {
+ .bss_idx = mvif->idx,
+ .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0,
+ .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0,
+ .muar_idx = msta ? mvif->omac_idx : 0,
+ .is_tlv_append = 1,
+ };
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ return skb;
+}
+
+static struct wtbl_req_hdr *
+mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta,
+ int cmd, void *sta_wtbl, struct sk_buff **skb)
+{
+ struct tlv *sta_hdr = sta_wtbl;
+ struct wtbl_req_hdr hdr = {
+ .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
+ .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
+ .operation = cmd,
+ };
+ struct sk_buff *nskb = *skb;
+
+ if (!nskb) {
+ nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ MT7915_WTBL_UPDATE_BA_SIZE);
+ if (!nskb)
+ return ERR_PTR(-ENOMEM);
+
+ *skb = nskb;
+ }
+
+ if (sta_hdr)
+ sta_hdr->len = cpu_to_le16(sizeof(hdr));
+
+ return skb_put_data(nskb, &hdr, sizeof(hdr));
+}
+
+static struct tlv *
+mt7915_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ void *sta_ntlv, void *sta_wtbl)
+{
+ struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
+ struct tlv *sta_hdr = sta_wtbl;
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(len),
+ };
+ u16 ntlv;
+
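+	/* append the TLV to the request, then account for it in the
+	 * message header: bump the TLV count and, when nested inside a
+	 * wtbl TLV, grow the enclosing TLV's length as well.
+	 */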
+ ptlv = skb_put(skb, len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+ ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
+
+ if (sta_hdr) {
+ u16 size = le16_to_cpu(sta_hdr->len);
+
+ sta_hdr->len = cpu_to_le16(size + len);
+ }
+
+ return ptlv;
+}
+
+static struct tlv *
+mt7915_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
+{
+ return mt7915_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
+}
+
+static struct tlv *
+mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
+ __le16 *sub_ntlv, __le16 *len)
+{
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(sub_tag),
+ .len = cpu_to_le16(sub_len),
+ };
+
+ ptlv = skb_put(skb, sub_len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ *sub_ntlv = cpu_to_le16(le16_to_cpu(*sub_ntlv) + 1);
+ *len = cpu_to_le16(le16_to_cpu(*len) + sub_len);
+
+ return ptlv;
+}
+
+/** bss info **/
+static int
+mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct bss_info_basic *bss;
+ u16 wlan_idx = mvif->sta.wcid.idx;
+ u32 type = NETWORK_INFRA;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (enable) {
+ struct ieee80211_sta *sta;
+ struct mt7915_sta *msta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+ wlan_idx = msta->wcid.idx;
+ rcu_read_unlock();
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = NETWORK_IBSS;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ bss = (struct bss_info_basic *)tlv;
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->network_type = cpu_to_le32(type);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
+ bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
+ bss->phy_mode = mt7915_get_phy_mode(phy->dev, vif, band, NULL);
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->active = enable;
+
+ return 0;
+}
+
+static void
+mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct bss_info_omac *omac;
+ struct tlv *tlv;
+ u32 type = 0;
+ u8 idx;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ omac = (struct bss_info_omac *)tlv;
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ omac->conn_type = cpu_to_le32(type);
+ omac->omac_idx = mvif->omac_idx;
+ omac->band_idx = mvif->band_idx;
+ omac->hw_bss_idx = idx;
+}
+
+struct mt7915_he_obss_narrow_bw_ru_data {
+ bool tolerated;
+};
+
+static void mt7915_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *_data)
+{
+ struct mt7915_he_obss_narrow_bw_ru_data *data = _data;
+ const struct element *elem;
+
+ elem = ieee80211_bss_get_elem(bss, WLAN_EID_EXT_CAPABILITY);
+
+	if (!elem || elem->datalen <= 10 ||
+	    !(elem->data[10] &
+	      WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
+ data->tolerated = false;
+}
+
+static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_he_obss_narrow_bw_ru_data iter_data = {
+ .tolerated = true,
+ };
+
+ if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return false;
+
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ mt7915_check_he_obss_narrow_bw_ru_iter,
+ &iter_data);
+
+ /*
+	 * Return true if there is at least one AP on the radar channel
+	 * that cannot tolerate 26-tone RU UL OFDMA transmissions using
+	 * HE TB PPDU.
+ */
+ return !iter_data.tolerated;
+}
+
+static void
+mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct bss_info_rf_ch *ch;
+ struct tlv *tlv;
+ int freq1 = chandef->center_freq1;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
+
+ ch = (struct bss_info_rf_ch *)tlv;
+ ch->pri_ch = chandef->chan->hw_value;
+ ch->center_ch0 = ieee80211_frequency_to_channel(freq1);
+ ch->bw = mt7915_mcu_chan_bw(chandef);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ int freq2 = chandef->center_freq2;
+
+ ch->center_ch1 = ieee80211_frequency_to_channel(freq2);
+ }
+
+ if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) {
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ bool ext_phy = phy != &dev->phy;
+
+ if (ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ch->he_ru26_block =
+ mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif);
+ ch->he_all_disable = false;
+ } else {
+ ch->he_all_disable = true;
+ }
+}
+
+static void
+mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy)
+{
+ struct bss_info_ra *ra;
+ struct tlv *tlv;
+ int max_nss = hweight8(phy->chainmask);
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
+
+ ra = (struct bss_info_ra *)tlv;
+ ra->op_mode = vif->type == NL80211_IFTYPE_AP;
+ ra->adhoc_en = vif->type == NL80211_IFTYPE_ADHOC;
+ ra->short_preamble = true;
+ ra->tx_streams = max_nss;
+ ra->rx_streams = max_nss;
+ ra->algo = 4;
+ ra->train_up_rule = 2;
+ ra->train_up_high_thres = 110;
+ ra->train_up_rule_rssi = -70;
+ ra->low_traffic_thres = 2;
+ ra->phy_cap = cpu_to_le32(0xfdf);
+ ra->interval = cpu_to_le32(500);
+ ra->fast_interval = cpu_to_le32(100);
+}
+
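+/*
+ * HE BSS TLV: PE duration and duration-based RTS threshold fall back
+ * to the defaults below when mac80211 reports zero; the max NSS/MCS
+ * maps come from the interface HE PHY capabilities.
+ */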
+static void
+mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy)
+{
+#define DEFAULT_HE_PE_DURATION 4
+#define DEFAULT_HE_DURATION_RTS_THRES 1023
+ const struct ieee80211_sta_he_cap *cap;
+ struct bss_info_he *he;
+ struct tlv *tlv;
+
+ cap = mt7915_get_he_phy_cap(phy, vif);
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
+
+ he = (struct bss_info_he *)tlv;
+ he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext * 4;
+ if (!he->he_pe_duration)
+ he->he_pe_duration = DEFAULT_HE_PE_DURATION;
+
+ he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th * 32);
+ if (!he->he_rts_thres)
+ he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
+
+ he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80;
+ he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160;
+ he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
+}
+
+static void
+mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
+{
+/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (4096us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+ struct bss_info_ext_bss *ext;
+ int ext_bss_idx, tsf_offset;
+ struct tlv *tlv;
+
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+ ext = (struct bss_info_ext_bss *)tlv;
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+
+static void
+mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
+{
+ struct bss_info_bmc_rate *bmc;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
+
+ bmc = (struct bss_info_bmc_rate *)tlv;
+ if (band == NL80211_BAND_2GHZ) {
+ bmc->short_preamble = true;
+ } else {
+ bmc->bc_trans = cpu_to_le16(0x2000);
+ bmc->mc_trans = cpu_to_le16(0x2080);
+ }
+}
+
+static void
+mt7915_mcu_bss_sync_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct bss_info_sync_mode *sync;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_SYNC_MODE, sizeof(*sync));
+
+ sync = (struct bss_info_sync_mode *)tlv;
+ sync->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ sync->dtim_period = vif->bss_conf.dtim_period;
+ sync->enable = true;
+}
+
+int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif, int enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+
+ skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL,
+ MT7915_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* bss_omac must be first */
+ if (enable)
+ mt7915_mcu_bss_omac_tlv(skb, vif);
+
+ mt7915_mcu_bss_basic_tlv(skb, vif, phy, enable);
+
+ if (enable) {
+ mt7915_mcu_bss_rfch_tlv(skb, vif, phy);
+ mt7915_mcu_bss_bmc_tlv(skb, phy);
+ mt7915_mcu_bss_ra_tlv(skb, vif, phy);
+
+ if (vif->bss_conf.he_support)
+ mt7915_mcu_bss_he_tlv(skb, vif, phy);
+
+ if (mvif->omac_idx > HW_BSSID_MAX)
+ mt7915_mcu_bss_ext_tlv(skb, mvif);
+ else
+ mt7915_mcu_bss_sync_tlv(skb, vif);
+ }
+
+ return __mt76_mcu_skb_send_msg(&phy->dev->mt76, skb,
+ MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+}
+
+/* starec & wtbl */
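+/*
+ * STA_REC_KEY_V2: a BIP-CMAC-128 install carries two cipher entries
+ * (AES-CCMP for the first 16 key bytes, BIP for the second 16); TKIP
+ * Rx/Tx MIC keys are swapped; key removal sends an empty entry list.
+ */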
+static int
+mt7915_mcu_sta_key_tlv(struct sk_buff *skb, struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct sta_rec_sec *sec;
+ struct tlv *tlv;
+ u32 len = sizeof(*sec);
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+
+ sec = (struct sta_rec_sec *)tlv;
+ sec->add = cmd;
+
+ if (cmd == SET_KEY) {
+ struct sec_key *sec_key;
+ u8 cipher;
+
+ cipher = mt7915_mcu_get_cipher(key->cipher);
+ if (cipher == MT_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key = &sec->key[0];
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_id = key->keyidx;
+
+ if (cipher == MT_CIPHER_BIP_CMAC_128) {
+ sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key, 16);
+
+ sec_key = &sec->key[1];
+ sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key + 16, 16);
+
+ sec->n_cipher = 2;
+ } else {
+ sec_key->cipher_id = cipher;
+ sec_key->key_len = key->keylen;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MT_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ }
+
+ len -= sizeof(*sec_key);
+ sec->n_cipher = 1;
+ }
+ } else {
+ len -= sizeof(sec->key);
+ sec->n_cipher = 0;
+ }
+ sec->len = cpu_to_le16(len);
+
+ return 0;
+}
+
+int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct mt7915_sta *msta, struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+ int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_sec);
+ int ret;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = mt7915_mcu_sta_key_tlv(skb, key, cmd);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static void
+mt7915_mcu_sta_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct sta_rec_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
+
+ ba = (struct sta_rec_ba *)tlv;
+ ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
+ ba->winsize = cpu_to_le16(params->buf_size);
+ ba->ssn = cpu_to_le16(params->ssn);
+ ba->ba_en = enable << params->tid;
+ ba->amsdu = params->amsdu;
+ ba->tid = params->tid;
+}
+
+static void
+mt7915_mcu_wtbl_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct wtbl_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
+ wtbl_tlv, sta_wtbl);
+
+ ba = (struct wtbl_ba *)tlv;
+ ba->tid = params->tid;
+
+ if (tx) {
+ ba->ba_type = MT_BA_TYPE_ORIGINATOR;
+ ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
+ ba->ba_en = enable;
+ } else {
+ memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
+ ba->ba_type = MT_BA_TYPE_RECIPIENT;
+ ba->rst_ba_tid = params->tid;
+ ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
+ ba->rst_ba_sb = 1;
+ }
+
+ if (enable && tx)
+ ba->ba_winsize = cpu_to_le16(params->buf_size);
+}
+
+static int
+mt7915_mcu_sta_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT7915_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
+ sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
+ mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7915_mcu_sta_ba(dev, params, enable, true);
+}
+
+int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7915_mcu_sta_ba(dev, params, enable, false);
+}
+
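+/*
+ * Generic WTBL entry: peer address, partial AID and MUAR index for a
+ * real station; without a station, the BSSID (station mode) or the
+ * broadcast address is used together with a fixed MUAR index.
+ */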
+static void
+mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct wtbl_generic *generic;
+ struct wtbl_rx *rx;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
+ wtbl_tlv, sta_wtbl);
+
+ generic = (struct wtbl_generic *)tlv;
+
+ if (sta) {
+ memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
+ generic->partial_aid = cpu_to_le16(sta->aid);
+ generic->muar_idx = mvif->omac_idx;
+ generic->qos = sta->wme;
+ } else {
+ /* use BSSID in station mode */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ memcpy(generic->peer_addr, vif->bss_conf.bssid,
+ ETH_ALEN);
+ else
+ eth_broadcast_addr(generic->peer_addr);
+
+ generic->muar_idx = 0xe;
+ }
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
+ wtbl_tlv, sta_wtbl);
+
+ rx = (struct wtbl_rx *)tlv;
+ rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
+ rx->rca2 = 1;
+ rx->rv = 1;
+}
+
+static void
+mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+#define EXTRA_INFO_VER BIT(0)
+#define EXTRA_INFO_NEW BIT(1)
+ struct sta_rec_basic *basic;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
+
+ basic = (struct sta_rec_basic *)tlv;
+ basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
+
+ if (enable) {
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = CONN_STATE_PORT_SECURE;
+ } else {
+ basic->conn_state = CONN_STATE_DISCONNECT;
+ }
+
+ if (!sta) {
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
+ eth_broadcast_addr(basic->peer_addr);
+ return;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ break;
+ case NL80211_IFTYPE_STATION:
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
+ basic->aid = cpu_to_le16(sta->aid);
+ basic->qos = sta->wme;
+}
+
+static void
+mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct sta_rec_he *he;
+ struct tlv *tlv;
+ u32 cap = 0;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
+
+ he = (struct sta_rec_he *)tlv;
+
+ if (elem->mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
+ cap |= STA_REC_HE_CAP_HTC;
+
+ if (elem->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ cap |= STA_REC_HE_CAP_BSR;
+
+ if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ cap |= STA_REC_HE_CAP_OM;
+
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU)
+ cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU;
+
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ cap |= STA_REC_HE_CAP_BQR;
+
+ if (elem->phy_cap_info[0] &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
+ cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
+
+ if (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
+ cap |= STA_REC_HE_CAP_LDPC;
+
+ if (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US)
+ cap |= STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US)
+ cap |= STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ)
+ cap |= STA_REC_HE_CAP_LE_EQ_80M_TX_STBC;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC;
+
+ if (elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE)
+ cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ)
+ cap |= STA_REC_HE_CAP_GT_80M_TX_STBC;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
+ cap |= STA_REC_HE_CAP_GT_80M_RX_STBC;
+
+ if (elem->phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI;
+
+ if (elem->phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK)
+ cap |= STA_REC_HE_CAP_TRIG_CQI_FK;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU)
+ cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU)
+ cap |= STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242;
+
+ he->he_cap = cpu_to_le32(cap);
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (elem->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ he->max_nss_mcs[CMD_HE_MCS_BW8080] =
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80;
+
+ he->max_nss_mcs[CMD_HE_MCS_BW160] =
+ he_cap->he_mcs_nss_supp.rx_mcs_160;
+ /* fall through */
+ default:
+ he->max_nss_mcs[CMD_HE_MCS_BW80] =
+ he_cap->he_mcs_nss_supp.rx_mcs_80;
+ break;
+ }
+
+ he->t_frame_dur =
+ HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
+ he->max_ampdu_exp =
+ HE_MAC(CAP3_MAX_AMPDU_LEN_EXP_MASK, elem->mac_cap_info[3]);
+
+ he->bw_set =
+ HE_PHY(CAP0_CHANNEL_WIDTH_SET_MASK, elem->phy_cap_info[0]);
+ he->device_class =
+ HE_PHY(CAP1_DEVICE_CLASS_A, elem->phy_cap_info[1]);
+ he->punc_pream_rx =
+ HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
+
+ he->dcm_tx_mode =
+ HE_PHY(CAP3_DCM_MAX_CONST_TX_MASK, elem->phy_cap_info[3]);
+ he->dcm_tx_max_nss =
+ HE_PHY(CAP3_DCM_MAX_TX_NSS_2, elem->phy_cap_info[3]);
+ he->dcm_rx_mode =
+ HE_PHY(CAP3_DCM_MAX_CONST_RX_MASK, elem->phy_cap_info[3]);
+ he->dcm_rx_max_nss =
+ HE_PHY(CAP3_DCM_MAX_RX_NSS_2, elem->phy_cap_info[3]);
+ he->dcm_max_ru =
+ HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]);
+
+ he->pkt_ext = 2;
+}
+
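+/*
+ * MU-RU TLV: enable DL/UL OFDMA and MU-MIMO and mirror the relevant
+ * HE MAC/PHY (and VHT MU beamformee) capabilities of the peer.
+ */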
+static void
+mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct sta_rec_muru *muru;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
+
+ muru = (struct sta_rec_muru *)tlv;
+ muru->cfg.ofdma_dl_en = true;
+ muru->cfg.ofdma_ul_en = true;
+ muru->cfg.mimo_dl_en = true;
+ muru->cfg.mimo_ul_en = true;
+
+ muru->ofdma_dl.punc_pream_rx =
+ HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
+ muru->ofdma_dl.he_20m_in_40m_2g =
+ HE_PHY(CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G, elem->phy_cap_info[8]);
+ muru->ofdma_dl.he_20m_in_160m =
+ HE_PHY(CAP8_20MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
+ muru->ofdma_dl.he_80m_in_160m =
+ HE_PHY(CAP8_80MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
+ muru->ofdma_dl.lt16_sigb = 0;
+ muru->ofdma_dl.rx_su_comp_sigb = 0;
+ muru->ofdma_dl.rx_su_non_comp_sigb = 0;
+
+ muru->ofdma_ul.t_frame_dur =
+ HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
+ muru->ofdma_ul.mu_cascading =
+ HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]);
+ muru->ofdma_ul.uo_ra =
+ HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]);
+ muru->ofdma_ul.he_2x996_tone = 0;
+ muru->ofdma_ul.rx_t_frame_11ac = 0;
+
+ muru->mimo_dl.vht_mu_bfee =
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ muru->mimo_dl.partial_bw_dl_mimo =
+ HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
+
+ muru->mimo_ul.full_ul_mimo =
+ HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
+ muru->mimo_ul.partial_ul_mimo =
+ HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
+}
+
+static void
+mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ struct tlv *tlv;
+
+ if (sta->ht_cap.ht_supported) {
+ struct sta_rec_ht *ht;
+
+ /* starec ht */
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ ht = (struct sta_rec_ht *)tlv;
+ ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ }
+
+ /* starec vht */
+ if (sta->vht_cap.vht_supported) {
+ struct sta_rec_vht *vht;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+ vht = (struct sta_rec_vht *)tlv;
+ vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ }
+
+ /* starec he */
+ if (sta->he_cap.has_he)
+ mt7915_mcu_sta_he_tlv(skb, sta);
+
+ /* starec muru */
+ if (sta->he_cap.has_he || sta->vht_cap.vht_supported)
+ mt7915_mcu_sta_muru_tlv(skb, sta);
+}
+
+static void
+mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct wtbl_smps *smps;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
+ wtbl_tlv, sta_wtbl);
+ smps = (struct wtbl_smps *)tlv;
+
+ if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ smps->smps = true;
+}
+
+static void
+mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct wtbl_ht *ht = NULL;
+ struct tlv *tlv;
+
+ /* wtbl ht */
+ if (sta->ht_cap.ht_supported) {
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
+ wtbl_tlv, sta_wtbl);
+ ht = (struct wtbl_ht *)tlv;
+ ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ ht->af = sta->ht_cap.ampdu_factor;
+ ht->mm = sta->ht_cap.ampdu_density;
+ ht->ht = true;
+ }
+
+ /* wtbl vht */
+ if (sta->vht_cap.vht_supported) {
+ struct wtbl_vht *vht;
+ u8 af;
+
+ tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
+ wtbl_tlv, sta_wtbl);
+ vht = (struct wtbl_vht *)tlv;
+ vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ vht->vht = true;
+
+ af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ sta->vht_cap.cap);
+ if (ht)
+ ht->af = max_t(u8, ht->af, af);
+ }
+
+ mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
+}
+
+int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT7915_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
+ mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
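+/* use fixed legacy OFDM rates (24 Mbit/s) for NDPA/report-poll sounding */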
+static void
+mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
+{
+ bf->sounding_phy = MT_PHY_TYPE_OFDM;
+ bf->ndp_rate = 0; /* mcs0 */
+ bf->ndpa_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */
+ bf->rept_poll_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */
+}
+
+static void
+mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct sta_rec_bf *bf)
+{
+ struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
+ u8 n = 0;
+
+ bf->tx_mode = MT_PHY_TYPE_HT;
+ bf->bf_cap |= MT_IBF;
+
+ if (mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF &&
+ (mcs->tx_params & IEEE80211_HT_MCS_TX_DEFINED))
+ n = FIELD_GET(IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK,
+ mcs->tx_params);
+ else if (mcs->rx_mask[3])
+ n = 3;
+ else if (mcs->rx_mask[2])
+ n = 2;
+ else if (mcs->rx_mask[1])
+ n = 1;
+
+ bf->nc = min_t(u8, bf->nr, n);
+ bf->ibf_ncol = bf->nc;
+
+ if (sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->nc)
+ bf->ibf_timeout = 0x48;
+}
+
+static void
+mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
+ struct sta_rec_bf *bf)
+{
+ struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
+ struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
+ u8 bfee_nr, bfer_nr, n, tx_ant = hweight8(phy->chainmask) - 1;
+ u16 mcs_map;
+
+ bf->tx_mode = MT_PHY_TYPE_VHT;
+ bf->bf_cap |= MT_EBF;
+
+ mt7915_mcu_sta_sounding_rate(bf);
+
+ bfee_nr = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
+ pc->cap);
+ bfer_nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ vc->cap);
+ mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
+
+ n = min_t(u8, bfer_nr, bfee_nr);
+ bf->nr = min_t(u8, n, tx_ant);
+ n = mt7915_mcu_get_sta_nss(mcs_map);
+
+ bf->nc = min_t(u8, n, bf->nr);
+ bf->ibf_ncol = bf->nc;
+
+ /* nr is zero-based: limit sounding dimensions from 4 to 2 on 160 MHz */
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ bf->nr = 1;
+}
+
+static void
+mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy, struct sta_rec_bf *bf)
+{
+ struct ieee80211_sta_he_cap *pc = &sta->he_cap;
+ struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
+ const struct ieee80211_he_cap_elem *ve;
+ const struct ieee80211_sta_he_cap *vc;
+ u8 bfee_nr, bfer_nr, nss_mcs;
+ u16 mcs_map;
+
+ vc = mt7915_get_he_phy_cap(phy, vif);
+ ve = &vc->he_cap_elem;
+
+ bf->tx_mode = MT_PHY_TYPE_HE_SU;
+ bf->bf_cap |= MT_EBF;
+
+ mt7915_mcu_sta_sounding_rate(bf);
+
+ bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMER_FB,
+ pe->phy_cap_info[6]);
+ bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMER_FB,
+ pe->phy_cap_info[6]);
+ bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ ve->phy_cap_info[5]);
+ bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[4]);
+
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.tx_mcs_80);
+ nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+
+ bf->nr = min_t(u8, bfer_nr, bfee_nr);
+ bf->nc = min_t(u8, nss_mcs, bf->nr);
+ bf->ibf_ncol = bf->nc;
+
+ if (sta->bandwidth != IEEE80211_STA_RX_BW_160)
+ return;
+
+ /* handle the 160 MHz and 80+80 MHz cases */
+ if (pe->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) {
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160);
+ nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+
+ bf->nc_bw160 = nss_mcs;
+ }
+
+ if (pe->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80);
+ nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+
+ if (bf->nc_bw160)
+ bf->nc_bw160 = min_t(u8, bf->nc_bw160, nss_mcs);
+ else
+ bf->nc_bw160 = nss_mcs;
+ }
+
+ bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ ve->phy_cap_info[5]);
+ bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
+ pe->phy_cap_info[4]);
+
+ bf->nr_bw160 = min_t(int, bfer_nr, bfee_nr);
+}
+
+static void
+mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, struct mt7915_phy *phy,
+ bool enable)
+{
+ struct sta_rec_bf *bf;
+ struct tlv *tlv;
+ int tx_ant = hweight8(phy->chainmask) - 1;
+ const u8 matrix[4][4] = {
+ {0, 0, 0, 0},
+ {1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
+ {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
+ {3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */
+ };
+
+#define MT_BFER_FREE cpu_to_le16(GENMASK(15, 0))
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
+ bf = (struct sta_rec_bf *)tlv;
+
+ if (!enable) {
+ bf->pfmu = MT_BFER_FREE;
+ return;
+ }
+
+ bf->bw = sta->bandwidth;
+ bf->ibf_dbw = sta->bandwidth;
+ bf->ibf_nrow = tx_ant;
+ bf->ibf_timeout = 0x18;
+
+ if (sta->he_cap.has_he)
+ mt7915_mcu_sta_bfer_he(sta, vif, phy, bf);
+ else if (sta->vht_cap.vht_supported)
+ mt7915_mcu_sta_bfer_vht(sta, phy, bf);
+ else if (sta->ht_cap.ht_supported)
+ mt7915_mcu_sta_bfer_ht(sta, bf);
+
+ if (bf->bf_cap & MT_EBF && bf->nr != tx_ant)
+ bf->mem_20m = matrix[tx_ant][bf->nc];
+ else
+ bf->mem_20m = matrix[bf->nr][bf->nc];
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ case IEEE80211_STA_RX_BW_80:
+ bf->mem_total = bf->mem_20m * 2;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bf->mem_total = bf->mem_20m;
+ break;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ break;
+ }
+}
+
+static void
+mt7915_mcu_sta_bfee_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct mt7915_phy *phy)
+{
+ struct sta_rec_bfee *bfee;
+ struct tlv *tlv;
+ int tx_ant = hweight8(phy->chainmask) - 1;
+ u8 nr = 0;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
+ bfee = (struct sta_rec_bfee *)tlv;
+
+ if (sta->he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
+
+ nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[5]);
+ } else if (sta->vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
+
+ nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ pc->cap);
+ }
+
+ /* reply with identity matrix to avoid 2x2 BF negative gain */
+ if (nr == 1 && tx_ant == 2)
+ bfee->fb_identity_matrix = true;
+}
+
+static u8
+mt7915_mcu_sta_txbf_type(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ u8 type = 0;
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return 0;
+
+ if (sta->he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe;
+ const struct ieee80211_he_cap_elem *ve;
+ const struct ieee80211_sta_he_cap *vc;
+
+ pe = &sta->he_cap.he_cap_elem;
+ vc = mt7915_get_he_phy_cap(phy, vif);
+ ve = &vc->he_cap_elem;
+
+ if ((HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) ||
+ HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4])) &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4]))
+ type |= MT_STA_BFEE;
+
+ if ((HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) ||
+ HE_PHY(CAP4_MU_BEAMFORMER, ve->phy_cap_info[4])) &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]))
+ type |= MT_STA_BFER;
+ } else if (sta->vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc;
+ struct ieee80211_sta_vht_cap *vc;
+ u32 cr, ce;
+
+ pc = &sta->vht_cap;
+ vc = &phy->mt76->sband_5g.sband.vht_cap;
+ cr = IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
+ ce = IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ if ((pc->cap & cr) && (vc->cap & ce))
+ type |= MT_STA_BFEE;
+
+ if ((vc->cap & cr) && (pc->cap & ce))
+ type |= MT_STA_BFER;
+ } else if (sta->ht_cap.ht_supported) {
+ /* TODO: iBF */
+ }
+
+ return type;
+}
+
+static int
+mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_phy *phy;
+ struct sk_buff *skb;
+ int r, len;
+ u8 type;
+
+ phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+
+ type = mt7915_mcu_sta_txbf_type(phy, vif, sta);
+
+ /* must keep each tag independent */
+
+ /* starec bf */
+ if (type & MT_STA_BFER) {
+ len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bf);
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable);
+
+ r = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+ if (r)
+ return r;
+ }
+
+ /* starec bfee */
+ if (type & MT_STA_BFEE) {
+ len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bfee);
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_bfee_tlv(skb, sta, phy);
+
+ r = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
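+/*
+ * Rate-control TLV: collect the legacy, HT, VHT and HE rate sets plus
+ * the peer capability bits; the highest supported mode determines the
+ * initial phy type/MCS/NSS.
+ */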
+static void
+mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
+ struct sta_rec_ra *ra;
+ struct tlv *tlv;
+ enum nl80211_band band = chandef->chan->band;
+ u32 supp_rate = sta->supp_rates[band];
+ int n_rates = hweight32(supp_rate);
+ u32 cap = sta->wme ? STA_CAP_WMM : 0;
+ u8 i, nss = sta->rx_nss, mcs = 0;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
+
+ ra = (struct sta_rec_ra *)tlv;
+ ra->valid = true;
+ ra->auto_rate = true;
+ ra->phy_mode = mt7915_get_phy_mode(dev, vif, band, sta);
+ ra->channel = chandef->chan->hw_value;
+ ra->bw = sta->bandwidth;
+ ra->rate_len = n_rates;
+ ra->phy.bw = sta->bandwidth;
+
+ if (n_rates) {
+ if (band == NL80211_BAND_2GHZ) {
+ ra->supp_mode = MODE_CCK;
+ ra->supp_cck_rate = supp_rate & GENMASK(3, 0);
+ ra->phy.type = MT_PHY_TYPE_CCK;
+
+ if (n_rates > 4) {
+ ra->supp_mode |= MODE_OFDM;
+ ra->supp_ofdm_rate = supp_rate >> 4;
+ ra->phy.type = MT_PHY_TYPE_OFDM;
+ }
+ } else {
+ ra->supp_mode = MODE_OFDM;
+ ra->supp_ofdm_rate = supp_rate;
+ ra->phy.type = MT_PHY_TYPE_OFDM;
+ }
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ for (i = 0; i < nss; i++)
+ ra->ht_mcs[i] = sta->ht_cap.mcs.rx_mask[i];
+
+ ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
+ ra->supp_mode |= MODE_HT;
+ mcs = hweight32(le32_to_cpu(ra->supp_ht_mcs)) - 1;
+ ra->af = sta->ht_cap.ampdu_factor;
+ ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+
+ cap |= STA_CAP_HT;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ cap |= STA_CAP_SGI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ cap |= STA_CAP_SGI_40;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
+ cap |= STA_CAP_TX_STBC;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ cap |= STA_CAP_RX_STBC;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ cap |= STA_CAP_LDPC;
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+ u16 vht_mcs;
+ u8 af, mcs_prev;
+
+ af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ sta->vht_cap.cap);
+ ra->af = max_t(u8, ra->af, af);
+
+ cap |= STA_CAP_VHT;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ cap |= STA_CAP_VHT_SGI_80;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ cap |= STA_CAP_VHT_SGI_160;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
+ cap |= STA_CAP_VHT_TX_STBC;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ cap |= STA_CAP_VHT_RX_STBC;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ cap |= STA_CAP_VHT_LDPC;
+
+ ra->supp_mode |= MODE_VHT;
+ for (mcs = 0, i = 0; i < nss; i++, mcs_map >>= 2) {
+ switch (mcs_map & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ vht_mcs = GENMASK(9, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ vht_mcs = GENMASK(8, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ vht_mcs = GENMASK(7, 0);
+ break;
+ default:
+ vht_mcs = 0;
+ }
+
+ ra->supp_vht_mcs[i] = cpu_to_le16(vht_mcs);
+
+ mcs_prev = hweight16(vht_mcs) - 1;
+ if (mcs_prev > mcs)
+ mcs = mcs_prev;
+
+ /* only support 2ss on 160MHz */
+ if (i > 1 && (ra->bw == CMD_CBW_160MHZ ||
+ ra->bw == CMD_CBW_8080MHZ))
+ break;
+ }
+ }
+
+ if (sta->he_cap.has_he) {
+ ra->supp_mode |= MODE_HE;
+ cap |= STA_CAP_HE;
+ }
+
+ ra->sta_status = cpu_to_le32(cap);
+
+ switch (BIT(fls(ra->supp_mode) - 1)) {
+ case MODE_VHT:
+ ra->phy.type = MT_PHY_TYPE_VHT;
+ ra->phy.mcs = mcs;
+ ra->phy.nss = nss;
+ ra->phy.stbc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC);
+ ra->phy.ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ ra->phy.sgi =
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ break;
+ case MODE_HT:
+ ra->phy.type = MT_PHY_TYPE_HT;
+ ra->phy.mcs = mcs;
+ ra->phy.ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ ra->phy.stbc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC);
+ ra->phy.sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ break;
+ default:
+ break;
+ }
+}
+
+int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sk_buff *skb;
+ int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_ra);
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ int ret;
+
+ if (!sta)
+ return 0;
+
+ /* must keep the order */
+ ret = mt7915_mcu_add_txbf(dev, vif, sta, enable);
+ if (ret)
+ return ret;
+
+ if (enable)
+ return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
+
+ return 0;
+}
+
+int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct mt7915_sta *msta;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT7915_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ if (enable && sta)
+ mt7915_mcu_sta_tlv(dev, skb, sta);
+
+ sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
+ sta_wtbl, &skb);
+ if (enable) {
+ mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
+ if (sta)
+ mt7915_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
+ struct ieee80211_sta *sta, u32 rate)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
+ struct sta_rec_ra_fixed *ra;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+ int len = sizeof(struct sta_req_hdr) + sizeof(*ra);
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
+ ra = (struct sta_rec_ra_fixed *)tlv;
+
+ if (!rate) {
+ ra->field = cpu_to_le32(RATE_PARAM_AUTO);
+ goto out;
+ }
+
+ ra->field = cpu_to_le32(RATE_PARAM_FIXED);
+
+ ra->phy.type = FIELD_GET(RATE_CFG_PHY_TYPE, rate);
+ ra->phy.bw = FIELD_GET(RATE_CFG_BW, rate);
+ ra->phy.nss = FIELD_GET(RATE_CFG_NSS, rate);
+ ra->phy.mcs = FIELD_GET(RATE_CFG_MCS, rate);
+ ra->phy.stbc = FIELD_GET(RATE_CFG_STBC, rate);
+
+ if (ra->phy.bw)
+ ra->phy.ldpc = 7;
+ else
+ ra->phy.ldpc = FIELD_GET(RATE_CFG_LDPC, rate) * 7;
+
+ /* HT/VHT - SGI: 1, LGI: 0; HE - SGI: 0, MGI: 1, LGI: 2 */
+ if (ra->phy.type > MT_PHY_TYPE_VHT)
+ ra->phy.sgi = ra->phy.mcs * 85;
+ else
+ ra->phy.sgi = ra->phy.mcs * 15;
+
+out:
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
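+/*
+ * DEV_INFO_UPDATE is sent as a raw message: a request header followed
+ * by a single DEV_INFO_ACTIVE TLV carrying the OMAC address.
+ */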
+int mt7915_mcu_add_dev_info(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 dbdc_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv[3];
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 dbdc_idx;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .dbdc_idx = mvif->band_idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ .dbdc_idx = mvif->band_idx,
+ },
+ };
+
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ &data, sizeof(data), true);
+}
+
+static void
+mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb,
+ struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ if (offs->csa_counter_offs[0]) {
+ struct tlv *tlv;
+ struct bss_info_bcn_csa *csa;
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CSA,
+ sizeof(*csa), &bcn->sub_ntlv,
+ &bcn->len);
+ csa = (struct bss_info_bcn_csa *)tlv;
+ csa->cnt = skb->data[offs->csa_counter_offs[0]];
+ }
+}
+
+static void
+mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
+ struct sk_buff *skb, struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct bss_info_bcn_cont *cont;
+ struct tlv *tlv;
+ u8 *buf;
+ int len = sizeof(*cont) + MT_TXD_SIZE + skb->len;
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT,
+ len, &bcn->sub_ntlv, &bcn->len);
+
+ cont = (struct bss_info_bcn_cont *)tlv;
+ cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ cont->tim_ofs = cpu_to_le16(offs->tim_offset);
+
+ if (offs->csa_counter_offs[0])
+ cont->csa_ofs = cpu_to_le16(offs->csa_counter_offs[0] - 4);
+
+ buf = (u8 *)tlv + sizeof(*cont);
+ mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL,
+ true);
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+}
+
+int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int en)
+{
+#define MAX_BEACON_SIZE 512
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_mutable_offsets offs;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb, *rskb;
+ struct tlv *tlv;
+ struct bss_info_bcn *bcn;
+ int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+
+ rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+ tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ bcn = (struct bss_info_bcn *)tlv;
+ bcn->enable = en;
+
+ skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!skb) {
+ dev_kfree_skb(rskb);
+ return -EINVAL;
+ }
+
+ if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "Beacon size limit exceeded\n");
+ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (mvif->band_idx) {
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ }
+
+ /* TODO: subtag - bss color count & 11v MBSSID */
+ mt7915_mcu_beacon_csa(rskb, skb, bcn, &offs);
+ mt7915_mcu_beacon_cont(dev, rskb, skb, bcn, &offs);
+ dev_kfree_skb(skb);
+
+ return __mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+}
+
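+/*
+ * Stream a firmware image to the MCU in FW_SCATTER chunks; each chunk
+ * is capped at 4096 bytes minus the MCU TXD headroom.
+ */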
+static int mt7915_mcu_send_firmware(struct mt7915_dev *dev, const void *data,
+ int len)
+{
+ int ret = 0, cur_len;
+
+ while (len > 0) {
+ cur_len = min_t(int, 4096 - sizeof(struct mt7915_mcu_txd),
+ len);
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ data, cur_len, false);
+ if (ret)
+ break;
+
+ data += cur_len;
+ len -= cur_len;
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ }
+
+ return ret;
+}
+
+static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr,
+ u32 option)
+{
+ struct {
+ __le32 option;
+ __le32 addr;
+ } req = {
+ .option = cpu_to_le32(option),
+ .addr = cpu_to_le32(addr),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+ &req, sizeof(req), true);
+}
+
+static int mt7915_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 power_mode;
+ u8 rsv[3];
+ } req = {
+ .power_mode = 1,
+ };
+
+ return __mt76_mcu_send_msg(dev, -MCU_CMD_NIC_POWER_CTRL, &req,
+ sizeof(req), false);
+}
+
+static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get)
+{
+ struct {
+ __le32 op;
+ } req = {
+ .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL,
+ &req, sizeof(req), true);
+}
+
+static int mt7915_mcu_start_patch(struct mt7915_dev *dev)
+{
+ struct {
+ u8 check_crc;
+ u8 reserved[3];
+ } req = {
+ .check_crc = 0,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ,
+ &req, sizeof(req), true);
+}
+
+static int mt7915_driver_own(struct mt7915_dev *dev)
+{
+ u32 reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
+
+ mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN,
+ 0, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for driver own\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mt7915_mcu_init_download(struct mt7915_dev *dev, u32 addr,
+ u32 len, u32 mode)
+{
+ struct {
+ __le32 addr;
+ __le32 len;
+ __le32 mode;
+ } req = {
+ .addr = cpu_to_le32(addr),
+ .len = cpu_to_le32(len),
+ .mode = cpu_to_le32(mode),
+ };
+ int attr;
+
+ if (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS))
+ attr = -MCU_CMD_PATCH_START_REQ;
+ else
+ attr = -MCU_CMD_TARGET_ADDRESS_LEN_REQ;
+
+ return __mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true);
+}
+
+static int mt7915_load_patch(struct mt7915_dev *dev)
+{
+ const struct mt7915_patch_hdr *hdr;
+ const struct firmware *fw = NULL;
+ int i, ret, sem;
+
+ sem = mt7915_mcu_patch_sem_ctrl(dev, 1);
+ switch (sem) {
+ case PATCH_IS_DL:
+ return 0;
+ case PATCH_NOT_DL_SEM_SUCCESS:
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
+ return -EAGAIN;
+ }
+
+ ret = request_firmware(&fw, MT7915_ROM_PATCH, dev->mt76.dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7915_patch_hdr *)(fw->data);
+
+ dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+ for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
+ struct mt7915_patch_sec *sec;
+ const u8 *dl;
+ u32 len, addr;
+
+ sec = (struct mt7915_patch_sec *)(fw->data + sizeof(*hdr) +
+ i * sizeof(*sec));
+ if ((be32_to_cpu(sec->type) & PATCH_SEC_TYPE_MASK) !=
+ PATCH_SEC_TYPE_INFO) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ addr = be32_to_cpu(sec->info.addr);
+ len = be32_to_cpu(sec->info.len);
+ dl = fw->data + be32_to_cpu(sec->offs);
+
+ ret = mt7915_mcu_init_download(dev, addr, len,
+ DL_MODE_NEED_RSP);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7915_mcu_send_firmware(dev, dl, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send patch\n");
+ goto out;
+ }
+ }
+
+ ret = mt7915_mcu_start_patch(dev);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start patch\n");
+
+out:
+ sem = mt7915_mcu_patch_sem_ctrl(dev, 0);
+ switch (sem) {
+ case PATCH_REL_SEM_SUCCESS:
+ break;
+ default:
+ ret = -EAGAIN;
+ dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
+ break;
+ }
+ release_firmware(fw);
+
+ return ret;
+}
+
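+/*
+ * Translate the per-region feature bits into download-mode flags:
+ * encryption plus IV reset, key index, and whether the region targets
+ * the WA (CR4) processor.
+ */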
+static u32 mt7915_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
+{
+ u32 ret = 0;
+
+ ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
+ (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
+
+static int
+mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
+ const struct mt7915_fw_trailer *hdr,
+ const u8 *data, bool is_wa)
+{
+ int i, offset = 0;
+ u32 override = 0, option = 0;
+
+ for (i = 0; i < hdr->n_region; i++) {
+ const struct mt7915_fw_region *region;
+ int err;
+ u32 len, addr, mode;
+
+ region = (const struct mt7915_fw_region *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
+ mode = mt7915_mcu_gen_dl_mode(region->feature_set, is_wa);
+ len = le32_to_cpu(region->len);
+ addr = le32_to_cpu(region->addr);
+
+ if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
+ override = addr;
+
+ err = mt7915_mcu_init_download(dev, addr, len, mode);
+ if (err) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ return err;
+ }
+
+ err = mt7915_mcu_send_firmware(dev, data + offset, len);
+ if (err) {
+ dev_err(dev->mt76.dev, "Failed to send firmware.\n");
+ return err;
+ }
+
+ offset += len;
+ }
+
+ if (override)
+ option |= FW_START_OVERRIDE;
+
+ if (is_wa)
+ option |= FW_START_WORKING_PDA_CR4;
+
+ return mt7915_mcu_start_firmware(dev, override, option);
+}
+
+static int mt7915_load_ram(struct mt7915_dev *dev)
+{
+ const struct mt7915_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, MT7915_FIRMWARE_WM, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size -
+ sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, false);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start WM firmware\n");
+ goto out;
+ }
+
+ release_firmware(fw);
+
+ ret = request_firmware(&fw, MT7915_FIRMWARE_WA, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7915_fw_trailer *)(fw->data + fw->size -
+ sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ ret = mt7915_mcu_send_ram_firmware(dev, hdr, fw->data, true);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start WA firmware\n");
+ goto out;
+ }
+
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
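+/*
+ * Full firmware bring-up: wait for the download state (restarting the
+ * MCU once if needed), load the ROM patch, then the WM and WA RAM
+ * images, and finally wait for the WA CPU to become ready.
+ */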
+static int mt7915_load_firmware(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
+
+ val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD);
+
+ if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) {
+ /* restart firmware once */
+ __mt76_mcu_restart(&dev->mt76);
+ if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+ val, 1000)) {
+ dev_err(dev->mt76.dev,
+ "Firmware is not ready for download\n");
+ return -EIO;
+ }
+ }
+
+ ret = mt7915_load_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7915_load_ram(dev);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+ FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ FW_STATE_WACPU_RDY), 1000)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+
+ return 0;
+}
+
+int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl)
+{
+ struct {
+ u8 ctrl_val;
+ u8 pad[3];
+ } data = {
+ .ctrl_val = ctrl
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST,
+ &data, sizeof(data), true);
+}
+
+int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level)
+{
+ struct {
+ u8 ver;
+ u8 pad;
+ __le16 len;
+ u8 level;
+ u8 rsv[3];
+ __le32 module_idx;
+ } data = {
+ .module_idx = cpu_to_le32(module),
+ .level = level,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_DBG_CTRL,
+ &data, sizeof(data), false);
+}
+
+int mt7915_mcu_init(struct mt7915_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7915_mcu_ops = {
+ .headroom = sizeof(struct mt7915_mcu_txd),
+ .mcu_skb_send_msg = mt7915_mcu_send_message,
+ .mcu_send_msg = mt7915_mcu_msg_send,
+ .mcu_restart = mt7915_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mt7915_mcu_ops;
+
+ ret = mt7915_driver_own(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7915_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt7915_mcu_fw_log_2_host(dev, 0);
+
+ return 0;
+}
+
+void mt7915_mcu_exit(struct mt7915_dev *dev)
+{
+ u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
+
+ __mt76_mcu_restart(&dev->mt76);
+ if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+ FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ FW_STATE_FW_DOWNLOAD), 1000)) {
+ dev_err(dev->mt76.dev, "Failed to exit mcu\n");
+ return;
+ }
+
+ reg = mt7915_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
+ mt76_wr(dev, reg, MT_TOP_LPCR_HOST_FW_OWN);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+}
+
+int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
+ bool enable, bool hdr_trans)
+{
+ struct {
+ u8 operation;
+ u8 enable;
+ u8 check_bssid;
+ u8 insert_vlan;
+ u8 remove_vlan;
+ u8 tid;
+ u8 mode;
+ u8 rsv;
+ } __packed req_trans = {
+ .enable = hdr_trans,
+ };
+ struct {
+ u8 enable;
+ u8 band;
+ u8 rsv[2];
+ } __packed req_mac = {
+ .enable = enable,
+ .band = band,
+ };
+ int ret;
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS,
+ &req_trans, sizeof(req_trans), false);
+ if (ret)
+ return ret;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
+ &req_mac, sizeof(req_mac), true);
+}
+
+int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable)
+{
+ struct {
+ __le32 cmd;
+ u8 band;
+ u8 enable;
+ } __packed req = {
+ .cmd = cpu_to_le32(SCS_ENABLE),
+ .band = band,
+ .enable = enable + 1,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SCS_CTRL, &req,
+ sizeof(req), false);
+}
+
+int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 prot_idx;
+ u8 band;
+ u8 rsv[2];
+ __le32 len_thresh;
+ __le32 pkt_thresh;
+ } __packed req = {
+ .prot_idx = 1,
+ .band = phy != &dev->phy,
+ .len_thresh = cpu_to_le32(val),
+ .pkt_thresh = cpu_to_le32(0x2),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
+{
+#define WMM_AIFS_SET BIT(0)
+#define WMM_CW_MIN_SET BIT(1)
+#define WMM_CW_MAX_SET BIT(2)
+#define WMM_TXOP_SET BIT(3)
+#define WMM_PARAM_SET GENMASK(3, 0)
+#define TX_CMD_MODE 1
+ struct edca {
+ u8 queue;
+ u8 set;
+ u8 aifs;
+ u8 cw_min;
+ __le16 cw_max;
+ __le16 txop;
+ };
+ struct mt7915_mcu_tx {
+ u8 total;
+ u8 action;
+ u8 valid;
+ u8 mode;
+
+ struct edca edca[IEEE80211_NUM_ACS];
+ } __packed req = {
+ .valid = true,
+ .mode = TX_CMD_MODE,
+ .total = IEEE80211_NUM_ACS,
+ };
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct edca *e = &req.edca[ac];
+
+ e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS;
+ e->aifs = mvif->wmm[ac].aifs;
+ e->txop = cpu_to_le16(mvif->wmm[ac].txop);
+
+ if (mvif->wmm[ac].cw_min)
+ e->cw_min = fls(mvif->wmm[ac].cw_min);
+ else
+ e->cw_min = 5;
+
+ if (mvif->wmm[ac].cw_max)
+ e->cw_max = cpu_to_le16(fls(mvif->wmm[ac].cw_max));
+ else
+ e->cw_max = cpu_to_le16(10);
+ }
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter)
+{
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx_lo;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 wlan_idx_hi;
+ u8 rsv[2];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = band,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev,
+ enum mt7915_rdd_cmd cmd, u8 index,
+ u8 rx_sel, u8 val)
+{
+ struct {
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
+ } __packed req = {
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
+{
+ struct {
+ __le32 tag;
+ __le16 min_lpn;
+ u8 rsv[2];
+ } __packed req = {
+ .tag = cpu_to_le32(0x1),
+ .min_lpn = cpu_to_le16(val),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
+ const struct mt7915_dfs_pulse *pulse)
+{
+ struct {
+ __le32 tag;
+ struct mt7915_dfs_pulse pulse;
+ } __packed req = {
+ .tag = cpu_to_le32(0x3),
+ };
+
+ memcpy(&req.pulse, pulse, sizeof(*pulse));
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
+ const struct mt7915_dfs_pattern *pattern)
+{
+ struct {
+ __le32 tag;
+ __le16 radar_type;
+ struct mt7915_dfs_pattern pattern;
+ } __packed req = {
+ .tag = cpu_to_le32(0x2),
+ .radar_type = cpu_to_le16(index),
+ };
+
+ memcpy(&req.pattern, pattern, sizeof(*pattern));
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = chandef->center_freq1;
+ struct {
+ u8 control_ch;
+ u8 center_ch;
+ u8 bw;
+ u8 tx_streams_num;
+ u8 rx_streams; /* mask or num */
+ u8 switch_reason;
+ u8 band_idx;
+ u8 center_ch2; /* for 80+80 only */
+ __le16 cac_case;
+ u8 channel_band;
+ u8 rsv0;
+ __le32 outband_freq;
+ u8 txpower_drop;
+ u8 ap_bw;
+ u8 ap_center_ch;
+ u8 rsv1[57];
+ } __packed req = {
+ .control_ch = chandef->chan->hw_value,
+ .center_ch = ieee80211_frequency_to_channel(freq1),
+ .bw = mt7915_mcu_chan_bw(chandef),
+ .tx_streams_num = hweight8(phy->mt76->antenna_mask),
+ .rx_streams = phy->chainmask,
+ .band_idx = phy != &dev->phy,
+ .channel_band = chandef->chan->band,
+ };
+
+ if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ req.switch_reason = CH_SWITCH_DFS;
+ else
+ req.switch_reason = CH_SWITCH_NORMAL;
+
+ if (cmd == MCU_EXT_CMD_CHANNEL_SWITCH)
+ req.rx_streams = hweight8(req.rx_streams);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ int freq2 = chandef->center_freq2;
+
+ req.center_ch2 = ieee80211_frequency_to_channel(freq2);
+ }
+
+ return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
+{
+ struct req_hdr {
+ u8 buffer_mode;
+ u8 format;
+ __le16 len;
+ } __packed req = {
+ .buffer_mode = EE_MODE_EFUSE,
+ .format = EE_FORMAT_WHOLE,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
+{
+ struct mt7915_mcu_eeprom_info req = {
+ .addr = cpu_to_le32(round_down(offset, 16)),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_ACCESS, &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index)
+{
+ struct {
+ u8 ctrl_id;
+ u8 action;
+ u8 band;
+ u8 rsv[5];
+ } req = {
+ .ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
+ .action = index,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_THERMAL_CTRL, &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_get_rate_info(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
+{
+ struct {
+ __le32 cmd;
+ __le16 wlan_idx;
+ __le16 ru_idx;
+ __le16 direction;
+ __le16 dump_group;
+ } req = {
+ .cmd = cpu_to_le32(cmd),
+ .wlan_idx = cpu_to_le16(wlan_idx),
+ .dump_group = cpu_to_le16(1),
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RATE_CTRL, &req,
+ sizeof(req), false);
+}
+
+int mt7915_mcu_set_sku(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct mt7915_sku_val {
+ u8 format_id;
+ u8 limit_type;
+ u8 dbdc_idx;
+ s8 val[MT7915_SKU_RATE_NUM];
+ } __packed req = {
+ .format_id = 4,
+ .dbdc_idx = phy != &dev->phy,
+ };
+ int i;
+ s8 *delta;
+
+ delta = dev->rate_power[mphy->chandef.chan->band];
+ mphy->txpower_cur = hw->conf.power_level * 2 +
+ delta[MT7915_SKU_MAX_DELTA_IDX];
+
+ for (i = 0; i < MT7915_SKU_RATE_NUM; i++)
+ req.val[i] = hw->conf.power_level * 2 + delta[i];
+
+ return __mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sku {
+ u8 format_id;
+ u8 sku_enable;
+ u8 dbdc_idx;
+ u8 rsv;
+ } __packed req = {
+ .format_id = 0,
+ .dbdc_idx = phy != &dev->phy,
+ .sku_enable = enable,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band)
+{
+ struct {
+ u8 action;
+ u8 set;
+ u8 band;
+ u8 rsv;
+ } req = {
+ .action = action,
+ .set = set,
+ .band = band,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SER_TRIGGER,
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev)
+{
+#define MT_BF_TYPE_UPDATE 20
+ struct {
+ u8 action;
+ bool ebf;
+ bool ibf;
+ u8 rsv;
+ } __packed req = {
+ .action = MT_BF_TYPE_UPDATE,
+ .ebf = true,
+ .ibf = false,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev)
+{
+#define MT_BF_PROCESSING 4
+ struct {
+ u8 action;
+ u8 snd_mode;
+ u8 sta_num;
+ u8 rsv;
+ u8 wlan_idx[4];
+ __le32 snd_period; /* ms */
+ } __packed req = {
+ .action = true,
+ .snd_mode = MT_BF_PROCESSING,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION,
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+#define MT_SPR_ENABLE 1
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct {
+ u8 action;
+ u8 arg_num;
+ u8 band_idx;
+ u8 status;
+ u8 drop_tx_idx;
+ u8 sta_idx; /* 256 sta */
+ u8 rsv[2];
+ u32 val;
+ } __packed req = {
+ .action = MT_SPR_ENABLE,
+ .arg_num = 1,
+ .band_idx = mvif->band_idx,
+ .val = enable,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SPR,
+ &req, sizeof(req), true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
new file mode 100644
index 000000000000..c241dd7c4c36
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_MCU_H
+#define __MT7915_MCU_H
+
+struct mt7915_mcu_txd {
+ __le32 txd[8];
+
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+	u8 set_query; /* FW doesn't care */
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 s2d_index;
+ u8 ext_cid_ack;
+
+ u32 reserved[5];
+} __packed __aligned(4);
+
+/* event table */
+enum {
+ MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
+ MCU_EVENT_FW_START = 0x01,
+ MCU_EVENT_GENERIC = 0x01,
+ MCU_EVENT_ACCESS_REG = 0x02,
+ MCU_EVENT_MT_PATCH_SEM = 0x04,
+ MCU_EVENT_CH_PRIVILEGE = 0x18,
+ MCU_EVENT_EXT = 0xed,
+ MCU_EVENT_RESTART_DL = 0xef,
+};
+
+/* ext event table */
+enum {
+ MCU_EXT_EVENT_PS_SYNC = 0x5,
+ MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
+ MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
+ MCU_EXT_EVENT_RDD_REPORT = 0x3a,
+ MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
+ MCU_EXT_EVENT_RATE_REPORT = 0x87,
+};
+
+struct mt7915_mcu_rxd {
+ __le32 rxd[6];
+
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ __le16 __rsv;
+
+ u8 ext_eid;
+ u8 __rsv1[2];
+ u8 s2d_index;
+};
+
+struct mt7915_mcu_rdd_report {
+ struct mt7915_mcu_rxd rxd;
+
+ u8 idx;
+ u8 long_detected;
+ u8 constant_prf_detected;
+ u8 staggered_prf_detected;
+ u8 radar_type_idx;
+ u8 periodic_pulse_num;
+ u8 long_pulse_num;
+ u8 hw_pulse_num;
+
+ u8 out_lpn;
+ u8 out_spn;
+ u8 out_crpn;
+ u8 out_crpw;
+ u8 out_crbn;
+ u8 out_stgpn;
+ u8 out_stgpw;
+
+ u8 rsv;
+
+ __le32 out_pri_const;
+ __le32 out_pri_stg[3];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 mdrdy_flag;
+ u8 rsv[3];
+ } long_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 mdrdy_flag;
+ u8 rsv[3];
+ } periodic_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 sc_pass;
+ u8 sw_reset;
+ u8 mdrdy_flag;
+ u8 tx_active;
+ } hw_pulse[32];
+} __packed;
+
+struct mt7915_mcu_eeprom_info {
+ __le32 addr;
+ __le32 valid;
+ u8 data[16];
+} __packed;
+
+struct mt7915_mcu_ra_info {
+ struct mt7915_mcu_rxd rxd;
+
+ __le32 event_id;
+ __le16 wlan_idx;
+ __le16 ru_idx;
+ __le16 direction;
+ __le16 dump_group;
+
+ __le32 suggest_rate;
+ __le32 min_rate; /* for dynamic sounding */
+ __le32 max_rate; /* for dynamic sounding */
+ __le32 init_rate_down_rate;
+
+ __le16 curr_rate;
+ __le16 init_rate_down_total;
+ __le16 init_rate_down_succ;
+ __le16 success;
+ __le16 attempts;
+
+ __le16 prev_rate;
+ __le16 prob_up_rate;
+ u8 no_rate_up_cnt;
+ u8 ppdu_cnt;
+ u8 gi;
+
+ u8 try_up_fail;
+ u8 try_up_total;
+ u8 suggest_wf;
+ u8 try_up_check;
+ u8 prob_up_period;
+ u8 prob_down_pending;
+} __packed;
+
+#define MT_RA_RATE_NSS GENMASK(8, 6)
+#define MT_RA_RATE_MCS GENMASK(3, 0)
+#define MT_RA_RATE_TX_MODE GENMASK(12, 9)
+#define MT_RA_RATE_DCM_EN BIT(4)
+#define MT_RA_RATE_BW GENMASK(14, 13)
+
+#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID 0xa0
+
+enum {
+ MCU_Q_QUERY,
+ MCU_Q_SET,
+ MCU_Q_RESERVED,
+ MCU_Q_NA
+};
+
+enum {
+ MCU_S2D_H2N,
+ MCU_S2D_C2N,
+ MCU_S2D_H2C,
+ MCU_S2D_H2CN
+};
+
+enum {
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
+ MCU_CMD_FW_START_REQ = 0x02,
+ MCU_CMD_INIT_ACCESS_REG = 0x3,
+ MCU_CMD_NIC_POWER_CTRL = 0x4,
+ MCU_CMD_PATCH_START_REQ = 0x05,
+ MCU_CMD_PATCH_FINISH_REQ = 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_EXT_CID = 0xED,
+ MCU_CMD_FW_SCATTER = 0xEE,
+ MCU_CMD_RESTART_DL_REQ = 0xEF,
+};
+
+enum {
+ MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
+ MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+ MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+ MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_CMD_TXBF_ACTION = 0x1e,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
+ MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
+ MCU_EXT_CMD_EDCA_UPDATE = 0x27,
+ MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
+ MCU_EXT_CMD_THERMAL_CTRL = 0x2c,
+ MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
+ MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+ MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
+ MCU_EXT_CMD_RX_HDR_TRANS = 0x47,
+ MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+ MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
+ MCU_EXT_CMD_SCS_CTRL = 0x82,
+ MCU_EXT_CMD_RATE_CTRL = 0x87,
+ MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
+ MCU_EXT_CMD_SET_RDD_TH = 0x9d,
+ MCU_EXT_CMD_SET_SPR = 0xa8,
+};
+
+enum {
+ PATCH_SEM_RELEASE,
+ PATCH_SEM_GET
+};
+
+enum {
+ PATCH_NOT_DL_SEM_FAIL,
+ PATCH_IS_DL,
+ PATCH_NOT_DL_SEM_SUCCESS,
+ PATCH_REL_SEM_SUCCESS
+};
+
+enum {
+ FW_STATE_INITIAL,
+ FW_STATE_FW_DOWNLOAD,
+ FW_STATE_NORMAL_OPERATION,
+ FW_STATE_NORMAL_TRX,
+ FW_STATE_WACPU_RDY = 7
+};
+
+enum {
+ EE_MODE_EFUSE,
+ EE_MODE_BUFFER,
+};
+
+enum {
+ EE_FORMAT_BIN,
+ EE_FORMAT_WHOLE,
+ EE_FORMAT_MULTIPLE,
+};
+
+#define STA_TYPE_STA BIT(0)
+#define STA_TYPE_AP BIT(1)
+#define STA_TYPE_ADHOC BIT(2)
+#define STA_TYPE_WDS BIT(4)
+#define STA_TYPE_BC BIT(5)
+
+#define NETWORK_INFRA BIT(16)
+#define NETWORK_P2P BIT(17)
+#define NETWORK_IBSS BIT(18)
+#define NETWORK_WDS BIT(21)
+
+#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
+#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
+#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
+#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
+#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
+#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
+#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
+
+#define CONN_STATE_DISCONNECT 0
+#define CONN_STATE_CONNECT 1
+#define CONN_STATE_PORT_SECURE 2
+
+enum {
+ DEV_INFO_ACTIVE,
+ DEV_INFO_MAX_NUM
+};
+
+enum {
+ SCS_SEND_DATA,
+ SCS_SET_MANUAL_PD_TH,
+ SCS_CONFIG,
+ SCS_ENABLE,
+ SCS_SHOW_INFO,
+ SCS_GET_GLO_ADDR,
+ SCS_GET_GLO_ADDR_EVENT,
+};
+
+enum {
+ CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
+ CMD_CBW_40MHZ = IEEE80211_STA_RX_BW_40,
+ CMD_CBW_80MHZ = IEEE80211_STA_RX_BW_80,
+ CMD_CBW_160MHZ = IEEE80211_STA_RX_BW_160,
+ CMD_CBW_10MHZ,
+ CMD_CBW_5MHZ,
+ CMD_CBW_8080MHZ,
+
+ CMD_HE_MCS_BW80 = 0,
+ CMD_HE_MCS_BW160,
+ CMD_HE_MCS_BW8080,
+ CMD_HE_MCS_BW_NUM
+};
+
+struct tlv {
+ __le16 tag;
+ __le16 len;
+} __packed;
+
+struct bss_info_omac {
+ __le16 tag;
+ __le16 len;
+ u8 hw_bss_idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 rsv0;
+ __le32 conn_type;
+ u32 rsv1;
+} __packed;
+
+struct bss_info_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 network_type;
+ u8 active;
+ u8 rsv0;
+ __le16 bcn_interval;
+ u8 bssid[ETH_ALEN];
+ u8 wmm_idx;
+ u8 dtim_period;
+ u8 bmc_wcid_lo;
+ u8 cipher;
+ u8 phy_mode;
+ u8 max_bssid; /* max BSSID. range: 1 ~ 8, 0: MBSSID disabled */
+	u8 non_tx_bssid; /* non-transmitted BSSID, 0: transmitted BSSID */
+	u8 bmc_wcid_hi; /* high byte and version */
+ u8 rsv[2];
+} __packed;
+
+struct bss_info_rf_ch {
+ __le16 tag;
+ __le16 len;
+ u8 pri_ch;
+ u8 center_ch0;
+ u8 center_ch1;
+ u8 bw;
+ u8 he_ru26_block; /* 1: don't send HETB in RU26, 0: allow */
+ u8 he_all_disable; /* 1: disallow all HETB, 0: allow */
+ u8 rsv[2];
+} __packed;
+
+struct bss_info_ext_bss {
+ __le16 tag;
+ __le16 len;
+ __le32 mbss_tsf_offset; /* in unit of us */
+ u8 rsv[8];
+} __packed;
+
+struct bss_info_sync_mode {
+ __le16 tag;
+ __le16 len;
+ __le16 bcn_interval;
+ u8 enable;
+ u8 dtim_period;
+ u8 rsv[8];
+} __packed;
+
+struct bss_info_bmc_rate {
+ __le16 tag;
+ __le16 len;
+ __le16 bc_trans;
+ __le16 mc_trans;
+ u8 short_preamble;
+ u8 rsv[7];
+} __packed;
+
+struct bss_info_ra {
+ __le16 tag;
+ __le16 len;
+ u8 op_mode;
+ u8 adhoc_en;
+ u8 short_preamble;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 algo;
+ u8 force_sgi;
+ u8 force_gf;
+ u8 ht_mode;
+	u8 has_20_sta; /* check if any STA supports GF */
+ u8 bss_width_trigger_events;
+ u8 vht_nss_cap;
+	u8 vht_bw_signal; /* not used */
+	u8 vht_force_sgi; /* not used */
+ u8 se_off;
+ u8 antenna_idx;
+ u8 train_up_rule;
+ u8 rsv[3];
+ unsigned short train_up_high_thres;
+ short train_up_rule_rssi;
+ unsigned short low_traffic_thres;
+ __le16 max_phyrate;
+ __le32 phy_cap;
+ __le32 interval;
+ __le32 fast_interval;
+} __packed;
+
+struct bss_info_he {
+ __le16 tag;
+ __le16 len;
+ u8 he_pe_duration;
+ u8 vht_op_info_present;
+ __le16 he_rts_thres;
+ __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
+ u8 rsv[6];
+} __packed;
+
+struct bss_info_bcn {
+ __le16 tag;
+ __le16 len;
+ u8 ver;
+ u8 enable;
+ __le16 sub_ntlv;
+} __packed __aligned(4);
+
+struct bss_info_bcn_csa {
+ __le16 tag;
+ __le16 len;
+ u8 cnt;
+ u8 rsv[3];
+} __packed __aligned(4);
+
+struct bss_info_bcn_bcc {
+ __le16 tag;
+ __le16 len;
+ u8 cnt;
+ u8 rsv[3];
+} __packed __aligned(4);
+
+struct bss_info_bcn_mbss {
+#define MAX_BEACON_NUM 32
+ __le16 tag;
+ __le16 len;
+ __le32 bitmap;
+ __le16 offset[MAX_BEACON_NUM];
+ u8 rsv[8];
+} __packed __aligned(4);
+
+struct bss_info_bcn_cont {
+ __le16 tag;
+ __le16 len;
+ __le16 tim_ofs;
+ __le16 csa_ofs;
+ __le16 bcc_ofs;
+ __le16 pkt_len;
+} __packed __aligned(4);
+
+enum {
+ BSS_INFO_BCN_CSA,
+ BSS_INFO_BCN_BCC,
+ BSS_INFO_BCN_MBSSID,
+ BSS_INFO_BCN_CONTENT,
+ BSS_INFO_BCN_MAX
+};
+
+enum {
+ BSS_INFO_OMAC,
+ BSS_INFO_BASIC,
+ BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
+ BSS_INFO_PM, /* sta only */
+ BSS_INFO_UAPSD, /* sta only */
+ BSS_INFO_ROAM_DETECT, /* obsoleted */
+ BSS_INFO_LQ_RM, /* obsoleted */
+ BSS_INFO_EXT_BSS,
+ BSS_INFO_BMC_RATE, /* for bmc rate control in CR4 */
+ BSS_INFO_SYNC_MODE,
+ BSS_INFO_RA,
+ BSS_INFO_HW_AMSDU,
+ BSS_INFO_BSS_COLOR,
+ BSS_INFO_HE_BASIC,
+ BSS_INFO_PROTECT_INFO,
+ BSS_INFO_OFFLOAD,
+ BSS_INFO_11V_MBSSID,
+ BSS_INFO_MAX_NUM
+};
+
+enum {
+ WTBL_RESET_AND_SET = 1,
+ WTBL_SET,
+ WTBL_QUERY,
+ WTBL_RESET_ALL
+};
+
+struct wtbl_req_hdr {
+ u8 wlan_idx_lo;
+ u8 operation;
+ __le16 tlv_num;
+ u8 wlan_idx_hi;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_generic {
+ __le16 tag;
+ __le16 len;
+ u8 peer_addr[ETH_ALEN];
+ u8 muar_idx;
+ u8 skip_tx;
+ u8 cf_ack;
+ u8 qos;
+ u8 mesh;
+ u8 adm;
+ __le16 partial_aid;
+ u8 baf_en;
+ u8 aad_om;
+} __packed;
+
+struct wtbl_rx {
+ __le16 tag;
+ __le16 len;
+ u8 rcid;
+ u8 rca1;
+ u8 rca2;
+ u8 rv;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_ht {
+ __le16 tag;
+ __le16 len;
+ u8 ht;
+ u8 ldpc;
+ u8 af;
+ u8 mm;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_vht {
+ __le16 tag;
+ __le16 len;
+ u8 ldpc;
+ u8 dyn_bw;
+ u8 vht;
+ u8 txop_ps;
+ u8 rsv[4];
+} __packed;
+
+enum {
+ MT_BA_TYPE_INVALID,
+ MT_BA_TYPE_ORIGINATOR,
+ MT_BA_TYPE_RECIPIENT
+};
+
+enum {
+ RST_BA_MAC_TID_MATCH,
+ RST_BA_MAC_MATCH,
+ RST_BA_NO_MATCH
+};
+
+struct wtbl_ba {
+ __le16 tag;
+ __le16 len;
+ /* common */
+ u8 tid;
+ u8 ba_type;
+ u8 rsv0[2];
+ /* originator only */
+ __le16 sn;
+ u8 ba_en;
+ u8 ba_winsize_idx;
+ __le16 ba_winsize;
+ /* recipient only */
+ u8 peer_addr[ETH_ALEN];
+ u8 rst_ba_tid;
+ u8 rst_ba_sel;
+ u8 rst_ba_sb;
+ u8 band_idx;
+ u8 rsv1[4];
+} __packed;
+
+struct wtbl_smps {
+ __le16 tag;
+ __le16 len;
+ u8 smps;
+ u8 rsv[3];
+} __packed;
+
+enum {
+ WTBL_GENERIC,
+ WTBL_RX,
+ WTBL_HT,
+ WTBL_VHT,
+ WTBL_PEER_PS, /* not used */
+ WTBL_TX_PS,
+ WTBL_HDR_TRANS,
+ WTBL_SEC_KEY,
+ WTBL_BA,
+ WTBL_RDG, /* obsoleted */
+ WTBL_PROTECT, /* not used */
+ WTBL_CLEAR, /* not used */
+ WTBL_BF,
+ WTBL_SMPS,
+ WTBL_RAW_DATA, /* debug only */
+ WTBL_PN,
+ WTBL_SPE,
+ WTBL_MAX_NUM
+};
+
+struct sta_ntlv_hdr {
+ u8 rsv[2];
+ __le16 tlv_num;
+} __packed;
+
+struct sta_req_hdr {
+ u8 bss_idx;
+ u8 wlan_idx_lo;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 muar_idx;
+ u8 wlan_idx_hi;
+ u8 rsv;
+} __packed;
+
+struct sta_rec_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 qos;
+ __le16 aid;
+ u8 peer_addr[ETH_ALEN];
+ __le16 extra_info;
+} __packed;
+
+struct sta_rec_ht {
+ __le16 tag;
+ __le16 len;
+ __le16 ht_cap;
+ u16 rsv;
+} __packed;
+
+struct sta_rec_vht {
+ __le16 tag;
+ __le16 len;
+ __le32 vht_cap;
+ __le16 vht_rx_mcs_map;
+ __le16 vht_tx_mcs_map;
+ u8 rts_bw_sig;
+ u8 rsv[3];
+} __packed;
+
+struct sta_rec_muru {
+ __le16 tag;
+ __le16 len;
+
+ struct {
+ bool ofdma_dl_en;
+ bool ofdma_ul_en;
+ bool mimo_dl_en;
+ bool mimo_ul_en;
+ bool rsv[4];
+ } cfg;
+
+ struct {
+ u8 punc_pream_rx;
+ bool he_20m_in_40m_2g;
+ bool he_20m_in_160m;
+ bool he_80m_in_160m;
+ bool lt16_sigb;
+ bool rx_su_comp_sigb;
+ bool rx_su_non_comp_sigb;
+ bool rsv;
+ } ofdma_dl;
+
+ struct {
+ u8 t_frame_dur;
+ u8 mu_cascading;
+ u8 uo_ra;
+ u8 he_2x996_tone;
+ u8 rx_t_frame_11ac;
+ u8 rsv[3];
+ } ofdma_ul;
+
+ struct {
+ bool vht_mu_bfee;
+ bool partial_bw_dl_mimo;
+ u8 rsv[2];
+ } mimo_dl;
+
+ struct {
+ bool full_ul_mimo;
+ bool partial_ul_mimo;
+ u8 rsv[2];
+ } mimo_ul;
+} __packed;
+
+struct sta_rec_he {
+ __le16 tag;
+ __le16 len;
+
+ __le32 he_cap;
+
+ u8 t_frame_dur;
+ u8 max_ampdu_exp;
+ u8 bw_set;
+ u8 device_class;
+ u8 dcm_tx_mode;
+ u8 dcm_tx_max_nss;
+ u8 dcm_rx_mode;
+ u8 dcm_rx_max_nss;
+ u8 dcm_max_ru;
+ u8 punc_pream_rx;
+ u8 pkt_ext;
+ u8 rsv1;
+
+ __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
+
+ u8 rsv2[2];
+} __packed;
+
+struct sta_rec_ba {
+ __le16 tag;
+ __le16 len;
+ u8 tid;
+ u8 ba_type;
+ u8 amsdu;
+ u8 ba_en;
+ __le16 ssn;
+ __le16 winsize;
+} __packed;
+
+struct sec_key {
+ u8 cipher_id;
+ u8 cipher_len;
+ u8 key_id;
+ u8 key_len;
+ u8 key[32];
+} __packed;
+
+struct sta_rec_sec {
+ __le16 tag;
+ __le16 len;
+ u8 add;
+ u8 n_cipher;
+ u8 rsv[2];
+
+ struct sec_key key[2];
+} __packed;
+
+struct ra_phy {
+ u8 type;
+ u8 flag;
+ u8 stbc;
+ u8 sgi;
+ u8 bw;
+ u8 ldpc;
+ u8 mcs;
+ u8 nss;
+ u8 he_ltf;
+};
+
+struct sta_rec_ra {
+ __le16 tag;
+ __le16 len;
+
+ u8 valid;
+ u8 auto_rate;
+ u8 phy_mode;
+ u8 channel;
+ u8 bw;
+ u8 disable_cck;
+ u8 ht_mcs32;
+ u8 ht_gf;
+ u8 ht_mcs[4];
+ u8 mmps_mode;
+ u8 gband_256;
+ u8 af;
+ u8 auth_wapi_mode;
+ u8 rate_len;
+
+ u8 supp_mode;
+ u8 supp_cck_rate;
+ u8 supp_ofdm_rate;
+ __le32 supp_ht_mcs;
+ __le16 supp_vht_mcs[4];
+
+ u8 op_mode;
+ u8 op_vht_chan_width;
+ u8 op_vht_rx_nss;
+ u8 op_vht_rx_nss_type;
+
+ __le32 sta_status;
+
+ struct ra_phy phy;
+} __packed;
+
+struct sta_rec_ra_fixed {
+ __le16 tag;
+ __le16 len;
+
+ __le32 field;
+ u8 op_mode;
+ u8 op_vht_chan_width;
+ u8 op_vht_rx_nss;
+ u8 op_vht_rx_nss_type;
+
+ struct ra_phy phy;
+
+ u8 spe_en;
+ u8 short_preamble;
+ u8 is_5g;
+ u8 mmps_mode;
+} __packed;
+
+#define RATE_PARAM_FIXED 3
+#define RATE_PARAM_AUTO 20
+#define RATE_CFG_MCS GENMASK(3, 0)
+#define RATE_CFG_NSS GENMASK(7, 4)
+#define RATE_CFG_GI GENMASK(11, 8)
+#define RATE_CFG_BW GENMASK(15, 12)
+#define RATE_CFG_STBC GENMASK(19, 16)
+#define RATE_CFG_LDPC GENMASK(23, 20)
+#define RATE_CFG_PHY_TYPE GENMASK(27, 24)
+
+struct sta_rec_bf {
+ __le16 tag;
+ __le16 len;
+
+ __le16 pfmu; /* 0xffff: no access right for PFMU */
+ bool su_mu; /* 0: SU, 1: MU */
+ u8 bf_cap; /* 0: iBF, 1: eBF */
+ u8 sounding_phy; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT */
+ u8 ndpa_rate;
+ u8 ndp_rate;
+ u8 rept_poll_rate;
+ u8 tx_mode; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT ... */
+ u8 nc;
+ u8 nr;
+ u8 bw; /* 0: 20M, 1: 40M, 2: 80M, 3: 160M */
+
+ u8 mem_total;
+ u8 mem_20m;
+ struct {
+ u8 row;
+ u8 col: 6, row_msb: 2;
+ } mem[4];
+
+ __le16 smart_ant;
+ u8 se_idx;
+	u8 auto_sounding; /* b7: low traffic indicator
+			   * b6: stop sounding for this entry
+			   * b5~b0: postpone sounding
+			   */
+ u8 ibf_timeout;
+ u8 ibf_dbw;
+ u8 ibf_ncol;
+ u8 ibf_nrow;
+ u8 nr_bw160;
+ u8 nc_bw160;
+ u8 ru_start_idx;
+ u8 ru_end_idx;
+
+ bool trigger_su;
+ bool trigger_mu;
+ bool ng16_su;
+ bool ng16_mu;
+ bool codebook42_su;
+ bool codebook75_mu;
+
+ u8 he_ltf;
+ u8 rsv[2];
+} __packed;
+
+struct sta_rec_bfee {
+ __le16 tag;
+ __le16 len;
+ bool fb_identity_matrix; /* 1: feedback identity matrix */
+ bool ignore_feedback; /* 1: ignore */
+ u8 rsv[2];
+} __packed;
+
+enum {
+ STA_REC_BASIC,
+ STA_REC_RA,
+ STA_REC_RA_CMM_INFO,
+ STA_REC_RA_UPDATE,
+ STA_REC_BF,
+ STA_REC_AMSDU,
+ STA_REC_BA,
+ STA_REC_RED, /* not used */
+ STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
+ STA_REC_HT,
+ STA_REC_VHT,
+ STA_REC_APPS,
+ STA_REC_KEY,
+ STA_REC_WTBL,
+ STA_REC_HE,
+ STA_REC_HW_AMSDU,
+ STA_REC_WTBL_AADOM,
+ STA_REC_KEY_V2,
+ STA_REC_MURU,
+ STA_REC_MUEDCA,
+ STA_REC_BFEE,
+ STA_REC_MAX_NUM
+};
+
+enum mt7915_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_WEP128,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CCMP_256,
+ MT_CIPHER_GCMP,
+ MT_CIPHER_GCMP_256,
+ MT_CIPHER_WAPI,
+ MT_CIPHER_BIP_CMAC_128,
+};
+
+enum {
+ CH_SWITCH_NORMAL = 0,
+ CH_SWITCH_SCAN = 3,
+ CH_SWITCH_MCC = 4,
+ CH_SWITCH_DFS = 5,
+ CH_SWITCH_BACKGROUND_SCAN_START = 6,
+ CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
+ CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
+ CH_SWITCH_SCAN_BYPASS_DPD = 9
+};
+
+enum {
+ THERMAL_SENSOR_TEMP_QUERY,
+ THERMAL_SENSOR_MANUAL_CTRL,
+ THERMAL_SENSOR_INFO_QUERY,
+ THERMAL_SENSOR_TASK_CTRL,
+};
+
+enum {
+ MT_EBF = BIT(0), /* explicit beamforming */
+ MT_IBF = BIT(1) /* implicit beamforming */
+};
+
+#define MT7915_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_generic) + \
+ sizeof(struct wtbl_rx) + \
+ sizeof(struct wtbl_ht) + \
+ sizeof(struct wtbl_vht) + \
+ sizeof(struct wtbl_ba) + \
+ sizeof(struct wtbl_smps))
+
+#define MT7915_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_he) + \
+ sizeof(struct sta_rec_ba) + \
+ sizeof(struct sta_rec_vht) + \
+ sizeof(struct tlv) + \
+ sizeof(struct sta_rec_muru) + \
+ MT7915_WTBL_UPDATE_MAX_SIZE)
+
+#define MT7915_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_ba))
+
+#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct bss_info_omac) + \
+ sizeof(struct bss_info_basic) +\
+ sizeof(struct bss_info_rf_ch) +\
+ sizeof(struct bss_info_ra) + \
+ sizeof(struct bss_info_he) + \
+ sizeof(struct bss_info_bmc_rate) +\
+ sizeof(struct bss_info_ext_bss) +\
+ sizeof(struct bss_info_sync_mode))
+
+#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct bss_info_bcn_csa) + \
+ sizeof(struct bss_info_bcn_bcc) + \
+ sizeof(struct bss_info_bcn_mbss) + \
+ sizeof(struct bss_info_bcn_cont))
+
+#define PHY_MODE_A BIT(0)
+#define PHY_MODE_B BIT(1)
+#define PHY_MODE_G BIT(2)
+#define PHY_MODE_GN BIT(3)
+#define PHY_MODE_AN BIT(4)
+#define PHY_MODE_AC BIT(5)
+#define PHY_MODE_AX_24G BIT(6)
+#define PHY_MODE_AX_5G BIT(7)
+#define PHY_MODE_AX_6G BIT(8)
+
+#define MODE_CCK BIT(0)
+#define MODE_OFDM BIT(1)
+#define MODE_HT BIT(2)
+#define MODE_VHT BIT(3)
+#define MODE_HE BIT(4)
+
+#define STA_CAP_WMM BIT(0)
+#define STA_CAP_SGI_20 BIT(4)
+#define STA_CAP_SGI_40 BIT(5)
+#define STA_CAP_TX_STBC BIT(6)
+#define STA_CAP_RX_STBC BIT(7)
+#define STA_CAP_VHT_SGI_80 BIT(16)
+#define STA_CAP_VHT_SGI_160 BIT(17)
+#define STA_CAP_VHT_TX_STBC BIT(18)
+#define STA_CAP_VHT_RX_STBC BIT(19)
+#define STA_CAP_VHT_LDPC BIT(23)
+#define STA_CAP_LDPC BIT(24)
+#define STA_CAP_HT BIT(26)
+#define STA_CAP_VHT BIT(27)
+#define STA_CAP_HE BIT(28)
+
+/* HE MAC */
+#define STA_REC_HE_CAP_HTC BIT(0)
+#define STA_REC_HE_CAP_BQR BIT(1)
+#define STA_REC_HE_CAP_BSR BIT(2)
+#define STA_REC_HE_CAP_OM BIT(3)
+#define STA_REC_HE_CAP_AMSDU_IN_AMPDU BIT(4)
+/* HE PHY */
+#define STA_REC_HE_CAP_DUAL_BAND BIT(5)
+#define STA_REC_HE_CAP_LDPC BIT(6)
+#define STA_REC_HE_CAP_TRIG_CQI_FK BIT(7)
+#define STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE BIT(8)
+/* STBC */
+#define STA_REC_HE_CAP_LE_EQ_80M_TX_STBC BIT(9)
+#define STA_REC_HE_CAP_LE_EQ_80M_RX_STBC BIT(10)
+#define STA_REC_HE_CAP_GT_80M_TX_STBC BIT(11)
+#define STA_REC_HE_CAP_GT_80M_RX_STBC BIT(12)
+/* GI */
+#define STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI BIT(13)
+#define STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI BIT(14)
+#define STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI BIT(15)
+#define STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI BIT(16)
+#define STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI BIT(17)
+/* 242 TONE */
+#define STA_REC_HE_CAP_BW20_RU242_SUPPORT BIT(18)
+#define STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242 BIT(19)
+#define STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242 BIT(20)
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
new file mode 100644
index 000000000000..85d74ecd0351
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -0,0 +1,469 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_H
+#define __MT7915_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT7915_MAX_INTERFACES 4
+#define MT7915_MAX_WMM_SETS 4
+#define MT7915_WTBL_SIZE 288
+#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1)
+#define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \
+ MT7915_MAX_INTERFACES)
+
+#define MT7915_WATCHDOG_TIME (HZ / 10)
+#define MT7915_RESET_TIMEOUT (30 * HZ)
+
+#define MT7915_TX_RING_SIZE 2048
+#define MT7915_TX_MCU_RING_SIZE 256
+#define MT7915_TX_FWDL_RING_SIZE 128
+
+#define MT7915_RX_RING_SIZE 1536
+#define MT7915_RX_MCU_RING_SIZE 512
+
+#define MT7915_FIRMWARE_WA "mediatek/mt7915_wa.bin"
+#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin"
+#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
+
+#define MT7915_EEPROM_SIZE 3584
+#define MT7915_TOKEN_SIZE 8192
+
+#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+#define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */
+#define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */
+
+#define MT7915_SKU_RATE_NUM 161
+#define MT7915_SKU_MAX_DELTA_IDX MT7915_SKU_RATE_NUM
+#define MT7915_SKU_TABLE_SIZE (MT7915_SKU_RATE_NUM + 1)
+
+struct mt7915_vif;
+struct mt7915_sta;
+struct mt7915_dfs_pulse;
+struct mt7915_dfs_pattern;
+
+enum mt7915_txq_id {
+ MT7915_TXQ_FWDL = 16,
+ MT7915_TXQ_MCU_WM,
+ MT7915_TXQ_BAND0,
+ MT7915_TXQ_BAND1,
+ MT7915_TXQ_MCU_WA,
+};
+
+enum mt7915_rxq_id {
+ MT7915_RXQ_BAND0 = 0,
+ MT7915_RXQ_BAND1,
+ MT7915_RXQ_MCU_WM = 0,
+ MT7915_RXQ_MCU_WA,
+};
+
+enum mt7915_ampdu_state {
+ MT7915_AGGR_STOP,
+ MT7915_AGGR_PROGRESS,
+ MT7915_AGGR_START,
+ MT7915_AGGR_OPERATIONAL
+};
+
+struct mt7915_sta_stats {
+ struct rate_info prob_rate;
+ struct rate_info tx_rate;
+
+ unsigned long per;
+ unsigned long changed;
+ unsigned long jiffies;
+};
+
+struct mt7915_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7915_vif *vif;
+
+ struct list_head poll_list;
+ u32 airtime_ac[8];
+
+ struct mt7915_sta_stats stats;
+ struct work_struct stats_work;
+
+ spinlock_t ampdu_lock;
+ enum mt7915_ampdu_state ampdu_state[IEEE80211_NUM_TIDS];
+};
+
+struct mt7915_vif {
+ u16 idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 wmm_idx;
+
+ struct {
+ u16 cw_min;
+ u16 cw_max;
+ u16 txop;
+ u8 aifs;
+ } wmm[IEEE80211_NUM_ACS];
+
+ struct mt7915_sta sta;
+ struct mt7915_dev *dev;
+};
+
+struct mib_stats {
+ u16 ack_fail_cnt;
+ u16 fcs_err_cnt;
+ u16 rts_cnt;
+ u16 rts_retries_cnt;
+ u16 ba_miss_cnt;
+};
+
+struct mt7915_phy {
+ struct mt76_phy *mt76;
+ struct mt7915_dev *dev;
+
+ struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
+
+ u32 rxfilter;
+ u32 vif_mask;
+ u32 omac_mask;
+
+ u16 noise;
+ u16 chainmask;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ u8 rdd_state;
+ int dfs_state;
+
+ __le32 rx_ampdu_ts;
+ u32 ampdu_ref;
+
+ struct mib_stats mib;
+
+ struct delayed_work mac_work;
+ u8 mac_work_count;
+};
+
+struct mt7915_dev {
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ struct mt7915_phy phy;
+
+ u16 chainmask;
+
+ struct work_struct init_work;
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ u32 reset_state;
+
+ struct list_head sta_poll_list;
+ spinlock_t sta_poll_lock;
+
+ u32 hw_pattern;
+
+ spinlock_t token_lock;
+ struct idr token;
+
+ s8 **rate_power; /* TODO: use mt76_rate_power */
+
+ bool fw_debug;
+};
+
+enum {
+ HW_BSSID_0 = 0x0,
+ HW_BSSID_1,
+ HW_BSSID_2,
+ HW_BSSID_3,
+ HW_BSSID_MAX,
+ EXT_BSSID_START = 0x10,
+ EXT_BSSID_1,
+ EXT_BSSID_2,
+ EXT_BSSID_3,
+ EXT_BSSID_4,
+ EXT_BSSID_5,
+ EXT_BSSID_6,
+ EXT_BSSID_7,
+ EXT_BSSID_8,
+ EXT_BSSID_9,
+ EXT_BSSID_10,
+ EXT_BSSID_11,
+ EXT_BSSID_12,
+ EXT_BSSID_13,
+ EXT_BSSID_14,
+ EXT_BSSID_15,
+ EXT_BSSID_END
+};
+
+enum {
+ MT_RX_SEL0,
+ MT_RX_SEL1,
+};
+
+enum mt7915_rdd_cmd {
+ RDD_STOP,
+ RDD_START,
+ RDD_DET_MODE,
+ RDD_RADAR_EMULATE,
+ RDD_START_TXQ = 20,
+ RDD_CAC_START = 50,
+ RDD_CAC_END,
+ RDD_NORMAL_START,
+ RDD_DISABLE_DFS_CAL,
+ RDD_PULSE_DBG,
+ RDD_READ_PULSE,
+ RDD_RESUME_BF,
+ RDD_IRQ_OFF,
+};
+
+enum {
+ RATE_CTRL_RU_INFO,
+ RATE_CTRL_FIXED_RATE_INFO,
+ RATE_CTRL_DUMP_INFO,
+ RATE_CTRL_MU_INFO,
+};
+
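+/* hw->priv holds the mt76_phy; its priv pointer is the matching mt7915_phy */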
+static inline struct mt7915_phy *
+mt7915_hw_phy(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return phy->priv;
+}
+
+static inline struct mt7915_dev *
+mt7915_hw_dev(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return container_of(phy->dev, struct mt7915_dev, mt76);
+}
+
+static inline struct mt7915_phy *
+mt7915_ext_phy(struct mt7915_dev *dev)
+{
+ struct mt76_phy *phy = dev->mt76.phy2;
+
+ if (!phy)
+ return NULL;
+
+ return phy->priv;
+}
+
+static inline void
+mt7915_set_aggr_state(struct mt7915_sta *msta, u8 tid,
+ enum mt7915_ampdu_state state)
+{
+ spin_lock_bh(&msta->ampdu_lock);
+ msta->ampdu_state[tid] = state;
+ spin_unlock_bh(&msta->ampdu_lock);
+}
+
+extern const struct ieee80211_ops mt7915_ops;
+extern struct pci_driver mt7915_pci_driver;
+
+u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
+
+int mt7915_register_device(struct mt7915_dev *dev);
+void mt7915_unregister_device(struct mt7915_dev *dev);
+int mt7915_register_ext_phy(struct mt7915_dev *dev);
+void mt7915_unregister_ext_phy(struct mt7915_dev *dev);
+int mt7915_eeprom_init(struct mt7915_dev *dev);
+u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset);
+int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx);
+void mt7915_eeprom_init_sku(struct mt7915_dev *dev);
+int mt7915_dma_init(struct mt7915_dev *dev);
+void mt7915_dma_prefetch(struct mt7915_dev *dev);
+void mt7915_dma_cleanup(struct mt7915_dev *dev);
+int mt7915_mcu_init(struct mt7915_dev *dev);
+int mt7915_mcu_add_dev_info(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
+int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif, int enable);
+int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct mt7915_sta *msta, struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd);
+int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int enable);
+int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd);
+int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif);
+int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
+ struct ieee80211_sta *sta, u32 rate);
+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
+int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
+ bool hdr_trans);
+int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable);
+int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
+int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
+int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
+int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
+int mt7915_mcu_set_sku(struct mt7915_phy *phy);
+int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev);
+int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev);
+int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
+int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
+ const struct mt7915_dfs_pulse *pulse);
+int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
+ const struct mt7915_dfs_pattern *pattern);
+int mt7915_mcu_get_rate_info(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx);
+int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index);
+int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd,
+ u8 index, u8 rx_sel, u8 val);
+int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl);
+int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
+void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
+void mt7915_mcu_exit(struct mt7915_dev *dev);
+
+static inline bool is_mt7915(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7915;
+}
+
+static inline void mt7915_irq_enable(struct mt7915_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+}
+
+static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
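+/*
+ * Registers outside the directly mapped range are reached through the L1
+ * remap window: the high address bits select the window base, the low
+ * bits become an offset from MT_HIF_REMAP_BASE_L1.
+ */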
+static inline u32
+mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+
+ mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
+ /* use read to push write */
+ mt76_rr(dev, MT_HIF_REMAP_L1);
+
+ return MT_HIF_REMAP_BASE_L1 + offset;
+}
+
+static inline u32
+mt7915_l1_rr(struct mt7915_dev *dev, u32 addr)
+{
+ return mt76_rr(dev, mt7915_reg_map_l1(dev, addr));
+}
+
+static inline void
+mt7915_l1_wr(struct mt7915_dev *dev, u32 addr, u32 val)
+{
+ mt76_wr(dev, mt7915_reg_map_l1(dev, addr), val);
+}
+
+static inline u32
+mt7915_l1_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val)
+{
+ val |= mt7915_l1_rr(dev, addr) & ~mask;
+ mt7915_l1_wr(dev, addr, val);
+
+ return val;
+}
+
+#define mt7915_l1_set(dev, addr, val) mt7915_l1_rmw(dev, addr, 0, val)
+#define mt7915_l1_clear(dev, addr, val) mt7915_l1_rmw(dev, addr, val, 0)
+
+static inline u32
+mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+ mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base);
+ /* use read to push write */
+ mt76_rr(dev, MT_HIF_REMAP_L2);
+
+ return MT_HIF_REMAP_BASE_L2 + offset;
+}
+
+static inline u32
+mt7915_l2_rr(struct mt7915_dev *dev, u32 addr)
+{
+ return mt76_rr(dev, mt7915_reg_map_l2(dev, addr));
+}
+
+static inline void
+mt7915_l2_wr(struct mt7915_dev *dev, u32 addr, u32 val)
+{
+ mt76_wr(dev, mt7915_reg_map_l2(dev, addr), val);
+}
+
+static inline u32
+mt7915_l2_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val)
+{
+ val |= mt7915_l2_rr(dev, addr) & ~mask;
+ mt7915_l2_wr(dev, addr, val);
+
+ return val;
+}
+
+#define mt7915_l2_set(dev, addr, val) mt7915_l2_rmw(dev, addr, 0, val)
+#define mt7915_l2_clear(dev, addr, val) mt7915_l2_rmw(dev, addr, val, 0)
+
+bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
+void mt7915_mac_reset_counters(struct mt7915_phy *phy);
+void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
+void mt7915_mac_sta_poll(struct mt7915_dev *dev);
+void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, bool beacon);
+void mt7915_mac_set_timing(struct mt7915_phy *phy);
+int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb);
+void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb);
+int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7915_mac_work(struct work_struct *work);
+void mt7915_mac_reset_work(struct work_struct *work);
+void mt7915_mac_sta_stats_work(struct work_struct *work);
+int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
+void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+void mt7915_stats_work(struct work_struct *work);
+void mt7915_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *txwi);
+int mt76_dfs_start_rdd(struct mt7915_dev *dev, bool force);
+int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy);
+void mt7915_set_stream_he_caps(struct mt7915_phy *phy);
+void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
+void mt7915_update_channel(struct mt76_dev *mdev);
+int mt7915_init_debugfs(struct mt7915_dev *dev);
+#ifdef CONFIG_MAC80211_DEBUGFS
+void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
new file mode 100644
index 000000000000..7937c6965f59
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7915.h"
+#include "mac.h"
+#include "../trace.h"
+
+static const struct pci_device_id mt7915_pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7915) },
+ { },
+};
+
+static void
+mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+/* TODO: support 2/4/6/8 MSI-X vectors */
+static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7915_dev *dev = dev_instance;
+ u32 intr;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ intr &= dev->mt76.mmio.irqmask;
+
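+	/* sources are masked before NAPI is scheduled and re-enabled from the
+	 * poll-complete path (e.g. mt7915_rx_poll_complete), so they cannot
+	 * re-fire while polling is in progress
+	 */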
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt7915_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ napi_schedule(&dev->mt76.tx_napi);
+ }
+
+ if (intr & MT_INT_RX_DONE_DATA) {
+ mt7915_irq_disable(dev, MT_INT_RX_DONE_DATA);
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE_WM) {
+ mt7915_irq_disable(dev, MT_INT_RX_DONE_WM);
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ if (intr & MT_INT_RX_DONE_WA) {
+ mt7915_irq_disable(dev, MT_INT_RX_DONE_WA);
+ napi_schedule(&dev->mt76.napi[2]);
+ }
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ mt76_wr(dev, MT_MCU_CMD, val);
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
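+/* allocate one SKU rate-power table per band, used by mt7915_mcu_set_sku() */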
+static int
+mt7915_alloc_device(struct pci_dev *pdev, struct mt7915_dev *dev)
+{
+#define NUM_BANDS 2
+ int i;
+ s8 **sku;
+
+ sku = devm_kzalloc(&pdev->dev, NUM_BANDS * sizeof(*sku), GFP_KERNEL);
+ if (!sku)
+ return -ENOMEM;
+
+ for (i = 0; i < NUM_BANDS; i++) {
+ sku[i] = devm_kzalloc(&pdev->dev, MT7915_SKU_TABLE_SIZE *
+ sizeof(**sku), GFP_KERNEL);
+ if (!sku[i])
+ return -ENOMEM;
+ }
+ dev->rate_power = sku;
+
+ return 0;
+}
+
+static int mt7915_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
+ .drv_flags = MT_DRV_TXWI_NO_FREE,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7915_tx_prepare_skb,
+ .tx_complete_skb = mt7915_tx_complete_skb,
+ .rx_skb = mt7915_queue_rx_skb,
+ .rx_poll_complete = mt7915_rx_poll_complete,
+ .sta_ps = mt7915_sta_ps,
+ .sta_add = mt7915_mac_sta_add,
+ .sta_remove = mt7915_mac_sta_remove,
+ .update_survey = mt7915_update_channel,
+ };
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7915_ops,
+ &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+ ret = mt7915_alloc_device(pdev, dev);
+ if (ret)
+ return ret;
+
+ mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+ mdev->rev = (mt7915_l1_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt7915_l1_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+	/* master switch of PCIe interrupt enable */
+ mt7915_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt7915_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
+
+static void mt7915_pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_unregister_device(dev);
+}
+
+struct pci_driver mt7915_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7915_pci_device_table,
+ .probe = mt7915_pci_probe,
+ .remove = mt7915_pci_remove,
+};
+
+module_pci_driver(mt7915_pci_driver);
+
+MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table);
+MODULE_FIRMWARE(MT7915_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7915_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7915_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
new file mode 100644
index 000000000000..c121715f8bff
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_REGS_H
+#define __MT7915_REGS_H
+
+/* MCU WFDMA1 */
+#define MT_MCU_WFDMA1_BASE 0x3000
+#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
+
+#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108)
+#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
+#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
+#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
+#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
+
+#define MT_PLE_BASE 0x8000
+#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
+
+#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
+#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
+#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
+#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
+ ((n) << 2))
+#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
+
+#define MT_MDP_BASE 0xf000
+#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
+
+#define MT_MDP_DCR0 MT_MDP(0x000)
+#define MT_MDP_DCR0_DAMSDU_EN BIT(15)
+
+#define MT_MDP_DCR1 MT_MDP(0x004)
+#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
+
+#define MT_MDP_BNRCFR0(_band) MT_MDP(0x070 + ((_band) << 8))
+#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
+#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6)
+#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8)
+
+#define MT_MDP_BNRCFR1(_band) MT_MDP(0x074 + ((_band) << 8))
+#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22)
+#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27)
+#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29)
+#define MT_MDP_TO_HIF 0
+#define MT_MDP_TO_WM 1
+
+/* TMAC: band 0(0x21000), band 1(0xa1000) */
+#define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000)
+#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
+
+#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090)
+#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
+#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
+#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4)
+#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
+#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
+#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
+
+/* DMA Band 0 */
+#define MT_WF_DMA_BASE 0x21e00
+#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
+
+#define MT_DMA_DCR0 MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
+#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
+
+/* ETBF: band 0(0x24000), band 1(0xa4000) */
+#define MT_WF_ETBF_BASE(_band) ((_band) ? 0xa4000 : 0x24000)
+#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
+
+#define MT_ETBF_TX_NDP_BFRP(_band) MT_WF_ETBF(_band, 0x040)
+#define MT_ETBF_TX_FB_CPL GENMASK(31, 16)
+#define MT_ETBF_TX_FB_TRI GENMASK(15, 0)
+
+#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x0f0)
+#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16)
+#define MT_ETBF_TX_EBF_CNT GENMASK(15, 0)
+
+#define MT_ETBF_RX_FB_CNT(_band) MT_WF_ETBF(_band, 0x0f8)
+#define MT_ETBF_RX_FB_ALL GENMASK(31, 24)
+#define MT_ETBF_RX_FB_HE GENMASK(23, 16)
+#define MT_ETBF_RX_FB_VHT GENMASK(15, 8)
+#define MT_ETBF_RX_FB_HT GENMASK(7, 0)
+
+/* LPON: band 0(0x24200), band 1(0xa4200) */
+#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200)
+#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
+
+#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080)
+#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084)
+
+#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4)
+#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
+#define MT_LPON_TCR_SW_WRITE BIT(0)
+
+/* MIB: band 0(0x24800), band 1(0xa4800) */
+#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
+#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
+
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
+#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c)
+#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048)
+#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098)
+#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c)
+#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
+
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
+#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
+#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
+#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
+#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
+
+#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4))
+#define MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0)
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
+#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
+#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
+
+#define MT_WTBLON_TOP_BASE 0x34000
+#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
+
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
+#define MT_WTBL_UPDATE_BUSY BIT(31)
+
+#define MT_WTBL_BASE 0x38000
+#define MT_WTBL_LMAC_ID GENMASK(14, 8)
+#define MT_WTBL_LMAC_DW GENMASK(7, 2)
+#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
+ FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+
+/* AGG: band 0(0x20800), band 1(0xa0800) */
+#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800)
+#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
+
+#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084)
+#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
+#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
+
+/* ARB: band 0(0x20c00), band 1(0xa0c00) */
+#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00)
+#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
+
+#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080)
+#define MT_ARB_SCR_TX_DISABLE BIT(8)
+#define MT_ARB_SCR_RX_DISABLE BIT(9)
+
+/* RMAC: band 0(0x21400), band 1(0xa1400) */
+#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400)
+#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
+
+#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_VERSION BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_WF_RFCR1(_band) MT_WF_RMAC(_band, 0x004)
+#define MT_WF_RFCR1_DROP_ACK BIT(4)
+#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
+#define MT_WF_RFCR1_DROP_BA BIT(6)
+#define MT_WF_RFCR1_DROP_CFEND BIT(7)
+#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+
+#define MT_WF_RMAC_MIB_TIME0(_band) MT_WF_RMAC(_band, 0x03c4)
+#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
+#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
+
+#define MT_WF_RMAC_MIB_AIRTIME14(_band) MT_WF_RMAC(_band, 0x03b8)
+#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
+#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
+
+/* WFDMA0 */
+#define MT_WFDMA0_BASE 0xd4000
+#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
+
+#define MT_WFDMA0_RST MT_WFDMA0(0x100)
+#define MT_WFDMA0_RST_LOGIC_RST BIT(4)
+#define MT_WFDMA0_RST_DMASHDL_ALL_RST BIT(5)
+
+#define MT_WFDMA0_BUSY_ENA MT_WFDMA0(0x13c)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
+#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
+
+#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
+#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
+
+#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500)
+
+#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
+#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
+#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
+
+/* WFDMA1 */
+#define MT_WFDMA1_BASE 0xd5000
+#define MT_WFDMA1(ofs) (MT_WFDMA1_BASE + (ofs))
+
+#define MT_WFDMA1_RST MT_WFDMA1(0x100)
+#define MT_WFDMA1_RST_LOGIC_RST BIT(4)
+#define MT_WFDMA1_RST_DMASHDL_ALL_RST BIT(5)
+
+#define MT_WFDMA1_BUSY_ENA MT_WFDMA1(0x13c)
+#define MT_WFDMA1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA1_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_MCU_CMD MT_WFDMA1(0x1f0)
+#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
+
+#define MT_WFDMA1_GLO_CFG MT_WFDMA1(0x208)
+#define MT_WFDMA1_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WFDMA1_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA1_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO BIT(27)
+
+#define MT_WFDMA1_RST_DTX_PTR MT_WFDMA1(0x20c)
+#define MT_WFDMA1_PRI_DLY_INT_CFG0 MT_WFDMA1(0x2f0)
+
+#define MT_TX_RING_BASE MT_WFDMA1(0x300)
+#define MT_RX_EVENT_RING_BASE MT_WFDMA1(0x500)
+
+#define MT_WFDMA1_TX_RING0_EXT_CTRL MT_WFDMA1(0x600)
+#define MT_WFDMA1_TX_RING1_EXT_CTRL MT_WFDMA1(0x604)
+#define MT_WFDMA1_TX_RING2_EXT_CTRL MT_WFDMA1(0x608)
+#define MT_WFDMA1_TX_RING3_EXT_CTRL MT_WFDMA1(0x60c)
+#define MT_WFDMA1_TX_RING4_EXT_CTRL MT_WFDMA1(0x610)
+#define MT_WFDMA1_TX_RING5_EXT_CTRL MT_WFDMA1(0x614)
+#define MT_WFDMA1_TX_RING6_EXT_CTRL MT_WFDMA1(0x618)
+#define MT_WFDMA1_TX_RING7_EXT_CTRL MT_WFDMA1(0x61c)
+
+#define MT_WFDMA1_TX_RING16_EXT_CTRL MT_WFDMA1(0x640)
+#define MT_WFDMA1_TX_RING17_EXT_CTRL MT_WFDMA1(0x644)
+#define MT_WFDMA1_TX_RING18_EXT_CTRL MT_WFDMA1(0x648)
+#define MT_WFDMA1_TX_RING19_EXT_CTRL MT_WFDMA1(0x64c)
+#define MT_WFDMA1_TX_RING20_EXT_CTRL MT_WFDMA1(0x650)
+#define MT_WFDMA1_TX_RING21_EXT_CTRL MT_WFDMA1(0x654)
+#define MT_WFDMA1_TX_RING22_EXT_CTRL MT_WFDMA1(0x658)
+#define MT_WFDMA1_TX_RING23_EXT_CTRL MT_WFDMA1(0x65c)
+
+#define MT_WFDMA1_RX_RING0_EXT_CTRL MT_WFDMA1(0x680)
+#define MT_WFDMA1_RX_RING1_EXT_CTRL MT_WFDMA1(0x684)
+#define MT_WFDMA1_RX_RING2_EXT_CTRL MT_WFDMA1(0x688)
+#define MT_WFDMA1_RX_RING3_EXT_CTRL MT_WFDMA1(0x68c)
+
+/* WFDMA CSR */
+#define MT_WFDMA_EXT_CSR_BASE 0xd7000
+#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
+
+#define MT_INT_SOURCE_CSR MT_WFDMA_EXT_CSR(0x10)
+#define MT_INT_MASK_CSR MT_WFDMA_EXT_CSR(0x14)
+#define MT_INT_RX_DONE_DATA BIT(16)
+#define MT_INT_RX_DONE_WM BIT(0)
+#define MT_INT_RX_DONE_WA BIT(1)
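+/* RX queue 0 is the data ring (bit 16); queues 1/2 map to the WM/WA bits */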
+#define MT_INT_RX_DONE(_n) ((_n) ? BIT((_n) - 1) : BIT(16))
+#define MT_INT_RX_DONE_ALL (BIT(0) | BIT(1) | BIT(16))
+#define MT_INT_TX_DONE_ALL (BIT(15) | GENMASK(27, 26) | BIT(30))
+#define MT_INT_MCU_CMD BIT(29)
+
+#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
+#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
+
+/* WFDMA0 PCIE1 */
+#define MT_WFDMA0_PCIE1_BASE 0xd8000
+#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+
+#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
+
+/* WFDMA1 PCIE1 */
+#define MT_WFDMA1_PCIE1_BASE 0xd9000
+#define MT_WFDMA1_PCIE1(ofs)		(MT_WFDMA1_PCIE1_BASE + (ofs))
+
+#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
+#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_INFRA_CFG_BASE 0xf1000
+#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
+
+#define MT_HIF_REMAP_L1 MT_INFRA(0x1ac)
+#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
+#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
+#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L1 0xe0000
+
+#define MT_HIF_REMAP_L2 MT_INFRA(0x1b0)
+#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
+#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
+#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
+#define MT_HIF_REMAP_BASE_L2 0x00000
+
+#define MT_TOP_BASE 0x18060000
+#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
+
+#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10)
+#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
+#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
+
+#define MT_TOP_MISC MT_TOP(0xf0)
+#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+
+#define MT_HW_BOUND 0x70010020
+#define MT_HW_CHIPID 0x70010200
+#define MT_HW_REV 0x70010204
+
+#define MT_PCIE_MAC_BASE 0x74030000
+#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
+#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
+
+/* PHY: band 0(0x83080000), band 1(0x83090000) */
+#define MT_WF_PHY_BASE 0x83080000
+#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
+
+#define MT_WF_PHY_RX_CTRL1(_phy) MT_WF_PHY(0x2004 + ((_phy) << 16))
+#define MT_WF_PHY_RX_CTRL1_STSCNT_EN GENMASK(11, 9)
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index eff522dbda34..fca38ea2441f 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -101,19 +101,17 @@ mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
{
__skb_queue_head_init(list);
spin_lock_bh(&dev->status_list.lock);
- __acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
- __releases(&dev->status_list.unlock)
+ __releases(&dev->status_list.lock)
{
struct ieee80211_hw *hw;
struct sk_buff *skb;
spin_unlock_bh(&dev->status_list.lock);
- __release(&dev->status_list.unlock);
while ((skb = __skb_dequeue(list)) != NULL) {
hw = mt76_tx_status_get_hw(dev, skb);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index a981da6c35a5..fb97ea25b4d4 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -1009,8 +1009,19 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
- if (mt76_chip(dev) == 0x7663)
- return ac ^ 0x3;
+ if (mt76_chip(dev) == 0x7663) {
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_VO] = 0,
+ [IEEE80211_AC_VI] = 1,
+ [IEEE80211_AC_BE] = 2,
+ [IEEE80211_AC_BK] = 4,
+ };
+
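+		/* hw queue 3 is left unused for WMM on 0x7663, so BK maps to queue 4 */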
+ if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map)))
+ return 2; /* BE */
+
+ return wmm_queue_map[ac];
+ }
return mt76_ac_to_hwq(ac);
}
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index 8c60c450125a..ecde87465bf6 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -42,17 +42,17 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
}
EXPORT_SYMBOL_GPL(__mt76_poll_msec);
-int mt76_wcid_alloc(unsigned long *mask, int size)
+int mt76_wcid_alloc(u32 *mask, int size)
{
int i, idx = 0, cur;
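+	/* each u32 word tracks 32 wcid slots; take the first clear bit */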
- for (i = 0; i < size / BITS_PER_LONG; i++) {
+ for (i = 0; i < DIV_ROUND_UP(size, 32); i++) {
idx = ffs(~mask[i]);
if (!idx)
continue;
idx--;
- cur = i * BITS_PER_LONG + idx;
+ cur = i * 32 + idx;
if (cur >= size)
break;
@@ -74,13 +74,13 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
rcu_read_lock();
for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
- unsigned long mask = dev->wcid_mask[i];
- unsigned long phy_mask = dev->wcid_phy_mask[i];
+ u32 mask = dev->wcid_mask[i];
+ u32 phy_mask = dev->wcid_phy_mask[i];
if (!mask)
continue;
- for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1, phy_mask >>= 1) {
+ for (j = i * 32; mask; j++, mask >>= 1, phy_mask >>= 1) {
if (!(mask & 1))
continue;
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
index 48a71e7479e5..fd1a68820e0a 100644
--- a/drivers/net/wireless/mediatek/mt76/util.h
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -14,24 +14,24 @@
#define MT76_INCR(_var, _size) \
(_var = (((_var) + 1) % (_size)))
-int mt76_wcid_alloc(unsigned long *mask, int size);
+int mt76_wcid_alloc(u32 *mask, int size);
static inline bool
-mt76_wcid_mask_test(unsigned long *mask, int idx)
+mt76_wcid_mask_test(u32 *mask, int idx)
{
- return mask[idx / BITS_PER_LONG] & BIT(idx % BITS_PER_LONG);
+ return mask[idx / 32] & BIT(idx % 32);
}
static inline void
-mt76_wcid_mask_set(unsigned long *mask, int idx)
+mt76_wcid_mask_set(u32 *mask, int idx)
{
- mask[idx / BITS_PER_LONG] |= BIT(idx % BITS_PER_LONG);
+ mask[idx / 32] |= BIT(idx % 32);
}
static inline void
-mt76_wcid_mask_clear(unsigned long *mask, int idx)
+mt76_wcid_mask_clear(u32 *mask, int idx)
{
- mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+ mask[idx / 32] &= ~BIT(idx % 32);
}
static inline void
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 87d048df09d1..3334c45aac13 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -69,7 +69,7 @@ struct qtnf_bus {
struct notifier_block netdev_nb;
u8 hw_id[ETH_ALEN];
/* bus private data */
- char bus_priv[0] __aligned(sizeof(void *));
+ char bus_priv[] __aligned(sizeof(void *));
};
static inline bool qtnf_fw_is_up(struct qtnf_bus *bus)
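This one-liner, and the long run of qlink.h conversions further down, replace the zero-length-array GNU extension with a C99 flexible array member; the empty [] form lets the compiler and fortified string helpers flag out-of-bounds accesses that [0] silently permitted. A minimal sketch of the allocation pattern (struct and names illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct blob {
        unsigned int len;
        char data[];            /* flexible array member, was data[0] */
    };

    static struct blob *blob_new(const char *src, unsigned int len)
    {
        struct blob *b = malloc(sizeof(*b) + len);  /* header + payload */
        if (!b)
            return NULL;
        b->len = len;
        memcpy(b->data, src, len);
        return b;
    }

    int main(void)
    {
        struct blob *b = blob_new("qlink", 5);
        if (b)
            printf("%u bytes: %.5s\n", b->len, b->data);
        free(b);
        return 0;
    }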
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 8be17106008d..54cdf3ad09d7 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -389,55 +389,57 @@ static int qtnf_set_wiphy_params(struct wiphy *wiphy, u32 changed)
}
static void
-qtnf_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
- u16 frame_type, bool reg)
+qtnf_update_mgmt_frame_registrations(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct mgmt_frame_regs *upd)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
- u16 mgmt_type;
- u16 new_mask;
- u16 qlink_frame_type = 0;
+ u16 new_mask = upd->interface_stypes;
+ u16 old_mask = vif->mgmt_frames_bitmask;
+ static const struct {
+ u16 mask, qlink_type;
+ } updates[] = {
+ {
+ .mask = BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_ASSOC_REQ >> 4),
+ .qlink_type = QLINK_MGMT_FRAME_ASSOC_REQ,
+ },
+ {
+ .mask = BIT(IEEE80211_STYPE_AUTH >> 4),
+ .qlink_type = QLINK_MGMT_FRAME_AUTH,
+ },
+ {
+ .mask = BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ .qlink_type = QLINK_MGMT_FRAME_PROBE_REQ,
+ },
+ {
+ .mask = BIT(IEEE80211_STYPE_ACTION >> 4),
+ .qlink_type = QLINK_MGMT_FRAME_ACTION,
+ },
+ };
+ unsigned int i;
- mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
+ if (new_mask == old_mask)
+ return;
- if (reg)
- new_mask = vif->mgmt_frames_bitmask | BIT(mgmt_type);
- else
- new_mask = vif->mgmt_frames_bitmask & ~BIT(mgmt_type);
+ for (i = 0; i < ARRAY_SIZE(updates); i++) {
+ u16 mask = updates[i].mask;
+ u16 qlink_frame_type = updates[i].qlink_type;
+ bool reg;
- if (new_mask == vif->mgmt_frames_bitmask)
- return;
+ /* the ! are here due to the assoc/reassoc merge */
+ if (!(new_mask & mask) == !(old_mask & mask))
+ continue;
- switch (frame_type & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_REASSOC_REQ:
- case IEEE80211_STYPE_ASSOC_REQ:
- qlink_frame_type = QLINK_MGMT_FRAME_ASSOC_REQ;
- break;
- case IEEE80211_STYPE_AUTH:
- qlink_frame_type = QLINK_MGMT_FRAME_AUTH;
- break;
- case IEEE80211_STYPE_PROBE_REQ:
- qlink_frame_type = QLINK_MGMT_FRAME_PROBE_REQ;
- break;
- case IEEE80211_STYPE_ACTION:
- qlink_frame_type = QLINK_MGMT_FRAME_ACTION;
- break;
- default:
- pr_warn("VIF%u.%u: unsupported frame type: %X\n",
- vif->mac->macid, vif->vifid,
- (frame_type & IEEE80211_FCTL_STYPE) >> 4);
- return;
- }
+ reg = new_mask & mask;
- if (qtnf_cmd_send_register_mgmt(vif, qlink_frame_type, reg)) {
- pr_warn("VIF%u.%u: failed to %sregister mgmt frame type 0x%x\n",
- vif->mac->macid, vif->vifid, reg ? "" : "un",
- frame_type);
- return;
+ if (qtnf_cmd_send_register_mgmt(vif, qlink_frame_type, reg))
+ pr_warn("VIF%u.%u: failed to %sregister qlink frame type 0x%x\n",
+ vif->mac->macid, vif->vifid, reg ? "" : "un",
+ qlink_frame_type);
}
vif->mgmt_frames_bitmask = new_mask;
- pr_debug("VIF%u.%u: %sregistered mgmt frame type 0x%x\n",
- vif->mac->macid, vif->vifid, reg ? "" : "un", frame_type);
}
static int
@@ -1017,7 +1019,8 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.change_beacon = qtnf_change_beacon,
.stop_ap = qtnf_stop_ap,
.set_wiphy_params = qtnf_set_wiphy_params,
- .mgmt_frame_register = qtnf_mgmt_frame_register,
+ .update_mgmt_frame_registrations =
+ qtnf_update_mgmt_frame_registrations,
.mgmt_tx = qtnf_mgmt_tx,
.change_station = qtnf_change_station,
.del_station = qtnf_del_station,
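cfg80211 replaced the per-frame-type mgmt_frame_register callback with a single update carrying the whole subtype bitmask, so the driver now diffs old against new and issues one qlink command per group whose registration actually toggled; assoc and reassoc share a group, which is why the comparison normalises both sides with !. A compact sketch of that diffing step (group masks invented):

    #include <stdio.h>
    #include <stdint.h>

    static const uint16_t groups[] = { 0x0030, 0x0800, 0x0010 };

    /* fire one (un)register action per group whose membership flipped */
    static void sync_groups(uint16_t old_mask, uint16_t new_mask)
    {
        for (unsigned i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
            int was = !!(old_mask & groups[i]);
            int now = !!(new_mask & groups[i]);
            if (was == now)
                continue;       /* no change for this group */
            printf("group %u: %s\n", i, now ? "register" : "unregister");
        }
    }

    int main(void)
    {
        /* group 0 keeps one of its two subtypes: no command issued */
        sync_groups(0x0030, 0x0810);
        return 0;
    }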
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index dbb241106d8a..eb67b66b846b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -286,7 +286,7 @@ static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
struct sk_buff *skb;
dma_addr_t paddr;
- skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE);
if (!skb) {
priv->rx_skb[index] = NULL;
return -ENOMEM;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index dbf3c5fd751f..d1b850aa4657 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -247,7 +247,7 @@ topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
struct sk_buff *skb;
dma_addr_t paddr;
- skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE);
if (!skb) {
ts->base.rx_skb[index] = NULL;
return -ENOMEM;
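Both qtnfmac RX-refill paths switch to the plain helper because netdev_alloc_skb_ip_align() already allocates with GFP_ATOMIC, making the explicit flag via the __ variant redundant. Roughly how the wrapper is layered in skbuff.h (a sketch, not the verbatim source):

    static inline struct sk_buff *
    netdev_alloc_skb_ip_align(struct net_device *dev, unsigned int length)
    {
        /* the non-__ helper hardcodes the atomic allocation flag */
        return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
    }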
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 4d22a54c034f..2dda4c5d7427 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -362,7 +362,7 @@ struct qlink_cmd {
struct qlink_cmd_init_fw {
struct qlink_cmd chdr;
__le32 qlink_proto_ver;
- u8 var_info[0];
+ u8 var_info[];
} __packed;
/**
@@ -434,7 +434,7 @@ struct qlink_cmd_frame_tx {
__le32 cookie;
__le16 freq;
__le16 flags;
- u8 frame_data[0];
+ u8 frame_data[];
} __packed;
/**
@@ -466,7 +466,7 @@ struct qlink_cmd_add_key {
__le32 cipher;
__le16 vlanid;
u8 rsvd[2];
- u8 key_data[0];
+ u8 key_data[];
} __packed;
/**
@@ -578,7 +578,7 @@ struct qlink_cmd_connect {
u8 mfp;
u8 pbss;
u8 rsvd[2];
- u8 payload[0];
+ u8 payload[];
} __packed;
/**
@@ -592,7 +592,7 @@ struct qlink_cmd_external_auth {
struct qlink_cmd chdr;
u8 peer[ETH_ALEN];
__le16 status;
- u8 payload[0];
+ u8 payload[];
} __packed;
/**
@@ -698,7 +698,7 @@ struct qlink_cmd_reg_notify {
u8 dfs_region;
u8 slave_radar;
u8 dfs_offload;
- u8 info[0];
+ u8 info[];
} __packed;
/**
@@ -773,7 +773,7 @@ struct qlink_cmd_start_ap {
struct qlink_sr_params sr_params;
u8 twt_responder;
u8 rsvd[3];
- u8 info[0];
+ u8 info[];
} __packed;
/**
@@ -807,7 +807,7 @@ struct qlink_mac_address {
struct qlink_acl_data {
__le32 policy;
__le32 num_entries;
- struct qlink_mac_address mac_addrs[0];
+ struct qlink_mac_address mac_addrs[];
} __packed;
/**
@@ -882,7 +882,7 @@ enum qlink_wowlan_trigger {
struct qlink_cmd_wowlan_set {
struct qlink_cmd chdr;
__le32 triggers;
- u8 data[0];
+ u8 data[];
} __packed;
enum qlink_ndev_event_type {
@@ -958,7 +958,7 @@ struct qlink_cmd_scan {
u8 bssid[ETH_ALEN];
u8 scan_width;
u8 rsvd[3];
- u8 var_info[0];
+ u8 var_info[];
} __packed;
/**
@@ -972,7 +972,7 @@ struct qlink_cmd_update_owe {
struct qlink_cmd chdr;
u8 peer[ETH_ALEN];
__le16 status;
- u8 ies[0];
+ u8 ies[];
} __packed;
/* QLINK Command Responses messages related definitions
@@ -1106,7 +1106,7 @@ struct qlink_resp_get_mac_info {
u8 n_reg_rules;
u8 dfs_region;
u8 rsvd[3];
- u8 var_info[0];
+ u8 var_info[];
} __packed;
/**
@@ -1131,7 +1131,7 @@ struct qlink_resp_get_hw_info {
u8 mac_bitmap;
u8 total_tx_chain;
u8 total_rx_chain;
- u8 info[0];
+ u8 info[];
} __packed;
/**
@@ -1167,7 +1167,7 @@ struct qlink_resp_get_sta_info {
struct qlink_resp rhdr;
u8 sta_addr[ETH_ALEN];
u8 rsvd[2];
- u8 info[0];
+ u8 info[];
} __packed;
/**
@@ -1184,7 +1184,7 @@ struct qlink_resp_band_info_get {
u8 num_chans;
u8 num_bitrates;
u8 rsvd[1];
- u8 info[0];
+ u8 info[];
} __packed;
/**
@@ -1196,7 +1196,7 @@ struct qlink_resp_band_info_get {
struct qlink_resp_get_chan_stats {
struct qlink_resp rhdr;
__le32 chan_freq;
- u8 info[0];
+ u8 info[];
} __packed;
/**
@@ -1270,7 +1270,7 @@ struct qlink_event_sta_assoc {
struct qlink_event ehdr;
u8 sta_addr[ETH_ALEN];
__le16 frame_control;
- u8 ies[0];
+ u8 ies[];
} __packed;
/**
@@ -1297,7 +1297,7 @@ struct qlink_event_bss_join {
struct qlink_chandef chan;
u8 bssid[ETH_ALEN];
__le16 status;
- u8 ies[0];
+ u8 ies[];
} __packed;
/**
@@ -1339,7 +1339,7 @@ struct qlink_event_rxmgmt {
__le32 flags;
s8 sig_dbm;
u8 rsvd[3];
- u8 frame_data[0];
+ u8 frame_data[];
} __packed;
/**
@@ -1367,7 +1367,7 @@ struct qlink_event_scan_result {
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 bssid[ETH_ALEN];
u8 rsvd[2];
- u8 payload[0];
+ u8 payload[];
} __packed;
/**
@@ -1456,7 +1456,7 @@ struct qlink_event_update_owe {
struct qlink_event ehdr;
u8 peer[ETH_ALEN];
u8 rsvd[2];
- u8 ies[0];
+ u8 ies[];
} __packed;
/* QLINK TLVs (Type-Length Values) definitions
@@ -1512,7 +1512,7 @@ enum qlink_tlv_id {
struct qlink_tlv_hdr {
__le16 type;
__le16 len;
- u8 val[0];
+ u8 val[];
} __packed;
struct qlink_iface_limit {
@@ -1524,7 +1524,7 @@ struct qlink_iface_limit_record {
__le16 max_interfaces;
u8 num_different_channels;
u8 n_limits;
- struct qlink_iface_limit limits[0];
+ struct qlink_iface_limit limits[];
} __packed;
#define QLINK_RSSI_OFFSET 120
@@ -1647,7 +1647,7 @@ struct qlink_tlv_ie_set {
u8 type;
u8 flags;
u8 rsvd[2];
- u8 ie_data[0];
+ u8 ie_data[];
} __packed;
/**
@@ -1660,7 +1660,7 @@ struct qlink_tlv_ext_ie {
struct qlink_tlv_hdr hdr;
u8 eid_ext;
u8 rsvd[3];
- u8 ie_data[0];
+ u8 ie_data[];
} __packed;
#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
@@ -1681,7 +1681,7 @@ struct qlink_tlv_iftype_data {
struct qlink_tlv_hdr hdr;
u8 n_iftype_data;
u8 rsvd[3];
- struct qlink_sband_iftype_data iftype_data[0];
+ struct qlink_sband_iftype_data iftype_data[];
} __packed;
/**
@@ -1867,7 +1867,7 @@ struct qlink_random_mac_addr {
struct qlink_wowlan_capab_data {
__le16 version;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
/**
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 6beac1f74e7c..a779fe771a55 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -9971,9 +9971,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_is_usb(rt2x00dev))
ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
- /* Set MFP if HW crypto is disabled. */
- if (rt2800_hwcrypt_disabled(rt2x00dev))
- ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE);
+ ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 32efbc8e9f92..2f68a31072ae 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -468,7 +468,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
- if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
+ /* The hardware can't do MFP */
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || (sta && sta->mfp))
return -EOPNOTSUPP;
/*
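rt2x00's crypto engine cannot produce the BIP MIC that protected management frames need, so set_key now declines the key for MFP peers; mac80211 responds to an error from set_key by encrypting that key's traffic in software, which is what makes advertising MFP_CAPABLE unconditionally in the rt2800 hunk above safe. A userspace analogue of the fallback contract (names illustrative):

    #include <stdio.h>
    #include <stdbool.h>
    #include <errno.h>

    /* offload hook declines work the hardware cannot do correctly */
    static int hw_set_key(bool hw_crypto, bool peer_mfp)
    {
        if (!hw_crypto || peer_mfp)
            return -EOPNOTSUPP;     /* caller falls back to software */
        return 0;
    }

    int main(void)
    {
        int ret = hw_set_key(true, true);   /* peer uses MFP */
        printf("%s crypto\n", ret == -EOPNOTSUPP ? "software" : "hardware");
        return 0;
    }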
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index c1d542bfa530..bf3fbd14eda3 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2794,8 +2794,7 @@ static int __init init_ray_cs(void)
proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_ops,
&translate);
#endif
- if (translate != 0)
- translate = 1;
+ translate = !!translate;
return 0;
} /* init_ray_cs */
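The replacement is the stock C idiom for clamping a module parameter to 0/1: double negation maps any non-zero value to exactly 1 without a branch.

    #include <stdio.h>

    int main(void)
    {
        int translate = 42;

        translate = !!translate;    /* any non-zero -> 1, zero stays 0 */
        printf("%d\n", translate);  /* prints 1 */
        return 0;
    }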
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
index b2616d61b66d..585784258c66 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
@@ -606,10 +606,6 @@ static const u8 rtl8225z2_tx_power_cck[] = {
0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03
};
-static const u8 rtl8225z2_tx_power_ofdm[] = {
- 0x42, 0x00, 0x40, 0x00, 0x40
-};
-
static const u8 rtl8225z2_tx_gain_cck_ofdm[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index c75192c4447f..a4489b9302d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -505,7 +505,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
rtlpriv->rfkill.rfkill_state = radio_state;
- blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ blocked = rtlpriv->rfkill.rfkill_state != 1;
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f73e690bbe8e..4dd82c6052f0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1722,7 +1722,7 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
"wireless radio switch turned %s\n",
radio_state ? "on" : "off");
- blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ blocked = !rtlpriv->rfkill.rfkill_state;
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
index 0f401ad92c2e..c376817a1bf4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
@@ -51,7 +51,7 @@ void rtl88e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
if (rtlefuse->eeprom_regulatory != 0)
turbo_scanoff = true;
- if (mac->act_scanning == true) {
+ if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
@@ -473,7 +473,7 @@ static bool _rtl88e_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
break;
}
- if (rtstatus != true) {
+ if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Radio[%d] Fail!!\n", rfpath);
return false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 4865639ac9ea..02b77521b5cd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -67,9 +67,9 @@ static int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
char *fw_name;
rtl8188ee_bt_reg_init(hw);
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(15);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 6dba576aa81e..bb291b951f4d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -2866,14 +2866,12 @@ void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
}
}
- for (i = 0; i < 4; i++) {
- reg_e94 = result[i][0];
- reg_e9c = result[i][1];
- reg_ea4 = result[i][2];
- reg_eb4 = result[i][4];
- reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- }
+ reg_e94 = result[3][0];
+ reg_e9c = result[3][1];
+ reg_ea4 = result[3][2];
+ reg_eb4 = result[3][4];
+ reg_ebc = result[3][5];
+ reg_ec4 = result[3][6];
if (final_candidate != 0xff) {
reg_e94 = result[final_candidate][0];
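The deleted loop assigned the same six scalars on every pass, so only the i == 3 values ever survived; reading result[3] directly is behaviour-preserving and makes that visible. A tiny demonstration of the equivalence:

    #include <stdio.h>

    int main(void)
    {
        int result[4][8] = { { 1 }, { 2 }, { 3 }, { 4 } };
        int reg_e94 = 0;

        for (int i = 0; i < 4; i++)     /* each pass clobbers the last */
            reg_e94 = result[i][0];

        printf("%d == %d\n", reg_e94, result[3][0]);    /* 4 == 4 */
        return 0;
    }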
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index b337d599b6f4..7a16563b3a5d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -75,9 +75,9 @@ static int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpci->transmit_config = CFENDFORM | BIT(15);
/*just 2.4G band*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 680198280f8f..652d8ff9cccb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -131,7 +131,7 @@ static bool rtl8723e_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
(rtlpriv->btcoexist.previous_state_h ==
rtlpriv->btcoexist.cstate_h)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[DM][BT], Coexist state do not chang!!\n");
+ "[DM][BT], Coexist state do not change!!\n");
return true;
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 655460f61bbc..7a46c6a9deae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -614,22 +614,22 @@ static bool _rtl8723e_llt_table_init(struct ieee80211_hw *hw)
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
status = _rtl8723e_llt_write(hw, i, i + 1);
- if (true != status)
+ if (!status)
return status;
}
status = _rtl8723e_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
- if (true != status)
+ if (!status)
return status;
for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl8723e_llt_write(hw, i, (i + 1));
- if (true != status)
+ if (!status)
return status;
}
status = _rtl8723e_llt_write(hw, maxpage, txpktbuf_bndy);
- if (true != status)
+ if (!status)
return status;
rtl_write_byte(rtlpriv, REG_CR, 0xff);
@@ -934,7 +934,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl8712e_init_mac(hw);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("Init MAC failed\n");
err = 1;
goto exit;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index ea86d5bf33d2..7828acb1de3f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -78,9 +78,9 @@ static int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 36209ac5b208..d220e8955e37 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -74,9 +74,9 @@ static int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
rtl8723be_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index d8df816753cb..950542a24e31 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -76,9 +76,9 @@ static int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtl8821ae_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 348b0072cdd6..c66c6dc00378 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -881,10 +881,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
WARN_ON(NULL == skb);
_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!_urb) {
- kfree_skb(skb);
+ if (!_urb)
return NULL;
- }
_rtl_install_trx_info(rtlusb, skb, ep_num);
usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev,
ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
@@ -898,7 +896,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
u32 ep_num;
struct urb *_urb = NULL;
- struct sk_buff *_skb = NULL;
WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
if (unlikely(IS_USB_STOP(rtlusb))) {
@@ -907,8 +904,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
return;
}
ep_num = rtlusb->ep_map.ep_mapping[qnum];
- _skb = skb;
- _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
+ _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num);
if (unlikely(!_urb)) {
pr_err("Can't allocate urb. Drop skb!\n");
kfree_skb(skb);
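Previously _rtl_usb_tx_urb_setup() freed the skb itself when usb_alloc_urb() failed while this caller freed it again on the NULL return, a double free; the fix leaves error-path ownership entirely with the caller. The single-owner rule in miniature (names illustrative):

    #include <stdlib.h>

    /* helper never releases what it did not allocate */
    static void *urb_setup(void *skb)
    {
        void *urb = malloc(16);
        if (!urb)
            return NULL;        /* caller still owns skb */
        (void)skb;              /* would be attached to the urb here */
        return urb;
    }

    int main(void)
    {
        void *skb = malloc(32);
        void *urb = skb ? urb_setup(skb) : NULL;
        if (!urb) {
            free(skb);          /* exactly one free on the error path */
        } else {
            free(urb);
            free(skb);
        }
        return 0;
    }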
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
index 33bd7ed797ff..ca894c4f96ac 100644
--- a/drivers/net/wireless/realtek/rtw88/Kconfig
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -16,26 +16,48 @@ config RTW88_CORE
config RTW88_PCI
tristate
+config RTW88_8822B
+ tristate
+
+config RTW88_8822C
+ tristate
+
+config RTW88_8723D
+ tristate
+
config RTW88_8822BE
- bool "Realtek 8822BE PCI wireless network adapter"
+ tristate "Realtek 8822BE PCI wireless network adapter"
depends on PCI
select RTW88_CORE
select RTW88_PCI
+ select RTW88_8822B
help
Select this option will enable support for 8822BE chipset
802.11ac PCIe wireless network adapter
config RTW88_8822CE
- bool "Realtek 8822CE PCI wireless network adapter"
+ tristate "Realtek 8822CE PCI wireless network adapter"
depends on PCI
select RTW88_CORE
select RTW88_PCI
+ select RTW88_8822C
help
Select this option will enable support for 8822CE chipset
802.11ac PCIe wireless network adapter
+config RTW88_8723DE
+ tristate "Realtek 8723DE PCI wireless network adapter"
+ depends on PCI
+ select RTW88_CORE
+ select RTW88_PCI
+ select RTW88_8723D
+ help
+ Select this option will enable support for 8723DE chipset
+ 802.11n PCIe wireless network adapter
+
config RTW88_DEBUG
bool "Realtek rtw88 debug support"
depends on RTW88_CORE
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index cac148d13cf1..f31e78a6f146 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-obj-$(CONFIG_RTW88_CORE) += rtw88.o
-rtw88-y += main.o \
+obj-$(CONFIG_RTW88_CORE) += rtw88_core.o
+rtw88_core-y += main.o \
mac80211.o \
util.o \
debug.o \
@@ -18,8 +18,24 @@ rtw88-y += main.o \
wow.o \
regd.o
-rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
-rtw88-$(CONFIG_RTW88_8822CE) += rtw8822c.o rtw8822c_table.o
-obj-$(CONFIG_RTW88_PCI) += rtwpci.o
-rtwpci-objs := pci.o
+obj-$(CONFIG_RTW88_8822B) += rtw88_8822b.o
+rtw88_8822b-objs := rtw8822b.o rtw8822b_table.o
+
+obj-$(CONFIG_RTW88_8822BE) += rtw88_8822be.o
+rtw88_8822be-objs := rtw8822be.o
+
+obj-$(CONFIG_RTW88_8822C) += rtw88_8822c.o
+rtw88_8822c-objs := rtw8822c.o rtw8822c_table.o
+
+obj-$(CONFIG_RTW88_8822CE) += rtw88_8822ce.o
+rtw88_8822ce-objs := rtw8822ce.o
+
+obj-$(CONFIG_RTW88_8723D) += rtw88_8723d.o
+rtw88_8723d-objs := rtw8723d.o rtw8723d_table.o
+
+obj-$(CONFIG_RTW88_8723DE) += rtw88_8723de.o
+rtw88_8723de-objs := rtw8723de.o
+
+obj-$(CONFIG_RTW88_PCI) += rtw88_pci.o
+rtw88_pci-objs := pci.o
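Turning the chip options into tristates splits rtw88 into a core module plus per-chipset modules, and the Makefile renames follow suit; that split is also why bf.c, coex.c, efuse.c, fw.c and mac.c below gain EXPORT_SYMBOL() markers, since every core function the chip modules call now crosses a module boundary. The mechanism, as a kernel-side sketch (function name invented, only buildable inside a kernel tree):

    #include <linux/module.h>

    int rtw_demo_helper(int x)          /* hypothetical core function */
    {
        return x + 1;
    }
    EXPORT_SYMBOL(rtw_demo_helper);     /* resolvable from rtw88_8822b.ko etc. */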
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index b6d1d71f4d30..8a070d5d9174 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -10,7 +10,6 @@
void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
struct rtw_bfee *bfee = &rtwvif->bfee;
struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
@@ -23,7 +22,7 @@ void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
else if (bfee->role == RTW_BFEE_SU)
bfinfo->bfer_su_cnt--;
- chip->ops->config_bfee(rtwdev, rtwvif, bfee, false);
+ rtw_chip_config_bfee(rtwdev, rtwvif, bfee, false);
bfee->role = RTW_BFEE_NONE;
}
@@ -71,7 +70,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
bfee->aid = bss_conf->aid;
bfinfo->bfer_mu_cnt++;
- chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true);
} else if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) &&
(vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
if (bfinfo->bfer_su_cnt >= chip->bfer_su_max_num) {
@@ -97,7 +96,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
}
- chip->ops->config_bfee(rtwdev, rtwvif, bfee, true);
+ rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true);
}
out_unlock:
@@ -221,6 +220,7 @@ void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
/* ndp rx standby timer */
rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, RTW_NDP_RX_STANDBY_TIME);
}
+EXPORT_SYMBOL(rtw_bf_enable_bfee_su);
/* nc index: 1 2T2R 0 1T1R
* nr index: 1 use Nsts 0 use reg setting
@@ -264,6 +264,7 @@ void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif,
/* accept NDPA and BF report poll */
rtw_write16_set(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF);
}
+EXPORT_SYMBOL(rtw_bf_enable_bfee_mu);
void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev,
struct rtw_bfee *bfee)
@@ -289,6 +290,7 @@ void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev,
clear_bit(bfee->su_reg_index, bfinfo->bfer_su_reg_maping);
bfee->su_reg_index = 0xFF;
}
+EXPORT_SYMBOL(rtw_bf_remove_bfee_su);
void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev,
struct rtw_bfee *bfee)
@@ -302,6 +304,7 @@ void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev,
if (bfinfo->bfer_su_cnt == 0 && bfinfo->bfer_mu_cnt == 0)
rtw_bf_del_sounding(rtwdev);
}
+EXPORT_SYMBOL(rtw_bf_remove_bfee_mu);
void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf)
@@ -330,6 +333,7 @@ void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtw_bf_cfg_mu_bfee(rtwdev, &param);
}
+EXPORT_SYMBOL(rtw_bf_set_gid_table);
void rtw_bf_phy_init(struct rtw_dev *rtwdev)
{
@@ -366,6 +370,7 @@ void rtw_bf_phy_init(struct rtw_dev *rtwdev)
rtw_write32_mask(rtwdev, REG_BBPSF_CTRL, BIT_MASK_CSI_RATE,
DESC_RATE6M);
}
+EXPORT_SYMBOL(rtw_bf_phy_init);
void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
u8 fixrate_en, u8 *new_rate)
@@ -396,3 +401,4 @@ void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
*new_rate = DESC_RATE24M;
}
}
+EXPORT_SYMBOL(rtw_bf_cfg_csi_rate);
diff --git a/drivers/net/wireless/realtek/rtw88/bf.h b/drivers/net/wireless/realtek/rtw88/bf.h
index 96a8216dd11f..17855edb5006 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.h
+++ b/drivers/net/wireless/realtek/rtw88/bf.h
@@ -89,4 +89,26 @@ void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
void rtw_bf_phy_init(struct rtw_dev *rtwdev);
void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
u8 fixrate_en, u8 *new_rate);
+static inline void rtw_chip_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+ struct rtw_bfee *bfee, bool enable)
+{
+ if (rtwdev->chip->ops->config_bfee)
+ rtwdev->chip->ops->config_bfee(rtwdev, vif, bfee, enable);
+}
+
+static inline void rtw_chip_set_gid_table(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf)
+{
+ if (rtwdev->chip->ops->set_gid_table)
+ rtwdev->chip->ops->set_gid_table(rtwdev, vif, conf);
+}
+
+static inline void rtw_chip_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
+ u8 fixrate_en, u8 *new_rate)
+{
+ if (rtwdev->chip->ops->cfg_csi_rate)
+ rtwdev->chip->ops->cfg_csi_rate(rtwdev, rssi, cur_rate,
+ fixrate_en, new_rate);
+}
#endif
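With an 11n part in the family that has no beamforming hardware, the BFEE chip ops become optional; these inline wrappers centralise the NULL check so call sites such as rtw_bf_assoc() stay unconditional. The pattern in isolation (struct and names illustrative):

    #include <stdio.h>

    struct chip_ops {
        void (*config_bfee)(int enable);    /* optional, may be NULL */
    };

    static inline void chip_config_bfee(const struct chip_ops *ops, int enable)
    {
        if (ops->config_bfee)               /* safe default: do nothing */
            ops->config_bfee(enable);
    }

    int main(void)
    {
        struct chip_ops legacy = { 0 };     /* chip without beamforming */
        chip_config_bfee(&legacy, 1);       /* harmless no-op */
        printf("no crash\n");
        return 0;
    }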
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 567372fb4e12..cbf3d503df1c 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -283,6 +283,7 @@ void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set)
rtw_write16(rtwdev, REG_WIFI_BT_INFO, val);
}
}
+EXPORT_SYMBOL(rtw_coex_write_scbd);
static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev)
{
@@ -732,6 +733,7 @@ u32 rtw_coex_read_indirect_reg(struct rtw_dev *rtwdev, u16 addr)
return val;
}
+EXPORT_SYMBOL(rtw_coex_read_indirect_reg);
void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr,
u32 mask, u32 val)
@@ -745,13 +747,22 @@ void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr,
if (!ltecoex_reg_write(rtwdev, addr, tmp))
rtw_err(rtwdev, "failed to write indirect register\n");
}
+EXPORT_SYMBOL(rtw_coex_write_indirect_reg);
static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control)
{
- if (wifi_control)
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_hw_reg *btg_reg = chip->btg_reg;
+
+ if (wifi_control) {
rtw_write32_set(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH);
- else
+ if (btg_reg)
+ rtw_write8_set(rtwdev, btg_reg->addr, btg_reg->mask);
+ } else {
rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH);
+ if (btg_reg)
+ rtw_write8_clr(rtwdev, btg_reg->addr, btg_reg->mask);
+ }
}
static void rtw_coex_set_gnt_bt(struct rtw_dev *rtwdev, u8 state)
@@ -1343,12 +1354,15 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
tdma_case = 108;
else
tdma_case = 109;
+ } else if (coex_stat->wl_gl_busy) {
+ table_case = 114;
+ tdma_case = 121;
} else if (coex_stat->wl_connected) {
- table_case = 101;
- tdma_case = 110;
- } else {
table_case = 100;
tdma_case = 100;
+ } else {
+ table_case = 101;
+ tdma_case = 100;
}
}
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index b4964306de61..09f04feb8fe1 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -531,8 +531,8 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
u8 ch = hal->current_channel;
u8 regd = rtwdev->regd.txpwr_regd;
- seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s)\n",
- "path", "rate", "pwr", "", "base", "", "byr", "lmt");
+ seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n",
+ "path", "rate", "pwr", "", "base", "", "byr", "lmt", "rem");
mutex_lock(&hal->tx_power_mutex);
for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
@@ -554,13 +554,14 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
seq_printf(m, "%4c ", path + 'A');
rtw_print_rate(m, rate);
- seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d)\n",
+ seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d) %4d\n",
hal->tx_pwr_tbl[path][rate],
hal->tx_pwr_tbl[path][rate],
pwr_param.pwr_base,
min_t(s8, pwr_param.pwr_offset,
pwr_param.pwr_limit),
- pwr_param.pwr_offset, pwr_param.pwr_limit);
+ pwr_param.pwr_offset, pwr_param.pwr_limit,
+ pwr_param.pwr_remnant);
}
}
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.c b/drivers/net/wireless/realtek/rtw88/efuse.c
index 212c8376a8c9..c266c84ef233 100644
--- a/drivers/net/wireless/realtek/rtw88/efuse.c
+++ b/drivers/net/wireless/realtek/rtw88/efuse.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2018-2019 Realtek Corporation
*/
+#include <linux/iopoll.h>
+
#include "main.h"
#include "efuse.h"
#include "reg.h"
@@ -90,6 +92,8 @@ static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
u32 addr;
u32 cnt;
+ rtw_chip_efuse_grant_on(rtwdev);
+
switch_efuse_bank(rtwdev);
/* disable 2.5V LDO */
@@ -113,8 +117,31 @@ static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
*(map + addr) = (u8)(efuse_ctl & BIT_MASK_EF_DATA);
}
+ rtw_chip_efuse_grant_off(rtwdev);
+
+ return 0;
+}
+
+int rtw_read8_physical_efuse(struct rtw_dev *rtwdev, u16 addr, u8 *data)
+{
+ u32 efuse_ctl;
+ int ret;
+
+ rtw_write32_mask(rtwdev, REG_EFUSE_CTRL, 0x3ff00, addr);
+ rtw_write32_clr(rtwdev, REG_EFUSE_CTRL, BIT_EF_FLAG);
+
+ ret = read_poll_timeout(rtw_read32, efuse_ctl, efuse_ctl & BIT_EF_FLAG,
+ 1000, 100000, false, rtwdev, REG_EFUSE_CTRL);
+ if (ret) {
+ *data = EFUSE_READ_FAIL;
+ return ret;
+ }
+
+ *data = rtw_read8(rtwdev, REG_EFUSE_CTRL);
+
return 0;
}
+EXPORT_SYMBOL(rtw_read8_physical_efuse);
int rtw_parse_efuse_map(struct rtw_dev *rtwdev)
{
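read_poll_timeout() from linux/iopoll.h packages the read, test, sleep, deadline loop and returns -ETIMEDOUT on expiry, which is what lets this function (and the H2C box wait in fw.c below, via the _atomic flavour) drop its hand-rolled counters. A userspace analogue at 1 ms resolution:

    #include <stdio.h>
    #include <time.h>

    /* re-read *reg until a bit is set or timeout_ms elapses */
    static int poll_flag(volatile unsigned int *reg, unsigned int bit,
                         long timeout_ms)
    {
        struct timespec one_ms = { 0, 1000000L };

        for (long waited = 0; waited < timeout_ms; waited++) {
            if (*reg & bit)
                return 0;
            nanosleep(&one_ms, NULL);
        }
        return -1;      /* the kernel helper returns -ETIMEDOUT here */
    }

    int main(void)
    {
        volatile unsigned int efuse_ctl = 1u << 31;     /* flag already set */
        printf("%d\n", poll_flag(&efuse_ctl, 1u << 31, 100));
        return 0;
    }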
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.h b/drivers/net/wireless/realtek/rtw88/efuse.h
index 115bbe85946a..97a51f0b0e46 100644
--- a/drivers/net/wireless/realtek/rtw88/efuse.h
+++ b/drivers/net/wireless/realtek/rtw88/efuse.h
@@ -10,6 +10,8 @@
#define EFUSE_HW_CAP_SUPP_BW80 7
#define EFUSE_HW_CAP_SUPP_BW40 6
+#define EFUSE_READ_FAIL 0xff
+
#define GET_EFUSE_HW_CAP_HCI(hw_cap) \
le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(3, 0))
#define GET_EFUSE_HW_CAP_BW(hw_cap) \
@@ -22,5 +24,6 @@
le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(27, 26))
int rtw_parse_efuse_map(struct rtw_dev *rtwdev);
+int rtw_read8_physical_efuse(struct rtw_dev *rtwdev, u16 addr, u8 *data);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 05c430b3489c..6478fd7a78f6 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2018-2019 Realtek Corporation
*/
+#include <linux/iopoll.h>
+
#include "main.h"
#include "coex.h"
#include "fw.h"
@@ -23,7 +25,7 @@ static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
switch (sub_cmd_id) {
case C2H_CCX_RPT:
- rtw_tx_report_handle(rtwdev, skb);
+ rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
break;
default:
break;
@@ -140,6 +142,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
goto unlock;
switch (c2h->id) {
+ case C2H_CCX_TX_RPT:
+ rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
+ break;
case C2H_BT_INFO:
rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
break;
@@ -153,6 +158,7 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
break;
default:
+ rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
break;
}
@@ -193,8 +199,8 @@ static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
u8 box;
u8 box_state;
u32 box_reg, box_ex_reg;
- u32 h2c_wait;
int idx;
+ int ret;
rtw_dbg(rtwdev, RTW_DBG_FW,
"send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
@@ -226,12 +232,11 @@ static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
goto out;
}
- h2c_wait = 20;
- do {
- box_state = rtw_read8(rtwdev, REG_HMETFR);
- } while ((box_state >> box) & 0x1 && --h2c_wait > 0);
+ ret = read_poll_timeout_atomic(rtw_read8, box_state,
+ !((box_state >> box) & 0x1), 100, 3000,
+ false, rtwdev, REG_HMETFR);
- if (!h2c_wait) {
+ if (ret) {
rtw_err(rtwdev, "failed to send h2c command\n");
goto out;
}
@@ -270,6 +275,9 @@ rtw_fw_send_general_info(struct rtw_dev *rtwdev)
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u16 total_size = H2C_PKT_HDR_SIZE + 4;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return;
+
rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);
SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
@@ -290,6 +298,9 @@ rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
u16 total_size = H2C_PKT_HDR_SIZE + 8;
u8 fw_rf_type = 0;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return;
+
if (hal->rf_type == RF_1T1R)
fw_rf_type = FW_RF_1T1R;
else if (hal->rf_type == RF_2T2R)
@@ -319,6 +330,7 @@ void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
+EXPORT_SYMBOL(rtw_fw_do_iqk);
void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
{
@@ -630,8 +642,8 @@ void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
- struct cfg80211_ssid *ssid)
+static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
{
struct rtw_rsvd_page *rsvd_pkt;
u8 location = 0;
@@ -647,8 +659,8 @@ u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
return location;
}
-u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
- struct cfg80211_ssid *ssid)
+static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
{
struct rtw_rsvd_page *rsvd_pkt;
u16 size = 0;
@@ -1078,6 +1090,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 bckp[2];
u8 val;
u16 rsvd_pg_head;
+ u32 bcn_valid_addr;
+ u32 bcn_valid_mask;
int ret;
lockdep_assert_held(&rtwdev->mutex);
@@ -1085,8 +1099,13 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
if (!size)
return -EINVAL;
- pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
- rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr | BIT_BCN_VALID_V1);
+ if (rtw_chip_wcpu_11n(rtwdev)) {
+ rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
+ } else {
+ pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
+ pg_addr |= BIT_BCN_VALID_V1;
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr);
+ }
val = rtw_read8(rtwdev, REG_CR + 1);
bckp[0] = val;
@@ -1104,7 +1123,15 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
goto restore;
}
- if (!check_hw_ready(rtwdev, REG_FIFOPAGE_CTRL_2, BIT_BCN_VALID_V1, 1)) {
+ if (rtw_chip_wcpu_11n(rtwdev)) {
+ bcn_valid_addr = REG_DWBCN0_CTRL;
+ bcn_valid_mask = BIT_BCN_VALID;
+ } else {
+ bcn_valid_addr = REG_FIFOPAGE_CTRL_2;
+ bcn_valid_mask = BIT_BCN_VALID_V1;
+ }
+
+ if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) {
rtw_err(rtwdev, "error beacon valid\n");
ret = -EBUSY;
}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index cdd244857048..470e1809645a 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -19,7 +19,14 @@
#define RSVD_PAGE_START_ADDR 0x780
#define FIFO_DUMP_ADDR 0x8000
+#define DLFW_PAGE_SIZE_SHIFT_LEGACY 12
+#define DLFW_PAGE_SIZE_LEGACY 0x1000
+#define DLFW_BLK_SIZE_SHIFT_LEGACY 2
+#define DLFW_BLK_SIZE_LEGACY 4
+#define FW_START_ADDR_LEGACY 0x1000
+
enum rtw_c2h_cmd_id {
+ C2H_CCX_TX_RPT = 0x03,
C2H_BT_INFO = 0x09,
C2H_BT_MP_INFO = 0x0b,
C2H_RA_RPT = 0x0c,
@@ -192,9 +199,30 @@ struct rtw_fw_hdr {
__le32 imem_addr;
} __packed;
+struct rtw_fw_hdr_legacy {
+ __le16 signature;
+ u8 category;
+ u8 function;
+ __le16 version; /* 0x04 */
+ u8 subversion1;
+ u8 subversion2;
+ u8 month; /* 0x08 */
+ u8 day;
+ u8 hour;
+ u8 minute;
+ __le16 size;
+ __le16 rsvd2;
+ __le32 idx; /* 0x10 */
+ __le32 rsvd3;
+ __le32 rsvd4; /* 0x18 */
+ __le32 rsvd5;
+} __packed;
+
/* C2H */
-#define GET_CCX_REPORT_SEQNUM(c2h_payload) (c2h_payload[8] & 0xfc)
-#define GET_CCX_REPORT_STATUS(c2h_payload) (c2h_payload[9] & 0xc0)
+#define GET_CCX_REPORT_SEQNUM_V0(c2h_payload) (c2h_payload[6] & 0xfc)
+#define GET_CCX_REPORT_STATUS_V0(c2h_payload) (c2h_payload[0] & 0xc0)
+#define GET_CCX_REPORT_SEQNUM_V1(c2h_payload) (c2h_payload[8] & 0xfc)
+#define GET_CCX_REPORT_STATUS_V1(c2h_payload) (c2h_payload[9] & 0xc0)
#define GET_RA_REPORT_RATE(c2h_payload) (c2h_payload[0] & 0x7f)
#define GET_RA_REPORT_SGI(c2h_payload) ((c2h_payload[0] & 0x80) >> 7)
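The V0/V1 macro pairs exist because the 11n-generation firmware places the CCX report's status and sequence bytes at different payload offsets than the 11ac firmware, so rtw_tx_report_handle() must pick the set matching the C2H ID it was handed. The extraction style in miniature, using the V0/V1 status offsets from the hunk above:

    #include <stdio.h>

    #define GET_STATUS_V0(p)   ((p)[0] & 0xc0)  /* legacy report: byte 0 */
    #define GET_STATUS_V1(p)   ((p)[9] & 0xc0)  /* 11ac report: byte 9 */

    int main(void)
    {
        unsigned char payload[16] = { 0x40, [9] = 0x80 };

        printf("v0=0x%02x v1=0x%02x\n",
               GET_STATUS_V0(payload), GET_STATUS_V1(payload));
        return 0;
    }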
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 7b245779ff90..19b9b7ab016b 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -40,6 +40,9 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
}
rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return;
+
value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
rtw_write32(rtwdev, REG_AFE_CTRL1, value32);
@@ -53,6 +56,7 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
value8 |= BIT_CHECK_CCK_EN;
rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
+EXPORT_SYMBOL(rtw_set_channel_mac);
static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
@@ -61,6 +65,14 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_RSV_CTRL, 0);
+ if (rtw_chip_wcpu_11n(rtwdev)) {
+ if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
+ rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
+ else
+ rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
+ return 0;
+ }
+
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
@@ -100,42 +112,55 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
return 0;
}
+static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
+{
+ u32 cnt;
+
+ target &= mask;
+
+ for (cnt = 0; cnt < RTW_PWR_POLLING_CNT; cnt++) {
+ if ((rtw_read8(rtwdev, addr) & mask) == target)
+ return true;
+
+ udelay(50);
+ }
+
+ return false;
+}
+
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
const struct rtw_pwr_seq_cmd *cmd)
{
u8 value;
- u8 flag = 0;
u32 offset;
- u32 cnt = RTW_PWR_POLLING_CNT;
if (cmd->base == RTW_PWR_ADDR_SDIO)
offset = cmd->offset | SDIO_LOCAL_OFFSET;
else
offset = cmd->offset;
- do {
- cnt--;
- value = rtw_read8(rtwdev, offset);
- value &= cmd->mask;
- if (value == (cmd->value & cmd->mask))
- return 0;
- if (cnt == 0) {
- if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
- flag == 0) {
- value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
- value |= BIT(3);
- rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
- value &= ~BIT(3);
- rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
- cnt = RTW_PWR_POLLING_CNT;
- flag = 1;
- } else {
- return -EBUSY;
- }
- } else {
- udelay(50);
- }
- } while (1);
+ if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
+ return 0;
+
+ if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
+ goto err;
+
+ /* if PCIE, toggle BIT_PFM_WOWL and try again */
+ value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
+
+ if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
+ return 0;
+
+err:
+ rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
+ offset, cmd->mask, cmd->value);
+ return -EBUSY;
}
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
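Hoisting the raw polling loop into do_pwr_poll_cmd() turns the old counter-and-flag tangle into poll, kick the hardware, poll once more; on PCIe the kick is a BIT_PFM_WOWL toggle, done in the opposite order on 8723D. The control-flow skeleton (hardware access stubbed out):

    #include <stdio.h>
    #include <stdbool.h>

    static bool poll_ok(const int *reg, int want)
    {
        return (*reg & 0xff) == want;       /* stand-in for a masked read */
    }

    static int poll_with_retry(int *reg, int want)
    {
        if (poll_ok(reg, want))
            return 0;
        *reg = want;                        /* stand-in for the WOWL toggle */
        if (poll_ok(reg, want))
            return 0;
        return -1;                          /* -EBUSY in the driver */
    }

    int main(void)
    {
        int reg = 0;
        printf("%d\n", poll_with_retry(&reg, 1));
        return 0;
    }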
@@ -228,12 +253,14 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
u8 rpwm;
bool cur_pwr;
- rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
+ if (rtw_chip_wcpu_11ac(rtwdev)) {
+ rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
- /* Check FW still exist or not */
- if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
- rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
- rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
+ /* Check FW still exist or not */
+ if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
+ rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
+ rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
+ }
}
if (rtw_read8(rtwdev, REG_CR) == 0xea)
@@ -244,7 +271,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
else
cur_pwr = true;
- if (pwr_on && cur_pwr)
+ if (pwr_on == cur_pwr)
return -EALREADY;
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
@@ -254,7 +281,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
return 0;
}
-static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
+static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
u8 sys_func_en = rtwdev->chip->sys_func_en;
u8 value8;
@@ -279,6 +306,29 @@ static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
return 0;
}
+static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_CR, 0xff);
+ mdelay(2);
+ rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
+ mdelay(2);
+
+ rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
+ rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);
+
+ rtw_write16(rtwdev, REG_CR, 0x2ff);
+
+ return 0;
+}
+
+static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
+{
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return __rtw_mac_init_system_cfg_legacy(rtwdev);
+
+ return __rtw_mac_init_system_cfg(rtwdev);
+}
+
int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
int ret = 0;
@@ -650,7 +700,8 @@ static void download_firmware_end_flow(struct rtw_dev *rtwdev)
rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}
-int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
+static int __rtw_download_firmware(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
{
struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
const u8 *data = fw->firmware->data;
@@ -704,6 +755,151 @@ dlfw_fail:
return ret;
}
+static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
+{
+ int try;
+
+ if (en) {
+ wlan_cpu_enable(rtwdev, false);
+ wlan_cpu_enable(rtwdev, true);
+
+ rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
+
+ for (try = 0; try < 10; try++) {
+ if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
+ goto fwdl_ready;
+ rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
+ msleep(20);
+ }
+ rtw_err(rtwdev, "failed to check fw download ready\n");
+fwdl_ready:
+ rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
+ } else {
+ rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
+ }
+}
+
+static void
+write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
+{
+ u32 val32;
+ u32 block_nr;
+ u32 remain_size;
+ u32 write_addr = FW_START_ADDR_LEGACY;
+ const __le32 *ptr = (const __le32 *)data;
+ u32 block;
+ __le32 remain_data = 0;
+
+ block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
+ remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);
+
+ val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
+ val32 &= ~BIT_ROM_PGE;
+ val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
+ rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);
+
+ for (block = 0; block < block_nr; block++) {
+ rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));
+
+ write_addr += DLFW_BLK_SIZE_LEGACY;
+ ptr++;
+ }
+
+ if (remain_size) {
+ memcpy(&remain_data, ptr, remain_size);
+ rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
+ }
+}
+
+static int
+download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
+{
+ u32 page;
+ u32 total_page;
+ u32 last_page_size;
+
+ data += sizeof(struct rtw_fw_hdr_legacy);
+ size -= sizeof(struct rtw_fw_hdr_legacy);
+
+ total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
+ last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);
+
+ rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);
+
+ for (page = 0; page < total_page; page++) {
+ write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
+ data += DLFW_PAGE_SIZE_LEGACY;
+ }
+ if (last_page_size)
+ write_firmware_page(rtwdev, page, data, last_page_size);
+
+ if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
+ rtw_err(rtwdev, "failed to check download firmware report\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
+{
+ u32 val32;
+ int try;
+
+ val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
+ val32 |= BIT_MCUFWDL_RDY;
+ val32 &= ~BIT_WINTINI_RDY;
+ rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);
+
+ wlan_cpu_enable(rtwdev, false);
+ wlan_cpu_enable(rtwdev, true);
+
+ for (try = 0; try < 10; try++) {
+ val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
+ if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
+ return 0;
+ msleep(20);
+ }
+
+ rtw_err(rtwdev, "failed to validate firmware\n");
+ return -EINVAL;
+}
+
+static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ int ret = 0;
+
+ en_download_firmware_legacy(rtwdev, true);
+ ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
+ en_download_firmware_legacy(rtwdev, false);
+ if (ret)
+ goto out;
+
+ ret = download_firmware_validate_legacy(rtwdev);
+ if (ret)
+ goto out;
+
+ /* reset desc and index */
+ rtw_hci_setup(rtwdev);
+
+ rtwdev->h2c.last_box_num = 0;
+ rtwdev->h2c.seq = 0;
+
+ set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
+
+out:
+ return ret;
+}
+
+int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
+{
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return __rtw_download_firmware_legacy(rtwdev, fw);
+
+ return __rtw_download_firmware(rtwdev, fw);
+}
+
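The legacy download path pushes the image through a 4 KiB register window one 32-bit word at a time, zero-padding the final partial word; the page/block arithmetic comes from the DLFW_*_LEGACY constants added in fw.h. A userspace sketch of the block/tail split:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* split an image into 4-byte writes plus a padded tail, mirroring
     * the DLFW_BLK_SIZE_LEGACY arithmetic in write_firmware_page() */
    static void write_blocks(const uint8_t *data, uint32_t size)
    {
        uint32_t block_nr = size >> 2;      /* DLFW_BLK_SIZE_SHIFT_LEGACY */
        uint32_t remain = size & 3;         /* size % DLFW_BLK_SIZE_LEGACY */
        uint32_t word;

        for (uint32_t i = 0; i < block_nr; i++) {
            memcpy(&word, data + 4 * i, 4);
            printf("write32 +0x%x: 0x%08x\n", (unsigned)(4 * i), (unsigned)word);
        }
        if (remain) {
            word = 0;                       /* zero-pad the final word */
            memcpy(&word, data + 4 * block_nr, remain);
            printf("write32 +0x%x: 0x%08x\n",
                   (unsigned)(4 * block_nr), (unsigned)word);
        }
    }

    int main(void)
    {
        uint8_t fw[6] = { 1, 2, 3, 4, 5, 6 };
        write_blocks(fw, sizeof(fw));
        return 0;
    }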
static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
@@ -724,31 +920,24 @@ static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
u32 prio_queue, bool drop)
{
- u32 addr;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_prioq_addr *addr;
+ bool wsize;
u16 avail_page, rsvd_page;
int i;
- switch (prio_queue) {
- case RTW_DMA_MAPPING_EXTRA:
- addr = REG_FIFOPAGE_INFO_4;
- break;
- case RTW_DMA_MAPPING_LOW:
- addr = REG_FIFOPAGE_INFO_2;
- break;
- case RTW_DMA_MAPPING_NORMAL:
- addr = REG_FIFOPAGE_INFO_3;
- break;
- case RTW_DMA_MAPPING_HIGH:
- addr = REG_FIFOPAGE_INFO_1;
- break;
- default:
+ if (prio_queue >= RTW_DMA_MAPPING_MAX)
return;
- }
+
+ addr = &chip->prioq_addrs->prio[prio_queue];
+ wsize = chip->prioq_addrs->wsize;
/* check if all of the reserved pages are available for 100 msecs */
for (i = 0; i < 5; i++) {
- rsvd_page = rtw_read16(rtwdev, addr);
- avail_page = rtw_read16(rtwdev, addr + 2);
+ rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
+ rtw_read8(rtwdev, addr->rsvd);
+ avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
+ rtw_read8(rtwdev, addr->avail);
if (rsvd_page == avail_page)
return;
@@ -826,7 +1015,8 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_CR, 0);
rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
- rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
return 0;
}
@@ -841,13 +1031,16 @@ static int set_trx_fifo_info(struct rtw_dev *rtwdev)
/* config rsvd page num */
fifo->rsvd_drv_pg_num = 8;
fifo->txff_pg_num = chip->txff_size >> 7;
- fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
- RSVD_PG_H2C_EXTRAINFO_NUM +
- RSVD_PG_H2C_STATICINFO_NUM +
- RSVD_PG_H2CQ_NUM +
- RSVD_PG_CPU_INSTRUCTION_NUM +
- RSVD_PG_FW_TXBUF_NUM +
- csi_buf_pg_num;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
+ else
+ fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
+ RSVD_PG_H2C_EXTRAINFO_NUM +
+ RSVD_PG_H2C_STATICINFO_NUM +
+ RSVD_PG_H2CQ_NUM +
+ RSVD_PG_CPU_INSTRUCTION_NUM +
+ RSVD_PG_FW_TXBUF_NUM +
+ csi_buf_pg_num;
if (fifo->rsvd_pg_num > fifo->txff_pg_num)
return -ENOMEM;
@@ -856,18 +1049,20 @@ static int set_trx_fifo_info(struct rtw_dev *rtwdev)
fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;
cur_pg_addr = fifo->txff_pg_num;
- cur_pg_addr -= csi_buf_pg_num;
- fifo->rsvd_csibuf_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
- fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
- fifo->rsvd_cpu_instr_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_H2CQ_NUM;
- fifo->rsvd_h2cq_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
- fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
- cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
- fifo->rsvd_h2c_info_addr = cur_pg_addr;
+ if (rtw_chip_wcpu_11ac(rtwdev)) {
+ cur_pg_addr -= csi_buf_pg_num;
+ fifo->rsvd_csibuf_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
+ fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
+ fifo->rsvd_cpu_instr_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2CQ_NUM;
+ fifo->rsvd_h2cq_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
+ fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
+ fifo->rsvd_h2c_info_addr = cur_pg_addr;
+ }
cur_pg_addr -= fifo->rsvd_drv_pg_num;
fifo->rsvd_drv_addr = cur_pg_addr;
@@ -879,6 +1074,65 @@ static int set_trx_fifo_info(struct rtw_dev *rtwdev)
return 0;
}
+static int __priority_queue_cfg(struct rtw_dev *rtwdev,
+ const struct rtw_page_table *pg_tbl,
+ u16 pubq_num)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
+ rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);
+
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
+ rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);
+
+ rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
+ rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
+ rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
+
+ if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
+ return -EBUSY;
+
+ rtw_write8(rtwdev, REG_CR + 3, 0);
+
+ return 0;
+}
+
+static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
+ const struct rtw_page_table *pg_tbl,
+ u16 pubq_num)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 val32;
+
+ val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
+ rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
+ val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
+ rtw_write32(rtwdev, REG_RQPN, val32);
+
+ rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
+ rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
+ rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
+ rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
+ rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);
+
+ rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);
+
+ if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
+ return -EBUSY;
+
+ return 0;
+}
+
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
@@ -911,28 +1165,10 @@ static int priority_queue_cfg(struct rtw_dev *rtwdev)
pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
- rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
- rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);
-
- rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
- rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);
-
- rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
- rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
- rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
- rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
- rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
-
- if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
- return -EBUSY;
-
- rtw_write8(rtwdev, REG_CR + 3, 0);
-
- return 0;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
+ else
+ return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}
static int init_h2c(struct rtw_dev *rtwdev)
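[note: the hunks above split priority queue setup by WLAN CPU generation: 11ac parts program the per-queue FIFOPAGE_INFO registers, while legacy 11n parts such as the 8723D pack their page counts into the old RQPN registers through the BIT_RQPN_* macros this patch adds to reg.h. A worked example of the legacy packing, with illustrative page counts not taken from any real rtw_page_table:

    /* illustrative page counts only */
    u32 val = BIT_RQPN_HLP(0x10, 0x08, 0x60);
    /* = BIT_LD_RQPN | 0x10 | (0x08 << 8) | (0x60 << 16) = 0x80600810 */
    rtw_write32(rtwdev, REG_RQPN, val);
]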
@@ -945,6 +1181,9 @@ static int init_h2c(struct rtw_dev *rtwdev)
u32 h2cq_free;
u32 wp, rp;
+ if (rtw_chip_wcpu_11n(rtwdev))
+ return 0;
+
h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;
@@ -1009,11 +1248,13 @@ static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
u8 value8;
rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
- value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
- value8 &= 0xF0;
- /* For rxdesc len = 0 issue */
- value8 |= 0xF;
- rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
+ if (rtw_chip_wcpu_11ac(rtwdev)) {
+ value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
+ value8 &= 0xF0;
+ /* For rxdesc len = 0 issue */
+ value8 |= 0xF;
+ rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
+ }
rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
index 592dc830160c..ce64cdf7a565 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.h
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -10,6 +10,7 @@
#define SDIO_LOCAL_OFFSET 0x10250000
#define DDMA_POLLING_COUNT 1000
#define C2H_PKT_BUF 256
+#define REPORT_BUF 128
#define PHY_STATUS_SIZE 4
#define ILLEGAL_KEY_GROUP 0xFAAAAA00
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index d7d02e4c0184..c412bc54efde 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -341,13 +341,11 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_leave_lps_deep(rtwdev);
if (changed & BSS_CHANGED_ASSOC) {
- struct rtw_chip_info *chip = rtwdev->chip;
enum rtw_net_type net_type;
if (conf->assoc) {
rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH);
net_type = RTW_NET_MGD_LINKED;
- chip->ops->phy_calibration(rtwdev);
rtwvif->aid = conf->aid;
rtw_fw_download_rsvd_page(rtwdev);
@@ -375,11 +373,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON)
rtw_fw_download_rsvd_page(rtwdev);
- if (changed & BSS_CHANGED_MU_GROUPS) {
- struct rtw_chip_info *chip = rtwdev->chip;
-
- chip->ops->set_gid_table(rtwdev, vif, conf);
- }
+ if (changed & BSS_CHANGED_MU_GROUPS)
+ rtw_chip_set_gid_table(rtwdev, vif, conf);
if (changed & BSS_CHANGED_ERP_SLOT)
rtw_conf_tx(rtwdev, rtwvif);
@@ -666,6 +661,7 @@ static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
rtw_leave_lps_deep(rtwdev);
rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START);
+ rtw_chip_prepare_tx(rtwdev);
mutex_unlock(&rtwdev->mutex);
}
@@ -754,6 +750,37 @@ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
return 0;
}
+static int rtw_ops_set_antenna(struct ieee80211_hw *hw,
+ u32 tx_antenna,
+ u32 rx_antenna)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ int ret;
+
+ if (!chip->ops->set_antenna)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = chip->ops->set_antenna(rtwdev, tx_antenna, rx_antenna);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static int rtw_ops_get_antenna(struct ieee80211_hw *hw,
+ u32 *tx_antenna,
+ u32 *rx_antenna)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ *tx_antenna = hal->antenna_tx;
+ *rx_antenna = hal->antenna_rx;
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int rtw_ops_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
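[note: set_antenna is an optional chip op (absent ops report -EOPNOTSUPP above), and the antenna masks become u32 bitmaps to match mac80211's API. A hedged sketch of what a chip-side implementation might look like; the validation logic is illustrative, not the actual 8822B/8822C code:

    static int my_chip_set_antenna(struct rtw_dev *rtwdev,
                                   u32 antenna_tx, u32 antenna_rx)
    {
            /* illustrative: accept single path A/B or both */
            if (antenna_tx != BB_PATH_A && antenna_tx != BB_PATH_B &&
                antenna_tx != (BB_PATH_A | BB_PATH_B))
                    return -EINVAL;
            /* ... program the trx path registers, then update hal state
             * so rtw_ops_get_antenna() reports the new configuration
             */
            rtwdev->hal.antenna_tx = antenna_tx;
            rtwdev->hal.antenna_rx = antenna_rx;
            return 0;
    }
]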
@@ -815,6 +842,8 @@ const struct ieee80211_ops rtw_ops = {
.sta_statistics = rtw_ops_sta_statistics,
.flush = rtw_ops_flush,
.set_bitrate_mask = rtw_ops_set_bitrate_mask,
+ .set_antenna = rtw_ops_set_antenna,
+ .get_antenna = rtw_ops_get_antenna,
#ifdef CONFIG_PM
.suspend = rtw_ops_suspend,
.resume = rtw_ops_resume,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 7640e97706f5..0eefafc51c62 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -137,7 +137,6 @@ struct rtw_watch_dog_iter_data {
static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
{
struct rtw_bf_info *bf_info = &rtwdev->bf_info;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 fix_rate_enable = 0;
u8 new_csi_rate_idx;
@@ -145,9 +144,9 @@ static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
rtwvif->bfee.role != RTW_BFEE_MU)
return;
- chip->ops->cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi,
- bf_info->cur_csi_rpt_rate,
- fix_rate_enable, &new_csi_rate_idx);
+ rtw_chip_cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi,
+ bf_info->cur_csi_rpt_rate,
+ fix_rate_enable, &new_csi_rate_idx);
if (new_csi_rate_idx != bf_info->cur_csi_rpt_rate)
bf_info->cur_csi_rpt_rate = new_csi_rate_idx;
@@ -409,6 +408,23 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
}
rtw_phy_set_tx_power_level(rtwdev, center_chan);
+
+ /* if the channel isn't set for scanning, we will do RF calibration
+ * in ieee80211_ops::mgd_prepare_tx(). Performing the calibration
+ * during scanning on each channel takes too long.
+ */
+ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ rtwdev->need_rfk = true;
+}
+
+void rtw_chip_prepare_tx(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (rtwdev->need_rfk) {
+ rtwdev->need_rfk = false;
+ chip->ops->phy_calibration(rtwdev);
+ }
}
static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr)
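[note: the need_rfk flag moves the slow RF calibration from association completion (the phy_calibration() call removed from rtw_ops_bss_info_changed() above) to just before the association sequence starts transmitting, and skips arming it while scanning. The resulting flow, condensed from the hunks above:

    /*
     * rtw_set_channel()         - not scanning -> need_rfk = true
     * rtw_ops_mgd_prepare_tx()  - calls rtw_chip_prepare_tx()
     * rtw_chip_prepare_tx()     - if need_rfk: clear it, then run
     *                             chip->ops->phy_calibration() once
     */
]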
@@ -473,6 +489,7 @@ static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
{
struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
if (hw_ant_num == EFUSE_HW_CAP_IGNORE ||
hw_ant_num >= hal->rf_path_num)
@@ -482,6 +499,8 @@ static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
case 1:
hal->rf_type = RF_1T1R;
hal->rf_path_num = 1;
+ if (!chip->fix_rf_phy_num)
+ hal->rf_phy_num = hal->rf_path_num;
hal->antenna_tx = BB_PATH_A;
hal->antenna_rx = BB_PATH_A;
break;
@@ -931,8 +950,11 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
ht_cap->cap = 0;
ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_MAX_AMSDU |
- IEEE80211_HT_CAP_LDPC_CODING |
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ if (rtw_chip_has_rx_ldpc(rtwdev))
+ ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40))
ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_DSSSCCK40 |
@@ -966,7 +988,6 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
vht_cap->vht_supported = true;
vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_RXSTBC_1 |
@@ -979,6 +1000,9 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
vht_cap->cap |= (rtwdev->hal.bfee_sts_cap <<
IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+ if (rtw_chip_has_rx_ldpc(rtwdev))
+ vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
+
mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
@@ -1040,11 +1064,43 @@ static void rtw_unset_supported_band(struct ieee80211_hw *hw,
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
}
+static void __update_firmware_info(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ const struct rtw_fw_hdr *fw_hdr =
+ (const struct rtw_fw_hdr *)fw->firmware->data;
+
+ fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver);
+ fw->version = le16_to_cpu(fw_hdr->version);
+ fw->sub_version = fw_hdr->subversion;
+ fw->sub_index = fw_hdr->subindex;
+}
+
+static void __update_firmware_info_legacy(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ struct rtw_fw_hdr_legacy *legacy =
+ (struct rtw_fw_hdr_legacy *)fw->firmware->data;
+
+ fw->h2c_version = 0;
+ fw->version = le16_to_cpu(legacy->version);
+ fw->sub_version = legacy->subversion1;
+ fw->sub_index = legacy->subversion2;
+}
+
+static void update_firmware_info(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ if (rtw_chip_wcpu_11n(rtwdev))
+ __update_firmware_info_legacy(rtwdev, fw);
+ else
+ __update_firmware_info(rtwdev, fw);
+}
+
static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
{
struct rtw_fw_state *fw = context;
struct rtw_dev *rtwdev = fw->rtwdev;
- const struct rtw_fw_hdr *fw_hdr;
if (!firmware || !firmware->data) {
rtw_err(rtwdev, "failed to request firmware\n");
@@ -1052,13 +1108,8 @@ static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
return;
}
- fw_hdr = (const struct rtw_fw_hdr *)firmware->data;
- fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver);
- fw->version = le16_to_cpu(fw_hdr->version);
- fw->sub_version = fw_hdr->subversion;
- fw->sub_index = fw_hdr->subindex;
-
fw->firmware = firmware;
+ update_firmware_info(rtwdev, fw);
complete_all(&fw->completion);
rtw_info(rtwdev, "Firmware version %u.%u.%u, H2C version %u\n",
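[note: update_firmware_info() dispatches on the WLAN CPU type: legacy 11n firmware carries no H2C format version, so h2c_version is pinned to 0 and the version fields come from struct rtw_fw_hdr_legacy (defined in fw.h, not shown in this diff). Note also the ordering in the callback, restated here with a comment:

    /* fw->firmware must be assigned before update_firmware_info(),
     * since both header parsers read fw->firmware->data rather than
     * taking the buffer as an argument
     */
    fw->firmware = firmware;
    update_firmware_info(rtwdev, fw);
]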
@@ -1131,6 +1182,8 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
hal->antenna_tx = BB_PATH_A;
hal->antenna_rx = BB_PATH_A;
}
+ hal->rf_phy_num = chip->fix_rf_phy_num ? chip->fix_rf_phy_num :
+ hal->rf_path_num;
efuse->physical_size = chip->phy_efuse_size;
efuse->logical_size = chip->log_efuse_size;
@@ -1450,6 +1503,7 @@ EXPORT_SYMBOL(rtw_core_deinit);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
+ struct rtw_hal *hal = &rtwdev->hal;
int max_tx_headroom = 0;
int ret;
@@ -1478,6 +1532,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
+ hw->wiphy->available_antennas_tx = hal->antenna_tx;
+ hw->wiphy->available_antennas_rx = hal->antenna_rx;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index c6b590fdb573..0841f5fa4bf2 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -11,6 +11,7 @@
#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include "util.h"
@@ -39,8 +40,6 @@ extern bool rtw_bf_support;
extern unsigned int rtw_fw_lps_deep_mode;
extern unsigned int rtw_debug_mask;
extern const struct ieee80211_ops rtw_ops;
-extern struct rtw_chip_info rtw8822b_hw_spec;
-extern struct rtw_chip_info rtw8822c_hw_spec;
#define RTW_MAX_CHANNEL_NUM_2G 14
#define RTW_MAX_CHANNEL_NUM_5G 49
@@ -183,6 +182,7 @@ enum rtw_wireless_set {
enum rtw_chip_type {
RTW_CHIP_TYPE_8822B,
RTW_CHIP_TYPE_8822C,
+ RTW_CHIP_TYPE_8723D,
};
enum rtw_tx_queue_type {
@@ -337,6 +337,7 @@ enum rtw_regulatory_domains {
RTW_REGD_CHILE = 6,
RTW_REGD_UKRAINE = 7,
RTW_REGD_MEXICO = 8,
+ RTW_REGD_CN = 9,
RTW_REGD_WW,
RTW_REGD_MAX
@@ -515,6 +516,12 @@ struct rtw_hw_reg {
u32 mask;
};
+struct rtw_ltecoex_addr {
+ u32 ctrl;
+ u32 wdata;
+ u32 rdata;
+};
+
struct rtw_reg_domain {
u32 addr;
u32 mask;
@@ -527,6 +534,13 @@ struct rtw_reg_domain {
u8 domain;
};
+struct rtw_rf_sipi_addr {
+ u32 hssi_1;
+ u32 hssi_2;
+ u32 lssi_read;
+ u32 lssi_read_pi;
+};
+
struct rtw_backup_info {
u8 len;
u32 reg;
@@ -784,6 +798,7 @@ struct rtw_regulatory {
struct rtw_chip_ops {
int (*mac_init)(struct rtw_dev *rtwdev);
+ void (*shutdown)(struct rtw_dev *rtwdev);
int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
void (*phy_set_param)(struct rtw_dev *rtwdev);
void (*set_channel)(struct rtw_dev *rtwdev, u8 channel,
@@ -798,9 +813,11 @@ struct rtw_chip_ops {
void (*set_tx_power_index)(struct rtw_dev *rtwdev);
int (*rsvd_page_dump)(struct rtw_dev *rtwdev, u8 *buf, u32 offset,
u32 size);
- void (*set_antenna)(struct rtw_dev *rtwdev, u8 antenna_tx,
- u8 antenna_rx);
+ int (*set_antenna)(struct rtw_dev *rtwdev,
+ u32 antenna_tx,
+ u32 antenna_rx);
void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
+ void (*efuse_grant)(struct rtw_dev *rtwdev, bool enable);
void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
void (*phy_calibration)(struct rtw_dev *rtwdev);
void (*dpk_track)(struct rtw_dev *rtwdev);
@@ -844,6 +861,7 @@ struct rtw_chip_ops {
#define RTW_PWR_INTF_PCI_MSK BIT(2)
#define RTW_PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define RTW_PWR_CUT_TEST_MSK BIT(0)
#define RTW_PWR_CUT_A_MSK BIT(1)
#define RTW_PWR_CUT_B_MSK BIT(2)
#define RTW_PWR_CUT_C_MSK BIT(3)
@@ -930,6 +948,16 @@ struct rtw_rqpn {
enum rtw_dma_mapping dma_map_hi;
};
+struct rtw_prioq_addr {
+ u32 rsvd;
+ u32 avail;
+};
+
+struct rtw_prioq_addrs {
+ struct rtw_prioq_addr prio[RTW_DMA_MAPPING_MAX];
+ bool wsize;
+};
+
struct rtw_page_table {
u16 hq_num;
u16 nq_num;
@@ -1042,6 +1070,13 @@ struct rtw_pwr_track_tbl {
const u8 *pwrtrk_2g_cckb_p;
const u8 *pwrtrk_2g_ccka_n;
const u8 *pwrtrk_2g_ccka_p;
+ const s8 *pwrtrk_xtal_n;
+ const s8 *pwrtrk_xtal_p;
+};
+
+enum rtw_wlan_cpu {
+ RTW_WCPU_11AC,
+ RTW_WCPU_11N,
};
/* hardware configuration for each IC */
@@ -1050,6 +1085,7 @@ struct rtw_chip_info {
u8 id;
const char *fw_name;
+ enum rtw_wlan_cpu wlan_cpu;
u8 tx_pkt_desc_sz;
u8 tx_buf_desc_sz;
u8 rx_pkt_desc_sz;
@@ -1066,6 +1102,7 @@ struct rtw_chip_info {
u8 dig_min;
u8 txgi_factor;
bool is_pwr_by_rate_dec;
+ bool rx_ldpc;
u8 max_power_index;
bool ht_supported;
@@ -1077,12 +1114,17 @@ struct rtw_chip_info {
const struct rtw_pwr_seq_cmd **pwr_on_seq;
const struct rtw_pwr_seq_cmd **pwr_off_seq;
const struct rtw_rqpn *rqpn_table;
+ const struct rtw_prioq_addrs *prioq_addrs;
const struct rtw_page_table *page_table;
const struct rtw_intf_phy_para_table *intf_table;
const struct rtw_hw_reg *dig;
+ const struct rtw_hw_reg *dig_cck;
u32 rf_base_addr[2];
u32 rf_sipi_addr[2];
+ const struct rtw_rf_sipi_addr *rf_sipi_read_addr;
+ u8 fix_rf_phy_num;
+ const struct rtw_ltecoex_addr *ltecoex_addr;
const struct rtw_table *mac_tbl;
const struct rtw_table *agc_tbl;
@@ -1132,6 +1174,7 @@ struct rtw_chip_info {
const struct coex_rf_para *wl_rf_para_tx;
const struct coex_rf_para *wl_rf_para_rx;
const struct coex_5g_afh_map *afh_5g;
+ const struct rtw_hw_reg *btg_reg;
const struct rtw_reg_domain *coex_info_hw_regs;
};
@@ -1376,6 +1419,16 @@ struct rtw_pkt_count {
DECLARE_EWMA(evm, 10, 4);
DECLARE_EWMA(snr, 10, 4);
+struct rtw_iqk_info {
+ bool done;
+ struct {
+ u32 s1_x;
+ u32 s1_y;
+ u32 s0_x;
+ u32 s0_y;
+ } result;
+};
+
struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
@@ -1413,6 +1466,8 @@ struct rtw_dm_info {
bool pwr_trk_triggered;
bool pwr_trk_init_trigger;
struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX];
+ s8 txagc_remnant_cck;
+ s8 txagc_remnant_ofdm;
/* backup dack results for each path and I/Q */
u32 dack_adck[RTW_RF_PATH_MAX];
@@ -1435,6 +1490,8 @@ struct rtw_dm_info {
struct rtw_pkt_count last_pkt_count;
struct ewma_evm ewma_evm[RTW_EVM_NUM];
struct ewma_snr ewma_snr[RTW_SNR_NUM];
+
+ struct rtw_iqk_info iqk;
};
struct rtw_efuse {
@@ -1455,6 +1512,7 @@ struct rtw_efuse {
u8 ant_div_cfg;
u8 ant_div_type;
u8 regd;
+ u8 afe;
u8 lna_type_2g;
u8 lna_type_5g;
@@ -1567,8 +1625,9 @@ struct rtw_hal {
u8 sec_ch_offset;
u8 rf_type;
u8 rf_path_num;
- u8 antenna_tx;
- u8 antenna_rx;
+ u8 rf_phy_num;
+ u32 antenna_tx;
+ u32 antenna_rx;
u8 bfee_sts_cap;
/* protect tx power section */
@@ -1661,6 +1720,8 @@ struct rtw_dev {
struct rtw_fw_state wow_fw;
struct rtw_wow_param wow;
+ bool need_rfk;
+
/* hci related data, must be last */
u8 priv[] __aligned(sizeof(void *));
};
@@ -1698,6 +1759,33 @@ static inline bool rtw_ssid_equal(struct cfg80211_ssid *a,
return true;
}
+static inline void rtw_chip_efuse_grant_on(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->chip->ops->efuse_grant)
+ rtwdev->chip->ops->efuse_grant(rtwdev, true);
+}
+
+static inline void rtw_chip_efuse_grant_off(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->chip->ops->efuse_grant)
+ rtwdev->chip->ops->efuse_grant(rtwdev, false);
+}
+
+static inline bool rtw_chip_wcpu_11n(struct rtw_dev *rtwdev)
+{
+ return rtwdev->chip->wlan_cpu == RTW_WCPU_11N;
+}
+
+static inline bool rtw_chip_wcpu_11ac(struct rtw_dev *rtwdev)
+{
+ return rtwdev->chip->wlan_cpu == RTW_WCPU_11AC;
+}
+
+static inline bool rtw_chip_has_rx_ldpc(struct rtw_dev *rtwdev)
+{
+ return rtwdev->chip->rx_ldpc;
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
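[note: rtw_chip_efuse_grant_on()/_off() wrap an optional chip op: 11n parts must explicitly grant the host access to the eFuse cell before dumping it (see REG_EFUSE_ACCESS with its EFUSE_ACCESS_ON/OFF values 0x69/0x00 added to reg.h below), while chips without the op fall through as no-ops. A hedged usage sketch; the dump helper name is hypothetical:

    rtw_chip_efuse_grant_on(rtwdev);
    ret = dump_logical_efuse_map(rtwdev, map);  /* hypothetical helper */
    rtw_chip_efuse_grant_off(rtwdev);
]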
@@ -1707,6 +1795,7 @@ void rtw_restore_reg(struct rtw_dev *rtwdev,
struct rtw_backup_info *bckp, u32 num);
void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss);
void rtw_set_channel(struct rtw_dev *rtwdev);
+void rtw_chip_prepare_tx(struct rtw_dev *rtwdev);
void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
u32 config);
void rtw_tx_report_purge_timer(struct timer_list *t);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 1af87eb2e53a..8228db9a5fc8 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -411,12 +411,14 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
- len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
- dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
- rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
- rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
- rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
+ if (!rtw_chip_wcpu_11n(rtwdev)) {
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
+ }
len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
@@ -471,8 +473,9 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
/* reset H2C Queue index in a single write */
- rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
- BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
+ BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
@@ -489,7 +492,9 @@ static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
- rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
+
rtwpci->irq_enabled = true;
spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
@@ -507,7 +512,9 @@ static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
- rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
+
rtwpci->irq_enabled = false;
out:
@@ -1012,13 +1019,17 @@ static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
- irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
+ else
+ irq_status[3] = 0;
irq_status[0] &= rtwpci->irq_mask[0];
irq_status[1] &= rtwpci->irq_mask[1];
irq_status[3] &= rtwpci->irq_mask[3];
rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
- rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
+ if (rtw_chip_wcpu_11ac(rtwdev))
+ rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
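[note: HIMR3/HISR3 exist only on the 11ac-generation interrupt block, so slot 3 is gated on the WLAN CPU type while the irq_status[] handling stays uniform. The gated read above is equivalent to this condensed form:

    irq_status[3] = rtw_chip_wcpu_11ac(rtwdev) ?
                    rtw_read32(rtwdev, RTK_PCI_HISR3) : 0;
]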
@@ -1091,6 +1102,7 @@ static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
len = pci_resource_len(pdev, bar_id);
rtwpci->mmap = pci_iomap(pdev, bar_id, len);
if (!rtwpci->mmap) {
+ pci_release_regions(pdev);
rtw_err(rtwdev, "failed to map pci memory\n");
return -ENOMEM;
}
@@ -1348,7 +1360,8 @@ static int __maybe_unused rtw_pci_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
+SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
+EXPORT_SYMBOL(rtw_pm_ops);
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
@@ -1461,8 +1474,8 @@ static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
}
-static int rtw_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+int rtw_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct ieee80211_hw *hw;
struct rtw_dev *rtwdev;
@@ -1539,8 +1552,9 @@ err_release_hw:
return ret;
}
+EXPORT_SYMBOL(rtw_pci_probe);
-static void rtw_pci_remove(struct pci_dev *pdev)
+void rtw_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtw_dev *rtwdev;
@@ -1560,26 +1574,24 @@ static void rtw_pci_remove(struct pci_dev *pdev)
rtw_core_deinit(rtwdev);
ieee80211_free_hw(hw);
}
+EXPORT_SYMBOL(rtw_pci_remove);
-static const struct pci_device_id rtw_pci_id_table[] = {
-#ifdef CONFIG_RTW88_8822BE
- { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
-#endif
-#ifdef CONFIG_RTW88_8822CE
- { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
-#endif
- {},
-};
-MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);
-
-static struct pci_driver rtw_pci_driver = {
- .name = "rtw_pci",
- .id_table = rtw_pci_id_table,
- .probe = rtw_pci_probe,
- .remove = rtw_pci_remove,
- .driver.pm = &rtw_pm_ops,
-};
-module_pci_driver(rtw_pci_driver);
+void rtw_pci_shutdown(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtw_dev *rtwdev;
+ struct rtw_chip_info *chip;
+
+ if (!hw)
+ return;
+
+ rtwdev = hw->priv;
+ chip = rtwdev->chip;
+
+ if (chip->ops->shutdown)
+ chip->ops->shutdown(rtwdev);
+}
+EXPORT_SYMBOL(rtw_pci_shutdown);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
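[note: with the device table and the static pci_driver removed, pci.c becomes a shared library: probe, remove, shutdown and the PM ops are exported, and each chip is expected to register its own thin PCI glue driver. A hedged sketch of such a module; the file name, and rtw8822b_hw_spec being visible to it, are assumptions rather than part of this diff:

    /* hypothetical rtw8822be glue module built on the exported symbols */
    static const struct pci_device_id rtw_8822be_id_table[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822),
              .driver_data = (kernel_ulong_t)&rtw8822b_hw_spec },
            {}
    };
    MODULE_DEVICE_TABLE(pci, rtw_8822be_id_table);

    static struct pci_driver rtw_8822be_driver = {
            .name = "rtw_8822be",
            .id_table = rtw_8822be_id_table,
            .probe = rtw_pci_probe,
            .remove = rtw_pci_remove,
            .driver.pm = &rtw_pm_ops,
            .shutdown = rtw_pci_shutdown,
    };
    module_pci_driver(rtw_8822be_driver);
    MODULE_LICENSE("Dual BSD/GPL");
]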
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 3ac4fb328d31..024c2bc275cb 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -5,10 +5,6 @@
#ifndef __RTK_PCI_H_
#define __RTK_PCI_H_
-#define RTK_PCI_DEVICE(vend, dev, hw_config) \
- PCI_DEVICE(vend, dev), \
- .driver_data = (kernel_ulong_t)&(hw_config),
-
#define RTK_DEFAULT_TX_DESC_NUM 128
#define RTK_BEQ_TX_DESC_NUM 256
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 8793dd22188f..8d93f3159746 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -82,6 +82,8 @@ u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
rtw_ht_1s_rates, rtw_ht_2s_rates,
rtw_vht_1s_rates, rtw_vht_2s_rates
};
+EXPORT_SYMBOL(rtw_rate_section);
+
u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
ARRAY_SIZE(rtw_cck_rates),
ARRAY_SIZE(rtw_ofdm_rates),
@@ -90,6 +92,8 @@ u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
ARRAY_SIZE(rtw_vht_1s_rates),
ARRAY_SIZE(rtw_vht_2s_rates)
};
+EXPORT_SYMBOL(rtw_rate_size);
+
static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
@@ -134,15 +138,22 @@ void rtw_phy_init(struct rtw_dev *rtwdev)
mask = chip->dig[0].mask;
dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
rtw_phy_cck_pd_init(rtwdev);
+
+ dm_info->iqk.done = false;
}
+EXPORT_SYMBOL(rtw_phy_init);
void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
+ const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
u32 addr, mask;
u8 path;
+ if (dig_cck)
+ rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
+
for (path = 0; path < hal->rf_path_num; path++) {
addr = chip->dig[path].addr;
mask = chip->dig[path].mask;
@@ -670,6 +681,7 @@ u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
return rtw_phy_linear_2_db(sum);
}
+EXPORT_SYMBOL(rtw_phy_rf_power_2_rssi);
u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask)
@@ -679,7 +691,7 @@ u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
const u32 *base_addr = chip->rf_base_addr;
u32 val, direct_addr;
- if (rf_path >= hal->rf_path_num) {
+ if (rf_path >= hal->rf_phy_num) {
rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
return INV_RF_DATA;
}
@@ -692,6 +704,56 @@ u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
return val;
}
+EXPORT_SYMBOL(rtw_phy_read_rf);
+
+u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_rf_sipi_addr *rf_sipi_addr;
+ const struct rtw_rf_sipi_addr *rf_sipi_addr_a;
+ u32 val32;
+ u32 en_pi;
+ u32 r_addr;
+ u32 shift;
+
+ if (rf_path >= hal->rf_phy_num) {
+ rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (!chip->rf_sipi_read_addr) {
+ rtw_err(rtwdev, "rf_sipi_read_addr isn't defined\n");
+ return INV_RF_DATA;
+ }
+
+ rf_sipi_addr = &chip->rf_sipi_read_addr[rf_path];
+ rf_sipi_addr_a = &chip->rf_sipi_read_addr[RF_PATH_A];
+
+ addr &= 0xff;
+
+ val32 = rtw_read32(rtwdev, rf_sipi_addr->hssi_2);
+ val32 = (val32 & ~LSSI_READ_ADDR_MASK) | (addr << 23);
+ rtw_write32(rtwdev, rf_sipi_addr->hssi_2, val32);
+
+ /* toggle read edge of path A */
+ val32 = rtw_read32(rtwdev, rf_sipi_addr_a->hssi_2);
+ rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 & ~LSSI_READ_EDGE_MASK);
+ rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 | LSSI_READ_EDGE_MASK);
+
+ udelay(120);
+
+ en_pi = rtw_read32_mask(rtwdev, rf_sipi_addr->hssi_1, BIT(8));
+ r_addr = en_pi ? rf_sipi_addr->lssi_read_pi : rf_sipi_addr->lssi_read;
+
+ val32 = rtw_read32_mask(rtwdev, r_addr, LSSI_READ_DATA_MASK);
+
+ shift = __ffs(mask);
+
+ return (val32 & mask) >> shift;
+}
+EXPORT_SYMBOL(rtw_phy_read_rf_sipi);
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
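[note: rtw_phy_read_rf_sipi() reads RF registers back through the HSSI interface (write the target address into hssi_2, toggle the path-A read edge, then pick the PI or SI read-back register) instead of the direct-address window used by rtw_phy_read_rf(). The final extraction shifts by the mask's lowest set bit; a worked example with an illustrative mask:

    /* e.g. mask = 0x00000ff0: __ffs(mask) = 4, so a raw LSSI word of
     * 0x12340 yields (0x12340 & 0xff0) >> 4 = 0x34
     */
]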
@@ -703,7 +765,7 @@ bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 old_data = 0;
u32 shift;
- if (rf_path >= hal->rf_path_num) {
+ if (rf_path >= hal->rf_phy_num) {
rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
return false;
}
@@ -712,7 +774,7 @@ bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
mask &= RFREG_MASK;
if (mask != RFREG_MASK) {
- old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);
+ old_data = chip->ops->read_rf(rtwdev, rf_path, addr, RFREG_MASK);
if (old_data == INV_RF_DATA) {
rtw_err(rtwdev, "Write fail, rf is disabled\n");
@@ -731,6 +793,7 @@ bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
return true;
}
+EXPORT_SYMBOL(rtw_phy_write_rf_reg_sipi);
bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
@@ -740,7 +803,7 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
const u32 *base_addr = chip->rf_base_addr;
u32 direct_addr;
- if (rf_path >= hal->rf_path_num) {
+ if (rf_path >= hal->rf_phy_num) {
rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
return false;
}
@@ -764,6 +827,7 @@ bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
}
+EXPORT_SYMBOL(rtw_phy_write_rf_reg_mix);
void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
{
@@ -856,6 +920,7 @@ void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
}
}
}
+EXPORT_SYMBOL(rtw_parse_tbl_phy_cond);
#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
@@ -1219,6 +1284,7 @@ void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
p->data);
}
}
+EXPORT_SYMBOL(rtw_parse_tbl_bb_pg);
static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
36, 38, 40, 42, 44, 46, 48, /* Band 1 */
@@ -1363,18 +1429,21 @@ void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
rtw_xref_txpwr_lmt(rtwdev);
}
+EXPORT_SYMBOL(rtw_parse_tbl_txpwr_lmt);
void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data)
{
rtw_write8(rtwdev, addr, data);
}
+EXPORT_SYMBOL(rtw_phy_cfg_mac);
void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data)
{
rtw_write32(rtwdev, addr, data);
}
+EXPORT_SYMBOL(rtw_phy_cfg_agc);
void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data)
@@ -1394,6 +1463,7 @@ void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
else
rtw_write32(rtwdev, addr, data);
}
+EXPORT_SYMBOL(rtw_phy_cfg_bb);
void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data)
@@ -1407,6 +1477,7 @@ void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
udelay(1);
}
}
+EXPORT_SYMBOL(rtw_phy_cfg_rf);
static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
@@ -1444,6 +1515,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
rtw_load_table(rtwdev, tbl);
}
}
+EXPORT_SYMBOL(rtw_phy_load_tables);
static u8 rtw_get_channel_group(u8 channel)
{
@@ -1731,11 +1803,13 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
u8 ch, u8 regd, struct rtw_power_params *pwr_param)
{
struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct rtw_txpwr_idx *pwr_idx;
u8 group, band;
u8 *base = &pwr_param->pwr_base;
s8 *offset = &pwr_param->pwr_offset;
s8 *limit = &pwr_param->pwr_limit;
+ s8 *remnant = &pwr_param->pwr_remnant;
pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
group = rtw_get_channel_group(ch);
@@ -1757,6 +1831,8 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
*limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path,
rate, ch, regd);
+ *remnant = (rate <= DESC_RATE11M ? dm_info->txagc_remnant_cck :
+ dm_info->txagc_remnant_ofdm);
}
u8
@@ -1776,13 +1852,14 @@ rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
if (rtwdev->chip->en_dis_dpd)
offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate);
- tx_power += offset;
+ tx_power += offset + pwr_param.pwr_remnant;
if (tx_power > rtwdev->chip->max_power_index)
tx_power = rtwdev->chip->max_power_index;
return tx_power;
}
+EXPORT_SYMBOL(rtw_phy_get_tx_power_index);
static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
u8 ch, u8 path, u8 rs)
@@ -1845,6 +1922,7 @@ void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
chip->ops->set_tx_power_index(rtwdev);
mutex_unlock(&hal->tx_power_mutex);
}
+EXPORT_SYMBOL(rtw_phy_set_tx_power_level);
static void
rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
@@ -2002,6 +2080,7 @@ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
}
}
+EXPORT_SYMBOL(rtw_phy_config_swing_table);
void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path)
{
@@ -2011,6 +2090,7 @@ void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path)
dm_info->thermal_avg[path] =
ewma_thermal_read(&dm_info->avg_thermal[path]);
}
+EXPORT_SYMBOL(rtw_phy_pwrtrack_avg);
bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
u8 path)
@@ -2023,6 +2103,7 @@ bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
return true;
}
+EXPORT_SYMBOL(rtw_phy_pwrtrack_thermal_changed);
u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path)
{
@@ -2035,6 +2116,7 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path)
return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1);
}
+EXPORT_SYMBOL(rtw_phy_pwrtrack_get_delta);
s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
struct rtw_swing_table *swing_table,
@@ -2068,6 +2150,7 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
else
return -delta_swing_table_idx_neg[delta];
}
+EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
{
@@ -2081,3 +2164,4 @@ bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
}
return false;
}
+EXPORT_SYMBOL(rtw_phy_pwrtrack_need_iqk);
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index af916d8784cd..b924ed07630a 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -21,6 +21,8 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev);
u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num);
u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
@@ -139,6 +141,7 @@ struct rtw_power_params {
u8 pwr_base;
s8 pwr_offset;
s8 pwr_limit;
+ s8 pwr_remnant;
};
void
@@ -178,4 +181,8 @@ enum rtw_phy_cck_pd_lv {
#define CCK_FA_AVG_RESET 0xffffffff
+#define LSSI_READ_ADDR_MASK 0x7f800000
+#define LSSI_READ_EDGE_MASK 0x80000000
+#define LSSI_READ_DATA_MASK 0xfffff
+
#endif
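[note: a bit-layout sanity check for the new LSSI masks:

    /* LSSI_READ_ADDR_MASK 0x7f800000 == GENMASK(30, 23): eight address
     * bits, matching "addr &= 0xff; ... | (addr << 23)" in
     * rtw_phy_read_rf_sipi(). LSSI_READ_DATA_MASK 0xfffff keeps the
     * 20-bit RF payload, consistent with RFREG_MASK.
     */
]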
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 9d94534c9674..5a3e9cc7c400 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -6,15 +6,23 @@
#define __RTW_REG_DEF_H__
#define REG_SYS_FUNC_EN 0x0002
+#define BIT_FEN_EN_25_1 BIT(13)
+#define BIT_FEN_ELDR BIT(12)
#define BIT_FEN_CPUEN BIT(2)
#define BIT_FEN_BB_GLB_RST BIT(1)
#define BIT_FEN_BB_RSTB BIT(0)
#define BIT_R_DIS_PRST BIT(6)
#define BIT_WLOCK_1C_B6 BIT(5)
#define REG_SYS_PW_CTRL 0x0004
+#define BIT_PFM_WOWL BIT(3)
#define REG_SYS_CLK_CTRL 0x0008
#define BIT_CPU_CLK_EN BIT(14)
+#define REG_SYS_CLKR 0x0008
+#define BIT_ANA8M BIT(1)
+#define BIT_WAKEPAD_EN BIT(3)
+#define BIT_LOADER_CLK_EN BIT(5)
+
#define REG_RSV_CTRL 0x001C
#define DISABLE_PI 0x3
#define ENABLE_PI 0x2
@@ -33,12 +41,23 @@
#define BIT_MASK_EF_ADDR 0x3ff
#define BIT_MASK_EF_DATA 0xff
#define BITS_EF_ADDR (BIT_MASK_EF_ADDR << BIT_SHIFT_EF_ADDR)
+#define BITS_PLL 0xf0
+
+#define REG_AFE_CTRL3 0x2c
+#define BIT_MASK_XTAL 0x00FFF000
+#define BIT_XTAL_GMP_BIT4 BIT(28)
#define REG_LDO_EFUSE_CTRL 0x0034
#define BIT_MASK_EFUSE_BANK_SEL (BIT(8) | BIT(9))
+#define BIT_LDO25_VOLTAGE_V25 0x03
+#define BIT_MASK_LDO25_VOLTAGE GENMASK(6, 4)
+#define BIT_SHIFT_LDO25_VOLTAGE 4
+#define BIT_LDO25_EN BIT(7)
+
#define REG_GPIO_MUXCFG 0x0040
#define BIT_FSPI_EN BIT(19)
+#define BIT_EN_SIC BIT(12)
#define BIT_BT_AOD_GPIO3 BIT(9)
#define BIT_BT_PTA_EN BIT(5)
#define BIT_WLRFE_4_5_EN BIT(2)
@@ -48,7 +67,9 @@
#define BIT_PAPE_SEL_EN BIT(25)
#define BIT_DPDT_WL_SEL BIT(24)
#define BIT_DPDT_SEL_EN BIT(23)
+#define REG_LEDCFG2 0x004E
#define REG_PAD_CTRL1 0x0064
+#define BIT_BT_BTG_SEL BIT(31)
#define BIT_PAPE_WLBT_SEL BIT(29)
#define BIT_LNAON_WLBT_SEL BIT(28)
#define BIT_BTGP_JTAG_EN BIT(24)
@@ -62,31 +83,56 @@
#define BIT_DBG_GNT_WL_BT BIT(27)
#define BIT_LTE_MUX_CTRL_PATH BIT(26)
#define REG_HCI_OPT_CTRL 0x0074
+#define BIT_USB_SUS_DIS BIT(8)
+
+#define REG_AFE_CTRL_4 0x0078
+#define BIT_CK320M_AFE_EN BIT(4)
+#define BIT_EN_SYN BIT(15)
+
+#define REG_LDO_SWR_CTRL 0x007C
+#define LDO_SEL 0xC3
+#define SPS_SEL 0x83
+#define BIT_XTA1 BIT(29)
+#define BIT_XTA0 BIT(28)
#define REG_MCUFW_CTRL 0x0080
#define BIT_ANA_PORT_EN BIT(22)
#define BIT_MAC_PORT_EN BIT(21)
#define BIT_BOOT_FSPI_EN BIT(20)
+#define BIT_ROM_DLEN BIT(19)
+#define BIT_ROM_PGE GENMASK(18, 16) /* legacy only */
+#define BIT_SHIFT_ROM_PGE 16
#define BIT_FW_INIT_RDY BIT(15)
#define BIT_FW_DW_RDY BIT(14)
#define BIT_RPWM_TOGGLE BIT(7)
+#define BIT_RAM_DL_SEL BIT(7) /* legacy only */
#define BIT_DMEM_CHKSUM_OK BIT(6)
+#define BIT_WINTINI_RDY BIT(6) /* legacy only */
#define BIT_DMEM_DW_OK BIT(5)
#define BIT_IMEM_CHKSUM_OK BIT(4)
#define BIT_IMEM_DW_OK BIT(3)
#define BIT_IMEM_BOOT_LOAD_CHECKSUM_OK BIT(2)
+#define BIT_FWDL_CHK_RPT BIT(2) /* legacy only */
+#define BIT_MCUFWDL_RDY BIT(1) /* legacy only */
#define BIT_MCUFWDL_EN BIT(0)
#define BIT_CHECK_SUM_OK (BIT(4) | BIT(6))
#define FW_READY (BIT_FW_INIT_RDY | BIT_FW_DW_RDY | \
BIT_IMEM_DW_OK | BIT_DMEM_DW_OK | \
BIT_CHECK_SUM_OK)
+#define FW_READY_LEGACY (BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT | \
+ BIT_WINTINI_RDY | BIT_RAM_DL_SEL)
#define FW_READY_MASK 0xffff
+#define REG_EFUSE_ACCESS 0x00CF
+#define EFUSE_ACCESS_ON 0x69
+#define EFUSE_ACCESS_OFF 0x00
+
#define REG_WLRF1 0x00EC
#define REG_WIFI_BT_INFO 0x00AA
#define BIT_BT_INT_EN BIT(15)
#define REG_SYS_CFG1 0x00F0
#define BIT_RTL_ID BIT(23)
+#define BIT_LDO BIT(24)
#define BIT_RF_TYPE_ID BIT(27)
#define BIT_SHIFT_VENDOR_ID 16
#define BIT_MASK_VENDOR_ID 0xf
@@ -166,6 +212,7 @@
#define BIT_FS_RXDONE BIT(16)
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_C2HEVT 0x01A0
+#define REG_MCUTST_1 0x01C0
#define REG_MCUTST_II 0x01C4
#define REG_WOWLAN_WAKE_REASON 0x01C7
#define REG_HMETFR 0x01CC
@@ -178,14 +225,42 @@
#define REG_HMEBOX2_EX 0x01F8
#define REG_HMEBOX3_EX 0x01FC
+#define REG_RQPN 0x0200
+#define BIT_MASK_HPQ 0xff
+#define BIT_SHIFT_HPQ 0
+#define BIT_RQPN_HPQ(x) (((x) & BIT_MASK_HPQ) << BIT_SHIFT_HPQ)
+#define BIT_MASK_LPQ 0xff
+#define BIT_SHIFT_LPQ 8
+#define BIT_RQPN_LPQ(x) (((x) & BIT_MASK_LPQ) << BIT_SHIFT_LPQ)
+#define BIT_MASK_PUBQ 0xff
+#define BIT_SHIFT_PUBQ 16
+#define BIT_RQPN_PUBQ(x) (((x) & BIT_MASK_PUBQ) << BIT_SHIFT_PUBQ)
+#define BIT_RQPN_HLP(h, l, p) (BIT_LD_RQPN | BIT_RQPN_HPQ(h) | \
+ BIT_RQPN_LPQ(l) | BIT_RQPN_PUBQ(p))
+
#define REG_FIFOPAGE_CTRL_2 0x0204
#define BIT_BCN_VALID_V1 BIT(15)
#define BIT_MASK_BCN_HEAD_1_V1 0xfff
#define REG_AUTO_LLT_V1 0x0208
#define BIT_AUTO_INIT_LLT_V1 BIT(0)
+#define REG_DWBCN0_CTRL 0x0208
+#define BIT_BCN_VALID BIT(16)
#define REG_TXDMA_OFFSET_CHK 0x020C
+#define BIT_DROP_DATA_EN BIT(9)
#define REG_TXDMA_STATUS 0x0210
#define BTI_PAGE_OVF BIT(2)
+
+#define REG_RQPN_NPQ 0x0214
+#define BIT_MASK_NPQ 0xff
+#define BIT_SHIFT_NPQ 0
+#define BIT_MASK_EPQ 0xff
+#define BIT_SHIFT_EPQ 16
+#define BIT_RQPN_NPQ(x) (((x) & BIT_MASK_NPQ) << BIT_SHIFT_NPQ)
+#define BIT_RQPN_EPQ(x) (((x) & BIT_MASK_EPQ) << BIT_SHIFT_EPQ)
+#define BIT_RQPN_NE(n, e) (BIT_RQPN_NPQ(n) | BIT_RQPN_EPQ(e))
+
+#define REG_AUTO_LLT 0x0224
+#define BIT_AUTO_INIT_LLT BIT(16)
#define REG_RQPN_CTRL_1 0x0228
#define REG_RQPN_CTRL_2 0x022C
#define BIT_LD_RQPN BIT(31)
@@ -213,7 +288,11 @@
#define REG_FWHW_TXQ_CTRL 0x0420
#define BIT_EN_BCNQ_DL BIT(22)
#define BIT_EN_WR_FREE_TAIL BIT(20)
+#define REG_HWSEQ_CTRL 0x0423
+
#define REG_BCNQ_BDNY_V1 0x0424
+#define REG_BCNQ_BDNY 0x0424
+#define REG_MGQ_BDNY 0x0425
#define REG_LIFETIME_EN 0x0426
#define BIT_BA_PARSER_EN BIT(5)
#define REG_SPEC_SIFS 0x0428
@@ -229,6 +308,8 @@
#define BIT_CHECK_CCK_EN BIT(7)
#define REG_AMPDU_MAX_TIME_V1 0x0455
#define REG_BCNQ1_BDNY_V1 0x0456
+#define REG_AMPDU_MAX_TIME 0x0456
+#define REG_WMAC_LBK_BF_HD 0x045D
#define REG_TX_HANG_CTRL 0x045E
#define BIT_EN_GNT_BT_AWAKE BIT(3)
#define BIT_EN_EOF_V1 BIT(2)
@@ -243,7 +324,10 @@
#define REG_QUEUE_CTRL 0x04C6
#define BIT_PTA_WL_TX_EN BIT(4)
#define BIT_PTA_EDCCA_EN BIT(5)
+#define REG_SINGLE_AMPDU_CTRL 0x04C7
+#define BIT_EN_SINGLE_APMDU BIT(7)
#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_MAX_AGGR_NUM 0x04CA
#define REG_BAR_MODE_CTRL 0x04CC
#define REG_PRECNT_CTRL 0x04E5
#define BIT_BTCCA_CTRL (BIT(0) | BIT(1))
@@ -263,6 +347,7 @@
#define BIT_SHIFT_SIFS_OFDM_CTX 8
#define BIT_SHIFT_SIFS_CCK_TRX 16
#define BIT_SHIFT_SIFS_OFDM_TRX 24
+#define REG_AGGR_BREAK_TIME 0x051A
#define REG_SLOT 0x051B
#define REG_TX_PTCL_CTRL 0x0520
#define BIT_SIFS_BK_EN BIT(12)
@@ -274,18 +359,23 @@
#define REG_TBTT_PROHIBIT 0x0540
#define BIT_SHIFT_TBTT_HOLD_TIME_AP 8
#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
#define BIT_DIS_TSF_UDT BIT(4)
#define BIT_EN_BCN_FUNCTION BIT(3)
+#define BIT_EN_TXBCN_RPT BIT(2)
#define REG_BCN_CTRL_CLINT0 0x0551
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
#define REG_USTIME_TSF 0x055C
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_MISC_CTRL 0x0577
#define BIT_EN_FREE_CNT BIT(3)
#define BIT_DIS_SECOND_CCA (BIT(0) | BIT(1))
+#define REG_HIQ_NO_LMT_EN 0x5A7
+#define BIT_HIQ_NO_LMT_EN_ROOT BIT(0)
#define REG_TIMER0_SRC_SEL 0x05B4
#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
@@ -311,6 +401,7 @@
#define BIT_HTC_LOC_CTRL BIT(14)
#define BIT_RPFM_CAM_ENABLE BIT(12)
#define BIT_TA_BCN BIT(11)
+#define BIT_RCR_ADF BIT(11)
#define BIT_DISDECMYPKT BIT(10)
#define BIT_AICV BIT(9)
#define BIT_ACRC32 BIT(8)
@@ -328,6 +419,7 @@
#define REG_MAR 0x0620
#define REG_USTIME_EDCA 0x0638
#define REG_ACKTO_CCK 0x0639
+#define REG_MAC_SPEC_SIFS 0x063A
#define REG_RESP_SIFS_CCK 0x063C
#define REG_RESP_SIFS_OFDM 0x063E
#define REG_ACKTO 0x0640
@@ -370,12 +462,19 @@
#define BIT_LTE_COEX_EN BIT(7)
#define REG_BT_STAT_CTRL 0x0778
#define REG_BT_TDMA_TIME 0x0790
+#define REG_LTR_IDLE_LATENCY 0x0798
+#define REG_LTR_ACTIVE_LATENCY 0x079C
+#define REG_LTR_CTRL_BASIC 0x07A4
#define REG_WMAC_OPTION_FUNCTION 0x07D0
#define REG_WMAC_OPTION_FUNCTION_1 0x07D4
+#define REG_FPGA0_RFMOD 0x0800
+#define BIT_CCKEN BIT(24)
+#define BIT_OFDMEN BIT(25)
#define REG_RX_GAIN_EN 0x081c
#define REG_RFE_CTRL_E 0x0974
+#define REG_2ND_CCA_CTRL 0x0976
#define REG_DIS_DPD 0x0a70
#define DIS_DPD_MASK GENMASK(9, 0)
@@ -514,7 +613,10 @@
#define REG_IGN_GNTBT4 0x4160
+#define RF_MODE 0x00
#define RF_MODOPT 0x01
+#define RF_WLINT 0x01
+#define RF_WLSEL 0x02
#define RF_DTXLOK 0x08
#define RF_CFGCH 0x18
#define RF_RCK 0x1d
@@ -522,9 +624,15 @@
#define RF_LUTWD1 0x3e
#define RF_LUTWD0 0x3f
#define RF_T_METER 0x42
+#define RF_BSPAD 0x54
+#define RF_GAINTX 0x56
+#define RF_TXATANK 0x64
+#define RF_TRXIQ 0x66
+#define RF_RXIQGEN 0x8d
#define RF_XTALX2 0xb8
#define RF_MALSEL 0xbe
#define RF_RCKD 0xde
+#define RF_TXADBG 0xde
#define RF_LUTDBG 0xdf
#define RF_LUTWE2 0xee
#define RF_LUTWE 0xef
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
new file mode 100644
index 000000000000..4700195c8eef
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include "main.h"
+#include "coex.h"
+#include "fw.h"
+#include "tx.h"
+#include "rx.h"
+#include "phy.h"
+#include "rtw8723d.h"
+#include "rtw8723d_table.h"
+#include "mac.h"
+#include "reg.h"
+#include "debug.h"
+
+static const struct rtw_hw_reg rtw8723d_txagc[] = {
+ [DESC_RATE1M] = { .addr = 0xe08, .mask = 0x0000ff00 },
+ [DESC_RATE2M] = { .addr = 0x86c, .mask = 0x0000ff00 },
+ [DESC_RATE5_5M] = { .addr = 0x86c, .mask = 0x00ff0000 },
+ [DESC_RATE11M] = { .addr = 0x86c, .mask = 0xff000000 },
+ [DESC_RATE6M] = { .addr = 0xe00, .mask = 0x000000ff },
+ [DESC_RATE9M] = { .addr = 0xe00, .mask = 0x0000ff00 },
+ [DESC_RATE12M] = { .addr = 0xe00, .mask = 0x00ff0000 },
+ [DESC_RATE18M] = { .addr = 0xe00, .mask = 0xff000000 },
+ [DESC_RATE24M] = { .addr = 0xe04, .mask = 0x000000ff },
+ [DESC_RATE36M] = { .addr = 0xe04, .mask = 0x0000ff00 },
+ [DESC_RATE48M] = { .addr = 0xe04, .mask = 0x00ff0000 },
+ [DESC_RATE54M] = { .addr = 0xe04, .mask = 0xff000000 },
+ [DESC_RATEMCS0] = { .addr = 0xe10, .mask = 0x000000ff },
+ [DESC_RATEMCS1] = { .addr = 0xe10, .mask = 0x0000ff00 },
+ [DESC_RATEMCS2] = { .addr = 0xe10, .mask = 0x00ff0000 },
+ [DESC_RATEMCS3] = { .addr = 0xe10, .mask = 0xff000000 },
+ [DESC_RATEMCS4] = { .addr = 0xe14, .mask = 0x000000ff },
+ [DESC_RATEMCS5] = { .addr = 0xe14, .mask = 0x0000ff00 },
+ [DESC_RATEMCS6] = { .addr = 0xe14, .mask = 0x00ff0000 },
+ [DESC_RATEMCS7] = { .addr = 0xe14, .mask = 0xff000000 },
+};
+
+#define WLAN_TXQ_RPT_EN 0x1F
+#define WLAN_SLOT_TIME 0x09
+#define WLAN_RL_VAL 0x3030
+#define WLAN_BAR_VAL 0x0201ffff
+#define BIT_MASK_TBTT_HOLD 0x00000fff
+#define BIT_SHIFT_TBTT_HOLD 8
+#define BIT_MASK_TBTT_SETUP 0x000000ff
+#define BIT_SHIFT_TBTT_SETUP 0
+#define BIT_MASK_TBTT_MASK ((BIT_MASK_TBTT_HOLD << BIT_SHIFT_TBTT_HOLD) | \
+ (BIT_MASK_TBTT_SETUP << BIT_SHIFT_TBTT_SETUP))
+#define TBTT_TIME(s, h)((((s) & BIT_MASK_TBTT_SETUP) << BIT_SHIFT_TBTT_SETUP) |\
+ (((h) & BIT_MASK_TBTT_HOLD) << BIT_SHIFT_TBTT_HOLD))
+#define WLAN_TBTT_TIME_NORMAL TBTT_TIME(0x04, 0x80)
+#define WLAN_TBTT_TIME_STOP_BCN TBTT_TIME(0x04, 0x64)
+#define WLAN_PIFS_VAL 0
+#define WLAN_AGG_BRK_TIME 0x16
+#define WLAN_NAV_PROT_LEN 0x0040
+#define WLAN_SPEC_SIFS 0x100a
+#define WLAN_RX_PKT_LIMIT 0x17
+#define WLAN_MAX_AGG_NR 0x0A
+#define WLAN_AMPDU_MAX_TIME 0x1C
+#define WLAN_ANT_SEL 0x82
+#define WLAN_LTR_IDLE_LAT 0x883C883C
+#define WLAN_LTR_ACT_LAT 0x880B880B
+#define WLAN_LTR_CTRL1 0xCB004010
+#define WLAN_LTR_CTRL2 0x01233425
+
+static void rtw8723d_lck(struct rtw_dev *rtwdev)
+{
+ u32 lc_cal;
+ u8 val_ctx, rf_val;
+ int ret;
+
+ val_ctx = rtw_read8(rtwdev, REG_CTX);
+ if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
+ rtw_write8(rtwdev, REG_CTX, val_ctx & ~BIT_MASK_CTX_TYPE);
+ else
+ rtw_write8(rtwdev, REG_TXPAUSE, 0xFF);
+ lc_cal = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal | BIT_LCK);
+
+ ret = read_poll_timeout(rtw_read_rf, rf_val, rf_val != 0x1,
+ 10000, 1000000, false,
+ rtwdev, RF_PATH_A, RF_CFGCH, BIT_LCK);
+ if (ret)
+ rtw_warn(rtwdev, "failed to poll LCK status bit\n");
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal);
+ if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
+ rtw_write8(rtwdev, REG_CTX, val_ctx);
+ else
+ rtw_write8(rtwdev, REG_TXPAUSE, 0x00);
+}
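+
+/* The poll above uses read_poll_timeout() from <linux/iopoll.h> (newly
+ * included via main.h in this patch): it re-reads BIT_LCK in RF_CFGCH
+ * every 10 ms until the bit is no longer 1, giving up with -ETIMEDOUT
+ * after 1 s, in which case only a warning is printed and the saved
+ * RF_CFGCH value is restored regardless.
+ */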
+
+static const u32 rtw8723d_ofdm_swing_table[] = {
+ 0x0b40002d, 0x0c000030, 0x0cc00033, 0x0d800036, 0x0e400039, 0x0f00003c,
+ 0x10000040, 0x11000044, 0x12000048, 0x1300004c, 0x14400051, 0x15800056,
+ 0x16c0005b, 0x18000060, 0x19800066, 0x1b00006c, 0x1c800072, 0x1e400079,
+ 0x20000080, 0x22000088, 0x24000090, 0x26000098, 0x288000a2, 0x2ac000ab,
+ 0x2d4000b5, 0x300000c0, 0x32c000cb, 0x35c000d7, 0x390000e4, 0x3c8000f2,
+ 0x40000100, 0x43c0010f, 0x47c0011f, 0x4c000130, 0x50800142, 0x55400155,
+ 0x5a400169, 0x5fc0017f, 0x65400195, 0x6b8001ae, 0x71c001c7, 0x788001e2,
+ 0x7f8001fe,
+};
+
+static const u32 rtw8723d_cck_swing_table[] = {
+ 0x0CD, 0x0D9, 0x0E6, 0x0F3, 0x102, 0x111, 0x121, 0x132, 0x144, 0x158,
+ 0x16C, 0x182, 0x198, 0x1B1, 0x1CA, 0x1E5, 0x202, 0x221, 0x241, 0x263,
+ 0x287, 0x2AE, 0x2D6, 0x301, 0x32F, 0x35F, 0x392, 0x3C9, 0x402, 0x43F,
+ 0x47F, 0x4C3, 0x50C, 0x558, 0x5A9, 0x5FF, 0x65A, 0x6BA, 0x720, 0x78C,
+ 0x7FF,
+};
+
+#define RTW_OFDM_SWING_TABLE_SIZE ARRAY_SIZE(rtw8723d_ofdm_swing_table)
+#define RTW_CCK_SWING_TABLE_SIZE ARRAY_SIZE(rtw8723d_cck_swing_table)
+
+static void rtw8723d_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 path;
+
+ dm_info->default_ofdm_index = RTW_DEF_OFDM_SWING_INDEX;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->delta_power_index[path] = 0;
+ }
+ dm_info->pwr_trk_triggered = false;
+ dm_info->pwr_trk_init_trigger = true;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+ dm_info->txagc_remnant_cck = 0;
+ dm_info->txagc_remnant_ofdm = 0;
+}
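+
+/* txagc_remnant_cck/ofdm seeded to 0 here are the software half of
+ * power tracking: rtw_phy_get_tx_power_index() adds them on top of
+ * base + by-rate offset, e.g. base 0x2e, offset +2, remnant -1 gives
+ * index 0x2f, clamped to chip->max_power_index (illustrative numbers
+ * only).
+ */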
+
+static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev)
+{
+ u8 xtal_cap;
+ u32 val32;
+
+ /* power on BB/RF domain */
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN,
+ BIT_FEN_EN_25_1 | BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB);
+ rtw_write8_set(rtwdev, REG_RF_CTRL,
+ BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+ rtw_write8(rtwdev, REG_AFE_CTRL1 + 1, 0x80);
+
+ rtw_phy_load_tables(rtwdev);
+
+ /* post init after header files config */
+ rtw_write32_clr(rtwdev, REG_RCR, BIT_RCR_ADF);
+ rtw_write8_set(rtwdev, REG_HIQ_NO_LMT_EN, BIT_HIQ_NO_LMT_EN_ROOT);
+ rtw_write16_set(rtwdev, REG_AFE_CTRL_4, BIT_CK320M_AFE_EN | BIT_EN_SYN);
+
+ xtal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+ rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL,
+ xtal_cap | (xtal_cap << 6));
+ rtw_write32_set(rtwdev, REG_FPGA0_RFMOD, BIT_CCKEN | BIT_OFDMEN);
+ if ((rtwdev->efuse.afe >> 4) == 14) {
+ rtw_write32_set(rtwdev, REG_AFE_CTRL3, BIT_XTAL_GMP_BIT4);
+ rtw_write32_clr(rtwdev, REG_AFE_CTRL1, BITS_PLL);
+ rtw_write32_set(rtwdev, REG_LDO_SWR_CTRL, BIT_XTA1);
+ rtw_write32_clr(rtwdev, REG_LDO_SWR_CTRL, BIT_XTA0);
+ }
+
+ rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, WLAN_RL_VAL);
+ rtw_write32(rtwdev, REG_BAR_MODE_CTRL, WLAN_BAR_VAL);
+ rtw_write8(rtwdev, REG_ATIMWND, 0x2);
+ rtw_write8(rtwdev, REG_BCN_CTRL,
+ BIT_DIS_TSF_UDT | BIT_EN_BCN_FUNCTION | BIT_EN_TXBCN_RPT);
+ val32 = rtw_read32(rtwdev, REG_TBTT_PROHIBIT);
+ val32 &= ~BIT_MASK_TBTT_MASK;
+ val32 |= WLAN_TBTT_TIME_STOP_BCN;
+ rtw_write8(rtwdev, REG_TBTT_PROHIBIT, val32);
+ rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_VAL);
+ rtw_write8(rtwdev, REG_AGGR_BREAK_TIME, WLAN_AGG_BRK_TIME);
+ rtw_write16(rtwdev, REG_NAV_PROT_LEN, WLAN_NAV_PROT_LEN);
+ rtw_write16(rtwdev, REG_MAC_SPEC_SIFS, WLAN_SPEC_SIFS);
+ rtw_write16(rtwdev, REG_SIFS, WLAN_SPEC_SIFS);
+ rtw_write16(rtwdev, REG_SIFS + 2, WLAN_SPEC_SIFS);
+ rtw_write8(rtwdev, REG_SINGLE_AMPDU_CTRL, BIT_EN_SINGLE_APMDU);
+ rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RX_PKT_LIMIT);
+ rtw_write8(rtwdev, REG_MAX_AGGR_NUM, WLAN_MAX_AGG_NR);
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME, WLAN_AMPDU_MAX_TIME);
+ rtw_write8(rtwdev, REG_LEDCFG2, WLAN_ANT_SEL);
+
+ rtw_write32(rtwdev, REG_LTR_IDLE_LATENCY, WLAN_LTR_IDLE_LAT);
+ rtw_write32(rtwdev, REG_LTR_ACTIVE_LATENCY, WLAN_LTR_ACT_LAT);
+ rtw_write32(rtwdev, REG_LTR_CTRL_BASIC, WLAN_LTR_CTRL1);
+ rtw_write32(rtwdev, REG_LTR_CTRL_BASIC + 4, WLAN_LTR_CTRL2);
+
+ rtw_phy_init(rtwdev);
+
+ rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN);
+
+ rtw8723d_lck(rtwdev);
+
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x20);
+
+ rtw8723d_pwrtrack_init(rtwdev);
+}
+
+static void rtw8723de_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8723d_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+}
+
+static int rtw8723d_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8723d_efuse *map;
+ int i;
+
+ map = (struct rtw8723d_efuse *)log_map;
+
+ efuse->rfe_option = 0;
+ efuse->rf_board_option = map->rf_board_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->pa_type_2g = map->pa_type;
+ efuse->lna_type_2g = map->lna_type_2g[0];
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[0] = map->thermal_meter;
+ efuse->thermal_meter_k = map->thermal_meter;
+ efuse->afe = map->afe;
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8723de_efuse_parsing(efuse, map);
+ break;
+ default:
+ /* not supported yet */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 min_rx_power = -120;
+ u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
+
+ pkt_stat->rx_power[RF_PATH_A] = pwdb - 97;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
+ pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+ min_rx_power);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
+}
+
+static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 rxsc, bw;
+ s8 min_rx_power = -120;
+ s8 rx_evm;
+
+ if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
+ rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
+ else
+ rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
+
+ if (GET_PHY_STAT_P1_RF_MODE(phy_status) == 0)
+ bw = RTW_CHANNEL_WIDTH_20;
+ else if ((rxsc == 1) || (rxsc == 2))
+ bw = RTW_CHANNEL_WIDTH_20;
+ else
+ bw = RTW_CHANNEL_WIDTH_40;
+
+ pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->bw = bw;
+ pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+ min_rx_power);
+ pkt_stat->rx_evm[RF_PATH_A] = GET_PHY_STAT_P1_RXEVM_A(phy_status);
+ pkt_stat->rx_snr[RF_PATH_A] = GET_PHY_STAT_P1_RXSNR_A(phy_status);
+ pkt_stat->cfo_tail[RF_PATH_A] = GET_PHY_STAT_P1_CFO_TAIL_A(phy_status);
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
+ dm_info->rx_snr[RF_PATH_A] = pkt_stat->rx_snr[RF_PATH_A] >> 1;
+ dm_info->cfo_tail[RF_PATH_A] = (pkt_stat->cfo_tail[RF_PATH_A] * 5) >> 1;
+
+ rx_evm = clamp_t(s8, -pkt_stat->rx_evm[RF_PATH_A] >> 1, 0, 64);
+ rx_evm &= 0x3F; /* 64->0: second path of 1SS rate is 64 */
+ dm_info->rx_evm_dbm[RF_PATH_A] = rx_evm;
+}
+
+static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 page;
+
+ page = *phy_status & 0xf;
+
+ switch (page) {
+ case 0:
+ query_phy_status_page0(rtwdev, phy_status, pkt_stat);
+ break;
+ case 1:
+ query_phy_status_page1(rtwdev, phy_status, pkt_stat);
+ break;
+ default:
+ rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
+ return;
+ }
+}
+
+static void rtw8723d_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u8 *phy_status = NULL;
+
+ memset(pkt_stat, 0, sizeof(*pkt_stat));
+
+ pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
+ pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
+ pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
+ pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
+ pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
+ pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+ pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
+ pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
+ pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
+ pkt_stat->ppdu_cnt = 0;
+ pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
+
+ /* drv_info_sz is in units of 8 bytes */
+ pkt_stat->drv_info_sz *= 8;
+
+ /* c2h cmd pkt's rx/phy status is of no interest */
+ if (pkt_stat->is_c2h)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
+ pkt_stat->drv_info_sz);
+ if (pkt_stat->phy_status) {
+ phy_status = rx_desc + desc_sz + pkt_stat->shift;
+ query_phy_status(rtwdev, phy_status, pkt_stat);
+ }
+
+ rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
+}
+
+static bool rtw8723d_check_spur_ov_thres(struct rtw_dev *rtwdev,
+ u8 channel, u32 thres)
+{
+ u32 freq;
+ bool ret = false;
+
+ if (channel == 13)
+ freq = FREQ_CH13;
+ else if (channel == 14)
+ freq = FREQ_CH14;
+ else
+ return false;
+
+ rtw_write32(rtwdev, REG_ANALOG_P4, DIS_3WIRE);
+ rtw_write32(rtwdev, REG_PSDFN, freq);
+ rtw_write32(rtwdev, REG_PSDFN, START_PSD | freq);
+
+ msleep(30);
+ if (rtw_read32(rtwdev, REG_PSDRPT) >= thres)
+ ret = true;
+
+ rtw_write32(rtwdev, REG_PSDFN, freq);
+ rtw_write32(rtwdev, REG_ANALOG_P4, EN_3WIRE);
+
+ return ret;
+}
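+
+/* Spur probe sequence above: take the RF out of 3-wire control
+ * (DIS_3WIRE), point the PSD engine at the fixed tone for channel 13
+ * or 14 (FREQ_CH13/FREQ_CH14), kick a measurement with START_PSD, let
+ * it settle for 30 ms, then compare REG_PSDRPT against the caller's
+ * threshold to decide whether the notch filter is needed.
+ */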
+
+static void rtw8723d_cfg_notch(struct rtw_dev *rtwdev, u8 channel, bool notch)
+{
+ if (!notch) {
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x1f);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0);
+ return;
+ }
+
+ switch (channel) {
+ case 13:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0xb);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x04000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 14:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x5);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00080000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ default:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0);
+ break;
+ }
+}
+
+static void rtw8723d_spur_cal(struct rtw_dev *rtwdev, u8 channel)
+{
+ bool notch;
+
+ if (channel < 13) {
+ rtw8723d_cfg_notch(rtwdev, channel, false);
+ return;
+ }
+
+ notch = rtw8723d_check_spur_ov_thres(rtwdev, channel, SPUR_THRES);
+ rtw8723d_cfg_notch(rtwdev, channel, notch);
+}
+
+static void rtw8723d_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+ u32 rf_cfgch_a, rf_cfgch_b;
+
+ rf_cfgch_a = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK);
+ rf_cfgch_b = rtw_read_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK);
+
+ rf_cfgch_a &= ~RFCFGCH_CHANNEL_MASK;
+ rf_cfgch_b &= ~RFCFGCH_CHANNEL_MASK;
+ rf_cfgch_a |= (channel & RFCFGCH_CHANNEL_MASK);
+ rf_cfgch_b |= (channel & RFCFGCH_CHANNEL_MASK);
+
+ rf_cfgch_a &= ~RFCFGCH_BW_MASK;
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rf_cfgch_a |= RFCFGCH_BW_20M;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rf_cfgch_a |= RFCFGCH_BW_40M;
+ break;
+ default:
+ break;
+ }
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_cfgch_a);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_cfgch_b);
+
+ rtw8723d_spur_cal(rtwdev, channel);
+}
+
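+/* Two CCK DFIR coefficient sets: entry 0 is used on channels 1-13,
+ * entry 1 on channel 14 (see rtw8723d_set_channel_bb()).
+ */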
+static const struct rtw_backup_info cck_dfir_cfg[][CCK_DFIR_NR] = {
+ [0] = {
+ { .len = 4, .reg = 0xA24, .val = 0x64B80C1C },
+ { .len = 4, .reg = 0xA28, .val = 0x00008810 },
+ { .len = 4, .reg = 0xAAC, .val = 0x01235667 },
+ },
+ [1] = {
+ { .len = 4, .reg = 0xA24, .val = 0x0000B81C },
+ { .len = 4, .reg = 0xA28, .val = 0x00000000 },
+ { .len = 4, .reg = 0xAAC, .val = 0x00003667 },
+ },
+};
+
+static void rtw8723d_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ const struct rtw_backup_info *cck_dfir;
+ int i;
+
+ cck_dfir = channel <= 13 ? cck_dfir_cfg[0] : cck_dfir_cfg[1];
+
+ for (i = 0; i < CCK_DFIR_NR; i++, cck_dfir++)
+ rtw_write32(rtwdev, cck_dfir->reg, cck_dfir->val);
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x0);
+ rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x0);
+ rtw_write32_mask(rtwdev, REG_BBRX_DFIR, BIT_RXBB_DFIR_EN, 1);
+ rtw_write32_mask(rtwdev, REG_BBRX_DFIR, BIT_MASK_RXBB_DFIR, 0xa);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x1);
+ rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x1);
+ rtw_write32_mask(rtwdev, REG_BBRX_DFIR, BIT_RXBB_DFIR_EN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK0_SYS, BIT_CCK_SIDE_BAND,
+ (primary_ch_idx == RTW_SC_20_UPPER ? 1 : 0));
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw8723d_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_chan_idx)
+{
+ rtw8723d_set_channel_rf(rtwdev, channel, bw);
+ rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
+ rtw8723d_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
+}
+
+#define BIT_CFENDFORM BIT(9)
+#define BIT_WMAC_TCR_ERR0 BIT(12)
+#define BIT_WMAC_TCR_ERR1 BIT(13)
+#define BIT_TCR_CFG (BIT_CFENDFORM | BIT_WMAC_TCR_ERR0 | \
+ BIT_WMAC_TCR_ERR1)
+#define WLAN_RX_FILTER0 0xFFFF
+#define WLAN_RX_FILTER1 0x400
+#define WLAN_RX_FILTER2 0xFFFF
+#define WLAN_RCR_CFG 0x700060CE
+
+static int rtw8723d_mac_init(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
+ rtw_write32(rtwdev, REG_TCR, BIT_TCR_CFG);
+
+ rtw_write16(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
+ rtw_write16(rtwdev, REG_RXFLTMAP1, WLAN_RX_FILTER1);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
+ rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
+
+ rtw_write32(rtwdev, REG_INT_MIG, 0);
+ rtw_write32(rtwdev, REG_MCUTST_1, 0x0);
+
+ rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA);
+ rtw_write8(rtwdev, REG_2ND_CCA_CTRL, 0);
+
+ return 0;
+}
+
+static void rtw8723d_shutdown(struct rtw_dev *rtwdev)
+{
+ rtw_write16_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
+}
+
+static void rtw8723d_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 ldo_pwr;
+
+ ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
+ if (enable) {
+ ldo_pwr &= ~BIT_MASK_LDO25_VOLTAGE;
+ ldo_pwr |= (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN;
+ } else {
+ ldo_pwr &= ~BIT_LDO25_EN;
+ }
+ rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
+}
+
+static void
+rtw8723d_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ const struct rtw_hw_reg *txagc;
+ u8 rate, pwr_index;
+ int j;
+
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+ pwr_index = hal->tx_pwr_tbl[path][rate];
+
+ if (rate >= ARRAY_SIZE(rtw8723d_txagc)) {
+ rtw_warn(rtwdev, "rate 0x%x isn't supported\n", rate);
+ continue;
+ }
+ txagc = &rtw8723d_txagc[rate];
+ if (!txagc->addr) {
+ rtw_warn(rtwdev, "rate 0x%x isn't defined\n", rate);
+ continue;
+ }
+
+ rtw_write32_mask(rtwdev, txagc->addr, txagc->mask, pwr_index);
+ }
+}
+
+static void rtw8723d_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int rs, path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ for (rs = 0; rs <= RTW_RATE_SECTION_HT_1S; rs++)
+ rtw8723d_set_tx_power_index_by_rate(rtwdev, path, rs);
+ }
+}
+
+static void rtw8723d_efuse_grant(struct rtw_dev *rtwdev, bool on)
+{
+ if (on) {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
+
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR);
+ rtw_write16_set(rtwdev, REG_SYS_CLKR, BIT_LOADER_CLK_EN | BIT_ANA8M);
+ } else {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+ }
+}
+
+static void rtw8723d_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+ u32 crc32_cnt;
+ u32 val32;
+
+ /* hold counter */
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 1);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KEEP, 1);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KEEP, 1);
+
+ cck_fa_cnt = rtw_read32_mask(rtwdev, REG_CCK_FA_LSB_11N, MASKBYTE0);
+ cck_fa_cnt += rtw_read32_mask(rtwdev, REG_CCK_FA_MSB_11N, MASKBYTE3) << 8;
+
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE1_11N);
+ ofdm_fa_cnt = u32_get_bits(val32, BIT_MASK_OFDM_FF_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_SF_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE2_11N);
+ dm_info->ofdm_cca_cnt = u32_get_bits(val32, BIT_MASK_OFDM_CCA_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_PF_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE3_11N);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_RI_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_CRC_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE4_11N);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_MNS_CNT);
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = cck_fa_cnt + ofdm_fa_cnt;
+
+ dm_info->cck_err_cnt = rtw_read32(rtwdev, REG_IGI_C_11N);
+ dm_info->cck_ok_cnt = rtw_read32(rtwdev, REG_IGI_D_11N);
+ crc32_cnt = rtw_read32(rtwdev, REG_OFDM_CRC32_CNT_11N);
+ dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_ERR);
+ dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_OK);
+ crc32_cnt = rtw_read32(rtwdev, REG_HT_CRC32_CNT_11N);
+ dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_ERR);
+ dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_OK);
+ dm_info->vht_err_cnt = 0;
+ dm_info->vht_ok_cnt = 0;
+
+ val32 = rtw_read32(rtwdev, REG_CCK_CCA_CNT_11N);
+ dm_info->cck_cca_cnt = (u32_get_bits(val32, BIT_MASK_CCK_FA_MSB) << 8) |
+ u32_get_bits(val32, BIT_MASK_CCK_FA_LSB);
+ dm_info->total_cca_cnt = dm_info->cck_cca_cnt + dm_info->ofdm_cca_cnt;
+
+ /* reset counter */
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 2);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 2);
+ rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 1);
+ rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 0);
+}
+
+static const u32 iqk_adda_regs[] = {
+ 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
+ 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec
+};
+
+static const u32 iqk_mac8_regs[] = {0x522, 0x550, 0x551};
+static const u32 iqk_mac32_regs[] = {0x40};
+
+static const u32 iqk_bb_regs[] = {
+ 0xc04, 0xc08, 0x874, 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0xa04
+};
+
+#define IQK_ADDA_REG_NUM ARRAY_SIZE(iqk_adda_regs)
+#define IQK_MAC8_REG_NUM ARRAY_SIZE(iqk_mac8_regs)
+#define IQK_MAC32_REG_NUM ARRAY_SIZE(iqk_mac32_regs)
+#define IQK_BB_REG_NUM ARRAY_SIZE(iqk_bb_regs)
+
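+/*
+ * IQK rewrites a number of ADDA/MAC/BB registers as well as the BT/LTE
+ * coex path controls, so everything it touches is snapshotted into this
+ * struct beforehand and written back once calibration is done.
+ */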
+struct iqk_backup_regs {
+ u32 adda[IQK_ADDA_REG_NUM];
+ u8 mac8[IQK_MAC8_REG_NUM];
+ u32 mac32[IQK_MAC32_REG_NUM];
+ u32 bb[IQK_BB_REG_NUM];
+
+ u32 lte_path;
+ u32 lte_gnt;
+
+ u32 bb_sel_btg;
+ u8 btg_sel;
+
+ u8 igia;
+ u8 igib;
+};
+
+static void rtw8723d_iqk_backup_regs(struct rtw_dev *rtwdev,
+ struct iqk_backup_regs *backup)
+{
+ int i;
+
+ for (i = 0; i < IQK_ADDA_REG_NUM; i++)
+ backup->adda[i] = rtw_read32(rtwdev, iqk_adda_regs[i]);
+
+ for (i = 0; i < IQK_MAC8_REG_NUM; i++)
+ backup->mac8[i] = rtw_read8(rtwdev, iqk_mac8_regs[i]);
+ for (i = 0; i < IQK_MAC32_REG_NUM; i++)
+ backup->mac32[i] = rtw_read32(rtwdev, iqk_mac32_regs[i]);
+
+ for (i = 0; i < IQK_BB_REG_NUM; i++)
+ backup->bb[i] = rtw_read32(rtwdev, iqk_bb_regs[i]);
+
+ backup->igia = rtw_read32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0);
+ backup->igib = rtw_read32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0);
+
+ backup->bb_sel_btg = rtw_read32(rtwdev, REG_BB_SEL_BTG);
+}
+
+static void rtw8723d_iqk_restore_regs(struct rtw_dev *rtwdev,
+ const struct iqk_backup_regs *backup)
+{
+ int i;
+
+ for (i = 0; i < IQK_ADDA_REG_NUM; i++)
+ rtw_write32(rtwdev, iqk_adda_regs[i], backup->adda[i]);
+
+ for (i = 0; i < IQK_MAC8_REG_NUM; i++)
+ rtw_write8(rtwdev, iqk_mac8_regs[i], backup->mac8[i]);
+ for (i = 0; i < IQK_MAC32_REG_NUM; i++)
+ rtw_write32(rtwdev, iqk_mac32_regs[i], backup->mac32[i]);
+
+ for (i = 0; i < IQK_BB_REG_NUM; i++)
+ rtw_write32(rtwdev, iqk_bb_regs[i], backup->bb[i]);
+
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, backup->igia);
+
+ rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, backup->igib);
+
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x01008c00);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x01008c00);
+}
+
+static void rtw8723d_iqk_backup_path_ctrl(struct rtw_dev *rtwdev,
+ struct iqk_backup_regs *backup)
+{
+ backup->btg_sel = rtw_read8(rtwdev, REG_BTG_SEL);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] original 0x67 = 0x%x\n",
+ backup->btg_sel);
+}
+
+static void rtw8723d_iqk_config_path_ctrl(struct rtw_dev *rtwdev)
+{
+ rtw_write32_mask(rtwdev, REG_PAD_CTRL1, BIT_BT_BTG_SEL, 0x1);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] set 0x67 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+}
+
+static void rtw8723d_iqk_restore_path_ctrl(struct rtw_dev *rtwdev,
+ const struct iqk_backup_regs *backup)
+{
+ rtw_write8(rtwdev, REG_BTG_SEL, backup->btg_sel);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] restore 0x67 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+}
+
+static void rtw8723d_iqk_backup_lte_path_gnt(struct rtw_dev *rtwdev,
+ struct iqk_backup_regs *backup)
+{
+ backup->lte_path = rtw_read32(rtwdev, REG_LTECOEX_PATH_CONTROL);
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0038);
+ mdelay(1);
+ backup->lte_gnt = rtw_read32(rtwdev, REG_LTECOEX_READ_DATA);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] OriginalGNT = 0x%x\n",
+ backup->lte_gnt);
+}
+
+static void rtw8723d_iqk_config_lte_path_gnt(struct rtw_dev *rtwdev)
+{
+ rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, 0x0000ff00);
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc0020038);
+ rtw_write32_mask(rtwdev, REG_LTECOEX_PATH_CONTROL, BIT_LTE_MUX_CTRL_PATH, 0x1);
+}
+
+static void rtw8723d_iqk_restore_lte_path_gnt(struct rtw_dev *rtwdev,
+ const struct iqk_backup_regs *bak)
+{
+ rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, bak->lte_gnt);
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc00f0038);
+ rtw_write32(rtwdev, REG_LTECOEX_PATH_CONTROL, bak->lte_path);
+}
+
+struct rtw_8723d_iqk_cfg {
+ const char *name;
+ u32 val_bb_sel_btg;
+ u32 reg_lutwe;
+ u32 val_txiqk_pi;
+ u32 reg_padlut;
+ u32 reg_gaintx;
+ u32 reg_bspad;
+ u32 val_wlint;
+ u32 val_wlsel;
+ u32 val_iqkpts;
+};
+
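+/*
+ * Per-path IQK parameters: S1 and S0 are the two RF signal paths of
+ * the 8723D, each calibrated through its own set of RF registers and
+ * register values below.
+ */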
+static const struct rtw_8723d_iqk_cfg iqk_tx_cfg[PATH_NR] = {
+ [PATH_S1] = {
+ .name = "S1",
+ .val_bb_sel_btg = 0x99000000,
+ .reg_lutwe = RF_LUTWE,
+ .val_txiqk_pi = 0x8214019f,
+ .reg_padlut = RF_LUTDBG,
+ .reg_gaintx = RF_GAINTX,
+ .reg_bspad = RF_BSPAD,
+ .val_wlint = 0xe0d,
+ .val_wlsel = 0x60d,
+ .val_iqkpts = 0xfa000000,
+ },
+ [PATH_S0] = {
+ .name = "S0",
+ .val_bb_sel_btg = 0x99000280,
+ .reg_lutwe = RF_LUTWE2,
+ .val_txiqk_pi = 0x8214018a,
+ .reg_padlut = RF_TXADBG,
+ .reg_gaintx = RF_TRXIQ,
+ .reg_bspad = RF_TXATANK,
+ .val_wlint = 0xe6d,
+ .val_wlsel = 0x66d,
+ .val_iqkpts = 0xf9000000,
+ },
+};
+
+static u8 rtw8723d_iqk_check_tx_failed(struct rtw_dev *rtwdev,
+ const struct rtw_8723d_iqk_cfg *iqk_cfg)
+{
+ s32 tx_x, tx_y;
+ u32 tx_fail;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xeac = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RES_RY));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe94 = 0x%x, 0xe9c = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RES_TX),
+ rtw_read32(rtwdev, REG_IQK_RES_TY));
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xe90(before IQK)= 0x%x, 0xe98(afer IQK) = 0x%x\n",
+ rtw_read32(rtwdev, 0xe90),
+ rtw_read32(rtwdev, 0xe98));
+
+ tx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_TX_FAIL);
+ tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX);
+ tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY);
+
+ if (!tx_fail && tx_x != IQK_TX_X_ERR && tx_y != IQK_TX_Y_ERR)
+ return IQK_TX_OK;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] %s TXIQK is failed\n",
+ iqk_cfg->name);
+
+ return 0;
+}
+
+static u8 rtw8723d_iqk_check_rx_failed(struct rtw_dev *rtwdev,
+ const struct rtw_8723d_iqk_cfg *iqk_cfg)
+{
+ s32 rx_x, rx_y;
+ u32 rx_fail;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xea4 = 0x%x, 0xeac = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RES_RX),
+ rtw_read32(rtwdev, REG_IQK_RES_RY));
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xea0(before IQK)= 0x%x, 0xea8(afer IQK) = 0x%x\n",
+ rtw_read32(rtwdev, 0xea0),
+ rtw_read32(rtwdev, 0xea8));
+
+ rx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_RX_FAIL);
+ rx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_RX, BIT_MASK_RES_RX);
+ rx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_MASK_RES_RY);
+ rx_y = abs(iqkxy_to_s32(rx_y));
+
+ if (!rx_fail && rx_x < IQK_RX_X_UPPER && rx_x > IQK_RX_X_LOWER &&
+ rx_y < IQK_RX_Y_LMT)
+ return IQK_RX_OK;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] %s RXIQK STEP2 is failed\n",
+ iqk_cfg->name);
+
+ return 0;
+}
+
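+/*
+ * Fire one LOK + IQK shot for a single path: enter IQK mode, route the
+ * LTE-coex GNT (presumably so BT activity cannot disturb the
+ * measurement), trigger via the AGC points register, then poll
+ * BIT_IQK_DONE for completion.
+ */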
+static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx,
+ const struct rtw_8723d_iqk_cfg *iqk_cfg)
+{
+ u32 pts = (tx ? iqk_cfg->val_iqkpts : 0xf9000000);
+
+ /* enter IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
+ rtw8723d_iqk_config_lte_path_gnt(rtwdev);
+
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0054);
+ mdelay(1);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] GNT_BT @%s %sIQK1 = 0x%x\n",
+ iqk_cfg->name, tx ? "TX" : "RX",
+ rtw_read32(rtwdev, REG_LTECOEX_READ_DATA));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x948 @%s %sIQK1 = 0x%x\n",
+ iqk_cfg->name, tx ? "TX" : "RX",
+ rtw_read32(rtwdev, REG_BB_SEL_BTG));
+
+ /* One shot, LOK & IQK */
+ rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, pts);
+ rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, 0xf8000000);
+
+ if (!check_hw_ready(rtwdev, REG_IQK_RES_RY, BIT_IQK_DONE, 1))
+ rtw_warn(rtwdev, "%s %s IQK isn't done\n", iqk_cfg->name,
+ tx ? "TX" : "RX");
+}
+
+static void rtw8723d_iqk_txrx_path_post(struct rtw_dev *rtwdev,
+ const struct rtw_8723d_iqk_cfg *iqk_cfg,
+ const struct iqk_backup_regs *backup)
+{
+ rtw8723d_iqk_restore_lte_path_gnt(rtwdev, backup);
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, backup->bb_sel_btg);
+
+ /* leave IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+ mdelay(1);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_padlut, 0x800, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_WLINT, BIT(0), 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_WLSEL, BIT(0), 0x0);
+}
+
+static u8 rtw8723d_iqk_tx_path(struct rtw_dev *rtwdev,
+ const struct rtw_8723d_iqk_cfg *iqk_cfg,
+ const struct iqk_backup_regs *backup)
+{
+ u8 status;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path %s TXIQK!!\n", iqk_cfg->name);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @%s TXIQK = 0x%x\n",
+ iqk_cfg->name,
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, iqk_cfg->val_bb_sel_btg);
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+ mdelay(1);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x80000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00004);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD1, RFREG_MASK, 0x0005d);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0xBFFE0);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x00000);
+
+ /* IQK setting */
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x08008c0c);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, iqk_cfg->val_txiqk_pi);
+ rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28160200);
+ rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00);
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+
+ /* LOK setting */
+ rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x00462911);
+
+ /* PA, PAD setting */
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_padlut, 0x800, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_gaintx, 0x600, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_gaintx, 0x1E0, 0x3);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RXIQGEN, 0x1F, 0xf);
+
+ /* LOK setting for 8723D */
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, 0x10, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_bspad, 0x1, 0x1);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK, iqk_cfg->val_wlint);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK, iqk_cfg->val_wlsel);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x1 @%s TXIQK = 0x%x\n",
+ iqk_cfg->name,
+ rtw_read_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x2 @%s TXIQK = 0x%x\n",
+ iqk_cfg->name,
+ rtw_read_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK));
+
+ rtw8723d_iqk_one_shot(rtwdev, true, iqk_cfg);
+ status = rtw8723d_iqk_check_tx_failed(rtwdev, iqk_cfg);
+
+ rtw8723d_iqk_txrx_path_post(rtwdev, iqk_cfg, backup);
+
+ return status;
+}
+
+static u8 rtw8723d_iqk_rx_path(struct rtw_dev *rtwdev,
+ const struct rtw_8723d_iqk_cfg *iqk_cfg,
+ const struct iqk_backup_regs *backup)
+{
+ u32 tx_x, tx_y;
+ u8 status;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path %s RXIQK Step1!!\n",
+ iqk_cfg->name);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @%s RXIQK1 = 0x%x\n",
+ iqk_cfg->name,
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, iqk_cfg->val_bb_sel_btg);
+
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+
+ /* IQK setting */
+ rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00);
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+
+ /* path IQK setting */
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x18008c1c);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x82160000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28160000);
+
+ /* LOK setting */
+ rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a911);
+
+ /* RXIQK mode */
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x80000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00006);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD1, RFREG_MASK, 0x0005f);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0xa7ffb);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x00000);
+
+ /* PA/PAD=0 */
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_padlut, 0x800, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_gaintx, 0x600, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK, iqk_cfg->val_wlint);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK, iqk_cfg->val_wlsel);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x1@ path %s RXIQK1 = 0x%x\n",
+ iqk_cfg->name,
+ rtw_read_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x2@ path %s RXIQK1 = 0x%x\n",
+ iqk_cfg->name,
+ rtw_read_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK));
+
+ rtw8723d_iqk_one_shot(rtwdev, false, iqk_cfg);
+ status = rtw8723d_iqk_check_tx_failed(rtwdev, iqk_cfg);
+
+ if (!status)
+ goto restore;
+
+ /* second round */
+ tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX);
+ tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY);
+
+ rtw_write32(rtwdev, REG_TXIQK_11N, BIT_SET_TXIQK_11N(tx_x, tx_y));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe40 = 0x%x u4tmp = 0x%x\n",
+ rtw_read32(rtwdev, REG_TXIQK_11N),
+ BIT_SET_TXIQK_11N(tx_x, tx_y));
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path %s RXIQK STEP2!!\n",
+ iqk_cfg->name);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @%s RXIQK2 = 0x%x\n",
+ iqk_cfg->name,
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x18008c1c);
+ rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x82170000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28171400);
+
+ /* LOK setting */
+ rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a8d1);
+
+ /* RXIQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+ mdelay(1);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, 0x80000, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00007);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD1, RFREG_MASK, 0x0005f);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0xb3fdb);
+ rtw_write_rf(rtwdev, RF_PATH_A, iqk_cfg->reg_lutwe, RFREG_MASK, 0x00000);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x1 @%s RXIQK2 = 0x%x\n",
+ iqk_cfg->name,
+ rtw_read_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] RF0x2 @%s RXIQK2 = 0x%x\n",
+ iqk_cfg->name,
+ rtw_read_rf(rtwdev, RF_PATH_A, RF_WLSEL, RFREG_MASK));
+
+ rtw8723d_iqk_one_shot(rtwdev, false, iqk_cfg);
+ status |= rtw8723d_iqk_check_rx_failed(rtwdev, iqk_cfg);
+
+restore:
+ rtw8723d_iqk_txrx_path_post(rtwdev, iqk_cfg, backup);
+
+ return status;
+}
+
+static
+void rtw8723d_iqk_fill_s1_matrix(struct rtw_dev *rtwdev, const s32 result[])
+{
+ s32 oldval_1;
+ s32 x, y;
+ s32 tx1_a, tx1_a_ext;
+ s32 tx1_c, tx1_c_ext;
+
+ if (result[IQK_S1_TX_X] == 0)
+ return;
+
+ oldval_1 = rtw_read32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
+ BIT_MASK_TXIQ_ELM_D);
+
+ x = iqkxy_to_s32(result[IQK_S1_TX_X]);
+ tx1_a = iqk_mult(x, oldval_1, &tx1_a_ext);
+ rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
+ BIT_MASK_TXIQ_ELM_A, tx1_a);
+ rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD,
+ BIT_MASK_OFDM0_EXT_A, tx1_a_ext);
+
+ y = iqkxy_to_s32(result[IQK_S1_TX_Y]);
+ tx1_c = iqk_mult(y, oldval_1, &tx1_c_ext);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS,
+ BIT_SET_TXIQ_ELM_C1(tx1_c));
+ rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
+ BIT_MASK_TXIQ_ELM_C, BIT_SET_TXIQ_ELM_C2(tx1_c));
+ rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD,
+ BIT_MASK_OFDM0_EXT_C, tx1_c_ext);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] X = 0x%x, TX1_A = 0x%x, oldval_1 0x%x\n",
+ x, tx1_a, oldval_1);
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] Y = 0x%x, TX1_C = 0x%x\n", y, tx1_c);
+
+ if (result[IQK_S1_RX_X] == 0)
+ return;
+
+ rtw_write32_mask(rtwdev, REG_A_RXIQI, BIT_MASK_RXIQ_S1_X,
+ result[IQK_S1_RX_X]);
+ rtw_write32_mask(rtwdev, REG_A_RXIQI, BIT_MASK_RXIQ_S1_Y1,
+ BIT_SET_RXIQ_S1_Y1(result[IQK_S1_RX_Y]));
+ rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2,
+ BIT_SET_RXIQ_S1_Y2(result[IQK_S1_RX_Y]));
+}
+
+static
+void rtw8723d_iqk_fill_s0_matrix(struct rtw_dev *rtwdev, const s32 result[])
+{
+ s32 oldval_0;
+ s32 x, y;
+ s32 tx0_a, tx0_a_ext;
+ s32 tx0_c, tx0_c_ext;
+
+ if (result[IQK_S0_TX_X] == 0)
+ return;
+
+ oldval_0 = rtw_read32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_S0);
+
+ x = iqkxy_to_s32(result[IQK_S0_TX_X]);
+ tx0_a = iqk_mult(x, oldval_0, &tx0_a_ext);
+
+ rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_S0, tx0_a);
+ rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_EXT_S0, tx0_a_ext);
+
+ y = iqkxy_to_s32(result[IQK_S0_TX_Y]);
+ tx0_c = iqk_mult(y, oldval_0, &tx0_c_ext);
+
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_S0, tx0_c);
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_EXT_S0, tx0_c_ext);
+
+ if (result[IQK_S0_RX_X] == 0)
+ return;
+
+ rtw_write32_mask(rtwdev, REG_RXIQ_AB_S0, BIT_MASK_RXIQ_X_S0,
+ result[IQK_S0_RX_X]);
+ rtw_write32_mask(rtwdev, REG_RXIQ_AB_S0, BIT_MASK_RXIQ_Y_S0,
+ result[IQK_S0_RX_Y]);
+}
+
+static void rtw8723d_iqk_path_adda_on(struct rtw_dev *rtwdev)
+{
+ int i;
+
+ for (i = 0; i < IQK_ADDA_REG_NUM; i++)
+ rtw_write32(rtwdev, iqk_adda_regs[i], 0x03c00016);
+}
+
+static void rtw8723d_iqk_config_mac(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_TXPAUSE, 0xff);
+}
+
+static
+void rtw8723d_iqk_rf_standby(struct rtw_dev *rtwdev, enum rtw_rf_path path)
+{
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path-%s standby mode!\n",
+ path == RF_PATH_A ? "S1" : "S0");
+
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+ mdelay(1);
+ rtw_write_rf(rtwdev, path, RF_MODE, RFREG_MASK, 0x10000);
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
+}
+
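+/*
+ * Compare the results of rounds c1 and c2 element by element. Values
+ * within MAX_TOLERANCE count as matching; for the RX X entries, a round
+ * whose X/Y pair sums to zero is treated as invalid and the other round
+ * becomes the candidate. When the rounds disagree, the usable entries
+ * are merged into the IQK_ROUND_HYBRID row and false is returned; true
+ * is returned only on a full match.
+ */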
+static
+bool rtw8723d_iqk_similarity_cmp(struct rtw_dev *rtwdev, s32 result[][IQK_NR],
+ u8 c1, u8 c2)
+{
+ u32 i, j, diff;
+ u32 bitmap = 0;
+ u8 candidate[PATH_NR] = {IQK_ROUND_INVALID, IQK_ROUND_INVALID};
+ bool ret = true;
+
+ s32 tmp1, tmp2;
+
+ for (i = 0; i < IQK_NR; i++) {
+ tmp1 = iqkxy_to_s32(result[c1][i]);
+ tmp2 = iqkxy_to_s32(result[c2][i]);
+
+ diff = abs(tmp1 - tmp2);
+
+ if (diff <= MAX_TOLERANCE)
+ continue;
+
+ if ((i == IQK_S1_RX_X || i == IQK_S0_RX_X) && !bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ candidate[i / IQK_SX_NR] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ candidate[i / IQK_SX_NR] = c1;
+ else
+ bitmap |= BIT(i);
+ } else {
+ bitmap |= BIT(i);
+ }
+ }
+
+ if (bitmap != 0)
+ goto check_sim;
+
+ for (i = 0; i < PATH_NR; i++) {
+ if (candidate[i] == IQK_ROUND_INVALID)
+ continue;
+
+ for (j = i * IQK_SX_NR; j < i * IQK_SX_NR + 2; j++)
+ result[IQK_ROUND_HYBRID][j] = result[candidate[i]][j];
+ ret = false;
+ }
+
+ return ret;
+
+check_sim:
+ for (i = 0; i < IQK_NR; i++) {
+ j = i & ~1; /* 2 bits are a pair for IQ[X, Y] */
+ if (bitmap & GENMASK(j + 1, j))
+ continue;
+
+ result[IQK_ROUND_HYBRID][i] = result[c1][i];
+ }
+
+ return false;
+}
+
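+/*
+ * Per-path IQK pre-configuration: park one RF path in standby (RF path A
+ * before the S0 run, RF path B before the S1 run), switch the ADDA chain
+ * on, and program the TX/RX IQK window registers.
+ */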
+static
+void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723d_path path)
+{
+ if (path == PATH_S0) {
+ rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_A);
+ rtw8723d_iqk_path_adda_on(rtwdev);
+ }
+
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
+ rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00);
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+
+ if (path == PATH_S1) {
+ rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_B);
+ rtw8723d_iqk_path_adda_on(rtwdev);
+ }
+}
+
+static
+void rtw8723d_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t,
+ const struct iqk_backup_regs *backup)
+{
+ u32 i;
+ u8 s1_ok, s0_ok;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] IQ Calibration for 1T1R_S0/S1 for %d times\n", t);
+
+ rtw8723d_iqk_path_adda_on(rtwdev);
+ rtw8723d_iqk_config_mac(rtwdev);
+ rtw_write32_mask(rtwdev, REG_CCK_ANT_SEL_11N, 0x0f000000, 0xf);
+ rtw_write32(rtwdev, REG_BB_RX_PATH_11N, 0x03a05611);
+ rtw_write32(rtwdev, REG_TRMUX_11N, 0x000800e4);
+ rtw_write32(rtwdev, REG_BB_PWR_SAV1_11N, 0x25204200);
+ rtw8723d_iqk_precfg_path(rtwdev, PATH_S1);
+
+ for (i = 0; i < PATH_IQK_RETRY; i++) {
+ s1_ok = rtw8723d_iqk_tx_path(rtwdev, &iqk_tx_cfg[PATH_S1], backup);
+ if (s1_ok == IQK_TX_OK) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] path S1 Tx IQK Success!!\n");
+ result[t][IQK_S1_TX_X] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX);
+ result[t][IQK_S1_TX_Y] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY);
+ break;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S1 Tx IQK Fail!!\n");
+ result[t][IQK_S1_TX_X] = 0x100;
+ result[t][IQK_S1_TX_Y] = 0x0;
+ }
+
+ for (i = 0; i < PATH_IQK_RETRY; i++) {
+ s1_ok = rtw8723d_iqk_rx_path(rtwdev, &iqk_tx_cfg[PATH_S1], backup);
+ if (s1_ok == (IQK_TX_OK | IQK_RX_OK)) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] path S1 Rx IQK Success!!\n");
+ result[t][IQK_S1_RX_X] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_RX, BIT_MASK_RES_RX);
+ result[t][IQK_S1_RX_Y] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_MASK_RES_RY);
+ break;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S1 Rx IQK Fail!!\n");
+ result[t][IQK_S1_RX_X] = 0x100;
+ result[t][IQK_S1_RX_Y] = 0x0;
+ }
+
+ if (s1_ok == 0x0)
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S1 IQK is failed!!\n");
+
+ rtw8723d_iqk_precfg_path(rtwdev, PATH_S0);
+
+ for (i = 0; i < PATH_IQK_RETRY; i++) {
+ s0_ok = rtw8723d_iqk_tx_path(rtwdev, &iqk_tx_cfg[PATH_S0], backup);
+ if (s0_ok == IQK_TX_OK) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] path S0 Tx IQK Success!!\n");
+ result[t][IQK_S0_TX_X] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX);
+ result[t][IQK_S0_TX_Y] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY);
+ break;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S0 Tx IQK Fail!!\n");
+ result[t][IQK_S0_TX_X] = 0x100;
+ result[t][IQK_S0_TX_Y] = 0x0;
+ }
+
+ for (i = 0; i < PATH_IQK_RETRY; i++) {
+ s0_ok = rtw8723d_iqk_rx_path(rtwdev, &iqk_tx_cfg[PATH_S0], backup);
+ if (s0_ok == (IQK_TX_OK | IQK_RX_OK)) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] path S0 Rx IQK Success!!\n");
+
+ result[t][IQK_S0_RX_X] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_RX, BIT_MASK_RES_RX);
+ result[t][IQK_S0_RX_Y] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_MASK_RES_RY);
+ break;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S0 Rx IQK Fail!!\n");
+ result[t][IQK_S0_RX_X] = 0x100;
+ result[t][IQK_S0_RX_Y] = 0x0;
+ }
+
+ if (s0_ok == 0x0)
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path S0 IQK is failed!!\n");
+
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+ mdelay(1);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] back to BB mode, load original value!\n");
+}
+
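+/*
+ * Top-level IQK flow: run up to three calibration rounds, comparing
+ * each new round against the earlier ones. The first pair of rounds
+ * that agree supplies the final result; failing that, the hybrid row
+ * assembled during the comparisons is used, and if even that is all
+ * zero the IQK is declared failed.
+ */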
+static void rtw8723d_phy_calibration(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s32 result[IQK_ROUND_SIZE][IQK_NR];
+ struct iqk_backup_regs backup;
+ u8 i, j;
+ u8 final_candidate = IQK_ROUND_INVALID;
+ bool good;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] Start!!!\n");
+
+ memset(result, 0, sizeof(result));
+
+ rtw8723d_iqk_backup_path_ctrl(rtwdev, &backup);
+ rtw8723d_iqk_backup_lte_path_gnt(rtwdev, &backup);
+ rtw8723d_iqk_backup_regs(rtwdev, &backup);
+
+ for (i = IQK_ROUND_0; i <= IQK_ROUND_2; i++) {
+ rtw8723d_iqk_config_path_ctrl(rtwdev);
+ rtw8723d_iqk_config_lte_path_gnt(rtwdev);
+
+ rtw8723d_iqk_one_round(rtwdev, result, i, &backup);
+
+ if (i > IQK_ROUND_0)
+ rtw8723d_iqk_restore_regs(rtwdev, &backup);
+ rtw8723d_iqk_restore_lte_path_gnt(rtwdev, &backup);
+ rtw8723d_iqk_restore_path_ctrl(rtwdev, &backup);
+
+ for (j = IQK_ROUND_0; j < i; j++) {
+ good = rtw8723d_iqk_similarity_cmp(rtwdev, result, j, i);
+
+ if (good) {
+ final_candidate = j;
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] cmp %d:%d final_candidate is %x\n",
+ j, i, final_candidate);
+ goto iqk_done;
+ }
+ }
+ }
+
+ if (final_candidate == IQK_ROUND_INVALID) {
+ s32 reg_tmp = 0;
+
+ for (i = 0; i < IQK_NR; i++)
+ reg_tmp += result[IQK_ROUND_HYBRID][i];
+
+ if (reg_tmp != 0) {
+ final_candidate = IQK_ROUND_HYBRID;
+ } else {
+ WARN(1, "IQK failed\n");
+ goto out;
+ }
+ }
+
+iqk_done:
+ rtw8723d_iqk_fill_s1_matrix(rtwdev, result[final_candidate]);
+ rtw8723d_iqk_fill_s0_matrix(rtwdev, result[final_candidate]);
+
+ dm_info->iqk.result.s1_x = result[final_candidate][IQK_S1_TX_X];
+ dm_info->iqk.result.s1_y = result[final_candidate][IQK_S1_TX_Y];
+ dm_info->iqk.result.s0_x = result[final_candidate][IQK_S0_TX_X];
+ dm_info->iqk.result.s0_y = result[final_candidate][IQK_S0_TX_Y];
+ dm_info->iqk.done = true;
+
+out:
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, backup.bb_sel_btg);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] final_candidate is %x\n",
+ final_candidate);
+
+ for (i = IQK_ROUND_0; i < IQK_ROUND_SIZE; i++)
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] Result %u: rege94_s1=%x rege9c_s1=%x regea4_s1=%x regeac_s1=%x rege94_s0=%x rege9c_s0=%x regea4_s0=%x regeac_s0=%x %s\n",
+ i,
+ result[i][0], result[i][1], result[i][2], result[i][3],
+ result[i][4], result[i][5], result[i][6], result[i][7],
+ final_candidate == i ? "(final candidate)" : "");
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK]0xc80 = 0x%x 0xc94 = 0x%x 0xc14 = 0x%x 0xca0 = 0x%x\n",
+ rtw_read32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE),
+ rtw_read32(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N),
+ rtw_read32(rtwdev, REG_A_RXIQI),
+ rtw_read32(rtwdev, REG_RXIQK_MATRIX_LSB_11N));
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK]0xcd0 = 0x%x 0xcd4 = 0x%x 0xcd8 = 0x%x\n",
+ rtw_read32(rtwdev, REG_TXIQ_AB_S0),
+ rtw_read32(rtwdev, REG_TXIQ_CD_S0),
+ rtw_read32(rtwdev, REG_RXIQ_AB_S0));
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] finished\n");
+}
+
+/* for coex */
+static void rtw8723d_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+ /* enable TBTT interrupt */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+
+ /* BT report packet sample rate */
+ /* 0x790[5:0]=0x5 */
+ rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05);
+
+ /* enable BT counter statistics */
+ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1);
+
+ /* enable PTA (3-wire function from the BT side) */
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3);
+
+ /* enable PTA (tx/rx signal from the WiFi side) */
+ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
+}
+
+static void rtw8723d_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8723d_coex_cfg_gnt_debug(struct rtw_dev *rtwdev)
+{
+ rtw_write8_mask(rtwdev, REG_LEDCFG2, BIT(6), 0);
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 3, BIT(0), 0);
+ rtw_write8_mask(rtwdev, REG_GPIO_INTM + 2, BIT(4), 0);
+ rtw_write8_mask(rtwdev, REG_GPIO_MUXCFG + 2, BIT(1), 0);
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 3, BIT(1), 0);
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 2, BIT(7), 0);
+ rtw_write8_mask(rtwdev, REG_SYS_CLKR + 1, BIT(1), 0);
+ rtw_write8_mask(rtwdev, REG_SYS_SDIO_CTRL + 3, BIT(3), 0);
+}
+
+static void rtw8723d_coex_cfg_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ bool aux = efuse->bt_setting & BIT(6);
+
+ coex_rfe->rfe_module_type = rtwdev->efuse.rfe_option;
+ coex_rfe->ant_switch_polarity = 0;
+ coex_rfe->ant_switch_exist = false;
+ coex_rfe->ant_switch_with_bt = false;
+ coex_rfe->ant_switch_diversity = false;
+ coex_rfe->wlg_at_btg = true;
+
+ /* route the antenna to the main or aux position */
+ if (efuse->share_ant) {
+ if (aux)
+ rtw_write16(rtwdev, REG_BB_SEL_BTG, 0x80);
+ else
+ rtw_write16(rtwdev, REG_BB_SEL_BTG, 0x200);
+ } else {
+ if (aux)
+ rtw_write16(rtwdev, REG_BB_SEL_BTG, 0x280);
+ else
+ rtw_write16(rtwdev, REG_BB_SEL_BTG, 0x0);
+ }
+
+ /* disable LTE coex on the WiFi side */
+ rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, BIT_LTE_COEX_EN, 0x0);
+ rtw_coex_write_indirect_reg(rtwdev, LTE_WL_TRX_CTRL, MASKLWORD, 0xffff);
+ rtw_coex_write_indirect_reg(rtwdev, LTE_BT_TRX_CTRL, MASKLWORD, 0xffff);
+}
+
+static void rtw8723d_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ static const u8 wl_tx_power[] = {0xb2, 0x90};
+ u8 pwr;
+
+ if (wl_pwr == coex_dm->cur_wl_pwr_lvl)
+ return;
+
+ coex_dm->cur_wl_pwr_lvl = wl_pwr;
+
+ if (coex_dm->cur_wl_pwr_lvl >= ARRAY_SIZE(wl_tx_power))
+ coex_dm->cur_wl_pwr_lvl = ARRAY_SIZE(wl_tx_power) - 1;
+
+ pwr = wl_tx_power[coex_dm->cur_wl_pwr_lvl];
+
+ rtw_write8(rtwdev, REG_ANA_PARAM1 + 3, pwr);
+}
+
+static void rtw8723d_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ /* WL Rx Low gain on */
+ static const u32 wl_rx_low_gain_on[] = {
+ 0xec120101, 0xeb130101, 0xce140101, 0xcd150101, 0xcc160101,
+ 0xcb170101, 0xca180101, 0x8d190101, 0x8c1a0101, 0x8b1b0101,
+ 0x4f1c0101, 0x4e1d0101, 0x4d1e0101, 0x4c1f0101, 0x0e200101,
+ 0x0d210101, 0x0c220101, 0x0b230101, 0xcf240001, 0xce250001,
+ 0xcd260001, 0xcc270001, 0x8f280001
+ };
+ /* WL Rx Low gain off */
+ static const u32 wl_rx_low_gain_off[] = {
+ 0xec120101, 0xeb130101, 0xea140101, 0xe9150101, 0xe8160101,
+ 0xe7170101, 0xe6180101, 0xe5190101, 0xe41a0101, 0xe31b0101,
+ 0xe21c0101, 0xe11d0101, 0xe01e0101, 0x861f0101, 0x85200101,
+ 0x84210101, 0x83220101, 0x82230101, 0x81240101, 0x80250101,
+ 0x44260101, 0x43270101, 0x42280101
+ };
+ u8 i;
+
+ if (low_gain == coex_dm->cur_wl_rx_low_gain_en)
+ return;
+
+ coex_dm->cur_wl_rx_low_gain_en = low_gain;
+
+ if (coex_dm->cur_wl_rx_low_gain_en) {
+ for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_on); i++)
+ rtw_write32(rtwdev, REG_AGCRSSI, wl_rx_low_gain_on[i]);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_off); i++)
+ rtw_write32(rtwdev, REG_AGCRSSI, wl_rx_low_gain_off[i]);
+ }
+}
+
+static u8 rtw8723d_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 tx_rate = dm_info->tx_rate;
+ u8 limit_ofdm = 30;
+
+ switch (tx_rate) {
+ case DESC_RATE1M...DESC_RATE5_5M:
+ case DESC_RATE11M:
+ break;
+ case DESC_RATE6M...DESC_RATE48M:
+ limit_ofdm = 36;
+ break;
+ case DESC_RATE54M:
+ limit_ofdm = 34;
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS2:
+ limit_ofdm = 38;
+ break;
+ case DESC_RATEMCS3...DESC_RATEMCS4:
+ limit_ofdm = 36;
+ break;
+ case DESC_RATEMCS5...DESC_RATEMCS7:
+ limit_ofdm = 34;
+ break;
+ default:
+ rtw_warn(rtwdev, "pwrtrack unhandled tx_rate 0x%x\n", tx_rate);
+ break;
+ }
+
+ return limit_ofdm;
+}
+
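+/*
+ * Rebuild the TX IQ imbalance matrix from the stored IQK result: D is
+ * taken from the OFDM swing entry, while A and C are the IQK X/Y values
+ * multiplied onto D via iqk_mult() (overflow going to the ext bits);
+ * element B always stays 0.
+ */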
+static void rtw8723d_set_iqk_matrix_by_result(struct rtw_dev *rtwdev,
+ u32 ofdm_swing, u8 rf_path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s32 ele_A, ele_D, ele_C;
+ s32 ele_A_ext, ele_C_ext, ele_D_ext;
+ s32 iqk_result_x;
+ s32 iqk_result_y;
+ s32 value32;
+
+ switch (rf_path) {
+ default:
+ case RF_PATH_A:
+ iqk_result_x = dm_info->iqk.result.s1_x;
+ iqk_result_y = dm_info->iqk.result.s1_y;
+ break;
+ case RF_PATH_B:
+ iqk_result_x = dm_info->iqk.result.s0_x;
+ iqk_result_y = dm_info->iqk.result.s0_y;
+ break;
+ }
+
+ /* new element D */
+ ele_D = OFDM_SWING_D(ofdm_swing);
+ iqk_mult(iqk_result_x, ele_D, &ele_D_ext);
+ /* new element A */
+ iqk_result_x = iqkxy_to_s32(iqk_result_x);
+ ele_A = iqk_mult(iqk_result_x, ele_D, &ele_A_ext);
+ /* new element C */
+ iqk_result_y = iqkxy_to_s32(iqk_result_y);
+ ele_C = iqk_mult(iqk_result_y, ele_D, &ele_C_ext);
+
+ switch (rf_path) {
+ case RF_PATH_A:
+ default:
+ /* write new elements A, C, D, and element B is always 0 */
+ value32 = BIT_SET_TXIQ_ELM_ACD(ele_A, ele_C, ele_D);
+ rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, value32);
+ value32 = BIT_SET_TXIQ_ELM_C1(ele_C);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS,
+ value32);
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS;
+ value32 |= BIT_SET_OFDM0_EXTS(ele_A_ext, ele_C_ext, ele_D_ext);
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+
+ case RF_PATH_B:
+ /* write new elements A, C, D, and element B is always 0 */
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_S0, ele_D);
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_S0, ele_C);
+ rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_S0, ele_A);
+
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_EXT_S0,
+ ele_D_ext);
+ rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_EXT_S0,
+ ele_A_ext);
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_EXT_S0,
+ ele_C_ext);
+ break;
+ }
+}
+
+static void rtw8723d_set_iqk_matrix(struct rtw_dev *rtwdev, s8 ofdm_index,
+ u8 rf_path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s32 value32;
+ u32 ofdm_swing;
+
+ if (ofdm_index >= RTW_OFDM_SWING_TABLE_SIZE)
+ ofdm_index = RTW_OFDM_SWING_TABLE_SIZE - 1;
+ else if (ofdm_index < 0)
+ ofdm_index = 0;
+
+ ofdm_swing = rtw8723d_ofdm_swing_table[ofdm_index];
+
+ if (dm_info->iqk.done) {
+ rtw8723d_set_iqk_matrix_by_result(rtwdev, ofdm_swing, rf_path);
+ return;
+ }
+
+ switch (rf_path) {
+ case RF_PATH_A:
+ default:
+ rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, ofdm_swing);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS,
+ 0x00);
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS;
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+
+ case RF_PATH_B:
+ /* mirror the S1 setting (0xc80) into the S0 registers (0xcd0, 0xcd4) */
+ rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_S0,
+ OFDM_SWING_A(ofdm_swing));
+ rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_B_S0,
+ OFDM_SWING_B(ofdm_swing));
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_S0,
+ OFDM_SWING_C(ofdm_swing));
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_S0,
+ OFDM_SWING_D(ofdm_swing));
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_D_EXT_S0, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXIQ_CD_S0, BIT_MASK_TXIQ_C_EXT_S0, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXIQ_AB_S0, BIT_MASK_TXIQ_A_EXT_S0, 0x0);
+ break;
+ }
+}
+
+static void rtw8723d_pwrtrack_set_ofdm_pwr(struct rtw_dev *rtwdev, s8 swing_idx,
+ s8 txagc_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->txagc_remnant_ofdm = txagc_idx;
+
+ rtw8723d_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_A);
+ rtw8723d_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_B);
+}
+
+static void rtw8723d_pwrtrack_set_cck_pwr(struct rtw_dev *rtwdev, s8 swing_idx,
+ s8 txagc_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->txagc_remnant_cck = txagc_idx;
+
+ rtw_write32_mask(rtwdev, 0xab4, 0x000007FF,
+ rtw8723d_cck_swing_table[swing_idx]);
+}
+
+static void rtw8723d_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 limit_ofdm;
+ u8 limit_cck = 40;
+ s8 final_ofdm_swing_index;
+ s8 final_cck_swing_index;
+
+ limit_ofdm = rtw8723d_pwrtrack_get_limit_ofdm(rtwdev);
+
+ final_ofdm_swing_index = RTW_DEF_OFDM_SWING_INDEX +
+ dm_info->delta_power_index[path];
+ final_cck_swing_index = RTW_DEF_CCK_SWING_INDEX +
+ dm_info->delta_power_index[path];
+
+ if (final_ofdm_swing_index > limit_ofdm)
+ rtw8723d_pwrtrack_set_ofdm_pwr(rtwdev, limit_ofdm,
+ final_ofdm_swing_index - limit_ofdm);
+ else if (final_ofdm_swing_index < 0)
+ rtw8723d_pwrtrack_set_ofdm_pwr(rtwdev, 0,
+ final_ofdm_swing_index);
+ else
+ rtw8723d_pwrtrack_set_ofdm_pwr(rtwdev, final_ofdm_swing_index, 0);
+
+ if (final_cck_swing_index > limit_cck)
+ rtw8723d_pwrtrack_set_cck_pwr(rtwdev, limit_cck,
+ final_cck_swing_index - limit_cck);
+ else if (final_cck_swing_index < 0)
+ rtw8723d_pwrtrack_set_cck_pwr(rtwdev, 0,
+ final_cck_swing_index);
+ else
+ rtw8723d_pwrtrack_set_cck_pwr(rtwdev, final_cck_swing_index, 0);
+
+ rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
+}
+
+static void rtw8723d_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path,
+ u8 delta)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
+ const s8 *pwrtrk_xtal;
+ s8 xtal_cap;
+
+ if (dm_info->thermal_avg[therm_path] >
+ rtwdev->efuse.thermal_meter[therm_path])
+ pwrtrk_xtal = tbl->pwrtrk_xtal_p;
+ else
+ pwrtrk_xtal = tbl->pwrtrk_xtal_n;
+
+ xtal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+ xtal_cap = clamp_t(s8, xtal_cap + pwrtrk_xtal[delta], 0, 0x3F);
+ rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL,
+ xtal_cap | (xtal_cap << 6));
+}
+
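+/*
+ * Thermal-based TX power tracking: read the RF thermal meter and, once
+ * the averaged value has drifted from the eFuse calibration point, look
+ * up a new swing delta and reprogram the power index and crystal cap.
+ * An LCK is run first (and a full IQK afterwards) when the drift is
+ * large enough to require it.
+ */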
+static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_swing_table swing_table;
+ u8 thermal_value, delta, path;
+ bool do_iqk = false;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ if (rtwdev->efuse.thermal_meter[0] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A);
+
+ do_iqk = rtw_phy_pwrtrack_need_iqk(rtwdev);
+
+ if (do_iqk)
+ rtw8723d_lck(rtwdev);
+
+ if (dm_info->pwr_trk_init_trigger)
+ dm_info->pwr_trk_init_trigger = false;
+ else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value,
+ RF_PATH_A))
+ goto iqk;
+
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A);
+
+ delta = min_t(u8, delta, RTW_PWR_TRK_TBL_SZ - 1);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ s8 delta_cur, delta_last;
+
+ delta_last = dm_info->delta_power_index[path];
+ delta_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, &swing_table,
+ path, RF_PATH_A, delta);
+ if (delta_last == delta_cur)
+ continue;
+
+ dm_info->delta_power_index[path] = delta_cur;
+ rtw8723d_pwrtrack_set(rtwdev, path);
+ }
+
+ rtw8723d_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta);
+
+iqk:
+ if (do_iqk)
+ rtw8723d_phy_calibration(rtwdev);
+}
+
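+/*
+ * Power-track entry point, called periodically: the first call only
+ * arms the RF thermal meter (RF_T_METER bits 17:16), the next call
+ * reads it back and performs the actual tracking.
+ */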
+static void rtw8723d_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (efuse->power_track_type != 0)
+ return;
+
+ if (!dm_info->pwr_trk_triggered) {
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER,
+ GENMASK(17, 16), 0x03);
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ rtw8723d_phy_pwrtrack(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
+static struct rtw_chip_ops rtw8723d_ops = {
+ .phy_set_param = rtw8723d_phy_set_param,
+ .read_efuse = rtw8723d_read_efuse,
+ .query_rx_desc = rtw8723d_query_rx_desc,
+ .set_channel = rtw8723d_set_channel,
+ .mac_init = rtw8723d_mac_init,
+ .shutdown = rtw8723d_shutdown,
+ .read_rf = rtw_phy_read_rf_sipi,
+ .write_rf = rtw_phy_write_rf_reg_sipi,
+ .set_tx_power_index = rtw8723d_set_tx_power_index,
+ .set_antenna = NULL,
+ .cfg_ldo25 = rtw8723d_cfg_ldo25,
+ .efuse_grant = rtw8723d_efuse_grant,
+ .false_alarm_statistics = rtw8723d_false_alarm_statistics,
+ .phy_calibration = rtw8723d_phy_calibration,
+ .pwr_track = rtw8723d_pwr_track,
+ .config_bfee = NULL,
+ .set_gid_table = NULL,
+ .cfg_csi_rate = NULL,
+
+ .coex_set_init = rtw8723d_coex_cfg_init,
+ .coex_set_ant_switch = NULL,
+ .coex_set_gnt_fix = rtw8723d_coex_cfg_gnt_fix,
+ .coex_set_gnt_debug = rtw8723d_coex_cfg_gnt_debug,
+ .coex_set_rfe_type = rtw8723d_coex_cfg_rfe_type,
+ .coex_set_wl_tx_power = rtw8723d_coex_cfg_wl_tx_power,
+ .coex_set_wl_rx_gain = rtw8723d_coex_cfg_wl_rx_gain,
+};
+
+/* Shared-Antenna Coex Table */
+static const struct coex_table_para table_sant_8723d[] = {
+ {0xffffffff, 0xffffffff}, /* case-0 */
+ {0x55555555, 0x55555555},
+ {0x65555555, 0x65555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-5 */
+ {0xa5555555, 0xaaaa5aaa},
+ {0x6a5a5a5a, 0x5a5a5a5a},
+ {0x6a5a5a5a, 0x6a5a5a5a},
+ {0x65555555, 0x5a5a5a5a},
+ {0x65555555, 0x6a5a5a5a}, /* case-10 */
+ {0x65555555, 0xfafafafa},
+ {0x65555555, 0x6a5a5aaa},
+ {0x65555555, 0x5aaa5aaa},
+ {0x65555555, 0xaaaa5aaa},
+ {0x65555555, 0xaaaaaaaa}, /* case-15 */
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x6afa5afa},
+ {0xaaffffaa, 0xfafafafa},
+ {0xaa5555aa, 0x5a5a5a5a},
+ {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */
+ {0xaa5555aa, 0xaaaaaaaa},
+ {0xffffffff, 0x5a5a5a5a},
+ {0xffffffff, 0x6a5a5a5a},
+ {0xffffffff, 0x55555555},
+ {0xffffffff, 0x6a5a5aaa}, /* case-25 */
+ {0x55555555, 0x5a5a5a5a},
+ {0x55555555, 0xaaaaaaaa},
+ {0x55555555, 0x6a6a6a6a},
+ {0x656a656a, 0x656a656a}
+};
+
+/* Non-Shared-Antenna Coex Table */
+static const struct coex_table_para table_nsant_8723d[] = {
+ {0xffffffff, 0xffffffff}, /* case-100 */
+ {0x55555555, 0x55555555},
+ {0x65555555, 0x65555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-105 */
+ {0x5afa5afa, 0x5afa5afa},
+ {0x55555555, 0xfafafafa},
+ {0x65555555, 0xfafafafa},
+ {0x65555555, 0x5a5a5a5a},
+ {0x65555555, 0x6a5a5a5a}, /* case-110 */
+ {0x65555555, 0xaaaaaaaa},
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x5afa5afa},
+ {0xffff55ff, 0xaaaaaaaa},
+ {0xaaffffaa, 0xfafafafa}, /* case-115 */
+ {0xaaffffaa, 0x5afa5afa},
+ {0xaaffffaa, 0xaaaaaaaa},
+ {0xffffffff, 0xfafafafa},
+ {0xffffffff, 0x5afa5afa},
+ {0xffffffff, 0xaaaaaaaa}, /* case-120 */
+ {0x55ff55ff, 0x5afa5afa},
+ {0x55ff55ff, 0xaaaaaaaa},
+ {0x55ff55ff, 0x55ff55ff}
+};
+
+/* Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_sant_8723d[] = {
+ { {0x08, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */
+ { {0x61, 0x48, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */
+ { {0x61, 0x10, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x10, 0x03, 0x10, 0x54} },
+ { {0x51, 0x10, 0x03, 0x10, 0x55} },
+ { {0x51, 0x10, 0x07, 0x10, 0x54} }, /* case-15 */
+ { {0x51, 0x45, 0x03, 0x10, 0x50} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x15, 0x03, 0x10, 0x50} }, /* case-20 */
+ { {0x51, 0x4a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x0c, 0x03, 0x10, 0x54} },
+ { {0x55, 0x08, 0x03, 0x10, 0x54} },
+ { {0x65, 0x10, 0x03, 0x11, 0x11} },
+ { {0x51, 0x10, 0x03, 0x10, 0x51} },
+ { {0x61, 0x15, 0x03, 0x11, 0x10} }
+};
+
+/* Non-Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_nsant_8723d[] = {
+ { {0x00, 0x00, 0x00, 0x40, 0x01} }, /* case-100 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-101 */
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */
+ { {0x51, 0x45, 0x03, 0x10, 0x50} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-120 */
+ { {0x51, 0x08, 0x03, 0x10, 0x50} },
+};
+
+/* RSSI in percent (dBm = percent - 100) */
+static const u8 wl_rssi_step_8723d[] = {60, 50, 44, 30};
+static const u8 bt_rssi_step_8723d[] = {30, 30, 30, 30};
+static const struct coex_5g_afh_map afh_5g_8723d[] = { {0, 0, 0} };
+
+static const struct rtw_hw_reg btg_reg_8723d = {
+ .addr = REG_BTG_SEL, .mask = BIT_MASK_BTG_WL,
+};
+
+/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */
+static const struct coex_rf_para rf_para_tx_8723d[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 10, false, 7}, /* for WL-CPT */
+ {1, 0, true, 4},
+ {1, 2, true, 4},
+ {1, 10, true, 4},
+ {1, 15, true, 4}
+};
+
+static const struct coex_rf_para rf_para_rx_8723d[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 10, false, 7}, /* for WL-CPT */
+ {1, 0, true, 5},
+ {1, 2, true, 5},
+ {1, 10, true, 5},
+ {1, 15, true, 5}
+};
+
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8723d[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(7), 0},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), 0},
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8723d[] = {
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0001,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, (BIT(1) | BIT(0)), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0063,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0058,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x005A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0068,
+ RTW_PWR_CUT_TEST_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x0069,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x001f,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0077,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x001f,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x07},
+ {0x0077,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x07},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8723d[] = {
+ trans_carddis_to_cardemu_8723d,
+ trans_cardemu_to_act_8723d,
+ NULL
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_lps_8723d[] = {
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xFF},
+ {0x0522,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xFF},
+ {0x05F8,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xFF, 0},
+ {0x05F9,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xFF, 0},
+ {0x05FA,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xFF, 0},
+ {0x05FB,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xFF, 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 0, RTW_PWR_DELAY_US},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0100,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x03},
+ {0x0101,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0093,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0553,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_pre_carddis_8723d[] = {
+ {0x0003,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0080,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8723d[] = {
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), 0},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8723d[] = {
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x20},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 1},
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_post_carddis_8723d[] = {
+ {0x001D,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x001D,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x001C,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x0E},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8723d[] = {
+ trans_act_to_lps_8723d,
+ trans_act_to_pre_carddis_8723d,
+ trans_act_to_cardemu_8723d,
+ trans_cardemu_to_carddis_8723d,
+ trans_act_to_post_carddis_8723d,
+ NULL
+};
+
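+/* Reserved page counts {high, normal, low, extra, gap}; the rows appear
+ * to be indexed by host interface (SDIO, PCIe, USB with 2/3/4 bulk-out
+ * endpoints) and are all identical for this chip.
+ */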
+static const struct rtw_page_table page_table_8723d[] = {
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+};
+
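+/* Per-row mapping of the VO/VI/BE/BK/MG/HI queues onto DMA rings, with
+ * the same interface-indexed rows as page_table_8723d above.
+ */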
+static const struct rtw_rqpn rqpn_table_8723d[] = {
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+};
+
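+/* Registers holding the reserved/available page counters of each DMA
+ * priority queue; wsize=false appears to select byte-wide accesses.
+ */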
+static const struct rtw_prioq_addrs prioq_addrs_8723d = {
+ .prio[RTW_DMA_MAPPING_EXTRA] = {
+ .rsvd = REG_RQPN_NPQ + 2, .avail = REG_RQPN_NPQ + 3,
+ },
+ .prio[RTW_DMA_MAPPING_LOW] = {
+ .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1,
+ },
+ .prio[RTW_DMA_MAPPING_NORMAL] = {
+ .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1,
+ },
+ .prio[RTW_DMA_MAPPING_HIGH] = {
+ .rsvd = REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2,
+ },
+ .wsize = false,
+};
+
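+/* PCIe PHY fix-ups: {address, data, IP select, cut mask, platform mask},
+ * 0xFFFF-terminated.  The ~(CUT_A | CUT_B) mask restricts the 0x0009
+ * write to C-cut and later silicon.
+ */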
+static const struct rtw_intf_phy_para pcie_gen1_param_8723d[] = {
+ {0x0008, 0x4a22,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0009, 0x1000,
+ RTW_IP_SEL_PHY,
+ ~(RTW_INTF_PHY_CUT_A | RTW_INTF_PHY_CUT_B),
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static const struct rtw_intf_phy_para_table phy_para_table_8723d = {
+ .gen1_para = pcie_gen1_param_8723d,
+ .n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8723d),
+};
+
+static const struct rtw_hw_reg rtw8723d_dig[] = {
+ [0] = { .addr = 0xc50, .mask = 0x7f },
+ [1] = { .addr = 0xc50, .mask = 0x7f },
+};
+
+static const struct rtw_hw_reg rtw8723d_dig_cck[] = {
+ [0] = { .addr = 0xa0c, .mask = 0x3f00 },
+};
+
+static const struct rtw_rf_sipi_addr rtw8723d_rf_sipi_addr[] = {
+ [RF_PATH_A] = { .hssi_1 = 0x820, .lssi_read = 0x8a0,
+ .hssi_2 = 0x824, .lssi_read_pi = 0x8b8},
+ [RF_PATH_B] = { .hssi_1 = 0x828, .lssi_read = 0x8a4,
+ .hssi_2 = 0x82c, .lssi_read_pi = 0x8bc},
+};
+
+static const struct rtw_ltecoex_addr rtw8723d_ltecoex_addr = {
+ .ctrl = REG_LTECOEX_CTRL,
+ .wdata = REG_LTECOEX_WRITE_DATA,
+ .rdata = REG_LTECOEX_READ_DATA,
+};
+
+static const struct rtw_rfe_def rtw8723d_rfe_defs[] = {
+ [0] = { .phy_pg_tbl = &rtw8723d_bb_pg_tbl,
+ .txpwr_lmt_tbl = &rtw8723d_txpwr_lmt_tbl,},
+};
+
+static const u8 rtw8723d_pwrtrk_2gb_n[] = {
+ 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 5,
+ 6, 6, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8723d_pwrtrk_2gb_p[] = {
+ 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8723d_pwrtrk_2ga_n[] = {
+ 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 5,
+ 6, 6, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8723d_pwrtrk_2ga_p[] = {
+ 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8723d_pwrtrk_2g_cck_b_n[] = {
+ 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8723d_pwrtrk_2g_cck_b_p[] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7,
+ 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8723d_pwrtrk_2g_cck_a_n[] = {
+ 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8723d_pwrtrk_2g_cck_a_p[] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7,
+ 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11
+};
+
+static const s8 rtw8723d_pwrtrk_xtal_n[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static const s8 rtw8723d_pwrtrk_xtal_p[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, -10, -12, -14, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16
+};
+
+static const struct rtw_pwr_track_tbl rtw8723d_rtw_pwr_track_tbl = {
+ .pwrtrk_2gb_n = rtw8723d_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8723d_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8723d_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8723d_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckb_n = rtw8723d_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8723d_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8723d_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8723d_pwrtrk_2g_cck_a_p,
+ .pwrtrk_xtal_p = rtw8723d_pwrtrk_xtal_p,
+ .pwrtrk_xtal_n = rtw8723d_pwrtrk_xtal_n,
+};
+
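+/* Registers dumped for the coex debugfs info; {0, 0, RTW_REG_DOMAIN_NL}
+ * entries appear to act as line breaks in that dump.
+ */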
+static const struct rtw_reg_domain coex_info_hw_regs_8723d[] = {
+ {0x948, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x67, BIT(7), RTW_REG_DOMAIN_MAC8},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x964, BIT(1), RTW_REG_DOMAIN_MAC8},
+ {0x864, BIT(0), RTW_REG_DOMAIN_MAC8},
+ {0xab7, BIT(5), RTW_REG_DOMAIN_MAC8},
+ {0xa01, BIT(7), RTW_REG_DOMAIN_MAC8},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+ {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8},
+ {0x40, BIT(5), RTW_REG_DOMAIN_MAC8},
+ {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x953, BIT(1), RTW_REG_DOMAIN_MAC8},
+};
+
+struct rtw_chip_info rtw8723d_hw_spec = {
+ .ops = &rtw8723d_ops,
+ .id = RTW_CHIP_TYPE_8723D,
+ .fw_name = "rtw88/rtw8723d_fw.bin",
+ .wlan_cpu = RTW_WCPU_11N,
+ .tx_pkt_desc_sz = 40,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 512,
+ .log_efuse_size = 512,
+ .ptct_efuse_size = 96 + 1,
+ .txff_size = 32768,
+ .rxff_size = 16384,
+ .txgi_factor = 1,
+ .is_pwr_by_rate_dec = true,
+ .max_power_index = 0x3f,
+ .csi_buf_pg_num = 0,
+ .band = RTW_BAND_2G,
+ .page_size = 128,
+ .dig_min = 0x20,
+ .ht_supported = true,
+ .vht_supported = false,
+ .lps_deep_mode_supported = 0,
+ .sys_func_en = 0xFD,
+ .pwr_on_seq = card_enable_flow_8723d,
+ .pwr_off_seq = card_disable_flow_8723d,
+ .page_table = page_table_8723d,
+ .rqpn_table = rqpn_table_8723d,
+ .prioq_addrs = &prioq_addrs_8723d,
+ .intf_table = &phy_para_table_8723d,
+ .dig = rtw8723d_dig,
+ .dig_cck = rtw8723d_dig_cck,
+ .rf_sipi_addr = {0x840, 0x844},
+ .rf_sipi_read_addr = rtw8723d_rf_sipi_addr,
+ .fix_rf_phy_num = 2,
+ .ltecoex_addr = &rtw8723d_ltecoex_addr,
+ .mac_tbl = &rtw8723d_mac_tbl,
+ .agc_tbl = &rtw8723d_agc_tbl,
+ .bb_tbl = &rtw8723d_bb_tbl,
+ .rf_tbl = {&rtw8723d_rf_a_tbl},
+ .rfe_defs = rtw8723d_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8723d_rfe_defs),
+ .rx_ldpc = false,
+ .pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl,
+ .iqk_threshold = 8,
+
+ .coex_para_ver = 0x1905302f,
+ .bt_desired_ver = 0x2f,
+ .scbd_support = true,
+ .new_scbd10_def = true,
+ .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+ .bt_rssi_type = COEX_BTRSSI_RATIO,
+ .ant_isolation = 15,
+ .rssi_tolerance = 2,
+ .wl_rssi_step = wl_rssi_step_8723d,
+ .bt_rssi_step = bt_rssi_step_8723d,
+ .table_sant_num = ARRAY_SIZE(table_sant_8723d),
+ .table_sant = table_sant_8723d,
+ .table_nsant_num = ARRAY_SIZE(table_nsant_8723d),
+ .table_nsant = table_nsant_8723d,
+ .tdma_sant_num = ARRAY_SIZE(tdma_sant_8723d),
+ .tdma_sant = tdma_sant_8723d,
+ .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8723d),
+ .tdma_nsant = tdma_nsant_8723d,
+ .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8723d),
+ .wl_rf_para_tx = rf_para_tx_8723d,
+ .wl_rf_para_rx = rf_para_rx_8723d,
+ .bt_afh_span_bw20 = 0x20,
+ .bt_afh_span_bw40 = 0x30,
+ .afh_5g_num = ARRAY_SIZE(afh_5g_8723d),
+ .afh_5g = afh_5g_8723d,
+ .btg_reg = &btg_reg_8723d,
+
+ .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8723d),
+ .coex_info_hw_regs = coex_info_hw_regs_8723d,
+};
+EXPORT_SYMBOL(rtw8723d_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8723d_fw.bin");
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11n wireless 8723d driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
new file mode 100644
index 000000000000..7894d321cd7e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8723D_H__
+#define __RTW8723D_H__
+
+enum rtw8723d_path {
+ PATH_S1,
+ PATH_S0,
+ PATH_NR,
+};
+
+enum rtw8723d_iqk_round {
+ IQK_ROUND_0,
+ IQK_ROUND_1,
+ IQK_ROUND_2,
+ IQK_ROUND_HYBRID,
+ IQK_ROUND_SIZE,
+ IQK_ROUND_INVALID = 0xff,
+};
+
+enum rtw8723d_iqk_result {
+ IQK_S1_TX_X,
+ IQK_S1_TX_Y,
+ IQK_S1_RX_X,
+ IQK_S1_RX_Y,
+ IQK_S0_TX_X,
+ IQK_S0_TX_Y,
+ IQK_S0_RX_X,
+ IQK_S0_RX_Y,
+ IQK_NR,
+ IQK_SX_NR = IQK_NR / PATH_NR,
+};
+
+struct rtw8723de_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0xd0 */
+ u8 vender_id[2];
+ u8 device_id[2];
+ u8 sub_vender_id[2];
+ u8 sub_device_id[2];
+};
+
+struct rtw8723d_efuse {
+ __le16 rtl_id;
+ u8 rsvd[2];
+ u8 afe;
+ u8 rsvd1[11];
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g[2]; /* 0xbd */
+ u8 lna_type_5g[2];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 tx_bb_swing_setting_2g;
+ u8 res_c7;
+ u8 tx_pwr_calibrate_rate;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 country_code[2];
+ u8 res[3];
+ struct rtw8723de_efuse e;
+};
+
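+/* phy_stat points at the little-endian PHY status dwords appended to a
+ * received packet; the +0x00..+0x06 offsets below index those dwords.
+ */
+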
+/* phy status page0 */
+#define GET_PHY_STAT_P0_PWDB(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+
+/* phy status page1 */
+#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_PWDB_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16))
+#define GET_PHY_STAT_P1_RF_MODE(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x03), GENMASK(29, 28))
+#define GET_PHY_STAT_P1_L_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
+#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
+
+static inline s32 iqkxy_to_s32(s32 val)
+{
+ /* val is Q10.8 */
+ return sign_extend32(val, 9);
+}
+
+static inline s32 iqk_mult(s32 x, s32 y, s32 *ext)
+{
+ /* x, y and return value are Q10.8 */
+ s32 t;
+
+ t = x * y;
+ if (ext)
+ *ext = (t >> 7) & 0x1; /* Q.16 --> Q.9; get LSB of Q.9 */
+
+ return (t >> 8); /* Q.16 --> Q.8 */
+}
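+
+/* Worked example, assuming the Q.8 fraction encoding noted above:
+ * iqk_mult(0x180, 0x81, &ext) computes 384 * 129 = 0xC180 (Q.16) and
+ * returns 0xC1 (~0.754 in Q.8) with ext = 1, the Q.9 bit dropped by
+ * the final shift.
+ */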
+
+#define OFDM_SWING_A(swing) FIELD_GET(GENMASK(9, 0), swing)
+#define OFDM_SWING_B(swing) FIELD_GET(GENMASK(15, 10), swing)
+#define OFDM_SWING_C(swing) FIELD_GET(GENMASK(21, 16), swing)
+#define OFDM_SWING_D(swing) FIELD_GET(GENMASK(31, 22), swing)
+#define RTW_DEF_OFDM_SWING_INDEX 28
+#define RTW_DEF_CCK_SWING_INDEX 28
+
+#define MAX_TOLERANCE 5
+#define IQK_TX_X_ERR 0x142
+#define IQK_TX_Y_ERR 0x42
+#define IQK_RX_X_UPPER 0x11a
+#define IQK_RX_X_LOWER 0xe6
+#define IQK_RX_Y_LMT 0x1a
+#define IQK_TX_OK BIT(0)
+#define IQK_RX_OK BIT(1)
+#define PATH_IQK_RETRY 2
+
+#define SPUR_THRES 0x16
+#define CCK_DFIR_NR 3
+#define DIS_3WIRE 0xccf000c0
+#define EN_3WIRE 0xccc000c0
+#define START_PSD 0x400000
+#define FREQ_CH13 0xfccd
+#define FREQ_CH14 0xff9a
+#define RFCFGCH_CHANNEL_MASK GENMASK(7, 0)
+#define RFCFGCH_BW_MASK (BIT(11) | BIT(10))
+#define RFCFGCH_BW_20M (BIT(11) | BIT(10))
+#define RFCFGCH_BW_40M BIT(10)
+#define BIT_MASK_RFMOD BIT(0)
+#define BIT_LCK BIT(15)
+
+#define REG_GPIO_INTM 0x0048
+#define REG_BTG_SEL 0x0067
+#define BIT_MASK_BTG_WL BIT(7)
+#define REG_LTECOEX_PATH_CONTROL 0x0070
+#define REG_LTECOEX_CTRL 0x07c0
+#define REG_LTECOEX_WRITE_DATA 0x07c4
+#define REG_LTECOEX_READ_DATA 0x07c8
+#define REG_PSDFN 0x0808
+#define REG_BB_PWR_SAV1_11N 0x0874
+#define REG_ANA_PARAM1 0x0880
+#define REG_ANALOG_P4 0x088c
+#define REG_PSDRPT 0x08b4
+#define REG_FPGA1_RFMOD 0x0900
+#define REG_BB_SEL_BTG 0x0948
+#define REG_BBRX_DFIR 0x0954
+#define BIT_MASK_RXBB_DFIR GENMASK(27, 24)
+#define BIT_RXBB_DFIR_EN BIT(19)
+#define REG_CCK0_SYS 0x0a00
+#define BIT_CCK_SIDE_BAND BIT(4)
+#define REG_CCK_ANT_SEL_11N 0x0a04
+#define REG_CCK_FA_RST_11N 0x0a2c
+#define BIT_MASK_CCK_CNT_KEEP BIT(12)
+#define BIT_MASK_CCK_CNT_EN BIT(13)
+#define BIT_MASK_CCK_CNT_KPEN (BIT_MASK_CCK_CNT_KEEP | BIT_MASK_CCK_CNT_EN)
+#define BIT_MASK_CCK_FA_KEEP BIT(14)
+#define BIT_MASK_CCK_FA_EN BIT(15)
+#define BIT_MASK_CCK_FA_KPEN (BIT_MASK_CCK_FA_KEEP | BIT_MASK_CCK_FA_EN)
+#define REG_CCK_FA_LSB_11N 0x0a5c
+#define REG_CCK_FA_MSB_11N 0x0a58
+#define REG_CCK_CCA_CNT_11N 0x0a60
+#define BIT_MASK_CCK_FA_MSB GENMASK(7, 0)
+#define BIT_MASK_CCK_FA_LSB GENMASK(15, 8)
+#define REG_OFDM_FA_HOLDC_11N 0x0c00
+#define BIT_MASK_OFDM_FA_KEEP BIT(31)
+#define REG_BB_RX_PATH_11N 0x0c04
+#define REG_TRMUX_11N 0x0c08
+#define REG_OFDM_FA_RSTC_11N 0x0c0c
+#define BIT_MASK_OFDM_FA_RST BIT(31)
+#define REG_A_RXIQI 0x0c14
+#define BIT_MASK_RXIQ_S1_X 0x000003FF
+#define BIT_MASK_RXIQ_S1_Y1 0x0000FC00
+#define BIT_SET_RXIQ_S1_Y1(y) ((y) & 0x3F)
+#define REG_OFDM0_RXDSP 0x0c40
+#define BIT_MASK_RXDSP GENMASK(28, 24)
+#define BIT_EN_RXDSP BIT(9)
+#define REG_OFDM_0_ECCA_THRESHOLD 0x0c4c
+#define BIT_MASK_OFDM0_EXT_A BIT(31)
+#define BIT_MASK_OFDM0_EXT_C BIT(29)
+#define BIT_MASK_OFDM0_EXTS (BIT(31) | BIT(29) | BIT(28))
+#define BIT_SET_OFDM0_EXTS(a, c, d) (((a) << 31) | ((c) << 29) | ((d) << 28))
+#define REG_OFDM0_XAAGC1 0x0c50
+#define REG_OFDM0_XBAGC1 0x0c58
+#define REG_AGCRSSI 0x0c78
+#define REG_OFDM_0_XA_TX_IQ_IMBALANCE 0x0c80
+#define BIT_MASK_TXIQ_ELM_A 0x03ff
+#define BIT_SET_TXIQ_ELM_ACD(a, c, d) (((d) << 22) | (((c) & 0x3F) << 16) | \
+ ((a) & 0x03ff))
+#define BIT_MASK_TXIQ_ELM_C GENMASK(21, 16)
+#define BIT_SET_TXIQ_ELM_C2(c) ((c) & 0x3F)
+#define BIT_MASK_TXIQ_ELM_D GENMASK(31, 22)
+#define REG_TXIQK_MATRIXA_LSB2_11N 0x0c94
+#define BIT_SET_TXIQ_ELM_C1(c) (((c) & 0x000003C0) >> 6)
+#define REG_RXIQK_MATRIX_LSB_11N 0x0ca0
+#define BIT_MASK_RXIQ_S1_Y2 0xF0000000
+#define BIT_SET_RXIQ_S1_Y2(y) (((y) >> 6) & 0xF)
+#define REG_TXIQ_AB_S0 0x0cd0
+#define BIT_MASK_TXIQ_A_S0 0x000007FE
+#define BIT_MASK_TXIQ_A_EXT_S0 BIT(0)
+#define BIT_MASK_TXIQ_B_S0 0x0007E000
+#define REG_TXIQ_CD_S0 0x0cd4
+#define BIT_MASK_TXIQ_C_S0 0x000007FE
+#define BIT_MASK_TXIQ_C_EXT_S0 BIT(0)
+#define BIT_MASK_TXIQ_D_S0 GENMASK(22, 13)
+#define BIT_MASK_TXIQ_D_EXT_S0 BIT(12)
+#define REG_RXIQ_AB_S0 0x0cd8
+#define BIT_MASK_RXIQ_X_S0 0x000003FF
+#define BIT_MASK_RXIQ_Y_S0 0x003FF000
+#define REG_OFDM_FA_TYPE1_11N 0x0cf0
+#define BIT_MASK_OFDM_FF_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_SF_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_RSTD_11N 0x0d00
+#define BIT_MASK_OFDM_FA_RST1 BIT(27)
+#define BIT_MASK_OFDM_FA_KEEP1 BIT(31)
+#define REG_CTX 0x0d03
+#define BIT_MASK_CTX_TYPE GENMASK(6, 4)
+#define REG_OFDM1_CFOTRK 0x0d2c
+#define BIT_EN_CFOTRK BIT(28)
+#define REG_OFDM1_CSI1 0x0d40
+#define REG_OFDM1_CSI2 0x0d44
+#define REG_OFDM1_CSI3 0x0d48
+#define REG_OFDM1_CSI4 0x0d4c
+#define REG_OFDM_FA_TYPE2_11N 0x0da0
+#define BIT_MASK_OFDM_CCA_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_PF_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_TYPE3_11N 0x0da4
+#define BIT_MASK_OFDM_RI_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_CRC_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_TYPE4_11N 0x0da8
+#define BIT_MASK_OFDM_MNS_CNT GENMASK(15, 0)
+#define REG_FPGA0_IQK_11N 0x0e28
+#define BIT_MASK_IQK_MOD 0xffffff00
+#define EN_IQK 0x808000
+#define RST_IQK 0x000000
+#define REG_TXIQK_TONE_A_11N 0x0e30
+#define REG_RXIQK_TONE_A_11N 0x0e34
+#define REG_TXIQK_PI_A_11N 0x0e38
+#define REG_RXIQK_PI_A_11N 0x0e3c
+#define REG_TXIQK_11N 0x0e40
+#define BIT_SET_TXIQK_11N(x, y) (0x80007C00 | ((x) << 16) | (y))
+#define REG_RXIQK_11N 0x0e44
+#define REG_IQK_AGC_PTS_11N 0x0e48
+#define REG_IQK_AGC_RSP_11N 0x0e4c
+#define REG_TX_IQK_TONE_B 0x0e50
+#define REG_RX_IQK_TONE_B 0x0e54
+#define REG_IQK_RES_TX 0x0e94
+#define BIT_MASK_RES_TX GENMASK(25, 16)
+#define REG_IQK_RES_TY 0x0e9c
+#define BIT_MASK_RES_TY GENMASK(25, 16)
+#define REG_IQK_RES_RX 0x0ea4
+#define BIT_MASK_RES_RX GENMASK(25, 16)
+#define REG_IQK_RES_RY 0x0eac
+#define BIT_IQK_TX_FAIL BIT(28)
+#define BIT_IQK_RX_FAIL BIT(27)
+#define BIT_IQK_DONE BIT(26)
+#define BIT_MASK_RES_RY GENMASK(25, 16)
+#define REG_PAGE_F_RST_11N 0x0f14
+#define BIT_MASK_F_RST_ALL BIT(16)
+#define REG_IGI_C_11N 0x0f84
+#define REG_IGI_D_11N 0x0f88
+#define REG_HT_CRC32_CNT_11N 0x0f90
+#define BIT_MASK_HT_CRC_OK GENMASK(15, 0)
+#define BIT_MASK_HT_CRC_ERR GENMASK(31, 16)
+#define REG_OFDM_CRC32_CNT_11N 0x0f94
+#define BIT_MASK_OFDM_LCRC_OK GENMASK(15, 0)
+#define BIT_MASK_OFDM_LCRC_ERR GENMASK(31, 16)
+#define REG_HT_CRC32_CNT_11N_AGG 0x0fb8
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d_table.c b/drivers/net/wireless/realtek/rtw88/rtw8723d_table.c
new file mode 100644
index 000000000000..27a22b392df0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d_table.c
@@ -0,0 +1,1196 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8723d_table.h"
+
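+/* Flat {register offset, value} pairs consumed by rtw_phy_cfg_mac(). */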
+static const u32 rtw8723d_mac[] = {
+ 0x020, 0x00000013,
+ 0x02F, 0x00000010,
+ 0x077, 0x00000007,
+ 0x421, 0x0000000F,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000002,
+ 0x435, 0x00000003,
+ 0x436, 0x00000005,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43A, 0x00000000,
+ 0x43B, 0x00000001,
+ 0x43C, 0x00000002,
+ 0x43D, 0x00000003,
+ 0x43E, 0x00000005,
+ 0x43F, 0x00000007,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x456, 0x0000005E,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000028,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000028,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000C,
+ 0x63F, 0x0000000C,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66A, 0x000000B0,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x765, 0x00000018,
+ 0x76E, 0x00000004,
+ 0x7C0, 0x00000038,
+ 0x7C2, 0x0000000F,
+ 0x7C3, 0x000000C0,
+ 0x073, 0x00000004,
+ 0x7C4, 0x00000077,
+ 0x07C, 0x00000003,
+ 0x016, 0x000000B3,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8723d_mac, rtw_phy_cfg_mac);
+
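+/* AGC gain table: every word written to 0xC78 appears to pack the gain
+ * in bits 31:24, the table index in bits 23:16, and a table selector in
+ * the low 16 bits.
+ */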
+static const u32 rtw8723d_agc[] = {
+ 0xC78, 0xFE000101,
+ 0xC78, 0xFD010101,
+ 0xC78, 0xFC020101,
+ 0xC78, 0xFB030101,
+ 0xC78, 0xFA040101,
+ 0xC78, 0xF9050101,
+ 0xC78, 0xF8060101,
+ 0xC78, 0xF7070101,
+ 0xC78, 0xF6080101,
+ 0xC78, 0xF5090101,
+ 0xC78, 0xF40A0101,
+ 0xC78, 0xF30B0101,
+ 0xC78, 0xF20C0101,
+ 0xC78, 0xF10D0101,
+ 0xC78, 0xF00E0101,
+ 0xC78, 0xEF0F0101,
+ 0xC78, 0xEE100101,
+ 0xC78, 0xED110101,
+ 0xC78, 0xEC120101,
+ 0xC78, 0xEB130101,
+ 0xC78, 0xEA140101,
+ 0xC78, 0xE9150101,
+ 0xC78, 0xE8160101,
+ 0xC78, 0xE7170101,
+ 0xC78, 0xE6180101,
+ 0xC78, 0xE5190101,
+ 0xC78, 0xE41A0101,
+ 0xC78, 0xE31B0101,
+ 0xC78, 0xE21C0101,
+ 0xC78, 0xE11D0101,
+ 0xC78, 0xE01E0101,
+ 0xC78, 0x861F0101,
+ 0xC78, 0x85200101,
+ 0xC78, 0x84210101,
+ 0xC78, 0x83220101,
+ 0xC78, 0x82230101,
+ 0xC78, 0x81240101,
+ 0xC78, 0x80250101,
+ 0xC78, 0x44260101,
+ 0xC78, 0x43270101,
+ 0xC78, 0x42280101,
+ 0xC78, 0x41290101,
+ 0xC78, 0x402A0101,
+ 0xC78, 0x022B0101,
+ 0xC78, 0x012C0101,
+ 0xC78, 0x002D0101,
+ 0xC78, 0xC52E0001,
+ 0xC78, 0xC42F0001,
+ 0xC78, 0xC3300001,
+ 0xC78, 0xC2310001,
+ 0xC78, 0xC1320001,
+ 0xC78, 0xC0330001,
+ 0xC78, 0x04340001,
+ 0xC78, 0x03350001,
+ 0xC78, 0x02360001,
+ 0xC78, 0x01370001,
+ 0xC78, 0x00380001,
+ 0xC78, 0x00390001,
+ 0xC78, 0x003A0001,
+ 0xC78, 0x003B0001,
+ 0xC78, 0x003C0001,
+ 0xC78, 0x003D0001,
+ 0xC78, 0x003E0001,
+ 0xC78, 0x003F0001,
+ 0xC78, 0x6F002001,
+ 0xC78, 0x6F012001,
+ 0xC78, 0x6F022001,
+ 0xC78, 0x6F032001,
+ 0xC78, 0x6F042001,
+ 0xC78, 0x6F052001,
+ 0xC78, 0x6F062001,
+ 0xC78, 0x6F072001,
+ 0xC78, 0x6F082001,
+ 0xC78, 0x6F092001,
+ 0xC78, 0x6F0A2001,
+ 0xC78, 0x6F0B2001,
+ 0xC78, 0x6F0C2001,
+ 0xC78, 0x6F0D2001,
+ 0xC78, 0x6F0E2001,
+ 0xC78, 0x6F0F2001,
+ 0xC78, 0x6F102001,
+ 0xC78, 0x6F112001,
+ 0xC78, 0x6F122001,
+ 0xC78, 0x6F132001,
+ 0xC78, 0x6F142001,
+ 0xC78, 0x6F152001,
+ 0xC78, 0x6F162001,
+ 0xC78, 0x6F172001,
+ 0xC78, 0x6F182001,
+ 0xC78, 0x6F192001,
+ 0xC78, 0x6F1A2001,
+ 0xC78, 0x6F1B2001,
+ 0xC78, 0x6F1C2001,
+ 0xC78, 0x6F1D2001,
+ 0xC78, 0x6F1E2001,
+ 0xC78, 0x6F1F2001,
+ 0xC78, 0x6F202001,
+ 0xC78, 0x6F212001,
+ 0xC78, 0x6F222001,
+ 0xC78, 0x6F232001,
+ 0xC78, 0x6E242001,
+ 0xC78, 0x6D252001,
+ 0xC78, 0x6C262001,
+ 0xC78, 0x6B272001,
+ 0xC78, 0x6A282001,
+ 0xC78, 0x69292001,
+ 0xC78, 0x4B2A2001,
+ 0xC78, 0x4A2B2001,
+ 0xC78, 0x492C2001,
+ 0xC78, 0x482D2001,
+ 0xC78, 0x472E2001,
+ 0xC78, 0x462F2001,
+ 0xC78, 0x45302001,
+ 0xC78, 0x44312001,
+ 0xC78, 0x43322001,
+ 0xC78, 0x42332001,
+ 0xC78, 0x41342001,
+ 0xC78, 0x40352001,
+ 0xC78, 0x02362001,
+ 0xC78, 0x01372001,
+ 0xC78, 0x00382001,
+ 0xC78, 0x00392001,
+ 0xC78, 0x003A2001,
+ 0xC78, 0x003B2001,
+ 0xC78, 0x003C2001,
+ 0xC78, 0x003D2001,
+ 0xC78, 0x003E2001,
+ 0xC78, 0x003F2001,
+ 0xC78, 0x7F003101,
+ 0xC78, 0x7F013101,
+ 0xC78, 0x7F023101,
+ 0xC78, 0x7F033101,
+ 0xC78, 0x7F043101,
+ 0xC78, 0x7F053101,
+ 0xC78, 0x7F063101,
+ 0xC78, 0x7F073101,
+ 0xC78, 0x7E083101,
+ 0xC78, 0x7D093101,
+ 0xC78, 0x7C0A3101,
+ 0xC78, 0x7B0B3101,
+ 0xC78, 0x7A0C3101,
+ 0xC78, 0x790D3101,
+ 0xC78, 0x780E3101,
+ 0xC78, 0x770F3101,
+ 0xC78, 0x76103101,
+ 0xC78, 0x75113101,
+ 0xC78, 0x74123101,
+ 0xC78, 0x73133101,
+ 0xC78, 0x72143101,
+ 0xC78, 0x71153101,
+ 0xC78, 0x70163101,
+ 0xC78, 0x6F173101,
+ 0xC78, 0x6E183101,
+ 0xC78, 0x6D193101,
+ 0xC78, 0x6C1A3101,
+ 0xC78, 0x6B1B3101,
+ 0xC78, 0x6A1C3101,
+ 0xC78, 0x691D3101,
+ 0xC78, 0x681E3101,
+ 0xC78, 0x4B1F3101,
+ 0xC78, 0x4A203101,
+ 0xC78, 0x49213101,
+ 0xC78, 0x48223101,
+ 0xC78, 0x47233101,
+ 0xC78, 0x46243101,
+ 0xC78, 0x45253101,
+ 0xC78, 0x44263101,
+ 0xC78, 0x43273101,
+ 0xC78, 0x42283101,
+ 0xC78, 0x41293101,
+ 0xC78, 0x402A3101,
+ 0xC78, 0x022B3101,
+ 0xC78, 0x012C3101,
+ 0xC78, 0x002D3101,
+ 0xC78, 0x002E3101,
+ 0xC78, 0x002F3101,
+ 0xC78, 0x00303101,
+ 0xC78, 0x00313101,
+ 0xC78, 0x00323101,
+ 0xC78, 0x00333101,
+ 0xC78, 0x00343101,
+ 0xC78, 0x00353101,
+ 0xC78, 0x00363101,
+ 0xC78, 0x00373101,
+ 0xC78, 0x00383101,
+ 0xC78, 0x00393101,
+ 0xC78, 0x003A3101,
+ 0xC78, 0x003B3101,
+ 0xC78, 0x003C3101,
+ 0xC78, 0x003D3101,
+ 0xC78, 0x003E3101,
+ 0xC78, 0x003F3101,
+ 0xC78, 0xFE403101,
+ 0xC78, 0xFD413101,
+ 0xC78, 0xFC423101,
+ 0xC78, 0xFB433101,
+ 0xC78, 0xFA443101,
+ 0xC78, 0xF9453101,
+ 0xC78, 0xF8463101,
+ 0xC78, 0xF7473101,
+ 0xC78, 0xF6483101,
+ 0xC78, 0xF5493101,
+ 0xC78, 0xF44A3101,
+ 0xC78, 0xF34B3101,
+ 0xC78, 0xF24C3101,
+ 0xC78, 0xF14D3101,
+ 0xC78, 0xF04E3101,
+ 0xC78, 0xEF4F3101,
+ 0xC78, 0xEE503101,
+ 0xC78, 0xED513101,
+ 0xC78, 0xEC523101,
+ 0xC78, 0xEB533101,
+ 0xC78, 0xEA543101,
+ 0xC78, 0xE9553101,
+ 0xC78, 0xE8563101,
+ 0xC78, 0xE7573101,
+ 0xC78, 0xE6583101,
+ 0xC78, 0xE5593101,
+ 0xC78, 0xE45A3101,
+ 0xC78, 0xE35B3101,
+ 0xC78, 0xE25C3101,
+ 0xC78, 0xE15D3101,
+ 0xC78, 0xE05E3101,
+ 0xC78, 0x865F3101,
+ 0xC78, 0x85603101,
+ 0xC78, 0x84613101,
+ 0xC78, 0x83623101,
+ 0xC78, 0x82633101,
+ 0xC78, 0x81643101,
+ 0xC78, 0x80653101,
+ 0xC78, 0x80663101,
+ 0xC78, 0x80673101,
+ 0xC78, 0x80683101,
+ 0xC78, 0x80693101,
+ 0xC78, 0x806A3101,
+ 0xC78, 0x806B3101,
+ 0xC78, 0x806C3101,
+ 0xC78, 0x806D3101,
+ 0xC78, 0x806E3101,
+ 0xC78, 0x806F3101,
+ 0xC78, 0x80703101,
+ 0xC78, 0x80713101,
+ 0xC78, 0x80723101,
+ 0xC78, 0x80733101,
+ 0xC78, 0x80743101,
+ 0xC78, 0x80753101,
+ 0xC78, 0x80763101,
+ 0xC78, 0x80773101,
+ 0xC78, 0x80783101,
+ 0xC78, 0x80793101,
+ 0xC78, 0x807A3101,
+ 0xC78, 0x807B3101,
+ 0xC78, 0x807C3101,
+ 0xC78, 0x807D3101,
+ 0xC78, 0x807E3101,
+ 0xC78, 0x807F3101,
+ 0xC78, 0xEF402001,
+ 0xC78, 0xEF412001,
+ 0xC78, 0xEF422001,
+ 0xC78, 0xEF432001,
+ 0xC78, 0xEF442001,
+ 0xC78, 0xEF452001,
+ 0xC78, 0xEF462001,
+ 0xC78, 0xEF472001,
+ 0xC78, 0xEF482001,
+ 0xC78, 0xEF492001,
+ 0xC78, 0xEF4A2001,
+ 0xC78, 0xEF4B2001,
+ 0xC78, 0xEF4C2001,
+ 0xC78, 0xEF4D2001,
+ 0xC78, 0xEF4E2001,
+ 0xC78, 0xEF4F2001,
+ 0xC78, 0xEF502001,
+ 0xC78, 0xEF512001,
+ 0xC78, 0xEF522001,
+ 0xC78, 0xEF532001,
+ 0xC78, 0xEF542001,
+ 0xC78, 0xEF552001,
+ 0xC78, 0xEF562001,
+ 0xC78, 0xEF572001,
+ 0xC78, 0xEF582001,
+ 0xC78, 0xEF592001,
+ 0xC78, 0xEF5A2001,
+ 0xC78, 0xEF5B2001,
+ 0xC78, 0xEF5C2001,
+ 0xC78, 0xEF5D2001,
+ 0xC78, 0xEF5E2001,
+ 0xC78, 0xEF5F2001,
+ 0xC78, 0xEF602001,
+ 0xC78, 0xEE612001,
+ 0xC78, 0xED622001,
+ 0xC78, 0xEC632001,
+ 0xC78, 0xEB642001,
+ 0xC78, 0xEA652001,
+ 0xC78, 0xE9662001,
+ 0xC78, 0xE8672001,
+ 0xC78, 0xCB682001,
+ 0xC78, 0xCA692001,
+ 0xC78, 0xC96A2001,
+ 0xC78, 0xC86B2001,
+ 0xC78, 0xC76C2001,
+ 0xC78, 0xC66D2001,
+ 0xC78, 0xC56E2001,
+ 0xC78, 0xC46F2001,
+ 0xC78, 0xC3702001,
+ 0xC78, 0xC2712001,
+ 0xC78, 0xC1722001,
+ 0xC78, 0xC0732001,
+ 0xC78, 0x82742001,
+ 0xC78, 0x81752001,
+ 0xC78, 0x80762001,
+ 0xC78, 0x80772001,
+ 0xC78, 0x80782001,
+ 0xC78, 0x80792001,
+ 0xC78, 0x807A2001,
+ 0xC78, 0x807B2001,
+ 0xC78, 0x807C2001,
+ 0xC78, 0x807D2001,
+ 0xC78, 0x807E2001,
+ 0xC78, 0x807F2001,
+ 0xC78, 0xFA001101,
+ 0xC78, 0xF9011101,
+ 0xC78, 0xF8021101,
+ 0xC78, 0xF7031101,
+ 0xC78, 0xF6041101,
+ 0xC78, 0xF5051101,
+ 0xC78, 0xF4061101,
+ 0xC78, 0xD7071101,
+ 0xC78, 0xD6081101,
+ 0xC78, 0xD5091101,
+ 0xC78, 0xD40A1101,
+ 0xC78, 0x970B1101,
+ 0xC78, 0x960C1101,
+ 0xC78, 0x950D1101,
+ 0xC78, 0x940E1101,
+ 0xC78, 0x930F1101,
+ 0xC78, 0x92101101,
+ 0xC78, 0x91111101,
+ 0xC78, 0x90121101,
+ 0xC78, 0x8F131101,
+ 0xC78, 0x8E141101,
+ 0xC78, 0x8D151101,
+ 0xC78, 0x8C161101,
+ 0xC78, 0x8B171101,
+ 0xC78, 0x8A181101,
+ 0xC78, 0x89191101,
+ 0xC78, 0x881A1101,
+ 0xC78, 0x871B1101,
+ 0xC78, 0x861C1101,
+ 0xC78, 0x851D1101,
+ 0xC78, 0x841E1101,
+ 0xC78, 0x831F1101,
+ 0xC78, 0x82201101,
+ 0xC78, 0x81211101,
+ 0xC78, 0x80221101,
+ 0xC78, 0x43231101,
+ 0xC78, 0x42241101,
+ 0xC78, 0x41251101,
+ 0xC78, 0x04261101,
+ 0xC78, 0x03271101,
+ 0xC78, 0x02281101,
+ 0xC78, 0x01291101,
+ 0xC78, 0x002A1101,
+ 0xC78, 0xC42B1001,
+ 0xC78, 0xC32C1001,
+ 0xC78, 0xC22D1001,
+ 0xC78, 0xC12E1001,
+ 0xC78, 0xC02F1001,
+ 0xC78, 0x85301001,
+ 0xC78, 0x84311001,
+ 0xC78, 0x83321001,
+ 0xC78, 0x82331001,
+ 0xC78, 0x81341001,
+ 0xC78, 0x80351001,
+ 0xC78, 0x05361001,
+ 0xC78, 0x04371001,
+ 0xC78, 0x03381001,
+ 0xC78, 0x02391001,
+ 0xC78, 0x013A1001,
+ 0xC78, 0x003B1001,
+ 0xC78, 0x003C1001,
+ 0xC78, 0x003D1001,
+ 0xC78, 0x003E1001,
+ 0xC78, 0x003F1001,
+ 0xC50, 0x69553422,
+ 0xC50, 0x69553420,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8723d_agc, rtw_phy_cfg_agc);
+
+static const u32 rtw8723d_bb[] = {
+ 0x800, 0x80046C00,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x00200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A11A9,
+ 0x85C, 0x01000014,
+ 0x860, 0x66F60110,
+ 0x864, 0x461F0641,
+ 0x868, 0x00000000,
+ 0x86C, 0x27272700,
+ 0x870, 0x07000460,
+ 0x874, 0x25004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x004F0201,
+ 0x880, 0xB2002E12,
+ 0x884, 0x00000007,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0x910, 0x00000402,
+ 0x914, 0x00000300,
+ 0x920, 0x18C6318C,
+ 0x924, 0x0000018C,
+ 0x948, 0x99000000,
+ 0x94C, 0x00000010,
+ 0x950, 0x00003800,
+ 0x954, 0x5A380000,
+ 0x958, 0x4BC6D87A,
+ 0x95C, 0x04EB9B79,
+ 0x96C, 0x00000003,
+ 0x970, 0x00000000,
+ 0x974, 0x00000000,
+ 0x978, 0x00000000,
+ 0x97C, 0x13000000,
+ 0x980, 0x00000000,
+ 0xA00, 0x00D046C8,
+ 0xA04, 0x80FF800C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E20100F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0xE82C0001,
+ 0xA24, 0x64B80C1C,
+ 0xA28, 0x00008810,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00008900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x2180FA74,
+ 0xA84, 0x00200000,
+ 0xA88, 0x040C0000,
+ 0xA8C, 0x12345678,
+ 0xA90, 0xABCDEF00,
+ 0xA94, 0x001B1B89,
+ 0xA98, 0x00000000,
+ 0xA9C, 0x00020000,
+ 0xAA0, 0x00000000,
+ 0xAA4, 0x0000000C,
+ 0xAA8, 0xCA100008,
+ 0xAAC, 0x01235667,
+ 0xAB0, 0x00000000,
+ 0xAB4, 0x20201402,
+ 0xB2C, 0x00000000,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x28800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC48,
+ 0xC34, 0x31000040,
+ 0xC38, 0x21688080,
+ 0xC3C, 0x000016D4,
+ 0xC40, 0x1F78403F,
+ 0xC44, 0x00010036,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69553420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x00015969,
+ 0xC5C, 0x00310492,
+ 0xC60, 0x00280A00,
+ 0xC64, 0x7112848B,
+ 0xC68, 0x47C074FF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x020600DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x390000E4,
+ 0xC84, 0x21F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00091521,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00121820,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00012000,
+ 0xCA4, 0x800000A0,
+ 0xCA8, 0x84E6C606,
+ 0xCAC, 0x00000060,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x0010A3D0,
+ 0xCC4, 0x00000F7D,
+ 0xCC8, 0x000442D6,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x000001C8,
+ 0xCD4, 0x001C8000,
+ 0xCD8, 0x00000100,
+ 0xCDC, 0x40100000,
+ 0xCE0, 0x00222220,
+ 0xCE4, 0x20000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00030740,
+ 0xD04, 0x40020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC53,
+ 0xD18, 0x7A8F5B6F,
+ 0xD2C, 0xCC979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x40608000,
+ 0xD38, 0x88000000,
+ 0xD3C, 0xC0127343,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x00000038,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000282,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xE00, 0x2D2D2D2D,
+ 0xE04, 0x2D2D2D2D,
+ 0xE08, 0x0390272D,
+ 0xE10, 0x2D2D2D2D,
+ 0xE14, 0x2D2D2D2D,
+ 0xE18, 0x2D2D2D2D,
+ 0xE1C, 0x2D2D2D2D,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000008,
+ 0xE68, 0x001B25A4,
+ 0xE6C, 0x01C00014,
+ 0xE70, 0x01C00016,
+ 0xE74, 0x02000014,
+ 0xE78, 0x02000014,
+ 0xE7C, 0x02000014,
+ 0xE80, 0x02000014,
+ 0xE84, 0x01C00014,
+ 0xE88, 0x02000014,
+ 0xE8C, 0x01C00014,
+ 0xED0, 0x01C00014,
+ 0xED4, 0x01C00014,
+ 0xED8, 0x01C00014,
+ 0xEDC, 0x00000014,
+ 0xEE0, 0x00000014,
+ 0xEE8, 0x21555448,
+ 0xEEC, 0x03C00014,
+ 0xF14, 0x00000003,
+ 0xF00, 0x00100300,
+ 0xF08, 0x0000800B,
+ 0xF0C, 0x0000F007,
+ 0xF10, 0x0000A487,
+ 0xF1C, 0x80000064,
+ 0xF38, 0x00030155,
+ 0xF3C, 0x0000003A,
+ 0xF4C, 0x13000000,
+ 0xF50, 0x00000000,
+ 0xF18, 0x00000000,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8723d_bb, rtw_phy_cfg_bb);
+
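+/* Power-by-rate pairs: {band, RF path, TX count, register, bitmask,
+ * data}, where data packs one power index byte per rate.
+ */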
+static const struct rtw_phy_pg_cfg_pair rtw8723d_bb_pg[] = {
+ { 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003200, },
+ { 0, 0, 0, 0x0000086c, 0xffffff00, 0x32323200, },
+ { 0, 0, 0, 0x00000e00, 0xffffffff, 0x32343434, },
+ { 0, 0, 0, 0x00000e04, 0xffffffff, 0x28303032, },
+ { 0, 0, 0, 0x00000e10, 0xffffffff, 0x30323234, },
+ { 0, 0, 0, 0x00000e14, 0xffffffff, 0x26282830, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8723d_bb_pg);
+
+static const u32 rtw8723d_rf_a[] = {
+ 0x050, 0x0001C000,
+ 0x049, 0x0004AA00,
+ 0x000, 0x00010000,
+ 0x0B1, 0x00054573,
+ 0x0B4, 0x000508AB,
+ 0x0B7, 0x00014787,
+ 0x0B8, 0x000064CB,
+ 0x01B, 0x00073A40,
+ 0x051, 0x00038CAF,
+ 0x052, 0x000FCCA3,
+ 0x053, 0x00090F38,
+ 0x054, 0x00011083,
+ 0x057, 0x000D0000,
+ 0x08D, 0x00000A1A,
+ 0x082, 0x00082AAC,
+ 0x08E, 0x00076940,
+ 0x08F, 0x00088400,
+ 0x061, 0x00038CAF,
+ 0x062, 0x000FCCA3,
+ 0x063, 0x00090F38,
+ 0x064, 0x00011083,
+ 0x067, 0x000D0000,
+ 0x092, 0x00082AAC,
+ 0x0EF, 0x00000400,
+ 0x030, 0x000008CA,
+ 0x030, 0x000018CA,
+ 0x030, 0x000028CA,
+ 0x030, 0x000038CA,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00000400,
+ 0x030, 0x000008CA,
+ 0x030, 0x000018CA,
+ 0x030, 0x000028CA,
+ 0x030, 0x000038CA,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000007,
+ 0x03F, 0x0000CCA3,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00000100,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000CCA3,
+ 0x033, 0x00000007,
+ 0x03F, 0x0000CCA3,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x030, 0x0000002D,
+ 0x030, 0x0000122C,
+ 0x030, 0x0000222F,
+ 0x030, 0x0000326C,
+ 0x030, 0x0000466B,
+ 0x030, 0x0000566E,
+ 0x030, 0x000066EB,
+ 0x030, 0x000077EC,
+ 0x030, 0x000087EF,
+ 0x030, 0x000097F2,
+ 0x030, 0x0000A7F5,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00000800,
+ 0x030, 0x00000001,
+ 0x030, 0x00001011,
+ 0x030, 0x00002011,
+ 0x030, 0x00003013,
+ 0x030, 0x00004033,
+ 0x030, 0x00005033,
+ 0x030, 0x00006037,
+ 0x030, 0x0000703F,
+ 0x030, 0x0000803F,
+ 0x030, 0x0000903F,
+ 0x030, 0x0000A03F,
+ 0x0EE, 0x00000000,
+ 0x082, 0x00083B8C,
+ 0x0ED, 0x00000008,
+ 0x030, 0x000030F6,
+ 0x030, 0x00002004,
+ 0x030, 0x000010F6,
+ 0x030, 0x000000F6,
+ 0x0ED, 0x00000000,
+ 0x092, 0x00083B8C,
+ 0x0EC, 0x00000008,
+ 0x030, 0x000030F6,
+ 0x030, 0x00002004,
+ 0x030, 0x000010F6,
+ 0x030, 0x000000F6,
+ 0x0EC, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x030, 0x0001C11C,
+ 0x030, 0x000181F4,
+ 0x030, 0x00014108,
+ 0x030, 0x000101E4,
+ 0x030, 0x0000C11C,
+ 0x030, 0x000081F4,
+ 0x030, 0x00004108,
+ 0x030, 0x000001E4,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x030, 0x0001C11C,
+ 0x030, 0x000181F4,
+ 0x030, 0x00014108,
+ 0x030, 0x000101E4,
+ 0x030, 0x0000C11C,
+ 0x030, 0x000081F4,
+ 0x030, 0x00004108,
+ 0x030, 0x000001E4,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000007,
+ 0x03E, 0x0000005F,
+ 0x03F, 0x000B3FDB,
+ 0x033, 0x00000004,
+ 0x03E, 0x0000005D,
+ 0x03F, 0x000BFFE0,
+ 0x033, 0x00000005,
+ 0x03E, 0x0000005D,
+ 0x03F, 0x000FBFCE,
+ 0x033, 0x00000006,
+ 0x03E, 0x0000005F,
+ 0x03F, 0x000A7FFB,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00000002,
+ 0x030, 0x00000001,
+ 0x030, 0x00002001,
+ 0x030, 0x00004001,
+ 0x030, 0x00007001,
+ 0x030, 0x00006001,
+ 0x030, 0x00020001,
+ 0x030, 0x00022001,
+ 0x030, 0x00024001,
+ 0x030, 0x00027001,
+ 0x030, 0x00026001,
+ 0x030, 0x00034001,
+ 0x030, 0x00037001,
+ 0x030, 0x00036001,
+ 0x030, 0x00008000,
+ 0x030, 0x0000A000,
+ 0x030, 0x0000C000,
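+ /* Conditional block: the 0x83000100/0x40000000 pair appears to open a
+ * branch matched against the chip cut/package, 0xA0000000 starts the
+ * else branch, and 0xB0000000 closes it.
+ */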
+ 0x83000100, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x0000E024,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x0000E000,
+ 0xB0000000, 0x00000000,
+ 0x030, 0x0001C000,
+ 0x030, 0x0001E000,
+ 0x0EE, 0x00000000,
+ 0x0EE, 0x00020000,
+ 0x0EF, 0x00020000,
+ 0x030, 0x00000F75,
+ 0x030, 0x00002F55,
+ 0x030, 0x00003F75,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00008401,
+ 0xFFE, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8723d_rf_a, A);
+
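+/* Transmit power limits: {regulatory domain, band, bandwidth, rate
+ * section, channel, limit}.  Assuming the driver's usual encoding, the
+ * limit is in half-dBm steps (30 = 15 dBm) and 63 marks "no limit
+ * defined".
+ */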
+static const struct rtw_txpwr_lmt_cfg_pair rtw8723d_txpwr_lmt[] = {
+ {0, 0, 0, 0, 1, 30, },
+ {2, 0, 0, 0, 1, 30, },
+ {1, 0, 0, 0, 1, 30, },
+ {0, 0, 0, 0, 2, 30, },
+ {2, 0, 0, 0, 2, 30, },
+ {1, 0, 0, 0, 2, 30, },
+ {0, 0, 0, 0, 3, 30, },
+ {2, 0, 0, 0, 3, 30, },
+ {1, 0, 0, 0, 3, 30, },
+ {0, 0, 0, 0, 4, 30, },
+ {2, 0, 0, 0, 4, 30, },
+ {1, 0, 0, 0, 4, 30, },
+ {0, 0, 0, 0, 5, 30, },
+ {2, 0, 0, 0, 5, 30, },
+ {1, 0, 0, 0, 5, 30, },
+ {0, 0, 0, 0, 6, 30, },
+ {2, 0, 0, 0, 6, 30, },
+ {1, 0, 0, 0, 6, 30, },
+ {0, 0, 0, 0, 7, 30, },
+ {2, 0, 0, 0, 7, 30, },
+ {1, 0, 0, 0, 7, 30, },
+ {0, 0, 0, 0, 8, 30, },
+ {2, 0, 0, 0, 8, 30, },
+ {1, 0, 0, 0, 8, 30, },
+ {0, 0, 0, 0, 9, 30, },
+ {2, 0, 0, 0, 9, 30, },
+ {1, 0, 0, 0, 9, 30, },
+ {0, 0, 0, 0, 10, 30, },
+ {2, 0, 0, 0, 10, 30, },
+ {1, 0, 0, 0, 10, 30, },
+ {0, 0, 0, 0, 11, 30, },
+ {2, 0, 0, 0, 11, 30, },
+ {1, 0, 0, 0, 11, 30, },
+ {0, 0, 0, 0, 12, 30, },
+ {2, 0, 0, 0, 12, 30, },
+ {1, 0, 0, 0, 12, 30, },
+ {0, 0, 0, 0, 13, 17, },
+ {2, 0, 0, 0, 13, 30, },
+ {1, 0, 0, 0, 13, 30, },
+ {0, 0, 0, 0, 14, 63, },
+ {2, 0, 0, 0, 14, 63, },
+ {1, 0, 0, 0, 14, 30, },
+ {0, 0, 0, 1, 1, 26, },
+ {2, 0, 0, 1, 1, 31, },
+ {1, 0, 0, 1, 1, 31, },
+ {0, 0, 0, 1, 2, 28, },
+ {2, 0, 0, 1, 2, 31, },
+ {1, 0, 0, 1, 2, 31, },
+ {0, 0, 0, 1, 3, 30, },
+ {2, 0, 0, 1, 3, 31, },
+ {1, 0, 0, 1, 3, 31, },
+ {0, 0, 0, 1, 4, 30, },
+ {2, 0, 0, 1, 4, 31, },
+ {1, 0, 0, 1, 4, 31, },
+ {0, 0, 0, 1, 5, 30, },
+ {2, 0, 0, 1, 5, 31, },
+ {1, 0, 0, 1, 5, 31, },
+ {0, 0, 0, 1, 6, 30, },
+ {2, 0, 0, 1, 6, 31, },
+ {1, 0, 0, 1, 6, 31, },
+ {0, 0, 0, 1, 7, 30, },
+ {2, 0, 0, 1, 7, 31, },
+ {1, 0, 0, 1, 7, 31, },
+ {0, 0, 0, 1, 8, 30, },
+ {2, 0, 0, 1, 8, 31, },
+ {1, 0, 0, 1, 8, 31, },
+ {0, 0, 0, 1, 9, 30, },
+ {2, 0, 0, 1, 9, 31, },
+ {1, 0, 0, 1, 9, 31, },
+ {0, 0, 0, 1, 10, 28, },
+ {2, 0, 0, 1, 10, 31, },
+ {1, 0, 0, 1, 10, 31, },
+ {0, 0, 0, 1, 11, 26, },
+ {2, 0, 0, 1, 11, 31, },
+ {1, 0, 0, 1, 11, 31, },
+ {0, 0, 0, 1, 12, 24, },
+ {2, 0, 0, 1, 12, 31, },
+ {1, 0, 0, 1, 12, 31, },
+ {0, 0, 0, 1, 13, 14, },
+ {2, 0, 0, 1, 13, 31, },
+ {1, 0, 0, 1, 13, 31, },
+ {0, 0, 0, 1, 14, 63, },
+ {2, 0, 0, 1, 14, 63, },
+ {1, 0, 0, 1, 14, 63, },
+ {0, 0, 0, 2, 1, 24, },
+ {2, 0, 0, 2, 1, 31, },
+ {1, 0, 0, 2, 1, 31, },
+ {0, 0, 0, 2, 2, 26, },
+ {2, 0, 0, 2, 2, 31, },
+ {1, 0, 0, 2, 2, 31, },
+ {0, 0, 0, 2, 3, 30, },
+ {2, 0, 0, 2, 3, 31, },
+ {1, 0, 0, 2, 3, 31, },
+ {0, 0, 0, 2, 4, 30, },
+ {2, 0, 0, 2, 4, 31, },
+ {1, 0, 0, 2, 4, 31, },
+ {0, 0, 0, 2, 5, 30, },
+ {2, 0, 0, 2, 5, 31, },
+ {1, 0, 0, 2, 5, 31, },
+ {0, 0, 0, 2, 6, 30, },
+ {2, 0, 0, 2, 6, 31, },
+ {1, 0, 0, 2, 6, 31, },
+ {0, 0, 0, 2, 7, 30, },
+ {2, 0, 0, 2, 7, 31, },
+ {1, 0, 0, 2, 7, 31, },
+ {0, 0, 0, 2, 8, 30, },
+ {2, 0, 0, 2, 8, 31, },
+ {1, 0, 0, 2, 8, 31, },
+ {0, 0, 0, 2, 9, 30, },
+ {2, 0, 0, 2, 9, 31, },
+ {1, 0, 0, 2, 9, 31, },
+ {0, 0, 0, 2, 10, 26, },
+ {2, 0, 0, 2, 10, 31, },
+ {1, 0, 0, 2, 10, 31, },
+ {0, 0, 0, 2, 11, 24, },
+ {2, 0, 0, 2, 11, 31, },
+ {1, 0, 0, 2, 11, 31, },
+ {0, 0, 0, 2, 12, 23, },
+ {2, 0, 0, 2, 12, 31, },
+ {1, 0, 0, 2, 12, 31, },
+ {0, 0, 0, 2, 13, 13, },
+ {2, 0, 0, 2, 13, 31, },
+ {1, 0, 0, 2, 13, 31, },
+ {0, 0, 0, 2, 14, 63, },
+ {2, 0, 0, 2, 14, 63, },
+ {1, 0, 0, 2, 14, 63, },
+ {0, 0, 0, 3, 1, 28, },
+ {2, 0, 0, 3, 1, 30, },
+ {1, 0, 0, 3, 1, 30, },
+ {0, 0, 0, 3, 2, 28, },
+ {2, 0, 0, 3, 2, 30, },
+ {1, 0, 0, 3, 2, 30, },
+ {0, 0, 0, 3, 3, 30, },
+ {2, 0, 0, 3, 3, 30, },
+ {1, 0, 0, 3, 3, 30, },
+ {0, 0, 0, 3, 4, 30, },
+ {2, 0, 0, 3, 4, 30, },
+ {1, 0, 0, 3, 4, 30, },
+ {0, 0, 0, 3, 5, 30, },
+ {2, 0, 0, 3, 5, 30, },
+ {1, 0, 0, 3, 5, 30, },
+ {0, 0, 0, 3, 6, 30, },
+ {2, 0, 0, 3, 6, 30, },
+ {1, 0, 0, 3, 6, 30, },
+ {0, 0, 0, 3, 7, 30, },
+ {2, 0, 0, 3, 7, 30, },
+ {1, 0, 0, 3, 7, 30, },
+ {0, 0, 0, 3, 8, 30, },
+ {2, 0, 0, 3, 8, 30, },
+ {1, 0, 0, 3, 8, 30, },
+ {0, 0, 0, 3, 9, 28, },
+ {2, 0, 0, 3, 9, 30, },
+ {1, 0, 0, 3, 9, 30, },
+ {0, 0, 0, 3, 10, 28, },
+ {2, 0, 0, 3, 10, 30, },
+ {1, 0, 0, 3, 10, 30, },
+ {0, 0, 0, 3, 11, 28, },
+ {2, 0, 0, 3, 11, 30, },
+ {1, 0, 0, 3, 11, 30, },
+ {0, 0, 0, 3, 12, 63, },
+ {2, 0, 0, 3, 12, 30, },
+ {1, 0, 0, 3, 12, 30, },
+ {0, 0, 0, 3, 13, 63, },
+ {2, 0, 0, 3, 13, 30, },
+ {1, 0, 0, 3, 13, 30, },
+ {0, 0, 0, 3, 14, 63, },
+ {2, 0, 0, 3, 14, 63, },
+ {1, 0, 0, 3, 14, 63, },
+ {0, 0, 1, 2, 1, 63, },
+ {2, 0, 1, 2, 1, 63, },
+ {1, 0, 1, 2, 1, 63, },
+ {0, 0, 1, 2, 2, 63, },
+ {2, 0, 1, 2, 2, 63, },
+ {1, 0, 1, 2, 2, 63, },
+ {0, 0, 1, 2, 3, 24, },
+ {2, 0, 1, 2, 3, 30, },
+ {1, 0, 1, 2, 3, 30, },
+ {0, 0, 1, 2, 4, 24, },
+ {2, 0, 1, 2, 4, 30, },
+ {1, 0, 1, 2, 4, 30, },
+ {0, 0, 1, 2, 5, 24, },
+ {2, 0, 1, 2, 5, 30, },
+ {1, 0, 1, 2, 5, 30, },
+ {0, 0, 1, 2, 6, 24, },
+ {2, 0, 1, 2, 6, 30, },
+ {1, 0, 1, 2, 6, 30, },
+ {0, 0, 1, 2, 7, 24, },
+ {2, 0, 1, 2, 7, 30, },
+ {1, 0, 1, 2, 7, 30, },
+ {0, 0, 1, 2, 8, 24, },
+ {2, 0, 1, 2, 8, 30, },
+ {1, 0, 1, 2, 8, 30, },
+ {0, 0, 1, 2, 9, 24, },
+ {2, 0, 1, 2, 9, 30, },
+ {1, 0, 1, 2, 9, 30, },
+ {0, 0, 1, 2, 10, 22, },
+ {2, 0, 1, 2, 10, 30, },
+ {1, 0, 1, 2, 10, 30, },
+ {0, 0, 1, 2, 11, 20, },
+ {2, 0, 1, 2, 11, 30, },
+ {1, 0, 1, 2, 11, 30, },
+ {0, 0, 1, 2, 12, 63, },
+ {2, 0, 1, 2, 12, 30, },
+ {1, 0, 1, 2, 12, 30, },
+ {0, 0, 1, 2, 13, 63, },
+ {2, 0, 1, 2, 13, 30, },
+ {1, 0, 1, 2, 13, 30, },
+ {0, 0, 1, 2, 14, 63, },
+ {2, 0, 1, 2, 14, 63, },
+ {1, 0, 1, 2, 14, 63, },
+ {0, 0, 1, 3, 1, 63, },
+ {2, 0, 1, 3, 1, 63, },
+ {1, 0, 1, 3, 1, 63, },
+ {0, 0, 1, 3, 2, 63, },
+ {2, 0, 1, 3, 2, 63, },
+ {1, 0, 1, 3, 2, 63, },
+ {0, 0, 1, 3, 3, 26, },
+ {2, 0, 1, 3, 3, 26, },
+ {1, 0, 1, 3, 3, 26, },
+ {0, 0, 1, 3, 4, 26, },
+ {2, 0, 1, 3, 4, 26, },
+ {1, 0, 1, 3, 4, 26, },
+ {0, 0, 1, 3, 5, 26, },
+ {2, 0, 1, 3, 5, 26, },
+ {1, 0, 1, 3, 5, 26, },
+ {0, 0, 1, 3, 6, 26, },
+ {2, 0, 1, 3, 6, 26, },
+ {1, 0, 1, 3, 6, 26, },
+ {0, 0, 1, 3, 7, 26, },
+ {2, 0, 1, 3, 7, 26, },
+ {1, 0, 1, 3, 7, 26, },
+ {0, 0, 1, 3, 8, 26, },
+ {2, 0, 1, 3, 8, 26, },
+ {1, 0, 1, 3, 8, 26, },
+ {0, 0, 1, 3, 9, 26, },
+ {2, 0, 1, 3, 9, 26, },
+ {1, 0, 1, 3, 9, 26, },
+ {0, 0, 1, 3, 10, 26, },
+ {2, 0, 1, 3, 10, 26, },
+ {1, 0, 1, 3, 10, 26, },
+ {0, 0, 1, 3, 11, 26, },
+ {2, 0, 1, 3, 11, 26, },
+ {1, 0, 1, 3, 11, 26, },
+ {0, 0, 1, 3, 12, 63, },
+ {2, 0, 1, 3, 12, 26, },
+ {1, 0, 1, 3, 12, 26, },
+ {0, 0, 1, 3, 13, 63, },
+ {2, 0, 1, 3, 13, 26, },
+ {1, 0, 1, 3, 13, 26, },
+ {0, 0, 1, 3, 14, 63, },
+ {2, 0, 1, 3, 14, 63, },
+ {1, 0, 1, 3, 14, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8723d_txpwr_lmt);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d_table.h b/drivers/net/wireless/realtek/rtw88/rtw8723d_table.h
new file mode 100644
index 000000000000..4db996a1d982
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d_table.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8723D_TABLE_H__
+#define __RTW8723D_TABLE_H__
+
+extern const struct rtw_table rtw8723d_mac_tbl;
+extern const struct rtw_table rtw8723d_agc_tbl;
+extern const struct rtw_table rtw8723d_bb_tbl;
+extern const struct rtw_table rtw8723d_bb_pg_tbl;
+extern const struct rtw_table rtw8723d_rf_a_tbl;
+extern const struct rtw_table rtw8723d_txpwr_lmt_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.c b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
new file mode 100644
index 000000000000..c81eb4c33642
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "rtw8723de.h"
+
+static const struct pci_device_id rtw_8723de_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xD723),
+ .driver_data = (kernel_ulong_t)&rtw8723d_hw_spec
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, rtw_8723de_id_table);
+
+static struct pci_driver rtw_8723de_driver = {
+ .name = "rtw_8723de",
+ .id_table = rtw_8723de_id_table,
+ .probe = rtw_pci_probe,
+ .remove = rtw_pci_remove,
+ .driver.pm = &rtw_pm_ops,
+ .shutdown = rtw_pci_shutdown,
+};
+module_pci_driver(rtw_8723de_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11n wireless 8723de driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.h b/drivers/net/wireless/realtek/rtw88/rtw8723de.h
new file mode 100644
index 000000000000..ba3842360c20
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_8723DE_H_
+#define __RTW_8723DE_H_
+
+extern const struct dev_pm_ops rtw_pm_ops;
+extern struct rtw_chip_info rtw8723d_hw_spec;
+int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+void rtw_pci_remove(struct pci_dev *pdev);
+void rtw_pci_shutdown(struct pci_dev *pdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 4dd7d4143b04..e49bdd76ab9a 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2018-2019 Realtek Corporation
*/
+#include <linux/module.h>
#include "main.h"
#include "coex.h"
#include "fw.h"
@@ -998,8 +999,9 @@ static bool rtw8822b_check_rf_path(u8 antenna)
}
}
-static void rtw8822b_set_antenna(struct rtw_dev *rtwdev, u8 antenna_tx,
- u8 antenna_rx)
+static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
+ u32 antenna_tx,
+ u32 antenna_rx)
{
struct rtw_hal *hal = &rtwdev->hal;
@@ -1007,16 +1009,21 @@ static void rtw8822b_set_antenna(struct rtw_dev *rtwdev, u8 antenna_tx,
antenna_tx, antenna_rx);
if (!rtw8822b_check_rf_path(antenna_tx)) {
- rtw_info(rtwdev, "unsupport tx path, set to default path ab\n");
- antenna_tx = BB_PATH_AB;
+ rtw_info(rtwdev, "unsupport tx path 0x%x\n", antenna_tx);
+ return -EINVAL;
}
+
if (!rtw8822b_check_rf_path(antenna_rx)) {
- rtw_info(rtwdev, "unsupport rx path, set to default path ab\n");
- antenna_rx = BB_PATH_AB;
+ rtw_info(rtwdev, "unsupport rx path 0x%x\n", antenna_rx);
+ return -EINVAL;
}
+
hal->antenna_tx = antenna_tx;
hal->antenna_rx = antenna_rx;
+
rtw8822b_config_trx_mode(rtwdev, antenna_tx, antenna_rx, false);
+
+ return 0;
}
static void rtw8822b_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
@@ -1024,7 +1031,7 @@ static void rtw8822b_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
u8 ldo_pwr;
ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
- ldo_pwr = enable ? ldo_pwr | BIT(7) : ldo_pwr & ~BIT(7);
+ ldo_pwr = enable ? ldo_pwr | BIT_LDO25_EN : ldo_pwr & ~BIT_LDO25_EN;
rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
}
@@ -2051,6 +2058,12 @@ static const struct rtw_hw_reg rtw8822b_dig[] = {
[1] = { .addr = 0xe50, .mask = 0x7f },
};
+static const struct rtw_ltecoex_addr rtw8822b_ltecoex_addr = {
+ .ctrl = LTECOEX_ACCESS_CTRL,
+ .wdata = LTECOEX_WRITE_DATA,
+ .rdata = LTECOEX_READ_DATA,
+};
+
static const struct rtw_page_table page_table_8822b[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
@@ -2077,6 +2090,22 @@ static const struct rtw_rqpn rqpn_table_8822b[] = {
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
};
+static struct rtw_prioq_addrs prioq_addrs_8822b = {
+ .prio[RTW_DMA_MAPPING_EXTRA] = {
+ .rsvd = REG_FIFOPAGE_INFO_4, .avail = REG_FIFOPAGE_INFO_4 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_LOW] = {
+ .rsvd = REG_FIFOPAGE_INFO_2, .avail = REG_FIFOPAGE_INFO_2 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_NORMAL] = {
+ .rsvd = REG_FIFOPAGE_INFO_3, .avail = REG_FIFOPAGE_INFO_3 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_HIGH] = {
+ .rsvd = REG_FIFOPAGE_INFO_1, .avail = REG_FIFOPAGE_INFO_1 + 2,
+ },
+ .wsize = true,
+};
+
static struct rtw_chip_ops rtw8822b_ops = {
.phy_set_param = rtw8822b_phy_set_param,
.read_efuse = rtw8822b_read_efuse,
@@ -2402,6 +2431,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
.fw_name = "rtw88/rtw8822b_fw.bin",
+ .wlan_cpu = RTW_WCPU_11AC,
.tx_pkt_desc_sz = 48,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
@@ -2426,10 +2456,13 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.pwr_off_seq = card_disable_flow_8822b,
.page_table = page_table_8822b,
.rqpn_table = rqpn_table_8822b,
+ .prioq_addrs = &prioq_addrs_8822b,
.intf_table = &phy_para_table_8822b,
.dig = rtw8822b_dig,
+ .dig_cck = NULL,
.rf_base_addr = {0x2800, 0x2c00},
.rf_sipi_addr = {0xc90, 0xe90},
+ .ltecoex_addr = &rtw8822b_ltecoex_addr,
.mac_tbl = &rtw8822b_mac_tbl,
.agc_tbl = &rtw8822b_agc_tbl,
.bb_tbl = &rtw8822b_bb_tbl,
@@ -2440,6 +2473,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.iqk_threshold = 8,
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
+ .rx_ldpc = true,
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
@@ -2473,3 +2507,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
EXPORT_SYMBOL(rtw8822b_hw_spec);
MODULE_FIRMWARE("rtw88/rtw8822b_fw.bin");
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822b driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.c b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
new file mode 100644
index 000000000000..921916ae15ca
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "rtw8822be.h"
+
+static const struct pci_device_id rtw_8822be_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822),
+ .driver_data = (kernel_ulong_t)&rtw8822b_hw_spec
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, rtw_8822be_id_table);
+
+static struct pci_driver rtw_8822be_driver = {
+ .name = "rtw_8822be",
+ .id_table = rtw_8822be_id_table,
+ .probe = rtw_pci_probe,
+ .remove = rtw_pci_remove,
+ .driver.pm = &rtw_pm_ops,
+ .shutdown = rtw_pci_shutdown,
+};
+module_pci_driver(rtw_8822be_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822be driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.h b/drivers/net/wireless/realtek/rtw88/rtw8822be.h
new file mode 100644
index 000000000000..d823ca059f5c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_8822BE_H_
+#define __RTW_8822BE_H_
+
+extern const struct dev_pm_ops rtw_pm_ops;
+extern struct rtw_chip_info rtw8822b_hw_spec;
+int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+void rtw_pci_remove(struct pci_dev *pdev);
+void rtw_pci_shutdown(struct pci_dev *pdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index dc07e6be38e8..c3d72ef611c6 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2018-2019 Realtek Corporation
*/
+#include <linux/module.h>
#include "main.h"
#include "coex.h"
#include "fw.h"
@@ -15,6 +16,7 @@
#include "debug.h"
#include "util.h"
#include "bf.h"
+#include "efuse.h"
static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -1000,10 +1002,122 @@ static void rtw8822c_rf_x2_check(struct rtw_dev *rtwdev)
}
}
+static void rtw8822c_set_power_trim(struct rtw_dev *rtwdev, s8 bb_gain[2][8])
+{
+#define RF_SET_POWER_TRIM(_path, _seq, _idx) \
+ do { \
+ rtw_write_rf(rtwdev, _path, 0x33, RFREG_MASK, _seq); \
+ rtw_write_rf(rtwdev, _path, 0x3f, RFREG_MASK, \
+ bb_gain[_path][_idx]); \
+ } while (0)
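+ /* Each invocation selects LUT entry _seq via RF reg 0x33, then programs
+ * the trim value bb_gain[_path][_idx] through RF reg 0x3f.
+ */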
+ u8 path;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_write_rf(rtwdev, path, 0xee, BIT(19), 1);
+ RF_SET_POWER_TRIM(path, 0x0, 0);
+ RF_SET_POWER_TRIM(path, 0x1, 1);
+ RF_SET_POWER_TRIM(path, 0x2, 2);
+ RF_SET_POWER_TRIM(path, 0x3, 2);
+ RF_SET_POWER_TRIM(path, 0x4, 3);
+ RF_SET_POWER_TRIM(path, 0x5, 4);
+ RF_SET_POWER_TRIM(path, 0x6, 5);
+ RF_SET_POWER_TRIM(path, 0x7, 6);
+ RF_SET_POWER_TRIM(path, 0x8, 7);
+ RF_SET_POWER_TRIM(path, 0x9, 3);
+ RF_SET_POWER_TRIM(path, 0xa, 4);
+ RF_SET_POWER_TRIM(path, 0xb, 5);
+ RF_SET_POWER_TRIM(path, 0xc, 6);
+ RF_SET_POWER_TRIM(path, 0xd, 7);
+ RF_SET_POWER_TRIM(path, 0xe, 7);
+ rtw_write_rf(rtwdev, path, 0xee, BIT(19), 0);
+ }
+#undef RF_SET_POWER_TRIM
+}
+
+static void rtw8822c_power_trim(struct rtw_dev *rtwdev)
+{
+ u8 pg_pwr = 0xff, i, path, idx;
+ s8 bb_gain[2][8] = {};
+ u16 rf_efuse_2g[3] = {PPG_2GL_TXAB, PPG_2GM_TXAB, PPG_2GH_TXAB};
+ u16 rf_efuse_5g[2][5] = {{PPG_5GL1_TXA, PPG_5GL2_TXA, PPG_5GM1_TXA,
+ PPG_5GM2_TXA, PPG_5GH1_TXA},
+ {PPG_5GL1_TXB, PPG_5GL2_TXB, PPG_5GM1_TXB,
+ PPG_5GM2_TXB, PPG_5GH1_TXB} };
+ bool set = false;
+
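+ /* bb_gain[][0..2] take the 2G low/mid/high trims; entries 3..7 are
+ * filled from the 5G efuse addresses below (idx = i + 3).
+ */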
+ for (i = 0; i < ARRAY_SIZE(rf_efuse_2g); i++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse_2g[i], &pg_pwr);
+ if (pg_pwr == EFUSE_READ_FAIL)
+ continue;
+ set = true;
+ bb_gain[RF_PATH_A][i] = FIELD_GET(PPG_2G_A_MASK, pg_pwr);
+ bb_gain[RF_PATH_B][i] = FIELD_GET(PPG_2G_B_MASK, pg_pwr);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rf_efuse_5g[0]); i++) {
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse_5g[path][i],
+ &pg_pwr);
+ if (pg_pwr == EFUSE_READ_FAIL)
+ continue;
+ set = true;
+ idx = i + ARRAY_SIZE(rf_efuse_2g);
+ bb_gain[path][idx] = FIELD_GET(PPG_5G_MASK, pg_pwr);
+ }
+ }
+ if (set)
+ rtw8822c_set_power_trim(rtwdev, bb_gain);
+
+ rtw_write32_mask(rtwdev, REG_DIS_DPD, DIS_DPD_MASK, DIS_DPD_RATEALL);
+}
+
+static void rtw8822c_thermal_trim(struct rtw_dev *rtwdev)
+{
+ u16 rf_efuse[2] = {PPG_THERMAL_A, PPG_THERMAL_B};
+ u8 pg_therm = 0xff, thermal[2] = {0}, path;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse[path], &pg_therm);
+ if (pg_therm == EFUSE_READ_FAIL)
+ return;
+ /* The efuse value's BIT(0) shall be moved to BIT(3), and the values
+ * in BIT(1) to BIT(3) should be shifted right by 1 bit.
+ */
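+ /* e.g. a raw efuse nibble of 0b1010 maps to a thermal value of
+ * 0b0101, and 0b0001 maps to 0b1000.
+ */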
+ thermal[path] = FIELD_GET(GENMASK(3, 1), pg_therm);
+ thermal[path] |= FIELD_PREP(BIT(3), pg_therm & BIT(0));
+ rtw_write_rf(rtwdev, path, 0x43, RF_THEMAL_MASK, thermal[path]);
+ }
+}
+
+static void rtw8822c_pa_bias(struct rtw_dev *rtwdev)
+{
+ u16 rf_efuse_2g[2] = {PPG_PABIAS_2GA, PPG_PABIAS_2GB};
+ u16 rf_efuse_5g[2] = {PPG_PABIAS_5GA, PPG_PABIAS_5GB};
+ u8 pg_pa_bias = 0xff, path;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse_2g[path],
+ &pg_pa_bias);
+ if (pg_pa_bias == EFUSE_READ_FAIL)
+ return;
+ pg_pa_bias = FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias);
+ rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_2G_MASK, pg_pa_bias);
+ }
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ rtw_read8_physical_efuse(rtwdev, rf_efuse_5g[path],
+ &pg_pa_bias);
+ if (pg_pa_bias == EFUSE_READ_FAIL)
+ return;
+ pg_pa_bias = FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias);
+ rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_5G_MASK, pg_pa_bias);
+ }
+}
+
static void rtw8822c_rf_init(struct rtw_dev *rtwdev)
{
rtw8822c_rf_dac_cal(rtwdev);
rtw8822c_rf_x2_check(rtwdev);
+ rtw8822c_thermal_trim(rtwdev);
+ rtw8822c_power_trim(rtwdev);
+ rtw8822c_pa_bias(rtwdev);
}
static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
@@ -1382,7 +1496,6 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
{
if (IS_CH_2G_BAND(channel)) {
rtw_write32_clr(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
- rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
rtw_write32_set(rtwdev, REG_TXF4, BIT(20));
rtw_write32_clr(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
rtw_write32_clr(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
@@ -1450,7 +1563,6 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_set(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
rtw_write32_set(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
rtw_write32_clr(rtwdev, REG_TXF4, BIT(20));
- rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x0);
rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0x22);
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
@@ -1890,6 +2002,40 @@ static void rtw8822c_set_tx_power_index(struct rtw_dev *rtwdev)
}
}
+static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
+ u32 antenna_tx,
+ u32 antenna_rx)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ switch (antenna_tx) {
+ case BB_PATH_A:
+ case BB_PATH_B:
+ case BB_PATH_AB:
+ break;
+ default:
+ rtw_info(rtwdev, "unsupport tx path 0x%x\n", antenna_tx);
+ return -EINVAL;
+ }
+
+ /* RX on path B alone is not supported */
+ switch (antenna_rx) {
+ case BB_PATH_A:
+ case BB_PATH_AB:
+ break;
+ default:
+ rtw_info(rtwdev, "unsupport rx path 0x%x\n", antenna_rx);
+ return -EINVAL;
+ }
+
+ hal->antenna_tx = antenna_tx;
+ hal->antenna_rx = antenna_rx;
+
+ rtw8822c_config_trx_mode(rtwdev, antenna_tx, antenna_rx, false);
+
+ return 0;
+}
+
static void rtw8822c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
{
u8 ldo_pwr;
@@ -3752,6 +3898,7 @@ static const struct rtw_rfe_def rtw8822c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8822c, 0, 0),
[1] = RTW_DEF_RFE(8822c, 0, 0),
[2] = RTW_DEF_RFE(8822c, 0, 0),
+ [5] = RTW_DEF_RFE(8822c, 0, 5),
};
static const struct rtw_hw_reg rtw8822c_dig[] = {
@@ -3759,6 +3906,12 @@ static const struct rtw_hw_reg rtw8822c_dig[] = {
[1] = { .addr = 0x1d70, .mask = 0x7f00 },
};
+static const struct rtw_ltecoex_addr rtw8822c_ltecoex_addr = {
+ .ctrl = LTECOEX_ACCESS_CTRL,
+ .wdata = LTECOEX_WRITE_DATA,
+ .rdata = LTECOEX_READ_DATA,
+};
+
static const struct rtw_page_table page_table_8822c[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
@@ -3785,6 +3938,22 @@ static const struct rtw_rqpn rqpn_table_8822c[] = {
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
};
+static struct rtw_prioq_addrs prioq_addrs_8822c = {
+ .prio[RTW_DMA_MAPPING_EXTRA] = {
+ .rsvd = REG_FIFOPAGE_INFO_4, .avail = REG_FIFOPAGE_INFO_4 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_LOW] = {
+ .rsvd = REG_FIFOPAGE_INFO_2, .avail = REG_FIFOPAGE_INFO_2 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_NORMAL] = {
+ .rsvd = REG_FIFOPAGE_INFO_3, .avail = REG_FIFOPAGE_INFO_3 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_HIGH] = {
+ .rsvd = REG_FIFOPAGE_INFO_1, .avail = REG_FIFOPAGE_INFO_1 + 2,
+ },
+ .wsize = true,
+};
+
static struct rtw_chip_ops rtw8822c_ops = {
.phy_set_param = rtw8822c_phy_set_param,
.read_efuse = rtw8822c_read_efuse,
@@ -3794,6 +3963,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
.read_rf = rtw_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_mix,
.set_tx_power_index = rtw8822c_set_tx_power_index,
+ .set_antenna = rtw8822c_set_antenna,
.cfg_ldo25 = rtw8822c_cfg_ldo25,
.false_alarm_statistics = rtw8822c_false_alarm_statistics,
.dpk_track = rtw8822c_dpk_track,
@@ -4121,6 +4291,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
.fw_name = "rtw88/rtw8822c_fw.bin",
+ .wlan_cpu = RTW_WCPU_11AC,
.tx_pkt_desc_sz = 48,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
@@ -4145,10 +4316,13 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.pwr_off_seq = card_disable_flow_8822c,
.page_table = page_table_8822c,
.rqpn_table = rqpn_table_8822c,
+ .prioq_addrs = &prioq_addrs_8822c,
.intf_table = &phy_para_table_8822c,
.dig = rtw8822c_dig,
+ .dig_cck = NULL,
.rf_base_addr = {0x3c00, 0x4c00},
.rf_sipi_addr = {0x1808, 0x4108},
+ .ltecoex_addr = &rtw8822c_ltecoex_addr,
.mac_tbl = &rtw8822c_mac_tbl,
.agc_tbl = &rtw8822c_agc_tbl,
.bb_tbl = &rtw8822c_bb_tbl,
@@ -4162,6 +4336,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.iqk_threshold = 8,
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
+ .rx_ldpc = true,
#ifdef CONFIG_PM
.wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
@@ -4201,3 +4376,7 @@ EXPORT_SYMBOL(rtw8822c_hw_spec);
MODULE_FIRMWARE("rtw88/rtw8822c_fw.bin");
MODULE_FIRMWARE("rtw88/rtw8822c_wow_fw.bin");
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822c driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index dfd8662a0c0e..32b4771e04d0 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -309,4 +309,32 @@ const struct rtw_table name ## _tbl = { \
#define BIT_GS_PWSF GENMASK(27, 0)
#define BIT_RPT_DGAIN GENMASK(27, 16)
#define BIT_TX_CFIR GENMASK(31, 30)
+
+#define PPG_THERMAL_A 0x1ef
+#define PPG_THERMAL_B 0x1b0
+#define RF_THEMAL_MASK GENMASK(19, 16)
+#define PPG_2GL_TXAB 0x1d4
+#define PPG_2GM_TXAB 0x1ee
+#define PPG_2GH_TXAB 0x1d2
+#define PPG_2G_A_MASK GENMASK(3, 0)
+#define PPG_2G_B_MASK GENMASK(7, 4)
+#define PPG_5GL1_TXA 0x1ec
+#define PPG_5GL2_TXA 0x1e8
+#define PPG_5GM1_TXA 0x1e4
+#define PPG_5GM2_TXA 0x1e0
+#define PPG_5GH1_TXA 0x1dc
+#define PPG_5GL1_TXB 0x1eb
+#define PPG_5GL2_TXB 0x1e7
+#define PPG_5GM1_TXB 0x1e3
+#define PPG_5GM2_TXB 0x1df
+#define PPG_5GH1_TXB 0x1db
+#define PPG_5G_MASK GENMASK(4, 0)
+#define PPG_PABIAS_2GA 0x1d6
+#define PPG_PABIAS_2GB 0x1d5
+#define PPG_PABIAS_5GA 0x1d8
+#define PPG_PABIAS_5GB 0x1d7
+#define PPG_PABIAS_MASK GENMASK(3, 0)
+#define RF_PABIAS_2G_MASK GENMASK(15, 12)
+#define RF_PABIAS_5G_MASK GENMASK(19, 16)
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index d102a2c27757..08d01a7bb1bf 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -13,6 +13,7 @@ static const u32 rtw8822c_mac[] = {
RTW_DECL_TABLE_PHY_COND(rtw8822c_mac, rtw_phy_cfg_mac);
static const u32 rtw8822c_agc[] = {
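+ /* The condition markers below follow the rtw88 PHY-condition table
+ * format: 0x8xxxxxxx opens an "if" branch, 0x9xxxxxxx an "else if",
+ * 0xA0000000 the default branch, and 0xB0000000 closes the block;
+ * the low bits select the chip cut/RFE variant a branch applies to.
+ */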
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x300001FF,
0x1D90, 0x300101FE,
0x1D90, 0x300201FD,
@@ -77,51 +78,313 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x303D0003,
0x1D90, 0x303E0002,
0x1D90, 0x303F0001,
- 0x1D90, 0x304000FF,
- 0x1D90, 0x304100FF,
- 0x1D90, 0x304200FF,
- 0x1D90, 0x304300FF,
- 0x1D90, 0x304400FE,
- 0x1D90, 0x304500FD,
- 0x1D90, 0x304600FC,
- 0x1D90, 0x304700FB,
- 0x1D90, 0x304800FA,
- 0x1D90, 0x304900F9,
- 0x1D90, 0x304A00F8,
- 0x1D90, 0x304B00F7,
- 0x1D90, 0x304C00F6,
- 0x1D90, 0x304D00F5,
- 0x1D90, 0x304E00F4,
- 0x1D90, 0x304F00F3,
- 0x1D90, 0x305000F2,
- 0x1D90, 0x305100F1,
- 0x1D90, 0x305200F0,
- 0x1D90, 0x305300EF,
- 0x1D90, 0x305400EE,
- 0x1D90, 0x305500ED,
- 0x1D90, 0x305600EC,
- 0x1D90, 0x305700EB,
- 0x1D90, 0x305800EA,
- 0x1D90, 0x305900E9,
- 0x1D90, 0x305A00E8,
- 0x1D90, 0x305B00E7,
- 0x1D90, 0x305C00E6,
- 0x1D90, 0x305D00C7,
- 0x1D90, 0x305E00C6,
- 0x1D90, 0x305F00C5,
- 0x1D90, 0x306000C4,
- 0x1D90, 0x306100C3,
- 0x1D90, 0x306200C2,
- 0x1D90, 0x306300A4,
- 0x1D90, 0x306400A3,
- 0x1D90, 0x306500A2,
- 0x1D90, 0x30660086,
- 0x1D90, 0x30670085,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x300001FF,
+ 0x1D90, 0x300101FE,
+ 0x1D90, 0x300201FD,
+ 0x1D90, 0x300301FC,
+ 0x1D90, 0x300401FB,
+ 0x1D90, 0x300501FA,
+ 0x1D90, 0x300601F9,
+ 0x1D90, 0x300701F8,
+ 0x1D90, 0x300801F7,
+ 0x1D90, 0x300901F6,
+ 0x1D90, 0x300A01F5,
+ 0x1D90, 0x300B01F4,
+ 0x1D90, 0x300C01F3,
+ 0x1D90, 0x300D01F2,
+ 0x1D90, 0x300E01F1,
+ 0x1D90, 0x300F01F0,
+ 0x1D90, 0x301001EF,
+ 0x1D90, 0x301101EE,
+ 0x1D90, 0x301201ED,
+ 0x1D90, 0x301301EC,
+ 0x1D90, 0x301401EB,
+ 0x1D90, 0x301501EA,
+ 0x1D90, 0x301601E9,
+ 0x1D90, 0x301701E8,
+ 0x1D90, 0x301801E7,
+ 0x1D90, 0x301901E5,
+ 0x1D90, 0x301A01E4,
+ 0x1D90, 0x301B01C5,
+ 0x1D90, 0x301C01C4,
+ 0x1D90, 0x301D01C3,
+ 0x1D90, 0x301E01C2,
+ 0x1D90, 0x301F0188,
+ 0x1D90, 0x30200187,
+ 0x1D90, 0x30210186,
+ 0x1D90, 0x30220184,
+ 0x1D90, 0x30230183,
+ 0x1D90, 0x30240182,
+ 0x1D90, 0x30250181,
+ 0x1D90, 0x30260148,
+ 0x1D90, 0x30270147,
+ 0x1D90, 0x30280146,
+ 0x1D90, 0x30290144,
+ 0x1D90, 0x302A0143,
+ 0x1D90, 0x302B0142,
+ 0x1D90, 0x302C0141,
+ 0x1D90, 0x302D00C8,
+ 0x1D90, 0x302E00C7,
+ 0x1D90, 0x302F00C6,
+ 0x1D90, 0x303000C5,
+ 0x1D90, 0x303100C4,
+ 0x1D90, 0x303200C3,
+ 0x1D90, 0x30330048,
+ 0x1D90, 0x30340047,
+ 0x1D90, 0x30350046,
+ 0x1D90, 0x30360045,
+ 0x1D90, 0x30370025,
+ 0x1D90, 0x30380024,
+ 0x1D90, 0x30390023,
+ 0x1D90, 0x303A0022,
+ 0x1D90, 0x303B0021,
+ 0x1D90, 0x303C0020,
+ 0x1D90, 0x303D0003,
+ 0x1D90, 0x303E0002,
+ 0x1D90, 0x303F0001,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x300001FF,
+ 0x1D90, 0x300101FE,
+ 0x1D90, 0x300201FD,
+ 0x1D90, 0x300301FC,
+ 0x1D90, 0x300401FB,
+ 0x1D90, 0x300501FA,
+ 0x1D90, 0x300601F9,
+ 0x1D90, 0x300701F8,
+ 0x1D90, 0x300801F7,
+ 0x1D90, 0x300901F6,
+ 0x1D90, 0x300A01F5,
+ 0x1D90, 0x300B01F4,
+ 0x1D90, 0x300C01F3,
+ 0x1D90, 0x300D01F2,
+ 0x1D90, 0x300E01F1,
+ 0x1D90, 0x300F01F0,
+ 0x1D90, 0x301001EF,
+ 0x1D90, 0x301101EE,
+ 0x1D90, 0x301201ED,
+ 0x1D90, 0x301301EC,
+ 0x1D90, 0x301401EB,
+ 0x1D90, 0x301501EA,
+ 0x1D90, 0x301601E9,
+ 0x1D90, 0x301701E8,
+ 0x1D90, 0x301801E7,
+ 0x1D90, 0x301901E5,
+ 0x1D90, 0x301A01E4,
+ 0x1D90, 0x301B01C5,
+ 0x1D90, 0x301C01C4,
+ 0x1D90, 0x301D01C3,
+ 0x1D90, 0x301E01C2,
+ 0x1D90, 0x301F0188,
+ 0x1D90, 0x30200187,
+ 0x1D90, 0x30210186,
+ 0x1D90, 0x30220184,
+ 0x1D90, 0x30230183,
+ 0x1D90, 0x30240182,
+ 0x1D90, 0x30250181,
+ 0x1D90, 0x30260148,
+ 0x1D90, 0x30270147,
+ 0x1D90, 0x30280146,
+ 0x1D90, 0x30290144,
+ 0x1D90, 0x302A0143,
+ 0x1D90, 0x302B0142,
+ 0x1D90, 0x302C0141,
+ 0x1D90, 0x302D00C8,
+ 0x1D90, 0x302E00C7,
+ 0x1D90, 0x302F00C6,
+ 0x1D90, 0x303000C5,
+ 0x1D90, 0x303100C4,
+ 0x1D90, 0x303200C3,
+ 0x1D90, 0x30330048,
+ 0x1D90, 0x30340047,
+ 0x1D90, 0x30350046,
+ 0x1D90, 0x30360045,
+ 0x1D90, 0x30370025,
+ 0x1D90, 0x30380024,
+ 0x1D90, 0x30390023,
+ 0x1D90, 0x303A0022,
+ 0x1D90, 0x303B0021,
+ 0x1D90, 0x303C0020,
+ 0x1D90, 0x303D0003,
+ 0x1D90, 0x303E0002,
+ 0x1D90, 0x303F0001,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x304001FD,
+ 0x1D90, 0x304101FC,
+ 0x1D90, 0x304201FB,
+ 0x1D90, 0x304301FA,
+ 0x1D90, 0x304401F9,
+ 0x1D90, 0x304501F8,
+ 0x1D90, 0x304601F7,
+ 0x1D90, 0x304701F6,
+ 0x1D90, 0x304801F5,
+ 0x1D90, 0x304901F4,
+ 0x1D90, 0x304A01F3,
+ 0x1D90, 0x304B01F2,
+ 0x1D90, 0x304C01F1,
+ 0x1D90, 0x304D01F0,
+ 0x1D90, 0x304E01EF,
+ 0x1D90, 0x304F00EE,
+ 0x1D90, 0x305000ED,
+ 0x1D90, 0x305100EC,
+ 0x1D90, 0x305200EB,
+ 0x1D90, 0x305300EA,
+ 0x1D90, 0x305400E9,
+ 0x1D90, 0x305500E8,
+ 0x1D90, 0x305600E7,
+ 0x1D90, 0x305700E6,
+ 0x1D90, 0x305800E5,
+ 0x1D90, 0x305900E4,
+ 0x1D90, 0x305A00E3,
+ 0x1D90, 0x305B00C3,
+ 0x1D90, 0x305C00C2,
+ 0x1D90, 0x305D00A4,
+ 0x1D90, 0x305E00A3,
+ 0x1D90, 0x305F00A2,
+ 0x1D90, 0x306000A1,
+ 0x1D90, 0x30610085,
+ 0x1D90, 0x30620084,
+ 0x1D90, 0x30630083,
+ 0x1D90, 0x30640082,
+ 0x1D90, 0x30650069,
+ 0x1D90, 0x30660068,
+ 0x1D90, 0x30670067,
+ 0x1D90, 0x30680066,
+ 0x1D90, 0x30690065,
+ 0x1D90, 0x306A0064,
+ 0x1D90, 0x306B0063,
+ 0x1D90, 0x306C0043,
+ 0x1D90, 0x306D0042,
+ 0x1D90, 0x306E0041,
+ 0x1D90, 0x306F0025,
+ 0x1D90, 0x30700024,
+ 0x1D90, 0x30710023,
+ 0x1D90, 0x30720022,
+ 0x1D90, 0x30730021,
+ 0x1D90, 0x30740020,
+ 0x1D90, 0x30750004,
+ 0x1D90, 0x30760003,
+ 0x1D90, 0x30770002,
+ 0x1D90, 0x30780001,
+ 0x1D90, 0x30790000,
+ 0x1D90, 0x307A0000,
+ 0x1D90, 0x307B0000,
+ 0x1D90, 0x307C0000,
+ 0x1D90, 0x307D0000,
+ 0x1D90, 0x307E0000,
+ 0x1D90, 0x307F0000,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x304001FD,
+ 0x1D90, 0x304101FC,
+ 0x1D90, 0x304201FB,
+ 0x1D90, 0x304301FA,
+ 0x1D90, 0x304401F9,
+ 0x1D90, 0x304501F8,
+ 0x1D90, 0x304601F7,
+ 0x1D90, 0x304701F6,
+ 0x1D90, 0x304801F5,
+ 0x1D90, 0x304901F4,
+ 0x1D90, 0x304A01F3,
+ 0x1D90, 0x304B01F2,
+ 0x1D90, 0x304C01F1,
+ 0x1D90, 0x304D01F0,
+ 0x1D90, 0x304E01EF,
+ 0x1D90, 0x304F00EE,
+ 0x1D90, 0x305000ED,
+ 0x1D90, 0x305100EC,
+ 0x1D90, 0x305200EB,
+ 0x1D90, 0x305300EA,
+ 0x1D90, 0x305400E9,
+ 0x1D90, 0x305500E8,
+ 0x1D90, 0x305600E7,
+ 0x1D90, 0x305700E6,
+ 0x1D90, 0x305800E5,
+ 0x1D90, 0x305900E4,
+ 0x1D90, 0x305A00E3,
+ 0x1D90, 0x305B00C3,
+ 0x1D90, 0x305C00C2,
+ 0x1D90, 0x305D00A4,
+ 0x1D90, 0x305E00A3,
+ 0x1D90, 0x305F00A2,
+ 0x1D90, 0x306000A1,
+ 0x1D90, 0x30610085,
+ 0x1D90, 0x30620084,
+ 0x1D90, 0x30630083,
+ 0x1D90, 0x30640082,
+ 0x1D90, 0x30650069,
+ 0x1D90, 0x30660068,
+ 0x1D90, 0x30670067,
+ 0x1D90, 0x30680066,
+ 0x1D90, 0x30690065,
+ 0x1D90, 0x306A0064,
+ 0x1D90, 0x306B0063,
+ 0x1D90, 0x306C0043,
+ 0x1D90, 0x306D0042,
+ 0x1D90, 0x306E0041,
+ 0x1D90, 0x306F0025,
+ 0x1D90, 0x30700024,
+ 0x1D90, 0x30710023,
+ 0x1D90, 0x30720022,
+ 0x1D90, 0x30730021,
+ 0x1D90, 0x30740020,
+ 0x1D90, 0x30750004,
+ 0x1D90, 0x30760003,
+ 0x1D90, 0x30770002,
+ 0x1D90, 0x30780001,
+ 0x1D90, 0x30790000,
+ 0x1D90, 0x307A0000,
+ 0x1D90, 0x307B0000,
+ 0x1D90, 0x307C0000,
+ 0x1D90, 0x307D0000,
+ 0x1D90, 0x307E0000,
+ 0x1D90, 0x307F0000,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x3040011F,
+ 0x1D90, 0x3041011F,
+ 0x1D90, 0x3042011F,
+ 0x1D90, 0x3043011F,
+ 0x1D90, 0x3044011F,
+ 0x1D90, 0x3045011F,
+ 0x1D90, 0x3046011F,
+ 0x1D90, 0x3047011F,
+ 0x1D90, 0x3048011F,
+ 0x1D90, 0x3049011F,
+ 0x1D90, 0x304A011F,
+ 0x1D90, 0x304B011F,
+ 0x1D90, 0x304C011F,
+ 0x1D90, 0x304D011F,
+ 0x1D90, 0x304E011F,
+ 0x1D90, 0x304F00F4,
+ 0x1D90, 0x305000F3,
+ 0x1D90, 0x305100F2,
+ 0x1D90, 0x305200F1,
+ 0x1D90, 0x305300F0,
+ 0x1D90, 0x305400EF,
+ 0x1D90, 0x305500EE,
+ 0x1D90, 0x305600ED,
+ 0x1D90, 0x305700EC,
+ 0x1D90, 0x305800EB,
+ 0x1D90, 0x305900EA,
+ 0x1D90, 0x305A00E9,
+ 0x1D90, 0x305B00E8,
+ 0x1D90, 0x305C00E7,
+ 0x1D90, 0x305D00E6,
+ 0x1D90, 0x305E00E4,
+ 0x1D90, 0x305F00E3,
+ 0x1D90, 0x306000E2,
+ 0x1D90, 0x306100C4,
+ 0x1D90, 0x306200C3,
+ 0x1D90, 0x306300C2,
+ 0x1D90, 0x306400A4,
+ 0x1D90, 0x306500A3,
+ 0x1D90, 0x306600A2,
+ 0x1D90, 0x306700A1,
0x1D90, 0x30680084,
0x1D90, 0x30690083,
0x1D90, 0x306A0082,
- 0x1D90, 0x306B0069,
- 0x1D90, 0x306C0068,
+ 0x1D90, 0x306B0081,
+ 0x1D90, 0x306C0080,
0x1D90, 0x306D0067,
0x1D90, 0x306E0066,
0x1D90, 0x306F0065,
@@ -130,131 +393,395 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x30720044,
0x1D90, 0x30730043,
0x1D90, 0x30740042,
- 0x1D90, 0x30750025,
+ 0x1D90, 0x30750041,
0x1D90, 0x30760024,
0x1D90, 0x30770023,
0x1D90, 0x30780022,
0x1D90, 0x30790021,
0x1D90, 0x307A0020,
- 0x1D90, 0x307B0003,
- 0x1D90, 0x307C0002,
- 0x1D90, 0x307D0001,
- 0x1D90, 0x307E0000,
+ 0x1D90, 0x307B0004,
+ 0x1D90, 0x307C0003,
+ 0x1D90, 0x307D0002,
+ 0x1D90, 0x307E0001,
0x1D90, 0x307F0000,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x308000FA,
+ 0x1D90, 0x308100F9,
+ 0x1D90, 0x308200F8,
+ 0x1D90, 0x308300F7,
+ 0x1D90, 0x308400F6,
+ 0x1D90, 0x308500F5,
+ 0x1D90, 0x308600F4,
+ 0x1D90, 0x308700F3,
+ 0x1D90, 0x308800F2,
+ 0x1D90, 0x308900F1,
+ 0x1D90, 0x308A00F0,
+ 0x1D90, 0x308B00EF,
+ 0x1D90, 0x308C00EE,
+ 0x1D90, 0x308D00ED,
+ 0x1D90, 0x308E00EC,
+ 0x1D90, 0x308F00EB,
+ 0x1D90, 0x309000EA,
+ 0x1D90, 0x309100E8,
+ 0x1D90, 0x309200E7,
+ 0x1D90, 0x309300E6,
+ 0x1D90, 0x309400E5,
+ 0x1D90, 0x309500E4,
+ 0x1D90, 0x309600C4,
+ 0x1D90, 0x309700C3,
+ 0x1D90, 0x309800C2,
+ 0x1D90, 0x309900C1,
+ 0x1D90, 0x309A00A3,
+ 0x1D90, 0x309B00A2,
+ 0x1D90, 0x309C00A1,
+ 0x1D90, 0x309D0085,
+ 0x1D90, 0x309E0084,
+ 0x1D90, 0x309F0083,
+ 0x1D90, 0x30A00082,
+ 0x1D90, 0x30A10081,
+ 0x1D90, 0x30A20067,
+ 0x1D90, 0x30A30066,
+ 0x1D90, 0x30A40065,
+ 0x1D90, 0x30A50064,
+ 0x1D90, 0x30A60063,
+ 0x1D90, 0x30A70044,
+ 0x1D90, 0x30A80043,
+ 0x1D90, 0x30A90042,
+ 0x1D90, 0x30AA0026,
+ 0x1D90, 0x30AB0025,
+ 0x1D90, 0x30AC0024,
+ 0x1D90, 0x30AD0023,
+ 0x1D90, 0x30AE0022,
+ 0x1D90, 0x30AF0021,
+ 0x1D90, 0x30B00005,
+ 0x1D90, 0x30B10004,
+ 0x1D90, 0x30B20003,
+ 0x1D90, 0x30B30002,
+ 0x1D90, 0x30B40001,
+ 0x1D90, 0x30B50000,
+ 0x1D90, 0x30B60000,
+ 0x1D90, 0x30B70000,
+ 0x1D90, 0x30B80000,
+ 0x1D90, 0x30B90000,
+ 0x1D90, 0x30BA0000,
+ 0x1D90, 0x30BB0000,
+ 0x1D90, 0x30BC0000,
+ 0x1D90, 0x30BD0000,
+ 0x1D90, 0x30BE0000,
+ 0x1D90, 0x30BF0000,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x308000FA,
+ 0x1D90, 0x308100F9,
+ 0x1D90, 0x308200F8,
+ 0x1D90, 0x308300F7,
+ 0x1D90, 0x308400F6,
+ 0x1D90, 0x308500F5,
+ 0x1D90, 0x308600F4,
+ 0x1D90, 0x308700F3,
+ 0x1D90, 0x308800F2,
+ 0x1D90, 0x308900F1,
+ 0x1D90, 0x308A00F0,
+ 0x1D90, 0x308B00EF,
+ 0x1D90, 0x308C00EE,
+ 0x1D90, 0x308D00ED,
+ 0x1D90, 0x308E00EC,
+ 0x1D90, 0x308F00EB,
+ 0x1D90, 0x309000EA,
+ 0x1D90, 0x309100E8,
+ 0x1D90, 0x309200E7,
+ 0x1D90, 0x309300E6,
+ 0x1D90, 0x309400E5,
+ 0x1D90, 0x309500E4,
+ 0x1D90, 0x309600C4,
+ 0x1D90, 0x309700C3,
+ 0x1D90, 0x309800C2,
+ 0x1D90, 0x309900C1,
+ 0x1D90, 0x309A00A3,
+ 0x1D90, 0x309B00A2,
+ 0x1D90, 0x309C00A1,
+ 0x1D90, 0x309D0085,
+ 0x1D90, 0x309E0084,
+ 0x1D90, 0x309F0083,
+ 0x1D90, 0x30A00082,
+ 0x1D90, 0x30A10081,
+ 0x1D90, 0x30A20067,
+ 0x1D90, 0x30A30066,
+ 0x1D90, 0x30A40065,
+ 0x1D90, 0x30A50064,
+ 0x1D90, 0x30A60063,
+ 0x1D90, 0x30A70044,
+ 0x1D90, 0x30A80043,
+ 0x1D90, 0x30A90042,
+ 0x1D90, 0x30AA0026,
+ 0x1D90, 0x30AB0025,
+ 0x1D90, 0x30AC0024,
+ 0x1D90, 0x30AD0023,
+ 0x1D90, 0x30AE0022,
+ 0x1D90, 0x30AF0021,
+ 0x1D90, 0x30B00005,
+ 0x1D90, 0x30B10004,
+ 0x1D90, 0x30B20003,
+ 0x1D90, 0x30B30002,
+ 0x1D90, 0x30B40001,
+ 0x1D90, 0x30B50000,
+ 0x1D90, 0x30B60000,
+ 0x1D90, 0x30B70000,
+ 0x1D90, 0x30B80000,
+ 0x1D90, 0x30B90000,
+ 0x1D90, 0x30BA0000,
+ 0x1D90, 0x30BB0000,
+ 0x1D90, 0x30BC0000,
+ 0x1D90, 0x30BD0000,
+ 0x1D90, 0x30BE0000,
+ 0x1D90, 0x30BF0000,
+ 0xA0000000, 0x00000000,
0x1D90, 0x308000FF,
0x1D90, 0x308100FF,
0x1D90, 0x308200FF,
0x1D90, 0x308300FF,
- 0x1D90, 0x308400FE,
- 0x1D90, 0x308500FD,
- 0x1D90, 0x308600FC,
- 0x1D90, 0x308700FB,
- 0x1D90, 0x308800FA,
- 0x1D90, 0x308900F9,
- 0x1D90, 0x308A00F8,
- 0x1D90, 0x308B00F7,
- 0x1D90, 0x308C00F6,
- 0x1D90, 0x308D00F5,
- 0x1D90, 0x308E00F4,
- 0x1D90, 0x308F00F3,
- 0x1D90, 0x309000F2,
- 0x1D90, 0x309100F1,
- 0x1D90, 0x309200F0,
- 0x1D90, 0x309300EF,
- 0x1D90, 0x309400EE,
- 0x1D90, 0x309500ED,
- 0x1D90, 0x309600EC,
- 0x1D90, 0x309700EB,
- 0x1D90, 0x309800EA,
- 0x1D90, 0x309900E9,
- 0x1D90, 0x309A00E8,
- 0x1D90, 0x309B00E7,
- 0x1D90, 0x309C00E6,
- 0x1D90, 0x309D00C7,
- 0x1D90, 0x309E00C6,
- 0x1D90, 0x309F00C5,
+ 0x1D90, 0x308400FF,
+ 0x1D90, 0x308500FF,
+ 0x1D90, 0x308600FE,
+ 0x1D90, 0x308700FD,
+ 0x1D90, 0x308800FC,
+ 0x1D90, 0x308900FB,
+ 0x1D90, 0x308A00FA,
+ 0x1D90, 0x308B00F9,
+ 0x1D90, 0x308C00F8,
+ 0x1D90, 0x308D00F7,
+ 0x1D90, 0x308E00F6,
+ 0x1D90, 0x308F00F5,
+ 0x1D90, 0x309000F4,
+ 0x1D90, 0x309100F3,
+ 0x1D90, 0x309200F2,
+ 0x1D90, 0x309300F1,
+ 0x1D90, 0x309400F0,
+ 0x1D90, 0x309500EF,
+ 0x1D90, 0x309600EE,
+ 0x1D90, 0x309700ED,
+ 0x1D90, 0x309800EC,
+ 0x1D90, 0x309900EB,
+ 0x1D90, 0x309A00EA,
+ 0x1D90, 0x309B00E8,
+ 0x1D90, 0x309C00E7,
+ 0x1D90, 0x309D00E6,
+ 0x1D90, 0x309E00E5,
+ 0x1D90, 0x309F00E4,
0x1D90, 0x30A000C4,
0x1D90, 0x30A100C3,
0x1D90, 0x30A200C2,
- 0x1D90, 0x30A300A4,
+ 0x1D90, 0x30A300C1,
0x1D90, 0x30A400A3,
0x1D90, 0x30A500A2,
- 0x1D90, 0x30A60086,
+ 0x1D90, 0x30A600A1,
0x1D90, 0x30A70085,
0x1D90, 0x30A80084,
0x1D90, 0x30A90083,
0x1D90, 0x30AA0082,
- 0x1D90, 0x30AB0069,
- 0x1D90, 0x30AC0068,
- 0x1D90, 0x30AD0067,
- 0x1D90, 0x30AE0066,
- 0x1D90, 0x30AF0065,
- 0x1D90, 0x30B00064,
- 0x1D90, 0x30B10063,
- 0x1D90, 0x30B20044,
- 0x1D90, 0x30B30043,
- 0x1D90, 0x30B40042,
+ 0x1D90, 0x30AB0081,
+ 0x1D90, 0x30AC0067,
+ 0x1D90, 0x30AD0066,
+ 0x1D90, 0x30AE0065,
+ 0x1D90, 0x30AF0064,
+ 0x1D90, 0x30B00063,
+ 0x1D90, 0x30B10044,
+ 0x1D90, 0x30B20043,
+ 0x1D90, 0x30B30042,
+ 0x1D90, 0x30B40026,
0x1D90, 0x30B50025,
0x1D90, 0x30B60024,
0x1D90, 0x30B70023,
0x1D90, 0x30B80022,
0x1D90, 0x30B90021,
- 0x1D90, 0x30BA0020,
- 0x1D90, 0x30BB0003,
- 0x1D90, 0x30BC0002,
- 0x1D90, 0x30BD0001,
- 0x1D90, 0x30BE0000,
+ 0x1D90, 0x30BA0005,
+ 0x1D90, 0x30BB0004,
+ 0x1D90, 0x30BC0003,
+ 0x1D90, 0x30BD0002,
+ 0x1D90, 0x30BE0001,
0x1D90, 0x30BF0000,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x30C000F8,
+ 0x1D90, 0x30C100F7,
+ 0x1D90, 0x30C200F6,
+ 0x1D90, 0x30C300F5,
+ 0x1D90, 0x30C400F4,
+ 0x1D90, 0x30C500F3,
+ 0x1D90, 0x30C600F2,
+ 0x1D90, 0x30C700F1,
+ 0x1D90, 0x30C800F0,
+ 0x1D90, 0x30C900EF,
+ 0x1D90, 0x30CA00EE,
+ 0x1D90, 0x30CB00ED,
+ 0x1D90, 0x30CC00EC,
+ 0x1D90, 0x30CD00EB,
+ 0x1D90, 0x30CE00EA,
+ 0x1D90, 0x30CF00E8,
+ 0x1D90, 0x30D000E7,
+ 0x1D90, 0x30D100E6,
+ 0x1D90, 0x30D200E5,
+ 0x1D90, 0x30D300E4,
+ 0x1D90, 0x30D400E3,
+ 0x1D90, 0x30D500E2,
+ 0x1D90, 0x30D600A6,
+ 0x1D90, 0x30D700A5,
+ 0x1D90, 0x30D800A4,
+ 0x1D90, 0x30D900A3,
+ 0x1D90, 0x30DA00A2,
+ 0x1D90, 0x30DB0086,
+ 0x1D90, 0x30DC0085,
+ 0x1D90, 0x30DD0084,
+ 0x1D90, 0x30DE0083,
+ 0x1D90, 0x30DF0081,
+ 0x1D90, 0x30E00068,
+ 0x1D90, 0x30E10067,
+ 0x1D90, 0x30E20066,
+ 0x1D90, 0x30E30065,
+ 0x1D90, 0x30E40064,
+ 0x1D90, 0x30E50045,
+ 0x1D90, 0x30E60044,
+ 0x1D90, 0x30E70043,
+ 0x1D90, 0x30E80042,
+ 0x1D90, 0x30E90025,
+ 0x1D90, 0x30EA0024,
+ 0x1D90, 0x30EB0023,
+ 0x1D90, 0x30EC0022,
+ 0x1D90, 0x30ED0021,
+ 0x1D90, 0x30EE0005,
+ 0x1D90, 0x30EF0004,
+ 0x1D90, 0x30F00003,
+ 0x1D90, 0x30F10002,
+ 0x1D90, 0x30F20001,
+ 0x1D90, 0x30F30000,
+ 0x1D90, 0x30F40000,
+ 0x1D90, 0x30F50000,
+ 0x1D90, 0x30F60000,
+ 0x1D90, 0x30F70000,
+ 0x1D90, 0x30F80000,
+ 0x1D90, 0x30F90000,
+ 0x1D90, 0x30FA0000,
+ 0x1D90, 0x30FB0000,
+ 0x1D90, 0x30FC0000,
+ 0x1D90, 0x30FD0000,
+ 0x1D90, 0x30FE0000,
+ 0x1D90, 0x30FF0000,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x30C000F8,
+ 0x1D90, 0x30C100F7,
+ 0x1D90, 0x30C200F6,
+ 0x1D90, 0x30C300F5,
+ 0x1D90, 0x30C400F4,
+ 0x1D90, 0x30C500F3,
+ 0x1D90, 0x30C600F2,
+ 0x1D90, 0x30C700F1,
+ 0x1D90, 0x30C800F0,
+ 0x1D90, 0x30C900EF,
+ 0x1D90, 0x30CA00EE,
+ 0x1D90, 0x30CB00ED,
+ 0x1D90, 0x30CC00EC,
+ 0x1D90, 0x30CD00EB,
+ 0x1D90, 0x30CE00EA,
+ 0x1D90, 0x30CF00E8,
+ 0x1D90, 0x30D000E7,
+ 0x1D90, 0x30D100E6,
+ 0x1D90, 0x30D200E5,
+ 0x1D90, 0x30D300E4,
+ 0x1D90, 0x30D400E3,
+ 0x1D90, 0x30D500E2,
+ 0x1D90, 0x30D600A6,
+ 0x1D90, 0x30D700A5,
+ 0x1D90, 0x30D800A4,
+ 0x1D90, 0x30D900A3,
+ 0x1D90, 0x30DA00A2,
+ 0x1D90, 0x30DB0086,
+ 0x1D90, 0x30DC0085,
+ 0x1D90, 0x30DD0084,
+ 0x1D90, 0x30DE0083,
+ 0x1D90, 0x30DF0081,
+ 0x1D90, 0x30E00068,
+ 0x1D90, 0x30E10067,
+ 0x1D90, 0x30E20066,
+ 0x1D90, 0x30E30065,
+ 0x1D90, 0x30E40064,
+ 0x1D90, 0x30E50045,
+ 0x1D90, 0x30E60044,
+ 0x1D90, 0x30E70043,
+ 0x1D90, 0x30E80042,
+ 0x1D90, 0x30E90025,
+ 0x1D90, 0x30EA0024,
+ 0x1D90, 0x30EB0023,
+ 0x1D90, 0x30EC0022,
+ 0x1D90, 0x30ED0021,
+ 0x1D90, 0x30EE0005,
+ 0x1D90, 0x30EF0004,
+ 0x1D90, 0x30F00003,
+ 0x1D90, 0x30F10002,
+ 0x1D90, 0x30F20001,
+ 0x1D90, 0x30F30000,
+ 0x1D90, 0x30F40000,
+ 0x1D90, 0x30F50000,
+ 0x1D90, 0x30F60000,
+ 0x1D90, 0x30F70000,
+ 0x1D90, 0x30F80000,
+ 0x1D90, 0x30F90000,
+ 0x1D90, 0x30FA0000,
+ 0x1D90, 0x30FB0000,
+ 0x1D90, 0x30FC0000,
+ 0x1D90, 0x30FD0000,
+ 0x1D90, 0x30FE0000,
+ 0x1D90, 0x30FF0000,
+ 0xA0000000, 0x00000000,
0x1D90, 0x30C000FF,
0x1D90, 0x30C100FF,
0x1D90, 0x30C200FF,
0x1D90, 0x30C300FF,
- 0x1D90, 0x30C400FE,
- 0x1D90, 0x30C500FD,
- 0x1D90, 0x30C600FC,
- 0x1D90, 0x30C700FB,
- 0x1D90, 0x30C800FA,
- 0x1D90, 0x30C900F9,
- 0x1D90, 0x30CA00F8,
- 0x1D90, 0x30CB00F7,
- 0x1D90, 0x30CC00F6,
- 0x1D90, 0x30CD00F5,
- 0x1D90, 0x30CE00F4,
- 0x1D90, 0x30CF00F3,
- 0x1D90, 0x30D000F2,
- 0x1D90, 0x30D100F1,
- 0x1D90, 0x30D200F0,
- 0x1D90, 0x30D300EF,
- 0x1D90, 0x30D400EE,
- 0x1D90, 0x30D500ED,
- 0x1D90, 0x30D600EC,
- 0x1D90, 0x30D700EB,
- 0x1D90, 0x30D800EA,
- 0x1D90, 0x30D900E9,
- 0x1D90, 0x30DA00E8,
- 0x1D90, 0x30DB00E7,
- 0x1D90, 0x30DC00E6,
- 0x1D90, 0x30DD00C7,
- 0x1D90, 0x30DE00C6,
- 0x1D90, 0x30DF00C5,
- 0x1D90, 0x30E000C4,
- 0x1D90, 0x30E100C3,
- 0x1D90, 0x30E200C2,
- 0x1D90, 0x30E300A4,
- 0x1D90, 0x30E400A3,
- 0x1D90, 0x30E500A2,
- 0x1D90, 0x30E60086,
- 0x1D90, 0x30E70085,
- 0x1D90, 0x30E80084,
- 0x1D90, 0x30E90083,
- 0x1D90, 0x30EA0082,
- 0x1D90, 0x30EB0069,
- 0x1D90, 0x30EC0068,
- 0x1D90, 0x30ED0067,
- 0x1D90, 0x30EE0066,
- 0x1D90, 0x30EF0065,
- 0x1D90, 0x30F00064,
- 0x1D90, 0x30F10063,
+ 0x1D90, 0x30C400FF,
+ 0x1D90, 0x30C500FF,
+ 0x1D90, 0x30C600FE,
+ 0x1D90, 0x30C700FD,
+ 0x1D90, 0x30C800FC,
+ 0x1D90, 0x30C900FB,
+ 0x1D90, 0x30CA00FA,
+ 0x1D90, 0x30CB00F9,
+ 0x1D90, 0x30CC00F8,
+ 0x1D90, 0x30CD00F7,
+ 0x1D90, 0x30CE00F6,
+ 0x1D90, 0x30CF00F5,
+ 0x1D90, 0x30D000F4,
+ 0x1D90, 0x30D100F3,
+ 0x1D90, 0x30D200F2,
+ 0x1D90, 0x30D300F1,
+ 0x1D90, 0x30D400F0,
+ 0x1D90, 0x30D500EF,
+ 0x1D90, 0x30D600EE,
+ 0x1D90, 0x30D700ED,
+ 0x1D90, 0x30D800EC,
+ 0x1D90, 0x30D900EB,
+ 0x1D90, 0x30DA00EA,
+ 0x1D90, 0x30DB00E8,
+ 0x1D90, 0x30DC00E7,
+ 0x1D90, 0x30DD00E6,
+ 0x1D90, 0x30DE00E5,
+ 0x1D90, 0x30DF00E4,
+ 0x1D90, 0x30E000E3,
+ 0x1D90, 0x30E100E2,
+ 0x1D90, 0x30E200A6,
+ 0x1D90, 0x30E300A5,
+ 0x1D90, 0x30E400A4,
+ 0x1D90, 0x30E500A3,
+ 0x1D90, 0x30E600A2,
+ 0x1D90, 0x30E70086,
+ 0x1D90, 0x30E80085,
+ 0x1D90, 0x30E90084,
+ 0x1D90, 0x30EA0083,
+ 0x1D90, 0x30EB0082,
+ 0x1D90, 0x30EC0067,
+ 0x1D90, 0x30ED0066,
+ 0x1D90, 0x30EE0065,
+ 0x1D90, 0x30EF0064,
+ 0x1D90, 0x30F00063,
+ 0x1D90, 0x30F10045,
0x1D90, 0x30F20044,
0x1D90, 0x30F30043,
0x1D90, 0x30F40042,
@@ -263,12 +790,14 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x30F70023,
0x1D90, 0x30F80022,
0x1D90, 0x30F90021,
- 0x1D90, 0x30FA0020,
- 0x1D90, 0x30FB0003,
- 0x1D90, 0x30FC0002,
- 0x1D90, 0x30FD0001,
- 0x1D90, 0x30FE0000,
+ 0x1D90, 0x30FA0005,
+ 0x1D90, 0x30FB0004,
+ 0x1D90, 0x30FC0003,
+ 0x1D90, 0x30FD0002,
+ 0x1D90, 0x30FE0001,
0x1D90, 0x30FF0000,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x310001FF,
0x1D90, 0x310101FF,
0x1D90, 0x310201FF,
@@ -333,6 +862,203 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x313D0045,
0x1D90, 0x313E0044,
0x1D90, 0x313F0043,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x310001FF,
+ 0x1D90, 0x310101FF,
+ 0x1D90, 0x310201FF,
+ 0x1D90, 0x310301FF,
+ 0x1D90, 0x310401FF,
+ 0x1D90, 0x310501FF,
+ 0x1D90, 0x310601FF,
+ 0x1D90, 0x310701FF,
+ 0x1D90, 0x310801FF,
+ 0x1D90, 0x310901FE,
+ 0x1D90, 0x310A01FD,
+ 0x1D90, 0x310B01FC,
+ 0x1D90, 0x310C01FB,
+ 0x1D90, 0x310D01FA,
+ 0x1D90, 0x310E01F9,
+ 0x1D90, 0x310F01F8,
+ 0x1D90, 0x311001F7,
+ 0x1D90, 0x311101F6,
+ 0x1D90, 0x311201F5,
+ 0x1D90, 0x311301F4,
+ 0x1D90, 0x311401F3,
+ 0x1D90, 0x311501F2,
+ 0x1D90, 0x311601F1,
+ 0x1D90, 0x311701F0,
+ 0x1D90, 0x311801EF,
+ 0x1D90, 0x311901EE,
+ 0x1D90, 0x311A01ED,
+ 0x1D90, 0x311B01EC,
+ 0x1D90, 0x311C01EB,
+ 0x1D90, 0x311D0192,
+ 0x1D90, 0x311E0191,
+ 0x1D90, 0x311F0190,
+ 0x1D90, 0x3120018F,
+ 0x1D90, 0x3121018E,
+ 0x1D90, 0x3122018D,
+ 0x1D90, 0x3123018C,
+ 0x1D90, 0x3124018B,
+ 0x1D90, 0x3125018A,
+ 0x1D90, 0x31260189,
+ 0x1D90, 0x31270188,
+ 0x1D90, 0x31280187,
+ 0x1D90, 0x31290186,
+ 0x1D90, 0x312A0185,
+ 0x1D90, 0x312B0149,
+ 0x1D90, 0x312C0148,
+ 0x1D90, 0x312D0147,
+ 0x1D90, 0x312E0146,
+ 0x1D90, 0x312F0145,
+ 0x1D90, 0x31300144,
+ 0x1D90, 0x31310143,
+ 0x1D90, 0x31320142,
+ 0x1D90, 0x31330141,
+ 0x1D90, 0x31340140,
+ 0x1D90, 0x313500C7,
+ 0x1D90, 0x313600C6,
+ 0x1D90, 0x313700C5,
+ 0x1D90, 0x313800C4,
+ 0x1D90, 0x313900C3,
+ 0x1D90, 0x313A0088,
+ 0x1D90, 0x313B0087,
+ 0x1D90, 0x313C0086,
+ 0x1D90, 0x313D0045,
+ 0x1D90, 0x313E0044,
+ 0x1D90, 0x313F0043,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x310001FF,
+ 0x1D90, 0x310101FF,
+ 0x1D90, 0x310201FF,
+ 0x1D90, 0x310301FF,
+ 0x1D90, 0x310401FF,
+ 0x1D90, 0x310501FF,
+ 0x1D90, 0x310601FF,
+ 0x1D90, 0x310701FF,
+ 0x1D90, 0x310801FF,
+ 0x1D90, 0x310901FE,
+ 0x1D90, 0x310A01FD,
+ 0x1D90, 0x310B01FC,
+ 0x1D90, 0x310C01FB,
+ 0x1D90, 0x310D01FA,
+ 0x1D90, 0x310E01F9,
+ 0x1D90, 0x310F01F8,
+ 0x1D90, 0x311001F7,
+ 0x1D90, 0x311101F6,
+ 0x1D90, 0x311201F5,
+ 0x1D90, 0x311301F4,
+ 0x1D90, 0x311401F3,
+ 0x1D90, 0x311501F2,
+ 0x1D90, 0x311601F1,
+ 0x1D90, 0x311701F0,
+ 0x1D90, 0x311801EF,
+ 0x1D90, 0x311901EE,
+ 0x1D90, 0x311A01ED,
+ 0x1D90, 0x311B01EC,
+ 0x1D90, 0x311C01EB,
+ 0x1D90, 0x311D0192,
+ 0x1D90, 0x311E0191,
+ 0x1D90, 0x311F0190,
+ 0x1D90, 0x3120018F,
+ 0x1D90, 0x3121018E,
+ 0x1D90, 0x3122018D,
+ 0x1D90, 0x3123018C,
+ 0x1D90, 0x3124018B,
+ 0x1D90, 0x3125018A,
+ 0x1D90, 0x31260189,
+ 0x1D90, 0x31270188,
+ 0x1D90, 0x31280187,
+ 0x1D90, 0x31290186,
+ 0x1D90, 0x312A0185,
+ 0x1D90, 0x312B0149,
+ 0x1D90, 0x312C0148,
+ 0x1D90, 0x312D0147,
+ 0x1D90, 0x312E0146,
+ 0x1D90, 0x312F0145,
+ 0x1D90, 0x31300144,
+ 0x1D90, 0x31310143,
+ 0x1D90, 0x31320142,
+ 0x1D90, 0x31330141,
+ 0x1D90, 0x31340140,
+ 0x1D90, 0x313500C7,
+ 0x1D90, 0x313600C6,
+ 0x1D90, 0x313700C5,
+ 0x1D90, 0x313800C4,
+ 0x1D90, 0x313900C3,
+ 0x1D90, 0x313A0088,
+ 0x1D90, 0x313B0087,
+ 0x1D90, 0x313C0086,
+ 0x1D90, 0x313D0045,
+ 0x1D90, 0x313E0044,
+ 0x1D90, 0x313F0043,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x314001FF,
+ 0x1D90, 0x314101FF,
+ 0x1D90, 0x314201FF,
+ 0x1D90, 0x314301FF,
+ 0x1D90, 0x314401FF,
+ 0x1D90, 0x314501FF,
+ 0x1D90, 0x314601FF,
+ 0x1D90, 0x314701FE,
+ 0x1D90, 0x314801FD,
+ 0x1D90, 0x314901FC,
+ 0x1D90, 0x314A01FB,
+ 0x1D90, 0x314B01FA,
+ 0x1D90, 0x314C01F9,
+ 0x1D90, 0x314D01F8,
+ 0x1D90, 0x314E01F7,
+ 0x1D90, 0x314F01F6,
+ 0x1D90, 0x315001F5,
+ 0x1D90, 0x315101F4,
+ 0x1D90, 0x315201F3,
+ 0x1D90, 0x315301F2,
+ 0x1D90, 0x315401F1,
+ 0x1D90, 0x315501F0,
+ 0x1D90, 0x315601EF,
+ 0x1D90, 0x315701EE,
+ 0x1D90, 0x315801ED,
+ 0x1D90, 0x315901EC,
+ 0x1D90, 0x315A01EB,
+ 0x1D90, 0x315B01EA,
+ 0x1D90, 0x315C01E9,
+ 0x1D90, 0x315D018F,
+ 0x1D90, 0x315E018E,
+ 0x1D90, 0x315F018D,
+ 0x1D90, 0x3160018C,
+ 0x1D90, 0x3161018B,
+ 0x1D90, 0x3162018A,
+ 0x1D90, 0x31630189,
+ 0x1D90, 0x31640188,
+ 0x1D90, 0x31650187,
+ 0x1D90, 0x31660186,
+ 0x1D90, 0x31670185,
+ 0x1D90, 0x31680184,
+ 0x1D90, 0x31690183,
+ 0x1D90, 0x316A0182,
+ 0x1D90, 0x316B0149,
+ 0x1D90, 0x316C0148,
+ 0x1D90, 0x316D0147,
+ 0x1D90, 0x316E0146,
+ 0x1D90, 0x316F0145,
+ 0x1D90, 0x31700144,
+ 0x1D90, 0x31710143,
+ 0x1D90, 0x31720142,
+ 0x1D90, 0x31730141,
+ 0x1D90, 0x31740140,
+ 0x1D90, 0x317500C7,
+ 0x1D90, 0x317600C6,
+ 0x1D90, 0x317700C5,
+ 0x1D90, 0x317800C4,
+ 0x1D90, 0x317900C3,
+ 0x1D90, 0x317A0088,
+ 0x1D90, 0x317B0087,
+ 0x1D90, 0x317C0086,
+ 0x1D90, 0x317D0045,
+ 0x1D90, 0x317E0044,
+ 0x1D90, 0x317F0043,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x314001FF,
0x1D90, 0x314101FF,
0x1D90, 0x314201FF,
@@ -397,6 +1123,73 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x317D0045,
0x1D90, 0x317E0044,
0x1D90, 0x317F0043,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x314001FF,
+ 0x1D90, 0x314101FF,
+ 0x1D90, 0x314201FF,
+ 0x1D90, 0x314301FF,
+ 0x1D90, 0x314401FF,
+ 0x1D90, 0x314501FF,
+ 0x1D90, 0x314601FF,
+ 0x1D90, 0x314701FE,
+ 0x1D90, 0x314801FD,
+ 0x1D90, 0x314901FC,
+ 0x1D90, 0x314A01FB,
+ 0x1D90, 0x314B01FA,
+ 0x1D90, 0x314C01F9,
+ 0x1D90, 0x314D01F8,
+ 0x1D90, 0x314E01F7,
+ 0x1D90, 0x314F01F6,
+ 0x1D90, 0x315001F5,
+ 0x1D90, 0x315101F4,
+ 0x1D90, 0x315201F3,
+ 0x1D90, 0x315301F2,
+ 0x1D90, 0x315401F1,
+ 0x1D90, 0x315501F0,
+ 0x1D90, 0x315601EF,
+ 0x1D90, 0x315701EE,
+ 0x1D90, 0x315801ED,
+ 0x1D90, 0x315901EC,
+ 0x1D90, 0x315A01EB,
+ 0x1D90, 0x315B01EA,
+ 0x1D90, 0x315C01E9,
+ 0x1D90, 0x315D018F,
+ 0x1D90, 0x315E018E,
+ 0x1D90, 0x315F018D,
+ 0x1D90, 0x3160018C,
+ 0x1D90, 0x3161018B,
+ 0x1D90, 0x3162018A,
+ 0x1D90, 0x31630189,
+ 0x1D90, 0x31640188,
+ 0x1D90, 0x31650187,
+ 0x1D90, 0x31660186,
+ 0x1D90, 0x31670185,
+ 0x1D90, 0x31680184,
+ 0x1D90, 0x31690183,
+ 0x1D90, 0x316A0182,
+ 0x1D90, 0x316B0149,
+ 0x1D90, 0x316C0148,
+ 0x1D90, 0x316D0147,
+ 0x1D90, 0x316E0146,
+ 0x1D90, 0x316F0145,
+ 0x1D90, 0x31700144,
+ 0x1D90, 0x31710143,
+ 0x1D90, 0x31720142,
+ 0x1D90, 0x31730141,
+ 0x1D90, 0x31740140,
+ 0x1D90, 0x317500C7,
+ 0x1D90, 0x317600C6,
+ 0x1D90, 0x317700C5,
+ 0x1D90, 0x317800C4,
+ 0x1D90, 0x317900C3,
+ 0x1D90, 0x317A0088,
+ 0x1D90, 0x317B0087,
+ 0x1D90, 0x317C0086,
+ 0x1D90, 0x317D0045,
+ 0x1D90, 0x317E0044,
+ 0x1D90, 0x317F0043,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
0x1D90, 0x318001FE,
0x1D90, 0x318101FD,
0x1D90, 0x318201FC,
@@ -461,8 +1254,147 @@ static const u32 rtw8822c_agc[] = {
0x1D90, 0x31BD0003,
0x1D90, 0x31BE0002,
0x1D90, 0x31BF0001,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D90, 0x318001FE,
+ 0x1D90, 0x318101FD,
+ 0x1D90, 0x318201FC,
+ 0x1D90, 0x318301FB,
+ 0x1D90, 0x318401FA,
+ 0x1D90, 0x318501F9,
+ 0x1D90, 0x318601F8,
+ 0x1D90, 0x318701F7,
+ 0x1D90, 0x318801F6,
+ 0x1D90, 0x318901F5,
+ 0x1D90, 0x318A01F4,
+ 0x1D90, 0x318B01F3,
+ 0x1D90, 0x318C01F2,
+ 0x1D90, 0x318D01F1,
+ 0x1D90, 0x318E01F0,
+ 0x1D90, 0x318F01EF,
+ 0x1D90, 0x319001EE,
+ 0x1D90, 0x319101ED,
+ 0x1D90, 0x319201EC,
+ 0x1D90, 0x319301EB,
+ 0x1D90, 0x319401EA,
+ 0x1D90, 0x319501E9,
+ 0x1D90, 0x319601E7,
+ 0x1D90, 0x319701E6,
+ 0x1D90, 0x319801E5,
+ 0x1D90, 0x319901E4,
+ 0x1D90, 0x319A01A8,
+ 0x1D90, 0x319B01A7,
+ 0x1D90, 0x319C01A6,
+ 0x1D90, 0x319D01A5,
+ 0x1D90, 0x319E0185,
+ 0x1D90, 0x319F0184,
+ 0x1D90, 0x31A00183,
+ 0x1D90, 0x31A10182,
+ 0x1D90, 0x31A20149,
+ 0x1D90, 0x31A30148,
+ 0x1D90, 0x31A40147,
+ 0x1D90, 0x31A50145,
+ 0x1D90, 0x31A60144,
+ 0x1D90, 0x31A70143,
+ 0x1D90, 0x31A80142,
+ 0x1D90, 0x31A900E6,
+ 0x1D90, 0x31AA00E5,
+ 0x1D90, 0x31AB00C9,
+ 0x1D90, 0x31AC00C8,
+ 0x1D90, 0x31AD00C7,
+ 0x1D90, 0x31AE00C6,
+ 0x1D90, 0x31AF00C5,
+ 0x1D90, 0x31B000C4,
+ 0x1D90, 0x31B100C3,
+ 0x1D90, 0x31B20088,
+ 0x1D90, 0x31B30087,
+ 0x1D90, 0x31B40086,
+ 0x1D90, 0x31B50085,
+ 0x1D90, 0x31B60026,
+ 0x1D90, 0x31B70025,
+ 0x1D90, 0x31B80024,
+ 0x1D90, 0x31B90023,
+ 0x1D90, 0x31BA0022,
+ 0x1D90, 0x31BB0021,
+ 0x1D90, 0x31BC0020,
+ 0x1D90, 0x31BD0003,
+ 0x1D90, 0x31BE0002,
+ 0x1D90, 0x31BF0001,
+ 0xA0000000, 0x00000000,
+ 0x1D90, 0x318001FE,
+ 0x1D90, 0x318101FD,
+ 0x1D90, 0x318201FC,
+ 0x1D90, 0x318301FB,
+ 0x1D90, 0x318401FA,
+ 0x1D90, 0x318501F9,
+ 0x1D90, 0x318601F8,
+ 0x1D90, 0x318701F7,
+ 0x1D90, 0x318801F6,
+ 0x1D90, 0x318901F5,
+ 0x1D90, 0x318A01F4,
+ 0x1D90, 0x318B01F3,
+ 0x1D90, 0x318C01F2,
+ 0x1D90, 0x318D01F1,
+ 0x1D90, 0x318E01F0,
+ 0x1D90, 0x318F01EF,
+ 0x1D90, 0x319001EE,
+ 0x1D90, 0x319101ED,
+ 0x1D90, 0x319201EC,
+ 0x1D90, 0x319301EB,
+ 0x1D90, 0x319401EA,
+ 0x1D90, 0x319501E9,
+ 0x1D90, 0x319601E7,
+ 0x1D90, 0x319701E6,
+ 0x1D90, 0x319801E5,
+ 0x1D90, 0x319901E4,
+ 0x1D90, 0x319A01A8,
+ 0x1D90, 0x319B01A7,
+ 0x1D90, 0x319C01A6,
+ 0x1D90, 0x319D01A5,
+ 0x1D90, 0x319E0185,
+ 0x1D90, 0x319F0184,
+ 0x1D90, 0x31A00183,
+ 0x1D90, 0x31A10182,
+ 0x1D90, 0x31A20149,
+ 0x1D90, 0x31A30148,
+ 0x1D90, 0x31A40147,
+ 0x1D90, 0x31A50145,
+ 0x1D90, 0x31A60144,
+ 0x1D90, 0x31A70143,
+ 0x1D90, 0x31A80142,
+ 0x1D90, 0x31A900E6,
+ 0x1D90, 0x31AA00E5,
+ 0x1D90, 0x31AB00C9,
+ 0x1D90, 0x31AC00C8,
+ 0x1D90, 0x31AD00C7,
+ 0x1D90, 0x31AE00C6,
+ 0x1D90, 0x31AF00C5,
+ 0x1D90, 0x31B000C4,
+ 0x1D90, 0x31B100C3,
+ 0x1D90, 0x31B20088,
+ 0x1D90, 0x31B30087,
+ 0x1D90, 0x31B40086,
+ 0x1D90, 0x31B50085,
+ 0x1D90, 0x31B60026,
+ 0x1D90, 0x31B70025,
+ 0x1D90, 0x31B80024,
+ 0x1D90, 0x31B90023,
+ 0x1D90, 0x31BA0022,
+ 0x1D90, 0x31BB0021,
+ 0x1D90, 0x31BC0020,
+ 0x1D90, 0x31BD0003,
+ 0x1D90, 0x31BE0002,
+ 0x1D90, 0x31BF0001,
+ 0xB0000000, 0x00000000,
+ 0x80000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D70, 0x22222222,
+ 0x1D70, 0x20202020,
+ 0x90000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1D70, 0x22222222,
+ 0x1D70, 0x20202020,
+ 0xA0000000, 0x00000000,
0x1D70, 0x22222222,
0x1D70, 0x20202020,
+ 0xB0000000, 0x00000000,
};
RTW_DECL_TABLE_PHY_COND(rtw8822c_agc, rtw_phy_cfg_agc);
@@ -732,7 +1664,7 @@ static const u32 rtw8822c_bb[] = {
0xC18, 0x00087672,
0xC1C, 0x15260000,
0xC20, 0x00000000,
- 0xC24, 0x40600000,
+ 0xC24, 0x406000FF,
0xC28, 0x06400F76,
0xC2C, 0xE30020E1,
0xC30, 0x140C9494,
@@ -861,9 +1793,29 @@ static const u32 rtw8822c_bb[] = {
0x1828, 0x000004FD,
0x182C, 0x00000000,
0x1834, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1838, 0x20100000,
+ 0xA0000000, 0x00000000,
0x1838, 0x20000000,
+ 0xB0000000, 0x00000000,
0x183C, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1840, 0x00002300,
+ 0xA0000000, 0x00000000,
0x1840, 0x00000000,
+ 0xB0000000, 0x00000000,
0x1844, 0x00000000,
0x1848, 0x00000000,
0x184C, 0x00000000,
@@ -874,13 +1826,33 @@ static const u32 rtw8822c_bb[] = {
0x1860, 0xF0040FF8,
0x1864, 0x7F000000,
0x1868, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x186C, 0x0000FF02,
+ 0xA0000000, 0x00000000,
0x186C, 0x0000FF00,
+ 0xB0000000, 0x00000000,
0x1870, 0x00000000,
0x1874, 0x00000000,
0x1878, 0x00000000,
0x187C, 0x00000000,
0x1880, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1884, 0x03B00000,
+ 0xA0000000, 0x00000000,
0x1884, 0x02B00000,
+ 0xB0000000, 0x00000000,
0x1888, 0x00000000,
0x188C, 0x00000000,
0x1890, 0x00000000,
@@ -999,7 +1971,7 @@ static const u32 rtw8822c_bb[] = {
0x1C58, 0x00000000,
0x1C5C, 0xFFFFFFFF,
0x1C60, 0x0F030032,
- 0x1C64, 0x360F0000,
+ 0x1C64, 0x360F0008,
0x1C68, 0x007F0000,
0x1C6C, 0x00010000,
0x1C70, 0x00037FFE,
@@ -1010,8 +1982,22 @@ static const u32 rtw8822c_bb[] = {
0x1C84, 0x245120D4,
0x1C88, 0xC8400483,
0x1C8C, 0x40005A20,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x1C94, 0x00000B0E,
+ 0x1C98, 0x00450000,
+ 0xA0000000, 0x00000000,
0x1C94, 0x00000000,
0x1C98, 0x00000000,
+ 0xB0000000, 0x00000000,
0x1C9C, 0x00000000,
0x1CA0, 0x00000000,
0x1CA4, 0x20000000,
@@ -1125,7 +2111,7 @@ static const u32 rtw8822c_bb[] = {
0x1E60, 0x00000000,
0x1E64, 0xF3A00001,
0x1E68, 0x0028846E,
- 0x1E6C, 0x40374906,
+ 0x1E6C, 0x40274906,
0x1E70, 0x00001000,
0x1E74, 0x00000000,
0x1E78, 0x00000000,
@@ -1344,10 +2330,30 @@ static const u32 rtw8822c_bb[] = {
0x4128, 0x000004FD,
0x412C, 0x00000000,
0x4134, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4138, 0x20100000,
+ 0xA0000000, 0x00000000,
0x4138, 0x20000000,
+ 0xB0000000, 0x00000000,
0x413C, 0x00000000,
0x4140, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4144, 0x00002030,
+ 0xA0000000, 0x00000000,
0x4144, 0x00000000,
+ 0xB0000000, 0x00000000,
0x4148, 0x00000000,
0x414C, 0x00000000,
0x4150, 0x00000000,
@@ -1357,13 +2363,33 @@ static const u32 rtw8822c_bb[] = {
0x4160, 0xF0040FF8,
0x4164, 0x7F000000,
0x4168, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x416C, 0x00008002,
+ 0xA0000000, 0x00000000,
0x416C, 0x00008000,
+ 0xB0000000, 0x00000000,
0x4170, 0x00000000,
0x4174, 0x00000000,
0x4178, 0x00000000,
0x417C, 0x00000000,
0x4180, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x4184, 0x03B00000,
+ 0xA0000000, 0x00000000,
0x4184, 0x02B00000,
+ 0xB0000000, 0x00000000,
0x4188, 0x00000000,
0x418C, 0x00000000,
0x4190, 0x00000000,
@@ -1483,7 +2509,7 @@ static const u32 rtw8822c_bb[] = {
0x1AC4, 0x00000000,
0x1AC8, 0x00000807,
0x1ACC, 0x00000707,
- 0x1AD0, 0xA33529AD,
+ 0x1AD0, 0xA33529CE,
0x1AD4, 0x0D8D8452,
0x1AD8, 0x08024024,
0x1ADC, 0x000D0001,
@@ -1757,56 +2783,55 @@ static const u32 rtw8822c_bb[] = {
0x1D94, 0x40FF0000,
0xC0C, 0x02F1D8B7,
0x1EE8, 0x00000000,
-
};
RTW_DECL_TABLE_PHY_COND(rtw8822c_bb, rtw_phy_cfg_bb);
static const struct rtw_phy_pg_cfg_pair rtw8822c_bb_pg_type0[] = {
{ 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054, },
- { 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585858, },
{ 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
- { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x50545858, },
{ 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
- { 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x50545858, },
{ 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
- { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x50545858, },
{ 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
- { 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x5858383c, },
{ 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
{ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
{ 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054, },
- { 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585858, },
{ 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
- { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x50545858, },
{ 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
- { 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x50545858, },
{ 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
- { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x50545858, },
{ 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
- { 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x5858383c, },
{ 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
{ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
- { 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585858, },
{ 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50, },
- { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x50545858, },
{ 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c, },
- { 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x50545858, },
{ 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c, },
- { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x50545858, },
{ 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c, },
- { 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x5858383c, },
{ 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054, },
{ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044, },
- { 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585858, },
{ 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50, },
- { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x50545858, },
{ 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c, },
- { 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x50545858, },
{ 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c, },
- { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x50545858, },
{ 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c, },
- { 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x5858383c, },
{ 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054, },
{ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044, },
};
@@ -1834,6 +2859,26 @@ static const u32 rtw8822c_rf_a[] = {
0x08E, 0x000A5540,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0xA0000000, 0x00000000,
0x08E, 0x000A5540,
0xB0000000, 0x00000000,
@@ -1856,6 +2901,26 @@ static const u32 rtw8822c_rf_a[] = {
0x085, 0x0006A06C,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x085, 0x0006A06C,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
0xA0000000, 0x00000000,
0x085, 0x0006A06C,
0xB0000000, 0x00000000,
@@ -1931,6 +2996,96 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0xA0000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -2149,6 +3304,266 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -2378,6 +3793,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773E8,
@@ -2606,6 +4271,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773E8,
@@ -2834,6 +4749,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773E8,
@@ -3062,6 +5227,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773E8,
@@ -3290,6 +5705,256 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773E8,
@@ -3334,6 +5999,26 @@ static const u32 rtw8822c_rf_a[] = {
0x0EF, 0x00000000,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0xA0000000, 0x00000000,
0x0EF, 0x00000000,
0xB0000000, 0x00000000,
@@ -3378,8 +6063,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -3498,7 +6330,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -3525,8 +6357,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -3645,7 +6624,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -3672,8 +6651,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -3792,7 +6918,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -3819,8 +6945,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -3939,7 +7212,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -3966,8 +7239,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -4086,7 +7506,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -4113,8 +7533,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -4233,7 +7800,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -4260,8 +7827,155 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -4380,7 +8094,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -4407,8 +8121,449 @@ static const u32 rtw8822c_rf_a[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -4695,6 +8850,26 @@ static const u32 rtw8822c_rf_a[] = {
0x063, 0x00000002,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0xA0000000, 0x00000000,
0x063, 0x00000C02,
0xB0000000, 0x00000000,
@@ -4915,6 +9090,276 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00017239,
0x030, 0x00018209,
0x030, 0x00019239,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
0xA0000000, 0x00000000,
0x030, 0x00000233,
0x030, 0x00001233,
@@ -5049,6 +9494,136 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0xA0000000, 0x00000000,
0x030, 0x00000232,
0x030, 0x00001232,
@@ -5076,6 +9651,99 @@ static const u32 rtw8822c_rf_a[] = {
0x030, 0x0000C330,
0x0EF, 0x00000000,
0x0EE, 0x00010000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0xA0000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x0000006A,
0x033, 0x00000201,
@@ -5098,6 +9766,100 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000CF4,
0x033, 0x0000020A,
0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0xA0000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x0000006A,
0x033, 0x00000281,
@@ -5120,6 +9882,104 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000CF4,
0x033, 0x0000028A,
0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0xA0000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x0000006A,
0x033, 0x00000301,
@@ -5143,6 +10003,7 @@ static const u32 rtw8822c_rf_a[] = {
0x033, 0x0000030A,
0x03F, 0x00000CF7,
0x0EE, 0x00000000,
+ 0xB0000000, 0x00000000,
0x051, 0x0003C800,
0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
@@ -5160,6 +10021,26 @@ static const u32 rtw8822c_rf_a[] = {
0x052, 0x000902CA,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0xA0000000, 0x00000000,
0x052, 0x000942CA,
0xB0000000, 0x00000000,
@@ -5185,6 +10066,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5206,6 +10107,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5227,6 +10148,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5248,6 +10189,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5269,6 +10230,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5290,6 +10271,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5311,6 +10312,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5332,6 +10353,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5353,6 +10394,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5374,6 +10435,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5395,6 +10476,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5416,6 +10517,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5437,6 +10558,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5458,6 +10599,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00028246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00028246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00028246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5479,6 +10640,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00030246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00030246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5493,13 +10674,33 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5514,13 +10715,33 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00028246,
+ 0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5535,13 +10756,33 @@ static const u32 rtw8822c_rf_a[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0000EA46,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x00031E46,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x00031E46,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x00030246,
+ 0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5563,6 +10804,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5584,6 +10845,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5605,6 +10886,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5626,6 +10927,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5647,6 +10968,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5668,6 +11009,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5689,6 +11050,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5710,6 +11091,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5731,6 +11132,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5752,6 +11173,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5773,6 +11214,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5794,6 +11255,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5815,6 +11296,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5836,6 +11337,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5857,6 +11378,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5878,6 +11419,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5899,6 +11460,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5920,6 +11501,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5941,6 +11542,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5962,6 +11583,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -5983,6 +11624,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6004,6 +11665,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6025,6 +11706,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00025E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00025E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00025E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6046,6 +11747,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00031E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00031E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6067,6 +11788,26 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00021E46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00021E46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
0xA0000000, 0x00000000,
0x03F, 0x00002A46,
0xB0000000, 0x00000000,
@@ -6256,6 +11997,236 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000487,
@@ -6464,6 +12435,236 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000487,
@@ -6489,7 +12690,7 @@ static const u32 rtw8822c_rf_a[] = {
0x03F, 0x00000DF7,
0xB0000000, 0x00000000,
0x0EE, 0x00000000,
- 0x05C, 0x000FCC00,
+ 0x05C, 0x000FC000,
0x067, 0x0000A505,
0x0D3, 0x00000542,
0x043, 0x00005000,
@@ -6513,6 +12714,26 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0xA0000000, 0x00000000,
0x0B3, 0x0007C760,
0xB0000000, 0x00000000,
@@ -6522,6 +12743,18 @@ static const u32 rtw8822c_rf_a[] = {
0x0B6, 0x000387F8,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0B6, 0x000387F8,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B6, 0x000387F8,
0xA0000000, 0x00000000,
0x0B6, 0x000187F8,
0xB0000000, 0x00000000,
@@ -6552,12 +12785,33 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0xA0000000, 0x00000000,
0x0B3, 0x0007C700,
0xB0000000, 0x00000000,
0x018, 0x0001B124,
0xFFE, 0x00000000,
0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x0007C760,
0x91000002, 0x00000000, 0x40000000, 0x00000000,
@@ -6574,6 +12828,26 @@ static const u32 rtw8822c_rf_a[] = {
0x0B3, 0x000FC760,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0B3, 0x000FC760,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
0xA0000000, 0x00000000,
0x0B3, 0x0007C760,
0xB0000000, 0x00000000,
@@ -6605,6 +12879,26 @@ static const u32 rtw8822c_rf_a[] = {
0x0DD, 0x00000540,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0DD, 0x00000540,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DD, 0x00000540,
0xA0000000, 0x00000000,
0x0DD, 0x00000500,
0xB0000000, 0x00000000,
@@ -6714,7 +13008,37 @@ static const u32 rtw8822c_rf_b[] = {
0x093, 0x0008483F,
0x0EF, 0x00080000,
0x033, 0x00000001,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0009123E,
+ 0xA0000000, 0x00000000,
0x03F, 0x00091230,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0DE, 0x00000020,
0x81000001, 0x00000000, 0x40000000, 0x00000000,
@@ -6733,6 +13057,26 @@ static const u32 rtw8822c_rf_b[] = {
0x08E, 0x000A5540,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x08E, 0x000A5540,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
0xA0000000, 0x00000000,
0x08E, 0x000A5540,
0xB0000000, 0x00000000,
@@ -6812,6 +13156,96 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000002,
0x03F, 0x0000002A,
0x0EE, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
0xA0000000, 0x00000000,
0x0EE, 0x00000010,
0x033, 0x00000001,
@@ -7030,6 +13464,266 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000004,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x0EF, 0x00010000,
0x033, 0x0000000F,
@@ -7259,6 +13953,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000014,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000001F,
0x03F, 0x000773E8,
@@ -7487,6 +14431,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000024,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000002F,
0x03F, 0x000773E8,
@@ -7715,6 +14909,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000034,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000003F,
0x03F, 0x000773E8,
@@ -7943,6 +15387,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000044,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000004F,
0x03F, 0x000773E8,
@@ -8171,6 +15865,256 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000180,
0x033, 0x00000054,
0x03F, 0x00000040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000287,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000207,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
0xA0000000, 0x00000000,
0x033, 0x0000005F,
0x03F, 0x000773E8,
@@ -8215,6 +16159,26 @@ static const u32 rtw8822c_rf_b[] = {
0x0EF, 0x00000000,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
0xA0000000, 0x00000000,
0x0EF, 0x00000000,
0xB0000000, 0x00000000,
@@ -8257,10 +16221,10 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
- 0x03F, 0x0002F81C,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
- 0x03E, 0x00001C86,
- 0x03F, 0x00020000,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8404,10 +16368,157 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
- 0x03F, 0x0002F81C,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8526,7 +16637,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -8551,10 +16662,304 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8673,7 +17078,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -8698,10 +17103,304 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8820,7 +17519,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -8847,8 +17546,155 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -8967,7 +17813,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -8992,10 +17838,157 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
- 0x03F, 0x0002F81C,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -9114,7 +18107,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -9139,10 +18132,304 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00020000,
0x033, 0x00000007,
0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -9261,7 +18548,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002C010,
0x0EF, 0x00000000,
- 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
0x0EF, 0x00020000,
0x033, 0x00000000,
0x03E, 0x00001C86,
@@ -9288,8 +18575,155 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000000,
0x03F, 0x0002F81C,
0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
0x03E, 0x00001C86,
0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
0x033, 0x00000009,
0x03E, 0x00001C02,
0x03F, 0x00020000,
@@ -9576,6 +19010,26 @@ static const u32 rtw8822c_rf_b[] = {
0x063, 0x00000002,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x00000002,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
0xA0000000, 0x00000000,
0x063, 0x00000C02,
0xB0000000, 0x00000000,
@@ -9796,6 +19250,276 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00017239,
0x030, 0x00018209,
0x030, 0x00019239,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000238,
+ 0x030, 0x00001238,
+ 0x030, 0x00002238,
+ 0x030, 0x00003238,
+ 0x030, 0x00004228,
+ 0x030, 0x00005238,
+ 0x030, 0x00006238,
+ 0x030, 0x00007238,
+ 0x030, 0x00008228,
+ 0x030, 0x00009238,
+ 0x030, 0x0000A238,
+ 0x030, 0x0000B238,
+ 0x030, 0x0000C238,
+ 0x030, 0x0000D238,
+ 0x030, 0x0000E228,
+ 0x030, 0x0000F238,
+ 0x030, 0x00010238,
+ 0x030, 0x00011238,
+ 0x030, 0x00012228,
+ 0x030, 0x00013238,
+ 0x030, 0x00014238,
+ 0x030, 0x00015238,
+ 0x030, 0x00016228,
+ 0x030, 0x00017238,
+ 0x030, 0x00018228,
+ 0x030, 0x00019238,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000239,
+ 0x030, 0x00001239,
+ 0x030, 0x00002239,
+ 0x030, 0x00003239,
+ 0x030, 0x00004239,
+ 0x030, 0x00005239,
+ 0x030, 0x00006239,
+ 0x030, 0x00007239,
+ 0x030, 0x00008239,
+ 0x030, 0x00009239,
+ 0x030, 0x0000A239,
+ 0x030, 0x0000B239,
+ 0x030, 0x0000C239,
+ 0x030, 0x0000D239,
+ 0x030, 0x0000E209,
+ 0x030, 0x0000F239,
+ 0x030, 0x00010239,
+ 0x030, 0x00011239,
+ 0x030, 0x00012209,
+ 0x030, 0x00013239,
+ 0x030, 0x00014239,
+ 0x030, 0x00015239,
+ 0x030, 0x00016209,
+ 0x030, 0x00017239,
+ 0x030, 0x00018209,
+ 0x030, 0x00019239,
0xA0000000, 0x00000000,
0x030, 0x00000233,
0x030, 0x00001233,
@@ -9930,6 +19654,136 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x00009334,
0x030, 0x0000A334,
0x030, 0x0000B334,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
0xA0000000, 0x00000000,
0x030, 0x00000232,
0x030, 0x00001232,
@@ -9957,6 +19811,99 @@ static const u32 rtw8822c_rf_b[] = {
0x030, 0x0000C330,
0x0EF, 0x00000000,
0x0EE, 0x00010000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000200,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000201,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000204,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000205,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000206,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000207,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000077,
+ 0xA0000000, 0x00000000,
0x033, 0x00000200,
0x03F, 0x0000006A,
0x033, 0x00000201,
@@ -9979,6 +19926,100 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000CF4,
0x033, 0x0000020A,
0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000280,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000281,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000284,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000285,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000286,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000287,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000077,
+ 0xA0000000, 0x00000000,
0x033, 0x00000280,
0x03F, 0x0000006A,
0x033, 0x00000281,
@@ -10001,6 +20042,104 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000CF4,
0x033, 0x0000028A,
0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000300,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000301,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000304,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000305,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000034,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000053,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000056,
+ 0x033, 0x0000030A,
+ 0x03F, 0x000000D1,
+ 0x0EE, 0x00000000,
+ 0xA0000000, 0x00000000,
0x033, 0x00000300,
0x03F, 0x0000006A,
0x033, 0x00000301,
@@ -10024,6 +20163,7 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x0000030A,
0x03F, 0x00000CF7,
0x0EE, 0x00000000,
+ 0xB0000000, 0x00000000,
0x051, 0x0003C800,
0x81000001, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
@@ -10041,6 +20181,26 @@ static const u32 rtw8822c_rf_b[] = {
0x052, 0x000902CA,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x052, 0x000902CA,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
0xA0000000, 0x00000000,
0x052, 0x000942C0,
0xB0000000, 0x00000000,
@@ -10057,6 +20217,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10076,6 +20256,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10088,6 +20288,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10107,6 +20327,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10128,6 +20368,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10140,6 +20400,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10159,6 +20439,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10171,6 +20471,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10190,6 +20510,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10211,6 +20551,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10223,6 +20583,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10242,6 +20622,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10254,6 +20654,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10273,6 +20693,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10294,6 +20734,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x0000C246,
0xB0000000, 0x00000000,
@@ -10306,6 +20766,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10325,6 +20805,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10337,6 +20837,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10356,6 +20876,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10377,6 +20917,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10389,6 +20949,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10408,6 +20988,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10420,6 +21020,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10439,6 +21059,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000241C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000241C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000241C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10460,6 +21100,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002C246,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002C246,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10472,6 +21132,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10484,13 +21164,33 @@ static const u32 rtw8822c_rf_b[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10503,6 +21203,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10515,13 +21235,33 @@ static const u32 rtw8822c_rf_b[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x000241C6,
+ 0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10536,13 +21276,33 @@ static const u32 rtw8822c_rf_b[] = {
0x92000002, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x00024246,
0x93000001, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x0002C246,
+ 0x03F, 0x0002CA46,
0x93000002, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x0002C246,
+ 0x03F, 0x0002CA46,
0x93000003, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x0002C246,
+ 0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
- 0x03F, 0x0002C246,
+ 0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10555,6 +21315,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10574,6 +21354,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10586,6 +21386,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10605,6 +21425,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10626,6 +21466,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10638,6 +21498,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10657,6 +21537,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10669,6 +21569,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10688,6 +21608,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10709,6 +21649,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10721,6 +21681,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10740,6 +21720,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10752,6 +21752,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10771,6 +21791,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10792,6 +21832,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10804,6 +21864,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10823,6 +21903,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10835,6 +21935,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10854,6 +21974,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10875,6 +22015,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10887,6 +22047,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10906,6 +22086,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10918,6 +22118,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10937,6 +22157,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10958,6 +22198,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -10970,6 +22230,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -10989,6 +22269,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11001,6 +22301,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11020,6 +22340,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11032,6 +22372,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000020,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000020,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000020,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11051,6 +22411,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11063,6 +22443,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11082,6 +22482,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11094,6 +22514,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11113,6 +22553,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11134,6 +22594,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11146,6 +22626,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11165,6 +22665,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11177,6 +22697,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03E, 0x00000030,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03E, 0x00000030,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00000030,
0xA0000000, 0x00000000,
0x03E, 0x00000020,
0xB0000000, 0x00000000,
@@ -11196,6 +22736,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x000209C6,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x000209C6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000209C6,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11217,6 +22777,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0002CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0002CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11238,6 +22818,26 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x0001CA46,
0x93000004, 0x00000000, 0x40000000, 0x00000000,
0x03F, 0x0001CA46,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
0xA0000000, 0x00000000,
0x03F, 0x00008E46,
0xB0000000, 0x00000000,
@@ -11427,6 +23027,236 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000006A,
0x03F, 0x00000DF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000060,
0x03F, 0x00000487,
@@ -11635,6 +23465,236 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF4,
0x033, 0x0000002A,
0x03F, 0x00000DF7,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000015, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x94000016, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000467,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000867,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000908,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D09,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D49,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
0xA0000000, 0x00000000,
0x033, 0x00000020,
0x03F, 0x00000487,
@@ -11660,7 +23720,7 @@ static const u32 rtw8822c_rf_b[] = {
0x03F, 0x00000DF7,
0xB0000000, 0x00000000,
0x0EE, 0x00000000,
- 0x05C, 0x000FCC00,
+ 0x05C, 0x000FC000,
0x067, 0x0000A505,
0x0D3, 0x00000542,
0x043, 0x00005000,
@@ -11710,6 +23770,10 @@ static const u32 rtw8822c_rf_b[] = {
0x033, 0x00000007,
0x03F, 0x00000002,
0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000001,
+ 0x03F, 0x000916BF,
+ 0x0EF, 0x00000000,
};
RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B);
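
[Editor's note] The rtw8822c_rf_b table above, like the other rtw88 tables in this diff, interleaves plain (register, value) pairs with condition markers: a 0x8xxxxxxx/0x9xxxxxxx word opens a conditional branch (each followed in this table by a fixed-looking 0x40000000, 0x00000000 pair), 0xA0000000 introduces the default branch, and 0xB0000000 closes the region. That is why each newly supported chip variant (0x93000005, 0x94000001, ...) adds one more branch per register throughout this diff. Below is a minimal standalone C sketch of that if/else/endif walk, inferred from the table layout itself; walk_cond_table(), apply_rf_reg(), and the condition key are hypothetical names for illustration, not the in-tree rtw88 parser.

    /*
     * Hedged sketch, not the driver's parser: walk condition-prefixed
     * (addr, data) pairs as laid out in rtw8822c_rf_b above.
     * Inferred record layout:
     *   0x8xxxxxxx/0x9xxxxxxx, 0, 0x40000000, 0  -> "if cond" (4 words)
     *   0xA0000000, 0                            -> "else"    (2 words)
     *   0xB0000000, 0                            -> "endif"   (2 words)
     *   anything else                            -> (register, value) pair
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    typedef uint32_t u32;

    /* Hypothetical sink for a register write that survives the walk. */
    static void apply_rf_reg(u32 addr, u32 data)
    {
    	printf("RF write: 0x%03X <- 0x%08X\n", addr, data);
    }

    /*
     * Apply only the branch whose condition word matches cond_key
     * (assumed here to encode the chip cut/RFE variant in the low
     * 28 bits, e.g. 0x04000001 for the 0x94000001 branches).
     */
    static void walk_cond_table(const u32 *tbl, size_t n, u32 cond_key)
    {
    	int matched = 0;    /* a branch in this region already matched */
    	int active = 1;     /* writes in the current branch are applied */
    	size_t i = 0;

    	while (i + 1 < n) {
    		u32 a = tbl[i], d = tbl[i + 1];
    		u32 top = a & 0xF0000000;

    		if (top == 0x80000000 || top == 0x90000000) {
    			/* condition record: match at most one branch,
    			 * then skip the trailing 0x40000000, 0 pair */
    			active = !matched && (a & 0x0FFFFFFF) == cond_key;
    			matched |= active;
    			i += 4;
    			continue;
    		}
    		if (a == 0xA0000000) {      /* default branch */
    			active = !matched;
    			i += 2;
    			continue;
    		}
    		if (a == 0xB0000000) {      /* end of region */
    			matched = 0;
    			active = 1;
    			i += 2;
    			continue;
    		}
    		if (active)
    			apply_rf_reg(a, d);
    		i += 2;
    	}
    }

For instance, walk_cond_table(rtw8822c_rf_b, ARRAY_SIZE(rtw8822c_rf_b), 0x04000001) would apply the 0x94000001 branches plus all unconditional writes, mirroring how one cut/RFE combination selects exactly one branch per region while every other variant's data is skipped over.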
@@ -11717,394 +23781,1961 @@ RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B);
static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 0, 0, 0, 0, 1, 72, },
{ 2, 0, 0, 0, 1, 60, },
+ { 1, 0, 0, 0, 1, 68, },
+ { 3, 0, 0, 0, 1, 72, },
+ { 4, 0, 0, 0, 1, 76, },
+ { 5, 0, 0, 0, 1, 60, },
+ { 6, 0, 0, 0, 1, 72, },
+ { 7, 0, 0, 0, 1, 60, },
+ { 8, 0, 0, 0, 1, 72, },
+ { 9, 0, 0, 0, 1, 60, },
{ 0, 0, 0, 0, 2, 72, },
{ 2, 0, 0, 0, 2, 60, },
+ { 1, 0, 0, 0, 2, 68, },
+ { 3, 0, 0, 0, 2, 72, },
+ { 4, 0, 0, 0, 2, 76, },
+ { 5, 0, 0, 0, 2, 60, },
+ { 6, 0, 0, 0, 2, 72, },
+ { 7, 0, 0, 0, 2, 60, },
+ { 8, 0, 0, 0, 2, 72, },
+ { 9, 0, 0, 0, 2, 60, },
{ 0, 0, 0, 0, 3, 76, },
{ 2, 0, 0, 0, 3, 60, },
+ { 1, 0, 0, 0, 3, 68, },
+ { 3, 0, 0, 0, 3, 76, },
+ { 4, 0, 0, 0, 3, 76, },
+ { 5, 0, 0, 0, 3, 60, },
+ { 6, 0, 0, 0, 3, 76, },
+ { 7, 0, 0, 0, 3, 60, },
+ { 8, 0, 0, 0, 3, 76, },
+ { 9, 0, 0, 0, 3, 60, },
{ 0, 0, 0, 0, 4, 76, },
{ 2, 0, 0, 0, 4, 60, },
+ { 1, 0, 0, 0, 4, 68, },
+ { 3, 0, 0, 0, 4, 76, },
+ { 4, 0, 0, 0, 4, 76, },
+ { 5, 0, 0, 0, 4, 60, },
+ { 6, 0, 0, 0, 4, 76, },
+ { 7, 0, 0, 0, 4, 60, },
+ { 8, 0, 0, 0, 4, 76, },
+ { 9, 0, 0, 0, 4, 60, },
{ 0, 0, 0, 0, 5, 76, },
{ 2, 0, 0, 0, 5, 60, },
+ { 1, 0, 0, 0, 5, 68, },
+ { 3, 0, 0, 0, 5, 76, },
+ { 4, 0, 0, 0, 5, 76, },
+ { 5, 0, 0, 0, 5, 60, },
+ { 6, 0, 0, 0, 5, 76, },
+ { 7, 0, 0, 0, 5, 60, },
+ { 8, 0, 0, 0, 5, 76, },
+ { 9, 0, 0, 0, 5, 60, },
{ 0, 0, 0, 0, 6, 76, },
{ 2, 0, 0, 0, 6, 60, },
+ { 1, 0, 0, 0, 6, 68, },
+ { 3, 0, 0, 0, 6, 76, },
+ { 4, 0, 0, 0, 6, 76, },
+ { 5, 0, 0, 0, 6, 60, },
+ { 6, 0, 0, 0, 6, 76, },
+ { 7, 0, 0, 0, 6, 60, },
+ { 8, 0, 0, 0, 6, 76, },
+ { 9, 0, 0, 0, 6, 60, },
{ 0, 0, 0, 0, 7, 76, },
{ 2, 0, 0, 0, 7, 60, },
+ { 1, 0, 0, 0, 7, 68, },
+ { 3, 0, 0, 0, 7, 76, },
+ { 4, 0, 0, 0, 7, 76, },
+ { 5, 0, 0, 0, 7, 60, },
+ { 6, 0, 0, 0, 7, 76, },
+ { 7, 0, 0, 0, 7, 60, },
+ { 8, 0, 0, 0, 7, 76, },
+ { 9, 0, 0, 0, 7, 60, },
{ 0, 0, 0, 0, 8, 76, },
{ 2, 0, 0, 0, 8, 60, },
+ { 1, 0, 0, 0, 8, 68, },
+ { 3, 0, 0, 0, 8, 76, },
+ { 4, 0, 0, 0, 8, 76, },
+ { 5, 0, 0, 0, 8, 60, },
+ { 6, 0, 0, 0, 8, 76, },
+ { 7, 0, 0, 0, 8, 60, },
+ { 8, 0, 0, 0, 8, 76, },
+ { 9, 0, 0, 0, 8, 60, },
{ 0, 0, 0, 0, 9, 76, },
{ 2, 0, 0, 0, 9, 60, },
+ { 1, 0, 0, 0, 9, 68, },
+ { 3, 0, 0, 0, 9, 76, },
+ { 4, 0, 0, 0, 9, 76, },
+ { 5, 0, 0, 0, 9, 60, },
+ { 6, 0, 0, 0, 9, 76, },
+ { 7, 0, 0, 0, 9, 60, },
+ { 8, 0, 0, 0, 9, 76, },
+ { 9, 0, 0, 0, 9, 60, },
{ 0, 0, 0, 0, 10, 72, },
{ 2, 0, 0, 0, 10, 60, },
+ { 1, 0, 0, 0, 10, 68, },
+ { 3, 0, 0, 0, 10, 72, },
+ { 4, 0, 0, 0, 10, 76, },
+ { 5, 0, 0, 0, 10, 60, },
+ { 6, 0, 0, 0, 10, 72, },
+ { 7, 0, 0, 0, 10, 60, },
+ { 8, 0, 0, 0, 10, 72, },
+ { 9, 0, 0, 0, 10, 60, },
{ 0, 0, 0, 0, 11, 72, },
{ 2, 0, 0, 0, 11, 60, },
+ { 1, 0, 0, 0, 11, 68, },
+ { 3, 0, 0, 0, 11, 72, },
+ { 4, 0, 0, 0, 11, 76, },
+ { 5, 0, 0, 0, 11, 60, },
+ { 6, 0, 0, 0, 11, 72, },
+ { 7, 0, 0, 0, 11, 60, },
+ { 8, 0, 0, 0, 11, 72, },
+ { 9, 0, 0, 0, 11, 60, },
{ 0, 0, 0, 0, 12, 52, },
{ 2, 0, 0, 0, 12, 60, },
+ { 1, 0, 0, 0, 12, 68, },
+ { 3, 0, 0, 0, 12, 52, },
+ { 4, 0, 0, 0, 12, 76, },
+ { 5, 0, 0, 0, 12, 60, },
+ { 6, 0, 0, 0, 12, 52, },
+ { 7, 0, 0, 0, 12, 60, },
+ { 8, 0, 0, 0, 12, 52, },
+ { 9, 0, 0, 0, 12, 60, },
{ 0, 0, 0, 0, 13, 48, },
{ 2, 0, 0, 0, 13, 60, },
+ { 1, 0, 0, 0, 13, 68, },
+ { 3, 0, 0, 0, 13, 48, },
+ { 4, 0, 0, 0, 13, 76, },
+ { 5, 0, 0, 0, 13, 60, },
+ { 6, 0, 0, 0, 13, 48, },
+ { 7, 0, 0, 0, 13, 60, },
+ { 8, 0, 0, 0, 13, 48, },
+ { 9, 0, 0, 0, 13, 60, },
{ 0, 0, 0, 0, 14, 127, },
{ 2, 0, 0, 0, 14, 127, },
+ { 1, 0, 0, 0, 14, 68, },
+ { 3, 0, 0, 0, 14, 127, },
+ { 4, 0, 0, 0, 14, 127, },
+ { 5, 0, 0, 0, 14, 127, },
+ { 6, 0, 0, 0, 14, 127, },
+ { 7, 0, 0, 0, 14, 127, },
+ { 8, 0, 0, 0, 14, 127, },
+ { 9, 0, 0, 0, 14, 127, },
{ 0, 0, 0, 1, 1, 52, },
{ 2, 0, 0, 1, 1, 60, },
+ { 1, 0, 0, 1, 1, 76, },
+ { 3, 0, 0, 1, 1, 52, },
+ { 4, 0, 0, 1, 1, 76, },
+ { 5, 0, 0, 1, 1, 60, },
+ { 6, 0, 0, 1, 1, 52, },
+ { 7, 0, 0, 1, 1, 60, },
+ { 8, 0, 0, 1, 1, 52, },
+ { 9, 0, 0, 1, 1, 60, },
{ 0, 0, 0, 1, 2, 60, },
{ 2, 0, 0, 1, 2, 60, },
+ { 1, 0, 0, 1, 2, 76, },
+ { 3, 0, 0, 1, 2, 60, },
+ { 4, 0, 0, 1, 2, 76, },
+ { 5, 0, 0, 1, 2, 60, },
+ { 6, 0, 0, 1, 2, 60, },
+ { 7, 0, 0, 1, 2, 60, },
+ { 8, 0, 0, 1, 2, 60, },
+ { 9, 0, 0, 1, 2, 60, },
{ 0, 0, 0, 1, 3, 64, },
{ 2, 0, 0, 1, 3, 60, },
+ { 1, 0, 0, 1, 3, 76, },
+ { 3, 0, 0, 1, 3, 64, },
+ { 4, 0, 0, 1, 3, 76, },
+ { 5, 0, 0, 1, 3, 60, },
+ { 6, 0, 0, 1, 3, 64, },
+ { 7, 0, 0, 1, 3, 60, },
+ { 8, 0, 0, 1, 3, 64, },
+ { 9, 0, 0, 1, 3, 60, },
{ 0, 0, 0, 1, 4, 68, },
{ 2, 0, 0, 1, 4, 60, },
+ { 1, 0, 0, 1, 4, 76, },
+ { 3, 0, 0, 1, 4, 68, },
+ { 4, 0, 0, 1, 4, 76, },
+ { 5, 0, 0, 1, 4, 60, },
+ { 6, 0, 0, 1, 4, 68, },
+ { 7, 0, 0, 1, 4, 60, },
+ { 8, 0, 0, 1, 4, 68, },
+ { 9, 0, 0, 1, 4, 60, },
{ 0, 0, 0, 1, 5, 76, },
{ 2, 0, 0, 1, 5, 60, },
+ { 1, 0, 0, 1, 5, 76, },
+ { 3, 0, 0, 1, 5, 76, },
+ { 4, 0, 0, 1, 5, 76, },
+ { 5, 0, 0, 1, 5, 60, },
+ { 6, 0, 0, 1, 5, 76, },
+ { 7, 0, 0, 1, 5, 60, },
+ { 8, 0, 0, 1, 5, 76, },
+ { 9, 0, 0, 1, 5, 60, },
{ 0, 0, 0, 1, 6, 76, },
{ 2, 0, 0, 1, 6, 60, },
+ { 1, 0, 0, 1, 6, 76, },
+ { 3, 0, 0, 1, 6, 76, },
+ { 4, 0, 0, 1, 6, 76, },
+ { 5, 0, 0, 1, 6, 60, },
+ { 6, 0, 0, 1, 6, 76, },
+ { 7, 0, 0, 1, 6, 60, },
+ { 8, 0, 0, 1, 6, 76, },
+ { 9, 0, 0, 1, 6, 60, },
{ 0, 0, 0, 1, 7, 76, },
{ 2, 0, 0, 1, 7, 60, },
+ { 1, 0, 0, 1, 7, 76, },
+ { 3, 0, 0, 1, 7, 76, },
+ { 4, 0, 0, 1, 7, 76, },
+ { 5, 0, 0, 1, 7, 60, },
+ { 6, 0, 0, 1, 7, 76, },
+ { 7, 0, 0, 1, 7, 60, },
+ { 8, 0, 0, 1, 7, 76, },
+ { 9, 0, 0, 1, 7, 60, },
{ 0, 0, 0, 1, 8, 68, },
{ 2, 0, 0, 1, 8, 60, },
+ { 1, 0, 0, 1, 8, 76, },
+ { 3, 0, 0, 1, 8, 68, },
+ { 4, 0, 0, 1, 8, 76, },
+ { 5, 0, 0, 1, 8, 60, },
+ { 6, 0, 0, 1, 8, 68, },
+ { 7, 0, 0, 1, 8, 60, },
+ { 8, 0, 0, 1, 8, 68, },
+ { 9, 0, 0, 1, 8, 60, },
{ 0, 0, 0, 1, 9, 64, },
{ 2, 0, 0, 1, 9, 60, },
+ { 1, 0, 0, 1, 9, 76, },
+ { 3, 0, 0, 1, 9, 64, },
+ { 4, 0, 0, 1, 9, 76, },
+ { 5, 0, 0, 1, 9, 60, },
+ { 6, 0, 0, 1, 9, 64, },
+ { 7, 0, 0, 1, 9, 60, },
+ { 8, 0, 0, 1, 9, 64, },
+ { 9, 0, 0, 1, 9, 60, },
{ 0, 0, 0, 1, 10, 60, },
{ 2, 0, 0, 1, 10, 60, },
+ { 1, 0, 0, 1, 10, 76, },
+ { 3, 0, 0, 1, 10, 60, },
+ { 4, 0, 0, 1, 10, 76, },
+ { 5, 0, 0, 1, 10, 60, },
+ { 6, 0, 0, 1, 10, 60, },
+ { 7, 0, 0, 1, 10, 60, },
+ { 8, 0, 0, 1, 10, 60, },
+ { 9, 0, 0, 1, 10, 60, },
{ 0, 0, 0, 1, 11, 52, },
{ 2, 0, 0, 1, 11, 60, },
+ { 1, 0, 0, 1, 11, 76, },
+ { 3, 0, 0, 1, 11, 52, },
+ { 4, 0, 0, 1, 11, 76, },
+ { 5, 0, 0, 1, 11, 60, },
+ { 6, 0, 0, 1, 11, 52, },
+ { 7, 0, 0, 1, 11, 60, },
+ { 8, 0, 0, 1, 11, 52, },
+ { 9, 0, 0, 1, 11, 60, },
{ 0, 0, 0, 1, 12, 40, },
{ 2, 0, 0, 1, 12, 60, },
+ { 1, 0, 0, 1, 12, 76, },
+ { 3, 0, 0, 1, 12, 40, },
+ { 4, 0, 0, 1, 12, 76, },
+ { 5, 0, 0, 1, 12, 60, },
+ { 6, 0, 0, 1, 12, 40, },
+ { 7, 0, 0, 1, 12, 60, },
+ { 8, 0, 0, 1, 12, 40, },
+ { 9, 0, 0, 1, 12, 60, },
{ 0, 0, 0, 1, 13, 28, },
{ 2, 0, 0, 1, 13, 60, },
+ { 1, 0, 0, 1, 13, 76, },
+ { 3, 0, 0, 1, 13, 28, },
+ { 4, 0, 0, 1, 13, 70, },
+ { 5, 0, 0, 1, 13, 60, },
+ { 6, 0, 0, 1, 13, 28, },
+ { 7, 0, 0, 1, 13, 60, },
+ { 8, 0, 0, 1, 13, 28, },
+ { 9, 0, 0, 1, 13, 60, },
{ 0, 0, 0, 1, 14, 127, },
{ 2, 0, 0, 1, 14, 127, },
+ { 1, 0, 0, 1, 14, 127, },
+ { 3, 0, 0, 1, 14, 127, },
+ { 4, 0, 0, 1, 14, 127, },
+ { 5, 0, 0, 1, 14, 127, },
+ { 6, 0, 0, 1, 14, 127, },
+ { 7, 0, 0, 1, 14, 127, },
+ { 8, 0, 0, 1, 14, 127, },
+ { 9, 0, 0, 1, 14, 127, },
{ 0, 0, 0, 2, 1, 52, },
{ 2, 0, 0, 2, 1, 60, },
+ { 1, 0, 0, 2, 1, 76, },
+ { 3, 0, 0, 2, 1, 52, },
+ { 4, 0, 0, 2, 1, 76, },
+ { 5, 0, 0, 2, 1, 60, },
+ { 6, 0, 0, 2, 1, 52, },
+ { 7, 0, 0, 2, 1, 60, },
+ { 8, 0, 0, 2, 1, 52, },
+ { 9, 0, 0, 2, 1, 60, },
{ 0, 0, 0, 2, 2, 60, },
{ 2, 0, 0, 2, 2, 60, },
+ { 1, 0, 0, 2, 2, 76, },
+ { 3, 0, 0, 2, 2, 60, },
+ { 4, 0, 0, 2, 2, 76, },
+ { 5, 0, 0, 2, 2, 60, },
+ { 6, 0, 0, 2, 2, 60, },
+ { 7, 0, 0, 2, 2, 60, },
+ { 8, 0, 0, 2, 2, 60, },
+ { 9, 0, 0, 2, 2, 60, },
{ 0, 0, 0, 2, 3, 64, },
{ 2, 0, 0, 2, 3, 60, },
+ { 1, 0, 0, 2, 3, 76, },
+ { 3, 0, 0, 2, 3, 64, },
+ { 4, 0, 0, 2, 3, 76, },
+ { 5, 0, 0, 2, 3, 60, },
+ { 6, 0, 0, 2, 3, 64, },
+ { 7, 0, 0, 2, 3, 60, },
+ { 8, 0, 0, 2, 3, 64, },
+ { 9, 0, 0, 2, 3, 60, },
{ 0, 0, 0, 2, 4, 68, },
{ 2, 0, 0, 2, 4, 60, },
+ { 1, 0, 0, 2, 4, 76, },
+ { 3, 0, 0, 2, 4, 68, },
+ { 4, 0, 0, 2, 4, 76, },
+ { 5, 0, 0, 2, 4, 60, },
+ { 6, 0, 0, 2, 4, 68, },
+ { 7, 0, 0, 2, 4, 60, },
+ { 8, 0, 0, 2, 4, 68, },
+ { 9, 0, 0, 2, 4, 60, },
{ 0, 0, 0, 2, 5, 76, },
{ 2, 0, 0, 2, 5, 60, },
+ { 1, 0, 0, 2, 5, 76, },
+ { 3, 0, 0, 2, 5, 76, },
+ { 4, 0, 0, 2, 5, 76, },
+ { 5, 0, 0, 2, 5, 60, },
+ { 6, 0, 0, 2, 5, 76, },
+ { 7, 0, 0, 2, 5, 60, },
+ { 8, 0, 0, 2, 5, 76, },
+ { 9, 0, 0, 2, 5, 60, },
{ 0, 0, 0, 2, 6, 76, },
{ 2, 0, 0, 2, 6, 60, },
+ { 1, 0, 0, 2, 6, 76, },
+ { 3, 0, 0, 2, 6, 76, },
+ { 4, 0, 0, 2, 6, 76, },
+ { 5, 0, 0, 2, 6, 60, },
+ { 6, 0, 0, 2, 6, 76, },
+ { 7, 0, 0, 2, 6, 60, },
+ { 8, 0, 0, 2, 6, 76, },
+ { 9, 0, 0, 2, 6, 60, },
{ 0, 0, 0, 2, 7, 76, },
{ 2, 0, 0, 2, 7, 60, },
+ { 1, 0, 0, 2, 7, 76, },
+ { 3, 0, 0, 2, 7, 76, },
+ { 4, 0, 0, 2, 7, 76, },
+ { 5, 0, 0, 2, 7, 60, },
+ { 6, 0, 0, 2, 7, 76, },
+ { 7, 0, 0, 2, 7, 60, },
+ { 8, 0, 0, 2, 7, 76, },
+ { 9, 0, 0, 2, 7, 60, },
{ 0, 0, 0, 2, 8, 68, },
{ 2, 0, 0, 2, 8, 60, },
+ { 1, 0, 0, 2, 8, 76, },
+ { 3, 0, 0, 2, 8, 68, },
+ { 4, 0, 0, 2, 8, 76, },
+ { 5, 0, 0, 2, 8, 60, },
+ { 6, 0, 0, 2, 8, 68, },
+ { 7, 0, 0, 2, 8, 60, },
+ { 8, 0, 0, 2, 8, 68, },
+ { 9, 0, 0, 2, 8, 60, },
{ 0, 0, 0, 2, 9, 64, },
{ 2, 0, 0, 2, 9, 60, },
+ { 1, 0, 0, 2, 9, 76, },
+ { 3, 0, 0, 2, 9, 64, },
+ { 4, 0, 0, 2, 9, 76, },
+ { 5, 0, 0, 2, 9, 60, },
+ { 6, 0, 0, 2, 9, 64, },
+ { 7, 0, 0, 2, 9, 60, },
+ { 8, 0, 0, 2, 9, 64, },
+ { 9, 0, 0, 2, 9, 60, },
{ 0, 0, 0, 2, 10, 60, },
{ 2, 0, 0, 2, 10, 60, },
+ { 1, 0, 0, 2, 10, 76, },
+ { 3, 0, 0, 2, 10, 60, },
+ { 4, 0, 0, 2, 10, 76, },
+ { 5, 0, 0, 2, 10, 60, },
+ { 6, 0, 0, 2, 10, 60, },
+ { 7, 0, 0, 2, 10, 60, },
+ { 8, 0, 0, 2, 10, 60, },
+ { 9, 0, 0, 2, 10, 60, },
{ 0, 0, 0, 2, 11, 52, },
{ 2, 0, 0, 2, 11, 60, },
+ { 1, 0, 0, 2, 11, 76, },
+ { 3, 0, 0, 2, 11, 52, },
+ { 4, 0, 0, 2, 11, 76, },
+ { 5, 0, 0, 2, 11, 60, },
+ { 6, 0, 0, 2, 11, 52, },
+ { 7, 0, 0, 2, 11, 60, },
+ { 8, 0, 0, 2, 11, 52, },
+ { 9, 0, 0, 2, 11, 60, },
{ 0, 0, 0, 2, 12, 40, },
{ 2, 0, 0, 2, 12, 60, },
+ { 1, 0, 0, 2, 12, 76, },
+ { 3, 0, 0, 2, 12, 40, },
+ { 4, 0, 0, 2, 12, 76, },
+ { 5, 0, 0, 2, 12, 60, },
+ { 6, 0, 0, 2, 12, 40, },
+ { 7, 0, 0, 2, 12, 60, },
+ { 8, 0, 0, 2, 12, 40, },
+ { 9, 0, 0, 2, 12, 60, },
{ 0, 0, 0, 2, 13, 28, },
{ 2, 0, 0, 2, 13, 60, },
+ { 1, 0, 0, 2, 13, 76, },
+ { 3, 0, 0, 2, 13, 28, },
+ { 4, 0, 0, 2, 13, 72, },
+ { 5, 0, 0, 2, 13, 60, },
+ { 6, 0, 0, 2, 13, 28, },
+ { 7, 0, 0, 2, 13, 60, },
+ { 8, 0, 0, 2, 13, 28, },
+ { 9, 0, 0, 2, 13, 60, },
{ 0, 0, 0, 2, 14, 127, },
{ 2, 0, 0, 2, 14, 127, },
+ { 1, 0, 0, 2, 14, 127, },
+ { 3, 0, 0, 2, 14, 127, },
+ { 4, 0, 0, 2, 14, 127, },
+ { 5, 0, 0, 2, 14, 127, },
+ { 6, 0, 0, 2, 14, 127, },
+ { 7, 0, 0, 2, 14, 127, },
+ { 8, 0, 0, 2, 14, 127, },
+ { 9, 0, 0, 2, 14, 127, },
{ 0, 0, 0, 3, 1, 52, },
{ 2, 0, 0, 3, 1, 36, },
+ { 1, 0, 0, 3, 1, 66, },
+ { 3, 0, 0, 3, 1, 52, },
+ { 4, 0, 0, 3, 1, 68, },
+ { 5, 0, 0, 3, 1, 36, },
+ { 6, 0, 0, 3, 1, 52, },
+ { 7, 0, 0, 3, 1, 36, },
+ { 8, 0, 0, 3, 1, 52, },
+ { 9, 0, 0, 3, 1, 36, },
{ 0, 0, 0, 3, 2, 60, },
{ 2, 0, 0, 3, 2, 36, },
+ { 1, 0, 0, 3, 2, 66, },
+ { 3, 0, 0, 3, 2, 60, },
+ { 4, 0, 0, 3, 2, 70, },
+ { 5, 0, 0, 3, 2, 36, },
+ { 6, 0, 0, 3, 2, 60, },
+ { 7, 0, 0, 3, 2, 36, },
+ { 8, 0, 0, 3, 2, 60, },
+ { 9, 0, 0, 3, 2, 36, },
{ 0, 0, 0, 3, 3, 64, },
{ 2, 0, 0, 3, 3, 36, },
+ { 1, 0, 0, 3, 3, 66, },
+ { 3, 0, 0, 3, 3, 64, },
+ { 4, 0, 0, 3, 3, 70, },
+ { 5, 0, 0, 3, 3, 36, },
+ { 6, 0, 0, 3, 3, 64, },
+ { 7, 0, 0, 3, 3, 36, },
+ { 8, 0, 0, 3, 3, 64, },
+ { 9, 0, 0, 3, 3, 36, },
{ 0, 0, 0, 3, 4, 68, },
{ 2, 0, 0, 3, 4, 36, },
+ { 1, 0, 0, 3, 4, 66, },
+ { 3, 0, 0, 3, 4, 68, },
+ { 4, 0, 0, 3, 4, 70, },
+ { 5, 0, 0, 3, 4, 36, },
+ { 6, 0, 0, 3, 4, 68, },
+ { 7, 0, 0, 3, 4, 36, },
+ { 8, 0, 0, 3, 4, 68, },
+ { 9, 0, 0, 3, 4, 36, },
{ 0, 0, 0, 3, 5, 76, },
{ 2, 0, 0, 3, 5, 36, },
+ { 1, 0, 0, 3, 5, 66, },
+ { 3, 0, 0, 3, 5, 76, },
+ { 4, 0, 0, 3, 5, 70, },
+ { 5, 0, 0, 3, 5, 36, },
+ { 6, 0, 0, 3, 5, 76, },
+ { 7, 0, 0, 3, 5, 36, },
+ { 8, 0, 0, 3, 5, 76, },
+ { 9, 0, 0, 3, 5, 36, },
{ 0, 0, 0, 3, 6, 76, },
{ 2, 0, 0, 3, 6, 36, },
+ { 1, 0, 0, 3, 6, 66, },
+ { 3, 0, 0, 3, 6, 76, },
+ { 4, 0, 0, 3, 6, 70, },
+ { 5, 0, 0, 3, 6, 36, },
+ { 6, 0, 0, 3, 6, 76, },
+ { 7, 0, 0, 3, 6, 36, },
+ { 8, 0, 0, 3, 6, 76, },
+ { 9, 0, 0, 3, 6, 36, },
{ 0, 0, 0, 3, 7, 76, },
{ 2, 0, 0, 3, 7, 36, },
+ { 1, 0, 0, 3, 7, 66, },
+ { 3, 0, 0, 3, 7, 76, },
+ { 4, 0, 0, 3, 7, 70, },
+ { 5, 0, 0, 3, 7, 36, },
+ { 6, 0, 0, 3, 7, 76, },
+ { 7, 0, 0, 3, 7, 36, },
+ { 8, 0, 0, 3, 7, 76, },
+ { 9, 0, 0, 3, 7, 36, },
{ 0, 0, 0, 3, 8, 68, },
{ 2, 0, 0, 3, 8, 36, },
+ { 1, 0, 0, 3, 8, 66, },
+ { 3, 0, 0, 3, 8, 68, },
+ { 4, 0, 0, 3, 8, 70, },
+ { 5, 0, 0, 3, 8, 36, },
+ { 6, 0, 0, 3, 8, 68, },
+ { 7, 0, 0, 3, 8, 36, },
+ { 8, 0, 0, 3, 8, 68, },
+ { 9, 0, 0, 3, 8, 36, },
{ 0, 0, 0, 3, 9, 64, },
{ 2, 0, 0, 3, 9, 36, },
+ { 1, 0, 0, 3, 9, 66, },
+ { 3, 0, 0, 3, 9, 64, },
+ { 4, 0, 0, 3, 9, 70, },
+ { 5, 0, 0, 3, 9, 36, },
+ { 6, 0, 0, 3, 9, 64, },
+ { 7, 0, 0, 3, 9, 36, },
+ { 8, 0, 0, 3, 9, 64, },
+ { 9, 0, 0, 3, 9, 36, },
{ 0, 0, 0, 3, 10, 60, },
{ 2, 0, 0, 3, 10, 36, },
+ { 1, 0, 0, 3, 10, 66, },
+ { 3, 0, 0, 3, 10, 60, },
+ { 4, 0, 0, 3, 10, 70, },
+ { 5, 0, 0, 3, 10, 36, },
+ { 6, 0, 0, 3, 10, 60, },
+ { 7, 0, 0, 3, 10, 36, },
+ { 8, 0, 0, 3, 10, 60, },
+ { 9, 0, 0, 3, 10, 36, },
{ 0, 0, 0, 3, 11, 52, },
{ 2, 0, 0, 3, 11, 36, },
+ { 1, 0, 0, 3, 11, 66, },
+ { 3, 0, 0, 3, 11, 52, },
+ { 4, 0, 0, 3, 11, 70, },
+ { 5, 0, 0, 3, 11, 36, },
+ { 6, 0, 0, 3, 11, 52, },
+ { 7, 0, 0, 3, 11, 36, },
+ { 8, 0, 0, 3, 11, 52, },
+ { 9, 0, 0, 3, 11, 36, },
{ 0, 0, 0, 3, 12, 40, },
{ 2, 0, 0, 3, 12, 36, },
+ { 1, 0, 0, 3, 12, 66, },
+ { 3, 0, 0, 3, 12, 40, },
+ { 4, 0, 0, 3, 12, 70, },
+ { 5, 0, 0, 3, 12, 36, },
+ { 6, 0, 0, 3, 12, 40, },
+ { 7, 0, 0, 3, 12, 36, },
+ { 8, 0, 0, 3, 12, 40, },
+ { 9, 0, 0, 3, 12, 36, },
{ 0, 0, 0, 3, 13, 28, },
{ 2, 0, 0, 3, 13, 36, },
+ { 1, 0, 0, 3, 13, 66, },
+ { 3, 0, 0, 3, 13, 28, },
+ { 4, 0, 0, 3, 13, 62, },
+ { 5, 0, 0, 3, 13, 36, },
+ { 6, 0, 0, 3, 13, 28, },
+ { 7, 0, 0, 3, 13, 36, },
+ { 8, 0, 0, 3, 13, 28, },
+ { 9, 0, 0, 3, 13, 36, },
{ 0, 0, 0, 3, 14, 127, },
{ 2, 0, 0, 3, 14, 127, },
+ { 1, 0, 0, 3, 14, 127, },
+ { 3, 0, 0, 3, 14, 127, },
+ { 4, 0, 0, 3, 14, 127, },
+ { 5, 0, 0, 3, 14, 127, },
+ { 6, 0, 0, 3, 14, 127, },
+ { 7, 0, 0, 3, 14, 127, },
+ { 8, 0, 0, 3, 14, 127, },
+ { 9, 0, 0, 3, 14, 127, },
{ 0, 0, 1, 2, 1, 127, },
{ 2, 0, 1, 2, 1, 127, },
+ { 1, 0, 1, 2, 1, 127, },
+ { 3, 0, 1, 2, 1, 127, },
+ { 4, 0, 1, 2, 1, 127, },
+ { 5, 0, 1, 2, 1, 127, },
+ { 6, 0, 1, 2, 1, 127, },
+ { 7, 0, 1, 2, 1, 127, },
+ { 8, 0, 1, 2, 1, 127, },
+ { 9, 0, 1, 2, 1, 127, },
{ 0, 0, 1, 2, 2, 127, },
{ 2, 0, 1, 2, 2, 127, },
+ { 1, 0, 1, 2, 2, 127, },
+ { 3, 0, 1, 2, 2, 127, },
+ { 4, 0, 1, 2, 2, 127, },
+ { 5, 0, 1, 2, 2, 127, },
+ { 6, 0, 1, 2, 2, 127, },
+ { 7, 0, 1, 2, 2, 127, },
+ { 8, 0, 1, 2, 2, 127, },
+ { 9, 0, 1, 2, 2, 127, },
{ 0, 0, 1, 2, 3, 52, },
{ 2, 0, 1, 2, 3, 60, },
+ { 1, 0, 1, 2, 3, 72, },
+ { 3, 0, 1, 2, 3, 52, },
+ { 4, 0, 1, 2, 3, 72, },
+ { 5, 0, 1, 2, 3, 60, },
+ { 6, 0, 1, 2, 3, 52, },
+ { 7, 0, 1, 2, 3, 60, },
+ { 8, 0, 1, 2, 3, 52, },
+ { 9, 0, 1, 2, 3, 60, },
{ 0, 0, 1, 2, 4, 52, },
{ 2, 0, 1, 2, 4, 60, },
+ { 1, 0, 1, 2, 4, 72, },
+ { 3, 0, 1, 2, 4, 52, },
+ { 4, 0, 1, 2, 4, 72, },
+ { 5, 0, 1, 2, 4, 60, },
+ { 6, 0, 1, 2, 4, 52, },
+ { 7, 0, 1, 2, 4, 60, },
+ { 8, 0, 1, 2, 4, 52, },
+ { 9, 0, 1, 2, 4, 60, },
{ 0, 0, 1, 2, 5, 60, },
{ 2, 0, 1, 2, 5, 60, },
+ { 1, 0, 1, 2, 5, 72, },
+ { 3, 0, 1, 2, 5, 60, },
+ { 4, 0, 1, 2, 5, 72, },
+ { 5, 0, 1, 2, 5, 60, },
+ { 6, 0, 1, 2, 5, 60, },
+ { 7, 0, 1, 2, 5, 60, },
+ { 8, 0, 1, 2, 5, 60, },
+ { 9, 0, 1, 2, 5, 60, },
{ 0, 0, 1, 2, 6, 64, },
{ 2, 0, 1, 2, 6, 60, },
+ { 1, 0, 1, 2, 6, 72, },
+ { 3, 0, 1, 2, 6, 64, },
+ { 4, 0, 1, 2, 6, 72, },
+ { 5, 0, 1, 2, 6, 60, },
+ { 6, 0, 1, 2, 6, 64, },
+ { 7, 0, 1, 2, 6, 60, },
+ { 8, 0, 1, 2, 6, 64, },
+ { 9, 0, 1, 2, 6, 60, },
{ 0, 0, 1, 2, 7, 60, },
{ 2, 0, 1, 2, 7, 60, },
+ { 1, 0, 1, 2, 7, 72, },
+ { 3, 0, 1, 2, 7, 60, },
+ { 4, 0, 1, 2, 7, 72, },
+ { 5, 0, 1, 2, 7, 60, },
+ { 6, 0, 1, 2, 7, 60, },
+ { 7, 0, 1, 2, 7, 60, },
+ { 8, 0, 1, 2, 7, 60, },
+ { 9, 0, 1, 2, 7, 60, },
{ 0, 0, 1, 2, 8, 52, },
{ 2, 0, 1, 2, 8, 60, },
+ { 1, 0, 1, 2, 8, 72, },
+ { 3, 0, 1, 2, 8, 52, },
+ { 4, 0, 1, 2, 8, 72, },
+ { 5, 0, 1, 2, 8, 60, },
+ { 6, 0, 1, 2, 8, 52, },
+ { 7, 0, 1, 2, 8, 60, },
+ { 8, 0, 1, 2, 8, 52, },
+ { 9, 0, 1, 2, 8, 60, },
{ 0, 0, 1, 2, 9, 52, },
{ 2, 0, 1, 2, 9, 60, },
+ { 1, 0, 1, 2, 9, 72, },
+ { 3, 0, 1, 2, 9, 52, },
+ { 4, 0, 1, 2, 9, 72, },
+ { 5, 0, 1, 2, 9, 60, },
+ { 6, 0, 1, 2, 9, 52, },
+ { 7, 0, 1, 2, 9, 60, },
+ { 8, 0, 1, 2, 9, 52, },
+ { 9, 0, 1, 2, 9, 60, },
{ 0, 0, 1, 2, 10, 40, },
{ 2, 0, 1, 2, 10, 60, },
+ { 1, 0, 1, 2, 10, 72, },
+ { 3, 0, 1, 2, 10, 40, },
+ { 4, 0, 1, 2, 10, 72, },
+ { 5, 0, 1, 2, 10, 60, },
+ { 6, 0, 1, 2, 10, 40, },
+ { 7, 0, 1, 2, 10, 60, },
+ { 8, 0, 1, 2, 10, 40, },
+ { 9, 0, 1, 2, 10, 60, },
{ 0, 0, 1, 2, 11, 28, },
{ 2, 0, 1, 2, 11, 60, },
+ { 1, 0, 1, 2, 11, 72, },
+ { 3, 0, 1, 2, 11, 28, },
+ { 4, 0, 1, 2, 11, 70, },
+ { 5, 0, 1, 2, 11, 60, },
+ { 6, 0, 1, 2, 11, 28, },
+ { 7, 0, 1, 2, 11, 60, },
+ { 8, 0, 1, 2, 11, 28, },
+ { 9, 0, 1, 2, 11, 60, },
{ 0, 0, 1, 2, 12, 127, },
{ 2, 0, 1, 2, 12, 127, },
+ { 1, 0, 1, 2, 12, 127, },
+ { 3, 0, 1, 2, 12, 127, },
+ { 4, 0, 1, 2, 12, 127, },
+ { 5, 0, 1, 2, 12, 127, },
+ { 6, 0, 1, 2, 12, 127, },
+ { 7, 0, 1, 2, 12, 127, },
+ { 8, 0, 1, 2, 12, 127, },
+ { 9, 0, 1, 2, 12, 127, },
{ 0, 0, 1, 2, 13, 127, },
{ 2, 0, 1, 2, 13, 127, },
+ { 1, 0, 1, 2, 13, 127, },
+ { 3, 0, 1, 2, 13, 127, },
+ { 4, 0, 1, 2, 13, 127, },
+ { 5, 0, 1, 2, 13, 127, },
+ { 6, 0, 1, 2, 13, 127, },
+ { 7, 0, 1, 2, 13, 127, },
+ { 8, 0, 1, 2, 13, 127, },
+ { 9, 0, 1, 2, 13, 127, },
{ 0, 0, 1, 2, 14, 127, },
{ 2, 0, 1, 2, 14, 127, },
+ { 1, 0, 1, 2, 14, 127, },
+ { 3, 0, 1, 2, 14, 127, },
+ { 4, 0, 1, 2, 14, 127, },
+ { 5, 0, 1, 2, 14, 127, },
+ { 6, 0, 1, 2, 14, 127, },
+ { 7, 0, 1, 2, 14, 127, },
+ { 8, 0, 1, 2, 14, 127, },
+ { 9, 0, 1, 2, 14, 127, },
{ 0, 0, 1, 3, 1, 127, },
{ 2, 0, 1, 3, 1, 127, },
+ { 1, 0, 1, 3, 1, 127, },
+ { 3, 0, 1, 3, 1, 127, },
+ { 4, 0, 1, 3, 1, 127, },
+ { 5, 0, 1, 3, 1, 127, },
+ { 6, 0, 1, 3, 1, 127, },
+ { 7, 0, 1, 3, 1, 127, },
+ { 8, 0, 1, 3, 1, 127, },
+ { 9, 0, 1, 3, 1, 127, },
{ 0, 0, 1, 3, 2, 127, },
{ 2, 0, 1, 3, 2, 127, },
+ { 1, 0, 1, 3, 2, 127, },
+ { 3, 0, 1, 3, 2, 127, },
+ { 4, 0, 1, 3, 2, 127, },
+ { 5, 0, 1, 3, 2, 127, },
+ { 6, 0, 1, 3, 2, 127, },
+ { 7, 0, 1, 3, 2, 127, },
+ { 8, 0, 1, 3, 2, 127, },
+ { 9, 0, 1, 3, 2, 127, },
{ 0, 0, 1, 3, 3, 48, },
{ 2, 0, 1, 3, 3, 36, },
+ { 1, 0, 1, 3, 3, 66, },
+ { 3, 0, 1, 3, 3, 48, },
+ { 4, 0, 1, 3, 3, 66, },
+ { 5, 0, 1, 3, 3, 36, },
+ { 6, 0, 1, 3, 3, 48, },
+ { 7, 0, 1, 3, 3, 36, },
+ { 8, 0, 1, 3, 3, 48, },
+ { 9, 0, 1, 3, 3, 36, },
{ 0, 0, 1, 3, 4, 48, },
{ 2, 0, 1, 3, 4, 36, },
+ { 1, 0, 1, 3, 4, 66, },
+ { 3, 0, 1, 3, 4, 48, },
+ { 4, 0, 1, 3, 4, 70, },
+ { 5, 0, 1, 3, 4, 36, },
+ { 6, 0, 1, 3, 4, 48, },
+ { 7, 0, 1, 3, 4, 36, },
+ { 8, 0, 1, 3, 4, 48, },
+ { 9, 0, 1, 3, 4, 36, },
{ 0, 0, 1, 3, 5, 60, },
{ 2, 0, 1, 3, 5, 36, },
+ { 1, 0, 1, 3, 5, 66, },
+ { 3, 0, 1, 3, 5, 60, },
+ { 4, 0, 1, 3, 5, 70, },
+ { 5, 0, 1, 3, 5, 36, },
+ { 6, 0, 1, 3, 5, 60, },
+ { 7, 0, 1, 3, 5, 36, },
+ { 8, 0, 1, 3, 5, 60, },
+ { 9, 0, 1, 3, 5, 36, },
{ 0, 0, 1, 3, 6, 64, },
{ 2, 0, 1, 3, 6, 36, },
+ { 1, 0, 1, 3, 6, 66, },
+ { 3, 0, 1, 3, 6, 64, },
+ { 4, 0, 1, 3, 6, 70, },
+ { 5, 0, 1, 3, 6, 36, },
+ { 6, 0, 1, 3, 6, 64, },
+ { 7, 0, 1, 3, 6, 36, },
+ { 8, 0, 1, 3, 6, 64, },
+ { 9, 0, 1, 3, 6, 36, },
{ 0, 0, 1, 3, 7, 60, },
{ 2, 0, 1, 3, 7, 36, },
+ { 1, 0, 1, 3, 7, 66, },
+ { 3, 0, 1, 3, 7, 60, },
+ { 4, 0, 1, 3, 7, 70, },
+ { 5, 0, 1, 3, 7, 36, },
+ { 6, 0, 1, 3, 7, 60, },
+ { 7, 0, 1, 3, 7, 36, },
+ { 8, 0, 1, 3, 7, 60, },
+ { 9, 0, 1, 3, 7, 36, },
{ 0, 0, 1, 3, 8, 52, },
{ 2, 0, 1, 3, 8, 36, },
+ { 1, 0, 1, 3, 8, 66, },
+ { 3, 0, 1, 3, 8, 52, },
+ { 4, 0, 1, 3, 8, 70, },
+ { 5, 0, 1, 3, 8, 36, },
+ { 6, 0, 1, 3, 8, 52, },
+ { 7, 0, 1, 3, 8, 36, },
+ { 8, 0, 1, 3, 8, 52, },
+ { 9, 0, 1, 3, 8, 36, },
{ 0, 0, 1, 3, 9, 52, },
{ 2, 0, 1, 3, 9, 36, },
+ { 1, 0, 1, 3, 9, 66, },
+ { 3, 0, 1, 3, 9, 52, },
+ { 4, 0, 1, 3, 9, 70, },
+ { 5, 0, 1, 3, 9, 36, },
+ { 6, 0, 1, 3, 9, 52, },
+ { 7, 0, 1, 3, 9, 36, },
+ { 8, 0, 1, 3, 9, 52, },
+ { 9, 0, 1, 3, 9, 36, },
{ 0, 0, 1, 3, 10, 40, },
{ 2, 0, 1, 3, 10, 36, },
+ { 1, 0, 1, 3, 10, 66, },
+ { 3, 0, 1, 3, 10, 40, },
+ { 4, 0, 1, 3, 10, 70, },
+ { 5, 0, 1, 3, 10, 36, },
+ { 6, 0, 1, 3, 10, 40, },
+ { 7, 0, 1, 3, 10, 36, },
+ { 8, 0, 1, 3, 10, 40, },
+ { 9, 0, 1, 3, 10, 36, },
{ 0, 0, 1, 3, 11, 26, },
{ 2, 0, 1, 3, 11, 36, },
+ { 1, 0, 1, 3, 11, 66, },
+ { 3, 0, 1, 3, 11, 26, },
+ { 4, 0, 1, 3, 11, 66, },
+ { 5, 0, 1, 3, 11, 36, },
+ { 6, 0, 1, 3, 11, 26, },
+ { 7, 0, 1, 3, 11, 36, },
+ { 8, 0, 1, 3, 11, 26, },
+ { 9, 0, 1, 3, 11, 36, },
{ 0, 0, 1, 3, 12, 127, },
{ 2, 0, 1, 3, 12, 127, },
+ { 1, 0, 1, 3, 12, 127, },
+ { 3, 0, 1, 3, 12, 127, },
+ { 4, 0, 1, 3, 12, 127, },
+ { 5, 0, 1, 3, 12, 127, },
+ { 6, 0, 1, 3, 12, 127, },
+ { 7, 0, 1, 3, 12, 127, },
+ { 8, 0, 1, 3, 12, 127, },
+ { 9, 0, 1, 3, 12, 127, },
{ 0, 0, 1, 3, 13, 127, },
{ 2, 0, 1, 3, 13, 127, },
+ { 1, 0, 1, 3, 13, 127, },
+ { 3, 0, 1, 3, 13, 127, },
+ { 4, 0, 1, 3, 13, 127, },
+ { 5, 0, 1, 3, 13, 127, },
+ { 6, 0, 1, 3, 13, 127, },
+ { 7, 0, 1, 3, 13, 127, },
+ { 8, 0, 1, 3, 13, 127, },
+ { 9, 0, 1, 3, 13, 127, },
{ 0, 0, 1, 3, 14, 127, },
{ 2, 0, 1, 3, 14, 127, },
+ { 1, 0, 1, 3, 14, 127, },
+ { 3, 0, 1, 3, 14, 127, },
+ { 4, 0, 1, 3, 14, 127, },
+ { 5, 0, 1, 3, 14, 127, },
+ { 6, 0, 1, 3, 14, 127, },
+ { 7, 0, 1, 3, 14, 127, },
+ { 8, 0, 1, 3, 14, 127, },
+ { 9, 0, 1, 3, 14, 127, },
{ 0, 1, 0, 1, 36, 74, },
{ 2, 1, 0, 1, 36, 62, },
+ { 1, 1, 0, 1, 36, 60, },
+ { 3, 1, 0, 1, 36, 62, },
+ { 4, 1, 0, 1, 36, 76, },
+ { 5, 1, 0, 1, 36, 62, },
+ { 6, 1, 0, 1, 36, 64, },
+ { 7, 1, 0, 1, 36, 54, },
+ { 8, 1, 0, 1, 36, 62, },
+ { 9, 1, 0, 1, 36, 62, },
{ 0, 1, 0, 1, 40, 76, },
{ 2, 1, 0, 1, 40, 62, },
+ { 1, 1, 0, 1, 40, 62, },
+ { 3, 1, 0, 1, 40, 62, },
+ { 4, 1, 0, 1, 40, 76, },
+ { 5, 1, 0, 1, 40, 62, },
+ { 6, 1, 0, 1, 40, 64, },
+ { 7, 1, 0, 1, 40, 54, },
+ { 8, 1, 0, 1, 40, 62, },
+ { 9, 1, 0, 1, 40, 62, },
{ 0, 1, 0, 1, 44, 76, },
{ 2, 1, 0, 1, 44, 62, },
+ { 1, 1, 0, 1, 44, 62, },
+ { 3, 1, 0, 1, 44, 62, },
+ { 4, 1, 0, 1, 44, 76, },
+ { 5, 1, 0, 1, 44, 62, },
+ { 6, 1, 0, 1, 44, 64, },
+ { 7, 1, 0, 1, 44, 54, },
+ { 8, 1, 0, 1, 44, 62, },
+ { 9, 1, 0, 1, 44, 62, },
{ 0, 1, 0, 1, 48, 76, },
{ 2, 1, 0, 1, 48, 62, },
+ { 1, 1, 0, 1, 48, 62, },
+ { 3, 1, 0, 1, 48, 62, },
+ { 4, 1, 0, 1, 48, 54, },
+ { 5, 1, 0, 1, 48, 62, },
+ { 6, 1, 0, 1, 48, 64, },
+ { 7, 1, 0, 1, 48, 54, },
+ { 8, 1, 0, 1, 48, 62, },
+ { 9, 1, 0, 1, 48, 62, },
{ 0, 1, 0, 1, 52, 76, },
{ 2, 1, 0, 1, 52, 62, },
+ { 1, 1, 0, 1, 52, 62, },
+ { 3, 1, 0, 1, 52, 64, },
+ { 4, 1, 0, 1, 52, 76, },
+ { 5, 1, 0, 1, 52, 62, },
+ { 6, 1, 0, 1, 52, 76, },
+ { 7, 1, 0, 1, 52, 54, },
+ { 8, 1, 0, 1, 52, 76, },
+ { 9, 1, 0, 1, 52, 62, },
{ 0, 1, 0, 1, 56, 76, },
{ 2, 1, 0, 1, 56, 62, },
+ { 1, 1, 0, 1, 56, 62, },
+ { 3, 1, 0, 1, 56, 64, },
+ { 4, 1, 0, 1, 56, 76, },
+ { 5, 1, 0, 1, 56, 62, },
+ { 6, 1, 0, 1, 56, 76, },
+ { 7, 1, 0, 1, 56, 54, },
+ { 8, 1, 0, 1, 56, 76, },
+ { 9, 1, 0, 1, 56, 62, },
{ 0, 1, 0, 1, 60, 76, },
{ 2, 1, 0, 1, 60, 62, },
+ { 1, 1, 0, 1, 60, 62, },
+ { 3, 1, 0, 1, 60, 64, },
+ { 4, 1, 0, 1, 60, 76, },
+ { 5, 1, 0, 1, 60, 62, },
+ { 6, 1, 0, 1, 60, 76, },
+ { 7, 1, 0, 1, 60, 54, },
+ { 8, 1, 0, 1, 60, 76, },
+ { 9, 1, 0, 1, 60, 62, },
{ 0, 1, 0, 1, 64, 74, },
{ 2, 1, 0, 1, 64, 62, },
+ { 1, 1, 0, 1, 64, 60, },
+ { 3, 1, 0, 1, 64, 64, },
+ { 4, 1, 0, 1, 64, 76, },
+ { 5, 1, 0, 1, 64, 62, },
+ { 6, 1, 0, 1, 64, 74, },
+ { 7, 1, 0, 1, 64, 54, },
+ { 8, 1, 0, 1, 64, 74, },
+ { 9, 1, 0, 1, 64, 62, },
{ 0, 1, 0, 1, 100, 72, },
{ 2, 1, 0, 1, 100, 62, },
+ { 1, 1, 0, 1, 100, 76, },
+ { 3, 1, 0, 1, 100, 72, },
+ { 4, 1, 0, 1, 100, 76, },
+ { 5, 1, 0, 1, 100, 62, },
+ { 6, 1, 0, 1, 100, 72, },
+ { 7, 1, 0, 1, 100, 54, },
+ { 8, 1, 0, 1, 100, 72, },
+ { 9, 1, 0, 1, 100, 127, },
{ 0, 1, 0, 1, 104, 76, },
{ 2, 1, 0, 1, 104, 62, },
+ { 1, 1, 0, 1, 104, 76, },
+ { 3, 1, 0, 1, 104, 76, },
+ { 4, 1, 0, 1, 104, 76, },
+ { 5, 1, 0, 1, 104, 62, },
+ { 6, 1, 0, 1, 104, 76, },
+ { 7, 1, 0, 1, 104, 54, },
+ { 8, 1, 0, 1, 104, 76, },
+ { 9, 1, 0, 1, 104, 127, },
{ 0, 1, 0, 1, 108, 76, },
{ 2, 1, 0, 1, 108, 62, },
+ { 1, 1, 0, 1, 108, 76, },
+ { 3, 1, 0, 1, 108, 76, },
+ { 4, 1, 0, 1, 108, 76, },
+ { 5, 1, 0, 1, 108, 62, },
+ { 6, 1, 0, 1, 108, 76, },
+ { 7, 1, 0, 1, 108, 54, },
+ { 8, 1, 0, 1, 108, 76, },
+ { 9, 1, 0, 1, 108, 127, },
{ 0, 1, 0, 1, 112, 76, },
{ 2, 1, 0, 1, 112, 62, },
+ { 1, 1, 0, 1, 112, 76, },
+ { 3, 1, 0, 1, 112, 76, },
+ { 4, 1, 0, 1, 112, 76, },
+ { 5, 1, 0, 1, 112, 62, },
+ { 6, 1, 0, 1, 112, 76, },
+ { 7, 1, 0, 1, 112, 54, },
+ { 8, 1, 0, 1, 112, 76, },
+ { 9, 1, 0, 1, 112, 127, },
{ 0, 1, 0, 1, 116, 76, },
{ 2, 1, 0, 1, 116, 62, },
+ { 1, 1, 0, 1, 116, 76, },
+ { 3, 1, 0, 1, 116, 76, },
+ { 4, 1, 0, 1, 116, 76, },
+ { 5, 1, 0, 1, 116, 62, },
+ { 6, 1, 0, 1, 116, 76, },
+ { 7, 1, 0, 1, 116, 54, },
+ { 8, 1, 0, 1, 116, 76, },
+ { 9, 1, 0, 1, 116, 127, },
{ 0, 1, 0, 1, 120, 76, },
{ 2, 1, 0, 1, 120, 62, },
+ { 1, 1, 0, 1, 120, 76, },
+ { 3, 1, 0, 1, 120, 127, },
+ { 4, 1, 0, 1, 120, 76, },
+ { 5, 1, 0, 1, 120, 127, },
+ { 6, 1, 0, 1, 120, 76, },
+ { 7, 1, 0, 1, 120, 54, },
+ { 8, 1, 0, 1, 120, 76, },
+ { 9, 1, 0, 1, 120, 127, },
{ 0, 1, 0, 1, 124, 76, },
{ 2, 1, 0, 1, 124, 62, },
+ { 1, 1, 0, 1, 124, 76, },
+ { 3, 1, 0, 1, 124, 127, },
+ { 4, 1, 0, 1, 124, 76, },
+ { 5, 1, 0, 1, 124, 127, },
+ { 6, 1, 0, 1, 124, 76, },
+ { 7, 1, 0, 1, 124, 54, },
+ { 8, 1, 0, 1, 124, 76, },
+ { 9, 1, 0, 1, 124, 127, },
{ 0, 1, 0, 1, 128, 76, },
{ 2, 1, 0, 1, 128, 62, },
+ { 1, 1, 0, 1, 128, 76, },
+ { 3, 1, 0, 1, 128, 127, },
+ { 4, 1, 0, 1, 128, 76, },
+ { 5, 1, 0, 1, 128, 127, },
+ { 6, 1, 0, 1, 128, 76, },
+ { 7, 1, 0, 1, 128, 54, },
+ { 8, 1, 0, 1, 128, 76, },
+ { 9, 1, 0, 1, 128, 127, },
{ 0, 1, 0, 1, 132, 76, },
{ 2, 1, 0, 1, 132, 62, },
+ { 1, 1, 0, 1, 132, 76, },
+ { 3, 1, 0, 1, 132, 76, },
+ { 4, 1, 0, 1, 132, 76, },
+ { 5, 1, 0, 1, 132, 62, },
+ { 6, 1, 0, 1, 132, 76, },
+ { 7, 1, 0, 1, 132, 54, },
+ { 8, 1, 0, 1, 132, 76, },
+ { 9, 1, 0, 1, 132, 127, },
{ 0, 1, 0, 1, 136, 76, },
{ 2, 1, 0, 1, 136, 62, },
+ { 1, 1, 0, 1, 136, 76, },
+ { 3, 1, 0, 1, 136, 76, },
+ { 4, 1, 0, 1, 136, 76, },
+ { 5, 1, 0, 1, 136, 62, },
+ { 6, 1, 0, 1, 136, 76, },
+ { 7, 1, 0, 1, 136, 54, },
+ { 8, 1, 0, 1, 136, 76, },
+ { 9, 1, 0, 1, 136, 127, },
{ 0, 1, 0, 1, 140, 72, },
{ 2, 1, 0, 1, 140, 62, },
+ { 1, 1, 0, 1, 140, 76, },
+ { 3, 1, 0, 1, 140, 72, },
+ { 4, 1, 0, 1, 140, 76, },
+ { 5, 1, 0, 1, 140, 62, },
+ { 6, 1, 0, 1, 140, 72, },
+ { 7, 1, 0, 1, 140, 54, },
+ { 8, 1, 0, 1, 140, 72, },
+ { 9, 1, 0, 1, 140, 127, },
{ 0, 1, 0, 1, 144, 76, },
{ 2, 1, 0, 1, 144, 127, },
+ { 1, 1, 0, 1, 144, 127, },
+ { 3, 1, 0, 1, 144, 76, },
+ { 4, 1, 0, 1, 144, 76, },
+ { 5, 1, 0, 1, 144, 127, },
+ { 6, 1, 0, 1, 144, 76, },
+ { 7, 1, 0, 1, 144, 127, },
+ { 8, 1, 0, 1, 144, 76, },
+ { 9, 1, 0, 1, 144, 127, },
{ 0, 1, 0, 1, 149, 76, },
{ 2, 1, 0, 1, 149, -128, },
+ { 1, 1, 0, 1, 149, 127, },
+ { 3, 1, 0, 1, 149, 76, },
+ { 4, 1, 0, 1, 149, 74, },
+ { 5, 1, 0, 1, 149, 76, },
+ { 6, 1, 0, 1, 149, 76, },
+ { 7, 1, 0, 1, 149, 54, },
+ { 8, 1, 0, 1, 149, 76, },
+ { 9, 1, 0, 1, 149, -128, },
{ 0, 1, 0, 1, 153, 76, },
{ 2, 1, 0, 1, 153, -128, },
+ { 1, 1, 0, 1, 153, 127, },
+ { 3, 1, 0, 1, 153, 76, },
+ { 4, 1, 0, 1, 153, 74, },
+ { 5, 1, 0, 1, 153, 76, },
+ { 6, 1, 0, 1, 153, 76, },
+ { 7, 1, 0, 1, 153, 54, },
+ { 8, 1, 0, 1, 153, 76, },
+ { 9, 1, 0, 1, 153, -128, },
{ 0, 1, 0, 1, 157, 76, },
{ 2, 1, 0, 1, 157, -128, },
+ { 1, 1, 0, 1, 157, 127, },
+ { 3, 1, 0, 1, 157, 76, },
+ { 4, 1, 0, 1, 157, 74, },
+ { 5, 1, 0, 1, 157, 76, },
+ { 6, 1, 0, 1, 157, 76, },
+ { 7, 1, 0, 1, 157, 54, },
+ { 8, 1, 0, 1, 157, 76, },
+ { 9, 1, 0, 1, 157, -128, },
{ 0, 1, 0, 1, 161, 76, },
{ 2, 1, 0, 1, 161, -128, },
+ { 1, 1, 0, 1, 161, 127, },
+ { 3, 1, 0, 1, 161, 76, },
+ { 4, 1, 0, 1, 161, 74, },
+ { 5, 1, 0, 1, 161, 76, },
+ { 6, 1, 0, 1, 161, 76, },
+ { 7, 1, 0, 1, 161, 54, },
+ { 8, 1, 0, 1, 161, 76, },
+ { 9, 1, 0, 1, 161, -128, },
{ 0, 1, 0, 1, 165, 76, },
{ 2, 1, 0, 1, 165, -128, },
+ { 1, 1, 0, 1, 165, 127, },
+ { 3, 1, 0, 1, 165, 76, },
+ { 4, 1, 0, 1, 165, 74, },
+ { 5, 1, 0, 1, 165, 76, },
+ { 6, 1, 0, 1, 165, 76, },
+ { 7, 1, 0, 1, 165, 54, },
+ { 8, 1, 0, 1, 165, 76, },
+ { 9, 1, 0, 1, 165, -128, },
{ 0, 1, 0, 2, 36, 72, },
{ 2, 1, 0, 2, 36, 62, },
+ { 1, 1, 0, 2, 36, 62, },
+ { 3, 1, 0, 2, 36, 62, },
+ { 4, 1, 0, 2, 36, 76, },
+ { 5, 1, 0, 2, 36, 62, },
+ { 6, 1, 0, 2, 36, 64, },
+ { 7, 1, 0, 2, 36, 54, },
+ { 8, 1, 0, 2, 36, 62, },
+ { 9, 1, 0, 2, 36, 62, },
{ 0, 1, 0, 2, 40, 76, },
{ 2, 1, 0, 2, 40, 62, },
+ { 1, 1, 0, 2, 40, 62, },
+ { 3, 1, 0, 2, 40, 62, },
+ { 4, 1, 0, 2, 40, 76, },
+ { 5, 1, 0, 2, 40, 62, },
+ { 6, 1, 0, 2, 40, 64, },
+ { 7, 1, 0, 2, 40, 54, },
+ { 8, 1, 0, 2, 40, 62, },
+ { 9, 1, 0, 2, 40, 62, },
{ 0, 1, 0, 2, 44, 76, },
{ 2, 1, 0, 2, 44, 62, },
+ { 1, 1, 0, 2, 44, 62, },
+ { 3, 1, 0, 2, 44, 62, },
+ { 4, 1, 0, 2, 44, 76, },
+ { 5, 1, 0, 2, 44, 62, },
+ { 6, 1, 0, 2, 44, 64, },
+ { 7, 1, 0, 2, 44, 54, },
+ { 8, 1, 0, 2, 44, 62, },
+ { 9, 1, 0, 2, 44, 62, },
{ 0, 1, 0, 2, 48, 76, },
{ 2, 1, 0, 2, 48, 62, },
+ { 1, 1, 0, 2, 48, 62, },
+ { 3, 1, 0, 2, 48, 62, },
+ { 4, 1, 0, 2, 48, 54, },
+ { 5, 1, 0, 2, 48, 62, },
+ { 6, 1, 0, 2, 48, 64, },
+ { 7, 1, 0, 2, 48, 54, },
+ { 8, 1, 0, 2, 48, 62, },
+ { 9, 1, 0, 2, 48, 62, },
{ 0, 1, 0, 2, 52, 76, },
{ 2, 1, 0, 2, 52, 62, },
+ { 1, 1, 0, 2, 52, 62, },
+ { 3, 1, 0, 2, 52, 64, },
+ { 4, 1, 0, 2, 52, 76, },
+ { 5, 1, 0, 2, 52, 62, },
+ { 6, 1, 0, 2, 52, 76, },
+ { 7, 1, 0, 2, 52, 54, },
+ { 8, 1, 0, 2, 52, 76, },
+ { 9, 1, 0, 2, 52, 62, },
{ 0, 1, 0, 2, 56, 76, },
{ 2, 1, 0, 2, 56, 62, },
+ { 1, 1, 0, 2, 56, 62, },
+ { 3, 1, 0, 2, 56, 64, },
+ { 4, 1, 0, 2, 56, 76, },
+ { 5, 1, 0, 2, 56, 62, },
+ { 6, 1, 0, 2, 56, 76, },
+ { 7, 1, 0, 2, 56, 54, },
+ { 8, 1, 0, 2, 56, 76, },
+ { 9, 1, 0, 2, 56, 62, },
{ 0, 1, 0, 2, 60, 76, },
{ 2, 1, 0, 2, 60, 62, },
+ { 1, 1, 0, 2, 60, 62, },
+ { 3, 1, 0, 2, 60, 64, },
+ { 4, 1, 0, 2, 60, 76, },
+ { 5, 1, 0, 2, 60, 62, },
+ { 6, 1, 0, 2, 60, 76, },
+ { 7, 1, 0, 2, 60, 54, },
+ { 8, 1, 0, 2, 60, 76, },
+ { 9, 1, 0, 2, 60, 62, },
{ 0, 1, 0, 2, 64, 74, },
{ 2, 1, 0, 2, 64, 62, },
+ { 1, 1, 0, 2, 64, 60, },
+ { 3, 1, 0, 2, 64, 64, },
+ { 4, 1, 0, 2, 64, 74, },
+ { 5, 1, 0, 2, 64, 62, },
+ { 6, 1, 0, 2, 64, 74, },
+ { 7, 1, 0, 2, 64, 54, },
+ { 8, 1, 0, 2, 64, 74, },
+ { 9, 1, 0, 2, 64, 62, },
{ 0, 1, 0, 2, 100, 70, },
{ 2, 1, 0, 2, 100, 62, },
+ { 1, 1, 0, 2, 100, 76, },
+ { 3, 1, 0, 2, 100, 70, },
+ { 4, 1, 0, 2, 100, 76, },
+ { 5, 1, 0, 2, 100, 62, },
+ { 6, 1, 0, 2, 100, 70, },
+ { 7, 1, 0, 2, 100, 54, },
+ { 8, 1, 0, 2, 100, 70, },
+ { 9, 1, 0, 2, 100, 127, },
{ 0, 1, 0, 2, 104, 76, },
{ 2, 1, 0, 2, 104, 62, },
+ { 1, 1, 0, 2, 104, 76, },
+ { 3, 1, 0, 2, 104, 76, },
+ { 4, 1, 0, 2, 104, 76, },
+ { 5, 1, 0, 2, 104, 62, },
+ { 6, 1, 0, 2, 104, 76, },
+ { 7, 1, 0, 2, 104, 54, },
+ { 8, 1, 0, 2, 104, 76, },
+ { 9, 1, 0, 2, 104, 127, },
{ 0, 1, 0, 2, 108, 76, },
{ 2, 1, 0, 2, 108, 62, },
+ { 1, 1, 0, 2, 108, 76, },
+ { 3, 1, 0, 2, 108, 76, },
+ { 4, 1, 0, 2, 108, 76, },
+ { 5, 1, 0, 2, 108, 62, },
+ { 6, 1, 0, 2, 108, 76, },
+ { 7, 1, 0, 2, 108, 54, },
+ { 8, 1, 0, 2, 108, 76, },
+ { 9, 1, 0, 2, 108, 127, },
{ 0, 1, 0, 2, 112, 76, },
{ 2, 1, 0, 2, 112, 62, },
+ { 1, 1, 0, 2, 112, 76, },
+ { 3, 1, 0, 2, 112, 76, },
+ { 4, 1, 0, 2, 112, 76, },
+ { 5, 1, 0, 2, 112, 62, },
+ { 6, 1, 0, 2, 112, 76, },
+ { 7, 1, 0, 2, 112, 54, },
+ { 8, 1, 0, 2, 112, 76, },
+ { 9, 1, 0, 2, 112, 127, },
{ 0, 1, 0, 2, 116, 76, },
{ 2, 1, 0, 2, 116, 62, },
+ { 1, 1, 0, 2, 116, 76, },
+ { 3, 1, 0, 2, 116, 76, },
+ { 4, 1, 0, 2, 116, 76, },
+ { 5, 1, 0, 2, 116, 62, },
+ { 6, 1, 0, 2, 116, 76, },
+ { 7, 1, 0, 2, 116, 54, },
+ { 8, 1, 0, 2, 116, 76, },
+ { 9, 1, 0, 2, 116, 127, },
{ 0, 1, 0, 2, 120, 76, },
{ 2, 1, 0, 2, 120, 62, },
+ { 1, 1, 0, 2, 120, 76, },
+ { 3, 1, 0, 2, 120, 127, },
+ { 4, 1, 0, 2, 120, 76, },
+ { 5, 1, 0, 2, 120, 127, },
+ { 6, 1, 0, 2, 120, 76, },
+ { 7, 1, 0, 2, 120, 54, },
+ { 8, 1, 0, 2, 120, 76, },
+ { 9, 1, 0, 2, 120, 127, },
{ 0, 1, 0, 2, 124, 76, },
{ 2, 1, 0, 2, 124, 62, },
+ { 1, 1, 0, 2, 124, 76, },
+ { 3, 1, 0, 2, 124, 127, },
+ { 4, 1, 0, 2, 124, 76, },
+ { 5, 1, 0, 2, 124, 127, },
+ { 6, 1, 0, 2, 124, 76, },
+ { 7, 1, 0, 2, 124, 54, },
+ { 8, 1, 0, 2, 124, 76, },
+ { 9, 1, 0, 2, 124, 127, },
{ 0, 1, 0, 2, 128, 76, },
{ 2, 1, 0, 2, 128, 62, },
+ { 1, 1, 0, 2, 128, 76, },
+ { 3, 1, 0, 2, 128, 127, },
+ { 4, 1, 0, 2, 128, 76, },
+ { 5, 1, 0, 2, 128, 127, },
+ { 6, 1, 0, 2, 128, 76, },
+ { 7, 1, 0, 2, 128, 54, },
+ { 8, 1, 0, 2, 128, 76, },
+ { 9, 1, 0, 2, 128, 127, },
{ 0, 1, 0, 2, 132, 76, },
{ 2, 1, 0, 2, 132, 62, },
+ { 1, 1, 0, 2, 132, 76, },
+ { 3, 1, 0, 2, 132, 76, },
+ { 4, 1, 0, 2, 132, 76, },
+ { 5, 1, 0, 2, 132, 62, },
+ { 6, 1, 0, 2, 132, 76, },
+ { 7, 1, 0, 2, 132, 54, },
+ { 8, 1, 0, 2, 132, 76, },
+ { 9, 1, 0, 2, 132, 127, },
{ 0, 1, 0, 2, 136, 76, },
{ 2, 1, 0, 2, 136, 62, },
+ { 1, 1, 0, 2, 136, 76, },
+ { 3, 1, 0, 2, 136, 76, },
+ { 4, 1, 0, 2, 136, 76, },
+ { 5, 1, 0, 2, 136, 62, },
+ { 6, 1, 0, 2, 136, 76, },
+ { 7, 1, 0, 2, 136, 54, },
+ { 8, 1, 0, 2, 136, 76, },
+ { 9, 1, 0, 2, 136, 127, },
{ 0, 1, 0, 2, 140, 70, },
{ 2, 1, 0, 2, 140, 62, },
+ { 1, 1, 0, 2, 140, 76, },
+ { 3, 1, 0, 2, 140, 70, },
+ { 4, 1, 0, 2, 140, 76, },
+ { 5, 1, 0, 2, 140, 62, },
+ { 6, 1, 0, 2, 140, 70, },
+ { 7, 1, 0, 2, 140, 54, },
+ { 8, 1, 0, 2, 140, 70, },
+ { 9, 1, 0, 2, 140, 127, },
{ 0, 1, 0, 2, 144, 76, },
{ 2, 1, 0, 2, 144, 127, },
+ { 1, 1, 0, 2, 144, 127, },
+ { 3, 1, 0, 2, 144, 76, },
+ { 4, 1, 0, 2, 144, 76, },
+ { 5, 1, 0, 2, 144, 127, },
+ { 6, 1, 0, 2, 144, 76, },
+ { 7, 1, 0, 2, 144, 127, },
+ { 8, 1, 0, 2, 144, 76, },
+ { 9, 1, 0, 2, 144, 127, },
{ 0, 1, 0, 2, 149, 76, },
{ 2, 1, 0, 2, 149, -128, },
+ { 1, 1, 0, 2, 149, 127, },
+ { 3, 1, 0, 2, 149, 76, },
+ { 4, 1, 0, 2, 149, 74, },
+ { 5, 1, 0, 2, 149, 76, },
+ { 6, 1, 0, 2, 149, 76, },
+ { 7, 1, 0, 2, 149, 54, },
+ { 8, 1, 0, 2, 149, 76, },
+ { 9, 1, 0, 2, 149, -128, },
{ 0, 1, 0, 2, 153, 76, },
{ 2, 1, 0, 2, 153, -128, },
+ { 1, 1, 0, 2, 153, 127, },
+ { 3, 1, 0, 2, 153, 76, },
+ { 4, 1, 0, 2, 153, 74, },
+ { 5, 1, 0, 2, 153, 76, },
+ { 6, 1, 0, 2, 153, 76, },
+ { 7, 1, 0, 2, 153, 54, },
+ { 8, 1, 0, 2, 153, 76, },
+ { 9, 1, 0, 2, 153, -128, },
{ 0, 1, 0, 2, 157, 76, },
{ 2, 1, 0, 2, 157, -128, },
+ { 1, 1, 0, 2, 157, 127, },
+ { 3, 1, 0, 2, 157, 76, },
+ { 4, 1, 0, 2, 157, 74, },
+ { 5, 1, 0, 2, 157, 76, },
+ { 6, 1, 0, 2, 157, 76, },
+ { 7, 1, 0, 2, 157, 54, },
+ { 8, 1, 0, 2, 157, 76, },
+ { 9, 1, 0, 2, 157, -128, },
{ 0, 1, 0, 2, 161, 76, },
{ 2, 1, 0, 2, 161, -128, },
+ { 1, 1, 0, 2, 161, 127, },
+ { 3, 1, 0, 2, 161, 76, },
+ { 4, 1, 0, 2, 161, 74, },
+ { 5, 1, 0, 2, 161, 76, },
+ { 6, 1, 0, 2, 161, 76, },
+ { 7, 1, 0, 2, 161, 54, },
+ { 8, 1, 0, 2, 161, 76, },
+ { 9, 1, 0, 2, 161, -128, },
{ 0, 1, 0, 2, 165, 76, },
{ 2, 1, 0, 2, 165, -128, },
+ { 1, 1, 0, 2, 165, 127, },
+ { 3, 1, 0, 2, 165, 76, },
+ { 4, 1, 0, 2, 165, 74, },
+ { 5, 1, 0, 2, 165, 76, },
+ { 6, 1, 0, 2, 165, 76, },
+ { 7, 1, 0, 2, 165, 54, },
+ { 8, 1, 0, 2, 165, 76, },
+ { 9, 1, 0, 2, 165, -128, },
{ 0, 1, 0, 3, 36, 68, },
{ 2, 1, 0, 3, 36, 38, },
+ { 1, 1, 0, 3, 36, 50, },
+ { 3, 1, 0, 3, 36, 38, },
+ { 4, 1, 0, 3, 36, 66, },
+ { 5, 1, 0, 3, 36, 38, },
+ { 6, 1, 0, 3, 36, 52, },
+ { 7, 1, 0, 3, 36, 30, },
+ { 8, 1, 0, 3, 36, 50, },
+ { 9, 1, 0, 3, 36, 38, },
{ 0, 1, 0, 3, 40, 68, },
{ 2, 1, 0, 3, 40, 38, },
+ { 1, 1, 0, 3, 40, 50, },
+ { 3, 1, 0, 3, 40, 38, },
+ { 4, 1, 0, 3, 40, 66, },
+ { 5, 1, 0, 3, 40, 38, },
+ { 6, 1, 0, 3, 40, 52, },
+ { 7, 1, 0, 3, 40, 30, },
+ { 8, 1, 0, 3, 40, 50, },
+ { 9, 1, 0, 3, 40, 38, },
{ 0, 1, 0, 3, 44, 68, },
{ 2, 1, 0, 3, 44, 38, },
+ { 1, 1, 0, 3, 44, 50, },
+ { 3, 1, 0, 3, 44, 38, },
+ { 4, 1, 0, 3, 44, 66, },
+ { 5, 1, 0, 3, 44, 38, },
+ { 6, 1, 0, 3, 44, 52, },
+ { 7, 1, 0, 3, 44, 30, },
+ { 8, 1, 0, 3, 44, 50, },
+ { 9, 1, 0, 3, 44, 38, },
{ 0, 1, 0, 3, 48, 68, },
{ 2, 1, 0, 3, 48, 38, },
+ { 1, 1, 0, 3, 48, 50, },
+ { 3, 1, 0, 3, 48, 38, },
+ { 4, 1, 0, 3, 48, 36, },
+ { 5, 1, 0, 3, 48, 38, },
+ { 6, 1, 0, 3, 48, 52, },
+ { 7, 1, 0, 3, 48, 30, },
+ { 8, 1, 0, 3, 48, 50, },
+ { 9, 1, 0, 3, 48, 38, },
{ 0, 1, 0, 3, 52, 68, },
{ 2, 1, 0, 3, 52, 38, },
+ { 1, 1, 0, 3, 52, 50, },
+ { 3, 1, 0, 3, 52, 40, },
+ { 4, 1, 0, 3, 52, 66, },
+ { 5, 1, 0, 3, 52, 38, },
+ { 6, 1, 0, 3, 52, 68, },
+ { 7, 1, 0, 3, 52, 30, },
+ { 8, 1, 0, 3, 52, 68, },
+ { 9, 1, 0, 3, 52, 38, },
{ 0, 1, 0, 3, 56, 68, },
{ 2, 1, 0, 3, 56, 38, },
+ { 1, 1, 0, 3, 56, 50, },
+ { 3, 1, 0, 3, 56, 40, },
+ { 4, 1, 0, 3, 56, 66, },
+ { 5, 1, 0, 3, 56, 38, },
+ { 6, 1, 0, 3, 56, 68, },
+ { 7, 1, 0, 3, 56, 30, },
+ { 8, 1, 0, 3, 56, 68, },
+ { 9, 1, 0, 3, 56, 38, },
{ 0, 1, 0, 3, 60, 66, },
{ 2, 1, 0, 3, 60, 38, },
+ { 1, 1, 0, 3, 60, 50, },
+ { 3, 1, 0, 3, 60, 40, },
+ { 4, 1, 0, 3, 60, 66, },
+ { 5, 1, 0, 3, 60, 38, },
+ { 6, 1, 0, 3, 60, 66, },
+ { 7, 1, 0, 3, 60, 30, },
+ { 8, 1, 0, 3, 60, 66, },
+ { 9, 1, 0, 3, 60, 38, },
{ 0, 1, 0, 3, 64, 68, },
{ 2, 1, 0, 3, 64, 38, },
+ { 1, 1, 0, 3, 64, 50, },
+ { 3, 1, 0, 3, 64, 40, },
+ { 4, 1, 0, 3, 64, 66, },
+ { 5, 1, 0, 3, 64, 38, },
+ { 6, 1, 0, 3, 64, 68, },
+ { 7, 1, 0, 3, 64, 30, },
+ { 8, 1, 0, 3, 64, 68, },
+ { 9, 1, 0, 3, 64, 38, },
{ 0, 1, 0, 3, 100, 60, },
{ 2, 1, 0, 3, 100, 38, },
+ { 1, 1, 0, 3, 100, 70, },
+ { 3, 1, 0, 3, 100, 60, },
+ { 4, 1, 0, 3, 100, 64, },
+ { 5, 1, 0, 3, 100, 38, },
+ { 6, 1, 0, 3, 100, 60, },
+ { 7, 1, 0, 3, 100, 30, },
+ { 8, 1, 0, 3, 100, 60, },
+ { 9, 1, 0, 3, 100, 127, },
{ 0, 1, 0, 3, 104, 68, },
{ 2, 1, 0, 3, 104, 38, },
+ { 1, 1, 0, 3, 104, 70, },
+ { 3, 1, 0, 3, 104, 68, },
+ { 4, 1, 0, 3, 104, 64, },
+ { 5, 1, 0, 3, 104, 38, },
+ { 6, 1, 0, 3, 104, 68, },
+ { 7, 1, 0, 3, 104, 30, },
+ { 8, 1, 0, 3, 104, 68, },
+ { 9, 1, 0, 3, 104, 127, },
{ 0, 1, 0, 3, 108, 68, },
{ 2, 1, 0, 3, 108, 38, },
+ { 1, 1, 0, 3, 108, 70, },
+ { 3, 1, 0, 3, 108, 68, },
+ { 4, 1, 0, 3, 108, 64, },
+ { 5, 1, 0, 3, 108, 38, },
+ { 6, 1, 0, 3, 108, 68, },
+ { 7, 1, 0, 3, 108, 30, },
+ { 8, 1, 0, 3, 108, 68, },
+ { 9, 1, 0, 3, 108, 127, },
{ 0, 1, 0, 3, 112, 68, },
{ 2, 1, 0, 3, 112, 38, },
+ { 1, 1, 0, 3, 112, 70, },
+ { 3, 1, 0, 3, 112, 68, },
+ { 4, 1, 0, 3, 112, 64, },
+ { 5, 1, 0, 3, 112, 38, },
+ { 6, 1, 0, 3, 112, 68, },
+ { 7, 1, 0, 3, 112, 30, },
+ { 8, 1, 0, 3, 112, 68, },
+ { 9, 1, 0, 3, 112, 127, },
{ 0, 1, 0, 3, 116, 68, },
{ 2, 1, 0, 3, 116, 38, },
+ { 1, 1, 0, 3, 116, 70, },
+ { 3, 1, 0, 3, 116, 68, },
+ { 4, 1, 0, 3, 116, 64, },
+ { 5, 1, 0, 3, 116, 38, },
+ { 6, 1, 0, 3, 116, 68, },
+ { 7, 1, 0, 3, 116, 30, },
+ { 8, 1, 0, 3, 116, 68, },
+ { 9, 1, 0, 3, 116, 127, },
{ 0, 1, 0, 3, 120, 68, },
{ 2, 1, 0, 3, 120, 38, },
+ { 1, 1, 0, 3, 120, 70, },
+ { 3, 1, 0, 3, 120, 127, },
+ { 4, 1, 0, 3, 120, 64, },
+ { 5, 1, 0, 3, 120, 127, },
+ { 6, 1, 0, 3, 120, 68, },
+ { 7, 1, 0, 3, 120, 30, },
+ { 8, 1, 0, 3, 120, 68, },
+ { 9, 1, 0, 3, 120, 127, },
{ 0, 1, 0, 3, 124, 68, },
{ 2, 1, 0, 3, 124, 38, },
+ { 1, 1, 0, 3, 124, 70, },
+ { 3, 1, 0, 3, 124, 127, },
+ { 4, 1, 0, 3, 124, 64, },
+ { 5, 1, 0, 3, 124, 127, },
+ { 6, 1, 0, 3, 124, 68, },
+ { 7, 1, 0, 3, 124, 30, },
+ { 8, 1, 0, 3, 124, 68, },
+ { 9, 1, 0, 3, 124, 127, },
{ 0, 1, 0, 3, 128, 68, },
{ 2, 1, 0, 3, 128, 38, },
+ { 1, 1, 0, 3, 128, 70, },
+ { 3, 1, 0, 3, 128, 127, },
+ { 4, 1, 0, 3, 128, 64, },
+ { 5, 1, 0, 3, 128, 127, },
+ { 6, 1, 0, 3, 128, 68, },
+ { 7, 1, 0, 3, 128, 30, },
+ { 8, 1, 0, 3, 128, 68, },
+ { 9, 1, 0, 3, 128, 127, },
{ 0, 1, 0, 3, 132, 68, },
{ 2, 1, 0, 3, 132, 38, },
+ { 1, 1, 0, 3, 132, 70, },
+ { 3, 1, 0, 3, 132, 68, },
+ { 4, 1, 0, 3, 132, 64, },
+ { 5, 1, 0, 3, 132, 38, },
+ { 6, 1, 0, 3, 132, 68, },
+ { 7, 1, 0, 3, 132, 30, },
+ { 8, 1, 0, 3, 132, 68, },
+ { 9, 1, 0, 3, 132, 127, },
{ 0, 1, 0, 3, 136, 68, },
{ 2, 1, 0, 3, 136, 38, },
+ { 1, 1, 0, 3, 136, 70, },
+ { 3, 1, 0, 3, 136, 68, },
+ { 4, 1, 0, 3, 136, 64, },
+ { 5, 1, 0, 3, 136, 38, },
+ { 6, 1, 0, 3, 136, 68, },
+ { 7, 1, 0, 3, 136, 30, },
+ { 8, 1, 0, 3, 136, 68, },
+ { 9, 1, 0, 3, 136, 127, },
{ 0, 1, 0, 3, 140, 60, },
{ 2, 1, 0, 3, 140, 38, },
+ { 1, 1, 0, 3, 140, 70, },
+ { 3, 1, 0, 3, 140, 60, },
+ { 4, 1, 0, 3, 140, 64, },
+ { 5, 1, 0, 3, 140, 38, },
+ { 6, 1, 0, 3, 140, 60, },
+ { 7, 1, 0, 3, 140, 30, },
+ { 8, 1, 0, 3, 140, 60, },
+ { 9, 1, 0, 3, 140, 127, },
{ 0, 1, 0, 3, 144, 68, },
{ 2, 1, 0, 3, 144, 127, },
+ { 1, 1, 0, 3, 144, 127, },
+ { 3, 1, 0, 3, 144, 68, },
+ { 4, 1, 0, 3, 144, 64, },
+ { 5, 1, 0, 3, 144, 127, },
+ { 6, 1, 0, 3, 144, 68, },
+ { 7, 1, 0, 3, 144, 127, },
+ { 8, 1, 0, 3, 144, 68, },
+ { 9, 1, 0, 3, 144, 127, },
{ 0, 1, 0, 3, 149, 76, },
{ 2, 1, 0, 3, 149, -128, },
+ { 1, 1, 0, 3, 149, 127, },
+ { 3, 1, 0, 3, 149, 76, },
+ { 4, 1, 0, 3, 149, 60, },
+ { 5, 1, 0, 3, 149, 76, },
+ { 6, 1, 0, 3, 149, 76, },
+ { 7, 1, 0, 3, 149, 30, },
+ { 8, 1, 0, 3, 149, 72, },
+ { 9, 1, 0, 3, 149, -128, },
{ 0, 1, 0, 3, 153, 76, },
{ 2, 1, 0, 3, 153, -128, },
+ { 1, 1, 0, 3, 153, 127, },
+ { 3, 1, 0, 3, 153, 76, },
+ { 4, 1, 0, 3, 153, 60, },
+ { 5, 1, 0, 3, 153, 76, },
+ { 6, 1, 0, 3, 153, 76, },
+ { 7, 1, 0, 3, 153, 30, },
+ { 8, 1, 0, 3, 153, 76, },
+ { 9, 1, 0, 3, 153, -128, },
{ 0, 1, 0, 3, 157, 76, },
{ 2, 1, 0, 3, 157, -128, },
+ { 1, 1, 0, 3, 157, 127, },
+ { 3, 1, 0, 3, 157, 76, },
+ { 4, 1, 0, 3, 157, 60, },
+ { 5, 1, 0, 3, 157, 76, },
+ { 6, 1, 0, 3, 157, 76, },
+ { 7, 1, 0, 3, 157, 30, },
+ { 8, 1, 0, 3, 157, 76, },
+ { 9, 1, 0, 3, 157, -128, },
{ 0, 1, 0, 3, 161, 76, },
{ 2, 1, 0, 3, 161, -128, },
+ { 1, 1, 0, 3, 161, 127, },
+ { 3, 1, 0, 3, 161, 76, },
+ { 4, 1, 0, 3, 161, 60, },
+ { 5, 1, 0, 3, 161, 76, },
+ { 6, 1, 0, 3, 161, 76, },
+ { 7, 1, 0, 3, 161, 30, },
+ { 8, 1, 0, 3, 161, 76, },
+ { 9, 1, 0, 3, 161, -128, },
{ 0, 1, 0, 3, 165, 76, },
{ 2, 1, 0, 3, 165, -128, },
+ { 1, 1, 0, 3, 165, 127, },
+ { 3, 1, 0, 3, 165, 76, },
+ { 4, 1, 0, 3, 165, 60, },
+ { 5, 1, 0, 3, 165, 76, },
+ { 6, 1, 0, 3, 165, 76, },
+ { 7, 1, 0, 3, 165, 30, },
+ { 8, 1, 0, 3, 165, 76, },
+ { 9, 1, 0, 3, 165, -128, },
{ 0, 1, 1, 2, 38, 66, },
{ 2, 1, 1, 2, 38, 64, },
+ { 1, 1, 1, 2, 38, 62, },
+ { 3, 1, 1, 2, 38, 64, },
+ { 4, 1, 1, 2, 38, 72, },
+ { 5, 1, 1, 2, 38, 64, },
+ { 6, 1, 1, 2, 38, 64, },
+ { 7, 1, 1, 2, 38, 54, },
+ { 8, 1, 1, 2, 38, 62, },
+ { 9, 1, 1, 2, 38, 64, },
{ 0, 1, 1, 2, 46, 72, },
{ 2, 1, 1, 2, 46, 64, },
+ { 1, 1, 1, 2, 46, 62, },
+ { 3, 1, 1, 2, 46, 64, },
+ { 4, 1, 1, 2, 46, 60, },
+ { 5, 1, 1, 2, 46, 64, },
+ { 6, 1, 1, 2, 46, 64, },
+ { 7, 1, 1, 2, 46, 54, },
+ { 8, 1, 1, 2, 46, 62, },
+ { 9, 1, 1, 2, 46, 64, },
{ 0, 1, 1, 2, 54, 72, },
{ 2, 1, 1, 2, 54, 64, },
+ { 1, 1, 1, 2, 54, 62, },
+ { 3, 1, 1, 2, 54, 64, },
+ { 4, 1, 1, 2, 54, 72, },
+ { 5, 1, 1, 2, 54, 64, },
+ { 6, 1, 1, 2, 54, 72, },
+ { 7, 1, 1, 2, 54, 54, },
+ { 8, 1, 1, 2, 54, 72, },
+ { 9, 1, 1, 2, 54, 64, },
{ 0, 1, 1, 2, 62, 64, },
{ 2, 1, 1, 2, 62, 64, },
+ { 1, 1, 1, 2, 62, 62, },
+ { 3, 1, 1, 2, 62, 64, },
+ { 4, 1, 1, 2, 62, 70, },
+ { 5, 1, 1, 2, 62, 64, },
+ { 6, 1, 1, 2, 62, 64, },
+ { 7, 1, 1, 2, 62, 54, },
+ { 8, 1, 1, 2, 62, 64, },
+ { 9, 1, 1, 2, 62, 64, },
{ 0, 1, 1, 2, 102, 58, },
{ 2, 1, 1, 2, 102, 64, },
+ { 1, 1, 1, 2, 102, 72, },
+ { 3, 1, 1, 2, 102, 58, },
+ { 4, 1, 1, 2, 102, 72, },
+ { 5, 1, 1, 2, 102, 64, },
+ { 6, 1, 1, 2, 102, 58, },
+ { 7, 1, 1, 2, 102, 54, },
+ { 8, 1, 1, 2, 102, 58, },
+ { 9, 1, 1, 2, 102, 127, },
{ 0, 1, 1, 2, 110, 72, },
{ 2, 1, 1, 2, 110, 64, },
+ { 1, 1, 1, 2, 110, 72, },
+ { 3, 1, 1, 2, 110, 72, },
+ { 4, 1, 1, 2, 110, 72, },
+ { 5, 1, 1, 2, 110, 64, },
+ { 6, 1, 1, 2, 110, 72, },
+ { 7, 1, 1, 2, 110, 54, },
+ { 8, 1, 1, 2, 110, 72, },
+ { 9, 1, 1, 2, 110, 127, },
{ 0, 1, 1, 2, 118, 72, },
{ 2, 1, 1, 2, 118, 64, },
+ { 1, 1, 1, 2, 118, 72, },
+ { 3, 1, 1, 2, 118, 127, },
+ { 4, 1, 1, 2, 118, 72, },
+ { 5, 1, 1, 2, 118, 127, },
+ { 6, 1, 1, 2, 118, 72, },
+ { 7, 1, 1, 2, 118, 54, },
+ { 8, 1, 1, 2, 118, 72, },
+ { 9, 1, 1, 2, 118, 127, },
{ 0, 1, 1, 2, 126, 72, },
{ 2, 1, 1, 2, 126, 64, },
+ { 1, 1, 1, 2, 126, 72, },
+ { 3, 1, 1, 2, 126, 127, },
+ { 4, 1, 1, 2, 126, 72, },
+ { 5, 1, 1, 2, 126, 127, },
+ { 6, 1, 1, 2, 126, 72, },
+ { 7, 1, 1, 2, 126, 54, },
+ { 8, 1, 1, 2, 126, 72, },
+ { 9, 1, 1, 2, 126, 127, },
{ 0, 1, 1, 2, 134, 72, },
{ 2, 1, 1, 2, 134, 64, },
+ { 1, 1, 1, 2, 134, 72, },
+ { 3, 1, 1, 2, 134, 72, },
+ { 4, 1, 1, 2, 134, 72, },
+ { 5, 1, 1, 2, 134, 64, },
+ { 6, 1, 1, 2, 134, 72, },
+ { 7, 1, 1, 2, 134, 54, },
+ { 8, 1, 1, 2, 134, 72, },
+ { 9, 1, 1, 2, 134, 127, },
{ 0, 1, 1, 2, 142, 72, },
{ 2, 1, 1, 2, 142, 127, },
+ { 1, 1, 1, 2, 142, 127, },
+ { 3, 1, 1, 2, 142, 72, },
+ { 4, 1, 1, 2, 142, 72, },
+ { 5, 1, 1, 2, 142, 127, },
+ { 6, 1, 1, 2, 142, 72, },
+ { 7, 1, 1, 2, 142, 127, },
+ { 8, 1, 1, 2, 142, 72, },
+ { 9, 1, 1, 2, 142, 127, },
{ 0, 1, 1, 2, 151, 72, },
{ 2, 1, 1, 2, 151, -128, },
+ { 1, 1, 1, 2, 151, 127, },
+ { 3, 1, 1, 2, 151, 72, },
+ { 4, 1, 1, 2, 151, 72, },
+ { 5, 1, 1, 2, 151, 72, },
+ { 6, 1, 1, 2, 151, 72, },
+ { 7, 1, 1, 2, 151, 54, },
+ { 8, 1, 1, 2, 151, 72, },
+ { 9, 1, 1, 2, 151, -128, },
{ 0, 1, 1, 2, 159, 72, },
{ 2, 1, 1, 2, 159, -128, },
+ { 1, 1, 1, 2, 159, 127, },
+ { 3, 1, 1, 2, 159, 72, },
+ { 4, 1, 1, 2, 159, 72, },
+ { 5, 1, 1, 2, 159, 72, },
+ { 6, 1, 1, 2, 159, 72, },
+ { 7, 1, 1, 2, 159, 54, },
+ { 8, 1, 1, 2, 159, 72, },
+ { 9, 1, 1, 2, 159, -128, },
{ 0, 1, 1, 3, 38, 60, },
{ 2, 1, 1, 3, 38, 40, },
+ { 1, 1, 1, 3, 38, 50, },
+ { 3, 1, 1, 3, 38, 40, },
+ { 4, 1, 1, 3, 38, 62, },
+ { 5, 1, 1, 3, 38, 40, },
+ { 6, 1, 1, 3, 38, 52, },
+ { 7, 1, 1, 3, 38, 30, },
+ { 8, 1, 1, 3, 38, 50, },
+ { 9, 1, 1, 3, 38, 40, },
{ 0, 1, 1, 3, 46, 68, },
{ 2, 1, 1, 3, 46, 40, },
+ { 1, 1, 1, 3, 46, 50, },
+ { 3, 1, 1, 3, 46, 40, },
+ { 4, 1, 1, 3, 46, 46, },
+ { 5, 1, 1, 3, 46, 40, },
+ { 6, 1, 1, 3, 46, 52, },
+ { 7, 1, 1, 3, 46, 30, },
+ { 8, 1, 1, 3, 46, 50, },
+ { 9, 1, 1, 3, 46, 40, },
{ 0, 1, 1, 3, 54, 68, },
{ 2, 1, 1, 3, 54, 40, },
+ { 1, 1, 1, 3, 54, 50, },
+ { 3, 1, 1, 3, 54, 40, },
+ { 4, 1, 1, 3, 54, 62, },
+ { 5, 1, 1, 3, 54, 40, },
+ { 6, 1, 1, 3, 54, 68, },
+ { 7, 1, 1, 3, 54, 30, },
+ { 8, 1, 1, 3, 54, 68, },
+ { 9, 1, 1, 3, 54, 40, },
{ 0, 1, 1, 3, 62, 58, },
{ 2, 1, 1, 3, 62, 40, },
+ { 1, 1, 1, 3, 62, 48, },
+ { 3, 1, 1, 3, 62, 40, },
+ { 4, 1, 1, 3, 62, 58, },
+ { 5, 1, 1, 3, 62, 40, },
+ { 6, 1, 1, 3, 62, 58, },
+ { 7, 1, 1, 3, 62, 30, },
+ { 8, 1, 1, 3, 62, 58, },
+ { 9, 1, 1, 3, 62, 40, },
{ 0, 1, 1, 3, 102, 54, },
{ 2, 1, 1, 3, 102, 40, },
+ { 1, 1, 1, 3, 102, 70, },
+ { 3, 1, 1, 3, 102, 54, },
+ { 4, 1, 1, 3, 102, 64, },
+ { 5, 1, 1, 3, 102, 40, },
+ { 6, 1, 1, 3, 102, 54, },
+ { 7, 1, 1, 3, 102, 30, },
+ { 8, 1, 1, 3, 102, 54, },
+ { 9, 1, 1, 3, 102, 127, },
{ 0, 1, 1, 3, 110, 68, },
{ 2, 1, 1, 3, 110, 40, },
+ { 1, 1, 1, 3, 110, 70, },
+ { 3, 1, 1, 3, 110, 68, },
+ { 4, 1, 1, 3, 110, 64, },
+ { 5, 1, 1, 3, 110, 40, },
+ { 6, 1, 1, 3, 110, 68, },
+ { 7, 1, 1, 3, 110, 30, },
+ { 8, 1, 1, 3, 110, 68, },
+ { 9, 1, 1, 3, 110, 127, },
{ 0, 1, 1, 3, 118, 68, },
{ 2, 1, 1, 3, 118, 40, },
+ { 1, 1, 1, 3, 118, 70, },
+ { 3, 1, 1, 3, 118, 127, },
+ { 4, 1, 1, 3, 118, 64, },
+ { 5, 1, 1, 3, 118, 127, },
+ { 6, 1, 1, 3, 118, 68, },
+ { 7, 1, 1, 3, 118, 30, },
+ { 8, 1, 1, 3, 118, 68, },
+ { 9, 1, 1, 3, 118, 127, },
{ 0, 1, 1, 3, 126, 68, },
{ 2, 1, 1, 3, 126, 40, },
+ { 1, 1, 1, 3, 126, 70, },
+ { 3, 1, 1, 3, 126, 127, },
+ { 4, 1, 1, 3, 126, 64, },
+ { 5, 1, 1, 3, 126, 127, },
+ { 6, 1, 1, 3, 126, 68, },
+ { 7, 1, 1, 3, 126, 30, },
+ { 8, 1, 1, 3, 126, 68, },
+ { 9, 1, 1, 3, 126, 127, },
{ 0, 1, 1, 3, 134, 68, },
{ 2, 1, 1, 3, 134, 40, },
+ { 1, 1, 1, 3, 134, 70, },
+ { 3, 1, 1, 3, 134, 68, },
+ { 4, 1, 1, 3, 134, 64, },
+ { 5, 1, 1, 3, 134, 40, },
+ { 6, 1, 1, 3, 134, 68, },
+ { 7, 1, 1, 3, 134, 30, },
+ { 8, 1, 1, 3, 134, 68, },
+ { 9, 1, 1, 3, 134, 127, },
{ 0, 1, 1, 3, 142, 68, },
{ 2, 1, 1, 3, 142, 127, },
+ { 1, 1, 1, 3, 142, 127, },
+ { 3, 1, 1, 3, 142, 68, },
+ { 4, 1, 1, 3, 142, 64, },
+ { 5, 1, 1, 3, 142, 127, },
+ { 6, 1, 1, 3, 142, 68, },
+ { 7, 1, 1, 3, 142, 127, },
+ { 8, 1, 1, 3, 142, 68, },
+ { 9, 1, 1, 3, 142, 127, },
{ 0, 1, 1, 3, 151, 72, },
{ 2, 1, 1, 3, 151, -128, },
+ { 1, 1, 1, 3, 151, 127, },
+ { 3, 1, 1, 3, 151, 72, },
+ { 4, 1, 1, 3, 151, 66, },
+ { 5, 1, 1, 3, 151, 72, },
+ { 6, 1, 1, 3, 151, 72, },
+ { 7, 1, 1, 3, 151, 30, },
+ { 8, 1, 1, 3, 151, 68, },
+ { 9, 1, 1, 3, 151, -128, },
{ 0, 1, 1, 3, 159, 72, },
{ 2, 1, 1, 3, 159, -128, },
+ { 1, 1, 1, 3, 159, 127, },
+ { 3, 1, 1, 3, 159, 72, },
+ { 4, 1, 1, 3, 159, 66, },
+ { 5, 1, 1, 3, 159, 72, },
+ { 6, 1, 1, 3, 159, 72, },
+ { 7, 1, 1, 3, 159, 30, },
+ { 8, 1, 1, 3, 159, 72, },
+ { 9, 1, 1, 3, 159, -128, },
{ 0, 1, 2, 4, 42, 64, },
{ 2, 1, 2, 4, 42, 64, },
+ { 1, 1, 2, 4, 42, 64, },
+ { 3, 1, 2, 4, 42, 64, },
+ { 4, 1, 2, 4, 42, 68, },
+ { 5, 1, 2, 4, 42, 64, },
+ { 6, 1, 2, 4, 42, 64, },
+ { 7, 1, 2, 4, 42, 54, },
+ { 8, 1, 2, 4, 42, 62, },
+ { 9, 1, 2, 4, 42, 64, },
{ 0, 1, 2, 4, 58, 62, },
{ 2, 1, 2, 4, 58, 64, },
+ { 1, 1, 2, 4, 58, 64, },
+ { 3, 1, 2, 4, 58, 62, },
+ { 4, 1, 2, 4, 58, 64, },
+ { 5, 1, 2, 4, 58, 64, },
+ { 6, 1, 2, 4, 58, 62, },
+ { 7, 1, 2, 4, 58, 54, },
+ { 8, 1, 2, 4, 58, 62, },
+ { 9, 1, 2, 4, 58, 64, },
{ 0, 1, 2, 4, 106, 58, },
{ 2, 1, 2, 4, 106, 64, },
+ { 1, 1, 2, 4, 106, 72, },
+ { 3, 1, 2, 4, 106, 58, },
+ { 4, 1, 2, 4, 106, 66, },
+ { 5, 1, 2, 4, 106, 64, },
+ { 6, 1, 2, 4, 106, 58, },
+ { 7, 1, 2, 4, 106, 54, },
+ { 8, 1, 2, 4, 106, 58, },
+ { 9, 1, 2, 4, 106, 127, },
{ 0, 1, 2, 4, 122, 72, },
{ 2, 1, 2, 4, 122, 64, },
+ { 1, 1, 2, 4, 122, 72, },
+ { 3, 1, 2, 4, 122, 127, },
+ { 4, 1, 2, 4, 122, 68, },
+ { 5, 1, 2, 4, 122, 127, },
+ { 6, 1, 2, 4, 122, 72, },
+ { 7, 1, 2, 4, 122, 54, },
+ { 8, 1, 2, 4, 122, 72, },
+ { 9, 1, 2, 4, 122, 127, },
{ 0, 1, 2, 4, 138, 72, },
{ 2, 1, 2, 4, 138, 127, },
+ { 1, 1, 2, 4, 138, 127, },
+ { 3, 1, 2, 4, 138, 72, },
+ { 4, 1, 2, 4, 138, 68, },
+ { 5, 1, 2, 4, 138, 127, },
+ { 6, 1, 2, 4, 138, 72, },
+ { 7, 1, 2, 4, 138, 127, },
+ { 8, 1, 2, 4, 138, 72, },
+ { 9, 1, 2, 4, 138, 127, },
{ 0, 1, 2, 4, 155, 72, },
{ 2, 1, 2, 4, 155, -128, },
+ { 1, 1, 2, 4, 155, 127, },
+ { 3, 1, 2, 4, 155, 72, },
+ { 4, 1, 2, 4, 155, 68, },
+ { 5, 1, 2, 4, 155, 72, },
+ { 6, 1, 2, 4, 155, 72, },
+ { 7, 1, 2, 4, 155, 54, },
+ { 8, 1, 2, 4, 155, 68, },
+ { 9, 1, 2, 4, 155, -128, },
{ 0, 1, 2, 5, 42, 54, },
{ 2, 1, 2, 5, 42, 40, },
+ { 1, 1, 2, 5, 42, 50, },
+ { 3, 1, 2, 5, 42, 40, },
+ { 4, 1, 2, 5, 42, 58, },
+ { 5, 1, 2, 5, 42, 40, },
+ { 6, 1, 2, 5, 42, 52, },
+ { 7, 1, 2, 5, 42, 30, },
+ { 8, 1, 2, 5, 42, 50, },
+ { 9, 1, 2, 5, 42, 40, },
{ 0, 1, 2, 5, 58, 52, },
{ 2, 1, 2, 5, 58, 40, },
+ { 1, 1, 2, 5, 58, 50, },
+ { 3, 1, 2, 5, 58, 40, },
+ { 4, 1, 2, 5, 58, 56, },
+ { 5, 1, 2, 5, 58, 40, },
+ { 6, 1, 2, 5, 58, 52, },
+ { 7, 1, 2, 5, 58, 30, },
+ { 8, 1, 2, 5, 58, 52, },
+ { 9, 1, 2, 5, 58, 40, },
{ 0, 1, 2, 5, 106, 50, },
{ 2, 1, 2, 5, 106, 40, },
+ { 1, 1, 2, 5, 106, 72, },
+ { 3, 1, 2, 5, 106, 50, },
+ { 4, 1, 2, 5, 106, 56, },
+ { 5, 1, 2, 5, 106, 40, },
+ { 6, 1, 2, 5, 106, 50, },
+ { 7, 1, 2, 5, 106, 30, },
+ { 8, 1, 2, 5, 106, 50, },
+ { 9, 1, 2, 5, 106, 127, },
{ 0, 1, 2, 5, 122, 66, },
{ 2, 1, 2, 5, 122, 40, },
+ { 1, 1, 2, 5, 122, 72, },
+ { 3, 1, 2, 5, 122, 127, },
+ { 4, 1, 2, 5, 122, 56, },
+ { 5, 1, 2, 5, 122, 127, },
+ { 6, 1, 2, 5, 122, 66, },
+ { 7, 1, 2, 5, 122, 30, },
+ { 8, 1, 2, 5, 122, 66, },
+ { 9, 1, 2, 5, 122, 127, },
{ 0, 1, 2, 5, 138, 66, },
{ 2, 1, 2, 5, 138, 127, },
+ { 1, 1, 2, 5, 138, 127, },
+ { 3, 1, 2, 5, 138, 66, },
+ { 4, 1, 2, 5, 138, 58, },
+ { 5, 1, 2, 5, 138, 127, },
+ { 6, 1, 2, 5, 138, 66, },
+ { 7, 1, 2, 5, 138, 127, },
+ { 8, 1, 2, 5, 138, 66, },
+ { 9, 1, 2, 5, 138, 127, },
{ 0, 1, 2, 5, 155, 62, },
{ 2, 1, 2, 5, 155, -128, },
+ { 1, 1, 2, 5, 155, 127, },
+ { 3, 1, 2, 5, 155, 62, },
+ { 4, 1, 2, 5, 155, 58, },
+ { 5, 1, 2, 5, 155, 72, },
+ { 6, 1, 2, 5, 155, 62, },
+ { 7, 1, 2, 5, 155, 30, },
+ { 8, 1, 2, 5, 155, 62, },
+ { 9, 1, 2, 5, 155, -128, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
+ { 0, 0, 0, 0, 1, 72, },
+ { 2, 0, 0, 0, 1, 60, },
{ 1, 0, 0, 0, 1, 68, },
{ 3, 0, 0, 0, 1, 72, },
{ 4, 0, 0, 0, 1, 76, },
@@ -12112,6 +25743,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 1, 72, },
{ 7, 0, 0, 0, 1, 60, },
{ 8, 0, 0, 0, 1, 72, },
+ { 9, 0, 0, 0, 1, 60, },
+ { 0, 0, 0, 0, 2, 72, },
+ { 2, 0, 0, 0, 2, 60, },
{ 1, 0, 0, 0, 2, 68, },
{ 3, 0, 0, 0, 2, 72, },
{ 4, 0, 0, 0, 2, 76, },
@@ -12119,6 +25753,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 2, 72, },
{ 7, 0, 0, 0, 2, 60, },
{ 8, 0, 0, 0, 2, 72, },
+ { 9, 0, 0, 0, 2, 60, },
+ { 0, 0, 0, 0, 3, 76, },
+ { 2, 0, 0, 0, 3, 60, },
{ 1, 0, 0, 0, 3, 68, },
{ 3, 0, 0, 0, 3, 76, },
{ 4, 0, 0, 0, 3, 76, },
@@ -12126,6 +25763,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 3, 76, },
{ 7, 0, 0, 0, 3, 60, },
{ 8, 0, 0, 0, 3, 76, },
+ { 9, 0, 0, 0, 3, 60, },
+ { 0, 0, 0, 0, 4, 76, },
+ { 2, 0, 0, 0, 4, 60, },
{ 1, 0, 0, 0, 4, 68, },
{ 3, 0, 0, 0, 4, 76, },
{ 4, 0, 0, 0, 4, 76, },
@@ -12133,6 +25773,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 4, 76, },
{ 7, 0, 0, 0, 4, 60, },
{ 8, 0, 0, 0, 4, 76, },
+ { 9, 0, 0, 0, 4, 60, },
+ { 0, 0, 0, 0, 5, 76, },
+ { 2, 0, 0, 0, 5, 60, },
{ 1, 0, 0, 0, 5, 68, },
{ 3, 0, 0, 0, 5, 76, },
{ 4, 0, 0, 0, 5, 76, },
@@ -12140,6 +25783,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 5, 76, },
{ 7, 0, 0, 0, 5, 60, },
{ 8, 0, 0, 0, 5, 76, },
+ { 9, 0, 0, 0, 5, 60, },
+ { 0, 0, 0, 0, 6, 76, },
+ { 2, 0, 0, 0, 6, 60, },
{ 1, 0, 0, 0, 6, 68, },
{ 3, 0, 0, 0, 6, 76, },
{ 4, 0, 0, 0, 6, 76, },
@@ -12147,6 +25793,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 6, 76, },
{ 7, 0, 0, 0, 6, 60, },
{ 8, 0, 0, 0, 6, 76, },
+ { 9, 0, 0, 0, 6, 60, },
+ { 0, 0, 0, 0, 7, 76, },
+ { 2, 0, 0, 0, 7, 60, },
{ 1, 0, 0, 0, 7, 68, },
{ 3, 0, 0, 0, 7, 76, },
{ 4, 0, 0, 0, 7, 76, },
@@ -12154,6 +25803,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 7, 76, },
{ 7, 0, 0, 0, 7, 60, },
{ 8, 0, 0, 0, 7, 76, },
+ { 9, 0, 0, 0, 7, 60, },
+ { 0, 0, 0, 0, 8, 76, },
+ { 2, 0, 0, 0, 8, 60, },
{ 1, 0, 0, 0, 8, 68, },
{ 3, 0, 0, 0, 8, 76, },
{ 4, 0, 0, 0, 8, 76, },
@@ -12161,6 +25813,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 8, 76, },
{ 7, 0, 0, 0, 8, 60, },
{ 8, 0, 0, 0, 8, 76, },
+ { 9, 0, 0, 0, 8, 60, },
+ { 0, 0, 0, 0, 9, 76, },
+ { 2, 0, 0, 0, 9, 60, },
{ 1, 0, 0, 0, 9, 68, },
{ 3, 0, 0, 0, 9, 76, },
{ 4, 0, 0, 0, 9, 76, },
@@ -12168,6 +25823,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 9, 76, },
{ 7, 0, 0, 0, 9, 60, },
{ 8, 0, 0, 0, 9, 76, },
+ { 9, 0, 0, 0, 9, 60, },
+ { 0, 0, 0, 0, 10, 72, },
+ { 2, 0, 0, 0, 10, 60, },
{ 1, 0, 0, 0, 10, 68, },
{ 3, 0, 0, 0, 10, 72, },
{ 4, 0, 0, 0, 10, 76, },
@@ -12175,6 +25833,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 10, 72, },
{ 7, 0, 0, 0, 10, 60, },
{ 8, 0, 0, 0, 10, 72, },
+ { 9, 0, 0, 0, 10, 60, },
+ { 0, 0, 0, 0, 11, 72, },
+ { 2, 0, 0, 0, 11, 60, },
{ 1, 0, 0, 0, 11, 68, },
{ 3, 0, 0, 0, 11, 72, },
{ 4, 0, 0, 0, 11, 76, },
@@ -12182,6 +25843,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 11, 72, },
{ 7, 0, 0, 0, 11, 60, },
{ 8, 0, 0, 0, 11, 72, },
+ { 9, 0, 0, 0, 11, 60, },
+ { 0, 0, 0, 0, 12, 52, },
+ { 2, 0, 0, 0, 12, 60, },
{ 1, 0, 0, 0, 12, 68, },
{ 3, 0, 0, 0, 12, 52, },
{ 4, 0, 0, 0, 12, 76, },
@@ -12189,6 +25853,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 12, 52, },
{ 7, 0, 0, 0, 12, 60, },
{ 8, 0, 0, 0, 12, 52, },
+ { 9, 0, 0, 0, 12, 60, },
+ { 0, 0, 0, 0, 13, 48, },
+ { 2, 0, 0, 0, 13, 60, },
{ 1, 0, 0, 0, 13, 68, },
{ 3, 0, 0, 0, 13, 48, },
{ 4, 0, 0, 0, 13, 76, },
@@ -12196,6 +25863,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 13, 48, },
{ 7, 0, 0, 0, 13, 60, },
{ 8, 0, 0, 0, 13, 48, },
+ { 9, 0, 0, 0, 13, 60, },
+ { 0, 0, 0, 0, 14, 127, },
+ { 2, 0, 0, 0, 14, 127, },
{ 1, 0, 0, 0, 14, 68, },
{ 3, 0, 0, 0, 14, 127, },
{ 4, 0, 0, 0, 14, 127, },
@@ -12203,6 +25873,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 0, 14, 127, },
{ 7, 0, 0, 0, 14, 127, },
{ 8, 0, 0, 0, 14, 127, },
+ { 9, 0, 0, 0, 14, 127, },
+ { 0, 0, 0, 1, 1, 52, },
+ { 2, 0, 0, 1, 1, 60, },
{ 1, 0, 0, 1, 1, 76, },
{ 3, 0, 0, 1, 1, 52, },
{ 4, 0, 0, 1, 1, 76, },
@@ -12210,6 +25883,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 1, 52, },
{ 7, 0, 0, 1, 1, 60, },
{ 8, 0, 0, 1, 1, 52, },
+ { 9, 0, 0, 1, 1, 60, },
+ { 0, 0, 0, 1, 2, 60, },
+ { 2, 0, 0, 1, 2, 60, },
{ 1, 0, 0, 1, 2, 76, },
{ 3, 0, 0, 1, 2, 60, },
{ 4, 0, 0, 1, 2, 76, },
@@ -12217,6 +25893,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 2, 60, },
{ 7, 0, 0, 1, 2, 60, },
{ 8, 0, 0, 1, 2, 60, },
+ { 9, 0, 0, 1, 2, 60, },
+ { 0, 0, 0, 1, 3, 64, },
+ { 2, 0, 0, 1, 3, 60, },
{ 1, 0, 0, 1, 3, 76, },
{ 3, 0, 0, 1, 3, 64, },
{ 4, 0, 0, 1, 3, 76, },
@@ -12224,6 +25903,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 3, 64, },
{ 7, 0, 0, 1, 3, 60, },
{ 8, 0, 0, 1, 3, 64, },
+ { 9, 0, 0, 1, 3, 60, },
+ { 0, 0, 0, 1, 4, 68, },
+ { 2, 0, 0, 1, 4, 60, },
{ 1, 0, 0, 1, 4, 76, },
{ 3, 0, 0, 1, 4, 68, },
{ 4, 0, 0, 1, 4, 76, },
@@ -12231,6 +25913,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 4, 68, },
{ 7, 0, 0, 1, 4, 60, },
{ 8, 0, 0, 1, 4, 68, },
+ { 9, 0, 0, 1, 4, 60, },
+ { 0, 0, 0, 1, 5, 76, },
+ { 2, 0, 0, 1, 5, 60, },
{ 1, 0, 0, 1, 5, 76, },
{ 3, 0, 0, 1, 5, 76, },
{ 4, 0, 0, 1, 5, 76, },
@@ -12238,6 +25923,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 5, 76, },
{ 7, 0, 0, 1, 5, 60, },
{ 8, 0, 0, 1, 5, 76, },
+ { 9, 0, 0, 1, 5, 60, },
+ { 0, 0, 0, 1, 6, 76, },
+ { 2, 0, 0, 1, 6, 60, },
{ 1, 0, 0, 1, 6, 76, },
{ 3, 0, 0, 1, 6, 76, },
{ 4, 0, 0, 1, 6, 76, },
@@ -12245,6 +25933,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 6, 76, },
{ 7, 0, 0, 1, 6, 60, },
{ 8, 0, 0, 1, 6, 76, },
+ { 9, 0, 0, 1, 6, 60, },
+ { 0, 0, 0, 1, 7, 76, },
+ { 2, 0, 0, 1, 7, 60, },
{ 1, 0, 0, 1, 7, 76, },
{ 3, 0, 0, 1, 7, 76, },
{ 4, 0, 0, 1, 7, 76, },
@@ -12252,6 +25943,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 7, 76, },
{ 7, 0, 0, 1, 7, 60, },
{ 8, 0, 0, 1, 7, 76, },
+ { 9, 0, 0, 1, 7, 60, },
+ { 0, 0, 0, 1, 8, 68, },
+ { 2, 0, 0, 1, 8, 60, },
{ 1, 0, 0, 1, 8, 76, },
{ 3, 0, 0, 1, 8, 68, },
{ 4, 0, 0, 1, 8, 76, },
@@ -12259,6 +25953,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 8, 68, },
{ 7, 0, 0, 1, 8, 60, },
{ 8, 0, 0, 1, 8, 68, },
+ { 9, 0, 0, 1, 8, 60, },
+ { 0, 0, 0, 1, 9, 64, },
+ { 2, 0, 0, 1, 9, 60, },
{ 1, 0, 0, 1, 9, 76, },
{ 3, 0, 0, 1, 9, 64, },
{ 4, 0, 0, 1, 9, 76, },
@@ -12266,6 +25963,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 9, 64, },
{ 7, 0, 0, 1, 9, 60, },
{ 8, 0, 0, 1, 9, 64, },
+ { 9, 0, 0, 1, 9, 60, },
+ { 0, 0, 0, 1, 10, 60, },
+ { 2, 0, 0, 1, 10, 60, },
{ 1, 0, 0, 1, 10, 76, },
{ 3, 0, 0, 1, 10, 60, },
{ 4, 0, 0, 1, 10, 76, },
@@ -12273,6 +25973,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 10, 60, },
{ 7, 0, 0, 1, 10, 60, },
{ 8, 0, 0, 1, 10, 60, },
+ { 9, 0, 0, 1, 10, 60, },
+ { 0, 0, 0, 1, 11, 52, },
+ { 2, 0, 0, 1, 11, 60, },
{ 1, 0, 0, 1, 11, 76, },
{ 3, 0, 0, 1, 11, 52, },
{ 4, 0, 0, 1, 11, 76, },
@@ -12280,6 +25983,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 11, 52, },
{ 7, 0, 0, 1, 11, 60, },
{ 8, 0, 0, 1, 11, 52, },
+ { 9, 0, 0, 1, 11, 60, },
+ { 0, 0, 0, 1, 12, 40, },
+ { 2, 0, 0, 1, 12, 60, },
{ 1, 0, 0, 1, 12, 76, },
{ 3, 0, 0, 1, 12, 40, },
{ 4, 0, 0, 1, 12, 76, },
@@ -12287,6 +25993,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 12, 40, },
{ 7, 0, 0, 1, 12, 60, },
{ 8, 0, 0, 1, 12, 40, },
+ { 9, 0, 0, 1, 12, 60, },
+ { 0, 0, 0, 1, 13, 28, },
+ { 2, 0, 0, 1, 13, 60, },
{ 1, 0, 0, 1, 13, 76, },
{ 3, 0, 0, 1, 13, 28, },
{ 4, 0, 0, 1, 13, 70, },
@@ -12294,6 +26003,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 13, 28, },
{ 7, 0, 0, 1, 13, 60, },
{ 8, 0, 0, 1, 13, 28, },
+ { 9, 0, 0, 1, 13, 60, },
+ { 0, 0, 0, 1, 14, 127, },
+ { 2, 0, 0, 1, 14, 127, },
{ 1, 0, 0, 1, 14, 127, },
{ 3, 0, 0, 1, 14, 127, },
{ 4, 0, 0, 1, 14, 127, },
@@ -12301,6 +26013,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 1, 14, 127, },
{ 7, 0, 0, 1, 14, 127, },
{ 8, 0, 0, 1, 14, 127, },
+ { 9, 0, 0, 1, 14, 127, },
+ { 0, 0, 0, 2, 1, 52, },
+ { 2, 0, 0, 2, 1, 60, },
{ 1, 0, 0, 2, 1, 76, },
{ 3, 0, 0, 2, 1, 52, },
{ 4, 0, 0, 2, 1, 76, },
@@ -12308,6 +26023,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 1, 52, },
{ 7, 0, 0, 2, 1, 60, },
{ 8, 0, 0, 2, 1, 52, },
+ { 9, 0, 0, 2, 1, 60, },
+ { 0, 0, 0, 2, 2, 60, },
+ { 2, 0, 0, 2, 2, 60, },
{ 1, 0, 0, 2, 2, 76, },
{ 3, 0, 0, 2, 2, 60, },
{ 4, 0, 0, 2, 2, 76, },
@@ -12315,6 +26033,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 2, 60, },
{ 7, 0, 0, 2, 2, 60, },
{ 8, 0, 0, 2, 2, 60, },
+ { 9, 0, 0, 2, 2, 60, },
+ { 0, 0, 0, 2, 3, 64, },
+ { 2, 0, 0, 2, 3, 60, },
{ 1, 0, 0, 2, 3, 76, },
{ 3, 0, 0, 2, 3, 64, },
{ 4, 0, 0, 2, 3, 76, },
@@ -12322,6 +26043,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 3, 64, },
{ 7, 0, 0, 2, 3, 60, },
{ 8, 0, 0, 2, 3, 64, },
+ { 9, 0, 0, 2, 3, 60, },
+ { 0, 0, 0, 2, 4, 68, },
+ { 2, 0, 0, 2, 4, 60, },
{ 1, 0, 0, 2, 4, 76, },
{ 3, 0, 0, 2, 4, 68, },
{ 4, 0, 0, 2, 4, 76, },
@@ -12329,6 +26053,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 4, 68, },
{ 7, 0, 0, 2, 4, 60, },
{ 8, 0, 0, 2, 4, 68, },
+ { 9, 0, 0, 2, 4, 60, },
+ { 0, 0, 0, 2, 5, 76, },
+ { 2, 0, 0, 2, 5, 60, },
{ 1, 0, 0, 2, 5, 76, },
{ 3, 0, 0, 2, 5, 76, },
{ 4, 0, 0, 2, 5, 76, },
@@ -12336,6 +26063,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 5, 76, },
{ 7, 0, 0, 2, 5, 60, },
{ 8, 0, 0, 2, 5, 76, },
+ { 9, 0, 0, 2, 5, 60, },
+ { 0, 0, 0, 2, 6, 76, },
+ { 2, 0, 0, 2, 6, 60, },
{ 1, 0, 0, 2, 6, 76, },
{ 3, 0, 0, 2, 6, 76, },
{ 4, 0, 0, 2, 6, 76, },
@@ -12343,6 +26073,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 6, 76, },
{ 7, 0, 0, 2, 6, 60, },
{ 8, 0, 0, 2, 6, 76, },
+ { 9, 0, 0, 2, 6, 60, },
+ { 0, 0, 0, 2, 7, 76, },
+ { 2, 0, 0, 2, 7, 60, },
{ 1, 0, 0, 2, 7, 76, },
{ 3, 0, 0, 2, 7, 76, },
{ 4, 0, 0, 2, 7, 76, },
@@ -12350,6 +26083,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 7, 76, },
{ 7, 0, 0, 2, 7, 60, },
{ 8, 0, 0, 2, 7, 76, },
+ { 9, 0, 0, 2, 7, 60, },
+ { 0, 0, 0, 2, 8, 68, },
+ { 2, 0, 0, 2, 8, 60, },
{ 1, 0, 0, 2, 8, 76, },
{ 3, 0, 0, 2, 8, 68, },
{ 4, 0, 0, 2, 8, 76, },
@@ -12357,6 +26093,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 8, 68, },
{ 7, 0, 0, 2, 8, 60, },
{ 8, 0, 0, 2, 8, 68, },
+ { 9, 0, 0, 2, 8, 60, },
+ { 0, 0, 0, 2, 9, 64, },
+ { 2, 0, 0, 2, 9, 60, },
{ 1, 0, 0, 2, 9, 76, },
{ 3, 0, 0, 2, 9, 64, },
{ 4, 0, 0, 2, 9, 76, },
@@ -12364,6 +26103,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 9, 64, },
{ 7, 0, 0, 2, 9, 60, },
{ 8, 0, 0, 2, 9, 64, },
+ { 9, 0, 0, 2, 9, 60, },
+ { 0, 0, 0, 2, 10, 60, },
+ { 2, 0, 0, 2, 10, 60, },
{ 1, 0, 0, 2, 10, 76, },
{ 3, 0, 0, 2, 10, 60, },
{ 4, 0, 0, 2, 10, 76, },
@@ -12371,6 +26113,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 10, 60, },
{ 7, 0, 0, 2, 10, 60, },
{ 8, 0, 0, 2, 10, 60, },
+ { 9, 0, 0, 2, 10, 60, },
+ { 0, 0, 0, 2, 11, 52, },
+ { 2, 0, 0, 2, 11, 60, },
{ 1, 0, 0, 2, 11, 76, },
{ 3, 0, 0, 2, 11, 52, },
{ 4, 0, 0, 2, 11, 76, },
@@ -12378,6 +26123,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 11, 52, },
{ 7, 0, 0, 2, 11, 60, },
{ 8, 0, 0, 2, 11, 52, },
+ { 9, 0, 0, 2, 11, 60, },
+ { 0, 0, 0, 2, 12, 40, },
+ { 2, 0, 0, 2, 12, 60, },
{ 1, 0, 0, 2, 12, 76, },
{ 3, 0, 0, 2, 12, 40, },
{ 4, 0, 0, 2, 12, 76, },
@@ -12385,6 +26133,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 12, 40, },
{ 7, 0, 0, 2, 12, 60, },
{ 8, 0, 0, 2, 12, 40, },
+ { 9, 0, 0, 2, 12, 60, },
+ { 0, 0, 0, 2, 13, 28, },
+ { 2, 0, 0, 2, 13, 60, },
{ 1, 0, 0, 2, 13, 76, },
{ 3, 0, 0, 2, 13, 28, },
{ 4, 0, 0, 2, 13, 72, },
@@ -12392,6 +26143,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 13, 28, },
{ 7, 0, 0, 2, 13, 60, },
{ 8, 0, 0, 2, 13, 28, },
+ { 9, 0, 0, 2, 13, 60, },
+ { 0, 0, 0, 2, 14, 127, },
+ { 2, 0, 0, 2, 14, 127, },
{ 1, 0, 0, 2, 14, 127, },
{ 3, 0, 0, 2, 14, 127, },
{ 4, 0, 0, 2, 14, 127, },
@@ -12399,6 +26153,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 2, 14, 127, },
{ 7, 0, 0, 2, 14, 127, },
{ 8, 0, 0, 2, 14, 127, },
+ { 9, 0, 0, 2, 14, 127, },
+ { 0, 0, 0, 3, 1, 52, },
+ { 2, 0, 0, 3, 1, 36, },
{ 1, 0, 0, 3, 1, 66, },
{ 3, 0, 0, 3, 1, 52, },
{ 4, 0, 0, 3, 1, 68, },
@@ -12406,6 +26163,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 1, 52, },
{ 7, 0, 0, 3, 1, 36, },
{ 8, 0, 0, 3, 1, 52, },
+ { 9, 0, 0, 3, 1, 36, },
+ { 0, 0, 0, 3, 2, 60, },
+ { 2, 0, 0, 3, 2, 36, },
{ 1, 0, 0, 3, 2, 66, },
{ 3, 0, 0, 3, 2, 60, },
{ 4, 0, 0, 3, 2, 70, },
@@ -12413,6 +26173,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 2, 60, },
{ 7, 0, 0, 3, 2, 36, },
{ 8, 0, 0, 3, 2, 60, },
+ { 9, 0, 0, 3, 2, 36, },
+ { 0, 0, 0, 3, 3, 64, },
+ { 2, 0, 0, 3, 3, 36, },
{ 1, 0, 0, 3, 3, 66, },
{ 3, 0, 0, 3, 3, 64, },
{ 4, 0, 0, 3, 3, 70, },
@@ -12420,6 +26183,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 3, 64, },
{ 7, 0, 0, 3, 3, 36, },
{ 8, 0, 0, 3, 3, 64, },
+ { 9, 0, 0, 3, 3, 36, },
+ { 0, 0, 0, 3, 4, 68, },
+ { 2, 0, 0, 3, 4, 36, },
{ 1, 0, 0, 3, 4, 66, },
{ 3, 0, 0, 3, 4, 68, },
{ 4, 0, 0, 3, 4, 70, },
@@ -12427,6 +26193,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 4, 68, },
{ 7, 0, 0, 3, 4, 36, },
{ 8, 0, 0, 3, 4, 68, },
+ { 9, 0, 0, 3, 4, 36, },
+ { 0, 0, 0, 3, 5, 76, },
+ { 2, 0, 0, 3, 5, 36, },
{ 1, 0, 0, 3, 5, 66, },
{ 3, 0, 0, 3, 5, 76, },
{ 4, 0, 0, 3, 5, 70, },
@@ -12434,6 +26203,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 5, 76, },
{ 7, 0, 0, 3, 5, 36, },
{ 8, 0, 0, 3, 5, 76, },
+ { 9, 0, 0, 3, 5, 36, },
+ { 0, 0, 0, 3, 6, 76, },
+ { 2, 0, 0, 3, 6, 36, },
{ 1, 0, 0, 3, 6, 66, },
{ 3, 0, 0, 3, 6, 76, },
{ 4, 0, 0, 3, 6, 70, },
@@ -12441,6 +26213,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 6, 76, },
{ 7, 0, 0, 3, 6, 36, },
{ 8, 0, 0, 3, 6, 76, },
+ { 9, 0, 0, 3, 6, 36, },
+ { 0, 0, 0, 3, 7, 76, },
+ { 2, 0, 0, 3, 7, 36, },
{ 1, 0, 0, 3, 7, 66, },
{ 3, 0, 0, 3, 7, 76, },
{ 4, 0, 0, 3, 7, 70, },
@@ -12448,6 +26223,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 7, 76, },
{ 7, 0, 0, 3, 7, 36, },
{ 8, 0, 0, 3, 7, 76, },
+ { 9, 0, 0, 3, 7, 36, },
+ { 0, 0, 0, 3, 8, 68, },
+ { 2, 0, 0, 3, 8, 36, },
{ 1, 0, 0, 3, 8, 66, },
{ 3, 0, 0, 3, 8, 68, },
{ 4, 0, 0, 3, 8, 70, },
@@ -12455,6 +26233,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 8, 68, },
{ 7, 0, 0, 3, 8, 36, },
{ 8, 0, 0, 3, 8, 68, },
+ { 9, 0, 0, 3, 8, 36, },
+ { 0, 0, 0, 3, 9, 64, },
+ { 2, 0, 0, 3, 9, 36, },
{ 1, 0, 0, 3, 9, 66, },
{ 3, 0, 0, 3, 9, 64, },
{ 4, 0, 0, 3, 9, 70, },
@@ -12462,6 +26243,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 9, 64, },
{ 7, 0, 0, 3, 9, 36, },
{ 8, 0, 0, 3, 9, 64, },
+ { 9, 0, 0, 3, 9, 36, },
+ { 0, 0, 0, 3, 10, 60, },
+ { 2, 0, 0, 3, 10, 36, },
{ 1, 0, 0, 3, 10, 66, },
{ 3, 0, 0, 3, 10, 60, },
{ 4, 0, 0, 3, 10, 70, },
@@ -12469,6 +26253,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 10, 60, },
{ 7, 0, 0, 3, 10, 36, },
{ 8, 0, 0, 3, 10, 60, },
+ { 9, 0, 0, 3, 10, 36, },
+ { 0, 0, 0, 3, 11, 52, },
+ { 2, 0, 0, 3, 11, 36, },
{ 1, 0, 0, 3, 11, 66, },
{ 3, 0, 0, 3, 11, 52, },
{ 4, 0, 0, 3, 11, 70, },
@@ -12476,6 +26263,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 11, 52, },
{ 7, 0, 0, 3, 11, 36, },
{ 8, 0, 0, 3, 11, 52, },
+ { 9, 0, 0, 3, 11, 36, },
+ { 0, 0, 0, 3, 12, 40, },
+ { 2, 0, 0, 3, 12, 36, },
{ 1, 0, 0, 3, 12, 66, },
{ 3, 0, 0, 3, 12, 40, },
{ 4, 0, 0, 3, 12, 70, },
@@ -12483,6 +26273,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 12, 40, },
{ 7, 0, 0, 3, 12, 36, },
{ 8, 0, 0, 3, 12, 40, },
+ { 9, 0, 0, 3, 12, 36, },
+ { 0, 0, 0, 3, 13, 28, },
+ { 2, 0, 0, 3, 13, 36, },
{ 1, 0, 0, 3, 13, 66, },
{ 3, 0, 0, 3, 13, 28, },
{ 4, 0, 0, 3, 13, 62, },
@@ -12490,6 +26283,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 13, 28, },
{ 7, 0, 0, 3, 13, 36, },
{ 8, 0, 0, 3, 13, 28, },
+ { 9, 0, 0, 3, 13, 36, },
+ { 0, 0, 0, 3, 14, 127, },
+ { 2, 0, 0, 3, 14, 127, },
{ 1, 0, 0, 3, 14, 127, },
{ 3, 0, 0, 3, 14, 127, },
{ 4, 0, 0, 3, 14, 127, },
@@ -12497,6 +26293,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 0, 3, 14, 127, },
{ 7, 0, 0, 3, 14, 127, },
{ 8, 0, 0, 3, 14, 127, },
+ { 9, 0, 0, 3, 14, 127, },
+ { 0, 0, 1, 2, 1, 127, },
+ { 2, 0, 1, 2, 1, 127, },
{ 1, 0, 1, 2, 1, 127, },
{ 3, 0, 1, 2, 1, 127, },
{ 4, 0, 1, 2, 1, 127, },
@@ -12504,6 +26303,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 1, 127, },
{ 7, 0, 1, 2, 1, 127, },
{ 8, 0, 1, 2, 1, 127, },
+ { 9, 0, 1, 2, 1, 127, },
+ { 0, 0, 1, 2, 2, 127, },
+ { 2, 0, 1, 2, 2, 127, },
{ 1, 0, 1, 2, 2, 127, },
{ 3, 0, 1, 2, 2, 127, },
{ 4, 0, 1, 2, 2, 127, },
@@ -12511,6 +26313,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 2, 127, },
{ 7, 0, 1, 2, 2, 127, },
{ 8, 0, 1, 2, 2, 127, },
+ { 9, 0, 1, 2, 2, 127, },
+ { 0, 0, 1, 2, 3, 52, },
+ { 2, 0, 1, 2, 3, 60, },
{ 1, 0, 1, 2, 3, 72, },
{ 3, 0, 1, 2, 3, 52, },
{ 4, 0, 1, 2, 3, 72, },
@@ -12518,6 +26323,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 3, 52, },
{ 7, 0, 1, 2, 3, 60, },
{ 8, 0, 1, 2, 3, 52, },
+ { 9, 0, 1, 2, 3, 60, },
+ { 0, 0, 1, 2, 4, 52, },
+ { 2, 0, 1, 2, 4, 60, },
{ 1, 0, 1, 2, 4, 72, },
{ 3, 0, 1, 2, 4, 52, },
{ 4, 0, 1, 2, 4, 72, },
@@ -12525,6 +26333,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 4, 52, },
{ 7, 0, 1, 2, 4, 60, },
{ 8, 0, 1, 2, 4, 52, },
+ { 9, 0, 1, 2, 4, 60, },
+ { 0, 0, 1, 2, 5, 60, },
+ { 2, 0, 1, 2, 5, 60, },
{ 1, 0, 1, 2, 5, 72, },
{ 3, 0, 1, 2, 5, 60, },
{ 4, 0, 1, 2, 5, 72, },
@@ -12532,6 +26343,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 5, 60, },
{ 7, 0, 1, 2, 5, 60, },
{ 8, 0, 1, 2, 5, 60, },
+ { 9, 0, 1, 2, 5, 60, },
+ { 0, 0, 1, 2, 6, 64, },
+ { 2, 0, 1, 2, 6, 60, },
{ 1, 0, 1, 2, 6, 72, },
{ 3, 0, 1, 2, 6, 64, },
{ 4, 0, 1, 2, 6, 72, },
@@ -12539,6 +26353,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 6, 64, },
{ 7, 0, 1, 2, 6, 60, },
{ 8, 0, 1, 2, 6, 64, },
+ { 9, 0, 1, 2, 6, 60, },
+ { 0, 0, 1, 2, 7, 60, },
+ { 2, 0, 1, 2, 7, 60, },
{ 1, 0, 1, 2, 7, 72, },
{ 3, 0, 1, 2, 7, 60, },
{ 4, 0, 1, 2, 7, 72, },
@@ -12546,6 +26363,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 7, 60, },
{ 7, 0, 1, 2, 7, 60, },
{ 8, 0, 1, 2, 7, 60, },
+ { 9, 0, 1, 2, 7, 60, },
+ { 0, 0, 1, 2, 8, 52, },
+ { 2, 0, 1, 2, 8, 60, },
{ 1, 0, 1, 2, 8, 72, },
{ 3, 0, 1, 2, 8, 52, },
{ 4, 0, 1, 2, 8, 72, },
@@ -12553,6 +26373,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 8, 52, },
{ 7, 0, 1, 2, 8, 60, },
{ 8, 0, 1, 2, 8, 52, },
+ { 9, 0, 1, 2, 8, 60, },
+ { 0, 0, 1, 2, 9, 52, },
+ { 2, 0, 1, 2, 9, 60, },
{ 1, 0, 1, 2, 9, 72, },
{ 3, 0, 1, 2, 9, 52, },
{ 4, 0, 1, 2, 9, 72, },
@@ -12560,6 +26383,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 9, 52, },
{ 7, 0, 1, 2, 9, 60, },
{ 8, 0, 1, 2, 9, 52, },
+ { 9, 0, 1, 2, 9, 60, },
+ { 0, 0, 1, 2, 10, 40, },
+ { 2, 0, 1, 2, 10, 60, },
{ 1, 0, 1, 2, 10, 72, },
{ 3, 0, 1, 2, 10, 40, },
{ 4, 0, 1, 2, 10, 72, },
@@ -12567,6 +26393,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 10, 40, },
{ 7, 0, 1, 2, 10, 60, },
{ 8, 0, 1, 2, 10, 40, },
+ { 9, 0, 1, 2, 10, 60, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 60, },
{ 1, 0, 1, 2, 11, 72, },
{ 3, 0, 1, 2, 11, 28, },
{ 4, 0, 1, 2, 11, 70, },
@@ -12574,6 +26403,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 11, 28, },
{ 7, 0, 1, 2, 11, 60, },
{ 8, 0, 1, 2, 11, 28, },
+ { 9, 0, 1, 2, 11, 60, },
+ { 0, 0, 1, 2, 12, 127, },
+ { 2, 0, 1, 2, 12, 127, },
{ 1, 0, 1, 2, 12, 127, },
{ 3, 0, 1, 2, 12, 127, },
{ 4, 0, 1, 2, 12, 127, },
@@ -12581,6 +26413,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 12, 127, },
{ 7, 0, 1, 2, 12, 127, },
{ 8, 0, 1, 2, 12, 127, },
+ { 9, 0, 1, 2, 12, 127, },
+ { 0, 0, 1, 2, 13, 127, },
+ { 2, 0, 1, 2, 13, 127, },
{ 1, 0, 1, 2, 13, 127, },
{ 3, 0, 1, 2, 13, 127, },
{ 4, 0, 1, 2, 13, 127, },
@@ -12588,6 +26423,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 13, 127, },
{ 7, 0, 1, 2, 13, 127, },
{ 8, 0, 1, 2, 13, 127, },
+ { 9, 0, 1, 2, 13, 127, },
+ { 0, 0, 1, 2, 14, 127, },
+ { 2, 0, 1, 2, 14, 127, },
{ 1, 0, 1, 2, 14, 127, },
{ 3, 0, 1, 2, 14, 127, },
{ 4, 0, 1, 2, 14, 127, },
@@ -12595,6 +26433,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 2, 14, 127, },
{ 7, 0, 1, 2, 14, 127, },
{ 8, 0, 1, 2, 14, 127, },
+ { 9, 0, 1, 2, 14, 127, },
+ { 0, 0, 1, 3, 1, 127, },
+ { 2, 0, 1, 3, 1, 127, },
{ 1, 0, 1, 3, 1, 127, },
{ 3, 0, 1, 3, 1, 127, },
{ 4, 0, 1, 3, 1, 127, },
@@ -12602,6 +26443,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 1, 127, },
{ 7, 0, 1, 3, 1, 127, },
{ 8, 0, 1, 3, 1, 127, },
+ { 9, 0, 1, 3, 1, 127, },
+ { 0, 0, 1, 3, 2, 127, },
+ { 2, 0, 1, 3, 2, 127, },
{ 1, 0, 1, 3, 2, 127, },
{ 3, 0, 1, 3, 2, 127, },
{ 4, 0, 1, 3, 2, 127, },
@@ -12609,6 +26453,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 2, 127, },
{ 7, 0, 1, 3, 2, 127, },
{ 8, 0, 1, 3, 2, 127, },
+ { 9, 0, 1, 3, 2, 127, },
+ { 0, 0, 1, 3, 3, 48, },
+ { 2, 0, 1, 3, 3, 36, },
{ 1, 0, 1, 3, 3, 66, },
{ 3, 0, 1, 3, 3, 48, },
{ 4, 0, 1, 3, 3, 66, },
@@ -12616,6 +26463,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 3, 48, },
{ 7, 0, 1, 3, 3, 36, },
{ 8, 0, 1, 3, 3, 48, },
+ { 9, 0, 1, 3, 3, 36, },
+ { 0, 0, 1, 3, 4, 48, },
+ { 2, 0, 1, 3, 4, 36, },
{ 1, 0, 1, 3, 4, 66, },
{ 3, 0, 1, 3, 4, 48, },
{ 4, 0, 1, 3, 4, 70, },
@@ -12623,6 +26473,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 4, 48, },
{ 7, 0, 1, 3, 4, 36, },
{ 8, 0, 1, 3, 4, 48, },
+ { 9, 0, 1, 3, 4, 36, },
+ { 0, 0, 1, 3, 5, 60, },
+ { 2, 0, 1, 3, 5, 36, },
{ 1, 0, 1, 3, 5, 66, },
{ 3, 0, 1, 3, 5, 60, },
{ 4, 0, 1, 3, 5, 70, },
@@ -12630,6 +26483,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 5, 60, },
{ 7, 0, 1, 3, 5, 36, },
{ 8, 0, 1, 3, 5, 60, },
+ { 9, 0, 1, 3, 5, 36, },
+ { 0, 0, 1, 3, 6, 64, },
+ { 2, 0, 1, 3, 6, 36, },
{ 1, 0, 1, 3, 6, 66, },
{ 3, 0, 1, 3, 6, 64, },
{ 4, 0, 1, 3, 6, 70, },
@@ -12637,6 +26493,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 6, 64, },
{ 7, 0, 1, 3, 6, 36, },
{ 8, 0, 1, 3, 6, 64, },
+ { 9, 0, 1, 3, 6, 36, },
+ { 0, 0, 1, 3, 7, 60, },
+ { 2, 0, 1, 3, 7, 36, },
{ 1, 0, 1, 3, 7, 66, },
{ 3, 0, 1, 3, 7, 60, },
{ 4, 0, 1, 3, 7, 70, },
@@ -12644,6 +26503,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 7, 60, },
{ 7, 0, 1, 3, 7, 36, },
{ 8, 0, 1, 3, 7, 60, },
+ { 9, 0, 1, 3, 7, 36, },
+ { 0, 0, 1, 3, 8, 52, },
+ { 2, 0, 1, 3, 8, 36, },
{ 1, 0, 1, 3, 8, 66, },
{ 3, 0, 1, 3, 8, 52, },
{ 4, 0, 1, 3, 8, 70, },
@@ -12651,6 +26513,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 8, 52, },
{ 7, 0, 1, 3, 8, 36, },
{ 8, 0, 1, 3, 8, 52, },
+ { 9, 0, 1, 3, 8, 36, },
+ { 0, 0, 1, 3, 9, 52, },
+ { 2, 0, 1, 3, 9, 36, },
{ 1, 0, 1, 3, 9, 66, },
{ 3, 0, 1, 3, 9, 52, },
{ 4, 0, 1, 3, 9, 70, },
@@ -12658,6 +26523,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 9, 52, },
{ 7, 0, 1, 3, 9, 36, },
{ 8, 0, 1, 3, 9, 52, },
+ { 9, 0, 1, 3, 9, 36, },
+ { 0, 0, 1, 3, 10, 40, },
+ { 2, 0, 1, 3, 10, 36, },
{ 1, 0, 1, 3, 10, 66, },
{ 3, 0, 1, 3, 10, 40, },
{ 4, 0, 1, 3, 10, 70, },
@@ -12665,6 +26533,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 10, 40, },
{ 7, 0, 1, 3, 10, 36, },
{ 8, 0, 1, 3, 10, 40, },
+ { 9, 0, 1, 3, 10, 36, },
+ { 0, 0, 1, 3, 11, 26, },
+ { 2, 0, 1, 3, 11, 36, },
{ 1, 0, 1, 3, 11, 66, },
{ 3, 0, 1, 3, 11, 26, },
{ 4, 0, 1, 3, 11, 66, },
@@ -12672,6 +26543,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 11, 26, },
{ 7, 0, 1, 3, 11, 36, },
{ 8, 0, 1, 3, 11, 26, },
+ { 9, 0, 1, 3, 11, 36, },
+ { 0, 0, 1, 3, 12, 127, },
+ { 2, 0, 1, 3, 12, 127, },
{ 1, 0, 1, 3, 12, 127, },
{ 3, 0, 1, 3, 12, 127, },
{ 4, 0, 1, 3, 12, 127, },
@@ -12679,6 +26553,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 12, 127, },
{ 7, 0, 1, 3, 12, 127, },
{ 8, 0, 1, 3, 12, 127, },
+ { 9, 0, 1, 3, 12, 127, },
+ { 0, 0, 1, 3, 13, 127, },
+ { 2, 0, 1, 3, 13, 127, },
{ 1, 0, 1, 3, 13, 127, },
{ 3, 0, 1, 3, 13, 127, },
{ 4, 0, 1, 3, 13, 127, },
@@ -12686,6 +26563,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 13, 127, },
{ 7, 0, 1, 3, 13, 127, },
{ 8, 0, 1, 3, 13, 127, },
+ { 9, 0, 1, 3, 13, 127, },
+ { 0, 0, 1, 3, 14, 127, },
+ { 2, 0, 1, 3, 14, 127, },
{ 1, 0, 1, 3, 14, 127, },
{ 3, 0, 1, 3, 14, 127, },
{ 4, 0, 1, 3, 14, 127, },
@@ -12693,6 +26573,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 0, 1, 3, 14, 127, },
{ 7, 0, 1, 3, 14, 127, },
{ 8, 0, 1, 3, 14, 127, },
+ { 9, 0, 1, 3, 14, 127, },
+ { 0, 1, 0, 1, 36, 74, },
+ { 2, 1, 0, 1, 36, 62, },
{ 1, 1, 0, 1, 36, 60, },
{ 3, 1, 0, 1, 36, 62, },
{ 4, 1, 0, 1, 36, 76, },
@@ -12700,6 +26583,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 36, 64, },
{ 7, 1, 0, 1, 36, 54, },
{ 8, 1, 0, 1, 36, 62, },
+ { 9, 1, 0, 1, 36, 62, },
+ { 0, 1, 0, 1, 40, 76, },
+ { 2, 1, 0, 1, 40, 62, },
{ 1, 1, 0, 1, 40, 62, },
{ 3, 1, 0, 1, 40, 62, },
{ 4, 1, 0, 1, 40, 76, },
@@ -12707,6 +26593,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 40, 64, },
{ 7, 1, 0, 1, 40, 54, },
{ 8, 1, 0, 1, 40, 62, },
+ { 9, 1, 0, 1, 40, 62, },
+ { 0, 1, 0, 1, 44, 76, },
+ { 2, 1, 0, 1, 44, 62, },
{ 1, 1, 0, 1, 44, 62, },
{ 3, 1, 0, 1, 44, 62, },
{ 4, 1, 0, 1, 44, 76, },
@@ -12714,13 +26603,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 44, 64, },
{ 7, 1, 0, 1, 44, 54, },
{ 8, 1, 0, 1, 44, 62, },
+ { 9, 1, 0, 1, 44, 62, },
+ { 0, 1, 0, 1, 48, 76, },
+ { 2, 1, 0, 1, 48, 62, },
{ 1, 1, 0, 1, 48, 62, },
{ 3, 1, 0, 1, 48, 62, },
- { 4, 1, 0, 1, 48, 76, },
+ { 4, 1, 0, 1, 48, 54, },
{ 5, 1, 0, 1, 48, 62, },
{ 6, 1, 0, 1, 48, 64, },
{ 7, 1, 0, 1, 48, 54, },
{ 8, 1, 0, 1, 48, 62, },
+ { 9, 1, 0, 1, 48, 62, },
+ { 0, 1, 0, 1, 52, 76, },
+ { 2, 1, 0, 1, 52, 62, },
{ 1, 1, 0, 1, 52, 62, },
{ 3, 1, 0, 1, 52, 64, },
{ 4, 1, 0, 1, 52, 76, },
@@ -12728,6 +26623,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 52, 76, },
{ 7, 1, 0, 1, 52, 54, },
{ 8, 1, 0, 1, 52, 76, },
+ { 9, 1, 0, 1, 52, 62, },
+ { 0, 1, 0, 1, 56, 76, },
+ { 2, 1, 0, 1, 56, 62, },
{ 1, 1, 0, 1, 56, 62, },
{ 3, 1, 0, 1, 56, 64, },
{ 4, 1, 0, 1, 56, 76, },
@@ -12735,6 +26633,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 56, 76, },
{ 7, 1, 0, 1, 56, 54, },
{ 8, 1, 0, 1, 56, 76, },
+ { 9, 1, 0, 1, 56, 62, },
+ { 0, 1, 0, 1, 60, 76, },
+ { 2, 1, 0, 1, 60, 62, },
{ 1, 1, 0, 1, 60, 62, },
{ 3, 1, 0, 1, 60, 64, },
{ 4, 1, 0, 1, 60, 76, },
@@ -12742,6 +26643,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 60, 76, },
{ 7, 1, 0, 1, 60, 54, },
{ 8, 1, 0, 1, 60, 76, },
+ { 9, 1, 0, 1, 60, 62, },
+ { 0, 1, 0, 1, 64, 74, },
+ { 2, 1, 0, 1, 64, 62, },
{ 1, 1, 0, 1, 64, 60, },
{ 3, 1, 0, 1, 64, 64, },
{ 4, 1, 0, 1, 64, 76, },
@@ -12749,6 +26653,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 64, 74, },
{ 7, 1, 0, 1, 64, 54, },
{ 8, 1, 0, 1, 64, 74, },
+ { 9, 1, 0, 1, 64, 62, },
+ { 0, 1, 0, 1, 100, 72, },
+ { 2, 1, 0, 1, 100, 62, },
{ 1, 1, 0, 1, 100, 76, },
{ 3, 1, 0, 1, 100, 72, },
{ 4, 1, 0, 1, 100, 76, },
@@ -12756,6 +26663,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 100, 72, },
{ 7, 1, 0, 1, 100, 54, },
{ 8, 1, 0, 1, 100, 72, },
+ { 9, 1, 0, 1, 100, 127, },
+ { 0, 1, 0, 1, 104, 76, },
+ { 2, 1, 0, 1, 104, 62, },
{ 1, 1, 0, 1, 104, 76, },
{ 3, 1, 0, 1, 104, 76, },
{ 4, 1, 0, 1, 104, 76, },
@@ -12763,6 +26673,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 104, 76, },
{ 7, 1, 0, 1, 104, 54, },
{ 8, 1, 0, 1, 104, 76, },
+ { 9, 1, 0, 1, 104, 127, },
+ { 0, 1, 0, 1, 108, 76, },
+ { 2, 1, 0, 1, 108, 62, },
{ 1, 1, 0, 1, 108, 76, },
{ 3, 1, 0, 1, 108, 76, },
{ 4, 1, 0, 1, 108, 76, },
@@ -12770,6 +26683,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 108, 76, },
{ 7, 1, 0, 1, 108, 54, },
{ 8, 1, 0, 1, 108, 76, },
+ { 9, 1, 0, 1, 108, 127, },
+ { 0, 1, 0, 1, 112, 76, },
+ { 2, 1, 0, 1, 112, 62, },
{ 1, 1, 0, 1, 112, 76, },
{ 3, 1, 0, 1, 112, 76, },
{ 4, 1, 0, 1, 112, 76, },
@@ -12777,6 +26693,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 112, 76, },
{ 7, 1, 0, 1, 112, 54, },
{ 8, 1, 0, 1, 112, 76, },
+ { 9, 1, 0, 1, 112, 127, },
+ { 0, 1, 0, 1, 116, 76, },
+ { 2, 1, 0, 1, 116, 62, },
{ 1, 1, 0, 1, 116, 76, },
{ 3, 1, 0, 1, 116, 76, },
{ 4, 1, 0, 1, 116, 76, },
@@ -12784,6 +26703,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 116, 76, },
{ 7, 1, 0, 1, 116, 54, },
{ 8, 1, 0, 1, 116, 76, },
+ { 9, 1, 0, 1, 116, 127, },
+ { 0, 1, 0, 1, 120, 76, },
+ { 2, 1, 0, 1, 120, 62, },
{ 1, 1, 0, 1, 120, 76, },
{ 3, 1, 0, 1, 120, 127, },
{ 4, 1, 0, 1, 120, 76, },
@@ -12791,6 +26713,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 120, 76, },
{ 7, 1, 0, 1, 120, 54, },
{ 8, 1, 0, 1, 120, 76, },
+ { 9, 1, 0, 1, 120, 127, },
+ { 0, 1, 0, 1, 124, 76, },
+ { 2, 1, 0, 1, 124, 62, },
{ 1, 1, 0, 1, 124, 76, },
{ 3, 1, 0, 1, 124, 127, },
{ 4, 1, 0, 1, 124, 76, },
@@ -12798,6 +26723,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 124, 76, },
{ 7, 1, 0, 1, 124, 54, },
{ 8, 1, 0, 1, 124, 76, },
+ { 9, 1, 0, 1, 124, 127, },
+ { 0, 1, 0, 1, 128, 76, },
+ { 2, 1, 0, 1, 128, 62, },
{ 1, 1, 0, 1, 128, 76, },
{ 3, 1, 0, 1, 128, 127, },
{ 4, 1, 0, 1, 128, 76, },
@@ -12805,6 +26733,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 128, 76, },
{ 7, 1, 0, 1, 128, 54, },
{ 8, 1, 0, 1, 128, 76, },
+ { 9, 1, 0, 1, 128, 127, },
+ { 0, 1, 0, 1, 132, 76, },
+ { 2, 1, 0, 1, 132, 62, },
{ 1, 1, 0, 1, 132, 76, },
{ 3, 1, 0, 1, 132, 76, },
{ 4, 1, 0, 1, 132, 76, },
@@ -12812,20 +26743,29 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 132, 76, },
{ 7, 1, 0, 1, 132, 54, },
{ 8, 1, 0, 1, 132, 76, },
+ { 9, 1, 0, 1, 132, 127, },
+ { 0, 1, 0, 1, 136, 76, },
+ { 2, 1, 0, 1, 136, 62, },
{ 1, 1, 0, 1, 136, 76, },
{ 3, 1, 0, 1, 136, 76, },
{ 4, 1, 0, 1, 136, 76, },
{ 5, 1, 0, 1, 136, 62, },
{ 6, 1, 0, 1, 136, 76, },
- { 7, 1, 0, 1, 136, 127, },
+ { 7, 1, 0, 1, 136, 54, },
{ 8, 1, 0, 1, 136, 76, },
+ { 9, 1, 0, 1, 136, 127, },
+ { 0, 1, 0, 1, 140, 72, },
+ { 2, 1, 0, 1, 140, 62, },
{ 1, 1, 0, 1, 140, 76, },
{ 3, 1, 0, 1, 140, 72, },
{ 4, 1, 0, 1, 140, 76, },
{ 5, 1, 0, 1, 140, 62, },
{ 6, 1, 0, 1, 140, 72, },
- { 7, 1, 0, 1, 140, 127, },
+ { 7, 1, 0, 1, 140, 54, },
{ 8, 1, 0, 1, 140, 72, },
+ { 9, 1, 0, 1, 140, 127, },
+ { 0, 1, 0, 1, 144, 76, },
+ { 2, 1, 0, 1, 144, 127, },
{ 1, 1, 0, 1, 144, 127, },
{ 3, 1, 0, 1, 144, 76, },
{ 4, 1, 0, 1, 144, 76, },
@@ -12833,6 +26773,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 144, 76, },
{ 7, 1, 0, 1, 144, 127, },
{ 8, 1, 0, 1, 144, 76, },
+ { 9, 1, 0, 1, 144, 127, },
+ { 0, 1, 0, 1, 149, 76, },
+ { 2, 1, 0, 1, 149, -128, },
{ 1, 1, 0, 1, 149, 127, },
{ 3, 1, 0, 1, 149, 76, },
{ 4, 1, 0, 1, 149, 74, },
@@ -12840,6 +26783,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 149, 76, },
{ 7, 1, 0, 1, 149, 54, },
{ 8, 1, 0, 1, 149, 76, },
+ { 9, 1, 0, 1, 149, -128, },
+ { 0, 1, 0, 1, 153, 76, },
+ { 2, 1, 0, 1, 153, -128, },
{ 1, 1, 0, 1, 153, 127, },
{ 3, 1, 0, 1, 153, 76, },
{ 4, 1, 0, 1, 153, 74, },
@@ -12847,6 +26793,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 153, 76, },
{ 7, 1, 0, 1, 153, 54, },
{ 8, 1, 0, 1, 153, 76, },
+ { 9, 1, 0, 1, 153, -128, },
+ { 0, 1, 0, 1, 157, 76, },
+ { 2, 1, 0, 1, 157, -128, },
{ 1, 1, 0, 1, 157, 127, },
{ 3, 1, 0, 1, 157, 76, },
{ 4, 1, 0, 1, 157, 74, },
@@ -12854,6 +26803,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 157, 76, },
{ 7, 1, 0, 1, 157, 54, },
{ 8, 1, 0, 1, 157, 76, },
+ { 9, 1, 0, 1, 157, -128, },
+ { 0, 1, 0, 1, 161, 76, },
+ { 2, 1, 0, 1, 161, -128, },
{ 1, 1, 0, 1, 161, 127, },
{ 3, 1, 0, 1, 161, 76, },
{ 4, 1, 0, 1, 161, 74, },
@@ -12861,6 +26813,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 161, 76, },
{ 7, 1, 0, 1, 161, 54, },
{ 8, 1, 0, 1, 161, 76, },
+ { 9, 1, 0, 1, 161, -128, },
+ { 0, 1, 0, 1, 165, 76, },
+ { 2, 1, 0, 1, 165, -128, },
{ 1, 1, 0, 1, 165, 127, },
{ 3, 1, 0, 1, 165, 76, },
{ 4, 1, 0, 1, 165, 74, },
@@ -12868,6 +26823,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 1, 165, 76, },
{ 7, 1, 0, 1, 165, 54, },
{ 8, 1, 0, 1, 165, 76, },
+ { 9, 1, 0, 1, 165, -128, },
+ { 0, 1, 0, 2, 36, 72, },
+ { 2, 1, 0, 2, 36, 62, },
{ 1, 1, 0, 2, 36, 62, },
{ 3, 1, 0, 2, 36, 62, },
{ 4, 1, 0, 2, 36, 76, },
@@ -12875,6 +26833,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 36, 64, },
{ 7, 1, 0, 2, 36, 54, },
{ 8, 1, 0, 2, 36, 62, },
+ { 9, 1, 0, 2, 36, 62, },
+ { 0, 1, 0, 2, 40, 76, },
+ { 2, 1, 0, 2, 40, 62, },
{ 1, 1, 0, 2, 40, 62, },
{ 3, 1, 0, 2, 40, 62, },
{ 4, 1, 0, 2, 40, 76, },
@@ -12882,6 +26843,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 40, 64, },
{ 7, 1, 0, 2, 40, 54, },
{ 8, 1, 0, 2, 40, 62, },
+ { 9, 1, 0, 2, 40, 62, },
+ { 0, 1, 0, 2, 44, 76, },
+ { 2, 1, 0, 2, 44, 62, },
{ 1, 1, 0, 2, 44, 62, },
{ 3, 1, 0, 2, 44, 62, },
{ 4, 1, 0, 2, 44, 76, },
@@ -12889,13 +26853,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 44, 64, },
{ 7, 1, 0, 2, 44, 54, },
{ 8, 1, 0, 2, 44, 62, },
+ { 9, 1, 0, 2, 44, 62, },
+ { 0, 1, 0, 2, 48, 76, },
+ { 2, 1, 0, 2, 48, 62, },
{ 1, 1, 0, 2, 48, 62, },
{ 3, 1, 0, 2, 48, 62, },
- { 4, 1, 0, 2, 48, 76, },
+ { 4, 1, 0, 2, 48, 54, },
{ 5, 1, 0, 2, 48, 62, },
{ 6, 1, 0, 2, 48, 64, },
{ 7, 1, 0, 2, 48, 54, },
{ 8, 1, 0, 2, 48, 62, },
+ { 9, 1, 0, 2, 48, 62, },
+ { 0, 1, 0, 2, 52, 76, },
+ { 2, 1, 0, 2, 52, 62, },
{ 1, 1, 0, 2, 52, 62, },
{ 3, 1, 0, 2, 52, 64, },
{ 4, 1, 0, 2, 52, 76, },
@@ -12903,6 +26873,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 52, 76, },
{ 7, 1, 0, 2, 52, 54, },
{ 8, 1, 0, 2, 52, 76, },
+ { 9, 1, 0, 2, 52, 62, },
+ { 0, 1, 0, 2, 56, 76, },
+ { 2, 1, 0, 2, 56, 62, },
{ 1, 1, 0, 2, 56, 62, },
{ 3, 1, 0, 2, 56, 64, },
{ 4, 1, 0, 2, 56, 76, },
@@ -12910,6 +26883,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 56, 76, },
{ 7, 1, 0, 2, 56, 54, },
{ 8, 1, 0, 2, 56, 76, },
+ { 9, 1, 0, 2, 56, 62, },
+ { 0, 1, 0, 2, 60, 76, },
+ { 2, 1, 0, 2, 60, 62, },
{ 1, 1, 0, 2, 60, 62, },
{ 3, 1, 0, 2, 60, 64, },
{ 4, 1, 0, 2, 60, 76, },
@@ -12917,6 +26893,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 60, 76, },
{ 7, 1, 0, 2, 60, 54, },
{ 8, 1, 0, 2, 60, 76, },
+ { 9, 1, 0, 2, 60, 62, },
+ { 0, 1, 0, 2, 64, 74, },
+ { 2, 1, 0, 2, 64, 62, },
{ 1, 1, 0, 2, 64, 60, },
{ 3, 1, 0, 2, 64, 64, },
{ 4, 1, 0, 2, 64, 74, },
@@ -12924,6 +26903,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 64, 74, },
{ 7, 1, 0, 2, 64, 54, },
{ 8, 1, 0, 2, 64, 74, },
+ { 9, 1, 0, 2, 64, 62, },
+ { 0, 1, 0, 2, 100, 70, },
+ { 2, 1, 0, 2, 100, 62, },
{ 1, 1, 0, 2, 100, 76, },
{ 3, 1, 0, 2, 100, 70, },
{ 4, 1, 0, 2, 100, 76, },
@@ -12931,6 +26913,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 100, 70, },
{ 7, 1, 0, 2, 100, 54, },
{ 8, 1, 0, 2, 100, 70, },
+ { 9, 1, 0, 2, 100, 127, },
+ { 0, 1, 0, 2, 104, 76, },
+ { 2, 1, 0, 2, 104, 62, },
{ 1, 1, 0, 2, 104, 76, },
{ 3, 1, 0, 2, 104, 76, },
{ 4, 1, 0, 2, 104, 76, },
@@ -12938,6 +26923,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 104, 76, },
{ 7, 1, 0, 2, 104, 54, },
{ 8, 1, 0, 2, 104, 76, },
+ { 9, 1, 0, 2, 104, 127, },
+ { 0, 1, 0, 2, 108, 76, },
+ { 2, 1, 0, 2, 108, 62, },
{ 1, 1, 0, 2, 108, 76, },
{ 3, 1, 0, 2, 108, 76, },
{ 4, 1, 0, 2, 108, 76, },
@@ -12945,6 +26933,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 108, 76, },
{ 7, 1, 0, 2, 108, 54, },
{ 8, 1, 0, 2, 108, 76, },
+ { 9, 1, 0, 2, 108, 127, },
+ { 0, 1, 0, 2, 112, 76, },
+ { 2, 1, 0, 2, 112, 62, },
{ 1, 1, 0, 2, 112, 76, },
{ 3, 1, 0, 2, 112, 76, },
{ 4, 1, 0, 2, 112, 76, },
@@ -12952,6 +26943,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 112, 76, },
{ 7, 1, 0, 2, 112, 54, },
{ 8, 1, 0, 2, 112, 76, },
+ { 9, 1, 0, 2, 112, 127, },
+ { 0, 1, 0, 2, 116, 76, },
+ { 2, 1, 0, 2, 116, 62, },
{ 1, 1, 0, 2, 116, 76, },
{ 3, 1, 0, 2, 116, 76, },
{ 4, 1, 0, 2, 116, 76, },
@@ -12959,6 +26953,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 116, 76, },
{ 7, 1, 0, 2, 116, 54, },
{ 8, 1, 0, 2, 116, 76, },
+ { 9, 1, 0, 2, 116, 127, },
+ { 0, 1, 0, 2, 120, 76, },
+ { 2, 1, 0, 2, 120, 62, },
{ 1, 1, 0, 2, 120, 76, },
{ 3, 1, 0, 2, 120, 127, },
{ 4, 1, 0, 2, 120, 76, },
@@ -12966,6 +26963,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 120, 76, },
{ 7, 1, 0, 2, 120, 54, },
{ 8, 1, 0, 2, 120, 76, },
+ { 9, 1, 0, 2, 120, 127, },
+ { 0, 1, 0, 2, 124, 76, },
+ { 2, 1, 0, 2, 124, 62, },
{ 1, 1, 0, 2, 124, 76, },
{ 3, 1, 0, 2, 124, 127, },
{ 4, 1, 0, 2, 124, 76, },
@@ -12973,6 +26973,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 124, 76, },
{ 7, 1, 0, 2, 124, 54, },
{ 8, 1, 0, 2, 124, 76, },
+ { 9, 1, 0, 2, 124, 127, },
+ { 0, 1, 0, 2, 128, 76, },
+ { 2, 1, 0, 2, 128, 62, },
{ 1, 1, 0, 2, 128, 76, },
{ 3, 1, 0, 2, 128, 127, },
{ 4, 1, 0, 2, 128, 76, },
@@ -12980,6 +26983,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 128, 76, },
{ 7, 1, 0, 2, 128, 54, },
{ 8, 1, 0, 2, 128, 76, },
+ { 9, 1, 0, 2, 128, 127, },
+ { 0, 1, 0, 2, 132, 76, },
+ { 2, 1, 0, 2, 132, 62, },
{ 1, 1, 0, 2, 132, 76, },
{ 3, 1, 0, 2, 132, 76, },
{ 4, 1, 0, 2, 132, 76, },
@@ -12987,20 +26993,29 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 132, 76, },
{ 7, 1, 0, 2, 132, 54, },
{ 8, 1, 0, 2, 132, 76, },
+ { 9, 1, 0, 2, 132, 127, },
+ { 0, 1, 0, 2, 136, 76, },
+ { 2, 1, 0, 2, 136, 62, },
{ 1, 1, 0, 2, 136, 76, },
{ 3, 1, 0, 2, 136, 76, },
{ 4, 1, 0, 2, 136, 76, },
{ 5, 1, 0, 2, 136, 62, },
{ 6, 1, 0, 2, 136, 76, },
- { 7, 1, 0, 2, 136, 127, },
+ { 7, 1, 0, 2, 136, 54, },
{ 8, 1, 0, 2, 136, 76, },
+ { 9, 1, 0, 2, 136, 127, },
+ { 0, 1, 0, 2, 140, 70, },
+ { 2, 1, 0, 2, 140, 62, },
{ 1, 1, 0, 2, 140, 76, },
{ 3, 1, 0, 2, 140, 70, },
{ 4, 1, 0, 2, 140, 76, },
{ 5, 1, 0, 2, 140, 62, },
{ 6, 1, 0, 2, 140, 70, },
- { 7, 1, 0, 2, 140, 127, },
+ { 7, 1, 0, 2, 140, 54, },
{ 8, 1, 0, 2, 140, 70, },
+ { 9, 1, 0, 2, 140, 127, },
+ { 0, 1, 0, 2, 144, 76, },
+ { 2, 1, 0, 2, 144, 127, },
{ 1, 1, 0, 2, 144, 127, },
{ 3, 1, 0, 2, 144, 76, },
{ 4, 1, 0, 2, 144, 76, },
@@ -13008,6 +27023,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 144, 76, },
{ 7, 1, 0, 2, 144, 127, },
{ 8, 1, 0, 2, 144, 76, },
+ { 9, 1, 0, 2, 144, 127, },
+ { 0, 1, 0, 2, 149, 76, },
+ { 2, 1, 0, 2, 149, -128, },
{ 1, 1, 0, 2, 149, 127, },
{ 3, 1, 0, 2, 149, 76, },
{ 4, 1, 0, 2, 149, 74, },
@@ -13015,6 +27033,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 149, 76, },
{ 7, 1, 0, 2, 149, 54, },
{ 8, 1, 0, 2, 149, 76, },
+ { 9, 1, 0, 2, 149, -128, },
+ { 0, 1, 0, 2, 153, 76, },
+ { 2, 1, 0, 2, 153, -128, },
{ 1, 1, 0, 2, 153, 127, },
{ 3, 1, 0, 2, 153, 76, },
{ 4, 1, 0, 2, 153, 74, },
@@ -13022,6 +27043,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 153, 76, },
{ 7, 1, 0, 2, 153, 54, },
{ 8, 1, 0, 2, 153, 76, },
+ { 9, 1, 0, 2, 153, -128, },
+ { 0, 1, 0, 2, 157, 76, },
+ { 2, 1, 0, 2, 157, -128, },
{ 1, 1, 0, 2, 157, 127, },
{ 3, 1, 0, 2, 157, 76, },
{ 4, 1, 0, 2, 157, 74, },
@@ -13029,6 +27053,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 157, 76, },
{ 7, 1, 0, 2, 157, 54, },
{ 8, 1, 0, 2, 157, 76, },
+ { 9, 1, 0, 2, 157, -128, },
+ { 0, 1, 0, 2, 161, 76, },
+ { 2, 1, 0, 2, 161, -128, },
{ 1, 1, 0, 2, 161, 127, },
{ 3, 1, 0, 2, 161, 76, },
{ 4, 1, 0, 2, 161, 74, },
@@ -13036,6 +27063,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 161, 76, },
{ 7, 1, 0, 2, 161, 54, },
{ 8, 1, 0, 2, 161, 76, },
+ { 9, 1, 0, 2, 161, -128, },
+ { 0, 1, 0, 2, 165, 76, },
+ { 2, 1, 0, 2, 165, -128, },
{ 1, 1, 0, 2, 165, 127, },
{ 3, 1, 0, 2, 165, 76, },
{ 4, 1, 0, 2, 165, 74, },
@@ -13043,6 +27073,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 2, 165, 76, },
{ 7, 1, 0, 2, 165, 54, },
{ 8, 1, 0, 2, 165, 76, },
+ { 9, 1, 0, 2, 165, -128, },
+ { 0, 1, 0, 3, 36, 68, },
+ { 2, 1, 0, 3, 36, 38, },
{ 1, 1, 0, 3, 36, 50, },
{ 3, 1, 0, 3, 36, 38, },
{ 4, 1, 0, 3, 36, 66, },
@@ -13050,6 +27083,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 36, 52, },
{ 7, 1, 0, 3, 36, 30, },
{ 8, 1, 0, 3, 36, 50, },
+ { 9, 1, 0, 3, 36, 38, },
+ { 0, 1, 0, 3, 40, 68, },
+ { 2, 1, 0, 3, 40, 38, },
{ 1, 1, 0, 3, 40, 50, },
{ 3, 1, 0, 3, 40, 38, },
{ 4, 1, 0, 3, 40, 66, },
@@ -13057,6 +27093,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 40, 52, },
{ 7, 1, 0, 3, 40, 30, },
{ 8, 1, 0, 3, 40, 50, },
+ { 9, 1, 0, 3, 40, 38, },
+ { 0, 1, 0, 3, 44, 68, },
+ { 2, 1, 0, 3, 44, 38, },
{ 1, 1, 0, 3, 44, 50, },
{ 3, 1, 0, 3, 44, 38, },
{ 4, 1, 0, 3, 44, 66, },
@@ -13064,13 +27103,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 44, 52, },
{ 7, 1, 0, 3, 44, 30, },
{ 8, 1, 0, 3, 44, 50, },
+ { 9, 1, 0, 3, 44, 38, },
+ { 0, 1, 0, 3, 48, 68, },
+ { 2, 1, 0, 3, 48, 38, },
{ 1, 1, 0, 3, 48, 50, },
{ 3, 1, 0, 3, 48, 38, },
- { 4, 1, 0, 3, 48, 66, },
+ { 4, 1, 0, 3, 48, 36, },
{ 5, 1, 0, 3, 48, 38, },
{ 6, 1, 0, 3, 48, 52, },
{ 7, 1, 0, 3, 48, 30, },
{ 8, 1, 0, 3, 48, 50, },
+ { 9, 1, 0, 3, 48, 38, },
+ { 0, 1, 0, 3, 52, 68, },
+ { 2, 1, 0, 3, 52, 38, },
{ 1, 1, 0, 3, 52, 50, },
{ 3, 1, 0, 3, 52, 40, },
{ 4, 1, 0, 3, 52, 66, },
@@ -13078,6 +27123,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 52, 68, },
{ 7, 1, 0, 3, 52, 30, },
{ 8, 1, 0, 3, 52, 68, },
+ { 9, 1, 0, 3, 52, 38, },
+ { 0, 1, 0, 3, 56, 68, },
+ { 2, 1, 0, 3, 56, 38, },
{ 1, 1, 0, 3, 56, 50, },
{ 3, 1, 0, 3, 56, 40, },
{ 4, 1, 0, 3, 56, 66, },
@@ -13085,6 +27133,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 56, 68, },
{ 7, 1, 0, 3, 56, 30, },
{ 8, 1, 0, 3, 56, 68, },
+ { 9, 1, 0, 3, 56, 38, },
+ { 0, 1, 0, 3, 60, 66, },
+ { 2, 1, 0, 3, 60, 38, },
{ 1, 1, 0, 3, 60, 50, },
{ 3, 1, 0, 3, 60, 40, },
{ 4, 1, 0, 3, 60, 66, },
@@ -13092,6 +27143,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 60, 66, },
{ 7, 1, 0, 3, 60, 30, },
{ 8, 1, 0, 3, 60, 66, },
+ { 9, 1, 0, 3, 60, 38, },
+ { 0, 1, 0, 3, 64, 68, },
+ { 2, 1, 0, 3, 64, 38, },
{ 1, 1, 0, 3, 64, 50, },
{ 3, 1, 0, 3, 64, 40, },
{ 4, 1, 0, 3, 64, 66, },
@@ -13099,6 +27153,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 64, 68, },
{ 7, 1, 0, 3, 64, 30, },
{ 8, 1, 0, 3, 64, 68, },
+ { 9, 1, 0, 3, 64, 38, },
+ { 0, 1, 0, 3, 100, 60, },
+ { 2, 1, 0, 3, 100, 38, },
{ 1, 1, 0, 3, 100, 70, },
{ 3, 1, 0, 3, 100, 60, },
{ 4, 1, 0, 3, 100, 64, },
@@ -13106,6 +27163,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 100, 60, },
{ 7, 1, 0, 3, 100, 30, },
{ 8, 1, 0, 3, 100, 60, },
+ { 9, 1, 0, 3, 100, 127, },
+ { 0, 1, 0, 3, 104, 68, },
+ { 2, 1, 0, 3, 104, 38, },
{ 1, 1, 0, 3, 104, 70, },
{ 3, 1, 0, 3, 104, 68, },
{ 4, 1, 0, 3, 104, 64, },
@@ -13113,6 +27173,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 104, 68, },
{ 7, 1, 0, 3, 104, 30, },
{ 8, 1, 0, 3, 104, 68, },
+ { 9, 1, 0, 3, 104, 127, },
+ { 0, 1, 0, 3, 108, 68, },
+ { 2, 1, 0, 3, 108, 38, },
{ 1, 1, 0, 3, 108, 70, },
{ 3, 1, 0, 3, 108, 68, },
{ 4, 1, 0, 3, 108, 64, },
@@ -13120,6 +27183,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 108, 68, },
{ 7, 1, 0, 3, 108, 30, },
{ 8, 1, 0, 3, 108, 68, },
+ { 9, 1, 0, 3, 108, 127, },
+ { 0, 1, 0, 3, 112, 68, },
+ { 2, 1, 0, 3, 112, 38, },
{ 1, 1, 0, 3, 112, 70, },
{ 3, 1, 0, 3, 112, 68, },
{ 4, 1, 0, 3, 112, 64, },
@@ -13127,6 +27193,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 112, 68, },
{ 7, 1, 0, 3, 112, 30, },
{ 8, 1, 0, 3, 112, 68, },
+ { 9, 1, 0, 3, 112, 127, },
+ { 0, 1, 0, 3, 116, 68, },
+ { 2, 1, 0, 3, 116, 38, },
{ 1, 1, 0, 3, 116, 70, },
{ 3, 1, 0, 3, 116, 68, },
{ 4, 1, 0, 3, 116, 64, },
@@ -13134,6 +27203,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 116, 68, },
{ 7, 1, 0, 3, 116, 30, },
{ 8, 1, 0, 3, 116, 68, },
+ { 9, 1, 0, 3, 116, 127, },
+ { 0, 1, 0, 3, 120, 68, },
+ { 2, 1, 0, 3, 120, 38, },
{ 1, 1, 0, 3, 120, 70, },
{ 3, 1, 0, 3, 120, 127, },
{ 4, 1, 0, 3, 120, 64, },
@@ -13141,6 +27213,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 120, 68, },
{ 7, 1, 0, 3, 120, 30, },
{ 8, 1, 0, 3, 120, 68, },
+ { 9, 1, 0, 3, 120, 127, },
+ { 0, 1, 0, 3, 124, 68, },
+ { 2, 1, 0, 3, 124, 38, },
{ 1, 1, 0, 3, 124, 70, },
{ 3, 1, 0, 3, 124, 127, },
{ 4, 1, 0, 3, 124, 64, },
@@ -13148,6 +27223,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 124, 68, },
{ 7, 1, 0, 3, 124, 30, },
{ 8, 1, 0, 3, 124, 68, },
+ { 9, 1, 0, 3, 124, 127, },
+ { 0, 1, 0, 3, 128, 68, },
+ { 2, 1, 0, 3, 128, 38, },
{ 1, 1, 0, 3, 128, 70, },
{ 3, 1, 0, 3, 128, 127, },
{ 4, 1, 0, 3, 128, 64, },
@@ -13155,6 +27233,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 128, 68, },
{ 7, 1, 0, 3, 128, 30, },
{ 8, 1, 0, 3, 128, 68, },
+ { 9, 1, 0, 3, 128, 127, },
+ { 0, 1, 0, 3, 132, 68, },
+ { 2, 1, 0, 3, 132, 38, },
{ 1, 1, 0, 3, 132, 70, },
{ 3, 1, 0, 3, 132, 68, },
{ 4, 1, 0, 3, 132, 64, },
@@ -13162,20 +27243,29 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 132, 68, },
{ 7, 1, 0, 3, 132, 30, },
{ 8, 1, 0, 3, 132, 68, },
+ { 9, 1, 0, 3, 132, 127, },
+ { 0, 1, 0, 3, 136, 68, },
+ { 2, 1, 0, 3, 136, 38, },
{ 1, 1, 0, 3, 136, 70, },
{ 3, 1, 0, 3, 136, 68, },
{ 4, 1, 0, 3, 136, 64, },
{ 5, 1, 0, 3, 136, 38, },
{ 6, 1, 0, 3, 136, 68, },
- { 7, 1, 0, 3, 136, 127, },
+ { 7, 1, 0, 3, 136, 30, },
{ 8, 1, 0, 3, 136, 68, },
+ { 9, 1, 0, 3, 136, 127, },
+ { 0, 1, 0, 3, 140, 60, },
+ { 2, 1, 0, 3, 140, 38, },
{ 1, 1, 0, 3, 140, 70, },
{ 3, 1, 0, 3, 140, 60, },
{ 4, 1, 0, 3, 140, 64, },
{ 5, 1, 0, 3, 140, 38, },
{ 6, 1, 0, 3, 140, 60, },
- { 7, 1, 0, 3, 140, 127, },
+ { 7, 1, 0, 3, 140, 30, },
{ 8, 1, 0, 3, 140, 60, },
+ { 9, 1, 0, 3, 140, 127, },
+ { 0, 1, 0, 3, 144, 68, },
+ { 2, 1, 0, 3, 144, 127, },
{ 1, 1, 0, 3, 144, 127, },
{ 3, 1, 0, 3, 144, 68, },
{ 4, 1, 0, 3, 144, 64, },
@@ -13183,6 +27273,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 144, 68, },
{ 7, 1, 0, 3, 144, 127, },
{ 8, 1, 0, 3, 144, 68, },
+ { 9, 1, 0, 3, 144, 127, },
+ { 0, 1, 0, 3, 149, 76, },
+ { 2, 1, 0, 3, 149, -128, },
{ 1, 1, 0, 3, 149, 127, },
{ 3, 1, 0, 3, 149, 76, },
{ 4, 1, 0, 3, 149, 60, },
@@ -13190,6 +27283,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 149, 76, },
{ 7, 1, 0, 3, 149, 30, },
{ 8, 1, 0, 3, 149, 72, },
+ { 9, 1, 0, 3, 149, -128, },
+ { 0, 1, 0, 3, 153, 76, },
+ { 2, 1, 0, 3, 153, -128, },
{ 1, 1, 0, 3, 153, 127, },
{ 3, 1, 0, 3, 153, 76, },
{ 4, 1, 0, 3, 153, 60, },
@@ -13197,6 +27293,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 153, 76, },
{ 7, 1, 0, 3, 153, 30, },
{ 8, 1, 0, 3, 153, 76, },
+ { 9, 1, 0, 3, 153, -128, },
+ { 0, 1, 0, 3, 157, 76, },
+ { 2, 1, 0, 3, 157, -128, },
{ 1, 1, 0, 3, 157, 127, },
{ 3, 1, 0, 3, 157, 76, },
{ 4, 1, 0, 3, 157, 60, },
@@ -13204,6 +27303,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 157, 76, },
{ 7, 1, 0, 3, 157, 30, },
{ 8, 1, 0, 3, 157, 76, },
+ { 9, 1, 0, 3, 157, -128, },
+ { 0, 1, 0, 3, 161, 76, },
+ { 2, 1, 0, 3, 161, -128, },
{ 1, 1, 0, 3, 161, 127, },
{ 3, 1, 0, 3, 161, 76, },
{ 4, 1, 0, 3, 161, 60, },
@@ -13211,6 +27313,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 161, 76, },
{ 7, 1, 0, 3, 161, 30, },
{ 8, 1, 0, 3, 161, 76, },
+ { 9, 1, 0, 3, 161, -128, },
+ { 0, 1, 0, 3, 165, 76, },
+ { 2, 1, 0, 3, 165, -128, },
{ 1, 1, 0, 3, 165, 127, },
{ 3, 1, 0, 3, 165, 76, },
{ 4, 1, 0, 3, 165, 60, },
@@ -13218,6 +27323,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 0, 3, 165, 76, },
{ 7, 1, 0, 3, 165, 30, },
{ 8, 1, 0, 3, 165, 76, },
+ { 9, 1, 0, 3, 165, -128, },
+ { 0, 1, 1, 2, 38, 66, },
+ { 2, 1, 1, 2, 38, 64, },
{ 1, 1, 1, 2, 38, 62, },
{ 3, 1, 1, 2, 38, 64, },
{ 4, 1, 1, 2, 38, 72, },
@@ -13225,13 +27333,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 38, 64, },
{ 7, 1, 1, 2, 38, 54, },
{ 8, 1, 1, 2, 38, 62, },
+ { 9, 1, 1, 2, 38, 64, },
+ { 0, 1, 1, 2, 46, 72, },
+ { 2, 1, 1, 2, 46, 64, },
{ 1, 1, 1, 2, 46, 62, },
{ 3, 1, 1, 2, 46, 64, },
- { 4, 1, 1, 2, 46, 72, },
+ { 4, 1, 1, 2, 46, 60, },
{ 5, 1, 1, 2, 46, 64, },
{ 6, 1, 1, 2, 46, 64, },
{ 7, 1, 1, 2, 46, 54, },
{ 8, 1, 1, 2, 46, 62, },
+ { 9, 1, 1, 2, 46, 64, },
+ { 0, 1, 1, 2, 54, 72, },
+ { 2, 1, 1, 2, 54, 64, },
{ 1, 1, 1, 2, 54, 62, },
{ 3, 1, 1, 2, 54, 64, },
{ 4, 1, 1, 2, 54, 72, },
@@ -13239,6 +27353,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 54, 72, },
{ 7, 1, 1, 2, 54, 54, },
{ 8, 1, 1, 2, 54, 72, },
+ { 9, 1, 1, 2, 54, 64, },
+ { 0, 1, 1, 2, 62, 64, },
+ { 2, 1, 1, 2, 62, 64, },
{ 1, 1, 1, 2, 62, 62, },
{ 3, 1, 1, 2, 62, 64, },
{ 4, 1, 1, 2, 62, 70, },
@@ -13246,6 +27363,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 62, 64, },
{ 7, 1, 1, 2, 62, 54, },
{ 8, 1, 1, 2, 62, 64, },
+ { 9, 1, 1, 2, 62, 64, },
+ { 0, 1, 1, 2, 102, 58, },
+ { 2, 1, 1, 2, 102, 64, },
{ 1, 1, 1, 2, 102, 72, },
{ 3, 1, 1, 2, 102, 58, },
{ 4, 1, 1, 2, 102, 72, },
@@ -13253,6 +27373,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 102, 58, },
{ 7, 1, 1, 2, 102, 54, },
{ 8, 1, 1, 2, 102, 58, },
+ { 9, 1, 1, 2, 102, 127, },
+ { 0, 1, 1, 2, 110, 72, },
+ { 2, 1, 1, 2, 110, 64, },
{ 1, 1, 1, 2, 110, 72, },
{ 3, 1, 1, 2, 110, 72, },
{ 4, 1, 1, 2, 110, 72, },
@@ -13260,6 +27383,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 110, 72, },
{ 7, 1, 1, 2, 110, 54, },
{ 8, 1, 1, 2, 110, 72, },
+ { 9, 1, 1, 2, 110, 127, },
+ { 0, 1, 1, 2, 118, 72, },
+ { 2, 1, 1, 2, 118, 64, },
{ 1, 1, 1, 2, 118, 72, },
{ 3, 1, 1, 2, 118, 127, },
{ 4, 1, 1, 2, 118, 72, },
@@ -13267,6 +27393,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 118, 72, },
{ 7, 1, 1, 2, 118, 54, },
{ 8, 1, 1, 2, 118, 72, },
+ { 9, 1, 1, 2, 118, 127, },
+ { 0, 1, 1, 2, 126, 72, },
+ { 2, 1, 1, 2, 126, 64, },
{ 1, 1, 1, 2, 126, 72, },
{ 3, 1, 1, 2, 126, 127, },
{ 4, 1, 1, 2, 126, 72, },
@@ -13274,13 +27403,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 126, 72, },
{ 7, 1, 1, 2, 126, 54, },
{ 8, 1, 1, 2, 126, 72, },
+ { 9, 1, 1, 2, 126, 127, },
+ { 0, 1, 1, 2, 134, 72, },
+ { 2, 1, 1, 2, 134, 64, },
{ 1, 1, 1, 2, 134, 72, },
{ 3, 1, 1, 2, 134, 72, },
{ 4, 1, 1, 2, 134, 72, },
{ 5, 1, 1, 2, 134, 64, },
{ 6, 1, 1, 2, 134, 72, },
- { 7, 1, 1, 2, 134, 127, },
+ { 7, 1, 1, 2, 134, 54, },
{ 8, 1, 1, 2, 134, 72, },
+ { 9, 1, 1, 2, 134, 127, },
+ { 0, 1, 1, 2, 142, 72, },
+ { 2, 1, 1, 2, 142, 127, },
{ 1, 1, 1, 2, 142, 127, },
{ 3, 1, 1, 2, 142, 72, },
{ 4, 1, 1, 2, 142, 72, },
@@ -13288,6 +27423,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 142, 72, },
{ 7, 1, 1, 2, 142, 127, },
{ 8, 1, 1, 2, 142, 72, },
+ { 9, 1, 1, 2, 142, 127, },
+ { 0, 1, 1, 2, 151, 72, },
+ { 2, 1, 1, 2, 151, -128, },
{ 1, 1, 1, 2, 151, 127, },
{ 3, 1, 1, 2, 151, 72, },
{ 4, 1, 1, 2, 151, 72, },
@@ -13295,6 +27433,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 151, 72, },
{ 7, 1, 1, 2, 151, 54, },
{ 8, 1, 1, 2, 151, 72, },
+ { 9, 1, 1, 2, 151, -128, },
+ { 0, 1, 1, 2, 159, 72, },
+ { 2, 1, 1, 2, 159, -128, },
{ 1, 1, 1, 2, 159, 127, },
{ 3, 1, 1, 2, 159, 72, },
{ 4, 1, 1, 2, 159, 72, },
@@ -13302,6 +27443,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 2, 159, 72, },
{ 7, 1, 1, 2, 159, 54, },
{ 8, 1, 1, 2, 159, 72, },
+ { 9, 1, 1, 2, 159, -128, },
+ { 0, 1, 1, 3, 38, 60, },
+ { 2, 1, 1, 3, 38, 40, },
{ 1, 1, 1, 3, 38, 50, },
{ 3, 1, 1, 3, 38, 40, },
{ 4, 1, 1, 3, 38, 62, },
@@ -13309,13 +27453,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 38, 52, },
{ 7, 1, 1, 3, 38, 30, },
{ 8, 1, 1, 3, 38, 50, },
+ { 9, 1, 1, 3, 38, 40, },
+ { 0, 1, 1, 3, 46, 68, },
+ { 2, 1, 1, 3, 46, 40, },
{ 1, 1, 1, 3, 46, 50, },
{ 3, 1, 1, 3, 46, 40, },
- { 4, 1, 1, 3, 46, 62, },
+ { 4, 1, 1, 3, 46, 46, },
{ 5, 1, 1, 3, 46, 40, },
{ 6, 1, 1, 3, 46, 52, },
{ 7, 1, 1, 3, 46, 30, },
{ 8, 1, 1, 3, 46, 50, },
+ { 9, 1, 1, 3, 46, 40, },
+ { 0, 1, 1, 3, 54, 68, },
+ { 2, 1, 1, 3, 54, 40, },
{ 1, 1, 1, 3, 54, 50, },
{ 3, 1, 1, 3, 54, 40, },
{ 4, 1, 1, 3, 54, 62, },
@@ -13323,6 +27473,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 54, 68, },
{ 7, 1, 1, 3, 54, 30, },
{ 8, 1, 1, 3, 54, 68, },
+ { 9, 1, 1, 3, 54, 40, },
+ { 0, 1, 1, 3, 62, 58, },
+ { 2, 1, 1, 3, 62, 40, },
{ 1, 1, 1, 3, 62, 48, },
{ 3, 1, 1, 3, 62, 40, },
{ 4, 1, 1, 3, 62, 58, },
@@ -13330,6 +27483,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 62, 58, },
{ 7, 1, 1, 3, 62, 30, },
{ 8, 1, 1, 3, 62, 58, },
+ { 9, 1, 1, 3, 62, 40, },
+ { 0, 1, 1, 3, 102, 54, },
+ { 2, 1, 1, 3, 102, 40, },
{ 1, 1, 1, 3, 102, 70, },
{ 3, 1, 1, 3, 102, 54, },
{ 4, 1, 1, 3, 102, 64, },
@@ -13337,6 +27493,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 102, 54, },
{ 7, 1, 1, 3, 102, 30, },
{ 8, 1, 1, 3, 102, 54, },
+ { 9, 1, 1, 3, 102, 127, },
+ { 0, 1, 1, 3, 110, 68, },
+ { 2, 1, 1, 3, 110, 40, },
{ 1, 1, 1, 3, 110, 70, },
{ 3, 1, 1, 3, 110, 68, },
{ 4, 1, 1, 3, 110, 64, },
@@ -13344,6 +27503,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 110, 68, },
{ 7, 1, 1, 3, 110, 30, },
{ 8, 1, 1, 3, 110, 68, },
+ { 9, 1, 1, 3, 110, 127, },
+ { 0, 1, 1, 3, 118, 68, },
+ { 2, 1, 1, 3, 118, 40, },
{ 1, 1, 1, 3, 118, 70, },
{ 3, 1, 1, 3, 118, 127, },
{ 4, 1, 1, 3, 118, 64, },
@@ -13351,6 +27513,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 118, 68, },
{ 7, 1, 1, 3, 118, 30, },
{ 8, 1, 1, 3, 118, 68, },
+ { 9, 1, 1, 3, 118, 127, },
+ { 0, 1, 1, 3, 126, 68, },
+ { 2, 1, 1, 3, 126, 40, },
{ 1, 1, 1, 3, 126, 70, },
{ 3, 1, 1, 3, 126, 127, },
{ 4, 1, 1, 3, 126, 64, },
@@ -13358,13 +27523,19 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 126, 68, },
{ 7, 1, 1, 3, 126, 30, },
{ 8, 1, 1, 3, 126, 68, },
+ { 9, 1, 1, 3, 126, 127, },
+ { 0, 1, 1, 3, 134, 68, },
+ { 2, 1, 1, 3, 134, 40, },
{ 1, 1, 1, 3, 134, 70, },
{ 3, 1, 1, 3, 134, 68, },
{ 4, 1, 1, 3, 134, 64, },
{ 5, 1, 1, 3, 134, 40, },
{ 6, 1, 1, 3, 134, 68, },
- { 7, 1, 1, 3, 134, 127, },
+ { 7, 1, 1, 3, 134, 30, },
{ 8, 1, 1, 3, 134, 68, },
+ { 9, 1, 1, 3, 134, 127, },
+ { 0, 1, 1, 3, 142, 68, },
+ { 2, 1, 1, 3, 142, 127, },
{ 1, 1, 1, 3, 142, 127, },
{ 3, 1, 1, 3, 142, 68, },
{ 4, 1, 1, 3, 142, 64, },
@@ -13372,6 +27543,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 142, 68, },
{ 7, 1, 1, 3, 142, 127, },
{ 8, 1, 1, 3, 142, 68, },
+ { 9, 1, 1, 3, 142, 127, },
+ { 0, 1, 1, 3, 151, 72, },
+ { 2, 1, 1, 3, 151, -128, },
{ 1, 1, 1, 3, 151, 127, },
{ 3, 1, 1, 3, 151, 72, },
{ 4, 1, 1, 3, 151, 66, },
@@ -13379,6 +27553,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 151, 72, },
{ 7, 1, 1, 3, 151, 30, },
{ 8, 1, 1, 3, 151, 68, },
+ { 9, 1, 1, 3, 151, -128, },
+ { 0, 1, 1, 3, 159, 72, },
+ { 2, 1, 1, 3, 159, -128, },
{ 1, 1, 1, 3, 159, 127, },
{ 3, 1, 1, 3, 159, 72, },
{ 4, 1, 1, 3, 159, 66, },
@@ -13386,6 +27563,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 1, 3, 159, 72, },
{ 7, 1, 1, 3, 159, 30, },
{ 8, 1, 1, 3, 159, 72, },
+ { 9, 1, 1, 3, 159, -128, },
+ { 0, 1, 2, 4, 42, 64, },
+ { 2, 1, 2, 4, 42, 64, },
{ 1, 1, 2, 4, 42, 64, },
{ 3, 1, 2, 4, 42, 64, },
{ 4, 1, 2, 4, 42, 68, },
@@ -13393,6 +27573,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 42, 64, },
{ 7, 1, 2, 4, 42, 54, },
{ 8, 1, 2, 4, 42, 62, },
+ { 9, 1, 2, 4, 42, 64, },
+ { 0, 1, 2, 4, 58, 62, },
+ { 2, 1, 2, 4, 58, 64, },
{ 1, 1, 2, 4, 58, 64, },
{ 3, 1, 2, 4, 58, 62, },
{ 4, 1, 2, 4, 58, 64, },
@@ -13400,6 +27583,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 58, 62, },
{ 7, 1, 2, 4, 58, 54, },
{ 8, 1, 2, 4, 58, 62, },
+ { 9, 1, 2, 4, 58, 64, },
+ { 0, 1, 2, 4, 106, 58, },
+ { 2, 1, 2, 4, 106, 64, },
{ 1, 1, 2, 4, 106, 72, },
{ 3, 1, 2, 4, 106, 58, },
{ 4, 1, 2, 4, 106, 66, },
@@ -13407,6 +27593,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 106, 58, },
{ 7, 1, 2, 4, 106, 54, },
{ 8, 1, 2, 4, 106, 58, },
+ { 9, 1, 2, 4, 106, 127, },
+ { 0, 1, 2, 4, 122, 72, },
+ { 2, 1, 2, 4, 122, 64, },
{ 1, 1, 2, 4, 122, 72, },
{ 3, 1, 2, 4, 122, 127, },
{ 4, 1, 2, 4, 122, 68, },
@@ -13414,6 +27603,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 122, 72, },
{ 7, 1, 2, 4, 122, 54, },
{ 8, 1, 2, 4, 122, 72, },
+ { 9, 1, 2, 4, 122, 127, },
+ { 0, 1, 2, 4, 138, 72, },
+ { 2, 1, 2, 4, 138, 127, },
{ 1, 1, 2, 4, 138, 127, },
{ 3, 1, 2, 4, 138, 72, },
{ 4, 1, 2, 4, 138, 68, },
@@ -13421,6 +27613,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 138, 72, },
{ 7, 1, 2, 4, 138, 127, },
{ 8, 1, 2, 4, 138, 72, },
+ { 9, 1, 2, 4, 138, 127, },
+ { 0, 1, 2, 4, 155, 72, },
+ { 2, 1, 2, 4, 155, -128, },
{ 1, 1, 2, 4, 155, 127, },
{ 3, 1, 2, 4, 155, 72, },
{ 4, 1, 2, 4, 155, 68, },
@@ -13428,6 +27623,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 4, 155, 72, },
{ 7, 1, 2, 4, 155, 54, },
{ 8, 1, 2, 4, 155, 68, },
+ { 9, 1, 2, 4, 155, -128, },
+ { 0, 1, 2, 5, 42, 54, },
+ { 2, 1, 2, 5, 42, 40, },
{ 1, 1, 2, 5, 42, 50, },
{ 3, 1, 2, 5, 42, 40, },
{ 4, 1, 2, 5, 42, 58, },
@@ -13435,6 +27633,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 42, 52, },
{ 7, 1, 2, 5, 42, 30, },
{ 8, 1, 2, 5, 42, 50, },
+ { 9, 1, 2, 5, 42, 40, },
+ { 0, 1, 2, 5, 58, 52, },
+ { 2, 1, 2, 5, 58, 40, },
{ 1, 1, 2, 5, 58, 50, },
{ 3, 1, 2, 5, 58, 40, },
{ 4, 1, 2, 5, 58, 56, },
@@ -13442,6 +27643,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 58, 52, },
{ 7, 1, 2, 5, 58, 30, },
{ 8, 1, 2, 5, 58, 52, },
+ { 9, 1, 2, 5, 58, 40, },
+ { 0, 1, 2, 5, 106, 50, },
+ { 2, 1, 2, 5, 106, 40, },
{ 1, 1, 2, 5, 106, 72, },
{ 3, 1, 2, 5, 106, 50, },
{ 4, 1, 2, 5, 106, 56, },
@@ -13449,6 +27653,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 106, 50, },
{ 7, 1, 2, 5, 106, 30, },
{ 8, 1, 2, 5, 106, 50, },
+ { 9, 1, 2, 5, 106, 127, },
+ { 0, 1, 2, 5, 122, 66, },
+ { 2, 1, 2, 5, 122, 40, },
{ 1, 1, 2, 5, 122, 72, },
{ 3, 1, 2, 5, 122, 127, },
{ 4, 1, 2, 5, 122, 56, },
@@ -13456,6 +27663,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 122, 66, },
{ 7, 1, 2, 5, 122, 30, },
{ 8, 1, 2, 5, 122, 66, },
+ { 9, 1, 2, 5, 122, 127, },
+ { 0, 1, 2, 5, 138, 66, },
+ { 2, 1, 2, 5, 138, 127, },
{ 1, 1, 2, 5, 138, 127, },
{ 3, 1, 2, 5, 138, 66, },
{ 4, 1, 2, 5, 138, 58, },
@@ -13463,6 +27673,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 138, 66, },
{ 7, 1, 2, 5, 138, 127, },
{ 8, 1, 2, 5, 138, 66, },
+ { 9, 1, 2, 5, 138, 127, },
+ { 0, 1, 2, 5, 155, 62, },
+ { 2, 1, 2, 5, 155, -128, },
{ 1, 1, 2, 5, 155, 127, },
{ 3, 1, 2, 5, 155, 62, },
{ 4, 1, 2, 5, 155, 58, },
@@ -13470,9 +27683,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 6, 1, 2, 5, 155, 62, },
{ 7, 1, 2, 5, 155, 30, },
{ 8, 1, 2, 5, 155, 62, },
+ { 9, 1, 2, 5, 155, -128, },
};
-RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type5);
static const u32 rtw8822c_dpk_afe_no_dpk[] = {
0x18a4, BIT(7), 0,
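
[Annotation: each row in the power-limit tables above is one struct rtw_txpwr_lmt_cfg_pair entry. A sketch of the layout as inferred from the table name and the value ranges in the rows — the field semantics are this reviewer's reading, not taken from the patch. Note the added rows all carry 0, 2, or 9 in the first column, i.e. new regulatory-domain entries:

	struct rtw_txpwr_lmt_cfg_pair {
		u8 regd;	/* regulatory domain index; rows for 0, 2 and 9 are new */
		u8 band;	/* 0 = 2.4 GHz, 1 = 5 GHz */
		u8 bw;		/* bandwidth: 0 = 20 MHz, 1 = 40 MHz, 2 = 80 MHz */
		u8 rs;		/* rate section (OFDM/HT/VHT, 1SS/2SS) */
		u8 ch;		/* channel number */
		s8 txpwr_lmt;	/* limit in sub-dBm steps; 127 / -128 look like
				 * "not specified" sentinels */
	};
]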
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
index 80c06c4f8184..2ae2b0aa5699 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
@@ -12,6 +12,7 @@ extern const struct rtw_table rtw8822c_bb_pg_type0_tbl;
extern const struct rtw_table rtw8822c_rf_a_tbl;
extern const struct rtw_table rtw8822c_rf_b_tbl;
extern const struct rtw_table rtw8822c_txpwr_lmt_type0_tbl;
+extern const struct rtw_table rtw8822c_txpwr_lmt_type5_tbl;
extern const struct rtw_table rtw8822c_dpk_afe_no_dpk_tbl;
extern const struct rtw_table rtw8822c_dpk_afe_is_dpk_tbl;
extern const struct rtw_table rtw8822c_dpk_mac_bb_tbl;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
new file mode 100644
index 000000000000..7b6bd990651e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "rtw8822ce.h"
+
+static const struct pci_device_id rtw_8822ce_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822),
+ .driver_data = (kernel_ulong_t)&rtw8822c_hw_spec
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, rtw_8822ce_id_table);
+
+static struct pci_driver rtw_8822ce_driver = {
+ .name = "rtw_8822ce",
+ .id_table = rtw_8822ce_id_table,
+ .probe = rtw_pci_probe,
+ .remove = rtw_pci_remove,
+ .driver.pm = &rtw_pm_ops,
+ .shutdown = rtw_pci_shutdown,
+};
+module_pci_driver(rtw_8822ce_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822ce driver");
+MODULE_LICENSE("Dual BSD/GPL");
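
[Annotation: rtw8822ce.c is deliberately a thin PCI glue module — the probe/remove/shutdown entry points live in the shared rtw88 PCI core, which is why this series sprinkles EXPORT_SYMBOL over core helpers. module_pci_driver() generates the init/exit boilerplate; it expands roughly to:

	static int __init rtw_8822ce_driver_init(void)
	{
		return pci_register_driver(&rtw_8822ce_driver);
	}
	module_init(rtw_8822ce_driver_init);

	static void __exit rtw_8822ce_driver_exit(void)
	{
		pci_unregister_driver(&rtw_8822ce_driver);
	}
	module_exit(rtw_8822ce_driver_exit);
]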
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.h b/drivers/net/wireless/realtek/rtw88/rtw8822ce.h
new file mode 100644
index 000000000000..c2c0e8675d74
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_8822CE_H_
+#define __RTW_8822CE_H_
+
+extern const struct dev_pm_ops rtw_pm_ops;
+extern struct rtw_chip_info rtw8822c_hw_spec;
+int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+void rtw_pci_remove(struct pci_dev *pdev);
+void rtw_pci_shutdown(struct pci_dev *pdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 9b90339ab697..7087e385a9b3 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -191,3 +191,4 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
rtw_rx_addr_match(rtwdev, pkt_stat, hdr);
}
+EXPORT_SYMBOL(rtw_rx_fill_rx_status);
diff --git a/drivers/net/wireless/realtek/rtw88/sec.c b/drivers/net/wireless/realtek/rtw88/sec.c
index d0d7fbb10d58..ce46e5b4a60a 100644
--- a/drivers/net/wireless/realtek/rtw88/sec.c
+++ b/drivers/net/wireless/realtek/rtw88/sec.c
@@ -44,7 +44,7 @@ void rtw_sec_write_cam(struct rtw_dev *rtwdev,
write_cmd = RTW_SEC_CMD_WRITE_ENABLE | RTW_SEC_CMD_POLLING;
addr = hw_key_idx << RTW_SEC_CAM_ENTRY_SHIFT;
- for (i = 5; i >= 0; i--) {
+ for (i = 7; i >= 0; i--) {
switch (i) {
case 0:
content = ((key->keyidx & 0x3)) |
@@ -60,6 +60,10 @@ void rtw_sec_write_cam(struct rtw_dev *rtwdev,
(cam->addr[4] << 16) |
(cam->addr[5] << 24);
break;
+ case 6:
+ case 7:
+ content = 0;
+ break;
default:
j = (i - 2) << 2;
content = (key->key[j]) |
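
[Annotation: the sec.c change widens the CAM write loop from six to eight 32-bit words and explicitly zeroes the two extra words. A hypothetical view of one entry's layout after the change — word meanings inferred from the switch cases above, not from hardware documentation:

	/*
	 * CAM entry, eight 32-bit words:
	 *   word 0    - key index, cipher type, valid bit, MAC bytes 0-1
	 *   word 1    - MAC bytes 2-5
	 *   words 2-5 - 128-bit key material, four bytes per word
	 *   words 6-7 - unused; now written as 0 so stale data from a
	 *               previously installed key cannot linger in the entry
	 */
]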
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 60989987f67b..79c42118825f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -196,7 +196,7 @@ static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev,
ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
}
-void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
+void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src)
{
struct rtw_tx_report *tx_report = &rtwdev->tx_report;
struct rtw_c2h_cmd *c2h;
@@ -207,8 +207,13 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
c2h = get_c2h_from_skb(skb);
- sn = GET_CCX_REPORT_SEQNUM(c2h->payload);
- st = GET_CCX_REPORT_STATUS(c2h->payload);
+ if (src == C2H_CCX_TX_RPT) {
+ sn = GET_CCX_REPORT_SEQNUM_V0(c2h->payload);
+ st = GET_CCX_REPORT_STATUS_V0(c2h->payload);
+ } else {
+ sn = GET_CCX_REPORT_SEQNUM_V1(c2h->payload);
+ st = GET_CCX_REPORT_STATUS_V1(c2h->payload);
+ }
spin_lock_irqsave(&tx_report->q_lock, flags);
skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
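
[Annotation: rtw_tx_report_handle() now takes the C2H source so one handler can parse both generations of the CCX TX report. A hedged sketch of how a C2H dispatcher would be expected to call it — only C2H_CCX_TX_RPT appears in this hunk; the V1 report ID name below is an assumption:

	switch (c2h->id) {
	case C2H_CCX_TX_RPT:		/* legacy (V0) report layout */
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
		break;
	case C2H_CCX_RPT:		/* assumed name for the V1 report */
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
		break;
	}
]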
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index b973de0f4dc0..72dfd4059f03 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -95,7 +95,7 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct sk_buff *skb);
void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb);
void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn);
-void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src);
void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index 10f1117c0cfb..2c515af214e7 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -19,25 +19,32 @@ bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
return false;
}
+EXPORT_SYMBOL(check_hw_ready);
bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
{
- if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1))
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr;
+
+ if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1))
return false;
- rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0x800F0000 | offset);
- *val = rtw_read32(rtwdev, LTECOEX_READ_DATA);
+ rtw_write32(rtwdev, ltecoex->ctrl, 0x800F0000 | offset);
+ *val = rtw_read32(rtwdev, ltecoex->rdata);
return true;
}
bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value)
{
- if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1))
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr;
+
+ if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1))
return false;
- rtw_write32(rtwdev, LTECOEX_WRITE_DATA, value);
- rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0xC00F0000 | offset);
+ rtw_write32(rtwdev, ltecoex->wdata, value);
+ rtw_write32(rtwdev, ltecoex->ctrl, 0xC00F0000 | offset);
return true;
}
@@ -70,6 +77,7 @@ void rtw_restore_reg(struct rtw_dev *rtwdev,
}
}
}
+EXPORT_SYMBOL(rtw_restore_reg);
void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
{
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index c8f8fe5497a8..8852a1832951 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -201,7 +201,7 @@ struct ndis_80211_pmkid_candidate {
struct ndis_80211_pmkid_cand_list {
__le32 version;
__le32 num_candidates;
- struct ndis_80211_pmkid_candidate candidate_list[0];
+ struct ndis_80211_pmkid_candidate candidate_list[];
} __packed;
struct ndis_80211_status_indication {
@@ -246,12 +246,12 @@ struct ndis_80211_bssid_ex {
__le32 net_infra;
u8 rates[NDIS_802_11_LENGTH_RATES_EX];
__le32 ie_length;
- u8 ies[0];
+ u8 ies[];
} __packed;
struct ndis_80211_bssid_list_ex {
__le32 num_items;
- struct ndis_80211_bssid_ex bssid[0];
+ struct ndis_80211_bssid_ex bssid[];
} __packed;
struct ndis_80211_fixed_ies {
@@ -312,17 +312,11 @@ struct ndis_80211_assoc_info {
__le32 offset_resp_ies;
} __packed;
-struct ndis_80211_auth_encr_pair {
- __le32 auth_mode;
- __le32 encr_mode;
-} __packed;
-
struct ndis_80211_capability {
__le32 length;
__le32 version;
__le32 num_pmkids;
__le32 num_auth_encr_pair;
- struct ndis_80211_auth_encr_pair auth_encr_pair[0];
} __packed;
struct ndis_80211_bssid_info {
@@ -333,7 +327,7 @@ struct ndis_80211_bssid_info {
struct ndis_80211_pmkid {
__le32 length;
__le32 bssid_info_count;
- struct ndis_80211_bssid_info bssid_info[0];
+ struct ndis_80211_bssid_info bssid_info[];
} __packed;
/*
@@ -3109,8 +3103,7 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
__le32 num_items;
__le32 items[8];
} networks_supported;
- struct ndis_80211_capability *caps;
- u8 caps_buf[sizeof(*caps) + sizeof(caps->auth_encr_pair) * 16];
+ struct ndis_80211_capability caps;
int len, retval, i, n;
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
@@ -3140,19 +3133,18 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
}
/* get device 802.11 capabilities, number of PMKIDs */
- caps = (struct ndis_80211_capability *)caps_buf;
- len = sizeof(caps_buf);
+ len = sizeof(caps);
retval = rndis_query_oid(usbdev,
RNDIS_OID_802_11_CAPABILITY,
- caps, &len);
+ &caps, &len);
if (retval >= 0) {
netdev_dbg(usbdev->net, "RNDIS_OID_802_11_CAPABILITY -> len %d, "
"ver %d, pmkids %d, auth-encr-pairs %d\n",
- le32_to_cpu(caps->length),
- le32_to_cpu(caps->version),
- le32_to_cpu(caps->num_pmkids),
- le32_to_cpu(caps->num_auth_encr_pair));
- wiphy->max_num_pmkids = le32_to_cpu(caps->num_pmkids);
+ le32_to_cpu(caps.length),
+ le32_to_cpu(caps.version),
+ le32_to_cpu(caps.num_pmkids),
+ le32_to_cpu(caps.num_auth_encr_pair));
+ wiphy->max_num_pmkids = le32_to_cpu(caps.num_pmkids);
} else
wiphy->max_num_pmkids = 0;
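
[Annotation: the [0] -> [] conversions adopt C99 flexible array members, which compilers and bounds-checkers can instrument, unlike the GNU zero-length-array extension. sizeof() on such a struct covers only the fixed header, so allocation math stays explicit; a hedged example, not from the patch:

	len = sizeof(struct ndis_80211_pmkid) +
	      count * sizeof(struct ndis_80211_bssid_info);
	pmkid = kzalloc(len, GFP_KERNEL);

The ndis_80211_capability case goes further: with the trailing array (and its unused element type) removed, a plain stack variable suffices and the old caps_buf sizing trick disappears.]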
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 440088293aff..5c0adb0efc5d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -832,7 +832,7 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
common->cqm_info.last_cqm_event_rssi = 0;
common->cqm_info.rssi_thold = bss_conf->cqm_rssi_thold;
common->cqm_info.rssi_hyst = bss_conf->cqm_rssi_hyst;
- rsi_dbg(INFO_ZONE, "RSSI throld & hysteresis are: %d %d\n",
+ rsi_dbg(INFO_ZONE, "RSSI threshold & hysteresis are: %d %d\n",
common->cqm_info.rssi_thold,
common->cqm_info.rssi_hyst);
}
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index 43e012073dbf..b65ec14136c7 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -14,6 +14,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_ids.h>
#include <net/mac80211.h>
#include "cw1200.h"
@@ -48,14 +49,6 @@ struct hwbus_priv {
const struct cw1200_platform_data_sdio *pdata;
};
-#ifndef SDIO_VENDOR_ID_STE
-#define SDIO_VENDOR_ID_STE 0x0020
-#endif
-
-#ifndef SDIO_DEVICE_ID_STE_CW1200
-#define SDIO_DEVICE_ID_STE_CW1200 0x2280
-#endif
-
static const struct sdio_device_id cw1200_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
{ /* end: all zeroes */ },
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index ef01caac629c..271ed2ce2d7f 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -268,15 +268,11 @@ exit:
return ret;
}
-static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
+static void cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
{
- int ret = 0;
-
pr_debug("SW IRQ unsubscribe\n");
disable_irq_wake(self->func->irq);
free_irq(self->func->irq, self);
-
- return ret;
}
static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index f2609d5b6bf7..9acd8a41ea61 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -458,6 +458,7 @@ enum wl1271_cmd_key_type {
KEY_TKIP = 2,
KEY_AES = 3,
KEY_GEM = 4,
+ KEY_IGTK = 5,
};
struct wl1271_cmd_set_keys {
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index f140f7d7f553..de6c8a7589ca 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -548,7 +548,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
- goto out;
+ goto err_ret;
wlcore_hw_tx_immediate_compl(wl);
@@ -565,7 +565,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
ret = -EIO;
/* restarting the chip. ignore any other interrupt. */
- goto out;
+ goto err_ret;
}
if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
@@ -575,7 +575,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
ret = -EIO;
/* restarting the chip. ignore any other interrupt. */
- goto out;
+ goto err_ret;
}
if (likely(intr & WL1271_ACX_INTR_DATA)) {
@@ -583,7 +583,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
ret = wlcore_rx(wl, wl->fw_status);
if (ret < 0)
- goto out;
+ goto err_ret;
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -596,7 +596,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
*/
ret = wlcore_tx_work_locked(wl);
if (ret < 0)
- goto out;
+ goto err_ret;
} else {
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@@ -604,7 +604,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
/* check for tx results */
ret = wlcore_hw_tx_delayed_compl(wl);
if (ret < 0)
- goto out;
+ goto err_ret;
/* Make sure the deferred queues don't get too long */
defer_count = skb_queue_len(&wl->deferred_tx_queue) +
@@ -617,14 +617,14 @@ static int wlcore_irq_locked(struct wl1271 *wl)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
ret = wl1271_event_handle(wl, 0);
if (ret < 0)
- goto out;
+ goto err_ret;
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
ret = wl1271_event_handle(wl, 1);
if (ret < 0)
- goto out;
+ goto err_ret;
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -635,6 +635,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
}
+err_ret:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
@@ -1746,9 +1747,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
ret = wl1271_configure_suspend(wl, wlvif, wow);
if (ret < 0) {
- mutex_unlock(&wl->mutex);
- wl1271_warning("couldn't prepare device to suspend");
- return ret;
+ goto out_sleep;
}
}
@@ -2698,12 +2697,16 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (!wlcore_is_p2p_mgmt(wlvif)) {
ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto deinit;
+ }
} else {
ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto deinit;
+ }
}
pm_runtime_mark_last_busy(wl->dev);
@@ -3547,6 +3550,9 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
case WL1271_CIPHER_SUITE_GEM:
key_type = KEY_GEM;
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key_type = KEY_IGTK;
+ break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -3662,8 +3668,10 @@ void wlcore_regdomain_config(struct wl1271 *wl)
goto out;
ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(wl->dev);
goto out;
+ }
ret = wlcore_cmd_regdomain_config_locked(wl);
if (ret < 0) {
@@ -6214,6 +6222,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WL1271_CIPHER_SUITE_GEM,
+ WLAN_CIPHER_SUITE_AES_CMAC,
};
/* The tx descriptor buffer */
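
[Annotation: several wlcore hunks above share one idiom: pm_runtime_get_sync() increments the device usage count even when it fails, so every error path must drop that reference or the device can never autosuspend again. The pattern in isolation:

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		/* usage count was bumped despite the failure */
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

The wl1271_op_suspend change is the same family of fix routed through the existing out_sleep unwind label instead of an early return.]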
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 90e56d4c3df3..e20e18cd04ae 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -863,6 +863,7 @@ void wl1271_tx_work(struct work_struct *work)
ret = wlcore_tx_work_locked(wl);
if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
wl12xx_queue_recovery_work(wl);
goto out;
}
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index de613c623a2c..69857f080704 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -434,15 +434,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
goto out;
}
- {
- SHASH_DESC_ON_STACK(desc, tfm);
-
- desc->tfm = tfm;
-
- ret = crypto_shash_digest(desc, fw->image, image_size,
- hash_data);
- shash_desc_zero(desc);
- }
+ ret = crypto_shash_tfm_digest(tfm, fw->image, image_size, hash_data);
crypto_free_shash(tfm);
if (ret) {
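
[Annotation: crypto_shash_tfm_digest() is a then-new crypto API helper that folds the on-stack descriptor dance into one call; its implementation is essentially the block this hunk deletes:

	int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
				    unsigned int len, u8 *out)
	{
		SHASH_DESC_ON_STACK(desc, tfm);
		int err;

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
		return err;
	}
]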
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index a1d69f9b2d4a..0b9ca6d20ffa 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -173,8 +173,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev,
memcpy(atr_res->gbi, atr_req->gbi, gb_len);
r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi,
gb_len);
- if (r < 0)
+ if (r < 0) {
+ kfree_skb(skb);
return r;
+ }
}
info->dep_info.curr_nfc_dep_pni = 0;
diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
index 2581ab724c34..f8f75a504a58 100644
--- a/drivers/ntb/core.c
+++ b/drivers/ntb/core.c
@@ -214,10 +214,8 @@ int ntb_default_port_number(struct ntb_dev *ntb)
case NTB_TOPO_B2B_DSD:
return NTB_PORT_SEC_DSD;
default:
- break;
+ return 0;
}
-
- return -EINVAL;
}
EXPORT_SYMBOL(ntb_default_port_number);
@@ -240,10 +238,8 @@ int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx)
case NTB_TOPO_B2B_DSD:
return NTB_PORT_PRI_USD;
default:
- break;
+ return 0;
}
-
- return -EINVAL;
}
EXPORT_SYMBOL(ntb_default_peer_port_number);
@@ -315,4 +311,3 @@ static void __exit ntb_driver_exit(void)
bus_unregister(&ntb_bus);
}
module_exit(ntb_driver_exit);
-
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 9e310e1ad4d0..88e1db65be02 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1191,10 +1191,6 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
- rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
- dma_get_mask(&pdev->dev));
- if (rc)
- goto err_dma_mask;
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index edae52384b8a..d54261f50851 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2660,12 +2660,6 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
dev_warn(&pdev->dev,
"Cannot set consistent DMA highmem bit mask\n");
}
- ret = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
- dma_get_mask(&pdev->dev));
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to set NTB device DMA bit mask\n");
- return ret;
- }
/*
* Enable the device advanced error reporting. It's not critical to
diff --git a/drivers/ntb/hw/intel/Makefile b/drivers/ntb/hw/intel/Makefile
index 60ec8a773eea..f80da0ba15b2 100644
--- a/drivers/ntb/hw/intel/Makefile
+++ b/drivers/ntb/hw/intel/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o
-ntb_hw_intel-y := ntb_hw_gen1.o ntb_hw_gen3.o
+ntb_hw_intel-y := ntb_hw_gen1.o ntb_hw_gen3.o ntb_hw_gen4.o
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index bb57ec239029..423f9b8fbbcf 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -60,6 +60,7 @@
#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"
+#include "ntb_hw_gen4.h"
#define NTB_NAME "ntb_hw_intel"
#define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver"
@@ -762,6 +763,8 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_gen3(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
+ else if (pdev_is_gen4(ndev->ntb.pdev))
+ return ndev_ntb4_debugfs_read(filp, ubuf, count, offp);
return -ENXIO;
}
@@ -1783,10 +1786,6 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
- rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
- dma_get_mask(&pdev->dev));
- if (rc)
- goto err_dma_mask;
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
@@ -1858,16 +1857,15 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
int rc, node;
node = dev_to_node(&pdev->dev);
+ ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto err_ndev;
+ }
- if (pdev_is_gen1(pdev)) {
- ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
- if (!ndev) {
- rc = -ENOMEM;
- goto err_ndev;
- }
-
- ndev_init_struct(ndev, pdev);
+ ndev_init_struct(ndev, pdev);
+ if (pdev_is_gen1(pdev)) {
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
@@ -1875,17 +1873,8 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
rc = xeon_init_dev(ndev);
if (rc)
goto err_init_dev;
-
} else if (pdev_is_gen3(pdev)) {
- ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
- if (!ndev) {
- rc = -ENOMEM;
- goto err_ndev;
- }
-
- ndev_init_struct(ndev, pdev);
ndev->ntb.ops = &intel_ntb3_ops;
-
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
@@ -1893,7 +1882,15 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
rc = gen3_init_dev(ndev);
if (rc)
goto err_init_dev;
+ } else if (pdev_is_gen4(pdev)) {
+ ndev->ntb.ops = &intel_ntb4_ops;
+ rc = intel_ntb_init_pci(ndev, pdev);
+ if (rc)
+ goto err_init_pci;
+ rc = gen4_init_dev(ndev);
+ if (rc)
+ goto err_init_dev;
} else {
rc = -EINVAL;
goto err_ndev;
@@ -1915,7 +1912,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
err_register:
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@@ -1931,7 +1928,7 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
@@ -2036,6 +2033,7 @@ static const struct file_operations intel_ntb_debugfs_info = {
};
static const struct pci_device_id intel_ntb_pci_tbl[] = {
+ /* GEN1 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
@@ -2051,7 +2049,12 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
+
+ /* GEN3 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
+
+ /* GEN4 */
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
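
[Annotation: the probe path now allocates and initializes the device structure once, then dispatches per generation, with gen4 (Ice Lake) slotted in beside gen1/gen3. pdev_is_gen4() is presumably a device-ID check by analogy with the existing helpers — an assumed sketch, not shown in this diff:

	static inline int pdev_is_gen4(struct pci_dev *pdev)
	{
		return pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_ICX;
	}
]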
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h
index 544cf5c06f4d..1b759942d8af 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h
@@ -140,6 +140,7 @@
#define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1)
#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
#define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3)
+#define NTB_HWERR_BAR_ALIGN BIT_ULL(4)
extern struct intel_b2b_addr xeon_b2b_usd_addr;
extern struct intel_b2b_addr xeon_b2b_dsd_addr;
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c
index c3397160db7f..ffcfc3e02c35 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen3.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c
@@ -415,9 +415,8 @@ ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
return ret;
}
-static int intel_ntb3_link_enable(struct ntb_dev *ntb,
- enum ntb_speed max_speed,
- enum ntb_width max_width)
+int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed,
+ enum ntb_width max_width)
{
struct intel_ntb_dev *ndev;
u32 ntb_ctl;
@@ -532,7 +531,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return 0;
}
-static int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
resource_size_t *db_size,
u64 *db_data, int db_bit)
{
@@ -563,7 +562,7 @@ static int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
return 0;
}
-static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
int bit;
@@ -581,7 +580,7 @@ static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
return 0;
}
-static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
+u64 intel_ntb3_db_read(struct ntb_dev *ntb)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
@@ -590,7 +589,7 @@ static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
ndev->self_reg->db_clear);
}
-static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
+int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.h b/drivers/ntb/hw/intel/ntb_hw_gen3.h
index 75fb86ca27bb..2bc5d8356045 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen3.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.h
@@ -104,6 +104,14 @@ static inline void gen3_db_iowrite(u64 bits, void __iomem *mmio)
ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp);
int gen3_init_dev(struct intel_ntb_dev *ndev);
+int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed,
+ enum ntb_width max_width);
+u64 intel_ntb3_db_read(struct ntb_dev *ntb);
+int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits);
+int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits);
+int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+ resource_size_t *db_size,
+ u64 *db_data, int db_bit);
extern const struct ntb_dev_ops intel_ntb3_ops;
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
new file mode 100644
index 000000000000..bc4541cbf8c6
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+#include <linux/log2.h>
+
+#include "ntb_hw_intel.h"
+#include "ntb_hw_gen1.h"
+#include "ntb_hw_gen3.h"
+#include "ntb_hw_gen4.h"
+
+static int gen4_poll_link(struct intel_ntb_dev *ndev);
+static int gen4_link_is_up(struct intel_ntb_dev *ndev);
+
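+/*
+ * Gen4 register description. The doorbell accessors are shared with
+ * Gen3 as the doorbell register layout is unchanged.
+ */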
+static const struct intel_ntb_reg gen4_reg = {
+ .poll_link = gen4_poll_link,
+ .link_is_up = gen4_link_is_up,
+ .db_ioread = gen3_db_ioread,
+ .db_iowrite = gen3_db_iowrite,
+ .db_size = sizeof(u32),
+ .ntb_ctl = GEN4_NTBCNTL_OFFSET,
+ .mw_bar = {2, 4},
+};
+
+static const struct intel_ntb_alt_reg gen4_pri_reg = {
+ .db_clear = GEN4_IM_INT_STATUS_OFFSET,
+ .db_mask = GEN4_IM_INT_DISABLE_OFFSET,
+ .spad = GEN4_IM_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg gen4_sec_xlat = {
+ .bar2_limit = GEN4_IM23XLMT_OFFSET,
+ .bar2_xlat = GEN4_IM23XBASE_OFFSET,
+ .bar2_idx = GEN4_IM23XBASEIDX_OFFSET,
+};
+
+static const struct intel_ntb_alt_reg gen4_b2b_reg = {
+ .db_bell = GEN4_IM_DOORBELL_OFFSET,
+ .spad = GEN4_EM_SPAD_OFFSET,
+};
+
+static int gen4_poll_link(struct intel_ntb_dev *ndev)
+{
+ u16 reg_val;
+
+	/*
+	 * We need to write to the DLLSCS bit in the SLOTSTS register
+	 * before we can clear the hardware link interrupt on ICX NTB.
+	 */
+ iowrite16(GEN4_SLOTSTS_DLLSCS, ndev->self_mmio + GEN4_SLOTSTS);
+ ndev->reg->db_iowrite(ndev->db_link_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_clear);
+
+ reg_val = ioread16(ndev->self_mmio + GEN4_LINK_STATUS_OFFSET);
+ if (reg_val == ndev->lnk_sta)
+ return 0;
+
+ ndev->lnk_sta = reg_val;
+
+ return 1;
+}
+
+static int gen4_link_is_up(struct intel_ntb_dev *ndev)
+{
+ return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
+}
+
+static int gen4_init_isr(struct intel_ntb_dev *ndev)
+{
+ int i;
+
+	/*
+	 * The MSIX vectors and the interrupt status bits are not lined up
+	 * on Gen3 (Skylake) and Gen4. The link status bit is bit 32, but
+	 * by default it is assigned to MSIX vector 0. At reset the vectors
+	 * map 1-32,0; reprogram them to 0-32 so they line up with the
+	 * status bits.
+	 */
+ for (i = 0; i < GEN4_DB_MSIX_VECTOR_COUNT; i++)
+ iowrite8(i, ndev->self_mmio + GEN4_INTVEC_OFFSET + i);
+
+ return ndev_init_isr(ndev, GEN4_DB_MSIX_VECTOR_COUNT,
+ GEN4_DB_MSIX_VECTOR_COUNT,
+ GEN4_DB_MSIX_VECTOR_SHIFT,
+ GEN4_DB_TOTAL_SHIFT);
+}
+
+static int gen4_setup_b2b_mw(struct intel_ntb_dev *ndev,
+ const struct intel_b2b_addr *addr,
+ const struct intel_b2b_addr *peer_addr)
+{
+ struct pci_dev *pdev;
+ void __iomem *mmio;
+ phys_addr_t bar_addr;
+
+ pdev = ndev->ntb.pdev;
+ mmio = ndev->self_mmio;
+
+ /* setup incoming bar limits == base addrs (zero length windows) */
+ bar_addr = addr->bar2_addr64;
+ iowrite64(bar_addr, mmio + GEN4_IM23XLMT_OFFSET);
+ bar_addr = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
+ dev_dbg(&pdev->dev, "IM23XLMT %#018llx\n", bar_addr);
+
+ bar_addr = addr->bar4_addr64;
+ iowrite64(bar_addr, mmio + GEN4_IM45XLMT_OFFSET);
+ bar_addr = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
+ dev_dbg(&pdev->dev, "IM45XLMT %#018llx\n", bar_addr);
+
+ /* zero incoming translation addrs */
+ iowrite64(0, mmio + GEN4_IM23XBASE_OFFSET);
+ iowrite64(0, mmio + GEN4_IM45XBASE_OFFSET);
+
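+	/*
+	 * In B2B mode the peer side registers are reached through the
+	 * local BAR 0 mapping, so reuse the self MMIO region.
+	 */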
+ ndev->peer_mmio = ndev->self_mmio;
+
+ return 0;
+}
+
+static int gen4_init_ntb(struct intel_ntb_dev *ndev)
+{
+ int rc;
+
+ ndev->mw_count = XEON_MW_COUNT;
+ ndev->spad_count = GEN4_SPAD_COUNT;
+ ndev->db_count = GEN4_DB_COUNT;
+ ndev->db_link_mask = GEN4_DB_LINK_BIT;
+
+ ndev->self_reg = &gen4_pri_reg;
+ ndev->xlat_reg = &gen4_sec_xlat;
+ ndev->peer_reg = &gen4_b2b_reg;
+
+ if (ndev->ntb.topo == NTB_TOPO_B2B_USD)
+ rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_dsd_addr,
+ &xeon_b2b_usd_addr);
+ else
+ rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_usd_addr,
+ &xeon_b2b_dsd_addr);
+ if (rc)
+ return rc;
+
+ ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
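+	/* keep all doorbell interrupts masked until a client unmasks them */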
+ ndev->reg->db_iowrite(ndev->db_valid_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_mask);
+
+ return 0;
+}
+
+static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
+{
+ switch (ppd & GEN4_PPD_TOPO_MASK) {
+ case GEN4_PPD_TOPO_B2B_USD:
+ return NTB_TOPO_B2B_USD;
+ case GEN4_PPD_TOPO_B2B_DSD:
+ return NTB_TOPO_B2B_DSD;
+ }
+
+ return NTB_TOPO_NONE;
+}
+
+int gen4_init_dev(struct intel_ntb_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+	u32 ppd1;
+ u16 lnkctl;
+ int rc;
+
+ ndev->reg = &gen4_reg;
+
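+	/*
+	 * ICX requires the incoming translation address to be aligned to
+	 * the BAR size; record it so the memory window ops can enforce it.
+	 */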
+ if (pdev_is_ICX(pdev))
+ ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN;
+
+ ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
+ ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+ dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
+ ntb_topo_string(ndev->ntb.topo));
+ if (ndev->ntb.topo == NTB_TOPO_NONE)
+ return -EINVAL;
+
+ rc = gen4_init_ntb(ndev);
+ if (rc)
+ return rc;
+
+	/* leave the link disabled until a client explicitly enables it */
+ lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+ lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
+ iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+
+ return gen4_init_isr(ndev);
+}
+
+ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct intel_ntb_dev *ndev;
+ void __iomem *mmio;
+ char *buf;
+ size_t buf_size;
+ ssize_t ret, off;
+ union { u64 v64; u32 v32; u16 v16; } u;
+
+ ndev = filp->private_data;
+ mmio = ndev->self_mmio;
+
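+	/* cap the transient dump buffer at 2KB */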
+ buf_size = min(count, 0x800ul);
+
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ off = 0;
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB Device Information:\n");
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Connection Topology -\t%s\n",
+ ntb_topo_string(ndev->ntb.topo));
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
+ off += scnprintf(buf + off, buf_size - off,
+ "LNK STA (cached) -\t\t%#06x\n", ndev->lnk_sta);
+
+ if (!ndev->reg->link_is_up(ndev))
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tDown\n");
+ else {
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tUp\n");
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Speed -\t\tPCI-E Gen %u\n",
+ NTB_LNK_STA_SPEED(ndev->lnk_sta));
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Width -\t\tx%u\n",
+ NTB_LNK_STA_WIDTH(ndev->lnk_sta));
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Memory Window Count -\t%u\n", ndev->mw_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Scratchpad Count -\t%u\n", ndev->spad_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Count -\t%u\n", ndev->db_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
+
+ u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask -\t\t%#llx\n", u.v64);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Incoming XLAT:\n");
+
+ u.v64 = ioread64(mmio + GEN4_IM23XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IM23XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + GEN4_IM45XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IM45XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IM23XLMT -\t\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IM45XLMT -\t\t\t%#018llx\n", u.v64);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Statistics:\n");
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Hardware Errors:\n");
+
+ if (!pci_read_config_word(ndev->ntb.pdev,
+ GEN4_DEVSTS_OFFSET, &u.v16))
+ off += scnprintf(buf + off, buf_size - off,
+ "DEVSTS -\t\t%#06x\n", u.v16);
+
+ u.v16 = ioread16(mmio + GEN4_LINK_STATUS_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "LNKSTS -\t\t%#06x\n", u.v16);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ GEN4_UNCERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "UNCERRSTS -\t\t%#06x\n", u.v32);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ GEN4_CORERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "CORERRSTS -\t\t%#06x\n", u.v32);
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
+ kfree(buf);
+ return ret;
+}
+
+static int intel_ntb4_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ unsigned long xlat_reg, limit_reg, idx_reg;
+ unsigned short base_idx, reg_val16;
+ resource_size_t bar_size, mw_size;
+ void __iomem *mmio;
+ u64 base, limit, reg_val;
+ int bar;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
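+	/* skip over the window claimed entirely by the B2B registers, if any */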
+ if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+ idx += 1;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+ if (idx == ndev->b2b_idx)
+ mw_size = bar_size - ndev->b2b_off;
+ else
+ mw_size = bar_size;
+
+ if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
+ /* hardware requires that addr is aligned to bar size */
+ if (addr & (bar_size - 1))
+ return -EINVAL;
+ } else {
+ if (addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+ }
+
+ /* make sure the range fits in the usable mw size */
+ if (size > mw_size)
+ return -EINVAL;
+
+ mmio = ndev->self_mmio;
+ xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
+ limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
+ base = pci_resource_start(ndev->ntb.pdev, bar);
+
+	/* Set the limit if the register is supported and size is not the full window */
+ if (limit_reg && size != mw_size) {
+ limit = base + size;
+ base_idx = __ilog2_u64(size);
+ } else {
+ limit = base + mw_size;
+ base_idx = __ilog2_u64(mw_size);
+ }
+
+ /* set and verify setting the translation address */
+ iowrite64(addr, mmio + xlat_reg);
+ reg_val = ioread64(mmio + xlat_reg);
+ if (reg_val != addr) {
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+	dev_dbg(&ntb->pdev->dev, "BAR %d IMXBASE: %#llx\n", bar, reg_val);
+
+ /* set and verify setting the limit */
+ iowrite64(limit, mmio + limit_reg);
+ reg_val = ioread64(mmio + limit_reg);
+ if (reg_val != limit) {
+ iowrite64(base, mmio + limit_reg);
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+	dev_dbg(&ntb->pdev->dev, "BAR %d IMXLMT: %#llx\n", bar, reg_val);
+
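+	/*
+	 * On BAR-aligned hardware the XBASEIDX register must also be
+	 * programmed with log2 of the window size.
+	 */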
+ if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
+ idx_reg = ndev->xlat_reg->bar2_idx + (idx * 0x2);
+ iowrite16(base_idx, mmio + idx_reg);
+ reg_val16 = ioread16(mmio + idx_reg);
+ if (reg_val16 != base_idx) {
+ iowrite64(base, mmio + limit_reg);
+ iowrite64(0, mmio + xlat_reg);
+ iowrite16(0, mmio + idx_reg);
+ return -EIO;
+ }
+ dev_dbg(&ntb->pdev->dev, "BAR %d IMBASEIDX: %#x\n", bar, reg_val16);
+ }
+
+ return 0;
+}
+
+static int intel_ntb4_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed, enum ntb_width max_width)
+{
+ struct intel_ntb_dev *ndev;
+ u32 ntb_ctl, ppd0;
+ u16 lnkctl;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ dev_dbg(&ntb->pdev->dev,
+ "Enabling link with max_speed %d max_width %d\n",
+ max_speed, max_width);
+
+ if (max_speed != NTB_SPEED_AUTO)
+ dev_dbg(&ntb->pdev->dev,
+ "ignoring max_speed %d\n", max_speed);
+ if (max_width != NTB_WIDTH_AUTO)
+ dev_dbg(&ntb->pdev->dev,
+ "ignoring max_width %d\n", max_width);
+
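+	/* enable snooping for inbound and outbound memory window accesses */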
+ ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP;
+ ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP;
+ iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+ lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+ lnkctl &= ~GEN4_LINK_CTRL_LINK_DISABLE;
+ iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+
+ /* start link training in PPD0 */
+ ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
+ ppd0 |= GEN4_PPD_LINKTRN;
+ iowrite32(ppd0, ndev->self_mmio + GEN4_PPD0_OFFSET);
+
+ /* make sure link training has started */
+ ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
+ if (!(ppd0 & GEN4_PPD_LINKTRN)) {
+ dev_warn(&ntb->pdev->dev, "Link is not training\n");
+ return -ENXIO;
+ }
+
+ ndev->dev_up = 1;
+
+ return 0;
+}
+
+static int intel_ntb4_link_disable(struct ntb_dev *ntb)
+{
+ struct intel_ntb_dev *ndev;
+ u32 ntb_cntl;
+ u16 lnkctl;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ dev_dbg(&ntb->pdev->dev, "Disabling link\n");
+
+ /* clear the snoop bits */
+ ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+ ntb_cntl &= ~(NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP);
+ ntb_cntl &= ~(NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP);
+ iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+ lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+ lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
+ iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+
+ ndev->dev_up = 0;
+
+ return 0;
+}
+
+static int intel_ntb4_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ resource_size_t bar_size, mw_size;
+ int bar;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+ idx += 1;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+ if (idx == ndev->b2b_idx)
+ mw_size = bar_size - ndev->b2b_off;
+ else
+ mw_size = bar_size;
+
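+	/* ICX requires the translated address to be aligned to the BAR size */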
+ if (addr_align) {
+ if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN)
+ *addr_align = pci_resource_len(ndev->ntb.pdev, bar);
+ else
+ *addr_align = PAGE_SIZE;
+ }
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = mw_size;
+
+ return 0;
+}
+
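+/*
+ * Gen4 reuses the Gen3 doorbell callbacks since the doorbell registers
+ * behave the same; only link management and memory window setup differ.
+ */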
+const struct ntb_dev_ops intel_ntb4_ops = {
+ .mw_count = intel_ntb_mw_count,
+ .mw_get_align = intel_ntb4_mw_get_align,
+ .mw_set_trans = intel_ntb4_mw_set_trans,
+ .peer_mw_count = intel_ntb_peer_mw_count,
+ .peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
+ .link_is_up = intel_ntb_link_is_up,
+ .link_enable = intel_ntb4_link_enable,
+ .link_disable = intel_ntb4_link_disable,
+ .db_valid_mask = intel_ntb_db_valid_mask,
+ .db_vector_count = intel_ntb_db_vector_count,
+ .db_vector_mask = intel_ntb_db_vector_mask,
+ .db_read = intel_ntb3_db_read,
+ .db_clear = intel_ntb3_db_clear,
+ .db_set_mask = intel_ntb_db_set_mask,
+ .db_clear_mask = intel_ntb_db_clear_mask,
+ .peer_db_addr = intel_ntb3_peer_db_addr,
+ .peer_db_set = intel_ntb3_peer_db_set,
+ .spad_is_unsafe = intel_ntb_spad_is_unsafe,
+ .spad_count = intel_ntb_spad_count,
+ .spad_read = intel_ntb_spad_read,
+ .spad_write = intel_ntb_spad_write,
+ .peer_spad_addr = intel_ntb_peer_spad_addr,
+ .peer_spad_read = intel_ntb_peer_spad_read,
+ .peer_spad_write = intel_ntb_peer_spad_write,
+};
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h
new file mode 100644
index 000000000000..a868c788de02
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#ifndef _NTB_INTEL_GEN4_H_
+#define _NTB_INTEL_GEN4_H_
+
+#include "ntb_hw_intel.h"
+
+/* Supported PCI device revision range for ICX */
+#define PCI_DEVICE_REVISION_ICX_MIN 0x2
+#define PCI_DEVICE_REVISION_ICX_MAX 0xF
+
+/* Intel Gen4 NTB hardware */
+/* PCIe config space */
+#define GEN4_IMBAR23SZ_OFFSET 0x00c4
+#define GEN4_IMBAR45SZ_OFFSET 0x00c5
+#define GEN4_EMBAR23SZ_OFFSET 0x00c6
+#define GEN4_EMBAR45SZ_OFFSET 0x00c7
+#define GEN4_DEVCTRL_OFFSET 0x0048
+#define GEN4_DEVSTS_OFFSET 0x004a
+#define GEN4_UNCERRSTS_OFFSET 0x0104
+#define GEN4_CORERRSTS_OFFSET 0x0110
+
+/* BAR0 MMIO */
+#define GEN4_NTBCNTL_OFFSET 0x0000
+#define GEN4_IM23XBASE_OFFSET 0x0010 /* IMBAR1XBASE */
+#define GEN4_IM23XLMT_OFFSET 0x0018 /* IMBAR1XLMT */
+#define GEN4_IM45XBASE_OFFSET 0x0020 /* IMBAR2XBASE */
+#define GEN4_IM45XLMT_OFFSET 0x0028 /* IMBAR2XLMT */
+#define GEN4_IM_INT_STATUS_OFFSET 0x0040
+#define GEN4_IM_INT_DISABLE_OFFSET 0x0048
+#define GEN4_INTVEC_OFFSET 0x0050 /* 0-32 vecs */
+#define GEN4_IM23XBASEIDX_OFFSET 0x0074
+#define GEN4_IM45XBASEIDX_OFFSET 0x0076
+#define GEN4_IM_SPAD_OFFSET 0x0080 /* 0-15 SPADs */
+#define GEN4_IM_SPAD_SEM_OFFSET 0x00c0 /* SPAD hw semaphore */
+#define GEN4_IM_SPAD_STICKY_OFFSET 0x00c4 /* sticky SPAD */
+#define GEN4_IM_DOORBELL_OFFSET 0x0100 /* 0-31 doorbells */
+#define GEN4_EM_SPAD_OFFSET 0x8080
+/* note: link control/status for the NTB now lives in MMIO, not config space */
+#define GEN4_LINK_CTRL_OFFSET 0xb050
+#define GEN4_LINK_STATUS_OFFSET 0xb052
+#define GEN4_PPD0_OFFSET 0xb0d4
+#define GEN4_PPD1_OFFSET 0xb4c0
+#define GEN4_LTSSMSTATEJMP 0xf040
+
+#define GEN4_PPD_CLEAR_TRN 0x0001
+#define GEN4_PPD_LINKTRN 0x0008
+#define GEN4_PPD_CONN_MASK 0x0300
+#define GEN4_PPD_CONN_B2B 0x0200
+#define GEN4_PPD_DEV_MASK 0x1000
+#define GEN4_PPD_DEV_DSD 0x1000
+#define GEN4_PPD_DEV_USD 0x0000
+#define GEN4_LINK_CTRL_LINK_DISABLE 0x0010
+
+#define GEN4_SLOTSTS 0xb05a
+#define GEN4_SLOTSTS_DLLSCS		0x100	/* data link layer state changed */
+
+#define GEN4_PPD_TOPO_MASK (GEN4_PPD_CONN_MASK | GEN4_PPD_DEV_MASK)
+#define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD)
+#define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD)
+
+#define GEN4_DB_COUNT 32
+#define GEN4_DB_LINK 32
+#define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK)
+#define GEN4_DB_MSIX_VECTOR_COUNT 33
+#define GEN4_DB_MSIX_VECTOR_SHIFT 1
+#define GEN4_DB_TOTAL_SHIFT 33
+#define GEN4_SPAD_COUNT 16
+
+#define NTB_CTL_E2I_BAR23_SNOOP 0x000004
+#define NTB_CTL_E2I_BAR23_NOSNOOP 0x000008
+#define NTB_CTL_I2E_BAR23_SNOOP 0x000010
+#define NTB_CTL_I2E_BAR23_NOSNOOP 0x000020
+#define NTB_CTL_E2I_BAR45_SNOOP 0x000040
+#define NTB_CTL_E2I_BAR45_NOSNOOP	0x000080
+#define NTB_CTL_I2E_BAR45_SNOOP 0x000100
+#define NTB_CTL_I2E_BAR45_NOSNOOP 0x000200
+#define NTB_CTL_BUSNO_DIS_INC 0x000400
+#define NTB_CTL_LINK_DOWN 0x010000
+
+#define NTB_SJC_FORCEDETECT 0x000004
+
+ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp);
+int gen4_init_dev(struct intel_ntb_dev *ndev);
+
+extern const struct ntb_dev_ops intel_ntb4_ops;
+
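+/* ICX shares the Gen4 device ID; it is identified by the revision range */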
+static inline int pdev_is_ICX(struct pci_dev *pdev)
+{
+ if (pdev_is_gen4(pdev) &&
+ pdev->revision >= PCI_DEVICE_REVISION_ICX_MIN &&
+ pdev->revision <= PCI_DEVICE_REVISION_ICX_MAX)
+ return 1;
+ return 0;
+}
+
+#endif
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index e071e28bca3f..d61fcd91714b 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -72,6 +72,7 @@
#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_ICX 0x347e
/* Ntb control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)
@@ -120,6 +121,7 @@ struct intel_ntb_xlat_reg {
unsigned long bar0_base;
unsigned long bar2_xlat;
unsigned long bar2_limit;
+ unsigned short bar2_idx;
};
struct intel_b2b_addr {
@@ -182,6 +184,9 @@ struct intel_ntb_dev {
struct dentry *debugfs_dir;
struct dentry *debugfs_info;
+
+ /* gen4 entries */
+ int dev_up;
};
#define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb)
@@ -219,4 +224,11 @@ static inline int pdev_is_gen3(struct pci_dev *pdev)
return 0;
}
+static inline int pdev_is_gen4(struct pci_dev *pdev)
+{
+ if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)
+ return 1;
+
+ return 0;
+}
#endif
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 972f6d984f6d..89df1350fefd 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -101,8 +101,8 @@ MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");
#define DMA_MDELAY 10
#define MSG_TRIES 1000
-#define MSG_UDELAY_LOW 1000
-#define MSG_UDELAY_HIGH 2000
+#define MSG_UDELAY_LOW 1000000
+#define MSG_UDELAY_HIGH 2000000
#define PERF_BUF_LEN 1024
@@ -159,6 +159,8 @@ struct perf_peer {
/* NTB connection setup service */
struct work_struct service;
unsigned long sts;
+
+ struct completion init_comp;
};
#define to_peer_service(__work) \
container_of(__work, struct perf_peer, service)
@@ -547,6 +549,7 @@ static int perf_setup_outbuf(struct perf_peer *peer)
/* Initialization is finally done */
set_bit(PERF_STS_DONE, &peer->sts);
+ complete_all(&peer->init_comp);
return 0;
}
@@ -557,7 +560,7 @@ static void perf_free_inbuf(struct perf_peer *peer)
return;
(void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
- dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size,
+ dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size,
peer->inbuf, peer->inbuf_xlat);
peer->inbuf = NULL;
}
@@ -586,8 +589,9 @@ static int perf_setup_inbuf(struct perf_peer *peer)
perf_free_inbuf(peer);
- peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size,
- &peer->inbuf_xlat, GFP_KERNEL);
+ peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev,
+ peer->inbuf_size, &peer->inbuf_xlat,
+ GFP_KERNEL);
if (!peer->inbuf) {
dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n",
&peer->inbuf_size);
@@ -637,6 +641,7 @@ static void perf_service_work(struct work_struct *work)
perf_setup_outbuf(peer);
if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
+ init_completion(&peer->init_comp);
clear_bit(PERF_STS_DONE, &peer->sts);
if (test_bit(0, &peer->perf->busy_flag) &&
peer == peer->perf->test_peer) {
@@ -653,7 +658,7 @@ static int perf_init_service(struct perf_ctx *perf)
{
u64 mask;
- if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) {
+ if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) {
dev_err(&perf->ntb->dev, "Not enough memory windows\n");
return -EINVAL;
}
@@ -803,7 +808,7 @@ static int perf_copy_chunk(struct perf_thread *pthr,
dst_vaddr = dst;
dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase);
- unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);
+ unmap = dmaengine_get_unmap_data(dma_dev, 1, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;
@@ -816,15 +821,8 @@ static int perf_copy_chunk(struct perf_thread *pthr,
}
unmap->to_cnt = 1;
- unmap->addr[1] = dst_dma_addr;
- if (dma_mapping_error(dma_dev, unmap->addr[1])) {
- ret = -EIO;
- goto err_free_resource;
- }
- unmap->from_cnt = 1;
-
do {
- tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, unmap->addr[1],
+ tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, dst_dma_addr,
unmap->addr[0], len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx)
msleep(DMA_MDELAY);
@@ -1010,8 +1008,8 @@ static void perf_clear_test(struct perf_thread *pthr)
pthr->perf->test_peer->dma_dst_addr,
pthr->perf->test_peer->outbuf_size,
DMA_FROM_DEVICE, 0);
- if (pthr->dma_chan)
- dma_release_channel(pthr->dma_chan);
+
+ dma_release_channel(pthr->dma_chan);
no_dma_notify:
atomic_dec(&perf->tsync);
@@ -1083,8 +1081,9 @@ static int perf_submit_test(struct perf_peer *peer)
struct perf_thread *pthr;
int tidx, ret;
- if (!test_bit(PERF_STS_DONE, &peer->sts))
- return -ENOLINK;
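+	/* block until the peer has finished its connection setup */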
+ ret = wait_for_completion_interruptible(&peer->init_comp);
+ if (ret < 0)
+ return ret;
if (test_and_set_bit_lock(0, &perf->busy_flag))
return -EBUSY;
@@ -1455,10 +1454,21 @@ static int perf_init_peers(struct perf_ctx *perf)
peer->gidx = pidx;
}
INIT_WORK(&peer->service, perf_service_work);
+ init_completion(&peer->init_comp);
}
if (perf->gidx == -1)
perf->gidx = pidx;
+ /*
+ * Hardware with only two ports may not have unique port
+ * numbers. In this case, the gidxs should all be zero.
+ */
+ if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 &&
+ ntb_peer_port_number(perf->ntb, 0) == 0) {
+ perf->gidx = 0;
+ perf->peers[0].gidx = 0;
+ }
+
for (pidx = 0; pidx < perf->pcnt; pidx++) {
ret = perf_setup_peer_mw(&perf->peers[pidx]);
if (ret)
@@ -1554,4 +1564,3 @@ static void __exit perf_exit(void)
destroy_workqueue(perf_wq);
}
module_exit(perf_exit);
-
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 04dd46647db3..2164e8492772 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -121,15 +121,14 @@ static int pp_find_next_peer(struct pp_ctx *pp)
link = ntb_link_is_up(pp->ntb, NULL, NULL);
/* Find next available peer */
- if (link & pp->nmask) {
+ if (link & pp->nmask)
pidx = __ffs64(link & pp->nmask);
- out_db = BIT_ULL(pidx + 1);
- } else if (link & pp->pmask) {
+ else if (link & pp->pmask)
pidx = __ffs64(link & pp->pmask);
- out_db = BIT_ULL(pidx);
- } else {
+ else
return -ENODEV;
- }
+
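+	/* the outbound doorbell bit is indexed by peer port number, not peer index */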
+ out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx));
spin_lock(&pp->lock);
pp->out_pidx = pidx;
@@ -303,7 +302,7 @@ static void pp_init_flds(struct pp_ctx *pp)
break;
}
- pp->in_db = BIT_ULL(pidx);
+ pp->in_db = BIT_ULL(lport);
pp->pmask = GENMASK_ULL(pidx, 0) >> 1;
pp->nmask = GENMASK_ULL(pcnt - 1, pidx);
@@ -432,4 +431,3 @@ static void __exit pp_exit(void)
debugfs_remove_recursive(pp_dbgfs_topdir);
}
module_exit(pp_exit);
-
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index 69da758fe64c..b7bf3f863d79 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -504,7 +504,7 @@ static ssize_t tool_peer_link_read(struct file *filep, char __user *ubuf,
buf[1] = '\n';
buf[2] = '\0';
- return simple_read_from_buffer(ubuf, size, offp, buf, 3);
+ return simple_read_from_buffer(ubuf, size, offp, buf, 2);
}
static TOOL_FOPS_RDWR(tool_peer_link_fops,
@@ -590,7 +590,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx,
inmw->size = min_t(resource_size_t, req_size, size);
inmw->size = round_up(inmw->size, addr_align);
inmw->size = round_up(inmw->size, size_align);
- inmw->mm_base = dma_alloc_coherent(&tc->ntb->dev, inmw->size,
+ inmw->mm_base = dma_alloc_coherent(&tc->ntb->pdev->dev, inmw->size,
&inmw->dma_base, GFP_KERNEL);
if (!inmw->mm_base)
return -ENOMEM;
@@ -612,7 +612,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx,
return 0;
err_free_dma:
- dma_free_coherent(&tc->ntb->dev, inmw->size, inmw->mm_base,
+ dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, inmw->mm_base,
inmw->dma_base);
inmw->mm_base = NULL;
inmw->dma_base = 0;
@@ -629,7 +629,7 @@ static void tool_free_mw(struct tool_ctx *tc, int pidx, int widx)
if (inmw->mm_base != NULL) {
ntb_mw_clear_trans(tc->ntb, pidx, widx);
- dma_free_coherent(&tc->ntb->dev, inmw->size,
+ dma_free_coherent(&tc->ntb->pdev->dev, inmw->size,
inmw->mm_base, inmw->dma_base);
}
@@ -1690,4 +1690,3 @@ static void __exit tool_exit(void)
debugfs_remove_recursive(tool_dbgfs_topdir);
}
module_exit(tool_exit);
-
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 43751fab9d36..036e23aef9b0 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -178,7 +178,9 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
bip = bio_integrity(bio);
nsblk = q->queuedata;
rw = bio_data_dir(bio);
- do_acct = nd_iostat_start(bio, &start);
+ do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+ if (do_acct)
+ start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
@@ -195,7 +197,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
}
}
if (do_acct)
- nd_iostat_end(bio, start);
+ bio_end_io_acct(bio, start);
bio_endio(bio);
return BLK_QC_T_NONE;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 3b09419218d6..90c0c4bbe77b 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1452,7 +1452,9 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
if (!bio_integrity_prep(bio))
return BLK_QC_T_NONE;
- do_acct = nd_iostat_start(bio, &start);
+ do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+ if (do_acct)
+ start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
@@ -1477,7 +1479,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}
}
if (do_acct)
- nd_iostat_end(bio, start);
+ bio_end_io_acct(bio, start);
bio_endio(bio);
return BLK_QC_T_NONE;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 85dbb2a322b9..85c1ae813ea3 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -396,25 +396,6 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
-void __nd_iostat_start(struct bio *bio, unsigned long *start);
-static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
-{
- struct gendisk *disk = bio->bi_disk;
-
- if (!blk_queue_io_stat(disk->queue))
- return false;
-
- *start = jiffies;
- generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
- &disk->part0);
- return true;
-}
-static inline void nd_iostat_end(struct bio *bio, unsigned long start)
-{
- struct gendisk *disk = bio->bi_disk;
-
- generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
-}
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
unsigned int len)
{
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2df6994acf83..97f948f8f4e6 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -202,7 +202,9 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
if (bio->bi_opf & REQ_PREFLUSH)
ret = nvdimm_flush(nd_region, bio);
- do_acct = nd_iostat_start(bio, &start);
+ do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+ if (do_acct)
+ start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
if (op_is_write(bio_op(bio)))
rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
@@ -216,7 +218,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
}
}
if (do_acct)
- nd_iostat_end(bio, start);
+ bio_end_io_acct(bio, start);
if (bio->bi_opf & REQ_FUA)
ret = nvdimm_flush(nd_region, bio);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f3c037f5a9ba..0585efa47d8f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -19,7 +19,6 @@
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
-#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
@@ -204,11 +203,6 @@ static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
nvme_put_ctrl(ctrl);
}
-static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
-{
- return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
-}
-
static blk_status_t nvme_error_status(u16 status)
{
switch (status & 0x7ff) {
@@ -310,7 +304,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
return true;
nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
- blk_mq_complete_request(req);
+ blk_mq_force_complete_rq(req);
return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
@@ -433,7 +427,6 @@ static void nvme_free_ns_head(struct kref *ref)
nvme_mpath_remove_disk(head);
ida_simple_remove(&head->subsys->ns_ida, head->instance);
- list_del_init(&head->entry);
cleanup_srcu_struct(&head->srcu);
nvme_put_subsystem(head->subsys);
kfree(head);
@@ -530,7 +523,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
c.directive.opcode = nvme_admin_directive_recv;
c.directive.nsid = cpu_to_le32(nsid);
- c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
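+	/* numd is a zero-based dword count */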
+ c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
c.directive.dtype = NVME_DIR_STREAMS;
@@ -553,19 +546,22 @@ static int nvme_configure_directives(struct nvme_ctrl *ctrl)
ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
if (ret)
- return ret;
+ goto out_disable_stream;
ctrl->nssa = le16_to_cpu(s.nssa);
if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
dev_info(ctrl->device, "too few streams (%u) available\n",
ctrl->nssa);
- nvme_disable_streams(ctrl);
- return 0;
+ goto out_disable_stream;
}
ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
return 0;
+
+out_disable_stream:
+ nvme_disable_streams(ctrl);
+ return ret;
}
/*
@@ -1027,6 +1023,19 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+/*
+ * In NVMe 1.0 the CNS field was just a binary controller or namespace
 + * flag, so sending any of the newer CNS opcodes has a big chance of not
 + * working. Qemu unfortunately had that bug after reporting 1.1 version
 + * compliance (but not for any later version).
+ */
+static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
+ return ctrl->vs < NVME_VS(1, 2, 0);
+ return ctrl->vs < NVME_VS(1, 1, 0);
+}
+
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
struct nvme_command c = { };
@@ -1290,7 +1299,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
meta_len = (io.nblocks + 1) * ns->ms;
metadata = nvme_to_user_ptr(io.metadata);
- if (ns->ext) {
+ if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
meta_len = 0;
} else if (meta_len) {
@@ -1392,8 +1401,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
}
if (effects & NVME_CMD_EFFECTS_CCC)
nvme_init_identify(ctrl);
- if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
+ if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
nvme_queue_scan(ctrl);
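+		/* make sure the namespace rescan finishes before returning to userspace */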
+ flush_work(&ctrl->scan_work);
+ }
}
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -1682,7 +1693,8 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
+static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
+ u32 max_integrity_segments)
{
struct blk_integrity integrity;
@@ -1705,20 +1717,15 @@ static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
}
integrity.tuple_size = ms;
blk_integrity_register(disk, &integrity);
- blk_queue_max_integrity_segments(disk->queue, 1);
+ blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
-static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
+static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
+ u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_set_chunk_size(struct nvme_ns *ns)
-{
- u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob);
- blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
-}
-
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
struct nvme_ctrl *ctrl = ns->ctrl;
@@ -1804,12 +1811,37 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
}
+static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u32 *phys_bs, u32 *io_opt)
+{
+ struct streams_directive_params s;
+ int ret;
+
+ if (!ctrl->nr_streams)
+ return 0;
+
+ ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
+ if (ret)
+ return ret;
+
+ ns->sws = le32_to_cpu(s.sws);
+ ns->sgs = le16_to_cpu(s.sgs);
+
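+	/*
+	 * sws is in units of logical blocks and sgs in units of sws;
+	 * scale both to bytes for the queue limits.
+	 */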
+ if (ns->sws) {
+ *phys_bs = ns->sws * (1 << ns->lba_shift);
+ if (ns->sgs)
+ *io_opt = *phys_bs * ns->sgs;
+ }
+
+ return 0;
+}
+
static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
unsigned short bs = 1 << ns->lba_shift;
- u32 atomic_bs, phys_bs, io_opt;
+ u32 atomic_bs, phys_bs, io_opt = 0;
if (ns->lba_shift > PAGE_SHIFT) {
/* unsupported block size, set capacity to 0 later */
@@ -1818,26 +1850,25 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_mq_freeze_queue(disk->queue);
blk_integrity_unregister(disk);
+ atomic_bs = phys_bs = bs;
+ nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
if (id->nabo == 0) {
/*
* Bit 1 indicates whether NAWUPF is defined for this namespace
* and whether it should be used instead of AWUPF. If NAWUPF ==
* 0 then AWUPF must be used instead.
*/
- if (id->nsfeat & (1 << 1) && id->nawupf)
+ if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
- } else {
- atomic_bs = bs;
}
- phys_bs = bs;
- io_opt = bs;
- if (id->nsfeat & (1 << 4)) {
+
+ if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
/* NPWG = Namespace Preferred Write Granularity */
- phys_bs *= 1 + le16_to_cpu(id->npwg);
+ phys_bs = bs * (1 + le16_to_cpu(id->npwg));
/* NOWS = Namespace Optimal Write Size */
- io_opt *= 1 + le16_to_cpu(id->nows);
+ io_opt = bs * (1 + le16_to_cpu(id->nows));
}
blk_queue_logical_block_size(disk->queue, bs);
@@ -1850,19 +1881,34 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_queue_io_min(disk->queue, phys_bs);
blk_queue_io_opt(disk->queue, io_opt);
- if (ns->ms && !ns->ext &&
- (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- nvme_init_integrity(disk, ns->ms, ns->pi_type);
- if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
- ns->lba_shift > PAGE_SHIFT)
+ /*
+ * The block layer can't support LBA sizes larger than the page size
+ * yet, so catch this early and don't allow block I/O.
+ */
+ if (ns->lba_shift > PAGE_SHIFT)
capacity = 0;
+ /*
+ * Register a metadata profile for PI, or the plain non-integrity NVMe
+ * metadata masquerading as Type 0 if supported, otherwise reject block
+ * I/O to namespaces with metadata except when the namespace supports
+ * PI, as it can strip/insert in that case.
+ */
+ if (ns->ms) {
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ (ns->features & NVME_NS_METADATA_SUPPORTED))
+ nvme_init_integrity(disk, ns->ms, ns->pi_type,
+ ns->ctrl->max_integrity_segments);
+ else if (!nvme_ns_has_pi(ns))
+ capacity = 0;
+ }
+
set_capacity_revalidate_and_notify(disk, capacity, false);
nvme_config_discard(disk, ns);
nvme_config_write_zeroes(disk, ns);
- if (id->nsattr & (1 << 0))
+ if (id->nsattr & NVME_NS_ATTR_RO)
set_disk_ro(disk, true);
else
set_disk_ro(disk, false);
@@ -1870,9 +1916,11 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_mq_unfreeze_queue(disk->queue);
}
-static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
struct nvme_ns *ns = disk->private_data;
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ u32 iob;
/*
* If identify namespace failed, use default 512 byte block size so
@@ -1881,32 +1929,55 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
if (ns->lba_shift == 0)
ns->lba_shift = 9;
- ns->noiob = le16_to_cpu(id->noiob);
+
+ if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+ is_power_of_2(ctrl->max_hw_sectors))
+ iob = ctrl->max_hw_sectors;
+ else
+ iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
+
+ ns->features = 0;
ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
- ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
 	/* the PI implementation requires metadata size equal to the t10 pi tuple size */
if (ns->ms == sizeof(struct t10_pi_tuple))
ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
else
ns->pi_type = 0;
- if (ns->noiob)
- nvme_set_chunk_size(ns);
+ if (ns->ms) {
+ /*
+ * For PCIe only the separate metadata pointer is supported,
+ * as the block layer supplies metadata in a separate bio_vec
+ * chain. For Fabrics, only metadata as part of extended data
+ * LBA is supported on the wire per the Fabrics specification,
+ * but the HBA/HCA will do the remapping from the separate
+ * metadata buffers for us.
+ */
+ if (id->flbas & NVME_NS_FLBAS_META_EXT) {
+ ns->features |= NVME_NS_EXT_LBAS;
+ if ((ctrl->ops->flags & NVME_F_FABRICS) &&
+ (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) &&
+ ctrl->max_integrity_segments)
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
+ } else {
+ if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS))
+ return -EINVAL;
+ if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
+ }
+ }
+
+ if (iob)
+ blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
nvme_update_disk_info(disk, ns, id);
#ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
- if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
- struct backing_dev_info *info =
- ns->head->disk->queue->backing_dev_info;
-
- info->capabilities |= BDI_CAP_STABLE_WRITES;
- }
-
revalidate_disk(ns->head->disk);
}
#endif
+ return 0;
}
static int nvme_revalidate_disk(struct gendisk *disk)
@@ -1931,7 +2002,6 @@ static int nvme_revalidate_disk(struct gendisk *disk)
goto free_id;
}
- __nvme_revalidate_disk(disk, id);
ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
if (ret)
goto free_id;
@@ -1940,8 +2010,10 @@ static int nvme_revalidate_disk(struct gendisk *disk)
dev_err(ctrl->device,
"identifiers changed for nsid %d\n", ns->head->ns_id);
ret = -ENODEV;
+ goto free_id;
}
+ ret = __nvme_revalidate_disk(disk, id);
free_id:
kfree(id);
out:
@@ -2249,10 +2321,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
- if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
- is_power_of_2(ctrl->max_hw_sectors))
- blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
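+	/* require qword alignment of user-space data buffers */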
+ blk_queue_dma_alignment(q, 7);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
vwc = true;
blk_queue_write_cache(q, vwc, vwc);
@@ -2655,7 +2725,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
return false;
}
- if ((id->cmic & (1 << 1)) ||
+ if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
(ctrl->opts && ctrl->opts->discovery_nqn))
continue;
@@ -2746,7 +2816,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
void *log, size_t size, u64 offset)
{
struct nvme_command c = { };
- unsigned long dwlen = size / 4 - 1;
+ u32 dwlen = nvme_bytes_to_numd(size);
c.get_log_page.opcode = nvme_admin_get_log_page;
c.get_log_page.nsid = cpu_to_le32(nsid);
@@ -3401,7 +3471,6 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
list_for_each_entry(h, &subsys->nsheads, entry) {
if (nvme_ns_ids_valid(&new->ids) &&
- !list_empty(&h->list) &&
nvme_ns_ids_equal(&new->ids, &h->ids))
return -EINVAL;
}
@@ -3410,8 +3479,7 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_id_ns *id,
- struct nvme_ns_ids *ids)
+ unsigned nsid, struct nvme_ns_ids *ids)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3469,42 +3537,51 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
struct nvme_id_ns *id)
{
struct nvme_ctrl *ctrl = ns->ctrl;
- bool is_shared = id->nmic & (1 << 0);
+ bool is_shared = id->nmic & NVME_NS_NMIC_SHARED;
struct nvme_ns_head *head = NULL;
struct nvme_ns_ids ids;
int ret = 0;
ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
- if (ret)
- goto out;
+ if (ret) {
+ if (ret < 0)
+ return ret;
+ return blk_status_to_errno(nvme_error_status(ret));
+ }
mutex_lock(&ctrl->subsys->lock);
- if (is_shared)
- head = nvme_find_ns_head(ctrl->subsys, nsid);
+ head = nvme_find_ns_head(ctrl->subsys, nsid);
if (!head) {
- head = nvme_alloc_ns_head(ctrl, nsid, id, &ids);
+ head = nvme_alloc_ns_head(ctrl, nsid, &ids);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
+ head->shared = is_shared;
} else {
+ ret = -EINVAL;
+ if (!is_shared || !head->shared) {
+ dev_err(ctrl->device,
+ "Duplicate unshared namespace %d\n", nsid);
+ goto out_put_ns_head;
+ }
if (!nvme_ns_ids_equal(&head->ids, &ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
nsid);
- ret = -EINVAL;
- goto out_unlock;
+ goto out_put_ns_head;
}
}
list_add_tail(&ns->siblings, &head->list);
ns->head = head;
+ mutex_unlock(&ctrl->subsys->lock);
+ return 0;
+out_put_ns_head:
+ nvme_put_ns_head(head);
out_unlock:
mutex_unlock(&ctrl->subsys->lock);
-out:
- if (ret > 0)
- ret = blk_status_to_errno(nvme_error_status(ret));
return ret;
}
@@ -3535,32 +3612,6 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
return ret;
}
-static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
-{
- struct streams_directive_params s;
- int ret;
-
- if (!ctrl->nr_streams)
- return 0;
-
- ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
- if (ret)
- return ret;
-
- ns->sws = le32_to_cpu(s.sws);
- ns->sgs = le16_to_cpu(s.sgs);
-
- if (ns->sws) {
- unsigned int bs = 1 << ns->lba_shift;
-
- blk_queue_io_min(ns->queue, bs * ns->sws);
- if (ns->sgs)
- blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
- }
-
- return 0;
-}
-
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns;
@@ -3604,7 +3655,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ret = nvme_init_ns_head(ns, nsid, id);
if (ret)
goto out_free_id;
- nvme_setup_streams_ns(ctrl, ns);
nvme_set_disk_name(disk_name, ns, ctrl, &flags);
disk = alloc_disk_node(0, node);
@@ -3618,7 +3668,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
ns->disk = disk;
- __nvme_revalidate_disk(disk, id);
+ if (__nvme_revalidate_disk(disk, id))
+ goto out_free_disk;
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
ret = nvme_nvm_register(ns, disk_name, node);
@@ -3645,9 +3696,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
/* prevent double queue cleanup */
ns->disk->queue = NULL;
put_disk(ns->disk);
+ out_free_disk:
+ del_gendisk(ns->disk);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
+ if (list_empty(&ns->head->list))
+ list_del_init(&ns->head->entry);
mutex_unlock(&ctrl->subsys->lock);
nvme_put_ns_head(ns->head);
out_free_id:
@@ -3667,7 +3722,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
+ if (list_empty(&ns->head->list))
+ list_del_init(&ns->head->entry);
mutex_unlock(&ns->ctrl->subsys->lock);
+
synchronize_rcu(); /* guarantee not available in head->list */
nvme_mpath_clear_current_path(ns);
synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
@@ -3687,6 +3745,16 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_put_ns(ns);
}
+static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
+{
+ struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
+
+ if (ns) {
+ nvme_ns_remove(ns);
+ nvme_put_ns(ns);
+ }
+}
+
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns;
@@ -3718,39 +3786,34 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
}
-static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
+static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
{
- struct nvme_ns *ns;
+ const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
__le32 *ns_list;
- unsigned i, j, nsid, prev = 0;
- unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
- int ret = 0;
+ u32 prev = 0;
+ int ret = 0, i;
+
+ if (nvme_ctrl_limited_cns(ctrl))
+ return -EOPNOTSUPP;
ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!ns_list)
return -ENOMEM;
- for (i = 0; i < num_lists; i++) {
+ for (;;) {
ret = nvme_identify_ns_list(ctrl, prev, ns_list);
if (ret)
goto free;
- for (j = 0; j < min(nn, 1024U); j++) {
- nsid = le32_to_cpu(ns_list[j]);
- if (!nsid)
- goto out;
+ for (i = 0; i < nr_entries; i++) {
+ u32 nsid = le32_to_cpu(ns_list[i]);
+ if (!nsid) /* end of the list? */
+ goto out;
nvme_validate_ns(ctrl, nsid);
-
- while (++prev < nsid) {
- ns = nvme_find_get_ns(ctrl, prev);
- if (ns) {
- nvme_ns_remove(ns);
- nvme_put_ns(ns);
- }
- }
+ while (++prev < nsid)
+ nvme_ns_remove_by_nsid(ctrl, prev);
}
- nn -= j;
}
out:
nvme_remove_invalid_namespaces(ctrl, prev);
@@ -3759,9 +3822,15 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
return ret;
}
-static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{
- unsigned i;
+ struct nvme_id_ctrl *id;
+ u32 nn, i;
+
+ if (nvme_identify_ctrl(ctrl, &id))
+ return;
+ nn = le32_to_cpu(id->nn);
+ kfree(id);
for (i = 1; i <= nn; i++)
nvme_validate_ns(ctrl, i);
@@ -3798,8 +3867,6 @@ static void nvme_scan_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, scan_work);
- struct nvme_id_ctrl *id;
- unsigned nn;
/* No tagset on a live ctrl means IO queues could not created */
if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
@@ -3810,20 +3877,11 @@ static void nvme_scan_work(struct work_struct *work)
nvme_clear_changed_ns_log(ctrl);
}
- if (nvme_identify_ctrl(ctrl, &id))
- return;
-
mutex_lock(&ctrl->scan_lock);
- nn = le32_to_cpu(id->nn);
- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
- !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
- if (!nvme_scan_ns_list(ctrl, nn))
- goto out_free_id;
- }
- nvme_scan_ns_sequential(ctrl, nn);
-out_free_id:
+ if (nvme_scan_ns_list(ctrl) != 0)
+ nvme_scan_ns_sequential(ctrl);
mutex_unlock(&ctrl->scan_lock);
- kfree(id);
+
down_write(&ctrl->namespaces_rwsem);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
up_write(&ctrl->namespaces_rwsem);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7dfc4a2ecf1e..cb0007592c12 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -14,6 +14,7 @@
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
+#include "fc.h"
#include <scsi/scsi_transport_fc.h>
/* *************************** Data Structures/Defines ****************** */
@@ -61,6 +62,17 @@ struct nvmefc_ls_req_op {
bool req_queued;
};
+struct nvmefc_ls_rcv_op {
+ struct nvme_fc_rport *rport;
+ struct nvmefc_ls_rsp *lsrsp;
+ union nvmefc_ls_requests *rqstbuf;
+ union nvmefc_ls_responses *rspbuf;
+ u16 rqstdatalen;
+ bool handled;
+ dma_addr_t rspdma;
+ struct list_head lsrcv_list; /* rport->ls_rcv_list */
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
enum nvme_fcpop_state {
FCPOP_STATE_UNINIT = 0,
FCPOP_STATE_IDLE = 1,
@@ -96,7 +108,7 @@ struct nvme_fc_fcp_op {
struct nvme_fcp_op_w_sgl {
struct nvme_fc_fcp_op op;
struct scatterlist sgl[NVME_INLINE_SG_CNT];
- uint8_t priv[0];
+ uint8_t priv[];
};
struct nvme_fc_lport {
@@ -117,6 +129,7 @@ struct nvme_fc_rport {
struct list_head endp_list; /* for lport->endp_list */
struct list_head ctrl_list;
struct list_head ls_req_list;
+ struct list_head ls_rcv_list;
struct list_head disc_list;
struct device *dev; /* physical device for dma */
struct nvme_fc_lport *lport;
@@ -124,11 +137,12 @@ struct nvme_fc_rport {
struct kref ref;
atomic_t act_ctrl_cnt;
unsigned long dev_loss_end;
+ struct work_struct lsrcv_work;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
-enum nvme_fcctrl_flags {
- FCCTRL_TERMIO = (1 << 0),
-};
+/* fc_ctrl flags values - specified as bit positions */
+#define ASSOC_ACTIVE 0
+#define FCCTRL_TERMIO 1
struct nvme_fc_ctrl {
spinlock_t lock;
@@ -139,9 +153,9 @@ struct nvme_fc_ctrl {
u32 cnum;
bool ioq_live;
- bool assoc_active;
atomic_t err_work_active;
u64 association_id;
+ struct nvmefc_ls_rcv_op *rcv_disconn;
struct list_head ctrl_list; /* rport->ctrl_list */
@@ -152,7 +166,7 @@ struct nvme_fc_ctrl {
struct work_struct err_work;
struct kref ref;
- u32 flags;
+ unsigned long flags;
u32 iocnt;
wait_queue_head_t ioabort_wait;
@@ -219,6 +233,9 @@ static struct device *fc_udev_device;
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
struct nvme_fc_queue *, unsigned int);
+static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
+
+
static void
nvme_fc_free_lport(struct kref *ref)
{
@@ -394,7 +411,10 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
newrec->ops = template;
newrec->dev = dev;
ida_init(&newrec->endp_cnt);
- newrec->localport.private = &newrec[1];
+ if (template->local_priv_sz)
+ newrec->localport.private = &newrec[1];
+ else
+ newrec->localport.private = NULL;
newrec->localport.node_name = pinfo->node_name;
newrec->localport.port_name = pinfo->port_name;
newrec->localport.port_role = pinfo->port_role;
@@ -701,9 +721,13 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
atomic_set(&newrec->act_ctrl_cnt, 0);
spin_lock_init(&newrec->lock);
newrec->remoteport.localport = &lport->localport;
+ INIT_LIST_HEAD(&newrec->ls_rcv_list);
newrec->dev = lport->dev;
newrec->lport = lport;
- newrec->remoteport.private = &newrec[1];
+ if (lport->ops->remote_priv_sz)
+ newrec->remoteport.private = &newrec[1];
+ else
+ newrec->remoteport.private = NULL;
newrec->remoteport.port_role = pinfo->port_role;
newrec->remoteport.node_name = pinfo->node_name;
newrec->remoteport.port_name = pinfo->port_name;
@@ -711,6 +735,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
newrec->remoteport.port_num = idx;
__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
+ INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);
spin_lock_irqsave(&nvme_fc_lock, flags);
list_add_tail(&newrec->endp_list, &lport->endp_list);
@@ -1000,6 +1025,7 @@ fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
+static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
@@ -1140,41 +1166,6 @@ nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
return __nvme_fc_send_ls_req(rport, lsop, done);
}
-/* Validation Error indexes into the string table below */
-enum {
- VERR_NO_ERROR = 0,
- VERR_LSACC = 1,
- VERR_LSDESC_RQST = 2,
- VERR_LSDESC_RQST_LEN = 3,
- VERR_ASSOC_ID = 4,
- VERR_ASSOC_ID_LEN = 5,
- VERR_CONN_ID = 6,
- VERR_CONN_ID_LEN = 7,
- VERR_CR_ASSOC = 8,
- VERR_CR_ASSOC_ACC_LEN = 9,
- VERR_CR_CONN = 10,
- VERR_CR_CONN_ACC_LEN = 11,
- VERR_DISCONN = 12,
- VERR_DISCONN_ACC_LEN = 13,
-};
-
-static char *validation_errors[] = {
- "OK",
- "Not LS_ACC",
- "Not LSDESC_RQST",
- "Bad LSDESC_RQST Length",
- "Not Association ID",
- "Bad Association ID Length",
- "Not Connection ID",
- "Bad Connection ID Length",
- "Not CR_ASSOC Rqst",
- "Bad CR_ASSOC ACC Length",
- "Not CR_CONN Rqst",
- "Bad CR_CONN ACC Length",
- "Not Disconnect Rqst",
- "Bad Disconnect ACC Length",
-};
-
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
@@ -1183,21 +1174,27 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
struct nvmefc_ls_req *lsreq;
struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
struct fcnvme_ls_cr_assoc_acc *assoc_acc;
+ unsigned long flags;
int ret, fcret = 0;
lsop = kzalloc((sizeof(*lsop) +
- ctrl->lport->ops->lsrqst_priv_sz +
- sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
+ sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Create Association failed: ENOMEM\n",
+ ctrl->cnum);
ret = -ENOMEM;
goto out_no_memory;
}
- lsreq = &lsop->ls_req;
- lsreq->private = (void *)&lsop[1];
- assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
- (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = &assoc_acc[1];
+ else
+ lsreq->private = NULL;
assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
assoc_rqst->desc_list_len =
@@ -1267,11 +1264,13 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
"q %d Create Association LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
+ spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id =
be64_to_cpu(assoc_acc->associd.association_id);
queue->connection_id =
be64_to_cpu(assoc_acc->connectid.connection_id);
set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
}
out_free_buffer:
@@ -1295,18 +1294,23 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
int ret, fcret = 0;
lsop = kzalloc((sizeof(*lsop) +
- ctrl->lport->ops->lsrqst_priv_sz +
- sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
+ sizeof(*conn_rqst) + sizeof(*conn_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
+ ctrl->cnum);
ret = -ENOMEM;
goto out_no_memory;
}
- lsreq = &lsop->ls_req;
- lsreq->private = (void *)&lsop[1];
- conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
- (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&conn_acc[1];
+ else
+ lsreq->private = NULL;
conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
conn_rqst->desc_list_len = cpu_to_be32(
@@ -1420,54 +1424,385 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
int ret;
lsop = kzalloc((sizeof(*lsop) +
- ctrl->lport->ops->lsrqst_priv_sz +
- sizeof(*discon_rqst) + sizeof(*discon_acc)),
- GFP_KERNEL);
- if (!lsop)
- /* couldn't sent it... too bad */
+ sizeof(*discon_rqst) + sizeof(*discon_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Disconnect Association "
+ "failed: ENOMEM\n",
+ ctrl->cnum);
return;
+ }
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&discon_acc[1];
+ else
+ lsreq->private = NULL;
- lsreq->private = (void *)&lsop[1];
- discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)
- (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
- discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
+ nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
+ ctrl->association_id);
- discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
- discon_rqst->desc_list_len = cpu_to_be32(
- sizeof(struct fcnvme_lsdesc_assoc_id) +
- sizeof(struct fcnvme_lsdesc_disconn_cmd));
+ ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
+ nvme_fc_disconnect_assoc_done);
+ if (ret)
+ kfree(lsop);
+}
- discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
- discon_rqst->associd.desc_len =
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_assoc_id));
+static void
+nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
+{
+ struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_del(&lsop->lsrcv_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+ fc_dma_unmap_single(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+
+ kfree(lsop);
+
+ nvme_fc_rport_put(rport);
+}
+
+static void
+nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
+ int ret;
+
+ fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+
+ ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
+ lsop->lsrsp);
+ if (ret) {
+ dev_warn(lport->dev,
+ "LLDD rejected LS RSP xmt: LS %d status %d\n",
+ w0->ls_cmd, ret);
+ nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
+ return;
+ }
+}
+
+static struct nvme_fc_ctrl *
+nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
+ struct nvmefc_ls_rcv_op *lsop)
+{
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &lsop->rqstbuf->rq_dis_assoc;
+ struct nvme_fc_ctrl *ctrl, *ret = NULL;
+ struct nvmefc_ls_rcv_op *oldls = NULL;
+ u64 association_id = be64_to_cpu(rqst->associd.association_id);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ if (!nvme_fc_ctrl_get(ctrl))
+ continue;
+ spin_lock(&ctrl->lock);
+ if (association_id == ctrl->association_id) {
+ oldls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = lsop;
+ ret = ctrl;
+ }
+ spin_unlock(&ctrl->lock);
+ if (ret)
+ /* leave the ctrl get reference */
+ break;
+ nvme_fc_ctrl_put(ctrl);
+ }
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ /* transmit a response for anything that was pending */
+ if (oldls) {
+ dev_info(rport->lport->dev,
+ "NVME-FC{%d}: Multiple Disconnect Association "
+ "LS's received\n", ctrl->cnum);
+ /* overwrite good response with bogus failure */
+ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
+ sizeof(*oldls->rspbuf),
+ rqst->w0.ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ nvme_fc_xmt_ls_rsp(oldls);
+ }
+
+ return ret;
+}
+
+/*
+ * returns true to mean LS handled and ls_rsp can be sent
+ * returns false to defer ls_rsp xmt (will be done as part of
+ * association termination)
+ */
+static bool
+nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &lsop->rqstbuf->rq_dis_assoc;
+ struct fcnvme_ls_disconnect_assoc_acc *acc =
+ &lsop->rspbuf->rsp_dis_assoc;
+ struct nvme_fc_ctrl *ctrl = NULL;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
+ if (!ret) {
+ /* match an active association */
+ ctrl = nvme_fc_match_disconn_ls(rport, lsop);
+ if (!ctrl)
+ ret = VERR_NO_ASSOC;
+ }
+
+ if (ret) {
+ dev_info(rport->lport->dev,
+ "Disconnect LS failed: %s\n",
+ validation_errors[ret]);
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
+ (ret == VERR_NO_ASSOC) ?
+ FCNVME_RJT_RC_INV_ASSOC :
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
+ return true;
+ }
- discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
+ /* format an ACCept response */
- discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
- FCNVME_LSDESC_DISCONN_CMD);
- discon_rqst->discon_cmd.desc_len =
+ lsop->lsrsp->rsplen = sizeof(*acc);
+
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_disconn_cmd));
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
- lsreq->rqstaddr = discon_rqst;
- lsreq->rqstlen = sizeof(*discon_rqst);
- lsreq->rspaddr = discon_acc;
- lsreq->rsplen = sizeof(*discon_acc);
- lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
+ /*
+ * the transmit of the response will occur after the exchanges
+ * for the association have been ABTS'd by
+ * nvme_fc_delete_association().
+ */
- ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
- nvme_fc_disconnect_assoc_done);
- if (ret)
- kfree(lsop);
+ /* fail the association */
+ nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
+
+ /* release the reference taken by nvme_fc_match_disconn_ls() */
+ nvme_fc_ctrl_put(ctrl);
+
+ return false;
}
+/*
+ * Actual Processing routine for received FC-NVME LS Requests from the LLD
+ * returns true if a response should be sent afterward, false if rsp will
+ * be sent asynchronously.
+ */
+static bool
+nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
+ bool ret = true;
+
+ lsop->lsrsp->nvme_fc_private = lsop;
+ lsop->lsrsp->rspbuf = lsop->rspbuf;
+ lsop->lsrsp->rspdma = lsop->rspdma;
+ lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
+ /* Be preventative. Handlers will later set to valid length */
+ lsop->lsrsp->rsplen = 0;
-/* *********************** NVME Ctrl Routines **************************** */
+ /*
+ * handlers:
+ * parse request input, execute the request, and format the
+ * LS response
+ */
+ switch (w0->ls_cmd) {
+ case FCNVME_LS_DISCONNECT_ASSOC:
+ ret = nvme_fc_ls_disconnect_assoc(lsop);
+ break;
+ case FCNVME_LS_DISCONNECT_CONN:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ case FCNVME_LS_CREATE_ASSOCIATION:
+ case FCNVME_LS_CREATE_CONNECTION:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ default:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ }
-static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
+ return ret;
+}
+
+static void
+nvme_fc_handle_ls_rqst_work(struct work_struct *work)
+{
+ struct nvme_fc_rport *rport =
+ container_of(work, struct nvme_fc_rport, lsrcv_work);
+ struct fcnvme_ls_rqst_w0 *w0;
+ struct nvmefc_ls_rcv_op *lsop;
+ unsigned long flags;
+ bool sendrsp;
+
+restart:
+ sendrsp = true;
+ spin_lock_irqsave(&rport->lock, flags);
+ list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
+ if (lsop->handled)
+ continue;
+
+ lsop->handled = true;
+ if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ sendrsp = nvme_fc_handle_ls_rqst(lsop);
+ } else {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ w0 = &lsop->rqstbuf->w0;
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(
+ lsop->rspbuf,
+ sizeof(*lsop->rspbuf),
+ w0->ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ }
+ if (sendrsp)
+ nvme_fc_xmt_ls_rsp(lsop);
+ goto restart;
+ }
+ spin_unlock_irqrestore(&rport->lock, flags);
+}
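A note on the restart pattern above: rport->lock is dropped before each LS is handled, because nvme_fc_handle_ls_rqst() takes rport->lock and ctrl->lock itself via nvme_fc_match_disconn_ls(). Since ls_rcv_list can therefore change while an op is being processed, the per-op handled flag plus the goto back to the list head keep the walk correct without holding the lock across the handler.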
+
+/**
+ * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
+ * upon the reception of a NVME LS request.
+ *
+ * The nvme-fc layer will copy payload to an internal structure for
+ * processing. As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the LS request buffer passed in the call.
+ *
+ * If this routine returns error, the LLDD should abort the exchange.
+ *
+ * @portptr: pointer to the (registered) remote port that the LS
+ * was received from. The remoteport is associated with
+ * a specific localport.
+ * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be
+ * used to reference the exchange corresponding to the LS
+ * when issuing an ls response.
+ * @lsreqbuf: pointer to the buffer containing the LS Request
+ * @lsreqbuf_len: length, in bytes, of the received LS request
+ */
+int
+nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
+ struct nvmefc_ls_rsp *lsrsp,
+ void *lsreqbuf, u32 lsreqbuf_len)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ struct nvme_fc_lport *lport = rport->lport;
+ struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
+ struct nvmefc_ls_rcv_op *lsop;
+ unsigned long flags;
+ int ret;
+
+ nvme_fc_rport_get(rport);
+
+ /* validate there's a routine to transmit a response */
+ if (!lport->ops->xmt_ls_rsp) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: no LLDD xmt_ls_rsp\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: payload too large\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -E2BIG;
+ goto out_put;
+ }
+
+ lsop = kzalloc(sizeof(*lsop) +
+ sizeof(union nvmefc_ls_requests) +
+ sizeof(union nvmefc_ls_responses),
+ GFP_KERNEL);
+ if (!lsop) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: No memory\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
+ lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];
+
+ lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
+ sizeof(*lsop->rspbuf),
+ DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: DMA mapping failure\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ lsop->rport = rport;
+ lsop->lsrsp = lsrsp;
+
+ memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
+ lsop->rqstdatalen = lsreqbuf_len;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ ret = -ENOTCONN;
+ goto out_unmap;
+ }
+ list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ schedule_work(&rport->lsrcv_work);
+
+ return 0;
+
+out_unmap:
+ fc_dma_unmap_single(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+out_free:
+ kfree(lsop);
+out_put:
+ nvme_fc_rport_put(rport);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
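For orientation, a hedged sketch of the LLDD side of this contract; the lldd_* names and exchange structure are illustrative assumptions, not part of this patch:

	/* illustrative LLDD glue: feed a received LS into the transport */
	struct lldd_exch {
		struct nvmefc_ls_rsp ls_rsp;	/* owned by transport until ->done() */
		/* ... hardware exchange state ... */
	};

	static void lldd_recv_nvme_ls(struct lldd_exch *xch,
				      struct nvme_fc_remote_port *rport,
				      void *buf, u32 len)
	{
		/* buf may be freed/reused as soon as this call returns */
		if (nvme_fc_rcv_ls_req(rport, &xch->ls_rsp, buf, len))
			lldd_abort_exchange(xch);	/* hypothetical helper */
	}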
+
+
+/* *********************** NVME Ctrl Routines **************************** */
static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
@@ -1500,7 +1835,7 @@ __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
if (opstate != FCPOP_STATE_ACTIVE)
atomic_set(&op->state, opstate);
- else if (ctrl->flags & FCCTRL_TERMIO)
+ else if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
ctrl->iocnt++;
spin_unlock_irqrestore(&ctrl->lock, flags);
@@ -1537,7 +1872,7 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
if (opstate == FCPOP_STATE_ABORTED) {
spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO) {
+ if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
if (!--ctrl->iocnt)
wake_up(&ctrl->ioabort_wait);
}
@@ -1771,7 +2106,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
if (res)
return res;
- op->op.fcp_req.first_sgl = &op->sgl[0];
+ op->op.fcp_req.first_sgl = op->sgl;
op->op.fcp_req.private = &op->priv[0];
nvme_req(rq)->ctrl = &ctrl->ctrl;
return res;
@@ -1783,15 +2118,17 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op;
struct nvme_fc_cmd_iu *cmdiu;
struct nvme_command *sqe;
- void *private;
+ void *private = NULL;
int i, ret;
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
- private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
+ if (ctrl->lport->ops->fcprqst_priv_sz) {
+ private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
GFP_KERNEL);
- if (!private)
- return -ENOMEM;
+ if (!private)
+ return -ENOMEM;
+ }
cmdiu = &aen_op->cmd_iu;
sqe = &cmdiu->sqe;
@@ -1822,9 +2159,6 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
- if (!aen_op->fcp_req.private)
- continue;
-
__nvme_fc_exit_request(ctrl, aen_op);
kfree(aen_op->fcp_req.private);
@@ -2366,16 +2700,9 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
struct nvme_fc_fcp_op *aen_op;
- unsigned long flags;
- bool terminating = false;
blk_status_t ret;
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO)
- terminating = true;
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- if (terminating)
+ if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
return;
aen_op = &ctrl->aen_ops[0];
@@ -2584,10 +2911,9 @@ nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_rport *rport = ctrl->rport;
u32 cnt;
- if (ctrl->assoc_active)
+ if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
return 1;
- ctrl->assoc_active = true;
cnt = atomic_inc_return(&rport->act_ctrl_cnt);
if (cnt == 1)
nvme_fc_rport_active_on_lport(rport);
@@ -2602,7 +2928,7 @@ nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_lport *lport = rport->lport;
u32 cnt;
- /* ctrl->assoc_active=false will be set independently */
+ /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
cnt = atomic_dec_return(&rport->act_ctrl_cnt);
if (cnt == 0) {
@@ -2622,6 +2948,8 @@ static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ struct nvmefc_ls_rcv_op *disls = NULL;
+ unsigned long flags;
int ret;
bool changed;
@@ -2739,12 +3067,18 @@ out_term_aen_ops:
out_disconnect_admin_queue:
/* send a Disconnect(association) LS to fc-nvme target */
nvme_fc_xmt_disconnect_assoc(ctrl);
+ spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id = 0;
+ disls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = NULL;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (disls)
+ nvme_fc_xmt_ls_rsp(disls);
out_delete_hw_queue:
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
nvme_fc_free_queue(&ctrl->queues[0]);
- ctrl->assoc_active = false;
+ clear_bit(ASSOC_ACTIVE, &ctrl->flags);
nvme_fc_ctlr_inactive_on_rport(ctrl);
return ret;
@@ -2759,14 +3093,14 @@ out_free_queue:
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
+ struct nvmefc_ls_rcv_op *disls = NULL;
unsigned long flags;
- if (!ctrl->assoc_active)
+ if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
return;
- ctrl->assoc_active = false;
spin_lock_irqsave(&ctrl->lock, flags);
- ctrl->flags |= FCCTRL_TERMIO;
+ set_bit(FCCTRL_TERMIO, &ctrl->flags);
ctrl->iocnt = 0;
spin_unlock_irqrestore(&ctrl->lock, flags);
@@ -2817,7 +3151,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
/* wait for all io that had to be aborted */
spin_lock_irq(&ctrl->lock);
wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
- ctrl->flags &= ~FCCTRL_TERMIO;
+ clear_bit(FCCTRL_TERMIO, &ctrl->flags);
spin_unlock_irq(&ctrl->lock);
nvme_fc_term_aen_ops(ctrl);
@@ -2831,7 +3165,17 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->association_id)
nvme_fc_xmt_disconnect_assoc(ctrl);
+ spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id = 0;
+ disls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = NULL;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (disls)
+ /*
+ * if a Disconnect Request was waiting for a response, send
+ * now that all ABTS's have been issued (and are complete).
+ */
+ nvme_fc_xmt_ls_rsp(disls);
if (ctrl->ctrl.tagset) {
nvme_fc_delete_hw_io_queues(ctrl);
@@ -2902,7 +3246,9 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: dev_loss_tmo (%d) expired "
"while waiting for remoteport connectivity.\n",
- ctrl->cnum, portptr->dev_loss_tmo);
+ ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
+ (ctrl->ctrl.opts->max_reconnects *
+ ctrl->ctrl.opts->reconnect_delay)));
WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
}
}
@@ -3089,7 +3435,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->dev = lport->dev;
ctrl->cnum = idx;
ctrl->ioq_live = false;
- ctrl->assoc_active = false;
atomic_set(&ctrl->err_work_active, 0);
init_waitqueue_head(&ctrl->ioabort_wait);
diff --git a/drivers/nvme/host/fc.h b/drivers/nvme/host/fc.h
new file mode 100644
index 000000000000..05ce566f2caf
--- /dev/null
+++ b/drivers/nvme/host/fc.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016, Avago Technologies
+ */
+
+#ifndef _NVME_FC_TRANSPORT_H
+#define _NVME_FC_TRANSPORT_H 1
+
+
+/*
+ * Common definitions between the nvme_fc (host) transport and
+ * nvmet_fc (target) transport implementation.
+ */
+
+/*
+ * ****************** FC-NVME LS HANDLING ******************
+ */
+
+union nvmefc_ls_requests {
+ struct fcnvme_ls_rqst_w0 w0;
+ struct fcnvme_ls_cr_assoc_rqst rq_cr_assoc;
+ struct fcnvme_ls_cr_conn_rqst rq_cr_conn;
+ struct fcnvme_ls_disconnect_assoc_rqst rq_dis_assoc;
+ struct fcnvme_ls_disconnect_conn_rqst rq_dis_conn;
+} __aligned(128); /* alignment for other things alloc'd with */
+
+union nvmefc_ls_responses {
+ struct fcnvme_ls_rjt rsp_rjt;
+ struct fcnvme_ls_cr_assoc_acc rsp_cr_assoc;
+ struct fcnvme_ls_cr_conn_acc rsp_cr_conn;
+ struct fcnvme_ls_disconnect_assoc_acc rsp_dis_assoc;
+ struct fcnvme_ls_disconnect_conn_acc rsp_dis_conn;
+} __aligned(128); /* alignment for other things alloc'd with */
+
+static inline void
+nvme_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
+{
+ struct fcnvme_ls_acc_hdr *acc = buf;
+
+ acc->w0.ls_cmd = ls_cmd;
+ acc->desc_list_len = desc_len;
+ acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ acc->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ acc->rqst.w0.ls_cmd = rqst_ls_cmd;
+}
+
+static inline int
+nvme_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
+ u8 reason, u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ nvme_fc_format_rsp_hdr(buf, FCNVME_LS_RJT,
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
+ ls_cmd);
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+
+ return sizeof(struct fcnvme_ls_rjt);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+ VERR_NO_ERROR = 0,
+ VERR_CR_ASSOC_LEN = 1,
+ VERR_CR_ASSOC_RQST_LEN = 2,
+ VERR_CR_ASSOC_CMD = 3,
+ VERR_CR_ASSOC_CMD_LEN = 4,
+ VERR_ERSP_RATIO = 5,
+ VERR_ASSOC_ALLOC_FAIL = 6,
+ VERR_QUEUE_ALLOC_FAIL = 7,
+ VERR_CR_CONN_LEN = 8,
+ VERR_CR_CONN_RQST_LEN = 9,
+ VERR_ASSOC_ID = 10,
+ VERR_ASSOC_ID_LEN = 11,
+ VERR_NO_ASSOC = 12,
+ VERR_CONN_ID = 13,
+ VERR_CONN_ID_LEN = 14,
+ VERR_INVAL_CONN = 15,
+ VERR_CR_CONN_CMD = 16,
+ VERR_CR_CONN_CMD_LEN = 17,
+ VERR_DISCONN_LEN = 18,
+ VERR_DISCONN_RQST_LEN = 19,
+ VERR_DISCONN_CMD = 20,
+ VERR_DISCONN_CMD_LEN = 21,
+ VERR_DISCONN_SCOPE = 22,
+ VERR_RS_LEN = 23,
+ VERR_RS_RQST_LEN = 24,
+ VERR_RS_CMD = 25,
+ VERR_RS_CMD_LEN = 26,
+ VERR_RS_RCTL = 27,
+ VERR_RS_RO = 28,
+ VERR_LSACC = 29,
+ VERR_LSDESC_RQST = 30,
+ VERR_LSDESC_RQST_LEN = 31,
+ VERR_CR_ASSOC = 32,
+ VERR_CR_ASSOC_ACC_LEN = 33,
+ VERR_CR_CONN = 34,
+ VERR_CR_CONN_ACC_LEN = 35,
+ VERR_DISCONN = 36,
+ VERR_DISCONN_ACC_LEN = 37,
+};
+
+static char *validation_errors[] = {
+ "OK",
+ "Bad CR_ASSOC Length",
+ "Bad CR_ASSOC Rqst Length",
+ "Not CR_ASSOC Cmd",
+ "Bad CR_ASSOC Cmd Length",
+ "Bad Ersp Ratio",
+ "Association Allocation Failed",
+ "Queue Allocation Failed",
+ "Bad CR_CONN Length",
+ "Bad CR_CONN Rqst Length",
+ "Not Association ID",
+ "Bad Association ID Length",
+ "No Association",
+ "Not Connection ID",
+ "Bad Connection ID Length",
+ "Invalid Connection ID",
+ "Not CR_CONN Cmd",
+ "Bad CR_CONN Cmd Length",
+ "Bad DISCONN Length",
+ "Bad DISCONN Rqst Length",
+ "Not DISCONN Cmd",
+ "Bad DISCONN Cmd Length",
+ "Bad Disconnect Scope",
+ "Bad RS Length",
+ "Bad RS Rqst Length",
+ "Not RS Cmd",
+ "Bad RS Cmd Length",
+ "Bad RS R_CTL",
+ "Bad RS Relative Offset",
+ "Not LS_ACC",
+ "Not LSDESC_RQST",
+ "Bad LSDESC_RQST Length",
+ "Not CR_ASSOC Rqst",
+ "Bad CR_ASSOC ACC Length",
+ "Not CR_CONN Rqst",
+ "Bad CR_CONN ACC Length",
+ "Not Disconnect Rqst",
+ "Bad Disconnect ACC Length",
+};
+
+#define NVME_FC_LAST_LS_CMD_VALUE FCNVME_LS_DISCONNECT_CONN
+
+static char *nvmefc_ls_names[] = {
+ "Reserved (0)",
+ "RJT (1)",
+ "ACC (2)",
+ "Create Association",
+ "Create Connection",
+ "Disconnect Association",
+ "Disconnect Connection",
+};
+
+static inline void
+nvmefc_fmt_lsreq_discon_assoc(struct nvmefc_ls_req *lsreq,
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst,
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc,
+ u64 association_id)
+{
+ lsreq->rqstaddr = discon_rqst;
+ lsreq->rqstlen = sizeof(*discon_rqst);
+ lsreq->rspaddr = discon_acc;
+ lsreq->rsplen = sizeof(*discon_acc);
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
+
+ discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
+ discon_rqst->desc_list_len = cpu_to_be32(
+ sizeof(struct fcnvme_lsdesc_assoc_id) +
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+
+ discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ discon_rqst->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+
+ discon_rqst->associd.association_id = cpu_to_be64(association_id);
+
+ discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
+ FCNVME_LSDESC_DISCONN_CMD);
+ discon_rqst->discon_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+}
+
+static inline int
+nvmefc_vldt_lsreq_discon_assoc(u32 rqstlen,
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst)
+{
+ int ret = 0;
+
+ if (rqstlen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
+ ret = VERR_DISCONN_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
+ ret = VERR_DISCONN_RQST_LEN;
+ else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ ret = VERR_ASSOC_ID;
+ else if (rqst->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ ret = VERR_ASSOC_ID_LEN;
+ else if (rqst->discon_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
+ ret = VERR_DISCONN_CMD;
+ else if (rqst->discon_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd)))
+ ret = VERR_DISCONN_CMD_LEN;
+ /*
+ * As the standard changed on the LS, check whether this is the old
+ * format with a scope value other than Association (e.g. 0).
+ */
+ else if (rqst->discon_cmd.rsvd8[0])
+ ret = VERR_DISCONN_SCOPE;
+
+ return ret;
+}
+
+#endif /* _NVME_FC_TRANSPORT_H */
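As a usage illustration of the shared helpers above, rejecting an unsupported LS might look like this (a sketch; example_reject is not code from either transport):

	/* illustrative: build an LS_RJT for an unsupported command */
	static int example_reject(u8 rcvd_ls_cmd, union nvmefc_ls_responses *rsp)
	{
		/* returns sizeof(struct fcnvme_ls_rjt); hand rsp to the LLDD */
		return nvme_fc_format_rjt(rsp, sizeof(*rsp), rcvd_ls_cmd,
					  FCNVME_RJT_RC_UNSUP,
					  FCNVME_RJT_EXP_NONE, 0);
	}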
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index ec46693f6b64..69608755d415 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -171,7 +171,7 @@ struct nvme_nvm_bb_tbl {
__le32 tdresv;
__le32 thresv;
__le32 rsvd2[8];
- __u8 blk[0];
+ __u8 blk[];
};
struct nvme_nvm_id20_addrf {
@@ -961,7 +961,10 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
geo = &dev->geo;
geo->csecs = 1 << ns->lba_shift;
geo->sos = ns->ms;
- geo->ext = ns->ext;
+ if (ns->features & NVME_NS_EXT_LBAS)
+ geo->ext = true;
+ else
+ geo->ext = false;
geo->mdts = ns->ctrl->max_hw_sectors;
dev->q = q;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 54603bd3e02d..da78e499947a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -3,6 +3,7 @@
* Copyright (c) 2017-2018 Christoph Hellwig.
*/
+#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"
@@ -293,7 +294,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
struct bio *bio)
{
- struct nvme_ns_head *head = q->queuedata;
+ struct nvme_ns_head *head = bio->bi_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
struct nvme_ns *ns;
blk_qc_t ret = BLK_QC_T_NONE;
@@ -371,13 +372,12 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
* We also do this for private namespaces as the namespace sharing data could
* change after a rescan.
*/
- if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
+ if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
return 0;
q = blk_alloc_queue(nvme_ns_head_make_request, ctrl->numa_node);
if (!q)
goto out;
- q->queuedata = head;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(q, 512);
@@ -666,6 +666,13 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
nvme_mpath_set_live(ns);
mutex_unlock(&ns->head->lock);
}
+
+ if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
+ struct backing_dev_info *info =
+ ns->head->disk->queue->backing_dev_info;
+
+ info->capabilities |= BDI_CAP_STABLE_WRITES;
+ }
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -687,7 +694,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
int error;
/* check if multipath is enabled and we have the capability */
- if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
+ if (!multipath || !ctrl->subsys ||
+ !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
return 0;
ctrl->anacap = id->anacap;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2e04a36296d9..fa5c75501049 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -16,6 +16,7 @@
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
+#include <linux/t10-pi.h>
#include <trace/events/block.h>
@@ -30,8 +31,10 @@ extern unsigned int admin_timeout;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT 0
+#define NVME_INLINE_METADATA_SG_CNT 0
#else
#define NVME_INLINE_SG_CNT 2
+#define NVME_INLINE_METADATA_SG_CNT 1
#endif
extern struct workqueue_struct *nvme_wq;
@@ -228,6 +231,7 @@ struct nvme_ctrl {
u32 page_size;
u32 max_hw_sectors;
u32 max_segments;
+ u32 max_integrity_segments;
u16 crdt[3];
u16 oncs;
u16 oacs;
@@ -352,6 +356,7 @@ struct nvme_ns_head {
struct nvme_ns_ids ids;
struct list_head entry;
struct kref ref;
+ bool shared;
int instance;
#ifdef CONFIG_NVME_MULTIPATH
struct gendisk *disk;
@@ -363,6 +368,11 @@ struct nvme_ns_head {
#endif
};
+enum nvme_ns_features {
+ NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
+ NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
+};
+
struct nvme_ns {
struct list_head list;
@@ -382,18 +392,23 @@ struct nvme_ns {
u16 ms;
u16 sgs;
u32 sws;
- bool ext;
u8 pi_type;
+ unsigned long features;
unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD 1
#define NVME_NS_ANA_PENDING 2
- u16 noiob;
struct nvme_fault_inject fault_inject;
};
+/* NVMe ns supports metadata actions by the controller (generate/strip) */
+static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+{
+ return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
+}
+
struct nvme_ctrl_ops {
const char *name;
struct module *module;
@@ -449,6 +464,14 @@ static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
return lba << (ns->lba_shift - SECTOR_SHIFT);
}
+/*
+ * Convert byte length to nvme's 0-based num dwords
+ */
+static inline u32 nvme_bytes_to_numd(size_t len)
+{
+ return (len >> 2) - 1;
+}
+
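A quick worked example of the 0's-based encoding (illustrative):

	u32 numd = nvme_bytes_to_numd(4096);	/* 4096 B = 1024 dwords, encoded as 1023 */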
static inline void nvme_end_request(struct request *req, __le16 status,
union nvme_result result)
{
@@ -489,7 +512,6 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
-void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e13c370de830..d690d5593a80 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -68,14 +68,30 @@ static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");
+static int io_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned int n;
+ int ret;
+
+ ret = kstrtouint(val, 10, &n);
+ if (ret != 0 || n > num_possible_cpus())
+ return -EINVAL;
+ return param_set_uint(val, kp);
+}
+
+static const struct kernel_param_ops io_queue_count_ops = {
+ .set = io_queue_count_set,
+ .get = param_get_uint,
+};
+
static unsigned int write_queues;
-module_param(write_queues, uint, 0644);
+module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
"Number of queues to use for writes. If not set, reads and writes "
"will share a queue set.");
static unsigned int poll_queues;
-module_param(poll_queues, uint, 0644);
+module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
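With the _cb variants wired up, out-of-range values are rejected with -EINVAL when the parameter is set (kernel command line, modprobe option, or a write to /sys/module/nvme/parameters/), rather than being silently clamped once at module init as the removed min() calls in nvme_init() used to do.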
struct nvme_dev;
@@ -128,6 +144,9 @@ struct nvme_dev {
dma_addr_t host_mem_descs_dma;
struct nvme_host_mem_buf_desc *host_mem_descs;
void **host_mem_desc_bufs;
+ unsigned int nr_allocated_queues;
+ unsigned int nr_write_queues;
+ unsigned int nr_poll_queues;
};
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
@@ -166,14 +185,13 @@ struct nvme_queue {
void *sq_cmds;
/* only used for poll queues: */
spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
- volatile struct nvme_completion *cqes;
+ struct nvme_completion *cqes;
dma_addr_t sq_dma_addr;
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
u16 q_depth;
u16 cq_vector;
u16 sq_tail;
- u16 last_sq_tail;
u16 cq_head;
u16 qid;
u8 cq_phase;
@@ -209,25 +227,14 @@ struct nvme_iod {
struct scatterlist *sg;
};
-static unsigned int max_io_queues(void)
-{
- return num_possible_cpus() + write_queues + poll_queues;
-}
-
-static unsigned int max_queue_count(void)
-{
- /* IO queues + admin queue */
- return 1 + max_io_queues();
-}
-
-static inline unsigned int nvme_dbbuf_size(u32 stride)
+static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
{
- return (max_queue_count() * 8 * stride);
+ return dev->nr_allocated_queues * 8 * dev->db_stride;
}
static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
- unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+ unsigned int mem_size = nvme_dbbuf_size(dev);
if (dev->dbbuf_dbs)
return 0;
@@ -252,7 +259,7 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
- unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+ unsigned int mem_size = nvme_dbbuf_size(dev);
if (dev->dbbuf_dbs) {
dma_free_coherent(dev->dev, mem_size,
@@ -446,24 +453,11 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
return 0;
}
-/*
- * Write sq tail if we are asked to, or if the next command would wrap.
- */
-static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
+static inline void nvme_write_sq_db(struct nvme_queue *nvmeq)
{
- if (!write_sq) {
- u16 next_tail = nvmeq->sq_tail + 1;
-
- if (next_tail == nvmeq->q_depth)
- next_tail = 0;
- if (next_tail != nvmeq->last_sq_tail)
- return;
- }
-
if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
writel(nvmeq->sq_tail, nvmeq->q_db);
- nvmeq->last_sq_tail = nvmeq->sq_tail;
}
/**
@@ -480,7 +474,8 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
cmd, sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
- nvme_write_sq_db(nvmeq, write_sq);
+ if (write_sq)
+ nvme_write_sq_db(nvmeq);
spin_unlock(&nvmeq->sq_lock);
}
@@ -489,8 +484,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
struct nvme_queue *nvmeq = hctx->driver_data;
spin_lock(&nvmeq->sq_lock);
- if (nvmeq->sq_tail != nvmeq->last_sq_tail)
- nvme_write_sq_db(nvmeq, true);
+ nvme_write_sq_db(nvmeq);
spin_unlock(&nvmeq->sq_lock);
}
@@ -922,8 +916,9 @@ static void nvme_pci_complete_rq(struct request *req)
/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
- return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
- nvmeq->cq_phase;
+ struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
+
+ return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
@@ -944,7 +939,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
- volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ struct nvme_completion *cqe = &nvmeq->cqes[idx];
struct request *req;
if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
@@ -989,6 +984,11 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq)
while (nvme_cqe_pending(nvmeq)) {
found++;
+ /*
+ * load-load control dependency between phase and the rest of
+ * the cqe requires a full read memory barrier
+ */
+ dma_rmb();
nvme_handle_cqe(nvmeq, nvmeq->cq_head);
nvme_update_cq_head(nvmeq);
}
@@ -1377,16 +1377,19 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
/*
* Called only on a device that has been disabled and after all other threads
- * that can check this device's completion queues have synced. This is the
- * last chance for the driver to see a natural completion before
- * nvme_cancel_request() terminates all incomplete requests.
+ * that can check this device's completion queues have synced, except
+ * nvme_poll(). This is the last chance for the driver to see a natural
+ * completion before nvme_cancel_request() terminates all incomplete requests.
*/
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
int i;
- for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+ for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
+ spin_lock(&dev->queues[i].cq_poll_lock);
nvme_process_cq(&dev->queues[i]);
+ spin_unlock(&dev->queues[i].cq_poll_lock);
+ }
}
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
@@ -1493,7 +1496,6 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
struct nvme_dev *dev = nvmeq->dev;
nvmeq->sq_tail = 0;
- nvmeq->last_sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
@@ -1995,7 +1997,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
{
struct nvme_dev *dev = affd->priv;
- unsigned int nr_read_queues;
+ unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
/*
* If there is no interrupt available for queues, ensure that
@@ -2011,12 +2013,12 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
if (!nrirqs) {
nrirqs = 1;
nr_read_queues = 0;
- } else if (nrirqs == 1 || !write_queues) {
+ } else if (nrirqs == 1 || !nr_write_queues) {
nr_read_queues = 0;
- } else if (write_queues >= nrirqs) {
+ } else if (nr_write_queues >= nrirqs) {
nr_read_queues = 1;
} else {
- nr_read_queues = nrirqs - write_queues;
+ nr_read_queues = nrirqs - nr_write_queues;
}
dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
@@ -2040,7 +2042,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
* Poll queues don't need interrupts, but we need at least one IO
* queue left over for non-polled IO.
*/
- this_p_queues = poll_queues;
+ this_p_queues = dev->nr_poll_queues;
if (this_p_queues >= nr_io_queues) {
this_p_queues = nr_io_queues - 1;
irq_queues = 1;
@@ -2070,14 +2072,25 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
__nvme_disable_io_queues(dev, nvme_admin_delete_cq);
}
+static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
+{
+ return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
+}
+
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct nvme_queue *adminq = &dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int result, nr_io_queues;
+ unsigned int nr_io_queues;
unsigned long size;
+ int result;
- nr_io_queues = max_io_queues();
+ /*
+ * Sample the module parameters once at reset time so that we have
+ * stable values to work with.
+ */
+ dev->nr_write_queues = write_queues;
+ dev->nr_poll_queues = poll_queues;
/*
* If tags are shared with admin queue (Apple bug), then
@@ -2085,6 +2098,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
nr_io_queues = 1;
+ else
+ nr_io_queues = min(nvme_max_io_queues(dev),
+ dev->nr_allocated_queues - 1);
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
@@ -2557,6 +2573,12 @@ static void nvme_reset_work(struct work_struct *work)
goto out;
}
+ /*
+ * We do not support an SGL for metadata (yet), so we are limited to a
+ * single integrity segment for the separate metadata pointer.
+ */
+ dev->ctrl.max_integrity_segments = 1;
+
result = nvme_init_identify(&dev->ctrl);
if (result)
goto out;
@@ -2759,8 +2781,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!dev)
return -ENOMEM;
- dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
- GFP_KERNEL, node);
+ dev->nr_write_queues = write_queues;
+ dev->nr_poll_queues = poll_queues;
+ dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
+ dev->queues = kcalloc_node(dev->nr_allocated_queues,
+ sizeof(struct nvme_queue), GFP_KERNEL, node);
if (!dev->queues)
goto free;
@@ -3123,8 +3148,6 @@ static int __init nvme_init(void)
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
- write_queues = min(write_queues, num_possible_cpus());
- poll_queues = min(poll_queues, num_possible_cpus());
return pci_register_driver(&nvme_driver);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cac8a930396a..f8f856dc0c67 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -34,6 +34,11 @@
#define NVME_RDMA_MAX_INLINE_SEGMENTS 4
+#define NVME_RDMA_DATA_SGL_SIZE \
+ (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
+#define NVME_RDMA_METADATA_SGL_SIZE \
+ (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)
+
struct nvme_rdma_device {
struct ib_device *dev;
struct ib_pd *pd;
@@ -48,6 +53,11 @@ struct nvme_rdma_qe {
u64 dma;
};
+struct nvme_rdma_sgl {
+ int nents;
+ struct sg_table sg_table;
+};
+
struct nvme_rdma_queue;
struct nvme_rdma_request {
struct nvme_request req;
@@ -58,12 +68,12 @@ struct nvme_rdma_request {
refcount_t ref;
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
- int nents;
struct ib_reg_wr reg_wr;
struct ib_cqe reg_cqe;
struct nvme_rdma_queue *queue;
- struct sg_table sg_table;
- struct scatterlist first_sgl[];
+ struct nvme_rdma_sgl data_sgl;
+ struct nvme_rdma_sgl *metadata_sgl;
+ bool use_sig_mr;
};
enum nvme_rdma_queue_flags {
@@ -85,6 +95,7 @@ struct nvme_rdma_queue {
struct rdma_cm_id *cm_id;
int cm_error;
struct completion cm_done;
+ bool pi_support;
};
struct nvme_rdma_ctrl {
@@ -261,6 +272,8 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
init_attr.qp_type = IB_QPT_RC;
init_attr.send_cq = queue->ib_cq;
init_attr.recv_cq = queue->ib_cq;
+ if (queue->pi_support)
+ init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
@@ -290,6 +303,12 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
if (!req->sqe.data)
return -ENOMEM;
+ /* metadata nvme_rdma_sgl struct is located after command's data SGL */
+ if (queue->pi_support)
+ req->metadata_sgl = (void *)nvme_req(rq) +
+ sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+
req->queue = queue;
return 0;
@@ -400,6 +419,8 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
dev = queue->device;
ibdev = dev->dev;
+ if (queue->pi_support)
+ ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
/*
@@ -416,10 +437,16 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
nvme_rdma_dev_put(dev);
}
-static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
{
- return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
- ibdev->attrs.max_fast_reg_page_list_len - 1);
+ u32 max_page_list_len;
+
+ if (pi_support)
+ max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
+ else
+ max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
+
+ return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
}
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
@@ -476,7 +503,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
* misaligned we'll end up using two entries for a single data page,
* so one additional entry is required.
*/
- pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
+ pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
queue->queue_size,
IB_MR_TYPE_MEM_REG,
@@ -488,10 +515,24 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
goto out_destroy_ring;
}
+ if (queue->pi_support) {
+ ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs,
+ queue->queue_size, IB_MR_TYPE_INTEGRITY,
+ pages_per_mr, pages_per_mr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to initialize PI MR pool sized %d for QID %d\n",
+ queue->queue_size, idx);
+ goto out_destroy_mr_pool;
+ }
+ }
+
set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
return 0;
+out_destroy_mr_pool:
+ ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
out_destroy_ring:
nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
sizeof(struct nvme_completion), DMA_FROM_DEVICE);
@@ -513,6 +554,10 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
queue = &ctrl->queues[idx];
queue->ctrl = ctrl;
+ if (idx && ctrl->ctrl.max_integrity_segments)
+ queue->pi_support = true;
+ else
+ queue->pi_support = false;
init_completion(&queue->cm_done);
if (idx > 0)
@@ -723,7 +768,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->reserved_tags = 2; /* connect + keep-alive */
set->numa_node = nctrl->numa_node;
set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
+ NVME_RDMA_DATA_SGL_SIZE;
set->driver_data = ctrl;
set->nr_hw_queues = 1;
set->timeout = ADMIN_TIMEOUT;
@@ -737,7 +782,10 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->numa_node = nctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE;
set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
+ NVME_RDMA_DATA_SGL_SIZE;
+ if (nctrl->max_integrity_segments)
+ set->cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
set->driver_data = ctrl;
set->nr_hw_queues = nctrl->queue_count - 1;
set->timeout = NVME_IO_TIMEOUT;
@@ -770,6 +818,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool new)
{
+ bool pi_capable = false;
int error;
error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
@@ -779,7 +828,13 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
ctrl->device = ctrl->queues[0].device;
ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device);
- ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
+ /* T10-PI support */
+ if (ctrl->device->dev->attrs.device_cap_flags &
+ IB_DEVICE_INTEGRITY_HANDOVER)
+ pi_capable = true;
+
+ ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
+ pi_capable);
/*
* Bind the async event SQE DMA mapping to the admin queue lifetime.
@@ -821,6 +876,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
ctrl->ctrl.max_segments = ctrl->max_fr_pages;
ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
+ if (pi_capable)
+ ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
+ else
+ ctrl->ctrl.max_integrity_segments = 0;
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1149,17 +1208,29 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
+ struct list_head *pool = &queue->qp->rdma_mrs;
if (!blk_rq_nr_phys_segments(rq))
return;
+ if (blk_integrity_rq(rq)) {
+ ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents, rq_dma_dir(rq));
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+ }
+
+ if (req->use_sig_mr)
+ pool = &queue->qp->sig_mrs;
+
if (req->mr) {
- ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+ ib_mr_pool_put(queue->qp, pool, req->mr);
req->mr = NULL;
}
- ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
- sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
}
static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1178,7 +1249,7 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
int count)
{
struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
- struct scatterlist *sgl = req->sg_table.sgl;
+ struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
struct ib_sge *sge = &req->sge[1];
u32 len = 0;
int i;
@@ -1203,8 +1274,8 @@ static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
{
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
- sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
- put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
+ sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
+ put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
return 0;
@@ -1225,7 +1296,8 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
* Align the MR to a 4K page size to match the ctrl page size and
* the block virtual boundary.
*/
- nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
+ nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
+ SZ_4K);
if (unlikely(nr < count)) {
ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
req->mr = NULL;
@@ -1256,12 +1328,125 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
return 0;
}
+static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_domain *domain,
+ u16 control, u8 pi_type)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.bg_type = IB_T10DIF_CRC;
+ domain->sig.dif.pi_interval = 1 << bi->interval_exp;
+ domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ domain->sig.dif.ref_remap = true;
+
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_escape = true;
+ if (pi_type == NVME_NS_DPS_PI_TYPE3)
+ domain->sig.dif.ref_escape = true;
+}
+
+static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
+ u8 pi_type)
+{
+ u16 control = le16_to_cpu(cmd->rw.control);
+
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+ if (control & NVME_RW_PRINFO_PRACT) {
+ /* for WRITE_INSERT/READ_STRIP no memory domain */
+ sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ /* Clear the PRACT bit since HCA will generate/verify the PI */
+ control &= ~NVME_RW_PRINFO_PRACT;
+ cmd->rw.control = cpu_to_le16(control);
+ } else {
+ /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ }
+}
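In short: with PRACT set the HCA generates or strips the PI inline (WRITE_INSERT/READ_STRIP), so only the wire domain carries a T10-DIF signature and PRACT is cleared before the command goes on the wire; without PRACT the host buffers already carry PI (WRITE_PASS/READ_PASS), so both the wire and memory domains are configured.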
+
+static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
+{
+ *mask = 0;
+ if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
+ *mask |= IB_SIG_CHECK_REFTAG;
+ if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
+ *mask |= IB_SIG_CHECK_GUARD;
+}
+
+static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "SIG");
+}
+
+static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count, int pi_count)
+{
+ struct nvme_rdma_sgl *sgl = &req->data_sgl;
+ struct ib_reg_wr *wr = &req->reg_wr;
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct nvme_ns *ns = rq->q->queuedata;
+ struct bio *bio = rq->bio;
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ int nr;
+
+ req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
+ if (WARN_ON_ONCE(!req->mr))
+ return -EAGAIN;
+
+ nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
+ req->metadata_sgl->sg_table.sgl, pi_count, NULL,
+ SZ_4K);
+ if (unlikely(nr))
+ goto mr_put;
+
+ nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c,
+ req->mr->sig_attrs, ns->pi_type);
+ nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
+
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ req->reg_cqe.done = nvme_rdma_sig_done;
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
+ wr->wr.wr_cqe = &req->reg_cqe;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = req->mr;
+ wr->key = req->mr->rkey;
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ sg->addr = cpu_to_le64(req->mr->iova);
+ put_unaligned_le24(req->mr->length, sg->length);
+ put_unaligned_le32(req->mr->rkey, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+
+ return 0;
+
+mr_put:
+ ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
+ req->mr = NULL;
+ if (nr < 0)
+ return nr;
+ return -EINVAL;
+}
+
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct request *rq, struct nvme_command *c)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
+ int pi_count = 0;
int count, ret;
req->num_sge = 1;
@@ -1272,22 +1457,52 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
return nvme_rdma_set_sg_null(c);
- req->sg_table.sgl = req->first_sgl;
- ret = sg_alloc_table_chained(&req->sg_table,
- blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
+ req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
+ ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
+ blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
NVME_INLINE_SG_CNT);
if (ret)
return -ENOMEM;
- req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
+ req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
+ req->data_sgl.sg_table.sgl);
- count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
- rq_dma_dir(rq));
+ count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
+ req->data_sgl.nents, rq_dma_dir(rq));
if (unlikely(count <= 0)) {
ret = -EIO;
goto out_free_table;
}
+ if (blk_integrity_rq(rq)) {
+ req->metadata_sgl->sg_table.sgl =
+ (struct scatterlist *)(req->metadata_sgl + 1);
+ ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
+ blk_rq_count_integrity_sg(rq->q, rq->bio),
+ req->metadata_sgl->sg_table.sgl,
+ NVME_INLINE_METADATA_SG_CNT);
+ if (unlikely(ret)) {
+ ret = -ENOMEM;
+ goto out_unmap_sg;
+ }
+
+ req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
+ rq->bio, req->metadata_sgl->sg_table.sgl);
+ pi_count = ib_dma_map_sg(ibdev,
+ req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents,
+ rq_dma_dir(rq));
+ if (unlikely(pi_count <= 0)) {
+ ret = -EIO;
+ goto out_free_pi_table;
+ }
+ }
+
+ if (req->use_sig_mr) {
+ ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
+ goto out;
+ }
+
if (count <= dev->num_inline_segments) {
if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
queue->ctrl->use_inline_data &&
@@ -1306,14 +1521,23 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out:
if (unlikely(ret))
- goto out_unmap_sg;
+ goto out_unmap_pi_sg;
return 0;
+out_unmap_pi_sg:
+ if (blk_integrity_rq(rq))
+ ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents, rq_dma_dir(rq));
+out_free_pi_table:
+ if (blk_integrity_rq(rq))
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
out_unmap_sg:
- ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
out_free_table:
- sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
return ret;
}
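/*
 * The error handling above follows the usual kernel goto-ladder: unwind
 * labels are ordered so that a jump releases exactly what was set up
 * before the failure point. A stripped-down sketch of the shape, with
 * hypothetical stubs standing in for the real setup/teardown steps:
 */
static int example_setup_data(void) { return 0; }	/* hypothetical */
static int example_setup_meta(void) { return 0; }	/* hypothetical */
static int example_map_all(void) { return 0; }		/* hypothetical */
static void example_teardown_meta(void) { }		/* hypothetical */
static void example_teardown_data(void) { }		/* hypothetical */

static int example_ladder(void)
{
	int ret;

	ret = example_setup_data();
	if (ret)
		return ret;
	ret = example_setup_meta();
	if (ret)
		goto undo_data;
	ret = example_map_all();
	if (ret)
		goto undo_meta;
	return 0;

undo_meta:
	example_teardown_meta();
undo_data:
	example_teardown_data();
	return ret;
}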
@@ -1761,6 +1985,15 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ queue->pi_support &&
+ (c->common.opcode == nvme_cmd_write ||
+ c->common.opcode == nvme_cmd_read) &&
+ nvme_ns_has_pi(ns))
+ req->use_sig_mr = true;
+ else
+ req->use_sig_mr = false;
+
err = nvme_rdma_map_data(queue, rq, c);
if (unlikely(err < 0)) {
dev_err(queue->ctrl->ctrl.device,
@@ -1801,12 +2034,46 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
return ib_process_cq_direct(queue->ib_cq, -1);
}
+static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct ib_mr_status mr_status;
+ int ret;
+
+ ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ nvme_req(rq)->status = NVME_SC_INVALID_PI;
+ return;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
+ break;
+ }
+ pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type, mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ }
+}
+
static void nvme_rdma_complete_rq(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct ib_device *ibdev = queue->device->dev;
+ if (req->use_sig_mr)
+ nvme_rdma_check_pi_status(req);
+
nvme_rdma_unmap_data(queue, rq);
ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
DMA_TO_DEVICE);
@@ -1926,7 +2193,7 @@ out_fail:
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.name = "rdma",
.module = THIS_MODULE,
- .flags = NVME_F_FABRICS,
+ .flags = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c15a92163c1f..1843110ec34f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -60,6 +60,7 @@ struct nvme_tcp_request {
enum nvme_tcp_queue_flags {
NVME_TCP_Q_ALLOCATED = 0,
NVME_TCP_Q_LIVE = 1,
+ NVME_TCP_Q_POLLING = 2,
};
enum nvme_tcp_recv_state {
@@ -75,6 +76,7 @@ struct nvme_tcp_queue {
int io_cpu;
spinlock_t lock;
+ struct mutex send_mutex;
struct list_head send_list;
/* recv state */
@@ -131,6 +133,7 @@ static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static struct blk_mq_ops nvme_tcp_mq_ops;
static struct blk_mq_ops nvme_tcp_admin_mq_ops;
+static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
@@ -257,15 +260,29 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
}
}
-static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
+static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ bool sync)
{
struct nvme_tcp_queue *queue = req->queue;
+ bool empty;
spin_lock(&queue->lock);
+ empty = list_empty(&queue->send_list) && !queue->request;
list_add_tail(&req->entry, &queue->send_list);
spin_unlock(&queue->lock);
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ /*
+ * If we're first on the send_list, try to send directly; otherwise
+ * queue io_work. Only do the direct send when we're on the queue's
+ * io_cpu, so we don't introduce contention.
+ */
+ if (queue->io_cpu == smp_processor_id() &&
+ sync && empty && mutex_trylock(&queue->send_mutex)) {
+ nvme_tcp_try_send(queue);
+ mutex_unlock(&queue->send_mutex);
+ } else {
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ }
}
static inline struct nvme_tcp_request *
@@ -578,7 +595,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
req->state = NVME_TCP_SEND_H2C_PDU;
req->offset = 0;
- nvme_tcp_queue_request(req);
+ nvme_tcp_queue_request(req, false);
return 0;
}
@@ -794,11 +811,12 @@ static void nvme_tcp_data_ready(struct sock *sk)
{
struct nvme_tcp_queue *queue;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
- if (likely(queue && queue->rd_enabled))
+ if (likely(queue && queue->rd_enabled) &&
+ !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void nvme_tcp_write_space(struct sock *sk)
@@ -867,7 +885,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
if (last && !queue->data_digest)
flags |= MSG_EOR;
else
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
/* can't zcopy slab pages */
if (unlikely(PageSlab(page))) {
@@ -906,11 +924,16 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
struct nvme_tcp_queue *queue = req->queue;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
bool inline_data = nvme_tcp_has_inline_data(req);
- int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) + hdgst - req->offset;
+ int flags = MSG_DONTWAIT;
int ret;
+ if (inline_data)
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+ else
+ flags |= MSG_EOR;
+
if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
@@ -949,7 +972,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
offset_in_page(pdu) + req->offset, len,
- MSG_DONTWAIT | MSG_MORE);
+ MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
if (unlikely(ret <= 0))
return ret;
@@ -1063,11 +1086,14 @@ static void nvme_tcp_io_work(struct work_struct *w)
bool pending = false;
int result;
- result = nvme_tcp_try_send(queue);
- if (result > 0)
- pending = true;
- else if (unlikely(result < 0))
- break;
+ if (mutex_trylock(&queue->send_mutex)) {
+ result = nvme_tcp_try_send(queue);
+ mutex_unlock(&queue->send_mutex);
+ if (result > 0)
+ pending = true;
+ else if (unlikely(result < 0))
+ break;
+ }
result = nvme_tcp_try_recv(queue);
if (result > 0)
@@ -1313,12 +1339,12 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- struct linger sol = { .l_onoff = 1, .l_linger = 0 };
- int ret, opt, rcv_pdu_size;
+ int ret, rcv_pdu_size;
queue->ctrl = ctrl;
INIT_LIST_HEAD(&queue->send_list);
spin_lock_init(&queue->lock);
+ mutex_init(&queue->send_mutex);
INIT_WORK(&queue->io_work, nvme_tcp_io_work);
queue->queue_size = queue_size;
@@ -1337,60 +1363,24 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
}
/* Single syn retry */
- opt = 1;
- ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
- (char *)&opt, sizeof(opt));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set TCP_SYNCNT sock opt %d\n", ret);
- goto err_sock;
- }
+ tcp_sock_set_syncnt(queue->sock->sk, 1);
/* Set TCP no delay */
- opt = 1;
- ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
- TCP_NODELAY, (char *)&opt, sizeof(opt));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set TCP_NODELAY sock opt %d\n", ret);
- goto err_sock;
- }
+ tcp_sock_set_nodelay(queue->sock->sk);
/*
* Cleanup whatever is sitting in the TCP transmit queue on socket
* close. This is done to prevent stale data from being sent should
* the network connection be restored before TCP times out.
*/
- ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
- (char *)&sol, sizeof(sol));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set SO_LINGER sock opt %d\n", ret);
- goto err_sock;
- }
+ sock_no_linger(queue->sock->sk);
- if (so_priority > 0) {
- ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_PRIORITY,
- (char *)&so_priority, sizeof(so_priority));
- if (ret) {
- dev_err(ctrl->ctrl.device,
- "failed to set SO_PRIORITY sock opt, ret %d\n",
- ret);
- goto err_sock;
- }
- }
+ if (so_priority > 0)
+ sock_set_priority(queue->sock->sk, so_priority);
/* Set socket type of service */
- if (nctrl->opts->tos >= 0) {
- opt = nctrl->opts->tos;
- ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
- (char *)&opt, sizeof(opt));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set IP_TOS sock opt %d\n", ret);
- goto err_sock;
- }
- }
+ if (nctrl->opts->tos >= 0)
+ ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
queue->sock->sk->sk_allocation = GFP_ATOMIC;
nvme_tcp_set_queue_io_cpu(queue);
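/*
 * Usage sketch of the socket option helpers adopted above. They replace
 * the old kernel_setsockopt() boilerplate; apart from
 * tcp_sock_set_syncnt() (which validates its argument and returns an
 * int that this patch ignores) they return void, so there are no error
 * paths to unwind. Values shown are examples only:
 */
static void example_tune_sock(struct socket *sock)
{
	tcp_sock_set_syncnt(sock->sk, 1);	/* single SYN retry */
	tcp_sock_set_nodelay(sock->sk);		/* disable Nagle */
	sock_no_linger(sock->sk);		/* drop TX queue on close */
	sock_set_priority(sock->sk, 4);		/* example priority */
	ip_sock_set_tos(sock->sk, 0x10);	/* example TOS value */
}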
@@ -1543,6 +1533,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
set->reserved_tags = 2; /* connect + keep-alive */
set->numa_node = NUMA_NO_NODE;
+ set->flags = BLK_MQ_F_BLOCKING;
set->cmd_size = sizeof(struct nvme_tcp_request);
set->driver_data = ctrl;
set->nr_hw_queues = 1;
@@ -1554,7 +1545,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
set->queue_depth = nctrl->sqsize + 1;
set->reserved_tags = 1; /* fabric connect */
set->numa_node = NUMA_NO_NODE;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
set->cmd_size = sizeof(struct nvme_tcp_request);
set->driver_data = ctrl;
set->nr_hw_queues = nctrl->queue_count - 1;
@@ -2113,7 +2104,7 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
ctrl->async_req.curr_bio = NULL;
ctrl->async_req.data_len = 0;
- nvme_tcp_queue_request(&ctrl->async_req);
+ nvme_tcp_queue_request(&ctrl->async_req, true);
}
static enum blk_eh_timer_return
@@ -2244,7 +2235,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- nvme_tcp_queue_request(req);
+ nvme_tcp_queue_request(req, true);
return BLK_STS_OK;
}
@@ -2302,9 +2293,11 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
return 0;
+ set_bit(NVME_TCP_Q_POLLING, &queue->flags);
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
nvme_tcp_try_recv(queue);
+ clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
return queue->nr_cqe;
}
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index d7f48c0fb311..4474952d64c6 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -4,6 +4,7 @@ config NVME_TARGET
tristate "NVMe Target support"
depends on BLOCK
depends on CONFIGFS_FS
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
select SGL_ALLOC
help
This enables target side support for the NVMe protocol, that is
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 9d6f75cfa77c..1db8c0498668 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -295,7 +295,7 @@ out:
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
+ if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
return;
switch (req->cmd->get_log_page.lid) {
@@ -341,6 +341,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
+ u32 cmd_capsule_size;
u16 status = 0;
id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -433,9 +434,15 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
- /* Max command capsule size is sqe + single page of in-capsule data */
- id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
- req->port->inline_data_size) / 16);
+ /*
+ * Max command capsule size is sqe + in-capsule data size.
+ * Disable in-capsule data for Metadata capable controllers.
+ */
+ cmd_capsule_size = sizeof(struct nvme_command);
+ if (!ctrl->pi_support)
+ cmd_capsule_size += req->port->inline_data_size;
+ id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
+
/* Max response capsule size is cqe */
id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
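/*
 * Worked example for the capsule sizing above, with ioccsz/iorcsz in
 * 16-byte units and sizeof(struct nvme_command) == 64:
 *
 *	no PI, 4K inline data:	ioccsz = (64 + 4096) / 16 = 260
 *	PI enabled:		ioccsz =  64 / 16         =   4
 *	16-byte cqe:		iorcsz =  16 / 16         =   1
 */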
@@ -465,6 +472,7 @@ out:
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_ns *ns;
struct nvme_id_ns *id;
u16 status = 0;
@@ -482,10 +490,12 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
}
/* return an all zeroed buffer if we can't find an active namespace */
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+ ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
if (!ns)
goto done;
+ nvmet_ns_revalidate(ns);
+
/*
* nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
@@ -521,6 +531,16 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
id->lbaf[0].ds = ns->blksize_shift;
+ if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
+ id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
+ NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
+ NVME_NS_DPC_PI_TYPE3;
+ id->mc = NVME_MC_EXTENDED_LBA;
+ id->dps = ns->pi_type;
+ id->flbas = NVME_NS_FLBAS_META_EXT;
+ id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
+ }
+
if (ns->readonly)
id->nsattr |= (1 << 0);
nvmet_put_namespace(ns);
@@ -625,7 +645,7 @@ out:
static void nvmet_execute_identify(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
return;
switch (req->cmd->identify.cns) {
@@ -654,7 +674,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
*/
static void nvmet_execute_abort(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
nvmet_set_result(req, 1);
nvmet_req_complete(req, 0);
@@ -743,7 +763,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
u16 nsqr;
u16 ncqr;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
switch (cdw10 & 0xff) {
@@ -815,7 +835,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
- if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
+ if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
return;
switch (cdw10 & 0xff) {
@@ -882,7 +902,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
mutex_lock(&ctrl->lock);
@@ -901,7 +921,7 @@ void nvmet_execute_keep_alive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
pr_debug("ctrl %d update keep-alive timer for %d secs\n",
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 58cabd7b6fc5..419e0d4ce79b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -20,61 +20,71 @@ static const struct config_item_type nvmet_subsys_type;
static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;
-static const struct nvmet_transport_name {
+struct nvmet_type_name_map {
u8 type;
const char *name;
-} nvmet_transport_names[] = {
+};
+
+static struct nvmet_type_name_map nvmet_transport[] = {
{ NVMF_TRTYPE_RDMA, "rdma" },
{ NVMF_TRTYPE_FC, "fc" },
{ NVMF_TRTYPE_TCP, "tcp" },
{ NVMF_TRTYPE_LOOP, "loop" },
};
+static const struct nvmet_type_name_map nvmet_addr_family[] = {
+ { NVMF_ADDR_FAMILY_PCI, "pcie" },
+ { NVMF_ADDR_FAMILY_IP4, "ipv4" },
+ { NVMF_ADDR_FAMILY_IP6, "ipv6" },
+ { NVMF_ADDR_FAMILY_IB, "ib" },
+ { NVMF_ADDR_FAMILY_FC, "fc" },
+ { NVMF_ADDR_FAMILY_LOOP, "loop" },
+};
+
+static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
+{
+ if (p->enabled)
+ pr_err("Disable port '%u' before changing attribute in %s\n",
+ le16_to_cpu(p->disc_addr.portid), caller);
+ return p->enabled;
+}
+
/*
* nvmet_port Generic ConfigFS definitions.
* Used in any place in the ConfigFS tree that refers to an address.
*/
-static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
- char *page)
+static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
{
- switch (to_nvmet_port(item)->disc_addr.adrfam) {
- case NVMF_ADDR_FAMILY_IP4:
- return sprintf(page, "ipv4\n");
- case NVMF_ADDR_FAMILY_IP6:
- return sprintf(page, "ipv6\n");
- case NVMF_ADDR_FAMILY_IB:
- return sprintf(page, "ib\n");
- case NVMF_ADDR_FAMILY_FC:
- return sprintf(page, "fc\n");
- default:
- return sprintf(page, "\n");
+ u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+ if (nvmet_addr_family[i].type == adrfam)
+ return sprintf(page, "%s\n", nvmet_addr_family[i].name);
}
+
+ return sprintf(page, "\n");
}
static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_port *port = to_nvmet_port(item);
+ int i;
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
- if (sysfs_streq(page, "ipv4")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
- } else if (sysfs_streq(page, "ipv6")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
- } else if (sysfs_streq(page, "ib")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
- } else if (sysfs_streq(page, "fc")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
- } else {
- pr_err("Invalid value '%s' for adrfam\n", page);
- return -EINVAL;
+ for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+ if (sysfs_streq(page, nvmet_addr_family[i].name))
+ goto found;
}
+ pr_err("Invalid value '%s' for adrfam\n", page);
+ return -EINVAL;
+
+found:
+ port->disc_addr.adrfam = nvmet_addr_family[i].type;
return count;
}
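/*
 * The nvmet_type_name_map tables above replace open-coded switch/else
 * chains. The store-side lookup could even be factored into a generic
 * helper like the hypothetical sketch below (the patch keeps the loops
 * inline instead):
 */
static int nvmet_type_name_to_type(const struct nvmet_type_name_map *map,
		int nr_entries, const char *page)
{
	int i;

	for (i = 0; i < nr_entries; i++) {
		if (sysfs_streq(page, map[i].name))
			return map[i].type;
	}
	return -EINVAL;
}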
@@ -100,11 +110,9 @@ static ssize_t nvmet_addr_portid_store(struct config_item *item,
return -EINVAL;
}
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
+
port->disc_addr.portid = cpu_to_le16(portid);
return count;
}
@@ -130,11 +138,8 @@ static ssize_t nvmet_addr_traddr_store(struct config_item *item,
return -EINVAL;
}
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
return -EINVAL;
@@ -143,20 +148,24 @@ static ssize_t nvmet_addr_traddr_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, addr_traddr);
-static ssize_t nvmet_addr_treq_show(struct config_item *item,
- char *page)
+static const struct nvmet_type_name_map nvmet_addr_treq[] = {
+ { NVMF_TREQ_NOT_SPECIFIED, "not specified" },
+ { NVMF_TREQ_REQUIRED, "required" },
+ { NVMF_TREQ_NOT_REQUIRED, "not required" },
+};
+
+static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
- switch (to_nvmet_port(item)->disc_addr.treq &
- NVME_TREQ_SECURE_CHANNEL_MASK) {
- case NVMF_TREQ_NOT_SPECIFIED:
- return sprintf(page, "not specified\n");
- case NVMF_TREQ_REQUIRED:
- return sprintf(page, "required\n");
- case NVMF_TREQ_NOT_REQUIRED:
- return sprintf(page, "not required\n");
- default:
- return sprintf(page, "\n");
+ u8 treq = to_nvmet_port(item)->disc_addr.treq &
+ NVME_TREQ_SECURE_CHANNEL_MASK;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+ if (treq == nvmet_addr_treq[i].type)
+ return sprintf(page, "%s\n", nvmet_addr_treq[i].name);
}
+
+ return sprintf(page, "\n");
}
static ssize_t nvmet_addr_treq_store(struct config_item *item,
@@ -164,25 +173,22 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
+ int i;
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
- if (sysfs_streq(page, "not specified")) {
- treq |= NVMF_TREQ_NOT_SPECIFIED;
- } else if (sysfs_streq(page, "required")) {
- treq |= NVMF_TREQ_REQUIRED;
- } else if (sysfs_streq(page, "not required")) {
- treq |= NVMF_TREQ_NOT_REQUIRED;
- } else {
- pr_err("Invalid value '%s' for treq\n", page);
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+ if (sysfs_streq(page, nvmet_addr_treq[i].name))
+ goto found;
}
- port->disc_addr.treq = treq;
+ pr_err("Invalid value '%s' for treq\n", page);
+ return -EINVAL;
+
+found:
+ treq |= nvmet_addr_treq[i].type;
+ port->disc_addr.treq = treq;
return count;
}
@@ -206,11 +212,8 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
pr_err("Invalid value '%s' for trsvcid\n", page);
return -EINVAL;
}
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
return -EINVAL;
@@ -233,11 +236,8 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
struct nvmet_port *port = to_nvmet_port(item);
int ret;
- if (port->enabled) {
- pr_err("Cannot modify inline_data_size while port enabled\n");
- pr_err("Disable the port before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
ret = kstrtoint(page, 0, &port->inline_data_size);
if (ret) {
pr_err("Invalid value '%s' for inline_data_size\n", page);
@@ -248,16 +248,45 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
+}
+
+static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ bool val;
+
+ if (strtobool(page, &val))
+ return -EINVAL;
+
+ if (port->enabled) {
+ pr_err("Disable port before setting pi_enable value.\n");
+ return -EACCES;
+ }
+
+ port->pi_enable = val;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_pi_enable);
+#endif
+
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
char *page)
{
struct nvmet_port *port = to_nvmet_port(item);
int i;
- for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
- if (port->disc_addr.trtype != nvmet_transport_names[i].type)
- continue;
- return sprintf(page, "%s\n", nvmet_transport_names[i].name);
+ for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+ if (port->disc_addr.trtype == nvmet_transport[i].type)
+ return sprintf(page, "%s\n", nvmet_transport[i].name);
}
return sprintf(page, "\n");
@@ -276,22 +305,20 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item,
struct nvmet_port *port = to_nvmet_port(item);
int i;
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
- for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
- if (sysfs_streq(page, nvmet_transport_names[i].name))
+ for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+ if (sysfs_streq(page, nvmet_transport[i].name))
goto found;
}
pr_err("Invalid value '%s' for trtype\n", page);
return -EINVAL;
+
found:
memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
- port->disc_addr.trtype = nvmet_transport_names[i].type;
+ port->disc_addr.trtype = nvmet_transport[i].type;
if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
nvmet_port_init_tsas_rdma(port);
return count;
@@ -327,7 +354,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
kfree(ns->device_path);
ret = -ENOMEM;
- ns->device_path = kstrndup(page, len, GFP_KERNEL);
+ ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
if (!ns->device_path)
goto out_unlock;
@@ -543,6 +570,31 @@ static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool val;
+
+ if (strtobool(page, &val))
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&ns->subsys->lock);
+ if (!ns->enabled) {
+ pr_err("enable ns before revalidate.\n");
+ mutex_unlock(&ns->subsys->lock);
+ return -EINVAL;
+ }
+ nvmet_ns_revalidate(ns);
+ mutex_unlock(&ns->subsys->lock);
+ return count;
+}
+
+CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
+
static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_device_path,
&nvmet_ns_attr_device_nguid,
@@ -550,6 +602,7 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_ana_grpid,
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
+ &nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
&nvmet_ns_attr_p2pmem,
#endif
@@ -963,7 +1016,7 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
return -EINVAL;
}
- new_model_number = kstrndup(page, len, GFP_KERNEL);
+ new_model_number = kmemdup_nul(page, len, GFP_KERNEL);
if (!new_model_number)
return -ENOMEM;
@@ -987,6 +1040,28 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
+}
+
+static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ bool pi_enable;
+
+ if (strtobool(page, &pi_enable))
+ return -EINVAL;
+
+ subsys->pi_support = pi_enable;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
+#endif
+
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
&nvmet_subsys_attr_attr_version,
@@ -994,6 +1069,9 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ &nvmet_subsys_attr_attr_pi_enable,
+#endif
NULL,
};
@@ -1149,10 +1227,7 @@ static const struct config_item_type nvmet_referrals_type = {
.ct_group_ops = &nvmet_referral_group_ops,
};
-static struct {
- enum nvme_ana_state state;
- const char *name;
-} nvmet_ana_state_names[] = {
+static struct nvmet_type_name_map nvmet_ana_state[] = {
{ NVME_ANA_OPTIMIZED, "optimized" },
{ NVME_ANA_NONOPTIMIZED, "non-optimized" },
{ NVME_ANA_INACCESSIBLE, "inaccessible" },
@@ -1167,10 +1242,9 @@ static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
int i;
- for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
- if (state != nvmet_ana_state_names[i].state)
- continue;
- return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+ if (state == nvmet_ana_state[i].type)
+ return sprintf(page, "%s\n", nvmet_ana_state[i].name);
}
return sprintf(page, "\n");
@@ -1180,10 +1254,11 @@ static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_ana_group *grp = to_ana_group(item);
+ enum nvme_ana_state *ana_state = grp->port->ana_state;
int i;
- for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
- if (sysfs_streq(page, nvmet_ana_state_names[i].name))
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+ if (sysfs_streq(page, nvmet_ana_state[i].name))
goto found;
}
@@ -1192,10 +1267,9 @@ static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
found:
down_write(&nvmet_ana_sem);
- grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
+ ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
nvmet_ana_chgcnt++;
up_write(&nvmet_ana_sem);
-
nvmet_port_send_ana_event(grp->port);
return count;
}
@@ -1297,6 +1371,9 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_trsvcid,
&nvmet_attr_addr_trtype,
&nvmet_attr_param_inline_data_size,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ &nvmet_attr_param_pi_enable,
+#endif
NULL,
};
@@ -1346,6 +1423,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
config_group_init_type_name(&port->group, name, &nvmet_port_type);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b685f99d56a1..6392bcd30bd7 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -134,15 +134,10 @@ static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
struct nvmet_async_event *aen;
struct nvmet_req *req;
- while (1) {
- mutex_lock(&ctrl->lock);
- aen = list_first_entry_or_null(&ctrl->async_events,
- struct nvmet_async_event, entry);
- if (!aen || !ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- break;
- }
-
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
+ aen = list_first_entry(&ctrl->async_events,
+ struct nvmet_async_event, entry);
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
if (status == 0)
nvmet_set_result(req, nvmet_async_event_result(aen));
@@ -151,20 +146,21 @@ static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
kfree(aen);
mutex_unlock(&ctrl->lock);
+ trace_nvmet_async_event(ctrl, req->cqe->result.u32);
nvmet_req_complete(req, status);
+ mutex_lock(&ctrl->lock);
}
+ mutex_unlock(&ctrl->lock);
}
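/*
 * Note on the reworked loop above: ctrl->lock is dropped around
 * nvmet_req_complete() and re-acquired for the next iteration. The
 * likely reason (an assumption, not stated in the patch) is that
 * completion calls back into the transport and must not run under the
 * controller lock.
 */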
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
- struct nvmet_req *req;
+ struct nvmet_async_event *aen, *tmp;
mutex_lock(&ctrl->lock);
- while (ctrl->nr_async_event_cmds) {
- req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
- mutex_lock(&ctrl->lock);
+ list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
+ list_del(&aen->entry);
+ kfree(aen);
}
mutex_unlock(&ctrl->lock);
}
@@ -322,12 +318,21 @@ int nvmet_enable_port(struct nvmet_port *port)
if (!try_module_get(ops->owner))
return -EINVAL;
- ret = ops->add_port(port);
- if (ret) {
- module_put(ops->owner);
- return ret;
+ /*
+ * If the user requested PI support and the transport isn't pi capable,
+ * don't enable the port.
+ */
+ if (port->pi_enable && !ops->metadata_support) {
+ pr_err("T10-PI is not supported by transport type %d\n",
+ port->disc_addr.trtype);
+ ret = -EINVAL;
+ goto out_put;
}
+ ret = ops->add_port(port);
+ if (ret)
+ goto out_put;
+
/* If the transport didn't set inline_data_size, then disable it. */
if (port->inline_data_size < 0)
port->inline_data_size = 0;
@@ -335,6 +340,10 @@ int nvmet_enable_port(struct nvmet_port *port)
port->enabled = true;
port->tr_ops = ops;
return 0;
+
+out_put:
+ module_put(ops->owner);
+ return ret;
}
void nvmet_disable_port(struct nvmet_port *port)
@@ -514,6 +523,19 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
ns->nsid);
}
+void nvmet_ns_revalidate(struct nvmet_ns *ns)
+{
+ loff_t oldsize = ns->size;
+
+ if (ns->bdev)
+ nvmet_bdev_ns_revalidate(ns);
+ else
+ nvmet_file_ns_revalidate(ns);
+
+ if (oldsize != ns->size)
+ nvmet_ns_changed(ns->subsys, ns->nsid);
+}
+
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
@@ -764,10 +786,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
* If this is the admin queue, complete all AERs so that our
* queue doesn't have outstanding requests on it.
*/
- if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
+ if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
nvmet_async_events_process(ctrl, status);
- nvmet_async_events_free(ctrl);
- }
percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
@@ -873,8 +893,11 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sq = sq;
req->ops = ops;
req->sg = NULL;
+ req->metadata_sg = NULL;
req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
req->transfer_len = 0;
+ req->metadata_len = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
@@ -936,9 +959,9 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
-bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
- if (unlikely(data_len != req->transfer_len)) {
+ if (unlikely(len != req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
return false;
@@ -946,7 +969,7 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
return true;
}
-EXPORT_SYMBOL_GPL(nvmet_check_data_len);
+EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
@@ -959,50 +982,90 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
return true;
}
-int nvmet_req_alloc_sgl(struct nvmet_req *req)
+static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
- struct pci_dev *p2p_dev = NULL;
+ return req->transfer_len - req->metadata_len;
+}
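/*
 * Worked example of the split above, assuming transfer_len covers data
 * plus metadata, with 8 logical blocks of 4K and an 8-byte metadata
 * tuple per block:
 *
 *	req->metadata_len         = 8 * 8          =    64
 *	req->transfer_len         = 8 * 4096 + 64  = 32832
 *	nvmet_data_transfer_len() = 32832 - 64     = 32768
 */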
- if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
- if (req->sq->ctrl && req->ns)
- p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
- req->ns->nsid);
+static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+{
+ req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+ nvmet_data_transfer_len(req));
+ if (!req->sg)
+ goto out_err;
- req->p2p_dev = NULL;
- if (req->sq->qid && p2p_dev) {
- req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
- req->transfer_len);
- if (req->sg) {
- req->p2p_dev = p2p_dev;
- return 0;
- }
- }
+ if (req->metadata_len) {
+ req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+ &req->metadata_sg_cnt, req->metadata_len);
+ if (!req->metadata_sg)
+ goto out_free_sg;
+ }
+ return 0;
+out_free_sg:
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+out_err:
+ return -ENOMEM;
+}
- /*
- * If no P2P memory was available we fallback to using
- * regular memory
- */
+static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+{
+ if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
+ return false;
+
+ if (req->sq->ctrl && req->sq->qid && req->ns) {
+ req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+ req->ns->nsid);
+ if (req->p2p_dev)
+ return true;
}
- req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+ req->p2p_dev = NULL;
+ return false;
+}
+
+int nvmet_req_alloc_sgls(struct nvmet_req *req)
+{
+ if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+ return 0;
+
+ req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
+ &req->sg_cnt);
if (unlikely(!req->sg))
- return -ENOMEM;
+ goto out;
+
+ if (req->metadata_len) {
+ req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
+ &req->metadata_sg_cnt);
+ if (unlikely(!req->metadata_sg))
+ goto out_free;
+ }
return 0;
+out_free:
+ sgl_free(req->sg);
+out:
+ return -ENOMEM;
}
-EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
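/*
 * Behavior note: if nvmet_req_find_p2p_dev() finds a device but the P2P
 * allocation itself fails, the condition in nvmet_req_alloc_sgls() is
 * false and the code falls through to plain sgl_alloc(), preserving the
 * old "fall back to regular memory" semantics.
 */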
-void nvmet_req_free_sgl(struct nvmet_req *req)
+void nvmet_req_free_sgls(struct nvmet_req *req)
{
- if (req->p2p_dev)
+ if (req->p2p_dev) {
pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
- else
+ if (req->metadata_sg)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+ } else {
sgl_free(req->sg);
+ if (req->metadata_sg)
+ sgl_free(req->metadata_sg);
+ }
req->sg = NULL;
+ req->metadata_sg = NULL;
req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
}
-EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
static inline bool nvmet_cc_en(u32 cc)
{
@@ -1357,6 +1420,7 @@ static void nvmet_ctrl_free(struct kref *ref)
ida_simple_remove(&cntlid_ida, ctrl->cntlid);
+ nvmet_async_events_free(ctrl);
kfree(ctrl->sqs);
kfree(ctrl->cqs);
kfree(ctrl->changed_ns_list);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 0c2274b21e15..40cf0b6e6c9d 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -171,7 +171,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
u16 status = 0;
void *buffer;
- if (!nvmet_check_data_len(req, data_len))
+ if (!nvmet_check_transfer_len(req, data_len))
return;
if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
@@ -244,7 +244,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
const char model[] = "Linux";
u16 status = 0;
- if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
return;
if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
@@ -298,7 +298,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
switch (cdw10 & 0xff) {
@@ -324,7 +324,7 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
switch (cdw10 & 0xff) {
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index feef15c38ec9..42bd12b8bf00 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -12,7 +12,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
u64 val = le64_to_cpu(req->cmd->prop_set.value);
u16 status = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
if (req->cmd->prop_set.attrib & 1) {
@@ -41,7 +41,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
u16 status = 0;
u64 val = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
if (req->cmd->prop_get.attrib & 1) {
@@ -156,7 +156,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
- if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
d = kmalloc(sizeof(*d), GFP_KERNEL);
@@ -197,6 +197,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
+ ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
+
uuid_copy(&ctrl->hostid, &d->hostid);
status = nvmet_install_queue(ctrl, req);
@@ -205,8 +207,9 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
- pr_info("creating controller %d for subsystem %s for NQN %s.\n",
- ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
+ pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
+ ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
+ ctrl->pi_support ? ", T10-PI is enabled" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
out:
@@ -223,7 +226,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
u16 qid = le16_to_cpu(c->qid);
u16 status = 0;
- if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
d = kmalloc(sizeof(*d), GFP_KERNEL);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index a8ceb7721640..27fd3b5aa621 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -14,6 +14,7 @@
#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
+#include "../host/fc.h"
/* *************************** Data Structures/Defines ****************** */
@@ -21,23 +22,21 @@
#define NVMET_LS_CTX_COUNT 256
-/* for this implementation, assume small single frame rqst/rsp */
-#define NVME_FC_MAX_LS_BUFFER_SIZE 2048
-
struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;
-struct nvmet_fc_ls_iod {
- struct nvmefc_tgt_ls_req *lsreq;
+struct nvmet_fc_ls_iod { /* for an LS RQST RCV */
+ struct nvmefc_ls_rsp *lsrsp;
struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
- struct list_head ls_list; /* tgtport->ls_list */
+ struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
struct nvmet_fc_tgtport *tgtport;
struct nvmet_fc_tgt_assoc *assoc;
+ void *hosthandle;
- u8 *rqstbuf;
- u8 *rspbuf;
+ union nvmefc_ls_requests *rqstbuf;
+ union nvmefc_ls_responses *rspbuf;
u16 rqstdatalen;
dma_addr_t rspdma;
@@ -46,6 +45,18 @@ struct nvmet_fc_ls_iod {
struct work_struct work;
} __aligned(sizeof(unsigned long long));
+struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
+ struct nvmefc_ls_req ls_req;
+
+ struct nvmet_fc_tgtport *tgtport;
+ void *hosthandle;
+
+ int ls_error;
+ struct list_head lsreq_list; /* tgtport->ls_req_list */
+ bool req_queued;
+};
+
+
/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
@@ -83,7 +94,6 @@ struct nvmet_fc_fcp_iod {
};
struct nvmet_fc_tgtport {
-
struct nvmet_fc_target_port fc_target_port;
struct list_head tgt_list; /* nvmet_fc_target_list */
@@ -92,9 +102,11 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_ls_iod *iod;
spinlock_t lock;
- struct list_head ls_list;
+ struct list_head ls_rcv_list;
+ struct list_head ls_req_list;
struct list_head ls_busylist;
struct list_head assoc_list;
+ struct list_head host_list;
struct ida assoc_cnt;
struct nvmet_fc_port_entry *pe;
struct kref ref;
@@ -136,14 +148,26 @@ struct nvmet_fc_tgt_queue {
struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
+struct nvmet_fc_hostport {
+ struct nvmet_fc_tgtport *tgtport;
+ void *hosthandle;
+ struct list_head host_list;
+ struct kref ref;
+ u8 invalid;
+};
+
struct nvmet_fc_tgt_assoc {
u64 association_id;
u32 a_id;
+ atomic_t terminating;
struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_hostport *hostport;
+ struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
+ atomic_t del_work_active;
};
@@ -227,6 +251,8 @@ static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
+static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod);
/* *********************** FC-NVME DMA Handling **************************** */
@@ -318,6 +344,188 @@ fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
}
+/* ********************** FC-NVME LS XMT Handling ************************* */
+
+
+static void
+__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+{
+ struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+
+ if (!lsop->req_queued) {
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return;
+ }
+
+ list_del(&lsop->lsreq_list);
+
+ lsop->req_queued = false;
+
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static int
+__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!tgtport->ops->ls_req)
+ return -EOPNOTSUPP;
+
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return -ESHUTDOWN;
+
+ lsreq->done = done;
+ lsop->req_queued = false;
+ INIT_LIST_HEAD(&lsop->lsreq_list);
+
+ lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
+ lsreq->rqstlen + lsreq->rsplen,
+ DMA_BIDIRECTIONAL);
+ if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
+ ret = -EFAULT;
+ goto out_puttgtport;
+ }
+ lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+
+ list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
+
+ lsop->req_queued = true;
+
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
+ lsreq);
+ if (ret)
+ goto out_unlink;
+
+ return 0;
+
+out_unlink:
+ lsop->ls_error = ret;
+ spin_lock_irqsave(&tgtport->lock, flags);
+ lsop->req_queued = false;
+ list_del(&lsop->lsreq_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+out_puttgtport:
+ nvmet_fc_tgtport_put(tgtport);
+
+ return ret;
+}
+
+static int
+nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ /* don't wait for completion */
+
+ return __nvmet_fc_send_ls_req(tgtport, lsop, done);
+}
+
+static void
+nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmet_fc_ls_req_op *lsop =
+ container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
+
+ __nvmet_fc_finish_ls_req(lsop);
+
+ /* fc-nvme target doesn't care about success or failure of cmd */
+
+ kfree(lsop);
+}
+
+/*
+ * This routine sends a FC-NVME LS to disconnect (aka terminate)
+ * the FC-NVME Association. Terminating the association also
+ * terminates the FC-NVME connections (per queue, both admin and io
+ * queues) that are part of the association, i.e. things are torn
+ * down, and the related FC-NVME Association ID and Connection IDs
+ * become invalid.
+ *
+ * The behavior of the fc-nvme target is such that its
+ * understanding of the association and connections will implicitly
+ * be torn down. The action is implicit as it may be due to a loss of
+ * connectivity with the fc-nvme host, so the target may never get a
+ * response even if it tried. As such, the action of this routine
+ * is to asynchronously send the LS, ignore any results of the LS, and
+ * continue on with terminating the association. If the fc-nvme host
+ * is present and receives the LS, it too can tear down.
+ */
+static void
+nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
+ struct nvmet_fc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ int ret;
+
+ /*
+ * If the LLDD doesn't implement ls_req or supplied no hosthandle,
+ * it's an older LLDD and sending no Disconnect LS is normal.
+ * Otherwise, send unless the hostport has already been invalidated
+ * by the LLDD.
+ */
+ if (!tgtport->ops->ls_req || !assoc->hostport ||
+ assoc->hostport->invalid)
+ return;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ sizeof(*discon_rqst) + sizeof(*discon_acc) +
+ tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(tgtport->dev,
+ "{%d:%d} send Disconnect Association failed: ENOMEM\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ return;
+ }
+
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (tgtport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&discon_acc[1];
+ else
+ lsreq->private = NULL;
+
+ lsop->tgtport = tgtport;
+ lsop->hosthandle = assoc->hostport->hosthandle;
+
+ nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
+ assoc->association_id);
+
+ ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
+ nvmet_fc_disconnect_assoc_done);
+ if (ret) {
+ dev_info(tgtport->dev,
+ "{%d:%d} XMT Disconnect Association failed: %d\n",
+ tgtport->fc_target_port.port_num, assoc->a_id, ret);
+ kfree(lsop);
+ }
+}
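/*
 * Memory layout sketch for the single kzalloc above: the request, the
 * Disconnect rqst/acc buffers, and the LLDD private area are carved out
 * of one contiguous allocation by pointer arithmetic:
 *
 *	[ lsop | discon_rqst | discon_acc | lsrqst_priv_sz bytes ]
 *	         ^ &lsop[1]    ^ &discon_rqst[1]   ^ &discon_acc[1]
 */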
+
+
/* *********************** FC-NVME Port Management ************************ */
@@ -337,17 +545,18 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
iod->tgtport = tgtport;
- list_add_tail(&iod->ls_list, &tgtport->ls_list);
+ list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
- iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
- GFP_KERNEL);
+ iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
+ sizeof(union nvmefc_ls_responses),
+ GFP_KERNEL);
if (!iod->rqstbuf)
goto out_fail;
- iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
+ iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
- NVME_FC_MAX_LS_BUFFER_SIZE,
+ sizeof(*iod->rspbuf),
DMA_TO_DEVICE);
if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
goto out_fail;
@@ -357,12 +566,12 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
out_fail:
kfree(iod->rqstbuf);
- list_del(&iod->ls_list);
+ list_del(&iod->ls_rcv_list);
for (iod--, i--; i >= 0; iod--, i--) {
fc_dma_unmap_single(tgtport->dev, iod->rspdma,
- NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
kfree(iod->rqstbuf);
- list_del(&iod->ls_list);
+ list_del(&iod->ls_rcv_list);
}
kfree(iod);
@@ -378,10 +587,10 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
fc_dma_unmap_single(tgtport->dev,
- iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
+ iod->rspdma, sizeof(*iod->rspbuf),
DMA_TO_DEVICE);
kfree(iod->rqstbuf);
- list_del(&iod->ls_list);
+ list_del(&iod->ls_rcv_list);
}
kfree(tgtport->iod);
}
@@ -393,10 +602,10 @@ nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
- iod = list_first_entry_or_null(&tgtport->ls_list,
- struct nvmet_fc_ls_iod, ls_list);
+ iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
+ struct nvmet_fc_ls_iod, ls_rcv_list);
if (iod)
- list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
+ list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
spin_unlock_irqrestore(&tgtport->lock, flags);
return iod;
}
@@ -409,7 +618,7 @@ nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
- list_move(&iod->ls_list, &tgtport->ls_list);
+ list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
spin_unlock_irqrestore(&tgtport->lock, flags);
}
@@ -678,31 +887,33 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
struct nvmet_fc_fcp_iod *fod = queue->fod;
struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
unsigned long flags;
- int i, writedataactive;
+ int i;
bool disconnect;
disconnect = atomic_xchg(&queue->connected, 0);
+ /* if not connected, nothing to do */
+ if (!disconnect)
+ return;
+
spin_lock_irqsave(&queue->qlock, flags);
/* abort outstanding io's */
for (i = 0; i < queue->sqsize; fod++, i++) {
if (fod->active) {
spin_lock(&fod->flock);
fod->abort = true;
- writedataactive = fod->writedataactive;
- spin_unlock(&fod->flock);
/*
* only call lldd abort routine if waiting for
* writedata. other outstanding ops should finish
* on their own.
*/
- if (writedataactive) {
- spin_lock(&fod->flock);
+ if (fod->writedataactive) {
fod->aborted = true;
spin_unlock(&fod->flock);
tgtport->ops->fcp_abort(
&tgtport->fc_target_port, fod->fcpreq);
- }
+ } else
+ spin_unlock(&fod->flock);
}
}
@@ -742,8 +953,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
flush_workqueue(queue->work_q);
- if (disconnect)
- nvmet_sq_destroy(&queue->nvme_sq);
+ nvmet_sq_destroy(&queue->nvme_sq);
nvmet_fc_tgt_q_put(queue);
}
@@ -778,17 +988,114 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
}
static void
+nvmet_fc_hostport_free(struct kref *ref)
+{
+ struct nvmet_fc_hostport *hostport =
+ container_of(ref, struct nvmet_fc_hostport, ref);
+ struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del(&hostport->host_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ if (tgtport->ops->host_release && hostport->invalid)
+ tgtport->ops->host_release(hostport->hosthandle);
+ kfree(hostport);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
+{
+ kref_put(&hostport->ref, nvmet_fc_hostport_free);
+}
+
+static int
+nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
+{
+ return kref_get_unless_zero(&hostport->ref);
+}
+
+static void
+nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
+{
+ /* if the LLDD supplied no hosthandle, there is no hostport to put */
+ if (!hostport || !hostport->hosthandle)
+ return;
+
+ nvmet_fc_hostport_put(hostport);
+}
+
+static struct nvmet_fc_hostport *
+nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+{
+ struct nvmet_fc_hostport *newhost, *host, *match = NULL;
+ unsigned long flags;
+
+ /* if LLDD not implemented, leave as NULL */
+ if (!hosthandle)
+ return NULL;
+
+ /* take reference for what will be the newly allocated hostport */
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return ERR_PTR(-EINVAL);
+
+ newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
+ if (!newhost) {
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(host, &tgtport->host_list, host_list) {
+ if (host->hosthandle == hosthandle && !host->invalid) {
+ if (nvmet_fc_hostport_get(host)) {
+ match = host;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ /* no allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ return (match) ? match : ERR_PTR(-ENOMEM);
+ }
+
+ newhost->tgtport = tgtport;
+ newhost->hosthandle = hosthandle;
+ INIT_LIST_HEAD(&newhost->host_list);
+ kref_init(&newhost->ref);
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(host, &tgtport->host_list, host_list) {
+ if (host->hosthandle == hosthandle && !host->invalid) {
+ if (nvmet_fc_hostport_get(host)) {
+ match = host;
+ break;
+ }
+ }
+ }
+ if (match) {
+ kfree(newhost);
+ newhost = NULL;
+ /* releasing allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ } else
+ list_add_tail(&newhost->host_list, &tgtport->host_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ return (match) ? match : newhost;
+}
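The function above follows the classic optimistic-allocation idiom: allocate outside the lock (kzalloc may sleep), then search under the lock and discard the allocation if another thread won the race. A condensed sketch of the same idiom, with hypothetical names (obj, obj_table, and obj_find_or_add are illustrative, not nvmet-fc code):

	struct obj {
		struct list_head entry;
		struct kref ref;
		void *key;
	};

	struct obj_table {
		spinlock_t lock;
		struct list_head list;
	};

	/* needs <linux/slab.h>, <linux/list.h>, <linux/kref.h>, <linux/err.h> */
	static struct obj *obj_find_or_add(struct obj_table *tbl, void *key)
	{
		struct obj *new, *cur, *match = NULL;
		unsigned long flags;

		/* allocate optimistically, outside the lock (may sleep) */
		new = kzalloc(sizeof(*new), GFP_KERNEL);

		spin_lock_irqsave(&tbl->lock, flags);
		list_for_each_entry(cur, &tbl->list, entry) {
			if (cur->key == key && kref_get_unless_zero(&cur->ref)) {
				match = cur;
				break;
			}
		}
		if (!match && new) {
			new->key = key;
			kref_init(&new->ref);
			list_add_tail(&new->entry, &tbl->list);
		}
		spin_unlock_irqrestore(&tbl->lock, flags);

		if (match) {
			kfree(new);	/* lost the race: discard our allocation */
			return match;
		}
		return new ? new : ERR_PTR(-ENOMEM);
	}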
+
+static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
nvmet_fc_delete_target_assoc(assoc);
+ atomic_set(&assoc->del_work_active, 0);
nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
-nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
+nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
unsigned long flags;
@@ -805,13 +1112,19 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
goto out_free_assoc;
if (!nvmet_fc_tgtport_get(tgtport))
- goto out_ida_put;
+ goto out_ida;
+
+ assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
+ if (IS_ERR(assoc->hostport))
+ goto out_put;
assoc->tgtport = tgtport;
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+ atomic_set(&assoc->del_work_active, 0);
+ atomic_set(&assoc->terminating, 0);
while (needrandom) {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
@@ -819,11 +1132,12 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
spin_lock_irqsave(&tgtport->lock, flags);
needrandom = false;
- list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
+ list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
if (ran == tmpassoc->association_id) {
needrandom = true;
break;
}
+ }
if (!needrandom) {
assoc->association_id = ran;
list_add_tail(&assoc->a_list, &tgtport->assoc_list);
@@ -833,7 +1147,9 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
return assoc;
-out_ida_put:
+out_put:
+ nvmet_fc_tgtport_put(tgtport);
+out_ida:
ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
kfree(assoc);
@@ -846,12 +1162,24 @@ nvmet_fc_target_assoc_free(struct kref *ref)
struct nvmet_fc_tgt_assoc *assoc =
container_of(ref, struct nvmet_fc_tgt_assoc, ref);
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct nvmet_fc_ls_iod *oldls;
unsigned long flags;
+ /* Send Disconnect now that all i/o has completed */
+ nvmet_fc_xmt_disconnect_assoc(assoc);
+
+ nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
list_del(&assoc->a_list);
+ oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
+ /* if pending Rcv Disconnect Association LS, send rsp now */
+ if (oldls)
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
+ dev_info(tgtport->dev,
+ "{%d:%d} Association freed\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
kfree(assoc);
nvmet_fc_tgtport_put(tgtport);
}
@@ -874,7 +1202,13 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_tgt_queue *queue;
unsigned long flags;
- int i;
+ int i, terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+
+ /* if already terminating, do nothing */
+ if (terminating)
+ return;
spin_lock_irqsave(&tgtport->lock, flags);
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
@@ -890,6 +1224,10 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
}
spin_unlock_irqrestore(&tgtport->lock, flags);
+ dev_info(tgtport->dev,
+ "{%d:%d} Association deleted\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+
nvmet_fc_tgt_a_put(assoc);
}
@@ -1048,16 +1386,21 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
newrec->fc_target_port.node_name = pinfo->node_name;
newrec->fc_target_port.port_name = pinfo->port_name;
- newrec->fc_target_port.private = &newrec[1];
+ if (template->target_priv_sz)
+ newrec->fc_target_port.private = &newrec[1];
+ else
+ newrec->fc_target_port.private = NULL;
newrec->fc_target_port.port_id = pinfo->port_id;
newrec->fc_target_port.port_num = idx;
INIT_LIST_HEAD(&newrec->tgt_list);
newrec->dev = dev;
newrec->ops = template;
spin_lock_init(&newrec->lock);
- INIT_LIST_HEAD(&newrec->ls_list);
+ INIT_LIST_HEAD(&newrec->ls_rcv_list);
+ INIT_LIST_HEAD(&newrec->ls_req_list);
INIT_LIST_HEAD(&newrec->ls_busylist);
INIT_LIST_HEAD(&newrec->assoc_list);
+ INIT_LIST_HEAD(&newrec->host_list);
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
@@ -1134,17 +1477,90 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
struct nvmet_fc_tgt_assoc *assoc, *next;
unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry_safe(assoc, next,
+ &tgtport->assoc_list, a_list) {
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+ ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
+ if (ret == 0) {
+ if (!schedule_work(&assoc->del_work))
+ nvmet_fc_tgt_a_put(assoc);
+ } else {
+ /* already deleting - release local reference */
+ nvmet_fc_tgt_a_put(assoc);
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+}
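The atomic_cmpxchg() on del_work_active is what makes this teardown path idempotent: only the caller that flips the flag 0 -> 1 gets to schedule del_work, and nvmet_fc_delete_assoc() above resets it to 0 once the delete has run. Condensed, every scheduling site in this patch follows the same shape (names are those from the patch):

	if (atomic_cmpxchg(&assoc->del_work_active, 0, 1) == 0) {
		/* we won the flag: the work item inherits our reference */
		if (!schedule_work(&assoc->del_work))
			nvmet_fc_tgt_a_put(assoc);	/* work already queued */
	} else {
		/* someone else is already deleting: drop only our reference */
		nvmet_fc_tgt_a_put(assoc);
	}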
+
+/**
+ * nvmet_fc_invalidate_host - transport entry point called by an LLDD
+ * to remove references to a hosthandle for LS's.
+ *
+ * The nvmet-fc layer ensures that any references to the hosthandle
+ * on the targetport are forgotten (set to NULL). The LLDD will
+ * typically call this when a login with a remote host port has been
+ * lost, thus LS's for the remote host port are no longer possible.
+ *
+ * If an LS request is outstanding to the targetport/hosthandle (or
+ * issued concurrently with the call to invalidate the host), the
+ * LLDD is responsible for terminating/aborting the LS and completing
+ * the LS request. It is recommended that these terminations/aborts
+ * occur after the call to invalidate the host handle, to avoid
+ * additional retries by the nvmet-fc transport. The nvmet-fc
+ * transport may continue to reference the hosthandle while it cleans
+ * up outstanding NVME associations. The nvmet-fc transport will call the
+ * ops->host_release() callback to notify the LLDD that all references
+ * are complete and the related host handle can be recovered.
+ * Note: if there are no references, the callback may be called before
+ * the invalidate host call returns.
+ *
+ * @target_port: pointer to the (registered) target port that a prior
+ * LS was received on and which supplied the transport the
+ * hosthandle.
+ * @hosthandle: the handle (pointer) that represents the host port
+ * that no longer has connectivity and that LS's should
+ * no longer be directed to.
+ */
+void
+nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
+ void *hosthandle)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ struct nvmet_fc_tgt_assoc *assoc, *next;
+ unsigned long flags;
+ bool noassoc = true;
+ int ret;
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry_safe(assoc, next,
&tgtport->assoc_list, a_list) {
+ if (!assoc->hostport ||
+ assoc->hostport->hosthandle != hosthandle)
+ continue;
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!schedule_work(&assoc->del_work))
+ assoc->hostport->invalid = 1;
+ noassoc = false;
+ ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
+ if (ret == 0) {
+ if (!schedule_work(&assoc->del_work))
+ nvmet_fc_tgt_a_put(assoc);
+ } else {
+ /* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
+ }
}
spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ /* if there's nothing to wait for - call the callback */
+ if (noassoc && tgtport->ops->host_release)
+ tgtport->ops->host_release(hosthandle);
}
+EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
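A hedged sketch of how an LLDD might drive this entry point, following the ordering the kernel-doc above recommends; all lldd_* names and the lldd_rport structure are hypothetical:

	static void lldd_handle_host_logout(struct lldd_rport *rp)
	{
		/* stop the transport issuing new LS's to the dead host... */
		nvmet_fc_invalidate_host(rp->tgtport, rp->hosthandle);

		/* ...then terminate any LS's already in flight to it */
		lldd_abort_pending_ls(rp);
	}

	static void lldd_host_release(void *hosthandle)
	{
		/* last transport reference dropped: handle may be recycled */
		lldd_free_rport_resources(hosthandle);
	}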
/*
* nvmet layer has called to terminate an association
@@ -1157,6 +1573,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
struct nvmet_fc_tgt_queue *queue;
unsigned long flags;
bool found_ctrl = false;
+ int ret;
/* this is a bit ugly, but don't want to make locks layered */
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
@@ -1180,8 +1597,14 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- if (!schedule_work(&assoc->del_work))
+ ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
+ if (ret == 0) {
+ if (!schedule_work(&assoc->del_work))
+ nvmet_fc_tgt_a_put(assoc);
+ } else {
+ /* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
+ }
return;
}
@@ -1211,6 +1634,13 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
+ /*
+ * should terminate LS's as well. However, LS's will be generated
+ * at the tail end of association termination, so they likely don't
+ * exist yet. And even if they did, it's worthwhile to just let
+ * them finish; targetport ref counting will clean things up.
+ */
+
nvmet_fc_tgtport_put(tgtport);
return 0;
@@ -1218,113 +1648,15 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
-/* *********************** FC-NVME LS Handling **************************** */
-
-
-static void
-nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
-{
- struct fcnvme_ls_acc_hdr *acc = buf;
-
- acc->w0.ls_cmd = ls_cmd;
- acc->desc_list_len = desc_len;
- acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
- acc->rqst.desc_len =
- fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
- acc->rqst.w0.ls_cmd = rqst_ls_cmd;
-}
+/* ********************** FC-NVME LS RCV Handling ************************* */
-static int
-nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
- u8 reason, u8 explanation, u8 vendor)
-{
- struct fcnvme_ls_rjt *rjt = buf;
-
- nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
- fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
- ls_cmd);
- rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
- rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
- rjt->rjt.reason_code = reason;
- rjt->rjt.reason_explanation = explanation;
- rjt->rjt.vendor = vendor;
-
- return sizeof(struct fcnvme_ls_rjt);
-}
-
-/* Validation Error indexes into the string table below */
-enum {
- VERR_NO_ERROR = 0,
- VERR_CR_ASSOC_LEN = 1,
- VERR_CR_ASSOC_RQST_LEN = 2,
- VERR_CR_ASSOC_CMD = 3,
- VERR_CR_ASSOC_CMD_LEN = 4,
- VERR_ERSP_RATIO = 5,
- VERR_ASSOC_ALLOC_FAIL = 6,
- VERR_QUEUE_ALLOC_FAIL = 7,
- VERR_CR_CONN_LEN = 8,
- VERR_CR_CONN_RQST_LEN = 9,
- VERR_ASSOC_ID = 10,
- VERR_ASSOC_ID_LEN = 11,
- VERR_NO_ASSOC = 12,
- VERR_CONN_ID = 13,
- VERR_CONN_ID_LEN = 14,
- VERR_NO_CONN = 15,
- VERR_CR_CONN_CMD = 16,
- VERR_CR_CONN_CMD_LEN = 17,
- VERR_DISCONN_LEN = 18,
- VERR_DISCONN_RQST_LEN = 19,
- VERR_DISCONN_CMD = 20,
- VERR_DISCONN_CMD_LEN = 21,
- VERR_DISCONN_SCOPE = 22,
- VERR_RS_LEN = 23,
- VERR_RS_RQST_LEN = 24,
- VERR_RS_CMD = 25,
- VERR_RS_CMD_LEN = 26,
- VERR_RS_RCTL = 27,
- VERR_RS_RO = 28,
-};
-
-static char *validation_errors[] = {
- "OK",
- "Bad CR_ASSOC Length",
- "Bad CR_ASSOC Rqst Length",
- "Not CR_ASSOC Cmd",
- "Bad CR_ASSOC Cmd Length",
- "Bad Ersp Ratio",
- "Association Allocation Failed",
- "Queue Allocation Failed",
- "Bad CR_CONN Length",
- "Bad CR_CONN Rqst Length",
- "Not Association ID",
- "Bad Association ID Length",
- "No Association",
- "Not Connection ID",
- "Bad Connection ID Length",
- "No Connection",
- "Not CR_CONN Cmd",
- "Bad CR_CONN Cmd Length",
- "Bad DISCONN Length",
- "Bad DISCONN Rqst Length",
- "Not DISCONN Cmd",
- "Bad DISCONN Cmd Length",
- "Bad Disconnect Scope",
- "Bad RS Length",
- "Bad RS Rqst Length",
- "Not RS Cmd",
- "Bad RS Cmd Length",
- "Bad RS R_CTL",
- "Bad RS Relative Offset",
-};
static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_cr_assoc_rqst *rqst =
- (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
- struct fcnvme_ls_cr_assoc_acc *acc =
- (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
+ struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
+ struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
struct nvmet_fc_tgt_queue *queue;
int ret = 0;
@@ -1356,7 +1688,8 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
else {
/* new association w/ admin queue */
- iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
+ iod->assoc = nvmet_fc_alloc_target_assoc(
+ tgtport, iod->hosthandle);
if (!iod->assoc)
ret = VERR_ASSOC_ALLOC_FAIL;
else {
@@ -1371,8 +1704,8 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
dev_err(tgtport->dev,
"Create Association LS failed: %s\n",
validation_errors[ret]);
- iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
- NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
FCNVME_RJT_RC_LOGIC,
FCNVME_RJT_EXP_NONE, 0);
return;
@@ -1382,11 +1715,15 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
atomic_set(&queue->connected, 1);
queue->sqhd = 0; /* best place to init value */
+ dev_info(tgtport->dev,
+ "{%d:%d} Association created\n",
+ tgtport->fc_target_port.port_num, iod->assoc->a_id);
+
/* format a response */
- iod->lsreq->rsplen = sizeof(*acc);
+ iod->lsrsp->rsplen = sizeof(*acc);
- nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
sizeof(struct fcnvme_ls_cr_assoc_acc)),
FCNVME_LS_CREATE_ASSOCIATION);
@@ -1407,10 +1744,8 @@ static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_cr_conn_rqst *rqst =
- (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
- struct fcnvme_ls_cr_conn_acc *acc =
- (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
+ struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
+ struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
struct nvmet_fc_tgt_queue *queue;
int ret = 0;
@@ -1462,8 +1797,8 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
dev_err(tgtport->dev,
"Create Connection LS failed: %s\n",
validation_errors[ret]);
- iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
- NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
FCNVME_RJT_RC_INV_ASSOC :
FCNVME_RJT_RC_LOGIC,
@@ -1477,9 +1812,9 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
/* format a response */
- iod->lsreq->rsplen = sizeof(*acc);
+ iod->lsrsp->rsplen = sizeof(*acc);
- nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
FCNVME_LS_CREATE_CONNECTION);
acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
@@ -1491,46 +1826,28 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
be16_to_cpu(rqst->connect_cmd.qid)));
}
-static void
+/*
+ * Returns true if the LS response is to be transmit
+ * Returns false if the LS response is to be delayed
+ */
+static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
struct fcnvme_ls_disconnect_assoc_rqst *rqst =
- (struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf;
+ &iod->rqstbuf->rq_dis_assoc;
struct fcnvme_ls_disconnect_assoc_acc *acc =
- (struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf;
- struct nvmet_fc_tgt_assoc *assoc;
+ &iod->rspbuf->rsp_dis_assoc;
+ struct nvmet_fc_tgt_assoc *assoc = NULL;
+ struct nvmet_fc_ls_iod *oldls = NULL;
+ unsigned long flags;
int ret = 0;
memset(acc, 0, sizeof(*acc));
- if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
- ret = VERR_DISCONN_LEN;
- else if (rqst->desc_list_len !=
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
- ret = VERR_DISCONN_RQST_LEN;
- else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
- ret = VERR_ASSOC_ID;
- else if (rqst->associd.desc_len !=
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_assoc_id)))
- ret = VERR_ASSOC_ID_LEN;
- else if (rqst->discon_cmd.desc_tag !=
- cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
- ret = VERR_DISCONN_CMD;
- else if (rqst->discon_cmd.desc_len !=
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_disconn_cmd)))
- ret = VERR_DISCONN_CMD_LEN;
- /*
- * As the standard changed on the LS, check if old format and scope
- * something other than Association (e.g. 0).
- */
- else if (rqst->discon_cmd.rsvd8[0])
- ret = VERR_DISCONN_SCOPE;
- else {
- /* match an active association */
+ ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
+ if (!ret) {
+ /* match an active association - takes an assoc ref if !NULL */
assoc = nvmet_fc_find_target_assoc(tgtport,
be64_to_cpu(rqst->associd.association_id));
iod->assoc = assoc;
@@ -1538,34 +1855,63 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
ret = VERR_NO_ASSOC;
}
- if (ret) {
+ if (ret || !assoc) {
dev_err(tgtport->dev,
"Disconnect LS failed: %s\n",
validation_errors[ret]);
- iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
- NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
FCNVME_RJT_RC_INV_ASSOC :
- (ret == VERR_NO_CONN) ?
- FCNVME_RJT_RC_INV_CONN :
- FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_RC_LOGIC,
FCNVME_RJT_EXP_NONE, 0);
- return;
+ return true;
}
/* format a response */
- iod->lsreq->rsplen = sizeof(*acc);
+ iod->lsrsp->rsplen = sizeof(*acc);
- nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
FCNVME_LS_DISCONNECT_ASSOC);
/* release get taken in nvmet_fc_find_target_assoc */
- nvmet_fc_tgt_a_put(iod->assoc);
+ nvmet_fc_tgt_a_put(assoc);
+
+ /*
+ * The rules for LS responses say the response cannot
+ * go back until ABTS's have been sent for all outstanding
+ * I/O and a Disconnect Association LS has been sent.
+ * So... save off the Disconnect LS to send the response
+ * later. If there was a prior LS already saved, replace
+ * it with the newer one and send a can't perform reject
+ * on the older one.
+ */
+ spin_lock_irqsave(&tgtport->lock, flags);
+ oldls = assoc->rcv_disconn;
+ assoc->rcv_disconn = iod;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_delete_target_assoc(iod->assoc);
+ if (oldls) {
+ dev_info(tgtport->dev,
+ "{%d:%d} Multiple Disconnect Association LS's "
+ "received\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ /* overwrite good response with bogus failure */
+ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
+ sizeof(*iod->rspbuf),
+ /* ok to use rqst, LS is same */
+ rqst->w0.ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
+ }
+
+ return false;
}
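To make the deferred-response flow above concrete, the sequence from LS receipt to ACC transmission runs roughly as follows (a reading of the code above, not additional behavior):

	/*
	 *   rcv Disconnect Association LS
	 *     -> stash iod in assoc->rcv_disconn (an older duplicate, if
	 *        any, gets an immediate FCNVME_RJT_RC_UNAB reject)
	 *     -> nvmet_fc_delete_target_assoc()  (aborts outstanding I/O)
	 *     -> return false                    (no response sent yet)
	 *   ... all I/O completes, last assoc reference dropped ...
	 *   nvmet_fc_target_assoc_free()
	 *     -> nvmet_fc_xmt_disconnect_assoc() (transport's own Disconnect LS)
	 *     -> nvmet_fc_xmt_ls_rsp(oldls)      (the saved ACC finally goes out)
	 */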
@@ -1577,13 +1923,13 @@ static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
static void
-nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
+nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
- struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
+ struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
struct nvmet_fc_tgtport *tgtport = iod->tgtport;
fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
- NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
nvmet_fc_free_ls_iod(tgtport, iod);
nvmet_fc_tgtport_put(tgtport);
}
@@ -1595,11 +1941,11 @@ nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
int ret;
fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
- NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
- ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
+ ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
if (ret)
- nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
+ nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
}
/*
@@ -1609,15 +1955,15 @@ static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_rqst_w0 *w0 =
- (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
+ struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
+ bool sendrsp = true;
- iod->lsreq->nvmet_fc_private = iod;
- iod->lsreq->rspbuf = iod->rspbuf;
- iod->lsreq->rspdma = iod->rspdma;
- iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
+ iod->lsrsp->nvme_fc_private = iod;
+ iod->lsrsp->rspbuf = iod->rspbuf;
+ iod->lsrsp->rspdma = iod->rspdma;
+ iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
/* Be preventative: handlers will later set to a valid length */
- iod->lsreq->rsplen = 0;
+ iod->lsrsp->rsplen = 0;
iod->assoc = NULL;
@@ -1637,15 +1983,16 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
break;
case FCNVME_LS_DISCONNECT_ASSOC:
/* Terminate a Queue/Connection or the Association */
- nvmet_fc_ls_disconnect(tgtport, iod);
+ sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
break;
default:
- iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
- NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
+ sizeof(*iod->rspbuf), w0->ls_cmd,
FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
}
- nvmet_fc_xmt_ls_rsp(tgtport, iod);
+ if (sendrsp)
+ nvmet_fc_xmt_ls_rsp(tgtport, iod);
}
/*
@@ -1674,35 +2021,53 @@ nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
*
* @target_port: pointer to the (registered) target port the LS was
* received on.
- * @lsreq: pointer to a lsreq request structure to be used to reference
+ * @lsrsp: pointer to a lsrsp structure to be used to reference
* the exchange corresponding to the LS.
* @lsreqbuf: pointer to the buffer containing the LS Request
* @lsreqbuf_len: length, in bytes, of the received LS request
*/
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
- struct nvmefc_tgt_ls_req *lsreq,
+ void *hosthandle,
+ struct nvmefc_ls_rsp *lsrsp,
void *lsreqbuf, u32 lsreqbuf_len)
{
struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
struct nvmet_fc_ls_iod *iod;
-
- if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
+ struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
+
+ if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: payload too large (%d)\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "",
+ lsreqbuf_len);
return -E2BIG;
+ }
- if (!nvmet_fc_tgtport_get(tgtport))
+ if (!nvmet_fc_tgtport_get(tgtport)) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: target deleting\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
return -ESHUTDOWN;
+ }
iod = nvmet_fc_alloc_ls_iod(tgtport);
if (!iod) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: context allocation failed\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
nvmet_fc_tgtport_put(tgtport);
return -ENOENT;
}
- iod->lsreq = lsreq;
+ iod->lsrsp = lsrsp;
iod->fcpreq = NULL;
memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
iod->rqstdatalen = lsreqbuf_len;
+ iod->hosthandle = hosthandle;
schedule_work(&iod->work);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index f69ce66e2d44..2ff1d1334a03 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -208,10 +208,13 @@ struct fcloop_rport {
};
struct fcloop_tport {
- struct nvmet_fc_target_port *targetport;
- struct nvme_fc_remote_port *remoteport;
- struct fcloop_nport *nport;
- struct fcloop_lport *lport;
+ struct nvmet_fc_target_port *targetport;
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
};
struct fcloop_nport {
@@ -228,7 +231,8 @@ struct fcloop_nport {
struct fcloop_lsreq {
struct nvmefc_ls_req *lsreq;
- struct nvmefc_tgt_ls_req tgt_ls_req;
+ struct nvmefc_ls_rsp ls_rsp;
+ int lsdir; /* H2T or T2H */
int status;
struct list_head ls_list; /* fcloop_rport->ls_list */
};
@@ -267,9 +271,9 @@ struct fcloop_ini_fcpreq {
};
static inline struct fcloop_lsreq *
-tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
+ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
- return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
+ return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}
static inline struct fcloop_fcpreq *
@@ -323,7 +327,7 @@ fcloop_rport_lsrqst_work(struct work_struct *work)
}
static int
-fcloop_ls_req(struct nvme_fc_local_port *localport,
+fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
struct nvmefc_ls_req *lsreq)
{
@@ -344,27 +348,28 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
}
tls_req->status = 0;
- ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
- lsreq->rqstaddr, lsreq->rqstlen);
+ ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
+ &tls_req->ls_rsp,
+ lsreq->rqstaddr, lsreq->rqstlen);
return ret;
}
static int
-fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
- struct nvmefc_tgt_ls_req *tgt_lsreq)
+fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
+ struct nvmefc_ls_rsp *lsrsp)
{
- struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
struct fcloop_tport *tport = targetport->private;
struct nvme_fc_remote_port *remoteport = tport->remoteport;
struct fcloop_rport *rport;
- memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
- ((lsreq->rsplen < tgt_lsreq->rsplen) ?
- lsreq->rsplen : tgt_lsreq->rsplen));
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+ ((lsreq->rsplen < lsrsp->rsplen) ?
+ lsreq->rsplen : lsrsp->rsplen));
- tgt_lsreq->done(tgt_lsreq);
+ lsrsp->done(lsrsp);
if (remoteport) {
rport = remoteport->private;
@@ -377,6 +382,99 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
return 0;
}
+static void
+fcloop_tport_lsrqst_work(struct work_struct *work)
+{
+ struct fcloop_tport *tport =
+ container_of(work, struct fcloop_tport, ls_work);
+ struct fcloop_lsreq *tls_req;
+
+ spin_lock(&tport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&tport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
+
+ list_del(&tls_req->ls_list);
+ spin_unlock(&tport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * callee may free memory containing tls_req.
+ * do not reference lsreq after this.
+ */
+
+ spin_lock(&tport->lock);
+ }
+ spin_unlock(&tport->lock);
+}
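fcloop_tport_lsrqst_work() uses a standard drain idiom: pop one entry under the lock, drop the lock for the callback (which may sleep, re-queue, or free the entry), then retake it. A generic sketch with hypothetical names:

	struct item {
		struct list_head node;
		void (*done)(struct item *item);
	};

	static void drain_items(spinlock_t *lock, struct list_head *head)
	{
		struct item *it;

		spin_lock(lock);
		while ((it = list_first_entry_or_null(head, struct item, node))) {
			list_del(&it->node);
			spin_unlock(lock);	/* callback may sleep or re-queue */

			it->done(it);		/* may free 'it': never touch it after */

			spin_lock(lock);
		}
		spin_unlock(lock);
	}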
+
+static int
+fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
+ struct nvmefc_ls_req *lsreq)
+{
+ struct fcloop_lsreq *tls_req = lsreq->private;
+ struct fcloop_tport *tport = targetport->private;
+ int ret = 0;
+
+ /*
+ * hosthandle should be the dst.rport value.
+ * hosthandle is ignored here, as fcloop is currently
+ * 1:1 tgtport vs remoteport.
+ */
+ tls_req->lsreq = lsreq;
+ INIT_LIST_HEAD(&tls_req->ls_list);
+
+ if (!tport->remoteport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&tport->lock);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ spin_unlock(&tport->lock);
+ schedule_work(&tport->ls_work);
+ return ret;
+ }
+
+ tls_req->status = 0;
+ ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
+ lsreq->rqstaddr, lsreq->rqstlen);
+
+ return ret;
+}
+
+static int
+fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *lsrsp)
+{
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_rport *rport = remoteport->private;
+ struct nvmet_fc_target_port *targetport = rport->targetport;
+ struct fcloop_tport *tport;
+
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+ ((lsreq->rsplen < lsrsp->rsplen) ?
+ lsreq->rsplen : lsrsp->rsplen));
+ lsrsp->done(lsrsp);
+
+ if (targetport) {
+ tport = targetport->private;
+ spin_lock(&tport->lock);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ spin_unlock(&tport->lock);
+ schedule_work(&tport->ls_work);
+ }
+
+ return 0;
+}
+
+static void
+fcloop_t2h_host_release(void *hosthandle)
+{
+ /* host handle ignored for now */
+}
+
/*
* Simulate reception of an RSCN and convert it to an initiator transport
* call to rescan a remote port.
@@ -762,13 +860,19 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
}
static void
-fcloop_ls_abort(struct nvme_fc_local_port *localport,
+fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
struct nvmefc_ls_req *lsreq)
{
}
static void
+fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq)
+{
+}
+
+static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
void *hw_queue_handle,
@@ -867,6 +971,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
struct fcloop_tport *tport = targetport->private;
+ flush_work(&tport->ls_work);
fcloop_nport_put(tport->nport);
}
@@ -879,10 +984,11 @@ static struct nvme_fc_port_template fctemplate = {
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,
.delete_queue = fcloop_delete_queue,
- .ls_req = fcloop_ls_req,
+ .ls_req = fcloop_h2t_ls_req,
.fcp_io = fcloop_fcp_req,
- .ls_abort = fcloop_ls_abort,
+ .ls_abort = fcloop_h2t_ls_abort,
.fcp_abort = fcloop_fcp_abort,
+ .xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -896,11 +1002,14 @@ static struct nvme_fc_port_template fctemplate = {
static struct nvmet_fc_target_template tgttemplate = {
.targetport_delete = fcloop_targetport_delete,
- .xmt_ls_rsp = fcloop_xmt_ls_rsp,
+ .xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp,
.fcp_op = fcloop_fcp_op,
.fcp_abort = fcloop_tgt_fcp_abort,
.fcp_req_release = fcloop_fcp_req_release,
.discovery_event = fcloop_tgt_discovery_evt,
+ .ls_req = fcloop_t2h_ls_req,
+ .ls_abort = fcloop_t2h_ls_abort,
+ .host_release = fcloop_t2h_host_release,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -909,6 +1018,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
+ .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
};
static ssize_t
@@ -1258,6 +1368,9 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
tport->nport = nport;
tport->lport = nport->lport;
nport->tport = tport;
+ spin_lock_init(&tport->lock);
+ INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
+ INIT_LIST_HEAD(&tport->ls_list);
return count;
}
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index ea0e596be15d..3dd6f566a240 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -47,6 +47,22 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->nows = to0based(ql->io_opt / ql->logical_block_size);
}
+static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
+{
+ struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
+
+ if (bi) {
+ ns->metadata_size = bi->tuple_size;
+ if (bi->profile == &t10_pi_type1_crc)
+ ns->pi_type = NVME_NS_DPS_PI_TYPE1;
+ else if (bi->profile == &t10_pi_type3_crc)
+ ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+ else
+ /* Unsupported metadata type */
+ ns->metadata_size = 0;
+ }
+}
+
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
int ret;
@@ -64,6 +80,12 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
}
ns->size = i_size_read(ns->bdev->bd_inode);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ ns->pi_type = 0;
+ ns->metadata_size = 0;
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
+ nvmet_bdev_ns_enable_integrity(ns);
+
return 0;
}
@@ -75,6 +97,11 @@ void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
}
}
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
+{
+ ns->size = i_size_read(ns->bdev->bd_inode);
+}
+
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
u16 status = NVME_SC_SUCCESS;
@@ -142,6 +169,61 @@ static void nvmet_bio_done(struct bio *bio)
bio_put(bio);
}
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+ struct sg_mapping_iter *miter)
+{
+ struct blk_integrity *bi;
+ struct bio_integrity_payload *bip;
+ struct block_device *bdev = req->ns->bdev;
+ int rc;
+ size_t resid, len;
+
+ bi = bdev_get_integrity(bdev);
+ if (unlikely(!bi)) {
+ pr_err("Unable to locate bio_integrity\n");
+ return -ENODEV;
+ }
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO,
+ min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
+ if (IS_ERR(bip)) {
+ pr_err("Unable to allocate bio_integrity_payload\n");
+ return PTR_ERR(bip);
+ }
+
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+ /* virtual start sector must be in integrity interval units */
+ bip_set_seed(bip, bio->bi_iter.bi_sector >>
+ (bi->interval_exp - SECTOR_SHIFT));
+
+ resid = bip->bip_iter.bi_size;
+ while (resid > 0 && sg_miter_next(miter)) {
+ len = min_t(size_t, miter->length, resid);
+ rc = bio_integrity_add_page(bio, miter->page, len,
+ offset_in_page(miter->addr));
+ if (unlikely(rc != len)) {
+ pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ sg_miter_stop(miter);
+ return -ENOMEM;
+ }
+
+ resid -= len;
+ if (len < miter->length)
+ miter->consumed -= miter->length - len;
+ }
+ sg_miter_stop(miter);
+
+ return 0;
+}
+#else
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+ struct sg_mapping_iter *miter)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
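One subtle piece of the helper above: when a bio fills up mid-segment, the miter bookkeeping is wound back so the next call resumes exactly where this one stopped. Worked through with illustrative numbers:

	/*
	 * Say miter->length = 512 but only resid = 64 bytes still fit in
	 * this bio's integrity payload:
	 *
	 *   len = min(512, 64) = 64    -> 64 bytes added to the bip
	 *   len < miter->length        -> miter->consumed -= (512 - 64)
	 *
	 * so the following bio's nvmet_bdev_alloc_bip() call restarts the
	 * iterator at the 448 unconsumed bytes of the same segment.
	 */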
+
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
int sg_cnt = req->sg_cnt;
@@ -149,9 +231,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
struct scatterlist *sg;
struct blk_plug plug;
sector_t sector;
- int op, i;
+ int op, i, rc;
+ struct sg_mapping_iter prot_miter;
+ unsigned int iter_flags;
+ unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
- if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ if (!nvmet_check_transfer_len(req, total_len))
return;
if (!req->sg_cnt) {
@@ -163,8 +248,10 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
op |= REQ_FUA;
+ iter_flags = SG_MITER_TO_SG;
} else {
op = REQ_OP_READ;
+ iter_flags = SG_MITER_FROM_SG;
}
if (is_pci_p2pdma_page(sg_page(req->sg)))
@@ -186,11 +273,24 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
bio->bi_opf = op;
blk_start_plug(&plug);
+ if (req->metadata_len)
+ sg_miter_start(&prot_miter, req->metadata_sg,
+ req->metadata_sg_cnt, iter_flags);
+
for_each_sg(req->sg, sg, req->sg_cnt, i) {
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
struct bio *prev = bio;
+ if (req->metadata_len) {
+ rc = nvmet_bdev_alloc_bip(req, bio,
+ &prot_miter);
+ if (unlikely(rc)) {
+ bio_io_error(bio);
+ return;
+ }
+ }
+
bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
@@ -204,6 +304,14 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
sg_cnt--;
}
+ if (req->metadata_len) {
+ rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
+ if (unlikely(rc)) {
+ bio_io_error(bio);
+ return;
+ }
+ }
+
submit_bio(bio);
blk_finish_plug(&plug);
}
@@ -212,7 +320,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
@@ -226,7 +334,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
- if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+ if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
}
@@ -304,7 +412,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
sector_t nr_sector;
int ret;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
sector = le64_to_cpu(write_zeroes->slba) <<
@@ -331,6 +439,8 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_bdev_execute_rw;
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+ req->metadata_len = nvmet_rw_metadata_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_bdev_execute_flush;
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index cd5670b83118..0abbefd9925e 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -13,6 +13,18 @@
#define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16
+int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+{
+ struct kstat stat;
+ int ret;
+
+ ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
+ AT_STATX_FORCE_SYNC);
+ if (!ret)
+ ns->size = stat.size;
+ return ret;
+}
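The declaration block in nvmet.h further below adds a common nvmet_ns_revalidate() alongside the two backend helpers; presumably the core-side dispatch looks something like this sketch (not code from this hunk):

	void nvmet_ns_revalidate(struct nvmet_ns *ns)
	{
		if (ns->bdev)
			nvmet_bdev_ns_revalidate(ns);
		else
			nvmet_file_ns_revalidate(ns);
	}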
+
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
if (ns->file) {
@@ -30,7 +42,6 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
int flags = O_RDWR | O_LARGEFILE;
- struct kstat stat;
int ret;
if (!ns->buffered_io)
@@ -43,12 +54,10 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
return PTR_ERR(ns->file);
}
- ret = vfs_getattr(&ns->file->f_path,
- &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
+ ret = nvmet_file_ns_revalidate(ns);
if (ret)
goto err;
- ns->size = stat.size;
/*
* i_blkbits can be greater than the universally accepted upper bound,
* so make sure we export a sane namespace lba_shift.
@@ -232,7 +241,7 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
{
ssize_t nr_bvec = req->sg_cnt;
- if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
return;
if (!req->sg_cnt || !nr_bvec) {
@@ -276,7 +285,7 @@ static void nvmet_file_flush_work(struct work_struct *w)
static void nvmet_file_execute_flush(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
schedule_work(&req->f.work);
@@ -366,7 +375,7 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
schedule_work(&req->f.work);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 421dff3ea143..809691291e73 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -19,6 +19,7 @@
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
+#include <linux/t10-pi.h>
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
@@ -77,6 +78,8 @@ struct nvmet_ns {
int use_p2pmem;
struct pci_dev *p2p_dev;
+ int pi_type;
+ int metadata_size;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -142,6 +145,7 @@ struct nvmet_port {
bool enabled;
int inline_data_size;
const struct nvmet_fabrics_ops *tr_ops;
+ bool pi_enable;
};
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
@@ -201,6 +205,7 @@ struct nvmet_ctrl {
spinlock_t error_lock;
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
+ bool pi_support;
};
struct nvmet_subsys_model {
@@ -230,6 +235,7 @@ struct nvmet_subsys {
u64 ver;
u64 serial;
char *subsysnqn;
+ bool pi_support;
struct config_group group;
@@ -281,6 +287,7 @@ struct nvmet_fabrics_ops {
unsigned int type;
unsigned int msdbd;
bool has_keyed_sgls : 1;
+ bool metadata_support : 1;
void (*queue_response)(struct nvmet_req *req);
int (*add_port)(struct nvmet_port *port);
void (*remove_port)(struct nvmet_port *port);
@@ -302,6 +309,7 @@ struct nvmet_req {
struct nvmet_cq *cq;
struct nvmet_ns *ns;
struct scatterlist *sg;
+ struct scatterlist *metadata_sg;
struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
union {
struct {
@@ -315,8 +323,10 @@ struct nvmet_req {
} f;
};
int sg_cnt;
+ int metadata_sg_cnt;
/* data length as parsed from the SGL descriptor: */
size_t transfer_len;
+ size_t metadata_len;
struct nvmet_port *port;
@@ -384,11 +394,11 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
-bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
-int nvmet_req_alloc_sgl(struct nvmet_req *req);
-void nvmet_req_free_sgl(struct nvmet_req *req);
+int nvmet_req_alloc_sgls(struct nvmet_req *req);
+void nvmet_req_free_sgls(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
@@ -498,13 +508,24 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
+int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
+void nvmet_ns_revalidate(struct nvmet_ns *ns);
-static inline u32 nvmet_rw_len(struct nvmet_req *req)
+static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
req->ns->blksize_shift;
}
+static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return 0;
+ return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
+ req->ns->metadata_size;
+}
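A quick worked example of the two length helpers (values illustrative): for a 512+8 format namespace (blksize_shift = 9, metadata_size = 8) and rw.length = 7, i.e. eight logical blocks in NVMe's 0-based count:

	/*
	 *   nvmet_rw_data_len()     = (7 + 1) << 9 = 4096 bytes
	 *   nvmet_rw_metadata_len() = (7 + 1) * 8  =   64 bytes
	 *
	 * nvmet_bdev_execute_rw() then validates the transfer against the
	 * sum, 4160 bytes (see total_len in the io-cmd-bdev.c hunk above).
	 */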
+
static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
@@ -519,4 +540,11 @@ static inline __le16 to0based(u32 a)
return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
+static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return false;
+ return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
+}
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index fd47de0e4e4e..76ea23a2c2be 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -20,6 +20,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
+#include <rdma/ib_cm.h>
#include <linux/nvme-rdma.h>
#include "nvmet.h"
@@ -33,6 +34,9 @@
/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS 8
+#define NVMET_RDMA_MAX_METADATA_MDTS 5
+
+struct nvmet_rdma_srq;
struct nvmet_rdma_cmd {
struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
@@ -41,6 +45,7 @@ struct nvmet_rdma_cmd {
struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
struct nvme_command *nvme_cmd;
struct nvmet_rdma_queue *queue;
+ struct nvmet_rdma_srq *nsrq;
};
enum {
@@ -57,6 +62,7 @@ struct nvmet_rdma_rsp {
struct nvmet_rdma_queue *queue;
struct ib_cqe read_cqe;
+ struct ib_cqe write_cqe;
struct rdma_rw_ctx rw;
struct nvmet_req req;
@@ -83,6 +89,7 @@ struct nvmet_rdma_queue {
struct ib_cq *cq;
atomic_t sq_wr_avail;
struct nvmet_rdma_device *dev;
+ struct nvmet_rdma_srq *nsrq;
spinlock_t state_lock;
enum nvmet_rdma_queue_state state;
struct nvmet_cq nvme_cq;
@@ -100,6 +107,7 @@ struct nvmet_rdma_queue {
int idx;
int host_qid;
+ int comp_vector;
int recv_queue_size;
int send_queue_size;
@@ -113,11 +121,17 @@ struct nvmet_rdma_port {
struct delayed_work repair_work;
};
+struct nvmet_rdma_srq {
+ struct ib_srq *srq;
+ struct nvmet_rdma_cmd *cmds;
+ struct nvmet_rdma_device *ndev;
+};
+
struct nvmet_rdma_device {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_srq *srq;
- struct nvmet_rdma_cmd *srq_cmds;
+ struct nvmet_rdma_srq **srqs;
+ int srq_count;
size_t srq_size;
struct kref ref;
struct list_head entry;
@@ -129,6 +143,16 @@ static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
+static int srq_size_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops srq_size_ops = {
+ .set = srq_size_set,
+ .get = param_get_int,
+};
+
+static int nvmet_rdma_srq_size = 1024;
+module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
+MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size; must be >= 256 (default: 1024)");
+
static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
@@ -140,6 +164,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
@@ -149,6 +174,17 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
static const struct nvmet_fabrics_ops nvmet_rdma_ops;
+static int srq_size_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0, ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 256)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
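With module_param_cb() wired to this setter via srq_size_ops above, the bound is enforced both at load time and on runtime writes to the 0644 sysfs attribute. An illustrative session, assuming the module is built as nvmet-rdma:

	/*
	 *   # modprobe nvmet-rdma use_srq=1 srq_size=2048    -> accepted
	 *   # echo 128 > /sys/module/nvmet_rdma/parameters/srq_size
	 *   write error: Invalid argument                    (n < 256 -> -EINVAL)
	 */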
+
static int num_pages(int len)
{
return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
@@ -391,6 +427,9 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
/* Data In / RDMA READ */
r->read_cqe.done = nvmet_rdma_read_data_done;
+ /* Data Out / RDMA WRITE */
+ r->write_cqe.done = nvmet_rdma_write_data_done;
+
return 0;
out_free_rsp:
@@ -466,8 +505,8 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
cmd->sge[0].addr, cmd->sge[0].length,
DMA_FROM_DEVICE);
- if (ndev->srq)
- ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+ if (cmd->nsrq)
+ ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
else
ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
@@ -500,6 +539,129 @@ static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
spin_unlock(&queue->rsp_wr_wait_lock);
}
+static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
+{
+ struct ib_mr_status mr_status;
+ int ret;
+ u16 status = 0;
+
+ ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ return NVME_SC_INVALID_PI;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ status = NVME_SC_GUARD_CHECK;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ status = NVME_SC_REFTAG_CHECK;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ status = NVME_SC_APPTAG_CHECK;
+ break;
+ }
+ pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ }
+
+ return status;
+}
+
+static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_domain *domain,
+ u16 control, u8 pi_type)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.bg_type = IB_T10DIF_CRC;
+ domain->sig.dif.pi_interval = 1 << bi->interval_exp;
+ domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ domain->sig.dif.ref_remap = true;
+
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_escape = true;
+ if (pi_type == NVME_NS_DPS_PI_TYPE3)
+ domain->sig.dif.ref_escape = true;
+}
+
+static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
+ struct ib_sig_attrs *sig_attrs)
+{
+ struct nvme_command *cmd = req->cmd;
+ u16 control = le16_to_cpu(cmd->rw.control);
+ u8 pi_type = req->ns->pi_type;
+ struct blk_integrity *bi;
+
+ bi = bdev_get_integrity(req->ns->bdev);
+
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+
+ if (control & NVME_RW_PRINFO_PRACT) {
+ /* for WRITE_INSERT/READ_STRIP no wire domain */
+ sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ /* Clear the PRACT bit since HCA will generate/verify the PI */
+ control &= ~NVME_RW_PRINFO_PRACT;
+ cmd->rw.control = cpu_to_le16(control);
+ /* PI is added by the HW */
+ req->transfer_len += req->metadata_len;
+ } else {
+ /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ }
+
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
+ if (control & NVME_RW_PRINFO_PRCHK_GUARD)
+ sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
+ if (control & NVME_RW_PRINFO_PRCHK_APP)
+ sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
+}
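Summarizing the two PRACT cases the function above distinguishes (a restatement of the code, not extra behavior):

	/*
	 * PRACT set:   wire domain = IB_SIG_TYPE_NONE, memory domain =
	 *              T10-DIF. The HCA inserts PI on write / strips it on
	 *              read, so the PRACT bit is cleared before the command
	 *              reaches the block layer and metadata_len is folded
	 *              into transfer_len.
	 *
	 * PRACT clear: both wire and memory domains = T10-DIF; PI travels
	 *              end to end unchanged.
	 *
	 * In both cases check_mask enables only the REF/GUARD/APP checks
	 * whose NVME_RW_PRINFO_PRCHK_* bits were set in the command.
	 */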
+
+static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
+ struct ib_sig_attrs *sig_attrs)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct nvmet_req *req = &rsp->req;
+ int ret;
+
+ if (req->metadata_len)
+ ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
+ cm_id->port_num, req->sg, req->sg_cnt,
+ req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
+ addr, key, nvmet_data_dir(req));
+ else
+ ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
+ req->sg, req->sg_cnt, 0, addr, key,
+ nvmet_data_dir(req));
+
+ return ret;
+}
+
+static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct nvmet_req *req = &rsp->req;
+
+ if (req->metadata_len)
+ rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
+ cm_id->port_num, req->sg, req->sg_cnt,
+ req->metadata_sg, req->metadata_sg_cnt,
+ nvmet_data_dir(req));
+ else
+ rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
+ req->sg, req->sg_cnt, nvmet_data_dir(req));
+}
static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
@@ -507,14 +669,11 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
- if (rsp->n_rdma) {
- rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
- queue->cm_id->port_num, rsp->req.sg,
- rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
- }
+ if (rsp->n_rdma)
+ nvmet_rdma_rw_ctx_destroy(rsp);
if (rsp->req.sg != rsp->cmd->inline_sg)
- nvmet_req_free_sgl(&rsp->req);
+ nvmet_req_free_sgls(&rsp->req);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -566,11 +725,16 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
rsp->send_wr.opcode = IB_WR_SEND;
}
- if (nvmet_rdma_need_data_out(rsp))
- first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
- cm_id->port_num, NULL, &rsp->send_wr);
- else
+ if (nvmet_rdma_need_data_out(rsp)) {
+ if (rsp->req.metadata_len)
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, &rsp->write_cqe, NULL);
+ else
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, NULL, &rsp->send_wr);
+ } else {
first_wr = &rsp->send_wr;
+ }
nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
@@ -589,15 +753,14 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
struct nvmet_rdma_queue *queue = cq->cq_context;
+ u16 status = 0;
WARN_ON(rsp->n_rdma <= 0);
atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
- rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
- queue->cm_id->port_num, rsp->req.sg,
- rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
rsp->n_rdma = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_rw_ctx_destroy(rsp);
nvmet_req_uninit(&rsp->req);
nvmet_rdma_release_rsp(rsp);
if (wc->status != IB_WC_WR_FLUSH_ERR) {
@@ -608,7 +771,58 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
- rsp->req.execute(&rsp->req);
+ if (rsp->req.metadata_len)
+ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
+ nvmet_rdma_rw_ctx_destroy(rsp);
+
+ if (unlikely(status))
+ nvmet_req_complete(&rsp->req, status);
+ else
+ rsp->req.execute(&rsp->req);
+}
+
+static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
+ struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ u16 status;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return;
+
+ WARN_ON(rsp->n_rdma <= 0);
+ atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+ rsp->n_rdma = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_rw_ctx_destroy(rsp);
+ nvmet_req_uninit(&rsp->req);
+ nvmet_rdma_release_rsp(rsp);
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_info("RDMA WRITE for CQE 0x%p failed with status %s (%d).\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status),
+ wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ /*
+ * Upon RDMA completion check the signature status
+ * - if succeeded send good NVMe response
+ * - if failed send bad NVMe response with appropriate error
+ */
+ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
+ if (unlikely(status))
+ rsp->req.cqe->status = cpu_to_le16(status << 1);
+ nvmet_rdma_rw_ctx_destroy(rsp);
+
+ if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
+ pr_err("sending cmd response failed\n");
+ nvmet_rdma_release_rsp(rsp);
+ }
}
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -665,9 +879,9 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
- struct rdma_cm_id *cm_id = rsp->queue->cm_id;
u64 addr = le64_to_cpu(sgl->addr);
u32 key = get_unaligned_le32(sgl->key);
+ struct ib_sig_attrs sig_attrs;
int ret;
rsp->req.transfer_len = get_unaligned_le24(sgl->length);
@@ -676,13 +890,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
if (!rsp->req.transfer_len)
return 0;
- ret = nvmet_req_alloc_sgl(&rsp->req);
+ if (rsp->req.metadata_len)
+ nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
+
+ ret = nvmet_req_alloc_sgls(&rsp->req);
if (unlikely(ret < 0))
goto error_out;
- ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
- rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
- nvmet_data_dir(&rsp->req));
+ ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
if (unlikely(ret < 0))
goto error_out;
rsp->n_rdma += ret;
@@ -845,23 +1060,40 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
nvmet_rdma_handle_command(queue, rsp);
}
-static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
+static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
{
- if (!ndev->srq)
+ nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
+ false);
+ ib_destroy_srq(nsrq->srq);
+
+ kfree(nsrq);
+}
+
+static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
+{
+ int i;
+
+ if (!ndev->srqs)
return;
- nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
- ib_destroy_srq(ndev->srq);
+ for (i = 0; i < ndev->srq_count; i++)
+ nvmet_rdma_destroy_srq(ndev->srqs[i]);
+
+ kfree(ndev->srqs);
}
-static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
+static struct nvmet_rdma_srq *
+nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
struct ib_srq_init_attr srq_attr = { NULL, };
+ size_t srq_size = ndev->srq_size;
+ struct nvmet_rdma_srq *nsrq;
struct ib_srq *srq;
- size_t srq_size;
int ret, i;
- srq_size = 4095; /* XXX: tune */
+ nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
+ if (!nsrq)
+ return ERR_PTR(-ENOMEM);
srq_attr.attr.max_wr = srq_size;
srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
@@ -869,35 +1101,73 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
srq_attr.srq_type = IB_SRQT_BASIC;
srq = ib_create_srq(ndev->pd, &srq_attr);
if (IS_ERR(srq)) {
- /*
- * If SRQs aren't supported we just go ahead and use normal
- * non-shared receive queues.
- */
- pr_info("SRQ requested but not supported.\n");
- return 0;
+ ret = PTR_ERR(srq);
+ goto out_free;
}
- ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
- if (IS_ERR(ndev->srq_cmds)) {
- ret = PTR_ERR(ndev->srq_cmds);
+ nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
+ if (IS_ERR(nsrq->cmds)) {
+ ret = PTR_ERR(nsrq->cmds);
goto out_destroy_srq;
}
- ndev->srq = srq;
- ndev->srq_size = srq_size;
+ nsrq->srq = srq;
+ nsrq->ndev = ndev;
for (i = 0; i < srq_size; i++) {
- ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+ nsrq->cmds[i].nsrq = nsrq;
+ ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
if (ret)
goto out_free_cmds;
}
- return 0;
+ return nsrq;
out_free_cmds:
- nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
+ nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
out_destroy_srq:
ib_destroy_srq(srq);
+out_free:
+ kfree(nsrq);
+ return ERR_PTR(ret);
+}
+
+static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
+{
+ int i, ret;
+
+ if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
+ /*
+ * If SRQs aren't supported we just go ahead and use normal
+ * non-shared receive queues.
+ */
+ pr_info("SRQ requested but not supported.\n");
+ return 0;
+ }
+
+ ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
+ nvmet_rdma_srq_size);
+ ndev->srq_count = min(ndev->device->num_comp_vectors,
+ ndev->device->attrs.max_srq);
+
+ ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
+ if (!ndev->srqs)
+ return -ENOMEM;
+
+ for (i = 0; i < ndev->srq_count; i++) {
+ ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
+ if (IS_ERR(ndev->srqs[i])) {
+ ret = PTR_ERR(ndev->srqs[i]);
+ goto err_srq;
+ }
+ }
+
+ return 0;
+
+err_srq:
+ while (--i >= 0)
+ nvmet_rdma_destroy_srq(ndev->srqs[i]);
+ kfree(ndev->srqs);
return ret;
}
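nvmet_rdma_srq_size, which caps the per-SRQ depth above, is assumed to be a module parameter added by this patch; a sketch of how such a knob is typically declared:

static int nvmet_rdma_srq_size = 1024;
module_param_named(srq_size, nvmet_rdma_srq_size, int, 0644);
MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");

Each connection then shares one of srq_count SRQs, selected by its completion vector, as the queue allocation hunk further down shows.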
@@ -910,7 +1180,7 @@ static void nvmet_rdma_free_dev(struct kref *ref)
list_del(&ndev->entry);
mutex_unlock(&device_list_mutex);
- nvmet_rdma_destroy_srq(ndev);
+ nvmet_rdma_destroy_srqs(ndev);
ib_dealloc_pd(ndev->pd);
kfree(ndev);
@@ -957,7 +1227,7 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
goto out_free_dev;
if (nvmet_rdma_use_srq) {
- ret = nvmet_rdma_init_srq(ndev);
+ ret = nvmet_rdma_init_srqs(ndev);
if (ret)
goto out_free_pd;
}
@@ -981,14 +1251,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
struct ib_qp_init_attr qp_attr;
struct nvmet_rdma_device *ndev = queue->dev;
- int comp_vector, nr_cqe, ret, i, factor;
-
- /*
- * Spread the io queues across completion vectors,
- * but still keep all admin queues on vector 0.
- */
- comp_vector = !queue->host_qid ? 0 :
- queue->idx % ndev->device->num_comp_vectors;
+ int nr_cqe, ret, i, factor;
/*
* Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
@@ -996,7 +1259,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
queue->cq = ib_alloc_cq(ndev->device, queue,
- nr_cqe + 1, comp_vector,
+ nr_cqe + 1, queue->comp_vector,
IB_POLL_WORKQUEUE);
if (IS_ERR(queue->cq)) {
ret = PTR_ERR(queue->cq);
@@ -1020,14 +1283,17 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
ndev->device->attrs.max_send_sge);
- if (ndev->srq) {
- qp_attr.srq = ndev->srq;
+ if (queue->nsrq) {
+ qp_attr.srq = queue->nsrq->srq;
} else {
/* +1 for drain */
qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
}
+ if (queue->port->pi_enable && queue->host_qid)
+ qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+
ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
if (ret) {
pr_err("failed to create_qp ret= %d\n", ret);
@@ -1041,7 +1307,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
__func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
qp_attr.cap.max_send_wr, queue->cm_id);
- if (!ndev->srq) {
+ if (!queue->nsrq) {
for (i = 0; i < queue->recv_queue_size; i++) {
queue->cmds[i].queue = queue;
ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
@@ -1076,7 +1342,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
nvmet_sq_destroy(&queue->nvme_sq);
nvmet_rdma_destroy_queue_ib(queue);
- if (!queue->dev->srq) {
+ if (!queue->nsrq) {
nvmet_rdma_free_cmds(queue->dev, queue->cmds,
queue->recv_queue_size,
!queue->host_qid);
@@ -1138,7 +1404,8 @@ static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
rej.sts = cpu_to_le16(status);
- return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
+ return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
}
static struct nvmet_rdma_queue *
@@ -1146,6 +1413,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
+ struct nvmet_rdma_port *port = cm_id->context;
struct nvmet_rdma_queue *queue;
int ret;
@@ -1172,6 +1440,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
queue->dev = ndev;
queue->cm_id = cm_id;
+ queue->port = port->nport;
spin_lock_init(&queue->state_lock);
queue->state = NVMET_RDMA_Q_CONNECTING;
@@ -1188,13 +1457,23 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
goto out_destroy_sq;
}
+ /*
+ * Spread the io queues across completion vectors,
+ * but still keep all admin queues on vector 0.
+ */
+ queue->comp_vector = !queue->host_qid ? 0 :
+ queue->idx % ndev->device->num_comp_vectors;
+

ret = nvmet_rdma_alloc_rsps(queue);
if (ret) {
ret = NVME_RDMA_CM_NO_RSC;
goto out_ida_remove;
}
- if (!ndev->srq) {
+ if (ndev->srqs) {
+ queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
+ } else {
queue->cmds = nvmet_rdma_alloc_cmds(ndev,
queue->recv_queue_size,
!queue->host_qid);
@@ -1215,7 +1494,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
return queue;
out_free_cmds:
- if (!ndev->srq) {
+ if (!queue->nsrq) {
nvmet_rdma_free_cmds(queue->dev, queue->cmds,
queue->recv_queue_size,
!queue->host_qid);
@@ -1241,6 +1520,10 @@ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
case IB_EVENT_COMM_EST:
rdma_notify(queue->cm_id, event->event);
break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ pr_debug("received last WQE reached event for queue=0x%p\n",
+ queue);
+ break;
default:
pr_err("received IB QP event: %s (%d)\n",
ib_event_msg(event->event), event->event);
@@ -1275,7 +1558,6 @@ static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
- struct nvmet_rdma_port *port = cm_id->context;
struct nvmet_rdma_device *ndev;
struct nvmet_rdma_queue *queue;
int ret = -EINVAL;
@@ -1291,7 +1573,6 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
ret = -ENOMEM;
goto put_device;
}
- queue->port = port->nport;
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
@@ -1563,6 +1844,14 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
+ if (port->nport->pi_enable &&
+ !(cm_id->device->attrs.device_cap_flags &
+ IB_DEVICE_INTEGRITY_HANDOVER)) {
+ pr_err("T10-PI is not supported for %pISpcs\n", addr);
+ ret = -EINVAL;
+ goto out_destroy_id;
+ }
+
port->cm_id = cm_id;
return 0;
@@ -1672,6 +1961,8 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
{
+ if (ctrl->pi_support)
+ return NVMET_RDMA_MAX_METADATA_MDTS;
return NVMET_RDMA_MAX_MDTS;
}
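MDTS is a power-of-two multiple of the controller's minimum page size, so the two constants bound the transfer size roughly as follows (a worked example assuming a 4K CAP.MPSMIN and the usual values of 8 and 5 for these macros):

/*
 * Assuming NVMET_RDMA_MAX_MDTS == 8 and NVMET_RDMA_MAX_METADATA_MDTS == 5:
 *   without PI: 2^8 * 4K = 1M   maximum data transfer per command
 *   with PI:    2^5 * 4K = 128K, kept smaller to bound the extra
 *               protection-information work done per I/O
 */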
@@ -1680,6 +1971,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.type = NVMF_TRTYPE_RDMA,
.msdbd = 1,
.has_keyed_sgls = 1,
+ .metadata_support = 1,
.add_port = nvmet_rdma_add_port,
.remove_port = nvmet_rdma_remove_port,
.queue_response = nvmet_rdma_queue_response,
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index f0da04e960f4..1669177cd26c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -325,6 +325,14 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}
+static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
+{
+ if (status == -EPIPE || status == -ECONNRESET)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ else
+ nvmet_tcp_fatal_error(queue);
+}
+
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
@@ -510,7 +518,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
offset_in_page(cmd->data_pdu) + cmd->offset,
- left, MSG_DONTWAIT | MSG_MORE);
+ left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
if (ret <= 0)
return ret;
@@ -538,7 +546,7 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
if ((!last_in_batch && cmd->queue->send_list_len) ||
cmd->wbytes_done + left < cmd->req.transfer_len ||
queue->data_digest || !queue->nvme_sq.sqhd_disabled)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
left, flags);
@@ -585,7 +593,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
int ret;
if (!last_in_batch && cmd->queue->send_list_len)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
else
flags |= MSG_EOR;
@@ -614,7 +622,7 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
int ret;
if (!last_in_batch && cmd->queue->send_list_len)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
else
flags |= MSG_EOR;
@@ -644,6 +652,8 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
if (!last_in_batch && cmd->queue->send_list_len)
msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (unlikely(ret <= 0))
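MSG_SENDPAGE_NOTLAST is a sendpage-only hint (sendmsg ignores it), which is why this kernel_sendmsg() path toggles only MSG_MORE/MSG_EOR while the kernel_sendpage() sites above add both flags. A condensed sketch of the convention now used by every fragmented send in this file (names illustrative):

        int flags = MSG_DONTWAIT;

        if (!last_fragment)
                /* more data follows: let TCP batch rather than push now */
                flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
        else
                flags |= MSG_EOR;       /* end of record: push it out */
        ret = kernel_sendpage(sock, page, offset, len, flags);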
@@ -716,11 +726,15 @@ static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
- if (ret <= 0)
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
break;
+ }
(*sends)++;
}
-
+done:
return ret;
}
@@ -1157,11 +1171,15 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_recv_one(queue);
- if (ret <= 0)
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
break;
+ }
(*recvs)++;
}
-
+done:
return ret;
}
@@ -1186,27 +1204,16 @@ static void nvmet_tcp_io_work(struct work_struct *w)
pending = false;
ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
- if (ret > 0) {
+ if (ret > 0)
pending = true;
- } else if (ret < 0) {
- if (ret == -EPIPE || ret == -ECONNRESET)
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- else
- nvmet_tcp_fatal_error(queue);
+ else if (ret < 0)
return;
- }
ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
- if (ret > 0) {
- /* transmitted message/data */
+ if (ret > 0)
pending = true;
- } else if (ret < 0) {
- if (ret == -EPIPE || ret == -ECONNRESET)
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- else
- nvmet_tcp_fatal_error(queue);
+ else if (ret < 0)
return;
- }
} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
@@ -1429,7 +1436,6 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
struct socket *sock = queue->sock;
struct inet_sock *inet = inet_sk(sock->sk);
- struct linger sol = { .l_onoff = 1, .l_linger = 0 };
int ret;
ret = kernel_getsockname(sock,
@@ -1447,27 +1453,14 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
* close. This is done to prevent stale data from being sent should
* the network connection be restored before TCP times out.
*/
- ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
- (char *)&sol, sizeof(sol));
- if (ret)
- return ret;
+ sock_no_linger(sock->sk);
- if (so_priority > 0) {
- ret = kernel_setsockopt(sock, SOL_SOCKET, SO_PRIORITY,
- (char *)&so_priority, sizeof(so_priority));
- if (ret)
- return ret;
- }
+ if (so_priority > 0)
+ sock_set_priority(sock->sk, so_priority);
/* Set socket type of service */
- if (inet->rcv_tos > 0) {
- int tos = inet->rcv_tos;
-
- ret = kernel_setsockopt(sock, SOL_IP, IP_TOS,
- (char *)&tos, sizeof(tos));
- if (ret)
- return ret;
- }
+ if (inet->rcv_tos > 0)
+ ip_sock_set_tos(sock->sk, inet->rcv_tos);
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_user_data = queue;
@@ -1588,7 +1581,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
struct nvmet_tcp_port *port;
__kernel_sa_family_t af;
- int opt, ret;
+ int ret;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
@@ -1632,30 +1625,10 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
port->sock->sk->sk_user_data = port;
port->data_ready = port->sock->sk->sk_data_ready;
port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
-
- opt = 1;
- ret = kernel_setsockopt(port->sock, IPPROTO_TCP,
- TCP_NODELAY, (char *)&opt, sizeof(opt));
- if (ret) {
- pr_err("failed to set TCP_NODELAY sock opt %d\n", ret);
- goto err_sock;
- }
-
- ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR,
- (char *)&opt, sizeof(opt));
- if (ret) {
- pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret);
- goto err_sock;
- }
-
- if (so_priority > 0) {
- ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_PRIORITY,
- (char *)&so_priority, sizeof(so_priority));
- if (ret) {
- pr_err("failed to set SO_PRIORITY sock opt %d\n", ret);
- goto err_sock;
- }
- }
+ sock_set_reuseaddr(port->sock->sk);
+ tcp_sock_set_nodelay(port->sock->sk);
+ if (so_priority > 0)
+ sock_set_priority(port->sock->sk, so_priority);
ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
sizeof(port->addr));
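These conversions assume the new typed socket helpers (sock_set_reuseaddr(), tcp_sock_set_nodelay(), sock_set_priority(), and, earlier in the file, sock_no_linger() and ip_sock_set_tos()); unlike kernel_setsockopt() they cannot fail, which is why the error paths disappear. A minimal sketch of the resulting setup:

static void example_setup_listen_sock(struct socket *sock, int prio)
{
        sock_set_reuseaddr(sock->sk);     /* was SO_REUSEADDR = 1 */
        tcp_sock_set_nodelay(sock->sk);   /* was TCP_NODELAY = 1 */
        if (prio > 0)
                sock_set_priority(sock->sk, prio); /* was SO_PRIORITY */
}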
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index e645caa882dd..0458046d6501 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -130,6 +130,34 @@ TRACE_EVENT(nvmet_req_complete,
);
+#define aer_name(aer) { aer, #aer }
+
+TRACE_EVENT(nvmet_async_event,
+ TP_PROTO(struct nvmet_ctrl *ctrl, __le32 result),
+ TP_ARGS(ctrl, result),
+ TP_STRUCT__entry(
+ __field(int, ctrl_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = ctrl->cntlid;
+ __entry->result = (le32_to_cpu(result) & 0xff00) >> 8;
+ ),
+ TP_printk("nvmet%d: NVME_AEN=%#08x [%s]",
+ __entry->ctrl_id, __entry->result,
+ __print_symbolic(__entry->result,
+ aer_name(NVME_AER_NOTICE_NS_CHANGED),
+ aer_name(NVME_AER_NOTICE_ANA),
+ aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+ aer_name(NVME_AER_NOTICE_DISC_CHANGED),
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
+ )
+);
+#undef aer_name
+
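The new tracepoint would be fired from the AEN completion path in the nvmet core; a hedged sketch of a call site (exact location assumed, not shown in this hunk):

        /* e.g. just before the async event response is queued */
        trace_nvmet_async_event(ctrl, req->cqe->result.u32);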
#endif /* _TRACE_NVMET_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 05c6ae4b0b97..927eb5f6003f 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -66,6 +66,30 @@ static LIST_HEAD(nvmem_lookup_list);
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
+static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ if (nvmem->reg_read)
+ return nvmem->reg_read(nvmem->priv, offset, val, bytes);
+
+ return -EINVAL;
+}
+
+static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ int ret;
+
+ if (nvmem->reg_write) {
+ gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
+ ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
+ gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
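The write path now pulses the optional write-protect GPIO low for the duration of the write. Providers need no special handling; a sketch of a provider relying on the core-managed "wp" GPIO (all names illustrative, accessor bodies not shown):

static int example_nvmem_probe(struct platform_device *pdev)
{
        struct nvmem_config config = {
                .dev = &pdev->dev,
                .name = "example",
                .size = 256,
                .word_size = 1,
                .reg_read = example_reg_read,   /* provider accessors */
                .reg_write = example_reg_write,
                /* .wp_gpio left NULL: the core requests the "wp" GPIO */
        };

        return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &config));
}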
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
[NVMEM_TYPE_UNKNOWN] = "Unknown",
@@ -122,7 +146,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
if (!nvmem->reg_read)
return -EPERM;
- rc = nvmem->reg_read(nvmem->priv, pos, buf, count);
+ rc = nvmem_reg_read(nvmem, pos, buf, count);
if (rc)
return rc;
@@ -159,7 +183,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
if (!nvmem->reg_write)
return -EPERM;
- rc = nvmem->reg_write(nvmem->priv, pos, buf, count);
+ rc = nvmem_reg_write(nvmem, pos, buf, count);
if (rc)
return rc;
@@ -167,11 +191,8 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
return count;
}
-static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *attr, int i)
+static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nvmem_device *nvmem = to_nvmem_device(dev);
umode_t mode = 0400;
if (!nvmem->root_only)
@@ -189,6 +210,15 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
return mode;
}
+static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return nvmem_bin_attr_get_umode(nvmem);
+}
+
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
@@ -215,34 +245,14 @@ static const struct attribute_group *nvmem_dev_groups[] = {
NULL,
};
-/* read only permission */
-static struct bin_attribute bin_attr_ro_nvmem = {
+static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
- .name = "nvmem",
- .mode = 0444,
- },
- .read = bin_attr_nvmem_read,
-};
-
-/* default read/write permissions, root only */
-static struct bin_attribute bin_attr_rw_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0600,
+ .name = "eeprom",
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
};
-/* read only permission, root only */
-static struct bin_attribute bin_attr_ro_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0400,
- },
- .read = bin_attr_nvmem_read,
-};
-
/*
* nvmem_setup_compat() - Create an additional binary entry in
 * driver's sysfs directory, to be backwards compatible with the older
@@ -259,18 +269,8 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
if (!config->base_dev)
return -EINVAL;
- if (nvmem->read_only) {
- if (config->root_only)
- nvmem->eeprom = bin_attr_ro_root_nvmem;
- else
- nvmem->eeprom = bin_attr_ro_nvmem;
- } else {
- if (config->root_only)
- nvmem->eeprom = bin_attr_rw_root_nvmem;
- else
- nvmem->eeprom = bin_attr_rw_nvmem;
- }
- nvmem->eeprom.attr.name = "eeprom";
+ nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
+ nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
nvmem->eeprom.attr.key = &eeprom_lock_key;
@@ -311,30 +311,6 @@ static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
#endif /* CONFIG_NVMEM_SYSFS */
-static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
- void *val, size_t bytes)
-{
- if (nvmem->reg_read)
- return nvmem->reg_read(nvmem->priv, offset, val, bytes);
-
- return -EINVAL;
-}
-
-static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
- void *val, size_t bytes)
-{
- int ret;
-
- if (nvmem->reg_write) {
- gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
- ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
- gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
- return ret;
- }
-
- return -EINVAL;
-}
-
static void nvmem_release(struct device *dev)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 50bea2aadc1b..7a1ebd6fd08b 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -196,7 +196,6 @@ static int imx_ocotp_read(void *context, unsigned int offset,
if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL)
imx_ocotp_clr_err_if_set(priv);
}
- ret = 0;
read_end:
clk_disable_unprepare(priv->clk);
@@ -435,17 +434,13 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
priv->base + IMX_OCOTP_ADDR_CTRL_SET);
ret = imx_ocotp_wait_for_busy(priv,
priv->params->ctrl.bm_rel_shadows);
- if (ret < 0) {
+ if (ret < 0)
dev_err(priv->dev, "timeout during shadow register reload\n");
- goto write_end;
- }
write_end:
clk_disable_unprepare(priv->clk);
mutex_unlock(&ocotp_mutex);
- if (ret < 0)
- return ret;
- return bytes;
+ return ret < 0 ? ret : bytes;
}
static struct nvmem_config imx_ocotp_nvmem_config = {
diff --git a/drivers/nvmem/jz4780-efuse.c b/drivers/nvmem/jz4780-efuse.c
index 512e1872ba36..0b01b840edd9 100644
--- a/drivers/nvmem/jz4780-efuse.c
+++ b/drivers/nvmem/jz4780-efuse.c
@@ -211,10 +211,8 @@ static int jz4780_efuse_probe(struct platform_device *pdev)
cfg.priv = efuse;
nvmem = devm_nvmem_register(dev, &cfg);
- if (IS_ERR(nvmem))
- return PTR_ERR(nvmem);
- return 0;
+ return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id jz4780_efuse_match[] = {
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index d057f1bfb2e9..8a91717600be 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -27,25 +27,11 @@ static int qfprom_reg_read(void *context,
return 0;
}
-static int qfprom_reg_write(void *context,
- unsigned int reg, void *_val, size_t bytes)
-{
- struct qfprom_priv *priv = context;
- u8 *val = _val;
- int i = 0, words = bytes;
-
- while (words--)
- writeb(*val++, priv->base + reg + i++);
-
- return 0;
-}
-
static struct nvmem_config econfig = {
.name = "qfprom",
.stride = 1,
.word_size = 1,
.reg_read = qfprom_reg_read,
- .reg_write = qfprom_reg_write,
};
static int qfprom_probe(struct platform_device *pdev)
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 5893543918c8..e28d7b133e11 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -16,8 +16,6 @@ struct zynqmp_nvmem_data {
struct nvmem_device *nvmem;
};
-static const struct zynqmp_eemi_ops *eemi_ops;
-
static int zynqmp_nvmem_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
@@ -25,10 +23,7 @@ static int zynqmp_nvmem_read(void *context, unsigned int offset,
int idcode, version;
struct zynqmp_nvmem_data *priv = context;
- if (!eemi_ops->get_chipid)
- return -ENXIO;
-
- ret = eemi_ops->get_chipid(&idcode, &version);
+ ret = zynqmp_pm_get_chipid(&idcode, &version);
if (ret < 0)
return ret;
@@ -61,10 +56,6 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
priv->dev = dev;
econfig.dev = dev;
econfig.reg_read = zynqmp_nvmem_read;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 08fd823edac9..fe64430b438a 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -286,7 +286,6 @@ int of_detach_node(struct device_node *np)
{
struct of_reconfig_data rd;
unsigned long flags;
- int rc = 0;
memset(&rd, 0, sizeof(rd));
rd.dn = np;
@@ -301,7 +300,7 @@ int of_detach_node(struct device_node *np)
of_reconfig_notify(OF_RECONFIG_DETACH_NODE, &rd);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(of_detach_node);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2cdf64d2456f..4602e467ca8b 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -471,7 +471,7 @@ void *initial_boot_params __ro_after_init;
static u32 of_fdt_crc32;
/**
- * res_mem_reserve_reg() - reserve all memory described in 'reg' property
+ * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
*/
static int __init __reserved_mem_reserve_reg(unsigned long node,
const char *uname)
@@ -643,8 +643,6 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
offset = fdt_next_node(blob, offset, &depth)) {
pathp = fdt_get_name(blob, offset, NULL);
- if (*pathp == '/')
- pathp = kbasename(pathp);
rc = it(offset, pathp, depth, data);
}
return rc;
@@ -671,8 +669,6 @@ int __init of_scan_flat_dt_subnodes(unsigned long parent,
int rc;
pathp = fdt_get_name(blob, node, NULL);
- if (*pathp == '/')
- pathp = kbasename(pathp);
rc = it(node, pathp, data);
if (rc)
return rc;
@@ -1078,7 +1074,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
#endif
#endif /* CONFIG_CMDLINE */
- pr_debug("Command line is: %s\n", (char*)data);
+ pr_debug("Command line is: %s\n", (char *)data);
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
if (rng_seed && l > 0) {
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index c72eef988041..a32e60b024b8 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -134,8 +134,6 @@ int __of_attach_node_sysfs(struct device_node *np)
if (!name)
return -ENOMEM;
- of_node_get(np);
-
rc = kobject_add(&np->kobj, parent, "%s", name);
kfree(name);
if (rc)
@@ -144,6 +142,7 @@ int __of_attach_node_sysfs(struct device_node *np)
for_each_property_of_node(np, pp)
__of_add_property_sysfs(np, pp);
+ of_node_get(np);
return 0;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 9f982c0627a0..a04afe79529c 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -60,39 +60,15 @@ static struct mii_timestamper *of_find_mii_timestamper(struct device_node *node)
return register_mii_timestamper(arg.np, arg.args[0]);
}
-static int of_mdiobus_register_phy(struct mii_bus *mdio,
- struct device_node *child, u32 addr)
+int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy,
+ struct device_node *child, u32 addr)
{
- struct mii_timestamper *mii_ts;
- struct phy_device *phy;
- bool is_c45;
int rc;
- u32 phy_id;
-
- mii_ts = of_find_mii_timestamper(child);
- if (IS_ERR(mii_ts))
- return PTR_ERR(mii_ts);
-
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
-
- if (!is_c45 && !of_get_phy_id(child, &phy_id))
- phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
- else
- phy = get_phy_device(mdio, addr, is_c45);
- if (IS_ERR(phy)) {
- if (mii_ts)
- unregister_mii_timestamper(mii_ts);
- return PTR_ERR(phy);
- }
rc = of_irq_get(child, 0);
- if (rc == -EPROBE_DEFER) {
- if (mii_ts)
- unregister_mii_timestamper(mii_ts);
- phy_device_free(phy);
+ if (rc == -EPROBE_DEFER)
return rc;
- }
+
if (rc > 0) {
phy->irq = rc;
mdio->irq[addr] = rc;
@@ -118,10 +94,47 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
* register it */
rc = phy_device_register(phy);
if (rc) {
+ of_node_put(child);
+ return rc;
+ }
+
+ dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
+ child, addr);
+ return 0;
+}
+EXPORT_SYMBOL(of_mdiobus_phy_device_register);
+
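of_mdiobus_phy_device_register() is exported so MDIO bus drivers that create their PHY devices by hand can still attach DT nodes and IRQs; a hedged sketch of an external caller:

        struct phy_device *phy;
        int err;

        phy = get_phy_device(mdio, addr, false);
        if (IS_ERR(phy))
                return PTR_ERR(phy);

        err = of_mdiobus_phy_device_register(mdio, phy, child, addr);
        if (err) {
                phy_device_free(phy);
                return err;
        }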
+static int of_mdiobus_register_phy(struct mii_bus *mdio,
+ struct device_node *child, u32 addr)
+{
+ struct mii_timestamper *mii_ts;
+ struct phy_device *phy;
+ bool is_c45;
+ int rc;
+ u32 phy_id;
+
+ mii_ts = of_find_mii_timestamper(child);
+ if (IS_ERR(mii_ts))
+ return PTR_ERR(mii_ts);
+
+ is_c45 = of_device_is_compatible(child,
+ "ethernet-phy-ieee802.3-c45");
+
+ if (!is_c45 && !of_get_phy_id(child, &phy_id))
+ phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
+ else
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (IS_ERR(phy)) {
+ if (mii_ts)
+ unregister_mii_timestamper(mii_ts);
+ return PTR_ERR(phy);
+ }
+
+ rc = of_mdiobus_phy_device_register(mdio, phy, child, addr);
+ if (rc) {
if (mii_ts)
unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
- of_node_put(child);
return rc;
}
@@ -132,8 +145,6 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
if (mii_ts)
phy->mii_ts = mii_ts;
- dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
- child, addr);
return 0;
}
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 1a84bc0d5fa8..6877080c8af9 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -46,7 +46,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
}
/**
- * res_mem_save_node() - save fdt node for second pass initialization
+ * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
*/
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size)
@@ -68,8 +68,8 @@ void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
}
/**
- * res_mem_alloc_size() - allocate reserved memory described by 'size', 'align'
- * and 'alloc-ranges' properties
+ * __reserved_mem_alloc_size() - allocate reserved memory described by
+ * 'size', 'align' and 'alloc-ranges' properties.
*/
static int __init __reserved_mem_alloc_size(unsigned long node,
const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
@@ -165,7 +165,7 @@ static const struct of_device_id __rmem_of_table_sentinel
__used __section(__reservedmem_of_table_end);
/**
- * res_mem_init_node() - call region specific reserved memory init code
+ * __reserved_mem_init_node() - call region specific reserved memory init code
*/
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
@@ -232,7 +232,7 @@ static void __init __rmem_check_for_overlap(void)
}
/**
- * fdt_init_reserved_mem - allocate and init all saved reserved memory regions
+ * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
*/
void __init fdt_init_reserved_mem(void)
{
@@ -358,6 +358,25 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
/**
+ * of_reserved_mem_device_init_by_name() - assign named reserved memory region
+ * to given device
+ * @dev: pointer to the device to configure
+ * @np: pointer to the device node with 'memory-region' property
+ * @name: name of the selected memory region
+ *
+ * Returns: 0 on success or a negative error-code on failure.
+ */
+int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name)
+{
+ int idx = of_property_match_string(np, "memory-region-names", name);
+
+ return of_reserved_mem_device_init_by_idx(dev, np, idx);
+}
+EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
+
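A consumer selects a region by the name listed in memory-region-names; a hedged usage sketch (the DT snippet in the comment is illustrative):

/*
 * memory-region = <&dma_pool>, <&fw_carveout>;
 * memory-region-names = "dma", "firmware";
 */
static int example_probe(struct platform_device *pdev)
{
        int ret;

        ret = of_reserved_mem_device_init_by_name(&pdev->dev,
                                                  pdev->dev.of_node, "dma");
        if (ret)
                return ret;
        /* ... dma_alloc_coherent() now draws from the named region ... */
        return 0;
}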
+/**
* of_reserved_mem_device_release() - release reserved memory device structures
* @dev: Pointer to the device to deconfigure
*
@@ -366,24 +385,22 @@ EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
*/
void of_reserved_mem_device_release(struct device *dev)
{
- struct rmem_assigned_device *rd;
- struct reserved_mem *rmem = NULL;
+ struct rmem_assigned_device *rd, *tmp;
+ LIST_HEAD(release_list);
mutex_lock(&of_rmem_assigned_device_mutex);
- list_for_each_entry(rd, &of_rmem_assigned_device_list, list) {
- if (rd->dev == dev) {
- rmem = rd->rmem;
- list_del(&rd->list);
- kfree(rd);
- break;
- }
+ list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
+ if (rd->dev == dev)
+ list_move_tail(&rd->list, &release_list);
}
mutex_unlock(&of_rmem_assigned_device_mutex);
- if (!rmem || !rmem->ops || !rmem->ops->device_release)
- return;
+ list_for_each_entry_safe(rd, tmp, &release_list, list) {
+ if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
+ rd->rmem->ops->device_release(rd->rmem, dev);
- rmem->ops->device_release(rmem, dev);
+ kfree(rd);
+ }
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 3371e4a06248..071f04da32c8 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -291,7 +291,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
#endif /* CONFIG_ARM_AMBA */
/**
- * of_devname_lookup() - Given a device node, lookup the preferred Linux name
+ * of_dev_lookup() - Given a device node, lookup the preferred Linux name
*/
static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup,
struct device_node *np)
@@ -538,7 +538,9 @@ static int __init of_platform_default_populate_init(void)
}
/* Populate everything else. */
+ fw_devlink_pause();
of_platform_default_populate(NULL, NULL, NULL);
+ fw_devlink_resume();
return 0;
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
index b4916dcc9e72..1f2086f4e7ce 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1045,8 +1045,20 @@ static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
* Find the device node that contains the supplier phandle. It may be
* @sup_np or it may be an ancestor of @sup_np.
*/
- while (sup_np && !of_find_property(sup_np, "compatible", NULL))
+ while (sup_np) {
+ /* Don't allow linking to a disabled supplier */
+ if (!of_device_is_available(sup_np)) {
+ of_node_put(sup_np);
+ sup_np = NULL;
+ }
+
+ if (of_find_property(sup_np, "compatible", NULL))
+ break;
+
sup_np = of_get_next_parent(sup_np);
+ }
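One subtlety in the loop above, since both calls now run after sup_np may have been cleared:

/*
 * Safe despite sup_np being NULL after the put: of_find_property(NULL, ...)
 * simply returns NULL (so the break is skipped) and of_get_next_parent(NULL)
 * returns NULL, terminating the while loop on its next evaluation.
 */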
+
if (!sup_np) {
dev_dbg(dev, "Not linking to %pOFP - No device\n", tmp_np);
return -ENODEV;
@@ -1074,7 +1086,7 @@ static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
return -EAGAIN;
}
if (!device_link_add(dev, sup_dev, dl_flags))
- ret = -EAGAIN;
+ ret = -EINVAL;
put_device(sup_dev);
return ret;
}
@@ -1206,6 +1218,7 @@ DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
+DEFINE_SIMPLE_PROP(extcon, "extcon", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
@@ -1230,6 +1243,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_dmas, },
{ .parse_prop = parse_power_domains, },
{ .parse_prop = parse_hwlocks, },
+ { .parse_prop = parse_extcon, },
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
@@ -1296,7 +1310,7 @@ static int of_link_to_suppliers(struct device *dev,
if (of_link_property(dev, con_np, p->name))
ret = -ENODEV;
- for_each_child_of_node(con_np, child)
+ for_each_available_child_of_node(con_np, child)
if (of_link_to_suppliers(dev, child) && !ret)
ret = -EAGAIN;
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 12ea4a4ad607..6c9edc8bbc95 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -113,7 +113,7 @@ static int event_buffer_open(struct inode *inode, struct file *file)
{
int err = -EPERM;
- if (!capable(CAP_SYS_ADMIN))
+ if (!perfmon_capable())
return -EPERM;
if (test_and_set_bit_lock(0, &buffer_opened))
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 3b00e2c8e2e9..6d78ec3a762f 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -30,12 +30,6 @@
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK(stuff...) printk(stuff)
-#else
-#define DPRINTK(stuff...)
-#endif
-
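Throughout the parport patches below, per-file DPRINTK wrappers are dropped in favor of pr_debug(), which compiles to nothing unless enabled; a sketch of the two ways a site gets turned on (runtime path assumes CONFIG_DYNAMIC_DEBUG):

/* Build time: define DEBUG before the printk machinery is included */
#define DEBUG
#include <linux/printk.h>

/* Run time (dynamic debug), no rebuild needed, e.g. from a shell:
 *   echo 'file daisy.c +p' > /sys/kernel/debug/dynamic_debug/control
 */
pr_debug("%s: probing\n", port->name);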
static struct daisydev {
struct daisydev *next;
struct parport *port;
@@ -145,8 +139,7 @@ again:
((num_ports = num_mux_ports(port)) == 2 || num_ports == 4)) {
/* Leave original as port zero. */
port->muxport = 0;
- printk(KERN_INFO
- "%s: 1st (default) port of %d-way multiplexor\n",
+ pr_info("%s: 1st (default) port of %d-way multiplexor\n",
port->name, num_ports);
for (i = 1; i < num_ports; i++) {
/* Clone the port. */
@@ -159,8 +152,7 @@ again:
continue;
}
- printk(KERN_INFO
- "%s: %d%s port of %d-way multiplexor on %s\n",
+ pr_info("%s: %d%s port of %d-way multiplexor on %s\n",
extra->name, i + 1, th[i + 1], num_ports,
port->name);
@@ -323,8 +315,7 @@ static int cpp_daisy(struct parport *port, int cmd)
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: cpp_daisy: aa5500ff(%02x)\n",
- port->name, s);
+ pr_debug("%s: cpp_daisy: aa5500ff(%02x)\n", port->name, s);
return -ENXIO;
}
@@ -334,8 +325,7 @@ static int cpp_daisy(struct parport *port, int cmd)
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: cpp_daisy: aa5500ff87(%02x)\n",
- port->name, s);
+ pr_debug("%s: cpp_daisy: aa5500ff87(%02x)\n", port->name, s);
return -ENXIO;
}
@@ -370,7 +360,7 @@ static int cpp_mux(struct parport *port, int cmd)
s = parport_read_status(port);
if (!(s & PARPORT_STATUS_ACK)) {
- DPRINTK(KERN_DEBUG "%s: cpp_mux: aa55f00f52ad%02x(%02x)\n",
+ pr_debug("%s: cpp_mux: aa55f00f52ad%02x(%02x)\n",
port->name, cmd, s);
return -EIO;
}
@@ -456,8 +446,7 @@ static int assign_addrs(struct parport *port)
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff(%02x)\n",
- port->name, s);
+ pr_debug("%s: assign_addrs: aa5500ff(%02x)\n", port->name, s);
return 0;
}
@@ -467,8 +456,7 @@ static int assign_addrs(struct parport *port)
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff87(%02x)\n",
- port->name, s);
+ pr_debug("%s: assign_addrs: aa5500ff87(%02x)\n", port->name, s);
return 0;
}
@@ -505,8 +493,7 @@ static int assign_addrs(struct parport *port)
parport_write_data(port, 0xff); udelay(2);
detected = numdevs - thisdev;
- DPRINTK(KERN_DEBUG "%s: Found %d daisy-chained devices\n", port->name,
- detected);
+ pr_debug("%s: Found %d daisy-chained devices\n", port->name, detected);
/* Ask the new devices to introduce themselves. */
deviceid = kmalloc(1024, GFP_KERNEL);
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 90fb73575495..f28d6a3c5a68 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -31,12 +31,6 @@
#undef DEBUG /* Don't want a garbled console */
#endif
-#ifdef DEBUG
-#define DPRINTK(stuff...) printk (stuff)
-#else
-#define DPRINTK(stuff...)
-#endif
-
/* Make parport_wait_peripheral wake up.
* It will be useful to call this from an interrupt handler. */
static void parport_ieee1284_wakeup (struct parport *port)
@@ -258,12 +252,11 @@ static void parport_ieee1284_terminate (struct parport *port)
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r)
- DPRINTK (KERN_INFO "%s: Timeout at event 49\n",
+ pr_debug("%s: Timeout at event 49\n",
port->name);
parport_data_forward (port);
- DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
- port->name);
+ pr_debug("%s: ECP direction: forward\n", port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
@@ -281,8 +274,7 @@ static void parport_ieee1284_terminate (struct parport *port)
/* Event 24: nAck goes low */
r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
if (r)
- DPRINTK (KERN_INFO "%s: Timeout at event 24\n",
- port->name);
+ pr_debug("%s: Timeout at event 24\n", port->name);
/* Event 25: Set nAutoFd low */
parport_frob_control (port,
@@ -294,8 +286,7 @@ static void parport_ieee1284_terminate (struct parport *port)
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK);
if (r)
- DPRINTK (KERN_INFO "%s: Timeout at event 27\n",
- port->name);
+ pr_debug("%s: Timeout at event 27\n", port->name);
/* Event 29: Set nAutoFd high */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
@@ -304,8 +295,7 @@ static void parport_ieee1284_terminate (struct parport *port)
port->ieee1284.mode = IEEE1284_MODE_COMPAT;
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
- DPRINTK (KERN_DEBUG "%s: In compatibility (forward idle) mode\n",
- port->name);
+ pr_debug("%s: In compatibility (forward idle) mode\n", port->name);
}
#endif /* IEEE1284 support */
@@ -329,7 +319,7 @@ int parport_negotiate (struct parport *port, int mode)
#ifndef CONFIG_PARPORT_1284
if (mode == IEEE1284_MODE_COMPAT)
return 0;
- printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
+ pr_err("parport: IEEE1284 not supported in this kernel\n");
return -1;
#else
int m = mode & ~IEEE1284_ADDR;
@@ -406,8 +396,7 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_SELECT);
- DPRINTK (KERN_DEBUG
- "%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
+ pr_debug("%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
port->name, parport_read_status (port));
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return -1; /* Not IEEE1284 compliant */
@@ -430,8 +419,7 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant device. */
- DPRINTK (KERN_DEBUG
- "%s: Mode 0x%02x not supported? (0x%02x)\n",
+ pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode, port->ops->read_status (port));
parport_ieee1284_terminate (port);
return 1;
@@ -442,7 +430,7 @@ int parport_negotiate (struct parport *port, int mode)
/* xflag should be high for all modes other than nibble (0). */
if (mode && !xflag) {
/* Mode not supported. */
- DPRINTK (KERN_DEBUG "%s: Mode 0x%02x rejected by peripheral\n",
+ pr_debug("%s: Mode 0x%02x rejected by peripheral\n",
port->name, mode);
parport_ieee1284_terminate (port);
return 1;
@@ -463,9 +451,7 @@ int parport_negotiate (struct parport *port, int mode)
/* Event 52: nAck goes low */
if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
/* This peripheral is _very_ slow. */
- DPRINTK (KERN_DEBUG
- "%s: Event 52 didn't happen\n",
- port->name);
+ pr_debug("%s: Event 52 didn't happen\n", port->name);
parport_ieee1284_terminate (port);
return 1;
}
@@ -481,10 +467,9 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant
* device. */
- DPRINTK (KERN_DEBUG
- "%s: Mode 0x%02x not supported? (0x%02x)\n",
+ pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode,
- port->ops->read_status (port));
+ port->ops->read_status(port));
parport_ieee1284_terminate (port);
return 1;
}
@@ -495,8 +480,8 @@ int parport_negotiate (struct parport *port, int mode)
/* xflag should be high. */
if (!xflag) {
/* Extended mode not supported. */
- DPRINTK (KERN_DEBUG "%s: Extended mode 0x%02x not "
- "supported\n", port->name, mode);
+ pr_debug("%s: Extended mode 0x%02x not supported\n",
+ port->name, mode);
parport_ieee1284_terminate (port);
return 1;
}
@@ -505,7 +490,7 @@ int parport_negotiate (struct parport *port, int mode)
}
/* Mode is supported */
- DPRINTK (KERN_DEBUG "%s: In mode 0x%02x\n", port->name, mode);
+ pr_debug("%s: In mode 0x%02x\n", port->name, mode);
port->ieee1284.mode = mode;
/* But ECP is special */
@@ -522,13 +507,11 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- DPRINTK (KERN_INFO "%s: Timeout at event 31\n",
- port->name);
+ pr_debug("%s: Timeout at event 31\n", port->name);
}
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
- DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
- port->name);
+ pr_debug("%s: ECP direction: forward\n", port->name);
} else switch (mode) {
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
@@ -573,7 +556,7 @@ void parport_ieee1284_interrupt (void *handle)
if (port->ieee1284.phase == IEEE1284_PH_REV_IDLE) {
/* An interrupt in this phase means that data
* is now available. */
- DPRINTK (KERN_DEBUG "%s: Data available\n", port->name);
+ pr_debug("%s: Data available\n", port->name);
parport_ieee1284_ack_data_avail (port);
}
#endif /* IEEE1284 support */
@@ -617,13 +600,12 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
parport_negotiate (port, IEEE1284_MODE_COMPAT);
/* fall through */
case IEEE1284_MODE_COMPAT:
- DPRINTK (KERN_DEBUG "%s: Using compatibility mode\n",
- port->name);
+ pr_debug("%s: Using compatibility mode\n", port->name);
fn = port->ops->compat_write_data;
break;
case IEEE1284_MODE_EPP:
- DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
+ pr_debug("%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_write_addr;
} else {
@@ -631,8 +613,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
}
break;
case IEEE1284_MODE_EPPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated EPP mode\n", port->name);
if (addr) {
fn = parport_ieee1284_epp_write_addr;
} else {
@@ -641,7 +622,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
- DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
+ pr_debug("%s: Using ECP mode\n", port->name);
if (addr) {
fn = port->ops->ecp_write_addr;
} else {
@@ -650,8 +631,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
break;
case IEEE1284_MODE_ECPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated ECP mode\n", port->name);
/* The caller has specified that it must be emulated,
* even if we have ECP hardware! */
if (addr) {
@@ -662,13 +642,13 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
break;
default:
- DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
- port->ieee1284.mode);
+ pr_debug("%s: Unknown mode 0x%02x\n",
+ port->name, port->ieee1284.mode);
return -ENOSYS;
}
retval = (*fn) (port, buffer, len, 0);
- DPRINTK (KERN_DEBUG "%s: wrote %d/%d bytes\n", port->name, retval, len);
+ pr_debug("%s: wrote %zd/%zu bytes\n", port->name, retval, len);
return retval;
#endif /* IEEE1284 support */
}
@@ -694,7 +674,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
ssize_t parport_read (struct parport *port, void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
- printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
+ pr_err("parport: IEEE1284 not supported in this kernel\n");
return -ENODEV;
#else
int mode = port->physport->ieee1284.mode;
@@ -715,7 +695,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
if ((port->physport->modes & PARPORT_MODE_TRISTATE) &&
!parport_negotiate (port, IEEE1284_MODE_BYTE)) {
/* got into BYTE mode OK */
- DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
+ pr_debug("%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
}
@@ -724,17 +704,17 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
}
/* fall through - to NIBBLE */
case IEEE1284_MODE_NIBBLE:
- DPRINTK (KERN_DEBUG "%s: Using nibble mode\n", port->name);
+ pr_debug("%s: Using nibble mode\n", port->name);
fn = port->ops->nibble_read_data;
break;
case IEEE1284_MODE_BYTE:
- DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
+ pr_debug("%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
case IEEE1284_MODE_EPP:
- DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
+ pr_debug("%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_read_addr;
} else {
@@ -742,8 +722,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
}
break;
case IEEE1284_MODE_EPPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated EPP mode\n", port->name);
if (addr) {
fn = parport_ieee1284_epp_read_addr;
} else {
@@ -752,19 +731,18 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
- DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
+ pr_debug("%s: Using ECP mode\n", port->name);
fn = port->ops->ecp_read_data;
break;
case IEEE1284_MODE_ECPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated ECP mode\n", port->name);
fn = parport_ieee1284_ecp_read_data;
break;
default:
- DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
- port->physport->ieee1284.mode);
+ pr_debug("%s: Unknown mode 0x%02x\n",
+ port->name, port->physport->ieee1284.mode);
return -ENOSYS;
}
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index 5d41dda6da4e..2c11bd3fe1fd 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -27,12 +27,6 @@
#undef DEBUG /* Don't want a garbled console */
#endif
-#ifdef DEBUG
-#define DPRINTK(stuff...) printk (stuff)
-#else
-#define DPRINTK(stuff...)
-#endif
-
/*** *
* One-way data transfer functions. *
* ***/
@@ -115,7 +109,7 @@ size_t parport_ieee1284_write_compat (struct parport *port,
if (signal_pending (current))
break;
- DPRINTK (KERN_DEBUG "%s: Timed out\n", port->name);
+ pr_debug("%s: Timed out\n", port->name);
break;
ready:
@@ -178,9 +172,8 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK, 0)) {
/* Timeout -- no more data? */
- DPRINTK (KERN_DEBUG
- "%s: Nibble timeout at event 9 (%d bytes)\n",
- port->name, i/2);
+ pr_debug("%s: Nibble timeout at event 9 (%d bytes)\n",
+ port->name, i / 2);
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
break;
}
@@ -201,8 +194,7 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* Timeout -- no more data? */
- DPRINTK (KERN_DEBUG
- "%s: Nibble timeout at event 11\n",
+ pr_debug("%s: Nibble timeout at event 11\n",
port->name);
break;
}
@@ -219,9 +211,8 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
/* Read the last nibble without checking data avail. */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
end_of_data:
- DPRINTK (KERN_DEBUG
- "%s: No more nibble data (%d bytes)\n",
- port->name, i/2);
+ pr_debug("%s: No more nibble data (%d bytes)\n",
+ port->name, i / 2);
/* Go to reverse idle phase. */
parport_frob_control (port,
@@ -272,8 +263,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
/* Timeout -- no more data? */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD,
0);
- DPRINTK (KERN_DEBUG "%s: Byte timeout at event 9\n",
- port->name);
+ pr_debug("%s: Byte timeout at event 9\n", port->name);
break;
}
@@ -288,8 +278,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* Timeout -- no more data? */
- DPRINTK (KERN_DEBUG "%s: Byte timeout at event 11\n",
- port->name);
+ pr_debug("%s: Byte timeout at event 11\n", port->name);
break;
}
@@ -307,8 +296,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
/* Read the last byte without checking data avail. */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
end_of_data:
- DPRINTK (KERN_DEBUG
- "%s: No more byte data (%zd bytes)\n",
+ pr_debug("%s: No more byte data (%zd bytes)\n",
port->name, count);
/* Go to reverse idle phase. */
@@ -353,12 +341,10 @@ int ecp_forward_to_reverse (struct parport *port)
PARPORT_STATUS_PAPEROUT, 0);
if (!retval) {
- DPRINTK (KERN_DEBUG "%s: ECP direction: reverse\n",
- port->name);
+ pr_debug("%s: ECP direction: reverse\n", port->name);
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
} else {
- DPRINTK (KERN_DEBUG "%s: ECP direction: failed to reverse\n",
- port->name);
+ pr_debug("%s: ECP direction: failed to reverse\n", port->name);
port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
@@ -384,12 +370,10 @@ int ecp_reverse_to_forward (struct parport *port)
if (!retval) {
parport_data_forward (port);
- DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
- port->name);
+ pr_debug("%s: ECP direction: forward\n", port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
} else {
- DPRINTK (KERN_DEBUG
- "%s: ECP direction: failed to switch forward\n",
+ pr_debug("%s: ECP direction: failed to switch forward\n",
port->name);
port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
@@ -450,7 +434,7 @@ size_t parport_ieee1284_ecp_write_data (struct parport *port,
}
/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
- DPRINTK (KERN_DEBUG "%s: ECP transfer stalled!\n", port->name);
+ pr_debug("%s: ECP transfer stalled!\n", port->name);
parport_frob_control (port, PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
@@ -466,8 +450,7 @@ size_t parport_ieee1284_ecp_write_data (struct parport *port,
if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
break;
- DPRINTK (KERN_DEBUG "%s: Host transfer recovered\n",
- port->name);
+ pr_debug("%s: Host transfer recovered\n", port->name);
if (time_after_eq (jiffies, expire)) break;
goto try_again;
@@ -565,23 +548,20 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
command or a normal data byte, don't accept it. */
if (command) {
if (byte & 0x80) {
- DPRINTK (KERN_DEBUG "%s: stopping short at "
- "channel command (%02x)\n",
+ pr_debug("%s: stopping short at channel command (%02x)\n",
port->name, byte);
goto out;
}
else if (port->ieee1284.mode != IEEE1284_MODE_ECPRLE)
- DPRINTK (KERN_DEBUG "%s: device illegally "
- "using RLE; accepting anyway\n",
+ pr_debug("%s: device illegally using RLE; accepting anyway\n",
port->name);
rle_count = byte + 1;
/* Are we allowed to read that many bytes? */
if (rle_count > (len - count)) {
- DPRINTK (KERN_DEBUG "%s: leaving %d RLE bytes "
- "for next time\n", port->name,
- rle_count);
+ pr_debug("%s: leaving %d RLE bytes for next time\n",
+ port->name, rle_count);
break;
}
@@ -596,11 +576,10 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
PARPORT_STATUS_ACK)) {
/* It's gone wrong. Return what data we have
to the caller. */
- DPRINTK (KERN_DEBUG "ECP read timed out at 45\n");
+ pr_debug("ECP read timed out at 45\n");
if (command)
- printk (KERN_WARNING
- "%s: command ignored (%02x)\n",
+ pr_warn("%s: command ignored (%02x)\n",
port->name, byte);
break;
@@ -620,7 +599,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
memset (buf, byte, rle_count);
buf += rle_count;
count += rle_count;
- DPRINTK (KERN_DEBUG "%s: decompressed to %d bytes\n",
+ pr_debug("%s: decompressed to %d bytes\n",
port->name, rle_count);
} else {
/* Normal data byte. */
@@ -686,7 +665,7 @@ size_t parport_ieee1284_ecp_write_addr (struct parport *port,
}
/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
- DPRINTK (KERN_DEBUG "%s: ECP transfer stalled!\n", port->name);
+ pr_debug("%s: ECP transfer stalled!\n", port->name);
parport_frob_control (port, PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
@@ -702,8 +681,7 @@ size_t parport_ieee1284_ecp_write_addr (struct parport *port,
if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
break;
- DPRINTK (KERN_DEBUG "%s: Host transfer recovered\n",
- port->name);
+ pr_debug("%s: Host transfer recovered\n", port->name);
if (time_after_eq (jiffies, expire)) break;
goto try_again;
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 3301861f69fa..1e88bcfe0d7b 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -28,16 +28,10 @@
#include <asm/amigaints.h>
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-#define DPRINTK(x...) do { } while (0)
-#endif
-
static void amiga_write_data(struct parport *p, unsigned char data)
{
- DPRINTK(KERN_DEBUG "write_data %c\n",data);
+ pr_debug("write_data %c\n", data);
/* Also triggers /STROBE. This behavior cannot be changed */
ciaa.prb = data;
mb();
@@ -59,13 +53,13 @@ static unsigned char control_amiga_to_pc(unsigned char control)
static void amiga_write_control(struct parport *p, unsigned char control)
{
- DPRINTK(KERN_DEBUG "write_control %02x\n",control);
+ pr_debug("write_control %02x\n", control);
/* No implementation possible */
}
static unsigned char amiga_read_control( struct parport *p)
{
- DPRINTK(KERN_DEBUG "read_control \n");
+ pr_debug("read_control\n");
return control_amiga_to_pc(0);
}
@@ -73,7 +67,7 @@ static unsigned char amiga_frob_control( struct parport *p, unsigned char mask,
{
unsigned char old;
- DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val);
+ pr_debug("frob_control mask %02x, value %02x\n", mask, val);
old = amiga_read_control(p);
amiga_write_control(p, (old & ~mask) ^ val);
return old;
@@ -99,7 +93,7 @@ static unsigned char amiga_read_status(struct parport *p)
unsigned char status;
status = status_amiga_to_pc(ciab.pra & 7);
- DPRINTK(KERN_DEBUG "read_status %02x\n", status);
+ pr_debug("read_status %02x\n", status);
return status;
}
@@ -115,14 +109,14 @@ static void amiga_disable_irq(struct parport *p)
static void amiga_data_forward(struct parport *p)
{
- DPRINTK(KERN_DEBUG "forward\n");
+ pr_debug("forward\n");
ciaa.ddrb = 0xff; /* all pins output */
mb();
}
static void amiga_data_reverse(struct parport *p)
{
- DPRINTK(KERN_DEBUG "reverse\n");
+ pr_debug("reverse\n");
ciaa.ddrb = 0; /* all pins input */
mb();
}
@@ -212,7 +206,7 @@ static int __init amiga_parallel_probe(struct platform_device *pdev)
if (err)
goto out_irq;
- printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
+ pr_info("%s: Amiga built-in port using irq\n", p->name);
/* XXX: set operating mode */
parport_announce_port(p);
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c
index f8dd368bfdbb..2ff0fe053e6e 100644
--- a/drivers/parport/parport_atari.c
+++ b/drivers/parport/parport_atari.c
@@ -200,7 +200,7 @@ static int __init parport_atari_init(void)
}
this_port = p;
- printk(KERN_INFO "%s: Atari built-in port using irq\n", p->name);
+ pr_info("%s: Atari built-in port using irq\n", p->name);
parport_announce_port (p);
return 0;
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index e77044c2bf62..8e7e3ac4bb87 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -142,10 +142,8 @@ static int parport_config(struct pcmcia_device *link)
link->irq, PARPORT_DMA_NONE,
&link->dev, IRQF_SHARED);
if (p == NULL) {
- printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
- "0x%3x, irq %u failed\n",
- (unsigned int) link->resource[0]->start,
- link->irq);
+ pr_notice("parport_cs: parport_pc_probe_port() at 0x%3x, irq %u failed\n",
+ (unsigned int)link->resource[0]->start, link->irq);
goto failed;
}
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 922535a118ba..9228e8f90309 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -238,14 +238,14 @@ struct parport *parport_gsc_probe_port(unsigned long base,
priv = kzalloc (sizeof (struct parport_gsc_private), GFP_KERNEL);
if (!priv) {
- printk (KERN_DEBUG "parport (0x%lx): no memory!\n", base);
+ printk(KERN_DEBUG "parport (0x%lx): no memory!\n", base);
return NULL;
}
ops = kmemdup(&parport_gsc_ops, sizeof(struct parport_operations),
GFP_KERNEL);
if (!ops) {
- printk (KERN_DEBUG "parport (0x%lx): no memory for ops!\n",
- base);
+ printk(KERN_DEBUG "parport (0x%lx): no memory for ops!\n",
+ base);
kfree (priv);
return NULL;
}
@@ -282,7 +282,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
p->private_data = priv;
- printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
+ pr_info("%s: PC-style at 0x%lx", p->name, p->base);
p->irq = irq;
if (p->irq == PARPORT_IRQ_AUTO) {
p->irq = PARPORT_IRQ_NONE;
@@ -299,12 +299,16 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->dma = PARPORT_DMA_NONE;
pr_cont(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
{
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
- printmode(COMPAT)
+ printmode(COMPAT);
printmode(EPP);
// printmode(ECP);
// printmode(DMA);
@@ -315,8 +319,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,
0, p->name, p)) {
- printk (KERN_WARNING "%s: irq %d in use, "
- "resorting to polled operation\n",
+ pr_warn("%s: irq %d in use, resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
p->dma = PARPORT_DMA_NONE;
@@ -347,7 +350,7 @@ static int __init parport_init_chip(struct parisc_device *dev)
unsigned long port;
if (!dev->irq) {
- printk(KERN_WARNING "IRQ not found for parallel device at 0x%llx\n",
+ pr_warn("IRQ not found for parallel device at 0x%llx\n",
(unsigned long long)dev->hpa.start);
return -ENODEV;
}
@@ -360,11 +363,11 @@ static int __init parport_init_chip(struct parisc_device *dev)
if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) {
/* Initialize bidirectional-mode (0x10) & data-transfer-mode #1 (0x20) */
- printk("%s: initialize bidirectional-mode.\n", __func__);
+ pr_info("%s: initialize bidirectional-mode\n", __func__);
parport_writeb ( (0x10 + 0x20), port + 4);
} else {
- printk("%s: enhanced parport-modes not supported.\n", __func__);
+ pr_info("%s: enhanced parport-modes not supported\n", __func__);
}
p = parport_gsc_probe_port(port, 0, dev->irq,
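
The printmode() rewrite above is more than a style fix. The old single-line macro expanded to a brace block, so a call without a trailing semicolon (as the stray "printmode(COMPAT)" in the hunk shows) still compiled, and adding the semicolon would break an un-braced if/else around it. Wrapping the body in do { ... } while (0) makes the macro behave as one statement that requires its semicolon. A small sketch, with hypothetical names:

/*
 * Brace-block macro: "if (x) OLD_MODE(y); else ..." fails to compile,
 * because the expansion ends in "};" just before the else.
 */
#define OLD_MODE(x)	{ if (modes & (x)) count++; }

/* do-while(0) macro: expands to a single statement, safe everywhere. */
#define NEW_MODE(x)	do { if (modes & (x)) count++; } while (0)

static int modes, count;

static void demo(void)
{
	if (modes)
		NEW_MODE(0x01);		/* semicolon required and safe */
	else
		count = 0;
}
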
diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h
index 4c4d3c6cd77e..9301217edf12 100644
--- a/drivers/parport/parport_gsc.h
+++ b/drivers/parport/parport_gsc.h
@@ -71,7 +71,7 @@ struct parport_gsc_private {
static inline void parport_gsc_write_data(struct parport *p, unsigned char d)
{
#ifdef DEBUG_PARPORT
- printk (KERN_DEBUG "parport_gsc_write_data(%p,0x%02x)\n", p, d);
+ printk(KERN_DEBUG "%s(%p,0x%02x)\n", __func__, p, d);
#endif
parport_writeb(d, DATA(p));
}
@@ -80,8 +80,7 @@ static inline unsigned char parport_gsc_read_data(struct parport *p)
{
unsigned char val = parport_readb (DATA (p));
#ifdef DEBUG_PARPORT
- printk (KERN_DEBUG "parport_gsc_read_data(%p) = 0x%02x\n",
- p, val);
+ printk(KERN_DEBUG "%s(%p) = 0x%02x\n", __func__, p, val);
#endif
return val;
}
@@ -95,9 +94,9 @@ static inline unsigned char __parport_gsc_frob_control(struct parport *p,
struct parport_gsc_private *priv = p->physport->private_data;
unsigned char ctr = priv->ctr;
#ifdef DEBUG_PARPORT
- printk (KERN_DEBUG
- "__parport_gsc_frob_control(%02x,%02x): %02x -> %02x\n",
- mask, val, ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable);
+ printk(KERN_DEBUG "%s(%02x,%02x): %02x -> %02x\n",
+ __func__, mask, val,
+ ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable);
#endif
ctr = (ctr & ~mask) ^ val;
ctr &= priv->ctr_writable; /* only write writable bits. */
@@ -126,8 +125,8 @@ static inline void parport_gsc_write_control(struct parport *p,
/* Take this out when drivers have adapted to newer interface. */
if (d & 0x20) {
- printk (KERN_DEBUG "%s (%s): use data_reverse for this!\n",
- p->name, p->cad->name);
+ printk(KERN_DEBUG "%s (%s): use data_reverse for this!\n",
+ p->name, p->cad->name);
parport_gsc_data_reverse (p);
}
@@ -155,9 +154,9 @@ static inline unsigned char parport_gsc_frob_control(struct parport *p,
/* Take this out when drivers have adapted to newer interface. */
if (mask & 0x20) {
- printk (KERN_DEBUG "%s (%s): use data_%s for this!\n",
- p->name, p->cad->name,
- (val & 0x20) ? "reverse" : "forward");
+ printk(KERN_DEBUG "%s (%s): use data_%s for this!\n",
+ p->name, p->cad->name,
+ (val & 0x20) ? "reverse" : "forward");
if (val & 0x20)
parport_gsc_data_reverse (p);
else
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index ab215b650f41..48b084e86dc6 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -328,19 +328,19 @@ static void parport_ip32_dump_state(struct parport *p, char *str,
"TST", "CFG"};
unsigned int ecr = readb(priv->regs.ecr);
printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr);
- printk(" %s",
- ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
+ pr_cont(" %s",
+ ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
if (ecr & ECR_nERRINTR)
- printk(",nErrIntrEn");
+ pr_cont(",nErrIntrEn");
if (ecr & ECR_DMAEN)
- printk(",dmaEn");
+ pr_cont(",dmaEn");
if (ecr & ECR_SERVINTR)
- printk(",serviceIntr");
+ pr_cont(",serviceIntr");
if (ecr & ECR_F_FULL)
- printk(",f_full");
+ pr_cont(",f_full");
if (ecr & ECR_F_EMPTY)
- printk(",f_empty");
- printk("\n");
+ pr_cont(",f_empty");
+ pr_cont("\n");
}
if (show_ecp_config) {
unsigned int oecr, cnfgA, cnfgB;
@@ -352,52 +352,53 @@ static void parport_ip32_dump_state(struct parport *p, char *str,
writeb(ECR_MODE_PS2, priv->regs.ecr);
writeb(oecr, priv->regs.ecr);
printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA);
- printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
+ pr_cont(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
switch (cnfgA & CNFGA_ID_MASK) {
case CNFGA_ID_8:
- printk(",8 bits");
+ pr_cont(",8 bits");
break;
case CNFGA_ID_16:
- printk(",16 bits");
+ pr_cont(",16 bits");
break;
case CNFGA_ID_32:
- printk(",32 bits");
+ pr_cont(",32 bits");
break;
default:
- printk(",unknown ID");
+ pr_cont(",unknown ID");
break;
}
if (!(cnfgA & CNFGA_nBYTEINTRANS))
- printk(",ByteInTrans");
+ pr_cont(",ByteInTrans");
if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
- printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT,
- ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
- printk("\n");
+ pr_cont(",%d byte%s left",
+ cnfgA & CNFGA_PWORDLEFT,
+ ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
+ pr_cont("\n");
printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB);
- printk(" irq=%u,dma=%u",
- (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
- (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
- printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
+ pr_cont(" irq=%u,dma=%u",
+ (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
+ (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
+ pr_cont(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
if (cnfgB & CNFGB_COMPRESS)
- printk(",compress");
- printk("\n");
+ pr_cont(",compress");
+ pr_cont("\n");
}
for (i = 0; i < 2; i++) {
unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x",
i ? "soft" : "hard", dcr);
- printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
+ pr_cont(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
if (dcr & DCR_IRQ)
- printk(",ackIntEn");
+ pr_cont(",ackIntEn");
if (!(dcr & DCR_SELECT))
- printk(",nSelectIn");
+ pr_cont(",nSelectIn");
if (dcr & DCR_nINIT)
- printk(",nInit");
+ pr_cont(",nInit");
if (!(dcr & DCR_AUTOFD))
- printk(",nAutoFD");
+ pr_cont(",nAutoFD");
if (!(dcr & DCR_STROBE))
- printk(",nStrobe");
- printk("\n");
+ pr_cont(",nStrobe");
+ pr_cont("\n");
}
#define sep (f++ ? ',' : ' ')
{
@@ -405,20 +406,20 @@ static void parport_ip32_dump_state(struct parport *p, char *str,
unsigned int dsr = readb(priv->regs.dsr);
printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr);
if (!(dsr & DSR_nBUSY))
- printk("%cBusy", sep);
+ pr_cont("%cBusy", sep);
if (dsr & DSR_nACK)
- printk("%cnAck", sep);
+ pr_cont("%cnAck", sep);
if (dsr & DSR_PERROR)
- printk("%cPError", sep);
+ pr_cont("%cPError", sep);
if (dsr & DSR_SELECT)
- printk("%cSelect", sep);
+ pr_cont("%cSelect", sep);
if (dsr & DSR_nFAULT)
- printk("%cnFault", sep);
+ pr_cont("%cnFault", sep);
if (!(dsr & DSR_nPRINT))
- printk("%c(Print)", sep);
+ pr_cont("%c(Print)", sep);
if (dsr & DSR_TIMEOUT)
- printk("%cTimeout", sep);
- printk("\n");
+ pr_cont("%cTimeout", sep);
+ pr_cont("\n");
}
#undef sep
}
@@ -1337,9 +1338,8 @@ static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
ecr = parport_ip32_read_econtrol(p);
if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
&& !lost_interrupt) {
- printk(KERN_WARNING PPIP32
- "%s: lost interrupt in %s\n",
- p->name, __func__);
+ pr_warn(PPIP32 "%s: lost interrupt in %s\n",
+ p->name, __func__);
lost_interrupt = 1;
}
}
@@ -1643,8 +1643,8 @@ static size_t parport_ip32_compat_write_data(struct parport *p,
DSR_nBUSY | DSR_nFAULT)) {
/* Avoid flooding the logs */
if (ready_before)
- printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
- p->name, __func__);
+ pr_info(PPIP32 "%s: not ready in %s\n",
+ p->name, __func__);
ready_before = 0;
goto stop;
}
@@ -1704,7 +1704,7 @@ static size_t parport_ip32_ecp_write_data(struct parport *p,
/* Event 49: PError goes high. */
if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
- printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s",
+ printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n",
p->name, __func__);
physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
return 0;
@@ -1724,8 +1724,8 @@ static size_t parport_ip32_ecp_write_data(struct parport *p,
DSR_nBUSY | DSR_nFAULT)) {
/* Avoid flooding the logs */
if (ready_before)
- printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
- p->name, __func__);
+ pr_info(PPIP32 "%s: not ready in %s\n",
+ p->name, __func__);
ready_before = 0;
goto stop;
}
@@ -2064,8 +2064,7 @@ static __init struct parport *parport_ip32_probe_port(void)
p->modes |= PARPORT_MODE_TRISTATE;
if (!parport_ip32_fifo_supported(p)) {
- printk(KERN_WARNING PPIP32
- "%s: error: FIFO disabled\n", p->name);
+ pr_warn(PPIP32 "%s: error: FIFO disabled\n", p->name);
/* Disable hardware modes depending on a working FIFO. */
features &= ~PARPORT_IP32_ENABLE_SPP;
features &= ~PARPORT_IP32_ENABLE_ECP;
@@ -2077,8 +2076,7 @@ static __init struct parport *parport_ip32_probe_port(void)
if (features & PARPORT_IP32_ENABLE_IRQ) {
int irq = MACEISA_PARALLEL_IRQ;
if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
- printk(KERN_WARNING PPIP32
- "%s: error: IRQ disabled\n", p->name);
+ pr_warn(PPIP32 "%s: error: IRQ disabled\n", p->name);
/* DMA cannot work without interrupts. */
features &= ~PARPORT_IP32_ENABLE_DMA;
} else {
@@ -2091,8 +2089,7 @@ static __init struct parport *parport_ip32_probe_port(void)
/* Allocate DMA resources */
if (features & PARPORT_IP32_ENABLE_DMA) {
if (parport_ip32_dma_register())
- printk(KERN_WARNING PPIP32
- "%s: error: DMA disabled\n", p->name);
+ pr_warn(PPIP32 "%s: error: DMA disabled\n", p->name);
else {
pr_probe(p, "DMA support enabled\n");
p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
@@ -2134,13 +2131,15 @@ static __init struct parport *parport_ip32_probe_port(void)
parport_ip32_dump_state(p, "end init", 0);
/* Print out what we found */
- printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
- p->name, p->base, p->base_hi);
+ pr_info("%s: SGI IP32 at 0x%lx (0x%lx)", p->name, p->base, p->base_hi);
if (p->irq != PARPORT_IRQ_NONE)
- printk(", irq %d", p->irq);
- printk(" [");
-#define printmode(x) if (p->modes & PARPORT_MODE_##x) \
- printk("%s%s", f++ ? "," : "", #x)
+ pr_cont(", irq %d", p->irq);
+ pr_cont(" [");
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
{
unsigned int f = 0;
printmode(PCSPP);
@@ -2151,7 +2150,7 @@ static __init struct parport *parport_ip32_probe_port(void)
printmode(DMA);
}
#undef printmode
- printk("]\n");
+ pr_cont("]\n");
parport_announce_port(p);
return p;
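
The dump-state changes above deal with printk record boundaries: a printk() that does not start with KERN_CONT opens a new log record, so the old bare printk(" ...") fragments could each land on their own line. pr_cont() marks a fragment as a continuation of the record opened by the preceding printk(KERN_DEBUG ...). A reduced sketch of the pattern, assuming a kernel context:

#include <linux/printk.h>

static void dump_dsr(unsigned int dsr)
{
	int f = 0;

	printk(KERN_DEBUG "dsr=0x%02x", dsr);	/* opens the record */
	if (dsr & 0x01)
		pr_cont("%cBusy", f++ ? ',' : ' ');
	if (dsr & 0x02)
		pr_cont("%cnAck", f++ ? ',' : ' ');
	pr_cont("\n");				/* terminates the line */
}
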
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 9f87faf939e3..d6bbe8446301 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -70,11 +70,6 @@
#define MAX_MFC 5
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-static inline int DPRINTK(void *nothing, ...) {return 0;}
-#endif
static struct parport *this_port[MAX_MFC] = {NULL, };
static volatile int dummy; /* for trigger reads */
@@ -84,7 +79,7 @@ static struct parport_operations pp_mfc3_ops;
static void mfc3_write_data(struct parport *p, unsigned char data)
{
-DPRINTK(KERN_DEBUG "write_data %c\n",data);
+ pr_debug("write_data %c\n", data);
dummy = pia(p)->pprb; /* clears irq bit */
/* Triggers also /STROBE.*/
@@ -128,13 +123,13 @@ static unsigned char control_mfc3_to_pc(unsigned char control)
static void mfc3_write_control(struct parport *p, unsigned char control)
{
-DPRINTK(KERN_DEBUG "write_control %02x\n",control);
+ pr_debug("write_control %02x\n", control);
pia(p)->ppra = (pia(p)->ppra & 0x1f) | control_pc_to_mfc3(control);
}
static unsigned char mfc3_read_control( struct parport *p)
{
-DPRINTK(KERN_DEBUG "read_control \n");
+ pr_debug("read_control\n");
return control_mfc3_to_pc(pia(p)->ppra & 0xe0);
}
@@ -142,7 +137,7 @@ static unsigned char mfc3_frob_control( struct parport *p, unsigned char mask, u
{
unsigned char old;
-DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val);
+ pr_debug("frob_control mask %02x, value %02x\n", mask, val);
old = mfc3_read_control(p);
mfc3_write_control(p, (old & ~mask) ^ val);
return old;
@@ -171,7 +166,7 @@ static unsigned char mfc3_read_status(struct parport *p)
unsigned char status;
status = status_mfc3_to_pc(pia(p)->ppra & 0x1f);
-DPRINTK(KERN_DEBUG "read_status %02x\n", status);
+ pr_debug("read_status %02x\n", status);
return status;
}
@@ -202,7 +197,7 @@ static void mfc3_disable_irq(struct parport *p)
static void mfc3_data_forward(struct parport *p)
{
- DPRINTK(KERN_DEBUG "forward\n");
+ pr_debug("forward\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 255; /* all pins output */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
@@ -210,7 +205,7 @@ static void mfc3_data_forward(struct parport *p)
static void mfc3_data_reverse(struct parport *p)
{
- DPRINTK(KERN_DEBUG "reverse\n");
+ pr_debug("reverse\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 0; /* all pins input */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
@@ -325,7 +320,7 @@ static int __init parport_mfc3_init(void)
p->dev = &z->dev;
this_port[pias++] = p;
- printk(KERN_INFO "%s: Multiface III port using irq\n", p->name);
+ pr_info("%s: Multiface III port using irq\n", p->name);
/* XXX: set operating mode */
p->private_data = (void *)piabase;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1f17a39eabe8..77e37e3cb3a0 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -87,13 +87,6 @@
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-#define DPRINTK(stuff...)
-#endif
-
-
#define NR_SUPERIOS 3
static struct superio_struct { /* For Super-IO chips autodetection */
int io;
@@ -118,8 +111,8 @@ static void frob_econtrol(struct parport *pb, unsigned char m,
if (m != 0xff)
ectr = inb(ECONTROL(pb));
- DPRINTK(KERN_DEBUG "frob_econtrol(%02x,%02x): %02x -> %02x\n",
- m, v, ectr, (ectr & ~m) ^ v);
+ pr_debug("frob_econtrol(%02x,%02x): %02x -> %02x\n",
+ m, v, ectr, (ectr & ~m) ^ v);
outb((ectr & ~m) ^ v, ECONTROL(pb));
}
@@ -142,7 +135,7 @@ static int change_mode(struct parport *p, int m)
unsigned char oecr;
int mode;
- DPRINTK(KERN_INFO "parport change_mode ECP-ISA to mode 0x%02x\n", m);
+ pr_debug("parport change_mode ECP-ISA to mode 0x%02x\n", m);
if (!priv->ecr) {
printk(KERN_DEBUG "change_mode: but there's no ECR!\n");
@@ -298,8 +291,8 @@ static size_t parport_pc_epp_read_data(struct parport *port, void *buf,
status = inb(STATUS(port));
if (status & 0x01) {
/* EPP timeout should never occur... */
- printk(KERN_DEBUG
-"%s: EPP timeout occurred while talking to w91284pic (should not have done)\n", port->name);
+ printk(KERN_DEBUG "%s: EPP timeout occurred while talking to w91284pic (should not have done)\n",
+ port->name);
clear_epp_timeout(port);
}
}
@@ -727,7 +720,7 @@ static size_t parport_pc_compat_write_block_pio(struct parport *port,
r = change_mode(port, ECR_PPF); /* Parallel port FIFO */
if (r)
printk(KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n",
- port->name);
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
@@ -770,9 +763,8 @@ static size_t parport_pc_compat_write_block_pio(struct parport *port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
- printk(KERN_DEBUG
- "%s: BUSY timeout (%d) in compat_write_block_pio\n",
- port->name, r);
+ printk(KERN_DEBUG "%s: BUSY timeout (%d) in compat_write_block_pio\n",
+ port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
@@ -810,8 +802,8 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- printk(KERN_DEBUG "%s: PError timeout (%d) "
- "in ecp_write_block_pio\n", port->name, r);
+ printk(KERN_DEBUG "%s: PError timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
}
}
@@ -824,7 +816,7 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
r = change_mode(port, ECR_ECP); /* ECP FIFO */
if (r)
printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
- port->name);
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* Write the data to the FIFO. */
@@ -867,8 +859,8 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
parport_frob_control(port, PARPORT_CONTROL_INIT, 0);
r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
if (r)
- printk(KERN_DEBUG "%s: PE,1 timeout (%d) "
- "in ecp_write_block_pio\n", port->name, r);
+ printk(KERN_DEBUG "%s: PE,1 timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
parport_frob_control(port,
PARPORT_CONTROL_INIT,
@@ -877,17 +869,16 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r)
- printk(KERN_DEBUG "%s: PE,2 timeout (%d) "
- "in ecp_write_block_pio\n", port->name, r);
+ printk(KERN_DEBUG "%s: PE,2 timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
}
r = parport_wait_peripheral(port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
- printk(KERN_DEBUG
- "%s: BUSY timeout (%d) in ecp_write_block_pio\n",
- port->name, r);
+ printk(KERN_DEBUG "%s: BUSY timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
@@ -982,28 +973,24 @@ static void show_parconfig_smsc37c669(int io, int key)
outb(0xaa, io);
if (verbose_probing) {
- printk(KERN_INFO
- "SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, "
- "A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
+ pr_info("SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
cr1, cr4, cra, cr23, cr26, cr27);
/* The documentation calls DMA and IRQ-Lines by letters, so
the board maker can/will wire them
appropriately/randomly... G=reserved H=IDE-irq, */
- printk(KERN_INFO
- "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
- cr23 * 4,
- (cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
- (cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
- cra & 0x0f);
- printk(KERN_INFO "SMSC LPT Config: enabled=%s power=%s\n",
- (cr23 * 4 >= 0x100) ? "yes" : "no",
- (cr1 & 4) ? "yes" : "no");
- printk(KERN_INFO
- "SMSC LPT Config: Port mode=%s, EPP version =%s\n",
- (cr1 & 0x08) ? "Standard mode only (SPP)"
- : modes[cr4 & 0x03],
- (cr4 & 0x40) ? "1.7" : "1.9");
+ pr_info("SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
+ cr23 * 4,
+ (cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
+ (cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
+ cra & 0x0f);
+ pr_info("SMSC LPT Config: enabled=%s power=%s\n",
+ (cr23 * 4 >= 0x100) ? "yes" : "no",
+ (cr1 & 4) ? "yes" : "no");
+ pr_info("SMSC LPT Config: Port mode=%s, EPP version =%s\n",
+ (cr1 & 0x08) ? "Standard mode only (SPP)"
+ : modes[cr4 & 0x03],
+ (cr4 & 0x40) ? "1.7" : "1.9");
}
/* Heuristics ! BIOS setup for this mainboard device limits
@@ -1013,7 +1000,7 @@ static void show_parconfig_smsc37c669(int io, int key)
if (cr23 * 4 >= 0x100) { /* if active */
s = find_free_superio();
if (s == NULL)
- printk(KERN_INFO "Super-IO: too many chips!\n");
+ pr_info("Super-IO: too many chips!\n");
else {
int d;
switch (cr23 * 4) {
@@ -1078,26 +1065,24 @@ static void show_parconfig_winbond(int io, int key)
outb(0xaa, io);
if (verbose_probing) {
- printk(KERN_INFO
- "Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
- cr30, cr60, cr61, cr70, cr74, crf0);
- printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
- (cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
+ pr_info("Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
+ cr30, cr60, cr61, cr70, cr74, crf0);
+ pr_info("Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
+ (cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
if ((cr74 & 0x07) > 3)
pr_cont("dma=none\n");
else
pr_cont("dma=%d\n", cr74 & 0x07);
- printk(KERN_INFO
- "Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
- irqtypes[crf0>>7], (crf0>>3)&0x0f);
- printk(KERN_INFO "Winbond LPT Config: Port mode=%s\n",
- modes[crf0 & 0x07]);
+ pr_info("Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
+ irqtypes[crf0 >> 7], (crf0 >> 3) & 0x0f);
+ pr_info("Winbond LPT Config: Port mode=%s\n",
+ modes[crf0 & 0x07]);
}
if (cr30 & 0x01) { /* the settings can be interrogated later ... */
s = find_free_superio();
if (s == NULL)
- printk(KERN_INFO "Super-IO: too many chips!\n");
+ pr_info("Super-IO: too many chips!\n");
else {
s->io = (cr60 << 8) | cr61;
s->irq = cr70 & 0x0f;
@@ -1151,9 +1136,8 @@ static void decode_winbond(int efer, int key, int devid, int devrev, int oldid)
progif = 0;
if (verbose_probing)
- printk(KERN_INFO "Winbond chip at EFER=0x%x key=0x%02x "
- "devid=%02x devrev=%02x oldid=%02x type=%s\n",
- efer, key, devid, devrev, oldid, type);
+ pr_info("Winbond chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x oldid=%02x type=%s\n",
+ efer, key, devid, devrev, oldid, type);
if (progif == 2)
show_parconfig_winbond(efer, key);
@@ -1184,9 +1168,8 @@ static void decode_smsc(int efer, int key, int devid, int devrev)
type = "37c666GT";
if (verbose_probing)
- printk(KERN_INFO "SMSC chip at EFER=0x%x "
- "key=0x%02x devid=%02x devrev=%02x type=%s\n",
- efer, key, devid, devrev, type);
+ pr_info("SMSC chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x type=%s\n",
+ efer, key, devid, devrev, type);
if (func)
func(efer, key);
@@ -1358,7 +1341,7 @@ static void detect_and_report_it87(void)
dev |= inb(0x2f);
if (dev == 0x8712 || dev == 0x8705 || dev == 0x8715 ||
dev == 0x8716 || dev == 0x8718 || dev == 0x8726) {
- printk(KERN_INFO "IT%04X SuperIO detected.\n", dev);
+ pr_info("IT%04X SuperIO detected\n", dev);
outb(0x07, 0x2E); /* Parallel Port */
outb(0x03, 0x2F);
outb(0xF0, 0x2E); /* BOOT 0x80 off */
@@ -1445,8 +1428,8 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified)
/* That didn't work, but the user thinks there's a
* port here. */
- printk(KERN_INFO "parport 0x%lx (WARNING): CTR: "
- "wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
+ pr_info("parport 0x%lx (WARNING): CTR: wrote 0x%02x, read 0x%02x\n",
+ pb->base, w, r);
/* Try the data register. The data lines aren't tri-stated at
* this stage, so we expect back what we wrote. */
@@ -1464,10 +1447,9 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified) {
/* Didn't work, but the user is convinced this is the
* place. */
- printk(KERN_INFO "parport 0x%lx (WARNING): DATA: "
- "wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
- printk(KERN_INFO "parport 0x%lx: You gave this address, "
- "but there is probably no parallel port there!\n",
+ pr_info("parport 0x%lx (WARNING): DATA: wrote 0x%02x, read 0x%02x\n",
+ pb->base, w, r);
+ pr_info("parport 0x%lx: You gave this address, but there is probably no parallel port there!\n",
pb->base);
}
@@ -1620,7 +1602,7 @@ static int parport_ECP_supported(struct parport *pb)
if (i <= priv->fifo_depth) {
if (verbose_probing)
printk(KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
- pb->base, i);
+ pb->base, i);
} else
/* Number of bytes we know we can write if we get an
interrupt. */
@@ -1642,7 +1624,7 @@ static int parport_ECP_supported(struct parport *pb)
if (i <= priv->fifo_depth) {
if (verbose_probing)
- printk(KERN_INFO "0x%lx: readIntrThreshold is %d\n",
+ pr_info("0x%lx: readIntrThreshold is %d\n",
pb->base, i);
} else
/* Number of bytes we can read if we get an interrupt. */
@@ -1657,17 +1639,14 @@ static int parport_ECP_supported(struct parport *pb)
switch (pword) {
case 0:
pword = 2;
- printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
- pb->base);
+ pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
break;
case 2:
pword = 4;
- printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
- pb->base);
+ pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
break;
default:
- printk(KERN_WARNING "0x%lx: Unknown implementation ID\n",
- pb->base);
+ pr_warn("0x%lx: Unknown implementation ID\n", pb->base);
/* Fall through - Assume 1 */
case 1:
pword = 1;
@@ -1676,14 +1655,14 @@ static int parport_ECP_supported(struct parport *pb)
if (verbose_probing) {
printk(KERN_DEBUG "0x%lx: PWord is %d bits\n",
- pb->base, 8 * pword);
+ pb->base, 8 * pword);
- printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n", pb->base,
- config & 0x80 ? "Level" : "Pulses");
+ printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n",
+ pb->base, config & 0x80 ? "Level" : "Pulses");
configb = inb(CONFIGB(pb));
printk(KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
- pb->base, config, configb);
+ pb->base, config, configb);
printk(KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
if ((configb >> 3) & 0x07)
pr_cont("%d", intrline[(configb >> 3) & 0x07]);
@@ -2107,9 +2086,9 @@ struct parport *parport_pc_probe_port(unsigned long int base,
p->size = (p->modes & PARPORT_MODE_EPP) ? 8 : 3;
- printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
+ pr_info("%s: PC-style at 0x%lx", p->name, p->base);
if (p->base_hi && priv->ecr)
- printk(KERN_CONT " (0x%lx)", p->base_hi);
+ pr_cont(" (0x%lx)", p->base_hi);
if (p->irq == PARPORT_IRQ_AUTO) {
p->irq = PARPORT_IRQ_NONE;
parport_irq_probe(p);
@@ -2120,7 +2099,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
- printk(KERN_CONT ", irq %d", p->irq);
+ pr_cont(", irq %d", p->irq);
priv->ctr_writable |= 0x10;
if (p->dma == PARPORT_DMA_AUTO) {
@@ -2144,41 +2123,39 @@ struct parport *parport_pc_probe_port(unsigned long int base,
/* p->ops->ecp_read_data = parport_pc_ecp_read_block_pio; */
#endif /* IEEE 1284 support */
if (p->dma != PARPORT_DMA_NONE) {
- printk(KERN_CONT ", dma %d", p->dma);
+ pr_cont(", dma %d", p->dma);
p->modes |= PARPORT_MODE_DMA;
} else
- printk(KERN_CONT ", using FIFO");
+ pr_cont(", using FIFO");
} else
/* We can't use the DMA channel after all. */
p->dma = PARPORT_DMA_NONE;
#endif /* Allowed to use FIFO/DMA */
- printk(KERN_CONT " [");
+ pr_cont(" [");
-#define printmode(x) \
- {\
- if (p->modes & PARPORT_MODE_##x) {\
- printk(KERN_CONT "%s%s", f ? "," : "", #x);\
- f++;\
- } \
- }
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
{
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
- printmode(COMPAT)
+ printmode(COMPAT);
printmode(EPP);
printmode(ECP);
printmode(DMA);
}
#undef printmode
#ifndef CONFIG_PARPORT_1284
- printk(KERN_CONT "(,...)");
+ pr_cont("(,...)");
#endif /* CONFIG_PARPORT_1284 */
- printk(KERN_CONT "]\n");
+ pr_cont("]\n");
if (probedirq != PARPORT_IRQ_NONE)
- printk(KERN_INFO "%s: irq %d detected\n", p->name, probedirq);
+ pr_info("%s: irq %d detected\n", p->name, probedirq);
/* If No ECP release the ports grabbed above. */
if (ECR_res && (p->modes & PARPORT_MODE_ECP) == 0) {
@@ -2193,8 +2170,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq(p->irq, parport_irq_handler,
irqflags, p->name, p)) {
- printk(KERN_WARNING "%s: irq %d in use, "
- "resorting to polled operation\n",
+ pr_warn("%s: irq %d in use, resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
p->dma = PARPORT_DMA_NONE;
@@ -2204,8 +2180,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
#ifdef HAS_DMA
if (p->dma != PARPORT_DMA_NONE) {
if (request_dma(p->dma, p->name)) {
- printk(KERN_WARNING "%s: dma %d in use, "
- "resorting to PIO operation\n",
+ pr_warn("%s: dma %d in use, resorting to PIO operation\n",
p->name, p->dma);
p->dma = PARPORT_DMA_NONE;
} else {
@@ -2215,9 +2190,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
&priv->dma_handle,
GFP_KERNEL);
if (!priv->dma_buf) {
- printk(KERN_WARNING "%s: "
- "cannot get buffer for DMA, "
- "resorting to PIO operation\n",
+ pr_warn("%s: cannot get buffer for DMA, resorting to PIO operation\n",
p->name);
free_dma(p->dma);
p->dma = PARPORT_DMA_NONE;
@@ -2313,7 +2286,7 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
int irq;
int i;
- DPRINTK(KERN_DEBUG "sio_ite_8872_probe()\n");
+ pr_debug("sio_ite_8872_probe()\n");
/* make sure which one chip */
for (i = 0; i < 5; i++) {
@@ -2330,7 +2303,7 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
}
}
if (i >= 5) {
- printk(KERN_INFO "parport_pc: cannot find ITE8872 INTA\n");
+ pr_info("parport_pc: cannot find ITE8872 INTA\n");
return 0;
}
@@ -2339,29 +2312,28 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
switch (type) {
case 0x2:
- printk(KERN_INFO "parport_pc: ITE8871 found (1P)\n");
+ pr_info("parport_pc: ITE8871 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xa:
- printk(KERN_INFO "parport_pc: ITE8875 found (1P)\n");
+ pr_info("parport_pc: ITE8875 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xe:
- printk(KERN_INFO "parport_pc: ITE8872 found (2S1P)\n");
+ pr_info("parport_pc: ITE8872 found (2S1P)\n");
ite8872set = 0x64e00000;
break;
case 0x6:
- printk(KERN_INFO "parport_pc: ITE8873 found (1S)\n");
+ pr_info("parport_pc: ITE8873 found (1S)\n");
release_region(inta_addr[i], 32);
return 0;
case 0x8:
- printk(KERN_INFO "parport_pc: ITE8874 found (2S)\n");
+ pr_info("parport_pc: ITE8874 found (2S)\n");
release_region(inta_addr[i], 32);
return 0;
default:
- printk(KERN_INFO "parport_pc: unknown ITE887x\n");
- printk(KERN_INFO "parport_pc: please mail 'lspci -nvv' "
- "output to Rich.Liu@ite.com.tw\n");
+ pr_info("parport_pc: unknown ITE887x\n");
+ pr_info("parport_pc: please mail 'lspci -nvv' output to Rich.Liu@ite.com.tw\n");
release_region(inta_addr[i], 32);
return 0;
}
@@ -2379,11 +2351,9 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
pci_write_config_dword(pdev, 0x9c,
ite8872set | (ite8872_irq * 0x11111));
- DPRINTK(KERN_DEBUG "ITE887x: The IRQ is %d.\n", ite8872_irq);
- DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O port is 0x%x.\n",
- ite8872_lpt);
- DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O porthi is 0x%x.\n",
- ite8872_lpthi);
+ pr_debug("ITE887x: The IRQ is %d\n", ite8872_irq);
+ pr_debug("ITE887x: The PARALLEL I/O port is 0x%x\n", ite8872_lpt);
+ pr_debug("ITE887x: The PARALLEL I/O porthi is 0x%x\n", ite8872_lpthi);
/* Let the user (or defaults) steer us away from interrupts */
irq = ite8872_irq;
@@ -2396,9 +2366,8 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
release_region(inta_addr[i], 32);
if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
- printk(KERN_INFO
- "parport_pc: ITE 8872 parallel port: io=0x%X",
- ite8872_lpt);
+ pr_info("parport_pc: ITE 8872 parallel port: io=0x%X",
+ ite8872_lpt);
if (irq != PARPORT_IRQ_NONE)
pr_cont(", irq=%d", irq);
pr_cont("\n");
@@ -2471,8 +2440,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
have_epp = 1;
break;
default:
- printk(KERN_DEBUG
- "parport_pc: probing current configuration\n");
+ printk(KERN_DEBUG "parport_pc: probing current configuration\n");
siofunc = VIA_FUNCTION_PROBE;
break;
}
@@ -2508,12 +2476,11 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
port1 = inb(VIA_CONFIG_DATA) << 2;
printk(KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",
- port1);
+ port1);
if (port1 == 0x3BC && have_epp) {
outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
outb((0x378 >> 2), VIA_CONFIG_DATA);
- printk(KERN_DEBUG
- "parport_pc: Parallel port base changed to 0x378\n");
+ printk(KERN_DEBUG "parport_pc: Parallel port base changed to 0x378\n");
port1 = 0x378;
}
@@ -2525,7 +2492,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
pci_write_config_byte(pdev, via->via_pci_superio_config_reg, tmp);
if (siofunc == VIA_FUNCTION_PARPORT_DISABLE) {
- printk(KERN_INFO "parport_pc: VIA parallel port disabled in BIOS\n");
+ pr_info("parport_pc: VIA parallel port disabled in BIOS\n");
return 0;
}
@@ -2558,9 +2525,8 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
case 0x278:
port2 = 0x678; break;
default:
- printk(KERN_INFO
- "parport_pc: Weird VIA parport base 0x%X, ignoring\n",
- port1);
+ pr_info("parport_pc: Weird VIA parport base 0x%X, ignoring\n",
+ port1);
return 0;
}
@@ -2579,8 +2545,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
/* finally, do the probe with values obtained */
if (parport_pc_probe_port(port1, port2, irq, dma, &pdev->dev, 0)) {
- printk(KERN_INFO
- "parport_pc: VIA parallel port: io=0x%X", port1);
+ pr_info("parport_pc: VIA parallel port: io=0x%X", port1);
if (irq != PARPORT_IRQ_NONE)
pr_cont(", irq=%d", irq);
if (dma != PARPORT_DMA_NONE)
@@ -2589,7 +2554,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
return 1;
}
- printk(KERN_WARNING "parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
+ pr_warn("parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
port1, irq, dma);
return 0;
}
@@ -2854,14 +2819,12 @@ static int parport_pc_pci_probe(struct pci_dev *dev,
/* TODO: test if sharing interrupts works */
irq = dev->irq;
if (irq == IRQ_NONE) {
- printk(KERN_DEBUG
- "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
- id->vendor, id->device, io_lo, io_hi);
+ printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
+ id->vendor, id->device, io_lo, io_hi);
irq = PARPORT_IRQ_NONE;
} else {
- printk(KERN_DEBUG
- "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
- id->vendor, id->device, io_lo, io_hi, irq);
+ printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
+ id->vendor, id->device, io_lo, io_hi, irq);
}
data->ports[count] =
parport_pc_probe_port(io_lo, io_hi, irq,
@@ -3111,7 +3074,7 @@ static int __init parport_parse_param(const char *s, int *val,
if (ep != s)
*val = r;
else {
- printk(KERN_ERR "parport: bad specifier `%s'\n", s);
+ pr_err("parport: bad specifier `%s'\n", s);
return -1;
}
}
@@ -3133,8 +3096,8 @@ static int __init parport_parse_dma(const char *dmastr, int *val)
#ifdef CONFIG_PCI
static int __init parport_init_mode_setup(char *str)
{
- printk(KERN_DEBUG
- "parport_pc.c: Specified parameter parport_init_mode=%s\n", str);
+ printk(KERN_DEBUG "parport_pc.c: Specified parameter parport_init_mode=%s\n",
+ str);
if (!strcmp(str, "spp"))
parport_init_mode = 1;
@@ -3201,10 +3164,7 @@ static int __init parse_parport_params(void)
irqval[0] = val;
break;
default:
- printk(KERN_WARNING
- "parport_pc: irq specified "
- "without base address. Use 'io=' "
- "to specify one\n");
+ pr_warn("parport_pc: irq specified without base address. Use 'io=' to specify one\n");
}
if (dma[0] && !parport_parse_dma(dma[0], &val))
@@ -3214,10 +3174,7 @@ static int __init parse_parport_params(void)
dmaval[0] = val;
break;
default:
- printk(KERN_WARNING
- "parport_pc: dma specified "
- "without base address. Use 'io=' "
- "to specify one\n");
+ pr_warn("parport_pc: dma specified without base address. Use 'io=' to specify one\n");
}
}
return 0;
@@ -3256,12 +3213,12 @@ static int __init parport_setup(char *str)
val = simple_strtoul(str, &endptr, 0);
if (endptr == str) {
- printk(KERN_WARNING "parport=%s not understood\n", str);
+ pr_warn("parport=%s not understood\n", str);
return 1;
}
if (parport_setup_ptr == PARPORT_PC_MAX_PORTS) {
- printk(KERN_ERR "parport=%s ignored, too many ports\n", str);
+ pr_err("parport=%s ignored, too many ports\n", str);
return 1;
}
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index d5a669b60c27..e840c1b5ab90 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -314,7 +314,7 @@ static int bpp_probe(struct platform_device *op)
value_tcr &= ~P_TCR_DIR;
sbus_writeb(value_tcr, &regs->p_tcr);
- printk(KERN_INFO "%s: sunbpp at 0x%lx\n", p->name, p->base);
+ pr_info("%s: sunbpp at 0x%lx\n", p->name, p->base);
dev_set_drvdata(&op->dev, p);
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e5e6a463a941..7e6d713fa5ac 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -38,16 +38,16 @@ static void pretty_print(struct parport *port, int device)
{
struct parport_device_info *info = &port->probe_info[device + 1];
- printk(KERN_INFO "%s", port->name);
+ pr_info("%s", port->name);
if (device >= 0)
- printk (" (addr %d)", device);
+ pr_cont(" (addr %d)", device);
- printk (": %s", classes[info->class].descr);
+ pr_cont(": %s", classes[info->class].descr);
if (info->class)
- printk(", %s %s", info->mfr, info->model);
+ pr_cont(", %s %s", info->mfr, info->model);
- printk("\n");
+ pr_cont("\n");
}
static void parse_data(struct parport *port, int device, char *str)
@@ -58,7 +58,7 @@ static void parse_data(struct parport *port, int device, char *str)
struct parport_device_info *info = &port->probe_info[device + 1];
if (!txt) {
- printk(KERN_WARNING "%s probe: memory squeeze\n", port->name);
+ pr_warn("%s probe: memory squeeze\n", port->name);
return;
}
strcpy(txt, str);
@@ -98,7 +98,8 @@ static void parse_data(struct parport *port, int device, char *str)
goto rock_on;
}
}
- printk(KERN_WARNING "%s probe: warning, class '%s' not understood.\n", port->name, sep);
+ pr_warn("%s probe: warning, class '%s' not understood\n",
+ port->name, sep);
info->class = PARPORT_CLASS_OTHER;
} else if (!strcmp(p, "CMD") ||
!strcmp(p, "COMMAND SET")) {
@@ -177,9 +178,8 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
* just return constant nibble forever. This catches
* also those cases. */
if (idlens[0] == 0 || idlens[0] > 0xFFF) {
- printk (KERN_DEBUG "%s: reported broken Device ID"
- " length of %#zX bytes\n",
- port->name, idlens[0]);
+ printk(KERN_DEBUG "%s: reported broken Device ID length of %#zX bytes\n",
+ port->name, idlens[0]);
return -EIO;
}
numidlens = 2;
@@ -201,10 +201,8 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
if (belen != len) {
- printk (KERN_DEBUG "%s: Device ID was %zd bytes"
- " while device told it would be %d"
- " bytes\n",
- port->name, len, belen);
+ printk(KERN_DEBUG "%s: Device ID was %zd bytes while device told it would be %d bytes\n",
+ port->name, len, belen);
}
goto done;
}
@@ -214,11 +212,9 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
* the first 256 bytes or so that we must have read so
* far. */
if (buffer[len-1] == ';') {
- printk (KERN_DEBUG "%s: Device ID reading stopped"
- " before device told data not available. "
- "Current idlen %u of %u, len bytes %02X %02X\n",
- port->name, current_idlen, numidlens,
- length[0], length[1]);
+ printk(KERN_DEBUG "%s: Device ID reading stopped before device told data not available. Current idlen %u of %u, len bytes %02X %02X\n",
+ port->name, current_idlen, numidlens,
+ length[0], length[1]);
goto done;
}
}
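
A pattern repeated through these hunks is rejoining format strings that had been split across lines to stay under 80 columns. Kernel style prefers keeping a user-visible string in one piece, even when the line runs long, so that grepping the source for a logged message finds its origin; only the argument list wraps. For example, the form now used above:

printk(KERN_DEBUG "%s: Device ID was %zd bytes while device told it would be %d bytes\n",
       port->name, len, belen);
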
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index 48804049d697..d740eba3c099 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -34,7 +34,7 @@
#define PARPORT_MAX_SPINTIME_VALUE 1000
static int do_active_device(struct ctl_table *table, int write,
- void __user *result, size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[256];
@@ -65,13 +65,13 @@ static int do_active_device(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
#ifdef CONFIG_PARPORT_1284
static int do_autoprobe(struct ctl_table *table, int write,
- void __user *result, size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport_device_info *info = table->extra2;
const char *str;
@@ -108,13 +108,13 @@ static int do_autoprobe(struct ctl_table *table, int write,
*ppos += len;
- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
#endif /* IEEE1284.3 support. */
static int do_hardware_base_addr(struct ctl_table *table, int write,
- void __user *result,
- size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
@@ -136,13 +136,12 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
static int do_hardware_irq(struct ctl_table *table, int write,
- void __user *result,
- size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
@@ -164,13 +163,12 @@ static int do_hardware_irq(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
static int do_hardware_dma(struct ctl_table *table, int write,
- void __user *result,
- size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
@@ -192,13 +190,12 @@ static int do_hardware_dma(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
static int do_hardware_modes(struct ctl_table *table, int write,
- void __user *result,
- size_t *lenp, loff_t *ppos)
+ void *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[40];
@@ -213,7 +210,11 @@ static int do_hardware_modes(struct ctl_table *table, int write,
return -EACCES;
{
-#define printmode(x) {if(port->modes&PARPORT_MODE_##x){len+=sprintf(buffer+len,"%s%s",f?",":"",#x);f++;}}
+#define printmode(x) \
+do { \
+ if (port->modes & PARPORT_MODE_##x) \
+ len += sprintf(buffer + len, "%s%s", f++ ? "," : "", #x); \
+} while (0)
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
@@ -231,8 +232,8 @@ static int do_hardware_modes(struct ctl_table *table, int write,
*lenp = len;
*ppos += len;
-
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+ memcpy(result, buffer, len);
+ return 0;
}
#define PARPORT_PORT_DIR(CHILD) { .procname = NULL, .mode = 0555, .child = CHILD }
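
The procfs handlers above lose their __user annotations and copy_to_user() calls because the sysctl core now presents ->proc_handler with a kernel buffer and performs the user-space copy itself (part of the set_fs() removal work around v5.8). A handler only fills the supplied buffer and updates *lenp and *ppos. A sketch of the resulting contract, with hypothetical names:

#include <linux/string.h>
#include <linux/sysctl.h>

static int do_example(struct ctl_table *table, int write,
		      void *result, size_t *lenp, loff_t *ppos)
{
	char buffer[20];
	int len;

	if (write)			/* read-only node */
		return -EACCES;

	len = snprintf(buffer, sizeof(buffer), "%d\n", 42);
	if (len > *lenp)
		len = *lenp;
	*lenp = len;
	*ppos += len;
	memcpy(result, buffer, len);	/* core copies to user space */
	return 0;
}
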
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index d6920ebeabcd..7fec4fefe151 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -278,46 +278,32 @@ static int port_detect(struct device *dev, void *dev_drv)
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
const char *mod_name)
{
- if (drv->devmodel) {
- /* using device model */
- int ret;
-
- /* initialize common driver fields */
- drv->driver.name = drv->name;
- drv->driver.bus = &parport_bus_type;
- drv->driver.owner = owner;
- drv->driver.mod_name = mod_name;
- ret = driver_register(&drv->driver);
- if (ret)
- return ret;
+ /* using device model */
+ int ret;
- /*
- * check if bus has any parallel port registered, if
- * none is found then load the lowlevel driver.
- */
- ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
- port_detect);
- if (!ret)
- get_lowlevel_driver();
-
- mutex_lock(&registration_lock);
- if (drv->match_port)
- bus_for_each_dev(&parport_bus_type, NULL, drv,
- port_check);
- mutex_unlock(&registration_lock);
- } else {
- struct parport *port;
-
- drv->devmodel = false;
-
- if (list_empty(&portlist))
- get_lowlevel_driver();
- mutex_lock(&registration_lock);
- list_for_each_entry(port, &portlist, list)
- drv->attach(port);
- list_add(&drv->list, &drivers);
- mutex_unlock(&registration_lock);
- }
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &parport_bus_type;
+ drv->driver.owner = owner;
+ drv->driver.mod_name = mod_name;
+ ret = driver_register(&drv->driver);
+ if (ret)
+ return ret;
+
+ /*
+ * check if bus has any parallel port registered, if
+ * none is found then load the lowlevel driver.
+ */
+ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+ port_detect);
+ if (!ret)
+ get_lowlevel_driver();
+
+ mutex_lock(&registration_lock);
+ if (drv->match_port)
+ bus_for_each_dev(&parport_bus_type, NULL, drv,
+ port_check);
+ mutex_unlock(&registration_lock);
return 0;
}
@@ -352,17 +338,9 @@ static int port_detach(struct device *dev, void *_drv)
void parport_unregister_driver(struct parport_driver *drv)
{
- struct parport *port;
-
mutex_lock(&registration_lock);
- if (drv->devmodel) {
- bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
- driver_unregister(&drv->driver);
- } else {
- list_del_init(&drv->list);
- list_for_each_entry(port, &portlist, list)
- drv->detach(port);
- }
+ bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
+ driver_unregister(&drv->driver);
mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
@@ -554,8 +532,8 @@ void parport_announce_port(struct parport *port)
#endif
if (!port->dev)
- printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n",
- port->name);
+ pr_warn("%s: fix this legacy no-device port driver!\n",
+ port->name);
parport_proc_register(port);
mutex_lock(&registration_lock);
@@ -641,47 +619,48 @@ void parport_remove_port(struct parport *port)
}
EXPORT_SYMBOL(parport_remove_port);
+static void free_pardevice(struct device *dev)
+{
+ struct pardevice *par_dev = to_pardevice(dev);
+
+ kfree(par_dev->name);
+ kfree(par_dev);
+}
+
/**
- * parport_register_device - register a device on a parallel port
+ * parport_register_dev_model - register a device on a parallel port
* @port: port to which the device is attached
* @name: a name to refer to the device
- * @pf: preemption callback
- * @kf: kick callback (wake-up)
- * @irq_func: interrupt handler
- * @flags: registration flags
- * @handle: data for callback functions
+ * @par_dev_cb: struct containing callbacks
+ * @id: device number to be given to the device
*
* This function, called by parallel port device drivers,
* declares that a device is connected to a port, and tells the
* system all it needs to know.
*
- * The @name is allocated by the caller and must not be
- * deallocated until the caller calls @parport_unregister_device
- * for that device.
- *
- * The preemption callback function, @pf, is called when this
- * device driver has claimed access to the port but another
- * device driver wants to use it. It is given @handle as its
- * parameter, and should return zero if it is willing for the
- * system to release the port to another driver on its behalf.
- * If it wants to keep control of the port it should return
- * non-zero, and no action will be taken. It is good manners for
- * the driver to try to release the port at the earliest
- * opportunity after its preemption callback rejects a preemption
- * attempt. Note that if a preemption callback is happy for
- * preemption to go ahead, there is no need to release the port;
- * it is done automatically. This function may not block, as it
- * may be called from interrupt context. If the device driver
- * does not support preemption, @pf can be %NULL.
+ * The struct pardev_cb contains pointers to the callbacks. The
+ * preemption callback function, @preempt, is called when this device
+ * driver has claimed access to the port but another device driver
+ * wants to use it. It is given @private as its parameter, and should
+ * return zero if it is willing for the system to release the port to
+ * another driver on its behalf. If it wants to keep control of the
+ * port it should return non-zero, and no action will be taken.
+ * It is good manners for the driver to try to release the port at
+ * the earliest opportunity after its preemption callback rejects a
+ * preemption attempt. Note that if a preemption callback is happy
+ * for preemption to go ahead, there is no need to release the
+ * port; it is done automatically. This function may not block, as
+ * it may be called from interrupt context. If the device driver
+ * does not support preemption, @preempt can be %NULL.
*
- * The wake-up ("kick") callback function, @kf, is called when
+ * The wake-up ("kick") callback function, @wakeup, is called when
* the port is available to be claimed for exclusive access; that
* is, parport_claim() is guaranteed to succeed when called from
* inside the wake-up callback function. If the driver wants to
* claim the port it should do so; otherwise, it need not take
* any action. This function may not block, as it may be called
* from interrupt context. If the device driver does not want to
- * be explicitly invited to claim the port in this way, @kf can
+ * be explicitly invited to claim the port in this way, @wakeup can
* be %NULL.
*
* The interrupt handler, @irq_func, is called when an interrupt
@@ -711,138 +690,6 @@ EXPORT_SYMBOL(parport_remove_port);
**/
struct pardevice *
-parport_register_device(struct parport *port, const char *name,
- int (*pf)(void *), void (*kf)(void *),
- void (*irq_func)(void *),
- int flags, void *handle)
-{
- struct pardevice *tmp;
-
- if (port->physport->flags & PARPORT_FLAG_EXCL) {
- /* An exclusive device is registered. */
- printk(KERN_DEBUG "%s: no more devices allowed\n",
- port->name);
- return NULL;
- }
-
- if (flags & PARPORT_DEV_LURK) {
- if (!pf || !kf) {
- printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
- return NULL;
- }
- }
-
- if (flags & PARPORT_DEV_EXCL) {
- if (port->physport->devices) {
- /*
- * If a device is already registered and this new
- * device wants exclusive access, then no need to
- * continue as we can not grant exclusive access to
- * this device.
- */
- pr_err("%s: cannot grant exclusive access for device %s\n",
- port->name, name);
- return NULL;
- }
- }
-
- /*
- * We up our own module reference count, and that of the port
- * on which a device is to be registered, to ensure that
- * neither of us gets unloaded while we sleep in (e.g.)
- * kmalloc.
- */
- if (!try_module_get(port->ops->owner))
- return NULL;
-
- parport_get_port(port);
-
- tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
- if (!tmp)
- goto out;
-
- tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
- if (!tmp->state)
- goto out_free_pardevice;
-
- tmp->name = name;
- tmp->port = port;
- tmp->daisy = -1;
- tmp->preempt = pf;
- tmp->wakeup = kf;
- tmp->private = handle;
- tmp->flags = flags;
- tmp->irq_func = irq_func;
- tmp->waiting = 0;
- tmp->timeout = 5 * HZ;
- tmp->devmodel = false;
-
- /* Chain this onto the list */
- tmp->prev = NULL;
- /*
- * This function must not run from an irq handler so we don' t need
- * to clear irq on the local CPU. -arca
- */
- spin_lock(&port->physport->pardevice_lock);
-
- if (flags & PARPORT_DEV_EXCL) {
- if (port->physport->devices) {
- spin_unlock(&port->physport->pardevice_lock);
- printk(KERN_DEBUG
- "%s: cannot grant exclusive access for device %s\n",
- port->name, name);
- goto out_free_all;
- }
- port->flags |= PARPORT_FLAG_EXCL;
- }
-
- tmp->next = port->physport->devices;
- wmb(); /*
- * Make sure that tmp->next is written before it's
- * added to the list; see comments marked 'no locking
- * required'
- */
- if (port->physport->devices)
- port->physport->devices->prev = tmp;
- port->physport->devices = tmp;
- spin_unlock(&port->physport->pardevice_lock);
-
- init_waitqueue_head(&tmp->wait_q);
- tmp->timeslice = parport_default_timeslice;
- tmp->waitnext = tmp->waitprev = NULL;
-
- /*
- * This has to be run as last thing since init_state may need other
- * pardevice fields. -arca
- */
- port->ops->init_state(tmp, tmp->state);
- if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
- port->proc_device = tmp;
- parport_device_proc_register(tmp);
- }
- return tmp;
-
- out_free_all:
- kfree(tmp->state);
- out_free_pardevice:
- kfree(tmp);
- out:
- parport_put_port(port);
- module_put(port->ops->owner);
-
- return NULL;
-}
-EXPORT_SYMBOL(parport_register_device);
-
-static void free_pardevice(struct device *dev)
-{
- struct pardevice *par_dev = to_pardevice(dev);
-
- kfree(par_dev->name);
- kfree(par_dev);
-}
-
-struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
const struct pardev_cb *par_dev_cb, int id)
{
@@ -996,7 +843,7 @@ void parport_unregister_device(struct pardevice *dev)
#ifdef PARPORT_PARANOID
if (!dev) {
- printk(KERN_ERR "parport_unregister_device: passed NULL\n");
+ pr_err("%s: passed NULL\n", __func__);
return;
}
#endif
@@ -1046,10 +893,7 @@ void parport_unregister_device(struct pardevice *dev)
spin_unlock_irq(&port->waitlist_lock);
kfree(dev->state);
- if (dev->devmodel)
- device_unregister(&dev->dev);
- else
- kfree(dev);
+ device_unregister(&dev->dev);
module_put(port->ops->owner);
parport_put_port(port);
@@ -1137,8 +981,7 @@ int parport_claim(struct pardevice *dev)
unsigned long flags;
if (port->cad == dev) {
- printk(KERN_INFO "%s: %s already owner\n",
- dev->port->name,dev->name);
+ pr_info("%s: %s already owner\n", dev->port->name, dev->name);
return 0;
}
@@ -1158,9 +1001,8 @@ int parport_claim(struct pardevice *dev)
* I think we'll actually deadlock rather than
* get here, but just in case..
*/
- printk(KERN_WARNING
- "%s: %s released port when preempted!\n",
- port->name, oldcad->name);
+ pr_warn("%s: %s released port when preempted!\n",
+ port->name, oldcad->name);
if (port->cad)
goto blocked;
}
@@ -1260,7 +1102,8 @@ int parport_claim_or_block(struct pardevice *dev)
r = parport_claim(dev);
if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
- printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
+ printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
+ dev->name);
#endif
/*
* FIXME!!! Use the proper locking for dev->waiting,
@@ -1293,7 +1136,7 @@ int parport_claim_or_block(struct pardevice *dev)
if (dev->port->physport->cad != dev)
printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
dev->name, dev->port->physport->cad ?
- dev->port->physport->cad->name:"nobody");
+ dev->port->physport->cad->name : "nobody");
#endif
}
dev->waiting = 0;
@@ -1320,8 +1163,8 @@ void parport_release(struct pardevice *dev)
write_lock_irqsave(&port->cad_lock, flags);
if (port->cad != dev) {
write_unlock_irqrestore(&port->cad_lock, flags);
- printk(KERN_WARNING "%s: %s tried to release parport when not owner\n",
- port->name, dev->name);
+ pr_warn("%s: %s tried to release parport when not owner\n",
+ port->name, dev->name);
return;
}
@@ -1361,7 +1204,8 @@ void parport_release(struct pardevice *dev)
if (dev->port->cad) /* racy but no matter */
return;
} else {
- printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
+ pr_err("%s: don't know how to wake %s\n",
+ port->name, pd->name);
}
}
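The parport hunks above are a mechanical conversion from printk(KERN_*) to the pr_*() helpers. A minimal sketch of the pattern (pr_err()/pr_warn() are the real <linux/printk.h> macros; the surrounding function is illustrative only):

	#include <linux/printk.h>

	static void log_example(const char *port_name, const char *dev_name)
	{
		/* pr_err() expands to printk(KERN_ERR pr_fmt(fmt), ...) */
		pr_err("%s: passed NULL\n", __func__);
		/* pr_warn()/pr_info() map to KERN_WARNING/KERN_INFO likewise */
		pr_warn("%s: %s tried to release parport when not owner\n",
			port_name, dev_name);
	}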
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 91bfdb784829..b08efea39496 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -58,15 +58,33 @@ config PCIE_RCAR
bool "Renesas R-Car PCIe controller"
depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_RCAR_HOST
help
Say Y here if you want PCIe controller support on R-Car SoCs.
+ This option will be removed after arm64 defconfig is updated.
+
+config PCIE_RCAR_HOST
+ bool "Renesas R-Car PCIe host controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Say Y here if you want PCIe controller support on R-Car SoCs in host
+ mode.
+
+config PCIE_RCAR_EP
+ bool "Renesas R-Car PCIe endpoint controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ help
+ Say Y here if you want PCIe controller support on R-Car SoCs in
+ endpoint mode.
config PCI_HOST_COMMON
- bool
+ tristate
select PCI_ECAM
config PCI_HOST_GENERIC
- bool "Generic PCI host controller"
+ tristate "Generic PCI host controller"
depends on OF
select PCI_HOST_COMMON
select IRQ_DOMAIN
@@ -258,6 +276,16 @@ config PCI_HYPERV_INTERFACE
 	  The Hyper-V PCI Interface is a helper driver that allows other drivers to
have a common interface with the Hyper-V PCI frontend driver.
+config PCI_LOONGSON
+ bool "LOONGSON PCI Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on OF
+ depends on PCI_QUIRKS
+ default MACH_LOONGSON64
+ help
+ Say Y here if you want to enable PCI controller support on
+ Loongson systems.
+
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
source "drivers/pci/controller/cadence/Kconfig"
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 158c59771824..efd9733ead26 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -7,7 +7,8 @@ obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
-obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
+obj-$(CONFIG_PCIE_RCAR_HOST) += pcie-rcar.o pcie-rcar-host.o
+obj-$(CONFIG_PCIE_RCAR_EP) += pcie-rcar.o pcie-rcar-ep.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
@@ -28,6 +29,7 @@ obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
+obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 1c173dad67d1..1c15c8352125 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -450,7 +450,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
epc->max_functions = 1;
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
- resource_size(pcie->mem_res));
+ resource_size(pcie->mem_res), PAGE_SIZE);
if (ret < 0) {
dev_err(dev, "failed to initialize the memory space\n");
goto err_init;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 9b1c3966414b..8c2543f28ba0 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -140,9 +140,6 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
for_each_of_pci_range(&parser, &range) {
bool is_io;
- if (r >= rc->max_regions)
- break;
-
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
is_io = false;
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
@@ -219,17 +216,14 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
pcie = &rc->pcie;
pcie->is_rc = true;
- rc->max_regions = 32;
- of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions);
-
rc->no_bar_nbits = 32;
of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);
rc->vendor_id = 0xffff;
- of_property_read_u16(np, "vendor-id", &rc->vendor_id);
+ of_property_read_u32(np, "vendor-id", &rc->vendor_id);
rc->device_id = 0xffff;
- of_property_read_u16(np, "device-id", &rc->device_id);
+ of_property_read_u32(np, "device-id", &rc->device_id);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
pcie->reg_base = devm_ioremap_resource(dev, res);
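The of_property_read_u16() to of_property_read_u32() switch above reflects how "vendor-id" and "device-id" are normally encoded: device tree cells are 32-bit, and a u16 read only matches values stored with the /bits/ 16 annotation. A sketch of the distinction (the example value is hypothetical):

	u32 vendor_id = 0xffff;

	/* matches the common encoding `vendor-id = <0x17cd>;` (one 32-bit cell) */
	of_property_read_u32(np, "vendor-id", &vendor_id);

	/* a u16 read would instead require `vendor-id = /bits/ 16 <0x17cd>;` */

The struct cdns_pcie_rc fields are widened to u32 to match (see the pcie-cadence.h hunk below).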
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index a2b28b912ca4..df14ad002fe9 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -251,7 +251,6 @@ struct cdns_pcie {
* @bus_range: first/last buses behind the PCIe host controller
* @cfg_base: IO mapped window to access the PCI configuration space of a
* single function at a time
- * @max_regions: maximum number of regions supported by the hardware
* @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
* translation (nbits sets into the "no BAR match" register)
* @vendor_id: PCI vendor ID
@@ -262,10 +261,9 @@ struct cdns_pcie_rc {
struct resource *cfg_res;
struct resource *bus_range;
void __iomem *cfg_base;
- u32 max_regions;
u32 no_bar_nbits;
- u16 vendor_id;
- u16 device_id;
+ u32 vendor_id;
+ u32 device_id;
};
/**
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 03dcaf65d159..044a3761c44f 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -26,7 +26,7 @@ config PCI_DRA7XX_HOST
depends on OF && HAS_IOMEM && TI_PIPE3
select PCIE_DW_HOST
select PCI_DRA7XX
- default y
+ default y if SOC_DRA7XX
help
Enables support for the PCIe controller in the DRA7xx SoC to work in
host mode. There are two instances of PCIe controller in DRA7xx.
@@ -111,7 +111,6 @@ config PCI_KEYSTONE_HOST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCI_KEYSTONE
- default y
help
Enables support for the PCIe controller in the Keystone SoC to
work in host mode. The PCI controller on Keystone is based on
@@ -281,15 +280,25 @@ config PCIE_TEGRA194_EP
selected. This uses the DesignWare core.
config PCIE_UNIPHIER
- bool "Socionext UniPhier PCIe controllers"
+ bool "Socionext UniPhier PCIe host controllers"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && HAS_IOMEM
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
- Say Y here if you want PCIe controller support on UniPhier SoCs.
+ Say Y here if you want PCIe host controller support on UniPhier SoCs.
This driver supports LD20 and PXs3 SoCs.
+config PCIE_UNIPHIER_EP
+ bool "Socionext UniPhier PCIe endpoint controllers"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here if you want PCIe endpoint controller support on
+ UniPhier SoCs. This driver supports Pro5 SoC.
+
config PCIE_AL
bool "Amazon Annapurna Labs PCIe controller"
depends on OF && (ARM64 || COMPILE_TEST)
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 8a637cfcf6e9..a751553fa0db 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 3b0e58f2de58..6184ebc9392d 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -840,7 +840,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
struct phy **phy;
struct device_link **link;
void __iomem *base;
- struct resource *res;
struct dw_pcie *pci;
struct dra7xx_pcie *dra7xx;
struct device *dev = &pdev->dev;
@@ -877,10 +876,9 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
return irq;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
- base = devm_ioremap(dev, res->start, resource_size(res));
- if (!base)
- return -ENOMEM;
+ base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 0) {
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index acfbd34032a8..8f08ae53f53e 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -868,9 +868,9 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq <= 0) {
+ if (pp->msi_irq < 0) {
dev_err(dev, "failed to get MSI irq\n");
- return -ENODEV;
+ return pp->msi_irq;
}
}
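The same fix recurs below for mobiveil, tegra194 and aardvark: platform_get_irq*() returns a negative errno on failure, so the probe should propagate that value (notably -EPROBE_DEFER) rather than replace it with -ENODEV. A minimal sketch of the canonical pattern:

	int irq = platform_get_irq_byname(pdev, "msi");

	if (irq < 0)
		return irq;	/* keeps -EPROBE_DEFER, -ENXIO, ... intact */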
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 3715dceca1bf..ca59ba9e0ecd 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -289,11 +289,11 @@ static void meson_pcie_init_dw(struct meson_pcie *mp)
meson_cfg_writel(mp, val, PCIE_CFG0);
val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val &= ~LINK_CAPABLE_MASK;
+ val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE);
meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val |= LINK_CAPABLE_X1 | FAST_LINK_MODE;
+ val |= LINK_CAPABLE_X1;
meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index 1eeda2f6371f..270868f3859a 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -80,7 +80,7 @@ static int al_pcie_init(struct pci_config_window *cfg)
return 0;
}
-struct pci_ecam_ops al_pcie_ops = {
+const struct pci_ecam_ops al_pcie_ops = {
.bus_shift = 20,
.init = al_pcie_init,
.pci_ops = {
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1cdcbd102ce8..5e5b8821bed8 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -412,11 +412,11 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
reg = ep->msi_cap + PCI_MSI_DATA_32;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
- aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
+ aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
msg_addr = ((u64)msg_addr_upper) << 32 |
(msg_addr_lower & ~aligned_offset);
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
- epc->mem->page_size);
+ epc->mem->window.page_size);
if (ret)
return ret;
@@ -433,7 +433,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epf_msix_tbl *msix_tbl;
struct pci_epc *epc = ep->epc;
- struct pci_epf_bar *epf_bar;
u32 reg, msg_data, vec_ctrl;
unsigned int aligned_offset;
u32 tbl_offset;
@@ -446,10 +445,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- epf_bar = ep->epf_bar[bir];
- msix_tbl = epf_bar->addr;
- msix_tbl = (struct pci_epf_msix_tbl *)((char *)msix_tbl + tbl_offset);
-
+ msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
@@ -459,9 +455,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return -EPERM;
}
- aligned_offset = msg_addr & (epc->mem->page_size - 1);
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
- epc->mem->page_size);
+ epc->mem->window.page_size);
if (ret)
return ret;
@@ -477,7 +473,7 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
struct pci_epc *epc = ep->epc;
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->page_size);
+ epc->mem->window.page_size);
pci_epc_mem_exit(epc);
}
@@ -610,15 +606,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ret < 0)
epc->max_functions = 1;
- ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
- ep->page_size);
+ ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+ ep->page_size);
if (ret < 0) {
dev_err(dev, "Failed to initialize address space\n");
return ret;
}
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
- epc->mem->page_size);
+ epc->mem->window.page_size);
if (!ep->msi_mem) {
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
return -ENOMEM;
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 395feb8ca051..0a4a5aa6fe46 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -236,7 +236,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct pcie_port *pp = domain->host_data;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
@@ -264,6 +264,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return -ENOMEM;
}
+ irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
+
pp->msi_domain = pci_msi_create_irq_domain(fwnode,
&dw_pcie_msi_domain_info,
pp->irq_domain);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 681548c88282..c92496e36fd5 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -244,13 +244,16 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
u64 pci_addr, u32 size)
{
u32 retries, val;
+ u64 limit_addr = cpu_addr + size - 1;
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
lower_32_bits(cpu_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
upper_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
+ lower_32_bits(limit_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index d6e1f397e6b0..656e00f8fbeb 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -112,9 +112,10 @@
#define PCIE_ATU_UNR_REGION_CTRL2 0x04
#define PCIE_ATU_UNR_LOWER_BASE 0x08
#define PCIE_ATU_UNR_UPPER_BASE 0x0C
-#define PCIE_ATU_UNR_LIMIT 0x10
+#define PCIE_ATU_UNR_LOWER_LIMIT 0x10
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
/*
* The default address offset between dbi_base and atu_base. Root controller
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 6d9e1b2b8f7b..0ad4e07dd4c2 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -104,7 +104,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
return 0;
}
-struct pci_ecam_ops hisi_pcie_ops = {
+const struct pci_ecam_ops hisi_pcie_ops = {
.bus_shift = 20,
.init = hisi_pcie_init,
.pci_ops = {
@@ -332,15 +332,6 @@ static struct platform_driver hisi_pcie_driver = {
};
builtin_platform_driver(hisi_pcie_driver);
-static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct pci_ecam_ops *ops;
-
- ops = (struct pci_ecam_ops *)of_device_get_match_data(dev);
- return pci_host_common_probe(pdev, ops);
-}
-
static int hisi_pcie_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -362,7 +353,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
return 0;
}
-struct pci_ecam_ops hisi_pcie_platform_ops = {
+static const struct pci_ecam_ops hisi_pcie_platform_ops = {
.bus_shift = 20,
.init = hisi_pcie_platform_init,
.pci_ops = {
@@ -375,17 +366,17 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
{
.compatible = "hisilicon,hip06-pcie-ecam",
- .data = (void *) &hisi_pcie_platform_ops,
+ .data = &hisi_pcie_platform_ops,
},
{
.compatible = "hisilicon,hip07-pcie-ecam",
- .data = (void *) &hisi_pcie_platform_ops,
+ .data = &hisi_pcie_platform_ops,
},
{},
};
static struct platform_driver hisi_pcie_almost_ecam_driver = {
- .probe = hisi_pcie_almost_ecam_probe,
+ .probe = pci_host_common_probe,
.driver = {
.name = "hisi-pcie-almost-ecam",
.of_match_table = hisi_pcie_almost_ecam_of_match,
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index fc2a12212dec..2d8dbb318087 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -453,7 +453,7 @@ static int intel_pcie_msi_init(struct pcie_port *pp)
return 0;
}
-u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
+static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
return cpu_addr + BUS_IATU_OFFSET;
}
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index ae30a2fd3716..92b77f7d8354 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1623,7 +1623,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
ret = pinctrl_pm_select_default_state(dev);
if (ret < 0) {
dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
- goto fail_pinctrl;
+ goto fail_pm_get_sync;
}
tegra_pcie_init_controller(pcie);
@@ -1650,9 +1650,8 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
fail_host_init:
tegra_pcie_deinit_controller(pcie);
-fail_pinctrl:
- pm_runtime_put_sync(dev);
fail_pm_get_sync:
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2190,9 +2189,9 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
}
pp->irq = platform_get_irq_byname(pdev, "intr");
- if (!pp->irq) {
+ if (pp->irq < 0) {
dev_err(dev, "Failed to get \"intr\" interrupt\n");
- return -ENODEV;
+ return pp->irq;
}
pcie->bpmp = tegra_bpmp_get(dev);
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
new file mode 100644
index 000000000000..148355960061
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe endpoint controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/* Link Glue registers */
+#define PCL_RSTCTRL0 0x0010
+#define PCL_RSTCTRL_AXI_REG BIT(3)
+#define PCL_RSTCTRL_AXI_SLAVE BIT(2)
+#define PCL_RSTCTRL_AXI_MASTER BIT(1)
+#define PCL_RSTCTRL_PIPE3 BIT(0)
+
+#define PCL_RSTCTRL1 0x0020
+#define PCL_RSTCTRL_PERST BIT(0)
+
+#define PCL_RSTCTRL2 0x0024
+#define PCL_RSTCTRL_PHY_RESET BIT(0)
+
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
+#define PCL_APP_CLK_CTRL 0x8004
+#define PCL_APP_CLK_REQ BIT(0)
+
+#define PCL_APP_READY_CTRL 0x8008
+#define PCL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCL_APP_MSI0 0x8040
+#define PCL_APP_VEN_MSI_TC_MASK GENMASK(10, 8)
+#define PCL_APP_VEN_MSI_VECTOR_MASK GENMASK(4, 0)
+
+#define PCL_APP_MSI1 0x8044
+#define PCL_APP_MSI_REQ BIT(0)
+
+#define PCL_APP_INTX 0x8074
+#define PCL_APP_INTX_SYS_INT BIT(0)
+
+/* assertion time of INTx in usec */
+#define PCL_INTX_WIDTH_USEC 30
+
+struct uniphier_pcie_ep_priv {
+ void __iomem *base;
+ struct dw_pcie pci;
+ struct clk *clk, *clk_gio;
+ struct reset_control *rst, *rst_gio;
+ struct phy *phy;
+ const struct pci_epc_features *features;
+};
+
+#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_ep_priv *priv,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_APP_READY_CTRL);
+ if (enable)
+ val |= PCL_APP_LTSSM_ENABLE;
+ else
+ val &= ~PCL_APP_LTSSM_ENABLE;
+ writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv,
+ bool assert)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_RSTCTRL2);
+ if (assert)
+ val |= PCL_RSTCTRL_PHY_RESET;
+ else
+ val &= ~PCL_RSTCTRL_PHY_RESET;
+ writel(val, priv->base + PCL_RSTCTRL2);
+}
+
+static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* clock request */
+ val = readl(priv->base + PCL_APP_CLK_CTRL);
+ val &= ~PCL_APP_CLK_REQ;
+ writel(val, priv->base + PCL_APP_CLK_CTRL);
+
+ /* deassert PIPE3 and AXI reset */
+ val = readl(priv->base + PCL_RSTCTRL0);
+ val |= PCL_RSTCTRL_AXI_REG | PCL_RSTCTRL_AXI_SLAVE
+ | PCL_RSTCTRL_AXI_MASTER | PCL_RSTCTRL_PIPE3;
+ writel(val, priv->base + PCL_RSTCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ msleep(100);
+}
+
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, true);
+
+ return 0;
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+}
+
+static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ /*
+	 * This generates a pulse signal to send INTx to the RC, so it should
+	 * be cleared as soon as possible. This sequence is serialized by the
+	 * mutex in pci_epc_raise_irq().
+ */
+ /* assert INTx */
+ val = readl(priv->base + PCL_APP_INTX);
+ val |= PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ udelay(PCL_INTX_WIDTH_USEC);
+
+ /* deassert INTx */
+ val &= ~PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
+ u8 func_no, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ val = FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no)
+ | FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1);
+ writel(val, priv->base + PCL_APP_MSI0);
+
+ val = readl(priv->base + PCL_APP_MSI1);
+ val |= PCL_APP_MSI_REQ;
+ writel(val, priv->base + PCL_APP_MSI1);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return uniphier_pcie_ep_raise_legacy_irq(ep);
+ case PCI_EPC_IRQ_MSI:
+ return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
+ interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type (%d)\n", type);
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features*
+uniphier_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ return priv->features;
+}
+
+static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
+ .ep_init = uniphier_pcie_ep_init,
+ .raise_irq = uniphier_pcie_ep_raise_irq,
+ .get_features = uniphier_pcie_get_features,
+};
+
+static int uniphier_add_pcie_ep(struct uniphier_pcie_ep_priv *priv,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &priv->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+
+ ep->ops = &uniphier_pcie_ep_ops;
+
+ pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret)
+ dev_err(dev, "Failed to initialize endpoint (%d)\n", ret);
+
+ return ret;
+}
+
+static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk_gio);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ uniphier_pcie_init_ep(priv);
+
+ uniphier_pcie_phy_reset(priv, true);
+
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto out_rst_gio_assert;
+
+ uniphier_pcie_phy_reset(priv, false);
+
+ return 0;
+
+out_rst_gio_assert:
+ reset_control_assert(priv->rst_gio);
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_gio);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = uniphier_pcie_start_link,
+ .stop_link = uniphier_pcie_stop_link,
+};
+
+static int uniphier_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pcie_ep_priv *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->features = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->features))
+ return -EINVAL;
+
+ priv->pci.dev = dev;
+ priv->pci.ops = &dw_pcie_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(priv->pci.dbi_base))
+ return PTR_ERR(priv->pci.dbi_base);
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_gio))
+ return PTR_ERR(priv->clk_gio);
+
+ priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_gio))
+ return PTR_ERR(priv->rst_gio);
+
+ priv->clk = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ dev_err(dev, "Failed to get phy (%d)\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = uniphier_pcie_ep_enable(priv);
+ if (ret)
+ return ret;
+
+ return uniphier_add_pcie_ep(priv, pdev);
+}
+
+static const struct pci_epc_features uniphier_pro5_data = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 16,
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .reserved_bar = BIT(BAR_4),
+};
+
+static const struct of_device_id uniphier_pcie_ep_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro5-pcie-ep",
+ .data = &uniphier_pro5_data,
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver uniphier_pcie_ep_driver = {
+ .probe = uniphier_pcie_ep_probe,
+ .driver = {
+ .name = "uniphier-pcie-ep",
+ .of_match_table = uniphier_pcie_ep_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(uniphier_pcie_ep_driver);
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index a94be264240f..5907baa9b1f2 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -522,9 +522,9 @@ static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie)
mobiveil_pcie_enable_msi(pcie);
rp->irq = platform_get_irq(pdev, 0);
- if (rp->irq <= 0) {
+ if (rp->irq < 0) {
dev_err(dev, "failed to map IRQ: %d\n", rp->irq);
- return -ENODEV;
+ return rp->irq;
}
/* initialize the IRQ domains */
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 2a20b649f40c..90ff291c24f0 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -9,15 +9,18 @@
*/
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
+#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include "../pci.h"
@@ -31,16 +34,6 @@
#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
#define PCIE_CORE_DEV_REV_REG 0x8
#define PCIE_CORE_PCIEXP_CAP 0xc0
-#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
-#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
-#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
-#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
-#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
-#define PCIE_CORE_LINK_TRAINING BIT(5)
-#define PCIE_CORE_LINK_WIDTH_SHIFT 20
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
@@ -101,6 +94,8 @@
#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
+#define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14)
+#define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1)
#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
#define PCIE_MSG_PM_PME_MASK BIT(7)
@@ -201,7 +196,10 @@ struct advk_pcie {
struct mutex msi_used_lock;
u16 msi_msg;
int root_bus_nr;
+ int link_gen;
struct pci_bridge_emul bridge;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
};
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
@@ -214,6 +212,11 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
return readl(pcie->base + reg);
}
+static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg)
+{
+ return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8);
+}
+
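advk_read16() emulates a 16-bit read on top of the 32-bit register file: it rounds the offset down to a 32-bit boundary and shifts the wanted half-word into the low bits. A worked example, assuming reg = PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA (0xc0 + 0x12 = 0xd2):

	/* aligned read at 0xd0, then shift by (0xd2 & 0x3) * 8 = 16 bits */
	u16 lnksta = advk_readl(pcie, reg & ~0x3) >> ((reg & 0x3) * 8);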
static int advk_pcie_link_up(struct advk_pcie *pcie)
{
u32 val, ltssm_state;
@@ -225,20 +228,16 @@ static int advk_pcie_link_up(struct advk_pcie *pcie)
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
- struct device *dev = &pcie->pdev->dev;
int retries;
/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (advk_pcie_link_up(pcie)) {
- dev_info(dev, "link up\n");
+ if (advk_pcie_link_up(pcie))
return 0;
- }
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(dev, "link never came up\n");
return -ETIMEDOUT;
}
@@ -253,10 +252,115 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
}
}
+static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
+{
+ int ret, neg_gen;
+ u32 reg;
+
+ /* Setup link speed */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~PCIE_GEN_SEL_MSK;
+ if (gen == 3)
+ reg |= SPEED_GEN_3;
+ else if (gen == 2)
+ reg |= SPEED_GEN_2;
+ else
+ reg |= SPEED_GEN_1;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /*
+	 * Enable link training. This only needs to be done once, not on
+	 * every call, but repeating it does not break anything either.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg |= LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /*
+ * Start link training immediately after enabling it.
+ * This solves problems for some buggy cards.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
+ reg |= PCI_EXP_LNKCTL_RL;
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
+
+ ret = advk_pcie_wait_for_link(pcie);
+ if (ret)
+ return ret;
+
+ reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA);
+ neg_gen = reg & PCI_EXP_LNKSTA_CLS;
+
+ return neg_gen;
+}
+
+static void advk_pcie_train_link(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ int neg_gen = -1, gen;
+
+ /*
+	 * Try link training at the link gen specified by the device tree
+	 * property 'max-link-speed'. If this fails, iteratively retrain at lower gens.
+ */
+ for (gen = pcie->link_gen; gen > 0; --gen) {
+ neg_gen = advk_pcie_train_at_gen(pcie, gen);
+ if (neg_gen > 0)
+ break;
+ }
+
+ if (neg_gen < 0)
+ goto err;
+
+ /*
+	 * After successful training, if the negotiated gen is lower than
+	 * requested, train again at the negotiated gen. This solves stability
+	 * issues with some buggy gen1 cards.
+ */
+ if (neg_gen < gen) {
+ gen = neg_gen;
+ neg_gen = advk_pcie_train_at_gen(pcie, gen);
+ }
+
+ if (neg_gen == gen) {
+ dev_info(dev, "link up at gen %i\n", gen);
+ return;
+ }
+
+err:
+ dev_err(dev, "link never came up\n");
+}
+
+static void advk_pcie_issue_perst(struct advk_pcie *pcie)
+{
+ u32 reg;
+
+ if (!pcie->reset_gpio)
+ return;
+
+ /* PERST does not work for some cards when link training is enabled */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* 10ms delay is needed for some cards */
+ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
+ advk_pcie_issue_perst(pcie);
+
+ /* Enable TX */
+ reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
+ reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);
+
/* Set to Direct mode */
reg = advk_readl(pcie, CTRL_CONFIG_REG);
reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
@@ -275,36 +379,26 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
- /* Set PCIe Device Control and Status 1 PF0 register */
- reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
- (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
- PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
- (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
- PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
- advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
+ /* Set PCIe Device Control register */
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
+ reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+ reg &= ~PCI_EXP_DEVCTL_READRQ;
+ reg |= PCI_EXP_DEVCTL_PAYLOAD; /* Set max payload size */
+ reg |= PCI_EXP_DEVCTL_READRQ_512B;
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
/* Program PCIe Control 2 to disable strict ordering */
reg = PCIE_CORE_CTRL2_RESERVED |
PCIE_CORE_CTRL2_TD_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
- /* Set GEN2 */
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
- reg &= ~PCIE_GEN_SEL_MSK;
- reg |= SPEED_GEN_2;
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
/* Set lane X1 */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg &= ~LANE_CNT_MSK;
reg |= LANE_COUNT_1;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
- /* Enable link training */
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
- reg |= LINK_TRAINING_EN;
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
/* Enable MSI */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
@@ -340,23 +434,22 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
/*
* PERST# signal could have been asserted by pinctrl subsystem before
- * probe() callback has been called, making the endpoint going into
+ * probe() callback has been called or issued explicitly by reset gpio
+	 * function advk_pcie_issue_perst(), making the endpoint go into
* fundamental reset. As required by PCI Express spec a delay for at
* least 100ms after such a reset before link training is needed.
*/
msleep(PCI_PM_D3COLD_WAIT);
- /* Start link training */
- reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
- reg |= PCIE_CORE_LINK_TRAINING;
- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
-
- advk_pcie_wait_for_link(pcie);
-
- reg = PCIE_CORE_LINK_L0S_ENTRY |
- (1 << PCIE_CORE_LINK_WIDTH_SHIFT);
- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+ advk_pcie_train_link(pcie);
+ /*
+ * FIXME: The following register update is suspicious. This register is
+ * applicable only when the PCI controller is configured for Endpoint
+ * mode, not as a Root Complex. But apparently when this code is
+ * removed, some cards stop working. This should be investigated and
+ * a comment explaining this should be put here.
+ */
reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
PCIE_CORE_CMD_IO_ACCESS_EN |
@@ -952,6 +1045,62 @@ static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
+{
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+}
+
+static int advk_pcie_enable_phy(struct advk_pcie *pcie)
+{
+ int ret;
+
+ if (!pcie->phy)
+ return 0;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
+ if (ret) {
+ phy_exit(pcie->phy);
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ phy_exit(pcie->phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int advk_pcie_setup_phy(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret = 0;
+
+ pcie->phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
+ return PTR_ERR(pcie->phy);
+
+ /* Old bindings miss the PHY handle */
+ if (IS_ERR(pcie->phy)) {
+ dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
+ pcie->phy = NULL;
+ return 0;
+ }
+
+ ret = advk_pcie_enable_phy(pcie);
+ if (ret)
+ dev_err(dev, "Failed to initialize PHY (%d)\n", ret);
+
+ return ret;
+}
+
static int advk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -973,6 +1122,9 @@ static int advk_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->base);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
pcie);
@@ -989,6 +1141,32 @@ static int advk_pcie_probe(struct platform_device *pdev)
}
pcie->root_bus_nr = bus->start;
+ pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
+ "reset-gpios", 0,
+ GPIOD_OUT_LOW,
+ "pcie1-reset");
+ ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
+ if (ret) {
+ if (ret == -ENOENT) {
+ pcie->reset_gpio = NULL;
+ } else {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset-gpio: %i\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = of_pci_get_max_link_speed(dev->of_node);
+ if (ret <= 0 || ret > 3)
+ pcie->link_gen = 3;
+ else
+ pcie->link_gen = ret;
+
+ ret = advk_pcie_setup_phy(pcie);
+ if (ret)
+ return ret;
+
advk_pcie_setup_hw(pcie);
advk_sw_pci_bridge_init(pcie);
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index 250a3fc80ec6..953de57f6c57 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -8,7 +8,9 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
@@ -19,7 +21,7 @@ static void gen_pci_unmap_cfg(void *ptr)
}
static struct pci_config_window *gen_pci_init(struct device *dev,
- struct list_head *resources, struct pci_ecam_ops *ops)
+ struct list_head *resources, const struct pci_ecam_ops *ops)
{
int err;
struct resource cfgres;
@@ -54,15 +56,19 @@ err_out:
return ERR_PTR(err);
}
-int pci_host_common_probe(struct platform_device *pdev,
- struct pci_ecam_ops *ops)
+int pci_host_common_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
struct list_head resources;
+ const struct pci_ecam_ops *ops;
int ret;
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -ENODEV;
+
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -82,7 +88,7 @@ int pci_host_common_probe(struct platform_device *pdev,
bridge->dev.parent = dev;
bridge->sysdata = cfg;
bridge->busnr = cfg->busr.start;
- bridge->ops = &ops->pci_ops;
+ bridge->ops = (struct pci_ops *)&ops->pci_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
@@ -95,6 +101,7 @@ int pci_host_common_probe(struct platform_device *pdev,
platform_set_drvdata(pdev, bridge->bus);
return 0;
}
+EXPORT_SYMBOL_GPL(pci_host_common_probe);
int pci_host_common_remove(struct platform_device *pdev)
{
@@ -107,3 +114,6 @@ int pci_host_common_remove(struct platform_device *pdev)
return 0;
}
+EXPORT_SYMBOL_GPL(pci_host_common_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 75a2fb930d4b..b51977abfdf1 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -10,12 +10,11 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_address.h>
-#include <linux/of_pci.h>
+#include <linux/module.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
+static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -49,7 +48,7 @@ static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus,
return pci_ecam_map_bus(bus, devfn, where);
}
-static struct pci_ecam_ops pci_dw_ecam_bus_ops = {
+static const struct pci_ecam_ops pci_dw_ecam_bus_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_dw_ecam_map_bus,
@@ -76,25 +75,16 @@ static const struct of_device_id gen_pci_of_match[] = {
{ },
};
-
-static int gen_pci_probe(struct platform_device *pdev)
-{
- const struct of_device_id *of_id;
- struct pci_ecam_ops *ops;
-
- of_id = of_match_node(gen_pci_of_match, pdev->dev.of_node);
- ops = (struct pci_ecam_ops *)of_id->data;
-
- return pci_host_common_probe(pdev, ops);
-}
+MODULE_DEVICE_TABLE(of, gen_pci_of_match);
static struct platform_driver gen_pci_driver = {
.driver = {
.name = "pci-host-generic",
.of_match_table = gen_pci_of_match,
- .suppress_bind_attrs = true,
},
- .probe = gen_pci_probe,
+ .probe = pci_host_common_probe,
.remove = pci_host_common_remove,
};
-builtin_platform_driver(gen_pci_driver);
+module_platform_driver(gen_pci_driver);
+
+MODULE_LICENSE("GPL v2");
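After this change a generic ECAM host driver needs no probe wrapper: pci_host_common_probe() fetches the pci_ecam_ops itself via of_device_get_match_data(). A minimal sketch of a driver using the new contract (the "vendor,my-pcie-ecam" compatible and driver name are illustrative; pci_generic_ecam_ops is assumed to be the generic ops exported by <linux/pci-ecam.h>):

	static const struct of_device_id my_pci_of_match[] = {
		{ .compatible = "vendor,my-pcie-ecam",
		  .data = &pci_generic_ecam_ops },
		{ },
	};
	MODULE_DEVICE_TABLE(of, my_pci_of_match);

	static struct platform_driver my_pci_driver = {
		.driver = {
			.name = "my-pcie",
			.of_match_table = my_pci_of_match,
		},
		.probe = pci_host_common_probe,	/* ops come from match data */
		.remove = pci_host_common_remove,
	};
	module_platform_driver(my_pci_driver);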
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index e15022ff63e3..bf40ff09c99d 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -480,6 +480,9 @@ struct hv_pcibus_device {
struct workqueue_struct *wq;
+ /* Highest slot of child device with resources allocated */
+ int wslot_res_allocated;
+
/* hypercall arg, must not cross page boundary */
struct hv_retarget_device_interrupt retarget_msi_interrupt_params;
@@ -1356,11 +1359,11 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct irq_cfg *cfg = irqd_cfg(data);
struct hv_pcibus_device *hbus;
+ struct vmbus_channel *channel;
struct hv_pci_dev *hpdev;
struct pci_bus *pbus;
struct pci_dev *pdev;
struct cpumask *dest;
- unsigned long flags;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct {
@@ -1378,6 +1381,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ channel = hbus->hdev->channel;
hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
if (!hpdev)
goto return_null_message;
@@ -1436,42 +1440,51 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
}
/*
+ * Prevents hv_pci_onchannelcallback() from running concurrently
+ * in the tasklet.
+ */
+ tasklet_disable(&channel->callback_event);
+
+ /*
* Since this function is called with IRQ locks held, can't
* do normal wait for completion; instead poll.
*/
while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
+ unsigned long flags;
+
/* 0xFFFF means an invalid PCI VENDOR ID. */
if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
dev_err_once(&hbus->hdev->device,
"the device has gone\n");
- goto free_int_desc;
+ goto enable_tasklet;
}
/*
- * When the higher level interrupt code calls us with
- * interrupt disabled, we must poll the channel by calling
- * the channel callback directly when channel->target_cpu is
- * the current CPU. When the higher level interrupt code
- * calls us with interrupt enabled, let's add the
- * local_irq_save()/restore() to avoid race:
- * hv_pci_onchannelcallback() can also run in tasklet.
+ * Make sure that the ring buffer data structure doesn't get
+ * freed while we dereference the ring buffer pointer. Test
+ * for the channel's onchannel_callback being NULL within a
+ * sched_lock critical section. See also the inline comments
+ * in vmbus_reset_channel_cb().
*/
- local_irq_save(flags);
-
- if (hbus->hdev->channel->target_cpu == smp_processor_id())
- hv_pci_onchannelcallback(hbus);
-
- local_irq_restore(flags);
+ spin_lock_irqsave(&channel->sched_lock, flags);
+ if (unlikely(channel->onchannel_callback == NULL)) {
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
+ goto enable_tasklet;
+ }
+ hv_pci_onchannelcallback(hbus);
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
if (hpdev->state == hv_pcichild_ejecting) {
dev_err_once(&hbus->hdev->device,
"the device is being ejected\n");
- goto free_int_desc;
+ goto enable_tasklet;
}
udelay(100);
}
+ tasklet_enable(&channel->callback_event);
+
if (comp.comp_pkt.completion_status < 0) {
dev_err(&hbus->hdev->device,
"Request for interrupt failed: 0x%x",
@@ -1495,6 +1508,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
put_pcichild(hpdev);
return;
+enable_tasklet:
+ tasklet_enable(&channel->callback_event);
free_int_desc:
kfree(int_desc);
drop_reference:
@@ -2198,10 +2213,8 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
struct hv_dr_state *dr;
int i;
- dr = kzalloc(offsetof(struct hv_dr_state, func) +
- (sizeof(struct hv_pcidev_description) *
- (relations->device_count)), GFP_NOWAIT);
-
+ dr = kzalloc(struct_size(dr, func, relations->device_count),
+ GFP_NOWAIT);
if (!dr)
return;
@@ -2235,10 +2248,8 @@ static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
struct hv_dr_state *dr;
int i;
- dr = kzalloc(offsetof(struct hv_dr_state, func) +
- (sizeof(struct hv_pcidev_description) *
- (relations->device_count)), GFP_NOWAIT);
-
+ dr = kzalloc(struct_size(dr, func, relations->device_count),
+ GFP_NOWAIT);
if (!dr)
return;
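struct_size() from <linux/overflow.h> computes the allocation size of a structure with a trailing flexible array while checking the arithmetic for overflow, replacing the open-coded offsetof() expression. A sketch of the equivalence for the hv_dr_state case above:

	/* struct hv_dr_state { ...; struct hv_pcidev_description func[]; }; */
	size_t sz = struct_size(dr, func, relations->device_count);
	/*
	 * Equivalent to offsetof(struct hv_dr_state, func) +
	 * device_count * sizeof(dr->func[0]), except that it saturates to
	 * SIZE_MAX on overflow so the subsequent kzalloc() fails cleanly.
	 */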
@@ -2432,9 +2443,8 @@ static void hv_pci_onchannelcallback(void *context)
bus_rel = (struct pci_bus_relations *)buffer;
if (bytes_recvd <
- offsetof(struct pci_bus_relations, func) +
- (sizeof(struct pci_function_description) *
- (bus_rel->device_count))) {
+ struct_size(bus_rel, func,
+ bus_rel->device_count)) {
dev_err(&hbus->hdev->device,
"bus relations too small\n");
break;
@@ -2447,9 +2457,8 @@ static void hv_pci_onchannelcallback(void *context)
bus_rel2 = (struct pci_bus_relations2 *)buffer;
if (bytes_recvd <
- offsetof(struct pci_bus_relations2, func) +
- (sizeof(struct pci_function_description2) *
- (bus_rel2->device_count))) {
+ struct_size(bus_rel2, func,
+ bus_rel2->device_count)) {
dev_err(&hbus->hdev->device,
"bus relations v2 too small\n");
break;
@@ -2736,6 +2745,8 @@ static void hv_free_config_window(struct hv_pcibus_device *hbus)
vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}
+static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
+
/**
* hv_pci_enter_d0() - Bring the "bus" into the D0 power state
* @hdev: VMBus's tracking struct for this root PCI bus
@@ -2748,8 +2759,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
struct pci_bus_d0_entry *d0_entry;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
+ bool retry = true;
int ret;
+enter_d0_retry:
/*
* Tell the host that the bus is ready to use, and moved into the
* powered-on state. This includes telling the host which region
@@ -2776,6 +2789,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
if (ret)
goto exit;
+ /*
+	 * In certain cases (e.g. kdump) the PCI device of interest was
+	 * not cleanly shut down and its resource is still held on the
+	 * host side; the host could then return an invalid device status.
+	 * We need to explicitly ask the host to release the resource
+	 * and try to enter D0 again.
+ */
+ if (comp_pkt.completion_status < 0 && retry) {
+ retry = false;
+
+ dev_err(&hdev->device, "Retrying D0 Entry\n");
+
+ /*
+			 * hv_pci_bus_exit() calls hv_send_resources_released()
+			 * to free up the resources of its child devices.
+			 * In the kdump kernel we need to set wslot_res_allocated
+			 * to 255 so it scans all child devices to release the
+			 * resources allocated in the normal kernel before the
+			 * panic happened.
+ */
+ hbus->wslot_res_allocated = 255;
+
+ ret = hv_pci_bus_exit(hdev, true);
+
+ if (ret == 0) {
+ kfree(pkt);
+ goto enter_d0_retry;
+ }
+ dev_err(&hdev->device,
+ "Retrying D0 failed with ret %d\n", ret);
+ }
+
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -2847,7 +2892,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
struct hv_pci_dev *hpdev;
struct pci_packet *pkt;
size_t size_res;
- u32 wslot;
+ int wslot;
int ret;
size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
@@ -2900,6 +2945,8 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
comp_pkt.completion_status);
break;
}
+
+ hbus->wslot_res_allocated = wslot;
}
kfree(pkt);
@@ -2918,10 +2965,10 @@ static int hv_send_resources_released(struct hv_device *hdev)
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct pci_child_message pkt;
struct hv_pci_dev *hpdev;
- u32 wslot;
+ int wslot;
int ret;
- for (wslot = 0; wslot < 256; wslot++) {
+ for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
hpdev = get_pcichild_wslot(hbus, wslot);
if (!hpdev)
continue;
@@ -2936,8 +2983,12 @@ static int hv_send_resources_released(struct hv_device *hdev)
VM_PKT_DATA_INBAND, 0);
if (ret)
return ret;
+
+ hbus->wslot_res_allocated = wslot - 1;
}
+ hbus->wslot_res_allocated = -1;
+
return 0;
}
@@ -3037,6 +3088,7 @@ static int hv_pci_probe(struct hv_device *hdev,
if (!hbus)
return -ENOMEM;
hbus->state = hv_pcibus_init;
+ hbus->wslot_res_allocated = -1;
/*
* The PCI bus "domain" is what is called "segment" in ACPI and other
@@ -3136,7 +3188,7 @@ static int hv_pci_probe(struct hv_device *hdev,
ret = hv_pci_allocate_bridge_windows(hbus);
if (ret)
- goto free_irq_domain;
+ goto exit_d0;
ret = hv_send_resources_allocated(hdev);
if (ret)
@@ -3154,6 +3206,8 @@ static int hv_pci_probe(struct hv_device *hdev,
free_windows:
hv_pci_free_bridge_windows(hbus);
+exit_d0:
+ (void) hv_pci_bus_exit(hdev, true);
free_irq_domain:
irq_domain_remove(hbus->irq_domain);
free_fwnode:
@@ -3173,7 +3227,7 @@ free_bus:
return ret;
}
-static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
+static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct {
@@ -3191,7 +3245,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
if (hdev->channel->rescind)
return 0;
- if (!hibernating) {
+ if (!keep_devs) {
/* Delete any children which might still exist. */
dr = kzalloc(sizeof(*dr), GFP_KERNEL);
if (dr && hv_pci_start_relations_work(hbus, dr))
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
new file mode 100644
index 000000000000..459009c8a4a0
--- /dev/null
+++ b/drivers/pci/controller/pci-loongson.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Loongson PCI Host Controller Driver
+ *
+ * Copyright (C) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ */
+
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include "../pci.h"
+
+/* Device IDs */
+#define DEV_PCIE_PORT_0 0x7a09
+#define DEV_PCIE_PORT_1 0x7a19
+#define DEV_PCIE_PORT_2 0x7a29
+
+#define DEV_LS2K_APB 0x7a02
+#define DEV_LS7A_CONF 0x7a10
+#define DEV_LS7A_LPC 0x7a0c
+
+#define FLAG_CFG0 BIT(0)
+#define FLAG_CFG1 BIT(1)
+#define FLAG_DEV_FIX BIT(2)
+
+struct loongson_pci {
+ void __iomem *cfg0_base;
+ void __iomem *cfg1_base;
+ struct platform_device *pdev;
+ u32 flags;
+};
+
+/* Fixup wrong class code in PCIe bridges */
+static void bridge_class_quirk(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON,
+ DEV_PCIE_PORT_0, bridge_class_quirk);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON,
+ DEV_PCIE_PORT_1, bridge_class_quirk);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON,
+ DEV_PCIE_PORT_2, bridge_class_quirk);
+
+static void system_bus_quirk(struct pci_dev *pdev)
+{
+ /*
+ * The address space consumed by these devices is outside the
+ * resources of the host bridge.
+ */
+ pdev->mmio_always_on = 1;
+ pdev->non_compliant_bars = 1;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS2K_APB, system_bus_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_CONF, system_bus_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_LPC, system_bus_quirk);
+
+static void loongson_mrrs_quirk(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ struct pci_dev *bridge;
+ static const struct pci_device_id bridge_devids[] = {
+ { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_0) },
+ { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_1) },
+ { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_2) },
+ { 0, },
+ };
+
+ /* look for the matching bridge */
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ bus = bus->parent;
+ /*
+ * Some Loongson PCIe ports have a h/w limitation of
+ * 256 bytes maximum read request size. They can't handle
+ * anything larger than this. So force this limit on
+ * any devices attached under these ports.
+ */
+ if (pci_match_id(bridge_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+ pci_info(dev, "limiting MRRS to 256\n");
+ pcie_set_readrq(dev, 256);
+ }
+ break;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
+
+static void __iomem *cfg1_map(struct loongson_pci *priv, int bus,
+ unsigned int devfn, int where)
+{
+ unsigned long addroff = 0x0;
+
+ if (bus != 0)
+ addroff |= BIT(28); /* Type 1 Access */
+ addroff |= (where & 0xff) | ((where & 0xf00) << 16);
+ addroff |= (bus << 16) | (devfn << 8);
+ return priv->cfg1_base + addroff;
+}
+
+static void __iomem *cfg0_map(struct loongson_pci *priv, int bus,
+ unsigned int devfn, int where)
+{
+ unsigned long addroff = 0x0;
+
+ if (bus != 0)
+ addroff |= BIT(24); /* Type 1 Access */
+ addroff |= (bus << 16) | (devfn << 8) | where;
+ return priv->cfg0_base + addroff;
+}
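
For reference, a standalone sketch of how cfg0_map() and cfg1_map() compose a
config-space offset. The bus/devfn/where values below are illustrative
samples, not taken from the driver.

/* Userspace model of the CFG0/CFG1 offset encoding above. CFG0 carries
 * an 8-bit register number only; CFG1 moves bits [11:8] of 'where' up
 * to bits [27:24] so it can reach extended config space.
 */
#include <stdio.h>

int main(void)
{
	unsigned int bus = 1, devfn = (2 << 3) | 0;	/* PCI_DEVFN(2, 0) */
	unsigned int where0 = 0x44, where1 = 0x104;	/* sample registers */
	unsigned long cfg0, cfg1;

	cfg0 = (bus ? 1UL << 24 : 0) |			/* type 1 access */
	       (bus << 16) | (devfn << 8) | (where0 & 0xff);

	cfg1 = (bus ? 1UL << 28 : 0) |			/* type 1 access */
	       ((where1 & 0xf00) << 16) |
	       (bus << 16) | (devfn << 8) | (where1 & 0xff);

	printf("cfg0 offset 0x%lx, cfg1 offset 0x%lx\n", cfg0, cfg1);
	return 0;
}
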
+
+static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ unsigned char busnum = bus->number;
+ struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+ struct loongson_pci *priv = pci_host_bridge_priv(bridge);
+
+ /*
+ * Do not probe more than one device on buses other than the
+ * host bus. On this hardware the root bus is always bus 0.
+ */
+ if (priv->flags & FLAG_DEV_FIX && busnum != 0 &&
+ PCI_SLOT(devfn) > 0)
+ return NULL;
+
+ /* CFG0 can only access standard space */
+ if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base)
+ return cfg0_map(priv, busnum, devfn, where);
+
+ /* CFG1 can access extended space */
+ if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base)
+ return cfg1_map(priv, busnum, devfn, where);
+
+ return NULL;
+}
+
+static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int irq;
+ u8 val;
+
+ irq = of_irq_parse_and_map_pci(dev, slot, pin);
+ if (irq > 0)
+ return irq;
+
+ /* Take care of i8259 legacy systems */
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &val);
+ /* The i8259 only has 15 IRQs */
+ if (val > 15)
+ return 0;
+
+ return val;
+}
+
+/* The hardware only accepts 32-bit PCI operations */
+static struct pci_ops loongson_pci_ops = {
+ .map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static const struct of_device_id loongson_pci_of_match[] = {
+ { .compatible = "loongson,ls2k-pci",
+ .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), },
+ { .compatible = "loongson,ls7a-pci",
+ .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), },
+ { .compatible = "loongson,rs780e-pci",
+ .data = (void *)(FLAG_CFG0), },
+ {}
+};
+
+static int loongson_pci_probe(struct platform_device *pdev)
+{
+ struct loongson_pci *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct pci_host_bridge *bridge;
+ struct resource *regs;
+ int err;
+
+ if (!node)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv));
+ if (!bridge)
+ return -ENODEV;
+
+ priv = pci_host_bridge_priv(bridge);
+ priv->pdev = pdev;
+ priv->flags = (unsigned long)of_device_get_match_data(dev);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(dev, "missing mem resources for cfg0\n");
+ return -EINVAL;
+ }
+
+ priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(priv->cfg0_base))
+ return PTR_ERR(priv->cfg0_base);
+
+ /* CFG1 is optional */
+ if (priv->flags & FLAG_CFG1) {
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs)
+ dev_info(dev, "missing mem resource for cfg1\n");
+ else {
+ priv->cfg1_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(priv->cfg1_base))
+ priv->cfg1_base = NULL;
+ }
+ }
+
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
+ if (err) {
+ dev_err(dev, "failed to get bridge resources\n");
+ return err;
+ }
+
+ bridge->dev.parent = dev;
+ bridge->sysdata = priv;
+ bridge->ops = &loongson_pci_ops;
+ bridge->map_irq = loongson_map_irq;
+
+ err = pci_host_probe(bridge);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static struct platform_driver loongson_pci_driver = {
+ .driver = {
+ .name = "loongson-pci",
+ .of_match_table = loongson_pci_of_match,
+ },
+ .probe = loongson_pci_probe,
+};
+builtin_platform_driver(loongson_pci_driver);
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 3e64ba6a36a8..235b456698fc 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2219,8 +2219,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
rp->reset_gpio = NULL;
} else {
- dev_err(dev, "failed to get reset GPIO: %d\n",
- err);
+ dev_err(dev, "failed to get reset GPIO: %ld\n",
+ PTR_ERR(rp->reset_gpio));
return PTR_ERR(rp->reset_gpio);
}
}
@@ -2712,7 +2712,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = pm_runtime_get_sync(pcie->dev);
if (err < 0) {
dev_err(dev, "fail to enable pcie controller: %d\n", err);
- goto teardown_msi;
+ goto pm_runtime_put;
}
host->busnr = bus->start;
@@ -2746,7 +2746,6 @@ static int tegra_pcie_probe(struct platform_device *pdev)
pm_runtime_put:
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
-teardown_msi:
tegra_pcie_msi_teardown(pcie);
put_resources:
tegra_pcie_put_resources(pcie);
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index 32d1d7b81ef4..7e8835fee5f7 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -345,7 +345,7 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-struct pci_ecam_ops pci_thunder_ecam_ops = {
+const struct pci_ecam_ops pci_thunder_ecam_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -357,22 +357,20 @@ struct pci_ecam_ops pci_thunder_ecam_ops = {
#ifdef CONFIG_PCI_HOST_THUNDER_ECAM
static const struct of_device_id thunder_ecam_of_match[] = {
- { .compatible = "cavium,pci-host-thunder-ecam" },
+ {
+ .compatible = "cavium,pci-host-thunder-ecam",
+ .data = &pci_thunder_ecam_ops,
+ },
{ },
};
-static int thunder_ecam_probe(struct platform_device *pdev)
-{
- return pci_host_common_probe(pdev, &pci_thunder_ecam_ops);
-}
-
static struct platform_driver thunder_ecam_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = thunder_ecam_of_match,
.suppress_bind_attrs = true,
},
- .probe = thunder_ecam_probe,
+ .probe = pci_host_common_probe,
};
builtin_platform_driver(thunder_ecam_driver);
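
The conversion above relies on the generic pci_host_common_probe() recovering
the ECAM ops from the matched OF table entry's .data pointer. A minimal
sketch of that match-data lookup pattern follows; example_probe() is a
hypothetical stand-in, not the actual pci_host_common_probe() body.

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

/* Sketch of the match-data pattern: the per-SoC ops come from the OF
 * match table instead of a per-driver probe wrapper.
 */
static int example_probe(struct platform_device *pdev)
{
	const struct pci_ecam_ops *ops;

	ops = of_device_get_match_data(&pdev->dev);
	if (!ops)
		return -ENODEV;

	/* ... map the config window and scan the bus using ops ... */
	return 0;
}
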
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 9491e266b1ea..3f847969143e 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -403,7 +403,7 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
return thunder_pem_init(dev, cfg, res_pem);
}
-struct pci_ecam_ops thunder_pem_ecam_ops = {
+const struct pci_ecam_ops thunder_pem_ecam_ops = {
.bus_shift = 24,
.init = thunder_pem_acpi_init,
.pci_ops = {
@@ -440,7 +440,7 @@ static int thunder_pem_platform_init(struct pci_config_window *cfg)
return thunder_pem_init(dev, cfg, res_pem);
}
-static struct pci_ecam_ops pci_thunder_pem_ops = {
+static const struct pci_ecam_ops pci_thunder_pem_ops = {
.bus_shift = 24,
.init = thunder_pem_platform_init,
.pci_ops = {
@@ -451,22 +451,20 @@ static struct pci_ecam_ops pci_thunder_pem_ops = {
};
static const struct of_device_id thunder_pem_of_match[] = {
- { .compatible = "cavium,pci-host-thunder-pem" },
+ {
+ .compatible = "cavium,pci-host-thunder-pem",
+ .data = &pci_thunder_pem_ops,
+ },
{ },
};
-static int thunder_pem_probe(struct platform_device *pdev)
-{
- return pci_host_common_probe(pdev, &pci_thunder_pem_ops);
-}
-
static struct platform_driver thunder_pem_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = thunder_pem_of_match,
.suppress_bind_attrs = true,
},
- .probe = thunder_pem_probe,
+ .probe = pci_host_common_probe,
};
builtin_platform_driver(thunder_pem_driver);
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index bd05221f5a22..3681e5af3878 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -720,7 +720,7 @@ static int v3_pci_probe(struct platform_device *pdev)
int irq;
int ret;
- host = pci_alloc_host_bridge(sizeof(*v3));
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*v3));
if (!host)
return -ENOMEM;
@@ -777,9 +777,9 @@ static int v3_pci_probe(struct platform_device *pdev)
/* Get and request error IRQ resource */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ if (irq < 0) {
dev_err(dev, "unable to obtain PCIv3 error IRQ\n");
- return -ENODEV;
+ return irq;
}
ret = devm_request_irq(dev, irq, v3_irq, 0,
"PCIv3 error", v3);
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index de195fd430dc..d1efa8ffbae1 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -256,7 +256,7 @@ static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
}
-struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
+const struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
.bus_shift = 16,
.init = xgene_v1_pcie_ecam_init,
.pci_ops = {
@@ -271,7 +271,7 @@ static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
}
-struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
+const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
.bus_shift = 16,
.init = xgene_v2_pcie_ecam_init,
.pci_ops = {
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index b447c3e4abad..24cb1c331058 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -193,7 +193,7 @@ static bool altera_pcie_valid_device(struct altera_pcie *pcie,
if (bus->number == pcie->root_bus_nr && dev > 0)
return false;
- return true;
+ return true;
}
static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 6d79d14527a6..7730ea845ff2 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -28,6 +28,8 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
#include "../pci.h"
/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
@@ -41,6 +43,9 @@
#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
+
#define PCIE_RC_DL_MDIO_ADDR 0x1100
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
@@ -54,11 +59,11 @@
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO 0x400c
#define PCIE_MEM_WIN0_LO(win) \
- PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 4)
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI 0x4010
#define PCIE_MEM_WIN0_HI(win) \
- PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 4)
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
@@ -693,10 +698,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
int num_out_wins = 0;
u16 nlw, cls, lnksta;
int i, ret;
- u32 tmp;
+ u32 tmp, aspm_support;
/* Reset the bridge */
brcm_pcie_bridge_sw_init_set(pcie, 1);
+ brcm_pcie_perst_set(pcie, 1);
usleep_range(100, 200);
@@ -803,6 +809,15 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
/*
* For config space accesses on the RC, show the right class for
* a PCIe-PCIe bridge (the default setting is to be EP mode).
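
The ASPM hunk above rewrites only the two-bit support field inside the link
capability register. Below is a standalone model of the masked replace that
u32p_replace_bits() performs; the register value is a sample.

/* Userspace model of a masked field replace: clear the field, then OR
 * in the new value shifted to the field's position.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mask = 0xc00;		/* ASPM support field, bits 11:10 */
	unsigned int reg = 0x12345678;		/* sample register value */
	unsigned int aspm = 0x3;		/* advertise L0s + L1 */
	unsigned int shift = __builtin_ctz(mask);	/* == 10 */

	reg = (reg & ~mask) | ((aspm << shift) & mask);
	printf("reg = 0x%08x\n", reg);		/* prints 0x12345e78 */
	return 0;
}
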
@@ -899,7 +914,6 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
brcm_msi_remove(pcie);
brcm_pcie_turn_off(pcie);
clk_disable_unprepare(pcie->clk);
- clk_put(pcie->clk);
}
static int brcm_pcie_remove(struct platform_device *pdev)
@@ -917,11 +931,26 @@ static int brcm_pcie_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *msi_np;
struct pci_host_bridge *bridge;
+ struct device_node *fw_np;
struct brcm_pcie *pcie;
struct pci_bus *child;
struct resource *res;
int ret;
+ /*
+ * We have to wait for Raspberry Pi's firmware interface to be up, as a
+ * PCI fixup, rpi_firmware_init_vl805(), depends on it. This driver's
+ * probe can race with the firmware interface's (see
+ * drivers/firmware/raspberrypi.c) and potentially break the PCI fixup.
+ */
+ fw_np = of_find_compatible_node(NULL, NULL,
+ "raspberrypi,bcm2835-firmware");
+ if (fw_np && !rpi_firmware_get(fw_np)) {
+ of_node_put(fw_np);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(fw_np);
+
bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
if (!bridge)
return -ENOMEM;
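
The probe above defers rather than fails when the firmware interface has a
DT node but has not come up yet. A minimal sketch of that -EPROBE_DEFER
ordering pattern follows; example_provider_get() and the compatible string
are hypothetical stand-ins for an interface such as rpi_firmware_get().

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static void *example_provider_get(struct device_node *np);	/* hypothetical */

/* If a required provider is described in DT but not usable yet, return
 * -EPROBE_DEFER so the driver core retries this probe later.
 */
static int example_consumer_probe(struct platform_device *pdev)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "example,provider");
	if (np && !example_provider_get(np)) {
		of_node_put(np);
		return -EPROBE_DEFER;	/* provider not up yet, retry later */
	}
	of_node_put(np);

	/* ... normal probe path ... */
	return 0;
}
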
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index cb982891b22b..ebfa7d5a4e2d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -651,6 +651,9 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
}
port->irq = platform_get_irq(pdev, port->slot);
+ if (port->irq < 0)
+ return port->irq;
+
irq_set_chained_handler_and_data(port->irq,
mtk_pcie_intr_handler, port);
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
new file mode 100644
index 000000000000..b4a288e24aaf
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe endpoint driver for Renesas R-Car SoCs
+ * Copyright (c) 2020 Renesas Electronics Europe GmbH
+ *
+ * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-epc.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#include "pcie-rcar.h"
+
+#define RCAR_EPC_MAX_FUNCTIONS 1
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie_endpoint {
+ struct rcar_pcie pcie;
+ phys_addr_t *ob_mapped_addr;
+ struct pci_epc_mem_window *ob_window;
+ u8 max_functions;
+ unsigned int bar_to_atu[MAX_NR_INBOUND_MAPS];
+ unsigned long *ib_window_map;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
+};
+
+static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
+{
+ u32 val;
+
+ rcar_pci_write_reg(pcie, 0, PCIETCTLR);
+
+ /* Set endpoint mode */
+ rcar_pci_write_reg(pcie, 0, PCIEMSR);
+
+ /* Initialize default capabilities. */
+ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ PCI_HEADER_TYPE_NORMAL);
+
+ /* Write out the physical slot number = 0 */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+ val = rcar_pci_read_reg(pcie, EXPCAP(1));
+ /* the device supports a fixed 128-byte MPSS */
+ val &= ~GENMASK(2, 0);
+ rcar_pci_write_reg(pcie, val, EXPCAP(1));
+
+ val = rcar_pci_read_reg(pcie, EXPCAP(2));
+ /* limit read request size to 128 bytes */
+ val &= ~GENMASK(14, 12);
+ /* limit payload size to 128 bytes */
+ val &= ~GENMASK(7, 5);
+ rcar_pci_write_reg(pcie, val, EXPCAP(2));
+
+ /* Set target link speed to 5.0 GT/s */
+ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
+ PCI_EXP_LNKSTA_CLS_5_0GB);
+
+ /* Set the completion timer timeout to the maximum 50ms. */
+ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
+
+ /* Terminate list of capabilities (Next Capability Offset=0) */
+ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
+
+ /* flush modifications */
+ wmb();
+}
+
+static int rcar_pcie_ep_get_window(struct rcar_pcie_endpoint *ep,
+ phys_addr_t addr)
+{
+ int i;
+
+ for (i = 0; i < ep->num_ob_windows; i++)
+ if (ep->ob_window[i].phys_base == addr)
+ return i;
+
+ return -EINVAL;
+}
+
+static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
+ struct platform_device *pdev)
+{
+ struct rcar_pcie *pcie = &ep->pcie;
+ char outbound_name[10];
+ struct resource *res;
+ unsigned int i = 0;
+
+ ep->num_ob_windows = 0;
+ for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
+ sprintf(outbound_name, "memory%u", i);
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ outbound_name);
+ if (!res) {
+ dev_err(pcie->dev, "missing outbound window %u\n", i);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res),
+ outbound_name)) {
+ dev_err(pcie->dev, "Cannot request memory region %s.\n",
+ outbound_name);
+ return -EIO;
+ }
+
+ ep->ob_window[i].phys_base = res->start;
+ ep->ob_window[i].size = resource_size(res);
+ /*
+ * The controller doesn't support multiple allocations from the
+ * same window, so set page_size to the window size.
+ */
+ ep->ob_window[i].page_size = resource_size(res);
+ }
+ ep->num_ob_windows = i;
+
+ return 0;
+}
+
+static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
+ struct platform_device *pdev)
+{
+ struct rcar_pcie *pcie = &ep->pcie;
+ struct pci_epc_mem_window *window;
+ struct device *dev = pcie->dev;
+ struct resource res;
+ int err;
+
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+ pcie->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ ep->ob_window = devm_kcalloc(dev, RCAR_PCI_MAX_RESOURCES,
+ sizeof(*window), GFP_KERNEL);
+ if (!ep->ob_window)
+ return -ENOMEM;
+
+ rcar_pcie_parse_outbound_ranges(ep, pdev);
+
+ err = of_property_read_u8(dev->of_node, "max-functions",
+ &ep->max_functions);
+ if (err < 0 || ep->max_functions > RCAR_EPC_MAX_FUNCTIONS)
+ ep->max_functions = RCAR_EPC_MAX_FUNCTIONS;
+
+ return 0;
+}
+
+static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+ struct pci_epf_header *hdr)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 val;
+
+ if (!fn)
+ val = hdr->vendorid;
+ else
+ val = rcar_pci_read_reg(pcie, IDSETR0);
+ val |= hdr->deviceid << 16;
+ rcar_pci_write_reg(pcie, val, IDSETR0);
+
+ val = hdr->revid;
+ val |= hdr->progif_code << 8;
+ val |= hdr->subclass_code << 16;
+ val |= hdr->baseclass_code << 24;
+ rcar_pci_write_reg(pcie, val, IDSETR1);
+
+ if (!fn)
+ val = hdr->subsys_vendor_id;
+ else
+ val = rcar_pci_read_reg(pcie, SUBIDSETR);
+ val |= hdr->subsys_id << 16;
+ rcar_pci_write_reg(pcie, val, SUBIDSETR);
+
+ if (hdr->interrupt_pin > PCI_INTERRUPT_INTA)
+ return -EINVAL;
+ val = rcar_pci_read_reg(pcie, PCICONF(15));
+ val |= (hdr->interrupt_pin << 8);
+ rcar_pci_write_reg(pcie, val, PCICONF(15));
+
+ return 0;
+}
+
+static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ u64 size = 1ULL << fls64(epf_bar->size - 1);
+ dma_addr_t cpu_addr = epf_bar->phys_addr;
+ enum pci_barno bar = epf_bar->barno;
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 mask;
+ int idx;
+ int err;
+
+ idx = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
+ if (idx >= ep->num_ib_windows) {
+ dev_err(pcie->dev, "no free inbound window\n");
+ return -EINVAL;
+ }
+
+ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
+ flags |= IO_SPACE;
+
+ ep->bar_to_atu[bar] = idx;
+ /* use 64-bit BARs */
+ set_bit(idx, ep->ib_window_map);
+ set_bit(idx + 1, ep->ib_window_map);
+
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
+
+ size = min(size, alignment);
+ }
+
+ size = min(size, 1ULL << 32);
+
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
+
+ rcar_pcie_set_inbound(pcie, cpu_addr,
+ 0x0, mask | flags, idx, false);
+
+ err = rcar_pcie_wait_for_phyrdy(pcie);
+ if (err) {
+ dev_err(pcie->dev, "phy not ready\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
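
The sizing logic above rounds the BAR size up to a power of two, clamps it to
the alignment of the CPU address and to the 4 GiB hardware limit, and then
derives the inbound mask. A standalone walk-through with sample numbers:

/* Userspace rework of the sizing arithmetic in rcar_pcie_ep_set_bar().
 * The BAR size and CPU address are illustrative samples.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bar_size = 0x9000;		/* sample BAR size */
	uint64_t cpu_addr = 0x20000000;		/* sample, 512 MiB aligned */
	uint64_t size, align, mask;

	/* 1ULL << fls64(bar_size - 1): round up to a power of two */
	size = 1ULL << (64 - __builtin_clzll(bar_size - 1));

	if (cpu_addr) {
		align = 1ULL << __builtin_ctzll(cpu_addr);	/* __ffs64 */
		if (align < size)
			size = align;
	}
	if (size > (1ULL << 32))
		size = 1ULL << 32;	/* hardware max */

	mask = (size - 1) & ~0xfULL;	/* size is already a power of two */
	printf("size=0x%llx mask=0x%llx\n",
	       (unsigned long long)size, (unsigned long long)mask);
	return 0;	/* prints size=0x10000 mask=0xfff0 */
}
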
+
+static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ enum pci_barno bar = epf_bar->barno;
+ u32 atu_index = ep->bar_to_atu[bar];
+
+ rcar_pcie_set_inbound(&ep->pcie, 0x0, 0x0, 0x0, bar, false);
+
+ clear_bit(atu_index, ep->ib_window_map);
+ clear_bit(atu_index + 1, ep->ib_window_map);
+}
+
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 flags;
+
+ flags = rcar_pci_read_reg(pcie, MSICAP(fn));
+ flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
+ rcar_pci_write_reg(pcie, flags, MSICAP(fn));
+
+ return 0;
+}
+
+static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 flags;
+
+ flags = rcar_pci_read_reg(pcie, MSICAP(fn));
+ if (!(flags & MSICAP0_MSIE))
+ return -EINVAL;
+
+ return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+}
+
+static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ struct resource_entry win;
+ struct resource res;
+ int window;
+ int err;
+
+ /* Check that we have a link. */
+ err = rcar_pcie_wait_for_dl(pcie);
+ if (err) {
+ dev_err(pcie->dev, "link not up\n");
+ return err;
+ }
+
+ window = rcar_pcie_ep_get_window(ep, addr);
+ if (window < 0) {
+ dev_err(pcie->dev, "failed to get corresponding window\n");
+ return -EINVAL;
+ }
+
+ memset(&win, 0x0, sizeof(win));
+ memset(&res, 0x0, sizeof(res));
+ res.start = pci_addr;
+ res.end = pci_addr + size - 1;
+ res.flags = IORESOURCE_MEM;
+ win.res = &res;
+
+ rcar_pcie_set_outbound(pcie, window, &win);
+
+ ep->ob_mapped_addr[window] = addr;
+
+ return 0;
+}
+
+static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct resource_entry win;
+ struct resource res;
+ int idx;
+
+ for (idx = 0; idx < ep->num_ob_windows; idx++)
+ if (ep->ob_mapped_addr[idx] == addr)
+ break;
+
+ if (idx >= ep->num_ob_windows)
+ return;
+
+ memset(&win, 0x0, sizeof(win));
+ memset(&res, 0x0, sizeof(res));
+ win.res = &res;
+ rcar_pcie_set_outbound(&ep->pcie, idx, &win);
+
+ ep->ob_mapped_addr[idx] = 0;
+}
+
+static int rcar_pcie_ep_assert_intx(struct rcar_pcie_endpoint *ep,
+ u8 fn, u8 intx)
+{
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 val;
+
+ val = rcar_pci_read_reg(pcie, PCIEMSITXR);
+ if ((val & PCI_MSI_FLAGS_ENABLE)) {
+ dev_err(pcie->dev, "MSI is enabled, cannot assert INTx\n");
+ return -EINVAL;
+ }
+
+ val = rcar_pci_read_reg(pcie, PCICONF(1));
+ if ((val & INTDIS)) {
+ dev_err(pcie->dev, "INTx message transmission is disabled\n");
+ return -EINVAL;
+ }
+
+ val = rcar_pci_read_reg(pcie, PCIEINTXR);
+ if ((val & ASTINTX)) {
+ dev_err(pcie->dev, "INTx is already asserted\n");
+ return -EINVAL;
+ }
+
+ val |= ASTINTX;
+ rcar_pci_write_reg(pcie, val, PCIEINTXR);
+ usleep_range(1000, 1001);
+ val = rcar_pci_read_reg(pcie, PCIEINTXR);
+ val &= ~ASTINTX;
+ rcar_pci_write_reg(pcie, val, PCIEINTXR);
+
+ return 0;
+}
+
+static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
+ u8 fn, u8 interrupt_num)
+{
+ u16 msi_count;
+ u32 val;
+
+ /* Check MSI enable bit */
+ val = rcar_pci_read_reg(pcie, MSICAP(fn));
+ if (!(val & MSICAP0_MSIE))
+ return -EINVAL;
+
+ /* Get MSI numbers from MME */
+ msi_count = ((val & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+ msi_count = 1 << msi_count;
+
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ val = rcar_pci_read_reg(pcie, PCIEMSITXR);
+ rcar_pci_write_reg(pcie, val | (interrupt_num - 1), PCIEMSITXR);
+
+ return 0;
+}
+
+static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return rcar_pcie_ep_assert_intx(ep, fn, 0);
+
+ case PCI_EPC_IRQ_MSI:
+ return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rcar_pcie_ep_start(struct pci_epc *epc)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+
+ rcar_pci_write_reg(&ep->pcie, MACCTLR_INIT_VAL, MACCTLR);
+ rcar_pci_write_reg(&ep->pcie, CFINIT, PCIETCTLR);
+
+ return 0;
+}
+
+static void rcar_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+
+ rcar_pci_write_reg(&ep->pcie, 0, PCIETCTLR);
+}
+
+static const struct pci_epc_features rcar_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ /* use 64-bit BARs so mark BAR[1,3,5] as reserved */
+ .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
+ .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
+ .bar_fixed_size[0] = 128,
+ .bar_fixed_size[2] = 256,
+ .bar_fixed_size[4] = 256,
+};
+
+static const struct pci_epc_features*
+rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+ return &rcar_pcie_epc_features;
+}
+
+static const struct pci_epc_ops rcar_pcie_epc_ops = {
+ .write_header = rcar_pcie_ep_write_header,
+ .set_bar = rcar_pcie_ep_set_bar,
+ .clear_bar = rcar_pcie_ep_clear_bar,
+ .set_msi = rcar_pcie_ep_set_msi,
+ .get_msi = rcar_pcie_ep_get_msi,
+ .map_addr = rcar_pcie_ep_map_addr,
+ .unmap_addr = rcar_pcie_ep_unmap_addr,
+ .raise_irq = rcar_pcie_ep_raise_irq,
+ .start = rcar_pcie_ep_start,
+ .stop = rcar_pcie_ep_stop,
+ .get_features = rcar_pcie_ep_get_features,
+};
+
+static const struct of_device_id rcar_pcie_ep_of_match[] = {
+ { .compatible = "renesas,r8a774c0-pcie-ep", },
+ { .compatible = "renesas,rcar-gen3-pcie-ep" },
+ { },
+};
+
+static int rcar_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_pcie_endpoint *ep;
+ struct rcar_pcie *pcie;
+ struct pci_epc *epc;
+ int err;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ pcie = &ep->pcie;
+ pcie->dev = dev;
+
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_pm_disable;
+ }
+
+ err = rcar_pcie_ep_get_pdata(ep, pdev);
+ if (err < 0) {
+ dev_err(dev, "failed to request resources: %d\n", err);
+ goto err_pm_put;
+ }
+
+ ep->num_ib_windows = MAX_NR_INBOUND_MAPS;
+ ep->ib_window_map =
+ devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows),
+ sizeof(long), GFP_KERNEL);
+ if (!ep->ib_window_map) {
+ err = -ENOMEM;
+ dev_err(dev, "failed to allocate memory for inbound map\n");
+ goto err_pm_put;
+ }
+
+ ep->ob_mapped_addr = devm_kcalloc(dev, ep->num_ob_windows,
+ sizeof(*ep->ob_mapped_addr),
+ GFP_KERNEL);
+ if (!ep->ob_mapped_addr) {
+ err = -ENOMEM;
+ dev_err(dev, "failed to allocate memory for outbound memory pointers\n");
+ goto err_pm_put;
+ }
+
+ epc = devm_pci_epc_create(dev, &rcar_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ err = PTR_ERR(epc);
+ goto err_pm_put;
+ }
+
+ epc->max_functions = ep->max_functions;
+ epc_set_drvdata(epc, ep);
+
+ rcar_pcie_ep_hw_init(pcie);
+
+ err = pci_epc_multi_mem_init(epc, ep->ob_window, ep->num_ob_windows);
+ if (err < 0) {
+ dev_err(dev, "failed to initialize the epc memory space\n");
+ goto err_pm_put;
+ }
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_put(dev);
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+
+ return err;
+}
+
+static struct platform_driver rcar_pcie_ep_driver = {
+ .driver = {
+ .name = "rcar-pcie-ep",
+ .of_match_table = rcar_pcie_ep_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_ep_probe,
+};
+builtin_platform_driver(rcar_pcie_ep_driver);
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
new file mode 100644
index 000000000000..d210a36561be
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
+ *
+ * Based on:
+ * arch/sh/drivers/pci/pcie-sh7786.c
+ * arch/sh/drivers/pci/ops-sh7786.c
+ * Copyright (C) 2009 - 2011 Paul Mundt
+ *
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "pcie-rcar.h"
+
+struct rcar_msi {
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ struct msi_controller chip;
+ unsigned long pages;
+ struct mutex lock;
+ int irq1;
+ int irq2;
+};
+
+static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
+{
+ return container_of(chip, struct rcar_msi, chip);
+}
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie_host {
+ struct rcar_pcie pcie;
+ struct device *dev;
+ struct phy *phy;
+ void __iomem *base;
+ struct list_head resources;
+ int root_bus_nr;
+ struct clk *bus_clk;
+ struct rcar_msi msi;
+ int (*phy_init_fn)(struct rcar_pcie_host *host);
+};
+
+static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
+{
+ unsigned int shift = BITS_PER_BYTE * (where & 3);
+ u32 val = rcar_pci_read_reg(pcie, where & ~3);
+
+ return val >> shift;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_config_access(struct rcar_pcie_host *host,
+ unsigned char access_type, struct pci_bus *bus,
+ unsigned int devfn, int where, u32 *data)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ unsigned int dev, func, reg, index;
+
+ dev = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+ reg = where & ~3;
+ index = reg / 4;
+
+ /*
+ * While each channel has its own memory-mapped extended config
+ * space, it's generally only accessible when in endpoint mode.
+ * When in root complex mode, the controller is unable to target
+ * itself with either type 0 or type 1 accesses, and indeed, any
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
+ *
+ * Each channel effectively only supports a single device, but as
+ * the same channel <-> device access works for any PCI_SLOT()
+ * value, we cheat a bit here and bind the controller's config
+ * space to devfn 0 in order to enable self-enumeration. In this
+ * case the regular ECAR/ECDR path is sidelined and the mangled
+ * config access itself is initiated as an internal bus transaction.
+ */
+ if (pci_is_root_bus(bus)) {
+ if (dev != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == RCAR_PCI_ACCESS_READ) {
+ *data = rcar_pci_read_reg(pcie, PCICONF(index));
+ } else {
+ /* Keep an eye out for changes to the root bus number */
+ if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
+ host->root_bus_nr = *data & 0xff;
+
+ rcar_pci_write_reg(pcie, *data, PCICONF(index));
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (host->root_bus_nr < 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Clear errors */
+ rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+
+ /* Set the PIO address */
+ rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
+ PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
+
+ /* Enable the configuration access */
+ if (bus->parent->number == host->root_bus_nr)
+ rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+ else
+ rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+
+ /* Check for errors */
+ if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Check for master and target aborts */
+ if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
+ (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == RCAR_PCI_ACCESS_READ)
+ *data = rcar_pci_read_reg(pcie, PCIECDR);
+ else
+ rcar_pci_write_reg(pcie, *data, PCIECDR);
+
+ /* Disable the configuration access */
+ rcar_pci_write_reg(pcie, 0, PCIECCTLR);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct rcar_pcie_host *host = bus->sysdata;
+ int ret;
+
+ ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
+ bus, devfn, where, val);
+ if (ret != PCIBIOS_SUCCESSFUL) {
+ *val = 0xffffffff;
+ return ret;
+ }
+
+ if (size == 1)
+ *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
+
+ dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, *val);
+
+ return ret;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rcar_pcie_host *host = bus->sysdata;
+ unsigned int shift;
+ u32 data;
+ int ret;
+
+ ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
+ bus, devfn, where, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, val);
+
+ if (size == 1) {
+ shift = BITS_PER_BYTE * (where & 3);
+ data &= ~(0xff << shift);
+ data |= ((val & 0xff) << shift);
+ } else if (size == 2) {
+ shift = BITS_PER_BYTE * (where & 2);
+ data &= ~(0xffff << shift);
+ data |= ((val & 0xffff) << shift);
+ } else
+ data = val;
+
+ ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
+ bus, devfn, where, &data);
+
+ return ret;
+}
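
Because the hardware only performs 32-bit config accesses, sub-word writes
are emulated by splicing the new byte or halfword into the containing word.
A standalone model of that merge step with sample values:

/* Userspace sketch of the byte merge in rcar_pcie_write_conf(): read
 * the containing 32-bit word, splice in the new byte at the right
 * shift, then write the word back.
 */
#include <stdio.h>

int main(void)
{
	unsigned int data = 0x11223344;	/* word read from config space */
	unsigned int where = 0x3d;	/* PCI_INTERRUPT_PIN, byte access */
	unsigned int val = 0xab;	/* new byte value */
	unsigned int shift = 8 * (where & 3);

	data &= ~(0xffu << shift);
	data |= (val & 0xffu) << shift;
	printf("merged word: 0x%08x\n", data);	/* prints 0x1122ab44 */
	return 0;
}
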
+
+static struct pci_ops rcar_pcie_ops = {
+ .read = rcar_pcie_read_conf,
+ .write = rcar_pcie_write_conf,
+};
+
+static int rcar_pcie_setup(struct list_head *resource,
+ struct rcar_pcie_host *host)
+{
+ struct resource_entry *win;
+ int i = 0;
+
+ /* Setup PCI resources */
+ resource_list_for_each_entry(win, &host->resources) {
+ struct resource *res = win->res;
+
+ if (!res->flags)
+ continue;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ case IORESOURCE_MEM:
+ rcar_pcie_set_outbound(&host->pcie, i, win);
+ i++;
+ break;
+ case IORESOURCE_BUS:
+ host->root_bus_nr = res->start;
+ break;
+ default:
+ continue;
+ }
+
+ pci_add_resource(resource, res);
+ }
+
+ return 1;
+}
+
+static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int timeout = 1000;
+ u32 macsr;
+
+ if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
+ return;
+
+ if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
+ dev_err(dev, "Speed change already in progress\n");
+ return;
+ }
+
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
+ goto done;
+
+ /* Set target link speed to 5.0 GT/s */
+ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
+ PCI_EXP_LNKSTA_CLS_5_0GB);
+
+ /* Set speed change reason as intentional factor */
+ rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
+
+ /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
+ if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ /* Start link speed change */
+ rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
+
+ while (timeout--) {
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if (macsr & SPCHGFIN) {
+ /* Clear the interrupt bits */
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ if (macsr & SPCHGFAIL)
+ dev_err(dev, "Speed change failed\n");
+
+ goto done;
+ }
+
+ msleep(1);
+ }
+
+ dev_err(dev, "Speed change timed out\n");
+
+done:
+ dev_info(dev, "Current link speed is %s GT/s\n",
+ (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
+}
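
The speed-change path polls MACSR a bounded number of times instead of
waiting indefinitely. A standalone model of that bounded-poll pattern;
read_status() simulates the hardware and is purely illustrative.

#include <stdio.h>

static unsigned int fake_status;

/* Pretend the hardware sets the completion bit on the third read */
static unsigned int read_status(void)
{
	return ++fake_status >= 3 ? 0x10 : 0;	/* 0x10 ~ SPCHGFIN */
}

int main(void)
{
	unsigned int timeout = 1000;

	while (timeout--) {
		if (read_status() & 0x10) {
			printf("speed change finished\n");
			return 0;
		}
		/* msleep(1) between reads in the driver */
	}
	printf("speed change timed out\n");
	return 1;
}
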
+
+static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct resource_entry *win;
+ LIST_HEAD(res);
+ int i = 0;
+
+ /* Try setting 5 GT/s link speed */
+ rcar_pcie_force_speedup(pcie);
+
+ /* Setup PCI resources */
+ resource_list_for_each_entry(win, &host->resources) {
+ struct resource *res = win->res;
+
+ if (!res->flags)
+ continue;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ case IORESOURCE_MEM:
+ rcar_pcie_set_outbound(pcie, i, win);
+ i++;
+ break;
+ }
+ }
+}
+
+static int rcar_pcie_enable(struct rcar_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct rcar_pcie *pcie = &host->pcie;
+ struct device *dev = pcie->dev;
+ struct pci_bus *bus, *child;
+ int ret;
+
+ /* Try setting 5 GT/s link speed */
+ rcar_pcie_force_speedup(pcie);
+
+ rcar_pcie_setup(&bridge->windows, host);
+
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
+
+ bridge->dev.parent = dev;
+ bridge->sysdata = host;
+ bridge->busnr = host->root_bus_nr;
+ bridge->ops = &rcar_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ bridge->msi = &host->msi.chip;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0)
+ return ret;
+
+ bus = bridge->bus;
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+
+ return 0;
+}
+
+static int phy_wait_for_ack(struct rcar_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int timeout = 100;
+
+ while (timeout--) {
+ if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+ return 0;
+
+ udelay(100);
+ }
+
+ dev_err(dev, "Access to PCIe phy timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static void phy_write_reg(struct rcar_pcie *pcie,
+ unsigned int rate, u32 addr,
+ unsigned int lane, u32 data)
+{
+ u32 phyaddr;
+
+ phyaddr = WRITE_CMD |
+ ((rate & 1) << RATE_POS) |
+ ((lane & 0xf) << LANE_POS) |
+ ((addr & 0xff) << ADR_POS);
+
+ /* Set write data */
+ rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+ rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+
+ /* Clear command */
+ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+}
+
+static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
+{
+ int err;
+
+ /* Begin initialization */
+ rcar_pci_write_reg(pcie, 0, PCIETCTLR);
+
+ /* Set mode */
+ rcar_pci_write_reg(pcie, 1, PCIEMSR);
+
+ err = rcar_pcie_wait_for_phyrdy(pcie);
+ if (err)
+ return err;
+
+ /*
+ * Initial header for port config space is type 1, set the device
+ * class to match. Hardware takes care of propagating the IDSETR
+ * settings, so there is no need to bother with a quirk.
+ */
+ rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+
+ /*
+ * Set up the Secondary Bus Number and Subordinate Bus Number, even
+ * though they aren't used, to avoid the bridge being detected as broken.
+ */
+ rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
+ rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
+
+ /* Initialize default capabilities. */
+ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ PCI_HEADER_TYPE_BRIDGE);
+
+ /* Enable data link layer active state reporting */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
+ PCI_EXP_LNKCAP_DLLLARC);
+
+ /* Write out the physical slot number = 0 */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+ /* Set the completion timer timeout to the maximum 50ms. */
+ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
+
+ /* Terminate list of capabilities (Next Capability Offset=0) */
+ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
+
+ /* Enable MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
+
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+
+ /* Finish initialization - establish a PCI Express link */
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+
+ /* This will timeout if we don't have a link. */
+ err = rcar_pcie_wait_for_dl(pcie);
+ if (err)
+ return err;
+
+ /* Enable INTx interrupts */
+ rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
+
+ wmb();
+
+ return 0;
+}
+
+static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+
+ /* Initialize the phy */
+ phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
+ phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
+ phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
+ phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
+ phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
+ phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
+
+ phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
+ phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
+ phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
+
+ return 0;
+}
+
+static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+
+ /*
+ * These settings come from the R-Car Series, 2nd Generation User's
+ * Manual, section 50.3.1 (2) Initialization of the physical layer.
+ */
+ rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
+ rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
+ rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
+ rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
+
+ rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
+ /* The following value is for DC connection, no termination resistor */
+ rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
+ rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
+ rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
+
+ return 0;
+}
+
+static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
+{
+ int err;
+
+ err = phy_init(host->phy);
+ if (err)
+ return err;
+
+ err = phy_power_on(host->phy);
+ if (err)
+ phy_exit(host->phy);
+
+ return err;
+}
+
+static int rcar_msi_alloc(struct rcar_msi *chip)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+
+ msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+ if (msi < INT_PCI_MSI_NR)
+ set_bit(msi, chip->used);
+ else
+ msi = -ENOSPC;
+
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+ msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
+ order_base_2(no_irqs));
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
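
Multi-MSI allocation above takes a naturally aligned, power-of-two block of
vectors from the bitmap. A short sketch of the same bitmap API usage with
sample sizes (not driver state):

#include <linux/bitmap.h>

/* Allocate a naturally aligned block of 4 vectors (order 2), then
 * release it again. 'used' and the bitmap size are samples.
 */
static DECLARE_BITMAP(used, 32);

static int example_alloc_four(void)
{
	int hwirq;

	hwirq = bitmap_find_free_region(used, 32, 2);	/* order 2 = 4 bits */
	if (hwirq < 0)
		return hwirq;	/* -ENOMEM when no aligned block is free */

	/* ... program the 4 vectors starting at hwirq ... */

	bitmap_release_region(used, hwirq, 2);
	return 0;
}
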
+
+static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
+{
+ mutex_lock(&chip->lock);
+ clear_bit(irq, chip->used);
+ mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
+{
+ struct rcar_pcie_host *host = data;
+ struct rcar_pcie *pcie = &host->pcie;
+ struct rcar_msi *msi = &host->msi;
+ struct device *dev = pcie->dev;
+ unsigned long reg;
+
+ reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+
+ /* MSI & INTx share an interrupt - we only handle MSI here */
+ if (!reg)
+ return IRQ_NONE;
+
+ while (reg) {
+ unsigned int index = find_first_bit(&reg, 32);
+ unsigned int msi_irq;
+
+ /* clear the interrupt */
+ rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
+
+ msi_irq = irq_find_mapping(msi->domain, index);
+ if (msi_irq) {
+ if (test_bit(index, msi->used))
+ generic_handle_irq(msi_irq);
+ else
+ dev_info(dev, "unhandled MSI\n");
+ } else {
+ /* Unknown MSI, just clear it */
+ dev_dbg(dev, "unexpected MSI\n");
+ }
+
+ /* see if there's any more pending in this vector */
+ reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
+ msi.chip);
+ struct rcar_pcie *pcie = &host->pcie;
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+
+ hwirq = rcar_msi_alloc(msi);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_find_mapping(msi->domain, hwirq);
+ if (!irq) {
+ rcar_msi_free(msi, hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(irq, desc);
+
+ msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg.data = hwirq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static int rcar_msi_setup_irqs(struct msi_controller *chip,
+ struct pci_dev *pdev, int nvec, int type)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
+ msi.chip);
+ struct rcar_pcie *pcie = &host->pcie;
+ struct msi_desc *desc;
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+ int i;
+
+ /* MSI-X interrupts are not supported */
+ if (type == PCI_CAP_ID_MSIX)
+ return -EINVAL;
+
+ WARN_ON(!list_is_singular(&pdev->dev.msi_list));
+ desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+
+ hwirq = rcar_msi_alloc_region(msi, nvec);
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ irq = irq_find_mapping(msi->domain, hwirq);
+ if (!irq)
+ return -ENOSPC;
+
+ for (i = 0; i < nvec; i++) {
+ /*
+ * irq_create_mapping() called from rcar_pcie_probe() pre-
+ * allocates descs, so there is no need to allocate descs here.
+ * We can therefore assume that if irq_find_mapping() above
+ * returns non-zero, then the descs are also successfully
+ * allocated.
+ */
+ if (irq_set_msi_desc_off(irq, i, desc)) {
+ /* TODO: clear */
+ return -EINVAL;
+ }
+ }
+
+ desc->nvec_used = nvec;
+ desc->msi_attrib.multiple = order_base_2(nvec);
+
+ msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg.data = hwirq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ rcar_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip rcar_msi_irq_chip = {
+ .name = "R-Car PCIe MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = rcar_msi_map,
+};
+
+static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_msi *msi = &host->msi;
+ int i, irq;
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++) {
+ irq = irq_find_mapping(msi->domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(msi->domain);
+}
+
+static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct rcar_msi *msi = &host->msi;
+ unsigned long base;
+
+ /* setup MSI data target */
+ base = virt_to_phys((void *)msi->pages);
+
+ rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
+ rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
+
+ /* enable all MSI interrupts */
+ rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+}
+
+static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct device *dev = pcie->dev;
+ struct rcar_msi *msi = &host->msi;
+ int err, i;
+
+ mutex_init(&msi->lock);
+
+ msi->chip.dev = dev;
+ msi->chip.setup_irq = rcar_msi_setup_irq;
+ msi->chip.setup_irqs = rcar_msi_setup_irqs;
+ msi->chip.teardown_irq = rcar_msi_teardown_irq;
+
+ msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
+ &msi_domain_ops, &msi->chip);
+ if (!msi->domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++)
+ irq_create_mapping(msi->domain, i);
+
+ /* The two IRQs are used for MSI, but they also carry non-MSI interrupts */
+ err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, host);
+ if (err < 0) {
+ dev_err(dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, host);
+ if (err < 0) {
+ dev_err(dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ /* setup MSI data target */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ rcar_pcie_hw_enable_msi(host);
+
+ return 0;
+
+err:
+ rcar_pcie_unmap_msi(host);
+ return err;
+}
+
+static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct rcar_msi *msi = &host->msi;
+
+ /* Disable all MSI interrupts */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+ /* Disable address decoding of the MSI interrupt, MSIFE */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
+
+ free_pages(msi->pages, 0);
+
+ rcar_pcie_unmap_msi(host);
+}
+
+static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct device *dev = pcie->dev;
+ struct resource res;
+ int err, i;
+
+ host->phy = devm_phy_optional_get(dev, "pcie");
+ if (IS_ERR(host->phy))
+ return PTR_ERR(host->phy);
+
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+
+ pcie->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ host->bus_clk = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(host->bus_clk)) {
+ dev_err(dev, "cannot get pcie bus clock\n");
+ return PTR_ERR(host->bus_clk);
+ }
+
+ i = irq_of_parse_and_map(dev->of_node, 0);
+ if (!i) {
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_irq1;
+ }
+ host->msi.irq1 = i;
+
+ i = irq_of_parse_and_map(dev->of_node, 1);
+ if (!i) {
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_irq2;
+ }
+ host->msi.irq2 = i;
+
+ return 0;
+
+err_irq2:
+ irq_dispose_mapping(host->msi.irq1);
+err_irq1:
+ return err;
+}
+
+static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
+ struct resource_entry *entry,
+ int *index)
+{
+ u64 restype = entry->res->flags;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_addr = entry->res->start - entry->offset;
+ u32 flags = LAM_64BIT | LAR_ENABLE;
+ u64 mask;
+ u64 size = resource_size(entry->res);
+ int idx = *index;
+
+ if (restype & IORESOURCE_PREFETCH)
+ flags |= LAM_PREFETCH;
+
+ while (cpu_addr < cpu_end) {
+ if (idx >= MAX_NR_INBOUND_MAPS - 1) {
+ dev_err(pcie->dev, "Failed to map inbound regions!\n");
+ return -EINVAL;
+ }
+ /*
+ * If the size of the range is larger than the alignment of
+ * the start address, we have to use multiple entries to
+ * perform the mapping.
+ */
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
+
+ size = min(size, alignment);
+ }
+ /* Hardware supports max 4GiB inbound region */
+ size = min(size, 1ULL << 32);
+
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
+
+ rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
+ lower_32_bits(mask) | flags, idx, true);
+
+ pci_addr += size;
+ cpu_addr += size;
+ idx += 2;
+ }
+ *index = idx;
+
+ return 0;
+}
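
The loop above splits a dma-range into windows whose size never exceeds the
alignment of the running CPU address (or 4 GiB). A standalone trace with a
sample 96 MiB range starting at 32 MiB:

/* Userspace trace of the window-splitting arithmetic. The driver
 * consumes two map registers (idx += 2) per emitted window.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cpu = 0x02000000;		/* 32 MiB, sample */
	uint64_t end = 0x08000000;		/* 32 MiB + 96 MiB */
	uint64_t size = end - cpu;

	while (cpu < end) {
		if (cpu) {
			uint64_t align = 1ULL << __builtin_ctzll(cpu);

			if (align < size)
				size = align;	/* clamp to address alignment */
		}
		if (size > (1ULL << 32))
			size = 1ULL << 32;	/* 4 GiB hardware limit */

		printf("window: cpu 0x%08llx size 0x%08llx\n",
		       (unsigned long long)cpu, (unsigned long long)size);
		cpu += size;
	}
	return 0;	/* prints three 32 MiB windows */
}
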
+
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *entry;
+ int index = 0, err = 0;
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static const struct of_device_id rcar_pcie_of_match[] = {
+ { .compatible = "renesas,pcie-r8a7779",
+ .data = rcar_pcie_phy_init_h1 },
+ { .compatible = "renesas,pcie-r8a7790",
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7791",
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-rcar-gen2",
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7795",
+ .data = rcar_pcie_phy_init_gen3 },
+ { .compatible = "renesas,pcie-rcar-gen3",
+ .data = rcar_pcie_phy_init_gen3 },
+ {},
+};
+
+static int rcar_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_pcie_host *host;
+ struct rcar_pcie *pcie;
+ u32 data;
+ int err;
+ struct pci_host_bridge *bridge;
+
+ bridge = pci_alloc_host_bridge(sizeof(*host));
+ if (!bridge)
+ return -ENOMEM;
+
+ host = pci_host_bridge_priv(bridge);
+ pcie = &host->pcie;
+ pcie->dev = dev;
+ platform_set_drvdata(pdev, host);
+
+ err = pci_parse_request_of_pci_ranges(dev, &host->resources,
+ &bridge->dma_ranges, NULL);
+ if (err)
+ goto err_free_bridge;
+
+ pm_runtime_enable(pcie->dev);
+ err = pm_runtime_get_sync(pcie->dev);
+ if (err < 0) {
+ dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+ goto err_pm_disable;
+ }
+
+ err = rcar_pcie_get_resources(host);
+ if (err < 0) {
+ dev_err(dev, "failed to request resources: %d\n", err);
+ goto err_pm_put;
+ }
+
+ err = clk_prepare_enable(host->bus_clk);
+ if (err) {
+ dev_err(dev, "failed to enable bus clock: %d\n", err);
+ goto err_unmap_msi_irqs;
+ }
+
+ err = rcar_pcie_parse_map_dma_ranges(host);
+ if (err)
+ goto err_clk_disable;
+
+ host->phy_init_fn = of_device_get_match_data(dev);
+ err = host->phy_init_fn(host);
+ if (err) {
+ dev_err(dev, "failed to init PCIe PHY\n");
+ goto err_clk_disable;
+ }
+
+ /* Failure to get a link might just be that no cards are inserted */
+ if (rcar_pcie_hw_init(pcie)) {
+ dev_info(dev, "PCIe link down\n");
+ err = -ENODEV;
+ goto err_phy_shutdown;
+ }
+
+ data = rcar_pci_read_reg(pcie, MACSR);
+ dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = rcar_pcie_enable_msi(host);
+ if (err < 0) {
+ dev_err(dev,
+ "failed to enable MSI support: %d\n",
+ err);
+ goto err_phy_shutdown;
+ }
+ }
+
+ err = rcar_pcie_enable(host);
+ if (err)
+ goto err_msi_teardown;
+
+ return 0;
+
+err_msi_teardown:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pcie_teardown_msi(host);
+
+err_phy_shutdown:
+ if (host->phy) {
+ phy_power_off(host->phy);
+ phy_exit(host->phy);
+ }
+
+err_clk_disable:
+ clk_disable_unprepare(host->bus_clk);
+
+err_unmap_msi_irqs:
+ irq_dispose_mapping(host->msi.irq2);
+ irq_dispose_mapping(host->msi.irq1);
+
+err_pm_put:
+ pm_runtime_put(dev);
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+ pci_free_resource_list(&host->resources);
+
+err_free_bridge:
+ pci_free_host_bridge(bridge);
+
+ return err;
+}
+
+static int __maybe_unused rcar_pcie_resume(struct device *dev)
+{
+ struct rcar_pcie_host *host = dev_get_drvdata(dev);
+ struct rcar_pcie *pcie = &host->pcie;
+ unsigned int data;
+ int err;
+
+ err = rcar_pcie_parse_map_dma_ranges(host);
+ if (err)
+ return 0;
+
+ /* Failure to get a link might just be that no cards are inserted */
+ err = host->phy_init_fn(host);
+ if (err) {
+ dev_info(dev, "PCIe link down\n");
+ return 0;
+ }
+
+ data = rcar_pci_read_reg(pcie, MACSR);
+ dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+ /* Enable MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pcie_hw_enable_msi(host);
+
+ rcar_pcie_hw_enable(host);
+
+ return 0;
+}
+
+static int rcar_pcie_resume_noirq(struct device *dev)
+{
+ struct rcar_pcie_host *host = dev_get_drvdata(dev);
+ struct rcar_pcie *pcie = &host->pcie;
+
+ if (rcar_pci_read_reg(pcie, PMSR) &&
+ !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
+ return 0;
+
+ /* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+ return rcar_pcie_wait_for_dl(pcie);
+}
+
+static const struct dev_pm_ops rcar_pcie_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
+ .resume_noirq = rcar_pcie_resume_noirq,
+};
+
+static struct platform_driver rcar_pcie_driver = {
+ .driver = {
+ .name = "rcar-pcie",
+ .of_match_table = rcar_pcie_of_match,
+ .pm = &rcar_pcie_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_probe,
+};
+builtin_platform_driver(rcar_pcie_driver);
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index 759c6542c5c8..7583699ef7b6 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -1,177 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PCIe driver for Renesas R-Car SoCs
- * Copyright (C) 2014 Renesas Electronics Europe Ltd
- *
- * Based on:
- * arch/sh/drivers/pci/pcie-sh7786.c
- * arch/sh/drivers/pci/ops-sh7786.c
- * Copyright (C) 2009 - 2011 Paul Mundt
+ * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
*
* Author: Phil Edworthy <phil.edworthy@renesas.com>
*/
-#include <linux/bitops.h>
-#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/msi.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_pci.h>
-#include <linux/of_platform.h>
#include <linux/pci.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-
-#define PCIECAR 0x000010
-#define PCIECCTLR 0x000018
-#define CONFIG_SEND_ENABLE BIT(31)
-#define TYPE0 (0 << 8)
-#define TYPE1 BIT(8)
-#define PCIECDR 0x000020
-#define PCIEMSR 0x000028
-#define PCIEINTXR 0x000400
-#define PCIEPHYSR 0x0007f0
-#define PHYRDY BIT(0)
-#define PCIEMSITXR 0x000840
-
-/* Transfer control */
-#define PCIETCTLR 0x02000
-#define DL_DOWN BIT(3)
-#define CFINIT BIT(0)
-#define PCIETSTR 0x02004
-#define DATA_LINK_ACTIVE BIT(0)
-#define PCIEERRFR 0x02020
-#define UNSUPPORTED_REQUEST BIT(4)
-#define PCIEMSIFR 0x02044
-#define PCIEMSIALR 0x02048
-#define MSIFE BIT(0)
-#define PCIEMSIAUR 0x0204c
-#define PCIEMSIIER 0x02050
-
-/* root port address */
-#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))
-
-/* local address reg & mask */
-#define PCIELAR(x) (0x02200 + ((x) * 0x20))
-#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
-#define LAM_PREFETCH BIT(3)
-#define LAM_64BIT BIT(2)
-#define LAR_ENABLE BIT(1)
-
-/* PCIe address reg & mask */
-#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
-#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
-#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
-#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
-#define PAR_ENABLE BIT(31)
-#define IO_SPACE BIT(8)
-
-/* Configuration */
-#define PCICONF(x) (0x010000 + ((x) * 0x4))
-#define PMCAP(x) (0x010040 + ((x) * 0x4))
-#define EXPCAP(x) (0x010070 + ((x) * 0x4))
-#define VCCAP(x) (0x010100 + ((x) * 0x4))
-
-/* link layer */
-#define IDSETR1 0x011004
-#define TLCTLR 0x011048
-#define MACSR 0x011054
-#define SPCHGFIN BIT(4)
-#define SPCHGFAIL BIT(6)
-#define SPCHGSUC BIT(7)
-#define LINK_SPEED (0xf << 16)
-#define LINK_SPEED_2_5GTS (1 << 16)
-#define LINK_SPEED_5_0GTS (2 << 16)
-#define MACCTLR 0x011058
-#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */
-#define SPEED_CHANGE BIT(24)
-#define SCRAMBLE_DISABLE BIT(27)
-#define LTSMDIS BIT(31)
-#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
-#define PMSR 0x01105c
-#define MACS2R 0x011078
-#define MACCGSPSETR 0x011084
-#define SPCNGRSN BIT(31)
-
-/* R-Car H1 PHY */
-#define H1_PCIEPHYADRR 0x04000c
-#define WRITE_CMD BIT(16)
-#define PHY_ACK BIT(24)
-#define RATE_POS 12
-#define LANE_POS 8
-#define ADR_POS 0
-#define H1_PCIEPHYDOUTR 0x040014
-
-/* R-Car Gen2 PHY */
-#define GEN2_PCIEPHYADDR 0x780
-#define GEN2_PCIEPHYDATA 0x784
-#define GEN2_PCIEPHYCTRL 0x78c
-
-#define INT_PCI_MSI_NR 32
-
-#define RCONF(x) (PCICONF(0) + (x))
-#define RPMCAP(x) (PMCAP(0) + (x))
-#define REXPCAP(x) (EXPCAP(0) + (x))
-#define RVCCAP(x) (VCCAP(0) + (x))
-#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
-#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
-#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+#include "pcie-rcar.h"
-#define RCAR_PCI_MAX_RESOURCES 4
-#define MAX_NR_INBOUND_MAPS 6
-
-struct rcar_msi {
- DECLARE_BITMAP(used, INT_PCI_MSI_NR);
- struct irq_domain *domain;
- struct msi_controller chip;
- unsigned long pages;
- struct mutex lock;
- int irq1;
- int irq2;
-};
-
-static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
-{
- return container_of(chip, struct rcar_msi, chip);
-}
-
-/* Structure representing the PCIe interface */
-struct rcar_pcie {
- struct device *dev;
- struct phy *phy;
- void __iomem *base;
- struct list_head resources;
- int root_bus_nr;
- struct clk *bus_clk;
- struct rcar_msi msi;
-};
-
-static void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val,
- unsigned int reg)
+void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, unsigned int reg)
{
writel(val, pcie->base + reg);
}
-static u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg)
+u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg)
{
return readl(pcie->base + reg);
}
-enum {
- RCAR_PCI_ACCESS_READ,
- RCAR_PCI_ACCESS_WRITE,
-};
-
-static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
+void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
unsigned int shift = BITS_PER_BYTE * (where & 3);
u32 val = rcar_pci_read_reg(pcie, where & ~3);
@@ -181,163 +31,42 @@ static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
rcar_pci_write_reg(pcie, val, where & ~3);
}
-static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
+int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
{
- unsigned int shift = BITS_PER_BYTE * (where & 3);
- u32 val = rcar_pci_read_reg(pcie, where & ~3);
-
- return val >> shift;
-}
-
-/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
-static int rcar_pcie_config_access(struct rcar_pcie *pcie,
- unsigned char access_type, struct pci_bus *bus,
- unsigned int devfn, int where, u32 *data)
-{
- unsigned int dev, func, reg, index;
-
- dev = PCI_SLOT(devfn);
- func = PCI_FUNC(devfn);
- reg = where & ~3;
- index = reg / 4;
-
- /*
- * While each channel has its own memory-mapped extended config
- * space, it's generally only accessible when in endpoint mode.
- * When in root complex mode, the controller is unable to target
- * itself with either type 0 or type 1 accesses, and indeed, any
- * controller-initiated target transfer to its own config space
- * results in a completer abort.
- *
- * Each channel effectively only supports a single device, but as
- * the same channel <-> device access works for any PCI_SLOT()
- * value, we cheat a bit here and bind the controller's config
- * space to devfn 0 in order to enable self-enumeration. In this
- * case the regular ECAR/ECDR path is sidelined and the mangled
- * config access itself is initiated as an internal bus transaction.
- */
- if (pci_is_root_bus(bus)) {
- if (dev != 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (access_type == RCAR_PCI_ACCESS_READ) {
- *data = rcar_pci_read_reg(pcie, PCICONF(index));
- } else {
- /* Keep an eye out for changes to the root bus number */
- if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
- pcie->root_bus_nr = *data & 0xff;
-
- rcar_pci_write_reg(pcie, *data, PCICONF(index));
- }
-
- return PCIBIOS_SUCCESSFUL;
- }
-
- if (pcie->root_bus_nr < 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /* Clear errors */
- rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
-
- /* Set the PIO address */
- rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
- PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
-
- /* Enable the configuration access */
- if (bus->parent->number == pcie->root_bus_nr)
- rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
- else
- rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
-
- /* Check for errors */
- if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /* Check for master and target aborts */
- if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
- (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (access_type == RCAR_PCI_ACCESS_READ)
- *data = rcar_pci_read_reg(pcie, PCIECDR);
- else
- rcar_pci_write_reg(pcie, *data, PCIECDR);
-
- /* Disable the configuration access */
- rcar_pci_write_reg(pcie, 0, PCIECCTLR);
-
- return PCIBIOS_SUCCESSFUL;
-}
+ unsigned int timeout = 10;
-static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *val)
-{
- struct rcar_pcie *pcie = bus->sysdata;
- int ret;
+ while (timeout--) {
+ if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
+ return 0;
- ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
- bus, devfn, where, val);
- if (ret != PCIBIOS_SUCCESSFUL) {
- *val = 0xffffffff;
- return ret;
+ msleep(5);
}
- if (size == 1)
- *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
- else if (size == 2)
- *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
-
- dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
- bus->number, devfn, where, size, *val);
-
- return ret;
+ return -ETIMEDOUT;
}
-/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
-static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 val)
+int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
{
- struct rcar_pcie *pcie = bus->sysdata;
- unsigned int shift;
- u32 data;
- int ret;
-
- ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
- bus, devfn, where, &data);
- if (ret != PCIBIOS_SUCCESSFUL)
- return ret;
-
- dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
- bus->number, devfn, where, size, val);
+ unsigned int timeout = 10000;
- if (size == 1) {
- shift = BITS_PER_BYTE * (where & 3);
- data &= ~(0xff << shift);
- data |= ((val & 0xff) << shift);
- } else if (size == 2) {
- shift = BITS_PER_BYTE * (where & 2);
- data &= ~(0xffff << shift);
- data |= ((val & 0xffff) << shift);
- } else
- data = val;
+ while (timeout--) {
+ if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ return 0;
- ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
- bus, devfn, where, &data);
+ udelay(5);
+ cpu_relax();
+ }
- return ret;
+ return -ETIMEDOUT;
}
-static struct pci_ops rcar_pcie_ops = {
- .read = rcar_pcie_read_conf,
- .write = rcar_pcie_write_conf,
-};
-
-static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
- struct resource *res)
+void rcar_pcie_set_outbound(struct rcar_pcie *pcie, int win,
+ struct resource_entry *window)
{
/* Setup PCIe address space mappings for each resource */
- resource_size_t size;
+ struct resource *res = window->res;
resource_size_t res_start;
+ resource_size_t size;
u32 mask;
rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
@@ -347,13 +76,16 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
* keeps things pretty simple.
*/
size = resource_size(res);
- mask = (roundup_pow_of_two(size) / SZ_128) - 1;
+ if (size > 128)
+ mask = (roundup_pow_of_two(size) / SZ_128) - 1;
+ else
+ mask = 0x0;
rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
if (res->flags & IORESOURCE_IO)
- res_start = pci_pio_to_address(res->start);
+ res_start = pci_pio_to_address(res->start) - window->offset;
else
- res_start = res->start;
+ res_start = res->start - window->offset;
rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
@@ -367,883 +99,22 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}
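
The PCIEPAMR value programmed above encodes the window size in 128-byte units: a 1 MiB window yields (SZ_1M / SZ_128) - 1 = 0x1fff at bit 7, and the new guard keeps windows of 128 bytes or less from underflowing. Extracted as a standalone sketch (helper name is illustrative):

static u32 rcar_pcie_pamr_mask(resource_size_t size)
{
	if (size > 128)
		return (roundup_pow_of_two(size) / SZ_128) - 1;

	return 0;
}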
-static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
-{
- struct resource_entry *win;
- int i = 0;
-
- /* Setup PCI resources */
- resource_list_for_each_entry(win, &pci->resources) {
- struct resource *res = win->res;
-
- if (!res->flags)
- continue;
-
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- case IORESOURCE_MEM:
- rcar_pcie_setup_window(i, pci, res);
- i++;
- break;
- case IORESOURCE_BUS:
- pci->root_bus_nr = res->start;
- break;
- default:
- continue;
- }
-
- pci_add_resource(resource, res);
- }
-
- return 1;
-}
-
-static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- unsigned int timeout = 1000;
- u32 macsr;
-
- if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
- return;
-
- if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
- dev_err(dev, "Speed change already in progress\n");
- return;
- }
-
- macsr = rcar_pci_read_reg(pcie, MACSR);
- if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
- goto done;
-
- /* Set target link speed to 5.0 GT/s */
- rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
- PCI_EXP_LNKSTA_CLS_5_0GB);
-
- /* Set speed change reason as intentional factor */
- rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
-
- /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
- if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
- rcar_pci_write_reg(pcie, macsr, MACSR);
-
- /* Start link speed change */
- rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
-
- while (timeout--) {
- macsr = rcar_pci_read_reg(pcie, MACSR);
- if (macsr & SPCHGFIN) {
- /* Clear the interrupt bits */
- rcar_pci_write_reg(pcie, macsr, MACSR);
-
- if (macsr & SPCHGFAIL)
- dev_err(dev, "Speed change failed\n");
-
- goto done;
- }
-
- msleep(1);
- }
-
- dev_err(dev, "Speed change timed out\n");
-
-done:
- dev_info(dev, "Current link speed is %s GT/s\n",
- (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
-}
-
-static int rcar_pcie_enable(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- struct pci_bus *bus, *child;
- int ret;
-
- /* Try setting 5 GT/s link speed */
- rcar_pcie_force_speedup(pcie);
-
- rcar_pcie_setup(&bridge->windows, pcie);
-
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-
- bridge->dev.parent = dev;
- bridge->sysdata = pcie;
- bridge->busnr = pcie->root_bus_nr;
- bridge->ops = &rcar_pcie_ops;
- bridge->map_irq = of_irq_parse_and_map_pci;
- bridge->swizzle_irq = pci_common_swizzle;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- bridge->msi = &pcie->msi.chip;
-
- ret = pci_scan_root_bus_bridge(bridge);
- if (ret < 0)
- return ret;
-
- bus = bridge->bus;
-
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
-
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(bus);
-
- return 0;
-}
-
-static int phy_wait_for_ack(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- unsigned int timeout = 100;
-
- while (timeout--) {
- if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
- return 0;
-
- udelay(100);
- }
-
- dev_err(dev, "Access to PCIe phy timed out\n");
-
- return -ETIMEDOUT;
-}
-
-static void phy_write_reg(struct rcar_pcie *pcie,
- unsigned int rate, u32 addr,
- unsigned int lane, u32 data)
-{
- u32 phyaddr;
-
- phyaddr = WRITE_CMD |
- ((rate & 1) << RATE_POS) |
- ((lane & 0xf) << LANE_POS) |
- ((addr & 0xff) << ADR_POS);
-
- /* Set write data */
- rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
- rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
-
- /* Ignore errors as they will be dealt with if the data link is down */
- phy_wait_for_ack(pcie);
-
- /* Clear command */
- rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
- rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
-
- /* Ignore errors as they will be dealt with if the data link is down */
- phy_wait_for_ack(pcie);
-}
-
-static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
-{
- unsigned int timeout = 10;
-
- while (timeout--) {
- if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
- return 0;
-
- msleep(5);
- }
-
- return -ETIMEDOUT;
-}
-
-static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
-{
- unsigned int timeout = 10000;
-
- while (timeout--) {
- if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
- return 0;
-
- udelay(5);
- cpu_relax();
- }
-
- return -ETIMEDOUT;
-}
-
-static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
-{
- int err;
-
- /* Begin initialization */
- rcar_pci_write_reg(pcie, 0, PCIETCTLR);
-
- /* Set mode */
- rcar_pci_write_reg(pcie, 1, PCIEMSR);
-
- err = rcar_pcie_wait_for_phyrdy(pcie);
- if (err)
- return err;
-
- /*
- * Initial header for port config space is type 1, set the device
- * class to match. Hardware takes care of propagating the IDSETR
- * settings, so there is no need to bother with a quirk.
- */
- rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
-
- /*
- * Setup Secondary Bus Number & Subordinate Bus Number, even though
- * they aren't used, to avoid the bridge being detected as broken.
- */
- rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
- rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
-
- /* Initialize default capabilities. */
- rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
- rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
- PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
- rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
- PCI_HEADER_TYPE_BRIDGE);
-
- /* Enable data link layer active state reporting */
- rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
- PCI_EXP_LNKCAP_DLLLARC);
-
- /* Write out the physical slot number = 0 */
- rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
-
- /* Set the completion timer timeout to the maximum 50ms. */
- rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
-
- /* Terminate list of capabilities (Next Capability Offset=0) */
- rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
-
- /* Enable MSI */
- if (IS_ENABLED(CONFIG_PCI_MSI))
- rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
-
- rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
-
- /* Finish initialization - establish a PCI Express link */
- rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
-
- /* This will timeout if we don't have a link. */
- err = rcar_pcie_wait_for_dl(pcie);
- if (err)
- return err;
-
- /* Enable INTx interrupts */
- rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
-
- wmb();
-
- return 0;
-}
-
-static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
-{
- /* Initialize the phy */
- phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
- phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
- phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
- phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
- phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
- phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
- phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
- phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
- phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
- phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
- phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
- phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
-
- phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
- phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
- phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
-
- return 0;
-}
-
-static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie)
+void rcar_pcie_set_inbound(struct rcar_pcie *pcie, u64 cpu_addr,
+ u64 pci_addr, u64 flags, int idx, bool host)
{
/*
- * These settings come from the R-Car Series, 2nd Generation User's
- * Manual, section 50.3.1 (2) Initialization of the physical layer.
+ * Set up 64-bit inbound regions as the range parser doesn't
+ * distinguish between 32 and 64-bit types.
*/
- rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
- rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
- rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
- rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
-
- rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
- /* The following value is for DC connection, no termination resistor */
- rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
- rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
- rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
-
- return 0;
-}
-
-static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
-{
- int err;
-
- err = phy_init(pcie->phy);
- if (err)
- return err;
-
- err = phy_power_on(pcie->phy);
- if (err)
- phy_exit(pcie->phy);
-
- return err;
-}
-
-static int rcar_msi_alloc(struct rcar_msi *chip)
-{
- int msi;
-
- mutex_lock(&chip->lock);
-
- msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
- if (msi < INT_PCI_MSI_NR)
- set_bit(msi, chip->used);
- else
- msi = -ENOSPC;
-
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
-{
- int msi;
-
- mutex_lock(&chip->lock);
- msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
- order_base_2(no_irqs));
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
-{
- mutex_lock(&chip->lock);
- clear_bit(irq, chip->used);
- mutex_unlock(&chip->lock);
-}
-
-static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
-{
- struct rcar_pcie *pcie = data;
- struct rcar_msi *msi = &pcie->msi;
- struct device *dev = pcie->dev;
- unsigned long reg;
-
- reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
-
- /* MSI & INTx share an interrupt - we only handle MSI here */
- if (!reg)
- return IRQ_NONE;
-
- while (reg) {
- unsigned int index = find_first_bit(&reg, 32);
- unsigned int msi_irq;
-
- /* clear the interrupt */
- rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
-
- msi_irq = irq_find_mapping(msi->domain, index);
- if (msi_irq) {
- if (test_bit(index, msi->used))
- generic_handle_irq(msi_irq);
- else
- dev_info(dev, "unhandled MSI\n");
- } else {
- /* Unknown MSI, just clear it */
- dev_dbg(dev, "unexpected MSI\n");
- }
-
- /* see if there's any more pending in this vector */
- reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
- }
-
- return IRQ_HANDLED;
-}
-
-static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
- struct msi_desc *desc)
-{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
-
- hwirq = rcar_msi_alloc(msi);
- if (hwirq < 0)
- return hwirq;
-
- irq = irq_find_mapping(msi->domain, hwirq);
- if (!irq) {
- rcar_msi_free(msi, hwirq);
- return -EINVAL;
- }
-
- irq_set_msi_desc(irq, desc);
-
- msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
- msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
- msg.data = hwirq;
-
- pci_write_msi_msg(irq, &msg);
-
- return 0;
-}
-
-static int rcar_msi_setup_irqs(struct msi_controller *chip,
- struct pci_dev *pdev, int nvec, int type)
-{
- struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct msi_desc *desc;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
- int i;
-
- /* MSI-X interrupts are not supported */
- if (type == PCI_CAP_ID_MSIX)
- return -EINVAL;
-
- WARN_ON(!list_is_singular(&pdev->dev.msi_list));
- desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
-
- hwirq = rcar_msi_alloc_region(msi, nvec);
- if (hwirq < 0)
- return -ENOSPC;
-
- irq = irq_find_mapping(msi->domain, hwirq);
- if (!irq)
- return -ENOSPC;
-
- for (i = 0; i < nvec; i++) {
- /*
- * irq_create_mapping() called from rcar_pcie_probe() pre-
- * allocates descs, so there is no need to allocate descs here.
- * We can therefore assume that if irq_find_mapping() above
- * returns non-zero, then the descs are also successfully
- * allocated.
- */
- if (irq_set_msi_desc_off(irq, i, desc)) {
- /* TODO: clear */
- return -EINVAL;
- }
- }
-
- desc->nvec_used = nvec;
- desc->msi_attrib.multiple = order_base_2(nvec);
-
- msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
- msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
- msg.data = hwirq;
-
- pci_write_msi_msg(irq, &msg);
-
- return 0;
-}
-
-static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
-{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct irq_data *d = irq_get_irq_data(irq);
-
- rcar_msi_free(msi, d->hwirq);
-}
-
-static struct irq_chip rcar_msi_irq_chip = {
- .name = "R-Car PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops msi_domain_ops = {
- .map = rcar_msi_map,
-};
-
-static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie)
-{
- struct rcar_msi *msi = &pcie->msi;
- int i, irq;
-
- for (i = 0; i < INT_PCI_MSI_NR; i++) {
- irq = irq_find_mapping(msi->domain, i);
- if (irq > 0)
- irq_dispose_mapping(irq);
- }
-
- irq_domain_remove(msi->domain);
-}
-
-static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- struct rcar_msi *msi = &pcie->msi;
- phys_addr_t base;
- int err, i;
-
- mutex_init(&msi->lock);
-
- msi->chip.dev = dev;
- msi->chip.setup_irq = rcar_msi_setup_irq;
- msi->chip.setup_irqs = rcar_msi_setup_irqs;
- msi->chip.teardown_irq = rcar_msi_teardown_irq;
-
- msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
- &msi_domain_ops, &msi->chip);
- if (!msi->domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < INT_PCI_MSI_NR; i++)
- irq_create_mapping(msi->domain, i);
-
- /* Two irqs are for MSI, but they are also used for non-MSI irqs */
- err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
- IRQF_SHARED | IRQF_NO_THREAD,
- rcar_msi_irq_chip.name, pcie);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ: %d\n", err);
- goto err;
- }
-
- err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
- IRQF_SHARED | IRQF_NO_THREAD,
- rcar_msi_irq_chip.name, pcie);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ: %d\n", err);
- goto err;
- }
-
- /* setup MSI data target */
- msi->pages = __get_free_pages(GFP_KERNEL, 0);
- if (!msi->pages) {
- err = -ENOMEM;
- goto err;
- }
- base = virt_to_phys((void *)msi->pages);
-
- rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
- rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
-
- /* enable all MSI interrupts */
- rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
-
- return 0;
-
-err:
- rcar_pcie_unmap_msi(pcie);
- return err;
-}
-
-static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
-{
- struct rcar_msi *msi = &pcie->msi;
-
- /* Disable all MSI interrupts */
- rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
-
- /* Disable address decoding of the MSI interrupt, MSIFE */
- rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
-
- free_pages(msi->pages, 0);
-
- rcar_pcie_unmap_msi(pcie);
-}
-
-static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- struct resource res;
- int err, i;
-
- pcie->phy = devm_phy_optional_get(dev, "pcie");
- if (IS_ERR(pcie->phy))
- return PTR_ERR(pcie->phy);
-
- err = of_address_to_resource(dev->of_node, 0, &res);
- if (err)
- return err;
-
- pcie->base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(pcie->base))
- return PTR_ERR(pcie->base);
-
- pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(pcie->bus_clk)) {
- dev_err(dev, "cannot get pcie bus clock\n");
- return PTR_ERR(pcie->bus_clk);
- }
-
- i = irq_of_parse_and_map(dev->of_node, 0);
- if (!i) {
- dev_err(dev, "cannot get platform resources for msi interrupt\n");
- err = -ENOENT;
- goto err_irq1;
- }
- pcie->msi.irq1 = i;
-
- i = irq_of_parse_and_map(dev->of_node, 1);
- if (!i) {
- dev_err(dev, "cannot get platform resources for msi interrupt\n");
- err = -ENOENT;
- goto err_irq2;
- }
- pcie->msi.irq2 = i;
-
- return 0;
-
-err_irq2:
- irq_dispose_mapping(pcie->msi.irq1);
-err_irq1:
- return err;
-}
-
-static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
- struct resource_entry *entry,
- int *index)
-{
- u64 restype = entry->res->flags;
- u64 cpu_addr = entry->res->start;
- u64 cpu_end = entry->res->end;
- u64 pci_addr = entry->res->start - entry->offset;
- u32 flags = LAM_64BIT | LAR_ENABLE;
- u64 mask;
- u64 size = resource_size(entry->res);
- int idx = *index;
-
- if (restype & IORESOURCE_PREFETCH)
- flags |= LAM_PREFETCH;
-
- while (cpu_addr < cpu_end) {
- if (idx >= MAX_NR_INBOUND_MAPS - 1) {
- dev_err(pcie->dev, "Failed to map inbound regions!\n");
- return -EINVAL;
- }
- /*
- * If the size of the range is larger than the alignment of
- * the start address, we have to use multiple entries to
- * perform the mapping.
- */
- if (cpu_addr > 0) {
- unsigned long nr_zeros = __ffs64(cpu_addr);
- u64 alignment = 1ULL << nr_zeros;
-
- size = min(size, alignment);
- }
- /* Hardware supports max 4GiB inbound region */
- size = min(size, 1ULL << 32);
-
- mask = roundup_pow_of_two(size) - 1;
- mask &= ~0xf;
-
- /*
- * Set up 64-bit inbound regions as the range parser doesn't
- * distinguish between 32 and 64-bit types.
- */
+ if (host)
rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
PCIEPRAR(idx));
- rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
- rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
- PCIELAMR(idx));
+ rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+ rcar_pci_write_reg(pcie, flags, PCIELAMR(idx));
+ if (host)
rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
PCIEPRAR(idx + 1));
- rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
- PCIELAR(idx + 1));
- rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
-
- pci_addr += size;
- cpu_addr += size;
- idx += 2;
- }
- *index = idx;
-
- return 0;
-}
-
-static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie)
-{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- struct resource_entry *entry;
- int index = 0, err = 0;
-
- resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- err = rcar_pcie_inbound_ranges(pcie, entry, &index);
- if (err)
- break;
- }
-
- return err;
-}
-
-static const struct of_device_id rcar_pcie_of_match[] = {
- { .compatible = "renesas,pcie-r8a7779",
- .data = rcar_pcie_phy_init_h1 },
- { .compatible = "renesas,pcie-r8a7790",
- .data = rcar_pcie_phy_init_gen2 },
- { .compatible = "renesas,pcie-r8a7791",
- .data = rcar_pcie_phy_init_gen2 },
- { .compatible = "renesas,pcie-rcar-gen2",
- .data = rcar_pcie_phy_init_gen2 },
- { .compatible = "renesas,pcie-r8a7795",
- .data = rcar_pcie_phy_init_gen3 },
- { .compatible = "renesas,pcie-rcar-gen3",
- .data = rcar_pcie_phy_init_gen3 },
- {},
-};
-
-static int rcar_pcie_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct rcar_pcie *pcie;
- u32 data;
- int err;
- int (*phy_init_fn)(struct rcar_pcie *);
- struct pci_host_bridge *bridge;
-
- bridge = pci_alloc_host_bridge(sizeof(*pcie));
- if (!bridge)
- return -ENOMEM;
-
- pcie = pci_host_bridge_priv(bridge);
-
- pcie->dev = dev;
- platform_set_drvdata(pdev, pcie);
-
- err = pci_parse_request_of_pci_ranges(dev, &pcie->resources,
- &bridge->dma_ranges, NULL);
- if (err)
- goto err_free_bridge;
-
- pm_runtime_enable(pcie->dev);
- err = pm_runtime_get_sync(pcie->dev);
- if (err < 0) {
- dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
- goto err_pm_disable;
- }
-
- err = rcar_pcie_get_resources(pcie);
- if (err < 0) {
- dev_err(dev, "failed to request resources: %d\n", err);
- goto err_pm_put;
- }
-
- err = clk_prepare_enable(pcie->bus_clk);
- if (err) {
- dev_err(dev, "failed to enable bus clock: %d\n", err);
- goto err_unmap_msi_irqs;
- }
-
- err = rcar_pcie_parse_map_dma_ranges(pcie);
- if (err)
- goto err_clk_disable;
-
- phy_init_fn = of_device_get_match_data(dev);
- err = phy_init_fn(pcie);
- if (err) {
- dev_err(dev, "failed to init PCIe PHY\n");
- goto err_clk_disable;
- }
-
- /* Failure to get a link might just be that no cards are inserted */
- if (rcar_pcie_hw_init(pcie)) {
- dev_info(dev, "PCIe link down\n");
- err = -ENODEV;
- goto err_phy_shutdown;
- }
-
- data = rcar_pci_read_reg(pcie, MACSR);
- dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- err = rcar_pcie_enable_msi(pcie);
- if (err < 0) {
- dev_err(dev,
- "failed to enable MSI support: %d\n",
- err);
- goto err_phy_shutdown;
- }
- }
-
- err = rcar_pcie_enable(pcie);
- if (err)
- goto err_msi_teardown;
-
- return 0;
-
-err_msi_teardown:
- if (IS_ENABLED(CONFIG_PCI_MSI))
- rcar_pcie_teardown_msi(pcie);
-
-err_phy_shutdown:
- if (pcie->phy) {
- phy_power_off(pcie->phy);
- phy_exit(pcie->phy);
- }
-
-err_clk_disable:
- clk_disable_unprepare(pcie->bus_clk);
-
-err_unmap_msi_irqs:
- irq_dispose_mapping(pcie->msi.irq2);
- irq_dispose_mapping(pcie->msi.irq1);
-
-err_pm_put:
- pm_runtime_put(dev);
-
-err_pm_disable:
- pm_runtime_disable(dev);
- pci_free_resource_list(&pcie->resources);
-
-err_free_bridge:
- pci_free_host_bridge(bridge);
-
- return err;
+ rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx + 1));
+ rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
}
-
-static int rcar_pcie_resume_noirq(struct device *dev)
-{
- struct rcar_pcie *pcie = dev_get_drvdata(dev);
-
- if (rcar_pci_read_reg(pcie, PMSR) &&
- !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
- return 0;
-
- /* Re-establish the PCIe link */
- rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
- rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
- return rcar_pcie_wait_for_dl(pcie);
-}
-
-static const struct dev_pm_ops rcar_pcie_pm_ops = {
- .resume_noirq = rcar_pcie_resume_noirq,
-};
-
-static struct platform_driver rcar_pcie_driver = {
- .driver = {
- .name = "rcar-pcie",
- .of_match_table = rcar_pcie_of_match,
- .pm = &rcar_pcie_pm_ops,
- .suppress_bind_attrs = true,
- },
- .probe = rcar_pcie_probe,
-};
-builtin_platform_driver(rcar_pcie_driver);
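
The removed rcar_pcie_inbound_ranges() split each DMA range so that no chunk exceeds the alignment of its start address or the 4 GiB hardware limit; its per-chunk sizing logic, mirrored as a standalone sketch:

static u64 rcar_pcie_chunk_size(u64 cpu_addr, u64 size)
{
	/* A chunk may be no larger than the alignment of its start */
	if (cpu_addr) {
		u64 alignment = 1ULL << __ffs64(cpu_addr);

		size = min(size, alignment);
	}

	/* Hardware supports at most a 4 GiB inbound region */
	return min(size, 1ULL << 32);
}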
diff --git a/drivers/pci/controller/pcie-rcar.h b/drivers/pci/controller/pcie-rcar.h
new file mode 100644
index 000000000000..d4c698b5f821
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
+ *
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ */
+
+#ifndef _PCIE_RCAR_H
+#define _PCIE_RCAR_H
+
+#define PCIECAR 0x000010
+#define PCIECCTLR 0x000018
+#define CONFIG_SEND_ENABLE BIT(31)
+#define TYPE0 (0 << 8)
+#define TYPE1 BIT(8)
+#define PCIECDR 0x000020
+#define PCIEMSR 0x000028
+#define PCIEINTXR 0x000400
+#define ASTINTX BIT(16)
+#define PCIEPHYSR 0x0007f0
+#define PHYRDY BIT(0)
+#define PCIEMSITXR 0x000840
+
+/* Transfer control */
+#define PCIETCTLR 0x02000
+#define DL_DOWN BIT(3)
+#define CFINIT BIT(0)
+#define PCIETSTR 0x02004
+#define DATA_LINK_ACTIVE BIT(0)
+#define PCIEERRFR 0x02020
+#define UNSUPPORTED_REQUEST BIT(4)
+#define PCIEMSIFR 0x02044
+#define PCIEMSIALR 0x02048
+#define MSIFE BIT(0)
+#define PCIEMSIAUR 0x0204c
+#define PCIEMSIIER 0x02050
+
+/* root port address */
+#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))
+
+/* local address reg & mask */
+#define PCIELAR(x) (0x02200 + ((x) * 0x20))
+#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
+#define LAM_PREFETCH BIT(3)
+#define LAM_64BIT BIT(2)
+#define LAR_ENABLE BIT(1)
+
+/* PCIe address reg & mask */
+#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
+#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
+#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
+#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
+#define PAR_ENABLE BIT(31)
+#define IO_SPACE BIT(8)
+
+/* Configuration */
+#define PCICONF(x) (0x010000 + ((x) * 0x4))
+#define INTDIS BIT(10)
+#define PMCAP(x) (0x010040 + ((x) * 0x4))
+#define MSICAP(x) (0x010050 + ((x) * 0x4))
+#define MSICAP0_MSIE BIT(16)
+#define MSICAP0_MMESCAP_OFFSET 17
+#define MSICAP0_MMESE_OFFSET 20
+#define MSICAP0_MMESE_MASK GENMASK(22, 20)
+#define EXPCAP(x) (0x010070 + ((x) * 0x4))
+#define VCCAP(x) (0x010100 + ((x) * 0x4))
+
+/* link layer */
+#define IDSETR0 0x011000
+#define IDSETR1 0x011004
+#define SUBIDSETR 0x011024
+#define TLCTLR 0x011048
+#define MACSR 0x011054
+#define SPCHGFIN BIT(4)
+#define SPCHGFAIL BIT(6)
+#define SPCHGSUC BIT(7)
+#define LINK_SPEED (0xf << 16)
+#define LINK_SPEED_2_5GTS (1 << 16)
+#define LINK_SPEED_5_0GTS (2 << 16)
+#define MACCTLR 0x011058
+#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */
+#define SPEED_CHANGE BIT(24)
+#define SCRAMBLE_DISABLE BIT(27)
+#define LTSMDIS BIT(31)
+#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
+#define PMSR 0x01105c
+#define MACS2R 0x011078
+#define MACCGSPSETR 0x011084
+#define SPCNGRSN BIT(31)
+
+/* R-Car H1 PHY */
+#define H1_PCIEPHYADRR 0x04000c
+#define WRITE_CMD BIT(16)
+#define PHY_ACK BIT(24)
+#define RATE_POS 12
+#define LANE_POS 8
+#define ADR_POS 0
+#define H1_PCIEPHYDOUTR 0x040014
+
+/* R-Car Gen2 PHY */
+#define GEN2_PCIEPHYADDR 0x780
+#define GEN2_PCIEPHYDATA 0x784
+#define GEN2_PCIEPHYCTRL 0x78c
+
+#define INT_PCI_MSI_NR 32
+
+#define RCONF(x) (PCICONF(0) + (x))
+#define RPMCAP(x) (PMCAP(0) + (x))
+#define REXPCAP(x) (EXPCAP(0) + (x))
+#define RVCCAP(x) (VCCAP(0) + (x))
+
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+
+#define RCAR_PCI_MAX_RESOURCES 4
+#define MAX_NR_INBOUND_MAPS 6
+
+struct rcar_pcie {
+ struct device *dev;
+ void __iomem *base;
+};
+
+enum {
+ RCAR_PCI_ACCESS_READ,
+ RCAR_PCI_ACCESS_WRITE,
+};
+
+void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, unsigned int reg);
+u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg);
+void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data);
+int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie);
+int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie);
+void rcar_pcie_set_outbound(struct rcar_pcie *pcie, int win,
+ struct resource_entry *window);
+void rcar_pcie_set_inbound(struct rcar_pcie *pcie, u64 cpu_addr,
+ u64 pci_addr, u64 flags, int idx, bool host);
+
+#endif
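
A caller of the accessors this header now exports might look like the following sketch, assuming a valid struct rcar_pcie; it performs the same sub-word read-modify-write the host driver uses to install the PCI Express capability ID:

static void example_set_exp_cap(struct rcar_pcie *pcie)
{
	/* Write the capability ID byte at REXPCAP(0) without
	 * disturbing the neighbouring bytes of the 32-bit register. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
}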
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index d743b0a48988..5eaf36629a75 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -615,7 +615,7 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
err = pci_epc_mem_init(epc, rockchip->mem_res->start,
- resource_size(rockchip->mem_res));
+ resource_size(rockchip->mem_res), PAGE_SIZE);
if (err < 0) {
dev_err(dev, "failed to initialize the memory space\n");
goto err_uninit_port;
diff --git a/drivers/pci/controller/pcie-tango.c b/drivers/pci/controller/pcie-tango.c
index 21a208da3f59..8f640c70f936 100644
--- a/drivers/pci/controller/pcie-tango.c
+++ b/drivers/pci/controller/pcie-tango.c
@@ -207,7 +207,7 @@ static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
return ret;
}
-static struct pci_ecam_ops smp8759_ecam_ops = {
+static const struct pci_ecam_ops smp8759_ecam_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -273,9 +273,9 @@ static int tango_pcie_probe(struct platform_device *pdev)
writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset);
virq = platform_get_irq(pdev, 1);
- if (virq <= 0) {
+ if (virq < 0) {
dev_err(dev, "Failed to map IRQ\n");
- return -ENXIO;
+ return virq;
}
irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie);
@@ -295,11 +295,14 @@ static int tango_pcie_probe(struct platform_device *pdev)
spin_lock_init(&pcie->used_msi_lock);
irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie);
- return pci_host_common_probe(pdev, &smp8759_ecam_ops);
+ return pci_host_common_probe(pdev);
}
static const struct of_device_id tango_pcie_ids[] = {
- { .compatible = "sigma,smp8759-pcie" },
+ {
+ .compatible = "sigma,smp8759-pcie",
+ .data = &smp8759_ecam_ops,
+ },
{ },
};
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index dac91d60701d..e386d4eac407 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -445,9 +445,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
if (!membar2)
return -ENOMEM;
offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
- readq(membar2 + MB2_SHADOW_OFFSET);
+ (readq(membar2 + MB2_SHADOW_OFFSET) &
+ PCI_BASE_ADDRESS_MEM_MASK);
offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
- readq(membar2 + MB2_SHADOW_OFFSET + 8);
+ (readq(membar2 + MB2_SHADOW_OFFSET + 8) &
+ PCI_BASE_ADDRESS_MEM_MASK);
pci_iounmap(vmd->dev, membar2);
}
}
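
The shadow registers read above carry a raw BAR value, so the low flag bits must be stripped before subtracting to form the offset; a one-line sketch of the fix (helper name is illustrative):

static u64 vmd_shadow_base(u64 shadow_reg)
{
	/* Drop the BAR type/prefetch flag bits, keep the address */
	return shadow_reg & PCI_BASE_ADDRESS_MEM_MASK;
}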
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 1a81af0ba961..8f065a42fc1a 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -26,7 +26,7 @@ static const bool per_bus_mapping = !IS_ENABLED(CONFIG_64BIT);
*/
struct pci_config_window *pci_ecam_create(struct device *dev,
struct resource *cfgres, struct resource *busr,
- struct pci_ecam_ops *ops)
+ const struct pci_ecam_ops *ops)
{
struct pci_config_window *cfg;
unsigned int bus_range, bus_range_max, bsz;
@@ -101,6 +101,7 @@ err_exit:
pci_ecam_free(cfg);
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(pci_ecam_create);
void pci_ecam_free(struct pci_config_window *cfg)
{
@@ -121,6 +122,7 @@ void pci_ecam_free(struct pci_config_window *cfg)
release_resource(&cfg->res);
kfree(cfg);
}
+EXPORT_SYMBOL_GPL(pci_ecam_free);
/*
* Function to implement the pci_ops ->map_bus method
@@ -143,9 +145,10 @@ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
base = cfg->win + (busn << cfg->ops->bus_shift);
return base + (devfn << devfn_shift) + where;
}
+EXPORT_SYMBOL_GPL(pci_ecam_map_bus);
/* ECAM ops */
-struct pci_ecam_ops pci_generic_ecam_ops = {
+const struct pci_ecam_ops pci_generic_ecam_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -153,10 +156,11 @@ struct pci_ecam_ops pci_generic_ecam_ops = {
.write = pci_generic_config_write,
}
};
+EXPORT_SYMBOL_GPL(pci_generic_ecam_ops);
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
/* ECAM ops for 32-bit access only (non-compliant) */
-struct pci_ecam_ops pci_32b_ops = {
+const struct pci_ecam_ops pci_32b_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 60330f3e3751..c89a9561439f 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -187,6 +187,9 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
*/
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
+ if (!epf_test->dma_supported)
+ return;
+
dma_release_channel(epf_test->dma_chan);
epf_test->dma_chan = NULL;
}
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index abfac1109a13..80c46f3a4590 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -23,7 +23,7 @@
static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size)
{
int order;
- unsigned int page_shift = ilog2(mem->page_size);
+ unsigned int page_shift = ilog2(mem->window.page_size);
size--;
size >>= page_shift;
@@ -36,62 +36,97 @@ static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size)
}
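
pci_epc_mem_get_order() now derives the page shift from mem->window.page_size; its arithmetic, mirrored as a sketch: with a 4 KiB page size (shift 12), a 64 KiB request gives (SZ_64K - 1) >> 12 = 15 and fls64(15) = 4, i.e. a 16-page bitmap region:

static int example_epc_order(size_t size, unsigned int page_shift)
{
	size--;
	size >>= page_shift;

	return fls64(size);
}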
/**
- * __pci_epc_mem_init() - initialize the pci_epc_mem structure
+ * pci_epc_multi_mem_init() - initialize the pci_epc_mem structure
* @epc: the EPC device that invoked pci_epc_mem_init
- * @phys_base: the physical address of the base
- * @size: the size of the address space
- * @page_size: size of each page
+ * @windows: pointer to windows supported by the device
+ * @num_windows: number of windows device supports
*
* Invoke to initialize the pci_epc_mem structure used by the
* endpoint functions to allocate mapped PCI address.
*/
-int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
- size_t page_size)
+int pci_epc_multi_mem_init(struct pci_epc *epc,
+ struct pci_epc_mem_window *windows,
+ unsigned int num_windows)
{
- int ret;
- struct pci_epc_mem *mem;
- unsigned long *bitmap;
+ struct pci_epc_mem *mem = NULL;
+ unsigned long *bitmap = NULL;
unsigned int page_shift;
- int pages;
+ size_t page_size;
int bitmap_size;
+ int pages;
+ int ret;
+ int i;
- if (page_size < PAGE_SIZE)
- page_size = PAGE_SIZE;
+ epc->num_windows = 0;
- page_shift = ilog2(page_size);
- pages = size >> page_shift;
- bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+ if (!windows || !num_windows)
+ return -EINVAL;
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem) {
- ret = -ENOMEM;
- goto err;
- }
+ epc->windows = kcalloc(num_windows, sizeof(*epc->windows), GFP_KERNEL);
+ if (!epc->windows)
+ return -ENOMEM;
- bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!bitmap) {
- ret = -ENOMEM;
- goto err_mem;
- }
+ for (i = 0; i < num_windows; i++) {
+ page_size = windows[i].page_size;
+ if (page_size < PAGE_SIZE)
+ page_size = PAGE_SIZE;
+ page_shift = ilog2(page_size);
+ pages = windows[i].size >> page_shift;
+ bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
- mem->bitmap = bitmap;
- mem->phys_base = phys_base;
- mem->page_size = page_size;
- mem->pages = pages;
- mem->size = size;
- mutex_init(&mem->lock);
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ ret = -ENOMEM;
+ i--;
+ goto err_mem;
+ }
+
+ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!bitmap) {
+ ret = -ENOMEM;
+ kfree(mem);
+ i--;
+ goto err_mem;
+ }
+
+ mem->window.phys_base = windows[i].phys_base;
+ mem->window.size = windows[i].size;
+ mem->window.page_size = page_size;
+ mem->bitmap = bitmap;
+ mem->pages = pages;
+ mutex_init(&mem->lock);
+ epc->windows[i] = mem;
+ }
- epc->mem = mem;
+ epc->mem = epc->windows[0];
+ epc->num_windows = num_windows;
return 0;
err_mem:
- kfree(mem);
+ for (; i >= 0; i--) {
+ mem = epc->windows[i];
+ kfree(mem->bitmap);
+ kfree(mem);
+ }
+ kfree(epc->windows);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_multi_mem_init);
+
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
+ size_t size, size_t page_size)
+{
+ struct pci_epc_mem_window mem_window;
+
+ mem_window.phys_base = base;
+ mem_window.size = size;
+ mem_window.page_size = page_size;
-err:
-return ret;
+ return pci_epc_multi_mem_init(epc, &mem_window, 1);
}
-EXPORT_SYMBOL_GPL(__pci_epc_mem_init);
+EXPORT_SYMBOL_GPL(pci_epc_mem_init);
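
A hypothetical EPC driver adopting the multi-window API would register all of its outbound windows in one call; the base addresses, sizes and page sizes below are illustrative only:

static int example_epc_mem_setup(struct pci_epc *epc)
{
	static struct pci_epc_mem_window windows[] = {
		{ .phys_base = 0x40000000, .size = SZ_1M,  .page_size = SZ_4K },
		{ .phys_base = 0x80000000, .size = SZ_64M, .page_size = SZ_64K },
	};

	return pci_epc_multi_mem_init(epc, windows, ARRAY_SIZE(windows));
}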
/**
* pci_epc_mem_exit() - cleanup the pci_epc_mem structure
@@ -102,11 +137,22 @@ EXPORT_SYMBOL_GPL(__pci_epc_mem_init);
*/
void pci_epc_mem_exit(struct pci_epc *epc)
{
- struct pci_epc_mem *mem = epc->mem;
+ struct pci_epc_mem *mem;
+ int i;
+
+ if (!epc->num_windows)
+ return;
+ for (i = 0; i < epc->num_windows; i++) {
+ mem = epc->windows[i];
+ kfree(mem->bitmap);
+ kfree(mem);
+ }
+ kfree(epc->windows);
+
+ epc->windows = NULL;
epc->mem = NULL;
- kfree(mem->bitmap);
- kfree(mem);
+ epc->num_windows = 0;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
@@ -122,31 +168,60 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
- int pageno;
void __iomem *virt_addr = NULL;
- struct pci_epc_mem *mem = epc->mem;
- unsigned int page_shift = ilog2(mem->page_size);
+ struct pci_epc_mem *mem;
+ unsigned int page_shift;
+ size_t align_size;
+ int pageno;
int order;
+ int i;
- size = ALIGN(size, mem->page_size);
- order = pci_epc_mem_get_order(mem, size);
-
- mutex_lock(&mem->lock);
- pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
- if (pageno < 0)
- goto ret;
+ for (i = 0; i < epc->num_windows; i++) {
+ mem = epc->windows[i];
+ mutex_lock(&mem->lock);
+ align_size = ALIGN(size, mem->window.page_size);
+ order = pci_epc_mem_get_order(mem, align_size);
- *phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
- virt_addr = ioremap(*phys_addr, size);
- if (!virt_addr)
- bitmap_release_region(mem->bitmap, pageno, order);
+ pageno = bitmap_find_free_region(mem->bitmap, mem->pages,
+ order);
+ if (pageno >= 0) {
+ page_shift = ilog2(mem->window.page_size);
+ *phys_addr = mem->window.phys_base +
+ ((phys_addr_t)pageno << page_shift);
+ virt_addr = ioremap(*phys_addr, align_size);
+ if (!virt_addr) {
+ bitmap_release_region(mem->bitmap,
+ pageno, order);
+ mutex_unlock(&mem->lock);
+ continue;
+ }
+ mutex_unlock(&mem->lock);
+ return virt_addr;
+ }
+ mutex_unlock(&mem->lock);
+ }
-ret:
- mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
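
The allocator now walks every window until a region fits, and the matching free resolves the owning window from the physical address alone; a typical round trip, sketched:

static void example_epc_map_cycle(struct pci_epc *epc)
{
	phys_addr_t phys;
	void __iomem *virt;

	virt = pci_epc_mem_alloc_addr(epc, &phys, SZ_64K);
	if (!virt)
		return;

	/* ... program an outbound mapping at phys, do I/O via virt ... */

	pci_epc_mem_free_addr(epc, phys, virt, SZ_64K);
}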
+static struct pci_epc_mem *pci_epc_get_matching_window(struct pci_epc *epc,
+ phys_addr_t phys_addr)
+{
+ struct pci_epc_mem *mem;
+ int i;
+
+ for (i = 0; i < epc->num_windows; i++) {
+ mem = epc->windows[i];
+
+ if (phys_addr >= mem->window.phys_base &&
+ phys_addr < (mem->window.phys_base + mem->window.size))
+ return mem;
+ }
+
+ return NULL;
+}
+
/**
* pci_epc_mem_free_addr() - free the allocated memory address
* @epc: the EPC device on which memory was allocated
@@ -159,14 +234,23 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
void __iomem *virt_addr, size_t size)
{
+ struct pci_epc_mem *mem;
+ unsigned int page_shift;
+ size_t page_size;
int pageno;
- struct pci_epc_mem *mem = epc->mem;
- unsigned int page_shift = ilog2(mem->page_size);
int order;
+ mem = pci_epc_get_matching_window(epc, phys_addr);
+ if (!mem) {
+ pr_err("failed to get matching window\n");
+ return;
+ }
+
+ page_size = mem->window.page_size;
+ page_shift = ilog2(page_size);
iounmap(virt_addr);
- pageno = (phys_addr - mem->phys_base) >> page_shift;
- size = ALIGN(size, mem->page_size);
+ pageno = (phys_addr - mem->window.phys_base) >> page_shift;
+ size = ALIGN(size, page_size);
order = pci_epc_mem_get_order(mem, size);
mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index b3869951c0eb..b4c92cee13f8 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -385,19 +385,12 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
- union acpi_object params[2];
- struct acpi_object_list arg_list;
list_for_each_entry(func, &slot->funcs, sibling) {
- arg_list.count = 2;
- arg_list.pointer = params;
- params[0].type = ACPI_TYPE_INTEGER;
- params[0].integer.value = ACPI_ADR_SPACE_PCI_CONFIG;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 1;
/* _REG is optional, we don't care if it fails */
- acpi_evaluate_object(func_to_handle(func), "_REG", &arg_list,
- NULL);
+ acpi_evaluate_reg(func_to_handle(func),
+ ACPI_ADR_SPACE_PCI_CONFIG,
+ ACPI_REG_CONNECT);
}
}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index ae44f46d1bf3..4fd200d8b0a9 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -148,8 +148,6 @@ struct controller {
#define MRL_SENS(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_MRLSP)
#define ATTN_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_AIP)
#define PWR_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PIP)
-#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS)
-#define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP)
#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19)
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 312cc45c44c7..bf779f291f15 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -275,7 +275,7 @@ static int pciehp_suspend(struct pcie_device *dev)
* If the port is already runtime suspended we can keep it that
* way.
*/
- if (dev_pm_smart_suspend_and_suspended(&dev->port->dev))
+ if (dev_pm_skip_suspend(&dev->port->dev))
return 0;
pciehp_disable_interrupt(dev);
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 6504869efabc..9887c9de08c3 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -435,7 +435,7 @@ static int rpaphp_drc_add_slot(struct device_node *dn)
*/
int rpaphp_add_slot(struct device_node *dn)
{
- if (!dn->name || strcmp(dn->name, "pci"))
+ if (!of_node_name_eq(dn, "pci"))
return 0;
if (of_find_property(dn, "ibm,drc-info", NULL))
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index f7f13ee5d06e..6e85885b554c 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -164,7 +164,7 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
int shpchp_configure_device(struct slot *p_slot);
-int shpchp_unconfigure_device(struct slot *p_slot);
+void shpchp_unconfigure_device(struct slot *p_slot);
void cleanup_slots(struct controller *ctrl);
void shpchp_queue_pushbutton_work(struct work_struct *work);
int shpc_init(struct controller *ctrl, struct pci_dev *pdev);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 078003dcde5b..afdc52d1cae7 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -341,8 +341,7 @@ static int remove_board(struct slot *p_slot)
u8 hp_slot;
int rc;
- if (shpchp_unconfigure_device(p_slot))
- return(1);
+ shpchp_unconfigure_device(p_slot);
hp_slot = p_slot->device - ctrl->slot_device_offset;
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 115701301487..36db0c3c4ea6 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -61,9 +61,8 @@ int shpchp_configure_device(struct slot *p_slot)
return ret;
}
-int shpchp_unconfigure_device(struct slot *p_slot)
+void shpchp_unconfigure_device(struct slot *p_slot)
{
- int rc = 0;
struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
struct pci_dev *dev, *temp;
struct controller *ctrl = p_slot->ctrl;
@@ -83,6 +82,4 @@ int shpchp_unconfigure_device(struct slot *p_slot)
}
pci_unlock_rescan_remove();
- return rc;
}
-
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 81ceeaa6f1d5..27839cd2459f 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -592,7 +592,7 @@ int of_pci_get_max_link_speed(struct device_node *node)
u32 max_link_speed;
if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
- max_link_speed > 4)
+ max_link_speed == 0 || max_link_speed > 4)
return -EINVAL;
return max_link_speed;
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index b73b10bce0df..e8e444eeb1cd 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -282,6 +282,8 @@ static const struct pci_p2pdma_whitelist_entry {
} pci_p2pdma_whitelist[] = {
/* AMD ZEN */
{PCI_VENDOR_ID_AMD, 0x1450, 0},
+ {PCI_VENDOR_ID_AMD, 0x15d0, 0},
+ {PCI_VENDOR_ID_AMD, 0x1630, 0},
/* Intel Xeon E5/Core i7 */
{PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE},
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d21969fba6ab..7224b1e5f2a8 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -948,7 +948,7 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev)
* Look for a special _DSD property for the root port and if it
* is set we know the hierarchy behind it supports D3 just fine.
*/
- root = pci_find_pcie_root_port(dev);
+ root = pcie_find_root_port(dev);
if (!root)
return false;
@@ -1128,7 +1128,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
return;
obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
- RESET_DELAY_DSM, NULL);
+ DSM_PCI_POWER_ON_RESET_DELAY, NULL);
if (!obj)
return;
@@ -1193,7 +1193,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
pdev->d3cold_delay = 0;
obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
- FUNCTION_DELAY_DSM, NULL);
+ DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
if (!obj)
return;
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 4f4f54bc732e..ccf26d12ec61 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -24,6 +24,17 @@
#define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
+/**
+ * struct pci_bridge_reg_behavior - register bits behaviors
+ * @ro: Read-Only bits
+ * @rw: Read-Write bits
+ * @w1c: Write-1-to-Clear bits
+ *
+ * Reads and writes are filtered by the specified behavior. Any bit not
+ * declared is assumed 'Reserved' and will return 0 on reads, per PCIe 5.0:
+ * "Reserved register fields must be read only and must return 0 (all 0's for
+ * multi-bit fields) when read".
+ */
struct pci_bridge_reg_behavior {
/* Read-only bits */
u32 ro;
@@ -33,9 +44,6 @@ struct pci_bridge_reg_behavior {
/* Write-1-to-clear bits */
u32 w1c;
-
- /* Reserved bits (hardwired to 0) */
- u32 rsvd;
};
static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
@@ -49,7 +57,6 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
PCI_COMMAND_FAST_BACK) |
(PCI_STATUS_CAP_LIST | PCI_STATUS_66MHZ |
PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16),
- .rsvd = GENMASK(15, 10) | ((BIT(6) | GENMASK(3, 0)) << 16),
.w1c = PCI_STATUS_ERROR_BITS << 16,
},
[PCI_CLASS_REVISION / 4] = { .ro = ~0 },
@@ -96,8 +103,6 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
GENMASK(11, 8) | GENMASK(3, 0)),
.w1c = PCI_STATUS_ERROR_BITS << 16,
-
- .rsvd = ((BIT(6) | GENMASK(4, 0)) << 16),
},
[PCI_MEMORY_BASE / 4] = {
@@ -130,12 +135,10 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
[PCI_CAPABILITY_LIST / 4] = {
.ro = GENMASK(7, 0),
- .rsvd = GENMASK(31, 8),
},
[PCI_ROM_ADDRESS1 / 4] = {
.rw = GENMASK(31, 11) | BIT(0),
- .rsvd = GENMASK(10, 1),
},
/*
@@ -158,8 +161,6 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
.ro = (GENMASK(15, 8) | ((PCI_BRIDGE_CTL_FAST_BACK) << 16)),
.w1c = BIT(10) << 16,
-
- .rsvd = (GENMASK(15, 12) | BIT(4)) << 16,
},
};
@@ -181,31 +182,29 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
.rw = GENMASK(15, 0),
/*
- * Device status register has 4 bits W1C, then 2 bits
- * RO, the rest is reserved
+ * Device status register has bit 6 and bits [3:0] W1C,
+ * bits [5:4] RO, the rest is reserved
*/
- .w1c = GENMASK(19, 16),
- .ro = GENMASK(20, 19),
- .rsvd = GENMASK(31, 21),
+ .w1c = (BIT(6) | GENMASK(3, 0)) << 16,
+ .ro = GENMASK(5, 4) << 16,
},
[PCI_EXP_LNKCAP / 4] = {
/* All bits are RO, except bit 23 which is reserved */
.ro = lower_32_bits(~BIT(23)),
- .rsvd = BIT(23),
},
[PCI_EXP_LNKCTL / 4] = {
/*
- * Link control has bits [1:0] and [11:3] RW, the
- * other bits are reserved.
- * Link status has bits [13:0] RO, and bits [14:15]
+ * Link control has bits [15:14], [11:3] and [1:0] RW, the
+ * rest is reserved.
+ *
+ * Link status has bits [13:0] RO, and bits [15:14]
* W1C.
*/
- .rw = GENMASK(11, 3) | GENMASK(1, 0),
+ .rw = GENMASK(15, 14) | GENMASK(11, 3) | GENMASK(1, 0),
.ro = GENMASK(13, 0) << 16,
.w1c = GENMASK(15, 14) << 16,
- .rsvd = GENMASK(15, 12) | BIT(2),
},
[PCI_EXP_SLTCAP / 4] = {
@@ -214,19 +213,18 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
[PCI_EXP_SLTCTL / 4] = {
/*
- * Slot control has bits [12:0] RW, the rest is
+ * Slot control has bits [14:0] RW, the rest is
* reserved.
*
- * Slot status has a mix of W1C and RO bits, as well
- * as reserved bits.
+ * Slot status has bit 8 and bits [4:0] W1C, bits [7:5] RO, the
+ * rest is reserved.
*/
- .rw = GENMASK(12, 0),
+ .rw = GENMASK(14, 0),
.w1c = (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16,
.ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS |
PCI_EXP_SLTSTA_EIS) << 16,
- .rsvd = GENMASK(15, 12) | (GENMASK(15, 9) << 16),
},
[PCI_EXP_RTCTL / 4] = {
@@ -234,19 +232,21 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
* Root control has bits [4:0] RW, the rest is
* reserved.
*
- * Root status has bit 0 RO, the rest is reserved.
+ * Root capabilities has bit 0 RO, the rest is reserved.
*/
.rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
PCI_EXP_RTCTL_CRSSVE),
.ro = PCI_EXP_RTCAP_CRSVIS << 16,
- .rsvd = GENMASK(15, 5) | (GENMASK(15, 1) << 16),
},
[PCI_EXP_RTSTA / 4] = {
+ /*
+ * Root status has bit 17 and bits [15:0] RO, bit 16 W1C, the rest
+ * is reserved.
+ */
.ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING,
.w1c = PCI_EXP_RTSTA_PME,
- .rsvd = GENMASK(31, 18),
},
};
@@ -354,7 +354,8 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
* Make sure we never return any reserved bit with a value
* different from 0.
*/
- *value &= ~behavior[reg / 4].rsvd;
+ *value &= behavior[reg / 4].ro | behavior[reg / 4].rw |
+ behavior[reg / 4].w1c;
if (size == 1)
*value = (*value >> (8 * (where & 3))) & 0xff;
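Dropping the rsvd mask works because the three remaining masks, by construction, cover every architected bit: anything outside ro | rw | w1c is reserved and must read as zero. A condensed sketch of the filter:

	/* bits no behavior mask claims are reserved and read as 0 */
	u32 allowed = behavior[reg / 4].ro | behavior[reg / 4].rw |
		      behavior[reg / 4].w1c;

	*value &= allowed;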
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 0454ca0e4e3f..da6510af1221 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -776,7 +776,7 @@ static int pci_pm_suspend(struct device *dev)
static int pci_pm_suspend_late(struct device *dev)
{
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
@@ -789,10 +789,8 @@ static int pci_pm_suspend_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev->power.may_skip_resume = true;
+ if (dev_pm_skip_suspend(dev))
return 0;
- }
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -880,8 +878,8 @@ Fixup:
* pci_pm_complete() to take care of fixing up the device's state
* anyway, if need be.
*/
- dev->power.may_skip_resume = device_may_wakeup(dev) ||
- !device_can_wakeup(dev);
+ if (device_can_wakeup(dev) && !device_may_wakeup(dev))
+ dev->power.may_skip_resume = false;
return 0;
}
@@ -893,18 +891,10 @@ static int pci_pm_resume_noirq(struct device *dev)
pci_power_t prev_state = pci_dev->current_state;
bool skip_bus_pm = pci_dev->skip_bus_pm;
- if (dev_pm_may_skip_resume(dev))
+ if (dev_pm_skip_resume(dev))
return 0;
/*
- * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
- * during system suspend, so update their runtime PM status to "active"
- * as they are going to be put into D0 shortly.
- */
- if (dev_pm_smart_suspend_and_suspended(dev))
- pm_runtime_set_active(dev);
-
- /*
* In the suspend-to-idle case, devices left in D0 during suspend will
* stay in D0, so it is not necessary to restore or update their
* configuration here and attempting to put them into D0 again is
@@ -928,6 +918,14 @@ static int pci_pm_resume_noirq(struct device *dev)
return 0;
}
+static int pci_pm_resume_early(struct device *dev)
+{
+ if (dev_pm_skip_resume(dev))
+ return 0;
+
+ return pm_generic_resume_early(dev);
+}
+
static int pci_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -961,6 +959,7 @@ static int pci_pm_resume(struct device *dev)
#define pci_pm_suspend_late NULL
#define pci_pm_suspend_noirq NULL
#define pci_pm_resume NULL
+#define pci_pm_resume_early NULL
#define pci_pm_resume_noirq NULL
#endif /* !CONFIG_SUSPEND */
@@ -1127,7 +1126,7 @@ static int pci_pm_poweroff(struct device *dev)
static int pci_pm_poweroff_late(struct device *dev)
{
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
@@ -1140,7 +1139,7 @@ static int pci_pm_poweroff_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
if (pci_has_legacy_pm_support(pci_dev))
@@ -1358,6 +1357,7 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
.suspend = pci_pm_suspend,
.suspend_late = pci_pm_suspend_late,
.resume = pci_pm_resume,
+ .resume_early = pci_pm_resume_early,
.freeze = pci_pm_freeze,
.thaw = pci_pm_thaw,
.poweroff = pci_pm_poweroff,
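dev_pm_skip_suspend()/dev_pm_skip_resume() replace the older dev_pm_smart_suspend_and_suspended()/dev_pm_may_skip_resume() pair, with the PM core now tracking may_skip_resume itself. A generic driver sketch of the resulting callback shape (foo_* names hypothetical):

	static int foo_suspend_late(struct device *dev)
	{
		/* runtime-suspended with DPM_FLAG_SMART_SUSPEND: leave as is */
		if (dev_pm_skip_suspend(dev))
			return 0;

		return foo_do_suspend(dev);
	}

	static int foo_resume_early(struct device *dev)
	{
		/* the core decided the device may stay suspended */
		if (dev_pm_skip_resume(dev))
			return 0;

		return foo_do_resume(dev);
	}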
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index a5910f942857..707dd9808676 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -178,7 +178,7 @@ static int dsm_get_label(struct device *dev, char *buf,
return -1;
obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 0x2,
- DEVICE_LABEL_DSM, NULL);
+ DSM_PCI_DEVICE_NAME, NULL);
if (!obj)
return -1;
@@ -218,7 +218,7 @@ static bool device_has_dsm(struct device *dev)
return false;
return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
- 1 << DEVICE_LABEL_DSM);
+ 1 << DSM_PCI_DEVICE_NAME);
}
static umode_t acpi_index_string_exist(struct kobject *kobj,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 595fcf59843f..ce096272f52b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -752,30 +752,6 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
EXPORT_SYMBOL(pci_find_resource);
/**
- * pci_find_pcie_root_port - return PCIe Root Port
- * @dev: PCI device to query
- *
- * Traverse up the parent chain and return the PCIe Root Port PCI Device
- * for a given PCI Device.
- */
-struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
-{
- struct pci_dev *bridge, *highest_pcie_bridge = dev;
-
- bridge = pci_upstream_bridge(dev);
- while (bridge && pci_is_pcie(bridge)) {
- highest_pcie_bridge = bridge;
- bridge = pci_upstream_bridge(bridge);
- }
-
- if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
- return NULL;
-
- return highest_pcie_bridge;
-}
-EXPORT_SYMBOL(pci_find_pcie_root_port);
-
-/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
* @dev: the PCI device to operate on
* @pos: config space offset of status word
@@ -868,7 +844,9 @@ static inline bool platform_pci_need_resume(struct pci_dev *dev)
static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
- return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
+ if (pci_platform_pm && pci_platform_pm->bridge_d3)
+ return pci_platform_pm->bridge_d3(dev);
+ return false;
}
/**
@@ -1578,7 +1556,7 @@ EXPORT_SYMBOL(pci_restore_state);
struct pci_saved_state {
u32 config_space[16];
- struct pci_cap_saved_data cap[0];
+ struct pci_cap_saved_data cap[];
};
/**
@@ -4660,7 +4638,8 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
* pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
- * @delay: Delay to wait after link has become active (in ms)
+ * @delay: Delay to wait after link has become active (in ms). Specify %0
+ * for no delay.
*
* Use this to wait till link becomes active or inactive.
*/
@@ -4673,10 +4652,10 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
/*
* Some controllers might not implement link active reporting. In this
- * case, we wait for 1000 + 100 ms.
+ * case, we wait for 1000 ms + any delay requested by the caller.
*/
if (!pdev->link_active_reporting) {
- msleep(1100);
+ msleep(timeout + delay);
return true;
}
@@ -4701,7 +4680,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
msleep(10);
timeout -= 10;
}
- if (active && ret)
+ if (active && ret && delay)
msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
@@ -4822,17 +4801,28 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
if (!pcie_downstream_port(dev))
return;
- if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
- pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
- msleep(delay);
- } else {
- pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
- delay);
- if (!pcie_wait_for_link_delay(dev, true, delay)) {
+ /*
+ * Per PCIe r5.0, sec 6.6.1, for downstream ports that support
+ * speeds > 5 GT/s, we must wait for link training to complete
+ * before the mandatory delay.
+ *
+ * We can only tell when link training completes via DLL Link
+ * Active, which is required for downstream ports that support
+ * speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common
+ * devices do not implement Link Active reporting even when it's
+ * required, so we'll check for that directly instead of checking
+ * the supported link speed. We assume devices without Link Active
+ * reporting can train in 100 ms regardless of speed.
+ */
+ if (dev->link_active_reporting) {
+ pci_dbg(dev, "waiting for link to train\n");
+ if (!pcie_wait_for_link_delay(dev, true, 0)) {
/* Did not train, no need to wait any further */
return;
}
}
+ pci_dbg(child, "waiting %d ms to become accessible\n", delay);
+ msleep(delay);
if (!pci_device_is_present(child)) {
pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
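Net effect of the rework, as a sketch: Link Active reporting, when implemented, gates the wait; the mandatory delay then applies either way, with 100 ms assumed sufficient for training when the bit is absent:

	if (dev->link_active_reporting) {
		if (!pcie_wait_for_link_delay(dev, true, 0))
			return;		/* link never trained; give up */
	}
	msleep(delay);			/* mandatory delay, commonly 100 ms */
	if (!pci_device_is_present(child))
		msleep(delay);		/* one more grace period */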
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 66386811cfde..9cd31331aee9 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -25,7 +25,6 @@ config PCIEAER
bool "PCI Express Advanced Error Reporting support"
depends on PCIEPORTBUS
select RAS
- default y
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) driver support. Error reporting messages sent to Root
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index f4274d301235..3acf56683915 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -136,22 +136,18 @@ static const char * const ecrc_policy_str[] = {
*/
static int enable_ecrc_checking(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 reg32;
- if (!pci_is_pcie(dev))
+ if (!aer)
return -ENODEV;
- pos = dev->aer_cap;
- if (!pos)
- return -ENODEV;
-
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &reg32);
if (reg32 & PCI_ERR_CAP_ECRC_GENC)
reg32 |= PCI_ERR_CAP_ECRC_GENE;
if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
reg32 |= PCI_ERR_CAP_ECRC_CHKE;
- pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_CAP, reg32);
return 0;
}
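Since dev->aer_cap is only ever set for PCIe devices, the separate pci_is_pcie() test is redundant and the cached offset doubles as the capability-present check. The pattern recurs throughout the rest of this file:

	int aer = dev->aer_cap;		/* cached at enumeration; 0 if absent */
	u32 reg32;

	if (!aer)
		return -ENODEV;

	pci_read_config_dword(dev, aer + PCI_ERR_CAP, &reg32);
	reg32 |= PCI_ERR_CAP_ECRC_GENE;		/* example modification */
	pci_write_config_dword(dev, aer + PCI_ERR_CAP, reg32);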
@@ -164,19 +160,15 @@ static int enable_ecrc_checking(struct pci_dev *dev)
*/
static int disable_ecrc_checking(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 reg32;
- if (!pci_is_pcie(dev))
+ if (!aer)
return -ENODEV;
- pos = dev->aer_cap;
- if (!pos)
- return -ENODEV;
-
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &reg32);
reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
- pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_CAP, reg32);
return 0;
}
@@ -217,142 +209,22 @@ void pcie_ecrc_get_policy(char *str)
}
#endif /* CONFIG_PCIE_ECRC */
-#ifdef CONFIG_ACPI_APEI
-static inline int hest_match_pci(struct acpi_hest_aer_common *p,
- struct pci_dev *pci)
-{
- return ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
- ACPI_HEST_BUS(p->bus) == pci->bus->number &&
- p->device == PCI_SLOT(pci->devfn) &&
- p->function == PCI_FUNC(pci->devfn);
-}
-
-static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
- struct pci_dev *dev)
-{
- u16 hest_type = hest_hdr->type;
- u8 pcie_type = pci_pcie_type(dev);
-
- if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
- pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
- (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
- pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
- (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
- (dev->class >> 16) == PCI_BASE_CLASS_BRIDGE))
- return true;
- return false;
-}
-
-struct aer_hest_parse_info {
- struct pci_dev *pci_dev;
- int firmware_first;
-};
-
-static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
-{
- if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
- hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
- hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
- return 1;
- return 0;
-}
-
-static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
-{
- struct aer_hest_parse_info *info = data;
- struct acpi_hest_aer_common *p;
- int ff;
-
- if (!hest_source_is_pcie_aer(hest_hdr))
- return 0;
-
- p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
- ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
-
- /*
- * If no specific device is supplied, determine whether
- * FIRMWARE_FIRST is set for *any* PCIe device.
- */
- if (!info->pci_dev) {
- info->firmware_first |= ff;
- return 0;
- }
-
- /* Otherwise, check the specific device */
- if (p->flags & ACPI_HEST_GLOBAL) {
- if (hest_match_type(hest_hdr, info->pci_dev))
- info->firmware_first = ff;
- } else
- if (hest_match_pci(p, info->pci_dev))
- info->firmware_first = ff;
-
- return 0;
-}
-
-static void aer_set_firmware_first(struct pci_dev *pci_dev)
-{
- int rc;
- struct aer_hest_parse_info info = {
- .pci_dev = pci_dev,
- .firmware_first = 0,
- };
-
- rc = apei_hest_parse(aer_hest_parse, &info);
-
- if (rc)
- pci_dev->__aer_firmware_first = 0;
- else
- pci_dev->__aer_firmware_first = info.firmware_first;
- pci_dev->__aer_firmware_first_valid = 1;
-}
+#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
+ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
-int pcie_aer_get_firmware_first(struct pci_dev *dev)
+int pcie_aer_is_native(struct pci_dev *dev)
{
- if (!pci_is_pcie(dev))
- return 0;
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
- if (pcie_ports_native)
+ if (!dev->aer_cap)
return 0;
- if (!dev->__aer_firmware_first_valid)
- aer_set_firmware_first(dev);
- return dev->__aer_firmware_first;
-}
-
-static bool aer_firmware_first;
-
-/**
- * aer_acpi_firmware_first - Check if APEI should control AER.
- */
-bool aer_acpi_firmware_first(void)
-{
- static bool parsed = false;
- struct aer_hest_parse_info info = {
- .pci_dev = NULL, /* Check all PCIe devices */
- .firmware_first = 0,
- };
-
- if (pcie_ports_native)
- return false;
-
- if (!parsed) {
- apei_hest_parse(aer_hest_parse, &info);
- aer_firmware_first = info.firmware_first;
- parsed = true;
- }
- return aer_firmware_first;
+ return pcie_ports_native || host->native_aer;
}
-#endif
-
-#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
- PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
- if (pcie_aer_get_firmware_first(dev))
- return -EIO;
-
- if (!dev->aer_cap)
+ if (!pcie_aer_is_native(dev))
return -EIO;
return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
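pcie_aer_is_native() collapses the old HEST walk into two conditions: the _OSC negotiation result cached in the host bridge (native_aer) and the pcie_ports=native command-line override. Callers reduce to a single gate:

	if (!pcie_aer_is_native(dev))
		return -EIO;	/* firmware owns the AER registers */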
@@ -361,7 +233,7 @@ EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
- if (pcie_aer_get_firmware_first(dev))
+ if (!pcie_aer_is_native(dev))
return -EIO;
return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
@@ -379,22 +251,18 @@ void pci_aer_clear_device_status(struct pci_dev *dev)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 status, sev;
- pos = dev->aer_cap;
- if (!pos)
- return -EIO;
-
- if (pcie_aer_get_firmware_first(dev))
+ if (!pcie_aer_is_native(dev))
return -EIO;
/* Clear status bits for ERR_NONFATAL errors only */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, &sev);
status &= ~sev;
if (status)
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
return 0;
}
@@ -402,22 +270,18 @@ EXPORT_SYMBOL_GPL(pci_aer_clear_nonfatal_status);
void pci_aer_clear_fatal_status(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 status, sev;
- pos = dev->aer_cap;
- if (!pos)
- return;
-
- if (pcie_aer_get_firmware_first(dev))
+ if (!pcie_aer_is_native(dev))
return;
/* Clear status bits for ERR_FATAL errors only */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, &sev);
status &= sev;
if (status)
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
}
/**
@@ -431,35 +295,31 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev)
*/
int pci_aer_raw_clear_status(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 status;
int port_type;
- if (!pci_is_pcie(dev))
- return -ENODEV;
-
- pos = dev->aer_cap;
- if (!pos)
+ if (!aer)
return -EIO;
port_type = pci_pcie_type(dev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &status);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, status);
}
- pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS, &status);
+ pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS, status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
return 0;
}
int pci_aer_clear_status(struct pci_dev *dev)
{
- if (pcie_aer_get_firmware_first(dev))
+ if (!pcie_aer_is_native(dev))
return -EIO;
return pci_aer_raw_clear_status(dev);
@@ -467,12 +327,11 @@ int pci_aer_clear_status(struct pci_dev *dev)
void pci_save_aer_state(struct pci_dev *dev)
{
+ int aer = dev->aer_cap;
struct pci_cap_saved_state *save_state;
u32 *cap;
- int pos;
- pos = dev->aer_cap;
- if (!pos)
+ if (!aer)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
@@ -480,22 +339,21 @@ void pci_save_aer_state(struct pci_dev *dev)
return;
cap = &save_state->cap.data[0];
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, cap++);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, cap++);
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, cap++);
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, cap++);
if (pcie_cap_has_rtctl(dev))
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, cap++);
}
void pci_restore_aer_state(struct pci_dev *dev)
{
+ int aer = dev->aer_cap;
struct pci_cap_saved_state *save_state;
u32 *cap;
- int pos;
- pos = dev->aer_cap;
- if (!pos)
+ if (!aer)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
@@ -503,12 +361,12 @@ void pci_restore_aer_state(struct pci_dev *dev)
return;
cap = &save_state->cap.data[0];
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, *cap++);
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, *cap++);
- pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, *cap++);
- pci_write_config_dword(dev, pos + PCI_ERR_CAP, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_COR_MASK, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_CAP, *cap++);
if (pcie_cap_has_rtctl(dev))
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, *cap++);
}
void pci_aer_init(struct pci_dev *dev)
@@ -939,7 +797,7 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
*/
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
- int pos;
+ int aer = dev->aer_cap;
u32 status, mask;
u16 reg16;
@@ -974,17 +832,16 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
if (!(reg16 & PCI_EXP_AER_FLAGS))
return false;
- pos = dev->aer_cap;
- if (!pos)
+ if (!aer)
return false;
/* Check if error is recorded */
if (e_info->severity == AER_CORRECTABLE) {
- pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS, &status);
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, &mask);
} else {
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
}
if (status & ~mask)
return true;
@@ -1055,16 +912,15 @@ static bool find_source_device(struct pci_dev *parent,
*/
static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
{
- int pos;
+ int aer = dev->aer_cap;
if (info->severity == AER_CORRECTABLE) {
/*
* Correctable error does not need software intervention.
* No need to go through error recovery process.
*/
- pos = dev->aer_cap;
- if (pos)
- pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
+ if (aer)
+ pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS,
info->status);
pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
@@ -1155,22 +1011,21 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
*/
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
- int pos, temp;
+ int aer = dev->aer_cap;
+ int temp;
/* Must reset in this function */
info->status = 0;
info->tlp_header_valid = 0;
- pos = dev->aer_cap;
-
/* The device might not support AER */
- if (!pos)
+ if (!aer)
return 0;
if (info->severity == AER_CORRECTABLE) {
- pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS,
&info->status);
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK,
&info->mask);
if (!(info->status & ~info->mask))
return 0;
@@ -1179,27 +1034,27 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
&info->status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
&info->mask);
if (!(info->status & ~info->mask))
return 0;
/* Get First Error Pointer */
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &temp);
info->first_error = PCI_ERR_CAP_FEP(temp);
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
pci_read_config_dword(dev,
- pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
+ aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
pci_read_config_dword(dev,
- pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
+ aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
pci_read_config_dword(dev,
- pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
+ aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
pci_read_config_dword(dev,
- pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
+ aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
}
}
@@ -1305,15 +1160,15 @@ static irqreturn_t aer_irq(int irq, void *context)
struct pcie_device *pdev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(pdev);
struct pci_dev *rp = rpc->rpd;
+ int aer = rp->aer_cap;
struct aer_err_source e_src = {};
- int pos = rp->aer_cap;
- pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status);
+ pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &e_src.status);
if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
return IRQ_NONE;
- pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
- pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status);
+ pci_read_config_dword(rp, aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
+ pci_write_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, e_src.status);
if (!kfifo_put(&rpc->aer_fifo, e_src))
return IRQ_HANDLED;
@@ -1365,7 +1220,7 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
static void aer_enable_rootport(struct aer_rpc *rpc)
{
struct pci_dev *pdev = rpc->rpd;
- int aer_pos;
+ int aer = pdev->aer_cap;
u16 reg16;
u32 reg32;
@@ -1377,14 +1232,13 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
SYSTEM_ERROR_INTR_ON_MESG_MASK);
- aer_pos = pdev->aer_cap;
/* Clear error status */
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_COR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_COR_STATUS, reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, reg32);
/*
* Enable error reporting for the root port device and downstream port
@@ -1393,9 +1247,9 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
set_downstream_devices_error_reporting(pdev, true);
/* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
}
/**
@@ -1407,8 +1261,8 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
static void aer_disable_rootport(struct aer_rpc *rpc)
{
struct pci_dev *pdev = rpc->rpd;
+ int aer = pdev->aer_cap;
u32 reg32;
- int pos;
/*
* Disable error reporting for the root port device and downstream port
@@ -1416,15 +1270,14 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
*/
set_downstream_devices_error_reporting(pdev, false);
- pos = pdev->aer_cap;
/* Disable Root's interrupt in response to error messages */
- pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
/* Clear Root's error status reg */
- pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, reg32);
}
/**
@@ -1481,28 +1334,27 @@ static int aer_probe(struct pcie_device *dev)
*/
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
+ int aer = dev->aer_cap;
u32 reg32;
- int pos;
int rc;
- pos = dev->aer_cap;
/* Disable Root's interrupt in response to error messages */
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32);
rc = pci_bus_error_reset(dev);
pci_info(dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, reg32);
/* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32);
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
@@ -1523,7 +1375,7 @@ static struct pcie_port_service_driver aerdriver = {
*/
int __init pcie_aer_init(void)
{
- if (!pci_aer_available() || aer_acpi_firmware_first())
+ if (!pci_aer_available())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 2378ed692534..b17e5ffd31b1 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -628,16 +628,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Setup initial capable state. Will be updated later */
link->aspm_capable = link->aspm_support;
- /*
- * If the downstream component has pci bridge function, don't
- * do ASPM for now.
- */
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
- link->aspm_disable = ASPM_STATE_ALL;
- break;
- }
- }
/* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 762170423fdd..daa9a4153776 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -284,7 +284,7 @@ static int dpc_probe(struct pcie_device *dev)
int status;
u16 ctl, cap;
- if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native)
+ if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
@@ -301,6 +301,7 @@ static int dpc_probe(struct pcie_device *dev)
ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
+ pci_info(pdev, "enabled with IRQ %d\n", dev->irq);
pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index 594622a6cb16..a6b9b479b97a 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -148,11 +148,11 @@ static void edr_handle_event(acpi_handle handle, u32 event, void *data)
pci_ers_result_t estate = PCI_ERS_RESULT_DISCONNECT;
u16 status;
- pci_info(pdev, "ACPI event %#x received\n", event);
-
if (event != ACPI_NOTIFY_DISCONNECT_RECOVER)
return;
+ pci_info(pdev, "EDR event received\n");
+
/* Locate the port which issued EDR event */
edev = acpi_dpc_port_get(pdev);
if (!edev) {
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index f38e6c19dd50..6a32970bb731 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -408,7 +408,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
/**
* pcie_pme_resume - Resume PCIe PME service device.
- * @srv - PCIe service device to resume.
+ * @srv: PCIe service device to resume.
*/
static int pcie_pme_resume(struct pcie_device *srv)
{
@@ -431,7 +431,7 @@ static int pcie_pme_resume(struct pcie_device *srv)
/**
* pcie_pme_remove - Prepare PCIe PME service device for removal.
- * @srv - PCIe service device to remove.
+ * @srv: PCIe service device to remove.
*/
static void pcie_pme_remove(struct pcie_device *srv)
{
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 64b5e081cdb2..af7cf237432a 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -29,8 +29,10 @@ extern bool pcie_ports_dpc_native;
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pcie_aer_init(void) { return 0; }
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
#ifdef CONFIG_HOTPLUG_PCI_PCIE
@@ -147,16 +149,5 @@ static inline bool pcie_pme_no_msi(void) { return false; }
static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
-#ifdef CONFIG_ACPI_APEI
-int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
-#else
-static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
-{
- if (pci_dev->__aer_firmware_first_valid)
- return pci_dev->__aer_firmware_first;
- return 0;
-}
-#endif
-
struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 160d67c59310..3acf151ae015 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -115,7 +115,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
- dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NEVER_SKIP |
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE |
DPM_FLAG_SMART_SUSPEND);
if (pci_bridge_d3_possible(dev)) {
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 9361f3aa26ab..357a454cafa0 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -39,10 +39,6 @@ void pci_ptm_init(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return;
-
/*
* Enable PTM only on interior devices (root ports, switch ports,
* etc.) on the assumption that it causes no link traffic until an
@@ -52,6 +48,23 @@ void pci_ptm_init(struct pci_dev *dev)
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
return;
+ /*
+ * Switch Downstream Ports are not permitted to have a PTM
+ * capability; their PTM behavior is controlled by the Upstream
+ * Port (PCIe r5.0, sec 7.9.16).
+ */
+ ups = pci_upstream_bridge(dev);
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
+ ups && ups->ptm_enabled) {
+ dev->ptm_granularity = ups->ptm_granularity;
+ dev->ptm_enabled = 1;
+ return;
+ }
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!pos)
+ return;
+
pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
@@ -61,7 +74,6 @@ void pci_ptm_init(struct pci_dev *dev)
* the spec recommendation (PCIe r3.1, sec 7.32.3), select the
* furthest upstream Time Source as the PTM Root.
*/
- ups = pci_upstream_bridge(dev);
if (ups && ups->ptm_enabled) {
ctrl = PCI_PTM_CTRL_ENABLE;
if (ups->ptm_granularity == 0)
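Because a switch downstream port carries no PTM capability of its own, its state is copied rather than probed. One way to express the inheritance step as a helper (name hypothetical):

	static void ptm_inherit(struct pci_dev *dn_port,
				const struct pci_dev *up_port)
	{
		/* mirror the upstream port's PTM state on the downstream port */
		dn_port->ptm_granularity = up_port->ptm_granularity;
		dn_port->ptm_enabled = 1;
	}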
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 77b8a145c39b..2f66988cea25 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -565,7 +565,7 @@ static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
return b;
}
-static void devm_pci_release_host_bridge_dev(struct device *dev)
+static void pci_release_host_bridge_dev(struct device *dev)
{
struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
@@ -574,12 +574,7 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
pci_free_resource_list(&bridge->windows);
pci_free_resource_list(&bridge->dma_ranges);
-}
-
-static void pci_release_host_bridge_dev(struct device *dev)
-{
- devm_pci_release_host_bridge_dev(dev);
- kfree(to_pci_host_bridge(dev));
+ kfree(bridge);
}
static void pci_init_host_bridge(struct pci_host_bridge *bridge)
@@ -599,6 +594,8 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_pme = 1;
bridge->native_ltr = 1;
bridge->native_dpc = 1;
+
+ device_initialize(&bridge->dev);
}
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -616,17 +613,25 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
}
EXPORT_SYMBOL(pci_alloc_host_bridge);
+static void devm_pci_alloc_host_bridge_release(void *data)
+{
+ pci_free_host_bridge(data);
+}
+
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
size_t priv)
{
+ int ret;
struct pci_host_bridge *bridge;
- bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
+ bridge = pci_alloc_host_bridge(priv);
if (!bridge)
return NULL;
- pci_init_host_bridge(bridge);
- bridge->dev.release = devm_pci_release_host_bridge_dev;
+ ret = devm_add_action_or_reset(dev, devm_pci_alloc_host_bridge_release,
+ bridge);
+ if (ret)
+ return NULL;
return bridge;
}
@@ -634,10 +639,7 @@ EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
- pci_free_resource_list(&bridge->windows);
- pci_free_resource_list(&bridge->dma_ranges);
-
- kfree(bridge);
+ put_device(&bridge->dev);
}
EXPORT_SYMBOL(pci_free_host_bridge);
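devm_pci_alloc_host_bridge() now rides on devm_add_action_or_reset(): if registering the action fails, the callback fires immediately, so the bridge cannot leak. The same idiom in a generic (hypothetical) probe path:

	static void my_release(void *data)
	{
		kfree(data);
	}

	static int my_probe(struct device *dev)
	{
		void *buf = kzalloc(64, GFP_KERNEL);
		int ret;

		if (!buf)
			return -ENOMEM;

		ret = devm_add_action_or_reset(dev, my_release, buf);
		if (ret)
			return ret;	/* my_release(buf) already ran */

		return 0;
	}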
@@ -908,10 +910,11 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (err)
goto free;
- err = device_register(&bridge->dev);
- if (err)
+ err = device_add(&bridge->dev);
+ if (err) {
put_device(&bridge->dev);
-
+ goto free;
+ }
bus->bridge = get_device(&bridge->dev);
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
@@ -977,7 +980,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
unregister:
put_device(&bridge->dev);
- device_unregister(&bridge->dev);
+ device_del(&bridge->dev);
free:
kfree(bus);
@@ -1822,7 +1825,7 @@ int pci_setup_device(struct pci_dev *dev)
/* Device class may be changed after fixup */
class = dev->class >> 8;
- if (dev->non_compliant_bars) {
+ if (dev->non_compliant_bars && !dev->mmio_always_on) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
@@ -1934,13 +1937,33 @@ static void pci_configure_mps(struct pci_dev *dev)
struct pci_dev *bridge = pci_upstream_bridge(dev);
int mps, mpss, p_mps, rc;
- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ if (!pci_is_pcie(dev))
return;
/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
if (dev->is_virtfn)
return;
+ /*
+ * For Root Complex Integrated Endpoints, program the maximum
+ * supported value unless limited by the PCIE_BUS_PEER2PEER case.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ mps = 128;
+ else
+ mps = 128 << dev->pcie_mpss;
+ rc = pcie_set_mps(dev, mps);
+ if (rc) {
+ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps);
+ }
+ return;
+ }
+
+ if (!bridge || !pci_is_pcie(bridge))
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
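pcie_mpss holds the 3-bit Max_Payload_Size Supported encoding, so shifting 128 by it walks the architected payload sizes:

	/* mpss 0..5 -> 128, 256, 512, 1024, 2048, 4096 bytes */
	mps = 128 << dev->pcie_mpss;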
@@ -2056,7 +2079,7 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
* For now, we only deal with Relaxed Ordering issues with Root
* Ports. Peer-to-Peer DMA is another can of worms.
*/
- root = pci_find_pcie_root_port(dev);
+ root = pcie_find_root_port(dev);
if (!root)
return;
@@ -2952,7 +2975,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
return bridge->bus;
err_out:
- kfree(bridge);
+ put_device(&bridge->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ca9ed5774eb1..812bfc32ecb8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4319,7 +4319,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED,
*/
static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
{
- struct pci_dev *root_port = pci_find_pcie_root_port(pdev);
+ struct pci_dev *root_port = pcie_find_root_port(pdev);
if (!root_port) {
pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
@@ -4682,6 +4682,20 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
}
+static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * Intel RCiEPs are required to allow p2p only on translated
+ * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16,
+ * "Root-Complex Peer to Peer Considerations".
+ */
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
+ return -ENOTTY;
+
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
@@ -4764,6 +4778,7 @@ static const struct pci_dev_acs_enabled {
/* I219 */
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
/* QCOM QDF2xxx root ports */
{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
@@ -5129,13 +5144,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
-/* FLR may cause some 82579 devices to hang */
-static void quirk_intel_no_flr(struct pci_dev *dev)
+/*
+ * FLR may cause the following devices to hang:
+ *
+ * AMD Starship/Matisse HD Audio Controller 0x1487
+ * AMD Starship USB 3.0 Host Controller 0x148c
+ * AMD Matisse USB 3.0 Host Controller 0x149c
+ * Intel 82579LM Gigabit Ethernet Controller 0x1502
+ * Intel 82579V Gigabit Ethernet Controller 0x1503
+ */
+static void quirk_no_flr(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
@@ -5568,6 +5595,19 @@ static void pci_fixup_no_d0_pme(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
+/*
+ * Devices [12d8:0x400e] and [12d8:0x400f]
+ * These devices advertise PME# support in all power states but don't
+ * reliably assert it.
+ */
+static void pci_fixup_no_pme(struct pci_dev *dev)
+{
+ pci_info(dev, "PME# is unreliable, disabling it\n");
+ dev->pme_support = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme);
+
static void apex_pci_fixup_class(struct pci_dev *pdev)
{
pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index e9c6b120cf45..95dec03d9f2a 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -160,6 +160,6 @@ void pci_remove_root_bus(struct pci_bus *bus)
host_bridge->bus = NULL;
/* remove the host bridge */
- device_unregister(&host_bridge->dev);
+ device_del(&host_bridge->dev);
}
EXPORT_SYMBOL_GPL(pci_remove_root_bus);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index bbcef1a053ab..9b94b1f16d80 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -26,6 +26,7 @@
#include "pci.h"
unsigned int pci_flags;
+EXPORT_SYMBOL_GPL(pci_flags);
struct pci_dev_resource {
struct list_head list;
@@ -583,7 +584,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
io_mask = PCI_IO_1K_RANGE_MASK;
/* Set up the top and bottom of the PCI I/O segment for this bus */
- res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -613,7 +614,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
u32 l;
/* Set up the top and bottom of the PCI Memory segment for this bus */
- res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
@@ -640,7 +641,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
/* Set up PREF base/limit */
bu = lu = 0;
- res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
@@ -707,14 +708,14 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
if (!pci_bus_clip_resource(bridge, i))
return -EINVAL; /* Clipping didn't change anything */
- switch (i - PCI_BRIDGE_RESOURCES) {
- case 0:
+ switch (i) {
+ case PCI_BRIDGE_IO_WINDOW:
pci_setup_bridge_io(bridge);
break;
- case 1:
+ case PCI_BRIDGE_MEM_WINDOW:
pci_setup_bridge_mmio(bridge);
break;
- case 2:
+ case PCI_BRIDGE_PREF_MEM_WINDOW:
pci_setup_bridge_mmio_pref(bridge);
break;
default:
@@ -735,18 +736,22 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
- struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
+ struct resource *b_res;
- b_res[1].flags |= IORESOURCE_MEM;
+ b_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+ b_res->flags |= IORESOURCE_MEM;
- if (bridge->io_window)
- b_res[0].flags |= IORESOURCE_IO;
+ if (bridge->io_window) {
+ b_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ b_res->flags |= IORESOURCE_IO;
+ }
if (bridge->pref_window) {
- b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ b_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
if (bridge->pref_64_window) {
- b_res[2].flags |= IORESOURCE_MEM_64;
- b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
+ b_res->flags |= IORESOURCE_MEM_64 |
+ PCI_PREF_RANGE_TYPE_64;
}
}
}
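The window names alias the first PCI_BRIDGE_RESOURCES slots, so existing index arithmetic keeps working; a sketch of the enum this series presumably adds to include/linux/pci.h:

	enum {
		PCI_BRIDGE_IO_WINDOW = PCI_BRIDGE_RESOURCES,
		PCI_BRIDGE_MEM_WINDOW,
		PCI_BRIDGE_PREF_MEM_WINDOW,
		/* CardBus bridges reuse the same slots */
		PCI_CB_BRIDGE_IO_0_WINDOW = PCI_BRIDGE_RESOURCES,
		PCI_CB_BRIDGE_IO_1_WINDOW,
		PCI_CB_BRIDGE_MEM_0_WINDOW,
		PCI_CB_BRIDGE_MEM_1_WINDOW,
	};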
@@ -1105,35 +1110,37 @@ static void pci_bus_size_cardbus(struct pci_bus *bus,
struct list_head *realloc_head)
{
struct pci_dev *bridge = bus->self;
- struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
+ struct resource *b_res;
resource_size_t b_res_3_size = pci_cardbus_mem_size * 2;
u16 ctrl;
- if (b_res[0].parent)
+ b_res = &bridge->resource[PCI_CB_BRIDGE_IO_0_WINDOW];
+ if (b_res->parent)
goto handle_b_res_1;
/*
* Reserve some resources for CardBus. We reserve a fixed amount
* of bus space for CardBus bridges.
*/
- b_res[0].start = pci_cardbus_io_size;
- b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1;
- b_res[0].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
+ b_res->start = pci_cardbus_io_size;
+ b_res->end = b_res->start + pci_cardbus_io_size - 1;
+ b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
if (realloc_head) {
- b_res[0].end -= pci_cardbus_io_size;
+ b_res->end -= pci_cardbus_io_size;
add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
- pci_cardbus_io_size);
+ pci_cardbus_io_size);
}
handle_b_res_1:
- if (b_res[1].parent)
+ b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW];
+ if (b_res->parent)
goto handle_b_res_2;
- b_res[1].start = pci_cardbus_io_size;
- b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1;
- b_res[1].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
+ b_res->start = pci_cardbus_io_size;
+ b_res->end = b_res->start + pci_cardbus_io_size - 1;
+ b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
if (realloc_head) {
- b_res[1].end -= pci_cardbus_io_size;
- add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size,
- pci_cardbus_io_size);
+ b_res->end -= pci_cardbus_io_size;
+ add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
+ pci_cardbus_io_size);
}
handle_b_res_2:
@@ -1153,21 +1160,22 @@ handle_b_res_2:
pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
}
- if (b_res[2].parent)
+ b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_0_WINDOW];
+ if (b_res->parent)
goto handle_b_res_3;
/*
* If we have prefetchable memory support, allocate two regions.
* Otherwise, allocate one region of twice the size.
*/
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
- b_res[2].start = pci_cardbus_mem_size;
- b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1;
- b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
- IORESOURCE_STARTALIGN;
+ b_res->start = pci_cardbus_mem_size;
+ b_res->end = b_res->start + pci_cardbus_mem_size - 1;
+ b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
+ IORESOURCE_STARTALIGN;
if (realloc_head) {
- b_res[2].end -= pci_cardbus_mem_size;
- add_to_list(realloc_head, bridge, b_res+2,
- pci_cardbus_mem_size, pci_cardbus_mem_size);
+ b_res->end -= pci_cardbus_mem_size;
+ add_to_list(realloc_head, bridge, b_res,
+ pci_cardbus_mem_size, pci_cardbus_mem_size);
}
/* Reduce that to half */
@@ -1175,15 +1183,16 @@ handle_b_res_2:
}
handle_b_res_3:
- if (b_res[3].parent)
+ b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW];
+ if (b_res->parent)
goto handle_done;
- b_res[3].start = pci_cardbus_mem_size;
- b_res[3].end = b_res[3].start + b_res_3_size - 1;
- b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
+ b_res->start = pci_cardbus_mem_size;
+ b_res->end = b_res->start + b_res_3_size - 1;
+ b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
if (realloc_head) {
- b_res[3].end -= b_res_3_size;
- add_to_list(realloc_head, bridge, b_res+3, b_res_3_size,
- pci_cardbus_mem_size);
+ b_res->end -= b_res_3_size;
+ add_to_list(realloc_head, bridge, b_res, b_res_3_size,
+ pci_cardbus_mem_size);
}
handle_done:
@@ -1227,7 +1236,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
break;
hdr_type = -1; /* Intentionally invalid - not a PCI device. */
} else {
- pref = &bus->self->resource[PCI_BRIDGE_RESOURCES + 2];
+ pref = &bus->self->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
hdr_type = bus->self->hdr_type;
}
@@ -1885,9 +1894,9 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
struct pci_dev *dev, *bridge = bus->self;
resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align;
- io_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
- mmio_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
- mmio_pref_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+ mmio_pref_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
/*
* The alignment of this bridge is yet to be considered, hence it must
@@ -1960,21 +1969,21 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* Reduce the available resource space by what the
* bridge and devices below it occupy.
*/
- res = &dev->resource[PCI_BRIDGE_RESOURCES + 0];
+ res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(io.start, align) - io.start : 0;
used_size = align + resource_size(res);
if (!res->parent)
io.start = min(io.start + used_size, io.end + 1);
- res = &dev->resource[PCI_BRIDGE_RESOURCES + 1];
+ res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(mmio.start, align) - mmio.start : 0;
used_size = align + resource_size(res);
if (!res->parent)
mmio.start = min(mmio.start + used_size, mmio.end + 1);
- res = &dev->resource[PCI_BRIDGE_RESOURCES + 2];
+ res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(mmio_pref.start, align) -
mmio_pref.start : 0;
@@ -2027,9 +2036,9 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
return;
/* Take the initial extra resources from the hotplug port */
- available_io = bridge->resource[PCI_BRIDGE_RESOURCES + 0];
- available_mmio = bridge->resource[PCI_BRIDGE_RESOURCES + 1];
- available_mmio_pref = bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+ available_mmio_pref = bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
pci_bus_distribute_available_resources(bridge->subordinate,
add_list, available_io,
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index d8ca40a97693..d21fa04fa44d 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -439,10 +439,11 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
res->end = res->start + pci_rebar_size_to_bytes(size) - 1;
/* Check if the new config works by trying to assign everything. */
- ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
- if (ret)
- goto error_resize;
-
+ if (dev->bus->self) {
+ ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
+ if (ret)
+ goto error_resize;
+ }
return 0;
error_resize:
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index e69cac84b605..850cfeb74608 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -25,7 +25,7 @@ static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
-static bool use_dma_mrpc = 1;
+static bool use_dma_mrpc = true;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
"Enable the use of the DMA MRPC feature");
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index fb9b17fa0fb5..580369f3c0b0 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -164,12 +164,6 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count);
int verify_cis_cache(struct pcmcia_socket *s);
-int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
- cisdata_t code, cisparse_t *parse, void *priv_data,
- int (*loop_tuple) (tuple_t *tuple,
- cisparse_t *parse,
- void *priv_data));
-
int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function,
tuple_t *tuple);
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index f2741c04289d..35158cfd9c1a 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -178,10 +178,9 @@ static int electra_cf_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct electra_cf_socket *cf;
struct resource mem, io;
- int status;
+ int status = -ENOMEM;
const unsigned int *prop;
int err;
- struct vm_struct *area;
err = of_address_to_resource(np, 0, &mem);
if (err)
@@ -202,30 +201,19 @@ static int electra_cf_probe(struct platform_device *ofdev)
cf->mem_phys = mem.start;
cf->mem_size = PAGE_ALIGN(resource_size(&mem));
cf->mem_base = ioremap(cf->mem_phys, cf->mem_size);
+ if (!cf->mem_base)
+ goto out_free_cf;
cf->io_size = PAGE_ALIGN(resource_size(&io));
-
- area = __get_vm_area(cf->io_size, 0, PHB_IO_BASE, PHB_IO_END);
- if (area == NULL) {
- status = -ENOMEM;
- goto fail1;
- }
-
- cf->io_virt = (void __iomem *)(area->addr);
+ cf->io_virt = ioremap_phb(io.start, cf->io_size);
+ if (!cf->io_virt)
+ goto out_unmap_mem;
cf->gpio_base = ioremap(0xfc103000, 0x1000);
+ if (!cf->gpio_base)
+ goto out_unmap_virt;
dev_set_drvdata(device, cf);
- if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
- (__ioremap_at(io.start, cf->io_virt, cf->io_size,
- pgprot_noncached(PAGE_KERNEL)) == NULL)) {
- dev_err(device, "can't ioremap ranges\n");
- status = -ENOMEM;
- goto fail1;
- }
-
-
cf->io_base = (unsigned long)cf->io_virt - VMALLOC_END;
-
cf->iomem.start = (unsigned long)cf->mem_base;
cf->iomem.end = (unsigned long)cf->mem_base + (mem.end - mem.start);
cf->iomem.flags = IORESOURCE_MEM;
@@ -305,14 +293,13 @@ fail1:
if (cf->irq)
free_irq(cf->irq, cf);
- if (cf->io_virt)
- __iounmap_at(cf->io_virt, cf->io_size);
- if (cf->mem_base)
- iounmap(cf->mem_base);
- if (cf->gpio_base)
- iounmap(cf->gpio_base);
- if (area)
- device_init_wakeup(&ofdev->dev, 0);
+ iounmap(cf->gpio_base);
+out_unmap_virt:
+ device_init_wakeup(&ofdev->dev, 0);
+ iounmap(cf->io_virt);
+out_unmap_mem:
+ iounmap(cf->mem_base);
+out_free_cf:
kfree(cf);
return status;
@@ -330,7 +317,7 @@ static int electra_cf_remove(struct platform_device *ofdev)
free_irq(cf->irq, cf);
del_timer_sync(&cf->timer);
- __iounmap_at(cf->io_virt, cf->io_size);
+ iounmap(cf->io_virt);
iounmap(cf->mem_base);
iounmap(cf->gpio_base);
release_mem_region(cf->mem_phys, cf->mem_size);
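
The probe path now maps each region, checks it immediately, and unwinds in reverse order through dedicated labels, instead of mapping everything and testing all the pointers at once; the powerpc-private __get_vm_area()/__ioremap_at() pair is replaced by the ioremap_phb() helper. A condensed sketch of the unwind pattern (names and addresses hypothetical):

static int example_probe(void)
{
	void __iomem *first, *second;

	first = ioremap(0xfc100000, 0x1000);
	if (!first)
		return -ENOMEM;

	second = ioremap(0xfc200000, 0x1000);
	if (!second)
		goto out_unmap_first;	/* undo only what already succeeded */

	return 0;

out_unmap_first:
	iounmap(first);
	return -ENOMEM;
}
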
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
index b553f7ab532f..e4c4daf92038 100644
--- a/drivers/pcmcia/pcmcia_cis.c
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -78,9 +78,9 @@ done:
* calls the @loop_tuple function for each entry. If the call to @loop_tuple
* returns 0, the loop exits. Returns 0 on success or an error code otherwise.
*/
-int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
- cisdata_t code, cisparse_t *parse, void *priv_data,
- int (*loop_tuple) (tuple_t *tuple,
+static int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
+ cisdata_t code, cisparse_t *parse, void *priv_data,
+ int (*loop_tuple) (tuple_t *tuple,
cisparse_t *parse,
void *priv_data))
{
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index bf6529b0b5b0..84bfc0e85d6b 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -694,7 +694,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
struct pci_bus_region region;
unsigned mask;
- res = dev->resource + PCI_BRIDGE_RESOURCES + nr;
+ res = &dev->resource[nr];
/* Already allocated? */
if (res->parent)
return 0;
@@ -711,7 +711,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
region.end = config_readl(socket, addr_end) | ~mask;
if (region.start && region.end > region.start && !override_bios) {
pcibios_bus_to_resource(dev->bus, res, &region);
- if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0)
+ if (pci_claim_resource(dev, nr) == 0)
return 0;
dev_info(&dev->dev,
"Preassigned resource %d busy or not available, reconfiguring...\n",
@@ -745,19 +745,35 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
return 0;
}
+static void yenta_free_res(struct yenta_socket *socket, int nr)
+{
+ struct pci_dev *dev = socket->dev;
+ struct resource *res;
+
+ res = &dev->resource[nr];
+ if (res->start != 0 && res->end != 0)
+ release_resource(res);
+
+ res->start = res->end = res->flags = 0;
+}
+
/*
* Allocate the bridge mappings for the device.
*/
static void yenta_allocate_resources(struct yenta_socket *socket)
{
int program = 0;
- program += yenta_allocate_res(socket, 0, IORESOURCE_IO,
+ program += yenta_allocate_res(socket, PCI_CB_BRIDGE_IO_0_WINDOW,
+ IORESOURCE_IO,
PCI_CB_IO_BASE_0, PCI_CB_IO_LIMIT_0);
- program += yenta_allocate_res(socket, 1, IORESOURCE_IO,
+ program += yenta_allocate_res(socket, PCI_CB_BRIDGE_IO_1_WINDOW,
+ IORESOURCE_IO,
PCI_CB_IO_BASE_1, PCI_CB_IO_LIMIT_1);
- program += yenta_allocate_res(socket, 2, IORESOURCE_MEM|IORESOURCE_PREFETCH,
+ program += yenta_allocate_res(socket, PCI_CB_BRIDGE_MEM_0_WINDOW,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH,
PCI_CB_MEMORY_BASE_0, PCI_CB_MEMORY_LIMIT_0);
- program += yenta_allocate_res(socket, 3, IORESOURCE_MEM,
+ program += yenta_allocate_res(socket, PCI_CB_BRIDGE_MEM_1_WINDOW,
+ IORESOURCE_MEM,
PCI_CB_MEMORY_BASE_1, PCI_CB_MEMORY_LIMIT_1);
if (program)
pci_setup_cardbus(socket->dev->subordinate);
@@ -769,14 +785,10 @@ static void yenta_allocate_resources(struct yenta_socket *socket)
*/
static void yenta_free_resources(struct yenta_socket *socket)
{
- int i;
- for (i = 0; i < 4; i++) {
- struct resource *res;
- res = socket->dev->resource + PCI_BRIDGE_RESOURCES + i;
- if (res->start != 0 && res->end != 0)
- release_resource(res);
- res->start = res->end = res->flags = 0;
- }
+ yenta_free_res(socket, PCI_CB_BRIDGE_IO_0_WINDOW);
+ yenta_free_res(socket, PCI_CB_BRIDGE_IO_1_WINDOW);
+ yenta_free_res(socket, PCI_CB_BRIDGE_MEM_0_WINDOW);
+ yenta_free_res(socket, PCI_CB_BRIDGE_MEM_1_WINDOW);
}
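
yenta_allocate_res() and yenta_free_res() now take the absolute index into dev->resource[] (one of the PCI_CB_BRIDGE_*_WINDOW constants) rather than an offset that the callee adds to PCI_BRIDGE_RESOURCES. A sketch of the assumed CardBus aliases, mirroring the old nr = 0..3 offsets visible in the diff:

enum {
	/* Assumed CardBus bridge window aliases */
	PCI_CB_BRIDGE_IO_0_WINDOW	= PCI_BRIDGE_RESOURCES,		/* was nr = 0 */
	PCI_CB_BRIDGE_IO_1_WINDOW	= PCI_BRIDGE_RESOURCES + 1,	/* was nr = 1 */
	PCI_CB_BRIDGE_MEM_0_WINDOW	= PCI_BRIDGE_RESOURCES + 2,	/* was nr = 2 */
	PCI_CB_BRIDGE_MEM_1_WINDOW	= PCI_BRIDGE_RESOURCES + 3,	/* was nr = 3 */
};
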
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 09ae8a970880..a9261cf48293 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -79,13 +79,6 @@ config FSL_IMX8_DDR_PMU
can give information about memory throughput and other related
events.
-config HISI_PMU
- bool "HiSilicon SoC PMU"
- depends on ARM64 && ACPI
- help
- Support for HiSilicon SoC uncore performance monitoring
- unit (PMU), such as: L3C, HHA and DDRC.
-
config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && ACPI
@@ -129,4 +122,6 @@ config ARM_SPE_PMU
Extension, which provides periodic sampling of operations in
the CPU pipeline and reports this via the perf AUX interface.
+source "drivers/perf/hisilicon/Kconfig"
+
endmenu
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 70968c8c09d7..518d0603e24f 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -690,10 +690,8 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_warn(&pdev->dev, "Failed to find IRQ\n");
+ if (irq < 0)
return -EINVAL;
- }
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
PMUNAME, atomic_inc_return(&pmu_idx));
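
Dropping the dev_warn() is possible because platform_get_irq() already logs its own error message when no IRQ is found, so callers only need to propagate the failure:

	/* platform_get_irq() prints an error itself on failure,
	 * so no extra message is needed at the call site. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -EINVAL;
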
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index f01a57e5a5f3..48e28ef93a70 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -814,7 +814,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
if (err) {
dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
err, &res_0->start);
- return err;
+ goto out_clear_affinity;
}
err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
@@ -833,6 +833,8 @@ static int smmu_pmu_probe(struct platform_device *pdev)
out_unregister:
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
+out_clear_affinity:
+ irq_set_affinity_hint(smmu_pmu->irq, NULL);
return err;
}
@@ -842,6 +844,7 @@ static int smmu_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&smmu_pmu->pmu);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
+ irq_set_affinity_hint(smmu_pmu->irq, NULL);
return 0;
}
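
The fix pairs every successful irq_set_affinity_hint(irq, mask) with a NULL hint on teardown, both in the probe unwind and in remove; otherwise a stale hint would linger after the PMU is gone. A minimal sketch of the pairing:

static void example_pmu_teardown(int irq)
{
	/* Drop the affinity hint set at probe time so no stale
	 * cpumask reference outlives the device. */
	irq_set_affinity_hint(irq, NULL);
}
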
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index b72c04852599..d80f48798bce 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -274,7 +274,7 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event)
if (!attr->exclude_kernel)
reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
- if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && capable(CAP_SYS_ADMIN))
+ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
return reg;
@@ -700,7 +700,7 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
return -EOPNOTSUPP;
reg = arm_spe_event_to_pmscr(event);
- if (!capable(CAP_SYS_ADMIN) &&
+ if (!perfmon_capable() &&
(reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
BIT(SYS_PMSCR_EL1_CX_SHIFT) |
BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
@@ -1133,10 +1133,8 @@ static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
struct platform_device *pdev = spe_pmu->pdev;
int irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ (%d)\n", irq);
+ if (irq < 0)
return -ENXIO;
- }
if (!irq_is_percpu(irq)) {
dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
diff --git a/drivers/perf/hisilicon/Kconfig b/drivers/perf/hisilicon/Kconfig
new file mode 100644
index 000000000000..c5d1b7019fff
--- /dev/null
+++ b/drivers/perf/hisilicon/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HISI_PMU
+ tristate "HiSilicon SoC PMU drivers"
+ depends on ARM64 && ACPI
+ help
+ Support for HiSilicon SoC L3 Cache performance monitor, Hydra Home
+ Agent performance monitor and DDR Controller performance monitor.
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index c3a96ec2bf66..e8377061845f 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o
+obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
+ hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 453f1c6a16ca..15713faaa07e 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -394,8 +394,9 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
if (ret) {
dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
- &ddrc_pmu->node);
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
+ irq_set_affinity_hint(ddrc_pmu->irq, NULL);
}
return ret;
@@ -406,8 +407,9 @@ static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&ddrc_pmu->pmu);
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
- &ddrc_pmu->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ &ddrc_pmu->node);
+ irq_set_affinity_hint(ddrc_pmu->irq, NULL);
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 6a1dd72d8abb..dcc5600788a9 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -283,7 +283,7 @@ static struct attribute *hisi_hha_pmu_events_attr[] = {
HISI_PMU_EVENT_ATTR(rx_wbip, 0x05),
HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11),
HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c),
- HISI_PMU_EVENT_ATTR(wr_dr_64b, 0x1d),
+ HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d),
HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e),
HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f),
HISI_PMU_EVENT_ATTR(spill_num, 0x20),
@@ -406,8 +406,9 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
if (ret) {
dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
- &hha_pmu->node);
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node);
+ irq_set_affinity_hint(hha_pmu->irq, NULL);
}
return ret;
@@ -418,8 +419,9 @@ static int hisi_hha_pmu_remove(struct platform_device *pdev)
struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&hha_pmu->pmu);
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
- &hha_pmu->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ &hha_pmu->node);
+ irq_set_affinity_hint(hha_pmu->irq, NULL);
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 1151e99b241c..8dd1278bec04 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -396,8 +396,9 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
if (ret) {
dev_err(l3c_pmu->dev, "L3C PMU register failed!\n");
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
- &l3c_pmu->node);
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node);
+ irq_set_affinity_hint(l3c_pmu->irq, NULL);
}
return ret;
@@ -408,8 +409,9 @@ static int hisi_l3c_pmu_remove(struct platform_device *pdev)
struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&l3c_pmu->pmu);
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
- &l3c_pmu->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ &l3c_pmu->node);
+ irq_set_affinity_hint(l3c_pmu->irq, NULL);
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 584de8f807cc..97aff877a4e7 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -35,6 +35,7 @@ ssize_t hisi_format_sysfs_show(struct device *dev,
return sprintf(buf, "%s\n", (char *)eattr->var);
}
+EXPORT_SYMBOL_GPL(hisi_format_sysfs_show);
/*
* PMU event attributes
@@ -48,6 +49,7 @@ ssize_t hisi_event_sysfs_show(struct device *dev,
return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
+EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
/*
* sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show
@@ -59,6 +61,7 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
return sprintf(buf, "%d\n", hisi_pmu->on_cpu);
}
+EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
static bool hisi_validate_event_group(struct perf_event *event)
{
@@ -97,6 +100,7 @@ int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx)
{
return idx >= 0 && idx < hisi_pmu->num_counters;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_counter_valid);
int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
{
@@ -113,6 +117,7 @@ int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
return idx;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);
static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{
@@ -173,6 +178,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
return 0;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init);
/*
* Set the counter to count the event that we're interested in,
@@ -220,6 +226,7 @@ void hisi_uncore_pmu_set_event_period(struct perf_event *event)
/* Write start value to the hardware event counter */
hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);
void hisi_uncore_pmu_event_update(struct perf_event *event)
{
@@ -240,6 +247,7 @@ void hisi_uncore_pmu_event_update(struct perf_event *event)
HISI_MAX_PERIOD(hisi_pmu->counter_bits);
local64_add(delta, &event->count);
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update);
void hisi_uncore_pmu_start(struct perf_event *event, int flags)
{
@@ -262,6 +270,7 @@ void hisi_uncore_pmu_start(struct perf_event *event, int flags)
hisi_uncore_pmu_enable_event(event);
perf_event_update_userpage(event);
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start);
void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
{
@@ -278,6 +287,7 @@ void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
hisi_uncore_pmu_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop);
int hisi_uncore_pmu_add(struct perf_event *event, int flags)
{
@@ -300,6 +310,7 @@ int hisi_uncore_pmu_add(struct perf_event *event, int flags)
return 0;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add);
void hisi_uncore_pmu_del(struct perf_event *event, int flags)
{
@@ -311,12 +322,14 @@ void hisi_uncore_pmu_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del);
void hisi_uncore_pmu_read(struct perf_event *event)
{
/* Read hardware counter and update the perf counter statistics */
hisi_uncore_pmu_event_update(event);
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
void hisi_uncore_pmu_enable(struct pmu *pmu)
{
@@ -329,6 +342,7 @@ void hisi_uncore_pmu_enable(struct pmu *pmu)
hisi_pmu->ops->start_counters(hisi_pmu);
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable);
void hisi_uncore_pmu_disable(struct pmu *pmu)
{
@@ -336,6 +350,7 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
hisi_pmu->ops->stop_counters(hisi_pmu);
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable);
/*
@@ -414,10 +429,11 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
hisi_pmu->on_cpu = cpu;
/* Overflow interrupt also should use the same CPU */
- WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
+ WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(cpu)));
return 0;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu);
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
@@ -446,7 +462,10 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
/* Use this CPU for event counting */
hisi_pmu->on_cpu = target;
- WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
+ WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(target)));
return 0;
}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
+
+MODULE_LICENSE("GPL v2");
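
With HISI_PMU now tristate, the shared helpers must be exported for the L3C, HHA and DDRC modules, and irq_set_affinity() (which is not exported to modules) is swapped for irq_set_affinity_hint(). A sketch of how a sub-driver presumably wires the exported helpers into its struct pmu (fields abbreviated):

static struct pmu example_uncore_pmu = {
	.event_init	= hisi_uncore_pmu_event_init,
	.pmu_enable	= hisi_uncore_pmu_enable,
	.pmu_disable	= hisi_uncore_pmu_disable,
	.add		= hisi_uncore_pmu_add,
	.del		= hisi_uncore_pmu_del,
	.start		= hisi_uncore_pmu_start,
	.stop		= hisi_uncore_pmu_stop,
	.read		= hisi_uncore_pmu_read,
};
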
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
index 71801e30d601..617cf073e9aa 100644
--- a/drivers/phy/amlogic/Kconfig
+++ b/drivers/phy/amlogic/Kconfig
@@ -3,12 +3,13 @@
# Phy drivers for Amlogic platforms
#
config PHY_MESON8B_USB2
- tristate "Meson8, Meson8b and GXBB USB2 PHY driver"
+ tristate "Meson8, Meson8b, Meson8m2 and GXBB USB2 PHY driver"
default ARCH_MESON
depends on OF && (ARCH_MESON || COMPILE_TEST)
depends on USB_SUPPORT
select USB_COMMON
select GENERIC_PHY
+ select REGMAP_MMIO
help
Enable this to support the Meson USB2 PHYs found in Meson8,
Meson8b and GXBB SoCs.
@@ -26,18 +27,6 @@ config PHY_MESON_GXL_USB2
GXL and GXM SoCs.
If unsure, say N.
-config PHY_MESON_GXL_USB3
- tristate "Meson GXL and GXM USB3 PHY drivers"
- default ARCH_MESON
- depends on OF && (ARCH_MESON || COMPILE_TEST)
- depends on USB_SUPPORT
- select GENERIC_PHY
- select REGMAP_MMIO
- help
- Enable this to support the Meson USB3 PHY and OTG detection
- IP block found in Meson GXL and GXM SoCs.
- If unsure, say N.
-
config PHY_MESON_G12A_USB2
tristate "Meson G12A USB2 PHY driver"
default ARCH_MESON
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
index e2baa133f7af..99702a45e9be 100644
--- a/drivers/phy/amlogic/Makefile
+++ b/drivers/phy/amlogic/Makefile
@@ -2,7 +2,6 @@
obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
-obj-$(CONFIG_PHY_MESON_GXL_USB3) += phy-meson-gxl-usb3.o
obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE) += phy-meson-g12a-usb3-pcie.o
obj-$(CONFIG_PHY_MESON_AXG_PCIE) += phy-meson-axg-pcie.o
obj-$(CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG) += phy-meson-axg-mipi-pcie-analog.o
diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb3.c b/drivers/phy/amlogic/phy-meson-gxl-usb3.c
deleted file mode 100644
index c0e9e4c16149..000000000000
--- a/drivers/phy/amlogic/phy-meson-gxl-usb3.c
+++ /dev/null
@@ -1,283 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Meson GXL USB3 PHY and OTG mode detection driver
- *
- * Copyright (C) 2018 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
- */
-
-#include <linux/bitfield.h>
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/phy/phy.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <linux/platform_device.h>
-
-#define USB_R0 0x00
- #define USB_R0_P30_FSEL_MASK GENMASK(5, 0)
- #define USB_R0_P30_PHY_RESET BIT(6)
- #define USB_R0_P30_TEST_POWERDOWN_HSP BIT(7)
- #define USB_R0_P30_TEST_POWERDOWN_SSP BIT(8)
- #define USB_R0_P30_ACJT_LEVEL_MASK GENMASK(13, 9)
- #define USB_R0_P30_TX_BOOST_LEVEL_MASK GENMASK(16, 14)
- #define USB_R0_P30_LANE0_TX2RX_LOOPBACK BIT(17)
- #define USB_R0_P30_LANE0_EXT_PCLK_REQ BIT(18)
- #define USB_R0_P30_PCS_RX_LOS_MASK_VAL_MASK GENMASK(28, 19)
- #define USB_R0_U2D_SS_SCALEDOWN_MODE_MASK GENMASK(30, 29)
- #define USB_R0_U2D_ACT BIT(31)
-
-#define USB_R1 0x04
- #define USB_R1_U3H_BIGENDIAN_GS BIT(0)
- #define USB_R1_U3H_PME_ENABLE BIT(1)
- #define USB_R1_U3H_HUB_PORT_OVERCURRENT_MASK GENMASK(6, 2)
- #define USB_R1_U3H_HUB_PORT_PERM_ATTACH_MASK GENMASK(11, 7)
- #define USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK GENMASK(15, 12)
- #define USB_R1_U3H_HOST_U3_PORT_DISABLE BIT(16)
- #define USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT BIT(17)
- #define USB_R1_U3H_HOST_MSI_ENABLE BIT(18)
- #define USB_R1_U3H_FLADJ_30MHZ_REG_MASK GENMASK(24, 19)
- #define USB_R1_P30_PCS_TX_SWING_FULL_MASK GENMASK(31, 25)
-
-#define USB_R2 0x08
- #define USB_R2_P30_CR_DATA_IN_MASK GENMASK(15, 0)
- #define USB_R2_P30_CR_READ BIT(16)
- #define USB_R2_P30_CR_WRITE BIT(17)
- #define USB_R2_P30_CR_CAP_ADDR BIT(18)
- #define USB_R2_P30_CR_CAP_DATA BIT(19)
- #define USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK GENMASK(25, 20)
- #define USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK GENMASK(31, 26)
-
-#define USB_R3 0x0c
- #define USB_R3_P30_SSC_ENABLE BIT(0)
- #define USB_R3_P30_SSC_RANGE_MASK GENMASK(3, 1)
- #define USB_R3_P30_SSC_REF_CLK_SEL_MASK GENMASK(12, 4)
- #define USB_R3_P30_REF_SSP_EN BIT(13)
- #define USB_R3_P30_LOS_BIAS_MASK GENMASK(18, 16)
- #define USB_R3_P30_LOS_LEVEL_MASK GENMASK(23, 19)
- #define USB_R3_P30_MPLL_MULTIPLIER_MASK GENMASK(30, 24)
-
-#define USB_R4 0x10
- #define USB_R4_P21_PORT_RESET_0 BIT(0)
- #define USB_R4_P21_SLEEP_M0 BIT(1)
- #define USB_R4_MEM_PD_MASK GENMASK(3, 2)
- #define USB_R4_P21_ONLY BIT(4)
-
-#define USB_R5 0x14
- #define USB_R5_ID_DIG_SYNC BIT(0)
- #define USB_R5_ID_DIG_REG BIT(1)
- #define USB_R5_ID_DIG_CFG_MASK GENMASK(3, 2)
- #define USB_R5_ID_DIG_EN_0 BIT(4)
- #define USB_R5_ID_DIG_EN_1 BIT(5)
- #define USB_R5_ID_DIG_CURR BIT(6)
- #define USB_R5_ID_DIG_IRQ BIT(7)
- #define USB_R5_ID_DIG_TH_MASK GENMASK(15, 8)
- #define USB_R5_ID_DIG_CNT_MASK GENMASK(23, 16)
-
-/* read-only register */
-#define USB_R6 0x18
- #define USB_R6_P30_CR_DATA_OUT_MASK GENMASK(15, 0)
- #define USB_R6_P30_CR_ACK BIT(16)
-
-struct phy_meson_gxl_usb3_priv {
- struct regmap *regmap;
- enum phy_mode mode;
- struct clk *clk_phy;
- struct clk *clk_peripheral;
- struct reset_control *reset;
-};
-
-static const struct regmap_config phy_meson_gxl_usb3_regmap_conf = {
- .reg_bits = 8,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = USB_R6,
-};
-
-static int phy_meson_gxl_usb3_power_on(struct phy *phy)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
-
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_EN_0,
- USB_R5_ID_DIG_EN_0);
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_EN_1,
- USB_R5_ID_DIG_EN_1);
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_TH_MASK,
- FIELD_PREP(USB_R5_ID_DIG_TH_MASK, 0xff));
-
- return 0;
-}
-
-static int phy_meson_gxl_usb3_power_off(struct phy *phy)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
-
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_EN_0, 0);
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_EN_1, 0);
-
- return 0;
-}
-
-static int phy_meson_gxl_usb3_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
-
- switch (mode) {
- case PHY_MODE_USB_HOST:
- regmap_update_bits(priv->regmap, USB_R0, USB_R0_U2D_ACT, 0);
- regmap_update_bits(priv->regmap, USB_R4, USB_R4_P21_SLEEP_M0,
- 0);
- break;
-
- case PHY_MODE_USB_DEVICE:
- regmap_update_bits(priv->regmap, USB_R0, USB_R0_U2D_ACT,
- USB_R0_U2D_ACT);
- regmap_update_bits(priv->regmap, USB_R4, USB_R4_P21_SLEEP_M0,
- USB_R4_P21_SLEEP_M0);
- break;
-
- default:
- dev_err(&phy->dev, "unsupported PHY mode %d\n", mode);
- return -EINVAL;
- }
-
- priv->mode = mode;
-
- return 0;
-}
-
-static int phy_meson_gxl_usb3_init(struct phy *phy)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
- int ret;
-
- ret = reset_control_reset(priv->reset);
- if (ret)
- goto err;
-
- ret = clk_prepare_enable(priv->clk_phy);
- if (ret)
- goto err;
-
- ret = clk_prepare_enable(priv->clk_peripheral);
- if (ret)
- goto err_disable_clk_phy;
-
- ret = phy_meson_gxl_usb3_set_mode(phy, priv->mode, 0);
- if (ret)
- goto err_disable_clk_peripheral;
-
- regmap_update_bits(priv->regmap, USB_R1,
- USB_R1_U3H_FLADJ_30MHZ_REG_MASK,
- FIELD_PREP(USB_R1_U3H_FLADJ_30MHZ_REG_MASK, 0x20));
-
- return 0;
-
-err_disable_clk_peripheral:
- clk_disable_unprepare(priv->clk_peripheral);
-err_disable_clk_phy:
- clk_disable_unprepare(priv->clk_phy);
-err:
- return ret;
-}
-
-static int phy_meson_gxl_usb3_exit(struct phy *phy)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
-
- clk_disable_unprepare(priv->clk_peripheral);
- clk_disable_unprepare(priv->clk_phy);
-
- return 0;
-}
-
-static const struct phy_ops phy_meson_gxl_usb3_ops = {
- .power_on = phy_meson_gxl_usb3_power_on,
- .power_off = phy_meson_gxl_usb3_power_off,
- .set_mode = phy_meson_gxl_usb3_set_mode,
- .init = phy_meson_gxl_usb3_init,
- .exit = phy_meson_gxl_usb3_exit,
- .owner = THIS_MODULE,
-};
-
-static int phy_meson_gxl_usb3_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct phy_meson_gxl_usb3_priv *priv;
- struct resource *res;
- struct phy *phy;
- struct phy_provider *phy_provider;
- void __iomem *base;
- int ret;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- priv->regmap = devm_regmap_init_mmio(dev, base,
- &phy_meson_gxl_usb3_regmap_conf);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
-
- priv->clk_phy = devm_clk_get(dev, "phy");
- if (IS_ERR(priv->clk_phy))
- return PTR_ERR(priv->clk_phy);
-
- priv->clk_peripheral = devm_clk_get(dev, "peripheral");
- if (IS_ERR(priv->clk_peripheral))
- return PTR_ERR(priv->clk_peripheral);
-
- priv->reset = devm_reset_control_array_get_shared(dev);
- if (IS_ERR(priv->reset))
- return PTR_ERR(priv->reset);
-
- /*
- * default to host mode as hardware defaults and/or boot-loader
- * behavior can result in this PHY starting up in device mode. this
- * default and the initialization in phy_meson_gxl_usb3_init ensure
- * that we reproducibly start in a known mode on all devices.
- */
- priv->mode = PHY_MODE_USB_HOST;
-
- phy = devm_phy_create(dev, np, &phy_meson_gxl_usb3_ops);
- if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to create PHY\n");
-
- return ret;
- }
-
- phy_set_drvdata(phy, priv);
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-
- return PTR_ERR_OR_ZERO(phy_provider);
-}
-
-static const struct of_device_id phy_meson_gxl_usb3_of_match[] = {
- { .compatible = "amlogic,meson-gxl-usb3-phy", },
- { },
-};
-MODULE_DEVICE_TABLE(of, phy_meson_gxl_usb3_of_match);
-
-static struct platform_driver phy_meson_gxl_usb3_driver = {
- .probe = phy_meson_gxl_usb3_probe,
- .driver = {
- .name = "phy-meson-gxl-usb3",
- .of_match_table = phy_meson_gxl_usb3_of_match,
- },
-};
-module_platform_driver(phy_meson_gxl_usb3_driver);
-
-MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
-MODULE_DESCRIPTION("Meson GXL USB3 PHY and OTG detection driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c
index bd66bd723e4a..03c061dd5f0d 100644
--- a/drivers/phy/amlogic/phy-meson8b-usb2.c
+++ b/drivers/phy/amlogic/phy-meson8b-usb2.c
@@ -10,6 +10,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -76,6 +78,17 @@
#define REG_ADP_BC_ACA_PIN_FLOAT BIT(26)
#define REG_DBG_UART 0x10
+ #define REG_DBG_UART_BYPASS_SEL BIT(0)
+ #define REG_DBG_UART_BYPASS_DM_EN BIT(1)
+ #define REG_DBG_UART_BYPASS_DP_EN BIT(2)
+ #define REG_DBG_UART_BYPASS_DM_DATA BIT(3)
+ #define REG_DBG_UART_BYPASS_DP_DATA BIT(4)
+ #define REG_DBG_UART_FSV_MINUS BIT(5)
+ #define REG_DBG_UART_FSV_PLUS BIT(6)
+ #define REG_DBG_UART_FSV_BURN_IN_TEST BIT(7)
+ #define REG_DBG_UART_LOOPBACK_EN_B BIT(8)
+ #define REG_DBG_UART_SET_IDDQ BIT(9)
+ #define REG_DBG_UART_ATE_RESET BIT(10)
#define REG_TEST 0x14
#define REG_TEST_DATA_IN_MASK GENMASK(3, 0)
@@ -104,35 +117,30 @@
#define RESET_COMPLETE_TIME 500
#define ACA_ENABLE_COMPLETE_TIME 50
-struct phy_meson8b_usb2_priv {
- void __iomem *regs;
- enum usb_dr_mode dr_mode;
- struct clk *clk_usb_general;
- struct clk *clk_usb;
- struct reset_control *reset;
+struct phy_meson8b_usb2_match_data {
+ bool host_enable_aca;
};
-static u32 phy_meson8b_usb2_read(struct phy_meson8b_usb2_priv *phy_priv,
- u32 reg)
-{
- return readl(phy_priv->regs + reg);
-}
-
-static void phy_meson8b_usb2_mask_bits(struct phy_meson8b_usb2_priv *phy_priv,
- u32 reg, u32 mask, u32 value)
-{
- u32 data;
-
- data = phy_meson8b_usb2_read(phy_priv, reg);
- data &= ~mask;
- data |= (value & mask);
+struct phy_meson8b_usb2_priv {
+ struct regmap *regmap;
+ enum usb_dr_mode dr_mode;
+ struct clk *clk_usb_general;
+ struct clk *clk_usb;
+ struct reset_control *reset;
+ const struct phy_meson8b_usb2_match_data *match;
+};
- writel(data, phy_priv->regs + reg);
-}
+static const struct regmap_config phy_meson8b_usb2_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_TUNE,
+};
static int phy_meson8b_usb2_power_on(struct phy *phy)
{
struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+ u32 reg;
int ret;
if (!IS_ERR_OR_NULL(priv->reset)) {
@@ -156,38 +164,43 @@ static int phy_meson8b_usb2_power_on(struct phy *phy)
return ret;
}
- phy_meson8b_usb2_mask_bits(priv, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL,
- REG_CONFIG_CLK_32k_ALTSEL);
+ regmap_update_bits(priv->regmap, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL,
+ REG_CONFIG_CLK_32k_ALTSEL);
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_REF_CLK_SEL_MASK,
- 0x2 << REG_CTRL_REF_CLK_SEL_SHIFT);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_REF_CLK_SEL_MASK,
+ 0x2 << REG_CTRL_REF_CLK_SEL_SHIFT);
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_FSEL_MASK,
- 0x5 << REG_CTRL_FSEL_SHIFT);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_FSEL_MASK,
+ 0x5 << REG_CTRL_FSEL_SHIFT);
/* reset the PHY */
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET,
- REG_CTRL_POWER_ON_RESET);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET,
+ REG_CTRL_POWER_ON_RESET);
udelay(RESET_COMPLETE_TIME);
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET, 0);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET, 0);
udelay(RESET_COMPLETE_TIME);
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT,
- REG_CTRL_SOF_TOGGLE_OUT);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT,
+ REG_CTRL_SOF_TOGGLE_OUT);
if (priv->dr_mode == USB_DR_MODE_HOST) {
- phy_meson8b_usb2_mask_bits(priv, REG_ADP_BC,
+ regmap_update_bits(priv->regmap, REG_DBG_UART,
+ REG_DBG_UART_SET_IDDQ, 0);
+
+ if (priv->match->host_enable_aca) {
+ regmap_update_bits(priv->regmap, REG_ADP_BC,
REG_ADP_BC_ACA_ENABLE,
REG_ADP_BC_ACA_ENABLE);
- udelay(ACA_ENABLE_COMPLETE_TIME);
+ udelay(ACA_ENABLE_COMPLETE_TIME);
- if (phy_meson8b_usb2_read(priv, REG_ADP_BC) &
- REG_ADP_BC_ACA_PIN_FLOAT) {
- dev_warn(&phy->dev, "USB ID detect failed!\n");
- clk_disable_unprepare(priv->clk_usb);
- clk_disable_unprepare(priv->clk_usb_general);
- return -EINVAL;
+ regmap_read(priv->regmap, REG_ADP_BC, &reg);
+ if (reg & REG_ADP_BC_ACA_PIN_FLOAT) {
+ dev_warn(&phy->dev, "USB ID detect failed!\n");
+ clk_disable_unprepare(priv->clk_usb);
+ clk_disable_unprepare(priv->clk_usb_general);
+ return -EINVAL;
+ }
}
}
@@ -198,6 +211,11 @@ static int phy_meson8b_usb2_power_off(struct phy *phy)
{
struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+ if (priv->dr_mode == USB_DR_MODE_HOST)
+ regmap_update_bits(priv->regmap, REG_DBG_UART,
+ REG_DBG_UART_SET_IDDQ,
+ REG_DBG_UART_SET_IDDQ);
+
clk_disable_unprepare(priv->clk_usb);
clk_disable_unprepare(priv->clk_usb_general);
@@ -213,18 +231,26 @@ static const struct phy_ops phy_meson8b_usb2_ops = {
static int phy_meson8b_usb2_probe(struct platform_device *pdev)
{
struct phy_meson8b_usb2_priv *priv;
- struct resource *res;
struct phy *phy;
struct phy_provider *phy_provider;
+ void __iomem *base;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->regs))
- return PTR_ERR(priv->regs);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->match = device_get_match_data(&pdev->dev);
+ if (!priv->match)
+ return -ENODEV;
+
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &phy_meson8b_usb2_regmap_conf);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
priv->clk_usb_general = devm_clk_get(&pdev->dev, "usb_general");
if (IS_ERR(priv->clk_usb_general))
@@ -259,11 +285,32 @@ static int phy_meson8b_usb2_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
+static const struct phy_meson8b_usb2_match_data phy_meson8_usb2_match_data = {
+ .host_enable_aca = false,
+};
+
+static const struct phy_meson8b_usb2_match_data phy_meson8b_usb2_match_data = {
+ .host_enable_aca = true,
+};
+
static const struct of_device_id phy_meson8b_usb2_of_match[] = {
- { .compatible = "amlogic,meson8-usb2-phy", },
- { .compatible = "amlogic,meson8b-usb2-phy", },
- { .compatible = "amlogic,meson-gxbb-usb2-phy", },
- { },
+ {
+ .compatible = "amlogic,meson8-usb2-phy",
+ .data = &phy_meson8_usb2_match_data
+ },
+ {
+ .compatible = "amlogic,meson8b-usb2-phy",
+ .data = &phy_meson8b_usb2_match_data
+ },
+ {
+ .compatible = "amlogic,meson8m2-usb2-phy",
+ .data = &phy_meson8b_usb2_match_data
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-usb2-phy",
+ .data = &phy_meson8b_usb2_match_data
+ },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, phy_meson8b_usb2_of_match);
@@ -277,5 +324,5 @@ static struct platform_driver phy_meson8b_usb2_driver = {
module_platform_driver(phy_meson8b_usb2_driver);
MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
-MODULE_DESCRIPTION("Meson8, Meson8b and GXBB USB2 PHY driver");
+MODULE_DESCRIPTION("Meson8, Meson8b, Meson8m2 and GXBB USB2 PHY driver");
MODULE_LICENSE("GPL");
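
regmap_update_bits() is a drop-in replacement for the removed phy_meson8b_usb2_mask_bits() helper: both perform the same read/clear/set/write sequence, regmap just adds locking and register bookkeeping on top. For comparison, a sketch of what the removed helper did:

static void open_coded_update_bits(void __iomem *regs, u32 reg, u32 mask, u32 val)
{
	u32 data = readl(regs + reg);

	data &= ~mask;
	data |= (val & mask);
	writel(data, regs + reg);	/* regmap_update_bits() does this under a lock */
}
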
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
index 7ceea5ae2704..527625912b78 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -279,7 +279,7 @@ static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
.init = ns2_drd_phy_init,
.power_on = ns2_drd_phy_poweron,
.power_off = ns2_drd_phy_poweroff,
diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c
index fe6c58910e4c..77c025a0720c 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-usb.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c
@@ -16,8 +16,6 @@ enum bcm_usb_phy_version {
};
enum bcm_usb_phy_reg {
- PLL_NDIV_FRAC,
- PLL_NDIV_INT,
PLL_CTRL,
PHY_CTRL,
PHY_PLL_CTRL,
@@ -31,18 +29,11 @@ static const u8 bcm_usb_combo_phy_ss[] = {
};
static const u8 bcm_usb_combo_phy_hs[] = {
- [PLL_NDIV_FRAC] = 0x04,
- [PLL_NDIV_INT] = 0x08,
[PLL_CTRL] = 0x0c,
[PHY_CTRL] = 0x10,
};
-#define HSPLL_NDIV_INT_VAL 0x13
-#define HSPLL_NDIV_FRAC_VAL 0x1005
-
static const u8 bcm_usb_hs_phy[] = {
- [PLL_NDIV_FRAC] = 0x0,
- [PLL_NDIV_INT] = 0x4,
[PLL_CTRL] = 0x8,
[PHY_CTRL] = 0xc,
};
@@ -52,7 +43,6 @@ enum pll_ctrl_bits {
SSPLL_SUSPEND_EN,
PLL_SEQ_START,
PLL_LOCK,
- PLL_PDIV,
};
static const u8 u3pll_ctrl[] = {
@@ -66,29 +56,17 @@ static const u8 u3pll_ctrl[] = {
#define HSPLL_PDIV_VAL 0x1
static const u8 u2pll_ctrl[] = {
- [PLL_PDIV] = 1,
[PLL_RESETB] = 5,
[PLL_LOCK] = 6,
};
enum bcm_usb_phy_ctrl_bits {
CORERDY,
- AFE_LDO_PWRDWNB,
- AFE_PLL_PWRDWNB,
- AFE_BG_PWRDWNB,
- PHY_ISO,
PHY_RESETB,
PHY_PCTL,
};
#define PHY_PCTL_MASK 0xffff
-/*
- * 0x0806 of PCTL_VAL has below bits set
- * BIT-8 : refclk divider 1
- * BIT-3:2: device mode; mode is not effect
- * BIT-1: soft reset active low
- */
-#define HSPHY_PCTL_VAL 0x0806
#define SSPHY_PCTL_VAL 0x0006
static const u8 u3phy_ctrl[] = {
@@ -98,10 +76,6 @@ static const u8 u3phy_ctrl[] = {
static const u8 u2phy_ctrl[] = {
[CORERDY] = 0,
- [AFE_LDO_PWRDWNB] = 1,
- [AFE_PLL_PWRDWNB] = 2,
- [AFE_BG_PWRDWNB] = 3,
- [PHY_ISO] = 4,
[PHY_RESETB] = 5,
[PHY_PCTL] = 6,
};
@@ -186,38 +160,13 @@ static int bcm_usb_hs_phy_init(struct bcm_usb_phy_cfg *phy_cfg)
int ret = 0;
void __iomem *regs = phy_cfg->regs;
const u8 *offset;
- u32 rd_data;
offset = phy_cfg->offset;
- writel(HSPLL_NDIV_INT_VAL, regs + offset[PLL_NDIV_INT]);
- writel(HSPLL_NDIV_FRAC_VAL, regs + offset[PLL_NDIV_FRAC]);
-
- rd_data = readl(regs + offset[PLL_CTRL]);
- rd_data &= ~(HSPLL_PDIV_MASK << u2pll_ctrl[PLL_PDIV]);
- rd_data |= (HSPLL_PDIV_VAL << u2pll_ctrl[PLL_PDIV]);
- writel(rd_data, regs + offset[PLL_CTRL]);
-
- /* Set Core Ready high */
- bcm_usb_reg32_setbits(regs + offset[PHY_CTRL],
- BIT(u2phy_ctrl[CORERDY]));
-
- /* Maximum timeout for Core Ready done */
- msleep(30);
-
+ bcm_usb_reg32_clrbits(regs + offset[PLL_CTRL],
+ BIT(u2pll_ctrl[PLL_RESETB]));
bcm_usb_reg32_setbits(regs + offset[PLL_CTRL],
BIT(u2pll_ctrl[PLL_RESETB]));
- bcm_usb_reg32_setbits(regs + offset[PHY_CTRL],
- BIT(u2phy_ctrl[PHY_RESETB]));
-
-
- rd_data = readl(regs + offset[PHY_CTRL]);
- rd_data &= ~(PHY_PCTL_MASK << u2phy_ctrl[PHY_PCTL]);
- rd_data |= (HSPHY_PCTL_VAL << u2phy_ctrl[PHY_PCTL]);
- writel(rd_data, regs + offset[PHY_CTRL]);
-
- /* Maximum timeout for PLL reset done */
- msleep(30);
ret = bcm_usb_pll_lock_check(regs + offset[PLL_CTRL],
BIT(u2pll_ctrl[PLL_LOCK]));
@@ -256,7 +205,7 @@ static int bcm_usb_phy_init(struct phy *phy)
return ret;
}
-static struct phy_ops sr_phy_ops = {
+static const struct phy_ops sr_phy_ops = {
.init = bcm_usb_phy_init,
.reset = bcm_usb_phy_reset,
.owner = THIS_MODULE,
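
Marking the phy_ops tables const is safe because devm_phy_create() takes a const struct phy_ops * and the PHY core never writes to the table, so it can live in read-only memory. A sketch of the call site, assuming the usual signature:

	/* devm_phy_create(dev, node, ops) accepts a const ops table */
	phy = devm_phy_create(dev, NULL, &sr_phy_ops);
	if (IS_ERR(phy))
		return PTR_ERR(phy);
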
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 491bbd46c5b3..99fbc7e4138b 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -39,14 +39,14 @@ struct match_chip_info {
u8 optional_reg;
};
-static struct value_to_name_map brcm_dr_mode_to_name[] = {
+static const struct value_to_name_map brcm_dr_mode_to_name[] = {
{ USB_CTLR_MODE_HOST, "host" },
{ USB_CTLR_MODE_DEVICE, "peripheral" },
{ USB_CTLR_MODE_DRD, "drd" },
{ USB_CTLR_MODE_TYPEC_PD, "typec-pd" }
};
-static struct value_to_name_map brcm_dual_mode_to_name[] = {
+static const struct value_to_name_map brcm_dual_mode_to_name[] = {
{ 0, "host" },
{ 1, "device" },
{ 2, "auto" },
@@ -138,7 +138,7 @@ static int brcm_usb_phy_exit(struct phy *gphy)
return 0;
}
-static struct phy_ops brcm_usb_phy_ops = {
+static const struct phy_ops brcm_usb_phy_ops = {
.init = brcm_usb_phy_init,
.exit = brcm_usb_phy_exit,
.owner = THIS_MODULE,
@@ -170,7 +170,7 @@ static struct phy *brcm_usb_phy_xlate(struct device *dev,
return ERR_PTR(-ENODEV);
}
-static int name_to_value(struct value_to_name_map *table, int count,
+static int name_to_value(const struct value_to_name_map *table, int count,
const char *name, int *value)
{
int x;
@@ -185,7 +185,7 @@ static int name_to_value(struct value_to_name_map *table, int count,
return -EINVAL;
}
-static const char *value_to_name(struct value_to_name_map *table, int count,
+static const char *value_to_name(const struct value_to_name_map *table, int count,
int value)
{
if (value >= count)
@@ -252,7 +252,7 @@ static const struct attribute_group brcm_usb_phy_group = {
.attrs = brcm_usb_phy_attrs,
};
-static struct match_chip_info chip_info_7216 = {
+static const struct match_chip_info chip_info_7216 = {
.init_func = &brcm_usb_dvr_init_7216,
.required_regs = {
BRCM_REGS_CTRL,
@@ -262,7 +262,7 @@ static struct match_chip_info chip_info_7216 = {
},
};
-static struct match_chip_info chip_info_7211b0 = {
+static const struct match_chip_info chip_info_7211b0 = {
.init_func = &brcm_usb_dvr_init_7211b0,
.required_regs = {
BRCM_REGS_CTRL,
@@ -275,7 +275,7 @@ static struct match_chip_info chip_info_7211b0 = {
.optional_reg = BRCM_REGS_BDC_EC,
};
-static struct match_chip_info chip_info_7445 = {
+static const struct match_chip_info chip_info_7445 = {
.init_func = &brcm_usb_dvr_init_7445,
.required_regs = {
BRCM_REGS_CTRL,
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
index 459545871608..432832bdbd16 100644
--- a/drivers/phy/cadence/Kconfig
+++ b/drivers/phy/cadence/Kconfig
@@ -27,3 +27,12 @@ config PHY_CADENCE_SIERRA
select GENERIC_PHY
help
Enable this to support the Cadence Sierra PHY driver
+
+config PHY_CADENCE_SALVO
+ tristate "Cadence Salvo PHY Driver"
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ help
+	  Enable this to support the Cadence SALVO PHY driver.
+	  This PHY is a legacy PHY, used only for USB3
+	  and USB2.
diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile
index 6a7ffc6ea599..26e16bd34efe 100644
--- a/drivers/phy/cadence/Makefile
+++ b/drivers/phy/cadence/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_PHY_CADENCE_TORRENT) += phy-cadence-torrent.o
obj-$(CONFIG_PHY_CADENCE_DPHY) += cdns-dphy.o
obj-$(CONFIG_PHY_CADENCE_SIERRA) += phy-cadence-sierra.o
+obj-$(CONFIG_PHY_CADENCE_SALVO) += phy-cadence-salvo.o
diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c
new file mode 100644
index 000000000000..1ecbb964cd21
--- /dev/null
+++ b/drivers/phy/cadence/phy-cadence-salvo.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Salvo PHY is a 28nm legacy PHY, used only for USB3
+ * and USB2.
+ *
+ * Copyright (c) 2019-2020 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+/* PHY register definition */
+#define PHY_PMA_CMN_CTRL1 0xC800
+#define TB_ADDR_CMN_DIAG_HSCLK_SEL 0x01e0
+#define TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR 0x0084
+#define TB_ADDR_CMN_PLL0_VCOCAL_ITER_TMR 0x0085
+#define TB_ADDR_CMN_PLL0_INTDIV 0x0094
+#define TB_ADDR_CMN_PLL0_FRACDIV 0x0095
+#define TB_ADDR_CMN_PLL0_HIGH_THR 0x0096
+#define TB_ADDR_CMN_PLL0_SS_CTRL1 0x0098
+#define TB_ADDR_CMN_PLL0_SS_CTRL2 0x0099
+#define TB_ADDR_CMN_PLL0_DSM_DIAG 0x0097
+#define TB_ADDR_CMN_DIAG_PLL0_OVRD 0x01c2
+#define TB_ADDR_CMN_DIAG_PLL0_FBH_OVRD 0x01c0
+#define TB_ADDR_CMN_DIAG_PLL0_FBL_OVRD 0x01c1
+#define TB_ADDR_CMN_DIAG_PLL0_V2I_TUNE 0x01C5
+#define TB_ADDR_CMN_DIAG_PLL0_CP_TUNE 0x01C6
+#define TB_ADDR_CMN_DIAG_PLL0_LF_PROG 0x01C7
+#define TB_ADDR_CMN_DIAG_PLL0_TEST_MODE 0x01c4
+#define TB_ADDR_CMN_PSM_CLK_CTRL 0x0061
+#define TB_ADDR_XCVR_DIAG_RX_LANE_CAL_RST_TMR 0x40ea
+#define TB_ADDR_XCVR_PSM_RCTRL 0x4001
+#define TB_ADDR_TX_PSC_A0 0x4100
+#define TB_ADDR_TX_PSC_A1 0x4101
+#define TB_ADDR_TX_PSC_A2 0x4102
+#define TB_ADDR_TX_PSC_A3 0x4103
+#define TB_ADDR_TX_DIAG_ECTRL_OVRD 0x41f5
+#define TB_ADDR_TX_PSC_CAL 0x4106
+#define TB_ADDR_TX_PSC_RDY 0x4107
+#define TB_ADDR_RX_PSC_A0 0x8000
+#define TB_ADDR_RX_PSC_A1 0x8001
+#define TB_ADDR_RX_PSC_A2 0x8002
+#define TB_ADDR_RX_PSC_A3 0x8003
+#define TB_ADDR_RX_PSC_CAL 0x8006
+#define TB_ADDR_RX_PSC_RDY 0x8007
+#define TB_ADDR_TX_TXCC_MGNLS_MULT_000 0x4058
+#define TB_ADDR_TX_DIAG_BGREF_PREDRV_DELAY 0x41e7
+#define TB_ADDR_RX_SLC_CU_ITER_TMR 0x80e3
+#define TB_ADDR_RX_SIGDET_HL_FILT_TMR 0x8090
+#define TB_ADDR_RX_SAMP_DAC_CTRL 0x8058
+#define TB_ADDR_RX_DIAG_SIGDET_TUNE 0x81dc
+#define TB_ADDR_RX_DIAG_LFPSDET_TUNE2 0x81df
+#define TB_ADDR_RX_DIAG_BS_TM 0x81f5
+#define TB_ADDR_RX_DIAG_DFE_CTRL1 0x81d3
+#define TB_ADDR_RX_DIAG_ILL_IQE_TRIM4 0x81c7
+#define TB_ADDR_RX_DIAG_ILL_E_TRIM0 0x81c2
+#define TB_ADDR_RX_DIAG_ILL_IQ_TRIM0 0x81c1
+#define TB_ADDR_RX_DIAG_ILL_IQE_TRIM6 0x81c9
+#define TB_ADDR_RX_DIAG_RXFE_TM3 0x81f8
+#define TB_ADDR_RX_DIAG_RXFE_TM4 0x81f9
+#define TB_ADDR_RX_DIAG_LFPSDET_TUNE 0x81dd
+#define TB_ADDR_RX_DIAG_DFE_CTRL3 0x81d5
+#define TB_ADDR_RX_DIAG_SC2C_DELAY 0x81e1
+#define TB_ADDR_RX_REE_VGA_GAIN_NODFE 0x81bf
+#define TB_ADDR_XCVR_PSM_CAL_TMR 0x4002
+#define TB_ADDR_XCVR_PSM_A0BYP_TMR 0x4004
+#define TB_ADDR_XCVR_PSM_A0IN_TMR 0x4003
+#define TB_ADDR_XCVR_PSM_A1IN_TMR 0x4005
+#define TB_ADDR_XCVR_PSM_A2IN_TMR 0x4006
+#define TB_ADDR_XCVR_PSM_A3IN_TMR 0x4007
+#define TB_ADDR_XCVR_PSM_A4IN_TMR 0x4008
+#define TB_ADDR_XCVR_PSM_A5IN_TMR 0x4009
+#define TB_ADDR_XCVR_PSM_A0OUT_TMR 0x400a
+#define TB_ADDR_XCVR_PSM_A1OUT_TMR 0x400b
+#define TB_ADDR_XCVR_PSM_A2OUT_TMR 0x400c
+#define TB_ADDR_XCVR_PSM_A3OUT_TMR 0x400d
+#define TB_ADDR_XCVR_PSM_A4OUT_TMR 0x400e
+#define TB_ADDR_XCVR_PSM_A5OUT_TMR 0x400f
+#define TB_ADDR_TX_RCVDET_EN_TMR 0x4122
+#define TB_ADDR_TX_RCVDET_ST_TMR 0x4123
+#define TB_ADDR_XCVR_DIAG_LANE_FCM_EN_MGN_TMR 0x40f2
+#define TB_ADDR_TX_RCVDETSC_CTRL 0x4124
+
+/* TB_ADDR_TX_RCVDETSC_CTRL */
+#define RXDET_IN_P3_32KHZ BIT(1)
+
+struct cdns_reg_pairs {
+ u16 val;
+ u32 off;
+};
+
+struct cdns_salvo_data {
+ u8 reg_offset_shift;
+ struct cdns_reg_pairs *init_sequence_val;
+ u8 init_sequence_length;
+};
+
+struct cdns_salvo_phy {
+ struct phy *phy;
+ struct clk *clk;
+ void __iomem *base;
+ struct cdns_salvo_data *data;
+};
+
+static const struct of_device_id cdns_salvo_phy_of_match[];
+static u16 cdns_salvo_read(struct cdns_salvo_phy *salvo_phy, u32 reg)
+{
+ return (u16)readl(salvo_phy->base +
+ reg * (1 << salvo_phy->data->reg_offset_shift));
+}
+
+static void cdns_salvo_write(struct cdns_salvo_phy *salvo_phy,
+ u32 reg, u16 val)
+{
+ writel(val, salvo_phy->base +
+ reg * (1 << salvo_phy->data->reg_offset_shift));
+}
+
+/*
+ * The bringup sequence pairs below are from the Cadence PHY User Guide
+ * and NXP platform tuning results.
+ */
+static struct cdns_reg_pairs cdns_nxp_sequence_pair[] = {
+ {0x0830, PHY_PMA_CMN_CTRL1},
+ {0x0010, TB_ADDR_CMN_DIAG_HSCLK_SEL},
+ {0x00f0, TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0018, TB_ADDR_CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x00d0, TB_ADDR_CMN_PLL0_INTDIV},
+ {0x4aaa, TB_ADDR_CMN_PLL0_FRACDIV},
+ {0x0034, TB_ADDR_CMN_PLL0_HIGH_THR},
+ {0x01ee, TB_ADDR_CMN_PLL0_SS_CTRL1},
+ {0x7f03, TB_ADDR_CMN_PLL0_SS_CTRL2},
+ {0x0020, TB_ADDR_CMN_PLL0_DSM_DIAG},
+ {0x0000, TB_ADDR_CMN_DIAG_PLL0_OVRD},
+ {0x0000, TB_ADDR_CMN_DIAG_PLL0_FBH_OVRD},
+ {0x0000, TB_ADDR_CMN_DIAG_PLL0_FBL_OVRD},
+ {0x0007, TB_ADDR_CMN_DIAG_PLL0_V2I_TUNE},
+ {0x0027, TB_ADDR_CMN_DIAG_PLL0_CP_TUNE},
+ {0x0008, TB_ADDR_CMN_DIAG_PLL0_LF_PROG},
+ {0x0022, TB_ADDR_CMN_DIAG_PLL0_TEST_MODE},
+ {0x000a, TB_ADDR_CMN_PSM_CLK_CTRL},
+ {0x0139, TB_ADDR_XCVR_DIAG_RX_LANE_CAL_RST_TMR},
+ {0xbefc, TB_ADDR_XCVR_PSM_RCTRL},
+
+ {0x7799, TB_ADDR_TX_PSC_A0},
+ {0x7798, TB_ADDR_TX_PSC_A1},
+ {0x509b, TB_ADDR_TX_PSC_A2},
+ {0x0003, TB_ADDR_TX_DIAG_ECTRL_OVRD},
+ {0x509b, TB_ADDR_TX_PSC_A3},
+ {0x2090, TB_ADDR_TX_PSC_CAL},
+ {0x2090, TB_ADDR_TX_PSC_RDY},
+
+ {0xA6FD, TB_ADDR_RX_PSC_A0},
+ {0xA6FD, TB_ADDR_RX_PSC_A1},
+ {0xA410, TB_ADDR_RX_PSC_A2},
+ {0x2410, TB_ADDR_RX_PSC_A3},
+
+ {0x23FF, TB_ADDR_RX_PSC_CAL},
+ {0x2010, TB_ADDR_RX_PSC_RDY},
+
+ {0x0020, TB_ADDR_TX_TXCC_MGNLS_MULT_000},
+ {0x00ff, TB_ADDR_TX_DIAG_BGREF_PREDRV_DELAY},
+ {0x0002, TB_ADDR_RX_SLC_CU_ITER_TMR},
+ {0x0013, TB_ADDR_RX_SIGDET_HL_FILT_TMR},
+ {0x0000, TB_ADDR_RX_SAMP_DAC_CTRL},
+ {0x1004, TB_ADDR_RX_DIAG_SIGDET_TUNE},
+ {0x4041, TB_ADDR_RX_DIAG_LFPSDET_TUNE2},
+ {0x0480, TB_ADDR_RX_DIAG_BS_TM},
+ {0x8006, TB_ADDR_RX_DIAG_DFE_CTRL1},
+ {0x003f, TB_ADDR_RX_DIAG_ILL_IQE_TRIM4},
+ {0x543f, TB_ADDR_RX_DIAG_ILL_E_TRIM0},
+ {0x543f, TB_ADDR_RX_DIAG_ILL_IQ_TRIM0},
+ {0x0000, TB_ADDR_RX_DIAG_ILL_IQE_TRIM6},
+ {0x8000, TB_ADDR_RX_DIAG_RXFE_TM3},
+ {0x0003, TB_ADDR_RX_DIAG_RXFE_TM4},
+ {0x2408, TB_ADDR_RX_DIAG_LFPSDET_TUNE},
+ {0x05ca, TB_ADDR_RX_DIAG_DFE_CTRL3},
+ {0x0258, TB_ADDR_RX_DIAG_SC2C_DELAY},
+ {0x1fff, TB_ADDR_RX_REE_VGA_GAIN_NODFE},
+
+ {0x02c6, TB_ADDR_XCVR_PSM_CAL_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A0BYP_TMR},
+ {0x02c6, TB_ADDR_XCVR_PSM_A0IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A1IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A2IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A3IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A4IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A5IN_TMR},
+
+ {0x0002, TB_ADDR_XCVR_PSM_A0OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A1OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A2OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A3OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A4OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A5OUT_TMR},
+ /* Change rx detect parameter */
+ {0x0960, TB_ADDR_TX_RCVDET_EN_TMR},
+ {0x01e0, TB_ADDR_TX_RCVDET_ST_TMR},
+ {0x0090, TB_ADDR_XCVR_DIAG_LANE_FCM_EN_MGN_TMR},
+};
+
+static int cdns_salvo_phy_init(struct phy *phy)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+ struct cdns_salvo_data *data = salvo_phy->data;
+ int ret, i;
+ u16 value;
+
+ ret = clk_prepare_enable(salvo_phy->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < data->init_sequence_length; i++) {
+ struct cdns_reg_pairs *reg_pair = data->init_sequence_val + i;
+
+ cdns_salvo_write(salvo_phy, reg_pair->off, reg_pair->val);
+ }
+
+ /* RXDET_IN_P3_32KHZ, Receiver detect slow clock enable */
+ value = cdns_salvo_read(salvo_phy, TB_ADDR_TX_RCVDETSC_CTRL);
+ value |= RXDET_IN_P3_32KHZ;
+	cdns_salvo_write(salvo_phy, TB_ADDR_TX_RCVDETSC_CTRL, value);
+
+ udelay(10);
+
+ clk_disable_unprepare(salvo_phy->clk);
+
+ return ret;
+}
+
+static int cdns_salvo_phy_power_on(struct phy *phy)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+
+ return clk_prepare_enable(salvo_phy->clk);
+}
+
+static int cdns_salvo_phy_power_off(struct phy *phy)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(salvo_phy->clk);
+
+ return 0;
+}
+
+static struct phy_ops cdns_salvo_phy_ops = {
+ .init = cdns_salvo_phy_init,
+ .power_on = cdns_salvo_phy_power_on,
+ .power_off = cdns_salvo_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int cdns_salvo_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct cdns_salvo_phy *salvo_phy;
+ struct resource *res;
+ const struct of_device_id *match;
+ struct cdns_salvo_data *data;
+
+ match = of_match_device(cdns_salvo_phy_of_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct cdns_salvo_data *)match->data;
+ salvo_phy = devm_kzalloc(dev, sizeof(*salvo_phy), GFP_KERNEL);
+ if (!salvo_phy)
+ return -ENOMEM;
+
+ salvo_phy->data = data;
+ salvo_phy->clk = devm_clk_get_optional(dev, "salvo_phy_clk");
+ if (IS_ERR(salvo_phy->clk))
+ return PTR_ERR(salvo_phy->clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ salvo_phy->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(salvo_phy->base))
+ return PTR_ERR(salvo_phy->base);
+
+ salvo_phy->phy = devm_phy_create(dev, NULL, &cdns_salvo_phy_ops);
+ if (IS_ERR(salvo_phy->phy))
+ return PTR_ERR(salvo_phy->phy);
+
+ phy_set_drvdata(salvo_phy->phy, salvo_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct cdns_salvo_data cdns_nxp_salvo_data = {
+ 2,
+ cdns_nxp_sequence_pair,
+ ARRAY_SIZE(cdns_nxp_sequence_pair),
+};
+
+static const struct of_device_id cdns_salvo_phy_of_match[] = {
+ {
+ .compatible = "nxp,salvo-phy",
+ .data = &cdns_nxp_salvo_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_salvo_phy_of_match);
+
+static struct platform_driver cdns_salvo_phy_driver = {
+ .probe = cdns_salvo_phy_probe,
+ .driver = {
+ .name = "cdns-salvo-phy",
+ .of_match_table = cdns_salvo_phy_of_match,
+ }
+};
+module_platform_driver(cdns_salvo_phy_driver);
+
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence SALVO PHY Driver");
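
With the write-back fix above (storing value rather than the bare RXDET_IN_P3_32KHZ mask), the init sequence preserves the other bits of TB_ADDR_TX_RCVDETSC_CTRL. The same read-modify-write could be factored into a small helper (hypothetical name):

static void cdns_salvo_set_bits(struct cdns_salvo_phy *salvo_phy, u32 reg, u16 bits)
{
	u16 val = cdns_salvo_read(salvo_phy, reg);

	/* Write back the full value so unrelated bits survive */
	cdns_salvo_write(salvo_phy, reg, val | bits);
}
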
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index a5c08e5bd2bf..faed652b73f7 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -685,10 +685,10 @@ static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0xFE0A, SIERRA_DET_STANDEC_A_PREG},
{0x000F, SIERRA_DET_STANDEC_B_PREG},
- {0x00A5, SIERRA_DET_STANDEC_C_PREG},
+ {0x55A5, SIERRA_DET_STANDEC_C_PREG},
{0x69ad, SIERRA_DET_STANDEC_D_PREG},
{0x0241, SIERRA_DET_STANDEC_E_PREG},
- {0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
+ {0x0110, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
{0x0014, SIERRA_PSM_A0IN_TMR_PREG},
{0xCF00, SIERRA_PSM_DIAG_PREG},
{0x001F, SIERRA_PSC_TX_A0_PREG},
@@ -696,7 +696,7 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x0003, SIERRA_PSC_TX_A2_PREG},
{0x0003, SIERRA_PSC_TX_A3_PREG},
{0x0FFF, SIERRA_PSC_RX_A0_PREG},
- {0x0619, SIERRA_PSC_RX_A1_PREG},
+ {0x0003, SIERRA_PSC_RX_A1_PREG},
{0x0003, SIERRA_PSC_RX_A2_PREG},
{0x0001, SIERRA_PSC_RX_A3_PREG},
{0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
@@ -705,19 +705,19 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
{0x2512, SIERRA_DFE_BIASTRIM_PREG},
{0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
- {0x873E, SIERRA_CLKPATHCTRL_TMR_PREG},
- {0x03CF, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
- {0x01CE, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x823E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
- {0x033F, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x023C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
{0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
- {0x8000, SIERRA_CREQ_SPARE_PREG},
+ {0x0000, SIERRA_CREQ_SPARE_PREG},
{0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
- {0x8453, SIERRA_CTLELUT_CTRL_PREG},
- {0x4110, SIERRA_DFE_ECMP_RATESEL_PREG},
- {0x4110, SIERRA_DFE_SMP_RATESEL_PREG},
- {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x8452, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4121, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4121, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0003, SIERRA_DEQ_PHALIGN_CTRL},
{0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
{0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
@@ -725,7 +725,7 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
- {0x9A8A, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x9999, SIERRA_DEQ_VGATUNE_CTRL_PREG},
{0x0014, SIERRA_DEQ_GLUT0},
{0x0014, SIERRA_DEQ_GLUT1},
{0x0014, SIERRA_DEQ_GLUT2},
@@ -772,6 +772,7 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x000F, SIERRA_LFPSFILT_NS_PREG},
{0x0009, SIERRA_LFPSFILT_RD_PREG},
{0x0001, SIERRA_LFPSFILT_MP_PREG},
+ {0x6013, SIERRA_SIGDET_SUPPORT_PREG},
{0x8013, SIERRA_SDFILT_H2L_A_PREG},
{0x8009, SIERRA_SDFILT_L2H_PREG},
{0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
index 4ea6a8897cd7..7b47682a4e0e 100644
--- a/drivers/phy/intel/Kconfig
+++ b/drivers/phy/intel/Kconfig
@@ -2,8 +2,23 @@
#
# Phy drivers for Intel Lightning Mountain (LGM) platform
#
+config PHY_INTEL_COMBO
+ bool "Intel ComboPHY driver"
+ depends on X86 || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select MFD_SYSCON
+ select GENERIC_PHY
+ select REGMAP
+ help
+ Enable this to support Intel ComboPhy.
+
+ This driver configures the ComboPhy subsystem on Intel gateway
+ chipsets, which provides PHYs for various controllers such as
+ EMAC, SATA and PCIe.
+
config PHY_INTEL_EMMC
tristate "Intel EMMC PHY driver"
+ depends on X86 || COMPILE_TEST
select GENERIC_PHY
help
Enable this to support the Intel EMMC PHY
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
index 6b876a75599d..233d530dadde 100644
--- a/drivers/phy/intel/Makefile
+++ b/drivers/phy/intel/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_INTEL_COMBO) += phy-intel-combo.o
obj-$(CONFIG_PHY_INTEL_EMMC) += phy-intel-emmc.o
diff --git a/drivers/phy/intel/phy-intel-combo.c b/drivers/phy/intel/phy-intel-combo.c
new file mode 100644
index 000000000000..c2a35be4cdfb
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-combo.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Combo-PHY driver
+ *
+ * Copyright (C) 2019-2020 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#define PCIE_PHY_GEN_CTRL 0x00
+#define PCIE_PHY_CLK_PAD BIT(17)
+
+#define PAD_DIS_CFG 0x174
+
+#define PCS_XF_ATE_OVRD_IN_2 0x3008
+#define ADAPT_REQ_MSK GENMASK(5, 4)
+
+#define PCS_XF_RX_ADAPT_ACK 0x3010
+#define RX_ADAPT_ACK_BIT BIT(0)
+
+#define CR_ADDR(addr, lane) (((addr) + (lane) * 0x100) << 2)
+#define REG_COMBO_MODE(x) ((x) * 0x200)
+#define REG_CLK_DISABLE(x) ((x) * 0x200 + 0x124)
+
+#define COMBO_PHY_ID(x) ((x)->parent->id)
+#define PHY_ID(x) ((x)->id)
+
+#define CLK_100MHZ 100000000
+#define CLK_156_25MHZ 156250000
+
+static const unsigned long intel_iphy_clk_rates[] = {
+ CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
+};
+
+enum {
+ PHY_0,
+ PHY_1,
+ PHY_MAX_NUM
+};
+
+/*
+ * Clock Register bit fields to enable clocks
+ * for ComboPhy according to the mode.
+ */
+enum intel_phy_mode {
+ PHY_PCIE_MODE = 0,
+ PHY_XPCS_MODE,
+ PHY_SATA_MODE,
+};
+
+/* ComboPhy mode Register values */
+enum intel_combo_mode {
+ PCIE0_PCIE1_MODE = 0,
+ PCIE_DL_MODE,
+ RXAUI_MODE,
+ XPCS0_XPCS1_MODE,
+ SATA0_SATA1_MODE,
+};
+
+enum aggregated_mode {
+ PHY_SL_MODE,
+ PHY_DL_MODE,
+};
+
+struct intel_combo_phy;
+
+struct intel_cbphy_iphy {
+ struct phy *phy;
+ struct intel_combo_phy *parent;
+ struct reset_control *app_rst;
+ u32 id;
+};
+
+struct intel_combo_phy {
+ struct device *dev;
+ struct clk *core_clk;
+ unsigned long clk_rate;
+ void __iomem *app_base;
+ void __iomem *cr_base;
+ struct regmap *syscfg;
+ struct regmap *hsiocfg;
+ u32 id;
+ u32 bid;
+ struct reset_control *phy_rst;
+ struct reset_control *core_rst;
+ struct intel_cbphy_iphy iphy[PHY_MAX_NUM];
+ enum intel_phy_mode phy_mode;
+ enum aggregated_mode aggr_mode;
+ u32 init_cnt;
+ struct mutex lock;
+};
+
+static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
+ u32 val;
+
+ /* Register: 0 is enable, 1 is disable */
+ val = set ? 0 : mask;
+
+ return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
+ mask, val);
+}
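
A worked example may help here: intel_cbphy_iphy_enable() derives the clock
gate bit from the PHY mode and the internal PHY id. A minimal sketch, assuming
the enum values defined in this patch (illustrative only, not part of the
change):

    /* PHY_SATA_MODE is 2, so for internal PHY 1 the gate bit is
     * BIT(2 * 2 + 1) = BIT(5). Clearing the bit enables the clock and
     * setting it disables the clock, matching the register semantics
     * noted in the comment above.
     */
    u32 mask = BIT(2 * 2 + 1);      /* 0x20 */
    regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
                       mask, 0);    /* enable */
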
+
+static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ u32 mask = BIT(cbphy->id * 2 + iphy->id);
+ u32 val;
+
+ /* Register: 0 is enable, 1 is disable */
+ val = set ? 0 : mask;
+
+ return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
+}
+
+static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
+ u32 mask, u32 val)
+{
+ u32 reg_val;
+
+ reg_val = readl(base + reg);
+ reg_val &= ~mask;
+ reg_val |= FIELD_PREP(mask, val);
+ writel(reg_val, base + reg);
+}
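
The helper above is a read-modify-write where the new value is shifted into
position by FIELD_PREP(). As a concrete illustration, using ADAPT_REQ_MSK as
defined earlier in this file (the call mirrors the later calibrate path):

    /* ADAPT_REQ_MSK = GENMASK(5, 4) = 0x30.
     * FIELD_PREP(0x30, 3) = 3 << 4 = 0x30, so bits 5:4 become 0b11
     * while all other bits of the register are preserved.
     */
    combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, 0),
                           ADAPT_REQ_MSK, 3);
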
+
+static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
+ int (*phy_cfg)(struct intel_cbphy_iphy *))
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = phy_cfg(iphy);
+ if (ret)
+ return ret;
+
+ if (cbphy->aggr_mode != PHY_DL_MODE)
+ return 0;
+
+ return phy_cfg(&cbphy->iphy[PHY_1]);
+}
+
+static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
+ return ret;
+ }
+
+ if (cbphy->init_cnt)
+ return 0;
+
+ combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
+ PCIE_PHY_CLK_PAD, 0);
+
+ /* Delay for stable clock PLL */
+ usleep_range(50, 100);
+
+ return 0;
+}
+
+static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
+ return ret;
+ }
+
+ if (cbphy->init_cnt)
+ return 0;
+
+ combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
+ PCIE_PHY_CLK_PAD, 1);
+
+ return 0;
+}
+
+static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
+{
+ enum intel_combo_mode cb_mode = PCIE0_PCIE1_MODE;
+ enum aggregated_mode aggr = cbphy->aggr_mode;
+ struct device *dev = cbphy->dev;
+ enum intel_phy_mode mode;
+ int ret;
+
+ mode = cbphy->phy_mode;
+
+ switch (mode) {
+ case PHY_PCIE_MODE:
+ cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
+ break;
+
+ case PHY_XPCS_MODE:
+ cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
+ break;
+
+ case PHY_SATA_MODE:
+ if (aggr == PHY_DL_MODE) {
+ dev_err(dev, "Mode:%u not support dual lane!\n", mode);
+ return -EINVAL;
+ }
+
+ cb_mode = SATA0_SATA1_MODE;
+ break;
+ }
+
+ ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
+ if (ret)
+ dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);
+
+ return ret;
+}
+
+static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
+{
+ reset_control_assert(cbphy->core_rst);
+ reset_control_assert(cbphy->phy_rst);
+}
+
+static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
+{
+ reset_control_deassert(cbphy->core_rst);
+ reset_control_deassert(cbphy->phy_rst);
+ /* Delay to ensure reset process is done */
+ usleep_range(10, 20);
+}
+
+static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ if (!cbphy->init_cnt) {
+ ret = clk_prepare_enable(cbphy->core_clk);
+ if (ret) {
+ dev_err(cbphy->dev, "Clock enable failed!\n");
+ return ret;
+ }
+
+ ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
+ if (ret) {
+ dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
+ cbphy->clk_rate);
+ goto clk_err;
+ }
+
+ intel_cbphy_rst_assert(cbphy);
+ intel_cbphy_rst_deassert(cbphy);
+ ret = intel_cbphy_set_mode(cbphy);
+ if (ret)
+ goto clk_err;
+ }
+
+ ret = intel_cbphy_iphy_enable(iphy, true);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed enabling PHY core\n");
+ goto clk_err;
+ }
+
+ ret = reset_control_deassert(iphy->app_rst);
+ if (ret) {
+ dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
+ COMBO_PHY_ID(iphy), PHY_ID(iphy));
+ goto clk_err;
+ }
+
+ /* Delay to ensure reset process is done */
+ udelay(1);
+
+ return 0;
+
+clk_err:
+ clk_disable_unprepare(cbphy->core_clk);
+
+ return ret;
+}
+
+static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = reset_control_assert(iphy->app_rst);
+ if (ret) {
+ dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
+ COMBO_PHY_ID(iphy), PHY_ID(iphy));
+ return ret;
+ }
+
+ ret = intel_cbphy_iphy_enable(iphy, false);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed disabling PHY core\n");
+ return ret;
+ }
+
+ if (cbphy->init_cnt)
+ return 0;
+
+ clk_disable_unprepare(cbphy->core_clk);
+ intel_cbphy_rst_assert(cbphy);
+
+ return 0;
+}
+
+static int intel_cbphy_init(struct phy *phy)
+{
+ struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ mutex_lock(&cbphy->lock);
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
+ if (ret)
+ goto err;
+
+ if (cbphy->phy_mode == PHY_PCIE_MODE) {
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
+ if (ret)
+ goto err;
+ }
+
+ cbphy->init_cnt++;
+
+err:
+ mutex_unlock(&cbphy->lock);
+
+ return ret;
+}
+
+static int intel_cbphy_exit(struct phy *phy)
+{
+ struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ mutex_lock(&cbphy->lock);
+ cbphy->init_cnt--;
+ if (cbphy->phy_mode == PHY_PCIE_MODE) {
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
+ if (ret)
+ goto err;
+ }
+
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);
+
+err:
+ mutex_unlock(&cbphy->lock);
+
+ return ret;
+}
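
init/exit above implement a mutex-protected reference count: the first init()
of either internal PHY brings up the shared clock, resets and combo mode, and
the last exit() tears them down. A condensed sketch of the pattern (the helper
names are hypothetical, not from this patch):

    mutex_lock(&cbphy->lock);
    if (cbphy->init_cnt++ == 0)
        bring_up_shared_resources(cbphy);   /* hypothetical helper */
    mutex_unlock(&cbphy->lock);
    /* ... */
    mutex_lock(&cbphy->lock);
    if (--cbphy->init_cnt == 0)
        tear_down_shared_resources(cbphy);  /* hypothetical helper */
    mutex_unlock(&cbphy->lock);
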
+
+static int intel_cbphy_calibrate(struct phy *phy)
+{
+ struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
+ struct intel_combo_phy *cbphy = iphy->parent;
+ void __iomem *cr_base = cbphy->cr_base;
+ int val, ret, id;
+
+ if (cbphy->phy_mode != PHY_XPCS_MODE)
+ return 0;
+
+ id = PHY_ID(iphy);
+
+ /* trigger auto RX adaptation */
+ combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
+ ADAPT_REQ_MSK, 3);
+ /* Wait for RX adaptation to finish */
+ ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
+ val, val & RX_ADAPT_ACK_BIT, 10, 5000);
+ if (ret)
+ dev_err(cbphy->dev, "RX Adaptation failed!\n");
+ else
+ dev_dbg(cbphy->dev, "RX Adaptation success!\n");
+
+ /* Stop RX adaptation */
+ combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
+ ADAPT_REQ_MSK, 0);
+
+ return ret;
+}
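
The calibration handshake is trigger, poll, stop. readl_poll_timeout(addr,
val, cond, sleep_us, timeout_us) rereads addr into val until cond holds or
timeout_us elapses, returning -ETIMEDOUT on expiry. A rough open-coded
equivalent of the wait above (sketch only, not the patch code):

    u32 val;
    unsigned int elapsed = 0;

    do {
        val = readl(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id));
        if (val & RX_ADAPT_ACK_BIT)
            break;
        usleep_range(10, 20);
        elapsed += 10;
    } while (elapsed < 5000);
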
+
+static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
+{
+ struct device *dev = cbphy->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct fwnode_reference_args ref;
+ int ret;
+ u32 val;
+
+ cbphy->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cbphy->core_clk)) {
+ ret = PTR_ERR(cbphy->core_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get clk failed:%d!\n", ret);
+ return ret;
+ }
+
+ cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
+ if (IS_ERR(cbphy->core_rst)) {
+ ret = PTR_ERR(cbphy->core_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get core reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
+ if (IS_ERR(cbphy->phy_rst)) {
+ ret = PTR_ERR(cbphy->phy_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get PHY reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
+ if (IS_ERR(cbphy->iphy[0].app_rst)) {
+ ret = PTR_ERR(cbphy->iphy[0].app_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
+ if (IS_ERR(cbphy->iphy[1].app_rst)) {
+ ret = PTR_ERR(cbphy->iphy[1].app_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(cbphy->app_base))
+ return PTR_ERR(cbphy->app_base);
+
+ cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(cbphy->cr_base))
+ return PTR_ERR(cbphy->cr_base);
+
+ /*
+ * The syscfg and hsiocfg variables store handles to the register sets
+ * of which the ComboPhy subsystem specific registers are a subset. The
+ * regmap framework is used to access these registers.
+ */
+ ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
+ 1, 0, &ref);
+ if (ret < 0)
+ return ret;
+
+ cbphy->id = ref.args[0];
+ cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
+ fwnode_handle_put(ref.fwnode);
+
+ ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
+ 0, &ref);
+ if (ret < 0)
+ return ret;
+
+ cbphy->bid = ref.args[0];
+ cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
+ fwnode_handle_put(ref.fwnode);
+
+ ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case PHY_TYPE_PCIE:
+ cbphy->phy_mode = PHY_PCIE_MODE;
+ break;
+
+ case PHY_TYPE_SATA:
+ cbphy->phy_mode = PHY_SATA_MODE;
+ break;
+
+ case PHY_TYPE_XPCS:
+ cbphy->phy_mode = PHY_XPCS_MODE;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PHY mode: %u\n", val);
+ return -EINVAL;
+ }
+
+ cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];
+
+ if (fwnode_property_present(fwnode, "intel,aggregation"))
+ cbphy->aggr_mode = PHY_DL_MODE;
+ else
+ cbphy->aggr_mode = PHY_SL_MODE;
+
+ return 0;
+}
+
+static const struct phy_ops intel_cbphy_ops = {
+ .init = intel_cbphy_init,
+ .exit = intel_cbphy_exit,
+ .calibrate = intel_cbphy_calibrate,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *intel_cbphy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
+ u32 iphy_id;
+
+ if (args->args_count < 1) {
+ dev_err(dev, "Invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ iphy_id = args->args[0];
+ if (iphy_id >= PHY_MAX_NUM) {
+ dev_err(dev, "Invalid phy instance %d\n", iphy_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
+ dev_err(dev, "Invalid. ComboPhy is in Dual lane mode %d\n", iphy_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cbphy->iphy[iphy_id].phy;
+}
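
On the consumer side, the single cell passed through of_phandle_args selects
the internal PHY instance. A hypothetical consumer lookup (the DT snippet and
device are illustrative, not from this patch):

    /* DT: phys = <&combophy 0>;  so args->args[0] == 0 == PHY_0 */
    struct phy *phy = devm_of_phy_get(dev, dev->of_node, NULL);
    if (IS_ERR(phy))
        return PTR_ERR(phy);
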
+
+static int intel_cbphy_create(struct intel_combo_phy *cbphy)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = cbphy->dev;
+ struct intel_cbphy_iphy *iphy;
+ int i;
+
+ for (i = 0; i < PHY_MAX_NUM; i++) {
+ iphy = &cbphy->iphy[i];
+ iphy->parent = cbphy;
+ iphy->id = i;
+
+ /* In dual lane mode skip phy creation for the second phy */
+ if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
+ continue;
+
+ iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
+ if (IS_ERR(iphy->phy)) {
+ dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
+ COMBO_PHY_ID(iphy), PHY_ID(iphy));
+
+ return PTR_ERR(iphy->phy);
+ }
+
+ phy_set_drvdata(iphy->phy, iphy);
+ }
+
+ dev_set_drvdata(dev, cbphy);
+ phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
+ if (IS_ERR(phy_provider))
+ dev_err(dev, "Register PHY provider failed!\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static int intel_cbphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_combo_phy *cbphy;
+ int ret;
+
+ cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
+ if (!cbphy)
+ return -ENOMEM;
+
+ cbphy->dev = dev;
+ cbphy->init_cnt = 0;
+ mutex_init(&cbphy->lock);
+ ret = intel_cbphy_fwnode_parse(cbphy);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, cbphy);
+
+ return intel_cbphy_create(cbphy);
+}
+
+static int intel_cbphy_remove(struct platform_device *pdev)
+{
+ struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);
+
+ intel_cbphy_rst_assert(cbphy);
+ clk_disable_unprepare(cbphy->core_clk);
+ return 0;
+}
+
+static const struct of_device_id of_intel_cbphy_match[] = {
+ { .compatible = "intel,combo-phy" },
+ { .compatible = "intel,combophy-lgm" },
+ {}
+};
+
+static struct platform_driver intel_cbphy_driver = {
+ .probe = intel_cbphy_probe,
+ .remove = intel_cbphy_remove,
+ .driver = {
+ .name = "intel-combo-phy",
+ .of_match_table = of_intel_cbphy_match,
+ }
+};
+
+module_platform_driver(intel_cbphy_driver);
+
+MODULE_DESCRIPTION("Intel Combo-phy driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 12e71a315a2c..089db0dea703 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -122,7 +122,6 @@ enum cpcap_gpio_mode {
struct cpcap_phy_ddata {
struct regmap *reg;
struct device *dev;
- struct clk *refclk;
struct usb_phy phy;
struct delayed_work detect_work;
struct pinctrl *pins;
@@ -707,7 +706,6 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev)
usb_remove_phy(&ddata->phy);
cancel_delayed_work_sync(&ddata->detect_work);
- clk_unprepare(ddata->refclk);
regulator_disable(ddata->vusb);
return 0;
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 98674ed094d9..ca9ce7e84a5c 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -18,6 +18,13 @@ config PHY_QCOM_APQ8064_SATA
depends on OF
select GENERIC_PHY
+config PHY_QCOM_IPQ4019_USB
+ tristate "Qualcomm IPQ4019 USB PHY driver"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Support for the USB PHYs on Qualcomm IPQ40xx SoCs.
+
config PHY_QCOM_IPQ806X_SATA
tristate "Qualcomm IPQ806x SATA SerDes/PHY driver"
depends on ARCH_QCOM
@@ -85,6 +92,16 @@ config PHY_QCOM_USB_HS
Support for the USB high-speed ULPI compliant phy on Qualcomm
chipsets.
+config PHY_QCOM_USB_SNPS_FEMTO_V2
+ tristate "Qualcomm SNPS FEMTO USB HS PHY V2 module"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable support for the USB high-speed SNPS Femto phy on Qualcomm
+ chipsets. This PHY has differences in the register map compared
+ to the V1 variants. The PHY is paired with a Synopsys DWC3 USB
+ controller on Qualcomm SoCs.
+
config PHY_QCOM_USB_HSIC
tristate "Qualcomm USB HSIC ULPI PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 1f14aeacbd70..86fb32efab79 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ATH79_USB) += phy-ath79-usb.o
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
+obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
@@ -12,3 +13,4 @@ obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o
obj-$(CONFIG_PHY_QCOM_USB_SS) += phy-qcom-usb-ss.o
+obj-$(CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2)+= phy-qcom-snps-femto-v2.o
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
new file mode 100644
index 000000000000..b8ef331e1545
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2018 John Crispin <john@phrozen.org>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+struct ipq4019_usb_phy {
+ struct device *dev;
+ struct phy *phy;
+ void __iomem *base;
+ struct reset_control *por_rst;
+ struct reset_control *srif_rst;
+};
+
+static int ipq4019_ss_phy_power_off(struct phy *_phy)
+{
+ struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
+
+ reset_control_assert(phy->por_rst);
+ msleep(10);
+
+ return 0;
+}
+
+static int ipq4019_ss_phy_power_on(struct phy *_phy)
+{
+ struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
+
+ ipq4019_ss_phy_power_off(_phy);
+
+ reset_control_deassert(phy->por_rst);
+
+ return 0;
+}
+
+static struct phy_ops ipq4019_usb_ss_phy_ops = {
+ .power_on = ipq4019_ss_phy_power_on,
+ .power_off = ipq4019_ss_phy_power_off,
+};
+
+static int ipq4019_hs_phy_power_off(struct phy *_phy)
+{
+ struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
+
+ reset_control_assert(phy->por_rst);
+ msleep(10);
+
+ reset_control_assert(phy->srif_rst);
+ msleep(10);
+
+ return 0;
+}
+
+static int ipq4019_hs_phy_power_on(struct phy *_phy)
+{
+ struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
+
+ ipq4019_hs_phy_power_off(_phy);
+
+ reset_control_deassert(phy->srif_rst);
+ msleep(10);
+
+ reset_control_deassert(phy->por_rst);
+
+ return 0;
+}
+
+static struct phy_ops ipq4019_usb_hs_phy_ops = {
+ .power_on = ipq4019_hs_phy_power_on,
+ .power_off = ipq4019_hs_phy_power_off,
+};
+
+static const struct of_device_id ipq4019_usb_phy_of_match[] = {
+ { .compatible = "qcom,usb-hs-ipq4019-phy", .data = &ipq4019_usb_hs_phy_ops},
+ { .compatible = "qcom,usb-ss-ipq4019-phy", .data = &ipq4019_usb_ss_phy_ops},
+ { },
+};
+MODULE_DEVICE_TABLE(of, ipq4019_usb_phy_of_match);
+
+static int ipq4019_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+ struct ipq4019_usb_phy *phy;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy->base)) {
+ dev_err(dev, "failed to remap register memory\n");
+ return PTR_ERR(phy->base);
+ }
+
+ phy->por_rst = devm_reset_control_get(phy->dev, "por_rst");
+ if (IS_ERR(phy->por_rst)) {
+ if (PTR_ERR(phy->por_rst) != -EPROBE_DEFER)
+ dev_err(dev, "POR reset is missing\n");
+ return PTR_ERR(phy->por_rst);
+ }
+
+ phy->srif_rst = devm_reset_control_get_optional(phy->dev, "srif_rst");
+ if (IS_ERR(phy->srif_rst))
+ return PTR_ERR(phy->srif_rst);
+
+ phy->phy = devm_phy_create(dev, NULL, of_device_get_match_data(dev));
+ if (IS_ERR(phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(phy->phy);
+ }
+ phy_set_drvdata(phy->phy, phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver ipq4019_usb_phy_driver = {
+ .probe = ipq4019_usb_phy_probe,
+ .driver = {
+ .of_match_table = ipq4019_usb_phy_of_match,
+ .name = "ipq4019-usb-phy",
+ }
+};
+module_platform_driver(ipq4019_usb_phy_driver);
+
+MODULE_DESCRIPTION("QCOM/IPQ4019 USB phy driver");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
+MODULE_LICENSE("GPL v2");
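
Note that both power_on callbacks deliberately invoke the matching power_off
first, so every power-on starts from an asserted-reset state before SRIF and
then POR are released. From a consumer, the generic sequence applies (sketch;
the connection name is illustrative):

    struct phy *phy = devm_phy_get(dev, "usb-phy"); /* hypothetical name */
    phy_power_on(phy);   /* asserts then deasserts the resets above */
    /* ... */
    phy_power_off(phy);
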
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index c190406246ab..e91040af3394 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -119,14 +119,17 @@ enum qphy_reg_layout {
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
};
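
Sizing every regs_layout array to QPHY_LAYOUT_SIZE, instead of letting the
initializer determine the length, guarantees that entries a given PHY does not
define are zero-filled, which is what lets later code treat a zero offset as
"register not present". A sketch of the idiom (illustrative only):

    static const unsigned int example_regs_layout[QPHY_LAYOUT_SIZE] = {
        [QPHY_SW_RESET] = 0x00,
        /* QPHY_PCS_POWER_DOWN_CONTROL left 0 => treated as absent */
    };

    if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
        /* use the per-PHY offset */;
    else
        /* fall back to the legacy QPHY_POWER_DOWN_CONTROL offset */;
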
-static const unsigned int msm8996_ufsphy_regs_layout[] = {
+static const unsigned int msm8996_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_START_CTRL] = 0x00,
[QPHY_PCS_READY_STATUS] = 0x168,
};
-static const unsigned int pciephy_regs_layout[] = {
+static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_SW_RESET] = 0x400,
[QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
[QPHY_COM_START_CONTROL] = 0x408,
@@ -142,7 +145,7 @@ static const unsigned int pciephy_regs_layout[] = {
[QPHY_PCS_STATUS] = 0x174,
};
-static const unsigned int usb3phy_regs_layout[] = {
+static const unsigned int usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_FLL_CNTRL1] = 0xc0,
[QPHY_FLL_CNTRL2] = 0xc4,
[QPHY_FLL_CNT_VAL_L] = 0xc8,
@@ -156,7 +159,7 @@ static const unsigned int usb3phy_regs_layout[] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
};
-static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
+static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
[QPHY_PCS_STATUS] = 0x174,
@@ -165,27 +168,34 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
};
-static const unsigned int sdm845_qmp_pciephy_regs_layout[] = {
+static const unsigned int sdm845_qmp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
[QPHY_PCS_STATUS] = 0x174,
};
-static const unsigned int sdm845_qhp_pciephy_regs_layout[] = {
+static const unsigned int sdm845_qhp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
[QPHY_PCS_STATUS] = 0x2ac,
};
-static const unsigned int sdm845_ufsphy_regs_layout[] = {
+static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+};
+
+static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_START_CTRL] = 0x00,
[QPHY_PCS_READY_STATUS] = 0x160,
};
-static const unsigned int sm8150_ufsphy_regs_layout[] = {
- [QPHY_START_CTRL] = QPHY_V4_PHY_START,
- [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_READY_STATUS,
- [QPHY_SW_RESET] = QPHY_V4_SW_RESET,
+static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = QPHY_V4_PCS_UFS_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_UFS_READY_STATUS,
+ [QPHY_SW_RESET] = QPHY_V4_PCS_UFS_SW_RESET,
};
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
@@ -1272,13 +1282,121 @@ static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
};
static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_RX_SIGDET_CTRL2, 0x6d),
- QMP_PHY_INIT_CFG(QPHY_V4_TX_LARGE_AMP_DRV_LVL, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_TX_SMALL_AMP_DRV_LVL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V4_TX_MID_TERM_CTRL1, 0x43),
- QMP_PHY_INIT_CFG(QPHY_V4_DEBUG_BUS_CLKSEL, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V4_RX_MIN_HIBERN8_TIME, 0xff),
- QMP_PHY_INIT_CFG(QPHY_V4_MULTI_LANE_CTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
};
/* struct qmp_phy_cfg - per-PHY initialization config */
@@ -1445,6 +1563,10 @@ static const char * const sdm845_pciephy_clk_l[] = {
"aux", "cfg_ahb", "ref", "refgen",
};
+static const char * const qmp_v4_phy_clk_l[] = {
+ "aux", "ref_clk_src", "ref", "com_aux",
+};
+
static const char * const sdm845_ufs_phy_clk_l[] = {
"ref", "ref_aux",
};
@@ -1458,6 +1580,10 @@ static const char * const msm8996_usb3phy_reset_l[] = {
"phy", "common",
};
+static const char * const sc7180_usb3phy_reset_l[] = {
+ "phy",
+};
+
static const char * const sdm845_pciephy_reset_l[] = {
"phy",
};
@@ -1671,6 +1797,37 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.is_dual_lane_phy = true,
};
+static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = sc7180_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -1798,6 +1955,37 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.is_dual_lane_phy = true,
};
+static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8150_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl),
+ .rx_tbl = sm8150_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl),
+ .pcs_tbl = sm8150_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -1880,11 +2068,18 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
}
- if (cfg->has_phy_com_ctrl)
+ if (cfg->has_phy_com_ctrl) {
qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
SW_PWRDN);
- else
- qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+ } else {
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+ qphy_setbits(pcs,
+ cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ else
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
/* Serdes configuration */
qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
@@ -2110,7 +2305,13 @@ static int qcom_qmp_phy_disable(struct phy *phy)
qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
/* Put PHY into POWER DOWN state: active low */
- qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ } else {
+ qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
if (cfg->has_lane_rst)
reset_control_assert(qphy->lane_rst);
@@ -2516,6 +2717,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,ipq8074-qmp-pcie-phy",
.data = &ipq8074_pciephy_cfg,
}, {
+ .compatible = "qcom,sc7180-qmp-usb3-phy",
+ .data = &sc7180_usb3phy_cfg,
+ }, {
.compatible = "qcom,sdm845-qhp-pcie-phy",
.data = &sdm845_qhp_pciephy_cfg,
}, {
@@ -2536,6 +2740,12 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,sm8150-qmp-ufs-phy",
.data = &sm8150_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-usb3-phy",
+ .data = &sm8150_usb3phy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index dece0e67704b..6d017a0c0c8d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -125,7 +125,7 @@
#define QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1DC
#define QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1E0
-/* Only for QMP V3 PHY - DP COM registers */
+/* Only for QMP V3 & V4 PHY - DP COM registers */
#define QPHY_V3_DP_COM_PHY_MODE_CTRL 0x00
#define QPHY_V3_DP_COM_SW_RESET 0x04
#define QPHY_V3_DP_COM_POWER_DOWN_CTRL 0x08
@@ -314,6 +314,14 @@
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
/* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V4_COM_SSC_PER1 0x01c
+#define QSERDES_V4_COM_SSC_PER2 0x020
+#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0 0x024
+#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0 0x028
+#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1 0x030
+#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1 0x034
+#define QSERDES_V4_COM_SYSCLK_BUF_ENABLE 0x050
#define QSERDES_V4_COM_PLL_IVCO 0x058
#define QSERDES_V4_COM_CMN_IPTRIM 0x060
#define QSERDES_V4_COM_CP_CTRL_MODE0 0x074
@@ -330,10 +338,22 @@
#define QSERDES_V4_COM_DEC_START_MODE0 0x0bc
#define QSERDES_V4_COM_LOCK_CMP2_MODE1 0x0b8
#define QSERDES_V4_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V4_COM_DIV_FRAC_START1_MODE0 0x0cc
+#define QSERDES_V4_COM_DIV_FRAC_START2_MODE0 0x0d0
+#define QSERDES_V4_COM_DIV_FRAC_START3_MODE0 0x0d4
+#define QSERDES_V4_COM_DIV_FRAC_START1_MODE1 0x0d8
+#define QSERDES_V4_COM_DIV_FRAC_START2_MODE1 0x0dc
+#define QSERDES_V4_COM_DIV_FRAC_START3_MODE1 0x0e0
#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V4_COM_VCO_TUNE1_MODE0 0x110
+#define QSERDES_V4_COM_VCO_TUNE2_MODE0 0x114
+#define QSERDES_V4_COM_VCO_TUNE1_MODE1 0x118
+#define QSERDES_V4_COM_VCO_TUNE2_MODE1 0x11c
#define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124
#define QSERDES_V4_COM_HSCLK_SEL 0x158
#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V4_COM_CORECLK_DIV_MODE1 0x16c
+#define QSERDES_V4_COM_SVS_MODE_CLK_SEL 0x184
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
@@ -341,12 +361,16 @@
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
/* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_RES_CODE_LANE_TX 0x34
+#define QSERDES_V4_TX_RES_CODE_LANE_RX 0x38
#define QSERDES_V4_TX_LANE_MODE_1 0x84
+#define QSERDES_V4_TX_RCV_DETECT_LVL_2 0x9c
#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdC
#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0xe0
#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0xe4
#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
+#define QSERDES_V4_TX_PI_QEC_CTRL 0x104
/* Only for QMP V4 PHY - RX registers */
#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
@@ -354,17 +378,27 @@
#define QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN 0x030
#define QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
#define QSERDES_V4_RX_UCDR_PI_CONTROLS 0x044
#define QSERDES_V4_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V4_RX_UCDR_SB2_THRESH1 0x04c
+#define QSERDES_V4_RX_UCDR_SB2_THRESH2 0x050
+#define QSERDES_V4_RX_UCDR_SB2_GAIN1 0x054
+#define QSERDES_V4_RX_UCDR_SB2_GAIN2 0x058
+#define QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE 0x060
#define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068
#define QSERDES_V4_RX_AC_JTAG_MODE 0x078
#define QSERDES_V4_RX_RX_TERM_BW 0x080
+#define QSERDES_V4_RX_VGA_CAL_CNTRL1 0x0d4
+#define QSERDES_V4_RX_VGA_CAL_CNTRL2 0x0d8
+#define QSERDES_V4_RX_GM_CAL 0x0dc
#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
#define QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW 0x0f8
#define QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
#define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
#define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
#define QSERDES_V4_RX_SIGDET_CNTRL 0x11c
#define QSERDES_V4_RX_SIGDET_LVL 0x120
@@ -385,29 +419,32 @@
#define QSERDES_V4_RX_RX_MODE_10_HIGH2 0x1a0
#define QSERDES_V4_RX_RX_MODE_10_HIGH3 0x1a4
#define QSERDES_V4_RX_RX_MODE_10_HIGH4 0x1a8
+#define QSERDES_V4_RX_DFE_EN_TIMER 0x1b4
+#define QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET 0x1b8
#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
+#define QSERDES_V4_RX_VTH_CODE 0x1c4
-/* Only for QMP V4 PHY - PCS registers */
-#define QPHY_V4_PHY_START 0x000
-#define QPHY_V4_POWER_DOWN_CONTROL 0x004
-#define QPHY_V4_SW_RESET 0x008
-#define QPHY_V4_TIMER_20US_CORECLK_STEPS_MSB 0x00c
-#define QPHY_V4_TIMER_20US_CORECLK_STEPS_LSB 0x010
-#define QPHY_V4_PLL_CNTL 0x02c
-#define QPHY_V4_TX_LARGE_AMP_DRV_LVL 0x030
-#define QPHY_V4_TX_SMALL_AMP_DRV_LVL 0x038
-#define QPHY_V4_BIST_FIXED_PAT_CTRL 0x060
-#define QPHY_V4_TX_HSGEAR_CAPABILITY 0x074
-#define QPHY_V4_RX_HSGEAR_CAPABILITY 0x0b4
-#define QPHY_V4_DEBUG_BUS_CLKSEL 0x124
-#define QPHY_V4_LINECFG_DISABLE 0x148
-#define QPHY_V4_RX_MIN_HIBERN8_TIME 0x150
-#define QPHY_V4_RX_SIGDET_CTRL2 0x158
-#define QPHY_V4_TX_PWM_GEAR_BAND 0x160
-#define QPHY_V4_TX_HS_GEAR_BAND 0x168
-#define QPHY_V4_PCS_READY_STATUS 0x180
-#define QPHY_V4_TX_MID_TERM_CTRL1 0x1d8
-#define QPHY_V4_MULTI_LANE_CTRL1 0x1e0
+/* Only for QMP V4 PHY - UFS PCS registers */
+#define QPHY_V4_PCS_UFS_PHY_START 0x000
+#define QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V4_PCS_UFS_SW_RESET 0x008
+#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V4_PCS_UFS_PLL_CNTL 0x02c
+#define QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V4_PCS_UFS_BIST_FIXED_PAT_CTRL 0x060
+#define QPHY_V4_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V4_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V4_PCS_UFS_LINECFG_DISABLE 0x148
+#define QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2 0x158
+#define QPHY_V4_PCS_UFS_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V4_PCS_UFS_TX_HS_GEAR_BAND 0x168
+#define QPHY_V4_PCS_UFS_READY_STATUS 0x180
+#define QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
/* PCIE GEN3 COM registers */
#define PCIE_GEN3_QHP_COM_SSC_EN_CENTER 0x14
@@ -523,4 +560,161 @@
#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5 0x16c
#define PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG 0x174
+/* Only for QMP V4 PHY - USB/PCIe PCS registers */
+#define QPHY_V4_PCS_SW_RESET 0x000
+#define QPHY_V4_PCS_REVISION_ID0 0x004
+#define QPHY_V4_PCS_REVISION_ID1 0x008
+#define QPHY_V4_PCS_REVISION_ID2 0x00c
+#define QPHY_V4_PCS_REVISION_ID3 0x010
+#define QPHY_V4_PCS_PCS_STATUS1 0x014
+#define QPHY_V4_PCS_PCS_STATUS2 0x018
+#define QPHY_V4_PCS_PCS_STATUS3 0x01c
+#define QPHY_V4_PCS_PCS_STATUS4 0x020
+#define QPHY_V4_PCS_PCS_STATUS5 0x024
+#define QPHY_V4_PCS_PCS_STATUS6 0x028
+#define QPHY_V4_PCS_PCS_STATUS7 0x02c
+#define QPHY_V4_PCS_DEBUG_BUS_0_STATUS 0x030
+#define QPHY_V4_PCS_DEBUG_BUS_1_STATUS 0x034
+#define QPHY_V4_PCS_DEBUG_BUS_2_STATUS 0x038
+#define QPHY_V4_PCS_DEBUG_BUS_3_STATUS 0x03c
+#define QPHY_V4_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V4_PCS_START_CONTROL 0x044
+#define QPHY_V4_PCS_INSIG_SW_CTRL1 0x048
+#define QPHY_V4_PCS_INSIG_SW_CTRL2 0x04c
+#define QPHY_V4_PCS_INSIG_SW_CTRL3 0x050
+#define QPHY_V4_PCS_INSIG_SW_CTRL4 0x054
+#define QPHY_V4_PCS_INSIG_SW_CTRL5 0x058
+#define QPHY_V4_PCS_INSIG_SW_CTRL6 0x05c
+#define QPHY_V4_PCS_INSIG_SW_CTRL7 0x060
+#define QPHY_V4_PCS_INSIG_SW_CTRL8 0x064
+#define QPHY_V4_PCS_INSIG_MX_CTRL1 0x068
+#define QPHY_V4_PCS_INSIG_MX_CTRL2 0x06c
+#define QPHY_V4_PCS_INSIG_MX_CTRL3 0x070
+#define QPHY_V4_PCS_INSIG_MX_CTRL4 0x074
+#define QPHY_V4_PCS_INSIG_MX_CTRL5 0x078
+#define QPHY_V4_PCS_INSIG_MX_CTRL7 0x07c
+#define QPHY_V4_PCS_INSIG_MX_CTRL8 0x080
+#define QPHY_V4_PCS_OUTSIG_SW_CTRL1 0x084
+#define QPHY_V4_PCS_OUTSIG_MX_CTRL1 0x088
+#define QPHY_V4_PCS_CLAMP_ENABLE 0x08c
+#define QPHY_V4_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V4_PCS_POWER_STATE_CONFIG2 0x094
+#define QPHY_V4_PCS_FLL_CNTRL1 0x098
+#define QPHY_V4_PCS_FLL_CNTRL2 0x09c
+#define QPHY_V4_PCS_FLL_CNT_VAL_L 0x0a0
+#define QPHY_V4_PCS_FLL_CNT_VAL_H_TOL 0x0a4
+#define QPHY_V4_PCS_FLL_MAN_CODE 0x0a8
+#define QPHY_V4_PCS_TEST_CONTROL1 0x0ac
+#define QPHY_V4_PCS_TEST_CONTROL2 0x0b0
+#define QPHY_V4_PCS_TEST_CONTROL3 0x0b4
+#define QPHY_V4_PCS_TEST_CONTROL4 0x0b8
+#define QPHY_V4_PCS_TEST_CONTROL5 0x0bc
+#define QPHY_V4_PCS_TEST_CONTROL6 0x0c0
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG4 0x0d0
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG5 0x0d4
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V4_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V4_PCS_REFGEN_REQ_CONFIG2 0x0e0
+#define QPHY_V4_PCS_REFGEN_REQ_CONFIG3 0x0e4
+#define QPHY_V4_PCS_BIST_CTRL 0x0e8
+#define QPHY_V4_PCS_PRBS_POLY0 0x0ec
+#define QPHY_V4_PCS_PRBS_POLY1 0x0f0
+#define QPHY_V4_PCS_FIXED_PAT0 0x0f4
+#define QPHY_V4_PCS_FIXED_PAT1 0x0f8
+#define QPHY_V4_PCS_FIXED_PAT2 0x0fc
+#define QPHY_V4_PCS_FIXED_PAT3 0x100
+#define QPHY_V4_PCS_FIXED_PAT4 0x104
+#define QPHY_V4_PCS_FIXED_PAT5 0x108
+#define QPHY_V4_PCS_FIXED_PAT6 0x10c
+#define QPHY_V4_PCS_FIXED_PAT7 0x110
+#define QPHY_V4_PCS_FIXED_PAT8 0x114
+#define QPHY_V4_PCS_FIXED_PAT9 0x118
+#define QPHY_V4_PCS_FIXED_PAT10 0x11c
+#define QPHY_V4_PCS_FIXED_PAT11 0x120
+#define QPHY_V4_PCS_FIXED_PAT12 0x124
+#define QPHY_V4_PCS_FIXED_PAT13 0x128
+#define QPHY_V4_PCS_FIXED_PAT14 0x12c
+#define QPHY_V4_PCS_FIXED_PAT15 0x130
+#define QPHY_V4_PCS_TXMGN_CONFIG 0x134
+#define QPHY_V4_PCS_G12S1_TXMGN_V0 0x138
+#define QPHY_V4_PCS_G12S1_TXMGN_V1 0x13c
+#define QPHY_V4_PCS_G12S1_TXMGN_V2 0x140
+#define QPHY_V4_PCS_G12S1_TXMGN_V3 0x144
+#define QPHY_V4_PCS_G12S1_TXMGN_V4 0x148
+#define QPHY_V4_PCS_G12S1_TXMGN_V0_RS 0x14c
+#define QPHY_V4_PCS_G12S1_TXMGN_V1_RS 0x150
+#define QPHY_V4_PCS_G12S1_TXMGN_V2_RS 0x154
+#define QPHY_V4_PCS_G12S1_TXMGN_V3_RS 0x158
+#define QPHY_V4_PCS_G12S1_TXMGN_V4_RS 0x15c
+#define QPHY_V4_PCS_G3S2_TXMGN_MAIN 0x160
+#define QPHY_V4_PCS_G3S2_TXMGN_MAIN_RS 0x164
+#define QPHY_V4_PCS_G12S1_TXDEEMPH_M6DB 0x168
+#define QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB 0x16c
+#define QPHY_V4_PCS_G3S2_PRE_GAIN 0x170
+#define QPHY_V4_PCS_G3S2_POST_GAIN 0x174
+#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET 0x178
+#define QPHY_V4_PCS_G3S2_PRE_GAIN_RS 0x17c
+#define QPHY_V4_PCS_G3S2_POST_GAIN_RS 0x180
+#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET_RS 0x184
+#define QPHY_V4_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V4_PCS_RX_SIGDET_DTCT_CNTRL 0x18c
+#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_V4_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V4_PCS_RATE_SLEW_CNTRL2 0x19c
+#define QPHY_V4_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x1a0
+#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L 0x1a4
+#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H 0x1a8
+#define QPHY_V4_PCS_TSYNC_RSYNC_TIME 0x1ac
+#define QPHY_V4_PCS_CDR_RESET_TIME 0x1b0
+#define QPHY_V4_PCS_TSYNC_DLY_TIME 0x1b4
+#define QPHY_V4_PCS_ELECIDLE_DLY_SEL 0x1b8
+#define QPHY_V4_PCS_CMN_ACK_OUT_SEL 0x1bc
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG3 0x1c8
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG4 0x1cc
+#define QPHY_V4_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V4_PCS_RX_IDLE_DTCT_CNTRL 0x1d4
+#define QPHY_V4_PCS_RX_DCC_CAL_CONFIG 0x1d8
+#define QPHY_V4_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V4_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V4_PCS_EQ_CONFIG3 0x1e4
+#define QPHY_V4_PCS_EQ_CONFIG4 0x1e8
+#define QPHY_V4_PCS_EQ_CONFIG5 0x1ec
+#define QPHY_V4_PCS_USB3_POWER_STATE_CONFIG1 0x300
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x304
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x308
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x30c
+#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x310
+#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x314
+#define QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x318
+#define QPHY_V4_PCS_USB3_LFPS_TX_ECSTART 0x31c
+#define QPHY_V4_PCS_USB3_LFPS_PER_TIMER_VAL 0x320
+#define QPHY_V4_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x324
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x328
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x32c
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x330
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x334
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x338
+#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x33c
+#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x340
+#define QPHY_V4_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x344
+#define QPHY_V4_PCS_USB3_ARCVR_DTCT_CM_DLY 0x348
+#define QPHY_V4_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x34c
+#define QPHY_V4_PCS_USB3_ALFPS_DEGLITCH_VAL 0x350
+#define QPHY_V4_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x354
+#define QPHY_V4_PCS_USB3_TEST_CONTROL 0x358
+
+/* Only for QMP V4 PHY - PCS_MISC registers */
+#define QPHY_V4_PCS_MISC_TYPEC_CTRL 0x00
+#define QPHY_V4_PCS_MISC_TYPEC_PWRDN_CTRL 0x04
+#define QPHY_V4_PCS_MISC_PCS_MISC_CONFIG1 0x08
+#define QPHY_V4_PCS_MISC_CLAMP_ENABLE 0x0c
+#define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10
+#define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
new file mode 100644
index 000000000000..4d74045271eb
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define USB2_PHY_USB_PHY_UTMI_CTRL0 (0x3c)
+#define SLEEPM BIT(0)
+#define OPMODE_MASK GENMASK(4, 3)
+#define OPMODE_NORMAL (0x00)
+#define OPMODE_NONDRIVING BIT(3)
+#define TERMSEL BIT(5)
+
+#define USB2_PHY_USB_PHY_UTMI_CTRL1 (0x40)
+#define XCVRSEL BIT(0)
+
+#define USB2_PHY_USB_PHY_UTMI_CTRL5 (0x50)
+#define POR BIT(1)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0 (0x54)
+#define RETENABLEN BIT(3)
+#define FSEL_MASK GENMASK(7, 5)
+#define FSEL_DEFAULT (0x3 << 4)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON1 (0x58)
+#define VBUSVLDEXTSEL0 BIT(4)
+#define PLLBTUNE BIT(5)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2 (0x5c)
+#define VREGBYPASS BIT(0)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL1 (0x60)
+#define VBUSVLDEXT0 BIT(0)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL2 (0x64)
+#define USB2_AUTO_RESUME BIT(0)
+#define USB2_SUSPEND_N BIT(2)
+#define USB2_SUSPEND_N_SEL BIT(3)
+
+#define USB2_PHY_USB_PHY_CFG0 (0x94)
+#define UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN BIT(0)
+#define UTMI_PHY_CMN_CTRL_OVERRIDE_EN BIT(1)
+
+#define USB2_PHY_USB_PHY_REFCLK_CTRL (0xa0)
+#define REFCLK_SEL_MASK GENMASK(1, 0)
+#define REFCLK_SEL_DEFAULT (0x2 << 0)
+
+static const char * const qcom_snps_hsphy_vreg_names[] = {
+ "vdda-pll", "vdda33", "vdda18",
+};
+
+#define SNPS_HS_NUM_VREGS ARRAY_SIZE(qcom_snps_hsphy_vreg_names)
+
+/**
+ * struct qcom_snps_hsphy - snps hs phy attributes
+ *
+ * @phy: generic phy
+ * @base: iomapped memory space for snps hs phy
+ *
+ * @cfg_ahb_clk: AHB2PHY interface clock
+ * @ref_clk: phy reference clock
+ * @phy_reset: phy reset control
+ * @vregs: regulator supplies bulk data
+ * @phy_initialized: if PHY has been initialized correctly
+ */
+struct qcom_snps_hsphy {
+ struct phy *phy;
+ void __iomem *base;
+
+ struct clk *cfg_ahb_clk;
+ struct clk *ref_clk;
+ struct reset_control *phy_reset;
+ struct regulator_bulk_data vregs[SNPS_HS_NUM_VREGS];
+
+ bool phy_initialized;
+};
+
+static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
+ u32 mask, u32 val)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~mask;
+ reg |= val & mask;
+ writel_relaxed(reg, base + offset);
+
+ /* Ensure above write is completed */
+ readl_relaxed(base + offset);
+}
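
The trailing readl_relaxed() forces the posted writel_relaxed() out to the
device before the helper returns. A hedged usage example matching the init
sequence below (register and bit names are from this patch):

    /* Set only the POR bit of UTMI_CTRL5, leaving the other bits intact. */
    qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
                               POR, POR);
    /* ... and later clear it again: */
    qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
                               POR, 0);
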
+
+static int qcom_snps_hsphy_init(struct phy *phy)
+{
+ struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
+ int ret;
+
+ dev_vdbg(&phy->dev, "%s(): Initializing SNPS HS phy\n", __func__);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
+ if (ret) {
+ dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+ goto poweroff_phy;
+ }
+
+ ret = reset_control_assert(hsphy->phy_reset);
+ if (ret) {
+ dev_err(&phy->dev, "failed to assert phy_reset, %d\n", ret);
+ goto disable_ahb_clk;
+ }
+
+ usleep_range(100, 150);
+
+ ret = reset_control_deassert(hsphy->phy_reset);
+ if (ret) {
+ dev_err(&phy->dev, "failed to de-assert phy_reset, %d\n", ret);
+ goto disable_ahb_clk;
+ }
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_CFG0,
+ UTMI_PHY_CMN_CTRL_OVERRIDE_EN,
+ UTMI_PHY_CMN_CTRL_OVERRIDE_EN);
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
+ POR, POR);
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0,
+ FSEL_MASK, 0);
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON1,
+ PLLBTUNE, PLLBTUNE);
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_REFCLK_CTRL,
+ REFCLK_SEL_MASK, REFCLK_SEL_DEFAULT);
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON1,
+ VBUSVLDEXTSEL0, VBUSVLDEXTSEL0);
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL1,
+ VBUSVLDEXT0, VBUSVLDEXT0);
+
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2,
+ VREGBYPASS, VREGBYPASS);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_UTMI_CTRL0,
+ SLEEPM, SLEEPM);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
+ POR, 0);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL, 0);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_CFG0,
+ UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0);
+
+ hsphy->phy_initialized = true;
+
+ return 0;
+
+disable_ahb_clk:
+ clk_disable_unprepare(hsphy->cfg_ahb_clk);
+poweroff_phy:
+ regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
+
+ return ret;
+}
+
+static int qcom_snps_hsphy_exit(struct phy *phy)
+{
+ struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
+
+ reset_control_assert(hsphy->phy_reset);
+ clk_disable_unprepare(hsphy->cfg_ahb_clk);
+ regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
+ hsphy->phy_initialized = false;
+
+ return 0;
+}
+
+static const struct phy_ops qcom_snps_hsphy_gen_ops = {
+ .init = qcom_snps_hsphy_init,
+ .exit = qcom_snps_hsphy_exit,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id qcom_snps_hsphy_of_match_table[] = {
+ { .compatible = "qcom,sm8150-usb-hs-phy", },
+ { .compatible = "qcom,usb-snps-hs-7nm-phy", },
+ { .compatible = "qcom,usb-snps-femto-v2-phy", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_of_match_table);
+
+static int qcom_snps_hsphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_snps_hsphy *hsphy;
+ struct phy_provider *phy_provider;
+ struct phy *generic_phy;
+ int ret, i;
+ int num;
+
+ hsphy = devm_kzalloc(dev, sizeof(*hsphy), GFP_KERNEL);
+ if (!hsphy)
+ return -ENOMEM;
+
+ hsphy->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hsphy->base))
+ return PTR_ERR(hsphy->base);
+
+ hsphy->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(hsphy->ref_clk)) {
+ ret = PTR_ERR(hsphy->ref_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get ref clk, %d\n", ret);
+ return ret;
+ }
+
+ hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(hsphy->phy_reset)) {
+ dev_err(dev, "failed to get phy core reset\n");
+ return PTR_ERR(hsphy->phy_reset);
+ }
+
+ num = ARRAY_SIZE(hsphy->vregs);
+ for (i = 0; i < num; i++)
+ hsphy->vregs[i].supply = qcom_snps_hsphy_vreg_names[i];
+
+ ret = devm_regulator_bulk_get(dev, num, hsphy->vregs);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ generic_phy = devm_phy_create(dev, NULL, &qcom_snps_hsphy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create phy, %d\n", ret);
+ return ret;
+ }
+ hsphy->phy = generic_phy;
+
+ dev_set_drvdata(dev, hsphy);
+ phy_set_drvdata(generic_phy, hsphy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_dbg(dev, "Registered Qcom-SNPS HS phy\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver qcom_snps_hsphy_driver = {
+ .probe = qcom_snps_hsphy_probe,
+ .driver = {
+ .name = "qcom-snps-hs-femto-v2-phy",
+ .of_match_table = qcom_snps_hsphy_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_snps_hsphy_driver);
+
+MODULE_DESCRIPTION("Qualcomm SNPS FEMTO USB HS PHY V2 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c
index 56a5083fe6f9..32be62e49804 100644
--- a/drivers/phy/samsung/phy-s5pv210-usb2.c
+++ b/drivers/phy/samsung/phy-s5pv210-usb2.c
@@ -139,6 +139,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
udelay(10);
rst &= ~rstbits;
writel(rst, drv->reg_phy + S5PV210_UPHYRST);
+ /*
+ * The following delay is necessary for the reset sequence to be
+ * completed.
+ */
+ udelay(80);
} else {
pwr = readl(drv->reg_phy + S5PV210_UPHYPWR);
pwr |= phypwr;
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 88a047b9fa6f..0a166d5a6414 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -77,6 +77,7 @@ static struct regmap_config serdes_am654_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
.fast_io = true,
+ .max_register = 0x1ffc,
};
static const struct reg_field cmu_master_cdn_o = REG_FIELD(CMU_R07C, 24, 24);
@@ -200,9 +201,91 @@ static int serdes_am654_power_off(struct phy *x)
return 0;
}
-static int serdes_am654_init(struct phy *x)
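+/* Write @val into bit field [a:b] of the SerDes register at @offset */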
+#define SERDES_AM654_CFG(offset, a, b, val) \
+ regmap_update_bits(phy->regmap, (offset),\
+ GENMASK((a), (b)), (val) << (b))
+
+static int serdes_am654_usb3_init(struct serdes_am654 *phy)
+{
+ SERDES_AM654_CFG(0x0000, 31, 24, 0x17);
+ SERDES_AM654_CFG(0x0004, 15, 8, 0x02);
+ SERDES_AM654_CFG(0x0004, 7, 0, 0x0e);
+ SERDES_AM654_CFG(0x0008, 23, 16, 0x2e);
+ SERDES_AM654_CFG(0x0008, 31, 24, 0x2e);
+ SERDES_AM654_CFG(0x0060, 7, 0, 0x4b);
+ SERDES_AM654_CFG(0x0060, 15, 8, 0x98);
+ SERDES_AM654_CFG(0x0060, 23, 16, 0x60);
+ SERDES_AM654_CFG(0x00d0, 31, 24, 0x45);
+ SERDES_AM654_CFG(0x00e8, 15, 8, 0x0e);
+ SERDES_AM654_CFG(0x0220, 7, 0, 0x34);
+ SERDES_AM654_CFG(0x0220, 15, 8, 0x34);
+ SERDES_AM654_CFG(0x0220, 31, 24, 0x37);
+ SERDES_AM654_CFG(0x0224, 7, 0, 0x37);
+ SERDES_AM654_CFG(0x0224, 15, 8, 0x37);
+ SERDES_AM654_CFG(0x0228, 23, 16, 0x37);
+ SERDES_AM654_CFG(0x0228, 31, 24, 0x37);
+ SERDES_AM654_CFG(0x022c, 7, 0, 0x37);
+ SERDES_AM654_CFG(0x022c, 15, 8, 0x37);
+ SERDES_AM654_CFG(0x0230, 15, 8, 0x2a);
+ SERDES_AM654_CFG(0x0230, 23, 16, 0x2a);
+ SERDES_AM654_CFG(0x0240, 23, 16, 0x10);
+ SERDES_AM654_CFG(0x0240, 31, 24, 0x34);
+ SERDES_AM654_CFG(0x0244, 7, 0, 0x40);
+ SERDES_AM654_CFG(0x0244, 23, 16, 0x34);
+ SERDES_AM654_CFG(0x0248, 15, 8, 0x0d);
+ SERDES_AM654_CFG(0x0258, 15, 8, 0x16);
+ SERDES_AM654_CFG(0x0258, 23, 16, 0x84);
+ SERDES_AM654_CFG(0x0258, 31, 24, 0xf2);
+ SERDES_AM654_CFG(0x025c, 7, 0, 0x21);
+ SERDES_AM654_CFG(0x0260, 7, 0, 0x27);
+ SERDES_AM654_CFG(0x0260, 15, 8, 0x04);
+ SERDES_AM654_CFG(0x0268, 15, 8, 0x04);
+ SERDES_AM654_CFG(0x0288, 15, 8, 0x2c);
+ SERDES_AM654_CFG(0x0330, 31, 24, 0xa0);
+ SERDES_AM654_CFG(0x0338, 23, 16, 0x03);
+ SERDES_AM654_CFG(0x0338, 31, 24, 0x00);
+ SERDES_AM654_CFG(0x033c, 7, 0, 0x00);
+ SERDES_AM654_CFG(0x0344, 31, 24, 0x18);
+ SERDES_AM654_CFG(0x034c, 7, 0, 0x18);
+ SERDES_AM654_CFG(0x039c, 23, 16, 0x3b);
+ SERDES_AM654_CFG(0x0a04, 7, 0, 0x03);
+ SERDES_AM654_CFG(0x0a14, 31, 24, 0x3c);
+ SERDES_AM654_CFG(0x0a18, 15, 8, 0x3c);
+ SERDES_AM654_CFG(0x0a38, 7, 0, 0x3e);
+ SERDES_AM654_CFG(0x0a38, 15, 8, 0x3e);
+ SERDES_AM654_CFG(0x0ae0, 7, 0, 0x07);
+ SERDES_AM654_CFG(0x0b6c, 23, 16, 0xcd);
+ SERDES_AM654_CFG(0x0b6c, 31, 24, 0x04);
+ SERDES_AM654_CFG(0x0b98, 23, 16, 0x03);
+ SERDES_AM654_CFG(0x1400, 7, 0, 0x3f);
+ SERDES_AM654_CFG(0x1404, 23, 16, 0x6f);
+ SERDES_AM654_CFG(0x1404, 31, 24, 0x6f);
+ SERDES_AM654_CFG(0x140c, 7, 0, 0x6f);
+ SERDES_AM654_CFG(0x140c, 15, 8, 0x6f);
+ SERDES_AM654_CFG(0x1410, 15, 8, 0x27);
+ SERDES_AM654_CFG(0x1414, 7, 0, 0x0c);
+ SERDES_AM654_CFG(0x1414, 23, 16, 0x07);
+ SERDES_AM654_CFG(0x1418, 23, 16, 0x40);
+ SERDES_AM654_CFG(0x141c, 7, 0, 0x00);
+ SERDES_AM654_CFG(0x141c, 15, 8, 0x1f);
+ SERDES_AM654_CFG(0x1428, 31, 24, 0x08);
+ SERDES_AM654_CFG(0x1434, 31, 24, 0x00);
+ SERDES_AM654_CFG(0x1444, 7, 0, 0x94);
+ SERDES_AM654_CFG(0x1460, 31, 24, 0x7f);
+ SERDES_AM654_CFG(0x1464, 7, 0, 0x43);
+ SERDES_AM654_CFG(0x1464, 23, 16, 0x6f);
+ SERDES_AM654_CFG(0x1464, 31, 24, 0x43);
+ SERDES_AM654_CFG(0x1484, 23, 16, 0x8f);
+ SERDES_AM654_CFG(0x1498, 7, 0, 0x4f);
+ SERDES_AM654_CFG(0x1498, 23, 16, 0x4f);
+ SERDES_AM654_CFG(0x007c, 31, 24, 0x0d);
+ SERDES_AM654_CFG(0x0b90, 15, 8, 0x0f);
+
+ return 0;
+}
+
+static int serdes_am654_pcie_init(struct serdes_am654 *phy)
{
- struct serdes_am654 *phy = phy_get_drvdata(x);
int ret;
ret = regmap_field_write(phy->config_version, VERSION);
@@ -220,11 +303,28 @@ static int serdes_am654_init(struct phy *x)
return 0;
}
+static int serdes_am654_init(struct phy *x)
+{
+ struct serdes_am654 *phy = phy_get_drvdata(x);
+
+ switch (phy->type) {
+ case PHY_TYPE_PCIE:
+ return serdes_am654_pcie_init(phy);
+ case PHY_TYPE_USB3:
+ return serdes_am654_usb3_init(phy);
+ default:
+ return -EINVAL;
+ }
+}
+
static int serdes_am654_reset(struct phy *x)
{
struct serdes_am654 *phy = phy_get_drvdata(x);
int ret;
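+ /* Power down the PLL and the TX/RX lanes before asserting POR */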
+ serdes_am654_disable_pll(phy);
+ serdes_am654_disable_txrx(phy);
+
ret = regmap_field_write(phy->por_en, 0x1);
if (ret)
return ret;
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 7b51045df783..30ea5b207285 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
+#include <dt-bindings/phy/phy.h>
#define WIZ_SERDES_CTRL 0x404
#define WIZ_SERDES_TOP_CTRL 0x408
@@ -78,6 +79,8 @@ static const struct reg_field p_enable[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(3), 30, 31),
};
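+/* WIZ_LANECTL lane-enable field values: DP lanes use P_ENABLE, other PHY types P_ENABLE_FORCE */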
+enum p_enable { P_ENABLE = 2, P_ENABLE_FORCE = 1, P_ENABLE_DISABLE = 0 };
+
static const struct reg_field p_align[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 29, 29),
REG_FIELD(WIZ_LANECTL(1), 29, 29),
@@ -220,6 +223,7 @@ struct wiz {
struct reset_controller_dev wiz_phy_reset_dev;
struct gpio_desc *gpio_typec_dir;
int typec_dir_delay;
+ u32 lane_phy_type[WIZ_MAX_LANES];
};
static int wiz_reset(struct wiz *wiz)
@@ -242,12 +246,17 @@ static int wiz_reset(struct wiz *wiz)
static int wiz_mode_select(struct wiz *wiz)
{
u32 num_lanes = wiz->num_lanes;
+ enum wiz_lane_standard_mode mode;
int ret;
int i;
for (i = 0; i < num_lanes; i++) {
- ret = regmap_field_write(wiz->p_standard_mode[i],
- LANE_MODE_GEN4);
+ if (wiz->lane_phy_type[i] == PHY_TYPE_DP)
+ mode = LANE_MODE_GEN1;
+ else
+ mode = LANE_MODE_GEN4;
+
+ ret = regmap_field_write(wiz->p_standard_mode[i], mode);
if (ret)
return ret;
}
@@ -707,7 +716,7 @@ static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
return ret;
}
- ret = regmap_field_write(wiz->p_enable[id - 1], false);
+ ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_DISABLE);
return ret;
}
@@ -734,7 +743,11 @@ static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
return ret;
}
- ret = regmap_field_write(wiz->p_enable[id - 1], true);
+ if (wiz->lane_phy_type[id - 1] == PHY_TYPE_DP)
+ ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE);
+ else
+ ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_FORCE);
+
return ret;
}
@@ -761,6 +774,40 @@ static const struct of_device_id wiz_id_table[] = {
};
MODULE_DEVICE_TABLE(of, wiz_id_table);
+static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+{
+ struct device_node *serdes, *subnode;
+
+ serdes = of_get_child_by_name(dev->of_node, "serdes");
+ if (!serdes) {
+ dev_err(dev, "%s: Getting \"serdes\"-node failed\n", __func__);
+ return -EINVAL;
+ }
+
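+ /*
+ * Each SERDES subnode claims "cdns,num-lanes" lanes starting at "reg";
+ * record the PHY type for every lane it covers.
+ */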
+ for_each_child_of_node(serdes, subnode) {
+ u32 reg, num_lanes = 1, phy_type = PHY_NONE;
+ int ret, i;
+
+ ret = of_property_read_u32(subnode, "reg", &reg);
+ if (ret) {
+ dev_err(dev,
+ "%s: Reading \"reg\" from \"%s\" failed: %d\n",
+ __func__, subnode->name, ret);
+ return ret;
+ }
+ of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
+ of_property_read_u32(subnode, "cdns,phy-type", &phy_type);
+
+ dev_dbg(dev, "%s: Lanes %u-%u have phy-type %u\n", __func__,
+ reg, reg + num_lanes - 1, phy_type);
+
+ for (i = reg; i < reg + num_lanes; i++)
+ wiz->lane_phy_type[i] = phy_type;
+ }
+
+ return 0;
+}
+
static int wiz_probe(struct platform_device *pdev)
{
struct reset_controller_dev *phy_reset_dev;
@@ -794,8 +841,10 @@ static int wiz_probe(struct platform_device *pdev)
}
base = devm_ioremap(dev, res.start, resource_size(&res));
- if (!base)
+ if (!base) {
+ ret = -ENOMEM;
goto err_addr_to_resource;
+ }
regmap = devm_regmap_init_mmio(dev, base, &wiz_regmap_config);
if (IS_ERR(regmap)) {
@@ -812,6 +861,7 @@ static int wiz_probe(struct platform_device *pdev)
if (num_lanes > WIZ_MAX_LANES) {
dev_err(dev, "Cannot support %d lanes\n", num_lanes);
+ ret = -ENODEV;
goto err_addr_to_resource;
}
@@ -844,6 +894,10 @@ static int wiz_probe(struct platform_device *pdev)
}
}
+ ret = wiz_get_lane_phy_types(dev, wiz);
+ if (ret)
+ return ret;
+
wiz->dev = dev;
wiz->regmap = regmap;
wiz->num_lanes = num_lanes;
@@ -897,6 +951,7 @@ static int wiz_probe(struct platform_device *pdev)
serdes_pdev = of_platform_device_create(child_node, NULL, dev);
if (!serdes_pdev) {
dev_WARN(dev, "Unable to create SERDES platform device\n");
+ ret = -ENOMEM;
goto err_pdev_create;
}
wiz->serdes_pdev = serdes_pdev;
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 3d74629d7423..cb2dd3230fa7 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * omap-usb2.c - USB PHY, talking to musb controller in OMAP.
+ * omap-usb2.c - USB PHY, talking to USB controller on TI SoCs.
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
@@ -23,13 +23,65 @@
#include <linux/regmap.h>
#include <linux/of_platform.h>
-#define USB2PHY_DISCON_BYP_LATCH (1 << 31)
-#define USB2PHY_ANA_CONFIG1 0x4c
+#define USB2PHY_ANA_CONFIG1 0x4c
+#define USB2PHY_DISCON_BYP_LATCH BIT(31)
+/* SoC Specific USB2_OTG register definitions */
#define AM654_USB2_OTG_PD BIT(8)
#define AM654_USB2_VBUS_DET_EN BIT(5)
#define AM654_USB2_VBUSVALID_DET_EN BIT(4)
+#define OMAP_DEV_PHY_PD BIT(0)
+#define OMAP_USB2_PHY_PD BIT(28)
+
+#define AM437X_USB2_PHY_PD BIT(0)
+#define AM437X_USB2_OTG_PD BIT(1)
+#define AM437X_USB2_OTGVDET_EN BIT(19)
+#define AM437X_USB2_OTGSESSEND_EN BIT(20)
+
+/* Driver Flags */
+#define OMAP_USB2_HAS_START_SRP BIT(0)
+#define OMAP_USB2_HAS_SET_VBUS BIT(1)
+#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT BIT(2)
+
+struct omap_usb {
+ struct usb_phy phy;
+ struct phy_companion *comparator;
+ void __iomem *pll_ctrl_base;
+ void __iomem *phy_base;
+ struct device *dev;
+ struct device *control_dev;
+ struct clk *wkupclk;
+ struct clk *optclk;
+ u8 flags;
+ struct regmap *syscon_phy_power; /* ctrl. reg. access */
+ unsigned int power_reg; /* power reg. index within syscon */
+ u32 mask;
+ u32 power_on;
+ u32 power_off;
+};
+
+#define phy_to_omapusb(x) container_of((x), struct omap_usb, phy)
+
+struct usb_phy_data {
+ const char *label;
+ u8 flags;
+ u32 mask;
+ u32 power_on;
+ u32 power_off;
+};
+
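+/* Raw register accessors: native-endian access, no implicit memory barriers */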
+static inline u32 omap_usb_readl(void __iomem *addr, unsigned int offset)
+{
+ return __raw_readl(addr + offset);
+}
+
+static inline void omap_usb_writel(void __iomem *addr, unsigned int offset,
+ u32 data)
+{
+ __raw_writel(data, addr + offset);
+}
+
/**
* omap_usb2_set_comparator - links the comparator present in the system with
* this phy
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 834c59950d1c..8828613c4e0e 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -82,7 +82,7 @@ config PINCTRL_AT91
config PINCTRL_AT91PIO4
bool "AT91 PIO4 pinctrl driver"
depends on OF
- depends on ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
select PINMUX
select GENERIC_PINCONF
select GPIOLIB
@@ -95,6 +95,7 @@ config PINCTRL_AT91PIO4
config PINCTRL_AMD
tristate "AMD GPIO pin control"
depends on HAS_IOMEM
+ depends on ACPI || COMPILE_TEST
select GPIOLIB
select GPIOLIB_IRQCHIP
select PINMUX
@@ -172,15 +173,22 @@ config PINCTRL_GEMINI
select GENERIC_PINCONF
select MFD_SYSCON
+config PINCTRL_MCP23S08_I2C
+ tristate
+ select REGMAP_I2C
+
+config PINCTRL_MCP23S08_SPI
+ tristate
+ select REGMAP_SPI
+
config PINCTRL_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
depends on SPI_MASTER || I2C
- depends on I2C || I2C=n
select GPIOLIB
select GPIOLIB_IRQCHIP
- select REGMAP_I2C if I2C
- select REGMAP_SPI if SPI_MASTER
select GENERIC_PINCONF
+ select PINCTRL_MCP23S08_I2C if I2C
+ select PINCTRL_MCP23S08_SPI if SPI_MASTER
help
SPI/I2C driver for Microchip MCP23S08 / MCP23S17 / MCP23S18 /
MCP23008 / MCP23017 / MCP23018 I/O expanders.
@@ -435,6 +443,7 @@ config PINCTRL_TB10X
config PINCTRL_EQUILIBRIUM
tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
depends on OF && HAS_IOMEM
+ depends on X86 || COMPILE_TEST
select PINMUX
select PINCONF
select GPIOLIB
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 0b36a1cfca8a..1731b2154df9 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o
obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o
+obj-$(CONFIG_PINCTRL_MCP23S08_I2C) += pinctrl-mcp23s08_i2c.o
+obj-$(CONFIG_PINCTRL_MCP23S08_SPI) += pinctrl-mcp23s08_spi.o
obj-$(CONFIG_PINCTRL_MCP23S08) += pinctrl-mcp23s08.o
obj-$(CONFIG_PINCTRL_MESON) += meson/
obj-$(CONFIG_PINCTRL_OXNAS) += pinctrl-oxnas.o
diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c
index 47a4ccd9fed4..f579a6593f37 100644
--- a/drivers/pinctrl/actions/pinctrl-s700.c
+++ b/drivers/pinctrl/actions/pinctrl-s700.c
@@ -1435,7 +1435,7 @@ static const char * const sd2_groups[] = {
static const char * const i2c0_groups[] = {
"uart0_rx_mfp",
"uart0_tx_mfp",
- "i2c0_mfp_mfp",
+ "i2c0_mfp",
};
static const char * const i2c1_groups[] = {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index f690fc5cd688..71e666178300 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1406,7 +1406,7 @@ static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->reg_base)) {
dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
- return -ENODEV;
+ return PTR_ERR(pdata->reg_base);
}
/* Initialize the dynamic part of pinctrl_desc */
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 06bd2b70af3c..1d21129f7751 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -76,6 +77,7 @@
struct bcm2835_pinctrl {
struct device *dev;
void __iomem *base;
+ int *wake_irq;
/* note: locking assumes each bank will have its own unsigned long */
unsigned long enabled_irq_map[BCM2835_NUM_BANKS];
@@ -435,6 +437,11 @@ static void bcm2835_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(host_chip, desc);
}
+static irqreturn_t bcm2835_gpio_wake_irq_handler(int irq, void *dev_id)
+{
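+ /* Nothing to do here; the interrupt exists only to wake the system */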
+ return IRQ_HANDLED;
+}
+
static inline void __bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
unsigned reg, unsigned offset, bool enable)
{
@@ -634,6 +641,34 @@ static void bcm2835_gpio_irq_ack(struct irq_data *data)
bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
}
+static int bcm2835_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
+ unsigned gpio = irqd_to_hwirq(data);
+ unsigned int irqgroup;
+ int ret = -EINVAL;
+
+ if (!pc->wake_irq)
+ return ret;
+
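+ /* Map the GPIO to the wake IRQ of its bank: 0-27, 28-45 and 46-57 */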
+ if (gpio <= 27)
+ irqgroup = 0;
+ else if (gpio >= 28 && gpio <= 45)
+ irqgroup = 1;
+ else if (gpio >= 46 && gpio <= 57)
+ irqgroup = 2;
+ else
+ return ret;
+
+ if (on)
+ ret = enable_irq_wake(pc->wake_irq[irqgroup]);
+ else
+ ret = disable_irq_wake(pc->wake_irq[irqgroup]);
+
+ return ret;
+}
+
static struct irq_chip bcm2835_gpio_irq_chip = {
.name = MODULE_NAME,
.irq_enable = bcm2835_gpio_irq_enable,
@@ -642,6 +677,8 @@ static struct irq_chip bcm2835_gpio_irq_chip = {
.irq_ack = bcm2835_gpio_irq_ack,
.irq_mask = bcm2835_gpio_irq_disable,
.irq_unmask = bcm2835_gpio_irq_enable,
+ .irq_set_wake = bcm2835_gpio_irq_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
@@ -1137,6 +1174,10 @@ static const struct of_device_id bcm2835_pinctrl_match[] = {
.compatible = "brcm,bcm2711-gpio",
.data = &bcm2711_plat_data,
},
+ {
+ .compatible = "brcm,bcm7211-gpio",
+ .data = &bcm2711_plat_data,
+ },
{}
};
@@ -1150,6 +1191,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
struct resource iomem;
int err, i;
const struct of_device_id *match;
+ int is_7211 = 0;
BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_pins) != BCM2711_NUM_GPIOS);
BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_groups) != BCM2711_NUM_GPIOS);
@@ -1176,6 +1218,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
pdata = match->data;
+ is_7211 = of_device_is_compatible(np, "brcm,bcm7211-gpio");
pc->gpio_chip = *pdata->gpio_chip;
pc->gpio_chip.parent = dev;
@@ -1210,6 +1253,15 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
+
+ if (is_7211) {
+ pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
+ sizeof(*pc->wake_irq),
+ GFP_KERNEL);
+ if (!pc->wake_irq)
+ return -ENOMEM;
+ }
+
/*
* Use the same handler for all groups: this is necessary
* since we use one gpiochip to cover all lines - the
@@ -1217,8 +1269,34 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
* bank that was firing the IRQ and look up the per-group
* and bank data.
*/
- for (i = 0; i < BCM2835_NUM_IRQS; i++)
+ for (i = 0; i < BCM2835_NUM_IRQS; i++) {
+ int len;
+ char *name;
+
girq->parents[i] = irq_of_parse_and_map(np, i);
+ if (!is_7211)
+ continue;
+
+ /* Skip over the "all banks" interrupt */
+ pc->wake_irq[i] = irq_of_parse_and_map(np, i +
+ BCM2835_NUM_IRQS + 1);
+
+ len = strlen(dev_name(pc->dev)) + 16;
+ name = devm_kzalloc(pc->dev, len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i);
+
+ /* These are optional interrupts */
+ err = devm_request_irq(dev, pc->wake_irq[i],
+ bcm2835_gpio_wake_irq_handler,
+ IRQF_SHARED, name, pc);
+ if (err)
+ dev_warn(dev, "unable to request wake IRQ %d\n",
+ pc->wake_irq[i]);
+ }
+
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index c784663b00ad..4ca44dd69e53 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -165,6 +165,13 @@ config PINCTRL_IMX8QXP
help
Say Y here to enable the imx8qxp pinctrl driver
+config PINCTRL_IMX8DXL
+ bool "IMX8DXL pinctrl driver"
+ depends on IMX_SCU && ARCH_MXC && ARM64
+ select PINCTRL_IMX_SCU
+ help
+ Say Y here to enable the imx8dxl pinctrl driver
+
config PINCTRL_VF610
bool "Freescale Vybrid VF610 pinctrl driver"
depends on SOC_VF610
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 0ebd3af21e4d..c61722565289 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PINCTRL_IMX8MP) += pinctrl-imx8mp.o
obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o
obj-$(CONFIG_PINCTRL_IMX8QM) += pinctrl-imx8qm.o
obj-$(CONFIG_PINCTRL_IMX8QXP) += pinctrl-imx8qxp.o
+obj-$(CONFIG_PINCTRL_IMX8DXL) += pinctrl-imx8dxl.o
obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 9f42036c5fbb..cb7e0f08d2cf 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -774,16 +774,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
return 0;
}
-/*
- * imx_free_resources() - free memory used by this driver
- * @info: info driver instance
- */
-static void imx_free_resources(struct imx_pinctrl *ipctl)
-{
- if (ipctl->pctl)
- pinctrl_unregister(ipctl->pctl);
-}
-
int imx_pinctrl_probe(struct platform_device *pdev,
const struct imx_pinctrl_soc_info *info)
{
@@ -834,12 +824,13 @@ int imx_pinctrl_probe(struct platform_device *pdev,
return -EINVAL;
}
- ipctl->input_sel_base = of_iomap(np, 0);
+ ipctl->input_sel_base = devm_of_iomap(&pdev->dev, np,
+ 0, NULL);
of_node_put(np);
- if (!ipctl->input_sel_base) {
+ if (IS_ERR(ipctl->input_sel_base)) {
dev_err(&pdev->dev,
"iomuxc input select base address not found\n");
- return -ENOMEM;
+ return PTR_ERR(ipctl->input_sel_base);
}
}
}
@@ -874,23 +865,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
&ipctl->pctl);
if (ret) {
dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
- goto free;
+ return ret;
}
ret = imx_pinctrl_probe_dt(pdev, ipctl);
if (ret) {
dev_err(&pdev->dev, "fail to probe dt properties\n");
- goto free;
+ return ret;
}
dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
return pinctrl_enable(ipctl->pctl);
-
-free:
- imx_free_resources(ipctl);
-
- return ret;
}
static int __maybe_unused imx_pinctrl_suspend(struct device *dev)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index c00d0022d311..08d110078c43 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -60,7 +60,7 @@ struct imx1_pinctrl {
/*
* IMX1 IOMUXC manages the pins based on ports. Each port has 32 pins. IOMUX
- * control register are seperated into function, output configuration, input
+ * control registers are separated into function, output configuration, input
* configuration A, input configuration B, GPIO in use and data direction.
*
* Those controls that are represented by 1 bit have a direct mapping between
@@ -638,7 +638,6 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (ret) {
- pinctrl_unregister(ipctl->pctl);
dev_err(&pdev->dev, "Failed to populate subdevices\n");
return ret;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
new file mode 100644
index 000000000000..7f32e57b7f6a
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019~2020 NXP
+ */
+
+#include <dt-bindings/pinctrl/pads-imx8dxl.h>
+#include <linux/err.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+static const struct pinctrl_pin_desc imx8dxl_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(IMX8DXL_PCIE_CTRL0_PERST_B),
+ IMX_PINCTRL_PIN(IMX8DXL_PCIE_CTRL0_CLKREQ_B),
+ IMX_PINCTRL_PIN(IMX8DXL_PCIE_CTRL0_WAKE_B),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP),
+ IMX_PINCTRL_PIN(IMX8DXL_USB_SS3_TC0),
+ IMX_PINCTRL_PIN(IMX8DXL_USB_SS3_TC1),
+ IMX_PINCTRL_PIN(IMX8DXL_USB_SS3_TC2),
+ IMX_PINCTRL_PIN(IMX8DXL_USB_SS3_TC3),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_CLK),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_CMD),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA0),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA1),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA2),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA3),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA4),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA5),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA6),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA7),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_STROBE),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_RESET_B),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0),
+ IMX_PINCTRL_PIN(IMX8DXL_USDHC1_RESET_B),
+ IMX_PINCTRL_PIN(IMX8DXL_USDHC1_VSELECT),
+ IMX_PINCTRL_PIN(IMX8DXL_CTL_NAND_RE_P_N),
+ IMX_PINCTRL_PIN(IMX8DXL_USDHC1_WP),
+ IMX_PINCTRL_PIN(IMX8DXL_USDHC1_CD_B),
+ IMX_PINCTRL_PIN(IMX8DXL_CTL_NAND_DQS_P_N),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXC),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TX_CTL),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXD0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXD1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXD2),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXD3),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXC),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RX_CTL),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXD0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXD1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXD2),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXD3),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_REFCLK_125M_25M),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_MDIO),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_MDC),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXC),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXD2),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TX_CTL),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXD3),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXC),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXD3),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXD2),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXD1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXD0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXD1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXD0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RX_CTL),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_REFCLK_125M_25M),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_SCK),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_SDO),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_SDI),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_CS0),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_CS1),
+ IMX_PINCTRL_PIN(IMX8DXL_MCLK_IN1),
+ IMX_PINCTRL_PIN(IMX8DXL_MCLK_IN0),
+ IMX_PINCTRL_PIN(IMX8DXL_MCLK_OUT0),
+ IMX_PINCTRL_PIN(IMX8DXL_UART1_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART1_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART1_RTS_B),
+ IMX_PINCTRL_PIN(IMX8DXL_UART1_CTS_B),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_SCK),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_SDI),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_SDO),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_CS1),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_CS0),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN1),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN0),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN3),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN2),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN5),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN4),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN0_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN0_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN1_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN1_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN2_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN2_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART0_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART0_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART2_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART2_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH),
+ IMX_PINCTRL_PIN(IMX8DXL_JTAG_TRST_B),
+ IMX_PINCTRL_PIN(IMX8DXL_PMIC_I2C_SCL),
+ IMX_PINCTRL_PIN(IMX8DXL_PMIC_I2C_SDA),
+ IMX_PINCTRL_PIN(IMX8DXL_PMIC_INT_B),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_GPIO0_01),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_PMIC_STANDBY),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_BOOT_MODE1),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_BOOT_MODE0),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_BOOT_MODE2),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_OUT1),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_OUT2),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_OUT3),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_OUT4),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_IN0),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_IN1),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_IN2),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_IN3),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI1_SCK),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI1_SDO),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI1_SDI),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI1_CS0),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DATA1),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DATA0),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DATA3),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DATA2),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_SS0_B),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DQS),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_SCLK),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_SCLK),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DQS),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DATA1),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DATA0),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DATA3),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DATA2),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_SS0_B),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B)
+};
+
+static struct imx_pinctrl_soc_info imx8dxl_pinctrl_info = {
+ .pins = imx8dxl_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8dxl_pinctrl_pads),
+ .flags = IMX_USE_SCU,
+};
+
+static const struct of_device_id imx8dxl_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8dxl-iomuxc", },
+ { /* sentinel */ }
+};
+
+static int imx8dxl_pinctrl_probe(struct platform_device *pdev)
+{
+ int ret;
+
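+ /* Pad control on i.MX8DXL is handled by the SCU firmware over IPC */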
+ ret = imx_pinctrl_sc_ipc_init(pdev);
+ if (ret)
+ return ret;
+
+ return imx_pinctrl_probe(pdev, &imx8dxl_pinctrl_info);
+}
+
+static struct platform_driver imx8dxl_pinctrl_driver = {
+ .driver = {
+ .name = "fsl,imx8dxl-iomuxc",
+ .of_match_table = of_match_ptr(imx8dxl_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx8dxl_pinctrl_probe,
+};
+
+static int __init imx8dxl_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8dxl_pinctrl_driver);
+}
+arch_initcall(imx8dxl_pinctrl_init);
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index ee440ec4c94c..787833e343a4 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -111,6 +111,14 @@ config PINCTRL_ICELAKE
This pinctrl driver provides an interface that allows configuring
of Intel Ice Lake PCH pins and using them as GPIOs.
+config PINCTRL_JASPERLAKE
+ tristate "Intel Jasper Lake PCH pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Jasper Lake PCH pins and using them as GPIOs.
+
config PINCTRL_LEWISBURG
tristate "Intel Lewisburg pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index f60f99cfa7aa..f6f63eb8100f 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o
obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o
obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
+obj-$(CONFIG_PINCTRL_JASPERLAKE) += pinctrl-jasperlake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index b409642f168d..0ff7c55173da 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1286,6 +1286,7 @@ static const struct gpio_chip byt_gpio_chip = {
.direction_output = byt_gpio_direction_output,
.get = byt_gpio_get,
.set = byt_gpio_set,
+ .set_config = gpiochip_generic_config,
.dbg_show = byt_gpio_dbg_show,
};
@@ -1505,8 +1506,7 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
{
struct platform_device *pdev = to_platform_device(vg->dev);
struct gpio_chip *gc;
- struct resource *irq_rc;
- int ret;
+ int irq, ret;
/* Set up gpio chip */
vg->chip = byt_gpio_chip;
@@ -1526,8 +1526,8 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
#endif
/* set up interrupts */
- irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq_rc && irq_rc->start) {
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq > 0) {
struct gpio_irq_chip *girq;
vg->irqchip.name = "BYT-GPIO",
@@ -1547,7 +1547,7 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
- girq->parents[0] = (unsigned int)irq_rc->start;
+ girq->parents[0] = irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
}
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index f51b27bbf9f1..515f57a0d180 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -30,8 +30,6 @@
.gpio_base = (g), \
}
-#define CNL_NO_GPIO -1
-
#define CNL_COMMUNITY(b, s, e, o, g) \
{ \
.barno = (b), \
@@ -377,27 +375,27 @@ static const struct intel_padgroup cnlh_community0_gpps[] = {
};
static const struct intel_padgroup cnlh_community1_gpps[] = {
- CNL_GPP(0, 51, 74, 64), /* GPP_C */
- CNL_GPP(1, 75, 98, 96), /* GPP_D */
- CNL_GPP(2, 99, 106, 128), /* GPP_G */
- CNL_GPP(3, 107, 114, CNL_NO_GPIO), /* AZA */
- CNL_GPP(4, 115, 146, 160), /* vGPIO_0 */
- CNL_GPP(5, 147, 154, CNL_NO_GPIO), /* vGPIO_1 */
+ CNL_GPP(0, 51, 74, 64), /* GPP_C */
+ CNL_GPP(1, 75, 98, 96), /* GPP_D */
+ CNL_GPP(2, 99, 106, 128), /* GPP_G */
+ CNL_GPP(3, 107, 114, INTEL_GPIO_BASE_NOMAP), /* AZA */
+ CNL_GPP(4, 115, 146, 160), /* vGPIO_0 */
+ CNL_GPP(5, 147, 154, INTEL_GPIO_BASE_NOMAP), /* vGPIO_1 */
};
static const struct intel_padgroup cnlh_community3_gpps[] = {
- CNL_GPP(0, 155, 178, 192), /* GPP_K */
- CNL_GPP(1, 179, 202, 224), /* GPP_H */
- CNL_GPP(2, 203, 215, 256), /* GPP_E */
- CNL_GPP(3, 216, 239, 288), /* GPP_F */
- CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */
+ CNL_GPP(0, 155, 178, 192), /* GPP_K */
+ CNL_GPP(1, 179, 202, 224), /* GPP_H */
+ CNL_GPP(2, 203, 215, 256), /* GPP_E */
+ CNL_GPP(3, 216, 239, 288), /* GPP_F */
+ CNL_GPP(4, 240, 248, INTEL_GPIO_BASE_NOMAP), /* SPI */
};
static const struct intel_padgroup cnlh_community4_gpps[] = {
- CNL_GPP(0, 249, 259, CNL_NO_GPIO), /* CPU */
- CNL_GPP(1, 260, 268, CNL_NO_GPIO), /* JTAG */
- CNL_GPP(2, 269, 286, 320), /* GPP_I */
- CNL_GPP(3, 287, 298, 352), /* GPP_J */
+ CNL_GPP(0, 249, 259, INTEL_GPIO_BASE_NOMAP), /* CPU */
+ CNL_GPP(1, 260, 268, INTEL_GPIO_BASE_NOMAP), /* JTAG */
+ CNL_GPP(2, 269, 286, 320), /* GPP_I */
+ CNL_GPP(3, 287, 298, 352), /* GPP_J */
};
static const unsigned int cnlh_spi0_pins[] = { 40, 41, 42, 43 };
@@ -790,25 +788,25 @@ static const struct intel_function cnllp_functions[] = {
};
static const struct intel_padgroup cnllp_community0_gpps[] = {
- CNL_GPP(0, 0, 24, 0), /* GPP_A */
- CNL_GPP(1, 25, 50, 32), /* GPP_B */
- CNL_GPP(2, 51, 58, 64), /* GPP_G */
- CNL_GPP(3, 59, 67, CNL_NO_GPIO), /* SPI */
+ CNL_GPP(0, 0, 24, 0), /* GPP_A */
+ CNL_GPP(1, 25, 50, 32), /* GPP_B */
+ CNL_GPP(2, 51, 58, 64), /* GPP_G */
+ CNL_GPP(3, 59, 67, INTEL_GPIO_BASE_NOMAP), /* SPI */
};
static const struct intel_padgroup cnllp_community1_gpps[] = {
- CNL_GPP(0, 68, 92, 96), /* GPP_D */
- CNL_GPP(1, 93, 116, 128), /* GPP_F */
- CNL_GPP(2, 117, 140, 160), /* GPP_H */
- CNL_GPP(3, 141, 172, 192), /* vGPIO */
- CNL_GPP(4, 173, 180, 224), /* vGPIO */
+ CNL_GPP(0, 68, 92, 96), /* GPP_D */
+ CNL_GPP(1, 93, 116, 128), /* GPP_F */
+ CNL_GPP(2, 117, 140, 160), /* GPP_H */
+ CNL_GPP(3, 141, 172, 192), /* vGPIO */
+ CNL_GPP(4, 173, 180, 224), /* vGPIO */
};
static const struct intel_padgroup cnllp_community4_gpps[] = {
- CNL_GPP(0, 181, 204, 256), /* GPP_C */
- CNL_GPP(1, 205, 228, 288), /* GPP_E */
- CNL_GPP(2, 229, 237, CNL_NO_GPIO), /* JTAG */
- CNL_GPP(3, 238, 243, CNL_NO_GPIO), /* HVCMOS */
+ CNL_GPP(0, 181, 204, 256), /* GPP_C */
+ CNL_GPP(1, 205, 228, 288), /* GPP_E */
+ CNL_GPP(2, 229, 237, INTEL_GPIO_BASE_NOMAP), /* JTAG */
+ CNL_GPP(3, 238, 243, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
};
static const struct intel_community cnllp_communities[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 4c74fdde576d..8e3953a223d0 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -35,18 +35,18 @@
#define CHV_PADCTRL0 0x000
#define CHV_PADCTRL0_INTSEL_SHIFT 28
-#define CHV_PADCTRL0_INTSEL_MASK (0xf << CHV_PADCTRL0_INTSEL_SHIFT)
+#define CHV_PADCTRL0_INTSEL_MASK GENMASK(31, 28)
#define CHV_PADCTRL0_TERM_UP BIT(23)
#define CHV_PADCTRL0_TERM_SHIFT 20
-#define CHV_PADCTRL0_TERM_MASK (7 << CHV_PADCTRL0_TERM_SHIFT)
+#define CHV_PADCTRL0_TERM_MASK GENMASK(22, 20)
#define CHV_PADCTRL0_TERM_20K 1
#define CHV_PADCTRL0_TERM_5K 2
#define CHV_PADCTRL0_TERM_1K 4
#define CHV_PADCTRL0_PMODE_SHIFT 16
-#define CHV_PADCTRL0_PMODE_MASK (0xf << CHV_PADCTRL0_PMODE_SHIFT)
+#define CHV_PADCTRL0_PMODE_MASK GENMASK(19, 16)
#define CHV_PADCTRL0_GPIOEN BIT(15)
#define CHV_PADCTRL0_GPIOCFG_SHIFT 8
-#define CHV_PADCTRL0_GPIOCFG_MASK (7 << CHV_PADCTRL0_GPIOCFG_SHIFT)
+#define CHV_PADCTRL0_GPIOCFG_MASK GENMASK(10, 8)
#define CHV_PADCTRL0_GPIOCFG_GPIO 0
#define CHV_PADCTRL0_GPIOCFG_GPO 1
#define CHV_PADCTRL0_GPIOCFG_GPI 2
@@ -57,58 +57,17 @@
#define CHV_PADCTRL1 0x004
#define CHV_PADCTRL1_CFGLOCK BIT(31)
#define CHV_PADCTRL1_INVRXTX_SHIFT 4
-#define CHV_PADCTRL1_INVRXTX_MASK (0xf << CHV_PADCTRL1_INVRXTX_SHIFT)
-#define CHV_PADCTRL1_INVRXTX_TXENABLE (2 << CHV_PADCTRL1_INVRXTX_SHIFT)
+#define CHV_PADCTRL1_INVRXTX_MASK GENMASK(7, 4)
+#define CHV_PADCTRL1_INVRXTX_RXDATA BIT(6)
+#define CHV_PADCTRL1_INVRXTX_TXENABLE BIT(5)
#define CHV_PADCTRL1_ODEN BIT(3)
-#define CHV_PADCTRL1_INVRXTX_RXDATA (4 << CHV_PADCTRL1_INVRXTX_SHIFT)
-#define CHV_PADCTRL1_INTWAKECFG_MASK 7
+#define CHV_PADCTRL1_INTWAKECFG_MASK GENMASK(2, 0)
#define CHV_PADCTRL1_INTWAKECFG_FALLING 1
#define CHV_PADCTRL1_INTWAKECFG_RISING 2
#define CHV_PADCTRL1_INTWAKECFG_BOTH 3
#define CHV_PADCTRL1_INTWAKECFG_LEVEL 4
/**
- * struct chv_alternate_function - A per group or per pin alternate function
- * @pin: Pin number (only used in per pin configs)
- * @mode: Mode the pin should be set in
- * @invert_oe: Invert OE for this pin
- */
-struct chv_alternate_function {
- unsigned int pin;
- u8 mode;
- bool invert_oe;
-};
-
-/**
- * struct chv_pincgroup - describes a CHV pin group
- * @name: Name of the group
- * @pins: An array of pins in this group
- * @npins: Number of pins in this group
- * @altfunc: Alternate function applied to all pins in this group
- * @overrides: Alternate function override per pin or %NULL if not used
- * @noverrides: Number of per pin alternate function overrides if
- * @overrides != NULL.
- */
-struct chv_pingroup {
- const char *name;
- const unsigned int *pins;
- size_t npins;
- struct chv_alternate_function altfunc;
- const struct chv_alternate_function *overrides;
- size_t noverrides;
-};
-
-/**
- * struct chv_gpio_pinrange - A range of pins that can be used as GPIOs
- * @base: Start pin number
- * @npins: Number of pins in this range
- */
-struct chv_gpio_pinrange {
- unsigned int base;
- unsigned int npins;
-};
-
-/**
* struct chv_community - A community specific configuration
* @uid: ACPI _UID used to match the community
* @pins: All pins in this community
@@ -117,8 +76,8 @@ struct chv_gpio_pinrange {
* @ngroups: Number of groups
* @functions: All functions in this community
* @nfunctions: Number of functions
- * @gpio_ranges: An array of GPIO ranges in this community
- * @ngpio_ranges: Number of GPIO ranges
+ * @gpps: Pad groups
+ * @ngpps: Number of pad groups in this community
* @nirqs: Total number of IRQs this community can generate
* @acpi_space_id: An address space ID for ACPI OpRegion handler
*/
@@ -126,12 +85,12 @@ struct chv_community {
const char *uid;
const struct pinctrl_pin_desc *pins;
size_t npins;
- const struct chv_pingroup *groups;
+ const struct intel_pingroup *groups;
size_t ngroups;
const struct intel_function *functions;
size_t nfunctions;
- const struct chv_gpio_pinrange *gpio_ranges;
- size_t ngpio_ranges;
+ const struct intel_padgroup *gpps;
+ size_t ngpps;
size_t nirqs;
acpi_adr_space_type acpi_space_id;
};
@@ -173,37 +132,14 @@ struct chv_pinctrl {
struct chv_pin_context *saved_pin_context;
};
-#define ALTERNATE_FUNCTION(p, m, i) \
- { \
- .pin = (p), \
- .mode = (m), \
- .invert_oe = (i), \
- }
-
-#define PIN_GROUP_WITH_ALT(n, p, m, i) \
- { \
- .name = (n), \
- .pins = (p), \
- .npins = ARRAY_SIZE((p)), \
- .altfunc.mode = (m), \
- .altfunc.invert_oe = (i), \
- }
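+/* Encode a pad mode plus an OE-invert flag into a single pin mode value */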
+#define PINMODE_INVERT_OE BIT(15)
-#define PIN_GROUP_WITH_OVERRIDE(n, p, m, i, o) \
- { \
- .name = (n), \
- .pins = (p), \
- .npins = ARRAY_SIZE((p)), \
- .altfunc.mode = (m), \
- .altfunc.invert_oe = (i), \
- .overrides = (o), \
- .noverrides = ARRAY_SIZE((o)), \
- }
+#define PINMODE(m, i) ((m) | ((i) * PINMODE_INVERT_OE))
-#define GPIO_PINRANGE(start, end) \
+#define CHV_GPP(start, end) \
{ \
.base = (start), \
- .npins = (end) - (start) + 1, \
+ .size = (end) - (start) + 1, \
}
static const struct pinctrl_pin_desc southwest_pins[] = {
@@ -288,40 +224,37 @@ static const unsigned southwest_i2c6_pins[] = { 47, 51 };
static const unsigned southwest_i2c_nfc_pins[] = { 49, 52 };
static const unsigned southwest_spi3_pins[] = { 76, 79, 80, 81, 82 };
-/* LPE I2S TXD pins need to have invert_oe set */
-static const struct chv_alternate_function southwest_lpe_altfuncs[] = {
- ALTERNATE_FUNCTION(30, 1, true),
- ALTERNATE_FUNCTION(34, 1, true),
- ALTERNATE_FUNCTION(97, 1, true),
+/* Some of the LPE I2S TXD pins need to have OE inversion set */
+static const unsigned int southwest_lpe_altfuncs[] = {
+ PINMODE(1, 1), PINMODE(1, 0), PINMODE(1, 0), PINMODE(1, 0), /* 30, 31, 32, 33 */
+ PINMODE(1, 1), PINMODE(1, 0), PINMODE(1, 0), PINMODE(1, 0), /* 34, 35, 36, 37 */
+ PINMODE(1, 0), PINMODE(1, 0), PINMODE(1, 0), PINMODE(1, 1), /* 92, 94, 96, 97 */
};
/*
* Two spi3 chipselects are available in a different mode than the main spi3
- * functionality, which is using mode 1.
+ * functionality, which is using mode 2.
*/
-static const struct chv_alternate_function southwest_spi3_altfuncs[] = {
- ALTERNATE_FUNCTION(76, 3, false),
- ALTERNATE_FUNCTION(80, 3, false),
+static const unsigned int southwest_spi3_altfuncs[] = {
+ PINMODE(3, 0), PINMODE(2, 0), PINMODE(3, 0), PINMODE(2, 0), /* 76, 79, 80, 81 */
+ PINMODE(2, 0), /* 82 */
};
-static const struct chv_pingroup southwest_groups[] = {
- PIN_GROUP_WITH_ALT("uart0_grp", southwest_uart0_pins, 2, false),
- PIN_GROUP_WITH_ALT("uart1_grp", southwest_uart1_pins, 1, false),
- PIN_GROUP_WITH_ALT("uart2_grp", southwest_uart2_pins, 1, false),
- PIN_GROUP_WITH_ALT("hda_grp", southwest_hda_pins, 2, false),
- PIN_GROUP_WITH_ALT("i2c0_grp", southwest_i2c0_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c1_grp", southwest_i2c1_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c2_grp", southwest_i2c2_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c3_grp", southwest_i2c3_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c4_grp", southwest_i2c4_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c5_grp", southwest_i2c5_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c6_grp", southwest_i2c6_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c_nfc_grp", southwest_i2c_nfc_pins, 2, true),
-
- PIN_GROUP_WITH_OVERRIDE("lpe_grp", southwest_lpe_pins, 1, false,
- southwest_lpe_altfuncs),
- PIN_GROUP_WITH_OVERRIDE("spi3_grp", southwest_spi3_pins, 2, false,
- southwest_spi3_altfuncs),
+static const struct intel_pingroup southwest_groups[] = {
+ PIN_GROUP("uart0_grp", southwest_uart0_pins, PINMODE(2, 0)),
+ PIN_GROUP("uart1_grp", southwest_uart1_pins, PINMODE(1, 0)),
+ PIN_GROUP("uart2_grp", southwest_uart2_pins, PINMODE(1, 0)),
+ PIN_GROUP("hda_grp", southwest_hda_pins, PINMODE(2, 0)),
+ PIN_GROUP("i2c0_grp", southwest_i2c0_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c1_grp", southwest_i2c1_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c2_grp", southwest_i2c2_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c3_grp", southwest_i2c3_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c4_grp", southwest_i2c4_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c5_grp", southwest_i2c5_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c6_grp", southwest_i2c6_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c_nfc_grp", southwest_i2c_nfc_pins, PINMODE(2, 1)),
+ PIN_GROUP("lpe_grp", southwest_lpe_pins, southwest_lpe_altfuncs),
+ PIN_GROUP("spi3_grp", southwest_spi3_pins, southwest_spi3_altfuncs),
};
static const char * const southwest_uart0_groups[] = { "uart0_grp" };
@@ -360,14 +293,14 @@ static const struct intel_function southwest_functions[] = {
FUNCTION("spi3", southwest_spi3_groups),
};
-static const struct chv_gpio_pinrange southwest_gpio_ranges[] = {
- GPIO_PINRANGE(0, 7),
- GPIO_PINRANGE(15, 22),
- GPIO_PINRANGE(30, 37),
- GPIO_PINRANGE(45, 52),
- GPIO_PINRANGE(60, 67),
- GPIO_PINRANGE(75, 82),
- GPIO_PINRANGE(90, 97),
+static const struct intel_padgroup southwest_gpps[] = {
+ CHV_GPP(0, 7),
+ CHV_GPP(15, 22),
+ CHV_GPP(30, 37),
+ CHV_GPP(45, 52),
+ CHV_GPP(60, 67),
+ CHV_GPP(75, 82),
+ CHV_GPP(90, 97),
};
static const struct chv_community southwest_community = {
@@ -378,8 +311,8 @@ static const struct chv_community southwest_community = {
.ngroups = ARRAY_SIZE(southwest_groups),
.functions = southwest_functions,
.nfunctions = ARRAY_SIZE(southwest_functions),
- .gpio_ranges = southwest_gpio_ranges,
- .ngpio_ranges = ARRAY_SIZE(southwest_gpio_ranges),
+ .gpps = southwest_gpps,
+ .ngpps = ARRAY_SIZE(southwest_gpps),
/*
* Southwest community can generate GPIO interrupts only for the
* first 8 interrupts. The upper half (8-15) can only be used to
@@ -455,20 +388,20 @@ static const struct pinctrl_pin_desc north_pins[] = {
PINCTRL_PIN(72, "PANEL0_VDDEN"),
};
-static const struct chv_gpio_pinrange north_gpio_ranges[] = {
- GPIO_PINRANGE(0, 8),
- GPIO_PINRANGE(15, 27),
- GPIO_PINRANGE(30, 41),
- GPIO_PINRANGE(45, 56),
- GPIO_PINRANGE(60, 72),
+static const struct intel_padgroup north_gpps[] = {
+ CHV_GPP(0, 8),
+ CHV_GPP(15, 27),
+ CHV_GPP(30, 41),
+ CHV_GPP(45, 56),
+ CHV_GPP(60, 72),
};
static const struct chv_community north_community = {
.uid = "2",
.pins = north_pins,
.npins = ARRAY_SIZE(north_pins),
- .gpio_ranges = north_gpio_ranges,
- .ngpio_ranges = ARRAY_SIZE(north_gpio_ranges),
+ .gpps = north_gpps,
+ .ngpps = ARRAY_SIZE(north_gpps),
/*
* North community can generate GPIO interrupts only for the first
* 8 interrupts. The upper half (8-15) can only be used to trigger
@@ -506,17 +439,17 @@ static const struct pinctrl_pin_desc east_pins[] = {
PINCTRL_PIN(26, "MF_ISH_I2C1_SDA"),
};
-static const struct chv_gpio_pinrange east_gpio_ranges[] = {
- GPIO_PINRANGE(0, 11),
- GPIO_PINRANGE(15, 26),
+static const struct intel_padgroup east_gpps[] = {
+ CHV_GPP(0, 11),
+ CHV_GPP(15, 26),
};
static const struct chv_community east_community = {
.uid = "3",
.pins = east_pins,
.npins = ARRAY_SIZE(east_pins),
- .gpio_ranges = east_gpio_ranges,
- .ngpio_ranges = ARRAY_SIZE(east_gpio_ranges),
+ .gpps = east_gpps,
+ .ngpps = ARRAY_SIZE(east_gpps),
.nirqs = 16,
.acpi_space_id = 0x93,
};
@@ -596,14 +529,14 @@ static const unsigned southeast_sdmmc3_pins[] = {
static const unsigned southeast_spi1_pins[] = { 60, 61, 62, 64, 66 };
static const unsigned southeast_spi2_pins[] = { 2, 3, 4, 6, 7 };
-static const struct chv_pingroup southeast_groups[] = {
- PIN_GROUP_WITH_ALT("pwm0_grp", southeast_pwm0_pins, 1, false),
- PIN_GROUP_WITH_ALT("pwm1_grp", southeast_pwm1_pins, 1, false),
- PIN_GROUP_WITH_ALT("sdmmc1_grp", southeast_sdmmc1_pins, 1, false),
- PIN_GROUP_WITH_ALT("sdmmc2_grp", southeast_sdmmc2_pins, 1, false),
- PIN_GROUP_WITH_ALT("sdmmc3_grp", southeast_sdmmc3_pins, 1, false),
- PIN_GROUP_WITH_ALT("spi1_grp", southeast_spi1_pins, 1, false),
- PIN_GROUP_WITH_ALT("spi2_grp", southeast_spi2_pins, 4, false),
+static const struct intel_pingroup southeast_groups[] = {
+ PIN_GROUP("pwm0_grp", southeast_pwm0_pins, PINMODE(1, 0)),
+ PIN_GROUP("pwm1_grp", southeast_pwm1_pins, PINMODE(1, 0)),
+ PIN_GROUP("sdmmc1_grp", southeast_sdmmc1_pins, PINMODE(1, 0)),
+ PIN_GROUP("sdmmc2_grp", southeast_sdmmc2_pins, PINMODE(1, 0)),
+ PIN_GROUP("sdmmc3_grp", southeast_sdmmc3_pins, PINMODE(1, 0)),
+ PIN_GROUP("spi1_grp", southeast_spi1_pins, PINMODE(1, 0)),
+ PIN_GROUP("spi2_grp", southeast_spi2_pins, PINMODE(4, 0)),
};
static const char * const southeast_pwm0_groups[] = { "pwm0_grp" };
@@ -624,13 +557,13 @@ static const struct intel_function southeast_functions[] = {
FUNCTION("spi2", southeast_spi2_groups),
};
-static const struct chv_gpio_pinrange southeast_gpio_ranges[] = {
- GPIO_PINRANGE(0, 7),
- GPIO_PINRANGE(15, 26),
- GPIO_PINRANGE(30, 35),
- GPIO_PINRANGE(45, 52),
- GPIO_PINRANGE(60, 69),
- GPIO_PINRANGE(75, 85),
+static const struct intel_padgroup southeast_gpps[] = {
+ CHV_GPP(0, 7),
+ CHV_GPP(15, 26),
+ CHV_GPP(30, 35),
+ CHV_GPP(45, 52),
+ CHV_GPP(60, 69),
+ CHV_GPP(75, 85),
};
static const struct chv_community southeast_community = {
@@ -641,8 +574,8 @@ static const struct chv_community southeast_community = {
.ngroups = ARRAY_SIZE(southeast_groups),
.functions = southeast_functions,
.nfunctions = ARRAY_SIZE(southeast_functions),
- .gpio_ranges = southeast_gpio_ranges,
- .ngpio_ranges = ARRAY_SIZE(southeast_gpio_ranges),
+ .gpps = southeast_gpps,
+ .ngpps = ARRAY_SIZE(southeast_gpps),
.nirqs = 16,
.acpi_space_id = 0x94,
};
@@ -789,7 +722,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int function, unsigned int group)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- const struct chv_pingroup *grp;
+ const struct intel_pingroup *grp;
unsigned long flags;
int i;
@@ -808,22 +741,21 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
}
for (i = 0; i < grp->npins; i++) {
- const struct chv_alternate_function *altfunc = &grp->altfunc;
int pin = grp->pins[i];
void __iomem *reg;
+ unsigned int mode;
+ bool invert_oe;
u32 value;
/* Check if there is pin-specific config */
- if (grp->overrides) {
- int j;
-
- for (j = 0; j < grp->noverrides; j++) {
- if (grp->overrides[j].pin == pin) {
- altfunc = &grp->overrides[j];
- break;
- }
- }
- }
+ if (grp->modes)
+ mode = grp->modes[i];
+ else
+ mode = grp->mode;
+
+ /* Extract OE inversion */
+ invert_oe = mode & PINMODE_INVERT_OE;
+ mode &= ~PINMODE_INVERT_OE;
reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
value = readl(reg);
@@ -831,18 +763,18 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
value &= ~CHV_PADCTRL0_GPIOEN;
/* Set to desired mode */
value &= ~CHV_PADCTRL0_PMODE_MASK;
- value |= altfunc->mode << CHV_PADCTRL0_PMODE_SHIFT;
+ value |= mode << CHV_PADCTRL0_PMODE_SHIFT;
chv_writel(value, reg);
/* Update for invert_oe */
reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);
value = readl(reg) & ~CHV_PADCTRL1_INVRXTX_MASK;
- if (altfunc->invert_oe)
+ if (invert_oe)
value |= CHV_PADCTRL1_INVRXTX_TXENABLE;
chv_writel(value, reg);
dev_dbg(pctrl->dev, "configured pin %u mode %u OE %sinverted\n",
- pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
+ pin, mode, invert_oe ? "" : "not ");
}
raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -1479,11 +1411,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long pending;
+ unsigned long flags;
u32 intr_line;
chained_irq_enter(chip, desc);
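+ /* Take a snapshot of the pending interrupts while holding chv_lock */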
+ raw_spin_lock_irqsave(&chv_lock, flags);
pending = readl(pctrl->regs + CHV_INTSTAT);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
unsigned int irq, offset;
@@ -1590,14 +1526,14 @@ static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
{
struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
const struct chv_community *community = pctrl->community;
- const struct chv_gpio_pinrange *range;
+ const struct intel_padgroup *gpp;
int ret, i;
- for (i = 0; i < community->ngpio_ranges; i++) {
- range = &community->gpio_ranges[i];
+ for (i = 0; i < community->ngpps; i++) {
+ gpp = &community->gpps[i];
ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev),
- range->base, range->base,
- range->npins);
+ gpp->base, gpp->base,
+ gpp->size);
if (ret) {
dev_err(pctrl->dev, "failed to add GPIO pin range\n");
return ret;
@@ -1609,7 +1545,7 @@ static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
{
- const struct chv_gpio_pinrange *range;
+ const struct intel_padgroup *gpp;
struct gpio_chip *chip = &pctrl->chip;
bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
const struct chv_community *community = pctrl->community;
@@ -1657,12 +1593,12 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
}
if (!need_valid_mask) {
- for (i = 0; i < community->ngpio_ranges; i++) {
- range = &community->gpio_ranges[i];
+ for (i = 0; i < community->ngpps; i++) {
+ gpp = &community->gpps[i];
irq_domain_associate_many(chip->irq.domain, irq_base,
- range->base, range->npins);
- irq_base += range->npins;
+ gpp->base, gpp->size);
+ irq_base += gpp->size;
}
}
diff --git a/drivers/pinctrl/intel/pinctrl-icelake.c b/drivers/pinctrl/intel/pinctrl-icelake.c
index 6489e9bbb61f..429b5a83acf0 100644
--- a/drivers/pinctrl/intel/pinctrl-icelake.c
+++ b/drivers/pinctrl/intel/pinctrl-icelake.c
@@ -29,8 +29,6 @@
.gpio_base = (g), \
}
-#define ICL_NO_GPIO -1
-
#define ICL_COMMUNITY(b, s, e, g) \
{ \
.barno = (b), \
@@ -305,29 +303,29 @@ static const struct pinctrl_pin_desc icllp_pins[] = {
};
static const struct intel_padgroup icllp_community0_gpps[] = {
- ICL_GPP(0, 0, 7, 0), /* GPP_G */
- ICL_GPP(1, 8, 33, 32), /* GPP_B */
- ICL_GPP(2, 34, 58, 64), /* GPP_A */
+ ICL_GPP(0, 0, 7, 0), /* GPP_G */
+ ICL_GPP(1, 8, 33, 32), /* GPP_B */
+ ICL_GPP(2, 34, 58, 64), /* GPP_A */
};
static const struct intel_padgroup icllp_community1_gpps[] = {
- ICL_GPP(0, 59, 82, 96), /* GPP_H */
- ICL_GPP(1, 83, 103, 128), /* GPP_D */
- ICL_GPP(2, 104, 123, 160), /* GPP_F */
- ICL_GPP(3, 124, 152, 192), /* vGPIO */
+ ICL_GPP(0, 59, 82, 96), /* GPP_H */
+ ICL_GPP(1, 83, 103, 128), /* GPP_D */
+ ICL_GPP(2, 104, 123, 160), /* GPP_F */
+ ICL_GPP(3, 124, 152, 192), /* vGPIO */
};
static const struct intel_padgroup icllp_community4_gpps[] = {
- ICL_GPP(0, 153, 176, 224), /* GPP_C */
- ICL_GPP(1, 177, 182, ICL_NO_GPIO), /* HVCMOS */
- ICL_GPP(2, 183, 206, 256), /* GPP_E */
- ICL_GPP(3, 207, 215, ICL_NO_GPIO), /* JTAG */
+ ICL_GPP(0, 153, 176, 224), /* GPP_C */
+ ICL_GPP(1, 177, 182, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
+ ICL_GPP(2, 183, 206, 256), /* GPP_E */
+ ICL_GPP(3, 207, 215, INTEL_GPIO_BASE_NOMAP), /* JTAG */
};
static const struct intel_padgroup icllp_community5_gpps[] = {
- ICL_GPP(0, 216, 223, 288), /* GPP_R */
- ICL_GPP(1, 224, 231, 320), /* GPP_S */
- ICL_GPP(2, 232, 240, ICL_NO_GPIO), /* SPI */
+ ICL_GPP(0, 216, 223, 288), /* GPP_R */
+ ICL_GPP(1, 224, 231, 320), /* GPP_S */
+ ICL_GPP(2, 232, 240, INTEL_GPIO_BASE_NOMAP), /* SPI */
};
static const struct intel_community icllp_communities[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 74fdfd2b9ff5..6a274e20d926 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -798,7 +798,7 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
for (j = 0; j < comm->ngpps; j++) {
const struct intel_padgroup *pgrp = &comm->gpps[j];
- if (pgrp->gpio_base < 0)
+ if (pgrp->gpio_base == INTEL_GPIO_BASE_NOMAP)
continue;
if (offset >= pgrp->gpio_base &&
@@ -1138,7 +1138,7 @@ static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
for (i = 0; i < community->ngpps; i++) {
const struct intel_padgroup *gpp = &community->gpps[i];
- if (gpp->gpio_base < 0)
+ if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
continue;
ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
@@ -1180,7 +1180,7 @@ static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
for (j = 0; j < community->ngpps; j++) {
const struct intel_padgroup *gpp = &community->gpps[j];
- if (gpp->gpio_base < 0)
+ if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
continue;
if (gpp->gpio_base + gpp->size > ngpio)
@@ -1276,8 +1276,18 @@ static int intel_pinctrl_add_padgroups(struct intel_pinctrl *pctrl,
if (gpps[i].size > 32)
return -EINVAL;
- if (!gpps[i].gpio_base)
- gpps[i].gpio_base = gpps[i].base;
+ /* Special treatment for GPIO base */
+ switch (gpps[i].gpio_base) {
+ case INTEL_GPIO_BASE_MATCH:
+ gpps[i].gpio_base = gpps[i].base;
+ break;
+ case INTEL_GPIO_BASE_ZERO:
+ gpps[i].gpio_base = 0;
+ break;
+ case INTEL_GPIO_BASE_NOMAP:
+ default:
+ break;
+ }
gpps[i].padown_num = padown_num;
@@ -1596,7 +1606,7 @@ static void intel_restore_hostown(struct intel_pinctrl *pctrl, unsigned int c,
struct device *dev = pctrl->dev;
u32 requested;
- if (padgrp->gpio_base < 0)
+ if (padgrp->gpio_base == INTEL_GPIO_BASE_NOMAP)
return;
requested = intel_gpio_is_requested(&pctrl->chip, padgrp->gpio_base, padgrp->size);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index c6f066f6d3fb..cc78c483518f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -53,8 +53,7 @@ struct intel_function {
* @reg_num: GPI_IS register number
* @base: Starting pin of this group
* @size: Size of this group (maximum is 32).
- * @gpio_base: Starting GPIO base of this group (%0 if matches with @base,
- * and %-1 if no GPIO mapping should be created)
+ * @gpio_base: Starting GPIO base of this group
* @padown_num: PAD_OWN register number (assigned by the core driver)
*
* If pad groups of a community are not the same size, use this structure
@@ -69,6 +68,19 @@ struct intel_padgroup {
};
/**
+ * enum - Special treatment for GPIO base in pad group
+ *
+ * @INTEL_GPIO_BASE_ZERO: force GPIO base to be 0
+ * @INTEL_GPIO_BASE_NOMAP: no GPIO mapping should be created
+ * @INTEL_GPIO_BASE_MATCH: matches with starting pin number
+ */
+enum {
+ INTEL_GPIO_BASE_ZERO = -2,
+ INTEL_GPIO_BASE_NOMAP = -1,
+ INTEL_GPIO_BASE_MATCH = 0,
+};
+
+/**
* struct intel_community - Intel pin community description
* @barno: MMIO BAR number where registers for this community reside
* @padown_offset: Register offset of PAD_OWN register from @regs. If %0
@@ -82,20 +94,20 @@ struct intel_padgroup {
* @ie_offset: Register offset of GPI_IE from @regs.
* @features: Additional features supported by the hardware
* @pin_base: Starting pin of pins in this community
+ * @npins: Number of pins in this community
* @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
- * HOSTSW_OWN, GPI_IS, GPI_IE, etc. Used when @gpps is %NULL.
+ * HOSTSW_OWN, GPI_IS, GPI_IE. Used when @gpps is %NULL.
* @gpp_num_padown_regs: Number of pad registers each pad group consumes at
* minimum. Use %0 if the number of registers can be
* determined by the size of the group.
- * @npins: Number of pins in this community
* @gpps: Pad groups if the controller has variable size pad groups
* @ngpps: Number of pad groups in this community
* @pad_map: Optional non-linear mapping of the pads
* @regs: Community specific common registers (reserved for core driver)
* @pad_regs: Community specific pad registers (reserved for core driver)
*
- * Most Intel GPIO host controllers this driver supports each pad group is
- * of equal size (except the last one). In that case the driver can just
+ * In some of the Intel GPIO host controllers this driver supports, each pad
+ * group is of equal size (except the last one). In that case the driver can just
 * fill in the @gpp_size field and let the core driver handle the rest. If
* the controller has pad groups of variable size the client driver can
* pass custom @gpps and @ngpps instead.
@@ -109,12 +121,13 @@ struct intel_community {
unsigned int ie_offset;
unsigned int features;
unsigned int pin_base;
+ size_t npins;
unsigned int gpp_size;
unsigned int gpp_num_padown_regs;
- size_t npins;
const struct intel_padgroup *gpps;
size_t ngpps;
const unsigned int *pad_map;
+
/* Reserved for the core driver */
void __iomem *regs;
void __iomem *pad_regs;
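
The enum added above replaces the old 0/-1 magic values for @gpio_base with named constants. A hypothetical pad-group table (register numbers, pin numbers and sizes invented for illustration) showing all three constants side by side:

	static const struct intel_padgroup example_gpps[] = {
		/* GPIO numbers track pin numbers: resolved to .base at probe */
		{ .reg_num = 0, .base =  0, .size = 24, .gpio_base = INTEL_GPIO_BASE_MATCH },
		/* GPIO numbering restarts at 0 for this group */
		{ .reg_num = 1, .base = 24, .size =  8, .gpio_base = INTEL_GPIO_BASE_ZERO },
		/* JTAG-style pads: no GPIO mapping is created */
		{ .reg_num = 2, .base = 32, .size =  9, .gpio_base = INTEL_GPIO_BASE_NOMAP },
	};

Per the switch added to intel_pinctrl_add_padgroups() above, MATCH and ZERO are rewritten to concrete GPIO numbers at probe time, while NOMAP entries are left as-is and skipped by the gpio_base == INTEL_GPIO_BASE_NOMAP checks throughout the core driver.
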
diff --git a/drivers/pinctrl/intel/pinctrl-jasperlake.c b/drivers/pinctrl/intel/pinctrl-jasperlake.c
new file mode 100644
index 000000000000..9bd0e8e6310c
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-jasperlake.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Jasper Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define JSL_PAD_OWN 0x020
+#define JSL_PADCFGLOCK 0x080
+#define JSL_HOSTSW_OWN 0x0b0
+#define JSL_GPI_IS 0x100
+#define JSL_GPI_IE 0x120
+
+#define JSL_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define JSL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = JSL_PAD_OWN, \
+ .padcfglock_offset = JSL_PADCFGLOCK, \
+ .hostown_offset = JSL_HOSTSW_OWN, \
+ .is_offset = JSL_GPI_IS, \
+ .ie_offset = JSL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Jasper Lake */
+static const struct pinctrl_pin_desc jsl_pins[] = {
+ /* GPP_F */
+ PINCTRL_PIN(0, "CNV_BRI_DT_UART0_RTSB"),
+ PINCTRL_PIN(1, "CNV_BRI_RSP_UART0_RXD"),
+ PINCTRL_PIN(2, "EMMC_HIP_MON"),
+ PINCTRL_PIN(3, "CNV_RGI_RSP_UART0_CTSB"),
+ PINCTRL_PIN(4, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(5, "MODEM_CLKREQ"),
+ PINCTRL_PIN(6, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(7, "EMMC_CMD"),
+ PINCTRL_PIN(8, "EMMC_DATA0"),
+ PINCTRL_PIN(9, "EMMC_DATA1"),
+ PINCTRL_PIN(10, "EMMC_DATA2"),
+ PINCTRL_PIN(11, "EMMC_DATA3"),
+ PINCTRL_PIN(12, "EMMC_DATA4"),
+ PINCTRL_PIN(13, "EMMC_DATA5"),
+ PINCTRL_PIN(14, "EMMC_DATA6"),
+ PINCTRL_PIN(15, "EMMC_DATA7"),
+ PINCTRL_PIN(16, "EMMC_RCLK"),
+ PINCTRL_PIN(17, "EMMC_CLK"),
+ PINCTRL_PIN(18, "EMMC_RESETB"),
+ PINCTRL_PIN(19, "A4WP_PRESENT"),
+ /* GPP_B */
+ PINCTRL_PIN(20, "CORE_VID_0"),
+ PINCTRL_PIN(21, "CORE_VID_1"),
+ PINCTRL_PIN(22, "VRALERTB"),
+ PINCTRL_PIN(23, "CPU_GP_2"),
+ PINCTRL_PIN(24, "CPU_GP_3"),
+ PINCTRL_PIN(25, "SRCCLKREQB_0"),
+ PINCTRL_PIN(26, "SRCCLKREQB_1"),
+ PINCTRL_PIN(27, "SRCCLKREQB_2"),
+ PINCTRL_PIN(28, "SRCCLKREQB_3"),
+ PINCTRL_PIN(29, "SRCCLKREQB_4"),
+ PINCTRL_PIN(30, "SRCCLKREQB_5"),
+ PINCTRL_PIN(31, "PMCALERTB"),
+ PINCTRL_PIN(32, "SLP_S0B"),
+ PINCTRL_PIN(33, "PLTRSTB"),
+ PINCTRL_PIN(34, "SPKR"),
+ PINCTRL_PIN(35, "GSPI0_CS0B"),
+ PINCTRL_PIN(36, "GSPI0_CLK"),
+ PINCTRL_PIN(37, "GSPI0_MISO"),
+ PINCTRL_PIN(38, "GSPI0_MOSI"),
+ PINCTRL_PIN(39, "GSPI1_CS0B"),
+ PINCTRL_PIN(40, "GSPI1_CLK"),
+ PINCTRL_PIN(41, "GSPI1_MISO"),
+ PINCTRL_PIN(42, "GSPI1_MOSI"),
+ PINCTRL_PIN(43, "DDSP_HPD_A"),
+ PINCTRL_PIN(44, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(45, "GSPI1_CLK_LOOPBK"),
+ /* GPP_A */
+ PINCTRL_PIN(46, "ESPI_IO_0"),
+ PINCTRL_PIN(47, "ESPI_IO_1"),
+ PINCTRL_PIN(48, "ESPI_IO_2"),
+ PINCTRL_PIN(49, "ESPI_IO_3"),
+ PINCTRL_PIN(50, "ESPI_CSB"),
+ PINCTRL_PIN(51, "ESPI_CLK"),
+ PINCTRL_PIN(52, "ESPI_RESETB"),
+ PINCTRL_PIN(53, "SMBCLK"),
+ PINCTRL_PIN(54, "SMBDATA"),
+ PINCTRL_PIN(55, "SMBALERTB"),
+ PINCTRL_PIN(56, "CPU_GP_0"),
+ PINCTRL_PIN(57, "CPU_GP_1"),
+ PINCTRL_PIN(58, "USB2_OCB_1"),
+ PINCTRL_PIN(59, "USB2_OCB_2"),
+ PINCTRL_PIN(60, "USB2_OCB_3"),
+ PINCTRL_PIN(61, "DDSP_HPD_A_TIME_SYNC_0"),
+ PINCTRL_PIN(62, "DDSP_HPD_B"),
+ PINCTRL_PIN(63, "DDSP_HPD_C"),
+ PINCTRL_PIN(64, "USB2_OCB_0"),
+ PINCTRL_PIN(65, "PCHHOTB"),
+ PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+ /* GPP_S */
+ PINCTRL_PIN(67, "SNDW1_CLK"),
+ PINCTRL_PIN(68, "SNDW1_DATA"),
+ PINCTRL_PIN(69, "SNDW2_CLK"),
+ PINCTRL_PIN(70, "SNDW2_DATA"),
+ PINCTRL_PIN(71, "SNDW1_CLK"),
+ PINCTRL_PIN(72, "SNDW1_DATA"),
+ PINCTRL_PIN(73, "SNDW4_CLK_DMIC_CLK_0"),
+ PINCTRL_PIN(74, "SNDW4_DATA_DMIC_DATA_0"),
+ /* GPP_R */
+ PINCTRL_PIN(75, "HDA_BCLK"),
+ PINCTRL_PIN(76, "HDA_SYNC"),
+ PINCTRL_PIN(77, "HDA_SDO"),
+ PINCTRL_PIN(78, "HDA_SDI_0"),
+ PINCTRL_PIN(79, "HDA_RSTB"),
+ PINCTRL_PIN(80, "HDA_SDI_1"),
+ PINCTRL_PIN(81, "I2S1_SFRM"),
+ PINCTRL_PIN(82, "I2S1_TXD"),
+ /* GPP_H */
+ PINCTRL_PIN(83, "GPPC_H_0"),
+ PINCTRL_PIN(84, "SD_PWR_EN_B"),
+ PINCTRL_PIN(85, "MODEM_CLKREQ"),
+ PINCTRL_PIN(86, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(87, "I2C2_SDA"),
+ PINCTRL_PIN(88, "I2C2_SCL"),
+ PINCTRL_PIN(89, "I2C3_SDA"),
+ PINCTRL_PIN(90, "I2C3_SCL"),
+ PINCTRL_PIN(91, "I2C4_SDA"),
+ PINCTRL_PIN(92, "I2C4_SCL"),
+ PINCTRL_PIN(93, "CPU_VCCIO_PWR_GATEB"),
+ PINCTRL_PIN(94, "I2S2_SCLK"),
+ PINCTRL_PIN(95, "I2S2_SFRM"),
+ PINCTRL_PIN(96, "I2S2_TXD"),
+ PINCTRL_PIN(97, "I2S2_RXD"),
+ PINCTRL_PIN(98, "I2S1_SCLK"),
+ PINCTRL_PIN(99, "GPPC_H_16"),
+ PINCTRL_PIN(100, "GPPC_H_17"),
+ PINCTRL_PIN(101, "GPPC_H_18"),
+ PINCTRL_PIN(102, "GPPC_H_19"),
+ PINCTRL_PIN(103, "GPPC_H_20"),
+ PINCTRL_PIN(104, "GPPC_H_21"),
+ PINCTRL_PIN(105, "GPPC_H_22"),
+ PINCTRL_PIN(106, "GPPC_H_23"),
+ /* GPP_D */
+ PINCTRL_PIN(107, "SPI1_CSB"),
+ PINCTRL_PIN(108, "SPI1_CLK"),
+ PINCTRL_PIN(109, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(110, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(111, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(112, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(113, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(114, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(115, "ISH_SPI_CSB"),
+ PINCTRL_PIN(116, "ISH_SPI_CLK"),
+ PINCTRL_PIN(117, "ISH_SPI_MISO"),
+ PINCTRL_PIN(118, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(119, "ISH_UART0_RXD"),
+ PINCTRL_PIN(120, "ISH_UART0_TXD"),
+ PINCTRL_PIN(121, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(122, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(123, "SPI1_IO_2"),
+ PINCTRL_PIN(124, "SPI1_IO_3"),
+ PINCTRL_PIN(125, "I2S_MCLK"),
+ PINCTRL_PIN(126, "CNV_MFUART2_RXD"),
+ PINCTRL_PIN(127, "CNV_MFUART2_TXD"),
+ PINCTRL_PIN(128, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(129, "I2C5_SDA"),
+ PINCTRL_PIN(130, "I2C5_SCL"),
+ PINCTRL_PIN(131, "GSPI2_CLK_LOOPBK"),
+ PINCTRL_PIN(132, "SPI1_CLK_LOOPBK"),
+ /* vGPIO */
+ PINCTRL_PIN(133, "CNV_BTEN"),
+ PINCTRL_PIN(134, "CNV_WCEN"),
+ PINCTRL_PIN(135, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(136, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(137, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(138, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(139, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(140, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(141, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(142, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(143, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(144, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(145, "vUART0_TXD"),
+ PINCTRL_PIN(146, "vUART0_RXD"),
+ PINCTRL_PIN(147, "vUART0_CTS_B"),
+ PINCTRL_PIN(148, "vUART0_RTS_B"),
+ PINCTRL_PIN(149, "vISH_UART0_TXD"),
+ PINCTRL_PIN(150, "vISH_UART0_RXD"),
+ PINCTRL_PIN(151, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(152, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(153, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(154, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(155, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(156, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(157, "vI2S2_SCLK"),
+ PINCTRL_PIN(158, "vI2S2_SFRM"),
+ PINCTRL_PIN(159, "vI2S2_TXD"),
+ PINCTRL_PIN(160, "vI2S2_RXD"),
+ PINCTRL_PIN(161, "vSD3_CD_B"),
+ /* GPP_C */
+ PINCTRL_PIN(162, "GPPC_C_0"),
+ PINCTRL_PIN(163, "GPPC_C_1"),
+ PINCTRL_PIN(164, "GPPC_C_2"),
+ PINCTRL_PIN(165, "GPPC_C_3"),
+ PINCTRL_PIN(166, "GPPC_C_4"),
+ PINCTRL_PIN(167, "GPPC_C_5"),
+ PINCTRL_PIN(168, "SUSWARNB_SUSPWRDNACK"),
+ PINCTRL_PIN(169, "SUSACKB"),
+ PINCTRL_PIN(170, "UART0_RXD"),
+ PINCTRL_PIN(171, "UART0_TXD"),
+ PINCTRL_PIN(172, "UART0_RTSB"),
+ PINCTRL_PIN(173, "UART0_CTSB"),
+ PINCTRL_PIN(174, "UART1_RXD"),
+ PINCTRL_PIN(175, "UART1_TXD"),
+ PINCTRL_PIN(176, "UART1_RTSB"),
+ PINCTRL_PIN(177, "UART1_CTSB"),
+ PINCTRL_PIN(178, "I2C0_SDA"),
+ PINCTRL_PIN(179, "I2C0_SCL"),
+ PINCTRL_PIN(180, "I2C1_SDA"),
+ PINCTRL_PIN(181, "I2C1_SCL"),
+ PINCTRL_PIN(182, "UART2_RXD"),
+ PINCTRL_PIN(183, "UART2_TXD"),
+ PINCTRL_PIN(184, "UART2_RTSB"),
+ PINCTRL_PIN(185, "UART2_CTSB"),
+ /* HVCMOS */
+ PINCTRL_PIN(186, "L_BKLTEN"),
+ PINCTRL_PIN(187, "L_BKLTCTL"),
+ PINCTRL_PIN(188, "L_VDDEN"),
+ PINCTRL_PIN(189, "SYS_PWROK"),
+ PINCTRL_PIN(190, "SYS_RESETB"),
+ PINCTRL_PIN(191, "MLK_RSTB"),
+ /* GPP_E */
+ PINCTRL_PIN(192, "ISH_GP_0"),
+ PINCTRL_PIN(193, "ISH_GP_1"),
+ PINCTRL_PIN(194, "IMGCLKOUT_1"),
+ PINCTRL_PIN(195, "ISH_GP_2"),
+ PINCTRL_PIN(196, "IMGCLKOUT_2"),
+ PINCTRL_PIN(197, "SATA_LEDB"),
+ PINCTRL_PIN(198, "IMGCLKOUT_3"),
+ PINCTRL_PIN(199, "ISH_GP_3"),
+ PINCTRL_PIN(200, "ISH_GP_4"),
+ PINCTRL_PIN(201, "ISH_GP_5"),
+ PINCTRL_PIN(202, "ISH_GP_6"),
+ PINCTRL_PIN(203, "ISH_GP_7"),
+ PINCTRL_PIN(204, "IMGCLKOUT_4"),
+ PINCTRL_PIN(205, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(206, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(207, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(208, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(209, "DDPC_CTRLCLK"),
+ PINCTRL_PIN(210, "DDPC_CTRLDATA"),
+ PINCTRL_PIN(211, "IMGCLKOUT_5"),
+ PINCTRL_PIN(212, "CNV_BRI_DT"),
+ PINCTRL_PIN(213, "CNV_BRI_RSP"),
+ PINCTRL_PIN(214, "CNV_RGI_DT"),
+ PINCTRL_PIN(215, "CNV_RGI_RSP"),
+ /* GPP_G */
+ PINCTRL_PIN(216, "SD3_CMD"),
+ PINCTRL_PIN(217, "SD3_D0"),
+ PINCTRL_PIN(218, "SD3_D1"),
+ PINCTRL_PIN(219, "SD3_D2"),
+ PINCTRL_PIN(220, "SD3_D3"),
+ PINCTRL_PIN(221, "SD3_CDB"),
+ PINCTRL_PIN(222, "SD3_CLK"),
+ PINCTRL_PIN(223, "SD3_WP"),
+};
+
+static const struct intel_padgroup jsl_community0_gpps[] = {
+ JSL_GPP(0, 0, 19, 320), /* GPP_F */
+ JSL_GPP(1, 20, 45, 32), /* GPP_B */
+ JSL_GPP(2, 46, 66, 64), /* GPP_A */
+ JSL_GPP(3, 67, 74, 96), /* GPP_S */
+ JSL_GPP(4, 75, 82, 128), /* GPP_R */
+};
+
+static const struct intel_padgroup jsl_community1_gpps[] = {
+ JSL_GPP(0, 83, 106, 160), /* GPP_H */
+ JSL_GPP(1, 107, 132, 192), /* GPP_D */
+ JSL_GPP(2, 133, 161, 224), /* vGPIO */
+ JSL_GPP(3, 162, 185, 256), /* GPP_C */
+};
+
+static const struct intel_padgroup jsl_community4_gpps[] = {
+ JSL_GPP(0, 186, 191, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
+ JSL_GPP(1, 192, 215, 288), /* GPP_E */
+};
+
+static const struct intel_padgroup jsl_community5_gpps[] = {
+ JSL_GPP(0, 216, 223, INTEL_GPIO_BASE_ZERO), /* GPP_G */
+};
+
+static const struct intel_community jsl_communities[] = {
+ JSL_COMMUNITY(0, 0, 82, jsl_community0_gpps),
+ JSL_COMMUNITY(1, 83, 185, jsl_community1_gpps),
+ JSL_COMMUNITY(2, 186, 215, jsl_community4_gpps),
+ JSL_COMMUNITY(3, 216, 223, jsl_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data jsl_soc_data = {
+ .pins = jsl_pins,
+ .npins = ARRAY_SIZE(jsl_pins),
+ .communities = jsl_communities,
+ .ncommunities = ARRAY_SIZE(jsl_communities),
+};
+
+static const struct acpi_device_id jsl_pinctrl_acpi_match[] = {
+ { "INT34C8", (kernel_ulong_t)&jsl_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, jsl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(jsl_pinctrl_pm_ops);
+
+static struct platform_driver jsl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_hid,
+ .driver = {
+ .name = "jasperlake-pinctrl",
+ .acpi_match_table = jsl_pinctrl_acpi_match,
+ .pm = &jsl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(jsl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Jasper Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
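
For clarity, the group macro is purely arithmetic and its third argument is the last pin of the group, not a count. JSL_GPP(0, 0, 19, 320) from jsl_community0_gpps above expands to:

	{
		.reg_num   = 0,
		.base      = 0,
		.size      = 20,	/* (19 - 0) + 1 pins, i.e. all of GPP_F */
		.gpio_base = 320,
	}
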
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index e928742c7181..a45b8f2182fd 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -794,11 +794,11 @@ static int lp_gpio_probe(struct platform_device *pdev)
const struct intel_pinctrl_soc_data *soc;
struct intel_pinctrl *lg;
struct gpio_chip *gc;
- struct resource *io_rc, *irq_rc;
struct device *dev = &pdev->dev;
+ struct resource *io_rc;
void __iomem *regs;
unsigned int i;
- int ret;
+ int irq, ret;
soc = (const struct intel_pinctrl_soc_data *)device_get_match_data(dev);
if (!soc)
@@ -870,8 +870,8 @@ static int lp_gpio_probe(struct platform_device *pdev)
gc->parent = dev;
/* set up interrupts */
- irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq_rc && irq_rc->start) {
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq > 0) {
struct gpio_irq_chip *girq;
girq = &gc->irq;
@@ -884,7 +884,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
- girq->parents[0] = (unsigned int)irq_rc->start;
+ girq->parents[0] = irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
}
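
The Lynxpoint probe change above swaps the open-coded IORESOURCE_IRQ lookup for platform_get_irq_optional(), which returns a negative errno (rather than a NULL resource) when no interrupt is wired up. The resulting idiom, sketched:

	int irq = platform_get_irq_optional(pdev, 0);

	if (irq > 0) {
		/* interrupt present: register the GPIO irqchip */
	}
	/*
	 * irq <= 0: no usable IRQ; the controller still probes fine and
	 * simply runs without interrupt support, as before.
	 */
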
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 330c8f077b73..4d7a86a5a37b 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -15,17 +15,18 @@
#include "pinctrl-intel.h"
-#define SPT_PAD_OWN 0x020
-#define SPT_PADCFGLOCK 0x0a0
-#define SPT_HOSTSW_OWN 0x0d0
-#define SPT_GPI_IS 0x100
-#define SPT_GPI_IE 0x120
+#define SPT_PAD_OWN 0x020
+#define SPT_H_PADCFGLOCK 0x090
+#define SPT_LP_PADCFGLOCK 0x0a0
+#define SPT_HOSTSW_OWN 0x0d0
+#define SPT_GPI_IS 0x100
+#define SPT_GPI_IE 0x120
#define SPT_COMMUNITY(b, s, e) \
{ \
.barno = (b), \
.padown_offset = SPT_PAD_OWN, \
- .padcfglock_offset = SPT_PADCFGLOCK, \
+ .padcfglock_offset = SPT_LP_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
.is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
@@ -47,7 +48,7 @@
{ \
.barno = (b), \
.padown_offset = SPT_PAD_OWN, \
- .padcfglock_offset = SPT_PADCFGLOCK, \
+ .padcfglock_offset = SPT_H_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
.is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 08a86f6fdea6..bcfd7548e282 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -21,8 +21,6 @@
#define TGL_GPI_IS 0x100
#define TGL_GPI_IE 0x120
-#define TGL_NO_GPIO -1
-
#define TGL_GPP(r, s, e, g) \
{ \
.reg_num = (r), \
@@ -342,30 +340,30 @@ static const struct pinctrl_pin_desc tgllp_pins[] = {
};
static const struct intel_padgroup tgllp_community0_gpps[] = {
- TGL_GPP(0, 0, 25, 0), /* GPP_B */
- TGL_GPP(1, 26, 41, 32), /* GPP_T */
- TGL_GPP(2, 42, 66, 64), /* GPP_A */
+ TGL_GPP(0, 0, 25, 0), /* GPP_B */
+ TGL_GPP(1, 26, 41, 32), /* GPP_T */
+ TGL_GPP(2, 42, 66, 64), /* GPP_A */
};
static const struct intel_padgroup tgllp_community1_gpps[] = {
- TGL_GPP(0, 67, 74, 96), /* GPP_S */
- TGL_GPP(1, 75, 98, 128), /* GPP_H */
- TGL_GPP(2, 99, 119, 160), /* GPP_D */
- TGL_GPP(3, 120, 143, 192), /* GPP_U */
- TGL_GPP(4, 144, 170, 224), /* vGPIO */
+ TGL_GPP(0, 67, 74, 96), /* GPP_S */
+ TGL_GPP(1, 75, 98, 128), /* GPP_H */
+ TGL_GPP(2, 99, 119, 160), /* GPP_D */
+ TGL_GPP(3, 120, 143, 192), /* GPP_U */
+ TGL_GPP(4, 144, 170, 224), /* vGPIO */
};
static const struct intel_padgroup tgllp_community4_gpps[] = {
- TGL_GPP(0, 171, 194, 256), /* GPP_C */
- TGL_GPP(1, 195, 219, 288), /* GPP_F */
- TGL_GPP(2, 220, 225, TGL_NO_GPIO), /* HVCMOS */
- TGL_GPP(3, 226, 250, 320), /* GPP_E */
- TGL_GPP(4, 251, 259, TGL_NO_GPIO), /* JTAG */
+ TGL_GPP(0, 171, 194, 256), /* GPP_C */
+ TGL_GPP(1, 195, 219, 288), /* GPP_F */
+ TGL_GPP(2, 220, 225, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
+ TGL_GPP(3, 226, 250, 320), /* GPP_E */
+ TGL_GPP(4, 251, 259, INTEL_GPIO_BASE_NOMAP), /* JTAG */
};
static const struct intel_padgroup tgllp_community5_gpps[] = {
- TGL_GPP(0, 260, 267, 352), /* GPP_R */
- TGL_GPP(1, 268, 276, TGL_NO_GPIO), /* SPI */
+ TGL_GPP(0, 260, 267, 352), /* GPP_R */
+ TGL_GPP(1, 268, 276, INTEL_GPIO_BASE_NOMAP), /* SPI */
};
static const struct intel_community tgllp_communities[] = {
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 701f9af63f5e..f32d3644c509 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -3,10 +3,12 @@ menu "MediaTek pinctrl drivers"
depends on ARCH_MEDIATEK || COMPILE_TEST
config EINT_MTK
- bool "MediaTek External Interrupt Support"
+ tristate "MediaTek External Interrupt Support"
depends on PINCTRL_MTK || PINCTRL_MTK_MOORE || PINCTRL_MTK_PARIS || COMPILE_TEST
select GPIOLIB
select IRQ_DOMAIN
+ default y if PINCTRL_MTK || PINCTRL_MTK_MOORE
+ default PINCTRL_MTK_PARIS
config PINCTRL_MTK
bool
@@ -17,6 +19,9 @@ config PINCTRL_MTK
select EINT_MTK
select OF_GPIO
+config PINCTRL_MTK_V2
+ tristate
+
config PINCTRL_MTK_MOORE
bool
depends on OF
@@ -25,15 +30,17 @@ config PINCTRL_MTK_MOORE
select GENERIC_PINMUX_FUNCTIONS
select GPIOLIB
select OF_GPIO
+ select PINCTRL_MTK_V2
config PINCTRL_MTK_PARIS
- bool
+ tristate
depends on OF
select PINMUX
select GENERIC_PINCONF
select GPIOLIB
select EINT_MTK
select OF_GPIO
+ select PINCTRL_MTK_V2
# For ARMv7 SoCs
config PINCTRL_MT2701
@@ -80,7 +87,7 @@ config PINCTRL_MT2712
select PINCTRL_MTK
config PINCTRL_MT6765
- bool "Mediatek MT6765 pin control"
+ tristate "Mediatek MT6765 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index a74325abd877..4b7132876e71 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -2,8 +2,9 @@
# Core
obj-$(CONFIG_EINT_MTK) += mtk-eint.o
obj-$(CONFIG_PINCTRL_MTK) += pinctrl-mtk-common.o
-obj-$(CONFIG_PINCTRL_MTK_MOORE) += pinctrl-moore.o pinctrl-mtk-common-v2.o
-obj-$(CONFIG_PINCTRL_MTK_PARIS) += pinctrl-paris.o pinctrl-mtk-common-v2.o
+obj-$(CONFIG_PINCTRL_MTK_V2) += pinctrl-mtk-common-v2.o
+obj-$(CONFIG_PINCTRL_MTK_MOORE) += pinctrl-moore.o
+obj-$(CONFIG_PINCTRL_MTK_PARIS) += pinctrl-paris.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_MT2701) += pinctrl-mt2701.o
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index 7e526bcf5e0b..22736f60c16c 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
@@ -379,6 +380,7 @@ int mtk_eint_do_suspend(struct mtk_eint *eint)
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_eint_do_suspend);
int mtk_eint_do_resume(struct mtk_eint *eint)
{
@@ -386,6 +388,7 @@ int mtk_eint_do_resume(struct mtk_eint *eint)
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_eint_do_resume);
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
unsigned int debounce)
@@ -440,6 +443,7 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_eint_set_debounce);
int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
{
@@ -451,6 +455,7 @@ int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
return irq;
}
+EXPORT_SYMBOL_GPL(mtk_eint_find_irq);
int mtk_eint_do_init(struct mtk_eint *eint)
{
@@ -495,3 +500,7 @@ int mtk_eint_do_init(struct mtk_eint *eint)
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_eint_do_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek EINT Driver");
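
These EXPORT_SYMBOL_GPL() markers are what let the Kconfig change earlier in this series (EINT_MTK becoming tristate) actually work: a pin-control driver built as a module can now link against the EINT core. A hypothetical modular caller, using only functions exported above (the debounce unit is an assumption here):

	static int example_arm_wake_pin(struct mtk_eint *eint, unsigned long n)
	{
		int virq = mtk_eint_find_irq(eint, n);

		if (virq < 0)
			return virq;

		/* assumed ~1 ms debounce on the external interrupt line */
		return mtk_eint_set_debounce(eint, n, 1000);
	}
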
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6765.c b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
index 905dae8c3fd8..2c59d3936256 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6765.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/module.h>
#include "pinctrl-mtk-mt6765.h"
#include "pinctrl-paris.h"
@@ -1103,3 +1104,6 @@ static int __init mt6765_pinctrl_init(void)
return platform_driver_register(&mt6765_pinctrl_driver);
}
arch_initcall(mt6765_pinctrl_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek MT6765 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index d3169a87e1b3..b77b18fe5adc 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -12,6 +12,7 @@
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_irq.h>
#include "mtk-eint.h"
@@ -204,6 +205,7 @@ int mtk_hw_set_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_hw_set_value);
int mtk_hw_get_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
int field, int *value)
@@ -223,6 +225,7 @@ int mtk_hw_get_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_hw_get_value);
static int mtk_xt_find_eint_num(struct mtk_pinctrl *hw, unsigned long eint_n)
{
@@ -361,6 +364,7 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
return mtk_eint_do_init(hw->eint);
}
+EXPORT_SYMBOL_GPL(mtk_build_eint);
/* Revision 0 */
int mtk_pinconf_bias_disable_set(struct mtk_pinctrl *hw,
@@ -380,6 +384,7 @@ int mtk_pinconf_bias_disable_set(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_set);
int mtk_pinconf_bias_disable_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *res)
@@ -402,6 +407,7 @@ int mtk_pinconf_bias_disable_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_get);
int mtk_pinconf_bias_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup)
@@ -421,6 +427,7 @@ int mtk_pinconf_bias_set(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_set);
int mtk_pinconf_bias_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup, int *res)
@@ -440,6 +447,7 @@ int mtk_pinconf_bias_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get);
/* Revision 1 */
int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw,
@@ -454,6 +462,7 @@ int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_set_rev1);
int mtk_pinconf_bias_disable_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *res)
@@ -471,6 +480,7 @@ int mtk_pinconf_bias_disable_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_get_rev1);
int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup)
@@ -490,6 +500,7 @@ int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_set_rev1);
int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
@@ -515,6 +526,7 @@ int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get_rev1);
/* Combo for the following pull register type:
* 1. PU + PD
@@ -715,6 +727,7 @@ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
out:
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_set_combo);
int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
@@ -735,6 +748,7 @@ int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
out:
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get_combo);
/* Revision 0 */
int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
@@ -764,6 +778,7 @@ int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_set);
int mtk_pinconf_drive_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *val)
@@ -788,6 +803,7 @@ int mtk_pinconf_drive_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_get);
/* Revision 1 */
int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
@@ -809,6 +825,7 @@ int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_set_rev1);
int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *val)
@@ -826,18 +843,21 @@ int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_get_rev1);
int mtk_pinconf_drive_set_raw(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg)
{
return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV, arg);
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_set_raw);
int mtk_pinconf_drive_get_raw(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *val)
{
return mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV, val);
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_get_raw);
int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
@@ -878,6 +898,7 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_pull_set);
int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
@@ -920,6 +941,7 @@ int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_pull_get);
int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg)
@@ -946,6 +968,7 @@ int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_set);
int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 *val)
@@ -969,3 +992,8 @@ int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_get);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("Pin configuration library module for mediatek SoCs");
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 3853ec3a2a8e..90a432bf9fed 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -10,6 +10,7 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/module.h>
#include <dt-bindings/pinctrl/mt65xx.h>
#include "pinctrl-paris.h"
@@ -164,8 +165,6 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
case MTK_PIN_CONFIG_PU_ADV:
case MTK_PIN_CONFIG_PD_ADV:
if (hw->soc->adv_pull_get) {
- bool pullup;
-
pullup = param == MTK_PIN_CONFIG_PU_ADV;
err = hw->soc->adv_pull_get(hw, desc, pullup, &ret);
} else
@@ -633,6 +632,7 @@ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
return len;
}
+EXPORT_SYMBOL_GPL(mtk_pctrl_show_one_pin);
#define PIN_DBG_BUF_SZ 96
static void mtk_pctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
@@ -1021,6 +1021,7 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_paris_pinctrl_probe);
static int mtk_paris_pinctrl_suspend(struct device *device)
{
@@ -1040,3 +1041,6 @@ const struct dev_pm_ops mtk_paris_pinctrl_pm_ops = {
.suspend_noirq = mtk_paris_pinctrl_suspend,
.resume_noirq = mtk_paris_pinctrl_resume,
};
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek Pinctrl Common Driver V2 Paris");
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index bbc919bef2bf..079f8ee8d353 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -549,6 +549,18 @@ static const struct pinconf_ops meson_pinconf_ops = {
.is_generic = true,
};
+static int meson_gpio_get_direction(struct gpio_chip *chip, unsigned gpio)
+{
+ struct meson_pinctrl *pc = gpiochip_get_data(chip);
+ int ret;
+
+ ret = meson_pinconf_get_output(pc, gpio);
+ if (ret < 0)
+ return ret;
+
+ return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
static int meson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
return meson_pinconf_set_output(gpiochip_get_data(chip), gpio, false);
@@ -591,6 +603,8 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.parent = pc->dev;
pc->chip.request = gpiochip_generic_request;
pc->chip.free = gpiochip_generic_free;
+ pc->chip.set_config = gpiochip_generic_config;
+ pc->chip.get_direction = meson_gpio_get_direction;
pc->chip.direction_input = meson_gpio_direction_input;
pc->chip.direction_output = meson_gpio_direction_output;
pc->chip.get = meson_gpio_get;
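
With .get_direction now populated, gpiolib can answer direction queries from the hardware registers instead of guessing. A consumer-side sketch (device handle and line name are hypothetical):

	struct gpio_desc *d = gpiod_get(dev, "example", GPIOD_ASIS);
	int dir;

	if (IS_ERR(d))
		return PTR_ERR(d);

	dir = gpiod_get_direction(d);	/* GPIO_LINE_DIRECTION_OUT (0) or _IN (1) */
	if (dir >= 0)
		dev_info(dev, "line is an %s\n", dir ? "input" : "output");
	gpiod_put(d);
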
diff --git a/drivers/pinctrl/nomadik/pinctrl-ab8505.c b/drivers/pinctrl/nomadik/pinctrl-ab8505.c
index 5e6e7d28390a..b93af1fb37f0 100644
--- a/drivers/pinctrl/nomadik/pinctrl-ab8505.c
+++ b/drivers/pinctrl/nomadik/pinctrl-ab8505.c
@@ -178,6 +178,7 @@ static const struct abx500_pingroup ab8505_groups[] = {
AB8505_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(uartrxdata_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio50_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(pdmdata_b_1, ABX500_ALT_B),
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index b9246e0b4fe2..acad3887cc74 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -691,18 +691,21 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(lcd_d8_d11_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(lcd_d12_d23_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(kpskaskb_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc2_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ssp1_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ssp0_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ipgpio0_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ipgpio1_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(modem_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(kp_a_2, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp2sck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp2_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc4_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc1_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc1_a_2, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(mc1dir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(hsir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(hsit_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(hsit_a_2, NMK_GPIO_ALT_A),
@@ -760,7 +763,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(u0_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(ipgpio4_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(ipgpio5_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio6_c_1, NMK_GPIO_ALT_C),
+ DB8500_PIN_GROUP(ipgpio6_c_2, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(ipgpio7_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(smcleale_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(stmape_c_1, NMK_GPIO_ALT_C),
@@ -955,6 +958,7 @@ static const struct nmk_function nmk_db8500_functions[] = {
FUNCTION(spi0),
FUNCTION(spi2),
FUNCTION(remap),
+ FUNCTION(sbag),
FUNCTION(ptm),
FUNCTION(rf),
FUNCTION(hx),
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index ca7bbe4164c0..ba25c4654391 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1343,8 +1343,6 @@ static const struct nmk_cfg_param nmk_cfg_params[] = {
static int nmk_dt_pin_config(int index, int val, unsigned long *config)
{
- int ret = 0;
-
if (nmk_cfg_params[index].choice == NULL)
*config = nmk_cfg_params[index].config;
else {
@@ -1354,7 +1352,7 @@ static int nmk_dt_pin_config(int index, int val, unsigned long *config)
nmk_cfg_params[index].choice[val];
}
}
- return ret;
+ return 0;
}
static const char *nmk_find_pin_name(struct pinctrl_dev *pctldev, const char *pin_name)
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 694912409fd9..54222ccddfb1 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -1019,7 +1019,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pioctrl->reg_base))
- return -EINVAL;
+ return PTR_ERR(atmel_pioctrl->reg_base);
atmel_pioctrl->clk = devm_clk_get(dev, NULL);
if (IS_ERR(atmel_pioctrl->clk)) {
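
The at91 fix above is the standard idiom for devm_platform_ioremap_resource(): the returned pointer encodes the real errno, so propagating it beats a blanket -EINVAL:

	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);	/* e.g. -ENOMEM or -EBUSY, not a generic -EINVAL */
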
diff --git a/drivers/pinctrl/pinctrl-bm1880.c b/drivers/pinctrl/pinctrl-bm1880.c
index f7dff4f14101..d1a7d9836787 100644
--- a/drivers/pinctrl/pinctrl-bm1880.c
+++ b/drivers/pinctrl/pinctrl-bm1880.c
@@ -408,6 +408,7 @@ static const struct bm1880_pctrl_group bm1880_pctrl_groups[] = {
BM1880_PINCTRL_GRP(pwm34),
BM1880_PINCTRL_GRP(pwm35),
BM1880_PINCTRL_GRP(pwm36),
+ BM1880_PINCTRL_GRP(pwm37),
BM1880_PINCTRL_GRP(i2c0),
BM1880_PINCTRL_GRP(i2c1),
BM1880_PINCTRL_GRP(i2c2),
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index e5dcf77fe43d..6a8d44504f94 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -1977,6 +1977,25 @@ static const struct pinctrl_ops ingenic_pctlops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
+static int ingenic_gpio_irq_request(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = ingenic_gpio_direction_input(gpio_chip, data->hwirq);
+ if (ret)
+ return ret;
+
+ return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+}
+
+static void ingenic_gpio_irq_release(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+
+ return gpiochip_relres_irq(gpio_chip, data->hwirq);
+}
+
static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
int pin, int func)
{
@@ -2338,6 +2357,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->irq_chip.irq_ack = ingenic_gpio_irq_ack;
jzgc->irq_chip.irq_set_type = ingenic_gpio_irq_set_type;
jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
+ jzgc->irq_chip.irq_request_resources = ingenic_gpio_irq_request;
+ jzgc->irq_chip.irq_release_resources = ingenic_gpio_irq_release;
jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
girq = &jzgc->gc.irq;
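
The two hooks registered above run from inside request_irq() when a consumer claims a GPIO line as an interrupt, so the pin is forced to be an input before the line is reserved. Call-order sketch for a hypothetical consumer:

	/* consumer code (hypothetical handler, names invented) */
	ret = request_irq(gpiod_to_irq(gpiod), example_handler,
			  IRQF_TRIGGER_FALLING, "example", priv);
	/*
	 * -> ingenic_gpio_irq_request()
	 *      -> ingenic_gpio_direction_input()   pin switched to input first
	 *      -> gpiochip_reqres_irq()             line reserved for IRQ use
	 */
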
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index aa92f141b865..626e02d7a1ba 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -221,7 +221,7 @@ static int match_mux(const struct ltq_mfp_pin *mfp, unsigned mux)
return i;
}
-/* dont assume .mfp is linearly mapped. find the mfp with the correct .pin */
+/* don't assume .mfp is linearly mapped. find the mfp with the correct .pin */
static int match_mfp(const struct ltq_pinmux_info *info, int pin)
{
int i;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 3a235487e38d..151931b593f6 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -1,34 +1,23 @@
// SPDX-License-Identifier: GPL-2.0-only
/* MCP23S08 SPI/I2C GPIO driver */
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/export.h>
#include <linux/gpio/driver.h>
-#include <linux/i2c.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/mcp23s08.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
-/*
- * MCP types supported by driver
- */
-#define MCP_TYPE_S08 0
-#define MCP_TYPE_S17 1
-#define MCP_TYPE_008 2
-#define MCP_TYPE_017 3
-#define MCP_TYPE_S18 4
-#define MCP_TYPE_018 5
-
-#define MCP_MAX_DEV_PER_CS 8
+#include "pinctrl-mcp23s08.h"
/* Registers are all 8 bits wide.
*
@@ -53,31 +42,6 @@
#define MCP_GPIO 0x09
#define MCP_OLAT 0x0a
-struct mcp23s08;
-
-struct mcp23s08 {
- u8 addr;
- bool irq_active_high;
- bool reg_shift;
-
- u16 irq_rise;
- u16 irq_fall;
- int irq;
- bool irq_controller;
- int cached_gpio;
- /* lock protects regmap access with bypass/cache flags */
- struct mutex lock;
-
- struct gpio_chip chip;
- struct irq_chip irq_chip;
-
- struct regmap *regmap;
- struct device *dev;
-
- struct pinctrl_dev *pctldev;
- struct pinctrl_desc pinctrl_desc;
-};
-
static const struct reg_default mcp23x08_defaults[] = {
{.reg = MCP_IODIR, .def = 0xff},
{.reg = MCP_IPOL, .def = 0x00},
@@ -109,7 +73,7 @@ static const struct regmap_access_table mcp23x08_precious_table = {
.n_yes_ranges = 1,
};
-static const struct regmap_config mcp23x08_regmap = {
+const struct regmap_config mcp23x08_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -121,6 +85,7 @@ static const struct regmap_config mcp23x08_regmap = {
.cache_type = REGCACHE_FLAT,
.max_register = MCP_OLAT,
};
+EXPORT_SYMBOL_GPL(mcp23x08_regmap);
static const struct reg_default mcp23x16_defaults[] = {
{.reg = MCP_IODIR << 1, .def = 0xffff},
@@ -153,7 +118,7 @@ static const struct regmap_access_table mcp23x16_precious_table = {
.n_yes_ranges = 1,
};
-static const struct regmap_config mcp23x17_regmap = {
+const struct regmap_config mcp23x17_regmap = {
.reg_bits = 8,
.val_bits = 16,
@@ -166,6 +131,7 @@ static const struct regmap_config mcp23x17_regmap = {
.cache_type = REGCACHE_FLAT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+EXPORT_SYMBOL_GPL(mcp23x17_regmap);
static int mcp_read(struct mcp23s08 *mcp, unsigned int reg, unsigned int *val)
{
@@ -309,80 +275,6 @@ static const struct pinconf_ops mcp_pinconf_ops = {
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_SPI_MASTER
-
-static int mcp23sxx_spi_write(void *context, const void *data, size_t count)
-{
- struct mcp23s08 *mcp = context;
- struct spi_device *spi = to_spi_device(mcp->dev);
- struct spi_message m;
- struct spi_transfer t[2] = { { .tx_buf = &mcp->addr, .len = 1, },
- { .tx_buf = data, .len = count, }, };
-
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
- return spi_sync(spi, &m);
-}
-
-static int mcp23sxx_spi_gather_write(void *context,
- const void *reg, size_t reg_size,
- const void *val, size_t val_size)
-{
- struct mcp23s08 *mcp = context;
- struct spi_device *spi = to_spi_device(mcp->dev);
- struct spi_message m;
- struct spi_transfer t[3] = { { .tx_buf = &mcp->addr, .len = 1, },
- { .tx_buf = reg, .len = reg_size, },
- { .tx_buf = val, .len = val_size, }, };
-
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
- spi_message_add_tail(&t[2], &m);
-
- return spi_sync(spi, &m);
-}
-
-static int mcp23sxx_spi_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct mcp23s08 *mcp = context;
- struct spi_device *spi = to_spi_device(mcp->dev);
- u8 tx[2];
-
- if (reg_size != 1)
- return -EINVAL;
-
- tx[0] = mcp->addr | 0x01;
- tx[1] = *((u8 *) reg);
-
- return spi_write_then_read(spi, tx, sizeof(tx), val, val_size);
-}
-
-static const struct regmap_bus mcp23sxx_spi_regmap = {
- .write = mcp23sxx_spi_write,
- .gather_write = mcp23sxx_spi_gather_write,
- .read = mcp23sxx_spi_read,
-};
-
-#endif /* CONFIG_SPI_MASTER */
-
-/*----------------------------------------------------------------------*/
-
-/* A given spi_device can represent up to eight mcp23sxx chips
- * sharing the same chipselect but using different addresses
- * (e.g. chips #0 and #3 might be populated, but not #1 or $2).
- * Driver data holds all the per-chip data.
- */
-struct mcp23s08_driver_data {
- unsigned ngpio;
- struct mcp23s08 *mcp[8];
- struct mcp23s08 chip[];
-};
-
-
static int mcp23s08_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct mcp23s08 *mcp = gpiochip_get_data(chip);
@@ -562,7 +454,6 @@ static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct mcp23s08 *mcp = gpiochip_get_data(gc);
unsigned int pos = data->hwirq;
- int status = 0;
if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
mcp_set_bit(mcp, MCP_INTCON, pos, false);
@@ -585,7 +476,7 @@ static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
} else
return -EINVAL;
- return status;
+ return 0;
}
static void mcp23s08_irq_bus_lock(struct irq_data *data)
@@ -656,21 +547,25 @@ static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
/*----------------------------------------------------------------------*/
-static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
- void *data, unsigned addr, unsigned type,
- unsigned int base, int cs)
+int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ unsigned int addr, unsigned int type, unsigned int base)
{
int status, ret;
bool mirror = false;
bool open_drain = false;
- struct regmap_config *one_regmap_config = NULL;
- int raw_chip_address = (addr & ~0x40) >> 1;
mutex_init(&mcp->lock);
mcp->dev = dev;
mcp->addr = addr;
+
mcp->irq_active_high = false;
+ mcp->irq_chip.name = dev_name(dev);
+ mcp->irq_chip.irq_mask = mcp23s08_irq_mask;
+ mcp->irq_chip.irq_unmask = mcp23s08_irq_unmask;
+ mcp->irq_chip.irq_set_type = mcp23s08_irq_set_type;
+ mcp->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock;
+ mcp->irq_chip.irq_bus_sync_unlock = mcp23s08_irq_bus_unlock;
mcp->chip.direction_input = mcp23s08_direction_input;
mcp->chip.get = mcp23s08_get;
@@ -681,83 +576,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.of_node = dev->of_node;
#endif
- switch (type) {
-#ifdef CONFIG_SPI_MASTER
- case MCP_TYPE_S08:
- case MCP_TYPE_S17:
- switch (type) {
- case MCP_TYPE_S08:
- one_regmap_config =
- devm_kmemdup(dev, &mcp23x08_regmap,
- sizeof(struct regmap_config), GFP_KERNEL);
- mcp->reg_shift = 0;
- mcp->chip.ngpio = 8;
- mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL,
- "mcp23s08.%d", raw_chip_address);
- break;
- case MCP_TYPE_S17:
- one_regmap_config =
- devm_kmemdup(dev, &mcp23x17_regmap,
- sizeof(struct regmap_config), GFP_KERNEL);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL,
- "mcp23s17.%d", raw_chip_address);
- break;
- }
- if (!one_regmap_config)
- return -ENOMEM;
-
- one_regmap_config->name = devm_kasprintf(dev, GFP_KERNEL, "%d", raw_chip_address);
- mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
- one_regmap_config);
- break;
-
- case MCP_TYPE_S18:
- one_regmap_config =
- devm_kmemdup(dev, &mcp23x17_regmap,
- sizeof(struct regmap_config), GFP_KERNEL);
- if (!one_regmap_config)
- return -ENOMEM;
- mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
- one_regmap_config);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = "mcp23s18";
- break;
-#endif /* CONFIG_SPI_MASTER */
-
-#if IS_ENABLED(CONFIG_I2C)
- case MCP_TYPE_008:
- mcp->regmap = devm_regmap_init_i2c(data, &mcp23x08_regmap);
- mcp->reg_shift = 0;
- mcp->chip.ngpio = 8;
- mcp->chip.label = "mcp23008";
- break;
-
- case MCP_TYPE_017:
- mcp->regmap = devm_regmap_init_i2c(data, &mcp23x17_regmap);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = "mcp23017";
- break;
-
- case MCP_TYPE_018:
- mcp->regmap = devm_regmap_init_i2c(data, &mcp23x17_regmap);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = "mcp23018";
- break;
-#endif /* CONFIG_I2C */
-
- default:
- dev_err(dev, "invalid device type (%d)\n", type);
- return -EINVAL;
- }
-
- if (IS_ERR(mcp->regmap))
- return PTR_ERR(mcp->regmap);
-
mcp->chip.base = base;
mcp->chip.can_sleep = true;
mcp->chip.parent = dev;
@@ -816,14 +634,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
- if (one_regmap_config) {
- mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
- "mcp23xxx-pinctrl.%d", raw_chip_address);
- if (!mcp->pinctrl_desc.name)
- return -ENOMEM;
- } else {
- mcp->pinctrl_desc.name = "mcp23xxx-pinctrl";
- }
mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops;
mcp->pinctrl_desc.confops = &mcp_pinconf_ops;
mcp->pinctrl_desc.npins = mcp->chip.ngpio;
@@ -847,291 +657,5 @@ fail:
dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
return ret;
}
-
-/*----------------------------------------------------------------------*/
-
-#ifdef CONFIG_OF
-#ifdef CONFIG_SPI_MASTER
-static const struct of_device_id mcp23s08_spi_of_match[] = {
- {
- .compatible = "microchip,mcp23s08",
- .data = (void *) MCP_TYPE_S08,
- },
- {
- .compatible = "microchip,mcp23s17",
- .data = (void *) MCP_TYPE_S17,
- },
- {
- .compatible = "microchip,mcp23s18",
- .data = (void *) MCP_TYPE_S18,
- },
-/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
- {
- .compatible = "mcp,mcp23s08",
- .data = (void *) MCP_TYPE_S08,
- },
- {
- .compatible = "mcp,mcp23s17",
- .data = (void *) MCP_TYPE_S17,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match);
-#endif
-
-#if IS_ENABLED(CONFIG_I2C)
-static const struct of_device_id mcp23s08_i2c_of_match[] = {
- {
- .compatible = "microchip,mcp23008",
- .data = (void *) MCP_TYPE_008,
- },
- {
- .compatible = "microchip,mcp23017",
- .data = (void *) MCP_TYPE_017,
- },
- {
- .compatible = "microchip,mcp23018",
- .data = (void *) MCP_TYPE_018,
- },
-/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
- {
- .compatible = "mcp,mcp23008",
- .data = (void *) MCP_TYPE_008,
- },
- {
- .compatible = "mcp,mcp23017",
- .data = (void *) MCP_TYPE_017,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, mcp23s08_i2c_of_match);
-#endif
-#endif /* CONFIG_OF */
-
-
-#if IS_ENABLED(CONFIG_I2C)
-
-static int mcp230xx_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct mcp23s08_platform_data *pdata, local_pdata;
- struct mcp23s08 *mcp;
- int status;
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- pdata = &local_pdata;
- pdata->base = -1;
- }
-
- mcp = devm_kzalloc(&client->dev, sizeof(*mcp), GFP_KERNEL);
- if (!mcp)
- return -ENOMEM;
-
- mcp->irq = client->irq;
- mcp->irq_chip.name = dev_name(&client->dev);
- mcp->irq_chip.irq_mask = mcp23s08_irq_mask;
- mcp->irq_chip.irq_unmask = mcp23s08_irq_unmask;
- mcp->irq_chip.irq_set_type = mcp23s08_irq_set_type;
- mcp->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock;
- mcp->irq_chip.irq_bus_sync_unlock = mcp23s08_irq_bus_unlock;
-
- status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr,
- id->driver_data, pdata->base, 0);
- if (status)
- return status;
-
- i2c_set_clientdata(client, mcp);
-
- return 0;
-}
-
-static const struct i2c_device_id mcp230xx_id[] = {
- { "mcp23008", MCP_TYPE_008 },
- { "mcp23017", MCP_TYPE_017 },
- { "mcp23018", MCP_TYPE_018 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
-
-static struct i2c_driver mcp230xx_driver = {
- .driver = {
- .name = "mcp230xx",
- .of_match_table = of_match_ptr(mcp23s08_i2c_of_match),
- },
- .probe = mcp230xx_probe,
- .id_table = mcp230xx_id,
-};
-
-static int __init mcp23s08_i2c_init(void)
-{
- return i2c_add_driver(&mcp230xx_driver);
-}
-
-static void mcp23s08_i2c_exit(void)
-{
- i2c_del_driver(&mcp230xx_driver);
-}
-
-#else
-
-static int __init mcp23s08_i2c_init(void) { return 0; }
-static void mcp23s08_i2c_exit(void) { }
-
-#endif /* CONFIG_I2C */
-
-/*----------------------------------------------------------------------*/
-
-#ifdef CONFIG_SPI_MASTER
-
-static int mcp23s08_probe(struct spi_device *spi)
-{
- struct mcp23s08_platform_data *pdata, local_pdata;
- unsigned addr;
- int chips = 0;
- struct mcp23s08_driver_data *data;
- int status, type;
- unsigned ngpio = 0;
- const struct of_device_id *match;
-
- match = of_match_device(of_match_ptr(mcp23s08_spi_of_match), &spi->dev);
- if (match)
- type = (int)(uintptr_t)match->data;
- else
- type = spi_get_device_id(spi)->driver_data;
-
- pdata = dev_get_platdata(&spi->dev);
- if (!pdata) {
- pdata = &local_pdata;
- pdata->base = -1;
-
- status = device_property_read_u32(&spi->dev,
- "microchip,spi-present-mask", &pdata->spi_present_mask);
- if (status) {
- status = device_property_read_u32(&spi->dev,
- "mcp,spi-present-mask",
- &pdata->spi_present_mask);
-
- if (status) {
- dev_err(&spi->dev, "missing spi-present-mask");
- return -ENODEV;
- }
- }
- }
-
- if (!pdata->spi_present_mask || pdata->spi_present_mask > 0xff) {
- dev_err(&spi->dev, "invalid spi-present-mask");
- return -ENODEV;
- }
-
- for (addr = 0; addr < MCP_MAX_DEV_PER_CS; addr++) {
- if (pdata->spi_present_mask & BIT(addr))
- chips++;
- }
-
- if (!chips)
- return -ENODEV;
-
- data = devm_kzalloc(&spi->dev,
- struct_size(data, chip, chips), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- spi_set_drvdata(spi, data);
-
- for (addr = 0; addr < MCP_MAX_DEV_PER_CS; addr++) {
- if (!(pdata->spi_present_mask & BIT(addr)))
- continue;
- chips--;
- data->mcp[addr] = &data->chip[chips];
- data->mcp[addr]->irq = spi->irq;
- data->mcp[addr]->irq_chip.name = dev_name(&spi->dev);
- data->mcp[addr]->irq_chip.irq_mask = mcp23s08_irq_mask;
- data->mcp[addr]->irq_chip.irq_unmask = mcp23s08_irq_unmask;
- data->mcp[addr]->irq_chip.irq_set_type = mcp23s08_irq_set_type;
- data->mcp[addr]->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock;
- data->mcp[addr]->irq_chip.irq_bus_sync_unlock =
- mcp23s08_irq_bus_unlock;
- status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
- 0x40 | (addr << 1), type,
- pdata->base, addr);
- if (status < 0)
- return status;
-
- if (pdata->base != -1)
- pdata->base += data->mcp[addr]->chip.ngpio;
- ngpio += data->mcp[addr]->chip.ngpio;
- }
- data->ngpio = ngpio;
-
- return 0;
-}
-
-static const struct spi_device_id mcp23s08_ids[] = {
- { "mcp23s08", MCP_TYPE_S08 },
- { "mcp23s17", MCP_TYPE_S17 },
- { "mcp23s18", MCP_TYPE_S18 },
- { },
-};
-MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
-
-static struct spi_driver mcp23s08_driver = {
- .probe = mcp23s08_probe,
- .id_table = mcp23s08_ids,
- .driver = {
- .name = "mcp23s08",
- .of_match_table = of_match_ptr(mcp23s08_spi_of_match),
- },
-};
-
-static int __init mcp23s08_spi_init(void)
-{
- return spi_register_driver(&mcp23s08_driver);
-}
-
-static void mcp23s08_spi_exit(void)
-{
- spi_unregister_driver(&mcp23s08_driver);
-}
-
-#else
-
-static int __init mcp23s08_spi_init(void) { return 0; }
-static void mcp23s08_spi_exit(void) { }
-
-#endif /* CONFIG_SPI_MASTER */
-
-/*----------------------------------------------------------------------*/
-
-static int __init mcp23s08_init(void)
-{
- int ret;
-
- ret = mcp23s08_spi_init();
- if (ret)
- goto spi_fail;
-
- ret = mcp23s08_i2c_init();
- if (ret)
- goto i2c_fail;
-
- return 0;
-
- i2c_fail:
- mcp23s08_spi_exit();
- spi_fail:
- return ret;
-}
-/* register after spi/i2c postcore initcall and before
- * subsys initcalls that may rely on these GPIOs
- */
-subsys_initcall(mcp23s08_init);
-
-static void __exit mcp23s08_exit(void)
-{
- mcp23s08_spi_exit();
- mcp23s08_i2c_exit();
-}
-module_exit(mcp23s08_exit);
-
+EXPORT_SYMBOL_GPL(mcp23s08_probe_one);
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.h b/drivers/pinctrl/pinctrl-mcp23s08.h
new file mode 100644
index 000000000000..90dc27081a3c
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mcp23s08.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* MCP23S08 SPI/I2C GPIO driver */
+
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/types.h>
+
+/*
+ * MCP types supported by driver
+ */
+#define MCP_TYPE_S08 1
+#define MCP_TYPE_S17 2
+#define MCP_TYPE_008 3
+#define MCP_TYPE_017 4
+#define MCP_TYPE_S18 5
+#define MCP_TYPE_018 6
+
+struct device;
+struct regmap;
+
+struct pinctrl_dev;
+
+struct mcp23s08 {
+ u8 addr;
+ bool irq_active_high;
+ bool reg_shift;
+
+ u16 irq_rise;
+ u16 irq_fall;
+ int irq;
+ bool irq_controller;
+ int cached_gpio;
+ /* lock protects regmap access with bypass/cache flags */
+ struct mutex lock;
+
+ struct gpio_chip chip;
+ struct irq_chip irq_chip;
+
+ struct regmap *regmap;
+ struct device *dev;
+
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pinctrl_desc;
+};
+
+extern const struct regmap_config mcp23x08_regmap;
+extern const struct regmap_config mcp23x17_regmap;
+
+int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ unsigned int addr, unsigned int type, unsigned int base);
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_i2c.c b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
new file mode 100644
index 000000000000..e0b001c8c08c
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* MCP23S08 I2C GPIO driver */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "pinctrl-mcp23s08.h"
+
+static int mcp230xx_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ unsigned int type = id->driver_data;
+ struct mcp23s08 *mcp;
+ int ret;
+
+ mcp = devm_kzalloc(dev, sizeof(*mcp), GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ switch (type) {
+ case MCP_TYPE_008:
+ mcp->regmap = devm_regmap_init_i2c(client, &mcp23x08_regmap);
+ mcp->reg_shift = 0;
+ mcp->chip.ngpio = 8;
+ mcp->chip.label = "mcp23008";
+ break;
+
+ case MCP_TYPE_017:
+ mcp->regmap = devm_regmap_init_i2c(client, &mcp23x17_regmap);
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23017";
+ break;
+
+ case MCP_TYPE_018:
+ mcp->regmap = devm_regmap_init_i2c(client, &mcp23x17_regmap);
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23018";
+ break;
+
+ default:
+ dev_err(dev, "invalid device type (%d)\n", type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(mcp->regmap))
+ return PTR_ERR(mcp->regmap);
+
+ mcp->irq = client->irq;
+ mcp->pinctrl_desc.name = "mcp23xxx-pinctrl";
+
+ ret = mcp23s08_probe_one(mcp, dev, client->addr, type, -1);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(client, mcp);
+
+ return 0;
+}
+
+static const struct i2c_device_id mcp230xx_id[] = {
+ { "mcp23008", MCP_TYPE_008 },
+ { "mcp23017", MCP_TYPE_017 },
+ { "mcp23018", MCP_TYPE_018 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
+
+static const struct of_device_id mcp23s08_i2c_of_match[] = {
+ {
+ .compatible = "microchip,mcp23008",
+ .data = (void *) MCP_TYPE_008,
+ },
+ {
+ .compatible = "microchip,mcp23017",
+ .data = (void *) MCP_TYPE_017,
+ },
+ {
+ .compatible = "microchip,mcp23018",
+ .data = (void *) MCP_TYPE_018,
+ },
+/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
+ {
+ .compatible = "mcp,mcp23008",
+ .data = (void *) MCP_TYPE_008,
+ },
+ {
+ .compatible = "mcp,mcp23017",
+ .data = (void *) MCP_TYPE_017,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp23s08_i2c_of_match);
+
+static struct i2c_driver mcp230xx_driver = {
+ .driver = {
+ .name = "mcp230xx",
+ .of_match_table = mcp23s08_i2c_of_match,
+ },
+ .probe = mcp230xx_probe,
+ .id_table = mcp230xx_id,
+};
+
+static int __init mcp23s08_i2c_init(void)
+{
+ return i2c_add_driver(&mcp230xx_driver);
+}
+
+/*
+ * Register after I²C postcore initcall and before
+ * subsys initcalls that may rely on these GPIOs.
+ */
+subsys_initcall(mcp23s08_i2c_init);
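[Editor's note: the initcall level chosen above is what provides that ordering: postcore_initcall (where the I2C and SPI cores register their bus types) runs before subsys_initcall, which runs before device_initcall. A minimal sketch of a hypothetical consumer — board_led_init and the "led" lookup are illustrative, not from this patch — that may rely on the expander being probed already:]

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/init.h>

	/* Runs at device_initcall level, i.e. after this driver's
	 * subsys_initcall, so the expander's gpiochip should already exist
	 * (assuming a gpiod lookup table maps "led" to one of its pins). */
	static int __init board_led_init(void)
	{
		struct gpio_desc *led = gpiod_get(NULL, "led", GPIOD_OUT_LOW);

		return PTR_ERR_OR_ZERO(led);
	}
	device_initcall(board_led_init);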
+
+static void mcp23s08_i2c_exit(void)
+{
+ i2c_del_driver(&mcp230xx_driver);
+}
+module_exit(mcp23s08_i2c_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
new file mode 100644
index 000000000000..e06fb885fd2b
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* MCP23S08 SPI GPIO driver */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "pinctrl-mcp23s08.h"
+
+#define MCP_MAX_DEV_PER_CS 8
+
+/*
+ * A given spi_device can represent up to eight mcp23sxx chips
+ * sharing the same chipselect but using different addresses
+ * (e.g. chips #0 and #3 might be populated, but not #1 or #2).
+ * Driver data holds all the per-chip data.
+ */
+struct mcp23s08_driver_data {
+ unsigned ngpio;
+ struct mcp23s08 *mcp[8];
+ struct mcp23s08 chip[];
+};
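[Editor's note: to make the addressing concrete, a stand-alone sketch (plain C, compiled outside the kernel) of which opcodes a present-mask of 0x09 produces; the 0x40 opcode and the addr << 1 shift mirror mcp23s08_probe() and mcp23sxx_spi_read() below:]

	#include <stdio.h>

	int main(void)
	{
		unsigned long spi_present_mask = 0x09;	/* chips #0 and #3 populated */
		unsigned int addr;

		for (addr = 0; addr < 8; addr++) {
			unsigned int wr = 0x40 | (addr << 1);

			if (!(spi_present_mask & (1UL << addr)))
				continue;
			/* write opcode as in probe; read opcode sets bit 0 as in read */
			printf("chip #%u: write opcode 0x%02x, read opcode 0x%02x\n",
			       addr, wr, wr | 0x01);
		}
		return 0;
	}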
+
+static int mcp23sxx_spi_write(void *context, const void *data, size_t count)
+{
+ struct mcp23s08 *mcp = context;
+ struct spi_device *spi = to_spi_device(mcp->dev);
+ struct spi_message m;
+ struct spi_transfer t[2] = { { .tx_buf = &mcp->addr, .len = 1, },
+ { .tx_buf = data, .len = count, }, };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int mcp23sxx_spi_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ struct mcp23s08 *mcp = context;
+ struct spi_device *spi = to_spi_device(mcp->dev);
+ struct spi_message m;
+ struct spi_transfer t[3] = { { .tx_buf = &mcp->addr, .len = 1, },
+ { .tx_buf = reg, .len = reg_size, },
+ { .tx_buf = val, .len = val_size, }, };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+ spi_message_add_tail(&t[2], &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int mcp23sxx_spi_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct mcp23s08 *mcp = context;
+ struct spi_device *spi = to_spi_device(mcp->dev);
+ u8 tx[2];
+
+ if (reg_size != 1)
+ return -EINVAL;
+
+ tx[0] = mcp->addr | 0x01;
+ tx[1] = *((u8 *) reg);
+
+ return spi_write_then_read(spi, tx, sizeof(tx), val, val_size);
+}
+
+static const struct regmap_bus mcp23sxx_spi_regmap = {
+ .write = mcp23sxx_spi_write,
+ .gather_write = mcp23sxx_spi_gather_write,
+ .read = mcp23sxx_spi_read,
+};
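[Editor's note: taken together, the three callbacks produce the byte-level SPI frames below — a sketch inferred from the code above; IODIR = 0x00 is the conventional MCP23x08 register 0 and is used purely as an example:]

	/*
	 * regmap op       frame on the wire (A = mcp->addr)
	 * ---------       ------------------------------------------------
	 * write           A, payload...       (two tx transfers)
	 * gather_write    A, reg..., val...   (three tx transfers)
	 * read            A | 0x01, reg       (tx, then rx val_size bytes)
	 *
	 * e.g. reading IODIR (0x00) of chip #3 (A = 0x46):
	 *      tx = { 0x47, 0x00 }, then read one byte.
	 */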
+
+static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
+ unsigned int addr, unsigned int type)
+{
+ const struct regmap_config *config;
+ struct regmap_config *copy;
+ const char *name;
+
+ switch (type) {
+ case MCP_TYPE_S08:
+ mcp->reg_shift = 0;
+ mcp->chip.ngpio = 8;
+ mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s08.%d", addr);
+
+ config = &mcp23x08_regmap;
+ name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
+ break;
+
+ case MCP_TYPE_S17:
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s17.%d", addr);
+
+ config = &mcp23x17_regmap;
+ name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
+ break;
+
+ case MCP_TYPE_S18:
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23s18";
+
+ config = &mcp23x17_regmap;
+ name = config->name;
+ break;
+
+ default:
+ dev_err(dev, "invalid device type (%d)\n", type);
+ return -EINVAL;
+ }
+
+ copy = devm_kmemdup(dev, config, sizeof(*config), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+
+ copy->name = name;
+
+ mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, copy);
+ if (IS_ERR(mcp->regmap))
+ return PTR_ERR(mcp->regmap);
+
+ return 0;
+}
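[Editor's note: a short remark on the per-address name (an inference, not stated in the patch): the regmap core uses config->name to distinguish multiple regmaps attached to one struct device, e.g. in debugfs, so two chips behind one chipselect get separate entries:]

	/*
	 * Sketch (assumed regmap debugfs naming): with chips at addresses
	 * 0 and 3 behind one SPI device "spi0.0", the per-instance names
	 * yield distinct directories along the lines of
	 *     /sys/kernel/debug/regmap/spi0.0-0/
	 *     /sys/kernel/debug/regmap/spi0.0-3/
	 * instead of both chips colliding under a single entry.
	 */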
+
+static int mcp23s08_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mcp23s08_driver_data *data;
+ unsigned long spi_present_mask;
+ const void *match;
+ unsigned int addr;
+ unsigned int ngpio = 0;
+ int chips;
+ int type;
+ int ret;
+ u32 v;
+
+ match = device_get_match_data(dev);
+ if (match)
+ type = (int)(uintptr_t)match;
+ else
+ type = spi_get_device_id(spi)->driver_data;
+
+ ret = device_property_read_u32(dev, "microchip,spi-present-mask", &v);
+ if (ret) {
+ ret = device_property_read_u32(dev, "mcp,spi-present-mask", &v);
+ if (ret) {
+ dev_err(dev, "missing spi-present-mask");
+ return ret;
+ }
+ }
+ spi_present_mask = v;
+
+ if (!spi_present_mask || spi_present_mask >= BIT(MCP_MAX_DEV_PER_CS)) {
+ dev_err(dev, "invalid spi-present-mask");
+ return -ENODEV;
+ }
+
+ chips = hweight_long(spi_present_mask);
+
+ data = devm_kzalloc(dev, struct_size(data, chip, chips), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, data);
+
+ for_each_set_bit(addr, &spi_present_mask, MCP_MAX_DEV_PER_CS) {
+ data->mcp[addr] = &data->chip[--chips];
+ data->mcp[addr]->irq = spi->irq;
+
+ ret = mcp23s08_spi_regmap_init(data->mcp[addr], dev, addr, type);
+ if (ret)
+ return ret;
+
+ data->mcp[addr]->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
+ "mcp23xxx-pinctrl.%d",
+ addr);
+ if (!data->mcp[addr]->pinctrl_desc.name)
+ return -ENOMEM;
+
+ ret = mcp23s08_probe_one(data->mcp[addr], dev, 0x40 | (addr << 1), type, -1);
+ if (ret < 0)
+ return ret;
+
+ ngpio += data->mcp[addr]->chip.ngpio;
+ }
+ data->ngpio = ngpio;
+
+ return 0;
+}
+
+static const struct spi_device_id mcp23s08_ids[] = {
+ { "mcp23s08", MCP_TYPE_S08 },
+ { "mcp23s17", MCP_TYPE_S17 },
+ { "mcp23s18", MCP_TYPE_S18 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
+
+static const struct of_device_id mcp23s08_spi_of_match[] = {
+ {
+ .compatible = "microchip,mcp23s08",
+ .data = (void *) MCP_TYPE_S08,
+ },
+ {
+ .compatible = "microchip,mcp23s17",
+ .data = (void *) MCP_TYPE_S17,
+ },
+ {
+ .compatible = "microchip,mcp23s18",
+ .data = (void *) MCP_TYPE_S18,
+ },
+/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
+ {
+ .compatible = "mcp,mcp23s08",
+ .data = (void *) MCP_TYPE_S08,
+ },
+ {
+ .compatible = "mcp,mcp23s17",
+ .data = (void *) MCP_TYPE_S17,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match);
+
+static struct spi_driver mcp23s08_driver = {
+ .probe = mcp23s08_probe,
+ .id_table = mcp23s08_ids,
+ .driver = {
+ .name = "mcp23s08",
+ .of_match_table = mcp23s08_spi_of_match,
+ },
+};
+
+static int __init mcp23s08_spi_init(void)
+{
+ return spi_register_driver(&mcp23s08_driver);
+}
+
+/*
+ * Register after SPI postcore initcall and before
+ * subsys initcalls that may rely on these GPIOs.
+ */
+subsys_initcall(mcp23s08_spi_init);
+
+static void mcp23s08_spi_exit(void)
+{
+ spi_unregister_driver(&mcp23s08_driver);
+}
+module_exit(mcp23s08_spi_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index ed8eac6c1494..95c225bc7572 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -46,32 +46,15 @@ enum {
FUNC_IRQ0_OUT,
FUNC_IRQ1_IN,
FUNC_IRQ1_OUT,
- FUNC_MIIM1,
- FUNC_MIIM2,
+ FUNC_MIIM,
FUNC_PCI_WAKE,
FUNC_PTP0,
FUNC_PTP1,
FUNC_PTP2,
FUNC_PTP3,
FUNC_PWM,
- FUNC_RECO_CLK0,
- FUNC_RECO_CLK1,
- FUNC_SFP0,
- FUNC_SFP1,
- FUNC_SFP2,
- FUNC_SFP3,
- FUNC_SFP4,
- FUNC_SFP5,
- FUNC_SFP6,
- FUNC_SFP7,
- FUNC_SFP8,
- FUNC_SFP9,
- FUNC_SFP10,
- FUNC_SFP11,
- FUNC_SFP12,
- FUNC_SFP13,
- FUNC_SFP14,
- FUNC_SFP15,
+ FUNC_RECO_CLK,
+ FUNC_SFP,
FUNC_SG0,
FUNC_SG1,
FUNC_SG2,
@@ -92,32 +75,15 @@ static const char *const ocelot_function_names[] = {
[FUNC_IRQ0_OUT] = "irq0_out",
[FUNC_IRQ1_IN] = "irq1_in",
[FUNC_IRQ1_OUT] = "irq1_out",
- [FUNC_MIIM1] = "miim1",
- [FUNC_MIIM2] = "miim2",
+ [FUNC_MIIM] = "miim",
[FUNC_PCI_WAKE] = "pci_wake",
[FUNC_PTP0] = "ptp0",
[FUNC_PTP1] = "ptp1",
[FUNC_PTP2] = "ptp2",
[FUNC_PTP3] = "ptp3",
[FUNC_PWM] = "pwm",
- [FUNC_RECO_CLK0] = "reco_clk0",
- [FUNC_RECO_CLK1] = "reco_clk1",
- [FUNC_SFP0] = "sfp0",
- [FUNC_SFP1] = "sfp1",
- [FUNC_SFP2] = "sfp2",
- [FUNC_SFP3] = "sfp3",
- [FUNC_SFP4] = "sfp4",
- [FUNC_SFP5] = "sfp5",
- [FUNC_SFP6] = "sfp6",
- [FUNC_SFP7] = "sfp7",
- [FUNC_SFP8] = "sfp8",
- [FUNC_SFP9] = "sfp9",
- [FUNC_SFP10] = "sfp10",
- [FUNC_SFP11] = "sfp11",
- [FUNC_SFP12] = "sfp12",
- [FUNC_SFP13] = "sfp13",
- [FUNC_SFP14] = "sfp14",
- [FUNC_SFP15] = "sfp15",
+ [FUNC_RECO_CLK] = "reco_clk",
+ [FUNC_SFP] = "sfp",
[FUNC_SG0] = "sg0",
[FUNC_SG1] = "sg1",
[FUNC_SG2] = "sg2",
@@ -168,18 +134,18 @@ OCELOT_P(6, UART, TWI_SCL_M, NONE);
OCELOT_P(7, UART, TWI_SCL_M, NONE);
OCELOT_P(8, SI, TWI_SCL_M, IRQ0_OUT);
OCELOT_P(9, SI, TWI_SCL_M, IRQ1_OUT);
-OCELOT_P(10, PTP2, TWI_SCL_M, SFP0);
-OCELOT_P(11, PTP3, TWI_SCL_M, SFP1);
-OCELOT_P(12, UART2, TWI_SCL_M, SFP2);
-OCELOT_P(13, UART2, TWI_SCL_M, SFP3);
-OCELOT_P(14, MIIM1, TWI_SCL_M, SFP4);
-OCELOT_P(15, MIIM1, TWI_SCL_M, SFP5);
+OCELOT_P(10, PTP2, TWI_SCL_M, SFP);
+OCELOT_P(11, PTP3, TWI_SCL_M, SFP);
+OCELOT_P(12, UART2, TWI_SCL_M, SFP);
+OCELOT_P(13, UART2, TWI_SCL_M, SFP);
+OCELOT_P(14, MIIM, TWI_SCL_M, SFP);
+OCELOT_P(15, MIIM, TWI_SCL_M, SFP);
OCELOT_P(16, TWI, NONE, SI);
OCELOT_P(17, TWI, TWI_SCL_M, SI);
OCELOT_P(18, PTP0, TWI_SCL_M, NONE);
OCELOT_P(19, PTP1, TWI_SCL_M, NONE);
-OCELOT_P(20, RECO_CLK0, TACHO, NONE);
-OCELOT_P(21, RECO_CLK1, PWM, NONE);
+OCELOT_P(20, RECO_CLK, TACHO, TWI_SCL_M);
+OCELOT_P(21, RECO_CLK, PWM, TWI_SCL_M);
#define OCELOT_PIN(n) { \
.number = n, \
@@ -264,22 +230,22 @@ JAGUAR2_P(40, NONE, TWI_SCL_M);
JAGUAR2_P(41, NONE, TWI_SCL_M);
JAGUAR2_P(42, NONE, TWI_SCL_M);
JAGUAR2_P(43, NONE, TWI_SCL_M);
-JAGUAR2_P(44, NONE, SFP8);
-JAGUAR2_P(45, NONE, SFP9);
-JAGUAR2_P(46, NONE, SFP10);
-JAGUAR2_P(47, NONE, SFP11);
-JAGUAR2_P(48, SFP0, NONE);
-JAGUAR2_P(49, SFP1, SI);
-JAGUAR2_P(50, SFP2, SI);
-JAGUAR2_P(51, SFP3, SI);
-JAGUAR2_P(52, SFP4, NONE);
-JAGUAR2_P(53, SFP5, NONE);
-JAGUAR2_P(54, SFP6, NONE);
-JAGUAR2_P(55, SFP7, NONE);
-JAGUAR2_P(56, MIIM1, SFP12);
-JAGUAR2_P(57, MIIM1, SFP13);
-JAGUAR2_P(58, MIIM2, SFP14);
-JAGUAR2_P(59, MIIM2, SFP15);
+JAGUAR2_P(44, NONE, SFP);
+JAGUAR2_P(45, NONE, SFP);
+JAGUAR2_P(46, NONE, SFP);
+JAGUAR2_P(47, NONE, SFP);
+JAGUAR2_P(48, SFP, NONE);
+JAGUAR2_P(49, SFP, SI);
+JAGUAR2_P(50, SFP, SI);
+JAGUAR2_P(51, SFP, SI);
+JAGUAR2_P(52, SFP, NONE);
+JAGUAR2_P(53, SFP, NONE);
+JAGUAR2_P(54, SFP, NONE);
+JAGUAR2_P(55, SFP, NONE);
+JAGUAR2_P(56, MIIM, SFP);
+JAGUAR2_P(57, MIIM, SFP);
+JAGUAR2_P(58, MIIM, SFP);
+JAGUAR2_P(59, MIIM, SFP);
JAGUAR2_P(60, NONE, NONE);
JAGUAR2_P(61, NONE, NONE);
JAGUAR2_P(62, NONE, NONE);
@@ -714,11 +680,12 @@ static void ocelot_irq_handler(struct irq_desc *desc)
struct irq_chip *parent_chip = irq_desc_get_chip(desc);
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int id_reg = OCELOT_GPIO_INTR_IDENT * info->stride;
unsigned int reg = 0, irq, i;
unsigned long irqs;
for (i = 0; i < info->stride; i++) {
- regmap_read(info->map, OCELOT_GPIO_INTR_IDENT + 4 * i, &reg);
+ regmap_read(info->map, id_reg + 4 * i, &reg);
if (!reg)
continue;
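[Editor's note: the offset arithmetic, sketched for a hypothetical 64-pin controller with info->stride = 2; the per-bank pin split is an assumption for illustration:]

	/*
	 * id_reg = OCELOT_GPIO_INTR_IDENT * info->stride locates the start
	 * of the replicated IDENT block; the loop then reads one 32-bit
	 * word per bank:
	 *   i = 0: regmap_read(map, id_reg + 0)   - pins  0..31
	 *   i = 1: regmap_read(map, id_reg + 4)   - pins 32..63
	 */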
@@ -751,21 +718,21 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
gc->of_node = info->dev->of_node;
gc->label = "ocelot-gpio";
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (irq <= 0)
- return irq;
-
- girq = &gc->irq;
- girq->chip = &ocelot_irqchip;
- girq->parent_handler = ocelot_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = irq;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
+ irq = irq_of_parse_and_map(gc->of_node, 0);
+ if (irq) {
+ girq = &gc->irq;
+ girq->chip = &ocelot_irqchip;
+ girq->parent_handler = ocelot_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+ }
ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index cccbe072274e..c6f4229eb106 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -73,7 +73,7 @@ struct rk805_pctrl_info {
int num_pin_groups;
const struct pinctrl_pin_desc *pins;
unsigned int num_pins;
- struct rk805_pin_config *pin_cfg;
+ const struct rk805_pin_config *pin_cfg;
};
enum rk805_pinmux_option {
@@ -121,7 +121,7 @@ static const struct rk805_pin_group rk805_pin_groups[] = {
#define RK805_GPIO0_VAL_MSK BIT(0)
#define RK805_GPIO1_VAL_MSK BIT(1)
-static struct rk805_pin_config rk805_gpio_cfgs[] = {
+static const struct rk805_pin_config rk805_gpio_cfgs[] = {
{
.reg = RK805_OUT_REG,
.val_msk = RK805_GPIO0_VAL_MSK,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 098951346339..c07324d1f265 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -508,8 +508,8 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
}
map_num += grp->npins;
- new_map = devm_kcalloc(pctldev->dev, map_num, sizeof(*new_map),
- GFP_KERNEL);
+
+ new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL);
if (!new_map)
return -ENOMEM;
@@ -519,7 +519,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create mux map */
parent = of_get_parent(np);
if (!parent) {
- devm_kfree(pctldev->dev, new_map);
+ kfree(new_map);
return -EINVAL;
}
new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
@@ -546,6 +546,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
static void rockchip_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
+ kfree(map);
}
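[Editor's note: the switch from devm_kcalloc() to plain kcalloc() plus this kfree() follows from the map's lifetime — a reasoning sketch, not stated in the patch:]

	/*
	 * ->dt_node_to_map() can be called, and its result released via
	 * ->dt_free_map(), many times while the pin controller device
	 * stays around; a devm allocation would only be freed when the
	 * device itself goes away, so every map/unmap cycle would leak
	 * one copy. Hence the map is freed here, not device-managed.
	 */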
static const struct pinctrl_ops rockchip_pctrl_ops = {
@@ -2940,14 +2941,14 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
sizeof(struct rockchip_pmx_func),
GFP_KERNEL);
if (!info->functions)
- return -EINVAL;
+ return -ENOMEM;
info->groups = devm_kcalloc(dev,
info->ngroups,
sizeof(struct rockchip_pin_group),
GFP_KERNEL);
if (!info->groups)
- return -EINVAL;
+ return -ENOMEM;
i = 0;
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index da2d8365c690..38a14bbced5f 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -93,7 +93,7 @@ struct rza1_bidir_entry {
};
/**
- * rza1_swio_pin - describe a single pin that needs bidir flag applied.
+ * rza1_swio_pin - describe a single pin that needs swio flag applied.
*/
struct rza1_swio_pin {
u16 pin: 4;
@@ -418,7 +418,7 @@ static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = {
};
static const struct rza1_swio_entry rza1l_swio_entries[] = {
- [0] = { ARRAY_SIZE(rza1h_swio_pins), rza1h_swio_pins },
+ [0] = { ARRAY_SIZE(rza1l_swio_pins), rza1l_swio_pins },
};
/* RZ/A1L (r7s72102x) pinmux flags table */
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 60100b45f5e5..1aae803c12cd 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -288,7 +288,7 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
struct pinctrl_gpio_range *range;
enum pin_config_param param;
u32 arg;
- int dir, i, ret;
+ int i, ret;
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
if (!range) {
@@ -296,10 +296,6 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
}
- dir = stmfx_gpio_get_direction(&pctl->gpio_chip, pin);
- if (dir < 0)
- return dir;
-
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 6e74bd87d959..708bc91862fe 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -988,7 +988,7 @@ static unsigned int sx150x_maybe_swizzle(struct sx150x_pinctrl *pctl,
/*
* In order to mask the differences between 16 and 8 bit expander
 * devices we set up a slightly fictitious regmap that pretends to be
- * a set of 32-bit (to accomodate RegSenseLow/RegSenseHigh
+ * a set of 32-bit (to accommodate RegSenseLow/RegSenseHigh
* pair/quartet) registers and transparently reconstructs those
* registers via multiple I2C/SMBus reads
*
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index bddf2c5dd3bf..eab029a21643 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -425,15 +425,6 @@ int pxa2xx_pinctrl_init(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_init);
-int pxa2xx_pinctrl_exit(struct platform_device *pdev)
-{
- struct pxa_pinctrl *pctl = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pctl->pctl_dev);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_exit);
-
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_DESCRIPTION("Marvell PXA2xx pinctrl driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index c5d4428f1f94..ff1ee159dca2 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -216,4 +216,13 @@ config PINCTRL_SM8150
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8150 platform.
+config PINCTRL_SM8250
+ tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8250 platform.
+
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index d9e09045a776..061ec9fb659b 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_PINCTRL_SC7180) += pinctrl-sc7180.o
obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
+obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 9a398a211d30..83b7d64bc4c1 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -23,7 +23,6 @@
#include <linux/pm.h>
#include <linux/log2.h>
#include <linux/qcom_scm.h>
-#include <linux/io.h>
#include <linux/soc/qcom/irq.h>
@@ -697,7 +696,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
pol = msm_readl_intr_cfg(pctrl, g);
pol ^= BIT(g->intr_polarity_bit);
- msm_writel_intr_cfg(val, pctrl, g);
+ msm_writel_intr_cfg(pol, pctrl, g);
val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit);
intstat = msm_readl_intr_status(pctrl, g);
@@ -1034,6 +1033,29 @@ static void msm_gpio_irq_relres(struct irq_data *d)
module_put(gc->owner);
}
+static int msm_gpio_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *dest, bool force)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return irq_chip_set_affinity_parent(d, dest, force);
+
+ return 0;
+}
+
+static int msm_gpio_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return irq_chip_set_vcpu_affinity_parent(d, vcpu_info);
+
+ return 0;
+}
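[Editor's note: a sketch of the intent behind the two callbacks, inferred from the code rather than spelled out in the patch:]

	/*
	 * GPIOs whose wake handling is delegated to the wakeup parent
	 * (bit set in pctrl->skip_wake_irqs) carry parent irq_data in
	 * their hierarchy, so affinity requests are forwarded up with
	 * the *_parent() helpers. All other GPIOs are funnelled through
	 * the single TLMM summary IRQ, whose affinity is managed on the
	 * summary line itself, so per-pin requests succeed as no-ops.
	 */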
+
static void msm_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -1132,6 +1154,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
+ pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
+ pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
if (np) {
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
new file mode 100644
index 000000000000..a660f1274b66
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -0,0 +1,1361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const char * const sm8250_tiles[] = {
+ "west",
+ "south",
+ "north",
+};
+
+enum {
+ WEST,
+ SOUTH,
+ NORTH,
+};
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
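[Editor's note: for one concrete entry, FUNCTION(dp_hot) expands to the initializer below — straight macro expansion; dp_hot_groups is defined later in this file:]

	[msm_mux_dp_hot] = {
		.name = "dp_hot",
		.groups = dp_hot_groups,
		.ngroups = ARRAY_SIZE(dp_hot_groups),
	}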
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = REG_SIZE * id + 0x4, \
+ .intr_cfg_reg = REG_SIZE * id + 0x8, \
+ .intr_status_reg = REG_SIZE * id + 0xc, \
+ .intr_target_reg = REG_SIZE * id + 0x8, \
+ .tile = _tile, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
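[Editor's note: because every GPIO owns a full REG_SIZE (0x1000) register window within its tile, the macro's offsets work out mechanically; e.g. for PINGROUP(5, ...):]

	/*
	 * gpio5: ctl_reg         = 0x1000 * 5   = 0x5000
	 *        io_reg          = 0x5000 + 0x4 = 0x5004
	 *        intr_cfg_reg    = 0x5000 + 0x8 = 0x5008
	 *        intr_status_reg = 0x5000 + 0xc = 0x500c
	 *        intr_target_reg = 0x5000 + 0x8 = 0x5008 (shares intr_cfg's offset)
	 */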
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = NORTH, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = SOUTH, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm8250_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "SDC2_CLK"),
+ PINCTRL_PIN(181, "SDC2_CMD"),
+ PINCTRL_PIN(182, "SDC2_DATA"),
+ PINCTRL_PIN(183, "UFS_RESET"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+
+static const unsigned int sdc2_clk_pins[] = { 180 };
+static const unsigned int sdc2_cmd_pins[] = { 181 };
+static const unsigned int sdc2_data_pins[] = { 182 };
+static const unsigned int ufs_reset_pins[] = { 183 };
+
+enum sm8250_functions {
+ msm_mux_aoss_cti,
+ msm_mux_atest,
+ msm_mux_audio_ref,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_dp_lcd,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gpio,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_lpass_slimbus,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s0_data0,
+ msm_mux_mi2s0_data1,
+ msm_mux_mi2s0_sck,
+ msm_mux_mi2s0_ws,
+ msm_mux_mi2s1_data0,
+ msm_mux_mi2s1_data1,
+ msm_mux_mi2s1_sck,
+ msm_mux_mi2s1_ws,
+ msm_mux_mi2s2_data0,
+ msm_mux_mi2s2_data1,
+ msm_mux_mi2s2_sck,
+ msm_mux_mi2s2_ws,
+ msm_mux_pci_e0,
+ msm_mux_pci_e1,
+ msm_mux_pci_e2,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_clk,
+ msm_mux_pll_reset,
+ msm_mux_pri_mi2s,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qspi0,
+ msm_mux_qspi1,
+ msm_mux_qspi2,
+ msm_mux_qspi3,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qup0,
+ msm_mux_qup1,
+ msm_mux_qup10,
+ msm_mux_qup11,
+ msm_mux_qup12,
+ msm_mux_qup13,
+ msm_mux_qup14,
+ msm_mux_qup15,
+ msm_mux_qup16,
+ msm_mux_qup17,
+ msm_mux_qup18,
+ msm_mux_qup19,
+ msm_mux_qup2,
+ msm_mux_qup3,
+ msm_mux_qup4,
+ msm_mux_qup5,
+ msm_mux_qup6,
+ msm_mux_qup7,
+ msm_mux_qup8,
+ msm_mux_qup9,
+ msm_mux_qup_l4,
+ msm_mux_qup_l5,
+ msm_mux_qup_l6,
+ msm_mux_sd_write,
+ msm_mux_sdc40,
+ msm_mux_sdc41,
+ msm_mux_sdc42,
+ msm_mux_sdc43,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sec_mi2s,
+ msm_mux_sp_cmu,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_tsif0_clk,
+ msm_mux_tsif0_data,
+ msm_mux_tsif0_en,
+ msm_mux_tsif0_error,
+ msm_mux_tsif0_sync,
+ msm_mux_tsif1_clk,
+ msm_mux_tsif1_data,
+ msm_mux_tsif1_en,
+ msm_mux_tsif1_error,
+ msm_mux_tsif1_sync,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
+
+static const char * const tsif1_data_groups[] = {
+ "gpio75",
+};
+static const char * const sdc41_groups[] = {
+ "gpio75",
+};
+static const char * const tsif1_sync_groups[] = {
+ "gpio76",
+};
+static const char * const sdc40_groups[] = {
+ "gpio76",
+};
+static const char * const aoss_cti_groups[] = {
+ "gpio77",
+};
+static const char * const phase_flag_groups[] = {
+ "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50", "gpio51",
+ "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio103", "gpio104", "gpio115", "gpio116", "gpio117", "gpio118",
+ "gpio119", "gpio120", "gpio122", "gpio124", "gpio125",
+};
+static const char * const sd_write_groups[] = {
+ "gpio78",
+};
+static const char * const pci_e0_groups[] = {
+ "gpio79", "gpio80",
+};
+static const char * const pci_e1_groups[] = {
+ "gpio82", "gpio83",
+};
+static const char * const pci_e2_groups[] = {
+ "gpio85", "gpio86",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio85",
+};
+static const char * const atest_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27", "gpio32", "gpio33", "gpio34",
+ "gpio35", "gpio36", "gpio37", "gpio85", "gpio86", "gpio87", "gpio88",
+ "gpio89",
+};
+static const char * const tgu_ch3_groups[] = {
+ "gpio86",
+};
+static const char * const tsif1_error_groups[] = {
+ "gpio90",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio90",
+};
+static const char * const tsif0_error_groups[] = {
+ "gpio91",
+};
+static const char * const tgu_ch2_groups[] = {
+ "gpio91",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99", "gpio100",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio94", "gpio95", "gpio143", "gpio144",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio96",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio97",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106",
+ "gpio107", "gpio108",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99", "gpio100",
+ "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106",
+ "gpio107", "gpio108", "gpio109", "gpio110", "gpio111", "gpio160",
+ "gpio161", "gpio162", "gpio163", "gpio164", "gpio165", "gpio166",
+ "gpio167", "gpio168", "gpio169", "gpio170", "gpio171", "gpio172",
+ "gpio173", "gpio174", "gpio175", "gpio176", "gpio177",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio106", "gpio136",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio107", "gpio137",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio108", "gpio138",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio109",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio110",
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio111",
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio112",
+};
+static const char * const cci_async_groups[] = {
+ "gpio112", "gpio113", "gpio114",
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio113",
+};
+static const char * const qup2_groups[] = {
+ "gpio115", "gpio116", "gpio117", "gpio118",
+};
+static const char * const qup3_groups[] = {
+ "gpio119", "gpio120", "gpio121", "gpio122",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio123",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio123",
+};
+static const char * const qup9_groups[] = {
+ "gpio125", "gpio126", "gpio127", "gpio128",
+};
+static const char * const qup10_groups[] = {
+ "gpio129", "gpio130", "gpio131", "gpio132",
+};
+static const char * const mi2s2_sck_groups[] = {
+ "gpio133",
+};
+static const char * const mi2s2_data0_groups[] = {
+ "gpio134",
+};
+static const char * const mi2s2_ws_groups[] = {
+ "gpio135",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio136",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio137",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio137",
+};
+static const char * const mi2s2_data1_groups[] = {
+ "gpio137",
+};
+static const char * const mi2s0_sck_groups[] = {
+ "gpio138",
+};
+static const char * const mi2s0_data0_groups[] = {
+ "gpio139",
+};
+static const char * const mi2s0_data1_groups[] = {
+ "gpio140",
+};
+static const char * const mi2s0_ws_groups[] = {
+ "gpio141",
+};
+static const char * const lpass_slimbus_groups[] = {
+ "gpio142", "gpio143", "gpio144", "gpio145",
+};
+static const char * const mi2s1_sck_groups[] = {
+ "gpio142",
+};
+static const char * const mi2s1_data0_groups[] = {
+ "gpio143",
+};
+static const char * const mi2s1_data1_groups[] = {
+ "gpio144",
+};
+static const char * const mi2s1_ws_groups[] = {
+ "gpio145",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio159",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio160",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio161",
+};
+static const char * const sp_cmu_groups[] = {
+ "gpio162",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio163",
+};
+static const char * const qup19_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+ "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+ "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
+ "gpio177", "gpio178", "gpio179",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio0", "gpio2", "gpio2", "gpio44", "gpio45", "gpio46", "gpio92",
+ "gpio93",
+};
+static const char * const qup1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const ibi_i3c_groups[] = {
+ "gpio4", "gpio5", "gpio24", "gpio25", "gpio28", "gpio29", "gpio40",
+ "gpio41",
+};
+static const char * const qup_l4_groups[] = {
+ "gpio6", "gpio14", "gpio46", "gpio123",
+};
+static const char * const qup_l5_groups[] = {
+ "gpio7", "gpio15", "gpio47", "gpio124",
+};
+static const char * const qup4_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const qup5_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const qup6_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const qup7_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const qup8_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+static const char * const qup0_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char * const qup12_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+static const char * const qup13_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const qup14_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio40", "gpio43",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio41", "gpio42",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio42",
+};
+static const char * const qup15_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio44",
+};
+static const char * const qup16_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+static const char * const qup17_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio52", "gpio53",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio54",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio55",
+};
+static const char * const ddr_pxi2_groups[] = {
+ "gpio55", "gpio56",
+};
+static const char * const qup18_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+static const char * const qup11_groups[] = {
+ "gpio60", "gpio61", "gpio62", "gpio63",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio64", "gpio90",
+};
+static const char * const qup_l6_groups[] = {
+ "gpio64", "gpio77", "gpio92", "gpio93",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio65",
+};
+static const char * const pll_clk_groups[] = {
+ "gpio65",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio66", "gpio67", "gpio68", "gpio122", "gpio124",
+};
+static const char * const dp_lcd_groups[] = {
+ "gpio67",
+};
+static const char * const dp_hot_groups[] = {
+ "gpio68",
+};
+static const char * const qspi_cs_groups[] = {
+ "gpio69", "gpio75",
+};
+static const char * const tsif0_clk_groups[] = {
+ "gpio69",
+};
+static const char * const qspi0_groups[] = {
+ "gpio70",
+};
+static const char * const tsif0_en_groups[] = {
+ "gpio70",
+};
+static const char * const mdp_vsync0_groups[] = {
+ "gpio70",
+};
+static const char * const mdp_vsync1_groups[] = {
+ "gpio70",
+};
+static const char * const mdp_vsync2_groups[] = {
+ "gpio70",
+};
+static const char * const mdp_vsync3_groups[] = {
+ "gpio70",
+};
+static const char * const qspi1_groups[] = {
+ "gpio71",
+};
+static const char * const tsif0_data_groups[] = {
+ "gpio71",
+};
+static const char * const sdc4_cmd_groups[] = {
+ "gpio71",
+};
+static const char * const qspi2_groups[] = {
+ "gpio72",
+};
+static const char * const tsif0_sync_groups[] = {
+ "gpio72",
+};
+static const char * const sdc43_groups[] = {
+ "gpio72",
+};
+static const char * const qspi_clk_groups[] = {
+ "gpio73",
+};
+static const char * const tsif1_clk_groups[] = {
+ "gpio73",
+};
+static const char * const sdc4_clk_groups[] = {
+ "gpio73",
+};
+static const char * const qspi3_groups[] = {
+ "gpio74",
+};
+static const char * const tsif1_en_groups[] = {
+ "gpio74",
+};
+static const char * const sdc42_groups[] = {
+ "gpio74",
+};
+
+static const struct msm_function sm8250_functions[] = {
+ FUNCTION(aoss_cti),
+ FUNCTION(atest),
+ FUNCTION(audio_ref),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(dp_lcd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gpio),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s0_data0),
+ FUNCTION(mi2s0_data1),
+ FUNCTION(mi2s0_sck),
+ FUNCTION(mi2s0_ws),
+ FUNCTION(mi2s1_data0),
+ FUNCTION(mi2s1_data1),
+ FUNCTION(mi2s1_sck),
+ FUNCTION(mi2s1_ws),
+ FUNCTION(mi2s2_data0),
+ FUNCTION(mi2s2_data1),
+ FUNCTION(mi2s2_sck),
+ FUNCTION(mi2s2_ws),
+ FUNCTION(pci_e0),
+ FUNCTION(pci_e1),
+ FUNCTION(pci_e2),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_clk),
+ FUNCTION(pll_reset),
+ FUNCTION(pri_mi2s),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qspi0),
+ FUNCTION(qspi1),
+ FUNCTION(qspi2),
+ FUNCTION(qspi3),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(qup0),
+ FUNCTION(qup1),
+ FUNCTION(qup10),
+ FUNCTION(qup11),
+ FUNCTION(qup12),
+ FUNCTION(qup13),
+ FUNCTION(qup14),
+ FUNCTION(qup15),
+ FUNCTION(qup16),
+ FUNCTION(qup17),
+ FUNCTION(qup18),
+ FUNCTION(qup19),
+ FUNCTION(qup2),
+ FUNCTION(qup3),
+ FUNCTION(qup4),
+ FUNCTION(qup5),
+ FUNCTION(qup6),
+ FUNCTION(qup7),
+ FUNCTION(qup8),
+ FUNCTION(qup9),
+ FUNCTION(qup_l4),
+ FUNCTION(qup_l5),
+ FUNCTION(qup_l6),
+ FUNCTION(sd_write),
+ FUNCTION(sdc40),
+ FUNCTION(sdc41),
+ FUNCTION(sdc42),
+ FUNCTION(sdc43),
+ FUNCTION(sdc4_clk),
+ FUNCTION(sdc4_cmd),
+ FUNCTION(sec_mi2s),
+ FUNCTION(sp_cmu),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(tsif0_clk),
+ FUNCTION(tsif0_data),
+ FUNCTION(tsif0_en),
+ FUNCTION(tsif0_error),
+ FUNCTION(tsif0_sync),
+ FUNCTION(tsif1_clk),
+ FUNCTION(tsif1_data),
+ FUNCTION(tsif1_en),
+ FUNCTION(tsif1_error),
+ FUNCTION(tsif1_sync),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vsense_trigger),
+};
+
+/* Every pin is maintained as a single group; missing or non-existent pins
+ * are maintained as dummy groups so that the pin group index stays in
+ * sync with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm8250_groups[] = {
+ [0] = PINGROUP(0, SOUTH, qup19, qdss_cti, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, SOUTH, qup19, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, SOUTH, qup19, qdss_cti, qdss_cti, _, _, _, _, _, _),
+ [3] = PINGROUP(3, SOUTH, qup19, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, NORTH, qup1, ibi_i3c, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, NORTH, qup1, ibi_i3c, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, NORTH, qup1, qup_l4, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, NORTH, qup1, qup_l5, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, NORTH, qup4, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, NORTH, qup4, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, NORTH, qup4, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, NORTH, qup4, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, NORTH, qup5, _, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, NORTH, qup5, _, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, NORTH, qup5, qup_l4, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, NORTH, qup5, qup_l5, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, NORTH, qup6, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, NORTH, qup6, _, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, NORTH, qup6, _, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, NORTH, qup6, _, _, _, _, _, _, _, _),
+ [20] = PINGROUP(20, NORTH, qup7, _, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, NORTH, qup7, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, NORTH, qup7, _, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, NORTH, qup7, _, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, SOUTH, qup8, ibi_i3c, atest, _, _, _, _, _, _),
+ [25] = PINGROUP(25, SOUTH, qup8, ibi_i3c, atest, _, _, _, _, _, _),
+ [26] = PINGROUP(26, SOUTH, qup8, atest, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, SOUTH, qup8, atest, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, NORTH, qup0, ibi_i3c, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, NORTH, qup0, ibi_i3c, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, NORTH, qup0, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, NORTH, qup0, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, SOUTH, qup12, _, atest, _, _, _, _, _, _),
+ [33] = PINGROUP(33, SOUTH, qup12, atest, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, SOUTH, qup12, atest, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, SOUTH, qup12, atest, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, SOUTH, qup13, atest, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, SOUTH, qup13, atest, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, SOUTH, qup13, _, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, SOUTH, qup13, _, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, SOUTH, qup14, ibi_i3c, _, ddr_pxi3, _, _, _, _, _),
+ [41] = PINGROUP(41, SOUTH, qup14, ibi_i3c, _, ddr_pxi1, _, _, _, _, _),
+ [42] = PINGROUP(42, SOUTH, qup14, vsense_trigger, ddr_pxi1, _, _, _, _, _, _),
+ [43] = PINGROUP(43, SOUTH, qup14, ddr_pxi3, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, SOUTH, qup15, qdss_cti, dbg_out, _, _, _, _, _, _),
+ [45] = PINGROUP(45, SOUTH, qup15, qdss_cti, phase_flag, _, _, _, _, _, _),
+ [46] = PINGROUP(46, SOUTH, qup15, qup_l4, qdss_cti, phase_flag, _, _, _, _, _),
+ [47] = PINGROUP(47, SOUTH, qup15, qup_l5, phase_flag, _, _, _, _, _, _),
+ [48] = PINGROUP(48, SOUTH, qup16, phase_flag, _, _, _, _, _, _, _),
+ [49] = PINGROUP(49, SOUTH, qup16, phase_flag, _, _, _, _, _, _, _),
+ [50] = PINGROUP(50, SOUTH, qup16, phase_flag, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, SOUTH, qup16, phase_flag, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, SOUTH, qup17, ddr_pxi0, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, SOUTH, qup17, ddr_pxi0, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, SOUTH, qup17, jitter_bist, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, SOUTH, qup17, pll_bist, ddr_pxi2, _, _, _, _, _, _),
+ [56] = PINGROUP(56, SOUTH, qup18, ddr_pxi2, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, SOUTH, qup18, _, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, SOUTH, qup18, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, SOUTH, qup18, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, SOUTH, qup11, _, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, SOUTH, qup11, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, SOUTH, qup11, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, SOUTH, qup11, _, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, SOUTH, usb2phy_ac, qup_l6, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, SOUTH, usb_phy, pll_clk, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, NORTH, mdp_vsync, _, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, NORTH, mdp_vsync, dp_lcd, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, NORTH, mdp_vsync, dp_hot, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, SOUTH, qspi_cs, tsif0_clk, phase_flag, _, _, _, _, _, _),
+ [70] = PINGROUP(70, SOUTH, qspi0, tsif0_en, mdp_vsync0, mdp_vsync1, mdp_vsync2, mdp_vsync3, phase_flag, _, _),
+ [71] = PINGROUP(71, SOUTH, qspi1, tsif0_data, sdc4_cmd, phase_flag, _, _, _, _, _),
+ [72] = PINGROUP(72, SOUTH, qspi2, tsif0_sync, sdc43, phase_flag, _, _, _, _, _),
+ [73] = PINGROUP(73, SOUTH, qspi_clk, tsif1_clk, sdc4_clk, phase_flag, _, _, _, _, _),
+ [74] = PINGROUP(74, SOUTH, qspi3, tsif1_en, sdc42, phase_flag, _, _, _, _, _),
+ [75] = PINGROUP(75, SOUTH, qspi_cs, tsif1_data, sdc41, _, _, _, _, _, _),
+ [76] = PINGROUP(76, SOUTH, tsif1_sync, sdc40, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, NORTH, qup_l6, aoss_cti, phase_flag, _, _, _, _, _, _),
+ [78] = PINGROUP(78, NORTH, sd_write, phase_flag, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, NORTH, pci_e0, phase_flag, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, NORTH, pci_e0, phase_flag, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, NORTH, phase_flag, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, NORTH, pci_e1, phase_flag, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, NORTH, pci_e1, phase_flag, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, NORTH, phase_flag, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, SOUTH, pci_e2, tgu_ch0, atest, _, _, _, _, _, _),
+ [86] = PINGROUP(86, SOUTH, pci_e2, tgu_ch3, atest, _, _, _, _, _, _),
+ [87] = PINGROUP(87, SOUTH, atest, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, SOUTH, _, atest, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, SOUTH, _, atest, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, SOUTH, tsif1_error, usb2phy_ac, tgu_ch1, _, _, _, _, _, _),
+ [91] = PINGROUP(91, SOUTH, tsif0_error, tgu_ch2, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, NORTH, qup_l6, qdss_cti, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, NORTH, qup_l6, qdss_cti, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, NORTH, cam_mclk, ddr_bist, qdss_gpio, _, _, _, _, _, _),
+ [95] = PINGROUP(95, NORTH, cam_mclk, ddr_bist, qdss_gpio, _, _, _, _, _, _),
+ [96] = PINGROUP(96, NORTH, cam_mclk, pll_bypassnl, qdss_gpio, _, _, _, _, _, _),
+ [97] = PINGROUP(97, NORTH, cam_mclk, pll_reset, qdss_gpio, _, _, _, _, _, _),
+ [98] = PINGROUP(98, NORTH, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, NORTH, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, NORTH, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, NORTH, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, NORTH, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, NORTH, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [104] = PINGROUP(104, NORTH, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [105] = PINGROUP(105, NORTH, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, NORTH, cci_i2c, gcc_gp1, qdss_gpio, _, _, _, _, _, _),
+ [107] = PINGROUP(107, NORTH, cci_i2c, gcc_gp2, qdss_gpio, _, _, _, _, _, _),
+ [108] = PINGROUP(108, NORTH, cci_i2c, gcc_gp3, qdss_gpio, _, _, _, _, _, _),
+ [109] = PINGROUP(109, NORTH, cci_timer0, qdss_gpio, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, NORTH, cci_timer1, qdss_gpio, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, NORTH, cci_timer2, qdss_gpio, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, NORTH, cci_timer3, cci_async, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, NORTH, cci_timer4, cci_async, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, NORTH, cci_async, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, NORTH, qup2, phase_flag, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, NORTH, qup2, phase_flag, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, NORTH, qup2, phase_flag, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, NORTH, qup2, phase_flag, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, NORTH, qup3, phase_flag, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, NORTH, qup3, phase_flag, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, NORTH, qup3, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, NORTH, qup3, mdp_vsync, phase_flag, _, _, _, _, _, _),
+ [123] = PINGROUP(123, NORTH, qup_l4, tsense_pwm1, tsense_pwm2, _, _, _, _, _, _),
+ [124] = PINGROUP(124, NORTH, qup_l5, mdp_vsync, phase_flag, _, _, _, _, _, _),
+ [125] = PINGROUP(125, SOUTH, qup9, phase_flag, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, SOUTH, qup9, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, SOUTH, qup9, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, SOUTH, qup9, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, SOUTH, qup10, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, SOUTH, qup10, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, SOUTH, qup10, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, SOUTH, qup10, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, WEST, mi2s2_sck, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, WEST, mi2s2_data0, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, WEST, mi2s2_ws, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, WEST, pri_mi2s, gcc_gp1, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, WEST, sec_mi2s, audio_ref, mi2s2_data1, gcc_gp2, _, _, _, _, _),
+ [138] = PINGROUP(138, WEST, mi2s0_sck, gcc_gp3, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, WEST, mi2s0_data0, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, WEST, mi2s0_data1, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, WEST, mi2s0_ws, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, WEST, lpass_slimbus, mi2s1_sck, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, WEST, lpass_slimbus, mi2s1_data0, ddr_bist, _, _, _, _, _, _),
+ [144] = PINGROUP(144, WEST, lpass_slimbus, mi2s1_data1, ddr_bist, _, _, _, _, _, _),
+ [145] = PINGROUP(145, WEST, lpass_slimbus, mi2s1_ws, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, WEST, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, WEST, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, WEST, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, WEST, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, WEST, _, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, WEST, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, WEST, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, WEST, _, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, WEST, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, WEST, _, _, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, WEST, _, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, WEST, _, _, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, WEST, _, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, WEST, cri_trng0, _, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, WEST, cri_trng1, qdss_gpio, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, WEST, cri_trng, qdss_gpio, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, WEST, sp_cmu, qdss_gpio, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, WEST, prng_rosc, qdss_gpio, _, _, _, _, _, _, _),
+ [164] = PINGROUP(164, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [165] = PINGROUP(165, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [177] = PINGROUP(177, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP(178, WEST, _, _, _, _, _, _, _, _, _),
+ [179] = PINGROUP(179, WEST, _, _, _, _, _, _, _, _, _),
+ [180] = UFS_RESET(ufs_reset, 0xb8000),
+ [181] = SDC_PINGROUP(sdc2_clk, 0x7000, 14, 6),
+ [182] = SDC_PINGROUP(sdc2_cmd, 0xb7000, 11, 3),
+ [183] = SDC_PINGROUP(sdc2_data, 0xb7000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
+ .pins = sm8250_pins,
+ .npins = ARRAY_SIZE(sm8250_pins),
+ .functions = sm8250_functions,
+ .nfunctions = ARRAY_SIZE(sm8250_functions),
+ .groups = sm8250_groups,
+ .ngroups = ARRAY_SIZE(sm8250_groups),
+ .ngpios = 181,
+ .tiles = sm8250_tiles,
+ .ntiles = ARRAY_SIZE(sm8250_tiles),
+};
+
+static int sm8250_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm8250_pinctrl);
+}
+
+static const struct of_device_id sm8250_pinctrl_of_match[] = {
+ { .compatible = "qcom,sm8250-pinctrl", },
+ { },
+};
+
+static struct platform_driver sm8250_pinctrl_driver = {
+ .driver = {
+ .name = "sm8250-pinctrl",
+ .of_match_table = sm8250_pinctrl_of_match,
+ },
+ .probe = sm8250_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm8250_pinctrl_init(void)
+{
+ return platform_driver_register(&sm8250_pinctrl_driver);
+}
+arch_initcall(sm8250_pinctrl_init);
+
+static void __exit sm8250_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sm8250_pinctrl_driver);
+}
+module_exit(sm8250_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sm8250 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sm8250_pinctrl_of_match);
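
The table above leans on C99 designated initializers: every entry's array index equals its GPIO number, which is why the trailing UFS/SDC groups can sit at fixed indices 180-183 with no lookup step. A minimal user-space sketch of that indexing pattern (hypothetical names, not the real msm_pingroup layout):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct group {
	int pin;
	const char *name;
};

/* index == pin number, mirroring the sm8250_groups table */
#define PINGROUP(id) [id] = { .pin = id, .name = "gpio" #id }

static const struct group groups[] = {
	PINGROUP(0),
	PINGROUP(1),
	/* gaps are zero-filled by the compiler */
	PINGROUP(180),
};

int main(void)
{
	/* a pin's group is found by direct indexing, no search needed */
	printf("pin 180 -> %s (table holds %zu entries)\n",
	       groups[180].name, ARRAY_SIZE(groups));
	return 0;
}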
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 0599f5127b01..84501c785473 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -40,6 +40,8 @@ struct exynos_irq_chip {
u32 eint_pend;
u32 eint_wake_mask_value;
u32 eint_wake_mask_reg;
+ void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip);
};
static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
@@ -265,6 +267,7 @@ struct exynos_eint_gpio_save {
u32 eint_con;
u32 eint_fltcon0;
u32 eint_fltcon1;
+ u32 eint_mask;
};
/*
@@ -342,6 +345,47 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
return 0;
}
+static void
+exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip)
+{
+ struct regmap *pmu_regs;
+
+ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
+ dev_warn(drvdata->dev,
+ "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
+ return;
+ }
+
+ pmu_regs = drvdata->retention_ctrl->priv;
+ dev_info(drvdata->dev,
+ "Setting external wakeup interrupt mask: 0x%x\n",
+ irq_chip->eint_wake_mask_value);
+
+ regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
+ irq_chip->eint_wake_mask_value);
+}
+
+static void
+s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip)
+
+{
+ void __iomem *clk_base;
+
+ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
+ dev_warn(drvdata->dev,
+ "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
+ return;
+ }
+
+
+ clk_base = (void __iomem *) drvdata->retention_ctrl->priv;
+
+ __raw_writel(irq_chip->eint_wake_mask_value,
+ clk_base + irq_chip->eint_wake_mask_reg);
+}
+
/*
* irq_chip for wakeup interrupts
*/
@@ -360,8 +404,9 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = {
.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
.eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
- /* Only difference with exynos4210_wkup_irq_chip: */
+ /* Only differences with exynos4210_wkup_irq_chip: */
.eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask,
};
static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
@@ -380,6 +425,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
.eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
.eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
@@ -398,6 +444,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
.eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
.eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
.eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
/* list of external wakeup controllers supported */
@@ -574,27 +621,6 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
return 0;
}
-static void
-exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
- struct exynos_irq_chip *irq_chip)
-{
- struct regmap *pmu_regs;
-
- if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
- dev_warn(drvdata->dev,
- "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
- return;
- }
-
- pmu_regs = drvdata->retention_ctrl->priv;
- dev_info(drvdata->dev,
- "Setting external wakeup interrupt mask: 0x%x\n",
- irq_chip->eint_wake_mask_value);
-
- regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
- irq_chip->eint_wake_mask_value);
-}
-
static void exynos_pinctrl_suspend_bank(
struct samsung_pinctrl_drv_data *drvdata,
struct samsung_pin_bank *bank)
@@ -608,10 +634,13 @@ static void exynos_pinctrl_suspend_bank(
+ 2 * bank->eint_offset);
save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ 2 * bank->eint_offset + 4);
+ save->eint_mask = readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
+ pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
}
void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
@@ -626,8 +655,8 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
else if (bank->eint_type == EINT_TYPE_WKUP) {
if (!irq_chip) {
irq_chip = bank->irq_chip;
- exynos_pinctrl_set_eint_wakeup_mask(drvdata,
- irq_chip);
+ irq_chip->set_eint_wakeup_mask(drvdata,
+ irq_chip);
} else if (bank->irq_chip != irq_chip) {
dev_warn(drvdata->dev,
"More than one external wakeup interrupt chip configured (bank: %s). This is not supported by hardware nor by driver.\n",
@@ -653,6 +682,9 @@ static void exynos_pinctrl_resume_bank(
pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ 2 * bank->eint_offset + 4), save->eint_fltcon1);
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset), save->eint_mask);
writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+ bank->eint_offset);
@@ -660,6 +692,8 @@ static void exynos_pinctrl_resume_bank(
+ 2 * bank->eint_offset);
writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ 2 * bank->eint_offset + 4);
+ writel(save->eint_mask, regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
}
void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
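
The hunks above replace a direct call to the Exynos wakeup-mask helper with a per-variant function pointer, so S5PV210 (a plain MMIO write through a clock-block mapping) and the Exynos SoCs (a regmap write to the PMU) each plug in their own implementation. A self-contained sketch of that dispatch pattern, with invented names standing in for the driver types:

#include <stdio.h>

struct drv_data {
	const char *name;
};

struct irq_chip_variant {
	unsigned int wake_mask_reg;
	void (*set_wakeup_mask)(struct drv_data *d,
				const struct irq_chip_variant *chip);
};

static void regmap_style_mask(struct drv_data *d,
			      const struct irq_chip_variant *chip)
{
	printf("%s: regmap write to %#x\n", d->name, chip->wake_mask_reg);
}

static void mmio_style_mask(struct drv_data *d,
			    const struct irq_chip_variant *chip)
{
	printf("%s: raw MMIO write to %#x\n", d->name, chip->wake_mask_reg);
}

static const struct irq_chip_variant exynos_like = {
	.wake_mask_reg = 0x608,
	.set_wakeup_mask = regmap_style_mask,
};

static const struct irq_chip_variant s5pv210_like = {
	.wake_mask_reg = 0xc004,
	.set_wakeup_mask = mmio_style_mask,
};

int main(void)
{
	struct drv_data d = { .name = "pinctrl" };

	/* the suspend path calls through the variant, not a fixed helper */
	exynos_like.set_wakeup_mask(&d, &exynos_like);
	s5pv210_like.set_wakeup_mask(&d, &s5pv210_like);
	return 0;
}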
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 9552851b96f1..c461a2f1927a 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -12,6 +12,7 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_EMEV2 if ARCH_EMEV2
select PINCTRL_PFC_R8A73A4 if ARCH_R8A73A4
select PINCTRL_PFC_R8A7740 if ARCH_R8A7740
+ select PINCTRL_PFC_R8A7742 if ARCH_R8A7742
select PINCTRL_PFC_R8A7743 if ARCH_R8A7743
select PINCTRL_PFC_R8A7744 if ARCH_R8A7744
select PINCTRL_PFC_R8A7745 if ARCH_R8A7745
@@ -74,6 +75,9 @@ config PINCTRL_PFC_R8A7740
bool "R-Mobile A1 pin control support" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
+config PINCTRL_PFC_R8A7742
+ bool "RZ/G1H pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A7743
bool "RZ/G1M pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 9ebe321d24c4..3855d82069c9 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PINCTRL_SH_PFC_GPIO) += gpio.o
obj-$(CONFIG_PINCTRL_PFC_EMEV2) += pfc-emev2.o
obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7742) += pfc-r8a7790.o
obj-$(CONFIG_PINCTRL_PFC_R8A7743) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7744) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index a2e19efa26e3..f368383cba61 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -485,6 +485,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7740_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7742
+ {
+ .compatible = "renesas,pfc-r8a7742",
+ .data = &r8a7742_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7743
{
.compatible = "renesas,pfc-r8a7743",
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 3366ed561cce..f524401fec5f 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -3938,297 +3938,304 @@ static const unsigned int vin3_clk_mux[] = {
VI3_CLK_MARK,
};
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(audio_clk_a),
- SH_PFC_PIN_GROUP(audio_clk_b),
- SH_PFC_PIN_GROUP(audio_clk_c),
- SH_PFC_PIN_GROUP(audio_clkout),
- SH_PFC_PIN_GROUP(audio_clkout_b),
- SH_PFC_PIN_GROUP(audio_clkout_c),
- SH_PFC_PIN_GROUP(audio_clkout_d),
- SH_PFC_PIN_GROUP(avb_link),
- SH_PFC_PIN_GROUP(avb_magic),
- SH_PFC_PIN_GROUP(avb_phy_int),
- SH_PFC_PIN_GROUP(avb_mdio),
- SH_PFC_PIN_GROUP(avb_mii),
- SH_PFC_PIN_GROUP(avb_gmii),
- SH_PFC_PIN_GROUP(du_rgb666),
- SH_PFC_PIN_GROUP(du_rgb888),
- SH_PFC_PIN_GROUP(du_clk_out_0),
- SH_PFC_PIN_GROUP(du_clk_out_1),
- SH_PFC_PIN_GROUP(du_sync_0),
- SH_PFC_PIN_GROUP(du_sync_1),
- SH_PFC_PIN_GROUP(du_cde),
- SH_PFC_PIN_GROUP(du0_clk_in),
- SH_PFC_PIN_GROUP(du1_clk_in),
- SH_PFC_PIN_GROUP(du2_clk_in),
- SH_PFC_PIN_GROUP(eth_link),
- SH_PFC_PIN_GROUP(eth_magic),
- SH_PFC_PIN_GROUP(eth_mdio),
- SH_PFC_PIN_GROUP(eth_rmii),
- SH_PFC_PIN_GROUP(hscif0_data),
- SH_PFC_PIN_GROUP(hscif0_clk),
- SH_PFC_PIN_GROUP(hscif0_ctrl),
- SH_PFC_PIN_GROUP(hscif0_data_b),
- SH_PFC_PIN_GROUP(hscif0_ctrl_b),
- SH_PFC_PIN_GROUP(hscif0_data_c),
- SH_PFC_PIN_GROUP(hscif0_ctrl_c),
- SH_PFC_PIN_GROUP(hscif0_data_d),
- SH_PFC_PIN_GROUP(hscif0_ctrl_d),
- SH_PFC_PIN_GROUP(hscif0_data_e),
- SH_PFC_PIN_GROUP(hscif0_ctrl_e),
- SH_PFC_PIN_GROUP(hscif0_data_f),
- SH_PFC_PIN_GROUP(hscif0_ctrl_f),
- SH_PFC_PIN_GROUP(hscif1_data),
- SH_PFC_PIN_GROUP(hscif1_clk),
- SH_PFC_PIN_GROUP(hscif1_ctrl),
- SH_PFC_PIN_GROUP(hscif1_data_b),
- SH_PFC_PIN_GROUP(hscif1_clk_b),
- SH_PFC_PIN_GROUP(hscif1_ctrl_b),
- SH_PFC_PIN_GROUP(i2c0),
- SH_PFC_PIN_GROUP(i2c1),
- SH_PFC_PIN_GROUP(i2c1_b),
- SH_PFC_PIN_GROUP(i2c1_c),
- SH_PFC_PIN_GROUP(i2c2),
- SH_PFC_PIN_GROUP(i2c2_b),
- SH_PFC_PIN_GROUP(i2c2_c),
- SH_PFC_PIN_GROUP(i2c2_d),
- SH_PFC_PIN_GROUP(i2c2_e),
- SH_PFC_PIN_GROUP(i2c3),
- SH_PFC_PIN_GROUP(iic0),
- SH_PFC_PIN_GROUP(iic1),
- SH_PFC_PIN_GROUP(iic1_b),
- SH_PFC_PIN_GROUP(iic1_c),
- SH_PFC_PIN_GROUP(iic2),
- SH_PFC_PIN_GROUP(iic2_b),
- SH_PFC_PIN_GROUP(iic2_c),
- SH_PFC_PIN_GROUP(iic2_d),
- SH_PFC_PIN_GROUP(iic2_e),
- SH_PFC_PIN_GROUP(iic3),
- SH_PFC_PIN_GROUP(intc_irq0),
- SH_PFC_PIN_GROUP(intc_irq1),
- SH_PFC_PIN_GROUP(intc_irq2),
- SH_PFC_PIN_GROUP(intc_irq3),
- SH_PFC_PIN_GROUP(mlb_3pin),
- SH_PFC_PIN_GROUP(mmc0_data1),
- SH_PFC_PIN_GROUP(mmc0_data4),
- SH_PFC_PIN_GROUP(mmc0_data8),
- SH_PFC_PIN_GROUP(mmc0_ctrl),
- SH_PFC_PIN_GROUP(mmc1_data1),
- SH_PFC_PIN_GROUP(mmc1_data4),
- SH_PFC_PIN_GROUP(mmc1_data8),
- SH_PFC_PIN_GROUP(mmc1_ctrl),
- SH_PFC_PIN_GROUP(msiof0_clk),
- SH_PFC_PIN_GROUP(msiof0_sync),
- SH_PFC_PIN_GROUP(msiof0_ss1),
- SH_PFC_PIN_GROUP(msiof0_ss2),
- SH_PFC_PIN_GROUP(msiof0_rx),
- SH_PFC_PIN_GROUP(msiof0_tx),
- SH_PFC_PIN_GROUP(msiof0_clk_b),
- SH_PFC_PIN_GROUP(msiof0_ss1_b),
- SH_PFC_PIN_GROUP(msiof0_ss2_b),
- SH_PFC_PIN_GROUP(msiof0_rx_b),
- SH_PFC_PIN_GROUP(msiof0_tx_b),
- SH_PFC_PIN_GROUP(msiof1_clk),
- SH_PFC_PIN_GROUP(msiof1_sync),
- SH_PFC_PIN_GROUP(msiof1_ss1),
- SH_PFC_PIN_GROUP(msiof1_ss2),
- SH_PFC_PIN_GROUP(msiof1_rx),
- SH_PFC_PIN_GROUP(msiof1_tx),
- SH_PFC_PIN_GROUP(msiof1_clk_b),
- SH_PFC_PIN_GROUP(msiof1_ss1_b),
- SH_PFC_PIN_GROUP(msiof1_ss2_b),
- SH_PFC_PIN_GROUP(msiof1_rx_b),
- SH_PFC_PIN_GROUP(msiof1_tx_b),
- SH_PFC_PIN_GROUP(msiof2_clk),
- SH_PFC_PIN_GROUP(msiof2_sync),
- SH_PFC_PIN_GROUP(msiof2_ss1),
- SH_PFC_PIN_GROUP(msiof2_ss2),
- SH_PFC_PIN_GROUP(msiof2_rx),
- SH_PFC_PIN_GROUP(msiof2_tx),
- SH_PFC_PIN_GROUP(msiof3_clk),
- SH_PFC_PIN_GROUP(msiof3_sync),
- SH_PFC_PIN_GROUP(msiof3_ss1),
- SH_PFC_PIN_GROUP(msiof3_ss2),
- SH_PFC_PIN_GROUP(msiof3_rx),
- SH_PFC_PIN_GROUP(msiof3_tx),
- SH_PFC_PIN_GROUP(msiof3_clk_b),
- SH_PFC_PIN_GROUP(msiof3_sync_b),
- SH_PFC_PIN_GROUP(msiof3_rx_b),
- SH_PFC_PIN_GROUP(msiof3_tx_b),
- SH_PFC_PIN_GROUP(pwm0),
- SH_PFC_PIN_GROUP(pwm0_b),
- SH_PFC_PIN_GROUP(pwm1),
- SH_PFC_PIN_GROUP(pwm1_b),
- SH_PFC_PIN_GROUP(pwm2),
- SH_PFC_PIN_GROUP(pwm3),
- SH_PFC_PIN_GROUP(pwm4),
- SH_PFC_PIN_GROUP(pwm5),
- SH_PFC_PIN_GROUP(pwm6),
- SH_PFC_PIN_GROUP(qspi_ctrl),
- SH_PFC_PIN_GROUP(qspi_data2),
- SH_PFC_PIN_GROUP(qspi_data4),
- SH_PFC_PIN_GROUP(scif0_data),
- SH_PFC_PIN_GROUP(scif0_clk),
- SH_PFC_PIN_GROUP(scif0_ctrl),
- SH_PFC_PIN_GROUP(scif0_data_b),
- SH_PFC_PIN_GROUP(scif1_data),
- SH_PFC_PIN_GROUP(scif1_clk),
- SH_PFC_PIN_GROUP(scif1_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_b),
- SH_PFC_PIN_GROUP(scif1_data_c),
- SH_PFC_PIN_GROUP(scif1_data_d),
- SH_PFC_PIN_GROUP(scif1_clk_d),
- SH_PFC_PIN_GROUP(scif1_data_e),
- SH_PFC_PIN_GROUP(scif1_clk_e),
- SH_PFC_PIN_GROUP(scif2_data),
- SH_PFC_PIN_GROUP(scif2_clk),
- SH_PFC_PIN_GROUP(scif2_data_b),
- SH_PFC_PIN_GROUP(scifa0_data),
- SH_PFC_PIN_GROUP(scifa0_clk),
- SH_PFC_PIN_GROUP(scifa0_ctrl),
- SH_PFC_PIN_GROUP(scifa0_data_b),
- SH_PFC_PIN_GROUP(scifa0_clk_b),
- SH_PFC_PIN_GROUP(scifa0_ctrl_b),
- SH_PFC_PIN_GROUP(scifa1_data),
- SH_PFC_PIN_GROUP(scifa1_clk),
- SH_PFC_PIN_GROUP(scifa1_ctrl),
- SH_PFC_PIN_GROUP(scifa1_data_b),
- SH_PFC_PIN_GROUP(scifa1_clk_b),
- SH_PFC_PIN_GROUP(scifa1_ctrl_b),
- SH_PFC_PIN_GROUP(scifa1_data_c),
- SH_PFC_PIN_GROUP(scifa1_clk_c),
- SH_PFC_PIN_GROUP(scifa1_ctrl_c),
- SH_PFC_PIN_GROUP(scifa1_data_d),
- SH_PFC_PIN_GROUP(scifa1_clk_d),
- SH_PFC_PIN_GROUP(scifa1_ctrl_d),
- SH_PFC_PIN_GROUP(scifa2_data),
- SH_PFC_PIN_GROUP(scifa2_clk),
- SH_PFC_PIN_GROUP(scifa2_ctrl),
- SH_PFC_PIN_GROUP(scifa2_data_b),
- SH_PFC_PIN_GROUP(scifa2_data_c),
- SH_PFC_PIN_GROUP(scifa2_clk_c),
- SH_PFC_PIN_GROUP(scifb0_data),
- SH_PFC_PIN_GROUP(scifb0_clk),
- SH_PFC_PIN_GROUP(scifb0_ctrl),
- SH_PFC_PIN_GROUP(scifb0_data_b),
- SH_PFC_PIN_GROUP(scifb0_clk_b),
- SH_PFC_PIN_GROUP(scifb0_ctrl_b),
- SH_PFC_PIN_GROUP(scifb0_data_c),
- SH_PFC_PIN_GROUP(scifb1_data),
- SH_PFC_PIN_GROUP(scifb1_clk),
- SH_PFC_PIN_GROUP(scifb1_ctrl),
- SH_PFC_PIN_GROUP(scifb1_data_b),
- SH_PFC_PIN_GROUP(scifb1_clk_b),
- SH_PFC_PIN_GROUP(scifb1_ctrl_b),
- SH_PFC_PIN_GROUP(scifb1_data_c),
- SH_PFC_PIN_GROUP(scifb1_data_d),
- SH_PFC_PIN_GROUP(scifb1_data_e),
- SH_PFC_PIN_GROUP(scifb1_clk_e),
- SH_PFC_PIN_GROUP(scifb1_data_f),
- SH_PFC_PIN_GROUP(scifb1_data_g),
- SH_PFC_PIN_GROUP(scifb1_clk_g),
- SH_PFC_PIN_GROUP(scifb2_data),
- SH_PFC_PIN_GROUP(scifb2_clk),
- SH_PFC_PIN_GROUP(scifb2_ctrl),
- SH_PFC_PIN_GROUP(scifb2_data_b),
- SH_PFC_PIN_GROUP(scifb2_clk_b),
- SH_PFC_PIN_GROUP(scifb2_ctrl_b),
- SH_PFC_PIN_GROUP(scifb2_data_c),
- SH_PFC_PIN_GROUP(scif_clk),
- SH_PFC_PIN_GROUP(scif_clk_b),
- SH_PFC_PIN_GROUP(sdhi0_data1),
- SH_PFC_PIN_GROUP(sdhi0_data4),
- SH_PFC_PIN_GROUP(sdhi0_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_cd),
- SH_PFC_PIN_GROUP(sdhi0_wp),
- SH_PFC_PIN_GROUP(sdhi1_data1),
- SH_PFC_PIN_GROUP(sdhi1_data4),
- SH_PFC_PIN_GROUP(sdhi1_ctrl),
- SH_PFC_PIN_GROUP(sdhi1_cd),
- SH_PFC_PIN_GROUP(sdhi1_wp),
- SH_PFC_PIN_GROUP(sdhi2_data1),
- SH_PFC_PIN_GROUP(sdhi2_data4),
- SH_PFC_PIN_GROUP(sdhi2_ctrl),
- SH_PFC_PIN_GROUP(sdhi2_cd),
- SH_PFC_PIN_GROUP(sdhi2_wp),
- SH_PFC_PIN_GROUP(sdhi3_data1),
- SH_PFC_PIN_GROUP(sdhi3_data4),
- SH_PFC_PIN_GROUP(sdhi3_ctrl),
- SH_PFC_PIN_GROUP(sdhi3_cd),
- SH_PFC_PIN_GROUP(sdhi3_wp),
- SH_PFC_PIN_GROUP(ssi0_data),
- SH_PFC_PIN_GROUP(ssi0129_ctrl),
- SH_PFC_PIN_GROUP(ssi1_data),
- SH_PFC_PIN_GROUP(ssi1_ctrl),
- SH_PFC_PIN_GROUP(ssi2_data),
- SH_PFC_PIN_GROUP(ssi2_ctrl),
- SH_PFC_PIN_GROUP(ssi3_data),
- SH_PFC_PIN_GROUP(ssi34_ctrl),
- SH_PFC_PIN_GROUP(ssi4_data),
- SH_PFC_PIN_GROUP(ssi4_ctrl),
- SH_PFC_PIN_GROUP(ssi5),
- SH_PFC_PIN_GROUP(ssi5_b),
- SH_PFC_PIN_GROUP(ssi5_c),
- SH_PFC_PIN_GROUP(ssi6),
- SH_PFC_PIN_GROUP(ssi6_b),
- SH_PFC_PIN_GROUP(ssi7_data),
- SH_PFC_PIN_GROUP(ssi7_b_data),
- SH_PFC_PIN_GROUP(ssi7_c_data),
- SH_PFC_PIN_GROUP(ssi78_ctrl),
- SH_PFC_PIN_GROUP(ssi78_b_ctrl),
- SH_PFC_PIN_GROUP(ssi78_c_ctrl),
- SH_PFC_PIN_GROUP(ssi8_data),
- SH_PFC_PIN_GROUP(ssi8_b_data),
- SH_PFC_PIN_GROUP(ssi8_c_data),
- SH_PFC_PIN_GROUP(ssi9_data),
- SH_PFC_PIN_GROUP(ssi9_ctrl),
- SH_PFC_PIN_GROUP(tpu0_to0),
- SH_PFC_PIN_GROUP(tpu0_to1),
- SH_PFC_PIN_GROUP(tpu0_to2),
- SH_PFC_PIN_GROUP(tpu0_to3),
- SH_PFC_PIN_GROUP(usb0),
- SH_PFC_PIN_GROUP(usb0_ovc_vbus),
- SH_PFC_PIN_GROUP(usb1),
- SH_PFC_PIN_GROUP(usb2),
- VIN_DATA_PIN_GROUP(vin0_data, 24),
- VIN_DATA_PIN_GROUP(vin0_data, 20),
- SH_PFC_PIN_GROUP(vin0_data18),
- VIN_DATA_PIN_GROUP(vin0_data, 16),
- VIN_DATA_PIN_GROUP(vin0_data, 12),
- VIN_DATA_PIN_GROUP(vin0_data, 10),
- VIN_DATA_PIN_GROUP(vin0_data, 8),
- VIN_DATA_PIN_GROUP(vin0_data, 4),
- SH_PFC_PIN_GROUP(vin0_sync),
- SH_PFC_PIN_GROUP(vin0_field),
- SH_PFC_PIN_GROUP(vin0_clkenb),
- SH_PFC_PIN_GROUP(vin0_clk),
- VIN_DATA_PIN_GROUP(vin1_data, 24),
- VIN_DATA_PIN_GROUP(vin1_data, 20),
- SH_PFC_PIN_GROUP(vin1_data18),
- VIN_DATA_PIN_GROUP(vin1_data, 16),
- VIN_DATA_PIN_GROUP(vin1_data, 12),
- VIN_DATA_PIN_GROUP(vin1_data, 10),
- VIN_DATA_PIN_GROUP(vin1_data, 8),
- VIN_DATA_PIN_GROUP(vin1_data, 4),
- SH_PFC_PIN_GROUP(vin1_sync),
- SH_PFC_PIN_GROUP(vin1_field),
- SH_PFC_PIN_GROUP(vin1_clkenb),
- SH_PFC_PIN_GROUP(vin1_clk),
- VIN_DATA_PIN_GROUP(vin2_data, 24),
- SH_PFC_PIN_GROUP(vin2_data18),
- VIN_DATA_PIN_GROUP(vin2_data, 16),
- VIN_DATA_PIN_GROUP(vin2_data, 8),
- VIN_DATA_PIN_GROUP(vin2_data, 4),
- SH_PFC_PIN_GROUP(vin2_sync),
- SH_PFC_PIN_GROUP(vin2_field),
- SH_PFC_PIN_GROUP(vin2_clkenb),
- SH_PFC_PIN_GROUP(vin2_clk),
- SH_PFC_PIN_GROUP(vin3_data8),
- SH_PFC_PIN_GROUP(vin3_sync),
- SH_PFC_PIN_GROUP(vin3_field),
- SH_PFC_PIN_GROUP(vin3_clkenb),
- SH_PFC_PIN_GROUP(vin3_clk),
+static const struct {
+ struct sh_pfc_pin_group common[289];
+ struct sh_pfc_pin_group automotive[1];
+} pinmux_groups = {
+ .common = {
+ SH_PFC_PIN_GROUP(audio_clk_a),
+ SH_PFC_PIN_GROUP(audio_clk_b),
+ SH_PFC_PIN_GROUP(audio_clk_c),
+ SH_PFC_PIN_GROUP(audio_clkout),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_gmii),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync_0),
+ SH_PFC_PIN_GROUP(du_sync_1),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du0_clk_in),
+ SH_PFC_PIN_GROUP(du1_clk_in),
+ SH_PFC_PIN_GROUP(du2_clk_in),
+ SH_PFC_PIN_GROUP(eth_link),
+ SH_PFC_PIN_GROUP(eth_magic),
+ SH_PFC_PIN_GROUP(eth_mdio),
+ SH_PFC_PIN_GROUP(eth_rmii),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif0_data_b),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif0_data_c),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif0_data_d),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_d),
+ SH_PFC_PIN_GROUP(hscif0_data_e),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_e),
+ SH_PFC_PIN_GROUP(hscif0_data_f),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_f),
+ SH_PFC_PIN_GROUP(hscif1_data),
+ SH_PFC_PIN_GROUP(hscif1_clk),
+ SH_PFC_PIN_GROUP(hscif1_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c1_c),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c2_c),
+ SH_PFC_PIN_GROUP(i2c2_d),
+ SH_PFC_PIN_GROUP(i2c2_e),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(iic0),
+ SH_PFC_PIN_GROUP(iic1),
+ SH_PFC_PIN_GROUP(iic1_b),
+ SH_PFC_PIN_GROUP(iic1_c),
+ SH_PFC_PIN_GROUP(iic2),
+ SH_PFC_PIN_GROUP(iic2_b),
+ SH_PFC_PIN_GROUP(iic2_c),
+ SH_PFC_PIN_GROUP(iic2_d),
+ SH_PFC_PIN_GROUP(iic2_e),
+ SH_PFC_PIN_GROUP(iic3),
+ SH_PFC_PIN_GROUP(intc_irq0),
+ SH_PFC_PIN_GROUP(intc_irq1),
+ SH_PFC_PIN_GROUP(intc_irq2),
+ SH_PFC_PIN_GROUP(intc_irq3),
+ SH_PFC_PIN_GROUP(mmc0_data1),
+ SH_PFC_PIN_GROUP(mmc0_data4),
+ SH_PFC_PIN_GROUP(mmc0_data8),
+ SH_PFC_PIN_GROUP(mmc0_ctrl),
+ SH_PFC_PIN_GROUP(mmc1_data1),
+ SH_PFC_PIN_GROUP(mmc1_data4),
+ SH_PFC_PIN_GROUP(mmc1_data8),
+ SH_PFC_PIN_GROUP(mmc1_ctrl),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_rx),
+ SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof0_clk_b),
+ SH_PFC_PIN_GROUP(msiof0_ss1_b),
+ SH_PFC_PIN_GROUP(msiof0_ss2_b),
+ SH_PFC_PIN_GROUP(msiof0_rx_b),
+ SH_PFC_PIN_GROUP(msiof0_tx_b),
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_rx),
+ SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_rx_b),
+ SH_PFC_PIN_GROUP(msiof1_tx_b),
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_rx),
+ SH_PFC_PIN_GROUP(msiof2_tx),
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_rx),
+ SH_PFC_PIN_GROUP(msiof3_tx),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_rx_b),
+ SH_PFC_PIN_GROUP(msiof3_tx_b),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm0_b),
+ SH_PFC_PIN_GROUP(pwm1),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2),
+ SH_PFC_PIN_GROUP(pwm3),
+ SH_PFC_PIN_GROUP(pwm4),
+ SH_PFC_PIN_GROUP(pwm5),
+ SH_PFC_PIN_GROUP(pwm6),
+ SH_PFC_PIN_GROUP(qspi_ctrl),
+ SH_PFC_PIN_GROUP(qspi_data2),
+ SH_PFC_PIN_GROUP(qspi_data4),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif0_data_b),
+ SH_PFC_PIN_GROUP(scif1_data),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_data_c),
+ SH_PFC_PIN_GROUP(scif1_data_d),
+ SH_PFC_PIN_GROUP(scif1_clk_d),
+ SH_PFC_PIN_GROUP(scif1_data_e),
+ SH_PFC_PIN_GROUP(scif1_clk_e),
+ SH_PFC_PIN_GROUP(scif2_data),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scifa0_data),
+ SH_PFC_PIN_GROUP(scifa0_clk),
+ SH_PFC_PIN_GROUP(scifa0_ctrl),
+ SH_PFC_PIN_GROUP(scifa0_data_b),
+ SH_PFC_PIN_GROUP(scifa0_clk_b),
+ SH_PFC_PIN_GROUP(scifa0_ctrl_b),
+ SH_PFC_PIN_GROUP(scifa1_data),
+ SH_PFC_PIN_GROUP(scifa1_clk),
+ SH_PFC_PIN_GROUP(scifa1_ctrl),
+ SH_PFC_PIN_GROUP(scifa1_data_b),
+ SH_PFC_PIN_GROUP(scifa1_clk_b),
+ SH_PFC_PIN_GROUP(scifa1_ctrl_b),
+ SH_PFC_PIN_GROUP(scifa1_data_c),
+ SH_PFC_PIN_GROUP(scifa1_clk_c),
+ SH_PFC_PIN_GROUP(scifa1_ctrl_c),
+ SH_PFC_PIN_GROUP(scifa1_data_d),
+ SH_PFC_PIN_GROUP(scifa1_clk_d),
+ SH_PFC_PIN_GROUP(scifa1_ctrl_d),
+ SH_PFC_PIN_GROUP(scifa2_data),
+ SH_PFC_PIN_GROUP(scifa2_clk),
+ SH_PFC_PIN_GROUP(scifa2_ctrl),
+ SH_PFC_PIN_GROUP(scifa2_data_b),
+ SH_PFC_PIN_GROUP(scifa2_data_c),
+ SH_PFC_PIN_GROUP(scifa2_clk_c),
+ SH_PFC_PIN_GROUP(scifb0_data),
+ SH_PFC_PIN_GROUP(scifb0_clk),
+ SH_PFC_PIN_GROUP(scifb0_ctrl),
+ SH_PFC_PIN_GROUP(scifb0_data_b),
+ SH_PFC_PIN_GROUP(scifb0_clk_b),
+ SH_PFC_PIN_GROUP(scifb0_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb0_data_c),
+ SH_PFC_PIN_GROUP(scifb1_data),
+ SH_PFC_PIN_GROUP(scifb1_clk),
+ SH_PFC_PIN_GROUP(scifb1_ctrl),
+ SH_PFC_PIN_GROUP(scifb1_data_b),
+ SH_PFC_PIN_GROUP(scifb1_clk_b),
+ SH_PFC_PIN_GROUP(scifb1_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb1_data_c),
+ SH_PFC_PIN_GROUP(scifb1_data_d),
+ SH_PFC_PIN_GROUP(scifb1_data_e),
+ SH_PFC_PIN_GROUP(scifb1_clk_e),
+ SH_PFC_PIN_GROUP(scifb1_data_f),
+ SH_PFC_PIN_GROUP(scifb1_data_g),
+ SH_PFC_PIN_GROUP(scifb1_clk_g),
+ SH_PFC_PIN_GROUP(scifb2_data),
+ SH_PFC_PIN_GROUP(scifb2_clk),
+ SH_PFC_PIN_GROUP(scifb2_ctrl),
+ SH_PFC_PIN_GROUP(scifb2_data_b),
+ SH_PFC_PIN_GROUP(scifb2_clk_b),
+ SH_PFC_PIN_GROUP(scifb2_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb2_data_c),
+ SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd),
+ SH_PFC_PIN_GROUP(sdhi2_wp),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi0129_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data),
+ SH_PFC_PIN_GROUP(ssi1_ctrl),
+ SH_PFC_PIN_GROUP(ssi2_data),
+ SH_PFC_PIN_GROUP(ssi2_ctrl),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5),
+ SH_PFC_PIN_GROUP(ssi5_b),
+ SH_PFC_PIN_GROUP(ssi5_c),
+ SH_PFC_PIN_GROUP(ssi6),
+ SH_PFC_PIN_GROUP(ssi6_b),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi7_b_data),
+ SH_PFC_PIN_GROUP(ssi7_c_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi78_b_ctrl),
+ SH_PFC_PIN_GROUP(ssi78_c_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi8_b_data),
+ SH_PFC_PIN_GROUP(ssi8_c_data),
+ SH_PFC_PIN_GROUP(ssi9_data),
+ SH_PFC_PIN_GROUP(ssi9_ctrl),
+ SH_PFC_PIN_GROUP(tpu0_to0),
+ SH_PFC_PIN_GROUP(tpu0_to1),
+ SH_PFC_PIN_GROUP(tpu0_to2),
+ SH_PFC_PIN_GROUP(tpu0_to3),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb0_ovc_vbus),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb2),
+ VIN_DATA_PIN_GROUP(vin0_data, 24),
+ VIN_DATA_PIN_GROUP(vin0_data, 20),
+ SH_PFC_PIN_GROUP(vin0_data18),
+ VIN_DATA_PIN_GROUP(vin0_data, 16),
+ VIN_DATA_PIN_GROUP(vin0_data, 12),
+ VIN_DATA_PIN_GROUP(vin0_data, 10),
+ VIN_DATA_PIN_GROUP(vin0_data, 8),
+ VIN_DATA_PIN_GROUP(vin0_data, 4),
+ SH_PFC_PIN_GROUP(vin0_sync),
+ SH_PFC_PIN_GROUP(vin0_field),
+ SH_PFC_PIN_GROUP(vin0_clkenb),
+ SH_PFC_PIN_GROUP(vin0_clk),
+ VIN_DATA_PIN_GROUP(vin1_data, 24),
+ VIN_DATA_PIN_GROUP(vin1_data, 20),
+ SH_PFC_PIN_GROUP(vin1_data18),
+ VIN_DATA_PIN_GROUP(vin1_data, 16),
+ VIN_DATA_PIN_GROUP(vin1_data, 12),
+ VIN_DATA_PIN_GROUP(vin1_data, 10),
+ VIN_DATA_PIN_GROUP(vin1_data, 8),
+ VIN_DATA_PIN_GROUP(vin1_data, 4),
+ SH_PFC_PIN_GROUP(vin1_sync),
+ SH_PFC_PIN_GROUP(vin1_field),
+ SH_PFC_PIN_GROUP(vin1_clkenb),
+ SH_PFC_PIN_GROUP(vin1_clk),
+ VIN_DATA_PIN_GROUP(vin2_data, 24),
+ SH_PFC_PIN_GROUP(vin2_data18),
+ VIN_DATA_PIN_GROUP(vin2_data, 16),
+ VIN_DATA_PIN_GROUP(vin2_data, 8),
+ VIN_DATA_PIN_GROUP(vin2_data, 4),
+ SH_PFC_PIN_GROUP(vin2_sync),
+ SH_PFC_PIN_GROUP(vin2_field),
+ SH_PFC_PIN_GROUP(vin2_clkenb),
+ SH_PFC_PIN_GROUP(vin2_clk),
+ SH_PFC_PIN_GROUP(vin3_data8),
+ SH_PFC_PIN_GROUP(vin3_sync),
+ SH_PFC_PIN_GROUP(vin3_field),
+ SH_PFC_PIN_GROUP(vin3_clkenb),
+ SH_PFC_PIN_GROUP(vin3_clk),
+ },
+ .automotive = {
+ SH_PFC_PIN_GROUP(mlb_3pin),
+ }
};
static const char * const audio_clk_groups[] = {
@@ -4689,63 +4696,70 @@ static const char * const vin3_groups[] = {
"vin3_clk",
};
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(audio_clk),
- SH_PFC_FUNCTION(avb),
- SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(du0),
- SH_PFC_FUNCTION(du1),
- SH_PFC_FUNCTION(du2),
- SH_PFC_FUNCTION(eth),
- SH_PFC_FUNCTION(hscif0),
- SH_PFC_FUNCTION(hscif1),
- SH_PFC_FUNCTION(i2c0),
- SH_PFC_FUNCTION(i2c1),
- SH_PFC_FUNCTION(i2c2),
- SH_PFC_FUNCTION(i2c3),
- SH_PFC_FUNCTION(iic0),
- SH_PFC_FUNCTION(iic1),
- SH_PFC_FUNCTION(iic2),
- SH_PFC_FUNCTION(iic3),
- SH_PFC_FUNCTION(intc),
- SH_PFC_FUNCTION(mlb),
- SH_PFC_FUNCTION(mmc0),
- SH_PFC_FUNCTION(mmc1),
- SH_PFC_FUNCTION(msiof0),
- SH_PFC_FUNCTION(msiof1),
- SH_PFC_FUNCTION(msiof2),
- SH_PFC_FUNCTION(msiof3),
- SH_PFC_FUNCTION(pwm0),
- SH_PFC_FUNCTION(pwm1),
- SH_PFC_FUNCTION(pwm2),
- SH_PFC_FUNCTION(pwm3),
- SH_PFC_FUNCTION(pwm4),
- SH_PFC_FUNCTION(pwm5),
- SH_PFC_FUNCTION(pwm6),
- SH_PFC_FUNCTION(qspi),
- SH_PFC_FUNCTION(scif0),
- SH_PFC_FUNCTION(scif1),
- SH_PFC_FUNCTION(scif2),
- SH_PFC_FUNCTION(scifa0),
- SH_PFC_FUNCTION(scifa1),
- SH_PFC_FUNCTION(scifa2),
- SH_PFC_FUNCTION(scifb0),
- SH_PFC_FUNCTION(scifb1),
- SH_PFC_FUNCTION(scifb2),
- SH_PFC_FUNCTION(scif_clk),
- SH_PFC_FUNCTION(sdhi0),
- SH_PFC_FUNCTION(sdhi1),
- SH_PFC_FUNCTION(sdhi2),
- SH_PFC_FUNCTION(sdhi3),
- SH_PFC_FUNCTION(ssi),
- SH_PFC_FUNCTION(tpu0),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb1),
- SH_PFC_FUNCTION(usb2),
- SH_PFC_FUNCTION(vin0),
- SH_PFC_FUNCTION(vin1),
- SH_PFC_FUNCTION(vin2),
- SH_PFC_FUNCTION(vin3),
+static const struct {
+ struct sh_pfc_function common[55];
+ struct sh_pfc_function automotive[1];
+} pinmux_functions = {
+ .common = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(du0),
+ SH_PFC_FUNCTION(du1),
+ SH_PFC_FUNCTION(du2),
+ SH_PFC_FUNCTION(eth),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(iic0),
+ SH_PFC_FUNCTION(iic1),
+ SH_PFC_FUNCTION(iic2),
+ SH_PFC_FUNCTION(iic3),
+ SH_PFC_FUNCTION(intc),
+ SH_PFC_FUNCTION(mmc0),
+ SH_PFC_FUNCTION(mmc1),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(qspi),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scifa0),
+ SH_PFC_FUNCTION(scifa1),
+ SH_PFC_FUNCTION(scifa2),
+ SH_PFC_FUNCTION(scifb0),
+ SH_PFC_FUNCTION(scifb1),
+ SH_PFC_FUNCTION(scifb2),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(tpu0),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb2),
+ SH_PFC_FUNCTION(vin0),
+ SH_PFC_FUNCTION(vin1),
+ SH_PFC_FUNCTION(vin2),
+ SH_PFC_FUNCTION(vin3),
+ },
+ .automotive = {
+ SH_PFC_FUNCTION(mlb),
+ }
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -5736,6 +5750,29 @@ static const struct sh_pfc_soc_operations r8a7790_pinmux_ops = {
.pin_to_pocctrl = r8a7790_pin_to_pocctrl,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A7742
+const struct sh_pfc_soc_info r8a7742_pinmux_info = {
+ .name = "r8a77420_pfc",
+ .ops = &r8a7790_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A7790
const struct sh_pfc_soc_info r8a7790_pinmux_info = {
.name = "r8a77900_pfc",
.ops = &r8a7790_pinmux_ops,
@@ -5745,13 +5782,16 @@ const struct sh_pfc_soc_info r8a7790_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
+#endif
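
Splitting pinmux_groups and pinmux_functions into .common and .automotive arrays lets the new RZ/G1H (r8a7742) entry reuse the R-Car H2 tables while hiding the automotive-only MLB group: both SoCs point .groups at pinmux_groups.common, and only r8a7790 counts the automotive entries, relying on the two arrays sitting back to back inside the struct. A compilable user-space sketch of the trick (toy data, not the real tables):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct pin_group {
	const char *name;
};

static const struct {
	struct pin_group common[2];
	struct pin_group automotive[1];
} pinmux_groups = {
	.common     = { { "i2c0" }, { "scif0" } },
	.automotive = { { "mlb_3pin" } },
};

static void list_groups(const struct pin_group *g, unsigned int n)
{
	while (n--)
		printf("  %s\n", g++->name);
}

int main(void)
{
	/* industrial SoC (r8a7742-like): only the common block */
	printf("common only:\n");
	list_groups(pinmux_groups.common, ARRAY_SIZE(pinmux_groups.common));

	/*
	 * automotive SoC (r8a7790-like): iterate past common[] into
	 * automotive[], relying on the two same-typed arrays being laid
	 * out contiguously inside the struct (the kernel makes the same
	 * assumption; strictly portable C would index them separately)
	 */
	printf("common + automotive:\n");
	list_groups(pinmux_groups.common,
		    ARRAY_SIZE(pinmux_groups.common) +
		    ARRAY_SIZE(pinmux_groups.automotive));
	return 0;
}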
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index d20974a55d93..e2916aaa8304 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -1963,8 +1963,9 @@ static const struct pinmux_func pinmux_func_gpios[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* "name" addr register_size Field_Width */
- /* where Field_Width is 1 for single mode registers or 4 for upto 16
- mode registers and modes are described in assending order [0..16] */
+ /* where Field_Width is 1 for single mode registers or 4 for up to 16
+ * mode registers and modes are described in ascending order [0..15]
+ */
{ PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index d57e633e99c0..0f013827baf9 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -304,6 +304,7 @@ struct sh_pfc_soc_info {
extern const struct sh_pfc_soc_info emev2_pinmux_info;
extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7742_pinmux_info;
extern const struct sh_pfc_soc_info r8a7743_pinmux_info;
extern const struct sh_pfc_soc_info r8a7744_pinmux_info;
extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 1ebcb957c654..63a287d5795f 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -794,13 +794,17 @@ static int sirfsoc_gpio_probe(struct device_node *np)
return -ENODEV;
sgpio = devm_kzalloc(&pdev->dev, sizeof(*sgpio), GFP_KERNEL);
- if (!sgpio)
- return -ENOMEM;
+ if (!sgpio) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
spin_lock_init(&sgpio->lock);
regs = of_iomap(np, 0);
- if (!regs)
- return -ENOMEM;
+ if (!regs) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
sgpio->chip.gc.request = sirfsoc_gpio_request;
sgpio->chip.gc.free = sirfsoc_gpio_free;
@@ -824,8 +828,10 @@ static int sirfsoc_gpio_probe(struct device_node *np)
girq->parents = devm_kcalloc(&pdev->dev, SIRFSOC_GPIO_NO_OF_BANKS,
sizeof(*girq->parents),
GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
+ if (!girq->parents) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
bank = &sgpio->sgpio_bank[i];
spin_lock_init(&bank->lock);
@@ -868,6 +874,8 @@ out_no_range:
gpiochip_remove(&sgpio->chip.gc);
out:
iounmap(regs);
+out_put_device:
+ put_device(&pdev->dev);
return err;
}
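
The sirf probe fix above converts bare returns into gotos so that the reference taken on the platform device earlier in the function is dropped on every error path. A stand-alone sketch of this goto-unwind pattern, with grab()/drop() as invented stand-ins for the get/put pair:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-ins for the device reference and mapping the probe path takes */
static void *grab(const char *what)
{
	printf("grab %s\n", what);
	return malloc(1);
}

static void drop(void *p, const char *what)
{
	printf("drop %s\n", what);
	free(p);
}

static int probe(int fail)
{
	void *dev_ref, *regs;
	int err;

	dev_ref = grab("device reference");

	regs = fail ? NULL : grab("register mapping");
	if (!regs) {
		err = -ENOMEM;
		goto out_put_device;
	}

	return 0;	/* success: the probe keeps both resources */

out_put_device:
	drop(dev_ref, "device reference");	/* runs on every error path */
	return err;
}

int main(void)
{
	printf("ok: %d\n", probe(0));
	printf("fail: %d\n", probe(1));
	return 0;
}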
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 48cbf2a2837f..08dc1931b358 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -68,8 +68,8 @@
#define SLEEP_PULL_UP_MASK 0x1
#define SLEEP_PULL_UP_SHIFT 3
-#define PULL_UP_20K (BIT(12) | BIT(7))
-#define PULL_UP_4_7K BIT(12)
+#define PULL_UP_4_7K (BIT(12) | BIT(7))
+#define PULL_UP_20K BIT(7)
#define PULL_UP_MASK 0x21
#define PULL_UP_SHIFT 7
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 8a08c4afc6a8..9e5b61449999 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -103,8 +103,11 @@ static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc)) {
- dev_err(&pdev->dev, "Reset controller missing\n");
- return PTR_ERR(rstc);
+ ret = PTR_ERR(rstc);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_err(&pdev->dev, "Reset controller missing err=%d\n", ret);
+ return ret;
}
ret = reset_control_deassert(rstc);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 6f7b3767f453..43922ab81666 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -123,7 +123,7 @@ static int tegra_xusb_padctl_get_group_pins(struct pinctrl_dev *pinctrl,
unsigned *num_pins)
{
/*
- * For the tegra-xusb pad controller groups are synonomous
+ * For the tegra-xusb pad controller groups are synonymous
* with lanes/pins and there is always one lane/pin per group.
*/
*pins = &pinctrl->desc->pins[group].number;
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 786bf89487d6..80d00ab8c110 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -94,7 +94,7 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
if (data->aon_pin) {
/*
* It's an AON pin, whose mux register offset and bit position
- * can be caluculated from pin number. Each register covers 16
+ * can be calculated from pin number. Each register covers 16
* pins, and each pin occupies 2 bits.
*/
u16 aoffset = pindesc->number / 16 * 4;
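
The corrected comment describes the AON mux layout: each 32-bit register covers 16 pins at 2 bits per pin, so the register's byte offset is pin / 16 * 4 and the field starts at bit pin % 16 * 2. Worked out for one pin:

#include <stdio.h>

int main(void)
{
	unsigned int pin = 37;

	/* 16 pins per 32-bit register, 2 bits per pin */
	unsigned int reg_offset = pin / 16 * 4; /* byte offset of register */
	unsigned int bit_shift  = pin % 16 * 2; /* field position inside it */

	/* pin 37 -> register offset 0x8, bits [11:10] */
	printf("pin %u -> register offset %#x, bits [%u:%u]\n",
	       pin, reg_offset, bit_shift + 1, bit_shift);
	return 0;
}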
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 03ea5129ed0c..a484ab2c91ff 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -217,6 +217,7 @@ config CROS_EC_SYSFS
config CROS_EC_TYPEC
tristate "ChromeOS EC Type-C Connector Control"
depends on MFD_CROS_EC_DEV && TYPEC
+ depends on CROS_USBPD_NOTIFY
default MFD_CROS_EC_DEV
help
If you say Y here, you get support for accessing Type C connector
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
index d13770785fb5..f37c0ef4af1f 100644
--- a/drivers/platform/chrome/chromeos_pstore.c
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -57,7 +57,8 @@ static struct ramoops_platform_data chromeos_ramoops_data = {
.record_size = 0x40000,
.console_size = 0x20000,
.ftrace_size = 0x20000,
- .dump_oops = 1,
+ .pmsg_size = 0x20000,
+ .max_reason = KMSG_DUMP_OOPS,
};
static struct platform_device chromeos_ramoops = {
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 6119eccd8a18..30c8938c27d5 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -16,7 +16,7 @@
#include "cros_ec.h"
-/**
+/*
* Request format for protocol v3
* byte 0 0xda (EC_COMMAND_PROTOCOL_3)
* byte 1-8 struct ec_host_request
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 93a71e93a2f1..ed794a7ddba9 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -48,7 +48,8 @@ static const guid_t cros_ish_guid =
struct header {
u8 channel;
u8 status;
- u8 reserved[2];
+ u8 token;
+ u8 reserved;
} __packed;
struct cros_ish_out_msg {
@@ -90,6 +91,7 @@ static DECLARE_RWSEM(init_lock);
* data exceeds this value, we log an error.
* @size: Actual size of data received from firmware.
* @error: 0 for success, negative error code for a failure in process_recv().
+ * @token: Expected token for response that we are waiting on.
* @received: Set to true on receiving a valid firmware response to host command
* @wait_queue: Wait queue for host to wait for firmware response.
*/
@@ -98,6 +100,7 @@ struct response_info {
size_t max_size;
size_t size;
int error;
+ u8 token;
bool received;
wait_queue_head_t wait_queue;
};
@@ -162,6 +165,7 @@ static int ish_send(struct ishtp_cl_data *client_data,
u8 *out_msg, size_t out_size,
u8 *in_msg, size_t in_size)
{
+ static u8 next_token;
int rv;
struct header *out_hdr = (struct header *)out_msg;
struct ishtp_cl *cros_ish_cl = client_data->cros_ish_cl;
@@ -174,8 +178,11 @@ static int ish_send(struct ishtp_cl_data *client_data,
client_data->response.data = in_msg;
client_data->response.max_size = in_size;
client_data->response.error = 0;
+ client_data->response.token = next_token++;
client_data->response.received = false;
+ out_hdr->token = client_data->response.token;
+
rv = ishtp_cl_send(cros_ish_cl, out_msg, out_size);
if (rv) {
dev_err(cl_data_to_dev(client_data),
@@ -249,17 +256,23 @@ static void process_recv(struct ishtp_cl *cros_ish_cl,
switch (in_msg->hdr.channel) {
case CROS_EC_COMMAND:
- /* Sanity check */
- if (!client_data->response.data) {
+ if (client_data->response.received) {
dev_err(dev,
- "Receiving buffer is null. Should be allocated by calling function\n");
- client_data->response.error = -EINVAL;
- goto error_wake_up;
+ "Previous firmware message not yet processed\n");
+ goto end_error;
}
- if (client_data->response.received) {
+ if (client_data->response.token != in_msg->hdr.token) {
+ dev_err_ratelimited(dev,
+ "Dropping old response token %d\n",
+ in_msg->hdr.token);
+ goto end_error;
+ }
+
+ /* Sanity check */
+ if (!client_data->response.data) {
dev_err(dev,
- "Previous firmware message not yet processed\n");
+ "Receiving buffer is null. Should be allocated by calling function\n");
client_data->response.error = -EINVAL;
goto error_wake_up;
}
@@ -289,21 +302,28 @@ static void process_recv(struct ishtp_cl *cros_ish_cl,
memcpy(client_data->response.data,
rb_in_proc->buffer.data, data_len);
+error_wake_up:
+ /* Free the buffer since we copied data or didn't need it */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
+
/* Set flag before waking up the caller */
client_data->response.received = true;
-error_wake_up:
+
/* Wake the calling thread */
wake_up_interruptible(&client_data->response.wait_queue);
break;
case CROS_MKBP_EVENT:
+ /* Free the buffer. This is just an event without data */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
/*
* Set timestamp from beginning of function since we actually
* got an incoming MKBP event
*/
client_data->ec_dev->last_event_time = timestamp;
- /* The event system doesn't send any data in buffer */
schedule_work(&client_data->work_ec_evt);
break;
@@ -313,8 +333,9 @@ error_wake_up:
}
end_error:
- /* Free the buffer */
- ishtp_cl_io_rb_recycle(rb_in_proc);
+ /* Free the buffer if we haven't already */
+ if (rb_in_proc)
+ ishtp_cl_io_rb_recycle(rb_in_proc);
up_read(&init_lock);
}
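
The ISHTP change above stamps each outgoing request with a one-byte rolling token and has the receive path drop any response whose token does not match the request currently being waited on, which filters out late replies to commands that already timed out. A simplified model of that handshake (invented types, not the ishtp API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct response_state {
	uint8_t token;		/* token of the request we are waiting on */
	bool received;
};

static uint8_t next_token;

static uint8_t send_request(struct response_state *rs)
{
	rs->token = next_token++;	/* u8 wraps naturally at 256 */
	rs->received = false;
	return rs->token;		/* stamped into the outgoing header */
}

static void handle_response(struct response_state *rs, uint8_t hdr_token)
{
	if (hdr_token != rs->token) {
		printf("dropping stale response, token %u\n", hdr_token);
		return;
	}
	rs->received = true;
	printf("matched response, token %u\n", hdr_token);
}

int main(void)
{
	struct response_state rs;
	uint8_t t;

	send_request(&rs);	/* request 0: assume it times out */
	t = send_request(&rs);	/* request 1 goes out next */
	handle_response(&rs, 0);	/* late reply to request 0: dropped */
	handle_response(&rs, t);	/* reply to request 1: accepted */
	return 0;
}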
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 874269c07073..66b8d21092af 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -11,11 +11,22 @@
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#include <linux/usb/typec.h>
#define DRV_NAME "cros-ec-typec"
+/* Per port data. */
+struct cros_typec_port {
+ struct typec_port *port;
+ /* Initial capabilities for the port. */
+ struct typec_capability caps;
+ struct typec_partner *partner;
+ /* Port partner PD identity info. */
+ struct usb_pd_identity p_identity;
+};
+
/* Platform-specific data for the Chrome OS EC Type C controller. */
struct cros_typec_data {
struct device *dev;
@@ -23,9 +34,8 @@ struct cros_typec_data {
int num_ports;
unsigned int cmd_ver;
/* Array of ports, indexed by port number. */
- struct typec_port *ports[EC_USB_PD_MAX_PORTS];
- /* Initial capabilities for each port. */
- struct typec_capability *caps[EC_USB_PD_MAX_PORTS];
+ struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
+ struct notifier_block nb;
};
static int cros_typec_parse_port_props(struct typec_capability *cap,
@@ -74,14 +84,25 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
return 0;
}
+static void cros_unregister_ports(struct cros_typec_data *typec)
+{
+ int i;
+
+ for (i = 0; i < typec->num_ports; i++) {
+ if (!typec->ports[i])
+ continue;
+ typec_unregister_port(typec->ports[i]->port);
+ }
+}
+
static int cros_typec_init_ports(struct cros_typec_data *typec)
{
struct device *dev = typec->dev;
struct typec_capability *cap;
struct fwnode_handle *fwnode;
+ struct cros_typec_port *cros_port;
const char *port_prop;
int ret;
- int i;
int nports;
u32 port_num = 0;
@@ -113,22 +134,23 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
dev_dbg(dev, "Registering port %d\n", port_num);
- cap = devm_kzalloc(dev, sizeof(*cap), GFP_KERNEL);
- if (!cap) {
+ cros_port = devm_kzalloc(dev, sizeof(*cros_port), GFP_KERNEL);
+ if (!cros_port) {
ret = -ENOMEM;
goto unregister_ports;
}
- typec->caps[port_num] = cap;
+ typec->ports[port_num] = cros_port;
+ cap = &cros_port->caps;
ret = cros_typec_parse_port_props(cap, fwnode, dev);
if (ret < 0)
goto unregister_ports;
- typec->ports[port_num] = typec_register_port(dev, cap);
- if (IS_ERR(typec->ports[port_num])) {
+ cros_port->port = typec_register_port(dev, cap);
+ if (IS_ERR(cros_port->port)) {
dev_err(dev, "Failed to register port %d\n", port_num);
- ret = PTR_ERR(typec->ports[port_num]);
+ ret = PTR_ERR(cros_port->port);
goto unregister_ports;
}
}
@@ -136,8 +158,7 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
return 0;
unregister_ports:
- for (i = 0; i < typec->num_ports; i++)
- typec_unregister_port(typec->ports[i]);
+ cros_unregister_ports(typec);
return ret;
}
@@ -172,10 +193,34 @@ static int cros_typec_ec_command(struct cros_typec_data *typec,
return ret;
}
+static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
+ bool pd_en)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_partner_desc p_desc = {
+ .usb_pd = pd_en,
+ };
+ int ret = 0;
+
+ /*
+ * Fill an initial PD identity, which will then be updated with info
+ * from the EC.
+ */
+ p_desc.identity = &port->p_identity;
+
+ port->partner = typec_register_partner(port->port, &p_desc);
+ if (IS_ERR(port->partner)) {
+ ret = PTR_ERR(port->partner);
+ port->partner = NULL;
+ }
+
+ return ret;
+}
+
static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
int port_num, struct ec_response_usb_pd_control *resp)
{
- struct typec_port *port = typec->ports[port_num];
+ struct typec_port *port = typec->ports[port_num]->port;
enum typec_orientation polarity;
if (!resp->enabled)
@@ -192,8 +237,10 @@ static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
int port_num, struct ec_response_usb_pd_control_v1 *resp)
{
- struct typec_port *port = typec->ports[port_num];
+ struct typec_port *port = typec->ports[port_num]->port;
enum typec_orientation polarity;
+ bool pd_en;
+ int ret;
if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
polarity = TYPEC_ORIENTATION_NONE;
@@ -208,6 +255,25 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
TYPEC_SOURCE : TYPEC_SINK);
typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
TYPEC_SOURCE : TYPEC_SINK);
+
+ /* Register/remove partners when a connect/disconnect occurs. */
+ if (resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED) {
+ if (typec->ports[port_num]->partner)
+ return;
+
+ pd_en = resp->enabled & PD_CTRL_RESP_ENABLED_PD_CAPABLE;
+ ret = cros_typec_add_partner(typec, port_num, pd_en);
+ if (ret)
+ dev_warn(typec->dev,
+ "Failed to register partner on port: %d\n",
+ port_num);
+ } else {
+ if (!typec->ports[port_num]->partner)
+ return;
+
+ typec_unregister_partner(typec->ports[port_num]->partner);
+ typec->ports[port_num]->partner = NULL;
+ }
}
static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
@@ -272,6 +338,22 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
return 0;
}
+static int cros_ec_typec_event(struct notifier_block *nb,
+ unsigned long host_event, void *_notify)
+{
+ struct cros_typec_data *typec = container_of(nb, struct cros_typec_data,
+ nb);
+ int ret, i;
+
+ for (i = 0; i < typec->num_ports; i++) {
+ ret = cros_typec_port_update(typec, i);
+ if (ret < 0)
+ dev_warn(typec->dev, "Update failed for port: %d\n", i);
+ }
+
+ return NOTIFY_OK;
+}
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id cros_typec_acpi_id[] = {
{ "GOOG0014", 0 },
@@ -332,12 +414,15 @@ static int cros_typec_probe(struct platform_device *pdev)
goto unregister_ports;
}
+ typec->nb.notifier_call = cros_ec_typec_event;
+ ret = cros_usbpd_register_notify(&typec->nb);
+ if (ret < 0)
+ goto unregister_ports;
+
return 0;
unregister_ports:
- for (i = 0; i < typec->num_ports; i++)
- if (typec->ports[i])
- typec_unregister_port(typec->ports[i]);
+ cros_unregister_ports(typec);
return ret;
}
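
The notifier hookup above follows the standard kernel pattern: embed a notifier_block in the driver data, point notifier_call at the handler, and recover the containing structure with container_of(). A minimal sketch of that pattern, with my_drv_data and my_event as hypothetical names (cros_usbpd_register_notify() is the real registration call used here):

#include <linux/notifier.h>

struct my_drv_data {
	struct notifier_block nb;
	/* ... driver state ... */
};

static int my_event(struct notifier_block *nb, unsigned long action, void *data)
{
	struct my_drv_data *drv = container_of(nb, struct my_drv_data, nb);

	/* handle the event using drv, then tell the chain we consumed it */
	return NOTIFY_OK;
}

/* in probe():
 *	drv->nb.notifier_call = my_event;
 *	ret = cros_usbpd_register_notify(&drv->nb);
 */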
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index 7de3ea75ef46..d16931203d82 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -46,6 +46,7 @@ static const char * const fault_names[] = {
"---", "OCP", "fast OCP", "OVP", "Discharge"
};
+__printf(3, 4)
static int append_str(char *buf, int pos, const char *fmt, ...)
{
va_list args;
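
The __printf(3, 4) annotation is the kernel's shorthand (from <linux/compiler_attributes.h>) for __attribute__((format(printf, 3, 4))): argument 3 is the format string and the variadic arguments start at argument 4, so the compiler can type-check every caller. A small sketch of what it buys, with log_line as a hypothetical function:

/* hypothetical example: the attribute makes the compiler check callers */
__printf(2, 3)
static int log_line(struct seq_file *m, const char *fmt, ...);

/* log_line(m, "%d items", "three");  -> -Wformat warning at build time */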
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index df5a5f6c3ec6..a812788a0bdc 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -208,7 +208,12 @@ static int send_ec_cmd(struct wilco_ec_device *ec, u8 sub_cmd, u8 *out_val)
*/
static int h1_gpio_get(void *arg, u64 *val)
{
- return send_ec_cmd(arg, SUB_CMD_H1_GPIO, (u8 *)val);
+ int ret;
+
+ ret = send_ec_cmd(arg, SUB_CMD_H1_GPIO, (u8 *)val);
+ if (ret == 0)
+ *val &= 0xFF;
+ return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_h1_gpio, h1_gpio_get, NULL, "0x%02llx\n");
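
The added mask matters because send_ec_cmd() stores a single byte through the (u8 *) cast while *val is a full u64, so the upper 56 bits keep whatever the caller left there. A standalone sketch of the failure mode and the fix (values are hypothetical, little-endian assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xDEADBEEFCAFEF00DULL;	/* stale caller-provided bits */

	*(uint8_t *)&val = 0x5A;	/* what the one-byte EC write effectively does */
	printf("unmasked: %#llx\n", (unsigned long long)val);	/* 0xdeadbeefcafef05a */

	val &= 0xFF;			/* the fix applied above */
	printf("masked:   %#llx\n", (unsigned long long)val);	/* 0x5a */
	return 0;
}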
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 5e77b0dc5fd6..8ac149173c64 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -24,4 +24,10 @@ config CPU_HWMON
help
Loongson-3A/3B CPU Hwmon (temperature sensor) driver.
+config RS780E_ACPI
+ bool "Loongson RS780E ACPI Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ help
+ Loongson RS780E PCH ACPI Controller driver.
+
endif # MIPS_PLATFORM_DEVICES
diff --git a/drivers/platform/mips/Makefile b/drivers/platform/mips/Makefile
index be8146c20dc8..178149098777 100644
--- a/drivers/platform/mips/Makefile
+++ b/drivers/platform/mips/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o
+obj-$(CONFIG_RS780E_ACPI) += rs780e-acpi.o
diff --git a/drivers/platform/mips/rs780e-acpi.c b/drivers/platform/mips/rs780e-acpi.c
new file mode 100644
index 000000000000..e5a643b78ac9
--- /dev/null
+++ b/drivers/platform/mips/rs780e-acpi.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static unsigned long acpi_iobase;
+
+#define ACPI_PM_EVT_BLK (acpi_iobase + 0x00) /* 4 bytes */
+#define ACPI_PM_CNT_BLK (acpi_iobase + 0x04) /* 2 bytes */
+#define ACPI_PMA_CNT_BLK (acpi_iobase + 0x0F) /* 1 byte */
+#define ACPI_PM_TMR_BLK (acpi_iobase + 0x18) /* 4 bytes */
+#define ACPI_GPE0_BLK (acpi_iobase + 0x10) /* 8 bytes */
+#define ACPI_END (acpi_iobase + 0x80)
+
+#define PM_INDEX 0xCD6
+#define PM_DATA 0xCD7
+#define PM2_INDEX 0xCD0
+#define PM2_DATA 0xCD1
+
+static void pmio_write_index(u16 index, u8 reg, u8 value)
+{
+ outb(reg, index);
+ outb(value, index + 1);
+}
+
+static u8 pmio_read_index(u16 index, u8 reg)
+{
+ outb(reg, index);
+ return inb(index + 1);
+}
+
+void pm_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm_iowrite);
+
+u8 pm_ioread(u8 reg)
+{
+ return pmio_read_index(PM_INDEX, reg);
+}
+EXPORT_SYMBOL(pm_ioread);
+
+void pm2_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM2_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm2_iowrite);
+
+u8 pm2_ioread(u8 reg)
+{
+ return pmio_read_index(PM2_INDEX, reg);
+}
+EXPORT_SYMBOL(pm2_ioread);
+
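pm_iowrite()/pm_ioread() (and the PM2 variants) implement the classic index/data port pattern: the register number goes to the index port (0xCD6 or 0xCD0) and the value moves through the data port one address higher. A minimal read-modify-write sketch using these helpers, mirroring what acpi_registers_setup() below does repeatedly:

	/* set bit 0 of PM register 0x10 (SCI enable) without touching other bits */
	u8 v = pm_ioread(0x10);

	pm_iowrite(0x10, v | 0x01);
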
+static void acpi_hw_clear_status(void)
+{
+ u16 value;
+
+ /* PMStatus: Clear WakeStatus/PwrBtnStatus */
+ value = inw(ACPI_PM_EVT_BLK);
+ value |= (1 << 8 | 1 << 15);
+ outw(value, ACPI_PM_EVT_BLK);
+
+ /* GPEStatus: Clear all generated events */
+ outl(inl(ACPI_GPE0_BLK), ACPI_GPE0_BLK);
+}
+
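Both clears rely on write-one-to-clear (W1C) status semantics: reading returns the pending-event bits, and writing 1s back acknowledges exactly those bits, so outl(inl(reg), reg) is not a no-op. The same idiom on a hypothetical status port:

	u32 pending = inl(GPE_STS);	/* hypothetical W1C status port */

	outl(pending, GPE_STS);		/* each 1 written clears that pending bit */
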
+void acpi_registers_setup(void)
+{
+ u32 value;
+
+ /* PM Status Base */
+ pm_iowrite(0x20, ACPI_PM_EVT_BLK & 0xff);
+ pm_iowrite(0x21, ACPI_PM_EVT_BLK >> 8);
+
+ /* PM Control Base */
+ pm_iowrite(0x22, ACPI_PM_CNT_BLK & 0xff);
+ pm_iowrite(0x23, ACPI_PM_CNT_BLK >> 8);
+
+ /* GPM Base */
+ pm_iowrite(0x28, ACPI_GPE0_BLK & 0xff);
+ pm_iowrite(0x29, ACPI_GPE0_BLK >> 8);
+
+ /* ACPI End */
+ pm_iowrite(0x2e, ACPI_END & 0xff);
+ pm_iowrite(0x2f, ACPI_END >> 8);
+
+ /* IO Decode: When AcpiDecodeEnable is set, the south bridge uses the
+ * contents of the PM registers at index 0x20~0x2B to decode the ACPI
+ * I/O address. */
+ pm_iowrite(0x0e, 1 << 3);
+
+ /* SCI_EN set */
+ outw(1, ACPI_PM_CNT_BLK);
+
+ /* Enable to generate SCI */
+ pm_iowrite(0x10, pm_ioread(0x10) | 1);
+
+ /* GPM3/GPM9 enable */
+ value = inl(ACPI_GPE0_BLK + 4);
+ outl(value | (1 << 14) | (1 << 22), ACPI_GPE0_BLK + 4);
+
+ /* Set GPM9 as input */
+ pm_iowrite(0x8d, pm_ioread(0x8d) & (~(1 << 1)));
+
+ /* Set GPM9 as non-output */
+ pm_iowrite(0x94, pm_ioread(0x94) | (1 << 3));
+
+ /* GPM3 config ACPI trigger SCIOUT */
+ pm_iowrite(0x33, pm_ioread(0x33) & (~(3 << 4)));
+
+ /* GPM9 config ACPI trigger SCIOUT */
+ pm_iowrite(0x3d, pm_ioread(0x3d) & (~(3 << 2)));
+
+ /* GPM3 config falling edge trigger */
+ pm_iowrite(0x37, pm_ioread(0x37) & (~(1 << 6)));
+
+ /* No wait for STPGNT# in ACPI Sx state */
+ pm_iowrite(0x7c, pm_ioread(0x7c) | (1 << 6));
+
+ /* Set GPM3 pull-down enable */
+ value = pm2_ioread(0xf6);
+ value |= ((1 << 7) | (1 << 3));
+ pm2_iowrite(0xf6, value);
+
+ /* Set GPM9 pull-down enable */
+ value = pm2_ioread(0xf8);
+ value |= ((1 << 5) | (1 << 1));
+ pm2_iowrite(0xf8, value);
+}
+
+static int rs780e_acpi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -ENODEV;
+
+ /* The SCI interrupt needs the ACPI I/O space, so allocate it here */
+ if (!request_region(res->start, resource_size(res), "acpi")) {
+ pr_err("RS780E-ACPI: Failed to request IO Region\n");
+ return -EBUSY;
+ }
+
+ acpi_iobase = res->start;
+
+ acpi_registers_setup();
+ acpi_hw_clear_status();
+
+ return 0;
+}
+
+static const struct of_device_id rs780e_acpi_match[] = {
+ { .compatible = "loongson,rs780e-acpi" },
+ {},
+};
+
+static struct platform_driver rs780e_acpi_driver = {
+ .probe = rs780e_acpi_probe,
+ .driver = {
+ .name = "RS780E-ACPI",
+ .of_match_table = rs780e_acpi_match,
+ },
+};
+builtin_platform_driver(rs780e_acpi_driver);
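
rs780e_acpi_probe() expects an IORESOURCE_IO resource at index 0; on real Loongson64 firmware this is supplied by the devicetree node matching "loongson,rs780e-acpi". For illustration only, a hypothetical board-code equivalent that would also bind (the driver name matches too), with a made-up I/O window:

static struct resource rs780e_res[] = {
	DEFINE_RES_IO(0x800, 0x100),	/* hypothetical ACPI I/O window */
};

static struct platform_device rs780e_dev = {
	.name		= "RS780E-ACPI",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rs780e_res),
	.resource	= rs780e_res,
};
/* platform_device_register(&rs780e_dev); */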
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0ad7ad8cf8e1..ee93dcd065b8 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -78,6 +78,16 @@ config HUAWEI_WMI
To compile this driver as a module, choose M here: the module
will be called huawei-wmi.
+config INTEL_WMI_SBL_FW_UPDATE
+ tristate "Intel WMI Slim Bootloader firmware update signaling driver"
+ depends on ACPI_WMI
+ help
+ Say Y here if you want to be able to use the WMI interface to signal
+ Slim Bootloader to trigger an update on the next reboot.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-sbl-fw-update.
+
config INTEL_WMI_THUNDERBOLT
tristate "Intel WMI thunderbolt force power driver"
depends on ACPI_WMI
@@ -741,6 +751,7 @@ config THINKPAD_ACPI_HOTKEY_POLL
config INTEL_ATOMISP2_PM
tristate "Intel AtomISP2 dummy / power-management driver"
depends on PCI && IOSF_MBI && PM
+ depends on !INTEL_ATOMISP
help
Power-management driver for Intel's Image Signal Processor found on
Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
@@ -1269,7 +1280,8 @@ config INTEL_UNCORE_FREQ_CONTROL
config INTEL_BXTWC_PMIC_TMU
tristate "Intel BXT Whiskey Cove TMU Driver"
depends on REGMAP
- depends on INTEL_SOC_PMIC_BXTWC && INTEL_PMC_IPC
+ depends on MFD_INTEL_PMC_BXT
+ depends on INTEL_SOC_PMIC_BXTWC
---help---
Select this driver to use Intel BXT Whiskey Cove PMIC TMU feature.
This driver enables the alarm wakeup functionality in the TMU unit
@@ -1295,7 +1307,7 @@ config INTEL_MFLD_THERMAL
config INTEL_MID_POWER_BUTTON
tristate "power button driver for Intel MID platforms"
- depends on INTEL_SCU_IPC && INPUT
+ depends on INTEL_SCU && INPUT
help
This driver handles the power button on the Intel MID platforms.
@@ -1327,14 +1339,6 @@ config INTEL_PMC_CORE
- LTR Ignore
- MPHY/PLL gating status (Sunrisepoint PCH only)
-config INTEL_PMC_IPC
- tristate "Intel PMC IPC Driver"
- depends on ACPI && PCI
- ---help---
- This driver provides support for PMC control on some Intel platforms.
- The PMC is an ARC processor which defines IPC commands for communication
- with other entities in the CPU.
-
config INTEL_PUNIT_IPC
tristate "Intel P-Unit IPC Driver"
---help---
@@ -1342,17 +1346,39 @@ config INTEL_PUNIT_IPC
which is used to bridge the communications between kernel and P-Unit.
config INTEL_SCU_IPC
- bool "Intel SCU IPC Support"
- depends on X86_INTEL_MID
- default y
- ---help---
- IPC is used to bridge the communications between kernel and SCU on
- some embedded Intel x86 platforms. This is not needed for PC-type
- machines.
+ bool
+
+config INTEL_SCU
+ bool
+ select INTEL_SCU_IPC
+
+config INTEL_SCU_PCI
+ bool "Intel SCU PCI driver"
+ depends on PCI
+ select INTEL_SCU
+ help
+ This driver is used to bridge the communications between kernel
+ and SCU on some embedded Intel x86 platforms. It also creates
+ devices that are connected to the SoC through the SCU.
+ Platforms supported:
+ Medfield
+ Clovertrail
+ Merrifield
+ Broxton
+ Apollo Lake
+
+config INTEL_SCU_PLATFORM
+ tristate "Intel SCU platform driver"
+ depends on ACPI
+ select INTEL_SCU
+ help
+ This driver is used to bridge the communications between kernel
+ and SCU (sometimes called PMC as well). The driver currently
+ supports Intel Elkhart Lake and compatible platforms.
config INTEL_SCU_IPC_UTIL
tristate "Intel SCU IPC utility driver"
- depends on INTEL_SCU_IPC
+ depends on INTEL_SCU
---help---
The IPC Util driver provides an interface with the SCU enabling
low level access for debug work and updating the firmware. Say
@@ -1360,7 +1386,9 @@ config INTEL_SCU_IPC_UTIL
config INTEL_TELEMETRY
tristate "Intel SoC Telemetry Driver"
- depends on INTEL_PMC_IPC && INTEL_PUNIT_IPC && X86_64
+ depends on X86_64
+ depends on MFD_INTEL_PMC_BXT
+ depends on INTEL_PUNIT_IPC
---help---
This driver provides interfaces to configure and use
telemetry for INTEL SoC from APL onwards. It is also
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 53408d965874..2b85852a1a87 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
# WMI drivers
obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
obj-$(CONFIG_HUAWEI_WMI) += huawei-wmi.o
+obj-$(CONFIG_INTEL_WMI_SBL_FW_UPDATE) += intel-wmi-sbl-fw-update.o
obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
@@ -138,9 +139,10 @@ obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o intel_pmc_core_pltdrv.o
-obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
+obj-$(CONFIG_INTEL_SCU_PCI) += intel_scu_pcidrv.o
+obj-$(CONFIG_INTEL_SCU_PLATFORM) += intel_scu_pltdrv.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
intel_telemetry_pltdrv.o \
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 8cc86f4e3ac1..4df7609b4aa9 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -827,7 +827,7 @@ MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:");
-MODULE_ALIAS("dmi:*:*Acer*:pnExtensa 5420*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnExtensa*5420*:");
module_init(acerhdf_init);
module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index a666fbc2e73b..0edafe687fa9 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -640,22 +640,15 @@ static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev)
static void asus_led_exit(struct asus_laptop *asus)
{
- if (!IS_ERR_OR_NULL(asus->wled.led.dev))
- led_classdev_unregister(&asus->wled.led);
- if (!IS_ERR_OR_NULL(asus->bled.led.dev))
- led_classdev_unregister(&asus->bled.led);
- if (!IS_ERR_OR_NULL(asus->mled.led.dev))
- led_classdev_unregister(&asus->mled.led);
- if (!IS_ERR_OR_NULL(asus->tled.led.dev))
- led_classdev_unregister(&asus->tled.led);
- if (!IS_ERR_OR_NULL(asus->pled.led.dev))
- led_classdev_unregister(&asus->pled.led);
- if (!IS_ERR_OR_NULL(asus->rled.led.dev))
- led_classdev_unregister(&asus->rled.led);
- if (!IS_ERR_OR_NULL(asus->gled.led.dev))
- led_classdev_unregister(&asus->gled.led);
- if (!IS_ERR_OR_NULL(asus->kled.led.dev))
- led_classdev_unregister(&asus->kled.led);
+ led_classdev_unregister(&asus->wled.led);
+ led_classdev_unregister(&asus->bled.led);
+ led_classdev_unregister(&asus->mled.led);
+ led_classdev_unregister(&asus->tled.led);
+ led_classdev_unregister(&asus->pled.led);
+ led_classdev_unregister(&asus->rled.led);
+ led_classdev_unregister(&asus->gled.led);
+ led_classdev_unregister(&asus->kled.led);
+
if (asus->led_workqueue) {
destroy_workqueue(asus->led_workqueue);
asus->led_workqueue = NULL;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index c4404d9c1de4..8c4d00482ef0 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -472,6 +472,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
{ KE_IGNORE, 0x6E, }, /* Low Battery notification */
{ KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
+ { KE_IGNORE, 0x79, }, /* Charger type detection notification */
{ KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
{ KE_KEY, 0x7c, { KEY_MICMUTE } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index bb7c529d7d16..877aade19497 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -57,6 +57,7 @@ MODULE_LICENSE("GPL");
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
#define NOTIFY_FNLOCK_TOGGLE 0x4e
+#define NOTIFY_KBD_DOCK_CHANGE 0x75
#define NOTIFY_KBD_BRTUP 0xc4
#define NOTIFY_KBD_BRTDWN 0xc5
#define NOTIFY_KBD_BRTTOGGLE 0xc7
@@ -116,6 +117,8 @@ struct bios_args {
u32 arg0;
u32 arg1;
u32 arg2; /* At least the TUF Gaming series uses a 3-dword input buffer. */
+ u32 arg4;
+ u32 arg5;
} __packed;
/*
@@ -222,45 +225,6 @@ struct asus_wmi {
struct asus_wmi_driver *driver;
};
-/* Input **********************************************************************/
-
-static int asus_wmi_input_init(struct asus_wmi *asus)
-{
- int err;
-
- asus->inputdev = input_allocate_device();
- if (!asus->inputdev)
- return -ENOMEM;
-
- asus->inputdev->name = asus->driver->input_name;
- asus->inputdev->phys = asus->driver->input_phys;
- asus->inputdev->id.bustype = BUS_HOST;
- asus->inputdev->dev.parent = &asus->platform_device->dev;
- set_bit(EV_REP, asus->inputdev->evbit);
-
- err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
- if (err)
- goto err_free_dev;
-
- err = input_register_device(asus->inputdev);
- if (err)
- goto err_free_dev;
-
- return 0;
-
-err_free_dev:
- input_free_device(asus->inputdev);
- return err;
-}
-
-static void asus_wmi_input_exit(struct asus_wmi *asus)
-{
- if (asus->inputdev)
- input_unregister_device(asus->inputdev);
-
- asus->inputdev = NULL;
-}
-
/* WMI ************************************************************************/
static int asus_wmi_evaluate_method3(u32 method_id,
@@ -309,7 +273,7 @@ static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
struct acpi_buffer input;
u64 phys_addr;
u32 retval;
- u32 status = -1;
+ u32 status;
/*
* Copy to dma capable address otherwise memory corruption occurs as
@@ -381,6 +345,53 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT);
}
+/* Input **********************************************************************/
+
+static int asus_wmi_input_init(struct asus_wmi *asus)
+{
+ int err, result;
+
+ asus->inputdev = input_allocate_device();
+ if (!asus->inputdev)
+ return -ENOMEM;
+
+ asus->inputdev->name = asus->driver->input_name;
+ asus->inputdev->phys = asus->driver->input_phys;
+ asus->inputdev->id.bustype = BUS_HOST;
+ asus->inputdev->dev.parent = &asus->platform_device->dev;
+ set_bit(EV_REP, asus->inputdev->evbit);
+
+ err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_KBD_DOCK);
+ if (result >= 0) {
+ input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
+ input_report_switch(asus->inputdev, SW_TABLET_MODE, !result);
+ } else if (result != -ENODEV) {
+ pr_err("Error checking for keyboard-dock: %d\n", result);
+ }
+
+ err = input_register_device(asus->inputdev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(asus->inputdev);
+ return err;
+}
+
+static void asus_wmi_input_exit(struct asus_wmi *asus)
+{
+ if (asus->inputdev)
+ input_unregister_device(asus->inputdev);
+
+ asus->inputdev = NULL;
+}
+
/* Battery ********************************************************************/
/* The battery maximum charging percentage */
@@ -675,14 +686,11 @@ static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
- if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
- led_classdev_unregister(&asus->kbd_led);
- if (!IS_ERR_OR_NULL(asus->tpd_led.dev))
- led_classdev_unregister(&asus->tpd_led);
- if (!IS_ERR_OR_NULL(asus->wlan_led.dev))
- led_classdev_unregister(&asus->wlan_led);
- if (!IS_ERR_OR_NULL(asus->lightbar_led.dev))
- led_classdev_unregister(&asus->lightbar_led);
+ led_classdev_unregister(&asus->kbd_led);
+ led_classdev_unregister(&asus->tpd_led);
+ led_classdev_unregister(&asus->wlan_led);
+ led_classdev_unregister(&asus->lightbar_led);
+
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
}
@@ -2058,9 +2066,9 @@ static int asus_wmi_get_event_code(u32 value)
static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
{
- int orig_code;
unsigned int key_value = 1;
bool autorelease = 1;
+ int result, orig_code;
orig_code = code;
@@ -2105,6 +2113,17 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
return;
}
+ if (code == NOTIFY_KBD_DOCK_CHANGE) {
+ result = asus_wmi_get_devstate_simple(asus,
+ ASUS_WMI_DEVID_KBD_DOCK);
+ if (result >= 0) {
+ input_report_switch(asus->inputdev, SW_TABLET_MODE,
+ !result);
+ input_sync(asus->inputdev);
+ }
+ return;
+ }
+
if (asus->fan_boost_mode_available && code == NOTIFY_KBD_FBM) {
fan_boost_mode_switch_next(asus);
return;
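
With NOTIFY_KBD_DOCK_CHANGE wired into the input device above, userspace sees SW_TABLET_MODE like any other switch and can poll the current state with the evdev EVIOCGSW ioctl. A minimal userspace sketch (the event-node path is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#define LONG_BITS (8 * sizeof(unsigned long))

int main(void)
{
	unsigned long sw[SW_MAX / LONG_BITS + 1] = { 0 };
	int fd = open("/dev/input/event5", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, EVIOCGSW(sizeof(sw)), sw) < 0)
		return 1;
	printf("tablet mode: %lu\n",
	       (sw[SW_TABLET_MODE / LONG_BITS] >> (SW_TABLET_MODE % LONG_BITS)) & 1);
	return 0;
}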
diff --git a/drivers/platform/x86/dcdbas.c b/drivers/platform/x86/dcdbas.c
index 84f4cc839cc3..d513a59a5d47 100644
--- a/drivers/platform/x86/dcdbas.c
+++ b/drivers/platform/x86/dcdbas.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
+#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/gfp.h>
@@ -34,7 +35,7 @@
#include "dcdbas.h"
#define DRIVER_NAME "dcdbas"
-#define DRIVER_VERSION "5.6.0-3.3"
+#define DRIVER_VERSION "5.6.0-3.4"
#define DRIVER_DESCRIPTION "Dell Systems Management Base Driver"
static struct platform_device *dcdbas_pdev;
@@ -45,7 +46,7 @@ static unsigned long smi_data_buf_size;
static unsigned long max_smi_data_buf_size = MAX_SMI_DATA_BUF_SIZE;
static u32 smi_data_buf_phys_addr;
static DEFINE_MUTEX(smi_data_lock);
-static u8 *eps_buffer;
+static u8 *bios_buffer;
static unsigned int host_control_action;
static unsigned int host_control_smi_type;
@@ -518,8 +519,10 @@ static inline struct smm_eps_table *check_eps_table(u8 *addr)
static int dcdbas_check_wsmt(void)
{
+ const struct dmi_device *dev = NULL;
struct acpi_table_wsmt *wsmt = NULL;
struct smm_eps_table *eps = NULL;
+ u64 bios_buf_paddr;
u64 remap_size;
u8 *addr;
@@ -532,6 +535,17 @@ static int dcdbas_check_wsmt(void)
!(wsmt->protection_flags & ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION))
return 0;
+ /*
+ * The BIOS can provide the address/size of the protected buffer
+ * either in an SMBIOS OEM string or in an EPS structure in the 0xFxxxx region.
+ */
+
+ /* Check SMBIOS for buffer address */
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev)))
+ if (sscanf(dev->name, "30[%16llx;%8llx]", &bios_buf_paddr,
+ &remap_size) == 2)
+ goto remap;
+
/* Scan for EPS (entry point structure) */
for (addr = (u8 *)__va(0xf0000);
addr < (u8 *)__va(0x100000 - sizeof(struct smm_eps_table));
@@ -542,34 +556,37 @@ static int dcdbas_check_wsmt(void)
}
if (!eps) {
- dev_dbg(&dcdbas_pdev->dev, "found WSMT, but no EPS found\n");
+ dev_dbg(&dcdbas_pdev->dev, "found WSMT, but no firmware buffer found\n");
return -ENODEV;
}
+ bios_buf_paddr = eps->smm_comm_buff_addr;
+ remap_size = eps->num_of_4k_pages * PAGE_SIZE;
+remap:
/*
* Get physical address of buffer and map to virtual address.
* Table gives size in 4K pages, regardless of actual system page size.
*/
- if (upper_32_bits(eps->smm_comm_buff_addr + 8)) {
- dev_warn(&dcdbas_pdev->dev, "found WSMT, but EPS buffer address is above 4GB\n");
+ if (upper_32_bits(bios_buf_paddr + 8)) {
+ dev_warn(&dcdbas_pdev->dev, "found WSMT, but buffer address is above 4GB\n");
return -EINVAL;
}
/*
* Limit remap size to MAX_SMI_DATA_BUF_SIZE + 8 (since the first 8
* bytes are used for a semaphore, not the data buffer itself).
*/
- remap_size = eps->num_of_4k_pages * PAGE_SIZE;
if (remap_size > MAX_SMI_DATA_BUF_SIZE + 8)
remap_size = MAX_SMI_DATA_BUF_SIZE + 8;
- eps_buffer = memremap(eps->smm_comm_buff_addr, remap_size, MEMREMAP_WB);
- if (!eps_buffer) {
- dev_warn(&dcdbas_pdev->dev, "found WSMT, but failed to map EPS buffer\n");
+
+ bios_buffer = memremap(bios_buf_paddr, remap_size, MEMREMAP_WB);
+ if (!bios_buffer) {
+ dev_warn(&dcdbas_pdev->dev, "found WSMT, but failed to map buffer\n");
return -ENOMEM;
}
/* First 8 bytes is for a semaphore, not part of the smi_data_buf */
- smi_data_buf_phys_addr = eps->smm_comm_buff_addr + 8;
- smi_data_buf = eps_buffer + 8;
+ smi_data_buf_phys_addr = bios_buf_paddr + 8;
+ smi_data_buf = bios_buffer + 8;
smi_data_buf_size = remap_size - 8;
max_smi_data_buf_size = smi_data_buf_size;
wsmt_enabled = true;
@@ -736,8 +753,8 @@ static void __exit dcdbas_exit(void)
*/
if (dcdbas_pdev)
smi_data_buf_free();
- if (eps_buffer)
- memunmap(eps_buffer);
+ if (bios_buffer)
+ memunmap(bios_buffer);
platform_device_unregister(dcdbas_pdev_reg);
platform_driver_unregister(&dcdbas_driver);
}
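
The SMBIOS path above scans DMI OEM strings for the "30[<16-hex-digit address>;<8-hex-digit size>]" marker and pulls both fields out with one sscanf(). A standalone sketch of the same parse, with a made-up string value:

#include <stdio.h>

int main(void)
{
	const char *oem = "30[00000000DF000000;00010000]";	/* hypothetical OEM string */
	unsigned long long addr, size;

	if (sscanf(oem, "30[%16llx;%8llx]", &addr, &size) == 2)
		printf("buffer at 0x%llx, %llu bytes\n", addr, size);
	return 0;
}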
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index f8d3e3bd1bb5..5e9c2296931c 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2204,10 +2204,13 @@ static int __init dell_init(void)
dell_laptop_register_notifier(&dell_laptop_notifier);
- micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
- ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
- if (ret < 0)
- goto fail_led;
+ if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) &&
+ dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE)) {
+ micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
+ if (ret < 0)
+ goto fail_led;
+ }
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return 0;
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 86e8dd6a8b33..c25a4286d766 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -310,6 +310,16 @@ static const struct key_entry dell_wmi_keymap_type_0011[] = {
/* Battery inserted */
{ KE_IGNORE, 0xfff1, { KEY_RESERVED } },
+ /*
+ * Detachable keyboard detached / undocked
+ * Note SW_TABLET_MODE is already reported through the intel_vbtn
+ * driver for this, so we ignore it.
+ */
+ { KE_IGNORE, 0xfff2, { KEY_RESERVED } },
+
+ /* Detachable keyboard attached / docked */
+ { KE_IGNORE, 0xfff3, { KEY_RESERVED } },
+
/* Keyboard backlight level changed */
{ KE_IGNORE, KBD_LED_OFF_TOKEN, { KEY_RESERVED } },
{ KE_IGNORE, KBD_LED_ON_TOKEN, { KEY_RESERVED } },
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 776868d5e458..ba08c9235f76 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -541,13 +541,11 @@ static int eeepc_led_init(struct eeepc_laptop *eeepc)
static void eeepc_led_exit(struct eeepc_laptop *eeepc)
{
- if (!IS_ERR_OR_NULL(eeepc->tpd_led.dev))
- led_classdev_unregister(&eeepc->tpd_led);
+ led_classdev_unregister(&eeepc->tpd_led);
if (eeepc->led_workqueue)
destroy_workqueue(eeepc->led_workqueue);
}
-
/*
* PCI hotplug (for wlan rfkill)
*/
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index a881b709af25..1762f335bac9 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -111,10 +111,10 @@ enum hp_wireless2_bits {
HPWMI_POWER_SOFT = 0x02,
HPWMI_POWER_BIOS = 0x04,
HPWMI_POWER_HARD = 0x08,
+ HPWMI_POWER_FW_OR_HW = HPWMI_POWER_BIOS | HPWMI_POWER_HARD,
};
-#define IS_HWBLOCKED(x) ((x & (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) \
- != (HPWMI_POWER_BIOS | HPWMI_POWER_HARD))
+#define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
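
The named constant is behavior-preserving; a worked example with the enum values above (SOFT=0x02, BIOS=0x04, HARD=0x08, so FW_OR_HW=0x0C):

/* x = HPWMI_POWER_SOFT | HPWMI_POWER_BIOS = 0x06:
 *     (0x06 & 0x0C) = 0x04 != 0x0C  -> IS_HWBLOCKED(x) is true
 * x = HPWMI_POWER_SOFT | HPWMI_POWER_BIOS | HPWMI_POWER_HARD = 0x0E:
 *     (0x0E & 0x0C) = 0x0C          -> IS_HWBLOCKED(x) is false
 *     soft bit set                  -> IS_SWBLOCKED(x) is false (not soft-blocked)
 */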
struct bios_rfkill2_device_state {
@@ -461,8 +461,14 @@ static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
static ssize_t als_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- u32 tmp = simple_strtoul(buf, NULL, 10);
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
+ u32 tmp;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &tmp);
+ if (ret)
+ return ret;
+
+ ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
return ret < 0 ? ret : -EINVAL;
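
kstrtou32() is the stricter replacement for simple_strtoul(): it fails on trailing garbage and on values that do not fit the target type, and the error can be returned straight to the sysfs writer. Roughly:

/* simple_strtoul("12abc", NULL, 10) -> 12, trailing "abc" silently ignored */
/* kstrtou32("12abc", 10, &v)        -> -EINVAL, nothing stored             */
/* kstrtou32("5000000000", 10, &v)   -> -ERANGE, does not fit in a u32      */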
@@ -473,22 +479,20 @@ static ssize_t als_store(struct device *dev, struct device_attribute *attr,
static ssize_t postcode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- long unsigned int tmp2;
+ u32 tmp = 1;
+ bool clear;
int ret;
- u32 tmp;
- ret = kstrtoul(buf, 10, &tmp2);
- if (!ret && tmp2 != 1)
- ret = -EINVAL;
+ ret = kstrtobool(buf, &clear);
if (ret)
- goto out;
+ return ret;
+
+ if (clear == false)
+ return -EINVAL;
/* Clear the POST error code. It is kept until cleared. */
- tmp = (u32) tmp2;
ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, HPWMI_WRITE, &tmp,
sizeof(tmp), sizeof(tmp));
-
-out:
if (ret)
return ret < 0 ? ret : -EINVAL;
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index cc7dd4d87cce..9ee79b74311c 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -79,6 +79,13 @@ static const struct dmi_system_id button_array_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 16"),
},
},
+ {
+ .ident = "HP Spectre x2 (2015)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index b5880936d785..0487b606a274 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -40,28 +40,70 @@ static const struct key_entry intel_vbtn_keymap[] = {
{ KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */
{ KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */
{ KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */
+};
+
+static const struct key_entry intel_vbtn_switchmap[] = {
{ KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
{ KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
{ KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */
{ KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */
- { KE_END },
};
+#define KEYMAP_LEN \
+ (ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1)
+
struct intel_vbtn_priv {
+ struct key_entry keymap[KEYMAP_LEN];
struct input_dev *input_dev;
+ bool has_buttons;
+ bool has_switches;
bool wakeup_mode;
};
+static void detect_tablet_mode(struct platform_device *device)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ unsigned long long vgbs;
+ acpi_status status;
+ int m;
+
+ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
+ if (ACPI_FAILURE(status))
+ return;
+
+ m = !(vgbs & TABLET_MODE_FLAG);
+ input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
+ m = (vgbs & DOCK_MODE_FLAG) ? 1 : 0;
+ input_report_switch(priv->input_dev, SW_DOCK, m);
+}
+
static int intel_vbtn_input_setup(struct platform_device *device)
{
struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
- int ret;
+ int ret, keymap_len = 0;
+
+ if (priv->has_buttons) {
+ memcpy(&priv->keymap[keymap_len], intel_vbtn_keymap,
+ ARRAY_SIZE(intel_vbtn_keymap) *
+ sizeof(struct key_entry));
+ keymap_len += ARRAY_SIZE(intel_vbtn_keymap);
+ }
+
+ if (priv->has_switches) {
+ memcpy(&priv->keymap[keymap_len], intel_vbtn_switchmap,
+ ARRAY_SIZE(intel_vbtn_switchmap) *
+ sizeof(struct key_entry));
+ keymap_len += ARRAY_SIZE(intel_vbtn_switchmap);
+ }
+
+ priv->keymap[keymap_len].type = KE_END;
priv->input_dev = devm_input_allocate_device(&device->dev);
if (!priv->input_dev)
return -ENOMEM;
- ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL);
+ ret = sparse_keymap_setup(priv->input_dev, priv->keymap, NULL);
if (ret)
return ret;
@@ -69,6 +111,9 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->input_dev->name = "Intel Virtual Button driver";
priv->input_dev->id.bustype = BUS_HOST;
+ if (priv->has_switches)
+ detect_tablet_mode(device);
+
return input_register_device(priv->input_dev);
}
@@ -114,44 +159,46 @@ out_unknown:
dev_dbg(&device->dev, "unknown event index 0x%x\n", event);
}
-static void detect_tablet_mode(struct platform_device *device)
+static bool intel_vbtn_has_buttons(acpi_handle handle)
{
- const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
- struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
- acpi_handle handle = ACPI_HANDLE(&device->dev);
- struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
acpi_status status;
- int m;
- if (!(chassis_type && strcmp(chassis_type, "31") == 0))
- goto out;
+ status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ return ACPI_SUCCESS(status);
+}
- status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
- if (ACPI_FAILURE(status))
- goto out;
+static bool intel_vbtn_has_switches(acpi_handle handle)
+{
+ const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+ unsigned long long vgbs;
+ acpi_status status;
- obj = vgbs_output.pointer;
- if (!(obj && obj->type == ACPI_TYPE_INTEGER))
- goto out;
+ /*
+ * Some normal laptops have a VGBS method despite being non-convertible
+ * and their VGBS method always returns 0, causing detect_tablet_mode()
+ * to report SW_TABLET_MODE=1 to userspace, which causes issues.
+ * These laptops have a DMI chassis_type of 9 ("Laptop"), so do not
+ * report switches on any device with a DMI chassis_type of 9.
+ */
+ if (chassis_type && strcmp(chassis_type, "9") == 0)
+ return false;
- m = !(obj->integer.value & TABLET_MODE_FLAG);
- input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
- m = (obj->integer.value & DOCK_MODE_FLAG) ? 1 : 0;
- input_report_switch(priv->input_dev, SW_DOCK, m);
-out:
- kfree(vgbs_output.pointer);
+ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
+ return ACPI_SUCCESS(status);
}
static int intel_vbtn_probe(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
+ bool has_buttons, has_switches;
struct intel_vbtn_priv *priv;
acpi_status status;
int err;
- status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
- if (ACPI_FAILURE(status)) {
+ has_buttons = intel_vbtn_has_buttons(handle);
+ has_switches = intel_vbtn_has_switches(handle);
+
+ if (!has_buttons && !has_switches) {
dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
return -ENODEV;
}
@@ -161,14 +208,15 @@ static int intel_vbtn_probe(struct platform_device *device)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
+ priv->has_buttons = has_buttons;
+ priv->has_switches = has_switches;
+
err = intel_vbtn_input_setup(device);
if (err) {
pr_err("Failed to setup Intel Virtual Button\n");
return err;
}
- detect_tablet_mode(device);
-
status = acpi_install_notify_handler(handle,
ACPI_DEVICE_NOTIFY,
notify_handler,
diff --git a/drivers/platform/x86/intel-wmi-sbl-fw-update.c b/drivers/platform/x86/intel-wmi-sbl-fw-update.c
new file mode 100644
index 000000000000..ea87fa0786e8
--- /dev/null
+++ b/drivers/platform/x86/intel-wmi-sbl-fw-update.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Slim Bootloader(SBL) firmware update signaling driver
+ *
+ * Slim Bootloader is a small, open-source, non-UEFI-compliant boot firmware
+ * optimized for running on certain Intel platforms.
+ *
+ * SBL exposes an ACPI-WMI device via /sys/bus/wmi/devices/<INTEL_WMI_SBL_GUID>.
+ * This driver adds a "firmware_update_request" device attribute.
+ * This attribute normally has a value of 0, and userspace can signal SBL
+ * to update the firmware on the next reboot by writing a value of 1.
+ *
+ * More details of the SBL firmware update process are available at:
+ * https://slimbootloader.github.io/security/firmware-update.html
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/wmi.h>
+
+#define INTEL_WMI_SBL_GUID "44FADEB1-B204-40F2-8581-394BBDC1B651"
+
+static int get_fwu_request(struct device *dev, u32 *out)
+{
+ struct acpi_buffer result = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_query_block(INTEL_WMI_SBL_GUID, 0, &result);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "wmi_query_block failed\n");
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)result.pointer;
+ if (!obj || obj->type != ACPI_TYPE_INTEGER) {
+ dev_warn(dev, "wmi_query_block returned invalid value\n");
+ kfree(obj);
+ return -EINVAL;
+ }
+
+ *out = obj->integer.value;
+ kfree(obj);
+
+ return 0;
+}
+
+static int set_fwu_request(struct device *dev, u32 in)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ u32 value;
+
+ value = in;
+ input.length = sizeof(u32);
+ input.pointer = &value;
+
+ status = wmi_set_block(INTEL_WMI_SBL_GUID, 0, &input);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "wmi_set_block failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static ssize_t firmware_update_request_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ int ret;
+
+ ret = get_fwu_request(dev, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t firmware_update_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ /* May later be extended to support values other than 0 and 1 */
+ if (val > 1)
+ return -ERANGE;
+
+ ret = set_fwu_request(dev, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(firmware_update_request);
+
+static struct attribute *firmware_update_attrs[] = {
+ &dev_attr_firmware_update_request.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(firmware_update);
+
+static int intel_wmi_sbl_fw_update_probe(struct wmi_device *wdev,
+ const void *context)
+{
+ dev_info(&wdev->dev, "Slim Bootloader signaling driver attached\n");
+ return 0;
+}
+
+static int intel_wmi_sbl_fw_update_remove(struct wmi_device *wdev)
+{
+ dev_info(&wdev->dev, "Slim Bootloader signaling driver removed\n");
+ return 0;
+}
+
+static const struct wmi_device_id intel_wmi_sbl_id_table[] = {
+ { .guid_string = INTEL_WMI_SBL_GUID },
+ {}
+};
+MODULE_DEVICE_TABLE(wmi, intel_wmi_sbl_id_table);
+
+static struct wmi_driver intel_wmi_sbl_fw_update_driver = {
+ .driver = {
+ .name = "intel-wmi-sbl-fw-update",
+ .dev_groups = firmware_update_groups,
+ },
+ .probe = intel_wmi_sbl_fw_update_probe,
+ .remove = intel_wmi_sbl_fw_update_remove,
+ .id_table = intel_wmi_sbl_id_table,
+};
+module_wmi_driver(intel_wmi_sbl_fw_update_driver);
+
+MODULE_AUTHOR("Jithu Joseph <jithu.joseph@intel.com>");
+MODULE_DESCRIPTION("Slim Bootloader firmware update signaling driver");
+MODULE_LICENSE("GPL v2");
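
Once bound, the attribute shows up under the WMI device; a minimal userspace sketch that requests a firmware update on the next reboot (the sysfs path embeds the GUID from the id table above):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/wmi/devices/"
			   "44FADEB1-B204-40F2-8581-394BBDC1B651/"
			   "firmware_update_request";
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	fputs("1", f);		/* signal SBL to update on next reboot */
	return fclose(f) ? 1 : 0;
}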
diff --git a/drivers/platform/x86/intel_cht_int33fe_typec.c b/drivers/platform/x86/intel_cht_int33fe_typec.c
index 04138215956b..48638d1c56e5 100644
--- a/drivers/platform/x86/intel_cht_int33fe_typec.c
+++ b/drivers/platform/x86/intel_cht_int33fe_typec.c
@@ -6,14 +6,14 @@
*
* Some Intel Cherry Trail based device which ship with Windows 10, have
* this weird INT33FE ACPI device with a CRS table with 4 I2cSerialBusV2
- * resources, for 4 different chips attached to various i2c busses:
- * 1. The Whiskey Cove pmic, which is also described by the INT34D3 ACPI device
+ * resources, for 4 different chips attached to various I²C buses:
+ * 1. The Whiskey Cove PMIC, which is also described by the INT34D3 ACPI device
* 2. Maxim MAX17047 Fuel Gauge Controller
* 3. FUSB302 USB Type-C Controller
* 4. PI3USB30532 USB switch
*
* So this driver is a stub / pseudo driver whose only purpose is to
- * instantiate i2c-clients for chips 2 - 4, so that standard i2c drivers
+ * instantiate I²C clients for chips 2 - 4, so that standard I²C drivers
* for these chips can bind to them.
*/
@@ -21,43 +21,32 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/pd.h>
#include "intel_cht_int33fe_common.h"
-enum {
- INT33FE_NODE_FUSB302,
- INT33FE_NODE_MAX17047,
- INT33FE_NODE_PI3USB30532,
- INT33FE_NODE_DISPLAYPORT,
- INT33FE_NODE_USB_CONNECTOR,
- INT33FE_NODE_MAX,
-};
-
/*
- * Grrr I severly dislike buggy BIOS-es. At least one BIOS enumerates
+ * Grrr, I severely dislike buggy BIOS-es. At least one BIOS enumerates
* the max17047 both through the INT33FE ACPI device (it is right there
* in the resources table) as well as through a separate MAX17047 device.
*
- * These helpers are used to work around this by checking if an i2c-client
+ * These helpers are used to work around this by checking if an I²C client
* for the max17047 has already been registered.
*/
static int cht_int33fe_check_for_max17047(struct device *dev, void *data)
{
struct i2c_client **max17047 = data;
struct acpi_device *adev;
- const char *hid;
adev = ACPI_COMPANION(dev);
if (!adev)
return 0;
- hid = acpi_device_hid(adev);
-
/* The MAX17047 ACPI node doesn't have an UID, so we don't check that */
- if (strcmp(hid, "MAX17047"))
+ if (!acpi_dev_hid_uid_match(adev, "MAX17047", NULL))
return 0;
*max17047 = to_i2c_client(dev);
@@ -66,11 +55,16 @@ static int cht_int33fe_check_for_max17047(struct device *dev, void *data)
static const char * const max17047_suppliers[] = { "bq24190-charger" };
-static const struct property_entry max17047_props[] = {
+static const struct property_entry max17047_properties[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", max17047_suppliers),
{ }
};
+static const struct software_node max17047_node = {
+ .name = "max17047",
+ .properties = max17047_properties,
+};
+
/*
* We are not using inline property here because those are constant,
* and we need to adjust this one at runtime to point to real
@@ -80,12 +74,17 @@ static struct software_node_ref_args fusb302_mux_refs[] = {
{ .node = NULL },
};
-static const struct property_entry fusb302_props[] = {
+static const struct property_entry fusb302_properties[] = {
PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs),
{ }
};
+static const struct software_node fusb302_node = {
+ .name = "fusb302",
+ .properties = fusb302_properties,
+};
+
#define PDO_FIXED_FLAGS \
(PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)
@@ -98,31 +97,40 @@ static const u32 snk_pdo[] = {
PDO_VAR(5000, 12000, 3000),
};
-static const struct software_node nodes[];
+static const struct software_node pi3usb30532_node = {
+ .name = "pi3usb30532",
+};
+
+static const struct software_node displayport_node = {
+ .name = "displayport",
+};
-static const struct property_entry usb_connector_props[] = {
+static const struct property_entry usb_connector_properties[] = {
PROPERTY_ENTRY_STRING("data-role", "dual"),
PROPERTY_ENTRY_STRING("power-role", "dual"),
PROPERTY_ENTRY_STRING("try-power-role", "sink"),
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
- PROPERTY_ENTRY_REF("orientation-switch",
- &nodes[INT33FE_NODE_PI3USB30532]),
- PROPERTY_ENTRY_REF("mode-switch",
- &nodes[INT33FE_NODE_PI3USB30532]),
- PROPERTY_ENTRY_REF("displayport",
- &nodes[INT33FE_NODE_DISPLAYPORT]),
+ PROPERTY_ENTRY_REF("orientation-switch", &pi3usb30532_node),
+ PROPERTY_ENTRY_REF("mode-switch", &pi3usb30532_node),
+ PROPERTY_ENTRY_REF("displayport", &displayport_node),
{ }
};
-static const struct software_node nodes[] = {
- { "fusb302", NULL, fusb302_props },
- { "max17047", NULL, max17047_props },
- { "pi3usb30532" },
- { "displayport" },
- { "connector", &nodes[0], usb_connector_props },
- { }
+static const struct software_node usb_connector_node = {
+ .name = "connector",
+ .parent = &fusb302_node,
+ .properties = usb_connector_properties,
+};
+
+static const struct software_node *node_group[] = {
+ &fusb302_node,
+ &max17047_node,
+ &pi3usb30532_node,
+ &displayport_node,
+ &usb_connector_node,
+ NULL
};
static int cht_int33fe_setup_dp(struct cht_int33fe_data *data)
@@ -130,7 +138,7 @@ static int cht_int33fe_setup_dp(struct cht_int33fe_data *data)
struct fwnode_handle *fwnode;
struct pci_dev *pdev;
- fwnode = software_node_fwnode(&nodes[INT33FE_NODE_DISPLAYPORT]);
+ fwnode = software_node_fwnode(&displayport_node);
if (!fwnode)
return -ENODEV;
@@ -155,11 +163,10 @@ static int cht_int33fe_setup_dp(struct cht_int33fe_data *data)
static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
{
- software_node_unregister_nodes(nodes);
+ software_node_unregister_node_group(node_group);
if (fusb302_mux_refs[0].node) {
- fwnode_handle_put(
- software_node_fwnode(fusb302_mux_refs[0].node));
+ fwnode_handle_put(software_node_fwnode(fusb302_mux_refs[0].node));
fusb302_mux_refs[0].node = NULL;
}
@@ -192,7 +199,7 @@ static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
*/
fusb302_mux_refs[0].node = mux_ref_node;
- ret = software_node_register_nodes(nodes);
+ ret = software_node_register_node_group(node_group);
if (ret)
return ret;
@@ -222,16 +229,15 @@ cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
struct fwnode_handle *fwnode;
int ret;
- fwnode = software_node_fwnode(&nodes[INT33FE_NODE_MAX17047]);
+ fwnode = software_node_fwnode(&max17047_node);
if (!fwnode)
return -ENODEV;
i2c_for_each_dev(&max17047, cht_int33fe_check_for_max17047);
if (max17047) {
- /* Pre-existing i2c-client for the max17047, add device-props */
- fwnode->secondary = ERR_PTR(-ENODEV);
- max17047->dev.fwnode->secondary = fwnode;
- /* And re-probe to get the new device-props applied. */
+ /* Pre-existing I²C client for the max17047, add device properties */
+ set_secondary_fwnode(&max17047->dev, fwnode);
+ /* And re-probe to get the new device properties applied */
ret = device_reprobe(&max17047->dev);
if (ret)
dev_warn(dev, "Reprobing max17047 error: %d\n", ret);
@@ -266,7 +272,7 @@ int cht_int33fe_typec_probe(struct cht_int33fe_data *data)
* must be registered before the fusb302 is instantiated, otherwise
* it will end up with a dummy-regulator.
* Note "cht_wc_usb_typec_vbus" comes from the regulator_init_data
- * which is defined in i2c-cht-wc.c from where the bq24292i i2c-client
+ * which is defined in i2c-cht-wc.c from where the bq24292i I²C client
* gets instantiated. We use regulator_get_optional here so that we
* don't end up getting a dummy-regulator ourselves.
*/
@@ -277,7 +283,7 @@ int cht_int33fe_typec_probe(struct cht_int33fe_data *data)
}
regulator_put(regulator);
- /* The FUSB302 uses the irq at index 1 and is the only irq user */
+ /* The FUSB302 uses the IRQ at index 1 and is the only IRQ user */
fusb302_irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 1);
if (fusb302_irq < 0) {
if (fusb302_irq != -EPROBE_DEFER)
@@ -289,12 +295,12 @@ int cht_int33fe_typec_probe(struct cht_int33fe_data *data)
if (ret)
return ret;
- /* Work around BIOS bug, see comment on cht_int33fe_check_for_max17047 */
+ /* Work around BIOS bug, see comment on cht_int33fe_check_for_max17047() */
ret = cht_int33fe_register_max17047(dev, data);
if (ret)
goto out_remove_nodes;
- fwnode = software_node_fwnode(&nodes[INT33FE_NODE_FUSB302]);
+ fwnode = software_node_fwnode(&fusb302_node);
if (!fwnode) {
ret = -ENODEV;
goto out_unregister_max17047;
@@ -312,7 +318,7 @@ int cht_int33fe_typec_probe(struct cht_int33fe_data *data)
goto out_unregister_max17047;
}
- fwnode = software_node_fwnode(&nodes[INT33FE_NODE_PI3USB30532]);
+ fwnode = software_node_fwnode(&pi3usb30532_node);
if (!fwnode) {
ret = -ENODEV;
goto out_unregister_fusb302;
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 9c9f209c8a33..df434abbb66f 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -46,6 +46,7 @@ struct mid_pb_ddata {
unsigned short mirqlvl1_addr;
unsigned short pbstat_addr;
u8 pbstat_mask;
+ struct intel_scu_ipc_dev *scu;
int (*setup)(struct mid_pb_ddata *ddata);
};
@@ -55,7 +56,8 @@ static int mid_pbstat(struct mid_pb_ddata *ddata, int *value)
int ret;
u8 pbstat;
- ret = intel_scu_ipc_ioread8(ddata->pbstat_addr, &pbstat);
+ ret = intel_scu_ipc_dev_ioread8(ddata->scu, ddata->pbstat_addr,
+ &pbstat);
if (ret)
return ret;
@@ -67,14 +69,15 @@ static int mid_pbstat(struct mid_pb_ddata *ddata, int *value)
static int mid_irq_ack(struct mid_pb_ddata *ddata)
{
- return intel_scu_ipc_update_register(ddata->mirqlvl1_addr, 0, MSIC_PWRBTNM);
+ return intel_scu_ipc_dev_update(ddata->scu, ddata->mirqlvl1_addr, 0,
+ MSIC_PWRBTNM);
}
static int mrfld_setup(struct mid_pb_ddata *ddata)
{
/* Unmask the PBIRQ and MPBIRQ on Tangier */
- intel_scu_ipc_update_register(BCOVE_PBIRQ, 0, MSIC_PWRBTNM);
- intel_scu_ipc_update_register(BCOVE_PBIRQMASK, 0, MSIC_PWRBTNM);
+ intel_scu_ipc_dev_update(ddata->scu, BCOVE_PBIRQ, 0, MSIC_PWRBTNM);
+ intel_scu_ipc_dev_update(ddata->scu, BCOVE_PBIRQMASK, 0, MSIC_PWRBTNM);
return 0;
}
@@ -161,6 +164,10 @@ static int mid_pb_probe(struct platform_device *pdev)
return error;
}
+ ddata->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ if (!ddata->scu)
+ return -EPROBE_DEFER;
+
error = devm_request_threaded_irq(&pdev->dev, irq, NULL, mid_pb_isr,
IRQF_ONESHOT, DRIVER_NAME, ddata);
if (error) {
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
deleted file mode 100644
index 2433bf73f1ed..000000000000
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ /dev/null
@@ -1,949 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for the Intel PMC IPC mechanism
- *
- * (C) Copyright 2014-2015 Intel Corporation
- *
- * This driver is based on Intel SCU IPC driver(intel_scu_ipc.c) by
- * Sreedhara DS <sreedhara.ds@intel.com>
- *
- * PMC running in ARC processor communicates with other entity running in IA
- * core through IPC mechanism which in turn messaging between IA core ad PMC.
- */
-
-#include <linux/acpi.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-
-#include <asm/intel_pmc_ipc.h>
-
-#include <linux/platform_data/itco_wdt.h>
-
-/*
- * IPC registers
- * The IA write to IPC_CMD command register triggers an interrupt to the ARC,
- * The ARC handles the interrupt and services it, writing optional data to
- * the IPC1 registers, updates the IPC_STS response register with the status.
- */
-#define IPC_CMD 0x00
-#define IPC_CMD_MSI BIT(8)
-#define IPC_CMD_SIZE 16
-#define IPC_CMD_SUBCMD 12
-#define IPC_STATUS 0x04
-#define IPC_STATUS_IRQ BIT(2)
-#define IPC_STATUS_ERR BIT(1)
-#define IPC_STATUS_BUSY BIT(0)
-#define IPC_SPTR 0x08
-#define IPC_DPTR 0x0C
-#define IPC_WRITE_BUFFER 0x80
-#define IPC_READ_BUFFER 0x90
-
-/* Residency with clock rate at 19.2MHz to usecs */
-#define S0IX_RESIDENCY_IN_USECS(d, s) \
-({ \
- u64 result = 10ull * ((d) + (s)); \
- do_div(result, 192); \
- result; \
-})
-
-/*
- * 16-byte buffer for sending data associated with IPC command.
- */
-#define IPC_DATA_BUFFER_SIZE 16
-
-#define IPC_LOOP_CNT 3000000
-#define IPC_MAX_SEC 3
-
-#define IPC_TRIGGER_MODE_IRQ true
-
-/* exported resources from IFWI */
-#define PLAT_RESOURCE_IPC_INDEX 0
-#define PLAT_RESOURCE_IPC_SIZE 0x1000
-#define PLAT_RESOURCE_GCR_OFFSET 0x1000
-#define PLAT_RESOURCE_GCR_SIZE 0x1000
-#define PLAT_RESOURCE_BIOS_DATA_INDEX 1
-#define PLAT_RESOURCE_BIOS_IFACE_INDEX 2
-#define PLAT_RESOURCE_TELEM_SSRAM_INDEX 3
-#define PLAT_RESOURCE_ISP_DATA_INDEX 4
-#define PLAT_RESOURCE_ISP_IFACE_INDEX 5
-#define PLAT_RESOURCE_GTD_DATA_INDEX 6
-#define PLAT_RESOURCE_GTD_IFACE_INDEX 7
-#define PLAT_RESOURCE_ACPI_IO_INDEX 0
-
-/*
- * BIOS does not create an ACPI device for each PMC function,
- * but exports multiple resources from one ACPI device(IPC) for
- * multiple functions. This driver is responsible to create a
- * platform device and to export resources for those functions.
- */
-#define TCO_DEVICE_NAME "iTCO_wdt"
-#define SMI_EN_OFFSET 0x40
-#define SMI_EN_SIZE 4
-#define TCO_BASE_OFFSET 0x60
-#define TCO_REGS_SIZE 16
-#define PUNIT_DEVICE_NAME "intel_punit_ipc"
-#define TELEMETRY_DEVICE_NAME "intel_telemetry"
-#define TELEM_SSRAM_SIZE 240
-#define TELEM_PMC_SSRAM_OFFSET 0x1B00
-#define TELEM_PUNIT_SSRAM_OFFSET 0x1A00
-#define TCO_PMC_OFFSET 0x08
-#define TCO_PMC_SIZE 0x04
-
-/* PMC register bit definitions */
-
-/* PMC_CFG_REG bit masks */
-#define PMC_CFG_NO_REBOOT_MASK BIT_MASK(4)
-#define PMC_CFG_NO_REBOOT_EN (1 << 4)
-#define PMC_CFG_NO_REBOOT_DIS (0 << 4)
-
-static struct intel_pmc_ipc_dev {
- struct device *dev;
- void __iomem *ipc_base;
- bool irq_mode;
- int irq;
- int cmd;
- struct completion cmd_complete;
-
- /* The following PMC BARs share the same ACPI device with the IPC */
- resource_size_t acpi_io_base;
- int acpi_io_size;
- struct platform_device *tco_dev;
-
- /* gcr */
- void __iomem *gcr_mem_base;
- bool has_gcr_regs;
- spinlock_t gcr_lock;
-
- /* punit */
- struct platform_device *punit_dev;
- unsigned int punit_res_count;
-
- /* Telemetry */
- resource_size_t telem_pmc_ssram_base;
- resource_size_t telem_punit_ssram_base;
- int telem_pmc_ssram_size;
- int telem_punit_ssram_size;
- u8 telem_res_inval;
- struct platform_device *telemetry_dev;
-} ipcdev;
-
-static char *ipc_err_sources[] = {
- [IPC_ERR_NONE] =
- "no error",
- [IPC_ERR_CMD_NOT_SUPPORTED] =
- "command not supported",
- [IPC_ERR_CMD_NOT_SERVICED] =
- "command not serviced",
- [IPC_ERR_UNABLE_TO_SERVICE] =
- "unable to service",
- [IPC_ERR_CMD_INVALID] =
- "command invalid",
- [IPC_ERR_CMD_FAILED] =
- "command failed",
- [IPC_ERR_EMSECURITY] =
- "Invalid Battery",
- [IPC_ERR_UNSIGNEDKERNEL] =
- "Unsigned kernel",
-};
-
-/* Prevent concurrent calls to the PMC */
-static DEFINE_MUTEX(ipclock);
-
-static inline void ipc_send_command(u32 cmd)
-{
- ipcdev.cmd = cmd;
- if (ipcdev.irq_mode) {
- reinit_completion(&ipcdev.cmd_complete);
- cmd |= IPC_CMD_MSI;
- }
- writel(cmd, ipcdev.ipc_base + IPC_CMD);
-}
-
-static inline u32 ipc_read_status(void)
-{
- return readl(ipcdev.ipc_base + IPC_STATUS);
-}
-
-static inline void ipc_data_writel(u32 data, u32 offset)
-{
- writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
-}
-
-static inline u32 ipc_data_readl(u32 offset)
-{
- return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
-}
-
-static inline u64 gcr_data_readq(u32 offset)
-{
- return readq(ipcdev.gcr_mem_base + offset);
-}
-
-static inline int is_gcr_valid(u32 offset)
-{
- if (!ipcdev.has_gcr_regs)
- return -EACCES;
-
- if (offset > PLAT_RESOURCE_GCR_SIZE)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register
- * @offset: offset of GCR register from GCR address base
- * @data: data pointer for storing the register output
- *
- * Reads the 64-bit PMC GCR register at given offset.
- *
- * Return: negative value on error or 0 on success.
- */
-int intel_pmc_gcr_read64(u32 offset, u64 *data)
-{
- int ret;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0) {
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
- }
-
- *data = readq(ipcdev.gcr_mem_base + offset);
-
- spin_unlock(&ipcdev.gcr_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64);
-
-/**
- * intel_pmc_gcr_update() - Update PMC GCR register bits
- * @offset: offset of GCR register from GCR address base
- * @mask: bit mask for update operation
- * @val: update value
- *
- * Updates the bits of given GCR register as specified by
- * @mask and @val.
- *
- * Return: negative value on error or 0 on success.
- */
-static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
-{
- u32 new_val;
- int ret = 0;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0)
- goto gcr_ipc_unlock;
-
- new_val = readl(ipcdev.gcr_mem_base + offset);
-
- new_val &= ~mask;
- new_val |= val & mask;
-
- writel(new_val, ipcdev.gcr_mem_base + offset);
-
- new_val = readl(ipcdev.gcr_mem_base + offset);
-
- /* check whether the bit update is successful */
- if ((new_val & mask) != (val & mask)) {
- ret = -EIO;
- goto gcr_ipc_unlock;
- }
-
-gcr_ipc_unlock:
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
-}
-
-static int update_no_reboot_bit(void *priv, bool set)
-{
- u32 value = set ? PMC_CFG_NO_REBOOT_EN : PMC_CFG_NO_REBOOT_DIS;
-
- return intel_pmc_gcr_update(PMC_GCR_PMC_CFG_REG,
- PMC_CFG_NO_REBOOT_MASK, value);
-}
-
-static int intel_pmc_ipc_check_status(void)
-{
- int status;
- int ret = 0;
-
- if (ipcdev.irq_mode) {
- if (0 == wait_for_completion_timeout(
- &ipcdev.cmd_complete, IPC_MAX_SEC * HZ))
- ret = -ETIMEDOUT;
- } else {
- int loop_count = IPC_LOOP_CNT;
-
- while ((ipc_read_status() & IPC_STATUS_BUSY) && --loop_count)
- udelay(1);
- if (loop_count == 0)
- ret = -ETIMEDOUT;
- }
-
- status = ipc_read_status();
- if (ret == -ETIMEDOUT) {
- dev_err(ipcdev.dev,
- "IPC timed out, TS=0x%x, CMD=0x%x\n",
- status, ipcdev.cmd);
- return ret;
- }
-
- if (status & IPC_STATUS_ERR) {
- int i;
-
- ret = -EIO;
- i = (status >> IPC_CMD_SIZE) & 0xFF;
- if (i < ARRAY_SIZE(ipc_err_sources))
- dev_err(ipcdev.dev,
- "IPC failed: %s, STS=0x%x, CMD=0x%x\n",
- ipc_err_sources[i], status, ipcdev.cmd);
- else
- dev_err(ipcdev.dev,
- "IPC failed: unknown, STS=0x%x, CMD=0x%x\n",
- status, ipcdev.cmd);
- if ((i == IPC_ERR_UNSIGNEDKERNEL) || (i == IPC_ERR_EMSECURITY))
- ret = -EACCES;
- }
-
- return ret;
-}
-
-/**
- * intel_pmc_ipc_simple_command() - Simple IPC command
- * @cmd: IPC command code.
- * @sub: IPC command sub type.
- *
- * Send a simple IPC command to PMC when don't need to specify
- * input/output data and source/dest pointers.
- *
- * Return: an IPC error code or 0 on success.
- */
-static int intel_pmc_ipc_simple_command(int cmd, int sub)
-{
- int ret;
-
- mutex_lock(&ipclock);
- if (ipcdev.dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
- ipc_send_command(sub << IPC_CMD_SUBCMD | cmd);
- ret = intel_pmc_ipc_check_status();
- mutex_unlock(&ipclock);
-
- return ret;
-}
-
-/**
- * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
- * @cmd: IPC command code.
- * @sub: IPC command sub type.
- * @in: input data of this IPC command.
- * @inlen: input data length in bytes.
- * @out: output data of this IPC command.
- * @outlen: output data length in dwords.
- * @sptr: data writing to SPTR register.
- * @dptr: data writing to DPTR register.
- *
- * Send an IPC command to PMC with input/output data and source/dest pointers.
- *
- * Return: an IPC error code or 0 on success.
- */
-static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
- u32 outlen, u32 dptr, u32 sptr)
-{
- u32 wbuf[4] = { 0 };
- int ret;
- int i;
-
- if (inlen > IPC_DATA_BUFFER_SIZE || outlen > IPC_DATA_BUFFER_SIZE / 4)
- return -EINVAL;
-
- mutex_lock(&ipclock);
- if (ipcdev.dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
- memcpy(wbuf, in, inlen);
- writel(dptr, ipcdev.ipc_base + IPC_DPTR);
- writel(sptr, ipcdev.ipc_base + IPC_SPTR);
- /* The input data register is 32bit register and inlen is in Byte */
- for (i = 0; i < ((inlen + 3) / 4); i++)
- ipc_data_writel(wbuf[i], 4 * i);
- ipc_send_command((inlen << IPC_CMD_SIZE) |
- (sub << IPC_CMD_SUBCMD) | cmd);
- ret = intel_pmc_ipc_check_status();
- if (!ret) {
- /* out is read from 32bit register and outlen is in 32bit */
- for (i = 0; i < outlen; i++)
- *out++ = ipc_data_readl(4 * i);
- }
- mutex_unlock(&ipclock);
-
- return ret;
-}
-
-/**
- * intel_pmc_ipc_command() - IPC command with input/output data
- * @cmd: IPC command code.
- * @sub: IPC command sub type.
- * @in: input data of this IPC command.
- * @inlen: input data length in bytes.
- * @out: output data of this IPC command.
- * @outlen: output data length in dwords.
- *
- * Send an IPC command to PMC with input/output data.
- *
- * Return: an IPC error code or 0 on success.
- */
-int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
- u32 *out, u32 outlen)
-{
- return intel_pmc_ipc_raw_cmd(cmd, sub, in, inlen, out, outlen, 0, 0);
-}
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_command);
-
-static irqreturn_t ioc(int irq, void *dev_id)
-{
- int status;
-
- if (ipcdev.irq_mode) {
- status = ipc_read_status();
- writel(status | IPC_STATUS_IRQ, ipcdev.ipc_base + IPC_STATUS);
- }
- complete(&ipcdev.cmd_complete);
-
- return IRQ_HANDLED;
-}
-
-static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct intel_pmc_ipc_dev *pmc = &ipcdev;
- int ret;
-
- /* Only one PMC is supported */
- if (pmc->dev)
- return -EBUSY;
-
- pmc->irq_mode = IPC_TRIGGER_MODE_IRQ;
-
- spin_lock_init(&ipcdev.gcr_lock);
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
- ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
- if (ret)
- return ret;
-
- init_completion(&pmc->cmd_complete);
-
- pmc->ipc_base = pcim_iomap_table(pdev)[0];
-
- ret = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_pmc_ipc",
- pmc);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request irq\n");
- return ret;
- }
-
- pmc->dev = &pdev->dev;
-
- pci_set_drvdata(pdev, pmc);
-
- return 0;
-}
-
-static const struct pci_device_id ipc_pci_ids[] = {
- {PCI_VDEVICE(INTEL, 0x0a94), 0},
- {PCI_VDEVICE(INTEL, 0x1a94), 0},
- {PCI_VDEVICE(INTEL, 0x5a94), 0},
- { 0,}
-};
-MODULE_DEVICE_TABLE(pci, ipc_pci_ids);
-
-static struct pci_driver ipc_pci_driver = {
- .name = "intel_pmc_ipc",
- .id_table = ipc_pci_ids,
- .probe = ipc_pci_probe,
-};
-
-static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int subcmd;
- int cmd;
- int ret;
-
- ret = sscanf(buf, "%d %d", &cmd, &subcmd);
- if (ret != 2) {
- dev_err(dev, "Error args\n");
- return -EINVAL;
- }
-
- ret = intel_pmc_ipc_simple_command(cmd, subcmd);
- if (ret) {
- dev_err(dev, "command %d error with %d\n", cmd, ret);
- return ret;
- }
- return (ssize_t)count;
-}
-static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store);
-
-static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long val;
- int subcmd;
- int ret;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret)
- return ret;
-
- if (val)
- subcmd = 1;
- else
- subcmd = 0;
- ret = intel_pmc_ipc_simple_command(PMC_IPC_NORTHPEAK_CTRL, subcmd);
- if (ret) {
- dev_err(dev, "command north %d error with %d\n", subcmd, ret);
- return ret;
- }
- return (ssize_t)count;
-}
-static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store);
-
-static struct attribute *intel_ipc_attrs[] = {
- &dev_attr_northpeak.attr,
- &dev_attr_simplecmd.attr,
- NULL
-};
-
-static const struct attribute_group intel_ipc_group = {
- .attrs = intel_ipc_attrs,
-};
-
-static const struct attribute_group *intel_ipc_groups[] = {
- &intel_ipc_group,
- NULL
-};
-
-static struct resource punit_res_array[] = {
- /* Punit BIOS */
- {
- .flags = IORESOURCE_MEM,
- },
- {
- .flags = IORESOURCE_MEM,
- },
- /* Punit ISP */
- {
- .flags = IORESOURCE_MEM,
- },
- {
- .flags = IORESOURCE_MEM,
- },
- /* Punit GTD */
- {
- .flags = IORESOURCE_MEM,
- },
- {
- .flags = IORESOURCE_MEM,
- },
-};
-
-#define TCO_RESOURCE_ACPI_IO 0
-#define TCO_RESOURCE_SMI_EN_IO 1
-#define TCO_RESOURCE_GCR_MEM 2
-static struct resource tco_res[] = {
- /* ACPI - TCO */
- {
- .flags = IORESOURCE_IO,
- },
- /* ACPI - SMI */
- {
- .flags = IORESOURCE_IO,
- },
-};
-
-static struct itco_wdt_platform_data tco_info = {
- .name = "Apollo Lake SoC",
- .version = 5,
- .no_reboot_priv = &ipcdev,
- .update_no_reboot_bit = update_no_reboot_bit,
-};
-
-#define TELEMETRY_RESOURCE_PUNIT_SSRAM 0
-#define TELEMETRY_RESOURCE_PMC_SSRAM 1
-static struct resource telemetry_res[] = {
- /*Telemetry*/
- {
- .flags = IORESOURCE_MEM,
- },
- {
- .flags = IORESOURCE_MEM,
- },
-};
-
-static int ipc_create_punit_device(void)
-{
- struct platform_device *pdev;
- const struct platform_device_info pdevinfo = {
- .parent = ipcdev.dev,
- .name = PUNIT_DEVICE_NAME,
- .id = -1,
- .res = punit_res_array,
- .num_res = ipcdev.punit_res_count,
- };
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- ipcdev.punit_dev = pdev;
-
- return 0;
-}
-
-static int ipc_create_tco_device(void)
-{
- struct platform_device *pdev;
- struct resource *res;
- const struct platform_device_info pdevinfo = {
- .parent = ipcdev.dev,
- .name = TCO_DEVICE_NAME,
- .id = -1,
- .res = tco_res,
- .num_res = ARRAY_SIZE(tco_res),
- .data = &tco_info,
- .size_data = sizeof(tco_info),
- };
-
- res = tco_res + TCO_RESOURCE_ACPI_IO;
- res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET;
- res->end = res->start + TCO_REGS_SIZE - 1;
-
- res = tco_res + TCO_RESOURCE_SMI_EN_IO;
- res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET;
- res->end = res->start + SMI_EN_SIZE - 1;
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- ipcdev.tco_dev = pdev;
-
- return 0;
-}
-
-static int ipc_create_telemetry_device(void)
-{
- struct platform_device *pdev;
- struct resource *res;
- const struct platform_device_info pdevinfo = {
- .parent = ipcdev.dev,
- .name = TELEMETRY_DEVICE_NAME,
- .id = -1,
- .res = telemetry_res,
- .num_res = ARRAY_SIZE(telemetry_res),
- };
-
- res = telemetry_res + TELEMETRY_RESOURCE_PUNIT_SSRAM;
- res->start = ipcdev.telem_punit_ssram_base;
- res->end = res->start + ipcdev.telem_punit_ssram_size - 1;
-
- res = telemetry_res + TELEMETRY_RESOURCE_PMC_SSRAM;
- res->start = ipcdev.telem_pmc_ssram_base;
- res->end = res->start + ipcdev.telem_pmc_ssram_size - 1;
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- ipcdev.telemetry_dev = pdev;
-
- return 0;
-}
-
-static int ipc_create_pmc_devices(void)
-{
- int ret;
-
- /* If we have ACPI based watchdog use that instead */
- if (!acpi_has_watchdog()) {
- ret = ipc_create_tco_device();
- if (ret) {
- dev_err(ipcdev.dev, "Failed to add tco platform device\n");
- return ret;
- }
- }
-
- ret = ipc_create_punit_device();
- if (ret) {
- dev_err(ipcdev.dev, "Failed to add punit platform device\n");
- platform_device_unregister(ipcdev.tco_dev);
- return ret;
- }
-
- if (!ipcdev.telem_res_inval) {
- ret = ipc_create_telemetry_device();
- if (ret) {
- dev_warn(ipcdev.dev,
- "Failed to add telemetry platform device\n");
- platform_device_unregister(ipcdev.punit_dev);
- platform_device_unregister(ipcdev.tco_dev);
- }
- }
-
- return ret;
-}
-
-static int ipc_plat_get_res(struct platform_device *pdev)
-{
- struct resource *res, *punit_res = punit_res_array;
- void __iomem *addr;
- int size;
-
- res = platform_get_resource(pdev, IORESOURCE_IO,
- PLAT_RESOURCE_ACPI_IO_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get io resource\n");
- return -ENXIO;
- }
- size = resource_size(res);
- ipcdev.acpi_io_base = res->start;
- ipcdev.acpi_io_size = size;
- dev_info(&pdev->dev, "io res: %pR\n", res);
-
- ipcdev.punit_res_count = 0;
-
- /* This is index 0 to cover BIOS data register */
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_BIOS_DATA_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit BIOS data\n");
- return -ENXIO;
- }
- punit_res[ipcdev.punit_res_count++] = *res;
- dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
-
- /* This is index 1 to cover BIOS interface register */
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_BIOS_IFACE_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
- return -ENXIO;
- }
- punit_res[ipcdev.punit_res_count++] = *res;
- dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
-
- /* This is index 2 to cover ISP data register, optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_ISP_DATA_INDEX);
- if (res) {
- punit_res[ipcdev.punit_res_count++] = *res;
- dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
- }
-
- /* This is index 3 to cover ISP interface register, optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_ISP_IFACE_INDEX);
- if (res) {
- punit_res[ipcdev.punit_res_count++] = *res;
- dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
- }
-
- /* This is index 4 to cover GTD data register, optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_GTD_DATA_INDEX);
- if (res) {
- punit_res[ipcdev.punit_res_count++] = *res;
- dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
- }
-
- /* This is index 5 to cover GTD interface register, optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_GTD_IFACE_INDEX);
- if (res) {
- punit_res[ipcdev.punit_res_count++] = *res;
- dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_IPC_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get ipc resource\n");
- return -ENXIO;
- }
- size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE;
- res->end = res->start + size - 1;
-
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
-
- ipcdev.ipc_base = addr;
-
- ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET;
- dev_info(&pdev->dev, "ipc res: %pR\n", res);
-
- ipcdev.telem_res_inval = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_TELEM_SSRAM_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get telemetry ssram resource\n");
- ipcdev.telem_res_inval = 1;
- } else {
- ipcdev.telem_punit_ssram_base = res->start +
- TELEM_PUNIT_SSRAM_OFFSET;
- ipcdev.telem_punit_ssram_size = TELEM_SSRAM_SIZE;
- ipcdev.telem_pmc_ssram_base = res->start +
- TELEM_PMC_SSRAM_OFFSET;
- ipcdev.telem_pmc_ssram_size = TELEM_SSRAM_SIZE;
- dev_info(&pdev->dev, "telemetry ssram res: %pR\n", res);
- }
-
- return 0;
-}
-
-/**
- * intel_pmc_s0ix_counter_read() - Read S0ix residency.
- * @data: Out param that contains current S0ix residency count.
- *
- * Return: an error code or 0 on success.
- */
-int intel_pmc_s0ix_counter_read(u64 *data)
-{
- u64 deep, shlw;
-
- if (!ipcdev.has_gcr_regs)
- return -EACCES;
-
- deep = gcr_data_readq(PMC_GCR_TELEM_DEEP_S0IX_REG);
- shlw = gcr_data_readq(PMC_GCR_TELEM_SHLW_S0IX_REG);
-
- *data = S0IX_RESIDENCY_IN_USECS(deep, shlw);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_s0ix_counter_read);
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id ipc_acpi_ids[] = {
- { "INT34D2", 0},
- { }
-};
-MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids);
-#endif
-
-static int ipc_plat_probe(struct platform_device *pdev)
-{
- int ret;
-
- ipcdev.dev = &pdev->dev;
- ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
- init_completion(&ipcdev.cmd_complete);
- spin_lock_init(&ipcdev.gcr_lock);
-
- ipcdev.irq = platform_get_irq(pdev, 0);
- if (ipcdev.irq < 0)
- return -EINVAL;
-
- ret = ipc_plat_get_res(pdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request resource\n");
- return ret;
- }
-
- ret = ipc_create_pmc_devices();
- if (ret) {
- dev_err(&pdev->dev, "Failed to create pmc devices\n");
- return ret;
- }
-
- if (devm_request_irq(&pdev->dev, ipcdev.irq, ioc, IRQF_NO_SUSPEND,
- "intel_pmc_ipc", &ipcdev)) {
- dev_err(&pdev->dev, "Failed to request irq\n");
- ret = -EBUSY;
- goto err_irq;
- }
-
- ipcdev.has_gcr_regs = true;
-
- return 0;
-
-err_irq:
- platform_device_unregister(ipcdev.tco_dev);
- platform_device_unregister(ipcdev.punit_dev);
- platform_device_unregister(ipcdev.telemetry_dev);
-
- return ret;
-}
-
-static int ipc_plat_remove(struct platform_device *pdev)
-{
- devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
- platform_device_unregister(ipcdev.tco_dev);
- platform_device_unregister(ipcdev.punit_dev);
- platform_device_unregister(ipcdev.telemetry_dev);
- ipcdev.dev = NULL;
- return 0;
-}
-
-static struct platform_driver ipc_plat_driver = {
- .remove = ipc_plat_remove,
- .probe = ipc_plat_probe,
- .driver = {
- .name = "pmc-ipc-plat",
- .acpi_match_table = ACPI_PTR(ipc_acpi_ids),
- .dev_groups = intel_ipc_groups,
- },
-};
-
-static int __init intel_pmc_ipc_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&ipc_plat_driver);
- if (ret) {
- pr_err("Failed to register PMC ipc platform driver\n");
- return ret;
- }
- ret = pci_register_driver(&ipc_pci_driver);
- if (ret) {
- pr_err("Failed to register PMC ipc pci driver\n");
- platform_driver_unregister(&ipc_plat_driver);
- return ret;
- }
- return ret;
-}
-
-static void __exit intel_pmc_ipc_exit(void)
-{
- pci_unregister_driver(&ipc_pci_driver);
- platform_driver_unregister(&ipc_plat_driver);
-}
-
-MODULE_AUTHOR("Zha Qipeng <qipeng.zha@intel.com>");
-MODULE_DESCRIPTION("Intel PMC IPC driver");
-MODULE_LICENSE("GPL v2");
-
-/* Some modules are dependent on this, so init earlier */
-fs_initcall(intel_pmc_ipc_init);
-module_exit(intel_pmc_ipc_exit);
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 3d7da5266136..d9cf7f7602b0 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -18,11 +18,10 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/pm.h>
-#include <linux/sfi.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
-#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
@@ -55,14 +54,14 @@
#define IPC_IOC 0x100 /* IPC command register IOC bit */
struct intel_scu_ipc_dev {
- struct device *dev;
+ struct device dev;
+ struct resource mem;
+ struct module *owner;
+ int irq;
void __iomem *ipc_base;
struct completion cmd_complete;
- u8 irq_mode;
};
-static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
-
#define IPC_STATUS 0x04
#define IPC_STATUS_IRQ BIT(2)
#define IPC_STATUS_ERR BIT(1)
@@ -78,8 +77,110 @@ static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
/* Timeout in jiffies */
#define IPC_TIMEOUT (3 * HZ)
+static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
+static struct class intel_scu_ipc_class = {
+ .name = "intel_scu_ipc",
+ .owner = THIS_MODULE,
+};
+
+/**
+ * intel_scu_ipc_dev_get() - Get SCU IPC instance
+ *
+ * The recommended new API takes the SCU IPC instance as a parameter,
+ * and drivers call this function to obtain that instance. This also
+ * makes sure the driver providing the IPC functionality cannot be
+ * unloaded while the caller holds the instance.
+ *
+ * Call intel_scu_ipc_dev_put() to release the instance.
+ *
+ * Returns %NULL if SCU IPC is not currently available.
+ */
+struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
+{
+ struct intel_scu_ipc_dev *scu = NULL;
+
+ mutex_lock(&ipclock);
+ if (ipcdev) {
+ get_device(&ipcdev->dev);
+ /*
+ * Prevent the IPC provider from being unloaded while it
+ * is being used.
+ */
+ if (!try_module_get(ipcdev->owner))
+ put_device(&ipcdev->dev);
+ else
+ scu = ipcdev;
+ }
+
+ mutex_unlock(&ipclock);
+ return scu;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
+
+/**
+ * intel_scu_ipc_dev_put() - Put SCU IPC instance
+ * @scu: SCU IPC instance
+ *
+ * This function releases the SCU IPC instance retrieved from
+ * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
+ * unloaded.
+ */
+void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
+{
+ if (scu) {
+ module_put(scu->owner);
+ put_device(&scu->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
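/*
 * A minimal sketch of the get/put pairing from a consumer driver's
 * point of view; my_scu, my_probe() and my_remove() are hypothetical
 * names, not part of this change.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <asm/intel_scu_ipc.h>

static struct intel_scu_ipc_dev *my_scu;

static int my_probe(struct device *dev)
{
	my_scu = intel_scu_ipc_dev_get();
	if (!my_scu)
		return -EPROBE_DEFER;	/* provider not registered yet */
	return 0;
}

static void my_remove(struct device *dev)
{
	intel_scu_ipc_dev_put(my_scu);	/* drops module and device refs */
	my_scu = NULL;
}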
+
+struct intel_scu_ipc_devres {
+ struct intel_scu_ipc_dev *scu;
+};
+
+static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
+{
+ struct intel_scu_ipc_devres *dr = res;
+ struct intel_scu_ipc_dev *scu = dr->scu;
+
+ intel_scu_ipc_dev_put(scu);
+}
+
+/**
+ * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
+ * @dev: Device requesting the SCU IPC device
+ *
+ * Like intel_scu_ipc_dev_get(), this returns the SCU IPC instance and
+ * makes sure the driver providing the IPC functionality cannot be
+ * unloaded while the caller holds the instance. The reference is
+ * dropped automatically when @dev is unbound.
+ *
+ * Returns %NULL if SCU IPC is not currently available.
+ */
+struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
+{
+ struct intel_scu_ipc_devres *dr;
+ struct intel_scu_ipc_dev *scu;
+
+ dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return NULL;
+
+ scu = intel_scu_ipc_dev_get();
+ if (!scu) {
+ devres_free(dr);
+ return NULL;
+ }
+
+ dr->scu = scu;
+ devres_add(dev, dr);
+
+ return scu;
+}
+EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
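/*
 * A sketch of the managed variant in a probe path, mirroring how the
 * telemetry driver converted later in this patch uses it;
 * example_probe() is a hypothetical name.
 */
#include <linux/platform_device.h>
#include <asm/intel_scu_ipc.h>

static int example_probe(struct platform_device *pdev)
{
	struct intel_scu_ipc_dev *scu;

	scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
	if (!scu)
		return -EPROBE_DEFER;	/* retried once the provider is up */

	/* valid until the device is unbound; no explicit put needed */
	return 0;
}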
+
/*
* Send ipc command
* Command Register (Write Only):
@@ -143,7 +244,6 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
usleep_range(50, 100);
} while (time_before(jiffies, end));
- dev_err(scu->dev, "IPC timed out");
return -ETIMEDOUT;
}
@@ -152,10 +252,8 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
- if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) {
- dev_err(scu->dev, "IPC timed out\n");
+ if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
return -ETIMEDOUT;
- }
status = ipc_read_status(scu);
if (status & IPC_STATUS_ERR)
@@ -166,13 +264,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
{
- return scu->irq_mode ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
+ return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}
/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
-static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
+static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ u32 count, u32 op, u32 id)
{
- struct intel_scu_ipc_dev *scu = &ipcdev;
int nc;
u32 offset = 0;
int err;
@@ -182,8 +280,9 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
memset(cbuf, 0, sizeof(cbuf));
mutex_lock(&ipclock);
-
- if (scu->dev == NULL) {
+ if (!scu)
+ scu = ipcdev;
+ if (!scu) {
mutex_unlock(&ipclock);
return -ENODEV;
}
@@ -222,7 +321,8 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
}
/**
- * intel_scu_ipc_ioread8 - read a word via the SCU
+ * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
+ * @scu: Optional SCU IPC instance
* @addr: Register on SCU
* @data: Return pointer for read byte
*
@@ -231,14 +331,15 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
*
* This function may sleep.
*/
-int intel_scu_ipc_ioread8(u16 addr, u8 *data)
+int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
{
- return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+ return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
-EXPORT_SYMBOL(intel_scu_ipc_ioread8);
+EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
/**
- * intel_scu_ipc_iowrite8 - write a byte via the SCU
+ * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
+ * @scu: Optional SCU IPC instance
* @addr: Register on SCU
* @data: Byte to write
*
@@ -247,14 +348,15 @@ EXPORT_SYMBOL(intel_scu_ipc_ioread8);
*
* This function may sleep.
*/
-int intel_scu_ipc_iowrite8(u16 addr, u8 data)
+int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
{
- return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+ return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
+EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
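/*
 * A sketch of a read-modify-write through the byte accessors; register
 * address 0x03 and the bit value are invented for illustration. Passing
 * a NULL @scu falls back to the global instance, as the pwr_reg_rdwr()
 * change above shows.
 */
static int toggle_example_bit(struct intel_scu_ipc_dev *scu)
{
	u8 val;
	int err;

	err = intel_scu_ipc_dev_ioread8(scu, 0x03, &val);
	if (err)
		return err;
	return intel_scu_ipc_dev_iowrite8(scu, 0x03, val | 0x01);
}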
/**
- * intel_scu_ipc_readvv - read a set of registers
+ * intel_scu_ipc_dev_readv() - Read a set of registers
+ * @scu: Optional SCU IPC instance
* @addr: Register list
* @data: Bytes to return
* @len: Length of array
@@ -266,14 +368,16 @@ EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
*
* This function may sleep.
*/
-int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
+int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ size_t len)
{
- return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+ return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
-EXPORT_SYMBOL(intel_scu_ipc_readv);
+EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
/**
- * intel_scu_ipc_writev - write a set of registers
+ * intel_scu_ipc_dev_writev() - Write a set of registers
+ * @scu: Optional SCU IPC instance
* @addr: Register list
* @data: Bytes to write
* @len: Length of array
@@ -285,16 +389,18 @@ EXPORT_SYMBOL(intel_scu_ipc_readv);
*
* This function may sleep.
*/
-int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
+int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ size_t len)
{
- return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+ return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
-EXPORT_SYMBOL(intel_scu_ipc_writev);
+EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
/**
- * intel_scu_ipc_update_register - r/m/w a register
+ * intel_scu_ipc_dev_update() - Update a register
+ * @scu: Optional SCU IPC instance
* @addr: Register address
- * @bits: Bits to update
+ * @data: Bits to update
* @mask: Mask of bits to update
*
* Read-modify-write power control unit register. The first data argument
@@ -305,15 +411,17 @@ EXPORT_SYMBOL(intel_scu_ipc_writev);
* This function may sleep. Locking between SCU accesses is handled
* for the caller.
*/
-int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
+int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
+ u8 mask)
{
- u8 data[2] = { bits, mask };
- return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
+ u8 tmp[2] = { data, mask };
+ return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
}
-EXPORT_SYMBOL(intel_scu_ipc_update_register);
+EXPORT_SYMBOL(intel_scu_ipc_dev_update);
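/*
 * The same effect as the two-step read/write sketch above, but done as
 * a single IPC so the read-modify-write happens firmware-side: set bit
 * 0 of the hypothetical register 0x03, leaving the other bits alone.
 */
static int set_example_bit(struct intel_scu_ipc_dev *scu)
{
	return intel_scu_ipc_dev_update(scu, 0x03, 0x01, 0x01);
}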
/**
- * intel_scu_ipc_simple_command - send a simple command
+ * intel_scu_ipc_dev_simple_command() - Send a simple command
+ * @scu: Optional SCU IPC instance
* @cmd: Command
* @sub: Sub type
*
@@ -324,62 +432,89 @@ EXPORT_SYMBOL(intel_scu_ipc_update_register);
* This function may sleep. Locking for SCU accesses is handled for the
* caller.
*/
-int intel_scu_ipc_simple_command(int cmd, int sub)
+int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub)
{
- struct intel_scu_ipc_dev *scu = &ipcdev;
+ u32 cmdval;
int err;
mutex_lock(&ipclock);
- if (scu->dev == NULL) {
+ if (!scu)
+ scu = ipcdev;
+ if (!scu) {
mutex_unlock(&ipclock);
return -ENODEV;
}
- ipc_command(scu, sub << 12 | cmd);
+ cmdval = sub << 12 | cmd;
+ ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
mutex_unlock(&ipclock);
+ if (err)
+ dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
return err;
}
-EXPORT_SYMBOL(intel_scu_ipc_simple_command);
+EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
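/*
 * The command register layout used above puts the sub type in bits
 * 15:12 and the command code in bits 11:0. A sketch issuing command
 * 0xef with sub type 1 (values invented for illustration):
 */
static int example_simple(struct intel_scu_ipc_dev *scu)
{
	/* writes (1 << 12) | 0xef to the command register and waits */
	return intel_scu_ipc_dev_simple_command(scu, 0xef, 1);
}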
/**
- * intel_scu_ipc_command - command with data
+ * intel_scu_ipc_dev_command_with_size() - Command with data
+ * @scu: Optional SCU IPC instance
* @cmd: Command
* @sub: Sub type
* @in: Input data
- * @inlen: Input length in dwords
+ * @inlen: Input length in bytes
+ * @size: Input size written to the IPC command register in whatever
+ * units (dword, byte) the particular firmware requires. Normally
+ * should be the same as @inlen.
* @out: Output data
- * @outlen: Output length in dwords
+ * @outlen: Output length in bytes
*
* Issue a command to the SCU which involves data transfers. Do the
* data copies under the lock but leave it for the caller to interpret.
*/
-int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
- u32 *out, int outlen)
+int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub, const void *in, size_t inlen,
+ size_t size, void *out, size_t outlen)
{
- struct intel_scu_ipc_dev *scu = &ipcdev;
+ size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
+ size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
+ u32 cmdval, inbuf[4] = {};
int i, err;
+ if (inbuflen > 4 || outbuflen > 4)
+ return -EINVAL;
+
mutex_lock(&ipclock);
- if (scu->dev == NULL) {
+ if (!scu)
+ scu = ipcdev;
+ if (!scu) {
mutex_unlock(&ipclock);
return -ENODEV;
}
- for (i = 0; i < inlen; i++)
- ipc_data_writel(scu, *in++, 4 * i);
+ memcpy(inbuf, in, inlen);
+ for (i = 0; i < inbuflen; i++)
+ ipc_data_writel(scu, inbuf[i], 4 * i);
- ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
+ cmdval = (size << 16) | (sub << 12) | cmd;
+ ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
if (!err) {
- for (i = 0; i < outlen; i++)
- *out++ = ipc_data_readl(scu, 4 * i);
+ u32 outbuf[4] = {};
+
+ for (i = 0; i < outbuflen; i++)
+ outbuf[i] = ipc_data_readl(scu, 4 * i);
+
+ memcpy(out, outbuf, outlen);
}
mutex_unlock(&ipclock);
+ if (err)
+ dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
return err;
}
-EXPORT_SYMBOL(intel_scu_ipc_command);
+EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
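/*
 * Callers elsewhere in this patch (the telemetry driver) use
 * intel_scu_ipc_dev_command(), which is presumably a header wrapper
 * for the common case where the size written to the command register
 * equals @inlen; a sketch of what such an inline would look like:
 */
static inline int intel_scu_ipc_dev_command(struct intel_scu_ipc_dev *scu,
					    int cmd, int sub, const void *in,
					    size_t inlen, void *out,
					    size_t outlen)
{
	return intel_scu_ipc_dev_command_with_size(scu, cmd, sub, in, inlen,
						   inlen, out, outlen);
}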
/*
* Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
@@ -399,61 +534,179 @@ static irqreturn_t ioc(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void intel_scu_ipc_release(struct device *dev)
+{
+ struct intel_scu_ipc_dev *scu;
+
+ scu = container_of(dev, struct intel_scu_ipc_dev, dev);
+ if (scu->irq > 0)
+ free_irq(scu->irq, scu);
+ iounmap(scu->ipc_base);
+ release_mem_region(scu->mem.start, resource_size(&scu->mem));
+ kfree(scu);
+}
+
/**
- * ipc_probe - probe an Intel SCU IPC
- * @pdev: the PCI device matching
- * @id: entry in the match table
+ * __intel_scu_ipc_register() - Register SCU IPC device
+ * @parent: Parent device
+ * @scu_data: Data used to configure SCU IPC
+ * @owner: Module registering the SCU IPC device
*
- * Enable and install an intel SCU IPC. This appears in the PCI space
- * but uses some hard coded addresses as well.
+ * Call this function to register SCU IPC mechanism under @parent.
+ * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
+ * failure. The caller may use the returned instance if it needs to do
+ * SCU IPC calls itself.
*/
-static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+struct intel_scu_ipc_dev *
+__intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner)
{
int err;
- struct intel_scu_ipc_dev *scu = &ipcdev;
+ struct intel_scu_ipc_dev *scu;
+ void __iomem *ipc_base;
- if (scu->dev) /* We support only one SCU */
- return -EBUSY;
+ mutex_lock(&ipclock);
+ /* We support only one IPC */
+ if (ipcdev) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
- err = pcim_enable_device(pdev);
- if (err)
- return err;
+ scu = kzalloc(sizeof(*scu), GFP_KERNEL);
+ if (!scu) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
- err = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
- if (err)
- return err;
+ scu->owner = owner;
+ scu->dev.parent = parent;
+ scu->dev.class = &intel_scu_ipc_class;
+ scu->dev.release = intel_scu_ipc_release;
+ dev_set_name(&scu->dev, "intel_scu_ipc");
+
+ if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
+ "intel_scu_ipc")) {
+ err = -EBUSY;
+ goto err_free;
+ }
+ ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
+ if (!ipc_base) {
+ err = -ENOMEM;
+ goto err_release;
+ }
+
+ scu->ipc_base = ipc_base;
+ scu->mem = scu_data->mem;
+ scu->irq = scu_data->irq;
init_completion(&scu->cmd_complete);
- scu->ipc_base = pcim_iomap_table(pdev)[0];
+ if (scu->irq > 0) {
+ err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
+ if (err)
+ goto err_unmap;
+ }
- err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
- scu);
- if (err)
- return err;
+ /*
+ * After this point intel_scu_ipc_release() takes care of
+ * releasing the SCU IPC resources once refcount drops to zero.
+ */
+ err = device_register(&scu->dev);
+ if (err) {
+ put_device(&scu->dev);
+ goto err_unlock;
+ }
/* Assign device at last */
- scu->dev = &pdev->dev;
+ ipcdev = scu;
+ mutex_unlock(&ipclock);
- intel_scu_devices_create();
+ return scu;
- pci_set_drvdata(pdev, scu);
- return 0;
+err_unmap:
+ iounmap(ipc_base);
+err_release:
+ release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
+err_free:
+ kfree(scu);
+err_unlock:
+ mutex_unlock(&ipclock);
+
+ return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
-static const struct pci_device_id pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x080e) },
- { PCI_VDEVICE(INTEL, 0x08ea) },
- { PCI_VDEVICE(INTEL, 0x11a0) },
- {}
-};
+/**
+ * intel_scu_ipc_unregister() - Unregister SCU IPC
+ * @scu: SCU IPC handle
+ *
+ * This unregisters the SCU IPC device and releases the acquired
+ * resources once the refcount goes to zero.
+ */
+void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
+{
+ mutex_lock(&ipclock);
+ if (!WARN_ON(!ipcdev)) {
+ ipcdev = NULL;
+ device_unregister(&scu->dev);
+ }
+ mutex_unlock(&ipclock);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
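/*
 * A sketch of how a provider that did not use the devm variant would
 * tear down in its remove path; example_remove() is hypothetical, as
 * both new drivers below are either builtin or use the managed API.
 */
static int example_remove(struct platform_device *pdev)
{
	struct intel_scu_ipc_dev *scu = platform_get_drvdata(pdev);

	intel_scu_ipc_unregister(scu);	/* final put frees via release() */
	return 0;
}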
-static struct pci_driver ipc_driver = {
- .driver = {
- .suppress_bind_attrs = true,
- },
- .name = "intel_scu_ipc",
- .id_table = pci_ids,
- .probe = ipc_probe,
-};
-builtin_pci_driver(ipc_driver);
+static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
+{
+ struct intel_scu_ipc_devres *dr = res;
+ struct intel_scu_ipc_dev *scu = dr->scu;
+
+ intel_scu_ipc_unregister(scu);
+}
+
+/**
+ * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
+ * @parent: Parent device
+ * @scu_data: Data used to configure SCU IPC
+ * @owner: Module registering the SCU IPC device
+ *
+ * Call this function to register managed SCU IPC mechanism under
+ * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
+ * case of failure. The caller may use the returned instance if it needs
+ * to do SCU IPC calls itself.
+ */
+struct intel_scu_ipc_dev *
+__devm_intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner)
+{
+ struct intel_scu_ipc_devres *dr;
+ struct intel_scu_ipc_dev *scu;
+
+ dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return NULL;
+
+ scu = __intel_scu_ipc_register(parent, scu_data, owner);
+ if (IS_ERR(scu)) {
+ devres_free(dr);
+ return scu;
+ }
+
+ dr->scu = scu;
+ devres_add(parent, dr);
+
+ return scu;
+}
+EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
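/*
 * The probe paths below call intel_scu_ipc_register() and
 * devm_intel_scu_ipc_register() without an owner argument, so the
 * header presumably supplies THIS_MODULE through wrappers along these
 * lines, with intel_scu_ipc_data carrying the memory resource and IRQ
 * consumed above (a sketch, not taken from this patch):
 */
struct intel_scu_ipc_data {
	struct resource mem;
	int irq;
};

#define intel_scu_ipc_register(parent, scu_data) \
	__intel_scu_ipc_register(parent, scu_data, THIS_MODULE)

#define devm_intel_scu_ipc_register(parent, scu_data) \
	__devm_intel_scu_ipc_register(parent, scu_data, THIS_MODULE)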
+
+static int __init intel_scu_ipc_init(void)
+{
+ return class_register(&intel_scu_ipc_class);
+}
+subsys_initcall(intel_scu_ipc_init);
+
+static void __exit intel_scu_ipc_exit(void)
+{
+ class_unregister(&intel_scu_ipc_class);
+}
+module_exit(intel_scu_ipc_exit);
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index 8afe6fa06d7b..b7c10c15a3d6 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -22,6 +22,9 @@
static int major;
+static struct intel_scu_ipc_dev *scu;
+static DEFINE_MUTEX(scu_lock);
+
/* IOCTL commands */
#define INTE_SCU_IPC_REGISTER_READ 0
#define INTE_SCU_IPC_REGISTER_WRITE 1
@@ -52,12 +55,12 @@ static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
switch (cmd) {
case INTE_SCU_IPC_REGISTER_READ:
- return intel_scu_ipc_readv(data->addr, data->data, count);
+ return intel_scu_ipc_dev_readv(scu, data->addr, data->data, count);
case INTE_SCU_IPC_REGISTER_WRITE:
- return intel_scu_ipc_writev(data->addr, data->data, count);
+ return intel_scu_ipc_dev_writev(scu, data->addr, data->data, count);
case INTE_SCU_IPC_REGISTER_UPDATE:
- return intel_scu_ipc_update_register(data->addr[0],
- data->data[0], data->mask);
+ return intel_scu_ipc_dev_update(scu, data->addr[0], data->data[0],
+ data->mask);
default:
return -ENOTTY;
}
@@ -91,8 +94,40 @@ static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
return 0;
}
+static int scu_ipc_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+	/* Only one open at a time */
+ mutex_lock(&scu_lock);
+ if (scu) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ scu = intel_scu_ipc_dev_get();
+ if (!scu)
+ ret = -ENODEV;
+
+unlock:
+ mutex_unlock(&scu_lock);
+ return ret;
+}
+
+static int scu_ipc_release(struct inode *inode, struct file *file)
+{
+ mutex_lock(&scu_lock);
+ intel_scu_ipc_dev_put(scu);
+ scu = NULL;
+ mutex_unlock(&scu_lock);
+
+ return 0;
+}
+
static const struct file_operations scu_ipc_fops = {
.unlocked_ioctl = scu_ipc_ioctl,
+ .open = scu_ipc_open,
+ .release = scu_ipc_release,
};
static int __init ipc_module_init(void)
diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c
new file mode 100644
index 000000000000..8c5fd8240da9
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_pcidrv.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI driver for the Intel SCU.
+ *
+ * Copyright (C) 2008-2010, 2015, 2020 Intel Corporation
+ * Authors: Sreedhara DS (sreedhara.ds@intel.com)
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+static int intel_scu_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ void (*setup_fn)(void) = (void (*)(void))id->driver_data;
+ struct intel_scu_ipc_data scu_data = {};
+ struct intel_scu_ipc_dev *scu;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ scu_data.mem = pdev->resource[0];
+ scu_data.irq = pdev->irq;
+
+ scu = intel_scu_ipc_register(&pdev->dev, &scu_data);
+ if (IS_ERR(scu))
+ return PTR_ERR(scu);
+
+ if (setup_fn)
+ setup_fn();
+ return 0;
+}
+
+static void intel_mid_scu_setup(void)
+{
+ intel_scu_devices_create();
+}
+
+static const struct pci_device_id pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x080e),
+ .driver_data = (kernel_ulong_t)intel_mid_scu_setup },
+ { PCI_VDEVICE(INTEL, 0x08ea),
+ .driver_data = (kernel_ulong_t)intel_mid_scu_setup },
+ { PCI_VDEVICE(INTEL, 0x0a94) },
+ { PCI_VDEVICE(INTEL, 0x11a0),
+ .driver_data = (kernel_ulong_t)intel_mid_scu_setup },
+ { PCI_VDEVICE(INTEL, 0x1a94) },
+ { PCI_VDEVICE(INTEL, 0x5a94) },
+ {}
+};
+
+static struct pci_driver intel_scu_pci_driver = {
+ .driver = {
+ .suppress_bind_attrs = true,
+ },
+ .name = "intel_scu",
+ .id_table = pci_ids,
+ .probe = intel_scu_pci_probe,
+};
+
+builtin_pci_driver(intel_scu_pci_driver);
diff --git a/drivers/platform/x86/intel_scu_pltdrv.c b/drivers/platform/x86/intel_scu_pltdrv.c
new file mode 100644
index 000000000000..56ec6ae4c824
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_pltdrv.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform driver for the Intel SCU.
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Divya Sasidharan <divya.s.sasidharan@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel_scu_ipc.h>
+
+static int intel_scu_platform_probe(struct platform_device *pdev)
+{
+ struct intel_scu_ipc_data scu_data = {};
+ struct intel_scu_ipc_dev *scu;
+ const struct resource *res;
+
+ scu_data.irq = platform_get_irq_optional(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ scu_data.mem = *res;
+
+ scu = devm_intel_scu_ipc_register(&pdev->dev, &scu_data);
+ if (IS_ERR(scu))
+ return PTR_ERR(scu);
+
+ platform_set_drvdata(pdev, scu);
+ return 0;
+}
+
+static const struct acpi_device_id intel_scu_acpi_ids[] = {
+ { "INTC1026" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, intel_scu_acpi_ids);
+
+static struct platform_driver intel_scu_platform_driver = {
+ .probe = intel_scu_platform_probe,
+ .driver = {
+ .name = "intel_scu",
+ .acpi_match_table = intel_scu_acpi_ids,
+ },
+};
+module_platform_driver(intel_scu_platform_driver);
+
+MODULE_AUTHOR("Divya Sasidharan <divya.s.sasidharan@intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com");
+MODULE_AUTHOR("Rajmohan Mani <rajmohan.mani@intel.com>");
+MODULE_DESCRIPTION("Intel SCU platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
index de4169d0796b..d84e2174cbde 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
@@ -21,13 +21,12 @@
#define PUNIT_MAILBOX_BUSY_BIT 31
/*
- * Commands has variable amount of processing time. Most of the commands will
- * be done in 0-3 tries, but some takes up to 50.
- * The real processing time was observed as 25us for the most of the commands
- * at 2GHz. It is possible to optimize this count taking samples on customer
- * systems.
+ * The average time to complete some commands is about 40us. The current
+ * count is enough to satisfy 40us. But when the firmware is very busy,
+ * this causes occasional timeouts. So increase the count to deal with
+ * worst-case scenarios. Most commands still complete in a few us.
*/
-#define OS_MAILBOX_RETRY_COUNT 50
+#define OS_MAILBOX_RETRY_COUNT 100
struct isst_if_device {
struct mutex mutex;
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index d4040bb222b4..fdf55b5d6948 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -353,21 +353,16 @@ int telemetry_clear_pltdata(void)
EXPORT_SYMBOL_GPL(telemetry_clear_pltdata);
/**
- * telemetry_pltconfig_valid() - Checkif platform config is valid
+ * telemetry_get_pltdata() - Return telemetry platform config
*
- * Usage by other than telemetry module is invalid
- *
- * Return: 0 success, < 0 for failure
+ * May be used by other telemetry modules to get platform-specific
+ * configuration.
*/
-int telemetry_pltconfig_valid(void)
+struct telemetry_plt_config *telemetry_get_pltdata(void)
{
- if (telm_core_conf.plt_config)
- return 0;
-
- else
- return -EINVAL;
+ return telm_core_conf.plt_config;
}
-EXPORT_SYMBOL_GPL(telemetry_pltconfig_valid);
+EXPORT_SYMBOL_GPL(telemetry_get_pltdata);
static inline int telemetry_get_pssevtname(enum telemetry_unit telem_unit,
const char **name, int len)
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index 8a53d3b485b3..1d4d0fbfd63c 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -15,6 +15,7 @@
*/
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/mfd/intel_pmc_bxt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
@@ -22,7 +23,6 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
-#include <asm/intel_pmc_ipc.h>
#include <asm/intel_telemetry.h>
#define DRIVER_NAME "telemetry_soc_debugfs"
@@ -647,10 +647,11 @@ DEFINE_SHOW_ATTRIBUTE(telem_soc_states);
static int telem_s0ix_res_get(void *data, u64 *val)
{
+ struct telemetry_plt_config *plt_config = telemetry_get_pltdata();
u64 s0ix_total_res;
int ret;
- ret = intel_pmc_s0ix_counter_read(&s0ix_total_res);
+ ret = intel_pmc_s0ix_counter_read(plt_config->pmc, &s0ix_total_res);
if (ret) {
pr_err("Failed to read S0ix residency");
return ret;
@@ -837,12 +838,15 @@ static int pm_suspend_exit_cb(void)
*/
if (suspend_shlw_ctr_exit == suspend_shlw_ctr_temp &&
suspend_deep_ctr_exit == suspend_deep_ctr_temp) {
- ret = intel_pmc_gcr_read64(PMC_GCR_TELEM_SHLW_S0IX_REG,
+ struct telemetry_plt_config *plt_config = telemetry_get_pltdata();
+ struct intel_pmc_dev *pmc = plt_config->pmc;
+
+ ret = intel_pmc_gcr_read64(pmc, PMC_GCR_TELEM_SHLW_S0IX_REG,
&suspend_shlw_res_exit);
if (ret < 0)
goto out;
- ret = intel_pmc_gcr_read64(PMC_GCR_TELEM_DEEP_S0IX_REG,
+ ret = intel_pmc_gcr_read64(pmc, PMC_GCR_TELEM_DEEP_S0IX_REG,
&suspend_deep_res_exit);
if (ret < 0)
goto out;
@@ -910,8 +914,7 @@ static int __init telemetry_debugfs_init(void)
debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data;
- err = telemetry_pltconfig_valid();
- if (err < 0) {
+ if (!telemetry_get_pltdata()) {
pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 987a24e3344e..405dea87de6b 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -15,7 +15,6 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
-#include <asm/intel_pmc_ipc.h>
#include <asm/intel_punit_ipc.h>
#include <asm/intel_telemetry.h>
@@ -35,6 +34,7 @@
#define TELEM_SSRAM_STARTTIME_OFFSET 8
#define TELEM_SSRAM_EVTLOG_OFFSET 16
+#define IOSS_TELEM 0xeb
#define IOSS_TELEM_EVENT_READ 0x0
#define IOSS_TELEM_EVENT_WRITE 0x1
#define IOSS_TELEM_INFO_READ 0x2
@@ -42,9 +42,6 @@
#define IOSS_TELEM_TRACE_CTL_WRITE 0x6
#define IOSS_TELEM_EVENT_CTL_READ 0x7
#define IOSS_TELEM_EVENT_CTL_WRITE 0x8
-#define IOSS_TELEM_EVT_CTRL_WRITE_SIZE 0x4
-#define IOSS_TELEM_READ_WORD 0x1
-#define IOSS_TELEM_WRITE_FOURBYTES 0x4
#define IOSS_TELEM_EVT_WRITE_SIZE 0x3
#define TELEM_INFO_SRAMEVTS_MASK 0xFF00
@@ -250,17 +247,14 @@ static int telemetry_check_evtid(enum telemetry_unit telem_unit,
static inline int telemetry_plt_config_ioss_event(u32 evt_id, int index)
{
u32 write_buf;
- int ret;
write_buf = evt_id | TELEM_EVENT_ENABLE;
write_buf <<= BITS_PER_BYTE;
write_buf |= index;
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
- IOSS_TELEM_EVENT_WRITE, (u8 *)&write_buf,
- IOSS_TELEM_EVT_WRITE_SIZE, NULL, 0);
-
- return ret;
+ return intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_WRITE, &write_buf,
+ IOSS_TELEM_EVT_WRITE_SIZE, NULL, 0);
}
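/*
 * The 3-byte event word built above packs the event ID (with the
 * enable bit ORed in) into the upper two bytes and the event-table
 * index into the low byte; the same encoding as a helper, reusing the
 * existing macros (an illustrative sketch):
 */
static u32 ioss_evt_word(u32 evt_id, u8 index)
{
	return (evt_id | TELEM_EVENT_ENABLE) << BITS_PER_BYTE | index;
}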
static inline int telemetry_plt_config_pss_event(u32 evt_id, int index)
@@ -278,6 +272,7 @@ static inline int telemetry_plt_config_pss_event(u32 evt_id, int index)
static int telemetry_setup_iossevtconfig(struct telemetry_evtconfig evtconfig,
enum telemetry_action action)
{
+ struct intel_scu_ipc_dev *scu = telm_conf->scu;
u8 num_ioss_evts, ioss_period;
int ret, index, idx;
u32 *ioss_evtmap;
@@ -288,9 +283,9 @@ static int telemetry_setup_iossevtconfig(struct telemetry_evtconfig evtconfig,
ioss_evtmap = evtconfig.evtmap;
/* Get telemetry EVENT CTL */
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
IOSS_TELEM_EVENT_CTL_READ, NULL, 0,
- &telem_ctrl, IOSS_TELEM_READ_WORD);
+ &telem_ctrl, sizeof(telem_ctrl));
if (ret) {
pr_err("IOSS TELEM_CTRL Read Failed\n");
return ret;
@@ -299,11 +294,9 @@ static int telemetry_setup_iossevtconfig(struct telemetry_evtconfig evtconfig,
/* Disable Telemetry */
TELEM_DISABLE(telem_ctrl);
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
- IOSS_TELEM_EVENT_CTL_WRITE,
- (u8 *)&telem_ctrl,
- IOSS_TELEM_EVT_CTRL_WRITE_SIZE,
- NULL, 0);
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE, &telem_ctrl,
+ sizeof(telem_ctrl), NULL, 0);
if (ret) {
pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
return ret;
@@ -315,10 +308,9 @@ static int telemetry_setup_iossevtconfig(struct telemetry_evtconfig evtconfig,
/* Clear All Events */
TELEM_CLEAR_EVENTS(telem_ctrl);
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
IOSS_TELEM_EVENT_CTL_WRITE,
- (u8 *)&telem_ctrl,
- IOSS_TELEM_EVT_CTRL_WRITE_SIZE,
+ &telem_ctrl, sizeof(telem_ctrl),
NULL, 0);
if (ret) {
pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
@@ -344,10 +336,9 @@ static int telemetry_setup_iossevtconfig(struct telemetry_evtconfig evtconfig,
/* Clear All Events */
TELEM_CLEAR_EVENTS(telem_ctrl);
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
IOSS_TELEM_EVENT_CTL_WRITE,
- (u8 *)&telem_ctrl,
- IOSS_TELEM_EVT_CTRL_WRITE_SIZE,
+ &telem_ctrl, sizeof(telem_ctrl),
NULL, 0);
if (ret) {
pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
@@ -396,10 +387,9 @@ static int telemetry_setup_iossevtconfig(struct telemetry_evtconfig evtconfig,
TELEM_ENABLE_PERIODIC(telem_ctrl);
telem_ctrl |= ioss_period;
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
IOSS_TELEM_EVENT_CTL_WRITE,
- (u8 *)&telem_ctrl,
- IOSS_TELEM_EVT_CTRL_WRITE_SIZE, NULL, 0);
+ &telem_ctrl, sizeof(telem_ctrl), NULL, 0);
if (ret) {
pr_err("IOSS TELEM_CTRL Event Enable Write Failed\n");
return ret;
@@ -586,8 +576,9 @@ static int telemetry_setup(struct platform_device *pdev)
u32 read_buf, events, event_regs;
int ret;
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY, IOSS_TELEM_INFO_READ,
- NULL, 0, &read_buf, IOSS_TELEM_READ_WORD);
+ ret = intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_INFO_READ, NULL, 0,
+ &read_buf, sizeof(read_buf));
if (ret) {
dev_err(&pdev->dev, "IOSS TELEM_INFO Read Failed\n");
return ret;
@@ -681,6 +672,8 @@ static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
mutex_lock(&(telm_conf->telem_lock));
if (ioss_period) {
+ struct intel_scu_ipc_dev *scu = telm_conf->scu;
+
if (TELEM_SAMPLE_PERIOD_INVALID(ioss_period)) {
pr_err("IOSS Sampling Period Out of Range\n");
ret = -EINVAL;
@@ -688,9 +681,9 @@ static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
}
/* Get telemetry EVENT CTL */
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
IOSS_TELEM_EVENT_CTL_READ, NULL, 0,
- &telem_ctrl, IOSS_TELEM_READ_WORD);
+ &telem_ctrl, sizeof(telem_ctrl));
if (ret) {
pr_err("IOSS TELEM_CTRL Read Failed\n");
goto out;
@@ -699,11 +692,10 @@ static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
/* Disable Telemetry */
TELEM_DISABLE(telem_ctrl);
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
- IOSS_TELEM_EVENT_CTL_WRITE,
- (u8 *)&telem_ctrl,
- IOSS_TELEM_EVT_CTRL_WRITE_SIZE,
- NULL, 0);
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
if (ret) {
pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
goto out;
@@ -715,11 +707,10 @@ static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
TELEM_ENABLE_PERIODIC(telem_ctrl);
telem_ctrl |= ioss_period;
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
- IOSS_TELEM_EVENT_CTL_WRITE,
- (u8 *)&telem_ctrl,
- IOSS_TELEM_EVT_CTRL_WRITE_SIZE,
- NULL, 0);
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
if (ret) {
pr_err("IOSS TELEM_CTRL Event Enable Write Failed\n");
goto out;
@@ -1014,9 +1005,9 @@ static int telemetry_plt_get_trace_verbosity(enum telemetry_unit telem_unit,
break;
case TELEM_IOSS:
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
- IOSS_TELEM_TRACE_CTL_READ, NULL, 0, &temp,
- IOSS_TELEM_READ_WORD);
+ ret = intel_scu_ipc_dev_command(telm_conf->scu,
+ IOSS_TELEM, IOSS_TELEM_TRACE_CTL_READ,
+ NULL, 0, &temp, sizeof(temp));
if (ret) {
pr_err("IOSS TRACE_CTL Read Failed\n");
goto out;
@@ -1068,9 +1059,9 @@ static int telemetry_plt_set_trace_verbosity(enum telemetry_unit telem_unit,
break;
case TELEM_IOSS:
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
- IOSS_TELEM_TRACE_CTL_READ, NULL, 0, &temp,
- IOSS_TELEM_READ_WORD);
+ ret = intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_TRACE_CTL_READ,
+ NULL, 0, &temp, sizeof(temp));
if (ret) {
pr_err("IOSS TRACE_CTL Read Failed\n");
goto out;
@@ -1079,9 +1070,9 @@ static int telemetry_plt_set_trace_verbosity(enum telemetry_unit telem_unit,
TELEM_CLEAR_VERBOSITY_BITS(temp);
TELEM_SET_VERBOSITY_BITS(temp, verbosity);
- ret = intel_pmc_ipc_command(PMC_IPC_PMC_TELEMTRY,
- IOSS_TELEM_TRACE_CTL_WRITE, (u8 *)&temp,
- IOSS_TELEM_WRITE_FOURBYTES, NULL, 0);
+ ret = intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_TRACE_CTL_WRITE,
+ &temp, sizeof(temp), NULL, 0);
if (ret) {
pr_err("IOSS TRACE_CTL Verbosity Set Failed\n");
goto out;
@@ -1124,6 +1115,8 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
telm_conf = (struct telemetry_plt_config *)id->driver_data;
+ telm_conf->pmc = dev_get_drvdata(pdev->dev.parent);
+
mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
@@ -1136,6 +1129,12 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
telm_conf->ioss_config.regmap = mem;
+ telm_conf->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ if (!telm_conf->scu) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
+
mutex_init(&telm_conf->telem_lock);
mutex_init(&telm_conf->telem_trace_lock);
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index c0bb1f864dfe..dd900a76d8de 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -67,9 +67,7 @@ static u32 inited;
#define INIT_INPUT_WMI_0 0x01
#define INIT_INPUT_WMI_2 0x02
#define INIT_INPUT_ACPI 0x04
-#define INIT_TPAD_LED 0x08
-#define INIT_KBD_LED 0x10
-#define INIT_SPARSE_KEYMAP 0x80
+#define INIT_SPARSE_KEYMAP 0x80
static const struct key_entry wmi_keymap[] = {
{KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */
@@ -626,11 +624,9 @@ static int acpi_add(struct acpi_device *device)
if (ret)
goto out_platform_device;
- if (!led_classdev_register(&pf_device->dev, &kbd_backlight))
- inited |= INIT_KBD_LED;
-
- if (!led_classdev_register(&pf_device->dev, &tpad_led))
- inited |= INIT_TPAD_LED;
+ /* LEDs are optional */
+ led_classdev_register(&pf_device->dev, &kbd_backlight);
+ led_classdev_register(&pf_device->dev, &tpad_led);
wmi_input_setup();
@@ -646,11 +642,9 @@ out_platform_registered:
static int acpi_remove(struct acpi_device *device)
{
sysfs_remove_group(&pf_device->dev.kobj, &dev_attribute_group);
- if (inited & INIT_KBD_LED)
- led_classdev_unregister(&kbd_backlight);
- if (inited & INIT_TPAD_LED)
- led_classdev_unregister(&tpad_led);
+ led_classdev_unregister(&tpad_led);
+ led_classdev_unregister(&kbd_backlight);
wmi_input_destroy();
platform_device_unregister(pf_device);
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 23e40aa2176e..d5cec6e35bb8 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1138,8 +1138,7 @@ static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
static void samsung_leds_exit(struct samsung_laptop *samsung)
{
- if (!IS_ERR_OR_NULL(samsung->kbd_led.dev))
- led_classdev_unregister(&samsung->kbd_led);
+ led_classdev_unregister(&samsung->kbd_led);
if (samsung->led_workqueue)
destroy_workqueue(samsung->led_workqueue);
}
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 51309f7ceede..e5a1b5533408 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -757,33 +757,6 @@ static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
return result;
}
-static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
- int *result)
-{
- union acpi_object *object = NULL;
- if (value) {
- u64 v = *value;
- object = __call_snc_method(handle, name, &v);
- } else
- object = __call_snc_method(handle, name, NULL);
-
- if (!object)
- return -EINVAL;
-
- if (object->type != ACPI_TYPE_INTEGER) {
- pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
- ACPI_TYPE_INTEGER, object->type);
- kfree(object);
- return -EINVAL;
- }
-
- if (result)
- *result = object->integer.value;
-
- kfree(object);
- return 0;
-}
-
#define MIN(a, b) (a > b ? b : a)
static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
void *buffer, size_t buflen)
@@ -795,17 +768,20 @@ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
if (!object)
return -EINVAL;
- if (object->type == ACPI_TYPE_BUFFER) {
+ if (!buffer) {
+ /* do nothing */
+ } else if (object->type == ACPI_TYPE_BUFFER) {
len = MIN(buflen, object->buffer.length);
+ memset(buffer, 0, buflen);
memcpy(buffer, object->buffer.pointer, len);
} else if (object->type == ACPI_TYPE_INTEGER) {
len = MIN(buflen, sizeof(object->integer.value));
+ memset(buffer, 0, buflen);
memcpy(buffer, &object->integer.value, len);
} else {
- pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
- ACPI_TYPE_BUFFER, object->type);
+ pr_warn("Unexpected acpi_object: 0x%x\n", object->type);
ret = -EINVAL;
}
@@ -813,6 +789,23 @@ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
return ret;
}
+static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
+			    int *result)
+{
+ int ret;
+
+ if (value) {
+ u64 v = *value;
+
+ ret = sony_nc_buffer_call(handle, name, &v, result,
+ sizeof(*result));
+ } else {
+ ret = sony_nc_buffer_call(handle, name, NULL, result,
+ sizeof(*result));
+ }
+ return ret;
+}
+
struct sony_nc_handles {
u16 cap[0x10];
struct device_attribute devattr;
@@ -2295,7 +2288,12 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd)
#ifdef CONFIG_PM_SLEEP
static void sony_nc_thermal_resume(void)
{
- unsigned int status = sony_nc_thermal_mode_get();
+ int status;
+
+ if (!th_handle)
+ return;
+
+ status = sony_nc_thermal_mode_get();
if (status != th_handle->mode)
sony_nc_thermal_mode_set(th_handle->mode);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0f704484ae1d..ff7f0a4f2475 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -318,6 +318,7 @@ static struct {
u32 uwb:1;
u32 fan_ctrl_status_undef:1;
u32 second_fan:1;
+ u32 second_fan_ctl:1;
u32 beep_needs_two_args:1;
u32 mixer_no_level_control:1;
u32 battery_force_primary:1;
@@ -884,20 +885,11 @@ static ssize_t dispatch_proc_write(struct file *file,
if (!ibm || !ibm->write)
return -EINVAL;
- if (count > PAGE_SIZE - 2)
- return -EINVAL;
-
- kernbuf = kmalloc(count + 2, GFP_KERNEL);
- if (!kernbuf)
- return -ENOMEM;
- if (copy_from_user(kernbuf, userbuf, count)) {
- kfree(kernbuf);
- return -EFAULT;
- }
+ kernbuf = strndup_user(userbuf, PAGE_SIZE);
+ if (IS_ERR(kernbuf))
+ return PTR_ERR(kernbuf);
- kernbuf[count] = 0;
- strcat(kernbuf, ",");
ret = ibm->write(kernbuf);
if (ret == 0)
ret = count;
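A hedged sketch of the strndup_user() pattern adopted above; unlike the old kmalloc()/copy_from_user() pair it returns an ERR_PTR (never NULL) on failure and rejects unterminated or over-long user strings:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t handle_user_cmd(const char __user *ubuf)
{
	char *kbuf = strndup_user(ubuf, PAGE_SIZE);

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -EFAULT or -EINVAL */

	/* ... parse kbuf ... */
	kfree(kbuf);
	return 0;
}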
@@ -915,23 +907,6 @@ static const struct proc_ops dispatch_proc_ops = {
.proc_write = dispatch_proc_write,
};
-static char *next_cmd(char **cmds)
-{
- char *start = *cmds;
- char *end;
-
- while ((end = strchr(start, ',')) && end == start)
- start = end + 1;
-
- if (!end)
- return NULL;
-
- *end = 0;
- *cmds = end + 1;
- return start;
-}
-
-
/****************************************************************************
****************************************************************************
*
@@ -1422,7 +1397,7 @@ static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
if (id >= TPACPI_RFK_SW_MAX)
return -ENODEV;
- while ((cmd = next_cmd(&buf))) {
+ while ((cmd = strsep(&buf, ","))) {
if (strlencmp(cmd, "enable") == 0)
status = TPACPI_RFK_RADIO_ON;
else if (strlencmp(cmd, "disable") == 0)
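The removed next_cmd() helper required a trailing comma (which dispatch_proc_write() used to append) and skipped empty fields; strsep() needs neither, since it also returns the final unterminated token. A minimal sketch of the replacement loop (names are illustrative):

#include <linux/string.h>

static void parse_cmds(char *buf)	/* buf must be a writable string */
{
	char *cmd;

	while ((cmd = strsep(&buf, ","))) {
		if (!*cmd)
			continue;	/* empty field, e.g. "a,,b" */
		/* handle one command token */
	}
}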
@@ -4305,7 +4280,7 @@ static int hotkey_write(char *buf)
mask = hotkey_user_mask;
res = 0;
- while ((cmd = next_cmd(&buf))) {
+ while ((cmd = strsep(&buf, ","))) {
if (strlencmp(cmd, "enable") == 0) {
hotkey_enabledisable_warn(1);
} else if (strlencmp(cmd, "disable") == 0) {
@@ -5232,7 +5207,7 @@ static int video_write(char *buf)
enable = 0;
disable = 0;
- while ((cmd = next_cmd(&buf))) {
+ while ((cmd = strsep(&buf, ","))) {
if (strlencmp(cmd, "lcd_enable") == 0) {
enable |= TP_ACPI_VIDEO_S_LCD;
} else if (strlencmp(cmd, "lcd_disable") == 0) {
@@ -5433,8 +5408,7 @@ static int __init kbdlight_init(struct ibm_init_struct *iibm)
static void kbdlight_exit(void)
{
- if (tp_features.kbdlight)
- led_classdev_unregister(&tpacpi_led_kbdlight.led_classdev);
+ led_classdev_unregister(&tpacpi_led_kbdlight.led_classdev);
}
static int kbdlight_set_level_and_update(int level)
@@ -5472,23 +5446,18 @@ static int kbdlight_read(struct seq_file *m)
static int kbdlight_write(char *buf)
{
char *cmd;
- int level = -1;
+ int res, level = -EINVAL;
if (!tp_features.kbdlight)
return -ENODEV;
- while ((cmd = next_cmd(&buf))) {
- if (strlencmp(cmd, "0") == 0)
- level = 0;
- else if (strlencmp(cmd, "1") == 0)
- level = 1;
- else if (strlencmp(cmd, "2") == 0)
- level = 2;
- else
- return -EINVAL;
+ while ((cmd = strsep(&buf, ","))) {
+ res = kstrtoint(cmd, 10, &level);
+ if (res < 0)
+ return res;
}
- if (level == -1)
+ if (level >= 3 || level < 0)
return -EINVAL;
return kbdlight_set_level_and_update(level);
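kbdlight_write() (and lcdshadow_write() further down) now follow a parse-then-clamp pattern: every comma-separated token must parse as an integer, the last one wins, and the result is range-checked once. A condensed sketch:

static int parse_last_level(char *buf, int nlevels)
{
	char *tok;
	int err, level = -EINVAL;

	while ((tok = strsep(&buf, ","))) {
		err = kstrtoint(tok, 10, &level);
		if (err < 0)
			return err;
	}
	return (level < 0 || level >= nlevels) ? -EINVAL : level;
}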
@@ -5657,7 +5626,7 @@ static int light_write(char *buf)
if (!tp_features.light)
return -ENODEV;
- while ((cmd = next_cmd(&buf))) {
+ while ((cmd = strsep(&buf, ","))) {
if (strlencmp(cmd, "on") == 0) {
newstatus = 1;
} else if (strlencmp(cmd, "off") == 0) {
@@ -5742,7 +5711,7 @@ static int cmos_write(char *buf)
char *cmd;
int cmos_cmd, res;
- while ((cmd = next_cmd(&buf))) {
+ while ((cmd = strsep(&buf, ","))) {
if (sscanf(cmd, "%u", &cmos_cmd) == 1 &&
cmos_cmd >= 0 && cmos_cmd <= 21) {
/* cmos_cmd set */
@@ -5948,20 +5917,14 @@ static void led_exit(void)
{
unsigned int i;
- for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
- if (tpacpi_leds[i].led_classdev.name)
- led_classdev_unregister(&tpacpi_leds[i].led_classdev);
- }
+ for (i = 0; i < TPACPI_LED_NUMLEDS; i++)
+ led_classdev_unregister(&tpacpi_leds[i].led_classdev);
kfree(tpacpi_leds);
}
static int __init tpacpi_init_led(unsigned int led)
{
- int rc;
-
- tpacpi_leds[led].led = led;
-
/* LEDs with no name don't get registered */
if (!tpacpi_led_names[led])
return 0;
@@ -5969,17 +5932,12 @@ static int __init tpacpi_init_led(unsigned int led)
tpacpi_leds[led].led_classdev.brightness_set_blocking = &led_sysfs_set;
tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set;
if (led_supported == TPACPI_LED_570)
- tpacpi_leds[led].led_classdev.brightness_get =
- &led_sysfs_get;
+ tpacpi_leds[led].led_classdev.brightness_get = &led_sysfs_get;
tpacpi_leds[led].led_classdev.name = tpacpi_led_names[led];
+ tpacpi_leds[led].led = led;
- rc = led_classdev_register(&tpacpi_pdev->dev,
- &tpacpi_leds[led].led_classdev);
- if (rc < 0)
- tpacpi_leds[led].led_classdev.name = NULL;
-
- return rc;
+ return led_classdev_register(&tpacpi_pdev->dev, &tpacpi_leds[led].led_classdev);
}
static const struct tpacpi_quirk led_useful_qtable[] __initconst = {
@@ -6089,8 +6047,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
tpacpi_leds[i].led = -1;
- if (!tpacpi_is_led_restricted(i) &&
- test_bit(i, &useful_leds)) {
+ if (!tpacpi_is_led_restricted(i) && test_bit(i, &useful_leds)) {
rc = tpacpi_init_led(i);
if (rc < 0) {
led_exit();
@@ -6143,12 +6100,14 @@ static int led_write(char *buf)
if (!led_supported)
return -ENODEV;
- while ((cmd = next_cmd(&buf))) {
+ while ((cmd = strsep(&buf, ","))) {
if (sscanf(cmd, "%d", &led) != 1)
return -EINVAL;
- if (led < 0 || led > (TPACPI_LED_NUMLEDS - 1) ||
- tpacpi_leds[led].led < 0)
+ if (led < 0 || led > (TPACPI_LED_NUMLEDS - 1))
+ return -ENODEV;
+
+ if (tpacpi_leds[led].led < 0)
return -ENODEV;
if (strstr(cmd, "off")) {
@@ -6228,7 +6187,7 @@ static int beep_write(char *buf)
if (!beep_handle)
return -ENODEV;
- while ((cmd = next_cmd(&buf))) {
+ while ((cmd = strsep(&buf, ","))) {
if (sscanf(cmd, "%u", &beep_cmd) == 1 &&
beep_cmd >= 0 && beep_cmd <= 17) {
/* beep_cmd set */
@@ -7116,7 +7075,7 @@ static int brightness_write(char *buf)
if (level < 0)
return level;
- while ((cmd = next_cmd(&buf))) {
+ while ((cmd = strsep(&buf, ","))) {
if (strlencmp(cmd, "up") == 0) {
if (level < bright_maxlvl)
level++;
@@ -7868,7 +7827,7 @@ static int volume_write(char *buf)
new_level = s & TP_EC_AUDIO_LVL_MSK;
new_mute = s & TP_EC_AUDIO_MUTESW_MSK;
- while ((cmd = next_cmd(&buf))) {
+ while ((cmd = strsep(&buf, ","))) {
if (!tp_features.mixer_no_level_control) {
if (strlencmp(cmd, "up") == 0) {
if (new_mute)
@@ -8324,11 +8283,19 @@ static int fan_set_level(int level)
switch (fan_control_access_mode) {
case TPACPI_FAN_WR_ACPI_SFAN:
- if (level >= 0 && level <= 7) {
- if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level))
- return -EIO;
- } else
+ if ((level < 0) || (level > 7))
return -EINVAL;
+
+ if (tp_features.second_fan_ctl) {
+ if (!fan_select_fan2() ||
+ !acpi_evalf(sfan_handle, NULL, NULL, "vd", level)) {
+ pr_warn("Couldn't set 2nd fan level, disabling support\n");
+ tp_features.second_fan_ctl = 0;
+ }
+ fan_select_fan1();
+ }
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level))
+ return -EIO;
break;
case TPACPI_FAN_WR_ACPI_FANS:
@@ -8345,6 +8312,15 @@ static int fan_set_level(int level)
else if (level & TP_EC_FAN_AUTO)
level |= 4; /* safety min speed 4 */
+ if (tp_features.second_fan_ctl) {
+ if (!fan_select_fan2() ||
+ !acpi_ec_write(fan_status_offset, level)) {
+ pr_warn("Couldn't set 2nd fan level, disabling support\n");
+ tp_features.second_fan_ctl = 0;
+ }
+ fan_select_fan1();
+
+ }
if (!acpi_ec_write(fan_status_offset, level))
return -EIO;
else
@@ -8763,6 +8739,7 @@ static const struct attribute_group fan_attr_group = {
#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */
#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
+#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8771,6 +8748,13 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('7', '0', TPACPI_FAN_Q1),
TPACPI_QEC_LNV('7', 'M', TPACPI_FAN_2FAN),
TPACPI_Q_LNV('N', '1', TPACPI_FAN_2FAN),
+ TPACPI_Q_LNV3('N', '1', 'D', TPACPI_FAN_2CTL), /* P70 */
+ TPACPI_Q_LNV3('N', '1', 'E', TPACPI_FAN_2CTL), /* P50 */
+ TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */
+ TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */
+ TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */
+ TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
+ TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -8788,6 +8772,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
fan_watchdog_maxinterval = 0;
tp_features.fan_ctrl_status_undef = 0;
tp_features.second_fan = 0;
+ tp_features.second_fan_ctl = 0;
fan_control_desired_level = 7;
if (tpacpi_is_ibm()) {
@@ -8812,8 +8797,12 @@ static int __init fan_init(struct ibm_init_struct *iibm)
fan_quirk1_setup();
if (quirks & TPACPI_FAN_2FAN) {
tp_features.second_fan = 1;
- dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
- "secondary fan support enabled\n");
+ pr_info("secondary fan support enabled\n");
+ }
+ if (quirks & TPACPI_FAN_2CTL) {
+ tp_features.second_fan = 1;
+ tp_features.second_fan_ctl = 1;
+ pr_info("secondary fan control enabled\n");
}
} else {
pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
@@ -9148,7 +9137,7 @@ static int fan_write(char *buf)
char *cmd;
int rc = 0;
- while (!rc && (cmd = next_cmd(&buf))) {
+ while (!rc && (cmd = strsep(&buf, ","))) {
if (!((fan_control_commands & TPACPI_FAN_CMD_LEVEL) &&
fan_write_cmd_level(cmd, &rc)) &&
!((fan_control_commands & TPACPI_FAN_CMD_ENABLE) &&
@@ -9271,10 +9260,8 @@ static int mute_led_init(struct ibm_init_struct *iibm)
mute_led_cdev[i].brightness = ledtrig_audio_get(i);
err = led_classdev_register(&tpacpi_pdev->dev, &mute_led_cdev[i]);
if (err < 0) {
- while (i--) {
- if (led_tables[i].state >= 0)
- led_classdev_unregister(&mute_led_cdev[i]);
- }
+ while (i--)
+ led_classdev_unregister(&mute_led_cdev[i]);
return err;
}
}
@@ -9286,10 +9273,8 @@ static void mute_led_exit(void)
int i;
for (i = 0; i < TPACPI_LED_MAX; i++) {
- if (led_tables[i].state >= 0) {
- led_classdev_unregister(&mute_led_cdev[i]);
- tpacpi_led_set(i, false);
- }
+ led_classdev_unregister(&mute_led_cdev[i]);
+ tpacpi_led_set(i, false);
}
}
@@ -9786,19 +9771,18 @@ static int lcdshadow_read(struct seq_file *m)
static int lcdshadow_write(char *buf)
{
char *cmd;
- int state = -1;
+ int res, state = -EINVAL;
if (lcdshadow_state < 0)
return -ENODEV;
- while ((cmd = next_cmd(&buf))) {
- if (strlencmp(cmd, "0") == 0)
- state = 0;
- else if (strlencmp(cmd, "1") == 0)
- state = 1;
+ while ((cmd = strsep(&buf, ","))) {
+ res = kstrtoint(cmd, 10, &state);
+ if (res < 0)
+ return res;
}
- if (state == -1)
+ if (state >= 2 || state < 0)
return -EINVAL;
return lcdshadow_set(state);
@@ -10314,10 +10298,9 @@ static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
continue;
if (strcmp(ibm->name, kp->name) == 0 && ibm->write) {
- if (strlen(val) > sizeof(ibms_init[i].param) - 2)
+ if (strlen(val) > sizeof(ibms_init[i].param) - 1)
return -ENOSPC;
strcpy(ibms_init[i].param, val);
- strcat(ibms_init[i].param, ",");
return 0;
}
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 808944546739..1ddab5a6dead 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -205,9 +205,6 @@ struct toshiba_acpi_dev {
unsigned int special_functions;
bool kbd_event_generated;
- bool kbd_led_registered;
- bool illumination_led_registered;
- bool eco_led_registered;
bool killswitch;
};
@@ -458,7 +455,6 @@ static void toshiba_illumination_available(struct toshiba_acpi_dev *dev)
acpi_status status;
dev->illumination_supported = 0;
- dev->illumination_led_registered = false;
if (!sci_open(dev))
return;
@@ -528,7 +524,6 @@ static void toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
acpi_status status;
dev->kbd_illum_supported = 0;
- dev->kbd_led_registered = false;
dev->kbd_event_generated = false;
if (!sci_open(dev))
@@ -673,7 +668,6 @@ static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
acpi_status status;
dev->eco_supported = 0;
- dev->eco_led_registered = false;
status = tci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
@@ -2993,14 +2987,9 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
backlight_device_unregister(dev->backlight_dev);
- if (dev->illumination_led_registered)
- led_classdev_unregister(&dev->led_dev);
-
- if (dev->kbd_led_registered)
- led_classdev_unregister(&dev->kbd_led);
-
- if (dev->eco_led_registered)
- led_classdev_unregister(&dev->eco_led);
+ led_classdev_unregister(&dev->led_dev);
+ led_classdev_unregister(&dev->kbd_led);
+ led_classdev_unregister(&dev->eco_led);
if (dev->wwan_rfk) {
rfkill_unregister(dev->wwan_rfk);
@@ -3092,8 +3081,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
dev->led_dev.max_brightness = 1;
dev->led_dev.brightness_set = toshiba_illumination_set;
dev->led_dev.brightness_get = toshiba_illumination_get;
- if (!led_classdev_register(&acpi_dev->dev, &dev->led_dev))
- dev->illumination_led_registered = true;
+ led_classdev_register(&acpi_dev->dev, &dev->led_dev);
}
toshiba_eco_mode_available(dev);
@@ -3102,8 +3090,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
dev->eco_led.max_brightness = 1;
dev->eco_led.brightness_set = toshiba_eco_mode_set_status;
dev->eco_led.brightness_get = toshiba_eco_mode_get_status;
- if (!led_classdev_register(&dev->acpi_dev->dev, &dev->eco_led))
- dev->eco_led_registered = true;
+ led_classdev_register(&dev->acpi_dev->dev, &dev->eco_led);
}
toshiba_kbd_illum_available(dev);
@@ -3119,8 +3106,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
dev->kbd_led.max_brightness = 1;
dev->kbd_led.brightness_set = toshiba_kbd_backlight_set;
dev->kbd_led.brightness_get = toshiba_kbd_backlight_get;
- if (!led_classdev_register(&dev->acpi_dev->dev, &dev->kbd_led))
- dev->kbd_led_registered = true;
+ led_classdev_register(&dev->acpi_dev->dev, &dev->kbd_led);
}
ret = toshiba_touchpad_get(dev, &dummy);
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 6ec8923dec1a..5c223015ee71 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -373,6 +373,23 @@ static const struct ts_dmi_data jumper_ezpad_mini3_data = {
.properties = jumper_ezpad_mini3_props,
};
+static const struct property_entry mpman_mpwin895cl_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 3),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 9),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-mpman-mpwin895cl.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data mpman_mpwin895cl_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = mpman_mpwin895cl_props,
+};
+
static const struct property_entry myria_my8307_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -448,6 +465,24 @@ static const struct ts_dmi_data onda_v820w_32g_data = {
.properties = onda_v820w_32g_props,
};
+static const struct property_entry onda_v891_v5_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl3676-onda-v891-v5.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data onda_v891_v5_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = onda_v891_v5_props,
+};
+
static const struct property_entry onda_v891w_v1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 46),
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
@@ -588,6 +623,22 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
.properties = schneider_sct101ctm_props,
};
+static const struct property_entry techbite_arc_11_6_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1981),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-techbite-arc-11-6.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data techbite_arc_11_6_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = techbite_arc_11_6_props,
+};
+
static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -662,11 +713,14 @@ static const struct ts_dmi_data trekstor_primetab_t13b_data = {
};
static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 0),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1890),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -691,6 +745,20 @@ static const struct ts_dmi_data trekstor_surftab_wintron70_data = {
.properties = trekstor_surftab_wintron70_props,
};
+static const struct property_entry vinga_twizzle_j116_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-vinga-twizzle_j116.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data vinga_twizzle_j116_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = vinga_twizzle_j116_props,
+};
+
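Each of these additions follows the same pattern: a property_entry list describing the panel geometry and firmware, a ts_dmi_data pairing it with the Silead controller's ACPI device name, and a matching DMI entry in the table below. A sketch with hypothetical board names:

static const struct property_entry example_board_props[] = {
	PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
	PROPERTY_ENTRY_U32("touchscreen-size-y", 1080),
	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-example-board.fw"),
	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
	{ }
};

static const struct ts_dmi_data example_board_data = {
	.acpi_name = "MSSL1680:00",
	.properties = example_board_props,
};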
/* NOTE: Please keep this table sorted alphabetically */
const struct dmi_system_id touchscreen_dmi_table[] = {
{
@@ -909,6 +977,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* MP Man MPWIN895CL */
+ .driver_data = (void *)&mpman_mpwin895cl_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MPWIN8900CL"),
+ },
+ },
+ {
/* Myria MY8307 */
.driver_data = (void *)&myria_my8307_data,
.matches = {
@@ -941,6 +1017,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* ONDA V891 v5 */
+ .driver_data = (void *)&onda_v891_v5_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ONDA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ONDA Tablet"),
+ DMI_MATCH(DMI_BIOS_VERSION, "ONDA.D869CJABNRBA06"),
+ },
+ },
+ {
/* ONDA V891w revision P891WBEBV1B00 aka v1 */
.driver_data = (void *)&onda_v891w_v1_data,
.matches = {
@@ -1030,6 +1115,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Techbite Arc 11.6 */
+ .driver_data = (void *)&techbite_arc_11_6_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "mPTech"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "techBite Arc 11.6"),
+ DMI_MATCH(DMI_BOARD_NAME, "G8316_272B"),
+ },
+ },
+ {
/* Teclast X3 Plus */
.driver_data = (void *)&teclast_x3_plus_data,
.matches = {
@@ -1107,6 +1201,21 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Trekstor Yourbook C11B (same touchscreen as the Primebook C11) */
+ .driver_data = (void *)&trekstor_primebook_c11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "YOURBOOK C11B"),
+ },
+ },
+ {
+ /* Vinga Twizzle J116 */
+ .driver_data = (void *)&vinga_twizzle_j116_data,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "VINGA Twizzle J116"),
+ },
+ },
+ {
/* Yours Y8W81, same case and touchscreen as Chuwi Vi8 */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
@@ -1114,7 +1223,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Y8W81"),
},
},
- { },
+ { }
};
static const struct ts_dmi_data *ts_data;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 941739db7199..d88f388a3450 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -111,11 +111,11 @@ static struct platform_driver acpi_wmi_driver = {
static bool find_guid(const char *guid_string, struct wmi_block **out)
{
- uuid_le guid_input;
+ guid_t guid_input;
struct wmi_block *wblock;
struct guid_block *block;
- if (uuid_le_to_bin(guid_string, &guid_input))
+ if (guid_parse(guid_string, &guid_input))
return false;
list_for_each_entry(wblock, &wmi_block_list, list) {
@@ -134,7 +134,7 @@ static const void *find_guid_context(struct wmi_block *wblock,
struct wmi_driver *wdriver)
{
const struct wmi_device_id *id;
- uuid_le guid_input;
+ guid_t guid_input;
if (wblock == NULL || wdriver == NULL)
return NULL;
@@ -143,7 +143,7 @@ static const void *find_guid_context(struct wmi_block *wblock,
id = wdriver->id_table;
while (*id->guid_string) {
- if (uuid_le_to_bin(id->guid_string, &guid_input))
+ if (guid_parse(id->guid_string, &guid_input))
continue;
if (!memcmp(wblock->gblock.guid, &guid_input, 16))
return id->context;
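guid_parse() is the modern spelling of uuid_le_to_bin() and fills a guid_t. A hedged sketch of the parse-and-compare step the driver performs against a block's raw 16-byte GUID:

#include <linux/string.h>
#include <linux/uuid.h>

static bool guid_matches(const char *str, const u8 raw[16])
{
	guid_t g;

	if (guid_parse(str, &g))
		return false;		/* malformed GUID string */
	return memcmp(&g, raw, 16) == 0;
}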
@@ -202,7 +202,7 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
/**
* set_required_buffer_size - Sets the buffer size needed for performing IOCTL
* @wdev: A wmi bus device from a driver
- * @instance: Instance index
+ * @length: Required buffer size
*
* Allocates memory needed for buffer, stores the buffer size in that memory
*/
@@ -222,8 +222,8 @@ EXPORT_SYMBOL_GPL(set_required_buffer_size);
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @instance: Instance index
* @method_id: Method ID to call
- * &in: Buffer containing input for the method call
- * &out: Empty buffer to return the method results
+ * @in: Buffer containing input for the method call
+ * @out: Empty buffer to return the method results
*
* Call an ACPI-WMI method
*/
@@ -244,8 +244,8 @@ EXPORT_SYMBOL_GPL(wmi_evaluate_method);
* @wdev: A wmi bus device from a driver
* @instance: Instance index
* @method_id: Method ID to call
- * &in: Buffer containing input for the method call
- * &out: Empty buffer to return the method results
+ * @in: Buffer containing input for the method call
+ * @out: Empty buffer to return the method results
*
* Call an ACPI-WMI method
*/
@@ -364,7 +364,7 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
* wmi_query_block - Return contents of a WMI block (deprecated)
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @instance: Instance index
- * &out: Empty buffer to return the contents of the data block to
+ * @out: Empty buffer to return the contents of the data block to
*
* Return the contents of an ACPI-WMI data block to a buffer
*/
@@ -399,7 +399,7 @@ EXPORT_SYMBOL_GPL(wmidev_block_query);
* wmi_set_block - Write to a WMI block
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @instance: Instance index
- * &in: Buffer containing new values for the data block
+ * @in: Buffer containing new values for the data block
*
* Write the contents of the input buffer to an ACPI-WMI data block
*/
@@ -510,6 +510,7 @@ static void wmi_notify_debug(u32 value, void *context)
/**
* wmi_install_notify_handler - Register handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @handler: Function to handle notifications
* @data: Data to be returned to handler when event is fired
*
@@ -520,12 +521,12 @@ wmi_notify_handler handler, void *data)
{
struct wmi_block *block;
acpi_status status = AE_NOT_EXIST;
- uuid_le guid_input;
+ guid_t guid_input;
if (!guid || !handler)
return AE_BAD_PARAMETER;
- if (uuid_le_to_bin(guid, &guid_input))
+ if (guid_parse(guid, &guid_input))
return AE_BAD_PARAMETER;
list_for_each_entry(block, &wmi_block_list, list) {
@@ -552,6 +553,7 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
/**
* wmi_uninstall_notify_handler - Unregister handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
*
* Unregister handler for events sent to the ACPI-WMI mapper device.
*/
@@ -559,12 +561,12 @@ acpi_status wmi_remove_notify_handler(const char *guid)
{
struct wmi_block *block;
acpi_status status = AE_NOT_EXIST;
- uuid_le guid_input;
+ guid_t guid_input;
if (!guid)
return AE_BAD_PARAMETER;
- if (uuid_le_to_bin(guid, &guid_input))
+ if (guid_parse(guid, &guid_input))
return AE_BAD_PARAMETER;
list_for_each_entry(block, &wmi_block_list, list) {
@@ -795,9 +797,9 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
return 0;
while (*id->guid_string) {
- uuid_le driver_guid;
+ guid_t driver_guid;
- if (WARN_ON(uuid_le_to_bin(id->guid_string, &driver_guid)))
+ if (WARN_ON(guid_parse(id->guid_string, &driver_guid)))
continue;
if (!memcmp(&driver_guid, wblock->gblock.guid, 16))
return 1;
@@ -1116,8 +1118,7 @@ static void wmi_free_devices(struct acpi_device *device)
}
}
-static bool guid_already_parsed(struct acpi_device *device,
- const u8 *guid)
+static bool guid_already_parsed(struct acpi_device *device, const u8 *guid)
{
struct wmi_block *wblock;
@@ -1327,10 +1328,8 @@ static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
wblock->handler(event, wblock->handler_data);
}
- if (debug_event) {
- pr_info("DEBUG Event GUID: %pUL\n",
- wblock->gblock.guid);
- }
+ if (debug_event)
+ pr_info("DEBUG Event GUID: %pUL\n", wblock->gblock.guid);
acpi_bus_generate_netlink_event(
wblock->acpi_device->pnp.device_class,
diff --git a/drivers/pnp/pnpbios/pnpbios.h b/drivers/pnp/pnpbios/pnpbios.h
index 37acb8378f39..2ce739ff9c1a 100644
--- a/drivers/pnp/pnpbios/pnpbios.h
+++ b/drivers/pnp/pnpbios/pnpbios.h
@@ -107,7 +107,7 @@ struct pnp_bios_node {
__u32 eisa_id;
__u8 type_code[3];
__u16 flags;
- __u8 data[0];
+ __u8 data[];
};
#pragma pack()
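The [0] -> [] change is part of the treewide conversion to C99 flexible array members; the allocation size is then computed with the overflow-safe struct_size() helper. A minimal sketch for the struct above:

#include <linux/overflow.h>
#include <linux/slab.h>

static struct pnp_bios_node *node_alloc(size_t data_len)
{
	struct pnp_bios_node *node;

	/* sizeof(*node) plus data_len trailing bytes, overflow-checked */
	node = kzalloc(struct_size(node, data, data_len), GFP_KERNEL);
	return node;
}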
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 890380302080..df479c49be49 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -184,7 +184,7 @@ config POWER_RESET_VERSATILE
config POWER_RESET_VEXPRESS
bool "ARM Versatile Express power-off and reset driver"
depends on ARM || ARM64
- depends on VEXPRESS_CONFIG
+ depends on VEXPRESS_CONFIG=y
help
Power off and reset support for the ARM Ltd. Versatile
Express boards.
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
index 1caf43d9e46d..0532803e6cbc 100644
--- a/drivers/power/reset/mt6323-poweroff.c
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -30,7 +30,7 @@ static void mt6323_do_pwroff(void)
int ret;
regmap_write(pwrc->regmap, pwrc->base + RTC_BBPU, RTC_BBPU_KEY);
- regmap_write(pwrc->regmap, pwrc->base + RTC_WRTGR, 1);
+ regmap_write(pwrc->regmap, pwrc->base + RTC_WRTGR_MT6323, 1);
ret = regmap_read_poll_timeout(pwrc->regmap,
pwrc->base + RTC_BBPU, val,
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 90cbaa8341e3..1fdbcbd95fc2 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -143,11 +143,7 @@ static struct platform_driver vexpress_reset_driver = {
.driver = {
.name = "vexpress-reset",
.of_match_table = vexpress_reset_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-static int __init vexpress_reset_init(void)
-{
- return platform_driver_register(&vexpress_reset_driver);
-}
-device_initcall(vexpress_reset_init);
+builtin_platform_driver(vexpress_reset_driver);
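builtin_platform_driver() generates essentially the boilerplate deleted here; for a driver d it expands to roughly:

static int __init d_init(void)
{
	return platform_driver_register(&d);
}
device_initcall(d_init);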
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index f3424fdce341..a46643f5e2fb 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -541,6 +541,16 @@ config CHARGER_MAX8998
Say Y to enable support for the battery charger control sysfs and
platform data of MAX8998/LP3974 PMICs.
+config CHARGER_MP2629
+ tristate "Monolithic power system MP2629 Battery charger"
+ depends on MFD_MP2629
+ depends on MP2629_ADC
+ depends on IIO
+ help
+ Select this option to enable support for the Monolithic Power
+ Systems MP2629 battery charger. This driver provides battery
+ charger power management functions on these systems.
+
config CHARGER_QCOM_SMBB
tristate "Qualcomm Switch-Mode Battery Charger and Boost"
depends on MFD_SPMI_PMIC || COMPILE_TEST
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 6c7da920ea83..41cb64f09e49 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_CHARGER_MAX77650) += max77650-charger.o
obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_CHARGER_MP2629) += mp2629_charger.o
obj-$(CONFIG_CHARGER_QCOM_SMBB) += qcom_smbb.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c
index b8e1ec106627..3b820110ecfa 100644
--- a/drivers/power/supply/bd70528-charger.c
+++ b/drivers/power/supply/bd70528-charger.c
@@ -335,14 +335,14 @@ static int bd70528_get_present(struct bd70528_psy *bdpsy, int *val)
return 0;
}
-struct linear_range {
+struct bd70528_linear_range {
int min;
int step;
int vals;
int low_sel;
};
-static const struct linear_range current_limit_ranges[] = {
+static const struct bd70528_linear_range current_limit_ranges[] = {
{
.min = 5,
.step = 1,
@@ -374,7 +374,7 @@ static const struct linear_range current_limit_ranges[] = {
* voltage for low temperatures. The driver currently only reads
* the charge current at room temperature. We do set both though.
*/
-static const struct linear_range warm_charge_curr[] = {
+static const struct bd70528_linear_range warm_charge_curr[] = {
{
.min = 10,
.step = 10,
@@ -398,7 +398,7 @@ static const struct linear_range warm_charge_curr[] = {
#define MAX_WARM_CHG_CURR_SEL 0x1f
#define MIN_CHG_CURR_SEL 0x0
-static int find_value_for_selector_low(const struct linear_range *r,
+static int find_value_for_selector_low(const struct bd70528_linear_range *r,
int selectors, unsigned int sel,
unsigned int *val)
{
@@ -420,7 +420,7 @@ static int find_value_for_selector_low(const struct linear_range *r,
* I guess it is enough if we use voltage/current which is closest (below)
* the requested?
*/
-static int find_selector_for_value_low(const struct linear_range *r,
+static int find_selector_for_value_low(const struct bd70528_linear_range *r,
int selectors, unsigned int val,
unsigned int *sel, bool *found)
{
diff --git a/drivers/power/supply/mp2629_charger.c b/drivers/power/supply/mp2629_charger.c
new file mode 100644
index 000000000000..bdf924b73e47
--- /dev/null
+++ b/drivers/power/supply/mp2629_charger.c
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MP2629 battery charger driver
+ *
+ * Copyright 2020 Monolithic Power Systems, Inc
+ *
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/types.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/mp2629.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+#define MP2629_REG_INPUT_ILIM 0x00
+#define MP2629_REG_INPUT_VLIM 0x01
+#define MP2629_REG_CHARGE_CTRL 0x04
+#define MP2629_REG_CHARGE_ILIM 0x05
+#define MP2629_REG_PRECHARGE 0x06
+#define MP2629_REG_TERM_CURRENT 0x06
+#define MP2629_REG_CHARGE_VLIM 0x07
+#define MP2629_REG_TIMER_CTRL 0x08
+#define MP2629_REG_IMPEDANCE_COMP 0x09
+#define MP2629_REG_INTERRUPT 0x0b
+#define MP2629_REG_STATUS 0x0c
+#define MP2629_REG_FAULT 0x0d
+
+#define MP2629_MASK_INPUT_TYPE GENMASK(7, 5)
+#define MP2629_MASK_CHARGE_TYPE GENMASK(4, 3)
+#define MP2629_MASK_CHARGE_CTRL GENMASK(5, 4)
+#define MP2629_MASK_WDOG_CTRL GENMASK(5, 4)
+#define MP2629_MASK_IMPEDANCE GENMASK(7, 4)
+
+#define MP2629_INPUTSOURCE_CHANGE GENMASK(7, 5)
+#define MP2629_CHARGING_CHANGE GENMASK(4, 3)
+#define MP2629_FAULT_BATTERY BIT(3)
+#define MP2629_FAULT_THERMAL BIT(4)
+#define MP2629_FAULT_INPUT BIT(5)
+#define MP2629_FAULT_OTG BIT(6)
+
+#define MP2629_MAX_BATT_CAPACITY 100
+
+#define MP2629_PROPS(_idx, _min, _max, _step) \
+ [_idx] = { \
+ .min = _min, \
+ .max = _max, \
+ .step = _step, \
+}
+
+enum mp2629_source_type {
+ MP2629_SOURCE_TYPE_NO_INPUT,
+ MP2629_SOURCE_TYPE_NON_STD,
+ MP2629_SOURCE_TYPE_SDP,
+ MP2629_SOURCE_TYPE_CDP,
+ MP2629_SOURCE_TYPE_DCP,
+ MP2629_SOURCE_TYPE_OTG = 7,
+};
+
+enum mp2629_field {
+ INPUT_ILIM,
+ INPUT_VLIM,
+ CHARGE_ILIM,
+ CHARGE_VLIM,
+ PRECHARGE,
+ TERM_CURRENT,
+ MP2629_MAX_FIELD
+};
+
+struct mp2629_charger {
+ struct device *dev;
+ int status;
+ int fault;
+
+ struct regmap *regmap;
+ struct regmap_field *regmap_fields[MP2629_MAX_FIELD];
+ struct mutex lock;
+ struct power_supply *usb;
+ struct power_supply *battery;
+ struct iio_channel *iiochan[MP2629_ADC_CHAN_END];
+};
+
+struct mp2629_prop {
+ int reg;
+ int mask;
+ int min;
+ int max;
+ int step;
+ int shift;
+};
+
+static enum power_supply_usb_type mp2629_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_PD_DRP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN
+};
+
+static enum power_supply_property mp2629_charger_usb_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+};
+
+static enum power_supply_property mp2629_charger_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+};
+
+static struct mp2629_prop props[] = {
+ MP2629_PROPS(INPUT_ILIM, 100000, 3250000, 50000),
+ MP2629_PROPS(INPUT_VLIM, 3800000, 5300000, 100000),
+ MP2629_PROPS(CHARGE_ILIM, 320000, 4520000, 40000),
+ MP2629_PROPS(CHARGE_VLIM, 3400000, 4670000, 10000),
+ MP2629_PROPS(PRECHARGE, 120000, 720000, 40000),
+ MP2629_PROPS(TERM_CURRENT, 80000, 680000, 40000),
+};
+
+static const struct reg_field mp2629_reg_fields[] = {
+ [INPUT_ILIM] = REG_FIELD(MP2629_REG_INPUT_ILIM, 0, 5),
+ [INPUT_VLIM] = REG_FIELD(MP2629_REG_INPUT_VLIM, 0, 3),
+ [CHARGE_ILIM] = REG_FIELD(MP2629_REG_CHARGE_ILIM, 0, 6),
+ [CHARGE_VLIM] = REG_FIELD(MP2629_REG_CHARGE_VLIM, 1, 7),
+ [PRECHARGE] = REG_FIELD(MP2629_REG_PRECHARGE, 4, 7),
+ [TERM_CURRENT] = REG_FIELD(MP2629_REG_TERM_CURRENT, 0, 3),
+};
+
+static char *adc_chan_name[] = {
+ "mp2629-batt-volt",
+ "mp2629-system-volt",
+ "mp2629-input-volt",
+ "mp2629-batt-current",
+ "mp2629-input-current",
+};
+
+static int mp2629_read_adc(struct mp2629_charger *charger,
+ enum mp2629_adc_chan ch,
+ union power_supply_propval *val)
+{
+ int ret;
+ int chval;
+
+ ret = iio_read_channel_processed(charger->iiochan[ch], &chval);
+ if (ret)
+ return ret;
+
+ val->intval = chval * 1000;
+
+ return 0;
+}
+
+static int mp2629_get_prop(struct mp2629_charger *charger,
+ enum mp2629_field fld,
+ union power_supply_propval *val)
+{
+ int ret;
+ unsigned int rval;
+
+ ret = regmap_field_read(charger->regmap_fields[fld], &rval);
+ if (ret)
+ return ret;
+
+ val->intval = rval * props[fld].step + props[fld].min;
+
+ return 0;
+}
+
+static int mp2629_set_prop(struct mp2629_charger *charger,
+ enum mp2629_field fld,
+ const union power_supply_propval *val)
+{
+ unsigned int rval;
+
+ if (val->intval < props[fld].min || val->intval > props[fld].max)
+ return -EINVAL;
+
+ rval = (val->intval - props[fld].min) / props[fld].step;
+ return regmap_field_write(charger->regmap_fields[fld], rval);
+}
+
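A worked example of the linear value/selector mapping above, using the INPUT_ILIM range from props[] (min 100000 uA, step 50000 uA):

/* Requesting a 500 mA input limit:
 *   sel = (500000 - 100000) / 50000 = 8
 * and reading back:
 *   8 * 50000 + 100000 = 500000 uA
 */
static unsigned int input_ilim_to_sel(int microamp)
{
	return (microamp - 100000) / 50000;
}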
+static int mp2629_get_battery_capacity(struct mp2629_charger *charger,
+ union power_supply_propval *val)
+{
+ union power_supply_propval vnow, vlim;
+ int ret;
+
+ ret = mp2629_read_adc(charger, MP2629_BATT_VOLT, &vnow);
+ if (ret)
+ return ret;
+
+ ret = mp2629_get_prop(charger, CHARGE_VLIM, &vlim);
+ if (ret)
+ return ret;
+
+ val->intval = (vnow.intval * 100) / vlim.intval;
+ val->intval = min(val->intval, MP2629_MAX_BATT_CAPACITY);
+
+ return 0;
+}
+
+static int mp2629_charger_battery_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(psy->dev.parent);
+ unsigned int rval;
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = mp2629_read_adc(charger, MP2629_BATT_VOLT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = mp2629_read_adc(charger, MP2629_BATT_CURRENT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = 4520000;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = 4670000;
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = mp2629_get_battery_capacity(charger, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ ret = mp2629_get_prop(charger, TERM_CURRENT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ ret = mp2629_get_prop(charger, PRECHARGE, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = mp2629_get_prop(charger, CHARGE_VLIM, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = mp2629_get_prop(charger, CHARGE_ILIM, val);
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (!charger->fault)
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ if (MP2629_FAULT_BATTERY & charger->fault)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (MP2629_FAULT_THERMAL & charger->fault)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (MP2629_FAULT_INPUT & charger->fault)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ break;
+
+ rval = (rval & MP2629_MASK_CHARGE_TYPE) >> 3;
+ switch (rval) {
+ case 0x00:
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 0x01:
+ case 0x02:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x03:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ break;
+
+ rval = (rval & MP2629_MASK_CHARGE_TYPE) >> 3;
+ switch (rval) {
+ case 0x00:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ case 0x01:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case 0x02:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mp2629_charger_battery_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(psy->dev.parent);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return mp2629_set_prop(charger, TERM_CURRENT, val);
+
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return mp2629_set_prop(charger, PRECHARGE, val);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return mp2629_set_prop(charger, CHARGE_VLIM, val);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return mp2629_set_prop(charger, CHARGE_ILIM, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mp2629_charger_usb_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(psy->dev.parent);
+ unsigned int rval;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ break;
+
+ val->intval = !!(rval & MP2629_MASK_INPUT_TYPE);
+ break;
+
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ break;
+
+ rval = (rval & MP2629_MASK_INPUT_TYPE) >> 5;
+ switch (rval) {
+ case MP2629_SOURCE_TYPE_SDP:
+ val->intval = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case MP2629_SOURCE_TYPE_CDP:
+ val->intval = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case MP2629_SOURCE_TYPE_DCP:
+ val->intval = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case MP2629_SOURCE_TYPE_OTG:
+ val->intval = POWER_SUPPLY_USB_TYPE_PD_DRP;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = mp2629_read_adc(charger, MP2629_INPUT_VOLT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = mp2629_read_adc(charger, MP2629_INPUT_CURRENT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ ret = mp2629_get_prop(charger, INPUT_VLIM, val);
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = mp2629_get_prop(charger, INPUT_ILIM, val);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mp2629_charger_usb_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(psy->dev.parent);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return mp2629_set_prop(charger, INPUT_VLIM, val);
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return mp2629_set_prop(charger, INPUT_ILIM, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mp2629_charger_battery_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return (psp == POWER_SUPPLY_PROP_PRECHARGE_CURRENT) ||
+ (psp == POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT) ||
+ (psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT) ||
+ (psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE);
+}
+
+static int mp2629_charger_usb_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return (psp == POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT) ||
+ (psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT);
+}
+
+static irqreturn_t mp2629_irq_handler(int irq, void *dev_id)
+{
+ struct mp2629_charger *charger = dev_id;
+ unsigned int rval;
+ int ret;
+
+ mutex_lock(&charger->lock);
+
+ ret = regmap_read(charger->regmap, MP2629_REG_FAULT, &rval);
+ if (ret)
+ goto unlock;
+
+ if (rval) {
+ charger->fault = rval;
+ if (MP2629_FAULT_BATTERY & rval)
+ dev_err(charger->dev, "Battery fault OVP\n");
+ else if (MP2629_FAULT_THERMAL & rval)
+ dev_err(charger->dev, "Thermal shutdown fault\n");
+ else if (MP2629_FAULT_INPUT & rval)
+ dev_err(charger->dev, "no input or input OVP\n");
+ else if (MP2629_FAULT_OTG & rval)
+ dev_err(charger->dev, "VIN overloaded\n");
+
+ goto unlock;
+ }
+
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ goto unlock;
+
+ if (rval & MP2629_INPUTSOURCE_CHANGE)
+ power_supply_changed(charger->usb);
+ else if (rval & MP2629_CHARGING_CHANGE)
+ power_supply_changed(charger->battery);
+
+unlock:
+ mutex_unlock(&charger->lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct power_supply_desc mp2629_usb_desc = {
+ .name = "mp2629_usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = mp2629_usb_types,
+ .num_usb_types = ARRAY_SIZE(mp2629_usb_types),
+ .properties = mp2629_charger_usb_props,
+ .num_properties = ARRAY_SIZE(mp2629_charger_usb_props),
+ .get_property = mp2629_charger_usb_get_prop,
+ .set_property = mp2629_charger_usb_set_prop,
+ .property_is_writeable = mp2629_charger_usb_prop_writeable,
+};
+
+static const struct power_supply_desc mp2629_battery_desc = {
+ .name = "mp2629_battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = mp2629_charger_bat_props,
+ .num_properties = ARRAY_SIZE(mp2629_charger_bat_props),
+ .get_property = mp2629_charger_battery_get_prop,
+ .set_property = mp2629_charger_battery_set_prop,
+ .property_is_writeable = mp2629_charger_battery_prop_writeable,
+};
+
+static ssize_t batt_impedance_compensation_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(dev->parent);
+ unsigned int rval;
+ int ret;
+
+ ret = regmap_read(charger->regmap, MP2629_REG_IMPEDANCE_COMP, &rval);
+ if (ret)
+ return ret;
+
+ rval = (rval >> 4) * 10;
+ return sprintf(buf, "%d mohm\n", rval);
+}
+
+static ssize_t batt_impedance_compensation_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(dev->parent);
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val > 140)
+ return -ERANGE;
+
+ /* the register holds multiples of 10 mohm, so scale down */
+ val = val / 10;
+ ret = regmap_update_bits(charger->regmap, MP2629_REG_IMPEDANCE_COMP,
+ MP2629_MASK_IMPEDANCE, val << 4);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(batt_impedance_compensation);
+
+static struct attribute *mp2629_charger_sysfs_attrs[] = {
+ &dev_attr_batt_impedance_compensation.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(mp2629_charger_sysfs);
+
+static void mp2629_charger_disable(void *data)
+{
+ struct mp2629_charger *charger = data;
+
+ regmap_update_bits(charger->regmap, MP2629_REG_CHARGE_CTRL,
+ MP2629_MASK_CHARGE_CTRL, 0);
+}
+
+static int mp2629_charger_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mp2629_data *ddata = dev_get_drvdata(dev->parent);
+ struct mp2629_charger *charger;
+ struct power_supply_config psy_cfg = {};
+ int ret, i, irq;
+
+ charger = devm_kzalloc(dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->regmap = ddata->regmap;
+ charger->dev = dev;
+ platform_set_drvdata(pdev, charger);
+
+ irq = platform_get_irq_optional(to_platform_device(dev->parent), 0);
+ if (irq < 0) {
+ dev_err(dev, "get irq fail: %d\n", irq);
+ return irq;
+ }
+
+ for (i = 0; i < MP2629_MAX_FIELD; i++) {
+ charger->regmap_fields[i] = devm_regmap_field_alloc(dev,
+ charger->regmap, mp2629_reg_fields[i]);
+ if (IS_ERR(charger->regmap_fields[i])) {
+ dev_err(dev, "regmap field alloc fail %d\n", i);
+ return PTR_ERR(charger->regmap_fields[i]);
+ }
+ }
+
+ for (i = 0; i < MP2629_ADC_CHAN_END; i++) {
+ charger->iiochan[i] = devm_iio_channel_get(dev,
+ adc_chan_name[i]);
+ if (IS_ERR(charger->iiochan[i])) {
+ dev_err(dev, "iio chan get %s err\n", adc_chan_name[i]);
+ return PTR_ERR(charger->iiochan[i]);
+ }
+ }
+
+ ret = devm_add_action_or_reset(dev, mp2629_charger_disable, charger);
+ if (ret)
+ return ret;
+
+ charger->usb = devm_power_supply_register(dev, &mp2629_usb_desc, NULL);
+ if (IS_ERR(charger->usb)) {
+ dev_err(dev, "power supply register usb failed\n");
+ return PTR_ERR(charger->usb);
+ }
+
+ psy_cfg.drv_data = charger;
+ psy_cfg.attr_grp = mp2629_charger_sysfs_groups;
+ charger->battery = devm_power_supply_register(dev,
+ &mp2629_battery_desc, &psy_cfg);
+ if (IS_ERR(charger->battery)) {
+ dev_err(dev, "power supply register battery failed\n");
+ return PTR_ERR(charger->battery);
+ }
+
+ ret = regmap_update_bits(charger->regmap, MP2629_REG_CHARGE_CTRL,
+ MP2629_MASK_CHARGE_CTRL, BIT(4));
+ if (ret) {
+ dev_err(dev, "enable charge fail: %d\n", ret);
+ return ret;
+ }
+
+ regmap_update_bits(charger->regmap, MP2629_REG_TIMER_CTRL,
+ MP2629_MASK_WDOG_CTRL, 0);
+
+ mutex_init(&charger->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mp2629_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "mp2629-charger", charger);
+ if (ret) {
+ dev_err(dev, "failed to request gpio IRQ\n");
+ return ret;
+ }
+
+ regmap_update_bits(charger->regmap, MP2629_REG_INTERRUPT,
+ GENMASK(6, 5), BIT(6) | BIT(5));
+
+ return 0;
+}
+
+static const struct of_device_id mp2629_charger_of_match[] = {
+ { .compatible = "mps,mp2629_charger"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2629_charger_of_match);
+
+static struct platform_driver mp2629_charger_driver = {
+ .driver = {
+ .name = "mp2629_charger",
+ .of_match_table = mp2629_charger_of_match,
+ },
+ .probe = mp2629_charger_probe,
+};
+module_platform_driver(mp2629_charger_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MP2629 Charger driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 65c23ef6408d..b3c05ff05783 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -16,7 +16,7 @@
#include <linux/power_supply.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/vermagic.h>
+#include <generated/utsrelease.h>
enum test_power_id {
TEST_AC,
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index eb328655bc01..61a63a16b5e7 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -26,9 +26,6 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
-/* Local defines */
-#define MSR_PLATFORM_POWER_LIMIT 0x0000065C
-
/* bitmasks for RAPL MSRs, used by primitive access functions */
#define ENERGY_STATUS_MASK 0xffffffff
@@ -989,6 +986,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &rapl_defaults_core),
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 83c45659bc9d..e54aa2d82f50 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -1096,8 +1096,8 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
lpm_priv->tb_cache_internal = NULL;
lpm_priv->tb_cache = NULL;
} else if (tb_cache) {
- if (tb_cache != (void *)_ALIGN_UP((unsigned long)tb_cache, 128)
- || tb_cache_size != _ALIGN_UP(tb_cache_size, 128)) {
+ if (tb_cache != (void *)ALIGN((unsigned long)tb_cache, 128)
+ || tb_cache_size != ALIGN(tb_cache_size, 128)) {
dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n",
__func__, __LINE__);
result = -EINVAL;
@@ -1111,12 +1111,10 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
lpm_priv->tb_cache_internal = kzalloc(
lpm_priv->tb_cache_size + 127, GFP_KERNEL);
if (!lpm_priv->tb_cache_internal) {
- dev_err(sbd_core(), "%s:%u: alloc internal tb_cache "
- "failed\n", __func__, __LINE__);
result = -ENOMEM;
goto fail_malloc;
}
- lpm_priv->tb_cache = (void *)_ALIGN_UP(
+ lpm_priv->tb_cache = (void *)ALIGN(
(unsigned long)lpm_priv->tb_cache_internal, 128);
}
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index ddaa5ea5801a..4ed131eaff51 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -858,13 +858,13 @@ static int ps3_vuart_handle_port_interrupt(struct ps3_system_bus_device *dev)
return 0;
}
-struct vuart_bus_priv {
+static struct vuart_bus_priv {
struct ports_bmp *bmp;
unsigned int virq;
struct mutex probe_mutex;
int use_count;
struct ps3_system_bus_device *devices[PORT_COUNT];
-} static vuart_bus_priv;
+} vuart_bus_priv;
/**
* ps3_vuart_irq_handler - first stage interrupt handler
@@ -917,7 +917,6 @@ static int ps3_vuart_bus_interrupt_get(void)
vuart_bus_priv.bmp = kzalloc(sizeof(struct ports_bmp), GFP_KERNEL);
if (!vuart_bus_priv.bmp) {
- pr_debug("%s:%d: kzalloc failed.\n", __func__, __LINE__);
result = -ENOMEM;
goto fail_bmp_malloc;
}
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 93d574faf1fe..375cd6e4aade 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -136,6 +136,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
caps.pps = ptp->info->pps;
caps.n_pins = ptp->info->n_pins;
caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
+ caps.adjust_phase = ptp->info->adjphase != NULL;
if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
err = -EFAULT;
break;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index acabbe72e55e..03a246e60fd9 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -146,6 +146,15 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
else
err = ops->adjfreq(ops, ppb);
ptp->dialed_frequency = tx->freq;
+ } else if (tx->modes & ADJ_OFFSET) {
+ if (ops->adjphase) {
+ s32 offset = tx->offset;
+
+ if (!(tx->modes & ADJ_NANO))
+ offset *= NSEC_PER_USEC;
+
+ err = ops->adjphase(ops, offset);
+ }
} else if (tx->modes == 0) {
tx->freq = ptp->dialed_frequency;
err = 0;
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 032e112c3dd9..ceb6bc58f3b4 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timekeeping.h>
@@ -24,6 +25,16 @@ MODULE_LICENSE("GPL");
#define SETTIME_CORRECTION (0)
+static long set_write_phase_ready(struct ptp_clock_info *ptp)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ channel->write_phase_ready = 1;
+
+ return 0;
+}
+
static int char_array_to_timespec(u8 *buf,
u8 count,
struct timespec64 *ts)
@@ -780,7 +791,7 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
/* Page size 128, last 4 bytes of page skipped */
if (((loaddr > 0x7b) && (loaddr <= 0x7f))
- || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ || loaddr > 0xfb)
continue;
err = idtcm_write(idtcm, regaddr, 0, &val, sizeof(val));
@@ -871,6 +882,64 @@ static int idtcm_set_pll_mode(struct idtcm_channel *channel,
/* PTP Hardware Clock interface */
+/*
+ * Maximum absolute value for the write phase offset, in picoseconds.
+ *
+ * The destination is a signed 32-bit register with a resolution of 50 ps:
+ * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
+ */
+static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ int err;
+ u8 i;
+ u8 buf[4] = {0};
+ s32 phase_50ps;
+ s64 offset_ps;
+
+ if (channel->pll_mode != PLL_MODE_WRITE_PHASE) {
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
+
+ if (err)
+ return err;
+
+ channel->write_phase_ready = 0;
+
+ ptp_schedule_worker(channel->ptp_clock,
+ msecs_to_jiffies(WR_PHASE_SETUP_MS));
+ }
+
+ if (!channel->write_phase_ready)
+ delta_ns = 0;
+
+ offset_ps = (s64)delta_ns * 1000;
+
+ /*
+ * Check for 32-bit signed max * 50:
+ *
+ * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
+ */
+ if (offset_ps > MAX_ABS_WRITE_PHASE_PICOSECONDS)
+ offset_ps = MAX_ABS_WRITE_PHASE_PICOSECONDS;
+ else if (offset_ps < -MAX_ABS_WRITE_PHASE_PICOSECONDS)
+ offset_ps = -MAX_ABS_WRITE_PHASE_PICOSECONDS;
+
+ phase_50ps = div64_s64(offset_ps, 50);
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = phase_50ps & 0xff;
+ phase_50ps >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase, DPLL_WR_PHASE,
+ buf, sizeof(buf));
+
+ return err;
+}
+
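Worked numbers for the conversion above: delta_ns = 1000 gives offset_ps = 1,000,000 and phase_50ps = 1,000,000 / 50 = 20,000. The clamp keeps |offset_ps| <= 107,374,182,350 (0x7fffffff * 50), so the resulting 50 ps count always fits the signed 32-bit destination register.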
static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct idtcm_channel *channel =
@@ -977,6 +1046,24 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
return err;
}
+static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ struct idtcm *idtcm = channel->idtcm;
+
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjphase(channel, delta);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
static int idtcm_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -1055,13 +1142,16 @@ static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 1,
+ .adjphase = &idtcm_adjphase,
.adjfreq = &idtcm_adjfreq,
.adjtime = &idtcm_adjtime,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime,
.enable = &idtcm_enable,
+ .do_aux_work = &set_write_phase_ready,
};
+
static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
{
struct idtcm_channel *channel;
@@ -1146,6 +1236,8 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
if (!channel->ptp_clock)
return -ENOTSUPP;
+ channel->write_phase_ready = 0;
+
dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n",
index, channel->ptp_clock->index);
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index 6c1f93ab46f3..3de0eb72889c 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -15,6 +15,8 @@
#define FW_FILENAME "idtcm.bin"
#define MAX_PHC_PLL 4
+#define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL)
+
#define PLL_MASK_ADDR (0xFFA5)
#define DEFAULT_PLL_MASK (0x04)
@@ -33,8 +35,9 @@
#define POST_SM_RESET_DELAY_MS (3000)
#define PHASE_PULL_IN_THRESHOLD_NS (150000)
-#define TOD_WRITE_OVERHEAD_COUNT_MAX (5)
-#define TOD_BYTE_COUNT (11)
+#define TOD_WRITE_OVERHEAD_COUNT_MAX (5)
+#define TOD_BYTE_COUNT (11)
+#define WR_PHASE_SETUP_MS (5000)
/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
enum pll_mode {
@@ -77,6 +80,7 @@ struct idtcm_channel {
u16 hw_dpll_n;
enum pll_mode pll_mode;
u16 output_mask;
+ int write_phase_ready;
};
struct idtcm {
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
index b63ac240308b..179f6c472e50 100644
--- a/drivers/ptp/ptp_idt82p33.c
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -23,12 +23,12 @@ MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
/* Module Parameters */
-u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
+static u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
module_param(sync_tod_timeout, uint, 0);
MODULE_PARM_DESC(sync_tod_timeout,
"duration in second to keep SYNC_TOD on (set to 0 to keep it always on)");
-u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
+static u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
module_param(phase_snap_threshold, uint, 0);
MODULE_PARM_DESC(phase_snap_threshold,
"threshold (150000ns by default) below which adjtime would ignore");
@@ -881,7 +881,7 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
/* Page size 128, last 4 bytes of page skipped */
if (((loaddr > 0x7b) && (loaddr <= 0x7f))
- || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ || loaddr > 0xfb)
continue;
err = idt82p33_write(idt82p33, _ADDR(page, loaddr),
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index 52d77db39829..7711651ff19e 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -783,16 +783,10 @@ static struct mii_timestamping_ctrl ines_ctrl = {
static int ines_ptp_ctrl_probe(struct platform_device *pld)
{
struct ines_clock *clock;
- struct resource *res;
void __iomem *addr;
int err = 0;
- res = platform_get_resource(pld, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pld->dev, "missing memory resource\n");
- return -EINVAL;
- }
- addr = devm_ioremap_resource(&pld->dev, res);
+ addr = devm_platform_ioremap_resource(pld, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto out;
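
devm_platform_ioremap_resource() folds the platform_get_resource() lookup and the devm_ioremap_resource() mapping (including its error message) into one call. A minimal sketch of the resulting probe shape; example_probe is a hypothetical driver entry point, not part of ptp_ines:

    #include <linux/platform_device.h>
    #include <linux/err.h>
    #include <linux/io.h>

    static int example_probe(struct platform_device *pdev)
    {
        void __iomem *base;

        /* one call replaces platform_get_resource() + devm_ioremap_resource() */
        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
            return PTR_ERR(base);

        /* ... use base ... */
        return 0;
    }
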
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
index fc7d0b77e118..658d33fc3195 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm.c
@@ -22,7 +22,7 @@ struct kvm_ptp_clock {
struct ptp_clock_info caps;
};
-DEFINE_SPINLOCK(kvm_ptp_lock);
+static DEFINE_SPINLOCK(kvm_ptp_lock);
static struct pvclock_vsyscall_time_info *hv_clock;
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 8155f59ece38..451608e960a1 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -572,14 +572,12 @@ static void dma_req_free(struct kref *ref)
struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
refcount);
struct mport_cdev_priv *priv = req->priv;
- unsigned int i;
dma_unmap_sg(req->dmach->device->dev,
req->sgt.sgl, req->sgt.nents, req->dir);
sg_free_table(&req->sgt);
if (req->page_list) {
- for (i = 0; i < req->nr_pages; i++)
- put_page(req->page_list[i]);
+ unpin_user_pages(req->page_list, req->nr_pages);
kfree(req->page_list);
}
@@ -815,7 +813,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
struct mport_dma_req *req;
struct mport_dev *md = priv->md;
struct dma_chan *chan;
- int i, ret;
+ int ret;
int nents;
if (xfer->length == 0)
@@ -862,7 +860,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
goto err_req;
}
- pinned = get_user_pages_fast(
+ pinned = pin_user_pages_fast(
(unsigned long)xfer->loc_addr & PAGE_MASK,
nr_pages,
dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
@@ -870,13 +868,18 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
if (pinned != nr_pages) {
if (pinned < 0) {
- rmcd_error("get_user_pages_unlocked err=%ld",
+ rmcd_error("pin_user_pages_fast err=%ld",
pinned);
nr_pages = 0;
} else
rmcd_error("pinned %ld out of %ld pages",
pinned, nr_pages);
ret = -EFAULT;
+ /*
+ * Set nr_pages to the number of pages actually pinned,
+ * so the error handler below unpins exactly those pages:
+ */
+ nr_pages = pinned;
goto err_pg;
}
@@ -946,8 +949,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
err_pg:
if (!req->page_list) {
- for (i = 0; i < nr_pages; i++)
- put_page(page_list[i]);
+ unpin_user_pages(page_list, nr_pages);
kfree(page_list);
}
err_req:
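
The conversion pairs pin_user_pages_fast() with unpin_user_pages(), so pages pinned for DMA are tracked as FOLL_PIN and released in one call, including the partial-pin case above where nr_pages is reset to the pinned count. A minimal sketch of the pairing, assuming a hypothetical user buffer; example_dma_pin is not part of this driver:

    #include <linux/mm.h>
    #include <linux/slab.h>

    static int example_dma_pin(unsigned long uaddr, int nr_pages, bool write)
    {
        struct page **pages;
        int pinned;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
            return -ENOMEM;

        pinned = pin_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
                                     write ? FOLL_WRITE : 0, pages);
        if (pinned < 0) {
            kfree(pages);
            return pinned;
        }

        /* ... set up and run the DMA transfer ... */

        /* unpin exactly as many pages as were pinned */
        unpin_user_pages(pages, pinned);
        kfree(pages);
        return pinned == nr_pages ? 0 : -EFAULT;
    }
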
@@ -2379,13 +2381,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
cdev_init(&md->cdev, &mport_fops);
md->cdev.owner = THIS_MODULE;
- ret = cdev_device_add(&md->cdev, &md->dev);
- if (ret) {
- rmcd_error("Failed to register mport %d (err=%d)",
- mport->id, ret);
- goto err_cdev;
- }
-
INIT_LIST_HEAD(&md->doorbells);
spin_lock_init(&md->db_lock);
INIT_LIST_HEAD(&md->portwrites);
@@ -2405,6 +2400,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
#else
md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif
+
+ ret = cdev_device_add(&md->cdev, &md->dev);
+ if (ret) {
+ rmcd_error("Failed to register mport %d (err=%d)",
+ mport->id, ret);
+ goto err_cdev;
+ }
ret = rio_query_mport(mport, &attr);
if (!ret) {
md->properties.flags = attr.flags;
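
Moving cdev_device_add() after the list/lock initialization closes a window in which the freshly created device node could be opened before the state its fops rely on exists. The general shape of the publish-last pattern, with example_dev as a hypothetical device (this sketch assumes ed->dev has already been device_initialize()d and assigned a devt):

    #include <linux/cdev.h>
    #include <linux/device.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/spinlock.h>

    struct example_dev {
        struct cdev cdev;
        struct device dev;
        spinlock_t lock;
    };

    /* publish-last: initialize everything a fops handler may touch first */
    static int example_register(struct example_dev *ed,
                                const struct file_operations *fops)
    {
        spin_lock_init(&ed->lock);
        cdev_init(&ed->cdev, fops);
        ed->cdev.owner = THIS_MODULE;

        /* only now make the node visible to user space */
        return cdev_device_add(&ed->cdev, &ed->dev);
    }
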
diff --git a/drivers/regulator/88pg86x.c b/drivers/regulator/88pg86x.c
index d5ef55c81185..71cfa2c5de5e 100644
--- a/drivers/regulator/88pg86x.c
+++ b/drivers/regulator/88pg86x.c
@@ -11,13 +11,13 @@ static const struct regulator_ops pg86x_ops = {
.list_voltage = regulator_list_voltage_linear_range,
};
-static const struct regulator_linear_range pg86x_buck1_ranges[] = {
+static const struct linear_range pg86x_buck1_ranges[] = {
REGULATOR_LINEAR_RANGE( 0, 0, 10, 0),
REGULATOR_LINEAR_RANGE(1000000, 11, 34, 25000),
REGULATOR_LINEAR_RANGE(1600000, 35, 47, 50000),
};
-static const struct regulator_linear_range pg86x_buck2_ranges[] = {
+static const struct linear_range pg86x_buck2_ranges[] = {
REGULATOR_LINEAR_RANGE( 0, 0, 15, 0),
REGULATOR_LINEAR_RANGE(1000000, 16, 39, 25000),
REGULATOR_LINEAR_RANGE(1600000, 40, 52, 50000),
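
This and the following regulator conversions are mechanical: struct regulator_linear_range is replaced by the generic struct linear_range from <linux/linear_range.h> (backed by lib/linear_ranges.c), while the REGULATOR_LINEAR_RANGE() initializer stays. A minimal sketch of the generic helper, with example_range mirroring the 1.0 V / 25 mV row above (the function is hypothetical):

    #include <linux/linear_range.h>

    /* 1000000 uV at selector 11, then 25000 uV per step up to selector 34 */
    static const struct linear_range example_range = {
        .min = 1000000,
        .min_sel = 11,
        .max_sel = 34,
        .step = 25000,
    };

    static int example_selector_to_uV(unsigned int sel)
    {
        unsigned int uV;
        int ret;

        ret = linear_range_get_value(&example_range, sel, &uV);
        if (ret)
            return ret;

        return uV;      /* sel 12 -> 1000000 + 1 * 25000 = 1025000 */
    }
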
diff --git a/drivers/regulator/88pm800-regulator.c b/drivers/regulator/88pm800-regulator.c
index 69ae25886181..d08ee81ed1ac 100644
--- a/drivers/regulator/88pm800-regulator.c
+++ b/drivers/regulator/88pm800-regulator.c
@@ -134,13 +134,13 @@ struct pm800_regulator_info {
}
/* Ranges are sorted in ascending order. */
-static const struct regulator_linear_range buck1_volt_range[] = {
+static const struct linear_range buck1_volt_range[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x54, 50000),
};
/* BUCK 2~5 have same ranges. */
-static const struct regulator_linear_range buck2_5_volt_range[] = {
+static const struct linear_range buck2_5_volt_range[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x72, 50000),
};
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index f4b72cb098ef..8f677f5d79b4 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig REGULATOR
bool "Voltage and Current Regulator Support"
+ select LINEAR_RANGES
help
Generic Voltage and Current Regulator support.
@@ -585,6 +586,16 @@ config REGULATOR_MAX77802
Exynos5420/Exynos5800 SoCs to control various voltages.
It includes support for control of voltage and ramp speed.
+config REGULATOR_MAX77826
+ tristate "Maxim 77826 regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver controls a Maxim 77826 regulator via I2C bus.
+ The regulator includes 15 LDOs, a BUCK and a BUCK BOOST regulator.
+ It includes support for control of output voltage. This
+ regulator is found on the Samsung Galaxy S5 (klte) smartphone.
+
config REGULATOR_MC13XXX_CORE
tristate
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 6610ee001d9a..e8f163371071 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
obj-$(CONFIG_REGULATOR_MAX77693) += max77693-regulator.o
obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
+obj-$(CONFIG_REGULATOR_MAX77826) += max77826-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index f60e1b26c2d2..716ca5bb178e 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -139,17 +139,6 @@ static const unsigned int ldo_vintcore_voltages[] = {
1350000,
};
-static const unsigned int ldo_sdio_voltages[] = {
- 1160000,
- 1050000,
- 1100000,
- 1500000,
- 1800000,
- 2200000,
- 2910000,
- 3050000,
-};
-
static const unsigned int fixed_1200000_voltage[] = {
1200000,
};
@@ -166,10 +155,6 @@ static const unsigned int fixed_2050000_voltage[] = {
2050000,
};
-static const unsigned int fixed_3300000_voltage[] = {
- 3300000,
-};
-
static const unsigned int ldo_vana_voltages[] = {
1050000,
1075000,
@@ -192,13 +177,6 @@ static const unsigned int ldo_vaudio_voltages[] = {
2600000, /* Duplicated in Vaudio and IsoUicc Control register. */
};
-static const unsigned int ldo_vdmic_voltages[] = {
- 1800000,
- 1900000,
- 2000000,
- 2850000,
-};
-
static DEFINE_MUTEX(shared_mode_mutex);
static struct ab8500_shared_mode ldo_anamic1_shared;
static struct ab8500_shared_mode ldo_anamic2_shared;
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 0fa97f934df4..19b9742c9ecc 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -220,13 +220,13 @@ static const struct regmap_config act8865_regmap_config = {
.val_bits = 8,
};
-static const struct regulator_linear_range act8865_voltage_ranges[] = {
+static const struct linear_range act8865_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
};
-static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
+static const struct linear_range act8600_sudcdc_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0),
REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000),
REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000),
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
index d2f804dbc785..6a62f946ccae 100644
--- a/drivers/regulator/act8945a-regulator.c
+++ b/drivers/regulator/act8945a-regulator.c
@@ -73,7 +73,7 @@ struct act8945a_pmic {
u32 op_mode[ACT8945A_ID_MAX];
};
-static const struct regulator_linear_range act8945a_voltage_ranges[] = {
+static const struct linear_range act8945a_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 1a3d7b720f5e..ade0bef4569d 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -87,7 +87,7 @@ static const struct regulator_ops arizona_ldo1_hc_ops = {
.set_bypass = regulator_set_bypass_regmap,
};
-static const struct regulator_linear_range arizona_ldo1_hc_ranges[] = {
+static const struct linear_range arizona_ldo1_hc_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 0x6, 50000),
REGULATOR_LINEAR_RANGE(1800000, 0x7, 0x7, 0),
};
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index ae1a5de3e57d..f6cfd3f6f0dd 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -125,7 +125,7 @@ static const struct regulator_ops arizona_micsupp_ops = {
.set_bypass = arizona_micsupp_set_bypass,
};
-static const struct regulator_linear_range arizona_micsupp_ranges[] = {
+static const struct linear_range arizona_micsupp_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 0x1e, 50000),
REGULATOR_LINEAR_RANGE(3300000, 0x1f, 0x1f, 0),
};
@@ -152,7 +152,7 @@ static const struct regulator_desc arizona_micsupp = {
.owner = THIS_MODULE,
};
-static const struct regulator_linear_range arizona_micsupp_ext_ranges[] = {
+static const struct linear_range arizona_micsupp_ext_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 0x14, 25000),
REGULATOR_LINEAR_RANGE(1500000, 0x15, 0x27, 100000),
};
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index ece88103f2fd..b6b9206969ae 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -103,18 +103,18 @@ static const struct regulator_ops as3711_dldo_ops = {
.map_voltage = regulator_map_voltage_linear_range,
};
-static const struct regulator_linear_range as3711_sd_ranges[] = {
+static const struct linear_range as3711_sd_ranges[] = {
REGULATOR_LINEAR_RANGE(612500, 0x1, 0x40, 12500),
REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7f, 50000),
};
-static const struct regulator_linear_range as3711_aldo_ranges[] = {
+static const struct linear_range as3711_aldo_ranges[] = {
REGULATOR_LINEAR_RANGE(1200000, 0, 0xf, 50000),
REGULATOR_LINEAR_RANGE(1800000, 0x10, 0x1f, 100000),
};
-static const struct regulator_linear_range as3711_dldo_ranges[] = {
+static const struct linear_range as3711_dldo_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 0x10, 50000),
REGULATOR_LINEAR_RANGE(1750000, 0x20, 0x3f, 50000),
};
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index bd5d0bacb08d..33ca197860b3 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -389,7 +389,7 @@ static const struct regulator_ops as3722_ldo6_extcntrl_ops = {
.set_bypass = regulator_set_bypass_regmap,
};
-static const struct regulator_linear_range as3722_ldo_ranges[] = {
+static const struct linear_range as3722_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
@@ -487,7 +487,7 @@ static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
return false;
}
-static const struct regulator_linear_range as3722_sd2345_ranges[] = {
+static const struct linear_range as3722_sd2345_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 1e6eb5b1f8d8..fbc95cadaf53 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -510,7 +510,7 @@ static const struct regulator_ops axp20x_ops_sw = {
.is_enabled = regulator_is_enabled_regmap,
};
-static const struct regulator_linear_range axp20x_ldo4_ranges[] = {
+static const struct linear_range axp20x_ldo4_ranges[] = {
REGULATOR_LINEAR_RANGE(1250000,
AXP20X_LDO4_V_OUT_1250mV_START,
AXP20X_LDO4_V_OUT_1250mV_END,
@@ -638,7 +638,7 @@ static const struct regulator_desc axp22x_drivevbus_regulator = {
};
/* DCDC ranges shared with AXP813 */
-static const struct regulator_linear_range axp803_dcdc234_ranges[] = {
+static const struct linear_range axp803_dcdc234_ranges[] = {
REGULATOR_LINEAR_RANGE(500000,
AXP803_DCDC234_500mV_START,
AXP803_DCDC234_500mV_END,
@@ -649,7 +649,7 @@ static const struct regulator_linear_range axp803_dcdc234_ranges[] = {
20000),
};
-static const struct regulator_linear_range axp803_dcdc5_ranges[] = {
+static const struct linear_range axp803_dcdc5_ranges[] = {
REGULATOR_LINEAR_RANGE(800000,
AXP803_DCDC5_800mV_START,
AXP803_DCDC5_800mV_END,
@@ -660,7 +660,7 @@ static const struct regulator_linear_range axp803_dcdc5_ranges[] = {
20000),
};
-static const struct regulator_linear_range axp803_dcdc6_ranges[] = {
+static const struct linear_range axp803_dcdc6_ranges[] = {
REGULATOR_LINEAR_RANGE(600000,
AXP803_DCDC6_600mV_START,
AXP803_DCDC6_600mV_END,
@@ -672,7 +672,7 @@ static const struct regulator_linear_range axp803_dcdc6_ranges[] = {
};
/* AXP806's CLDO2 and AXP809's DLDO1 share the same range */
-static const struct regulator_linear_range axp803_dldo2_ranges[] = {
+static const struct linear_range axp803_dldo2_ranges[] = {
REGULATOR_LINEAR_RANGE(700000,
AXP803_DLDO2_700mV_START,
AXP803_DLDO2_700mV_END,
@@ -758,7 +758,7 @@ static const struct regulator_desc axp803_regulators[] = {
AXP_DESC_FIXED(AXP803, RTC_LDO, "rtc-ldo", "ips", 3000),
};
-static const struct regulator_linear_range axp806_dcdca_ranges[] = {
+static const struct linear_range axp806_dcdca_ranges[] = {
REGULATOR_LINEAR_RANGE(600000,
AXP806_DCDCA_600mV_START,
AXP806_DCDCA_600mV_END,
@@ -769,7 +769,7 @@ static const struct regulator_linear_range axp806_dcdca_ranges[] = {
20000),
};
-static const struct regulator_linear_range axp806_dcdcd_ranges[] = {
+static const struct linear_range axp806_dcdcd_ranges[] = {
REGULATOR_LINEAR_RANGE(600000,
AXP806_DCDCD_600mV_START,
AXP806_DCDCD_600mV_END,
@@ -834,7 +834,7 @@ static const struct regulator_desc axp806_regulators[] = {
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_SW_MASK),
};
-static const struct regulator_linear_range axp809_dcdc4_ranges[] = {
+static const struct linear_range axp809_dcdc4_ranges[] = {
REGULATOR_LINEAR_RANGE(600000,
AXP809_DCDC4_600mV_START,
AXP809_DCDC4_600mV_END,
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 8c98c3f07660..65e23fc5f9c3 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -116,14 +116,14 @@ static const unsigned int ldo_vbus[] = {
};
/* DCDC group CSR: supported voltages in microvolts */
-static const struct regulator_linear_range dcdc_csr_ranges[] = {
+static const struct linear_range dcdc_csr_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
REGULATOR_LINEAR_RANGE(1360000, 51, 55, 20000),
REGULATOR_LINEAR_RANGE(900000, 56, 63, 0),
};
/* DCDC group IOSR1: supported voltages in microvolts */
-static const struct regulator_linear_range dcdc_iosr1_ranges[] = {
+static const struct linear_range dcdc_iosr1_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 51, 10000),
REGULATOR_LINEAR_RANGE(1500000, 52, 52, 0),
REGULATOR_LINEAR_RANGE(1800000, 53, 53, 0),
@@ -131,7 +131,7 @@ static const struct regulator_linear_range dcdc_iosr1_ranges[] = {
};
/* DCDC group SDSR1: supported voltages in microvolts */
-static const struct regulator_linear_range dcdc_sdsr1_ranges[] = {
+static const struct linear_range dcdc_sdsr1_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
REGULATOR_LINEAR_RANGE(1340000, 51, 51, 0),
REGULATOR_LINEAR_RANGE(900000, 52, 63, 0),
@@ -143,7 +143,7 @@ struct bcm590xx_info {
u8 n_voltages;
const unsigned int *volt_table;
u8 n_linear_ranges;
- const struct regulator_linear_range *linear_ranges;
+ const struct linear_range *linear_ranges;
};
#define BCM590XX_REG_TABLE(_name, _table) \
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
index 5bf8a2dc5fe7..d44adf7e875a 100644
--- a/drivers/regulator/bd70528-regulator.c
+++ b/drivers/regulator/bd70528-regulator.c
@@ -20,22 +20,22 @@
#define BUCK_RAMPRATE_125MV 1
#define BUCK_RAMP_MAX 250
-static const struct regulator_linear_range bd70528_buck1_volts[] = {
+static const struct linear_range bd70528_buck1_volts[] = {
REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 600000),
REGULATOR_LINEAR_RANGE(2750000, 0x2, 0xf, 50000),
};
-static const struct regulator_linear_range bd70528_buck2_volts[] = {
+static const struct linear_range bd70528_buck2_volts[] = {
REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 300000),
REGULATOR_LINEAR_RANGE(1550000, 0x2, 0xd, 50000),
REGULATOR_LINEAR_RANGE(3000000, 0xe, 0xf, 300000),
};
-static const struct regulator_linear_range bd70528_buck3_volts[] = {
+static const struct linear_range bd70528_buck3_volts[] = {
REGULATOR_LINEAR_RANGE(800000, 0x00, 0xd, 50000),
REGULATOR_LINEAR_RANGE(1800000, 0xe, 0xf, 0),
};
/* All LDOs have same voltage ranges */
-static const struct regulator_linear_range bd70528_ldo_volts[] = {
+static const struct linear_range bd70528_ldo_volts[] = {
REGULATOR_LINEAR_RANGE(1650000, 0x0, 0x07, 50000),
REGULATOR_LINEAR_RANGE(2100000, 0x8, 0x0f, 100000),
REGULATOR_LINEAR_RANGE(2850000, 0x10, 0x19, 50000),
diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
index b2fa17be4988..85c0b9000963 100644
--- a/drivers/regulator/bd71828-regulator.c
+++ b/drivers/regulator/bd71828-regulator.c
@@ -65,27 +65,27 @@ static const struct reg_init buck7_inits[] = {
},
};
-static const struct regulator_linear_range bd71828_buck1267_volts[] = {
+static const struct linear_range bd71828_buck1267_volts[] = {
REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250),
REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0),
};
-static const struct regulator_linear_range bd71828_buck3_volts[] = {
+static const struct linear_range bd71828_buck3_volts[] = {
REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000),
REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0),
};
-static const struct regulator_linear_range bd71828_buck4_volts[] = {
+static const struct linear_range bd71828_buck4_volts[] = {
REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000),
REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0),
};
-static const struct regulator_linear_range bd71828_buck5_volts[] = {
+static const struct linear_range bd71828_buck5_volts[] = {
REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000),
REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0),
};
-static const struct regulator_linear_range bd71828_ldo_volts[] = {
+static const struct linear_range bd71828_ldo_volts[] = {
REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000),
REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0),
};
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index cf3872837abc..7b311389f925 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -55,12 +55,20 @@ static int bd718xx_buck1234_set_ramp_delay(struct regulator_dev *rdev,
BUCK_RAMPRATE_MASK, ramp_value << 6);
}
-/* Bucks 1 to 4 support DVS. PWM mode is used when voltage is changed.
+/*
+ * On BD71837 (not on BD71847, BD71850, ...)
+ * Bucks 1 to 4 support DVS. PWM mode is used when voltage is changed.
* Bucks 5 to 8 and LDOs can use PFM and must be disabled when voltage
* is changed. Hence we return -EBUSY for these if voltage is changed
* when BUCK/LDO is enabled.
+ *
+ * On BD71847, BD71850, ... the LDO voltage can be changed while the LDO is
+ * enabled. But if the voltage is increased, the LDO power-good monitoring
+ * must be disabled for the duration of the change plus 1 ms, to ensure the
+ * voltage has reached the higher level before HW runs its next
+ * under-voltage detection cycle.
*/
-static int bd718xx_set_voltage_sel_restricted(struct regulator_dev *rdev,
+static int bd71837_set_voltage_sel_restricted(struct regulator_dev *rdev,
unsigned int sel)
{
if (regulator_is_enabled_regmap(rdev))
@@ -69,9 +77,124 @@ static int bd718xx_set_voltage_sel_restricted(struct regulator_dev *rdev,
return regulator_set_voltage_sel_regmap(rdev, sel);
}
+static void voltage_change_done(struct regulator_dev *rdev, unsigned int sel,
+ unsigned int *mask)
+{
+ int ret;
+
+ if (*mask) {
+ /*
+ * Let's allow scheduling as we use I2C anyway. We just need to
+ * guarantee a minimum of 1 ms sleep - it doesn't matter if we
+ * exceed it due to scheduling.
+ */
+ msleep(1);
+ /*
+ * Note for the next hacker: PWRGOOD should not be masked on
+ * BD71847, so we just unconditionally enable detection when
+ * the voltage is set.
+ * If someone wants to keep PWRGOOD disabled, they must
+ * implement caching and restoring of the old value here. I am
+ * not aware of such use-cases, so for the sake of simplicity
+ * we always enable PWRGOOD here.
+ */
+ ret = regmap_update_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+ *mask, 0);
+ if (ret)
+ dev_err(&rdev->dev,
+ "Failed to re-enable voltage monitoring (%d)\n",
+ ret);
+ }
+}
+
+static int voltage_change_prepare(struct regulator_dev *rdev, unsigned int sel,
+ unsigned int *mask)
+{
+ int ret;
+
+ *mask = 0;
+ if (regulator_is_enabled_regmap(rdev)) {
+ int now, new;
+
+ now = rdev->desc->ops->get_voltage_sel(rdev);
+ if (now < 0)
+ return now;
+
+ now = rdev->desc->ops->list_voltage(rdev, now);
+ if (now < 0)
+ return now;
+
+ new = rdev->desc->ops->list_voltage(rdev, sel);
+ if (new < 0)
+ return new;
+
+ /*
+ * If we increase the LDO voltage while the LDO is enabled, we
+ * need to disable power-good detection until the voltage has
+ * reached the new level. According to HW colleagues the maximum
+ * time it takes is 1000 us. On systems with a light load it
+ * might be less, and DT could be used to provide a
+ * system-specific delay value if performance matters.
+ *
+ * Since we use I2C here and can tolerate scheduling delays,
+ * it is not worth the hassle; we just add a fixed 1 ms sleep
+ * (and allow scheduling). If this turns out to be a problem,
+ * we can change it to a delay and make the delay time
+ * configurable.
+ */
+ if (new > now) {
+ int ldo_offset = rdev->desc->id - BD718XX_LDO1;
+
+ *mask = BD718XX_LDO1_VRMON80 << ldo_offset;
+ ret = regmap_update_bits(rdev->regmap,
+ BD718XX_REG_MVRFLTMASK2,
+ *mask, *mask);
+ if (ret) {
+ dev_err(&rdev->dev,
+ "Failed to stop voltage monitoring\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int bd718xx_set_voltage_sel_restricted(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ int ret;
+ int mask;
+
+ ret = voltage_change_prepare(rdev, sel, &mask);
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage_sel_regmap(rdev, sel);
+ voltage_change_done(rdev, sel, &mask);
+
+ return ret;
+}
+
static int bd718xx_set_voltage_sel_pickable_restricted(
struct regulator_dev *rdev, unsigned int sel)
{
+ int ret;
+ int mask;
+
+ ret = voltage_change_prepare(rdev, sel, &mask);
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage_sel_pickable_regmap(rdev, sel);
+ voltage_change_done(rdev, sel, &mask);
+
+ return ret;
+}
+
+static int bd71837_set_voltage_sel_pickable_restricted(
+ struct regulator_dev *rdev, unsigned int sel)
+{
if (regulator_is_enabled_regmap(rdev))
return -EBUSY;
@@ -85,6 +208,16 @@ static const struct regulator_ops bd718xx_pickable_range_ldo_ops = {
.list_voltage = regulator_list_voltage_pickable_linear_range,
.set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
.get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+
+};
+
+static const struct regulator_ops bd71837_pickable_range_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_pickable_linear_range,
+ .set_voltage_sel = bd71837_set_voltage_sel_pickable_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
};
static const struct regulator_ops bd718xx_pickable_range_buck_ops = {
@@ -92,11 +225,30 @@ static const struct regulator_ops bd718xx_pickable_range_buck_ops = {
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_pickable_linear_range,
- .set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
+ .set_voltage_sel = regulator_set_voltage_sel_pickable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static const struct regulator_ops bd71837_pickable_range_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_pickable_linear_range,
+ .set_voltage_sel = bd71837_set_voltage_sel_pickable_restricted,
.get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
+static const struct regulator_ops bd71837_ldo_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = bd71837_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
static const struct regulator_ops bd718xx_ldo_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -106,6 +258,15 @@ static const struct regulator_ops bd718xx_ldo_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
+static const struct regulator_ops bd71837_ldo_regulator_nolinear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = bd71837_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
static const struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -120,7 +281,17 @@ static const struct regulator_ops bd718xx_buck_regulator_ops = {
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = bd718xx_set_voltage_sel_restricted,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static const struct regulator_ops bd71837_buck_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = bd71837_set_voltage_sel_restricted,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
@@ -131,6 +302,17 @@ static const struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static const struct regulator_ops bd71837_buck_regulator_nolinear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = bd718xx_set_voltage_sel_restricted,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
@@ -152,7 +334,7 @@ static const struct regulator_ops bd718xx_dvs_buck_regulator_ops = {
* BD71847 BUCK1/2
* 0.70 to 1.30V (10mV step)
*/
-static const struct regulator_linear_range bd718xx_dvs_buck_volts[] = {
+static const struct linear_range bd718xx_dvs_buck_volts[] = {
REGULATOR_LINEAR_RANGE(700000, 0x00, 0x3C, 10000),
REGULATOR_LINEAR_RANGE(1300000, 0x3D, 0x3F, 0),
};
@@ -163,7 +345,7 @@ static const struct regulator_linear_range bd718xx_dvs_buck_volts[] = {
* and
* 0.675 to 1.325 (range 1)
*/
-static const struct regulator_linear_range bd71837_buck5_volts[] = {
+static const struct linear_range bd71837_buck5_volts[] = {
/* Ranges when VOLT_SEL bit is 0 */
REGULATOR_LINEAR_RANGE(700000, 0x00, 0x03, 100000),
REGULATOR_LINEAR_RANGE(1050000, 0x04, 0x05, 50000),
@@ -185,7 +367,7 @@ static const unsigned int bd71837_buck5_volt_range_sel[] = {
/*
* BD71847 BUCK3
*/
-static const struct regulator_linear_range bd71847_buck3_volts[] = {
+static const struct linear_range bd71847_buck3_volts[] = {
/* Ranges when VOLT_SEL bits are 00 */
REGULATOR_LINEAR_RANGE(700000, 0x00, 0x03, 100000),
REGULATOR_LINEAR_RANGE(1050000, 0x04, 0x05, 50000),
@@ -202,7 +384,7 @@ static const unsigned int bd71847_buck3_volt_range_sel[] = {
0x0, 0x0, 0x0, 0x40, 0x80, 0x80, 0x80
};
-static const struct regulator_linear_range bd71847_buck4_volts[] = {
+static const struct linear_range bd71847_buck4_volts[] = {
REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
REGULATOR_LINEAR_RANGE(2600000, 0x00, 0x03, 100000),
};
@@ -213,7 +395,7 @@ static const unsigned int bd71847_buck4_volt_range_sel[] = { 0x0, 0x40 };
* BUCK6
* 3.0V to 3.3V (step 100mV)
*/
-static const struct regulator_linear_range bd71837_buck6_volts[] = {
+static const struct linear_range bd71837_buck6_volts[] = {
REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
};
@@ -237,7 +419,7 @@ static const unsigned int bd718xx_3rd_nodvs_buck_volts[] = {
* BUCK8
* 0.8V to 1.40V (step 10mV)
*/
-static const struct regulator_linear_range bd718xx_4th_nodvs_buck_volts[] = {
+static const struct linear_range bd718xx_4th_nodvs_buck_volts[] = {
REGULATOR_LINEAR_RANGE(800000, 0x00, 0x3C, 10000),
};
@@ -245,7 +427,7 @@ static const struct regulator_linear_range bd718xx_4th_nodvs_buck_volts[] = {
* LDO1
* 3.0 to 3.3V (100mV step)
*/
-static const struct regulator_linear_range bd718xx_ldo1_volts[] = {
+static const struct linear_range bd718xx_ldo1_volts[] = {
REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
REGULATOR_LINEAR_RANGE(1600000, 0x00, 0x03, 100000),
};
@@ -264,7 +446,7 @@ static const unsigned int ldo_2_volts[] = {
* LDO3
* 1.8 to 3.3V (100mV step)
*/
-static const struct regulator_linear_range bd718xx_ldo3_volts[] = {
+static const struct linear_range bd718xx_ldo3_volts[] = {
REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
};
@@ -272,7 +454,7 @@ static const struct regulator_linear_range bd718xx_ldo3_volts[] = {
* LDO4
* 0.9 to 1.8V (100mV step)
*/
-static const struct regulator_linear_range bd718xx_ldo4_volts[] = {
+static const struct linear_range bd718xx_ldo4_volts[] = {
REGULATOR_LINEAR_RANGE(900000, 0x00, 0x09, 100000),
};
@@ -280,7 +462,7 @@ static const struct regulator_linear_range bd718xx_ldo4_volts[] = {
* LDO5 for BD71837
* 1.8 to 3.3V (100mV step)
*/
-static const struct regulator_linear_range bd71837_ldo5_volts[] = {
+static const struct linear_range bd71837_ldo5_volts[] = {
REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
};
@@ -288,7 +470,7 @@ static const struct regulator_linear_range bd71837_ldo5_volts[] = {
* LDO5 for BD71837
* 1.8 to 3.3V (100mV step)
*/
-static const struct regulator_linear_range bd71847_ldo5_volts[] = {
+static const struct linear_range bd71847_ldo5_volts[] = {
REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
REGULATOR_LINEAR_RANGE(800000, 0x00, 0x0F, 100000),
};
@@ -299,7 +481,7 @@ static const unsigned int bd71847_ldo5_volt_range_sel[] = { 0x0, 0x20 };
* LDO6
* 0.9 to 1.8V (100mV step)
*/
-static const struct regulator_linear_range bd718xx_ldo6_volts[] = {
+static const struct linear_range bd718xx_ldo6_volts[] = {
REGULATOR_LINEAR_RANGE(900000, 0x00, 0x09, 100000),
};
@@ -307,7 +489,7 @@ static const struct regulator_linear_range bd718xx_ldo6_volts[] = {
* LDO7
* 1.8 to 3.3V (100mV step)
*/
-static const struct regulator_linear_range bd71837_ldo7_volts[] = {
+static const struct linear_range bd71837_ldo7_volts[] = {
REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
};
@@ -805,7 +987,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK5"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK5,
- .ops = &bd718xx_pickable_range_buck_ops,
+ .ops = &bd71837_pickable_range_buck_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71837_BUCK5_VOLTAGE_NUM,
.linear_ranges = bd71837_buck5_volts,
@@ -832,7 +1014,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK6"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK6,
- .ops = &bd718xx_buck_regulator_ops,
+ .ops = &bd71837_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71837_BUCK6_VOLTAGE_NUM,
.linear_ranges = bd71837_buck6_volts,
@@ -856,7 +1038,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK7"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK7,
- .ops = &bd718xx_buck_regulator_nolinear_ops,
+ .ops = &bd71837_buck_regulator_nolinear_ops,
.type = REGULATOR_VOLTAGE,
.volt_table = &bd718xx_3rd_nodvs_buck_volts[0],
.n_voltages = ARRAY_SIZE(bd718xx_3rd_nodvs_buck_volts),
@@ -878,7 +1060,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK8"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK8,
- .ops = &bd718xx_buck_regulator_ops,
+ .ops = &bd71837_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM,
.linear_ranges = bd718xx_4th_nodvs_buck_volts,
@@ -902,7 +1084,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO1"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO1,
- .ops = &bd718xx_pickable_range_ldo_ops,
+ .ops = &bd71837_pickable_range_ldo_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO1_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo1_volts,
@@ -928,7 +1110,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO2"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO2,
- .ops = &bd718xx_ldo_regulator_nolinear_ops,
+ .ops = &bd71837_ldo_regulator_nolinear_ops,
.type = REGULATOR_VOLTAGE,
.volt_table = &ldo_2_volts[0],
.vsel_reg = BD718XX_REG_LDO2_VOLT,
@@ -950,7 +1132,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO3"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO3,
- .ops = &bd718xx_ldo_regulator_ops,
+ .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO3_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo3_volts,
@@ -973,7 +1155,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO4"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO4,
- .ops = &bd718xx_ldo_regulator_ops,
+ .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO4_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo4_volts,
@@ -996,7 +1178,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO5,
- .ops = &bd718xx_ldo_regulator_ops,
+ .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71837_LDO5_VOLTAGE_NUM,
.linear_ranges = bd71837_ldo5_volts,
@@ -1023,7 +1205,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO6"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO6,
- .ops = &bd718xx_ldo_regulator_ops,
+ .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO6_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo6_volts,
@@ -1050,7 +1232,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO7"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO7,
- .ops = &bd718xx_ldo_regulator_ops,
+ .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71837_LDO7_VOLTAGE_NUM,
.linear_ranges = bd71837_ldo7_volts,
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7486f6e4e613..03154f5b939f 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3642,36 +3642,19 @@ finish:
return done;
}
-static int regulator_balance_voltage(struct regulator_dev *rdev,
- suspend_state_t state)
+int regulator_do_balance_voltage(struct regulator_dev *rdev,
+ suspend_state_t state, bool skip_coupled)
{
struct regulator_dev **c_rdevs;
struct regulator_dev *best_rdev;
struct coupling_desc *c_desc = &rdev->coupling_desc;
- struct regulator_coupler *coupler = c_desc->coupler;
int i, ret, n_coupled, best_min_uV, best_max_uV, best_c_rdev;
unsigned int delta, best_delta;
unsigned long c_rdev_done = 0;
bool best_c_rdev_done;
c_rdevs = c_desc->coupled_rdevs;
- n_coupled = c_desc->n_coupled;
-
- /*
- * If system is in a state other than PM_SUSPEND_ON, don't check
- * other coupled regulators.
- */
- if (state != PM_SUSPEND_ON)
- n_coupled = 1;
-
- if (c_desc->n_resolved < n_coupled) {
- rdev_err(rdev, "Not all coupled regulators registered\n");
- return -EPERM;
- }
-
- /* Invoke custom balancer for customized couplers */
- if (coupler && coupler->balance_voltage)
- return coupler->balance_voltage(coupler, rdev, state);
+ n_coupled = skip_coupled ? 1 : c_desc->n_coupled;
/*
* Find the best possible voltage change on each loop. Leave the loop
@@ -3742,6 +3725,32 @@ out:
return ret;
}
+static int regulator_balance_voltage(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ struct regulator_coupler *coupler = c_desc->coupler;
+ bool skip_coupled = false;
+
+ /*
+ * If system is in a state other than PM_SUSPEND_ON, don't check
+ * other coupled regulators.
+ */
+ if (state != PM_SUSPEND_ON)
+ skip_coupled = true;
+
+ if (c_desc->n_resolved < c_desc->n_coupled) {
+ rdev_err(rdev, "Not all coupled regulators registered\n");
+ return -EPERM;
+ }
+
+ /* Invoke custom balancer for customized couplers */
+ if (coupler && coupler->balance_voltage)
+ return coupler->balance_voltage(coupler, rdev, state);
+
+ return regulator_do_balance_voltage(rdev, state, skip_coupled);
+}
+
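
Splitting the validation out of regulator_balance_voltage() leaves the core balancing loop in regulator_do_balance_voltage(), which a customized coupler can reuse after running its own checks. A hedged sketch of such a coupler callback, assuming regulator_do_balance_voltage() is exposed to coupler code; example_coupler_balance is hypothetical:

    #include <linux/regulator/coupler.h>
    #include <linux/suspend.h>

    static int example_coupler_balance(struct regulator_coupler *coupler,
                                       struct regulator_dev *rdev,
                                       suspend_state_t state)
    {
        /* ... coupler-specific constraint checks would go here ... */

        /* skip_coupled = true balances only this regulator, as the core
         * itself does for states other than PM_SUSPEND_ON */
        return regulator_do_balance_voltage(rdev, state, true);
    }
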
/**
* regulator_set_voltage - set regulator output voltage
* @regulator: regulator source
@@ -4312,6 +4321,7 @@ EXPORT_SYMBOL_GPL(regulator_set_load);
int regulator_allow_bypass(struct regulator *regulator, bool enable)
{
struct regulator_dev *rdev = regulator->rdev;
+ const char *name = rdev_get_name(rdev);
int ret = 0;
if (!rdev->desc->ops->set_bypass)
@@ -4326,18 +4336,26 @@ int regulator_allow_bypass(struct regulator *regulator, bool enable)
rdev->bypass_count++;
if (rdev->bypass_count == rdev->open_count) {
+ trace_regulator_bypass_enable(name);
+
ret = rdev->desc->ops->set_bypass(rdev, enable);
if (ret != 0)
rdev->bypass_count--;
+ else
+ trace_regulator_bypass_enable_complete(name);
}
} else if (!enable && regulator->bypass) {
rdev->bypass_count--;
if (rdev->bypass_count != rdev->open_count) {
+ trace_regulator_bypass_disable(name);
+
ret = rdev->desc->ops->set_bypass(rdev, enable);
if (ret != 0)
rdev->bypass_count++;
+ else
+ trace_regulator_bypass_disable_complete(name);
}
}
@@ -5496,6 +5514,7 @@ static void regulator_summary_show_subtree(struct seq_file *s,
seq_printf(s, "%*s%-*s ",
(level + 1) * 3 + 1, "",
30 - (level + 1) * 3,
+ consumer->supply_name ? consumer->supply_name :
consumer->dev ? dev_name(consumer->dev) : "deviceless");
switch (rdev->desc->type) {
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 5493c3a86426..770e694824ac 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -248,7 +248,7 @@ static int da9034_set_dvc_voltage_sel(struct regulator_dev *rdev,
return ret;
}
-static const struct regulator_linear_range da9034_ldo12_ranges[] = {
+static const struct linear_range da9034_ldo12_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 50000),
REGULATOR_LINEAR_RANGE(2700000, 8, 15, 50000),
};
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 88a2dcb9fe8a..0ce6ec4933af 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -181,7 +181,7 @@ static int db8500_regulator_switch_disable(struct regulator_dev *rdev)
goto out;
}
- info->is_enabled = 0;
+ info->is_enabled = false;
out:
return ret;
}
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index bb16c465426e..e970e9d2f8be 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -131,10 +131,11 @@ int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev)
unsigned int r_val;
int range;
unsigned int val;
- int ret, i;
- unsigned int voltages_in_range = 0;
+ int ret;
+ unsigned int voltages = 0;
+ const struct linear_range *r = rdev->desc->linear_ranges;
- if (!rdev->desc->linear_ranges)
+ if (!r)
return -EINVAL;
ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
@@ -152,11 +153,9 @@ int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev)
if (range < 0)
return -EINVAL;
- for (i = 0; i < range; i++)
- voltages_in_range += (rdev->desc->linear_ranges[i].max_sel -
- rdev->desc->linear_ranges[i].min_sel) + 1;
+ voltages = linear_range_values_in_range_array(r, range);
- return val + voltages_in_range;
+ return val + voltages;
}
EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_pickable_regmap);
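
The pickable read path composes the logical selector from the raw register value plus the number of selectors consumed by all lower "picks", which is what linear_range_values_in_range_array() computes. A worked sketch with two hypothetical 16-selector sub-ranges:

    #include <linux/linear_range.h>

    static const struct linear_range picks[] = {
        { .min =  600000, .min_sel = 0, .max_sel = 15, .step = 25000 },
        { .min = 1600000, .min_sel = 0, .max_sel = 15, .step = 50000 },
    };

    /* raw register value 3 in pick 1 -> logical selector 3 + 16 = 19 */
    static unsigned int example_logical_sel(unsigned int pick, unsigned int raw)
    {
        return raw + linear_range_values_in_range_array(picks, pick);
    }
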
@@ -179,8 +178,11 @@ int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
unsigned int voltages_in_range = 0;
for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
- voltages_in_range = (rdev->desc->linear_ranges[i].max_sel -
- rdev->desc->linear_ranges[i].min_sel) + 1;
+ const struct linear_range *r;
+
+ r = &rdev->desc->linear_ranges[i];
+ voltages_in_range = linear_range_values_in_range(r);
+
if (sel < voltages_in_range)
break;
sel -= voltages_in_range;
@@ -405,8 +407,10 @@ EXPORT_SYMBOL_GPL(regulator_map_voltage_linear);
int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
- const struct regulator_linear_range *range;
+ const struct linear_range *range;
int ret = -EINVAL;
+ unsigned int sel;
+ bool found;
int voltage, i;
if (!rdev->desc->n_linear_ranges) {
@@ -415,35 +419,19 @@ int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
}
for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
- int linear_max_uV;
-
range = &rdev->desc->linear_ranges[i];
- linear_max_uV = range->min_uV +
- (range->max_sel - range->min_sel) * range->uV_step;
- if (!(min_uV <= linear_max_uV && max_uV >= range->min_uV))
+ ret = linear_range_get_selector_high(range, min_uV, &sel,
+ &found);
+ if (ret)
continue;
-
- if (min_uV <= range->min_uV)
- min_uV = range->min_uV;
-
- /* range->uV_step == 0 means fixed voltage range */
- if (range->uV_step == 0) {
- ret = 0;
- } else {
- ret = DIV_ROUND_UP(min_uV - range->min_uV,
- range->uV_step);
- if (ret < 0)
- return ret;
- }
-
- ret += range->min_sel;
+ ret = sel;
/*
* Map back into a voltage to verify we're still in bounds.
* If we are not, then continue checking rest of the ranges.
*/
- voltage = rdev->desc->ops->list_voltage(rdev, ret);
+ voltage = rdev->desc->ops->list_voltage(rdev, sel);
if (voltage >= min_uV && voltage <= max_uV)
break;
}
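
linear_range_get_selector_high() returns the lowest selector in the range whose value is greater than or equal to the requested minimum, replacing the open-coded DIV_ROUND_UP() arithmetic above. A short sketch against a range mirroring bd718xx_ldo4_volts; example_map is hypothetical:

    #include <linux/linear_range.h>
    #include <linux/types.h>

    /* 900000 uV at selector 0, 100000 uV steps up to selector 9 */
    static const struct linear_range ldo_range = {
        .min = 900000, .min_sel = 0, .max_sel = 9, .step = 100000,
    };

    static int example_map(unsigned int min_uV)
    {
        unsigned int sel;
        bool found;
        int ret;

        /* e.g. min_uV = 950000 -> sel 1 (1000000 uV), found = true */
        ret = linear_range_get_selector_high(&ldo_range, min_uV, &sel, &found);
        if (ret)
            return ret;

        return sel;
    }
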
@@ -468,7 +456,7 @@ EXPORT_SYMBOL_GPL(regulator_map_voltage_linear_range);
int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
- const struct regulator_linear_range *range;
+ const struct linear_range *range;
int ret = -EINVAL;
int voltage, i;
unsigned int selector = 0;
@@ -480,30 +468,25 @@ int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
int linear_max_uV;
+ bool found;
+ unsigned int sel;
range = &rdev->desc->linear_ranges[i];
- linear_max_uV = range->min_uV +
- (range->max_sel - range->min_sel) * range->uV_step;
+ linear_max_uV = linear_range_get_max_value(range);
- if (!(min_uV <= linear_max_uV && max_uV >= range->min_uV)) {
- selector += (range->max_sel - range->min_sel + 1);
+ if (!(min_uV <= linear_max_uV && max_uV >= range->min)) {
+ selector += linear_range_values_in_range(range);
continue;
}
- if (min_uV <= range->min_uV)
- min_uV = range->min_uV;
-
- /* range->uV_step == 0 means fixed voltage range */
- if (range->uV_step == 0) {
- ret = 0;
- } else {
- ret = DIV_ROUND_UP(min_uV - range->min_uV,
- range->uV_step);
- if (ret < 0)
- return ret;
+ ret = linear_range_get_selector_high(range, min_uV, &sel,
+ &found);
+ if (ret) {
+ selector += linear_range_values_in_range(range);
+ continue;
}
- ret += selector;
+ ret = selector + sel;
voltage = rdev->desc->ops->list_voltage(rdev, ret);
@@ -513,7 +496,7 @@ int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
* exit but retry until we have checked all ranges.
*/
if (voltage < min_uV || voltage > max_uV)
- selector += (range->max_sel - range->min_sel + 1);
+ selector += linear_range_values_in_range(range);
else
break;
}
@@ -561,7 +544,7 @@ EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev,
unsigned int selector)
{
- const struct regulator_linear_range *range;
+ const struct linear_range *range;
int i;
unsigned int all_sels = 0;
@@ -571,18 +554,28 @@ int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev,
}
for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
- unsigned int sels_in_range;
+ unsigned int sel_indexes;
range = &rdev->desc->linear_ranges[i];
- sels_in_range = range->max_sel - range->min_sel;
+ sel_indexes = linear_range_values_in_range(range) - 1;
- if (all_sels + sels_in_range >= selector) {
+ if (all_sels + sel_indexes >= selector) {
selector -= all_sels;
- return range->min_uV + (range->uV_step * selector);
+ /*
+ * As seen here, pickable ranges work only as
+ * long as the first selector of each pickable
+ * range is 0 and each subsequent range for
+ * this 'pick' follows immediately at the next
+ * unused selector (i.e. there are no gaps
+ * between ranges). This is fine, but it should
+ * probably be documented; the whole pickable
+ * range mechanism might benefit from some
+ * documentation.
+ */
+ return range->min + (range->step * selector);
}
- all_sels += (sels_in_range + 1);
+ all_sels += (sel_indexes + 1);
}
return -EINVAL;
@@ -604,27 +597,18 @@ EXPORT_SYMBOL_GPL(regulator_list_voltage_pickable_linear_range);
int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
unsigned int selector)
{
- const struct regulator_linear_range *range;
- int i;
-
- if (!desc->n_linear_ranges) {
- BUG_ON(!desc->n_linear_ranges);
- return -EINVAL;
- }
-
- for (i = 0; i < desc->n_linear_ranges; i++) {
- range = &desc->linear_ranges[i];
-
- if (!(selector >= range->min_sel &&
- selector <= range->max_sel))
- continue;
+ unsigned int val;
+ int ret;
- selector -= range->min_sel;
+ BUG_ON(!desc->n_linear_ranges);
- return range->min_uV + (range->uV_step * selector);
- }
+ ret = linear_range_get_value_array(desc->linear_ranges,
+ desc->n_linear_ranges, selector,
+ &val);
+ if (ret)
+ return ret;
- return -EINVAL;
+ return val;
}
EXPORT_SYMBOL_GPL(regulator_desc_list_voltage_linear_range);
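
linear_range_get_value_array() walks an array of ranges, finds the one whose [min_sel, max_sel] window contains the selector, and decodes it, replacing the open-coded loop removed above. A short sketch with two hypothetical contiguous ranges:

    #include <linux/kernel.h>
    #include <linux/linear_range.h>

    static const struct linear_range tbl[] = {
        { .min =  600000, .min_sel = 0, .max_sel = 3, .step = 25000 },
        { .min = 1000000, .min_sel = 4, .max_sel = 7, .step = 50000 },
    };

    static int example_list_voltage(unsigned int selector)
    {
        unsigned int uV;
        int ret;

        /* selector 5 lands in the second range: 1000000 + 1 * 50000 */
        ret = linear_range_get_value_array(tbl, ARRAY_SIZE(tbl),
                                           selector, &uV);
        return ret ? ret : uV;
    }
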
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 5ac3d7c29725..66219d8dfc1a 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -87,7 +87,7 @@ static const unsigned int ldo_8_voltages[] = {
};
/* Ranges are sorted in ascending order. */
-static const struct regulator_linear_range ldo_audio_volt_range[] = {
+static const struct linear_range ldo_audio_volt_range[] = {
REGULATOR_LINEAR_RANGE(2800000, 0, 3, 50000),
REGULATOR_LINEAR_RANGE(3000000, 4, 7, 100000),
};
@@ -195,7 +195,7 @@ static const struct regulator_ops hi6421_buck345_ops;
* _id - LDO id name string
* _match - of match name string
* n_volt - number of votages available
- * volt_ranges - array of regulator_linear_range
+ * volt_ranges - array of linear_range
* vstep - voltage increase in each linear step in uV
* vreg - voltage select register
* vmask - voltage select mask
diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c
index 9b05e03ba830..5ea3e4141684 100644
--- a/drivers/regulator/lochnagar-regulator.c
+++ b/drivers/regulator/lochnagar-regulator.c
@@ -36,7 +36,7 @@ static const struct regulator_ops lochnagar_micvdd_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
-static const struct regulator_linear_range lochnagar_micvdd_ranges[] = {
+static const struct linear_range lochnagar_micvdd_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 0xC, 50000),
REGULATOR_LINEAR_RANGE(1700000, 0xD, 0x1F, 100000),
};
@@ -97,7 +97,7 @@ static const struct regulator_ops lochnagar_vddcore_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
-static const struct regulator_linear_range lochnagar_vddcore_ranges[] = {
+static const struct linear_range lochnagar_vddcore_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0x8, 0x41, 12500),
};
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
index b55de293ca7a..fe049b67e7d5 100644
--- a/drivers/regulator/lp873x-regulator.c
+++ b/drivers/regulator/lp873x-regulator.c
@@ -54,14 +54,14 @@ struct lp873x_regulator {
static const struct lp873x_regulator regulators[];
-static const struct regulator_linear_range buck0_buck1_ranges[] = {
+static const struct linear_range buck0_buck1_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x13, 0),
REGULATOR_LINEAR_RANGE(700000, 0x14, 0x17, 10000),
REGULATOR_LINEAR_RANGE(735000, 0x18, 0x9d, 5000),
REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
};
-static const struct regulator_linear_range ldo0_ldo1_ranges[] = {
+static const struct linear_range ldo0_ldo1_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0x0, 0x19, 100000),
};
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index 4ae12ac1f4c6..5d525dacf959 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -46,7 +46,7 @@ struct lp87565_regulator {
static const struct lp87565_regulator regulators[];
-static const struct regulator_linear_range buck0_1_2_3_ranges[] = {
+static const struct linear_range buck0_1_2_3_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0xA, 0x17, 10000),
REGULATOR_LINEAR_RANGE(735000, 0x18, 0x9d, 5000),
REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index 222502a29658..74b7b496b12d 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -92,7 +92,7 @@ struct lp8788_buck {
};
/* BUCK 1 ~ 4 voltage ranges */
-static const struct regulator_linear_range buck_volt_ranges[] = {
+static const struct linear_range buck_volt_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 0, 0),
REGULATOR_LINEAR_RANGE(800000, 1, 25, 50000),
};
diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c
index ac89a412f665..ca08f94a368d 100644
--- a/drivers/regulator/max77650-regulator.c
+++ b/drivers/regulator/max77650-regulator.c
@@ -49,7 +49,7 @@ static const unsigned int max77651_sbb1_volt_range_sel[] = {
0x0, 0x1, 0x2, 0x3
};
-static const struct regulator_linear_range max77651_sbb1_volt_ranges[] = {
+static const struct linear_range max77651_sbb1_volt_ranges[] = {
/* range index 0 */
REGULATOR_LINEAR_RANGE(2400000, 0x00, 0x0f, 50000),
/* range index 1 */
diff --git a/drivers/regulator/max77826-regulator.c b/drivers/regulator/max77826-regulator.c
new file mode 100644
index 000000000000..502ab6afc814
--- /dev/null
+++ b/drivers/regulator/max77826-regulator.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// max77826-regulator.c - regulator driver for Maxim MAX77826
+//
+// Author: Iskren Chernev <iskren.chernev@gmail.com>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+enum max77826_registers {
+ MAX77826_REG_INT_SRC = 0x00,
+ MAX77826_REG_SYS_INT,
+ MAX77826_REG_INT1,
+ MAX77826_REG_INT2,
+ MAX77826_REG_BB_INT,
+ MAX77826_REG_INT_SRC_M,
+ MAX77826_REG_TOPSYS_INT_M,
+ MAX77826_REG_INT1_M,
+ MAX77826_REG_INT2_M,
+ MAX77826_REG_BB_INT_M,
+ MAX77826_REG_TOPSYS_STAT,
+ MAX77826_REG_STAT1,
+ MAX77826_REG_STAT2,
+ MAX77826_REG_BB_STAT,
+ /* 0x0E - 0x0F: Reserved */
+ MAX77826_REG_LDO_OPMD1 = 0x10,
+ MAX77826_REG_LDO_OPMD2,
+ MAX77826_REG_LDO_OPMD3,
+ MAX77826_REG_LDO_OPMD4,
+ MAX77826_REG_B_BB_OPMD,
+ /* 0x15 - 0x1F: Reserved */
+ MAX77826_REG_LDO1_CFG = 0x20,
+ MAX77826_REG_LDO2_CFG,
+ MAX77826_REG_LDO3_CFG,
+ MAX77826_REG_LDO4_CFG,
+ MAX77826_REG_LDO5_CFG,
+ MAX77826_REG_LDO6_CFG,
+ MAX77826_REG_LDO7_CFG,
+ MAX77826_REG_LDO8_CFG,
+ MAX77826_REG_LDO9_CFG,
+ MAX77826_REG_LDO10_CFG,
+ MAX77826_REG_LDO11_CFG,
+ MAX77826_REG_LDO12_CFG,
+ MAX77826_REG_LDO13_CFG,
+ MAX77826_REG_LDO14_CFG,
+ MAX77826_REG_LDO15_CFG,
+ /* 0x2F: Reserved */
+ MAX77826_REG_BUCK_CFG = 0x30,
+ MAX77826_REG_BUCK_VOUT,
+ MAX77826_REG_BB_CFG,
+ MAX77826_REG_BB_VOUT,
+ /* 0x34 - 0x3F: Reserved */
+ MAX77826_REG_BUCK_SS_FREQ = 0x40,
+ MAX77826_REG_UVLO_FALL,
+ /* 0x42 - 0xCE: Reserved */
+ MAX77826_REG_DEVICE_ID = 0xCF,
+};
+
+enum max77826_regulators {
+ MAX77826_LDO1 = 0,
+ MAX77826_LDO2,
+ MAX77826_LDO3,
+ MAX77826_LDO4,
+ MAX77826_LDO5,
+ MAX77826_LDO6,
+ MAX77826_LDO7,
+ MAX77826_LDO8,
+ MAX77826_LDO9,
+ MAX77826_LDO10,
+ MAX77826_LDO11,
+ MAX77826_LDO12,
+ MAX77826_LDO13,
+ MAX77826_LDO14,
+ MAX77826_LDO15,
+ MAX77826_BUCK,
+ MAX77826_BUCKBOOST,
+ MAX77826_MAX_REGULATORS,
+};
+
+#define MAX77826_MASK_LDO 0x7f
+#define MAX77826_MASK_BUCK 0xff
+#define MAX77826_MASK_BUCKBOOST 0x7f
+#define MAX77826_BUCK_RAMP_DELAY 12500
+
+/* values in uV */
+/* for LDO1-3 */
+#define MAX77826_NMOS_LDO_VOLT_MIN 600000
+#define MAX77826_NMOS_LDO_VOLT_MAX 2187500
+#define MAX77826_NMOS_LDO_VOLT_STEP 12500
+
+/* for LDO4-15 */
+#define MAX77826_PMOS_LDO_VOLT_MIN 800000
+#define MAX77826_PMOS_LDO_VOLT_MAX 3975000
+#define MAX77826_PMOS_LDO_VOLT_STEP 25000
+
+/* for BUCK */
+#define MAX77826_BUCK_VOLT_MIN 500000
+#define MAX77826_BUCK_VOLT_MAX 1800000
+#define MAX77826_BUCK_VOLT_STEP 6250
+
+/* for BUCKBOOST */
+#define MAX77826_BUCKBOOST_VOLT_MIN 2600000
+#define MAX77826_BUCKBOOST_VOLT_MAX 4187500
+#define MAX77826_BUCKBOOST_VOLT_STEP 12500
+#define MAX77826_VOLT_RANGE(_type) \
+ ((MAX77826_ ## _type ## _VOLT_MAX - \
+ MAX77826_ ## _type ## _VOLT_MIN) / \
+ MAX77826_ ## _type ## _VOLT_STEP + 1)
+
+#define MAX77826_LDO(_id, _type) \
+ [MAX77826_LDO ## _id] = { \
+ .id = MAX77826_LDO ## _id, \
+ .name = "LDO"#_id, \
+ .of_match = of_match_ptr("LDO"#_id), \
+ .regulators_node = "regulators", \
+ .ops = &max77826_most_ops, \
+ .min_uV = MAX77826_ ## _type ## _LDO_VOLT_MIN, \
+ .uV_step = MAX77826_ ## _type ## _LDO_VOLT_STEP, \
+ .n_voltages = MAX77826_VOLT_RANGE(_type ## _LDO), \
+ .enable_reg = MAX77826_REG_LDO_OPMD1 + (_id - 1) / 4, \
+ .enable_mask = BIT(((_id - 1) % 4) * 2 + 1), \
+ .vsel_reg = MAX77826_REG_LDO1_CFG + (_id - 1), \
+ .vsel_mask = MAX77826_MASK_LDO, \
+ .owner = THIS_MODULE, \
+ }
+
+#define MAX77826_BUCK(_idx, _id, _ops) \
+ [MAX77826_ ## _id] = { \
+ .id = MAX77826_ ## _id, \
+ .name = #_id, \
+ .of_match = of_match_ptr(#_id), \
+ .regulators_node = "regulators", \
+ .ops = &_ops, \
+ .min_uV = MAX77826_ ## _id ## _VOLT_MIN, \
+ .uV_step = MAX77826_ ## _id ## _VOLT_STEP, \
+ .n_voltages = MAX77826_VOLT_RANGE(_id), \
+ .enable_reg = MAX77826_REG_B_BB_OPMD, \
+ .enable_mask = BIT(_idx * 2 + 1), \
+ .vsel_reg = MAX77826_REG_BUCK_VOUT + _idx * 2, \
+ .vsel_mask = MAX77826_MASK_ ## _id, \
+ .owner = THIS_MODULE, \
+ }
+
+struct max77826_regulator_info {
+ struct regmap *regmap;
+ struct regulator_desc *rdesc;
+};
+
+static const struct regmap_config max77826_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77826_REG_DEVICE_ID,
+};
+
+static int max77826_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector);
+
+static const struct regulator_ops max77826_most_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops max77826_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = max77826_set_voltage_time_sel,
+};
+
+static struct regulator_desc max77826_regulators_desc[] = {
+ MAX77826_LDO(1, NMOS),
+ MAX77826_LDO(2, NMOS),
+ MAX77826_LDO(3, NMOS),
+ MAX77826_LDO(4, PMOS),
+ MAX77826_LDO(5, PMOS),
+ MAX77826_LDO(6, PMOS),
+ MAX77826_LDO(7, PMOS),
+ MAX77826_LDO(8, PMOS),
+ MAX77826_LDO(9, PMOS),
+ MAX77826_LDO(10, PMOS),
+ MAX77826_LDO(11, PMOS),
+ MAX77826_LDO(12, PMOS),
+ MAX77826_LDO(13, PMOS),
+ MAX77826_LDO(14, PMOS),
+ MAX77826_LDO(15, PMOS),
+ MAX77826_BUCK(0, BUCK, max77826_buck_ops),
+ MAX77826_BUCK(1, BUCKBOOST, max77826_most_ops),
+};
+
+static int max77826_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector)
+{
+ if (new_selector > old_selector) {
+ return DIV_ROUND_UP(MAX77826_BUCK_VOLT_STEP *
+ (new_selector - old_selector),
+ MAX77826_BUCK_RAMP_DELAY);
+ }
+
+ return 0;
+}
+
+static int max77826_read_device_id(struct regmap *regmap, struct device *dev)
+{
+ unsigned int device_id;
+ int res;
+
+ res = regmap_read(regmap, MAX77826_REG_DEVICE_ID, &device_id);
+ if (!res)
+ dev_dbg(dev, "DEVICE_ID: 0x%x\n", device_id);
+
+ return res;
+}
+
+static int max77826_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max77826_regulator_info *info;
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i;
+
+ info = devm_kzalloc(dev, sizeof(struct max77826_regulator_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->rdesc = max77826_regulators_desc;
+ regmap = devm_regmap_init_i2c(client, &max77826_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+
+ info->regmap = regmap;
+ i2c_set_clientdata(client, info);
+
+ config.dev = dev;
+ config.regmap = regmap;
+ config.driver_data = info;
+
+ for (i = 0; i < MAX77826_MAX_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev,
+ &max77826_regulators_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator!\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return max77826_read_device_id(regmap, dev);
+}
+
+static const struct of_device_id max77826_of_match[] = {
+ { .compatible = "maxim,max77826" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max77826_of_match);
+
+static const struct i2c_device_id max77826_id[] = {
+ { "max77826-regulator" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, max77826_id);
+
+static struct i2c_driver max77826_regulator_driver = {
+ .driver = {
+ .name = "max77826",
+ .of_match_table = of_match_ptr(max77826_of_match),
+ },
+ .probe_new = max77826_i2c_probe,
+ .id_table = max77826_id,
+};
+module_i2c_driver(max77826_regulator_driver);
+
+MODULE_AUTHOR("Iskren Chernev <iskren.chernev@gmail.com>");
+MODULE_DESCRIPTION("MAX77826 PMIC regulator driver");
+MODULE_LICENSE("GPL");
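The table macros above fold the register layout into the descriptors. For reference, MAX77826_LDO(4, PMOS) expands (illustratively, not in the patch) to the initializer below:

/* Illustrative expansion of MAX77826_LDO(4, PMOS): */
[MAX77826_LDO4] = {
	.id = MAX77826_LDO4,
	.name = "LDO4",
	.of_match = of_match_ptr("LDO4"),
	.regulators_node = "regulators",
	.ops = &max77826_most_ops,
	.min_uV = 800000,			/* PMOS_LDO_VOLT_MIN */
	.uV_step = 25000,			/* PMOS_LDO_VOLT_STEP */
	.n_voltages = 128,			/* (3975000 - 800000) / 25000 + 1 */
	.enable_reg = MAX77826_REG_LDO_OPMD1,	/* OPMD1 + (4 - 1) / 4 */
	.enable_mask = BIT(7),			/* ((4 - 1) % 4) * 2 + 1 */
	.vsel_reg = MAX77826_REG_LDO4_CFG,	/* LDO1_CFG + (4 - 1) */
	.vsel_mask = MAX77826_MASK_LDO,
	.owner = THIS_MODULE,
},

For the buck, max77826_set_voltage_time_sel() turns the same constants into a settle time: raising the selector by 16 steps yields DIV_ROUND_UP(6250 * 16, 12500) = 8 us.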
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 60599c3bb845..340413bba0c5 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -33,6 +33,10 @@ struct max8998_data {
unsigned int buck2_idx;
};
+static const unsigned int charger_current_table[] = {
+ 90000, 380000, 475000, 550000, 570000, 600000, 700000, 800000,
+};
+
static int max8998_get_enable_register(struct regulator_dev *rdev,
int *reg, int *shift)
{
@@ -63,6 +67,10 @@ static int max8998_get_enable_register(struct regulator_dev *rdev,
*reg = MAX8998_REG_CHGR2;
*shift = 7 - (ldo - MAX8998_ESAFEOUT1);
break;
+ case MAX8998_CHARGER:
+ *reg = MAX8998_REG_CHGR2;
+ *shift = 0;
+ break;
default:
return -EINVAL;
}
@@ -88,6 +96,11 @@ static int max8998_ldo_is_enabled(struct regulator_dev *rdev)
return val & (1 << shift);
}
+static int max8998_ldo_is_enabled_inverted(struct regulator_dev *rdev)
+{
+ return !max8998_ldo_is_enabled(rdev);
+}
+
static int max8998_ldo_enable(struct regulator_dev *rdev)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
@@ -358,6 +371,74 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
return 0;
}
+static int max8998_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8998->iodev->i2c;
+ unsigned int n_currents = rdev->desc->n_current_limits;
+ int i, sel = -1;
+
+ if (n_currents == 0)
+ return -EINVAL;
+
+ if (rdev->desc->curr_table) {
+ const unsigned int *curr_table = rdev->desc->curr_table;
+ bool ascend = curr_table[n_currents - 1] > curr_table[0];
+
+ /* search for closest to maximum */
+ if (ascend) {
+ for (i = n_currents - 1; i >= 0; i--) {
+ if (min_uA <= curr_table[i] &&
+ curr_table[i] <= max_uA) {
+ sel = i;
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < n_currents; i++) {
+ if (min_uA <= curr_table[i] &&
+ curr_table[i] <= max_uA) {
+ sel = i;
+ break;
+ }
+ }
+ }
+ }
+
+ if (sel < 0)
+ return -EINVAL;
+
+ sel <<= ffs(rdev->desc->csel_mask) - 1;
+
+ return max8998_update_reg(i2c, rdev->desc->csel_reg,
+ sel, rdev->desc->csel_mask);
+}
+
+static int max8998_get_current_limit(struct regulator_dev *rdev)
+{
+ struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8998->iodev->i2c;
+ u8 val;
+ int ret;
+
+ ret = max8998_read_reg(i2c, rdev->desc->csel_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ val &= rdev->desc->csel_mask;
+ val >>= ffs(rdev->desc->csel_mask) - 1;
+
+ if (rdev->desc->curr_table) {
+ if (val >= rdev->desc->n_current_limits)
+ return -EINVAL;
+
+ return rdev->desc->curr_table[val];
+ }
+
+ return -EINVAL;
+}
+
static const struct regulator_ops max8998_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
@@ -379,6 +460,15 @@ static const struct regulator_ops max8998_buck_ops = {
.set_voltage_time_sel = max8998_set_voltage_buck_time_sel,
};
+static const struct regulator_ops max8998_charger_ops = {
+ .set_current_limit = max8998_set_current_limit,
+ .get_current_limit = max8998_get_current_limit,
+ .is_enabled = max8998_ldo_is_enabled_inverted,
+ /* Enable/disable are swapped because the register bit is inverted */
+ .enable = max8998_ldo_disable,
+ .disable = max8998_ldo_enable,
+};
+
static const struct regulator_ops max8998_others_ops = {
.is_enabled = max8998_ldo_is_enabled,
.enable = max8998_ldo_enable,
@@ -397,6 +487,19 @@ static const struct regulator_ops max8998_others_ops = {
.owner = THIS_MODULE, \
}
+#define MAX8998_CURRENT_REG(_name, _ops, _table, _reg, _mask) \
+ { \
+ .name = #_name, \
+ .id = MAX8998_##_name, \
+ .ops = _ops, \
+ .curr_table = _table, \
+ .n_current_limits = ARRAY_SIZE(_table), \
+ .csel_reg = _reg, \
+ .csel_mask = _mask, \
+ .type = REGULATOR_CURRENT, \
+ .owner = THIS_MODULE, \
+ }
+
#define MAX8998_OTHERS_REG(_name, _id) \
{ \
.name = #_name, \
@@ -432,6 +535,8 @@ static const struct regulator_desc regulators[] = {
MAX8998_OTHERS_REG(ENVICHG, MAX8998_ENVICHG),
MAX8998_OTHERS_REG(ESAFEOUT1, MAX8998_ESAFEOUT1),
MAX8998_OTHERS_REG(ESAFEOUT2, MAX8998_ESAFEOUT2),
+ MAX8998_CURRENT_REG(CHARGER, &max8998_charger_ops,
+ charger_current_table, MAX8998_REG_CHGR1, 0x7),
};
static int max8998_pmic_dt_parse_dvs_gpio(struct max8998_dev *iodev,
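For the new CHARGER regulator, max8998_set_current_limit() walks charger_current_table from the top (the table is ascending) and picks the largest entry inside [min_uA, max_uA]. A consumer-side sketch, with the "charger" supply name hypothetical and error handling elided:

#include <linux/regulator/consumer.h>

/* Sketch only: "charger" is a hypothetical supply mapped to MAX8998_CHARGER. */
static int example_configure_charger(struct device *dev)
{
	struct regulator *chg = regulator_get(dev, "charger");

	if (IS_ERR(chg))
		return PTR_ERR(chg);

	/* The table walk selects 600000 uA, the largest entry in [450000, 600000]. */
	regulator_set_current_limit(chg, 450000, 600000);
	regulator_enable(chg);	/* clears the inverted disable bit in CHGR2 */
	regulator_put(chg);
	return 0;
}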
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index e5a02711cb46..6d0ad74935b3 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -391,11 +391,11 @@ static const struct of_device_id mcp16502_ids[] = {
};
MODULE_DEVICE_TABLE(of, mcp16502_ids);
-static const struct regulator_linear_range b1l12_ranges[] = {
+static const struct linear_range b1l12_ranges[] = {
REGULATOR_LINEAR_RANGE(1200000, VDD_LOW_SEL, VDD_HIGH_SEL, 50000),
};
-static const struct regulator_linear_range b234_ranges[] = {
+static const struct linear_range b234_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, VDD_LOW_SEL, VDD_HIGH_SEL, 25000),
};
diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c
index 6ed987648188..f2300714d5a9 100644
--- a/drivers/regulator/mp8859.c
+++ b/drivers/regulator/mp8859.c
@@ -73,7 +73,7 @@ static int mp8859_get_voltage_sel(struct regulator_dev *rdev)
return val;
}
-static const struct regulator_linear_range mp8859_dcdc_ranges[] = {
+static const struct linear_range mp8859_dcdc_ranges[] = {
REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000),
};
diff --git a/drivers/regulator/mt6323-regulator.c b/drivers/regulator/mt6323-regulator.c
index 893ea190788a..ff9016170db3 100644
--- a/drivers/regulator/mt6323-regulator.c
+++ b/drivers/regulator/mt6323-regulator.c
@@ -102,15 +102,15 @@ struct mt6323_regulator_info {
.modeset_mask = _modeset_mask, \
}
-static const struct regulator_linear_range buck_volt_range1[] = {
+static const struct linear_range buck_volt_range1[] = {
REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
};
-static const struct regulator_linear_range buck_volt_range2[] = {
+static const struct linear_range buck_volt_range2[] = {
REGULATOR_LINEAR_RANGE(1400000, 0, 0x7f, 12500),
};
-static const struct regulator_linear_range buck_volt_range3[] = {
+static const struct linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
};
diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
index ba42682e06f3..13cb6ac9a892 100644
--- a/drivers/regulator/mt6358-regulator.c
+++ b/drivers/regulator/mt6358-regulator.c
@@ -137,19 +137,19 @@ struct mt6358_regulator_info {
.qi = BIT(15), \
}
-static const struct regulator_linear_range buck_volt_range1[] = {
+static const struct linear_range buck_volt_range1[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 6250),
};
-static const struct regulator_linear_range buck_volt_range2[] = {
+static const struct linear_range buck_volt_range2[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 12500),
};
-static const struct regulator_linear_range buck_volt_range3[] = {
+static const struct linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
};
-static const struct regulator_linear_range buck_volt_range4[] = {
+static const struct linear_range buck_volt_range4[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 0x7f, 12500),
};
diff --git a/drivers/regulator/mt6380-regulator.c b/drivers/regulator/mt6380-regulator.c
index b6aed090b5e0..9efd8710a6f3 100644
--- a/drivers/regulator/mt6380-regulator.c
+++ b/drivers/regulator/mt6380-regulator.c
@@ -152,15 +152,15 @@ struct mt6380_regulator_info {
.modeset_mask = _modeset_mask, \
}
-static const struct regulator_linear_range buck_volt_range1[] = {
+static const struct linear_range buck_volt_range1[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 0xfe, 6250),
};
-static const struct regulator_linear_range buck_volt_range2[] = {
+static const struct linear_range buck_volt_range2[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 0xfe, 6250),
};
-static const struct regulator_linear_range buck_volt_range3[] = {
+static const struct linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(1200000, 0, 0x3c, 25000),
};
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
index fd9ed864a0c1..269c2a6028e8 100644
--- a/drivers/regulator/mt6397-regulator.c
+++ b/drivers/regulator/mt6397-regulator.c
@@ -102,15 +102,15 @@ struct mt6397_regulator_info {
.qi = BIT(15), \
}
-static const struct regulator_linear_range buck_volt_range1[] = {
+static const struct linear_range buck_volt_range1[] = {
REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
};
-static const struct regulator_linear_range buck_volt_range2[] = {
+static const struct linear_range buck_volt_range2[] = {
REGULATOR_LINEAR_RANGE(800000, 0, 0x7f, 6250),
};
-static const struct regulator_linear_range buck_volt_range3[] = {
+static const struct linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000),
};
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 31325912d311..337dd614695e 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -22,14 +22,14 @@
#include <linux/of_platform.h>
#include <linux/regulator/of_regulator.h>
-static const struct regulator_linear_range smps_low_ranges[] = {
+static const struct linear_range smps_low_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(500000, 0x1, 0x6, 0),
REGULATOR_LINEAR_RANGE(510000, 0x7, 0x79, 10000),
REGULATOR_LINEAR_RANGE(1650000, 0x7A, 0x7f, 0),
};
-static const struct regulator_linear_range smps_high_ranges[] = {
+static const struct linear_range smps_high_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(1000000, 0x1, 0x6, 0),
REGULATOR_LINEAR_RANGE(1020000, 0x7, 0x79, 20000),
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index c86ad40015ce..79bdc129cb50 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -86,7 +86,7 @@ enum rpmh_regulator_type {
struct rpmh_vreg_hw_data {
enum rpmh_regulator_type regulator_type;
const struct regulator_ops *ops;
- const struct regulator_linear_range voltage_range;
+ const struct linear_range voltage_range;
int n_voltages;
int hpm_min_load_uA;
const int *pmic_mode_map;
@@ -832,11 +832,11 @@ static const struct rpmh_vreg_init_data pm8150_vreg_data[] = {
RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l2-l10"),
RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l1-l8-l11"),
RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
- RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l6-l17"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l16-l17"),
RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
- RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l6-l17"),
- RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l6-l17"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16-l17"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l16-l17"),
RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
{},
};
@@ -857,7 +857,7 @@ static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = {
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l4-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l4-l5-l6"),
RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-l11"),
- RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8"),
RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l9-l10"),
RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"),
RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"),
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 7fc97f23fcf4..0066f850f15d 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -148,41 +148,41 @@ static const struct rpm_reg_parts rpm8960_ncp_parts = {
/*
* Physically available PMIC regulator voltage ranges
*/
-static const struct regulator_linear_range pldo_ranges[] = {
+static const struct linear_range pldo_ranges[] = {
REGULATOR_LINEAR_RANGE( 750000, 0, 59, 12500),
REGULATOR_LINEAR_RANGE(1500000, 60, 123, 25000),
REGULATOR_LINEAR_RANGE(3100000, 124, 160, 50000),
};
-static const struct regulator_linear_range nldo_ranges[] = {
+static const struct linear_range nldo_ranges[] = {
REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
};
-static const struct regulator_linear_range nldo1200_ranges[] = {
+static const struct linear_range nldo1200_ranges[] = {
REGULATOR_LINEAR_RANGE( 375000, 0, 59, 6250),
REGULATOR_LINEAR_RANGE( 750000, 60, 123, 12500),
};
-static const struct regulator_linear_range smps_ranges[] = {
+static const struct linear_range smps_ranges[] = {
REGULATOR_LINEAR_RANGE( 375000, 0, 29, 12500),
REGULATOR_LINEAR_RANGE( 750000, 30, 89, 12500),
REGULATOR_LINEAR_RANGE(1500000, 90, 153, 25000),
};
-static const struct regulator_linear_range ftsmps_ranges[] = {
+static const struct linear_range ftsmps_ranges[] = {
REGULATOR_LINEAR_RANGE( 350000, 0, 6, 50000),
REGULATOR_LINEAR_RANGE( 700000, 7, 63, 12500),
REGULATOR_LINEAR_RANGE(1500000, 64, 100, 50000),
};
-static const struct regulator_linear_range smb208_ranges[] = {
+static const struct linear_range smb208_ranges[] = {
REGULATOR_LINEAR_RANGE( 375000, 0, 29, 12500),
REGULATOR_LINEAR_RANGE( 750000, 30, 89, 12500),
REGULATOR_LINEAR_RANGE(1500000, 90, 153, 25000),
REGULATOR_LINEAR_RANGE(3100000, 154, 234, 25000),
};
-static const struct regulator_linear_range ncp_ranges[] = {
+static const struct linear_range ncp_ranges[] = {
REGULATOR_LINEAR_RANGE(1500000, 0, 31, 50000),
};
@@ -604,16 +604,6 @@ static const struct qcom_rpm_reg pm8921_smps = {
.supports_force_mode_bypass = false,
};
-static const struct qcom_rpm_reg pm8921_ftsmps = {
- .desc.linear_ranges = ftsmps_ranges,
- .desc.n_linear_ranges = ARRAY_SIZE(ftsmps_ranges),
- .desc.n_voltages = 101,
- .desc.ops = &uV_ops,
- .parts = &rpm8960_smps_parts,
- .supports_force_mode_auto = true,
- .supports_force_mode_bypass = false,
-};
-
static const struct qcom_rpm_reg pm8921_ncp = {
.desc.linear_ranges = ncp_ranges,
.desc.n_linear_ranges = ARRAY_SIZE(ncp_ranges),
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index fdde4195cefb..53a64d856926 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -199,7 +199,7 @@ static const struct regulator_ops rpm_bob_ops = {
};
static const struct regulator_desc pma8084_hfsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
},
@@ -209,7 +209,7 @@ static const struct regulator_desc pma8084_hfsmps = {
};
static const struct regulator_desc pma8084_ftsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
},
@@ -219,7 +219,7 @@ static const struct regulator_desc pma8084_ftsmps = {
};
static const struct regulator_desc pma8084_pldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
@@ -230,7 +230,7 @@ static const struct regulator_desc pma8084_pldo = {
};
static const struct regulator_desc pma8084_nldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
},
.n_linear_ranges = 1,
@@ -243,7 +243,7 @@ static const struct regulator_desc pma8084_switch = {
};
static const struct regulator_desc pm8x41_hfsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(1575000, 96, 158, 25000),
},
@@ -253,7 +253,7 @@ static const struct regulator_desc pm8x41_hfsmps = {
};
static const struct regulator_desc pm8841_ftsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
},
@@ -263,7 +263,7 @@ static const struct regulator_desc pm8841_ftsmps = {
};
static const struct regulator_desc pm8941_boost = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
},
.n_linear_ranges = 1,
@@ -272,7 +272,7 @@ static const struct regulator_desc pm8941_boost = {
};
static const struct regulator_desc pm8941_pldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
@@ -283,7 +283,7 @@ static const struct regulator_desc pm8941_pldo = {
};
static const struct regulator_desc pm8941_nldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
},
.n_linear_ranges = 1,
@@ -302,7 +302,7 @@ static const struct regulator_desc pm8941_switch = {
};
static const struct regulator_desc pm8916_pldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(750000, 0, 208, 12500),
},
.n_linear_ranges = 1,
@@ -311,7 +311,7 @@ static const struct regulator_desc pm8916_pldo = {
};
static const struct regulator_desc pm8916_nldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 93, 12500),
},
.n_linear_ranges = 1,
@@ -320,7 +320,7 @@ static const struct regulator_desc pm8916_nldo = {
};
static const struct regulator_desc pm8916_buck_lvo_smps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(750000, 96, 127, 25000),
},
@@ -330,7 +330,7 @@ static const struct regulator_desc pm8916_buck_lvo_smps = {
};
static const struct regulator_desc pm8916_buck_hvo_smps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1550000, 0, 31, 25000),
},
.n_linear_ranges = 1,
@@ -339,7 +339,7 @@ static const struct regulator_desc pm8916_buck_hvo_smps = {
};
static const struct regulator_desc pm8950_hfsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(1550000, 96, 127, 25000),
},
@@ -349,7 +349,7 @@ static const struct regulator_desc pm8950_hfsmps = {
};
static const struct regulator_desc pm8950_ftsmps2p5 = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(80000, 0, 255, 5000),
REGULATOR_LINEAR_RANGE(160000, 256, 460, 10000),
},
@@ -359,7 +359,7 @@ static const struct regulator_desc pm8950_ftsmps2p5 = {
};
static const struct regulator_desc pm8950_ult_nldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 202, 12500),
},
.n_linear_ranges = 1,
@@ -368,7 +368,7 @@ static const struct regulator_desc pm8950_ult_nldo = {
};
static const struct regulator_desc pm8950_ult_pldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1750000, 0, 127, 12500),
},
.n_linear_ranges = 1,
@@ -377,7 +377,7 @@ static const struct regulator_desc pm8950_ult_pldo = {
};
static const struct regulator_desc pm8950_pldo_lv = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1500000, 0, 16, 25000),
},
.n_linear_ranges = 1,
@@ -386,7 +386,7 @@ static const struct regulator_desc pm8950_pldo_lv = {
};
static const struct regulator_desc pm8950_pldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(975000, 0, 164, 12500),
},
.n_linear_ranges = 1,
@@ -396,7 +396,7 @@ static const struct regulator_desc pm8950_pldo = {
static const struct regulator_desc pm8994_hfsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
},
@@ -406,7 +406,7 @@ static const struct regulator_desc pm8994_hfsmps = {
};
static const struct regulator_desc pm8994_ftsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 199, 5000),
REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000),
},
@@ -416,7 +416,7 @@ static const struct regulator_desc pm8994_ftsmps = {
};
static const struct regulator_desc pm8994_nldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
},
.n_linear_ranges = 1,
@@ -425,7 +425,7 @@ static const struct regulator_desc pm8994_nldo = {
};
static const struct regulator_desc pm8994_pldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
@@ -446,7 +446,7 @@ static const struct regulator_desc pm8994_lnldo = {
};
static const struct regulator_desc pmi8994_ftsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 199, 5000),
REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000),
},
@@ -456,7 +456,7 @@ static const struct regulator_desc pmi8994_ftsmps = {
};
static const struct regulator_desc pmi8994_hfsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 80, 12500),
REGULATOR_LINEAR_RANGE(700000, 81, 141, 25000),
},
@@ -466,7 +466,7 @@ static const struct regulator_desc pmi8994_hfsmps = {
};
static const struct regulator_desc pmi8994_bby = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(3000000, 0, 44, 50000),
},
.n_linear_ranges = 1,
@@ -475,7 +475,7 @@ static const struct regulator_desc pmi8994_bby = {
};
static const struct regulator_desc pmi8994_boost = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
},
.n_linear_ranges = 1,
@@ -484,7 +484,7 @@ static const struct regulator_desc pmi8994_boost = {
};
static const struct regulator_desc pm8998_ftsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
},
.n_linear_ranges = 1,
@@ -493,7 +493,7 @@ static const struct regulator_desc pm8998_ftsmps = {
};
static const struct regulator_desc pm8998_hfsmps = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
},
.n_linear_ranges = 1,
@@ -502,7 +502,7 @@ static const struct regulator_desc pm8998_hfsmps = {
};
static const struct regulator_desc pm8998_nldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
},
.n_linear_ranges = 1,
@@ -511,7 +511,7 @@ static const struct regulator_desc pm8998_nldo = {
};
static const struct regulator_desc pm8998_pldo = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000),
},
.n_linear_ranges = 1,
@@ -520,7 +520,7 @@ static const struct regulator_desc pm8998_pldo = {
};
static const struct regulator_desc pm8998_pldo_lv = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000),
},
.n_linear_ranges = 1,
@@ -533,7 +533,7 @@ static const struct regulator_desc pm8998_switch = {
};
static const struct regulator_desc pmi8998_bob = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000),
},
.n_linear_ranges = 1,
@@ -542,7 +542,7 @@ static const struct regulator_desc pmi8998_bob = {
};
static const struct regulator_desc pms405_hfsmps3 = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
},
.n_linear_ranges = 1,
@@ -551,7 +551,7 @@ static const struct regulator_desc pms405_hfsmps3 = {
};
static const struct regulator_desc pms405_nldo300 = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
},
.n_linear_ranges = 1,
@@ -560,7 +560,7 @@ static const struct regulator_desc pms405_nldo300 = {
};
static const struct regulator_desc pms405_nldo1200 = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
},
.n_linear_ranges = 1,
@@ -569,7 +569,7 @@ static const struct regulator_desc pms405_nldo1200 = {
};
static const struct regulator_desc pms405_pldo50 = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1664000, 0, 128, 16000),
},
.n_linear_ranges = 1,
@@ -578,7 +578,7 @@ static const struct regulator_desc pms405_pldo50 = {
};
static const struct regulator_desc pms405_pldo150 = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1664000, 0, 128, 16000),
},
.n_linear_ranges = 1,
@@ -587,7 +587,7 @@ static const struct regulator_desc pms405_pldo150 = {
};
static const struct regulator_desc pms405_pldo600 = {
- .linear_ranges = (struct regulator_linear_range[]) {
+ .linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1256000, 0, 98, 8000),
},
.n_linear_ranges = 1,
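Every desc in this file attaches its ranges through a C99 compound literal, so the rename only touches the literal's type; at file scope such a literal has static storage duration. The pattern, with example_smps purely illustrative:

/* Illustrative: the compound literal gives the desc an anonymous,
 * statically-allocated array of ranges (example_smps is not in the tree).
 */
static const struct regulator_desc example_smps = {
	.linear_ranges = (struct linear_range[]) {
		REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
	},
	.n_linear_ranges = 1,
};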
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 31f79fda3238..e926c1a85846 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -165,14 +165,14 @@ static const int rk808_buck_config_regs[] = {
RK808_BUCK4_CONFIG_REG,
};
-static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
+static const struct linear_range rk808_ldo3_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0, 13, 100000),
REGULATOR_LINEAR_RANGE(2500000, 15, 15, 0),
};
#define RK809_BUCK5_SEL_CNT (8)
-static const struct regulator_linear_range rk809_buck5_voltage_ranges[] = {
+static const struct linear_range rk809_buck5_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(1500000, 0, 0, 0),
REGULATOR_LINEAR_RANGE(1800000, 1, 3, 200000),
REGULATOR_LINEAR_RANGE(2800000, 4, 5, 200000),
@@ -201,14 +201,14 @@ static const struct regulator_linear_range rk809_buck5_voltage_ranges[] = {
#define RK817_BUCK1_SEL_CNT (RK817_BUCK1_SEL0 + RK817_BUCK1_SEL1 + 1)
#define RK817_BUCK3_SEL_CNT (RK817_BUCK1_SEL0 + RK817_BUCK3_SEL1 + 1)
-static const struct regulator_linear_range rk817_buck1_voltage_ranges[] = {
+static const struct linear_range rk817_buck1_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(RK817_BUCK1_MIN0, 0,
RK817_BUCK1_SEL0, RK817_BUCK1_STP0),
REGULATOR_LINEAR_RANGE(RK817_BUCK1_MIN1, RK817_BUCK1_SEL0 + 1,
RK817_BUCK1_SEL_CNT, RK817_BUCK1_STP1),
};
-static const struct regulator_linear_range rk817_buck3_voltage_ranges[] = {
+static const struct linear_range rk817_buck3_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(RK817_BUCK1_MIN0, 0,
RK817_BUCK1_SEL0, RK817_BUCK1_STP0),
REGULATOR_LINEAR_RANGE(RK817_BUCK1_MIN1, RK817_BUCK1_SEL0 + 1,
@@ -665,7 +665,7 @@ static const struct regulator_ops rk808_switch_ops = {
.set_suspend_disable = rk808_set_suspend_disable,
};
-static const struct regulator_linear_range rk805_buck_1_2_voltage_ranges[] = {
+static const struct linear_range rk805_buck_1_2_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(712500, 0, 59, 12500),
REGULATOR_LINEAR_RANGE(1800000, 60, 62, 200000),
REGULATOR_LINEAR_RANGE(2300000, 63, 63, 0),
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 23d288278957..33cf84bce05a 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -749,37 +749,37 @@ static const struct regulator_ops s2mps15_reg_buck_ops = {
}
/* voltage range for s2mps15 LDO 3, 5, 15, 16, 18, 20, 23 and 27 */
-static const struct regulator_linear_range s2mps15_ldo_voltage_ranges1[] = {
+static const struct linear_range s2mps15_ldo_voltage_ranges1[] = {
REGULATOR_LINEAR_RANGE(1000000, 0xc, 0x38, 25000),
};
/* voltage range for s2mps15 LDO 2, 6, 14, 17, 19, 21, 24 and 25 */
-static const struct regulator_linear_range s2mps15_ldo_voltage_ranges2[] = {
+static const struct linear_range s2mps15_ldo_voltage_ranges2[] = {
REGULATOR_LINEAR_RANGE(1800000, 0x0, 0x3f, 25000),
};
/* voltage range for s2mps15 LDO 4, 11, 12, 13, 22 and 26 */
-static const struct regulator_linear_range s2mps15_ldo_voltage_ranges3[] = {
+static const struct linear_range s2mps15_ldo_voltage_ranges3[] = {
REGULATOR_LINEAR_RANGE(700000, 0x0, 0x34, 12500),
};
/* voltage range for s2mps15 LDO 7, 8, 9 and 10 */
-static const struct regulator_linear_range s2mps15_ldo_voltage_ranges4[] = {
+static const struct linear_range s2mps15_ldo_voltage_ranges4[] = {
REGULATOR_LINEAR_RANGE(700000, 0x10, 0x20, 25000),
};
/* voltage range for s2mps15 LDO 1 */
-static const struct regulator_linear_range s2mps15_ldo_voltage_ranges5[] = {
+static const struct linear_range s2mps15_ldo_voltage_ranges5[] = {
REGULATOR_LINEAR_RANGE(500000, 0x0, 0x20, 12500),
};
/* voltage range for s2mps15 BUCK 1, 2, 3, 4, 5, 6 and 7 */
-static const struct regulator_linear_range s2mps15_buck_voltage_ranges1[] = {
+static const struct linear_range s2mps15_buck_voltage_ranges1[] = {
REGULATOR_LINEAR_RANGE(500000, 0x20, 0xc0, 6250),
};
/* voltage range for s2mps15 BUCK 8, 9 and 10 */
-static const struct regulator_linear_range s2mps15_buck_voltage_ranges2[] = {
+static const struct linear_range s2mps15_buck_voltage_ranges2[] = {
REGULATOR_LINEAR_RANGE(1000000, 0x20, 0x78, 12500),
};
diff --git a/drivers/regulator/sky81452-regulator.c b/drivers/regulator/sky81452-regulator.c
index 177dede82a61..37658affe072 100644
--- a/drivers/regulator/sky81452-regulator.c
+++ b/drivers/regulator/sky81452-regulator.c
@@ -32,7 +32,7 @@ static const struct regulator_ops sky81452_reg_ops = {
.is_enabled = regulator_is_enabled_regmap,
};
-static const struct regulator_linear_range sky81452_reg_ranges[] = {
+static const struct linear_range sky81452_reg_ranges[] = {
REGULATOR_LINEAR_RANGE(4500000, 0, 14, 250000),
REGULATOR_LINEAR_RANGE(9000000, 15, 31, 1000000),
};
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index f3d7d007ecbb..adc9973d1b2f 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -57,13 +57,13 @@ enum {
/* Ramp delay worst case is (2250uV/uS) */
#define PMIC_RAMP_DELAY 2200
-static const struct regulator_linear_range buck1_ranges[] = {
+static const struct linear_range buck1_ranges[] = {
REGULATOR_LINEAR_RANGE(725000, 0, 4, 0),
REGULATOR_LINEAR_RANGE(725000, 5, 36, 25000),
REGULATOR_LINEAR_RANGE(1500000, 37, 63, 0),
};
-static const struct regulator_linear_range buck2_ranges[] = {
+static const struct linear_range buck2_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 17, 0),
REGULATOR_LINEAR_RANGE(1050000, 18, 19, 0),
REGULATOR_LINEAR_RANGE(1100000, 20, 21, 0),
@@ -77,7 +77,7 @@ static const struct regulator_linear_range buck2_ranges[] = {
REGULATOR_LINEAR_RANGE(1500000, 36, 63, 0),
};
-static const struct regulator_linear_range buck3_ranges[] = {
+static const struct linear_range buck3_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 19, 0),
REGULATOR_LINEAR_RANGE(1100000, 20, 23, 0),
REGULATOR_LINEAR_RANGE(1200000, 24, 27, 0),
@@ -87,7 +87,7 @@ static const struct regulator_linear_range buck3_ranges[] = {
REGULATOR_LINEAR_RANGE(3400000, 56, 63, 0),
};
-static const struct regulator_linear_range buck4_ranges[] = {
+static const struct linear_range buck4_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 27, 25000),
REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0),
REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0),
@@ -97,19 +97,19 @@ static const struct regulator_linear_range buck4_ranges[] = {
REGULATOR_LINEAR_RANGE(3900000, 61, 63, 0),
};
-static const struct regulator_linear_range ldo1_ranges[] = {
+static const struct linear_range ldo1_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
};
-static const struct regulator_linear_range ldo2_ranges[] = {
+static const struct linear_range ldo2_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
};
-static const struct regulator_linear_range ldo3_ranges[] = {
+static const struct linear_range ldo3_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
@@ -117,13 +117,13 @@ static const struct regulator_linear_range ldo3_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 31, 31, 0),
};
-static const struct regulator_linear_range ldo5_ranges[] = {
+static const struct linear_range ldo5_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 30, 100000),
REGULATOR_LINEAR_RANGE(3900000, 31, 31, 0),
};
-static const struct regulator_linear_range ldo6_ranges[] = {
+static const struct linear_range ldo6_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
};
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index 5a5e9b5bf4be..9910e949373c 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -71,23 +71,23 @@ struct tps65086_regulator {
unsigned int decay_mask;
};
-static const struct regulator_linear_range tps65086_10mv_ranges[] = {
+static const struct linear_range tps65086_10mv_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(410000, 0x1, 0x7F, 10000),
};
-static const struct regulator_linear_range tps65086_buck126_25mv_ranges[] = {
+static const struct linear_range tps65086_buck126_25mv_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(1000000, 0x1, 0x18, 0),
REGULATOR_LINEAR_RANGE(1025000, 0x19, 0x7F, 25000),
};
-static const struct regulator_linear_range tps65086_buck345_25mv_ranges[] = {
+static const struct linear_range tps65086_buck345_25mv_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(425000, 0x1, 0x7F, 25000),
};
-static const struct regulator_linear_range tps65086_ldoa1_ranges[] = {
+static const struct linear_range tps65086_ldoa1_ranges[] = {
REGULATOR_LINEAR_RANGE(1350000, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(1500000, 0x1, 0x7, 100000),
REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xB, 100000),
@@ -95,7 +95,7 @@ static const struct regulator_linear_range tps65086_ldoa1_ranges[] = {
REGULATOR_LINEAR_RANGE(3300000, 0xE, 0xE, 0),
};
-static const struct regulator_linear_range tps65086_ldoa23_ranges[] = {
+static const struct linear_range tps65086_ldoa23_ranges[] = {
REGULATOR_LINEAR_RANGE(700000, 0x0, 0xD, 50000),
REGULATOR_LINEAR_RANGE(1400000, 0xE, 0xF, 100000),
};
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 67ba78da77ec..d27dbbafcf72 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -56,14 +56,14 @@ static const unsigned int LDO1_VSEL_table[] = {
2800000, 3000000, 3100000, 3300000,
};
-static const struct regulator_linear_range tps65217_uv1_ranges[] = {
+static const struct linear_range tps65217_uv1_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 24, 25000),
REGULATOR_LINEAR_RANGE(1550000, 25, 52, 50000),
REGULATOR_LINEAR_RANGE(3000000, 53, 55, 100000),
REGULATOR_LINEAR_RANGE(3300000, 56, 63, 0),
};
-static const struct regulator_linear_range tps65217_uv2_ranges[] = {
+static const struct linear_range tps65217_uv2_ranges[] = {
REGULATOR_LINEAR_RANGE(1500000, 0, 8, 50000),
REGULATOR_LINEAR_RANGE(2000000, 9, 13, 100000),
REGULATOR_LINEAR_RANGE(2450000, 14, 31, 50000),
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index b72035610013..05d13f807918 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -56,17 +56,17 @@
.bypass_mask = _sm, \
} \
-static const struct regulator_linear_range dcdc1_dcdc2_ranges[] = {
+static const struct linear_range dcdc1_dcdc2_ranges[] = {
REGULATOR_LINEAR_RANGE(850000, 0x0, 0x32, 10000),
REGULATOR_LINEAR_RANGE(1375000, 0x33, 0x3f, 25000),
};
-static const struct regulator_linear_range ldo1_dcdc3_ranges[] = {
+static const struct linear_range ldo1_dcdc3_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0x0, 0x1a, 25000),
REGULATOR_LINEAR_RANGE(1600000, 0x1b, 0x3f, 50000),
};
-static const struct regulator_linear_range dcdc4_ranges[] = {
+static const struct linear_range dcdc4_ranges[] = {
REGULATOR_LINEAR_RANGE(1175000, 0x0, 0xf, 25000),
REGULATOR_LINEAR_RANGE(1600000, 0x10, 0x34, 50000),
};
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 276faeddc370..15c79931ea89 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -46,11 +46,11 @@ enum tps65912_regulators { DCDC1, DCDC2, DCDC3, DCDC4, LDO1, LDO2, LDO3,
.n_linear_ranges = ARRAY_SIZE(_lr), \
}
-static const struct regulator_linear_range tps65912_dcdc_ranges[] = {
+static const struct linear_range tps65912_dcdc_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0x0, 0x3f, 50000),
};
-static const struct regulator_linear_range tps65912_ldo_ranges[] = {
+static const struct linear_range tps65912_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0x0, 0x20, 25000),
REGULATOR_LINEAR_RANGE(1650000, 0x21, 0x3c, 50000),
REGULATOR_LINEAR_RANGE(3100000, 0x3d, 0x3f, 100000),
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index 85a6a8ca8c1b..a29e65230132 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -271,7 +271,7 @@ static int tps80031_vbus_is_enabled(struct regulator_dev *rdev)
{
struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps80031_dev(rdev);
- int ret = -EIO;
+ int ret;
uint8_t ctrl1 = 0;
uint8_t ctrl3 = 0;
@@ -322,7 +322,7 @@ static int tps80031_vbus_disable(struct regulator_dev *rdev)
{
struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps80031_dev(rdev);
- int ret = 0;
+ int ret;
if (ri->config_flags & TPS80031_VBUS_DISCHRG_EN_PDN) {
ret = tps80031_write(parent, TPS80031_SLAVE_ID2,
@@ -530,7 +530,8 @@ static int tps80031_regulator_config(struct device *parent,
case TPS80031_REGULATOR_LDOUSB:
if (ri->config_flags & (TPS80031_USBLDO_INPUT_VSYS |
TPS80031_USBLDO_INPUT_PMID)) {
- unsigned val = 0;
+ unsigned val;
+
if (ri->config_flags & TPS80031_USBLDO_INPUT_VSYS)
val = MISC2_LDOUSB_IN_VSYS;
else
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 866b4dd01da9..4a51cfea45ac 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -360,12 +360,12 @@ static const u16 VINTANA2_VSEL_table[] = {
};
/* 600mV to 1450mV in 12.5 mV steps */
-static const struct regulator_linear_range VDD1_ranges[] = {
+static const struct linear_range VDD1_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500)
};
/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */
-static const struct regulator_linear_range VDD2_ranges[] = {
+static const struct linear_range VDD2_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),
REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500)
};
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index b8100c3cedad..f7db250a7583 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -495,7 +495,7 @@ static const struct regulator_ops twlsmps_ops = {
};
/*----------------------------------------------------------------------*/
-static const struct regulator_linear_range twl6030ldo_linear_range[] = {
+static const struct linear_range twl6030ldo_linear_range[] = {
REGULATOR_LINEAR_RANGE(0, 0, 0, 0),
REGULATOR_LINEAR_RANGE(1000000, 1, 24, 100000),
REGULATOR_LINEAR_RANGE(2750000, 31, 31, 0),
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 018dbbd96771..ad2203d11a88 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -204,7 +204,7 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data)
* BUCKV specifics
*/
-static const struct regulator_linear_range wm831x_buckv_ranges[] = {
+static const struct linear_range wm831x_buckv_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 0x7, 0),
REGULATOR_LINEAR_RANGE(600000, 0x8, 0x68, 12500),
};
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 56754686c982..7b6cf4810cb7 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -59,7 +59,7 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
* General purpose LDOs
*/
-static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
+static const struct linear_range wm831x_gp_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 14, 50000),
REGULATOR_LINEAR_RANGE(1700000, 15, 31, 100000),
};
@@ -312,7 +312,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
* Analogue LDOs
*/
-static const struct regulator_linear_range wm831x_aldo_ranges[] = {
+static const struct linear_range wm831x_aldo_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 12, 50000),
REGULATOR_LINEAR_RANGE(1700000, 13, 31, 100000),
};
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 56d6168a888d..ae5f0e7fce8b 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -470,7 +470,7 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
return 0;
}
-static const struct regulator_linear_range wm8350_ldo_ranges[] = {
+static const struct linear_range wm8350_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 15, 50000),
REGULATOR_LINEAR_RANGE(1800000, 16, 31, 100000),
};
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 6f331b51e479..4cb1fbb59722 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -13,7 +13,7 @@
#include <linux/regulator/driver.h>
#include <linux/mfd/wm8400-private.h>
-static const struct regulator_linear_range wm8400_ldo_ranges[] = {
+static const struct linear_range wm8400_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 14, 50000),
REGULATOR_LINEAR_RANGE(1700000, 15, 31, 100000),
};
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 24e6d420b26b..19926506d033 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -33,6 +33,7 @@
enum hi6220_reset_ctrl_type {
PERIPHERAL,
MEDIA,
+ AO,
};
struct hi6220_reset_data {
@@ -92,6 +93,65 @@ static const struct reset_control_ops hi6220_media_reset_ops = {
.deassert = hi6220_media_deassert,
};
+#define AO_SCTRL_SC_PW_CLKEN0 0x800
+#define AO_SCTRL_SC_PW_CLKDIS0 0x804
+
+#define AO_SCTRL_SC_PW_RSTEN0 0x810
+#define AO_SCTRL_SC_PW_RSTDIS0 0x814
+
+#define AO_SCTRL_SC_PW_ISOEN0 0x820
+#define AO_SCTRL_SC_PW_ISODIS0 0x824
+#define AO_MAX_INDEX 12
+
+static int hi6220_ao_assert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
+ int ret;
+
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_RSTEN0, BIT(idx));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_ISOEN0, BIT(idx));
+ if (ret)
+ return ret;
+
+ return regmap_write(regmap, AO_SCTRL_SC_PW_CLKDIS0, BIT(idx));
+}
+
+static int hi6220_ao_deassert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
+ int ret;
+
+ /*
+ * Disabling isolation before enabling the clocks and
+ * deasserting reset would avoid glitches, but this
+ * order is preserved to match the vendor code.
+ */
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_RSTDIS0, BIT(idx));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_ISODIS0, BIT(idx));
+ if (ret)
+ return ret;
+
+ return regmap_write(regmap, AO_SCTRL_SC_PW_CLKEN0, BIT(idx));
+}
+
+static const struct reset_control_ops hi6220_ao_reset_ops = {
+ .assert = hi6220_ao_assert,
+ .deassert = hi6220_ao_deassert,
+};
+
static int hi6220_reset_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -117,9 +177,12 @@ static int hi6220_reset_probe(struct platform_device *pdev)
if (type == MEDIA) {
data->rc_dev.ops = &hi6220_media_reset_ops;
data->rc_dev.nr_resets = MEDIA_MAX_INDEX;
- } else {
+ } else if (type == PERIPHERAL) {
data->rc_dev.ops = &hi6220_peripheral_reset_ops;
data->rc_dev.nr_resets = PERIPH_MAX_INDEX;
+ } else {
+ data->rc_dev.ops = &hi6220_ao_reset_ops;
+ data->rc_dev.nr_resets = AO_MAX_INDEX;
}
return reset_controller_register(&data->rc_dev);
@@ -134,6 +197,10 @@ static const struct of_device_id hi6220_reset_match[] = {
.compatible = "hisilicon,hi6220-mediactrl",
.data = (void *)MEDIA,
},
+ {
+ .compatible = "hisilicon,hi6220-aoctrl",
+ .data = (void *)AO,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, hi6220_reset_match);
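The AO ops pair each reset index with its isolation and clock bits: asserting writes RSTEN0/ISOEN0/CLKDIS0, deasserting writes RSTDIS0/ISODIS0/CLKEN0. A consumer-side sketch (reset index and settle time hypothetical):

#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* Sketch only: pulse one always-on domain reset from a consumer driver. */
static int example_pulse_ao_reset(struct platform_device *pdev)
{
	struct reset_control *rc;

	rc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rc))
		return PTR_ERR(rc);

	reset_control_assert(rc);	/* RSTEN0 + ISOEN0 + CLKDIS0 writes */
	usleep_range(10, 20);		/* hypothetical settle time */
	return reset_control_deassert(rc);	/* RSTDIS0 + ISODIS0 + CLKEN0 */
}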
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 1443a55a0c29..d170fe663210 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -15,6 +15,7 @@
#include <linux/regmap.h>
#include <dt-bindings/reset/imx7-reset.h>
#include <dt-bindings/reset/imx8mq-reset.h>
+#include <dt-bindings/reset/imx8mp-reset.h>
struct imx7_src_signal {
unsigned int offset, bit;
@@ -145,6 +146,18 @@ enum imx8mq_src_registers {
SRC_DDRC2_RCR = 0x1004,
};
+enum imx8mp_src_registers {
+ SRC_SUPERMIX_RCR = 0x0018,
+ SRC_AUDIOMIX_RCR = 0x001c,
+ SRC_MLMIX_RCR = 0x0028,
+ SRC_GPU2D_RCR = 0x0038,
+ SRC_GPU3D_RCR = 0x003c,
+ SRC_VPU_G1_RCR = 0x0048,
+ SRC_VPU_G2_RCR = 0x004c,
+ SRC_VPUVC8KE_RCR = 0x0050,
+ SRC_NOC_RCR = 0x0054,
+};
+
static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
[IMX8MQ_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) },
[IMX8MQ_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) },
@@ -253,6 +266,93 @@ static const struct imx7_src_variant variant_imx8mq = {
},
};
+static const struct imx7_src_signal imx8mp_src_signals[IMX8MP_RESET_NUM] = {
+ [IMX8MP_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) },
+ [IMX8MP_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) },
+ [IMX8MP_RESET_A53_CORE_POR_RESET2] = { SRC_A53RCR0, BIT(2) },
+ [IMX8MP_RESET_A53_CORE_POR_RESET3] = { SRC_A53RCR0, BIT(3) },
+ [IMX8MP_RESET_A53_CORE_RESET0] = { SRC_A53RCR0, BIT(4) },
+ [IMX8MP_RESET_A53_CORE_RESET1] = { SRC_A53RCR0, BIT(5) },
+ [IMX8MP_RESET_A53_CORE_RESET2] = { SRC_A53RCR0, BIT(6) },
+ [IMX8MP_RESET_A53_CORE_RESET3] = { SRC_A53RCR0, BIT(7) },
+ [IMX8MP_RESET_A53_DBG_RESET0] = { SRC_A53RCR0, BIT(8) },
+ [IMX8MP_RESET_A53_DBG_RESET1] = { SRC_A53RCR0, BIT(9) },
+ [IMX8MP_RESET_A53_DBG_RESET2] = { SRC_A53RCR0, BIT(10) },
+ [IMX8MP_RESET_A53_DBG_RESET3] = { SRC_A53RCR0, BIT(11) },
+ [IMX8MP_RESET_A53_ETM_RESET0] = { SRC_A53RCR0, BIT(12) },
+ [IMX8MP_RESET_A53_ETM_RESET1] = { SRC_A53RCR0, BIT(13) },
+ [IMX8MP_RESET_A53_ETM_RESET2] = { SRC_A53RCR0, BIT(14) },
+ [IMX8MP_RESET_A53_ETM_RESET3] = { SRC_A53RCR0, BIT(15) },
+ [IMX8MP_RESET_A53_SOC_DBG_RESET] = { SRC_A53RCR0, BIT(20) },
+ [IMX8MP_RESET_A53_L2RESET] = { SRC_A53RCR0, BIT(21) },
+ [IMX8MP_RESET_SW_NON_SCLR_M7C_RST] = { SRC_M4RCR, BIT(0) },
+ [IMX8MP_RESET_OTG1_PHY_RESET] = { SRC_USBOPHY1_RCR, BIT(0) },
+ [IMX8MP_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
+ [IMX8MP_RESET_SUPERMIX_RESET] = { SRC_SUPERMIX_RCR, BIT(0) },
+ [IMX8MP_RESET_AUDIOMIX_RESET] = { SRC_AUDIOMIX_RCR, BIT(0) },
+ [IMX8MP_RESET_MLMIX_RESET] = { SRC_MLMIX_RCR, BIT(0) },
+ [IMX8MP_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, BIT(2) },
+ [IMX8MP_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
+ [IMX8MP_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
+ [IMX8MP_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
+ [IMX8MP_RESET_HDMI_PHY_APB_RESET] = { SRC_HDMI_RCR, BIT(0) },
+ [IMX8MP_RESET_MEDIA_RESET] = { SRC_DISP_RCR, BIT(0) },
+ [IMX8MP_RESET_GPU2D_RESET] = { SRC_GPU2D_RCR, BIT(0) },
+ [IMX8MP_RESET_GPU3D_RESET] = { SRC_GPU3D_RCR, BIT(0) },
+ [IMX8MP_RESET_GPU_RESET] = { SRC_GPU_RCR, BIT(0) },
+ [IMX8MP_RESET_VPU_RESET] = { SRC_VPU_RCR, BIT(0) },
+ [IMX8MP_RESET_VPU_G1_RESET] = { SRC_VPU_G1_RCR, BIT(0) },
+ [IMX8MP_RESET_VPU_G2_RESET] = { SRC_VPU_G2_RCR, BIT(0) },
+ [IMX8MP_RESET_VPUVC8KE_RESET] = { SRC_VPUVC8KE_RCR, BIT(0) },
+ [IMX8MP_RESET_NOC_RESET] = { SRC_NOC_RCR, BIT(0) },
+};
+
+static int imx8mp_reset_set(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct imx7_src *imx7src = to_imx7_src(rcdev);
+ const unsigned int bit = imx7src->signals[id].bit;
+ unsigned int value = assert ? bit : 0;
+
+ switch (id) {
+ case IMX8MP_RESET_PCIEPHY:
+ /*
+ * Wait for more than 10us before releasing PHY g_rst
+ * and btnrst.
+ */
+ if (!assert)
+ udelay(10);
+ break;
+
+ case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
+ value = assert ? 0 : bit;
+ break;
+ }
+
+ return imx7_reset_update(imx7src, id, value);
+}
+
+static int imx8mp_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mp_reset_set(rcdev, id, true);
+}
+
+static int imx8mp_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mp_reset_set(rcdev, id, false);
+}
+
+static const struct imx7_src_variant variant_imx8mp = {
+ .signals = imx8mp_src_signals,
+ .signals_num = ARRAY_SIZE(imx8mp_src_signals),
+ .ops = {
+ .assert = imx8mp_reset_assert,
+ .deassert = imx8mp_reset_deassert,
+ },
+};
+
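A minimal consumer-side sketch for context (hypothetical driver code; the "pciephy" reset name and the example function are assumptions, not part of this patch). Deasserting IMX8MP_RESET_PCIEPHY funnels through imx8mp_reset_set() above, which inserts the >10us delay, and IMX8MP_RESET_PCIE_CTRL_APPS_EN has its bit polarity inverted there:

#include <linux/device.h>
#include <linux/reset.h>

static int example_pcie_phy_reset(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_exclusive(dev, "pciephy");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);	/* sets the RCR bit */
	if (ret)
		return ret;

	/* clears the bit; the driver waits >10us before the write */
	return reset_control_deassert(rst);
}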
static int imx7_reset_probe(struct platform_device *pdev)
{
struct imx7_src *imx7src;
@@ -283,6 +383,7 @@ static int imx7_reset_probe(struct platform_device *pdev)
static const struct of_device_id imx7_reset_dt_ids[] = {
{ .compatible = "fsl,imx7d-src", .data = &variant_imx7 },
{ .compatible = "fsl,imx8mq-src", .data = &variant_imx8mq },
+ { .compatible = "fsl,imx8mp-src", .data = &variant_imx8mp },
{ /* sentinel */ },
};
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
index 0144075b11a6..373ea8d4f7a1 100644
--- a/drivers/reset/reset-zynqmp.c
+++ b/drivers/reset/reset-zynqmp.c
@@ -15,7 +15,6 @@
struct zynqmp_reset_data {
struct reset_controller_dev rcdev;
- const struct zynqmp_eemi_ops *eemi_ops;
};
static inline struct zynqmp_reset_data *
@@ -27,28 +26,23 @@ to_zynqmp_reset_data(struct reset_controller_dev *rcdev)
static int zynqmp_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-
- return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
- PM_RESET_ACTION_ASSERT);
+ return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_ASSERT);
}
static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-
- return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
- PM_RESET_ACTION_RELEASE);
+ return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_RELEASE);
}
static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
int val, err;
- err = priv->eemi_ops->reset_get_status(ZYNQMP_RESET_ID + id, &val);
+ err = zynqmp_pm_reset_get_status(ZYNQMP_RESET_ID + id, &val);
if (err)
return err;
@@ -58,10 +52,8 @@ static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-
- return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
- PM_RESET_ACTION_PULSE);
+ return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_PULSE);
}
static const struct reset_control_ops zynqmp_reset_ops = {
@@ -79,10 +71,6 @@ static int zynqmp_reset_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(priv->eemi_ops))
- return PTR_ERR(priv->eemi_ops);
-
platform_set_drvdata(pdev, priv);
priv->rcdev.ops = &zynqmp_reset_ops;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index ec873f09c763..b54d87d45c89 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1516,7 +1516,7 @@ config RTC_DRV_GENERIC
tristate "Generic RTC support"
# Please consider writing a new RTC driver instead of using the generic
# RTC abstraction
- depends on PARISC || M68K || PPC || SUPERH32 || COMPILE_TEST
+ depends on PARISC || M68K || PPC || SUPERH || COMPILE_TEST
help
Say Y or M here to enable RTC support on systems using the generic
RTC abstraction. If you do not know what you are doing, you should
@@ -1680,6 +1680,7 @@ config RTC_DRV_MPC5121
config RTC_DRV_JZ4740
tristate "Ingenic JZ4740 SoC"
depends on MIPS || COMPILE_TEST
+ depends on OF
help
If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
controllers.
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index cc9b14ef90f1..c90457d001e9 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -106,12 +106,6 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned char buf[4];
unsigned long ticks, base, data;
- if (tm->tm_year > 206) {
- dev_dbg(info->dev, "Set time %d out of range. "
- "Please set time between 1970 to 2106.\n",
- 1900 + tm->tm_year);
- return -EINVAL;
- }
ticks = rtc_tm_to_time64(tm);
/* load 32-bit read-only counter */
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 3521d8e8dc38..803725b3a02c 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -13,6 +13,7 @@
#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/rtc.h>
#include <linux/watchdog.h>
@@ -554,8 +555,9 @@ static const struct rtc_class_ops abx80x_rtc_ops = {
.ioctl = abx80x_ioctl,
};
-static int abx80x_dt_trickle_cfg(struct device_node *np)
+static int abx80x_dt_trickle_cfg(struct i2c_client *client)
{
+ struct device_node *np = client->dev.of_node;
const char *diode;
int trickle_cfg = 0;
int i, ret;
@@ -565,12 +567,14 @@ static int abx80x_dt_trickle_cfg(struct device_node *np)
if (ret)
return ret;
- if (!strcmp(diode, "standard"))
+ if (!strcmp(diode, "standard")) {
trickle_cfg |= ABX8XX_TRICKLE_STANDARD_DIODE;
- else if (!strcmp(diode, "schottky"))
+ } else if (!strcmp(diode, "schottky")) {
trickle_cfg |= ABX8XX_TRICKLE_SCHOTTKY_DIODE;
- else
+ } else {
+ dev_dbg(&client->dev, "Invalid tc-diode value: %s\n", diode);
return -EINVAL;
+ }
ret = of_property_read_u32(np, "abracon,tc-resistor", &tmp);
if (ret)
@@ -580,8 +584,10 @@ static int abx80x_dt_trickle_cfg(struct device_node *np)
if (trickle_resistors[i] == tmp)
break;
- if (i == sizeof(trickle_resistors))
+ if (i == sizeof(trickle_resistors)) {
+ dev_dbg(&client->dev, "Invalid tc-resistor value: %u\n", tmp);
return -EINVAL;
+ }
return (trickle_cfg | i);
}
@@ -793,7 +799,7 @@ static int abx80x_probe(struct i2c_client *client,
}
if (np && abx80x_caps[part].has_tc)
- trickle_cfg = abx80x_dt_trickle_cfg(np);
+ trickle_cfg = abx80x_dt_trickle_cfg(client);
if (trickle_cfg > 0) {
dev_info(&client->dev, "Enabling trickle charger: %02x\n",
@@ -863,9 +869,57 @@ static const struct i2c_device_id abx80x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, abx80x_id);
+#ifdef CONFIG_OF
+static const struct of_device_id abx80x_of_match[] = {
+ {
+ .compatible = "abracon,abx80x",
+ .data = (void *)ABX80X
+ },
+ {
+ .compatible = "abracon,ab0801",
+ .data = (void *)AB0801
+ },
+ {
+ .compatible = "abracon,ab0803",
+ .data = (void *)AB0803
+ },
+ {
+ .compatible = "abracon,ab0804",
+ .data = (void *)AB0804
+ },
+ {
+ .compatible = "abracon,ab0805",
+ .data = (void *)AB0805
+ },
+ {
+ .compatible = "abracon,ab1801",
+ .data = (void *)AB1801
+ },
+ {
+ .compatible = "abracon,ab1803",
+ .data = (void *)AB1803
+ },
+ {
+ .compatible = "abracon,ab1804",
+ .data = (void *)AB1804
+ },
+ {
+ .compatible = "abracon,ab1805",
+ .data = (void *)AB1805
+ },
+ {
+ .compatible = "microcrystal,rv1805",
+ .data = (void *)RV1805
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, abx80x_of_match);
+#endif
+
static struct i2c_driver abx80x_driver = {
.driver = {
.name = "rtc-abx80x",
+ .of_match_table = of_match_ptr(abx80x_of_match),
},
.probe = abx80x_probe,
.id_table = abx80x_id,
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 756af62b0486..68f0a1801a2e 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -21,6 +21,7 @@
#include <linux/rtc.h>
#include <linux/time.h>
#include <linux/acpi.h>
+#include <linux/pm_wakeirq.h>
#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT)
@@ -268,13 +269,11 @@ static int ftm_rtc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "can't get irq number\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, ftm_rtc_alarm_interrupt,
- IRQF_NO_SUSPEND, dev_name(&pdev->dev), rtc);
+ 0, dev_name(&pdev->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request irq\n");
return ret;
@@ -287,6 +286,9 @@ static int ftm_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->ops = &ftm_rtc_ops;
device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to enable irq wake\n");
ret = rtc_register_device(rtc->rtc_dev);
if (ret) {
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index cb6b0ad7ec3f..27797157fcb3 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -174,7 +174,7 @@ static int goldfish_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtcdrv);
rtcdrv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtcdrv->base))
- return -ENODEV;
+ return PTR_ERR(rtcdrv->base);
rtcdrv->irq = platform_get_irq(pdev, 0);
if (rtcdrv->irq < 0)
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index e4c719085c31..9607e6b6e0b3 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -55,14 +55,8 @@ struct jz4740_rtc {
enum jz4740_rtc_type type;
struct rtc_device *rtc;
- struct clk *clk;
-
- int irq;
spinlock_t lock;
-
- unsigned int min_wakeup_pin_assert_time;
- unsigned int reset_pin_assert_time;
};
static struct device *dev_for_power_off;
@@ -259,156 +253,157 @@ static void jz4740_rtc_poweroff(struct device *dev)
static void jz4740_rtc_power_off(void)
{
- struct jz4740_rtc *rtc = dev_get_drvdata(dev_for_power_off);
- unsigned long rtc_rate;
- unsigned long wakeup_filter_ticks;
- unsigned long reset_counter_ticks;
+ jz4740_rtc_poweroff(dev_for_power_off);
+ kernel_halt();
+}
- clk_prepare_enable(rtc->clk);
+static void jz4740_rtc_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
- rtc_rate = clk_get_rate(rtc->clk);
+static const struct of_device_id jz4740_rtc_of_match[] = {
+ { .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
+ { .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
+ { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
+
+static void jz4740_rtc_set_wakeup_params(struct jz4740_rtc *rtc,
+ struct device_node *np,
+ unsigned long rate)
+{
+ unsigned long wakeup_ticks, reset_ticks;
+ unsigned int min_wakeup_pin_assert_time = 100; /* Default: 100ms */
+ unsigned int reset_pin_assert_time = 60; /* Default: 60ms */
+
+ of_property_read_u32(np, "ingenic,reset-pin-assert-time-ms",
+ &reset_pin_assert_time);
+ of_property_read_u32(np, "ingenic,min-wakeup-pin-assert-time-ms",
+ &min_wakeup_pin_assert_time);
/*
* Set minimum wakeup pin assertion time: 100 ms.
* Range is 0 to 2 sec if RTC is clocked at 32 kHz.
*/
- wakeup_filter_ticks =
- (rtc->min_wakeup_pin_assert_time * rtc_rate) / 1000;
- if (wakeup_filter_ticks < JZ_RTC_WAKEUP_FILTER_MASK)
- wakeup_filter_ticks &= JZ_RTC_WAKEUP_FILTER_MASK;
+ wakeup_ticks = (min_wakeup_pin_assert_time * rate) / 1000;
+ if (wakeup_ticks < JZ_RTC_WAKEUP_FILTER_MASK)
+ wakeup_ticks &= JZ_RTC_WAKEUP_FILTER_MASK;
else
- wakeup_filter_ticks = JZ_RTC_WAKEUP_FILTER_MASK;
- jz4740_rtc_reg_write(rtc,
- JZ_REG_RTC_WAKEUP_FILTER, wakeup_filter_ticks);
+ wakeup_ticks = JZ_RTC_WAKEUP_FILTER_MASK;
+ jz4740_rtc_reg_write(rtc, JZ_REG_RTC_WAKEUP_FILTER, wakeup_ticks);
/*
* Set reset pin low-level assertion time after wakeup: 60 ms.
* Range is 0 to 125 ms if RTC is clocked at 32 kHz.
*/
- reset_counter_ticks = (rtc->reset_pin_assert_time * rtc_rate) / 1000;
- if (reset_counter_ticks < JZ_RTC_RESET_COUNTER_MASK)
- reset_counter_ticks &= JZ_RTC_RESET_COUNTER_MASK;
+ reset_ticks = (reset_pin_assert_time * rate) / 1000;
+ if (reset_ticks < JZ_RTC_RESET_COUNTER_MASK)
+ reset_ticks &= JZ_RTC_RESET_COUNTER_MASK;
else
- reset_counter_ticks = JZ_RTC_RESET_COUNTER_MASK;
- jz4740_rtc_reg_write(rtc,
- JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
-
- jz4740_rtc_poweroff(dev_for_power_off);
- kernel_halt();
+ reset_ticks = JZ_RTC_RESET_COUNTER_MASK;
+ jz4740_rtc_reg_write(rtc, JZ_REG_RTC_RESET_COUNTER, reset_ticks);
}
-static const struct of_device_id jz4740_rtc_of_match[] = {
- { .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
- { .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
- { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
- {},
-};
-MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
-
static int jz4740_rtc_probe(struct platform_device *pdev)
{
- int ret;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct jz4740_rtc *rtc;
- const struct platform_device_id *id = platform_get_device_id(pdev);
- const struct of_device_id *of_id = of_match_device(
- jz4740_rtc_of_match, &pdev->dev);
- struct device_node *np = pdev->dev.of_node;
+ unsigned long rate;
+ struct clk *clk;
+ int ret, irq;
- rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- if (of_id)
- rtc->type = (enum jz4740_rtc_type)of_id->data;
- else
- rtc->type = id->driver_data;
+ rtc->type = (enum jz4740_rtc_type)device_get_match_data(dev);
- rtc->irq = platform_get_irq(pdev, 0);
- if (rtc->irq < 0)
- return -ENOENT;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
- rtc->clk = devm_clk_get(&pdev->dev, "rtc");
- if (IS_ERR(rtc->clk)) {
- dev_err(&pdev->dev, "Failed to get RTC clock\n");
- return PTR_ERR(rtc->clk);
+ clk = devm_clk_get(dev, "rtc");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Failed to get RTC clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, jz4740_rtc_clk_disable, clk);
+ if (ret) {
+ dev_err(dev, "Failed to register devm action\n");
+ return ret;
}
spin_lock_init(&rtc->lock);
platform_set_drvdata(pdev, rtc);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(dev, 1);
- ret = dev_pm_set_wake_irq(&pdev->dev, rtc->irq);
+ ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
- dev_err(&pdev->dev, "Failed to set wake irq: %d\n", ret);
+ dev_err(dev, "Failed to set wake irq: %d\n", ret);
return ret;
}
- rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ rtc->rtc = devm_rtc_allocate_device(dev);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
- dev_err(&pdev->dev, "Failed to allocate rtc device: %d\n", ret);
+ dev_err(dev, "Failed to allocate rtc device: %d\n", ret);
return ret;
}
rtc->rtc->ops = &jz4740_rtc_ops;
rtc->rtc->range_max = U32_MAX;
+ rate = clk_get_rate(clk);
+ jz4740_rtc_set_wakeup_params(rtc, np, rate);
+
+ /* Each 1 Hz pulse should happen after (rate) ticks */
+ jz4740_rtc_reg_write(rtc, JZ_REG_RTC_REGULATOR, rate - 1);
+
ret = rtc_register_device(rtc->rtc);
if (ret)
return ret;
- ret = devm_request_irq(&pdev->dev, rtc->irq, jz4740_rtc_irq, 0,
- pdev->name, rtc);
+ ret = devm_request_irq(dev, irq, jz4740_rtc_irq, 0,
+ pdev->name, rtc);
if (ret) {
- dev_err(&pdev->dev, "Failed to request rtc irq: %d\n", ret);
+ dev_err(dev, "Failed to request rtc irq: %d\n", ret);
return ret;
}
- if (np && of_device_is_system_power_controller(np)) {
- if (!pm_power_off) {
- /* Default: 60ms */
- rtc->reset_pin_assert_time = 60;
- of_property_read_u32(np,
- "ingenic,reset-pin-assert-time-ms",
- &rtc->reset_pin_assert_time);
-
- /* Default: 100ms */
- rtc->min_wakeup_pin_assert_time = 100;
- of_property_read_u32(np,
- "ingenic,min-wakeup-pin-assert-time-ms",
- &rtc->min_wakeup_pin_assert_time);
-
- dev_for_power_off = &pdev->dev;
+ if (of_device_is_system_power_controller(np)) {
+ dev_for_power_off = dev;
+
+ if (!pm_power_off)
pm_power_off = jz4740_rtc_power_off;
- } else {
- dev_warn(&pdev->dev,
- "Poweroff handler already present!\n");
- }
+ else
+ dev_warn(dev, "Poweroff handler already present!\n");
}
return 0;
}
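The clock handling above uses devm_add_action_or_reset() so the enable is undone both on probe failure and on unbind. A generic sketch of the pattern (names hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

static void example_clk_disable(void *data)
{
	clk_disable_unprepare(data);
}

static int example_enable_clk(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* on registration failure this runs example_clk_disable(clk)
	 * immediately; otherwise it runs automatically at unbind */
	return devm_add_action_or_reset(dev, example_clk_disable, clk);
}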
-static const struct platform_device_id jz4740_rtc_ids[] = {
- { "jz4740-rtc", ID_JZ4740 },
- { "jz4780-rtc", ID_JZ4780 },
- {}
-};
-MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
-
static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.driver = {
.name = "jz4740-rtc",
- .of_match_table = of_match_ptr(jz4740_rtc_of_match),
+ .of_match_table = jz4740_rtc_of_match,
},
- .id_table = jz4740_rtc_ids,
};
module_platform_driver(jz4740_rtc_driver);
diff --git a/drivers/rtc/rtc-lpc24xx.c b/drivers/rtc/rtc-lpc24xx.c
index 00ef16ba9480..eec881a81067 100644
--- a/drivers/rtc/rtc-lpc24xx.c
+++ b/drivers/rtc/rtc-lpc24xx.c
@@ -205,10 +205,8 @@ static int lpc24xx_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc->rtc_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_warn(&pdev->dev, "can't get interrupt resource\n");
+ if (irq < 0)
return irq;
- }
rtc->clk_rtc = devm_clk_get(&pdev->dev, "rtc");
if (IS_ERR(rtc->clk_rtc)) {
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index d5a0e27dd0a0..03ebcf1c0f3d 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -78,6 +78,8 @@ struct max77686_rtc_driver_data {
int alarm_pending_status_reg;
/* RTC IRQ CHIP for regmap */
const struct regmap_irq_chip *rtc_irq_chip;
+ /* regmap configuration for the chip */
+ const struct regmap_config *regmap_config;
};
struct max77686_rtc_info {
@@ -182,6 +184,11 @@ static const struct regmap_irq_chip max77686_rtc_irq_chip = {
.num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
};
+static const struct regmap_config max77686_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static const struct max77686_rtc_driver_data max77686_drv_data = {
.delay = 16000,
.mask = 0x7f,
@@ -191,6 +198,13 @@ static const struct max77686_rtc_driver_data max77686_drv_data = {
.alarm_pending_status_reg = MAX77686_REG_STATUS2,
.rtc_i2c_addr = MAX77686_I2C_ADDR_RTC,
.rtc_irq_chip = &max77686_rtc_irq_chip,
+ .regmap_config = &max77686_rtc_regmap_config,
+};
+
+static const struct regmap_config max77620_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_write = true,
};
static const struct max77686_rtc_driver_data max77620_drv_data = {
@@ -202,6 +216,7 @@ static const struct max77686_rtc_driver_data max77620_drv_data = {
.alarm_pending_status_reg = MAX77686_INVALID_REG,
.rtc_i2c_addr = MAX77620_I2C_ADDR_RTC,
.rtc_irq_chip = &max77686_rtc_irq_chip,
+ .regmap_config = &max77620_rtc_regmap_config,
};
static const unsigned int max77802_map[REG_RTC_END] = {
@@ -658,11 +673,6 @@ static int max77686_rtc_init_reg(struct max77686_rtc_info *info)
return ret;
}
-static const struct regmap_config max77686_rtc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
{
struct device *parent = info->dev->parent;
@@ -698,7 +708,7 @@ static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
}
info->rtc_regmap = devm_regmap_init_i2c(info->rtc,
- &max77686_rtc_regmap_config);
+ info->drv_data->regmap_config);
if (IS_ERR(info->rtc_regmap)) {
ret = PTR_ERR(info->rtc_regmap);
dev_err(info->dev, "Failed to allocate RTC regmap: %d\n", ret);
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index afce2c0b4bd6..d6802e6191cb 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -308,8 +308,10 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
mc13xxx_unlock(mc13xxx);
ret = rtc_register_device(priv->rtc);
- if (ret)
+ if (ret) {
+ mc13xxx_lock(mc13xxx);
goto err_irq_request;
+ }
return 0;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 3040844129ce..5c2ce71aa044 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -316,7 +316,7 @@ static int mpc5121_rtc_probe(struct platform_device *op)
rtc->regs = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(rtc->regs)) {
dev_err(&op->dev, "%s: couldn't map io space\n", __func__);
- return -ENOSYS;
+ return PTR_ERR(rtc->regs);
}
device_init_wakeup(&op->dev, 1);
diff --git a/drivers/rtc/rtc-mt2712.c b/drivers/rtc/rtc-mt2712.c
index 581b8731fb8a..d5f691c8a035 100644
--- a/drivers/rtc/rtc-mt2712.c
+++ b/drivers/rtc/rtc-mt2712.c
@@ -310,7 +310,6 @@ static const struct rtc_class_ops mt2712_rtc_ops = {
static int mt2712_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct mt2712_rtc *mt2712_rtc;
int ret;
@@ -319,8 +318,7 @@ static int mt2712_rtc_probe(struct platform_device *pdev)
if (!mt2712_rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt2712_rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ mt2712_rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mt2712_rtc->base))
return PTR_ERR(mt2712_rtc->base);
@@ -328,10 +326,8 @@ static int mt2712_rtc_probe(struct platform_device *pdev)
mt2712_rtc_hw_init(mt2712_rtc);
mt2712_rtc->irq = platform_get_irq(pdev, 0);
- if (mt2712_rtc->irq < 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
+ if (mt2712_rtc->irq < 0)
return mt2712_rtc->irq;
- }
platform_set_drvdata(pdev, mt2712_rtc);
@@ -356,13 +352,7 @@ static int mt2712_rtc_probe(struct platform_device *pdev)
mt2712_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
mt2712_rtc->rtc->range_max = MT2712_RTC_TIMESTAMP_END_2127;
- ret = rtc_register_device(mt2712_rtc->rtc);
- if (ret) {
- dev_err(&pdev->dev, "register rtc device failed\n");
- return ret;
- }
-
- return 0;
+ return rtc_register_device(mt2712_rtc->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index cda238dfe69b..f8b1353777ba 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -9,6 +9,7 @@
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
@@ -20,7 +21,7 @@ static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
int ret;
u32 data;
- ret = regmap_write(rtc->regmap, rtc->addr_base + RTC_WRTGR, 1);
+ ret = regmap_write(rtc->regmap, rtc->addr_base + rtc->data->wrtgr, 1);
if (ret < 0)
return ret;
@@ -269,6 +270,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rtc->addr_base = res->start;
+ rtc->data = of_device_get_match_data(&pdev->dev);
+
rtc->irq = platform_get_irq(pdev, 0);
if (rtc->irq < 0)
return rtc->irq;
@@ -325,9 +328,18 @@ static int mt6397_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_rtc_suspend,
mt6397_rtc_resume);
+static const struct mtk_rtc_data mt6358_rtc_data = {
+ .wrtgr = RTC_WRTGR_MT6358,
+};
+
+static const struct mtk_rtc_data mt6397_rtc_data = {
+ .wrtgr = RTC_WRTGR_MT6397,
+};
+
static const struct of_device_id mt6397_rtc_of_match[] = {
- { .compatible = "mediatek,mt6323-rtc", },
- { .compatible = "mediatek,mt6397-rtc", },
+ { .compatible = "mediatek,mt6323-rtc", .data = &mt6397_rtc_data },
+ { .compatible = "mediatek,mt6358-rtc", .data = &mt6358_rtc_data },
+ { .compatible = "mediatek,mt6397-rtc", .data = &mt6397_rtc_data },
{ }
};
MODULE_DEVICE_TABLE(of, mt6397_rtc_of_match);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 4e50d6768f13..9c5670776c68 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -137,8 +137,7 @@ static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_wday = buf[PCF2127_REG_DW] & 0x07;
tm->tm_mon = bcd2bin(buf[PCF2127_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
tm->tm_year = bcd2bin(buf[PCF2127_REG_YR]);
- if (tm->tm_year < 70)
- tm->tm_year += 100; /* assume we are in 1970...2069 */
+ tm->tm_year += 100;
dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -172,7 +171,7 @@ static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
buf[i++] = bin2bcd(tm->tm_mon + 1);
/* year */
- buf[i++] = bin2bcd(tm->tm_year % 100);
+ buf[i++] = bin2bcd(tm->tm_year - 100);
/* write register's data */
err = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_SC, buf, i);
@@ -185,30 +184,35 @@ static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-#ifdef CONFIG_RTC_INTF_DEV
static int pcf2127_rtc_ioctl(struct device *dev,
unsigned int cmd, unsigned long arg)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- int touser;
+ int val, touser = 0;
int ret;
switch (cmd) {
case RTC_VL_READ:
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &touser);
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &val);
if (ret)
return ret;
- touser = touser & PCF2127_BIT_CTRL3_BLF ? RTC_VL_BACKUP_LOW : 0;
+ if (val & PCF2127_BIT_CTRL3_BLF)
+ touser |= RTC_VL_BACKUP_LOW;
+
+ if (val & PCF2127_BIT_CTRL3_BF)
+ touser |= RTC_VL_BACKUP_SWITCH;
return put_user(touser, (unsigned int __user *)arg);
+
+ case RTC_VL_CLR:
+ return regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL3,
+ PCF2127_BIT_CTRL3_BF, 0);
+
default:
return -ENOIOCTLCMD;
}
}
-#else
-#define pcf2127_rtc_ioctl NULL
-#endif
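For reference, a minimal userspace sketch of the two ioctl commands handled above (the /dev/rtc0 path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned int flags = 0;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_VL_READ, &flags) < 0)
		return 1;

	if (flags & RTC_VL_BACKUP_LOW)
		printf("backup battery low\n");
	if (flags & RTC_VL_BACKUP_SWITCH)
		printf("switch-over to backup supply occurred\n");

	/* clears the battery switch-over flag (CTRL3 BF bit) */
	ioctl(fd, RTC_VL_CLR, 0);
	return 0;
}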
static const struct rtc_class_ops pcf2127_rtc_ops = {
.ioctl = pcf2127_rtc_ioctl,
@@ -433,6 +437,9 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return PTR_ERR(pcf2127->rtc);
pcf2127->rtc->ops = &pcf2127_rtc_ops;
+ pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
pcf2127->wdd.parent = dev;
pcf2127->wdd.info = &pcf2127_wdt_info;
@@ -441,6 +448,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
pcf2127->wdd.min_hw_heartbeat_ms = 500;
+ pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
@@ -495,7 +503,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
*/
ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL3,
PCF2127_BIT_CTRL3_BTSE |
- PCF2127_BIT_CTRL3_BF |
PCF2127_BIT_CTRL3_BIE |
PCF2127_BIT_CTRL3_BLIE, 0);
if (ret) {
@@ -636,6 +643,7 @@ static int pcf2127_i2c_probe(struct i2c_client *client,
static const struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0x1d,
};
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
@@ -703,6 +711,7 @@ static int pcf2127_spi_probe(struct spi_device *spi)
.val_bits = 8,
.read_flag_mask = 0xa0,
.write_flag_mask = 0x20,
+ .max_register = 0x1d,
};
struct regmap *regmap;
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
index 24e386ecbc7e..dd1a20977478 100644
--- a/drivers/rtc/rtc-rc5t619.c
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -356,10 +356,8 @@ static int rc5t619_rtc_probe(struct platform_device *pdev)
int err;
rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL);
- if (IS_ERR(rtc)) {
- err = PTR_ERR(rtc);
+ if (!rtc)
return -ENOMEM;
- }
rtc->rn5t618 = rn5t618;
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index a0ddc86c975a..ec84db0b3d7a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -755,6 +755,8 @@ static int rv3028_probe(struct i2c_client *client)
return -ENOMEM;
rv3028->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(rv3028->regmap))
+ return PTR_ERR(rv3028->regmap);
i2c_set_clientdata(client, rv3028);
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 35ee08aa7584..0263d996b8a8 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -148,10 +148,21 @@ static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
static int snvs_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
- unsigned long time = rtc_read_lp_counter(data);
+ unsigned long time;
+ int ret;
+
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
+ time = rtc_read_lp_counter(data);
rtc_time64_to_tm(time, tm);
+ if (data->clk)
+ clk_disable(data->clk);
+
return 0;
}
@@ -161,6 +172,12 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned long time = rtc_tm_to_time64(tm);
int ret;
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
+
/* Disable RTC first */
ret = snvs_rtc_enable(data, false);
if (ret)
@@ -173,6 +190,9 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* Enable RTC again */
ret = snvs_rtc_enable(data, true);
+ if (data->clk)
+ clk_disable(data->clk);
+
return ret;
}
@@ -180,6 +200,13 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
u32 lptar, lpsr;
+ int ret;
+
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
regmap_read(data->regmap, data->offset + SNVS_LPTAR, &lptar);
rtc_time64_to_tm(lptar, &alrm->time);
@@ -187,18 +214,33 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
alrm->pending = (lpsr & SNVS_LPSR_LPTA) ? 1 : 0;
+ if (data->clk)
+ clk_disable(data->clk);
+
return 0;
}
static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR,
(SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
- return rtc_write_sync_lp(data);
+ ret = rtc_write_sync_lp(data);
+
+ if (data->clk)
+ clk_disable(data->clk);
+
+ return ret;
}
static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -207,6 +249,12 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned long time = rtc_tm_to_time64(&alrm->time);
int ret;
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
+
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
ret = rtc_write_sync_lp(data);
if (ret)
@@ -216,6 +264,9 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* Clear alarm interrupt status bit */
regmap_write(data->regmap, data->offset + SNVS_LPSR, SNVS_LPSR_LPTA);
+ if (data->clk)
+ clk_disable(data->clk);
+
return snvs_rtc_alarm_irq_enable(dev, alrm->enabled);
}
@@ -362,7 +413,7 @@ static int __maybe_unused snvs_rtc_suspend_noirq(struct device *dev)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
if (data->clk)
- clk_disable_unprepare(data->clk);
+ clk_disable(data->clk);
return 0;
}
@@ -372,7 +423,7 @@ static int __maybe_unused snvs_rtc_resume_noirq(struct device *dev)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
if (data->clk)
- return clk_prepare_enable(data->clk);
+ return clk_enable(data->clk);
return 0;
}
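The switch from clk_prepare_enable()/clk_disable_unprepare() to plain clk_enable()/clk_disable() matters because clk_prepare() may sleep, while the enable/disable pair is atomic-safe and therefore usable from the noirq suspend/resume callbacks above; the clock is expected to stay prepared from probe onward. A sketch of that split (names hypothetical):

#include <linux/clk.h>

static int example_init(struct clk *clk)
{
	return clk_prepare(clk);	/* may sleep; call once at probe */
}

static int example_access(struct clk *clk)
{
	int ret = clk_enable(clk);	/* non-sleeping */

	if (ret)
		return ret;
	/* ... access the SNVS registers here ... */
	clk_disable(clk);		/* non-sleeping */
	return 0;
}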
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index ff6488be385f..c9bc3d4a1e66 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -416,5 +416,5 @@ module_platform_driver(stmp3xxx_rtcdrv);
MODULE_DESCRIPTION("STMP3xxx RTC Driver");
MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com> and "
- "Wolfram Sang <w.sang@pengutronix.de>");
+ "Wolfram Sang <kernel@pengutronix.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 7d079154f849..af5b0ecb8f89 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -143,9 +143,6 @@ int dasd_scan_partitions(struct dasd_block *block)
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
- /* The two structs have 168/176 byte on 31/64 bit. */
- struct blkpg_partition bpart;
- struct blkpg_ioctl_arg barg;
struct block_device *bdev;
/*
@@ -155,19 +152,10 @@ void dasd_destroy_partitions(struct dasd_block *block)
bdev = block->bdev;
block->bdev = NULL;
- /*
- * See fs/partition/check.c:delete_partition
- * Can't call delete_partitions directly. Use ioctl.
- * The ioctl also does locking and invalidation.
- */
- memset(&bpart, 0, sizeof(struct blkpg_partition));
- memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
- barg.data = (void __force __user *) &bpart;
- barg.op = BLKPG_DEL_PARTITION;
- for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
- ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
-
- invalidate_partition(block->gdp, 0);
+ mutex_lock(&bdev->bd_mutex);
+ blk_drop_partitions(bdev);
+ mutex_unlock(&bdev->bd_mutex);
+
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
blkdev_put(bdev, FMODE_READ);
set_capacity(block->gdp, 0);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 9a5f3add325f..777734d1b4e5 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -22,6 +22,7 @@
#include <asm/schid.h>
#include <asm/cmb.h>
#include <linux/uaccess.h>
+#include <linux/dasd_mod.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_ioctl:"
@@ -457,10 +458,9 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
/*
* Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
*/
-static int dasd_ioctl_information(struct dasd_block *block,
- unsigned int cmd, void __user *argp)
+static int __dasd_ioctl_information(struct dasd_block *block,
+ struct dasd_information2_t *dasd_info)
{
- struct dasd_information2_t *dasd_info;
struct subchannel_id sch_id;
struct ccw_dev_id dev_id;
struct dasd_device *base;
@@ -473,15 +473,9 @@ static int dasd_ioctl_information(struct dasd_block *block,
if (!base->discipline || !base->discipline->fill_info)
return -EINVAL;
- dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
- if (dasd_info == NULL)
- return -ENOMEM;
-
rc = base->discipline->fill_info(base, dasd_info);
- if (rc) {
- kfree(dasd_info);
+ if (rc)
return rc;
- }
cdev = base->cdev;
ccw_device_get_id(cdev, &dev_id);
@@ -520,15 +514,24 @@ static int dasd_ioctl_information(struct dasd_block *block,
list_for_each(l, &base->ccw_queue)
dasd_info->chanq_len++;
spin_unlock_irqrestore(&block->queue_lock, flags);
+ return 0;
+}
- rc = 0;
- if (copy_to_user(argp, dasd_info,
- ((cmd == (unsigned int) BIODASDINFO2) ?
- sizeof(struct dasd_information2_t) :
- sizeof(struct dasd_information_t))))
- rc = -EFAULT;
+static int dasd_ioctl_information(struct dasd_block *block, void __user *argp,
+ size_t copy_size)
+{
+ struct dasd_information2_t *dasd_info;
+ int error;
+
+ dasd_info = kzalloc(sizeof(*dasd_info), GFP_KERNEL);
+ if (!dasd_info)
+ return -ENOMEM;
+
+ error = __dasd_ioctl_information(block, dasd_info);
+ if (!error && copy_to_user(argp, dasd_info, copy_size))
+ error = -EFAULT;
kfree(dasd_info);
- return rc;
+ return error;
}
/*
@@ -622,10 +625,12 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
rc = dasd_ioctl_check_format(bdev, argp);
break;
case BIODASDINFO:
- rc = dasd_ioctl_information(block, cmd, argp);
+ rc = dasd_ioctl_information(block, argp,
+ sizeof(struct dasd_information_t));
break;
case BIODASDINFO2:
- rc = dasd_ioctl_information(block, cmd, argp);
+ rc = dasd_ioctl_information(block, argp,
+ sizeof(struct dasd_information2_t));
break;
case BIODASDPRRD:
rc = dasd_ioctl_read_profile(block, argp);
@@ -660,3 +665,36 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
dasd_put_device(base);
return rc;
}
+
+
+/**
+ * dasd_biodasdinfo() - fill out the dasd information structure
+ * @disk [in]: pointer to gendisk structure that references a DASD
+ * @info [out]: pointer to the dasd_information2_t structure
+ *
+ * Provide access to DASD-specific information.
+ * The gendisk structure is checked to see whether it belongs to the
+ * DASD driver by comparing the gendisk->fops pointer; if it does not,
+ * -EINVAL is returned. Otherwise the provided dasd_information2_t
+ * structure is filled out.
+ *
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int dasd_biodasdinfo(struct gendisk *disk, struct dasd_information2_t *info)
+{
+ struct dasd_device *base;
+ int error;
+
+ if (disk->fops != &dasd_device_operations)
+ return -EINVAL;
+
+ base = dasd_device_from_gendisk(disk);
+ if (!base)
+ return -ENODEV;
+ error = __dasd_ioctl_information(base->block, info);
+ dasd_put_device(base);
+ return error;
+}
+/* exported so that partition detection can reach it via symbol_get() */
+EXPORT_SYMBOL_GPL(dasd_biodasdinfo);
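A sketch of the intended caller (in-kernel partition detection), using symbol_get() so the reference is taken only if the DASD module is loaded; the wrapper function is illustrative:

#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/dasd_mod.h>

static int example_query_dasd(struct gendisk *disk,
			      struct dasd_information2_t *info)
{
	int (*fn)(struct gendisk *disk, struct dasd_information2_t *info);
	int rc = -EINVAL;

	fn = symbol_get(dasd_biodasdinfo);
	if (fn) {
		rc = fn(disk, info);
		symbol_put(dasd_biodasdinfo);
	}
	return rc;
}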
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 3850a0f5f0bc..53120e68796e 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -63,12 +63,9 @@ config QETH
prompt "Gigabit Ethernet device support"
depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET
help
- This driver supports the IBM System z OSA Express adapters
- in QDIO mode (all media types), HiperSockets interfaces and z/VM
- virtual NICs for Guest LAN and VSWITCH.
-
- For details please refer to the documentation provided by IBM at
- <http://www.ibm.com/developerworks/linux/linux390>
+ This driver supports IBM's OSA Express network adapters in QDIO mode,
+ HiperSockets interfaces and z/VM virtual NICs for Guest LAN and
+ VSWITCH.
To compile this driver as a module, choose M.
The module name is qeth.
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 437a6d822105..d06809eac16d 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1698,43 +1698,6 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev)
put_device(&cgdev->dev);
}
-static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
-{
- struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
-
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
- netif_device_detach(priv->channel[CTCM_READ]->netdev);
- ctcm_close(priv->channel[CTCM_READ]->netdev);
- if (!wait_event_timeout(priv->fsm->wait_q,
- fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
- netif_device_attach(priv->channel[CTCM_READ]->netdev);
- return -EBUSY;
- }
- ccw_device_set_offline(gdev->cdev[1]);
- ccw_device_set_offline(gdev->cdev[0]);
- return 0;
-}
-
-static int ctcm_pm_resume(struct ccwgroup_device *gdev)
-{
- struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
- int rc;
-
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
- rc = ccw_device_set_online(gdev->cdev[1]);
- if (rc)
- goto err_out;
- rc = ccw_device_set_online(gdev->cdev[0]);
- if (rc)
- goto err_out;
- ctcm_open(priv->channel[CTCM_READ]->netdev);
-err_out:
- netif_device_attach(priv->channel[CTCM_READ]->netdev);
- return rc;
-}
-
static struct ccw_device_id ctcm_ids[] = {
{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
@@ -1764,9 +1727,6 @@ static struct ccwgroup_driver ctcm_group_driver = {
.remove = ctcm_remove_device,
.set_online = ctcm_new_device,
.set_offline = ctcm_shutdown_device,
- .freeze = ctcm_pm_suspend,
- .thaw = ctcm_pm_resume,
- .restore = ctcm_pm_resume,
};
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index c75112ee7b97..c7fade836d83 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
ISM_NR_DMBS);
- if (!ism->smcd)
+ if (!ism->smcd) {
+ ret = -ENOMEM;
goto err_resource;
+ }
ism->smcd->priv = ism;
ret = ism_dev_init(ism);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 8f08b0a2917c..440219bcaa2b 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2296,60 +2296,6 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
put_device(&ccwgdev->dev);
}
-static int lcs_pm_suspend(struct lcs_card *card)
-{
- if (card->dev)
- netif_device_detach(card->dev);
- lcs_set_allowed_threads(card, 0);
- lcs_wait_for_threads(card, 0xffffffff);
- if (card->state != DEV_STATE_DOWN)
- __lcs_shutdown_device(card->gdev, 1);
- return 0;
-}
-
-static int lcs_pm_resume(struct lcs_card *card)
-{
- int rc = 0;
-
- if (card->state == DEV_STATE_RECOVER)
- rc = lcs_new_device(card->gdev);
- if (card->dev)
- netif_device_attach(card->dev);
- if (rc) {
- dev_warn(&card->gdev->dev, "The lcs device driver "
- "failed to recover the device\n");
- }
- return rc;
-}
-
-static int lcs_prepare(struct ccwgroup_device *gdev)
-{
- return 0;
-}
-
-static void lcs_complete(struct ccwgroup_device *gdev)
-{
- return;
-}
-
-static int lcs_freeze(struct ccwgroup_device *gdev)
-{
- struct lcs_card *card = dev_get_drvdata(&gdev->dev);
- return lcs_pm_suspend(card);
-}
-
-static int lcs_thaw(struct ccwgroup_device *gdev)
-{
- struct lcs_card *card = dev_get_drvdata(&gdev->dev);
- return lcs_pm_resume(card);
-}
-
-static int lcs_restore(struct ccwgroup_device *gdev)
-{
- struct lcs_card *card = dev_get_drvdata(&gdev->dev);
- return lcs_pm_resume(card);
-}
-
static struct ccw_device_id lcs_ids[] = {
{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
@@ -2382,11 +2328,6 @@ static struct ccwgroup_driver lcs_group_driver = {
.remove = lcs_remove_device,
.set_online = lcs_new_device,
.set_offline = lcs_shutdown_device,
- .prepare = lcs_prepare,
- .complete = lcs_complete,
- .freeze = lcs_freeze,
- .thaw = lcs_thaw,
- .restore = lcs_restore,
};
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 5ce2424ca729..260860cf3aa1 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -112,27 +112,10 @@ DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
*/
#define PRINTK_HEADER " iucv: " /* for debugging */
-/* dummy device to make sure netiucv_pm functions are called */
-static struct device *netiucv_dev;
-
-static int netiucv_pm_prepare(struct device *);
-static void netiucv_pm_complete(struct device *);
-static int netiucv_pm_freeze(struct device *);
-static int netiucv_pm_restore_thaw(struct device *);
-
-static const struct dev_pm_ops netiucv_pm_ops = {
- .prepare = netiucv_pm_prepare,
- .complete = netiucv_pm_complete,
- .freeze = netiucv_pm_freeze,
- .thaw = netiucv_pm_restore_thaw,
- .restore = netiucv_pm_restore_thaw,
-};
-
static struct device_driver netiucv_driver = {
.owner = THIS_MODULE,
.name = "netiucv",
.bus = &iucv_bus,
- .pm = &netiucv_pm_ops,
};
static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
@@ -213,7 +196,6 @@ struct netiucv_priv {
fsm_instance *fsm;
struct iucv_connection *conn;
struct device *dev;
- int pm_state;
};
/**
@@ -1275,72 +1257,6 @@ static int netiucv_close(struct net_device *dev)
return 0;
}
-static int netiucv_pm_prepare(struct device *dev)
-{
- IUCV_DBF_TEXT(trace, 3, __func__);
- return 0;
-}
-
-static void netiucv_pm_complete(struct device *dev)
-{
- IUCV_DBF_TEXT(trace, 3, __func__);
- return;
-}
-
-/**
- * netiucv_pm_freeze() - Freeze PM callback
- * @dev: netiucv device
- *
- * close open netiucv interfaces
- */
-static int netiucv_pm_freeze(struct device *dev)
-{
- struct netiucv_priv *priv = dev_get_drvdata(dev);
- struct net_device *ndev = NULL;
- int rc = 0;
-
- IUCV_DBF_TEXT(trace, 3, __func__);
- if (priv && priv->conn)
- ndev = priv->conn->netdev;
- if (!ndev)
- goto out;
- netif_device_detach(ndev);
- priv->pm_state = fsm_getstate(priv->fsm);
- rc = netiucv_close(ndev);
-out:
- return rc;
-}
-
-/**
- * netiucv_pm_restore_thaw() - Thaw and restore PM callback
- * @dev: netiucv device
- *
- * re-open netiucv interfaces closed during freeze
- */
-static int netiucv_pm_restore_thaw(struct device *dev)
-{
- struct netiucv_priv *priv = dev_get_drvdata(dev);
- struct net_device *ndev = NULL;
- int rc = 0;
-
- IUCV_DBF_TEXT(trace, 3, __func__);
- if (priv && priv->conn)
- ndev = priv->conn->netdev;
- if (!ndev)
- goto out;
- switch (priv->pm_state) {
- case DEV_STATE_RUNNING:
- case DEV_STATE_STARTWAIT:
- rc = netiucv_open(ndev);
- break;
- default:
- break;
- }
- netif_device_attach(ndev);
-out:
- return rc;
-}
-
/**
* Start transmission of a packet.
* Called from generic network device layer.
@@ -2156,7 +2072,6 @@ static void __exit netiucv_exit(void)
netiucv_unregister_device(dev);
}
- device_unregister(netiucv_dev);
driver_unregister(&netiucv_driver);
iucv_unregister(&netiucv_handler, 1);
iucv_unregister_dbf_views();
@@ -2182,27 +2097,10 @@ static int __init netiucv_init(void)
IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
goto out_iucv;
}
- /* establish dummy device */
- netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!netiucv_dev) {
- rc = -ENOMEM;
- goto out_driver;
- }
- dev_set_name(netiucv_dev, "netiucv");
- netiucv_dev->bus = &iucv_bus;
- netiucv_dev->parent = iucv_root;
- netiucv_dev->release = (void (*)(struct device *))kfree;
- netiucv_dev->driver = &netiucv_driver;
- rc = device_register(netiucv_dev);
- if (rc) {
- put_device(netiucv_dev);
- goto out_driver;
- }
+
netiucv_banner();
return rc;
-out_driver:
- driver_unregister(&netiucv_driver);
out_iucv:
iucv_unregister(&netiucv_handler, 1);
out_dbf:
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e0b26310ecab..51ea56b73a97 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -11,6 +11,7 @@
#define __QETH_CORE_H__
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
@@ -21,8 +22,10 @@
#include <linux/seq_file.h>
#include <linux/hashtable.h>
#include <linux/ip.h>
+#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
@@ -31,6 +34,7 @@
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
+#include <net/route.h>
#include <net/sch_generic.h>
#include <net/tcp.h>
@@ -231,11 +235,7 @@ struct qeth_hdr_layer3 {
__u16 frame_offset;
union {
/* TX: */
- struct in6_addr ipv6_addr;
- struct ipv4 {
- u8 res[12];
- u32 addr;
- } ipv4;
+ struct in6_addr addr;
/* RX: */
struct rx {
u8 res1[2];
@@ -352,10 +352,15 @@ static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
struct qeth_hdr_layer3 *h2)
{
return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
- ipv6_addr_equal(&h1->next_hop.ipv6_addr,
- &h2->next_hop.ipv6_addr);
+ ipv6_addr_equal(&h1->next_hop.addr, &h2->next_hop.addr);
}
+struct qeth_local_addr {
+ struct hlist_node hnode;
+ struct rcu_head rcu;
+ struct in6_addr addr;
+};
+
enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
@@ -688,6 +693,9 @@ struct qeth_card_info {
u8 promisc_mode:1;
u8 use_v1_blkt:1;
u8 is_vm_nic:1;
+ /* no bitfield here, because we take pointers to these two: */
+ u8 has_lp2lp_cso_v6;
+ u8 has_lp2lp_cso_v4;
enum qeth_card_types type;
enum qeth_link_types link_type;
int broadcast_capable;
@@ -786,6 +794,7 @@ struct qeth_card {
struct qeth_channel data;
struct net_device *dev;
+ struct dentry *debugfs;
struct qeth_card_stats stats;
struct qeth_card_info info;
struct qeth_token token;
@@ -797,6 +806,10 @@ struct qeth_card {
wait_queue_head_t wait_q;
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
+ DECLARE_HASHTABLE(local_addrs4, 4);
+ DECLARE_HASHTABLE(local_addrs6, 4);
+ spinlock_t local_addrs4_lock;
+ spinlock_t local_addrs6_lock;
struct mutex ip_lock;
DECLARE_HASHTABLE(ip_mc_htable, 4);
struct work_struct rx_mode_work;
@@ -928,6 +941,25 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
return dst;
}
+static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
+ struct dst_entry *dst)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr;
+}
+
+static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
+ struct dst_entry *dst)
+{
+ struct rt6_info *rt = (struct rt6_info *) dst;
+
+ if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
+ return &rt->rt6i_gateway;
+ else
+ return &ipv6_hdr(skb)->daddr;
+}
+
static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
{
*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
@@ -1021,7 +1053,8 @@ struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
-void qeth_schedule_recovery(struct qeth_card *);
+int qeth_schedule_recovery(struct qeth_card *card);
+void qeth_flush_local_addrs(struct qeth_card *card);
int qeth_poll(struct napi_struct *napi, int budget);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 569966bdc513..18a0fb75a710 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -26,6 +26,7 @@
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
+#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
@@ -60,6 +61,7 @@ EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
+static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_issue_next_read_cb(struct qeth_card *card,
@@ -623,6 +625,257 @@ void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);
+static void qeth_flush_local_addrs4(struct qeth_card *card)
+{
+ struct qeth_local_addr *addr;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ spin_lock_irq(&card->local_addrs4_lock);
+ hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
+ hash_del_rcu(&addr->hnode);
+ kfree_rcu(addr, rcu);
+ }
+ spin_unlock_irq(&card->local_addrs4_lock);
+}
+
+static void qeth_flush_local_addrs6(struct qeth_card *card)
+{
+ struct qeth_local_addr *addr;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ spin_lock_irq(&card->local_addrs6_lock);
+ hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
+ hash_del_rcu(&addr->hnode);
+ kfree_rcu(addr, rcu);
+ }
+ spin_unlock_irq(&card->local_addrs6_lock);
+}
+
+void qeth_flush_local_addrs(struct qeth_card *card)
+{
+ qeth_flush_local_addrs4(card);
+ qeth_flush_local_addrs6(card);
+}
+EXPORT_SYMBOL_GPL(qeth_flush_local_addrs);
+
+static void qeth_add_local_addrs4(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs4 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs4_lock);
+ for (i = 0; i < cmd->count; i++) {
+ unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
+ struct qeth_local_addr *addr;
+ bool duplicate = false;
+
+ hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
+ if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate)
+ continue;
+
+ addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+ if (!addr) {
+ dev_err(&card->gdev->dev,
+ "Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
+ &cmd->addrs[i].addr);
+ continue;
+ }
+
+ ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
+ hash_add_rcu(card->local_addrs4, &addr->hnode, key);
+ }
+ spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_add_local_addrs6(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs6 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs6_lock);
+ for (i = 0; i < cmd->count; i++) {
+ u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
+ struct qeth_local_addr *addr;
+ bool duplicate = false;
+
+ hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
+ if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate)
+ continue;
+
+ addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+ if (!addr) {
+ dev_err(&card->gdev->dev,
+ "Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
+ &cmd->addrs[i].addr);
+ continue;
+ }
+
+ addr->addr = cmd->addrs[i].addr;
+ hash_add_rcu(card->local_addrs6, &addr->hnode, key);
+ }
+ spin_unlock(&card->local_addrs6_lock);
+}
+
+static void qeth_del_local_addrs4(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs4 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs4_lock);
+ for (i = 0; i < cmd->count; i++) {
+ struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
+ unsigned int key = ipv4_addr_hash(addr->addr);
+ struct qeth_local_addr *tmp;
+
+ hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
+ if (tmp->addr.s6_addr32[3] == addr->addr) {
+ hash_del_rcu(&tmp->hnode);
+ kfree_rcu(tmp, rcu);
+ break;
+ }
+ }
+ }
+ spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_del_local_addrs6(struct qeth_card *card,
+ struct qeth_ipacmd_local_addrs6 *cmd)
+{
+ unsigned int i;
+
+ if (cmd->addr_length !=
+ sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+ dev_err_ratelimited(&card->gdev->dev,
+ "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
+ cmd->addr_length);
+ return;
+ }
+
+ spin_lock(&card->local_addrs6_lock);
+ for (i = 0; i < cmd->count; i++) {
+ struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
+ u32 key = ipv6_addr_hash(&addr->addr);
+ struct qeth_local_addr *tmp;
+
+ hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
+ if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
+ hash_del_rcu(&tmp->hnode);
+ kfree_rcu(tmp, rcu);
+ break;
+ }
+ }
+ }
+ spin_unlock(&card->local_addrs6_lock);
+}
+
+static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
+ struct sk_buff *skb)
+{
+ struct qeth_local_addr *tmp;
+ bool is_local = false;
+ unsigned int key;
+ __be32 next_hop;
+
+ if (hash_empty(card->local_addrs4))
+ return false;
+
+ rcu_read_lock();
+ next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
+ key = ipv4_addr_hash(next_hop);
+
+ hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
+ if (tmp->addr.s6_addr32[3] == next_hop) {
+ is_local = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_local;
+}
+
+static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
+ struct sk_buff *skb)
+{
+ struct qeth_local_addr *tmp;
+ struct in6_addr *next_hop;
+ bool is_local = false;
+ u32 key;
+
+ if (hash_empty(card->local_addrs6))
+ return false;
+
+ rcu_read_lock();
+ next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
+ key = ipv6_addr_hash(next_hop);
+
+ hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
+ if (ipv6_addr_equal(&tmp->addr, next_hop)) {
+ is_local = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_local;
+}
+
+static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
+{
+ struct qeth_card *card = m->private;
+ struct qeth_local_addr *tmp;
+ unsigned int i;
+
+ rcu_read_lock();
+ hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
+ seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
+ hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
+ seq_printf(m, "%pI6c\n", &tmp->addr);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
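DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the qeth_debugfs_local_addr_fops referenced below; roughly, it expands to:

static int qeth_debugfs_local_addr_open(struct inode *inode,
					struct file *file)
{
	return single_open(file, qeth_debugfs_local_addr_show,
			   inode->i_private);
}

static const struct file_operations qeth_debugfs_local_addr_fops = {
	.owner		= THIS_MODULE,
	.open		= qeth_debugfs_local_addr_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};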
+
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
{
@@ -686,9 +939,19 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_MODCCID:
return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
+
QETH_CARD_TEXT(card, 3, "irla");
return NULL;
case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
+
QETH_CARD_TEXT(card, 3, "urla");
return NULL;
default:
@@ -868,16 +1131,18 @@ static int qeth_set_thread_start_bit(struct qeth_card *card,
unsigned long thread)
{
unsigned long flags;
+ int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
- if (!(card->thread_allowed_mask & thread) ||
- (card->thread_start_mask & thread)) {
- spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- return -EPERM;
- }
- card->thread_start_mask |= thread;
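+ /* distinguish "not allowed" (-EPERM) from "already requested" (-EBUSY),
+ * so that callers like qeth_dev_recover_store() can report a meaningful
+ * error to user space:
+ */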
+ if (!(card->thread_allowed_mask & thread))
+ rc = -EPERM;
+ else if (card->thread_start_mask & thread)
+ rc = -EBUSY;
+ else
+ card->thread_start_mask |= thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- return 0;
+
+ return rc;
}
static void qeth_clear_thread_start_bit(struct qeth_card *card,
@@ -930,11 +1195,17 @@ static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
return rc;
}
-void qeth_schedule_recovery(struct qeth_card *card)
+int qeth_schedule_recovery(struct qeth_card *card)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "startrec");
- if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
+
+ rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
+ if (!rc)
schedule_work(&card->kernel_thread_starter);
+
+ return rc;
}
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
@@ -1376,6 +1647,10 @@ static void qeth_setup_card(struct qeth_card *card)
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
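+ /* hash tables for the addresses that the device reports as local: */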
+ hash_init(card->local_addrs4);
+ hash_init(card->local_addrs6);
+ spin_lock_init(&card->local_addrs4_lock);
+ spin_lock_init(&card->local_addrs6_lock);
}
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
@@ -1412,6 +1687,11 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
if (!card->read_cmd)
goto out_read_cmd;
+ card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
+ qeth_debugfs_root);
+ debugfs_create_file("local_addrs", 0400, card->debugfs, card,
+ &qeth_debugfs_local_addr_fops);
+
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
@@ -3345,11 +3625,11 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int count)
{
+ struct qeth_qdio_out_buffer *buf = queue->bufs[index];
+ unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
struct qeth_card *card = queue->card;
- struct qeth_qdio_out_buffer *buf;
int rc;
int i;
- unsigned int qdio_flags;
for (i = index; i < index + count; ++i) {
unsigned int bidx = QDIO_BUFNR(i);
@@ -3366,9 +3646,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (IS_IQD(card)) {
skb_queue_walk(&buf->skb_list, skb)
skb_tx_timestamp(skb);
- continue;
}
+ }
+ if (!IS_IQD(card)) {
if (!queue->do_pack) {
if ((atomic_read(&queue->used_buffers) >=
(QETH_HIGH_WATERMARK_PACK -
@@ -3393,12 +3674,12 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
}
}
+
+ if (atomic_read(&queue->set_pci_flags_count))
+ qdio_flags |= QDIO_FLAG_PCI_OUT;
}
QETH_TXQ_STAT_INC(queue, doorbell);
- qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
- if (atomic_read(&queue->set_pci_flags_count))
- qdio_flags |= QDIO_FLAG_PCI_OUT;
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
@@ -3809,15 +4090,47 @@ static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}
-static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
- struct qeth_qdio_out_buffer *buf,
- bool is_first_elem, unsigned int offset)
+/**
+ * qeth_fill_buffer() - map skb into an output buffer
+ * @buf: buffer to transport the skb
+ * @skb: skb to map into the buffer
+ * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
+ * from qeth_core_header_cache.
+ * @offset: when mapping the skb, start at skb->data + offset
+ * @hd_len: if > 0, build a dedicated header element of this size
+ */
+static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len)
{
struct qdio_buffer *buffer = buf->buffer;
int element = buf->next_element_to_fill;
int length = skb_headlen(skb) - offset;
char *data = skb->data + offset;
unsigned int elem_length, cnt;
+ bool is_first_elem = true;
+
+ __skb_queue_tail(&buf->skb_list, skb);
+
+ /* build dedicated element for HW Header */
+ if (hd_len) {
+ is_first_elem = false;
+
+ buffer->element[element].addr = virt_to_phys(hdr);
+ buffer->element[element].length = hd_len;
+ buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
+
+ /* HW header is allocated from cache: */
+ if ((void *)hdr != skb->data)
+ buf->is_header[element] = 1;
+ /* HW header was pushed and is contiguous with linear part: */
+ else if (length > 0 && !PAGE_ALIGNED(data) &&
+ (data == (char *)hdr + hd_len))
+ buffer->element[element].eflags |=
+ SBAL_EFLAGS_CONTIGUOUS;
+
+ element++;
+ }
/* map linear part into buffer element(s) */
while (length > 0) {
@@ -3871,40 +4184,6 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
return element;
}
-/**
- * qeth_fill_buffer() - map skb into an output buffer
- * @buf: buffer to transport the skb
- * @skb: skb to map into the buffer
- * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
- * from qeth_core_header_cache.
- * @offset: when mapping the skb, start at skb->data + offset
- * @hd_len: if > 0, build a dedicated header element of this size
- */
-static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len)
-{
- struct qdio_buffer *buffer = buf->buffer;
- bool is_first_elem = true;
-
- __skb_queue_tail(&buf->skb_list, skb);
-
- /* build dedicated header element */
- if (hd_len) {
- int element = buf->next_element_to_fill;
- is_first_elem = false;
-
- buffer->element[element].addr = virt_to_phys(hdr);
- buffer->element[element].length = hd_len;
- buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
- /* remember to free cache-allocated qeth_hdr: */
- buf->is_header[element] = ((void *)hdr != skb->data);
- buf->next_element_to_fill++;
- }
-
- return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
-}
-
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, unsigned int elements,
struct qeth_hdr *hdr, unsigned int offset,
@@ -4889,9 +5168,11 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "freecrd");
+
+ unregister_service_level(&card->qeth_service_level);
+ debugfs_remove_recursive(card->debugfs);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
- unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
}
@@ -6153,32 +6434,6 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
qdio_free(CARD_DDEV(card));
}
-static int qeth_suspend(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-
- qeth_set_allowed_threads(card, 0, 1);
- wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
-
- qeth_set_offline(card, false);
- return 0;
-}
-
-static int qeth_resume(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc;
-
- rc = qeth_set_online(card);
-
- qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (rc)
- dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
- return rc;
-}
-
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
size_t count)
{
@@ -6215,11 +6470,6 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.set_online = qeth_core_set_online,
.set_offline = qeth_core_set_offline,
.shutdown = qeth_core_shutdown,
- .prepare = NULL,
- .complete = NULL,
- .freeze = qeth_suspend,
- .thaw = qeth_resume,
- .restore = qeth_resume,
};
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
@@ -6300,7 +6550,7 @@ static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
}
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
- enum qeth_prot_versions prot)
+ enum qeth_prot_versions prot, u8 *lp2lp)
{
u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
struct qeth_cmd_buffer *iob;
@@ -6352,18 +6602,17 @@ static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
- if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
- cstype == IPA_OUTBOUND_CHECKSUM)
- dev_warn(&card->gdev->dev,
- "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
- QETH_CARD_IFNAME(card));
+
+ if (lp2lp)
+ *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
+
return 0;
}
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
- enum qeth_prot_versions prot)
+ enum qeth_prot_versions prot, u8 *lp2lp)
{
- return on ? qeth_set_csum_on(card, cstype, prot) :
+ return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
qeth_set_csum_off(card, cstype, prot);
}
@@ -6451,13 +6700,13 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
- QETH_PROT_IPV4);
+ QETH_PROT_IPV4, NULL);
if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
/* no/one Offload Assist available, so the rc is trivial */
return rc_ipv4;
rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
- QETH_PROT_IPV6);
+ QETH_PROT_IPV6, NULL);
if (on)
/* enable: success if any Assist is active */
@@ -6493,6 +6742,24 @@ void qeth_enable_hw_features(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
+static void qeth_check_restricted_features(struct qeth_card *card,
+ netdev_features_t changed,
+ netdev_features_t actual)
+{
+ netdev_features_t ipv6_features = NETIF_F_TSO6;
+ netdev_features_t ipv4_features = NETIF_F_TSO;
+
+ if (!card->info.has_lp2lp_cso_v6)
+ ipv6_features |= NETIF_F_IPV6_CSUM;
+ if (!card->info.has_lp2lp_cso_v4)
+ ipv4_features |= NETIF_F_IP_CSUM;
+
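+ /* when an offload that is restricted for local next-hops was toggled
+ * and none of the relevant features remains active, drop the tracked
+ * local addresses:
+ */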
+ if ((changed & ipv6_features) && !(actual & ipv6_features))
+ qeth_flush_local_addrs6(card);
+ if ((changed & ipv4_features) && !(actual & ipv4_features))
+ qeth_flush_local_addrs4(card);
+}
+
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
@@ -6504,13 +6771,15 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
if ((changed & NETIF_F_IP_CSUM)) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
- IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
+ &card->info.has_lp2lp_cso_v4);
if (rc)
changed ^= NETIF_F_IP_CSUM;
}
if (changed & NETIF_F_IPV6_CSUM) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
- IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
+ &card->info.has_lp2lp_cso_v6);
if (rc)
changed ^= NETIF_F_IPV6_CSUM;
}
@@ -6532,6 +6801,9 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
changed ^= NETIF_F_TSO6;
}
+ qeth_check_restricted_features(card, dev->features ^ features,
+ dev->features ^ changed);
+
/* everything changed successfully? */
if ((dev->features ^ features) == changed)
return 0;
@@ -6568,6 +6840,34 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
+ /* Traffic with local next-hop is not eligible for some offloads: */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ struct qeth_card *card = dev->ml_priv;
+ netdev_features_t restricted = 0;
+
+ if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
+ restricted |= NETIF_F_ALL_TSO;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ if (!card->info.has_lp2lp_cso_v4)
+ restricted |= NETIF_F_IP_CSUM;
+
+ if (restricted && qeth_next_hop_is_local_v4(card, skb))
+ features &= ~restricted;
+ break;
+ case htons(ETH_P_IPV6):
+ if (!card->info.has_lp2lp_cso_v6)
+ restricted |= NETIF_F_IPV6_CSUM;
+
+ if (restricted && qeth_next_hop_is_local_v6(card, skb))
+ features &= ~restricted;
+ break;
+ default:
+ break;
+ }
+ }
+
/* GSO segmentation builds skbs with
* a (small) linear part for the headers, and
* page frags for the data.
@@ -6745,6 +7045,8 @@ static int __init qeth_core_init(void)
pr_info("loading core functions\n");
+ qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
+
rc = qeth_register_dbf_views();
if (rc)
goto dbf_err;
@@ -6786,6 +7088,7 @@ slab_err:
register_err:
qeth_unregister_dbf_views();
dbf_err:
+ debugfs_remove_recursive(qeth_debugfs_root);
pr_err("Initializing the qeth device driver failed\n");
return rc;
}
@@ -6799,6 +7102,7 @@ static void __exit qeth_core_exit(void)
kmem_cache_destroy(qeth_core_header_cache);
root_device_unregister(qeth_core_root_dev);
qeth_unregister_dbf_views();
+ debugfs_remove_recursive(qeth_debugfs_root);
pr_info("core functions removed\n");
}
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index d89a04bfd8b0..9d6f39d8f9ab 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -772,6 +772,29 @@ struct qeth_ipacmd_addr_change {
struct qeth_ipacmd_addr_change_entry entry[];
} __packed;
+/* [UN]REGISTER_LOCAL_ADDRESS notifications */
+struct qeth_ipacmd_local_addr4 {
+ __be32 addr;
+ u32 flags;
+};
+
+struct qeth_ipacmd_local_addrs4 {
+ u32 count;
+ u32 addr_length;
+ struct qeth_ipacmd_local_addr4 addrs[];
+};
+
+struct qeth_ipacmd_local_addr6 {
+ struct in6_addr addr;
+ u32 flags;
+};
+
+struct qeth_ipacmd_local_addrs6 {
+ u32 count;
+ u32 addr_length;
+ struct qeth_ipacmd_local_addr6 addrs[];
+};
+
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
@@ -803,6 +826,8 @@ struct qeth_ipa_cmd {
struct qeth_ipacmd_setbridgeport sbp;
struct qeth_ipacmd_addr_change addrchange;
struct qeth_ipacmd_vnicc vnicc;
+ struct qeth_ipacmd_local_addrs4 local_addrs4;
+ struct qeth_ipacmd_local_addrs6 local_addrs6;
} data;
} __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index d7e429f6631e..c901c942fed7 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -275,17 +275,20 @@ static ssize_t qeth_dev_recover_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
- int i;
+ bool reset;
+ int rc;
+
+ rc = kstrtobool(buf, &reset);
+ if (rc)
+ return rc;
if (!qeth_card_hw_is_reachable(card))
return -EPERM;
- i = simple_strtoul(buf, &tmp, 16);
- if (i == 1)
- qeth_schedule_recovery(card);
+ if (reset)
+ rc = qeth_schedule_recovery(card);
- return count;
+ return rc ? rc : count;
}
static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0bd5b09e7a22..da47e423e1b1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -291,6 +291,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
qeth_qdio_clear_card(card, 0);
qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
+ qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
}
@@ -709,6 +710,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
+ netif_keep_dst(card->dev);
netif_set_gso_max_size(card->dev,
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0742a749d26e..1e50aa0297a3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1176,6 +1176,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
qeth_qdio_clear_card(card, 0);
qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
+ qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
}
@@ -1693,8 +1694,8 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
if (skb->protocol == htons(ETH_P_AF_IUCV)) {
l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
- l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80);
- memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2],
+ l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
+ memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
iucv_trans_hdr(skb)->destUserID, 8);
return;
}
@@ -1728,18 +1729,10 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
if (ipv == 4) {
- struct rtable *rt = (struct rtable *) dst;
-
- *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
- rt_nexthop(rt, ip_hdr(skb)->daddr) :
- ip_hdr(skb)->daddr;
+ l3_hdr->next_hop.addr.s6_addr32[3] =
+ qeth_next_hop_v4_rcu(skb, dst);
} else if (ipv == 6) {
- struct rt6_info *rt = (struct rt6_info *) dst;
-
- if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
- l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
- else
- l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
+ l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (!IS_IQD(card))
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 066b5c3aaae6..c84ec2fbf99b 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -29,12 +29,9 @@ MODULE_AUTHOR
MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
static struct iucv_path *smsg_path;
-/* dummy device used as trigger for PM functions */
-static struct device *smsg_dev;
static DEFINE_SPINLOCK(smsg_list_lock);
static LIST_HEAD(smsg_list);
-static int iucv_path_connected;
static int smsg_path_pending(struct iucv_path *, u8 *, u8 *);
static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
@@ -124,60 +121,15 @@ void smsg_unregister_callback(const char *prefix,
kfree(cb);
}
-static int smsg_pm_freeze(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "smsg_pm_freeze\n");
-#endif
- if (smsg_path && iucv_path_connected) {
- iucv_path_sever(smsg_path, NULL);
- iucv_path_connected = 0;
- }
- return 0;
-}
-
-static int smsg_pm_restore_thaw(struct device *dev)
-{
- int rc;
-
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "smsg_pm_restore_thaw\n");
-#endif
- if (smsg_path && !iucv_path_connected) {
- memset(smsg_path, 0, sizeof(*smsg_path));
- smsg_path->msglim = 255;
- smsg_path->flags = 0;
- rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
- NULL, NULL, NULL);
-#ifdef CONFIG_PM_DEBUG
- if (rc)
- printk(KERN_ERR
- "iucv_path_connect returned with rc %i\n", rc);
-#endif
- if (!rc)
- iucv_path_connected = 1;
- cpcmd("SET SMSG IUCV", NULL, 0, NULL);
- }
- return 0;
-}
-
-static const struct dev_pm_ops smsg_pm_ops = {
- .freeze = smsg_pm_freeze,
- .thaw = smsg_pm_restore_thaw,
- .restore = smsg_pm_restore_thaw,
-};
-
static struct device_driver smsg_driver = {
.owner = THIS_MODULE,
.name = SMSGIUCV_DRV_NAME,
.bus = &iucv_bus,
- .pm = &smsg_pm_ops,
};
static void __exit smsg_exit(void)
{
cpcmd("SET SMSG OFF", NULL, 0, NULL);
- device_unregister(smsg_dev);
iucv_unregister(&smsg_handler, 1);
driver_unregister(&smsg_driver);
}
@@ -205,27 +157,10 @@ static int __init smsg_init(void)
NULL, NULL, NULL);
if (rc)
goto out_free_path;
- else
- iucv_path_connected = 1;
- smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!smsg_dev) {
- rc = -ENOMEM;
- goto out_free_path;
- }
- dev_set_name(smsg_dev, "smsg_iucv");
- smsg_dev->bus = &iucv_bus;
- smsg_dev->parent = iucv_root;
- smsg_dev->release = (void (*)(struct device *))kfree;
- smsg_dev->driver = &smsg_driver;
- rc = device_register(smsg_dev);
- if (rc)
- goto out_put;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
return 0;
-out_put:
- put_device(smsg_dev);
out_free_path:
iucv_path_free(smsg_path);
smsg_path = NULL;
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 09ec846fe01d..18b713a616de 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
/*
@@ -415,8 +415,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
- if (!zfcp_scsi_adapter_register(adapter))
- return adapter;
+ return adapter;
failed:
zfcp_adapter_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
index b9c93d15f67c..3852367f15f6 100644
--- a/drivers/s390/scsi/zfcp_diag.h
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -4,7 +4,7 @@
*
* Definitions for handling diagnostics in the zfcp device driver.
*
- * Copyright IBM Corp. 2018
+ * Copyright IBM Corp. 2018, 2020
*/
#ifndef ZFCP_DIAG_H
@@ -56,11 +56,11 @@ struct zfcp_diag_adapter {
unsigned long max_age;
- struct {
+ struct zfcp_diag_adapter_port_data {
struct zfcp_diag_header header;
struct fsf_qtcb_bottom_port data;
} port_data;
- struct {
+ struct zfcp_diag_adapter_config_data {
struct zfcp_diag_header header;
struct fsf_qtcb_bottom_config data;
} config_data;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3d0bc000f500..db320dab1fee 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -4,7 +4,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,7 @@
#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
#define ZFCP_MAX_ERPS 3
@@ -768,10 +769,14 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
return ZFCP_ERP_FAILED;
+ return ZFCP_ERP_SUCCEEDED;
+}
+
+static void
+zfcp_erp_adapter_strategy_open_ptp_port(struct zfcp_adapter *const adapter)
+{
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
zfcp_erp_enqueue_ptp_port(adapter);
-
- return ZFCP_ERP_SUCCEEDED;
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
@@ -800,6 +805,59 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
return ZFCP_ERP_SUCCEEDED;
}
+static enum zfcp_erp_act_result
+zfcp_erp_adapter_strategy_alloc_shost(struct zfcp_adapter *const adapter)
+{
+ struct zfcp_diag_adapter_config_data *const config_data =
+ &adapter->diagnostics->config_data;
+ struct zfcp_diag_adapter_port_data *const port_data =
+ &adapter->diagnostics->port_data;
+ unsigned long flags;
+ int rc;
+
+ rc = zfcp_scsi_adapter_register(adapter);
+ if (rc == -EEXIST)
+ return ZFCP_ERP_SUCCEEDED;
+ else if (rc)
+ return ZFCP_ERP_FAILED;
+
+ /*
+ * We allocated the shost for the first time. Before, it was NULL,
+ * and so we deferred all updates in the xconf- and xport-data
+ * handlers. We need to make up for that now, and make all the updates
+ * that would have been done before.
+ *
+ * We can be sure that xconf- and xport-data succeeded, because
+ * otherwise this function is not called. But they might have been
+ * incomplete.
+ */
+
+ spin_lock_irqsave(&config_data->header.access_lock, flags);
+ zfcp_scsi_shost_update_config_data(adapter, &config_data->data,
+ !!config_data->header.incomplete);
+ spin_unlock_irqrestore(&config_data->header.access_lock, flags);
+
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ spin_lock_irqsave(&port_data->header.access_lock, flags);
+ zfcp_scsi_shost_update_port_data(adapter, &port_data->data);
+ spin_unlock_irqrestore(&port_data->header.access_lock, flags);
+ }
+
+ /*
+ * There is a remote possibility that the 'Exchange Port Data' request
+ * reports a different connectivity status than 'Exchange Config Data'.
+ * But any change to the connectivity status of the local optic that
+ * happens after the initial xconf request is expected to be reported
+ * to us, as soon as we post Status Read Buffers to the FCP channel
+ * firmware after this function. So any resulting inconsistency will
+ * only be momentary.
+ */
+ if (config_data->header.incomplete)
+ zfcp_fsf_fc_host_link_down(adapter);
+
+ return ZFCP_ERP_SUCCEEDED;
+}
+
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
struct zfcp_erp_action *act)
{
@@ -809,6 +867,12 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
+ if (zfcp_erp_adapter_strategy_alloc_shost(act->adapter) ==
+ ZFCP_ERP_FAILED)
+ return ZFCP_ERP_FAILED;
+
+ zfcp_erp_adapter_strategy_open_ptp_port(act->adapter);
+
if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num))
return ZFCP_ERP_FAILED;
@@ -1636,6 +1700,13 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
atomic_or(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ /*
+ * if `scsi_host` is missing, xconfig/xport data has not completed yet,
+ * so we can't access it; but then there are also no SDEVs yet
+ */
+ if (adapter->scsi_host == NULL)
+ return;
+
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host)
atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
@@ -1673,6 +1744,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ /*
+ * if `scsi_host` is missing, xconfig/xport data has not completed yet,
+ * so we can't access it; but then there are also no SDEVs yet
+ */
+ if (adapter->scsi_host == NULL)
+ return;
+
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host) {
atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 88294ca0e2ea..3ef5d74331c3 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -125,6 +125,7 @@ extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_port *);
+extern u32 zfcp_fsf_convert_portspeed(u32 fsf_speed);
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
@@ -134,6 +135,7 @@ extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter);
extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
@@ -153,6 +155,8 @@ extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
struct scatterlist *);
+extern void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+ const struct zfcp_qdio *const qdio);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
extern void zfcp_qdio_siosl(struct zfcp_adapter *);
@@ -169,6 +173,13 @@ extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+extern void zfcp_scsi_shost_update_config_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_config *const bottom,
+ const bool bottom_incomplete);
+extern void zfcp_scsi_shost_update_port_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_port *const bottom);
/* zfcp_sysfs.c */
extern const struct attribute_group *zfcp_unit_attr_groups[];
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 111fe3fc32d7..c795f22249d8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -120,21 +120,25 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
-static void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
+void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost = adapter->scsi_host;
+ adapter->hydra_version = 0;
+ adapter->peer_wwpn = 0;
+ adapter->peer_wwnn = 0;
+ adapter->peer_d_id = 0;
+
+ /* if there is no shost yet, we have nothing to zero-out */
+ if (shost == NULL)
+ return;
+
fc_host_port_id(shost) = 0;
fc_host_fabric_name(shost) = 0;
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
- adapter->hydra_version = 0;
snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
-
- adapter->peer_wwpn = 0;
- adapter->peer_wwnn = 0;
- adapter->peer_d_id = 0;
}
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
@@ -479,7 +483,7 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
#define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
-static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
u32 fdmi_speed = 0;
if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
@@ -509,64 +513,36 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
struct zfcp_adapter *adapter = req->adapter;
- struct Scsi_Host *shost = adapter->scsi_host;
- struct fc_els_flogi *nsp, *plogi;
+ struct fc_els_flogi *plogi;
/* adjust pointers for missing command code */
- nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
- - sizeof(u32));
plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
- sizeof(u32));
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
- "IBM");
- fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
- fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
- fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
-
adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
(u16)FSF_STATUS_READS_RECOM);
- zfcp_scsi_set_prot(adapter);
-
/* no error return above here, otherwise must fix call chains */
/* do not evaluate invalid fields */
if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
return 0;
- fc_host_port_id(shost) = ntoh24(bottom->s_id);
- fc_host_speed(shost) =
- zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
-
adapter->hydra_version = bottom->adapter_type;
- snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
- bottom->adapter_type);
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
adapter->peer_d_id = ntoh24(bottom->peer_d_id);
adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
- fc_host_port_type(shost) = FC_PORTTYPE_PTP;
- fc_host_fabric_name(shost) = 0;
break;
case FSF_TOPO_FABRIC:
- fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
- if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
- else
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case FSF_TOPO_AL:
- fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
- fc_host_fabric_name(shost) = 0;
- fallthrough;
default:
- fc_host_fabric_name(shost) = 0;
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
"fibre channel topology detected\n");
@@ -584,13 +560,10 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->diagnostics->config_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
- struct Scsi_Host *shost = adapter->scsi_host;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
- snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
- "0x%08x", bottom->lic_version);
adapter->fsf_lic_version = bottom->lic_version;
adapter->adapter_features = bottom->adapter_features;
adapter->connection_features = bottom->connection_features;
@@ -606,6 +579,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
*/
zfcp_diag_update_xdata(diag_hdr, bottom, false);
+ zfcp_scsi_shost_update_config_data(adapter, bottom, false);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
@@ -630,6 +604,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->status);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+
+ zfcp_scsi_shost_update_config_data(adapter, bottom, true);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
break;
@@ -638,16 +614,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
return;
}
- if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
adapter->hardware_version = bottom->hardware_version;
- snprintf(fc_host_hardware_version(shost),
- FC_VERSION_STRING_SIZE,
- "0x%08x", bottom->hardware_version);
- memcpy(fc_host_serial_number(shost), bottom->serial_number,
- min(FC_SERIAL_NUMBER_SIZE, 17));
- EBCASC(fc_host_serial_number(shost),
- min(FC_SERIAL_NUMBER_SIZE, 17));
- }
if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
@@ -761,19 +729,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
- struct Scsi_Host *shost = adapter->scsi_host;
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- fc_host_permanent_port_name(shost) = bottom->wwpn;
- fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
- fc_host_supported_speeds(shost) =
- zfcp_fsf_convert_portspeed(bottom->supported_speed);
- memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
- FC_FC4_LIST_SIZE);
- memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
- FC_FC4_LIST_SIZE);
if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
adapter->fc_security_algorithms =
bottom->fc_security_algorithms;
@@ -800,6 +759,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
*/
zfcp_diag_update_xdata(diag_hdr, bottom, false);
+ zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -808,6 +768,8 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+
+ zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 26702b56a7ab..3a7f3374d10a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -4,7 +4,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -342,6 +342,18 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, 0);
}
+void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+ const struct zfcp_qdio *const qdio)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+
+ if (shost == NULL)
+ return;
+
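+ /* each SBALE can address up to one 4K page, i.e. eight 512-byte sectors */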
+ shost->sg_tablesize = qdio->max_sbale_per_req;
+ shost->max_sectors = qdio->max_sbale_per_req * 8;
+}
+
/**
* zfcp_qdio_open - prepare and initialize response queue
* @qdio: pointer to struct zfcp_qdio
@@ -420,10 +432,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
- if (adapter->scsi_host) {
- adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
- adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
- }
+ zfcp_qdio_shost_update(adapter, qdio);
return 0;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 13d873f806e4..d58bf79892f2 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -4,7 +4,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -451,26 +451,39 @@ static struct scsi_host_template zfcp_scsi_host_template = {
};
/**
- * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * zfcp_scsi_adapter_register() - Allocate and register SCSI and FC host with
+ * SCSI midlayer
* @adapter: The zfcp adapter to register with the SCSI midlayer
+ *
+ * Allocates the SCSI host object for the given adapter, sets basic properties
+ * (such as the transport template, QDIO limits, ...), and registers it with
+ * the midlayer.
+ *
+ * During registration with the midlayer the corresponding FC host object for
+ * the referenced transport class is also implicitly allocated.
+ *
+ * Upon success adapter->scsi_host is set, and upon failure it remains NULL. If
+ * adapter->scsi_host is already set, nothing is done.
+ *
+ * Return:
+ * * 0 - Allocation and registration were successful
+ * * -EEXIST - SCSI and FC host already existed; nothing was done or changed
+ * * -EIO - Allocation or registration failed
*/
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
if (adapter->scsi_host)
- return 0;
+ return -EEXIST;
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
- if (!adapter->scsi_host) {
- dev_err(&adapter->ccw_device->dev,
- "Registering the FCP device with the "
- "SCSI stack failed\n");
- return -EIO;
- }
+ if (!adapter->scsi_host)
+ goto err_out;
/* tell the SCSI stack some characteristics of this adapter */
adapter->scsi_host->max_id = 511;
@@ -480,14 +493,23 @@ int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
adapter->scsi_host->transportt = zfcp_scsi_transport_template;
+ /* make all basic properties known at registration time */
+ zfcp_qdio_shost_update(adapter, adapter->qdio);
+ zfcp_scsi_set_prot(adapter);
+
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
scsi_host_put(adapter->scsi_host);
- return -EIO;
+ goto err_out;
}
return 0;
+err_out:
+ adapter->scsi_host = NULL;
+ dev_err(&adapter->ccw_device->dev,
+ "Registering the FCP device with the SCSI stack failed\n");
+ return -EIO;
}
/**
@@ -841,6 +863,95 @@ void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
set_host_byte(scmd, DID_SOFT_ERROR);
}
+void zfcp_scsi_shost_update_config_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_config *const bottom,
+ const bool bottom_incomplete)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+ const struct fc_els_flogi *nsp, *plogi;
+
+ if (shost == NULL)
+ return;
+
+ snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->lic_version);
+
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ snprintf(fc_host_hardware_version(shost),
+ FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->hardware_version);
+ memcpy(fc_host_serial_number(shost), bottom->serial_number,
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ EBCASC(fc_host_serial_number(shost),
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ }
+
+ /* adjust pointers for missing command code */
+ nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+ - sizeof(u32));
+ plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+ - sizeof(u32));
+
+ snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
+ "IBM");
+ fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
+ fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ zfcp_scsi_set_prot(adapter);
+
+ /* do not evaluate invalid fields */
+ if (bottom_incomplete)
+ return;
+
+ fc_host_port_id(shost) = ntoh24(bottom->s_id);
+ fc_host_speed(shost) =
+ zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+ snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
+ bottom->adapter_type);
+
+ switch (bottom->fc_topology) {
+ case FSF_TOPO_P2P:
+ fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ fc_host_fabric_name(shost) = 0;
+ break;
+ case FSF_TOPO_FABRIC:
+ fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
+ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ break;
+ case FSF_TOPO_AL:
+ fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ fallthrough;
+ default:
+ fc_host_fabric_name(shost) = 0;
+ break;
+ }
+}
+
+void zfcp_scsi_shost_update_port_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_port *const bottom)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+
+ if (shost == NULL)
+ return;
+
+ fc_host_permanent_port_name(shost) = bottom->wwpn;
+ fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+ fc_host_supported_speeds(shost) =
+ zfcp_fsf_convert_portspeed(bottom->supported_speed);
+ memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+ FC_FC4_LIST_SIZE);
+ memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+ FC_FC4_LIST_SIZE);
+}
+
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 7ec30ded0169..8d9662e8b717 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -216,20 +216,32 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ int retval = 0;
if (!adapter)
return -ENODEV;
/*
+ * If `scsi_host` is missing, we can't schedule `scan_work`, as it
+ * makes use of the corresponding fc_host object. But this state is
+ * only possible if xconfig/xport data has not completed yet, in which
+ * case a port scan couldn't succeed anyway.
+ */
+ if (adapter->scsi_host == NULL) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /*
* User's wish is our command: immediately schedule and flush a
* worker to conduct a synchronous port scan, that is, neither
* a random delay nor a rate limit is applied here.
*/
queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
flush_delayed_work(&adapter->scan_work);
+out:
zfcp_ccw_adapter_put(adapter);
-
- return (ssize_t) count;
+ return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index b5b3154e2c28..bb49d83cadc7 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2237,7 +2237,7 @@ static bool __init blogic_inquiry(struct blogic_adapter *adapter)
"INQUIRE INSTALLED DEVICES ID 0 TO 7");
for (tgt_id = 0; tgt_id < 8; tgt_id++)
adapter->tgt_flags[tgt_id].tgt_exists =
- (installed_devs0to7[tgt_id] != 0 ? true : false);
+ installed_devs0to7[tgt_id] != 0;
}
/*
Issue the Inquire Setup Information command.
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index eb72ac8136c3..2b868f8db8ff 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -814,7 +814,6 @@ int aac_probe_container(struct aac_dev *dev, int cid)
kfree(scsidev);
return -ENOMEM;
}
- scsicmd->list.next = NULL;
scsicmd->scsi_done = aac_probe_container_scsi_done;
scsicmd->device = scsidev;
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index ffe41bc111fc..34e65dea992e 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -513,15 +513,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
- if (!user_srbcmd) {
- dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
- rcode = -ENOMEM;
- goto cleanup;
- }
- if(copy_from_user(user_srbcmd, user_srb,fibsize)){
- dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
- rcode = -EFAULT;
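+ /* memdup_user() returns an ERR_PTR() on failure; clear the pointer so
+ * the shared cleanup path can kfree() it unconditionally:
+ */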
+ user_srbcmd = memdup_user(user_srb, fibsize);
+ if (IS_ERR(user_srbcmd)) {
+ rcode = PTR_ERR(user_srbcmd);
+ user_srbcmd = NULL;
goto cleanup;
}
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index ddd73f6798af..8ee4e1abe568 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2351,7 +2351,7 @@ fib_free_out:
goto out;
}
-int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
struct tm cur_tm;
char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
@@ -2380,7 +2380,7 @@ out:
return ret;
}
-int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
int ret = -ENOMEM;
struct fib *fibptr;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 83a60b0a8cd8..a308e86a97f1 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -864,7 +864,7 @@ static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
return HBA_IU_TYPE_SATA_REQ;
}
-void aac_tmf_callback(void *context, struct fib *fibptr)
+static void aac_tmf_callback(void *context, struct fib *fibptr)
{
struct aac_hba_resp *err =
&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
@@ -1078,7 +1078,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
* @scsi_cmd: SCSI command block causing the reset
*
*/
-int aac_eh_host_reset(struct scsi_cmnd *cmd)
+static int aac_eh_host_reset(struct scsi_cmnd *cmd)
{
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
@@ -1632,7 +1632,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *shost;
struct aac_dev *aac;
struct list_head *insert = &aac_devices;
- int error = -ENODEV;
+ int error;
int unique_id = 0;
u64 dmamask;
int mask_bits = 0;
@@ -1657,7 +1657,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = pci_enable_device(pdev);
if (error)
goto out;
- error = -ENODEV;
if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -1689,8 +1688,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
- if (!shost)
+ if (!shost) {
+ error = -ENOMEM;
goto out_disable_pdev;
+ }
shost->irq = pdev->irq;
shost->unique_id = unique_id;
@@ -1714,8 +1715,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
sizeof(struct fib),
GFP_KERNEL);
- if (!aac->fibs)
+ if (!aac->fibs) {
+ error = -ENOMEM;
goto out_free_host;
+ }
+
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index a336a458c978..e4a09b93d00c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -3662,8 +3662,7 @@ ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
return;
tstate = ahd->enabled_targets[scsi_id];
- if (tstate != NULL)
- kfree(tstate);
+ kfree(tstate);
ahd->enabled_targets[scsi_id] = NULL;
}
#endif
@@ -6054,14 +6053,13 @@ ahd_alloc(void *platform_arg, char *name)
{
struct ahd_softc *ahd;
- ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
+ ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC);
if (!ahd) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
- memset(ahd, 0, sizeof(*ahd));
ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
if (ahd->seep_config == NULL) {
kfree(ahd);
@@ -6120,8 +6118,7 @@ ahd_set_unit(struct ahd_softc *ahd, int unit)
void
ahd_set_name(struct ahd_softc *ahd, char *name)
{
- if (ahd->name != NULL)
- kfree(ahd->name);
+ kfree(ahd->name);
ahd->name = name;
}
@@ -6182,12 +6179,9 @@ ahd_free(struct ahd_softc *ahd)
kfree(ahd->black_hole);
}
#endif
- if (ahd->name != NULL)
- kfree(ahd->name);
- if (ahd->seep_config != NULL)
- kfree(ahd->seep_config);
- if (ahd->saved_stack != NULL)
- kfree(ahd->saved_stack);
+ kfree(ahd->name);
+ kfree(ahd->seep_config);
+ kfree(ahd->saved_stack);
kfree(ahd);
return;
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 84fc499cb1e6..3d4df906fa4f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2178,8 +2178,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
if (channel == 'B')
scsi_id += 8;
tstate = ahc->enabled_targets[scsi_id];
- if (tstate != NULL)
- kfree(tstate);
+ kfree(tstate);
ahc->enabled_targets[scsi_id] = NULL;
}
#endif
@@ -4384,13 +4383,13 @@ ahc_alloc(void *platform_arg, char *name)
struct ahc_softc *ahc;
int i;
- ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
+ ahc = kzalloc(sizeof(*ahc), GFP_ATOMIC);
if (!ahc) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
- memset(ahc, 0, sizeof(*ahc));
+
ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
if (ahc->seep_config == NULL) {
kfree(ahc);
@@ -4453,8 +4452,7 @@ ahc_set_unit(struct ahc_softc *ahc, int unit)
void
ahc_set_name(struct ahc_softc *ahc, char *name)
{
- if (ahc->name != NULL)
- kfree(ahc->name);
+ kfree(ahc->name);
ahc->name = name;
}
@@ -4515,10 +4513,8 @@ ahc_free(struct ahc_softc *ahc)
kfree(ahc->black_hole);
}
#endif
- if (ahc->name != NULL)
- kfree(ahc->name);
- if (ahc->seep_config != NULL)
- kfree(ahc->seep_config);
+ kfree(ahc->name);
+ kfree(ahc->seep_config);
kfree(ahc);
return;
}
@@ -4927,8 +4923,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
case 0:
break;
}
- if (scb_data->scbarray != NULL)
- kfree(scb_data->scbarray);
+ kfree(scb_data->scbarray);
}
static void
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 3ddc8852bc32..105adba559a1 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -406,7 +406,7 @@ struct asd_manuf_sec {
u8 sas_addr[SAS_ADDR_SIZE];
u8 pcba_sn[ASD_PCBA_SN_SIZE];
/* Here start the other segments */
- u8 linked_list[0];
+ u8 linked_list[];
} __attribute__ ((packed));
struct asd_manuf_phy_desc {
@@ -449,7 +449,7 @@ struct asd_ms_sb_desc {
u8 type;
u8 node_desc_index;
u8 conn_desc_index;
- u8 _recvd[0];
+ u8 _recvd[];
} __attribute__ ((packed));
#if 0
@@ -478,12 +478,12 @@ struct asd_ms_conn_desc {
u8 size_sideband_desc;
u32 _resvd;
u8 name[16];
- struct asd_ms_sb_desc sb_desc[0];
+ struct asd_ms_sb_desc sb_desc[];
} __attribute__ ((packed));
struct asd_nd_phy_desc {
u8 vp_attch_type;
- u8 attch_specific[0];
+ u8 attch_specific[];
} __attribute__ ((packed));
#if 0
@@ -503,7 +503,7 @@ struct asd_ms_node_desc {
u8 size_phy_desc;
u8 _resvd;
u8 name[16];
- struct asd_nd_phy_desc phy_desc[0];
+ struct asd_nd_phy_desc phy_desc[];
} __attribute__ ((packed));
struct asd_ms_conn_map {
@@ -518,7 +518,7 @@ struct asd_ms_conn_map {
u8 usage_model_id;
u32 _resvd;
struct asd_ms_conn_desc conn_desc[0];
- struct asd_ms_node_desc node_desc[0];
+ struct asd_ms_node_desc node_desc[];
} __attribute__ ((packed));
struct asd_ctrla_phy_entry {
@@ -542,7 +542,7 @@ struct asd_ll_el {
u8 id0;
u8 id1;
__le16 next;
- u8 something_here[0];
+ u8 something_here[];
} __attribute__ ((packed));
static int asd_poll_flash(struct asd_ha_struct *asd_ha)
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 0f554ebb8f2c..fb4c469bd89f 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -708,7 +708,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
}
}
-bfa_boolean_t
+static bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
struct bfi_msg_s *m;
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 284baa3b0c8e..766f2b5ed2ab 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -436,7 +436,7 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
return BFA_STATUS_OK;
}
-void
+static void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
struct bfa_itnim_latency_s *io_lat =
@@ -453,7 +453,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
io_lat->avg[idx] += val;
}
-void
+static void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
ioim->start_time = jiffies;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 7c3eadc58b98..297a77f5806c 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1283,7 +1283,7 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
n2n_port->reply_oxid = 0;
}
-void
+static void
bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
{
int i = 0, j = 0, bit = 0, alpa_bit = 0;
@@ -4358,7 +4358,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
bfa_sm_set_state(ns,
bfa_fcs_lport_ns_sm_sending_gid_ft);
bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
- };
+ }
break;
default:
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 82801b366500..fc294e1950a6 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -1575,7 +1575,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
- };
+ }
break;
case RPSM_EVENT_DELETE:
@@ -2449,7 +2449,7 @@ bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
bfa_fcs_itnim_brp_online(rport->itnim);
if (!BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_online(rport);
- };
+ }
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 18b58b2f304f..6fd3383ee538 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -364,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
writel(r32, rb + FNC_PERS_REG);
}
-bfa_boolean_t
+static bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
u32 r32;
@@ -744,7 +744,7 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
writel(0, (rb + CT2_MBIST_CTL_REG));
}
-void
+static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
/* put port0, port1 MAC & AHB in reset */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 6d2131441f0a..0b7d2e8f4a66 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -4284,7 +4284,7 @@ bfa_fcport_dportdisable(struct bfa_s *bfa)
bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
}
-void
+static void
bfa_fcport_ddportenable(struct bfa_s *bfa)
{
/*
@@ -4293,7 +4293,7 @@ bfa_fcport_ddportenable(struct bfa_s *bfa)
bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
}
-void
+static void
bfa_fcport_ddportdisable(struct bfa_s *bfa)
{
/*
@@ -5517,7 +5517,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
struct bfa_uf_buf_s *uf_buf;
uint8_t *buf;
- struct fchs_s *fchs;
uf_buf = (struct bfa_uf_buf_s *)
bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
@@ -5526,8 +5525,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
m->frm_len = be16_to_cpu(m->frm_len);
m->xfr_len = be16_to_cpu(m->xfr_len);
- fchs = (struct fchs_s *)uf_buf;
-
list_del(&uf->qe); /* dequeue from posted queue */
uf->data_ptr = buf;
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index eb0c76338295..bc5d84f87d8f 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -50,7 +50,7 @@ int pcie_max_read_reqsz;
int bfa_debugfs_enable = 1;
int msix_disable_cb = 0, msix_disable_ct = 0;
int max_xfer_size = BFAD_MAX_SECTORS >> 1;
-int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
+static int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
/* Firmware related */
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index fbfce02e5b93..5ae1e3f78910 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -437,7 +437,7 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
return status;
}
-int
+static int
bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
@@ -562,7 +562,7 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
-void
+static void
bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
{
struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a76c968dbac5..412dbe125e10 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -136,7 +136,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 1cbb431fa682..0e33324e16f5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -945,7 +945,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
*/
if (interface->enabled)
fcoe_ctlr_link_up(ctlr);
- };
+ }
} else if (fcoe_ctlr_link_down(ctlr)) {
switch (cdev->enabled) {
case FCOE_CTLR_DISABLED:
@@ -965,7 +965,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
put_cpu();
fcoe_clean_pending_queue(lport);
wait_for_upload = 1;
- };
+ }
}
}
mutex_unlock(&bnx2fc_dev_lock);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 2b070f0835df..1aba5897ccb0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1081,6 +1081,7 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
}
static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
+ __must_hold(&tgt->tgt_lock)
{
struct bnx2fc_rport *tgt = io_req->tgt;
unsigned int time_left;
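The __must_hold() annotation added above is the sparse static checker's way of documenting that a function must be entered (and left) with the named lock held. A minimal sketch of the pattern, all names hypothetical; lockdep_assert_held() is the runtime counterpart under CONFIG_LOCKDEP:

static void stats_bump(struct stats *s)
	__must_hold(&s->lock)
{
	/* runtime check of the same locking contract */
	lockdep_assert_held(&s->lock);
	s->count++;	/* safe: caller holds s->lock */
}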
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 524cdbcd29aa..ec7d01f6e2d5 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -959,6 +959,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct net_device *ndev = cdev->ports[csk->port_id];
struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
struct sk_buff *skb = NULL;
+ int ret;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
@@ -979,16 +980,16 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
if (csk->atid < 0) {
pr_err("NO atid available.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_sock;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
if (!skb) {
- cxgb3_free_atid(t3dev, csk->atid);
- cxgbi_sock_put(csk);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_atid;
}
skb->sk = (struct sock *)csk;
set_arp_failure_handler(skb, act_open_arp_failure);
@@ -1010,6 +1011,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
return 0;
+
+free_atid:
+ cxgb3_free_atid(t3dev, csk->atid);
+put_sock:
+ cxgbi_sock_put(csk);
+ l2t_release(t3dev, csk->l2t);
+ csk->l2t = NULL;
+
+ return ret;
}
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
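The init_act_open() rework above is the standard centralized-unwind idiom: early returns that each freed a different subset of resources are replaced by one exit chain, which also lets the function release the l2t entry the old error paths leaked. A hedged sketch of the idiom, names hypothetical:

static int setup_conn(struct conn *c)
{
	int ret;

	c->id = alloc_id(c);
	if (c->id < 0) {
		ret = -EINVAL;
		goto put_ref;		/* nothing else acquired yet */
	}

	c->buf = kmalloc(SZ_4K, GFP_KERNEL);
	if (!c->buf) {
		ret = -ENOMEM;
		goto free_id;		/* undo in reverse acquisition order */
	}
	return 0;

free_id:
	free_id(c->id);
put_ref:
	put_ref(c);
	return ret;
}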
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index bc1086ae6835..8ce8592f6a64 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1127,10 +1127,9 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
if (!csk)
goto rel_skb;
- if (csk)
- pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
- (&csk->saddr), (&csk->daddr), csk,
- csk->state, csk->flags, csk->tid, rpl->status);
+ pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+ (&csk->saddr), (&csk->daddr), csk,
+ csk->state, csk->flags, csk->tid, rpl->status);
if (rpl->status == CPL_ERR_ABORT_FAILED)
goto rel_skb;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index fbd2ae40dab4..fcc5aa9f6014 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3744,6 +3744,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->afu_cookie = cfg->ops->create_afu(pdev);
if (unlikely(!cfg->afu_cookie)) {
dev_err(dev, "%s: create_afu failed\n", __func__);
+ rc = -ENOMEM;
goto out_remove;
}
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 02dff3a684e0..2cf889512ece 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1120,7 +1120,7 @@ static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u6
{
struct adpt_device* d;
- if(chan < 0 || chan >= MAX_CHANNEL)
+ if (chan >= MAX_CHANNEL)
return NULL;
d = pHba->channel[chan].device[id];
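chan is a u32 here, so the dropped "chan < 0" arm could never be true (compilers flag it under -Wtype-limits); the single upper-bound comparison is the whole check. For illustration, a hypothetical helper:

static bool chan_valid(u32 chan)
{
	/* unsigned values are never negative; a caller passing a
	 * "negative" int wraps to a huge value and fails this test */
	return chan < MAX_CHANNEL;
}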
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 442c5e70a7b4..cc620f10eabc 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1510,7 +1510,7 @@ ioctl_done:
}
/* Always copy the buffer back, if only to pick up the status */
- err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
+ err = copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
if (err != 0) {
esas2r_log(ESAS2R_LOG_WARN,
"ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)",
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 25dae9f0b205..cb41d166e0c0 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1915,7 +1915,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
case FCOE_CTLR_ENABLED:
case FCOE_CTLR_UNUSED:
fcoe_ctlr_link_up(ctlr);
- };
+ }
} else if (fcoe_ctlr_link_down(ctlr)) {
switch (cdev->enabled) {
case FCOE_CTLR_DISABLED:
@@ -1927,7 +1927,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
stats->LinkFailureCount++;
put_cpu();
fcoe_clean_pending_queue(lport);
- };
+ }
}
out:
return rc;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 18584ab27c32..7910b573bacb 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -49,8 +49,8 @@
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
-LIST_HEAD(fnic_list);
-DEFINE_SPINLOCK(fnic_list_lock);
+static LIST_HEAD(fnic_list);
+static DEFINE_SPINLOCK(fnic_list_lock);
/* Supported devices by fnic module */
static struct pci_device_id fnic_id_table[] = {
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index b60795893994..27535c90b248 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2624,8 +2624,8 @@ int fnic_host_reset(struct scsi_cmnd *sc)
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->internal_reset_inprogress == 0) {
- fnic->internal_reset_inprogress = 1;
+ if (!fnic->internal_reset_inprogress) {
+ fnic->internal_reset_inprogress = true;
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2654,7 +2654,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->internal_reset_inprogress = 0;
+ fnic->internal_reset_inprogress = false;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 1b88a3b53eee..a2beee6e09f0 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -254,7 +254,7 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
}
}
-int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
+static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
@@ -316,7 +316,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
return -ETIMEDOUT;
}
-int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+static int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
struct devcmd2_controller *dc2c = vdev->devcmd2;
@@ -411,7 +411,7 @@ int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
-int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
if (!vdev->devcmd)
@@ -422,7 +422,7 @@ int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
}
-int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
int err;
unsigned int fetch_index;
@@ -492,7 +492,7 @@ err_free_devcmd2:
}
-void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
vnic_wq_disable(&vdev->devcmd2->wq);
@@ -503,7 +503,7 @@ void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
}
-int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
int err;
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
index 015af2cdabaf..442972c04e65 100644
--- a/drivers/scsi/fnic/vnic_wq.c
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -25,7 +25,7 @@
#include "vnic_wq.h"
-int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
+static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int index, enum vnic_res_type res_type)
{
wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
@@ -37,7 +37,7 @@ int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
}
-int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
+static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size)
{
return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index fe03410268e6..7f150d52b4a6 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -329,8 +329,8 @@ static void gdth_scsi_done(struct scsi_cmnd *scp)
scp->scsi_done(scp);
}
-int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
- int timeout, u32 *info)
+static int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd,
+ char *cmnd, int timeout, u32 *info)
{
gdth_ha_str *ha = shost_priv(sdev->host);
struct scsi_cmnd *scp;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 9a6deb21fe4d..11caa4b0d797 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -898,8 +898,11 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct device *dev = hisi_hba->dev;
+ dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+ if (phy->phy_attached)
+ return;
+
if (!timer_pending(&phy->timer)) {
- dev_dbg(dev, "phy%d OOB ready\n", phy_no);
phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
add_timer(&phy->timer);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index fa25766502a2..2e1718f9ade2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1175,15 +1175,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
}
-static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot)
+static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
- enum exec_status sts;
struct hisi_sas_complete_v1_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v1_hdr *complete_hdr;
@@ -1194,7 +1193,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
cmplt_hdr_data = le32_to_cpu(complete_hdr->data);
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -1260,7 +1259,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
slot_err_v1_hw(hisi_hba, task, slot);
if (unlikely(slot->abort))
- return ts->stat;
+ return;
goto out;
}
@@ -1309,12 +1308,9 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
out:
hisi_sas_slot_task_free(hisi_hba, task, slot);
- sts = ts->stat;
if (task->task_done)
task->task_done(task);
-
- return sts;
}
/* Interrupts */
@@ -1757,6 +1753,7 @@ static struct device_attribute *host_attrs_v1_hw[] = {
static struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e05faf315dcd..e7e7849a4c14 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2318,8 +2318,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
}
}
-static int
-slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -2327,7 +2327,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
- enum exec_status sts;
struct hisi_sas_complete_v2_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v2_hdr *complete_hdr =
@@ -2337,7 +2336,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
u32 dw0;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -2406,7 +2405,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
error_info[2], error_info[3]);
if (unlikely(slot->abort))
- return ts->stat;
+ return;
goto out;
}
@@ -2456,12 +2455,11 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%pK) aborted\n", task);
- return SAS_ABORTED_TASK;
+ return;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2473,15 +2471,13 @@ out:
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%pK) ignored\n",
task);
- return sts;
+ return;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
-
- return sts;
}
static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
@@ -3533,6 +3529,7 @@ static struct device_attribute *host_attrs_v2_hw[] = {
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 374885aa8d77..3e6b78a1f993 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -912,11 +912,15 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
return -EINVAL;
}
- /* Switch over to MSI handling , from PCI AER default */
+ /*
+ * This DSM handles some hardware-related configurations:
+ * 1. Switch over to MSI error handling in kernel
+ * 2. BIOS *may* reset some register values through this method
+ */
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
DSM_FUNC_ERR_HANDLE_MSI, NULL);
if (!obj)
- dev_warn(dev, "Switch over to MSI handling failed\n");
+ dev_warn(dev, "can not find DSM method, ignore\n");
else
ACPI_FREE(obj);
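acpi_evaluate_dsm() hands back an acpi_object the caller owns and must release with ACPI_FREE(); a NULL return simply means the firmware does not implement the method, which the reworded warning now reflects. The call shape, restated as a sketch:

union acpi_object *obj;

obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0 /* rev */,
			DSM_FUNC_ERR_HANDLE_MSI, NULL /* no argv4 */);
if (!obj)
	dev_warn(dev, "DSM method not implemented by firmware\n");
else
	ACPI_FREE(obj);		/* caller owns the returned object */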
@@ -2152,8 +2156,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
}
}
-static int
-slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -2161,7 +2165,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
- enum exec_status sts;
struct hisi_sas_complete_v3_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v3_hdr *complete_hdr =
@@ -2171,7 +2174,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
u32 dw0, dw1, dw3;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -2233,7 +2236,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
error_info[0], error_info[1],
error_info[2], error_info[3]);
if (unlikely(slot->abort))
- return ts->stat;
+ return;
goto out;
}
@@ -2278,12 +2281,11 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%pK) aborted\n", task);
- return SAS_ABORTED_TASK;
+ return;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2295,15 +2297,13 @@ out:
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%pK) ignored\n ",
task);
- return sts;
+ return;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
-
- return sts;
}
static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
@@ -2897,6 +2897,7 @@ static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
};
static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
+ HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
@@ -3071,6 +3072,7 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
static struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 59f0f1030c54..44e64aa21194 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2384,7 +2384,7 @@ static struct vio_driver ibmvscsi_driver = {
static struct srp_function_template ibmvscsi_transport_functions = {
};
-int __init ibmvscsi_module_init(void)
+static int __init ibmvscsi_module_init(void)
{
int ret;
@@ -2406,7 +2406,7 @@ int __init ibmvscsi_module_init(void)
return ret;
}
-void __exit ibmvscsi_module_exit(void)
+static void __exit ibmvscsi_module_exit(void)
{
vio_unregister_driver(&ibmvscsi_driver);
srp_release_transport(ibmvscsi_transport_template);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d48a8fa997b9..7d77997d26d4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1164,7 +1164,7 @@ static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int pr
default:
res->ata_class = ATA_DEV_UNKNOWN;
break;
- };
+ }
}
/**
@@ -9529,8 +9529,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
}
}
- if (ioa_cfg->ipr_cmd_pool)
- dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
+ dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
kfree(ioa_cfg->ipr_cmnd_list);
kfree(ioa_cfg->ipr_cmnd_list_dma);
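Like kfree(), dma_pool_destroy() accepts NULL and does nothing, so the removed guard was redundant. A hedged teardown sketch, field names hypothetical:

static void ctx_teardown(struct ctx *ctx)
{
	dma_pool_destroy(ctx->cmd_pool);	/* NULL-safe, like kfree() */
	ctx->cmd_pool = NULL;			/* avoid a double destroy */
	kfree(ctx->cmd_list);			/* kfree(NULL) is a no-op too */
}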
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 680e30947671..4e6b1decbca7 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -500,19 +500,19 @@ struct sci_timer {
static inline
void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t))
{
- tmr->cancel = 0;
+ tmr->cancel = false;
timer_setup(&tmr->timer, fn, 0);
}
static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
{
- tmr->cancel = 0;
+ tmr->cancel = false;
mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
}
static inline void sci_del_timer(struct sci_timer *tmr)
{
- tmr->cancel = 1;
+ tmr->cancel = true;
del_timer(&tmr->timer);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 874dd4beed10..e5a64d4f255c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2627,7 +2627,9 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
if (xmit_can_sleep) {
snprintf(ihost->workq_name, sizeof(ihost->workq_name),
"iscsi_q_%d", shost->host_no);
- ihost->workq = create_singlethread_workqueue(ihost->workq_name);
+ ihost->workq = alloc_workqueue("%s",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 2, ihost->workq_name);
if (!ihost->workq)
goto free_host;
}
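create_singlethread_workqueue() is a legacy wrapper; calling alloc_workqueue() directly makes the flags and the concurrency limit explicit: up to two in-flight work items, not bound to any CPU, visible in sysfs, and usable on the memory-reclaim path. The shape of the call, restated:

wq = alloc_workqueue("%s",
		     WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
		     2, name);		/* max_active = 2 */
if (!wq)
	return -ENOMEM;
queue_work(wq, &work);			/* hypothetical work item */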
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index c5a828a041e0..5d716d388707 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -160,6 +160,7 @@ qc_already_gone:
}
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ __must_hold(ap->lock)
{
struct sas_task *task;
struct scatterlist *sg;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8e2a356911a9..c3ceb6e5b061 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -143,7 +143,7 @@ struct lpfc_dmabuf {
struct lpfc_nvmet_ctxbuf {
struct list_head list;
- struct lpfc_nvmet_rcv_ctx *context;
+ struct lpfc_async_xchg_ctx *context;
struct lpfc_iocbq *iocbq;
struct lpfc_sglq *sglq;
struct work_struct defer_work;
@@ -627,6 +627,19 @@ struct lpfc_ras_fwlog {
enum ras_state state; /* RAS logging running state */
};
+enum lpfc_irq_chann_mode {
+ /* Assign IRQs to all possible cpus that have hardware queues */
+ NORMAL_MODE,
+
+ /* Assign IRQs only to cpus on the same numa node as HBA */
+ NUMA_MODE,
+
+ /* Assign IRQs only on non-hyperthreaded CPUs. This is the
+ * same as NORMAL_MODE, but assigns IRQs only on physical CPUs.
+ */
+ NHT_MODE,
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -835,7 +848,6 @@ struct lpfc_hba {
uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
- uint32_t cfg_irq_numa;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
@@ -1003,6 +1015,7 @@ struct lpfc_hba {
mempool_t *active_rrq_pool;
struct fc_host_statistics link_stats;
+ enum lpfc_irq_chann_mode irq_chann_mode;
enum intr_type_t intr_type;
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
@@ -1314,19 +1327,19 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
/**
- * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
- * @numa_mask: Pointer to phba's numa_mask member.
+ * lpfc_next_online_cpu - Finds next online CPU on cpumask
+ * @mask: Pointer to phba's cpumask member.
* @start: starting cpu index
*
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
static inline unsigned int
-lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
unsigned int cpu_it;
- for_each_cpu_wrap(cpu_it, numa_mask, start) {
+ for_each_cpu_wrap(cpu_it, mask, start) {
if (cpu_online(cpu_it))
break;
}
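The renamed helper is mask-agnostic: for_each_cpu_wrap() walks the given cpumask starting at start, wrapping around once, and the loop breaks at the first online CPU (nr_cpu_ids if none is found). Hypothetical usage, with prev assumed to hold the previously used CPU:

cpu = lpfc_next_online_cpu(&phba->sli4_hba.irq_aff_mask, prev + 1);
if (cpu < nr_cpu_ids)
	lpfc_irq_set_aff(eqhdl, cpu);	/* found an online CPU in the mask */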
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1354c141d614..a62c60ca6477 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -37,8 +37,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -48,7 +46,6 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
@@ -4877,7 +4874,7 @@ lpfc_request_firmware_upgrade_store(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- int val = 0, rc = -EINVAL;
+ int val = 0, rc;
/* Sanity check on user data */
if (!isdigit(buf[0]))
@@ -5704,17 +5701,69 @@ LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
-static inline void
-lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+#if IS_ENABLED(CONFIG_X86)
+/**
+ * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
+ * irq_chann_mode
+ * @phba: Pointer to HBA context object.
+ **/
+static void
+lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
+{
+ unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
+ const struct cpumask *sibling_mask;
+ struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+ cpumask_clear(aff_mask);
+
+ if (phba->irq_chann_mode == NUMA_MODE) {
+ /* Check if we're a NUMA architecture */
+ numa_node = dev_to_node(&phba->pcidev->dev);
+ if (numa_node == NUMA_NO_NODE) {
+ phba->irq_chann_mode = NORMAL_MODE;
+ return;
+ }
+ }
+
+ for_each_possible_cpu(cpu) {
+ switch (phba->irq_chann_mode) {
+ case NUMA_MODE:
+ if (cpu_to_node(cpu) == numa_node)
+ cpumask_set_cpu(cpu, aff_mask);
+ break;
+ case NHT_MODE:
+ sibling_mask = topology_sibling_cpumask(cpu);
+ first_cpu = cpumask_first(sibling_mask);
+ if (first_cpu < nr_cpu_ids)
+ cpumask_set_cpu(first_cpu, aff_mask);
+ break;
+ default:
+ break;
+ }
+ }
+}
+#endif
+
+static void
+lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
{
#if IS_ENABLED(CONFIG_X86)
- /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- phba->cfg_irq_numa = 1;
- else
- phba->cfg_irq_numa = 0;
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ /* If AMD architecture, then default is NUMA_MODE */
+ phba->irq_chann_mode = NUMA_MODE;
+ break;
+ case X86_VENDOR_INTEL:
+ /* If Intel architecture, then default is no hyperthread mode */
+ phba->irq_chann_mode = NHT_MODE;
+ break;
+ default:
+ phba->irq_chann_mode = NORMAL_MODE;
+ break;
+ }
+ lpfc_cpumask_irq_mode_init(phba);
#else
- phba->cfg_irq_numa = 0;
+ phba->irq_chann_mode = NORMAL_MODE;
#endif
}
@@ -5726,6 +5775,7 @@ lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
*
* 0 = Configure number of IRQ Channels to:
* if AMD architecture, number of CPUs on HBA's NUMA node
+ * if Intel architecture, number of physical CPUs.
* otherwise, number of active CPUs.
* [1,256] = Manually specify how many IRQ Channels to use.
*
@@ -5751,35 +5801,44 @@ MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
static int
lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
{
- const struct cpumask *numa_mask;
+ const struct cpumask *aff_mask;
if (phba->cfg_use_msi != 2) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8532 use_msi = %u ignoring cfg_irq_numa\n",
phba->cfg_use_msi);
- phba->cfg_irq_numa = 0;
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ phba->irq_chann_mode = NORMAL_MODE;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
return 0;
}
/* Check if default setting was passed */
if (val == LPFC_IRQ_CHANN_DEF)
- lpfc_assign_default_irq_numa(phba);
+ lpfc_assign_default_irq_chann(phba);
- if (phba->cfg_irq_numa) {
- numa_mask = &phba->sli4_hba.numa_mask;
+ if (phba->irq_chann_mode != NORMAL_MODE) {
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
- if (cpumask_empty(numa_mask)) {
+ if (cpumask_empty(aff_mask)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "8533 Could not identify NUMA node, "
- "ignoring cfg_irq_numa\n");
- phba->cfg_irq_numa = 0;
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ "8533 Could not identify CPUS for "
+ "mode %d, ignoring\n",
+ phba->irq_chann_mode);
+ phba->irq_chann_mode = NORMAL_MODE;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
} else {
- phba->cfg_irq_chann = cpumask_weight(numa_mask);
+ phba->cfg_irq_chann = cpumask_weight(aff_mask);
+
+ /* If no hyperthread mode, then set hdwq count to
+ * aff_mask weight as well
+ */
+ if (phba->irq_chann_mode == NHT_MODE)
+ phba->cfg_hdw_queue = phba->cfg_irq_chann;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8543 lpfc_irq_chann set to %u "
- "(numa)\n", phba->cfg_irq_chann);
+ "(mode: %d)\n", phba->cfg_irq_chann,
+ phba->irq_chann_mode);
}
} else {
if (val > LPFC_IRQ_CHANN_MAX) {
@@ -5790,7 +5849,7 @@ lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
val,
LPFC_IRQ_CHANN_MIN,
LPFC_IRQ_CHANN_MAX);
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
return -EINVAL;
}
phba->cfg_irq_chann = val;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 76dc8d9493d2..9ee6b930a655 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -24,7 +24,6 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
struct fc_frame_header;
-struct lpfc_nvmet_rcv_ctx;
void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
@@ -564,12 +563,16 @@ void lpfc_nvme_update_localport(struct lpfc_vport *vport);
int lpfc_nvmet_create_targetport(struct lpfc_hba *phba);
int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
-void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
+int lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *axchg);
+int lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *axchg);
void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
struct rqb_dmabuf *nvmebuf, uint64_t isr_ts,
uint8_t cqflag);
void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
+void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 2aa578d20f8c..69d4710d95a0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -44,7 +44,6 @@
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
-#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
@@ -462,7 +461,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
struct lpfc_nodelist *ndlp;
if ((vport->port_type != LPFC_NPIV_PORT) ||
- (fc4_type == FC_TYPE_FCP) ||
!(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
ndlp = lpfc_setup_disc_node(vport, Did);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 8a6e02aa553f..ae0a8252128c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -39,8 +39,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -50,7 +48,6 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -1035,7 +1032,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
- struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
struct nvme_fc_local_port *localport;
struct lpfc_fc4_ctrl_stat *cstat;
struct lpfc_nvme_lport *lport;
@@ -2166,10 +2163,6 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
char *pbuf;
int i;
- /* Protect copy from user */
- if (!access_ok(buf, nbytes))
- return -EFAULT;
-
memset(mybuf, 0, sizeof(mybuf));
if (copy_from_user(mybuf, buf, nbytes))
@@ -2436,7 +2429,8 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
return 0;
if (dent == phba->debug_InjErrLBA) {
- if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f'))
+ if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
+ (dstbuf[2] == 'f'))
tmp = (uint64_t)(-1);
}
@@ -2621,10 +2615,6 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
if (nbytes > 64)
nbytes = 64;
- /* Protect copy from user */
- if (!access_ok(buf, nbytes))
- return -EFAULT;
-
memset(mybuf, 0, sizeof(mybuf));
if (copy_from_user(mybuf, buf, nbytes))
@@ -2787,10 +2777,6 @@ lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf,
char mybuf[6] = {0};
int i;
- /* Protect copy from user */
- if (!access_ok(buf, nbytes))
- return -EFAULT;
-
if (copy_from_user(mybuf, buf, (nbytes >= sizeof(mybuf)) ?
(sizeof(mybuf) - 1) : nbytes))
return -EFAULT;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 80d1e661b0d4..3d670568a276 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7936,19 +7936,13 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (unlikely(!pring))
return;
- if ((phba->pport->load_flag & FC_UNLOADING))
+ if (phba->pport->load_flag & FC_UNLOADING)
return;
+
spin_lock_irq(&phba->hbalock);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
- if ((phba->pport->load_flag & FC_UNLOADING)) {
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
- return;
- }
-
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
cmd = &piocb->iocb;
@@ -8514,6 +8508,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_lock_irq(shost->host_lock);
if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
spin_unlock_irq(shost->host_lock);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
goto dropit;
}
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 789eecbf32eb..4084f7f2b821 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -36,8 +36,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
@@ -825,6 +823,12 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
if ((phba->sli_rev < LPFC_SLI_REV4) &&
(!remove && ndlp->nlp_type & NLP_FABRIC))
continue;
+
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
lpfc_disc_state_machine(vport, ndlp, NULL,
remove
? NLP_EVT_DEVICE_RM
@@ -1356,14 +1360,14 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
}
/**
- * lpfc_update_fcf_record - Update driver fcf record
* __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: Index for the lpfc_fcf_record.
* @new_fcf_record: pointer to hba fcf record.
*
* This routine updates the driver FCF priority record from the new HBA FCF
- * record. This routine is called with the host lock held.
+ * record. The hbalock is asserted held in the code path calling this
+ * routine.
**/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
@@ -1372,8 +1376,6 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
{
struct lpfc_fcf_pri *fcf_pri;
- lockdep_assert_held(&phba->hbalock);
-
fcf_pri = &phba->fcf.fcf_pri[fcf_index];
fcf_pri->fcf_rec.fcf_index = fcf_index;
/* FCF record priority */
@@ -1451,7 +1453,7 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
*
* This routine updates the driver FCF record from the new HBA FCF record
together with the address mode, vlan_id, and other information. This
- * routine is called with the host lock held.
+ * routine is called with the hbalock held.
**/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 10c5d1c3122e..6dfff0376547 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3541,7 +3541,7 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_UER_SHIFT 0
#define lpfc_mbx_set_feature_UER_MASK 0x00000001
#define lpfc_mbx_set_feature_UER_WORD word6
-#define lpfc_mbx_set_feature_mds_SHIFT 0
+#define lpfc_mbx_set_feature_mds_SHIFT 2
#define lpfc_mbx_set_feature_mds_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_WORD word6
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4104bdcdbb6f..69a5249e007a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -50,8 +50,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -61,7 +59,6 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -1032,7 +1029,7 @@ static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
struct lpfc_io_buf *psb, *psb_next;
- struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
+ struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
struct lpfc_sli4_hdw_queue *qp;
LIST_HEAD(aborts);
LIST_HEAD(nvme_aborts);
@@ -1099,7 +1096,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
&nvmet_aborts);
spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
- ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
+ ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
}
@@ -6023,29 +6020,6 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
}
/**
- * lpfc_cpumask_of_node_init - initalizes cpumask of phba's NUMA node
- * @phba: Pointer to HBA context object.
- *
- **/
-static void
-lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
-{
- unsigned int cpu, numa_node;
- struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
-
- cpumask_clear(numa_mask);
-
- /* Check if we're a NUMA architecture */
- numa_node = dev_to_node(&phba->pcidev->dev);
- if (numa_node == NUMA_NO_NODE)
- return;
-
- for_each_possible_cpu(cpu)
- if (cpu_to_node(cpu) == numa_node)
- cpumask_set_cpu(cpu, numa_mask);
-}
-
-/**
* lpfc_enable_pci_dev - Enable a generic PCI device.
* @phba: pointer to lpfc hba data structure.
*
@@ -6483,7 +6457,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
phba->sli4_hba.curr_disp_cpu = 0;
- lpfc_cpumask_of_node_init(phba);
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -6691,6 +6664,13 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
#endif
/* Not supported for NVMET */
phba->cfg_xri_rebalancing = 0;
+ if (phba->irq_chann_mode == NHT_MODE) {
+ phba->cfg_irq_chann =
+ phba->sli4_hba.num_present_cpu;
+ phba->cfg_hdw_queue =
+ phba->sli4_hba.num_present_cpu;
+ phba->irq_chann_mode = NORMAL_MODE;
+ }
break;
}
}
@@ -7032,7 +7012,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
- cpumask_clear(&phba->sli4_hba.numa_mask);
+ cpumask_clear(&phba->sli4_hba.irq_aff_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -11287,11 +11267,12 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
* @offline: true, cpu is going offline. false, cpu is coming online.
*
* If cpu is going offline, we'll try our best effort to find the next
- * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ * online cpu on the phba's original_mask and migrate all offlining IRQ
+ * affinities.
*
- * If cpu is coming online, reaffinitize the IRQ back to the onlineng cpu.
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
*
- * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
* PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
*
**/
@@ -11301,14 +11282,14 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
struct lpfc_vector_map_info *cpup;
struct cpumask *aff_mask;
unsigned int cpu_select, cpu_next, idx;
- const struct cpumask *numa_mask;
+ const struct cpumask *orig_mask;
- if (!phba->cfg_irq_numa)
+ if (phba->irq_chann_mode == NORMAL_MODE)
return;
- numa_mask = &phba->sli4_hba.numa_mask;
+ orig_mask = &phba->sli4_hba.irq_aff_mask;
- if (!cpumask_test_cpu(cpu, numa_mask))
+ if (!cpumask_test_cpu(cpu, orig_mask))
return;
cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -11317,9 +11298,9 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
return;
if (offline) {
- /* Find next online CPU on NUMA node */
- cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+ /* Find next online CPU on original mask */
+ cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
@@ -11434,7 +11415,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
char *name;
- const struct cpumask *numa_mask = NULL;
+ const struct cpumask *aff_mask = NULL;
unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
struct lpfc_hba_eq_hdl *eqhdl;
const struct cpumask *maskp;
@@ -11444,16 +11425,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- if (phba->cfg_irq_numa) {
- numa_mask = &phba->sli4_hba.numa_mask;
- cpu_cnt = cpumask_weight(numa_mask);
+ if (phba->irq_chann_mode != NORMAL_MODE)
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+ if (aff_mask) {
+ cpu_cnt = cpumask_weight(aff_mask);
vectors = min(phba->cfg_irq_chann, cpu_cnt);
- /* cpu: iterates over numa_mask including offline or online
- * cpu_select: iterates over online numa_mask to set affinity
+ /* cpu: iterates over aff_mask including offline or online
+ * cpu_select: iterates over online aff_mask to set affinity
*/
- cpu = cpumask_first(numa_mask);
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ cpu = cpumask_first(aff_mask);
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
} else {
flags |= PCI_IRQ_AFFINITY;
}
@@ -11487,7 +11470,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
eqhdl->irq = pci_irq_vector(phba->pcidev, index);
- if (phba->cfg_irq_numa) {
+ if (aff_mask) {
/* If found a neighboring online cpu, set affinity */
if (cpu_select < nr_cpu_ids)
lpfc_irq_set_aff(eqhdl, cpu_select);
@@ -11497,11 +11480,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_CPU_FIRST_IRQ,
cpu);
- /* Iterate to next offline or online cpu in numa_mask */
- cpu = cpumask_next(cpu, numa_mask);
+ /* Iterate to next offline or online cpu in aff_mask */
+ cpu = cpumask_next(cpu, aff_mask);
- /* Find next online cpu in numa_mask to set affinity */
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ /* Find next online cpu in aff_mask to set affinity */
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
} else if (vectors == 1) {
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e35b52b66d6c..e34e0f11bfdd 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1378,7 +1378,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*/
if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
- phba->host_gp = &phba->mbox->us.s2.host[0];
+ phba->host_gp = (struct lpfc_hgp __iomem *)
+ &phba->mbox->us.s2.host[0];
phba->hbq_put = NULL;
offset = (uint8_t *)&phba->mbox->us.s2.host -
(uint8_t *)phba->slim2p.virt;
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 7082279e4c01..726f6619230f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -31,8 +31,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -41,8 +39,6 @@
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
-#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index a024e5a3918f..d8501bd959e7 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -32,8 +32,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -491,6 +489,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(unsigned long long)
wwn_to_u64(sp->portName.u.wwn));
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
ndlp->nlp_prev_state = ndlp->nlp_state;
/* rport needs to be unregistered first */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -841,6 +844,12 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
if (ndlp->nlp_DID == Fabric_DID) {
if (vport->port_state <= LPFC_FDISC)
goto out;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index a45936e08031..b16c087ba272 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -36,9 +36,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme.h>
-#include <linux/nvme-fc-driver.h>
-#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -396,43 +393,100 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
return;
}
-static void
-lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+/**
+ * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
+ * @phba: pointer to lpfc hba data structure.
+ * @axchg: pointer to exchange context for the NVME LS request
+ *
+ * This routine is used for processing an asynchronously received NVME LS
+ * request. Any remaining validation is done and the LS is then forwarded
+ * to the nvme-fc transport via nvme_fc_rcv_ls_req().
+ *
+ * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
+ * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
+ * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
+ *
+ * Returns 0 if LS was handled and delivered to the transport
+ * Returns 1 if LS failed to be handled and should be dropped
+ */
+int
+lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *axchg)
{
- struct lpfc_vport *vport = cmdwqe->vport;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_vport *vport;
+ struct lpfc_nvme_rport *lpfc_rport;
+ struct nvme_fc_remote_port *remoteport;
struct lpfc_nvme_lport *lport;
- uint32_t status;
+ uint32_t *payload = axchg->payload;
+ int rc;
+
+ vport = axchg->ndlp->vport;
+ lpfc_rport = axchg->ndlp->nrport;
+ if (!lpfc_rport)
+ return -EINVAL;
+
+ remoteport = lpfc_rport->remoteport;
+ if (!vport->localport)
+ return -EINVAL;
+
+ lport = vport->localport->private;
+ if (!lport)
+ return -EINVAL;
+
+ rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
+ axchg->size);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
+ "%08x %08x %08x\n",
+ axchg->size, rc,
+ *payload, *(payload+1), *(payload+2),
+ *(payload+3), *(payload+4), *(payload+5));
+
+ if (!rc)
+ return 0;
+#endif
+ return 1;
+}
+
+/**
+ * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME
+ * LS request.
+ * @phba: Pointer to HBA context object
+ * @vport: The local port that issued the LS
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * This function is the generic completion handler for NVME LS requests.
+ * The function updates any states and statistics, calls the transport
+ * ls_req done() routine, then tears down the command and buffers used
+ * for the LS request.
+ **/
+void
+__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
struct nvmefc_ls_req *pnvme_lsreq;
struct lpfc_dmabuf *buf_ptr;
struct lpfc_nodelist *ndlp;
+ uint32_t status;
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
+ ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
- if (vport->localport) {
- lport = (struct lpfc_nvme_lport *)vport->localport->private;
- if (lport) {
- atomic_inc(&lport->fc4NvmeLsCmpls);
- if (status) {
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
- atomic_inc(&lport->cmpl_ls_xb);
- atomic_inc(&lport->cmpl_ls_err);
- }
- }
- }
-
- ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6047 nvme cmpl Enter "
- "Data %px DID %x Xri: %x status %x reason x%x "
- "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
+ "6047 NVMEx LS REQ %px cmpl DID %x Xri: %x "
+ "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
+ "ndlp:x%px\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status,
(wcqe->parameter & 0xffff),
cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
- lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
+ lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
cmdwqe->sli4_xritag, status, wcqe->parameter);
if (cmdwqe->context3) {
@@ -445,7 +499,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
pnvme_lsreq->done(pnvme_lsreq, status);
else
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6046 nvme cmpl without done call back? "
+ "6046 NVMEx cmpl without done call back? "
"Data %px DID %x Xri: %x status %x\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status);
@@ -456,6 +510,31 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
lpfc_sli_release_iocbq(phba, cmdwqe);
}
+static void
+lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_vport *vport = cmdwqe->vport;
+ struct lpfc_nvme_lport *lport;
+ uint32_t status;
+
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+
+ if (vport->localport) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (lport) {
+ atomic_inc(&lport->fc4NvmeLsCmpls);
+ if (status) {
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_ls_xb);
+ atomic_inc(&lport->cmpl_ls_err);
+ }
+ }
+ }
+
+ __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
+}
+
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp,
@@ -557,13 +636,6 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Issue GEN REQ WQE for NPORT <did> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "6050 Issue GEN REQ WQE to NPORT x%x "
- "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
- "xmit:%d 1st:%d\n",
- ndlp->nlp_DID, genwqe->iotag,
- vport->port_state,
- genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
genwqe->wqe_cmpl = cmpl;
genwqe->iocb_cmpl = NULL;
genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
@@ -575,105 +647,108 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
if (rc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC | LOG_ELS,
"6045 Issue GEN REQ WQE to NPORT x%x "
- "Data: x%x x%x\n",
+ "Data: x%x x%x rc x%x\n",
ndlp->nlp_DID, genwqe->iotag,
- vport->port_state);
+ vport->port_state, rc);
lpfc_sli_release_iocbq(phba, genwqe);
return 1;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
+ "6050 Issue GEN REQ WQE to NPORT x%x "
+ "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
+ "bmp:x%px xmit:%d 1st:%d\n",
+ ndlp->nlp_DID, genwqe->sli4_xritag,
+ vport->port_state,
+ genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
return 0;
}
+
/**
- * lpfc_nvme_ls_req - Issue an Link Service request
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
+ * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
+ * @vport: The local port issuing the LS
+ * @ndlp: The remote port to send the LS to
+ * @pnvme_lsreq: Pointer to LS request structure from the transport
*
- * Driver registers this routine to handle any link service request
- * from the nvme_fc transport to a remote nvme-aware port.
+ * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
+ * WQE to perform the LS operation.
*
* Return value :
* 0 - Success
- * TODO: What are the failure codes.
+ * non-zero: various error codes, in the form of -Exxx
**/
-static int
-lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
- struct nvme_fc_remote_port *pnvme_rport,
- struct nvmefc_ls_req *pnvme_lsreq)
+int
+__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct nvmefc_ls_req *pnvme_lsreq,
+ void (*gen_req_cmp)(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe))
{
- int ret = 0;
- struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_rport *rport;
- struct lpfc_vport *vport;
- struct lpfc_nodelist *ndlp;
- struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp;
+ struct ulp_bde64 *bpl;
+ int ret;
uint16_t ntype, nstate;
- /* there are two dma buf in the request, actually there is one and
- * the second one is just the start address + cmd size.
- * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
- * in a lpfc_dmabuf struct. When freeing we just free the wrapper
- * because the nvem layer owns the data bufs.
- * We do not have to break these packets open, we don't care what is in
- * them. And we do not have to look at the resonse data, we only care
- * that we got a response. All of the caring is going to happen in the
- * nvme-fc layer.
- */
-
- lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
- rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
- if (unlikely(!lport) || unlikely(!rport))
- return -EINVAL;
-
- vport = lport->vport;
-
- if (vport->load_flag & FC_UNLOADING)
- return -ENODEV;
-
- /* Need the ndlp. It is stored in the driver's rport. */
- ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6051 Remoteport x%px, rport has invalid ndlp. "
- "Failing LS Req\n", pnvme_rport);
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NVME_DISC | LOG_NODE | LOG_NVME_IOERR,
+ "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
+ "LS Req\n",
+ ndlp);
return -ENODEV;
}
- /* The remote node has to be a mapped nvme target or an
- * unmapped nvme initiator or it's an error.
- */
ntype = ndlp->nlp_type;
nstate = ndlp->nlp_state;
if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
(ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6088 DID x%06x not ready for "
- "IO. State x%x, Type x%x\n",
- pnvme_rport->port_id,
- ndlp->nlp_state, ndlp->nlp_type);
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NVME_DISC | LOG_NODE | LOG_NVME_IOERR,
+ "6088 NVMEx LS REQ: Fail DID x%06x not "
+ "ready for IO. Type x%x, State x%x\n",
+ ndlp->nlp_DID, ntype, nstate);
return -ENODEV;
}
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+
+ /*
+ * There are two dma bufs in the request; actually there is one and
+ * the second one is just the start address + cmd size.
+ * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
+ * in a lpfc_dmabuf struct. When freeing we just free the wrapper
+ * because the nvme layer owns the data bufs.
+ * We do not have to break these packets open; we don't care what is
+ * in them. And we do not have to look at the response data, we only
+ * care that we got a response. All of the caring is going to happen
+ * in the nvme-fc layer.
+ */
+
+ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
if (!bmp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6044 Could not find node for DID %x\n",
- pnvme_rport->port_id);
- return 2;
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NVME_DISC | LOG_NVME_IOERR,
+ "6044 NVMEx LS REQ: Could not alloc LS buf "
+ "for DID %x\n",
+ ndlp->nlp_DID);
+ return -ENOMEM;
}
- INIT_LIST_HEAD(&bmp->list);
+
bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
if (!bmp->virt) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6042 Could not find node for DID %x\n",
- pnvme_rport->port_id);
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NVME_DISC | LOG_NVME_IOERR,
+ "6042 NVMEx LS REQ: Could not alloc mbuf "
+ "for DID %x\n",
+ ndlp->nlp_DID);
kfree(bmp);
- return 3;
+ return -ENOMEM;
}
+
+ INIT_LIST_HEAD(&bmp->list);
+
bpl = (struct ulp_bde64 *)bmp->virt;
bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
@@ -688,118 +763,206 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
- /* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6149 Issue LS Req to DID 0x%06x lport x%px, "
- "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
- "%pad %pad\n",
- ndlp->nlp_DID, pnvme_lport, pnvme_rport,
- pnvme_lsreq, pnvme_lsreq->rqstlen,
- pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
- &pnvme_lsreq->rspdma);
-
- atomic_inc(&lport->fc4NvmeLsRequests);
+ "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
+ "rqstlen:%d rsplen:%d %pad %pad\n",
+ ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
+ pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+ &pnvme_lsreq->rspdma);
- /* Hardcode the wait to 30 seconds. Connections are failing otherwise.
- * This code allows it all to work.
- */
ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
- pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
- ndlp, 2, 30, 0);
+ pnvme_lsreq, gen_req_cmp, ndlp, 2,
+ LPFC_NVME_LS_TIMEOUT, 0);
if (ret != WQE_SUCCESS) {
- atomic_inc(&lport->xmt_ls_err);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6052 EXIT. issue ls wqe failed lport x%px, "
- "rport x%px lsreq x%px Status %x DID %x\n",
- pnvme_lport, pnvme_rport, pnvme_lsreq,
- ret, ndlp->nlp_DID);
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NVME_DISC | LOG_NVME_IOERR,
+ "6052 NVMEx REQ: EXIT. issue ls wqe failed "
+ "lsreq x%px Status %x DID %x\n",
+ pnvme_lsreq, ret, ndlp->nlp_DID);
lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
kfree(bmp);
- return ret;
+ return -EIO;
}
- /* Stub in routine and return 0 for now. */
- return ret;
+ return 0;
}
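As an aside, the buffer wrapping done above boils down to filling a two-entry buffer pointer list: one BDE mapping the transport's LS request payload, one mapping its response buffer. A minimal userspace sketch of that layout, using hypothetical stand-in types rather than the driver's struct ulp_bde64 and DMA helpers:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for one buffer descriptor entry (BDE). The real driver
 * splits the 64-bit DMA address with putPaddrHigh()/putPaddrLow()
 * and stores little-endian values; this sketch keeps host order. */
struct bde_sketch {
	uint32_t addr_hi;
	uint32_t addr_lo;
	uint32_t size;
};

static void fill_bde(struct bde_sketch *bde, uint64_t dma, uint32_t len)
{
	bde->addr_hi = (uint32_t)(dma >> 32);
	bde->addr_lo = (uint32_t)(dma & 0xffffffffu);
	bde->size = len;
}

int main(void)
{
	struct bde_sketch bpl[2];

	/* illustrative addresses/lengths only */
	fill_bde(&bpl[0], 0x100000000ULL, 1024);  /* rqstdma / rqstlen */
	fill_bde(&bpl[1], 0x100001000ULL, 4096);  /* rspdma / rsplen  */

	printf("req BDE hi=0x%x lo=0x%x len=%u\n",
	       bpl[0].addr_hi, bpl[0].addr_lo, bpl[0].size);
	printf("rsp BDE hi=0x%x lo=0x%x len=%u\n",
	       bpl[1].addr_hi, bpl[1].addr_lo, bpl[1].size);
	return 0;
}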
/**
- * lpfc_nvme_ls_abort - Issue an Link Service request
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
+ * lpfc_nvme_ls_req - Issue an NVME Link Service request
+ * @lpfc_nvme_lport: Transport localport that LS is to be issued from.
+ * @lpfc_nvme_rport: Transport remoteport that LS is to be sent to.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
*
* Driver registers this routine to handle any link service request
* from the nvme_fc transport to a remote nvme-aware port.
*
* Return value :
* 0 - Success
- * TODO: What are the failure codes.
+ * non-zero: various error codes, in the form of -Exxx
**/
-static void
-lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
- struct nvme_fc_remote_port *pnvme_rport,
- struct nvmefc_ls_req *pnvme_lsreq)
+static int
+lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ struct nvme_fc_remote_port *pnvme_rport,
+ struct nvmefc_ls_req *pnvme_lsreq)
{
struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_rport *rport;
struct lpfc_vport *vport;
- struct lpfc_hba *phba;
- struct lpfc_nodelist *ndlp;
- LIST_HEAD(abort_list);
- struct lpfc_sli_ring *pring;
- struct lpfc_iocbq *wqe, *next_wqe;
+ int ret;
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
- if (unlikely(!lport))
- return;
- vport = lport->vport;
- phba = vport->phba;
+ rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ if (unlikely(!lport) || unlikely(!rport))
+ return -EINVAL;
+ vport = lport->vport;
if (vport->load_flag & FC_UNLOADING)
- return;
+ return -ENODEV;
+
+ atomic_inc(&lport->fc4NvmeLsRequests);
+
+ ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
+ lpfc_nvme_ls_req_cmp);
+ if (ret)
+ atomic_inc(&lport->xmt_ls_err);
+
+ return ret;
+}
+
+/**
+ * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
+ * NVME LS request
+ * @vport: The local port that issued the LS
+ * @ndlp: The remote port the LS was sent to
+ * @pnvme_lsreq: Pointer to LS request structure from the transport
+ *
+ * The driver validates the ndlp, looks for the LS, and aborts the
+ * LS if found.
+ *
+ * Returns:
+ * 0 : if LS found and aborted
+ * non-zero: various error conditions, in the form of -Exxx
+ **/
+int
+__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *wqe, *next_wqe;
+ bool foundit = false;
- ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6049 Could not find node for DID %x\n",
- pnvme_rport->port_id);
- return;
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_NVME_DISC | LOG_NODE |
+ LOG_NVME_IOERR | LOG_NVME_ABTS,
+ "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
+ "x%06x, Failing LS Req\n",
+ ndlp, ndlp ? ndlp->nlp_DID : 0);
+ return -EINVAL;
}
- /* Expand print to include key fields. */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
- "6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d "
- "rsplen:%d %pad %pad\n",
- pnvme_lport, pnvme_rport,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
+ "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
+ "x%p rqstlen:%d rsplen:%d %pad %pad\n",
pnvme_lsreq, pnvme_lsreq->rqstlen,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
/*
- * Lock the ELS ring txcmplq and build a local list of all ELS IOs
- * that need an ABTS. The IOs need to stay on the txcmplq so that
- * the abort operation completes them successfully.
+ * Lock the ELS ring txcmplq and look for the wqe that matches
+ * this ELS. If found, issue an abort on the wqe.
*/
pring = phba->sli4_hba.nvmels_wq->pring;
spin_lock_irq(&phba->hbalock);
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
- /* Add to abort_list on on NDLP match. */
- if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
+ if (wqe->context2 == pnvme_lsreq) {
wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
- list_add_tail(&wqe->dlist, &abort_list);
+ foundit = true;
+ break;
}
}
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
- /* Abort the targeted IOs and remove them from the abort list. */
- list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
- atomic_inc(&lport->xmt_ls_abort);
- spin_lock_irq(&phba->hbalock);
- list_del_init(&wqe->dlist);
+ if (foundit)
lpfc_sli_issue_abort_iotag(phba, pring, wqe);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
+
+ if (foundit)
+ return 0;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
+ "6213 NVMEx LS REQ Abort: Unable to locate req x%p\n",
+ pnvme_lsreq);
+ return -EINVAL;
+}
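For illustration, the abort path above is a lookup-and-flag walk: find the one in-flight WQE whose context2 stashes this LS request, mark it driver-aborted, and only then issue the abort. A minimal userspace analog with hypothetical simplified types (no locking shown; the driver holds hbalock and the ring lock):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DRIVER_ABORTED 0x1

/* Toy stand-in for a WQE on the txcmplq. */
struct wqe_sketch {
	void *context2;            /* LS request carried by this WQE */
	unsigned int flags;
	struct wqe_sketch *next;
};

static bool abort_matching_wqe(struct wqe_sketch *head, void *lsreq)
{
	struct wqe_sketch *w;

	for (w = head; w; w = w->next) {
		if (w->context2 == lsreq) {
			w->flags |= DRIVER_ABORTED; /* found: flag, then abort */
			return true;
		}
	}
	return false;                  /* not in flight, nothing to abort */
}

int main(void)
{
	int a, b, c;
	struct wqe_sketch w1 = { &b, 0, NULL };
	struct wqe_sketch w0 = { &a, 0, &w1 };

	printf("abort known req:   %d\n", abort_matching_wqe(&w0, &b));
	printf("abort unknown req: %d\n", abort_matching_wqe(&w0, &c));
	return 0;
}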
+
+static int
+lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *ls_rsp)
+{
+ struct lpfc_async_xchg_ctx *axchg =
+ container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
+ struct lpfc_nvme_lport *lport;
+ int rc;
+
+ if (axchg->phba->pport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
+ lport = (struct lpfc_nvme_lport *)localport->private;
+
+ rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);
+
+ if (rc) {
+ /*
+ * unless the failure is due to having already sent
+ * the response, an abort will be generated for the
+ * exchange if the rsp can't be sent.
+ */
+ if (rc != -EALREADY)
+ atomic_inc(&lport->xmt_ls_abort);
+ return rc;
}
+
+ return 0;
+}
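As an aside, both this host-side wrapper and the target-side lpfc_nvmet_xmt_ls_rsp apply the same rule to the shared routine's return code: -EALREADY means the LS had already been answered (state mismatch), so no abort was generated; any other failure means the core routine aborted the still-open exchange, and the wrapper counts it. A compact userspace sketch of that accounting, with hypothetical names:

#include <errno.h>
#include <stdio.h>

static long xmt_ls_abort_cnt;	/* stand-in for the lport/tgtport counter */

/* Mirror of the wrapper logic: count an abort for every failure
 * except -EALREADY, where the exchange was already answered. */
static int xmt_ls_rsp_wrapper(int core_rc)
{
	if (core_rc && core_rc != -EALREADY)
		xmt_ls_abort_cnt++;
	return core_rc;
}

int main(void)
{
	xmt_ls_rsp_wrapper(0);		/* sent fine: no abort      */
	xmt_ls_rsp_wrapper(-EALREADY);	/* duplicate rsp: no abort  */
	xmt_ls_rsp_wrapper(-ENXIO);	/* transmit failed: aborted */
	printf("aborts generated: %ld\n", xmt_ls_abort_cnt);
	return 0;
}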
+
+/**
+ * lpfc_nvme_ls_abort - Abort a prior NVME LS request
+ * @lpfc_nvme_lport: Transport localport that LS is to be issued from.
+ * @lpfc_nvme_rport: Transport remoteport that LS is to be sent to.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
+ *
+ * Driver registers this routine to abort an NVME LS request that is
+ * in progress (from the transport's perspective).
+ **/
+static void
+lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
+ struct nvme_fc_remote_port *pnvme_rport,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ int ret;
+
+ lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ if (unlikely(!lport))
+ return;
+ vport = lport->vport;
+ phba = vport->phba;
+
+ if (vport->load_flag & FC_UNLOADING)
+ return;
+
+ ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
+
+ ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
+ if (!ret)
+ atomic_inc(&lport->xmt_ls_abort);
}
/* Fix up the existing sgls for NVME IO. */
@@ -1491,11 +1654,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if (vport->load_flag & FC_UNLOADING) {
- ret = -ENODEV;
- goto out_fail;
- }
-
if (unlikely(vport->load_flag & FC_UNLOADING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
@@ -1911,6 +2069,7 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
.fcp_io = lpfc_nvme_fcp_io_submit,
.ls_abort = lpfc_nvme_ls_abort,
.fcp_abort = lpfc_nvme_fcp_abort,
+ .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
@@ -2106,6 +2265,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
atomic_set(&lport->cmpl_fcp_err, 0);
atomic_set(&lport->cmpl_ls_xb, 0);
atomic_set(&lport->cmpl_ls_err, 0);
+
atomic_set(&lport->fc4NvmeLsRequests, 0);
atomic_set(&lport->fc4NvmeLsCmpls, 0);
}
@@ -2326,38 +2486,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
rport = remote_port->private;
if (oldrport) {
- /* New remoteport record does not guarantee valid
- * host private memory area.
- */
- if (oldrport == remote_port->private) {
- /* Same remoteport - ndlp should match.
- * Just reuse.
- */
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebind lport to current "
- "remoteport x%px wwpn 0x%llx, "
- "Data: x%x x%x x%px x%px x%x "
- " x%06x\n",
- remote_port,
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- oldrport->ndlp,
- ndlp,
- ndlp->nlp_type,
- ndlp->nlp_DID);
-
- /* It's a complete rebind only if the driver
- * is registering with the same ndlp. Otherwise
- * the driver likely executed a node swap
- * prior to this registration and the ndlp to
- * remoteport binding needs to be redone.
- */
- if (prev_ndlp == ndlp)
- return 0;
-
- }
/* Sever the ndlp<->rport association
* before dropping the ndlp ref from
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 593c48ff634e..4a4c3f780e1f 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -21,6 +21,10 @@
* included with this package. *
********************************************************************/
+#include <linux/nvme.h>
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
#define LPFC_NVME_ERSP_LEN 0x20
@@ -74,3 +78,179 @@ struct lpfc_nvme_rport {
struct lpfc_nvme_fcpreq_priv {
struct lpfc_io_buf *nvme_buf;
};
+
+/*
+ * set NVME LS request timeouts to 30s. It is larger than the 2*R_A_TOV
+ * set by the spec; the spec value appears to cause issues with some devices.
+ */
+#define LPFC_NVME_LS_TIMEOUT 30
+
+
+#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
+#define LPFC_NVMET_RQE_MIN_POST 128
+#define LPFC_NVMET_RQE_DEF_POST 512
+#define LPFC_NVMET_RQE_DEF_COUNT 2048
+#define LPFC_NVMET_SUCCESS_LEN 12
+
+#define LPFC_NVMET_MRQ_AUTO 0
+#define LPFC_NVMET_MRQ_MAX 16
+
+#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC)
+
+/* Used for NVME Target */
+#define LPFC_NVMET_INV_HOST_ACTIVE 1
+
+struct lpfc_nvmet_tgtport {
+ struct lpfc_hba *phba;
+ struct completion *tport_unreg_cmp;
+ atomic_t state; /* tracks nvmet hosthandle invalidation */
+
+ /* Stats counters - lpfc_nvmet_handle_lsreq */
+ atomic_t rcv_ls_req_in;
+ atomic_t rcv_ls_req_out;
+ atomic_t rcv_ls_req_drop;
+ atomic_t xmt_ls_abort;
+ atomic_t xmt_ls_abort_cmpl;
+
+ /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
+ atomic_t xmt_ls_rsp;
+ atomic_t xmt_ls_drop;
+
+ /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
+ atomic_t xmt_ls_rsp_error;
+ atomic_t xmt_ls_rsp_aborted;
+ atomic_t xmt_ls_rsp_xb_set;
+ atomic_t xmt_ls_rsp_cmpl;
+
+ /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
+ atomic_t rcv_fcp_cmd_in;
+ atomic_t rcv_fcp_cmd_out;
+ atomic_t rcv_fcp_cmd_drop;
+ atomic_t rcv_fcp_cmd_defer;
+ atomic_t xmt_fcp_release;
+
+ /* Stats counters - lpfc_nvmet_xmt_fcp_op */
+ atomic_t xmt_fcp_drop;
+ atomic_t xmt_fcp_read_rsp;
+ atomic_t xmt_fcp_read;
+ atomic_t xmt_fcp_write;
+ atomic_t xmt_fcp_rsp;
+
+ /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
+ atomic_t xmt_fcp_rsp_xb_set;
+ atomic_t xmt_fcp_rsp_cmpl;
+ atomic_t xmt_fcp_rsp_error;
+ atomic_t xmt_fcp_rsp_aborted;
+ atomic_t xmt_fcp_rsp_drop;
+
+ /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+ atomic_t xmt_fcp_xri_abort_cqe;
+ atomic_t xmt_fcp_abort;
+ atomic_t xmt_fcp_abort_cmpl;
+ atomic_t xmt_abort_sol;
+ atomic_t xmt_abort_unsol;
+ atomic_t xmt_abort_rsp;
+ atomic_t xmt_abort_rsp_error;
+
+ /* Stats counters - defer IO */
+ atomic_t defer_ctx;
+ atomic_t defer_fod;
+ atomic_t defer_wqfull;
+};
+
+struct lpfc_nvmet_ctx_info {
+ struct list_head nvmet_ctx_list;
+ spinlock_t nvmet_ctx_list_lock; /* lock per CPU */
+ struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
+ struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
+ uint16_t nvmet_ctx_list_cnt;
+ char pad[16]; /* pad to a cache-line */
+};
+
+/* This retrieves the context info associated with the specified cpu / mrq */
+#define lpfc_get_ctx_list(phba, cpu, mrq) \
+ (phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
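To make the indexing concrete: the macro above addresses a flat array laid out as [cpu][mrq], so the context list for a cpu/mrq pair lives at cpu * cfg_nvmet_mrq + mrq. A userspace sketch of the same arithmetic, with illustrative names only:

#include <stdio.h>

struct ctx_info_sketch { int cpu, mrq; };

/* Same indexing as lpfc_get_ctx_list(): row = cpu, column = mrq. */
static struct ctx_info_sketch *
get_ctx_list(struct ctx_info_sketch *base, int nr_mrq, int cpu, int mrq)
{
	return base + (cpu * nr_mrq) + mrq;
}

int main(void)
{
	enum { NR_CPU = 4, NR_MRQ = 2 };
	struct ctx_info_sketch tbl[NR_CPU * NR_MRQ];
	struct ctx_info_sketch *e;
	int c, m;

	for (c = 0; c < NR_CPU; c++)
		for (m = 0; m < NR_MRQ; m++) {
			e = get_ctx_list(tbl, NR_MRQ, c, m);
			e->cpu = c;
			e->mrq = m;
		}

	e = get_ctx_list(tbl, NR_MRQ, 3, 1);
	printf("cpu=%d mrq=%d at flat index %d\n",
	       e->cpu, e->mrq, (int)(e - tbl));
	return 0;
}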
+
+/* Values for state field of struct lpfc_async_xchg_ctx */
+#define LPFC_NVME_STE_LS_RCV 1
+#define LPFC_NVME_STE_LS_ABORT 2
+#define LPFC_NVME_STE_LS_RSP 3
+#define LPFC_NVME_STE_RCV 4
+#define LPFC_NVME_STE_DATA 5
+#define LPFC_NVME_STE_ABORT 6
+#define LPFC_NVME_STE_DONE 7
+#define LPFC_NVME_STE_FREE 0xff
+
+/* Values for flag field of struct lpfc_async_xchg_ctx */
+#define LPFC_NVME_IO_INP 0x1 /* IO is in progress on exchange */
+#define LPFC_NVME_ABORT_OP 0x2 /* Abort WQE issued on exchange */
+#define LPFC_NVME_XBUSY 0x4 /* XB bit set on IO cmpl */
+#define LPFC_NVME_CTX_RLS 0x8 /* ctx free requested */
+#define LPFC_NVME_ABTS_RCV 0x10 /* ABTS received on exchange */
+#define LPFC_NVME_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */
+#define LPFC_NVME_DEFER_WQFULL 0x40 /* Waiting on a free WQE */
+#define LPFC_NVME_TNOTIFY 0x80 /* notify transport of abts */
+
+struct lpfc_async_xchg_ctx {
+ union {
+ struct nvmefc_tgt_fcp_req fcp_req;
+ } hdlrctx;
+ struct list_head list;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ struct nvmefc_ls_req *ls_req;
+ struct nvmefc_ls_rsp ls_rsp;
+ struct lpfc_iocbq *wqeq;
+ struct lpfc_iocbq *abort_wqeq;
+ spinlock_t ctxlock; /* protect flag access */
+ uint32_t sid;
+ uint32_t offset;
+ uint16_t oxid;
+ uint16_t size;
+ uint16_t entry_cnt;
+ uint16_t cpu;
+ uint16_t idx;
+ uint16_t state;
+ uint16_t flag;
+ void *payload;
+ struct rqb_dmabuf *rqb_buffer;
+ struct lpfc_nvmet_ctxbuf *ctxbuf;
+ struct lpfc_sli4_hdw_queue *hdwq;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint64_t ts_isr_cmd;
+ uint64_t ts_cmd_nvme;
+ uint64_t ts_nvme_data;
+ uint64_t ts_data_wqput;
+ uint64_t ts_isr_data;
+ uint64_t ts_data_nvme;
+ uint64_t ts_nvme_status;
+ uint64_t ts_status_wqput;
+ uint64_t ts_isr_status;
+ uint64_t ts_status_nvme;
+#endif
+};
+
+
+/* routines found in lpfc_nvme.c */
+int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct nvmefc_ls_req *pnvme_lsreq,
+ void (*gen_req_cmp)(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe));
+void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
+int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq);
+
+/* routines found in lpfc_nvmet.c */
+int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
+ uint16_t xri);
+int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
+ struct nvmefc_ls_rsp *ls_rsp,
+ void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe));
+void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
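By way of illustration, the prototypes above take the completion handler as a parameter so the host side (lpfc_nvme.c) and target side (lpfc_nvmet.c) can share one LS issue path while keeping their own statistics and cleanup in the callback. A minimal userspace analog of that pattern, all names hypothetical:

#include <stdio.h>

typedef void (*ls_cmp_fn)(int status);

/* Shared core: issues the LS and later invokes whichever completion
 * handler the wrapper passed in (here, invoked inline for brevity). */
static int issue_ls(const char *who, ls_cmp_fn cmp)
{
	printf("%s: LS issued\n", who);
	cmp(0);
	return 0;
}

static void host_ls_cmp(int status)
{
	printf("  host-side cleanup, status %d\n", status);
}

static void tgt_ls_cmp(int status)
{
	printf("  target-side cleanup, status %d\n", status);
}

int main(void)
{
	issue_ls("host wrapper", host_ls_cmp);
	issue_ls("target wrapper", tgt_ls_cmp);
	return 0;
}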
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 565419bf8d74..88760416a8cb 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -36,10 +36,6 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme.h>
-#include <linux/nvme-fc-driver.h>
-#include <linux/nvme-fc.h>
-
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -50,29 +46,25 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *,
+ struct lpfc_async_xchg_ctx *,
dma_addr_t rspbuf,
uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *);
+ struct lpfc_async_xchg_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *,
+ struct lpfc_async_xchg_ctx *,
uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *,
+ struct lpfc_async_xchg_ctx *,
uint32_t, uint16_t);
-static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
- struct lpfc_nvmet_rcv_ctx *,
- uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_nvmet_rcv_ctx *);
+ struct lpfc_async_xchg_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
@@ -221,10 +213,10 @@ lpfc_nvmet_cmd_template(void)
}
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
-static struct lpfc_nvmet_rcv_ctx *
+static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
unsigned long iflag;
bool found = false;
@@ -243,10 +235,10 @@ lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
return NULL;
}
-static struct lpfc_nvmet_rcv_ctx *
+static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
unsigned long iflag;
bool found = false;
@@ -267,7 +259,8 @@ lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
#endif
static void
-lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
+lpfc_nvmet_defer_release(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *ctxp)
{
lockdep_assert_held(&ctxp->ctxlock);
@@ -275,10 +268,10 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
ctxp->oxid, ctxp->flag);
- if (ctxp->flag & LPFC_NVMET_CTX_RLS)
+ if (ctxp->flag & LPFC_NVME_CTX_RLS)
return;
- ctxp->flag |= LPFC_NVMET_CTX_RLS;
+ ctxp->flag |= LPFC_NVME_CTX_RLS;
spin_lock(&phba->sli4_hba.t_active_list_lock);
list_del(&ctxp->list);
spin_unlock(&phba->sli4_hba.t_active_list_lock);
@@ -288,6 +281,53 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
}
/**
+ * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
+ * transmission of an NVME LS response.
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. The function frees memory resources used for the command
+ * used to send the NVME LS RSP.
+ **/
+void
+__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
+ struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
+ uint32_t status, result;
+
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ result = wcqe->parameter;
+
+ if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
+ "6410 NVMEx LS cmpl state mismatch IO x%x: "
+ "%d %d\n",
+ axchg->oxid, axchg->state, axchg->entry_cnt);
+ }
+
+ lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
+ axchg->oxid, status, result);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
+ status, result, axchg->oxid);
+
+ lpfc_nlp_put(cmdwqe->context1);
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
+ lpfc_sli_release_iocbq(phba, cmdwqe);
+ ls_rsp->done(ls_rsp);
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
+ status, axchg->oxid);
+ kfree(axchg);
+}
+
+/**
* lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
@@ -295,33 +335,23 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME LS commands
- * The function frees memory resources used for the NVME commands.
+ * The function updates any states and statistics, then calls the
+ * generic completion handler to free resources.
**/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_nvmet_tgtport *tgtp;
- struct nvmefc_tgt_ls_req *rsp;
- struct lpfc_nvmet_rcv_ctx *ctxp;
uint32_t status, result;
- status = bf_get(lpfc_wcqe_c_status, wcqe);
- result = wcqe->parameter;
- ctxp = cmdwqe->context2;
-
- if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6410 NVMET LS cmpl state mismatch IO x%x: "
- "%d %d\n",
- ctxp->oxid, ctxp->state, ctxp->entry_cnt);
- }
-
if (!phba->targetport)
- goto out;
+ goto finish;
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ result = wcqe->parameter;
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (tgtp) {
if (status) {
atomic_inc(&tgtp->xmt_ls_rsp_error);
@@ -334,22 +364,8 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
}
-out:
- rsp = &ctxp->ctx.ls_req;
-
- lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
- ctxp->oxid, status, result);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
- status, result, ctxp->oxid);
-
- lpfc_nlp_put(cmdwqe->context1);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
- lpfc_sli_release_iocbq(phba, cmdwqe);
- rsp->done(rsp);
- kfree(ctxp);
+finish:
+ __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
}
/**
@@ -369,7 +385,7 @@ void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
- struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+ struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct rqb_dmabuf *nvmebuf;
@@ -378,7 +394,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
int cpu;
unsigned long iflag;
- if (ctxp->state == LPFC_NVMET_STE_FREE) {
+ if (ctxp->state == LPFC_NVME_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6411 NVMET free, already free IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
@@ -390,8 +406,8 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
/* check if freed in another path whilst acquiring lock */
if (nvmebuf) {
ctxp->rqb_buffer = NULL;
- if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
- ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
+ if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
+ ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
nvmebuf);
@@ -404,7 +420,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
}
- ctxp->state = LPFC_NVMET_STE_FREE;
+ ctxp->state = LPFC_NVME_STE_FREE;
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
if (phba->sli4_hba.nvmet_io_wait_cnt) {
@@ -421,14 +437,14 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
size = nvmebuf->bytes_recv;
sid = sli4_sid_from_fc_hdr(fc_hdr);
- ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+ ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
ctxp->wqeq = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
ctxp->oxid = oxid;
ctxp->sid = sid;
- ctxp->state = LPFC_NVMET_STE_RCV;
+ ctxp->state = LPFC_NVME_STE_RCV;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
@@ -453,7 +469,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
/* Indicate that a replacement buffer has been posted */
spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
+ ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
@@ -495,7 +511,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp)
+ struct lpfc_async_xchg_ctx *ctxp)
{
uint64_t seg1, seg2, seg3, seg4, seg5;
uint64_t seg6, seg7, seg8, seg9, seg10;
@@ -704,16 +720,16 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
{
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int id;
#endif
ctxp = cmdwqe->context2;
- ctxp->flag &= ~LPFC_NVMET_IO_INP;
+ ctxp->flag &= ~LPFC_NVME_IO_INP;
- rsp = &ctxp->ctx.fcp_req;
+ rsp = &ctxp->hdlrctx.fcp_req;
op = rsp->op;
status = bf_get(lpfc_wcqe_c_status, wcqe);
@@ -740,13 +756,13 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* pick up SLI4 exhange busy condition */
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
- ctxp->flag |= LPFC_NVMET_XBUSY;
+ ctxp->flag |= LPFC_NVME_XBUSY;
logerr |= LOG_NVME_ABTS;
if (tgtp)
atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
} else {
- ctxp->flag &= ~LPFC_NVMET_XBUSY;
+ ctxp->flag &= ~LPFC_NVME_XBUSY;
}
lpfc_printf_log(phba, KERN_INFO, logerr,
@@ -768,7 +784,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if ((op == NVMET_FCOP_READDATA_RSP) ||
(op == NVMET_FCOP_RSP)) {
/* Sanity check */
- ctxp->state = LPFC_NVMET_STE_DONE;
+ ctxp->state = LPFC_NVME_STE_DONE;
ctxp->entry_cnt++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -826,17 +842,32 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
#endif
}
-static int
-lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_ls_req *rsp)
+/**
+ * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit an
+ * NVME LS rsp for a prior NVME LS request that was received.
+ * @axchg: pointer to exchange context for the NVME LS request the response
+ * is for.
+ * @ls_rsp: pointer to the transport LS RSP that is to be sent
+ * @xmt_ls_rsp_cmp: completion routine to call when the RSP transmit is done
+ *
+ * This routine is used to format and send a WQE to transmit an NVME LS
+ * Response. The response is for a prior NVME LS request that was
+ * received and posted to the transport.
+ *
+ * Returns:
+ * 0 : if the response was successfully transmitted
+ * non-zero : if the response failed to transmit, in the form of -Exxx.
+ **/
+int
+__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
+ struct nvmefc_ls_rsp *ls_rsp,
+ void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe))
{
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
- struct lpfc_hba *phba = ctxp->phba;
- struct hbq_dmabuf *nvmebuf =
- (struct hbq_dmabuf *)ctxp->rqb_buffer;
+ struct lpfc_hba *phba = axchg->phba;
+ struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
struct lpfc_iocbq *nvmewqeq;
- struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
struct lpfc_dmabuf dmabuf;
struct ulp_bde64 bpl;
int rc;
@@ -844,34 +875,28 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return -ENODEV;
- if (phba->pport->load_flag & FC_UNLOADING)
- return -ENODEV;
-
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
+ "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
- if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
- (ctxp->entry_cnt != 1)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6412 NVMET LS rsp state mismatch "
+ if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
+ "6412 NVMEx LS rsp state mismatch "
"oxid x%x: %d %d\n",
- ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ axchg->oxid, axchg->state, axchg->entry_cnt);
+ return -EALREADY;
}
- ctxp->state = LPFC_NVMET_STE_LS_RSP;
- ctxp->entry_cnt++;
+ axchg->state = LPFC_NVME_STE_LS_RSP;
+ axchg->entry_cnt++;
- nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
- rsp->rsplen);
+ nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
+ ls_rsp->rsplen);
if (nvmewqeq == NULL) {
- atomic_inc(&nvmep->xmt_ls_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6150 LS Drop IO x%x: Prep\n",
- ctxp->oxid);
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- atomic_inc(&nvmep->xmt_ls_abort);
- lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
- ctxp->sid, ctxp->oxid);
- return -ENOMEM;
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS,
+ "6150 NVMEx LS Drop Rsp x%x: Prep\n",
+ axchg->oxid);
+ rc = -ENOMEM;
+ goto out_free_buf;
}
/* Save numBdes for bpl2sgl */
@@ -881,39 +906,106 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
dmabuf.virt = &bpl;
bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
- bpl.tus.f.bdeSize = rsp->rsplen;
+ bpl.tus.f.bdeSize = ls_rsp->rsplen;
bpl.tus.f.bdeFlags = 0;
bpl.tus.w = le32_to_cpu(bpl.tus.w);
+ /*
+ * Note: although we're using stack space for the dmabuf, the
+ * call to lpfc_sli4_issue_wqe is synchronous, so it will not
+ * be referenced after it returns to this routine.
+ */
- nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
+ nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
nvmewqeq->iocb_cmpl = NULL;
- nvmewqeq->context2 = ctxp;
+ nvmewqeq->context2 = axchg;
- lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
- ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
+ lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
+ axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
+
+ rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
+
+ /* clear to be sure there's no reference */
+ nvmewqeq->context3 = NULL;
- rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
if (rc == WQE_SUCCESS) {
/*
* Okay to repost buffer here, but wait till cmpl
* before freeing ctxp and iocbq.
*/
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- atomic_inc(&nvmep->xmt_ls_rsp);
return 0;
}
- /* Give back resources */
- atomic_inc(&nvmep->xmt_ls_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6151 LS Drop IO x%x: Issue %d\n",
- ctxp->oxid, rc);
+
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS,
+ "6151 NVMEx LS RSP x%x: failed to transmit %d\n",
+ axchg->oxid, rc);
+
+ rc = -ENXIO;
lpfc_nlp_put(nvmewqeq->context1);
+out_free_buf:
+ /* Give back resources */
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- atomic_inc(&nvmep->xmt_ls_abort);
- lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
- return -ENXIO;
+
+ /*
+ * As transport doesn't track completions of responses, if the rsp
+ * fails to send, the transport will effectively ignore the rsp
+ * and consider the LS done. However, the driver has an active
+ * exchange open for the LS - so be sure to abort the exchange
+ * if the response isn't sent.
+ */
+ lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
+ return rc;
+}
+
+/**
+ * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
+ * @tgtport: pointer to target port that the NVME LS is to be transmitted from.
+ * @ls_rsp: pointer to the transport LS RSP that is to be sent
+ *
+ * Driver registers this routine to transmit responses for received NVME
+ * LS requests.
+ *
+ * This routine is used to format and send a WQE to transmit an NVME LS
+ * Response. The ls_rsp is used to reverse-map the LS to the original
+ * NVME LS request sequence, which provides addressing information for
+ * the remote port the LS is to be sent to, as well as the exchange id
+ * that the LS is bound to.
+ *
+ * Returns:
+ * 0 : if the response was successfully transmitted
+ * non-zero : if the response failed to transmit, in the form of -Exxx.
+ **/
+static int
+lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_ls_rsp *ls_rsp)
+{
+ struct lpfc_async_xchg_ctx *axchg =
+ container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
+ struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
+ int rc;
+
+ if (axchg->phba->pport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
+ rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
+
+ if (rc) {
+ atomic_inc(&nvmep->xmt_ls_drop);
+ /*
+ * unless the failure is due to having already sent
+ * the response, an abort will be generated for the
+ * exchange if the rsp can't be sent.
+ */
+ if (rc != -EALREADY)
+ atomic_inc(&nvmep->xmt_ls_abort);
+ return rc;
+ }
+
+ atomic_inc(&nvmep->xmt_ls_rsp);
+ return 0;
}
static int
@@ -921,8 +1013,8 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *rsp)
{
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_async_xchg_ctx *ctxp =
+ container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
struct lpfc_queue *wq;
struct lpfc_iocbq *nvmewqeq;
@@ -938,11 +1030,6 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
- if (phba->pport->load_flag & FC_UNLOADING) {
- rc = -ENODEV;
- goto aerr;
- }
-
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
@@ -968,8 +1055,8 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
#endif
/* Sanity check */
- if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
- (ctxp->state == LPFC_NVMET_STE_ABORT)) {
+ if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
+ (ctxp->state == LPFC_NVME_STE_ABORT)) {
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6102 IO oxid x%x aborted\n",
@@ -997,7 +1084,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
ctxp->oxid, rsp->op, rsp->rsplen);
- ctxp->flag |= LPFC_NVMET_IO_INP;
+ ctxp->flag |= LPFC_NVME_IO_INP;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1016,7 +1103,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQ was full, so queue nvmewqeq to be sent after
* WQE release CQE
*/
- ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
+ ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
wq = ctxp->hdwq->io_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -1056,8 +1143,8 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *req)
{
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_async_xchg_ctx *ctxp =
+ container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
struct lpfc_queue *wq;
unsigned long flags;
@@ -1065,9 +1152,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
- if (phba->pport->load_flag & FC_UNLOADING)
- return;
-
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[0];
@@ -1085,13 +1169,13 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
/* Since iaab/iaar are NOT set, we need to check
* if the firmware is in process of aborting IO
*/
- if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
+ if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return;
}
- ctxp->flag |= LPFC_NVMET_ABORT_OP;
+ ctxp->flag |= LPFC_NVME_ABORT_OP;
- if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
+ if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
@@ -1101,11 +1185,11 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
}
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
- /* An state of LPFC_NVMET_STE_RCV means we have just received
+ /* A state of LPFC_NVME_STE_RCV means we have just received
* the NVME command and have not started processing it.
* (by issuing any IO WQEs on this exchange yet)
*/
- if (ctxp->state == LPFC_NVMET_STE_RCV)
+ if (ctxp->state == LPFC_NVME_STE_RCV)
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
else
@@ -1118,26 +1202,26 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *rsp)
{
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_async_xchg_ctx *ctxp =
+ container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
unsigned long flags;
bool aborting = false;
spin_lock_irqsave(&ctxp->ctxlock, flags);
- if (ctxp->flag & LPFC_NVMET_XBUSY)
+ if (ctxp->flag & LPFC_NVME_XBUSY)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6027 NVMET release with XBUSY flag x%x"
" oxid x%x\n",
ctxp->flag, ctxp->oxid);
- else if (ctxp->state != LPFC_NVMET_STE_DONE &&
- ctxp->state != LPFC_NVMET_STE_ABORT)
+ else if (ctxp->state != LPFC_NVME_STE_DONE &&
+ ctxp->state != LPFC_NVME_STE_ABORT)
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6413 NVMET release bad state %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
- if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
- (ctxp->flag & LPFC_NVMET_XBUSY)) {
+ if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
+ (ctxp->flag & LPFC_NVME_XBUSY)) {
aborting = true;
/* let the abort path do the real release */
lpfc_nvmet_defer_release(phba, ctxp);
@@ -1148,7 +1232,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
ctxp->state, aborting);
atomic_inc(&lpfc_nvmep->xmt_fcp_release);
- ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
+ ctxp->flag &= ~LPFC_NVME_TNOTIFY;
if (aborting)
return;
@@ -1161,8 +1245,8 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *rsp)
{
struct lpfc_nvmet_tgtport *tgtp;
- struct lpfc_nvmet_rcv_ctx *ctxp =
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_async_xchg_ctx *ctxp =
+ container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
struct lpfc_hba *phba = ctxp->phba;
unsigned long iflag;
@@ -1190,6 +1274,116 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
+/**
+ * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
+ * @phba: Pointer to HBA context object
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * This function is the completion handler for NVME LS requests.
+ * The function updates any states and statistics, then calls the
+ * generic completion handler to finish completion of the request.
+ **/
+static void
+lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
+}
+
+/**
+ * lpfc_nvmet_ls_req - Issue an NVME Link Service request
+ * @targetport: pointer to target instance registered with the nvmet transport.
+ * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
+ * Driver sets this value to the ndlp pointer.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
+ *
+ * Driver registers this routine to handle any link service request
+ * from the nvme_fc transport to a remote nvme-aware port.
+ *
+ * Return value :
+ * 0 - Success
+ * non-zero: various error codes, in the form of -Exxx
+ **/
+static int
+lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
+ void *hosthandle,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ int ret;
+ u32 hstate;
+
+ if (!lpfc_nvmet)
+ return -EINVAL;
+
+ phba = lpfc_nvmet->phba;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return -EINVAL;
+
+ hstate = atomic_read(&lpfc_nvmet->state);
+ if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
+ return -EACCES;
+
+ ndlp = (struct lpfc_nodelist *)hosthandle;
+
+ ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
+ lpfc_nvmet_ls_req_cmp);
+
+ return ret;
+}
+
+/**
+ * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
+ * @targetport: Transport targetport that the LS was issued from.
+ * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
+ * Driver sets this value to the ndlp pointer.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS to be aborted
+ *
+ * Driver registers this routine to abort an NVME LS request that is
+ * in progress (from the transport's perspective).
+ **/
+static void
+lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
+ void *hosthandle,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ int ret;
+
+ phba = lpfc_nvmet->phba;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return;
+
+ ndlp = (struct lpfc_nodelist *)hosthandle;
+
+ ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
+ if (!ret)
+ atomic_inc(&lpfc_nvmet->xmt_ls_abort);
+}
+
+static void
+lpfc_nvmet_host_release(void *hosthandle)
+{
+ struct lpfc_nodelist *ndlp = hosthandle;
+ struct lpfc_hba *phba = NULL;
+ struct lpfc_nvmet_tgtport *tgtp;
+
+ phba = ndlp->phba;
+ if (!phba->targetport || !phba->targetport->private)
+ return;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6202 NVMET XPT releasing hosthandle x%px\n",
+ hosthandle);
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_set(&tgtp->state, 0);
+}
+
static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
@@ -1214,6 +1408,9 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
.defer_rcv = lpfc_nvmet_defer_rcv,
.discovery_event = lpfc_nvmet_discovery_event,
+ .ls_req = lpfc_nvmet_ls_req,
+ .ls_abort = lpfc_nvmet_ls_abort,
+ .host_release = lpfc_nvmet_host_release,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1224,6 +1421,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
+ .lsrqst_priv_sz = 0,
};
static void
@@ -1368,7 +1566,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
return -ENOMEM;
}
ctx_buf->context->ctxbuf = ctx_buf;
- ctx_buf->context->state = LPFC_NVMET_STE_FREE;
+ ctx_buf->context->state = LPFC_NVME_STE_FREE;
ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
if (!ctx_buf->iocbq) {
@@ -1568,7 +1766,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
- struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *req = NULL;
struct lpfc_nodelist *ndlp;
@@ -1599,12 +1797,12 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
- if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
- !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
+ if (ctxp->flag & LPFC_NVME_CTX_RLS &&
+ !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
list_del_init(&ctxp->list);
released = true;
}
- ctxp->flag &= ~LPFC_NVMET_XBUSY;
+ ctxp->flag &= ~LPFC_NVME_XBUSY;
spin_unlock(&ctxp->ctxlock);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
@@ -1646,15 +1844,15 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
rxid);
spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->flag |= LPFC_NVMET_ABTS_RCV;
- ctxp->state = LPFC_NVMET_STE_ABORT;
+ ctxp->flag |= LPFC_NVME_ABTS_RCV;
+ ctxp->state = LPFC_NVME_STE_ABORT;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
lpfc_nvmeio_data(phba,
"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
xri, raw_smp_processor_id(), 0);
- req = &ctxp->ctx.fcp_req;
+ req = &ctxp->hdlrctx.fcp_req;
if (req)
nvmet_fc_rcv_fcp_abort(phba->targetport, req);
}
@@ -1667,7 +1865,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_hba *phba = vport->phba;
- struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
struct nvmefc_tgt_fcp_req *rsp;
uint32_t sid;
uint16_t oxid, xri;
@@ -1690,7 +1888,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
spin_unlock_irqrestore(&phba->hbalock, iflag);
spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+ ctxp->flag |= LPFC_NVME_ABTS_RCV;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
lpfc_nvmeio_data(phba,
@@ -1700,7 +1898,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
- rsp = &ctxp->ctx.fcp_req;
+ rsp = &ctxp->hdlrctx.fcp_req;
nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
/* Respond with BA_ACC accordingly */
@@ -1759,7 +1957,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
xri = ctxp->ctxbuf->sglq->sli4_xritag;
spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
+ ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
lpfc_nvmeio_data(phba,
@@ -1771,10 +1969,10 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
"flag x%x state x%x\n",
ctxp->oxid, xri, ctxp->flag, ctxp->state);
- if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
+ if (ctxp->flag & LPFC_NVME_TNOTIFY) {
/* Notify the transport */
nvmet_fc_rcv_fcp_abort(phba->targetport,
- &ctxp->ctx.fcp_req);
+ &ctxp->hdlrctx.fcp_req);
} else {
cancel_work_sync(&ctxp->ctxbuf->defer_work);
spin_lock_irqsave(&ctxp->ctxlock, iflag);
@@ -1802,7 +2000,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
static void
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
- struct lpfc_nvmet_rcv_ctx *ctxp)
+ struct lpfc_async_xchg_ctx *ctxp)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *nvmewqeq;
@@ -1853,7 +2051,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *nvmewqeq;
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
unsigned long iflags;
int rc;
@@ -1867,7 +2065,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
+ ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
if (rc == -EBUSY) {
@@ -1879,7 +2077,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
- if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
+ if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
ctxp->ts_status_wqput = ktime_get_ns();
else
ctxp->ts_data_wqput = ktime_get_ns();
@@ -1926,114 +2124,61 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
}
/**
- * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
+ * lpfc_nvmet_handle_lsreq - Process an NVME LS request
* @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
- * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ * @axchg: pointer to exchange context for the NVME LS request
*
- * This routine is used for processing the WQE associated with a unsolicited
- * event. It first determines whether there is an existing ndlp that matches
- * the DID from the unsolicited WQE. If not, it will create a new one with
- * the DID from the unsolicited WQE. The ELS command from the unsolicited
- * WQE is then used to invoke the proper routine and to set up proper state
- * of the discovery state machine.
- **/
-static void
-lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct hbq_dmabuf *nvmebuf)
+ * This routine is used for processing an asynchronously received NVME LS
+ * request. Any remaining validation is done and the LS is then forwarded
+ * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
+ *
+ * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
+ * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
+ * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
+ *
+ * Returns 0 if LS was handled and delivered to the transport
+ * Returns 1 if LS failed to be handled and should be dropped
+ */
+int
+lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
- struct lpfc_nvmet_tgtport *tgtp;
- struct fc_frame_header *fc_hdr;
- struct lpfc_nvmet_rcv_ctx *ctxp;
- uint32_t *payload;
- uint32_t size, oxid, sid, rc;
-
-
- if (!nvmebuf || !phba->targetport) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO\n");
- oxid = 0;
- size = 0;
- sid = 0;
- ctxp = NULL;
- goto dropit;
- }
-
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
-
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- payload = (uint32_t *)(nvmebuf->dbuf.virt);
- size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
- sid = sli4_sid_from_fc_hdr(fc_hdr);
+ struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
+ uint32_t *payload = axchg->payload;
+ int rc;
- ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
- if (ctxp == NULL) {
- atomic_inc(&tgtp->rcv_ls_req_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6155 LS Drop IO x%x: Alloc\n",
- oxid);
-dropit:
- lpfc_nvmeio_data(phba, "NVMET LS DROP: "
- "xri x%x sz %d from %06x\n",
- oxid, size, sid);
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- return;
- }
- ctxp->phba = phba;
- ctxp->size = size;
- ctxp->oxid = oxid;
- ctxp->sid = sid;
- ctxp->wqeq = NULL;
- ctxp->state = LPFC_NVMET_STE_LS_RCV;
- ctxp->entry_cnt = 1;
- ctxp->rqb_buffer = (void *)nvmebuf;
- ctxp->hdwq = &phba->sli4_hba.hdwq[0];
+ atomic_inc(&tgtp->rcv_ls_req_in);
- lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
- oxid, size, sid);
/*
- * The calling sequence should be:
- * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
- * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
+ * Driver passes the ndlp as the hosthandle argument allowing
+ * the transport to generate LS requests for any associations
+ * that are created.
*/
- atomic_inc(&tgtp->rcv_ls_req_in);
- rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
- payload, size);
+ rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
+ axchg->payload, axchg->size);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
- "%08x %08x %08x\n", size, rc,
+ "%08x %08x %08x\n", axchg->size, rc,
*payload, *(payload+1), *(payload+2),
*(payload+3), *(payload+4), *(payload+5));
- if (rc == 0) {
+ if (!rc) {
atomic_inc(&tgtp->rcv_ls_req_out);
- return;
+ return 0;
}
- lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
- oxid, size, sid);
-
atomic_inc(&tgtp->rcv_ls_req_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
- ctxp->oxid, rc);
-
- /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-
- atomic_inc(&tgtp->xmt_ls_abort);
- lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
+ return 1;
}
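As an aside, the rcv_ls_req_* counters above keep a simple invariant: every received LS bumps rcv_ls_req_in before the transport handoff, then exactly one of rcv_ls_req_out (accepted) or rcv_ls_req_drop (rejected), so in == out + drop once all requests are dispatched. A toy model of that accounting, with illustrative names:

#include <stdio.h>

struct ls_stats { long in, out, drop; };

/* Mirrors lpfc_nvmet_handle_lsreq(): 0 means delivered to the
 * transport, non-zero means the caller should drop the LS. */
static int handle_ls(struct ls_stats *s, int transport_rc)
{
	s->in++;
	if (transport_rc == 0) {
		s->out++;
		return 0;
	}
	s->drop++;
	return 1;
}

int main(void)
{
	struct ls_stats s = { 0, 0, 0 };

	handle_ls(&s, 0);
	handle_ls(&s, -1);
	handle_ls(&s, 0);
	printf("in=%ld out=%ld drop=%ld\n", s.in, s.out, s.drop);
	return 0;
}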
static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
- struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+ struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
struct lpfc_hba *phba = ctxp->phba;
struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
struct lpfc_nvmet_tgtport *tgtp;
@@ -2054,7 +2199,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
return;
}
- if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
+ if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6324 IO oxid x%x aborted\n",
ctxp->oxid);
@@ -2063,7 +2208,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
payload = (uint32_t *)(nvmebuf->dbuf.virt);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- ctxp->flag |= LPFC_NVMET_TNOTIFY;
+ ctxp->flag |= LPFC_NVME_TNOTIFY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_isr_cmd)
ctxp->ts_cmd_nvme = ktime_get_ns();
@@ -2077,13 +2222,13 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
* A buffer has already been reposted for this IO, so just free
* the nvmebuf.
*/
- rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+ rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
payload, ctxp->size);
/* Process FCP command */
if (rc == 0) {
atomic_inc(&tgtp->rcv_fcp_cmd_out);
spin_lock_irqsave(&ctxp->ctxlock, iflags);
- if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
+ if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
(nvmebuf != ctxp->rqb_buffer)) {
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
return;
@@ -2102,7 +2247,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
atomic_inc(&tgtp->rcv_fcp_cmd_out);
atomic_inc(&tgtp->defer_fod);
spin_lock_irqsave(&ctxp->ctxlock, iflags);
- if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+ if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
return;
}
@@ -2117,7 +2262,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
return;
}
- ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
+ ctxp->flag &= ~LPFC_NVME_TNOTIFY;
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -2224,7 +2369,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
uint64_t isr_timestamp,
uint8_t cqflag)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_ctxbuf *ctx_buf;
@@ -2306,11 +2451,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
sid = sli4_sid_from_fc_hdr(fc_hdr);
- ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+ ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
- if (ctxp->state != LPFC_NVMET_STE_FREE) {
+ if (ctxp->state != LPFC_NVME_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6414 NVMET Context corrupt %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
@@ -2322,7 +2467,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->oxid = oxid;
ctxp->sid = sid;
ctxp->idx = idx;
- ctxp->state = LPFC_NVMET_STE_RCV;
+ ctxp->state = LPFC_NVME_STE_RCV;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
@@ -2369,40 +2514,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
}
/**
- * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
- * @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
- * @nvmebuf: pointer to received nvme data structure.
- *
- * This routine is used to process an unsolicited event received from a SLI
- * (Service Level Interface) ring. The actual processing of the data buffer
- * associated with the unsolicited event is done by invoking the routine
- * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the
- * SLI RQ on which the unsolicited event was received.
- **/
-void
-lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *piocb)
-{
- struct lpfc_dmabuf *d_buf;
- struct hbq_dmabuf *nvmebuf;
-
- d_buf = piocb->context2;
- nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
-
- if (!nvmebuf) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "3015 LS Drop IO\n");
- return;
- }
- if (phba->nvmet_support == 0) {
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- return;
- }
- lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
-}
-
-/**
* lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
* @phba: pointer to lpfc hba data structure.
* @idx: relative index of MRQ vector
@@ -2462,7 +2573,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
**/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_async_xchg_ctx *ctxp,
dma_addr_t rspbuf, uint16_t rspsize)
{
struct lpfc_nodelist *ndlp;
@@ -2584,9 +2695,9 @@ nvme_wqe_free_wqeq_exit:
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp)
+ struct lpfc_async_xchg_ctx *ctxp)
{
- struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
+ struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
struct lpfc_nvmet_tgtport *tgtp;
struct sli4_sge *sgl;
struct lpfc_nodelist *ndlp;
@@ -2647,9 +2758,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
}
/* Sanity check */
- if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
+ if (((ctxp->state == LPFC_NVME_STE_RCV) &&
(ctxp->entry_cnt == 1)) ||
- (ctxp->state == LPFC_NVMET_STE_DATA)) {
+ (ctxp->state == LPFC_NVME_STE_DATA)) {
wqe = &nvmewqe->wqe;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -2912,7 +3023,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
sgl++;
ctxp->offset += cnt;
}
- ctxp->state = LPFC_NVMET_STE_DATA;
+ ctxp->state = LPFC_NVME_STE_DATA;
ctxp->entry_cnt++;
return nvmewqe;
}
@@ -2931,7 +3042,7 @@ static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
unsigned long flags;
@@ -2941,23 +3052,23 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+ if (ctxp->flag & LPFC_NVME_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->state = LPFC_NVMET_STE_DONE;
+ ctxp->state = LPFC_NVME_STE_DONE;
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
- if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
- !(ctxp->flag & LPFC_NVMET_XBUSY)) {
+ if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
+ !(ctxp->flag & LPFC_NVME_XBUSY)) {
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctxp->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp);
@@ -2981,7 +3092,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
lpfc_sli_release_iocbq(phba, cmdwqe);
/* Since iaab/iaar are NOT set, there is no work left.
- * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
+ * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
* should have been called already.
*/
}
@@ -3000,7 +3111,7 @@ static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
unsigned long flags;
uint32_t result;
@@ -3020,11 +3131,11 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
spin_lock_irqsave(&ctxp->ctxlock, flags);
- if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+ if (ctxp->flag & LPFC_NVME_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
/* Sanity check */
- if (ctxp->state != LPFC_NVMET_STE_ABORT) {
+ if (ctxp->state != LPFC_NVME_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6112 ABTS Wrong state:%d oxid x%x\n",
ctxp->state, ctxp->oxid);
@@ -3033,15 +3144,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
- ctxp->state = LPFC_NVMET_STE_DONE;
- if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
- !(ctxp->flag & LPFC_NVMET_XBUSY)) {
+ ctxp->state = LPFC_NVME_STE_DONE;
+ if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
+ !(ctxp->flag & LPFC_NVME_XBUSY)) {
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctxp->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp);
@@ -3062,7 +3173,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
/* Since iaab/iaar are NOT set, there is no work left.
- * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
+ * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
* should have been called already.
*/
}
@@ -3081,15 +3192,17 @@ static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
ctxp = cmdwqe->context2;
result = wcqe->parameter;
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- atomic_inc(&tgtp->xmt_ls_abort_cmpl);
+ if (phba->nvmet_support) {
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_ls_abort_cmpl);
+ }
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
@@ -3107,7 +3220,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
return;
}
- if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
+ if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6416 NVMET LS abort cmpl state mismatch: "
"oxid x%x: %d %d\n",
@@ -3122,10 +3235,10 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_async_xchg_ctx *ctxp,
uint32_t sid, uint16_t xri)
{
- struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_nvmet_tgtport *tgtp = NULL;
struct lpfc_iocbq *abts_wqeq;
union lpfc_wqe128 *wqe_abts;
struct lpfc_nodelist *ndlp;
@@ -3134,13 +3247,15 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
"6067 ABTS: sid %x xri x%x/x%x\n",
sid, xri, ctxp->wqeq->sli4_xritag);
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (phba->nvmet_support && phba->targetport)
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
ndlp = lpfc_findnode_did(phba->pport, sid);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
- atomic_inc(&tgtp->xmt_abort_rsp_error);
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
@@ -3217,7 +3332,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_async_xchg_ctx *ctxp,
uint32_t sid, uint16_t xri)
{
struct lpfc_nvmet_tgtport *tgtp;
@@ -3244,7 +3359,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* No failure to an ABTS request. */
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
@@ -3258,13 +3373,13 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
"6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
- ctxp->state = LPFC_NVMET_STE_ABORT;
- opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
+ ctxp->state = LPFC_NVME_STE_ABORT;
+ opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3287,7 +3402,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
phba->hba_flag, ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
@@ -3302,7 +3417,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
@@ -3331,7 +3446,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_rsp_error);
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -3343,7 +3458,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_async_xchg_ctx *ctxp,
uint32_t sid, uint16_t xri)
{
struct lpfc_nvmet_tgtport *tgtp;
@@ -3358,14 +3473,14 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->wqeq->hba_wqidx = 0;
}
- if (ctxp->state == LPFC_NVMET_STE_FREE) {
+ if (ctxp->state == LPFC_NVME_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
rc = WQE_BUSY;
goto aerr;
}
- ctxp->state = LPFC_NVMET_STE_ABORT;
+ ctxp->state = LPFC_NVME_STE_ABORT;
ctxp->entry_cnt++;
rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
if (rc == 0)
@@ -3387,13 +3502,13 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
aerr:
spin_lock_irqsave(&ctxp->ctxlock, flags);
- if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
+ if (ctxp->flag & LPFC_NVME_CTX_RLS) {
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctxp->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
- ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
+ ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
@@ -3406,29 +3521,39 @@ aerr:
return 1;
}
-static int
-lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
- struct lpfc_nvmet_rcv_ctx *ctxp,
+/**
+ * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
+ * via async frame receive where the frame is not handled.
+ * @phba: pointer to adapter structure
+ * @ctxp: pointer to the asynchronously received sequence
+ * @sid: address of the remote port to send the ABTS to
+ * @xri: oxid value for the ABTS (other side's exchange id).
+ **/
+int
+lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
+ struct lpfc_async_xchg_ctx *ctxp,
uint32_t sid, uint16_t xri)
{
- struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_nvmet_tgtport *tgtp = NULL;
struct lpfc_iocbq *abts_wqeq;
unsigned long flags;
int rc;
- if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
- (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
- ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+ if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
+ (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
+ ctxp->state = LPFC_NVME_STE_LS_ABORT;
ctxp->entry_cnt++;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6418 NVMET LS abort state mismatch "
"IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
- ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+ ctxp->state = LPFC_NVME_STE_LS_ABORT;
}
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (phba->nvmet_support && phba->targetport)
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+
if (!ctxp->wqeq) {
/* Issue ABTS for this WQE based on iotag */
ctxp->wqeq = lpfc_sli_get_iocbq(phba);
@@ -3455,16 +3580,44 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
- atomic_inc(&tgtp->xmt_abort_unsol);
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_abort_unsol);
return 0;
}
out:
- atomic_inc(&tgtp->xmt_abort_rsp_error);
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
abts_wqeq->context2 = NULL;
abts_wqeq->context3 = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
- kfree(ctxp);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
- return 0;
+ return 1;
+}
+
+/**
+ * lpfc_nvmet_invalidate_host - invalidate connections for an NVME host
+ * @phba: pointer to the driver instance bound to an adapter port.
+ * @ndlp: pointer to an lpfc_nodelist type
+ *
+ * This routine upcalls the nvmet transport to invalidate an NVME
+ * host to which this target instance had active connections.
+ */
+void
+lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS,
+ "6203 Invalidating hosthandle x%px\n",
+ ndlp);
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
+
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	/* Need to get the nvmet_fc_target_port pointer here. */
+ nvmet_fc_invalidate_host(phba->targetport, ndlp);
+#endif
}
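
Paired with the LS receive path above, the hosthandle lifecycle reduces to the following sketch; the transport-side bookkeeping is an assumption, only the two driver calls are taken from this patch:

    /* Receive: the ndlp doubles as the opaque hosthandle the transport
     * will use for any LS requests it originates later. */
    rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp,
                             &axchg->ls_rsp, axchg->payload, axchg->size);

    /* Teardown: the same handle identifies which host's associations
     * the transport must invalidate. */
    nvmet_fc_invalidate_host(phba->targetport, ndlp);
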
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
deleted file mode 100644
index b80b1639b9a7..000000000000
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*******************************************************************
- * This file is part of the Emulex Linux Device Driver for *
- * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
- * Copyright (C) 2004-2016 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.broadcom.com *
- * Portions Copyright (C) 2004-2005 Christoph Hellwig *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
- ********************************************************************/
-
-#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
-#define LPFC_NVMET_RQE_MIN_POST 128
-#define LPFC_NVMET_RQE_DEF_POST 512
-#define LPFC_NVMET_RQE_DEF_COUNT 2048
-#define LPFC_NVMET_SUCCESS_LEN 12
-
-#define LPFC_NVMET_MRQ_AUTO 0
-#define LPFC_NVMET_MRQ_MAX 16
-
-#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC)
-
-/* Used for NVME Target */
-struct lpfc_nvmet_tgtport {
- struct lpfc_hba *phba;
- struct completion *tport_unreg_cmp;
-
- /* Stats counters - lpfc_nvmet_unsol_ls_buffer */
- atomic_t rcv_ls_req_in;
- atomic_t rcv_ls_req_out;
- atomic_t rcv_ls_req_drop;
- atomic_t xmt_ls_abort;
- atomic_t xmt_ls_abort_cmpl;
-
- /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
- atomic_t xmt_ls_rsp;
- atomic_t xmt_ls_drop;
-
- /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
- atomic_t xmt_ls_rsp_error;
- atomic_t xmt_ls_rsp_aborted;
- atomic_t xmt_ls_rsp_xb_set;
- atomic_t xmt_ls_rsp_cmpl;
-
- /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
- atomic_t rcv_fcp_cmd_in;
- atomic_t rcv_fcp_cmd_out;
- atomic_t rcv_fcp_cmd_drop;
- atomic_t rcv_fcp_cmd_defer;
- atomic_t xmt_fcp_release;
-
- /* Stats counters - lpfc_nvmet_xmt_fcp_op */
- atomic_t xmt_fcp_drop;
- atomic_t xmt_fcp_read_rsp;
- atomic_t xmt_fcp_read;
- atomic_t xmt_fcp_write;
- atomic_t xmt_fcp_rsp;
-
- /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
- atomic_t xmt_fcp_rsp_xb_set;
- atomic_t xmt_fcp_rsp_cmpl;
- atomic_t xmt_fcp_rsp_error;
- atomic_t xmt_fcp_rsp_aborted;
- atomic_t xmt_fcp_rsp_drop;
-
- /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
- atomic_t xmt_fcp_xri_abort_cqe;
- atomic_t xmt_fcp_abort;
- atomic_t xmt_fcp_abort_cmpl;
- atomic_t xmt_abort_sol;
- atomic_t xmt_abort_unsol;
- atomic_t xmt_abort_rsp;
- atomic_t xmt_abort_rsp_error;
-
- /* Stats counters - defer IO */
- atomic_t defer_ctx;
- atomic_t defer_fod;
- atomic_t defer_wqfull;
-};
-
-struct lpfc_nvmet_ctx_info {
- struct list_head nvmet_ctx_list;
- spinlock_t nvmet_ctx_list_lock; /* lock per CPU */
- struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
- struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
- uint16_t nvmet_ctx_list_cnt;
- char pad[16]; /* pad to a cache-line */
-};
-
-/* This retrieves the context info associated with the specified cpu / mrq */
-#define lpfc_get_ctx_list(phba, cpu, mrq) \
- (phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
-
-struct lpfc_nvmet_rcv_ctx {
- union {
- struct nvmefc_tgt_ls_req ls_req;
- struct nvmefc_tgt_fcp_req fcp_req;
- } ctx;
- struct list_head list;
- struct lpfc_hba *phba;
- struct lpfc_iocbq *wqeq;
- struct lpfc_iocbq *abort_wqeq;
- spinlock_t ctxlock; /* protect flag access */
- uint32_t sid;
- uint32_t offset;
- uint16_t oxid;
- uint16_t size;
- uint16_t entry_cnt;
- uint16_t cpu;
- uint16_t idx;
- uint16_t state;
- /* States */
-#define LPFC_NVMET_STE_LS_RCV 1
-#define LPFC_NVMET_STE_LS_ABORT 2
-#define LPFC_NVMET_STE_LS_RSP 3
-#define LPFC_NVMET_STE_RCV 4
-#define LPFC_NVMET_STE_DATA 5
-#define LPFC_NVMET_STE_ABORT 6
-#define LPFC_NVMET_STE_DONE 7
-#define LPFC_NVMET_STE_FREE 0xff
- uint16_t flag;
-#define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */
-#define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */
-#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
-#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
-#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
-#define LPFC_NVMET_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */
-#define LPFC_NVMET_DEFER_WQFULL 0x40 /* Waiting on a free WQE */
-#define LPFC_NVMET_TNOTIFY 0x80 /* notify transport of abts */
- struct rqb_dmabuf *rqb_buffer;
- struct lpfc_nvmet_ctxbuf *ctxbuf;
- struct lpfc_sli4_hdw_queue *hdwq;
-
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint64_t ts_isr_cmd;
- uint64_t ts_cmd_nvme;
- uint64_t ts_nvme_data;
- uint64_t ts_data_wqput;
- uint64_t ts_isr_data;
- uint64_t ts_data_nvme;
- uint64_t ts_nvme_status;
- uint64_t ts_status_wqput;
- uint64_t ts_isr_status;
- uint64_t ts_status_nvme;
-#endif
-};
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index b6fb665e6ec4..25653baba367 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -39,8 +39,6 @@
#include <asm/set_memory.h>
#endif
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -50,7 +48,6 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
@@ -538,7 +535,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
if (count > eq->EQ_max_eqe)
eq->EQ_max_eqe = count;
- eq->queue_claimed = 0;
+ xchg(&eq->queue_claimed, 0);
rearm_and_exit:
/* Always clear the EQ. */
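
Switching the plain store to xchg() presumably buys the implied full memory barrier, so the cleared flag cannot become visible before the EQE processing that precedes it. A hedged sketch of the claim/release pairing (the claim side is assumed; only the release appears in this hunk):

    if (xchg(&eq->queue_claimed, 1) == 1)
            return 0;       /* another context already owns the queue */

    /* ... process EQEs ... */

    xchg(&eq->queue_claimed, 0);    /* release; the barrier orders the
                                     * EQE writes before the flag clear */
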
@@ -1248,8 +1245,8 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
+ * This function is called to release the driver iocb object
+ * to the iocb pool. The iotag in the iocb object
* does not change for each use of the iocb object. This function
* clears all other fields of the iocb object when it is freed.
* The sqlq structure that holds the xritag and phys and virtual
@@ -1259,7 +1256,8 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* this IO was aborted then the sglq entry it put on the
* lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
* IO has good status or fails for any other reason then the sglq
- * entry is added to the free list (lpfc_els_sgl_list).
+ * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
+ * asserted held in the code path calling this routine.
**/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
@@ -1269,8 +1267,6 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
unsigned long iflag = 0;
struct lpfc_sli_ring *pring;
- lockdep_assert_held(&phba->hbalock);
-
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
@@ -1333,18 +1329,17 @@ out:
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
- * does not change for each use of the iocb object. This function
- * clears all other fields of the iocb object when it is freed.
+ * This function is called to release the driver iocb object to the
+ * iocb pool. The iotag in the iocb object does not change for each
+ * use of the iocb object. This function clears all other fields of
+ * the iocb object when it is freed. The hbalock is asserted held in
+ * the code path calling this routine.
**/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
- lockdep_assert_held(&phba->hbalock);
-
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
@@ -1789,17 +1784,17 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* @nextiocb: Pointer to driver iocb object which need to be
* posted to firmware.
*
- * This function is called with hbalock held to post a new iocb to
- * the firmware. This function copies the new iocb to ring iocb slot and
- * updates the ring pointers. It adds the new iocb to txcmplq if there is
+ * This function is called to post a new iocb to the firmware. This
+ * function copies the new iocb to ring iocb slot and updates the
+ * ring pointers. It adds the new iocb to txcmplq if there is
* a completion call back for this iocb else the function will free the
- * iocb object.
+ * iocb object. The hbalock is asserted held in the code path calling
+ * this routine.
**/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
- lockdep_assert_held(&phba->hbalock);
/*
* Set up an iotag
*/
@@ -2796,6 +2791,123 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
}
/**
+ * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
+ * containing a NVME LS request.
+ * @phba: pointer to lpfc hba data structure.
+ * @piocb: pointer to the iocbq struct representing the sequence starting
+ * frame.
+ *
+ * This routine initially validates the NVME LS, validates there is a login
+ * with the port that sent the LS, and then calls the appropriate nvme host
+ * or target LS request handler.
+ **/
+static void
+lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *d_buf;
+ struct hbq_dmabuf *nvmebuf;
+ struct fc_frame_header *fc_hdr;
+ struct lpfc_async_xchg_ctx *axchg = NULL;
+ char *failwhy = NULL;
+ uint32_t oxid, sid, did, fctl, size;
+ int ret = 1;
+
+ d_buf = piocb->context2;
+
+ nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ fc_hdr = nvmebuf->hbuf.virt;
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
+ did = sli4_did_from_fc_hdr(fc_hdr);
+ fctl = (fc_hdr->fh_f_ctl[0] << 16 |
+ fc_hdr->fh_f_ctl[1] << 8 |
+ fc_hdr->fh_f_ctl[2]);
+ size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
+
+ lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ failwhy = "Driver Unloading";
+ } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+ failwhy = "NVME FC4 Disabled";
+ } else if (!phba->nvmet_support && !phba->pport->localport) {
+ failwhy = "No Localport";
+ } else if (phba->nvmet_support && !phba->targetport) {
+ failwhy = "No Targetport";
+ } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
+ failwhy = "Bad NVME LS R_CTL";
+ } else if (unlikely((fctl & 0x00FF0000) !=
+ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
+ failwhy = "Bad NVME LS F_CTL";
+ } else {
+ axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
+ if (!axchg)
+ failwhy = "No CTX memory";
+ }
+
+ if (unlikely(failwhy)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
+ "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
+ sid, oxid, failwhy);
+ goto out_fail;
+ }
+
+ /* validate the source of the LS is logged in */
+ ndlp = lpfc_findnode_did(phba->pport, sid);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ "6216 NVME Unsol rcv: No ndlp: "
+ "NPort_ID x%x oxid x%x\n",
+ sid, oxid);
+ goto out_fail;
+ }
+
+ axchg->phba = phba;
+ axchg->ndlp = ndlp;
+ axchg->size = size;
+ axchg->oxid = oxid;
+ axchg->sid = sid;
+ axchg->wqeq = NULL;
+ axchg->state = LPFC_NVME_STE_LS_RCV;
+ axchg->entry_cnt = 1;
+ axchg->rqb_buffer = (void *)nvmebuf;
+ axchg->hdwq = &phba->sli4_hba.hdwq[0];
+ axchg->payload = nvmebuf->dbuf.virt;
+ INIT_LIST_HEAD(&axchg->list);
+
+ if (phba->nvmet_support)
+ ret = lpfc_nvmet_handle_lsreq(phba, axchg);
+ else
+ ret = lpfc_nvme_handle_lsreq(phba, axchg);
+
+	/* If zero, the LS was successfully handled. If non-zero, it was not */
+ if (!ret)
+ return;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
+ "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
+ "NVMe%s handler failed %d\n",
+ did, sid, oxid,
+ (phba->nvmet_support) ? "T" : "I", ret);
+
+out_fail:
+
+ /* recycle receive buffer */
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+
+ /* If start of new exchange, abort it */
+ if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
+ ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
+
+ if (ret)
+ kfree(axchg);
+}
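
F_CTL is a 24-bit field carried as three separate bytes in the FC frame header; this handler and the FCP path assemble it identically before testing the sequence bits. Restated as a sketch, with the mask values taken from the checks above:

    u32 fctl = (fc_hdr->fh_f_ctl[0] << 16) |
               (fc_hdr->fh_f_ctl[1] << 8) |
                fc_hdr->fh_f_ctl[2];

    /* a valid unsolicited LS is a complete, single-sequence frame
     * that transfers sequence initiative to the receiver */
    if ((fctl & 0x00FF0000) !=
        (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))
            failwhy = "Bad NVME LS F_CTL";
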
+
+/**
* lpfc_complete_unsol_iocb - Complete an unsolicited sequence
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
@@ -2816,7 +2928,7 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
switch (fch_type) {
case FC_TYPE_NVME:
- lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
+ lpfc_nvme_unsol_ls_handler(phba, saveq);
return 1;
default:
break;
@@ -11170,6 +11282,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* request, this function issues abort out unconditionally. This function is
* called with hbalock held. The function returns 0 when it fails due to
* memory allocation failure or when the command iocb is an abort request.
+ * The hbalock is asserted held in the code path calling this routine.
**/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
@@ -11183,8 +11296,6 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
unsigned long iflags;
struct lpfc_nodelist *ndlp;
- lockdep_assert_held(&phba->hbalock);
-
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
@@ -13694,7 +13805,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
"0369 No entry from completion queue "
"qid=%d\n", cq->queue_id);
- cq->queue_claimed = 0;
+ xchg(&cq->queue_claimed, 0);
rearm_and_exit:
phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
@@ -13981,8 +14092,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Just some basic sanity checks on FCP Command frame */
fctl = (fc_hdr->fh_f_ctl[0] << 16 |
- fc_hdr->fh_f_ctl[1] << 8 |
- fc_hdr->fh_f_ctl[2]);
+ fc_hdr->fh_f_ctl[1] << 8 |
+ fc_hdr->fh_f_ctl[2]);
if (((fctl &
(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
@@ -14275,7 +14386,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
int ecount = 0;
int hba_eqidx;
struct lpfc_eq_intr_info *eqi;
- uint32_t icnt;
/* Get the driver's phba structure from the dev_id */
hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
@@ -14303,11 +14413,12 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- eqi = phba->sli4_hba.eq_info;
- icnt = this_cpu_inc_return(eqi->icnt);
+ eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
+ eqi->icnt++;
+
fpeq->last_cpu = raw_smp_processor_id();
- if (icnt > LPFC_EQD_ISR_TRIGGER &&
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
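
The handler now bumps this CPU's slot of the per-CPU stats directly; in hardirq context the CPU cannot change underneath it, so a plain increment is safe without this_cpu_inc_return(). Roughly:

    struct lpfc_eq_intr_info *eqi;

    eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
    eqi->icnt++;    /* hardirq context: no preemption, plain ++ suffices */
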
@@ -19891,7 +20002,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe)
{
union lpfc_wqe128 *wqe = &pwqe->wqe;
- struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_queue *wq;
struct lpfc_sglq *sglq;
struct lpfc_sli_ring *pring;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 8da7429e385a..4decb53d81c3 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -920,7 +920,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_possible_cpu;
uint16_t num_present_cpu;
- struct cpumask numa_mask;
+ struct cpumask irq_aff_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ca40c47cfbe0..ab0bc26c098d 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.0"
+#define LPFC_DRIVER_VERSION "12.8.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 8443f2f35be2..8f918df631bf 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -302,8 +302,8 @@ static struct pci_driver megaraid_pci_driver = {
// definitions for the device attributes for exporting logical drive number
// for a scsi address (Host, Channel, Id, Lun)
-DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
- NULL);
+static DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
+ NULL);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_shost_attrs[] = {
@@ -312,7 +312,7 @@ static struct device_attribute *megaraid_shost_attrs[] = {
};
-DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
+static DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_sdev_attrs[] = {
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 83d8c4cb1ad5..af2c7a2a9565 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.713.01.00-rc1"
-#define MEGASAS_RELDATE "Dec 27, 2019"
+#define MEGASAS_VERSION "07.714.04.00-rc1"
+#define MEGASAS_RELDATE "Apr 14, 2020"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -511,7 +511,7 @@ union MR_PROGRESS {
*/
struct MR_PD_PROGRESS {
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 rbld:1;
u32 patrol:1;
u32 clear:1;
@@ -537,7 +537,7 @@ struct MR_PD_PROGRESS {
};
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 rbld:1;
u32 patrol:1;
u32 clear:1;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index babe85d7b537..00668335c2af 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -81,7 +81,7 @@ int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
-int rdpq_enable = 1;
+static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
@@ -89,7 +89,7 @@ unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
-unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
@@ -1982,9 +1982,9 @@ static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
if (is_target_prop) {
tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
- if (tgt_device_qd &&
- (tgt_device_qd <= instance->host->can_queue))
- device_qd = tgt_device_qd;
+ if (tgt_device_qd)
+ device_qd = min(instance->host->can_queue,
+ (int)tgt_device_qd);
}
if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
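
The min() form changes behavior when firmware reports a depth larger than the host limit: previously the assignment was skipped entirely and the default depth survived; now the value is clamped. With hypothetical numbers:

    /* can_queue = 64, tgt_device_qd = 128:
     *   old: 128 <= 64 is false, so device_qd keeps its default
     *   new: device_qd = min(64, 128) = 64
     */
    device_qd = min(instance->host->can_queue, (int)tgt_device_qd);
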
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 89c3685f5163..3b3d04d7671f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -85,7 +85,7 @@ u32 mega_mod64(u64 dividend, u32 divisor)
*
* @return quotient
**/
-u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
+static u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
u32 remainder;
u64 d;
@@ -367,7 +367,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
return 1;
}
-u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+static u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
@@ -417,7 +417,7 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
* div_error - Devide error code.
*/
-u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+static u32 mr_spanset_get_span_block(struct megasas_instance *instance,
u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
@@ -642,7 +642,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
}
/* This Function will return Phys arm */
-u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -785,7 +785,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
* span - Span number
* block - Absolute Block number in the physical disk
*/
-u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+static u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u16 stripRef, struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_DRV_RAID_MAP_ALL *map)
@@ -1342,7 +1342,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
}
}
-u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+static u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
struct LD_LOAD_BALANCE_INFO *lbInfo,
struct IO_REQUEST_INFO *io_info,
struct MR_DRV_RAID_MAP_ALL *drv_map)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b2ad96564484..319f241da4b6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -176,7 +176,7 @@ static inline bool megasas_check_same_4gb_region
* megasas_enable_intr_fusion - Enables interrupts
* @regs: MFI register set
*/
-void
+static void
megasas_enable_intr_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
@@ -198,7 +198,7 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
* megasas_disable_intr_fusion - Disables interrupt
* @regs: MFI register set
*/
-void
+static void
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
u32 mask = 0xFFFFFFFF;
@@ -2070,7 +2070,6 @@ static bool
megasas_is_prp_possible(struct megasas_instance *instance,
struct scsi_cmnd *scmd, int sge_count)
{
- int i;
u32 data_length = 0;
struct scatterlist *sg_scmd;
bool build_prp = false;
@@ -2099,63 +2098,6 @@ megasas_is_prp_possible(struct megasas_instance *instance,
build_prp = true;
}
-/*
- * Below code detects gaps/holes in IO data buffers.
- * What does holes/gaps mean?
- * Any SGE except first one in a SGL starts at non NVME page size
- * aligned address OR Any SGE except last one in a SGL ends at
- * non NVME page size boundary.
- *
- * Driver has already informed block layer by setting boundary rules for
- * bio merging done at NVME page size boundary calling kernel API
- * blk_queue_virt_boundary inside slave_config.
- * Still there is possibility of IO coming with holes to driver because of
- * IO merging done by IO scheduler.
- *
- * With SCSI BLK MQ enabled, there will be no IO with holes as there is no
- * IO scheduling so no IO merging.
- *
- * With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and
- * then sending IOs with holes.
- *
- * Though driver can request block layer to disable IO merging by calling-
- * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
- * user may tune sysfs parameter- nomerges again to 0 or 1.
- *
- * If in future IO scheduling is enabled with SCSI BLK MQ,
- * this algorithm to detect holes will be required in driver
- * for SCSI BLK MQ enabled case as well.
- *
- *
- */
- scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
- if ((i != 0) && (i != (sge_count - 1))) {
- if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
- mega_mod64(sg_dma_address(sg_scmd),
- mr_nvme_pg_size)) {
- build_prp = false;
- break;
- }
- }
-
- if ((sge_count > 1) && (i == 0)) {
- if ((mega_mod64((sg_dma_address(sg_scmd) +
- sg_dma_len(sg_scmd)),
- mr_nvme_pg_size))) {
- build_prp = false;
- break;
- }
- }
-
- if ((sge_count > 1) && (i == (sge_count - 1))) {
- if (mega_mod64(sg_dma_address(sg_scmd),
- mr_nvme_pg_size)) {
- build_prp = false;
- break;
- }
- }
- }
-
return build_prp;
}
@@ -4230,7 +4172,7 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
* megasas_refire_mgmt_cmd : Re-fire management commands
* @instance: Controller's soft instance
*/
-void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
+static void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
bool return_ioctl)
{
int j;
@@ -4238,8 +4180,9 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
u16 smid;
- bool refire_cmd = 0;
+ bool refire_cmd = false;
u8 result;
u32 opcode = 0;
@@ -4305,6 +4248,11 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
result = COMPLETE_CMD;
}
+ scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
+ if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
+ result = RETURN_CMD;
+
switch (result) {
case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
@@ -4533,7 +4481,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
if (!timeleft) {
dev_err(&instance->pdev->dev,
"task mgmt type 0x%x timed out\n", type);
- cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
mutex_unlock(&instance->reset_mutex);
rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
mutex_lock(&instance->reset_mutex);
@@ -4713,12 +4660,12 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
"attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
scmd, devhandle);
- mr_device_priv_data->tm_busy = 1;
+ mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, smid,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
mr_device_priv_data);
- mr_device_priv_data->tm_busy = 0;
+ mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
@@ -4783,12 +4730,12 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device,
"attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
scmd, devhandle);
- mr_device_priv_data->tm_busy = 1;
+ mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
mr_device_priv_data);
- mr_device_priv_data->tm_busy = 0;
+ mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
(ret == SUCCESS) ? "SUCCESS" : "FAILED");
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index d57ecc7f88d8..30de4b01f703 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -774,7 +774,7 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_CPU_AFFINITY_MASK {
union {
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u8 hw_path:1;
u8 cpu0:1;
u8 cpu1:1;
@@ -866,7 +866,7 @@ struct MR_LD_RAID {
__le16 seqNum;
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 ldSyncRequired:1;
u32 regTypeReqOnReadIsValid:1;
u32 isEPD:1;
@@ -889,7 +889,7 @@ struct {
/* 0x30 - 0x33, Logical block size for the LD */
u32 logical_block_length;
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
u32 ld_pi_exp:4;
/* 0x34, LOGICAL BLOCKS PER PHYSICAL
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index 84fb3fbdb0ca..e76d994dbed3 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -7,4 +7,5 @@ mpt3sas-y += mpt3sas_base.o \
mpt3sas_transport.o \
mpt3sas_ctl.o \
mpt3sas_trigger_diag.o \
- mpt3sas_warpdrive.o
+ mpt3sas_warpdrive.o \
+		mpt3sas_debugfs.o
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 663782bb790d..beaea1933f5c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -413,7 +413,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2SGESimple32_t *sgel, *sgel_next;
u32 sgl_flags, sge_chain_count = 0;
- bool is_write = 0;
+ bool is_write = false;
u16 i = 0;
void __iomem *buffer_iomem;
phys_addr_t buffer_iomem_phys;
@@ -482,7 +482,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
if (le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
- is_write = 1;
+ is_write = true;
for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
@@ -2806,58 +2806,38 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
- u64 required_mask, coherent_mask;
struct sysinfo s;
- /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
- int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
-
- if (ioc->is_mcpu_endpoint)
- goto try_32bit;
+ int dma_mask;
- required_mask = dma_get_required_mask(&pdev->dev);
- if (sizeof(dma_addr_t) == 4 || required_mask == 32)
- goto try_32bit;
-
- if (ioc->dma_mask)
- coherent_mask = DMA_BIT_MASK(dma_mask);
+ if (ioc->is_mcpu_endpoint ||
+ sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
+ dma_get_required_mask(&pdev->dev) <= 32)
+ dma_mask = 32;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
+ dma_mask = 63;
else
- coherent_mask = DMA_BIT_MASK(32);
+ dma_mask = 64;
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
- dma_set_coherent_mask(&pdev->dev, coherent_mask))
- goto try_32bit;
-
- ioc->base_add_sg_single = &_base_add_sg_single_64;
- ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = dma_mask;
- goto out;
-
- try_32bit:
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
return -ENODEV;
- ioc->base_add_sg_single = &_base_add_sg_single_32;
- ioc->sge_size = sizeof(Mpi2SGESimple32_t);
- ioc->dma_mask = 32;
- out:
+ if (dma_mask > 32) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ } else {
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ }
+
si_meminfo(&s);
ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- ioc->dma_mask, convert_to_kb(s.totalram));
+ dma_mask, convert_to_kb(s.totalram));
return 0;
}
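
The rewritten routine folds the old goto ladder into a single decision, which can be paraphrased as follows (a restatement, not driver code):

    /* dma_mask selection:
     *   32 - mcpu endpoint, 32-bit dma_addr_t, forced 32-bit mode, or
     *        dma_get_required_mask() says 32 bits suffice
     *   63 - SAS3/SAS35 parts (hba_mpi_version_belonged > MPI2_VERSION)
     *   64 - everything else
     * A single dma_set_mask()/dma_set_coherent_mask() pair then applies
     * the choice, with no separate 32-bit retry path.
     */
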
-static int
-_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
- struct pci_dev *pdev)
-{
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
- return -ENODEV;
- }
- return 0;
-}
-
/**
* _base_check_enable_msix - checks MSIX capabable.
* @ioc: per adapter object
@@ -4827,8 +4807,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
int i = 0;
int j = 0;
+ int dma_alloc_count = 0;
struct chain_tracker *ct;
- struct reply_post_struct *rps;
+ int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -4870,29 +4851,34 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->reply_post) {
- do {
- rps = &ioc->reply_post[i];
- if (rps->reply_post_free) {
- dma_pool_free(
- ioc->reply_post_free_dma_pool,
- rps->reply_post_free,
- rps->reply_post_free_dma);
- dexitprintk(ioc,
- ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
- rps->reply_post_free));
- rps->reply_post_free = NULL;
+ dma_alloc_count = DIV_ROUND_UP(count,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+ for (i = 0; i < count; i++) {
+ if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
+ && dma_alloc_count) {
+ if (ioc->reply_post[i].reply_post_free) {
+ dma_pool_free(
+ ioc->reply_post_free_dma_pool,
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post[i].reply_post_free_dma);
+ dexitprintk(ioc, ioc_info(ioc,
+ "reply_post_free_pool(0x%p): free\n",
+ ioc->reply_post[i].reply_post_free));
+ ioc->reply_post[i].reply_post_free =
+ NULL;
+ }
+ --dma_alloc_count;
}
- } while (ioc->rdpq_array_enable &&
- (++i < ioc->reply_queue_count));
+ }
+ dma_pool_destroy(ioc->reply_post_free_dma_pool);
if (ioc->reply_post_free_array &&
ioc->rdpq_array_enable) {
dma_pool_free(ioc->reply_post_free_array_dma_pool,
- ioc->reply_post_free_array,
- ioc->reply_post_free_array_dma);
+ ioc->reply_post_free_array,
+ ioc->reply_post_free_array_dma);
ioc->reply_post_free_array = NULL;
}
dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
- dma_pool_destroy(ioc->reply_post_free_dma_pool);
kfree(ioc->reply_post);
}
@@ -4902,8 +4888,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->pcie_sg_lookup[i].pcie_sgl,
ioc->pcie_sg_lookup[i].pcie_sgl_dma);
}
- if (ioc->pcie_sgl_dma_pool)
- dma_pool_destroy(ioc->pcie_sgl_dma_pool);
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
if (ioc->config_page) {
@@ -4915,7 +4900,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
kfree(ioc->hpr_lookup);
+ ioc->hpr_lookup = NULL;
kfree(ioc->internal_lookup);
+ ioc->internal_lookup = NULL;
if (ioc->chain_lookup) {
for (i = 0; i < ioc->scsiio_depth; i++) {
for (j = ioc->chains_per_prp_buffer;
@@ -4935,7 +4922,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * is_MSB_are_same - checks whether all reply queues in a set are
+ * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
* having same upper 32bits in their base memory address.
* @reply_pool_start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
@@ -4945,7 +4932,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
*/
static int
-is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
+mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
{
long reply_pool_end_address;
@@ -4959,6 +4946,88 @@ is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
}
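
The renamed helper's test amounts to comparing the upper 32 address bits of the first and last byte of the pool. A hedged equivalent built on the kernel's upper_32_bits() macro (helper name hypothetical):

    static bool same_4gb_region(long start, u32 pool_sz)
    {
            /* true iff the pool does not cross a 4GB boundary */
            return upper_32_bits((u64)start) ==
                   upper_32_bits((u64)start + pool_sz - 1);
    }
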
/**
+ * base_alloc_rdpq_dma_pool - allocate DMA'able memory for reply queues.
+ * @ioc: per adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
+{
+ int i = 0;
+ u32 dma_alloc_count = 0;
+ int reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
+
+ ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
+ GFP_KERNEL);
+ if (!ioc->reply_post)
+ return -ENOMEM;
+ /*
+	 * For INVADER_SERIES, each set of 8 reply queues (0-7, 8-15, ..) and
+	 * for VENTURA_SERIES, each set of 16 reply queues (0-15, 16-31, ..)
+	 * must be within a 4GB boundary, i.e. the reply queues in a set must
+	 * have the same upper 32 bits in their memory address, so the driver
+	 * allocates the DMA'able memory for the reply queues accordingly.
+	 * The VENTURA_SERIES limitation is used to manage INVADER_SERIES
+	 * as well.
+ */
+ dma_alloc_count = DIV_ROUND_UP(count,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+ ioc->reply_post_free_dma_pool =
+ dma_pool_create("reply_post_free pool",
+ &ioc->pdev->dev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool)
+ return -ENOMEM;
+ for (i = 0; i < count; i++) {
+ if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
+ ioc->reply_post[i].reply_post_free =
+ dma_pool_alloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL,
+ &ioc->reply_post[i].reply_post_free_dma);
+ if (!ioc->reply_post[i].reply_post_free)
+ return -ENOMEM;
+ /*
+			 * Each set of RDPQ pools must satisfy the 4GB
+			 * boundary restriction:
+			 * 1) Check if the resources allocated for the RDPQ
+			 *    pool are within the same 4GB range.
+			 * 2) If #1 is true, continue with 64 bit DMA.
+			 * 3) If #1 is false, return -EAGAIN, which means:
+			 *    free all the resources, set the DMA mask to
+			 *    32 bit, and allocate again.
+ */
+ if (!mpt3sas_check_same_4gb_region(
+ (long)ioc->reply_post[i].reply_post_free, sz)) {
+ dinitprintk(ioc,
+				    ioc_err(ioc, "bad Replypost free pool(0x%p) "
+				    "reply_post_free_dma = (0x%llx)\n",
+ ioc->reply_post[i].reply_post_free,
+ (unsigned long long)
+ ioc->reply_post[i].reply_post_free_dma));
+ return -EAGAIN;
+ }
+ memset(ioc->reply_post[i].reply_post_free, 0,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK *
+ reply_post_free_sz);
+ dma_alloc_count--;
+
+ } else {
+ ioc->reply_post[i].reply_post_free =
+ (Mpi2ReplyDescriptorsUnion_t *)
+ ((long)ioc->reply_post[i-1].reply_post_free
+ + reply_post_free_sz);
+ ioc->reply_post[i].reply_post_free_dma =
+ (dma_addr_t)
+ (ioc->reply_post[i-1].reply_post_free_dma +
+ reply_post_free_sz);
+ }
+ }
+ return 0;
+}
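
A worked example of the chunking (numbers hypothetical, and base[] is shorthand for reply_post[i].reply_post_free): with reply_queue_count = 24 and RDPQ_MAX_INDEX_IN_ONE_CHUNK = 16, DIV_ROUND_UP yields 2 DMA allocations; queues 0 and 16 get fresh pool memory, and every other queue is carved out of its chunk by offset, so all 16 queues of a chunk share one allocation and hence one upper-32-bit address prefix:

    int chunks = DIV_ROUND_UP(24, 16);      /* = 2 dma_pool allocations */

    /* non-head queue i reuses the previous queue's buffer plus one
     * queue's worth of descriptors */
    base[i] = base[i - 1] + reply_post_free_sz;
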
+
+/**
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
*
@@ -4972,10 +5041,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
u32 retry_sz;
+ u32 rdpq_sz = 0;
u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize;
u16 sge_size;
int i, j;
+ int ret = 0;
struct chain_tracker *ct;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5129,54 +5200,28 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth *
sizeof(Mpi2DefaultReplyDescriptor_t);
-
- sz = reply_post_free_sz;
+ rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
- sz *= ioc->reply_queue_count;
-
- ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
- (ioc->reply_queue_count):1,
- sizeof(struct reply_post_struct), GFP_KERNEL);
-
- if (!ioc->reply_post) {
- ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
- goto out;
- }
- ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
- &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->reply_post_free_dma_pool) {
- ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
- goto out;
- }
- i = 0;
- do {
- ioc->reply_post[i].reply_post_free =
- dma_pool_zalloc(ioc->reply_post_free_dma_pool,
- GFP_KERNEL,
- &ioc->reply_post[i].reply_post_free_dma);
- if (!ioc->reply_post[i].reply_post_free) {
- ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
- goto out;
- }
- dinitprintk(ioc,
- ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->reply_post[i].reply_post_free,
- ioc->reply_post_queue_depth,
- 8, sz / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
- (u64)ioc->reply_post[i].reply_post_free_dma));
- total_sz += sz;
- } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
-
- if (ioc->dma_mask > 32) {
- if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
- ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
- pci_name(ioc->pdev));
- goto out;
+ rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
+ ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
+ if (ret == -EAGAIN) {
+ /*
+ * Free the allocated bad RDPQ memory pools, change the DMA
+ * coherent mask to 32 bits, and reallocate the RDPQ pools.
+ */
+ _base_release_memory_pools(ioc);
+ ioc->use_32bit_dma = true;
+ if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
+ ioc_err(ioc,
+ "32 DMA mask failed %s\n", pci_name(ioc->pdev));
+ return -ENODEV;
}
- }
-
+ if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
+ return -ENOMEM;
+ } else if (ret == -ENOMEM)
+ return -ENOMEM;
+ total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
+ DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
ioc->scsiio_depth = ioc->hba_queue_depth -
ioc->hi_priority_depth - ioc->internal_depth;
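To make the sizing above concrete, here is a worked example with illustrative numbers (the queue depth and descriptor size are assumptions, not values from this patch):

	/*
	 * Assume reply_post_queue_depth = 254 and
	 * sizeof(Mpi2DefaultReplyDescriptor_t) = 8:
	 *
	 *   reply_post_free_sz = 254 * 8 = 2032 bytes
	 *   rdpq_sz = 2032 * RDPQ_MAX_INDEX_IN_ONE_CHUNK (16) = 32512 bytes
	 *
	 * With rdpq_array_enable set and reply_queue_count = 24:
	 *   dma_alloc_count = DIV_ROUND_UP(24, 16) = 2 chunks
	 *   total_sz = 32512 * 2 = 65024 bytes
	 *
	 * Each 32512-byte chunk is a single dma_pool allocation, so all 16
	 * queues carved out of it trivially share the same upper 32 bits.
	 */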
@@ -5188,7 +5233,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
ioc->shost->can_queue));
-
/* contiguous pool for request and chains, 16 byte align, one extra "
* "frame for smid=0
*/
@@ -5405,7 +5449,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* The actual requirement is not alignment, but that the start and end
* of the DMA address range share the same upper 32 bits.
*/
- if (!is_MSB_are_same((long)ioc->sense, sz)) {
+ if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
//Release Sense pool & Reallocate
dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
dma_pool_destroy(ioc->sense_dma_pool);
@@ -7158,7 +7202,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->smp_affinity_enable = smp_affinity_enable;
ioc->rdpq_array_enable_assigned = 0;
- ioc->dma_mask = 0;
+ ioc->use_32bit_dma = false;
if (ioc->is_aero_ioc)
ioc->base_readl = &_base_readl_aero;
else
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index e7197150721f..4fca3939c034 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "33.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 33
+#define MPT3SAS_DRIVER_VERSION "34.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 34
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -367,6 +367,7 @@ struct mpt3sas_nvme_cmd {
#define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8
#define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16
#define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128
+#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
/* OEM Specific Flags will come from OEM specific header files */
struct Mpi2ManufacturingPage10_t {
@@ -1026,7 +1027,6 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @ir_firmware: IR firmware present
* @bars: bitmask of BAR's that must be configured
* @mask_interrupts: ignore interrupt
- * @dma_mask: used to set the consistent dma mask
* @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and
* pci resource handling
* @fault_reset_work_q_name: fw fault work queue
@@ -1064,6 +1064,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @thresh_hold: Max number of reply descriptors processed
* before updating Host Index
* @drv_support_bitmap: driver's supported feature bit map
+ * @use_32bit_dma: Flag to use 32 bit consistent dma mask
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -1205,7 +1206,6 @@ struct MPT3SAS_ADAPTER {
u8 ir_firmware;
int bars;
u8 mask_interrupts;
- int dma_mask;
/* fw fault handler */
char fault_reset_work_q_name[20];
@@ -1254,6 +1254,7 @@ struct MPT3SAS_ADAPTER {
u8 high_iops_queues;
u32 drv_support_bitmap;
bool enable_sdev_max_qd;
+ bool use_32bit_dma;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
@@ -1471,6 +1472,8 @@ struct MPT3SAS_ADAPTER {
u16 device_remove_in_progress_sz;
u8 is_gen35_ioc;
u8 is_aero_ioc;
+ struct dentry *debugfs_root;
+ struct dentry *ioc_dump;
PUT_SMID_IO_FP_HIP put_smid_scsi_io;
PUT_SMID_IO_FP_HIP put_smid_fast_path;
PUT_SMID_IO_FP_HIP put_smid_hi_priority;
@@ -1478,6 +1481,11 @@ struct MPT3SAS_ADAPTER {
GET_MSIX_INDEX get_msix_index_for_smlio;
};
+struct mpt3sas_debugfs_buffer {
+ void *buf;
+ u32 len;
+};
+
#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1781,6 +1789,11 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_init_debugfs(void);
+void mpt3sas_exit_debugfs(void);
+
/**
* _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device
* @device_info: bitfield providing information about the device.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debugfs.c b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
new file mode 100644
index 000000000000..a6ab1db81167
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Debugfs interface Support for MPT (Message Passing Technology) based
+ * controllers.
+ *
+ * Copyright (C) 2020 Broadcom Inc.
+ *
+ * Authors: Broadcom Inc.
+ * Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+ * Suganath Prabu <suganath-prabu.subramani@broadcom.com>
+ *
+ * Send feedback to: MPT-FusionLinux.pdl@broadcom.com
+ *
+ **/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "mpt3sas_base.h"
+#include <linux/debugfs.h>
+
+static struct dentry *mpt3sas_debugfs_root;
+
+/*
+ * _debugfs_iocdump_read - copy the ioc dump from the debugfs buffer
+ * @filp: file pointer
+ * @ubuf: buffer to fill with data
+ * @cnt: length of the buffer
+ * @ppos: offset in the file
+ */
+static ssize_t
+_debugfs_iocdump_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct mpt3sas_debugfs_buffer *debug = filp->private_data;
+
+ if (!debug || !debug->buf)
+ return 0;
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
+}
+
+/*
+ * _debugfs_iocdump_open : open the ioc_dump debugfs attribute file
+ */
+static int
+_debugfs_iocdump_open(struct inode *inode, struct file *file)
+{
+ struct MPT3SAS_ADAPTER *ioc = inode->i_private;
+ struct mpt3sas_debugfs_buffer *debug;
+
+ debug = kzalloc(sizeof(struct mpt3sas_debugfs_buffer), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->buf = (void *)ioc;
+ debug->len = sizeof(struct MPT3SAS_ADAPTER);
+ file->private_data = debug;
+ return 0;
+}
+
+/*
+ * _debugfs_iocdump_release : release the ioc_dump debugfs attribute
+ * @inode: inode of the corresponding device
+ * @file: File pointer
+ */
+static int
+_debugfs_iocdump_release(struct inode *inode, struct file *file)
+{
+ struct mpt3sas_debugfs_buffer *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static const struct file_operations mpt3sas_debugfs_iocdump_fops = {
+ .owner = THIS_MODULE,
+ .open = _debugfs_iocdump_open,
+ .read = _debugfs_iocdump_read,
+ .release = _debugfs_iocdump_release,
+};
+
+/*
+ * mpt3sas_init_debugfs : Create debugfs root for mpt3sas driver
+ */
+void mpt3sas_init_debugfs(void)
+{
+ mpt3sas_debugfs_root = debugfs_create_dir("mpt3sas", NULL);
+ if (!mpt3sas_debugfs_root)
+ pr_info("mpt3sas: Cannot create debugfs root\n");
+}
+
+/*
+ * mpt3sas_exit_debugfs : Remove debugfs root for mpt3sas driver
+ */
+void mpt3sas_exit_debugfs(void)
+{
+ debugfs_remove_recursive(mpt3sas_debugfs_root);
+}
+
+/*
+ * mpt3sas_setup_debugfs : Setup debugfs per HBA adapter
+ * @ioc: MPT3SAS_ADAPTER object
+ */
+void
+mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+ char name[64];
+
+ snprintf(name, sizeof(name), "scsi_host%d", ioc->shost->host_no);
+ if (!ioc->debugfs_root) {
+ ioc->debugfs_root =
+ debugfs_create_dir(name, mpt3sas_debugfs_root);
+ if (!ioc->debugfs_root) {
+ dev_err(&ioc->pdev->dev,
+ "Cannot create per adapter debugfs directory\n");
+ return;
+ }
+ }
+
+ snprintf(name, sizeof(name), "ioc_dump");
+ ioc->ioc_dump = debugfs_create_file(name, 0444,
+ ioc->debugfs_root, ioc, &mpt3sas_debugfs_iocdump_fops);
+ if (!ioc->ioc_dump) {
+ dev_err(&ioc->pdev->dev,
+ "Cannot create ioc_dump debugfs file\n");
+ debugfs_remove(ioc->debugfs_root);
+ return;
+ }
+
+ snprintf(name, sizeof(name), "host_recovery");
+ debugfs_create_u8(name, 0444, ioc->debugfs_root, &ioc->shost_recovery);
+}
+
+/*
+ * mpt3sas_destroy_debugfs : Destroy debugfs per HBA adapter
+ * @ioc: MPT3SAS_ADAPTER object
+ */
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+ debugfs_remove_recursive(ioc->debugfs_root);
+}
+
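Once the file above is created, the raw adapter structure can be pulled out of debugfs from user space. A minimal sketch (the scsi_host0 path segment is an assumption — substitute the actual host number):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical path; the host number varies per system. */
		const char *path = "/sys/kernel/debug/mpt3sas/scsi_host0/ioc_dump";
		char buf[4096];
		ssize_t n;
		int fd = open(path, O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);	/* raw struct MPT3SAS_ADAPTER */
		close(fd);
		return 0;
	}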
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 04a40afe60e3..08fc4b381056 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -9928,6 +9928,7 @@ static void scsih_remove(struct pci_dev *pdev)
&ioc->ioc_pg1_copy);
/* release all the volumes */
_scsih_ir_shutdown(ioc);
+ mpt3sas_destroy_debugfs(ioc);
sas_remove_host(shost);
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) {
@@ -10763,8 +10764,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
/* register EEDP capabilities with SCSI layer */
- if (prot_mask > 0)
- scsi_host_set_prot(shost, prot_mask);
+ if (prot_mask >= 0)
+ scsi_host_set_prot(shost, (prot_mask & 0x07));
else
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
@@ -10814,6 +10815,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
scsi_scan_host(shost);
+ mpt3sas_setup_debugfs(ioc);
return 0;
out_add_shost_fail:
mpt3sas_base_detach(ioc);
@@ -11220,6 +11222,7 @@ scsih_init(void)
tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
_scsih_sas_control_complete);
+ mpt3sas_init_debugfs();
return 0;
}
@@ -11251,6 +11254,7 @@ scsih_exit(void)
if (hbas_to_enumerate != 2)
raid_class_release(mpt2sas_raid_template);
sas_release_transport(mpt3sas_transport_template);
+ mpt3sas_exit_debugfs();
}
/**
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7af9173c4925..5973eed94938 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -25,7 +25,7 @@ static const struct mvs_chip_info mvs_chips[] = {
[chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
};
-struct device_attribute *mvst_host_attrs[];
+static struct device_attribute *mvst_host_attrs[];
#define SOC_SAS_NUM 2
@@ -759,8 +759,6 @@ static DEVICE_ATTR(interrupt_coalescing,
mvs_show_interrupt_coalescing,
mvs_store_interrupt_coalescing);
-/* task handler */
-struct task_struct *mvs_th;
static int __init mvs_init(void)
{
int rc;
@@ -785,7 +783,7 @@ static void __exit mvs_exit(void)
sas_release_transport(mvs_stt);
}
-struct device_attribute *mvst_host_attrs[] = {
+static struct device_attribute *mvst_host_attrs[] = {
&dev_attr_driver_version,
&dev_attr_interrupt_coalescing,
NULL,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 7eb88fe1eb0b..aa9ae2ae8579 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4652,7 +4652,7 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
pinstance->cmd_list[i]->ioa_cb =
- dma_pool_alloc(
+ dma_pool_zalloc(
pinstance->control_pool,
GFP_KERNEL,
&(pinstance->cmd_list[i]->ioa_cb_bus_addr));
@@ -4661,8 +4661,6 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
pmcraid_release_control_blocks(pinstance, i);
return -ENOMEM;
}
- memset(pinstance->cmd_list[i]->ioa_cb, 0,
- sizeof(struct pmcraid_control_block));
}
return 0;
}
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index f3f399fe10c8..e163be8af965 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -355,6 +355,7 @@ struct qedf_ctx {
#define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
+#define QEDF_PROBING 8
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
@@ -387,7 +388,9 @@ struct qedf_ctx {
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
+ struct delayed_work recovery_work;
struct delayed_work grcdump_work;
+ struct delayed_work stag_work;
u32 slow_sge_ios;
u32 fast_sge_ios;
@@ -403,6 +406,7 @@ struct qedf_ctx {
u32 flogi_cnt;
u32 flogi_failed;
+ u32 flogi_pending;
/* Used for fc statistics */
struct mutex stats_mutex;
@@ -468,7 +472,7 @@ extern uint qedf_dump_frames;
extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo;
-#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */
+#define QEDF_RETRY_DELAY_MAX 600 /* 60 seconds */
extern bool qedf_retry_delay;
extern uint qedf_debug;
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 87e169dcebdb..542ba9454257 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -388,14 +388,10 @@ void qedf_restart_rport(struct qedf_rport *fcport)
mutex_lock(&lport->disc.disc_mutex);
/* Recreate the rport and log back in */
rdata = fc_rport_create(lport, port_id);
- if (rdata) {
- mutex_unlock(&lport->disc.disc_mutex);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (rdata)
fc_rport_login(rdata);
- fcport->rdata = rdata;
- } else {
- mutex_unlock(&lport->disc.disc_mutex);
- fcport->rdata = NULL;
- }
+ fcport->rdata = rdata;
}
clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index e749a2dcaad7..0f6a15c1a04b 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1021,14 +1021,18 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
atomic_inc(&fcport->ios_to_queue);
if (fcport->retry_delay_timestamp) {
+ /* Take fcport->rport_lock for resetting the delay_timestamp */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
if (time_after(jiffies, fcport->retry_delay_timestamp)) {
fcport->retry_delay_timestamp = 0;
} else {
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
/* If retry_delay timer is active, flow off the ML */
rc = SCSI_MLQUEUE_TARGET_BUSY;
atomic_dec(&fcport->ios_to_queue);
goto exit_qcmd;
}
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
@@ -1134,6 +1138,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
int refcount;
u16 scope, qualifier = 0;
u8 fw_residual_flag = 0;
+ unsigned long flags = 0;
+ u16 chk_scope = 0;
if (!io_req)
return;
@@ -1267,16 +1273,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
/* Lower 14 bits */
qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
- if (qedf_retry_delay &&
- scope > 0 && qualifier > 0 &&
- qualifier <= 0x3FEF) {
- /* Check we don't go over the max */
- if (qualifier > QEDF_RETRY_DELAY_MAX)
- qualifier =
- QEDF_RETRY_DELAY_MAX;
- fcport->retry_delay_timestamp =
- jiffies + (qualifier * HZ / 10);
- }
+ if (qedf_retry_delay)
+ chk_scope = 1;
/* Record stats */
if (io_req->cdb_status ==
SAM_STAT_TASK_SET_FULL)
@@ -1287,6 +1285,36 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
+
+ if (chk_scope == 1) {
+ if ((scope == 1 || scope == 2) &&
+ (qualifier > 0 && qualifier <= 0x3FEF)) {
+ /* Check we don't go over the max */
+ if (qualifier > QEDF_RETRY_DELAY_MAX) {
+ qualifier = QEDF_RETRY_DELAY_MAX;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "qualifier = %d\n",
+ (fcp_rsp->retry_delay_timer &
+ 0x3FFF));
+ }
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Scope = %d and qualifier = %d",
+ scope, qualifier);
+ /* Take fcport->rport_lock to
+ * update the retry_delay_timestamp
+ */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ fcport->retry_delay_timestamp =
+ jiffies + (qualifier * HZ / 10);
+ spin_unlock_irqrestore(&fcport->rport_lock,
+ flags);
+
+ } else {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
+ scope, qualifier);
+ }
+ }
break;
default:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
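For reference, the retry delay qualifier carried in the FCP_RSP is expressed in 100 ms units, which is why the new QEDF_RETRY_DELAY_MAX of 600 corresponds to 60 seconds. A minimal sketch of the conversion performed above:

	#include <linux/jiffies.h>

	#define RETRY_DELAY_MAX	600	/* 100 ms units, i.e. 60 seconds */

	/* Deadline (in jiffies) after which I/O may be queued again. */
	static unsigned long retry_delay_deadline(u16 retry_delay_timer)
	{
		u16 qualifier = retry_delay_timer & 0x3FFF;

		if (qualifier > RETRY_DELAY_MAX)
			qualifier = RETRY_DELAY_MAX;
		return jiffies + (unsigned long)qualifier * HZ / 10; /* 100 ms units */
	}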
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 5b19f5175c5c..36b1ca2dadbb 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -28,6 +28,8 @@ const struct qed_fcoe_ops *qed_ops;
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
+static void qedf_schedule_recovery_handler(void *dev);
+static void qedf_recovery_handler(struct work_struct *work);
/*
* Driver module parameters.
@@ -282,6 +284,7 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
/* Set the source MAC we will use for FCoE traffic */
qedf_set_data_src_addr(qedf, fp);
+ qedf->flogi_pending = 0;
}
/* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -307,6 +310,11 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
*/
if (resp == fc_lport_flogi_resp) {
qedf->flogi_cnt++;
+ if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+ schedule_delayed_work(&qedf->stag_work, 2);
+ return NULL;
+ }
+ qedf->flogi_pending++;
return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
arg, timeout);
}
@@ -503,6 +511,32 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
}
+static void qedf_bw_update(void *dev)
+{
+ struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+ struct qed_link_output link;
+
+ /* Get the latest status of the link */
+ qed_ops->common->get_link(qedf->cdev, &link);
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore link update, driver getting unload.\n");
+ return;
+ }
+
+ if (link.link_up) {
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+ qedf_update_link_speed(qedf, &link);
+ else
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore bw update, link is down.\n");
+
+ } else {
+ QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
+ }
+}
+
static void qedf_link_update(void *dev, struct qed_link_output *link)
{
struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
@@ -629,6 +663,8 @@ static u32 qedf_get_login_failures(void *cookie)
static struct qed_fcoe_cb_ops qedf_cb_ops = {
{
.link_update = qedf_link_update,
+ .bw_update = qedf_bw_update,
+ .schedule_recovery_handler = qedf_schedule_recovery_handler,
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
@@ -850,6 +886,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
qedf = lport_priv(lport);
+ qedf->flogi_pending = 0;
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
@@ -3153,7 +3190,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
{
int rc = -EINVAL;
struct fc_lport *lport;
- struct qedf_ctx *qedf;
+ struct qedf_ctx *qedf = NULL;
struct Scsi_Host *host;
bool is_vf = false;
struct qed_ll2_params params;
@@ -3183,6 +3220,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
/* Initialize qedf_ctx */
qedf = lport_priv(lport);
+ set_bit(QEDF_PROBING, &qedf->flags);
qedf->lport = lport;
qedf->ctlr.lp = lport;
qedf->pdev = pdev;
@@ -3197,6 +3235,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
init_completion(&qedf->fipvlan_compl);
mutex_init(&qedf->stats_mutex);
mutex_init(&qedf->flush_mutex);
+ qedf->flogi_pending = 0;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
@@ -3206,9 +3245,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} else {
/* Init pointers during recovery */
qedf = pci_get_drvdata(pdev);
+ set_bit(QEDF_PROBING, &qedf->flags);
lport = qedf->lport;
}
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
+
host = lport->host;
/* Allocate mempool for qedf_io_work structs */
@@ -3227,6 +3269,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
+ INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
qedf->fipvlan_retries = qedf_fipvlan_retries;
/* Set a default prio in case DCBX doesn't converge */
if (qedf_default_prio > -1) {
@@ -3281,6 +3324,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
+ /* Learn information crucial for qedf to progress */
+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
+ goto err2;
+ }
+
/* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
@@ -3466,6 +3516,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->lport->host->host_no);
qedf->dpc_wq = create_workqueue(host_buf);
}
+ INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
/*
* GRC dump and sysfs parameters are not reaped during the recovery
@@ -3513,6 +3564,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
else
fc_fabric_login(lport);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+
/* All good */
return 0;
@@ -3538,6 +3593,11 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
+ if (qedf) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+ }
return rc;
}
@@ -3687,11 +3747,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
{
struct qedf_ctx *qedf = dev;
struct qed_mfw_tlv_fcoe *fcoe = data;
- struct fc_lport *lport = qedf->lport;
- struct Scsi_Host *host = lport->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+ struct fc_lport *lport;
+ struct Scsi_Host *host;
+ struct fc_host_attrs *fc_host;
struct fc_host_statistics *hst;
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is null.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_PROBING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
+ return;
+ }
+
+ lport = qedf->lport;
+ host = lport->host;
+ fc_host = shost_to_fc_host(host);
+
/* Force a refresh of the fc_host stats including offload stats */
hst = qedf_fc_get_host_stats(host);
@@ -3762,11 +3836,64 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
fcoe->scsi_tsk_full = qedf->task_set_fulls;
}
+/* Deferred work function to perform soft context reset on STAG change */
+void qedf_stag_change_work(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, stag_work.work);
+
+ if (!qedf) {
+ QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+ return;
+ }
+ QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+ qedf_ctx_soft_reset(qedf->lport);
+}
+
static void qedf_shutdown(struct pci_dev *pdev)
{
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}
+/*
+ * Recovery handler code
+ */
+static void qedf_schedule_recovery_handler(void *dev)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
+ schedule_delayed_work(&qedf->recovery_work, 0);
+}
+
+static void qedf_recovery_handler(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, recovery_work.work);
+
+ if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
+ return;
+
+ /*
+ * Call common_ops->recovery_prolog to allow the MFW to quiesce
+ * any PCI transactions.
+ */
+ qed_ops->common->recovery_prolog(qedf->cdev);
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
+ __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
+ /*
+ * Reset link and dcbx to down state since we will not get a link down
+ * event from the MFW but calling __qedf_remove will essentially be a
+ * link down event.
+ */
+ atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+ __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
+ clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
+}
+
/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 1f4a5fb00a05..425e665ec08b 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -836,6 +836,11 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
+ if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+ QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+ return ERR_PTR(-ENXIO);
+ }
+
ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
if (!ep) {
QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
@@ -870,12 +875,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
}
- if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
- QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
- ret = -ENXIO;
- goto ep_conn_exit;
- }
-
ret = qedi_alloc_sq(qedi, qedi_ep);
if (ret)
goto ep_conn_exit;
@@ -1001,7 +1000,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- flush_work(&qedi_ep->offload_work);
+ if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
+ flush_work(&qedi_ep->offload_work);
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
@@ -1065,6 +1065,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
break;
}
+ if (!abrt_conn)
+ wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
+
qedi_ep->state = EP_STATE_DISCONN_START;
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
@@ -1218,6 +1221,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
}
iscsi_cid = (u32)path_data->handle;
+ if (iscsi_cid >= qedi->max_active_conns) {
+ ret = -EINVAL;
+ goto set_path_exit;
+ }
qedi_ep = qedi->ep_tbl[iscsi_cid];
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index b995b19865ca..81a307695cc9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -28,6 +28,10 @@
#include "qedi_gbl.h"
#include "qedi_iscsi.h"
+static uint qedi_qed_debug;
+module_param(qedi_qed_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_qed_debug, " QED debug level 0 (default)");
+
static uint qedi_fw_debug;
module_param(qedi_fw_debug, uint, 0644);
MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
@@ -41,7 +45,7 @@ module_param(qedi_io_tracing, uint, 0644);
MODULE_PARM_DESC(qedi_io_tracing,
" Enable logging of SCSI requests/completions into trace buffer. (default off).");
-uint qedi_ll2_buf_size = 0x400;
+static uint qedi_ll2_buf_size = 0x400;
module_param(qedi_ll2_buf_size, uint, 0644);
MODULE_PARM_DESC(qedi_ll2_buf_size,
"parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
@@ -658,8 +662,6 @@ exit_setup_shost:
static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
- struct qedi_uio_dev *udev;
- struct qedi_uio_ctrl *uctrl;
struct skb_work_list *work;
struct ethhdr *eh;
@@ -698,9 +700,6 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
"Allowed frame ethertype [0x%x] len [0x%x].\n",
eh->h_proto, skb->len);
- udev = qedi->udev;
- uctrl = udev->uctrl;
-
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -921,7 +920,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
ipv6_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
- snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
+ snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s",
block->target[index].target_name.byte);
tgt->ipv6_en = ipv6_en;
@@ -1302,13 +1301,13 @@ process_again:
"process already running\n");
}
- if (qedi_fp_has_work(fp) == 0)
+ if (!qedi_fp_has_work(fp))
qed_sb_update_sb_idx(fp->sb_info);
/* Check for more work */
rmb();
- if (qedi_fp_has_work(fp) == 0)
+ if (!qedi_fp_has_work(fp))
qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
else
goto process_again;
@@ -1360,7 +1359,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
u16 idx;
cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ for (i = 0; i < qedi->int_info.msix_cnt; i++) {
idx = i * qedi->dev_info.common.num_hwfns +
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
@@ -2422,7 +2421,6 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi;
struct qed_ll2_params params;
- u32 dp_module = 0;
u8 dp_level = 0;
bool is_vf = false;
char host_buf[16];
@@ -2445,7 +2443,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
memset(&qed_params, 0, sizeof(qed_params));
qed_params.protocol = QED_PROTOCOL_ISCSI;
- qed_params.dp_module = dp_module;
+ qed_params.dp_module = qedi_qed_debug;
qed_params.dp_level = dp_level;
qed_params.is_vf = is_vf;
qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3337cd341d21..441a45349349 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -526,7 +526,7 @@ static struct pci_device_id qla1280_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
-DEFINE_MUTEX(qla1280_firmware_mutex);
+static DEFINE_MUTEX(qla1280_firmware_mutex);
struct qla_fw {
char *fwname;
@@ -535,7 +535,7 @@ struct qla_fw {
#define QL_NUM_FW_IMAGES 3
-struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
+static struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
{"qlogic/1040.bin", NULL}, /* image 0 */
{"qlogic/1280.bin", NULL}, /* image 1 */
{"qlogic/12160.bin", NULL}, /* image 2 */
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 33255968f03a..5d93ccc73153 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -26,7 +26,8 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
+ if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
+ ha->mpi_fw_dump_reading))
return 0;
mutex_lock(&ha->optrom_mutex);
@@ -42,6 +43,10 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
MCTP_DUMP_SIZE);
+ } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
+ rval = memory_read_from_buffer(buf, count, &off,
+ ha->mpi_fw_dump,
+ ha->mpi_fw_dump_len);
} else if (ha->fw_dump_reading) {
rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
ha->fw_dump_len);
@@ -79,7 +84,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_md_prep(vha);
}
ha->fw_dump_reading = 0;
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
break;
case 1:
if (ha->fw_dumped && !ha->fw_dump_reading) {
@@ -103,7 +108,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_set_reset_owner(vha);
qla8044_idc_unlock(ha);
} else {
- ha->fw_dump_mpi = 1;
qla2x00_system_error(vha);
}
break;
@@ -137,6 +141,22 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
vha->host_no);
}
break;
+ case 8:
+ if (!ha->mpi_fw_dump_reading)
+ break;
+ ql_log(ql_log_info, vha, 0x70e7,
+ "MPI firmware dump cleared on (%ld).\n", vha->host_no);
+ ha->mpi_fw_dump_reading = 0;
+ ha->mpi_fw_dumped = 0;
+ break;
+ case 9:
+ if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
+ ha->mpi_fw_dump_reading = 1;
+ ql_log(ql_log_info, vha, 0x70e8,
+ "Raw MPI firmware dump ready for read on (%ld).\n",
+ vha->host_no);
+ }
+ break;
}
return count;
}
@@ -207,10 +227,9 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
/* Checksum NVRAM. */
if (IS_FWI2_CAPABLE(ha)) {
- uint32_t *iter;
+ __le32 *iter = (__force __le32 *)buf;
uint32_t chksum;
- iter = (uint32_t *)buf;
chksum = 0;
for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
chksum += le32_to_cpu(*iter);
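The loop above sums all little-endian 32-bit words of the NVRAM image except the last; presumably the final word is then chosen so the whole image sums to zero (the remainder of the routine is outside this hunk, so that convention is an assumption). A minimal sketch of the sum:

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Sum of the le32 words of an image, mod 2^32. */
	static u32 nvram_sum(const __le32 *img, size_t words)
	{
		u32 sum = 0;
		size_t i;

		for (i = 0; i < words; i++)
			sum += le32_to_cpu(img[i]);
		return sum;	/* image is typically valid when this is 0 */
	}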
@@ -706,7 +725,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
@@ -724,6 +744,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
qla83xx_idc_unlock(vha, 0);
break;
+ } else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ qla27xx_reset_mpi(vha);
} else {
/* Make sure FC side is not in reset */
WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
@@ -737,6 +759,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
}
+ break;
case 0x2025e:
if (!IS_P3P_TYPE(ha) || vha != base_vha) {
ql_log(ql_log_info, vha, 0x7071,
@@ -1850,9 +1873,6 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- ql_log(ql_log_info, vha, 0x70d6,
- "port speed:%d\n", ha->link_data_rate);
-
return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
}
@@ -1901,9 +1921,8 @@ static char *mode_to_str[] = {
};
#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
-static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
{
- int rc = 0;
enum {
NO_ACTION,
MODE_CHANGE_ACCEPT,
@@ -2176,8 +2195,6 @@ static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
vha->ql2xexchoffld, vha->u_ql2xexchoffld);
break;
}
-
- return rc;
}
static ssize_t
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 97b51c477972..88c0338a2ec7 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -490,7 +490,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
>> 24;
switch (loop_id) {
case 0xFC:
- loop_id = cpu_to_le16(NPH_SNS);
+ loop_id = NPH_SNS;
break;
case 0xFA:
loop_id = vha->mgmt_svr_loop_id;
@@ -691,7 +691,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
* dump and reset the chip.
*/
if (ret) {
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
rval = -EINVAL;
@@ -896,7 +896,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
* doesn't work take FCoE dump and then
* reset the chip.
*/
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
@@ -2042,7 +2042,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->loop_id = piocb_rqst->dataword;
+ fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
sp->type = SRB_FXIOCB_BCMD;
sp->name = "bsg_fx_mgmt";
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index bf1e98f11990..19005710f7f6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -115,7 +115,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t *chunk = (uint32_t *)ha->gid_list;
uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
uint32_t stat;
ulong i, j, timer = 6000000;
@@ -126,26 +126,26 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
- WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
- WRT_REG_WORD(&reg->mailbox1, LSW(addr));
- WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+ wrt_reg_word(&reg->mailbox1, LSW(addr));
+ wrt_reg_word(&reg->mailbox8, MSW(addr));
- WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
- WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+ wrt_reg_word(&reg->mailbox4, MSW(dwords));
+ wrt_reg_word(&reg->mailbox5, LSW(dwords));
- WRT_REG_WORD(&reg->mailbox9, 0);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_word(&reg->mailbox9, 0);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
while (timer--) {
udelay(5);
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
continue;
@@ -155,15 +155,15 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
stat != 0x10 && stat != 0x11) {
/* Clear this intr; it wasn't a mailbox intr */
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
continue;
}
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
break;
}
ha->flags.mbox_int = 1;
@@ -189,13 +189,13 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
int
-qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
- uint32_t ram_dwords, void **nxt)
+qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
+ uint32_t ram_dwords, void **nxt)
{
int rval = QLA_FUNCTION_FAILED;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t *chunk = (uint32_t *)ha->gid_list;
uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
uint32_t stat;
ulong i, j, timer = 6000000;
@@ -206,23 +206,23 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
- WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
- WRT_REG_WORD(&reg->mailbox1, LSW(addr));
- WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
+ wrt_reg_word(&reg->mailbox1, LSW(addr));
+ wrt_reg_word(&reg->mailbox8, MSW(addr));
- WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
- WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_word(&reg->mailbox4, MSW(dwords));
+ wrt_reg_word(&reg->mailbox5, LSW(dwords));
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
while (timer--) {
udelay(5);
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
@@ -231,15 +231,15 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
stat &= 0xff;
if (stat != 0x1 && stat != 0x2 &&
stat != 0x10 && stat != 0x11) {
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
continue;
}
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
break;
}
ha->flags.mbox_int = 1;
@@ -254,9 +254,9 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
return rval;
}
for (j = 0; j < dwords; j++) {
- ram[i + j] =
- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
- chunk[j] : swab32(chunk[j]);
+ ram[i + j] = (__force __be32)
+ ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+ chunk[j] : swab32(chunk[j]));
}
}
@@ -265,8 +265,8 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
static int
-qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
- uint32_t cram_size, void **nxt)
+qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
+ uint32_t cram_size, void **nxt)
{
int rval;
@@ -286,16 +286,16 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
return rval;
}
-static uint32_t *
+static __be32 *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
- uint32_t count, uint32_t *buf)
+ uint32_t count, __be32 *buf)
{
- uint32_t __iomem *dmp_reg;
+ __le32 __iomem *dmp_reg;
- WRT_REG_DWORD(&reg->iobase_addr, iobase);
+ wrt_reg_dword(&reg->iobase_addr, iobase);
dmp_reg = &reg->iobase_window;
for ( ; count--; dmp_reg++)
- *buf++ = htonl(RD_REG_DWORD(dmp_reg));
+ *buf++ = htonl(rd_reg_dword(dmp_reg));
return buf;
}
@@ -303,11 +303,11 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
{
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);
/* 100 usec delay is sufficient enough for hardware to pause RISC */
udelay(100);
- if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
+ if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
}
@@ -324,17 +324,17 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
* Driver can proceed with the reset sequence after waiting
* for a timeout period.
*/
- WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
- WRT_REG_DWORD(&reg->ctrl_status,
+ wrt_reg_dword(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
@@ -342,19 +342,19 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
/* Wait for soft-reset to complete. */
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) &
+ if ((rd_reg_dword(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr); /* PCI Posting. */
- for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(10);
@@ -368,7 +368,7 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
}
static int
-qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
+qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
uint32_t ram_words, void **nxt)
{
int rval;
@@ -376,7 +376,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
uint16_t mb0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint16_t *dump = (uint16_t *)ha->gid_list;
+ __le16 *dump = (__force __le16 *)ha->gid_list;
rval = QLA_SUCCESS;
mb0 = 0;
@@ -399,11 +399,11 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
WRT_MAILBOX_REG(ha, reg, 4, words);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_INT) {
stat &= 0xff;
@@ -414,10 +414,10 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
mb0 = RD_MAILBOX_REG(ha, reg, 0);
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->semaphore, 0);
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
} else if (stat == 0x10 || stat == 0x11) {
set_bit(MBX_INTERRUPT,
@@ -425,15 +425,15 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
mb0 = RD_MAILBOX_REG(ha, reg, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
}
/* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
udelay(5);
}
@@ -441,7 +441,8 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
rval = mb0 & MBS_MASK;
for (idx = 0; idx < words; idx++)
- ram[cnt + idx] = swab16(dump[idx]);
+ ram[cnt + idx] =
+ cpu_to_be16(le16_to_cpu(dump[idx]));
} else {
rval = QLA_FUNCTION_FAILED;
}
@@ -453,12 +454,12 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
- uint16_t *buf)
+ __be16 *buf)
{
- uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
+ __le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
for ( ; count--; dmp_reg++)
- *buf++ = htons(RD_REG_WORD(dmp_reg));
+ *buf++ = htons(rd_reg_word(dmp_reg));
}
static inline void *
@@ -472,10 +473,10 @@ qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
}
static inline void *
-qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
uint32_t cnt;
- uint32_t *iter_reg;
+ __be32 *iter_reg;
struct qla2xxx_fce_chain *fcec = ptr;
if (!ha->fce)
@@ -499,7 +500,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
-qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_offld_chain *c = ptr;
@@ -517,11 +518,11 @@ qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ptr += sizeof(struct qla2xxx_offld_chain);
memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
- return (char *)ptr + cpu_to_be32(c->size);
+ return (char *)ptr + be32_to_cpu(c->size);
}
static inline void *
-qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_offld_chain *c = ptr;
@@ -539,12 +540,12 @@ qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ptr += sizeof(struct qla2xxx_offld_chain);
memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
- return (char *)ptr + cpu_to_be32(c->size);
+ return (char *)ptr + be32_to_cpu(c->size);
}
static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
- uint32_t **last_chain)
+ __be32 **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
@@ -591,7 +592,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
}
static inline void *
-qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
@@ -662,7 +663,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
-qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
uint32_t cnt, que_idx;
uint8_t que_cnt;
@@ -685,13 +686,13 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
reg = ISP_QUE_REG(ha, cnt);
que_idx = cnt * 4;
mq->qregs[que_idx] =
- htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
+ htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
mq->qregs[que_idx+1] =
- htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
+ htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
mq->qregs[que_idx+2] =
- htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
+ htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
mq->qregs[que_idx+3] =
- htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
+ htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
}
return ptr + sizeof(struct qla2xxx_mq_chain);
@@ -706,45 +707,47 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
ql_log(ql_log_warn, vha, 0xd000,
"Failed to dump firmware (%x), dump status flags (0x%lx).\n",
rval, ha->fw_dump_cap_flags);
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
} else {
ql_log(ql_log_info, vha, 0xd001,
"Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
}
+void qla2xxx_dump_fw(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ vha->hw->isp_ops->fw_dump(vha);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
/**
* qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
* @vha: HA context
- * @hardware_locked: Called with the hardware_lock
*/
void
-qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla2300_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- uint16_t __iomem *dmp_reg;
- unsigned long flags;
+ __le16 __iomem *dmp_reg;
struct qla2300_fw_dump *fw;
void *nxt;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
-
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ lockdep_assert_held(&ha->hardware_lock);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd002,
"No buffer available for dump.\n");
- goto qla2300_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -752,19 +755,19 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla2300_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp23;
qla2xxx_prep_dump(ha, ha->fw_dump);
rval = QLA_SUCCESS;
- fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+ fw->hccr = htons(rd_reg_word(&reg->hccr));
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
if (IS_QLA2300(ha)) {
for (cnt = 30000;
- (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -772,74 +775,74 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
rval = QLA_FUNCTION_TIMEOUT;
}
} else {
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
udelay(10);
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2300.req_q_in;
- for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
cnt++, dmp_reg++)
- fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2300.mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
cnt++, dmp_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->ctrl_status, 0x40);
+ wrt_reg_word(&reg->ctrl_status, 0x40);
qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x50);
+ wrt_reg_word(&reg->ctrl_status, 0x50);
qla2xxx_read_window(reg, 48, fw->dma_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ wrt_reg_word(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
cnt++, dmp_reg++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->pcr, 0x2000);
+ wrt_reg_word(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
- WRT_REG_WORD(&reg->pcr, 0x2200);
+ wrt_reg_word(&reg->pcr, 0x2200);
qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
- WRT_REG_WORD(&reg->pcr, 0x2400);
+ wrt_reg_word(&reg->pcr, 0x2400);
qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
- WRT_REG_WORD(&reg->pcr, 0x2600);
+ wrt_reg_word(&reg->pcr, 0x2600);
qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
- WRT_REG_WORD(&reg->pcr, 0x2800);
+ wrt_reg_word(&reg->pcr, 0x2800);
qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
- WRT_REG_WORD(&reg->pcr, 0x2A00);
+ wrt_reg_word(&reg->pcr, 0x2A00);
qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
- WRT_REG_WORD(&reg->pcr, 0x2C00);
+ wrt_reg_word(&reg->pcr, 0x2C00);
qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
- WRT_REG_WORD(&reg->pcr, 0x2E00);
+ wrt_reg_word(&reg->pcr, 0x2E00);
qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ wrt_reg_word(&reg->ctrl_status, 0x10);
qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ wrt_reg_word(&reg->ctrl_status, 0x30);
qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
/* Reset RISC. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->ctrl_status) &
+ if ((rd_reg_word(&reg->ctrl_status) &
CSR_ISP_SOFT_RESET) == 0)
break;
@@ -860,12 +863,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Get RISC SRAM. */
if (rval == QLA_SUCCESS)
rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
- sizeof(fw->risc_ram) / 2, &nxt);
+ ARRAY_SIZE(fw->risc_ram), &nxt);
/* Get stack SRAM. */
if (rval == QLA_SUCCESS)
rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
- sizeof(fw->stack_ram) / 2, &nxt);
+ ARRAY_SIZE(fw->stack_ram), &nxt);
/* Get data SRAM. */
if (rval == QLA_SUCCESS)
@@ -876,48 +879,31 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla2xxx_copy_queues(ha, nxt);
qla2xxx_dump_post_process(base_vha, rval);
-
-qla2300_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
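The hunks above swap every sizeof(fw->..._reg) / 2 loop bound for ARRAY_SIZE(). Both compute the same count while the elements are 16 bits wide, but ARRAY_SIZE() stays correct under any element width, which matters here because the same series re-types these arrays (uint16_t to __be16 in the qla_dbg.h hunks below). A minimal stand-alone sketch of the difference, using a plain definition of the macro (the kernel's version adds a type check):

	#include <stdio.h>
	#include <stdint.h>

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	int main(void)
	{
		uint16_t regs16[48];
		uint32_t regs32[48];

		/* sizeof()/2 only equals the element count for 16-bit elements. */
		printf("u16: sizeof/2=%zu ARRAY_SIZE=%zu\n",
		       sizeof(regs16) / 2, ARRAY_SIZE(regs16));
		printf("u32: sizeof/2=%zu ARRAY_SIZE=%zu\n",
		       sizeof(regs32) / 2, ARRAY_SIZE(regs32));
		return 0;
	}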
/**
* qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
* @vha: HA context
- * @hardware_locked: Called with the hardware_lock
*/
void
-qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla2100_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt, timer;
- uint16_t risc_address;
- uint16_t mb0, mb2;
+ uint16_t risc_address = 0;
+ uint16_t mb0 = 0, mb2 = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- uint16_t __iomem *dmp_reg;
- unsigned long flags;
+ __le16 __iomem *dmp_reg;
struct qla2100_fw_dump *fw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- risc_address = 0;
- mb0 = mb2 = 0;
- flags = 0;
-
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ lockdep_assert_held(&ha->hardware_lock);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd004,
"No buffer available for dump.\n");
- goto qla2100_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -925,17 +911,17 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla2100_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp21;
qla2xxx_prep_dump(ha, ha->fw_dump);
rval = QLA_SUCCESS;
- fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+ fw->hccr = htons(rd_reg_word(&reg->hccr));
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
- for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+ for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -944,61 +930,61 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2100.mailbox0;
for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
if (cnt == 8)
dmp_reg = &reg->u_end.isp2200.mailbox8;
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
}
dmp_reg = &reg->u.isp2100.unused_2[0];
- for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
- fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
+ fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ wrt_reg_word(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
+ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->pcr, 0x2000);
+ wrt_reg_word(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
- WRT_REG_WORD(&reg->pcr, 0x2100);
+ wrt_reg_word(&reg->pcr, 0x2100);
qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
- WRT_REG_WORD(&reg->pcr, 0x2200);
+ wrt_reg_word(&reg->pcr, 0x2200);
qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
- WRT_REG_WORD(&reg->pcr, 0x2300);
+ wrt_reg_word(&reg->pcr, 0x2300);
qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
- WRT_REG_WORD(&reg->pcr, 0x2400);
+ wrt_reg_word(&reg->pcr, 0x2400);
qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
- WRT_REG_WORD(&reg->pcr, 0x2500);
+ wrt_reg_word(&reg->pcr, 0x2500);
qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
- WRT_REG_WORD(&reg->pcr, 0x2600);
+ wrt_reg_word(&reg->pcr, 0x2600);
qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
- WRT_REG_WORD(&reg->pcr, 0x2700);
+ wrt_reg_word(&reg->pcr, 0x2700);
qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ wrt_reg_word(&reg->ctrl_status, 0x10);
qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ wrt_reg_word(&reg->ctrl_status, 0x30);
qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
/* Reset the ISP. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
}
for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
@@ -1011,11 +997,11 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Pause RISC. */
if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
- (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
+ (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
for (cnt = 30000;
- (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -1025,13 +1011,13 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (rval == QLA_SUCCESS) {
/* Set memory configuration and timing. */
if (IS_QLA2100(ha))
- WRT_REG_WORD(&reg->mctr, 0xf1);
+ wrt_reg_word(&reg->mctr, 0xf1);
else
- WRT_REG_WORD(&reg->mctr, 0xf2);
- RD_REG_WORD(&reg->mctr); /* PCI Posting. */
+ wrt_reg_word(&reg->mctr, 0xf2);
+ rd_reg_word(&reg->mctr); /* PCI Posting. */
/* Release RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
}
}
@@ -1041,29 +1027,29 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
}
- for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
cnt++, risc_address++) {
WRT_MAILBOX_REG(ha, reg, 1, risc_address);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
for (timer = 6000000; timer != 0; timer--) {
/* Check for pending interrupts. */
- if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
- if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
+ if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
+ if (rd_reg_word(&reg->semaphore) & BIT_0) {
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
mb0 = RD_MAILBOX_REG(ha, reg, 0);
mb2 = RD_MAILBOX_REG(ha, reg, 2);
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->semaphore, 0);
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
udelay(5);
}
@@ -1080,48 +1066,35 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
qla2xxx_dump_post_process(base_vha, rval);
-
-qla2100_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
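Dropping the hardware_locked flag moves the locking contract to the callers: instead of conditionally taking hardware_lock (and hiding that from sparse behind #ifndef __CHECKER__), each dump routine now simply asserts that the lock is held. A minimal sketch of that shape, with illustrative names (demo_lock, do_dump, and trigger_dump are not driver symbols):

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Callee only documents and checks the contract. */
	static void do_dump(void)
	{
		lockdep_assert_held(&demo_lock);
		/* ... touch hardware safely ... */
	}

	/* Every caller takes the lock itself, unconditionally. */
	static void trigger_dump(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		do_dump();
		spin_unlock_irqrestore(&demo_lock, flags);
	}

With lockdep enabled, a call path that forgets the lock now warns at runtime, where the old flag-based scheme could silently run unlocked or double-lock.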
void
-qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla24xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla24xx_fw_dump *fw;
void *nxt;
void *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ lockdep_assert_held(&ha->hardware_lock);
+
if (IS_P3P_TYPE(ha))
return;
- flags = 0;
ha->fw_dump_cap_flags = 0;
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
-
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd006,
"No buffer available for dump.\n");
- goto qla24xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1129,13 +1102,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla24xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp24;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1145,41 +1118,41 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1218,19 +1191,19 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1339,44 +1312,31 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla24xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
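The shadow-register block in this function is one select/read pair repeated with a 0x100000 stride in the select value. A loop formulation would be equivalent, assuming the unrolled order carries no hidden side effect; a sketch using the accessors and register fields from this same patch (read_shadow_regs is an illustrative name, not driver code):

	static void read_shadow_regs(struct device_reg_24xx __iomem *reg,
				     __be32 *shadow, unsigned int n)
	{
		unsigned int i;

		wrt_reg_dword(&reg->iobase_addr, 0x0F70);
		rd_reg_dword(&reg->iobase_addr);	/* flush posted write */

		for (i = 0; i < n; i++) {
			wrt_reg_dword(&reg->iobase_select,
				      0xB0000000 + i * 0x00100000);
			shadow[i] = htonl(rd_reg_dword(&reg->iobase_sdata));
		}
	}

qla24xx dumps 7 shadow registers; the 25xx/81xx/83xx variants below dump 11, so n would differ per ISP.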
void
-qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla25xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla25xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd008,
"No buffer available for dump.\n");
- goto qla25xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1384,14 +1344,14 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla25xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp25;
qla2xxx_prep_dump(ha, ha->fw_dump);
ha->fw_dump->version = htonl(2);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1405,73 +1365,73 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7010, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1535,19 +1495,19 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1665,44 +1625,31 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla25xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla25xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
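Every dumped dword above crosses two byte-order boundaries: the register is little-endian, readl() hands back CPU order, and htonl() stores big-endian into the dump, so dump files compare byte-identical across host endianness. Spelled out as a helper (save_reg is illustrative, not a driver symbol):

	static void save_reg(__be32 *slot, const volatile __le32 __iomem *src)
	{
		u32 cpu_val = rd_reg_dword(src);	/* LE register -> CPU order */

		*slot = htonl(cpu_val);			/* CPU order -> BE dump slot */
	}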
void
-qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla81xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla81xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00a,
"No buffer available for dump.\n");
- goto qla81xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1710,12 +1657,12 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla81xx_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp81;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1729,73 +1676,73 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7010, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1859,19 +1806,19 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1993,57 +1940,44 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla81xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla81xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
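The iter_reg chains above rely on qla24xx_read_window() returning the advanced output pointer, so consecutive windows concatenate into one flat buffer. The helper's implementation isn't shown in this diff; a sketch of the likely shape, for orientation only:

	static __be32 *read_window_sketch(struct device_reg_24xx __iomem *reg,
					  u32 iobase, u32 count, __be32 *buf)
	{
		__le32 __iomem *dmp_reg = &reg->iobase_window;

		wrt_reg_dword(&reg->iobase_addr, iobase);	/* select bank */
		while (count--)
			*buf++ = htonl(rd_reg_dword(dmp_reg++));
		return buf;	/* caller chains the next window here */
	}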
void
-qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla83xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla83xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00c,
"No buffer available for dump!!!\n");
- goto qla83xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
ql_log(ql_log_warn, vha, 0xd00d,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
- goto qla83xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp83;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -2051,24 +1985,24 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
*/
qla24xx_pause_risc(reg, ha);
- WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+ wrt_reg_dword(&reg->iobase_addr, 0x6000);
dmp_reg = &reg->iobase_window;
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
dmp_reg = &reg->unused_4_1[0];
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
- WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+ wrt_reg_dword(&reg->iobase_addr, 0x6010);
dmp_reg = &reg->unused_4_1[2];
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
/* select PCR and disable ecc checking and correction */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
/* Host/Risc registers. */
iter_reg = fw->host_risc_reg;
@@ -2077,73 +2011,73 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7040, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -2239,19 +2173,19 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -2457,16 +2391,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
+ for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--)
udelay(5);
if (!cnt) {
@@ -2507,14 +2441,6 @@ copy_queue:
qla83xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla83xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
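The recurring rd_reg_*() immediately after a wrt_reg_*() (the "PCI Posting." comments) is the posted-write flush idiom: a PCI write can sit buffered in a bridge until a read from the same device forces it out, so any write that must land before a delay is read back first. As a self-contained helper (illustrative, not driver code):

	#include <linux/delay.h>

	static void clear_risc_int_flushed(struct device_reg_2xxx __iomem *reg)
	{
		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
		rd_reg_word(&reg->hccr);	/* force the posted write out */
		udelay(10);	/* the delay now times the device, not a buffer */
	}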
/****************************************************************************/
@@ -2735,7 +2661,7 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- uint16_t __iomem *mbx_reg;
+ __le16 __iomem *mbx_reg;
if (!ql_mask_match(level))
return;
@@ -2750,7 +2676,7 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
ql_dbg(level, vha, id, "Mailbox registers:\n");
for (i = 0; i < 6; i++, mbx_reg++)
ql_dbg(level, vha, id,
- "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
+ "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg));
}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 433e95502808..54ed020e6f75 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -12,205 +12,205 @@
*/
struct qla2300_fw_dump {
- uint16_t hccr;
- uint16_t pbiu_reg[8];
- uint16_t risc_host_reg[8];
- uint16_t mailbox_reg[32];
- uint16_t resp_dma_reg[32];
- uint16_t dma_reg[48];
- uint16_t risc_hdw_reg[16];
- uint16_t risc_gp0_reg[16];
- uint16_t risc_gp1_reg[16];
- uint16_t risc_gp2_reg[16];
- uint16_t risc_gp3_reg[16];
- uint16_t risc_gp4_reg[16];
- uint16_t risc_gp5_reg[16];
- uint16_t risc_gp6_reg[16];
- uint16_t risc_gp7_reg[16];
- uint16_t frame_buf_hdw_reg[64];
- uint16_t fpm_b0_reg[64];
- uint16_t fpm_b1_reg[64];
- uint16_t risc_ram[0xf800];
- uint16_t stack_ram[0x1000];
- uint16_t data_ram[1];
+ __be16 hccr;
+ __be16 pbiu_reg[8];
+ __be16 risc_host_reg[8];
+ __be16 mailbox_reg[32];
+ __be16 resp_dma_reg[32];
+ __be16 dma_reg[48];
+ __be16 risc_hdw_reg[16];
+ __be16 risc_gp0_reg[16];
+ __be16 risc_gp1_reg[16];
+ __be16 risc_gp2_reg[16];
+ __be16 risc_gp3_reg[16];
+ __be16 risc_gp4_reg[16];
+ __be16 risc_gp5_reg[16];
+ __be16 risc_gp6_reg[16];
+ __be16 risc_gp7_reg[16];
+ __be16 frame_buf_hdw_reg[64];
+ __be16 fpm_b0_reg[64];
+ __be16 fpm_b1_reg[64];
+ __be16 risc_ram[0xf800];
+ __be16 stack_ram[0x1000];
+ __be16 data_ram[1];
};
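Re-typing the dump buffers as __be16/__be32 is what lets sparse (make C=1) police byte order: a store that skips the conversion helper draws an "incorrect type" warning. A minimal illustration with a hypothetical one-field struct:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_dump {
		__be16 reg;
	};

	static void fill(struct demo_dump *d, u16 cpu_val)
	{
		d->reg = cpu_to_be16(cpu_val);	/* correct, sparse-clean */
		/* d->reg = cpu_val; would warn: incorrect type in assignment */
	}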
struct qla2100_fw_dump {
- uint16_t hccr;
- uint16_t pbiu_reg[8];
- uint16_t mailbox_reg[32];
- uint16_t dma_reg[48];
- uint16_t risc_hdw_reg[16];
- uint16_t risc_gp0_reg[16];
- uint16_t risc_gp1_reg[16];
- uint16_t risc_gp2_reg[16];
- uint16_t risc_gp3_reg[16];
- uint16_t risc_gp4_reg[16];
- uint16_t risc_gp5_reg[16];
- uint16_t risc_gp6_reg[16];
- uint16_t risc_gp7_reg[16];
- uint16_t frame_buf_hdw_reg[16];
- uint16_t fpm_b0_reg[64];
- uint16_t fpm_b1_reg[64];
- uint16_t risc_ram[0xf000];
+ __be16 hccr;
+ __be16 pbiu_reg[8];
+ __be16 mailbox_reg[32];
+ __be16 dma_reg[48];
+ __be16 risc_hdw_reg[16];
+ __be16 risc_gp0_reg[16];
+ __be16 risc_gp1_reg[16];
+ __be16 risc_gp2_reg[16];
+ __be16 risc_gp3_reg[16];
+ __be16 risc_gp4_reg[16];
+ __be16 risc_gp5_reg[16];
+ __be16 risc_gp6_reg[16];
+ __be16 risc_gp7_reg[16];
+ __be16 frame_buf_hdw_reg[16];
+ __be16 fpm_b0_reg[64];
+ __be16 fpm_b1_reg[64];
+ __be16 risc_ram[0xf000];
};
struct qla24xx_fw_dump {
- uint32_t host_status;
- uint32_t host_reg[32];
- uint32_t shadow_reg[7];
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[16];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[16];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[112];
- uint32_t fpm_hdw_reg[192];
- uint32_t fb_hdw_reg[176];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_reg[32];
+ __be32 shadow_reg[7];
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[16];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[16];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[112];
+ __be32 fpm_hdw_reg[192];
+ __be32 fb_hdw_reg[176];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla25xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[32];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t aseq_gp_reg[128];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[192];
- uint32_t fb_hdw_reg[192];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[32];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 aseq_gp_reg[128];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[192];
+ __be32 fb_hdw_reg[192];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla81xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[32];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t aseq_gp_reg[128];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[224];
- uint32_t fb_hdw_reg[208];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[32];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 aseq_gp_reg[128];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[224];
+ __be32 fb_hdw_reg[208];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla83xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[48];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[256];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t xseq_2_reg[16];
- uint32_t rseq_gp_reg[256];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t rseq_3_reg[16];
- uint32_t aseq_gp_reg[256];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t aseq_3_reg[16];
- uint32_t cmd_dma_reg[64];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[256];
- uint32_t rq0_array_reg[256];
- uint32_t rq1_array_reg[256];
- uint32_t rp0_array_reg[256];
- uint32_t rp1_array_reg[256];
- uint32_t queue_control_reg[16];
- uint32_t fb_hdw_reg[432];
- uint32_t at0_array_reg[128];
- uint32_t code_ram[0x2400];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[48];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[256];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 xseq_2_reg[16];
+ __be32 rseq_gp_reg[256];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 rseq_3_reg[16];
+ __be32 aseq_gp_reg[256];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 aseq_3_reg[16];
+ __be32 cmd_dma_reg[64];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[256];
+ __be32 rq0_array_reg[256];
+ __be32 rq1_array_reg[256];
+ __be32 rp0_array_reg[256];
+ __be32 rp1_array_reg[256];
+ __be32 queue_control_reg[16];
+ __be32 fb_hdw_reg[432];
+ __be32 at0_array_reg[128];
+ __be32 code_ram[0x2400];
+ __be32 ext_mem[1];
};
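Each dump struct ends in a one-element array (ext_mem[1], data_ram[1]) serving as a variable-length tail, the pre-C99 spelling of a flexible array member, so allocations must size the tail explicitly. A sketch of the arithmetic, with illustrative names:

	#include <linux/slab.h>

	struct demo_dump {
		__be32 fixed[4];
		__be32 ext_mem[1];	/* variable tail: really ext_mem[n] */
	};

	static struct demo_dump *alloc_demo_dump(size_t ext_words)
	{
		size_t bytes = sizeof(struct demo_dump) +
			       (ext_words - 1) * sizeof(__be32);

		return kzalloc(bytes, GFP_KERNEL);
	}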
#define EFT_NUM_BUFFERS 4
@@ -223,44 +223,45 @@ struct qla83xx_fw_dump {
#define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b))
struct qla2xxx_fce_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t size;
- uint32_t addr_l;
- uint32_t addr_h;
- uint32_t eregs[8];
+ __be32 size;
+ __be32 addr_l;
+ __be32 addr_h;
+ __be32 eregs[8];
};
/* used by exchange off load and extended login offload */
struct qla2xxx_offld_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t size;
- u64 addr;
+ __be32 size;
+ __be32 reserved;
+ __be64 addr;
};
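The new reserved member makes the padding before the 8-byte addr explicit, so the record layout no longer depends on the compiler inserting it invisibly. Layouts like this can be pinned at compile time; a sketch, assuming natural alignment and the field order shown above:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	static inline void offld_chain_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct qla2xxx_offld_chain, addr) != 16);
		BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
	}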
struct qla2xxx_mq_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t count;
- uint32_t qregs[4 * QLA_MQ_SIZE];
+ __be32 count;
+ __be32 qregs[4 * QLA_MQ_SIZE];
};
struct qla2xxx_mqueue_header {
- uint32_t queue;
+ __be32 queue;
#define TYPE_REQUEST_QUEUE 0x1
#define TYPE_RESPONSE_QUEUE 0x2
#define TYPE_ATIO_QUEUE 0x3
- uint32_t number;
- uint32_t size;
+ __be32 number;
+ __be32 size;
};
struct qla2xxx_mqueue_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
};
#define DUMP_CHAIN_VARIANT 0x80000000
@@ -273,28 +274,28 @@ struct qla2xxx_mqueue_chain {
struct qla2xxx_fw_dump {
uint8_t signature[4];
- uint32_t version;
+ __be32 version;
- uint32_t fw_major_version;
- uint32_t fw_minor_version;
- uint32_t fw_subminor_version;
- uint32_t fw_attributes;
+ __be32 fw_major_version;
+ __be32 fw_minor_version;
+ __be32 fw_subminor_version;
+ __be32 fw_attributes;
- uint32_t vendor;
- uint32_t device;
- uint32_t subsystem_vendor;
- uint32_t subsystem_device;
+ __be32 vendor;
+ __be32 device;
+ __be32 subsystem_vendor;
+ __be32 subsystem_device;
- uint32_t fixed_size;
- uint32_t mem_size;
- uint32_t req_q_size;
- uint32_t rsp_q_size;
+ __be32 fixed_size;
+ __be32 mem_size;
+ __be32 req_q_size;
+ __be32 rsp_q_size;
- uint32_t eft_size;
- uint32_t eft_addr_l;
- uint32_t eft_addr_h;
+ __be32 eft_size;
+ __be32 eft_addr_l;
+ __be32 eft_addr_h;
- uint32_t header_size;
+ __be32 header_size;
union {
struct qla2100_fw_dump isp21;
@@ -369,7 +370,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
-extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, __be32 *,
uint32_t, void **);
extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 47c7a56438b5..42dbf90d4651 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -128,15 +128,50 @@ static inline uint32_t make_handle(uint16_t x, uint16_t y)
* I/O register
*/
-#define RD_REG_BYTE(addr) readb(addr)
-#define RD_REG_WORD(addr) readw(addr)
-#define RD_REG_DWORD(addr) readl(addr)
-#define RD_REG_BYTE_RELAXED(addr) readb_relaxed(addr)
-#define RD_REG_WORD_RELAXED(addr) readw_relaxed(addr)
-#define RD_REG_DWORD_RELAXED(addr) readl_relaxed(addr)
-#define WRT_REG_BYTE(addr, data) writeb(data, addr)
-#define WRT_REG_WORD(addr, data) writew(data, addr)
-#define WRT_REG_DWORD(addr, data) writel(data, addr)
+static inline u8 rd_reg_byte(const volatile u8 __iomem *addr)
+{
+ return readb(addr);
+}
+
+static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
+{
+ return readw(addr);
+}
+
+static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline u8 rd_reg_byte_relaxed(const volatile u8 __iomem *addr)
+{
+ return readb_relaxed(addr);
+}
+
+static inline u16 rd_reg_word_relaxed(const volatile __le16 __iomem *addr)
+{
+ return readw_relaxed(addr);
+}
+
+static inline u32 rd_reg_dword_relaxed(const volatile __le32 __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+
+static inline void wrt_reg_byte(volatile u8 __iomem *addr, u8 data)
+{
+ return writeb(data, addr);
+}
+
+static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
+{
+ return writew(data, addr);
+}
+
+static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
+{
+ return writel(data, addr);
+}
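Converting the macros to inline functions is what gives the __le16/__le32 annotations teeth: readw() behind a macro accepts any pointer, while rd_reg_word() now rejects a wrong-width register at compile time. For example, against the device_reg_2xxx layout below (the demo function name is illustrative):

	static u16 demo_read_ctrl_status(struct device_reg_2xxx __iomem *reg)
	{
		/* rd_reg_dword(&reg->ctrl_status) fails to compile here,
		 * where the old RD_REG_DWORD() would silently over-read. */
		return rd_reg_word(&reg->ctrl_status);	/* __le16: OK */
	}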
/*
* ISP83XX specific remote register addresses
@@ -469,7 +504,7 @@ struct srb_iocb {
u32 rx_size;
dma_addr_t els_plogi_pyld_dma;
dma_addr_t els_resp_pyld_dma;
- uint32_t fw_status[3];
+ __le32 fw_status[3];
__le16 comp_status;
__le16 len;
} els_plogi;
@@ -520,8 +555,8 @@ struct srb_iocb {
#define MAX_IOCB_MB_REG 28
#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
struct {
- __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
- __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
+ u16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
+ u16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
void *out, *in;
dma_addr_t out_dma, in_dma;
struct completion comp;
@@ -532,7 +567,7 @@ struct srb_iocb {
} nack;
struct {
__le16 comp_status;
- uint16_t rsp_pyld_len;
+ __le16 rsp_pyld_len;
uint8_t aen_op;
void *desc;
@@ -663,23 +698,23 @@ struct msg_echo_lb {
* ISP I/O Register Set structure definitions.
*/
struct device_reg_2xxx {
- uint16_t flash_address; /* Flash BIOS address */
- uint16_t flash_data; /* Flash BIOS data */
- uint16_t unused_1[1]; /* Gap */
- uint16_t ctrl_status; /* Control/Status */
+ __le16 flash_address; /* Flash BIOS address */
+ __le16 flash_data; /* Flash BIOS data */
+ __le16 unused_1[1]; /* Gap */
+ __le16 ctrl_status; /* Control/Status */
#define CSR_FLASH_64K_BANK BIT_3 /* Flash upper 64K bank select */
#define CSR_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable */
#define CSR_ISP_SOFT_RESET BIT_0 /* ISP soft reset */
- uint16_t ictrl; /* Interrupt control */
+ __le16 ictrl; /* Interrupt control */
#define ICR_EN_INT BIT_15 /* ISP enable interrupts. */
#define ICR_EN_RISC BIT_3 /* ISP enable RISC interrupts. */
- uint16_t istatus; /* Interrupt status */
+ __le16 istatus; /* Interrupt status */
#define ISR_RISC_INT BIT_3 /* RISC interrupt */
- uint16_t semaphore; /* Semaphore */
- uint16_t nvram; /* NVRAM register. */
+ __le16 semaphore; /* Semaphore */
+ __le16 nvram; /* NVRAM register. */
#define NVR_DESELECT 0
#define NVR_BUSY BIT_15
#define NVR_WRT_ENABLE BIT_14 /* Write enable */
@@ -693,80 +728,80 @@ struct device_reg_2xxx {
union {
struct {
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t unused_2[59]; /* Gap */
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 unused_2[59]; /* Gap */
} __attribute__((packed)) isp2100;
struct {
/* Request Queue */
- uint16_t req_q_in; /* In-Pointer */
- uint16_t req_q_out; /* Out-Pointer */
+ __le16 req_q_in; /* In-Pointer */
+ __le16 req_q_out; /* Out-Pointer */
/* Response Queue */
- uint16_t rsp_q_in; /* In-Pointer */
- uint16_t rsp_q_out; /* Out-Pointer */
+ __le16 rsp_q_in; /* In-Pointer */
+ __le16 rsp_q_out; /* Out-Pointer */
/* RISC to Host Status */
- uint32_t host_status;
+ __le32 host_status;
#define HSR_RISC_INT BIT_15 /* RISC interrupt */
#define HSR_RISC_PAUSED BIT_8 /* RISC Paused */
/* Host to Host Semaphore */
- uint16_t host_semaphore;
- uint16_t unused_3[17]; /* Gap */
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23;
- uint16_t mailbox24;
- uint16_t mailbox25;
- uint16_t mailbox26;
- uint16_t mailbox27;
- uint16_t mailbox28;
- uint16_t mailbox29;
- uint16_t mailbox30;
- uint16_t mailbox31;
- uint16_t fb_cmd;
- uint16_t unused_4[10]; /* Gap */
+ __le16 host_semaphore;
+ __le16 unused_3[17]; /* Gap */
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23;
+ __le16 mailbox24;
+ __le16 mailbox25;
+ __le16 mailbox26;
+ __le16 mailbox27;
+ __le16 mailbox28;
+ __le16 mailbox29;
+ __le16 mailbox30;
+ __le16 mailbox31;
+ __le16 fb_cmd;
+ __le16 unused_4[10]; /* Gap */
} __attribute__((packed)) isp2300;
} u;
- uint16_t fpm_diag_config;
- uint16_t unused_5[0x4]; /* Gap */
- uint16_t risc_hw;
- uint16_t unused_5_1; /* Gap */
- uint16_t pcr; /* Processor Control Register. */
- uint16_t unused_6[0x5]; /* Gap */
- uint16_t mctr; /* Memory Configuration and Timing. */
- uint16_t unused_7[0x3]; /* Gap */
- uint16_t fb_cmd_2100; /* Unused on 23XX */
- uint16_t unused_8[0x3]; /* Gap */
- uint16_t hccr; /* Host command & control register. */
+ __le16 fpm_diag_config;
+ __le16 unused_5[0x4]; /* Gap */
+ __le16 risc_hw;
+ __le16 unused_5_1; /* Gap */
+ __le16 pcr; /* Processor Control Register. */
+ __le16 unused_6[0x5]; /* Gap */
+ __le16 mctr; /* Memory Configuration and Timing. */
+ __le16 unused_7[0x3]; /* Gap */
+ __le16 fb_cmd_2100; /* Unused on 23XX */
+ __le16 unused_8[0x3]; /* Gap */
+ __le16 hccr; /* Host command & control register. */
#define HCCR_HOST_INT BIT_7 /* Host interrupt bit */
#define HCCR_RISC_PAUSE BIT_5 /* Pause mode bit */
/* HCCR commands */
@@ -779,9 +814,9 @@ struct device_reg_2xxx {
#define HCCR_DISABLE_PARITY_PAUSE 0x4001 /* Disable parity error RISC pause. */
#define HCCR_ENABLE_PARITY 0xA000 /* Enable PARITY interrupt */
- uint16_t unused_9[5]; /* Gap */
- uint16_t gpiod; /* GPIO Data register. */
- uint16_t gpioe; /* GPIO Enable register. */
+ __le16 unused_9[5]; /* Gap */
+ __le16 gpiod; /* GPIO Data register. */
+ __le16 gpioe; /* GPIO Enable register. */
#define GPIO_LED_MASK 0x00C0
#define GPIO_LED_GREEN_OFF_AMBER_OFF 0x0000
#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040
@@ -793,95 +828,95 @@ struct device_reg_2xxx {
union {
struct {
- uint16_t unused_10[8]; /* Gap */
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23; /* Also probe reg. */
+ __le16 unused_10[8]; /* Gap */
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23; /* Also probe reg. */
} __attribute__((packed)) isp2200;
} u_end;
};
struct device_reg_25xxmq {
- uint32_t req_q_in;
- uint32_t req_q_out;
- uint32_t rsp_q_in;
- uint32_t rsp_q_out;
- uint32_t atio_q_in;
- uint32_t atio_q_out;
+ __le32 req_q_in;
+ __le32 req_q_out;
+ __le32 rsp_q_in;
+ __le32 rsp_q_out;
+ __le32 atio_q_in;
+ __le32 atio_q_out;
};
struct device_reg_fx00 {
- uint32_t mailbox0; /* 00 */
- uint32_t mailbox1; /* 04 */
- uint32_t mailbox2; /* 08 */
- uint32_t mailbox3; /* 0C */
- uint32_t mailbox4; /* 10 */
- uint32_t mailbox5; /* 14 */
- uint32_t mailbox6; /* 18 */
- uint32_t mailbox7; /* 1C */
- uint32_t mailbox8; /* 20 */
- uint32_t mailbox9; /* 24 */
- uint32_t mailbox10; /* 28 */
- uint32_t mailbox11;
- uint32_t mailbox12;
- uint32_t mailbox13;
- uint32_t mailbox14;
- uint32_t mailbox15;
- uint32_t mailbox16;
- uint32_t mailbox17;
- uint32_t mailbox18;
- uint32_t mailbox19;
- uint32_t mailbox20;
- uint32_t mailbox21;
- uint32_t mailbox22;
- uint32_t mailbox23;
- uint32_t mailbox24;
- uint32_t mailbox25;
- uint32_t mailbox26;
- uint32_t mailbox27;
- uint32_t mailbox28;
- uint32_t mailbox29;
- uint32_t mailbox30;
- uint32_t mailbox31;
- uint32_t aenmailbox0;
- uint32_t aenmailbox1;
- uint32_t aenmailbox2;
- uint32_t aenmailbox3;
- uint32_t aenmailbox4;
- uint32_t aenmailbox5;
- uint32_t aenmailbox6;
- uint32_t aenmailbox7;
+ __le32 mailbox0; /* 00 */
+ __le32 mailbox1; /* 04 */
+ __le32 mailbox2; /* 08 */
+ __le32 mailbox3; /* 0C */
+ __le32 mailbox4; /* 10 */
+ __le32 mailbox5; /* 14 */
+ __le32 mailbox6; /* 18 */
+ __le32 mailbox7; /* 1C */
+ __le32 mailbox8; /* 20 */
+ __le32 mailbox9; /* 24 */
+ __le32 mailbox10; /* 28 */
+ __le32 mailbox11;
+ __le32 mailbox12;
+ __le32 mailbox13;
+ __le32 mailbox14;
+ __le32 mailbox15;
+ __le32 mailbox16;
+ __le32 mailbox17;
+ __le32 mailbox18;
+ __le32 mailbox19;
+ __le32 mailbox20;
+ __le32 mailbox21;
+ __le32 mailbox22;
+ __le32 mailbox23;
+ __le32 mailbox24;
+ __le32 mailbox25;
+ __le32 mailbox26;
+ __le32 mailbox27;
+ __le32 mailbox28;
+ __le32 mailbox29;
+ __le32 mailbox30;
+ __le32 mailbox31;
+ __le32 aenmailbox0;
+ __le32 aenmailbox1;
+ __le32 aenmailbox2;
+ __le32 aenmailbox3;
+ __le32 aenmailbox4;
+ __le32 aenmailbox5;
+ __le32 aenmailbox6;
+ __le32 aenmailbox7;
/* Request Queue. */
- uint32_t req_q_in; /* A0 - Request Queue In-Pointer */
- uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */
+ __le32 req_q_in; /* A0 - Request Queue In-Pointer */
+ __le32 req_q_out; /* A4 - Request Queue Out-Pointer */
/* Response Queue. */
- uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */
- uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */
+ __le32 rsp_q_in; /* A8 - Response Queue In-Pointer */
+ __le32 rsp_q_out; /* AC - Response Queue Out-Pointer */
/* Init values shadowed on FW Up Event */
- uint32_t initval0; /* B0 */
- uint32_t initval1; /* B4 */
- uint32_t initval2; /* B8 */
- uint32_t initval3; /* BC */
- uint32_t initval4; /* C0 */
- uint32_t initval5; /* C4 */
- uint32_t initval6; /* C8 */
- uint32_t initval7; /* CC */
- uint32_t fwheartbeat; /* D0 */
- uint32_t pseudoaen; /* D4 */
+ __le32 initval0; /* B0 */
+ __le32 initval1; /* B4 */
+ __le32 initval2; /* B8 */
+ __le32 initval3; /* BC */
+ __le32 initval4; /* C0 */
+ __le32 initval5; /* C4 */
+ __le32 initval6; /* C8 */
+ __le32 initval7; /* CC */
+ __le32 fwheartbeat; /* D0 */
+ __le32 pseudoaen; /* D4 */
};
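
Hedged aside (not part of the hunks shown here): the queue pointers in device_reg_25xxmq and the fx00 mailboxes above become __le32, and the series' lower-cased accessors are assumed to be thin readl()/writel() wrappers, roughly:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch only: readl()/writel() already transfer little-endian bus
 * data in CPU order, so the helpers add no byte swap; the __le32
 * annotation exists so sparse can flag unconverted accesses. */
static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr)
{
	return readl(addr);
}

static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
{
	writel(data, addr);
}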
@@ -921,18 +956,18 @@ typedef union {
&(reg)->u_end.isp2200.mailbox8 + (num) - 8) : \
&(reg)->u.isp2300.mailbox0 + (num))
#define RD_MAILBOX_REG(ha, reg, num) \
- RD_REG_WORD(MAILBOX_REG(ha, reg, num))
+ rd_reg_word(MAILBOX_REG(ha, reg, num))
#define WRT_MAILBOX_REG(ha, reg, num, data) \
- WRT_REG_WORD(MAILBOX_REG(ha, reg, num), data)
+ wrt_reg_word(MAILBOX_REG(ha, reg, num), data)
#define FB_CMD_REG(ha, reg) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
&(reg)->fb_cmd_2100 : \
&(reg)->u.isp2300.fb_cmd)
#define RD_FB_CMD_REG(ha, reg) \
- RD_REG_WORD(FB_CMD_REG(ha, reg))
+ rd_reg_word(FB_CMD_REG(ha, reg))
#define WRT_FB_CMD_REG(ha, reg, data) \
- WRT_REG_WORD(FB_CMD_REG(ha, reg), data)
+ wrt_reg_word(FB_CMD_REG(ha, reg), data)
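
Illustrative use of the renamed macros (hypothetical helper; rd_reg_word()/wrt_reg_word() are assumed to wrap readw()/writew() the same way as the dword variants):

/* Hypothetical sketch: echo mailbox 0 on a 2xxx-series board. */
static void qla2x00_mbx_echo(struct qla_hw_data *ha,
			     struct device_reg_2xxx __iomem *reg)
{
	u16 mb0 = RD_MAILBOX_REG(ha, reg, 0);	/* CPU-order value */

	WRT_MAILBOX_REG(ha, reg, 0, mb0);
}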
typedef struct {
uint32_t out_mb; /* outbound from driver */
@@ -1316,7 +1351,7 @@ typedef struct {
uint8_t port_id[4];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
- uint16_t execution_throttle;
+ __le16 execution_throttle;
uint16_t execution_count;
uint8_t reset_count;
uint8_t reserved_2;
@@ -1402,9 +1437,9 @@ typedef struct {
*/
uint8_t firmware_options[2];
- uint16_t frame_payload_size;
- uint16_t max_iocb_allocation;
- uint16_t execution_throttle;
+ __le16 frame_payload_size;
+ __le16 max_iocb_allocation;
+ __le16 execution_throttle;
uint8_t retry_count;
uint8_t retry_delay; /* unused */
uint8_t port_name[WWN_SIZE]; /* Big endian. */
@@ -1413,17 +1448,17 @@ typedef struct {
uint8_t login_timeout;
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t request_q_outpointer;
- uint16_t response_q_inpointer;
- uint16_t request_q_length;
- uint16_t response_q_length;
- __le64 request_q_address __packed;
- __le64 response_q_address __packed;
+ __le16 request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_length;
+ __le16 response_q_length;
+ __le64 request_q_address __packed;
+ __le64 response_q_address __packed;
- uint16_t lun_enables;
+ __le16 lun_enables;
uint8_t command_resource_count;
uint8_t immediate_notify_resource_count;
- uint16_t timeout;
+ __le16 timeout;
uint8_t reserved_2[2];
/*
@@ -1571,8 +1606,8 @@ typedef struct {
uint8_t firmware_options[2];
uint16_t frame_payload_size;
- uint16_t max_iocb_allocation;
- uint16_t execution_throttle;
+ __le16 max_iocb_allocation;
+ __le16 execution_throttle;
uint8_t retry_count;
uint8_t retry_delay; /* unused */
uint8_t port_name[WWN_SIZE]; /* Big endian. */
@@ -1696,7 +1731,7 @@ typedef struct {
uint8_t reset_delay;
uint8_t port_down_retry_count;
uint8_t boot_id_number;
- uint16_t max_luns_per_target;
+ __le16 max_luns_per_target;
uint8_t fcode_boot_port_name[WWN_SIZE];
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
@@ -1802,7 +1837,7 @@ struct atio {
};
typedef union {
- uint16_t extended;
+ __le16 extended;
struct {
uint8_t reserved;
uint8_t standard;
@@ -1828,18 +1863,18 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
target_id_t target; /* SCSI ID */
- uint16_t lun; /* SCSI LUN */
- uint16_t control_flags; /* Control flags. */
+ __le16 lun; /* SCSI LUN */
+ __le16 control_flags; /* Control flags. */
#define CF_WRITE BIT_6
#define CF_READ BIT_5
#define CF_SIMPLE_TAG BIT_3
#define CF_ORDERED_TAG BIT_2
#define CF_HEAD_TAG BIT_1
uint16_t reserved_1;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
union {
struct dsd32 dsd32[3];
struct dsd64 dsd64[2];
@@ -1857,11 +1892,11 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
target_id_t target; /* SCSI ID */
- uint16_t lun; /* SCSI LUN */
- uint16_t control_flags; /* Control flags. */
+ __le16 lun; /* SCSI LUN */
+ __le16 control_flags; /* Control flags. */
uint16_t reserved_1;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
uint32_t byte_count; /* Total byte count. */
struct dsd64 dsd[2];
@@ -1923,7 +1958,7 @@ struct crc_context {
__le16 guard_seed; /* Initial Guard Seed */
__le16 prot_opts; /* Requested Data Protection Mode */
__le16 blk_size; /* Data size in bytes */
- uint16_t runt_blk_guard; /* Guard value for runt block (tape
+ __le16 runt_blk_guard; /* Guard value for runt block (tape
* only) */
__le32 byte_count; /* Total byte count/ total data
* transfer count */
@@ -1976,13 +2011,13 @@ typedef struct {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t scsi_status; /* SCSI status. */
- uint16_t comp_status; /* Completion status. */
- uint16_t state_flags; /* State flags. */
- uint16_t status_flags; /* Status flags. */
- uint16_t rsp_info_len; /* Response Info Length. */
- uint16_t req_sense_length; /* Request sense data length. */
- uint32_t residual_length; /* Residual transfer length. */
+ __le16 scsi_status; /* SCSI status. */
+ __le16 comp_status; /* Completion status. */
+ __le16 state_flags; /* State flags. */
+ __le16 status_flags; /* Status flags. */
+ __le16 rsp_info_len; /* Response Info Length. */
+ __le16 req_sense_length; /* Request sense data length. */
+ __le32 residual_length; /* Residual transfer length. */
uint8_t rsp_info[8]; /* FCP response information. */
uint8_t req_sense_data[32]; /* Request sense data. */
} sts_entry_t;
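
Why the response-ring fields switch to __le16/__le32: entries are DMA'd by the ISP in little endian and must be converted exactly once on completion. A hedged sketch of the consuming side:

/* Sketch only: pull CPU-order values out of a completed entry. */
static void qla2x00_unpack_status(const sts_entry_t *pkt,
				  u16 *comp_status, u32 *resid)
{
	*comp_status = le16_to_cpu(pkt->comp_status);
	*resid = le32_to_cpu(pkt->residual_length);
}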
@@ -2114,8 +2149,8 @@ typedef struct {
/* clear port changed, */
/* use sequence number. */
uint8_t reserved_1;
- uint16_t sequence_number; /* Sequence number of event */
- uint16_t lun; /* SCSI LUN */
+ __le16 sequence_number; /* Sequence number of event */
+ __le16 lun; /* SCSI LUN */
uint8_t reserved_2[48];
} mrk_entry_t;
@@ -2130,19 +2165,19 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle1; /* System handle. */
target_id_t loop_id;
- uint16_t status;
- uint16_t control_flags; /* Control flags. */
+ __le16 status;
+ __le16 control_flags; /* Control flags. */
uint16_t reserved2;
- uint16_t timeout;
- uint16_t cmd_dsd_count;
- uint16_t total_dsd_count;
+ __le16 timeout;
+ __le16 cmd_dsd_count;
+ __le16 total_dsd_count;
uint8_t type;
uint8_t r_ctl;
- uint16_t rx_id;
+ __le16 rx_id;
uint16_t reserved3;
uint32_t handle2;
- uint32_t rsp_bytecount;
- uint32_t req_bytecount;
+ __le32 rsp_bytecount;
+ __le32 req_bytecount;
struct dsd64 req_dsd;
struct dsd64 rsp_dsd;
} ms_iocb_entry_t;
@@ -2170,20 +2205,20 @@ struct mbx_entry {
uint32_t handle;
target_id_t loop_id;
- uint16_t status;
- uint16_t state_flags;
- uint16_t status_flags;
+ __le16 status;
+ __le16 state_flags;
+ __le16 status_flags;
uint32_t sys_define2[2];
- uint16_t mb0;
- uint16_t mb1;
- uint16_t mb2;
- uint16_t mb3;
- uint16_t mb6;
- uint16_t mb7;
- uint16_t mb9;
- uint16_t mb10;
+ __le16 mb0;
+ __le16 mb1;
+ __le16 mb2;
+ __le16 mb3;
+ __le16 mb6;
+ __le16 mb7;
+ __le16 mb9;
+ __le16 mb10;
uint32_t reserved_2[2];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
@@ -2205,52 +2240,52 @@ struct imm_ntfy_from_isp {
uint8_t entry_status; /* Entry Status. */
union {
struct {
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
- uint16_t lun;
+ __le16 lun;
uint8_t target_id;
uint8_t reserved_1;
- uint16_t status_modifier;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
+ __le16 status_modifier;
+ __le16 status;
+ __le16 task_flags;
+ __le16 seq_id;
+ __le16 srr_rx_id;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
#define SRR_IU_DATA_IN 0x1
#define SRR_IU_DATA_OUT 0x5
#define SRR_IU_STATUS 0x7
- uint16_t srr_ox_id;
+ __le16 srr_ox_id;
uint8_t reserved_2[28];
} isp2x;
struct {
uint32_t reserved;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t reserved_2;
- uint16_t flags;
+ __le16 flags;
#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
- uint16_t srr_rx_id;
- uint16_t status;
+ __le16 srr_rx_id;
+ __le16 status;
uint8_t status_subcode;
uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_ox_id;
+ __le32 exchange_address;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_ox_id;
union {
struct {
uint8_t node_name[8];
} plogi; /* PLOGI/ADISC/PDISC */
struct {
/* PRLI word 3 bit 0-15 */
- uint16_t wd3_lo;
+ __le16 wd3_lo;
uint8_t resv0[6];
} prli;
struct {
uint8_t port_id[3];
uint8_t resv1;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t resv2;
} req_els;
} u;
@@ -2263,7 +2298,7 @@ struct imm_ntfy_from_isp {
} isp24;
} u;
uint16_t reserved_7;
- uint16_t ox_id;
+ __le16 ox_id;
} __packed;
#endif
@@ -2653,8 +2688,8 @@ static const char * const port_dstate_str[] = {
#define FDMI_HBA_VENDOR_IDENTIFIER 0xe0
struct ct_fdmi_hba_attr {
- uint16_t type;
- uint16_t len;
+ __be16 type;
+ __be16 len;
union {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[64];
@@ -2666,11 +2701,11 @@ struct ct_fdmi_hba_attr {
uint8_t orom_version[16];
uint8_t fw_version[32];
uint8_t os_version[128];
- uint32_t max_ct_len;
+ __be32 max_ct_len;
uint8_t sym_name[256];
- uint32_t vendor_specific_info;
- uint32_t num_ports;
+ __be32 vendor_specific_info;
+ __be32 num_ports;
uint8_t fabric_name[WWN_SIZE];
uint8_t bios_name[32];
uint8_t vendor_identifier[8];
@@ -2678,12 +2713,12 @@ struct ct_fdmi_hba_attr {
};
struct ct_fdmi1_hba_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_hba_attr entry[FDMI1_HBA_ATTR_COUNT];
};
struct ct_fdmi2_hba_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_hba_attr entry[FDMI2_HBA_ATTR_COUNT];
};
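
The FDMI/CT structures use __be16/__be32 rather than __le16/__le32 because CT payloads travel big-endian on the Fibre Channel wire. A hedged sketch of filling one attribute (the tag value and the union member name `a` are assumptions, not taken from the hunks above):

#include <asm/byteorder.h>

/* Sketch only: one numeric HBA attribute, stored pre-swapped. */
static void fdmi_set_num_ports(struct ct_fdmi1_hba_attributes *attrs,
			       u32 nports)
{
	struct ct_fdmi_hba_attr *attr = &attrs->entry[0];

	attr->type = cpu_to_be16(0x0a);		/* illustrative tag */
	attr->len = cpu_to_be16(4 + sizeof(__be32));
	attr->a.num_ports = cpu_to_be32(nports);
	attrs->count = cpu_to_be32(1);
}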
@@ -2735,44 +2770,44 @@ struct ct_fdmi2_hba_attributes {
#define FC_CLASS_2_3 0x0C
struct ct_fdmi_port_attr {
- uint16_t type;
- uint16_t len;
+ __be16 type;
+ __be16 len;
union {
uint8_t fc4_types[32];
- uint32_t sup_speed;
- uint32_t cur_speed;
- uint32_t max_frame_size;
+ __be32 sup_speed;
+ __be32 cur_speed;
+ __be32 max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[256];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t port_sym_name[128];
- uint32_t port_type;
- uint32_t port_supported_cos;
+ __be32 port_type;
+ __be32 port_supported_cos;
uint8_t fabric_name[WWN_SIZE];
uint8_t port_fc4_type[32];
- uint32_t port_state;
- uint32_t num_ports;
- uint32_t port_id;
+ __be32 port_state;
+ __be32 num_ports;
+ __be32 port_id;
uint8_t smartsan_service[24];
uint8_t smartsan_guid[16];
uint8_t smartsan_version[24];
uint8_t smartsan_prod_name[16];
- uint32_t smartsan_port_info;
- uint32_t smartsan_qos_support;
- uint32_t smartsan_security_support;
+ __be32 smartsan_port_info;
+ __be32 smartsan_qos_support;
+ __be32 smartsan_security_support;
} a;
};
struct ct_fdmi1_port_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_port_attr entry[FDMI1_PORT_ATTR_COUNT];
};
struct ct_fdmi2_port_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_port_attr entry[FDMI2_PORT_ATTR_COUNT];
};
@@ -2826,8 +2861,8 @@ struct ct_cmd_hdr {
/* CT command request */
struct ct_sns_req {
struct ct_cmd_hdr header;
- uint16_t command;
- uint16_t max_rsp_size;
+ __be16 command;
+ __be16 max_rsp_size;
uint8_t fragment_id;
uint8_t reserved[3];
@@ -2884,7 +2919,7 @@ struct ct_sns_req {
struct {
uint8_t hba_identifier[8];
- uint32_t entry_count;
+ __be32 entry_count;
uint8_t port_name[8];
struct ct_fdmi2_hba_attributes attrs;
} rhba;
@@ -2939,7 +2974,7 @@ struct ct_sns_req {
/* CT command response header */
struct ct_rsp_hdr {
struct ct_cmd_hdr header;
- uint16_t response;
+ __be16 response;
uint16_t residual;
uint8_t fragment_id;
uint8_t reason_code;
@@ -3025,8 +3060,8 @@ struct ct_sns_rsp {
} gfpn_id;
struct {
- uint16_t speeds;
- uint16_t speed;
+ __be16 speeds;
+ __be16 speed;
} gpsc;
#define GFF_FCP_SCSI_OFFSET 7
@@ -3116,13 +3151,13 @@ struct fab_scan {
struct sns_cmd_pkt {
union {
struct {
- uint16_t buffer_length;
- uint16_t reserved_1;
- __le64 buffer_address __packed;
- uint16_t subcommand_length;
- uint16_t reserved_2;
- uint16_t subcommand;
- uint16_t size;
+ __le16 buffer_length;
+ __le16 reserved_1;
+ __le64 buffer_address __packed;
+ __le16 subcommand_length;
+ __le16 reserved_2;
+ __le16 subcommand;
+ __le16 size;
uint32_t reserved_3;
uint8_t param[36];
} cmd;
@@ -3148,7 +3183,7 @@ struct gid_list_info {
uint8_t area;
uint8_t domain;
uint8_t loop_id_2100; /* ISP2100/ISP2200 -- 4 bytes. */
- uint16_t loop_id; /* ISP23XX -- 6 bytes. */
+ __le16 loop_id; /* ISP23XX -- 6 bytes. */
uint16_t reserved_1; /* ISP24XX -- 8 bytes. */
};
@@ -3222,7 +3257,8 @@ struct isp_operations {
int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t,
uint32_t);
- void (*fw_dump) (struct scsi_qla_host *, int);
+ void (*fw_dump)(struct scsi_qla_host *vha);
+ void (*mpi_fw_dump)(struct scsi_qla_host *, int);
int (*beacon_on) (struct scsi_qla_host *);
int (*beacon_off) (struct scsi_qla_host *);
@@ -3456,8 +3492,8 @@ struct rsp_que {
dma_addr_t dma;
response_t *ring;
response_t *ring_ptr;
- uint32_t __iomem *rsp_q_in; /* FWI2-capable only. */
- uint32_t __iomem *rsp_q_out;
+ __le32 __iomem *rsp_q_in; /* FWI2-capable only. */
+ __le32 __iomem *rsp_q_out;
uint16_t ring_index;
uint16_t out_ptr;
uint16_t *in_ptr; /* queue shadow in index */
@@ -3483,8 +3519,8 @@ struct req_que {
dma_addr_t dma;
request_t *ring;
request_t *ring_ptr;
- uint32_t __iomem *req_q_in; /* FWI2-capable only. */
- uint32_t __iomem *req_q_out;
+ __le32 __iomem *req_q_in; /* FWI2-capable only. */
+ __le32 __iomem *req_q_out;
uint16_t ring_index;
uint16_t in_ptr;
uint16_t *out_ptr; /* queue shadow out index */
@@ -3552,7 +3588,7 @@ struct qla_qpair {
struct list_head hints_list;
uint16_t cpuid;
uint16_t retry_term_cnt;
- uint32_t retry_term_exchg_addr;
+ __le32 retry_term_exchg_addr;
uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
};
@@ -3579,98 +3615,98 @@ struct rdp_req_payload {
struct rdp_rsp_payload {
struct {
- uint32_t cmd;
- uint32_t len;
+ __be32 cmd;
+ __be32 len;
} hdr;
/* LS Request Info descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t req_payload_word_0;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 req_payload_word_0;
} ls_req_info_desc;
/* LS Request Info descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t req_payload_word_0;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 req_payload_word_0;
} ls_req_info_desc2;
/* SFP diagnostic param descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint16_t temperature;
- uint16_t vcc;
- uint16_t tx_bias;
- uint16_t tx_power;
- uint16_t rx_power;
- uint16_t sfp_flags;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 temperature;
+ __be16 vcc;
+ __be16 tx_bias;
+ __be16 tx_power;
+ __be16 rx_power;
+ __be16 sfp_flags;
} sfp_diag_desc;
/* Port Speed Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint16_t speed_capab;
- uint16_t operating_speed;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 speed_capab;
+ __be16 operating_speed;
} port_speed_desc;
/* Link Error Status Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t link_fail_cnt;
- uint32_t loss_sync_cnt;
- uint32_t loss_sig_cnt;
- uint32_t prim_seq_err_cnt;
- uint32_t inval_xmit_word_cnt;
- uint32_t inval_crc_cnt;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 link_fail_cnt;
+ __be32 loss_sync_cnt;
+ __be32 loss_sig_cnt;
+ __be32 prim_seq_err_cnt;
+ __be32 inval_xmit_word_cnt;
+ __be32 inval_crc_cnt;
uint8_t pn_port_phy_type;
uint8_t reserved[3];
} ls_err_desc;
/* Port name description with diag param */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
+ __be32 desc_tag;
+ __be32 desc_len;
uint8_t WWNN[WWN_SIZE];
uint8_t WWPN[WWN_SIZE];
} port_name_diag_desc;
/* Port Name desc for Direct attached Fx_Port or Nx_Port */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
+ __be32 desc_tag;
+ __be32 desc_len;
uint8_t WWNN[WWN_SIZE];
uint8_t WWPN[WWN_SIZE];
} port_name_direct_desc;
/* Buffer Credit descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t fcport_b2b;
- uint32_t attached_fcport_b2b;
- uint32_t fcport_rtt;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 fcport_b2b;
+ __be32 attached_fcport_b2b;
+ __be32 fcport_rtt;
} buffer_credit_desc;
/* Optical Element Data Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint16_t high_alarm;
- uint16_t low_alarm;
- uint16_t high_warn;
- uint16_t low_warn;
- uint32_t element_flags;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 high_alarm;
+ __be16 low_alarm;
+ __be16 high_warn;
+ __be16 low_warn;
+ __be32 element_flags;
} optical_elmt_desc[5];
/* Optical Product Data Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
+ __be32 desc_tag;
+ __be32 desc_len;
uint8_t vendor_name[16];
uint8_t part_number[16];
uint8_t serial_number[16];
@@ -3708,17 +3744,17 @@ struct qlt_hw_data {
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
- uint32_t __iomem *atio_q_in;
- uint32_t __iomem *atio_q_out;
+ __le32 __iomem *atio_q_in;
+ __le32 __iomem *atio_q_out;
struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt_vp_map *tgt_vp_map;
int saved_set;
- uint16_t saved_exchange_count;
- uint32_t saved_firmware_options_1;
- uint32_t saved_firmware_options_2;
- uint32_t saved_firmware_options_3;
+ __le16 saved_exchange_count;
+ __le32 saved_firmware_options_1;
+ __le32 saved_firmware_options_2;
+ __le32 saved_firmware_options_3;
uint8_t saved_firmware_options[2];
uint8_t saved_add_firmware_options[2];
@@ -3748,6 +3784,11 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+struct qla_hw_data_stat {
+ u32 num_fw_dump;
+ u32 num_mpi_reset;
+};
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -4212,7 +4253,7 @@ struct qla_hw_data {
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
- uint16_t fw_seriallink_options24[4];
+ __le16 fw_seriallink_options24[4];
uint8_t serdes_version[3];
uint8_t mpi_version[3];
@@ -4230,7 +4271,6 @@ struct qla_hw_data {
uint32_t fw_dump_len;
u32 fw_dump_alloc_len;
bool fw_dumped;
- bool fw_dump_mpi;
unsigned long fw_dump_cap_flags;
#define RISC_PAUSE_CMPL 0
#define DMA_SHUTDOWN_CMPL 1
@@ -4241,6 +4281,10 @@ struct qla_hw_data {
#define ISP_MBX_RDY 6
#define ISP_SOFT_RESET_CMPL 7
int fw_dump_reading;
+ void *mpi_fw_dump;
+ u32 mpi_fw_dump_len;
+ unsigned int mpi_fw_dump_reading:1;
+ unsigned int mpi_fw_dumped:1;
int prev_minidump_failed;
dma_addr_t eft_dma;
void *eft;
@@ -4392,7 +4436,7 @@ struct qla_hw_data {
#define NUM_DSD_CHAIN 4096
uint8_t fw_type;
- __le32 file_prd_off; /* File firmware product offset */
+ uint32_t file_prd_off; /* File firmware product offset */
uint32_t md_template_size;
void *md_tmplt_hdr;
@@ -4454,6 +4498,8 @@ struct qla_hw_data {
uint16_t last_zio_threshold;
#define DEFAULT_ZIO_THRESHOLD 5
+
+ struct qla_hw_data_stat stat;
};
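
The new qla_hw_data_stat counters give sysfs/debugfs a cheap health signal; a hedged sketch of how the dump path might bump them (function name hypothetical):

/* Sketch only: record a RISC firmware dump against the HBA stats. */
static void qla2xxx_account_fw_dump(struct qla_hw_data *ha)
{
	ha->fw_dumped = true;
	ha->stat.num_fw_dump++;
}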
struct active_regions {
@@ -4698,13 +4744,13 @@ typedef struct scsi_qla_host {
struct qla27xx_image_status {
uint8_t image_status_mask;
- uint16_t generation;
+ __le16 generation;
uint8_t ver_major;
uint8_t ver_minor;
uint8_t bitmap; /* 28xx only */
uint8_t reserved[2];
- uint32_t checksum;
- uint32_t signature;
+ __le32 checksum;
+ __le32 signature;
} __packed;
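
qla27xx_image_status now carries endian annotations because it is read straight out of little-endian flash, so comparisons go through le16_to_cpu()/le32_to_cpu(). A hedged sketch:

/* Sketch only: pick the newer of two flash images. The 16-bit
 * generation counter can wrap; a plain compare is a simplification. */
static bool qla27xx_image_newer(const struct qla27xx_image_status *a,
				const struct qla27xx_image_status *b)
{
	return le16_to_cpu(a->generation) > le16_to_cpu(b->generation);
}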
/* 28xx aux image status bitmap values */

diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index f9bad5bd7198..d1e12a29c3f7 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -134,28 +134,28 @@ struct vp_database_24xx {
struct nvram_24xx {
/* NVRAM header. */
uint8_t id[4];
- uint16_t nvram_version;
+ __le16 nvram_version;
uint16_t reserved_0;
/* Firmware Initialization Control Block. */
- uint16_t version;
+ __le16 version;
uint16_t reserved_1;
- __le16 frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
- uint16_t hard_address;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
+ __le16 hard_address;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint16_t login_retry_count;
- uint16_t link_down_on_nos;
- uint16_t interrupt_delay_timer;
- uint16_t login_timeout;
+ __le16 login_retry_count;
+ __le16 link_down_on_nos;
+ __le16 interrupt_delay_timer;
+ __le16 login_timeout;
- uint32_t firmware_options_1;
- uint32_t firmware_options_2;
- uint32_t firmware_options_3;
+ __le32 firmware_options_1;
+ __le32 firmware_options_2;
+ __le32 firmware_options_3;
/* Offset 56. */
@@ -178,7 +178,7 @@ struct nvram_24xx {
* BIT 11-13 = Output Emphasis 4G
* BIT 14-15 = Reserved
*/
- uint16_t seriallink_options[4];
+ __le16 seriallink_options[4];
uint16_t reserved_2[16];
@@ -218,25 +218,25 @@ struct nvram_24xx {
*
* BIT 16-31 =
*/
- uint32_t host_p;
+ __le32 host_p;
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
uint8_t boot_port_name[WWN_SIZE];
- uint16_t boot_lun_number;
+ __le16 boot_lun_number;
uint16_t reserved_8;
uint8_t alt1_boot_port_name[WWN_SIZE];
- uint16_t alt1_boot_lun_number;
+ __le16 alt1_boot_lun_number;
uint16_t reserved_9;
uint8_t alt2_boot_port_name[WWN_SIZE];
- uint16_t alt2_boot_lun_number;
+ __le16 alt2_boot_lun_number;
uint16_t reserved_10;
uint8_t alt3_boot_port_name[WWN_SIZE];
- uint16_t alt3_boot_lun_number;
+ __le16 alt3_boot_lun_number;
uint16_t reserved_11;
/*
@@ -249,23 +249,23 @@ struct nvram_24xx {
* BIT 6 = Reserved
* BIT 7-31 =
*/
- uint32_t efi_parameters;
+ __le32 efi_parameters;
uint8_t reset_delay;
uint8_t reserved_12;
uint16_t reserved_13;
- uint16_t boot_id_number;
+ __le16 boot_id_number;
uint16_t reserved_14;
- uint16_t max_luns_per_target;
+ __le16 max_luns_per_target;
uint16_t reserved_15;
- uint16_t port_down_retry_count;
- uint16_t link_down_timeout;
+ __le16 port_down_retry_count;
+ __le16 link_down_timeout;
/* FCode parameters. */
- uint16_t fcode_parameter;
+ __le16 fcode_parameter;
uint16_t reserved_16[3];
@@ -275,13 +275,13 @@ struct nvram_24xx {
uint8_t prev_drv_ver_minor;
uint8_t prev_drv_ver_subminor;
- uint16_t prev_bios_ver_major;
- uint16_t prev_bios_ver_minor;
+ __le16 prev_bios_ver_major;
+ __le16 prev_bios_ver_minor;
- uint16_t prev_efi_ver_major;
- uint16_t prev_efi_ver_minor;
+ __le16 prev_efi_ver_major;
+ __le16 prev_efi_ver_minor;
- uint16_t prev_fw_ver_major;
+ __le16 prev_fw_ver_major;
uint8_t prev_fw_ver_minor;
uint8_t prev_fw_ver_subminor;
@@ -309,7 +309,7 @@ struct nvram_24xx {
uint16_t subsystem_vendor_id;
uint16_t subsystem_device_id;
- uint32_t checksum;
+ __le32 checksum;
};
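
All nvram_24xx fields are little-endian in flash; a typical integrity check sums the image as __le32 words, with the stored checksum chosen so the total comes out zero. A hedged sketch:

/* Sketch only: whole-image checksum over little-endian dwords. */
static bool qla24xx_nvram_csum_ok(const __le32 *dwptr, unsigned int dwords)
{
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < dwords; i++)
		sum += le32_to_cpu(dwptr[i]);

	return sum == 0;
}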
/*
@@ -318,46 +318,46 @@ struct nvram_24xx {
*/
#define ICB_VERSION 1
struct init_cb_24xx {
- uint16_t version;
+ __le16 version;
uint16_t reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
- uint16_t hard_address;
+ __le16 hard_address;
uint8_t port_name[WWN_SIZE]; /* Big endian. */
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t response_q_inpointer;
- uint16_t request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_outpointer;
- uint16_t login_retry_count;
+ __le16 login_retry_count;
- uint16_t prio_request_q_outpointer;
+ __le16 prio_request_q_outpointer;
- uint16_t response_q_length;
- uint16_t request_q_length;
+ __le16 response_q_length;
+ __le16 request_q_length;
- uint16_t link_down_on_nos; /* Milliseconds. */
+ __le16 link_down_on_nos; /* Milliseconds. */
- uint16_t prio_request_q_length;
+ __le16 prio_request_q_length;
__le64 request_q_address __packed;
__le64 response_q_address __packed;
__le64 prio_request_q_address __packed;
- uint16_t msix;
- uint16_t msix_atio;
+ __le16 msix;
+ __le16 msix_atio;
uint8_t reserved_2[4];
- uint16_t atio_q_inpointer;
- uint16_t atio_q_length;
- __le64 atio_q_address __packed;
+ __le16 atio_q_inpointer;
+ __le16 atio_q_length;
+ __le64 atio_q_address __packed;
- uint16_t interrupt_delay_timer; /* 100us increments. */
- uint16_t login_timeout;
+ __le16 interrupt_delay_timer; /* 100us increments. */
+ __le16 login_timeout;
/*
* BIT 0 = Enable Hard Loop Id
@@ -378,7 +378,7 @@ struct init_cb_24xx {
* BIT 14 = Node Name Option
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_1;
+ __le32 firmware_options_1;
/*
* BIT 0 = Operation Mode bit 0
@@ -399,7 +399,7 @@ struct init_cb_24xx {
* BIT 14 = Enable Target PRLI Control
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_2;
+ __le32 firmware_options_2;
/*
* BIT 0 = Reserved
@@ -425,9 +425,9 @@ struct init_cb_24xx {
* BIT 30 = Enable request queue 0 out index shadowing
* BIT 31 = Reserved
*/
- uint32_t firmware_options_3;
- uint16_t qos;
- uint16_t rid;
+ __le32 firmware_options_3;
+ __le16 qos;
+ __le16 rid;
uint8_t reserved_3[20];
};
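
init_cb_24xx is built in host memory and DMA'd to the ISP, so values are stored pre-swapped at assignment time rather than converted on read. A hedged sketch (values illustrative):

/* Sketch only: seed a few ICB fields in wire (little-endian) order. */
static void qla24xx_icb_seed(struct init_cb_24xx *icb)
{
	icb->version = cpu_to_le16(ICB_VERSION);
	icb->frame_payload_size = cpu_to_le16(2048);
	icb->exchange_count = cpu_to_le16(0);	/* 0 = firmware default */
	icb->login_timeout = cpu_to_le16(20);	/* seconds, illustrative */
}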
@@ -443,27 +443,27 @@ struct cmd_bidir {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT hanlde. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Commnad timeout. */
+ __le16 timeout; /* Command timeout. */
- uint16_t wr_dseg_count; /* Write Data segment count. */
- uint16_t rd_dseg_count; /* Read Data segment count. */
+ __le16 wr_dseg_count; /* Write Data segment count. */
+ __le16 rd_dseg_count; /* Read Data segment count. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define BD_WRAP_BACK BIT_3
#define BD_READ_DATA BIT_1
#define BD_WRITE_DATA BIT_0
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
__le64 fcp_cmnd_dseg_address __packed;/* Data segment address. */
uint16_t reserved[2]; /* Reserved */
- uint32_t rd_byte_count; /* Total Byte count Read. */
- uint32_t wr_byte_count; /* Total Byte count write. */
+ __le32 rd_byte_count; /* Total Byte count Read. */
+ __le32 wr_byte_count; /* Total Byte count write. */
uint8_t port_id[3]; /* PortID of destination port.*/
uint8_t vp_index;
@@ -480,28 +480,28 @@ struct cmd_type_6 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
- uint16_t fcp_rsp_dsd_len; /* FCP_RSP DSD length. */
+ __le16 fcp_rsp_dsd_len; /* FCP_RSP DSD length. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
/* Data segment address. */
__le64 fcp_cmnd_dseg_address __packed;
/* Data segment address. */
__le64 fcp_rsp_dseg_address __packed;
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -518,16 +518,16 @@ struct cmd_type_7 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
#define FW_MAX_TIMEOUT 0x1999
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
uint16_t reserved_1;
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t task_mgmt_flags; /* Task management flags. */
+ __le16 task_mgmt_flags; /* Task management flags. */
#define TMF_CLEAR_ACA BIT_14
#define TMF_TARGET_RESET BIT_13
#define TMF_LUN_RESET BIT_12
@@ -547,7 +547,7 @@ struct cmd_type_7 {
uint8_t crn;
uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -565,29 +565,29 @@ struct cmd_type_crc_2 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
- uint16_t fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
+ __le16 fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
__le64 fcp_cmnd_dseg_address __packed;
/* Data segment address. */
__le64 fcp_rsp_dseg_address __packed;
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
__le64 crc_context_address __packed; /* Data segment address. */
- uint16_t crc_context_len; /* Data segment length. */
+ __le16 crc_context_len; /* Data segment length. */
uint16_t reserved_1; /* MUST be set to 0. */
};
@@ -604,32 +604,32 @@ struct sts_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
- uint16_t ox_id; /* OX_ID used by the firmware. */
+ __le16 comp_status; /* Completion status. */
+ __le16 ox_id; /* OX_ID used by the firmware. */
- uint32_t residual_len; /* FW calc residual transfer length. */
+ __le32 residual_len; /* FW calc residual transfer length. */
union {
uint16_t reserved_1;
- uint16_t nvme_rsp_pyld_len;
+ __le16 nvme_rsp_pyld_len;
};
- uint16_t state_flags; /* State flags. */
+ __le16 state_flags; /* State flags. */
#define SF_TRANSFERRED_DATA BIT_11
#define SF_NVME_ERSP BIT_6
#define SF_FCP_RSP_DMA BIT_0
- uint16_t retry_delay;
- uint16_t scsi_status; /* SCSI status. */
+ __le16 retry_delay;
+ __le16 scsi_status; /* SCSI status. */
#define SS_CONFIRMATION_REQ BIT_12
- uint32_t rsp_residual_count; /* FCP RSP residual count. */
+ __le32 rsp_residual_count; /* FCP RSP residual count. */
- uint32_t sense_len; /* FCP SENSE length. */
+ __le32 sense_len; /* FCP SENSE length. */
union {
struct {
- uint32_t rsp_data_len; /* FCP response data length */
+ __le32 rsp_data_len; /* FCP response data length */
uint8_t data[28]; /* FCP rsp/sense information */
};
struct nvme_fc_ersp_iu nvme_ersp;
@@ -672,7 +672,7 @@ struct mrk_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
uint8_t modifier; /* Modifier (7-0). */
#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */
@@ -701,24 +701,24 @@ struct ct_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t cmd_dsd_count;
+ __le16 cmd_dsd_count;
uint8_t vp_index;
uint8_t reserved_1;
- uint16_t timeout; /* Command timeout. */
+ __le16 timeout; /* Command timeout. */
uint16_t reserved_2;
- uint16_t rsp_dsd_count;
+ __le16 rsp_dsd_count;
uint8_t reserved_3[10];
- uint32_t rsp_byte_count;
- uint32_t cmd_byte_count;
+ __le32 rsp_byte_count;
+ __le32 cmd_byte_count;
struct dsd64 dsd[2];
};
@@ -733,17 +733,17 @@ struct purex_entry_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint16_t reserved1;
+ __le16 reserved1;
uint8_t vp_idx;
uint8_t reserved2;
- uint16_t status_flags;
- uint16_t nport_handle;
+ __le16 status_flags;
+ __le16 nport_handle;
- uint16_t frame_size;
- uint16_t trunc_frame_size;
+ __le16 frame_size;
+ __le16 trunc_frame_size;
- uint32_t rx_xchg_addr;
+ __le32 rx_xchg_addr;
uint8_t d_id[3];
uint8_t r_ctl;
@@ -754,13 +754,13 @@ struct purex_entry_24xx {
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t param;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 param;
uint8_t els_frame_payload[20];
};
@@ -777,18 +777,18 @@ struct els_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* response only */
- uint16_t nport_handle;
+ __le16 comp_status; /* response only */
+ __le16 nport_handle;
- uint16_t tx_dsd_count;
+ __le16 tx_dsd_count;
uint8_t vp_index;
uint8_t sof_type;
#define EST_SOFI3 (1 << 4)
#define EST_SOFI2 (3 << 4)
- uint32_t rx_xchg_address; /* Receive exchange address. */
- uint16_t rx_dsd_count;
+ __le32 rx_xchg_address; /* Receive exchange address. */
+ __le16 rx_dsd_count;
uint8_t opcode;
uint8_t reserved_2;
@@ -796,7 +796,7 @@ struct els_entry_24xx {
uint8_t d_id[3];
uint8_t s_id[3];
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13)
#define EPD_ELS_COMMAND (0 << 13)
#define EPD_ELS_ACC (1 << 13)
@@ -817,10 +817,10 @@ struct els_entry_24xx {
__le32 rx_len; /* DSD 1 length. */
};
struct {
- uint32_t total_byte_count;
- uint32_t error_subcode_1;
- uint32_t error_subcode_2;
- uint32_t error_subcode_3;
+ __le32 total_byte_count;
+ __le32 error_subcode_1;
+ __le32 error_subcode_2;
+ __le32 error_subcode_3;
};
};
};
@@ -831,19 +831,19 @@ struct els_sts_entry_24xx {
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t handle; /* System handle. */
+ __le32 handle; /* System handle. */
- uint16_t comp_status;
+ __le16 comp_status;
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t reserved_1;
+ __le16 reserved_1;
uint8_t vp_index;
uint8_t sof_type;
- uint32_t rx_xchg_address; /* Receive exchange address. */
- uint16_t reserved_2;
+ __le32 rx_xchg_address; /* Receive exchange address. */
+ __le16 reserved_2;
uint8_t opcode;
uint8_t reserved_3;
@@ -851,13 +851,13 @@ struct els_sts_entry_24xx {
uint8_t d_id[3];
uint8_t s_id[3];
- uint16_t control_flags; /* Control flags. */
- uint32_t total_byte_count;
- uint32_t error_subcode_1;
- uint32_t error_subcode_2;
- uint32_t error_subcode_3;
+ __le16 control_flags; /* Control flags. */
+ __le32 total_byte_count;
+ __le32 error_subcode_1;
+ __le32 error_subcode_2;
+ __le32 error_subcode_3;
- uint32_t reserved_4[4];
+ __le32 reserved_4[4];
};
/*
* ISP queue - Mailbox Command entry structure definition.
@@ -884,12 +884,12 @@ struct logio_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_LOGIO_ERROR 0x31 /* Login/Logout IOCB error. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
/* Modifiers. */
#define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */
#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
@@ -918,7 +918,7 @@ struct logio_entry_24xx {
uint8_t rsp_size; /* Response size in 32bit words. */
- uint32_t io_parameter[11]; /* General I/O parameters. */
+ __le32 io_parameter[11]; /* General I/O parameters. */
#define LSC_SCODE_NOLINK 0x01
#define LSC_SCODE_NOIOCB 0x02
#define LSC_SCODE_NOXCB 0x03
@@ -946,17 +946,17 @@ struct tsk_mgmt_entry {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
uint16_t reserved_1;
- uint16_t delay; /* Activity delay in seconds. */
+ __le16 delay; /* Activity delay in seconds. */
- uint16_t timeout; /* Command timeout. */
+ __le16 timeout; /* Command timeout. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint32_t control_flags; /* Control Flags. */
+ __le32 control_flags; /* Control Flags. */
#define TCF_NOTMCMD_TO_TARGET BIT_31
#define TCF_LUN_RESET BIT_4
#define TCF_ABORT_TASK_SET BIT_3
@@ -981,15 +981,15 @@ struct abort_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
/* or Completion status. */
- uint16_t options; /* Options. */
+ __le16 options; /* Options. */
#define AOF_NO_ABTS BIT_0 /* Do not send any ABTS. */
uint32_t handle_to_abort; /* System handle to abort. */
- uint16_t req_que_no;
+ __le16 req_que_no;
uint8_t reserved_1[30];
uint8_t port_id[3]; /* PortID of destination port. */
@@ -1006,16 +1006,16 @@ struct abts_entry_24xx {
uint8_t handle_count;
uint8_t entry_status;
- uint32_t handle; /* type 0x55 only */
+ __le32 handle; /* type 0x55 only */
- uint16_t comp_status; /* type 0x55 only */
- uint16_t nport_handle; /* type 0x54 only */
+ __le16 comp_status; /* type 0x55 only */
+ __le16 nport_handle; /* type 0x54 only */
- uint16_t control_flags; /* type 0x55 only */
+ __le16 control_flags; /* type 0x55 only */
uint8_t vp_idx;
uint8_t sof_type; /* sof_type is upper nibble */
- uint32_t rx_xch_addr;
+ __le32 rx_xch_addr;
uint8_t d_id[3];
uint8_t r_ctl;
@@ -1026,30 +1026,30 @@ struct abts_entry_24xx {
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
+ __le16 rx_id;
+ __le16 ox_id;
- uint32_t param;
+ __le32 param;
union {
struct {
- uint32_t subcode3;
- uint32_t rsvd;
- uint32_t subcode1;
- uint32_t subcode2;
+ __le32 subcode3;
+ __le32 rsvd;
+ __le32 subcode1;
+ __le32 subcode2;
} error;
struct {
- uint16_t rsrvd1;
+ __le16 rsrvd1;
uint8_t last_seq_id;
uint8_t seq_id_valid;
- uint16_t aborted_rx_id;
- uint16_t aborted_ox_id;
- uint16_t high_seq_cnt;
- uint16_t low_seq_cnt;
+ __le16 aborted_rx_id;
+ __le16 aborted_ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
} ba_acc;
struct {
uint8_t vendor_unique;
@@ -1058,7 +1058,7 @@ struct abts_entry_24xx {
} ba_rjt;
} payload;
- uint32_t rx_xch_addr_to_abort;
+ __le32 rx_xch_addr_to_abort;
} __packed;
/* ABTS payload explanation values */
@@ -1087,7 +1087,7 @@ struct abts_entry_24xx {
* ISP I/O Register Set structure definitions.
*/
struct device_reg_24xx {
- uint32_t flash_addr; /* Flash/NVRAM BIOS address. */
+ __le32 flash_addr; /* Flash/NVRAM BIOS address. */
#define FARX_DATA_FLAG BIT_31
#define FARX_ACCESS_FLASH_CONF 0x7FFD0000
#define FARX_ACCESS_FLASH_DATA 0x7FF00000
@@ -1138,9 +1138,9 @@ struct device_reg_24xx {
#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
#define HW_EVENT_FLASH_FW_ERR 0xF024
- uint32_t flash_data; /* Flash/NVRAM BIOS data. */
+ __le32 flash_data; /* Flash/NVRAM BIOS data. */
- uint32_t ctrl_status; /* Control/Status. */
+ __le32 ctrl_status; /* Control/Status. */
#define CSRX_FLASH_ACCESS_ERROR BIT_18 /* Flash/NVRAM Access Error. */
#define CSRX_DMA_ACTIVE BIT_17 /* DMA Active status. */
#define CSRX_DMA_SHUTDOWN BIT_16 /* DMA Shutdown control status. */
@@ -1166,35 +1166,35 @@ struct device_reg_24xx {
#define CSRX_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable. */
#define CSRX_ISP_SOFT_RESET BIT_0 /* ISP soft reset. */
- uint32_t ictrl; /* Interrupt control. */
+ __le32 ictrl; /* Interrupt control. */
#define ICRX_EN_RISC_INT BIT_3 /* Enable RISC interrupts on PCI. */
- uint32_t istatus; /* Interrupt status. */
+ __le32 istatus; /* Interrupt status. */
#define ISRX_RISC_INT BIT_3 /* RISC interrupt. */
- uint32_t unused_1[2]; /* Gap. */
+ __le32 unused_1[2]; /* Gap. */
/* Request Queue. */
- uint32_t req_q_in; /* In-Pointer. */
- uint32_t req_q_out; /* Out-Pointer. */
+ __le32 req_q_in; /* In-Pointer. */
+ __le32 req_q_out; /* Out-Pointer. */
/* Response Queue. */
- uint32_t rsp_q_in; /* In-Pointer. */
- uint32_t rsp_q_out; /* Out-Pointer. */
+ __le32 rsp_q_in; /* In-Pointer. */
+ __le32 rsp_q_out; /* Out-Pointer. */
/* Priority Request Queue. */
- uint32_t preq_q_in; /* In-Pointer. */
- uint32_t preq_q_out; /* Out-Pointer. */
+ __le32 preq_q_in; /* In-Pointer. */
+ __le32 preq_q_out; /* Out-Pointer. */
- uint32_t unused_2[2]; /* Gap. */
+ __le32 unused_2[2]; /* Gap. */
/* ATIO Queue. */
- uint32_t atio_q_in; /* In-Pointer. */
- uint32_t atio_q_out; /* Out-Pointer. */
+ __le32 atio_q_in; /* In-Pointer. */
+ __le32 atio_q_out; /* Out-Pointer. */
- uint32_t host_status;
+ __le32 host_status;
#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
- uint32_t hccr; /* Host command & control register. */
+ __le32 hccr; /* Host command & control register. */
/* HCCR statuses. */
#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
@@ -1216,7 +1216,7 @@ struct device_reg_24xx {
/* Clear RISC to PCI interrupt. */
#define HCCRX_CLR_RISC_INT 0xA0000000
- uint32_t gpiod; /* GPIO Data register. */
+ __le32 gpiod; /* GPIO Data register. */
/* LED update mask. */
#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18)
@@ -1235,7 +1235,7 @@ struct device_reg_24xx {
/* Data in/out. */
#define GPDX_DATA_INOUT (BIT_1|BIT_0)
- uint32_t gpioe; /* GPIO Enable register. */
+ __le32 gpioe; /* GPIO Enable register. */
/* Enable update mask. */
#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16)
/* Enable update mask. */
@@ -1243,56 +1243,56 @@ struct device_reg_24xx {
/* Enable. */
#define GPEX_ENABLE (BIT_1|BIT_0)
- uint32_t iobase_addr; /* I/O Bus Base Address register. */
-
- uint32_t unused_3[10]; /* Gap. */
-
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23;
- uint16_t mailbox24;
- uint16_t mailbox25;
- uint16_t mailbox26;
- uint16_t mailbox27;
- uint16_t mailbox28;
- uint16_t mailbox29;
- uint16_t mailbox30;
- uint16_t mailbox31;
-
- uint32_t iobase_window;
- uint32_t iobase_c4;
- uint32_t iobase_c8;
- uint32_t unused_4_1[6]; /* Gap. */
- uint32_t iobase_q;
- uint32_t unused_5[2]; /* Gap. */
- uint32_t iobase_select;
- uint32_t unused_6[2]; /* Gap. */
- uint32_t iobase_sdata;
+ __le32 iobase_addr; /* I/O Bus Base Address register. */
+
+ __le32 unused_3[10]; /* Gap. */
+
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23;
+ __le16 mailbox24;
+ __le16 mailbox25;
+ __le16 mailbox26;
+ __le16 mailbox27;
+ __le16 mailbox28;
+ __le16 mailbox29;
+ __le16 mailbox30;
+ __le16 mailbox31;
+
+ __le32 iobase_window;
+ __le32 iobase_c4;
+ __le32 iobase_c8;
+ __le32 unused_4_1[6]; /* Gap. */
+ __le32 iobase_q;
+ __le32 unused_5[2]; /* Gap. */
+ __le32 iobase_select;
+ __le32 unused_6[2]; /* Gap. */
+ __le32 iobase_sdata;
};
/* RISC-RISC semaphore register PCI offset */
#define RISC_REGISTER_BASE_OFFSET 0x7010
-#define RISC_REGISTER_WINDOW_OFFET 0x6
+#define RISC_REGISTER_WINDOW_OFFSET 0x6
/* RISC-RISC semaphore/flag register (risc address 0x7016) */
@@ -1354,8 +1354,8 @@ struct mid_conf_entry_24xx {
struct mid_init_cb_24xx {
struct init_cb_24xx init_cb;
- uint16_t count;
- uint16_t options;
+ __le16 count;
+ __le16 options;
struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
};
@@ -1389,27 +1389,27 @@ struct vp_ctrl_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t vp_idx_failed;
+ __le16 vp_idx_failed;
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */
#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquiring ID. */
#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */
- uint16_t command;
+ __le16 command;
#define VCE_COMMAND_ENABLE_VPS 0x00 /* Enable VPs. */
#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */
#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */
#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */
#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO ports. */
- uint16_t vp_count;
+ __le16 vp_count;
uint8_t vp_idx_map[16];
- uint16_t flags;
- uint16_t id;
+ __le16 flags;
+ __le16 id;
uint16_t reserved_4;
- uint16_t hopct;
+ __le16 hopct;
uint8_t reserved_5[24];
};
@@ -1425,12 +1425,12 @@ struct vp_config_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t flags;
+ __le16 flags;
#define CS_VF_BIND_VPORTS_TO_VF BIT_0
#define CS_VF_SET_QOS_OF_VPORTS BIT_1
#define CS_VF_SET_HOPS_OF_VPORTS BIT_2
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */
#define CS_VCT_CNT_ERROR 0x02 /* Invalid VP count. */
#define CS_VCT_ERROR 0x03 /* Unknown error. */
@@ -1457,9 +1457,9 @@ struct vp_config_entry_24xx {
uint16_t reserved_vp2;
uint8_t port_name_idx2[WWN_SIZE];
uint8_t node_name_idx2[WWN_SIZE];
- uint16_t id;
+ __le16 id;
uint16_t reserved_4;
- uint16_t hopct;
+ __le16 hopct;
uint8_t reserved_5[2];
};
@@ -1486,7 +1486,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t resv1;
+ __le32 resv1;
uint8_t vp_acquired;
uint8_t vp_setup;
uint8_t vp_idx; /* Format 0=reserved */
@@ -1550,15 +1550,15 @@ struct vf_evfp_entry_24xx {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
- uint16_t timeout; /* timeout */
- uint16_t adim_tagging_mode;
+ __le16 comp_status; /* Completion status. */
+ __le16 timeout; /* timeout */
+ __le16 adim_tagging_mode;
- uint16_t vfport_id;
+ __le16 vfport_id;
uint32_t exch_addr;
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t control_flags;
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 control_flags;
uint32_t io_parameter_0;
uint32_t io_parameter_1;
__le64 tx_address __packed; /* Data segment 0 address. */
@@ -1573,13 +1573,13 @@ struct vf_evfp_entry_24xx {
struct qla_fdt_layout {
uint8_t sig[4];
- uint16_t version;
- uint16_t len;
- uint16_t checksum;
+ __le16 version;
+ __le16 len;
+ __le16 checksum;
uint8_t unused1[2];
uint8_t model[16];
- uint16_t man_id;
- uint16_t id;
+ __le16 man_id;
+ __le16 id;
uint8_t flags;
uint8_t erase_cmd;
uint8_t alt_erase_cmd;
@@ -1588,15 +1588,15 @@ struct qla_fdt_layout {
uint8_t wrt_sts_reg_cmd;
uint8_t unprotect_sec_cmd;
uint8_t read_man_id_cmd;
- uint32_t block_size;
- uint32_t alt_block_size;
- uint32_t flash_size;
- uint32_t wrt_enable_data;
+ __le32 block_size;
+ __le32 alt_block_size;
+ __le32 flash_size;
+ __le32 wrt_enable_data;
uint8_t read_id_addr_len;
uint8_t wrt_disable_bits;
uint8_t read_dev_id_len;
uint8_t chip_erase_cmd;
- uint16_t read_timeout;
+ __le16 read_timeout;
uint8_t protect_sec_cmd;
uint8_t unused2[65];
};
@@ -1605,11 +1605,11 @@ struct qla_fdt_layout {
struct qla_flt_location {
uint8_t sig[4];
- uint16_t start_lo;
- uint16_t start_hi;
+ __le16 start_lo;
+ __le16 start_hi;
uint8_t version;
uint8_t unused[5];
- uint16_t checksum;
+ __le16 checksum;
};
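
The FLT locator splits its 32-bit flash address across two little-endian halves; a hedged sketch of the reassembly:

/* Sketch only: rebuild the flash layout table start address. */
static u32 qla_flt_addr(const struct qla_flt_location *fltl)
{
	return ((u32)le16_to_cpu(fltl->start_hi) << 16) |
	       le16_to_cpu(fltl->start_lo);
}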
#define FLT_REG_FW 0x01
@@ -1664,19 +1664,19 @@ struct qla_flt_location {
#define FLT_REG_PEP_SEC_28XX 0xF1
struct qla_flt_region {
- uint16_t code;
+ __le16 code;
uint8_t attribute;
uint8_t reserved;
- uint32_t size;
- uint32_t start;
- uint32_t end;
+ __le32 size;
+ __le32 start;
+ __le32 end;
};
struct qla_flt_header {
- uint16_t version;
- uint16_t length;
- uint16_t checksum;
- uint16_t unused;
+ __le16 version;
+ __le16 length;
+ __le16 checksum;
+ __le16 unused;
struct qla_flt_region region[0];
};
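
With qla_flt_header's fields annotated, walking the region[0] flexible array converts each load exactly once. A hedged sketch, assuming `length` counts the region array in bytes (the shipping driver's interpretation may differ):

/* Sketch only: linear search of the flash layout table. */
static const struct qla_flt_region *
qla_flt_find(const struct qla_flt_header *flt, u16 code)
{
	u16 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
	u16 i;

	for (i = 0; i < cnt; i++)
		if (le16_to_cpu(flt->region[i].code) == code)
			return &flt->region[i];

	return NULL;
}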
@@ -1688,18 +1688,18 @@ struct qla_flt_header {
struct qla_npiv_header {
uint8_t sig[2];
- uint16_t version;
- uint16_t entries;
- uint16_t unused[4];
- uint16_t checksum;
+ __le16 version;
+ __le16 entries;
+ __le16 unused[4];
+ __le16 checksum;
};
struct qla_npiv_entry {
- uint16_t flags;
- uint16_t vf_id;
+ __le16 flags;
+ __le16 vf_id;
uint8_t q_qos;
uint8_t f_qos;
- uint16_t unused1;
+ __le16 unused1;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
};
@@ -1729,7 +1729,7 @@ struct verify_chip_entry_84xx {
uint32_t handle;
- uint16_t options;
+ __le16 options;
#define VCO_DONT_UPDATE_FW BIT_0
#define VCO_FORCE_UPDATE BIT_1
#define VCO_DONT_RESET_UPDATE BIT_2
@@ -1737,18 +1737,18 @@ struct verify_chip_entry_84xx {
#define VCO_END_OF_DATA BIT_14
#define VCO_ENABLE_DSD BIT_15
- uint16_t reserved_1;
+ __le16 reserved_1;
- uint16_t data_seg_cnt;
- uint16_t reserved_2[3];
+ __le16 data_seg_cnt;
+ __le16 reserved_2[3];
- uint32_t fw_ver;
- uint32_t exchange_address;
+ __le32 fw_ver;
+ __le32 exchange_address;
- uint32_t reserved_3[3];
- uint32_t fw_size;
- uint32_t fw_seq_size;
- uint32_t relative_offset;
+ __le32 reserved_3[3];
+ __le32 fw_size;
+ __le32 fw_seq_size;
+ __le32 relative_offset;
struct dsd64 dsd;
};
@@ -1761,22 +1761,22 @@ struct verify_chip_rsp_84xx {
uint32_t handle;
- uint16_t comp_status;
+ __le16 comp_status;
#define CS_VCS_CHIP_FAILURE 0x3
#define CS_VCS_BAD_EXCHANGE 0x8
#define CS_VCS_SEQ_COMPLETEi 0x40
- uint16_t failure_code;
+ __le16 failure_code;
#define VFC_CHECKSUM_ERROR 0x1
#define VFC_INVALID_LEN 0x2
#define VFC_ALREADY_IN_PROGRESS 0x8
- uint16_t reserved_1[4];
+ __le16 reserved_1[4];
- uint32_t fw_ver;
- uint32_t exchange_address;
+ __le32 fw_ver;
+ __le32 exchange_address;
- uint32_t reserved_2[6];
+ __le32 reserved_2[6];
};
#define ACCESS_CHIP_IOCB_TYPE 0x2B
@@ -1788,24 +1788,24 @@ struct access_chip_84xx {
uint32_t handle;
- uint16_t options;
+ __le16 options;
#define ACO_DUMP_MEMORY 0x0
#define ACO_LOAD_MEMORY 0x1
#define ACO_CHANGE_CONFIG_PARAM 0x2
#define ACO_REQUEST_INFO 0x3
- uint16_t reserved1;
+ __le16 reserved1;
- uint16_t dseg_count;
- uint16_t reserved2[3];
+ __le16 dseg_count;
+ __le16 reserved2[3];
- uint32_t parameter1;
- uint32_t parameter2;
- uint32_t parameter3;
+ __le32 parameter1;
+ __le32 parameter2;
+ __le32 parameter3;
- uint32_t reserved3[3];
- uint32_t total_byte_cnt;
- uint32_t reserved4;
+ __le32 reserved3[3];
+ __le32 total_byte_cnt;
+ __le32 reserved4;
struct dsd64 dsd;
};
@@ -1818,11 +1818,11 @@ struct access_chip_rsp_84xx {
uint32_t handle;
- uint16_t comp_status;
- uint16_t failure_code;
- uint32_t residual_count;
+ __le16 comp_status;
+ __le16 failure_code;
+ __le32 residual_count;
- uint32_t reserved[12];
+ __le32 reserved[12];
};
/* 81XX Support **************************************************************/
@@ -1877,52 +1877,52 @@ struct access_chip_rsp_84xx {
struct nvram_81xx {
/* NVRAM header. */
uint8_t id[4];
- uint16_t nvram_version;
- uint16_t reserved_0;
+ __le16 nvram_version;
+ __le16 reserved_0;
/* Firmware Initialization Control Block. */
- uint16_t version;
- uint16_t reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
- uint16_t reserved_2;
+ __le16 version;
+ __le16 reserved_1;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
+ __le16 reserved_2;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint16_t login_retry_count;
- uint16_t reserved_3;
- uint16_t interrupt_delay_timer;
- uint16_t login_timeout;
+ __le16 login_retry_count;
+ __le16 reserved_3;
+ __le16 interrupt_delay_timer;
+ __le16 login_timeout;
- uint32_t firmware_options_1;
- uint32_t firmware_options_2;
- uint32_t firmware_options_3;
+ __le32 firmware_options_1;
+ __le32 firmware_options_2;
+ __le32 firmware_options_3;
- uint16_t reserved_4[4];
+ __le16 reserved_4[4];
/* Offset 64. */
uint8_t enode_mac[6];
- uint16_t reserved_5[5];
+ __le16 reserved_5[5];
/* Offset 80. */
- uint16_t reserved_6[24];
+ __le16 reserved_6[24];
/* Offset 128. */
- uint16_t ex_version;
+ __le16 ex_version;
uint8_t prio_fcf_matching_flags;
uint8_t reserved_6_1[3];
- uint16_t pri_fcf_vlan_id;
+ __le16 pri_fcf_vlan_id;
uint8_t pri_fcf_fabric_name[8];
- uint16_t reserved_6_2[7];
+ __le16 reserved_6_2[7];
uint8_t spma_mac_addr[6];
- uint16_t reserved_6_3[14];
+ __le16 reserved_6_3[14];
/* Offset 192. */
uint8_t min_supported_speed;
uint8_t reserved_7_0;
- uint16_t reserved_7[31];
+ __le16 reserved_7[31];
/*
* BIT 0 = Enable spinup delay
@@ -1955,26 +1955,26 @@ struct nvram_81xx {
* BIT 25 = Temp WWPN
* BIT 26-31 =
*/
- uint32_t host_p;
+ __le32 host_p;
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
uint8_t boot_port_name[WWN_SIZE];
- uint16_t boot_lun_number;
- uint16_t reserved_8;
+ __le16 boot_lun_number;
+ __le16 reserved_8;
uint8_t alt1_boot_port_name[WWN_SIZE];
- uint16_t alt1_boot_lun_number;
- uint16_t reserved_9;
+ __le16 alt1_boot_lun_number;
+ __le16 reserved_9;
uint8_t alt2_boot_port_name[WWN_SIZE];
- uint16_t alt2_boot_lun_number;
- uint16_t reserved_10;
+ __le16 alt2_boot_lun_number;
+ __le16 reserved_10;
uint8_t alt3_boot_port_name[WWN_SIZE];
- uint16_t alt3_boot_lun_number;
- uint16_t reserved_11;
+ __le16 alt3_boot_lun_number;
+ __le16 reserved_11;
/*
* BIT 0 = Selective Login
@@ -1986,35 +1986,35 @@ struct nvram_81xx {
* BIT 6 = Reserved
* BIT 7-31 =
*/
- uint32_t efi_parameters;
+ __le32 efi_parameters;
uint8_t reset_delay;
uint8_t reserved_12;
- uint16_t reserved_13;
+ __le16 reserved_13;
- uint16_t boot_id_number;
- uint16_t reserved_14;
+ __le16 boot_id_number;
+ __le16 reserved_14;
- uint16_t max_luns_per_target;
- uint16_t reserved_15;
+ __le16 max_luns_per_target;
+ __le16 reserved_15;
- uint16_t port_down_retry_count;
- uint16_t link_down_timeout;
+ __le16 port_down_retry_count;
+ __le16 link_down_timeout;
/* FCode parameters. */
- uint16_t fcode_parameter;
+ __le16 fcode_parameter;
- uint16_t reserved_16[3];
+ __le16 reserved_16[3];
/* Offset 352. */
uint8_t reserved_17[4];
- uint16_t reserved_18[5];
+ __le16 reserved_18[5];
uint8_t reserved_19[2];
- uint16_t reserved_20[8];
+ __le16 reserved_20[8];
/* Offset 384. */
uint8_t reserved_21[16];
- uint16_t reserved_22[3];
+ __le16 reserved_22[3];
/* Offset 406 (0x196) Enhanced Features
* BIT 0 = Extended BB credits for LR
@@ -2027,20 +2027,20 @@ struct nvram_81xx {
uint16_t reserved_24[4];
/* Offset 416. */
- uint16_t reserved_25[32];
+ __le16 reserved_25[32];
/* Offset 480. */
uint8_t model_name[16];
/* Offset 496. */
- uint16_t feature_mask_l;
- uint16_t feature_mask_h;
- uint16_t reserved_26[2];
+ __le16 feature_mask_l;
+ __le16 feature_mask_h;
+ __le16 reserved_26[2];
- uint16_t subsystem_vendor_id;
- uint16_t subsystem_device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_device_id;
- uint32_t checksum;
+ __le32 checksum;
};
/*
@@ -2049,31 +2049,31 @@ struct nvram_81xx {
*/
#define ICB_VERSION 1
struct init_cb_81xx {
- uint16_t version;
- uint16_t reserved_1;
+ __le16 version;
+ __le16 reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
- uint16_t reserved_2;
+ __le16 reserved_2;
uint8_t port_name[WWN_SIZE]; /* Big endian. */
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t response_q_inpointer;
- uint16_t request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_outpointer;
- uint16_t login_retry_count;
+ __le16 login_retry_count;
- uint16_t prio_request_q_outpointer;
+ __le16 prio_request_q_outpointer;
- uint16_t response_q_length;
- uint16_t request_q_length;
+ __le16 response_q_length;
+ __le16 request_q_length;
- uint16_t reserved_3;
+ __le16 reserved_3;
- uint16_t prio_request_q_length;
+ __le16 prio_request_q_length;
__le64 request_q_address __packed;
__le64 response_q_address __packed;
@@ -2081,12 +2081,12 @@ struct init_cb_81xx {
uint8_t reserved_4[8];
- uint16_t atio_q_inpointer;
- uint16_t atio_q_length;
+ __le16 atio_q_inpointer;
+ __le16 atio_q_length;
__le64 atio_q_address __packed;
- uint16_t interrupt_delay_timer; /* 100us increments. */
- uint16_t login_timeout;
+ __le16 interrupt_delay_timer; /* 100us increments. */
+ __le16 login_timeout;
/*
* BIT 0-3 = Reserved
@@ -2099,7 +2099,7 @@ struct init_cb_81xx {
* BIT 14 = Node Name Option
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_1;
+ __le32 firmware_options_1;
/*
* BIT 0 = Operation Mode bit 0
@@ -2117,7 +2117,7 @@ struct init_cb_81xx {
* BIT 14 = Enable Target PRLI Control
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_2;
+ __le32 firmware_options_2;
/*
* BIT 0-3 = Reserved
@@ -2138,7 +2138,7 @@ struct init_cb_81xx {
* BIT 28 = SPMA selection bit 1
* BIT 30-31 = Reserved
*/
- uint32_t firmware_options_3;
+ __le32 firmware_options_3;
uint8_t reserved_5[8];
@@ -2216,9 +2216,9 @@ struct qla_fcp_prio_cfg {
#define FCP_PRIO_ATTR_ENABLE 0x1
#define FCP_PRIO_ATTR_PERSIST 0x2
uint8_t reserved; /* Reserved for future use */
-#define FCP_PRIO_CFG_HDR_SIZE 0x10
- struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */
-#define FCP_PRIO_CFG_ENTRY_SIZE 0x20
+#define FCP_PRIO_CFG_HDR_SIZE offsetof(struct qla_fcp_prio_cfg, entry)
+ struct qla_fcp_prio_entry entry[1023]; /* fcp priority entries */
+ uint8_t reserved2[16];
};
#define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port*/
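
With the header size now derived from offsetof() and the entry array sized
explicitly, struct qla_fcp_prio_cfg spans the whole flash region: a 16-byte
header plus 1023 entries of 0x20 bytes plus the 16-byte tail is exactly 32 KB.
A build-time check along these lines would pin that invariant down (a sketch,
not part of the patch):

	/* 0x10 + 1023 * 0x20 + 16 == 32 * 1024 */
	static_assert(sizeof(struct qla_fcp_prio_cfg) == FCP_PRIO_CFG_SIZE);
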
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1b93f5b4d77d..061f91b521b3 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -173,6 +173,7 @@ extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
+extern int ql2xfulldump_on_mpifail;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -636,15 +637,17 @@ extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_dbg.c source file.
*/
-extern void qla2100_fw_dump(scsi_qla_host_t *, int);
-extern void qla2300_fw_dump(scsi_qla_host_t *, int);
-extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla8044_fw_dump(scsi_qla_host_t *, int);
-
-extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+void qla2xxx_dump_fw(scsi_qla_host_t *vha);
+void qla2100_fw_dump(scsi_qla_host_t *vha);
+void qla2300_fw_dump(scsi_qla_host_t *vha);
+void qla24xx_fw_dump(scsi_qla_host_t *vha);
+void qla25xx_fw_dump(scsi_qla_host_t *vha);
+void qla81xx_fw_dump(scsi_qla_host_t *vha);
+void qla82xx_fw_dump(scsi_qla_host_t *vha);
+void qla8044_fw_dump(scsi_qla_host_t *vha);
+
+void qla27xx_fwdump(scsi_qla_host_t *vha);
+extern void qla27xx_mpi_fwdump(scsi_qla_host_t *, int);
extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *);
extern int qla27xx_fwdt_template_valid(void *);
extern ulong qla27xx_fwdt_template_size(void *);
@@ -769,7 +772,7 @@ extern int qlafx00_fw_ready(scsi_qla_host_t *);
extern int qlafx00_configure_devices(scsi_qla_host_t *);
extern int qlafx00_reset_initialize(scsi_qla_host_t *);
extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t);
-extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern void qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
uint32_t *, int);
extern uint32_t qlafx00_fw_state_show(struct device *,
@@ -871,7 +874,7 @@ extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
uint16_t *, uint16_t *);
/* 83xx related functions */
-extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+void qla83xx_fw_dump(scsi_qla_host_t *vha);
/* Minidump related functions */
extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
@@ -933,5 +936,6 @@ extern void qla24xx_process_purex_list(struct purex_list *);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+void qla27xx_reset_mpi(scsi_qla_host_t *vha);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */
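
Dropping the int argument from the fw_dump entry points changes the driver-wide
dump interface, not just these declarations. Presumably the matching callback
in struct isp_operations (qla_def.h, outside this diff) shrinks the same way,
which is why later hunks in qla_isr.c call the method with no hardware_locked
flag. A sketch of the assumed calling convention, using a hypothetical wrapper
name:

	static void qla_trigger_fw_dump(struct scsi_qla_host *vha)
	{
		struct qla_hw_data *ha = vha->hw;

		ha->isp_ops->fw_dump(vha);	/* single-argument form */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
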
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index caa6b840e459..4576d3ae9937 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -120,7 +120,7 @@ static void qla24xx_abort_iocb_timeout(void *data)
if (sp->cmd_sp)
sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
- abt->u.abt.comp_status = CS_TIMEOUT;
+ abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
@@ -992,7 +992,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
ql_dbg(ql_dbg_disc, vha, 0x20e8,
"%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
- __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
+ __func__, &wwn, e->port_id[2], e->port_id[1],
e->port_id[0], e->current_login_state, e->last_login_state,
(loop_id & 0x7fff));
}
@@ -1343,7 +1343,7 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
mb[9] = vha->vp_idx;
mb[10] = opt;
- mbx->u.mbx.in = (void *)pd;
+ mbx->u.mbx.in = pd;
mbx->u.mbx.in_dma = pd_dma;
sp->done = qla24xx_async_gpdb_sp_done;
@@ -1791,7 +1791,7 @@ qla2x00_tmf_iocb_timeout(void *data)
}
}
spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
- tmf->u.tmf.comp_status = CS_TIMEOUT;
+ tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
tmf->u.tmf.data = QLA_FUNCTION_FAILED;
complete(&tmf->u.tmf.comp);
}
@@ -2219,7 +2219,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
/* Check for secure flash support */
if (IS_QLA28XX(ha)) {
- if (RD_REG_DWORD(&reg->mailbox12) & BIT_0)
+ if (rd_reg_word(&reg->mailbox12) & BIT_0)
ha->flags.secure_adapter = 1;
ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
(ha->flags.secure_adapter) ? "Yes" : "No");
@@ -2357,7 +2357,7 @@ qla2100_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_word(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2399,17 +2399,17 @@ qla2300_pci_config(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
break;
udelay(10);
}
/* Select FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
- RD_REG_WORD(&reg->ctrl_status);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
+ rd_reg_word(&reg->ctrl_status);
/* Get the fb rev level */
ha->fb_rev = RD_FB_CMD_REG(ha, reg);
@@ -2418,13 +2418,13 @@ qla2300_pci_config(scsi_qla_host_t *vha)
pci_clear_mwi(ha->pdev);
/* Deselect FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x0);
- RD_REG_WORD(&reg->ctrl_status);
+ wrt_reg_word(&reg->ctrl_status, 0x0);
+ rd_reg_word(&reg->ctrl_status);
/* Release RISC module. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
break;
udelay(10);
@@ -2439,7 +2439,7 @@ qla2300_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_word(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2483,7 +2483,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2587,36 +2587,36 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
if (!IS_QLA2100(ha)) {
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) &
+ if ((rd_reg_word(&reg->hccr) &
HCCR_RISC_PAUSE) != 0)
break;
udelay(100);
}
} else {
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
udelay(10);
}
/* Select FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0x20);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* FPM Soft Reset. */
- WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
- RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+ wrt_reg_word(&reg->fpm_diag_config, 0x100);
+ rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
/* Toggle Fpm Reset. */
if (!IS_QLA2200(ha)) {
- WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
- RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+ wrt_reg_word(&reg->fpm_diag_config, 0x0);
+ rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
}
/* Select frame buffer registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0x10);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* Reset frame buffer FIFOs. */
if (IS_QLA2200(ha)) {
@@ -2634,23 +2634,23 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
}
/* Select RISC module registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
/* Release RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/* Wait for RISC to recover from reset. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -2661,7 +2661,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
*/
udelay(20);
for (cnt = 30000; cnt; cnt--) {
- if ((RD_REG_WORD(&reg->ctrl_status) &
+ if ((rd_reg_word(&reg->ctrl_status) &
CSR_ISP_SOFT_RESET) == 0)
break;
udelay(100);
@@ -2670,13 +2670,13 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
udelay(10);
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
- WRT_REG_WORD(&reg->semaphore, 0);
+ wrt_reg_word(&reg->semaphore, 0);
/* Release RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
@@ -2694,8 +2694,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
/* Disable RISC pause on FPM parity error. */
if (!IS_QLA2100(ha)) {
- WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2740,32 +2740,32 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Reset RISC. */
- WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
"HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->ctrl_status),
- (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_dword(&reg->ctrl_status),
+ (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
- WRT_REG_DWORD(&reg->ctrl_status,
+ wrt_reg_dword(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
udelay(100);
/* Wait for firmware to complete NVRAM accesses. */
- RD_REG_WORD(&reg->mailbox0);
- for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rd_reg_word(&reg->mailbox0);
+ for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
@@ -2779,26 +2779,26 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
"HCCR: 0x%x, MailBox0 Status 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->mailbox0));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_word(&reg->mailbox0));
/* Wait for soft-reset to complete. */
- RD_REG_DWORD(&reg->ctrl_status);
+ rd_reg_dword(&reg->ctrl_status);
for (cnt = 0; cnt < 60; cnt++) {
barrier();
- if ((RD_REG_DWORD(&reg->ctrl_status) &
+ if ((rd_reg_dword(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
break;
udelay(5);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
"HCCR: 0x%x, Soft Reset status: 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->ctrl_status));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_dword(&reg->ctrl_status));
/* If required, do an MPI FW reset now */
if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
@@ -2817,17 +2817,17 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
}
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- RD_REG_WORD(&reg->mailbox0);
- for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rd_reg_word(&reg->mailbox0);
+ for (cnt = 60; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
@@ -2840,8 +2840,8 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
"Host Risc 0x%x, mailbox0 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_WORD(&reg->mailbox0));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_word(&reg->mailbox0));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2860,9 +2860,8 @@ qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
- WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
- *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
-
+ wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ *data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}
static void
@@ -2870,8 +2869,8 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
- WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
- WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+ wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}
static void
@@ -2887,7 +2886,7 @@ qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
vha->hw->pdev->subsystem_device != 0x0240)
return;
- WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
+ wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
udelay(100);
attempt:
@@ -2989,7 +2988,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/*
* We need to have a delay here since the card will not respond while
@@ -2999,7 +2998,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
data = qla2x00_debounce_register(&reg->ctrl_status);
for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
udelay(5);
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
barrier();
}
@@ -3010,8 +3009,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
"Reset register cleared by chip reset.\n");
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
/* Workaround for QLA2312 PCI parity error */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -3339,6 +3338,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
dump_size / 1024);
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->mpi_fw_dump = (char *)fw_dump +
+ ha->fwdt[1].dump_size;
mutex_unlock(&ha->optrom_mutex);
return;
}
@@ -3650,8 +3651,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Disable SRAM, Instruction RAM and GP RAM parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
+ rd_reg_word(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -3758,11 +3759,11 @@ enable_82xx_npiv:
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2300(ha))
/* SRAM parity */
- WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
+ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
else
/* SRAM, Instruction RAM and GP RAM parity */
- WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
+ rd_reg_word(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -4006,11 +4007,11 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
- WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
- WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
- RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
+ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
+ wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
+ wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
+ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
+ rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
}
void
@@ -4072,15 +4073,15 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
}
icb->firmware_options_2 |= cpu_to_le32(BIT_23);
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
- WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
+ wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
+ wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
+ wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
+ wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
} else {
- WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
- WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
+ wrt_reg_dword(&reg->isp24.req_q_in, 0);
+ wrt_reg_dword(&reg->isp24.req_q_out, 0);
+ wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
+ wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
}
qlt_24xx_config_rings(vha);
@@ -4090,11 +4091,11 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
ql_dbg(ql_dbg_init, vha, 0x00fd,
"Speed set by user : %s Gbps \n",
qla2x00_get_link_speed_str(ha, ha->set_data_rate));
- icb->firmware_options_3 = (ha->set_data_rate << 13);
+ icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
}
/* PCI posting */
- RD_REG_DWORD(&ioreg->hccr);
+ rd_reg_word(&ioreg->hccr);
}
/**
@@ -4125,7 +4126,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req || !test_bit(que, ha->req_qid_map))
continue;
- req->out_ptr = (void *)(req->ring + req->length);
+ req->out_ptr = (uint16_t *)(req->ring + req->length);
*req->out_ptr = 0;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
@@ -4142,7 +4143,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
rsp = ha->rsp_q_map[que];
if (!rsp || !test_bit(que, ha->rsp_qid_map))
continue;
- rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
*rsp->in_ptr = 0;
/* Initialize response queue entries */
if (IS_QLAFX00(ha))
@@ -4181,12 +4182,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
mid_init_cb->init_cb.execution_throttle =
cpu_to_le16(ha->cur_fw_xcb_count);
ha->flags.dport_enabled =
- (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
+ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+ BIT_7) != 0;
ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
(ha->flags.dport_enabled) ? "enabled" : "disabled");
/* FA-WWPN Status */
ha->flags.fawwpn_enabled =
- (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
+ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+ BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
@@ -4565,7 +4568,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
ha->nvram_size = sizeof(*nv);
ha->nvram_base = 0;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
- if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
+ if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
ha->nvram_base = 0x80;
/* Get NVRAM data and calculate checksum. */
@@ -5079,6 +5082,54 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
return (rval);
}
+static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ fc_port_t *fcport;
+ int rval;
+
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+ /* borrowing */
+ u32 *bp, sz;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct els_plogi_payload),
+ ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ ha->init_cb, sz);
+ if (rval == QLA_SUCCESS) {
+ __be32 *q = &ha->plogi_els_payld.data[0];
+
+ bp = (uint32_t *)ha->init_cb;
+ cpu_to_be32_array(q, bp, sz / 4);
+ memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ goto skip_login;
+ }
+ }
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->n2n_flag) {
+ qla24xx_fcport_handle_login(vha, fcport);
+ return QLA_SUCCESS;
+ }
+ }
+
+skip_login:
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+ return QLA_FUNCTION_FAILED;
+}
+
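Factoring the N2N path into qla2x00_configure_n2n_loop() is a pure code motion
plus one cast cleanup ((void *)ha->init_cb becomes ha->init_cb). The
"borrowing" block reuses the init_cb DMA buffer as scratch for the PLOGI
template and byte-swaps it into big-endian wire order; cpu_to_be32_array()
(from include/linux/byteorder/generic.h) is roughly:

	static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++)
			dst[i] = cpu_to_be32(src[i]);
	}
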
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5096,7 +5147,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
int found_devs;
int found;
fc_port_t *fcport, *new_fcport;
-
uint16_t index;
uint16_t entries;
struct gid_list_info *gid;
@@ -5106,47 +5156,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
unsigned long flags;
/* Initiate N2N login. */
- if (N2N_TOPO(ha)) {
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- /* borrowing */
- u32 *bp, sz;
-
- memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct els_plogi_payload),
- ha->init_cb_size);
- rval = qla24xx_get_port_login_templ(vha,
- ha->init_cb_dma, (void *)ha->init_cb, sz);
- if (rval == QLA_SUCCESS) {
- __be32 *q = &ha->plogi_els_payld.data[0];
-
- bp = (uint32_t *)ha->init_cb;
- cpu_to_be32_array(q, bp, sz / 4);
-
- memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
- } else {
- ql_dbg(ql_dbg_init, vha, 0x00d1,
- "PLOGI ELS param read fail.\n");
- goto skip_login;
- }
- }
-
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->n2n_flag) {
- qla24xx_fcport_handle_login(vha, fcport);
- return QLA_SUCCESS;
- }
- }
-skip_login:
- spin_lock_irqsave(&vha->work_lock, flags);
- vha->scan.scan_retry++;
- spin_unlock_irqrestore(&vha->work_lock, flags);
-
- if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- }
- return QLA_FUNCTION_FAILED;
- }
+ if (N2N_TOPO(ha))
+ return qla2x00_configure_n2n_loop(vha);
found_devs = 0;
new_fcport = NULL;
@@ -7078,10 +7089,10 @@ qla2x00_reset_adapter(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -7102,10 +7113,10 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (IS_NOPOLLING_TYPE(ha))
@@ -7143,7 +7154,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
int rval;
struct init_cb_24xx *icb;
struct nvram_24xx *nv;
- uint32_t *dptr;
+ __le32 *dptr;
uint8_t *dptr1, *dptr2;
uint32_t chksum;
uint16_t cnt;
@@ -7171,7 +7182,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
/* Get NVRAM data into cache and calculate checksum. */
- dptr = (uint32_t *)nv;
+ dptr = (__force __le32 *)nv;
ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
@@ -7199,7 +7210,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0);
nv->hard_address = cpu_to_le16(124);
@@ -7367,7 +7378,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->login_retry_count = ql2xloginretrycount;
/* N2N: driver will initiate Login instead of FW */
- icb->firmware_options_3 |= BIT_8;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Enable ZIO. */
if (!vha->flags.init_done) {
@@ -7435,7 +7446,7 @@ qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{
- uint32_t *p = (void *)image_status;
+ __le32 *p = (__force __le32 *)image_status;
uint n = sizeof(*image_status) / sizeof(*p);
uint32_t sum = 0;
@@ -7498,7 +7509,7 @@ qla28xx_get_aux_images(
goto check_sec_image;
}
- qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
+ qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
ha->flt_region_aux_img_status_pri,
sizeof(pri_aux_image_status) >> 2);
qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
@@ -7531,7 +7542,7 @@ check_sec_image:
goto check_valid_image;
}
- qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
+ qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
ha->flt_region_aux_img_status_sec,
sizeof(sec_aux_image_status) >> 2);
qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
@@ -7596,7 +7607,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
goto check_sec_image;
}
- if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
+ if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
QLA_SUCCESS) {
WARN_ON_ONCE(true);
@@ -7703,7 +7714,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
ql_dbg(ql_dbg_init, vha, 0x008b,
"FW: Loading firmware from flash (%x).\n", faddr);
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x008c,
@@ -7716,18 +7727,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return QLA_FUNCTION_FAILED;
}
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) {
ql_dbg(ql_dbg_init, vha, 0x008d,
"-> Loading segment %u...\n", j);
qla24xx_read_flash_data(vha, dcode, faddr, 10);
- risc_addr = be32_to_cpu(dcode[2]);
- risc_size = be32_to_cpu(dcode[3]);
+ risc_addr = be32_to_cpu((__force __be32)dcode[2]);
+ risc_size = be32_to_cpu((__force __be32)dcode[3]);
if (!*srisc_addr) {
*srisc_addr = risc_addr;
- risc_attr = be32_to_cpu(dcode[9]);
+ risc_attr = be32_to_cpu((__force __be32)dcode[9]);
}
dlen = ha->fw_transfer_size >> 2;
@@ -7767,9 +7778,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
fwdt->template = NULL;
fwdt->length = 0;
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 7);
- risc_size = be32_to_cpu(dcode[2]);
+ risc_size = be32_to_cpu((__force __be32)dcode[2]);
ql_dbg(ql_dbg_init, vha, 0x0161,
"-> fwdt%u template array at %#x (%#x dwords)\n",
j, faddr, risc_size);
@@ -7838,7 +7849,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
int i, fragment;
- uint16_t *wcode, *fwcode;
+ uint16_t *wcode;
+ __be16 *fwcode;
uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
struct fw_blob *blob;
struct qla_hw_data *ha = vha->hw;
@@ -7858,7 +7870,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
wcode = (uint16_t *)req->ring;
*srisc_addr = 0;
- fwcode = (uint16_t *)blob->fw->data;
+ fwcode = (__force __be16 *)blob->fw->data;
fwclen = 0;
/* Validate firmware image by checking version. */
@@ -7906,7 +7918,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
"words 0x%x.\n", risc_addr, wlen);
for (i = 0; i < wlen; i++)
- wcode[i] = swab16(fwcode[i]);
+ wcode[i] = swab16((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
wlen);
@@ -7943,7 +7955,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ulong i;
uint j;
struct fw_blob *blob;
- uint32_t *fwcode;
+ __be32 *fwcode;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct fwdt *fwdt = ha->fwdt;
@@ -7959,8 +7971,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- fwcode = (void *)blob->fw->data;
- dcode = fwcode;
+ fwcode = (__force __be32 *)blob->fw->data;
+ dcode = (__force uint32_t *)fwcode;
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x0093,
"Unable to verify integrity of firmware image (%zd).\n",
@@ -7971,7 +7983,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) {
@@ -7997,7 +8009,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dlen);
for (i = 0; i < dlen; i++)
- dcode[i] = swab32(fwcode[i]);
+ dcode[i] = swab32((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
if (rval) {
@@ -8051,7 +8063,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dcode = fwdt->template;
for (i = 0; i < risc_size; i++)
- dcode[i] = fwcode[i];
+ dcode[i] = (__force u32)fwcode[i];
if (!qla27xx_fwdt_template_valid(dcode)) {
ql_log(ql_log_warn, vha, 0x0175,
@@ -8322,7 +8334,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
int rval;
struct init_cb_81xx *icb;
struct nvram_81xx *nv;
- uint32_t *dptr;
+ __le32 *dptr;
uint8_t *dptr1, *dptr2;
uint32_t chksum;
uint16_t cnt;
@@ -8369,7 +8381,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
"primary" : "secondary");
ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
- dptr = (uint32_t *)nv;
+ dptr = (__force __le32 *)nv;
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
@@ -8396,7 +8408,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0);
nv->port_name[0] = 0x21;
@@ -8440,7 +8452,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
if (IS_T10_PI_CAPABLE(ha))
- nv->frame_payload_size &= ~7;
+ nv->frame_payload_size &= cpu_to_le16(~7);
qlt_81xx_config_nvram_stage1(vha, nv);
@@ -8603,10 +8615,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
/* enable RIDA Format2 */
- icb->firmware_options_3 |= BIT_0;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_0);
/* N2N: driver will initiate Login instead of FW */
- icb->firmware_options_3 |= BIT_8;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Determine NVMe/FCP priority for target ports */
ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
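
The qla24xx/qla81xx NVRAM paths above follow one pattern: cast the byte buffer
to __le32 * once at the boundary (the __force silences sparse for that single
cast), then do all arithmetic in CPU order via le32_to_cpu(). A self-contained
sketch of the checksum idiom used in both functions, with a hypothetical helper
name (in this driver a valid image sums to zero):

	static u32 qla_nvram_sum(const __le32 *buf, size_t dwords)
	{
		u32 sum = 0;

		while (dwords--)
			sum += le32_to_cpu(*buf++);

		return sum;	/* nonzero => corrupt image, load defaults */
	}
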
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 364b3db8b2dc..1fb6ccac07cc 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -40,16 +40,16 @@ qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
* register value.
*/
static __inline__ uint16_t
-qla2x00_debounce_register(volatile uint16_t __iomem *addr)
+qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
volatile uint16_t first;
volatile uint16_t second;
do {
- first = RD_REG_WORD(addr);
+ first = rd_reg_word(addr);
barrier();
cpu_relax();
- second = RD_REG_WORD(addr);
+ second = rd_reg_word(addr);
} while (first != second);
return (first);
@@ -329,7 +329,7 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
} else
req->ring_ptr++;
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
}
static inline int
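
The RD_REG_*/WRT_REG_* macro-to-function rename visible throughout this series
is what makes the struct retyping enforceable: the lower-case helpers take
properly typed __le16/__le32 __iomem pointers, so sparse checks both the
register width and the address space at every call site. Their definitions live
in qla_def.h, outside this diff; a plausible shape, assuming the usual readw()/
writew() semantics (which already byte-swap relative to little-endian
registers):

	static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
	{
		return readw(addr);
	}

	static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
	{
		writew(data, addr);
	}
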
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 182bd68c79ac..8865c35d3421 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -376,7 +376,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
+ cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -428,8 +428,8 @@ qla2x00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
+ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
@@ -472,21 +472,21 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
/* Set chip new ring index. */
if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
} else if (IS_QLA83XX(ha)) {
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
- WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+ wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
} else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&reg->isp24.req_q_in);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
}
}
}
@@ -661,7 +661,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
cur_dsd->address = 0;
cur_dsd->length = 0;
cur_dsd++;
- cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
return 0;
}
@@ -755,8 +755,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
}
struct fw_dif_context {
- uint32_t ref_tag;
- uint16_t app_tag;
+ __le32 ref_tag;
+ __le16 app_tag;
uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
};
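
fw_dif_context is handed to firmware inside the CRC-2 IOCB, so its tags get the
same little-endian treatment as the other IOCB fields. A hedged sketch of
filling the retyped block (hypothetical helper and values; the real tag and
mask policy is chosen per protection type in qla24xx_set_t10dif_tags(), not
shown in this hunk):

	static void qla_fill_dif_ctx(struct fw_dif_context *ctx, u32 lba, u16 app)
	{
		ctx->ref_tag = cpu_to_le32(lba);	/* initial reference tag */
		ctx->app_tag = cpu_to_le16(app);
		memset(ctx->ref_tag_mask, 0xff, sizeof(ctx->ref_tag_mask));
		memset(ctx->app_tag_mask, 0xff, sizeof(ctx->app_tag_mask));
	}
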
@@ -1389,7 +1389,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
struct dsd64 *cur_dsd;
- uint32_t *fcp_dl;
+ __be32 *fcp_dl;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t total_bytes = 0;
@@ -1456,7 +1456,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
&crc_ctx_pkt->ref_tag, tot_prot_dsds);
put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
- cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+ cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
/* Determine SCSI command length -- align to 4 byte boundary */
if (cmd->cmd_len > 16) {
@@ -1545,7 +1545,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
crc_ctx_pkt->guard_seed = cpu_to_le16(0);
/* Fibre channel byte count */
cmd_pkt->byte_count = cpu_to_le32(total_bytes);
- fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+ fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
additional_fcpcdb_len);
*fcp_dl = htonl(total_bytes);
@@ -1637,7 +1637,7 @@ qla24xx_start_scsi(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1698,7 +1698,7 @@ qla24xx_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -1822,7 +1822,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1881,7 +1881,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
req->ring_ptr++;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1957,7 +1957,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2018,7 +2018,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_SUCCESS;
@@ -2157,7 +2157,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2214,7 +2214,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
req->ring_ptr++;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
@@ -2266,13 +2266,13 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
cnt = *req->out_ptr;
else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha))
- cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
- cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+ cnt = rd_reg_dword(reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
- cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+ cnt = rd_reg_dword(&reg->isp24.req_q_out);
else if (IS_QLAFX00(ha))
- cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
+ cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
@@ -2305,8 +2305,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE);
if (IS_QLAFX00(ha)) {
- WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
- WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
+ wrt_reg_byte((void __iomem *)&pkt->entry_count, req_cnt);
+ wrt_reg_word((void __iomem *)&pkt->handle, handle);
} else {
pkt->entry_count = req_cnt;
pkt->handle = handle;
@@ -2344,9 +2344,10 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
- logio->control_flags |= LCF_NVME_PRLI;
+ logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
if (sp->vha->flags.nvme_first_burst)
- logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
+ logio->io_parameter[0] =
+ cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
}
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
@@ -2680,7 +2681,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- els_iocb->tx_dsd_count = 1;
+ els_iocb->tx_dsd_count = cpu_to_le16(1);
els_iocb->vp_index = vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = 0;
@@ -2700,7 +2701,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
&els_iocb->tx_address);
- els_iocb->rx_dsd_count = 1;
+ els_iocb->rx_dsd_count = cpu_to_le16(1);
els_iocb->rx_byte_count = els_iocb->rx_len =
cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
@@ -2712,7 +2713,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(uint8_t *)els_iocb,
sizeof(*els_iocb));
} else {
- els_iocb->control_flags = 1 << 13;
+ els_iocb->control_flags = cpu_to_le16(1 << 13);
els_iocb->tx_byte_count =
cpu_to_le32(sizeof(struct els_logo_payload));
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
@@ -2787,7 +2788,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct qla_work_evt *e;
struct fc_port *conflict_fcport;
port_id_t cid; /* conflict Nport id */
- u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+ const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072,
@@ -2800,7 +2801,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
else {
- switch (fw_status[0]) {
+ switch (le32_to_cpu(fw_status[0])) {
case CS_DATA_UNDERRUN:
case CS_COMPLETE:
memset(&ea, 0, sizeof(ea));
@@ -2810,9 +2811,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break;
case CS_IOCB_ERROR:
- switch (fw_status[1]) {
+ switch (le32_to_cpu(fw_status[1])) {
case LSC_SCODE_PORTID_USED:
- lid = fw_status[2] & 0xffff;
+ lid = le32_to_cpu(fw_status[2]) & 0xffff;
qlt_find_sess_invalidate_other(vha,
wwn_to_u64(fcport->port_name),
fcport->d_id, lid, &conflict_fcport);
@@ -2846,9 +2847,11 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break;
case LSC_SCODE_NPORT_USED:
- cid.b.domain = (fw_status[2] >> 16) & 0xff;
- cid.b.area = (fw_status[2] >> 8) & 0xff;
- cid.b.al_pa = fw_status[2] & 0xff;
+ cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
+ & 0xff;
+ cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
+ & 0xff;
+ cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
cid.b.rsvd_1 = 0;
ql_dbg(ql_dbg_disc, vha, 0x20ec,
@@ -3022,7 +3025,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->sys_define = 0;
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
- els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
els_iocb->vp_index = sp->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
@@ -3216,7 +3219,7 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t tot_dsds;
struct device_reg_82xx __iomem *reg;
uint32_t dbval;
- uint32_t *fcp_dl;
+ __be32 *fcp_dl;
uint8_t additional_cdb_len;
struct ct6_dsd *ctx;
struct scsi_qla_host *vha = sp->vha;
@@ -3310,7 +3313,7 @@ sufficient_dsds:
req_cnt = 1;
if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3398,7 +3401,7 @@ sufficient_dsds:
memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
- fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
additional_cdb_len);
*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
@@ -3419,7 +3422,7 @@ sufficient_dsds:
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3495,10 +3498,10 @@ sufficient_dsds:
if (ql2xdbwr)
qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -3536,7 +3539,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(make_handle(req->id, sp->handle));
+ abt_iocb->handle = make_handle(req->id, sp->handle);
if (sp->fcport) {
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -3544,10 +3547,10 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
}
abt_iocb->handle_to_abort =
- cpu_to_le32(make_handle(aio->u.abt.req_que_no,
- aio->u.abt.cmd_hndl));
+ make_handle(le16_to_cpu(aio->u.abt.req_que_no),
+ aio->u.abt.cmd_hndl);
abt_iocb->vp_index = vha->vp_idx;
- abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
+ abt_iocb->req_que_no = aio->u.abt.req_que_no;
/* Send the command to the firmware */
wmb();
}
@@ -3562,7 +3565,7 @@ qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
for (i = 0; i < sz; i++)
- mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
+ mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
}
static void
@@ -3586,7 +3589,7 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -3604,32 +3607,29 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
/*
* Build NVME LS request
*/
-static int
+static void
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
struct srb_iocb *nvme;
- int rval = QLA_SUCCESS;
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
- cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
+ cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
- cmd_pkt->tx_dseg_count = 1;
- cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
- cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
+ cmd_pkt->tx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
+ cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
- cmd_pkt->rx_dseg_count = 1;
- cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
- cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+ cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
+ cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
-
- return rval;
}
static void
@@ -3894,7 +3894,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 8a78d395bbc8..cf0800546740 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -89,9 +89,9 @@ qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
/* terminate exchange */
rsp_els->entry_type = ELS_IOCB_TYPE;
rsp_els->entry_count = 1;
- rsp_els->nport_handle = ~0;
+ rsp_els->nport_handle = cpu_to_le16(~0);
rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
- rsp_els->control_flags = EPD_RX_XCHG;
+ rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
ql_dbg(ql_dbg_init, vha, 0x0283,
"Sending ELS Response to terminate exchange %#x...\n",
abts->rx_xch_addr_to_abort);
@@ -141,7 +141,7 @@ qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
abts_rsp->ox_id = abts->ox_id;
abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
- abts_rsp->payload.ba_acc.high_seq_cnt = ~0;
+ abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
ql_dbg(ql_dbg_init, vha, 0x028b,
"Sending BA ACC response to ABTS %#x...\n",
@@ -204,7 +204,7 @@ qla2100_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- hccr = RD_REG_WORD(&reg->hccr);
+ hccr = rd_reg_word(&reg->hccr);
if (qla2x00_check_reg16_for_disconnect(vha, hccr))
break;
if (hccr & HCCR_RISC_PAUSE) {
@@ -216,18 +216,18 @@ qla2100_intr_handler(int irq, void *dev_id)
* bit to be cleared. Schedule a big hammer to get
* out of the RISC PAUSED state.
*/
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
- } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
+ } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
break;
- if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ if (rd_reg_word(&reg->semaphore) & BIT_0) {
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
/* Get mailbox data. */
mb[0] = RD_MAILBOX_REG(ha, reg, 0);
@@ -246,13 +246,13 @@ qla2100_intr_handler(int irq, void *dev_id)
mb[0]);
}
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- RD_REG_WORD(&reg->semaphore);
+ wrt_reg_word(&reg->semaphore, 0);
+ rd_reg_word(&reg->semaphore);
} else {
qla2x00_process_response_queue(rsp);
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
}
qla2x00_handle_mbx_completion(ha, status);
@@ -324,14 +324,14 @@ qla2300_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSR_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_WORD(&reg->hccr);
+ hccr = rd_reg_word(&reg->hccr);
if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
ql_log(ql_log_warn, vha, 0x5026,
@@ -347,10 +347,10 @@ qla2300_intr_handler(int irq, void *dev_id)
* interrupt bit to be cleared. Schedule a big
* hammer to get out of the RISC PAUSED state.
*/
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSR_RISC_INT) == 0)
@@ -365,7 +365,7 @@ qla2300_intr_handler(int irq, void *dev_id)
status |= MBX_INTERRUPT;
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
+ wrt_reg_word(&reg->semaphore, 0);
break;
case 0x12:
mb[0] = MSW(stat);
@@ -393,8 +393,8 @@ qla2300_intr_handler(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD_RELAXED(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word_relaxed(&reg->hccr);
}
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -412,7 +412,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint32_t mboxes;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -428,15 +428,15 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
mboxes >>= 1;
- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
+ wptr = MAILBOX_REG(ha, reg, 1);
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
+ wptr = MAILBOX_REG(ha, reg, 8);
if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
else if (mboxes & BIT_0)
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
wptr++;
mboxes >>= 1;
@@ -451,19 +451,19 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
int rval;
struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
/* Seed data -- mailbox1 -> mailbox7. */
if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
- wptr = (uint16_t __iomem *)&reg24->mailbox1;
+ wptr = &reg24->mailbox1;
else if (IS_QLA8044(vha->hw))
- wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
+ wptr = &reg82->mailbox_out[1];
else
return;
for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
- mb[cnt] = RD_REG_WORD(wptr);
+ mb[cnt] = rd_reg_word(wptr);
ql_dbg(ql_dbg_async, vha, 0x5021,
"Inter-Driver Communication %s -- "
@@ -756,6 +756,39 @@ qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
return NULL;
}
+/* Shall be called only on supported adapters. */
+static void
+qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ bool reset_isp_needed = false;
+
+ ql_log(ql_log_warn, vha, 0x02f0,
+ "MPI Heartbeat stop. MPI reset is%s needed. "
+ "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
+ mb[1] & BIT_8 ? "" : " not",
+ mb[0], mb[1], mb[2], mb[3]);
+
+ if ((mb[1] & BIT_8) == 0)
+ return;
+
+ ql_log(ql_log_warn, vha, 0x02f1,
+ "MPI Heartbeat stop. FW dump needed\n");
+
+ if (ql2xfulldump_on_mpifail) {
+ ha->isp_ops->fw_dump(vha);
+ reset_isp_needed = true;
+ }
+
+ ha->isp_ops->mpi_fw_dump(vha, 1);
+
+ if (reset_isp_needed) {
+ vha->hw->flags.fw_init_done = 0;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+}
+
/**
* qla2x00_async_event() - Process asynchronous events.
* @vha: SCSI driver HA context
@@ -785,7 +818,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
goto skip_rio;
switch (mb[0]) {
case MBA_SCSI_COMPLETION:
- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
+ handles[0] = make_handle(mb[2], mb[1]);
handle_cnt = 1;
break;
case MBA_CMPLT_1_16BIT:
@@ -824,10 +857,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
mb[0] = MBA_SCSI_COMPLETION;
break;
case MBA_CMPLT_2_32BIT:
- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
- handles[1] = le32_to_cpu(
- ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
- RD_MAILBOX_REG(ha, reg, 6));
+ handles[0] = make_handle(mb[2], mb[1]);
+ handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
+ RD_MAILBOX_REG(ha, reg, 6));
handle_cnt = 2;
mb[0] = MBA_SCSI_COMPLETION;
break;
@@ -858,10 +890,10 @@ skip_rio:
IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
u16 m[4];
- m[0] = RD_REG_WORD(&reg24->mailbox4);
- m[1] = RD_REG_WORD(&reg24->mailbox5);
- m[2] = RD_REG_WORD(&reg24->mailbox6);
- mbx = m[3] = RD_REG_WORD(&reg24->mailbox7);
+ m[0] = rd_reg_word(&reg24->mailbox4);
+ m[1] = rd_reg_word(&reg24->mailbox5);
+ m[2] = rd_reg_word(&reg24->mailbox6);
+ mbx = m[3] = rd_reg_word(&reg24->mailbox7);
ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
@@ -871,10 +903,10 @@ skip_rio:
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
mb[1], mb[2], mb[3]);
- ha->fw_dump_mpi =
- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
- RD_REG_WORD(&reg24->mailbox7) & BIT_8;
- ha->isp_ops->fw_dump(vha, 1);
+ if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ rd_reg_word(&reg24->mailbox7) & BIT_8)
+ ha->isp_ops->mpi_fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
ha->flags.fw_init_done = 0;
QLA_FW_STOPPED(ha);
@@ -979,8 +1011,8 @@ skip_rio:
ha->current_topology = 0;
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
- ? RD_REG_WORD(&reg24->mailbox4) : 0;
- mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+ ? rd_reg_word(&reg24->mailbox4) : 0;
+ mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
: mbx;
ql_log(ql_log_info, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
@@ -1347,7 +1379,7 @@ global_port_update:
break;
case MBA_IDC_NOTIFY:
if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[4] = rd_reg_word(&reg24->mailbox4);
if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
(mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
(mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
@@ -1374,25 +1406,12 @@ global_port_update:
case MBA_IDC_AEN:
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- ha->flags.fw_init_done = 0;
- ql_log(ql_log_warn, vha, 0xffff,
- "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
- mb[0], mb[1], mb[2], mb[3]);
-
- if ((mb[1] & BIT_8) ||
- (mb[2] & BIT_8)) {
- ql_log(ql_log_warn, vha, 0xd013,
- "MPI Heartbeat stop. FW dump needed\n");
- ha->fw_dump_mpi = 1;
- ha->isp_ops->fw_dump(vha, 1);
- }
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ qla27xx_handle_8200_aen(vha, mb);
} else if (IS_QLA83XX(ha)) {
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
- mb[5] = RD_REG_WORD(&reg24->mailbox5);
- mb[6] = RD_REG_WORD(&reg24->mailbox6);
- mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ mb[4] = rd_reg_word(&reg24->mailbox4);
+ mb[5] = rd_reg_word(&reg24->mailbox5);
+ mb[6] = rd_reg_word(&reg24->mailbox6);
+ mb[7] = rd_reg_word(&reg24->mailbox7);
qla83xx_handle_8200_aen(vha, mb);
} else {
ql_dbg(ql_dbg_async, vha, 0x5052,
@@ -1646,7 +1665,7 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
for (i = 0; i < sz; i++)
- si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
+ si->u.mbx.in_mb[i] = pkt->mb[i];
res = (si->u.mbx.in_mb[0] & MBS_MASK);
@@ -1747,6 +1766,7 @@ static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct sts_entry_24xx *pkt, int iocb_type)
{
+ struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
const char func[] = "ELS_CT_IOCB";
const char *type;
srb_t *sp;
@@ -1796,23 +1816,22 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
- fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
- fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+ fw_status[1] = le32_to_cpu(ese->error_subcode_1);
+ fw_status[2] = le32_to_cpu(ese->error_subcode_2);
if (iocb_type == ELS_IOCB_TYPE) {
els = &sp->u.iocb_cmd;
- els->u.els_plogi.fw_status[0] = fw_status[0];
- els->u.els_plogi.fw_status[1] = fw_status[1];
- els->u.els_plogi.fw_status[2] = fw_status[2];
- els->u.els_plogi.comp_status = fw_status[0];
+ els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
+ els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
+ els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
+ els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
if (comp_status == CS_COMPLETE) {
res = DID_OK << 16;
} else {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- els->u.els_plogi.len =
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count);
+ els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
+ ese->total_byte_count));
} else {
els->u.els_plogi.len = 0;
res = DID_ERROR << 16;
@@ -1821,8 +1840,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count));
+ le32_to_cpu(ese->total_byte_count));
goto els_ct_done;
}
@@ -1838,23 +1856,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
bsg_reply->reply_payload_rcv_len =
- le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
+ le32_to_cpu(ese->total_byte_count);
ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count));
+ le32_to_cpu(ese->total_byte_count));
} else {
ql_dbg(ql_dbg_user, vha, 0x5040,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
type, sp->handle, comp_status,
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->error_subcode_1),
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->error_subcode_2));
+ le32_to_cpu(ese->error_subcode_1),
+ le32_to_cpu(ese->error_subcode_2));
res = DID_ERROR << 16;
bsg_reply->reply_payload_rcv_len = 0;
}
@@ -2062,7 +2077,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
uint16_t state_flags;
struct nvmefc_fcp_req *fd;
uint16_t ret = QLA_SUCCESS;
- uint16_t comp_status = le16_to_cpu(sts->comp_status);
+ __le16 comp_status = sts->comp_status;
int logit = 0;
iocb = &sp->u.iocb_cmd;
@@ -2093,7 +2108,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
} else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
(SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
/* Response already DMA'd to fd->rspaddr. */
- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
} else if ((state_flags & SF_FCP_RSP_DMA)) {
/*
* Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
@@ -2110,8 +2125,8 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
inbuf = (uint32_t *)&sts->nvme_ersp_data;
outbuf = (uint32_t *)fd->rspaddr;
- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
- if (unlikely(iocb->u.nvme.rsp_pyld_len >
+ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
+ if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
sizeof(struct nvme_fc_ersp_iu))) {
if (ql_mask_match(ql_dbg_io)) {
WARN_ONCE(1, "Unexpected response payload length %u.\n",
@@ -2121,9 +2136,9 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
iocb->u.nvme.rsp_pyld_len);
}
iocb->u.nvme.rsp_pyld_len =
- sizeof(struct nvme_fc_ersp_iu);
+ cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
}
- iter = iocb->u.nvme.rsp_pyld_len >> 2;
+ iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
for (; iter; iter--)
*outbuf++ = swab32(*inbuf++);
}
@@ -2138,7 +2153,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
"Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
tgt_xfer_len, fd->transferred_length);
logit = 1;
- } else if (comp_status == CS_DATA_UNDERRUN) {
+ } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
/*
* Do not log if this is just an underflow and there
* is no data loss.
@@ -2158,7 +2173,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
* If transport error then Failure (HBA rejects request)
* otherwise transport will handle.
*/
- switch (comp_status) {
+ switch (le16_to_cpu(comp_status)) {
case CS_COMPLETE:
break;
@@ -2300,7 +2315,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
/* Adjust ring index */
- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
+ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
@@ -2391,9 +2406,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
* For type 3: ref & app tag is all 'f's
* For type 0,1,2: app tag is all 'f's
*/
- if ((a_app_tag == T10_PI_APP_ESCAPE) &&
- ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
- (a_ref_tag == T10_PI_REF_ESCAPE))) {
+ if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
+ (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
+ a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
uint32_t blocks_done, resid;
sector_t lba_s = scsi_get_lba(cmd);
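The DIF change above fixes a subtle point: a_app_tag and a_ref_tag are CPU-endian locals, while the T10_PI_APP_ESCAPE/T10_PI_REF_ESCAPE constants from <linux/t10-pi.h> are big-endian, so the conversion is applied to the constants rather than to the data. A compact restatement of the predicate as a hedged sketch (demo_is_escape() is a hypothetical helper):

	#include <asm/byteorder.h>
	#include <linux/t10-pi.h>
	#include <linux/types.h>

	/* app/ref tags are CPU-endian values extracted from the DIF tuple. */
	static bool demo_is_escape(u16 a_app_tag, u32 a_ref_tag, bool is_type3)
	{
		/* Convert the big-endian escape constants, not the data. */
		return a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
		       (!is_type3 ||
			a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE));
	}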
@@ -2751,6 +2766,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sense_len = par_sense_len = rsp_info_len = resid_len =
fw_resid_len = 0;
if (IS_FWI2_CAPABLE(ha)) {
+ u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay);
+
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le32_to_cpu(sts24->sense_len);
if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
@@ -2765,11 +2782,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
ox_id = le16_to_cpu(sts24->ox_id);
par_sense_len = sizeof(sts24->data);
/* Valid values of the retry delay timer are 0x1-0xffef */
- if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
- retry_delay = sts24->retry_delay & 0x3fff;
+ if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) {
+ retry_delay = sts24_retry_delay & 0x3fff;
ql_dbg(ql_dbg_io, sp->vha, 0x3033,
"%s: scope=%#x retry_delay=%#x\n", __func__,
- sts24->retry_delay >> 14, retry_delay);
+ sts24_retry_delay >> 14, retry_delay);
}
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
@@ -3143,7 +3160,7 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint32_t mboxes;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -3159,11 +3176,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
mboxes >>= 1;
- wptr = (uint16_t __iomem *)&reg->mailbox1;
+ wptr = &reg->mailbox1;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
mboxes >>= 1;
wptr++;
@@ -3183,7 +3200,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
+ abt->u.abt.comp_status = pkt->nport_handle;
sp->done(sp, 0);
}
@@ -3340,9 +3357,9 @@ process_err:
if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
- WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
+ wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
} else {
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
}
}
@@ -3359,13 +3376,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
return;
rval = QLA_SUCCESS;
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x0001);
- for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x0001);
+ for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
- WRT_REG_DWORD(&reg->iobase_window, 0x0001);
+ wrt_reg_dword(&reg->iobase_window, 0x0001);
udelay(10);
} else
rval = QLA_FUNCTION_TIMEOUT;
@@ -3374,11 +3391,11 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto next_test;
rval = QLA_SUCCESS;
- WRT_REG_DWORD(&reg->iobase_window, 0x0003);
- for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ wrt_reg_dword(&reg->iobase_window, 0x0003);
+ for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
- WRT_REG_DWORD(&reg->iobase_window, 0x0003);
+ wrt_reg_dword(&reg->iobase_window, 0x0003);
udelay(10);
} else
rval = QLA_FUNCTION_TIMEOUT;
@@ -3387,13 +3404,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto done;
next_test:
- if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
+ if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
ql_log(ql_log_info, vha, 0x504c,
"Additional code -- 0x55AA.\n");
done:
- WRT_REG_DWORD(&reg->iobase_window, 0x0000);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x0000);
+ rd_reg_dword(&reg->iobase_window);
}
/**
@@ -3437,14 +3454,14 @@ qla24xx_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_DWORD(&reg->hccr);
+ hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_warn, vha, 0x504b,
"RISC paused -- HCCR=%x, Dumping firmware.\n",
@@ -3452,7 +3469,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla2xxx_check_risc_status(vha);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -3469,9 +3486,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
break;
case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox1);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb[1] = rd_reg_word(&reg->mailbox1);
+ mb[2] = rd_reg_word(&reg->mailbox2);
+ mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
case INTR_RSP_QUE_UPDATE:
@@ -3491,8 +3508,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat * 0xff);
break;
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD_RELAXED(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword_relaxed(&reg->hccr);
if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
ndelay(3500);
}
@@ -3531,8 +3548,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
if (!ha->flags.disable_msix_handshake) {
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD_RELAXED(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword_relaxed(&reg->hccr);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3566,14 +3583,14 @@ qla24xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_DWORD(&reg->hccr);
+ hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_info, vha, 0x5050,
"RISC paused -- HCCR=%x, Dumping firmware.\n",
@@ -3581,7 +3598,7 @@ qla24xx_msix_default(int irq, void *dev_id)
qla2xxx_check_risc_status(vha);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -3598,9 +3615,9 @@ qla24xx_msix_default(int irq, void *dev_id)
break;
case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox1);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb[1] = rd_reg_word(&reg->mailbox1);
+ mb[2] = rd_reg_word(&reg->mailbox2);
+ mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
case INTR_RSP_QUE_UPDATE:
@@ -3620,7 +3637,7 @@ qla24xx_msix_default(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
} while (0);
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3671,7 +3688,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
queue_work(ha->wq, &qpair->q_work);
@@ -3932,7 +3949,7 @@ clear_risc_ints:
goto fail;
spin_lock_irq(&ha->hardware_lock);
- WRT_REG_WORD(&reg->isp.semaphore, 0);
+ wrt_reg_word(&reg->isp.semaphore, 0);
spin_unlock_irq(&ha->hardware_lock);
fail:
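The retry-delay rework in qla2x00_status_entry() above illustrates the conversion discipline for packed sub-fields: the __le16 value is converted to CPU order once into a local, and the sub-fields are then extracted from that local rather than from the wire field. Per the driver comment, valid timer values are 0x1-0xffef, with the delay in bits 0-13 and the scope in bits 14-15 (layout as inferred from the code above; demo_decode_retry_delay() is a hypothetical helper):

	#include <asm/byteorder.h>
	#include <linux/types.h>

	/* Split a status-IOCB retry delay word into scope and delay parts. */
	static void demo_decode_retry_delay(__le16 wire, u16 *scope, u16 *delay)
	{
		u16 v = le16_to_cpu(wire);	/* swap once, work in CPU order */

		*scope = 0;
		*delay = 0;
		if (v > 0 && v < 0xfff1) {	/* valid range per the comment */
			*scope = v >> 14;
			*delay = v & 0x3fff;
		}
	}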
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d6c991bd1bde..df31ee0d59b2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -106,7 +106,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint8_t io_lock_on;
uint16_t command = 0;
uint16_t *iptr;
- uint16_t __iomem *optr;
+ __le16 __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
@@ -208,11 +208,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
- optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
+ optr = &reg->isp82.mailbox_in[0];
else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
- optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
+ optr = &reg->isp24.mailbox0;
else
- optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
+ optr = MAILBOX_REG(ha, &reg->isp, 0);
iptr = mcp->mb;
command = mcp->mb[0];
@@ -222,12 +222,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Mailbox registers (OUT):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
- optr =
- (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
+ optr = MAILBOX_REG(ha, &reg->isp, 8);
if (mboxes & BIT_0) {
ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr);
- WRT_REG_WORD(optr, *iptr);
+ wrt_reg_word(optr, *iptr);
}
mboxes >>= 1;
@@ -253,11 +252,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
if (IS_P3P_TYPE(ha))
- WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
else if (IS_FWI2_CAPABLE(ha))
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
- WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
@@ -300,7 +299,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Cmd=%x Polling Mode.\n", command);
if (IS_P3P_TYPE(ha)) {
- if (RD_REG_DWORD(&reg->isp82.hint) &
+ if (rd_reg_dword(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -311,11 +310,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
- WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
} else if (IS_FWI2_CAPABLE(ha))
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
- WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
@@ -413,14 +412,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint16_t w;
if (IS_FWI2_CAPABLE(ha)) {
- mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
- mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
- mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
- mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
- mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
- ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
- host_status = RD_REG_DWORD(&reg->isp24.host_status);
- hccr = RD_REG_DWORD(&reg->isp24.hccr);
+ mb[0] = rd_reg_word(&reg->isp24.mailbox0);
+ mb[1] = rd_reg_word(&reg->isp24.mailbox1);
+ mb[2] = rd_reg_word(&reg->isp24.mailbox2);
+ mb[3] = rd_reg_word(&reg->isp24.mailbox3);
+ mb[7] = rd_reg_word(&reg->isp24.mailbox7);
+ ictrl = rd_reg_dword(&reg->isp24.ictrl);
+ host_status = rd_reg_dword(&reg->isp24.host_status);
+ hccr = rd_reg_dword(&reg->isp24.hccr);
ql_log(ql_log_warn, vha, 0xd04c,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
@@ -430,7 +429,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
} else {
mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
- ictrl = RD_REG_WORD(&reg->isp.ictrl);
+ ictrl = rd_reg_word(&reg->isp.ictrl);
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
"mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
@@ -462,7 +461,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* a dump
*/
if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
rval = QLA_FUNCTION_TIMEOUT;
}
}
@@ -573,15 +572,15 @@ mbx_done:
if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
ql_dbg(ql_dbg_mbx, vha, 0x1198,
"host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
- RD_REG_DWORD(&reg->isp24.host_status),
- RD_REG_DWORD(&reg->isp24.ictrl),
- RD_REG_DWORD(&reg->isp24.istatus));
+ rd_reg_dword(&reg->isp24.host_status),
+ rd_reg_dword(&reg->isp24.ictrl),
+ rd_reg_dword(&reg->isp24.istatus));
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1206,
"ctrl_status=%#x ictrl=%#x istatus=%#x\n",
- RD_REG_WORD(&reg->isp.ctrl_status),
- RD_REG_WORD(&reg->isp.ictrl),
- RD_REG_WORD(&reg->isp.istatus));
+ rd_reg_word(&reg->isp.ctrl_status),
+ rd_reg_word(&reg->isp.ictrl),
+ rd_reg_word(&reg->isp.istatus));
}
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
@@ -3038,7 +3037,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter = (void *)stats;
+ uint32_t *iter = (uint32_t *)stats;
ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
struct qla_hw_data *ha = vha->hw;
@@ -3097,7 +3096,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter = (void *)stats;
+ uint32_t *iter = (uint32_t *)stats;
ushort dwords = sizeof(*stats)/sizeof(*iter);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
@@ -3110,8 +3109,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mc.mb[6] = MSW(MSD(stats_dma));
mc.mb[7] = LSW(MSD(stats_dma));
mc.mb[8] = dwords;
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
- mc.mb[10] = cpu_to_le16(options);
+ mc.mb[9] = vha->vp_idx;
+ mc.mb[10] = options;
rval = qla24xx_send_mb_cmd(vha, &mc);
@@ -3204,7 +3203,7 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx, vha, 0x1090,
"Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(abt->nport_handle));
- if (abt->nport_handle == CS_IOCB_ERROR)
+ if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
rval = QLA_FUNCTION_PARAMETER_ERROR;
else
rval = QLA_FUNCTION_FAILED;
@@ -4427,9 +4426,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
- WRT_REG_DWORD(req->req_q_in, 0);
+ wrt_reg_dword(req->req_q_in, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- WRT_REG_DWORD(req->req_q_out, 0);
+ wrt_reg_dword(req->req_q_out, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4498,9 +4497,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
- WRT_REG_DWORD(rsp->rsp_q_out, 0);
+ wrt_reg_dword(rsp->rsp_q_out, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- WRT_REG_DWORD(rsp->rsp_q_in, 0);
+ wrt_reg_dword(rsp->rsp_q_in, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4727,7 +4726,7 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
mbx_cmd_t *mcp = &mc;
int i;
int len;
- uint16_t *str;
+ __le16 *str;
struct qla_hw_data *ha = vha->hw;
if (!IS_P3P_TYPE(ha))
@@ -4736,14 +4735,14 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
"Entered %s.\n", __func__);
- str = (void *)version;
+ str = (__force __le16 *)version;
len = strlen(version);
mcp->mb[0] = MBC_SET_RNID_PARAMS;
mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
mcp->out_mb = MBX_1|MBX_0;
for (i = 4; i < 16 && len; i++, str++, len -= 2) {
- mcp->mb[i] = cpu_to_le16p(str);
+ mcp->mb[i] = le16_to_cpup(str);
mcp->out_mb |= 1<<i;
}
for (; i < 16; i++) {
@@ -4861,7 +4860,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
"Done %s.\n", __func__);
bp = (uint32_t *) buf;
for (i = 0; i < (bufsiz-4)/4; i++, bp++)
- *bp = le32_to_cpu(*bp);
+ *bp = le32_to_cpu((__force __le32)*bp);
}
return rval;
@@ -5411,18 +5410,18 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* Write the MBC data to the registers */
- WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
- WRT_REG_WORD(&reg->mailbox1, mb[0]);
- WRT_REG_WORD(&reg->mailbox2, mb[1]);
- WRT_REG_WORD(&reg->mailbox3, mb[2]);
- WRT_REG_WORD(&reg->mailbox4, mb[3]);
+ wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
+ wrt_reg_word(&reg->mailbox1, mb[0]);
+ wrt_reg_word(&reg->mailbox2, mb[1]);
+ wrt_reg_word(&reg->mailbox3, mb[2]);
+ wrt_reg_word(&reg->mailbox4, mb[3]);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
/* Poll for MBC interrupt */
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (stat & HSRX_RISC_INT) {
stat &= 0xff;
@@ -5430,10 +5429,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
stat == 0x10 || stat == 0x11) {
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
- mb0 = RD_REG_WORD(&reg->mailbox0);
- WRT_REG_DWORD(&reg->hccr,
+ mb0 = rd_reg_word(&reg->mailbox0);
+ wrt_reg_dword(&reg->hccr,
HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rd_reg_dword(&reg->hccr);
break;
}
}
@@ -6211,7 +6210,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1144,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
}
@@ -6256,7 +6255,7 @@ qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
mcp->mb[4]);
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
} else {
if (subcode & BIT_5)
*sector_size = mcp->mb[1];
@@ -6470,13 +6469,13 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_PORT_DATABASE;
- mc.mb[1] = cpu_to_le16(fcport->loop_id);
+ mc.mb[1] = fcport->loop_id;
mc.mb[2] = MSW(pd_dma);
mc.mb[3] = LSW(pd_dma);
mc.mb[6] = MSW(MSD(pd_dma));
mc.mb[7] = LSW(MSD(pd_dma));
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
- mc.mb[10] = cpu_to_le16((uint16_t)opt);
+ mc.mb[9] = vha->vp_idx;
+ mc.mb[10] = opt;
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
@@ -6587,7 +6586,7 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
mc.mb[6] = MSW(MSD(id_list_dma));
mc.mb[7] = LSW(MSD(id_list_dma));
mc.mb[8] = 0;
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[9] = vha->vp_idx;
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
@@ -6613,8 +6612,8 @@ int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
- mcp->mb[1] = cpu_to_le16(1);
- mcp->mb[2] = cpu_to_le16(value);
+ mcp->mb[1] = 1;
+ mcp->mb[2] = value;
mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -6639,7 +6638,7 @@ int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
memset(mcp->mb, 0, sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
- mcp->mb[1] = cpu_to_le16(0);
+ mcp->mb[1] = 0;
mcp->out_mb = MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index d82e92da529a..15efe2f04b86 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -770,7 +770,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_in = &reg->isp25mq.req_q_in;
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
- req->out_ptr = (void *)(req->ring + req->length);
+ req->out_ptr = (uint16_t *)(req->ring + req->length);
mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
@@ -884,7 +884,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
- rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index df99911b8bb9..a8fe4f725fa0 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -46,7 +46,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
uint8_t io_lock_on;
uint16_t command = 0;
uint32_t *iptr;
- uint32_t __iomem *optr;
+ __le32 __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
@@ -109,7 +109,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+ optr = &reg->ispfx00.mailbox0;
iptr = mcp->mb;
command = mcp->mb[0];
@@ -117,7 +117,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
- WRT_REG_DWORD(optr, *iptr);
+ wrt_reg_dword(optr, *iptr);
mboxes >>= 1;
optr++;
@@ -676,14 +676,14 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
- WRT_REG_DWORD(&reg->req_q_in, 0);
- WRT_REG_DWORD(&reg->req_q_out, 0);
+ wrt_reg_dword(&reg->req_q_in, 0);
+ wrt_reg_dword(&reg->req_q_out, 0);
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
- WRT_REG_DWORD(&reg->rsp_q_out, 0);
+ wrt_reg_dword(&reg->rsp_q_in, 0);
+ wrt_reg_dword(&reg->rsp_q_out, 0);
/* PCI posting */
- RD_REG_DWORD(&reg->rsp_q_out);
+ rd_reg_dword(&reg->rsp_q_out);
}
char *
@@ -912,9 +912,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* 30 seconds wait - Adjust if required */
wait_time = 30;
- pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+ pseudo_aen = rd_reg_dword(&reg->pseudoaen);
if (pseudo_aen == 1) {
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
rval = qlafx00_driver_shutdown(vha, 10);
@@ -925,7 +925,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* wait time before firmware ready */
wtime = jiffies + (wait_time * HZ);
do {
- aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx = rd_reg_dword(&reg->aenmailbox0);
barrier();
ql_dbg(ql_dbg_mbx, vha, 0x0133,
"aenmbx: 0x%x\n", aenmbx);
@@ -944,15 +944,15 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
case MBA_FW_RESTART_CMPLT:
/* Set the mbx and rqstq intr code */
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
- WRT_REG_DWORD(&reg->aenmailbox0, 0);
- RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
+ wrt_reg_dword(&reg->aenmailbox0, 0);
+ rd_reg_dword_relaxed(&reg->aenmailbox0);
ql_dbg(ql_dbg_init, vha, 0x0134,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -982,13 +982,13 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
* 3. issue Get FW State Mbox cmd to determine fw state
* Set the mbx and rqstq intr code from Shadow Regs
*/
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->initval1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
- ha->req_que_len = RD_REG_DWORD(&reg->initval5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+ ha->req_que_off = rd_reg_dword(&reg->initval1);
+ ha->rsp_que_off = rd_reg_dword(&reg->initval3);
+ ha->req_que_len = rd_reg_dword(&reg->initval5);
+ ha->rsp_que_len = rd_reg_dword(&reg->initval6);
ql_dbg(ql_dbg_init, vha, 0x0135,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -1034,7 +1034,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
if (time_after_eq(jiffies, wtime)) {
ql_dbg(ql_dbg_init, vha, 0x0137,
"Init f/w failed: aen[7]: 0x%x\n",
- RD_REG_DWORD(&reg->aenmailbox7));
+ rd_reg_dword(&reg->aenmailbox7));
rval = QLA_FUNCTION_FAILED;
done = true;
break;
@@ -1428,7 +1428,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp)
pkt = rsp->ring_ptr;
for (cnt = 0; cnt < rsp->length; cnt++) {
pkt->signature = RESPONSE_PROCESSED;
- WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
+ wrt_reg_dword((void __force __iomem *)&pkt->signature,
RESPONSE_PROCESSED);
pkt++;
}
@@ -1444,13 +1444,13 @@ qlafx00_rescan_isp(scsi_qla_host_t *vha)
qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
ql_dbg(ql_dbg_disc, vha, 0x2094,
"fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
@@ -1495,7 +1495,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
(!test_bit(UNLOADING, &vha->dpc_flags)) &&
(!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
(ha->mr.fw_hbt_en)) {
- fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+ fw_heart_beat = rd_reg_dword(&reg->fwheartbeat);
if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
ha->mr.old_fw_hbt_cnt = fw_heart_beat;
ha->mr.fw_hbt_miss_cnt = 0;
@@ -1515,7 +1515,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
/* Reset recovery to be performed in timer routine */
- aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx0 = rd_reg_dword(&reg->aenmailbox0);
if (ha->mr.fw_reset_timer_exp) {
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -1710,10 +1710,9 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
return;
}
-int
+void
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
- int rval = 0;
uint32_t aen_code, aen_data;
aen_code = FCH_EVT_VENDOR_UNIQUE;
@@ -1764,8 +1763,6 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
fc_host_post_event(vha->host, fc_get_event_number(),
aen_code, aen_data);
-
- return rval;
}
static void
@@ -2721,7 +2718,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
uint16_t lreq_q_in = 0;
uint16_t lreq_q_out = 0;
- lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+ lreq_q_in = rd_reg_dword(rsp->rsp_q_in);
lreq_q_out = rsp->ring_index;
while (lreq_q_in != lreq_q_out) {
@@ -2783,7 +2780,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
}
/* Adjust ring index */
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
}
/**
@@ -2814,9 +2811,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
- ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
ql_dbg(ql_dbg_async, vha, 0x5077,
"Asynchronous port Update received "
"aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2846,13 +2843,13 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
default:
- ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
- ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
- ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
- ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
- ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
+ ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4);
+ ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5);
+ ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6);
+ ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7);
ql_dbg(ql_dbg_async, vha, 0x5078,
"AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
@@ -2872,7 +2869,7 @@ static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
uint16_t cnt;
- uint32_t __iomem *wptr;
+ __le32 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
@@ -2882,10 +2879,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out32[0] = mb0;
- wptr = (uint32_t __iomem *)&reg->mailbox17;
+ wptr = &reg->mailbox17;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
+ ha->mailbox_out32[cnt] = rd_reg_dword(wptr);
wptr++;
}
}
@@ -2939,13 +2936,13 @@ qlafx00_intr_handler(int irq, void *dev_id)
break;
if (stat & QLAFX00_INTR_MB_CMPLT) {
- mb[0] = RD_REG_WORD(&reg->mailbox16);
+ mb[0] = rd_reg_dword(&reg->mailbox16);
qlafx00_mbx_completion(vha, mb[0]);
status |= MBX_INTERRUPT;
clr_intr |= QLAFX00_INTR_MB_CMPLT;
}
if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
- ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+ ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0);
qlafx00_async_event(vha);
clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
}
@@ -3113,7 +3110,7 @@ qlafx00_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3178,7 +3175,7 @@ qlafx00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3205,7 +3202,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1;
- tm_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
+ tm_iocb.handle = make_handle(req->id, sp->handle);
tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
@@ -3215,7 +3212,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
sizeof(struct scsi_lun));
}
- memcpy((void *)ptm_iocb, &tm_iocb,
+ memcpy(ptm_iocb, &tm_iocb,
sizeof(struct tsk_mgmt_entry_fx00));
wmb();
}
@@ -3231,13 +3228,12 @@ qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
abt_iocb.entry_count = 1;
- abt_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
- abt_iocb.abort_handle =
- cpu_to_le32(make_handle(req->id, fxio->u.abt.cmd_hndl));
+ abt_iocb.handle = make_handle(req->id, sp->handle);
+ abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl);
abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
abt_iocb.req_que_no = cpu_to_le16(req->id);
- memcpy((void *)pabt_iocb, &abt_iocb,
+ memcpy(pabt_iocb, &abt_iocb,
sizeof(struct abort_iocb_entry_fx00));
wmb();
}
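The wmb() at the tail of qlafx00_tm_iocb()/qlafx00_abort_iocb() above follows the driver's standard submission pattern: the IOCB contents must be globally visible before the ring index is advanced, since bumping the index is what licenses the hardware to fetch the slot. A hedged sketch of the tail of such a submit path (demo_ring_doorbell() and its parameters are illustrative, assuming the ring lives in coherent DMA memory as in this driver):

	#include <linux/io.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Publish the IOCB bytes, then ring the doorbell. */
	static void demo_ring_doorbell(void *slot, const void *iocb, size_t len,
				       u32 new_index, u32 __iomem *req_q_in)
	{
		memcpy(slot, iocb, len);	/* fill the request-queue slot */
		wmb();			/* order IOCB stores before the bump */
		writel(new_index, req_q_in);	/* hw may fetch from here on */
	}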
@@ -3254,7 +3250,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
fx_iocb.entry_type = FX00_IOCB_TYPE;
- fx_iocb.handle = cpu_to_le32(sp->handle);
+ fx_iocb.handle = sp->handle;
fx_iocb.entry_count = entry_cnt;
if (sp->type == SRB_FXIOCB_DCMD) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4567f0c42486..762250891a8f 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -96,7 +96,7 @@ struct tsk_mgmt_entry_fx00 {
uint8_t sys_define;
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
uint32_t reserved_0;
@@ -121,13 +121,13 @@ struct abort_iocb_entry_fx00 {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
__le32 reserved_0;
__le16 tgt_id_sts; /* Completion status. */
__le16 options;
- __le32 abort_handle; /* System handle. */
+ uint32_t abort_handle; /* System handle. */
__le32 reserved_2;
__le16 req_que_no;
@@ -166,7 +166,7 @@ struct fxdisc_entry_fx00 {
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
__le32 reserved_0; /* System handle. */
__le16 func_num;
@@ -359,47 +359,47 @@ struct config_info_data {
#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */
#define QLAFX00_SET_HST_INTR(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
value)
#define QLAFX00_CLR_HST_INTR(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_RD_INTR_REG(ha) \
- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
+ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
#define QLAFX00_CLR_INTR_REG(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
- WRT_REG_DWORD((ha)->cregbase + off, val)
+ wrt_reg_dword((ha)->cregbase + off, val)
#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
- RD_REG_DWORD((ha)->cregbase + off)
+ rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_HBA_RST_REG(ha, val)\
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_RST_REG, val)
#define QLAFX00_RD_ICNTRL_REG(ha) \
- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
+ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
QLAFX00_ICR_ENB_MASK))
#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
QLAFX00_ICR_DIS_MASK))
#define QLAFX00_RD_REG(ha, off) \
- RD_REG_DWORD((ha)->cregbase + off)
+ rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_WR_REG(ha, off, val) \
- WRT_REG_DWORD((ha)->cregbase + off, val)
+ wrt_reg_dword((ha)->cregbase + off, val)
struct qla_mt_iocb_rqst_fx00 {
__le32 reserved_0;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 4886d247df6f..d66d47a0f958 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -138,7 +138,7 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
priv->sp = NULL;
sp->priv = NULL;
if (priv->comp_status == QLA_SUCCESS) {
- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
} else {
fd->rcv_rsplen = 0;
fd->transferred_length = 0;
@@ -295,7 +295,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done;
sp->put_fn = qla_nvme_release_ls_cmd_kref;
- sp->priv = (void *)priv;
+ sp->priv = priv;
priv->sp = sp;
kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock);
@@ -384,7 +384,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -426,11 +426,11 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* No data transfer how do we check buffer len == 0?? */
if (fd->io_dir == NVMEFC_FCP_READ) {
- cmd_pkt->control_flags = CF_READ_DATA;
+ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
vha->qla_stats.input_bytes += fd->payload_length;
vha->qla_stats.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
- cmd_pkt->control_flags = CF_WRITE_DATA;
+ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
if ((vha->flags.nvme_first_burst) &&
(sp->fcport->nvme_prli_service_param &
NVME_PRLI_SP_FIRST_BURST)) {
@@ -438,7 +438,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
sp->fcport->nvme_first_burst_size) ||
(sp->fcport->nvme_first_burst_size == 0))
cmd_pkt->control_flags |=
- CF_NVME_FIRST_BURST_ENABLE;
+ cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
}
vha->qla_stats.output_bytes += fd->payload_length;
vha->qla_stats.output_requests++;
@@ -514,7 +514,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
}
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags);
@@ -560,7 +560,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
init_waitqueue_head(&sp->nvme_ls_waitq);
kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock);
- sp->priv = (void *)priv;
+ sp->priv = priv;
priv->sp = sp;
sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd";
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index ef912902d4e5..fbb844226630 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -48,26 +48,26 @@ struct cmd_nvme {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
- uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */
+ __le16 dseg_count; /* Data segment count. */
+ __le16 nvme_rsp_dsd_len; /* NVMe RSP DSD length */
uint64_t rsvd;
- uint16_t control_flags; /* Control Flags */
+ __le16 control_flags; /* Control Flags */
#define CF_NVME_FIRST_BURST_ENABLE BIT_11
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
- uint16_t nvme_cmnd_dseg_len; /* Data segment length. */
+ __le16 nvme_cmnd_dseg_len; /* Data segment length. */
__le64 nvme_cmnd_dseg_address __packed;/* Data segment address. */
__le64 nvme_rsp_dseg_address __packed; /* Data segment address. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -82,24 +82,24 @@ struct pt_ls4_request {
uint8_t sys_define;
uint8_t entry_status;
uint32_t handle;
- uint16_t status;
- uint16_t nport_handle;
- uint16_t tx_dseg_count;
+ __le16 status;
+ __le16 nport_handle;
+ __le16 tx_dseg_count;
uint8_t vp_index;
uint8_t rsvd;
- uint16_t timeout;
- uint16_t control_flags;
+ __le16 timeout;
+ __le16 control_flags;
#define CF_LS4_SHIFT 13
#define CF_LS4_ORIGINATOR 0
#define CF_LS4_RESPONDER 1
#define CF_LS4_RESPONDER_TERM 2
- uint16_t rx_dseg_count;
- uint16_t rsvd2;
- uint32_t exchange_address;
- uint32_t rsvd3;
- uint32_t rx_byte_count;
- uint32_t tx_byte_count;
+ __le16 rx_dseg_count;
+ __le16 rsvd2;
+ __le32 exchange_address;
+ __le32 rsvd3;
+ __le32 rx_byte_count;
+ __le32 tx_byte_count;
struct dsd64 dsd[2];
};
@@ -107,32 +107,32 @@ struct pt_ls4_request {
struct pt_ls4_rx_unsol {
uint8_t entry_type;
uint8_t entry_count;
- uint16_t rsvd0;
- uint16_t rsvd1;
+ __le16 rsvd0;
+ __le16 rsvd1;
uint8_t vp_index;
uint8_t rsvd2;
- uint16_t rsvd3;
- uint16_t nport_handle;
- uint16_t frame_size;
- uint16_t rsvd4;
- uint32_t exchange_address;
+ __le16 rsvd3;
+ __le16 nport_handle;
+ __le16 frame_size;
+ __le16 rsvd4;
+ __le32 exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
be_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t param;
- uint32_t desc0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 param;
+ __le32 desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
- uint32_t desc_len;
- uint32_t payload[3];
+ __le32 desc_len;
+ __le32 payload[3];
};
/*
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 185c5f34d4c1..0baf55b7e88f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -370,7 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
/* Read back value to make sure write has gone through before trying
* to use it.
*/
- win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+ win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
if (win_read != ha->crb_win) {
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
@@ -380,47 +380,6 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
-static inline unsigned long
-qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
-{
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- /* See if we are currently pointing to the region we want to use next */
- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
- /* No need to change window. PCIX and PCIE regs are in both
- * windows.
- */
- return off;
- }
-
- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
- /* We are in first CRB window */
- if (ha->curr_window != 0)
- WARN_ON(1);
- return off;
- }
-
- if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
- /* We are in second CRB window */
- off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
-
- if (ha->curr_window != 1)
- return off;
-
- /* We are in the QM or direct access
- * register region - do nothing
- */
- if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
- (off < QLA82XX_PCI_CAMQM_MAX))
- return off;
- }
- /* strange address given */
- ql_dbg(ql_dbg_p3p, vha, 0xb001,
- "%s: Warning: unm_nic_pci_set_crbwindow "
- "called with an unknown address(%llx).\n",
- QLA2XXX_DRIVER_NAME, off);
- return off;
-}
-
static int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
void __iomem **off_out)
@@ -520,7 +479,7 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
qla82xx_crb_win_lock(ha);
qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
}
- data = RD_REG_DWORD(off);
+ data = rd_reg_dword(off);
if (rv == 1) {
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
@@ -937,17 +896,17 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
{
uint32_t off_value, rval = 0;
- WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
+ wrt_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
/* Read back value to make sure write has gone through */
- RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+ rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
off_value = (off & 0x0000FFFF);
if (flag)
- WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
+ wrt_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
data);
else
- rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
+ rval = rd_reg_dword(off_value + CRB_INDIRECT_2M +
ha->nx_pcibase);
return rval;
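
The rd_reg_dword()/wrt_reg_dword() calls replacing the old upper-case macros are typed inline functions; one plausible shape (a sketch, not necessarily the driver's exact definition) is shown below. Taking __le32 __iomem pointers means sparse verifies that callers pass annotated register fields, which the macros could not enforce.

#include <linux/io.h>
#include <linux/types.h>

static inline u32 demo_rd_reg_dword(const volatile __le32 __iomem *addr)
{
	return readl(addr);	/* readl() returns CPU byte order */
}

static inline void demo_wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
{
	writel(data, addr);	/* writel() converts to little-endian for the bus */
}
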
@@ -1561,14 +1520,14 @@ qla82xx_get_table_desc(const u8 *unirom, int section)
uint32_t i;
struct qla82xx_uri_table_desc *directory =
(struct qla82xx_uri_table_desc *)&unirom[0];
- __le32 offset;
- __le32 tab_type;
- __le32 entries = cpu_to_le32(directory->num_entries);
+ uint32_t offset;
+ uint32_t tab_type;
+ uint32_t entries = le32_to_cpu(directory->num_entries);
for (i = 0; i < entries; i++) {
- offset = cpu_to_le32(directory->findex) +
- (i * cpu_to_le32(directory->entry_size));
- tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
+ offset = le32_to_cpu(directory->findex) +
+ (i * le32_to_cpu(directory->entry_size));
+ tab_type = get_unaligned_le32((u32 *)&unirom[offset] + 8);
if (tab_type == section)
return (struct qla82xx_uri_table_desc *)&unirom[offset];
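
The old code applied cpu_to_le32() to values read *from* the firmware image, a no-op on little-endian hosts but wrong on big-endian ones; le32_to_cpu() is the correct direction, and get_unaligned_le32() additionally tolerates byte offsets that are not 4-byte aligned. A userspace sketch of the unaligned load (load_le32() is a hypothetical equivalent):

#include <stdint.h>
#include <stdio.h>

/* Assemble a CPU-order value from four little-endian bytes at any
 * alignment -- safe where dereferencing a u32 pointer would not be. */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Reading at offset 1 is deliberately unaligned. */
	const uint8_t blob[5] = { 0xaa, 0x78, 0x56, 0x34, 0x12 };

	printf("0x%08x\n", load_le32(blob + 1));	/* 0x12345678 */
	return 0;
}
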
@@ -1582,16 +1541,17 @@ qla82xx_get_data_desc(struct qla_hw_data *ha,
u32 section, u32 idx_offset)
{
const u8 *unirom = ha->hablob->fw->data;
- int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
+ int idx = get_unaligned_le32((u32 *)&unirom[ha->file_prd_off] +
+ idx_offset);
struct qla82xx_uri_table_desc *tab_desc = NULL;
- __le32 offset;
+ uint32_t offset;
tab_desc = qla82xx_get_table_desc(unirom, section);
if (!tab_desc)
return NULL;
- offset = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size) * idx);
+ offset = le32_to_cpu(tab_desc->findex) +
+ (le32_to_cpu(tab_desc->entry_size) * idx);
return (struct qla82xx_uri_data_desc *)&unirom[offset];
}
@@ -1606,7 +1566,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha,
QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
if (uri_desc)
- offset = cpu_to_le32(uri_desc->findex);
+ offset = le32_to_cpu(uri_desc->findex);
}
return (u8 *)&ha->hablob->fw->data[offset];
@@ -1620,7 +1580,7 @@ static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
QLA82XX_URI_FIRMWARE_IDX_OFF);
if (uri_desc)
- return cpu_to_le32(uri_desc->size);
+ return le32_to_cpu(uri_desc->size);
}
return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
@@ -1636,7 +1596,7 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
QLA82XX_URI_FIRMWARE_IDX_OFF);
if (uri_desc)
- offset = cpu_to_le32(uri_desc->findex);
+ offset = le32_to_cpu(uri_desc->findex);
}
return (u8 *)&ha->hablob->fw->data[offset];
@@ -1790,9 +1750,9 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
put_unaligned_le64(req->dma, &icb->request_q_address);
put_unaligned_le64(rsp->dma, &icb->response_q_address);
- WRT_REG_DWORD(&reg->req_q_out[0], 0);
- WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
- WRT_REG_DWORD(&reg->rsp_q_out[0], 0);
+ wrt_reg_dword(&reg->req_q_out[0], 0);
+ wrt_reg_dword(&reg->rsp_q_in[0], 0);
+ wrt_reg_dword(&reg->rsp_q_out[0], 0);
}
static int
@@ -1847,8 +1807,8 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
struct qla82xx_uri_table_desc *ptab_desc = NULL;
const uint8_t *unirom = ha->hablob->fw->data;
uint32_t i;
- __le32 entries;
- __le32 flags, file_chiprev, offset;
+ uint32_t entries;
+ uint32_t flags, file_chiprev, offset;
uint8_t chiprev = ha->chip_revision;
/* Hardcoding mn_present flag for P3P */
int mn_present = 0;
@@ -1859,14 +1819,14 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
if (!ptab_desc)
return -1;
- entries = cpu_to_le32(ptab_desc->num_entries);
+ entries = le32_to_cpu(ptab_desc->num_entries);
for (i = 0; i < entries; i++) {
- offset = cpu_to_le32(ptab_desc->findex) +
- (i * cpu_to_le32(ptab_desc->entry_size));
- flags = cpu_to_le32(*((int *)&unirom[offset] +
+ offset = le32_to_cpu(ptab_desc->findex) +
+ (i * le32_to_cpu(ptab_desc->entry_size));
+ flags = le32_to_cpu(*((__le32 *)&unirom[offset] +
QLA82XX_URI_FLAGS_OFF));
- file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
+ file_chiprev = le32_to_cpu(*((__le32 *)&unirom[offset] +
QLA82XX_URI_CHIP_REV_OFF));
flagbit = mn_present ? 1 : 2;
@@ -1996,18 +1956,18 @@ void
qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
- wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
+ wptr = &reg->mailbox_out[1];
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
wptr++;
}
@@ -2069,8 +2029,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 1; iter--; ) {
- if (RD_REG_DWORD(&reg->host_int)) {
- stat = RD_REG_DWORD(&reg->host_status);
+ if (rd_reg_dword(&reg->host_int)) {
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
@@ -2082,9 +2042,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2097,7 +2057,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
qla2x00_handle_mbx_completion(ha, status);
@@ -2135,11 +2095,11 @@ qla82xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
break;
if (host_int) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
@@ -2151,9 +2111,9 @@ qla82xx_msix_default(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2166,7 +2126,7 @@ qla82xx_msix_default(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
} while (0);
qla2x00_handle_mbx_completion(ha, status);
@@ -2196,11 +2156,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
reg = &ha->iobase->isp82;
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
qla24xx_process_response_queue(vha, rsp);
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
@@ -2231,11 +2191,11 @@ qla82xx_poll(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
if (host_int) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
case 0x2:
@@ -2246,9 +2206,9 @@ qla82xx_poll(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2260,7 +2220,7 @@ qla82xx_poll(int irq, void *dev_id)
stat & 0xff);
break;
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2549,8 +2509,8 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
return qla82xx_check_rcvpeg_state(ha);
}
-static uint32_t *
-qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+static __le32 *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
uint32_t length)
{
uint32_t i;
@@ -2675,13 +2635,13 @@ qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
scsi_block_requests(vha->host);
- qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
+ qla82xx_read_flash_data(vha, buf, offset, length);
scsi_unblock_requests(vha->host);
return buf;
}
static int
-qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
+qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr,
uint32_t faddr, uint32_t dwords)
{
int ret;
@@ -2758,7 +2718,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
}
ret = qla82xx_write_flash_dword(ha, faddr,
- cpu_to_le32(*dwptr));
+ le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_p3p, vha, 0xb020,
"Unable to program flash address=%x data=%x.\n",
@@ -2818,10 +2778,10 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
if (ql2xdbwr)
qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -3724,7 +3684,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
/* Minidump related functions */
static int
qla82xx_minidump_process_control(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
struct qla82xx_md_entry_crb *crb_entry;
@@ -3841,12 +3801,12 @@ qla82xx_minidump_process_control(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_md_entry_rdocm *ocm_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
r_addr = ocm_hdr->read_addr;
@@ -3854,7 +3814,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
loop_cnt = ocm_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
+ r_value = rd_reg_dword(r_addr + ha->nx_pcibase);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
@@ -3863,12 +3823,12 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
struct qla82xx_md_entry_mux *mux_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
r_addr = mux_hdr->read_addr;
@@ -3889,12 +3849,12 @@ qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_md_entry_crb *crb_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
r_addr = crb_hdr->addr;
@@ -3912,7 +3872,7 @@ qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
static int
qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t addr, r_addr, c_addr, t_r_addr;
@@ -3921,7 +3881,7 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
uint32_t c_value_w, c_value_r;
struct qla82xx_md_entry_cache *cache_hdr;
int rval = QLA_FUNCTION_FAILED;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
@@ -3971,14 +3931,14 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t addr, r_addr, c_addr, t_r_addr;
uint32_t i, k, loop_count, t_value, r_cnt, r_value;
uint32_t c_value_w;
struct qla82xx_md_entry_cache *cache_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
@@ -4006,14 +3966,14 @@ qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t s_addr, r_addr;
uint32_t r_stride, r_value, r_cnt, qid = 0;
uint32_t i, k, loop_cnt;
struct qla82xx_md_entry_queue *q_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
s_addr = q_hdr->select_addr;
@@ -4036,13 +3996,13 @@ qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_value;
uint32_t i, loop_cnt;
struct qla82xx_md_entry_rdrom *rom_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
r_addr = rom_hdr->read_addr;
@@ -4062,7 +4022,7 @@ qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
static int
qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_value, r_data;
@@ -4070,7 +4030,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
struct qla82xx_md_entry_rdmem *m_hdr;
unsigned long flags;
int rval = QLA_FUNCTION_FAILED;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
r_addr = m_hdr->read_addr;
@@ -4163,12 +4123,12 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
int no_entry_hdr = 0;
qla82xx_md_entry_hdr_t *entry_hdr;
struct qla82xx_md_template_hdr *tmplt_hdr;
- uint32_t *data_ptr;
+ __le32 *data_ptr;
uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
int i = 0, rval = QLA_FUNCTION_FAILED;
tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
- data_ptr = (uint32_t *)ha->md_dump;
+ data_ptr = ha->md_dump;
if (ha->fw_dumped) {
ql_log(ql_log_warn, vha, 0xb037,
@@ -4177,7 +4137,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
goto md_failed;
}
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
if (!ha->md_tmplt_hdr || !ha->md_dump) {
ql_log(ql_log_warn, vha, 0xb038,
@@ -4357,7 +4317,7 @@ skip_nxt_entry:
ql_log(ql_log_info, vha, 0xb044,
"Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
md_failed:
@@ -4514,7 +4474,7 @@ exit:
}
void
-qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla82xx_fw_dump(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 230abee10598..93344a05910a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -800,16 +800,16 @@ struct qla82xx_legacy_intr_set {
#define QLA82XX_URI_FIRMWARE_IDX_OFF 29
struct qla82xx_uri_table_desc{
- uint32_t findex;
- uint32_t num_entries;
- uint32_t entry_size;
- uint32_t reserved[5];
+ __le32 findex;
+ __le32 num_entries;
+ __le32 entry_size;
+ __le32 reserved[5];
};
struct qla82xx_uri_data_desc{
- uint32_t findex;
- uint32_t size;
- uint32_t reserved[5];
+ __le32 findex;
+ __le32 size;
+ __le32 reserved[5];
};
/* UNIFIED ROMIMAGE END */
@@ -829,22 +829,22 @@ struct qla82xx_uri_data_desc{
* ISP 8021 I/O Register Set structure definitions.
*/
struct device_reg_82xx {
- uint32_t req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
- uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */
- uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */
+ __le32 req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
+ __le32 rsp_q_in[64]; /* Response Queue In-Pointer. */
+ __le32 rsp_q_out[64]; /* Response Queue Out-Pointer. */
- uint16_t mailbox_in[32]; /* Mail box In registers */
- uint16_t unused_1[32];
- uint32_t hint; /* Host interrupt register */
+ __le16 mailbox_in[32]; /* Mailbox In registers */
+ __le16 unused_1[32];
+ __le32 hint; /* Host interrupt register */
#define HINT_MBX_INT_PENDING BIT_0
- uint16_t unused_2[62];
- uint16_t mailbox_out[32]; /* Mail box Out registers */
- uint32_t unused_3[48];
+ __le16 unused_2[62];
+ __le16 mailbox_out[32]; /* Mailbox Out registers */
+ __le32 unused_3[48];
- uint32_t host_status; /* host status */
+ __le32 host_status; /* host status */
#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
- uint32_t host_int; /* Interrupt status. */
+ __le32 host_int; /* Interrupt status. */
#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */
};
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index c056f466f1f4..50e57603ce3d 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1441,7 +1441,7 @@ qla8044_device_bootstrap(struct scsi_qla_host *vha)
if (idc_ctrl & GRACEFUL_RESET_BIT1) {
qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
(idc_ctrl & ~GRACEFUL_RESET_BIT1));
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
}
dev_ready:
@@ -2965,7 +2965,7 @@ qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
/* Prepare: Write pex-dma descriptor to MS memory. */
rval = qla8044_ms_mem_write_128b(vha,
- m_hdr->desc_card_addr, (void *)&dma_desc,
+ m_hdr->desc_card_addr, (uint32_t *)&dma_desc,
(sizeof(struct qla8044_pex_dma_descriptor)/16));
if (rval) {
ql_log(ql_log_warn, vha, 0xb14a,
@@ -2987,7 +2987,7 @@ qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
read_size += chunk_size;
}
- *d_ptr = (void *)data_ptr;
+ *d_ptr = (uint32_t *)data_ptr;
error_exit:
if (rdmem_buffer)
@@ -3249,7 +3249,7 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
goto md_failed;
}
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
if (!ha->md_tmplt_hdr || !ha->md_dump) {
ql_log(ql_log_warn, vha, 0xb10e,
@@ -3470,7 +3470,7 @@ skip_nxt_entry:
ql_log(ql_log_info, vha, 0xb110,
"Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
@@ -3487,7 +3487,7 @@ qla8044_get_minidump(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
if (!qla8044_collect_md_data(vha)) {
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
ha->prev_minidump_failed = 0;
} else {
ql_log(ql_log_fatal, vha, 0xb0db,
@@ -3946,8 +3946,8 @@ qla8044_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
for (iter = 1; iter--; ) {
- if (RD_REG_DWORD(&reg->host_int)) {
- stat = RD_REG_DWORD(&reg->host_status);
+ if (rd_reg_dword(&reg->host_int)) {
+ stat = rd_reg_dword(&reg->host_status);
if ((stat & HSRX_RISC_INT) == 0)
break;
@@ -3961,9 +3961,9 @@ qla8044_intr_handler(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -3976,7 +3976,7 @@ qla8044_intr_handler(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
qla2x00_handle_mbx_completion(ha, status);
@@ -4070,7 +4070,7 @@ exit_isp_reset:
}
void
-qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla8044_fw_dump(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1d9a4866f9a7..e92fad99338c 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -35,6 +35,11 @@ static int apidev_major;
*/
struct kmem_cache *srb_cachep;
+int ql2xfulldump_on_mpifail;
+module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
+ "Set this to take full dump on MPI hang.");
+
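+
For reference, the module-parameter pattern used here: an int exposed under /sys/module/<module>/parameters/, world-readable (S_IRUGO) and root-writable (S_IWUSR). A minimal sketch with a hypothetical knob:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_knob;
module_param(demo_knob, int, 0644);	/* 0644 == S_IRUGO | S_IWUSR */
MODULE_PARM_DESC(demo_knob, "Demonstration knob (0=off, 1=on)");
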
/*
* CT6 CTX allocation cache
*/
@@ -1216,9 +1221,9 @@ uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
if (IS_P3P_TYPE(ha))
- return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+ return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
else
- return ((RD_REG_DWORD(&reg->host_status)) ==
+ return ((rd_reg_dword(&reg->host_status)) ==
ISP_REG_DISCONNECT);
}
@@ -1902,8 +1907,8 @@ qla2x00_enable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
/* enable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
- RD_REG_WORD(&reg->ictrl);
+ wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
+ rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1917,8 +1922,8 @@ qla2x00_disable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
/* disable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, 0);
- RD_REG_WORD(&reg->ictrl);
+ wrt_reg_word(&reg->ictrl, 0);
+ rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1930,8 +1935,8 @@ qla24xx_enable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
- WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
+ rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1945,8 +1950,8 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
return;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -2518,6 +2523,7 @@ static struct isp_operations qla27xx_isp_ops = {
.read_nvram = NULL,
.write_nvram = NULL,
.fw_dump = qla27xx_fwdump,
+ .mpi_fw_dump = qla27xx_mpi_fwdump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla83xx_beacon_blink,
@@ -4614,7 +4620,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
ha->flags.fce_enabled = 0;
ha->eft = NULL;
ha->eft_dma = 0;
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
ha->fw_dump_cap_flags = 0;
ha->fw_dump_reading = 0;
ha->fw_dump = NULL;
@@ -5758,7 +5764,8 @@ qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
if (!pdb) {
ql_dbg(ql_dbg_init, vha, 0x0181,
"%s: Failed allocate pdb\n", __func__);
- } else if (qla24xx_get_port_database(vha, purex->nport_handle, pdb)) {
+ } else if (qla24xx_get_port_database(vha,
+ le16_to_cpu(purex->nport_handle), pdb)) {
ql_dbg(ql_dbg_init, vha, 0x0181,
"%s: Failed get pdb sid=%x\n", __func__, sid);
} else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
@@ -5910,7 +5917,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
"-------- ELS REQ -------\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
- (void *)purex, sizeof(*purex));
+ purex, sizeof(*purex));
if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
rsp_payload_length =
@@ -5952,7 +5959,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
rsp_els->entry_status = 0;
rsp_els->handle = 0;
rsp_els->nport_handle = purex->nport_handle;
- rsp_els->tx_dsd_count = 1;
+ rsp_els->tx_dsd_count = cpu_to_le16(1);
rsp_els->vp_index = purex->vp_idx;
rsp_els->sof_type = EST_SOFI3;
rsp_els->rx_xchg_address = purex->rx_xchg_addr;
@@ -5963,7 +5970,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
rsp_els->d_id[1] = purex->s_id[1];
rsp_els->d_id[2] = purex->s_id[2];
- rsp_els->control_flags = EPD_ELS_ACC;
+ rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
rsp_els->rx_byte_count = 0;
rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
@@ -5975,8 +5982,8 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
/* Prepare Response Payload */
rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
- rsp_payload->hdr.len = cpu_to_be32(
- rsp_els->tx_byte_count - sizeof(rsp_payload->hdr));
+ rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
+ sizeof(rsp_payload->hdr));
/* Link service Request Info Descriptor */
rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
@@ -6026,7 +6033,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
memset(sfp, 0, SFP_RTDI_LEN);
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
if (!rval) {
- uint16_t *trx = (void *)sfp; /* already be16 */
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
rsp_payload->sfp_diag_desc.temperature = trx[0];
rsp_payload->sfp_diag_desc.vcc = trx[1];
rsp_payload->sfp_diag_desc.tx_bias = trx[2];
@@ -6053,17 +6060,17 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
if (!rval) {
rsp_payload->ls_err_desc.link_fail_cnt =
- cpu_to_be32(stat->link_fail_cnt);
+ cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
rsp_payload->ls_err_desc.loss_sync_cnt =
- cpu_to_be32(stat->loss_sync_cnt);
+ cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
rsp_payload->ls_err_desc.loss_sig_cnt =
- cpu_to_be32(stat->loss_sig_cnt);
+ cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
rsp_payload->ls_err_desc.prim_seq_err_cnt =
- cpu_to_be32(stat->prim_seq_err_cnt);
+ cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
rsp_payload->ls_err_desc.inval_xmit_word_cnt =
- cpu_to_be32(stat->inval_xmit_word_cnt);
+ cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
rsp_payload->ls_err_desc.inval_crc_cnt =
- cpu_to_be32(stat->inval_crc_cnt);
+ cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
}
}
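
The double conversion cpu_to_be32(le32_to_cpu(...)) is intentional: the link statistics arrive from the adapter little-endian, while the RDP ELS payload is defined big-endian, so each counter must cross both byte orders. Userspace sketch with hypothetical helpers standing in for the kernel macros:

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_le32_to_cpu(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void demo_cpu_to_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

int main(void)
{
	uint8_t le_counter[4] = { 0x2a, 0x00, 0x00, 0x00 };	/* 42, LE */
	uint8_t be_out[4];

	demo_cpu_to_be32(be_out, demo_le32_to_cpu(le_counter));
	printf("%02x %02x %02x %02x\n",
	       be_out[0], be_out[1], be_out[2], be_out[3]);	/* 00 00 00 2a */
	return 0;
}
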
@@ -6135,7 +6142,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
memset(sfp, 0, SFP_RTDI_LEN);
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
if (!rval) {
- uint16_t *trx = (void *)sfp; /* already be16 */
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
/* Optical Element Descriptor, Temperature */
rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
@@ -6261,11 +6268,11 @@ send:
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
"-------- ELS RSP -------\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
- (void *)rsp_els, sizeof(*rsp_els));
+ rsp_els, sizeof(*rsp_els));
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
"-------- ELS RSP PAYLOAD -------\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
- (void *)rsp_payload, rsp_payload_length);
+ rsp_payload, rsp_payload_length);
rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
@@ -6871,6 +6878,7 @@ qla2x00_do_dpc(void *data)
if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
+ base_vha->flags.online = 1;
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
"ISP abort scheduled.\n");
if (ha->isp_ops->abort_isp(base_vha)) {
@@ -7550,15 +7558,15 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2100(ha) || IS_QLA2200(ha)){
- stat = RD_REG_DWORD(&reg->hccr);
+ stat = rd_reg_word(&reg->hccr);
if (stat & HCCR_RISC_PAUSE)
risc_paused = 1;
} else if (IS_QLA23XX(ha)) {
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED)
risc_paused = 1;
} else if (IS_FWI2_CAPABLE(ha)) {
- stat = RD_REG_DWORD(&reg24->host_status);
+ stat = rd_reg_dword(&reg24->host_status);
if (stat & HSRX_RISC_PAUSED)
risc_paused = 1;
}
@@ -7567,7 +7575,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
if (risc_paused) {
ql_log(ql_log_info, base_vha, 0x9003,
"RISC paused -- mmio_enabled, Dumping firmware.\n");
- ha->isp_ops->fw_dump(base_vha, 0);
+ qla2xxx_dump_fw(base_vha);
return PCI_ERS_RESULT_NEED_RESET;
} else
@@ -7814,13 +7822,19 @@ qla2x00_module_init(void)
{
int ret = 0;
+ BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
BUILD_BUG_ON(sizeof(init_cb_t) != 96);
+ BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
BUILD_BUG_ON(sizeof(request_t) != 64);
+ BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
@@ -7828,17 +7842,70 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2344);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
+ BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
+ BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
+ BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
+ BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
+ BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
+ BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
+ BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
+ BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
+ BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
+ BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
+ BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
+ BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
+ BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
+ BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
+ BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
+ BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
+ BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
+ BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
+ BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
+ BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
+ BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
+ BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
+ BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
+ BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
- BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
- BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
+ BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sw_info_t) != 32);
+ BUILD_BUG_ON(sizeof(target_id_t) != 2);
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
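
Each BUILD_BUG_ON() above turns a firmware-interface layout mismatch into a compile failure instead of silent DMA corruption at run time. The same check in plain C11, with a hypothetical structure:

#include <stdint.h>

struct demo_entry {
	uint8_t		entry_type;
	uint8_t		entry_count;
	uint16_t	handle_lo;
	uint32_t	handle_hi;
	uint8_t		payload[56];
};

_Static_assert(sizeof(struct demo_entry) == 64,
	       "demo_entry must fill one 64-byte IOCB slot");

int main(void) { return 0; }
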
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3da79ee1d88e..e161c05d7d82 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -26,24 +26,24 @@ qla2x00_lock_nvram_access(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
- data = RD_REG_WORD(&reg->nvram);
+ data = rd_reg_word(&reg->nvram);
while (data & NVR_BUSY) {
udelay(100);
- data = RD_REG_WORD(&reg->nvram);
+ data = rd_reg_word(&reg->nvram);
}
/* Lock resource */
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
udelay(5);
- data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ data = rd_reg_word(&reg->u.isp2300.host_semaphore);
while ((data & BIT_0) == 0) {
/* Lock failed */
udelay(100);
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
udelay(5);
- data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ data = rd_reg_word(&reg->u.isp2300.host_semaphore);
}
}
}
@@ -58,8 +58,8 @@ qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
}
}
@@ -73,15 +73,15 @@ qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
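
The read-backs tagged /* PCI Posting. */ flush buffered (posted) MMIO writes: a read from the same device cannot complete until earlier writes have reached it, so the NVRAM_DELAY() that follows times the device rather than a bridge buffer. Kernel-style sketch (demo_pulse() and demo_reg are hypothetical):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

static void demo_pulse(volatile __le16 __iomem *demo_reg, u16 bits)
{
	writew(bits, demo_reg);	/* may sit in a bridge's write buffer */
	readw(demo_reg);	/* read-back flushes the posted write */
	udelay(2);		/* delay now times the device, not the bus */
}
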
@@ -120,21 +120,21 @@ qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
/* Read data from NVRAM. */
for (cnt = 0; cnt < 16; cnt++) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT | NVR_CLOCK);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
data <<= 1;
- reg_data = RD_REG_WORD(&reg->nvram);
+ reg_data = rd_reg_word(&reg->nvram);
if (reg_data & NVR_DATA_IN)
data |= BIT_0;
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
/* Deselect chip. */
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_DESELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
return data;
@@ -171,8 +171,8 @@ qla2x00_nv_deselect(struct qla_hw_data *ha)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_DESELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
@@ -183,7 +183,7 @@ qla2x00_nv_deselect(struct qla_hw_data *ha)
* @data: word to program
*/
static void
-qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
+qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data)
{
int count;
uint16_t word;
@@ -202,7 +202,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
/* Write data */
nv_cmd = (addr << 16) | NV_WRITE_OP;
- nv_cmd |= data;
+ nv_cmd |= (__force u16)data;
nv_cmd <<= 5;
for (count = 0; count < 27; count++) {
if (nv_cmd & BIT_31)
@@ -216,8 +216,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -226,7 +226,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
qla2x00_nv_deselect(ha);
@@ -241,7 +241,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
static int
qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
- uint16_t data, uint32_t tmo)
+ __le16 data, uint32_t tmo)
{
int ret, count;
uint16_t word;
@@ -261,7 +261,7 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
/* Write data */
nv_cmd = (addr << 16) | NV_WRITE_OP;
- nv_cmd |= data;
+ nv_cmd |= (__force u16)data;
nv_cmd <<= 5;
for (count = 0; count < 27; count++) {
if (nv_cmd & BIT_31)
@@ -275,11 +275,11 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
do {
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
if (!--tmo) {
ret = QLA_FUNCTION_FAILED;
break;
@@ -308,7 +308,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
int ret, stat;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
- uint16_t wprot, wprot_old;
+ __le16 wprot, wprot_old;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* Clear NVRAM write protection. */
@@ -318,7 +318,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
cpu_to_le16(0x1234), 100000);
wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
- if (stat != QLA_SUCCESS || wprot != 0x1234) {
+ if (stat != QLA_SUCCESS || wprot != cpu_to_le16(0x1234)) {
/* Write enable. */
qla2x00_nv_write(ha, NVR_DATA_OUT);
qla2x00_nv_write(ha, 0);
@@ -347,8 +347,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready. */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -357,7 +357,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
if (wait_cnt)
@@ -407,8 +407,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready. */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -417,7 +417,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
}
@@ -456,11 +456,11 @@ qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ulong cnt = 30000;
- WRT_REG_DWORD(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
+ wrt_reg_dword(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
while (cnt--) {
- if (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) {
- *data = RD_REG_DWORD(&reg->flash_data);
+ if (rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG) {
+ *data = rd_reg_dword(&reg->flash_data);
return QLA_SUCCESS;
}
udelay(10);
@@ -499,11 +499,11 @@ qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ulong cnt = 500000;
- WRT_REG_DWORD(&reg->flash_data, data);
- WRT_REG_DWORD(&reg->flash_addr, addr | FARX_DATA_FLAG);
+ wrt_reg_dword(&reg->flash_data, data);
+ wrt_reg_dword(&reg->flash_addr, addr | FARX_DATA_FLAG);
while (cnt--) {
- if (!(RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG))
+ if (!(rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG))
return QLA_SUCCESS;
udelay(10);
cond_resched();
@@ -549,11 +549,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
{
const char *loc, *locations[] = { "DEF", "PCI" };
uint32_t pcihdr, pcids;
- uint16_t cnt, chksum, *wptr;
+ uint16_t cnt, chksum;
+ __le16 *wptr;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct qla_flt_location *fltl = (void *)req->ring;
- uint32_t *dcode = (void *)req->ring;
+ uint32_t *dcode = (uint32_t *)req->ring;
uint8_t *buf = (void *)req->ring, *bcode, last_image;
/*
@@ -610,7 +611,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
if (memcmp(fltl->sig, "QFLT", 4))
goto end;
- wptr = (void *)req->ring;
+ wptr = (__force __le16 *)req->ring;
cnt = sizeof(*fltl) / sizeof(*wptr);
for (chksum = 0; cnt--; wptr++)
chksum += le16_to_cpu(*wptr);
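
This loop implements the usual additive checksum for flash layout tables: sum all 16-bit little-endian words and require zero, the stored checksum word being the two's complement of the rest. Userspace sketch with a hypothetical demo_le16() in place of le16_to_cpu():

#include <stdint.h>
#include <stdio.h>

static uint16_t demo_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static int demo_csum_ok(const uint8_t *buf, unsigned int words)
{
	uint16_t chksum = 0;
	unsigned int i;

	for (i = 0; i < words; i++)
		chksum += demo_le16(buf + 2 * i);
	return chksum == 0;	/* a valid image sums to zero */
}

int main(void)
{
	/* Three data words (0x10, 0x20, 0x30) plus checksum word 0xffa0. */
	const uint8_t img[8] = { 0x10, 0x00, 0x20, 0x00, 0x30, 0x00,
				 0xa0, 0xff };

	printf("%s\n", demo_csum_ok(img, 4) ? "valid" : "corrupt");
	return 0;
}
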
@@ -671,7 +672,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
struct qla_flt_header *flt = ha->flt;
struct qla_flt_region *region = &flt->region[0];
- uint16_t *wptr, cnt, chksum;
+ __le16 *wptr;
+ uint16_t cnt, chksum;
uint32_t start;
/* Assign FCP prio region since older adapters may not have FLT, or
@@ -681,8 +683,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
ha->flt_region_flt = flt_addr;
- wptr = (uint16_t *)ha->flt;
- ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2,
+ wptr = (__force __le16 *)ha->flt;
+ ha->isp_ops->read_optrom(vha, flt, flt_addr << 2,
(sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE));
if (le16_to_cpu(*wptr) == 0xffff)
@@ -949,7 +951,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
uint16_t cnt, chksum;
- uint16_t *wptr = (void *)req->ring;
+ __le16 *wptr = (__force __le16 *)req->ring;
struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring;
uint8_t man_id, flash_id;
uint16_t mid = 0, fid = 0;
@@ -1042,14 +1044,14 @@ static void
qla2xxx_get_idc_param(scsi_qla_host_t *vha)
{
#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
- uint32_t *wptr;
+ __le32 *wptr;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
if (!(IS_P3P_TYPE(ha)))
return;
- wptr = (uint32_t *)req->ring;
+ wptr = (__force __le32 *)req->ring;
ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8);
if (*wptr == cpu_to_le32(0xffffffff)) {
@@ -1095,7 +1097,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
{
#define NPIV_CONFIG_SIZE (16*1024)
void *data;
- uint16_t *wptr;
+ __le16 *wptr;
uint16_t cnt, chksum;
int i;
struct qla_npiv_header hdr;
@@ -1197,9 +1199,9 @@ qla24xx_unprotect_flash(scsi_qla_host_t *vha)
return qla81xx_fac_do_write_enable(vha, 1);
/* Enable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
if (!ha->fdt_wrt_disable)
goto done;
@@ -1240,8 +1242,8 @@ qla24xx_protect_flash(scsi_qla_host_t *vha)
skip_wrt_protect:
/* Disable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
return QLA_SUCCESS;
}
@@ -1265,7 +1267,7 @@ qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata)
}
static int
-qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
uint32_t dwords)
{
int ret;
@@ -1352,7 +1354,7 @@ next:
/* Slow write */
ret = qla24xx_write_flash_dword(ha,
- flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
+ flash_data_addr(ha, faddr), le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_user, vha, 0x7006,
"Failed slopw write %x (%x)\n", faddr, *dwptr);
@@ -1379,11 +1381,11 @@ qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
uint32_t i;
- uint16_t *wptr;
+ __le16 *wptr;
struct qla_hw_data *ha = vha->hw;
/* Word reads to NVRAM via registers. */
- wptr = (uint16_t *)buf;
+ wptr = buf;
qla2x00_lock_nvram_access(ha);
for (i = 0; i < bytes >> 1; i++, naddr++)
wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha,
@@ -1456,7 +1458,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
{
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t *dwptr = buf;
+ __le32 *dwptr = buf;
uint32_t i;
int ret;
@@ -1466,9 +1468,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
return ret;
/* Enable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
/* Disable NVRAM write-protection. */
qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
@@ -1478,7 +1480,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
naddr = nvram_data_addr(ha, naddr);
bytes >>= 2;
for (i = 0; i < bytes; i++, naddr++, dwptr++) {
- if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) {
+ if (qla24xx_write_flash_dword(ha, naddr, le32_to_cpu(*dwptr))) {
ql_dbg(ql_dbg_user, vha, 0x709a,
"Unable to program nvram address=%x data=%x.\n",
naddr, *dwptr);
@@ -1490,9 +1492,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);
/* Disable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
return ret;
}
@@ -1588,8 +1590,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
- gpio_enable = RD_REG_WORD(&reg->gpioe);
- gpio_data = RD_REG_WORD(&reg->gpiod);
+ gpio_enable = rd_reg_word(&reg->gpioe);
+ gpio_data = rd_reg_word(&reg->gpiod);
}
/* Set the modified gpio_enable values */
@@ -1598,8 +1600,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
- WRT_REG_WORD(&reg->gpioe, gpio_enable);
- RD_REG_WORD(&reg->gpioe);
+ wrt_reg_word(&reg->gpioe, gpio_enable);
+ rd_reg_word(&reg->gpioe);
}
qla2x00_flip_colors(ha, &led_color);
@@ -1614,8 +1616,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
- WRT_REG_WORD(&reg->gpiod, gpio_data);
- RD_REG_WORD(&reg->gpiod);
+ wrt_reg_word(&reg->gpiod, gpio_data);
+ rd_reg_word(&reg->gpiod);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1645,8 +1647,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
- gpio_enable = RD_REG_WORD(&reg->gpioe);
- gpio_data = RD_REG_WORD(&reg->gpiod);
+ gpio_enable = rd_reg_word(&reg->gpioe);
+ gpio_data = rd_reg_word(&reg->gpiod);
}
gpio_enable |= GPIO_LED_MASK;
@@ -1654,8 +1656,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
- WRT_REG_WORD(&reg->gpioe, gpio_enable);
- RD_REG_WORD(&reg->gpioe);
+ wrt_reg_word(&reg->gpioe, gpio_enable);
+ rd_reg_word(&reg->gpioe);
}
/* Clear out previously set LED colour. */
@@ -1663,8 +1665,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
- WRT_REG_WORD(&reg->gpiod, gpio_data);
- RD_REG_WORD(&reg->gpiod);
+ wrt_reg_word(&reg->gpiod, gpio_data);
+ rd_reg_word(&reg->gpiod);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1731,13 +1733,13 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
/* Save the Original GPIOD. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Enable the gpio_data reg for update. */
gpio_data |= GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Set the color bits. */
qla24xx_flip_colors(ha, &led_color);
@@ -1749,8 +1751,8 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
gpio_data |= led_color;
/* Set the modified gpio_data values. */
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ gpio_data = rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1881,12 +1883,12 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
goto skip_gpio;
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Enable the gpio_data reg for update. */
gpio_data |= GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1929,12 +1931,12 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
/* Give control back to firmware. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Disable the gpio_data reg for update. */
gpio_data &= ~GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
set_fw_options:
@@ -1970,10 +1972,10 @@ qla2x00_flash_enable(struct qla_hw_data *ha)
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
data |= CSR_FLASH_ENABLE;
- WRT_REG_WORD(&reg->ctrl_status, data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/**
@@ -1986,10 +1988,10 @@ qla2x00_flash_disable(struct qla_hw_data *ha)
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
data &= ~(CSR_FLASH_ENABLE);
- WRT_REG_WORD(&reg->ctrl_status, data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
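The paired write-then-read sequences above ("PCI Posting") are the standard flush for posted MMIO writes: a write can sit in a bridge buffer indefinitely, and reading any register back from the same device forces it out before the driver proceeds. A minimal sketch of the idiom, using the generic accessors that the qla2xxx rd_reg_word()/wrt_reg_word() helpers wrap:

#include <linux/io.h>

/*
 * Flush a posted MMIO write by reading back from the same device.
 * The read value is discarded; only the ordering side effect matters.
 */
static inline void mmio_write_flushed(void __iomem *reg, u16 val)
{
	writew(val, reg);	/* may be posted (buffered) by the bus */
	(void)readw(reg);	/* read forces the write to reach the device */
}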
/**
@@ -2008,7 +2010,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
uint16_t bank_select;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- bank_select = RD_REG_WORD(&reg->ctrl_status);
+ bank_select = rd_reg_word(&reg->ctrl_status);
if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
/* Specify 64K address range: */
@@ -2016,11 +2018,11 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
bank_select &= ~0xf8;
bank_select |= addr >> 12 & 0xf0;
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- data = RD_REG_WORD(&reg->flash_data);
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ data = rd_reg_word(&reg->flash_data);
return (uint8_t)data;
}
@@ -2028,13 +2030,13 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
/* Setup bit 16 of flash address. */
if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
} else if (((addr & BIT_16) == 0) &&
(bank_select & CSR_FLASH_64K_BANK)) {
bank_select &= ~(CSR_FLASH_64K_BANK);
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/* Always perform IO mapped accesses to the FLASH registers. */
@@ -2049,7 +2051,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
} while (data != data2);
} else {
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
data = qla2x00_debounce_register(&reg->flash_data);
}
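qla2x00_debounce_register() (defined elsewhere in the driver, not shown in this hunk) re-reads the register until two consecutive reads agree, which tolerates flash data lines that are still settling; the PIO branch above open-codes the same do/while. A minimal sketch of the debounce idiom for a 16-bit register:

#include <linux/io.h>

/* Read a register repeatedly until two consecutive reads agree. */
static u16 debounce_read16(void __iomem *reg)
{
	u16 first, second;

	do {
		first = readw(reg);
		second = readw(reg);
	} while (first != second);

	return first;
}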
@@ -2068,20 +2070,20 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
uint16_t bank_select;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- bank_select = RD_REG_WORD(&reg->ctrl_status);
+ bank_select = rd_reg_word(&reg->ctrl_status);
if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
/* Specify 64K address range: */
/* clear out Module Select and Flash Address bits [19:16]. */
bank_select &= ~0xf8;
bank_select |= addr >> 12 & 0xf0;
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_data, (uint16_t)data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
return;
}
@@ -2089,13 +2091,13 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
/* Setup bit 16 of flash address. */
if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
} else if (((addr & BIT_16) == 0) &&
(bank_select & CSR_FLASH_64K_BANK)) {
bank_select &= ~(CSR_FLASH_64K_BANK);
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/* Always perform IO mapped accesses to the FLASH registers. */
@@ -2103,10 +2105,10 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
} else {
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_data, (uint16_t)data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
}
@@ -2289,12 +2291,12 @@ qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
midpoint = length / 2;
- WRT_REG_WORD(&reg->nvram, 0);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, 0);
+ rd_reg_word(&reg->nvram);
for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) {
if (ilength == midpoint) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram);
}
data = qla2x00_read_flash_byte(ha, saddr);
if (saddr % 100)
@@ -2319,11 +2321,11 @@ qla2x00_suspend_hba(struct scsi_qla_host *vha)
/* Pause RISC. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+ rd_reg_word(&reg->hccr);
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
break;
udelay(100);
}
@@ -2362,12 +2364,12 @@ qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf,
midpoint = ha->optrom_size / 2;
qla2x00_flash_enable(ha);
- WRT_REG_WORD(&reg->nvram, 0);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, 0);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
for (addr = offset, data = buf; addr < length; addr++, data++) {
if (addr == midpoint) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
}
*data = qla2x00_read_flash_byte(ha, addr);
@@ -2399,7 +2401,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_number = 0;
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
/* Go with write. */
@@ -2548,8 +2550,8 @@ update_flash:
}
}
} else if (addr == ha->optrom_size / 2) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram);
}
if (flash_id == 0xda && man_id == 0xc1) {
@@ -2610,7 +2612,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with read. */
- qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2);
+ qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
@@ -2662,7 +2664,7 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
for (; cnt; cnt--, flt_reg++) {
- if (flt_reg->start == start) {
+ if (le32_to_cpu(flt_reg->start) == start) {
memcpy((uint8_t *)region, flt_reg,
sizeof(struct qla_flt_region));
rval = QLA_SUCCESS;
@@ -2691,7 +2693,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
struct qla_flt_region region;
bool reset_to_rom = false;
uint32_t risc_size, risc_attr = 0;
- uint32_t *fw_array = NULL;
+ __be32 *fw_array = NULL;
/* Retrieve region info - must be a start address passed in */
rval = qla28xx_get_flash_region(vha, offset, &region);
@@ -2722,12 +2724,12 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
"Region %x is secure\n", region.code);
- switch (region.code) {
+ switch (le16_to_cpu(region.code)) {
case FLT_REG_FW:
case FLT_REG_FW_SEC_27XX:
case FLT_REG_MPI_PRI_28XX:
case FLT_REG_MPI_SEC_28XX:
- fw_array = dwptr;
+ fw_array = (__force __be32 *)dwptr;
/* 1st fw array */
risc_size = be32_to_cpu(fw_array[3]);
@@ -2761,7 +2763,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
case FLT_REG_PEP_PRI_28XX:
case FLT_REG_PEP_SEC_28XX:
- fw_array = dwptr;
+ fw_array = (__force __be32 *)dwptr;
/* 1st fw array */
risc_size = be32_to_cpu(fw_array[3]);
@@ -2892,7 +2894,8 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
if (region.attribute && buf_size_without_sfub) {
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
"Sending Secure Flash MB Cmd\n");
- rval = qla28xx_secure_flash_update(vha, 0, region.code,
+ rval = qla28xx_secure_flash_update(vha, 0,
+ le16_to_cpu(region.code),
buf_size_without_sfub, sfub_dma,
sizeof(struct secure_flash_update_block) >> 2);
if (rval != QLA_SUCCESS) {
@@ -2981,11 +2984,11 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
/* Go with write. */
if (IS_QLA28XX(ha))
- rval = qla28xx_write_flash_data(vha, (uint32_t *)buf,
- offset >> 2, length >> 2);
+ rval = qla28xx_write_flash_data(vha, buf, offset >> 2,
+ length >> 2);
else
- rval = qla24xx_write_flash_data(vha, (uint32_t *)buf,
- offset >> 2, length >> 2);
+ rval = qla24xx_write_flash_data(vha, buf, offset >> 2,
+ length >> 2);
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
scsi_unblock_requests(vha->host);
@@ -3513,7 +3516,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
} else {
for (i = 0; i < 4; i++)
- ha->fw_revision[i] = be32_to_cpu(dcode[4+i]);
+ ha->fw_revision[i] =
+ be32_to_cpu((__force __be32)dcode[4+i]);
ql_dbg(ql_dbg_init, vha, 0x0060,
"Firmware revision (flash) %u.%u.%u (%x).\n",
ha->fw_revision[0], ha->fw_revision[1],
@@ -3528,7 +3532,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
faddr = ha->flt_region_gold_fw;
- qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8);
+ qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x0056,
"Unrecognized golden fw at %#x.\n", faddr);
@@ -3537,7 +3541,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
}
for (i = 0; i < 4; i++)
- ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]);
+ ha->gold_fw_version[i] =
+ be32_to_cpu((__force __be32)dcode[4+i]);
return ret;
}
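The (__force __be32 *) casts in the hunks above are for sparse, not code generation: the flash/firmware words are stored big-endian, but the buffers arrive typed as plain uint32_t, so the cast tells sparse the reinterpretation is intentional before be32_to_cpu() byte-swaps the value. A minimal sketch of the idiom, assuming a caller-supplied raw word buffer:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Pull a CPU-order value out of a buffer that is really big-endian but
 * was declared as plain u32. __force silences sparse, which would
 * otherwise warn about mixing __be32 and u32.
 */
static u32 fw_word(const u32 *raw, unsigned int idx)
{
	const __be32 *be = (__force const __be32 *)raw;

	return be32_to_cpu(be[idx]);
}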
@@ -3617,7 +3622,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
/* read remaining FCP CMD config data from flash */
fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
- len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
+ len = ha->fcp_prio_cfg->num_entries * sizeof(struct qla_fcp_prio_entry);
max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0],
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 622e7337affc..fbb80a043b4f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -378,7 +378,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
qlt_issue_marker(vha, ha_locked);
if ((entry->u.isp24.vp_index != 0xFF) &&
- (entry->u.isp24.nport_handle != 0xFFFF)) {
+ (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
host = qlt_find_host_by_vp_idx(vha,
entry->u.isp24.vp_index);
if (unlikely(!host)) {
@@ -1697,7 +1697,7 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -1725,7 +1725,8 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
struct scsi_qla_host *vha = mcmd->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
- uint32_t f_ctl, h;
+ __le32 f_ctl;
+ uint32_t h;
uint8_t *p;
int rc;
struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
@@ -1782,7 +1783,7 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
resp->payload.ba_acct.low_seq_cnt = 0x0000;
- resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
} else {
@@ -1814,7 +1815,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
- uint32_t f_ctl;
+ __le32 f_ctl;
uint8_t *p;
ql_dbg(ql_dbg_tgt, vha, 0xe006,
@@ -1857,7 +1858,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
resp->payload.ba_acct.low_seq_cnt = 0x0000;
- resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
} else {
@@ -2030,7 +2031,7 @@ static void qlt_do_tmr_work(struct work_struct *work)
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
- tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
+ tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
break;
default:
tag = 0;
@@ -2110,7 +2111,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_tgt_cmd *abort_cmd;
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
- abts->exchange_addr_to_abort);
+ le32_to_cpu(abts->exchange_addr_to_abort));
if (abort_cmd && abort_cmd->qpair) {
mcmd->qpair = abort_cmd->qpair;
mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
@@ -2133,7 +2134,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
{
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess;
- uint32_t tag = abts->exchange_addr_to_abort;
+ uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
be_id_t s_id;
int rc;
unsigned long flags;
@@ -2223,7 +2224,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
- ctio->nport_handle = mcmd->sess->loop_id;
+ ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = ha->vp_idx;
ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -2280,7 +2281,7 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE;
- ctio->nport_handle = cmd->sess->loop_id;
+ ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -2484,7 +2485,7 @@ static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out));
+ rd_reg_dword_relaxed(req->req_q_out));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
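rd_reg_dword_relaxed() maps to readl_relaxed() in this driver: a device read without the full barrier semantics that readl() implies. That is adequate here because the value read (the queue-out pointer) is only compared against driver state held under hardware_lock; no DMA'd payload is consumed on the basis of the read. A minimal sketch of the distinction:

#include <linux/io.h>

/*
 * Ordered vs. relaxed MMIO reads. Use the relaxed form only when the
 * result does not gate access to DMA'd memory.
 */
static u32 read_queue_out(void __iomem *q_out, bool need_dma_ordering)
{
	if (need_dma_ordering)
		return readl(q_out);		/* full barrier semantics */
	return readl_relaxed(q_out);		/* cheaper, no DMA ordering */
}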
@@ -2840,10 +2841,14 @@ skip_explict_conf:
cpu_to_le16(SS_SENSE_LEN_VALID);
ctio->u.status1.sense_length =
cpu_to_le16(prm->sense_buffer_len);
- for (i = 0; i < prm->sense_buffer_len/4; i++)
- ((uint32_t *)ctio->u.status1.sense_data)[i] =
- cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+ for (i = 0; i < prm->sense_buffer_len/4; i++) {
+ uint32_t v;
+ v = get_unaligned_be32(
+ &((uint32_t *)prm->sense_buffer)[i]);
+ put_unaligned_le32(v,
+ &((uint32_t *)ctio->u.status1.sense_data)[i]);
+ }
qlt_print_dif_err(prm);
} else {
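The rewritten loop above replaces a bare cpu_to_be32() copy with get_unaligned_be32()/put_unaligned_le32(), which both repairs the byte order on big-endian hosts and avoids alignment traps on the packed IOCB buffer. A minimal sketch of the same 32-bit repacking, assuming len is a multiple of 4:

#include <asm/unaligned.h>

/*
 * Copy sense data, re-packing each 32-bit group from big-endian order
 * into the little-endian layout the firmware buffer uses.
 */
static void sense_be32_to_le32(void *dst, const void *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i += 4)
		put_unaligned_le32(get_unaligned_be32(src + i), dst + i);
}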
@@ -3114,7 +3119,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
- pkt->dseg_count = prm->tot_dsds;
+ pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
/* Fibre channel byte count */
pkt->transfer_length = cpu_to_le32(transfer_length);
@@ -3136,7 +3141,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
- pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+ pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
if (!bundling) {
cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
@@ -3573,7 +3578,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
/* terminate */
@@ -3647,7 +3652,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -3885,7 +3890,7 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
- cmd = (void *) req->outstanding_cmds[h];
+ cmd = req->outstanding_cmds[h];
if (unlikely(cmd == NULL)) {
ql_dbg(ql_dbg_async, vha, 0xe053,
"qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -4110,7 +4115,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
spin_lock_init(&cmd->cmd_lock);
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
- cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
+ cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
if (atio->u.isp24.fcp_cmnd.rddata &&
atio->u.isp24.fcp_cmnd.wrdata) {
@@ -5302,7 +5307,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = sess->loop_id;
+ ctio24->nport_handle = cpu_to_le16(sess->loop_id);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -5315,13 +5320,14 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
* CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
* if the explicit confirmation is used.
*/
- ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.ox_id =
+ cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
ctio24->u.status1.scsi_status = cpu_to_le16(status);
- ctio24->u.status1.residual = get_datalen_for_atio(atio);
+ ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
if (ctio24->u.status1.residual != 0)
- ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+ ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);
/* Memory Barrier */
wmb();
@@ -5550,7 +5556,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
if (unlikely(atio->u.isp24.exchange_addr ==
- ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+ cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
ql_dbg(ql_dbg_io, vha, 0x3065,
"qla_target(%d): ATIO_TYPE7 "
"received with UNKNOWN exchange address, "
@@ -5670,9 +5676,9 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
if (qpair == ha->base_qpair)
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
else
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -5713,8 +5719,8 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
entry->compl_status);
if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
- if ((entry->error_subcode1 == 0x1E) &&
- (entry->error_subcode2 == 0)) {
+ if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
+ le32_to_cpu(entry->error_subcode2) == 0) {
if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
ha->tgt.tgt_ops->free_mcmd(mcmd);
return;
@@ -5928,11 +5934,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
"qla_target(%d): Async LOOP_UP occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
if (tgt->link_reinit_iocb_pending) {
qlt_send_notify_ack(ha->base_qpair,
- (void *)&tgt->link_reinit_iocb,
+ &tgt->link_reinit_iocb,
0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
@@ -5946,18 +5951,16 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
"qla_target(%d): Async event %#x occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
break;
case MBA_REJECTED_FCP_CMD:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
"qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
- if (le16_to_cpu(mailbox[3]) == 1) {
+ if (mailbox[3] == 1) {
/* exchange starvation. */
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
@@ -5981,10 +5984,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
"qla_target(%d): Port update async event %#x "
"occurred: updating the ports database (m[0]=%x, m[1]=%x, "
"m[2]=%x, m[3]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
- login_code = le16_to_cpu(mailbox[2]);
+ login_code = mailbox[2];
if (login_code == 0x4) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
"Async MB 2: Got PLOGI Complete\n");
@@ -6661,9 +6663,14 @@ static void qlt_disable_vha(struct scsi_qla_host *vha)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+
+ /*
+ * We are expecting the offline state.
+	 * QLA_FUNCTION_FAILED means that the adapter is offline.
+ */
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
ql_dbg(ql_dbg_tgt, vha, 0xe081,
- "qla2x00_wait_for_hba_online() failed\n");
+ "adapter is offline\n");
}
/*
@@ -6729,7 +6736,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
return;
for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
- pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
pkt++;
}
@@ -6764,7 +6771,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
"corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
&pkt->u.isp24.fcp_hdr.s_id,
be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
- le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+ pkt->u.isp24.exchange_addr, pkt);
adjust_corrupted_atio(pkt);
qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
@@ -6782,14 +6789,14 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
} else
ha->tgt.atio_ring_ptr++;
- pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
}
wmb();
}
/* Adjust ring index */
- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
void
@@ -6802,19 +6809,19 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (!QLA_TGT_MODE_ENABLED())
return;
- WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
- RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+ wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
+ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
+ rd_reg_dword(ISP_ATIO_Q_OUT(vha));
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
if (IS_QLA2071(ha)) {
/* 4 ports Baker: Enable Interrupt Handshake */
icb->msix_atio = 0;
- icb->firmware_options_2 |= BIT_26;
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
} else {
icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= ~BIT_26;
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
}
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
@@ -6824,7 +6831,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
/* INTx|MSI */
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
icb->msix_atio = 0;
- icb->firmware_options_2 |= BIT_26;
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"%s: Use INTx for ATIOQ.\n", __func__);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 3cf8590feeac..010f12523b2a 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -135,37 +135,37 @@ struct nack_to_isp {
uint8_t entry_status; /* Entry Status. */
union {
struct {
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
uint8_t target_id;
uint8_t reserved_1;
- uint16_t flags;
- uint16_t resp_code;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_flags;
- uint16_t srr_reject_code;
+ __le16 flags;
+ __le16 resp_code;
+ __le16 status;
+ __le16 task_flags;
+ __le16 seq_id;
+ __le16 srr_rx_id;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_flags;
+ __le16 srr_reject_code;
uint8_t srr_reject_vendor_uniq;
uint8_t srr_reject_code_expl;
uint8_t reserved_2[24];
} isp2x;
struct {
uint32_t handle;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t reserved_1;
- uint16_t flags;
- uint16_t srr_rx_id;
- uint16_t status;
+ __le16 flags;
+ __le16 srr_rx_id;
+ __le16 status;
uint8_t status_subcode;
uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_flags;
+ __le32 exchange_address;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_flags;
uint8_t reserved_4[19];
uint8_t vp_index;
uint8_t srr_reject_vendor_uniq;
@@ -175,7 +175,7 @@ struct nack_to_isp {
} isp24;
} u;
uint8_t reserved[2];
- uint16_t ox_id;
+ __le16 ox_id;
} __packed;
#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
@@ -206,16 +206,16 @@ struct ctio_to_2xxx {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
target_id_t target;
- uint16_t rx_id;
- uint16_t flags;
- uint16_t status;
- uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
- uint16_t dseg_count; /* Data segment count. */
- uint32_t relative_offset;
- uint32_t residual;
- uint16_t reserved_1[3];
- uint16_t scsi_status;
- uint32_t transfer_length;
+ __le16 rx_id;
+ __le16 flags;
+ __le16 status;
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 relative_offset;
+ __le32 residual;
+ __le16 reserved_1[3];
+ __le16 scsi_status;
+ __le32 transfer_length;
struct dsd32 dsd[3];
} __packed;
#define ATIO_PATH_INVALID 0x07
@@ -257,7 +257,7 @@ struct fcp_hdr {
uint16_t seq_cnt;
__be16 ox_id;
uint16_t rx_id;
- uint32_t parameter;
+ __le32 parameter;
} __packed;
struct fcp_hdr_le {
@@ -267,12 +267,12 @@ struct fcp_hdr_le {
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t parameter;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 parameter;
} __packed;
#define F_CTL_EXCH_CONTEXT_RESP BIT_23
@@ -306,7 +306,7 @@ struct atio7_fcp_cmnd {
* BUILD_BUG_ON in qlt_init().
*/
uint8_t add_cdb[4];
- /* uint32_t data_length; */
+ /* __le32 data_length; */
} __packed;
/*
@@ -316,31 +316,31 @@ struct atio7_fcp_cmnd {
struct atio_from_isp {
union {
struct {
- uint16_t entry_hdr;
+ __le16 entry_hdr;
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
- uint16_t rx_id;
- uint16_t flags;
- uint16_t status;
+ __le16 rx_id;
+ __le16 flags;
+ __le16 status;
uint8_t command_ref;
uint8_t task_codes;
uint8_t task_flags;
uint8_t execution_codes;
uint8_t cdb[MAX_CMDSZ];
- uint32_t data_length;
- uint16_t lun;
+ __le32 data_length;
+ __le16 lun;
uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
- uint16_t reserved_32[6];
- uint16_t ox_id;
+ __le16 reserved_32[6];
+ __le16 ox_id;
} isp2x;
struct {
- uint16_t entry_hdr;
+ __le16 entry_hdr;
uint8_t fcp_cmnd_len_low;
uint8_t fcp_cmnd_len_high:4;
uint8_t attr:4;
- uint32_t exchange_addr;
+ __le32 exchange_addr;
#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
struct fcp_hdr fcp_hdr;
struct atio7_fcp_cmnd fcp_cmnd;
@@ -352,7 +352,7 @@ struct atio_from_isp {
#define FCP_CMD_LENGTH_MASK 0x0fff
#define FCP_CMD_LENGTH_MIN 0x38
uint8_t data[56];
- uint32_t signature;
+ __le32 signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
} raw;
} u;
@@ -395,36 +395,36 @@ struct ctio7_to_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
- uint16_t nport_handle;
+ __le16 nport_handle;
#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
- uint16_t timeout;
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout;
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags;
le_id_t initiator_id;
uint8_t reserved;
- uint32_t exchange_addr;
+ __le32 exchange_addr;
union {
struct {
- uint16_t reserved1;
+ __le16 reserved1;
__le16 flags;
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
- uint32_t relative_offset;
- uint32_t reserved2;
- uint32_t transfer_length;
- uint32_t reserved3;
+ __le16 scsi_status;
+ __le32 relative_offset;
+ __le32 reserved2;
+ __le32 transfer_length;
+ __le32 reserved3;
struct dsd64 dsd;
} status0;
struct {
- uint16_t sense_length;
+ __le16 sense_length;
__le16 flags;
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
- uint16_t response_len;
- uint16_t reserved;
+ __le16 scsi_status;
+ __le16 response_len;
+ __le16 reserved;
uint8_t sense_data[24];
} status1;
} u;
@@ -440,18 +440,18 @@ struct ctio7_from_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
- uint16_t status;
- uint16_t timeout;
- uint16_t dseg_count; /* Data segment count. */
+ __le16 status;
+ __le16 timeout;
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t reserved1[5];
- uint32_t exchange_address;
- uint16_t reserved2;
- uint16_t flags;
- uint32_t residual;
- uint16_t ox_id;
- uint16_t reserved3;
- uint32_t relative_offset;
+ __le32 exchange_address;
+ __le16 reserved2;
+ __le16 flags;
+ __le32 residual;
+ __le16 ox_id;
+ __le16 reserved3;
+ __le32 relative_offset;
uint8_t reserved4[24];
} __packed;
@@ -489,29 +489,29 @@ struct ctio_crc2_to_fw {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
__le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags; /* additional flags */
#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
le_id_t initiator_id; /* initiator ID */
uint8_t reserved1;
- uint32_t exchange_addr; /* rcv exchange address */
- uint16_t reserved2;
+ __le32 exchange_addr; /* rcv exchange address */
+ __le16 reserved2;
__le16 flags; /* refer to CTIO7 flags values */
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
+ __le16 scsi_status;
__le32 relative_offset;
- uint32_t reserved5;
+ __le32 reserved5;
__le32 transfer_length; /* total fc transfer length */
- uint32_t reserved6;
+ __le32 reserved6;
__le64 crc_context_address __packed; /* Data segment address. */
- uint16_t crc_context_len; /* Data segment length. */
- uint16_t reserved_1; /* MUST be set to 0. */
+ __le16 crc_context_len; /* Data segment length. */
+ __le16 reserved_1; /* MUST be set to 0. */
};
/* CTIO Type CRC_x Status IOCB */
@@ -522,20 +522,20 @@ struct ctio_crc_from_fw {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t status;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
- uint32_t reserved1;
- uint16_t state_flags;
+ __le16 status;
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 reserved1;
+ __le16 state_flags;
#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
- uint32_t exchange_address; /* rcv exchange address */
- uint16_t reserved2;
- uint16_t flags;
- uint32_t resid_xfer_length;
- uint16_t ox_id;
+ __le32 exchange_address; /* rcv exchange address */
+ __le16 reserved2;
+ __le16 flags;
+ __le32 resid_xfer_length;
+ __le16 ox_id;
uint8_t reserved3[12];
- uint16_t runt_guard; /* reported runt blk guard */
+ __le16 runt_guard; /* reported runt blk guard */
uint8_t actual_dif[8];
uint8_t expected_dif[8];
} __packed;
@@ -558,29 +558,29 @@ struct abts_recv_from_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint8_t reserved_1[6];
- uint16_t nport_handle;
+ __le16 nport_handle;
uint8_t reserved_2[2];
uint8_t vp_index;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
uint8_t reserved_4[16];
- uint32_t exchange_addr_to_abort;
+ __le32 exchange_addr_to_abort;
} __packed;
#define ABTS_PARAM_ABORT_SEQ BIT_0
struct ba_acc_le {
- uint16_t reserved;
+ __le16 reserved;
uint8_t seq_id_last;
uint8_t seq_id_valid;
#define SEQ_ID_VALID 0x80
#define SEQ_ID_INVALID 0x00
- uint16_t rx_id;
- uint16_t ox_id;
- uint16_t high_seq_cnt;
- uint16_t low_seq_cnt;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
} __packed;
struct ba_rjt_le {
@@ -604,21 +604,21 @@ struct abts_resp_to_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle;
- uint16_t reserved_1;
- uint16_t nport_handle;
- uint16_t control_flags;
+ __le16 reserved_1;
+ __le16 nport_handle;
+ __le16 control_flags;
#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
uint8_t vp_index;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
union {
struct ba_acc_le ba_acct;
struct ba_rjt_le ba_rjt;
} __packed payload;
- uint32_t reserved_4;
- uint32_t exchange_addr_to_abort;
+ __le32 reserved_4;
+ __le32 exchange_addr_to_abort;
} __packed;
/*
@@ -634,21 +634,21 @@ struct abts_resp_from_24xx_fw {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle;
- uint16_t compl_status;
+ __le16 compl_status;
#define ABTS_RESP_COMPL_SUCCESS 0
#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
- uint16_t nport_handle;
- uint16_t reserved_1;
+ __le16 nport_handle;
+ __le16 reserved_1;
uint8_t reserved_2;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
uint8_t reserved_4[8];
- uint32_t error_subcode1;
+ __le32 error_subcode1;
#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
- uint32_t error_subcode2;
- uint32_t exchange_addr_to_abort;
+ __le32 error_subcode2;
+ __le32 exchange_addr_to_abort;
} __packed;
/********************************************************************\
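The struct edits above change no layout: __le16/__le32 have the same size and alignment as uint16_t/uint32_t. What they add is an endianness type visible to sparse, so any access that forgets le16_to_cpu()/cpu_to_le16() now warns when the file is checked with `make C=1`. A minimal sketch of the pattern on a hypothetical IOCB field:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_iocb {
	__le16 nport_handle;	/* little-endian on the wire */
};

static u16 demo_get_handle(const struct demo_iocb *iocb)
{
	return le16_to_cpu(iocb->nport_handle);	/* swab on BE hosts */
}

static void demo_set_handle(struct demo_iocb *iocb, u16 handle)
{
	iocb->nport_handle = cpu_to_le16(handle);
}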
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 6aeb1c3fb7a8..8dc82cfd38b2 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -12,6 +12,33 @@
#define IOBASE(vha) IOBAR(ISPREG(vha))
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
+/* hardware_lock assumed held. */
+static void
+qla27xx_write_remote_reg(struct scsi_qla_host *vha,
+ u32 addr, u32 data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd300,
+ "%s: addr/data = %xh/%xh\n", __func__, addr, data);
+
+ wrt_reg_dword(&reg->iobase_addr, 0x40);
+ wrt_reg_dword(&reg->iobase_c4, data);
+ wrt_reg_dword(&reg->iobase_window, addr);
+}
+
+void
+qla27xx_reset_mpi(scsi_qla_host_t *vha)
+{
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd301,
+ "Entered %s.\n", __func__);
+
+ qla27xx_write_remote_reg(vha, 0x104050, 0x40004);
+ qla27xx_write_remote_reg(vha, 0x10405c, 0x4);
+
+ vha->hw->stat.num_mpi_reset++;
+}
+
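qla27xx_write_remote_reg() above appears to drive an indirect (windowed) register interface: iobase_addr selects bank 0x40, iobase_c4 stages the data, and the store to iobase_window latches the target address and performs the write. That is how registers with no direct BAR mapping are reached, and why the comment requires hardware_lock to already be held: the three MMIO stores form one non-atomic transaction. A minimal sketch of serializing such a window transaction, with hypothetical register names:

#include <linux/spinlock.h>
#include <linux/io.h>

/*
 * Indirect (windowed) register write: three MMIO stores that must not
 * interleave with another CPU's use of the same window registers.
 */
static void windowed_write(spinlock_t *hw_lock, void __iomem *bank_sel,
			   void __iomem *data_reg, void __iomem *addr_reg,
			   u32 addr, u32 data)
{
	unsigned long flags;

	spin_lock_irqsave(hw_lock, flags);
	writel(0x40, bank_sel);		/* select the remote bank */
	writel(data, data_reg);		/* stage the payload */
	writel(addr, addr_reg);		/* latch address, trigger write */
	spin_unlock_irqrestore(hw_lock, flags);
}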
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
@@ -48,7 +75,7 @@ qla27xx_read8(void __iomem *window, void *buf, ulong *len)
uint8_t value = ~0;
if (buf) {
- value = RD_REG_BYTE(window);
+ value = rd_reg_byte(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -59,7 +86,7 @@ qla27xx_read16(void __iomem *window, void *buf, ulong *len)
uint16_t value = ~0;
if (buf) {
- value = RD_REG_WORD(window);
+ value = rd_reg_word(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -70,7 +97,7 @@ qla27xx_read32(void __iomem *window, void *buf, ulong *len)
uint32_t value = ~0;
if (buf) {
- value = RD_REG_DWORD(window);
+ value = rd_reg_dword(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -99,7 +126,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
if (buf) {
void __iomem *window = (void __iomem *)reg + offset;
- WRT_REG_DWORD(window, data);
+ wrt_reg_dword(window, data);
}
}
@@ -892,9 +919,9 @@ static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
struct qla27xx_fwdt_template *tmp)
{
- tmp->firmware_version[0] = vha->hw->fw_major_version;
- tmp->firmware_version[1] = vha->hw->fw_minor_version;
- tmp->firmware_version[2] = vha->hw->fw_subminor_version;
+ tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version);
+ tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version);
+ tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version);
tmp->firmware_version[3] = cpu_to_le32(
vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
tmp->firmware_version[4] = cpu_to_le32(
@@ -998,14 +1025,65 @@ qla27xx_fwdt_template_valid(void *p)
}
void
-qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
ulong flags = 0;
+ bool need_mpi_reset = true;
#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif
+ if (!vha->hw->mpi_fw_dump) {
+ ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
+ } else if (vha->hw->mpi_fw_dumped) {
+ ql_log(ql_log_warn, vha, 0x02f4,
+ "-> MPI firmware already dumped (%p) -- ignoring request\n",
+ vha->hw->mpi_fw_dump);
+ } else {
+ struct fwdt *fwdt = &vha->hw->fwdt[1];
+ ulong len;
+ void *buf = vha->hw->mpi_fw_dump;
+
+ ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0x02f6,
+ "-> fwdt1 no template\n");
+ goto bailout;
+ }
+ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
+ if (len == 0) {
+ goto bailout;
+ } else if (len != fwdt->dump_size) {
+ ql_log(ql_log_warn, vha, 0x02f7,
+ "-> fwdt1 fwdump residual=%+ld\n",
+ fwdt->dump_size - len);
+ } else {
+ need_mpi_reset = false;
+ }
+
+ vha->hw->mpi_fw_dump_len = len;
+ vha->hw->mpi_fw_dumped = 1;
+
+ ql_log(ql_log_warn, vha, 0x02f8,
+ "-> MPI firmware dump saved to buffer (%lu/%p)\n",
+ vha->host_no, vha->hw->mpi_fw_dump);
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+ }
+
+bailout:
+ if (need_mpi_reset)
+ qla27xx_reset_mpi(vha);
+#ifndef __CHECKER__
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+#endif
+}
+
+void
+qla27xx_fwdump(scsi_qla_host_t *vha)
+{
+ lockdep_assert_held(&vha->hw->hardware_lock);
if (!vha->hw->fw_dump) {
ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
@@ -1015,42 +1093,30 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
vha->hw->fw_dump);
} else {
struct fwdt *fwdt = vha->hw->fwdt;
- uint j;
ulong len;
void *buf = vha->hw->fw_dump;
- uint count = vha->hw->fw_dump_mpi ? 2 : 1;
-
- for (j = 0; j < count; j++, fwdt++, buf += len) {
- ql_log(ql_log_warn, vha, 0xd011,
- "-> fwdt%u running...\n", j);
- if (!fwdt->template) {
- ql_log(ql_log_warn, vha, 0xd012,
- "-> fwdt%u no template\n", j);
- break;
- }
- len = qla27xx_execute_fwdt_template(vha,
- fwdt->template, buf);
- if (len == 0) {
- goto bailout;
- } else if (len != fwdt->dump_size) {
- ql_log(ql_log_warn, vha, 0xd013,
- "-> fwdt%u fwdump residual=%+ld\n",
- j, fwdt->dump_size - len);
- }
+
+ ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n");
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0xd012,
+ "-> fwdt0 no template\n");
+ return;
}
- vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
- vha->hw->fw_dumped = 1;
+ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
+ if (len == 0) {
+ return;
+ } else if (len != fwdt->dump_size) {
+ ql_log(ql_log_warn, vha, 0xd013,
+ "-> fwdt0 fwdump residual=%+ld\n",
+ fwdt->dump_size - len);
+ }
+
+ vha->hw->fw_dump_len = len;
+ vha->hw->fw_dumped = true;
ql_log(ql_log_warn, vha, 0xd015,
"-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
-
-bailout:
- vha->hw->fw_dump_mpi = 0;
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
-#endif
}
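The rewrite above removes the optional self-locking (the hardware_locked flag plus the #ifndef __CHECKER__ conditionals) from qla27xx_fwdump() and instead requires every caller to hold hardware_lock, enforced at runtime by lockdep_assert_held() when lockdep is enabled. A minimal sketch of that calling convention:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static DEFINE_SPINLOCK(hw_lock);

/*
 * The function documents and enforces its locking contract instead of
 * conditionally taking the lock itself.
 */
static void dump_locked(void)
{
	lockdep_assert_held(&hw_lock);	/* splat if a caller forgot */
	/* ... walk hardware state ... */
}

static void caller(void)
{
	unsigned long flags;

	spin_lock_irqsave(&hw_lock, flags);
	dump_locked();
	spin_unlock_irqrestore(&hw_lock, flags);
}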
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index bba8dc90acfb..89280b3477aa 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -27,7 +27,7 @@ struct __packed qla27xx_fwdt_template {
uint32_t saved_state[16];
uint32_t reserved_3[8];
- uint32_t firmware_version[5];
+ __le32 firmware_version[5];
};
#define TEMPLATE_TYPE_FWDUMP 99
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 1f0a185b2a95..68183a96a417 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -949,6 +949,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
@@ -1111,6 +1112,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
@@ -1958,6 +1960,20 @@ static int __init tcm_qla2xxx_init(void)
{
int ret;
+ BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32);
+ BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64);
+ BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12);
+ BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4);
+ BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
+ BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24);
+ BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64);
+
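The BUILD_BUG_ON() checks added to tcm_qla2xxx_init() pin the on-wire IOCB sizes at compile time: after the __le16/__le32 re-typing above, any accidental padding or field-size slip would change sizeof() and abort the build rather than corrupt a 64-byte queue entry at runtime. A minimal sketch of the technique on a hypothetical wire struct:

#include <linux/build_bug.h>
#include <linux/init.h>
#include <linux/types.h>

struct demo_wire_entry {
	__le32 handle;
	__le16 status;
	__le16 flags;
	u8     payload[56];
} __packed;			/* must stay exactly one 64-byte slot */

static int __init demo_init(void)
{
	BUILD_BUG_ON(sizeof(struct demo_wire_entry) != 64);
	return 0;
}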
ret = tcm_qla2xxx_register_configfs();
if (ret < 0)
return ret;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5504ab11decc..5dc697ce8b5d 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -966,7 +966,7 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
"%s: No such sysfs attribute\n", __func__);
rc = -ENOSYS;
goto exit_set_chap;
- };
+ }
}
if (chap_rec.chap_type == CHAP_TYPE_IN)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4c6c448dc2df..843cccb38cb7 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -7,7 +7,7 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2018 Douglas Gilbert
+ * Copyright (C) 2001 - 2020 Douglas Gilbert
*
* For documentation see http://sg.danny.cz/sg/sdebug26.html
*/
@@ -39,6 +39,9 @@
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
+#include <linux/random.h>
+#include <linux/xarray.h>
+#include <linux/prefetch.h>
#include <net/checksum.h>
@@ -57,8 +60,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20190125";
+#define SDEBUG_VERSION "0189" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20200421";
#define MY_NAME "scsi_debug"
@@ -91,6 +94,11 @@ static const char *sdebug_version_date = "20190125";
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
+#define UNALIGNED_WRITE_ASCQ 0x4
+#define WRITE_BOUNDARY_ASCQ 0x5
+#define READ_INVDATA_ASCQ 0x6
+#define READ_BOUNDARY_ASCQ 0x7
+#define INSUFF_ZONE_ASCQ 0xe
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -105,9 +113,12 @@ static const char *sdebug_version_date = "20190125";
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
+#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
+#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
+#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
@@ -126,6 +137,7 @@ static const char *sdebug_version_date = "20190125";
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
+#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
@@ -142,6 +154,11 @@ static const char *sdebug_version_date = "20190125";
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
+/* Default parameters for ZBC drives */
+#define DEF_ZBC_ZONE_SIZE_MB 128
+#define DEF_ZBC_MAX_OPEN_ZONES 8
+#define DEF_ZBC_NR_CONV_ZONES 1
+
#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
@@ -219,21 +236,23 @@ static const char *sdebug_version_date = "20190125";
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255
-#define F_D_IN 1
-#define F_D_OUT 2
+/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
+#define F_D_IN 1 /* Data-in command (e.g. READ) */
+#define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
#define F_D_UNKN 8
-#define F_RL_WLUN_OK 0x10
-#define F_SKIP_UA 0x20
-#define F_DELAY_OVERR 0x40
-#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
-#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
-#define F_INV_OP 0x200
-#define F_FAKE_RW 0x400
-#define F_M_ACCESS 0x800 /* media access */
-#define F_SSU_DELAY 0x1000
-#define F_SYNC_DELAY 0x2000
-
+#define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
+#define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
+#define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
+#define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
+#define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
+#define F_INV_OP 0x200 /* invalid opcode (not supported) */
+#define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
+#define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
+#define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
+#define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
+
+/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
@@ -243,6 +262,35 @@ static const char *sdebug_version_date = "20190125";
#define SDEBUG_MAX_CMD_LEN 32
+#define SDEB_XA_NOT_IN_USE XA_MARK_1
+
+/* Zone types (zbcr05 table 25) */
+enum sdebug_z_type {
+ ZBC_ZONE_TYPE_CNV = 0x1,
+ ZBC_ZONE_TYPE_SWR = 0x2,
+ ZBC_ZONE_TYPE_SWP = 0x3,
+};
+
+/* enumeration names taken from table 26, zbcr05 */
+enum sdebug_z_cond {
+ ZBC_NOT_WRITE_POINTER = 0x0,
+ ZC1_EMPTY = 0x1,
+ ZC2_IMPLICIT_OPEN = 0x2,
+ ZC3_EXPLICIT_OPEN = 0x3,
+ ZC4_CLOSED = 0x4,
+ ZC6_READ_ONLY = 0xd,
+ ZC5_FULL = 0xe,
+ ZC7_OFFLINE = 0xf,
+};
+
+struct sdeb_zone_state { /* ZBC: per zone state */
+ enum sdebug_z_type z_type;
+ enum sdebug_z_cond z_cond;
+ bool z_non_seq_resource;
+ unsigned int z_size;
+ sector_t z_start;
+ sector_t z_wp;
+};
struct sdebug_dev_info {
struct list_head dev_list;
@@ -255,15 +303,36 @@ struct sdebug_dev_info {
atomic_t num_in_q;
atomic_t stopped;
bool used;
+
+ /* For ZBC devices */
+ enum blk_zoned_model zmodel;
+ unsigned int zsize;
+ unsigned int zsize_shift;
+ unsigned int nr_zones;
+ unsigned int nr_conv_zones;
+ unsigned int nr_imp_open;
+ unsigned int nr_exp_open;
+ unsigned int nr_closed;
+ unsigned int max_open;
+ struct sdeb_zone_state *zstate;
};
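Keeping zsize_shift next to zsize lets scsi_debug resolve an LBA to its zone with a shift rather than a division, since zone sizes are powers of two. A minimal sketch of the lookup, assuming only the sdebug_dev_info fields declared above (the helper name is illustrative):

/* Hypothetical helper: map an LBA to its per-zone state entry. */
static struct sdeb_zone_state *lba_to_zone(struct sdebug_dev_info *devip,
					   unsigned long long lba)
{
	/* index = lba / zone size, done as a shift */
	return &devip->zstate[lba >> devip->zsize_shift];
}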
struct sdebug_host_info {
struct list_head host_list;
+ int si_idx; /* sdeb_store_info (per host) xarray index */
struct Scsi_Host *shost;
struct device dev;
struct list_head dev_info_list;
};
+/* There is an xarray of pointers to this struct's objects, one per host */
+struct sdeb_store_info {
+ rwlock_t macc_lck; /* for atomic media access on this store */
+ u8 *storep; /* user data storage (ram) */
+ struct t10_pi_tuple *dif_storep; /* protection info */
+ void *map_storep; /* provisioning map */
+};
+
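Each sdeb_store_info is intended to live in an xarray keyed by si_idx, and per the SDEB_XA_NOT_IN_USE mark defined above, idle slots can be recycled instead of reallocated. A minimal sketch of that allocate-or-reuse pattern, with hypothetical names (the real add/erase helpers are declared earlier as sdebug_add_store()/sdebug_erase_store()):

#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(store_xa, XA_FLAGS_ALLOC);
#define STORE_NOT_IN_USE XA_MARK_1

/* Reuse a marked (idle) slot if one exists, else allocate a new one. */
static int store_get(struct sdeb_store_info *sip, u32 *idx)
{
	unsigned long iter = 0;
	void *entry;

	xa_for_each_marked(&store_xa, iter, entry, STORE_NOT_IN_USE) {
		xa_store(&store_xa, iter, sip, GFP_KERNEL);
		xa_clear_mark(&store_xa, iter, STORE_NOT_IN_USE);
		*idx = iter;
		return 0;
	}
	return xa_alloc(&store_xa, idx, sip, xa_limit_32b, GFP_KERNEL);
}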
#define to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
@@ -339,7 +408,7 @@ enum sdeb_opcode_index {
SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
SDEB_I_MAINT_IN = 14,
SDEB_I_MAINT_OUT = 15,
- SDEB_I_VERIFY = 16, /* 10 only */
+ SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
SDEB_I_RESERVE = 18, /* 6, 10 */
SDEB_I_RELEASE = 19, /* 6, 10 */
@@ -352,7 +421,10 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_SAME = 26, /* 10, 16 */
SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
SDEB_I_COMP_WRITE = 28,
- SDEB_I_LAST_ELEMENT = 29, /* keep this last (previous + 1) */
+ SDEB_I_PRE_FETCH = 29, /* 10, 16 */
+ SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
+ SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
+ SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
};
@@ -368,7 +440,7 @@ static const unsigned char opcode_ind_arr[256] = {
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
- 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
+ 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
@@ -382,8 +454,10 @@ static const unsigned char opcode_ind_arr[256] = {
0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
- SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
- 0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
+ SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
+ 0, 0, 0, SDEB_I_VERIFY,
+ SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
+ SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
@@ -424,11 +498,25 @@ static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+
+static int sdebug_do_add_host(bool mk_new_store);
+static int sdebug_add_host_helper(int per_host_idx);
+static void sdebug_do_remove_host(bool the_end);
+static int sdebug_add_store(void);
+static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
+static void sdebug_erase_all_stores(bool apart_from_first);
/*
* The following are overflow arrays for cdbs that "hit" the same index in
@@ -468,6 +556,12 @@ static const struct opcode_info_t write_iarr[] = {
0xbf, 0xc7, 0, 0, 0, 0} },
};
+static const struct opcode_info_t verify_iarr[] = {
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
+ NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+};
+
static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
@@ -514,11 +608,35 @@ static const struct opcode_info_t sync_cache_iarr[] = {
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
};
+static const struct opcode_info_t pre_fetch_iarr[] = {
+ {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
+ {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
+};
+
+static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
+ {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
+ {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
+ {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
+ {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
+ {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
+ {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
+};
+
+static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
+ {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
+ {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
+};
+
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
* plus the terminating elements for logic that scans this table such as
* REPORT SUPPORTED OPERATION CODES. */
-static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
+static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -568,9 +686,10 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 15 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
- {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} },
+ {ARRAY_SIZE(verify_iarr), 0x8f, 0,
+ F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
+ verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
{32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
@@ -609,17 +728,31 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
-
-/* 29 */
+ {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
+ resp_pre_fetch, pre_fetch_iarr,
+ {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} }, /* PRE-FETCH (10) */
+
+/* 30 */
+ {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
+	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
+ {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
+ {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
+	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
+ {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
+/* sentinel */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static int sdebug_add_host = DEF_NUM_HOST;
+static int sdebug_num_hosts;
+static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
-static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
@@ -656,6 +789,8 @@ static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
+static bool sdebug_random = DEF_RANDOM;
+static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
@@ -666,6 +801,9 @@ static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
+/* Following enum: 0: no zbc (default); 1: host aware; 2: host managed */
+static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
+static char *sdeb_zbc_model_s;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */
@@ -679,9 +817,11 @@ static int sdebug_sectors_per; /* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);
-static unsigned char *fake_storep; /* ramdisk storage */
-static struct t10_pi_tuple *dif_storep; /* protection info */
-static void *map_storep; /* provisioning map */
+static struct xarray per_store_arr;
+static struct xarray *per_store_ap = &per_store_arr;
+static int sdeb_first_idx = -1; /* invalid index ==> none created */
+static int sdeb_most_recent_idx = -1;
+static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
static unsigned long map_size;
static int num_aborts;
@@ -693,10 +833,19 @@ static int dix_writes;
static int dix_reads;
static int dif_errors;
+/* ZBC global data */
+static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
+static int sdeb_zbc_zone_size_mb;
+static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
+static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
+
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
static DEFINE_RWLOCK(atomic_rw);
+static DEFINE_RWLOCK(atomic_rw2);
+
+static rwlock_t *ramdisk_lck_a[2];
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
@@ -717,6 +866,8 @@ static const int illegal_condition_result =
static const int device_qfull_result =
(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
+static const int condition_met_result = SAM_STAT_CONDITION_MET;
+
/* Only do the extra work involved in logical block provisioning if one or
* more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
@@ -728,18 +879,25 @@ static inline bool scsi_debug_lbp(void)
(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
-static void *lba2fake_store(unsigned long long lba)
+static void *lba2fake_store(struct sdeb_store_info *sip,
+ unsigned long long lba)
{
- lba = do_div(lba, sdebug_store_sectors);
+ struct sdeb_store_info *lsip = sip;
- return fake_storep + lba * sdebug_sector_size;
+ lba = do_div(lba, sdebug_store_sectors);
+ if (!sip || !sip->storep) {
+ WARN_ON_ONCE(true);
+ lsip = xa_load(per_store_ap, 0); /* should never be NULL */
+ }
+ return lsip->storep + lba * sdebug_sector_size;
}
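/*
 * Illustrative stand-alone sketch (not part of this patch): the effect of
 * the do_div() call in lba2fake_store() above. The backing store may hold
 * fewer sectors than the advertised capacity; any LBA is reduced modulo
 * sdebug_store_sectors, so accesses past the store wrap back to its start.
 * Portable C model with % standing in for the kernel's do_div().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t store_sectors = 16384;	/* e.g. 8 MiB at 512 B */
	const uint32_t sector_size = 512;
	uint64_t lba = 20000;			/* beyond the store */
	uint64_t off;

	lba %= store_sectors;			/* do_div() remainder */
	off = lba * sector_size;
	printf("lba 20000 -> store sector %llu, byte offset %llu\n",
	       (unsigned long long)lba, (unsigned long long)off);
	return 0;
}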
-static struct t10_pi_tuple *dif_store(sector_t sector)
+static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
+ sector_t sector)
{
sector = sector_div(sector, sdebug_store_sectors);
- return dif_storep + sector;
+ return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
@@ -1041,7 +1199,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
__func__, off_dst, scsi_bufflen(scp), act_len,
scsi_get_resid(scp));
n = scsi_bufflen(scp) - (off_dst + act_len);
- scsi_set_resid(scp, min(scsi_get_resid(scp), n));
+ scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
return 0;
}
@@ -1354,13 +1512,15 @@ static int inquiry_vpd_b0(unsigned char *arr)
}
/* Block device characteristics VPD page (SBC-3) */
-static int inquiry_vpd_b1(unsigned char *arr)
+static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
memset(arr, 0, 0x3c);
arr[0] = 0;
arr[1] = 1; /* non rotating medium (e.g. solid state) */
arr[2] = 0;
arr[3] = 5; /* less than 1.8" */
+ if (devip->zmodel == BLK_ZONED_HA)
+ arr[4] = 1 << 4; /* zoned field = 01b */
return 0x3c;
}
@@ -1384,6 +1544,26 @@ static int inquiry_vpd_b2(unsigned char *arr)
return 0x4;
}
+/* Zoned block device characteristics VPD page (ZBC mandatory) */
+static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
+{
+ memset(arr, 0, 0x3c);
+ arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
+ /*
+ * Set Optimal number of open sequential write preferred zones and
+ * Optimal number of non-sequentially written sequential write
+ * preferred zones fields to 'not reported' (0xffffffff). Leave other
+ * fields set to zero, apart from Max. number of open swrz_s field.
+ */
+ put_unaligned_be32(0xffffffff, &arr[4]);
+ put_unaligned_be32(0xffffffff, &arr[8]);
+ if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
+ put_unaligned_be32(devip->max_open, &arr[12]);
+ else
+ put_unaligned_be32(0xffffffff, &arr[12]);
+ return 0x3c;
+}
+
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -1393,13 +1573,15 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned char *arr;
unsigned char *cmd = scp->cmnd;
int alloc_len, n, ret;
- bool have_wlun, is_disk;
+ bool have_wlun, is_disk, is_zbc, is_disk_zbc;
alloc_len = get_unaligned_be16(cmd + 3);
arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
if (! arr)
return DID_REQUEUE << 16;
is_disk = (sdebug_ptype == TYPE_DISK);
+ is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ is_disk_zbc = (is_disk || is_zbc);
have_wlun = scsi_is_wlun(scp->device->lun);
if (have_wlun)
pq_pdt = TYPE_WLUN; /* present, wlun */
@@ -1437,11 +1619,14 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[n++] = 0x86; /* extended inquiry */
arr[n++] = 0x87; /* mode page policy */
arr[n++] = 0x88; /* SCSI ports */
- if (is_disk) { /* SBC only */
+ if (is_disk_zbc) { /* SBC or ZBC */
arr[n++] = 0x89; /* ATA information */
arr[n++] = 0xb0; /* Block limits */
arr[n++] = 0xb1; /* Block characteristics */
- arr[n++] = 0xb2; /* Logical Block Prov */
+ if (is_disk)
+ arr[n++] = 0xb2; /* LB Provisioning */
+ if (is_zbc)
+ arr[n++] = 0xb6; /* ZB dev. char. */
}
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
@@ -1480,19 +1665,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else if (0x88 == cmd[2]) { /* SCSI Ports */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
- } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
+ } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
arr[1] = cmd[2]; /*sanity */
n = inquiry_vpd_89(&arr[4]);
put_unaligned_be16(n, arr + 2);
- } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
+ } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b0(&arr[4]);
- } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
+ } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_vpd_b1(&arr[4]);
+ arr[3] = inquiry_vpd_b1(devip, &arr[4]);
} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b2(&arr[4]);
+ } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_vpd_b6(devip, &arr[4]);
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
kfree(arr);
@@ -1530,10 +1718,13 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
put_unaligned_be16(0x525, arr + n);
n += 2;
+ } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
+ put_unaligned_be16(0x624, arr + n);
+ n += 2;
}
put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
ret = fill_from_dev_buffer(scp, arr,
- min(alloc_len, SDEBUG_LONG_INQ_SZ));
+ min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
kfree(arr);
return ret;
}
@@ -1688,7 +1879,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
}
return fill_from_dev_buffer(scp, arr,
- min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
+ min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
@@ -1762,9 +1953,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp,
* - The constructed command length
* - The maximum array size
*/
- rlen = min(alen,n);
+ rlen = min_t(int, alen, n);
ret = fill_from_dev_buffer(scp, arr,
- min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
+ min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
kfree(arr);
return ret;
}
@@ -2119,7 +2310,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
unsigned char *ap;
unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, bad_pcode;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
pcontrol = (cmd[2] & 0xc0) >> 6;
@@ -2128,7 +2319,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
msense_6 = (MODE_SENSE == cmd[0]);
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
- if (is_disk && !dbd)
+ is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ if ((is_disk || is_zbc) && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
@@ -2140,8 +2332,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
}
target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
(devip->target * 1000) - 3;
- /* for disks set DPOFUA bit and clear write protect (WP) bit */
- if (is_disk) {
+ /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
+ if (is_disk || is_zbc) {
dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
if (sdebug_wp)
dev_spec |= 0x80;
@@ -2201,7 +2393,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
bad_pcode = true;
break;
case 0x8: /* Caching page, direct access */
- if (is_disk) {
+ if (is_disk || is_zbc) {
len = resp_caching_pg(ap, pcontrol, target);
offset += len;
} else
@@ -2239,6 +2431,9 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
target);
len += resp_caching_pg(ap + len, pcontrol,
target);
+ } else if (is_zbc) {
+ len += resp_caching_pg(ap + len, pcontrol,
+ target);
}
len += resp_ctrl_m_pg(ap + len, pcontrol, target);
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2266,7 +2461,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
arr[0] = offset - 1;
else
put_unaligned_be16((offset - 2), arr + 0);
- return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
+ return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
#define SDEBUG_MAX_MSELECT_SZ 512
@@ -2451,14 +2646,217 @@ static int resp_log_sense(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
}
- len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
+ len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
return fill_from_dev_buffer(scp, arr,
- min(len, SDEBUG_MAX_INQ_ARR_SZ));
+ min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
+}
+
+static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
+{
+ return devip->nr_zones != 0;
+}
+
+static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
+ unsigned long long lba)
+{
+ return &devip->zstate[lba >> devip->zsize_shift];
+}
+
+static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
+{
+ return zsp->z_type == ZBC_ZONE_TYPE_CNV;
+}
+
+static void zbc_close_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ enum sdebug_z_cond zc;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
+ return;
+
+ if (zc == ZC2_IMPLICIT_OPEN)
+ devip->nr_imp_open--;
+ else
+ devip->nr_exp_open--;
+
+ if (zsp->z_wp == zsp->z_start) {
+ zsp->z_cond = ZC1_EMPTY;
+ } else {
+ zsp->z_cond = ZC4_CLOSED;
+ devip->nr_closed++;
+ }
+}
+
+static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp = &devip->zstate[0];
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++, zsp++) {
+ if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
+ zbc_close_zone(devip, zsp);
+ return;
+ }
+ }
+}
+
+static void zbc_open_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp, bool explicit)
+{
+ enum sdebug_z_cond zc;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
+ (!explicit && zc == ZC2_IMPLICIT_OPEN))
+ return;
+
+ /* Close an implicit open zone if necessary */
+ if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ else if (devip->max_open &&
+ devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
+ zbc_close_imp_open_zone(devip);
+
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+ if (explicit) {
+ zsp->z_cond = ZC3_EXPLICIT_OPEN;
+ devip->nr_exp_open++;
+ } else {
+ zsp->z_cond = ZC2_IMPLICIT_OPEN;
+ devip->nr_imp_open++;
+ }
+}
+
+static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ unsigned long long lba, unsigned int num)
+{
+ struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
+ unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ zsp->z_wp += num;
+ if (zsp->z_wp >= zend)
+ zsp->z_cond = ZC5_FULL;
+ return;
+ }
+
+ while (num) {
+ if (lba != zsp->z_wp)
+ zsp->z_non_seq_resource = true;
+
+ end = lba + num;
+ if (end >= zend) {
+ n = zend - lba;
+ zsp->z_wp = zend;
+ } else if (end > zsp->z_wp) {
+ n = num;
+ zsp->z_wp = end;
+ } else {
+ n = num;
+ }
+ if (zsp->z_wp >= zend)
+ zsp->z_cond = ZC5_FULL;
+
+ num -= n;
+ lba += n;
+ if (num) {
+ zsp++;
+ zend = zsp->z_start + zsp->z_size;
+ }
+ }
}
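/*
 * Illustrative stand-alone sketch (not part of this patch): the
 * sequential-write-required branch of zbc_inc_wp() above, modelled on a
 * single zone in portable C. The condition names mirror the driver but
 * the types here are local to the example.
 */
#include <stdint.h>
#include <stdio.h>

enum zcond { EMPTY, IMP_OPEN, FULL };

struct zone {
	uint64_t start, size, wp;
	enum zcond cond;
};

static void swr_inc_wp(struct zone *z, unsigned int num)
{
	z->wp += num;
	if (z->wp >= z->start + z->size)
		z->cond = FULL;		/* writes reached the zone end */
}

int main(void)
{
	struct zone z = { .start = 0, .size = 256, .wp = 0, .cond = EMPTY };

	z.cond = IMP_OPEN;	/* an aligned write implicitly opens */
	swr_inc_wp(&z, 128);
	printf("wp=%llu cond=%d\n", (unsigned long long)z.wp, z.cond);
	swr_inc_wp(&z, 128);	/* second write fills the zone */
	printf("wp=%llu cond=%d (2 == FULL)\n",
	       (unsigned long long)z.wp, z.cond);
	return 0;
}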
-static inline int check_device_access_params(struct scsi_cmnd *scp,
- unsigned long long lba, unsigned int num, bool write)
+static int check_zbc_access_params(struct scsi_cmnd *scp,
+ unsigned long long lba, unsigned int num, bool write)
{
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+ struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
+ struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
+
+ if (!write) {
+ if (devip->zmodel == BLK_ZONED_HA)
+ return 0;
+		/* For host-managed, reads cannot cross zone type boundaries */
+ if (zsp_end != zsp &&
+ zbc_zone_is_conv(zsp) &&
+ !zbc_zone_is_conv(zsp_end)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ READ_INVDATA_ASCQ);
+ return check_condition_result;
+ }
+ return 0;
+ }
+
+ /* No restrictions for writes within conventional zones */
+ if (zbc_zone_is_conv(zsp)) {
+ if (!zbc_zone_is_conv(zsp_end)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ WRITE_BOUNDARY_ASCQ);
+ return check_condition_result;
+ }
+ return 0;
+ }
+
+ if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ /* Writes cannot cross sequential zone boundaries */
+ if (zsp_end != zsp) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ WRITE_BOUNDARY_ASCQ);
+ return check_condition_result;
+ }
+ /* Cannot write full zones */
+ if (zsp->z_cond == ZC5_FULL) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+ /* Writes must be aligned to the zone WP */
+ if (lba != zsp->z_wp) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ UNALIGNED_WRITE_ASCQ);
+ return check_condition_result;
+ }
+ }
+
+ /* Handle implicit open of closed and empty zones */
+ if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
+ if (devip->max_open &&
+ devip->nr_exp_open >= devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT,
+ INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ return check_condition_result;
+ }
+ zbc_open_zone(devip, zsp, false);
+ }
+
+ return 0;
+}
+
+static inline int check_device_access_params
+ (struct scsi_cmnd *scp, unsigned long long lba,
+ unsigned int num, bool write)
+{
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+
if (lba + num > sdebug_capacity) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
return check_condition_result;
@@ -2473,17 +2871,37 @@ static inline int check_device_access_params(struct scsi_cmnd *scp,
mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
return check_condition_result;
}
+ if (sdebug_dev_is_zoned(devip))
+ return check_zbc_access_params(scp, lba, num, write);
+
return 0;
}
+/*
+ * Note: if BUG_ON() fires it usually indicates a problem with the parser
+ * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
+ * that access any of the "stores" in struct sdeb_store_info should call this
+ * function with bug_if_fake_rw set to true.
+ */
+static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
+ bool bug_if_fake_rw)
+{
+ if (sdebug_fake_rw) {
+ BUG_ON(bug_if_fake_rw); /* See note above */
+ return NULL;
+ }
+ return xa_load(per_store_ap, devip->sdbg_host->si_idx);
+}
+
/* Returns number of bytes copied or -1 if error. */
-static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
- u32 num, bool do_write)
+static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
+ u32 sg_skip, u64 lba, u32 num, bool do_write)
{
int ret;
u64 block, rest = 0;
- struct scsi_data_buffer *sdb = &scmd->sdb;
enum dma_data_direction dir;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ u8 *fsp;
if (do_write) {
dir = DMA_TO_DEVICE;
@@ -2492,24 +2910,25 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
dir = DMA_FROM_DEVICE;
}
- if (!sdb->length)
+ if (!sdb->length || !sip)
return 0;
- if (scmd->sc_data_direction != dir)
+ if (scp->sc_data_direction != dir)
return -1;
+ fsp = sip->storep;
block = do_div(lba, sdebug_store_sectors);
if (block + num > sdebug_store_sectors)
rest = block + num - sdebug_store_sectors;
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep + (block * sdebug_sector_size),
+ fsp + (block * sdebug_sector_size),
(num - rest) * sdebug_sector_size, sg_skip, do_write);
if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep, rest * sdebug_sector_size,
+ fsp, rest * sdebug_sector_size,
sg_skip + ((num - rest) * sdebug_sector_size),
do_write);
}
@@ -2517,34 +2936,49 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
return ret;
}
-/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into lba2fake_store(lba,num) and return true. If comparison fails then
+/* Returns number of bytes copied or -1 if error. */
+static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
+{
+ struct scsi_data_buffer *sdb = &scp->sdb;
+
+ if (!sdb->length)
+ return 0;
+ if (scp->sc_data_direction != DMA_TO_DEVICE)
+ return -1;
+ return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
+ num * sdebug_sector_size, 0, true);
+}
+
+/* If the first num blocks of sip->storep at lba match arr, copy the next
+ * num blocks of arr into the store (unless compare_only) and return true, else
* return false. */
-static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
+static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
+ const u8 *arr, bool compare_only)
{
bool res;
u64 block, rest = 0;
u32 store_blks = sdebug_store_sectors;
u32 lb_size = sdebug_sector_size;
+ u8 *fsp = sip->storep;
block = do_div(lba, store_blks);
if (block + num > store_blks)
rest = block + num - store_blks;
- res = !memcmp(fake_storep + (block * lb_size), arr,
- (num - rest) * lb_size);
+ res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
if (!res)
return res;
if (rest)
- res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
+ res = memcmp(fsp, arr + ((num - rest) * lb_size),
rest * lb_size);
if (!res)
return res;
+ if (compare_only)
+ return true;
arr += num * lb_size;
- memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
+ memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
if (rest)
- memcpy(fake_storep, arr + ((num - rest) * lb_size),
- rest * lb_size);
+ memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
return res;
}
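/*
 * Illustrative stand-alone sketch (not part of this patch): the buffer
 * convention behind comp_write_worker() above, without the store
 * wrap-around. The data-out buffer carries the compare image in its first
 * half and the new data in its second half; the write only happens when
 * the compare succeeds and compare_only is false (VERIFY passes true).
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define LB_SIZE 8	/* toy logical block size */

static bool caw(unsigned char *store, const unsigned char *arr,
		unsigned int num, bool compare_only)
{
	if (memcmp(store, arr, num * LB_SIZE))
		return false;			/* MISCOMPARE path */
	if (!compare_only)
		memcpy(store, arr + num * LB_SIZE, num * LB_SIZE);
	return true;
}

int main(void)
{
	unsigned char store[LB_SIZE] = "old_data";
	unsigned char arr[2 * LB_SIZE];

	memcpy(arr, "old_data", LB_SIZE);		/* compare half */
	memcpy(arr + LB_SIZE, "new_data", LB_SIZE);	/* write half */
	printf("matched=%d store=%.8s\n",
	       caw(store, arr, 1, false), (char *)store);
	return 0;
}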
@@ -2587,24 +3021,27 @@ static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
return 0;
}
-static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
+static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
unsigned int sectors, bool read)
{
size_t resid;
void *paddr;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ struct t10_pi_tuple *dif_storep = sip->dif_storep;
const void *dif_store_end = dif_storep + sdebug_store_sectors;
struct sg_mapping_iter miter;
/* Bytes of protection data to copy into sgl */
resid = sectors * sizeof(*dif_storep);
- sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
- scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
- (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
+ sg_miter_start(&miter, scsi_prot_sglist(scp),
+ scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
+ (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
while (sg_miter_next(&miter) && resid > 0) {
- size_t len = min(miter.length, resid);
- void *start = dif_store(sector);
+ size_t len = min_t(size_t, miter.length, resid);
+ void *start = dif_store(sip, sector);
size_t rest = 0;
if (dif_store_end < start + len)
@@ -2630,30 +3067,33 @@ static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
sg_miter_stop(&miter);
}
-static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
unsigned int i;
- struct t10_pi_tuple *sdt;
sector_t sector;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ struct t10_pi_tuple *sdt;
for (i = 0; i < sectors; i++, ei_lba++) {
int ret;
sector = start_sec + i;
- sdt = dif_store(sector);
+ sdt = dif_store(sip, sector);
if (sdt->app_tag == cpu_to_be16(0xffff))
continue;
- ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
+ ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
+ ei_lba);
if (ret) {
dif_errors++;
return ret;
}
}
- dif_copy_prot(SCpnt, start_sec, sectors, true);
+ dif_copy_prot(scp, start_sec, sectors, true);
dix_reads++;
return 0;
@@ -2661,14 +3101,15 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
- u8 *cmd = scp->cmnd;
- struct sdebug_queued_cmd *sqcp;
- u64 lba;
+ bool check_prot;
u32 num;
u32 ei_lba;
- unsigned long iflags;
int ret;
- bool check_prot;
+ u64 lba;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+ u8 *cmd = scp->cmnd;
+ struct sdebug_queued_cmd *sqcp;
switch (cmd[0]) {
case READ_16:
@@ -2750,21 +3191,21 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
- read_lock_irqsave(&atomic_rw, iflags);
+ read_lock(macc_lckp);
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
if (prot_ret) {
- read_unlock_irqrestore(&atomic_rw, iflags);
+ read_unlock(macc_lckp);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
return illegal_condition_result;
}
}
- ret = do_device_access(scp, 0, lba, num, false);
- read_unlock_irqrestore(&atomic_rw, iflags);
+ ret = do_device_access(sip, scp, 0, lba, num, false);
+ read_unlock(macc_lckp);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -2902,7 +3343,8 @@ static sector_t map_index_to_lba(unsigned long index)
return lba;
}
-static unsigned int map_state(sector_t lba, unsigned int *num)
+static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int *num)
{
sector_t end;
unsigned int mapped;
@@ -2910,19 +3352,20 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
unsigned long next;
index = lba_to_map_index(lba);
- mapped = test_bit(index, map_storep);
+ mapped = test_bit(index, sip->map_storep);
if (mapped)
- next = find_next_zero_bit(map_storep, map_size, index);
+ next = find_next_zero_bit(sip->map_storep, map_size, index);
else
- next = find_next_bit(map_storep, map_size, index);
+ next = find_next_bit(sip->map_storep, map_size, index);
end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
*num = end - lba;
return mapped;
}
-static void map_region(sector_t lba, unsigned int len)
+static void map_region(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int len)
{
sector_t end = lba + len;
@@ -2930,15 +3373,17 @@ static void map_region(sector_t lba, unsigned int len)
unsigned long index = lba_to_map_index(lba);
if (index < map_size)
- set_bit(index, map_storep);
+ set_bit(index, sip->map_storep);
lba = map_index_to_lba(index + 1);
}
}
-static void unmap_region(sector_t lba, unsigned int len)
+static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int len)
{
sector_t end = lba + len;
+ u8 *fsp = sip->storep;
while (lba < end) {
unsigned long index = lba_to_map_index(lba);
@@ -2946,17 +3391,16 @@ static void unmap_region(sector_t lba, unsigned int len)
if (lba == map_index_to_lba(index) &&
lba + sdebug_unmap_granularity <= end &&
index < map_size) {
- clear_bit(index, map_storep);
+ clear_bit(index, sip->map_storep);
if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
- memset(fake_storep +
- lba * sdebug_sector_size,
+ memset(fsp + lba * sdebug_sector_size,
(sdebug_lbprz & 1) ? 0 : 0xff,
sdebug_sector_size *
sdebug_unmap_granularity);
}
- if (dif_storep) {
- memset(dif_storep + lba, 0xff,
- sizeof(*dif_storep) *
+ if (sip->dif_storep) {
+ memset(sip->dif_storep + lba, 0xff,
+ sizeof(*sip->dif_storep) *
sdebug_unmap_granularity);
}
}
@@ -2966,13 +3410,14 @@ static void unmap_region(sector_t lba, unsigned int len)
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
- u8 *cmd = scp->cmnd;
- u64 lba;
+ bool check_prot;
u32 num;
u32 ei_lba;
- unsigned long iflags;
int ret;
- bool check_prot;
+ u64 lba;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
+ u8 *cmd = scp->cmnd;
switch (cmd[0]) {
case WRITE_16:
@@ -3025,26 +3470,32 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
}
+
+ write_lock(macc_lckp);
ret = check_device_access_params(scp, lba, num, true);
- if (ret)
+ if (ret) {
+ write_unlock(macc_lckp);
return ret;
- write_lock_irqsave(&atomic_rw, iflags);
+ }
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
if (prot_ret) {
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
return illegal_condition_result;
}
}
- ret = do_device_access(scp, 0, lba, num, true);
+ ret = do_device_access(sip, scp, 0, lba, num, true);
if (unlikely(scsi_debug_lbp()))
- map_region(lba, num);
- write_unlock_irqrestore(&atomic_rw, iflags);
+ map_region(sip, lba, num);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
+ write_unlock(macc_lckp);
if (unlikely(-1 == ret))
return DID_ERROR << 16;
else if (unlikely(sdebug_verbose &&
@@ -3085,13 +3536,14 @@ static int resp_write_scat(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
u8 *lrdp = NULL;
u8 *up;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
u8 wrprotect;
u16 lbdof, num_lrd, k;
u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
u32 lb_size = sdebug_sector_size;
u32 ei_lba;
u64 lba;
- unsigned long iflags;
int ret, res;
bool is_16;
static const u32 lrd_size = 32; /* + parameter list header size */
@@ -3153,7 +3605,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
goto err_out;
}
- write_lock_irqsave(&atomic_rw, iflags);
+ write_lock(macc_lckp);
sg_off = lbdof_blen;
/* Spec says Buffer xfer Length field in number of LBs in dout */
cum_lb = 0;
@@ -3196,9 +3648,12 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
}
- ret = do_device_access(scp, sg_off, lba, num, true);
+ ret = do_device_access(sip, scp, sg_off, lba, num, true);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
if (unlikely(scsi_debug_lbp()))
- map_region(lba, num);
+ map_region(sip, lba, num);
if (unlikely(-1 == ret)) {
ret = DID_ERROR << 16;
goto err_out_unlock;
@@ -3236,7 +3691,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
ret = 0;
err_out_unlock:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
err_out:
kfree(lrdp);
return ret;
@@ -3245,27 +3700,35 @@ err_out:
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
- int ret;
- unsigned long iflags;
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
unsigned long long i;
- u32 lb_size = sdebug_sector_size;
u64 block, lbaa;
+ u32 lb_size = sdebug_sector_size;
+ int ret;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
u8 *fs1p;
+ u8 *fsp;
+
+ write_lock(macc_lckp);
ret = check_device_access_params(scp, lba, num, true);
- if (ret)
+ if (ret) {
+ write_unlock(macc_lckp);
return ret;
-
- write_lock_irqsave(&atomic_rw, iflags);
+ }
if (unmap && scsi_debug_lbp()) {
- unmap_region(lba, num);
+ unmap_region(sip, lba, num);
goto out;
}
lbaa = lba;
block = do_div(lbaa, sdebug_store_sectors);
/* if ndob then zero 1 logical block, else fetch 1 logical block */
- fs1p = fake_storep + (block * lb_size);
+ fsp = sip->storep;
+ fs1p = fsp + (block * lb_size);
if (ndob) {
memset(fs1p, 0, lb_size);
ret = 0;
@@ -3273,7 +3736,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
- write_unlock_irqrestore(&atomic_rw, iflags);
+			write_unlock(macc_lckp);
return DID_ERROR << 16;
} else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
@@ -3284,12 +3747,15 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
for (i = 1 ; i < num ; i++) {
lbaa = lba + i;
block = do_div(lbaa, sdebug_store_sectors);
- memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+ memmove(fsp + (block * lb_size), fs1p, lb_size);
}
if (scsi_debug_lbp())
- map_region(lba, num);
+ map_region(sip, lba, num);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
out:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
return 0;
}
@@ -3401,12 +3867,12 @@ static int resp_comp_write(struct scsi_cmnd *scp,
{
u8 *cmd = scp->cmnd;
u8 *arr;
- u8 *fake_storep_hold;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
u64 lba;
u32 dnum;
u32 lb_size = sdebug_sector_size;
u8 num;
- unsigned long iflags;
int ret;
int retval = 0;
@@ -3435,14 +3901,9 @@ static int resp_comp_write(struct scsi_cmnd *scp,
return check_condition_result;
}
- write_lock_irqsave(&atomic_rw, iflags);
+ write_lock(macc_lckp);
- /* trick do_device_access() to fetch both compare and write buffers
- * from data-in into arr. Safe (atomic) since write_lock held. */
- fake_storep_hold = fake_storep;
- fake_storep = arr;
- ret = do_device_access(scp, 0, 0, dnum, true);
- fake_storep = fake_storep_hold;
+ ret = do_dout_fetch(scp, dnum, arr);
if (ret == -1) {
retval = DID_ERROR << 16;
goto cleanup;
@@ -3450,15 +3911,15 @@ static int resp_comp_write(struct scsi_cmnd *scp,
sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
"indicated=%u, IO sent=%d bytes\n", my_name,
dnum * lb_size, ret);
- if (!comp_write_worker(lba, num, arr)) {
+ if (!comp_write_worker(sip, lba, num, arr, false)) {
mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
retval = check_condition_result;
goto cleanup;
}
if (scsi_debug_lbp())
- map_region(lba, num);
+ map_region(sip, lba, num);
cleanup:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
kfree(arr);
return retval;
}
@@ -3473,10 +3934,10 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
unsigned char *buf;
struct unmap_block_desc *desc;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
unsigned int i, payload_len, descriptors;
int ret;
- unsigned long iflags;
-
if (!scsi_debug_lbp())
return 0; /* fib and say its done */
@@ -3503,7 +3964,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
desc = (void *)&buf[8];
- write_lock_irqsave(&atomic_rw, iflags);
+ write_lock(macc_lckp);
for (i = 0 ; i < descriptors ; i++) {
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
@@ -3513,13 +3974,13 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (ret)
goto out;
- unmap_region(lba, num);
+ unmap_region(sip, lba, num);
}
ret = 0;
out:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
kfree(buf);
return ret;
@@ -3533,8 +3994,8 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
u64 lba;
u32 alloc_len, mapped, num;
- u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
int ret;
+ u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
lba = get_unaligned_be64(cmd + 2);
alloc_len = get_unaligned_be32(cmd + 10);
@@ -3546,9 +4007,11 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
if (ret)
return ret;
- if (scsi_debug_lbp())
- mapped = map_state(lba, &num);
- else {
+ if (scsi_debug_lbp()) {
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+
+ mapped = map_state(sip, lba, &num);
+ } else {
mapped = 1;
/* following just in case virtual_gb changed */
sdebug_capacity = get_sdebug_capacity();
@@ -3593,6 +4056,56 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
return res;
}
+/*
+ * Assuming the LBA+num_blocks is not out-of-range, this function returns
+ * CONDITION MET if the specified blocks will fit (or already sit) in the
+ * cache, and GOOD status otherwise. A disk with a big cache is modelled,
+ * so CONDITION MET is always yielded. As a side effect, it tries to bring
+ * the addressed range of main memory into the cache of the CPU(s).
+ */
+static int resp_pre_fetch(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 lba;
+ u64 block, rest = 0;
+ u32 nblks;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
+ u8 *fsp = sip->storep;
+
+ if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
+ lba = get_unaligned_be32(cmd + 2);
+ nblks = get_unaligned_be16(cmd + 7);
+ } else { /* PRE-FETCH(16) */
+ lba = get_unaligned_be64(cmd + 2);
+ nblks = get_unaligned_be32(cmd + 10);
+ }
+ if (lba + nblks > sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+ if (!fsp)
+ goto fini;
+ /* PRE-FETCH spec says nothing about LBP or PI so skip them */
+ block = do_div(lba, sdebug_store_sectors);
+ if (block + nblks > sdebug_store_sectors)
+ rest = block + nblks - sdebug_store_sectors;
+
+ /* Try to bring the PRE-FETCH range into CPU's cache */
+ read_lock(macc_lckp);
+ prefetch_range(fsp + (sdebug_sector_size * block),
+ (nblks - rest) * sdebug_sector_size);
+ if (rest)
+ prefetch_range(fsp, rest * sdebug_sector_size);
+ read_unlock(macc_lckp);
+fini:
+ if (cmd[1] & 0x2)
+ res = SDEG_RES_IMMED_MASK;
+ return res | condition_met_result;
+}
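/*
 * Illustrative stand-alone sketch (not part of this patch): how the IMMED
 * handshake above travels back to schedule_resp(). The response routine ORs
 * SDEG_RES_IMMED_MASK into its status when the CDB's IMMED bit is set; the
 * scheduler strips the mask and drops any configured delay. The mask value
 * below is a stand-in, not the driver's definition.
 */
#include <stdio.h>

#define RES_IMMED_MASK	0x40000000	/* hypothetical flag bit */
#define CONDITION_MET	0x04		/* SAM status */

int main(void)
{
	int result = RES_IMMED_MASK | CONDITION_MET;	/* from resp_*() */
	int delay_jiffies = 100;

	if (result & RES_IMMED_MASK) {	/* respond at once, keep status */
		result &= ~RES_IMMED_MASK;
		delay_jiffies = 0;
	}
	printf("status=0x%x delay=%d\n", result, delay_jiffies);
	return 0;
}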
+
#define RL_BUCKET_ELEMS 8
/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
@@ -3694,6 +4207,504 @@ static int resp_report_luns(struct scsi_cmnd *scp,
return res;
}
+static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ bool is_bytchk3 = false;
+ u8 bytchk;
+ int ret, j;
+ u32 vnum, a_num, off;
+ const u32 lb_size = sdebug_sector_size;
+ u64 lba;
+ u8 *arr;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
+
+ bytchk = (cmd[1] >> 1) & 0x3;
+ if (bytchk == 0) {
+ return 0; /* always claim internal verify okay */
+ } else if (bytchk == 2) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
+ return check_condition_result;
+ } else if (bytchk == 3) {
+ is_bytchk3 = true; /* 1 block sent, compared repeatedly */
+ }
+ switch (cmd[0]) {
+ case VERIFY_16:
+ lba = get_unaligned_be64(cmd + 2);
+ vnum = get_unaligned_be32(cmd + 10);
+ break;
+ case VERIFY: /* is VERIFY(10) */
+ lba = get_unaligned_be32(cmd + 2);
+ vnum = get_unaligned_be16(cmd + 7);
+ break;
+ default:
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ a_num = is_bytchk3 ? 1 : vnum;
+ /* Treat following check like one for read (i.e. no write) access */
+ ret = check_device_access_params(scp, lba, a_num, false);
+ if (ret)
+ return ret;
+
+ arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+ /* Not changing store, so only need read access */
+ read_lock(macc_lckp);
+
+ ret = do_dout_fetch(scp, a_num, arr);
+ if (ret == -1) {
+ ret = DID_ERROR << 16;
+ goto cleanup;
+ } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, __func__, a_num * lb_size, ret);
+ }
+ if (is_bytchk3) {
+ for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
+ memcpy(arr + off, arr, lb_size);
+ }
+ ret = 0;
+ if (!comp_write_worker(sip, lba, vnum, arr, true)) {
+ mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
+ ret = check_condition_result;
+ goto cleanup;
+ }
+cleanup:
+ read_unlock(macc_lckp);
+ kfree(arr);
+ return ret;
+}
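/*
 * Illustrative stand-alone sketch (not part of this patch): the BYTCHK=3
 * expansion in resp_verify() above. Only one block travels in the data-out
 * buffer; it is replicated vnum times so the common compare path can run
 * unchanged.
 */
#include <stdio.h>
#include <string.h>

#define LB 4	/* toy block size */

int main(void)
{
	unsigned int vnum = 3, j, off;
	unsigned char arr[3 * LB];

	memcpy(arr, "abcd", LB);	/* the single fetched block */
	for (j = 1, off = LB; j < vnum; ++j, off += LB)
		memcpy(arr + off, arr, LB);	/* same loop as above */
	printf("%.12s\n", (char *)arr);		/* abcdabcdabcd */
	return 0;
}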
+
+#define RZONES_DESC_HD 64
+
+/* Report zones depending on start LBA and reporting options */
+static int resp_report_zones(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned int i, max_zones, rep_max_zones, nrz = 0;
+ int ret = 0;
+ u32 alloc_len, rep_opts, rep_len;
+ bool partial;
+ u64 lba, zs_lba;
+ u8 *arr = NULL, *desc;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ zs_lba = get_unaligned_be64(cmd + 2);
+ alloc_len = get_unaligned_be32(cmd + 10);
+ rep_opts = cmd[14] & 0x3f;
+ partial = cmd[14] & 0x80;
+
+ if (zs_lba >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+
+ max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
+ rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
+ max_zones);
+
+ arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+
+ read_lock(macc_lckp);
+
+ desc = arr + 64;
+ for (i = 0; i < max_zones; i++) {
+ lba = zs_lba + devip->zsize * i;
+ if (lba > sdebug_capacity)
+ break;
+ zsp = zbc_zone(devip, lba);
+ switch (rep_opts) {
+ case 0x00:
+ /* All zones */
+ break;
+ case 0x01:
+ /* Empty zones */
+ if (zsp->z_cond != ZC1_EMPTY)
+ continue;
+ break;
+ case 0x02:
+ /* Implicit open zones */
+ if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
+ continue;
+ break;
+ case 0x03:
+ /* Explicit open zones */
+ if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
+ continue;
+ break;
+ case 0x04:
+ /* Closed zones */
+ if (zsp->z_cond != ZC4_CLOSED)
+ continue;
+ break;
+ case 0x05:
+ /* Full zones */
+ if (zsp->z_cond != ZC5_FULL)
+ continue;
+ break;
+ case 0x06:
+ case 0x07:
+ case 0x10:
+ /*
+			 * Read-only, offline and reset-WP-recommended
+			 * zones are not emulated: no zones to report.
+ */
+ continue;
+ case 0x11:
+ /* non-seq-resource set */
+ if (!zsp->z_non_seq_resource)
+ continue;
+ break;
+ case 0x3f:
+ /* Not write pointer (conventional) zones */
+ if (!zbc_zone_is_conv(zsp))
+ continue;
+ break;
+ default:
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ INVALID_FIELD_IN_CDB, 0);
+ ret = check_condition_result;
+ goto fini;
+ }
+
+ if (nrz < rep_max_zones) {
+ /* Fill zone descriptor */
+ desc[0] = zsp->z_type;
+ desc[1] = zsp->z_cond << 4;
+ if (zsp->z_non_seq_resource)
+ desc[1] |= 1 << 1;
+ put_unaligned_be64((u64)zsp->z_size, desc + 8);
+ put_unaligned_be64((u64)zsp->z_start, desc + 16);
+ put_unaligned_be64((u64)zsp->z_wp, desc + 24);
+ desc += 64;
+ }
+
+ if (partial && nrz >= rep_max_zones)
+ break;
+
+ nrz++;
+ }
+
+ /* Report header */
+ put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
+ put_unaligned_be64(sdebug_capacity - 1, arr + 8);
+
+ rep_len = (unsigned long)desc - (unsigned long)arr;
+ ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
+
+fini:
+ read_unlock(macc_lckp);
+ kfree(arr);
+ return ret;
+}
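/*
 * Illustrative stand-alone sketch (not part of this patch): the sizing
 * arithmetic used by resp_report_zones() above. The response is a 64-byte
 * header followed by 64-byte zone descriptors, so the allocation length
 * bounds how many descriptors can be returned.
 */
#include <stdio.h>

int main(void)
{
	unsigned int alloc_len = 4096;	/* from CDB bytes 10..13 */
	unsigned int max_zones = 1000;	/* zones from start LBA onwards */
	unsigned int rep_max;

	/* (alloc_len - 64) >> ilog2(64): descriptors fitting after header */
	rep_max = (alloc_len - 64) >> 6;
	if (rep_max > max_zones)
		rep_max = max_zones;
	printf("can report %u of %u zones\n", rep_max, max_zones);
	return 0;
}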
+
+/* Logic transplanted from tcmu-runner, file_zbc.c */
+static void zbc_open_all(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp = &devip->zstate[0];
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++, zsp++) {
+ if (zsp->z_cond == ZC4_CLOSED)
+ zbc_open_zone(devip, &devip->zstate[i], true);
+ }
+}
+
+static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 z_id;
+ enum sdebug_z_cond zc;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+		/* Check if all closed zones can be opened */
+ if (devip->max_open &&
+ devip->nr_exp_open + devip->nr_closed > devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ res = check_condition_result;
+ goto fini;
+ }
+ /* Open all closed zones */
+ zbc_open_all(devip);
+ goto fini;
+ }
+
+ /* Open the specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zc = zsp->z_cond;
+ if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
+ goto fini;
+
+ if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ if (zc == ZC2_IMPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ zbc_open_zone(devip, zsp, true);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
+static void zbc_close_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_close_zone(devip, &devip->zstate[i]);
+}
+
+static int resp_close_zone(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ zbc_close_all(devip);
+ goto fini;
+ }
+
+ /* Close specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_close_zone(devip, zsp);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
+static void zbc_finish_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp, bool empty)
+{
+ enum sdebug_z_cond zc = zsp->z_cond;
+
+ if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
+ zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
+ if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+ zsp->z_wp = zsp->z_start + zsp->z_size;
+ zsp->z_cond = ZC5_FULL;
+ }
+}
+
+static void zbc_finish_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_finish_zone(devip, &devip->zstate[i], false);
+}
+
+static int resp_finish_zone(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ zbc_finish_all(devip);
+ goto fini;
+ }
+
+ /* Finish the specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_finish_zone(devip, zsp, true);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
+static void zbc_rwp_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ enum sdebug_z_cond zc;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+
+ zsp->z_non_seq_resource = false;
+ zsp->z_wp = zsp->z_start;
+ zsp->z_cond = ZC1_EMPTY;
+}
+
+static void zbc_rwp_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_rwp_zone(devip, &devip->zstate[i]);
+}
+
+static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ zbc_rwp_all(devip);
+ goto fini;
+ }
+
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_rwp_zone(devip, zsp);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
u32 tag = blk_mq_unique_tag(cmnd->request);
@@ -3799,6 +4810,92 @@ static void sdebug_q_cmd_wq_complete(struct work_struct *work)
static bool got_shared_uuid;
static uuid_t shared_uuid;
+static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ sector_t capacity = get_sdebug_capacity();
+ sector_t zstart = 0;
+ unsigned int i;
+
+ /*
+ * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
+	 * a zone size allowing for at least 4 zones on the device. Otherwise,
+	 * use the specified zone size and check that at least 2 zones can be
+	 * created for the device.
+ */
+ if (!sdeb_zbc_zone_size_mb) {
+ devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
+ >> ilog2(sdebug_sector_size);
+ while (capacity < devip->zsize << 2 && devip->zsize >= 2)
+ devip->zsize >>= 1;
+ if (devip->zsize < 2) {
+ pr_err("Device capacity too small\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
+ pr_err("Zone size is not a power of 2\n");
+ return -EINVAL;
+ }
+ devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
+ >> ilog2(sdebug_sector_size);
+ if (devip->zsize >= capacity) {
+ pr_err("Zone size too large for device capacity\n");
+ return -EINVAL;
+ }
+ }
+
+ devip->zsize_shift = ilog2(devip->zsize);
+ devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
+
+ if (sdeb_zbc_nr_conv >= devip->nr_zones) {
+ pr_err("Number of conventional zones too large\n");
+ return -EINVAL;
+ }
+ devip->nr_conv_zones = sdeb_zbc_nr_conv;
+
+ if (devip->zmodel == BLK_ZONED_HM) {
+ /* zbc_max_open_zones can be 0, meaning "not reported" */
+ if (sdeb_zbc_max_open >= devip->nr_zones - 1)
+ devip->max_open = (devip->nr_zones - 1) / 2;
+ else
+ devip->max_open = sdeb_zbc_max_open;
+ }
+
+ devip->zstate = kcalloc(devip->nr_zones,
+ sizeof(struct sdeb_zone_state), GFP_KERNEL);
+ if (!devip->zstate)
+ return -ENOMEM;
+
+ for (i = 0; i < devip->nr_zones; i++) {
+ zsp = &devip->zstate[i];
+
+ zsp->z_start = zstart;
+
+ if (i < devip->nr_conv_zones) {
+ zsp->z_type = ZBC_ZONE_TYPE_CNV;
+ zsp->z_cond = ZBC_NOT_WRITE_POINTER;
+ zsp->z_wp = (sector_t)-1;
+ } else {
+ if (devip->zmodel == BLK_ZONED_HM)
+ zsp->z_type = ZBC_ZONE_TYPE_SWR;
+ else
+ zsp->z_type = ZBC_ZONE_TYPE_SWP;
+ zsp->z_cond = ZC1_EMPTY;
+ zsp->z_wp = zsp->z_start;
+ }
+
+ if (zsp->z_start + devip->zsize < capacity)
+ zsp->z_size = devip->zsize;
+ else
+ zsp->z_size = capacity - zsp->z_start;
+
+ zstart += zsp->z_size;
+ }
+
+ return 0;
+}
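/*
 * Illustrative stand-alone sketch (not part of this patch): the zone layout
 * arithmetic of sdebug_device_create_zones() above, in portable C. The
 * 128 MiB default zone size is an assumption for the example; the function
 * halves the zone size until at least 4 zones fit, and the final zone
 * absorbs any remainder of the capacity.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t sector = 512;
	uint64_t capacity = (256ULL << 20) / sector;	/* 256 MiB device */
	uint64_t zsize = (128ULL << 20) / sector;	/* assumed default */
	uint64_t nr, last;

	while (capacity < (zsize << 2) && zsize >= 2)
		zsize >>= 1;			/* ensure at least 4 zones */
	nr = (capacity + zsize - 1) / zsize;	/* round up */
	last = capacity - (nr - 1) * zsize;	/* trailing (partial) zone */
	printf("zones=%llu of %llu sectors, last=%llu\n",
	       (unsigned long long)nr, (unsigned long long)zsize,
	       (unsigned long long)last);
	return 0;
}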
+
static struct sdebug_dev_info *sdebug_device_create(
struct sdebug_host_info *sdbg_host, gfp_t flags)
{
@@ -3818,6 +4915,16 @@ static struct sdebug_dev_info *sdebug_device_create(
}
}
devip->sdbg_host = sdbg_host;
+ if (sdeb_zbc_in_use) {
+ devip->zmodel = sdeb_zbc_model;
+ if (sdebug_device_create_zones(devip)) {
+ kfree(devip);
+ return NULL;
+ }
+ } else {
+ devip->zmodel = BLK_ZONED_NONE;
+ }
list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
}
return devip;
@@ -4144,8 +5251,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
return SUCCESS;
}
-static void __init sdebug_build_parts(unsigned char *ramp,
- unsigned long store_size)
+static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
struct msdos_partition *pp;
int starts[SDEBUG_MAX_PARTS + 2];
@@ -4247,6 +5353,8 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
}
+#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
+
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -4258,8 +5366,10 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
- unsigned long iflags;
+ bool new_sd_dp;
int k, num_in_q, qdepth, inject;
+ unsigned long iflags;
+ u64 ns_from_boot = 0;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
@@ -4275,7 +5385,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (delta_jiff == 0)
goto respond_in_thread;
- /* schedule the response at a later time if resources permit */
sqp = get_queue(cmnd);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (unlikely(atomic_read(&sqp->blocked))) {
@@ -4334,13 +5443,17 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
if (sd_dp == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
+ new_sd_dp = true;
+ } else {
+ new_sd_dp = false;
}
+ if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
+ ns_from_boot = ktime_get_boottime_ns();
+
+ /* one of the resp_*() response functions is called here */
cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
if (cmnd->result & SDEG_RES_IMMED_MASK) {
- /*
- * This is the F_DELAY_OVERR case. No delay.
- */
cmnd->result &= ~SDEG_RES_IMMED_MASK;
delta_jiff = ndelay = 0;
}
@@ -4355,9 +5468,37 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
ktime_t kt;
if (delta_jiff > 0) {
- kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
- } else
- kt = ndelay;
+ u64 ns = jiffies_to_nsecs(delta_jiff);
+
+ if (sdebug_random && ns < U32_MAX) {
+ ns = prandom_u32_max((u32)ns);
+ } else if (sdebug_random) {
+ ns >>= 12; /* scale to 4 usec precision */
+ if (ns < U32_MAX) /* over 4 hours max */
+ ns = prandom_u32_max((u32)ns);
+ ns <<= 12;
+ }
+ kt = ns_to_ktime(ns);
+ } else { /* ndelay has a 4.2 second max */
+ kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
+ (u32)ndelay;
+ if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
+ u64 d = ktime_get_boottime_ns() - ns_from_boot;
+
+ if (kt <= d) { /* elapsed duration >= kt */
+ sqcp->a_cmnd = NULL;
+ atomic_dec(&devip->num_in_q);
+ clear_bit(k, sqp->in_use_bm);
+ if (new_sd_dp)
+ kfree(sd_dp);
+ /* call scsi_done() from this thread */
+ cmnd->scsi_done(cmnd);
+ return 0;
+ }
+ /* otherwise reduce kt by elapsed time */
+ kt -= d;
+ }
+ }
if (!sd_dp->init_hrt) {
sd_dp->init_hrt = true;
sqcp->sd_dp = sd_dp;
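
The randomization in the hunk above works around prandom_u32_max() accepting only a 32-bit bound: delays of roughly 4.3 seconds or more are scaled down to ~4 usec units, randomized, then scaled back up. Distilled into a single helper (a hedged sketch, not the patch's code):

	static u64 sdeb_randomize_ns(u64 ns)	/* hypothetical name */
	{
		if (ns < U32_MAX)
			return prandom_u32_max((u32)ns);
		ns >>= 12;		/* 4096 ns ~= 4 usec units */
		if (ns < U32_MAX)	/* false only above roughly 4.9 hours */
			ns = prandom_u32_max((u32)ns);
		return ns << 12;
	}

The same hunk also completes the command inline when the requested nanosecond delay has already elapsed by the time the response function returns, instead of arming a timer for a duration that has already passed.
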
@@ -4370,6 +5511,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
sd_dp->defer_t = SDEB_DEFER_HRT;
+ /* schedule the invocation of scsi_done() for a later time */
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
if (!sd_dp->init_wq) {
@@ -4427,31 +5569,36 @@ module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
-module_param_string(inq_vendor, sdebug_inq_vendor_id,
- sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
- sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
+ sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
- sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
+ sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
+module_param_string(inq_vendor, sdebug_inq_vendor_id,
+ sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
+module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
-module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
-module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
-module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
+module_param_named(medium_error_count, sdebug_medium_error_count, int,
+ S_IRUGO | S_IWUSR);
+module_param_named(medium_error_start, sdebug_medium_error_start, int,
+ S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
+module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(per_host_store, sdebug_per_host_store, bool,
+ S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
-module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
+module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
@@ -4462,20 +5609,24 @@ module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
-module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
+module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
S_IRUGO | S_IWUSR);
+module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
+module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
+module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
+module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);
-MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
+MODULE_PARM_DESC(add_host, "add n hosts; if negative (via sysfs) remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
@@ -4488,30 +5639,32 @@ MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
-MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
SDEBUG_VERSION "\")");
+MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
+MODULE_PARM_DESC(lbprz,
+ "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
-MODULE_PARM_DESC(lbprz,
- "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
-MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
+MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
+MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
+MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
-MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
+MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
@@ -4528,6 +5681,10 @@ MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
+MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
+MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
+MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
+MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];
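
Taken together, the four new parameters select the zone model and geometry at load time. A hypothetical invocation creating a host-managed device with 128 MiB zones, four conventional zones, and at most eight open zones:

	modprobe scsi_debug zbc=managed zone_size_mb=128 zone_nr_conv=4 zone_max_open=8
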
@@ -4576,6 +5733,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
int f, j, l;
struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
SDEBUG_VERSION, sdebug_version_date);
@@ -4611,6 +5769,34 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
"first,last bits", f, l);
}
}
+
+ seq_printf(m, "this host_no=%d\n", host->host_no);
+ if (!xa_empty(per_store_ap)) {
+ bool niu;
+ int idx;
+ unsigned long l_idx;
+ struct sdeb_store_info *sip;
+
+ seq_puts(m, "\nhost list:\n");
+ j = 0;
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ idx = sdhp->si_idx;
+ seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
+ sdhp->shost->host_no, idx);
+ ++j;
+ }
+ seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
+ sdeb_most_recent_idx);
+ j = 0;
+ xa_for_each(per_store_ap, l_idx, sip) {
+ niu = xa_get_mark(per_store_ap, l_idx,
+ SDEB_XA_NOT_IN_USE);
+ idx = (int)l_idx;
+ seq_printf(m, " %d: idx=%d%s\n", j, idx,
+ (niu ? " not_in_use" : ""));
+ ++j;
+ }
+ }
return 0;
}
@@ -4734,7 +5920,13 @@ static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
{
int n;
+ /* Cannot change from or to TYPE_ZBC with sysfs */
+ if (sdebug_ptype == TYPE_ZBC)
+ return -EINVAL;
+
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ if (n == TYPE_ZBC)
+ return -EINVAL;
sdebug_ptype = n;
return count;
}
@@ -4766,25 +5958,41 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n, idx;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ bool want_store = (n == 0);
+ struct sdebug_host_info *sdhp;
+
n = (n > 0);
sdebug_fake_rw = (sdebug_fake_rw > 0);
- if (sdebug_fake_rw != n) {
- if ((0 == n) && (NULL == fake_storep)) {
- unsigned long sz =
- (unsigned long)sdebug_dev_size_mb *
- 1048576;
-
- fake_storep = vzalloc(sz);
- if (NULL == fake_storep) {
- pr_err("out of memory, 9\n");
- return -ENOMEM;
+ if (sdebug_fake_rw == n)
+ return count; /* not transitioning so do nothing */
+
+ if (want_store) { /* 1 --> 0 transition, set up store */
+ if (sdeb_first_idx < 0) {
+ idx = sdebug_add_store();
+ if (idx < 0)
+ return idx;
+ } else {
+ idx = sdeb_first_idx;
+ xa_clear_mark(per_store_ap, idx,
+ SDEB_XA_NOT_IN_USE);
+ }
+ /* make all hosts use same store */
+ list_for_each_entry(sdhp, &sdebug_host_list,
+ host_list) {
+ if (sdhp->si_idx != idx) {
+ xa_set_mark(per_store_ap, sdhp->si_idx,
+ SDEB_XA_NOT_IN_USE);
+ sdhp->si_idx = idx;
}
}
- sdebug_fake_rw = n;
+ sdeb_most_recent_idx = idx;
+ } else { /* 0 --> 1 transition is trigger for shrink */
+ sdebug_erase_all_stores(true /* apart from first */);
}
+ sdebug_fake_rw = n;
return count;
}
return -EINVAL;
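
In summary (editorial note, not patch text), the rewritten handler implements two transitions:

	/*
	 * fake_rw 1 -> 0: attach a backing store: reuse the first store if
	 *                 one exists, else allocate one, then point every
	 *                 host at that single shared store.
	 * fake_rw 0 -> 1: trigger a shrink: erase all stores apart from
	 *                 the first.
	 */
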
@@ -4832,6 +6040,24 @@ static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(dev_size_mb);
+static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
+}
+
+static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_per_host_store = v;
+ return count;
+}
+static DRIVER_ATTR_RW(per_host_store);
+
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
@@ -4957,6 +6183,10 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
int n;
bool changed;
+ /* Ignore capacity change for ZBC drives for now */
+ if (sdeb_zbc_in_use)
+ return -ENOTSUPP;
+
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
changed = (sdebug_virtual_gb != n);
sdebug_virtual_gb = n;
@@ -4984,26 +6214,42 @@ static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
+ /* show the absolute number of hosts currently active */
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}
-static int sdebug_add_adapter(void);
-static void sdebug_remove_adapter(void);
-
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
+ bool found;
+ unsigned long idx;
+ struct sdeb_store_info *sip;
+ bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
int delta_hosts;
if (sscanf(buf, "%d", &delta_hosts) != 1)
return -EINVAL;
if (delta_hosts > 0) {
do {
- sdebug_add_adapter();
+ found = false;
+ if (want_phs) {
+ xa_for_each_marked(per_store_ap, idx, sip,
+ SDEB_XA_NOT_IN_USE) {
+ sdeb_most_recent_idx = (int)idx;
+ found = true;
+ break;
+ }
+ if (found) /* re-use case */
+ sdebug_add_host_helper((int)idx);
+ else
+ sdebug_do_add_host(true);
+ } else {
+ sdebug_do_add_host(false);
+ }
} while (--delta_hosts);
} else if (delta_hosts < 0) {
do {
- sdebug_remove_adapter();
+ sdebug_do_remove_host(false);
} while (++delta_hosts);
}
return count;
@@ -5087,14 +6333,19 @@ static DRIVER_ATTR_RO(ato);
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
- ssize_t count;
+ ssize_t count = 0;
if (!scsi_debug_lbp())
return scnprintf(buf, PAGE_SIZE, "0-%u\n",
sdebug_store_sectors);
- count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
- (int)map_size, map_storep);
+ if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
+ struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
+
+ if (sip)
+ count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ (int)map_size, sip->map_storep);
+ }
buf[count++] = '\n';
buf[count] = '\0';
@@ -5102,6 +6353,24 @@ static ssize_t map_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(map);
+static ssize_t random_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
+}
+
+static ssize_t random_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_random = v;
+ return count;
+}
+static DRIVER_ATTR_RW(random);
+
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
@@ -5178,12 +6447,51 @@ static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
}
static DRIVER_ATTR_RW(cdb_len);
+static const char * const zbc_model_strs_a[] = {
+ [BLK_ZONED_NONE] = "none",
+ [BLK_ZONED_HA] = "host-aware",
+ [BLK_ZONED_HM] = "host-managed",
+};
+
+static const char * const zbc_model_strs_b[] = {
+ [BLK_ZONED_NONE] = "no",
+ [BLK_ZONED_HA] = "aware",
+ [BLK_ZONED_HM] = "managed",
+};
+
+static const char * const zbc_model_strs_c[] = {
+ [BLK_ZONED_NONE] = "0",
+ [BLK_ZONED_HA] = "1",
+ [BLK_ZONED_HM] = "2",
+};
+
+static int sdeb_zbc_model_str(const char *cp)
+{
+ int res = sysfs_match_string(zbc_model_strs_a, cp);
+
+ if (res < 0) {
+ res = sysfs_match_string(zbc_model_strs_b, cp);
+ if (res < 0) {
+ res = sysfs_match_string(zbc_model_strs_c, cp);
+ if (res < 0)
+ return -EINVAL;
+ }
+ }
+ return res;
+}
+
+static ssize_t zbc_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ zbc_model_strs_a[sdeb_zbc_model]);
+}
+static DRIVER_ATTR_RO(zbc);
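
The three lookup tables accept a long name, a short name, or a digit; for example (derived directly from the arrays above):

	/*
	 *   sdeb_zbc_model_str("host-managed") -> BLK_ZONED_HM
	 *   sdeb_zbc_model_str("managed")      -> BLK_ZONED_HM
	 *   sdeb_zbc_model_str("2")            -> BLK_ZONED_HM
	 * Any other string yields -EINVAL.
	 */
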
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
directory) is that auxiliary actions can be triggered when an attribute
- is changed. For example see: sdebug_add_host_store() above.
+ is changed. For example see: add_host_store() above.
*/
static struct attribute *sdebug_drv_attrs[] = {
@@ -5203,6 +6511,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_scsi_level.attr,
&driver_attr_virtual_gb.attr,
&driver_attr_add_host.attr,
+ &driver_attr_per_host_store.attr,
&driver_attr_vpd_use_hostno.attr,
&driver_attr_sector_size.attr,
&driver_attr_statistics.attr,
@@ -5212,12 +6521,14 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_guard.attr,
&driver_attr_ato.attr,
&driver_attr_map.attr,
+ &driver_attr_random.attr,
&driver_attr_removable.attr,
&driver_attr_host_lock.attr,
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
&driver_attr_uuid_ctl.attr,
&driver_attr_cdb_len.attr,
+ &driver_attr_zbc.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
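
As the note above says, each entry becomes a file under the driver's sysfs directory. A hypothetical session exercising the two new attributes:

	echo 1 > /sys/bus/pseudo/drivers/scsi_debug/per_host_store
	echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
	cat /sys/bus/pseudo/drivers/scsi_debug/add_host   # now reports active hosts
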
@@ -5226,11 +6537,13 @@ static struct device *pseudo_primary;
static int __init scsi_debug_init(void)
{
+ bool want_store = (sdebug_fake_rw == 0);
unsigned long sz;
- int host_to_add;
- int k;
- int ret;
+ int k, ret, hosts_to_add;
+ int idx = -1;
+ ramdisk_lck_a[0] = &atomic_rw;
+ ramdisk_lck_a[1] = &atomic_rw2;
atomic_set(&retired_max_queue, 0);
if (sdebug_ndelay >= 1000 * 1000 * 1000) {
@@ -5304,6 +6617,40 @@ static int __init scsi_debug_init(void)
for (k = 0; k < submit_queues; ++k)
spin_lock_init(&sdebug_q_arr[k].qc_lock);
+ /*
+ * check for host managed zoned block device specified with
+ * ptype=0x14 or zbc=XXX.
+ */
+ if (sdebug_ptype == TYPE_ZBC) {
+ sdeb_zbc_model = BLK_ZONED_HM;
+ } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
+ k = sdeb_zbc_model_str(sdeb_zbc_model_s);
+ if (k < 0) {
+ ret = k;
+ goto free_vm;
+ }
+ sdeb_zbc_model = k;
+ switch (sdeb_zbc_model) {
+ case BLK_ZONED_NONE:
+ case BLK_ZONED_HA:
+ sdebug_ptype = TYPE_DISK;
+ break;
+ case BLK_ZONED_HM:
+ sdebug_ptype = TYPE_ZBC;
+ break;
+ default:
+ pr_err("Invalid ZBC model\n");
+ return -EINVAL;
+ }
+ }
+ if (sdeb_zbc_model != BLK_ZONED_NONE) {
+ sdeb_zbc_in_use = true;
+ if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
+ sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
+ }
+
+ if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
+ sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
if (sdebug_dev_size_mb < 1)
sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
sz = (unsigned long)sdebug_dev_size_mb * 1048576;
@@ -5326,36 +6673,6 @@ static int __init scsi_debug_init(void)
sdebug_cylinders_per = (unsigned long)sdebug_capacity /
(sdebug_sectors_per * sdebug_heads);
}
-
- if (sdebug_fake_rw == 0) {
- fake_storep = vzalloc(sz);
- if (NULL == fake_storep) {
- pr_err("out of memory, 1\n");
- ret = -ENOMEM;
- goto free_q_arr;
- }
- if (sdebug_num_parts > 0)
- sdebug_build_parts(fake_storep, sz);
- }
-
- if (sdebug_dix) {
- int dif_size;
-
- dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
- dif_storep = vmalloc(dif_size);
-
- pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
-
- if (dif_storep == NULL) {
- pr_err("out of mem. (DIX)\n");
- ret = -ENOMEM;
- goto free_vm;
- }
-
- memset(dif_storep, 0xff, dif_size);
- }
-
- /* Logical Block Provisioning */
if (scsi_debug_lbp()) {
sdebug_unmap_max_blocks =
clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
@@ -5371,26 +6688,16 @@ static int __init scsi_debug_init(void)
sdebug_unmap_alignment) {
pr_err("ERR: unmap_granularity <= unmap_alignment\n");
ret = -EINVAL;
- goto free_vm;
+ goto free_q_arr;
}
-
- map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
- map_storep = vmalloc(array_size(sizeof(long),
- BITS_TO_LONGS(map_size)));
-
- pr_info("%lu provisioning blocks\n", map_size);
-
- if (map_storep == NULL) {
- pr_err("out of mem. (MAP)\n");
- ret = -ENOMEM;
- goto free_vm;
+ }
+ xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ if (want_store) {
+ idx = sdebug_add_store();
+ if (idx < 0) {
+ ret = idx;
+ goto free_q_arr;
}
-
- bitmap_zero(map_storep, map_size);
-
- /* Map first 1KB for partition table */
- if (sdebug_num_parts)
- map_region(0, 2);
}
pseudo_primary = root_device_register("pseudo_0");
@@ -5410,18 +6717,28 @@ static int __init scsi_debug_init(void)
goto bus_unreg;
}
- host_to_add = sdebug_add_host;
+ hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
- for (k = 0; k < host_to_add; k++) {
- if (sdebug_add_adapter()) {
- pr_err("sdebug_add_adapter failed k=%d\n", k);
- break;
+ for (k = 0; k < hosts_to_add; k++) {
+ if (want_store && k == 0) {
+ ret = sdebug_add_host_helper(idx);
+ if (ret < 0) {
+ pr_err("add_host_helper k=%d, error=%d\n",
+ k, -ret);
+ break;
+ }
+ } else {
+ ret = sdebug_do_add_host(want_store &&
+ sdebug_per_host_store);
+ if (ret < 0) {
+ pr_err("add_host k=%d error=%d\n", k, -ret);
+ break;
+ }
}
}
-
if (sdebug_verbose)
- pr_info("built %d host(s)\n", sdebug_add_host);
+ pr_info("built %d host(s)\n", sdebug_num_hosts);
return 0;
@@ -5430,9 +6747,7 @@ bus_unreg:
dev_unreg:
root_device_unregister(pseudo_primary);
free_vm:
- vfree(map_storep);
- vfree(dif_storep);
- vfree(fake_storep);
+ sdebug_erase_store(idx, NULL);
free_q_arr:
kfree(sdebug_q_arr);
return ret;
@@ -5440,20 +6755,18 @@ free_q_arr:
static void __exit scsi_debug_exit(void)
{
- int k = sdebug_add_host;
+ int k = sdebug_num_hosts;
stop_all_queued();
for (; k; k--)
- sdebug_remove_adapter();
+ sdebug_do_remove_host(true);
free_all_queued();
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
- vfree(map_storep);
- vfree(dif_storep);
- vfree(fake_storep);
- kfree(sdebug_q_arr);
+ sdebug_erase_all_stores(false);
+ xa_destroy(per_store_ap);
}
device_initcall(scsi_debug_init);
@@ -5467,29 +6780,146 @@ static void sdebug_release_adapter(struct device *dev)
kfree(sdbg_host);
}
-static int sdebug_add_adapter(void)
+/* idx must be valid; if sip is NULL it will be looked up using idx */
+static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
- int k, devs_per_host;
- int error = 0;
+ if (idx < 0)
+ return;
+ if (!sip) {
+ if (xa_empty(per_store_ap))
+ return;
+ sip = xa_load(per_store_ap, idx);
+ if (!sip)
+ return;
+ }
+ vfree(sip->map_storep);
+ vfree(sip->dif_storep);
+ vfree(sip->storep);
+ xa_erase(per_store_ap, idx);
+ kfree(sip);
+}
+
+/* Callers pass apart_from_first==false only in the shutdown case. */
+static void sdebug_erase_all_stores(bool apart_from_first)
+{
+ unsigned long idx;
+ struct sdeb_store_info *sip = NULL;
+
+ xa_for_each(per_store_ap, idx, sip) {
+ if (apart_from_first)
+ apart_from_first = false;
+ else
+ sdebug_erase_store(idx, sip);
+ }
+ if (apart_from_first)
+ sdeb_most_recent_idx = sdeb_first_idx;
+}
+
+/*
+ * Returns the new store's xarray element index (>= 0) on success, else a
+ * negated errno. The number of stores is limited to 65536.
+ */
+static int sdebug_add_store(void)
+{
+ int res;
+ u32 n_idx;
+ unsigned long iflags;
+ unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
+ struct sdeb_store_info *sip = NULL;
+ struct xa_limit xal = { .max = 1 << 16, .min = 0 };
+
+ sip = kzalloc(sizeof(*sip), GFP_KERNEL);
+ if (!sip)
+ return -ENOMEM;
+
+ xa_lock_irqsave(per_store_ap, iflags);
+ res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
+ if (unlikely(res < 0)) {
+ xa_unlock_irqrestore(per_store_ap, iflags);
+ kfree(sip);
+ pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
+ return res;
+ }
+ sdeb_most_recent_idx = n_idx;
+ if (sdeb_first_idx < 0)
+ sdeb_first_idx = n_idx;
+ xa_unlock_irqrestore(per_store_ap, iflags);
+
+ res = -ENOMEM;
+ sip->storep = vzalloc(sz);
+ if (!sip->storep) {
+ pr_err("user data oom\n");
+ goto err;
+ }
+ if (sdebug_num_parts > 0)
+ sdebug_build_parts(sip->storep, sz);
+
+ /* DIF/DIX: what T10 calls Protection Information (PI) */
+ if (sdebug_dix) {
+ int dif_size;
+
+ dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
+ sip->dif_storep = vmalloc(dif_size);
+
+ pr_info("dif_storep %u bytes @ %pK\n", dif_size,
+ sip->dif_storep);
+
+ if (!sip->dif_storep) {
+ pr_err("DIX oom\n");
+ goto err;
+ }
+ memset(sip->dif_storep, 0xff, dif_size);
+ }
+ /* Logical Block Provisioning */
+ if (scsi_debug_lbp()) {
+ map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+ sip->map_storep = vmalloc(array_size(sizeof(long),
+ BITS_TO_LONGS(map_size)));
+
+ pr_info("%lu provisioning blocks\n", map_size);
+
+ if (!sip->map_storep) {
+ pr_err("LBP map oom\n");
+ goto err;
+ }
+
+ bitmap_zero(sip->map_storep, map_size);
+
+ /* Map first 1KB for partition table */
+ if (sdebug_num_parts)
+ map_region(sip, 0, 2);
+ }
+
+ rwlock_init(&sip->macc_lck);
+ return (int)n_idx;
+err:
+ sdebug_erase_store((int)n_idx, sip);
+ pr_warn("%s: failed, errno=%d\n", __func__, -res);
+ return res;
+}
+
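
A brief editorial summary of the index bookkeeping introduced here:

	/*
	 * sdebug_add_store() hands out xarray indices from 0 up to the
	 * 65536 limit noted above; sdeb_first_idx remembers the first
	 * allocation and sdeb_most_recent_idx the latest. Retired stores
	 * are not freed immediately: they are tagged SDEB_XA_NOT_IN_USE
	 * so that add_host_store() and fake_rw_store() can reclaim them.
	 */
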
+static int sdebug_add_host_helper(int per_host_idx)
+{
+ int k, devs_per_host, idx;
+ int error = -ENOMEM;
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
- if (sdbg_host == NULL) {
- pr_err("out of memory at line %d\n", __LINE__);
+ if (!sdbg_host)
return -ENOMEM;
- }
+ idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
+ if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
+ xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
+ sdbg_host->si_idx = idx;
INIT_LIST_HEAD(&sdbg_host->dev_info_list);
devs_per_host = sdebug_num_tgts * sdebug_max_luns;
for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
- if (!sdbg_devinfo) {
- pr_err("out of memory at line %d\n", __LINE__);
- error = -ENOMEM;
+ if (!sdbg_devinfo)
goto clean;
- }
}
spin_lock(&sdebug_host_list_lock);
@@ -5499,44 +6929,77 @@ static int sdebug_add_adapter(void)
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
+ dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
-
if (error)
goto clean;
- ++sdebug_add_host;
- return error;
+ ++sdebug_num_hosts;
+ return 0;
clean:
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo->zstate);
kfree(sdbg_devinfo);
}
-
kfree(sdbg_host);
+ pr_warn("%s: failed, errno=%d\n", __func__, -error);
return error;
}
-static void sdebug_remove_adapter(void)
+static int sdebug_do_add_host(bool mk_new_store)
{
+ int ph_idx = sdeb_most_recent_idx;
+
+ if (mk_new_store) {
+ ph_idx = sdebug_add_store();
+ if (ph_idx < 0)
+ return ph_idx;
+ }
+ return sdebug_add_host_helper(ph_idx);
+}
+
+static void sdebug_do_remove_host(bool the_end)
+{
+ int idx = -1;
struct sdebug_host_info *sdbg_host = NULL;
+ struct sdebug_host_info *sdbg_host2;
spin_lock(&sdebug_host_list_lock);
if (!list_empty(&sdebug_host_list)) {
sdbg_host = list_entry(sdebug_host_list.prev,
struct sdebug_host_info, host_list);
- list_del(&sdbg_host->host_list);
+ idx = sdbg_host->si_idx;
+ }
+ if (!the_end && idx >= 0) {
+ bool unique = true;
+
+ list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
+ if (sdbg_host2 == sdbg_host)
+ continue;
+ if (idx == sdbg_host2->si_idx) {
+ unique = false;
+ break;
+ }
+ }
+ if (unique) {
+ xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
+ if (idx == sdeb_most_recent_idx)
+ --sdeb_most_recent_idx;
+ }
}
+ if (sdbg_host)
+ list_del(&sdbg_host->host_list);
spin_unlock(&sdebug_host_list_lock);
if (!sdbg_host)
return;
device_unregister(&sdbg_host->dev);
- --sdebug_add_host;
+ --sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -5595,6 +7058,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
const struct opcode_info_t *oip;
const struct opcode_info_t *r_oip;
struct sdebug_dev_info *devip;
+
u8 *cmd = scp->cmnd;
int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
@@ -5724,7 +7188,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
fini:
- if (F_DELAY_OVERR & flags)
+ if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
return schedule_resp(scp, devip, errsts, pfp, 0, 0);
else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
sdebug_ndelay > 10000)) {
@@ -5866,8 +7330,9 @@ static int sdebug_driver_probe(struct device *dev)
pr_err("scsi_add_host failed\n");
error = -ENODEV;
scsi_host_put(hpnt);
- } else
+ } else {
scsi_scan_host(hpnt);
+ }
return error;
}
@@ -5889,6 +7354,7 @@ static int sdebug_driver_remove(struct device *dev)
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo->zstate);
kfree(sdbg_devinfo);
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 978be1602f71..927b1e641842 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1412,6 +1412,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdev,
"%s: skip START_UNIT, past eh deadline\n",
current->comm));
+ scsi_device_put(sdev);
break;
}
stu_scmd = NULL;
@@ -1478,6 +1479,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdev,
"%s: skip BDR, past eh deadline\n",
current->comm));
+ scsi_device_put(sdev);
break;
}
bdr_scmd = NULL;
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 8f3af87b6bb0..45d04b7b2643 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -211,18 +211,18 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg
}
switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- if (!access_ok(arg, sizeof(struct scsi_idlun)))
+ case SCSI_IOCTL_GET_IDLUN: {
+ struct scsi_idlun v = {
+ .dev_id = (sdev->id & 0xff)
+ + ((sdev->lun & 0xff) << 8)
+ + ((sdev->channel & 0xff) << 16)
+ + ((sdev->host->host_no & 0xff) << 24),
+ .host_unique_id = sdev->host->unique_id
+ };
+ if (copy_to_user(arg, &v, sizeof(struct scsi_idlun)))
return -EFAULT;
-
- __put_user((sdev->id & 0xff)
- + ((sdev->lun & 0xff) << 8)
- + ((sdev->channel & 0xff) << 16)
- + ((sdev->host->host_no & 0xff) << 24),
- &((struct scsi_idlun __user *)arg)->dev_id);
- __put_user(sdev->host->unique_id,
- &((struct scsi_idlun __user *)arg)->host_unique_id);
return 0;
+ }
case SCSI_IOCTL_GET_BUS_NUMBER:
return put_user(sdev->host->host_no, (int __user *)arg);
case SCSI_IOCTL_PROBE_HOST:
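
The replacement builds the whole scsi_idlun on the kernel stack and issues one copy_to_user() instead of access_ok() plus two __put_user() calls. For reference, the packed dev_id layout with a worked example:

	/*
	 * dev_id byte layout:
	 *   bits  0..7   target id
	 *   bits  8..15  lun
	 *   bits 16..23  channel
	 *   bits 24..31  host number
	 * e.g. host 2, channel 0, id 1, lun 3 -> 0x02000301
	 */
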
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 06c260f6cdae..0ba7a65e7c8d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -202,24 +202,17 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
blk_mq_requeue_request(cmd->request, true);
}
-/*
- * Function: scsi_queue_insert()
- *
- * Purpose: Insert a command in the midlevel queue.
- *
- * Arguments: cmd - command that we are adding to queue.
- * reason - why we are inserting command to queue.
- *
- * Lock status: Assumed that lock is not held upon entry.
+/**
+ * scsi_queue_insert - Reinsert a command in the queue.
+ * @cmd: command that we are adding to queue.
+ * @reason: why we are inserting command to queue.
*
- * Returns: Nothing.
+ * We do this for one of two cases. Either the host is busy and it cannot accept
+ * any more commands for the time being, or the device returned QUEUE_FULL and
+ * can accept no more commands.
*
- * Notes: We do this for one of two cases. Either the host is busy
- * and it cannot accept any more commands for the time being,
- * or the device returned QUEUE_FULL and can accept no more
- * commands.
- * Notes: This could be called either from an interrupt context or a
- * normal process context.
+ * Context: This could be called either from an interrupt context or a normal
+ * process context.
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
@@ -301,16 +294,12 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
}
EXPORT_SYMBOL(__scsi_execute);
-/*
- * Function: scsi_init_cmd_errh()
- *
- * Purpose: Initialize cmd fields related to error handling.
- *
- * Arguments: cmd - command that is ready to be queued.
+/**
+ * scsi_init_cmd_errh - Initialize cmd fields related to error handling.
+ * @cmd: command that is ready to be queued.
*
- * Notes: This function has the job of initializing a number of
- * fields related to error handling. Typically this will
- * be called once for each command, as required.
+ * This function has the job of initializing a number of fields related to error
+ * handling. Typically this will be called once for each command, as required.
*/
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
@@ -496,17 +485,11 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
spin_unlock_irqrestore(shost->host_lock, flags);
}
-/*
- * Function: scsi_run_queue()
- *
- * Purpose: Select a proper request queue to serve next
- *
- * Arguments: q - last request's queue
- *
- * Returns: Nothing
+/**
+ * scsi_run_queue - Select a proper request queue to serve next.
+ * @q: last request's queue
*
- * Notes: The previous command was completely finished, start
- * a new one if possible.
+ * The previous command was completely finished, start a new one if possible.
*/
static void scsi_run_queue(struct request_queue *q)
{
@@ -548,7 +531,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
}
}
-static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
+static void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table,
@@ -560,7 +543,7 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
}
@@ -896,34 +879,27 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
return result;
}
-/*
- * Function: scsi_io_completion()
- *
- * Purpose: Completion processing for block device I/O requests.
- *
- * Arguments: cmd - command that is finished.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns: Nothing
- *
- * Notes: We will finish off the specified number of sectors. If we
- * are done, the command block will be released and the queue
- * function will be goosed. If we are not done then we have to
- * figure out what to do next:
- *
- * a) We can call scsi_requeue_command(). The request
- * will be unprepared and put back on the queue. Then
- * a new command will be created for it. This should
- * be used if we made forward progress, or if we want
- * to switch from READ(10) to READ(6) for example.
- *
- * b) We can call __scsi_queue_insert(). The request will
- * be put back on the queue and retried using the same
- * command as before, possibly after a delay.
- *
- * c) We can call scsi_end_request() with blk_stat other than
- * BLK_STS_OK, to fail the remainder of the request.
+/**
+ * scsi_io_completion - Completion processing for SCSI commands.
+ * @cmd: command that is finished.
+ * @good_bytes: number of processed bytes.
+ *
+ * We will finish off the specified number of sectors. If we are done, the
+ * command block will be released and the queue function will be goosed. If we
+ * are not done then we have to figure out what to do next:
+ *
+ * a) We can call scsi_io_completion_reprep(). The request will be
+ * unprepared and put back on the queue. Then a new command will
+ * be created for it. This should be used if we made forward
+ * progress, or if we want to switch from READ(10) to READ(6) for
+ * example.
+ *
+ * b) We can call scsi_io_completion_action(). The request will be
+ * put back on the queue and retried using the same command as
+ * before, possibly after a delay.
+ *
+ * c) We can call scsi_end_request() with blk_stat other than
+ * BLK_STS_OK, to fail the remainder of the request.
*/
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
@@ -951,8 +927,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
blk_rq_sectors(req), good_bytes));
/*
- * Next deal with any sectors which we were able to correctly
- * handle. Failed, zero length commands always need to drop down
+ * Failed, zero length commands always need to drop down
* to retry code. Fast path should return in this block.
*/
if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
@@ -978,56 +953,81 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_io_completion_action(cmd, result);
}
-static blk_status_t scsi_init_sgtable(struct request *req,
- struct scsi_data_buffer *sdb)
+static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
+ struct request *rq)
{
+ return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
+ !op_is_write(req_op(rq)) &&
+ sdev->host->hostt->dma_need_drain(rq);
+}
+
+/**
+ * scsi_init_io - SCSI I/O initialization function.
+ * @cmd: command descriptor we wish to initialize
+ *
+ * Returns:
+ * * BLK_STS_OK - on success
+ * * BLK_STS_RESOURCE - if the failure is retryable
+ * * BLK_STS_IOERR - if the failure is fatal
+ */
+blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct request *rq = cmd->request;
+ unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
+ struct scatterlist *last_sg = NULL;
+ blk_status_t ret;
+ bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
int count;
+ if (WARN_ON_ONCE(!nr_segs))
+ return BLK_STS_IOERR;
+
+ /*
+ * Make sure there is space for the drain. The driver must adjust
+ * max_hw_segments to be prepared for this.
+ */
+ if (need_drain)
+ nr_segs++;
+
/*
* If sg table allocation fails, requeue request later.
*/
- if (unlikely(sg_alloc_table_chained(&sdb->table,
- blk_rq_nr_phys_segments(req), sdb->table.sgl,
- SCSI_INLINE_SG_CNT)))
+ if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
+ cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
return BLK_STS_RESOURCE;
- /*
+ /*
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
*/
- count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
- BUG_ON(count > sdb->table.nents);
- sdb->table.nents = count;
- sdb->length = blk_rq_payload_bytes(req);
- return BLK_STS_OK;
-}
+ count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
-/*
- * Function: scsi_init_io()
- *
- * Purpose: SCSI I/O initialize function.
- *
- * Arguments: cmd - Command descriptor we wish to initialize
- *
- * Returns: BLK_STS_OK on success
- * BLK_STS_RESOURCE if the failure is retryable
- * BLK_STS_IOERR if the failure is fatal
- */
-blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
-{
- struct request *rq = cmd->request;
- blk_status_t ret;
+ if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
+ unsigned int pad_len =
+ (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
- if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
- return BLK_STS_IOERR;
+ last_sg->length += pad_len;
+ cmd->extra_len += pad_len;
+ }
- ret = scsi_init_sgtable(rq, &cmd->sdb);
- if (ret)
- return ret;
+ if (need_drain) {
+ sg_unmark_end(last_sg);
+ last_sg = sg_next(last_sg);
+ sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
+ sg_mark_end(last_sg);
+
+ cmd->extra_len += sdev->dma_drain_len;
+ count++;
+ }
+
+ BUG_ON(count > cmd->sdb.table.nents);
+ cmd->sdb.table.nents = count;
+ cmd->sdb.length = blk_rq_payload_bytes(rq);
if (blk_integrity_rq(rq)) {
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
- int ivecs, count;
+ int ivecs;
if (WARN_ON_ONCE(!prot_sdb)) {
/*
@@ -1059,7 +1059,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
return BLK_STS_OK;
out_free_sgtables:
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
return ret;
}
EXPORT_SYMBOL(scsi_init_io);
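
A low-level driver opts into the drain logic by providing the predicate and per-device buffer consulted by scsi_cmd_needs_dma_drain() above. A hedged fragment (all names hypothetical; only dma_need_drain, dma_drain_len and dma_drain_buf come from the patch):

	static void *my_drain_buf;	/* preallocated, DMA-able */

	static bool my_dma_need_drain(struct request *rq)
	{
		/* e.g. drain every passthrough request on this device */
		return true;
	}

	static int my_slave_configure(struct scsi_device *sdev)
	{
		sdev->dma_drain_len = 512;
		sdev->dma_drain_buf = my_drain_buf;
		return 0;
	}

	/* in the scsi_host_template: .dma_need_drain = my_dma_need_drain, */
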
@@ -1190,6 +1190,7 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
if (!blk_rq_bytes(req))
cmd->sc_data_direction = DMA_NONE;
@@ -1199,9 +1200,14 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
cmd->sc_data_direction = DMA_FROM_DEVICE;
if (blk_rq_is_scsi(req))
- return scsi_setup_scsi_cmnd(sdev, req);
+ ret = scsi_setup_scsi_cmnd(sdev, req);
else
- return scsi_setup_fs_cmnd(sdev, req);
+ ret = scsi_setup_fs_cmnd(sdev, req);
+
+ if (ret != BLK_STS_OK)
+ scsi_free_sgtables(cmd);
+
+ return ret;
}
static blk_status_t
@@ -1610,12 +1616,7 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
struct scsi_device *sdev = q->queuedata;
- if (scsi_dev_queue_ready(q, sdev))
- return true;
-
- if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
- blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
- return false;
+ return scsi_dev_queue_ready(q, sdev);
}
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1684,6 +1685,7 @@ out_put_budget:
case BLK_STS_OK:
break;
case BLK_STS_RESOURCE:
+ case BLK_STS_ZONE_RESOURCE:
if (atomic_read(&sdev->device_busy) ||
scsi_device_blocked(sdev))
ret = BLK_STS_DEV_RESOURCE;
@@ -1870,6 +1872,7 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
+ struct blk_mq_tag_set *tag_set = &shost->tag_set;
sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
scsi_mq_inline_sgl_size(shost));
@@ -1878,21 +1881,21 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
cmd_size += sizeof(struct scsi_data_buffer) +
sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
- memset(&shost->tag_set, 0, sizeof(shost->tag_set));
+ memset(tag_set, 0, sizeof(*tag_set));
if (shost->hostt->commit_rqs)
- shost->tag_set.ops = &scsi_mq_ops;
+ tag_set->ops = &scsi_mq_ops;
else
- shost->tag_set.ops = &scsi_mq_ops_no_commit;
- shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
- shost->tag_set.queue_depth = shost->can_queue;
- shost->tag_set.cmd_size = cmd_size;
- shost->tag_set.numa_node = NUMA_NO_NODE;
- shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- shost->tag_set.flags |=
+ tag_set->ops = &scsi_mq_ops_no_commit;
+ tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+ tag_set->queue_depth = shost->can_queue;
+ tag_set->cmd_size = cmd_size;
+ tag_set->numa_node = NUMA_NO_NODE;
+ tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+ tag_set->flags |=
BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
- shost->tag_set.driver_data = shost;
+ tag_set->driver_data = shost;
- return blk_mq_alloc_tag_set(&shost->tag_set);
+ return blk_mq_alloc_tag_set(tag_set);
}
void scsi_mq_destroy_tags(struct Scsi_Host *shost)
@@ -1921,21 +1924,13 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
-/*
- * Function: scsi_block_requests()
- *
- * Purpose: Utility function used by low-level drivers to prevent further
- * commands from being queued to the device.
- *
- * Arguments: shost - Host in question
- *
- * Returns: Nothing
- *
- * Lock status: No locks are assumed held.
+/**
+ * scsi_block_requests - Utility function used by low-level drivers to prevent
+ * further commands from being queued to the device.
+ * @shost: host in question
*
- * Notes: There is no timer nor any other means by which the requests
- * get unblocked other than the low-level driver calling
- * scsi_unblock_requests().
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests().
*/
void scsi_block_requests(struct Scsi_Host *shost)
{
@@ -1943,25 +1938,15 @@ void scsi_block_requests(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_block_requests);
-/*
- * Function: scsi_unblock_requests()
- *
- * Purpose: Utility function used by low-level drivers to allow further
- * commands from being queued to the device.
- *
- * Arguments: shost - Host in question
- *
- * Returns: Nothing
- *
- * Lock status: No locks are assumed held.
- *
- * Notes: There is no timer nor any other means by which the requests
- * get unblocked other than the low-level driver calling
- * scsi_unblock_requests().
- *
- * This is done as an API function so that changes to the
- * internals of the scsi mid-layer won't require wholesale
- * changes to drivers that use this feature.
+/**
+ * scsi_unblock_requests - Utility function used by low-level drivers to allow
+ * further commands to be queued to the device.
+ * @shost: host in question
+ *
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests(). This is done
+ * as an API function so that changes to the internals of the scsi mid-layer
+ * won't require wholesale changes to drivers that use this feature.
*/
void scsi_unblock_requests(struct Scsi_Host *shost)
{
@@ -2842,11 +2827,27 @@ scsi_host_block(struct Scsi_Host *shost)
struct scsi_device *sdev;
int ret = 0;
+ /*
+ * Call scsi_internal_device_block_nowait so we can avoid
+ * calling synchronize_rcu() for each LUN.
+ */
shost_for_each_device(sdev, shost) {
- ret = scsi_internal_device_block(sdev);
+ mutex_lock(&sdev->state_mutex);
+ ret = scsi_internal_device_block_nowait(sdev);
+ mutex_unlock(&sdev->state_mutex);
if (ret)
break;
}
+
+ /*
+ * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
+ * calling synchronize_rcu() once is enough.
+ */
+ WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
+
+ if (!ret)
+ synchronize_rcu();
+
return ret;
}
EXPORT_SYMBOL_GPL(scsi_host_block);
@@ -2859,8 +2860,10 @@ scsi_host_unblock(struct Scsi_Host *shost, int new_state)
shost_for_each_device(sdev, shost) {
ret = scsi_internal_device_unblock(sdev, new_state);
- if (ret)
+ if (ret) {
+ scsi_device_put(sdev);
break;
+ }
}
return ret;
}
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 3717eea37ecb..5f0ad8b32e3a 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -80,6 +80,10 @@ static int scsi_dev_type_resume(struct device *dev,
dev_dbg(dev, "scsi resume: %d\n", err);
if (err == 0) {
+ bool was_runtime_suspended;
+
+ was_runtime_suspended = pm_runtime_suspended(dev);
+
pm_runtime_disable(dev);
err = pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -93,8 +97,10 @@ static int scsi_dev_type_resume(struct device *dev,
*/
if (!err && scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
-
- blk_set_runtime_active(sdev->request_queue);
+ if (was_runtime_suspended)
+ blk_post_runtime_resume(sdev->request_queue, 0);
+ else
+ blk_set_runtime_active(sdev->request_queue);
}
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b2a803c51288..f4cc08eb47ba 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1616,6 +1616,12 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);
+/*
+ * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
+ * against the kernel stop_connection recovery mechanism
+ */
+static DEFINE_MUTEX(conn_mutex);
+
static LIST_HEAD(sesslist);
static LIST_HEAD(sessdestroylist);
static DEFINE_SPINLOCK(sesslock);
@@ -2445,6 +2451,32 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+/*
+ * This can be called without the rx_queue_mutex if invoked by the kernel
+ * stop work. In that case, conn_mutex guarantees it does not race with
+ * iscsi_destroy.
+ */
+static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
+ /*
+ * It is important that this path doesn't rely on
+ * rx_queue_mutex; otherwise, a thread doing allocation on a
+ * start_session/start_connection could sleep waiting on a
+ * writeback to a failed iscsi device that cannot be recovered
+ * because the lock is held. If we don't hold it here, the
+ * kernel stop_conn_work_fn has a chance to stop the broken
+ * session and resolve the allocation.
+ *
+ * Still, the user-invoked .stop_conn() needs to be serialized
+ * with stop_conn_work_fn by a private mutex. Not pretty, but
+ * it works.
+ */
+ mutex_lock(&conn_mutex);
+ conn->transport->stop_conn(conn, flag);
+ mutex_unlock(&conn_mutex);
+}
+
static void stop_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn, *tmp;
@@ -2463,30 +2495,17 @@ static void stop_conn_work_fn(struct work_struct *work)
uint32_t sid = iscsi_conn_get_sid(conn);
struct iscsi_cls_session *session;
- mutex_lock(&rx_queue_mutex);
-
session = iscsi_session_lookup(sid);
if (session) {
if (system_state != SYSTEM_RUNNING) {
session->recovery_tmo = 0;
- conn->transport->stop_conn(conn,
- STOP_CONN_TERM);
+ iscsi_if_stop_conn(conn, STOP_CONN_TERM);
} else {
- conn->transport->stop_conn(conn,
- STOP_CONN_RECOVER);
+ iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
}
}
list_del_init(&conn->conn_list_err);
-
- mutex_unlock(&rx_queue_mutex);
-
- /* we don't want to hold rx_queue_mutex for too long,
- * for instance if many conns failed at the same time,
- * since this stall other iscsi maintenance operations.
- * Give other users a chance to proceed.
- */
- cond_resched();
}
}
@@ -2846,8 +2865,11 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
spin_unlock_irqrestore(&connlock, flags);
ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
+
+ mutex_lock(&conn_mutex);
if (transport->destroy_conn)
transport->destroy_conn(conn);
+ mutex_unlock(&conn_mutex);
return 0;
}
@@ -3689,9 +3711,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
break;
}
+ mutex_lock(&conn_mutex);
ev->r.retcode = transport->bind_conn(session, conn,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
+ mutex_unlock(&conn_mutex);
+
if (ev->r.retcode || !transport->ep_connect)
break;
@@ -3713,9 +3738,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_START_CONN:
conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
if (conn) {
+ mutex_lock(&conn_mutex);
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
conn->state = ISCSI_CONN_UP;
+ mutex_unlock(&conn_mutex);
}
else
err = -EINVAL;
@@ -3723,17 +3750,20 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_STOP_CONN:
conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
if (conn)
- transport->stop_conn(conn, ev->u.stop_conn.flag);
+ iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
else
err = -EINVAL;
break;
case ISCSI_UEVENT_SEND_PDU:
conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
- if (conn)
+ if (conn) {
+ mutex_lock(&conn_mutex);
ev->r.retcode = transport->send_pdu(conn,
(struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
(char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
ev->u.send_pdu.data_size);
+ mutex_unlock(&conn_mutex);
+ }
else
err = -EINVAL;
break;
@@ -4728,7 +4758,9 @@ static __init int iscsi_transport_init(void)
goto unregister_flashnode_bus;
}
- iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+ iscsi_eh_timer_workq = alloc_workqueue("%s",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 2, "iscsi_eh");
if (!iscsi_eh_timer_workq) {
err = -ENOMEM;
goto release_nls;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a793cb08d025..d90fefffe31b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -528,6 +528,21 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(max_write_same_blocks);
+static ssize_t
+zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ if (sdkp->device->type == TYPE_ZBC)
+ return sprintf(buf, "host-managed\n");
+ if (sdkp->zoned == 1)
+ return sprintf(buf, "host-aware\n");
+ if (sdkp->zoned == 2)
+ return sprintf(buf, "drive-managed\n");
+ return sprintf(buf, "none\n");
+}
+static DEVICE_ATTR_RO(zoned_cap);
+
static struct attribute *sd_disk_attrs[] = {
&dev_attr_cache_type.attr,
&dev_attr_FUA.attr,
@@ -541,6 +556,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_zeroing_mode.attr,
&dev_attr_max_write_same_blocks.attr,
&dev_attr_max_medium_access_timeouts.attr,
+ &dev_attr_zoned_cap.attr,
NULL,
};
ATTRIBUTE_GROUPS(sd_disk);
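
The new attribute maps the device type and the reported zoned field to a string. Assuming the usual scsi_disk class location, it surfaces as /sys/class/scsi_disk/<h:c:t:l>/zoned_cap:

	/*
	 *   TYPE_ZBC device   -> "host-managed"
	 *   sdkp->zoned == 1  -> "host-aware"
	 *   sdkp->zoned == 2  -> "drive-managed"
	 *   otherwise         -> "none"
	 */
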
@@ -1206,6 +1222,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
}
}
+ if (req_op(rq) == REQ_OP_ZONE_APPEND) {
+ ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
+ if (ret)
+ return ret;
+ }
+
fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
dix = scsi_prot_sg_count(cmd);
dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
@@ -1287,6 +1309,7 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
return sd_setup_flush_cmnd(cmd);
case REQ_OP_READ:
case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
return sd_setup_read_write_cmnd(cmd);
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
@@ -2055,7 +2078,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
out:
if (sd_is_zoned(sdkp))
- sd_zbc_complete(SCpnt, good_bytes, &sshdr);
+ good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
"sd_done: completed %d of %d bytes\n",
@@ -2955,6 +2978,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
* with partitions as regular block devices.
*/
q->limits.zoned = BLK_ZONED_NONE;
+ if (sdkp->zoned == 2 && sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Drive-managed SMR disk\n");
}
}
if (blk_queue_is_zoned(q) && sdkp->first_scan)
@@ -3372,6 +3398,10 @@ static int sd_probe(struct device *dev)
sdkp->first_scan = 1;
sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
+ error = sd_zbc_init_disk(sdkp);
+ if (error)
+ goto out_free_index;
+
sd_revalidate_disk(gd);
gd->flags = GENHD_FL_EXT_DEVT;
@@ -3409,6 +3439,7 @@ static int sd_probe(struct device *dev)
out_put:
put_disk(gd);
out_free:
+ sd_zbc_release_disk(sdkp);
kfree(sdkp);
out:
scsi_autopm_put_device(sdp);
@@ -3485,6 +3516,8 @@ static void scsi_disk_release(struct device *dev)
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
+ sd_zbc_release_disk(sdkp);
+
kfree(sdkp);
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 50fff0bf8c8e..3a74f4b45134 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -79,6 +79,12 @@ struct scsi_disk {
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
+ u32 *zones_wp_offset;
+ spinlock_t zones_wp_offset_lock;
+ u32 *rev_wp_offset;
+ struct mutex rev_mutex;
+ struct work_struct zone_wp_offset_work;
+ char *zone_wp_update_buf;
#endif
atomic_t openers;
sector_t capacity; /* size in logical blocks */
@@ -207,17 +213,35 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
+int sd_zbc_init_disk(struct scsi_disk *sdkp);
+void sd_zbc_release_disk(struct scsi_disk *sdkp);
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all);
-extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
- struct scsi_sense_hdr *sshdr);
+unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
+ struct scsi_sense_hdr *sshdr);
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
+blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
+ unsigned int nr_blocks);
+
#else /* CONFIG_BLK_DEV_ZONED */
+static inline int sd_zbc_init(void)
+{
+ return 0;
+}
+
+static inline int sd_zbc_init_disk(struct scsi_disk *sdkp)
+{
+ return 0;
+}
+
+static inline void sd_zbc_exit(void) {}
+static inline void sd_zbc_release_disk(struct scsi_disk *sdkp) {}
+
static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
unsigned char *buf)
{
@@ -233,9 +257,18 @@ static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
return BLK_STS_TARGET;
}
-static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
- unsigned int good_bytes,
- struct scsi_sense_hdr *sshdr) {}
+static inline unsigned int sd_zbc_complete(struct scsi_cmnd *cmd,
+ unsigned int good_bytes, struct scsi_sense_hdr *sshdr)
+{
+ return 0;
+}
+
+static inline blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd,
+ sector_t *lba,
+ unsigned int nr_blocks)
+{
+ return BLK_STS_TARGET;
+}
#define sd_zbc_report_zones NULL
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index f45c22b09726..6f7eba66687e 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -11,6 +11,7 @@
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
+#include <linux/mutex.h>
#include <asm/unaligned.h>
@@ -19,11 +20,36 @@
#include "sd.h"
+static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
+{
+ if (zone->type == ZBC_ZONE_TYPE_CONV)
+ return 0;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
+ return zone->wp - zone->start;
+ case BLK_ZONE_COND_FULL:
+ return zone->len;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_OFFLINE:
+ case BLK_ZONE_COND_READONLY:
+ default:
+ /*
+ * Offline and read-only zones do not have a valid
+		 * write pointer. Treat them as empty zones and use 0.
+ */
+ return 0;
+ }
+}
+
static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
unsigned int idx, report_zones_cb cb, void *data)
{
struct scsi_device *sdp = sdkp->device;
struct blk_zone zone = { 0 };
+ int ret;
zone.type = buf[0] & 0x0f;
zone.cond = (buf[1] >> 4) & 0xf;
@@ -39,7 +65,14 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
zone.cond == ZBC_ZONE_COND_FULL)
zone.wp = zone.start + zone.len;
- return cb(&zone, idx, data);
+ ret = cb(&zone, idx, data);
+ if (ret)
+ return ret;
+
+ if (sdkp->rev_wp_offset)
+ sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone);
+
+ return 0;
}
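
For illustration, the mapping from zone condition to cached write pointer offset can be exercised in isolation. A minimal userspace sketch (the struct and enum below are simplified stand-ins for the kernel's struct blk_zone, not the real definitions):

#include <stdio.h>

/* simplified stand-ins for the kernel's blk_zone definitions */
enum zcond { EMPTY, IMP_OPEN, EXP_OPEN, CLOSED, FULL, OFFLINE, READONLY };

struct zone {
	unsigned long long start, wp, len;
	enum zcond cond;
};

static unsigned long long wp_offset(const struct zone *z)
{
	switch (z->cond) {
	case IMP_OPEN: case EXP_OPEN: case CLOSED:
		return z->wp - z->start;   /* partially written zone */
	case FULL:
		return z->len;             /* write pointer at zone end */
	default:
		return 0;                  /* empty/offline/read-only */
	}
}

int main(void)
{
	struct zone z = { .start = 524288, .wp = 524800, .len = 524288,
			  .cond = IMP_OPEN };
	printf("wp offset: %llu sectors\n", wp_offset(&z)); /* 512 */
	return 0;
}
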
/**
@@ -136,8 +169,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
while (bufsize >= SECTOR_SIZE) {
buf = __vmalloc(bufsize,
- GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY,
- PAGE_KERNEL);
+ GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
if (buf) {
*buflen = bufsize;
return buf;
@@ -209,6 +241,136 @@ out:
return ret;
}
+static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ sector_t sector = blk_rq_pos(rq);
+
+ if (!sd_is_zoned(sdkp))
+ /* Not a zoned device */
+ return BLK_STS_IOERR;
+
+ if (sdkp->device->changed)
+ return BLK_STS_IOERR;
+
+ if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
+ /* Unaligned request */
+ return BLK_STS_IOERR;
+
+ return BLK_STS_OK;
+}
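
The unaligned-request test above relies on the zone size being a power of two, which the block layer already requires for zoned devices; `sector & (zone_sectors - 1)` is then equivalent to `sector % zone_sectors` without a division. A standalone check with illustrative values:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long zone_sectors = 524288;      /* 256 MiB zones, power of two */
	unsigned long long sector = 3 * zone_sectors;  /* zone-aligned start */

	assert((zone_sectors & (zone_sectors - 1)) == 0);      /* power of two */
	printf("aligned: %d\n", (sector & (zone_sectors - 1)) == 0);       /* 1 */
	printf("aligned: %d\n", ((sector + 8) & (zone_sectors - 1)) == 0); /* 0 */
	return 0;
}
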
+
+#define SD_ZBC_INVALID_WP_OFST (~0u)
+#define SD_ZBC_UPDATING_WP_OFST (SD_ZBC_INVALID_WP_OFST - 1)
+
+static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct scsi_disk *sdkp = data;
+
+ lockdep_assert_held(&sdkp->zones_wp_offset_lock);
+
+ sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone);
+
+ return 0;
+}
+
+static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
+{
+ struct scsi_disk *sdkp;
+ unsigned int zno;
+ int ret;
+
+ sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
+
+ spin_lock_bh(&sdkp->zones_wp_offset_lock);
+ for (zno = 0; zno < sdkp->nr_zones; zno++) {
+ if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
+ continue;
+
+ spin_unlock_bh(&sdkp->zones_wp_offset_lock);
+ ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
+ SD_BUF_SIZE,
+ zno * sdkp->zone_blocks, true);
+ spin_lock_bh(&sdkp->zones_wp_offset_lock);
+ if (!ret)
+ sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
+ zno, sd_zbc_update_wp_offset_cb,
+ sdkp);
+ }
+ spin_unlock_bh(&sdkp->zones_wp_offset_lock);
+
+ scsi_device_put(sdkp->device);
+}
+
+/**
+ * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command.
+ * @cmd: the command to setup
+ * @lba: the LBA to patch
+ * @nr_blocks: the number of LBAs to be written
+ *
+ * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND.
+ * sd_zbc_prepare_zone_append() handles the necessary zone write locking and
+ * patching of the LBA for an emulated ZONE_APPEND command.
+ *
+ * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will
+ * schedule a REPORT ZONES command and return %BLK_STS_DEV_RESOURCE so that
+ * the command is requeued until the update completes.
+ */
+blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
+ unsigned int nr_blocks)
+{
+ struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ unsigned int wp_offset, zno = blk_rq_zone_no(rq);
+ blk_status_t ret;
+
+ ret = sd_zbc_cmnd_checks(cmd);
+ if (ret != BLK_STS_OK)
+ return ret;
+
+ if (!blk_rq_zone_is_seq(rq))
+ return BLK_STS_IOERR;
+
+ /* Unlock of the write lock will happen in sd_zbc_complete() */
+ if (!blk_req_zone_write_trylock(rq))
+ return BLK_STS_ZONE_RESOURCE;
+
+ spin_lock_bh(&sdkp->zones_wp_offset_lock);
+ wp_offset = sdkp->zones_wp_offset[zno];
+ switch (wp_offset) {
+ case SD_ZBC_INVALID_WP_OFST:
+ /*
+ * We are about to schedule work to update a zone write pointer
+ * offset, which will cause the zone append command to be
+ * requeued. So make sure that the scsi device does not go away
+ * while the work is being processed.
+ */
+ if (scsi_device_get(sdkp->device)) {
+ ret = BLK_STS_IOERR;
+ break;
+ }
+ sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST;
+ schedule_work(&sdkp->zone_wp_offset_work);
+ fallthrough;
+ case SD_ZBC_UPDATING_WP_OFST:
+ ret = BLK_STS_DEV_RESOURCE;
+ break;
+ default:
+ wp_offset = sectors_to_logical(sdkp->device, wp_offset);
+ if (wp_offset + nr_blocks > sdkp->zone_blocks) {
+ ret = BLK_STS_IOERR;
+ break;
+ }
+
+ *lba += wp_offset;
+ }
+ spin_unlock_bh(&sdkp->zones_wp_offset_lock);
+ if (ret)
+ blk_req_zone_write_unlock(rq);
+ return ret;
+}
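
Each per-zone cache entry effectively moves through three states: a valid offset, SD_ZBC_INVALID_WP_OFST (~0u) after a failed command, and SD_ZBC_UPDATING_WP_OFST while the report-zones work is in flight. A minimal userspace model of that transition logic (not the kernel code; locking and work scheduling are elided):

#include <stdio.h>

#define INVALID_WP_OFST  (~0u)
#define UPDATING_WP_OFST (INVALID_WP_OFST - 1)

/* returns 0 if the append can be issued now, -1 if it must be requeued */
static int prepare_append(unsigned int *entry, unsigned long long *lba)
{
	switch (*entry) {
	case INVALID_WP_OFST:
		*entry = UPDATING_WP_OFST;   /* a refresh from disk is scheduled */
		/* fall through */
	case UPDATING_WP_OFST:
		return -1;                   /* BLK_STS_DEV_RESOURCE: retry later */
	default:
		*lba += *entry;              /* patch LBA with the cached offset */
		return 0;
	}
}

int main(void)
{
	unsigned int entry = INVALID_WP_OFST;
	unsigned long long lba = 1024;

	printf("%d\n", prepare_append(&entry, &lba));  /* -1, refresh scheduled */
	entry = 16;                                    /* refresh completed */
	printf("%d lba=%llu\n", prepare_append(&entry, &lba), lba); /* 0 lba=1040 */
	return 0;
}
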
+
/**
* sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
* can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
@@ -223,20 +385,14 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all)
{
struct request *rq = cmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t sector = blk_rq_pos(rq);
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t block = sectors_to_logical(sdkp->device, sector);
+ blk_status_t ret;
- if (!sd_is_zoned(sdkp))
- /* Not a zoned device */
- return BLK_STS_IOERR;
-
- if (sdkp->device->changed)
- return BLK_STS_IOERR;
-
- if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
- /* Unaligned request */
- return BLK_STS_IOERR;
+ ret = sd_zbc_cmnd_checks(cmd);
+ if (ret != BLK_STS_OK)
+ return ret;
cmd->cmd_len = 16;
memset(cmd->cmnd, 0, cmd->cmd_len);
@@ -255,16 +411,105 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
return BLK_STS_OK;
}
+static bool sd_zbc_need_zone_wp_update(struct request *rq)
+{
+ switch (req_op(rq)) {
+ case REQ_OP_ZONE_APPEND:
+ case REQ_OP_ZONE_FINISH:
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ return true;
+ case REQ_OP_WRITE:
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_WRITE_SAME:
+ return blk_rq_zone_is_seq(rq);
+ default:
+ return false;
+ }
+}
+
+/**
+ * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion
+ * @cmd: Completed command
+ * @good_bytes: Command reply bytes
+ *
+ * Called from sd_zbc_complete() to handle the update of the cached zone write
+ * pointer value in case an update is needed.
+ */
+static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
+ unsigned int good_bytes)
+{
+ int result = cmd->result;
+ struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ unsigned int zno = blk_rq_zone_no(rq);
+ enum req_opf op = req_op(rq);
+
+ /*
+	 * If we got an error for a command that requires updating the write
+ * pointer offset cache, we must mark the zone wp offset entry as
+ * invalid to force an update from disk the next time a zone append
+ * command is issued.
+ */
+ spin_lock_bh(&sdkp->zones_wp_offset_lock);
+
+ if (result && op != REQ_OP_ZONE_RESET_ALL) {
+ if (op == REQ_OP_ZONE_APPEND) {
+ /* Force complete completion (no retry) */
+ good_bytes = 0;
+ scsi_set_resid(cmd, blk_rq_bytes(rq));
+ }
+
+ /*
+ * Force an update of the zone write pointer offset on
+ * the next zone append access.
+ */
+ if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
+ sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST;
+ goto unlock_wp_offset;
+ }
+
+ switch (op) {
+ case REQ_OP_ZONE_APPEND:
+ rq->__sector += sdkp->zones_wp_offset[zno];
+ fallthrough;
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_WRITE_SAME:
+ case REQ_OP_WRITE:
+ if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
+ sdkp->zones_wp_offset[zno] +=
+ good_bytes >> SECTOR_SHIFT;
+ break;
+ case REQ_OP_ZONE_RESET:
+ sdkp->zones_wp_offset[zno] = 0;
+ break;
+ case REQ_OP_ZONE_FINISH:
+ sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp);
+ break;
+ case REQ_OP_ZONE_RESET_ALL:
+ memset(sdkp->zones_wp_offset, 0,
+ sdkp->nr_zones * sizeof(unsigned int));
+ break;
+ default:
+ break;
+ }
+
+unlock_wp_offset:
+ spin_unlock_bh(&sdkp->zones_wp_offset_lock);
+
+ return good_bytes;
+}
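
On a successful emulated zone append, the request's completion sector is patched from the zone start to the location where the data actually landed, and the cached offset then advances by the number of written sectors. A worked example with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long long zone_start = 2048;  /* start sector of the target zone */
	unsigned int wp_offset = 96;           /* cached offset before the append */
	unsigned int good_bytes = 8 * 4096;    /* 32 KiB written successfully */

	/* REQ_OP_ZONE_APPEND: report where the data actually landed */
	unsigned long long written_at = zone_start + wp_offset;

	/* then advance the cached write pointer offset */
	wp_offset += good_bytes >> 9;          /* SECTOR_SHIFT == 9 */

	printf("data at sector %llu, new wp offset %u\n", written_at, wp_offset);
	return 0;                              /* data at 2144, offset 160 */
}
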
+
/**
* sd_zbc_complete - ZBC command post processing.
* @cmd: Completed command
* @good_bytes: Command reply bytes
* @sshdr: command sense header
*
- * Called from sd_done(). Process report zones reply and handle reset zone
- * and write commands errors.
+ * Called from sd_done() to handle zone command errors and to update the
+ * cached zone write pointer offsets.
*/
-void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
+unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr)
{
int result = cmd->result;
@@ -280,7 +525,13 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
* so be quiet about the error.
*/
rq->rq_flags |= RQF_QUIET;
- }
+ } else if (sd_zbc_need_zone_wp_update(rq))
+ good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes);
+
+ if (req_op(rq) == REQ_OP_ZONE_APPEND)
+ blk_req_zone_write_unlock(rq);
+
+ return good_bytes;
}
/**
@@ -382,11 +633,67 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
return 0;
}
+static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+
+ swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
+}
+
+static int sd_zbc_revalidate_zones(struct scsi_disk *sdkp,
+ u32 zone_blocks,
+ unsigned int nr_zones)
+{
+ struct gendisk *disk = sdkp->disk;
+ int ret = 0;
+
+ /*
+	 * Serialize zone revalidation to guarantee exclusive updates of the
+	 * scsi disk data.
+ */
+ mutex_lock(&sdkp->rev_mutex);
+
+ /*
+ * Revalidate the disk zones to update the device request queue zone
+	 * bitmaps and the zone write pointer offset array. Do this only once
+	 * the device capacity is set, that is, on the second revalidate run
+	 * during disk scan, or when something changed during a normal revalidate.
+ */
+ if (sdkp->first_scan) {
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->nr_zones = nr_zones;
+ goto unlock;
+ }
+
+ if (sdkp->zone_blocks == zone_blocks &&
+ sdkp->nr_zones == nr_zones &&
+ disk->queue->nr_zones == nr_zones)
+ goto unlock;
+
+ sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_NOIO);
+ if (!sdkp->rev_wp_offset) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);
+
+ kvfree(sdkp->rev_wp_offset);
+ sdkp->rev_wp_offset = NULL;
+
+unlock:
+ mutex_unlock(&sdkp->rev_mutex);
+
+ return ret;
+}
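
Revalidation uses a double-buffer scheme: new offsets are built in rev_wp_offset while the I/O path may still consult zones_wp_offset, and blk_revalidate_disk_zones() invokes the callback to publish the new array with a single pointer swap. A generic sketch of the pattern (locking elided; the kernel relies on the callback running under the queue's revalidation protection):

#include <stdio.h>
#include <stdlib.h>

struct disk_state {
	unsigned int *live;    /* consulted by the I/O path */
	unsigned int *staged;  /* rebuilt during revalidation */
};

static void swap_buffers(struct disk_state *s)
{
	unsigned int *tmp = s->live;
	s->live = s->staged;
	s->staged = tmp;
}

int main(void)
{
	struct disk_state s;
	s.live = calloc(4, sizeof(unsigned int));
	s.staged = calloc(4, sizeof(unsigned int));

	s.staged[2] = 128;       /* freshly reported offsets land here */
	swap_buffers(&s);        /* publish in one pointer exchange */
	printf("zone 2 offset: %u\n", s.live[2]); /* 128 */

	free(s.live);
	free(s.staged);
	return 0;
}
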
+
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
struct gendisk *disk = sdkp->disk;
+ struct request_queue *q = disk->queue;
unsigned int nr_zones;
u32 zone_blocks = 0;
+ u32 max_append;
int ret;
if (!sd_is_zoned(sdkp))
@@ -407,35 +714,31 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
goto err;
/* The drive satisfies the kernel restrictions: set it up */
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue);
- blk_queue_required_elevator_features(sdkp->disk->queue,
- ELEVATOR_F_ZBD_SEQ_WRITE);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
/* READ16/WRITE16 is mandatory for ZBC disks */
sdkp->device->use_16_for_rw = 1;
sdkp->device->use_10_for_rw = 0;
+ ret = sd_zbc_revalidate_zones(sdkp, zone_blocks, nr_zones);
+ if (ret)
+ goto err;
+
/*
- * Revalidate the disk zone bitmaps once the block device capacity is
- * set on the second revalidate execution during disk scan and if
- * something changed when executing a normal revalidate.
+	 * On the first scan 'chunk_sectors' isn't set up yet, so calling
+ * blk_queue_max_zone_append_sectors() will result in a WARN(). Defer
+ * this setting to the second scan.
*/
- if (sdkp->first_scan) {
- sdkp->zone_blocks = zone_blocks;
- sdkp->nr_zones = nr_zones;
+ if (sdkp->first_scan)
return 0;
- }
- if (sdkp->zone_blocks != zone_blocks ||
- sdkp->nr_zones != nr_zones ||
- disk->queue->nr_zones != nr_zones) {
- ret = blk_revalidate_disk_zones(disk);
- if (ret != 0)
- goto err;
- sdkp->zone_blocks = zone_blocks;
- sdkp->nr_zones = nr_zones;
- }
+ max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
+ q->limits.max_segments << (PAGE_SHIFT - 9));
+ max_append = min_t(u32, max_append, queue_max_hw_sectors(q));
+
+ blk_queue_max_zone_append_sectors(q, max_append);
return 0;
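
The zone append limit is capped three ways: a command may not cross a zone (zone_blocks converted to 512-byte sectors), it must fit the segment budget (max_segments pages), and it cannot exceed the controller's max_hw_sectors. A worked example with plausible, purely illustrative numbers:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int zone_sectors = 524288;   /* 256 MiB zone in 512 B sectors */
	unsigned int max_segments = 128;
	unsigned int page_shift = 12;         /* 4 KiB pages */
	unsigned int max_hw_sectors = 2048;   /* 1 MiB controller limit */

	unsigned int max_append = min_u32(zone_sectors,
					  max_segments << (page_shift - 9));
	max_append = min_u32(max_append, max_hw_sectors);

	printf("max zone append: %u sectors\n", max_append); /* 1024 */
	return 0;
}
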
@@ -461,3 +764,28 @@ void sd_zbc_print_zones(struct scsi_disk *sdkp)
sdkp->nr_zones,
sdkp->zone_blocks);
}
+
+int sd_zbc_init_disk(struct scsi_disk *sdkp)
+{
+ if (!sd_is_zoned(sdkp))
+ return 0;
+
+ sdkp->zones_wp_offset = NULL;
+ spin_lock_init(&sdkp->zones_wp_offset_lock);
+ sdkp->rev_wp_offset = NULL;
+ mutex_init(&sdkp->rev_mutex);
+ INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
+ sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
+ if (!sdkp->zone_wp_update_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void sd_zbc_release_disk(struct scsi_disk *sdkp)
+{
+ kvfree(sdkp->zones_wp_offset);
+ sdkp->zones_wp_offset = NULL;
+ kfree(sdkp->zone_wp_update_buf);
+ sdkp->zone_wp_update_buf = NULL;
+}
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 713bce998b0e..3bdf0deb8f15 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -187,7 +187,7 @@ static inline void init_hpc_chain(struct ip22_hostdata *hdata)
hcp++;
dma += sizeof(struct hpc_chunk);
start += sizeof(struct hpc_chunk);
- };
+ }
hcp--;
hcp->desc.pnext = hdata->dma;
}
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index de0ab5fc8474..f4c666285bba 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -399,7 +399,7 @@ void snic_handle_link_event(struct snic *);
void snic_handle_link(struct work_struct *);
int snic_queue_exch_ver_req(struct snic *);
-int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
+void snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len);
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index 449b03f3bbd3..4cd86115cfb2 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -151,7 +151,7 @@ error:
/*
* snic_io_exch_ver_cmpl_handler
*/
-int
+void
snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
struct snic_req_info *rqi = NULL;
@@ -160,7 +160,6 @@ snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
u32 cmnd_id, hid, max_sgs;
ulong ctx = 0;
unsigned long flags;
- int ret = 0;
SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
@@ -224,8 +223,6 @@ exch_cmpl_end:
snic_release_untagged_req(snic, rqi);
SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);
-
- return ret;
} /* end of snic_io_exch_ver_cmpl_handler */
/*
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d2fe3fa470f9..4dcd735ea49e 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -51,6 +51,8 @@
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
@@ -344,10 +346,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
case ILLEGAL_REQUEST:
if (!(SCpnt->sense_buffer[0] & 0x90))
break;
- error_sector = (SCpnt->sense_buffer[3] << 24) |
- (SCpnt->sense_buffer[4] << 16) |
- (SCpnt->sense_buffer[5] << 8) |
- SCpnt->sense_buffer[6];
+ error_sector =
+ get_unaligned_be32(&SCpnt->sense_buffer[3]);
if (SCpnt->request->bio != NULL)
block_sectors =
bio_sectors(SCpnt->request->bio);
@@ -495,13 +495,9 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
SCpnt->sdb.length = this_count * s_size;
}
- SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
- SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
- SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
- SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+ put_unaligned_be32(block, &SCpnt->cmnd[2]);
SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
- SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
- SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+ put_unaligned_be16(this_count, &SCpnt->cmnd[7]);
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -794,9 +790,8 @@ static int sr_probe(struct device *dev)
set_capacity(disk, cd->capacity);
disk->private_data = &cd->driver;
disk->queue = sdev->request_queue;
- cd->cdi.disk = disk;
- if (register_cdrom(&cd->cdi))
+ if (register_cdrom(disk, &cd->cdi))
goto fail_put;
/*
@@ -854,8 +849,7 @@ static void get_sectorsize(struct scsi_cd *cd)
} else {
long last_written;
- cd->capacity = 1 + ((buffer[0] << 24) | (buffer[1] << 16) |
- (buffer[2] << 8) | buffer[3]);
+ cd->capacity = 1 + get_unaligned_be32(&buffer[0]);
/*
* READ_CAPACITY doesn't return the correct size on
* certain UDF media. If last_written is larger, use
@@ -866,8 +860,7 @@ static void get_sectorsize(struct scsi_cd *cd)
if (!cdrom_get_last_written(&cd->cdi, &last_written))
cd->capacity = max_t(long, cd->capacity, last_written);
- sector_size = (buffer[4] << 24) |
- (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+ sector_size = get_unaligned_be32(&buffer[4]);
switch (sector_size) {
/*
* HP 4020i CD-Recorder reports 2340 byte sectors
@@ -955,13 +948,13 @@ static void get_capabilities(struct scsi_cd *cd)
}
n = data.header_length + data.block_descriptor_length;
- cd->cdi.speed = ((buffer[n + 8] << 8) + buffer[n + 9]) / 176;
+ cd->cdi.speed = get_unaligned_be16(&buffer[n + 8]) / 176;
cd->readcd_known = 1;
cd->readcd_cdda = buffer[n + 5] & 0x01;
/* print some capability bits */
sr_printk(KERN_INFO, cd,
"scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n",
- ((buffer[n + 14] << 8) + buffer[n + 15]) / 176,
+ get_unaligned_be16(&buffer[n + 14]) / 176,
cd->cdi.speed,
buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */
buffer[n + 3] & 0x20 ? "dvd-ram " : "",
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c5f9b348b438..4bf4ab3b70f4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1457,7 +1457,6 @@ static int st_flush(struct file *filp, fl_owner_t id)
accessing this tape. */
static int st_release(struct inode *inode, struct file *filp)
{
- int result = 0;
struct scsi_tape *STp = filp->private_data;
if (STp->door_locked == ST_LOCKED_AUTO)
@@ -1470,9 +1469,9 @@ static int st_release(struct inode *inode, struct file *filp)
scsi_autopm_put_device(STp->device);
scsi_tape_put(STp);
- return result;
+ return 0;
}
-
+
/* The checks common to both reading and writing */
static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count)
{
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fb41636519ee..072ed8728657 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -621,6 +621,64 @@ get_in_err:
}
+static void storvsc_change_target_cpu(struct vmbus_channel *channel, u32 old,
+ u32 new)
+{
+ struct storvsc_device *stor_device;
+ struct vmbus_channel *cur_chn;
+ bool old_is_alloced = false;
+ struct hv_device *device;
+ unsigned long flags;
+ int cpu;
+
+ device = channel->primary_channel ?
+ channel->primary_channel->device_obj
+ : channel->device_obj;
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return;
+
+ /* See storvsc_do_io() -> get_og_chn(). */
+ spin_lock_irqsave(&device->channel->lock, flags);
+
+ /*
+	 * Determine whether the storvsc device has other channels assigned
+	 * to the "old" CPU, in order to update the alloced_cpus mask and
+	 * the stor_chns array accordingly.
+ */
+ if (device->channel != channel && device->channel->target_cpu == old) {
+ cur_chn = device->channel;
+ old_is_alloced = true;
+ goto old_is_alloced;
+ }
+ list_for_each_entry(cur_chn, &device->channel->sc_list, sc_list) {
+ if (cur_chn == channel)
+ continue;
+ if (cur_chn->target_cpu == old) {
+ old_is_alloced = true;
+ goto old_is_alloced;
+ }
+ }
+
+old_is_alloced:
+ if (old_is_alloced)
+ WRITE_ONCE(stor_device->stor_chns[old], cur_chn);
+ else
+ cpumask_clear_cpu(old, &stor_device->alloced_cpus);
+
+ /* "Flush" the stor_chns array. */
+ for_each_possible_cpu(cpu) {
+ if (stor_device->stor_chns[cpu] && !cpumask_test_cpu(
+ cpu, &stor_device->alloced_cpus))
+ WRITE_ONCE(stor_device->stor_chns[cpu], NULL);
+ }
+
+ WRITE_ONCE(stor_device->stor_chns[new], channel);
+ cpumask_set_cpu(new, &stor_device->alloced_cpus);
+
+ spin_unlock_irqrestore(&device->channel->lock, flags);
+}
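
After the old CPU's slot is either repointed or its bit cleared, the sweep above drops any stor_chns entry whose CPU is no longer in alloced_cpus, so storvsc_do_io() can never pick up a stale mapping. A small model of that sweep using a plain bitmask in place of a cpumask (illustrative only):

#include <stdio.h>

#define NCPUS 8

int main(void)
{
	void *chans[NCPUS] = { 0 };
	unsigned int alloced = 0;          /* stands in for alloced_cpus */
	int dummy1 = 1, dummy2 = 2;

	chans[1] = &dummy1; alloced |= 1u << 1;
	chans[5] = &dummy2; alloced |= 1u << 5;

	alloced &= ~(1u << 5);             /* CPU 5 lost its channel */

	for (int cpu = 0; cpu < NCPUS; cpu++)  /* "flush" the array */
		if (chans[cpu] && !(alloced & (1u << cpu)))
			chans[cpu] = NULL;

	printf("cpu1=%p cpu5=%p\n", chans[1], chans[5]); /* cpu5 now nil */
	return 0;
}
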
+
static void handle_sc_creation(struct vmbus_channel *new_sc)
{
struct hv_device *device = new_sc->primary_channel->device_obj;
@@ -648,6 +706,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
return;
}
+ new_sc->change_target_cpu_callback = storvsc_change_target_cpu;
+
/* Add the sub-channel to the array of available channels. */
stor_device->stor_chns[new_sc->target_cpu] = new_sc;
cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
@@ -876,6 +936,8 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
if (stor_device->stor_chns == NULL)
return -ENOMEM;
+ device->channel->change_target_cpu_callback = storvsc_change_target_cpu;
+
stor_device->stor_chns[device->channel->target_cpu] = device->channel;
cpumask_set_cpu(device->channel->target_cpu,
&stor_device->alloced_cpus);
@@ -1248,8 +1310,10 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
const struct cpumask *node_mask;
int num_channels, tgt_cpu;
- if (stor_device->num_sc == 0)
+ if (stor_device->num_sc == 0) {
+ stor_device->stor_chns[q_num] = stor_device->device->channel;
return stor_device->device->channel;
+ }
/*
* Our channel array is sparsely populated and we
@@ -1258,7 +1322,6 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
* The strategy is simple:
* I. Ensure NUMA locality
* II. Distribute evenly (best effort)
- * III. Mapping is persistent.
*/
node_mask = cpumask_of_node(cpu_to_node(q_num));
@@ -1268,8 +1331,10 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
if (cpumask_test_cpu(tgt_cpu, node_mask))
num_channels++;
}
- if (num_channels == 0)
+ if (num_channels == 0) {
+ stor_device->stor_chns[q_num] = stor_device->device->channel;
return stor_device->device->channel;
+ }
hash_qnum = q_num;
while (hash_qnum >= num_channels)
@@ -1295,6 +1360,7 @@ static int storvsc_do_io(struct hv_device *device,
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
struct vmbus_channel *outgoing_channel, *channel;
+ unsigned long flags;
int ret = 0;
const struct cpumask *node_mask;
int tgt_cpu;
@@ -1308,10 +1374,11 @@ static int storvsc_do_io(struct hv_device *device,
request->device = device;
/*
- * Select an an appropriate channel to send the request out.
+ * Select an appropriate channel to send the request out.
*/
- if (stor_device->stor_chns[q_num] != NULL) {
- outgoing_channel = stor_device->stor_chns[q_num];
+ /* See storvsc_change_target_cpu(). */
+ outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
+ if (outgoing_channel != NULL) {
if (outgoing_channel->target_cpu == q_num) {
/*
* Ideally, we want to pick a different channel if
@@ -1324,7 +1391,10 @@ static int storvsc_do_io(struct hv_device *device,
continue;
if (tgt_cpu == q_num)
continue;
- channel = stor_device->stor_chns[tgt_cpu];
+ channel = READ_ONCE(
+ stor_device->stor_chns[tgt_cpu]);
+ if (channel == NULL)
+ continue;
if (hv_get_avail_to_write_percent(
&channel->outbound)
> ring_avail_percent_lowater) {
@@ -1350,7 +1420,10 @@ static int storvsc_do_io(struct hv_device *device,
for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
if (cpumask_test_cpu(tgt_cpu, node_mask))
continue;
- channel = stor_device->stor_chns[tgt_cpu];
+ channel = READ_ONCE(
+ stor_device->stor_chns[tgt_cpu]);
+ if (channel == NULL)
+ continue;
if (hv_get_avail_to_write_percent(
&channel->outbound)
> ring_avail_percent_lowater) {
@@ -1360,7 +1433,14 @@ static int storvsc_do_io(struct hv_device *device,
}
}
} else {
+ spin_lock_irqsave(&device->channel->lock, flags);
+ outgoing_channel = stor_device->stor_chns[q_num];
+ if (outgoing_channel != NULL) {
+ spin_unlock_irqrestore(&device->channel->lock, flags);
+ goto found_channel;
+ }
outgoing_channel = get_og_chn(stor_device, q_num);
+ spin_unlock_irqrestore(&device->channel->lock, flags);
}
found_channel:
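
The hot path now reads stor_chns[q_num] locklessly with READ_ONCE(); only on a miss does it take the channel lock, re-check the slot, and fall back to get_og_chn(). This is the classic double-checked pattern; a compact analogy using C11 atomics and threads in place of the kernel primitives (where <threads.h> is available; this is not the driver code):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static _Atomic(int *) slot;   /* stands in for stor_chns[q_num] */
static mtx_t lock;
static int the_channel = 42;

static int *pick_channel(void)
{
	int *ch = atomic_load_explicit(&slot, memory_order_relaxed);
	if (ch)
		return ch;                 /* fast path: no lock taken */

	mtx_lock(&lock);
	ch = atomic_load_explicit(&slot, memory_order_relaxed);
	if (!ch) {                         /* re-check under the lock */
		ch = &the_channel;         /* get_og_chn() equivalent */
		atomic_store_explicit(&slot, ch, memory_order_relaxed);
	}
	mtx_unlock(&lock);
	return ch;
}

int main(void)
{
	mtx_init(&lock, mtx_plain);
	printf("%d\n", *pick_channel()); /* 42 */
	mtx_destroy(&lock);
	return 0;
}
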
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
index 5216d228cdd9..46bb905b4d6a 100644
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -32,14 +32,14 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
- return ret;
+ goto disable_pm;
}
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "Cannot claim MPHY clock.\n");
- return PTR_ERR(clk);
+ goto clk_err;
}
clk_rate = clk_get_rate(clk);
if (clk_rate == 26000000)
@@ -54,16 +54,23 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
dev);
if (ret) {
dev_err(dev, "failed to populate child nodes %d\n", ret);
- pm_runtime_put_sync(dev);
+ goto clk_err;
}
return ret;
+
+clk_err:
+ pm_runtime_put_sync(dev);
+disable_pm:
+ pm_runtime_disable(dev);
+ return ret;
}
static int ti_j721e_ufs_remove(struct platform_device *pdev)
{
of_platform_depopulate(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 673c16596fb2..d56ce8d97d4e 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -30,6 +30,12 @@
#define ufs_mtk_device_reset_ctrl(high, res) \
ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
+static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
+ UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
+ END_FIX
+};
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -73,9 +79,9 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
if (status == PRE_CHANGE) {
if (host->unipro_lpm)
- hba->hba_enable_delay_us = 0;
+ hba->vps->hba_enable_delay_us = 0;
else
- hba->hba_enable_delay_us = 600;
+ hba->vps->hba_enable_delay_us = 600;
}
return 0;
@@ -263,6 +269,10 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clock-gating */
hba->caps |= UFSHCD_CAP_CLK_GATING;
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
+ hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -555,10 +565,8 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
- if (mid == UFS_VENDOR_SAMSUNG) {
- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+ if (mid == UFS_VENDOR_SAMSUNG)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
- }
/*
* Decide waiting time before gating reference clock and
@@ -575,6 +583,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
return 0;
}
+static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 mid = dev_info->wmanufacturerid;
+
+ ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+ if (mid == UFS_VENDOR_SAMSUNG)
+ hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+}
+
/**
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
@@ -589,6 +608,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
+ .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
.suspend = ufs_mtk_suspend,
.resume = ufs_mtk_resume,
.dbg_register_dump = ufs_mtk_dbg_register_dump,
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 19aa5c44e0da..2e6ddb5cdfc2 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -572,7 +572,6 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
- int ret = 0;
if (ufs_qcom_is_link_off(hba)) {
/*
@@ -587,7 +586,7 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufs_qcom_disable_lane_clks(host);
}
- return ret;
+ return 0;
}
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
@@ -1071,6 +1070,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba->caps |= UFSHCD_CAP_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+ hba->caps |= UFSHCD_CAP_WB_EN;
if (host->hw_ver.major >= 0x2) {
host->caps = UFS_QCOM_CAP_QUNIPRO |
@@ -1658,11 +1658,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
/* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_testbus_read(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_print_unipro_testbus(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
}
/**
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 92a63eebdca9..2d71d232a69d 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -276,6 +276,10 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
+UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
+UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
+UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
+UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);
static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_device_type.attr,
@@ -304,6 +308,10 @@ static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_number_of_secure_wpa.attr,
&dev_attr_psa_max_data_size.attr,
&dev_attr_psa_state_timeout.attr,
+ &dev_attr_ext_feature_sup.attr,
+ &dev_attr_wb_presv_us_en.attr,
+ &dev_attr_wb_type.attr,
+ &dev_attr_wb_shared_alloc_units.attr,
NULL,
};
@@ -373,6 +381,12 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
_ENM4_CAP_ADJ_FCTR, 2);
+UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
+UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);
+
static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_raw_device_capacity.attr,
@@ -404,6 +418,11 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
&dev_attr_enh4_memory_max_alloc_units.attr,
&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
+ &dev_attr_wb_max_alloc_units.attr,
+ &dev_attr_wb_max_wb_luns.attr,
+ &dev_attr_wb_buff_cap_adj.attr,
+ &dev_attr_wb_sup_red_type.attr,
+ &dev_attr_wb_sup_wb_type.attr,
NULL,
};
@@ -603,20 +622,29 @@ static const struct attribute_group ufs_sysfs_string_descriptors_group = {
.attrs = ufs_sysfs_string_descriptors,
};
+static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
+{
+ return ((idn >= QUERY_FLAG_IDN_WB_EN) &&
+ (idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8));
+}
+
#define UFS_FLAG(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
bool flag; \
+ u8 index = 0; \
int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
+ if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
+ index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
- QUERY_FLAG_IDN##_uname, &flag); \
+ QUERY_FLAG_IDN##_uname, index, &flag); \
pm_runtime_put_sync(hba->dev); \
if (ret) \
return -EINVAL; \
- return sprintf(buf, "%s\n", flag ? "true" : "false"); \
+ return sprintf(buf, "%s\n", flag ? "true" : "false"); \
} \
static DEVICE_ATTR_RO(_name)
@@ -628,6 +656,9 @@ UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
UFS_FLAG(busy_rtc, _BUSY_RTC);
UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
+UFS_FLAG(wb_enable, _WB_EN);
+UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
+UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_device_init.attr,
@@ -638,6 +669,9 @@ static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_phy_resource_removal.attr,
&dev_attr_busy_rtc.attr,
&dev_attr_disable_fw_update.attr,
+ &dev_attr_wb_enable.attr,
+ &dev_attr_wb_flush_en.attr,
+ &dev_attr_wb_flush_during_h8.attr,
NULL,
};
@@ -646,6 +680,12 @@ static const struct attribute_group ufs_sysfs_flags_group = {
.attrs = ufs_sysfs_device_flags,
};
+static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
+{
+ return ((idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS) &&
+ (idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE));
+}
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -653,9 +693,12 @@ static ssize_t _name##_show(struct device *dev, \
struct ufs_hba *hba = dev_get_drvdata(dev); \
u32 value; \
int ret; \
+ u8 index = 0; \
+ if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
+ index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
- QUERY_ATTR_IDN##_uname, 0, 0, &value); \
+ QUERY_ATTR_IDN##_uname, index, 0, &value); \
pm_runtime_put_sync(hba->dev); \
if (ret) \
return -EINVAL; \
@@ -679,6 +722,11 @@ UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
+UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
+UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
+UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
+UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
+
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
@@ -697,6 +745,10 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_ffu_status.attr,
&dev_attr_psa_state.attr,
&dev_attr_psa_data_size.attr,
+ &dev_attr_wb_flush_status.attr,
+ &dev_attr_wb_avail_buf.attr,
+ &dev_attr_wb_life_time_est.attr,
+ &dev_attr_wb_cur_buf.attr,
NULL,
};
@@ -748,6 +800,8 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
+UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
+
static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_boot_lun_id.attr,
@@ -763,6 +817,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_physical_memory_resourse_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
+ &dev_attr_wb_buf_alloc_units.attr,
NULL,
};
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 990cb48e2403..c70845d41449 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -64,6 +64,9 @@
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
+/* A WriteBooster buffer can be configured only for logical units 0 to 7 */
+#define UFS_UPIU_MAX_WB_LUN_ID 8
+
/* Well known logical unit id in LUN field of UPIU */
enum {
UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
@@ -140,6 +143,9 @@ enum flag_idn {
QUERY_FLAG_IDN_BUSY_RTC = 0x09,
QUERY_FLAG_IDN_RESERVED3 = 0x0A,
QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+ QUERY_FLAG_IDN_WB_EN = 0x0E,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
};
/* Attribute idn for Query requests */
@@ -168,6 +174,10 @@ enum attr_idn {
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
+ QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
};
/* Descriptor idn for Query requests */
@@ -191,9 +201,9 @@ enum desc_header_offset {
};
enum ufs_desc_def_size {
- QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x59,
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
- QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x2D,
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
QUERY_DESC_POWER_DEF_SIZE = 0x62,
@@ -219,6 +229,7 @@ enum unit_desc_param {
UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
};
/* Device descriptor parameters offsets in bytes*/
@@ -258,6 +269,10 @@ enum device_desc_param {
DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
DEVICE_DESC_PARAM_PSA_TMT = 0x29,
DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
+ DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
+ DEVICE_DESC_PARAM_WB_TYPE = 0x54,
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
};
/* Interconnect descriptor parameters offsets in bytes*/
@@ -302,6 +317,11 @@ enum geometry_desc_param {
GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
+ GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
+ GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
+ GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
+ GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
};
/* Health descriptor parameters offsets in bytes*/
@@ -313,6 +333,12 @@ enum health_desc_param {
HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
};
+/* WriteBooster buffer mode */
+enum {
+ WB_BUF_MODE_LU_DEDICATED = 0x0,
+ WB_BUF_MODE_SHARED = 0x1,
+};
+
/*
* Logical Unit Write Protect
* 00h: LU not write protected
@@ -333,6 +359,11 @@ enum {
UFSHCD_AMP = 3,
};
+/* Possible values for dExtendedUFSFeaturesSupport */
+enum {
+ UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
+};
+
#define POWER_DESC_MAX_SIZE 0x62
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
@@ -447,6 +478,8 @@ enum ufs_dev_pwr_mode {
UFS_POWERDOWN_PWR_MODE = 3,
};
+#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
+
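
UFS_WB_BUF_REMAIN_PERCENT() converts a human-readable percentage into the raw attribute scale: assuming the dAvailableWriteBoosterBufferSize attribute reports the remaining buffer in 10% steps (raw values 0..10), as this macro implies, a 40% threshold becomes the raw value 4. A quick check:

#include <stdio.h>

#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)

int main(void)
{
	/* threshold used by ufs_hba_vps below: flush while avail < 40% */
	unsigned int threshold = UFS_WB_BUF_REMAIN_PERCENT(40); /* raw 4 */
	unsigned int avail_buf = 3; /* device reports 30% remaining */

	printf("keep flushing: %d\n", avail_buf < threshold); /* 1 */
	return 0;
}
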
/**
* struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
@@ -532,11 +565,17 @@ struct ufs_dev_info {
bool is_lu_power_on_wp;
/* Maximum number of general LU supported by the UFS device */
u8 max_lu_supported;
+ u8 wb_dedicated_lu;
u16 wmanufacturerid;
/*UFS device Product Name */
u8 *model;
u16 wspecversion;
u32 clk_gating_wait_us;
+ u32 d_ext_ufs_feature_sup;
+ u8 b_wb_buffer_type;
+ u32 d_wb_alloc_units;
+ bool b_rpm_dev_flush_capable;
+ u8 b_presrv_uspc_en;
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index df7a1e6805a3..e3175a63c676 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -101,4 +101,11 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
+/*
+ * Some pre-3.1 UFS devices can support extended features by upgrading
+ * the firmware. Enable this quirk to make UFS core driver probe and enable
+ * supported features on such devices.
+ */
+#define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 698e8d20b4ba..5db18f444ea9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -48,6 +48,8 @@
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"
+#include <asm/unaligned.h>
+#include <linux/blkdev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -92,6 +94,9 @@
/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000
+/* Default delay of RPM device flush delayed work */
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
+
/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
@@ -251,6 +256,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
return tag >= 0 && tag < hba->nutrs;
@@ -272,6 +283,25 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
+static inline void ufshcd_wb_config(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return;
+
+ ret = ufshcd_wb_ctrl(hba, true);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
+ else
+ dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
+ ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
+ if (ret)
+ dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
+ __func__, ret);
+ ufshcd_wb_toggle_flush(hba, true);
+}
+
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
@@ -535,21 +565,21 @@ void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
-/*
+/**
* ufshcd_wait_for_register - wait for register value to change
- * @hba - per-adapter interface
- * @reg - mmio register offset
- * @mask - mask to apply to read register value
- * @val - wait condition
- * @interval_us - polling interval in microsecs
- * @timeout_ms - timeout in millisecs
- * @can_sleep - perform sleep or just spin
+ * @hba: per-adapter interface
+ * @reg: mmio register offset
+ * @mask: mask to apply to the read register value
+ * @val: value to wait for
+ * @interval_us: polling interval in microseconds
+ * @timeout_ms: timeout in milliseconds
*
- * Returns -ETIMEDOUT on error, zero on success
+ * Return:
+ * -ETIMEDOUT on error, zero on success.
*/
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep)
+ unsigned long timeout_ms)
{
int err = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -558,10 +588,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
val = val & mask;
while ((ufshcd_readl(hba, reg) & mask) != val) {
- if (can_sleep)
- usleep_range(interval_us, interval_us + 50);
- else
- udelay(interval_us);
+ usleep_range(interval_us, interval_us + 50);
if (time_after(jiffies, timeout)) {
if ((ufshcd_readl(hba, reg) & mask) != val)
err = -ETIMEDOUT;
@@ -1150,10 +1177,17 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
- if (ret)
+ if (ret) {
ufshcd_scale_clks(hba, false);
+ goto out_unprepare;
+ }
}
+ /* Enable Write Booster if we have scaled up else disable it */
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_wb_ctrl(hba, scale_up);
+ down_write(&hba->clk_scaling_lock);
+
out_unprepare:
ufshcd_clock_scaling_unprepare(hba);
out:
@@ -1319,23 +1353,6 @@ start_window:
return 0;
}
-static struct devfreq_dev_profile ufs_devfreq_profile = {
- .polling_ms = 100,
- .target = ufshcd_devfreq_target,
- .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
-
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
-static struct devfreq_simple_ondemand_data ufs_ondemand_data = {
- .upthreshold = 70,
- .downdifferential = 5,
-};
-
-static void *gov_data = &ufs_ondemand_data;
-#else
-static void *gov_data; /* NULL */
-#endif
-
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
struct list_head *clk_list = &hba->clk_list_head;
@@ -1351,12 +1368,12 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
dev_pm_opp_add(hba->dev, clki->min_freq, 0);
dev_pm_opp_add(hba->dev, clki->max_freq, 0);
- ufshcd_vops_config_scaling_param(hba, &ufs_devfreq_profile,
- gov_data);
+ ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
+ &hba->vps->ondemand_data);
devfreq = devfreq_add_device(hba->dev,
- &ufs_devfreq_profile,
+ &hba->vps->devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
- gov_data);
+ &hba->vps->ondemand_data);
if (IS_ERR(devfreq)) {
ret = PTR_ERR(devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
@@ -2560,7 +2577,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
*/
err = ufshcd_wait_for_register(hba,
REG_UTP_TRANSFER_REQ_DOOR_BELL,
- mask, ~mask, 1000, 1000, true);
+ mask, ~mask, 1000, 1000);
return err;
}
@@ -2747,13 +2764,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
- enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+ enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
int ret;
int retries;
for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
- ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
if (ret)
dev_dbg(hba->dev,
"%s: failed with error %d, retries %d\n",
@@ -2774,16 +2791,17 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @hba: per-adapter instance
* @opcode: flag query to perform
* @idn: flag idn to access
+ * @index: flag index to access
* @flag_res: the flag value after the query request completes
*
* Returns 0 for success, non-zero in case of failure
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res)
+ enum flag_idn idn, u8 index, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
- int err, index = 0, selector = 0;
+ int err, selector = 0;
int timeout = QUERY_REQ_TIMEOUT;
BUG_ON(!hba);
@@ -3223,7 +3241,7 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba,
struct uc_string_id {
u8 len;
u8 type;
- wchar_t uc[0];
+ wchar_t uc[];
} __packed;
/* replace non-printable or non-ASCII characters with spaces */
@@ -4137,10 +4155,10 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
int i;
int err;
- bool flag_res = 1;
+ bool flag_res = true;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
if (err) {
dev_err(hba->dev,
"%s setting fDeviceInit flag failed with error %d\n",
@@ -4151,7 +4169,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
/* poll for max. 1000 iterations for fDeviceInit flag to clear */
for (i = 0; i < 1000 && !err && flag_res; i++)
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
if (err)
dev_err(hba->dev,
@@ -4229,16 +4247,23 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
* ufshcd_hba_stop - Send controller to reset state
* @hba: per adapter instance
- * @can_sleep: perform sleep or just spin
*/
-static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
+ unsigned long flags;
int err;
+ /*
+ * Obtain the host lock to prevent that the controller is disabled
+ * while the UFS interrupt handler is active on another CPU.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
- 10, 1, can_sleep);
+ 10, 1);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
@@ -4259,7 +4284,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
if (!ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
/* UniPro link is disabled at this point */
ufshcd_set_link_off(hba);
@@ -4279,7 +4304,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
* instruction might be read back.
* This delay can be changed based on the controller.
*/
- ufshcd_delay_us(hba->hba_enable_delay_us, 100);
+ ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
/* wait for the host controller to complete initialization */
retry = 50;
@@ -4966,7 +4991,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
goto out;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to enable bkops %d\n",
__func__, err);
@@ -5016,7 +5041,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to disable bkops %d\n",
__func__, err);
@@ -5161,6 +5186,190 @@ out:
__func__, err);
}
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+{
+ int ret;
+ u8 index;
+ enum query_opcode opcode;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return 0;
+
+ if (!(enable ^ hba->wb_enabled))
+ return 0;
+ if (enable)
+ opcode = UPIU_QUERY_OPCODE_SET_FLAG;
+ else
+ opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, opcode,
+ QUERY_FLAG_IDN_WB_EN, index, NULL);
+ if (ret) {
+ dev_err(hba->dev, "%s write booster %s failed %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+ return ret;
+ }
+
+ hba->wb_enabled = enable;
+ dev_dbg(hba->dev, "%s write booster %s %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+
+ return ret;
+}
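
The early return uses `!(enable ^ hba->wb_enabled)` to detect a no-op: XOR of two booleans is true exactly when they differ, so the negated test fires when the requested state already matches the current one. Written out equivalently:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool wb_enabled = true;   /* current state */
	bool enable = true;       /* requested state */

	/* !(enable ^ wb_enabled) is the same test as (enable == wb_enabled) */
	if (!(enable ^ wb_enabled))
		printf("no-op, state already %s\n", enable ? "on" : "off");
	return 0;
}
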
+
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+{
+ int val;
+ u8 index;
+
+ if (set)
+ val = UPIU_QUERY_OPCODE_SET_FLAG;
+ else
+ val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ return ufshcd_query_flag_retry(hba, val,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
+ index, NULL);
+}
+
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+{
+ if (enable)
+ ufshcd_wb_buf_flush_enable(hba);
+ else
+ ufshcd_wb_buf_flush_disable(hba);
+}
+
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+{
+ int ret;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
+ return 0;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+ index, NULL);
+ if (ret)
+ dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
+ __func__, ret);
+ else
+ hba->wb_buf_flush_enabled = true;
+
+ dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
+ return ret;
+}
+
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
+{
+ int ret;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
+ return 0;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+ index, NULL);
+ if (ret) {
+ dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
+ __func__, ret);
+ } else {
+ hba->wb_buf_flush_enabled = false;
+ dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+ u32 avail_buf)
+{
+ u32 cur_buf;
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
+ index, 0, &cur_buf);
+ if (ret) {
+ dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (!cur_buf) {
+ dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
+ cur_buf);
+ return false;
+ }
+	/* Keep flushing while the available buffer is below the threshold */
+ if (avail_buf < hba->vps->wb_flush_threshold)
+ return true;
+
+ return false;
+}
+
+static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
+{
+ int ret;
+ u32 avail_buf;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return false;
+ /*
+	 * The UFS device needs VCC to be ON to flush.
+	 * With user-space reduction enabled, it is enough to decide on
+	 * flushing by checking only the available buffer; the threshold
+	 * used here corresponds to the buffer being more than 90% full.
+	 * With user-space preservation enabled, the current buffer size
+	 * (dCurrentWriteBoosterBufferSize) must be checked as well, since
+	 * the WB buffer can shrink as the disk fills up, and there is no
+	 * point in keeping VCC on when the current buffer is empty.
+ */
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
+ index, 0, &avail_buf);
+ if (ret) {
+ dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (!hba->dev_info.b_presrv_uspc_en) {
+ if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
+ return true;
+ return false;
+ }
+
+ return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+}
+
+static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(to_delayed_work(work),
+ struct ufs_hba,
+ rpm_dev_flush_recheck_work);
+ /*
+	 * To prevent unnecessary VCC power drain after the device finishes a
+	 * WriteBooster buffer flush or Auto BKOPs, force a runtime resume after
+	 * a delay so that the flush threshold is rechecked on the next runtime
+	 * suspend.
+ */
+ pm_runtime_get_sync(hba->dev);
+ pm_runtime_put_sync(hba->dev);
+}
+
/**
* ufshcd_exception_event_handler - handle exceptions raised by device
* @work: pointer to work data
@@ -5723,7 +5932,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
- mask, 0, 1000, 1000, true);
+ mask, 0, 1000, 1000);
out:
return err;
}
@@ -6299,8 +6508,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
+ ufshcd_hba_stop(hba);
+
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_hba_stop(hba, false);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
@@ -6603,6 +6813,93 @@ out:
return ret;
}
+static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u8 lun;
+ u32 d_lu_wb_buf_alloc;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return;
+
+ if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
+ goto wb_disabled;
+
+ hba->dev_info.d_ext_ufs_feature_sup =
+ get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+
+ if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
+ goto wb_disabled;
+
+ /*
+	 * WB may be supported but not configured while provisioning. The spec
+	 * says that in dedicated WB buffer mode at most one LUN has a WB
+	 * buffer configured; both the shared and the LU-dedicated buffer
+	 * modes are handled below.
+ */
+ hba->dev_info.b_wb_buffer_type =
+ desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+
+ hba->dev_info.b_presrv_uspc_en =
+ desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
+
+ if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_SHARED) {
+ hba->dev_info.d_wb_alloc_units =
+ get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
+ if (!hba->dev_info.d_wb_alloc_units)
+ goto wb_disabled;
+ } else {
+ for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
+ d_lu_wb_buf_alloc = 0;
+ ufshcd_read_unit_desc_param(hba,
+ lun,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
+ (u8 *)&d_lu_wb_buf_alloc,
+ sizeof(d_lu_wb_buf_alloc));
+ if (d_lu_wb_buf_alloc) {
+ hba->dev_info.wb_dedicated_lu = lun;
+ break;
+ }
+ }
+
+ if (!d_lu_wb_buf_alloc)
+ goto wb_disabled;
+ }
+ return;
+
+wb_disabled:
+ hba->caps &= ~UFSHCD_CAP_WB_EN;
+}
+
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
+{
+ struct ufs_dev_fix *f;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ if (!fixups)
+ return;
+
+ for (f = fixups; f->quirk; f++) {
+ if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
+ f->wmanufacturerid == UFS_ANY_VENDOR) &&
+ ((dev_info->model &&
+ STR_PRFX_EQUAL(f->model, dev_info->model)) ||
+ !strcmp(f->model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+}
+EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
+
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
+{
+ /* fix by general quirk table */
+ ufshcd_fixup_dev_quirks(hba, ufs_fixups);
+
+ /* allow vendors to fix quirks */
+ ufshcd_vops_fixup_dev_quirks(hba);
+}
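For reference, fixup-table entries use the UFS_FIX() initializer from ufs_quirks.h and are terminated with END_FIX. A hedged sketch of a vendor-side table fed to the newly exported helper (all names here are invented for illustration):

/* Hypothetical vendor fixup table and vop callback (illustrative only). */
static struct ufs_dev_fix my_vendor_fixups[] = {
	UFS_FIX(UFS_ANY_VENDOR, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};

static void my_vendor_fixup_dev_quirks(struct ufs_hba *hba)
{
	/* Called via vops->fixup_dev_quirks from ufs_fixup_device_setup(). */
	ufshcd_fixup_dev_quirks(hba, my_vendor_fixups);
}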
+
static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
@@ -6639,6 +6936,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
@@ -6647,6 +6945,17 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ ufs_fixup_device_setup(hba);
+
+ /*
+ * Probe WB only for UFS 2.2 and UFS 3.1 (and later) devices, or for
+ * UFS devices with the UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
+ * quirk enabled.
+ */
+ if (dev_info->wspecversion >= 0x310 ||
+ dev_info->wspecversion == 0x220 ||
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))
+ ufshcd_wb_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -6666,21 +6975,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
dev_info->model = NULL;
}
-static void ufs_fixup_device_setup(struct ufs_hba *hba)
-{
- struct ufs_dev_fix *f;
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- for (f = ufs_fixups; f->quirk; f++) {
- if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
- f->wmanufacturerid == UFS_ANY_VENDOR) &&
- ((dev_info->model &&
- STR_PRFX_EQUAL(f->model, dev_info->model)) ||
- !strcmp(f->model, UFS_ANY_MODEL)))
- hba->dev_quirks |= f->quirk;
- }
-}
-
/**
* ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
* @hba: per-adapter instance
@@ -6996,9 +7290,6 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
bool flag;
int ret;
- /* Clear any previous UFS device information */
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
-
/* Init check for device descriptor sizes */
ufshcd_init_desc_sizes(hba);
@@ -7017,10 +7308,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
ufshcd_get_ref_clk_gating_wait(hba);
- ufs_fixup_device_setup(hba);
-
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
hba->dev_info.f_power_on_wp_en = flag;
/* Probe maximum power mode co-supported by both UFS host and device */
@@ -7149,6 +7438,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ ufshcd_wb_config(hba);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
@@ -7195,6 +7485,16 @@ static const struct attribute_group *ufshcd_driver_groups[] = {
NULL,
};
+static struct ufs_hba_variant_params ufs_hba_vps = {
+ .hba_enable_delay_us = 1000,
+ .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
+ .devfreq_profile.polling_ms = 100,
+ .devfreq_profile.target = ufshcd_devfreq_target,
+ .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
+ .ondemand_data.upthreshold = 70,
+ .ondemand_data.downdifferential = 5,
+};
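Host drivers reach these defaults through hba->vps, which ufshcd_init() points at this table below. The preserved-user-space helper referenced earlier presumably compares the available buffer against wb_flush_threshold; a hedged sketch of that comparison (an assumption here, since the real helper's body is outside this hunk):

/* Illustrative consumer of the variant params (not the in-tree helper):
 * enough WB buffer remains while avail_buf is at or above the
 * configured flush threshold. */
static bool wb_above_flush_threshold(struct ufs_hba *hba, u32 avail_buf)
{
	return avail_buf >= hba->vps->wb_flush_threshold;
}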
+
static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
@@ -7774,7 +8074,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* Change controller state to "reset state" which
* should also put the link in off/reset state
*/
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
/*
* TODO: Check if we need any delay to make sure that
* controller is reset
@@ -7809,6 +8109,9 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
*
* Ignore the error returned by ufshcd_toggle_vreg() as the device is
* anyway in a low-power state, which saves some power.
+ *
+ * If Write Booster is enabled and the device needs to flush the WB
+ * buffer OR if bkops status is urgent for WB, keep Vcc on.
*/
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
!hba->dev_info.is_lu_power_on_wp) {
@@ -7938,16 +8241,31 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
}
- }
+ /*
+ * If device needs to do BKOP or WB buffer flush during
+ * Hibern8, keep device power mode as "active power mode"
+ * and VCC supply.
+ */
+ hba->dev_info.b_rpm_dev_flush_capable =
+ hba->auto_bkops_enabled ||
+ (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
+ ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
+ ufshcd_is_auto_hibern8_enabled(hba))) &&
+ ufshcd_wb_need_flush(hba));
+ }
+
+ if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
+ if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op)) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
- if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
- ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
- !ufshcd_is_runtime_pm(pm_op))) {
- /* ensure that bkops is disabled */
- ufshcd_disable_auto_bkops(hba);
- ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
- if (ret)
- goto enable_gating;
+ if (!hba->dev_info.b_rpm_dev_flush_capable) {
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
}
flush_work(&hba->eeh_work);
@@ -8000,9 +8318,16 @@ enable_gating:
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
+ hba->dev_info.b_rpm_dev_flush_capable = false;
ufshcd_release(hba);
out:
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
+ msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
+ }
+
hba->pm_op_in_progress = 0;
+
if (ret)
ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
return ret;
@@ -8055,9 +8380,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
else
goto vendor_suspend;
} else if (ufshcd_is_link_off(hba)) {
- ret = ufshcd_host_reset_and_restore(hba);
/*
- * ufshcd_host_reset_and_restore() should have already
+ * A full initialization of the host and the device is
+ * required since the link was put to off during suspend.
+ */
+ ret = ufshcd_reset_and_restore(hba);
+ /*
+ * ufshcd_reset_and_restore() should have already
* set the link state as active
*/
if (ret || !ufshcd_is_link_active(hba))
@@ -8087,6 +8416,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ hba->dev_info.b_rpm_dev_flush_capable = false;
+ cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
+ }
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -8313,7 +8647,7 @@ void ufshcd_remove(struct ufs_hba *hba)
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
@@ -8422,7 +8756,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
- hba->hba_enable_delay_us = 1000;
+ hba->vps = &ufs_hba_vps;
err = ufshcd_hba_init(hba);
if (err)
@@ -8560,6 +8894,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
+ ufshcd_rpm_dev_flush_recheck_work);
+
/* Set the default auto-hiberate idle timer value to 150 ms */
if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 6ffc08ad85f6..bf97d616e597 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -69,6 +69,7 @@
#include <scsi/scsi_eh.h>
#include "ufs.h"
+#include "ufs_quirks.h"
#include "ufshci.h"
#define UFSHCD "ufshcd"
@@ -336,6 +337,7 @@ struct ufs_hba_variant_ops {
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
int (*apply_dev_quirks)(struct ufs_hba *hba);
+ void (*fixup_dev_quirks)(struct ufs_hba *hba);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -555,6 +557,20 @@ enum ufshcd_caps {
* for userspace to control the power management.
*/
UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
+
+ /*
+ * This capability allows the host controller driver to turn on
+ * WriteBooster, if the underlying device supports it and is
+ * provisioned for use. This improves write performance.
+ */
+ UFSHCD_CAP_WB_EN = 1 << 7,
+};
+
+struct ufs_hba_variant_params {
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq_simple_ondemand_data ondemand_data;
+ u16 hba_enable_delay_us;
+ u32 wb_flush_threshold;
};
/**
@@ -654,6 +670,7 @@ struct ufs_hba {
int nutmrs;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
+ struct ufs_hba_variant_params *vps;
void *priv;
unsigned int irq;
bool is_irq_enabled;
@@ -675,7 +692,6 @@ struct ufs_hba {
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
- u16 hba_enable_delay_us;
bool is_powered;
/* Work Queues */
@@ -727,6 +743,9 @@ struct ufs_hba {
struct device bsg_dev;
struct request_queue *bsg_queue;
+ bool wb_buf_flush_enabled;
+ bool wb_enabled;
+ struct delayed_work rpm_dev_flush_recheck_work;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -775,6 +794,11 @@ static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
}
+static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_WB_EN;
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -808,7 +832,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep);
+ unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
u32 reg);
@@ -845,6 +869,13 @@ static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}
+static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
+{
+ if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
+ return hba->dev_info.wb_dedicated_lu;
+ return 0;
+}
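Callers pass the returned value as the @index argument of WB attribute/flag queries (as ufshcd_wb_need_flush does above), so that in LU-dedicated buffer mode the query targets the LUN that owns the buffer.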
+
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
@@ -932,11 +963,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res);
+ enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
@@ -1071,6 +1102,12 @@ static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
return 0;
}
+static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->fixup_dev_quirks)
+ hba->vops->fixup_dev_quirks(hba);
+}
+
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c3f010df641e..8dbb4db6831a 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -908,7 +908,7 @@ static int pvscsi_host_reset(struct scsi_cmnd *cmd)
use_msg = adapter->use_msg;
if (use_msg) {
- adapter->use_msg = 0;
+ adapter->use_msg = false;
spin_unlock_irqrestore(&adapter->hw_lock, flags);
/*
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 526e3215d8fe..ae1e248a8fb8 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -162,9 +162,8 @@ static int slim_add_device(struct slim_controller *ctrl,
sbdev->ctrl = ctrl;
INIT_LIST_HEAD(&sbdev->stream_list);
spin_lock_init(&sbdev->stream_list_lock);
-
- if (node)
- sbdev->dev.of_node = of_node_get(node);
+ sbdev->dev.of_node = of_node_get(node);
+ sbdev->dev.fwnode = of_fwnode_handle(node);
dev_set_name(&sbdev->dev, "%x:%x:%x:%x",
sbdev->e_addr.manf_id,
@@ -283,6 +282,7 @@ EXPORT_SYMBOL_GPL(slim_register_controller);
/* slim_remove_device: Remove the effect of slim_add_device() */
static void slim_remove_device(struct slim_device *sbdev)
{
+ of_node_put(sbdev->dev.of_node);
device_unregister(&sbdev->dev);
}
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index fc2575fef51b..743ee7b4e63f 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1361,12 +1361,10 @@ static int of_qcom_slim_ngd_register(struct device *parent,
ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
ngd->pdev->dev.of_node = node;
ctrl->ngd = ngd;
- platform_set_drvdata(ngd->pdev, ctrl);
platform_device_add(ngd->pdev);
ngd->base = ctrl->base + ngd->id * data->offset +
(ngd->id - 1) * data->size;
- ctrl->ngd = ngd;
return 0;
}
@@ -1376,12 +1374,13 @@ static int of_qcom_slim_ngd_register(struct device *parent,
static int qcom_slim_ngd_probe(struct platform_device *pdev)
{
- struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev->parent);
int ret;
ctrl->ctrl.dev = dev;
+ platform_set_drvdata(pdev, ctrl);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
pm_runtime_set_suspended(dev);
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 3f0261d53ad9..43665b77aa9e 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -14,13 +14,23 @@
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <dt-bindings/power/meson8-power.h>
#include <dt-bindings/power/meson-g12a-power.h>
+#include <dt-bindings/power/meson-gxbb-power.h>
#include <dt-bindings/power/meson-sm1-power.h>
/* AO Offsets */
-#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
-#define AO_RTI_GEN_PWR_ISO0 (0x3b << 2)
+#define GX_AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
+#define GX_AO_RTI_GEN_PWR_ISO0 (0x3b << 2)
+
+/*
+ * Meson8/Meson8b/Meson8m2 only expose the power management registers of the
+ * AO-bus as syscon. 0x3a from GX translates to 0x02, 0x3b translates to 0x03
+ * and so on.
+ */
+#define MESON8_AO_RTI_GEN_PWR_SLEEP0 (0x02 << 2)
+#define MESON8_AO_RTI_GEN_PWR_ISO0 (0x03 << 2)
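A hypothetical helper (not part of the driver) makes the translation described in the comment explicit:

/* Illustration only: Meson8 exposes the same AO registers at a syscon
 * index 0x38 lower than GX, so (0x3a - 0x38) << 2 == 0x02 << 2. */
#define MESON8_AO_REG_FROM_GX(gx_index)	(((gx_index) - 0x38) << 2)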
/* HHI Offsets */
@@ -66,18 +76,25 @@ struct meson_ee_pwrc_domain_data {
/* TOP Power Domains */
-static struct meson_ee_pwrc_top_domain g12a_pwrc_vpu = {
- .sleep_reg = AO_RTI_GEN_PWR_SLEEP0,
+static struct meson_ee_pwrc_top_domain gx_pwrc_vpu = {
+ .sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
+ .sleep_mask = BIT(8),
+ .iso_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
+ .iso_mask = BIT(9),
+};
+
+static struct meson_ee_pwrc_top_domain meson8_pwrc_vpu = {
+ .sleep_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(8),
- .iso_reg = AO_RTI_GEN_PWR_SLEEP0,
+ .iso_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
.iso_mask = BIT(9),
};
#define SM1_EE_PD(__bit) \
{ \
- .sleep_reg = AO_RTI_GEN_PWR_SLEEP0, \
+ .sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0, \
.sleep_mask = BIT(__bit), \
- .iso_reg = AO_RTI_GEN_PWR_ISO0, \
+ .iso_reg = GX_AO_RTI_GEN_PWR_ISO0, \
.iso_mask = BIT(__bit), \
}
@@ -124,10 +141,26 @@ static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_vpu[] = {
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
-static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_eth[] = {
+static struct meson_ee_pwrc_mem_domain gxbb_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
+static struct meson_ee_pwrc_mem_domain meson_pwrc_mem_eth[] = {
{ HHI_MEM_PD_REG0, GENMASK(3, 2) },
};
+static struct meson_ee_pwrc_mem_domain meson8_pwrc_audio_dsp_mem[] = {
+ { HHI_MEM_PD_REG0, GENMASK(1, 0) },
+};
+
+static struct meson_ee_pwrc_mem_domain meson8_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
@@ -199,9 +232,35 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain);
static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
- [PWRC_G12A_VPU_ID] = VPU_PD("VPU", &g12a_pwrc_vpu, g12a_pwrc_mem_vpu,
+ [PWRC_G12A_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, g12a_pwrc_mem_vpu,
pwrc_ee_get_power, 11, 2),
- [PWRC_G12A_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth),
+ [PWRC_G12A_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
+};
+
+static struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = {
+ [PWRC_GXBB_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, gxbb_pwrc_mem_vpu,
+ pwrc_ee_get_power, 12, 2),
+ [PWRC_GXBB_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
+};
+
+static struct meson_ee_pwrc_domain_desc meson8_pwrc_domains[] = {
+ [PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
+ meson8_pwrc_mem_vpu, pwrc_ee_get_power,
+ 0, 1),
+ [PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
+ meson_pwrc_mem_eth),
+ [PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
+ meson8_pwrc_audio_dsp_mem),
+};
+
+static struct meson_ee_pwrc_domain_desc meson8b_pwrc_domains[] = {
+ [PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
+ meson8_pwrc_mem_vpu, pwrc_ee_get_power,
+ 11, 1),
+ [PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
+ meson_pwrc_mem_eth),
+ [PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
+ meson8_pwrc_audio_dsp_mem),
};
static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
@@ -216,7 +275,7 @@ static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
[PWRC_SM1_GE2D_ID] = TOP_PD("GE2D", &sm1_pwrc_ge2d, sm1_pwrc_mem_ge2d,
pwrc_ee_get_power),
[PWRC_SM1_AUDIO_ID] = MEM_PD("AUDIO", sm1_pwrc_mem_audio),
- [PWRC_SM1_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth),
+ [PWRC_SM1_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
};
struct meson_ee_pwrc_domain {
@@ -470,6 +529,21 @@ static struct meson_ee_pwrc_domain_data meson_ee_g12a_pwrc_data = {
.domains = g12a_pwrc_domains,
};
+static struct meson_ee_pwrc_domain_data meson_ee_gxbb_pwrc_data = {
+ .count = ARRAY_SIZE(gxbb_pwrc_domains),
+ .domains = gxbb_pwrc_domains,
+};
+
+static struct meson_ee_pwrc_domain_data meson_ee_m8_pwrc_data = {
+ .count = ARRAY_SIZE(meson8_pwrc_domains),
+ .domains = meson8_pwrc_domains,
+};
+
+static struct meson_ee_pwrc_domain_data meson_ee_m8b_pwrc_data = {
+ .count = ARRAY_SIZE(meson8b_pwrc_domains),
+ .domains = meson8b_pwrc_domains,
+};
+
static struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = {
.count = ARRAY_SIZE(sm1_pwrc_domains),
.domains = sm1_pwrc_domains,
@@ -477,6 +551,22 @@ static struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = {
static const struct of_device_id meson_ee_pwrc_match_table[] = {
{
+ .compatible = "amlogic,meson8-pwrc",
+ .data = &meson_ee_m8_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson8b-pwrc",
+ .data = &meson_ee_m8b_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson8m2-pwrc",
+ .data = &meson_ee_m8b_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-pwrc",
+ .data = &meson_ee_gxbb_pwrc_data,
+ },
+ {
.compatible = "amlogic,meson-g12a-pwrc",
.data = &meson_ee_g12a_pwrc_data,
},
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index bcdcd3e7d7f1..7351f3030550 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -58,7 +58,7 @@ static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
* If cpu == -1, choose the current cpu, with no guarantees about
* potentially being migrated away.
*/
- if (unlikely(cpu < 0))
+ if (cpu < 0)
cpu = smp_processor_id();
/* If a specific cpu was requested, pick it up immediately */
@@ -70,6 +70,10 @@ static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
if (d)
return d;
+ d = service_select_by_cpu(d, -1);
+ if (d)
+ return d;
+
spin_lock(&dpio_list_lock);
d = list_entry(dpio_list.next, struct dpaa2_io, node);
list_del(&d->node);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 804b8ba9bf5c..0ab85bfb116f 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -572,18 +572,6 @@ void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
#define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
-static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
- u8 idx)
-{
- if (idx < 16)
- qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
- QMAN_RT_MODE);
- else
- qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
- (idx - 16) * 4,
- QMAN_RT_MODE);
-}
-
#define QB_RT_BIT ((u32)0x100)
/**
* qbman_swp_enqueue_direct() - Issue an enqueue command
@@ -669,6 +657,7 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
eqcr_ci = s->eqcr.ci;
p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 1e164e03410a..9888a7061873 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -449,11 +449,6 @@ static inline int qm_eqcr_init(struct qm_portal *portal,
return 0;
}
-static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
-{
- return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
-}
-
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
struct qm_eqcr *eqcr = &portal->eqcr;
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 447146861c2c..2df20d6f85fa 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -448,7 +448,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
unsigned int i;
unsigned int j;
u32 crc;
- size_t calc_size = sizeof(struct qe_firmware);
+ size_t calc_size;
size_t length;
const struct qe_header *hdr;
@@ -480,7 +480,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
}
/* Validate the length and check if there's a CRC */
- calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
+ calc_size = struct_size(firmware, microcode, firmware->count);
for (i = 0; i < firmware->count; i++)
/*
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index d6c93970df4d..cac0fb7693a0 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -519,7 +519,7 @@ int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
int clock_bits;
u32 shift;
struct qe_mux __iomem *qe_mux_reg;
- __be32 __iomem *cmxs1cr;
+ __be32 __iomem *cmxs1cr;
qe_mux_reg = &qe_immr->qmx;
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 103e2c93c342..446143241fe7 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
+ifeq ($(CONFIG_ARM),y)
+obj-$(CONFIG_ARCH_MXC) += soc-imx.o
+endif
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
new file mode 100644
index 000000000000..fec3d672b606
--- /dev/null
+++ b/drivers/soc/imx/soc-imx.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include <soc/imx/cpu.h>
+#include <soc/imx/revision.h>
+
+#define OCOTP_UID_H 0x420
+#define OCOTP_UID_L 0x410
+
+#define OCOTP_ULP_UID_1 0x4b0
+#define OCOTP_ULP_UID_2 0x4c0
+#define OCOTP_ULP_UID_3 0x4d0
+#define OCOTP_ULP_UID_4 0x4e0
+
+static int __init imx_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const char *ocotp_compat = NULL;
+ struct soc_device *soc_dev;
+ struct device_node *root;
+ struct regmap *ocotp = NULL;
+ const char *soc_id;
+ u64 soc_uid = 0;
+ u32 val;
+ int ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
+ of_node_put(root);
+ if (ret)
+ goto free_soc;
+
+ switch (__mxc_cpu_type) {
+ case MXC_CPU_MX1:
+ soc_id = "i.MX1";
+ break;
+ case MXC_CPU_MX21:
+ soc_id = "i.MX21";
+ break;
+ case MXC_CPU_MX25:
+ soc_id = "i.MX25";
+ break;
+ case MXC_CPU_MX27:
+ soc_id = "i.MX27";
+ break;
+ case MXC_CPU_MX31:
+ soc_id = "i.MX31";
+ break;
+ case MXC_CPU_MX35:
+ soc_id = "i.MX35";
+ break;
+ case MXC_CPU_MX51:
+ soc_id = "i.MX51";
+ break;
+ case MXC_CPU_MX53:
+ soc_id = "i.MX53";
+ break;
+ case MXC_CPU_IMX6SL:
+ ocotp_compat = "fsl,imx6sl-ocotp";
+ soc_id = "i.MX6SL";
+ break;
+ case MXC_CPU_IMX6DL:
+ ocotp_compat = "fsl,imx6q-ocotp";
+ soc_id = "i.MX6DL";
+ break;
+ case MXC_CPU_IMX6SX:
+ ocotp_compat = "fsl,imx6sx-ocotp";
+ soc_id = "i.MX6SX";
+ break;
+ case MXC_CPU_IMX6Q:
+ ocotp_compat = "fsl,imx6q-ocotp";
+ soc_id = "i.MX6Q";
+ break;
+ case MXC_CPU_IMX6UL:
+ ocotp_compat = "fsl,imx6ul-ocotp";
+ soc_id = "i.MX6UL";
+ break;
+ case MXC_CPU_IMX6ULL:
+ ocotp_compat = "fsl,imx6ull-ocotp";
+ soc_id = "i.MX6ULL";
+ break;
+ case MXC_CPU_IMX6ULZ:
+ ocotp_compat = "fsl,imx6ull-ocotp";
+ soc_id = "i.MX6ULZ";
+ break;
+ case MXC_CPU_IMX6SLL:
+ ocotp_compat = "fsl,imx6sll-ocotp";
+ soc_id = "i.MX6SLL";
+ break;
+ case MXC_CPU_IMX7D:
+ ocotp_compat = "fsl,imx7d-ocotp";
+ soc_id = "i.MX7D";
+ break;
+ case MXC_CPU_IMX7ULP:
+ ocotp_compat = "fsl,imx7ulp-ocotp";
+ soc_id = "i.MX7ULP";
+ break;
+ case MXC_CPU_VF500:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF500";
+ break;
+ case MXC_CPU_VF510:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF510";
+ break;
+ case MXC_CPU_VF600:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF600";
+ break;
+ case MXC_CPU_VF610:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF610";
+ break;
+ default:
+ soc_id = "Unknown";
+ }
+ soc_dev_attr->soc_id = soc_id;
+
+ if (ocotp_compat) {
+ ocotp = syscon_regmap_lookup_by_compatible(ocotp_compat);
+ if (IS_ERR(ocotp))
+ pr_err("%s: failed to find %s regmap!\n", __func__, ocotp_compat);
+ }
+
+ if (!IS_ERR_OR_NULL(ocotp)) {
+ if (__mxc_cpu_type == MXC_CPU_IMX7ULP) {
+ regmap_read(ocotp, OCOTP_ULP_UID_4, &val);
+ soc_uid = val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_3, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_2, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_1, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ } else {
+ regmap_read(ocotp, OCOTP_UID_H, &val);
+ soc_uid = val;
+ regmap_read(ocotp, OCOTP_UID_L, &val);
+ soc_uid <<= 32;
+ soc_uid |= val;
+ }
+ }
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
+ (imx_get_soc_revision() >> 4) & 0xf,
+ imx_get_soc_revision() & 0xf);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto free_soc;
+ }
+
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
+ if (!soc_dev_attr->serial_number) {
+ ret = -ENOMEM;
+ goto free_rev;
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto free_serial_number;
+ }
+
+ return 0;
+
+free_serial_number:
+ kfree(soc_dev_attr->serial_number);
+free_rev:
+ kfree(soc_dev_attr->revision);
+free_soc:
+ kfree(soc_dev_attr);
+ return ret;
+}
+device_initcall(imx_soc_device_init);
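Once registered, the soc_device core exposes these strings as sysfs attributes (machine, family, soc_id, revision, serial_number), typically under /sys/devices/soc0 on a system with a single SoC device.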
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 719e1f189ebf..7b0759adb47d 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -53,11 +53,11 @@ static u32 __init imx8mq_soc_revision(void)
struct device_node *np;
void __iomem *ocotp_base;
u32 magic;
- u32 rev = 0;
+ u32 rev;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
if (!np)
- goto out;
+ return 0;
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
@@ -78,9 +78,8 @@ static u32 __init imx8mq_soc_revision(void)
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
iounmap(ocotp_base);
-
-out:
of_node_put(np);
+
return rev;
}
diff --git a/drivers/soc/kendryte/k210-sysctl.c b/drivers/soc/kendryte/k210-sysctl.c
index 4608fbca20e1..707019223dd8 100644
--- a/drivers/soc/kendryte/k210-sysctl.c
+++ b/drivers/soc/kendryte/k210-sysctl.c
@@ -246,3 +246,15 @@ static void __init k210_soc_early_init(const void *fdt)
iounmap(regs);
}
SOC_EARLY_INIT_DECLARE(generic_k210, "kendryte,k210", k210_soc_early_init);
+
+#ifdef CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN
+/*
+ * Generic entry for the default k210.dtb embedded DTB for boards with:
+ * - Vendor ID: 0x4B5
+ * - Arch ID: 0xE59889E6A5A04149 (= "Canaan AI" in UTF-8 encoded Chinese)
+ * - Impl ID: 0x4D41495832303030 (= "MAIX2000")
+ * These values are reported by the SiPEED MAXDUINO, SiPEED MAIX GO and
+ * SiPEED Dan dock boards.
+ */
+SOC_BUILTIN_DTB_DECLARE(k210, 0x4B5, 0xE59889E6A5A04149, 0x4D41495832303030);
+#endif
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 2114b563478c..59a56cd790ec 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -44,4 +44,11 @@ config MTK_SCPSYS
Say yes here to add support for the MediaTek SCPSYS power domain
driver.
+config MTK_MMSYS
+ bool "MediaTek MMSYS Support"
+ default ARCH_MEDIATEK
+ help
+ Say yes here to add support for the MediaTek Multimedia
+ Subsystem (MMSYS).
+
endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index b01733074ad6..01f9f873634a 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
+obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index db37144ae98c..87ee9f767b7a 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -351,7 +351,9 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
spin_unlock_irqrestore(&client->lock, flags);
}
- mbox_send_message(client->chan, pkt);
+ err = mbox_send_message(client->chan, pkt);
+ if (err < 0)
+ return err;
/* We can send next packet immediately, so just call txdone. */
mbox_client_txdone(client->chan, 0);
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
new file mode 100644
index 000000000000..a55f25511173
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
+
+#include "../../gpu/drm/mediatek/mtk_drm_ddp.h"
+#include "../../gpu/drm/mediatek/mtk_drm_ddp_comp.h"
+
+#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
+#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
+#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
+#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
+#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
+#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
+#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
+#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
+#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
+#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
+#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
+#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
+
+#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
+#define DISP_REG_CONFIG_OUT_SEL 0x04c
+#define DISP_REG_CONFIG_DSI_SEL 0x050
+#define DISP_REG_CONFIG_DPI_SEL 0x064
+
+#define OVL0_MOUT_EN_COLOR0 0x1
+#define OD_MOUT_EN_RDMA0 0x1
+#define OD1_MOUT_EN_RDMA1 BIT(16)
+#define UFOE_MOUT_EN_DSI0 0x1
+#define COLOR0_SEL_IN_OVL0 0x1
+#define OVL1_MOUT_EN_COLOR1 0x1
+#define GAMMA_MOUT_EN_RDMA1 0x1
+#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI1 0x3
+#define RDMA0_SOUT_DSI1 0x1
+#define RDMA0_SOUT_DSI2 0x4
+#define RDMA0_SOUT_DSI3 0x5
+#define RDMA1_SOUT_DPI0 0x2
+#define RDMA1_SOUT_DPI1 0x3
+#define RDMA1_SOUT_DSI1 0x1
+#define RDMA1_SOUT_DSI2 0x4
+#define RDMA1_SOUT_DSI3 0x5
+#define RDMA2_SOUT_DPI0 0x2
+#define RDMA2_SOUT_DPI1 0x3
+#define RDMA2_SOUT_DSI1 0x1
+#define RDMA2_SOUT_DSI2 0x4
+#define RDMA2_SOUT_DSI3 0x5
+#define DPI0_SEL_IN_RDMA1 0x1
+#define DPI0_SEL_IN_RDMA2 0x3
+#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
+#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1 0x1
+#define DSI0_SEL_IN_RDMA2 0x4
+#define DSI1_SEL_IN_RDMA1 0x1
+#define DSI1_SEL_IN_RDMA2 0x4
+#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
+#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
+#define COLOR1_SEL_IN_OVL1 0x1
+
+#define OVL_MOUT_EN_RDMA 0x1
+#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
+#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
+#define DSI_SEL_IN_BLS 0x0
+#define DPI_SEL_IN_BLS 0x0
+#define DSI_SEL_IN_RDMA 0x1
+
+struct mtk_mmsys_driver_data {
+ const char *clk_driver;
+};
+
+static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
+ .clk_driver = "clk-mt2701-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
+ .clk_driver = "clk-mt2712-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt6779_mmsys_driver_data = {
+ .clk_driver = "clk-mt6779-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt6797_mmsys_driver_data = {
+ .clk_driver = "clk-mt6797-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
+ .clk_driver = "clk-mt8173-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
+ .clk_driver = "clk-mt8183-mm",
+};
+
+static unsigned int mtk_mmsys_ddp_mout_en(enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next,
+ unsigned int *addr)
+{
+ unsigned int value;
+
+ if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
+ value = OVL0_MOUT_EN_COLOR0;
+ } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
+ value = OVL_MOUT_EN_RDMA;
+ } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD_MOUT_EN_RDMA0;
+ } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
+ value = UFOE_MOUT_EN_DSI0;
+ } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+ *addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
+ value = OVL1_MOUT_EN_COLOR1;
+ } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
+ value = GAMMA_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD1_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI3;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI3;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI3;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
+static unsigned int mtk_mmsys_ddp_sel_in(enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next,
+ unsigned int *addr)
+{
+ unsigned int value;
+
+ if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+ *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
+ value = COLOR0_SEL_IN_OVL0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI3_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI3_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+ *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
+ value = COLOR1_SEL_IN_OVL1;
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSI_SEL;
+ value = DSI_SEL_IN_BLS;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
+static void mtk_mmsys_ddp_sout_sel(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+ writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
+ writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+ writel_relaxed(DSI_SEL_IN_RDMA,
+ config_regs + DISP_REG_CONFIG_DSI_SEL);
+ writel_relaxed(DPI_SEL_IN_BLS,
+ config_regs + DISP_REG_CONFIG_DPI_SEL);
+ }
+}
+
+void mtk_mmsys_ddp_connect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ void __iomem *config_regs = dev_get_drvdata(dev);
+ unsigned int addr, value, reg;
+
+ value = mtk_mmsys_ddp_mout_en(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) | value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+
+ mtk_mmsys_ddp_sout_sel(config_regs, cur, next);
+
+ value = mtk_mmsys_ddp_sel_in(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) | value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect);
+
+void mtk_mmsys_ddp_disconnect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ void __iomem *config_regs = dev_get_drvdata(dev);
+ unsigned int addr, value, reg;
+
+ value = mtk_mmsys_ddp_mout_en(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) & ~value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+
+ value = mtk_mmsys_ddp_sel_in(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) & ~value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
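A consumer, typically the mediatek-drm driver, resolves the mmsys device and routes display components through these helpers. A minimal sketch of the call pattern (device lookup and error handling elided):

/* Illustrative only: route the OVL0 -> COLOR0 segment of a display path. */
static void route_ovl0_to_color0(struct device *mmsys_dev)
{
	mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_OVL0,
			      DDP_COMPONENT_COLOR0);
}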
+
+static int mtk_mmsys_probe(struct platform_device *pdev)
+{
+ const struct mtk_mmsys_driver_data *data;
+ struct device *dev = &pdev->dev;
+ struct platform_device *clks;
+ struct platform_device *drm;
+ void __iomem *config_regs;
+ struct resource *mem;
+ int ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ config_regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(config_regs)) {
+ ret = PTR_ERR(config_regs);
+ dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
+ ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, config_regs);
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ clks = platform_device_register_data(&pdev->dev, data->clk_driver,
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(clks))
+ return PTR_ERR(clks);
+
+ drm = platform_device_register_data(&pdev->dev, "mediatek-drm",
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(drm)) {
+ platform_device_unregister(clks);
+ return PTR_ERR(drm);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id of_match_mtk_mmsys[] = {
+ {
+ .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt6779-mmsys",
+ .data = &mt6779_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt6797-mmsys",
+ .data = &mt6797_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt8183-mmsys",
+ .data = &mt8183_mmsys_driver_data,
+ },
+ { }
+};
+
+static struct platform_driver mtk_mmsys_drv = {
+ .driver = {
+ .name = "mtk-mmsys",
+ .of_match_table = of_match_mtk_mmsys,
+ },
+ .probe = mtk_mmsys_probe,
+};
+
+builtin_platform_driver(mtk_mmsys_drv);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index bf42a17a45de..250a393f255d 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -80,16 +80,6 @@ config QCOM_PDR_HELPERS
tristate
select QCOM_QMI_HELPERS
-config QCOM_PM
- bool "Qualcomm Power Management"
- depends on ARCH_QCOM && !ARM64
- select ARM_CPU_SUSPEND
- select QCOM_SCM
- help
- QCOM Platform specific power driver to manage cores and L2 low power
- modes. It interface with various system drivers to put the cores in
- low power modes.
-
config QCOM_QMI_HELPERS
tristate
depends on NET
@@ -117,7 +107,7 @@ config QCOM_RPMH
help apply the aggregated state on the resource.
config QCOM_RPMHPD
- bool "Qualcomm RPMh Power domain driver"
+ tristate "Qualcomm RPMh Power domain driver"
depends on QCOM_RPMH && QCOM_COMMAND_DB
help
QCOM RPMh Power domain driver to support power-domains with
@@ -126,8 +116,8 @@ config QCOM_RPMHPD
for the voltage rail.
config QCOM_RPMPD
- bool "Qualcomm RPM Power domain driver"
- depends on QCOM_SMD_RPM=y
+ tristate "Qualcomm RPM Power domain driver"
+ depends on QCOM_SMD_RPM
help
QCOM RPM Power domain driver to support power-domains with
performance states. The driver communicates a performance state
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 5d6b83dc58e8..92cc4232d72c 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
-obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index f6c3d17b05c7..fc5610603b17 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/types.h>
#include <soc/qcom/cmd-db.h>
@@ -236,6 +237,77 @@ enum cmd_db_hw_type cmd_db_read_slave_id(const char *id)
}
EXPORT_SYMBOL(cmd_db_read_slave_id);
+#ifdef CONFIG_DEBUG_FS
+static int cmd_db_debugfs_dump(struct seq_file *seq, void *p)
+{
+ int i, j;
+ const struct rsc_hdr *rsc;
+ const struct entry_header *ent;
+ const char *name;
+ u16 len, version;
+ u8 major, minor;
+
+ seq_puts(seq, "Command DB DUMP\n");
+
+ for (i = 0; i < MAX_SLV_ID; i++) {
+ rsc = &cmd_db_header->header[i];
+ if (!rsc->slv_id)
+ break;
+
+ switch (le16_to_cpu(rsc->slv_id)) {
+ case CMD_DB_HW_ARC:
+ name = "ARC";
+ break;
+ case CMD_DB_HW_VRM:
+ name = "VRM";
+ break;
+ case CMD_DB_HW_BCM:
+ name = "BCM";
+ break;
+ default:
+ name = "Unknown";
+ break;
+ }
+
+ version = le16_to_cpu(rsc->version);
+ major = version >> 8;
+ minor = version;
+
+ seq_printf(seq, "Slave %s (v%u.%u)\n", name, major, minor);
+ seq_puts(seq, "-------------------------\n");
+
+ ent = rsc_to_entry_header(rsc);
+ for (j = 0; j < le16_to_cpu(rsc->cnt); j++, ent++) {
+ seq_printf(seq, "0x%05x: %*pEp", le32_to_cpu(ent->addr),
+ (int)sizeof(ent->id), ent->id);
+
+ len = le16_to_cpu(ent->len);
+ if (len) {
+ seq_printf(seq, " [%*ph]",
+ len, rsc_offset(rsc, ent));
+ }
+ seq_putc(seq, '\n');
+ }
+ }
+
+ return 0;
+}
+
+static int open_cmd_db_debugfs(struct inode *inode, struct file *file)
+{
+ return single_open(file, cmd_db_debugfs_dump, inode->i_private);
+}
+#endif
+
+static const struct file_operations cmd_db_debugfs_ops = {
+#ifdef CONFIG_DEBUG_FS
+ .open = open_cmd_db_debugfs,
+#endif
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int cmd_db_dev_probe(struct platform_device *pdev)
{
struct reserved_mem *rmem;
@@ -259,12 +331,14 @@ static int cmd_db_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
+ debugfs_create_file("cmd-db", 0400, NULL, NULL, &cmd_db_debugfs_ops);
+
return 0;
}
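With debugfs mounted, the dump is then readable from <debugfs>/cmd-db; the file is created with a NULL parent and mode 0400, so it sits at the debugfs root and is root-readable only.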
static const struct of_device_id cmd_db_match_table[] = {
{ .compatible = "qcom,cmd-db" },
- { },
+ { }
};
static struct platform_driver cmd_db_dev_driver = {
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 17ad3b8698e1..bdcf16f88a97 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -155,10 +155,6 @@ static int pdr_register_listener(struct pdr_handle *pdr,
return ret;
}
- if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX)
- pr_err("PDR: %s notification state invalid: 0x%x\n",
- pds->service_path, resp.curr_state);
-
pds->state = resp.curr_state;
return 0;
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index f43a2e07ee83..ed2c687c16b3 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -599,6 +599,7 @@ static const struct of_device_id qmp_dt_match[] = {
{ .compatible = "qcom,sc7180-aoss-qmp", },
{ .compatible = "qcom,sdm845-aoss-qmp", },
{ .compatible = "qcom,sm8150-aoss-qmp", },
+ { .compatible = "qcom,sm8250-aoss-qmp", },
{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index 6eec32b97f83..ef60e790a750 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -22,16 +22,23 @@ struct rsc_drv;
* struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
* to the controller
*
- * @drv: the controller
- * @type: type of the TCS in this group - active, sleep, wake
- * @mask: mask of the TCSes relative to all the TCSes in the RSC
- * @offset: start of the TCS group relative to the TCSes in the RSC
- * @num_tcs: number of TCSes in this type
- * @ncpt: number of commands in each TCS
- * @lock: lock for synchronizing this TCS writes
- * @req: requests that are sent from the TCS
- * @cmd_cache: flattened cache of cmds in sleep/wake TCS
- * @slots: indicates which of @cmd_addr are occupied
+ * @drv: The controller.
+ * @type: Type of the TCS in this group - active, sleep, wake.
+ * @mask: Mask of the TCSes relative to all the TCSes in the RSC.
+ * @offset: Start of the TCS group relative to the TCSes in the RSC.
+ * @num_tcs: Number of TCSes in this type.
+ * @ncpt: Number of commands in each TCS.
+ * @req: Requests that are sent from the TCS; only used for ACTIVE_ONLY
+ * transfers (could be on a wake/sleep TCS if we are borrowing for
+ * an ACTIVE_ONLY transfer).
+ * Start: grab drv->lock, set req, set tcs_in_use, drop drv->lock,
+ * trigger
+ * End: get irq, access req,
+ * grab drv->lock, clear tcs_in_use, drop drv->lock
+ * @slots: Indicates which of @cmd_addr are occupied; only used for
+ * SLEEP / WAKE TCSs. Slots are tightly packed when
+ * (ncpt < MAX_CMDS_PER_TCS): that is, if ncpt = 2 and
+ * MAX_CMDS_PER_TCS = 16, then bit[2] is the first bit of the
+ * 2nd TCS.
*/
struct tcs_group {
struct rsc_drv *drv;
@@ -40,9 +47,7 @@ struct tcs_group {
u32 offset;
int num_tcs;
int ncpt;
- spinlock_t lock;
const struct tcs_request *req[MAX_TCS_PER_TYPE];
- u32 *cmd_cache;
DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
};
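A worked version of the @slots packing example from the kerneldoc above, as a hypothetical helper (the driver computes this inline):

/* Illustration of the packed @slots indexing: with ncpt commands per
 * TCS, the slot bit for command c of the n-th sleep/wake TCS is
 * n * ncpt + c. With ncpt = 2, TCS 1 cmd 0 lands on bit 2, matching
 * the kerneldoc example. */
static inline int tcs_slot_bit(int tcs_num, int cmd, int ncpt)
{
	return tcs_num * ncpt + cmd;
}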
@@ -84,20 +89,32 @@ struct rpmh_ctrlr {
* struct rsc_drv: the Direct Resource Voter (DRV) of the
* Resource State Coordinator controller (RSC)
*
- * @name: controller identifier
- * @tcs_base: start address of the TCS registers in this controller
- * @id: instance id in the controller (Direct Resource Voter)
- * @num_tcs: number of TCSes in this DRV
- * @tcs: TCS groups
- * @tcs_in_use: s/w state of the TCS
- * @lock: synchronize state of the controller
- * @client: handle to the DRV's client.
+ * @name: Controller identifier.
+ * @tcs_base: Start address of the TCS registers in this controller.
+ * @id: Instance id in the controller (Direct Resource Voter).
+ * @num_tcs: Number of TCSes in this DRV.
+ * @rsc_pm: CPU PM notifier for controller.
+ * Used when solver mode is not present.
+ * @cpus_in_pm: Number of CPUs not in idle power collapse.
+ * Used when solver mode is not present.
+ * @tcs: TCS groups.
+ * @tcs_in_use: S/W state of the TCS; only set for ACTIVE_ONLY
+ * transfers, but might show a sleep/wake TCS in use if
+ * it was borrowed for an active_only transfer. You
+ * must hold the lock in this struct (AKA drv->lock) in
+ * order to update this.
+ * @lock: Synchronize state of the controller. If RPMH's cache
+ * lock will also be held, the order is: drv->lock then
+ * cache_lock.
+ * @client: Handle to the DRV's client.
*/
struct rsc_drv {
const char *name;
void __iomem *tcs_base;
int id;
int num_tcs;
+ struct notifier_block rsc_pm;
+ atomic_t cpus_in_pm;
struct tcs_group tcs[TCS_TYPE_NR];
DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
spinlock_t lock;
@@ -107,7 +124,7 @@ struct rsc_drv {
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
const struct tcs_request *msg);
-int rpmh_rsc_invalidate(struct rsc_drv *drv);
+void rpmh_rsc_invalidate(struct rsc_drv *drv);
void rpmh_tx_done(const struct tcs_request *msg, int r);
int rpmh_flush(struct rpmh_ctrlr *ctrlr);
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index b71822131f59..076fd27f3081 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -6,9 +6,11 @@
#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
#include <linux/atomic.h>
+#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
@@ -30,21 +32,41 @@
#define RSC_DRV_TCS_OFFSET 672
#define RSC_DRV_CMD_OFFSET 20
-/* DRV Configuration Information Register */
+/* DRV HW Solver Configuration Information Register */
+#define DRV_SOLVER_CONFIG 0x04
+#define DRV_HW_SOLVER_MASK 1
+#define DRV_HW_SOLVER_SHIFT 24
+
+/* DRV TCS Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG 0x0C
#define DRV_NUM_TCS_MASK 0x3F
#define DRV_NUM_TCS_SHIFT 6
#define DRV_NCPT_MASK 0x1F
#define DRV_NCPT_SHIFT 27
-/* Register offsets */
+/* Offsets for common TCS Registers, one bit per TCS */
#define RSC_DRV_IRQ_ENABLE 0x00
#define RSC_DRV_IRQ_STATUS 0x04
-#define RSC_DRV_IRQ_CLEAR 0x08
-#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
+#define RSC_DRV_IRQ_CLEAR 0x08 /* w/o; write 1 to clear */
+
+/*
+ * Offsets for per TCS Registers.
+ *
+ * TCSes start at 0x10 from tcs_base and are stored one after another.
+ * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
+ * of the below to find a register.
+ */
+#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10 /* 1 bit per command */
#define RSC_DRV_CONTROL 0x14
-#define RSC_DRV_STATUS 0x18
-#define RSC_DRV_CMD_ENABLE 0x1C
+#define RSC_DRV_STATUS 0x18 /* zero if tcs is busy */
+#define RSC_DRV_CMD_ENABLE 0x1C /* 1 bit per command */
+
+/*
+ * Offsets for per command in a TCS.
+ *
+ * Commands (up to 16) start at 0x30 in a TCS; multiply command index
+ * by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
+ */
#define RSC_DRV_CMD_MSGID 0x30
#define RSC_DRV_CMD_ADDR 0x34
#define RSC_DRV_CMD_DATA 0x38
@@ -61,94 +83,179 @@
#define CMD_STATUS_ISSUED BIT(8)
#define CMD_STATUS_COMPL BIT(16)
-static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+/*
+ * Here's a high level overview of how all the registers in RPMH work
+ * together:
+ *
+ * - The main rpmh-rsc address is the base of a register space that can
+ * be used to find overall configuration of the hardware
+ * (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
+ * space are all the TCS blocks. The offset of the TCS blocks is
+ * specified in the device tree by "qcom,tcs-offset" and used to
+ * compute tcs_base.
+ * - TCS blocks come one after another. Type, count, and order are
+ * specified by the device tree as "qcom,tcs-config".
+ * - Each TCS block has some registers, then space for up to 16 commands.
+ * Note that though address space is reserved for 16 commands, fewer
+ * might be present. See ncpt (num cmds per TCS).
+ *
+ * Here's a picture:
+ *
+ * +---------------------------------------------------+
+ * |RSC |
+ * | ctrl |
+ * | |
+ * | Drvs: |
+ * | +-----------------------------------------------+ |
+ * | |DRV0 | |
+ * | | ctrl/config | |
+ * | | IRQ | |
+ * | | | |
+ * | | TCSes: | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS0 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS1 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS2 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | ...... | |
+ * | +-----------------------------------------------+ |
+ * | +-----------------------------------------------+ |
+ * | |DRV1 | |
+ * | | (same as DRV0) | |
+ * | +-----------------------------------------------+ |
+ * | ...... |
+ * +---------------------------------------------------+
+ */
+
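For reference, the helpers below encode the address arithmetic implied by the map above. A minimal sketch with made-up indices, assuming only the constants defined in this patch:

	/* e.g. RSC_DRV_CMD_ADDR of command 2 in TCS 3, purely illustrative: */
	void __iomem *addr = tcs_base
			   + RSC_DRV_TCS_OFFSET * 3	/* 672 * 3 */
			   + RSC_DRV_CMD_ADDR		/* 0x34    */
			   + RSC_DRV_CMD_OFFSET * 2;	/* 20 * 2  */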
+static inline void __iomem *
+tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
- return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
- RSC_DRV_CMD_OFFSET * cmd_id);
+ return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
}
-static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
- u32 data)
+static inline void __iomem *
+tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
- writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
- RSC_DRV_CMD_OFFSET * cmd_id);
+ return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
}
-static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
+static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id)
{
- writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+ return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
-static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
- u32 data)
+static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
- writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
- for (;;) {
- if (data == readl(drv->tcs_base + reg +
- RSC_DRV_TCS_OFFSET * tcs_id))
- break;
- udelay(1);
- }
+ return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}
-static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
+static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id, u32 data)
{
- return !test_bit(tcs_id, drv->tcs_in_use) &&
- read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
+ writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
-static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
+static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
{
- return &drv->tcs[type];
+ writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}
-static int tcs_invalidate(struct rsc_drv *drv, int type)
+static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
{
- int m;
- struct tcs_group *tcs;
+ u32 new_data;
- tcs = get_tcs_of_type(drv, type);
+ writel(data, tcs_reg_addr(drv, reg, tcs_id));
+ if (readl_poll_timeout_atomic(tcs_reg_addr(drv, reg, tcs_id), new_data,
+ new_data == data, 1, USEC_PER_SEC))
+ pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
+ data, tcs_id, reg);
+}
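The readl_poll_timeout_atomic() call above replaces the removed open-coded poll loop. Roughly, and glossing over the exact bookkeeping the macro does in <linux/iopoll.h>, it behaves like this sketch:

	/* Rough equivalent of readl_poll_timeout_atomic(addr, new_data,
	 * new_data == data, 1, USEC_PER_SEC): evaluates to 0 on a match,
	 * -ETIMEDOUT after roughly one second of 1 us polls.
	 */
	for (timeout = USEC_PER_SEC; timeout; timeout--) {
		new_data = readl(addr);
		if (new_data == data)
			break;
		udelay(1);
	}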
- spin_lock(&tcs->lock);
- if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
- spin_unlock(&tcs->lock);
- return 0;
- }
+/**
+ * tcs_is_free() - Return if a TCS is totally free.
+ * @drv: The RSC controller.
+ * @tcs_id: The global ID of this TCS.
+ *
+ * Returns true if nobody has claimed this TCS (by setting tcs_in_use).
+ *
+ * Context: Must be called with the drv->lock held.
+ *
+ * Return: true if the given TCS is free.
+ */
+static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
+{
+ return !test_bit(tcs_id, drv->tcs_in_use);
+}
+
+/**
+ * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
+ * @drv: The RSC controller.
+ * @type: SLEEP_TCS or WAKE_TCS
+ *
+ * This will clear the "slots" variable of the given tcs_group and also
+ * tell the hardware to forget about all entries.
+ *
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
+ */
+static void tcs_invalidate(struct rsc_drv *drv, int type)
+{
+ int m;
+ struct tcs_group *tcs = &drv->tcs[type];
+
+ /* Caller ensures nobody else is running so no lock */
+ if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
+ return;
for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
- if (!tcs_is_free(drv, m)) {
- spin_unlock(&tcs->lock);
- return -EAGAIN;
- }
write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
}
bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
- spin_unlock(&tcs->lock);
-
- return 0;
}
/**
- * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
+ * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
+ * @drv: The RSC controller.
*
- * @drv: the RSC controller
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
*/
-int rpmh_rsc_invalidate(struct rsc_drv *drv)
+void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
- int ret;
-
- ret = tcs_invalidate(drv, SLEEP_TCS);
- if (!ret)
- ret = tcs_invalidate(drv, WAKE_TCS);
-
- return ret;
+ tcs_invalidate(drv, SLEEP_TCS);
+ tcs_invalidate(drv, WAKE_TCS);
}
+/**
+ * get_tcs_for_msg() - Get the tcs_group used to send the given message.
+ * @drv: The RSC controller.
+ * @msg: The message we want to send.
+ *
+ * This is normally pretty straightforward except if we are trying to send
+ * an ACTIVE_ONLY message but don't have any active_only TCSes.
+ *
+ * Return: A pointer to a tcs_group or an ERR_PTR.
+ */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
const struct tcs_request *msg)
{
- int type, ret;
+ int type;
struct tcs_group *tcs;
switch (msg->state) {
@@ -168,24 +275,33 @@ static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
/*
* If we are making an active request on a RSC that does not have a
* dedicated TCS for active state use, then re-purpose a wake TCS to
- * send active votes.
- * NOTE: The driver must be aware that this RSC does not have a
- * dedicated AMC, and therefore would invalidate the sleep and wake
- * TCSes before making an active state request.
+ * send active votes. This is safe because we ensure any active-only
+ * transfers have finished before we use it (maybe by running from
+ * the last CPU in PM code).
*/
- tcs = get_tcs_of_type(drv, type);
- if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
- tcs = get_tcs_of_type(drv, WAKE_TCS);
- if (tcs->num_tcs) {
- ret = rpmh_rsc_invalidate(drv);
- if (ret)
- return ERR_PTR(ret);
- }
- }
+ tcs = &drv->tcs[type];
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
+ tcs = &drv->tcs[WAKE_TCS];
return tcs;
}
+/**
+ * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
+ * @drv: The RSC controller.
+ * @tcs_id: The global ID of this TCS.
+ *
+ * For ACTIVE_ONLY transfers we want to call back into the client when the
+ * transfer finishes. To do this we need the "request" that the client
+ * originally provided us. This function grabs the request that we stashed
+ * when we started the transfer.
+ *
+ * This only makes sense for ACTIVE_ONLY transfers since those are the only
+ * ones we track sending (the only ones we enable interrupts for and the only
+ * ones we call back to the client for).
+ *
+ * Return: The stashed request.
+ */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
int tcs_id)
{
@@ -202,7 +318,76 @@ static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
}
/**
- * tcs_tx_done: TX Done interrupt handler
+ * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @trigger: If true then untrigger/retrigger. If false then just untrigger.
+ *
+ * In the normal case we only ever call with "trigger=true" to start a
+ * transfer. That will un-trigger/disable the TCS from the last transfer
+ * then trigger/enable for this transfer.
+ *
+ * If we borrowed a wake TCS for an active-only transfer we'll also call
+ * this function with "trigger=false" to just do the un-trigger/disable
+ * before using the TCS for wake purposes again.
+ *
+ * Note that the AP is only in charge of triggering active-only transfers.
+ * The AP never triggers sleep/wake values using this function.
+ */
+static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
+{
+ u32 enable;
+
+ /*
+ * HW req: Clear the DRV_CONTROL and enable TCS again
+ * While clearing ensure that the AMC mode trigger is cleared
+ * and then the mode enable is cleared.
+ */
+ enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
+ enable &= ~TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+
+ if (trigger) {
+ /* Enable the AMC mode on the TCS and then trigger the TCS */
+ enable = TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable |= TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ }
+}
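A condensed, hypothetical call sequence for a wake TCS borrowed for an active-only transfer (function names as in this patch):

	__tcs_set_trigger(drv, tcs_id, true);	/* tcs_write(): start transfer */
	/* ... completion IRQ fires, tcs_tx_done() runs ... */
	__tcs_set_trigger(drv, tcs_id, false);	/* untrigger before wake reuse */
	enable_tcs_irq(drv, tcs_id, false);	/* silence solver-driven IRQs  */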
+
+/**
+ * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @enable: If true then enable; if false then disable
+ *
+ * We only ever call this when we borrow a wake TCS for an active-only
+ * transfer. For active-only TCSes interrupts are always left enabled.
+ */
+static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
+{
+ u32 data;
+
+ data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
+ if (enable)
+ data |= BIT(tcs_id);
+ else
+ data &= ~BIT(tcs_id);
+ writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
+}
+
+/**
+ * tcs_tx_done() - TX Done interrupt handler.
+ * @irq: The IRQ number (ignored).
+ * @p: Pointer to "struct rsc_drv".
+ *
+ * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
+ * IRQ for) when a transfer is done.
+ *
+ * Return: IRQ_HANDLED
*/
static irqreturn_t tcs_tx_done(int irq, void *p)
{
@@ -212,7 +397,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
const struct tcs_request *req;
struct tcs_cmd *cmd;
- irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);
+ irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);
for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
req = get_req_from_tcs(drv, i);
@@ -226,7 +411,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
u32 sts;
cmd = &req->cmds[j];
- sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
+ sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
if (!(sts & CMD_STATUS_ISSUED) ||
((req->wait_for_compl || cmd->wait) &&
!(sts & CMD_STATUS_COMPL))) {
@@ -237,13 +422,28 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
}
trace_rpmh_tx_done(drv, i, req, err);
+
+ /*
+ * If wake tcs was re-purposed for sending active
+ * votes, clear AMC trigger & enable modes and
+ * disable interrupt for this TCS
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ __tcs_set_trigger(drv, i, false);
skip:
/* Reclaim the TCS */
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
- write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
+ writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
spin_lock(&drv->lock);
clear_bit(i, drv->tcs_in_use);
+ /*
+ * Disable interrupt for WAKE TCS to avoid being
+ * spammed with interrupts coming when the solver
+ * sends its wake votes.
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ enable_tcs_irq(drv, i, false);
spin_unlock(&drv->lock);
if (req)
rpmh_tx_done(req, err);
@@ -252,6 +452,16 @@ skip:
return IRQ_HANDLED;
}
+/**
+ * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @cmd_id: The index within the TCS to start writing.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This is used for all types of transfers (active, sleep, and wake).
+ */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
const struct tcs_request *msg)
{
@@ -265,7 +475,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
cmd_msgid |= CMD_MSGID_WRITE;
- cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+ cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id);
for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
cmd = &msg->cmds[i];
@@ -281,32 +491,30 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
- cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
-static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
-{
- u32 enable;
-
- /*
- * HW req: Clear the DRV_CONTROL and enable TCS again
- * While clearing ensure that the AMC mode trigger is cleared
- * and then the mode enable is cleared.
- */
- enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
- enable &= ~TCS_AMC_MODE_TRIGGER;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
- enable &= ~TCS_AMC_MODE_ENABLE;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
-
- /* Enable the AMC mode on the TCS and then trigger the TCS */
- enable = TCS_AMC_MODE_ENABLE;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
- enable |= TCS_AMC_MODE_TRIGGER;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
-}
-
+/**
+ * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
+ * @drv: The controller.
+ * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This will walk through the TCSes in the group and check if any of them
+ * appear to be sending to addresses referenced in the message. If it finds
+ * one it'll return -EBUSY.
+ *
+ * Only for use with active-only transfers.
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: 0 if nothing in flight or -EBUSY if we should try again later.
+ * The caller must re-enable interrupts between tries since that's
+ * the only way tcs_is_free() will ever return true and the only way
+ * RSC_DRV_CMD_ENABLE will ever be cleared.
+ */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
const struct tcs_request *msg)
{
@@ -319,10 +527,10 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
if (tcs_is_free(drv, tcs_id))
continue;
- curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
- addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
+ addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
for (k = 0; k < msg->num_cmds; k++) {
if (addr == msg->cmds[k].addr)
return -EBUSY;
@@ -333,6 +541,15 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
return 0;
}
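A made-up example of the conflict this loop catches: suppose a busy TCS still has a command enabled whose RSC_DRV_CMD_ADDR reads back 0x30010, and a new message targets the same address:

	struct tcs_cmd cmds[] = { { .addr = 0x30010, .data = 0x1 } };
	struct tcs_request msg = { .num_cmds = 1, .cmds = cmds };
	/* check_for_req_inflight() finds the matching in-flight address and
	 * returns -EBUSY; tcs_write() unlocks and the caller retries later. */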
+/**
+ * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
+ * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
+ * we borrowed it because there are zero active-only ones).
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: The first tcs that's free.
+ */
static int find_free_tcs(struct tcs_group *tcs)
{
int i;
@@ -345,6 +562,20 @@ static int find_free_tcs(struct tcs_group *tcs)
return -EBUSY;
}
+/**
+ * tcs_write() - Store messages into a TCS right now, or return -EBUSY.
+ * @drv: The controller.
+ * @msg: The data to be sent.
+ *
+ * Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it.
+ *
+ * If there are no free TCSes for ACTIVE_ONLY transfers or if a command for
+ * the same address is already transferring returns -EBUSY which means the
+ * client should retry shortly.
+ *
+ * Return: 0 on success, -EBUSY if client should retry, or an error.
+ * Client should have interrupts enabled for a bit before retrying.
+ */
static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
struct tcs_group *tcs;
@@ -356,57 +587,77 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- spin_lock_irqsave(&tcs->lock, flags);
- spin_lock(&drv->lock);
+ spin_lock_irqsave(&drv->lock, flags);
/*
* The h/w does not like if we send a request to the same address,
* when one is already in-flight or being processed.
*/
ret = check_for_req_inflight(drv, tcs, msg);
- if (ret) {
- spin_unlock(&drv->lock);
- goto done_write;
- }
+ if (ret)
+ goto unlock;
- tcs_id = find_free_tcs(tcs);
- if (tcs_id < 0) {
- ret = tcs_id;
- spin_unlock(&drv->lock);
- goto done_write;
- }
+ ret = find_free_tcs(tcs);
+ if (ret < 0)
+ goto unlock;
+ tcs_id = ret;
tcs->req[tcs_id - tcs->offset] = msg;
set_bit(tcs_id, drv->tcs_in_use);
- spin_unlock(&drv->lock);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+ /*
+ * Clear previously programmed WAKE commands in selected
+ * repurposed TCS to avoid triggering them. tcs->slots will be
+ * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+ */
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+ enable_tcs_irq(drv, tcs_id, true);
+ }
+ spin_unlock_irqrestore(&drv->lock, flags);
+ /*
+ * These two can be done after the lock is released because:
+ * - We marked "tcs_in_use" under lock.
+ * - Once "tcs_in_use" has been marked nobody else could be writing
+ * to these registers until the interrupt goes off.
+ * - The interrupt can't go off until we trigger w/ the last line
+ * of __tcs_set_trigger() below.
+ */
__tcs_buffer_write(drv, tcs_id, 0, msg);
- __tcs_trigger(drv, tcs_id);
+ __tcs_set_trigger(drv, tcs_id, true);
-done_write:
- spin_unlock_irqrestore(&tcs->lock, flags);
+ return 0;
+unlock:
+ spin_unlock_irqrestore(&drv->lock, flags);
return ret;
}
/**
- * rpmh_rsc_send_data: Validate the incoming message and write to the
- * appropriate TCS block.
+ * rpmh_rsc_send_data() - Write / trigger active-only message.
+ * @drv: The controller.
+ * @msg: The data to be sent.
*
- * @drv: the controller
- * @msg: the data to be sent
+ * NOTES:
+ * - This is only used for "ACTIVE_ONLY" since the limitations of this
+ * function don't make sense for sleep/wake cases.
+ * - To do the transfer, we will grab a whole TCS for ourselves--we don't
+ * try to share. If there are none available we'll wait indefinitely
+ * for a free one.
+ * - This function will not wait for the commands to be finished, only for
+ * data to be programmed into the RPMh. See rpmh_tx_done() which will
+ * be called when the transfer is fully complete.
+ * - This function must be called with interrupts enabled. If the hardware
+ * is busy doing someone else's transfer we need that transfer to fully
+ * finish so that we can have the hardware, and to fully finish it needs
+ * the interrupt handler to run. If the interrupt is set to run on the
+ * active CPU this can never happen while interrupts are disabled.
*
* Return: 0 on success, -EINVAL on error.
- * Note: This call blocks until a valid data is written to the TCS.
*/
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
int ret;
- if (!msg || !msg->cmds || !msg->num_cmds ||
- msg->num_cmds > MAX_RPMH_PAYLOAD) {
- WARN_ON(1);
- return -EINVAL;
- }
-
do {
ret = tcs_write(drv, msg);
if (ret == -EBUSY) {
@@ -419,43 +670,28 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
return ret;
}
-static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
- int len)
-{
- int i, j;
-
- /* Check for already cached commands */
- for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
- if (tcs->cmd_cache[i] != cmd[0].addr)
- continue;
- if (i + len >= tcs->num_tcs * tcs->ncpt)
- goto seq_err;
- for (j = 0; j < len; j++) {
- if (tcs->cmd_cache[i + j] != cmd[j].addr)
- goto seq_err;
- }
- return i;
- }
-
- return -ENODATA;
-
-seq_err:
- WARN(1, "Message does not match previous sequence.\n");
- return -EINVAL;
-}
-
+/**
+ * find_slots() - Find a place to write the given message.
+ * @tcs: The tcs group to search.
+ * @msg: The message we want to find room for.
+ * @tcs_id: If we return 0 from the function, we return the global ID of the
+ * TCS to write to here.
+ * @cmd_id: If we return 0 from the function, we return the index of
+ * the command array of the returned TCS where the client should
+ * start writing the message.
+ *
+ * Only for use on sleep/wake TCSes since those are the only ones we maintain
+ * tcs->slots for.
+ *
+ * Return: -ENOMEM if there was no room, else 0.
+ */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
int *tcs_id, int *cmd_id)
{
int slot, offset;
int i = 0;
- /* Find if we already have the msg in our TCS */
- slot = find_match(tcs, msg->cmds, msg->num_cmds);
- if (slot >= 0)
- goto copy_data;
-
- /* Do over, until we can fit the full payload in a TCS */
+ /* Do over, until we can fit the full payload in a single TCS */
do {
slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
i, msg->num_cmds, 0);
@@ -464,11 +700,7 @@ static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
i += tcs->ncpt;
} while (slot + msg->num_cmds - 1 >= i);
-copy_data:
bitmap_set(tcs->slots, slot, msg->num_cmds);
- /* Copy the addresses of the resources over to the slots */
- for (i = 0; i < msg->num_cmds; i++)
- tcs->cmd_cache[slot + i] = msg->cmds[i].addr;
offset = slot / tcs->ncpt;
*tcs_id = offset + tcs->offset;
@@ -477,52 +709,157 @@ copy_data:
return 0;
}
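A worked example with hypothetical numbers: say ncpt = 16 and msg->num_cmds = 3. If bitmap_find_next_zero_area() first returns slot 14, then slot + 3 - 1 >= 16 means the payload would straddle a TCS boundary, so i advances to 16 and the search repeats. If slot 16 is then free:

	offset  = slot / tcs->ncpt;	/* 16 / 16 = 1: second TCS in group */
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;	/* 16 % 16 = 0: first command slot  */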
-static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
+/**
+ * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
+ * @drv: The controller.
+ * @msg: The data to be written to the controller.
+ *
+ * This should only be called for sleep/wake state, never active-only
+ * state.
+ *
+ * The caller must ensure that no other RPMH actions are happening and the
+ * controller is idle when this function is called since it runs lockless.
+ *
+ * Return: 0 if no error; else -error.
+ */
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
struct tcs_group *tcs;
int tcs_id = 0, cmd_id = 0;
- unsigned long flags;
int ret;
tcs = get_tcs_for_msg(drv, msg);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- spin_lock_irqsave(&tcs->lock, flags);
/* find the TCS id and the command in the TCS to write to */
ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
if (!ret)
__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
- spin_unlock_irqrestore(&tcs->lock, flags);
return ret;
}
/**
- * rpmh_rsc_write_ctrl_data: Write request to the controller
+ * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
+ * @drv: The controller
+ *
+ * Checks if any of the AMCs are busy in handling ACTIVE sets.
+ * This is called from the last CPU powering down before flushing the
+ * SLEEP and WAKE sets. If any AMCs are busy, the controller cannot enter
+ * power collapse, so deny the last CPU's PM notification.
*
- * @drv: the controller
- * @msg: the data to be written to the controller
+ * Context: Must be called with the drv->lock held.
*
- * There is no response returned for writing the request to the controller.
+ * Return:
+ * * False - AMCs are idle
+ * * True - AMCs are busy
*/
-int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
+static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
- if (!msg || !msg->cmds || !msg->num_cmds ||
- msg->num_cmds > MAX_RPMH_PAYLOAD) {
- pr_err("Payload error\n");
- return -EINVAL;
+ int m;
+ struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
+
+ /*
+ * If we made an active request on a RSC that does not have a
+ * dedicated TCS for active state use, then re-purposed wake TCSes
+ * should be checked for not busy, because we used wake TCSes for
+ * active requests in this case.
+ */
+ if (!tcs->num_tcs)
+ tcs = &drv->tcs[WAKE_TCS];
+
+ for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+ if (!tcs_is_free(drv, m))
+ return true;
}
- /* Data sent to this API will not be sent immediately */
- if (msg->state == RPMH_ACTIVE_ONLY_STATE)
- return -EINVAL;
+ return false;
+}
+
+/**
+ * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
+ * @nfb: Pointer to the notifier block in struct rsc_drv.
+ * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
+ * @v: Unused
+ *
+ * This function is given to cpu_pm_register_notifier so we can be informed
+ * about when CPUs go down. When all CPUs go down we know no more active
+ * transfers will be started so we write sleep/wake sets. This function gets
+ * called from cpuidle code paths and also at system suspend time.
+ *
+ * If it's the last CPU going down and the AMCs are not busy, this writes the
+ * cached sleep and wake messages to the TCSes. The firmware then takes care
+ * of triggering them when entering the deepest low power modes.
+ *
+ * Return: See cpu_pm_register_notifier()
+ */
+static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
+ unsigned long action, void *v)
+{
+ struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
+ int ret = NOTIFY_OK;
+ int cpus_in_pm;
- return tcs_ctrl_write(drv, msg);
+ switch (action) {
+ case CPU_PM_ENTER:
+ cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
+ /*
+ * NOTE: comments for num_online_cpus() point out that it's
+ * only a snapshot so we need to be careful. It should be OK
+ * for us to use, though. It's important for us not to miss
+ * if we're the last CPU going down so it would only be a
+ * problem if a CPU went offline right after we did the check
+ * AND that CPU was not idle AND that CPU was the last non-idle
+ * CPU. That can't happen. CPUs would have to come out of idle
+ * before the CPU could go offline.
+ */
+ if (cpus_in_pm < num_online_cpus())
+ return NOTIFY_OK;
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ atomic_dec(&drv->cpus_in_pm);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * It's likely we're on the last CPU. Grab the drv->lock and write
+ * out the sleep/wake commands to RPMH hardware. Grabbing the lock
+ * means that if we race with another CPU coming up we are still
+ * guaranteed to be safe. If another CPU came up just after we checked
+ * and has grabbed the lock or started an active transfer then we'll
+ * notice we're busy and abort. If another CPU comes up after we start
+ * flushing it will be blocked from starting an active transfer until
+ * we're done flushing. If another CPU starts an active transfer after
+ * we release the lock we're still OK because we're no longer the last
+ * CPU.
+ */
+ if (spin_trylock(&drv->lock)) {
+ if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
+ ret = NOTIFY_BAD;
+ spin_unlock(&drv->lock);
+ } else {
+ /* Another CPU must be up */
+ return NOTIFY_OK;
+ }
+
+ if (ret == NOTIFY_BAD) {
+ /* Double-check if we're here because someone else is up */
+ if (cpus_in_pm < num_online_cpus())
+ ret = NOTIFY_OK;
+ else
+ /* We won't be called w/ CPU_PM_ENTER_FAILED */
+ atomic_dec(&drv->cpus_in_pm);
+ }
+
+ return ret;
}
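An illustrative timeline for a 4-CPU system, tracing the logic above (not part of the patch):

	/* CPU0..CPU2 idle:  CPU_PM_ENTER, cpus_in_pm -> 1, 2, 3 (< 4: NOTIFY_OK)
	 * CPU3 idles:       CPU_PM_ENTER, cpus_in_pm -> 4 (== num_online_cpus())
	 *                   -> trylock drv->lock, check AMCs, rpmh_flush()
	 *                   -> NOTIFY_OK if flushed, NOTIFY_BAD if busy
	 * any CPU resumes:  CPU_PM_EXIT (or _ENTER_FAILED), cpus_in_pm--
	 */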
static int rpmh_probe_tcs_config(struct platform_device *pdev,
- struct rsc_drv *drv)
+ struct rsc_drv *drv, void __iomem *base)
{
struct tcs_type_config {
u32 type;
@@ -532,15 +869,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev,
u32 config, max_tcs, ncpt, offset;
int i, ret, n, st = 0;
struct tcs_group *tcs;
- struct resource *res;
- void __iomem *base;
- char drv_id[10] = {0};
-
- snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
if (ret)
@@ -584,7 +912,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev,
tcs->type = tcs_cfg[i].type;
tcs->num_tcs = tcs_cfg[i].n;
tcs->ncpt = ncpt;
- spin_lock_init(&tcs->lock);
if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
continue;
@@ -596,19 +923,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev,
tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
tcs->offset = st;
st += tcs->num_tcs;
-
- /*
- * Allocate memory to cache sleep and wake requests to
- * avoid reading TCS register memory.
- */
- if (tcs->type == ACTIVE_TCS)
- continue;
-
- tcs->cmd_cache = devm_kcalloc(&pdev->dev,
- tcs->num_tcs * ncpt, sizeof(u32),
- GFP_KERNEL);
- if (!tcs->cmd_cache)
- return -ENOMEM;
}
drv->num_tcs = st;
@@ -620,7 +934,11 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct rsc_drv *drv;
+ struct resource *res;
+ char drv_id[10] = {0};
int ret, irq;
+ u32 solver_config;
+ void __iomem *base;
/*
* Even though RPMh doesn't directly use cmd-db, all of its children
@@ -646,7 +964,13 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
if (!drv->name)
drv->name = dev_name(&pdev->dev);
- ret = rpmh_probe_tcs_config(pdev, drv);
+ snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = rpmh_probe_tcs_config(pdev, drv, base);
if (ret)
return ret;
@@ -663,8 +987,22 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /*
+ * CPU PM notifications are not required for controllers that support
+ * 'HW solver' mode, where the hardware can autonomously execute low
+ * power modes to power down.
+ */
+ solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
+ solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
+ solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
+ if (!solver_config) {
+ drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
+ cpu_pm_register_notifier(&drv->rsc_pm);
+ }
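The mask-then-shift above extracts a single bit; an equivalent one-line sketch using the same constants:

	solver_config = (readl_relaxed(base + DRV_SOLVER_CONFIG) >>
			 DRV_HW_SOLVER_SHIFT) & DRV_HW_SOLVER_MASK;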
+
/* Enable the active TCS to send requests immediately */
- write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);
+ writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
+ drv->tcs_base + RSC_DRV_IRQ_ENABLE);
spin_lock_init(&drv->client.cache_lock);
INIT_LIST_HEAD(&drv->client.cache);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index eb0ded059d2e..f2b5b46ccd1f 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -9,6 +9,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -119,6 +120,7 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
{
struct cache_req *req;
unsigned long flags;
+ u32 old_sleep_val, old_wake_val;
spin_lock_irqsave(&ctrlr->cache_lock, flags);
req = __find_req(ctrlr, cmd->addr);
@@ -133,26 +135,27 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
req->addr = cmd->addr;
req->sleep_val = req->wake_val = UINT_MAX;
- INIT_LIST_HEAD(&req->list);
list_add_tail(&req->list, &ctrlr->cache);
existing:
+ old_sleep_val = req->sleep_val;
+ old_wake_val = req->wake_val;
+
switch (state) {
case RPMH_ACTIVE_ONLY_STATE:
- if (req->sleep_val != UINT_MAX)
- req->wake_val = cmd->data;
- break;
case RPMH_WAKE_ONLY_STATE:
req->wake_val = cmd->data;
break;
case RPMH_SLEEP_STATE:
req->sleep_val = cmd->data;
break;
- default:
- break;
}
- ctrlr->dirty = true;
+ ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
+ req->wake_val != old_wake_val) &&
+ req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX;
+
unlock:
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
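The reworked dirty tracking only flags the controller once a resource has both votes programmed and one of them actually changed. Some made-up cases tracing the expression above:

	/* sleep_val: UINT_MAX -> 5, wake_val still UINT_MAX  => dirty unchanged
	 * wake_val:  UINT_MAX -> 7 with sleep_val == 5       => dirty = true
	 * wake_val:  7 -> 7 (same value rewritten)           => dirty unchanged
	 */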
@@ -287,6 +290,7 @@ static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_add_tail(&req->list, &ctrlr->batch_cache);
+ ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
@@ -294,12 +298,10 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
struct batch_cache_req *req;
const struct rpmh_request *rpm_msg;
- unsigned long flags;
int ret = 0;
int i;
/* Send Sleep/Wake requests to the controller, expect no response */
- spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_for_each_entry(req, &ctrlr->batch_cache, list) {
for (i = 0; i < req->count; i++) {
rpm_msg = req->rpm_msgs + i;
@@ -309,23 +311,10 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
break;
}
}
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
return ret;
}
-static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
-{
- struct batch_cache_req *req, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&ctrlr->cache_lock, flags);
- list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
- kfree(req);
- INIT_LIST_HEAD(&ctrlr->batch_cache);
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-}
-
/**
* rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
* batch to finish.
@@ -442,36 +431,42 @@ static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
}
/**
- * rpmh_flush: Flushes the buffered active and sleep sets to TCS
- *
- * @ctrlr: controller making request to flush cached data
+ * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
*
- * Return: -EBUSY if the controller is busy, probably waiting on a response
- * to a RPMH request sent earlier.
+ * @ctrlr: Controller making request to flush cached data
*
- * This function is always called from the sleep code from the last CPU
- * that is powering down the entire system. Since no other RPMH API would be
- * executing at this time, it is safe to run lockless.
+ * Return:
+ * * 0 - Success
+ * * Error code - Otherwise
*/
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
- int ret;
+ int ret = 0;
+
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Currently rpmh_flush() is only called when we think we're running
+ * on the last processor. If the lock is busy it means another
+ * processor is up and it's better to abort than spin.
+ */
+ if (!spin_trylock(&ctrlr->cache_lock))
+ return -EBUSY;
if (!ctrlr->dirty) {
pr_debug("Skipping flush, TCS has latest data.\n");
- return 0;
+ goto exit;
}
+ /* Invalidate the TCSes first to avoid stale data */
+ rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+
/* First flush the cached batch requests */
ret = flush_batch(ctrlr);
if (ret)
- return ret;
+ goto exit;
- /*
- * Nobody else should be calling this function other than system PM,
- * hence we can run without locks.
- */
list_for_each_entry(p, &ctrlr->cache, list) {
if (!is_req_valid(p)) {
pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
@@ -481,38 +476,40 @@ int rpmh_flush(struct rpmh_ctrlr *ctrlr)
ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
p->sleep_val);
if (ret)
- return ret;
+ goto exit;
ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
p->wake_val);
if (ret)
- return ret;
+ goto exit;
}
ctrlr->dirty = false;
- return 0;
+exit:
+ spin_unlock(&ctrlr->cache_lock);
+ return ret;
}
/**
- * rpmh_invalidate: Invalidate all sleep and active sets
- * sets.
+ * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
*
* @dev: The device making the request
*
- * Invalidate the sleep and active values in the TCS blocks.
+ * Invalidate the sleep and wake values in batch_cache.
*/
int rpmh_invalidate(const struct device *dev)
{
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
- int ret;
+ struct batch_cache_req *req, *tmp;
+ unsigned long flags;
- invalidate_batch(ctrlr);
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+ kfree(req);
+ INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr->dirty = true;
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
- do {
- ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
- } while (ret == -EAGAIN);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 4d264d0672c4..e72426221a69 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
@@ -166,6 +167,24 @@ static const struct rpmhpd_desc sm8150_desc = {
.num_pds = ARRAY_SIZE(sm8150_rpmhpds),
};
+static struct rpmhpd *sm8250_rpmhpds[] = {
+ [SM8250_CX] = &sdm845_cx,
+ [SM8250_CX_AO] = &sdm845_cx_ao,
+ [SM8250_EBI] = &sdm845_ebi,
+ [SM8250_GFX] = &sdm845_gfx,
+ [SM8250_LCX] = &sdm845_lcx,
+ [SM8250_LMX] = &sdm845_lmx,
+ [SM8250_MMCX] = &sm8150_mmcx,
+ [SM8250_MMCX_AO] = &sm8150_mmcx_ao,
+ [SM8250_MX] = &sdm845_mx,
+ [SM8250_MX_AO] = &sdm845_mx_ao,
+};
+
+static const struct rpmhpd_desc sm8250_desc = {
+ .rpmhpds = sm8250_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8250_rpmhpds),
+};
+
/* SC7180 RPMH powerdomains */
static struct rpmhpd *sc7180_rpmhpds[] = {
[SC7180_CX] = &sdm845_cx,
@@ -187,8 +206,10 @@ static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
+ { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, rpmhpd_match_table);
static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
unsigned int corner, bool sync)
@@ -460,3 +481,6 @@ static int __init rpmhpd_init(void)
return platform_driver_register(&rpmhpd_driver);
}
core_initcall(rpmhpd_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 2b1834c5609a..f2168e4259b2 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_domain.h>
#include <linux/of.h>
@@ -226,6 +227,7 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, rpmpd_match_table);
static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
{
@@ -422,3 +424,6 @@ static int __init rpmpd_init(void)
return platform_driver_register(&rpmpd_driver);
}
core_initcall(rpmpd_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPM Power Domain Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index c7300d54e444..07183d731d74 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -474,10 +474,8 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
goto report_read_failure;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
+ if (irq < 0)
return irq;
- }
smp2p->mbox_client.dev = &pdev->dev;
smp2p->mbox_client.knows_txdone = true;
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index ebb49aee179b..5983c6ffb078 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -188,6 +188,10 @@ static const struct soc_id soc_id[] = {
{ 216, "MSM8674PRO" },
{ 217, "MSM8974-AA" },
{ 218, "MSM8974-AB" },
+ { 233, "MSM8936" },
+ { 239, "MSM8939" },
+ { 240, "APQ8036" },
+ { 241, "APQ8039" },
{ 246, "MSM8996" },
{ 247, "APQ8016" },
{ 248, "MSM8216" },
@@ -430,6 +434,8 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
qs->attr.family = "Snapdragon";
qs->attr.machine = socinfo_machine(&pdev->dev,
le32_to_cpu(info->id));
+ qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u",
+ le32_to_cpu(info->id));
qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",
SOCINFO_MAJOR(le32_to_cpu(info->ver)),
SOCINFO_MINOR(le32_to_cpu(info->ver)));
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 1982c7fb45fa..53cd8d2d0cd2 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -83,6 +83,13 @@ config ARCH_R8A7740
select ARM_ERRATA_754322
select RENESAS_INTC_IRQPIN
+config ARCH_R8A7742
+ bool "RZ/G1H (R8A77420)"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select ARM_ERRATA_814220
+ select SYSC_R8A7742
+
config ARCH_R8A7743
bool "RZ/G1M (R8A77430)"
select ARCH_RCAR_GEN2
@@ -261,6 +268,10 @@ config ARCH_R8A77995
endif # ARM64
# SoC
+config SYSC_R8A7742
+ bool "RZ/G1H System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A7743
bool "RZ/G1M System Controller support" if COMPILE_TEST
select SYSC_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index e595c3c3bd10..08296d78e2ad 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
# SoC
+obj-$(CONFIG_SYSC_R8A7742) += r8a7742-sysc.o
obj-$(CONFIG_SYSC_R8A7743) += r8a7743-sysc.o
obj-$(CONFIG_SYSC_R8A7745) += r8a7745-sysc.o
obj-$(CONFIG_SYSC_R8A77470) += r8a77470-sysc.o
diff --git a/drivers/soc/renesas/r8a7742-sysc.c b/drivers/soc/renesas/r8a7742-sysc.c
new file mode 100644
index 000000000000..219a675f83f4
--- /dev/null
+++ b/drivers/soc/renesas/r8a7742-sysc.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G1H System Controller
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7742-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7742_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7742_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7742_PD_CA15_SCU, R8A7742_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7742_PD_CA15_CPU0, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7742_PD_CA15_CPU1, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu2", 0x40, 2, R8A7742_PD_CA15_CPU2, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu3", 0x40, 3, R8A7742_PD_CA15_CPU3, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca7-scu", 0x100, 0, R8A7742_PD_CA7_SCU, R8A7742_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7742_PD_CA7_CPU0, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7742_PD_CA7_CPU1, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu2", 0x1c0, 2, R8A7742_PD_CA7_CPU2, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu3", 0x1c0, 3, R8A7742_PD_CA7_CPU3, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "rgx", 0xc0, 0, R8A7742_PD_RGX, R8A7742_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7742_sysc_info __initconst = {
+ .areas = r8a7742_areas,
+ .num_areas = ARRAY_SIZE(r8a7742_areas),
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 2af2e0dd83fe..a2b2b1768768 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -39,6 +39,7 @@ static const struct rst_config rcar_rst_gen3 __initconst = {
static const struct of_device_id rcar_rst_matches[] __initconst = {
/* RZ/G1 is handled like R-Car Gen2 */
+ { .compatible = "renesas,r8a7742-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7743-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7744-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7745-rst", .data = &rcar_rst_gen2 },
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index f0b291e02b8a..04ea87a188f1 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -273,6 +273,9 @@ finalize:
}
static const struct of_device_id rcar_sysc_matches[] __initconst = {
+#ifdef CONFIG_SYSC_R8A7742
+ { .compatible = "renesas,r8a7742-sysc", .data = &r8a7742_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A7743
{ .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
/* RZ/G1N is identical to RZ/G2M w.r.t. power domains. */
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 0fc3b119930a..e417f26fe155 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -49,6 +49,7 @@ struct rcar_sysc_info {
u32 extmask_val; /* SYSCEXTMASK register mask value */
};
+extern const struct rcar_sysc_info r8a7742_sysc_info;
extern const struct rcar_sysc_info r8a7743_sysc_info;
extern const struct rcar_sysc_info r8a7745_sysc_info;
extern const struct rcar_sysc_info r8a77470_sysc_info;
diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c
index a5069394cd61..44d7e1951da3 100644
--- a/drivers/soc/sifive/sifive_l2_cache.c
+++ b/drivers/soc/sifive/sifive_l2_cache.c
@@ -9,6 +9,8 @@
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/device.h>
+#include <asm/cacheinfo.h>
#include <soc/sifive/sifive_l2_cache.h>
#define SIFIVE_L2_DIRECCFIX_LOW 0x100
@@ -31,6 +33,7 @@
static void __iomem *l2_base;
static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+static struct riscv_cacheinfo_ops l2_cache_ops;
enum {
DIR_CORR = 0,
@@ -48,7 +51,7 @@ static ssize_t l2_write(struct file *file, const char __user *data,
if (kstrtouint_from_user(data, count, 0, &val))
return -EINVAL;
- if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+ if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
else
return -EINVAL;
@@ -107,6 +110,38 @@ int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
+static int l2_largest_wayenabled(void)
+{
+ return readl(l2_base + SIFIVE_L2_WAYENABLE) & 0xFF;
+}
+
+static ssize_t number_of_ways_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", l2_largest_wayenabled());
+}
+
+static DEVICE_ATTR_RO(number_of_ways_enabled);
+
+static struct attribute *priv_attrs[] = {
+ &dev_attr_number_of_ways_enabled.attr,
+ NULL,
+};
+
+static const struct attribute_group priv_attr_group = {
+ .attrs = priv_attrs,
+};
+
+static const struct attribute_group *l2_get_priv_group(struct cacheinfo *this_leaf)
+{
+ /* We want to use private group for L2 cache only */
+ if (this_leaf->level == 2)
+ return &priv_attr_group;
+ else
+ return NULL;
+}
+
static irqreturn_t l2_int_handler(int irq, void *device)
{
unsigned int add_h, add_l;
@@ -170,6 +205,9 @@ static int __init sifive_l2_init(void)
l2_config_read();
+ l2_cache_ops.get_priv_group = l2_get_priv_group;
+ riscv_set_cacheinfo_ops(&l2_cache_ops);
+
#ifdef CONFIG_DEBUG_FS
setup_sifive_debug();
#endif
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 3693532949b8..6bc603d0b9d9 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -133,6 +133,7 @@ config SOC_TEGRA_FLOWCTRL
config SOC_TEGRA_PMC
bool
+ select GENERIC_PINCONF
config SOC_TEGRA_POWERGATE_BPMP
def_bool y
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 802717b9f6a3..d1f8dd0289e6 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -300,6 +300,59 @@ static void tegra_enable_fuse_clk(void __iomem *base)
writel(reg, base + 0x14);
}
+static ssize_t major_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tegra_get_major_rev());
+}
+
+static DEVICE_ATTR_RO(major);
+
+static ssize_t minor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tegra_get_minor_rev());
+}
+
+static DEVICE_ATTR_RO(minor);
+
+static struct attribute *tegra_soc_attr[] = {
+ &dev_attr_major.attr,
+ &dev_attr_minor.attr,
+ NULL,
+};
+
+const struct attribute_group tegra_soc_attr_group = {
+ .attrs = tegra_soc_attr,
+};
+
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+static ssize_t platform_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ /*
+ * Displays the value in the 'pre_si_platform' field of the HIDREV
+ * register for Tegra194 devices. A value of 0 indicates that the
+ * platform type is silicon, and all other non-zero values indicate
+ * the type of simulation platform being used.
+ */
+ return sprintf(buf, "%d\n", (tegra_read_chipid() >> 20) & 0xf);
+}
+
+static DEVICE_ATTR_RO(platform);
+
+static struct attribute *tegra194_soc_attr[] = {
+ &dev_attr_major.attr,
+ &dev_attr_minor.attr,
+ &dev_attr_platform.attr,
+ NULL,
+};
+
+const struct attribute_group tegra194_soc_attr_group = {
+ .attrs = tegra194_soc_attr,
+};
+#endif
+
struct device * __init tegra_soc_device_register(void)
{
struct soc_device_attribute *attr;
@@ -310,8 +363,10 @@ struct device * __init tegra_soc_device_register(void)
return NULL;
attr->family = kasprintf(GFP_KERNEL, "Tegra");
- attr->revision = kasprintf(GFP_KERNEL, "%d", tegra_sku_info.revision);
+ attr->revision = kasprintf(GFP_KERNEL, "%s",
+ tegra_revision_name[tegra_sku_info.revision]);
attr->soc_id = kasprintf(GFP_KERNEL, "%u", tegra_get_chip_id());
+ attr->custom_attr_group = fuse->soc->soc_attr_group;
dev = soc_device_register(attr);
if (IS_ERR(dev)) {
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
index d4aef9c4a94c..16aaa28573ac 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -164,4 +164,5 @@ const struct tegra_fuse_soc tegra20_fuse_soc = {
.speedo_init = tegra20_init_speedo_data,
.probe = tegra20_fuse_probe,
.info = &tegra20_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
};
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index e6037f900fb7..85accef41fa1 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -111,6 +111,7 @@ const struct tegra_fuse_soc tegra30_fuse_soc = {
.init = tegra30_fuse_init,
.speedo_init = tegra30_init_speedo_data,
.info = &tegra30_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -125,6 +126,7 @@ const struct tegra_fuse_soc tegra114_fuse_soc = {
.init = tegra30_fuse_init,
.speedo_init = tegra114_init_speedo_data,
.info = &tegra114_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -205,6 +207,7 @@ const struct tegra_fuse_soc tegra124_fuse_soc = {
.info = &tegra124_fuse_info,
.lookups = tegra124_fuse_lookups,
.num_lookups = ARRAY_SIZE(tegra124_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -290,6 +293,7 @@ const struct tegra_fuse_soc tegra210_fuse_soc = {
.info = &tegra210_fuse_info,
.lookups = tegra210_fuse_lookups,
.num_lookups = ARRAY_SIZE(tegra210_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -319,6 +323,7 @@ const struct tegra_fuse_soc tegra186_fuse_soc = {
.info = &tegra186_fuse_info,
.lookups = tegra186_fuse_lookups,
.num_lookups = ARRAY_SIZE(tegra186_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -348,5 +353,6 @@ const struct tegra_fuse_soc tegra194_fuse_soc = {
.info = &tegra194_fuse_info,
.lookups = tegra194_fuse_lookups,
.num_lookups = ARRAY_SIZE(tegra194_fuse_lookups),
+ .soc_attr_group = &tegra194_soc_attr_group,
};
#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 94a059e577a1..9d4fc315a007 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -32,6 +32,8 @@ struct tegra_fuse_soc {
const struct nvmem_cell_lookup *lookups;
unsigned int num_lookups;
+
+ const struct attribute_group *soc_attr_group;
};
struct tegra_fuse {
@@ -64,6 +66,11 @@ void tegra_init_apbmisc(void);
bool __init tegra_fuse_read_spare(unsigned int spare);
u32 __init tegra_fuse_read_early(unsigned int offset);
+u8 tegra_get_major_rev(void);
+u8 tegra_get_minor_rev(void);
+
+extern const struct attribute_group tegra_soc_attr_group;
+
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
void tegra20_init_speedo_data(struct tegra_sku_info *sku_info);
#endif
@@ -110,6 +117,7 @@ extern const struct tegra_fuse_soc tegra186_fuse_soc;
#ifdef CONFIG_ARCH_TEGRA_194_SOC
extern const struct tegra_fuse_soc tegra194_fuse_soc;
+extern const struct attribute_group tegra194_soc_attr_group;
#endif
#endif
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 089d9340564b..3cdd69d1bd4d 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -37,6 +37,16 @@ u8 tegra_get_chip_id(void)
return (tegra_read_chipid() >> 8) & 0xff;
}
+u8 tegra_get_major_rev(void)
+{
+ return (tegra_read_chipid() >> 4) & 0xf;
+}
+
+u8 tegra_get_minor_rev(void)
+{
+ return (tegra_read_chipid() >> 16) & 0xf;
+}
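Both helpers slice fields out of the HIDREV value returned by tegra_read_chipid(). With a hypothetical chipid of 0x02171014 (illustrative only, not from a datasheet):

	/* tegra_get_chip_id()   = (v >>  8) & 0xff = 0x10
	 * tegra_get_major_rev() = (v >>  4) & 0xf  = 0x1
	 * tegra_get_minor_rev() = (v >> 16) & 0xf  = 0x7
	 * pre_si_platform       = (v >> 20) & 0xf  = 0x1  (Tegra194 sysfs only)
	 */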
+
u32 tegra_read_straps(void)
{
WARN(!chipid, "Tegra ABP MISC not yet available\n");
@@ -65,36 +75,32 @@ static const struct of_device_id apbmisc_match[] __initconst = {
void __init tegra_init_revision(void)
{
- u32 id, chip_id, minor_rev;
- int rev;
+ u8 chip_id, minor_rev;
- id = tegra_read_chipid();
- chip_id = (id >> 8) & 0xff;
- minor_rev = (id >> 16) & 0xf;
+ chip_id = tegra_get_chip_id();
+ minor_rev = tegra_get_minor_rev();
switch (minor_rev) {
case 1:
- rev = TEGRA_REVISION_A01;
+ tegra_sku_info.revision = TEGRA_REVISION_A01;
break;
case 2:
- rev = TEGRA_REVISION_A02;
+ tegra_sku_info.revision = TEGRA_REVISION_A02;
break;
case 3:
if (chip_id == TEGRA20 && (tegra_fuse_read_spare(18) ||
tegra_fuse_read_spare(19)))
- rev = TEGRA_REVISION_A03p;
+ tegra_sku_info.revision = TEGRA_REVISION_A03p;
else
- rev = TEGRA_REVISION_A03;
+ tegra_sku_info.revision = TEGRA_REVISION_A03;
break;
case 4:
- rev = TEGRA_REVISION_A04;
+ tegra_sku_info.revision = TEGRA_REVISION_A04;
break;
default:
- rev = TEGRA_REVISION_UNKNOWN;
+ tegra_sku_info.revision = TEGRA_REVISION_UNKNOWN;
}
- tegra_sku_info.revision = rev;
-
tegra_sku_info.sku_id = tegra_fuse_read_early(FUSE_SKU_INFO);
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 1c533a969f54..42cf37a0556b 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -3063,6 +3063,7 @@ static const struct pinctrl_pin_desc tegra210_pin_descs[] = {
static const struct tegra_wake_event tegra210_wake_events[] = {
TEGRA_WAKE_IRQ("rtc", 16, 2),
+ TEGRA_WAKE_IRQ("pmu", 51, 86),
};
static const struct tegra_pmc_soc tegra210_pmc_soc = {
@@ -3193,6 +3194,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
}
static const struct tegra_wake_event tegra186_wake_events[] = {
+ TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
};
@@ -3325,6 +3327,7 @@ static const char * const tegra194_reset_sources[] = {
};
static const struct tegra_wake_event tegra194_wake_events[] = {
+ TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
};
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index 4486e055794c..e192fb788836 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -91,6 +91,16 @@ config TI_K3_RINGACC
and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs
If unsure, say N.
+config TI_K3_SOCINFO
+ bool
+ depends on ARCH_K3 || COMPILE_TEST
+ select SOC_BUS
+ select MFD_SYSCON
+ help
+ Include support for the SoC bus socinfo for the TI K3 Multicore SoC
+ platforms to provide information about the SoC family and
+ variant to user space.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index bec827937a5f..1110e5c98685 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
+obj-$(CONFIG_TI_K3_SOCINFO) += k3-socinfo.o
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
new file mode 100644
index 000000000000..af0ba5288e58
--- /dev/null
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 SoC info driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+#define CTRLMMR_WKUP_JTAGID_REG 0
+/*
+ * Bits:
+ * 31-28 VARIANT Device variant
+ * 27-12 PARTNO Part number
+ * 11-1 MFG Indicates TI as manufacturer (0x17)
+ * 0 Always 1
+ */
+#define CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT (28)
+#define CTRLMMR_WKUP_JTAGID_VARIANT_MASK GENMASK(31, 28)
+
+#define CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT (12)
+#define CTRLMMR_WKUP_JTAGID_PARTNO_MASK GENMASK(27, 12)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_SHIFT (1)
+#define CTRLMMR_WKUP_JTAGID_MFG_MASK GENMASK(11, 1)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_TI 0x17
+
+static const struct k3_soc_id {
+ unsigned int id;
+ const char *family_name;
+} k3_soc_ids[] = {
+ { 0xBB5A, "AM65X" },
+ { 0xBB64, "J721E" },
+};
+
+static int
+k3_chipinfo_partno_to_names(unsigned int partno,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(k3_soc_ids); i++)
+ if (partno == k3_soc_ids[i].id) {
+ soc_dev_attr->family = k3_soc_ids[i].family_name;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int k3_chipinfo_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device *dev = &pdev->dev;
+ struct soc_device *soc_dev;
+ struct regmap *regmap;
+ u32 partno_id;
+ u32 variant;
+ u32 jtag_id;
+ u32 mfg;
+ int ret;
+
+ regmap = device_node_to_regmap(node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = regmap_read(regmap, CTRLMMR_WKUP_JTAGID_REG, &jtag_id);
+ if (ret < 0)
+ return ret;
+
+ mfg = (jtag_id & CTRLMMR_WKUP_JTAGID_MFG_MASK) >>
+ CTRLMMR_WKUP_JTAGID_MFG_SHIFT;
+
+ if (mfg != CTRLMMR_WKUP_JTAGID_MFG_TI) {
+ dev_err(dev, "Invalid MFG SoC\n");
+ return -ENODEV;
+ }
+
+ variant = (jtag_id & CTRLMMR_WKUP_JTAGID_VARIANT_MASK) >>
+ CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT;
+ variant++;
+
+ partno_id = (jtag_id & CTRLMMR_WKUP_JTAGID_PARTNO_MASK) >>
+ CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0", variant);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr);
+ if (ret) {
+ dev_err(dev, "Unknown SoC JTAGID[0x%08X]\n", jtag_id);
+ ret = -ENODEV;
+ goto err_free_rev;
+ }
+
+ node = of_find_node_by_path("/");
+ of_property_read_string(node, "model", &soc_dev_attr->machine);
+ of_node_put(node);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err_free_rev;
+ }
+
+ dev_info(dev, "Family:%s rev:%s JTAGID[0x%08x] Detected\n",
+ soc_dev_attr->family,
+ soc_dev_attr->revision, jtag_id);
+
+ return 0;
+
+err_free_rev:
+ kfree(soc_dev_attr->revision);
+err:
+ kfree(soc_dev_attr);
+ return ret;
+}
+
+static const struct of_device_id k3_chipinfo_of_match[] = {
+ { .compatible = "ti,am654-chipid", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver k3_chipinfo_driver = {
+ .driver = {
+ .name = "k3-chipinfo",
+ .of_match_table = k3_chipinfo_of_match,
+ },
+ .probe = k3_chipinfo_probe,
+};
+
+static int __init k3_chipinfo_init(void)
+{
+ return platform_driver_register(&k3_chipinfo_driver);
+}
+subsys_initcall(k3_chipinfo_init);
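As a worked example of the mask arithmetic in k3_chipinfo_probe(), here is a user-space sketch decoding an illustrative AM65X-style JTAGID value (the constant is an assumption for demonstration, not read from hardware):

#include <stdint.h>
#include <stdio.h>

#define VARIANT_SHIFT 28
#define VARIANT_MASK  (0xfu << 28)	/* GENMASK(31, 28) */
#define PARTNO_SHIFT  12
#define PARTNO_MASK   (0xffffu << 12)	/* GENMASK(27, 12) */
#define MFG_SHIFT     1
#define MFG_MASK      (0x7ffu << 1)	/* GENMASK(11, 1) */

int main(void)
{
	uint32_t jtag_id = 0x0bb5a02f;	/* illustrative AM65X-style value */
	uint32_t variant = ((jtag_id & VARIANT_MASK) >> VARIANT_SHIFT) + 1;
	uint32_t partno  = (jtag_id & PARTNO_MASK) >> PARTNO_SHIFT;
	uint32_t mfg     = (jtag_id & MFG_MASK) >> MFG_SHIFT;

	/* prints: partno 0xbb5a variant SR1.0 mfg 0x17 (0x17 == TI) */
	printf("partno %#x variant SR%x.0 mfg %#x\n",
	       (unsigned)partno, (unsigned)variant, (unsigned)mfg);
	return 0;
}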
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 37f3db6c041c..aa071d96ef36 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -409,7 +409,7 @@ static int knav_gp_close_queue(struct knav_range_info *range,
return 0;
}
-struct knav_range_ops knav_gp_range_ops = {
+static struct knav_range_ops knav_gp_range_ops = {
.set_notify = knav_gp_set_notify,
.open_queue = knav_gp_open_queue,
.close_queue = knav_gp_close_queue,
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 23d90cb12ba9..226d343f0a6a 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -23,8 +23,6 @@
/* Flag stating if PM nodes mapped to the PM domain has been requested */
#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
-static const struct zynqmp_eemi_ops *eemi_ops;
-
static int min_capability;
/**
@@ -76,11 +74,8 @@ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->set_requirement)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
- ret = eemi_ops->set_requirement(pd->node_id,
+ ret = zynqmp_pm_set_requirement(pd->node_id,
ZYNQMP_PM_CAPABILITY_ACCESS,
ZYNQMP_PM_MAX_QOS,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
@@ -111,9 +106,6 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
u32 capabilities = min_capability;
bool may_wakeup;
- if (!eemi_ops->set_requirement)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If domain is already released there is nothing to be done */
@@ -134,7 +126,7 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
}
}
- ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0,
+ ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0,
ZYNQMP_PM_REQUEST_ACK_NO);
/**
* If powering down of any node inside this domain fails,
@@ -163,16 +155,13 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->request_node)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the first device to attach there is nothing to do */
if (domain->device_count)
return 0;
- ret = eemi_ops->request_node(pd->node_id, 0, 0,
+ ret = zynqmp_pm_request_node(pd->node_id, 0, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
/* If requesting a node fails print and return the error */
if (ret) {
@@ -199,16 +188,13 @@ static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->release_node)
- return;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the last device to detach there is nothing to do */
if (domain->device_count)
return;
- ret = eemi_ops->release_node(pd->node_id);
+ ret = zynqmp_pm_release_node(pd->node_id);
/* If releasing a node fails print the error and return */
if (ret) {
pr_err("%s() %s release failed for node %d: %d\n",
@@ -266,10 +252,6 @@ static int zynqmp_gpd_probe(struct platform_device *pdev)
struct zynqmp_pm_domain *pd;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 09227895d216..31ff49fcd078 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -30,7 +30,6 @@ struct zynqmp_pm_work_struct {
static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
static struct mbox_chan *rx_chan;
-static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -155,9 +154,6 @@ static ssize_t suspend_mode_store(struct device *dev,
{
int md, ret = -EINVAL;
- if (!eemi_ops->set_suspend_mode)
- return ret;
-
for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
if (suspend_modes[md] &&
sysfs_streq(suspend_modes[md], buf)) {
@@ -166,7 +162,7 @@ static ssize_t suspend_mode_store(struct device *dev,
}
if (!ret && md != suspend_mode) {
- ret = eemi_ops->set_suspend_mode(md);
+ ret = zynqmp_pm_set_suspend_mode(md);
if (likely(!ret))
suspend_mode = md;
}
@@ -182,15 +178,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
u32 pm_api_version;
struct mbox_client *client;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
- if (!eemi_ops->get_api_version || !eemi_ops->init_finalize)
- return -ENXIO;
-
- eemi_ops->init_finalize();
- eemi_ops->get_api_version(&pm_api_version);
+ zynqmp_pm_init_finalize();
+ zynqmp_pm_get_api_version(&pm_api_version);
/* Check PM API version number */
if (pm_api_version < ZYNQMP_PM_VERSION)
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index e2cdff990e9f..b5871612613b 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -4,7 +4,8 @@
#
#Bus Objs
-soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
+soundwire-bus-objs := bus_type.o bus.o master.o slave.o mipi_disco.o stream.o \
+ sysfs_slave.o sysfs_slave_dpn.o
obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
ifdef CONFIG_DEBUG_FS
@@ -16,12 +17,9 @@ soundwire-cadence-objs := cadence_master.o
obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
#Intel driver
-soundwire-intel-objs := intel.o
+soundwire-intel-objs := intel.o intel_init.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
-soundwire-intel-init-objs := intel_init.o
-obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel-init.o
-
#Qualcomm driver
soundwire-qcom-objs := qcom.o
obj-$(CONFIG_SOUNDWIRE_QCOM) += soundwire-qcom.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 488c3c9e4947..24ba77226376 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -8,24 +8,54 @@
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include "bus.h"
+#include "sysfs_local.h"
+
+static DEFINE_IDA(sdw_ida);
+
+static int sdw_get_id(struct sdw_bus *bus)
+{
+ int rc = ida_alloc(&sdw_ida, GFP_KERNEL);
+
+ if (rc < 0)
+ return rc;
+
+ bus->id = rc;
+ return 0;
+}
/**
- * sdw_add_bus_master() - add a bus Master instance
+ * sdw_bus_master_add() - add a bus Master instance
* @bus: bus instance
+ * @parent: parent device
+ * @fwnode: firmware node handle
*
* Initializes the bus instance, reads properties and creates child
* devices.
*/
-int sdw_add_bus_master(struct sdw_bus *bus)
+int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
+ struct fwnode_handle *fwnode)
{
struct sdw_master_prop *prop = NULL;
int ret;
- if (!bus->dev) {
- pr_err("SoundWire bus has no device\n");
+ if (!parent) {
+ pr_err("SoundWire parent device is not set\n");
return -ENODEV;
}
+ ret = sdw_get_id(bus);
+ if (ret) {
+ dev_err(parent, "Failed to get bus id\n");
+ return ret;
+ }
+
+ ret = sdw_master_device_add(bus, parent, fwnode);
+ if (ret) {
+ dev_err(parent, "Failed to add master device at link %d\n",
+ bus->link_id);
+ return ret;
+ }
+
if (!bus->ops) {
dev_err(bus->dev, "SoundWire Bus ops are not set\n");
return -EINVAL;
@@ -107,7 +137,7 @@ int sdw_add_bus_master(struct sdw_bus *bus)
return 0;
}
-EXPORT_SYMBOL(sdw_add_bus_master);
+EXPORT_SYMBOL(sdw_bus_master_add);
static int sdw_delete_slave(struct device *dev, void *data)
{
@@ -131,18 +161,20 @@ static int sdw_delete_slave(struct device *dev, void *data)
}
/**
- * sdw_delete_bus_master() - delete the bus master instance
+ * sdw_bus_master_delete() - delete the bus master instance
* @bus: bus to be deleted
*
* Remove the instance, delete the child devices.
*/
-void sdw_delete_bus_master(struct sdw_bus *bus)
+void sdw_bus_master_delete(struct sdw_bus *bus)
{
device_for_each_child(bus->dev, NULL, sdw_delete_slave);
+ sdw_master_device_del(bus);
sdw_bus_debugfs_exit(bus);
+ ida_free(&sdw_ida, bus->id);
}
-EXPORT_SYMBOL(sdw_delete_bus_master);
+EXPORT_SYMBOL(sdw_bus_master_delete);
/*
* SDW IO Calls
@@ -284,9 +316,10 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
msg->flags = flags;
msg->buf = buf;
- if (addr < SDW_REG_NO_PAGE) { /* no paging area */
+ if (addr < SDW_REG_NO_PAGE) /* no paging area */
return 0;
- } else if (addr >= SDW_REG_MAX) { /* illegal addr */
+
+ if (addr >= SDW_REG_MAX) { /* illegal addr */
pr_err("SDW: Invalid address %x passed\n", addr);
return -EINVAL;
}
@@ -306,7 +339,9 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
if (!slave) {
pr_err("SDW: No slave for paging addr\n");
return -EINVAL;
- } else if (!slave->prop.paging_support) {
+ }
+
+ if (!slave->prop.paging_support) {
dev_err(&slave->dev,
"address %x needs paging but no support\n", addr);
return -EINVAL;
@@ -375,8 +410,8 @@ sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
ret = sdw_transfer(bus, &msg);
if (ret < 0)
return ret;
- else
- return buf;
+
+ return buf;
}
static int
@@ -471,8 +506,8 @@ int sdw_read(struct sdw_slave *slave, u32 addr)
ret = sdw_nread(slave, addr, 1, &buf);
if (ret < 0)
return ret;
- else
- return buf;
+
+ return buf;
}
EXPORT_SYMBOL(sdw_read);
@@ -563,9 +598,9 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
}
if (!new_device)
- dev_info(slave->bus->dev,
- "Slave already registered, reusing dev_num:%d\n",
- slave->dev_num);
+ dev_dbg(slave->bus->dev,
+ "Slave already registered, reusing dev_num:%d\n",
+ slave->dev_num);
/* Clear the slave->dev_num to transfer message on device 0 */
dev_num = slave->dev_num;
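sdw_get_id() above leans on an IDA so each bus gets the smallest free id and ids are recycled on delete. The same pattern in isolation, as a minimal kernel-style sketch unrelated to SoundWire:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* ida_alloc() hands back the smallest free id, or a negative errno. */
static int example_get_id(unsigned int *id)
{
	int rc = ida_alloc(&example_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;

	*id = rc;
	return 0;
}

/* Returning the id makes it reusable, as sdw_bus_master_delete() does. */
static void example_put_id(unsigned int id)
{
	ida_free(&example_ida, id);
}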
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index 204204a26db8..82484f741168 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -19,6 +19,9 @@ static inline int sdw_acpi_find_slaves(struct sdw_bus *bus)
int sdw_of_find_slaves(struct sdw_bus *bus);
void sdw_extract_slave_id(struct sdw_bus *bus,
u64 addr, struct sdw_slave_id *id);
+int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
+ struct fwnode_handle *fwnode);
+int sdw_master_device_del(struct sdw_bus *bus);
#ifdef CONFIG_DEBUG_FS
void sdw_bus_debugfs_init(struct sdw_bus *bus);
@@ -172,5 +175,6 @@ sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
#define SDW_UNATTACH_REQUEST_MASTER_RESET BIT(0)
void sdw_clear_slave_status(struct sdw_bus *bus, u32 request);
+int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
#endif /* __SDW_BUS_H */
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 17f096dd6806..de9a671802b8 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -7,6 +7,7 @@
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
+#include "sysfs_local.h"
/**
* sdw_get_device_id - find the matching SoundWire device id
@@ -33,10 +34,17 @@ sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv)
static int sdw_bus_match(struct device *dev, struct device_driver *ddrv)
{
- struct sdw_slave *slave = dev_to_sdw_dev(dev);
- struct sdw_driver *drv = drv_to_sdw_driver(ddrv);
+ struct sdw_slave *slave;
+ struct sdw_driver *drv;
+ int ret = 0;
+
+ if (is_sdw_slave(dev)) {
+ slave = dev_to_sdw_dev(dev);
+ drv = drv_to_sdw_driver(ddrv);
- return !!sdw_get_device_id(slave, drv);
+ ret = !!sdw_get_device_id(slave, drv);
+ }
+ return ret;
}
int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size)
@@ -47,7 +55,7 @@ int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size)
slave->id.mfg_id, slave->id.part_id);
}
-static int sdw_uevent(struct device *dev, struct kobj_uevent_env *env)
+int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct sdw_slave *slave = dev_to_sdw_dev(dev);
char modalias[32];
@@ -63,7 +71,6 @@ static int sdw_uevent(struct device *dev, struct kobj_uevent_env *env)
struct bus_type sdw_bus_type = {
.name = "soundwire",
.match = sdw_bus_match,
- .uevent = sdw_uevent,
};
EXPORT_SYMBOL_GPL(sdw_bus_type);
@@ -98,6 +105,11 @@ static int sdw_drv_probe(struct device *dev)
if (slave->ops && slave->ops->read_prop)
slave->ops->read_prop(slave);
+ /* init the sysfs as we have properties now */
+ ret = sdw_slave_sysfs_init(slave);
+ if (ret < 0)
+ dev_warn(dev, "Slave sysfs init failed:%d\n", ret);
+
/*
* Check for valid clk_stop_timeout, use DisCo worst case value of
* 300ms
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index ecd357d1c63d..9ea87538b9ef 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -407,7 +407,9 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
if (nack) {
dev_err_ratelimited(cdns->dev, "Msg NACKed for Slave %d\n", msg->dev_num);
return SDW_CMD_FAIL;
- } else if (no_ack) {
+ }
+
+ if (no_ack) {
dev_dbg_ratelimited(cdns->dev, "Msg ignored for Slave %d\n", msg->dev_num);
return SDW_CMD_IGNORED;
}
@@ -520,7 +522,9 @@ cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
dev_err_ratelimited(cdns->dev,
"SCP_addrpage NACKed for Slave %d\n", msg->dev_num);
return SDW_CMD_FAIL;
- } else if (no_ack) {
+ }
+
+ if (no_ack) {
dev_dbg_ratelimited(cdns->dev,
"SCP_addrpage ignored for Slave %d\n", msg->dev_num);
return SDW_CMD_IGNORED;
diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
index fb1140e82b86..b6cad0d59b7b 100644
--- a/drivers/soundwire/debugfs.c
+++ b/drivers/soundwire/debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2017-2019 Intel Corporation.
#include <linux/device.h>
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 3c83e76c6bf9..4cfdd074e310 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -669,11 +669,11 @@ static int sdw_stream_setup(struct snd_pcm_substream *substream,
/* Set stream pointer on all CODEC DAIs */
for (i = 0; i < rtd->num_codecs; i++) {
- ret = snd_soc_dai_set_sdw_stream(rtd->codec_dais[i], sdw_stream,
+ ret = snd_soc_dai_set_sdw_stream(asoc_rtd_to_codec(rtd, i), sdw_stream,
substream->stream);
if (ret < 0) {
dev_err(dai->dev, "failed to set stream pointer on codec dai %s",
- rtd->codec_dais[i]->name);
+ asoc_rtd_to_codec(rtd, i)->name);
goto release_stream;
}
}
@@ -1099,7 +1099,6 @@ static int intel_probe(struct platform_device *pdev)
sdw->cdns.registers = sdw->link_res->registers;
sdw->cdns.instance = sdw->instance;
sdw->cdns.msg_count = 0;
- sdw->cdns.bus.dev = &pdev->dev;
sdw->cdns.bus.link_id = pdev->id;
sdw_cdns_probe(&sdw->cdns);
@@ -1110,9 +1109,9 @@ static int intel_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdw);
- ret = sdw_add_bus_master(&sdw->cdns.bus);
+ ret = sdw_bus_master_add(&sdw->cdns.bus, &pdev->dev, pdev->dev.fwnode);
if (ret) {
- dev_err(&pdev->dev, "sdw_add_bus_master fail: %d\n", ret);
+ dev_err(&pdev->dev, "sdw_bus_master_add fail: %d\n", ret);
return ret;
}
@@ -1173,7 +1172,7 @@ err_interrupt:
sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->link_res->irq, sdw);
err_init:
- sdw_delete_bus_master(&sdw->cdns.bus);
+ sdw_bus_master_delete(&sdw->cdns.bus);
return ret;
}
@@ -1189,7 +1188,7 @@ static int intel_remove(struct platform_device *pdev)
free_irq(sdw->link_res->irq, sdw);
snd_soc_unregister_component(sdw->cdns.dev);
}
- sdw_delete_bus_master(&sdw->cdns.bus);
+ sdw_bus_master_delete(&sdw->cdns.bus);
return 0;
}
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 4b769409f6f8..d5d42795a48f 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -86,7 +86,9 @@ static struct sdw_intel_ctx
dev_err(&adev->dev, "Link count %d exceeds max %d\n",
count, SDW_MAX_LINKS);
return NULL;
- } else if (!count) {
+ }
+
+ if (!count) {
dev_warn(&adev->dev, "No SoundWire links detected\n");
return NULL;
}
diff --git a/drivers/soundwire/master.c b/drivers/soundwire/master.c
new file mode 100644
index 000000000000..5f0b2189defe
--- /dev/null
+++ b/drivers/soundwire/master.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2019-2020 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/acpi.h>
+#include <linux/pm_runtime.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include "bus.h"
+
+/*
+ * The sysfs for properties reflects the MIPI description as given
+ * in the MIPI DisCo spec
+ *
+ * Base file is:
+ * sdw-master-N
+ * |---- revision
+ * |---- clk_stop_modes
+ * |---- max_clk_freq
+ * |---- clock_frequencies
+ * |---- clock_gears
+ * |---- default_row
+ * |---- default_col
+ * |---- default_frame_rate
+ * |---- dynamic_frame
+ * |---- err_threshold
+ */
+
+#define sdw_master_attr(field, format_string) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_master_device *md = dev_to_sdw_master_device(dev); \
+ return sprintf(buf, format_string, md->bus->prop.field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+sdw_master_attr(revision, "0x%x\n");
+sdw_master_attr(clk_stop_modes, "0x%x\n");
+sdw_master_attr(max_clk_freq, "%d\n");
+sdw_master_attr(default_row, "%d\n");
+sdw_master_attr(default_col, "%d\n");
+sdw_master_attr(default_frame_rate, "%d\n");
+sdw_master_attr(dynamic_frame, "%d\n");
+sdw_master_attr(err_threshold, "%d\n");
+
+static ssize_t clock_frequencies_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_master_device *md = dev_to_sdw_master_device(dev);
+ ssize_t size = 0;
+ int i;
+
+ for (i = 0; i < md->bus->prop.num_clk_freq; i++)
+ size += sprintf(buf + size, "%8d ",
+ md->bus->prop.clk_freq[i]);
+ size += sprintf(buf + size, "\n");
+
+ return size;
+}
+static DEVICE_ATTR_RO(clock_frequencies);
+
+static ssize_t clock_gears_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_master_device *md = dev_to_sdw_master_device(dev);
+ ssize_t size = 0;
+ int i;
+
+ for (i = 0; i < md->bus->prop.num_clk_gears; i++)
+ size += sprintf(buf + size, "%8d ",
+ md->bus->prop.clk_gears[i]);
+ size += sprintf(buf + size, "\n");
+
+ return size;
+}
+static DEVICE_ATTR_RO(clock_gears);
+
+static struct attribute *master_node_attrs[] = {
+ &dev_attr_revision.attr,
+ &dev_attr_clk_stop_modes.attr,
+ &dev_attr_max_clk_freq.attr,
+ &dev_attr_default_row.attr,
+ &dev_attr_default_col.attr,
+ &dev_attr_default_frame_rate.attr,
+ &dev_attr_dynamic_frame.attr,
+ &dev_attr_err_threshold.attr,
+ &dev_attr_clock_frequencies.attr,
+ &dev_attr_clock_gears.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(master_node);
+
+static void sdw_master_device_release(struct device *dev)
+{
+ struct sdw_master_device *md = dev_to_sdw_master_device(dev);
+
+ kfree(md);
+}
+
+static const struct dev_pm_ops master_dev_pm = {
+ SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
+ pm_generic_runtime_resume, NULL)
+};
+
+struct device_type sdw_master_type = {
+ .name = "soundwire_master",
+ .release = sdw_master_device_release,
+ .pm = &master_dev_pm,
+};
+
+/**
+ * sdw_master_device_add() - create a Linux Master Device representation.
+ * @bus: SDW bus instance
+ * @parent: parent device
+ * @fwnode: firmware node handle
+ */
+int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
+ struct fwnode_handle *fwnode)
+{
+ struct sdw_master_device *md;
+ int ret;
+
+ if (!parent)
+ return -EINVAL;
+
+ md = kzalloc(sizeof(*md), GFP_KERNEL);
+ if (!md)
+ return -ENOMEM;
+
+ md->dev.bus = &sdw_bus_type;
+ md->dev.type = &sdw_master_type;
+ md->dev.parent = parent;
+ md->dev.groups = master_node_groups;
+ md->dev.of_node = parent->of_node;
+ md->dev.fwnode = fwnode;
+ md->dev.dma_mask = parent->dma_mask;
+
+ dev_set_name(&md->dev, "sdw-master-%d", bus->id);
+
+ ret = device_register(&md->dev);
+ if (ret) {
+ dev_err(parent, "Failed to add master: ret %d\n", ret);
+ /*
+ * On error, don't free the device here; drop the reference so it
+ * is freed when the release method is invoked.
+ */
+ put_device(&md->dev);
+ goto device_register_err;
+ }
+
+ /* add shortcuts to improve code readability/compactness */
+ md->bus = bus;
+ bus->dev = &md->dev;
+ bus->md = md;
+
+device_register_err:
+ return ret;
+}
+
+/**
+ * sdw_master_device_del() - delete a Linux Master Device representation.
+ * @bus: bus handle
+ *
+ * This function is the dual of sdw_master_device_add()
+ */
+int sdw_master_device_del(struct sdw_bus *bus)
+{
+ device_unregister(bus->dev);
+
+ return 0;
+}
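For readers unfamiliar with the attribute macro above, one instance hand-expanded (a sketch of what the preprocessor produces for the revision attribute):

static ssize_t revision_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct sdw_master_device *md = dev_to_sdw_master_device(dev);

	return sprintf(buf, "0x%x\n", md->bus->prop.revision);
}
static DEVICE_ATTR_RO(revision);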
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index 844e6b22974f..4ae62b452b8c 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -231,16 +231,17 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
nval = fwnode_property_count_u32(node, "mipi-sdw-channel-number-list");
if (nval > 0) {
- dpn[i].num_ch = nval;
- dpn[i].ch = devm_kcalloc(&slave->dev, dpn[i].num_ch,
- sizeof(*dpn[i].ch),
+ dpn[i].num_channels = nval;
+ dpn[i].channels = devm_kcalloc(&slave->dev,
+ dpn[i].num_channels,
+ sizeof(*dpn[i].channels),
GFP_KERNEL);
- if (!dpn[i].ch)
+ if (!dpn[i].channels)
return -ENOMEM;
fwnode_property_read_u32_array(node,
"mipi-sdw-channel-number-list",
- dpn[i].ch, dpn[i].num_ch);
+ dpn[i].channels, dpn[i].num_channels);
}
nval = fwnode_property_count_u32(node, "mipi-sdw-channel-combination-list");
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index d6c9ad231873..a1c2a44a3b4d 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -765,12 +765,16 @@ static int qcom_swrm_probe(struct platform_device *pdev)
}
ctrl->irq = of_irq_get(dev->of_node, 0);
- if (ctrl->irq < 0)
- return ctrl->irq;
+ if (ctrl->irq < 0) {
+ ret = ctrl->irq;
+ goto err_init;
+ }
ctrl->hclk = devm_clk_get(dev, "iface");
- if (IS_ERR(ctrl->hclk))
- return PTR_ERR(ctrl->hclk);
+ if (IS_ERR(ctrl->hclk)) {
+ ret = PTR_ERR(ctrl->hclk);
+ goto err_init;
+ }
clk_prepare_enable(ctrl->hclk);
@@ -780,14 +784,13 @@ static int qcom_swrm_probe(struct platform_device *pdev)
mutex_init(&ctrl->port_lock);
INIT_WORK(&ctrl->slave_work, qcom_swrm_slave_wq);
- ctrl->bus.dev = dev;
ctrl->bus.ops = &qcom_swrm_ops;
ctrl->bus.port_ops = &qcom_swrm_port_ops;
ctrl->bus.compute_params = &qcom_swrm_compute_params;
ret = qcom_swrm_get_port_config(ctrl);
if (ret)
- return ret;
+ goto err_clk;
params = &ctrl->bus.params;
params->max_dr_freq = DEFAULT_CLK_FREQ;
@@ -810,32 +813,37 @@ static int qcom_swrm_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, ctrl->irq, NULL,
qcom_swrm_irq_handler,
- IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
"soundwire", ctrl);
if (ret) {
dev_err(dev, "Failed to request soundwire irq\n");
- goto err;
+ goto err_clk;
}
- ret = sdw_add_bus_master(&ctrl->bus);
+ ret = sdw_bus_master_add(&ctrl->bus, dev, dev->fwnode);
if (ret) {
dev_err(dev, "Failed to register Soundwire controller (%d)\n",
ret);
- goto err;
+ goto err_clk;
}
qcom_swrm_init(ctrl);
ret = qcom_swrm_register_dais(ctrl);
if (ret)
- goto err;
+ goto err_master_add;
dev_info(dev, "Qualcomm Soundwire controller v%x.%x.%x Registered\n",
(ctrl->version >> 24) & 0xff, (ctrl->version >> 16) & 0xff,
ctrl->version & 0xffff);
return 0;
-err:
+
+err_master_add:
+ sdw_bus_master_delete(&ctrl->bus);
+err_clk:
clk_disable_unprepare(ctrl->hclk);
+err_init:
return ret;
}
@@ -843,7 +851,7 @@ static int qcom_swrm_remove(struct platform_device *pdev)
{
struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(&pdev->dev);
- sdw_delete_bus_master(&ctrl->bus);
+ sdw_bus_master_delete(&ctrl->bus);
clk_disable_unprepare(ctrl->hclk);
return 0;
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index aace57fae7f8..0839445ee07b 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -14,6 +14,12 @@ static void sdw_slave_release(struct device *dev)
kfree(slave);
}
+struct device_type sdw_slave_type = {
+ .name = "sdw_slave",
+ .release = sdw_slave_release,
+ .uevent = sdw_slave_uevent,
+};
+
static int sdw_slave_add(struct sdw_bus *bus,
struct sdw_slave_id *id, struct fwnode_handle *fwnode)
{
@@ -41,9 +47,9 @@ static int sdw_slave_add(struct sdw_bus *bus,
id->class_id, id->unique_id);
}
- slave->dev.release = sdw_slave_release;
slave->dev.bus = &sdw_bus_type;
slave->dev.of_node = of_node_get(to_of_node(fwnode));
+ slave->dev.type = &sdw_slave_type;
slave->bus = bus;
slave->status = SDW_SLAVE_UNATTACHED;
init_completion(&slave->enumeration_complete);
@@ -68,6 +74,8 @@ static int sdw_slave_add(struct sdw_bus *bus,
list_del(&slave->node);
mutex_unlock(&bus->bus_lock);
put_device(&slave->dev);
+
+ return ret;
}
sdw_slave_debugfs_init(slave);
diff --git a/drivers/soundwire/sysfs_local.h b/drivers/soundwire/sysfs_local.h
new file mode 100644
index 000000000000..ff60adee3c41
--- /dev/null
+++ b/drivers/soundwire/sysfs_local.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2015-2020 Intel Corporation. */
+
+#ifndef __SDW_SYSFS_LOCAL_H
+#define __SDW_SYSFS_LOCAL_H
+
+/*
+ * SDW sysfs APIs
+ */
+
+int sdw_slave_sysfs_init(struct sdw_slave *slave);
+int sdw_slave_sysfs_dpn_init(struct sdw_slave *slave);
+
+#endif /* __SDW_SYSFS_LOCAL_H */
diff --git a/drivers/soundwire/sysfs_slave.c b/drivers/soundwire/sysfs_slave.c
new file mode 100644
index 000000000000..f510071b0add
--- /dev/null
+++ b/drivers/soundwire/sysfs_slave.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2015-2020 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include "bus.h"
+#include "sysfs_local.h"
+
+/*
+ * Slave sysfs
+ */
+
+/*
+ * The sysfs for Slave reflects the MIPI description as given
+ * in the MIPI DisCo spec
+ *
+ * Base file is device
+ * |---- modalias
+ * |---- dev-properties
+ * |---- mipi_revision
+ * |---- wake_capable
+ * |---- test_mode_capable
+ * |---- clk_stop_mode1
+ * |---- simple_clk_stop_capable
+ * |---- clk_stop_timeout
+ * |---- ch_prep_timeout
+ * |---- reset_behave
+ * |---- high_PHY_capable
+ * |---- paging_support
+ * |---- bank_delay_support
+ * |---- p15_behave
+ * |---- master_count
+ * |---- source_ports
+ * |---- sink_ports
+ * |---- dp0
+ * |---- max_word
+ * |---- min_word
+ * |---- words
+ * |---- BRA_flow_controlled
+ * |---- simple_ch_prep_sm
+ * |---- imp_def_interrupts
+ * |---- dpN_<sink/src>
+ * |---- max_word
+ * |---- min_word
+ * |---- words
+ * |---- type
+ * |---- max_grouping
+ * |---- simple_ch_prep_sm
+ * |---- ch_prep_timeout
+ * |---- imp_def_interrupts
+ * |---- min_ch
+ * |---- max_ch
+ * |---- channels
+ * |---- ch_combinations
+ * |---- max_async_buffer
+ * |---- block_pack_mode
+ * |---- port_encoding
+ *
+ */
+
+#define sdw_slave_attr(field, format_string) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_slave *slave = dev_to_sdw_dev(dev); \
+ return sprintf(buf, format_string, slave->prop.field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+sdw_slave_attr(mipi_revision, "0x%x\n");
+sdw_slave_attr(wake_capable, "%d\n");
+sdw_slave_attr(test_mode_capable, "%d\n");
+sdw_slave_attr(clk_stop_mode1, "%d\n");
+sdw_slave_attr(simple_clk_stop_capable, "%d\n");
+sdw_slave_attr(clk_stop_timeout, "%d\n");
+sdw_slave_attr(ch_prep_timeout, "%d\n");
+sdw_slave_attr(reset_behave, "%d\n");
+sdw_slave_attr(high_PHY_capable, "%d\n");
+sdw_slave_attr(paging_support, "%d\n");
+sdw_slave_attr(bank_delay_support, "%d\n");
+sdw_slave_attr(p15_behave, "%d\n");
+sdw_slave_attr(master_count, "%d\n");
+sdw_slave_attr(source_ports, "0x%x\n");
+sdw_slave_attr(sink_ports, "0x%x\n");
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ return sdw_slave_modalias(slave, buf, 256);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *slave_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(slave);
+
+static struct attribute *slave_dev_attrs[] = {
+ &dev_attr_mipi_revision.attr,
+ &dev_attr_wake_capable.attr,
+ &dev_attr_test_mode_capable.attr,
+ &dev_attr_clk_stop_mode1.attr,
+ &dev_attr_simple_clk_stop_capable.attr,
+ &dev_attr_clk_stop_timeout.attr,
+ &dev_attr_ch_prep_timeout.attr,
+ &dev_attr_reset_behave.attr,
+ &dev_attr_high_PHY_capable.attr,
+ &dev_attr_paging_support.attr,
+ &dev_attr_bank_delay_support.attr,
+ &dev_attr_p15_behave.attr,
+ &dev_attr_master_count.attr,
+ &dev_attr_source_ports.attr,
+ &dev_attr_sink_ports.attr,
+ NULL,
+};
+
+/*
+ * we don't use ATTRIBUTE_GROUPS() here since we want to add a subdirectory
+ * for device-level properties
+ */
+static struct attribute_group sdw_slave_dev_attr_group = {
+ .attrs = slave_dev_attrs,
+ .name = "dev-properties",
+};
+
+/*
+ * DP0 sysfs
+ */
+
+#define sdw_dp0_attr(field, format_string) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_slave *slave = dev_to_sdw_dev(dev); \
+ return sprintf(buf, format_string, slave->prop.dp0_prop->field);\
+} \
+static DEVICE_ATTR_RO(field)
+
+sdw_dp0_attr(max_word, "%d\n");
+sdw_dp0_attr(min_word, "%d\n");
+sdw_dp0_attr(BRA_flow_controlled, "%d\n");
+sdw_dp0_attr(simple_ch_prep_sm, "%d\n");
+sdw_dp0_attr(imp_def_interrupts, "0x%x\n");
+
+static ssize_t words_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ ssize_t size = 0;
+ int i;
+
+ for (i = 0; i < slave->prop.dp0_prop->num_words; i++)
+ size += sprintf(buf + size, "%d ",
+ slave->prop.dp0_prop->words[i]);
+ size += sprintf(buf + size, "\n");
+
+ return size;
+}
+static DEVICE_ATTR_RO(words);
+
+static struct attribute *dp0_attrs[] = {
+ &dev_attr_max_word.attr,
+ &dev_attr_min_word.attr,
+ &dev_attr_words.attr,
+ &dev_attr_BRA_flow_controlled.attr,
+ &dev_attr_simple_ch_prep_sm.attr,
+ &dev_attr_imp_def_interrupts.attr,
+ NULL,
+};
+
+/*
+ * we don't use ATTRIBUTE_GROUPS() here since we want to add a subdirectory
+ * for dp0-level properties
+ */
+static const struct attribute_group dp0_group = {
+ .attrs = dp0_attrs,
+ .name = "dp0",
+};
+
+int sdw_slave_sysfs_init(struct sdw_slave *slave)
+{
+ int ret;
+
+ ret = devm_device_add_groups(&slave->dev, slave_groups);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_device_add_group(&slave->dev, &sdw_slave_dev_attr_group);
+ if (ret < 0)
+ return ret;
+
+ if (slave->prop.dp0_prop) {
+ ret = devm_device_add_group(&slave->dev, &dp0_group);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (slave->prop.source_ports || slave->prop.sink_ports) {
+ ret = sdw_slave_sysfs_dpn_init(slave);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/soundwire/sysfs_slave_dpn.c b/drivers/soundwire/sysfs_slave_dpn.c
new file mode 100644
index 000000000000..05a721ea9830
--- /dev/null
+++ b/drivers/soundwire/sysfs_slave_dpn.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2015-2020 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include "bus.h"
+#include "sysfs_local.h"
+
+struct dpn_attribute {
+ struct device_attribute dev_attr;
+ int N;
+ int dir;
+ const char *format_string;
+};
+
+/*
+ * Since we can't use ARRAY_SIZE, hard-code the number of dpN attributes.
+ * This needs to be updated when adding new attributes; an error will be
+ * flagged on a mismatch.
+ */
+#define SDW_DPN_ATTRIBUTES 15
+
+#define sdw_dpn_attribute_alloc(field) \
+static int field##_attribute_alloc(struct device *dev, \
+ struct attribute **res, \
+ int N, int dir, \
+ const char *format_string) \
+{ \
+ struct dpn_attribute *dpn_attr; \
+ \
+ dpn_attr = devm_kzalloc(dev, sizeof(*dpn_attr), GFP_KERNEL); \
+ if (!dpn_attr) \
+ return -ENOMEM; \
+ dpn_attr->N = N; \
+ dpn_attr->dir = dir; \
+ dpn_attr->format_string = format_string; \
+ dpn_attr->dev_attr.attr.name = __stringify(field); \
+ dpn_attr->dev_attr.attr.mode = 0444; \
+ dpn_attr->dev_attr.show = field##_show; \
+ \
+ *res = &dpn_attr->dev_attr.attr; \
+ \
+ return 0; \
+}
+
+#define sdw_dpn_attr(field) \
+ \
+static ssize_t field##_dpn_show(struct sdw_slave *slave, \
+ int N, \
+ int dir, \
+ const char *format_string, \
+ char *buf) \
+{ \
+ struct sdw_dpn_prop *dpn; \
+ unsigned long mask; \
+ int bit; \
+ int i; \
+ \
+ if (dir) { \
+ dpn = slave->prop.src_dpn_prop; \
+ mask = slave->prop.source_ports; \
+ } else { \
+ dpn = slave->prop.sink_dpn_prop; \
+ mask = slave->prop.sink_ports; \
+ } \
+ \
+ i = 0; \
+ for_each_set_bit(bit, &mask, 32) { \
+ if (bit == N) { \
+ return sprintf(buf, format_string, \
+ dpn[i].field); \
+ } \
+ i++; \
+ } \
+ return -EINVAL; \
+} \
+ \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_slave *slave = dev_to_sdw_dev(dev); \
+ struct dpn_attribute *dpn_attr = \
+ container_of(attr, struct dpn_attribute, dev_attr); \
+ \
+ return field##_dpn_show(slave, \
+ dpn_attr->N, dpn_attr->dir, \
+ dpn_attr->format_string, \
+ buf); \
+} \
+sdw_dpn_attribute_alloc(field)
+
+sdw_dpn_attr(imp_def_interrupts);
+sdw_dpn_attr(max_word);
+sdw_dpn_attr(min_word);
+sdw_dpn_attr(type);
+sdw_dpn_attr(max_grouping);
+sdw_dpn_attr(simple_ch_prep_sm);
+sdw_dpn_attr(ch_prep_timeout);
+sdw_dpn_attr(max_ch);
+sdw_dpn_attr(min_ch);
+sdw_dpn_attr(max_async_buffer);
+sdw_dpn_attr(block_pack_mode);
+sdw_dpn_attr(port_encoding);
+
+#define sdw_dpn_array_attr(field) \
+ \
+static ssize_t field##_dpn_show(struct sdw_slave *slave, \
+ int N, \
+ int dir, \
+ const char *format_string, \
+ char *buf) \
+{ \
+ struct sdw_dpn_prop *dpn; \
+ unsigned long mask; \
+ ssize_t size = 0; \
+ int bit; \
+ int i; \
+ int j; \
+ \
+ if (dir) { \
+ dpn = slave->prop.src_dpn_prop; \
+ mask = slave->prop.source_ports; \
+ } else { \
+ dpn = slave->prop.sink_dpn_prop; \
+ mask = slave->prop.sink_ports; \
+ } \
+ \
+ i = 0; \
+ for_each_set_bit(bit, &mask, 32) { \
+ if (bit == N) { \
+ for (j = 0; j < dpn[i].num_##field; j++) \
+ size += sprintf(buf + size, \
+ format_string, \
+ dpn[i].field[j]); \
+ size += sprintf(buf + size, "\n"); \
+ return size; \
+ } \
+ i++; \
+ } \
+ return -EINVAL; \
+} \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_slave *slave = dev_to_sdw_dev(dev); \
+ struct dpn_attribute *dpn_attr = \
+ container_of(attr, struct dpn_attribute, dev_attr); \
+ \
+ return field##_dpn_show(slave, \
+ dpn_attr->N, dpn_attr->dir, \
+ dpn_attr->format_string, \
+ buf); \
+} \
+sdw_dpn_attribute_alloc(field)
+
+sdw_dpn_array_attr(words);
+sdw_dpn_array_attr(ch_combinations);
+sdw_dpn_array_attr(channels);
+
+static int add_all_attributes(struct device *dev, int N, int dir)
+{
+ struct attribute **dpn_attrs;
+ struct attribute_group *dpn_group;
+ int i = 0;
+ int ret;
+
+ /* allocate attributes, last one is NULL */
+ dpn_attrs = devm_kcalloc(dev, SDW_DPN_ATTRIBUTES + 1,
+ sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!dpn_attrs)
+ return -ENOMEM;
+
+ ret = max_word_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = min_word_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = words_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = type_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = max_grouping_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = simple_ch_prep_sm_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = ch_prep_timeout_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = imp_def_interrupts_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "0x%x\n");
+ if (ret < 0)
+ return ret;
+
+ ret = min_ch_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = max_ch_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = channels_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = ch_combinations_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = max_async_buffer_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = block_pack_mode_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = port_encoding_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ /* paranoia check for editing mistakes */
+ if (i != SDW_DPN_ATTRIBUTES) {
+ dev_err(dev, "mismatch in attributes, allocated %d got %d\n",
+ SDW_DPN_ATTRIBUTES, i);
+ return -EINVAL;
+ }
+
+ dpn_group = devm_kzalloc(dev, sizeof(*dpn_group), GFP_KERNEL);
+ if (!dpn_group)
+ return -ENOMEM;
+
+ dpn_group->attrs = dpn_attrs;
+ dpn_group->name = devm_kasprintf(dev, GFP_KERNEL, "dp%d_%s",
+ N, dir ? "src" : "sink");
+ if (!dpn_group->name)
+ return -ENOMEM;
+
+ ret = devm_device_add_group(dev, dpn_group);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int sdw_slave_sysfs_dpn_init(struct sdw_slave *slave)
+{
+ unsigned long mask;
+ int ret;
+ int i;
+
+ mask = slave->prop.source_ports;
+ for_each_set_bit(i, &mask, 32) {
+ ret = add_all_attributes(&slave->dev, i, 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ mask = slave->prop.sink_ports;
+ for_each_set_bit(i, &mask, 32) {
+ ret = add_all_attributes(&slave->dev, i, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
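The per-port attributes above rely on embedding a device_attribute in a wrapper struct and recovering the wrapper with container_of() in the show callback. The pattern in isolation, as a generic user-space sketch:

#include <stddef.h>
#include <stdio.h>

struct dev_attr_sketch {
	const char *name;
};

struct dpn_attr_sketch {
	struct dev_attr_sketch dev_attr;
	int N;		/* port number */
	int dir;	/* 1 = source, 0 = sink */
};

/* Minimal stand-in for the kernel's container_of(). */
#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct dpn_attr_sketch dpn = { { "max_word" }, 2, 1 };
	struct dev_attr_sketch *attr = &dpn.dev_attr;	/* what sysfs hands back */

	/* Recover the port context from the embedded attribute. */
	struct dpn_attr_sketch *back =
		container_of_sketch(attr, struct dpn_attr_sketch, dev_attr);

	/* prints: port 2 dir 1 */
	printf("port %d dir %d\n", back->N, back->dir);
	return 0;
}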
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 741b9140992a..8f1f8fca79e3 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -226,17 +226,20 @@ config SPI_DESIGNWARE
help
General driver for the SPI controller core from DesignWare.
+if SPI_DESIGNWARE
+
+config SPI_DW_DMA
+ bool "DMA support for DW SPI controller"
+
config SPI_DW_PCI
tristate "PCI interface driver for DW SPI core"
- depends on SPI_DESIGNWARE && PCI
-
-config SPI_DW_MID_DMA
- bool "DMA support for DW SPI controller on Intel MID platform"
- depends on SPI_DW_PCI && DW_DMAC_PCI
+ depends on PCI
config SPI_DW_MMIO
tristate "Memory-mapped io interface driver for DW SPI core"
- depends on SPI_DESIGNWARE
+ depends on HAS_IOMEM
+
+endif
config SPI_DLN2
tristate "Diolan DLN-2 USB SPI adapter"
@@ -844,6 +847,7 @@ config SPI_TXX9
config SPI_UNIPHIER
tristate "Socionext UniPhier SPI Controller"
depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ depends on HAS_IOMEM
help
This enables a driver for the Socionext UniPhier SoC SCSSI SPI controller.
@@ -910,6 +914,12 @@ config SPI_ZYNQMP_GQSPI
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
+config SPI_AMD
+ tristate "AMD SPI controller"
+ depends on SPI_MASTER || COMPILE_TEST
+ help
+ Enables the SPI controller driver for AMD SoCs.
+
#
# Add new SPI master controllers in alphabetical order above this line
#
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 28f601327f8c..d2e41d3d464a 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -36,9 +36,10 @@ obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
+spi-dw-y := spi-dw-core.o
+spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o
obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
-obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
-spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
+obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o
obj-$(CONFIG_SPI_EFM32) += spi-efm32.o
obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
@@ -127,6 +128,7 @@ obj-$(CONFIG_SPI_XLP) += spi-xlp.o
obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o
obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
+obj-$(CONFIG_SPI_AMD) += spi-amd.o
# SPI slave protocol handlers
obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
new file mode 100644
index 000000000000..d0aacd4de1b9
--- /dev/null
+++ b/drivers/spi/spi-amd.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+//
+// AMD SPI controller driver
+//
+// Copyright (c) 2020, Advanced Micro Devices, Inc.
+//
+// Author: Sanjay R Mehta <sanju.mehta@amd.com>
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+
+#define AMD_SPI_CTRL0_REG 0x00
+#define AMD_SPI_EXEC_CMD BIT(16)
+#define AMD_SPI_FIFO_CLEAR BIT(20)
+#define AMD_SPI_BUSY BIT(31)
+
+#define AMD_SPI_OPCODE_MASK 0xFF
+
+#define AMD_SPI_ALT_CS_REG 0x1D
+#define AMD_SPI_ALT_CS_MASK 0x3
+
+#define AMD_SPI_FIFO_BASE 0x80
+#define AMD_SPI_TX_COUNT_REG 0x48
+#define AMD_SPI_RX_COUNT_REG 0x4B
+#define AMD_SPI_STATUS_REG 0x4C
+
+#define AMD_SPI_MEM_SIZE 200
+
+/* M_CMD OP codes for SPI */
+#define AMD_SPI_XFER_TX 1
+#define AMD_SPI_XFER_RX 2
+
+struct amd_spi {
+ void __iomem *io_remap_addr;
+ unsigned long io_base_addr;
+ u32 rom_addr;
+ u8 chip_select;
+};
+
+static inline u8 amd_spi_readreg8(struct spi_master *master, int idx)
+{
+ struct amd_spi *amd_spi = spi_master_get_devdata(master);
+
+ return ioread8((u8 __iomem *)amd_spi->io_remap_addr + idx);
+}
+
+static inline void amd_spi_writereg8(struct spi_master *master, int idx,
+ u8 val)
+{
+ struct amd_spi *amd_spi = spi_master_get_devdata(master);
+
+ iowrite8(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
+}
+
+static inline void amd_spi_setclear_reg8(struct spi_master *master, int idx,
+ u8 set, u8 clear)
+{
+ u8 tmp = amd_spi_readreg8(master, idx);
+
+ tmp = (tmp & ~clear) | set;
+ amd_spi_writereg8(master, idx, tmp);
+}
+
+static inline u32 amd_spi_readreg32(struct spi_master *master, int idx)
+{
+ struct amd_spi *amd_spi = spi_master_get_devdata(master);
+
+ return ioread32((u8 __iomem *)amd_spi->io_remap_addr + idx);
+}
+
+static inline void amd_spi_writereg32(struct spi_master *master, int idx,
+ u32 val)
+{
+ struct amd_spi *amd_spi = spi_master_get_devdata(master);
+
+ iowrite32(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
+}
+
+static inline void amd_spi_setclear_reg32(struct spi_master *master, int idx,
+ u32 set, u32 clear)
+{
+ u32 tmp = amd_spi_readreg32(master, idx);
+
+ tmp = (tmp & ~clear) | set;
+ amd_spi_writereg32(master, idx, tmp);
+}
+
+static void amd_spi_select_chip(struct spi_master *master)
+{
+ struct amd_spi *amd_spi = spi_master_get_devdata(master);
+ u8 chip_select = amd_spi->chip_select;
+
+ amd_spi_setclear_reg8(master, AMD_SPI_ALT_CS_REG, chip_select,
+ AMD_SPI_ALT_CS_MASK);
+}
+
+static void amd_spi_clear_fifo_ptr(struct spi_master *master)
+{
+ amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR,
+ AMD_SPI_FIFO_CLEAR);
+}
+
+static void amd_spi_set_opcode(struct spi_master *master, u8 cmd_opcode)
+{
+ amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, cmd_opcode,
+ AMD_SPI_OPCODE_MASK);
+}
+
+static inline void amd_spi_set_rx_count(struct spi_master *master,
+ u8 rx_count)
+{
+ amd_spi_setclear_reg8(master, AMD_SPI_RX_COUNT_REG, rx_count, 0xff);
+}
+
+static inline void amd_spi_set_tx_count(struct spi_master *master,
+ u8 tx_count)
+{
+ amd_spi_setclear_reg8(master, AMD_SPI_TX_COUNT_REG, tx_count, 0xff);
+}
+
+static inline int amd_spi_busy_wait(struct amd_spi *amd_spi)
+{
+ bool spi_busy;
+ int timeout = 100000;
+
+ /* poll for SPI bus to become idle */
+ spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr +
+ AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY;
+ while (spi_busy) {
+ usleep_range(10, 20);
+ if (timeout-- < 0)
+ return -ETIMEDOUT;
+
+ spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr +
+ AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY;
+ }
+
+ return 0;
+}
+
+static void amd_spi_execute_opcode(struct spi_master *master)
+{
+ struct amd_spi *amd_spi = spi_master_get_devdata(master);
+
+ /* Set ExecuteOpCode bit in the CTRL0 register */
+ amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD,
+ AMD_SPI_EXEC_CMD);
+
+ amd_spi_busy_wait(amd_spi);
+}
+
+static int amd_spi_master_setup(struct spi_device *spi)
+{
+ struct spi_master *master = spi->master;
+
+ amd_spi_clear_fifo_ptr(master);
+
+ return 0;
+}
+
+static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
+ struct spi_master *master,
+ struct spi_message *message)
+{
+ struct spi_transfer *xfer = NULL;
+ u8 cmd_opcode;
+ u8 *buf = NULL;
+ u32 m_cmd = 0;
+ u32 i = 0;
+ u32 tx_len = 0, rx_len = 0;
+
+ list_for_each_entry(xfer, &message->transfers,
+ transfer_list) {
+ if (xfer->rx_buf)
+ m_cmd = AMD_SPI_XFER_RX;
+ if (xfer->tx_buf)
+ m_cmd = AMD_SPI_XFER_TX;
+
+ if (m_cmd & AMD_SPI_XFER_TX) {
+ buf = (u8 *)xfer->tx_buf;
+ tx_len = xfer->len - 1;
+ cmd_opcode = *(u8 *)xfer->tx_buf;
+ buf++;
+ amd_spi_set_opcode(master, cmd_opcode);
+
+ /* Write data into the FIFO. */
+ for (i = 0; i < tx_len; i++) {
+ iowrite8(buf[i],
+ ((u8 __iomem *)amd_spi->io_remap_addr +
+ AMD_SPI_FIFO_BASE + i));
+ }
+
+ amd_spi_set_tx_count(master, tx_len);
+ amd_spi_clear_fifo_ptr(master);
+ /* Execute command */
+ amd_spi_execute_opcode(master);
+ }
+ if (m_cmd & AMD_SPI_XFER_RX) {
+ /*
+ * Store the number of bytes to be
+ * received from the FIFO
+ */
+ rx_len = xfer->len;
+ buf = (u8 *)xfer->rx_buf;
+ amd_spi_set_rx_count(master, rx_len);
+ amd_spi_clear_fifo_ptr(master);
+ /* Execute command */
+ amd_spi_execute_opcode(master);
+ /* Read data from FIFO to receive buffer */
+ for (i = 0; i < rx_len; i++)
+ buf[i] = amd_spi_readreg8(master,
+ AMD_SPI_FIFO_BASE +
+ tx_len + i);
+ }
+ }
+
+ /* Update statistics */
+ message->actual_length = tx_len + rx_len + 1;
+ /* complete the transaction */
+ message->status = 0;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static int amd_spi_master_transfer(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct amd_spi *amd_spi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+
+ amd_spi->chip_select = spi->chip_select;
+ amd_spi_select_chip(master);
+
+ /*
+ * Extract spi_transfers from the spi message and
+ * program the controller.
+ */
+ amd_spi_fifo_xfer(amd_spi, master, msg);
+
+ return 0;
+}
+
+static int amd_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct amd_spi *amd_spi;
+ struct resource *res;
+ int err = 0;
+
+ /* Allocate storage for spi_master and driver private data */
+ master = spi_alloc_master(dev, sizeof(struct amd_spi));
+ if (!master) {
+ dev_err(dev, "Error allocating SPI master\n");
+ return -ENOMEM;
+ }
+
+ amd_spi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ amd_spi->io_remap_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(amd_spi->io_remap_addr)) {
+ err = PTR_ERR(amd_spi->io_remap_addr);
+ dev_err(dev, "error %d ioremap of SPI registers failed\n", err);
+ goto err_free_master;
+ }
+ dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
+
+ /* Initialize the spi_master fields */
+ master->bus_num = 0;
+ master->num_chipselect = 4;
+ master->mode_bits = 0;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = amd_spi_master_setup;
+ master->transfer_one_message = amd_spi_master_transfer;
+
+ /* Register the controller with SPI framework */
+ err = devm_spi_register_master(dev, master);
+ if (err) {
+ dev_err(dev, "error %d registering SPI controller\n", err);
+ goto err_free_master;
+ }
+
+ return 0;
+
+err_free_master:
+ spi_master_put(master);
+
+ return err;
+}
+
+static const struct acpi_device_id spi_acpi_match[] = {
+ { "AMDI0061", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, spi_acpi_match);
+
+static struct platform_driver amd_spi_driver = {
+ .driver = {
+ .name = "amd_spi",
+ .acpi_match_table = ACPI_PTR(spi_acpi_match),
+ },
+ .probe = amd_spi_probe,
+};
+
+module_platform_driver(amd_spi_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Sanjay Mehta <sanju.mehta@amd.com>");
+MODULE_DESCRIPTION("AMD SPI Master Controller Driver");
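On this controller the first TX byte is consumed as the opcode and only the remainder is pushed to the FIFO, with RX data read back after the opcode executes. A hypothetical client transfer that fits this model (the 0x9f opcode and the function name are only examples):

#include <linux/spi/spi.h>

/* Hypothetical client-side sketch: 1-byte opcode, then a 3-byte read. */
static int example_read_id(struct spi_device *spi, u8 *id /* 3 bytes */)
{
	u8 opcode = 0x9f;	/* ends up in CTRL0, not the FIFO */
	struct spi_transfer xfers[] = {
		{ .tx_buf = &opcode, .len = 1 },	/* opcode only, no payload */
		{ .rx_buf = id, .len = 3 },		/* read back from the FIFO */
	};

	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}

Since the controller declares SPI_MASTER_HALF_DUPLEX, TX and RX are kept in separate transfers as above.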
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index e450ee17787f..fcde419e480c 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -276,11 +276,11 @@ static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi)
return -ETIMEDOUT;
}
-static int a3700_spi_init(struct a3700_spi *a3700_spi)
+static void a3700_spi_init(struct a3700_spi *a3700_spi)
{
struct spi_master *master = a3700_spi->master;
u32 val;
- int i, ret = 0;
+ int i;
/* Reset SPI unit */
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
@@ -311,8 +311,6 @@ static int a3700_spi_init(struct a3700_spi *a3700_spi)
/* Mask the interrupts and clear cause bits */
spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, ~0U);
-
- return ret;
}
static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
@@ -886,9 +884,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
A3700_SPI_MAX_PRESCALE);
- ret = a3700_spi_init(spi);
- if (ret)
- goto error_clk;
+ a3700_spi_init(spi);
ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0,
dev_name(dev), master);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 013458cabe3c..57ee8c3b7972 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -706,6 +706,7 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
struct spi_transfer *xfer,
u32 *plen)
+ __must_hold(&as->lock)
{
struct atmel_spi *as = spi_master_get_devdata(master);
struct dma_chan *rxchan = master->dma_rx;
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index eb9b78a90dcf..af86e6d6e16b 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -489,22 +489,6 @@ static int spi_engine_probe(struct platform_device *pdev)
spin_lock_init(&spi_engine->lock);
- spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(spi_engine->base)) {
- ret = PTR_ERR(spi_engine->base);
- goto err_put_master;
- }
-
- version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
- if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
- dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
- SPI_ENGINE_VERSION_MAJOR(version),
- SPI_ENGINE_VERSION_MINOR(version),
- SPI_ENGINE_VERSION_PATCH(version));
- ret = -ENODEV;
- goto err_put_master;
- }
-
spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
if (IS_ERR(spi_engine->clk)) {
ret = PTR_ERR(spi_engine->clk);
@@ -525,6 +509,22 @@ static int spi_engine_probe(struct platform_device *pdev)
if (ret)
goto err_clk_disable;
+ spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spi_engine->base)) {
+ ret = PTR_ERR(spi_engine->base);
+ goto err_ref_clk_disable;
+ }
+
+ version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
+ if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
+ dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
+ SPI_ENGINE_VERSION_MAJOR(version),
+ SPI_ENGINE_VERSION_MINOR(version),
+ SPI_ENGINE_VERSION_PATCH(version));
+ ret = -ENODEV;
+ goto err_ref_clk_disable;
+ }
+
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 23d295f36c80..681d09085175 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -91,6 +91,7 @@
#define MSPI_MSPI_STATUS 0x020
#define MSPI_CPTQP 0x024
#define MSPI_SPCR3 0x028
+#define MSPI_REV 0x02c
#define MSPI_TXRAM 0x040
#define MSPI_RXRAM 0x0c0
#define MSPI_CDRAM 0x140
@@ -106,14 +107,22 @@
#define MSPI_SPCR2_SPE BIT(6)
#define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
+#define MSPI_SPCR3_FASTBR BIT(0)
+#define MSPI_SPCR3_FASTDT BIT(1)
+#define MSPI_SPCR3_SYSCLKSEL_MASK GENMASK(11, 10)
+#define MSPI_SPCR3_SYSCLKSEL_27 (MSPI_SPCR3_SYSCLKSEL_MASK & \
+ ~(BIT(10) | BIT(11)))
+#define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \
+ BIT(11))
+
#define MSPI_MSPI_STATUS_SPIF BIT(0)
#define INTR_BASE_BIT_SHIFT 0x02
#define INTR_COUNT 0x07
#define NUM_CHIPSELECT 4
-#define QSPI_SPBR_MIN 8U
#define QSPI_SPBR_MAX 255U
+#define MSPI_BASE_FREQ 27000000UL
#define OPCODE_DIOR 0xBB
#define OPCODE_QIOR 0xEB
@@ -217,6 +226,9 @@ struct bcm_qspi {
struct bcm_qspi_dev_id *dev_ids;
struct completion mspi_done;
struct completion bspi_done;
+ u8 mspi_maj_rev;
+ u8 mspi_min_rev;
+ bool mspi_spcr3_sysclk;
};
static inline bool has_bspi(struct bcm_qspi *qspi)
@@ -224,6 +236,36 @@ static inline bool has_bspi(struct bcm_qspi *qspi)
return qspi->bspi_mode;
}
+/* hardware supports SPCR3 and fast baud rate */
+static inline bool bcm_qspi_has_fastbr(struct bcm_qspi *qspi)
+{
+ if (!has_bspi(qspi) &&
+ ((qspi->mspi_maj_rev >= 1) &&
+ (qspi->mspi_min_rev >= 5)))
+ return true;
+
+ return false;
+}
+
+/* hardware supports sys clk 108 MHz */
+static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
+{
+ if (!has_bspi(qspi) && (qspi->mspi_spcr3_sysclk ||
+ ((qspi->mspi_maj_rev >= 1) &&
+ (qspi->mspi_min_rev >= 6))))
+ return true;
+
+ return false;
+}
+
+static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
+{
+ if (bcm_qspi_has_fastbr(qspi))
+ return 1;
+ else
+ return 8;
+}
+
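A quick illustration of how these helpers change the attainable bus clock, with assumed revision values (a sketch, not part of the patch):

	/*
	 * SPCR3.FASTBR drops the minimum baud-rate divider from 8 to 1;
	 * the SPI clock is base_clk / (2 * SPBR), so:
	 *   MSPI rev 1.4: bcm_qspi_spbr_min() == 8 -> max clock = base_clk / 16
	 *   MSPI rev 1.5: bcm_qspi_spbr_min() == 1 -> max clock = base_clk / 2
	 */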
/* Read qspi controller register */
static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
unsigned int offset)
@@ -531,16 +573,39 @@ static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
if (xp->speed_hz)
spbr = qspi->base_clk / (2 * xp->speed_hz);
- spcr = clamp_val(spbr, QSPI_SPBR_MIN, QSPI_SPBR_MAX);
+ spcr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr);
- spcr = MSPI_MASTER_BIT;
+ if (!qspi->mspi_maj_rev)
+ /* legacy controller */
+ spcr = MSPI_MASTER_BIT;
+ else
+ spcr = 0;
+
/* for 16 bit the data should be zero */
if (xp->bits_per_word != 16)
spcr |= xp->bits_per_word << 2;
spcr |= xp->mode & 3;
+
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);
+ if (bcm_qspi_has_fastbr(qspi)) {
+ spcr = 0;
+
+ /* enable fastbr */
+ spcr |= MSPI_SPCR3_FASTBR;
+
+ if (bcm_qspi_has_sysclk_108(qspi)) {
+ /* SYSCLK_108 */
+ spcr |= MSPI_SPCR3_SYSCLKSEL_108;
+ qspi->base_clk = MSPI_BASE_FREQ * 4;
+ /* Change spbr as we changed sysclk */
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, 4);
+ }
+
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
+ }
+
qspi->last_parms = *xp;
}
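The 108 MHz path works out as follows (a worked example from the constants added above; illustrative only):

	/*
	 * With MSPI_SPCR3_SYSCLKSEL_108 selected:
	 *   base_clk = MSPI_BASE_FREQ * 4 = 27 MHz * 4 = 108 MHz
	 * and the hard-coded SPBR of 4 gives:
	 *   SPI clock = 108 MHz / (2 * 4) = 13.5 MHz
	 */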
@@ -612,19 +677,15 @@ static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
if (qt->trans->cs_change &&
(flags & TRANS_STATUS_BREAK_CS_CHANGE))
ret |= TRANS_STATUS_BREAK_CS_CHANGE;
- if (ret)
- goto done;
- dev_dbg(&qspi->pdev->dev, "advance msg exit\n");
if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
- ret = TRANS_STATUS_BREAK_EOM;
+ ret |= TRANS_STATUS_BREAK_EOM;
else
- ret = TRANS_STATUS_BREAK_NO_BYTES;
+ ret |= TRANS_STATUS_BREAK_NO_BYTES;
qt->trans = NULL;
}
-done:
dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
return ret;
@@ -670,7 +731,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
if (buf)
buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
dev_dbg(&qspi->pdev->dev, "RD %02x\n",
- buf ? buf[tp.byte] : 0xff);
+ buf ? buf[tp.byte] : 0x0);
} else {
u16 *buf = tp.trans->rx_buf;
@@ -678,7 +739,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
slot);
dev_dbg(&qspi->pdev->dev, "RD %04x\n",
- buf ? buf[tp.byte] : 0xffff);
+ buf ? buf[tp.byte / 2] : 0x0);
}
update_qspi_trans_byte_count(qspi, &tp,
@@ -733,13 +794,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
while (!tstatus && slot < MSPI_NUM_CDRAM) {
if (tp.trans->bits_per_word <= 8) {
const u8 *buf = tp.trans->tx_buf;
- u8 val = buf ? buf[tp.byte] : 0xff;
+ u8 val = buf ? buf[tp.byte] : 0x00;
write_txram_slot_u8(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
} else {
const u16 *buf = tp.trans->tx_buf;
- u16 val = buf ? buf[tp.byte / 2] : 0xffff;
+ u16 val = buf ? buf[tp.byte / 2] : 0x0000;
write_txram_slot_u16(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
@@ -771,7 +832,16 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);
- if (tstatus & TRANS_STATUS_BREAK_DESELECT) {
+ /*
+ * case 1) EOM = 1, cs_change = 0: SSb inactive
+ * case 2) EOM = 1, cs_change = 1: SSb stays active
+ * case 3) EOM = 0, cs_change = 0: SSb stays active
+ * case 4) EOM = 0, cs_change = 1: SSb inactive
+ */
+ if (((tstatus & TRANS_STATUS_BREAK_DESELECT)
+ == TRANS_STATUS_BREAK_CS_CHANGE) ||
+ ((tstatus & TRANS_STATUS_BREAK_DESELECT)
+ == TRANS_STATUS_BREAK_EOM)) {
mspi_cdram = read_cdram_slot(qspi, slot - 1) &
~MSPI_CDRAM_CONT_BIT;
write_cdram_slot(qspi, slot - 1, mspi_cdram);
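Assuming TRANS_STATUS_BREAK_DESELECT is the OR of the EOM and CS_CHANGE flags, the four cases reduce to "deassert SSb when exactly one of the two flags is set", which could equivalently be written as (sketch):

	bool deselect = !!(tstatus & TRANS_STATUS_BREAK_EOM) ^
			!!(tstatus & TRANS_STATUS_BREAK_CS_CHANGE);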
@@ -1190,8 +1260,51 @@ static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
.exec_op = bcm_qspi_exec_mem_op,
};
+struct bcm_qspi_data {
+ bool has_mspi_rev;
+ bool has_spcr3_sysclk;
+};
+
+static const struct bcm_qspi_data bcm_qspi_no_rev_data = {
+ .has_mspi_rev = false,
+ .has_spcr3_sysclk = false,
+};
+
+static const struct bcm_qspi_data bcm_qspi_rev_data = {
+ .has_mspi_rev = true,
+ .has_spcr3_sysclk = false,
+};
+
+static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
+ .has_mspi_rev = true,
+ .has_spcr3_sysclk = true,
+};
+
static const struct of_device_id bcm_qspi_of_match[] = {
- { .compatible = "brcm,spi-bcm-qspi" },
+ {
+ .compatible = "brcm,spi-bcm7425-qspi",
+ .data = &bcm_qspi_no_rev_data,
+ },
+ {
+ .compatible = "brcm,spi-bcm7429-qspi",
+ .data = &bcm_qspi_no_rev_data,
+ },
+ {
+ .compatible = "brcm,spi-bcm7435-qspi",
+ .data = &bcm_qspi_no_rev_data,
+ },
+ {
+ .compatible = "brcm,spi-bcm-qspi",
+ .data = &bcm_qspi_rev_data,
+ },
+ {
+ .compatible = "brcm,spi-bcm7216-qspi",
+ .data = &bcm_qspi_spcr3_data,
+ },
+ {
+ .compatible = "brcm,spi-bcm7278-qspi",
+ .data = &bcm_qspi_spcr3_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
@@ -1199,12 +1312,15 @@ MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
int bcm_qspi_probe(struct platform_device *pdev,
struct bcm_qspi_soc_intc *soc_intc)
{
+ const struct of_device_id *of_id = NULL;
+ const struct bcm_qspi_data *data;
struct device *dev = &pdev->dev;
struct bcm_qspi *qspi;
struct spi_master *master;
struct resource *res;
int irq, ret = 0, num_ints = 0;
u32 val;
+ u32 rev = 0;
const char *name = NULL;
int num_irqs = ARRAY_SIZE(qspi_irq_tab);
@@ -1212,9 +1328,12 @@ int bcm_qspi_probe(struct platform_device *pdev,
if (!dev->of_node)
return -ENODEV;
- if (!of_match_node(bcm_qspi_of_match, dev->of_node))
+ of_id = of_match_node(bcm_qspi_of_match, dev->of_node);
+ if (!of_id)
return -ENODEV;
+ data = of_id->data;
+
master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
if (!master) {
dev_err(dev, "error allocating spi_master\n");
@@ -1222,6 +1341,11 @@ int bcm_qspi_probe(struct platform_device *pdev,
}
qspi = spi_master_get_devdata(master);
+
+ qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(qspi->clk))
+ return PTR_ERR(qspi->clk);
+
qspi->pdev = pdev;
qspi->trans_pos.trans = NULL;
qspi->trans_pos.byte = 0;
@@ -1335,13 +1459,6 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->soc_intc = NULL;
}
- qspi->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(qspi->clk)) {
- dev_warn(dev, "unable to get clock\n");
- ret = PTR_ERR(qspi->clk);
- goto qspi_probe_err;
- }
-
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "failed to prepare clock\n");
@@ -1349,7 +1466,19 @@ int bcm_qspi_probe(struct platform_device *pdev,
}
qspi->base_clk = clk_get_rate(qspi->clk);
- qspi->max_speed_hz = qspi->base_clk / (QSPI_SPBR_MIN * 2);
+
+ if (data->has_mspi_rev) {
+ rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
+ /* some older revs do not have an MSPI_REV register */
+ if ((rev & 0xff) == 0xff)
+ rev = 0;
+ }
+
+ qspi->mspi_maj_rev = (rev >> 4) & 0xf;
+ qspi->mspi_min_rev = rev & 0xf;
+ qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
+
+ qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
bcm_qspi_hw_init(qspi);
init_completion(&qspi->mspi_done);
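A worked example of the revision decode (illustrative register value):

	/*
	 * MSPI_REV == 0x15 decodes to major 1, minor 5, so
	 * bcm_qspi_spbr_min() returns 1 and max_speed_hz == base_clk / 2.
	 * A readback of 0xff means the register is absent and rev stays 0.
	 */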
@@ -1406,7 +1535,7 @@ static int __maybe_unused bcm_qspi_suspend(struct device *dev)
bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
spi_master_suspend(qspi->master);
- clk_disable(qspi->clk);
+ clk_disable_unprepare(qspi->clk);
bcm_qspi_hw_uninit(qspi);
return 0;
@@ -1424,7 +1553,7 @@ static int __maybe_unused bcm_qspi_resume(struct device *dev)
qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
true);
- ret = clk_enable(qspi->clk);
+ ret = clk_prepare_enable(qspi->clk);
if (!ret)
spi_master_resume(qspi->master);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 11c235879bb7..237bd306c268 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -191,12 +191,12 @@ static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
}
#endif /* CONFIG_DEBUG_FS */
-static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
+static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
{
return readl(bs->regs + reg);
}
-static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val)
+static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
{
writel(val, bs->regs + reg);
}
@@ -940,6 +940,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
dev_err(dev, "cannot map zero page - not using DMA mode\n");
bs->fill_tx_addr = 0;
+ ret = -ENOMEM;
goto err_release;
}
@@ -949,6 +950,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
DMA_MEM_TO_DEV, 0);
if (!bs->fill_tx_desc) {
dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
+ ret = -ENOMEM;
goto err_release;
}
@@ -979,6 +981,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) {
dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n");
bs->clear_rx_addr = 0;
+ ret = -ENOMEM;
goto err_release;
}
@@ -989,6 +992,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
DMA_MEM_TO_DEV, 0);
if (!bs->clear_rx_desc[i]) {
dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n");
+ ret = -ENOMEM;
goto err_release;
}
@@ -1347,7 +1351,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
goto out_dma_release;
}
- err = devm_spi_register_controller(&pdev->dev, ctlr);
+ err = spi_register_controller(ctlr);
if (err) {
dev_err(&pdev->dev, "could not register SPI controller: %d\n",
err);
@@ -1374,17 +1378,28 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
bcm2835_debugfs_remove(bs);
+ spi_unregister_controller(ctlr);
+
+ bcm2835_dma_release(ctlr, bs);
+
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
clk_disable_unprepare(bs->clk);
- bcm2835_dma_release(ctlr, bs);
-
return 0;
}
+static void bcm2835_spi_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = bcm2835_spi_remove(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
static const struct of_device_id bcm2835_spi_match[] = {
{ .compatible = "brcm,bcm2835-spi", },
{}
@@ -1398,6 +1413,7 @@ static struct platform_driver bcm2835_spi_driver = {
},
.probe = bcm2835_spi_probe,
.remove = bcm2835_spi_remove,
+ .shutdown = bcm2835_spi_shutdown,
};
module_platform_driver(bcm2835_spi_driver);
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index a2162ff56a12..c331efd6e86b 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -569,7 +569,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
- err = devm_spi_register_master(&pdev->dev, master);
+ err = spi_register_master(master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
goto out_clk_disable;
@@ -593,6 +593,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev)
bcm2835aux_debugfs_remove(bs);
+ spi_unregister_master(master);
+
bcm2835aux_spi_reset_hw(bs);
/* disable the HW block by releasing the clock */
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw-core.c
index 31e3f866d11a..323c66c5db50 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw-core.c
@@ -24,74 +24,34 @@ struct chip_data {
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
- u8 poll_mode; /* 1 means use poll mode */
-
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
- void (*cs_control)(u32 command);
};
#ifdef CONFIG_DEBUG_FS
-#define SPI_REGS_BUFSIZE 1024
-static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct dw_spi *dws = file->private_data;
- char *buf;
- u32 len = 0;
- ssize_t ret;
-
- buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "%s registers:\n", dev_name(&dws->master->dev));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "=================================\n");
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
- len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
- "=================================\n");
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return ret;
+
+#define DW_SPI_DBGFS_REG(_name, _off) \
+{ \
+ .name = _name, \
+ .offset = _off, \
}
-static const struct file_operations dw_spi_regs_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = dw_spi_show_regs,
- .llseek = default_llseek,
+static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
+ DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
+ DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
+ DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
+ DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
+ DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
+ DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
+ DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
+ DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
+ DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
+ DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
+ DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
+ DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
+ DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
+ DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
+ DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
};
static int dw_spi_debugfs_init(struct dw_spi *dws)
@@ -103,8 +63,11 @@ static int dw_spi_debugfs_init(struct dw_spi *dws)
if (!dws->debugfs)
return -ENOMEM;
- debugfs_create_file("registers", S_IFREG | S_IRUGO,
- dws->debugfs, (void *)dws, &dw_spi_regs_ops);
+ dws->regset.regs = dw_spi_dbgfs_regs;
+ dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
+ dws->regset.base = dws->regs;
+ debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);
+
return 0;
}
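Each table entry is a plain name/offset pair that debugfs renders as one read-only "registers" file, replacing the hand-rolled scnprintf() dump. For instance, the SER line expands to:

	/* DW_SPI_DBGFS_REG("SER", DW_SPI_SER) expands to: */
	{ .name = "SER", .offset = 0x10, },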
@@ -127,13 +90,16 @@ static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
- struct chip_data *chip = spi_get_ctldata(spi);
-
- /* Chip select logic is inverted from spi_set_cs() */
- if (chip && chip->cs_control)
- chip->cs_control(!enable);
+ bool cs_high = !!(spi->mode & SPI_CS_HIGH);
- if (!enable)
+ /*
+ * The DW SPI controller demands that a native CS bit be set before
+ * any data transfer can proceed. So to activate SPI communication
+ * we must set the corresponding bit in the Slave Enable register,
+ * no matter whether the SPI core is configured for an active-high
+ * or active-low CS level.
+ */
+ if (cs_high == enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
else if (dws->cs_override)
dw_writel(dws, DW_SPI_SER, 0);
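The condition reads naturally once one notes that the SPI core hands set_cs() the requested physical line level (an assumption about core behavior, stated here for illustration):

	/*
	 * Both activation cases satisfy (cs_high == enable):
	 *   active-low slave:  cs_high == false, enable == false -> SER bit set
	 *   active-high slave: cs_high == true,  enable == true  -> SER bit set
	 * On deactivation the two differ, and SER is cleared when the
	 * controller supports cs_override.
	 */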
@@ -265,17 +231,56 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id)
return dws->transfer_handler(dws);
}
-/* Must be called inside pump_transfers() */
-static int poll_transfer(struct dw_spi *dws)
+/* Configure CTRLR0 for DW_apb_ssi */
+u32 dw_spi_update_cr0(struct spi_controller *master, struct spi_device *spi,
+ struct spi_transfer *transfer)
{
- do {
- dw_writer(dws);
- dw_reader(dws);
- cpu_relax();
- } while (dws->rx_end > dws->rx);
+ struct chip_data *chip = spi_get_ctldata(spi);
+ u32 cr0;
- return 0;
+ /* Default SPI mode is SCPOL = 0, SCPH = 0 */
+ cr0 = (transfer->bits_per_word - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
+ (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) |
+ (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET))
+ | (chip->tmode << SPI_TMOD_OFFSET);
+
+ return cr0;
+}
+EXPORT_SYMBOL_GPL(dw_spi_update_cr0);
+
+/* Configure CTRLR0 for DWC_ssi */
+u32 dw_spi_update_cr0_v1_01a(struct spi_controller *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+ u32 cr0;
+
+ /* CTRLR0[ 4: 0] Data Frame Size */
+ cr0 = (transfer->bits_per_word - 1);
+
+ /* CTRLR0[ 7: 6] Frame Format */
+ cr0 |= chip->type << DWC_SSI_CTRLR0_FRF_OFFSET;
+
+ /*
+ * SPI mode (SCPOL|SCPH)
+ * CTRLR0[ 8] Serial Clock Phase
+ * CTRLR0[ 9] Serial Clock Polarity
+ */
+ cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
+ cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET;
+
+ /* CTRLR0[11:10] Transfer Mode */
+ cr0 |= chip->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET;
+
+ /* CTRLR0[13] Shift Register Loop */
+ cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET;
+
+ return cr0;
}
+EXPORT_SYMBOL_GPL(dw_spi_update_cr0_v1_01a);
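A worked example of the DWC_ssi layout (8-bit word, SPI mode 3, Motorola SPI frames, transmit-and-receive mode; SSI_MOTO_SPI and SPI_TMOD_TR are both 0 in this driver):

	/*
	 * cr0 = (8 - 1)              DFS,  bits 4:0   -> 0x007
	 *     | (SSI_MOTO_SPI << 6)  FRF,  bits 7:6   -> 0x000
	 *     | (1 << 8) | (1 << 9)  SCPH, SCPOL      -> 0x300
	 *     | (SPI_TMOD_TR << 10)  TMOD, bits 11:10 -> 0x000
	 * so cr0 == 0x307.
	 */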
static int dw_spi_transfer_one(struct spi_controller *master,
struct spi_device *spi, struct spi_transfer *transfer)
@@ -313,34 +318,11 @@ static int dw_spi_transfer_one(struct spi_controller *master,
spi_set_clk(dws, chip->clk_div);
}
+ transfer->effective_speed_hz = dws->max_freq / chip->clk_div;
dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
- dws->dma_width = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
-
- /* Default SPI mode is SCPOL = 0, SCPH = 0 */
- cr0 = (transfer->bits_per_word - 1)
- | (chip->type << SPI_FRF_OFFSET)
- | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
- (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) |
- (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET))
- | (chip->tmode << SPI_TMOD_OFFSET);
- /*
- * Adjust transfer mode if necessary. Requires platform dependent
- * chipselect mechanism.
- */
- if (chip->cs_control) {
- if (dws->rx && dws->tx)
- chip->tmode = SPI_TMOD_TR;
- else if (dws->rx)
- chip->tmode = SPI_TMOD_RO;
- else
- chip->tmode = SPI_TMOD_TO;
-
- cr0 &= ~SPI_TMOD_MASK;
- cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
- }
-
- dw_writel(dws, DW_SPI_CTRL0, cr0);
+ cr0 = dws->update_cr0(master, spi, transfer);
+ dw_writel(dws, DW_SPI_CTRLR0, cr0);
/* Check if current transfer is a DMA transaction */
if (master->can_dma && master->can_dma(master, spi, transfer))
@@ -359,9 +341,9 @@ static int dw_spi_transfer_one(struct spi_controller *master,
spi_enable_chip(dws, 1);
return ret;
}
- } else if (!chip->poll_mode) {
+ } else {
txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
- dw_writel(dws, DW_SPI_TXFLTR, txlevel);
+ dw_writel(dws, DW_SPI_TXFTLR, txlevel);
/* Set the interrupt mask */
imask |= SPI_INT_TXEI | SPI_INT_TXOI |
@@ -373,14 +355,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
spi_enable_chip(dws, 1);
- if (dws->dma_mapped) {
- ret = dws->dma_ops->dma_transfer(dws, transfer);
- if (ret < 0)
- return ret;
- }
-
- if (chip->poll_mode)
- return poll_transfer(dws);
+ if (dws->dma_mapped)
+ return dws->dma_ops->dma_transfer(dws, transfer);
return 1;
}
@@ -399,7 +375,6 @@ static void dw_spi_handle_err(struct spi_controller *master,
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
- struct dw_spi_chip *chip_info = NULL;
struct chip_data *chip;
/* Only alloc on first setup */
@@ -411,21 +386,6 @@ static int dw_spi_setup(struct spi_device *spi)
spi_set_ctldata(spi, chip);
}
- /*
- * Protocol drivers may change the chip settings, so...
- * if chip_info exists, use it
- */
- chip_info = spi->controller_data;
-
- /* chip_info doesn't always exist */
- if (chip_info) {
- if (chip_info->cs_control)
- chip->cs_control = chip_info->cs_control;
-
- chip->poll_mode = chip_info->poll_mode;
- chip->type = chip_info->type;
- }
-
chip->tmode = SPI_TMOD_TR;
return 0;
@@ -452,11 +412,11 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
u32 fifo;
for (fifo = 1; fifo < 256; fifo++) {
- dw_writel(dws, DW_SPI_TXFLTR, fifo);
- if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
+ dw_writel(dws, DW_SPI_TXFTLR, fifo);
+ if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
break;
}
- dw_writel(dws, DW_SPI_TXFLTR, 0);
+ dw_writel(dws, DW_SPI_TXFTLR, 0);
dws->fifo_len = (fifo == 1) ? 0 : fifo;
dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
@@ -481,7 +441,6 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->master = master;
dws->type = SSI_MOTO_SPI;
- dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
spin_lock_init(&dws->buf_lock);
@@ -517,16 +476,16 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
spi_hw_init(dev, dws);
if (dws->dma_ops && dws->dma_ops->dma_init) {
- ret = dws->dma_ops->dma_init(dws);
+ ret = dws->dma_ops->dma_init(dev, dws);
if (ret) {
dev_warn(dev, "DMA init failed\n");
- dws->dma_inited = 0;
} else {
master->can_dma = dws->dma_ops->can_dma;
+ master->flags |= SPI_CONTROLLER_MUST_TX;
}
}
- ret = devm_spi_register_controller(dev, master);
+ ret = spi_register_controller(master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
goto err_dma_exit;
@@ -550,6 +509,8 @@ void dw_spi_remove_host(struct dw_spi *dws)
{
dw_spi_debugfs_remove(dws);
+ spi_unregister_controller(dws->master);
+
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
new file mode 100644
index 000000000000..5986c520b196
--- /dev/null
+++ b/drivers/spi/spi-dw-dma.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Special handling for DW DMA core
+ *
+ * Copyright (c) 2009, 2014 Intel Corporation.
+ */
+
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/pci.h>
+#include <linux/platform_data/dma-dw.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "spi-dw.h"
+
+#define WAIT_RETRIES 5
+#define RX_BUSY 0
+#define RX_BURST_LEVEL 16
+#define TX_BUSY 1
+#define TX_BURST_LEVEL 16
+
+static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_slave *s = param;
+
+ if (s->dma_dev != chan->device->dev)
+ return false;
+
+ chan->private = s;
+ return true;
+}
+
+static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
+{
+ struct dma_slave_caps caps;
+ u32 max_burst, def_burst;
+ int ret;
+
+ def_burst = dws->fifo_len / 2;
+
+ ret = dma_get_slave_caps(dws->rxchan, &caps);
+ if (!ret && caps.max_burst)
+ max_burst = caps.max_burst;
+ else
+ max_burst = RX_BURST_LEVEL;
+
+ dws->rxburst = min(max_burst, def_burst);
+
+ ret = dma_get_slave_caps(dws->txchan, &caps);
+ if (!ret && caps.max_burst)
+ max_burst = caps.max_burst;
+ else
+ max_burst = TX_BURST_LEVEL;
+
+ dws->txburst = min(max_burst, def_burst);
+}
+
+static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
+{
+ struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
+ struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
+ struct pci_dev *dma_dev;
+ dma_cap_mask_t mask;
+
+ /*
+ * Get the PCI device for the DMA controller; currently it can
+ * only be the Medfield DMA controller.
+ */
+ dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+ if (!dma_dev)
+ return -ENODEV;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* 1. Init rx channel */
+ rx->dma_dev = &dma_dev->dev;
+ dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
+ if (!dws->rxchan)
+ goto err_exit;
+
+ /* 2. Init tx channel */
+ tx->dma_dev = &dma_dev->dev;
+ dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
+ if (!dws->txchan)
+ goto free_rxchan;
+
+ dws->master->dma_rx = dws->rxchan;
+ dws->master->dma_tx = dws->txchan;
+
+ init_completion(&dws->dma_completion);
+
+ dw_spi_dma_maxburst_init(dws);
+
+ return 0;
+
+free_rxchan:
+ dma_release_channel(dws->rxchan);
+ dws->rxchan = NULL;
+err_exit:
+ return -EBUSY;
+}
+
+static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
+{
+ dws->rxchan = dma_request_slave_channel(dev, "rx");
+ if (!dws->rxchan)
+ return -ENODEV;
+
+ dws->txchan = dma_request_slave_channel(dev, "tx");
+ if (!dws->txchan) {
+ dma_release_channel(dws->rxchan);
+ dws->rxchan = NULL;
+ return -ENODEV;
+ }
+
+ dws->master->dma_rx = dws->rxchan;
+ dws->master->dma_tx = dws->txchan;
+
+ init_completion(&dws->dma_completion);
+
+ dw_spi_dma_maxburst_init(dws);
+
+ return 0;
+}
+
+static void dw_spi_dma_exit(struct dw_spi *dws)
+{
+ if (dws->txchan) {
+ dmaengine_terminate_sync(dws->txchan);
+ dma_release_channel(dws->txchan);
+ }
+
+ if (dws->rxchan) {
+ dmaengine_terminate_sync(dws->rxchan);
+ dma_release_channel(dws->rxchan);
+ }
+
+ dw_writel(dws, DW_SPI_DMACR, 0);
+}
+
+static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
+{
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR);
+
+ if (!irq_status)
+ return IRQ_NONE;
+
+ dw_readl(dws, DW_SPI_ICR);
+ spi_reset_chip(dws);
+
+ dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
+ dws->master->cur_msg->status = -EIO;
+ complete(&dws->dma_completion);
+ return IRQ_HANDLED;
+}
+
+static bool dw_spi_can_dma(struct spi_controller *master,
+ struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct dw_spi *dws = spi_controller_get_devdata(master);
+
+ return xfer->len > dws->fifo_len;
+}
+
+static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
+{
+ if (n_bytes == 1)
+ return DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (n_bytes == 2)
+ return DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+ return DMA_SLAVE_BUSWIDTH_UNDEFINED;
+}
+
+static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+ unsigned long long ms;
+
+ ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE;
+ do_div(ms, xfer->effective_speed_hz);
+ ms += ms + 200;
+
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
+ ms = wait_for_completion_timeout(&dws->dma_completion,
+ msecs_to_jiffies(ms));
+
+ if (ms == 0) {
+ dev_err(&dws->master->cur_msg->spi->dev,
+ "DMA transaction timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
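The timeout doubles the theoretical transfer time and adds a 200 ms margin. A worked example with illustrative numbers:

	/*
	 * xfer->len = 4096 bytes at effective_speed_hz = 1 MHz:
	 *   ms = 4096 * 1000 * 8 / 1000000 = 32   (theoretical time in ms)
	 *   ms = 32 + 32 + 200 = 264              (value handed to
	 *                                          msecs_to_jiffies())
	 */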
+
+static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
+{
+ return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
+}
+
+static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
+ struct spi_transfer *xfer)
+{
+ int retry = WAIT_RETRIES;
+ struct spi_delay delay;
+ u32 nents;
+
+ nents = dw_readl(dws, DW_SPI_TXFLR);
+ delay.unit = SPI_DELAY_UNIT_SCK;
+ delay.value = nents * dws->n_bytes * BITS_PER_BYTE;
+
+ while (dw_spi_dma_tx_busy(dws) && retry--)
+ spi_delay_exec(&delay, xfer);
+
+ if (retry < 0) {
+ dev_err(&dws->master->dev, "Tx hanged up\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * dws->dma_chan_busy is set before the DMA transfer starts; the
+ * callback for the Tx channel clears the corresponding bit.
+ */
+static void dw_spi_dma_tx_done(void *arg)
+{
+ struct dw_spi *dws = arg;
+
+ clear_bit(TX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(RX_BUSY, &dws->dma_chan_busy))
+ return;
+
+ dw_writel(dws, DW_SPI_DMACR, 0);
+ complete(&dws->dma_completion);
+}
+
+static struct dma_async_tx_descriptor *
+dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+ struct dma_slave_config txconf;
+ struct dma_async_tx_descriptor *txdesc;
+
+ if (!xfer->tx_buf)
+ return NULL;
+
+ memset(&txconf, 0, sizeof(txconf));
+ txconf.direction = DMA_MEM_TO_DEV;
+ txconf.dst_addr = dws->dma_addr;
+ txconf.dst_maxburst = dws->txburst;
+ txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
+ txconf.device_fc = false;
+
+ dmaengine_slave_config(dws->txchan, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(dws->txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ return NULL;
+
+ txdesc->callback = dw_spi_dma_tx_done;
+ txdesc->callback_param = dws;
+
+ return txdesc;
+}
+
+static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
+{
+ return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
+}
+
+static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
+{
+ int retry = WAIT_RETRIES;
+ struct spi_delay delay;
+ unsigned long ns, us;
+ u32 nents;
+
+ /*
+ * It's unlikely that the DMA engine is still fetching data, but if
+ * it is, give it some reasonable time. The timeout is based on the
+ * synchronous APB/SSI reference clock rate, the number of data
+ * entries left in the Rx FIFO, and the number of clock periods
+ * normally needed for a single APB read/write transaction without
+ * the PREADY signal being used (which is the case for the DW APB
+ * SSI controller).
+ */
+ nents = dw_readl(dws, DW_SPI_RXFLR);
+ ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
+ if (ns <= NSEC_PER_USEC) {
+ delay.unit = SPI_DELAY_UNIT_NSECS;
+ delay.value = ns;
+ } else {
+ us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
+ delay.unit = SPI_DELAY_UNIT_USECS;
+ delay.value = clamp_val(us, 0, USHRT_MAX);
+ }
+
+ while (dw_spi_dma_rx_busy(dws) && retry--)
+ spi_delay_exec(&delay, NULL);
+
+ if (retry < 0) {
+ dev_err(&dws->master->dev, "Rx hanged up\n");
+ return -EIO;
+ }
+
+ return 0;
+}
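A worked example of the delay calculation (illustrative clock rate and FIFO fill):

	/*
	 * max_freq = 100 MHz with 16 entries left in the Rx FIFO:
	 *   ns = 4 * 10^9 / 10^8 * 16 = 640 ns,
	 * which is <= 1 us, so the delay is programmed in
	 * SPI_DELAY_UNIT_NSECS rather than microseconds.
	 */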
+
+/*
+ * dws->dma_chan_busy is set before the DMA transfer starts; the
+ * callback for the Rx channel clears the corresponding bit.
+ */
+static void dw_spi_dma_rx_done(void *arg)
+{
+ struct dw_spi *dws = arg;
+
+ clear_bit(RX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(TX_BUSY, &dws->dma_chan_busy))
+ return;
+
+ dw_writel(dws, DW_SPI_DMACR, 0);
+ complete(&dws->dma_completion);
+}
+
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
+ struct spi_transfer *xfer)
+{
+ struct dma_slave_config rxconf;
+ struct dma_async_tx_descriptor *rxdesc;
+
+ if (!xfer->rx_buf)
+ return NULL;
+
+ memset(&rxconf, 0, sizeof(rxconf));
+ rxconf.direction = DMA_DEV_TO_MEM;
+ rxconf.src_addr = dws->dma_addr;
+ rxconf.src_maxburst = dws->rxburst;
+ rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
+ rxconf.device_fc = false;
+
+ dmaengine_slave_config(dws->rxchan, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ return NULL;
+
+ rxdesc->callback = dw_spi_dma_rx_done;
+ rxdesc->callback_param = dws;
+
+ return rxdesc;
+}
+
+static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+ u16 imr = 0, dma_ctrl = 0;
+
+ dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
+ dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - dws->txburst);
+
+ if (xfer->tx_buf)
+ dma_ctrl |= SPI_DMA_TDMAE;
+ if (xfer->rx_buf)
+ dma_ctrl |= SPI_DMA_RDMAE;
+ dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
+
+ /* Set the interrupt mask */
+ if (xfer->tx_buf)
+ imr |= SPI_INT_TXOI;
+ if (xfer->rx_buf)
+ imr |= SPI_INT_RXUI | SPI_INT_RXOI;
+ spi_umask_intr(dws, imr);
+
+ reinit_completion(&dws->dma_completion);
+
+ dws->transfer_handler = dw_spi_dma_transfer_handler;
+
+ return 0;
+}
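The watermarks follow from the burst lengths picked in dw_spi_dma_maxburst_init(). For example, assuming a 16-entry FIFO and a DMA channel whose max_burst is at least 8:

	/*
	 * fifo_len = 16 gives rxburst = txburst = 16 / 2 = 8, hence:
	 *   DW_SPI_DMARDLR = 8 - 1 = 7  -> Rx request at >= 8 FIFO entries
	 *   DW_SPI_DMATDLR = 16 - 8 = 8 -> Tx request at <= 8 FIFO entries
	 */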
+
+static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+ struct dma_async_tx_descriptor *txdesc, *rxdesc;
+ int ret;
+
+ /* Prepare the TX dma transfer */
+ txdesc = dw_spi_dma_prepare_tx(dws, xfer);
+
+ /* Prepare the RX dma transfer */
+ rxdesc = dw_spi_dma_prepare_rx(dws, xfer);
+
+ /* Rx must be started before Tx, since SPI shifts data in while it shifts data out */
+ if (rxdesc) {
+ set_bit(RX_BUSY, &dws->dma_chan_busy);
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(dws->rxchan);
+ }
+
+ if (txdesc) {
+ set_bit(TX_BUSY, &dws->dma_chan_busy);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(dws->txchan);
+ }
+
+ ret = dw_spi_dma_wait(dws, xfer);
+ if (ret)
+ return ret;
+
+ if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) {
+ ret = dw_spi_dma_wait_tx_done(dws, xfer);
+ if (ret)
+ return ret;
+ }
+
+ if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS)
+ ret = dw_spi_dma_wait_rx_done(dws);
+
+ return ret;
+}
+
+static void dw_spi_dma_stop(struct dw_spi *dws)
+{
+ if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
+ dmaengine_terminate_sync(dws->txchan);
+ clear_bit(TX_BUSY, &dws->dma_chan_busy);
+ }
+ if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
+ dmaengine_terminate_sync(dws->rxchan);
+ clear_bit(RX_BUSY, &dws->dma_chan_busy);
+ }
+
+ dw_writel(dws, DW_SPI_DMACR, 0);
+}
+
+static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
+ .dma_init = dw_spi_dma_init_mfld,
+ .dma_exit = dw_spi_dma_exit,
+ .dma_setup = dw_spi_dma_setup,
+ .can_dma = dw_spi_can_dma,
+ .dma_transfer = dw_spi_dma_transfer,
+ .dma_stop = dw_spi_dma_stop,
+};
+
+void dw_spi_dma_setup_mfld(struct dw_spi *dws)
+{
+ dws->dma_ops = &dw_spi_dma_mfld_ops;
+}
+EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);
+
+static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
+ .dma_init = dw_spi_dma_init_generic,
+ .dma_exit = dw_spi_dma_exit,
+ .dma_setup = dw_spi_dma_setup,
+ .can_dma = dw_spi_can_dma,
+ .dma_transfer = dw_spi_dma_transfer,
+ .dma_stop = dw_spi_dma_stop,
+};
+
+void dw_spi_dma_setup_generic(struct dw_spi *dws)
+{
+ dws->dma_ops = &dw_spi_dma_generic_ops;
+}
+EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
deleted file mode 100644
index 0d86c37e0aeb..000000000000
--- a/drivers/spi/spi-dw-mid.c
+++ /dev/null
@@ -1,322 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Special handling for DW core on Intel MID platform
- *
- * Copyright (c) 2009, 2014 Intel Corporation.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/spi/spi.h>
-#include <linux/types.h>
-
-#include "spi-dw.h"
-
-#ifdef CONFIG_SPI_DW_MID_DMA
-#include <linux/pci.h>
-#include <linux/platform_data/dma-dw.h>
-
-#define RX_BUSY 0
-#define TX_BUSY 1
-
-static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 };
-static struct dw_dma_slave mid_dma_rx = { .src_id = 0 };
-
-static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
-{
- struct dw_dma_slave *s = param;
-
- if (s->dma_dev != chan->device->dev)
- return false;
-
- chan->private = s;
- return true;
-}
-
-static int mid_spi_dma_init(struct dw_spi *dws)
-{
- struct pci_dev *dma_dev;
- struct dw_dma_slave *tx = dws->dma_tx;
- struct dw_dma_slave *rx = dws->dma_rx;
- dma_cap_mask_t mask;
-
- /*
- * Get pci device for DMA controller, currently it could only
- * be the DMA controller of Medfield
- */
- dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
- if (!dma_dev)
- return -ENODEV;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- /* 1. Init rx channel */
- rx->dma_dev = &dma_dev->dev;
- dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx);
- if (!dws->rxchan)
- goto err_exit;
- dws->master->dma_rx = dws->rxchan;
-
- /* 2. Init tx channel */
- tx->dma_dev = &dma_dev->dev;
- dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx);
- if (!dws->txchan)
- goto free_rxchan;
- dws->master->dma_tx = dws->txchan;
-
- dws->dma_inited = 1;
- return 0;
-
-free_rxchan:
- dma_release_channel(dws->rxchan);
-err_exit:
- return -EBUSY;
-}
-
-static void mid_spi_dma_exit(struct dw_spi *dws)
-{
- if (!dws->dma_inited)
- return;
-
- dmaengine_terminate_sync(dws->txchan);
- dma_release_channel(dws->txchan);
-
- dmaengine_terminate_sync(dws->rxchan);
- dma_release_channel(dws->rxchan);
-}
-
-static irqreturn_t dma_transfer(struct dw_spi *dws)
-{
- u16 irq_status = dw_readl(dws, DW_SPI_ISR);
-
- if (!irq_status)
- return IRQ_NONE;
-
- dw_readl(dws, DW_SPI_ICR);
- spi_reset_chip(dws);
-
- dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
- dws->master->cur_msg->status = -EIO;
- spi_finalize_current_transfer(dws->master);
- return IRQ_HANDLED;
-}
-
-static bool mid_spi_can_dma(struct spi_controller *master,
- struct spi_device *spi, struct spi_transfer *xfer)
-{
- struct dw_spi *dws = spi_controller_get_devdata(master);
-
- if (!dws->dma_inited)
- return false;
-
- return xfer->len > dws->fifo_len;
-}
-
-static enum dma_slave_buswidth convert_dma_width(u32 dma_width) {
- if (dma_width == 1)
- return DMA_SLAVE_BUSWIDTH_1_BYTE;
- else if (dma_width == 2)
- return DMA_SLAVE_BUSWIDTH_2_BYTES;
-
- return DMA_SLAVE_BUSWIDTH_UNDEFINED;
-}
-
-/*
- * dws->dma_chan_busy is set before the dma transfer starts, callback for tx
- * channel will clear a corresponding bit.
- */
-static void dw_spi_dma_tx_done(void *arg)
-{
- struct dw_spi *dws = arg;
-
- clear_bit(TX_BUSY, &dws->dma_chan_busy);
- if (test_bit(RX_BUSY, &dws->dma_chan_busy))
- return;
- spi_finalize_current_transfer(dws->master);
-}
-
-static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
- struct spi_transfer *xfer)
-{
- struct dma_slave_config txconf;
- struct dma_async_tx_descriptor *txdesc;
-
- if (!xfer->tx_buf)
- return NULL;
-
- txconf.direction = DMA_MEM_TO_DEV;
- txconf.dst_addr = dws->dma_addr;
- txconf.dst_maxburst = 16;
- txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- txconf.dst_addr_width = convert_dma_width(dws->dma_width);
- txconf.device_fc = false;
-
- dmaengine_slave_config(dws->txchan, &txconf);
-
- txdesc = dmaengine_prep_slave_sg(dws->txchan,
- xfer->tx_sg.sgl,
- xfer->tx_sg.nents,
- DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!txdesc)
- return NULL;
-
- txdesc->callback = dw_spi_dma_tx_done;
- txdesc->callback_param = dws;
-
- return txdesc;
-}
-
-/*
- * dws->dma_chan_busy is set before the dma transfer starts, callback for rx
- * channel will clear a corresponding bit.
- */
-static void dw_spi_dma_rx_done(void *arg)
-{
- struct dw_spi *dws = arg;
-
- clear_bit(RX_BUSY, &dws->dma_chan_busy);
- if (test_bit(TX_BUSY, &dws->dma_chan_busy))
- return;
- spi_finalize_current_transfer(dws->master);
-}
-
-static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
- struct spi_transfer *xfer)
-{
- struct dma_slave_config rxconf;
- struct dma_async_tx_descriptor *rxdesc;
-
- if (!xfer->rx_buf)
- return NULL;
-
- rxconf.direction = DMA_DEV_TO_MEM;
- rxconf.src_addr = dws->dma_addr;
- rxconf.src_maxburst = 16;
- rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- rxconf.src_addr_width = convert_dma_width(dws->dma_width);
- rxconf.device_fc = false;
-
- dmaengine_slave_config(dws->rxchan, &rxconf);
-
- rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
- xfer->rx_sg.sgl,
- xfer->rx_sg.nents,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!rxdesc)
- return NULL;
-
- rxdesc->callback = dw_spi_dma_rx_done;
- rxdesc->callback_param = dws;
-
- return rxdesc;
-}
-
-static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
-{
- u16 dma_ctrl = 0;
-
- dw_writel(dws, DW_SPI_DMARDLR, 0xf);
- dw_writel(dws, DW_SPI_DMATDLR, 0x10);
-
- if (xfer->tx_buf)
- dma_ctrl |= SPI_DMA_TDMAE;
- if (xfer->rx_buf)
- dma_ctrl |= SPI_DMA_RDMAE;
- dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
-
- /* Set the interrupt mask */
- spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);
-
- dws->transfer_handler = dma_transfer;
-
- return 0;
-}
-
-static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
-{
- struct dma_async_tx_descriptor *txdesc, *rxdesc;
-
- /* Prepare the TX dma transfer */
- txdesc = dw_spi_dma_prepare_tx(dws, xfer);
-
- /* Prepare the RX dma transfer */
- rxdesc = dw_spi_dma_prepare_rx(dws, xfer);
-
- /* rx must be started before tx due to spi instinct */
- if (rxdesc) {
- set_bit(RX_BUSY, &dws->dma_chan_busy);
- dmaengine_submit(rxdesc);
- dma_async_issue_pending(dws->rxchan);
- }
-
- if (txdesc) {
- set_bit(TX_BUSY, &dws->dma_chan_busy);
- dmaengine_submit(txdesc);
- dma_async_issue_pending(dws->txchan);
- }
-
- return 0;
-}
-
-static void mid_spi_dma_stop(struct dw_spi *dws)
-{
- if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
- dmaengine_terminate_sync(dws->txchan);
- clear_bit(TX_BUSY, &dws->dma_chan_busy);
- }
- if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
- dmaengine_terminate_sync(dws->rxchan);
- clear_bit(RX_BUSY, &dws->dma_chan_busy);
- }
-}
-
-static const struct dw_spi_dma_ops mid_dma_ops = {
- .dma_init = mid_spi_dma_init,
- .dma_exit = mid_spi_dma_exit,
- .dma_setup = mid_spi_dma_setup,
- .can_dma = mid_spi_can_dma,
- .dma_transfer = mid_spi_dma_transfer,
- .dma_stop = mid_spi_dma_stop,
-};
-#endif
-
-/* Some specific info for SPI0 controller on Intel MID */
-
-/* HW info for MRST Clk Control Unit, 32b reg per controller */
-#define MRST_SPI_CLK_BASE 100000000 /* 100m */
-#define MRST_CLK_SPI_REG 0xff11d86c
-#define CLK_SPI_BDIV_OFFSET 0
-#define CLK_SPI_BDIV_MASK 0x00000007
-#define CLK_SPI_CDIV_OFFSET 9
-#define CLK_SPI_CDIV_MASK 0x00000e00
-#define CLK_SPI_DISABLE_OFFSET 8
-
-int dw_spi_mid_init(struct dw_spi *dws)
-{
- void __iomem *clk_reg;
- u32 clk_cdiv;
-
- clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
- if (!clk_reg)
- return -ENOMEM;
-
- /* Get SPI controller operating freq info */
- clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
- clk_cdiv &= CLK_SPI_CDIV_MASK;
- clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
- dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
-
- iounmap(clk_reg);
-
-#ifdef CONFIG_SPI_DW_MID_DMA
- dws->dma_tx = &mid_dma_tx;
- dws->dma_rx = &mid_dma_rx;
- dws->dma_ops = &mid_dma_ops;
-#endif
- return 0;
-}
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 384a3ab6dc2d..403403deae66 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -20,6 +19,7 @@
#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include "spi-dw.h"
@@ -30,6 +30,7 @@ struct dw_spi_mmio {
struct clk *clk;
struct clk *pclk;
void *priv;
+ struct reset_control *rstc;
};
#define MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
@@ -44,6 +45,13 @@ struct dw_spi_mmio {
#define MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE BIT(13)
#define MSCC_SPI_MST_SW_MODE_SW_SPI_CS(x) (x << 5)
+/*
+ * For Keem Bay, CTRLR0[31] is used to select controller mode.
+ * 0: SSI is slave
+ * 1: SSI is master
+ */
+#define KEEMBAY_CTRLR0_SSIC_IS_MST BIT(31)
+
struct dw_spi_mscc {
struct regmap *syscon;
void __iomem *spi_mst;
@@ -106,6 +114,9 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
dwsmmio->dws.set_cs = dw_spi_mscc_set_cs;
dwsmmio->priv = dwsmscc;
+ /* Register hook to configure CTRLR0 */
+ dwsmmio->dws.update_cr0 = dw_spi_update_cr0;
+
return 0;
}
@@ -128,6 +139,49 @@ static int dw_spi_alpine_init(struct platform_device *pdev,
{
dwsmmio->dws.cs_override = 1;
+ /* Register hook to configure CTRLR0 */
+ dwsmmio->dws.update_cr0 = dw_spi_update_cr0;
+
+ return 0;
+}
+
+static int dw_spi_dw_apb_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ /* Register hook to configure CTRLR0 */
+ dwsmmio->dws.update_cr0 = dw_spi_update_cr0;
+
+ dw_spi_dma_setup_generic(&dwsmmio->dws);
+
+ return 0;
+}
+
+static int dw_spi_dwc_ssi_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ /* Register hook to configure CTRLR0 */
+ dwsmmio->dws.update_cr0 = dw_spi_update_cr0_v1_01a;
+
+ dw_spi_dma_setup_generic(&dwsmmio->dws);
+
+ return 0;
+}
+
+static u32 dw_spi_update_cr0_keembay(struct spi_controller *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ u32 cr0 = dw_spi_update_cr0_v1_01a(master, spi, transfer);
+
+ return cr0 | KEEMBAY_CTRLR0_SSIC_IS_MST;
+}
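Since the hook only ORs in the master-mode bit on top of the DWC_ssi layout, a worked example (8-bit word, SPI mode 0; illustrative):

	/*
	 * dw_spi_update_cr0_v1_01a() yields cr0 == 0x7; Keem Bay then sets
	 * bit 31: 0x00000007 | KEEMBAY_CTRLR0_SSIC_IS_MST == 0x80000007.
	 */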
+
+static int dw_spi_keembay_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ /* Register hook to configure CTRLR0 */
+ dwsmmio->dws.update_cr0 = dw_spi_update_cr0_keembay;
+
return 0;
}
@@ -136,6 +190,7 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
int (*init_func)(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio);
struct dw_spi_mmio *dwsmmio;
+ struct resource *mem;
struct dw_spi *dws;
int ret;
int num_cs;
@@ -148,11 +203,11 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dws = &dwsmmio->dws;
/* Get basic io resource and map it */
- dws->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(dws->regs)) {
- dev_err(&pdev->dev, "SPI region map failed\n");
+ dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(dws->regs))
return PTR_ERR(dws->regs);
- }
+
+ dws->paddr = mem->start;
dws->irq = platform_get_irq(pdev, 0);
if (dws->irq < 0)
@@ -175,6 +230,14 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
if (ret)
goto out_clk;
+ /* find an optional reset controller */
+ dwsmmio->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, "spi");
+ if (IS_ERR(dwsmmio->rstc)) {
+ ret = PTR_ERR(dwsmmio->rstc);
+ goto out_clk;
+ }
+ reset_control_deassert(dwsmmio->rstc);
+
dws->bus_num = pdev->id;
dws->max_freq = clk_get_rate(dwsmmio->clk);
@@ -208,6 +271,8 @@ out:
clk_disable_unprepare(dwsmmio->pclk);
out_clk:
clk_disable_unprepare(dwsmmio->clk);
+ reset_control_assert(dwsmmio->rstc);
+
return ret;
}
@@ -219,25 +284,30 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dwsmmio->pclk);
clk_disable_unprepare(dwsmmio->clk);
+ reset_control_assert(dwsmmio->rstc);
return 0;
}
static const struct of_device_id dw_spi_mmio_of_match[] = {
- { .compatible = "snps,dw-apb-ssi", },
+ { .compatible = "snps,dw-apb-ssi", .data = dw_spi_dw_apb_init},
{ .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init},
{ .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init},
{ .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init},
- { .compatible = "renesas,rzn1-spi", },
+ { .compatible = "renesas,rzn1-spi", .data = dw_spi_dw_apb_init},
+ { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_dwc_ssi_init},
+ { .compatible = "intel,keembay-ssi", .data = dw_spi_keembay_init},
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_spi_mmio_acpi_match[] = {
- {"HISI0173", 0},
+ {"HISI0173", (kernel_ulong_t)dw_spi_dw_apb_init},
{},
};
MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match);
+#endif
static struct platform_driver dw_spi_mmio_driver = {
.probe = dw_spi_mmio_probe,
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 12c131b5fb4e..2ea73809ca34 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -5,7 +5,6 @@
* Copyright (c) 2009, 2014 Intel Corporation.
*/
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -16,6 +15,15 @@
#define DRIVER_NAME "dw_spi_pci"
+/* HW info for MRST Clk Control Unit, 32b reg per controller */
+#define MRST_SPI_CLK_BASE 100000000 /* 100 MHz */
+#define MRST_CLK_SPI_REG 0xff11d86c
+#define CLK_SPI_BDIV_OFFSET 0
+#define CLK_SPI_BDIV_MASK 0x00000007
+#define CLK_SPI_CDIV_OFFSET 9
+#define CLK_SPI_CDIV_MASK 0x00000e00
+#define CLK_SPI_DISABLE_OFFSET 8
+
struct spi_pci_desc {
int (*setup)(struct dw_spi *);
u16 num_cs;
@@ -23,19 +31,55 @@ struct spi_pci_desc {
u32 max_freq;
};
+static int spi_mid_init(struct dw_spi *dws)
+{
+ void __iomem *clk_reg;
+ u32 clk_cdiv;
+
+ clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
+ if (!clk_reg)
+ return -ENOMEM;
+
+ /* Get SPI controller operating freq info */
+ clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
+ clk_cdiv &= CLK_SPI_CDIV_MASK;
+ clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
+ dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
+
+ iounmap(clk_reg);
+
+ /* Register hook to configure CTRLR0 */
+ dws->update_cr0 = dw_spi_update_cr0;
+
+ dw_spi_dma_setup_mfld(dws);
+
+ return 0;
+}
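A worked decode of the clock-divider field (illustrative register value):

	/*
	 * A CCU register value of 0x600 gives:
	 *   clk_cdiv = (0x600 & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET = 3
	 *   max_freq = 100000000 / (3 + 1) = 25 MHz
	 */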
+
+static int spi_generic_init(struct dw_spi *dws)
+{
+ /* Register hook to configure CTRLR0 */
+ dws->update_cr0 = dw_spi_update_cr0;
+
+ dw_spi_dma_setup_generic(dws);
+
+ return 0;
+}
+
static struct spi_pci_desc spi_pci_mid_desc_1 = {
- .setup = dw_spi_mid_init,
+ .setup = spi_mid_init,
.num_cs = 5,
.bus_num = 0,
};
static struct spi_pci_desc spi_pci_mid_desc_2 = {
- .setup = dw_spi_mid_init,
+ .setup = spi_mid_init,
.num_cs = 2,
.bus_num = 1,
};
static struct spi_pci_desc spi_pci_ehl_desc = {
+ .setup = spi_generic_init,
.num_cs = 2,
.bus_num = -1,
.max_freq = 100000000,
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 1bf5713e047d..151ba316619e 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -2,18 +2,21 @@
#ifndef DW_SPI_HEADER_H
#define DW_SPI_HEADER_H
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/irqreturn.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
/* Register offsets */
-#define DW_SPI_CTRL0 0x00
-#define DW_SPI_CTRL1 0x04
+#define DW_SPI_CTRLR0 0x00
+#define DW_SPI_CTRLR1 0x04
#define DW_SPI_SSIENR 0x08
#define DW_SPI_MWCR 0x0c
#define DW_SPI_SER 0x10
#define DW_SPI_BAUDR 0x14
-#define DW_SPI_TXFLTR 0x18
-#define DW_SPI_RXFLTR 0x1c
+#define DW_SPI_TXFTLR 0x18
+#define DW_SPI_RXFTLR 0x1c
#define DW_SPI_TXFLR 0x20
#define DW_SPI_RXFLR 0x24
#define DW_SPI_SR 0x28
@@ -57,6 +60,15 @@
#define SPI_SRL_OFFSET 11
#define SPI_CFS_OFFSET 12
+/* Bit fields in CTRLR0 based on DWC_ssi_databook.pdf v1.01a */
+#define DWC_SSI_CTRLR0_SRL_OFFSET 13
+#define DWC_SSI_CTRLR0_TMOD_OFFSET 10
+#define DWC_SSI_CTRLR0_TMOD_MASK GENMASK(11, 10)
+#define DWC_SSI_CTRLR0_SCPOL_OFFSET 9
+#define DWC_SSI_CTRLR0_SCPH_OFFSET 8
+#define DWC_SSI_CTRLR0_FRF_OFFSET 6
+#define DWC_SSI_CTRLR0_DFS_OFFSET 0
+
/* Bit fields in SR, 7 bits */
#define SR_MASK 0x7f /* cover 7 bits */
#define SR_BUSY (1 << 0)
@@ -90,7 +102,7 @@ enum dw_ssi_type {
struct dw_spi;
struct dw_spi_dma_ops {
- int (*dma_init)(struct dw_spi *dws);
+ int (*dma_init)(struct device *dev, struct dw_spi *dws);
void (*dma_exit)(struct dw_spi *dws);
int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer);
bool (*can_dma)(struct spi_controller *master, struct spi_device *spi,
@@ -114,6 +126,8 @@ struct dw_spi {
u16 bus_num;
u16 num_cs; /* supported slave numbers */
void (*set_cs)(struct spi_device *spi, bool enable);
+ u32 (*update_cr0)(struct spi_controller *master, struct spi_device *spi,
+ struct spi_transfer *transfer);
/* Current message transfer state info */
size_t len;
@@ -124,24 +138,22 @@ struct dw_spi {
void *rx_end;
int dma_mapped;
u8 n_bytes; /* current is a 1/2 bytes op */
- u32 dma_width;
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
u32 current_freq; /* frequency in hz */
/* DMA info */
- int dma_inited;
struct dma_chan *txchan;
+ u32 txburst;
struct dma_chan *rxchan;
+ u32 rxburst;
unsigned long dma_chan_busy;
dma_addr_t dma_addr; /* phy address of the Data register */
const struct dw_spi_dma_ops *dma_ops;
- void *dma_tx;
- void *dma_rx;
+ struct completion dma_completion;
- /* Bus interface info */
- void *priv;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
+ struct debugfs_regset32 regset;
#endif
};
@@ -235,24 +247,28 @@ static inline void spi_shutdown_chip(struct dw_spi *dws)
spi_set_clk(dws, 0);
}
-/*
- * Each SPI slave device to work with dw_api controller should
- * has such a structure claiming its working mode (poll or PIO/DMA),
- * which can be save in the "controller_data" member of the
- * struct spi_device.
- */
-struct dw_spi_chip {
- u8 poll_mode; /* 1 for controller polling mode */
- u8 type; /* SPI/SSP/MicroWire */
- void (*cs_control)(u32 command);
-};
-
extern void dw_spi_set_cs(struct spi_device *spi, bool enable);
extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
+extern u32 dw_spi_update_cr0(struct spi_controller *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer);
+extern u32 dw_spi_update_cr0_v1_01a(struct spi_controller *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer);
+
+#ifdef CONFIG_SPI_DW_DMA
+
+extern void dw_spi_dma_setup_mfld(struct dw_spi *dws);
+extern void dw_spi_dma_setup_generic(struct dw_spi *dws);
+
+#else
+
+static inline void dw_spi_dma_setup_mfld(struct dw_spi *dws) {}
+static inline void dw_spi_dma_setup_generic(struct dw_spi *dws) {}
+
+#endif /* !CONFIG_SPI_DW_DMA */
-/* platform related setup */
-extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */
#endif /* DW_SPI_HEADER_H */
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 4e1ccd4e52b6..8c854b187b1d 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -31,7 +31,8 @@
#include <linux/platform_data/spi-ep93xx.h>
#define SSPCR0 0x0000
-#define SSPCR0_MODE_SHIFT 6
+#define SSPCR0_SPO BIT(6)
+#define SSPCR0_SPH BIT(7)
#define SSPCR0_SCR_SHIFT 8
#define SSPCR1 0x0004
@@ -159,7 +160,10 @@ static int ep93xx_spi_chip_setup(struct spi_master *master,
return err;
cr0 = div_scr << SSPCR0_SCR_SHIFT;
- cr0 |= (spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
+ if (spi->mode & SPI_CPOL)
+ cr0 |= SSPCR0_SPO;
+ if (spi->mode & SPI_CPHA)
+ cr0 |= SSPCR0_SPH;
cr0 |= dss;
dev_dbg(&master->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
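The explicit SPO/SPH bits matter for SPI modes 1 and 2: SPI_CPHA is bit 0 and SPI_CPOL is bit 1 of spi->mode, so the old 6-bit shift landed CPHA on SPO (bit 6) and CPOL on SPH (bit 7), swapped with respect to the hardware. A sketch of the mode-1 case (assuming this swap is what motivated the change):

	/*
	 * SPI mode 1 (CPHA = 1, CPOL = 0):
	 *   old: (SPI_CPHA << SSPCR0_MODE_SHIFT) == BIT(6)  -- set SPO (wrong)
	 *   new: SSPCR0_SPH                      == BIT(7)  -- set SPH (right)
	 */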
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 50e41f66a2d7..a35faced0456 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
+// Copyright 2020 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI
@@ -26,6 +27,9 @@
#define SPI_MCR_CLR_TXF BIT(11)
#define SPI_MCR_CLR_RXF BIT(10)
#define SPI_MCR_XSPI BIT(3)
+#define SPI_MCR_DIS_TXF BIT(13)
+#define SPI_MCR_DIS_RXF BIT(12)
+#define SPI_MCR_HALT BIT(0)
#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)
@@ -246,13 +250,33 @@ struct fsl_dspi {
static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
- memcpy(txdata, dspi->tx, dspi->oper_word_size);
+ switch (dspi->oper_word_size) {
+ case 1:
+ *txdata = *(u8 *)dspi->tx;
+ break;
+ case 2:
+ *txdata = *(u16 *)dspi->tx;
+ break;
+ case 4:
+ *txdata = *(u32 *)dspi->tx;
+ break;
+ }
dspi->tx += dspi->oper_word_size;
}
static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
- memcpy(dspi->rx, &rxdata, dspi->oper_word_size);
+ switch (dspi->oper_word_size) {
+ case 1:
+ *(u8 *)dspi->rx = rxdata;
+ break;
+ case 2:
+ *(u16 *)dspi->rx = rxdata;
+ break;
+ case 4:
+ *(u32 *)dspi->rx = rxdata;
+ break;
+ }
dspi->rx += dspi->oper_word_size;
}
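Replacing memcpy() with explicit casts makes the value assembly endianness-independent: a memcpy() of fewer than four bytes into a u32 lands in the most-significant byte on big-endian CPUs. A sketch of the difference (illustrative, assuming that is the motivation):

	u32 w = 0;
	u8 b = 0xab;

	memcpy(&w, &b, 1);  /* big-endian: w == 0xab000000 */
	w = *(u8 *)&b;      /* either endianness: w == 0x000000ab */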
@@ -1417,6 +1441,24 @@ static int dspi_remove(struct platform_device *pdev)
return 0;
}
+static void dspi_shutdown(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+
+ /* Disable RX and TX */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
+ SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);
+
+ /* Stop Running */
+ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
+
+ dspi_release_dma(dspi);
+ clk_disable_unprepare(dspi->clk);
+ spi_unregister_controller(dspi->ctlr);
+}
+
static struct platform_driver fsl_dspi_driver = {
.driver.name = DRIVER_NAME,
.driver.of_match_table = fsl_dspi_dt_ids,
@@ -1424,6 +1466,7 @@ static struct platform_driver fsl_dspi_driver = {
.driver.pm = &dspi_pm,
.probe = dspi_probe,
.remove = dspi_remove,
+ .shutdown = dspi_shutdown,
};
module_platform_driver(fsl_dspi_driver);
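The new dspi_shutdown() relies on the platform core invoking a driver's .shutdown hook at reboot or power-off, which is the standard place to quiesce DMA and halt the engine so the next boot stage finds the controller idle. A minimal sketch of the hook wiring, under a hypothetical mydrv driver:

#include <linux/platform_device.h>

struct mydrv { void __iomem *regs; };	/* hypothetical private data */

static void mydrv_quiesce_hw(struct mydrv *priv)
{
	/* stop DMA, mask interrupts, halt the transfer engine */
}

static void mydrv_shutdown(struct platform_device *pdev)
{
	struct mydrv *priv = platform_get_drvdata(pdev);

	mydrv_quiesce_hw(priv);
}

static struct platform_driver mydrv_driver = {
	.driver.name = "mydrv",
	.shutdown = mydrv_shutdown,
	/* .probe/.remove omitted in this sketch */
};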
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 8b41b70f6f5c..1552b28b9515 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -186,14 +186,13 @@ static bool fsl_lpspi_can_dma(struct spi_controller *controller,
bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);
- switch (bytes_per_word)
- {
- case 1:
- case 2:
- case 4:
- break;
- default:
- return false;
+ switch (bytes_per_word) {
+ case 1:
+ case 2:
+ case 4:
+ break;
+ default:
+ return false;
}
return true;
@@ -941,7 +940,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(fsl_lpspi->dev);
if (ret < 0) {
dev_err(fsl_lpspi->dev, "failed to enable clock\n");
- goto out_controller_put;
+ goto out_pm_get;
}
temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
@@ -950,13 +949,15 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
if (ret == -EPROBE_DEFER)
- goto out_controller_put;
+ goto out_pm_get;
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
return 0;
+out_pm_get:
+ pm_runtime_put_noidle(fsl_lpspi->dev);
out_controller_put:
spi_controller_put(controller);
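The relabelled error path encodes a pm_runtime rule that is easy to miss: pm_runtime_get_sync() increments the usage count even when it fails, so the unwind must drop that reference with pm_runtime_put_noidle(). A sketch of the safe call shape:

#include <linux/pm_runtime.h>

static int example_touch_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the usage count was bumped even on failure */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}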
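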
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 02e5cba0a5bb..6766262d7e75 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -876,14 +876,15 @@ static int fsl_qspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
- q->ahb_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(q->ahb_addr)) {
- ret = PTR_ERR(q->ahb_addr);
+ q->memmap_phy = res->start;
+ /* Since there are 4 chip selects, the required map size is 4 times ahb_buf_size */
+ q->ahb_addr = devm_ioremap(dev, q->memmap_phy,
+ (q->devtype_data->ahb_buf_size * 4));
+ if (!q->ahb_addr) {
+ ret = -ENOMEM;
goto err_put_ctrl;
}
- q->memmap_phy = res->start;
-
/* find the clocks */
q->clk_en = devm_clk_get(dev, "qspi_en");
if (IS_ERR(q->clk_en)) {
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 3b81772fea0d..67f022b8c81d 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -588,7 +588,7 @@ static void fsl_spi_grlib_probe(struct device *dev)
pdata->cs_control = fsl_spi_grlib_cs_control;
}
-static struct spi_master * fsl_spi_probe(struct device *dev,
+static struct spi_master *fsl_spi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index e3b57252d075..64a18d08a4d9 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -17,6 +17,11 @@
#define HISI_SFC_V3XX_VERSION (0x1f8)
+#define HISI_SFC_V3XX_INT_STAT (0x120)
+#define HISI_SFC_V3XX_INT_STAT_PP_ERR BIT(2)
+#define HISI_SFC_V3XX_INT_STAT_ADDR_IACCES BIT(5)
+#define HISI_SFC_V3XX_INT_CLR (0x12c)
+#define HISI_SFC_V3XX_INT_CLR_CLEAR (0xff)
#define HISI_SFC_V3XX_CMD_CFG (0x300)
#define HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT (1 << 17)
#define HISI_SFC_V3XX_CMD_CFG_DUAL_IO (2 << 17)
@@ -163,7 +168,7 @@ static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
u8 chip_select)
{
int ret, len = op->data.nbytes;
- u32 config = 0;
+ u32 int_stat, config = 0;
if (op->addr.nbytes)
config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
@@ -228,6 +233,25 @@ static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
if (ret)
return ret;
+ /*
+ * The interrupt status register indicates whether an error occurred
+ * during the operation. Check it, and clear the interrupts so that
+ * stale status does not carry over to the next operation.
+ */
+ int_stat = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
+ writel(HISI_SFC_V3XX_INT_CLR_CLEAR,
+ host->regbase + HISI_SFC_V3XX_INT_CLR);
+
+ if (int_stat & HISI_SFC_V3XX_INT_STAT_ADDR_IACCES) {
+ dev_err(host->dev, "fail to access protected address\n");
+ return -EIO;
+ }
+
+ if (int_stat & HISI_SFC_V3XX_INT_STAT_PP_ERR) {
+ dev_err(host->dev, "page program operation failed\n");
+ return -EIO;
+ }
+
if (op->data.dir == SPI_MEM_DATA_IN)
hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len);
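The added block follows a latch, clear, translate idiom: read the latched interrupt status once, clear it unconditionally so stale bits cannot leak into the next command's status check, then map individual error bits onto errno values. A sketch with hypothetical register offsets and bits:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/io.h>

#define INT_STAT	0x120	/* hypothetical offsets and bits */
#define INT_CLR		0x12c
#define INT_CLR_ALL	0xff
#define INT_STAT_ERR	BIT(2)

static int check_op_status(void __iomem *regbase)
{
	u32 stat = readl(regbase + INT_STAT);

	/* clear first so the next operation starts from a clean slate */
	writel(INT_CLR_ALL, regbase + INT_CLR);

	return (stat & INT_STAT_ERR) ? -EIO : 0;
}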
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index f4f28a400a96..b7a85e3fe1c1 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -71,6 +71,7 @@ struct spi_imx_devtype_data {
void (*reset)(struct spi_imx_data *);
void (*setup_wml)(struct spi_imx_data *);
void (*disable)(struct spi_imx_data *);
+ void (*disable_dma)(struct spi_imx_data *);
bool has_dmamode;
bool has_slavemode;
unsigned int fifo_size;
@@ -485,6 +486,11 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
+static void mx51_disable_dma(struct spi_imx_data *spi_imx)
+{
+ writel(0, spi_imx->base + MX51_ECSPI_DMA);
+}
+
static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
{
u32 ctrl;
@@ -987,6 +993,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.rx_available = mx51_ecspi_rx_available,
.reset = mx51_ecspi_reset,
.setup_wml = mx51_setup_wml,
+ .disable_dma = mx51_disable_dma,
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
@@ -1001,6 +1008,7 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
.prepare_transfer = mx51_ecspi_prepare_transfer,
.trigger = mx51_ecspi_trigger,
.rx_available = mx51_ecspi_rx_available,
+ .disable_dma = mx51_disable_dma,
.reset = mx51_ecspi_reset,
.fifo_size = 64,
.has_dmamode = true,
@@ -1385,6 +1393,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dmaengine_terminate_all(master->dma_tx);
+ dmaengine_terminate_all(master->dma_rx);
return -EINVAL;
}
@@ -1498,6 +1507,7 @@ static int spi_imx_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ int ret;
/* flush rxfifo before transfer */
while (spi_imx->devtype_data->rx_available(spi_imx))
@@ -1506,10 +1516,23 @@ static int spi_imx_transfer(struct spi_device *spi,
if (spi_imx->slave_mode)
return spi_imx_pio_transfer_slave(spi, transfer);
- if (spi_imx->usedma)
- return spi_imx_dma_transfer(spi_imx, transfer);
- else
- return spi_imx_pio_transfer(spi, transfer);
+ /*
+ * Fall back to PIO mode if a DMA setup error happens; for example, the
+ * SDMA firmware may not have been updated as erratum ERR009165 requires.
+ */
+ if (spi_imx->usedma) {
+ ret = spi_imx_dma_transfer(spi_imx, transfer);
+ if (ret != -EINVAL)
+ return ret;
+
+ spi_imx->devtype_data->disable_dma(spi_imx);
+
+ spi_imx->usedma = false;
+ spi_imx->dynamic_burst = spi_imx->devtype_data->dynamic_burst;
+ dev_dbg(&spi->dev, "Fallback to PIO mode\n");
+ }
+
+ return spi_imx_pio_transfer(spi, transfer);
}
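The transfer path above demotes the controller to PIO only when DMA descriptor setup fails (returning -EINVAL), while genuine transfer errors still propagate to the caller; once demoted, usedma stays false so later transfers skip DMA entirely. The general shape of the pattern, with hypothetical ctx/xfer_* helpers:

#include <linux/errno.h>
#include <linux/types.h>

struct ctx { bool use_dma; };	/* hypothetical context */

static int xfer_dma(struct ctx *c);	/* hypothetical helpers */
static int xfer_pio(struct ctx *c);

static int do_transfer(struct ctx *c)
{
	int ret;

	if (c->use_dma) {
		ret = xfer_dma(c);
		if (ret != -EINVAL)
			return ret;	/* success, or a real transfer error */
		c->use_dma = false;	/* setup failed: demote to PIO */
	}

	return xfer_pio(c);
}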
static int spi_imx_setup(struct spi_device *spi)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index adaa0c49f966..9a86cc27fcc0 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -108,15 +108,17 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
return 0;
case 2:
- if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
- (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
+ if ((tx &&
+ (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
+ (!tx &&
+ (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
return 0;
break;
case 4:
- if ((tx && (mode & SPI_TX_QUAD)) ||
- (!tx && (mode & SPI_RX_QUAD)))
+ if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
+ (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
return 0;
break;
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index c15a9910549f..7bc302b50396 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -391,7 +391,7 @@ static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}
-int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
int ret;
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index 4f94c9127fc1..cc9ef371db14 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -51,6 +51,10 @@ static int spi_mux_select(struct spi_device *spi)
struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller);
int ret;
+ ret = mux_control_select(priv->mux, spi->chip_select);
+ if (ret)
+ return ret;
+
if (priv->current_cs == spi->chip_select)
return 0;
@@ -62,10 +66,6 @@ static int spi_mux_select(struct spi_device *spi)
priv->spi->mode = spi->mode;
priv->spi->bits_per_word = spi->bits_per_word;
- ret = mux_control_select(priv->mux, spi->chip_select);
- if (ret)
- return ret;
-
priv->current_cs = spi->chip_select;
return 0;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 1f59beb7d27e..43f73db22f21 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -17,10 +17,8 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/clk.h>
#include <linux/sizes.h>
-#include <linux/gpio.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "orion_spi"
@@ -98,7 +96,6 @@ struct orion_spi {
struct clk *clk;
struct clk *axi_clk;
const struct orion_spi_dev *devdata;
- int unused_hw_gpio;
struct orion_child_options child[ORION_NUM_CHIPSELECTS];
};
@@ -325,20 +322,27 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
static void orion_spi_set_cs(struct spi_device *spi, bool enable)
{
struct orion_spi *orion_spi;
- int cs;
orion_spi = spi_master_get_devdata(spi->master);
- if (gpio_is_valid(spi->cs_gpio))
- cs = orion_spi->unused_hw_gpio;
- else
- cs = spi->chip_select;
-
+ /*
+ * If this line is using a GPIO to control chip select, this internal
+ * .set_cs() function will still be called, so we clear any previous
+ * chip select. The CS we activate will not have any electrical effect,
+ * as it is handled by a GPIO, but that doesn't matter. What we need
+ * is to deassert the old chip select and assert some other chip select.
+ */
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
- ORION_SPI_CS(cs));
+ ORION_SPI_CS(spi->chip_select));
- /* Chip select logic is inverted from spi_set_cs */
+ /*
+ * Chip select logic is inverted from spi_set_cs(). For lines using a
+ * GPIO to do chip select SPI_CS_HIGH is enforced and inversion happens
+ * in the GPIO library, but we don't care about that, because in those
+ * cases we are dealing with an unused native CS anyway, so the polarity
+ * doesn't matter.
+ */
if (!enable)
orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
else
@@ -503,9 +507,6 @@ static int orion_spi_transfer_one(struct spi_master *master,
static int orion_spi_setup(struct spi_device *spi)
{
- if (gpio_is_valid(spi->cs_gpio)) {
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- }
return orion_spi_setup_transfer(spi, NULL);
}
@@ -622,13 +623,13 @@ static int orion_spi_probe(struct platform_device *pdev)
master->setup = orion_spi_setup;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->auto_runtime_pm = true;
+ master->use_gpio_descriptors = true;
master->flags = SPI_MASTER_GPIO_SS;
platform_set_drvdata(pdev, master);
spi = spi_master_get_devdata(master);
spi->master = master;
- spi->unused_hw_gpio = -1;
of_id = of_match_device(orion_spi_of_match_table, &pdev->dev);
devdata = (of_id) ? of_id->data : &orion_spi_dev_data;
@@ -683,7 +684,6 @@ static int orion_spi_probe(struct platform_device *pdev)
for_each_available_child_of_node(pdev->dev.of_node, np) {
struct orion_direct_acc *dir_acc;
u32 cs;
- int cs_gpio;
/* Get chip-select number from the "reg" property */
status = of_property_read_u32(np, "reg", &cs);
@@ -695,44 +695,6 @@ static int orion_spi_probe(struct platform_device *pdev)
}
/*
- * Initialize the CS GPIO:
- * - properly request the actual GPIO signal
- * - de-assert the logical signal so that all GPIO CS lines
- * are inactive when probing for slaves
- * - find an unused physical CS which will be driven for any
- * slave which uses a CS GPIO
- */
- cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", cs);
- if (cs_gpio > 0) {
- char *gpio_name;
- int cs_flags;
-
- if (spi->unused_hw_gpio == -1) {
- dev_info(&pdev->dev,
- "Selected unused HW CS#%d for any GPIO CSes\n",
- cs);
- spi->unused_hw_gpio = cs;
- }
-
- gpio_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "%s-CS%d", dev_name(&pdev->dev), cs);
- if (!gpio_name) {
- status = -ENOMEM;
- goto out_rel_axi_clk;
- }
-
- cs_flags = of_property_read_bool(np, "spi-cs-high") ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
- status = devm_gpio_request_one(&pdev->dev, cs_gpio,
- cs_flags, gpio_name);
- if (status) {
- dev_err(&pdev->dev,
- "Can't request GPIO for CS %d\n", cs);
- goto out_rel_axi_clk;
- }
- }
-
- /*
* Check if an address is configured for this SPI device. If
* not, the MBus mapping via the 'ranges' property in the 'soc'
* node is not configured and this device should not use the
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 73d2a65d0b6e..6721910e5f2a 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -150,6 +150,7 @@ static const struct lpss_config lpss_platforms[] = {
.tx_threshold_hi = 48,
.cs_sel_shift = 8,
.cs_sel_mask = 3 << 8,
+ .cs_clk_stays_gated = true,
},
{ /* LPSS_CNL_SSP */
.offset = 0x200,
@@ -1884,7 +1885,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
- status = devm_spi_register_controller(&pdev->dev, controller);
+ status = spi_register_controller(controller);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi controller\n");
goto out_error_pm_runtime_enabled;
@@ -1893,7 +1894,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
return status;
out_error_pm_runtime_enabled:
- pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
out_error_clock_enabled:
@@ -1916,6 +1916,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
+ spi_unregister_controller(drv_data->controller);
+
/* Disable the SSP at the peripheral and SOC level */
pxa2xx_spi_write(drv_data, SSCR0, 0);
clk_disable_unprepare(ssp->clk);
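Switching from devm_spi_register_controller() to the explicit calls changes teardown ordering: devres actions only run after remove() returns, so with the devm variant the controller could still accept transfers while remove() was already disabling clocks and DMA. Unregistering first closes that window. A sketch of the resulting remove() shape, with a hypothetical private struct:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_drv {	/* hypothetical private data */
	struct spi_controller *ctlr;
	struct clk *clk;
};

static int example_remove(struct platform_device *pdev)
{
	struct example_drv *drv = platform_get_drvdata(pdev);

	/* stop new transfers from being queued ... */
	spi_unregister_controller(drv->ctlr);

	/* ... only then tear the resources down */
	clk_disable_unprepare(drv->clk);
	return 0;
}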
diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
index 4c9620e0d18c..8aa51beb4ff3 100644
--- a/drivers/spi/spi-rb4xx.c
+++ b/drivers/spi/spi-rb4xx.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/spi/spi.h>
+#include <linux/of.h>
#include <asm/mach-ath79/ar71xx_regs.h>
@@ -150,6 +151,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
if (IS_ERR(ahb_clk))
return PTR_ERR(ahb_clk);
+ master->dev.of_node = pdev->dev.of_node;
master->bus_num = 0;
master->num_chipselect = 3;
master->mode_bits = SPI_TX_DUAL;
@@ -158,6 +160,11 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
master->transfer_one = rb4xx_transfer_one;
master->set_cs = rb4xx_set_cs;
+ rbspi = spi_master_get_devdata(master);
+ rbspi->base = spi_base;
+ rbspi->clk = ahb_clk;
+ platform_set_drvdata(pdev, rbspi);
+
err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "failed to register SPI master\n");
@@ -168,11 +175,6 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
if (err)
return err;
- rbspi = spi_master_get_devdata(master);
- rbspi->base = spi_base;
- rbspi->clk = ahb_clk;
- platform_set_drvdata(pdev, rbspi);
-
/* Enable SPI */
rb4xx_write(rbspi, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
@@ -188,11 +190,18 @@ static int rb4xx_spi_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id rb4xx_spi_dt_match[] = {
+ { .compatible = "mikrotik,rb4xx-spi" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rb4xx_spi_dt_match);
+
static struct platform_driver rb4xx_spi_drv = {
.probe = rb4xx_spi_probe,
.remove = rb4xx_spi_remove,
.driver = {
.name = "rb4xx-spi",
+ .of_match_table = of_match_ptr(rb4xx_spi_dt_match),
},
};
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 70ef63e0b6b8..9b8a5e1233c0 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -183,6 +183,8 @@ struct rockchip_spi {
u8 rsd;
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
+
+ bool slave_abort;
};
static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
@@ -219,8 +221,8 @@ static u32 get_fifo_len(struct rockchip_spi *rs)
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct spi_master *master = spi->master;
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = spi->controller;
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
bool cs_asserted = !enable;
/* Return immediately for no-op */
@@ -244,10 +246,10 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
rs->cs_asserted[spi->chip_select] = cs_asserted;
}
-static void rockchip_spi_handle_err(struct spi_master *master,
+static void rockchip_spi_handle_err(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
/* stop running spi transfer
* this also flushes both rx and tx fifos
@@ -258,10 +260,10 @@ static void rockchip_spi_handle_err(struct spi_master *master,
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
if (atomic_read(&rs->state) & TXDMA)
- dmaengine_terminate_async(master->dma_tx);
+ dmaengine_terminate_async(ctlr->dma_tx);
if (atomic_read(&rs->state) & RXDMA)
- dmaengine_terminate_async(master->dma_rx);
+ dmaengine_terminate_async(ctlr->dma_rx);
}
static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
@@ -319,8 +321,8 @@ static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = dev_id;
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
if (rs->tx_left)
rockchip_spi_pio_writer(rs);
@@ -329,7 +331,7 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
if (!rs->rx_left) {
spi_enable_chip(rs, false);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(ctlr);
}
return IRQ_HANDLED;
@@ -355,35 +357,35 @@ static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
static void rockchip_spi_dma_rxcb(void *data)
{
- struct spi_master *master = data;
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = data;
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
int state = atomic_fetch_andnot(RXDMA, &rs->state);
- if (state & TXDMA)
+ if (state & TXDMA && !rs->slave_abort)
return;
spi_enable_chip(rs, false);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(ctlr);
}
static void rockchip_spi_dma_txcb(void *data)
{
- struct spi_master *master = data;
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = data;
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
int state = atomic_fetch_andnot(TXDMA, &rs->state);
- if (state & RXDMA)
+ if (state & RXDMA && !rs->slave_abort)
return;
/* Wait until the FIFO drains completely. */
wait_for_idle(rs);
spi_enable_chip(rs, false);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(ctlr);
}
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
- struct spi_master *master, struct spi_transfer *xfer)
+ struct spi_controller *ctlr, struct spi_transfer *xfer)
{
struct dma_async_tx_descriptor *rxdesc, *txdesc;
@@ -398,17 +400,17 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
.src_maxburst = 1,
};
- dmaengine_slave_config(master->dma_rx, &rxconf);
+ dmaengine_slave_config(ctlr->dma_rx, &rxconf);
rxdesc = dmaengine_prep_slave_sg(
- master->dma_rx,
+ ctlr->dma_rx,
xfer->rx_sg.sgl, xfer->rx_sg.nents,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!rxdesc)
return -EINVAL;
rxdesc->callback = rockchip_spi_dma_rxcb;
- rxdesc->callback_param = master;
+ rxdesc->callback_param = ctlr;
}
txdesc = NULL;
@@ -420,27 +422,27 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
.dst_maxburst = rs->fifo_len / 4,
};
- dmaengine_slave_config(master->dma_tx, &txconf);
+ dmaengine_slave_config(ctlr->dma_tx, &txconf);
txdesc = dmaengine_prep_slave_sg(
- master->dma_tx,
+ ctlr->dma_tx,
xfer->tx_sg.sgl, xfer->tx_sg.nents,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txdesc) {
if (rxdesc)
- dmaengine_terminate_sync(master->dma_rx);
+ dmaengine_terminate_sync(ctlr->dma_rx);
return -EINVAL;
}
txdesc->callback = rockchip_spi_dma_txcb;
- txdesc->callback_param = master;
+ txdesc->callback_param = ctlr;
}
/* rx must be started before tx due to spi instinct */
if (rxdesc) {
atomic_or(RXDMA, &rs->state);
dmaengine_submit(rxdesc);
- dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(ctlr->dma_rx);
}
spi_enable_chip(rs, true);
@@ -448,7 +450,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
if (txdesc) {
atomic_or(TXDMA, &rs->state);
dmaengine_submit(txdesc);
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(ctlr->dma_tx);
}
/* 1 means the transfer is in progress */
@@ -457,7 +459,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
static void rockchip_spi_config(struct rockchip_spi *rs,
struct spi_device *spi, struct spi_transfer *xfer,
- bool use_dma)
+ bool use_dma, bool slave_mode)
{
u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET
| CR0_BHT_8BIT << CR0_BHT_OFFSET
@@ -466,6 +468,10 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
u32 cr1;
u32 dmacr = 0;
+ if (slave_mode)
+ cr0 |= CR0_OPM_SLAVE << CR0_OPM_OFFSET;
+ rs->slave_abort = false;
+
cr0 |= rs->rsd << CR0_RSD_OFFSET;
cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
if (spi->mode & SPI_LSB_FIRST)
@@ -493,7 +499,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
break;
default:
/* we only whitelist 4, 8 and 16 bit words in
- * master->bits_per_word_mask, so this shouldn't
+ * ctlr->bits_per_word_mask, so this shouldn't
* happen
*/
unreachable();
@@ -535,12 +541,22 @@ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
return ROCKCHIP_SPI_MAX_TRANLEN;
}
+static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
+{
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ rs->slave_abort = true;
+ complete(&ctlr->xfer_completion);
+
+ return 0;
+}
+
static int rockchip_spi_transfer_one(
- struct spi_master *master,
+ struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
bool use_dma;
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -558,21 +574,21 @@ static int rockchip_spi_transfer_one(
rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
- use_dma = master->can_dma ? master->can_dma(master, spi, xfer) : false;
+ use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
- rockchip_spi_config(rs, spi, xfer, use_dma);
+ rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
if (use_dma)
- return rockchip_spi_prepare_dma(rs, master, xfer);
+ return rockchip_spi_prepare_dma(rs, ctlr, xfer);
return rockchip_spi_prepare_irq(rs, xfer);
}
-static bool rockchip_spi_can_dma(struct spi_master *master,
+static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
unsigned int bytes_per_word = xfer->bits_per_word <= 8 ? 1 : 2;
/* if the number of spi words to transfer is less than the fifo
@@ -586,44 +602,55 @@ static int rockchip_spi_probe(struct platform_device *pdev)
{
int ret;
struct rockchip_spi *rs;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct resource *mem;
+ struct device_node *np = pdev->dev.of_node;
u32 rsd_nsecs;
+ bool slave_mode;
+
+ slave_mode = of_property_read_bool(np, "spi-slave");
+
+ if (slave_mode)
+ ctlr = spi_alloc_slave(&pdev->dev,
+ sizeof(struct rockchip_spi));
+ else
+ ctlr = spi_alloc_master(&pdev->dev,
+ sizeof(struct rockchip_spi));
- master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
- if (!master)
+ if (!ctlr)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, ctlr);
- rs = spi_master_get_devdata(master);
+ rs = spi_controller_get_devdata(ctlr);
+ ctlr->slave = slave_mode;
/* Get basic io resource and map it */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rs->regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(rs->regs)) {
ret = PTR_ERR(rs->regs);
- goto err_put_master;
+ goto err_put_ctlr;
}
rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
if (IS_ERR(rs->apb_pclk)) {
dev_err(&pdev->dev, "Failed to get apb_pclk\n");
ret = PTR_ERR(rs->apb_pclk);
- goto err_put_master;
+ goto err_put_ctlr;
}
rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
if (IS_ERR(rs->spiclk)) {
dev_err(&pdev->dev, "Failed to get spi_pclk\n");
ret = PTR_ERR(rs->spiclk);
- goto err_put_master;
+ goto err_put_ctlr;
}
ret = clk_prepare_enable(rs->apb_pclk);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
- goto err_put_master;
+ goto err_put_ctlr;
}
ret = clk_prepare_enable(rs->spiclk);
@@ -639,7 +666,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
goto err_disable_spiclk;
ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL,
- IRQF_ONESHOT, dev_name(&pdev->dev), master);
+ IRQF_ONESHOT, dev_name(&pdev->dev), ctlr);
if (ret)
goto err_disable_spiclk;
@@ -673,78 +700,90 @@ static int rockchip_spi_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- master->auto_runtime_pm = true;
- master->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
- master->num_chipselect = ROCKCHIP_SPI_MAX_CS_NUM;
- master->dev.of_node = pdev->dev.of_node;
- master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
- master->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
- master->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
-
- master->set_cs = rockchip_spi_set_cs;
- master->transfer_one = rockchip_spi_transfer_one;
- master->max_transfer_size = rockchip_spi_max_transfer_size;
- master->handle_err = rockchip_spi_handle_err;
- master->flags = SPI_MASTER_GPIO_SS;
-
- master->dma_tx = dma_request_chan(rs->dev, "tx");
- if (IS_ERR(master->dma_tx)) {
+ ctlr->auto_runtime_pm = true;
+ ctlr->bus_num = pdev->id;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
+ if (slave_mode) {
+ ctlr->mode_bits |= SPI_NO_CS;
+ ctlr->slave_abort = rockchip_spi_slave_abort;
+ } else {
+ ctlr->flags = SPI_MASTER_GPIO_SS;
+ ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM;
+ /*
+ * Rockchip SPI0 has two native chip selects, SPI1..5 have one only;
+ * if num-cs is missing in the DT, default to 1.
+ */
+ if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect))
+ ctlr->num_chipselect = 1;
+ ctlr->use_gpio_descriptors = true;
+ }
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
+ ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
+ ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
+
+ ctlr->set_cs = rockchip_spi_set_cs;
+ ctlr->transfer_one = rockchip_spi_transfer_one;
+ ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
+ ctlr->handle_err = rockchip_spi_handle_err;
+
+ ctlr->dma_tx = dma_request_chan(rs->dev, "tx");
+ if (IS_ERR(ctlr->dma_tx)) {
/* Check tx to see if we need defer probing driver */
- if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
+ if (PTR_ERR(ctlr->dma_tx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_disable_pm_runtime;
}
dev_warn(rs->dev, "Failed to request TX DMA channel\n");
- master->dma_tx = NULL;
+ ctlr->dma_tx = NULL;
}
- master->dma_rx = dma_request_chan(rs->dev, "rx");
- if (IS_ERR(master->dma_rx)) {
- if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
+ ctlr->dma_rx = dma_request_chan(rs->dev, "rx");
+ if (IS_ERR(ctlr->dma_rx)) {
+ if (PTR_ERR(ctlr->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_free_dma_tx;
}
dev_warn(rs->dev, "Failed to request RX DMA channel\n");
- master->dma_rx = NULL;
+ ctlr->dma_rx = NULL;
}
- if (master->dma_tx && master->dma_rx) {
+ if (ctlr->dma_tx && ctlr->dma_rx) {
rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR;
rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR;
- master->can_dma = rockchip_spi_can_dma;
+ ctlr->can_dma = rockchip_spi_can_dma;
}
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register master\n");
+ dev_err(&pdev->dev, "Failed to register controller\n");
goto err_free_dma_rx;
}
return 0;
err_free_dma_rx:
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
err_free_dma_tx:
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
err_disable_pm_runtime:
pm_runtime_disable(&pdev->dev);
err_disable_spiclk:
clk_disable_unprepare(rs->spiclk);
err_disable_apbclk:
clk_disable_unprepare(rs->apb_pclk);
-err_put_master:
- spi_master_put(master);
+err_put_ctlr:
+ spi_controller_put(ctlr);
return ret;
}
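Probe now chooses the controller role from the standard spi-slave device-tree property; spi_alloc_slave() and spi_alloc_master() both return a struct spi_controller, so the rest of probe stays shared between the two roles. A trimmed sketch of the selection, assuming a hypothetical priv type:

#include <linux/of.h>
#include <linux/spi/spi.h>

struct priv { int dummy; };	/* hypothetical driver data */

static struct spi_controller *alloc_ctlr(struct device *dev,
					 struct device_node *np)
{
	if (of_property_read_bool(np, "spi-slave"))
		return spi_alloc_slave(dev, sizeof(struct priv));

	return spi_alloc_master(dev, sizeof(struct priv));
}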
static int rockchip_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = spi_controller_get(platform_get_drvdata(pdev));
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
pm_runtime_get_sync(&pdev->dev);
@@ -755,12 +794,12 @@ static int rockchip_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
- spi_master_put(master);
+ spi_controller_put(ctlr);
return 0;
}
@@ -769,9 +808,9 @@ static int rockchip_spi_remove(struct platform_device *pdev)
static int rockchip_spi_suspend(struct device *dev)
{
int ret;
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(ctlr);
if (ret < 0)
return ret;
@@ -787,8 +826,8 @@ static int rockchip_spi_suspend(struct device *dev)
static int rockchip_spi_resume(struct device *dev)
{
int ret;
- struct spi_master *master = dev_get_drvdata(dev);
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
pinctrl_pm_select_default_state(dev);
@@ -796,7 +835,7 @@ static int rockchip_spi_resume(struct device *dev)
if (ret < 0)
return ret;
- ret = spi_master_resume(master);
+ ret = spi_controller_resume(ctlr);
if (ret < 0) {
clk_disable_unprepare(rs->spiclk);
clk_disable_unprepare(rs->apb_pclk);
@@ -809,8 +848,8 @@ static int rockchip_spi_resume(struct device *dev)
#ifdef CONFIG_PM
static int rockchip_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
clk_disable_unprepare(rs->spiclk);
clk_disable_unprepare(rs->apb_pclk);
@@ -821,8 +860,8 @@ static int rockchip_spi_runtime_suspend(struct device *dev)
static int rockchip_spi_runtime_resume(struct device *dev)
{
int ret;
- struct spi_master *master = dev_get_drvdata(dev);
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
ret = clk_prepare_enable(rs->apb_pclk);
if (ret < 0)
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 5497eeb3bf3e..ee0f3edf49cd 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -345,6 +345,6 @@ static struct i2c_driver sc18is602_driver = {
module_i2c_driver(sc18is602_driver);
-MODULE_DESCRIPTION("SC18IC602/603 SPI Master Driver");
+MODULE_DESCRIPTION("SC18IS602/603 SPI Master Driver");
MODULE_AUTHOR("Guenter Roeck");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 1c11a00a2c36..b2579af0e3eb 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1398,7 +1398,7 @@ static int sh_msiof_spi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
sh_msiof_spi_resume);
-#define DEV_PM_OPS &sh_msiof_spi_pm_ops
+#define DEV_PM_OPS (&sh_msiof_spi_pm_ops)
#else
#define DEV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index 87dadb6b8ebf..88e6543648cb 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -319,7 +319,7 @@ static int sprd_adi_transfer_one(struct spi_controller *ctlr,
static void sprd_adi_set_wdt_rst_mode(struct sprd_adi *sadi)
{
-#ifdef CONFIG_SPRD_WATCHDOG
+#if IS_ENABLED(CONFIG_SPRD_WATCHDOG)
u32 val;
/* Set default watchdog reboot mode */
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index d066f5144c3e..3c44bb2fd9b1 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -87,6 +88,7 @@
#define STM32_BUSY_TIMEOUT_US 100000
#define STM32_ABT_TIMEOUT_US 100000
#define STM32_COMP_TIMEOUT_MS 1000
+#define STM32_AUTOSUSPEND_DELAY -1
struct stm32_qspi_flash {
struct stm32_qspi *qspi;
@@ -431,10 +433,17 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
int ret;
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&qspi->lock);
ret = stm32_qspi_send(mem, op);
mutex_unlock(&qspi->lock);
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
return ret;
}
@@ -444,6 +453,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
u32 presc;
+ int ret;
if (ctrl->busy)
return -EBUSY;
@@ -451,6 +461,10 @@ static int stm32_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret < 0)
+ return ret;
+
presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
flash = &qspi->flash[spi->chip_select];
@@ -467,6 +481,9 @@ static int stm32_qspi_setup(struct spi_device *spi)
writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
mutex_unlock(&qspi->lock);
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
return 0;
}
@@ -538,10 +555,15 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
static void stm32_qspi_release(struct stm32_qspi *qspi)
{
+ pm_runtime_get_sync(qspi->dev);
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
stm32_qspi_dma_free(qspi);
mutex_destroy(&qspi->lock);
+ pm_runtime_put_noidle(qspi->dev);
+ pm_runtime_disable(qspi->dev);
+ pm_runtime_set_suspended(qspi->dev);
+ pm_runtime_dont_use_autosuspend(qspi->dev);
clk_disable_unprepare(qspi->clk);
}
@@ -643,9 +665,20 @@ static int stm32_qspi_probe(struct platform_device *pdev)
ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
ctrl->dev.of_node = dev->of_node;
+ pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_noresume(dev);
+
ret = devm_spi_register_master(dev, ctrl);
- if (!ret)
- return 0;
+ if (ret)
+ goto err_qspi_release;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
err_qspi_release:
stm32_qspi_release(qspi);
@@ -660,14 +693,28 @@ static int stm32_qspi_remove(struct platform_device *pdev)
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
stm32_qspi_release(qspi);
+
return 0;
}
-static int __maybe_unused stm32_qspi_suspend(struct device *dev)
+static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev)
{
struct stm32_qspi *qspi = dev_get_drvdata(dev);
clk_disable_unprepare(qspi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev)
+{
+ struct stm32_qspi *qspi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(qspi->clk);
+}
+
+static int __maybe_unused stm32_qspi_suspend(struct device *dev)
+{
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -683,10 +730,17 @@ static int __maybe_unused stm32_qspi_resume(struct device *dev)
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
return 0;
}
-static SIMPLE_DEV_PM_OPS(stm32_qspi_pm_ops, stm32_qspi_suspend, stm32_qspi_resume);
+static const struct dev_pm_ops stm32_qspi_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend,
+ stm32_qspi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume)
+};
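The probe-side sequence added above is the common runtime-PM bring-up: declare the device active, enable runtime PM, and hold one usage reference across registration so autosuspend cannot power the block down mid-probe; only after registration succeeds is the reference dropped through the autosuspend path. A sketch, with a hypothetical 2000 ms delay and register_things() helper:

#include <linux/pm_runtime.h>

static int register_things(struct device *dev);	/* hypothetical */

static int example_probe_pm(struct device *dev)
{
	int ret;

	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);	/* hardware is already powered */
	pm_runtime_enable(dev);
	pm_runtime_get_noresume(dev);	/* pin it active during setup */

	ret = register_things(dev);
	if (ret) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}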
static const struct of_device_id stm32_qspi_match[] = {
{.compatible = "st,stm32f469-qspi"},
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 44ac6eb3298d..4c643dfc7fbb 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -811,7 +811,9 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
mask |= STM32F4_SPI_SR_TXE;
}
- if (!spi->cur_usedma && spi->cur_comm == SPI_FULL_DUPLEX) {
+ if (!spi->cur_usedma && (spi->cur_comm == SPI_FULL_DUPLEX ||
+ spi->cur_comm == SPI_SIMPLEX_RX ||
+ spi->cur_comm == SPI_3WIRE_RX)) {
/* TXE flag is set and is handled when RXNE flag occurs */
sr &= ~STM32F4_SPI_SR_TXE;
mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR;
@@ -850,7 +852,7 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
stm32f4_spi_read_rx(spi);
if (spi->rx_len == 0)
end = true;
- else /* Load data for discontinuous mode */
+ else if (spi->tx_buf) /* Load data for discontinuous mode */
stm32f4_spi_write_tx(spi);
}
@@ -1151,7 +1153,9 @@ static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
/* Enable the interrupts relative to the current communication mode */
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
cr2 |= STM32F4_SPI_CR2_TXEIE;
- } else if (spi->cur_comm == SPI_FULL_DUPLEX) {
+ } else if (spi->cur_comm == SPI_FULL_DUPLEX ||
+ spi->cur_comm == SPI_SIMPLEX_RX ||
+ spi->cur_comm == SPI_3WIRE_RX) {
/* In transmit-only mode, the OVR flag is set in the SR register
* since the received data are never read. Therefore set OVR
* interrupt only when rx buffer is available.
@@ -1462,10 +1466,16 @@ static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
STM32F4_SPI_CR1_BIDIMODE |
STM32F4_SPI_CR1_BIDIOE);
- } else if (comm_type == SPI_FULL_DUPLEX) {
+ } else if (comm_type == SPI_FULL_DUPLEX ||
+ comm_type == SPI_SIMPLEX_RX) {
stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
STM32F4_SPI_CR1_BIDIMODE |
STM32F4_SPI_CR1_BIDIOE);
+ } else if (comm_type == SPI_3WIRE_RX) {
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIMODE);
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIOE);
} else {
return -EINVAL;
}
@@ -1906,6 +1916,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;
+ master->flags = SPI_MASTER_MUST_TX;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index ec7967be9e2f..ecea15534c42 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -470,6 +470,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
master->max_speed_hz = 100 * 1000 * 1000;
master->min_speed_hz = 3 * 1000;
+ master->use_gpio_descriptors = true;
master->set_cs = sun6i_spi_set_cs;
master->transfer_one = sun6i_spi_transfer_one;
master->num_chipselect = 4;
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 83edabdb41ad..c2c58871a947 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1398,6 +1398,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ pm_runtime_put_noidle(&pdev->dev);
goto exit_pm_disable;
}
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 514429379206..02cf5f463ba6 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -491,6 +491,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ pm_runtime_put_noidle(&pdev->dev);
goto exit_pm_disable;
}
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 7f4d932dade7..a07b72e9c344 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1118,6 +1118,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ pm_runtime_put_noidle(&pdev->dev);
goto exit_pm_disable;
}
tspi->def_command_reg = SLINK_M_S;
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 0fa50979644d..6a9ef8ee3cc9 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -659,8 +659,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
priv->master = master;
priv->is_save_param = false;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
@@ -716,8 +715,10 @@ static int uniphier_spi_probe(struct platform_device *pdev)
master->dma_tx = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR_OR_NULL(master->dma_tx)) {
- if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
goto out_disable_clk;
+ }
master->dma_tx = NULL;
dma_tx_burst = INT_MAX;
} else {
@@ -732,8 +733,10 @@ static int uniphier_spi_probe(struct platform_device *pdev)
master->dma_rx = dma_request_chan(&pdev->dev, "rx");
if (IS_ERR_OR_NULL(master->dma_rx)) {
- if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
goto out_disable_clk;
+ }
master->dma_rx = NULL;
dma_rx_burst = INT_MAX;
} else {
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 7412a3042a8d..811c97a7c858 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -135,7 +135,6 @@
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
-static const struct zynqmp_eemi_ops *eemi_ops;
/**
* struct zynqmp_qspi - Defines qspi driver instance
@@ -1015,10 +1014,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct zynqmp_qspi *xqspi;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!master)
return -ENOMEM;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c92c89467e7e..8158e281f354 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1023,7 +1023,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
void *tmp;
unsigned int max_tx, max_rx;
- if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
+ if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
+ && !(msg->spi->mode & SPI_3WIRE)) {
max_tx = 0;
max_rx = 0;
@@ -1075,7 +1076,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
- unsigned long long ms = 1;
+ unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
@@ -1160,6 +1161,8 @@ int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
int delay;
+ might_sleep();
+
if (!_delay)
return -EINVAL;
@@ -2111,6 +2114,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
}
lookup->max_speed_hz = sb->connection_speed;
+ lookup->bits_per_word = sb->data_bit_length;
if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
lookup->mode |= SPI_CPHA;
@@ -2760,6 +2764,8 @@ void spi_unregister_controller(struct spi_controller *ctlr)
struct spi_controller *found;
int id = ctlr->bus_num;
+ device_for_each_child(&ctlr->dev, NULL, __unregister);
+
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
found = idr_find(&spi_master_idr, id);
@@ -2772,7 +2778,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
list_del(&ctlr->list);
mutex_unlock(&board_lock);
- device_for_each_child(&ctlr->dev, NULL, __unregister);
device_unregister(&ctlr->dev);
/* free bus id */
mutex_lock(&board_lock);
@@ -3853,8 +3858,7 @@ static u8 *buf;
* is zero for success, else a negative errno status code.
* This call may only be used from a context that may sleep.
*
- * Parameters to this routine are always copied using a small buffer;
- * portable code should never use this for more than 32 bytes.
+ * Parameters to this routine are always copied using a small buffer.
* Performance-sensitive or bulk transfer code should instead use
* spi_{async,sync}() calls with dma-safe buffers.
*
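The reworded kerneldoc above points bulk users at spi_sync() with DMA-safe buffers instead of the copying helper. A minimal sketch of that style, assuming the caller supplies a kmalloc'd buffer:

#include <linux/spi/spi.h>

/* Read `len` bytes into `buf`, which must be DMA-safe (e.g. kmalloc'd,
 * never on the stack). Sketch only; error handling is left to the caller.
 */
static int example_bulk_read(struct spi_device *spi, void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.rx_buf = buf,
		.len = len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(spi, &msg);
}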
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 80dd1025b953..d753df700e9e 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -62,7 +62,8 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS);
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
- | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)
+ | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
+ | SPI_RX_QUAD | SPI_RX_OCTAL)
struct spidev_data {
dev_t devt;
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 6ceee98ed6ff..b97a5c32d44a 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -400,7 +400,8 @@ int ssb_bus_scan(struct ssb_bus *bus,
#ifdef CONFIG_SSB_DRIVER_PCICORE
if (bus->bustype == SSB_BUSTYPE_PCI) {
/* Ignore PCI cores on PCI-E cards.
- * Ignore PCI-E cores on PCI cards. */
+ * Ignore PCI-E cores on PCI cards.
+ */
if (dev->id.coreid == SSB_DEV_PCI) {
if (pci_is_pcie(bus->host_pci))
continue;
@@ -421,7 +422,8 @@ int ssb_bus_scan(struct ssb_bus *bus,
if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM &&
(bus->host_pci->device & 0xFF00) == 0x4300) {
/* This is a dangling ethernet core on a
- * wireless device. Ignore it. */
+ * wireless device. Ignore it.
+ */
continue;
}
}
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 52d2e0f33be7..42d620cee8a9 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -78,7 +78,8 @@ ssize_t ssb_attr_sprom_show(struct ssb_bus *bus, char *buf,
/* Use interruptible locking, as the SPROM write might
* be holding the lock for several seconds. So allow userspace
- * to cancel operation. */
+ * to cancel operation.
+ */
err = -ERESTARTSYS;
if (mutex_lock_interruptible(&bus->sprom_mutex))
goto out_kfree;
@@ -121,7 +122,8 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
/* Use interruptible locking, as the SPROM write might
* be holding the lock for several seconds. So allow userspace
- * to cancel operation. */
+ * to cancel operation.
+ */
err = -ERESTARTSYS;
if (mutex_lock_interruptible(&bus->sprom_mutex))
goto out_kfree;
@@ -188,9 +190,11 @@ int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out)
bool ssb_is_sprom_available(struct ssb_bus *bus)
{
/* status register only exists on chipcommon rev >= 11 and we need to check
- for >= 31 only */
+ * for >= 31 only
+ */
/* this routine differs from specs as we do not access SPROM directly
- on PCMCIA */
+ * on PCMCIA
+ */
if (bus->bustype == SSB_BUSTYPE_PCI &&
bus->chipco.dev && /* can be unavailable! */
bus->chipco.dev->id.revision >= 31)
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 473b465724f1..0755b11348ed 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -99,12 +99,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
- void *addr = vm_map_ram(pages, num, -1, pgprot);
+ void *addr = vmap(pages, num, VM_MAP, pgprot);
if (!addr)
return -ENOMEM;
memset(addr, 0, PAGE_SIZE * num);
- vm_unmap_ram(addr, num);
+ vunmap(addr);
return 0;
}
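vmap() maps an array of (possibly discontiguous) pages into one contiguous kernel virtual range with the requested protection, which is what the zeroing helper above needs; vunmap() releases the range immediately. A standalone sketch of the clear-pages idiom:

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Zero a batch of pages through a temporary contiguous mapping,
 * mirroring the helper above.
 */
static int zero_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vmap(pages, num, VM_MAP, pgprot);

	if (!addr)
		return -ENOMEM;

	memset(addr, 0, (size_t)num * PAGE_SIZE);
	vunmap(addr);
	return 0;
}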
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index f85ec5b16b65..0198b886d906 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -37,7 +37,7 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
}
mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
- 1 << pool->order);
+ 1 << pool->order);
mutex_unlock(&pool->mutex);
}
@@ -57,7 +57,7 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
list_del(&page->lru);
mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
- -(1 << pool->order));
+ -(1 << pool->order));
return page;
}
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 5801067e7c1b..2bb1c2e9cb57 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -383,8 +383,9 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
mutex_lock(&fifo->read_lock);
ret = wait_event_interruptible_timeout(fifo->read_queue,
ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
- (read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
- MAX_SCHEDULE_TIMEOUT);
+ (read_timeout >= 0) ?
+ msecs_to_jiffies(read_timeout) :
+ MAX_SCHEDULE_TIMEOUT);
if (ret <= 0) {
if (ret == 0) {
@@ -525,9 +526,10 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
mutex_lock(&fifo->write_lock);
ret = wait_event_interruptible_timeout(fifo->write_queue,
ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
- >= words_to_write,
- (write_timeout >= 0) ? msecs_to_jiffies(write_timeout) :
- MAX_SCHEDULE_TIMEOUT);
+ >= words_to_write,
+ (write_timeout >= 0) ?
+ msecs_to_jiffies(write_timeout) :
+ MAX_SCHEDULE_TIMEOUT);
if (ret <= 0) {
if (ret == 0) {
diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile
index 6af5da3b4315..072ed83a5a6a 100644
--- a/drivers/staging/comedi/Makefile
+++ b/drivers/staging/comedi/Makefile
@@ -4,7 +4,6 @@ ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
comedi-y := comedi_fops.o range.o drivers.o \
comedi_buf.o
comedi-$(CONFIG_PROC_FS) += proc.o
-comedi-$(CONFIG_COMPAT) += comedi_compat32.o
obj-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o
obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += comedi_pcmcia.o
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
deleted file mode 100644
index 36a3564ba1fb..000000000000
--- a/drivers/staging/comedi/comedi_compat32.c
+++ /dev/null
@@ -1,455 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * comedi/comedi_compat32.c
- * 32-bit ioctl compatibility for 64-bit comedi kernel module.
- *
- * Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
- * Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
- */
-
-#include <linux/uaccess.h>
-#include <linux/compat.h>
-#include <linux/fs.h>
-#include "comedi.h"
-#include "comedi_compat32.h"
-
-#define COMEDI32_CHANINFO _IOR(CIO, 3, struct comedi32_chaninfo_struct)
-#define COMEDI32_RANGEINFO _IOR(CIO, 8, struct comedi32_rangeinfo_struct)
-/*
- * N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR.
- * It's too late to change it now, but it only affects the command number.
- */
-#define COMEDI32_CMD _IOR(CIO, 9, struct comedi32_cmd_struct)
-/*
- * N.B. COMEDI32_CMDTEST and COMEDI_CMDTEST ought to use _IOWR, not _IOR.
- * It's too late to change it now, but it only affects the command number.
- */
-#define COMEDI32_CMDTEST _IOR(CIO, 10, struct comedi32_cmd_struct)
-#define COMEDI32_INSNLIST _IOR(CIO, 11, struct comedi32_insnlist_struct)
-#define COMEDI32_INSN _IOR(CIO, 12, struct comedi32_insn_struct)
-
-struct comedi32_chaninfo_struct {
- unsigned int subdev;
- compat_uptr_t maxdata_list; /* 32-bit 'unsigned int *' */
- compat_uptr_t flaglist; /* 32-bit 'unsigned int *' */
- compat_uptr_t rangelist; /* 32-bit 'unsigned int *' */
- unsigned int unused[4];
-};
-
-struct comedi32_rangeinfo_struct {
- unsigned int range_type;
- compat_uptr_t range_ptr; /* 32-bit 'void *' */
-};
-
-struct comedi32_cmd_struct {
- unsigned int subdev;
- unsigned int flags;
- unsigned int start_src;
- unsigned int start_arg;
- unsigned int scan_begin_src;
- unsigned int scan_begin_arg;
- unsigned int convert_src;
- unsigned int convert_arg;
- unsigned int scan_end_src;
- unsigned int scan_end_arg;
- unsigned int stop_src;
- unsigned int stop_arg;
- compat_uptr_t chanlist; /* 32-bit 'unsigned int *' */
- unsigned int chanlist_len;
- compat_uptr_t data; /* 32-bit 'short *' */
- unsigned int data_len;
-};
-
-struct comedi32_insn_struct {
- unsigned int insn;
- unsigned int n;
- compat_uptr_t data; /* 32-bit 'unsigned int *' */
- unsigned int subdev;
- unsigned int chanspec;
- unsigned int unused[3];
-};
-
-struct comedi32_insnlist_struct {
- unsigned int n_insns;
- compat_uptr_t insns; /* 32-bit 'struct comedi_insn *' */
-};
-
-/* Handle translated ioctl. */
-static int translated_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- if (file->f_op->unlocked_ioctl)
- return file->f_op->unlocked_ioctl(file, cmd, arg);
-
- return -ENOTTY;
-}
-
-/* Handle 32-bit COMEDI_CHANINFO ioctl. */
-static int compat_chaninfo(struct file *file, unsigned long arg)
-{
- struct comedi_chaninfo __user *chaninfo;
- struct comedi32_chaninfo_struct __user *chaninfo32;
- int err;
- union {
- unsigned int uint;
- compat_uptr_t uptr;
- } temp;
-
- chaninfo32 = compat_ptr(arg);
- chaninfo = compat_alloc_user_space(sizeof(*chaninfo));
-
- /* Copy chaninfo structure. Ignore unused members. */
- if (!access_ok(chaninfo32, sizeof(*chaninfo32)) ||
- !access_ok(chaninfo, sizeof(*chaninfo)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(temp.uint, &chaninfo32->subdev);
- err |= __put_user(temp.uint, &chaninfo->subdev);
- err |= __get_user(temp.uptr, &chaninfo32->maxdata_list);
- err |= __put_user(compat_ptr(temp.uptr), &chaninfo->maxdata_list);
- err |= __get_user(temp.uptr, &chaninfo32->flaglist);
- err |= __put_user(compat_ptr(temp.uptr), &chaninfo->flaglist);
- err |= __get_user(temp.uptr, &chaninfo32->rangelist);
- err |= __put_user(compat_ptr(temp.uptr), &chaninfo->rangelist);
- if (err)
- return -EFAULT;
-
- return translated_ioctl(file, COMEDI_CHANINFO, (unsigned long)chaninfo);
-}
-
-/* Handle 32-bit COMEDI_RANGEINFO ioctl. */
-static int compat_rangeinfo(struct file *file, unsigned long arg)
-{
- struct comedi_rangeinfo __user *rangeinfo;
- struct comedi32_rangeinfo_struct __user *rangeinfo32;
- int err;
- union {
- unsigned int uint;
- compat_uptr_t uptr;
- } temp;
-
- rangeinfo32 = compat_ptr(arg);
- rangeinfo = compat_alloc_user_space(sizeof(*rangeinfo));
-
- /* Copy rangeinfo structure. */
- if (!access_ok(rangeinfo32, sizeof(*rangeinfo32)) ||
- !access_ok(rangeinfo, sizeof(*rangeinfo)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(temp.uint, &rangeinfo32->range_type);
- err |= __put_user(temp.uint, &rangeinfo->range_type);
- err |= __get_user(temp.uptr, &rangeinfo32->range_ptr);
- err |= __put_user(compat_ptr(temp.uptr), &rangeinfo->range_ptr);
- if (err)
- return -EFAULT;
-
- return translated_ioctl(file, COMEDI_RANGEINFO,
- (unsigned long)rangeinfo);
-}
-
-/* Copy 32-bit cmd structure to native cmd structure. */
-static int get_compat_cmd(struct comedi_cmd __user *cmd,
- struct comedi32_cmd_struct __user *cmd32)
-{
- int err;
- union {
- unsigned int uint;
- compat_uptr_t uptr;
- } temp;
-
- /* Copy cmd structure. */
- if (!access_ok(cmd32, sizeof(*cmd32)) ||
- !access_ok(cmd, sizeof(*cmd)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(temp.uint, &cmd32->subdev);
- err |= __put_user(temp.uint, &cmd->subdev);
- err |= __get_user(temp.uint, &cmd32->flags);
- err |= __put_user(temp.uint, &cmd->flags);
- err |= __get_user(temp.uint, &cmd32->start_src);
- err |= __put_user(temp.uint, &cmd->start_src);
- err |= __get_user(temp.uint, &cmd32->start_arg);
- err |= __put_user(temp.uint, &cmd->start_arg);
- err |= __get_user(temp.uint, &cmd32->scan_begin_src);
- err |= __put_user(temp.uint, &cmd->scan_begin_src);
- err |= __get_user(temp.uint, &cmd32->scan_begin_arg);
- err |= __put_user(temp.uint, &cmd->scan_begin_arg);
- err |= __get_user(temp.uint, &cmd32->convert_src);
- err |= __put_user(temp.uint, &cmd->convert_src);
- err |= __get_user(temp.uint, &cmd32->convert_arg);
- err |= __put_user(temp.uint, &cmd->convert_arg);
- err |= __get_user(temp.uint, &cmd32->scan_end_src);
- err |= __put_user(temp.uint, &cmd->scan_end_src);
- err |= __get_user(temp.uint, &cmd32->scan_end_arg);
- err |= __put_user(temp.uint, &cmd->scan_end_arg);
- err |= __get_user(temp.uint, &cmd32->stop_src);
- err |= __put_user(temp.uint, &cmd->stop_src);
- err |= __get_user(temp.uint, &cmd32->stop_arg);
- err |= __put_user(temp.uint, &cmd->stop_arg);
- err |= __get_user(temp.uptr, &cmd32->chanlist);
- err |= __put_user((unsigned int __force *)compat_ptr(temp.uptr),
- &cmd->chanlist);
- err |= __get_user(temp.uint, &cmd32->chanlist_len);
- err |= __put_user(temp.uint, &cmd->chanlist_len);
- err |= __get_user(temp.uptr, &cmd32->data);
- err |= __put_user(compat_ptr(temp.uptr), &cmd->data);
- err |= __get_user(temp.uint, &cmd32->data_len);
- err |= __put_user(temp.uint, &cmd->data_len);
- return err ? -EFAULT : 0;
-}
-
-/* Copy native cmd structure to 32-bit cmd structure. */
-static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
- struct comedi_cmd __user *cmd)
-{
- int err;
- unsigned int temp;
-
- /*
- * Copy back most of cmd structure.
- *
- * Assume the pointer values are already valid.
- * (Could use ptr_to_compat() to set them.)
- */
- if (!access_ok(cmd, sizeof(*cmd)) ||
- !access_ok(cmd32, sizeof(*cmd32)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(temp, &cmd->subdev);
- err |= __put_user(temp, &cmd32->subdev);
- err |= __get_user(temp, &cmd->flags);
- err |= __put_user(temp, &cmd32->flags);
- err |= __get_user(temp, &cmd->start_src);
- err |= __put_user(temp, &cmd32->start_src);
- err |= __get_user(temp, &cmd->start_arg);
- err |= __put_user(temp, &cmd32->start_arg);
- err |= __get_user(temp, &cmd->scan_begin_src);
- err |= __put_user(temp, &cmd32->scan_begin_src);
- err |= __get_user(temp, &cmd->scan_begin_arg);
- err |= __put_user(temp, &cmd32->scan_begin_arg);
- err |= __get_user(temp, &cmd->convert_src);
- err |= __put_user(temp, &cmd32->convert_src);
- err |= __get_user(temp, &cmd->convert_arg);
- err |= __put_user(temp, &cmd32->convert_arg);
- err |= __get_user(temp, &cmd->scan_end_src);
- err |= __put_user(temp, &cmd32->scan_end_src);
- err |= __get_user(temp, &cmd->scan_end_arg);
- err |= __put_user(temp, &cmd32->scan_end_arg);
- err |= __get_user(temp, &cmd->stop_src);
- err |= __put_user(temp, &cmd32->stop_src);
- err |= __get_user(temp, &cmd->stop_arg);
- err |= __put_user(temp, &cmd32->stop_arg);
- /* Assume chanlist pointer is unchanged. */
- err |= __get_user(temp, &cmd->chanlist_len);
- err |= __put_user(temp, &cmd32->chanlist_len);
- /* Assume data pointer is unchanged. */
- err |= __get_user(temp, &cmd->data_len);
- err |= __put_user(temp, &cmd32->data_len);
- return err ? -EFAULT : 0;
-}
-
-/* Handle 32-bit COMEDI_CMD ioctl. */
-static int compat_cmd(struct file *file, unsigned long arg)
-{
- struct comedi_cmd __user *cmd;
- struct comedi32_cmd_struct __user *cmd32;
- int rc, err;
-
- cmd32 = compat_ptr(arg);
- cmd = compat_alloc_user_space(sizeof(*cmd));
-
- rc = get_compat_cmd(cmd, cmd32);
- if (rc)
- return rc;
-
- rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
- if (rc == -EAGAIN) {
- /* Special case: copy cmd back to user. */
- err = put_compat_cmd(cmd32, cmd);
- if (err)
- rc = err;
- }
-
- return rc;
-}
-
-/* Handle 32-bit COMEDI_CMDTEST ioctl. */
-static int compat_cmdtest(struct file *file, unsigned long arg)
-{
- struct comedi_cmd __user *cmd;
- struct comedi32_cmd_struct __user *cmd32;
- int rc, err;
-
- cmd32 = compat_ptr(arg);
- cmd = compat_alloc_user_space(sizeof(*cmd));
-
- rc = get_compat_cmd(cmd, cmd32);
- if (rc)
- return rc;
-
- rc = translated_ioctl(file, COMEDI_CMDTEST, (unsigned long)cmd);
- if (rc < 0)
- return rc;
-
- err = put_compat_cmd(cmd32, cmd);
- if (err)
- rc = err;
-
- return rc;
-}
-
-/* Copy 32-bit insn structure to native insn structure. */
-static int get_compat_insn(struct comedi_insn __user *insn,
- struct comedi32_insn_struct __user *insn32)
-{
- int err;
- union {
- unsigned int uint;
- compat_uptr_t uptr;
- } temp;
-
- /* Copy insn structure. Ignore the unused members. */
- err = 0;
- if (!access_ok(insn32, sizeof(*insn32)) ||
- !access_ok(insn, sizeof(*insn)))
- return -EFAULT;
-
- err |= __get_user(temp.uint, &insn32->insn);
- err |= __put_user(temp.uint, &insn->insn);
- err |= __get_user(temp.uint, &insn32->n);
- err |= __put_user(temp.uint, &insn->n);
- err |= __get_user(temp.uptr, &insn32->data);
- err |= __put_user(compat_ptr(temp.uptr), &insn->data);
- err |= __get_user(temp.uint, &insn32->subdev);
- err |= __put_user(temp.uint, &insn->subdev);
- err |= __get_user(temp.uint, &insn32->chanspec);
- err |= __put_user(temp.uint, &insn->chanspec);
- return err ? -EFAULT : 0;
-}
-
-/* Handle 32-bit COMEDI_INSNLIST ioctl. */
-static int compat_insnlist(struct file *file, unsigned long arg)
-{
- struct combined_insnlist {
- struct comedi_insnlist insnlist;
- struct comedi_insn insn[1];
- } __user *s;
- struct comedi32_insnlist_struct __user *insnlist32;
- struct comedi32_insn_struct __user *insn32;
- compat_uptr_t uptr;
- unsigned int n_insns, n;
- int err, rc;
-
- insnlist32 = compat_ptr(arg);
-
- /* Get 32-bit insnlist structure. */
- if (!access_ok(insnlist32, sizeof(*insnlist32)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(n_insns, &insnlist32->n_insns);
- err |= __get_user(uptr, &insnlist32->insns);
- insn32 = compat_ptr(uptr);
- if (err)
- return -EFAULT;
-
- /* Allocate user memory to copy insnlist and insns into. */
- s = compat_alloc_user_space(offsetof(struct combined_insnlist,
- insn[n_insns]));
-
- /* Set native insnlist structure. */
- if (!access_ok(&s->insnlist, sizeof(s->insnlist)))
- return -EFAULT;
-
- err |= __put_user(n_insns, &s->insnlist.n_insns);
- err |= __put_user(&s->insn[0], &s->insnlist.insns);
- if (err)
- return -EFAULT;
-
- /* Copy insn structures. */
- for (n = 0; n < n_insns; n++) {
- rc = get_compat_insn(&s->insn[n], &insn32[n]);
- if (rc)
- return rc;
- }
-
- return translated_ioctl(file, COMEDI_INSNLIST,
- (unsigned long)&s->insnlist);
-}
-
-/* Handle 32-bit COMEDI_INSN ioctl. */
-static int compat_insn(struct file *file, unsigned long arg)
-{
- struct comedi_insn __user *insn;
- struct comedi32_insn_struct __user *insn32;
- int rc;
-
- insn32 = compat_ptr(arg);
- insn = compat_alloc_user_space(sizeof(*insn));
-
- rc = get_compat_insn(insn, insn32);
- if (rc)
- return rc;
-
- return translated_ioctl(file, COMEDI_INSN, (unsigned long)insn);
-}
-
-/*
- * compat_ioctl file operation.
- *
- * Returns -ENOIOCTLCMD for unrecognised ioctl codes.
- */
-long comedi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int rc;
-
- switch (cmd) {
- case COMEDI_DEVCONFIG:
- case COMEDI_DEVINFO:
- case COMEDI_SUBDINFO:
- case COMEDI_BUFCONFIG:
- case COMEDI_BUFINFO:
- /* Just need to translate the pointer argument. */
- arg = (unsigned long)compat_ptr(arg);
- rc = translated_ioctl(file, cmd, arg);
- break;
- case COMEDI_LOCK:
- case COMEDI_UNLOCK:
- case COMEDI_CANCEL:
- case COMEDI_POLL:
- case COMEDI_SETRSUBD:
- case COMEDI_SETWSUBD:
- /* No translation needed. */
- rc = translated_ioctl(file, cmd, arg);
- break;
- case COMEDI32_CHANINFO:
- rc = compat_chaninfo(file, arg);
- break;
- case COMEDI32_RANGEINFO:
- rc = compat_rangeinfo(file, arg);
- break;
- case COMEDI32_CMD:
- rc = compat_cmd(file, arg);
- break;
- case COMEDI32_CMDTEST:
- rc = compat_cmdtest(file, arg);
- break;
- case COMEDI32_INSNLIST:
- rc = compat_insnlist(file, arg);
- break;
- case COMEDI32_INSN:
- rc = compat_insn(file, arg);
- break;
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
- return rc;
-}
diff --git a/drivers/staging/comedi/comedi_compat32.h b/drivers/staging/comedi/comedi_compat32.h
deleted file mode 100644
index dc3e2a9442c7..000000000000
--- a/drivers/staging/comedi/comedi_compat32.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * comedi/comedi_compat32.h
- * 32-bit ioctl compatibility for 64-bit comedi kernel module.
- *
- * Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
- * Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
- */
-
-#ifndef _COMEDI_COMPAT32_H
-#define _COMEDI_COMPAT32_H
-
-#ifdef CONFIG_COMPAT
-
-struct file;
-long comedi_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
-
-#else /* CONFIG_COMPAT */
-
-#define comedi_compat_ioctl NULL
-
-#endif /* CONFIG_COMPAT */
-
-#endif /* _COMEDI_COMPAT32_H */
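The two files deleted above implemented the old translation scheme: build a native struct in a scratch area obtained from compat_alloc_user_space() and replay the ioctl through file->f_op->unlocked_ioctl(). The rest of the patch replaces that with plain kernel copies. The underlying problem is only the layout difference between the two ABIs; a minimal, self-contained sketch of that difference (struct and function names are illustrative, not from the patch):

	#include <linux/compat.h>

	/* On a 64-bit kernel a native pointer member is 8 bytes, but a
	 * 32-bit caller laid it out as 4, so every later field shifts.
	 */
	struct native_info {
		unsigned int subdev;
		unsigned int __user *list;	/* 8 bytes on 64-bit */
	};

	struct compat_info {
		unsigned int subdev;
		compat_uptr_t list;		/* always 4 bytes */
	};

	static void translate(struct native_info *dst,
			      const struct compat_info *src)
	{
		dst->subdev = src->subdev;
		dst->list = compat_ptr(src->list);	/* widen the pointer */
	}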
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index e84b4fb493d6..a56c8f74a27b 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -4,13 +4,14 @@
* comedi kernel module
*
* COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
+ * compat ioctls:
+ * Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
+ * Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "comedi_compat32.h"
-
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -27,6 +28,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
#include "comedi_internal.h"
@@ -1047,31 +1049,28 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
* array of range table lengths to chaninfo->range_table_list if requested
*/
static int do_chaninfo_ioctl(struct comedi_device *dev,
- struct comedi_chaninfo __user *arg)
+ struct comedi_chaninfo *it)
{
struct comedi_subdevice *s;
- struct comedi_chaninfo it;
lockdep_assert_held(&dev->mutex);
- if (copy_from_user(&it, arg, sizeof(it)))
- return -EFAULT;
- if (it.subdev >= dev->n_subdevices)
+ if (it->subdev >= dev->n_subdevices)
return -EINVAL;
- s = &dev->subdevices[it.subdev];
+ s = &dev->subdevices[it->subdev];
- if (it.maxdata_list) {
+ if (it->maxdata_list) {
if (s->maxdata || !s->maxdata_list)
return -EINVAL;
- if (copy_to_user(it.maxdata_list, s->maxdata_list,
+ if (copy_to_user(it->maxdata_list, s->maxdata_list,
s->n_chan * sizeof(unsigned int)))
return -EFAULT;
}
- if (it.flaglist)
+ if (it->flaglist)
return -EINVAL; /* flaglist not supported */
- if (it.rangelist) {
+ if (it->rangelist) {
int i;
if (!s->range_table_list)
@@ -1079,9 +1078,9 @@ static int do_chaninfo_ioctl(struct comedi_device *dev,
for (i = 0; i < s->n_chan; i++) {
int x;
- x = (dev->minor << 28) | (it.subdev << 24) | (i << 16) |
+ x = (dev->minor << 28) | (it->subdev << 24) | (i << 16) |
(s->range_table_list[i]->length);
- if (put_user(x, it.rangelist + i))
+ if (put_user(x, it->rangelist + i))
return -EFAULT;
}
}
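do_chaninfo_ioctl() now receives a struct that the dispatcher has already copied in, so the __user annotation moves out of its signature. Note that the struct's pointer members still name user buffers and keep their copy_to_user()/put_user() accesses; only the wrapper struct itself became kernel memory. A condensed sketch of that split (helper name hypothetical):

	/* 'it' is kernel memory; it->maxdata_list is still a user pointer */
	static int fill_maxdata(struct comedi_chaninfo *it,
				const unsigned int *src, unsigned int n)
	{
		if (copy_to_user(it->maxdata_list, src,
				 n * sizeof(unsigned int)))
			return -EFAULT;
		return 0;
	}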
@@ -1521,34 +1520,19 @@ out:
#define MIN_SAMPLES 16
#define MAX_SAMPLES 65536
static int do_insnlist_ioctl(struct comedi_device *dev,
- struct comedi_insnlist __user *arg, void *file)
+ struct comedi_insn *insns,
+ unsigned int n_insns,
+ void *file)
{
- struct comedi_insnlist insnlist;
- struct comedi_insn *insns = NULL;
unsigned int *data = NULL;
unsigned int max_n_data_required = MIN_SAMPLES;
int i = 0;
int ret = 0;
lockdep_assert_held(&dev->mutex);
- if (copy_from_user(&insnlist, arg, sizeof(insnlist)))
- return -EFAULT;
-
- insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
- if (!insns) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (copy_from_user(insns, insnlist.insns,
- sizeof(*insns) * insnlist.n_insns)) {
- dev_dbg(dev->class_dev, "copy_from_user failed\n");
- ret = -EFAULT;
- goto error;
- }
/* Determine maximum memory needed for all instructions. */
- for (i = 0; i < insnlist.n_insns; ++i) {
+ for (i = 0; i < n_insns; ++i) {
if (insns[i].n > MAX_SAMPLES) {
dev_dbg(dev->class_dev,
"number of samples too large\n");
@@ -1566,7 +1550,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
goto error;
}
- for (i = 0; i < insnlist.n_insns; ++i) {
+ for (i = 0; i < n_insns; ++i) {
if (insns[i].insn & INSN_MASK_WRITE) {
if (copy_from_user(data, insns[i].data,
insns[i].n * sizeof(unsigned int))) {
@@ -1593,7 +1577,6 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
error:
- kfree(insns);
kfree(data);
if (ret < 0)
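do_insnlist_ioctl() now takes the instruction array from its caller instead of copying it in itself, which lets the native and compat paths share one handler. A condensed sketch of the caller's side of that contract (helper name hypothetical; error handling as in the reworked dispatcher later in this patch):

	static int insnlist_from_user(struct comedi_insnlist *hdr,
				      void __user *uarg,
				      struct comedi_insn **out)
	{
		struct comedi_insn *insns;

		if (copy_from_user(hdr, uarg, sizeof(*hdr)))
			return -EFAULT;
		/* kcalloc() fails cleanly if n_insns * size would overflow */
		insns = kcalloc(hdr->n_insns, sizeof(*insns), GFP_KERNEL);
		if (!insns)
			return -ENOMEM;
		if (copy_from_user(insns, hdr->insns,
				   sizeof(*insns) * hdr->n_insns)) {
			kfree(insns);
			return -EFAULT;
		}
		*out = insns;
		return 0;
	}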
@@ -1616,22 +1599,19 @@ error:
* data (for reads) to insn->data pointer
*/
static int do_insn_ioctl(struct comedi_device *dev,
- struct comedi_insn __user *arg, void *file)
+ struct comedi_insn *insn, void *file)
{
- struct comedi_insn insn;
unsigned int *data = NULL;
unsigned int n_data = MIN_SAMPLES;
int ret = 0;
lockdep_assert_held(&dev->mutex);
- if (copy_from_user(&insn, arg, sizeof(insn)))
- return -EFAULT;
- n_data = max(n_data, insn.n);
+ n_data = max(n_data, insn->n);
/* This is where the behavior of insn and insnlist deviate. */
- if (insn.n > MAX_SAMPLES) {
- insn.n = MAX_SAMPLES;
+ if (insn->n > MAX_SAMPLES) {
+ insn->n = MAX_SAMPLES;
n_data = MAX_SAMPLES;
}
@@ -1641,26 +1621,26 @@ static int do_insn_ioctl(struct comedi_device *dev,
goto error;
}
- if (insn.insn & INSN_MASK_WRITE) {
+ if (insn->insn & INSN_MASK_WRITE) {
if (copy_from_user(data,
- insn.data,
- insn.n * sizeof(unsigned int))) {
+ insn->data,
+ insn->n * sizeof(unsigned int))) {
ret = -EFAULT;
goto error;
}
}
- ret = parse_insn(dev, &insn, data, file);
+ ret = parse_insn(dev, insn, data, file);
if (ret < 0)
goto error;
- if (insn.insn & INSN_MASK_READ) {
- if (copy_to_user(insn.data,
+ if (insn->insn & INSN_MASK_READ) {
+ if (copy_to_user(insn->data,
data,
- insn.n * sizeof(unsigned int))) {
+ insn->n * sizeof(unsigned int))) {
ret = -EFAULT;
goto error;
}
}
- ret = insn.n;
+ ret = insn->n;
error:
kfree(data);
@@ -1669,17 +1649,11 @@ error:
}
static int __comedi_get_user_cmd(struct comedi_device *dev,
- struct comedi_cmd __user *arg,
struct comedi_cmd *cmd)
{
struct comedi_subdevice *s;
lockdep_assert_held(&dev->mutex);
- if (copy_from_user(cmd, arg, sizeof(*cmd))) {
- dev_dbg(dev->class_dev, "bad cmd address\n");
- return -EFAULT;
- }
-
if (cmd->subdev >= dev->n_subdevices) {
dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd->subdev);
return -ENODEV;
@@ -1767,9 +1741,8 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev,
* possibly modified comedi_cmd structure (when -EAGAIN returned)
*/
static int do_cmd_ioctl(struct comedi_device *dev,
- struct comedi_cmd __user *arg, void *file)
+ struct comedi_cmd *cmd, bool *copy, void *file)
{
- struct comedi_cmd cmd;
struct comedi_subdevice *s;
struct comedi_async *async;
unsigned int __user *user_chanlist;
@@ -1777,15 +1750,15 @@ static int do_cmd_ioctl(struct comedi_device *dev,
lockdep_assert_held(&dev->mutex);
- /* get the user's cmd and do some simple validation */
- ret = __comedi_get_user_cmd(dev, arg, &cmd);
+ /* do some simple cmd validation */
+ ret = __comedi_get_user_cmd(dev, cmd);
if (ret)
return ret;
/* save user's chanlist pointer so it can be restored later */
- user_chanlist = (unsigned int __user *)cmd.chanlist;
+ user_chanlist = (unsigned int __user *)cmd->chanlist;
- s = &dev->subdevices[cmd.subdev];
+ s = &dev->subdevices[cmd->subdev];
async = s->async;
/* are we locked? (ioctl lock) */
@@ -1801,13 +1774,13 @@ static int do_cmd_ioctl(struct comedi_device *dev,
}
/* make sure channel/gain list isn't too short */
- if (cmd.chanlist_len < 1) {
+ if (cmd->chanlist_len < 1) {
dev_dbg(dev->class_dev, "channel/gain list too short %u < 1\n",
- cmd.chanlist_len);
+ cmd->chanlist_len);
return -EINVAL;
}
- async->cmd = cmd;
+ async->cmd = *cmd;
async->cmd.data = NULL;
/* load channel/gain list */
@@ -1819,15 +1792,11 @@ static int do_cmd_ioctl(struct comedi_device *dev,
if (async->cmd.flags & CMDF_BOGUS || ret) {
dev_dbg(dev->class_dev, "test returned %d\n", ret);
- cmd = async->cmd;
+ *cmd = async->cmd;
/* restore chanlist pointer before copying back */
- cmd.chanlist = (unsigned int __force *)user_chanlist;
- cmd.data = NULL;
- if (copy_to_user(arg, &cmd, sizeof(cmd))) {
- dev_dbg(dev->class_dev, "fault writing cmd\n");
- ret = -EFAULT;
- goto cleanup;
- }
+ cmd->chanlist = (unsigned int __force *)user_chanlist;
+ cmd->data = NULL;
+ *copy = true;
ret = -EAGAIN;
goto cleanup;
}
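do_cmd_ioctl() used to write the modified cmd back to user space itself, which tied it to the native layout. The new bool *copy out-parameter only reports that a write-back is needed and leaves the layout choice to the caller, so the compat path can serialize the same struct into the 32-bit format. A hypothetical caller-side sketch of that decision:

	static int finish_cmd(void __user *uarg, struct comedi_cmd *cmd,
			      bool copy, bool compat_caller)
	{
		if (!copy)
			return 0;
		if (compat_caller)		/* 32-bit layout */
			return put_compat_cmd(uarg, cmd);
		return copy_to_user(uarg, cmd, sizeof(*cmd)) ? -EFAULT : 0;
	}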
@@ -1877,44 +1846,39 @@ cleanup:
* possibly modified comedi_cmd structure
*/
static int do_cmdtest_ioctl(struct comedi_device *dev,
- struct comedi_cmd __user *arg, void *file)
+ struct comedi_cmd *cmd, bool *copy, void *file)
{
- struct comedi_cmd cmd;
struct comedi_subdevice *s;
unsigned int __user *user_chanlist;
int ret;
lockdep_assert_held(&dev->mutex);
- /* get the user's cmd and do some simple validation */
- ret = __comedi_get_user_cmd(dev, arg, &cmd);
+ /* do some simple cmd validation */
+ ret = __comedi_get_user_cmd(dev, cmd);
if (ret)
return ret;
/* save user's chanlist pointer so it can be restored later */
- user_chanlist = (unsigned int __user *)cmd.chanlist;
+ user_chanlist = (unsigned int __user *)cmd->chanlist;
- s = &dev->subdevices[cmd.subdev];
+ s = &dev->subdevices[cmd->subdev];
/* user_chanlist can be NULL for COMEDI_CMDTEST ioctl */
if (user_chanlist) {
/* load channel/gain list */
- ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd);
+ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, cmd);
if (ret)
return ret;
}
- ret = s->do_cmdtest(dev, s, &cmd);
+ ret = s->do_cmdtest(dev, s, cmd);
- kfree(cmd.chanlist); /* free kernel copy of user chanlist */
+ kfree(cmd->chanlist); /* free kernel copy of user chanlist */
/* restore chanlist pointer before copying back */
- cmd.chanlist = (unsigned int __force *)user_chanlist;
-
- if (copy_to_user(arg, &cmd, sizeof(cmd))) {
- dev_dbg(dev->class_dev, "bad cmd address\n");
- ret = -EFAULT;
- }
+ cmd->chanlist = (unsigned int __force *)user_chanlist;
+ *copy = true;
return ret;
}
@@ -2203,12 +2167,22 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
(struct comedi_subdinfo __user *)arg,
file);
break;
- case COMEDI_CHANINFO:
- rc = do_chaninfo_ioctl(dev, (void __user *)arg);
+ case COMEDI_CHANINFO: {
+ struct comedi_chaninfo it;
+ if (copy_from_user(&it, (void __user *)arg, sizeof(it)))
+ rc = -EFAULT;
+ else
+ rc = do_chaninfo_ioctl(dev, &it);
break;
- case COMEDI_RANGEINFO:
- rc = do_rangeinfo_ioctl(dev, (void __user *)arg);
+ }
+ case COMEDI_RANGEINFO: {
+ struct comedi_rangeinfo it;
+ if (copy_from_user(&it, (void __user *)arg, sizeof(it)))
+ rc = -EFAULT;
+ else
+ rc = do_rangeinfo_ioctl(dev, &it);
break;
+ }
case COMEDI_BUFINFO:
rc = do_bufinfo_ioctl(dev,
(struct comedi_bufinfo __user *)arg,
@@ -2223,22 +2197,64 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
case COMEDI_CANCEL:
rc = do_cancel_ioctl(dev, arg, file);
break;
- case COMEDI_CMD:
- rc = do_cmd_ioctl(dev, (struct comedi_cmd __user *)arg, file);
+ case COMEDI_CMD: {
+ struct comedi_cmd cmd;
+ bool copy = false;
+
+ if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd))) {
+ rc = -EFAULT;
+ break;
+ }
+ rc = do_cmd_ioctl(dev, &cmd, &copy, file);
+ if (copy && copy_to_user((void __user *)arg, &cmd, sizeof(cmd)))
+ rc = -EFAULT;
break;
- case COMEDI_CMDTEST:
- rc = do_cmdtest_ioctl(dev, (struct comedi_cmd __user *)arg,
- file);
+ }
+ case COMEDI_CMDTEST: {
+ struct comedi_cmd cmd;
+ bool copy = false;
+
+ if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd))) {
+ rc = -EFAULT;
+ break;
+ }
+ rc = do_cmdtest_ioctl(dev, &cmd, &copy, file);
+ if (copy && copy_to_user((void __user *)arg, &cmd, sizeof(cmd)))
+ rc = -EFAULT;
break;
- case COMEDI_INSNLIST:
- rc = do_insnlist_ioctl(dev,
- (struct comedi_insnlist __user *)arg,
- file);
+ }
+ case COMEDI_INSNLIST: {
+ struct comedi_insnlist insnlist;
+ struct comedi_insn *insns = NULL;
+
+ if (copy_from_user(&insnlist, (void __user *)arg,
+ sizeof(insnlist))) {
+ rc = -EFAULT;
+ break;
+ }
+ insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
+ if (!insns) {
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(insns, insnlist.insns,
+ sizeof(*insns) * insnlist.n_insns)) {
+ rc = -EFAULT;
+ kfree(insns);
+ break;
+ }
+ rc = do_insnlist_ioctl(dev, insns, insnlist.n_insns, file);
+ kfree(insns);
break;
- case COMEDI_INSN:
- rc = do_insn_ioctl(dev, (struct comedi_insn __user *)arg,
- file);
+ }
+ case COMEDI_INSN: {
+ struct comedi_insn insn;
+ if (copy_from_user(&insn, (void __user *)arg, sizeof(insn)))
+ rc = -EFAULT;
+ else
+ rc = do_insn_ioctl(dev, &insn, file);
break;
+ }
case COMEDI_POLL:
rc = do_poll_ioctl(dev, arg, file);
break;
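In the reworked COMEDI_CMD and COMEDI_CMDTEST cases above, the write-back is keyed on the copy flag rather than on rc, which preserves the old contract that a failed command test returns -EAGAIN together with the adjusted cmd. A condensed restatement of the pattern (wrapper name hypothetical):

	static int cmd_case(struct comedi_device *dev, void __user *uarg,
			    void *file)
	{
		struct comedi_cmd cmd;
		bool copy = false;
		int rc;

		if (copy_from_user(&cmd, uarg, sizeof(cmd)))
			return -EFAULT;
		rc = do_cmd_ioctl(dev, &cmd, &copy, file);
		/* keyed on 'copy', not rc: -EAGAIN still carries the cmd */
		if (copy && copy_to_user(uarg, &cmd, sizeof(cmd)))
			rc = -EFAULT;
		return rc;
	}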
@@ -2808,6 +2824,344 @@ static int comedi_close(struct inode *inode, struct file *file)
return 0;
}
+#ifdef CONFIG_COMPAT
+
+#define COMEDI32_CHANINFO _IOR(CIO, 3, struct comedi32_chaninfo_struct)
+#define COMEDI32_RANGEINFO _IOR(CIO, 8, struct comedi32_rangeinfo_struct)
+/*
+ * N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR.
+ * It's too late to change it now, but it only affects the command number.
+ */
+#define COMEDI32_CMD _IOR(CIO, 9, struct comedi32_cmd_struct)
+/*
+ * N.B. COMEDI32_CMDTEST and COMEDI_CMDTEST ought to use _IOWR, not _IOR.
+ * It's too late to change it now, but it only affects the command number.
+ */
+#define COMEDI32_CMDTEST _IOR(CIO, 10, struct comedi32_cmd_struct)
+#define COMEDI32_INSNLIST _IOR(CIO, 11, struct comedi32_insnlist_struct)
+#define COMEDI32_INSN _IOR(CIO, 12, struct comedi32_insn_struct)
+
+struct comedi32_chaninfo_struct {
+ unsigned int subdev;
+ compat_uptr_t maxdata_list; /* 32-bit 'unsigned int *' */
+ compat_uptr_t flaglist; /* 32-bit 'unsigned int *' */
+ compat_uptr_t rangelist; /* 32-bit 'unsigned int *' */
+ unsigned int unused[4];
+};
+
+struct comedi32_rangeinfo_struct {
+ unsigned int range_type;
+ compat_uptr_t range_ptr; /* 32-bit 'void *' */
+};
+
+struct comedi32_cmd_struct {
+ unsigned int subdev;
+ unsigned int flags;
+ unsigned int start_src;
+ unsigned int start_arg;
+ unsigned int scan_begin_src;
+ unsigned int scan_begin_arg;
+ unsigned int convert_src;
+ unsigned int convert_arg;
+ unsigned int scan_end_src;
+ unsigned int scan_end_arg;
+ unsigned int stop_src;
+ unsigned int stop_arg;
+ compat_uptr_t chanlist; /* 32-bit 'unsigned int *' */
+ unsigned int chanlist_len;
+ compat_uptr_t data; /* 32-bit 'short *' */
+ unsigned int data_len;
+};
+
+struct comedi32_insn_struct {
+ unsigned int insn;
+ unsigned int n;
+ compat_uptr_t data; /* 32-bit 'unsigned int *' */
+ unsigned int subdev;
+ unsigned int chanspec;
+ unsigned int unused[3];
+};
+
+struct comedi32_insnlist_struct {
+ unsigned int n_insns;
+ compat_uptr_t insns; /* 32-bit 'struct comedi_insn *' */
+};
+
+/* Handle 32-bit COMEDI_CHANINFO ioctl. */
+static int compat_chaninfo(struct file *file, unsigned long arg)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+ struct comedi32_chaninfo_struct chaninfo32;
+ struct comedi_chaninfo chaninfo;
+ int err;
+
+ if (copy_from_user(&chaninfo32, compat_ptr(arg), sizeof(chaninfo32)))
+ return -EFAULT;
+
+ memset(&chaninfo, 0, sizeof(chaninfo));
+ chaninfo.subdev = chaninfo32.subdev;
+ chaninfo.maxdata_list = compat_ptr(chaninfo32.maxdata_list);
+ chaninfo.flaglist = compat_ptr(chaninfo32.flaglist);
+ chaninfo.rangelist = compat_ptr(chaninfo32.rangelist);
+
+ mutex_lock(&dev->mutex);
+ err = do_chaninfo_ioctl(dev, &chaninfo);
+ mutex_unlock(&dev->mutex);
+ return err;
+}
+
+/* Handle 32-bit COMEDI_RANGEINFO ioctl. */
+static int compat_rangeinfo(struct file *file, unsigned long arg)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+ struct comedi32_rangeinfo_struct rangeinfo32;
+ struct comedi_rangeinfo rangeinfo;
+ int err;
+
+ if (copy_from_user(&rangeinfo32, compat_ptr(arg), sizeof(rangeinfo32)))
+ return -EFAULT;
+ memset(&rangeinfo, 0, sizeof(rangeinfo));
+ rangeinfo.range_type = rangeinfo32.range_type;
+ rangeinfo.range_ptr = compat_ptr(rangeinfo32.range_ptr);
+
+ mutex_lock(&dev->mutex);
+ err = do_rangeinfo_ioctl(dev, &rangeinfo);
+ mutex_unlock(&dev->mutex);
+ return err;
+}
+
+/* Copy 32-bit cmd structure to native cmd structure. */
+static int get_compat_cmd(struct comedi_cmd *cmd,
+ struct comedi32_cmd_struct __user *cmd32)
+{
+ struct comedi32_cmd_struct v32;
+
+ if (copy_from_user(&v32, cmd32, sizeof(v32)))
+ return -EFAULT;
+
+ cmd->subdev = v32.subdev;
+ cmd->flags = v32.flags;
+ cmd->start_src = v32.start_src;
+ cmd->start_arg = v32.start_arg;
+ cmd->scan_begin_src = v32.scan_begin_src;
+ cmd->scan_begin_arg = v32.scan_begin_arg;
+ cmd->convert_src = v32.convert_src;
+ cmd->convert_arg = v32.convert_arg;
+ cmd->scan_end_src = v32.scan_end_src;
+ cmd->scan_end_arg = v32.scan_end_arg;
+ cmd->stop_src = v32.stop_src;
+ cmd->stop_arg = v32.stop_arg;
+ cmd->chanlist = compat_ptr(v32.chanlist);
+ cmd->chanlist_len = v32.chanlist_len;
+ cmd->data = compat_ptr(v32.data);
+ cmd->data_len = v32.data_len;
+ return 0;
+}
+
+/* Copy native cmd structure to 32-bit cmd structure. */
+static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
+ struct comedi_cmd *cmd)
+{
+ struct comedi32_cmd_struct v32;
+
+ memset(&v32, 0, sizeof(v32));
+ v32.subdev = cmd->subdev;
+ v32.flags = cmd->flags;
+ v32.start_src = cmd->start_src;
+ v32.start_arg = cmd->start_arg;
+ v32.scan_begin_src = cmd->scan_begin_src;
+ v32.scan_begin_arg = cmd->scan_begin_arg;
+ v32.convert_src = cmd->convert_src;
+ v32.convert_arg = cmd->convert_arg;
+ v32.scan_end_src = cmd->scan_end_src;
+ v32.scan_end_arg = cmd->scan_end_arg;
+ v32.stop_src = cmd->stop_src;
+ v32.stop_arg = cmd->stop_arg;
+ /* Assume chanlist pointer is unchanged. */
+ v32.chanlist = ptr_to_compat(cmd->chanlist);
+ v32.chanlist_len = cmd->chanlist_len;
+ v32.data = ptr_to_compat(cmd->data);
+ v32.data_len = cmd->data_len;
+ return copy_to_user(cmd32, &v32, sizeof(v32));
+}
+
+/* Handle 32-bit COMEDI_CMD ioctl. */
+static int compat_cmd(struct file *file, unsigned long arg)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+ struct comedi_cmd cmd;
+ bool copy = false;
+ int rc, err;
+
+ rc = get_compat_cmd(&cmd, compat_ptr(arg));
+ if (rc)
+ return rc;
+
+ mutex_lock(&dev->mutex);
+ rc = do_cmd_ioctl(dev, &cmd, &copy, file);
+ mutex_unlock(&dev->mutex);
+ if (copy) {
+ /* Special case: copy cmd back to user. */
+ err = put_compat_cmd(compat_ptr(arg), &cmd);
+ if (err)
+ rc = err;
+ }
+ return rc;
+}
+
+/* Handle 32-bit COMEDI_CMDTEST ioctl. */
+static int compat_cmdtest(struct file *file, unsigned long arg)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+ struct comedi_cmd cmd;
+ bool copy = false;
+ int rc, err;
+
+ rc = get_compat_cmd(&cmd, compat_ptr(arg));
+ if (rc)
+ return rc;
+
+ mutex_lock(&dev->mutex);
+ rc = do_cmdtest_ioctl(dev, &cmd, &copy, file);
+ mutex_unlock(&dev->mutex);
+ if (copy) {
+ err = put_compat_cmd(compat_ptr(arg), &cmd);
+ if (err)
+ rc = err;
+ }
+ return rc;
+}
+
+/* Copy 32-bit insn structure to native insn structure. */
+static int get_compat_insn(struct comedi_insn *insn,
+ struct comedi32_insn_struct __user *insn32)
+{
+ struct comedi32_insn_struct v32;
+
+ /* Copy insn structure. Ignore the unused members. */
+ if (copy_from_user(&v32, insn32, sizeof(v32)))
+ return -EFAULT;
+ memset(insn, 0, sizeof(*insn));
+ insn->insn = v32.insn;
+ insn->n = v32.n;
+ insn->data = compat_ptr(v32.data);
+ insn->subdev = v32.subdev;
+ insn->chanspec = v32.chanspec;
+ return 0;
+}
+
+/* Handle 32-bit COMEDI_INSNLIST ioctl. */
+static int compat_insnlist(struct file *file, unsigned long arg)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+ struct comedi32_insnlist_struct insnlist32;
+ struct comedi32_insn_struct __user *insn32;
+ struct comedi_insn *insns;
+ unsigned int n;
+ int rc;
+
+ if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32)))
+ return -EFAULT;
+
+ insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Copy insn structures. */
+ insn32 = compat_ptr(insnlist32.insns);
+ for (n = 0; n < insnlist32.n_insns; n++) {
+ rc = get_compat_insn(insns + n, insn32 + n);
+ if (rc) {
+ kfree(insns);
+ return rc;
+ }
+ }
+
+ mutex_lock(&dev->mutex);
+ rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
+ mutex_unlock(&dev->mutex);
+ return rc;
+}
+
+/* Handle 32-bit COMEDI_INSN ioctl. */
+static int compat_insn(struct file *file, unsigned long arg)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+ struct comedi_insn insn;
+ int rc;
+
+ rc = get_compat_insn(&insn, (void __user *)arg);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dev->mutex);
+ rc = do_insn_ioctl(dev, &insn, file);
+ mutex_unlock(&dev->mutex);
+ return rc;
+}
+
+/*
+ * compat_ioctl file operation.
+ *
+ * Returns -ENOIOCTLCMD for unrecognised ioctl codes.
+ */
+static long comedi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int rc;
+
+ switch (cmd) {
+ case COMEDI_DEVCONFIG:
+ case COMEDI_DEVINFO:
+ case COMEDI_SUBDINFO:
+ case COMEDI_BUFCONFIG:
+ case COMEDI_BUFINFO:
+ /* Just need to translate the pointer argument. */
+ arg = (unsigned long)compat_ptr(arg);
+ rc = comedi_unlocked_ioctl(file, cmd, arg);
+ break;
+ case COMEDI_LOCK:
+ case COMEDI_UNLOCK:
+ case COMEDI_CANCEL:
+ case COMEDI_POLL:
+ case COMEDI_SETRSUBD:
+ case COMEDI_SETWSUBD:
+ /* No translation needed. */
+ rc = comedi_unlocked_ioctl(file, cmd, arg);
+ break;
+ case COMEDI32_CHANINFO:
+ rc = compat_chaninfo(file, arg);
+ break;
+ case COMEDI32_RANGEINFO:
+ rc = compat_rangeinfo(file, arg);
+ break;
+ case COMEDI32_CMD:
+ rc = compat_cmd(file, arg);
+ break;
+ case COMEDI32_CMDTEST:
+ rc = compat_cmdtest(file, arg);
+ break;
+ case COMEDI32_INSNLIST:
+ rc = compat_insnlist(file, arg);
+ break;
+ case COMEDI32_INSN:
+ rc = compat_insn(file, arg);
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ return rc;
+}
+#else
+#define comedi_compat_ioctl NULL
+#endif
+
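The compat helpers added above all reduce to one idiom: copy the packed 32-bit struct into kernel memory, widen each compat_uptr_t with compat_ptr(), and narrow it again with ptr_to_compat() on the way out. A minimal round-trip sketch (assumes CONFIG_COMPAT; function name illustrative):

	#include <linux/compat.h>

	static void roundtrip(struct comedi_cmd *cmd,
			      struct comedi32_cmd_struct *v32)
	{
		cmd->chanlist = compat_ptr(v32->chanlist);	/* u32 -> ptr */
		v32->chanlist = ptr_to_compat(cmd->chanlist);	/* ptr -> u32 */
	}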
static const struct file_operations comedi_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = comedi_unlocked_ioctl,
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index 515f293a5d26..9b3631a654c8 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -18,7 +18,7 @@ struct comedi_subdevice;
struct device;
int do_rangeinfo_ioctl(struct comedi_device *dev,
- struct comedi_rangeinfo __user *arg);
+ struct comedi_rangeinfo *it);
struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device);
void comedi_release_hardware_device(struct device *hardware_device);
int comedi_alloc_subdevice_minor(struct comedi_subdevice *s);
@@ -32,8 +32,8 @@ void comedi_buf_map_get(struct comedi_buf_map *bm);
int comedi_buf_map_put(struct comedi_buf_map *bm);
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
void *buf, int len, int write);
-struct comedi_buf_map *comedi_buf_map_from_subdev_get(
- struct comedi_subdevice *s);
+struct comedi_buf_map *
+comedi_buf_map_from_subdev_get(struct comedi_subdevice *s);
unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s);
unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s);
void comedi_device_cancel_all(struct comedi_device *dev);
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 7c82d5f9778f..c1d70eec24ab 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1214,7 +1214,7 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
struct mite *mite = devpriv->mite;
resource_size_t daq_phys_addr;
- static const int Start_Cal_EEPROM = 0x400;
+ static const int start_cal_eeprom = 0x400;
static const unsigned int window_size = 10;
unsigned int old_iodwbsr_bits;
unsigned int old_iodwbsr1_bits;
@@ -1234,7 +1234,7 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
writel(0xf, mite->mmio + 0x30);
for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
- devpriv->eeprom_buffer[i] = ni_readb(dev, Start_Cal_EEPROM + i);
+ devpriv->eeprom_buffer[i] = ni_readb(dev, start_cal_eeprom + i);
writel(old_iodwbsr1_bits, mite->mmio + MITE_IODWBSR_1);
writel(old_iodwbsr_bits, mite->mmio + MITE_IODWBSR);
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 89d599877445..a4e6fe0fb729 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -46,17 +46,14 @@ EXPORT_SYMBOL_GPL(range_unknown);
* array of comedi_krange structures to rangeinfo->range_ptr pointer
*/
int do_rangeinfo_ioctl(struct comedi_device *dev,
- struct comedi_rangeinfo __user *arg)
+ struct comedi_rangeinfo *it)
{
- struct comedi_rangeinfo it;
int subd, chan;
const struct comedi_lrange *lr;
struct comedi_subdevice *s;
- if (copy_from_user(&it, arg, sizeof(struct comedi_rangeinfo)))
- return -EFAULT;
- subd = (it.range_type >> 24) & 0xf;
- chan = (it.range_type >> 16) & 0xff;
+ subd = (it->range_type >> 24) & 0xf;
+ chan = (it->range_type >> 16) & 0xff;
if (!dev->attached)
return -EINVAL;
@@ -73,15 +70,15 @@ int do_rangeinfo_ioctl(struct comedi_device *dev,
return -EINVAL;
}
- if (RANGE_LENGTH(it.range_type) != lr->length) {
+ if (RANGE_LENGTH(it->range_type) != lr->length) {
dev_dbg(dev->class_dev,
"wrong length %d should be %d (0x%08x)\n",
- RANGE_LENGTH(it.range_type),
- lr->length, it.range_type);
+ RANGE_LENGTH(it->range_type),
+ lr->length, it->range_type);
return -EINVAL;
}
- if (copy_to_user(it.range_ptr, lr->range,
+ if (copy_to_user(it->range_ptr, lr->range,
sizeof(struct comedi_krange) * lr->length))
return -EFAULT;
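For reference, range_type is a packed address: the handler above unpacks subdevice and channel from it, and do_chaninfo_ioctl() builds it with the same shifts ((minor << 28) | (subdev << 24) | (chan << 16) | length). A sketch of the packing side (helper name illustrative):

	/* [31:28] minor  [27:24] subdevice  [23:16] channel  [15:0] length */
	static unsigned int pack_range_type(unsigned int minor,
					    unsigned int subdev,
					    unsigned int chan,
					    unsigned int len)
	{
		return (minor << 28) | (subdev << 24) | (chan << 16) | len;
	}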
diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
index 3c3f387936e8..3a280cc1892c 100644
--- a/drivers/staging/fbtft/fb_st7789v.c
+++ b/drivers/staging/fbtft/fb_st7789v.c
@@ -20,6 +20,12 @@
"70 2C 2E 15 10 09 48 33 53 0B 19 18 20 25\n" \
"70 2C 2E 15 10 09 48 33 53 0B 19 18 20 25"
+#define HSD20_IPS_GAMMA \
+ "D0 05 0A 09 08 05 2E 44 45 0F 17 16 2B 33\n" \
+ "D0 05 0A 09 08 05 2E 43 45 0F 16 16 2B 33"
+
+#define HSD20_IPS 1
+
/**
* enum st7789v_command - ST7789V display controller commands
*
@@ -82,14 +88,20 @@ static int init_display(struct fbtft_par *par)
/* set pixel format to RGB-565 */
write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+ if (HSD20_IPS)
+ write_reg(par, PORCTRL, 0x05, 0x05, 0x00, 0x33, 0x33);
- write_reg(par, PORCTRL, 0x08, 0x08, 0x00, 0x22, 0x22);
+ else
+ write_reg(par, PORCTRL, 0x08, 0x08, 0x00, 0x22, 0x22);
/*
* VGH = 13.26V
* VGL = -10.43V
*/
- write_reg(par, GCTRL, 0x35);
+ if (HSD20_IPS)
+ write_reg(par, GCTRL, 0x75);
+ else
+ write_reg(par, GCTRL, 0x35);
/*
* VDV and VRH register values come from command write
@@ -101,13 +113,19 @@ static int init_display(struct fbtft_par *par)
* VAP = 4.1V + (VCOM + VCOM offset + 0.5 * VDV)
* VAN = -4.1V + (VCOM + VCOM offset + 0.5 * VDV)
*/
- write_reg(par, VRHS, 0x0B);
+ if (HSD20_IPS)
+ write_reg(par, VRHS, 0x13);
+ else
+ write_reg(par, VRHS, 0x0B);
/* VDV = 0V */
write_reg(par, VDVS, 0x20);
/* VCOM = 0.9V */
- write_reg(par, VCOMS, 0x20);
+ if (HSD20_IPS)
+ write_reg(par, VCOMS, 0x22);
+ else
+ write_reg(par, VCOMS, 0x20);
/* VCOM offset = 0V */
write_reg(par, VCMOFSET, 0x20);
@@ -120,6 +138,10 @@ static int init_display(struct fbtft_par *par)
write_reg(par, PWCTRL1, 0xA4, 0xA1);
write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
+
+ if (HSD20_IPS)
+ write_reg(par, MIPI_DCS_ENTER_INVERT_MODE);
+
return 0;
}
@@ -234,7 +256,7 @@ static struct fbtft_display display = {
.height = 320,
.gamma_num = 2,
.gamma_len = 14,
- .gamma = DEFAULT_GAMMA,
+ .gamma = HSD20_IPS_GAMMA,
.fbtftops = {
.init_display = init_display,
.set_var = set_var,
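The HSD20_IPS macro above selects between two fixed register sets at build time (porch control, gate voltage, VRH, VCOM, plus gamma and inversion). The same data shown as a table-driven sketch rather than the driver's if/else chain (struct name illustrative; values copied from the hunks above):

	struct st7789v_panel_cfg {
		u8 gctrl;	/* gate voltages */
		u8 vrhs;	/* VRH */
		u8 vcoms;	/* VCOM */
	};

	static const struct st7789v_panel_cfg cfg_default   = { 0x35, 0x0B, 0x20 };
	static const struct st7789v_panel_cfg cfg_hsd20_ips = { 0x75, 0x13, 0x22 };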
diff --git a/drivers/staging/fsl-dpaa2/ethsw/README b/drivers/staging/fsl-dpaa2/ethsw/README
index f6fc07f780d1..b48dcbf7c5fb 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/README
+++ b/drivers/staging/fsl-dpaa2/ethsw/README
@@ -79,7 +79,7 @@ The DPSW can have ports connected to DPNIs or to PHYs via DPMACs.
For a more detailed description of the Ethernet switch device driver model
see:
- Documentation/networking/switchdev.txt
+ Documentation/networking/switchdev.rst
Creating an Ethernet Switch
===========================
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 676d1ad1b50d..546ad376df99 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -1094,7 +1094,8 @@ static int swdev_port_obj_del(struct net_device *netdev,
static int
ethsw_switchdev_port_attr_set_event(struct net_device *netdev,
- struct switchdev_notifier_port_attr_info *port_attr_info)
+ struct switchdev_notifier_port_attr_info
+ *port_attr_info)
{
int err;
@@ -1277,7 +1278,8 @@ err_addr_alloc:
static int
ethsw_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
- struct switchdev_notifier_port_obj_info *port_obj_info)
+ struct switchdev_notifier_port_obj_info
+ *port_obj_info)
{
int err = -EOPNOTSUPP;
diff --git a/drivers/staging/gasket/gasket_page_table.c b/drivers/staging/gasket/gasket_page_table.c
index f6d715787da8..f3dbe0fe2a67 100644
--- a/drivers/staging/gasket/gasket_page_table.c
+++ b/drivers/staging/gasket/gasket_page_table.c
@@ -898,7 +898,7 @@ static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl,
*
* Note that memory for second level page tables is allocated as needed, but
* that memory is only freed on the final close of the device file, when the
- * page tables are repartitioned, or the the device is removed. If there is an
+ * page tables are repartitioned, or the device is removed. If there is an
* error or if the full range of slots is not available, any memory
* allocated for second level page tables remains allocated until final close,
* repartition, or device removal.
diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c
index 5f0e089573a2..af26bc9f184a 100644
--- a/drivers/staging/gasket/gasket_sysfs.c
+++ b/drivers/staging/gasket/gasket_sysfs.c
@@ -339,6 +339,7 @@ void gasket_sysfs_put_attr(struct device *device,
dev_err(device, "Unable to put unknown attribute: %s\n",
attr->attr.attr.name);
+ put_mapping(mapping);
}
EXPORT_SYMBOL(gasket_sysfs_put_attr);
@@ -372,6 +373,7 @@ ssize_t gasket_sysfs_register_store(struct device *device,
gasket_dev = mapping->gasket_dev;
if (!gasket_dev) {
dev_err(device, "Device driver may have been removed\n");
+ put_mapping(mapping);
return 0;
}
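Both gasket_sysfs hunks fix the same reference leak: the mapping looked up earlier in each function was not released with put_mapping() on these early-exit paths. The general shape of the fix, condensed (get_mapping()/put_mapping() follow the file; do_show() is hypothetical):

	static ssize_t show_attr(struct device *device, char *buf)
	{
		struct gasket_sysfs_mapping *mapping = get_mapping(device);
		ssize_t ret = 0;

		if (!mapping)
			return 0;
		if (!mapping->gasket_dev)
			goto out;	/* this path used to leak the ref */
		ret = do_show(mapping, buf);
	out:
		put_mapping(mapping);	/* drop the reference on every path */
		return ret;
	}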
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 354727f0a1fc..eb309190f5be 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -172,7 +172,7 @@ static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
static __sum16 icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
{
- unsigned short *w = ptr;
+ unsigned short *w;
__wsum sum = 0;
int i;
u16 pa;
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
index 04bfd9110502..ed706f39e87a 100644
--- a/drivers/staging/greybus/hid.c
+++ b/drivers/staging/greybus/hid.c
@@ -290,9 +290,8 @@ static int gb_hid_parse(struct hid_device *hid)
}
rdesc = kzalloc(rsize, GFP_KERNEL);
- if (!rdesc) {
+ if (!rdesc)
return -ENOMEM;
- }
ret = gb_hid_get_report_desc(ghid, rdesc);
if (ret) {
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index d6ba25f21d80..d2672b65c3f4 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -1026,7 +1026,8 @@ static int gb_lights_light_config(struct gb_lights *glights, u8 id)
light->channels_count = conf.channel_count;
light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL);
-
+ if (!light->name)
+ return -ENOMEM;
light->channels = kcalloc(light->channels_count,
sizeof(struct gb_channel), GFP_KERNEL);
if (!light->channels)
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index 583d9708a191..2471448ba42a 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -135,7 +135,7 @@ static ssize_t name##_##field##_show(struct device *dev, \
char *buf) \
{ \
struct gb_loopback *gb = dev_get_drvdata(dev); \
- /* Report 0 for min and max if no transfer successed */ \
+ /* Report 0 for min and max if no transfer succeeded */ \
if (!gb->requests_completed) \
return sprintf(buf, "0\n"); \
return sprintf(buf, "%" #type "\n", gb->name.field); \
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index 68c5718be827..0939f4a4c963 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -67,7 +67,6 @@ static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
- ((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
@@ -411,6 +410,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
struct gb_sdio_command_request request = {0};
struct gb_sdio_command_response response;
struct mmc_data *data = host->mrq->data;
+ unsigned int timeout_ms;
u8 cmd_flags;
u8 cmd_type;
int i;
@@ -469,9 +469,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
request.data_blksz = cpu_to_le16(data->blksz);
}
- ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
- &request, sizeof(request), &response,
- sizeof(response));
+ timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
+ GB_OPERATION_TIMEOUT_DEFAULT;
+
+ ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
+ &request, sizeof(request), &response,
+ sizeof(response), timeout_ms);
if (ret < 0)
goto out;
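Two related changes here: MMC_CAP_ERASE disappears because erase support was made unconditional in the MMC core during this cycle, and commands that carry a busy wait now use the card-supplied busy_timeout instead of the fixed default operation timeout. The timeout selection as a one-line sketch (function name hypothetical):

	static unsigned int gb_sdio_cmd_timeout_ms(const struct mmc_command *cmd)
	{
		return cmd->busy_timeout ? cmd->busy_timeout
					 : GB_OPERATION_TIMEOUT_DEFAULT;
	}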
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 55c51143bb09..607378bfebb7 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -40,14 +40,6 @@
#define GB_UART_FIRMWARE_CREDITS 4096
#define GB_UART_CREDIT_WAIT_TIMEOUT_MSEC 10000
-struct gb_tty_line_coding {
- __le32 rate;
- __u8 format;
- __u8 parity;
- __u8 data_bits;
- __u8 flow_control;
-};
-
struct gb_tty {
struct gbphy_device *gbphy_dev;
struct tty_port port;
@@ -66,7 +58,7 @@ struct gb_tty {
struct mutex mutex;
u8 ctrlin; /* input control lines */
u8 ctrlout; /* output control lines */
- struct gb_tty_line_coding line_coding;
+ struct gb_uart_set_line_coding_request line_coding;
struct work_struct tx_work;
struct kfifo write_fifo;
bool close_pending;
@@ -288,12 +280,9 @@ static void gb_uart_tx_write_work(struct work_struct *work)
static int send_line_coding(struct gb_tty *tty)
{
- struct gb_uart_set_line_coding_request request;
-
- memcpy(&request, &tty->line_coding,
- sizeof(tty->line_coding));
return gb_operation_sync(tty->connection, GB_UART_TYPE_SET_LINE_CODING,
- &request, sizeof(request), NULL, 0);
+ &tty->line_coding, sizeof(tty->line_coding),
+ NULL, 0);
}
static int send_control(struct gb_tty *gb_tty, u8 control)
@@ -493,9 +482,9 @@ static int gb_tty_break_ctl(struct tty_struct *tty, int state)
static void gb_tty_set_termios(struct tty_struct *tty,
struct ktermios *termios_old)
{
+ struct gb_uart_set_line_coding_request newline;
struct gb_tty *gb_tty = tty->driver_data;
struct ktermios *termios = &tty->termios;
- struct gb_tty_line_coding newline;
u8 newctrl = gb_tty->ctrlout;
newline.rate = cpu_to_le32(tty_get_baud_rate(tty));
@@ -537,9 +526,9 @@ static void gb_tty_set_termios(struct tty_struct *tty,
}
if (C_CRTSCTS(tty) && C_BAUD(tty) != B0)
- newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN;
+ newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN;
else
- newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN;
+ newline.flow_control = 0;
if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) {
memcpy(&gb_tty->line_coding, &newline, sizeof(newline));
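The uart change folds the private gb_tty_line_coding into the wire-format gb_uart_set_line_coding_request, removing a memcpy per update. It also fixes a latent bug: newline is a stack variable, so the old |= and &= on flow_control operated on an uninitialized byte. Plain assignment derives the field from inputs only, per this sketch (helper name illustrative):

	static u8 uart_flow_control(bool rtscts)
	{
		/* computed from inputs; no read of uninitialized stack state */
		return rtscts ? GB_SERIAL_AUTO_RTSCTS_EN : 0;
	}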
diff --git a/drivers/staging/iio/Documentation/overview.txt b/drivers/staging/iio/Documentation/overview.txt
index 43f92b06bc3e..ebdc64f451d7 100644
--- a/drivers/staging/iio/Documentation/overview.txt
+++ b/drivers/staging/iio/Documentation/overview.txt
@@ -34,7 +34,7 @@ turned on or off (if possible) via sysfs interfaces.
fifo / ring buffers on the sensor chip. These greatly reduce the load
on the host CPU by buffering relatively large numbers of data samples
based on an internal sampling clock. Examples include VTI SCA3000
-series and Analog Device ADXL345 accelerometers. Each buffer supports
+series and Analog Devices ADXL345 accelerometers. Each buffer supports
polling to establish when data is available.
* Trigger and software buffer support. In many data analysis
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index af0bcf95ee8a..c468355b0848 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -602,11 +602,12 @@ static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = {
.postdisable = ad5933_ring_postdisable,
};
-static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+static int ad5933_register_ring_funcs_and_init(struct device *dev,
+ struct iio_dev *indio_dev)
{
struct iio_buffer *buffer;
- buffer = iio_kfifo_allocate();
+ buffer = devm_iio_kfifo_allocate(dev);
if (!buffer)
return -ENOMEM;
@@ -676,6 +677,20 @@ static void ad5933_work(struct work_struct *work)
}
}
+static void ad5933_reg_disable(void *data)
+{
+ struct ad5933_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static void ad5933_clk_disable(void *data)
+{
+ struct ad5933_state *st = data;
+
+ clk_disable_unprepare(st->mclk);
+}
+
static int ad5933_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -703,23 +718,32 @@ static int ad5933_probe(struct i2c_client *client,
dev_err(&client->dev, "Failed to enable specified VDD supply\n");
return ret;
}
- ret = regulator_get_voltage(st->reg);
+ ret = devm_add_action_or_reset(&client->dev, ad5933_reg_disable, st);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(st->reg);
if (ret < 0)
- goto error_disable_reg;
+ return ret;
st->vref_mv = ret / 1000;
st->mclk = devm_clk_get(&client->dev, "mclk");
- if (IS_ERR(st->mclk) && PTR_ERR(st->mclk) != -ENOENT) {
- ret = PTR_ERR(st->mclk);
- goto error_disable_reg;
- }
+ if (IS_ERR(st->mclk) && PTR_ERR(st->mclk) != -ENOENT)
+ return PTR_ERR(st->mclk);
if (!IS_ERR(st->mclk)) {
ret = clk_prepare_enable(st->mclk);
if (ret < 0)
- goto error_disable_reg;
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev,
+ ad5933_clk_disable,
+ st);
+ if (ret)
+ return ret;
+
ext_clk_hz = clk_get_rate(st->mclk);
}
@@ -742,41 +766,15 @@ static int ad5933_probe(struct i2c_client *client,
indio_dev->channels = ad5933_channels;
indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
- ret = ad5933_register_ring_funcs_and_init(indio_dev);
+ ret = ad5933_register_ring_funcs_and_init(&client->dev, indio_dev);
if (ret)
- goto error_disable_mclk;
+ return ret;
ret = ad5933_setup(st);
if (ret)
- goto error_unreg_ring;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring;
-
- return 0;
-
-error_unreg_ring:
- iio_kfifo_free(indio_dev->buffer);
-error_disable_mclk:
- clk_disable_unprepare(st->mclk);
-error_disable_reg:
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static int ad5933_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ad5933_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_kfifo_free(indio_dev->buffer);
- regulator_disable(st->reg);
- clk_disable_unprepare(st->mclk);
+ return ret;
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id ad5933_id[] = {
@@ -801,7 +799,6 @@ static struct i2c_driver ad5933_driver = {
.of_match_table = ad5933_of_match,
},
.probe = ad5933_probe,
- .remove = ad5933_remove,
.id_table = ad5933_id,
};
module_i2c_driver(ad5933_driver);
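The ad5933 conversion is the standard devm unwind pattern: devm_add_action_or_reset() registers an undo callback that devres runs in reverse order on probe failure or device unbind, which is what lets the goto ladder and the whole remove() callback go away. The _or_reset variant also runs the action immediately if registration itself fails. Minimal sketch of one step (names illustrative):

	static void my_reg_disable(void *data)
	{
		regulator_disable(data);
	}

	static int enable_vref(struct device *dev, struct regulator *reg)
	{
		int ret = regulator_enable(reg);

		if (ret)
			return ret;
		/* disables the regulator at once if registration fails */
		return devm_add_action_or_reset(dev, my_reg_disable, reg);
	}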
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 4b25a3a314ed..ed404355ea4c 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -130,17 +130,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
static int ad2s1210_config_read(struct ad2s1210_state *st,
unsigned char address)
{
- struct spi_transfer xfer = {
- .len = 2,
- .rx_buf = st->rx,
- .tx_buf = st->tx,
+ struct spi_transfer xfers[] = {
+ {
+ .len = 1,
+ .rx_buf = &st->rx[0],
+ .tx_buf = &st->tx[0],
+ .cs_change = 1,
+ }, {
+ .len = 1,
+ .rx_buf = &st->rx[1],
+ .tx_buf = &st->tx[1],
+ },
};
int ret = 0;
ad2s1210_set_mode(MOD_CONFIG, st);
st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
st->tx[1] = AD2S1210_REG_FAULT;
- ret = spi_sync_transfer(st->sdev, &xfer, 1);
+ ret = spi_sync_transfer(st->sdev, xfers, 2);
if (ret < 0)
return ret;
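The ad2s1210 read is split into two single-byte transfers with cs_change = 1 on the first, so chip select is deasserted between the address byte and the data byte and the device sees two framed 8-bit operations rather than one 16-bit burst. Sketch of the idiom (function name illustrative):

	static int spi_read_reg_split(struct spi_device *spi, u8 addr, u8 *val)
	{
		u8 tx0 = addr, tx1 = 0;
		struct spi_transfer xfers[] = {
			{ .len = 1, .tx_buf = &tx0, .cs_change = 1 },
			{ .len = 1, .tx_buf = &tx1, .rx_buf = val },
		};

		return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
	}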
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
index 7b00d7069e21..358d7b2f4ad1 100644
--- a/drivers/staging/kpc2000/kpc2000/core.c
+++ b/drivers/staging/kpc2000/kpc2000/core.c
@@ -298,7 +298,6 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
{
int err = 0;
struct kp2000_device *pcard;
- int rv;
unsigned long reg_bar_phys_addr;
unsigned long reg_bar_phys_len;
unsigned long dma_bar_phys_addr;
@@ -445,11 +444,11 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
if (err < 0)
goto err_release_dma;
- rv = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED,
- pcard->name, pcard);
- if (rv) {
+ err = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED,
+ pcard->name, pcard);
+ if (err) {
dev_err(&pcard->pdev->dev,
- "%s: failed to request_irq: %d\n", __func__, rv);
+ "%s: failed to request_irq: %d\n", __func__, err);
goto err_disable_msi;
}
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index e59a846bc909..4bb1eca6f597 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -22,6 +22,8 @@ if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
source "drivers/staging/media/allegro-dvt/Kconfig"
+source "drivers/staging/media/atomisp/Kconfig"
+
source "drivers/staging/media/hantro/Kconfig"
source "drivers/staging/media/imx/Kconfig"
@@ -30,10 +32,14 @@ source "drivers/staging/media/meson/vdec/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/rkvdec/Kconfig"
+
source "drivers/staging/media/sunxi/Kconfig"
source "drivers/staging/media/tegra-vde/Kconfig"
+source "drivers/staging/media/tegra-video/Kconfig"
+
source "drivers/staging/media/ipu3/Kconfig"
source "drivers/staging/media/soc_camera/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 23c682461b62..71a47b61836d 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro-dvt/
+obj-$(CONFIG_INTEL_ATOMISP) += atomisp/
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
+obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rkvdec/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
+obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/
obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_HANTRO) += hantro/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
new file mode 100644
index 000000000000..c4f3049b0706
--- /dev/null
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -0,0 +1,36 @@
+menuconfig INTEL_ATOMISP
+ bool "Enable support to Intel Atom ISP camera drivers"
+ depends on X86 && EFI && PCI && ACPI
+ select IOSF_MBI
+ select MEDIA_CONTROLLER
+ select COMMON_CLK
+ help
+ Enable support for the Intel ISP2 camera interfaces and MIPI
+ sensor drivers.
+
+config VIDEO_ATOMISP
+ tristate "Intel Atom Image Signal Processor Driver"
+ depends on VIDEO_V4L2 && INTEL_ATOMISP
+ select IOSF_MBI
+ select VIDEOBUF_VMALLOC
+ ---help---
+ Say Y here if your platform supports the Intel Atom SoC
+ camera imaging subsystem.
+ To compile this driver as a module, choose M here: the
+ module will be called atomisp.
+
+config VIDEO_ATOMISP_ISP2401
+ bool "VIDEO_ATOMISP_ISP2401"
+ depends on VIDEO_ATOMISP
+ help
+ Enable support for Atom ISP2401-based boards.
+
+ Select this option for Anniedale (Merrifield+ / Moorefield)
+ and Cherrytrail SoCs.
+
+ Disabling it enables support for Atom ISP2400-based boards
+ (Merrifield and Baytrail SoCs).
+
+if VIDEO_ATOMISP
+source "drivers/staging/media/atomisp/i2c/Kconfig"
+endif
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
new file mode 100644
index 000000000000..eedecd49bbf4
--- /dev/null
+++ b/drivers/staging/media/atomisp/Makefile
@@ -0,0 +1,363 @@
+#
+# Makefile for camera drivers.
+#
+obj-$(CONFIG_INTEL_ATOMISP) += i2c/
+obj-$(CONFIG_VIDEO_ATOMISP) += atomisp.o
+obj-$(CONFIG_VIDEO_ATOMISP) += pci/atomisp_gmin_platform.o
+
+# While on staging, keep debug enabled
+DEFINES += -DDEBUG
+
+atomisp = $(srctree)/drivers/staging/media/atomisp/
+
+# SPDX-License-Identifier: GPL-2.0
+atomisp-objs += \
+ pci/atomisp_acc.o \
+ pci/atomisp_cmd.o \
+ pci/atomisp_compat_css20.o \
+ pci/atomisp_compat_ioctl32.o \
+ pci/atomisp_csi2.o \
+ pci/atomisp_drvfs.o \
+ pci/atomisp_file.o \
+ pci/atomisp_fops.o \
+ pci/atomisp_ioctl.o \
+ pci/atomisp_subdev.o \
+ pci/atomisp_tpg.o \
+ pci/atomisp_v4l2.o \
+ pci/sh_css_firmware.o \
+ pci/sh_css_host_data.o \
+ pci/sh_css_hrt.o \
+ pci/sh_css_metadata.o \
+ pci/sh_css_metrics.o \
+ pci/sh_css_mipi.o \
+ pci/sh_css_mmu.o \
+ pci/sh_css_morph.o \
+ pci/sh_css.o \
+ pci/sh_css_param_dvs.o \
+ pci/sh_css_param_shading.o \
+ pci/sh_css_params.o \
+ pci/sh_css_pipe.o \
+ pci/sh_css_properties.o \
+ pci/sh_css_shading.o \
+ pci/sh_css_sp.o \
+ pci/sh_css_stream_format.o \
+ pci/sh_css_stream.o \
+ pci/sh_css_version.o \
+ pci/base/circbuf/src/circbuf.o \
+ pci/base/refcount/src/refcount.o \
+ pci/camera/pipe/src/pipe_binarydesc.o \
+ pci/camera/pipe/src/pipe_stagedesc.o \
+ pci/camera/pipe/src/pipe_util.o \
+ pci/camera/util/src/util.o \
+ pci/hmm/hmm_bo.o \
+ pci/hmm/hmm_dynamic_pool.o \
+ pci/hmm/hmm.o \
+ pci/hmm/hmm_reserved_pool.o \
+ pci/hmm/hmm_vm.o \
+ pci/hrt/hive_isp_css_mm_hrt.o \
+ pci/ia_css_device_access.o \
+ pci/ia_css_memory_access.o \
+ pci/isp/kernels/aa/aa_2/ia_css_aa2.host.o \
+ pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.o \
+ pci/isp/kernels/anr/anr_2/ia_css_anr2.host.o \
+ pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.o \
+ pci/isp/kernels/bh/bh_2/ia_css_bh.host.o \
+ pci/isp/kernels/bnlm/ia_css_bnlm.host.o \
+ pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.o \
+ pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.o \
+ pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.o \
+ pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.o \
+ pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.o \
+ pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.o \
+ pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.o \
+ pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.o \
+ pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.o \
+ pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.o \
+ pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.o \
+ pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.o \
+ pci/isp/kernels/de/de_1.0/ia_css_de.host.o \
+ pci/isp/kernels/de/de_2/ia_css_de2.host.o \
+ pci/isp/kernels/dpc2/ia_css_dpc2.host.o \
+ pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.o \
+ pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.o \
+ pci/isp/kernels/eed1_8/ia_css_eed1_8.host.o \
+ pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.o \
+ pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.o \
+ pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.o \
+ pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.o \
+ pci/isp/kernels/gc/gc_2/ia_css_gc2.host.o \
+ pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.o \
+ pci/isp/kernels/hdr/ia_css_hdr.host.o \
+ pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.o \
+ pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.o \
+ pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.o \
+ pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.o \
+ pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.o \
+ pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.o \
+ pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.o \
+ pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.o \
+ pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.o \
+ pci/isp/kernels/ob/ob2/ia_css_ob2.host.o \
+ pci/isp/kernels/output/output_1.0/ia_css_output.host.o \
+ pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.o \
+ pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.o \
+ pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.o \
+ pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.o \
+ pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.o \
+ pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.o \
+ pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.o \
+ pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.o \
+ pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.o \
+ pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.o \
+ pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.o \
+ pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.o \
+ pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.o \
+ pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.o \
+ pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.o \
+ pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.o \
+ pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.o \
+ pci/memory_realloc.o \
+ pci/mmu/isp_mmu.o \
+ pci/mmu/sh_mmu_mrfld.o \
+ pci/runtime/binary/src/binary.o \
+ pci/runtime/bufq/src/bufq.o \
+ pci/runtime/debug/src/ia_css_debug.o \
+ pci/runtime/eventq/src/eventq.o \
+ pci/runtime/event/src/event.o \
+ pci/runtime/frame/src/frame.o \
+ pci/runtime/ifmtr/src/ifmtr.o \
+ pci/runtime/inputfifo/src/inputfifo.o \
+ pci/runtime/isp_param/src/isp_param.o \
+ pci/runtime/isys/src/csi_rx_rmgr.o \
+ pci/runtime/isys/src/ibuf_ctrl_rmgr.o \
+ pci/runtime/isys/src/isys_dma_rmgr.o \
+ pci/runtime/isys/src/isys_init.o \
+ pci/runtime/isys/src/isys_stream2mmio_rmgr.o \
+ pci/runtime/isys/src/rx.o \
+ pci/runtime/isys/src/virtual_isys.o \
+ pci/runtime/pipeline/src/pipeline.o \
+ pci/runtime/queue/src/queue_access.o \
+ pci/runtime/queue/src/queue.o \
+ pci/runtime/rmgr/src/rmgr.o \
+ pci/runtime/rmgr/src/rmgr_vbuf.o \
+ pci/runtime/spctrl/src/spctrl.o \
+ pci/runtime/timer/src/timer.o \
+ pci/hive_isp_css_common/host/debug.o \
+ pci/hive_isp_css_common/host/dma.o \
+ pci/hive_isp_css_common/host/event_fifo.o \
+ pci/hive_isp_css_common/host/fifo_monitor.o \
+ pci/hive_isp_css_common/host/gdc.o \
+ pci/hive_isp_css_common/host/gp_device.o \
+ pci/hive_isp_css_common/host/gp_timer.o \
+ pci/hive_isp_css_common/host/hmem.o \
+ pci/hive_isp_css_common/host/input_formatter.o \
+ pci/hive_isp_css_common/host/input_system.o \
+ pci/hive_isp_css_common/host/irq.o \
+ pci/hive_isp_css_common/host/isp.o \
+ pci/hive_isp_css_common/host/mmu.o \
+ pci/hive_isp_css_common/host/sp.o \
+ pci/hive_isp_css_common/host/timed_ctrl.o \
+ pci/hive_isp_css_common/host/vmem.o \
+ pci/hive_isp_css_shared/host/tag.o \
+
+obj-byt = \
+ pci/css_2400_system/hive/ia_css_isp_configs.o \
+ pci/css_2400_system/hive/ia_css_isp_params.o \
+ pci/css_2400_system/hive/ia_css_isp_states.o \
+
+# These will be needed when we manage to merge the CHT support cleanly
+# into the driver. Keep them here, handy, for when we get to that point.
+#
+
+obj-cht = \
+ pci/css_2401_system/hive/ia_css_isp_configs.o \
+ pci/css_2401_system/hive/ia_css_isp_params.o \
+ pci/css_2401_system/hive/ia_css_isp_states.o \
+ pci/css_2401_system/host/csi_rx.o \
+ pci/css_2401_system/host/ibuf_ctrl.o \
+ pci/css_2401_system/host/isys_dma.o \
+ pci/css_2401_system/host/isys_irq.o \
+ pci/css_2401_system/host/isys_stream2mmio.o
+
+INCLUDES += \
+ -I$(atomisp)/ \
+ -I$(atomisp)/include/ \
+ -I$(atomisp)/include/hmm/ \
+ -I$(atomisp)/include/mmu/ \
+ -I$(atomisp)/pci/ \
+ -I$(atomisp)/pci/hrt/ \
+ -I$(atomisp)/pci/base/circbuf/interface/ \
+ -I$(atomisp)/pci/base/refcount/interface/ \
+ -I$(atomisp)/pci/camera/pipe/interface/ \
+ -I$(atomisp)/pci/camera/util/interface/ \
+ -I$(atomisp)/pci/hive_isp_css_common/ \
+ -I$(atomisp)/pci/hive_isp_css_common/host/ \
+ -I$(atomisp)/pci/hive_isp_css_include/ \
+ -I$(atomisp)/pci/hive_isp_css_include/device_access/ \
+ -I$(atomisp)/pci/hive_isp_css_include/host/ \
+ -I$(atomisp)/pci/hive_isp_css_include/memory_access/ \
+ -I$(atomisp)/pci/hive_isp_css_shared/ \
+ -I$(atomisp)/pci/hive_isp_css_shared/host/ \
+ -I$(atomisp)/pci/isp/kernels/ \
+ -I$(atomisp)/pci/isp/kernels/aa/aa_2/ \
+ -I$(atomisp)/pci/isp/kernels/anr/anr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/anr/anr_2/ \
+ -I$(atomisp)/pci/isp/kernels/bh/bh_2/ \
+ -I$(atomisp)/pci/isp/kernels/bnlm/ \
+ -I$(atomisp)/pci/isp/kernels/bnr/ \
+ -I$(atomisp)/pci/isp/kernels/bnr/bnr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/bnr/bnr2_2/ \
+ -I$(atomisp)/pci/isp/kernels/cnr/ \
+ -I$(atomisp)/pci/isp/kernels/cnr/cnr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/cnr/cnr_2/ \
+ -I$(atomisp)/pci/isp/kernels/conversion/ \
+ -I$(atomisp)/pci/isp/kernels/conversion/conversion_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/copy_output/ \
+ -I$(atomisp)/pci/isp/kernels/copy_output/copy_output_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/crop/ \
+ -I$(atomisp)/pci/isp/kernels/crop/crop_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/csc/ \
+ -I$(atomisp)/pci/isp/kernels/csc/csc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ctc/ \
+ -I$(atomisp)/pci/isp/kernels/ctc/ctc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ctc/ctc1_5/ \
+ -I$(atomisp)/pci/isp/kernels/ctc/ctc2/ \
+ -I$(atomisp)/pci/isp/kernels/de/ \
+ -I$(atomisp)/pci/isp/kernels/de/de_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/de/de_2/ \
+ -I$(atomisp)/pci/isp/kernels/dp/ \
+ -I$(atomisp)/pci/isp/kernels/dpc2/ \
+ -I$(atomisp)/pci/isp/kernels/dp/dp_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/dvs/ \
+ -I$(atomisp)/pci/isp/kernels/dvs/dvs_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/eed1_8/ \
+ -I$(atomisp)/pci/isp/kernels/fc/ \
+ -I$(atomisp)/pci/isp/kernels/fc/fc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/fixedbds/ \
+ -I$(atomisp)/pci/isp/kernels/fixedbds/fixedbds_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/fpn/ \
+ -I$(atomisp)/pci/isp/kernels/fpn/fpn_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/gc/ \
+ -I$(atomisp)/pci/isp/kernels/gc/gc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/gc/gc_2/ \
+ -I$(atomisp)/pci/isp/kernels/hdr/ \
+ -I$(atomisp)/pci/isp/kernels/ipu2_io_ls/ \
+ -I$(atomisp)/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ \
+ -I$(atomisp)/pci/isp/kernels/ipu2_io_ls/common/ \
+ -I$(atomisp)/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ \
+ -I$(atomisp)/pci/isp/kernels/iterator/ \
+ -I$(atomisp)/pci/isp/kernels/iterator/iterator_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/macc/ \
+ -I$(atomisp)/pci/isp/kernels/macc/macc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/macc/macc1_5/ \
+ -I$(atomisp)/pci/isp/kernels/norm/ \
+ -I$(atomisp)/pci/isp/kernels/norm/norm_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ob/ \
+ -I$(atomisp)/pci/isp/kernels/ob/ob_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ob/ob2/ \
+ -I$(atomisp)/pci/isp/kernels/output/ \
+ -I$(atomisp)/pci/isp/kernels/output/output_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/qplane/ \
+ -I$(atomisp)/pci/isp/kernels/qplane/qplane_2/ \
+ -I$(atomisp)/pci/isp/kernels/raw/ \
+ -I$(atomisp)/pci/isp/kernels/raw_aa_binning/ \
+ -I$(atomisp)/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/raw/raw_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ref/ \
+ -I$(atomisp)/pci/isp/kernels/ref/ref_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/s3a/ \
+ -I$(atomisp)/pci/isp/kernels/s3a/s3a_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/sc/ \
+ -I$(atomisp)/pci/isp/kernels/sc/sc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/sdis/ \
+ -I$(atomisp)/pci/isp/kernels/sdis/common/ \
+ -I$(atomisp)/pci/isp/kernels/sdis/sdis_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/sdis/sdis_2/ \
+ -I$(atomisp)/pci/isp/kernels/tdf/ \
+ -I$(atomisp)/pci/isp/kernels/tdf/tdf_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/tnr/ \
+ -I$(atomisp)/pci/isp/kernels/tnr/tnr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/tnr/tnr3/ \
+ -I$(atomisp)/pci/isp/kernels/uds/ \
+ -I$(atomisp)/pci/isp/kernels/uds/uds_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/vf/ \
+ -I$(atomisp)/pci/isp/kernels/vf/vf_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/wb/ \
+ -I$(atomisp)/pci/isp/kernels/wb/wb_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/xnr/ \
+ -I$(atomisp)/pci/isp/kernels/xnr/xnr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/xnr/xnr_3.0/ \
+ -I$(atomisp)/pci/isp/kernels/ynr/ \
+ -I$(atomisp)/pci/isp/kernels/ynr/ynr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ynr/ynr_2/ \
+ -I$(atomisp)/pci/isp/modes/interface/ \
+ -I$(atomisp)/pci/runtime/binary/interface/ \
+ -I$(atomisp)/pci/runtime/bufq/interface/ \
+ -I$(atomisp)/pci/runtime/debug/interface/ \
+ -I$(atomisp)/pci/runtime/event/interface/ \
+ -I$(atomisp)/pci/runtime/eventq/interface/ \
+ -I$(atomisp)/pci/runtime/frame/interface/ \
+ -I$(atomisp)/pci/runtime/ifmtr/interface/ \
+ -I$(atomisp)/pci/runtime/inputfifo/interface/ \
+ -I$(atomisp)/pci/runtime/isp_param/interface/ \
+ -I$(atomisp)/pci/runtime/isys/interface/ \
+ -I$(atomisp)/pci/runtime/isys/src/ \
+ -I$(atomisp)/pci/runtime/pipeline/interface/ \
+ -I$(atomisp)/pci/runtime/queue/interface/ \
+ -I$(atomisp)/pci/runtime/queue/src/ \
+ -I$(atomisp)/pci/runtime/rmgr/interface/ \
+ -I$(atomisp)/pci/runtime/spctrl/interface/ \
+ -I$(atomisp)/pci/runtime/tagger/interface/
+
+INCLUDES_byt += \
+ -I$(atomisp)/pci/css_2400_system/ \
+ -I$(atomisp)/pci/css_2400_system/hive/ \
+ -I$(atomisp)/pci/css_2400_system/hrt/ \
+
+INCLUDES_cht += \
+ -I$(atomisp)/pci/css_2401_system/ \
+ -I$(atomisp)/pci/css_2401_system/host/ \
+ -I$(atomisp)/pci/css_2401_system/hive/ \
+ -I$(atomisp)/pci/css_2401_system/hrt/ \
+
+# -I$(atomisp)/pci/css_2401_system/hrt/ \
+# -I$(atomisp)/pci/css_2401_system/hive_isp_css_2401_system_generated/ \
+
+
+ifeq ($(CONFIG_ION),y)
+INCLUDES += -I$(srctree)/drivers/staging/android/ion
+endif
+
+DEFINES += -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
+#DEFINES += -DUSE_DYNAMIC_BIN
+#DEFINES += -DISP_POWER_GATING
+#DEFINES += -DUSE_INTERRUPTS
+#DEFINES += -DUSE_SSSE3
+#DEFINES += -DPUNIT_CAMERA_BUSY
+#DEFINES += -DUSE_KMEM_CACHE
+
+ifeq ($(CONFIG_VIDEO_ATOMISP_ISP2401),y)
+atomisp-objs += $(obj-cht)
+INCLUDES += $(INCLUDES_cht)
+DEFINES += -DISP2401 -DISP2401_NEW_INPUT_SYSTEM -DSYSTEM_hive_isp_css_2401_system
+else
+atomisp-objs += $(obj-byt)
+INCLUDES += $(INCLUDES_byt)
+DEFINES += -DISP2400 -DSYSTEM_hive_isp_css_2400_system
+endif
+
+ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
+
+# HACK! While this driver is in bad shape, don't enable several warnings
+# that would be otherwise enabled with W=1
+ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+ccflags-y += $(call cc-disable-warning, missing-prototypes)
+ccflags-y += $(call cc-disable-warning, missing-declarations)
+ccflags-y += $(call cc-disable-warning, suggest-attribute=format)
+ccflags-y += $(call cc-disable-warning, unused-const-variable)
+ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
new file mode 100644
index 000000000000..52683a704223
--- /dev/null
+++ b/drivers/staging/media/atomisp/TODO
@@ -0,0 +1,89 @@
+1. A single AtomISP driver needs to be implemented to support both
+ Baytrail (BYT) and Cherrytrail (CHT) platforms at the same time.
+ The current driver is a mechanical and hand combined merge of the
+ two using several runtime macros, plus some ifdef ISP2401 to select the
+ CHT version. Yet, there are some ISP-specific headers that change the
+ driver's behavior during compile time.
+
+2. The file structure needs to get tidied up to resemble a normal Linux
+ driver.
+
+3. Lots of the midlayer glue, unused code and abstractions need removing.
+
+4. The sensor drivers read MIPI settings from EFI variables or default to the
+ settings hard-coded in the platform data file for different platforms.
+ This could be improved by adding support for _DSM tables.
+
+5. The sensor drivers use the PMIC and regulator framework APIs. In an ideal
+ world they would use ACPI, but that's not how the existing devices work.
+
+6. The AtomISP driver includes some special IOCTLs (ATOMISP_IOC_XXXX_XXXX)
+ and controls that require some cleanup.
+
+7. Correct the coding style. Please don't send coding style patches for this
+ driver until the other work is done.
+
+8. The ISP code has some dependencies on the exact firmware version.
+ The version is defined in pci/sh_css_firmware.c:
+ BYT:
+ static const char *isp2400_release_version = STR(irci_stable_candrpv_0415_20150521_0458);
+
+ CHT:
+ static const char *isp2401_release_version = STR(irci_ecr - master_20150911_0724);
+
+ Those versions don't seem to be available anymore. In the tests we've
+ done so far, this version also seems to work for isp2401:
+
+ irci_stable_candrpv_0415_20150521_0458
+
+ At some point we may need to round up a few driver versions and see if
+ there are any specific things that can be done to fold in support for
+ multiple firmware versions.
+
+9. Switch to the V4L2 async API to set up sensor, lens and flash devices.
+ Control those devices using V4L2 sub-device API without custom
+ extensions.
+
+10. Switch to the standard V4L2 sub-device API for sensor and lens. In
+ particular, the user space API needs to support V4L2 controls as
+ defined in the V4L2 spec and references to atomisp must be removed from
+ these drivers.
+
+11. Use the LED flash API for flash LED drivers such as the LM3554 (which
+ already has a LED class driver).
+
+12. Switch from videobuf1 to videobuf2. Videobuf1 is being removed!
+
+13. There is some memory management code inside the hmm/ directory that
+ seems to be forked from Kernel 3.10. Get rid of it, making the driver
+ use a more standard memory management module.
+
+14. While the driver probes the hardware and reports itself as a
+ V4L2 driver, there are still some issues preventing it from
+ streaming (at least it doesn't work with standard V4L2 applications;
+ a custom-made app for this driver hasn't been tested yet).
+ The related bugs and issues preventing it from working need to
+ be solved.
+
+Limitations:
+
+1. To test the patches, you also need the ISP firmware
+
+ for BYT: /lib/firmware/shisp_2400b0_v21.bin
+ for CHT: /lib/firmware/shisp_2401a0_v21.bin
+
+ The firmware files will usually be found in /etc/firmware on an Android
+ device but can also be extracted from the upgrade kit if you've managed
+ to lose them somehow.
+
+2. Without a 3A library the capture behaviour is not very good. To take a
+ good picture, you need to tune the ISP parameters via IOCTL functions or
+ use a 3A library such as libxcam.
+
+3. The driver is intended to drive the PCI exposed versions of the device.
+ It will not detect those devices enumerated via ACPI as a field of the
+ i915 GPU driver.
+
+4. The driver supports only v2 of the IPU/Camera. It will not work with the
+ versions of the hardware in other SoCs.
+
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
new file mode 100644
index 000000000000..f7f7177b9b37
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -0,0 +1,86 @@
+#
+# Kconfig for sensor drivers
+#
+
+source "drivers/staging/media/atomisp/i2c/ov5693/Kconfig"
+
+config VIDEO_ATOMISP_OV2722
+ tristate "OVT ov2722 sensor support"
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OVT
+ OV2722 raw camera.
+
+ The OV2722 is a 2M raw sensor.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_ATOMISP_GC2235
+ tristate "Galaxy gc2235 sensor support"
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the GalaxyCore
+ GC2235 raw camera.
+
+ The GC2235 is a 2M raw sensor.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_ATOMISP_MSRLIST_HELPER
+ tristate "Helper library to load, parse and apply large register lists."
+ depends on I2C
+ ---help---
+ This is a helper library to be used from a sensor driver to load, parse
+ and apply large register lists.
+
+ To compile this driver as a module, choose M here: the
+ module will be called libmsrlisthelper.
+
+config VIDEO_ATOMISP_MT9M114
+ tristate "Aptina mt9m114 sensor support"
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Aptina
+ mt9m114 1.3 Mpixel camera.
+
+ The mt9m114 is a video camera sensor.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_ATOMISP_GC0310
+ tristate "GC0310 sensor support"
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the GalaxyCore
+ GC0310 0.3MP sensor.
+
+config VIDEO_ATOMISP_OV2680
+ tristate "Omnivision OV2680 sensor support"
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ OV2680 raw camera.
+
+ The OV2680 is a 2M raw sensor.
+
+ It currently only works with the atomisp driver.
+
+#
+# Kconfig for flash drivers
+#
+
+config VIDEO_ATOMISP_LM3554
+ tristate "LM3554 flash light driver"
+ depends on ACPI
+ depends on VIDEO_V4L2 && I2C
+ ---help---
+ This is a Video4Linux2 sub-dev driver for the LM3554
+ flash light.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lm3554.
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
new file mode 100644
index 000000000000..8d022986e199
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for sensor drivers
+#
+
+obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += ov5693/
+obj-$(CONFIG_VIDEO_ATOMISP_MT9M114) += atomisp-mt9m114.o
+obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV2680) += atomisp-ov2680.o
+obj-$(CONFIG_VIDEO_ATOMISP_GC0310) += atomisp-gc0310.o
+
+obj-$(CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER) += atomisp-libmsrlisthelper.o
+
+#
+# Makefile for flash drivers
+#
+
+obj-$(CONFIG_VIDEO_ATOMISP_LM3554) += atomisp-lm3554.o
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
new file mode 100644
index 000000000000..ad1bd7d6a02b
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -0,0 +1,1406 @@
+/*
+ * Support for GalaxyCore GC0310 VGA camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include <linux/io.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+
+#include "gc0310.h"
+
+/* i2c read/write stuff */
+static int gc0310_read_reg(struct i2c_client *client,
+ u16 data_length, u8 reg, u8 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[1];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != GC0310_8BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = data;
+
+ /* the register address is a single byte */
+ data[0] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* GC0310 registers are a single byte wide */
+ if (data_length == GC0310_8BIT)
+ *val = (u8)data[0];
+
+ return 0;
+}
+
+static int gc0310_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int gc0310_write_reg(struct i2c_client *client, u16 data_length,
+ u8 reg, u8 val)
+{
+ int ret;
+ unsigned char data[2] = {0};
+ u8 *wreg = (u8 *)data;
+ const u16 len = data_length + sizeof(u8); /* 8-bit address + data */
+
+ if (data_length != GC0310_8BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* the register address goes out first */
+ *wreg = (u8)(reg & 0xff);
+
+ if (data_length == GC0310_8BIT)
+ data[1] = (u8)(val);
+
+ ret = gc0310_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+/*
+ * gc0310_write_reg_array - Initializes a list of GC0310 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function initializes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * consecutive data in a single i2c_transfer().
+ *
+ * __gc0310_flush_reg_array, __gc0310_buf_reg_array() and
+ * __gc0310_write_reg_is_consecutive() are internal functions to
+ * gc0310_write_reg_array() and should not be used anywhere else.
+ *
+ */
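+/*
+ * A minimal sketch of how a register list for gc0310_write_reg_array()
+ * is laid out; the register addresses and values below are made up
+ * purely for illustration:
+ *
+ *	static const struct gc0310_reg example_list[] = {
+ *		{ .type = GC0310_8BIT, .reg = 0x40, .val = 0x01 },
+ *		{ .type = GC0310_8BIT, .reg = 0x41, .val = 0x02 }, <- batched with 0x40
+ *		{ .type = GC0310_TOK_DELAY, .val = 50 },  <- flush, then 50 ms sleep
+ *		{ .type = GC0310_TOK_TERM },
+ *	};
+ */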
+
+static int __gc0310_flush_reg_array(struct i2c_client *client,
+ struct gc0310_write_ctrl *ctrl)
+{
+ u16 size;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u8) + ctrl->index; /* 8-bit address + data */
+ ctrl->buffer.addr = (u8)(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return gc0310_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __gc0310_buf_reg_array(struct i2c_client *client,
+ struct gc0310_write_ctrl *ctrl,
+ const struct gc0310_reg *next)
+{
+ int size;
+
+ switch (next->type) {
+ case GC0310_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+ /*
+ * Flush the buffer whenever it cannot hold the next data byte, so
+ * that the following item never overflows it.
+ */
+ if (ctrl->index + sizeof(u8) >= GC0310_MAX_WRITE_BUF_SIZE)
+ return __gc0310_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
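+/*
+ * Example: with ctrl->buffer.addr == 0x40 and two data bytes already
+ * buffered (ctrl->index == 2), only a write to register 0x42 counts
+ * as consecutive.
+ */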
+static int __gc0310_write_reg_is_consecutive(struct i2c_client *client,
+ struct gc0310_write_ctrl *ctrl,
+ const struct gc0310_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int gc0310_write_reg_array(struct i2c_client *client,
+ const struct gc0310_reg *reglist)
+{
+ const struct gc0310_reg *next = reglist;
+ struct gc0310_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != GC0310_TOK_TERM; next++) {
+ switch (next->type & GC0310_TOK_MASK) {
+ case GC0310_TOK_DELAY:
+ err = __gc0310_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+ /*
+ * If next address is not consecutive, data needs to be
+ * flushed before proceed.
+ */
+ if (!__gc0310_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __gc0310_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __gc0310_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __gc0310_flush_reg_array(client, &ctrl);
+}
+
+static int gc0310_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (GC0310_FOCAL_LENGTH_NUM << 16) | GC0310_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int gc0310_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* constant f-number for this sensor module */
+ *val = (GC0310_F_NUMBER_DEFAULT_NUM << 16) | GC0310_F_NUMBER_DEM;
+ return 0;
+}
+
+static int gc0310_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (GC0310_F_NUMBER_DEFAULT_NUM << 24) |
+ (GC0310_F_NUMBER_DEM << 16) |
+ (GC0310_F_NUMBER_DEFAULT_NUM << 8) | GC0310_F_NUMBER_DEM;
+ return 0;
+}
+
+static int gc0310_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ *val = gc0310_res[dev->fmt_idx].bin_factor_x;
+
+ return 0;
+}
+
+static int gc0310_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ *val = gc0310_res[dev->fmt_idx].bin_factor_y;
+
+ return 0;
+}
+
+static int gc0310_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct gc0310_resolution *res)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct atomisp_sensor_mode_data *buf = &info->data;
+ u16 val;
+ u8 reg_val;
+ int ret;
+ unsigned int hori_blanking;
+ unsigned int vert_blanking;
+ unsigned int sh_delay;
+
+ if (!info)
+ return -EINVAL;
+
+ /* pixel clock calculation */
+ dev->vt_pix_clk_freq_mhz = 14400000; /* 14.4 MHz */
+ buf->vt_pix_clk_freq_mhz = dev->vt_pix_clk_freq_mhz;
+ pr_info("vt_pix_clk_freq_mhz=%d\n", buf->vt_pix_clk_freq_mhz);
+
+ /* get integration time */
+ buf->coarse_integration_time_min = GC0310_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ GC0310_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = GC0310_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ GC0310_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = GC0310_FINE_INTG_TIME_MIN;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
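+ /*
+ * Each 16-bit quantity below is split across a _H/_L register pair
+ * and combined as (high << 8) | low.
+ */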
+ /* Getting crop_horizontal_start */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_CROP_START_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_CROP_START_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = val | (reg_val & 0xFF);
+ pr_info("crop_horizontal_start=%d\n", buf->crop_horizontal_start);
+
+ /* Getting crop_vertical_start */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_CROP_START_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_CROP_START_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = val | (reg_val & 0xFF);
+ pr_info("crop_vertical_start=%d\n", buf->crop_vertical_start);
+
+ /* Getting output_width */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_OUTSIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_OUTSIZE_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = val | (reg_val & 0xFF);
+ pr_info("output_width=%d\n", buf->output_width);
+
+ /* Getting output_height */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_OUTSIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_OUTSIZE_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = val | (reg_val & 0xFF);
+ pr_info("output_height=%d\n", buf->output_height);
+
+ buf->crop_horizontal_end = buf->crop_horizontal_start + buf->output_width - 1;
+ buf->crop_vertical_end = buf->crop_vertical_start + buf->output_height - 1;
+ pr_info("crop_horizontal_end=%d\n", buf->crop_horizontal_end);
+ pr_info("crop_vertical_end=%d\n", buf->crop_vertical_end);
+
+ /* Getting line_length_pck */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_BLANKING_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_BLANKING_L, &reg_val);
+ if (ret)
+ return ret;
+ hori_blanking = val | (reg_val & 0xFF);
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_SH_DELAY, &reg_val);
+ if (ret)
+ return ret;
+ sh_delay = reg_val;
+ buf->line_length_pck = buf->output_width + hori_blanking + sh_delay + 4;
+ pr_info("hori_blanking=%d sh_delay=%d line_length_pck=%d\n", hori_blanking,
+ sh_delay, buf->line_length_pck);
+
+ /* Getting frame_length_lines */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_BLANKING_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_BLANKING_L, &reg_val);
+ if (ret)
+ return ret;
+ vert_blanking = val | (reg_val & 0xFF);
+ buf->frame_length_lines = buf->output_height + vert_blanking;
+ pr_info("vert_blanking=%d frame_length_lines=%d\n", vert_blanking,
+ buf->frame_length_lines);
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
+
+static int gc0310_set_gain(struct v4l2_subdev *sd, int gain)
+
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 again, dgain;
+
+ if (gain < 0x20)
+ gain = 0x20;
+ if (gain > 0x80)
+ gain = 0x80;
+
+ if (gain >= 0x20 && gain < 0x40) {
+ again = 0x0; /* sqrt(2) */
+ dgain = gain;
+ } else {
+ again = 0x2; /* 2 * sqrt(2) */
+ dgain = gain / 2;
+ }
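+ /*
+ * Example: a requested gain of 0x50 lands in the upper range, so the
+ * 2 * sqrt(2) analog step is used with dgain = 0x50 / 2 = 0x28.
+ */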
+
+ pr_info("gain=0x%x again=0x%x dgain=0x%x\n", gain, again, dgain);
+
+ /* set analog gain */
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_AGC_ADJ, again);
+ if (ret)
+ return ret;
+
+ /* set digital gain */
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_DGC_ADJ, dgain);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __gc0310_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ pr_info("coarse_itg=%d gain=%d digitgain=%d\n", coarse_itg, gain, digitgain);
+
+ /* set exposure */
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_AEC_PK_EXPO_L,
+ coarse_itg & 0xff);
+ if (ret)
+ return ret;
+
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_AEC_PK_EXPO_H,
+ (coarse_itg >> 8) & 0x0f);
+ if (ret)
+ return ret;
+
+ ret = gc0310_set_gain(sd, gain);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int gc0310_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __gc0310_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long gc0310_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ int exp = exposure->integration_time[0];
+ int gain = exposure->gain[0];
+ int digitgain = exposure->gain[1];
+
+ /* a gain of zero is invalid, reject it */
+ if (gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return gc0310_set_exposure(sd, exp, gain, digitgain);
+}
+
+/* TODO: implement vertical flip */
+static int gc0310_v_flip(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+/* TODO: implement horizontal flip */
+static int gc0310_h_flip(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+static long gc0310_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return gc0310_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int gc0310_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 reg_v;
+ int ret;
+
+ /* get exposure */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_AEC_PK_EXPO_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ *value = reg_v;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_AEC_PK_EXPO_H,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ *value = *value + (reg_v << 8);
+err:
+ return ret;
+}
+
+static int gc0310_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gc0310_device *dev =
+ container_of(ctrl->handler, struct gc0310_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = gc0310_v_flip(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = gc0310_h_flip(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int gc0310_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gc0310_device *dev =
+ container_of(ctrl->handler, struct gc0310_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = gc0310_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = gc0310_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = gc0310_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = gc0310_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ ret = gc0310_g_bin_factor_x(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_VERT:
+ ret = gc0310_g_bin_factor_y(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = gc0310_s_ctrl,
+ .g_volatile_ctrl = gc0310_g_volatile_ctrl
+};
+
+static const struct v4l2_ctrl_config gc0310_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = GC0310_FOCAL_LENGTH_DEFAULT,
+ .max = GC0310_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = GC0310_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = GC0310_F_NUMBER_DEFAULT,
+ .max = GC0310_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = GC0310_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = GC0310_F_NUMBER_RANGE,
+ .max = GC0310_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = GC0310_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "horizontal binning factor",
+ .min = 0,
+ .max = GC0310_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vertical binning factor",
+ .min = 0,
+ .max = GC0310_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+};
+
+static int gc0310_init(struct v4l2_subdev *sd)
+{
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ pr_info("%s S\n", __func__);
+ mutex_lock(&dev->input_lock);
+
+ /* set initial registers */
+ ret = gc0310_write_reg_array(client, gc0310_reset_register);
+
+ /* restore settings */
+ gc0310_res = gc0310_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ mutex_unlock(&dev->input_lock);
+
+ pr_info("%s E\n", __func__);
+ return ret;
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = 0;
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ if (flag) {
+ /* The upstream module driver (written to Crystal
+ * Cove) had this logic to pulse the rails low first.
+ * This appears to break things on the MRD7 with the
+ * X-Powers PMIC...
+ *
+ * ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ * ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ * mdelay(50);
+ */
+ ret |= dev->platform_data->v1p8_ctrl(sd, 1);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ }
+
+ if (!flag || ret) {
+ ret |= dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* GPIO0 == "reset" (active low), GPIO1 == "power down" */
+ if (flag) {
+ /* Pulse reset, then release power down */
+ ret = dev->platform_data->gpio0_ctrl(sd, 0);
+ usleep_range(5000, 10000);
+ ret |= dev->platform_data->gpio0_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ ret |= dev->platform_data->gpio1_ctrl(sd, 0);
+ usleep_range(10000, 15000);
+ } else {
+ ret = dev->platform_data->gpio1_ctrl(sd, 1);
+ ret |= dev->platform_data->gpio0_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd);
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ pr_info("%s S\n", __func__);
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_gpio;
+ }
+
+ msleep(100);
+
+ pr_info("%s E\n", __func__);
+ return 0;
+
+fail_gpio:
+ dev->platform_data->flisclk_ctrl(sd, 0);
+fail_clk:
+ power_ctrl(sd, 0);
+fail_power:
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int gc0310_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ if (on == 0)
+ return power_down(sd);
+
+ ret = power_up(sd);
+ if (!ret)
+ return gc0310_init(sd);
+
+ return ret;
+}
+
+/*
+ * distance - calculate the distance
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the resolution and w/h.
+ * Resolutions with width/height smaller than w/h are not considered.
+ * Returns the value of the gap, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 800
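+/*
+ * Worked example: for a 640x480 mode and a requested size of 320x240,
+ * w_ratio = (640 << 13) / 320 = 16384 and h_ratio = (480 << 13) / 240
+ * = 16384, so match = |(16384 << 13) / 16384 - 8192| = 0 and distance()
+ * returns w_ratio + h_ratio = 32768.
+ */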
+static int distance(struct gc0310_resolution *res, u32 w, u32 h)
+{
+ unsigned int w_ratio = (res->width << 13) / w;
+ unsigned int h_ratio;
+ int match;
+
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - 8192);
+
+ if ((w_ratio < 8192) || (h_ratio < 8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ struct gc0310_resolution *tmp_res = NULL;
+
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &gc0310_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != gc0310_res[i].width)
+ continue;
+ if (h != gc0310_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+/* TODO: remove it. */
+static int startup(struct v4l2_subdev *sd)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ pr_info("%s S\n", __func__);
+
+ ret = gc0310_write_reg_array(client, gc0310_res[dev->fmt_idx].regs);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 write register err.\n");
+ return ret;
+ }
+
+ pr_info("%s E\n", __func__);
+ return ret;
+}
+
+static int gc0310_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *gc0310_info = NULL;
+ int ret = 0;
+ int idx = 0;
+
+ pr_info("%s S\n", __func__);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ gc0310_info = v4l2_get_subdev_hostdata(sd);
+ if (!gc0310_info)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = gc0310_res[N_RES - 1].width;
+ fmt->height = gc0310_res[N_RES - 1].height;
+ } else {
+ fmt->width = gc0310_res[idx].width;
+ fmt->height = gc0310_res[idx].height;
+ }
+ fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ printk("%s: before gc0310_write_reg_array %s\n", __func__,
+ gc0310_res[dev->fmt_idx].desc);
+ ret = startup(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 startup err\n");
+ goto err;
+ }
+
+ ret = gc0310_get_intg_factor(client, gc0310_info,
+ &gc0310_res[dev->fmt_idx]);
+ if (ret) {
+ dev_err(&client->dev, "failed to get integration_factor\n");
+ goto err;
+ }
+
+ pr_info("%s E\n", __func__);
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc0310_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = gc0310_res[dev->fmt_idx].width;
+ fmt->height = gc0310_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
+
+ return 0;
+}
+
+static int gc0310_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u8 high, low;
+ int ret;
+ u16 id;
+
+ pr_info("%s S\n", __func__);
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_SC_CMMN_CHIP_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "read sensor_id_high failed\n");
+ return -ENODEV;
+ }
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_SC_CMMN_CHIP_ID_L, &low);
+ if (ret) {
+ dev_err(&client->dev, "read sensor_id_low failed\n");
+ return -ENODEV;
+ }
+ id = ((((u16)high) << 8) | (u16)low);
+ pr_info("sensor ID = 0x%x\n", id);
+
+ if (id != GC0310_ID) {
+ dev_err(&client->dev, "sensor ID error, read id = 0x%x, target id = 0x%x\n", id,
+ GC0310_ID);
+ return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "detect gc0310 success\n");
+
+ pr_info("%s E\n", __func__);
+
+ return 0;
+}
+
+static int gc0310_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ pr_info("%s S enable=%d\n", __func__, enable);
+ mutex_lock(&dev->input_lock);
+
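+ /*
+ * The sequence below selects register page 3 (via
+ * GC0310_RESET_RELATED), toggles GC0310_SW_STREAM, and then
+ * switches back to register page 0.
+ */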
+ if (enable) {
+ /* enable per frame MIPI and sensor ctrl reset */
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ 0xFE, 0x30);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+ }
+
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_RESET_RELATED, GC0310_REGISTER_PAGE_3);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+
+ ret = gc0310_write_reg(client, GC0310_8BIT, GC0310_SW_STREAM,
+ enable ? GC0310_START_STREAMING :
+ GC0310_STOP_STREAMING);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_RESET_RELATED, GC0310_REGISTER_PAGE_0);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+
+ mutex_unlock(&dev->input_lock);
+ pr_info("%s E\n", __func__);
+ return ret;
+}
+
+static int gc0310_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ pr_info("%s S\n", __func__);
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+ /* power off the module first, then power it on again, as the
+ * initial power-on by the board may not fulfill the power-on
+ * sequence needed by the module
+ */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = gc0310_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "gc0310_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off sensor, after probed */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ pr_info("%s E\n", __func__);
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc0310_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = gc0310_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int gc0310_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ return 0;
+}
+
+static int gc0310_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = gc0310_res[index].width;
+ fse->min_height = gc0310_res[index].height;
+ fse->max_width = gc0310_res[index].width;
+ fse->max_height = gc0310_res[index].height;
+
+ return 0;
+}
+
+static int gc0310_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = gc0310_res[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops gc0310_sensor_ops = {
+ .g_skip_frames = gc0310_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops gc0310_video_ops = {
+ .s_stream = gc0310_s_stream,
+ .g_frame_interval = gc0310_g_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops gc0310_core_ops = {
+ .s_power = gc0310_s_power,
+ .ioctl = gc0310_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops gc0310_pad_ops = {
+ .enum_mbus_code = gc0310_enum_mbus_code,
+ .enum_frame_size = gc0310_enum_frame_size,
+ .get_fmt = gc0310_get_fmt,
+ .set_fmt = gc0310_set_fmt,
+};
+
+static const struct v4l2_subdev_ops gc0310_ops = {
+ .core = &gc0310_core_ops,
+ .video = &gc0310_video_ops,
+ .pad = &gc0310_pad_ops,
+ .sensor = &gc0310_sensor_ops,
+};
+
+static int gc0310_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ dev_dbg(&client->dev, "gc0310_remove...\n");
+
+ dev->platform_data->csi_cfg(sd, 0);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+
+ return 0;
+}
+
+static int gc0310_probe(struct i2c_client *client)
+{
+ struct gc0310_device *dev;
+ int ret;
+ void *pdata;
+ unsigned int i;
+ acpi_handle handle;
+ struct acpi_device *adev;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+ pr_info("%s: ACPI detected it on bus ID=%s, HID=%s\n",
+ __func__, acpi_device_bid(adev), acpi_device_hid(adev));
+ // FIXME: may need to release resources allocated by acpi_bus_get_device()
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&dev->sd, client, &gc0310_ops);
+
+ pdata = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_8,
+ atomisp_bayer_order_grbg);
+ if (!pdata) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ret = gc0310_s_config(&dev->sd, client->irq, pdata);
+ if (ret)
+ goto out_free;
+
+ ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
+ if (ret)
+ goto out_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
+ ARRAY_SIZE(gc0310_controls));
+ if (ret) {
+ gc0310_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gc0310_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &gc0310_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ gc0310_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret)
+ gc0310_remove(client);
+
+ pr_info("%s E\n", __func__);
+ return ret;
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+static const struct acpi_device_id gc0310_acpi_match[] = {
+ {"XXGC0310"},
+ {"INT0310"},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, gc0310_acpi_match);
+
+static struct i2c_driver gc0310_driver = {
+ .driver = {
+ .name = "gc0310",
+ .acpi_match_table = gc0310_acpi_match,
+ },
+ .probe_new = gc0310_probe,
+ .remove = gc0310_remove,
+};
+module_i2c_driver(gc0310_driver);
+
+MODULE_AUTHOR("Lai, Angie <angie.lai@intel.com>");
+MODULE_DESCRIPTION("A low-level driver for GalaxyCore GC0310 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
new file mode 100644
index 000000000000..a12dd0e858bc
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -0,0 +1,1139 @@
+/*
+ * Support for GalaxyCore GC2235 2M camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include <linux/acpi.h>
+#include <linux/io.h>
+
+#include "gc2235.h"
+
+/* i2c read/write stuff */
+static int gc2235_read_reg(struct i2c_client *client,
+ u16 data_length, u16 reg, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[6];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != GC2235_8BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = data;
+
+ /* the register address is a single byte */
+ data[0] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* GC2235 registers are a single byte wide */
+ if (data_length == GC2235_8BIT)
+ *val = (u8)data[0];
+
+ return 0;
+}
+
+static int gc2235_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int gc2235_write_reg(struct i2c_client *client, u16 data_length,
+ u8 reg, u8 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ const u16 len = data_length + sizeof(u8); /* 8-bit address + data */
+
+ if (data_length != GC2235_8BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* register address first, then the value */
+ data[0] = reg;
+ data[1] = val;
+
+ ret = gc2235_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+static int __gc2235_flush_reg_array(struct i2c_client *client,
+ struct gc2235_write_ctrl *ctrl)
+{
+ u16 size;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u8) + ctrl->index; /* 8-bit address + data */
+ ctrl->index = 0;
+
+ return gc2235_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __gc2235_buf_reg_array(struct i2c_client *client,
+ struct gc2235_write_ctrl *ctrl,
+ const struct gc2235_reg *next)
+{
+ int size;
+
+ if (next->type != GC2235_8BIT)
+ return -EINVAL;
+
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+ /*
+ * The buffer may not have room for another byte; flush it now so
+ * the next register write always fits.
+ */
+ if (ctrl->index + sizeof(u8) >= GC2235_MAX_WRITE_BUF_SIZE)
+ return __gc2235_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int __gc2235_write_reg_is_consecutive(struct i2c_client *client,
+ struct gc2235_write_ctrl *ctrl,
+ const struct gc2235_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int gc2235_write_reg_array(struct i2c_client *client,
+ const struct gc2235_reg *reglist)
+{
+ const struct gc2235_reg *next = reglist;
+ struct gc2235_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != GC2235_TOK_TERM; next++) {
+ switch (next->type & GC2235_TOK_MASK) {
+ case GC2235_TOK_DELAY:
+ err = __gc2235_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+ /*
+ * If the next address is not consecutive, the buffered data
+ * must be flushed before proceeding.
+ */
+ if (!__gc2235_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __gc2235_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __gc2235_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __gc2235_flush_reg_array(client, &ctrl);
+}
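+
+/*
+ * Editor's illustrative sketch, not part of the original driver: a
+ * hypothetical register list showing how gc2235_write_reg_array()
+ * batches writes to consecutive addresses into a single I2C burst and
+ * flushes on gaps, delays and termination. Addresses and values are
+ * invented; the {type, reg, val} field order is assumed from the
+ * accessors used above.
+ */
+#if 0 /* example only */
+static const struct gc2235_reg gc2235_batching_example[] = {
+ { GC2235_8BIT, 0x90, 0x01 }, /* opens a buffer at address 0x90 */
+ { GC2235_8BIT, 0x91, 0x02 }, /* consecutive: appended to the buffer */
+ { GC2235_8BIT, 0x95, 0x07 }, /* gap: burst {0x90: 01 02} flushed first */
+ { GC2235_TOK_DELAY, 0x00, 30 }, /* flush {0x95: 07}, then msleep(30) */
+ { GC2235_TOK_TERM, 0x00, 0x00 }, /* ends the list; final flush is a no-op */
+};
+#endif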
+
+static int gc2235_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (GC2235_FOCAL_LENGTH_NUM << 16) | GC2235_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int gc2235_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* fixed f-number for this module */
+ *val = (GC2235_F_NUMBER_DEFAULT_NUM << 16) | GC2235_F_NUMBER_DEM;
+ return 0;
+}
+
+static int gc2235_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (GC2235_F_NUMBER_DEFAULT_NUM << 24) |
+ (GC2235_F_NUMBER_DEM << 16) |
+ (GC2235_F_NUMBER_DEFAULT_NUM << 8) | GC2235_F_NUMBER_DEM;
+ return 0;
+}
+
+static int gc2235_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct gc2235_resolution *res)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct atomisp_sensor_mode_data *buf;
+ u16 reg_val, reg_val_h;
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ buf = &info->data;
+
+ /* pixel clock calculation */
+ buf->vt_pix_clk_freq_mhz = dev->vt_pix_clk_freq_mhz = 30000000;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = GC2235_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ GC2235_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = GC2235_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ GC2235_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = GC2235_FINE_INTG_TIME_MIN;
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_H_CROP_START_H, &reg_val_h);
+ if (ret)
+ return ret;
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_H_CROP_START_L, &reg_val);
+ if (ret)
+ return ret;
+
+ buf->crop_horizontal_start = (reg_val_h << 8) | reg_val;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_V_CROP_START_H, &reg_val_h);
+ if (ret)
+ return ret;
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_V_CROP_START_L, &reg_val);
+ if (ret)
+ return ret;
+
+ buf->crop_vertical_start = (reg_val_h << 8) | reg_val;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_H_OUTSIZE_H, &reg_val_h);
+ if (ret)
+ return ret;
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_H_OUTSIZE_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = (reg_val_h << 8) | reg_val;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_V_OUTSIZE_H, &reg_val_h);
+ if (ret)
+ return ret;
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_V_OUTSIZE_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = (reg_val_h << 8) | reg_val;
+
+ buf->crop_horizontal_end = buf->crop_horizontal_start +
+ buf->output_width - 1;
+ buf->crop_vertical_end = buf->crop_vertical_start +
+ buf->output_height - 1;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_HB_H, &reg_val_h);
+ if (ret)
+ return ret;
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_HB_L, &reg_val);
+ if (ret)
+ return ret;
+
+#if 0
+ u16 dummy = (reg_val_h << 8) | reg_val;
+#endif
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_SH_DELAY_H, &reg_val_h);
+ if (ret)
+ return ret;
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_SH_DELAY_L, &reg_val);
+ if (ret)
+ return ret;
+
+#if 0
+ buf->line_length_pck = buf->output_width + 16 + dummy +
+ (((u16)reg_val_h << 8) | (u16)reg_val) + 4;
+#endif
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_VB_H, &reg_val_h);
+ if (ret)
+ return ret;
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_VB_L, &reg_val);
+ if (ret)
+ return ret;
+
+#if 0
+ buf->frame_length_lines = buf->output_height + 32 +
+ (((u16)reg_val_h << 8) | (u16)reg_val);
+#endif
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
+
+static long __gc2235_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 coarse_integration = (u16)coarse_itg;
+ int ret = 0;
+ u16 expo_coarse_h, expo_coarse_l, gain_val = 0xF0, gain_val2 = 0xF0;
+
+ expo_coarse_h = coarse_integration >> 8;
+ expo_coarse_l = coarse_integration & 0xff;
+
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_H, expo_coarse_h);
+ if (ret)
+ return ret;
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_L, expo_coarse_l);
+ if (ret)
+ return ret;
+
+ if (gain <= 0x58) {
+ gain_val = 0x40;
+ gain_val2 = 0x58;
+ } else if (gain < 256) {
+ gain_val = 0x40;
+ gain_val2 = gain;
+ } else {
+ gain_val2 = 64 * gain / 256;
+ gain_val = 0xff;
+ }
+
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_GLOBAL_GAIN, (u8)gain_val);
+ if (ret)
+ return ret;
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_PRE_GAIN, (u8)gain_val2);
+
+ return ret;
+}
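+
+/*
+ * Editor's worked examples for the gain mapping above (values derived
+ * only from the code, not from a datasheet):
+ * gain = 0x40: gain <= 0x58, so GLOBAL_GAIN = 0x40, PRE_GAIN = 0x58
+ * gain = 0x80: 0x58 < gain < 256, so GLOBAL_GAIN = 0x40, PRE_GAIN = 0x80
+ * gain = 0x200: gain >= 256, so GLOBAL_GAIN = 0xff,
+ * PRE_GAIN = 64 * 0x200 / 256 = 0x80
+ */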
+
+static int gc2235_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __gc2235_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long gc2235_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ int exp = exposure->integration_time[0];
+ int gain = exposure->gain[0];
+ int digitgain = exposure->gain[1];
+
+ /* a gain of zero is invalid, reject it */
+ if (gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return gc2235_set_exposure(sd, exp, gain, digitgain);
+}
+
+static long gc2235_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return gc2235_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int gc2235_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+
+ *value = reg_v;
+err:
+ return ret;
+}
+
+static int gc2235_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gc2235_device *dev =
+ container_of(ctrl->handler, struct gc2235_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = gc2235_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = gc2235_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = gc2235_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = gc2235_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .g_volatile_ctrl = gc2235_g_volatile_ctrl
+};
+
+static struct v4l2_ctrl_config gc2235_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = GC2235_FOCAL_LENGTH_DEFAULT,
+ .max = GC2235_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = GC2235_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = GC2235_F_NUMBER_DEFAULT,
+ .max = GC2235_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = GC2235_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = GC2235_F_NUMBER_RANGE,
+ .max = GC2235_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = GC2235_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+};
+
+static int __gc2235_init(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* restore settings */
+ gc2235_res = gc2235_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ return gc2235_write_reg_array(client, gc2235_init_settings);
+}
+
+static int is_init;
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = -1;
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ if (flag) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ usleep_range(60, 90);
+ if (ret == 0)
+ ret = dev->platform_data->v2p8_ctrl(sd, 1);
+ } else {
+ ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ int ret;
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* attempt both GPIOs and report failure if either write failed */
+ ret = dev->platform_data->gpio1_ctrl(sd, !flag);
+ usleep_range(60, 90);
+ ret |= dev->platform_data->gpio0_ctrl(sd, flag);
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* according to DS, at least 5ms is needed between DOVDD and PWDN */
+ usleep_range(5000, 6000);
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+ usleep_range(5000, 6000);
+
+ /* gpio ctrl, retried once since some boards need a second attempt */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+ }
+
+ msleep(5);
+ return 0;
+
+fail_clk:
+ dev->platform_data->flisclk_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int gc2235_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ if (on == 0) {
+ ret = power_down(sd);
+ } else {
+ ret = power_up(sd);
+ if (!ret) {
+ ret = __gc2235_init(sd);
+ is_init = 1;
+ }
+ }
+ return ret;
+}
+
+/*
+ * distance - calculate the distance
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Compute the gap between a supported resolution and the requested w/h.
+ * Resolutions smaller than the requested w/h are not considered.
+ * Returns the gap value, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 800
+static int distance(struct gc2235_resolution *res, u32 w, u32 h)
+{
+ unsigned int w_ratio = (res->width << 13) / w;
+ unsigned int h_ratio;
+ int match;
+
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - 8192);
+
+ if ((w_ratio < 8192) || (h_ratio < 8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
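+
+/*
+ * Editor's worked example for distance() (request numbers invented):
+ * res = 1616x1216, w = 1600, h = 900:
+ * w_ratio = (1616 << 13) / 1600 = 8273
+ * h_ratio = (1216 << 13) / 900 = 11068
+ * match = |(8273 << 13) / 11068 - 8192| = |6123 - 8192| = 2069
+ * 2069 > LARGEST_ALLOWED_RATIO_MISMATCH (800), so this mode is rejected
+ * with -1; an exact aspect-ratio match gives match = 0 and returns
+ * w_ratio + h_ratio, so smaller gaps win in nearest_resolution_index().
+ */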
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ struct gc2235_resolution *tmp_res = NULL;
+
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &gc2235_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != gc2235_res[i].width)
+ continue;
+ if (h != gc2235_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+static int startup(struct v4l2_subdev *sd)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (is_init == 0) {
+ /* Force the gc2235 to reset on a resolution change, otherwise
+ * it cannot output normally after the switch. The reset is not
+ * needed on the first run after power-on, so it is skipped
+ * there for the sake of performance.
+ */
+ power_down(sd);
+ power_up(sd);
+ gc2235_write_reg_array(client, gc2235_init_settings);
+ }
+
+ ret = gc2235_write_reg_array(client, gc2235_res[dev->fmt_idx].regs);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 write register err.\n");
+ return ret;
+ }
+ is_init = 0;
+
+ return ret;
+}
+
+static int gc2235_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *gc2235_info = NULL;
+ int ret = 0;
+ int idx;
+
+ gc2235_info = v4l2_get_subdev_hostdata(sd);
+ if (!gc2235_info)
+ return -EINVAL;
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+ mutex_lock(&dev->input_lock);
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = gc2235_res[N_RES - 1].width;
+ fmt->height = gc2235_res[N_RES - 1].height;
+ } else {
+ fmt->width = gc2235_res[idx].width;
+ fmt->height = gc2235_res[idx].height;
+ }
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ ret = startup(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 startup err\n");
+ goto err;
+ }
+
+ ret = gc2235_get_intg_factor(client, gc2235_info,
+ &gc2235_res[dev->fmt_idx]);
+ if (ret)
+ dev_err(&client->dev, "failed to get integration_factor\n");
+
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc2235_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = gc2235_res[dev->fmt_idx].width;
+ fmt->height = gc2235_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int gc2235_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high, low;
+ int ret;
+ u16 id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_SENSOR_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "failed to read sensor ID high byte\n");
+ return -ENODEV;
+ }
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_SENSOR_ID_L, &low);
+ if (ret) {
+ dev_err(&client->dev, "failed to read sensor ID low byte\n");
+ return -ENODEV;
+ }
+ id = (high << 8) | low;
+
+ if (id != GC2235_ID) {
+ dev_err(&client->dev, "sensor ID error, 0x%x\n", id);
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "gc2235 detected\n");
+ return 0;
+}
+
+static int gc2235_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+
+ if (enable)
+ ret = gc2235_write_reg_array(client, gc2235_stream_on);
+ else
+ ret = gc2235_write_reg_array(client, gc2235_stream_off);
+
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc2235_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+ /* Power the module off first and back on later: the initial
+ * power-on done by the board may not fulfill the power-on
+ * sequence the module requires.
+ */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = gc2235_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "gc2235_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off sensor, after probed */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc2235_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = gc2235_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int gc2235_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int gc2235_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = gc2235_res[index].width;
+ fse->min_height = gc2235_res[index].height;
+ fse->max_width = gc2235_res[index].width;
+ fse->max_height = gc2235_res[index].height;
+
+ return 0;
+}
+
+static int gc2235_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = gc2235_res[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops gc2235_sensor_ops = {
+ .g_skip_frames = gc2235_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops gc2235_video_ops = {
+ .s_stream = gc2235_s_stream,
+ .g_frame_interval = gc2235_g_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops gc2235_core_ops = {
+ .s_power = gc2235_s_power,
+ .ioctl = gc2235_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops gc2235_pad_ops = {
+ .enum_mbus_code = gc2235_enum_mbus_code,
+ .enum_frame_size = gc2235_enum_frame_size,
+ .get_fmt = gc2235_get_fmt,
+ .set_fmt = gc2235_set_fmt,
+};
+
+static const struct v4l2_subdev_ops gc2235_ops = {
+ .core = &gc2235_core_ops,
+ .video = &gc2235_video_ops,
+ .pad = &gc2235_pad_ops,
+ .sensor = &gc2235_sensor_ops,
+};
+
+static int gc2235_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ dev_dbg(&client->dev, "gc2235_remove...\n");
+
+ dev->platform_data->csi_cfg(sd, 0);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+
+ return 0;
+}
+
+static int gc2235_probe(struct i2c_client *client)
+{
+ struct gc2235_device *dev;
+ void *gcpdev;
+ int ret;
+ unsigned int i;
+ acpi_handle handle;
+ struct acpi_device *adev;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+ pr_info("%s: ACPI detected it on bus ID=%s, HID=%s\n",
+ __func__, acpi_device_bid(adev), acpi_device_hid(adev));
+ // FIXME: may need to release resources allocated by acpi_bus_get_device()
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&dev->sd, client, &gc2235_ops);
+
+ gcpdev = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_grbg);
+
+ ret = gc2235_s_config(&dev->sd, client->irq, gcpdev);
+ if (ret)
+ goto out_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret =
+ v4l2_ctrl_handler_init(&dev->ctrl_handler,
+ ARRAY_SIZE(gc2235_controls));
+ if (ret) {
+ gc2235_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gc2235_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &gc2235_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ gc2235_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret) {
+ gc2235_remove(client);
+ return ret;
+ }
+
+ return atomisp_register_i2c_module(&dev->sd, gcpdev, RAW_CAMERA);
+
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+
+ return ret;
+}
+
+static const struct acpi_device_id gc2235_acpi_match[] = {
+ { "INT33F8" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, gc2235_acpi_match);
+
+static struct i2c_driver gc2235_driver = {
+ .driver = {
+ .name = "gc2235",
+ .acpi_match_table = gc2235_acpi_match,
+ },
+ .probe_new = gc2235_probe,
+ .remove = gc2235_remove,
+};
+module_i2c_driver(gc2235_driver);
+
+MODULE_AUTHOR("Shuguang Gong <Shuguang.Gong@intel.com>");
+MODULE_DESCRIPTION("A low-level driver for GC2235 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
new file mode 100644
index 000000000000..33ab884f7352
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#include <linux/i2c.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include "../include/linux/libmsrlisthelper.h"
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/* Tagged binary data container structure definitions. */
+struct tbd_header {
+ u32 tag; /*!< Tag identifier, also checks endianness */
+ u32 size; /*!< Container size including this header */
+ u32 version; /*!< Version, format 0xYYMMDDVV */
+ u32 revision; /*!< Revision, format 0xYYMMDDVV */
+ u32 config_bits; /*!< Configuration flag bits set */
+ u32 checksum; /*!< Global checksum, header included */
+} __packed;
+
+struct tbd_record_header {
+ u32 size; /*!< Size of record including header */
+ u8 format_id; /*!< tbd_format_t enumeration values used */
+ u8 packing_key; /*!< Packing method; 0 = no packing */
+ u16 class_id; /*!< tbd_class_t enumeration values used */
+} __packed;
+
+struct tbd_data_record_header {
+ u16 next_offset;
+ u16 flags;
+ u16 data_offset;
+ u16 data_size;
+} __packed;
+
+#define TBD_CLASS_DRV_ID 2
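+
+/*
+ * Editor's sketch of the container layout assumed by apply_msr_data()
+ * below (offsets follow from the packed struct sizes above):
+ *
+ * 0x00 struct tbd_header tag "DRVB", size = total blob size
+ * 0x18 struct tbd_record_header class_id = TBD_CLASS_DRV_ID
+ * 0x20 first struct tbd_data_record_header; next_offset chains
+ * further datasets, data_offset/data_size locate each MSR
+ * payload relative to the start of the record data
+ */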
+
+static int set_msr_configuration(struct i2c_client *client, uint8_t *bufptr,
+ unsigned int size)
+{
+ /* The configuration data contains any number of sequences. Each
+ * sequence starts with one byte (uint8_t) giving the number of
+ * bytes that follow, followed by exactly that many bytes of
+ * payload. By convention, the first two payload bytes form an
+ * address in the sensor address space (high byte first) and the
+ * remaining bytes of the sequence are written there. */
+
+ u8 *ptr = bufptr;
+
+ while (ptr < bufptr + size) {
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ };
+ int ret;
+
+ /* How many bytes */
+ msg.len = *ptr++;
+ /* Where the bytes are located */
+ msg.buf = ptr;
+ ptr += msg.len;
+
+ if (ptr > bufptr + size)
+ /* Accessing data beyond bounds is not tolerated */
+ return -EINVAL;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c write error: %d", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
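+
+/*
+ * Editor's illustrative sketch, not part of the original code: a
+ * hypothetical buffer in the sequence format parsed above. It would
+ * result in two i2c_transfer() calls of 4 and 3 bytes respectively.
+ */
+#if 0 /* example only */
+static const u8 msr_example[] = {
+ 0x04, /* 4 bytes follow */
+ 0x30, 0x0e, /* sensor address 0x300e, high byte first */
+ 0x12, 0x34, /* two data bytes written starting at 0x300e */
+ 0x03, /* next sequence: 3 bytes follow */
+ 0x30, 0x12, /* sensor address 0x3012 */
+ 0xff, /* one data byte */
+};
+#endif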
+
+static int parse_and_apply(struct i2c_client *client, uint8_t *buffer,
+ unsigned int size)
+{
+ u8 *endptr8 = buffer + size;
+ struct tbd_data_record_header *header =
+ (struct tbd_data_record_header *)buffer;
+
+ /* There may be any number of datasets present */
+ unsigned int dataset = 0;
+
+ do {
+ /* The four header fields below are read from the buffer */
+ if ((uint8_t *)header + sizeof(*header) > endptr8)
+ return -EINVAL;
+
+ /* All data should be located within given buffer */
+ if ((uint8_t *)header + header->data_offset +
+ header->data_size > endptr8)
+ return -EINVAL;
+
+ /* We have a new valid dataset */
+ dataset++;
+ /* See whether there is MSR data */
+ /* If yes, update the reg info */
+ if (header->data_size && (header->flags & 1)) {
+ int ret;
+
+ dev_info(&client->dev,
+ "New MSR data for sensor driver (dataset %02d) size:%d\n",
+ dataset, header->data_size);
+ ret = set_msr_configuration(client,
+ buffer + header->data_offset,
+ header->data_size);
+ if (ret)
+ return ret;
+ }
+ header = (struct tbd_data_record_header *)(buffer +
+ header->next_offset);
+ } while (header->next_offset);
+
+ return 0;
+}
+
+int apply_msr_data(struct i2c_client *client, const struct firmware *fw)
+{
+ struct tbd_header *header;
+ struct tbd_record_header *record;
+
+ if (!fw) {
+ dev_warn(&client->dev, "Drv data is not loaded.\n");
+ return -EINVAL;
+ }
+
+ if (sizeof(*header) > fw->size)
+ return -EINVAL;
+
+ header = (struct tbd_header *)fw->data;
+ /* Check that we have a DRVB block. */
+ if (memcmp(&header->tag, "DRVB", 4))
+ return -EINVAL;
+
+ /* Check the size */
+ if (header->size != fw->size)
+ return -EINVAL;
+
+ if (sizeof(*header) + sizeof(*record) > fw->size)
+ return -EINVAL;
+
+ record = (struct tbd_record_header *)(header + 1);
+ /* Check that the class id matches tbd's drv id. */
+ if (record->class_id != TBD_CLASS_DRV_ID)
+ return -EINVAL;
+
+ /* Size 0 shall not be treated as an error */
+ if (!record->size)
+ return 0;
+
+ return parse_and_apply(client, (uint8_t *)(record + 1), record->size);
+}
+EXPORT_SYMBOL_GPL(apply_msr_data);
+
+int load_msr_list(struct i2c_client *client, char *name,
+ const struct firmware **fw)
+{
+ int ret = request_firmware(fw, name, &client->dev);
+
+ if (ret) {
+ dev_err(&client->dev,
+ "Error %d while requesting firmware %s\n",
+ ret, name);
+ return ret;
+ }
+ dev_info(&client->dev, "Received %lu bytes drv data\n",
+ (unsigned long)(*fw)->size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(load_msr_list);
+
+void release_msr_list(struct i2c_client *client, const struct firmware *fw)
+{
+ release_firmware(fw);
+}
+EXPORT_SYMBOL_GPL(release_msr_list);
+
+static int init_msrlisthelper(void)
+{
+ return 0;
+}
+
+static void exit_msrlisthelper(void)
+{
+}
+
+module_init(init_msrlisthelper);
+module_exit(exit_msrlisthelper);
+
+MODULE_AUTHOR("Jukka Kaartinen <jukka.o.kaartinen@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
new file mode 100644
index 000000000000..a899145265ff
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -0,0 +1,972 @@
+/*
+ * LED flash driver for LM3554
+ *
+ * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include "../include/media/lm3554.h"
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include "../include/linux/atomisp.h"
+
+/* Registers */
+
+#define LM3554_TORCH_BRIGHTNESS_REG 0xA0
+#define LM3554_TORCH_MODE_SHIFT 0
+#define LM3554_TORCH_CURRENT_SHIFT 3
+#define LM3554_INDICATOR_CURRENT_SHIFT 6
+
+#define LM3554_FLASH_BRIGHTNESS_REG 0xB0
+#define LM3554_FLASH_MODE_SHIFT 0
+#define LM3554_FLASH_CURRENT_SHIFT 3
+#define LM3554_STROBE_SENSITIVITY_SHIFT 7
+
+#define LM3554_FLASH_DURATION_REG 0xC0
+#define LM3554_FLASH_TIMEOUT_SHIFT 0
+#define LM3554_CURRENT_LIMIT_SHIFT 5
+
+#define LM3554_FLAGS_REG 0xD0
+#define LM3554_FLAG_TIMEOUT BIT(0)
+#define LM3554_FLAG_THERMAL_SHUTDOWN BIT(1)
+#define LM3554_FLAG_LED_FAULT BIT(2)
+#define LM3554_FLAG_TX1_INTERRUPT BIT(3)
+#define LM3554_FLAG_TX2_INTERRUPT BIT(4)
+#define LM3554_FLAG_LED_THERMAL_FAULT BIT(5)
+#define LM3554_FLAG_UNUSED BIT(6)
+#define LM3554_FLAG_INPUT_VOLTAGE_LOW BIT(7)
+
+#define LM3554_CONFIG_REG_1 0xE0
+#define LM3554_ENVM_TX2_SHIFT 5
+#define LM3554_TX2_POLARITY_SHIFT 6
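+
+/*
+ * Editor's worked example of the register packing used by the helpers
+ * below (field values invented): with mode = 2, torch_current = 3 and
+ * indicator_current = 1, lm3554_set_torch() writes
+ * (2 << 0) | (3 << 3) | (1 << 6) = 0x02 | 0x18 | 0x40 = 0x5a
+ * to LM3554_TORCH_BRIGHTNESS_REG (0xA0).
+ */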
+
+struct lm3554 {
+ struct v4l2_subdev sd;
+
+ struct mutex power_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+ int power_count;
+
+ unsigned int mode;
+ int timeout;
+ u8 torch_current;
+ u8 indicator_current;
+ u8 flash_current;
+
+ struct timer_list flash_off_delay;
+ struct lm3554_platform_data *pdata;
+};
+
+#define to_lm3554(p_sd) container_of(p_sd, struct lm3554, sd)
+
+/* Return negative errno else zero on success */
+static int lm3554_write(struct lm3554 *flash, u8 addr, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->sd);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, addr, val);
+
+ dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val,
+ ret < 0 ? "fail" : "ok");
+
+ return ret;
+}
+
+/* Return negative errno else a data byte received from the device. */
+static int lm3554_read(struct lm3554 *flash, u8 addr)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->sd);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, addr);
+
+ dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, ret,
+ ret < 0 ? "fail" : "ok");
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static int lm3554_set_mode(struct lm3554 *flash, unsigned int mode)
+{
+ u8 val;
+ int ret;
+
+ val = (mode << LM3554_FLASH_MODE_SHIFT) |
+ (flash->flash_current << LM3554_FLASH_CURRENT_SHIFT);
+
+ ret = lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, val);
+ if (ret == 0)
+ flash->mode = mode;
+ return ret;
+}
+
+static int lm3554_set_torch(struct lm3554 *flash)
+{
+ u8 val;
+
+ val = (flash->mode << LM3554_TORCH_MODE_SHIFT) |
+ (flash->torch_current << LM3554_TORCH_CURRENT_SHIFT) |
+ (flash->indicator_current << LM3554_INDICATOR_CURRENT_SHIFT);
+
+ return lm3554_write(flash, LM3554_TORCH_BRIGHTNESS_REG, val);
+}
+
+static int lm3554_set_flash(struct lm3554 *flash)
+{
+ u8 val;
+
+ val = (flash->mode << LM3554_FLASH_MODE_SHIFT) |
+ (flash->flash_current << LM3554_FLASH_CURRENT_SHIFT);
+
+ return lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, val);
+}
+
+static int lm3554_set_duration(struct lm3554 *flash)
+{
+ u8 val;
+
+ val = (flash->timeout << LM3554_FLASH_TIMEOUT_SHIFT) |
+ (flash->pdata->current_limit << LM3554_CURRENT_LIMIT_SHIFT);
+
+ return lm3554_write(flash, LM3554_FLASH_DURATION_REG, val);
+}
+
+static int lm3554_set_config1(struct lm3554 *flash)
+{
+ u8 val;
+
+ val = (flash->pdata->envm_tx2 << LM3554_ENVM_TX2_SHIFT) |
+ (flash->pdata->tx2_polarity << LM3554_TX2_POLARITY_SHIFT);
+ return lm3554_write(flash, LM3554_CONFIG_REG_1, val);
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware trigger
+ */
+static void lm3554_flash_off_delay(struct timer_list *t)
+{
+ struct lm3554 *flash = from_timer(flash, t, flash_off_delay);
+ struct lm3554_platform_data *pdata = flash->pdata;
+
+ gpio_set_value(pdata->gpio_strobe, 0);
+}
+
+static int lm3554_hw_strobe(struct i2c_client *client, bool strobe)
+{
+ int ret, timer_pending;
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554_platform_data *pdata = flash->pdata;
+
+ /*
+ * An abnormally high flash current is observed when strobing off
+ * the flash. As a workaround, first set the flash current to a
+ * lower level, wait a short moment, and then strobe off.
+ */
+
+ timer_pending = del_timer_sync(&flash->flash_off_delay);
+
+ /* Flash off */
+ if (!strobe) {
+ /* drop the flash current to its lowest setting and wait a while */
+ ret = lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, 0);
+ if (ret < 0)
+ goto err;
+ mod_timer(&flash->flash_off_delay,
+ jiffies + msecs_to_jiffies(LM3554_TIMER_DELAY));
+ return 0;
+ }
+
+ /* Flash on */
+
+ /*
+ * If the timer was cancelled before it ran, the flash was never
+ * strobed off, so strobe it off here.
+ */
+ if (timer_pending)
+ gpio_set_value(pdata->gpio_strobe, 0);
+
+ /* Restore flash current settings */
+ ret = lm3554_set_flash(flash);
+ if (ret < 0)
+ goto err;
+
+ /* Strobe on Flash */
+ gpio_set_value(pdata->gpio_strobe, 1);
+
+ return 0;
+err:
+ dev_err(&client->dev, "failed to %s flash strobe (%d)\n",
+ strobe ? "on" : "off", ret);
+ return ret;
+}
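+
+/*
+ * Editor's note on the sequencing above: on "strobe off" the current is
+ * dropped first and the strobe GPIO is only pulled low from the timer
+ * callback LM3554_TIMER_DELAY ms later; a "strobe on" arriving in that
+ * window cancels the pending timer and pulls the GPIO low itself before
+ * restoring the flash current and raising the GPIO again.
+ */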
+
+/* -----------------------------------------------------------------------------
+ * V4L2 controls
+ */
+
+static int lm3554_read_status(struct lm3554 *flash)
+{
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->sd);
+
+ /* NOTE: reading the register clears the fault status */
+ ret = lm3554_read(flash, LM3554_FLAGS_REG);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * According to the datasheet, bit 6 always reads back as '1'.
+ * Clear it first.
+ */
+ ret &= ~LM3554_FLAG_UNUSED;
+
+ /*
+ * Do not treat the TX1/TX2 signals as errors: the MSIC does not
+ * turn the flash off, but switches it to torch mode in hardware
+ * based on the GSM modem signal.
+ */
+ ret &= ~(LM3554_FLAG_TX1_INTERRUPT | LM3554_FLAG_TX2_INTERRUPT);
+
+ if (ret > 0)
+ dev_dbg(&client->dev, "LM3554 flag status: %02x\n", ret);
+
+ return ret;
+}
+
+static int lm3554_s_flash_timeout(struct v4l2_subdev *sd, u32 val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ val = clamp(val, LM3554_MIN_TIMEOUT, LM3554_MAX_TIMEOUT);
+ val = val / LM3554_TIMEOUT_STEPSIZE - 1;
+
+ flash->timeout = val;
+
+ return lm3554_set_duration(flash);
+}
+
+static int lm3554_g_flash_timeout(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ *val = (u32)(flash->timeout + 1) * LM3554_TIMEOUT_STEPSIZE;
+
+ return 0;
+}
+
+static int lm3554_s_flash_intensity(struct v4l2_subdev *sd, u32 intensity)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ intensity = LM3554_CLAMP_PERCENTAGE(intensity);
+ intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_FLASH_STEP);
+
+ flash->flash_current = intensity;
+
+ return lm3554_set_flash(flash);
+}
+
+static int lm3554_g_flash_intensity(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ *val = LM3554_VALUE_TO_PERCENT((u32)flash->flash_current,
+ LM3554_FLASH_STEP);
+
+ return 0;
+}
+
+static int lm3554_s_torch_intensity(struct v4l2_subdev *sd, u32 intensity)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ intensity = LM3554_CLAMP_PERCENTAGE(intensity);
+ intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_TORCH_STEP);
+
+ flash->torch_current = intensity;
+
+ return lm3554_set_torch(flash);
+}
+
+static int lm3554_g_torch_intensity(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ *val = LM3554_VALUE_TO_PERCENT((u32)flash->torch_current,
+ LM3554_TORCH_STEP);
+
+ return 0;
+}
+
+static int lm3554_s_indicator_intensity(struct v4l2_subdev *sd, u32 intensity)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ intensity = LM3554_CLAMP_PERCENTAGE(intensity);
+ intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_INDICATOR_STEP);
+
+ flash->indicator_current = intensity;
+
+ return lm3554_set_torch(flash);
+}
+
+static int lm3554_g_indicator_intensity(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ *val = LM3554_VALUE_TO_PERCENT((u32)flash->indicator_current,
+ LM3554_INDICATOR_STEP);
+
+ return 0;
+}
+
+static int lm3554_s_flash_strobe(struct v4l2_subdev *sd, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return lm3554_hw_strobe(client, val);
+}
+
+static int lm3554_s_flash_mode(struct v4l2_subdev *sd, u32 new_mode)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ unsigned int mode;
+
+ switch (new_mode) {
+ case ATOMISP_FLASH_MODE_OFF:
+ mode = LM3554_MODE_SHUTDOWN;
+ break;
+ case ATOMISP_FLASH_MODE_FLASH:
+ mode = LM3554_MODE_FLASH;
+ break;
+ case ATOMISP_FLASH_MODE_INDICATOR:
+ mode = LM3554_MODE_INDICATOR;
+ break;
+ case ATOMISP_FLASH_MODE_TORCH:
+ mode = LM3554_MODE_TORCH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return lm3554_set_mode(flash, mode);
+}
+
+static int lm3554_g_flash_mode(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ *val = flash->mode;
+ return 0;
+}
+
+static int lm3554_g_flash_status(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ int value;
+
+ value = lm3554_read_status(flash);
+ if (value < 0)
+ return value;
+
+ if (value & LM3554_FLAG_TIMEOUT)
+ *val = ATOMISP_FLASH_STATUS_TIMEOUT;
+ else if (value > 0)
+ *val = ATOMISP_FLASH_STATUS_HW_ERROR;
+ else
+ *val = ATOMISP_FLASH_STATUS_OK;
+
+ return 0;
+}
+
+static int lm3554_g_flash_status_register(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ int ret;
+
+ ret = lm3554_read(flash, LM3554_FLAGS_REG);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+
+static int lm3554_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct lm3554 *dev =
+ container_of(ctrl->handler, struct lm3554, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_TIMEOUT:
+ ret = lm3554_s_flash_timeout(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_INTENSITY:
+ ret = lm3554_s_flash_intensity(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ ret = lm3554_s_torch_intensity(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ ret = lm3554_s_indicator_intensity(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_STROBE:
+ ret = lm3554_s_flash_strobe(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_MODE:
+ ret = lm3554_s_flash_mode(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int lm3554_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct lm3554 *dev =
+ container_of(ctrl->handler, struct lm3554, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_TIMEOUT:
+ ret = lm3554_g_flash_timeout(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_INTENSITY:
+ ret = lm3554_g_flash_intensity(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ ret = lm3554_g_torch_intensity(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ ret = lm3554_g_indicator_intensity(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_MODE:
+ ret = lm3554_g_flash_mode(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_STATUS:
+ ret = lm3554_g_flash_status(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_STATUS_REGISTER:
+ ret = lm3554_g_flash_status_register(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = lm3554_s_ctrl,
+ .g_volatile_ctrl = lm3554_g_volatile_ctrl
+};
+
+static const struct v4l2_ctrl_config lm3554_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_TIMEOUT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Flash Timeout",
+ .min = 0x0,
+ .max = LM3554_MAX_TIMEOUT,
+ .step = 0x01,
+ .def = LM3554_DEFAULT_TIMEOUT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_INTENSITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Flash Intensity",
+ .min = LM3554_MIN_PERCENT,
+ .max = LM3554_MAX_PERCENT,
+ .step = 0x01,
+ .def = LM3554_FLASH_DEFAULT_BRIGHTNESS,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_TORCH_INTENSITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Torch Intensity",
+ .min = LM3554_MIN_PERCENT,
+ .max = LM3554_MAX_PERCENT,
+ .step = 0x01,
+ .def = LM3554_TORCH_DEFAULT_BRIGHTNESS,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_INDICATOR_INTENSITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Indicator Intensity",
+ .min = LM3554_MIN_PERCENT,
+ .max = LM3554_MAX_PERCENT,
+ .step = 0x01,
+ .def = LM3554_INDICATOR_DEFAULT_BRIGHTNESS,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_STROBE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flash Strobe",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_MODE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Flash Mode",
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = ATOMISP_FLASH_MODE_OFF,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_STATUS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Flash Status",
+ .min = ATOMISP_FLASH_STATUS_OK,
+ .max = ATOMISP_FLASH_STATUS_TIMEOUT,
+ .step = 1,
+ .def = ATOMISP_FLASH_STATUS_OK,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_STATUS_REGISTER,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Flash Status Register",
+ .min = 0,
+ .max = 255,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev core operations
+ */
+
+/* Put device into known state. */
+static int lm3554_setup(struct lm3554 *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->sd);
+ int ret;
+
+ /* clear the flags register */
+ ret = lm3554_read(flash, LM3554_FLAGS_REG);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&client->dev, "Fault info: %02x\n", ret);
+
+ ret = lm3554_set_config1(flash);
+ if (ret < 0)
+ return ret;
+
+ ret = lm3554_set_duration(flash);
+ if (ret < 0)
+ return ret;
+
+ ret = lm3554_set_torch(flash);
+ if (ret < 0)
+ return ret;
+
+ ret = lm3554_set_flash(flash);
+ if (ret < 0)
+ return ret;
+
+ /* read status */
+ ret = lm3554_read_status(flash);
+ if (ret < 0)
+ return ret;
+
+ return ret ? -EIO : 0;
+}
+
+static int __lm3554_s_power(struct lm3554 *flash, int power)
+{
+ struct lm3554_platform_data *pdata = flash->pdata;
+ int ret;
+
+ /* toggle the reset line according to the requested power state */
+ gpio_set_value(pdata->gpio_reset, power);
+ usleep_range(100, 100 + 1);
+
+ if (power) {
+ /* Setup default values. This makes sure that the chip
+ * is in a known state.
+ */
+ ret = lm3554_setup(flash);
+ if (ret < 0) {
+ __lm3554_s_power(flash, 0);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int lm3554_s_power(struct v4l2_subdev *sd, int power)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ int ret = 0;
+
+ mutex_lock(&flash->power_lock);
+
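+ /* apply the hardware change only on 0 -> 1 and 1 -> 0 transitions */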
+ if (flash->power_count == !power) {
+ ret = __lm3554_s_power(flash, !!power);
+ if (ret < 0)
+ goto done;
+ }
+
+ flash->power_count += power ? 1 : -1;
+ WARN_ON(flash->power_count < 0);
+
+done:
+ mutex_unlock(&flash->power_lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_core_ops lm3554_core_ops = {
+ .s_power = lm3554_s_power,
+};
+
+static const struct v4l2_subdev_ops lm3554_ops = {
+ .core = &lm3554_core_ops,
+};
+
+static int lm3554_detect(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_adapter *adapter = client->adapter;
+ struct lm3554 *flash = to_lm3554(sd);
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "lm3554_detect i2c error\n");
+ return -ENODEV;
+ }
+
+ /* Power up the flash driver and reset it */
+ ret = lm3554_s_power(&flash->sd, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to power on lm3554 LED flash\n");
+ } else {
+ dev_dbg(&client->dev, "Successfully detected lm3554 LED flash\n");
+ lm3554_s_power(&flash->sd, 0);
+ }
+
+ return ret;
+}
+
+static int lm3554_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return lm3554_s_power(sd, 1);
+}
+
+static int lm3554_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return lm3554_s_power(sd, 0);
+}
+
+static const struct v4l2_subdev_internal_ops lm3554_internal_ops = {
+ .registered = lm3554_detect,
+ .open = lm3554_open,
+ .close = lm3554_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * I2C driver
+ */
+#ifdef CONFIG_PM
+
+static int lm3554_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(subdev);
+ int rval;
+
+ if (flash->power_count == 0)
+ return 0;
+
+ rval = __lm3554_s_power(flash, 0);
+
+ dev_dbg(&client->dev, "Suspend %s\n", rval < 0 ? "failed" : "ok");
+
+ return rval;
+}
+
+static int lm3554_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(subdev);
+ int rval;
+
+ if (flash->power_count == 0)
+ return 0;
+
+ rval = __lm3554_s_power(flash, 1);
+
+ dev_dbg(&client->dev, "Resume %s\n", rval < 0 ? "fail" : "ok");
+
+ return rval;
+}
+
+#else
+
+#define lm3554_suspend NULL
+#define lm3554_resume NULL
+
+#endif /* CONFIG_PM */
+
+static int lm3554_gpio_init(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554_platform_data *pdata = flash->pdata;
+ int ret;
+
+ if (!gpio_is_valid(pdata->gpio_reset))
+ return -EINVAL;
+
+ ret = gpio_direction_output(pdata->gpio_reset, 0);
+ if (ret < 0)
+ goto err_gpio_reset;
+ dev_info(&client->dev, "flash led reset successfully\n");
+
+ if (!gpio_is_valid(pdata->gpio_strobe)) {
+ ret = -EINVAL;
+ goto err_gpio_dir_reset;
+ }
+
+ ret = gpio_direction_output(pdata->gpio_strobe, 0);
+ if (ret < 0)
+ goto err_gpio_strobe;
+
+ return 0;
+
+err_gpio_strobe:
+ gpio_free(pdata->gpio_strobe);
+err_gpio_dir_reset:
+ gpio_direction_output(pdata->gpio_reset, 0);
+err_gpio_reset:
+ gpio_free(pdata->gpio_reset);
+
+ return ret;
+}
+
+static int lm3554_gpio_uninit(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554_platform_data *pdata = flash->pdata;
+ int ret;
+
+ ret = gpio_direction_output(pdata->gpio_strobe, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(pdata->gpio_reset, 0);
+ if (ret < 0)
+ return ret;
+
+ gpio_free(pdata->gpio_strobe);
+ gpio_free(pdata->gpio_reset);
+ return 0;
+}
+
+static void *lm3554_platform_data_func(struct i2c_client *client)
+{
+ static struct lm3554_platform_data platform_data;
+
+ platform_data.gpio_reset =
+ desc_to_gpio(gpiod_get_index(&client->dev,
+ NULL, 2, GPIOD_OUT_LOW));
+ platform_data.gpio_strobe =
+ desc_to_gpio(gpiod_get_index(&client->dev,
+ NULL, 0, GPIOD_OUT_LOW));
+ platform_data.gpio_torch =
+ desc_to_gpio(gpiod_get_index(&client->dev,
+ NULL, 1, GPIOD_OUT_LOW));
+ dev_info(&client->dev, "camera pdata: lm3554: reset: %d strobe %d torch %d\n",
+ platform_data.gpio_reset, platform_data.gpio_strobe,
+ platform_data.gpio_torch);
+
+ /* Set TX2 mode, making the ENVM/TX2 pin a power amplifier sync input:
+ * ENVM/TX2 pin asserted, flash forced into torch mode;
+ * ENVM/TX2 pin deasserted, flash restored.
+ */
+ platform_data.envm_tx2 = 1;
+ platform_data.tx2_polarity = 0;
+
+ /* set the peak current limit to 1000 mA */
+ platform_data.current_limit = 0;
+
+ return &platform_data;
+}
+
+static int lm3554_probe(struct i2c_client *client)
+{
+ int err;
+ struct lm3554 *flash;
+ unsigned int i;
+ acpi_handle handle;
+ struct acpi_device *adev;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+ pr_info("%s: ACPI detected it on bus ID=%s, HID=%s\n",
+ __func__, acpi_device_bid(adev), acpi_device_hid(adev));
+ // FIXME: may need to release resources allocated by acpi_bus_get_device()
+
+ flash = kzalloc(sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+
+ flash->pdata = lm3554_platform_data_func(client);
+
+ v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
+ flash->sd.internal_ops = &lm3554_internal_ops;
+ flash->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ flash->mode = ATOMISP_FLASH_MODE_OFF;
+ flash->timeout = LM3554_MAX_TIMEOUT / LM3554_TIMEOUT_STEPSIZE - 1;
+ err = v4l2_ctrl_handler_init(&flash->ctrl_handler,
+ ARRAY_SIZE(lm3554_controls));
+ if (err) {
+ dev_err(&client->dev, "error initializing the ctrl_handler\n");
+ goto fail2;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
+ v4l2_ctrl_new_custom(&flash->ctrl_handler, &lm3554_controls[i],
+ NULL);
+
+ if (flash->ctrl_handler.error) {
+ dev_err(&client->dev, "ctrl_handler error.\n");
+ err = flash->ctrl_handler.error;
+ goto fail2;
+ }
+
+ flash->sd.ctrl_handler = &flash->ctrl_handler;
+ err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
+ if (err) {
+ dev_err(&client->dev, "error initialize a media entity.\n");
+ goto fail1;
+ }
+
+ flash->sd.entity.function = MEDIA_ENT_F_FLASH;
+
+ mutex_init(&flash->power_lock);
+
+ timer_setup(&flash->flash_off_delay, lm3554_flash_off_delay, 0);
+
+ err = lm3554_gpio_init(client);
+ if (err) {
+ dev_err(&client->dev, "gpio request/direction_output fail");
+ goto fail2;
+ }
+ return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
+fail2:
+ media_entity_cleanup(&flash->sd.entity);
+ v4l2_ctrl_handler_free(&flash->ctrl_handler);
+fail1:
+ v4l2_device_unregister_subdev(&flash->sd);
+ kfree(flash);
+
+ return err;
+}
+
+static int lm3554_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(sd);
+ int ret;
+
+ media_entity_cleanup(&flash->sd.entity);
+ v4l2_ctrl_handler_free(&flash->ctrl_handler);
+ v4l2_device_unregister_subdev(sd);
+
+ atomisp_gmin_remove_subdev(sd);
+
+ del_timer_sync(&flash->flash_off_delay);
+
+ ret = lm3554_gpio_uninit(client);
+ if (ret < 0)
+ goto fail;
+
+ kfree(flash);
+
+ return 0;
+fail:
+ dev_err(&client->dev, "gpio request/direction_output fail");
+ return ret;
+}
+
+static const struct dev_pm_ops lm3554_pm_ops = {
+ .suspend = lm3554_suspend,
+ .resume = lm3554_resume,
+};
+
+static const struct acpi_device_id lm3554_acpi_match[] = {
+ { "INTCF1C" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, lm3554_acpi_match);
+
+static struct i2c_driver lm3554_driver = {
+ .driver = {
+ .name = "lm3554",
+ .pm = &lm3554_pm_ops,
+ .acpi_match_table = lm3554_acpi_match,
+ },
+ .probe_new = lm3554_probe,
+ .remove = lm3554_remove,
+};
+module_i2c_driver(lm3554_driver);
+
+MODULE_AUTHOR("Jing Tao <jing.tao@intel.com>");
+MODULE_DESCRIPTION("LED flash driver for LM3554");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
new file mode 100644
index 000000000000..ac61b391e3f9
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -0,0 +1,1910 @@
+/*
+ * Support for mt9m114 Camera Sensor.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include <media/v4l2-device.h>
+
+#include "mt9m114.h"
+
+#define to_mt9m114_sensor(sd) container_of(sd, struct mt9m114_device, sd)
+
+/*
+ * TODO: use debug parameter to actually define when debug messages should
+ * be printed.
+ */
+static int debug;
+static int aaalock;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value);
+static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value);
+static int mt9m114_wait_state(struct i2c_client *client, int timeout);
+
+static int
+mt9m114_read_reg(struct i2c_client *client, u16 data_length, u32 reg, u32 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[4];
+
+ if (!client->adapter) {
+ v4l2_err(client, "%s error, no client->adapter\n", __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT
+ && data_length != MISENSOR_32BIT) {
+ v4l2_err(client, "%s error, invalid data length\n", __func__);
+ return -EINVAL;
+ }
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = MSG_LEN_OFFSET;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8)(reg >> 8);
+ data[1] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+
+ if (err >= 0) {
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == MISENSOR_8BIT)
+ *val = data[0];
+ else if (data_length == MISENSOR_16BIT)
+ *val = data[1] + (data[0] << 8);
+ else
+ *val = data[3] + (data[2] << 8) +
+ (data[1] << 16) + (data[0] << 24);
+
+ return 0;
+ }
+
+ dev_err(&client->dev, "read from offset 0x%x error %d", reg, err);
+ return err;
+}
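+
+/*
+ * Editor's worked example for the byte assembly above (bytes invented):
+ * a MISENSOR_32BIT read returning data[] = {0x12, 0x34, 0x56, 0x78}
+ * yields *val = 0x78 + (0x56 << 8) + (0x34 << 16) + (0x12 << 24)
+ * = 0x12345678, i.e. the sensor sends registers high byte first.
+ */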
+
+static int
+mt9m114_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u32 val)
+{
+ int num_msg;
+ struct i2c_msg msg;
+ unsigned char data[6] = {0};
+ __be16 *wreg;
+ int retry = 0;
+
+ if (!client->adapter) {
+ v4l2_err(client, "%s error, no client->adapter\n", __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT
+ && data_length != MISENSOR_32BIT) {
+ v4l2_err(client, "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+
+again:
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2 + data_length;
+ msg.buf = data;
+
+ /* high byte goes out first */
+ wreg = (void *)data;
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == MISENSOR_8BIT) {
+ data[2] = (u8)(val);
+ } else if (data_length == MISENSOR_16BIT) {
+ __be16 *wdata = (void *)&data[2];
+
+ *wdata = cpu_to_be16((u16)val);
+ } else {
+ /* MISENSOR_32BIT */
+ __be32 *wdata = (void *)&data[2];
+
+ *wdata = cpu_to_be32(val);
+ }
+
+ num_msg = i2c_transfer(client->adapter, &msg, 1);
+
+ /*
+ * HACK: Need some delay here for Rev 2 sensors otherwise some
+ * registers do not seem to load correctly.
+ */
+ mdelay(1);
+
+ if (num_msg >= 0)
+ return 0;
+
+ dev_err(&client->dev, "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, num_msg);
+ if (retry <= I2C_RETRY_COUNT) {
+ dev_dbg(&client->dev, "retrying... %d", retry);
+ retry++;
+ msleep(20);
+ goto again;
+ }
+
+ return num_msg;
+}
+
+/**
+ * misensor_rmw_reg - Read/Modify/Write a value to a register in the sensor
+ * device
+ * @client: i2c driver client structure
+ * @data_length: 8/16/32-bits length
+ * @reg: register address
+ * @mask: masked out bits
+ * @set: bits set
+ *
+ * Read/modify/write a value to a register in the sensor device.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+static int
+misensor_rmw_reg(struct i2c_client *client, u16 data_length, u16 reg,
+ u32 mask, u32 set)
+{
+ int err;
+ u32 val;
+
+ /* Exit when no mask */
+ if (mask == 0)
+ return 0;
+
+ /* @mask must not exceed data length */
+ switch (data_length) {
+ case MISENSOR_8BIT:
+ if (mask & ~0xff)
+ return -EINVAL;
+ break;
+ case MISENSOR_16BIT:
+ if (mask & ~0xffff)
+ return -EINVAL;
+ break;
+ case MISENSOR_32BIT:
+ break;
+ default:
+ /* Wrong @data_length */
+ return -EINVAL;
+ }
+
+ err = mt9m114_read_reg(client, data_length, reg, &val);
+ if (err) {
+ v4l2_err(client, "misensor_rmw_reg error exit, read failed\n");
+ return -EINVAL;
+ }
+
+ val &= ~mask;
+
+ /*
+ * Perform the OR if @set is non-zero.
+ * Shift the @set value to the target bit location. @set should set
+ * only bits included in @mask.
+ *
+ * REVISIT: This function expects @set to be non-shifted. Its shift
+ * value is then defined to be equal to the mask's LSB position.
+ * Why not pass values already shifted into position and avoid this
+ * unneeded shift operation?
+ */
+ set <<= ffs(mask) - 1;
+ val |= set & mask;
+
+ err = mt9m114_write_reg(client, data_length, reg, val);
+ if (err) {
+ v4l2_err(client, "misensor_rmw_reg error exit, write failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __mt9m114_flush_reg_array(struct i2c_client *client,
+ struct mt9m114_write_ctrl *ctrl)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+ int retry = 0;
+ __be16 *data16 = (void *)&ctrl->buffer.addr;
+
+ if (ctrl->index == 0)
+ return 0;
+
+again:
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2 + ctrl->index;
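+ /* Convert the buffered start address to big-endian in place. */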
+ *data16 = cpu_to_be16(ctrl->buffer.addr);
+ msg.buf = (u8 *)&ctrl->buffer;
+
+ ret = i2c_transfer(client->adapter, &msg, num_msg);
+ if (ret != num_msg) {
+ if (++retry <= I2C_RETRY_COUNT) {
+ dev_dbg(&client->dev, "retrying... %d\n", retry);
+ msleep(20);
+ goto again;
+ }
+ dev_err(&client->dev, "%s: i2c transfer error\n", __func__);
+ return -EIO;
+ }
+
+ ctrl->index = 0;
+
+ /*
+ * REVISIT: Previously we had a delay after writing data to sensor.
+ * But it was removed as our tests have shown it is not necessary
+ * anymore.
+ */
+
+ return 0;
+}
+
+static int __mt9m114_buf_reg_array(struct i2c_client *client,
+ struct mt9m114_write_ctrl *ctrl,
+ const struct misensor_reg *next)
+{
+ __be16 *data16;
+ __be32 *data32;
+ int err;
+
+ /* Insufficient buffer? Let's flush and get more free space. */
+ if (ctrl->index + next->length >= MT9M114_MAX_WRITE_BUF_SIZE) {
+ err = __mt9m114_flush_reg_array(client, ctrl);
+ if (err)
+ return err;
+ }
+
+ switch (next->length) {
+ case MISENSOR_8BIT:
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case MISENSOR_16BIT:
+ data16 = (__be16 *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ case MISENSOR_32BIT:
+ data32 = (__be32 *)&ctrl->buffer.data[ctrl->index];
+ *data32 = cpu_to_be32(next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += next->length;
+
+ return 0;
+}
+
+static int
+__mt9m114_write_reg_is_consecutive(struct i2c_client *client,
+ struct mt9m114_write_ctrl *ctrl,
+ const struct misensor_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
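+ /* Consecutive if the next register directly follows the buffered run. */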
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+/*
+ * mt9m114_write_reg_array - write a list of mt9m114 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ * @poll: completion polling requirement
+ *
+ * This function writes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * consecutive data in a single i2c_transfer().
+ *
+ * __mt9m114_flush_reg_array(), __mt9m114_buf_reg_array() and
+ * __mt9m114_write_reg_is_consecutive() are internal functions to
+ * mt9m114_write_reg_array() and should not be used anywhere else.
+ */
+static int mt9m114_write_reg_array(struct i2c_client *client,
+ const struct misensor_reg *reglist,
+ int poll)
+{
+ const struct misensor_reg *next = reglist;
+ struct mt9m114_write_ctrl ctrl;
+ int err;
+
+ if (poll == PRE_POLLING) {
+ err = mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ ctrl.index = 0;
+ for (; next->length != MISENSOR_TOK_TERM; next++) {
+ switch (next->length & MISENSOR_TOK_MASK) {
+ case MISENSOR_TOK_DELAY:
+ err = __mt9m114_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ case MISENSOR_TOK_RMW:
+ err = __mt9m114_flush_reg_array(client, &ctrl);
+ err |= misensor_rmw_reg(client,
+ next->length &
+ ~MISENSOR_TOK_RMW,
+ next->reg, next->val,
+ next->val2);
+ if (err) {
+ dev_err(&client->dev, "%s read err. aborted\n",
+ __func__);
+ return -EINVAL;
+ }
+ break;
+ default:
+ /*
+ * If next address is not consecutive, data needs to be
+ * flushed before proceed.
+ */
+ if (!__mt9m114_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __mt9m114_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __mt9m114_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ v4l2_err(client, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ err = __mt9m114_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+
+ if (poll == POST_POLLING)
+ return mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT);
+
+ return 0;
+}
+
+static int mt9m114_wait_state(struct i2c_client *client, int timeout)
+{
+ int ret;
+ unsigned int val;
+
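+ /*
+ * Poll the sensor's command register (0x0080) until the set-state
+ * bit (0x0002) clears, i.e. the previous command has completed.
+ */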
+ while (timeout-- > 0) {
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT, 0x0080, &val);
+ if (ret)
+ return ret;
+ if ((val & 0x2) == 0)
+ return 0;
+ msleep(20);
+ }
+
+ return -EINVAL;
+}
+
+static int mt9m114_set_suspend(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return mt9m114_write_reg_array(client,
+ mt9m114_standby_reg, POST_POLLING);
+}
+
+static int mt9m114_init_common(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return mt9m114_write_reg_array(client, mt9m114_common, PRE_POLLING);
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ if (flag) {
+ ret = dev->platform_data->v2p8_ctrl(sd, 1);
+ if (ret == 0) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ if (ret)
+ ret = dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ } else {
+ ret = dev->platform_data->v2p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v1p8_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Note: current modules wire only one GPIO signal (RESET#),
+ * but the schematic wires up two to the connector. BIOS
+ * versions have been unfortunately inconsistent with which
+ * ACPI index RESET# is on, so hit both */
+
+ if (flag) {
+ ret = dev->platform_data->gpio0_ctrl(sd, 0);
+ ret |= dev->platform_data->gpio1_ctrl(sd, 0);
+ msleep(60);
+ ret |= dev->platform_data->gpio0_ctrl(sd, 1);
+ ret |= dev->platform_data->gpio1_ctrl(sd, 1);
+ } else {
+ ret = dev->platform_data->gpio0_ctrl(sd, 0);
+ ret |= dev->platform_data->gpio1_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev, "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 1\n");
+ /*
+ * according to the DS, 44ms is needed between power-up and the
+ * first i2c command
+ */
+ msleep(50);
+
+ return 0;
+
+fail_clk:
+ dev->platform_data->flisclk_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev, "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 1\n");
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ /* according to the DS, 20ms is needed after power-down */
+ msleep(20);
+
+ return ret;
+}
+
+static int mt9m114_s_power(struct v4l2_subdev *sd, int power)
+{
+ if (power == 0)
+ return power_down(sd);
+ else {
+ if (power_up(sd))
+ return -EINVAL;
+
+ return mt9m114_init_common(sd);
+ }
+}
+
+/*
+ * distance - calculate the distance between a mode and a request
+ * @res: resolution entry to score
+ * @w: requested width
+ * @h: requested height
+ *
+ * Get the gap between the mode's resolution and the requested w/h.
+ * Modes whose width/height are smaller than w/h are not considered.
+ * Returns the distance value, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 600
+static int distance(struct mt9m114_res_struct const *res, u32 w, u32 h)
+{
+ unsigned int w_ratio;
+ unsigned int h_ratio;
+ int match;
+
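+ /*
+ * Ratios are computed in Q13 fixed point, where 8192 represents 1.0.
+ * A ratio below 8192 means the mode is smaller than the request in
+ * that dimension, so the mode is rejected.
+ */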
+ if (w == 0)
+ return -1;
+ w_ratio = (res->width << 13) / w;
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - 8192);
+
+ if ((w_ratio < 8192) || (h_ratio < 8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ const struct mt9m114_res_struct *tmp_res = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mt9m114_res); i++) {
+ tmp_res = &mt9m114_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int mt9m114_try_res(u32 *w, u32 *h)
+{
+ int idx = 0;
+
+ if ((*w > MT9M114_RES_960P_SIZE_H)
+ || (*h > MT9M114_RES_960P_SIZE_V)) {
+ *w = MT9M114_RES_960P_SIZE_H;
+ *h = MT9M114_RES_960P_SIZE_V;
+ } else {
+ idx = nearest_resolution_index(*w, *h);
+
+ /*
+ * nearest_resolution_index() doesn't return smaller
+ * resolutions. If it fails, the requested resolution
+ * is higher than we can support. Fall back to the
+ * highest supported resolution in this case.
+ */
+ if (idx == -1)
+ idx = ARRAY_SIZE(mt9m114_res) - 1;
+
+ *w = mt9m114_res[idx].width;
+ *h = mt9m114_res[idx].height;
+ }
+
+ return 0;
+}
+
+static struct mt9m114_res_struct *mt9m114_to_res(u32 w, u32 h)
+{
+ int index;
+
+ for (index = 0; index < N_RES; index++) {
+ if ((mt9m114_res[index].width == w) &&
+ (mt9m114_res[index].height == h))
+ break;
+ }
+
+ /* No mode found */
+ if (index >= N_RES)
+ return NULL;
+
+ return &mt9m114_res[index];
+}
+
+static int mt9m114_res2size(struct v4l2_subdev *sd, int *h_size, int *v_size)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ unsigned short hsize;
+ unsigned short vsize;
+
+ switch (dev->res) {
+ case MT9M114_RES_736P:
+ hsize = MT9M114_RES_736P_SIZE_H;
+ vsize = MT9M114_RES_736P_SIZE_V;
+ break;
+ case MT9M114_RES_864P:
+ hsize = MT9M114_RES_864P_SIZE_H;
+ vsize = MT9M114_RES_864P_SIZE_V;
+ break;
+ case MT9M114_RES_960P:
+ hsize = MT9M114_RES_960P_SIZE_H;
+ vsize = MT9M114_RES_960P_SIZE_V;
+ break;
+ default:
+ v4l2_err(sd, "%s: Resolution 0x%08x unknown\n", __func__,
+ dev->res);
+ return -EINVAL;
+ }
+
+ if (h_size)
+ *h_size = hsize;
+ if (v_size)
+ *v_size = vsize;
+
+ return 0;
+}
+
+static int mt9m114_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct mt9m114_res_struct *res)
+{
+ struct atomisp_sensor_mode_data *buf;
+ u32 reg_val;
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ buf = &info->data;
+
+ ret = mt9m114_read_reg(client, MISENSOR_32BIT,
+ REG_PIXEL_CLK, &reg_val);
+ if (ret)
+ return ret;
+ buf->vt_pix_clk_freq_mhz = reg_val;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = MT9M114_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ MT9M114_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = MT9M114_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ MT9M114_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = MT9M114_FINE_INTG_TIME_MIN;
+
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_H_START, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_V_START, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_H_END, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_end = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_V_END, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_end = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_WIDTH, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_HEIGHT, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_TIMING_HTS, &reg_val);
+ if (ret)
+ return ret;
+ buf->line_length_pck = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_TIMING_VTS, &reg_val);
+ if (ret)
+ return ret;
+ buf->frame_length_lines = reg_val;
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
+
+static int mt9m114_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ int width, height;
+ int ret;
+
+ if (format->pad)
+ return -EINVAL;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ ret = mt9m114_res2size(sd, &width, &height);
+ if (ret)
+ return ret;
+ fmt->width = width;
+ fmt->height = height;
+
+ return 0;
+}
+
+static int mt9m114_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct mt9m114_res_struct *res_index;
+ u32 width = fmt->width;
+ u32 height = fmt->height;
+ struct camera_mipi_info *mt9m114_info = NULL;
+
+ int ret;
+
+ if (format->pad)
+ return -EINVAL;
+ dev->streamon = 0;
+ dev->first_exp = MT9M114_DEFAULT_FIRST_EXP;
+
+ mt9m114_info = v4l2_get_subdev_hostdata(sd);
+ if (!mt9m114_info)
+ return -EINVAL;
+
+ mt9m114_try_res(&width, &height);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ return 0;
+ }
+ res_index = mt9m114_to_res(width, height);
+
+ /* Sanity check */
+ if (unlikely(!res_index)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ switch (res_index->res) {
+ case MT9M114_RES_736P:
+ ret = mt9m114_write_reg_array(c, mt9m114_736P_init, NO_POLLING);
+ ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
+ break;
+ case MT9M114_RES_864P:
+ ret = mt9m114_write_reg_array(c, mt9m114_864P_init, NO_POLLING);
+ ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
+ break;
+ case MT9M114_RES_960P:
+ ret = mt9m114_write_reg_array(c, mt9m114_976P_init, NO_POLLING);
+ /* set sensor read_mode to Normal */
+ ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
+ break;
+ default:
+ v4l2_err(sd, "set resolution: %d failed!\n", res_index->res);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return -EINVAL;
+
+ ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg, POST_POLLING);
+ if (ret < 0)
+ return ret;
+
+ if (mt9m114_set_suspend(sd))
+ return -EINVAL;
+
+ if (dev->res != res_index->res) {
+ int index;
+
+ /* Switch to different size */
+ if (width <= 640) {
+ dev->nctx = 0x00; /* Set for context A */
+ } else {
+ /*
+ * Context B is used for resolutions larger than 640x480
+ * Using YUV for Context B.
+ */
+ dev->nctx = 0x01; /* set for context B */
+ }
+
+ /*
+ * Mark the current sensor res as being "used"
+ *
+ * REVISIT: We don't need a "used" field on each mode
+ * list entry to know which mode is selected. If this
+ * information is really necessary, why not use a single
+ * variable in the sensor dev struct?
+ */
+ for (index = 0; index < N_RES; index++) {
+ if ((width == mt9m114_res[index].width) &&
+ (height == mt9m114_res[index].height)) {
+ mt9m114_res[index].used = true;
+ continue;
+ }
+ mt9m114_res[index].used = false;
+ }
+ }
+ ret = mt9m114_get_intg_factor(c, mt9m114_info,
+ &mt9m114_res[res_index->res]);
+ if (ret) {
+ dev_err(&c->dev, "failed to get integration_factor\n");
+ return -EINVAL;
+ }
+ /*
+ * mt9m114 - we don't poll for context switch
+ * because it does not happen with streaming disabled.
+ */
+ dev->res = res_index->res;
+
+ fmt->width = width;
+ fmt->height = height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ return 0;
+}
+
+/* TODO: Update to SOC functions, remove exposure and gain */
+static int mt9m114_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (MT9M114_FOCAL_LENGTH_NUM << 16) | MT9M114_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int mt9m114_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* const f-number for mt9m114 */
+ *val = (MT9M114_F_NUMBER_DEFAULT_NUM << 16) | MT9M114_F_NUMBER_DEM;
+ return 0;
+}
+
+static int mt9m114_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (MT9M114_F_NUMBER_DEFAULT_NUM << 24) |
+ (MT9M114_F_NUMBER_DEM << 16) |
+ (MT9M114_F_NUMBER_DEFAULT_NUM << 8) | MT9M114_F_NUMBER_DEM;
+ return 0;
+}
+
+/* Horizontal flip the image. */
+static int mt9m114_g_hflip(struct v4l2_subdev *sd, s32 *val)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ int ret;
+ u32 data;
+
+ ret = mt9m114_read_reg(c, MISENSOR_16BIT,
+ (u32)MISENSOR_READ_MODE, &data);
+ if (ret)
+ return ret;
+ *val = !!(data & MISENSOR_HFLIP_MASK);
+
+ return 0;
+}
+
+static int mt9m114_g_vflip(struct v4l2_subdev *sd, s32 *val)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ int ret;
+ u32 data;
+
+ ret = mt9m114_read_reg(c, MISENSOR_16BIT,
+ (u32)MISENSOR_READ_MODE, &data);
+ if (ret)
+ return ret;
+ *val = !!(data & MISENSOR_VFLIP_MASK);
+
+ return 0;
+}
+
+static long mt9m114_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ int ret = 0;
+ unsigned int coarse_integration = 0;
+ unsigned int FLines = 0;
+ unsigned int FrameLengthLines = 0; /* ExposureTime.FrameLengthLines; */
+ unsigned int AnalogGain, DigitalGain;
+ u32 AnalogGainToWrite = 0;
+
+ dev_dbg(&client->dev, "%s(0x%X 0x%X 0x%X)\n", __func__,
+ exposure->integration_time[0], exposure->gain[0],
+ exposure->gain[1]);
+
+ coarse_integration = exposure->integration_time[0];
+ /* fine_integration = ExposureTime.FineIntegrationTime; */
+ /* FrameLengthLines = ExposureTime.FrameLengthLines; */
+ FLines = mt9m114_res[dev->res].lines_per_frame;
+ AnalogGain = exposure->gain[0];
+ DigitalGain = exposure->gain[1];
+ if (!dev->streamon) {
+ /* Save the first exposure values while the stream is off */
+ dev->first_exp = coarse_integration;
+ dev->first_gain = AnalogGain;
+ dev->first_diggain = DigitalGain;
+ }
+ /* DigitalGain = 0x400 * (((u16) DigitalGain) >> 8) +
+ ((unsigned int)(0x400 * (((u16) DigitalGain) & 0xFF)) >>8); */
+
+ /* set frame length */
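+ /* The frame must be long enough to hold the exposure plus a 6-line margin. */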
+ if (FLines < coarse_integration + 6)
+ FLines = coarse_integration + 6;
+ if (FLines < FrameLengthLines)
+ FLines = FrameLengthLines;
+ ret = mt9m114_write_reg(client, MISENSOR_16BIT, 0x300A, FLines);
+ if (ret) {
+ v4l2_err(client, "%s: fail to set FLines\n", __func__);
+ return -EINVAL;
+ }
+
+ /* set coarse integration */
+ /*
+ * The 3A algorithm provides the real exposure time; it should not
+ * be translated to any other value here.
+ */
+ ret = mt9m114_write_reg(client, MISENSOR_16BIT,
+ REG_EXPO_COARSE, (u16)(coarse_integration));
+ if (ret) {
+ v4l2_err(client, "%s: fail to set exposure time\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ // set analog/digital gain
+ switch(AnalogGain)
+ {
+ case 0:
+ AnalogGainToWrite = 0x0;
+ break;
+ case 1:
+ AnalogGainToWrite = 0x20;
+ break;
+ case 2:
+ AnalogGainToWrite = 0x60;
+ break;
+ case 4:
+ AnalogGainToWrite = 0xA0;
+ break;
+ case 8:
+ AnalogGainToWrite = 0xE0;
+ break;
+ default:
+ AnalogGainToWrite = 0x20;
+ break;
+ }
+ */
+ if (DigitalGain >= 16 || DigitalGain <= 1)
+ DigitalGain = 1;
+ /* AnalogGainToWrite =
+ (u16)((DigitalGain << 12) | AnalogGainToWrite); */
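+ /* REG_GAIN packs the digital gain in bits 15:12 and the analog gain in the low bits. */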
+ AnalogGainToWrite = (u16)((DigitalGain << 12) | (u16)AnalogGain);
+ ret = mt9m114_write_reg(client, MISENSOR_16BIT,
+ REG_GAIN, AnalogGainToWrite);
+ if (ret) {
+ v4l2_err(client, "%s: fail to set AnalogGainToWrite\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static long mt9m114_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return mt9m114_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int mt9m114_g_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u32 coarse;
+ int ret;
+
+ /* the fine integration time is currently not calculated */
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_EXPO_COARSE, &coarse);
+ if (ret)
+ return ret;
+
+ *value = coarse;
+ return 0;
+}
+
+/*
+ * Return the maximum number of exposure zones the sensor supports.
+ * For this sensor it is 1.
+ */
+static int mt9m114_g_exposure_zone_num(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = 1;
+
+ return 0;
+}
+
+/*
+ * Set exposure metering: average/center-weighted/spot/matrix.
+ */
+static int mt9m114_s_exposure_metering(struct v4l2_subdev *sd, s32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ switch (val) {
+ case V4L2_EXPOSURE_METERING_SPOT:
+ ret = mt9m114_write_reg_array(client, mt9m114_exp_average,
+ NO_POLLING);
+ if (ret) {
+ dev_err(&client->dev, "write exp_average reg err.\n");
+ return ret;
+ }
+ break;
+ case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
+ default:
+ ret = mt9m114_write_reg_array(client, mt9m114_exp_center,
+ NO_POLLING);
+ if (ret) {
+ dev_err(&client->dev, "write exp_default reg err");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function implements the touch-exposure feature: the selection
+ * rectangle is mapped onto the sensor's 5x5 metering grid.
+ */
+static int mt9m114_s_exposure_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct misensor_reg exp_reg;
+ int width, height;
+ int grid_width, grid_height;
+ int grid_left, grid_top, grid_right, grid_bottom;
+ int win_left, win_top, win_right, win_bottom;
+ int i, j;
+ int ret;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
+ sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ grid_left = sel->r.left;
+ grid_top = sel->r.top;
+ grid_right = sel->r.left + sel->r.width - 1;
+ grid_bottom = sel->r.top + sel->r.height - 1;
+
+ ret = mt9m114_res2size(sd, &width, &height);
+ if (ret)
+ return ret;
+
+ grid_width = width / 5;
+ grid_height = height / 5;
+
+ if (grid_width && grid_height) {
+ win_left = grid_left / grid_width;
+ win_top = grid_top / grid_height;
+ win_right = grid_right / grid_width;
+ win_bottom = grid_bottom / grid_height;
+ } else {
+ dev_err(&client->dev, "Incorrect exp grid.\n");
+ return -EINVAL;
+ }
+
+ win_left = clamp_t(int, win_left, 0, 4);
+ win_top = clamp_t(int, win_top, 0, 4);
+ win_right = clamp_t(int, win_right, 0, 4);
+ win_bottom = clamp_t(int, win_bottom, 0, 4);
+
+ ret = mt9m114_write_reg_array(client, mt9m114_exp_average, NO_POLLING);
+ if (ret) {
+ dev_err(&client->dev, "write exp_average reg err.\n");
+ return ret;
+ }
+
+ for (i = win_top; i <= win_bottom; i++) {
+ for (j = win_left; j <= win_right; j++) {
+ exp_reg = mt9m114_exp_win[i][j];
+
+ ret = mt9m114_write_reg(client, exp_reg.length,
+ exp_reg.reg, exp_reg.val);
+ if (ret) {
+ dev_err(&client->dev, "write exp_reg err.\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mt9m114_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ *val = mt9m114_res[dev->res].bin_factor_x;
+
+ return 0;
+}
+
+static int mt9m114_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ *val = mt9m114_res[dev->res].bin_factor_y;
+
+ return 0;
+}
+
+static int mt9m114_s_ev(struct v4l2_subdev *sd, s32 val)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ s32 luma = 0x37;
+ int err;
+
+ /*
+ * The EV value only supports -2 to 2:
+ * 0: 0x37, 1: 0x47, 2: 0x57, -1: 0x27, -2: 0x17
+ */
+ if (val < -2 || val > 2)
+ return -EINVAL;
+ luma += 0x10 * val;
+ dev_dbg(&c->dev, "%s val:%d luma:0x%x\n", __func__, val, luma);
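+ /*
+ * Select the target-average-luma variable (0xC87A) through the
+ * LOGICAL_ADDRESS_ACCESS register (0x098E) before writing it.
+ */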
+ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A);
+ if (err) {
+ dev_err(&c->dev, "%s logic addr access error\n", __func__);
+ return err;
+ }
+ err = mt9m114_write_reg(c, MISENSOR_8BIT, 0xC87A, (u32)luma);
+ if (err) {
+ dev_err(&c->dev, "%s write target_average_luma failed\n",
+ __func__);
+ return err;
+ }
+ udelay(10);
+
+ return 0;
+}
+
+static int mt9m114_g_ev(struct v4l2_subdev *sd, s32 *val)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ int err;
+ u32 luma;
+
+ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A);
+ if (err) {
+ dev_err(&c->dev, "%s logic addr access error\n", __func__);
+ return err;
+ }
+ err = mt9m114_read_reg(c, MISENSOR_8BIT, 0xC87A, &luma);
+ if (err) {
+ dev_err(&c->dev, "%s read target_average_luma failed\n",
+ __func__);
+ return err;
+ }
+ luma -= 0x17;
+ luma /= 0x10;
+ *val = (s32)luma - 2;
+ dev_dbg(&c->dev, "%s val:%d\n", __func__, *val);
+
+ return 0;
+}
+
+/*
+ * Fake interface: the mt9m114 cannot support 3A lock yet.
+ */
+static int mt9m114_s_3a_lock(struct v4l2_subdev *sd, s32 val)
+{
+ aaalock = val;
+ return 0;
+}
+
+static int mt9m114_g_3a_lock(struct v4l2_subdev *sd, s32 *val)
+{
+ if (aaalock)
+ return V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE
+ | V4L2_LOCK_FOCUS;
+ return 0;
+}
+
+static int mt9m114_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9m114_device *dev =
+ container_of(ctrl->handler, struct mt9m114_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = mt9m114_t_vflip(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = mt9m114_t_hflip(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_METERING:
+ ret = mt9m114_s_exposure_metering(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = mt9m114_s_ev(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_3A_LOCK:
+ ret = mt9m114_s_3a_lock(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int mt9m114_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9m114_device *dev =
+ container_of(ctrl->handler, struct mt9m114_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ ret = mt9m114_g_vflip(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ ret = mt9m114_g_hflip(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = mt9m114_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = mt9m114_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = mt9m114_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = mt9m114_g_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_ZONE_NUM:
+ ret = mt9m114_g_exposure_zone_num(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ ret = mt9m114_g_bin_factor_x(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_VERT:
+ ret = mt9m114_g_bin_factor_y(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = mt9m114_g_ev(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_3A_LOCK:
+ ret = mt9m114_g_3a_lock(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = mt9m114_s_ctrl,
+ .g_volatile_ctrl = mt9m114_g_volatile_ctrl
+};
+
+static struct v4l2_ctrl_config mt9m114_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VFLIP,
+ .name = "Image v-Flip",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HFLIP,
+ .name = "Image h-Flip",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .name = "focal length",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = MT9M114_FOCAL_LENGTH_DEFAULT,
+ .max = MT9M114_FOCAL_LENGTH_DEFAULT,
+ .step = 1,
+ .def = MT9M114_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .name = "f-number",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = MT9M114_F_NUMBER_DEFAULT,
+ .max = MT9M114_F_NUMBER_DEFAULT,
+ .step = 1,
+ .def = MT9M114_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .name = "f-number range",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = MT9M114_F_NUMBER_RANGE,
+ .max = MT9M114_F_NUMBER_RANGE,
+ .step = 1,
+ .def = MT9M114_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .name = "exposure",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xffff,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ZONE_NUM,
+ .name = "one-time exposure zone number",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xffff,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_METERING,
+ .name = "metering",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 3,
+ .step = 0,
+ .def = 1,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .name = "horizontal binning factor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = MT9M114_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .name = "vertical binning factor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = MT9M114_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE,
+ .name = "exposure biasx",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -2,
+ .max = 2,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_3A_LOCK,
+ .name = "3a lock",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE | V4L2_LOCK_FOCUS,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+};
+
+static int mt9m114_detect(struct mt9m114_device *dev, struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u32 retvalue;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "%s: i2c error", __func__);
+ return -ENODEV;
+ }
+ mt9m114_read_reg(client, MISENSOR_16BIT, (u32)MT9M114_PID, &retvalue);
+ dev->real_model_id = retvalue;
+
+ if (retvalue != MT9M114_MOD_ID) {
+ dev_err(&client->dev, "%s: failed: client->addr = %x\n",
+ __func__, client->addr);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+mt9m114_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ ret = power_up(sd);
+ if (ret) {
+ v4l2_err(client, "mt9m114 power-up err");
+ return ret;
+ }
+
+ /* config & detect sensor */
+ ret = mt9m114_detect(dev, client);
+ if (ret) {
+ v4l2_err(client, "mt9m114_detect err s_config.\n");
+ goto fail_detect;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ ret = mt9m114_set_suspend(sd);
+ if (ret) {
+ v4l2_err(client, "mt9m114 suspend err");
+ return ret;
+ }
+
+ ret = power_down(sd);
+ if (ret) {
+ v4l2_err(client, "mt9m114 power down err");
+ return ret;
+ }
+
+ return ret;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_detect:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+ return ret;
+}
+
+/* Horizontal flip the image. */
+static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ int err;
+ /* set for direct mode */
+ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850);
+ if (value) {
+ /* enable H flip ctx A */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x01);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x01);
+ /* ctx B */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x01);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x01);
+
+ err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_HFLIP_MASK, MISENSOR_FLIP_EN);
+
+ dev->bpat = MT9M114_BPAT_GRGRBGBG;
+ } else {
+ /* disable H flip ctx A */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x00);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x00);
+ /* ctx B */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x00);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x00);
+
+ err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_HFLIP_MASK, MISENSOR_FLIP_DIS);
+
+ dev->bpat = MT9M114_BPAT_BGBGGRGR;
+ }
+
+ err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06);
+ udelay(10);
+
+ return !!err;
+}
+
+/* Vertically flip the image */
+static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ int err;
+ /* set for direct mode */
+ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850);
+ if (value >= 1) {
+ /* enable V flip - ctx A */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x01);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x01);
+ /* ctx B */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x01);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x01);
+
+ err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_VFLIP_MASK, MISENSOR_FLIP_EN);
+ } else {
+ /* disable V flip - ctx A */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x00);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x00);
+ /* ctx B */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x00);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x00);
+
+ err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_VFLIP_MASK, MISENSOR_FLIP_DIS);
+ }
+
+ err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06);
+ udelay(10);
+
+ return !!err;
+}
+
+static int mt9m114_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = mt9m114_res[dev->res].fps;
+
+ return 0;
+}
+
+static int mt9m114_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int ret;
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct atomisp_exposure exposure;
+
+ if (enable) {
+ ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg,
+ POST_POLLING);
+ if (ret < 0)
+ return ret;
+
+ if (dev->first_exp > MT9M114_MAX_FIRST_EXP) {
+ exposure.integration_time[0] = dev->first_exp;
+ exposure.gain[0] = dev->first_gain;
+ exposure.gain[1] = dev->first_diggain;
+ mt9m114_s_exposure(sd, &exposure);
+ }
+ dev->streamon = 1;
+
+ } else {
+ dev->streamon = 0;
+ ret = mt9m114_set_suspend(sd);
+ }
+
+ return ret;
+}
+
+static int mt9m114_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index)
+ return -EINVAL;
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int mt9m114_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ unsigned int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = mt9m114_res[index].width;
+ fse->min_height = mt9m114_res[index].height;
+ fse->max_width = mt9m114_res[index].width;
+ fse->max_height = mt9m114_res[index].height;
+
+ return 0;
+}
+
+static int mt9m114_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ int index;
+ struct mt9m114_device *snr = to_mt9m114_sensor(sd);
+
+ if (!frames)
+ return -EINVAL;
+
+ for (index = 0; index < N_RES; index++) {
+ if (mt9m114_res[index].res == snr->res)
+ break;
+ }
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ *frames = mt9m114_res[index].skip_frames;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops mt9m114_video_ops = {
+ .s_stream = mt9m114_s_stream,
+ .g_frame_interval = mt9m114_g_frame_interval,
+};
+
+static const struct v4l2_subdev_sensor_ops mt9m114_sensor_ops = {
+ .g_skip_frames = mt9m114_g_skip_frames,
+};
+
+static const struct v4l2_subdev_core_ops mt9m114_core_ops = {
+ .s_power = mt9m114_s_power,
+ .ioctl = mt9m114_ioctl,
+};
+
+/* REVISIT: Do we need pad operations? */
+static const struct v4l2_subdev_pad_ops mt9m114_pad_ops = {
+ .enum_mbus_code = mt9m114_enum_mbus_code,
+ .enum_frame_size = mt9m114_enum_frame_size,
+ .get_fmt = mt9m114_get_fmt,
+ .set_fmt = mt9m114_set_fmt,
+ .set_selection = mt9m114_s_exposure_selection,
+};
+
+static const struct v4l2_subdev_ops mt9m114_ops = {
+ .core = &mt9m114_core_ops,
+ .video = &mt9m114_video_ops,
+ .pad = &mt9m114_pad_ops,
+ .sensor = &mt9m114_sensor_ops,
+};
+
+static int mt9m114_remove(struct i2c_client *client)
+{
+ struct mt9m114_device *dev;
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ dev = container_of(sd, struct mt9m114_device, sd);
+ dev->platform_data->csi_cfg(sd, 0);
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+ return 0;
+}
+
+static int mt9m114_probe(struct i2c_client *client)
+{
+ struct mt9m114_device *dev;
+ int ret = 0;
+ unsigned int i;
+ void *pdata;
+ acpi_handle handle;
+ struct acpi_device *adev;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+ pr_info("%s: ACPI detected it on bus ID=%s, HID=%s\n",
+ __func__, acpi_device_bid(adev), acpi_device_hid(adev));
+ // FIXME: may need to release resources allocated by acpi_bus_get_device()
+
+ /* Setup sensor configuration structure */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&dev->sd, client, &mt9m114_ops);
+ pdata = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_grbg);
+ if (pdata)
+ ret = mt9m114_s_config(&dev->sd, client->irq, pdata);
+ if (!pdata || ret) {
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+ }
+
+ ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
+ if (ret) {
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ /* Coverity CID 298095 - return on error */
+ return ret;
+ }
+
+ /*TODO add format code here*/
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
+ ARRAY_SIZE(mt9m114_controls));
+ if (ret) {
+ mt9m114_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt9m114_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &mt9m114_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ mt9m114_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ /* REVISIT: Do we need media controller? */
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret) {
+ mt9m114_remove(client);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct acpi_device_id mt9m114_acpi_match[] = {
+ { "INT33F0" },
+ { "CRMT1040" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, mt9m114_acpi_match);
+
+static struct i2c_driver mt9m114_driver = {
+ .driver = {
+ .name = "mt9m114",
+ .acpi_match_table = mt9m114_acpi_match,
+ },
+ .probe_new = mt9m114_probe,
+ .remove = mt9m114_remove,
+};
+module_i2c_driver(mt9m114_driver);
+
+MODULE_AUTHOR("Shuguang Gong <Shuguang.gong@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
new file mode 100644
index 000000000000..1b60f6a9c0e0
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -0,0 +1,1340 @@
+/*
+ * Support for OmniVision OV2680 1080p HD camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+
+#include "ov2680.h"
+
+static int h_flag;
+static int v_flag;
+static enum atomisp_bayer_order ov2680_bayer_order_mapping[] = {
+ atomisp_bayer_order_bggr,
+ atomisp_bayer_order_grbg,
+ atomisp_bayer_order_gbrg,
+ atomisp_bayer_order_rggb,
+};
+
+/* i2c read/write stuff */
+static int ov2680_read_reg(struct i2c_client *client,
+ int len, u16 reg, u16 *val)
+{
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2] = { reg >> 8, reg & 0xff };
+ u8 data_buf[4] = { 0, };
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
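+ /*
+ * Read into the tail of the zeroed buffer so that
+ * get_unaligned_be32() below yields the right-aligned value.
+ */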
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "read error: reg=0x%4x: %d\n", reg, ret);
+ return -EIO;
+ }
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int ov2680_write_reg(struct i2c_client *client, unsigned int len,
+ u16 reg, u16 val)
+{
+ u8 buf[6];
+ int ret;
+
+ if (len == 2)
+ put_unaligned_be16(val, buf + 2);
+ else if (len == 1)
+ buf[2] = val;
+ else
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2) {
+ dev_err(&client->dev, "write error %d reg 0x%04x, val 0x%02x: buf sent: %*ph\n",
+ ret, reg, val, len + 2, buf);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ov2680_write_reg_array(struct i2c_client *client,
+ const struct ov2680_reg *reglist)
+{
+ const struct ov2680_reg *next = reglist;
+ int ret;
+
+ for (; next->reg != 0; next++) {
+ ret = ov2680_write_reg(client, 1, next->reg, next->val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov2680_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (OV2680_FOCAL_LENGTH_NUM << 16) | OV2680_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int ov2680_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* const f-number for ov2680 */
+
+ *val = (OV2680_F_NUMBER_DEFAULT_NUM << 16) | OV2680_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov2680_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (OV2680_F_NUMBER_DEFAULT_NUM << 24) |
+ (OV2680_F_NUMBER_DEM << 16) |
+ (OV2680_F_NUMBER_DEFAULT_NUM << 8) | OV2680_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov2680_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ dev_dbg(&client->dev, "++++ov2680_g_bin_factor_x\n");
+ *val = ov2680_res[dev->fmt_idx].bin_factor_x;
+
+ return 0;
+}
+
+static int ov2680_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ *val = ov2680_res[dev->fmt_idx].bin_factor_y;
+ dev_dbg(&client->dev, "++++ov2680_g_bin_factor_y\n");
+ return 0;
+}
+
+static int ov2680_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct ov2680_resolution *res)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct atomisp_sensor_mode_data *buf;
+ unsigned int pix_clk_freq_hz;
+ u16 reg_val;
+ int ret;
+
+ dev_dbg(&client->dev, "++++ov2680_get_intg_factor\n");
+ if (!info)
+ return -EINVAL;
+
+ buf = &info->data;
+
+ /* pixel clock */
+ pix_clk_freq_hz = res->pix_clk_freq * 1000000;
+
+ dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+ buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = OV2680_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ OV2680_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = OV2680_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ OV2680_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = OV2680_FINE_INTG_TIME_MIN;
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = ov2680_read_reg(client, 2,
+ OV2680_HORIZONTAL_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = reg_val;
+
+ ret = ov2680_read_reg(client, 2,
+ OV2680_VERTICAL_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = reg_val;
+
+ ret = ov2680_read_reg(client, 2,
+ OV2680_HORIZONTAL_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_end = reg_val;
+
+ ret = ov2680_read_reg(client, 2,
+ OV2680_VERTICAL_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_end = reg_val;
+
+ ret = ov2680_read_reg(client, 2,
+ OV2680_HORIZONTAL_OUTPUT_SIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = reg_val;
+
+ ret = ov2680_read_reg(client, 2,
+ OV2680_VERTICAL_OUTPUT_SIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = reg_val;
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ (res->bin_factor_x * 2) : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ (res->bin_factor_y * 2) : 1;
+ return 0;
+}
+
+static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ u16 vts;
+ int ret, exp_val;
+
+ dev_dbg(&client->dev,
+ "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",
+ coarse_itg, gain, digitgain);
+
+ vts = ov2680_res[dev->fmt_idx].lines_per_frame;
+
+ /* group hold */
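+ /*
+ * Per the OmniVision group-write convention: 0x00 starts group-0
+ * hold, 0x10 ends it, and 0xa0 launches the group at the next frame.
+ */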
+ ret = ov2680_write_reg(client, 1,
+ OV2680_GROUP_ACCESS, 0x00);
+ if (ret) {
+ dev_err(&client->dev, "%s: write 0x%02x: error, aborted\n",
+ __func__, OV2680_GROUP_ACCESS);
+ return ret;
+ }
+
+ /* Increase the VTS to match exposure + MARGIN */
+ if (coarse_itg > vts - OV2680_INTEGRATION_TIME_MARGIN)
+ vts = (u16)coarse_itg + OV2680_INTEGRATION_TIME_MARGIN;
+
+ ret = ov2680_write_reg(client, 2, OV2680_TIMING_VTS_H, vts);
+ if (ret) {
+ dev_err(&client->dev, "%s: write 0x%02x: error, aborted\n",
+ __func__, OV2680_TIMING_VTS_H);
+ return ret;
+ }
+
+ /* set exposure */
+
+ /* The lower four bits should be 0 */
+ exp_val = coarse_itg << 4;
+ ret = ov2680_write_reg(client, 1,
+ OV2680_EXPOSURE_L, exp_val & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write 0x%02x: error, aborted\n",
+ __func__, OV2680_EXPOSURE_L);
+ return ret;
+ }
+
+ ret = ov2680_write_reg(client, 1,
+ OV2680_EXPOSURE_M, (exp_val >> 8) & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write 0x%02x: error, aborted\n",
+ __func__, OV2680_EXPOSURE_M);
+ return ret;
+ }
+
+ ret = ov2680_write_reg(client, 1,
+ OV2680_EXPOSURE_H, (exp_val >> 16) & 0x0F);
+ if (ret) {
+ dev_err(&client->dev, "%s: write 0x%02x: error, aborted\n",
+ __func__, OV2680_EXPOSURE_H);
+ return ret;
+ }
+
+ /* Analog gain */
+ ret = ov2680_write_reg(client, 2, OV2680_AGC_H, gain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write 0x%02x: error, aborted\n",
+ __func__, OV2680_AGC_H);
+ return ret;
+ }
+ /* Digital gain */
+ if (digitgain) {
+ ret = ov2680_write_reg(client, 2,
+ OV2680_MWB_RED_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: write 0x%02x: error, aborted\n",
+ __func__, OV2680_MWB_RED_GAIN_H);
+ return ret;
+ }
+
+ ret = ov2680_write_reg(client, 2,
+ OV2680_MWB_GREEN_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: write 0x%02x: error, aborted\n",
+ __func__, OV2680_MWB_GREEN_GAIN_H);
+ return ret;
+ }
+
+ ret = ov2680_write_reg(client, 2,
+ OV2680_MWB_BLUE_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: write 0x%02x: error, aborted\n",
+ __func__, OV2680_MWB_BLUE_GAIN_H);
+ return ret;
+ }
+ }
+
+ /* End group */
+ ret = ov2680_write_reg(client, 1,
+ OV2680_GROUP_ACCESS, 0x10);
+ if (ret)
+ return ret;
+
+ /* Delay launch group */
+ ret = ov2680_write_reg(client, 1,
+ OV2680_GROUP_ACCESS, 0xa0);
+ if (ret)
+ return ret;
+ return ret;
+}
+
+static int ov2680_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov2680_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long ov2680_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ u16 coarse_itg = exposure->integration_time[0];
+ u16 analog_gain = exposure->gain[0];
+ u16 digital_gain = exposure->gain[1];
+
+ /* an analog gain of 0 is invalid; reject it */
+ if (analog_gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ // EXPOSURE CONTROL DISABLED FOR INITIAL CHECKIN, TUNING DOESN'T WORK
+ return ov2680_set_exposure(sd, coarse_itg, analog_gain, digital_gain);
+}
+
+static long ov2680_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return ov2680_s_exposure(sd, arg);
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int ov2680_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = ov2680_read_reg(client, 1,
+ OV2680_EXPOSURE_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = ov2680_read_reg(client, 1,
+ OV2680_EXPOSURE_M,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+ ret = ov2680_read_reg(client, 1,
+ OV2680_EXPOSURE_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ *value = reg_v + (((u32)reg_v2 << 16));
+err:
+ return ret;
+}
+
+static u32 ov2680_translate_bayer_order(enum atomisp_bayer_order code)
+{
+ switch (code) {
+ case atomisp_bayer_order_rggb:
+ return MEDIA_BUS_FMT_SRGGB10_1X10;
+ case atomisp_bayer_order_grbg:
+ return MEDIA_BUS_FMT_SGRBG10_1X10;
+ case atomisp_bayer_order_bggr:
+ return MEDIA_BUS_FMT_SBGGR10_1X10;
+ case atomisp_bayer_order_gbrg:
+ return MEDIA_BUS_FMT_SGBRG10_1X10;
+ }
+ return 0;
+}
+
+static int ov2680_v_flip(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct camera_mipi_info *ov2680_info = NULL;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u16 val;
+ u8 index;
+
+ dev_dbg(&client->dev, "@%s: value:%d\n", __func__, value);
+ ret = ov2680_read_reg(client, 1, OV2680_FLIP_REG, &val);
+ if (ret)
+ return ret;
+ if (value) {
+ val |= OV2680_FLIP_MIRROR_BIT_ENABLE;
+ } else {
+ val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE;
+ }
+ ret = ov2680_write_reg(client, 1,
+ OV2680_FLIP_REG, val);
+ if (ret)
+ return ret;
+ index = (v_flag > 0 ? OV2680_FLIP_BIT : 0) |
+ (h_flag > 0 ? OV2680_MIRROR_BIT : 0);
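+ /* Flipping changes the sensor's Bayer order; report the new order to the ISP. */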
+ ov2680_info = v4l2_get_subdev_hostdata(sd);
+ if (ov2680_info) {
+ ov2680_info->raw_bayer_order = ov2680_bayer_order_mapping[index];
+ dev->format.code = ov2680_translate_bayer_order(
+ ov2680_info->raw_bayer_order);
+ }
+ return ret;
+}
+
+static int ov2680_h_flip(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct camera_mipi_info *ov2680_info = NULL;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u16 val;
+ u8 index;
+
+ dev_dbg(&client->dev, "@%s: value:%d\n", __func__, value);
+
+ ret = ov2680_read_reg(client, 1, OV2680_MIRROR_REG, &val);
+ if (ret)
+ return ret;
+ if (value) {
+ val |= OV2680_FLIP_MIRROR_BIT_ENABLE;
+ } else {
+ val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE;
+ }
+ ret = ov2680_write_reg(client, 1,
+ OV2680_MIRROR_REG, val);
+ if (ret)
+ return ret;
+ index = (v_flag > 0 ? OV2680_FLIP_BIT : 0) |
+ (h_flag > 0 ? OV2680_MIRROR_BIT : 0);
+ ov2680_info = v4l2_get_subdev_hostdata(sd);
+ if (ov2680_info) {
+ ov2680_info->raw_bayer_order = ov2680_bayer_order_mapping[index];
+ dev->format.code = ov2680_translate_bayer_order(
+ ov2680_info->raw_bayer_order);
+ }
+ return ret;
+}
+
+static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2680_device *dev =
+ container_of(ctrl->handler, struct ov2680_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = ov2680_v_flip(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = ov2680_h_flip(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2680_device *dev =
+ container_of(ctrl->handler, struct ov2680_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = ov2680_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = ov2680_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = ov2680_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = ov2680_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ ret = ov2680_g_bin_factor_x(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_VERT:
+ ret = ov2680_g_bin_factor_y(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = ov2680_s_ctrl,
+ .g_volatile_ctrl = ov2680_g_volatile_ctrl
+};
+
+static const struct v4l2_ctrl_config ov2680_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = OV2680_FOCAL_LENGTH_DEFAULT,
+ .max = OV2680_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = OV2680_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = OV2680_F_NUMBER_DEFAULT,
+ .max = OV2680_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = OV2680_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = OV2680_F_NUMBER_RANGE,
+ .max = OV2680_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = OV2680_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "horizontal binning factor",
+ .min = 0,
+ .max = OV2680_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vertical binning factor",
+ .min = 0,
+ .max = OV2680_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+};
+
+static int ov2680_init_registers(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = ov2680_write_reg(client, 1, OV2680_SW_RESET, 0x01);
+ ret |= ov2680_write_reg_array(client, ov2680_global_setting);
+
+ return ret;
+}
+
+static int ov2680_init(struct v4l2_subdev *sd)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+	int ret;
+ mutex_lock(&dev->input_lock);
+
+ /* restore settings */
+ ov2680_res = ov2680_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ ret = ov2680_init_registers(sd);
+
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = 0;
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+	dev_dbg(&client->dev, "%s: %s\n", __func__, flag ? "on" : "off");
+
+ if (flag) {
+ ret |= dev->platform_data->v1p8_ctrl(sd, 1);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ }
+
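+	/* Power off, or roll the rails back after a partial power-on failure */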
+ if (!flag || ret) {
+ ret |= dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* The OV2680 documents only one GPIO input (#XSHUTDN), but
+ * existing integrations often wire two (reset/power_down)
+ * because that is the way other sensors work. There is no
+ * way to tell how it is wired internally, so existing
+ * firmwares expose both and we drive them symmetrically. */
+ if (flag) {
+ ret = dev->platform_data->gpio0_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ /* Ignore return from second gpio, it may not be there */
+ dev->platform_data->gpio1_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ } else {
+ dev->platform_data->gpio1_ctrl(sd, 0);
+ ret = dev->platform_data->gpio0_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* according to DS, at least 5ms is needed between DOVDD and PWDN */
+ usleep_range(5000, 6000);
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+ }
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* according to DS, 20ms is needed between PWDN and i2c access */
+ msleep(20);
+
+ return 0;
+
+fail_clk:
+ gpio_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ h_flag = 0;
+ v_flag = 0;
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int ov2680_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ if (on == 0) {
+ ret = power_down(sd);
+ } else {
+ ret = power_up(sd);
+ if (!ret)
+ return ov2680_init(sd);
+ }
+ return ret;
+}
+
+/*
+ * distance - calculate the distance between requested and supported size
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the resolution and the requested w/h. Modes with
+ * res->width/height smaller than w/h are not considered.
+ * Returns the gap, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 600
+static int distance(struct ov2680_resolution *res, u32 w, u32 h)
+{
+ unsigned int w_ratio = (res->width << 13) / w;
+ unsigned int h_ratio;
+ int match;
+
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - 8192);
+
+ if ((w_ratio < 8192) || (h_ratio < 8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
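+
+/*
+ * A worked example of the fixed-point math above (8192 == 1.0), with
+ * illustrative numbers: requesting 1600x1200 against a 1616x1216 mode gives
+ *
+ *	w_ratio = (1616 << 13) / 1600 = 8273
+ *	h_ratio = (1216 << 13) / 1200 = 8301
+ *	match   = |(8273 << 13) / 8301 - 8192| = 28
+ *
+ * Both ratios are >= 8192 and 28 <= 600, so the mode qualifies with
+ * distance = 8273 + 8301 = 16574. The threshold of 600 allows roughly a
+ * 600/8192 ~= 7% aspect-ratio mismatch.
+ */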
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ struct ov2680_resolution *tmp_res = NULL;
+
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &ov2680_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != ov2680_res[i].width)
+ continue;
+ if (h != ov2680_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+static int ov2680_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *ov2680_info = NULL;
+ int ret = 0;
+ int idx = 0;
+
+ dev_dbg(&client->dev, "%s: %s: pad: %d, fmt: %p\n",
+ __func__,
+ (format->which == V4L2_SUBDEV_FORMAT_TRY) ? "try" : "set",
+ format->pad, fmt);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ ov2680_info = v4l2_get_subdev_hostdata(sd);
+ if (!ov2680_info)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = ov2680_res[N_RES - 1].width;
+ fmt->height = ov2680_res[N_RES - 1].height;
+ } else {
+ fmt->width = ov2680_res[idx].width;
+ fmt->height = ov2680_res[idx].height;
+ }
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ dev_dbg(&client->dev, "%s: Resolution index: %d\n",
+ __func__, dev->fmt_idx);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+ dev_dbg(&client->dev, "%s: i=%d, w=%d, h=%d\n",
+ __func__, dev->fmt_idx, fmt->width, fmt->height);
+
+	/* FIXME: is this power_up() needed here? */
+ power_up(sd);
+ ret = ov2680_write_reg_array(client, ov2680_res[dev->fmt_idx].regs);
+ if (ret)
+ dev_err(&client->dev,
+ "ov2680 write resolution register err: %d\n", ret);
+
+ ret = ov2680_get_intg_factor(client, ov2680_info,
+ &ov2680_res[dev->fmt_idx]);
+ if (ret) {
+ dev_err(&client->dev, "failed to get integration factor\n");
+ goto err;
+ }
+
+	/*
+	 * Re-apply the flip settings so that the flip registers are not
+	 * overridden by the default mode settings.
+	 */
+ if (h_flag)
+ ov2680_h_flip(sd, h_flag);
+ if (v_flag)
+ ov2680_v_flip(sd, v_flag);
+
+ v4l2_info(client, "\n%s idx %d\n", __func__, dev->fmt_idx);
+
+ /*ret = startup(sd);
+ * if (ret)
+ * dev_err(&client->dev, "ov2680 startup err\n");
+ */
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2680_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = ov2680_res[dev->fmt_idx].width;
+ fmt->height = ov2680_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov2680_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high, low;
+ int ret;
+ u16 id;
+ u8 revision;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = ov2680_read_reg(client, 1,
+ OV2680_SC_CMMN_CHIP_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
+ return -ENODEV;
+ }
+ ret = ov2680_read_reg(client, 1,
+ OV2680_SC_CMMN_CHIP_ID_L, &low);
+ id = ((((u16)high) << 8) | (u16)low);
+
+ if (id != OV2680_ID) {
+ dev_err(&client->dev, "sensor ID error 0x%x\n", id);
+ return -ENODEV;
+ }
+
+ ret = ov2680_read_reg(client, 1,
+ OV2680_SC_CMMN_SUB_ID, &high);
+ revision = (u8)high & 0x0f;
+
+ dev_info(&client->dev, "sensor_revision id = 0x%x, rev= %d\n",
+ id, revision);
+
+ return 0;
+}
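+
+/*
+ * The ID readout above concatenates two 8-bit registers,
+ * id = (high << 8) | low; e.g. reads of 0x26 and 0x80 yield 0x2680,
+ * which is compared against OV2680_ID from ov2680.h. The revision is
+ * the low nibble of SC_CMMN_SUB_ID.
+ */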
+
+static int ov2680_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ if (enable)
+		dev_dbg(&client->dev, "ov2680_s_stream on\n");
+ else
+ dev_dbg(&client->dev, "ov2680_s_stream off\n");
+
+ ret = ov2680_write_reg(client, 1, OV2680_SW_STREAM,
+ enable ? OV2680_START_STREAMING :
+ OV2680_STOP_STREAMING);
+#if 0
+ /* restore settings */
+ ov2680_res = ov2680_res_preview;
+ N_RES = N_RES_PREVIEW;
+#endif
+
+ //otp valid at stream on state
+ //if(!dev->otp_data)
+ // dev->otp_data = ov2680_otp_read(sd);
+
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int ov2680_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+	/*
+	 * Power off the module first, then power it on again later, as the
+	 * first power-on by the board may not fulfill the power-on
+	 * sequence needed by the module.
+	 */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2680 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2680 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = ov2680_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "ov2680_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off sensor, after probed */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2680 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2680_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = ov2680_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = ov2680_res[index].width;
+ fse->min_height = ov2680_res[index].height;
+ fse->max_width = ov2680_res[index].width;
+ fse->max_height = ov2680_res[index].height;
+
+ return 0;
+}
+
+static int ov2680_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = ov2680_res[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov2680_video_ops = {
+ .s_stream = ov2680_s_stream,
+ .g_frame_interval = ov2680_g_frame_interval,
+};
+
+static const struct v4l2_subdev_sensor_ops ov2680_sensor_ops = {
+ .g_skip_frames = ov2680_g_skip_frames,
+};
+
+static const struct v4l2_subdev_core_ops ov2680_core_ops = {
+ .s_power = ov2680_s_power,
+ .ioctl = ov2680_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
+ .enum_mbus_code = ov2680_enum_mbus_code,
+ .enum_frame_size = ov2680_enum_frame_size,
+ .get_fmt = ov2680_get_fmt,
+ .set_fmt = ov2680_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov2680_ops = {
+ .core = &ov2680_core_ops,
+ .video = &ov2680_video_ops,
+ .pad = &ov2680_pad_ops,
+ .sensor = &ov2680_sensor_ops,
+};
+
+static int ov2680_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ dev_dbg(&client->dev, "ov2680_remove...\n");
+
+ dev->platform_data->csi_cfg(sd, 0);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+
+ return 0;
+}
+
+static int ov2680_probe(struct i2c_client *client)
+{
+ struct ov2680_device *dev;
+ int ret;
+ void *pdata;
+ unsigned int i;
+ acpi_handle handle;
+ struct acpi_device *adev;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+ dev_info(&client->dev, "%s: ACPI detected it on bus ID=%s, HID=%s\n",
+ __func__, acpi_device_bid(adev), acpi_device_hid(adev));
+ // FIXME: may need to release resources allocated by acpi_bus_get_device()
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&dev->sd, client, &ov2680_ops);
+
+ pdata = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_bggr);
+ if (!pdata) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ret = ov2680_s_config(&dev->sd, client->irq, pdata);
+ if (ret)
+ goto out_free;
+
+ ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
+ if (ret)
+ goto out_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+	ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
+				     ARRAY_SIZE(ov2680_controls));
+ if (ret) {
+ ov2680_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ov2680_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov2680_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ ov2680_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret) {
+ ov2680_remove(client);
+ dev_dbg(&client->dev, "+++ remove ov2680\n");
+ }
+ return ret;
+out_free:
+ dev_dbg(&client->dev, "+++ out free\n");
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+static const struct acpi_device_id ov2680_acpi_match[] = {
+ {"XXOV2680"},
+ {"OVTI2680"},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ov2680_acpi_match);
+
+static struct i2c_driver ov2680_driver = {
+ .driver = {
+ .name = "ov2680",
+ .acpi_match_table = ov2680_acpi_match,
+ },
+ .probe_new = ov2680_probe,
+ .remove = ov2680_remove,
+};
+module_i2c_driver(ov2680_driver);
+
+MODULE_AUTHOR("Jacky Wang <Jacky_wang@ovt.com>");
+MODULE_DESCRIPTION("A low-level driver for OmniVision 2680 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
new file mode 100644
index 000000000000..718d10f89d5a
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -0,0 +1,1288 @@
+/*
+ * Support for OmniVision OV2722 1080p HD camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include <linux/acpi.h>
+#include <linux/io.h>
+
+#include "ov2722.h"
+
+/* i2c read/write stuff */
+static int ov2722_read_reg(struct i2c_client *client,
+ u16 data_length, u16 reg, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[6];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != OV2722_8BIT && data_length != OV2722_16BIT
+ && data_length != OV2722_32BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8)(reg >> 8);
+ data[1] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == OV2722_8BIT)
+ *val = (u8)data[0];
+ else if (data_length == OV2722_16BIT)
+ *val = be16_to_cpu(*(__be16 *)&data[0]);
+ else
+ *val = be32_to_cpu(*(__be32 *)&data[0]);
+
+ return 0;
+}
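+
+/*
+ * The read above is a plain two-message I2C transaction: one write of the
+ * big-endian 16-bit register address, then a repeated-start read of the
+ * data. For a 16-bit read of register 0x300a (illustrative address):
+ *
+ *	S <addr|W> 0x30 0x0a  Sr <addr|R> <data_h> <data_l> P
+ *
+ * The data also arrives big-endian, hence the be16_to_cpu()/be32_to_cpu()
+ * conversions at the end.
+ */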
+
+static int ov2722_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int ov2722_write_reg(struct i2c_client *client, u16 data_length,
+ u16 reg, u16 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ __be16 *wreg = (__be16 *)data;
+ const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
+
+ if (data_length != OV2722_8BIT && data_length != OV2722_16BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* high byte goes out first */
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == OV2722_8BIT) {
+ data[2] = (u8)(val);
+ } else {
+ /* OV2722_16BIT */
+ __be16 *wdata = (__be16 *)&data[2];
+
+ *wdata = cpu_to_be16(val);
+ }
+
+ ret = ov2722_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+/*
+ * ov2722_write_reg_array - Initializes a list of OV2722 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function initializes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * consecutive data in a single i2c_transfer().
+ *
+ * __ov2722_flush_reg_array(), __ov2722_buf_reg_array() and
+ * __ov2722_write_reg_is_consecutive() are internal functions to
+ * ov2722_write_reg_array() and should not be used anywhere else.
+ *
+ */
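+
+/*
+ * For example (illustrative registers and values), the list
+ *	{ OV2722_8BIT, 0x3800, 0x01 },
+ *	{ OV2722_8BIT, 0x3801, 0x02 },
+ *	{ OV2722_8BIT, 0x3805, 0x03 },
+ * results in two transfers: a single 4-byte write {0x38, 0x00, 0x01, 0x02}
+ * covering the two consecutive registers, then, after a flush, a second
+ * write {0x38, 0x05, 0x03}.
+ */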
+
+static int __ov2722_flush_reg_array(struct i2c_client *client,
+ struct ov2722_write_ctrl *ctrl)
+{
+ u16 size;
+ __be16 *data16 = (void *)&ctrl->buffer.addr;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
+ *data16 = cpu_to_be16(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return ov2722_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __ov2722_buf_reg_array(struct i2c_client *client,
+ struct ov2722_write_ctrl *ctrl,
+ const struct ov2722_reg *next)
+{
+ int size;
+ __be16 *data16;
+
+ switch (next->type) {
+ case OV2722_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case OV2722_16BIT:
+ size = 2;
+ data16 = (void *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+	/*
+	 * If the buffer cannot guarantee free space for the next entry,
+	 * flush it now to avoid running out of room for that item.
+	 */
+ if (ctrl->index + sizeof(u16) >= OV2722_MAX_WRITE_BUF_SIZE)
+ return __ov2722_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int __ov2722_write_reg_is_consecutive(struct i2c_client *client,
+ struct ov2722_write_ctrl *ctrl,
+ const struct ov2722_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int ov2722_write_reg_array(struct i2c_client *client,
+ const struct ov2722_reg *reglist)
+{
+ const struct ov2722_reg *next = reglist;
+ struct ov2722_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != OV2722_TOK_TERM; next++) {
+ switch (next->type & OV2722_TOK_MASK) {
+ case OV2722_TOK_DELAY:
+ err = __ov2722_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+ /*
+ * If next address is not consecutive, data needs to be
+			 * flushed before proceeding.
+ */
+ if (!__ov2722_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __ov2722_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __ov2722_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __ov2722_flush_reg_array(client, &ctrl);
+}
+
+static int ov2722_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (OV2722_FOCAL_LENGTH_NUM << 16) | OV2722_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int ov2722_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+	/* const f-number for the ov2722 */
+ *val = (OV2722_F_NUMBER_DEFAULT_NUM << 16) | OV2722_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov2722_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (OV2722_F_NUMBER_DEFAULT_NUM << 24) |
+ (OV2722_F_NUMBER_DEM << 16) |
+ (OV2722_F_NUMBER_DEFAULT_NUM << 8) | OV2722_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov2722_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct ov2722_resolution *res)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2722_device *dev = NULL;
+ struct atomisp_sensor_mode_data *buf = &info->data;
+ const unsigned int ext_clk_freq_hz = 19200000;
+ const unsigned int pll_invariant_div = 10;
+ unsigned int pix_clk_freq_hz;
+ u16 pre_pll_clk_div;
+ u16 pll_multiplier;
+ u16 op_pix_clk_div;
+ u16 reg_val;
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ dev = to_ov2722_sensor(sd);
+
+	/* pixel clock calculation */
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_PLL_CTRL3, &pre_pll_clk_div);
+ if (ret)
+ return ret;
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_PLL_MULTIPLIER, &pll_multiplier);
+ if (ret)
+ return ret;
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_PLL_DEBUG_OPT, &op_pix_clk_div);
+ if (ret)
+ return ret;
+
+ pre_pll_clk_div = (pre_pll_clk_div & 0x70) >> 4;
+ if (!pre_pll_clk_div)
+ return -EINVAL;
+
+ pll_multiplier = pll_multiplier & 0x7f;
+ op_pix_clk_div = op_pix_clk_div & 0x03;
+ pix_clk_freq_hz = ext_clk_freq_hz / pre_pll_clk_div * pll_multiplier
+ * op_pix_clk_div / pll_invariant_div;
+
+ dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+ buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = OV2722_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ OV2722_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = OV2722_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ OV2722_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = OV2722_FINE_INTG_TIME_MIN;
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_H_CROP_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_V_CROP_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_H_CROP_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_end = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_V_CROP_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_end = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_H_OUTSIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_V_OUTSIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = reg_val;
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
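+
+/*
+ * A worked example of the PLL arithmetic above, with illustrative register
+ * field values: ext_clk = 19.2 MHz, pre_pll_clk_div = 2,
+ * pll_multiplier = 50, op_pix_clk_div = 2:
+ *
+ *	pix_clk = 19200000 / 2 * 50 * 2 / 10 = 96 MHz
+ *
+ * Note the fields are extracted from their registers first: the
+ * pre-divider from bits [6:4], the multiplier from bits [6:0] and the
+ * pixel divider from bits [1:0].
+ */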
+
+static long __ov2722_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ u16 hts, vts;
+ int ret;
+
+ dev_dbg(&client->dev, "set_exposure without group hold\n");
+
+ /* clear VTS_DIFF on manual mode */
+ ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_VTS_DIFF_H, 0);
+ if (ret)
+ return ret;
+
+ hts = dev->pixels_per_line;
+ vts = dev->lines_per_frame;
+
+ if ((coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN) > vts)
+ vts = coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN;
+
+ coarse_itg <<= 4;
+ digitgain <<= 2;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_VTS_H, vts);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_HTS_H, hts);
+ if (ret)
+ return ret;
+
+ /* set exposure */
+ ret = ov2722_write_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_L,
+ coarse_itg & 0xff);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_AEC_PK_EXPO_H,
+ (coarse_itg >> 8) & 0xfff);
+ if (ret)
+ return ret;
+
+ /* set analog gain */
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_AGC_ADJ_H, gain);
+ if (ret)
+ return ret;
+
+ /* set digital gain */
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_MWB_GAIN_R_H, digitgain);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_MWB_GAIN_G_H, digitgain);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_MWB_GAIN_B_H, digitgain);
+
+ return ret;
+}
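+
+/*
+ * The exposure registers carry four fractional bits, hence the
+ * coarse_itg <<= 4 above: e.g. (illustrative value) 0x123 lines becomes
+ * 0x1230, written as AEC_PK_EXPO_L = 0x30 with a 16-bit AEC_PK_EXPO_H
+ * write of 0x012. If the requested time plus the margin exceeds
+ * lines_per_frame, VTS is stretched to fit, lowering the frame rate for
+ * that exposure.
+ */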
+
+static int ov2722_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov2722_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long ov2722_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ int exp = exposure->integration_time[0];
+ int gain = exposure->gain[0];
+ int digitgain = exposure->gain[1];
+
+ /* we should not accept the invalid value below. */
+ if (gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return ov2722_set_exposure(sd, exp, gain, digitgain);
+}
+
+static long ov2722_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return ov2722_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int ov2722_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_M,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ *value = reg_v + (((u32)reg_v2 << 16));
+err:
+ return ret;
+}
+
+static int ov2722_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2722_device *dev =
+ container_of(ctrl->handler, struct ov2722_device, ctrl_handler);
+ int ret = 0;
+ unsigned int val;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = ov2722_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = ov2722_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = ov2722_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = ov2722_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_LINK_FREQ:
+ val = ov2722_res[dev->fmt_idx].mipi_freq;
+ if (val == 0)
+ return -EINVAL;
+
+ ctrl->val = val * 1000; /* To Hz */
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .g_volatile_ctrl = ov2722_g_volatile_ctrl
+};
+
+static const struct v4l2_ctrl_config ov2722_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = OV2722_FOCAL_LENGTH_DEFAULT,
+ .max = OV2722_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = OV2722_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = OV2722_F_NUMBER_DEFAULT,
+ .max = OV2722_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = OV2722_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = OV2722_F_NUMBER_RANGE,
+ .max = OV2722_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = OV2722_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_LINK_FREQ,
+ .name = "Link Frequency",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 1500000 * 1000,
+ .step = 1,
+ .def = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ },
+};
+
+static int ov2722_init(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+
+ /* restore settings */
+ ov2722_res = ov2722_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = -1;
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ if (flag) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ if (ret == 0) {
+ ret = dev->platform_data->v2p8_ctrl(sd, 1);
+ if (ret)
+ dev->platform_data->v1p8_ctrl(sd, 0);
+ }
+ } else {
+ ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ int ret = -1;
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Note: the GPIO order is asymmetric: always RESET#
+ * before PWDN# when turning it on or off.
+ */
+ ret = dev->platform_data->gpio0_ctrl(sd, flag);
+	/*
+	 * The ov2722 PWDN# pin is active when pulled down, opposite to
+	 * the usual convention.
+	 */
+ ret |= dev->platform_data->gpio1_ctrl(sd, !flag);
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* according to DS, at least 5ms is needed between DOVDD and PWDN */
+ usleep_range(5000, 6000);
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+		ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+ }
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* according to DS, 20ms is needed between PWDN and i2c access */
+ msleep(20);
+
+ return 0;
+
+fail_clk:
+ gpio_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int ov2722_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+	if (on == 0)
+		return power_down(sd);
+
+	ret = power_up(sd);
+	if (!ret)
+		return ov2722_init(sd);
+
+	return ret;
+}
+
+/*
+ * distance - calculate the distance between requested and supported size
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the resolution and the requested w/h. Modes with
+ * res->width/height smaller than w/h are not considered.
+ * Returns the gap, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 800
+static int distance(struct ov2722_resolution *res, u32 w, u32 h)
+{
+ unsigned int w_ratio = (res->width << 13) / w;
+ unsigned int h_ratio;
+ int match;
+
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - 8192);
+
+ if ((w_ratio < 8192) || (h_ratio < 8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ struct ov2722_resolution *tmp_res = NULL;
+
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &ov2722_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != ov2722_res[i].width)
+ continue;
+ if (h != ov2722_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+/* TODO: remove it. */
+static int startup(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ ret = ov2722_write_reg(client, OV2722_8BIT,
+ OV2722_SW_RESET, 0x01);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 reset err.\n");
+ return ret;
+ }
+
+ ret = ov2722_write_reg_array(client, ov2722_res[dev->fmt_idx].regs);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 write register err.\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ov2722_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *ov2722_info = NULL;
+ int ret = 0;
+ int idx;
+
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+ ov2722_info = v4l2_get_subdev_hostdata(sd);
+ if (!ov2722_info)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = ov2722_res[N_RES - 1].width;
+ fmt->height = ov2722_res[N_RES - 1].height;
+ } else {
+ fmt->width = ov2722_res[idx].width;
+ fmt->height = ov2722_res[idx].height;
+ }
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ dev->pixels_per_line = ov2722_res[dev->fmt_idx].pixels_per_line;
+ dev->lines_per_frame = ov2722_res[dev->fmt_idx].lines_per_frame;
+
+ ret = startup(sd);
+ if (ret) {
+ int i = 0;
+
+ dev_err(&client->dev, "ov2722 startup err, retry to power up\n");
+ for (i = 0; i < OV2722_POWER_UP_RETRY_NUM; i++) {
+ dev_err(&client->dev,
+ "ov2722 retry to power up %d/%d times, result: ",
+ i + 1, OV2722_POWER_UP_RETRY_NUM);
+ power_down(sd);
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "power up failed, continue\n");
+ continue;
+ }
+ ret = startup(sd);
+ if (ret) {
+ dev_err(&client->dev, " startup FAILED!\n");
+ } else {
+ dev_err(&client->dev, " startup SUCCESS!\n");
+ break;
+ }
+ }
+ if (ret) {
+ dev_err(&client->dev, "ov2722 startup err\n");
+ goto err;
+ }
+ }
+
+ ret = ov2722_get_intg_factor(client, ov2722_info,
+ &ov2722_res[dev->fmt_idx]);
+ if (ret)
+ dev_err(&client->dev, "failed to get integration_factor\n");
+
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2722_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = ov2722_res[dev->fmt_idx].width;
+ fmt->height = ov2722_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov2722_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high, low;
+ int ret;
+ u16 id;
+ u8 revision;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_CHIP_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
+ return -ENODEV;
+ }
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_CHIP_ID_L, &low);
+ id = (high << 8) | low;
+
+ if ((id != OV2722_ID) && (id != OV2720_ID)) {
+ dev_err(&client->dev, "sensor ID error\n");
+ return -ENODEV;
+ }
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_SUB_ID, &high);
+ revision = (u8)high & 0x0f;
+
+ dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision);
+ dev_dbg(&client->dev, "detect ov2722 success\n");
+ return 0;
+}
+
+static int ov2722_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+
+ ret = ov2722_write_reg(client, OV2722_8BIT, OV2722_SW_STREAM,
+ enable ? OV2722_START_STREAMING :
+ OV2722_STOP_STREAMING);
+
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2722_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+
+	/*
+	 * Power off the module first, then power it on again later, as the
+	 * first power-on by the board may not fulfill the power-on
+	 * sequence needed by the module.
+	 */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = ov2722_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "ov2722_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off sensor, after probed */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2722_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = ov2722_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int ov2722_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int ov2722_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = ov2722_res[index].width;
+ fse->min_height = ov2722_res[index].height;
+ fse->max_width = ov2722_res[index].width;
+ fse->max_height = ov2722_res[index].height;
+
+ return 0;
+}
+
+static int ov2722_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = ov2722_res[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops ov2722_sensor_ops = {
+ .g_skip_frames = ov2722_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops ov2722_video_ops = {
+ .s_stream = ov2722_s_stream,
+ .g_frame_interval = ov2722_g_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops ov2722_core_ops = {
+ .s_power = ov2722_s_power,
+ .ioctl = ov2722_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops ov2722_pad_ops = {
+ .enum_mbus_code = ov2722_enum_mbus_code,
+ .enum_frame_size = ov2722_enum_frame_size,
+ .get_fmt = ov2722_get_fmt,
+ .set_fmt = ov2722_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov2722_ops = {
+ .core = &ov2722_core_ops,
+ .video = &ov2722_video_ops,
+ .pad = &ov2722_pad_ops,
+ .sensor = &ov2722_sensor_ops,
+};
+
+static int ov2722_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ dev_dbg(&client->dev, "ov2722_remove...\n");
+
+ dev->platform_data->csi_cfg(sd, 0);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_device_unregister_subdev(sd);
+
+ atomisp_gmin_remove_subdev(sd);
+
+ media_entity_cleanup(&dev->sd.entity);
+ kfree(dev);
+
+ return 0;
+}
+
+static int __ov2722_init_ctrl_handler(struct ov2722_device *dev)
+{
+ struct v4l2_ctrl_handler *hdl;
+ unsigned int i;
+
+ hdl = &dev->ctrl_handler;
+ v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ov2722_controls));
+ for (i = 0; i < ARRAY_SIZE(ov2722_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov2722_controls[i],
+ NULL);
+
+ dev->link_freq = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_LINK_FREQ);
+
+ if (dev->ctrl_handler.error || !dev->link_freq)
+ return dev->ctrl_handler.error;
+
+ dev->sd.ctrl_handler = hdl;
+
+ return 0;
+}
+
+static int ov2722_probe(struct i2c_client *client)
+{
+ struct ov2722_device *dev;
+ void *ovpdev;
+ int ret;
+ acpi_handle handle;
+ struct acpi_device *adev;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+ pr_info("%s: ACPI detected it on bus ID=%s, HID=%s\n",
+ __func__, acpi_device_bid(adev), acpi_device_hid(adev));
+ // FIXME: may need to release resources allocated by acpi_bus_get_device()
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&dev->sd, client, &ov2722_ops);
+
+ ovpdev = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_grbg);
+	if (!ovpdev) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+
+ ret = ov2722_s_config(&dev->sd, client->irq, ovpdev);
+ if (ret)
+ goto out_free;
+
+ ret = __ov2722_init_ctrl_handler(dev);
+ if (ret)
+ goto out_ctrl_handler_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret)
+ ov2722_remove(client);
+
+ return atomisp_register_i2c_module(&dev->sd, ovpdev, RAW_CAMERA);
+
+out_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+static const struct acpi_device_id ov2722_acpi_match[] = {
+ { "INT33FB" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ov2722_acpi_match);
+
+static struct i2c_driver ov2722_driver = {
+ .driver = {
+ .name = "ov2722",
+ .acpi_match_table = ov2722_acpi_match,
+ },
+ .probe_new = ov2722_probe,
+ .remove = ov2722_remove,
+};
+module_i2c_driver(ov2722_driver);
+
+MODULE_AUTHOR("Wei Liu <wei.liu@intel.com>");
+MODULE_DESCRIPTION("A low-level driver for OmniVision 2722 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
new file mode 100644
index 000000000000..12f746e7828b
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/gc0310.h
@@ -0,0 +1,404 @@
+/*
+ * Support for GalaxyCore GC0310 VGA camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __GC0310_H__
+#define __GC0310_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 1
+#define I2C_RETRY_COUNT 5
+
+#define GC0310_FOCAL_LENGTH_NUM 278 /*2.78mm*/
+#define GC0310_FOCAL_LENGTH_DEM 100
+#define GC0310_F_NUMBER_DEFAULT_NUM 26
+#define GC0310_F_NUMBER_DEM 10
+
+#define MAX_FMTS 1
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC0310_FOCAL_LENGTH_DEFAULT 0x1160064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC0310_F_NUMBER_DEFAULT 0x1a000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define GC0310_F_NUMBER_RANGE 0x1a0a1a0a
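+
+/*
+ * Decoded, the packed values above read:
+ *	GC0310_FOCAL_LENGTH_DEFAULT 0x1160064  -> 0x116/0x64 = 278/100 = 2.78mm
+ *	GC0310_F_NUMBER_DEFAULT     0x1a000a   -> 0x1a/0x0a = 26/10 = f/2.6
+ *	GC0310_F_NUMBER_RANGE       0x1a0a1a0a -> max 26/10, min 26/10 (fixed f/2.6)
+ */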
+#define GC0310_ID 0xa310
+
+#define GC0310_RESET_RELATED 0xFE
+#define GC0310_REGISTER_PAGE_0 0x0
+#define GC0310_REGISTER_PAGE_3 0x3
+
+#define GC0310_FINE_INTG_TIME_MIN 0
+#define GC0310_FINE_INTG_TIME_MAX_MARGIN 0
+#define GC0310_COARSE_INTG_TIME_MIN 1
+#define GC0310_COARSE_INTG_TIME_MAX_MARGIN 6
+
+/*
+ * GC0310 System control registers
+ */
+#define GC0310_SW_STREAM 0x10
+
+#define GC0310_SC_CMMN_CHIP_ID_H 0xf0
+#define GC0310_SC_CMMN_CHIP_ID_L 0xf1
+
+#define GC0310_AEC_PK_EXPO_H 0x03
+#define GC0310_AEC_PK_EXPO_L 0x04
+#define GC0310_AGC_ADJ 0x48
+#define GC0310_DGC_ADJ 0x71
+#if 0
+#define GC0310_GROUP_ACCESS 0x3208
+#endif
+
+#define GC0310_H_CROP_START_H 0x09
+#define GC0310_H_CROP_START_L 0x0A
+#define GC0310_V_CROP_START_H 0x0B
+#define GC0310_V_CROP_START_L 0x0C
+#define GC0310_H_OUTSIZE_H 0x0F
+#define GC0310_H_OUTSIZE_L 0x10
+#define GC0310_V_OUTSIZE_H 0x0D
+#define GC0310_V_OUTSIZE_L 0x0E
+#define GC0310_H_BLANKING_H 0x05
+#define GC0310_H_BLANKING_L 0x06
+#define GC0310_V_BLANKING_H 0x07
+#define GC0310_V_BLANKING_L 0x08
+#define GC0310_SH_DELAY 0x11
+
+#define GC0310_START_STREAMING 0x94 /* 8-bit enable */
+#define GC0310_STOP_STREAMING 0x0 /* 8-bit disable */
+
+#define GC0310_BIN_FACTOR_MAX 3
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct gc0310_resolution {
+ u8 *desc;
+ const struct gc0310_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+};
+
+struct gc0310_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct gc0310_reg *regs;
+};
+
+/*
+ * gc0310 device structure.
+ */
+struct gc0310_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct camera_sensor_platform_data *platform_data;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ u8 res;
+ u8 type;
+};
+
+enum gc0310_tok_type {
+ GC0310_8BIT = 0x0001,
+ GC0310_TOK_TERM = 0xf000, /* terminating token for reg list */
+ GC0310_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ GC0310_TOK_MASK = 0xfff0
+};
+
+/**
+ * struct gc0310_reg - MI sensor register format
+ * @type: type of the register
+ * @reg: 8-bit offset to register
+ * @val: 8-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct gc0310_reg {
+ enum gc0310_tok_type type;
+ u8 reg;
+ u8 val; /* @set value for read/mod/write, @mask */
+};
+
+#define to_gc0310_sensor(x) container_of(x, struct gc0310_device, sd)
+
+#define GC0310_MAX_WRITE_BUF_SIZE 30
+
+struct gc0310_write_buffer {
+ u8 addr;
+ u8 data[GC0310_MAX_WRITE_BUF_SIZE];
+};
+
+struct gc0310_write_ctrl {
+ int index;
+ struct gc0310_write_buffer buffer;
+};
+
+/*
+ * Register settings for various resolution
+ */
+static const struct gc0310_reg gc0310_reset_register[] = {
+/////////////////////////////////////////////////
+///////////////// system reg /////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0xfe, 0xf0},
+ {GC0310_8BIT, 0xfe, 0xf0},
+ {GC0310_8BIT, 0xfe, 0x00},
+
+ {GC0310_8BIT, 0xfc, 0x0e}, //4e
+ {GC0310_8BIT, 0xfc, 0x0e}, //16//4e // [0]apwd [6]regf_clk_gate
+ {GC0310_8BIT, 0xf2, 0x80}, //sync output
+ {GC0310_8BIT, 0xf3, 0x00}, //1f//01 data output
+ {GC0310_8BIT, 0xf7, 0x33}, //f9
+ {GC0310_8BIT, 0xf8, 0x05}, //00
+ {GC0310_8BIT, 0xf9, 0x0e}, // 0x8e //0f
+ {GC0310_8BIT, 0xfa, 0x11},
+
+/////////////////////////////////////////////////
+/////////////////// MIPI ////////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0xfe, 0x03},
+ {GC0310_8BIT, 0x01, 0x03}, ///mipi 1lane
+ {GC0310_8BIT, 0x02, 0x22}, // 0x33
+ {GC0310_8BIT, 0x03, 0x94},
+ {GC0310_8BIT, 0x04, 0x01}, // fifo_prog
+ {GC0310_8BIT, 0x05, 0x00}, //fifo_prog
+ {GC0310_8BIT, 0x06, 0x80}, //b0 //YUV ISP data
+ {GC0310_8BIT, 0x11, 0x2a},//1e //LDI set YUV422
+ {GC0310_8BIT, 0x12, 0x90},//00 //04 //00 //04//00 //LWC[7:0] //
+ {GC0310_8BIT, 0x13, 0x02},//05 //05 //LWC[15:8]
+ {GC0310_8BIT, 0x15, 0x12}, // 0x10 //DPHYY_MODE read_ready
+ {GC0310_8BIT, 0x17, 0x01},
+ {GC0310_8BIT, 0x40, 0x08},
+ {GC0310_8BIT, 0x41, 0x00},
+ {GC0310_8BIT, 0x42, 0x00},
+ {GC0310_8BIT, 0x43, 0x00},
+ {GC0310_8BIT, 0x21, 0x02}, // 0x01
+ {GC0310_8BIT, 0x22, 0x02}, // 0x01
+ {GC0310_8BIT, 0x23, 0x01}, // 0x05 //Nor:0x05 DOU:0x06
+ {GC0310_8BIT, 0x29, 0x00},
+ {GC0310_8BIT, 0x2A, 0x25}, // 0x05 //data zero 0x7a de
+ {GC0310_8BIT, 0x2B, 0x02},
+
+ {GC0310_8BIT, 0xfe, 0x00},
+
+/////////////////////////////////////////////////
+///////////////// CISCTL reg /////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0x00, 0x2f}, //2f//0f//02//01
+ {GC0310_8BIT, 0x01, 0x0f}, //06
+ {GC0310_8BIT, 0x02, 0x04},
+ {GC0310_8BIT, 0x4f, 0x00}, //AEC OFF
+ {GC0310_8BIT, 0x03, 0x01}, // 0x03 //04
+ {GC0310_8BIT, 0x04, 0xc0}, // 0xe8 //58
+ {GC0310_8BIT, 0x05, 0x00},
+ {GC0310_8BIT, 0x06, 0xb2}, // 0x0a //HB
+ {GC0310_8BIT, 0x07, 0x00},
+ {GC0310_8BIT, 0x08, 0x0c}, // 0x89 //VB
+ {GC0310_8BIT, 0x09, 0x00}, //row start
+ {GC0310_8BIT, 0x0a, 0x00}, //
+ {GC0310_8BIT, 0x0b, 0x00}, //col start
+ {GC0310_8BIT, 0x0c, 0x00},
+ {GC0310_8BIT, 0x0d, 0x01}, //height
+ {GC0310_8BIT, 0x0e, 0xf2}, // 0xf7 //height
+ {GC0310_8BIT, 0x0f, 0x02}, //width
+ {GC0310_8BIT, 0x10, 0x94}, // 0xa0 //width
+ {GC0310_8BIT, 0x17, 0x14},
+ {GC0310_8BIT, 0x18, 0x1a}, //0a//[4]double reset
+ {GC0310_8BIT, 0x19, 0x14}, //AD pipeline
+ {GC0310_8BIT, 0x1b, 0x48},
+ {GC0310_8BIT, 0x1e, 0x6b}, //3b//col bias
+ {GC0310_8BIT, 0x1f, 0x28}, //20//00//08//txlow
+ {GC0310_8BIT, 0x20, 0x89}, //88//0c//[3:2]DA15
+ {GC0310_8BIT, 0x21, 0x49}, //48//[3] txhigh
+ {GC0310_8BIT, 0x22, 0xb0},
+ {GC0310_8BIT, 0x23, 0x04}, //[1:0]vcm_r
+ {GC0310_8BIT, 0x24, 0x16}, //15
+ {GC0310_8BIT, 0x34, 0x20}, //[6:4] rsg high//range
+
+/////////////////////////////////////////////////
+//////////////////// BLK ////////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0x26, 0x23}, //[1]dark_current_en [0]offset_en
+ {GC0310_8BIT, 0x28, 0xff}, //BLK_limit_value
+ {GC0310_8BIT, 0x29, 0x00}, //global offset
+ {GC0310_8BIT, 0x33, 0x18}, //offset_ratio
+ {GC0310_8BIT, 0x37, 0x20}, //dark_current_ratio
+ {GC0310_8BIT, 0x2a, 0x00},
+ {GC0310_8BIT, 0x2b, 0x00},
+ {GC0310_8BIT, 0x2c, 0x00},
+ {GC0310_8BIT, 0x2d, 0x00},
+ {GC0310_8BIT, 0x2e, 0x00},
+ {GC0310_8BIT, 0x2f, 0x00},
+ {GC0310_8BIT, 0x30, 0x00},
+ {GC0310_8BIT, 0x31, 0x00},
+ {GC0310_8BIT, 0x47, 0x80}, //a7
+ {GC0310_8BIT, 0x4e, 0x66}, //select_row
+ {GC0310_8BIT, 0xa8, 0x02}, //win_width_dark, same with crop_win_width
+ {GC0310_8BIT, 0xa9, 0x80},
+
+/////////////////////////////////////////////////
+////////////////// ISP reg ///////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0x40, 0x06}, // 0xff //ff //48
+ {GC0310_8BIT, 0x41, 0x00}, // 0x21 //00//[0]curve_en
+ {GC0310_8BIT, 0x42, 0x04}, // 0xcf //0a//[1]awn_en
+ {GC0310_8BIT, 0x44, 0x18}, // 0x18 //02
+ {GC0310_8BIT, 0x46, 0x02}, // 0x03 //sync
+ {GC0310_8BIT, 0x49, 0x03},
+ {GC0310_8BIT, 0x4c, 0x20}, //00[5]protect exp
+ {GC0310_8BIT, 0x50, 0x01}, //crop enable
+ {GC0310_8BIT, 0x51, 0x00},
+ {GC0310_8BIT, 0x52, 0x00},
+ {GC0310_8BIT, 0x53, 0x00},
+ {GC0310_8BIT, 0x54, 0x01},
+ {GC0310_8BIT, 0x55, 0x01}, //crop window height
+ {GC0310_8BIT, 0x56, 0xf0},
+ {GC0310_8BIT, 0x57, 0x02}, //crop window width
+ {GC0310_8BIT, 0x58, 0x90},
+
+/////////////////////////////////////////////////
+/////////////////// GAIN ////////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0x70, 0x70}, //70 //80//global gain
+ {GC0310_8BIT, 0x71, 0x20}, // pregain gain
+ {GC0310_8BIT, 0x72, 0x40}, // post gain
+ {GC0310_8BIT, 0x5a, 0x84}, //84//analog gain 0
+ {GC0310_8BIT, 0x5b, 0xc9}, //c9
+ {GC0310_8BIT, 0x5c, 0xed}, //ed//not use pga gain highest level
+ {GC0310_8BIT, 0x77, 0x40}, // R gain 0x74 //awb gain
+ {GC0310_8BIT, 0x78, 0x40}, // G gain
+ {GC0310_8BIT, 0x79, 0x40}, // B gain 0x5f
+
+ {GC0310_8BIT, 0x48, 0x00},
+ {GC0310_8BIT, 0xfe, 0x01},
+ {GC0310_8BIT, 0x0a, 0x45}, //[7]col gain mode
+
+ {GC0310_8BIT, 0x3e, 0x40},
+ {GC0310_8BIT, 0x3f, 0x5c},
+ {GC0310_8BIT, 0x40, 0x7b},
+ {GC0310_8BIT, 0x41, 0xbd},
+ {GC0310_8BIT, 0x42, 0xf6},
+ {GC0310_8BIT, 0x43, 0x63},
+ {GC0310_8BIT, 0x03, 0x60},
+ {GC0310_8BIT, 0x44, 0x03},
+
+/////////////////////////////////////////////////
+///////////////// dark sun //////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0xfe, 0x01},
+ {GC0310_8BIT, 0x45, 0xa4}, // 0xf7
+ {GC0310_8BIT, 0x46, 0xf0}, // 0xff //f0//sun value th
+ {GC0310_8BIT, 0x48, 0x03}, //sun mode
+ {GC0310_8BIT, 0x4f, 0x60}, //sun_clamp
+ {GC0310_8BIT, 0xfe, 0x00},
+
+ {GC0310_TOK_TERM, 0, 0},
+};
+
+static struct gc0310_reg const gc0310_VGA_30fps[] = {
+ {GC0310_8BIT, 0xfe, 0x00},
+ {GC0310_8BIT, 0x0d, 0x01}, //height
+ {GC0310_8BIT, 0x0e, 0xf2}, // 0xf7 //height
+ {GC0310_8BIT, 0x0f, 0x02}, //width
+ {GC0310_8BIT, 0x10, 0x94}, // 0xa0 //width
+
+ {GC0310_8BIT, 0x50, 0x01}, //crop enable
+ {GC0310_8BIT, 0x51, 0x00},
+ {GC0310_8BIT, 0x52, 0x00},
+ {GC0310_8BIT, 0x53, 0x00},
+ {GC0310_8BIT, 0x54, 0x01},
+ {GC0310_8BIT, 0x55, 0x01}, //crop window height
+ {GC0310_8BIT, 0x56, 0xf0},
+ {GC0310_8BIT, 0x57, 0x02}, //crop window width
+ {GC0310_8BIT, 0x58, 0x90},
+
+ {GC0310_8BIT, 0xfe, 0x03},
+ {GC0310_8BIT, 0x12, 0x90},//00 //04 //00 //04//00 //LWC[7:0] //
+ {GC0310_8BIT, 0x13, 0x02},//05 //05 //LWC[15:8]
+
+ {GC0310_8BIT, 0xfe, 0x00},
+
+ {GC0310_TOK_TERM, 0, 0},
+};
+
+static struct gc0310_resolution gc0310_res_preview[] = {
+ {
+ .desc = "gc0310_VGA_30fps",
+ .width = 656, // 648,
+ .height = 496, // 488,
+ .fps = 30,
+ //.pix_clk_freq = 73,
+ .used = 0,
+#if 0
+ .pixels_per_line = 0x0314,
+ .lines_per_frame = 0x0213,
+#endif
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 2,
+ .regs = gc0310_VGA_30fps,
+ },
+};
+
+#define N_RES_PREVIEW (ARRAY_SIZE(gc0310_res_preview))
+
+static struct gc0310_resolution *gc0310_res = gc0310_res_preview;
+static unsigned long N_RES = N_RES_PREVIEW;
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
new file mode 100644
index 000000000000..bb104de61af9
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -0,0 +1,680 @@
+/*
+ * Support for GalaxyCore GC2235 2M camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ */
+
+#ifndef __GC2235_H__
+#define __GC2235_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+/*
+ * FIXME: non-preview resolutions are currently broken
+ */
+#define ENABLE_NON_PREVIEW 0
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define GC2235_FOCAL_LENGTH_NUM 278 /*2.78mm*/
+#define GC2235_FOCAL_LENGTH_DEM 100
+#define GC2235_F_NUMBER_DEFAULT_NUM 26
+#define GC2235_F_NUMBER_DEM 10
+
+#define MAX_FMTS 1
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC2235_FOCAL_LENGTH_DEFAULT 0x1160064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC2235_F_NUMBER_DEFAULT 0x1a000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define GC2235_F_NUMBER_RANGE 0x1a0a1a0a
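+
+/*
+ * Illustrative compile-time check (kept out of the build) that the packed
+ * defaults above really are numerator << 16 | denominator, e.g.
+ * 278 << 16 | 100 == 0x1160064.
+ */
+#if 0 /* Illustrative sketch, not built */
+static inline void gc2235_check_packed_defaults(void)
+{
+	BUILD_BUG_ON(GC2235_FOCAL_LENGTH_DEFAULT !=
+		     ((GC2235_FOCAL_LENGTH_NUM << 16) | GC2235_FOCAL_LENGTH_DEM));
+	BUILD_BUG_ON(GC2235_F_NUMBER_DEFAULT !=
+		     ((GC2235_F_NUMBER_DEFAULT_NUM << 16) | GC2235_F_NUMBER_DEM));
+	BUILD_BUG_ON(GC2235_F_NUMBER_RANGE !=
+		     ((GC2235_F_NUMBER_DEFAULT_NUM << 24) |
+		      (GC2235_F_NUMBER_DEM << 16) |
+		      (GC2235_F_NUMBER_DEFAULT_NUM << 8) |
+		      GC2235_F_NUMBER_DEM));
+}
+#endif
+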
+#define GC2235_ID 0x2235
+
+#define GC2235_FINE_INTG_TIME_MIN 0
+#define GC2235_FINE_INTG_TIME_MAX_MARGIN 0
+#define GC2235_COARSE_INTG_TIME_MIN 1
+#define GC2235_COARSE_INTG_TIME_MAX_MARGIN 6
+
+/*
+ * GC2235 System control registers
+ */
+#define GC2235_SENSOR_ID_H 0xF0
+#define GC2235_SENSOR_ID_L 0xF1
+#define GC2235_RESET_RELATED 0xFE
+#define GC2235_SW_RESET 0x8
+#define GC2235_MIPI_RESET 0x3
+#define GC2235_RESET_BIT 0x4
+#define GC2235_REGISTER_PAGE_0 0x0
+#define GC2235_REGISTER_PAGE_3 0x3
+
+#define GC2235_V_CROP_START_H 0x91
+#define GC2235_V_CROP_START_L 0x92
+#define GC2235_H_CROP_START_H 0x93
+#define GC2235_H_CROP_START_L 0x94
+#define GC2235_V_OUTSIZE_H 0x95
+#define GC2235_V_OUTSIZE_L 0x96
+#define GC2235_H_OUTSIZE_H 0x97
+#define GC2235_H_OUTSIZE_L 0x98
+
+#define GC2235_HB_H 0x5
+#define GC2235_HB_L 0x6
+#define GC2235_VB_H 0x7
+#define GC2235_VB_L 0x8
+#define GC2235_SH_DELAY_H 0x11
+#define GC2235_SH_DELAY_L 0x12
+
+#define GC2235_CSI2_MODE 0x10
+
+#define GC2235_EXPOSURE_H 0x3
+#define GC2235_EXPOSURE_L 0x4
+#define GC2235_GLOBAL_GAIN 0xB0
+#define GC2235_PRE_GAIN 0xB1
+#define GC2235_AWB_R_GAIN 0xB3
+#define GC2235_AWB_G_GAIN 0xB4
+#define GC2235_AWB_B_GAIN 0xB5
+
+#define GC2235_START_STREAMING 0x91
+#define GC2235_STOP_STREAMING 0x0
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct gc2235_resolution {
+ u8 *desc;
+ const struct gc2235_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+};
+
+struct gc2235_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct gc2235_reg *regs;
+};
+
+/*
+ * gc2235 device structure.
+ */
+struct gc2235_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct camera_sensor_platform_data *platform_data;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ u8 res;
+ u8 type;
+};
+
+enum gc2235_tok_type {
+ GC2235_8BIT = 0x0001,
+ GC2235_16BIT = 0x0002,
+ GC2235_32BIT = 0x0004,
+ GC2235_TOK_TERM = 0xf000, /* terminating token for reg list */
+ GC2235_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ GC2235_TOK_MASK = 0xfff0
+};
+
+/**
+ * struct gc2235_reg - GC2235 sensor register format
+ * @type: type of the register
+ * @reg: 8-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct gc2235_reg {
+ enum gc2235_tok_type type;
+ u8 reg;
+ u32 val; /* value to write, sized per @type (8/16/32-bit) */
+};
+
+#define to_gc2235_sensor(x) container_of(x, struct gc2235_device, sd)
+
+#define GC2235_MAX_WRITE_BUF_SIZE 30
+
+struct gc2235_write_buffer {
+ u8 addr;
+ u8 data[GC2235_MAX_WRITE_BUF_SIZE];
+};
+
+struct gc2235_write_ctrl {
+ int index;
+ struct gc2235_write_buffer buffer;
+};
+
+static struct gc2235_reg const gc2235_stream_on[] = {
+ { GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */
+ { GC2235_8BIT, 0x10, 0x91}, /* start mipi */
+ { GC2235_8BIT, 0xfe, 0x00}, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
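+
+/*
+ * The arrays above rely on 0xfe (GC2235_RESET_RELATED) doubling as the
+ * page-select register. A minimal sketch of paged access, assuming plain
+ * SMBus byte writes (the driver's real I/O helpers may differ):
+ */
+#if 0 /* Illustrative sketch, not built */
+static int gc2235_write_paged_sketch(struct i2c_client *client,
+				     u8 page, u8 reg, u8 val)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(client, GC2235_RESET_RELATED, page);
+	if (ret)
+		return ret;
+	ret = i2c_smbus_write_byte_data(client, reg, val);
+	if (ret)
+		return ret;
+	/* return to page 0 so later unpaged writes land where expected */
+	return i2c_smbus_write_byte_data(client, GC2235_RESET_RELATED,
+					 GC2235_REGISTER_PAGE_0);
+}
+#endif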
+
+static struct gc2235_reg const gc2235_stream_off[] = {
+ { GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */
+ { GC2235_8BIT, 0x10, 0x01}, /* stop mipi */
+ { GC2235_8BIT, 0xfe, 0x00}, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_reg const gc2235_init_settings[] = {
+ /* System */
+ { GC2235_8BIT, 0xfe, 0x80 },
+ { GC2235_8BIT, 0xfe, 0x80 },
+ { GC2235_8BIT, 0xfe, 0x80 },
+ { GC2235_8BIT, 0xf2, 0x00 },
+ { GC2235_8BIT, 0xf6, 0x00 },
+ { GC2235_8BIT, 0xfc, 0x06 },
+ { GC2235_8BIT, 0xf7, 0x15 },
+ { GC2235_8BIT, 0xf8, 0x84 },
+ { GC2235_8BIT, 0xf9, 0xfe },
+ { GC2235_8BIT, 0xfa, 0x00 },
+ { GC2235_8BIT, 0xfe, 0x00 },
+ /* Analog & cisctl */
+ { GC2235_8BIT, 0x03, 0x04 },
+ { GC2235_8BIT, 0x04, 0x9E },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x06, 0xfd },
+ { GC2235_8BIT, 0x07, 0x00 },
+ { GC2235_8BIT, 0x08, 0x14 },
+ { GC2235_8BIT, 0x0a, 0x02 }, /* row start */
+ { GC2235_8BIT, 0x0c, 0x00 }, /* col start */
+ { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */
+ { GC2235_8BIT, 0x0e, 0xd0 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1632 */
+ { GC2235_8BIT, 0x10, 0x60 },
+ { GC2235_8BIT, 0x17, 0x15 }, /* mirror flip */
+ { GC2235_8BIT, 0x18, 0x1a },
+ { GC2235_8BIT, 0x19, 0x06 },
+ { GC2235_8BIT, 0x1a, 0x01 },
+ { GC2235_8BIT, 0x1b, 0x4d },
+ { GC2235_8BIT, 0x1e, 0x88 },
+ { GC2235_8BIT, 0x1f, 0x48 },
+ { GC2235_8BIT, 0x20, 0x03 },
+ { GC2235_8BIT, 0x21, 0x7f },
+ { GC2235_8BIT, 0x22, 0x83 },
+ { GC2235_8BIT, 0x23, 0x42 },
+ { GC2235_8BIT, 0x24, 0x16 },
+ { GC2235_8BIT, 0x26, 0x01 }, /*analog gain*/
+ { GC2235_8BIT, 0x27, 0x30 },
+ { GC2235_8BIT, 0x3f, 0x00 }, /* PRC */
+ /* blk */
+ { GC2235_8BIT, 0x40, 0xa3 },
+ { GC2235_8BIT, 0x41, 0x82 },
+ { GC2235_8BIT, 0x43, 0x20 },
+ { GC2235_8BIT, 0x5e, 0x18 },
+ { GC2235_8BIT, 0x5f, 0x18 },
+ { GC2235_8BIT, 0x60, 0x18 },
+ { GC2235_8BIT, 0x61, 0x18 },
+ { GC2235_8BIT, 0x62, 0x18 },
+ { GC2235_8BIT, 0x63, 0x18 },
+ { GC2235_8BIT, 0x64, 0x18 },
+ { GC2235_8BIT, 0x65, 0x18 },
+ { GC2235_8BIT, 0x66, 0x20 },
+ { GC2235_8BIT, 0x67, 0x20 },
+ { GC2235_8BIT, 0x68, 0x20 },
+ { GC2235_8BIT, 0x69, 0x20 },
+ /* Gain */
+ { GC2235_8BIT, 0xb2, 0x00 },
+ { GC2235_8BIT, 0xb3, 0x40 },
+ { GC2235_8BIT, 0xb4, 0x40 },
+ { GC2235_8BIT, 0xb5, 0x40 },
+ /* Dark sun */
+ { GC2235_8BIT, 0xbc, 0x00 },
+
+ { GC2235_8BIT, 0xfe, 0x03 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+/*
+ * Register settings for various resolutions
+ */
+#if ENABLE_NON_PREVIEW
+static struct gc2235_reg const gc2235_1296_736_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x07, 0x01 }, /* VBI */
+ { GC2235_8BIT, 0x08, 0x44 },
+ { GC2235_8BIT, 0x09, 0x00 }, /* row start */
+ { GC2235_8BIT, 0x0a, 0xf0 },
+ { GC2235_8BIT, 0x0b, 0x00 }, /* col start */
+ { GC2235_8BIT, 0x0c, 0xa0 },
+ { GC2235_8BIT, 0x0d, 0x02 }, /* win height 736 */
+ { GC2235_8BIT, 0x0e, 0xf0 },
+ { GC2235_8BIT, 0x0f, 0x05 }, /* win width: 1296 */
+ { GC2235_8BIT, 0x10, 0x20 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x08 },
+ { GC2235_8BIT, 0x94, 0x08 },
+ { GC2235_8BIT, 0x95, 0x02 }, /* crop win height 736 */
+ { GC2235_8BIT, 0x96, 0xe0 },
+ { GC2235_8BIT, 0x97, 0x05 }, /* crop win width 1296 */
+ { GC2235_8BIT, 0x98, 0x10 },
+ /* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0x54 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x06 }, /* val_high = (width * 10 / 8) >> 8 */
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
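+
+/*
+ * Worked example of the LWC (line word count) arithmetic in the
+ * "set mipi buffer" entries above, for the 1296-wide mode:
+ * 1296 * 10 / 8 = 1620 = 0x654, so reg 0x12 takes 0x54 (low byte) and
+ * reg 0x13 takes 0x06 (high byte) -- matching the table values.
+ */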
+
+static struct gc2235_reg const gc2235_960_640_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x07, 0x02 }, /* VBI */
+ { GC2235_8BIT, 0x08, 0xA4 },
+ { GC2235_8BIT, 0x09, 0x01 }, /* row start */
+ { GC2235_8BIT, 0x0a, 0x18 },
+ { GC2235_8BIT, 0x0b, 0x01 }, /* col start */
+ { GC2235_8BIT, 0x0c, 0x40 },
+ { GC2235_8BIT, 0x0d, 0x02 }, /* win height 656 */
+ { GC2235_8BIT, 0x0e, 0x90 },
+ { GC2235_8BIT, 0x0f, 0x03 }, /* win width: 976 */
+ { GC2235_8BIT, 0x10, 0xd0 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x02 },
+ { GC2235_8BIT, 0x94, 0x06 },
+ { GC2235_8BIT, 0x95, 0x02 }, /* crop win height 640 */
+ { GC2235_8BIT, 0x96, 0x80 },
+ { GC2235_8BIT, 0x97, 0x03 }, /* crop win width 960 */
+ { GC2235_8BIT, 0x98, 0xc0 },
+ /* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xb0 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x04 }, /* val_high = (width * 10 / 8) >> 8 */
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+#endif
+
+static struct gc2235_reg const gc2235_1600_900_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x0d, 0x03 }, /* win height 932 */
+ { GC2235_8BIT, 0x0e, 0xa4 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */
+ { GC2235_8BIT, 0x10, 0x50 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x02 },
+ { GC2235_8BIT, 0x94, 0x06 },
+ { GC2235_8BIT, 0x95, 0x03 }, /* crop win height 900 */
+ { GC2235_8BIT, 0x96, 0x84 },
+ { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1600 */
+ { GC2235_8BIT, 0x98, 0x40 },
+ /* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xd0 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_reg const gc2235_1616_1082_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */
+ { GC2235_8BIT, 0x0e, 0xd0 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */
+ { GC2235_8BIT, 0x10, 0x50 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x4a },
+ { GC2235_8BIT, 0x94, 0x00 },
+ { GC2235_8BIT, 0x95, 0x04 }, /* crop win height 1082 */
+ { GC2235_8BIT, 0x96, 0x3a },
+ { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1616 */
+ { GC2235_8BIT, 0x98, 0x50 },
+ /* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xe4 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_reg const gc2235_1616_1216_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */
+ { GC2235_8BIT, 0x0e, 0xd0 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */
+ { GC2235_8BIT, 0x10, 0x50 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x02 },
+ { GC2235_8BIT, 0x94, 0x00 },
+ { GC2235_8BIT, 0x95, 0x04 }, /* crop win height 1216 */
+ { GC2235_8BIT, 0x96, 0xc0 },
+ { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1616 */
+ { GC2235_8BIT, 0x98, 0x50 },
+ /* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xe4 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_resolution gc2235_res_preview[] = {
+ {
+ .desc = "gc2235_1600_900_30fps",
+ .width = 1600,
+ .height = 900,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1068,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1600_900_30fps,
+ },
+
+ {
+ .desc = "gc2235_1600_1066_30fps",
+ .width = 1616,
+ .height = 1082,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1082_30fps,
+ },
+ {
+ .desc = "gc2235_1600_1200_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1216_30fps,
+ },
+
+};
+
+#define N_RES_PREVIEW (ARRAY_SIZE(gc2235_res_preview))
+
+/*
+ * Disable non-preview configurations until the configuration selection is
+ * improved.
+ */
+#if ENABLE_NON_PREVIEW
+static struct gc2235_resolution gc2235_res_still[] = {
+ {
+ .desc = "gc2235_1600_900_30fps",
+ .width = 1600,
+ .height = 900,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1068,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1600_900_30fps,
+ },
+ {
+ .desc = "gc2235_1600_1066_30fps",
+ .width = 1616,
+ .height = 1082,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1082_30fps,
+ },
+ {
+ .desc = "gc2235_1600_1200_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1216_30fps,
+ },
+
+};
+
+#define N_RES_STILL (ARRAY_SIZE(gc2235_res_still))
+
+static struct gc2235_resolution gc2235_res_video[] = {
+ {
+ .desc = "gc2235_1296_736_30fps",
+ .width = 1296,
+ .height = 736,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1828,
+ .lines_per_frame = 888,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1296_736_30fps,
+ },
+ {
+ .desc = "gc2235_960_640_30fps",
+ .width = 960,
+ .height = 640,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1492,
+ .lines_per_frame = 792,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_960_640_30fps,
+ },
+
+};
+
+#define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video))
+#endif
+
+static struct gc2235_resolution *gc2235_res = gc2235_res_preview;
+static unsigned long N_RES = N_RES_PREVIEW;
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.h b/drivers/staging/media/atomisp/i2c/mt9m114.h
new file mode 100644
index 000000000000..172cec0bb398
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/mt9m114.h
@@ -0,0 +1,1791 @@
+/*
+ * Support for mt9m114 Camera Sensor.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __A1040_H__
+#define __A1040_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include "../include/linux/atomisp_platform.h"
+#include "../include/linux/atomisp.h"
+
+#define V4L2_IDENT_MT9M114 8245
+
+#define MT9P111_REV3
+#define FULLINISUPPORT
+
+/* #defines for register writes and register array processing */
+#define MISENSOR_8BIT 1
+#define MISENSOR_16BIT 2
+#define MISENSOR_32BIT 4
+
+#define MISENSOR_FWBURST0 0x80
+#define MISENSOR_FWBURST1 0x81
+#define MISENSOR_FWBURST4 0x84
+#define MISENSOR_FWBURST 0x88
+
+#define MISENSOR_TOK_TERM 0xf000 /* terminating token for reg list */
+#define MISENSOR_TOK_DELAY 0xfe00 /* delay token for reg list */
+#define MISENSOR_TOK_FWLOAD 0xfd00 /* token indicating load FW */
+#define MISENSOR_TOK_POLL 0xfc00 /* token indicating poll instruction */
+#define MISENSOR_TOK_RMW 0x0010 /* RMW operation */
+#define MISENSOR_TOK_MASK 0xfff0
+#define MISENSOR_AWB_STEADY BIT(0) /* awb steady */
+#define MISENSOR_AE_READY BIT(3) /* ae status ready */
+
+/* mask to set sensor read_mode via misensor_rmw_reg */
+#define MISENSOR_R_MODE_MASK 0x0330
+/* mask to set sensor vert_flip and horz_mirror */
+#define MISENSOR_VFLIP_MASK 0x0002
+#define MISENSOR_HFLIP_MASK 0x0001
+#define MISENSOR_FLIP_EN 1
+#define MISENSOR_FLIP_DIS 0
+
+/* bits set to set sensor read_mode via misensor_rmw_reg */
+#define MISENSOR_SKIPPING_SET 0x0011
+#define MISENSOR_SUMMING_SET 0x0033
+#define MISENSOR_NORMAL_SET 0x0000
+
+/* sensor register that controls sensor read-mode and mirror */
+#define MISENSOR_READ_MODE 0xC834
+/* sensor ae-track status register */
+#define MISENSOR_AE_TRACK_STATUS 0xA800
+/* sensor awb status register */
+#define MISENSOR_AWB_STATUS 0xAC00
+/* sensor coarse integration time register */
+#define MISENSOR_COARSE_INTEGRATION_TIME 0xC83C
+
+/* registers */
+#define REG_SW_RESET 0x301A
+#define REG_SW_STREAM 0xDC00
+#define REG_SCCB_CTRL 0x3100
+#define REG_SC_CMMN_CHIP_ID 0x0000
+#define REG_V_START 0xc800 /* 16bits */
+#define REG_H_START 0xc802 /* 16bits */
+#define REG_V_END 0xc804 /* 16bits */
+#define REG_H_END 0xc806 /* 16bits */
+#define REG_PIXEL_CLK 0xc808 /* 32bits */
+#define REG_TIMING_VTS 0xc812 /* 16bits */
+#define REG_TIMING_HTS 0xc814 /* 16bits */
+#define REG_WIDTH 0xC868 /* 16bits */
+#define REG_HEIGHT 0xC86A /* 16bits */
+#define REG_EXPO_COARSE 0x3012 /* 16bits */
+#define REG_EXPO_FINE 0x3014 /* 16bits */
+#define REG_GAIN 0x305E
+#define REG_ANALOGGAIN 0x305F
+#define REG_ADDR_ACESSS 0x098E /* logical_address_access */
+#define REG_COMM_Register 0x0080 /* command_register */
+
+#define SENSOR_DETECTED 1
+#define SENSOR_NOT_DETECTED 0
+
+#define I2C_RETRY_COUNT 5
+#define MSG_LEN_OFFSET 2
+
+#ifndef MIPI_CONTROL
+#define MIPI_CONTROL 0x3400 /* MIPI_Control */
+#endif
+
+/* GPIO pin on Moorestown */
+#define GPIO_SCLK_25 44
+#define GPIO_STB_PIN 47
+
+#define GPIO_STDBY_PIN 49 /* ab:new */
+#define GPIO_RESET_PIN 50
+
+/* System control register for Aptina A-1040SOC */
+#define MT9M114_PID 0x0
+
+/* MT9P111_DEVICE_ID */
+#define MT9M114_MOD_ID 0x2481
+
+#define MT9M114_FINE_INTG_TIME_MIN 0
+#define MT9M114_FINE_INTG_TIME_MAX_MARGIN 0
+#define MT9M114_COARSE_INTG_TIME_MIN 1
+#define MT9M114_COARSE_INTG_TIME_MAX_MARGIN 6
+
+/* ulBPat; */
+
+#define MT9M114_BPAT_RGRGGBGB BIT(0)
+#define MT9M114_BPAT_GRGRBGBG BIT(1)
+#define MT9M114_BPAT_GBGBRGRG BIT(2)
+#define MT9M114_BPAT_BGBGGRGR BIT(3)
+
+#define MT9M114_FOCAL_LENGTH_NUM 208 /*2.08mm*/
+#define MT9M114_FOCAL_LENGTH_DEM 100
+#define MT9M114_F_NUMBER_DEFAULT_NUM 24
+#define MT9M114_F_NUMBER_DEM 10
+#define MT9M114_WAIT_STAT_TIMEOUT 100
+#define MT9M114_FLICKER_MODE_50HZ 1
+#define MT9M114_FLICKER_MODE_60HZ 2
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define MT9M114_FOCAL_LENGTH_DEFAULT 0xD00064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define MT9M114_F_NUMBER_DEFAULT 0x18000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define MT9M114_F_NUMBER_RANGE 0x180a180a
+
+/* Supported resolutions */
+enum {
+ MT9M114_RES_736P,
+ MT9M114_RES_864P,
+ MT9M114_RES_960P,
+};
+
+#define MT9M114_RES_960P_SIZE_H 1296
+#define MT9M114_RES_960P_SIZE_V 976
+#define MT9M114_RES_720P_SIZE_H 1280
+#define MT9M114_RES_720P_SIZE_V 720
+#define MT9M114_RES_576P_SIZE_H 1024
+#define MT9M114_RES_576P_SIZE_V 576
+#define MT9M114_RES_480P_SIZE_H 768
+#define MT9M114_RES_480P_SIZE_V 480
+#define MT9M114_RES_VGA_SIZE_H 640
+#define MT9M114_RES_VGA_SIZE_V 480
+#define MT9M114_RES_QVGA_SIZE_H 320
+#define MT9M114_RES_QVGA_SIZE_V 240
+#define MT9M114_RES_QCIF_SIZE_H 176
+#define MT9M114_RES_QCIF_SIZE_V 144
+
+#define MT9M114_RES_720_480p_768_SIZE_H 736
+#define MT9M114_RES_720_480p_768_SIZE_V 496
+#define MT9M114_RES_736P_SIZE_H 1296
+#define MT9M114_RES_736P_SIZE_V 736
+#define MT9M114_RES_864P_SIZE_H 1296
+#define MT9M114_RES_864P_SIZE_V 864
+#define MT9M114_RES_976P_SIZE_H 1296
+#define MT9M114_RES_976P_SIZE_V 976
+
+#define MT9M114_BIN_FACTOR_MAX 3
+
+#define MT9M114_DEFAULT_FIRST_EXP 0x10
+#define MT9M114_MAX_FIRST_EXP 0x302
+
+/* completion status polling requirements, usage based on Aptina .INI Rev2 */
+enum poll_reg {
+ NO_POLLING,
+ PRE_POLLING,
+ POST_POLLING,
+};
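+
+/*
+ * A minimal sketch of the status wait that PRE_POLLING/POST_POLLING imply,
+ * assuming a hypothetical misensor_read16() helper; the driver's real poll
+ * routine may differ.
+ */
+#if 0 /* Illustrative sketch, not built */
+static int mt9m114_wait_state_sketch(struct i2c_client *client, u16 reg,
+				     u16 mask, u16 expect)
+{
+	int tries = MT9M114_WAIT_STAT_TIMEOUT;
+	u16 val;
+
+	while (tries--) {
+		if (!misensor_read16(client, reg, &val) && /* hypothetical helper */
+		    (val & mask) == expect)
+			return 0;
+		msleep(20);
+	}
+	return -ETIMEDOUT;
+}
+#endif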
+
+/*
+ * struct misensor_reg - MI sensor register format
+ * @length: access width (MISENSOR_8BIT/16BIT/32BIT)
+ * @reg: 16-bit offset to register
+ * @val: 8/16/32-bit register value, or AND mask for read/mod/write
+ * @val2: optional OR mask for read/mod/write operations
+ * Define a structure for sensor register initialization values
+ */
+struct misensor_reg {
+ u32 length;
+ u32 reg;
+ u32 val; /* value or for read/mod/write, AND mask */
+ u32 val2; /* optional; for rmw, OR mask */
+};
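+
+/*
+ * Illustrative reading of an RMW entry under the masks documented above
+ * (@val as AND mask, @val2 as OR mask); not the driver's actual helper,
+ * and only one plausible interpretation of the mask semantics.
+ */
+#if 0 /* Illustrative sketch, not built */
+static u32 misensor_rmw_value_sketch(u32 old, const struct misensor_reg *r)
+{
+	return (old & r->val) | r->val2; /* keep masked bits, then set */
+}
+#endif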
+
+/*
+ * struct misensor_fwreg - Firmware burst command
+ * @type: FW burst or 8/16 bit register
+ * @addr: 16-bit offset to register or other values depending on type
+ * @valx: data value for burst (or other commands)
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct misensor_fwreg {
+ u32 type; /* type of value, register or FW burst string */
+ u32 addr; /* target address */
+ u32 val0;
+ u32 val1;
+ u32 val2;
+ u32 val3;
+ u32 val4;
+ u32 val5;
+ u32 val6;
+ u32 val7;
+};
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct mt9m114_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+
+ struct camera_sensor_platform_data *platform_data;
+ struct mutex input_lock; /* serialize sensor's ioctl */
+ struct v4l2_ctrl_handler ctrl_handler;
+ int real_model_id;
+ int nctx;
+ int power;
+
+ unsigned int bus_width;
+ unsigned int mode;
+ unsigned int field_inv;
+ unsigned int field_sel;
+ unsigned int ycseq;
+ unsigned int conv422;
+ unsigned int bpat;
+ unsigned int hpol;
+ unsigned int vpol;
+ unsigned int edge;
+ unsigned int bls;
+ unsigned int gamma;
+ unsigned int cconv;
+ unsigned int res;
+ unsigned int dwn_sz;
+ unsigned int blc;
+ unsigned int agc;
+ unsigned int awb;
+ unsigned int aec;
+ /* extension SENSOR version 2 */
+ unsigned int cie_profile;
+
+ /* extension SENSOR version 3 */
+ unsigned int flicker_freq;
+
+ /* extension SENSOR version 4 */
+ unsigned int smia_mode;
+ unsigned int mipi_mode;
+
+ /* Add name here to load shared library */
+ unsigned int type;
+
+ /* Number of MIPI lanes */
+ unsigned int mipi_lanes;
+ /* workaround for low-light AE */
+ unsigned int first_exp;
+ unsigned int first_gain;
+ unsigned int first_diggain;
+ char name[32];
+
+ u8 lightfreq;
+ u8 streamon;
+};
+
+struct mt9m114_format_struct {
+ u8 *desc;
+ u32 pixelformat;
+ struct regval_list *regs;
+};
+
+struct mt9m114_res_struct {
+ u8 *desc;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int skip_frames;
+ bool used;
+ struct regval_list *regs;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+};
+
+/* 2 bytes used for address: 256 bytes total */
+#define MT9M114_MAX_WRITE_BUF_SIZE 254
+struct mt9m114_write_buffer {
+ u16 addr;
+ u8 data[MT9M114_MAX_WRITE_BUF_SIZE];
+};
+
+struct mt9m114_write_ctrl {
+ int index;
+ struct mt9m114_write_buffer buffer;
+};
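+
+/*
+ * Sketch of how the write-control buffer can coalesce writes to
+ * consecutive registers into a single I2C transfer. Hypothetical and
+ * simplified: real code must store @addr in big-endian wire order and
+ * track the next expected register address before appending to @data.
+ */
+#if 0 /* Illustrative sketch, not built */
+static int mt9m114_flush_write_buf_sketch(struct i2c_client *client,
+					  struct mt9m114_write_ctrl *ctrl)
+{
+	struct i2c_msg msg = {
+		.addr = client->addr,
+		.len = sizeof(ctrl->buffer.addr) + ctrl->index,
+		.buf = (u8 *)&ctrl->buffer,
+	};
+	int ret = i2c_transfer(client->adapter, &msg, 1);
+
+	ctrl->index = 0;
+	return ret == 1 ? 0 : -EIO;
+}
+#endif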
+
+/*
+ * Modes supported by the mt9m114 driver; please keep them in ascending
+ * order (a first-fit lookup sketch follows N_RES below).
+ */
+static struct mt9m114_res_struct mt9m114_res[] = {
+ {
+ .desc = "720P",
+ .res = MT9M114_RES_736P,
+ .width = 1296,
+ .height = 736,
+ .fps = 30,
+ .used = false,
+ .regs = NULL,
+ .skip_frames = 1,
+
+ .pixels_per_line = 0x0640,
+ .lines_per_frame = 0x0307,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ },
+ {
+ .desc = "848P",
+ .res = MT9M114_RES_864P,
+ .width = 1296,
+ .height = 864,
+ .fps = 30,
+ .used = false,
+ .regs = NULL,
+ .skip_frames = 1,
+
+ .pixels_per_line = 0x0640,
+ .lines_per_frame = 0x03E8,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ },
+ {
+ .desc = "960P",
+ .res = MT9M114_RES_960P,
+ .width = 1296,
+ .height = 976,
+ .fps = 30,
+ .used = false,
+ .regs = NULL,
+ .skip_frames = 1,
+
+ .pixels_per_line = 0x0644, /* consistent with regs arrays */
+ .lines_per_frame = 0x03E5, /* consistent with regs arrays */
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ },
+};
+
+#define N_RES (ARRAY_SIZE(mt9m114_res))
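+
+/*
+ * The ascending ordering requested above allows a simple first-fit
+ * lookup; a minimal sketch, not the driver's actual selection logic:
+ */
+#if 0 /* Illustrative sketch, not built */
+static struct mt9m114_res_struct *mt9m114_find_res_sketch(int w, int h)
+{
+	unsigned int i;
+
+	for (i = 0; i < N_RES; i++)
+		if (mt9m114_res[i].width >= w && mt9m114_res[i].height >= h)
+			return &mt9m114_res[i];
+
+	return &mt9m114_res[N_RES - 1]; /* fall back to the largest mode */
+}
+#endif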
+
+#if 0 /* Currently unused */
+static struct misensor_reg const mt9m114_exitstandby[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ /* exit-standby */
+ {MISENSOR_8BIT, 0xDC00, 0x54},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+#endif
+
+static struct misensor_reg const mt9m114_exp_win[5][5] = {
+ {
+ {MISENSOR_8BIT, 0xA407, 0x64},
+ {MISENSOR_8BIT, 0xA408, 0x64},
+ {MISENSOR_8BIT, 0xA409, 0x64},
+ {MISENSOR_8BIT, 0xA40A, 0x64},
+ {MISENSOR_8BIT, 0xA40B, 0x64},
+ },
+ {
+ {MISENSOR_8BIT, 0xA40C, 0x64},
+ {MISENSOR_8BIT, 0xA40D, 0x64},
+ {MISENSOR_8BIT, 0xA40E, 0x64},
+ {MISENSOR_8BIT, 0xA40F, 0x64},
+ {MISENSOR_8BIT, 0xA410, 0x64},
+ },
+ {
+ {MISENSOR_8BIT, 0xA411, 0x64},
+ {MISENSOR_8BIT, 0xA412, 0x64},
+ {MISENSOR_8BIT, 0xA413, 0x64},
+ {MISENSOR_8BIT, 0xA414, 0x64},
+ {MISENSOR_8BIT, 0xA415, 0x64},
+ },
+ {
+ {MISENSOR_8BIT, 0xA416, 0x64},
+ {MISENSOR_8BIT, 0xA417, 0x64},
+ {MISENSOR_8BIT, 0xA418, 0x64},
+ {MISENSOR_8BIT, 0xA419, 0x64},
+ {MISENSOR_8BIT, 0xA41A, 0x64},
+ },
+ {
+ {MISENSOR_8BIT, 0xA41B, 0x64},
+ {MISENSOR_8BIT, 0xA41C, 0x64},
+ {MISENSOR_8BIT, 0xA41D, 0x64},
+ {MISENSOR_8BIT, 0xA41E, 0x64},
+ {MISENSOR_8BIT, 0xA41F, 0x64},
+ },
+};
+
+static struct misensor_reg const mt9m114_exp_average[] = {
+ {MISENSOR_8BIT, 0xA407, 0x00},
+ {MISENSOR_8BIT, 0xA408, 0x00},
+ {MISENSOR_8BIT, 0xA409, 0x00},
+ {MISENSOR_8BIT, 0xA40A, 0x00},
+ {MISENSOR_8BIT, 0xA40B, 0x00},
+ {MISENSOR_8BIT, 0xA40C, 0x00},
+ {MISENSOR_8BIT, 0xA40D, 0x00},
+ {MISENSOR_8BIT, 0xA40E, 0x00},
+ {MISENSOR_8BIT, 0xA40F, 0x00},
+ {MISENSOR_8BIT, 0xA410, 0x00},
+ {MISENSOR_8BIT, 0xA411, 0x00},
+ {MISENSOR_8BIT, 0xA412, 0x00},
+ {MISENSOR_8BIT, 0xA413, 0x00},
+ {MISENSOR_8BIT, 0xA414, 0x00},
+ {MISENSOR_8BIT, 0xA415, 0x00},
+ {MISENSOR_8BIT, 0xA416, 0x00},
+ {MISENSOR_8BIT, 0xA417, 0x00},
+ {MISENSOR_8BIT, 0xA418, 0x00},
+ {MISENSOR_8BIT, 0xA419, 0x00},
+ {MISENSOR_8BIT, 0xA41A, 0x00},
+ {MISENSOR_8BIT, 0xA41B, 0x00},
+ {MISENSOR_8BIT, 0xA41C, 0x00},
+ {MISENSOR_8BIT, 0xA41D, 0x00},
+ {MISENSOR_8BIT, 0xA41E, 0x00},
+ {MISENSOR_8BIT, 0xA41F, 0x00},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_exp_center[] = {
+ {MISENSOR_8BIT, 0xA407, 0x19},
+ {MISENSOR_8BIT, 0xA408, 0x19},
+ {MISENSOR_8BIT, 0xA409, 0x19},
+ {MISENSOR_8BIT, 0xA40A, 0x19},
+ {MISENSOR_8BIT, 0xA40B, 0x19},
+ {MISENSOR_8BIT, 0xA40C, 0x19},
+ {MISENSOR_8BIT, 0xA40D, 0x4B},
+ {MISENSOR_8BIT, 0xA40E, 0x4B},
+ {MISENSOR_8BIT, 0xA40F, 0x4B},
+ {MISENSOR_8BIT, 0xA410, 0x19},
+ {MISENSOR_8BIT, 0xA411, 0x19},
+ {MISENSOR_8BIT, 0xA412, 0x4B},
+ {MISENSOR_8BIT, 0xA413, 0x64},
+ {MISENSOR_8BIT, 0xA414, 0x4B},
+ {MISENSOR_8BIT, 0xA415, 0x19},
+ {MISENSOR_8BIT, 0xA416, 0x19},
+ {MISENSOR_8BIT, 0xA417, 0x4B},
+ {MISENSOR_8BIT, 0xA418, 0x4B},
+ {MISENSOR_8BIT, 0xA419, 0x4B},
+ {MISENSOR_8BIT, 0xA41A, 0x19},
+ {MISENSOR_8BIT, 0xA41B, 0x19},
+ {MISENSOR_8BIT, 0xA41C, 0x19},
+ {MISENSOR_8BIT, 0xA41D, 0x19},
+ {MISENSOR_8BIT, 0xA41E, 0x19},
+ {MISENSOR_8BIT, 0xA41F, 0x19},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+#if 0 /* Currently unused */
+static struct misensor_reg const mt9m114_suspend[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x40},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_streaming[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x34},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+#endif
+
+static struct misensor_reg const mt9m114_standby_reg[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x50},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+#if 0 /* Currently unused */
+static struct misensor_reg const mt9m114_wakeup_reg[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x54},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+#endif
+
+static struct misensor_reg const mt9m114_chgstat_reg[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x28},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+/* [1296x976_30fps] - Intel */
+#if 0
+static struct misensor_reg const mt9m114_960P_init[] = {
+ {MISENSOR_16BIT, 0x098E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 296 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC804, 0x03CF}, /* cam_sensor_cfg_y_addr_end = 975 */
+ {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
+ {MISENSOR_16BIT, 0xC808, 0x02DC}, /* cam_sensor_cfg_pixclk = 48000000 */
+ {MISENSOR_16BIT, 0xC80A, 0x6C00},
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* cam_sensor_cfg_fine_integ_time_max = 1459 */
+ {MISENSOR_16BIT, 0xC810, 0x05B3},
+ /* cam_sensor_cfg_frame_length_lines = 1014 */
+ {MISENSOR_16BIT, 0xC812, 0x03F6},
+ /* cam_sensor_cfg_line_length_pck = 1598 */
+ {MISENSOR_16BIT, 0xC814, 0x063E},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ /* cam_sensor_cfg_cpipe_last_row = 963 */
+ {MISENSOR_16BIT, 0xC818, 0x03C3},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
+ {MISENSOR_16BIT, 0xC85A, 0x03C8}, /* cam_crop_window_height = 968 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
+ {MISENSOR_16BIT, 0xC86A, 0x03C8}, /* cam_output_height = 968 */
+ {MISENSOR_TOK_TERM, 0, 0},
+};
+#endif
+
+/* [1296x976_30fps_768Mbps] */
+static struct misensor_reg const mt9m114_976P_init[] = {
+ {MISENSOR_16BIT, 0x98E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 296 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC804, 0x03CF}, /* cam_sensor_cfg_y_addr_end = 975 */
+ {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
+ {MISENSOR_32BIT, 0xC808, 0x2DC6C00}, /* cam_sensor_cfg_pixclk = 48000000 */
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1459 */
+ {MISENSOR_16BIT, 0xC810, 0x05B3},
+ /* 0x074C //cam_sensor_cfg_frame_length_lines = 997 */
+ {MISENSOR_16BIT, 0xC812, 0x03E5},
+ /* 0x06B1 //cam_sensor_cfg_line_length_pck = 1604 */
+ {MISENSOR_16BIT, 0xC814, 0x0644},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ /* cam_sensor_cfg_cpipe_last_row = 963 */
+ {MISENSOR_16BIT, 0xC818, 0x03C3},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
+ {MISENSOR_16BIT, 0xC85A, 0x03C8}, /* cam_crop_window_height = 968 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
+ {MISENSOR_16BIT, 0xC86A, 0x03C8}, /* cam_output_height = 968 */
+ {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+/* [1296x864_30fps] */
+static struct misensor_reg const mt9m114_864P_init[] = {
+ {MISENSOR_16BIT, 0x98E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 296 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0038}, /* cam_sensor_cfg_y_addr_start = 56 */
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC804, 0x0397}, /* cam_sensor_cfg_y_addr_end = 919 */
+ {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
+ /* cam_sensor_cfg_pixclk = 48000000 */
+ {MISENSOR_32BIT, 0xC808, 0x2DC6C00},
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* cam_sensor_cfg_fine_integ_time_max = 1469 */
+ {MISENSOR_16BIT, 0xC810, 0x05BD},
+ /* cam_sensor_cfg_frame_length_lines = 1000 */
+ {MISENSOR_16BIT, 0xC812, 0x03E8},
+ /* cam_sensor_cfg_line_length_pck = 1600 */
+ {MISENSOR_16BIT, 0xC814, 0x0640},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ /* cam_sensor_cfg_cpipe_last_row = 859 */
+ {MISENSOR_16BIT, 0xC818, 0x035B},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
+ {MISENSOR_16BIT, 0xC85A, 0x0358}, /* cam_crop_window_height = 856 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
+ {MISENSOR_16BIT, 0xC86A, 0x0358}, /* cam_output_height = 856 */
+ {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+/* [1296x736_30fps] */
+static struct misensor_reg const mt9m114_736P_init[] = {
+ {MISENSOR_16BIT, 0x98E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x011F}, /* cam_sysctl_pll_divider_m_n = 287 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0078}, /* cam_sensor_cfg_y_addr_start = 120*/
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC804, 0x0357}, /* cam_sensor_cfg_y_addr_end = 855 */
+ {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
+ {MISENSOR_32BIT, 0xC808, 0x237A07F}, /* cam_sensor_cfg_pixclk=37199999*/
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1469 */
+ {MISENSOR_16BIT, 0xC810, 0x05BD},
+ /* 0x074C //cam_sensor_cfg_frame_length_lines = 775 */
+ {MISENSOR_16BIT, 0xC812, 0x0307},
+ /* 0x06B1 //cam_sensor_cfg_line_length_pck = 1600 */
+ {MISENSOR_16BIT, 0xC814, 0x0640},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ /* cam_sensor_cfg_cpipe_last_row = 731 */
+ {MISENSOR_16BIT, 0xC818, 0x02DB},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
+ {MISENSOR_16BIT, 0xC85A, 0x02D8}, /* cam_crop_window_height = 728 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
+ {MISENSOR_16BIT, 0xC86A, 0x02D8}, /* cam_output_height = 728 */
+ {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+/* [736x496_30fps_768Mbps] */
+#if 0 /* Currently unused */
+static struct misensor_reg const mt9m114_720_480P_init[] = {
+ {MISENSOR_16BIT, 0x98E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 296 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x00F0}, /* cam_sensor_cfg_y_addr_start = 240*/
+ {MISENSOR_16BIT, 0xC802, 0x0118}, /* cam_sensor_cfg_x_addr_start = 280*/
+ {MISENSOR_16BIT, 0xC804, 0x02DF}, /* cam_sensor_cfg_y_addr_end = 735 */
+ {MISENSOR_16BIT, 0xC806, 0x03F7}, /* cam_sensor_cfg_x_addr_end = 1015 */
+ /* cam_sensor_cfg_pixclk = 48000000 */
+ {MISENSOR_32BIT, 0xC808, 0x2DC6C00},
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1459 */
+ {MISENSOR_16BIT, 0xC810, 0x05B3},
+ /* 0x074C //cam_sensor_cfg_frame_length_lines = 997 */
+ {MISENSOR_16BIT, 0xC812, 0x03E5},
+ /* 0x06B1 //cam_sensor_cfg_line_length_pck = 1604 */
+ {MISENSOR_16BIT, 0xC814, 0x0644},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ {MISENSOR_16BIT, 0xC818, 0x03C3}, /* cam_sensor_cfg_cpipe_last_row=963*/
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0*/
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x02D8}, /* cam_crop_window_width = 728 */
+ {MISENSOR_16BIT, 0xC85A, 0x01E8}, /* cam_crop_window_height = 488 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x02D8}, /* cam_output_width = 728 */
+ {MISENSOR_16BIT, 0xC86A, 0x01E8}, /* cam_output_height = 488 */
+ {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+#endif
+
+static struct misensor_reg const mt9m114_common[] = {
+ /* reset */
+ {MISENSOR_16BIT, 0x301A, 0x0234},
+ /* LOAD = Step2-PLL_Timing //PLL and Timing */
+ {MISENSOR_16BIT, 0x098E, 0x1000}, /* LOGICAL_ADDRESS_ACCESS */
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 296 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC804, 0x03CD}, /* cam_sensor_cfg_y_addr_end = 973 */
+ {MISENSOR_16BIT, 0xC806, 0x050D}, /* cam_sensor_cfg_x_addr_end = 1293 */
+ {MISENSOR_16BIT, 0xC808, 0x02DC}, /* cam_sensor_cfg_pixclk = 48000000 */
+ {MISENSOR_16BIT, 0xC80A, 0x6C00},
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 451 */
+ {MISENSOR_16BIT, 0xC80E, 0x01C3},
+ /* cam_sensor_cfg_fine_integ_time_max = 1015 */
+ {MISENSOR_16BIT, 0xC810, 0x03F7},
+ /* cam_sensor_cfg_frame_length_lines = 1280 */
+ {MISENSOR_16BIT, 0xC812, 0x0500},
+ /* cam_sensor_cfg_line_length_pck = 1250 */
+ {MISENSOR_16BIT, 0xC814, 0x04E2},
+ /* cam_sensor_cfg_fine_correction = 224 */
+ {MISENSOR_16BIT, 0xC816, 0x00E0},
+ /* cam_sensor_cfg_cpipe_last_row = 483 */
+ {MISENSOR_16BIT, 0xC818, 0x01E3},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0330}, /* cam_sensor_control_read_mode = 0x0330 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0280}, /* cam_crop_window_width = 640 */
+ {MISENSOR_16BIT, 0xC85A, 0x01E0}, /* cam_crop_window_height = 480 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0280}, /* cam_output_width = 640 */
+ {MISENSOR_16BIT, 0xC86A, 0x01E0}, /* cam_output_height = 480 */
+ /* LOAD = Step3-Recommended
+ * Patch, Errata and Sensor optimization settings */
+ {MISENSOR_16BIT, 0x316A, 0x8270}, /* DAC_TXLO_ROW */
+ {MISENSOR_16BIT, 0x316C, 0x8270}, /* DAC_TXLO */
+ {MISENSOR_16BIT, 0x3ED0, 0x2305}, /* DAC_LD_4_5 */
+ {MISENSOR_16BIT, 0x3ED2, 0x77CF}, /* DAC_LD_6_7 */
+ {MISENSOR_16BIT, 0x316E, 0x8202}, /* DAC_ECL */
+ {MISENSOR_16BIT, 0x3180, 0x87FF}, /* DELTA_DK_CONTROL */
+ {MISENSOR_16BIT, 0x30D4, 0x6080}, /* COLUMN_CORRECTION */
+ {MISENSOR_16BIT, 0xA802, 0x0008}, /* AE_TRACK_MODE */
+ {MISENSOR_16BIT, 0x3E14, 0xFF39}, /* SAMP_COL_PUP2 */
+ {MISENSOR_16BIT, 0x31E0, 0x0003}, /* PIX_DEF_ID */
+ /* LOAD = Step8-Features //Ports, special features, etc. */
+ {MISENSOR_16BIT, 0x098E, 0x0000}, /* LOGICAL_ADDRESS_ACCESS */
+ {MISENSOR_16BIT, 0x001E, 0x0777}, /* PAD_SLEW */
+ {MISENSOR_16BIT, 0x098E, 0x0000}, /* LOGICAL_ADDRESS_ACCESS */
+ {MISENSOR_16BIT, 0xC984, 0x8001}, /* CAM_PORT_OUTPUT_CONTROL */
+ {MISENSOR_16BIT, 0xC988, 0x0F00}, /* CAM_PORT_MIPI_TIMING_T_HS_ZERO */
+ /* CAM_PORT_MIPI_TIMING_T_HS_EXIT_HS_TRAIL */
+ {MISENSOR_16BIT, 0xC98A, 0x0B07},
+ /* CAM_PORT_MIPI_TIMING_T_CLK_POST_CLK_PRE */
+ {MISENSOR_16BIT, 0xC98C, 0x0D01},
+ /* CAM_PORT_MIPI_TIMING_T_CLK_TRAIL_CLK_ZERO */
+ {MISENSOR_16BIT, 0xC98E, 0x071D},
+ {MISENSOR_16BIT, 0xC990, 0x0006}, /* CAM_PORT_MIPI_TIMING_T_LPX */
+ {MISENSOR_16BIT, 0xC992, 0x0A0C}, /* CAM_PORT_MIPI_TIMING_INIT_TIMING */
+ {MISENSOR_16BIT, 0x3C5A, 0x0009}, /* MIPI_DELAY_TRIM */
+ {MISENSOR_16BIT, 0xC86C, 0x0210}, /* CAM_OUTPUT_FORMAT */
+ {MISENSOR_16BIT, 0xA804, 0x0000}, /* AE_TRACK_ALGO */
+ /* default exposure */
+ {MISENSOR_16BIT, 0x3012, 0x0110}, /* COMMAND_REGISTER */
+ {MISENSOR_TOK_TERM, 0, 0},
+
+};
+
+#if 0 /* Currently unused */
+static struct misensor_reg const mt9m114_antiflicker_50hz[] = {
+ {MISENSOR_16BIT, 0x098E, 0xC88B},
+ {MISENSOR_8BIT, 0xC88B, 0x32},
+ {MISENSOR_8BIT, 0xDC00, 0x28},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_antiflicker_60hz[] = {
+ {MISENSOR_16BIT, 0x098E, 0xC88B},
+ {MISENSOR_8BIT, 0xC88B, 0x3C},
+ {MISENSOR_8BIT, 0xDC00, 0x28},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_iq[] = {
+ /* [Step3-Recommended] [Sensor optimization] */
+ {MISENSOR_16BIT, 0x316A, 0x8270},
+ {MISENSOR_16BIT, 0x316C, 0x8270},
+ {MISENSOR_16BIT, 0x3ED0, 0x2305},
+ {MISENSOR_16BIT, 0x3ED2, 0x77CF},
+ {MISENSOR_16BIT, 0x316E, 0x8202},
+ {MISENSOR_16BIT, 0x3180, 0x87FF},
+ {MISENSOR_16BIT, 0x30D4, 0x6080},
+ {MISENSOR_16BIT, 0xA802, 0x0008},
+
+ /* This register is from the vendor, to avoid low-light color noise */
+ {MISENSOR_16BIT, 0x31E0, 0x0001},
+
+ /* LOAD=Errata item 1 */
+ {MISENSOR_16BIT, 0x3E14, 0xFF39},
+
+ /* LOAD=Errata item 2 */
+ {MISENSOR_16BIT, 0x301A, 0x8234},
+
+ /*
+ * LOAD=Errata item 3
+ * LOAD=Patch 0202;
+ * Feature Recommended; Black level correction fix
+ */
+ {MISENSOR_16BIT, 0x0982, 0x0001},
+ {MISENSOR_16BIT, 0x098A, 0x5000},
+ {MISENSOR_16BIT, 0xD000, 0x70CF},
+ {MISENSOR_16BIT, 0xD002, 0xFFFF},
+ {MISENSOR_16BIT, 0xD004, 0xC5D4},
+ {MISENSOR_16BIT, 0xD006, 0x903A},
+ {MISENSOR_16BIT, 0xD008, 0x2144},
+ {MISENSOR_16BIT, 0xD00A, 0x0C00},
+ {MISENSOR_16BIT, 0xD00C, 0x2186},
+ {MISENSOR_16BIT, 0xD00E, 0x0FF3},
+ {MISENSOR_16BIT, 0xD010, 0xB844},
+ {MISENSOR_16BIT, 0xD012, 0xB948},
+ {MISENSOR_16BIT, 0xD014, 0xE082},
+ {MISENSOR_16BIT, 0xD016, 0x20CC},
+ {MISENSOR_16BIT, 0xD018, 0x80E2},
+ {MISENSOR_16BIT, 0xD01A, 0x21CC},
+ {MISENSOR_16BIT, 0xD01C, 0x80A2},
+ {MISENSOR_16BIT, 0xD01E, 0x21CC},
+ {MISENSOR_16BIT, 0xD020, 0x80E2},
+ {MISENSOR_16BIT, 0xD022, 0xF404},
+ {MISENSOR_16BIT, 0xD024, 0xD801},
+ {MISENSOR_16BIT, 0xD026, 0xF003},
+ {MISENSOR_16BIT, 0xD028, 0xD800},
+ {MISENSOR_16BIT, 0xD02A, 0x7EE0},
+ {MISENSOR_16BIT, 0xD02C, 0xC0F1},
+ {MISENSOR_16BIT, 0xD02E, 0x08BA},
+
+ {MISENSOR_16BIT, 0xD030, 0x0600},
+ {MISENSOR_16BIT, 0xD032, 0xC1A1},
+ {MISENSOR_16BIT, 0xD034, 0x76CF},
+ {MISENSOR_16BIT, 0xD036, 0xFFFF},
+ {MISENSOR_16BIT, 0xD038, 0xC130},
+ {MISENSOR_16BIT, 0xD03A, 0x6E04},
+ {MISENSOR_16BIT, 0xD03C, 0xC040},
+ {MISENSOR_16BIT, 0xD03E, 0x71CF},
+ {MISENSOR_16BIT, 0xD040, 0xFFFF},
+ {MISENSOR_16BIT, 0xD042, 0xC790},
+ {MISENSOR_16BIT, 0xD044, 0x8103},
+ {MISENSOR_16BIT, 0xD046, 0x77CF},
+ {MISENSOR_16BIT, 0xD048, 0xFFFF},
+ {MISENSOR_16BIT, 0xD04A, 0xC7C0},
+ {MISENSOR_16BIT, 0xD04C, 0xE001},
+ {MISENSOR_16BIT, 0xD04E, 0xA103},
+ {MISENSOR_16BIT, 0xD050, 0xD800},
+ {MISENSOR_16BIT, 0xD052, 0x0C6A},
+ {MISENSOR_16BIT, 0xD054, 0x04E0},
+ {MISENSOR_16BIT, 0xD056, 0xB89E},
+ {MISENSOR_16BIT, 0xD058, 0x7508},
+ {MISENSOR_16BIT, 0xD05A, 0x8E1C},
+ {MISENSOR_16BIT, 0xD05C, 0x0809},
+ {MISENSOR_16BIT, 0xD05E, 0x0191},
+
+ {MISENSOR_16BIT, 0xD060, 0xD801},
+ {MISENSOR_16BIT, 0xD062, 0xAE1D},
+ {MISENSOR_16BIT, 0xD064, 0xE580},
+ {MISENSOR_16BIT, 0xD066, 0x20CA},
+ {MISENSOR_16BIT, 0xD068, 0x0022},
+ {MISENSOR_16BIT, 0xD06A, 0x20CF},
+ {MISENSOR_16BIT, 0xD06C, 0x0522},
+ {MISENSOR_16BIT, 0xD06E, 0x0C5C},
+ {MISENSOR_16BIT, 0xD070, 0x04E2},
+ {MISENSOR_16BIT, 0xD072, 0x21CA},
+ {MISENSOR_16BIT, 0xD074, 0x0062},
+ {MISENSOR_16BIT, 0xD076, 0xE580},
+ {MISENSOR_16BIT, 0xD078, 0xD901},
+ {MISENSOR_16BIT, 0xD07A, 0x79C0},
+ {MISENSOR_16BIT, 0xD07C, 0xD800},
+ {MISENSOR_16BIT, 0xD07E, 0x0BE6},
+ {MISENSOR_16BIT, 0xD080, 0x04E0},
+ {MISENSOR_16BIT, 0xD082, 0xB89E},
+ {MISENSOR_16BIT, 0xD084, 0x70CF},
+ {MISENSOR_16BIT, 0xD086, 0xFFFF},
+ {MISENSOR_16BIT, 0xD088, 0xC8D4},
+ {MISENSOR_16BIT, 0xD08A, 0x9002},
+ {MISENSOR_16BIT, 0xD08C, 0x0857},
+ {MISENSOR_16BIT, 0xD08E, 0x025E},
+
+ {MISENSOR_16BIT, 0xD090, 0xFFDC},
+ {MISENSOR_16BIT, 0xD092, 0xE080},
+ {MISENSOR_16BIT, 0xD094, 0x25CC},
+ {MISENSOR_16BIT, 0xD096, 0x9022},
+ {MISENSOR_16BIT, 0xD098, 0xF225},
+ {MISENSOR_16BIT, 0xD09A, 0x1700},
+ {MISENSOR_16BIT, 0xD09C, 0x108A},
+ {MISENSOR_16BIT, 0xD09E, 0x73CF},
+ {MISENSOR_16BIT, 0xD0A0, 0xFF00},
+ {MISENSOR_16BIT, 0xD0A2, 0x3174},
+ {MISENSOR_16BIT, 0xD0A4, 0x9307},
+ {MISENSOR_16BIT, 0xD0A6, 0x2A04},
+ {MISENSOR_16BIT, 0xD0A8, 0x103E},
+ {MISENSOR_16BIT, 0xD0AA, 0x9328},
+ {MISENSOR_16BIT, 0xD0AC, 0x2942},
+ {MISENSOR_16BIT, 0xD0AE, 0x7140},
+ {MISENSOR_16BIT, 0xD0B0, 0x2A04},
+ {MISENSOR_16BIT, 0xD0B2, 0x107E},
+ {MISENSOR_16BIT, 0xD0B4, 0x9349},
+ {MISENSOR_16BIT, 0xD0B6, 0x2942},
+ {MISENSOR_16BIT, 0xD0B8, 0x7141},
+ {MISENSOR_16BIT, 0xD0BA, 0x2A04},
+ {MISENSOR_16BIT, 0xD0BC, 0x10BE},
+ {MISENSOR_16BIT, 0xD0BE, 0x934A},
+
+ {MISENSOR_16BIT, 0xD0C0, 0x2942},
+ {MISENSOR_16BIT, 0xD0C2, 0x714B},
+ {MISENSOR_16BIT, 0xD0C4, 0x2A04},
+ {MISENSOR_16BIT, 0xD0C6, 0x10BE},
+ {MISENSOR_16BIT, 0xD0C8, 0x130C},
+ {MISENSOR_16BIT, 0xD0CA, 0x010A},
+ {MISENSOR_16BIT, 0xD0CC, 0x2942},
+ {MISENSOR_16BIT, 0xD0CE, 0x7142},
+ {MISENSOR_16BIT, 0xD0D0, 0x2250},
+ {MISENSOR_16BIT, 0xD0D2, 0x13CA},
+ {MISENSOR_16BIT, 0xD0D4, 0x1B0C},
+ {MISENSOR_16BIT, 0xD0D6, 0x0284},
+ {MISENSOR_16BIT, 0xD0D8, 0xB307},
+ {MISENSOR_16BIT, 0xD0DA, 0xB328},
+ {MISENSOR_16BIT, 0xD0DC, 0x1B12},
+ {MISENSOR_16BIT, 0xD0DE, 0x02C4},
+ {MISENSOR_16BIT, 0xD0E0, 0xB34A},
+ {MISENSOR_16BIT, 0xD0E2, 0xED88},
+ {MISENSOR_16BIT, 0xD0E4, 0x71CF},
+ {MISENSOR_16BIT, 0xD0E6, 0xFF00},
+ {MISENSOR_16BIT, 0xD0E8, 0x3174},
+ {MISENSOR_16BIT, 0xD0EA, 0x9106},
+ {MISENSOR_16BIT, 0xD0EC, 0xB88F},
+ {MISENSOR_16BIT, 0xD0EE, 0xB106},
+
+ {MISENSOR_16BIT, 0xD0F0, 0x210A},
+ {MISENSOR_16BIT, 0xD0F2, 0x8340},
+ {MISENSOR_16BIT, 0xD0F4, 0xC000},
+ {MISENSOR_16BIT, 0xD0F6, 0x21CA},
+ {MISENSOR_16BIT, 0xD0F8, 0x0062},
+ {MISENSOR_16BIT, 0xD0FA, 0x20F0},
+ {MISENSOR_16BIT, 0xD0FC, 0x0040},
+ {MISENSOR_16BIT, 0xD0FE, 0x0B02},
+ {MISENSOR_16BIT, 0xD100, 0x0320},
+ {MISENSOR_16BIT, 0xD102, 0xD901},
+ {MISENSOR_16BIT, 0xD104, 0x07F1},
+ {MISENSOR_16BIT, 0xD106, 0x05E0},
+ {MISENSOR_16BIT, 0xD108, 0xC0A1},
+ {MISENSOR_16BIT, 0xD10A, 0x78E0},
+ {MISENSOR_16BIT, 0xD10C, 0xC0F1},
+ {MISENSOR_16BIT, 0xD10E, 0x71CF},
+ {MISENSOR_16BIT, 0xD110, 0xFFFF},
+ {MISENSOR_16BIT, 0xD112, 0xC7C0},
+ {MISENSOR_16BIT, 0xD114, 0xD840},
+ {MISENSOR_16BIT, 0xD116, 0xA900},
+ {MISENSOR_16BIT, 0xD118, 0x71CF},
+ {MISENSOR_16BIT, 0xD11A, 0xFFFF},
+ {MISENSOR_16BIT, 0xD11C, 0xD02C},
+ {MISENSOR_16BIT, 0xD11E, 0xD81E},
+
+ {MISENSOR_16BIT, 0xD120, 0x0A5A},
+ {MISENSOR_16BIT, 0xD122, 0x04E0},
+ {MISENSOR_16BIT, 0xD124, 0xDA00},
+ {MISENSOR_16BIT, 0xD126, 0xD800},
+ {MISENSOR_16BIT, 0xD128, 0xC0D1},
+ {MISENSOR_16BIT, 0xD12A, 0x7EE0},
+
+ {MISENSOR_16BIT, 0x098E, 0x0000},
+ {MISENSOR_16BIT, 0xE000, 0x010C},
+ {MISENSOR_16BIT, 0xE002, 0x0202},
+ {MISENSOR_16BIT, 0xE004, 0x4103},
+ {MISENSOR_16BIT, 0xE006, 0x0202},
+ {MISENSOR_16BIT, 0x0080, 0xFFF0},
+ {MISENSOR_16BIT, 0x0080, 0xFFF1},
+
+ /* LOAD=Patch 0302; Feature Recommended; Adaptive Sensitivity */
+ {MISENSOR_16BIT, 0x0982, 0x0001},
+ {MISENSOR_16BIT, 0x098A, 0x512C},
+ {MISENSOR_16BIT, 0xD12C, 0x70CF},
+ {MISENSOR_16BIT, 0xD12E, 0xFFFF},
+ {MISENSOR_16BIT, 0xD130, 0xC5D4},
+ {MISENSOR_16BIT, 0xD132, 0x903A},
+ {MISENSOR_16BIT, 0xD134, 0x2144},
+ {MISENSOR_16BIT, 0xD136, 0x0C00},
+ {MISENSOR_16BIT, 0xD138, 0x2186},
+ {MISENSOR_16BIT, 0xD13A, 0x0FF3},
+ {MISENSOR_16BIT, 0xD13C, 0xB844},
+ {MISENSOR_16BIT, 0xD13E, 0x262F},
+ {MISENSOR_16BIT, 0xD140, 0xF008},
+ {MISENSOR_16BIT, 0xD142, 0xB948},
+ {MISENSOR_16BIT, 0xD144, 0x21CC},
+ {MISENSOR_16BIT, 0xD146, 0x8021},
+ {MISENSOR_16BIT, 0xD148, 0xD801},
+ {MISENSOR_16BIT, 0xD14A, 0xF203},
+ {MISENSOR_16BIT, 0xD14C, 0xD800},
+ {MISENSOR_16BIT, 0xD14E, 0x7EE0},
+ {MISENSOR_16BIT, 0xD150, 0xC0F1},
+ {MISENSOR_16BIT, 0xD152, 0x71CF},
+ {MISENSOR_16BIT, 0xD154, 0xFFFF},
+ {MISENSOR_16BIT, 0xD156, 0xC610},
+ {MISENSOR_16BIT, 0xD158, 0x910E},
+ {MISENSOR_16BIT, 0xD15A, 0x208C},
+ {MISENSOR_16BIT, 0xD15C, 0x8014},
+ {MISENSOR_16BIT, 0xD15E, 0xF418},
+ {MISENSOR_16BIT, 0xD160, 0x910F},
+ {MISENSOR_16BIT, 0xD162, 0x208C},
+ {MISENSOR_16BIT, 0xD164, 0x800F},
+ {MISENSOR_16BIT, 0xD166, 0xF414},
+ {MISENSOR_16BIT, 0xD168, 0x9116},
+ {MISENSOR_16BIT, 0xD16A, 0x208C},
+ {MISENSOR_16BIT, 0xD16C, 0x800A},
+ {MISENSOR_16BIT, 0xD16E, 0xF410},
+ {MISENSOR_16BIT, 0xD170, 0x9117},
+ {MISENSOR_16BIT, 0xD172, 0x208C},
+ {MISENSOR_16BIT, 0xD174, 0x8807},
+ {MISENSOR_16BIT, 0xD176, 0xF40C},
+ {MISENSOR_16BIT, 0xD178, 0x9118},
+ {MISENSOR_16BIT, 0xD17A, 0x2086},
+ {MISENSOR_16BIT, 0xD17C, 0x0FF3},
+ {MISENSOR_16BIT, 0xD17E, 0xB848},
+ {MISENSOR_16BIT, 0xD180, 0x080D},
+ {MISENSOR_16BIT, 0xD182, 0x0090},
+ {MISENSOR_16BIT, 0xD184, 0xFFEA},
+ {MISENSOR_16BIT, 0xD186, 0xE081},
+ {MISENSOR_16BIT, 0xD188, 0xD801},
+ {MISENSOR_16BIT, 0xD18A, 0xF203},
+ {MISENSOR_16BIT, 0xD18C, 0xD800},
+ {MISENSOR_16BIT, 0xD18E, 0xC0D1},
+ {MISENSOR_16BIT, 0xD190, 0x7EE0},
+ {MISENSOR_16BIT, 0xD192, 0x78E0},
+ {MISENSOR_16BIT, 0xD194, 0xC0F1},
+ {MISENSOR_16BIT, 0xD196, 0x71CF},
+ {MISENSOR_16BIT, 0xD198, 0xFFFF},
+ {MISENSOR_16BIT, 0xD19A, 0xC610},
+ {MISENSOR_16BIT, 0xD19C, 0x910E},
+ {MISENSOR_16BIT, 0xD19E, 0x208C},
+ {MISENSOR_16BIT, 0xD1A0, 0x800A},
+ {MISENSOR_16BIT, 0xD1A2, 0xF418},
+ {MISENSOR_16BIT, 0xD1A4, 0x910F},
+ {MISENSOR_16BIT, 0xD1A6, 0x208C},
+ {MISENSOR_16BIT, 0xD1A8, 0x8807},
+ {MISENSOR_16BIT, 0xD1AA, 0xF414},
+ {MISENSOR_16BIT, 0xD1AC, 0x9116},
+ {MISENSOR_16BIT, 0xD1AE, 0x208C},
+ {MISENSOR_16BIT, 0xD1B0, 0x800A},
+ {MISENSOR_16BIT, 0xD1B2, 0xF410},
+ {MISENSOR_16BIT, 0xD1B4, 0x9117},
+ {MISENSOR_16BIT, 0xD1B6, 0x208C},
+ {MISENSOR_16BIT, 0xD1B8, 0x8807},
+ {MISENSOR_16BIT, 0xD1BA, 0xF40C},
+ {MISENSOR_16BIT, 0xD1BC, 0x9118},
+ {MISENSOR_16BIT, 0xD1BE, 0x2086},
+ {MISENSOR_16BIT, 0xD1C0, 0x0FF3},
+ {MISENSOR_16BIT, 0xD1C2, 0xB848},
+ {MISENSOR_16BIT, 0xD1C4, 0x080D},
+ {MISENSOR_16BIT, 0xD1C6, 0x0090},
+ {MISENSOR_16BIT, 0xD1C8, 0xFFD9},
+ {MISENSOR_16BIT, 0xD1CA, 0xE080},
+ {MISENSOR_16BIT, 0xD1CC, 0xD801},
+ {MISENSOR_16BIT, 0xD1CE, 0xF203},
+ {MISENSOR_16BIT, 0xD1D0, 0xD800},
+ {MISENSOR_16BIT, 0xD1D2, 0xF1DF},
+ {MISENSOR_16BIT, 0xD1D4, 0x9040},
+ {MISENSOR_16BIT, 0xD1D6, 0x71CF},
+ {MISENSOR_16BIT, 0xD1D8, 0xFFFF},
+ {MISENSOR_16BIT, 0xD1DA, 0xC5D4},
+ {MISENSOR_16BIT, 0xD1DC, 0xB15A},
+ {MISENSOR_16BIT, 0xD1DE, 0x9041},
+ {MISENSOR_16BIT, 0xD1E0, 0x73CF},
+ {MISENSOR_16BIT, 0xD1E2, 0xFFFF},
+ {MISENSOR_16BIT, 0xD1E4, 0xC7D0},
+ {MISENSOR_16BIT, 0xD1E6, 0xB140},
+ {MISENSOR_16BIT, 0xD1E8, 0x9042},
+ {MISENSOR_16BIT, 0xD1EA, 0xB141},
+ {MISENSOR_16BIT, 0xD1EC, 0x9043},
+ {MISENSOR_16BIT, 0xD1EE, 0xB142},
+ {MISENSOR_16BIT, 0xD1F0, 0x9044},
+ {MISENSOR_16BIT, 0xD1F2, 0xB143},
+ {MISENSOR_16BIT, 0xD1F4, 0x9045},
+ {MISENSOR_16BIT, 0xD1F6, 0xB147},
+ {MISENSOR_16BIT, 0xD1F8, 0x9046},
+ {MISENSOR_16BIT, 0xD1FA, 0xB148},
+ {MISENSOR_16BIT, 0xD1FC, 0x9047},
+ {MISENSOR_16BIT, 0xD1FE, 0xB14B},
+ {MISENSOR_16BIT, 0xD200, 0x9048},
+ {MISENSOR_16BIT, 0xD202, 0xB14C},
+ {MISENSOR_16BIT, 0xD204, 0x9049},
+ {MISENSOR_16BIT, 0xD206, 0x1958},
+ {MISENSOR_16BIT, 0xD208, 0x0084},
+ {MISENSOR_16BIT, 0xD20A, 0x904A},
+ {MISENSOR_16BIT, 0xD20C, 0x195A},
+ {MISENSOR_16BIT, 0xD20E, 0x0084},
+ {MISENSOR_16BIT, 0xD210, 0x8856},
+ {MISENSOR_16BIT, 0xD212, 0x1B36},
+ {MISENSOR_16BIT, 0xD214, 0x8082},
+ {MISENSOR_16BIT, 0xD216, 0x8857},
+ {MISENSOR_16BIT, 0xD218, 0x1B37},
+ {MISENSOR_16BIT, 0xD21A, 0x8082},
+ {MISENSOR_16BIT, 0xD21C, 0x904C},
+ {MISENSOR_16BIT, 0xD21E, 0x19A7},
+ {MISENSOR_16BIT, 0xD220, 0x009C},
+ {MISENSOR_16BIT, 0xD222, 0x881A},
+ {MISENSOR_16BIT, 0xD224, 0x7FE0},
+ {MISENSOR_16BIT, 0xD226, 0x1B54},
+ {MISENSOR_16BIT, 0xD228, 0x8002},
+ {MISENSOR_16BIT, 0xD22A, 0x78E0},
+ {MISENSOR_16BIT, 0xD22C, 0x71CF},
+ {MISENSOR_16BIT, 0xD22E, 0xFFFF},
+ {MISENSOR_16BIT, 0xD230, 0xC350},
+ {MISENSOR_16BIT, 0xD232, 0xD828},
+ {MISENSOR_16BIT, 0xD234, 0xA90B},
+ {MISENSOR_16BIT, 0xD236, 0x8100},
+ {MISENSOR_16BIT, 0xD238, 0x01C5},
+ {MISENSOR_16BIT, 0xD23A, 0x0320},
+ {MISENSOR_16BIT, 0xD23C, 0xD900},
+ {MISENSOR_16BIT, 0xD23E, 0x78E0},
+ {MISENSOR_16BIT, 0xD240, 0x220A},
+ {MISENSOR_16BIT, 0xD242, 0x1F80},
+ {MISENSOR_16BIT, 0xD244, 0xFFFF},
+ {MISENSOR_16BIT, 0xD246, 0xD4E0},
+ {MISENSOR_16BIT, 0xD248, 0xC0F1},
+ {MISENSOR_16BIT, 0xD24A, 0x0811},
+ {MISENSOR_16BIT, 0xD24C, 0x0051},
+ {MISENSOR_16BIT, 0xD24E, 0x2240},
+ {MISENSOR_16BIT, 0xD250, 0x1200},
+ {MISENSOR_16BIT, 0xD252, 0xFFE1},
+ {MISENSOR_16BIT, 0xD254, 0xD801},
+ {MISENSOR_16BIT, 0xD256, 0xF006},
+ {MISENSOR_16BIT, 0xD258, 0x2240},
+ {MISENSOR_16BIT, 0xD25A, 0x1900},
+ {MISENSOR_16BIT, 0xD25C, 0xFFDE},
+ {MISENSOR_16BIT, 0xD25E, 0xD802},
+ {MISENSOR_16BIT, 0xD260, 0x1A05},
+ {MISENSOR_16BIT, 0xD262, 0x1002},
+ {MISENSOR_16BIT, 0xD264, 0xFFF2},
+ {MISENSOR_16BIT, 0xD266, 0xF195},
+ {MISENSOR_16BIT, 0xD268, 0xC0F1},
+ {MISENSOR_16BIT, 0xD26A, 0x0E7E},
+ {MISENSOR_16BIT, 0xD26C, 0x05C0},
+ {MISENSOR_16BIT, 0xD26E, 0x75CF},
+ {MISENSOR_16BIT, 0xD270, 0xFFFF},
+ {MISENSOR_16BIT, 0xD272, 0xC84C},
+ {MISENSOR_16BIT, 0xD274, 0x9502},
+ {MISENSOR_16BIT, 0xD276, 0x77CF},
+ {MISENSOR_16BIT, 0xD278, 0xFFFF},
+ {MISENSOR_16BIT, 0xD27A, 0xC344},
+ {MISENSOR_16BIT, 0xD27C, 0x2044},
+ {MISENSOR_16BIT, 0xD27E, 0x008E},
+ {MISENSOR_16BIT, 0xD280, 0xB8A1},
+ {MISENSOR_16BIT, 0xD282, 0x0926},
+ {MISENSOR_16BIT, 0xD284, 0x03E0},
+ {MISENSOR_16BIT, 0xD286, 0xB502},
+ {MISENSOR_16BIT, 0xD288, 0x9502},
+ {MISENSOR_16BIT, 0xD28A, 0x952E},
+ {MISENSOR_16BIT, 0xD28C, 0x7E05},
+ {MISENSOR_16BIT, 0xD28E, 0xB5C2},
+ {MISENSOR_16BIT, 0xD290, 0x70CF},
+ {MISENSOR_16BIT, 0xD292, 0xFFFF},
+ {MISENSOR_16BIT, 0xD294, 0xC610},
+ {MISENSOR_16BIT, 0xD296, 0x099A},
+ {MISENSOR_16BIT, 0xD298, 0x04A0},
+ {MISENSOR_16BIT, 0xD29A, 0xB026},
+ {MISENSOR_16BIT, 0xD29C, 0x0E02},
+ {MISENSOR_16BIT, 0xD29E, 0x0560},
+ {MISENSOR_16BIT, 0xD2A0, 0xDE00},
+ {MISENSOR_16BIT, 0xD2A2, 0x0A12},
+ {MISENSOR_16BIT, 0xD2A4, 0x0320},
+ {MISENSOR_16BIT, 0xD2A6, 0xB7C4},
+ {MISENSOR_16BIT, 0xD2A8, 0x0B36},
+ {MISENSOR_16BIT, 0xD2AA, 0x03A0},
+ {MISENSOR_16BIT, 0xD2AC, 0x70C9},
+ {MISENSOR_16BIT, 0xD2AE, 0x9502},
+ {MISENSOR_16BIT, 0xD2B0, 0x7608},
+ {MISENSOR_16BIT, 0xD2B2, 0xB8A8},
+ {MISENSOR_16BIT, 0xD2B4, 0xB502},
+ {MISENSOR_16BIT, 0xD2B6, 0x70CF},
+ {MISENSOR_16BIT, 0xD2B8, 0x0000},
+ {MISENSOR_16BIT, 0xD2BA, 0x5536},
+ {MISENSOR_16BIT, 0xD2BC, 0x7860},
+ {MISENSOR_16BIT, 0xD2BE, 0x2686},
+ {MISENSOR_16BIT, 0xD2C0, 0x1FFB},
+ {MISENSOR_16BIT, 0xD2C2, 0x9502},
+ {MISENSOR_16BIT, 0xD2C4, 0x78C5},
+ {MISENSOR_16BIT, 0xD2C6, 0x0631},
+ {MISENSOR_16BIT, 0xD2C8, 0x05E0},
+ {MISENSOR_16BIT, 0xD2CA, 0xB502},
+ {MISENSOR_16BIT, 0xD2CC, 0x72CF},
+ {MISENSOR_16BIT, 0xD2CE, 0xFFFF},
+ {MISENSOR_16BIT, 0xD2D0, 0xC5D4},
+ {MISENSOR_16BIT, 0xD2D2, 0x923A},
+ {MISENSOR_16BIT, 0xD2D4, 0x73CF},
+ {MISENSOR_16BIT, 0xD2D6, 0xFFFF},
+ {MISENSOR_16BIT, 0xD2D8, 0xC7D0},
+ {MISENSOR_16BIT, 0xD2DA, 0xB020},
+ {MISENSOR_16BIT, 0xD2DC, 0x9220},
+ {MISENSOR_16BIT, 0xD2DE, 0xB021},
+ {MISENSOR_16BIT, 0xD2E0, 0x9221},
+ {MISENSOR_16BIT, 0xD2E2, 0xB022},
+ {MISENSOR_16BIT, 0xD2E4, 0x9222},
+ {MISENSOR_16BIT, 0xD2E6, 0xB023},
+ {MISENSOR_16BIT, 0xD2E8, 0x9223},
+ {MISENSOR_16BIT, 0xD2EA, 0xB024},
+ {MISENSOR_16BIT, 0xD2EC, 0x9227},
+ {MISENSOR_16BIT, 0xD2EE, 0xB025},
+ {MISENSOR_16BIT, 0xD2F0, 0x9228},
+ {MISENSOR_16BIT, 0xD2F2, 0xB026},
+ {MISENSOR_16BIT, 0xD2F4, 0x922B},
+ {MISENSOR_16BIT, 0xD2F6, 0xB027},
+ {MISENSOR_16BIT, 0xD2F8, 0x922C},
+ {MISENSOR_16BIT, 0xD2FA, 0xB028},
+ {MISENSOR_16BIT, 0xD2FC, 0x1258},
+ {MISENSOR_16BIT, 0xD2FE, 0x0101},
+ {MISENSOR_16BIT, 0xD300, 0xB029},
+ {MISENSOR_16BIT, 0xD302, 0x125A},
+ {MISENSOR_16BIT, 0xD304, 0x0101},
+ {MISENSOR_16BIT, 0xD306, 0xB02A},
+ {MISENSOR_16BIT, 0xD308, 0x1336},
+ {MISENSOR_16BIT, 0xD30A, 0x8081},
+ {MISENSOR_16BIT, 0xD30C, 0xA836},
+ {MISENSOR_16BIT, 0xD30E, 0x1337},
+ {MISENSOR_16BIT, 0xD310, 0x8081},
+ {MISENSOR_16BIT, 0xD312, 0xA837},
+ {MISENSOR_16BIT, 0xD314, 0x12A7},
+ {MISENSOR_16BIT, 0xD316, 0x0701},
+ {MISENSOR_16BIT, 0xD318, 0xB02C},
+ {MISENSOR_16BIT, 0xD31A, 0x1354},
+ {MISENSOR_16BIT, 0xD31C, 0x8081},
+ {MISENSOR_16BIT, 0xD31E, 0x7FE0},
+ {MISENSOR_16BIT, 0xD320, 0xA83A},
+ {MISENSOR_16BIT, 0xD322, 0x78E0},
+ {MISENSOR_16BIT, 0xD324, 0xC0F1},
+ {MISENSOR_16BIT, 0xD326, 0x0DC2},
+ {MISENSOR_16BIT, 0xD328, 0x05C0},
+ {MISENSOR_16BIT, 0xD32A, 0x7608},
+ {MISENSOR_16BIT, 0xD32C, 0x09BB},
+ {MISENSOR_16BIT, 0xD32E, 0x0010},
+ {MISENSOR_16BIT, 0xD330, 0x75CF},
+ {MISENSOR_16BIT, 0xD332, 0xFFFF},
+ {MISENSOR_16BIT, 0xD334, 0xD4E0},
+ {MISENSOR_16BIT, 0xD336, 0x8D21},
+ {MISENSOR_16BIT, 0xD338, 0x8D00},
+ {MISENSOR_16BIT, 0xD33A, 0x2153},
+ {MISENSOR_16BIT, 0xD33C, 0x0003},
+ {MISENSOR_16BIT, 0xD33E, 0xB8C0},
+ {MISENSOR_16BIT, 0xD340, 0x8D45},
+ {MISENSOR_16BIT, 0xD342, 0x0B23},
+ {MISENSOR_16BIT, 0xD344, 0x0000},
+ {MISENSOR_16BIT, 0xD346, 0xEA8F},
+ {MISENSOR_16BIT, 0xD348, 0x0915},
+ {MISENSOR_16BIT, 0xD34A, 0x001E},
+ {MISENSOR_16BIT, 0xD34C, 0xFF81},
+ {MISENSOR_16BIT, 0xD34E, 0xE808},
+ {MISENSOR_16BIT, 0xD350, 0x2540},
+ {MISENSOR_16BIT, 0xD352, 0x1900},
+ {MISENSOR_16BIT, 0xD354, 0xFFDE},
+ {MISENSOR_16BIT, 0xD356, 0x8D00},
+ {MISENSOR_16BIT, 0xD358, 0xB880},
+ {MISENSOR_16BIT, 0xD35A, 0xF004},
+ {MISENSOR_16BIT, 0xD35C, 0x8D00},
+ {MISENSOR_16BIT, 0xD35E, 0xB8A0},
+ {MISENSOR_16BIT, 0xD360, 0xAD00},
+ {MISENSOR_16BIT, 0xD362, 0x8D05},
+ {MISENSOR_16BIT, 0xD364, 0xE081},
+ {MISENSOR_16BIT, 0xD366, 0x20CC},
+ {MISENSOR_16BIT, 0xD368, 0x80A2},
+ {MISENSOR_16BIT, 0xD36A, 0xDF00},
+ {MISENSOR_16BIT, 0xD36C, 0xF40A},
+ {MISENSOR_16BIT, 0xD36E, 0x71CF},
+ {MISENSOR_16BIT, 0xD370, 0xFFFF},
+ {MISENSOR_16BIT, 0xD372, 0xC84C},
+ {MISENSOR_16BIT, 0xD374, 0x9102},
+ {MISENSOR_16BIT, 0xD376, 0x7708},
+ {MISENSOR_16BIT, 0xD378, 0xB8A6},
+ {MISENSOR_16BIT, 0xD37A, 0x2786},
+ {MISENSOR_16BIT, 0xD37C, 0x1FFE},
+ {MISENSOR_16BIT, 0xD37E, 0xB102},
+ {MISENSOR_16BIT, 0xD380, 0x0B42},
+ {MISENSOR_16BIT, 0xD382, 0x0180},
+ {MISENSOR_16BIT, 0xD384, 0x0E3E},
+ {MISENSOR_16BIT, 0xD386, 0x0180},
+ {MISENSOR_16BIT, 0xD388, 0x0F4A},
+ {MISENSOR_16BIT, 0xD38A, 0x0160},
+ {MISENSOR_16BIT, 0xD38C, 0x70C9},
+ {MISENSOR_16BIT, 0xD38E, 0x8D05},
+ {MISENSOR_16BIT, 0xD390, 0xE081},
+ {MISENSOR_16BIT, 0xD392, 0x20CC},
+ {MISENSOR_16BIT, 0xD394, 0x80A2},
+ {MISENSOR_16BIT, 0xD396, 0xF429},
+ {MISENSOR_16BIT, 0xD398, 0x76CF},
+ {MISENSOR_16BIT, 0xD39A, 0xFFFF},
+ {MISENSOR_16BIT, 0xD39C, 0xC84C},
+ {MISENSOR_16BIT, 0xD39E, 0x082D},
+ {MISENSOR_16BIT, 0xD3A0, 0x0051},
+ {MISENSOR_16BIT, 0xD3A2, 0x70CF},
+ {MISENSOR_16BIT, 0xD3A4, 0xFFFF},
+ {MISENSOR_16BIT, 0xD3A6, 0xC90C},
+ {MISENSOR_16BIT, 0xD3A8, 0x8805},
+ {MISENSOR_16BIT, 0xD3AA, 0x09B6},
+ {MISENSOR_16BIT, 0xD3AC, 0x0360},
+ {MISENSOR_16BIT, 0xD3AE, 0xD908},
+ {MISENSOR_16BIT, 0xD3B0, 0x2099},
+ {MISENSOR_16BIT, 0xD3B2, 0x0802},
+ {MISENSOR_16BIT, 0xD3B4, 0x9634},
+ {MISENSOR_16BIT, 0xD3B6, 0xB503},
+ {MISENSOR_16BIT, 0xD3B8, 0x7902},
+ {MISENSOR_16BIT, 0xD3BA, 0x1523},
+ {MISENSOR_16BIT, 0xD3BC, 0x1080},
+ {MISENSOR_16BIT, 0xD3BE, 0xB634},
+ {MISENSOR_16BIT, 0xD3C0, 0xE001},
+ {MISENSOR_16BIT, 0xD3C2, 0x1D23},
+ {MISENSOR_16BIT, 0xD3C4, 0x1002},
+ {MISENSOR_16BIT, 0xD3C6, 0xF00B},
+ {MISENSOR_16BIT, 0xD3C8, 0x9634},
+ {MISENSOR_16BIT, 0xD3CA, 0x9503},
+ {MISENSOR_16BIT, 0xD3CC, 0x6038},
+ {MISENSOR_16BIT, 0xD3CE, 0xB614},
+ {MISENSOR_16BIT, 0xD3D0, 0x153F},
+ {MISENSOR_16BIT, 0xD3D2, 0x1080},
+ {MISENSOR_16BIT, 0xD3D4, 0xE001},
+ {MISENSOR_16BIT, 0xD3D6, 0x1D3F},
+ {MISENSOR_16BIT, 0xD3D8, 0x1002},
+ {MISENSOR_16BIT, 0xD3DA, 0xFFA4},
+ {MISENSOR_16BIT, 0xD3DC, 0x9602},
+ {MISENSOR_16BIT, 0xD3DE, 0x7F05},
+ {MISENSOR_16BIT, 0xD3E0, 0xD800},
+ {MISENSOR_16BIT, 0xD3E2, 0xB6E2},
+ {MISENSOR_16BIT, 0xD3E4, 0xAD05},
+ {MISENSOR_16BIT, 0xD3E6, 0x0511},
+ {MISENSOR_16BIT, 0xD3E8, 0x05E0},
+ {MISENSOR_16BIT, 0xD3EA, 0xD800},
+ {MISENSOR_16BIT, 0xD3EC, 0xC0F1},
+ {MISENSOR_16BIT, 0xD3EE, 0x0CFE},
+ {MISENSOR_16BIT, 0xD3F0, 0x05C0},
+ {MISENSOR_16BIT, 0xD3F2, 0x0A96},
+ {MISENSOR_16BIT, 0xD3F4, 0x05A0},
+ {MISENSOR_16BIT, 0xD3F6, 0x7608},
+ {MISENSOR_16BIT, 0xD3F8, 0x0C22},
+ {MISENSOR_16BIT, 0xD3FA, 0x0240},
+ {MISENSOR_16BIT, 0xD3FC, 0xE080},
+ {MISENSOR_16BIT, 0xD3FE, 0x20CA},
+ {MISENSOR_16BIT, 0xD400, 0x0F82},
+ {MISENSOR_16BIT, 0xD402, 0x0000},
+ {MISENSOR_16BIT, 0xD404, 0x190B},
+ {MISENSOR_16BIT, 0xD406, 0x0C60},
+ {MISENSOR_16BIT, 0xD408, 0x05A2},
+ {MISENSOR_16BIT, 0xD40A, 0x21CA},
+ {MISENSOR_16BIT, 0xD40C, 0x0022},
+ {MISENSOR_16BIT, 0xD40E, 0x0C56},
+ {MISENSOR_16BIT, 0xD410, 0x0240},
+ {MISENSOR_16BIT, 0xD412, 0xE806},
+ {MISENSOR_16BIT, 0xD414, 0x0E0E},
+ {MISENSOR_16BIT, 0xD416, 0x0220},
+ {MISENSOR_16BIT, 0xD418, 0x70C9},
+ {MISENSOR_16BIT, 0xD41A, 0xF048},
+ {MISENSOR_16BIT, 0xD41C, 0x0896},
+ {MISENSOR_16BIT, 0xD41E, 0x0440},
+ {MISENSOR_16BIT, 0xD420, 0x0E96},
+ {MISENSOR_16BIT, 0xD422, 0x0400},
+ {MISENSOR_16BIT, 0xD424, 0x0966},
+ {MISENSOR_16BIT, 0xD426, 0x0380},
+ {MISENSOR_16BIT, 0xD428, 0x75CF},
+ {MISENSOR_16BIT, 0xD42A, 0xFFFF},
+ {MISENSOR_16BIT, 0xD42C, 0xD4E0},
+ {MISENSOR_16BIT, 0xD42E, 0x8D00},
+ {MISENSOR_16BIT, 0xD430, 0x084D},
+ {MISENSOR_16BIT, 0xD432, 0x001E},
+ {MISENSOR_16BIT, 0xD434, 0xFF47},
+ {MISENSOR_16BIT, 0xD436, 0x080D},
+ {MISENSOR_16BIT, 0xD438, 0x0050},
+ {MISENSOR_16BIT, 0xD43A, 0xFF57},
+ {MISENSOR_16BIT, 0xD43C, 0x0841},
+ {MISENSOR_16BIT, 0xD43E, 0x0051},
+ {MISENSOR_16BIT, 0xD440, 0x8D04},
+ {MISENSOR_16BIT, 0xD442, 0x9521},
+ {MISENSOR_16BIT, 0xD444, 0xE064},
+ {MISENSOR_16BIT, 0xD446, 0x790C},
+ {MISENSOR_16BIT, 0xD448, 0x702F},
+ {MISENSOR_16BIT, 0xD44A, 0x0CE2},
+ {MISENSOR_16BIT, 0xD44C, 0x05E0},
+ {MISENSOR_16BIT, 0xD44E, 0xD964},
+ {MISENSOR_16BIT, 0xD450, 0x72CF},
+ {MISENSOR_16BIT, 0xD452, 0xFFFF},
+ {MISENSOR_16BIT, 0xD454, 0xC700},
+ {MISENSOR_16BIT, 0xD456, 0x9235},
+ {MISENSOR_16BIT, 0xD458, 0x0811},
+ {MISENSOR_16BIT, 0xD45A, 0x0043},
+ {MISENSOR_16BIT, 0xD45C, 0xFF3D},
+ {MISENSOR_16BIT, 0xD45E, 0x080D},
+ {MISENSOR_16BIT, 0xD460, 0x0051},
+ {MISENSOR_16BIT, 0xD462, 0xD801},
+ {MISENSOR_16BIT, 0xD464, 0xFF77},
+ {MISENSOR_16BIT, 0xD466, 0xF025},
+ {MISENSOR_16BIT, 0xD468, 0x9501},
+ {MISENSOR_16BIT, 0xD46A, 0x9235},
+ {MISENSOR_16BIT, 0xD46C, 0x0911},
+ {MISENSOR_16BIT, 0xD46E, 0x0003},
+ {MISENSOR_16BIT, 0xD470, 0xFF49},
+ {MISENSOR_16BIT, 0xD472, 0x080D},
+ {MISENSOR_16BIT, 0xD474, 0x0051},
+ {MISENSOR_16BIT, 0xD476, 0xD800},
+ {MISENSOR_16BIT, 0xD478, 0xFF72},
+ {MISENSOR_16BIT, 0xD47A, 0xF01B},
+ {MISENSOR_16BIT, 0xD47C, 0x0886},
+ {MISENSOR_16BIT, 0xD47E, 0x03E0},
+ {MISENSOR_16BIT, 0xD480, 0xD801},
+ {MISENSOR_16BIT, 0xD482, 0x0EF6},
+ {MISENSOR_16BIT, 0xD484, 0x03C0},
+ {MISENSOR_16BIT, 0xD486, 0x0F52},
+ {MISENSOR_16BIT, 0xD488, 0x0340},
+ {MISENSOR_16BIT, 0xD48A, 0x0DBA},
+ {MISENSOR_16BIT, 0xD48C, 0x0200},
+ {MISENSOR_16BIT, 0xD48E, 0x0AF6},
+ {MISENSOR_16BIT, 0xD490, 0x0440},
+ {MISENSOR_16BIT, 0xD492, 0x0C22},
+ {MISENSOR_16BIT, 0xD494, 0x0400},
+ {MISENSOR_16BIT, 0xD496, 0x0D72},
+ {MISENSOR_16BIT, 0xD498, 0x0440},
+ {MISENSOR_16BIT, 0xD49A, 0x0DC2},
+ {MISENSOR_16BIT, 0xD49C, 0x0200},
+ {MISENSOR_16BIT, 0xD49E, 0x0972},
+ {MISENSOR_16BIT, 0xD4A0, 0x0440},
+ {MISENSOR_16BIT, 0xD4A2, 0x0D3A},
+ {MISENSOR_16BIT, 0xD4A4, 0x0220},
+ {MISENSOR_16BIT, 0xD4A6, 0xD820},
+ {MISENSOR_16BIT, 0xD4A8, 0x0BFA},
+ {MISENSOR_16BIT, 0xD4AA, 0x0260},
+ {MISENSOR_16BIT, 0xD4AC, 0x70C9},
+ {MISENSOR_16BIT, 0xD4AE, 0x0451},
+ {MISENSOR_16BIT, 0xD4B0, 0x05C0},
+ {MISENSOR_16BIT, 0xD4B2, 0x78E0},
+ {MISENSOR_16BIT, 0xD4B4, 0xD900},
+ {MISENSOR_16BIT, 0xD4B6, 0xF00A},
+ {MISENSOR_16BIT, 0xD4B8, 0x70CF},
+ {MISENSOR_16BIT, 0xD4BA, 0xFFFF},
+ {MISENSOR_16BIT, 0xD4BC, 0xD520},
+ {MISENSOR_16BIT, 0xD4BE, 0x7835},
+ {MISENSOR_16BIT, 0xD4C0, 0x8041},
+ {MISENSOR_16BIT, 0xD4C2, 0x8000},
+ {MISENSOR_16BIT, 0xD4C4, 0xE102},
+ {MISENSOR_16BIT, 0xD4C6, 0xA040},
+ {MISENSOR_16BIT, 0xD4C8, 0x09F1},
+ {MISENSOR_16BIT, 0xD4CA, 0x8114},
+ {MISENSOR_16BIT, 0xD4CC, 0x71CF},
+ {MISENSOR_16BIT, 0xD4CE, 0xFFFF},
+ {MISENSOR_16BIT, 0xD4D0, 0xD4E0},
+ {MISENSOR_16BIT, 0xD4D2, 0x70CF},
+ {MISENSOR_16BIT, 0xD4D4, 0xFFFF},
+ {MISENSOR_16BIT, 0xD4D6, 0xC594},
+ {MISENSOR_16BIT, 0xD4D8, 0xB03A},
+ {MISENSOR_16BIT, 0xD4DA, 0x7FE0},
+ {MISENSOR_16BIT, 0xD4DC, 0xD800},
+ {MISENSOR_16BIT, 0xD4DE, 0x0000},
+ {MISENSOR_16BIT, 0xD4E0, 0x0000},
+ {MISENSOR_16BIT, 0xD4E2, 0x0500},
+ {MISENSOR_16BIT, 0xD4E4, 0x0500},
+ {MISENSOR_16BIT, 0xD4E6, 0x0200},
+ {MISENSOR_16BIT, 0xD4E8, 0x0330},
+ {MISENSOR_16BIT, 0xD4EA, 0x0000},
+ {MISENSOR_16BIT, 0xD4EC, 0x0000},
+ {MISENSOR_16BIT, 0xD4EE, 0x03CD},
+ {MISENSOR_16BIT, 0xD4F0, 0x050D},
+ {MISENSOR_16BIT, 0xD4F2, 0x01C5},
+ {MISENSOR_16BIT, 0xD4F4, 0x03B3},
+ {MISENSOR_16BIT, 0xD4F6, 0x00E0},
+ {MISENSOR_16BIT, 0xD4F8, 0x01E3},
+ {MISENSOR_16BIT, 0xD4FA, 0x0280},
+ {MISENSOR_16BIT, 0xD4FC, 0x01E0},
+ {MISENSOR_16BIT, 0xD4FE, 0x0109},
+ {MISENSOR_16BIT, 0xD500, 0x0080},
+ {MISENSOR_16BIT, 0xD502, 0x0500},
+ {MISENSOR_16BIT, 0xD504, 0x0000},
+ {MISENSOR_16BIT, 0xD506, 0x0000},
+ {MISENSOR_16BIT, 0xD508, 0x0000},
+ {MISENSOR_16BIT, 0xD50A, 0x0000},
+ {MISENSOR_16BIT, 0xD50C, 0x0000},
+ {MISENSOR_16BIT, 0xD50E, 0x0000},
+ {MISENSOR_16BIT, 0xD510, 0x0000},
+ {MISENSOR_16BIT, 0xD512, 0x0000},
+ {MISENSOR_16BIT, 0xD514, 0x0000},
+ {MISENSOR_16BIT, 0xD516, 0x0000},
+ {MISENSOR_16BIT, 0xD518, 0x0000},
+ {MISENSOR_16BIT, 0xD51A, 0x0000},
+ {MISENSOR_16BIT, 0xD51C, 0x0000},
+ {MISENSOR_16BIT, 0xD51E, 0x0000},
+ {MISENSOR_16BIT, 0xD520, 0xFFFF},
+ {MISENSOR_16BIT, 0xD522, 0xC9B4},
+ {MISENSOR_16BIT, 0xD524, 0xFFFF},
+ {MISENSOR_16BIT, 0xD526, 0xD324},
+ {MISENSOR_16BIT, 0xD528, 0xFFFF},
+ {MISENSOR_16BIT, 0xD52A, 0xCA34},
+ {MISENSOR_16BIT, 0xD52C, 0xFFFF},
+ {MISENSOR_16BIT, 0xD52E, 0xD3EC},
+ {MISENSOR_16BIT, 0x098E, 0x0000},
+ {MISENSOR_16BIT, 0xE000, 0x04B4},
+ {MISENSOR_16BIT, 0xE002, 0x0302},
+ {MISENSOR_16BIT, 0xE004, 0x4103},
+ {MISENSOR_16BIT, 0xE006, 0x0202},
+ {MISENSOR_16BIT, 0x0080, 0xFFF0},
+ {MISENSOR_16BIT, 0x0080, 0xFFF1},
+
+	/*
+	 * PGA parameter and APGA
+	 * [Step4-APGA] [TP101_MT9M114_APGA]
+	 */
+ {MISENSOR_16BIT, 0x098E, 0x495E},
+ {MISENSOR_16BIT, 0xC95E, 0x0000},
+ {MISENSOR_16BIT, 0x3640, 0x02B0},
+ {MISENSOR_16BIT, 0x3642, 0x8063},
+ {MISENSOR_16BIT, 0x3644, 0x78D0},
+ {MISENSOR_16BIT, 0x3646, 0x50CC},
+ {MISENSOR_16BIT, 0x3648, 0x3511},
+ {MISENSOR_16BIT, 0x364A, 0x0110},
+ {MISENSOR_16BIT, 0x364C, 0xBD8A},
+ {MISENSOR_16BIT, 0x364E, 0x0CD1},
+ {MISENSOR_16BIT, 0x3650, 0x24ED},
+ {MISENSOR_16BIT, 0x3652, 0x7C11},
+ {MISENSOR_16BIT, 0x3654, 0x0150},
+ {MISENSOR_16BIT, 0x3656, 0x124C},
+ {MISENSOR_16BIT, 0x3658, 0x3130},
+ {MISENSOR_16BIT, 0x365A, 0x508C},
+ {MISENSOR_16BIT, 0x365C, 0x21F1},
+ {MISENSOR_16BIT, 0x365E, 0x0090},
+ {MISENSOR_16BIT, 0x3660, 0xBFCA},
+ {MISENSOR_16BIT, 0x3662, 0x0A11},
+ {MISENSOR_16BIT, 0x3664, 0x4F4B},
+ {MISENSOR_16BIT, 0x3666, 0x28B1},
+ {MISENSOR_16BIT, 0x3680, 0x50A9},
+ {MISENSOR_16BIT, 0x3682, 0xA04B},
+ {MISENSOR_16BIT, 0x3684, 0x0E2D},
+ {MISENSOR_16BIT, 0x3686, 0x73EC},
+ {MISENSOR_16BIT, 0x3688, 0x164F},
+ {MISENSOR_16BIT, 0x368A, 0xF829},
+ {MISENSOR_16BIT, 0x368C, 0xC1A8},
+ {MISENSOR_16BIT, 0x368E, 0xB0EC},
+ {MISENSOR_16BIT, 0x3690, 0xE76A},
+ {MISENSOR_16BIT, 0x3692, 0x69AF},
+ {MISENSOR_16BIT, 0x3694, 0x378C},
+ {MISENSOR_16BIT, 0x3696, 0xA70D},
+ {MISENSOR_16BIT, 0x3698, 0x884F},
+ {MISENSOR_16BIT, 0x369A, 0xEE8B},
+ {MISENSOR_16BIT, 0x369C, 0x5DEF},
+ {MISENSOR_16BIT, 0x369E, 0x27CC},
+ {MISENSOR_16BIT, 0x36A0, 0xCAAC},
+ {MISENSOR_16BIT, 0x36A2, 0x840E},
+ {MISENSOR_16BIT, 0x36A4, 0xDAA9},
+ {MISENSOR_16BIT, 0x36A6, 0xF00C},
+ {MISENSOR_16BIT, 0x36C0, 0x1371},
+ {MISENSOR_16BIT, 0x36C2, 0x272F},
+ {MISENSOR_16BIT, 0x36C4, 0x2293},
+ {MISENSOR_16BIT, 0x36C6, 0xE6D0},
+ {MISENSOR_16BIT, 0x36C8, 0xEC32},
+ {MISENSOR_16BIT, 0x36CA, 0x11B1},
+ {MISENSOR_16BIT, 0x36CC, 0x7BAF},
+ {MISENSOR_16BIT, 0x36CE, 0x5813},
+ {MISENSOR_16BIT, 0x36D0, 0xB871},
+ {MISENSOR_16BIT, 0x36D2, 0x8913},
+ {MISENSOR_16BIT, 0x36D4, 0x4610},
+ {MISENSOR_16BIT, 0x36D6, 0x7EEE},
+ {MISENSOR_16BIT, 0x36D8, 0x0DF3},
+ {MISENSOR_16BIT, 0x36DA, 0xB84F},
+ {MISENSOR_16BIT, 0x36DC, 0xB532},
+ {MISENSOR_16BIT, 0x36DE, 0x1171},
+ {MISENSOR_16BIT, 0x36E0, 0x13CF},
+ {MISENSOR_16BIT, 0x36E2, 0x22F3},
+ {MISENSOR_16BIT, 0x36E4, 0xE090},
+ {MISENSOR_16BIT, 0x36E6, 0x8133},
+ {MISENSOR_16BIT, 0x3700, 0x88AE},
+ {MISENSOR_16BIT, 0x3702, 0x00EA},
+ {MISENSOR_16BIT, 0x3704, 0x344F},
+ {MISENSOR_16BIT, 0x3706, 0xEC88},
+ {MISENSOR_16BIT, 0x3708, 0x3E91},
+ {MISENSOR_16BIT, 0x370A, 0xF12D},
+ {MISENSOR_16BIT, 0x370C, 0xB0EF},
+ {MISENSOR_16BIT, 0x370E, 0x77CD},
+ {MISENSOR_16BIT, 0x3710, 0x7930},
+ {MISENSOR_16BIT, 0x3712, 0x5C12},
+ {MISENSOR_16BIT, 0x3714, 0x500C},
+ {MISENSOR_16BIT, 0x3716, 0x22CE},
+ {MISENSOR_16BIT, 0x3718, 0x2370},
+ {MISENSOR_16BIT, 0x371A, 0x258F},
+ {MISENSOR_16BIT, 0x371C, 0x3D30},
+ {MISENSOR_16BIT, 0x371E, 0x370C},
+ {MISENSOR_16BIT, 0x3720, 0x03ED},
+ {MISENSOR_16BIT, 0x3722, 0x9AD0},
+ {MISENSOR_16BIT, 0x3724, 0x7ECF},
+ {MISENSOR_16BIT, 0x3726, 0x1093},
+ {MISENSOR_16BIT, 0x3740, 0x2391},
+ {MISENSOR_16BIT, 0x3742, 0xAAD0},
+ {MISENSOR_16BIT, 0x3744, 0x28F2},
+ {MISENSOR_16BIT, 0x3746, 0xBA4F},
+ {MISENSOR_16BIT, 0x3748, 0xC536},
+ {MISENSOR_16BIT, 0x374A, 0x1472},
+ {MISENSOR_16BIT, 0x374C, 0xD110},
+ {MISENSOR_16BIT, 0x374E, 0x2933},
+ {MISENSOR_16BIT, 0x3750, 0xD0D1},
+ {MISENSOR_16BIT, 0x3752, 0x9F37},
+ {MISENSOR_16BIT, 0x3754, 0x34D1},
+ {MISENSOR_16BIT, 0x3756, 0x1C6C},
+ {MISENSOR_16BIT, 0x3758, 0x3FD2},
+ {MISENSOR_16BIT, 0x375A, 0xCB72},
+ {MISENSOR_16BIT, 0x375C, 0xBA96},
+ {MISENSOR_16BIT, 0x375E, 0x1551},
+ {MISENSOR_16BIT, 0x3760, 0xB74F},
+ {MISENSOR_16BIT, 0x3762, 0x1672},
+ {MISENSOR_16BIT, 0x3764, 0x84F1},
+ {MISENSOR_16BIT, 0x3766, 0xC2D6},
+ {MISENSOR_16BIT, 0x3782, 0x01E0},
+ {MISENSOR_16BIT, 0x3784, 0x0280},
+ {MISENSOR_16BIT, 0x37C0, 0xA6EA},
+ {MISENSOR_16BIT, 0x37C2, 0x874B},
+ {MISENSOR_16BIT, 0x37C4, 0x85CB},
+ {MISENSOR_16BIT, 0x37C6, 0x968A},
+ {MISENSOR_16BIT, 0x098E, 0x0000},
+ {MISENSOR_16BIT, 0xC960, 0x0AF0},
+ {MISENSOR_16BIT, 0xC962, 0x79E2},
+ {MISENSOR_16BIT, 0xC964, 0x5EC8},
+ {MISENSOR_16BIT, 0xC966, 0x791F},
+ {MISENSOR_16BIT, 0xC968, 0x76EE},
+ {MISENSOR_16BIT, 0xC96A, 0x0FA0},
+ {MISENSOR_16BIT, 0xC96C, 0x7DFA},
+ {MISENSOR_16BIT, 0xC96E, 0x7DAF},
+ {MISENSOR_16BIT, 0xC970, 0x7E02},
+ {MISENSOR_16BIT, 0xC972, 0x7E0A},
+ {MISENSOR_16BIT, 0xC974, 0x1964},
+ {MISENSOR_16BIT, 0xC976, 0x7CDC},
+ {MISENSOR_16BIT, 0xC978, 0x7838},
+ {MISENSOR_16BIT, 0xC97A, 0x7C2F},
+ {MISENSOR_16BIT, 0xC97C, 0x7792},
+ {MISENSOR_16BIT, 0xC95E, 0x0003},
+
+ /* [Step4-APGA] */
+ {MISENSOR_16BIT, 0x098E, 0x0000},
+ {MISENSOR_16BIT, 0xC95E, 0x0003},
+
+ /* [Step5-AWB_CCM]1: LOAD=CCM */
+ {MISENSOR_16BIT, 0xC892, 0x0267},
+ {MISENSOR_16BIT, 0xC894, 0xFF1A},
+ {MISENSOR_16BIT, 0xC896, 0xFFB3},
+ {MISENSOR_16BIT, 0xC898, 0xFF80},
+ {MISENSOR_16BIT, 0xC89A, 0x0166},
+ {MISENSOR_16BIT, 0xC89C, 0x0003},
+ {MISENSOR_16BIT, 0xC89E, 0xFF9A},
+ {MISENSOR_16BIT, 0xC8A0, 0xFEB4},
+ {MISENSOR_16BIT, 0xC8A2, 0x024D},
+ {MISENSOR_16BIT, 0xC8A4, 0x01BF},
+ {MISENSOR_16BIT, 0xC8A6, 0xFF01},
+ {MISENSOR_16BIT, 0xC8A8, 0xFFF3},
+ {MISENSOR_16BIT, 0xC8AA, 0xFF75},
+ {MISENSOR_16BIT, 0xC8AC, 0x0198},
+ {MISENSOR_16BIT, 0xC8AE, 0xFFFD},
+ {MISENSOR_16BIT, 0xC8B0, 0xFF9A},
+ {MISENSOR_16BIT, 0xC8B2, 0xFEE7},
+ {MISENSOR_16BIT, 0xC8B4, 0x02A8},
+ {MISENSOR_16BIT, 0xC8B6, 0x01D9},
+ {MISENSOR_16BIT, 0xC8B8, 0xFF26},
+ {MISENSOR_16BIT, 0xC8BA, 0xFFF3},
+ {MISENSOR_16BIT, 0xC8BC, 0xFFB3},
+ {MISENSOR_16BIT, 0xC8BE, 0x0132},
+ {MISENSOR_16BIT, 0xC8C0, 0xFFE8},
+ {MISENSOR_16BIT, 0xC8C2, 0xFFDA},
+ {MISENSOR_16BIT, 0xC8C4, 0xFECD},
+ {MISENSOR_16BIT, 0xC8C6, 0x02C2},
+ {MISENSOR_16BIT, 0xC8C8, 0x0075},
+ {MISENSOR_16BIT, 0xC8CA, 0x011C},
+ {MISENSOR_16BIT, 0xC8CC, 0x009A},
+ {MISENSOR_16BIT, 0xC8CE, 0x0105},
+ {MISENSOR_16BIT, 0xC8D0, 0x00A4},
+ {MISENSOR_16BIT, 0xC8D2, 0x00AC},
+ {MISENSOR_16BIT, 0xC8D4, 0x0A8C},
+ {MISENSOR_16BIT, 0xC8D6, 0x0F0A},
+ {MISENSOR_16BIT, 0xC8D8, 0x1964},
+
+ /* LOAD=AWB */
+ {MISENSOR_16BIT, 0xC914, 0x0000},
+ {MISENSOR_16BIT, 0xC916, 0x0000},
+ {MISENSOR_16BIT, 0xC918, 0x04FF},
+ {MISENSOR_16BIT, 0xC91A, 0x02CF},
+ {MISENSOR_16BIT, 0xC904, 0x0033},
+ {MISENSOR_16BIT, 0xC906, 0x0040},
+ {MISENSOR_8BIT, 0xC8F2, 0x03},
+ {MISENSOR_8BIT, 0xC8F3, 0x02},
+ {MISENSOR_16BIT, 0xC906, 0x003C},
+ {MISENSOR_16BIT, 0xC8F4, 0x0000},
+ {MISENSOR_16BIT, 0xC8F6, 0x0000},
+ {MISENSOR_16BIT, 0xC8F8, 0x0000},
+ {MISENSOR_16BIT, 0xC8FA, 0xE724},
+ {MISENSOR_16BIT, 0xC8FC, 0x1583},
+ {MISENSOR_16BIT, 0xC8FE, 0x2045},
+ {MISENSOR_16BIT, 0xC900, 0x05DC},
+ {MISENSOR_16BIT, 0xC902, 0x007C},
+ {MISENSOR_8BIT, 0xC90C, 0x80},
+ {MISENSOR_8BIT, 0xC90D, 0x80},
+ {MISENSOR_8BIT, 0xC90E, 0x80},
+ {MISENSOR_8BIT, 0xC90F, 0x88},
+ {MISENSOR_8BIT, 0xC910, 0x80},
+ {MISENSOR_8BIT, 0xC911, 0x80},
+
+ /* LOAD=Step7-CPIPE_Preference */
+ {MISENSOR_16BIT, 0xC926, 0x0020},
+ {MISENSOR_16BIT, 0xC928, 0x009A},
+ {MISENSOR_16BIT, 0xC946, 0x0070},
+ {MISENSOR_16BIT, 0xC948, 0x00F3},
+ {MISENSOR_16BIT, 0xC952, 0x0020},
+ {MISENSOR_16BIT, 0xC954, 0x009A},
+ {MISENSOR_8BIT, 0xC92A, 0x80},
+ {MISENSOR_8BIT, 0xC92B, 0x4B},
+ {MISENSOR_8BIT, 0xC92C, 0x00},
+ {MISENSOR_8BIT, 0xC92D, 0xFF},
+ {MISENSOR_8BIT, 0xC92E, 0x3C},
+ {MISENSOR_8BIT, 0xC92F, 0x02},
+ {MISENSOR_8BIT, 0xC930, 0x06},
+ {MISENSOR_8BIT, 0xC931, 0x64},
+ {MISENSOR_8BIT, 0xC932, 0x01},
+ {MISENSOR_8BIT, 0xC933, 0x0C},
+ {MISENSOR_8BIT, 0xC934, 0x3C},
+ {MISENSOR_8BIT, 0xC935, 0x3C},
+ {MISENSOR_8BIT, 0xC936, 0x3C},
+ {MISENSOR_8BIT, 0xC937, 0x0F},
+ {MISENSOR_8BIT, 0xC938, 0x64},
+ {MISENSOR_8BIT, 0xC939, 0x64},
+ {MISENSOR_8BIT, 0xC93A, 0x64},
+ {MISENSOR_8BIT, 0xC93B, 0x32},
+ {MISENSOR_16BIT, 0xC93C, 0x0020},
+ {MISENSOR_16BIT, 0xC93E, 0x009A},
+ {MISENSOR_16BIT, 0xC940, 0x00DC},
+ {MISENSOR_8BIT, 0xC942, 0x38},
+ {MISENSOR_8BIT, 0xC943, 0x30},
+ {MISENSOR_8BIT, 0xC944, 0x50},
+ {MISENSOR_8BIT, 0xC945, 0x19},
+ {MISENSOR_16BIT, 0xC94A, 0x0230},
+ {MISENSOR_16BIT, 0xC94C, 0x0010},
+ {MISENSOR_16BIT, 0xC94E, 0x01CD},
+ {MISENSOR_8BIT, 0xC950, 0x05},
+ {MISENSOR_8BIT, 0xC951, 0x40},
+ {MISENSOR_8BIT, 0xC87B, 0x1B},
+ {MISENSOR_8BIT, 0xC878, 0x0E},
+ {MISENSOR_16BIT, 0xC890, 0x0080},
+ {MISENSOR_16BIT, 0xC886, 0x0100},
+ {MISENSOR_16BIT, 0xC87C, 0x005A},
+ {MISENSOR_8BIT, 0xB42A, 0x05},
+ {MISENSOR_8BIT, 0xA80A, 0x20},
+
+ /* Speed up AE/AWB */
+ {MISENSOR_16BIT, 0x098E, 0x2802},
+ {MISENSOR_16BIT, 0xA802, 0x0008},
+ {MISENSOR_8BIT, 0xC908, 0x01},
+ {MISENSOR_8BIT, 0xC879, 0x01},
+ {MISENSOR_8BIT, 0xC909, 0x02},
+ {MISENSOR_8BIT, 0xA80A, 0x18},
+ {MISENSOR_8BIT, 0xA80B, 0x18},
+ {MISENSOR_8BIT, 0xAC16, 0x18},
+ {MISENSOR_8BIT, 0xC878, 0x0E},
+
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+#endif
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
new file mode 100644
index 000000000000..034e1032f6c0
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -0,0 +1,845 @@
+/*
+ * Support for OmniVision OV2680 2M camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __OV2680_H__
+#define __OV2680_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define OV2680_FOCAL_LENGTH_NUM 334 /*3.34mm*/
+#define OV2680_FOCAL_LENGTH_DEM 100
+#define OV2680_F_NUMBER_DEFAULT_NUM 24
+#define OV2680_F_NUMBER_DEM 10
+
+#define OV2680_BIN_FACTOR_MAX 4
+
+#define MAX_FMTS 1
+
+/* sensor_mode_data read_mode adaptation */
+#define OV2680_READ_MODE_BINNING_ON 0x0400
+#define OV2680_READ_MODE_BINNING_OFF 0x00
+#define OV2680_INTEGRATION_TIME_MARGIN 8
+
+#define OV2680_MAX_EXPOSURE_VALUE 0xFFF1
+#define OV2680_MAX_GAIN_VALUE 0xFF
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2680_FOCAL_LENGTH_DEFAULT 0x1B70064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2680_F_NUMBER_DEFAULT 0x18000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define OV2680_F_NUMBER_RANGE 0x180a180a
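
The packed focal-length and f-number formats above decode with plain shifts. As a sanity check, OV2680_F_NUMBER_DEFAULT (0x18000a) unpacks to 24/10, i.e. f/2.4, agreeing with OV2680_F_NUMBER_DEFAULT_NUM and OV2680_F_NUMBER_DEM. A small sketch of the unpacking:

/* Worked example of the bit layouts documented above (a sketch). */
static inline u16 ov2680_fnumber_num(u32 packed)
{
	return packed >> 16;		/* 0x18000a >> 16 == 24 */
}

static inline u16 ov2680_fnumber_dem(u32 packed)
{
	return packed & 0xffff;		/* 0x18000a & 0xffff == 10 -> f/2.4 */
}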
+#define OV2680_ID 0x2680
+
+#define OV2680_FINE_INTG_TIME_MIN 0
+#define OV2680_FINE_INTG_TIME_MAX_MARGIN 0
+#define OV2680_COARSE_INTG_TIME_MIN 1
+#define OV2680_COARSE_INTG_TIME_MAX_MARGIN 6
+
+/*
+ * OV2680 System control registers
+ */
+#define OV2680_SW_SLEEP 0x0100
+#define OV2680_SW_RESET 0x0103
+#define OV2680_SW_STREAM 0x0100
+
+#define OV2680_SC_CMMN_CHIP_ID_H 0x300A
+#define OV2680_SC_CMMN_CHIP_ID_L 0x300B
+#define OV2680_SC_CMMN_SCCB_ID 0x302B /* 0x300C*/
+#define OV2680_SC_CMMN_SUB_ID 0x302A /* process, version*/
+
+#define OV2680_GROUP_ACCESS 0x3208 /*Bit[7:4] Group control, Bit[3:0] Group ID*/
+
+#define OV2680_EXPOSURE_H	0x3500 /*Bit[3:0]: Bit[19:16] of exposure; the remaining 16 bits lie in Reg0x3501 and Reg0x3502*/
+#define OV2680_EXPOSURE_M 0x3501
+#define OV2680_EXPOSURE_L 0x3502
+#define OV2680_AGC_H	0x350A /*Bit[1:0]: Bit[9:8] of gain*/
+#define OV2680_AGC_L 0x350B /*Bit[7:0] of gain*/
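
Per the comments above, the 20-bit exposure value is spread over three byte registers. A sketch of the split, assuming ov2680_write_reg8() as a hypothetical 8-bit I2C write helper (the real driver's helper may differ):

/* A sketch of programming a 20-bit exposure, following the bit
 * layout in the register comments; helper name is an assumption. */
static int ov2680_set_exposure_regs(struct i2c_client *client, u32 exp)
{
	int ret;

	ret = ov2680_write_reg8(client, OV2680_EXPOSURE_H,
				(exp >> 16) & 0x0f);	/* Bit[19:16] */
	if (ret)
		return ret;

	ret = ov2680_write_reg8(client, OV2680_EXPOSURE_M,
				(exp >> 8) & 0xff);	/* Bit[15:8] */
	if (ret)
		return ret;

	return ov2680_write_reg8(client, OV2680_EXPOSURE_L,
				 exp & 0xff);		/* Bit[7:0] */
}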
+
+#define OV2680_HORIZONTAL_START_H 0x3800 /*Bit[11:8]*/
+#define OV2680_HORIZONTAL_START_L 0x3801 /*Bit[7:0]*/
+#define OV2680_VERTICAL_START_H 0x3802 /*Bit[11:8]*/
+#define OV2680_VERTICAL_START_L 0x3803 /*Bit[7:0]*/
+#define OV2680_HORIZONTAL_END_H 0x3804 /*Bit[11:8]*/
+#define OV2680_HORIZONTAL_END_L 0x3805 /*Bit[7:0]*/
+#define OV2680_VERTICAL_END_H 0x3806 /*Bit[11:8]*/
+#define OV2680_VERTICAL_END_L 0x3807 /*Bit[7:0]*/
+#define OV2680_HORIZONTAL_OUTPUT_SIZE_H 0x3808 /*Bit[3:0]*/
+#define OV2680_HORIZONTAL_OUTPUT_SIZE_L 0x3809 /*Bit[7:0]*/
+#define OV2680_VERTICAL_OUTPUT_SIZE_H 0x380a /*Bit[3:0]*/
+#define OV2680_VERTICAL_OUTPUT_SIZE_L 0x380b /*Bit[7:0]*/
+#define OV2680_TIMING_HTS_H 0x380C /*High 8-bit, and low 8-bit HTS address is 0x380d*/
+#define OV2680_TIMING_HTS_L 0x380D /*High 8-bit, and low 8-bit HTS address is 0x380d*/
+#define OV2680_TIMING_VTS_H	0x380e /*High 8-bit, and low 8-bit VTS address is 0x380f*/
+#define OV2680_TIMING_VTS_L	0x380f /*High 8-bit, and low 8-bit VTS address is 0x380f*/
+#define OV2680_FRAME_OFF_NUM 0x4202
+
+/*Flip/Mirror*/
+#define OV2680_FLIP_REG 0x3820
+#define OV2680_MIRROR_REG 0x3821
+#define OV2680_FLIP_BIT 1
+#define OV2680_MIRROR_BIT 2
+#define OV2680_FLIP_MIRROR_BIT_ENABLE 4
+
+#define OV2680_MWB_RED_GAIN_H 0x5004/*0x3400*/
+#define OV2680_MWB_GREEN_GAIN_H 0x5006/*0x3402*/
+#define OV2680_MWB_BLUE_GAIN_H 0x5008/*0x3404*/
+#define OV2680_MWB_GAIN_MAX 0x0fff
+
+#define OV2680_START_STREAMING 0x01
+#define OV2680_STOP_STREAMING 0x00
+
+#define OV2680_INVALID_CONFIG 0xffffffff
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct ov2680_resolution {
+ u8 *desc;
+ const struct ov2680_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+};
+
+struct ov2680_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct ov2680_reg *regs;
+};
+
+/*
+ * ov2680 device structure.
+ */
+struct ov2680_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct camera_sensor_platform_data *platform_data;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ int run_mode;
+ u8 res;
+ u8 type;
+};
+
+/**
+ * struct ov2680_reg - ov2680 sensor register format
+ * @reg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct ov2680_reg {
+ u16 reg;
+ u32 val; /* @set value for read/mod/write, @mask */
+};
+
+#define to_ov2680_sensor(x) container_of(x, struct ov2680_device, sd)
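
The container_of() wrapper recovers the driver state from the struct v4l2_subdev embedded in it, which is what every subdev callback receives. A hypothetical stub showing the usual pattern; only the to_ov2680_sensor() usage is the point here:

/* Hypothetical callback stub, not the driver's real implementation. */
static int ov2680_s_power(struct v4l2_subdev *sd, int on)
{
	struct ov2680_device *dev = to_ov2680_sensor(sd);
	int ret = 0;

	mutex_lock(&dev->input_lock);
	/* ... power sequencing via dev->platform_data would go here ... */
	mutex_unlock(&dev->input_lock);

	return ret;
}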
+
+#define OV2680_MAX_WRITE_BUF_SIZE 30
+
+struct ov2680_write_buffer {
+ u16 addr;
+ u8 data[OV2680_MAX_WRITE_BUF_SIZE];
+};
+
+struct ov2680_write_ctrl {
+ int index;
+ struct ov2680_write_buffer buffer;
+};
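
The write-control pair above is the usual atomisp buffering scheme: byte values destined for consecutive registers are queued in the buffer and flushed as a single I2C burst, a big-endian 16-bit start address followed by up to OV2680_MAX_WRITE_BUF_SIZE data bytes. A sketch of the flush step, where ov2680_i2c_send() is an assumed raw-transfer helper:

/* A sketch of flushing the queued bytes in one transaction;
 * ov2680_i2c_send() is an assumption, not a real driver call. */
static int ov2680_flush_write_ctrl(struct i2c_client *client,
				   struct ov2680_write_ctrl *ctrl)
{
	u16 size;

	if (ctrl->index == 0)
		return 0;

	/* two address bytes plus however many data bytes were queued */
	size = sizeof(ctrl->buffer.addr) + ctrl->index;
	ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
	ctrl->index = 0;

	return ov2680_i2c_send(client, size, (u8 *)&ctrl->buffer);
}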
+
+static struct ov2680_reg const ov2680_global_setting[] = {
+ {0x0103, 0x01},
+ {0x3002, 0x00},
+ {0x3016, 0x1c},
+ {0x3018, 0x44},
+ {0x3020, 0x00},
+ {0x3080, 0x02},
+ {0x3082, 0x45},
+ {0x3084, 0x09},
+ {0x3085, 0x04},
+ {0x3503, 0x03},
+ {0x350b, 0x36},
+ {0x3600, 0xb4},
+ {0x3603, 0x39},
+ {0x3604, 0x24},
+ {0x3605, 0x00},
+ {0x3620, 0x26},
+ {0x3621, 0x37},
+ {0x3622, 0x04},
+ {0x3628, 0x00},
+ {0x3705, 0x3c},
+ {0x370c, 0x50},
+ {0x370d, 0xc0},
+ {0x3718, 0x88},
+ {0x3720, 0x00},
+ {0x3721, 0x00},
+ {0x3722, 0x00},
+ {0x3723, 0x00},
+ {0x3738, 0x00},
+ {0x3717, 0x58},
+ {0x3781, 0x80},
+ {0x3789, 0x60},
+ {0x3800, 0x00},
+ {0x3819, 0x04},
+ {0x4000, 0x81},
+ {0x4001, 0x40},
+ {0x4602, 0x02},
+ {0x481f, 0x36},
+ {0x4825, 0x36},
+ {0x4837, 0x18},
+ {0x5002, 0x30},
+ {0x5004, 0x04},//manual awb 1x
+ {0x5005, 0x00},
+ {0x5006, 0x04},
+ {0x5007, 0x00},
+ {0x5008, 0x04},
+ {0x5009, 0x00},
+ {0x5080, 0x00},
+ {0x3701, 0x64}, //add on 14/05/13
+ {0x3784, 0x0c}, //based OV2680_R1A_AM10.ovt add on 14/06/13
+ {0x5780, 0x3e}, //based OV2680_R1A_AM10.ovt,Adjust DPC setting (57xx) on 14/06/13
+ {0x5781, 0x0f},
+ {0x5782, 0x04},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x00},
+ {0x578a, 0x01},
+ {0x578b, 0x02},
+ {0x578c, 0x03},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x00},
+ {0x5794, 0x03}, //based OV2680_R1A_AM10.ovt,Adjust DPC setting (57xx) on 14/06/13
+ {0x0100, 0x00}, //stream off
+
+ {}
+};
+
+#if 0 /* None of the definitions below are used currently */
+/*
+ * 176x144 30fps VBlanking 1lane 10Bit (binning)
+ */
+static struct ov2680_reg const ov2680_QCIF_30fps[] = {
+ {0x3086, 0x01},
+ {0x3501, 0x24},
+ {0x3502, 0x40},
+ {0x370a, 0x23},
+ {0x3801, 0xa0},
+ {0x3802, 0x00},
+ {0x3803, 0x78},
+ {0x3804, 0x05},
+ {0x3805, 0xaf},
+ {0x3806, 0x04},
+ {0x3807, 0x47},
+ {0x3808, 0x00},
+ {0x3809, 0xC0},
+ {0x380a, 0x00},
+ {0x380b, 0xa0},
+ {0x380c, 0x06},
+ {0x380d, 0xb0},
+ {0x380e, 0x02},
+ {0x380f, 0x84},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x04},
+ {0x3814, 0x31},
+ {0x3815, 0x31},
+ {0x4000, 0x81},
+ {0x4001, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x03},
+ {0x5081, 0x41},
+ {0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x10},
+ {0x5705, 0xa0},
+ {0x5706, 0x0c},
+ {0x5707, 0x78},
+ {0x3820, 0xc2},
+ {0x3821, 0x01},
+ // {0x5090, 0x0c},
+ {}
+};
+
+/*
+ * 352x288 30fps VBlanking 1lane 10Bit (binning)
+ */
+static struct ov2680_reg const ov2680_CIF_30fps[] = {
+ {0x3086, 0x01},
+ {0x3501, 0x24},
+ {0x3502, 0x40},
+ {0x370a, 0x23},
+ {0x3801, 0xa0},
+ {0x3802, 0x00},
+ {0x3803, 0x78},
+ {0x3804, 0x03},
+ {0x3805, 0x8f},
+ {0x3806, 0x02},
+ {0x3807, 0xe7},
+ {0x3808, 0x01},
+ {0x3809, 0x70},
+ {0x380a, 0x01},
+ {0x380b, 0x30},
+ {0x380c, 0x06},
+ {0x380d, 0xb0},
+ {0x380e, 0x02},
+ {0x380f, 0x84},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x04},
+ {0x3814, 0x31},
+ {0x3815, 0x31},
+ {0x4008, 0x00},
+ {0x4009, 0x03},
+ {0x5081, 0x41},
+ {0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x10},
+ {0x5705, 0xa0},
+ {0x5706, 0x0c},
+ {0x5707, 0x78},
+ {0x3820, 0xc2},
+ {0x3821, 0x01},
+ // {0x5090, 0x0c},
+ {}
+};
+
+/*
+ * 336x256 30fps VBlanking 1lane 10Bit (binning)
+ */
+static struct ov2680_reg const ov2680_QVGA_30fps[] = {
+ {0x3086, 0x01},
+ {0x3501, 0x24},
+ {0x3502, 0x40},
+ {0x370a, 0x23},
+ {0x3801, 0xa0},
+ {0x3802, 0x00},
+ {0x3803, 0x78},
+ {0x3804, 0x03},
+ {0x3805, 0x4f},
+ {0x3806, 0x02},
+ {0x3807, 0x87},
+ {0x3808, 0x01},
+ {0x3809, 0x50},
+ {0x380a, 0x01},
+ {0x380b, 0x00},
+ {0x380c, 0x06},
+ {0x380d, 0xb0},
+ {0x380e, 0x02},
+ {0x380f, 0x84},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x04},
+ {0x3814, 0x31},
+ {0x3815, 0x31},
+ {0x4008, 0x00},
+ {0x4009, 0x03},
+ {0x5081, 0x41},
+ {0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x10},
+ {0x5705, 0xa0},
+ {0x5706, 0x0c},
+ {0x5707, 0x78},
+ {0x3820, 0xc2},
+ {0x3821, 0x01},
+ // {0x5090, 0x0c},
+ {}
+};
+
+/*
+ * 656x496 30fps VBlanking 1lane 10Bit (binning)
+ */
+static struct ov2680_reg const ov2680_656x496_30fps[] = {
+ {0x3086, 0x01},
+ {0x3501, 0x24},
+ {0x3502, 0x40},
+ {0x370a, 0x23},
+ {0x3801, 0xa0},
+ {0x3802, 0x00},
+ {0x3803, 0x78},
+ {0x3804, 0x05},
+ {0x3805, 0xcf},
+ {0x3806, 0x04},
+ {0x3807, 0x67},
+ {0x3808, 0x02},
+ {0x3809, 0x90},
+ {0x380a, 0x01},
+ {0x380b, 0xf0},
+ {0x380c, 0x06},
+ {0x380d, 0xb0},
+ {0x380e, 0x02},
+ {0x380f, 0x84},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x04},
+ {0x3814, 0x31},
+ {0x3815, 0x31},
+ {0x4008, 0x00},
+ {0x4009, 0x03},
+ {0x5081, 0x41},
+ {0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x10},
+ {0x5705, 0xa0},
+ {0x5706, 0x0c},
+ {0x5707, 0x78},
+ {0x3820, 0xc2},
+ {0x3821, 0x01},
+ // {0x5090, 0x0c},
+ {}
+};
+/*
+ * 720x592 30fps VBlanking 1lane 10Bit (binning)
+ */
+static struct ov2680_reg const ov2680_720x592_30fps[] = {
+ {0x3086, 0x01},
+ {0x3501, 0x26},
+ {0x3502, 0x40},
+ {0x370a, 0x23},
+ {0x3801, 0x00}, // X_ADDR_START;
+ {0x3802, 0x00},
+ {0x3803, 0x00}, // Y_ADDR_START;
+ {0x3804, 0x05},
+ {0x3805, 0xaf}, // X_ADDR_END;
+ {0x3806, 0x04},
+ {0x3807, 0xaf}, // Y_ADDR_END;
+ {0x3808, 0x02},
+ {0x3809, 0xd0}, // X_OUTPUT_SIZE;
+ {0x380a, 0x02},
+ {0x380b, 0x50}, // Y_OUTPUT_SIZE;
+ {0x380c, 0x06},
+ {0x380d, 0xac}, // HTS;
+ {0x380e, 0x02},
+ {0x380f, 0x84}, // VTS;
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x00},
+ {0x3814, 0x31},
+ {0x3815, 0x31},
+ {0x4008, 0x00},
+ {0x4009, 0x03},
+ {0x5708, 0x00},
+ {0x5704, 0x02},
+ {0x5705, 0xd0}, // X_WIN;
+ {0x5706, 0x02},
+ {0x5707, 0x50}, // Y_WIN;
+ {0x3820, 0xc2}, // FLIP_FORMAT;
+ {0x3821, 0x01}, // MIRROR_FORMAT;
+ {0x5090, 0x00}, // PRE ISP CTRL16, default value is 0x0C;
+ // BIT[3]: Mirror order, BG or GB;
+ // BIT[2]: Flip order, BR or RB;
+ {0x5081, 0x41},
+ {}
+};
+/*
+ * 800x600 30fps VBlanking 1lane 10Bit (binning)
+ */
+static struct ov2680_reg const ov2680_800x600_30fps[] = {
+ {0x3086, 0x01},
+ {0x3501, 0x26},
+ {0x3502, 0x40},
+ {0x370a, 0x23},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x06},
+ {0x3805, 0x4f},
+ {0x3806, 0x04},
+ {0x3807, 0xbf},
+ {0x3808, 0x03},
+ {0x3809, 0x20},
+ {0x380a, 0x02},
+ {0x380b, 0x58},
+ {0x380c, 0x06},
+ {0x380d, 0xac},
+ {0x380e, 0x02},
+ {0x380f, 0x84},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x00},
+ {0x3814, 0x31},
+ {0x3815, 0x31},
+ {0x5708, 0x00},
+ {0x5704, 0x03},
+ {0x5705, 0x20},
+ {0x5706, 0x02},
+ {0x5707, 0x58},
+ {0x3820, 0xc2},
+ {0x3821, 0x01},
+ {0x5090, 0x00},
+ {0x4008, 0x00},
+ {0x4009, 0x03},
+ {0x5081, 0x41},
+ {}
+};
+
+/*
+ * 720p=1280*720 30fps VBlanking 1lane 10Bit (no-Scaling)
+ */
+static struct ov2680_reg const ov2680_720p_30fps[] = {
+ {0x3086, 0x00},
+ {0x3501, 0x48},
+ {0x3502, 0xe0},
+ {0x370a, 0x21},
+ {0x3801, 0xa0},
+ {0x3802, 0x00},
+ {0x3803, 0xf2},
+ {0x3804, 0x05},
+ {0x3805, 0xbf},
+ {0x3806, 0x03},
+ {0x3807, 0xdd},
+ {0x3808, 0x05},
+ {0x3809, 0x10},
+ {0x380a, 0x02},
+ {0x380b, 0xe0},
+ {0x380c, 0x06},
+ {0x380d, 0xa8},
+ {0x380e, 0x05},
+ {0x380f, 0x0e},
+ {0x3810, 0x00},
+ {0x3811, 0x08},
+ {0x3812, 0x00},
+ {0x3813, 0x06},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x4008, 0x02},
+ {0x4009, 0x09},
+ {0x5081, 0x41},
+ {0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x10},
+ {0x5705, 0xa0},
+ {0x5706, 0x0c},
+ {0x5707, 0x78},
+ {0x3820, 0xc0},
+ {0x3821, 0x00},
+ // {0x5090, 0x0c},
+ {}
+};
+
+/*
+ * 1296x976 30fps VBlanking 1lane 10Bit(no-scaling)
+ */
+static struct ov2680_reg const ov2680_1296x976_30fps[] = {
+ {0x3086, 0x00},
+ {0x3501, 0x48},
+ {0x3502, 0xe0},
+ {0x370a, 0x21},
+ {0x3801, 0xa0},
+ {0x3802, 0x00},
+ {0x3803, 0x78},
+ {0x3804, 0x05},
+ {0x3805, 0xbf},
+ {0x3806, 0x04},
+ {0x3807, 0x57},
+ {0x3808, 0x05},
+ {0x3809, 0x10},
+ {0x380a, 0x03},
+ {0x380b, 0xd0},
+ {0x380c, 0x06},
+ {0x380d, 0xa8},
+ {0x380e, 0x05},
+ {0x380f, 0x0e},
+ {0x3810, 0x00},
+ {0x3811, 0x08},
+ {0x3812, 0x00},
+ {0x3813, 0x08},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x4008, 0x02},
+ {0x4009, 0x09},
+ {0x5081, 0x41},
+ {0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x10},
+ {0x5705, 0xa0},
+ {0x5706, 0x0c},
+ {0x5707, 0x78},
+ {0x3820, 0xc0},
+	{0x3821, 0x00}, //mirror/flip
+ // {0x5090, 0x0c},
+ {}
+};
+
+/*
+ * 1456x1096 30fps VBlanking 1lane 10bit (no-scaling)
+ */
+static struct ov2680_reg const ov2680_1456x1096_30fps[] = {
+ {0x3086, 0x00},
+ {0x3501, 0x48},
+ {0x3502, 0xe0},
+ {0x370a, 0x21},
+ {0x3801, 0x90},
+ {0x3802, 0x00},
+ {0x3803, 0x78},
+ {0x3804, 0x06},
+ {0x3805, 0x4f},
+ {0x3806, 0x04},
+ {0x3807, 0xC0},
+ {0x3808, 0x05},
+ {0x3809, 0xb0},
+ {0x380a, 0x04},
+ {0x380b, 0x48},
+ {0x380c, 0x06},
+ {0x380d, 0xa8},
+ {0x380e, 0x05},
+ {0x380f, 0x0e},
+ {0x3810, 0x00},
+ {0x3811, 0x08},
+ {0x3812, 0x00},
+ {0x3813, 0x00},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x4008, 0x02},
+ {0x4009, 0x09},
+ {0x5081, 0x41},
+ {0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x10},
+ {0x5705, 0xa0},
+ {0x5706, 0x0c},
+ {0x5707, 0x78},
+ {0x3820, 0xc0},
+ {0x3821, 0x00},
+ // {0x5090, 0x0c},
+ {}
+};
+#endif
+
+/*
+ * 1616x916 30fps VBlanking 1lane 10bit
+ */
+
+static struct ov2680_reg const ov2680_1616x916_30fps[] = {
+ {0x3086, 0x00},
+ {0x3501, 0x48},
+ {0x3502, 0xe0},
+ {0x370a, 0x21},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x96},
+ {0x3804, 0x06},
+ {0x3805, 0x4f},
+ {0x3806, 0x04},
+ {0x3807, 0x39},
+ {0x3808, 0x06},
+ {0x3809, 0x50},
+ {0x380a, 0x03},
+ {0x380b, 0x94},
+ {0x380c, 0x06},
+ {0x380d, 0xa8},
+ {0x380e, 0x05},
+ {0x380f, 0x0e},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x08},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x4008, 0x02},
+ {0x4009, 0x09},
+ {0x5081, 0x41},
+ {0x5708, 0x01}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x06},
+ {0x5705, 0x50},
+ {0x5706, 0x03},
+ {0x5707, 0x94},
+ {0x3820, 0xc0},
+ {0x3821, 0x00},
+ // {0x5090, 0x0C},
+ {}
+};
+
+/*
+ * 1616x1082 30fps VBlanking 1lane 10Bit
+ */
+#if 0
+static struct ov2680_reg const ov2680_1616x1082_30fps[] = {
+ {0x3086, 0x00},
+ {0x3501, 0x48},
+ {0x3502, 0xe0},
+ {0x370a, 0x21},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x86},
+ {0x3804, 0x06},
+ {0x3805, 0x4f},
+ {0x3806, 0x04},
+ {0x3807, 0xbf},
+ {0x3808, 0x06},
+ {0x3809, 0x50},
+ {0x380a, 0x04},
+ {0x380b, 0x3a},
+ {0x380c, 0x06},
+ {0x380d, 0xa8},
+ {0x380e, 0x05},
+ {0x380f, 0x0e},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x00},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x5708, 0x01}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x06},
+ {0x5705, 0x50},
+ {0x5706, 0x04},
+ {0x5707, 0x3a},
+ {0x3820, 0xc0},
+ {0x3821, 0x00},
+ // {0x5090, 0x0C},
+ {0x4008, 0x02},
+ {0x4009, 0x09},
+ {0x5081, 0x41},
+ {}
+};
+#endif
+/*
+ * 1616x1216 30fps VBlanking 1lane 10Bit
+ */
+static struct ov2680_reg const ov2680_1616x1216_30fps[] = {
+ {0x3086, 0x00},
+ {0x3501, 0x48},
+ {0x3502, 0xe0},
+ {0x370a, 0x21},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x06},
+ {0x3805, 0x4f},
+ {0x3806, 0x04},
+ {0x3807, 0xbf},
+ {0x3808, 0x06},
+	{0x3809, 0x50}, //4 lines for mirror and flip
+ {0x380a, 0x04},
+	{0x380b, 0xc0},
+ {0x380c, 0x06},
+ {0x380d, 0xa8},
+ {0x380e, 0x05},
+ {0x380f, 0x0e},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x00},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x4008, 0x00},
+ {0x4009, 0x0b},
+ {0x5081, 0x01},
+ {0x5708, 0x01}, //add for full size flip off and mirror off 2014/09/11
+ {0x5704, 0x06},
+ {0x5705, 0x50},
+ {0x5706, 0x04},
+ {0x5707, 0xcc},
+ {0x3820, 0xc0},
+ {0x3821, 0x00},
+ // {0x5090, 0x0C},
+ {}
+};
+
+static struct ov2680_resolution ov2680_res_preview[] = {
+ {
+ .desc = "ov2680_1616x1216_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 66,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1616x1216_30fps,
+ },
+ {
+ .desc = "ov2680_1616x916_30fps",
+ .width = 1616,
+ .height = 916,
+ .fps = 30,
+ .pix_clk_freq = 66,
+ .used = 0,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1616x916_30fps,
+ },
+};
+
+#define N_RES_PREVIEW (ARRAY_SIZE(ov2680_res_preview))
+
+static struct ov2680_resolution *ov2680_res = ov2680_res_preview;
+static unsigned long N_RES = N_RES_PREVIEW;
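
The timing fields in these mode entries are self-consistent: frame rate is the pixel clock divided by the total pixel count per frame, 66 MHz / (1698 x 1294) ~= 30.0 fps, matching .fps = 30. A one-line check (a sketch; pix_clk_freq is in MHz per the table above):

/* Sanity check on a mode entry: yields ~30 for the modes above. */
static inline int ov2680_mode_fps(const struct ov2680_resolution *res)
{
	return res->pix_clk_freq * 1000000 /
	       (res->pixels_per_line * res->lines_per_frame);
}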
+
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
new file mode 100644
index 000000000000..1110d723968e
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -0,0 +1,1272 @@
+/*
+ * Support for OmniVision OV2722 1080p HD camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __OV2722_H__
+#define __OV2722_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+#define OV2722_POWER_UP_RETRY_NUM 5
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define OV2722_FOCAL_LENGTH_NUM 278 /*2.78mm*/
+#define OV2722_FOCAL_LENGTH_DEM 100
+#define OV2722_F_NUMBER_DEFAULT_NUM 26
+#define OV2722_F_NUMBER_DEM 10
+
+#define MAX_FMTS 1
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2722_FOCAL_LENGTH_DEFAULT 0x1160064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2722_F_NUMBER_DEFAULT 0x1a000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define OV2722_F_NUMBER_RANGE 0x1a0a1a0a
+#define OV2720_ID 0x2720
+#define OV2722_ID 0x2722
+
+#define OV2722_FINE_INTG_TIME_MIN 0
+#define OV2722_FINE_INTG_TIME_MAX_MARGIN 0
+#define OV2722_COARSE_INTG_TIME_MIN 1
+#define OV2722_COARSE_INTG_TIME_MAX_MARGIN 4
+
+/*
+ * OV2722 System control registers
+ */
+#define OV2722_SW_SLEEP 0x0100
+#define OV2722_SW_RESET 0x0103
+#define OV2722_SW_STREAM 0x0100
+
+#define OV2722_SC_CMMN_CHIP_ID_H 0x300A
+#define OV2722_SC_CMMN_CHIP_ID_L 0x300B
+#define OV2722_SC_CMMN_SCCB_ID 0x300C
+#define OV2722_SC_CMMN_SUB_ID 0x302A /* process, version*/
+
+#define OV2722_SC_CMMN_PAD_OEN0 0x3000
+#define OV2722_SC_CMMN_PAD_OEN1 0x3001
+#define OV2722_SC_CMMN_PAD_OEN2 0x3002
+#define OV2722_SC_CMMN_PAD_OUT0 0x3008
+#define OV2722_SC_CMMN_PAD_OUT1 0x3009
+#define OV2722_SC_CMMN_PAD_OUT2 0x300D
+#define OV2722_SC_CMMN_PAD_SEL0 0x300E
+#define OV2722_SC_CMMN_PAD_SEL1 0x300F
+#define OV2722_SC_CMMN_PAD_SEL2 0x3010
+
+#define OV2722_SC_CMMN_PAD_PK 0x3011
+#define OV2722_SC_CMMN_A_PWC_PK_O_13 0x3013
+#define OV2722_SC_CMMN_A_PWC_PK_O_14 0x3014
+
+#define OV2722_SC_CMMN_CLKRST0 0x301A
+#define OV2722_SC_CMMN_CLKRST1 0x301B
+#define OV2722_SC_CMMN_CLKRST2 0x301C
+#define OV2722_SC_CMMN_CLKRST3 0x301D
+#define OV2722_SC_CMMN_CLKRST4 0x301E
+#define OV2722_SC_CMMN_CLKRST5 0x3005
+#define OV2722_SC_CMMN_PCLK_DIV_CTRL 0x3007
+#define OV2722_SC_CMMN_CLOCK_SEL 0x3020
+#define OV2722_SC_SOC_CLKRST5 0x3040
+
+#define OV2722_SC_CMMN_PLL_CTRL0 0x3034
+#define OV2722_SC_CMMN_PLL_CTRL1 0x3035
+#define OV2722_SC_CMMN_PLL_CTRL2 0x3039
+#define OV2722_SC_CMMN_PLL_CTRL3 0x3037
+#define OV2722_SC_CMMN_PLL_MULTIPLIER 0x3036
+#define OV2722_SC_CMMN_PLL_DEBUG_OPT 0x3038
+#define OV2722_SC_CMMN_PLLS_CTRL0 0x303A
+#define OV2722_SC_CMMN_PLLS_CTRL1 0x303B
+#define OV2722_SC_CMMN_PLLS_CTRL2 0x303C
+#define OV2722_SC_CMMN_PLLS_CTRL3 0x303D
+
+#define OV2722_SC_CMMN_MIPI_PHY_16 0x3016
+#define OV2722_SC_CMMN_MIPI_PHY_17 0x3017
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_18 0x3018
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_19 0x3019
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_21 0x3021
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_22 0x3022
+
+#define OV2722_AEC_PK_EXPO_H 0x3500
+#define OV2722_AEC_PK_EXPO_M 0x3501
+#define OV2722_AEC_PK_EXPO_L 0x3502
+#define OV2722_AEC_MANUAL_CTRL 0x3503
+#define OV2722_AGC_ADJ_H 0x3508
+#define OV2722_AGC_ADJ_L 0x3509
+#define OV2722_VTS_DIFF_H 0x350c
+#define OV2722_VTS_DIFF_L 0x350d
+#define OV2722_GROUP_ACCESS 0x3208
+#define OV2722_HTS_H 0x380c
+#define OV2722_HTS_L 0x380d
+#define OV2722_VTS_H 0x380e
+#define OV2722_VTS_L 0x380f
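
OV2722_GROUP_ACCESS is the group-hold register that lets exposure, gain, and VTS updates land atomically at a frame boundary. The 0x00 / 0x10 / 0xa0 command bytes below follow the common OmniVision convention (start group 0, end group 0, quick-launch) and should be checked against the OV2722 datasheet; ov2722_write_reg8() is a hypothetical helper and error handling is elided:

/* Group-hold sketch: command values assume the usual OV convention. */
static void ov2722_group_update_exposure(struct i2c_client *client,
					 u8 exp_h, u8 exp_m, u8 exp_l)
{
	ov2722_write_reg8(client, OV2722_GROUP_ACCESS, 0x00); /* start group 0 */
	ov2722_write_reg8(client, OV2722_AEC_PK_EXPO_H, exp_h);
	ov2722_write_reg8(client, OV2722_AEC_PK_EXPO_M, exp_m);
	ov2722_write_reg8(client, OV2722_AEC_PK_EXPO_L, exp_l);
	ov2722_write_reg8(client, OV2722_GROUP_ACCESS, 0x10); /* end group 0 */
	ov2722_write_reg8(client, OV2722_GROUP_ACCESS, 0xa0); /* launch at VB */
}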
+
+#define OV2722_MWB_GAIN_R_H 0x5186
+#define OV2722_MWB_GAIN_R_L 0x5187
+#define OV2722_MWB_GAIN_G_H 0x5188
+#define OV2722_MWB_GAIN_G_L 0x5189
+#define OV2722_MWB_GAIN_B_H 0x518a
+#define OV2722_MWB_GAIN_B_L 0x518b
+
+#define OV2722_H_CROP_START_H 0x3800
+#define OV2722_H_CROP_START_L 0x3801
+#define OV2722_V_CROP_START_H 0x3802
+#define OV2722_V_CROP_START_L 0x3803
+#define OV2722_H_CROP_END_H 0x3804
+#define OV2722_H_CROP_END_L 0x3805
+#define OV2722_V_CROP_END_H 0x3806
+#define OV2722_V_CROP_END_L 0x3807
+#define OV2722_H_OUTSIZE_H 0x3808
+#define OV2722_H_OUTSIZE_L 0x3809
+#define OV2722_V_OUTSIZE_H 0x380a
+#define OV2722_V_OUTSIZE_L 0x380b
+
+#define OV2722_START_STREAMING 0x01
+#define OV2722_STOP_STREAMING 0x00
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct ov2722_resolution {
+ u8 *desc;
+ const struct ov2722_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+ int mipi_freq;
+};
+
+struct ov2722_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct ov2722_reg *regs;
+};
+
+/*
+ * ov2722 device structure.
+ */
+struct ov2722_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+
+ struct camera_sensor_platform_data *platform_data;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ int run_mode;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 res;
+ u8 type;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *link_freq;
+};
+
+enum ov2722_tok_type {
+ OV2722_8BIT = 0x0001,
+ OV2722_16BIT = 0x0002,
+ OV2722_32BIT = 0x0004,
+ OV2722_TOK_TERM = 0xf000, /* terminating token for reg list */
+ OV2722_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ OV2722_TOK_MASK = 0xfff0
+};
+
+/**
+ * struct ov2722_reg - ov2722 sensor register format
+ * @type: type of the register
+ * @reg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct ov2722_reg {
+ enum ov2722_tok_type type;
+ u16 reg;
+ u32 val; /* @set value for read/mod/write, @mask */
+};
+
+#define to_ov2722_sensor(x) container_of(x, struct ov2722_device, sd)
+
+#define OV2722_MAX_WRITE_BUF_SIZE 30
+
+struct ov2722_write_buffer {
+ u16 addr;
+ u8 data[OV2722_MAX_WRITE_BUF_SIZE];
+};
+
+struct ov2722_write_ctrl {
+ int index;
+ struct ov2722_write_buffer buffer;
+};
+
+/*
+ * Register settings for the various resolutions
+ */
+#if 0
+static struct ov2722_reg const ov2722_QVGA_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x0c},
+ {OV2722_8BIT, 0x373a, 0x1c},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x0c},
+ {OV2722_8BIT, 0x3705, 0x06},
+ {OV2722_8BIT, 0x3730, 0x0e},
+ {OV2722_8BIT, 0x3704, 0x1c},
+ {OV2722_8BIT, 0x3f06, 0x00},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0x46},
+ {OV2722_8BIT, 0x371e, 0x00},
+ {OV2722_8BIT, 0x371f, 0x63},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x42}, /* H crop start: 322 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0x95}, /* H crop end: 1685 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x27}, /* V crop end: 1063 */
+ {OV2722_8BIT, 0x3808, 0x01},
+ {OV2722_8BIT, 0x3809, 0x50}, /* H output size: 336 */
+ {OV2722_8BIT, 0x380a, 0x01},
+ {OV2722_8BIT, 0x380b, 0x00}, /* V output size: 256 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0xc0},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/
+ {OV2722_8BIT, 0x3814, 0x71},
+ {OV2722_8BIT, 0x3815, 0x71},
+ {OV2722_8BIT, 0x3612, 0x49},
+ {OV2722_8BIT, 0x3618, 0x00},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0xc3},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x77},
+ {OV2722_8BIT, 0x3a0d, 0x00},
+ {OV2722_8BIT, 0x3a0e, 0x00},
+ {OV2722_8BIT, 0x4520, 0x09},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xff},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3a00, 0x58},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+
+};
+
+static struct ov2722_reg const ov2722_480P_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x18},
+ {OV2722_8BIT, 0x373a, 0x3c},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x1d},
+ {OV2722_8BIT, 0x3705, 0x12},
+ {OV2722_8BIT, 0x3730, 0x1f},
+ {OV2722_8BIT, 0x3704, 0x3f},
+ {OV2722_8BIT, 0x3f06, 0x1d},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0x83},
+ {OV2722_8BIT, 0x371e, 0x00},
+ {OV2722_8BIT, 0x371f, 0xbd},
+ {OV2722_8BIT, 0x3708, 0x63},
+ {OV2722_8BIT, 0x3709, 0x52},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0xf2}, /* H crop start: 322 - 80 = 242*/
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32*/
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0xBB}, /* H crop end: 1643 + 80 = 1723*/
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x03}, /* V crop end: 1027*/
+ {OV2722_8BIT, 0x3808, 0x02},
+ {OV2722_8BIT, 0x3809, 0xE0}, /* H output size: 656 +80 = 736*/
+ {OV2722_8BIT, 0x380a, 0x01},
+ {OV2722_8BIT, 0x380b, 0xF0}, /* V output size: 496 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/
+ {OV2722_8BIT, 0x3814, 0x31},
+ {OV2722_8BIT, 0x3815, 0x31},
+ {OV2722_8BIT, 0x3612, 0x4b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x02},
+ {OV2722_8BIT, 0x3a09, 0x67},
+ {OV2722_8BIT, 0x3a0a, 0x02},
+ {OV2722_8BIT, 0x3a0b, 0x00},
+ {OV2722_8BIT, 0x3a0d, 0x00},
+ {OV2722_8BIT, 0x3a0e, 0x00},
+ {OV2722_8BIT, 0x4520, 0x0a},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xff},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3a00, 0x58},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+};
+
+static struct ov2722_reg const ov2722_VGA_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x18},
+ {OV2722_8BIT, 0x373a, 0x3c},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x1d},
+ {OV2722_8BIT, 0x3705, 0x12},
+ {OV2722_8BIT, 0x3730, 0x1f},
+ {OV2722_8BIT, 0x3704, 0x3f},
+ {OV2722_8BIT, 0x3f06, 0x1d},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0x83},
+ {OV2722_8BIT, 0x371e, 0x00},
+ {OV2722_8BIT, 0x371f, 0xbd},
+ {OV2722_8BIT, 0x3708, 0x63},
+ {OV2722_8BIT, 0x3709, 0x52},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x42}, /* H crop start: 322 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32*/
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0x6B}, /* H crop end: 1643*/
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x03}, /* V crop end: 1027*/
+ {OV2722_8BIT, 0x3808, 0x02},
+ {OV2722_8BIT, 0x3809, 0x90}, /* H output size: 656 */
+ {OV2722_8BIT, 0x380a, 0x01},
+ {OV2722_8BIT, 0x380b, 0xF0}, /* V output size: 496 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/
+ {OV2722_8BIT, 0x3814, 0x31},
+ {OV2722_8BIT, 0x3815, 0x31},
+ {OV2722_8BIT, 0x3612, 0x4b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x02},
+ {OV2722_8BIT, 0x3a09, 0x67},
+ {OV2722_8BIT, 0x3a0a, 0x02},
+ {OV2722_8BIT, 0x3a0b, 0x00},
+ {OV2722_8BIT, 0x3a0d, 0x00},
+ {OV2722_8BIT, 0x3a0e, 0x00},
+ {OV2722_8BIT, 0x4520, 0x0a},
+ {OV2722_8BIT, 0x4837, 0x29},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xff},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3a00, 0x58},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+};
+#endif
+
+static struct ov2722_reg const ov2722_1632_1092_30fps[] = {
+	{OV2722_8BIT, 0x3021, 0x03}, /* For standby: wait for
+					a whole frame to complete (vblank) */
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0x9E}, /* H crop start: 158 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */
+ {OV2722_8BIT, 0x3804, 0x07},
+ {OV2722_8BIT, 0x3805, 0x05}, /* H crop end: 1797 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */
+
+ {OV2722_8BIT, 0x3808, 0x06},
+ {OV2722_8BIT, 0x3809, 0x60}, /* H output size: 1632 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0xd4}, /* H timing: 2260 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xdc}, /* V timing: 1244 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x2c}, /* 422.4 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0}
+};
+
+static struct ov2722_reg const ov2722_1452_1092_30fps[] = {
+	{OV2722_8BIT, 0x3021, 0x03}, /* For standby: wait for
+					a whole frame to complete (vblank) */
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0xF8}, /* H crop start: 248 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0xab}, /* H crop end: 1707 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */
+ {OV2722_8BIT, 0x3808, 0x05},
+ {OV2722_8BIT, 0x3809, 0xac}, /* H output size: 1452 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0xd4}, /* H timing: 2260 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xdc}, /* V timing: 1244 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x2c}, /* 422.4 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0}
+};
+
+#if 0
+static struct ov2722_reg const ov2722_1M3_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x4a}, /* H crop start: 330 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x03}, /* V crop start: 3 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0xe1}, /* H crop end: 1761 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x47}, /* V crop end: 1095 */
+ {OV2722_8BIT, 0x3808, 0x05},
+ {OV2722_8BIT, 0x3809, 0x88}, /* H output size: 1416 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x0a}, /* V output size: 1034 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x05}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+};
+#endif
+
+static struct ov2722_reg const ov2722_1080p_30fps[] = {
+	{OV2722_8BIT, 0x3021, 0x03}, /* For standby: wait for a whole
+					frame to complete (vblank) */
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x2b},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x28},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0x08}, /* H crop start: 8 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */
+ {OV2722_8BIT, 0x3804, 0x07},
+ {OV2722_8BIT, 0x3805, 0x9b}, /* H crop end: 1947 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */
+ {OV2722_8BIT, 0x3808, 0x07},
+ {OV2722_8BIT, 0x3809, 0x8c}, /* H output size: 1932 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x14}, /* H timing: 2068 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0x5a}, /* V timing: 1114 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x4b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcd}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x24}, /* 345.6 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0}
+};
+
+#if 0 /* Currently unused */
+static struct ov2722_reg const ov2722_720p_30fps[] = {
+ {OV2722_8BIT, 0x3021, 0x03},
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x40}, /* H crop start: 320 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0xb1}, /* V crop start: 177 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0x55}, /* H crop end: 1621 */
+ {OV2722_8BIT, 0x3806, 0x03},
+ {OV2722_8BIT, 0x3807, 0x95}, /* V crop end: 918 */
+ {OV2722_8BIT, 0x3808, 0x05},
+	{OV2722_8BIT, 0x3809, 0x10}, /* H output size: 0x0510 == 1296 */
+	{OV2722_8BIT, 0x380a, 0x02},
+	{OV2722_8BIT, 0x380b, 0xe0}, /* V output size: 0x02E0 == 736 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H timing: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa3}, /* V timing: 1187 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+	{OV2722_8BIT, 0x3036, 0x26}, /* 364.8 MHz (0x2c would give 422.4 MHz) */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0},
+};
+#endif
+
+static struct ov2722_resolution ov2722_res_preview[] = {
+ {
+ .desc = "ov2722_1632_1092_30fps",
+ .width = 1632,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1632_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1452_1092_30fps",
+ .width = 1452,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1452_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1080P_30fps",
+ .width = 1932,
+ .height = 1092,
+ .pix_clk_freq = 69,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2068,
+ .lines_per_frame = 1114,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1080p_30fps,
+ .mipi_freq = 345600,
+ },
+};
+
+#define N_RES_PREVIEW (ARRAY_SIZE(ov2722_res_preview))
+
+/*
+ * Disable non-preview configurations until the configuration selection is
+ * improved.
+ */
+#if 0
+static struct ov2722_resolution ov2722_res_still[] = {
+	{
+		.desc = "ov2722_1632_1092_30fps",
+ .width = 1632,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1632_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1452_1092_30fps",
+ .width = 1452,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1452_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1080P_30fps",
+ .width = 1932,
+ .height = 1092,
+ .pix_clk_freq = 69,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2068,
+ .lines_per_frame = 1114,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1080p_30fps,
+ .mipi_freq = 345600,
+ },
+};
+
+#define N_RES_STILL (ARRAY_SIZE(ov2722_res_still))
+
+static struct ov2722_resolution ov2722_res_video[] = {
+ {
+ .desc = "ov2722_QVGA_30fps",
+ .width = 336,
+ .height = 256,
+ .fps = 30,
+ .pix_clk_freq = 73,
+ .used = 0,
+ .pixels_per_line = 2048,
+ .lines_per_frame = 1184,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_QVGA_30fps,
+ .mipi_freq = 364800,
+ },
+ {
+ .desc = "ov2722_480P_30fps",
+ .width = 736,
+ .height = 496,
+ .fps = 30,
+ .pix_clk_freq = 73,
+ .used = 0,
+ .pixels_per_line = 2048,
+ .lines_per_frame = 1184,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+		.regs = ov2722_480P_30fps,
+		.mipi_freq = 364800,	/* same PLL settings as the QVGA mode */
+	},
+ {
+ .desc = "ov2722_1080P_30fps",
+ .width = 1932,
+ .height = 1092,
+ .pix_clk_freq = 69,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2068,
+ .lines_per_frame = 1114,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1080p_30fps,
+ .mipi_freq = 345600,
+ },
+};
+
+#define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video))
+#endif
+
+static struct ov2722_resolution *ov2722_res = ov2722_res_preview;
+static unsigned long N_RES = N_RES_PREVIEW;
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
new file mode 100644
index 000000000000..3f527f2047a7
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_ATOMISP_OV5693
+	tristate "OmniVision ov5693 sensor support"
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ ---help---
+	  This is a Video4Linux2 sensor-level driver for the OmniVision
+	  ov5693 5 Mpixel camera.
+
+	  The ov5693 is a video camera sensor.
+
+ It currently only works with the atomisp driver.
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
new file mode 100644
index 000000000000..3275f2be229e
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += atomisp-ov5693.o
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
new file mode 100644
index 000000000000..c97ab24c5d2b
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
@@ -0,0 +1,62 @@
+/*
+ * Support for AD5823 VCM.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __AD5823_H__
+#define __AD5823_H__
+
+#include <linux/types.h>
+
+#define AD5823_VCM_ADDR 0x0c
+
+#define AD5823_REG_RESET 0x01
+#define AD5823_REG_MODE 0x02
+#define AD5823_REG_VCM_MOVE_TIME 0x03
+#define AD5823_REG_VCM_CODE_MSB 0x04
+#define AD5823_REG_VCM_CODE_LSB 0x05
+#define AD5823_REG_VCM_THRESHOLD_MSB 0x06
+#define AD5823_REG_VCM_THRESHOLD_LSB 0x07
+
+#define AD5823_REG_LENGTH 0x1
+
+#define AD5823_RING_CTRL_ENABLE 0x04
+#define AD5823_RING_CTRL_DISABLE 0x00
+
+#define AD5823_RESONANCE_PERIOD 100000
+#define AD5823_RESONANCE_COEF 512
+#define AD5823_HIGH_FREQ_RANGE 0x80
+
+#define VCM_CODE_MSB_MASK 0xfc
+#define AD5823_INIT_FOCUS_POS 350
+
+enum ad5823_tok_type {
+ AD5823_8BIT = 0x1,
+ AD5823_16BIT = 0x2,
+};
+
+enum ad5823_vcm_mode {
+ AD5823_ARC_RES0 = 0x0, /* Actuator response control RES1 */
+ AD5823_ARC_RES1 = 0x1, /* Actuator response control RES0.5 */
+ AD5823_ARC_RES2 = 0x2, /* Actuator response control RES2 */
+ AD5823_ESRC = 0x3, /* Enhanced slew rate control */
+ AD5823_DIRECT = 0x4, /* Direct control */
+};
+
+#define AD5823_INVALID_CONFIG 0xffffffff
+#define AD5823_MAX_FOCUS_POS 1023
+#define DELAY_PER_STEP_NS 1000000
+#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
new file mode 100644
index 000000000000..2be0ef14d53e
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -0,0 +1,2006 @@
+/*
+ * Support for OmniVision OV5693 1080p HD camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include "../../include/linux/atomisp_gmin_platform.h"
+
+#include "ov5693.h"
+#include "ad5823.h"
+
+#define __cci_delay(t) \
+ do { \
+ if ((t) < 10) { \
+ usleep_range((t) * 1000, ((t) + 1) * 1000); \
+ } else { \
+ msleep((t)); \
+ } \
+ } while (0)
+
+/* The 30ms value was reached through experimentation on byt ecs.
+ * The datasheet specifies a much lower value but when using a smaller
+ * value the I2C bus sometimes locks up permanently when starting the
+ * camera. This issue could not be reproduced on cht, so the delay can
+ * be lowered via the module parameter when the module is loaded.
+ */
+static uint up_delay = 30;
+module_param(up_delay, uint, 0644);
+MODULE_PARM_DESC(up_delay,
+ "Delay prior to the first CCI transaction for ov5693");
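+
+/*
+ * Illustration only (module and parameter names follow the Makefile and
+ * the module_param() above): the delay can be shortened at load time with
+ *
+ *	modprobe atomisp-ov5693 up_delay=5
+ *
+ * or, since the parameter permissions are 0644, adjusted at runtime via
+ *
+ *	echo 5 > /sys/module/atomisp_ov5693/parameters/up_delay
+ */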
+
+static int vcm_ad_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
+{
+ int err;
+ struct i2c_msg msg;
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = val;
+
+ msg.addr = VCM_ADDR;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[0];
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err != 1) {
+ dev_err(&client->dev, "%s: vcm i2c fail, err code = %d\n",
+ __func__, err);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int ad5823_i2c_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = val;
+ msg.addr = AD5823_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = 0x02;
+ msg.buf = &buf[0];
+
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static int ad5823_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = 0;
+
+ msg[0].addr = AD5823_VCM_ADDR;
+ msg[0].flags = 0;
+ msg[0].len = 0x01;
+ msg[0].buf = &buf[0];
+
+	msg[1].addr = AD5823_VCM_ADDR;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 0x01;
+ msg[1].buf = &buf[1];
+ *val = 0;
+ if (i2c_transfer(client->adapter, msg, 2) != 2)
+ return -EIO;
+ *val = buf[1];
+ return 0;
+}
+
+static const u32 ov5693_embedded_effective_size = 28;
+
+/* i2c read/write stuff */
+static int ov5693_read_reg(struct i2c_client *client,
+ u16 data_length, u16 reg, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[6];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+	if (data_length != OV5693_8BIT && data_length != OV5693_16BIT &&
+	    data_length != OV5693_32BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8)(reg >> 8);
+ data[1] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == OV5693_8BIT)
+ *val = (u8)data[0];
+ else if (data_length == OV5693_16BIT)
+ *val = be16_to_cpu(*(__be16 *)&data[0]);
+ else
+ *val = be32_to_cpu(*(__be32 *)&data[0]);
+
+ return 0;
+}
+
+static int ov5693_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int vcm_dw_i2c_write(struct i2c_client *client, u16 data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+ __be16 val;
+
+ val = cpu_to_be16(data);
+ msg.addr = VCM_ADDR;
+ msg.flags = 0;
+ msg.len = OV5693_16BIT;
+ msg.buf = (void *)&val;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+/*
+ * Theory: per datasheet, the two VCMs both allow for a 2-byte read.
+ * The DW9714 doesn't actually specify what this does (it has a
+ * two-byte write-only protocol, but specifies the read sequence as
+ * legal), but it returns the same data (zeroes) always, after an
+ * undocumented initial NAK. The AD5823 has a one-byte address
+ * register to which all writes go, and subsequent reads will cycle
+ * through the 8 bytes of registers. Notably, the default values (the
+ * device is always power-cycled affirmatively, so we can rely on
+ * these) in AD5823 are not pairwise repetitions of the same 16 bit
+ * word. So all we have to do is sequentially read two bytes at a
+ * time and see if we detect a difference in any of the first four
+ * pairs.
+ */
+static int vcm_detect(struct i2c_client *client)
+{
+ int i, ret;
+ struct i2c_msg msg;
+ u16 data0 = 0, data;
+
+ for (i = 0; i < 4; i++) {
+ msg.addr = VCM_ADDR;
+ msg.flags = I2C_M_RD;
+ msg.len = sizeof(data);
+ msg.buf = (u8 *)&data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ /*
+ * DW9714 always fails the first read and returns
+ * zeroes for subsequent ones
+ */
+ if (i == 0 && ret == -EREMOTEIO) {
+ data0 = 0;
+ continue;
+ }
+
+ if (i == 0)
+ data0 = data;
+
+ if (data != data0)
+ return VCM_AD5823;
+ }
+ return ret == 1 ? VCM_DW9714 : ret;
+}
+
+static int ov5693_write_reg(struct i2c_client *client, u16 data_length,
+ u16 reg, u16 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ __be16 *wreg = (void *)data;
+ const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
+
+ if (data_length != OV5693_8BIT && data_length != OV5693_16BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* high byte goes out first */
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == OV5693_8BIT) {
+ data[2] = (u8)(val);
+ } else {
+ /* OV5693_16BIT */
+ __be16 *wdata = (void *)&data[2];
+
+ *wdata = cpu_to_be16(val);
+ }
+
+ ret = ov5693_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+/*
+ * ov5693_write_reg_array - Initializes a list of OV5693 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function initializes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * consecutive data in a single i2c_transfer().
+ *
+ * __ov5693_flush_reg_array, __ov5693_buf_reg_array() and
+ * __ov5693_write_reg_is_consecutive() are internal functions to
+ * ov5693_write_reg_array() and should not be used anywhere else.
+ *
+ */
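+
+/*
+ * Illustration only (hypothetical register values): with this batching,
+ * a run of consecutive 8-bit writes such as
+ *
+ *	{OV5693_8BIT, 0x3500, 0x00},
+ *	{OV5693_8BIT, 0x3501, 0x3f},
+ *	{OV5693_8BIT, 0x3502, 0x00},
+ *
+ * goes out as one 5-byte i2c_transfer(): the big-endian start address
+ * 0x35 0x00 followed by the data bytes 0x00 0x3f 0x00.
+ */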
+
+static int __ov5693_flush_reg_array(struct i2c_client *client,
+ struct ov5693_write_ctrl *ctrl)
+{
+ u16 size;
+ __be16 *reg = (void *)&ctrl->buffer.addr;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
+
+ *reg = cpu_to_be16(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return ov5693_i2c_write(client, size, (u8 *)reg);
+}
+
+static int __ov5693_buf_reg_array(struct i2c_client *client,
+ struct ov5693_write_ctrl *ctrl,
+ const struct ov5693_reg *next)
+{
+ int size;
+ __be16 *data16;
+
+ switch (next->type) {
+ case OV5693_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case OV5693_16BIT:
+ size = 2;
+
+ data16 = (void *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+ /*
+	 * If the buffer cannot guarantee free space for the next item
+	 * (up to two bytes of data), flush it now to avoid overflowing
+	 * the buffer on the next write.
+ */
+ if (ctrl->index + sizeof(u16) >= OV5693_MAX_WRITE_BUF_SIZE)
+ return __ov5693_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int __ov5693_write_reg_is_consecutive(struct i2c_client *client,
+ struct ov5693_write_ctrl *ctrl,
+ const struct ov5693_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int ov5693_write_reg_array(struct i2c_client *client,
+ const struct ov5693_reg *reglist)
+{
+ const struct ov5693_reg *next = reglist;
+ struct ov5693_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != OV5693_TOK_TERM; next++) {
+ switch (next->type & OV5693_TOK_MASK) {
+ case OV5693_TOK_DELAY:
+ err = __ov5693_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+ /*
+			 * If the next address is not consecutive, the buffered
+			 * data needs to be flushed before proceeding.
+ */
+ if (!__ov5693_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __ov5693_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __ov5693_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __ov5693_flush_reg_array(client, &ctrl);
+}
+
+static int ov5693_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (OV5693_FOCAL_LENGTH_NUM << 16) | OV5693_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int ov5693_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+	/* const f-number for the ov5693 */
+ *val = (OV5693_F_NUMBER_DEFAULT_NUM << 16) | OV5693_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov5693_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (OV5693_F_NUMBER_DEFAULT_NUM << 24) |
+ (OV5693_F_NUMBER_DEM << 16) |
+ (OV5693_F_NUMBER_DEFAULT_NUM << 8) | OV5693_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov5693_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ *val = ov5693_res[dev->fmt_idx].bin_factor_x;
+
+ return 0;
+}
+
+static int ov5693_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ *val = ov5693_res[dev->fmt_idx].bin_factor_y;
+
+ return 0;
+}
+
+static int ov5693_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct ov5693_resolution *res)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov5693_device *dev = to_ov5693_sensor(sd);
+	struct atomisp_sensor_mode_data *buf;
+	unsigned int pix_clk_freq_hz;
+	u16 reg_val;
+	int ret;
+
+	if (!info)
+		return -EINVAL;
+
+	buf = &info->data;
+
+ /* pixel clock */
+ pix_clk_freq_hz = res->pix_clk_freq * 1000000;
+
+ dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+ buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = OV5693_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ OV5693_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = OV5693_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ OV5693_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = OV5693_FINE_INTG_TIME_MIN;
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_HORIZONTAL_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_VERTICAL_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_HORIZONTAL_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_end = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_VERTICAL_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_end = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_HORIZONTAL_OUTPUT_SIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_VERTICAL_OUTPUT_SIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = reg_val;
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
+
+static long __ov5693_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ u16 vts, hts;
+ int ret, exp_val;
+
+ hts = ov5693_res[dev->fmt_idx].pixels_per_line;
+ vts = ov5693_res[dev->fmt_idx].lines_per_frame;
+	/*
+	 * If coarse_itg is larger than 1 << 15, it cannot be written to the
+	 * register directly; instead, write coarse_itg / 2 to the exposure
+	 * register and write 2 * hts to the HTS register at the same time.
+	 */
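+	/*
+	 * Worked example (illustrative values): coarse_itg = 40000 is
+	 * larger than 32768, so 20000 is written instead while hts is
+	 * doubled; the product coarse_itg * hts, i.e. the integration
+	 * time in pixel periods, is unchanged.
+	 */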
+ if (coarse_itg > (1 << 15)) {
+ hts = hts * 2;
+ coarse_itg = (int)coarse_itg / 2;
+ }
+ /* group hold */
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_GROUP_ACCESS, 0x00);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_GROUP_ACCESS);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_TIMING_HTS_H, (hts >> 8) & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_TIMING_HTS_H);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_TIMING_HTS_L, hts & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_TIMING_HTS_L);
+ return ret;
+ }
+ /* Increase the VTS to match exposure + MARGIN */
+ if (coarse_itg > vts - OV5693_INTEGRATION_TIME_MARGIN)
+ vts = (u16)coarse_itg + OV5693_INTEGRATION_TIME_MARGIN;
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_TIMING_VTS_H, (vts >> 8) & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_TIMING_VTS_H);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_TIMING_VTS_L, vts & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_TIMING_VTS_L);
+ return ret;
+ }
+
+ /* set exposure */
+
+	/* The lower four bits should be 0 */
+ exp_val = coarse_itg << 4;
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_L, exp_val & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_EXPOSURE_L);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_M, (exp_val >> 8) & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_EXPOSURE_M);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_H, (exp_val >> 16) & 0x0F);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_EXPOSURE_H);
+ return ret;
+ }
+
+ /* Analog gain */
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_AGC_L, gain & 0xff);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_AGC_L);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_AGC_H, (gain >> 8) & 0xff);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_AGC_H);
+ return ret;
+ }
+
+ /* Digital gain */
+ if (digitgain) {
+ ret = ov5693_write_reg(client, OV5693_16BIT,
+ OV5693_MWB_RED_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_MWB_RED_GAIN_H);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_16BIT,
+ OV5693_MWB_GREEN_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+				__func__, OV5693_MWB_GREEN_GAIN_H);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_16BIT,
+ OV5693_MWB_BLUE_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+				__func__, OV5693_MWB_BLUE_GAIN_H);
+ return ret;
+ }
+ }
+
+ /* End group */
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_GROUP_ACCESS, 0x10);
+ if (ret)
+ return ret;
+
+ /* Delay launch group */
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_GROUP_ACCESS, 0xa0);
+ if (ret)
+ return ret;
+ return ret;
+}
+
+static int ov5693_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov5693_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long ov5693_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ u16 coarse_itg = exposure->integration_time[0];
+ u16 analog_gain = exposure->gain[0];
+ u16 digital_gain = exposure->gain[1];
+
+ /* we should not accept the invalid value below */
+ if (analog_gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+ return ov5693_set_exposure(sd, coarse_itg, analog_gain, digital_gain);
+}
+
+static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size,
+ u16 addr, u8 *buf)
+{
+ u16 index;
+ int ret;
+	u16 *val = NULL;
+
+	for (index = 0; index <= size; index++) {
+		val = (u16 *)(buf + index);
+		ret = ov5693_read_reg(client, OV5693_8BIT, addr + index,
+				      val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __ov5693_otp_read(struct v4l2_subdev *sd, u8 *buf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ int ret;
+ int i;
+ u8 *b = buf;
+
+ dev->otp_size = 0;
+ for (i = 1; i < OV5693_OTP_BANK_MAX; i++) {
+		/* set bank number and OTP read mode */
+ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_BANK_REG,
+ (i | 0xc0)); //[7:6] 2'b11 [5:0] bank no
+ if (ret) {
+ dev_err(&client->dev, "failed to prepare OTP page\n");
+ return ret;
+ }
+ //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_BANK_REG,(i|0xc0));
+
+		/* enable read */
+ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_READ_REG,
+ OV5693_OTP_MODE_READ); // enable :1
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to set OTP reading mode page");
+ return ret;
+ }
+ //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_READ_REG,OV5693_OTP_MODE_READ);
+
+ /* Reading the OTP data array */
+ ret = ov5693_read_otp_reg_array(client, OV5693_OTP_BANK_SIZE,
+ OV5693_OTP_START_ADDR,
+ b);
+ if (ret) {
+ dev_err(&client->dev, "failed to read OTP data\n");
+ return ret;
+ }
+
+ //pr_debug("BANK[%2d] %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", i, *b, *(b+1), *(b+2), *(b+3), *(b+4), *(b+5), *(b+6), *(b+7), *(b+8), *(b+9), *(b+10), *(b+11), *(b+12), *(b+13), *(b+14), *(b+15));
+
+		/* Intel OTP map: try to read 320 bytes first. */
+		if (i == 21) {
+			if ((*b) == 0) {
+				dev->otp_size = 320;
+				break;
+			} else {
+				b = buf;
+				continue;
+			}
+		} else if (i == 24) {
+			/*
+			 * If the first 320 bytes of data don't exist, try
+			 * to read the next 32 bytes of data.
+			 */
+			if ((*b) == 0) {
+				dev->otp_size = 32;
+				break;
+			} else {
+				b = buf;
+				continue;
+			}
+		} else if (i == 27) {
+			/*
+			 * If the previous 32 bytes of data don't exist,
+			 * try to read the next 32 bytes of data again.
+			 */
+ if ((*b) == 0) {
+ dev->otp_size = 32;
+ break;
+ } else {
+ dev->otp_size = 0; // no OTP data.
+ break;
+ }
+ }
+
+ b = b + OV5693_OTP_BANK_SIZE;
+ }
+ return 0;
+}
+
+/*
+ * Read OTP data and store it into a devm-allocated buffer; the buffer
+ * is freed automatically when the device is released. The size of the
+ * data found is stored in dev->otp_size.
+ */
+static void *ov5693_otp_read(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 *buf;
+ int ret;
+
+ buf = devm_kzalloc(&client->dev, (OV5693_OTP_DATA_SIZE + 16), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+	/* OTP is valid only after MIPI on and SW stream on */
+	ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x00);
+
+	ov5693_write_reg(client, OV5693_8BIT,
+			 OV5693_SW_STREAM, OV5693_START_STREAMING);
+
+	ret = __ov5693_otp_read(sd, buf);
+
+	/* MIPI off and SW stream off after the OTP read, even on failure */
+	ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x0f);
+
+	ov5693_write_reg(client, OV5693_8BIT,
+			 OV5693_SW_STREAM, OV5693_STOP_STREAMING);
+
+ /* Driver has failed to find valid data */
+ if (ret) {
+ dev_err(&client->dev, "sensor found no valid OTP data\n");
+ return ERR_PTR(ret);
+ }
+
+ return buf;
+}
+
+static int ov5693_g_priv_int_data(struct v4l2_subdev *sd,
+ struct v4l2_private_int_data *priv)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ u8 __user *to = priv->data;
+ u32 read_size = priv->size;
+ int ret;
+
+ /* No need to copy data if size is 0 */
+ if (!read_size)
+ goto out;
+
+ if (IS_ERR(dev->otp_data)) {
+ dev_err(&client->dev, "OTP data not available");
+ return PTR_ERR(dev->otp_data);
+ }
+
+ /* Clamp read_size to the maximum OTP data size */
+ if (read_size > OV5693_OTP_DATA_SIZE)
+ read_size = OV5693_OTP_DATA_SIZE;
+
+ ret = copy_to_user(to, dev->otp_data, read_size);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to copy OTP data to user\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ pr_debug("%s read_size:%u\n", __func__, read_size);
+
+out:
+ /* Return correct size */
+ priv->size = dev->otp_size;
+
+ return 0;
+}
+
+static long ov5693_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return ov5693_s_exposure(sd, arg);
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ return ov5693_g_priv_int_data(sd, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int ov5693_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_M,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ *value = reg_v + (((u32)reg_v2 << 16));
+err:
+ return ret;
+}
+
+static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 vcm_code;
+
+ ret = ad5823_i2c_read(client, AD5823_REG_VCM_CODE_MSB, &vcm_code);
+ if (ret)
+ return ret;
+
+ /* set reg VCM_CODE_MSB Bit[1:0] */
+ vcm_code = (vcm_code & VCM_CODE_MSB_MASK) |
+ ((val >> 8) & ~VCM_CODE_MSB_MASK);
+ ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB, vcm_code);
+ if (ret)
+ return ret;
+
+ /* set reg VCM_CODE_LSB Bit[7:0] */
+ ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_LSB, (val & 0xff));
+ if (ret)
+ return ret;
+
+ /* set required vcm move time */
+ vcm_code = AD5823_RESONANCE_PERIOD / AD5823_RESONANCE_COEF
+ - AD5823_HIGH_FREQ_RANGE;
+ ret = ad5823_i2c_write(client, AD5823_REG_VCM_MOVE_TIME, vcm_code);
+
+ return ret;
+}
+
+static int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ value = min(value, AD5823_MAX_FOCUS_POS);
+ return ad5823_t_focus_vcm(sd, value);
+}
+
+static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ dev_dbg(&client->dev, "%s: FOCUS_POS: 0x%x\n", __func__, value);
+ value = clamp(value, 0, OV5693_VCM_MAX_FOCUS_POS);
+ if (dev->vcm == VCM_DW9714) {
+ if (dev->vcm_update) {
+ ret = vcm_dw_i2c_write(client, VCM_PROTECTION_OFF);
+ if (ret)
+ return ret;
+ ret = vcm_dw_i2c_write(client, DIRECT_VCM);
+ if (ret)
+ return ret;
+ ret = vcm_dw_i2c_write(client, VCM_PROTECTION_ON);
+ if (ret)
+ return ret;
+ dev->vcm_update = false;
+ }
+ ret = vcm_dw_i2c_write(client,
+ vcm_val(value, VCM_DEFAULT_S));
+ } else if (dev->vcm == VCM_AD5823) {
+ ret = ad5823_t_focus_abs(sd, value);
+ }
+ if (ret == 0) {
+ dev->number_of_steps = value - dev->focus;
+ dev->focus = value;
+ dev->timestamp_t_focus_abs = ktime_get();
+ } else {
+ dev_err(&client->dev,
+ "%s: i2c failed. ret %d\n", __func__, ret);
+ }
+
+ return ret;
+}
+
+static int ov5693_t_focus_rel(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ return ov5693_t_focus_abs(sd, dev->focus + value);
+}
+
+#define DELAY_PER_STEP_NS 1000000
+#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
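+/*
+ * The VCM position is not read back from the hardware; focus status is
+ * inferred from elapsed time instead. A move is assumed to take 1 ms per
+ * step, capped at 1023 ms, so e.g. a 100-step move reports "moving" for
+ * 100 ms after the last ov5693_t_focus_abs() call.
+ */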
+static int ov5693_q_focus_status(struct v4l2_subdev *sd, s32 *value)
+{
+ u32 status = 0;
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ ktime_t temptime;
+ ktime_t timedelay = ns_to_ktime(min_t(u32,
+ abs(dev->number_of_steps) * DELAY_PER_STEP_NS,
+ DELAY_MAX_PER_STEP_NS));
+
+ temptime = ktime_sub(ktime_get(), (dev->timestamp_t_focus_abs));
+ if (ktime_compare(temptime, timedelay) <= 0) {
+ status |= ATOMISP_FOCUS_STATUS_MOVING;
+ status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
+ } else {
+ status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
+ status |= ATOMISP_FOCUS_HP_COMPLETE;
+ }
+
+ *value = status;
+
+ return 0;
+}
+
+static int ov5693_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ s32 val;
+
+ ov5693_q_focus_status(sd, &val);
+
+ if (val & ATOMISP_FOCUS_STATUS_MOVING)
+ *value = dev->focus - dev->number_of_steps;
+ else
+ *value = dev->focus;
+
+ return 0;
+}
+
+static int ov5693_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ dev->number_of_steps = value;
+ dev->vcm_update = true;
+ return 0;
+}
+
+static int ov5693_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ dev->number_of_steps = value;
+ dev->vcm_update = true;
+ return 0;
+}
+
+static int ov5693_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5693_device *dev =
+ container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ dev_dbg(&client->dev, "%s: CID_FOCUS_ABSOLUTE:%d.\n",
+ __func__, ctrl->val);
+ ret = ov5693_t_focus_abs(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_RELATIVE:
+ dev_dbg(&client->dev, "%s: CID_FOCUS_RELATIVE:%d.\n",
+ __func__, ctrl->val);
+ ret = ov5693_t_focus_rel(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_VCM_SLEW:
+ ret = ov5693_t_vcm_slew(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_VCM_TIMEING:
+ ret = ov5693_t_vcm_timing(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int ov5693_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5693_device *dev =
+ container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = ov5693_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = ov5693_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = ov5693_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = ov5693_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ ret = ov5693_q_focus_abs(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_STATUS:
+ ret = ov5693_q_focus_status(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ ret = ov5693_g_bin_factor_x(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_VERT:
+ ret = ov5693_g_bin_factor_y(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = ov5693_s_ctrl,
+ .g_volatile_ctrl = ov5693_g_volatile_ctrl
+};
+
+static const struct v4l2_ctrl_config ov5693_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = OV5693_FOCAL_LENGTH_DEFAULT,
+ .max = OV5693_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = OV5693_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = OV5693_F_NUMBER_DEFAULT,
+ .max = OV5693_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = OV5693_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = OV5693_F_NUMBER_RANGE,
+ .max = OV5693_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = OV5693_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focus move absolute",
+ .min = 0,
+ .max = OV5693_VCM_MAX_FOCUS_POS,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_RELATIVE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focus move relative",
+ .min = OV5693_VCM_MAX_FOCUS_NEG,
+ .max = OV5693_VCM_MAX_FOCUS_POS,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_STATUS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focus status",
+ .min = 0,
+ .max = 100, /* allow enum to grow in the future */
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VCM_SLEW,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vcm slew",
+ .min = 0,
+ .max = OV5693_VCM_SLEW_STEP_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VCM_TIMEING,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vcm step time",
+ .min = 0,
+ .max = OV5693_VCM_SLEW_TIME_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "horizontal binning factor",
+ .min = 0,
+ .max = OV5693_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vertical binning factor",
+ .min = 0,
+ .max = OV5693_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+};
+
+static int ov5693_init(struct v4l2_subdev *sd)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ pr_info("%s\n", __func__);
+ mutex_lock(&dev->input_lock);
+ dev->vcm_update = false;
+
+ if (dev->vcm == VCM_AD5823) {
+ ret = vcm_ad_i2c_wr8(client, 0x01, 0x01); /* vcm init test */
+ if (ret)
+ dev_err(&client->dev,
+ "vcm reset failed\n");
+ /*change the mode*/
+ ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB,
+ AD5823_RING_CTRL_ENABLE);
+ if (ret)
+ dev_err(&client->dev,
+ "vcm enable ringing failed\n");
+ ret = ad5823_i2c_write(client, AD5823_REG_MODE,
+ AD5823_ARC_RES1);
+ if (ret)
+ dev_err(&client->dev,
+ "vcm change mode failed\n");
+ }
+
+ /*change initial focus value for ad5823*/
+ if (dev->vcm == VCM_AD5823) {
+ dev->focus = AD5823_INIT_FOCUS_POS;
+ ov5693_t_focus_abs(sd, AD5823_INIT_FOCUS_POS);
+ } else {
+ dev->focus = 0;
+ ov5693_t_focus_abs(sd, 0);
+ }
+
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /*
+ * This driver assumes "internal DVDD, PWDNB tied to DOVDD".
+ * In this set up only gpio0 (XSHUTDN) should be available
+ * but in some products (for example ECS) gpio1 (PWDNB) is
+ * also available. If gpio1 is available we emulate it being
+ * tied to DOVDD here.
+ */
+ if (flag) {
+ ret = dev->platform_data->v2p8_ctrl(sd, 1);
+ dev->platform_data->gpio1_ctrl(sd, 1);
+ if (ret == 0) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ if (ret) {
+ dev->platform_data->gpio1_ctrl(sd, 0);
+ ret = dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ }
+ } else {
+ dev->platform_data->gpio1_ctrl(sd, 0);
+ ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ return dev->platform_data->gpio0_ctrl(sd, flag);
+}
+
+static int __power_up(struct v4l2_subdev *sd)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /*
+ * Per the datasheet, at least 5 ms must elapse between DOVDD and
+ * PWDN; use 10-11 ms for extra margin.
+ */
+ usleep_range(10000, 11000);
+
+ /* gpio ctrl; retry once if the first attempt fails */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+ }
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ __cci_delay(up_delay);
+
+ return 0;
+
+fail_clk:
+ gpio_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ dev->focus = OV5693_INVALID_CONFIG;
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl; retry once if the first attempt fails */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ static const int retry_count = 4;
+ int i, ret;
+
+ for (i = 0; i < retry_count; i++) {
+ ret = __power_up(sd);
+ if (!ret)
+ return 0;
+
+ power_down(sd);
+ }
+ return ret;
+}
+
+static int ov5693_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ pr_info("%s: on %d\n", __func__, on);
+ if (on == 0)
+ return power_down(sd);
+
+ ret = power_up(sd);
+ if (!ret) {
+ ret = ov5693_init(sd);
+ /* restore settings */
+ ov5693_res = ov5693_res_preview;
+ N_RES = N_RES_PREVIEW;
+ }
+
+ return ret;
+}
+
+/*
+ * distance - compute the aspect-ratio mismatch between a sensor mode
+ * and a requested size
+ * @res: resolution to test
+ * @w: requested width
+ * @h: requested height
+ *
+ * The mismatch is the relative gap between res->width/res->height and
+ * w/h, scaled by 8192:
+ * distance = |res_w/res_h - w/h| / (w/h) * 8192
+ * Modes smaller than w x h are rejected, as are gaps larger than 1/8
+ * (1024/8192).
+ * Returns the scaled gap, or -1 if the mode is not usable.
+ */
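+/*
+ * Worked example (illustrative request sizes): for the 2592x1944 mode
+ * and a 1280x960 request (both 4:3),
+ * ratio = ((2592 << 13) / 1280) * 960 / 1944 = 8191,
+ * so distance = |8191 - 8192| = 1: a near-perfect match. A 1920x1080
+ * (16:9) request against the same mode gives ratio = 6143 and
+ * distance = 2049 > 1024, so that mode is rejected.
+ */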
+#define LARGEST_ALLOWED_RATIO_MISMATCH 1024
+static int distance(struct ov5693_resolution *res, u32 w, u32 h)
+{
+ int ratio;
+ int distance;
+
+ if (w == 0 || h == 0 ||
+ res->width < w || res->height < h)
+ return -1;
+
+ ratio = res->width << 13;
+ ratio /= w;
+ ratio *= h;
+ ratio /= res->height;
+
+ distance = abs(ratio - 8192);
+
+ if (distance > LARGEST_ALLOWED_RATIO_MISMATCH)
+ return -1;
+
+ return distance;
+}
+
+/*
+ * Return the index of the nearest usable resolution: first match on
+ * approximate aspect ratio; if several modes have the same ratio
+ * distance, pick the smallest one.
+ */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ int min_res_w = INT_MAX;
+ struct ov5693_resolution *tmp_res = NULL;
+
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &ov5693_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ min_res_w = ov5693_res[i].width;
+ continue;
+ }
+ if (dist == min_dist && ov5693_res[i].width < min_res_w)
+ idx = i;
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != ov5693_res[i].width)
+ continue;
+ if (h != ov5693_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+/* TODO: remove it. */
+static int startup(struct v4l2_subdev *sd)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_SW_RESET, 0x01);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 reset err.\n");
+ return ret;
+ }
+
+ ret = ov5693_write_reg_array(client, ov5693_global_setting);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 write register err.\n");
+ return ret;
+ }
+
+ ret = ov5693_write_reg_array(client, ov5693_res[dev->fmt_idx].regs);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 write register err.\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ov5693_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *ov5693_info = NULL;
+ int ret = 0;
+ int idx;
+
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+ ov5693_info = v4l2_get_subdev_hostdata(sd);
+ if (!ov5693_info)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = ov5693_res[N_RES - 1].width;
+ fmt->height = ov5693_res[N_RES - 1].height;
+ } else {
+ fmt->width = ov5693_res[idx].width;
+ fmt->height = ov5693_res[idx].height;
+ }
+
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ ret = startup(sd);
+ if (ret) {
+ int i = 0;
+
+ dev_err(&client->dev, "ov5693 startup err, retry to power up\n");
+ for (i = 0; i < OV5693_POWER_UP_RETRY_NUM; i++) {
+ dev_err(&client->dev,
+ "ov5693 retry to power up %d/%d times, result: ",
+ i + 1, OV5693_POWER_UP_RETRY_NUM);
+ power_down(sd);
+ ret = power_up(sd);
+ if (!ret) {
+ mutex_unlock(&dev->input_lock);
+ ov5693_init(sd);
+ mutex_lock(&dev->input_lock);
+ } else {
+ dev_err(&client->dev, "power up failed, continue\n");
+ continue;
+ }
+ ret = startup(sd);
+ if (ret) {
+ dev_err(&client->dev, " startup FAILED!\n");
+ } else {
+ dev_err(&client->dev, " startup SUCCESS!\n");
+ break;
+ }
+ }
+ }
+
+ /*
+ * After the sensor settings are written, the sensor sometimes starts
+ * streaming before the ISP is ready to receive data, which causes an
+ * ISP timeout. Stop streaming explicitly here to avoid that.
+ */
+ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
+ OV5693_STOP_STREAMING);
+ if (ret)
+ dev_warn(&client->dev, "ov5693 stream off err\n");
+
+ ret = ov5693_get_intg_factor(client, ov5693_info,
+ &ov5693_res[dev->fmt_idx]);
+ if (ret) {
+ dev_err(&client->dev, "failed to get integration_factor\n");
+ goto err;
+ }
+
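+ /*
+ * The metadata line width follows the 10-bit packing of the pixel
+ * data: width * 10 / 8 bytes per line (e.g. 2592 pixels -> 3240
+ * bytes).
+ */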
+ ov5693_info->metadata_width = fmt->width * 10 / 8;
+ ov5693_info->metadata_height = 1;
+ ov5693_info->metadata_effective_width = &ov5693_embedded_effective_size;
+
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov5693_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = ov5693_res[dev->fmt_idx].width;
+ fmt->height = ov5693_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov5693_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high, low;
+ int ret;
+ u16 id;
+ u8 revision;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_SC_CMMN_CHIP_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "failed to read sensor chip ID high byte\n");
+ return -ENODEV;
+ }
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_SC_CMMN_CHIP_ID_L, &low);
+ if (ret) {
+ dev_err(&client->dev, "failed to read sensor chip ID low byte\n");
+ return -ENODEV;
+ }
+ id = ((u16)high << 8) | (u16)low;
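+ /*
+ * The two ID bytes assemble MSB-first; e.g. high = 0x56 and low = 0x90
+ * give id = 0x5690, which matches OV5693_ID.
+ */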
+
+ if (id != OV5693_ID) {
+ dev_err(&client->dev, "sensor ID error 0x%x\n", id);
+ return -ENODEV;
+ }
+
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_SC_CMMN_SUB_ID, &high);
+ revision = (u8)high & 0x0f;
+
+ dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision);
+ dev_dbg(&client->dev, "detect ov5693 success\n");
+ return 0;
+}
+
+static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+
+ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
+ enable ? OV5693_START_STREAMING :
+ OV5693_STOP_STREAMING);
+
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int ov5693_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+ /*
+ * Power the module off and back on: the initial board power-on
+ * may not follow the power-on sequence the module requires.
+ */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ if (!dev->vcm)
+ dev->vcm = vcm_detect(client);
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = ov5693_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "ov5693_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ dev->otp_data = ov5693_otp_read(sd);
+
+ /* turn the sensor off after probing */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov5693_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = ov5693_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int ov5693_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = ov5693_res[index].width;
+ fse->min_height = ov5693_res[index].height;
+ fse->max_width = ov5693_res[index].width;
+ fse->max_height = ov5693_res[index].height;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov5693_video_ops = {
+ .s_stream = ov5693_s_stream,
+ .g_frame_interval = ov5693_g_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops ov5693_core_ops = {
+ .s_power = ov5693_s_power,
+ .ioctl = ov5693_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops ov5693_pad_ops = {
+ .enum_mbus_code = ov5693_enum_mbus_code,
+ .enum_frame_size = ov5693_enum_frame_size,
+ .get_fmt = ov5693_get_fmt,
+ .set_fmt = ov5693_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov5693_ops = {
+ .core = &ov5693_core_ops,
+ .video = &ov5693_video_ops,
+ .pad = &ov5693_pad_ops,
+};
+
+static int ov5693_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ dev_dbg(&client->dev, "ov5693_remove...\n");
+
+ dev->platform_data->csi_cfg(sd, 0);
+
+ v4l2_device_unregister_subdev(sd);
+
+ atomisp_gmin_remove_subdev(sd);
+
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+
+ return 0;
+}
+
+static int ov5693_probe(struct i2c_client *client)
+{
+ struct ov5693_device *dev;
+ int i2c;
+ int ret = 0;
+ void *pdata;
+ unsigned int i;
+ acpi_handle handle;
+ struct acpi_device *adev;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+ pr_info("%s: ACPI detected device on bus ID=%s, HID=%s\n",
+ __func__, acpi_device_bid(adev), acpi_device_hid(adev));
+ // FIXME: may need to release resources allocated by acpi_bus_get_device()
+
+ /*
+ * Firmware workaround: Some modules use a "secondary default"
+ * address of 0x10 which doesn't appear on schematics, and
+ * some BIOS versions haven't gotten the memo. Work around
+ * via config.
+ */
+ i2c = gmin_get_var_int(&client->dev, false, "I2CAddr", -1);
+ if (i2c != -1) {
+ dev_info(&client->dev,
+ "Overriding firmware-provided I2C address (0x%x) with 0x%x\n",
+ client->addr, i2c);
+ client->addr = i2c;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&dev->sd, client, &ov5693_ops);
+
+ pdata = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_bggr);
+ if (!pdata) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ret = ov5693_s_config(&dev->sd, client->irq, pdata);
+ if (ret)
+ goto out_free;
+
+ ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
+ if (ret)
+ goto out_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
+ ARRAY_SIZE(ov5693_controls));
+ if (ret) {
+ ov5693_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ov5693_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov5693_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ ov5693_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret)
+ ov5693_remove(client);
+
+ return ret;
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+static const struct acpi_device_id ov5693_acpi_match[] = {
+ {"INT33BE"},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
+
+static struct i2c_driver ov5693_driver = {
+ .driver = {
+ .name = "ov5693",
+ .acpi_match_table = ov5693_acpi_match,
+ },
+ .probe_new = ov5693_probe,
+ .remove = ov5693_remove,
+};
+module_i2c_driver(ov5693_driver);
+
+MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
new file mode 100644
index 000000000000..0189907969c6
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
@@ -0,0 +1,1391 @@
+/*
+ * Support for OmniVision OV5693 5M camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __OV5693_H__
+#define __OV5693_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+
+#include "../../include/linux/atomisp_platform.h"
+
+/*
+ * FIXME: non-preview resolutions are currently broken
+ */
+#define ENABLE_NON_PREVIEW 0
+
+#define OV5693_POWER_UP_RETRY_NUM 5
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define OV5693_FOCAL_LENGTH_NUM 334 /*3.34mm*/
+#define OV5693_FOCAL_LENGTH_DEM 100
+#define OV5693_F_NUMBER_DEFAULT_NUM 24
+#define OV5693_F_NUMBER_DEM 10
+
+#define MAX_FMTS 1
+
+/* sensor_mode_data read_mode adaptation */
+#define OV5693_READ_MODE_BINNING_ON 0x0400
+#define OV5693_READ_MODE_BINNING_OFF 0x00
+#define OV5693_INTEGRATION_TIME_MARGIN 8
+
+#define OV5693_MAX_EXPOSURE_VALUE 0xFFF1
+#define OV5693_MAX_GAIN_VALUE 0xFF
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV5693_FOCAL_LENGTH_DEFAULT 0x1B70064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV5693_F_NUMBER_DEFAULT 0x18000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define OV5693_F_NUMBER_RANGE 0x180a180a
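+/*
+ * Worked decode of the values above (derived from the bit layouts, not an
+ * extra datasheet fact): OV5693_F_NUMBER_DEFAULT 0x18000a splits into
+ * 0x18/0x000a = 24/10 = f/2.4, and each byte pair of OV5693_F_NUMBER_RANGE
+ * 0x180a180a decodes to 24/10 as well, so the minimum and maximum f-number
+ * are both f/2.4, i.e. a fixed aperture.
+ */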
+#define OV5693_ID 0x5690
+
+#define OV5693_FINE_INTG_TIME_MIN 0
+#define OV5693_FINE_INTG_TIME_MAX_MARGIN 0
+#define OV5693_COARSE_INTG_TIME_MIN 1
+#define OV5693_COARSE_INTG_TIME_MAX_MARGIN 6
+
+#define OV5693_BIN_FACTOR_MAX 4
+/*
+ * OV5693 System control registers
+ */
+#define OV5693_SW_SLEEP 0x0100
+#define OV5693_SW_RESET 0x0103
+#define OV5693_SW_STREAM 0x0100
+
+#define OV5693_SC_CMMN_CHIP_ID_H 0x300A
+#define OV5693_SC_CMMN_CHIP_ID_L 0x300B
+#define OV5693_SC_CMMN_SCCB_ID 0x300C
+#define OV5693_SC_CMMN_SUB_ID 0x302A /* process, version*/
+/*Bit[7:4] Group control, Bit[3:0] Group ID*/
+#define OV5693_GROUP_ACCESS 0x3208
+/*
+ * Bit[3:0]: bits [19:16] of exposure; the remaining 16 bits live in
+ * Reg0x3501 and Reg0x3502.
+ */
+#define OV5693_EXPOSURE_H 0x3500
+#define OV5693_EXPOSURE_M 0x3501
+#define OV5693_EXPOSURE_L 0x3502
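+/*
+ * Example split of a 20-bit exposure value such as 0x12345:
+ * 0x3500 = 0x01 (bits [19:16]), 0x3501 = 0x23 (bits [15:8]),
+ * 0x3502 = 0x45 (bits [7:0]); ov5693_q_exposure() reassembles the
+ * value in the same order.
+ */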
+/*Bit[1:0] means Bit[9:8] of gain*/
+#define OV5693_AGC_H 0x350A
+#define OV5693_AGC_L 0x350B /*Bit[7:0] of gain*/
+
+#define OV5693_HORIZONTAL_START_H 0x3800 /*Bit[11:8]*/
+#define OV5693_HORIZONTAL_START_L 0x3801 /*Bit[7:0]*/
+#define OV5693_VERTICAL_START_H 0x3802 /*Bit[11:8]*/
+#define OV5693_VERTICAL_START_L 0x3803 /*Bit[7:0]*/
+#define OV5693_HORIZONTAL_END_H 0x3804 /*Bit[11:8]*/
+#define OV5693_HORIZONTAL_END_L 0x3805 /*Bit[7:0]*/
+#define OV5693_VERTICAL_END_H 0x3806 /*Bit[11:8]*/
+#define OV5693_VERTICAL_END_L 0x3807 /*Bit[7:0]*/
+#define OV5693_HORIZONTAL_OUTPUT_SIZE_H 0x3808 /*Bit[3:0]*/
+#define OV5693_HORIZONTAL_OUTPUT_SIZE_L 0x3809 /*Bit[7:0]*/
+#define OV5693_VERTICAL_OUTPUT_SIZE_H 0x380a /*Bit[3:0]*/
+#define OV5693_VERTICAL_OUTPUT_SIZE_L 0x380b /*Bit[7:0]*/
+/*High 8-bit, and low 8-bit HTS address is 0x380d*/
+#define OV5693_TIMING_HTS_H 0x380C
+/*High 8-bit, and low 8-bit HTS address is 0x380d*/
+#define OV5693_TIMING_HTS_L 0x380D
+/*High 8-bit, and low 8-bit VTS address is 0x380f*/
+#define OV5693_TIMING_VTS_H 0x380e
+/*High 8-bit, and low 8-bit VTS address is 0x380f*/
+#define OV5693_TIMING_VTS_L 0x380f
+
+#define OV5693_MWB_RED_GAIN_H 0x3400
+#define OV5693_MWB_GREEN_GAIN_H 0x3402
+#define OV5693_MWB_BLUE_GAIN_H 0x3404
+#define OV5693_MWB_GAIN_MAX 0x0fff
+
+#define OV5693_START_STREAMING 0x01
+#define OV5693_STOP_STREAMING 0x00
+
+#define VCM_ADDR 0x0c
+#define VCM_CODE_MSB 0x04
+
+#define OV5693_INVALID_CONFIG 0xffffffff
+
+#define OV5693_VCM_SLEW_STEP 0x30F0
+#define OV5693_VCM_SLEW_STEP_MAX 0x7
+#define OV5693_VCM_SLEW_STEP_MASK 0x7
+#define OV5693_VCM_CODE 0x30F2
+#define OV5693_VCM_SLEW_TIME 0x30F4
+#define OV5693_VCM_SLEW_TIME_MAX 0xffff
+#define OV5693_VCM_ENABLE 0x8000
+
+#define OV5693_VCM_MAX_FOCUS_NEG -1023
+#define OV5693_VCM_MAX_FOCUS_POS 1023
+
+#define DLC_ENABLE 1
+#define DLC_DISABLE 0
+#define VCM_PROTECTION_OFF 0xeca3
+#define VCM_PROTECTION_ON 0xdc51
+#define VCM_DEFAULT_S 0x0
+#define vcm_step_s(a) (u8)(a & 0xf)
+#define vcm_step_mclk(a) (u8)((a >> 4) & 0x3)
+#define vcm_dlc_mclk(dlc, mclk) (u16)((dlc << 3) | mclk | 0xa104)
+#define vcm_tsrc(tsrc) (u16)(tsrc << 3 | 0xf200)
+#define vcm_val(data, s) (u16)(data << 4 | s)
+#define DIRECT_VCM vcm_dlc_mclk(0, 0)
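+/*
+ * Illustrative expansions of the helpers above (values chosen for the
+ * example, not taken from a datasheet): vcm_val(512, VCM_DEFAULT_S)
+ * = (512 << 4) | 0 = 0x2000, and DIRECT_VCM = vcm_dlc_mclk(0, 0)
+ * = 0xa104, i.e. DLC disabled with the default mclk bits.
+ */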
+
+/* Defines for OTP Data Registers */
+#define OV5693_FRAME_OFF_NUM 0x4202
+#define OV5693_OTP_BYTE_MAX 32 /* 32 bytes, as required by the OTP data */
+#define OV5693_OTP_SHORT_MAX 16
+#define OV5693_OTP_START_ADDR 0x3D00
+#define OV5693_OTP_END_ADDR 0x3D0F
+#define OV5693_OTP_DATA_SIZE 320
+#define OV5693_OTP_PROGRAM_REG 0x3D80
+#define OV5693_OTP_READ_REG 0x3D81 /* 1: enable, 0: disable */
+#define OV5693_OTP_BANK_REG 0x3D84 /* OTP bank and mode */
+#define OV5693_OTP_READY_REG_DONE 1
+#define OV5693_OTP_BANK_MAX 28
+#define OV5693_OTP_BANK_SIZE 16 /* 16 bytes per bank */
+#define OV5693_OTP_READ_ONETIME 16
+#define OV5693_OTP_MODE_READ 1
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct ov5693_resolution {
+ u8 *desc;
+ const struct ov5693_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+};
+
+struct ov5693_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct ov5693_reg *regs;
+};
+
+enum vcm_type {
+ VCM_UNKNOWN,
+ VCM_AD5823,
+ VCM_DW9714,
+};
+
+/*
+ * ov5693 device structure.
+ */
+struct ov5693_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct camera_sensor_platform_data *platform_data;
+ ktime_t timestamp_t_focus_abs;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ int run_mode;
+ int otp_size;
+ u8 *otp_data;
+ u32 focus;
+ s16 number_of_steps;
+ u8 res;
+ u8 type;
+ bool vcm_update;
+ enum vcm_type vcm;
+};
+
+enum ov5693_tok_type {
+ OV5693_8BIT = 0x0001,
+ OV5693_16BIT = 0x0002,
+ OV5693_32BIT = 0x0004,
+ OV5693_TOK_TERM = 0xf000, /* terminating token for reg list */
+ OV5693_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ OV5693_TOK_MASK = 0xfff0
+};
+
+/**
+ * struct ov5693_reg - ov5693 sensor register format
+ * @type: type of the register
+ * @reg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct ov5693_reg {
+ enum ov5693_tok_type type;
+ u16 reg;
+ u32 val; /* 8/16/32-bit register value */
+};
+
+#define to_ov5693_sensor(x) container_of(x, struct ov5693_device, sd)
+
+#define OV5693_MAX_WRITE_BUF_SIZE 30
+
+struct ov5693_write_buffer {
+ u16 addr;
+ u8 data[OV5693_MAX_WRITE_BUF_SIZE];
+};
+
+struct ov5693_write_ctrl {
+ int index;
+ struct ov5693_write_buffer buffer;
+};
+
+static struct ov5693_reg const ov5693_global_setting[] = {
+ {OV5693_8BIT, 0x0103, 0x01},
+ {OV5693_8BIT, 0x3001, 0x0a},
+ {OV5693_8BIT, 0x3002, 0x80},
+ {OV5693_8BIT, 0x3006, 0x00},
+ {OV5693_8BIT, 0x3011, 0x21},
+ {OV5693_8BIT, 0x3012, 0x09},
+ {OV5693_8BIT, 0x3013, 0x10},
+ {OV5693_8BIT, 0x3014, 0x00},
+ {OV5693_8BIT, 0x3015, 0x08},
+ {OV5693_8BIT, 0x3016, 0xf0},
+ {OV5693_8BIT, 0x3017, 0xf0},
+ {OV5693_8BIT, 0x3018, 0xf0},
+ {OV5693_8BIT, 0x301b, 0xb4},
+ {OV5693_8BIT, 0x301d, 0x02},
+ {OV5693_8BIT, 0x3021, 0x00},
+ {OV5693_8BIT, 0x3022, 0x01},
+ {OV5693_8BIT, 0x3028, 0x44},
+ {OV5693_8BIT, 0x3098, 0x02},
+ {OV5693_8BIT, 0x3099, 0x19},
+ {OV5693_8BIT, 0x309a, 0x02},
+ {OV5693_8BIT, 0x309b, 0x01},
+ {OV5693_8BIT, 0x309c, 0x00},
+ {OV5693_8BIT, 0x30a0, 0xd2},
+ {OV5693_8BIT, 0x30a2, 0x01},
+ {OV5693_8BIT, 0x30b2, 0x00},
+ {OV5693_8BIT, 0x30b3, 0x7d},
+ {OV5693_8BIT, 0x30b4, 0x03},
+ {OV5693_8BIT, 0x30b5, 0x04},
+ {OV5693_8BIT, 0x30b6, 0x01},
+ {OV5693_8BIT, 0x3104, 0x21},
+ {OV5693_8BIT, 0x3106, 0x00},
+ {OV5693_8BIT, 0x3400, 0x04},
+ {OV5693_8BIT, 0x3401, 0x00},
+ {OV5693_8BIT, 0x3402, 0x04},
+ {OV5693_8BIT, 0x3403, 0x00},
+ {OV5693_8BIT, 0x3404, 0x04},
+ {OV5693_8BIT, 0x3405, 0x00},
+ {OV5693_8BIT, 0x3406, 0x01},
+ {OV5693_8BIT, 0x3500, 0x00},
+ {OV5693_8BIT, 0x3503, 0x07},
+ {OV5693_8BIT, 0x3504, 0x00},
+ {OV5693_8BIT, 0x3505, 0x00},
+ {OV5693_8BIT, 0x3506, 0x00},
+ {OV5693_8BIT, 0x3507, 0x02},
+ {OV5693_8BIT, 0x3508, 0x00},
+ {OV5693_8BIT, 0x3509, 0x10},
+ {OV5693_8BIT, 0x350a, 0x00},
+ {OV5693_8BIT, 0x350b, 0x40},
+ {OV5693_8BIT, 0x3601, 0x0a},
+ {OV5693_8BIT, 0x3602, 0x38},
+ {OV5693_8BIT, 0x3612, 0x80},
+ {OV5693_8BIT, 0x3620, 0x54},
+ {OV5693_8BIT, 0x3621, 0xc7},
+ {OV5693_8BIT, 0x3622, 0x0f},
+ {OV5693_8BIT, 0x3625, 0x10},
+ {OV5693_8BIT, 0x3630, 0x55},
+ {OV5693_8BIT, 0x3631, 0xf4},
+ {OV5693_8BIT, 0x3632, 0x00},
+ {OV5693_8BIT, 0x3633, 0x34},
+ {OV5693_8BIT, 0x3634, 0x02},
+ {OV5693_8BIT, 0x364d, 0x0d},
+ {OV5693_8BIT, 0x364f, 0xdd},
+ {OV5693_8BIT, 0x3660, 0x04},
+ {OV5693_8BIT, 0x3662, 0x10},
+ {OV5693_8BIT, 0x3663, 0xf1},
+ {OV5693_8BIT, 0x3665, 0x00},
+ {OV5693_8BIT, 0x3666, 0x20},
+ {OV5693_8BIT, 0x3667, 0x00},
+ {OV5693_8BIT, 0x366a, 0x80},
+ {OV5693_8BIT, 0x3680, 0xe0},
+ {OV5693_8BIT, 0x3681, 0x00},
+ {OV5693_8BIT, 0x3700, 0x42},
+ {OV5693_8BIT, 0x3701, 0x14},
+ {OV5693_8BIT, 0x3702, 0xa0},
+ {OV5693_8BIT, 0x3703, 0xd8},
+ {OV5693_8BIT, 0x3704, 0x78},
+ {OV5693_8BIT, 0x3705, 0x02},
+ {OV5693_8BIT, 0x370a, 0x00},
+ {OV5693_8BIT, 0x370b, 0x20},
+ {OV5693_8BIT, 0x370c, 0x0c},
+ {OV5693_8BIT, 0x370d, 0x11},
+ {OV5693_8BIT, 0x370e, 0x00},
+ {OV5693_8BIT, 0x370f, 0x40},
+ {OV5693_8BIT, 0x3710, 0x00},
+ {OV5693_8BIT, 0x371a, 0x1c},
+ {OV5693_8BIT, 0x371b, 0x05},
+ {OV5693_8BIT, 0x371c, 0x01},
+ {OV5693_8BIT, 0x371e, 0xa1},
+ {OV5693_8BIT, 0x371f, 0x0c},
+ {OV5693_8BIT, 0x3721, 0x00},
+ {OV5693_8BIT, 0x3724, 0x10},
+ {OV5693_8BIT, 0x3726, 0x00},
+ {OV5693_8BIT, 0x372a, 0x01},
+ {OV5693_8BIT, 0x3730, 0x10},
+ {OV5693_8BIT, 0x3738, 0x22},
+ {OV5693_8BIT, 0x3739, 0xe5},
+ {OV5693_8BIT, 0x373a, 0x50},
+ {OV5693_8BIT, 0x373b, 0x02},
+ {OV5693_8BIT, 0x373c, 0x41},
+ {OV5693_8BIT, 0x373f, 0x02},
+ {OV5693_8BIT, 0x3740, 0x42},
+ {OV5693_8BIT, 0x3741, 0x02},
+ {OV5693_8BIT, 0x3742, 0x18},
+ {OV5693_8BIT, 0x3743, 0x01},
+ {OV5693_8BIT, 0x3744, 0x02},
+ {OV5693_8BIT, 0x3747, 0x10},
+ {OV5693_8BIT, 0x374c, 0x04},
+ {OV5693_8BIT, 0x3751, 0xf0},
+ {OV5693_8BIT, 0x3752, 0x00},
+ {OV5693_8BIT, 0x3753, 0x00},
+ {OV5693_8BIT, 0x3754, 0xc0},
+ {OV5693_8BIT, 0x3755, 0x00},
+ {OV5693_8BIT, 0x3756, 0x1a},
+ {OV5693_8BIT, 0x3758, 0x00},
+ {OV5693_8BIT, 0x3759, 0x0f},
+ {OV5693_8BIT, 0x376b, 0x44},
+ {OV5693_8BIT, 0x375c, 0x04},
+ {OV5693_8BIT, 0x3774, 0x10},
+ {OV5693_8BIT, 0x3776, 0x00},
+ {OV5693_8BIT, 0x377f, 0x08},
+ {OV5693_8BIT, 0x3780, 0x22},
+ {OV5693_8BIT, 0x3781, 0x0c},
+ {OV5693_8BIT, 0x3784, 0x2c},
+ {OV5693_8BIT, 0x3785, 0x1e},
+ {OV5693_8BIT, 0x378f, 0xf5},
+ {OV5693_8BIT, 0x3791, 0xb0},
+ {OV5693_8BIT, 0x3795, 0x00},
+ {OV5693_8BIT, 0x3796, 0x64},
+ {OV5693_8BIT, 0x3797, 0x11},
+ {OV5693_8BIT, 0x3798, 0x30},
+ {OV5693_8BIT, 0x3799, 0x41},
+ {OV5693_8BIT, 0x379a, 0x07},
+ {OV5693_8BIT, 0x379b, 0xb0},
+ {OV5693_8BIT, 0x379c, 0x0c},
+ {OV5693_8BIT, 0x37c5, 0x00},
+ {OV5693_8BIT, 0x37c6, 0x00},
+ {OV5693_8BIT, 0x37c7, 0x00},
+ {OV5693_8BIT, 0x37c9, 0x00},
+ {OV5693_8BIT, 0x37ca, 0x00},
+ {OV5693_8BIT, 0x37cb, 0x00},
+ {OV5693_8BIT, 0x37de, 0x00},
+ {OV5693_8BIT, 0x37df, 0x00},
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3810, 0x00},
+ {OV5693_8BIT, 0x3812, 0x00},
+ {OV5693_8BIT, 0x3823, 0x00},
+ {OV5693_8BIT, 0x3824, 0x00},
+ {OV5693_8BIT, 0x3825, 0x00},
+ {OV5693_8BIT, 0x3826, 0x00},
+ {OV5693_8BIT, 0x3827, 0x00},
+ {OV5693_8BIT, 0x382a, 0x04},
+ {OV5693_8BIT, 0x3a04, 0x06},
+ {OV5693_8BIT, 0x3a05, 0x14},
+ {OV5693_8BIT, 0x3a06, 0x00},
+ {OV5693_8BIT, 0x3a07, 0xfe},
+ {OV5693_8BIT, 0x3b00, 0x00},
+ {OV5693_8BIT, 0x3b02, 0x00},
+ {OV5693_8BIT, 0x3b03, 0x00},
+ {OV5693_8BIT, 0x3b04, 0x00},
+ {OV5693_8BIT, 0x3b05, 0x00},
+ {OV5693_8BIT, 0x3e07, 0x20},
+ {OV5693_8BIT, 0x4000, 0x08},
+ {OV5693_8BIT, 0x4001, 0x04},
+ {OV5693_8BIT, 0x4002, 0x45},
+ {OV5693_8BIT, 0x4004, 0x08},
+ {OV5693_8BIT, 0x4005, 0x18},
+ {OV5693_8BIT, 0x4006, 0x20},
+ {OV5693_8BIT, 0x4008, 0x24},
+ {OV5693_8BIT, 0x4009, 0x10},
+ {OV5693_8BIT, 0x400c, 0x00},
+ {OV5693_8BIT, 0x400d, 0x00},
+ {OV5693_8BIT, 0x4058, 0x00},
+ {OV5693_8BIT, 0x404e, 0x37},
+ {OV5693_8BIT, 0x404f, 0x8f},
+ {OV5693_8BIT, 0x4058, 0x00},
+ {OV5693_8BIT, 0x4101, 0xb2},
+ {OV5693_8BIT, 0x4303, 0x00},
+ {OV5693_8BIT, 0x4304, 0x08},
+ {OV5693_8BIT, 0x4307, 0x31},
+ {OV5693_8BIT, 0x4311, 0x04},
+ {OV5693_8BIT, 0x4315, 0x01},
+ {OV5693_8BIT, 0x4511, 0x05},
+ {OV5693_8BIT, 0x4512, 0x01},
+ {OV5693_8BIT, 0x4806, 0x00},
+ {OV5693_8BIT, 0x4816, 0x52},
+ {OV5693_8BIT, 0x481f, 0x30},
+ {OV5693_8BIT, 0x4826, 0x2c},
+ {OV5693_8BIT, 0x4831, 0x64},
+ {OV5693_8BIT, 0x4d00, 0x04},
+ {OV5693_8BIT, 0x4d01, 0x71},
+ {OV5693_8BIT, 0x4d02, 0xfd},
+ {OV5693_8BIT, 0x4d03, 0xf5},
+ {OV5693_8BIT, 0x4d04, 0x0c},
+ {OV5693_8BIT, 0x4d05, 0xcc},
+ {OV5693_8BIT, 0x4837, 0x0a},
+ {OV5693_8BIT, 0x5000, 0x06},
+ {OV5693_8BIT, 0x5001, 0x01},
+ {OV5693_8BIT, 0x5003, 0x20},
+ {OV5693_8BIT, 0x5046, 0x0a},
+ {OV5693_8BIT, 0x5013, 0x00},
+ {OV5693_8BIT, 0x5046, 0x0a},
+ {OV5693_8BIT, 0x5780, 0x1c},
+ {OV5693_8BIT, 0x5786, 0x20},
+ {OV5693_8BIT, 0x5787, 0x10},
+ {OV5693_8BIT, 0x5788, 0x18},
+ {OV5693_8BIT, 0x578a, 0x04},
+ {OV5693_8BIT, 0x578b, 0x02},
+ {OV5693_8BIT, 0x578c, 0x02},
+ {OV5693_8BIT, 0x578e, 0x06},
+ {OV5693_8BIT, 0x578f, 0x02},
+ {OV5693_8BIT, 0x5790, 0x02},
+ {OV5693_8BIT, 0x5791, 0xff},
+ {OV5693_8BIT, 0x5842, 0x01},
+ {OV5693_8BIT, 0x5843, 0x2b},
+ {OV5693_8BIT, 0x5844, 0x01},
+ {OV5693_8BIT, 0x5845, 0x92},
+ {OV5693_8BIT, 0x5846, 0x01},
+ {OV5693_8BIT, 0x5847, 0x8f},
+ {OV5693_8BIT, 0x5848, 0x01},
+ {OV5693_8BIT, 0x5849, 0x0c},
+ {OV5693_8BIT, 0x5e00, 0x00},
+ {OV5693_8BIT, 0x5e10, 0x0c},
+ {OV5693_8BIT, 0x0100, 0x00},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+#if ENABLE_NON_PREVIEW
+/*
+ * 654x496 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ */
+static struct ov5693_reg const ov5693_654x496[] = {
+ {OV5693_8BIT, 0x3501, 0x3d},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc7},
+ {OV5693_8BIT, 0x3803, 0x00},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x02},
+ {OV5693_8BIT, 0x3809, 0x90},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0xf0},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x08},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 1296x976 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ * DS from 2592x1952
+ */
+static struct ov5693_reg const ov5693_1296x976[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3803, 0x00},
+
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xA3},
+
+ {OV5693_8BIT, 0x3808, 0x05},
+ {OV5693_8BIT, 0x3809, 0x10},
+ {OV5693_8BIT, 0x380a, 0x03},
+ {OV5693_8BIT, 0x380b, 0xD0},
+
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+
+ {OV5693_8BIT, 0x3810, 0x00},
+ {OV5693_8BIT, 0x3811, 0x10},
+ {OV5693_8BIT, 0x3812, 0x00},
+ {OV5693_8BIT, 0x3813, 0x02},
+
+ {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
+ {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 336x256 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ * DS from 2564x1956
+ */
+static struct ov5693_reg const ov5693_336x256[] = {
+ {OV5693_8BIT, 0x3501, 0x3d},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc7},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x01},
+ {OV5693_8BIT, 0x3809, 0x50},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0x00},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x1E},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 368x304 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ * DS from 2368x1956
+ */
+static struct ov5693_reg const ov5693_368x304[] = {
+ {OV5693_8BIT, 0x3501, 0x3d},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc7},
+ {OV5693_8BIT, 0x3808, 0x01},
+ {OV5693_8BIT, 0x3809, 0x70},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0x30},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x80},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 192x160 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ * DS from 2460x1956
+ */
+static struct ov5693_reg const ov5693_192x160[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x80},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xA3},
+ {OV5693_8BIT, 0x3808, 0x00},
+ {OV5693_8BIT, 0x3809, 0xC0},
+ {OV5693_8BIT, 0x380a, 0x00},
+ {OV5693_8BIT, 0x380b, 0xA0},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x40},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_reg const ov5693_736x496[] = {
+ {OV5693_8BIT, 0x3501, 0x3d},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc7},
+ {OV5693_8BIT, 0x3803, 0x68},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0x3b},
+ {OV5693_8BIT, 0x3808, 0x02},
+ {OV5693_8BIT, 0x3809, 0xe0},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0xf0},
+ {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07}, /*vts*/
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x08},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+#endif
+
+/*
+static struct ov5693_reg const ov5693_736x496[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0x00},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x02},
+ {OV5693_8BIT, 0x3809, 0xe0},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0xf0},
+ {OV5693_8BIT, 0x380c, 0x0d},
+ {OV5693_8BIT, 0x380d, 0xb0},
+ {OV5693_8BIT, 0x380e, 0x05},
+ {OV5693_8BIT, 0x380f, 0xf2},
+ {OV5693_8BIT, 0x3811, 0x08},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x01},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+*/
+/*
+ * 976x556 30fps 8.8ms VBlanking 2lane 10Bit (Scaling)
+ */
+#if ENABLE_NON_PREVIEW
+static struct ov5693_reg const ov5693_976x556[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa7},
+ {OV5693_8BIT, 0x3808, 0x03},
+ {OV5693_8BIT, 0x3809, 0xd0},
+ {OV5693_8BIT, 0x380a, 0x02},
+ {OV5693_8BIT, 0x380b, 0x2C},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x10},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*DS from 2624x1492*/
+static struct ov5693_reg const ov5693_1296x736[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3803, 0x00},
+
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xA3},
+
+ {OV5693_8BIT, 0x3808, 0x05},
+ {OV5693_8BIT, 0x3809, 0x10},
+ {OV5693_8BIT, 0x380a, 0x02},
+ {OV5693_8BIT, 0x380b, 0xe0},
+
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+
+ {OV5693_8BIT, 0x3813, 0xE8},
+
+ {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
+ {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_reg const ov5693_1636p_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa7},
+ {OV5693_8BIT, 0x3808, 0x06},
+ {OV5693_8BIT, 0x3809, 0x64},
+ {OV5693_8BIT, 0x380a, 0x04},
+ {OV5693_8BIT, 0x380b, 0x48},
+ {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07}, /*vts*/
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x02},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+#endif
+
+static struct ov5693_reg const ov5693_1616x1216_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x80},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00}, /*{3800,3801} Array X start*/
+ {OV5693_8BIT, 0x3801, 0x08}, /* 04 //{3800,3801} Array X start*/
+ {OV5693_8BIT, 0x3802, 0x00}, /*{3802,3803} Array Y start*/
+ {OV5693_8BIT, 0x3803, 0x04}, /* 00 //{3802,3803} Array Y start*/
+ {OV5693_8BIT, 0x3804, 0x0a}, /*{3804,3805} Array X end*/
+ {OV5693_8BIT, 0x3805, 0x37}, /* 3b //{3804,3805} Array X end*/
+ {OV5693_8BIT, 0x3806, 0x07}, /*{3806,3807} Array Y end*/
+ {OV5693_8BIT, 0x3807, 0x9f}, /* a3 //{3806,3807} Array Y end*/
+ {OV5693_8BIT, 0x3808, 0x06}, /*{3808,3809} Final output H size*/
+ {OV5693_8BIT, 0x3809, 0x50}, /*{3808,3809} Final output H size*/
+ {OV5693_8BIT, 0x380a, 0x04}, /*{380a,380b} Final output V size*/
+ {OV5693_8BIT, 0x380b, 0xc0}, /*{380a,380b} Final output V size*/
+ {OV5693_8BIT, 0x380c, 0x0a}, /*{380c,380d} HTS*/
+ {OV5693_8BIT, 0x380d, 0x80}, /*{380c,380d} HTS*/
+ {OV5693_8BIT, 0x380e, 0x07}, /*{380e,380f} VTS*/
+ {OV5693_8BIT, 0x380f, 0xc0}, /* bc //{380e,380f} VTS*/
+ {OV5693_8BIT, 0x3810, 0x00}, /*{3810,3811} windowing X offset*/
+ {OV5693_8BIT, 0x3811, 0x10}, /*{3810,3811} windowing X offset*/
+ {OV5693_8BIT, 0x3812, 0x00}, /*{3812,3813} windowing Y offset*/
+ {OV5693_8BIT, 0x3813, 0x06}, /*{3812,3813} windowing Y offset*/
+ {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
+ {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
+ {OV5693_8BIT, 0x3820, 0x00}, /*FLIP/Binnning control*/
+ {OV5693_8BIT, 0x3821, 0x1e}, /*MIRROR control*/
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 1940x1096 30fps 8.8ms VBlanking 2lane 10bit (Scaling)
+ */
+#if ENABLE_NON_PREVIEW
+static struct ov5693_reg const ov5693_1940x1096[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa7},
+ {OV5693_8BIT, 0x3808, 0x07},
+ {OV5693_8BIT, 0x3809, 0x94},
+ {OV5693_8BIT, 0x380a, 0x04},
+ {OV5693_8BIT, 0x380b, 0x48},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x02},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_reg const ov5693_2592x1456_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa4},
+ {OV5693_8BIT, 0x3808, 0x0a},
+ {OV5693_8BIT, 0x3809, 0x20},
+ {OV5693_8BIT, 0x380a, 0x05},
+ {OV5693_8BIT, 0x380b, 0xb0},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x10},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_TOK_TERM, 0, 0}
+};
+#endif
+
+static struct ov5693_reg const ov5693_2576x1456_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa4},
+ {OV5693_8BIT, 0x3808, 0x0a},
+ {OV5693_8BIT, 0x3809, 0x10},
+ {OV5693_8BIT, 0x380a, 0x05},
+ {OV5693_8BIT, 0x380b, 0xb0},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x18},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 2592x1944 30fps 0.6ms VBlanking 2lane 10Bit
+ */
+#if ENABLE_NON_PREVIEW
+static struct ov5693_reg const ov5693_2592x1944_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0x00},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x0a},
+ {OV5693_8BIT, 0x3809, 0x20},
+ {OV5693_8BIT, 0x380a, 0x07},
+ {OV5693_8BIT, 0x380b, 0x98},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x10},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+#endif
+
+/*
+ * 11:9 Full FOV Output, expected FOV Res: 2346x1920
+ * ISP Effect Res: 1408x1152
+ * Sensor out: 1424x1168, DS From: 2380x1952
+ *
+ * WA: Left Offset: 8, Hor scal: 64
+ */
+#if ENABLE_NON_PREVIEW
+static struct ov5693_reg const ov5693_1424x1168_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
+ {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
+ {OV5693_8BIT, 0x3801, 0x50}, /* 80 */
+ {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
+ {OV5693_8BIT, 0x3803, 0x02}, /* 2 */
+ {OV5693_8BIT, 0x3804, 0x09}, /* TIMING_X_ADDR_END */
+ {OV5693_8BIT, 0x3805, 0xdd}, /* 2525 */
+ {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
+ {OV5693_8BIT, 0x3807, 0xa1}, /* 1953 */
+ {OV5693_8BIT, 0x3808, 0x05}, /* TIMING_X_OUTPUT_SIZE */
+ {OV5693_8BIT, 0x3809, 0x90}, /* 1424 */
+ {OV5693_8BIT, 0x380a, 0x04}, /* TIMING_Y_OUTPUT_SIZE */
+ {OV5693_8BIT, 0x380b, 0x90}, /* 1168 */
+ {OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
+ {OV5693_8BIT, 0x3811, 0x02}, /* 2 */
+ {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
+ {OV5693_8BIT, 0x3813, 0x00}, /* 0 */
+ {OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
+ {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+#endif
+
+/*
+ * 3:2 Full FOV Output, expected FOV Res: 2560x1706
+ * ISP Effect Res: 720x480
+ * Sensor out: 736x496, DS From 2616x1764
+ */
+static struct ov5693_reg const ov5693_736x496_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
+ {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
+ {OV5693_8BIT, 0x3801, 0x02}, /* 2 */
+ {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
+ {OV5693_8BIT, 0x3803, 0x62}, /* 98 */
+ {OV5693_8BIT, 0x3804, 0x0a}, /* TIMING_X_ADDR_END */
+ {OV5693_8BIT, 0x3805, 0x3b}, /* 2619 */
+ {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
+ {OV5693_8BIT, 0x3807, 0x43}, /* 1859 */
+ {OV5693_8BIT, 0x3808, 0x02}, /* TIMING_X_OUTPUT_SIZE */
+ {OV5693_8BIT, 0x3809, 0xe0}, /* 736 */
+ {OV5693_8BIT, 0x380a, 0x01}, /* TIMING_Y_OUTPUT_SIZE */
+ {OV5693_8BIT, 0x380b, 0xf0}, /* 496 */
+ {OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
+ {OV5693_8BIT, 0x3811, 0x02}, /* 2 */
+ {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
+ {OV5693_8BIT, 0x3813, 0x00}, /* 0 */
+ {OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
+ {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_reg const ov5693_2576x1936_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0x00},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x0a},
+ {OV5693_8BIT, 0x3809, 0x10},
+ {OV5693_8BIT, 0x380a, 0x07},
+ {OV5693_8BIT, 0x380b, 0x90},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x18},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_resolution ov5693_res_preview[] = {
+ {
+ .desc = "ov5693_736x496_30fps",
+ .width = 736,
+ .height = 496,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_736x496_30fps,
+ },
+ {
+ .desc = "ov5693_1616x1216_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1616x1216_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2576,
+ .height = 1456,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2576x1456_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2576,
+ .height = 1936,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2576x1936_30fps,
+ },
+};
+
+#define N_RES_PREVIEW (ARRAY_SIZE(ov5693_res_preview))
+
+/*
+ * Disable non-preview configurations until the configuration selection is
+ * improved.
+ */
+#if ENABLE_NON_PREVIEW
+struct ov5693_resolution ov5693_res_still[] = {
+ {
+ .desc = "ov5693_736x496_30fps",
+ .width = 736,
+ .height = 496,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_736x496_30fps,
+ },
+ {
+ .desc = "ov5693_1424x1168_30fps",
+ .width = 1424,
+ .height = 1168,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1424x1168_30fps,
+ },
+ {
+ .desc = "ov5693_1616x1216_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1616x1216_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2592,
+ .height = 1456,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2592x1456_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2592,
+ .height = 1944,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2592x1944_30fps,
+ },
+};
+
+#define N_RES_STILL (ARRAY_SIZE(ov5693_res_still))
+
+struct ov5693_resolution ov5693_res_video[] = {
+ {
+ .desc = "ov5693_736x496_30fps",
+ .width = 736,
+ .height = 496,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 1,
+ .regs = ov5693_736x496,
+ },
+ {
+ .desc = "ov5693_336x256_30fps",
+ .width = 336,
+ .height = 256,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 1,
+ .regs = ov5693_336x256,
+ },
+ {
+ .desc = "ov5693_368x304_30fps",
+ .width = 368,
+ .height = 304,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 1,
+ .regs = ov5693_368x304,
+ },
+ {
+ .desc = "ov5693_192x160_30fps",
+ .width = 192,
+ .height = 160,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 1,
+ .regs = ov5693_192x160,
+ },
+ {
+ .desc = "ov5693_1296x736_30fps",
+ .width = 1296,
+ .height = 736,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 0,
+ .regs = ov5693_1296x736,
+ },
+ {
+ .desc = "ov5693_1296x976_30fps",
+ .width = 1296,
+ .height = 976,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 0,
+ .regs = ov5693_1296x976,
+ },
+ {
+ .desc = "ov5693_1636P_30fps",
+ .width = 1636,
+ .height = 1096,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1636p_30fps,
+ },
+ {
+ .desc = "ov5693_1080P_30fps",
+ .width = 1940,
+ .height = 1096,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1940x1096,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2592,
+ .height = 1456,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2592x1456_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2592,
+ .height = 1944,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2592x1944_30fps,
+ },
+};
+
+#define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video))
+#endif
+
+static struct ov5693_resolution *ov5693_res = ov5693_res_preview;
+static unsigned long N_RES = N_RES_PREVIEW;
+#endif
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm.h b/drivers/staging/media/atomisp/include/hmm/hmm.h
new file mode 100644
index 000000000000..254a71442451
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/hmm/hmm.h
@@ -0,0 +1,102 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __HMM_H__
+#define __HMM_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include "hmm/hmm_pool.h"
+#include "ia_css_types.h"
+
+#define HMM_CACHED true
+#define HMM_UNCACHED false
+
+int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type);
+void hmm_pool_unregister(enum hmm_pool_type pool_type);
+
+int hmm_init(void);
+void hmm_cleanup(void);
+
+ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
+ int from_highmem, const void __user *userptr, bool cached);
+void hmm_free(ia_css_ptr ptr);
+int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes);
+int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes);
+int hmm_set(ia_css_ptr virt, int c, unsigned int bytes);
+int hmm_flush(ia_css_ptr virt, unsigned int bytes);
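+
+/*
+ * Minimal usage sketch (illustrative only, not driver code): assuming
+ * hmm_init() has succeeded, a private, non-highmem, cached buffer can
+ * be allocated, accessed and freed like this:
+ *
+ *	ia_css_ptr p = hmm_alloc(PAGE_SIZE, HMM_BO_PRIVATE, 0,
+ *				 NULL, HMM_CACHED);
+ *	char data[16] = "hello";
+ *
+ *	if (p) {
+ *		hmm_store(p, data, sizeof(data));
+ *		hmm_load(p, data, sizeof(data));
+ *		hmm_free(p);
+ *	}
+ */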
+
+/*
+ * get kernel memory physical address from ISP virtual address.
+ */
+phys_addr_t hmm_virt_to_phys(ia_css_ptr virt);
+
+/*
+ * Map the ISP memory starting at virt to a kernel virtual address
+ * using vmap; returns NULL on failure.
+ *
+ * virt must be the start address of an ISP buffer (as returned by
+ * hmm_alloc); do not pass any other address.
+ */
+void *hmm_vmap(ia_css_ptr virt, bool cached);
+void hmm_vunmap(ia_css_ptr virt);
+
+/*
+ * Flush the cache for the vmapped buffer;
+ * if the buffer has not been vmapped, return immediately.
+ */
+void hmm_flush_vmap(ia_css_ptr virt);
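+
+/*
+ * Usage sketch (illustrative assumption, not driver code): CPU access
+ * to an ISP buffer through a temporary kernel mapping.
+ *
+ *	void *va = hmm_vmap(virt, HMM_CACHED);
+ *
+ *	if (va) {
+ *		memset(va, 0, bytes);
+ *		hmm_flush_vmap(virt);
+ *		hmm_vunmap(virt);
+ *	}
+ */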
+
+/*
+ * Translate an ISP shared memory address to a kernel virtual address;
+ * if the memory has not been vmapped yet, vmap it first.
+ */
+void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached);
+
+/*
+ * Address translation from kernel virtual address to ISP shared memory address
+ */
+ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr);
+
+/*
+ * Map the ISP memory starting at virt to a specific vma.
+ *
+ * Used for the mmap operation.
+ *
+ * virt must be the start address of an ISP buffer (as returned by
+ * hmm_alloc); do not pass any other address.
+ */
+int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt);
+
+/* show memory statistics */
+void hmm_show_mem_stat(const char *func, const int line);
+
+/* init memory statistics */
+void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr);
+
+extern bool dypool_enable;
+extern unsigned int dypool_pgnr;
+extern struct hmm_bo_device bo_device;
+
+#endif
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h
new file mode 100644
index 000000000000..f847d1de860e
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h
@@ -0,0 +1,315 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __HMM_BO_H__
+#define __HMM_BO_H__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include "mmu/isp_mmu.h"
+#include "hmm/hmm_common.h"
+#include "ia_css_types.h"
+
+#define check_bodev_null_return(bdev, exp) \
+ check_null_return(bdev, exp, \
+ "NULL hmm_bo_device.\n")
+
+#define check_bodev_null_return_void(bdev) \
+ check_null_return_void(bdev, \
+ "NULL hmm_bo_device.\n")
+
+#define check_bo_status_yes_goto(bo, _status, label) \
+ var_not_equal_goto((bo->status & (_status)), (_status), \
+ label, \
+ "HMM buffer status not contain %s.\n", \
+ #_status)
+
+#define check_bo_status_no_goto(bo, _status, label) \
+ var_equal_goto((bo->status & (_status)), (_status), \
+ label, \
+ "HMM buffer status contains %s.\n", \
+ #_status)
+
+#define rbtree_node_to_hmm_bo(root_node) \
+ container_of((root_node), struct hmm_buffer_object, node)
+
+#define list_to_hmm_bo(list_ptr) \
+ list_entry((list_ptr), struct hmm_buffer_object, list)
+
+#define kref_to_hmm_bo(kref_ptr) \
+ list_entry((kref_ptr), struct hmm_buffer_object, kref)
+
+#define check_bo_null_return(bo, exp) \
+ check_null_return(bo, exp, "NULL hmm buffer object.\n")
+
+#define check_bo_null_return_void(bo) \
+ check_null_return_void(bo, "NULL hmm buffer object.\n")
+
+#define HMM_MAX_ORDER 3
+#define HMM_MIN_ORDER 0
+
+#define ISP_VM_START 0x0
+#define ISP_VM_SIZE (0x7FFFFFFF) /* 2G address space */
+#define ISP_PTR_NULL NULL
+
+#define HMM_BO_DEVICE_INITED 0x1
+
+enum hmm_bo_type {
+ HMM_BO_PRIVATE,
+ HMM_BO_SHARE,
+ HMM_BO_USER,
+#ifdef CONFIG_ION
+ HMM_BO_ION,
+#endif
+ HMM_BO_LAST,
+};
+
+enum hmm_page_type {
+ HMM_PAGE_TYPE_RESERVED,
+ HMM_PAGE_TYPE_DYNAMIC,
+ HMM_PAGE_TYPE_GENERAL,
+};
+
+#define HMM_BO_MASK 0x1
+#define HMM_BO_FREE 0x0
+#define HMM_BO_ALLOCED 0x1
+#define HMM_BO_PAGE_ALLOCED 0x2
+#define HMM_BO_BINDED 0x4
+#define HMM_BO_MMAPED 0x8
+#define HMM_BO_VMAPED 0x10
+#define HMM_BO_VMAPED_CACHED 0x20
+#define HMM_BO_ACTIVE 0x1000
+#define HMM_BO_MEM_TYPE_USER 0x1
+#define HMM_BO_MEM_TYPE_PFN 0x2
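+
+/*
+ * Illustrative example (an assumption, not taken from the driver): a
+ * bo that is allocated, has pages and is bound to the ISP MMU carries
+ *
+ *	bo->status == (HMM_BO_ALLOCED | HMM_BO_PAGE_ALLOCED |
+ *		       HMM_BO_BINDED)
+ *
+ * plus whichever mapping bits (HMM_BO_MMAPED/HMM_BO_VMAPED*) apply;
+ * check_bo_status_yes_goto() tests such combinations in one call.
+ */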
+
+struct hmm_bo_device {
+ struct isp_mmu mmu;
+
+	/* start/pgnr/size are used to record the virtual memory of this bo */
+ unsigned int start;
+ unsigned int pgnr;
+ unsigned int size;
+
+ /* list lock is used to protect the entire_bo_list */
+ spinlock_t list_lock;
+#ifdef CONFIG_ION
+ struct ion_client *iclient;
+#endif
+ int flag;
+
+	/* linked list of all buffer objects */
+	struct list_head entire_bo_list;
+	/* rbtree maintaining all allocated vm */
+	struct rb_root allocated_rbtree;
+	/* rbtree maintaining all free vm */
+	struct rb_root free_rbtree;
+ struct mutex rbtree_mutex;
+ struct kmem_cache *bo_cache;
+};
+
+struct hmm_page_object {
+ struct page *page;
+ enum hmm_page_type type;
+};
+
+struct hmm_buffer_object {
+ struct hmm_bo_device *bdev;
+ struct list_head list;
+ struct kref kref;
+
+ /* mutex protecting this BO */
+ struct mutex mutex;
+ enum hmm_bo_type type;
+ struct hmm_page_object *page_obj; /* physical pages */
+ int from_highmem;
+ int mmap_count;
+#ifdef CONFIG_ION
+ struct ion_handle *ihandle;
+#endif
+ int status;
+ int mem_type;
+ void *vmap_addr; /* kernel virtual address by vmap */
+
+ struct rb_node node;
+ unsigned int start;
+ unsigned int end;
+ unsigned int pgnr;
+	/*
+	 * When a bo with the same pgnr as an existing bo node in the
+	 * free_rbtree is inserted, the "prev" and "next" pointers are
+	 * used to chain such bos into a linked list instead of
+	 * inserting the bo into the free_rbtree directly; this makes
+	 * sure that each node in the free_rbtree has a unique pgnr.
+	 * "prev" and "next" default to NULL.
+	 */
+ struct hmm_buffer_object *prev;
+ struct hmm_buffer_object *next;
+};
+
+struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
+ unsigned int pgnr);
+
+void hmm_bo_release(struct hmm_buffer_object *bo);
+
+int hmm_bo_device_init(struct hmm_bo_device *bdev,
+ struct isp_mmu_client *mmu_driver,
+ unsigned int vaddr_start, unsigned int size);
+
+/*
+ * clean up all hmm_bo_device related things.
+ */
+void hmm_bo_device_exit(struct hmm_bo_device *bdev);
+
+/*
+ * whether the bo device is inited or not.
+ */
+int hmm_bo_device_inited(struct hmm_bo_device *bdev);
+
+/*
+ * increase the buffer object reference count.
+ */
+void hmm_bo_ref(struct hmm_buffer_object *bo);
+
+/*
+ * decrease the buffer object reference count; when the count reaches
+ * 0, the release function of the buffer object is called.
+ *
+ * This call is also used to release an hmm_buffer_object, or the
+ * upper-level object it is embedded in; call this function when the
+ * object is no longer used.
+ *
+ * Note:
+ *
+ * Users do not need to care about releasing the internal resources of
+ * the buffer object in the release callback; that is handled
+ * internally.
+ *
+ * This call only releases the internal resources of the buffer
+ * object; it does not free the buffer object itself, since the
+ * buffer object may be either statically pre-allocated or
+ * dynamically allocated, so the user has to handle releasing the
+ * object itself. The examples below show normal usage of the
+ * buffer object.
+ *
+ * struct hmm_buffer_object *bo = hmm_bo_create(bdev, pgnr);
+ * ......
+ * hmm_bo_unref(bo);
+ *
+ * or:
+ *
+ * struct hmm_buffer_object bo;
+ *
+ * hmm_bo_init(bdev, &bo, pgnr, NULL);
+ * ...
+ * hmm_bo_unref(&bo);
+ */
+void hmm_bo_unref(struct hmm_buffer_object *bo);
+
+int hmm_bo_allocated(struct hmm_buffer_object *bo);
+
+/*
+ * allocate/free physical pages for the bo. Allocation will try
+ * highmem if from_highmem is set; type indicates whether the pages
+ * are allocated via the video driver (for shared buffers) or by the
+ * ISP driver itself.
+ */
+int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
+ enum hmm_bo_type type, int from_highmem,
+ const void __user *userptr, bool cached);
+void hmm_bo_free_pages(struct hmm_buffer_object *bo);
+int hmm_bo_page_allocated(struct hmm_buffer_object *bo);
+
+/*
+ * get physical page info of the bo.
+ */
+int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
+ struct hmm_page_object **page_obj, int *pgnr);
+
+/*
+ * bind/unbind the physical pages to a virtual address space.
+ */
+int hmm_bo_bind(struct hmm_buffer_object *bo);
+void hmm_bo_unbind(struct hmm_buffer_object *bo);
+int hmm_bo_binded(struct hmm_buffer_object *bo);
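+
+/*
+ * Typical bo lifecycle (illustrative sketch only; assumes the
+ * alloc/bind helpers return 0 on success):
+ *
+ *	struct hmm_buffer_object *bo = hmm_bo_alloc(bdev, pgnr);
+ *
+ *	if (bo && !hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL,
+ *				      true) && !hmm_bo_bind(bo)) {
+ *		... use bo->start as the ISP virtual address ...
+ *		hmm_bo_unbind(bo);
+ *		hmm_bo_free_pages(bo);
+ *	}
+ *	if (bo)
+ *		hmm_bo_unref(bo);
+ */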
+
+/*
+ * vmap the buffer object's pages to a contiguous kernel virtual address;
+ * if the buffer has already been vmapped, return that address directly.
+ */
+void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);
+
+/*
+ * flush the cache for the vmapped buffer object's pages;
+ * if the buffer has not been vmapped, return immediately.
+ */
+void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);
+
+/*
+ * vunmap buffer object's kernel virtual address.
+ */
+void hmm_bo_vunmap(struct hmm_buffer_object *bo);
+
+/*
+ * mmap the bo's physical pages to specific vma.
+ *
+ * vma's address space size must be the same as bo's size,
+ * otherwise it will return -EINVAL.
+ *
+ * vma->vm_flags will be set to (VM_RESERVED | VM_IO).
+ */
+int hmm_bo_mmap(struct vm_area_struct *vma,
+ struct hmm_buffer_object *bo);
+
+extern struct hmm_pool dynamic_pool;
+extern struct hmm_pool reserved_pool;
+
+/*
+ * find the buffer object by its virtual address vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_start(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr);
+
+/*
+ * find the buffer object by its virtual address; the address does not
+ * need to be the start address of a bo, it can be any address within
+ * the range of one. return NULL if no such buffer object is found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_in_range(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr);
+
+/*
+ * find the buffer object with kernel virtual address vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
+ struct hmm_bo_device *bdev, const void *vaddr);
+
+#endif
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_common.h b/drivers/staging/media/atomisp/include/hmm/hmm_common.h
new file mode 100644
index 000000000000..00885203fb14
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/hmm/hmm_common.h
@@ -0,0 +1,96 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __HMM_BO_COMMON_H__
+#define __HMM_BO_COMMON_H__
+
+#define HMM_BO_NAME "HMM"
+
+/*
+ * some commonly used macros
+ */
+#define var_equal_return(var1, var2, exp, fmt, arg ...) \
+ do { \
+ if ((var1) == (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ return exp;\
+ } \
+ } while (0)
+
+#define var_equal_return_void(var1, var2, fmt, arg ...) \
+ do { \
+ if ((var1) == (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ return;\
+ } \
+ } while (0)
+
+#define var_equal_goto(var1, var2, label, fmt, arg ...) \
+ do { \
+ if ((var1) == (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ goto label;\
+ } \
+ } while (0)
+
+#define var_not_equal_goto(var1, var2, label, fmt, arg ...) \
+ do { \
+ if ((var1) != (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ goto label;\
+ } \
+ } while (0)
+
+#define check_null_return(ptr, exp, fmt, arg ...) \
+ var_equal_return(ptr, NULL, exp, fmt, ## arg)
+
+#define check_null_return_void(ptr, fmt, arg ...) \
+ var_equal_return_void(ptr, NULL, fmt, ## arg)
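+
+/*
+ * Usage sketch (illustrative): these macros wrap the common
+ * "validate or bail out" pattern, e.g.
+ *
+ *	check_null_return(bdev, -EINVAL, "NULL hmm_bo_device.\n");
+ *
+ * logs via dev_err() and returns -EINVAL when bdev is NULL.
+ */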
+
+/* hmm_mem_stat is used to trace the hmm memory used by the ISP pipe.
+ * The unit is pages.
+ *
+ * res_size: reserved mem pool size, allocated from the system at boot time.
+ *	res_size >= res_cnt.
+ * sys_size: system mem pool size, allocated from the system while the camera runs.
+ * dyc_size: dynamic mem pool size.
+ * dyc_thr: dynamic mem pool high watermark.
+ *	dyc_size <= dyc_thr.
+ * usr_size: user ptr mem size.
+ *
+ * res_cnt: tracks the mem allocated from the reserved pool while the camera runs.
+ * tol_cnt: tracks the total mem used by the ISP pipe while the camera runs.
+ */
+struct _hmm_mem_stat {
+ int res_size;
+ int sys_size;
+ int dyc_size;
+ int dyc_thr;
+ int usr_size;
+ int res_cnt;
+ int tol_cnt;
+};
+
+extern struct _hmm_mem_stat hmm_mem_stat;
+
+#endif
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_pool.h b/drivers/staging/media/atomisp/include/hmm/hmm_pool.h
new file mode 100644
index 000000000000..8caf00502d74
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/hmm/hmm_pool.h
@@ -0,0 +1,115 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef __HMM_POOL_H__
+#define __HMM_POOL_H__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include "hmm_common.h"
+#include "hmm/hmm_bo.h"
+
+#define ALLOC_PAGE_FAIL_NUM 5
+
+enum hmm_pool_type {
+ HMM_POOL_TYPE_RESERVED,
+ HMM_POOL_TYPE_DYNAMIC,
+};
+
+/**
+ * struct hmm_pool_ops - memory pool callbacks.
+ *
+ * @pool_init: initialize the memory pool.
+ * @pool_exit: uninitialize the memory pool.
+ * @pool_alloc_pages: allocate pages from memory pool.
+ * @pool_free_pages: free pages to memory pool.
+ * @pool_inited: check whether memory pool is initialized.
+ */
+struct hmm_pool_ops {
+ int (*pool_init)(void **pool, unsigned int pool_size);
+ void (*pool_exit)(void **pool);
+ unsigned int (*pool_alloc_pages)(void *pool,
+ struct hmm_page_object *page_obj,
+ unsigned int size, bool cached);
+ void (*pool_free_pages)(void *pool,
+ struct hmm_page_object *page_obj);
+ int (*pool_inited)(void *pool);
+};
+
+struct hmm_pool {
+ struct hmm_pool_ops *pops;
+
+ void *pool_info;
+};
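+
+/*
+ * A pool is driven purely through its ops table; sketch (an
+ * assumption, not driver code; presumes reserved_pool, declared in
+ * hmm_bo.h, has been set up with reserved_pops):
+ *
+ *	struct hmm_pool *pool = &reserved_pool;
+ *
+ *	if (pool->pops->pool_inited(pool->pool_info))
+ *		pool->pops->pool_free_pages(pool->pool_info, page_obj);
+ */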
+
+/**
+ * struct hmm_reserved_pool_info - represents reserved pool private data.
+ * @pages:			an array that stores the physical pages;
+ *				the array serves as the reserved memory pool.
+ * @index:			index of the first free page in the
+ *				reserved memory pool (pages array).
+ * @pgnr:			number of valid pages in the reserved
+ *				memory pool.
+ * @list_lock:			protects operations on the reserved
+ *				memory pool.
+ * @initialized:		reserved memory pool state flag.
+ */
+struct hmm_reserved_pool_info {
+ struct page **pages;
+
+ unsigned int index;
+ unsigned int pgnr;
+ spinlock_t list_lock;
+ bool initialized;
+};
+
+/**
+ * struct hmm_dynamic_pool_info - represents dynamic pool private data.
+ * @pages_list:			a list that stores the physical pages;
+ *				the list serves as the dynamic memory pool.
+ * @list_lock:			protects operations on the dynamic
+ *				memory pool.
+ * @initialized:		dynamic memory pool state flag.
+ * @pgptr_cache:		struct kmem_cache managing a cache of
+ *				page-pointer entries.
+ */
+struct hmm_dynamic_pool_info {
+ struct list_head pages_list;
+
+ /* list lock is used to protect the free pages block lists */
+ spinlock_t list_lock;
+
+ struct kmem_cache *pgptr_cache;
+ bool initialized;
+
+ unsigned int pool_size;
+ unsigned int pgnr;
+};
+
+struct hmm_page {
+ struct page *page;
+ struct list_head list;
+};
+
+extern struct hmm_pool_ops reserved_pops;
+extern struct hmm_pool_ops dynamic_pops;
+
+#endif
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_vm.h b/drivers/staging/media/atomisp/include/hmm/hmm_vm.h
new file mode 100644
index 000000000000..93ac5e445137
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/hmm/hmm_vm.h
@@ -0,0 +1,65 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __HMM_VM_H__
+#define __HMM_VM_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+struct hmm_vm {
+ unsigned int start;
+ unsigned int pgnr;
+ unsigned int size;
+ struct list_head vm_node_list;
+ spinlock_t lock;
+ struct kmem_cache *cache;
+};
+
+struct hmm_vm_node {
+ struct list_head list;
+ unsigned int start;
+ unsigned int pgnr;
+ unsigned int size;
+ struct hmm_vm *vm;
+};
+
+#define ISP_VM_START 0x0
+#define ISP_VM_SIZE (0x7FFFFFFF) /* 2G address space */
+#define ISP_PTR_NULL NULL
+
+int hmm_vm_init(struct hmm_vm *vm, unsigned int start,
+ unsigned int size);
+
+void hmm_vm_clean(struct hmm_vm *vm);
+
+struct hmm_vm_node *hmm_vm_alloc_node(struct hmm_vm *vm,
+ unsigned int pgnr);
+
+void hmm_vm_free_node(struct hmm_vm_node *node);
+
+struct hmm_vm_node *hmm_vm_find_node_start(struct hmm_vm *vm,
+ unsigned int addr);
+
+struct hmm_vm_node *hmm_vm_find_node_in_range(struct hmm_vm *vm,
+ unsigned int addr);
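+
+/*
+ * Usage sketch (illustrative, assuming hmm_vm_init() returned 0):
+ * carve a node out of the ISP virtual space and release it again.
+ *
+ *	struct hmm_vm_node *node = hmm_vm_alloc_node(vm, pgnr);
+ *
+ *	if (node) {
+ *		... program the MMU for the range starting at
+ *		    node->start ...
+ *		hmm_vm_free_node(node);
+ *	}
+ */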
+
+#endif
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
new file mode 100644
index 000000000000..e9670749bae0
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -0,0 +1,1359 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef _ATOM_ISP_H
+#define _ATOM_ISP_H
+
+#include <linux/types.h>
+#include <linux/version.h>
+
+/* struct media_device_info.hw_revision */
+#define ATOMISP_HW_REVISION_MASK 0x0000ff00
+#define ATOMISP_HW_REVISION_SHIFT 8
+#define ATOMISP_HW_REVISION_ISP2300 0x00
+#define ATOMISP_HW_REVISION_ISP2400 0x10
+#define ATOMISP_HW_REVISION_ISP2401_LEGACY 0x11
+#define ATOMISP_HW_REVISION_ISP2401 0x20
+
+#define ATOMISP_HW_STEPPING_MASK 0x000000ff
+#define ATOMISP_HW_STEPPING_A0 0x00
+#define ATOMISP_HW_STEPPING_B0 0x10
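+
+/*
+ * Decoding example (illustrative): given a struct media_device_info
+ * mdi filled in by the driver, revision and stepping are extracted as
+ *
+ *	u8 rev = (mdi.hw_revision & ATOMISP_HW_REVISION_MASK) >>
+ *		 ATOMISP_HW_REVISION_SHIFT;
+ *	u8 step = mdi.hw_revision & ATOMISP_HW_STEPPING_MASK;
+ *
+ * so rev == ATOMISP_HW_REVISION_ISP2401 identifies an ISP2401.
+ */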
+
+/*ISP binary running mode*/
+#define CI_MODE_PREVIEW 0x8000
+#define CI_MODE_VIDEO 0x4000
+#define CI_MODE_STILL_CAPTURE 0x2000
+#define CI_MODE_CONTINUOUS 0x1000
+#define CI_MODE_NONE 0x0000
+
+#define OUTPUT_MODE_FILE 0x0100
+#define OUTPUT_MODE_TEXT 0x0200
+
+/*
+ * Camera HAL sets this flag in v4l2_buffer reserved2 to indicate this
+ * buffer has a per-frame parameter.
+ */
+#define ATOMISP_BUFFER_HAS_PER_FRAME_SETTING 0x80000000
+
+/* Custom format for RAW capture from M10MO 0x3130314d */
+#define V4L2_PIX_FMT_CUSTOM_M10MO_RAW v4l2_fourcc('M', '1', '0', '1')
+
+/* Custom media bus formats being used in atomisp */
+#define V4L2_MBUS_FMT_CUSTOM_YUV420 0x8001
+#define V4L2_MBUS_FMT_CUSTOM_YVU420 0x8002
+#define V4L2_MBUS_FMT_CUSTOM_YUV422P 0x8003
+#define V4L2_MBUS_FMT_CUSTOM_YUV444 0x8004
+#define V4L2_MBUS_FMT_CUSTOM_NV12 0x8005
+#define V4L2_MBUS_FMT_CUSTOM_NV21 0x8006
+#define V4L2_MBUS_FMT_CUSTOM_NV16 0x8007
+#define V4L2_MBUS_FMT_CUSTOM_YUYV 0x8008
+#define V4L2_MBUS_FMT_CUSTOM_SBGGR16 0x8009
+#define V4L2_MBUS_FMT_CUSTOM_RGB32 0x800a
+
+/* Custom media bus format for M10MO RAW capture */
+#if 0
+#define V4L2_MBUS_FMT_CUSTOM_M10MO_RAW 0x800b
+#endif
+
+/* FIXME: for now, let's use a boolean to identify the type of atomisp chipset */
+extern bool atomisp_hw_is_isp2401;
+
+/* Configuration used by Bayer noise reduction and YCC noise reduction */
+struct atomisp_nr_config {
+ /* [gain] Strength of noise reduction for Bayer NR (Used by Bayer NR) */
+ unsigned int bnr_gain;
+ /* [gain] Strength of noise reduction for YCC NR (Used by YCC NR) */
+ unsigned int ynr_gain;
+ /* [intensity] Sensitivity of Edge (Used by Bayer NR) */
+ unsigned int direction;
+ /* [intensity] coring threshold for Cb (Used by YCC NR) */
+ unsigned int threshold_cb;
+ /* [intensity] coring threshold for Cr (Used by YCC NR) */
+ unsigned int threshold_cr;
+};
+
+/* Temporal noise reduction configuration */
+struct atomisp_tnr_config {
+ unsigned int gain; /* [gain] Strength of NR */
+ unsigned int threshold_y;/* [intensity] Motion sensitivity for Y */
+ unsigned int threshold_uv;/* [intensity] Motion sensitivity for U/V */
+};
+
+/* Histogram. This contains num_elements values of type unsigned int.
+ * The data pointer is a DDR pointer (virtual address).
+ */
+struct atomisp_histogram {
+ unsigned int num_elements;
+ void __user *data;
+};
+
+enum atomisp_ob_mode {
+ atomisp_ob_mode_none,
+ atomisp_ob_mode_fixed,
+ atomisp_ob_mode_raster
+};
+
+/* Optical black level configuration */
+struct atomisp_ob_config {
+	/* Optical black level mode (Fixed / Raster) */
+ enum atomisp_ob_mode mode;
+ /* [intensity] optical black level for GR (relevant for fixed mode) */
+ unsigned int level_gr;
+ /* [intensity] optical black level for R (relevant for fixed mode) */
+ unsigned int level_r;
+ /* [intensity] optical black level for B (relevant for fixed mode) */
+ unsigned int level_b;
+ /* [intensity] optical black level for GB (relevant for fixed mode) */
+ unsigned int level_gb;
+ /* [BQ] 0..63 start position of OB area (relevant for raster mode) */
+ unsigned short start_position;
+ /* [BQ] start..63 end position of OB area (relevant for raster mode) */
+ unsigned short end_position;
+};
+
+/* Edge enhancement (sharpen) configuration */
+struct atomisp_ee_config {
+ /* [gain] The strength of sharpness. u5_11 */
+ unsigned int gain;
+ /* [intensity] The threshold that divides noises from edge. u8_8 */
+ unsigned int threshold;
+ /* [gain] The strength of sharpness in pell-mell area. u5_11 */
+ unsigned int detail_gain;
+};
+
+struct atomisp_3a_output {
+ int ae_y;
+ int awb_cnt;
+ int awb_gr;
+ int awb_r;
+ int awb_b;
+ int awb_gb;
+ int af_hpf1;
+ int af_hpf2;
+};
+
+enum atomisp_calibration_type {
+ calibration_type1,
+ calibration_type2,
+ calibration_type3
+};
+
+struct atomisp_calibration_group {
+ unsigned int size;
+ unsigned int type;
+ unsigned short *calb_grp_values;
+};
+
+struct atomisp_gc_config {
+ __u16 gain_k1;
+ __u16 gain_k2;
+};
+
+struct atomisp_3a_config {
+ unsigned int ae_y_coef_r; /* [gain] Weight of R for Y */
+ unsigned int ae_y_coef_g; /* [gain] Weight of G for Y */
+ unsigned int ae_y_coef_b; /* [gain] Weight of B for Y */
+ unsigned int awb_lg_high_raw; /* [intensity]
+ AWB level gate high for raw */
+ unsigned int awb_lg_low; /* [intensity] AWB level gate low */
+ unsigned int awb_lg_high; /* [intensity] AWB level gate high */
+ int af_fir1_coef[7]; /* [factor] AF FIR coefficients of fir1 */
+ int af_fir2_coef[7]; /* [factor] AF FIR coefficients of fir2 */
+};
+
+struct atomisp_dvs_grid_info {
+ u32 enable;
+ u32 width;
+ u32 aligned_width;
+ u32 height;
+ u32 aligned_height;
+ u32 bqs_per_grid_cell;
+ u32 num_hor_coefs;
+ u32 num_ver_coefs;
+};
+
+struct atomisp_dvs_envelop {
+ unsigned int width;
+ unsigned int height;
+};
+
+struct atomisp_grid_info {
+ u32 enable;
+ u32 use_dmem;
+ u32 has_histogram;
+ u32 s3a_width;
+ u32 s3a_height;
+ u32 aligned_width;
+ u32 aligned_height;
+ u32 s3a_bqs_per_grid_cell;
+ u32 deci_factor_log2;
+ u32 elem_bit_depth;
+};
+
+struct atomisp_dis_vector {
+ int x;
+ int y;
+};
+
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
+ * arrays that contain the coefficients for each type.
+ */
+struct atomisp_dvs2_coef_types {
+ short __user *odd_real; /** real part of the odd coefficients*/
+ short __user *odd_imag; /** imaginary part of the odd coefficients*/
+ short __user *even_real;/** real part of the even coefficients*/
+ short __user *even_imag;/** imaginary part of the even coefficients*/
+};
+
+/*
+ * DVS 2.0 Statistic types. This structure contains 4 pointers to
+ * arrays that contain the statistics for each type.
+ */
+struct atomisp_dvs2_stat_types {
+ int __user *odd_real; /** real part of the odd statistics*/
+ int __user *odd_imag; /** imaginary part of the odd statistics*/
+ int __user *even_real;/** real part of the even statistics*/
+ int __user *even_imag;/** imaginary part of the even statistics*/
+};
+
+struct atomisp_dis_coefficients {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_coef_types hor_coefs;
+ struct atomisp_dvs2_coef_types ver_coefs;
+};
+
+struct atomisp_dvs2_statistics {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_stat_types hor_prod;
+ struct atomisp_dvs2_stat_types ver_prod;
+};
+
+struct atomisp_dis_statistics {
+ struct atomisp_dvs2_statistics dvs2_stat;
+ u32 exp_id;
+};
+
+struct atomisp_3a_rgby_output {
+ u32 r;
+ u32 g;
+ u32 b;
+ u32 y;
+};
+
+/*
+ * Because at most 2 pipes can output metadata, the driver uses
+ * ATOMISP_MAIN_METADATA for the metadata from the pipe that always
+ * keeps streaming, and ATOMISP_SEC_METADATA for the secondary metadata
+ * from the pipe that streams on request, such as the capture pipe in
+ * ZSL or SDV mode. For use cases with only a single streaming pipe,
+ * such as online capture, ATOMISP_MAIN_METADATA is used.
+ */
+enum atomisp_metadata_type {
+ ATOMISP_MAIN_METADATA = 0,
+ ATOMISP_SEC_METADATA,
+ ATOMISP_METADATA_TYPE_NUM,
+};
+
+struct atomisp_metadata_with_type {
+ /* to specify which type of metadata to get */
+ enum atomisp_metadata_type type;
+ void __user *data;
+ u32 width;
+ u32 height;
+ u32 stride; /* in bytes */
+ u32 exp_id; /* exposure ID */
+ u32 *effective_width; /* mipi packets valid data size */
+};
+
+struct atomisp_metadata {
+ void __user *data;
+ u32 width;
+ u32 height;
+ u32 stride; /* in bytes */
+ u32 exp_id; /* exposure ID */
+ u32 *effective_width; /* mipi packets valid data size */
+};
+
+struct atomisp_ext_isp_ctrl {
+ u32 id;
+ u32 data;
+};
+
+struct atomisp_3a_statistics {
+ struct atomisp_grid_info grid_info;
+ struct atomisp_3a_output __user *data;
+ struct atomisp_3a_rgby_output __user *rgby_data;
+ u32 exp_id; /* exposure ID */
+ u32 isp_config_id; /* isp config ID */
+};
+
+/**
+ * struct atomisp_cont_capture_conf - continuous capture parameters
+ * @num_captures: number of still images to capture
+ * @skip_frames: number of frames to skip between 2 captures
+ * @offset: offset in ring buffer to start capture
+ *
+ * For example, to capture 1 frame from past, current, and 1 from future
+ * and skip one frame between each capture, parameters would be:
+ * num_captures:3
+ * skip_frames:1
+ * offset:-2
+ */
+struct atomisp_cont_capture_conf {
+ int num_captures;
+ unsigned int skip_frames;
+ int offset;
+ __u32 reserved[5];
+};
+
+struct atomisp_ae_window {
+ int x_left;
+ int x_right;
+ int y_top;
+ int y_bottom;
+ int weight;
+};
+
+/* White Balance (Gain Adjust) */
+struct atomisp_wb_config {
+ unsigned int integer_bits;
+ unsigned int gr; /* unsigned <integer_bits>.<16-integer_bits> */
+ unsigned int r; /* unsigned <integer_bits>.<16-integer_bits> */
+ unsigned int b; /* unsigned <integer_bits>.<16-integer_bits> */
+ unsigned int gb; /* unsigned <integer_bits>.<16-integer_bits> */
+};
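+
+/*
+ * Worked example (illustrative): with integer_bits == 1 the gains are
+ * u1.15 fixed point, so a unity gain of 1.0 is encoded as
+ * 1 << 15 == 0x8000, and a gain of 1.5 as 0xC000.
+ */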
+
+/* Color Space Conversion settings */
+struct atomisp_cc_config {
+ unsigned int fraction_bits;
+ int matrix[3 * 3]; /* RGB2YUV Color matrix, signed
+ <13-fraction_bits>.<fraction_bits> */
+};
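+
+/*
+ * Worked example (illustrative): with fraction_bits == 8 the BT.601
+ * luma row (0.299, 0.587, 0.114) is stored, rounded, as
+ * {77, 150, 29} (coefficient * 256).
+ */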
+
+/* De pixel noise configuration */
+struct atomisp_de_config {
+ unsigned int pixelnoise;
+ unsigned int c1_coring_threshold;
+ unsigned int c2_coring_threshold;
+};
+
+/* Chroma enhancement */
+struct atomisp_ce_config {
+ unsigned char uv_level_min;
+ unsigned char uv_level_max;
+};
+
+/* Defect pixel correction configuration */
+struct atomisp_dp_config {
+ /* [intensity] The threshold of defect Pixel Correction, representing
+ * the permissible difference of intensity between one pixel and its
+ * surrounding pixels. Smaller values result in more frequent pixel
+ * corrections. u0_16
+ */
+ unsigned int threshold;
+ /* [gain] The sensitivity of mis-correction. ISP will miss a lot of
+ * defects if the value is set too large. u8_8
+ */
+ unsigned int gain;
+ unsigned int gr;
+ unsigned int r;
+ unsigned int b;
+ unsigned int gb;
+};
+
+/* XNR threshold */
+struct atomisp_xnr_config {
+ __u16 threshold;
+};
+
+/* metadata config */
+struct atomisp_metadata_config {
+ u32 metadata_height;
+ u32 metadata_stride;
+};
+
+/*
+ * Generic resolution structure.
+ */
+struct atomisp_resolution {
+ u32 width; /** Width */
+ u32 height; /** Height */
+};
+
+/*
+ * This specifies the coordinates (x,y)
+ */
+struct atomisp_zoom_point {
+ s32 x; /** x coordinate */
+ s32 y; /** y coordinate */
+};
+
+/*
+ * This specifies the region
+ */
+struct atomisp_zoom_region {
+ struct atomisp_zoom_point
+ origin; /* Starting point coordinates for the region */
+ struct atomisp_resolution resolution; /* Region resolution */
+};
+
+struct atomisp_dz_config {
+ u32 dx; /** Horizontal zoom factor */
+ u32 dy; /** Vertical zoom factor */
+ struct atomisp_zoom_region zoom_region; /** region for zoom */
+};
+
+struct atomisp_parm {
+ struct atomisp_grid_info info;
+ struct atomisp_dvs_grid_info dvs_grid;
+ struct atomisp_dvs_envelop dvs_envelop;
+ struct atomisp_wb_config wb_config;
+ struct atomisp_cc_config cc_config;
+ struct atomisp_ob_config ob_config;
+ struct atomisp_de_config de_config;
+ struct atomisp_dz_config dz_config;
+ struct atomisp_ce_config ce_config;
+ struct atomisp_dp_config dp_config;
+ struct atomisp_nr_config nr_config;
+ struct atomisp_ee_config ee_config;
+ struct atomisp_tnr_config tnr_config;
+ struct atomisp_metadata_config metadata_config;
+};
+
+struct dvs2_bq_resolution {
+ int width_bq; /* width [BQ] */
+ int height_bq; /* height [BQ] */
+};
+
+struct atomisp_dvs2_bq_resolutions {
+ /* GDC source image size [BQ] */
+ struct dvs2_bq_resolution source_bq;
+ /* GDC output image size [BQ] */
+ struct dvs2_bq_resolution output_bq;
+ /* GDC effective envelope size [BQ] */
+ struct dvs2_bq_resolution envelope_bq;
+ /* isp pipe filter size [BQ] */
+ struct dvs2_bq_resolution ispfilter_bq;
+	/* GDC shift size [BQ] */
+ struct dvs2_bq_resolution gdc_shift_bq;
+};
+
+struct atomisp_dvs_6axis_config {
+ u32 exp_id;
+ u32 width_y;
+ u32 height_y;
+ u32 width_uv;
+ u32 height_uv;
+ u32 *xcoords_y;
+ u32 *ycoords_y;
+ u32 *xcoords_uv;
+ u32 *ycoords_uv;
+};
+
+struct atomisp_formats_config {
+ u32 video_full_range_flag;
+};
+
+struct atomisp_parameters {
+ struct atomisp_wb_config *wb_config; /* White Balance config */
+ struct atomisp_cc_config *cc_config; /* Color Correction config */
+ struct atomisp_tnr_config *tnr_config; /* Temporal Noise Reduction */
+ struct atomisp_ecd_config *ecd_config; /* Eigen Color Demosaicing */
+ struct atomisp_ynr_config *ynr_config; /* Y(Luma) Noise Reduction */
+ struct atomisp_fc_config *fc_config; /* Fringe Control */
+ struct atomisp_formats_config *formats_config; /* Formats Control */
+ struct atomisp_cnr_config *cnr_config; /* Chroma Noise Reduction */
+ struct atomisp_macc_config *macc_config; /* MACC */
+ struct atomisp_ctc_config *ctc_config; /* Chroma Tone Control */
+ struct atomisp_aa_config *aa_config; /* Anti-Aliasing */
+ struct atomisp_aa_config *baa_config; /* Anti-Aliasing */
+ struct atomisp_ce_config *ce_config;
+ struct atomisp_dvs_6axis_config *dvs_6axis_config;
+	struct atomisp_ob_config *ob_config;  /* Optical Black config */
+ struct atomisp_dp_config *dp_config; /* Dead Pixel config */
+ struct atomisp_nr_config *nr_config; /* Noise Reduction config */
+ struct atomisp_ee_config *ee_config; /* Edge Enhancement config */
+ struct atomisp_de_config *de_config; /* Demosaic config */
+ struct atomisp_gc_config *gc_config; /* Gamma Correction config */
+ struct atomisp_anr_config *anr_config; /* Advanced Noise Reduction */
+ struct atomisp_3a_config *a3a_config; /* 3A Statistics config */
+ struct atomisp_xnr_config *xnr_config; /* eXtra Noise Reduction */
+ struct atomisp_dz_config *dz_config; /* Digital Zoom */
+ struct atomisp_cc_config *yuv2rgb_cc_config; /* Color
+ Correction config */
+ struct atomisp_cc_config *rgb2yuv_cc_config; /* Color
+ Correction config */
+ struct atomisp_macc_table *macc_table;
+ struct atomisp_gamma_table *gamma_table;
+ struct atomisp_ctc_table *ctc_table;
+ struct atomisp_xnr_table *xnr_table;
+ struct atomisp_rgb_gamma_table *r_gamma_table;
+ struct atomisp_rgb_gamma_table *g_gamma_table;
+ struct atomisp_rgb_gamma_table *b_gamma_table;
+ struct atomisp_vector *motion_vector; /* For 2-axis DVS */
+ struct atomisp_shading_table *shading_table;
+ struct atomisp_morph_table *morph_table;
+ struct atomisp_dvs_coefficients *dvs_coefs; /* DVS 1.0 coefficients */
+ struct atomisp_dvs2_coefficients *dvs2_coefs; /* DVS 2.0 coefficients */
+ struct atomisp_capture_config *capture_config;
+ struct atomisp_anr_thres *anr_thres;
+
+ void *lin_2500_config; /* Skylake: Linearization config */
+ void *obgrid_2500_config; /* Skylake: OBGRID config */
+ void *bnr_2500_config; /* Skylake: bayer denoise config */
+ void *shd_2500_config; /* Skylake: shading config */
+ void *dm_2500_config; /* Skylake: demosaic config */
+ void *rgbpp_2500_config; /* Skylake: RGBPP config */
+ void *dvs_stat_2500_config; /* Skylake: DVS STAT config */
+ void *lace_stat_2500_config; /* Skylake: LACE STAT config */
+ void *yuvp1_2500_config; /* Skylake: yuvp1 config */
+ void *yuvp2_2500_config; /* Skylake: yuvp2 config */
+ void *tnr_2500_config; /* Skylake: TNR config */
+ void *dpc_2500_config; /* Skylake: DPC config */
+ void *awb_2500_config; /* Skylake: auto white balance config */
+ void *awb_fr_2500_config; /* Skylake: auto white balance filter response config */
+ void *anr_2500_config; /* Skylake: ANR config */
+ void *af_2500_config; /* Skylake: auto focus config */
+ void *ae_2500_config; /* Skylake: auto exposure config */
+ void *bds_2500_config; /* Skylake: bayer downscaler config */
+ void *dvs_2500_config; /* Skylake: digital video stabilization config */
+ void *res_mgr_2500_config;
+
+ /*
+	 * Output frame pointer the config is to be applied to (optional);
+	 * set to NULL to apply this config globally.
+ */
+ void *output_frame;
+ /*
+ * Unique ID to track which config was actually applied to a particular
+	 * frame; the driver sends this ID back together with the output frame.
+ */
+ u32 isp_config_id;
+
+ /*
+ * Switch to control per_frame setting:
+ * 0: this is a global setting
+ * 1: this is a per_frame setting
+ * PLEASE KEEP THIS AT THE END OF THE STRUCTURE!!
+ */
+ u32 per_frame_setting;
+};
+
+#define ATOMISP_GAMMA_TABLE_SIZE 1024
+struct atomisp_gamma_table {
+ unsigned short data[ATOMISP_GAMMA_TABLE_SIZE];
+};
+
+/* Morphing table for advanced ISP.
+ * Each line of width elements takes up COORD_TABLE_EXT_WIDTH elements
+ * in memory.
+ */
+#define ATOMISP_MORPH_TABLE_NUM_PLANES 6
+struct atomisp_morph_table {
+ unsigned int enabled;
+
+ unsigned int height;
+ unsigned int width; /* number of valid elements per line */
+ unsigned short __user *coordinates_x[ATOMISP_MORPH_TABLE_NUM_PLANES];
+ unsigned short __user *coordinates_y[ATOMISP_MORPH_TABLE_NUM_PLANES];
+};
+
+#define ATOMISP_NUM_SC_COLORS 4
+#define ATOMISP_SC_FLAG_QUERY BIT(0)
+
+struct atomisp_shading_table {
+ __u32 enable;
+
+ __u32 sensor_width;
+ __u32 sensor_height;
+ __u32 width;
+ __u32 height;
+ __u32 fraction_bits;
+
+ __u16 *data[ATOMISP_NUM_SC_COLORS];
+};
+
+struct atomisp_makernote_info {
+ /* bits 31-16: numerator, bits 15-0: denominator */
+ unsigned int focal_length;
+ /* bits 31-16: numerator, bits 15-0: denominator*/
+ unsigned int f_number_curr;
+ /*
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+ unsigned int f_number_range;
+};
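+
+/*
+ * Packing example (illustrative): an aperture of f/2.8 expressed as
+ * the fraction 28/10 gives f_number_curr = (28 << 16) | 10; a range
+ * of f/2.8 (min, 28/10) to f/8 (max, 8/1) gives
+ * f_number_range = (8 << 24) | (1 << 16) | (28 << 8) | 10.
+ */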
+
+/* parameter for MACC */
+#define ATOMISP_NUM_MACC_AXES 16
+struct atomisp_macc_table {
+ short data[4 * ATOMISP_NUM_MACC_AXES];
+};
+
+struct atomisp_macc_config {
+ int color_effect;
+ struct atomisp_macc_table table;
+};
+
+/* Parameters for CTC (Chroma Tone Control) */
+#define ATOMISP_CTC_TABLE_SIZE 1024
+struct atomisp_ctc_table {
+ unsigned short data[ATOMISP_CTC_TABLE_SIZE];
+};
+
+/* Parameter for overlay image loading */
+struct atomisp_overlay {
+	/* the frame containing the overlay data. The overlay frame width
+	 * should be a multiple of 2*ISP_VEC_NELEMS, and the overlay frame
+	 * height a multiple of 2.
+	 */
+ struct v4l2_framebuffer *frame;
+ /* Y value of overlay background */
+ unsigned char bg_y;
+ /* U value of overlay background */
+ char bg_u;
+ /* V value of overlay background */
+ char bg_v;
+ /* the blending percent of input data for Y subpixels */
+ unsigned char blend_input_perc_y;
+ /* the blending percent of input data for U subpixels */
+ unsigned char blend_input_perc_u;
+ /* the blending percent of input data for V subpixels */
+ unsigned char blend_input_perc_v;
+ /* the blending percent of overlay data for Y subpixels */
+ unsigned char blend_overlay_perc_y;
+ /* the blending percent of overlay data for U subpixels */
+ unsigned char blend_overlay_perc_u;
+ /* the blending percent of overlay data for V subpixels */
+ unsigned char blend_overlay_perc_v;
+	/* the overlay start x pixel position on the output frame; it should
+	   be a multiple of 2*ISP_VEC_NELEMS. */
+ unsigned int overlay_start_x;
+	/* the overlay start y pixel position on the output frame; it should
+	   be a multiple of 2. */
+ unsigned int overlay_start_y;
+};
+
+/* Sensor resolution specific data for AE calculation.*/
+struct atomisp_sensor_mode_data {
+ unsigned int coarse_integration_time_min;
+ unsigned int coarse_integration_time_max_margin;
+ unsigned int fine_integration_time_min;
+ unsigned int fine_integration_time_max_margin;
+ unsigned int fine_integration_time_def;
+ unsigned int frame_length_lines;
+ unsigned int line_length_pck;
+ unsigned int read_mode;
+ unsigned int vt_pix_clk_freq_mhz;
+	unsigned int crop_horizontal_start; /* Sensor crop start coord. (x0,y0) */
+ unsigned int crop_vertical_start;
+	unsigned int crop_horizontal_end; /* Sensor crop end coord. (x1,y1) */
+ unsigned int crop_vertical_end;
+ unsigned int output_width; /* input size to ISP after binning/scaling */
+ unsigned int output_height;
+ u8 binning_factor_x; /* horizontal binning factor used */
+ u8 binning_factor_y; /* vertical binning factor used */
+ u16 hts;
+};
+
+struct atomisp_exposure {
+ unsigned int integration_time[8];
+ unsigned int shutter_speed[8];
+ unsigned int gain[4];
+ unsigned int aperture;
+};
+
+/* For texture streaming. */
+struct atomisp_bc_video_package {
+ int ioctl_cmd;
+ int device_id;
+ int inputparam;
+ int outputparam;
+};
+
+enum atomisp_focus_hp {
+ ATOMISP_FOCUS_HP_IN_PROGRESS = (1U << 2),
+ ATOMISP_FOCUS_HP_COMPLETE = (2U << 2),
+ ATOMISP_FOCUS_HP_FAILED = (3U << 2)
+};
+
+/* Masks */
+#define ATOMISP_FOCUS_STATUS_MOVING BIT(0)
+#define ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE BIT(1)
+#define ATOMISP_FOCUS_STATUS_HOME_POSITION (3U << 2)
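+
+/*
+ * Decoding example (illustrative): the home-position state occupies
+ * bits 3:2, so completion is tested with
+ *
+ *	if ((status & ATOMISP_FOCUS_STATUS_HOME_POSITION) ==
+ *	    ATOMISP_FOCUS_HP_COMPLETE)
+ *		...
+ */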
+
+enum atomisp_camera_port {
+ ATOMISP_CAMERA_PORT_SECONDARY,
+ ATOMISP_CAMERA_PORT_PRIMARY,
+ ATOMISP_CAMERA_PORT_TERTIARY,
+ ATOMISP_CAMERA_NR_PORTS
+};
+
+/* Flash modes. Default is off.
+ * Setting a flash to TORCH or INDICATOR mode will automatically
+ * turn it on. Setting it to FLASH mode will not turn on the flash
+ * until the FLASH_STROBE command is sent. */
+enum atomisp_flash_mode {
+ ATOMISP_FLASH_MODE_OFF,
+ ATOMISP_FLASH_MODE_FLASH,
+ ATOMISP_FLASH_MODE_TORCH,
+ ATOMISP_FLASH_MODE_INDICATOR,
+};
+
+/* Flash statuses, used by atomisp driver to check before starting
+ * flash and after having started flash. */
+enum atomisp_flash_status {
+ ATOMISP_FLASH_STATUS_OK,
+ ATOMISP_FLASH_STATUS_HW_ERROR,
+ ATOMISP_FLASH_STATUS_INTERRUPTED,
+ ATOMISP_FLASH_STATUS_TIMEOUT,
+};
+
+/* Frame status. This is used to detect corrupted frames and flash
+ * exposed frames. Usually, the first 2 frames coming out of the sensor
+ * are corrupted. When using flash, the frame before and the frame after
+ * the flash exposed frame may be partially exposed by flash. The ISP
+ * statistics for these frames should not be used by the 3A library.
+ * The frame status value can be found in the "reserved" field in the
+ * v4l2_buffer struct. */
+enum atomisp_frame_status {
+ ATOMISP_FRAME_STATUS_OK,
+ ATOMISP_FRAME_STATUS_CORRUPTED,
+ ATOMISP_FRAME_STATUS_FLASH_EXPOSED,
+ ATOMISP_FRAME_STATUS_FLASH_PARTIAL,
+ ATOMISP_FRAME_STATUS_FLASH_FAILED,
+};
+
+enum atomisp_acc_type {
+ ATOMISP_ACC_STANDALONE, /* Stand-alone acceleration */
+ ATOMISP_ACC_OUTPUT, /* Accelerator stage on output frame */
+ ATOMISP_ACC_VIEWFINDER /* Accelerator stage on viewfinder frame */
+};
+
+enum atomisp_acc_arg_type {
+ ATOMISP_ACC_ARG_SCALAR_IN, /* Scalar input argument */
+ ATOMISP_ACC_ARG_SCALAR_OUT, /* Scalar output argument */
+ ATOMISP_ACC_ARG_SCALAR_IO, /* Scalar in/output argument */
+ ATOMISP_ACC_ARG_PTR_IN, /* Pointer input argument */
+ ATOMISP_ACC_ARG_PTR_OUT, /* Pointer output argument */
+ ATOMISP_ACC_ARG_PTR_IO, /* Pointer in/output argument */
+ ATOMISP_ARG_PTR_NOFLUSH, /* Pointer argument will not be flushed */
+ ATOMISP_ARG_PTR_STABLE, /* Pointer input argument that is stable */
+ ATOMISP_ACC_ARG_FRAME /* Frame argument */
+};
+
+/* ISP memories, isp2400 */
+enum atomisp_acc_memory {
+ ATOMISP_ACC_MEMORY_PMEM0 = 0,
+ ATOMISP_ACC_MEMORY_DMEM0,
+ /* for backward compatibility */
+ ATOMISP_ACC_MEMORY_DMEM = ATOMISP_ACC_MEMORY_DMEM0,
+ ATOMISP_ACC_MEMORY_VMEM0,
+ ATOMISP_ACC_MEMORY_VAMEM0,
+ ATOMISP_ACC_MEMORY_VAMEM1,
+ ATOMISP_ACC_MEMORY_VAMEM2,
+ ATOMISP_ACC_MEMORY_HMEM0,
+ ATOMISP_ACC_NR_MEMORY
+};
+
+enum atomisp_ext_isp_id {
+ EXT_ISP_CID_ISO = 0,
+ EXT_ISP_CID_CAPTURE_HDR,
+ EXT_ISP_CID_CAPTURE_LLS,
+ EXT_ISP_CID_FOCUS_MODE,
+ EXT_ISP_CID_FOCUS_EXECUTION,
+ EXT_ISP_CID_TOUCH_POSX,
+ EXT_ISP_CID_TOUCH_POSY,
+ EXT_ISP_CID_CAF_STATUS,
+ EXT_ISP_CID_AF_STATUS,
+ EXT_ISP_CID_GET_AF_MODE,
+ EXT_ISP_CID_CAPTURE_BURST,
+ EXT_ISP_CID_FLASH_MODE,
+ EXT_ISP_CID_ZOOM,
+ EXT_ISP_CID_SHOT_MODE
+};
+
+#define EXT_ISP_FOCUS_MODE_NORMAL 0
+#define EXT_ISP_FOCUS_MODE_MACRO 1
+#define EXT_ISP_FOCUS_MODE_TOUCH_AF 2
+#define EXT_ISP_FOCUS_MODE_PREVIEW_CAF 3
+#define EXT_ISP_FOCUS_MODE_MOVIE_CAF 4
+#define EXT_ISP_FOCUS_MODE_FACE_CAF 5
+#define EXT_ISP_FOCUS_MODE_TOUCH_MACRO 6
+#define EXT_ISP_FOCUS_MODE_TOUCH_CAF 7
+
+#define EXT_ISP_FOCUS_STOP 0
+#define EXT_ISP_FOCUS_SEARCH 1
+#define EXT_ISP_PAN_FOCUSING 2
+
+#define EXT_ISP_CAF_RESTART_CHECK 1
+#define EXT_ISP_CAF_STATUS_FOCUSING 2
+#define EXT_ISP_CAF_STATUS_SUCCESS 3
+#define EXT_ISP_CAF_STATUS_FAIL 4
+
+#define EXT_ISP_AF_STATUS_INVALID 1
+#define EXT_ISP_AF_STATUS_FOCUSING 2
+#define EXT_ISP_AF_STATUS_SUCCESS 3
+#define EXT_ISP_AF_STATUS_FAIL 4
+
+enum atomisp_burst_capture_options {
+ EXT_ISP_BURST_CAPTURE_CTRL_START = 0,
+ EXT_ISP_BURST_CAPTURE_CTRL_STOP
+};
+
+#define EXT_ISP_FLASH_MODE_OFF 0
+#define EXT_ISP_FLASH_MODE_ON 1
+#define EXT_ISP_FLASH_MODE_AUTO 2
+#define EXT_ISP_LED_TORCH_OFF 3
+#define EXT_ISP_LED_TORCH_ON 4
+
+#define EXT_ISP_SHOT_MODE_AUTO 0
+#define EXT_ISP_SHOT_MODE_BEAUTY_FACE 1
+#define EXT_ISP_SHOT_MODE_BEST_PHOTO 2
+#define EXT_ISP_SHOT_MODE_DRAMA 3
+#define EXT_ISP_SHOT_MODE_BEST_FACE 4
+#define EXT_ISP_SHOT_MODE_ERASER 5
+#define EXT_ISP_SHOT_MODE_PANORAMA 6
+#define EXT_ISP_SHOT_MODE_RICH_TONE_HDR 7
+#define EXT_ISP_SHOT_MODE_NIGHT 8
+#define EXT_ISP_SHOT_MODE_SOUND_SHOT 9
+#define EXT_ISP_SHOT_MODE_ANIMATED_PHOTO 10
+#define EXT_ISP_SHOT_MODE_SPORTS 11
+
+struct atomisp_sp_arg {
+ enum atomisp_acc_arg_type type; /* Type of SP argument */
+ void *value; /* Value of SP argument */
+ unsigned int size; /* Size of SP argument */
+};
+
+/* Acceleration API */
+
+/* For CSS 1.0 only */
+struct atomisp_acc_fw_arg {
+ unsigned int fw_handle;
+ unsigned int index;
+ void __user *value;
+ size_t size;
+};
+
+/*
+ * Argument for ATOMISP_IOC_ACC_S_MAPPED_ARG: set firmware arguments
+ * after first mapping the buffer with ATOMISP_IOC_ACC_MAP.
+ */
+struct atomisp_acc_s_mapped_arg {
+ unsigned int fw_handle;
+ __u32 memory; /* one of enum atomisp_acc_memory */
+ size_t length;
+ unsigned long css_ptr;
+};
+
+struct atomisp_acc_fw_abort {
+ unsigned int fw_handle;
+ /* Timeout in us */
+ unsigned int timeout;
+};
+
+struct atomisp_acc_fw_load {
+ unsigned int size;
+ unsigned int fw_handle;
+ void __user *data;
+};
+
+/*
+ * Load firmware to specified pipeline.
+ */
+struct atomisp_acc_fw_load_to_pipe {
+ __u32 flags; /* Flags, see below for valid values */
+ unsigned int fw_handle; /* Handle, filled by kernel. */
+ __u32 size; /* Firmware binary size */
+ void __user *data; /* Pointer to firmware */
+ __u32 type; /* Binary type */
+ __u32 reserved[3]; /* Set to zero */
+};
+
+/*
+ * Set sensor run mode
+ */
+struct atomisp_s_runmode {
+ __u32 mode;
+};
+
+#define ATOMISP_ACC_FW_LOAD_FL_PREVIEW BIT(0)
+#define ATOMISP_ACC_FW_LOAD_FL_COPY BIT(1)
+#define ATOMISP_ACC_FW_LOAD_FL_VIDEO BIT(2)
+#define ATOMISP_ACC_FW_LOAD_FL_CAPTURE BIT(3)
+#define ATOMISP_ACC_FW_LOAD_FL_ACC BIT(4)
+#define ATOMISP_ACC_FW_LOAD_FL_ENABLE BIT(16)
+
+#define ATOMISP_ACC_FW_LOAD_TYPE_NONE 0 /* Normal binary: don't use */
+#define ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT 1 /* Stage on output */
+#define ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER 2 /* Stage on viewfinder */
+#define ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE 3 /* Stand-alone acceleration */
+
+struct atomisp_acc_map {
+ __u32 flags; /* Flags, see list below */
+ __u32 length; /* Length of data in bytes */
+ void __user *user_ptr; /* Pointer into user space */
+ unsigned long css_ptr; /* Pointer into CSS address space */
+ __u32 reserved[4]; /* Set to zero */
+};
+
+#define ATOMISP_MAP_FLAG_NOFLUSH 0x0001 /* Do not flush cache */
+#define ATOMISP_MAP_FLAG_CACHED 0x0002 /* Enable cache */
+
+struct atomisp_acc_state {
+ __u32 flags; /* Flags, see list below */
+#define ATOMISP_STATE_FLAG_ENABLE ATOMISP_ACC_FW_LOAD_FL_ENABLE
+ unsigned int fw_handle;
+};
+
+struct atomisp_update_exposure {
+ unsigned int gain;
+ unsigned int digi_gain;
+ unsigned int update_gain;
+ unsigned int update_digi_gain;
+};
+
+/*
+ * V4L2 private internal data interface.
+ * -----------------------------------------------------------------------------
+ * struct v4l2_private_int_data - request private data stored in video device
+ * internal memory.
+ * @size: sanity check to ensure that the userspace buffer fits the whole
+ * private data. If not, the kernel makes a partial copy (or none if
+ * @size == 0). @size is always updated to the minimum size necessary
+ * if the IOCTL returns no error.
+ * @data: pointer to userspace buffer.
+ */
+struct v4l2_private_int_data {
+ __u32 size;
+ void __user *data;
+ __u32 reserved[2];
+};
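+
+/* Usage sketch (user space, illustrative only): query the required size
+ * first with @size == 0, allocate, then repeat the call; the sensor OTP
+ * ioctl ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA defined below is one consumer.
+ *
+ *   struct v4l2_private_int_data priv = { .size = 0 };
+ *
+ *   if (ioctl(fd, ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA, &priv) == 0) {
+ *       priv.data = malloc(priv.size);
+ *       if (priv.data)
+ *           ioctl(fd, ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA, &priv);
+ *   }
+ */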
+
+enum atomisp_sensor_ae_bracketing_mode {
+ SENSOR_AE_BRACKETING_MODE_OFF = 0,
+ SENSOR_AE_BRACKETING_MODE_SINGLE, /* back to SW standby after bracketing */
+ SENSOR_AE_BRACKETING_MODE_SINGLE_TO_STREAMING, /* back to normal streaming after bracketing */
+ SENSOR_AE_BRACKETING_MODE_LOOP, /* continue AE bracketing in loop mode */
+};
+
+struct atomisp_sensor_ae_bracketing_info {
+ unsigned int modes; /* bit mask to indicate supported modes */
+ unsigned int lut_depth;
+};
+
+struct atomisp_sensor_ae_bracketing_lut_entry {
+ __u16 coarse_integration_time;
+ __u16 analog_gain;
+ __u16 digital_gain;
+};
+
+struct atomisp_sensor_ae_bracketing_lut {
+ struct atomisp_sensor_ae_bracketing_lut_entry *lut;
+ unsigned int lut_size;
+};
+
+/* Private IOCTLs for ISP */
+#define ATOMISP_IOC_G_XNR \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 0, int)
+#define ATOMISP_IOC_S_XNR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 0, int)
+#define ATOMISP_IOC_G_NR \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config)
+#define ATOMISP_IOC_S_NR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config)
+#define ATOMISP_IOC_G_TNR \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 2, struct atomisp_tnr_config)
+#define ATOMISP_IOC_S_TNR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 2, struct atomisp_tnr_config)
+#define ATOMISP_IOC_G_HISTOGRAM \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram)
+#define ATOMISP_IOC_S_HISTOGRAM \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram)
+#define ATOMISP_IOC_G_BLACK_LEVEL_COMP \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config)
+#define ATOMISP_IOC_S_BLACK_LEVEL_COMP \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config)
+#define ATOMISP_IOC_G_EE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config)
+#define ATOMISP_IOC_S_EE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config)
+/* Digital Image Stabilization:
+ * 1. get dis statistics: reads DIS statistics from ISP (every frame)
+ * 2. set dis coefficients: set DIS filter coefficients (one time)
+ * 3. set dis motion vector: set motion vector (result of DIS, every frame)
+ */
+#define ATOMISP_IOC_G_DIS_STAT \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics)
+
+#define ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs2_bq_resolutions)
+
+#define ATOMISP_IOC_S_DIS_COEFS \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_coefficients)
+
+#define ATOMISP_IOC_S_DIS_VECTOR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs_6axis_config)
+
+#define ATOMISP_IOC_G_3A_STAT \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 7, struct atomisp_3a_statistics)
+#define ATOMISP_IOC_G_ISP_PARM \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm)
+#define ATOMISP_IOC_S_ISP_PARM \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm)
+#define ATOMISP_IOC_G_ISP_GAMMA \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table)
+#define ATOMISP_IOC_S_ISP_GAMMA \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table)
+#define ATOMISP_IOC_G_ISP_GDC_TAB \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table)
+#define ATOMISP_IOC_S_ISP_GDC_TAB \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table)
+#define ATOMISP_IOC_ISP_MAKERNOTE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 11, struct atomisp_makernote_info)
+
+/* macc parameter control */
+#define ATOMISP_IOC_G_ISP_MACC \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config)
+#define ATOMISP_IOC_S_ISP_MACC \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config)
+
+/* Defect pixel detection & Correction */
+#define ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config)
+#define ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config)
+
+/* False Color Correction */
+#define ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config)
+#define ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config)
+
+/* ctc parameter control */
+#define ATOMISP_IOC_G_ISP_CTC \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table)
+#define ATOMISP_IOC_S_ISP_CTC \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table)
+
+/* white balance Correction */
+#define ATOMISP_IOC_G_ISP_WHITE_BALANCE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config)
+#define ATOMISP_IOC_S_ISP_WHITE_BALANCE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config)
+
+/* fpn table loading */
+#define ATOMISP_IOC_S_ISP_FPN_TABLE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 17, struct v4l2_framebuffer)
+
+/* overlay image loading */
+#define ATOMISP_IOC_G_ISP_OVERLAY \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay)
+#define ATOMISP_IOC_S_ISP_OVERLAY \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay)
+
+/* bcd driver bridge */
+#define ATOMISP_IOC_CAMERA_BRIDGE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 19, struct atomisp_bc_video_package)
+
+/* Sensor resolution specific info for AE */
+#define ATOMISP_IOC_G_SENSOR_MODE_DATA \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 20, struct atomisp_sensor_mode_data)
+
+#define ATOMISP_IOC_S_EXPOSURE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 21, struct atomisp_exposure)
+
+/* sensor calibration registers group */
+#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group)
+
+/* 3A configuration */
+#define ATOMISP_IOC_G_3A_CONFIG \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
+#define ATOMISP_IOC_S_3A_CONFIG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
+
+/* Accelerate ioctls */
+#define ATOMISP_IOC_ACC_LOAD \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load)
+
+#define ATOMISP_IOC_ACC_UNLOAD \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 24, unsigned int)
+
+/* For CSS 1.0 only */
+#define ATOMISP_IOC_ACC_S_ARG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg)
+
+#define ATOMISP_IOC_ACC_START \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 24, unsigned int)
+
+#define ATOMISP_IOC_ACC_WAIT \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 25, unsigned int)
+
+#define ATOMISP_IOC_ACC_ABORT \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_abort)
+
+#define ATOMISP_IOC_ACC_DESTAB \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg)
+
+/* sensor OTP memory read */
+#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data)
+
+/* LCS (shading) table write */
+#define ATOMISP_IOC_S_ISP_SHD_TAB \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table)
+
+/* Gamma Correction */
+#define ATOMISP_IOC_G_ISP_GAMMA_CORRECTION \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config)
+
+#define ATOMISP_IOC_S_ISP_GAMMA_CORRECTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config)
+
+/* motor internal memory read */
+#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data)
+
+/*
+ * Ioctls to map and unmap user buffers to CSS address space for acceleration.
+ * The user fills the length and user_ptr fields and sets the other fields
+ * to zero; the kernel may modify the flags and sets css_ptr.
+ */
+#define ATOMISP_IOC_ACC_MAP \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map)
+
+/* The user fills the length, user_ptr and css_ptr fields and zeroes the other fields. */
+#define ATOMISP_IOC_ACC_UNMAP \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map)
+
+#define ATOMISP_IOC_ACC_S_MAPPED_ARG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg)
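+
+/* Usage sketch (user space, illustrative only): map a page-aligned buffer
+ * into the CSS address space, then bind it to an argument of a previously
+ * loaded binary; fw_handle comes from ATOMISP_IOC_ACC_LOAD and error
+ * handling is omitted.
+ *
+ *   struct atomisp_acc_map map = { .length = len, .user_ptr = buf };
+ *   struct atomisp_acc_s_mapped_arg arg = { .fw_handle = fw_handle };
+ *
+ *   ioctl(fd, ATOMISP_IOC_ACC_MAP, &map);
+ *   arg.memory = ATOMISP_ACC_MEMORY_DMEM0;
+ *   arg.length = map.length;
+ *   arg.css_ptr = map.css_ptr;
+ *   ioctl(fd, ATOMISP_IOC_ACC_S_MAPPED_ARG, &arg);
+ */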
+
+#define ATOMISP_IOC_ACC_LOAD_TO_PIPE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe)
+
+#define ATOMISP_IOC_S_PARAMETERS \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters)
+
+#define ATOMISP_IOC_S_CONT_CAPTURE_CONFIG \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 33, struct atomisp_cont_capture_conf)
+
+#define ATOMISP_IOC_G_METADATA \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata)
+
+#define ATOMISP_IOC_G_METADATA_BY_TYPE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata_with_type)
+
+#define ATOMISP_IOC_EXT_ISP_CTRL \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 35, struct atomisp_ext_isp_ctrl)
+
+#define ATOMISP_IOC_EXP_ID_UNLOCK \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 36, int)
+
+#define ATOMISP_IOC_EXP_ID_CAPTURE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 37, int)
+
+#define ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 38, unsigned int)
+
+#define ATOMISP_IOC_G_FORMATS_CONFIG \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 39, struct atomisp_formats_config)
+
+#define ATOMISP_IOC_S_FORMATS_CONFIG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 39, struct atomisp_formats_config)
+
+#define ATOMISP_IOC_S_EXPOSURE_WINDOW \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 40, struct atomisp_ae_window)
+
+#define ATOMISP_IOC_S_ACC_STATE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state)
+
+#define ATOMISP_IOC_G_ACC_STATE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state)
+
+#define ATOMISP_IOC_INJECT_A_FAKE_EVENT \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 42, int)
+
+#define ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_info)
+
+#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 43, unsigned int)
+
+#define ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 43, unsigned int)
+
+#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_lut)
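+
+/* Usage sketch (user space, illustrative values): program a two-entry
+ * bracketing LUT and switch the sensor into single-shot bracketing mode.
+ *
+ *   struct atomisp_sensor_ae_bracketing_lut_entry e[2] = {
+ *       { .coarse_integration_time = 100, .analog_gain = 1, .digital_gain = 1 },
+ *       { .coarse_integration_time = 400, .analog_gain = 4, .digital_gain = 1 },
+ *   };
+ *   struct atomisp_sensor_ae_bracketing_lut lut = { .lut = e, .lut_size = 2 };
+ *   unsigned int mode = SENSOR_AE_BRACKETING_MODE_SINGLE;
+ *
+ *   ioctl(fd, ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT, &lut);
+ *   ioctl(fd, ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE, &mode);
+ */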
+
+#define ATOMISP_IOC_G_INVALID_FRAME_NUM \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 44, unsigned int)
+
+#define ATOMISP_IOC_S_ARRAY_RESOLUTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 45, struct atomisp_resolution)
+
+/* for depth mode sensor frame sync compensation */
+#define ATOMISP_IOC_G_DEPTH_SYNC_COMP \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 46, unsigned int)
+
+#define ATOMISP_IOC_S_SENSOR_EE_CONFIG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 47, unsigned int)
+
+#define ATOMISP_IOC_S_SENSOR_RUNMODE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 48, struct atomisp_s_runmode)
+
+#define ATOMISP_IOC_G_UPDATE_EXPOSURE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 49, struct atomisp_update_exposure)
+
+/*
+ * Reserved ioctls. A customer implements them internally, so we cannot
+ * reuse these numbers without causing an ABI conflict.
+ * Anyway, those ioctls are hacks and are not implemented by us:
+ *
+ * #define ATOMISP_IOC_G_SENSOR_REG \
+ * _IOW('v', BASE_VIDIOC_PRIVATE + 55, struct atomisp_sensor_regs)
+ * #define ATOMISP_IOC_S_SENSOR_REG \
+ * _IOW('v', BASE_VIDIOC_PRIVATE + 56, struct atomisp_sensor_regs)
+ */
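+
+/* Usage sketch (user space, illustrative only) of the get/modify/set
+ * pattern shared by the G_/S_ ioctl pairs above, using the int-valued
+ * XNR control:
+ *
+ *   int xnr;
+ *
+ *   if (ioctl(fd, ATOMISP_IOC_G_XNR, &xnr) == 0) {
+ *       xnr = !xnr;
+ *       ioctl(fd, ATOMISP_IOC_S_XNR, &xnr);
+ *   }
+ */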
+
+/* ISP Private control IDs */
+#define V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION \
+ (V4L2_CID_PRIVATE_BASE + 0)
+#define V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC \
+ (V4L2_CID_PRIVATE_BASE + 1)
+#define V4L2_CID_ATOMISP_VIDEO_STABLIZATION \
+ (V4L2_CID_PRIVATE_BASE + 2)
+#define V4L2_CID_ATOMISP_FIXED_PATTERN_NR \
+ (V4L2_CID_PRIVATE_BASE + 3)
+#define V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION \
+ (V4L2_CID_PRIVATE_BASE + 4)
+#define V4L2_CID_ATOMISP_LOW_LIGHT \
+ (V4L2_CID_PRIVATE_BASE + 5)
+
+/* Camera class:
+ * Exposure, Flash and privacy (indicator) light controls, to be upstreamed */
+#define V4L2_CID_CAMERA_LASTP1 (V4L2_CID_CAMERA_CLASS_BASE + 1024)
+
+#define V4L2_CID_FOCAL_ABSOLUTE (V4L2_CID_CAMERA_LASTP1 + 0)
+#define V4L2_CID_FNUMBER_ABSOLUTE (V4L2_CID_CAMERA_LASTP1 + 1)
+#define V4L2_CID_FNUMBER_RANGE (V4L2_CID_CAMERA_LASTP1 + 2)
+
+/* Flash related CIDs, see also:
+ * http://linuxtv.org/downloads/v4l-dvb-apis/extended-controls.html\
+ * #flash-controls */
+
+/* Request a number of flash-exposed frames. The frame status can be
+ * found in the reserved field in the v4l2_buffer struct. */
+#define V4L2_CID_REQUEST_FLASH (V4L2_CID_CAMERA_LASTP1 + 3)
+/* Query flash driver status. See enum atomisp_flash_status above. */
+#define V4L2_CID_FLASH_STATUS (V4L2_CID_CAMERA_LASTP1 + 5)
+/* Set the flash mode (see enum atomisp_flash_mode) */
+#define V4L2_CID_FLASH_MODE (V4L2_CID_CAMERA_LASTP1 + 10)
+
+/* VCM slew control */
+#define V4L2_CID_VCM_SLEW (V4L2_CID_CAMERA_LASTP1 + 11)
+/* VCM step time */
+#define V4L2_CID_VCM_TIMEING (V4L2_CID_CAMERA_LASTP1 + 12)
+
+/* Query Focus Status */
+#define V4L2_CID_FOCUS_STATUS (V4L2_CID_CAMERA_LASTP1 + 14)
+
+/* Query sensor's binning factor */
+#define V4L2_CID_BIN_FACTOR_HORZ (V4L2_CID_CAMERA_LASTP1 + 15)
+#define V4L2_CID_BIN_FACTOR_VERT (V4L2_CID_CAMERA_LASTP1 + 16)
+
+/* number of frames to skip at stream start */
+#define V4L2_CID_G_SKIP_FRAMES (V4L2_CID_CAMERA_LASTP1 + 17)
+
+/* Query sensor's 2A status */
+#define V4L2_CID_2A_STATUS (V4L2_CID_CAMERA_LASTP1 + 18)
+#define V4L2_2A_STATUS_AE_READY BIT(0)
+#define V4L2_2A_STATUS_AWB_READY BIT(1)
+
+#define V4L2_CID_FMT_AUTO (V4L2_CID_CAMERA_LASTP1 + 19)
+
+#define V4L2_CID_RUN_MODE (V4L2_CID_CAMERA_LASTP1 + 20)
+#define ATOMISP_RUN_MODE_VIDEO 1
+#define ATOMISP_RUN_MODE_STILL_CAPTURE 2
+#define ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE 3
+#define ATOMISP_RUN_MODE_PREVIEW 4
+#define ATOMISP_RUN_MODE_SDV 5
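+
+/* Usage sketch (user space, illustrative only): the run mode is set
+ * through the standard V4L2 control interface, e.g.
+ *
+ *   struct v4l2_control ctrl = {
+ *       .id = V4L2_CID_RUN_MODE,
+ *       .value = ATOMISP_RUN_MODE_PREVIEW,
+ *   };
+ *
+ *   ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ */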
+
+#define V4L2_CID_ENABLE_VFPP (V4L2_CID_CAMERA_LASTP1 + 21)
+#define V4L2_CID_ATOMISP_CONTINUOUS_MODE (V4L2_CID_CAMERA_LASTP1 + 22)
+#define V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE \
+ (V4L2_CID_CAMERA_LASTP1 + 23)
+#define V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER \
+ (V4L2_CID_CAMERA_LASTP1 + 24)
+
+#define V4L2_CID_VFPP (V4L2_CID_CAMERA_LASTP1 + 25)
+#define ATOMISP_VFPP_ENABLE 0
+#define ATOMISP_VFPP_DISABLE_SCALER 1
+#define ATOMISP_VFPP_DISABLE_LOWLAT 2
+
+/* Query real flash status register value */
+#define V4L2_CID_FLASH_STATUS_REGISTER (V4L2_CID_CAMERA_LASTP1 + 26)
+
+#define V4L2_CID_START_ZSL_CAPTURE (V4L2_CID_CAMERA_LASTP1 + 28)
+/* Lock and unlock raw buffer */
+#define V4L2_CID_ENABLE_RAW_BUFFER_LOCK (V4L2_CID_CAMERA_LASTP1 + 29)
+
+#define V4L2_CID_DEPTH_MODE (V4L2_CID_CAMERA_LASTP1 + 30)
+
+#define V4L2_CID_EXPOSURE_ZONE_NUM (V4L2_CID_CAMERA_LASTP1 + 31)
+/* Disable digital zoom */
+#define V4L2_CID_DISABLE_DZ (V4L2_CID_CAMERA_LASTP1 + 32)
+
+#define V4L2_CID_TEST_PATTERN_COLOR_R (V4L2_CID_CAMERA_LASTP1 + 33)
+#define V4L2_CID_TEST_PATTERN_COLOR_GR (V4L2_CID_CAMERA_LASTP1 + 34)
+#define V4L2_CID_TEST_PATTERN_COLOR_GB (V4L2_CID_CAMERA_LASTP1 + 35)
+#define V4L2_CID_TEST_PATTERN_COLOR_B (V4L2_CID_CAMERA_LASTP1 + 36)
+
+#define V4L2_CID_ATOMISP_SELECT_ISP_VERSION (V4L2_CID_CAMERA_LASTP1 + 38)
+
+#define V4L2_BUF_FLAG_BUFFER_INVALID 0x0400
+#define V4L2_BUF_FLAG_BUFFER_VALID 0x0800
+
+#define V4L2_BUF_TYPE_VIDEO_CAPTURE_ION (V4L2_BUF_TYPE_PRIVATE + 1024)
+
+#define V4L2_EVENT_ATOMISP_3A_STATS_READY (V4L2_EVENT_PRIVATE_START + 1)
+#define V4L2_EVENT_ATOMISP_METADATA_READY (V4L2_EVENT_PRIVATE_START + 2)
+#define V4L2_EVENT_ATOMISP_RAW_BUFFERS_ALLOC_DONE (V4L2_EVENT_PRIVATE_START + 3)
+#define V4L2_EVENT_ATOMISP_ACC_COMPLETE (V4L2_EVENT_PRIVATE_START + 4)
+#define V4L2_EVENT_ATOMISP_PAUSE_BUFFER (V4L2_EVENT_PRIVATE_START + 5)
+#define V4L2_EVENT_ATOMISP_CSS_RESET (V4L2_EVENT_PRIVATE_START + 6)
+/* Nonstandard color effects for V4L2_CID_COLORFX */
+enum {
+ V4L2_COLORFX_SKIN_WHITEN_LOW = 1001,
+ V4L2_COLORFX_SKIN_WHITEN_HIGH = 1002,
+ V4L2_COLORFX_WARM = 1003,
+ V4L2_COLORFX_COLD = 1004,
+ V4L2_COLORFX_WASHED = 1005,
+ V4L2_COLORFX_RED = 1006,
+ V4L2_COLORFX_GREEN = 1007,
+ V4L2_COLORFX_BLUE = 1008,
+ V4L2_COLORFX_PINK = 1009,
+ V4L2_COLORFX_YELLOW = 1010,
+ V4L2_COLORFX_PURPLE = 1011,
+};
+
+#endif /* _ATOM_ISP_H */
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
new file mode 100644
index 000000000000..8700ffd32bdb
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel MID SoC Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef ATOMISP_GMIN_PLATFORM_H_
+#define ATOMISP_GMIN_PLATFORM_H_
+
+#include "atomisp_platform.h"
+
+int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
+ struct camera_sensor_platform_data *plat_data,
+ enum intel_v4l2_subdev_type type);
+struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter,
+ struct i2c_board_info *board_info);
+int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd);
+int gmin_get_var_int(struct device *dev, bool is_gmin,
+ const char *var, int def);
+int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
+ u32 lanes, u32 format, u32 bayer_order, int flag);
+struct camera_sensor_platform_data *
+gmin_camera_platform_data(
+ struct v4l2_subdev *subdev,
+ enum atomisp_input_format csi_format,
+ enum atomisp_bayer_order csi_bayer);
+
+int atomisp_gmin_register_vcm_control(struct camera_vcm_control *);
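+
+/* Usage sketch (illustrative only): a sensor driver's probe() would
+ * typically wire itself up via these helpers roughly as follows; the
+ * input format and Bayer order are example values, and error handling
+ * is omitted.
+ *
+ *   struct camera_sensor_platform_data *pdata;
+ *
+ *   pdata = gmin_camera_platform_data(sd,
+ *                                     ATOMISP_INPUT_FORMAT_RAW_10,
+ *                                     atomisp_bayer_order_grbg);
+ *   atomisp_register_i2c_module(sd, pdata, RAW_CAMERA);
+ */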
+
+#endif
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
new file mode 100644
index 000000000000..9cf46325a696
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -0,0 +1,247 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef ATOMISP_PLATFORM_H_
+#define ATOMISP_PLATFORM_H_
+
+#include <linux/i2c.h>
+#include <linux/sfi.h>
+#include <media/v4l2-subdev.h>
+#include "atomisp.h"
+
+#define MAX_SENSORS_PER_PORT 4
+#define MAX_STREAMS_PER_CHANNEL 2
+
+#define CAMERA_MODULE_ID_LEN 64
+
+enum atomisp_bayer_order {
+ atomisp_bayer_order_grbg,
+ atomisp_bayer_order_rggb,
+ atomisp_bayer_order_bggr,
+ atomisp_bayer_order_gbrg
+};
+
+enum atomisp_input_stream_id {
+ ATOMISP_INPUT_STREAM_GENERAL = 0,
+ ATOMISP_INPUT_STREAM_CAPTURE = 0,
+ ATOMISP_INPUT_STREAM_POSTVIEW,
+ ATOMISP_INPUT_STREAM_PREVIEW,
+ ATOMISP_INPUT_STREAM_VIDEO,
+ ATOMISP_INPUT_STREAM_NUM
+};
+
+enum atomisp_input_format {
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY,/* 8 bits per subpixel (legacy) */
+ ATOMISP_INPUT_FORMAT_YUV420_8, /* 8 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV420_10,/* 10 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV420_16,/* 16 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV422_8, /* UYVY..UVYV, 8 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV422_10,/* UYVY..UVYV, 10 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV422_16,/* UYVY..UVYV, 16 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_444, /* BGR..BGR, 4 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_555, /* BGR..BGR, 5 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_565, /* BGR..BGR, 5 bits B and R, 6 bits G */
+ ATOMISP_INPUT_FORMAT_RGB_666, /* BGR..BGR, 6 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_888, /* BGR..BGR, 8 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RAW_6, /* RAW data, 6 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_7, /* RAW data, 7 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_8, /* RAW data, 8 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_10, /* RAW data, 10 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_12, /* RAW data, 12 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_14, /* RAW data, 14 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_16, /* RAW data, 16 bits per pixel */
+ ATOMISP_INPUT_FORMAT_BINARY_8, /* Binary byte stream. */
+
+ /* CSI2-MIPI specific format: Generic short packet data. It is used to
+ * keep the timing information for the opening/closing of shutters,
+ * triggering of flashes, etc.
+ */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT1, /* Generic Short Packet Code 1 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT2, /* Generic Short Packet Code 2 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT3, /* Generic Short Packet Code 3 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT4, /* Generic Short Packet Code 4 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT5, /* Generic Short Packet Code 5 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT6, /* Generic Short Packet Code 6 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT7, /* Generic Short Packet Code 7 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT8, /* Generic Short Packet Code 8 */
+
+ /* CSI2-MIPI specific format: YUV data.
+ */
+ ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT, /* YUV420 8-bit (Chroma Shifted
+ Pixel Sampling) */
+ ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT, /* YUV420 10-bit (Chroma Shifted
+ Pixel Sampling) */
+
+ /* CSI2-MIPI specific format: Generic long packet data
+ */
+ ATOMISP_INPUT_FORMAT_EMBEDDED, /* Embedded 8-bit non Image Data */
+
+ /* CSI2-MIPI specific format: User defined byte-based data. For example,
+ * the data transmitter (e.g. the SoC sensor) can keep the JPEG data as
+ * the User Defined Data Type 4 and the MPEG data as the
+ * User Defined Data Type 7.
+ */
+ ATOMISP_INPUT_FORMAT_USER_DEF1, /* User defined 8-bit data type 1 */
+ ATOMISP_INPUT_FORMAT_USER_DEF2, /* User defined 8-bit data type 2 */
+ ATOMISP_INPUT_FORMAT_USER_DEF3, /* User defined 8-bit data type 3 */
+ ATOMISP_INPUT_FORMAT_USER_DEF4, /* User defined 8-bit data type 4 */
+ ATOMISP_INPUT_FORMAT_USER_DEF5, /* User defined 8-bit data type 5 */
+ ATOMISP_INPUT_FORMAT_USER_DEF6, /* User defined 8-bit data type 6 */
+ ATOMISP_INPUT_FORMAT_USER_DEF7, /* User defined 8-bit data type 7 */
+ ATOMISP_INPUT_FORMAT_USER_DEF8, /* User defined 8-bit data type 8 */
+};
+
+#define N_ATOMISP_INPUT_FORMAT (ATOMISP_INPUT_FORMAT_USER_DEF8 + 1)
+
+enum intel_v4l2_subdev_type {
+ RAW_CAMERA = 1,
+ SOC_CAMERA = 2,
+ CAMERA_MOTOR = 3,
+ LED_FLASH = 4,
+ XENON_FLASH = 5,
+ FILE_INPUT = 6,
+ TEST_PATTERN = 7,
+};
+
+struct intel_v4l2_subdev_id {
+ char name[17];
+ enum intel_v4l2_subdev_type type;
+ enum atomisp_camera_port port;
+};
+
+struct intel_v4l2_subdev_i2c_board_info {
+ struct i2c_board_info board_info;
+ int i2c_adapter_id;
+};
+
+struct intel_v4l2_subdev_table {
+ struct intel_v4l2_subdev_i2c_board_info v4l2_subdev;
+ enum intel_v4l2_subdev_type type;
+ enum atomisp_camera_port port;
+ struct v4l2_subdev *subdev;
+};
+
+struct atomisp_platform_data {
+ struct intel_v4l2_subdev_table *subdevs;
+};
+
+/* Describes the capabilities of a single sensor. */
+struct atomisp_sensor_caps {
+ /* The number of streams this sensor can output. */
+ int stream_num;
+ bool is_slave;
+};
+
+/* Describes the capabilities of the sensors connected to one camera port. */
+struct atomisp_camera_caps {
+ /* The number of sensors connected to this camera port. */
+ int sensor_num;
+ /* The capabilities of each sensor. */
+ struct atomisp_sensor_caps sensor[MAX_SENSORS_PER_PORT];
+ /* Define whether stream control is required for multiple streams. */
+ bool multi_stream_ctrl;
+};
+
+/*
+ * A sensor or external ISP can send multiple streams with different MIPI
+ * data types in the same virtual channel. This information needs to come
+ * from the sensor or external ISP.
+ */
+struct atomisp_isys_config_info {
+ u8 input_format;
+ u16 width;
+ u16 height;
+};
+
+struct atomisp_input_stream_info {
+ enum atomisp_input_stream_id stream;
+ u8 enable;
+ /* The sensor driver fills ch_id with the id of the virtual channel. */
+ u8 ch_id;
+ /* Tells how many streams are in this virtual channel. If 0, ignore the
+ * rest and take the input format from mipi_info. */
+ u8 isys_configs;
+ /*
+ * If isys_configs is more than 0, the sensor needs to configure the
+ * input format differently. width and height can be 0. If width and
+ * height are not zero, then the corresponding data needs to be set.
+ */
+ struct atomisp_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL];
+};
+
+struct camera_vcm_control;
+struct camera_vcm_ops {
+ int (*power_up)(struct v4l2_subdev *sd, struct camera_vcm_control *vcm);
+ int (*power_down)(struct v4l2_subdev *sd,
+ struct camera_vcm_control *vcm);
+ int (*queryctrl)(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc,
+ struct camera_vcm_control *vcm);
+ int (*g_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl,
+ struct camera_vcm_control *vcm);
+ int (*s_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl,
+ struct camera_vcm_control *vcm);
+};
+
+struct camera_vcm_control {
+ char camera_module[CAMERA_MODULE_ID_LEN];
+ struct camera_vcm_ops *ops;
+ struct list_head list;
+};
+
+struct camera_sensor_platform_data {
+ int (*flisclk_ctrl)(struct v4l2_subdev *subdev, int flag);
+ int (*csi_cfg)(struct v4l2_subdev *subdev, int flag);
+
+ /*
+ * New G-Min power and GPIO interface to control individual
+ * lines as implemented on all known camera modules.
+ */
+ int (*gpio0_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*gpio1_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*v1p8_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*v2p8_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*v1p2_ctrl)(struct v4l2_subdev *subdev, int on);
+ struct camera_vcm_control *(*get_vcm_ctrl)(struct v4l2_subdev *subdev,
+ char *module_id);
+};
+
+struct camera_mipi_info {
+ enum atomisp_camera_port port;
+ unsigned int num_lanes;
+ enum atomisp_input_format input_format;
+ enum atomisp_bayer_order raw_bayer_order;
+ struct atomisp_sensor_mode_data data;
+ enum atomisp_input_format metadata_format;
+ u32 metadata_width;
+ u32 metadata_height;
+ const u32 *metadata_effective_width;
+};
+
+const struct atomisp_platform_data *atomisp_get_platform_data(void);
+const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
+
+/* API from old platform_camera.h, new CPUID implementation */
+#define __IS_SOC(x) (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && \
+ boot_cpu_data.x86 == 6 && \
+ boot_cpu_data.x86_model == x)
+
+#define IS_MFLD __IS_SOC(0x27)
+#define IS_BYT __IS_SOC(0x37)
+#define IS_CHT __IS_SOC(0x4C)
+#define IS_MOFD __IS_SOC(0x5A)
+
+#endif /* ATOMISP_PLATFORM_H_ */
diff --git a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
new file mode 100644
index 000000000000..1b5111ad9415
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef __LIBMSRLISTHELPER_H__
+#define __LIBMSRLISTHELPER_H__
+
+struct i2c_client;
+struct firmware;
+
+int load_msr_list(struct i2c_client *client, char *path,
+ const struct firmware **fw);
+int apply_msr_data(struct i2c_client *client, const struct firmware *fw);
+void release_msr_list(struct i2c_client *client,
+ const struct firmware *fw);
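+
+/* Usage sketch (illustrative only; msr_path is a placeholder and the
+ * usual 0-on-success return convention is assumed): a sensor driver
+ * loads, applies and releases an MSR (register settings) list in
+ * sequence.
+ *
+ *   const struct firmware *fw;
+ *
+ *   if (!load_msr_list(client, msr_path, &fw)) {
+ *       apply_msr_data(client, fw);
+ *       release_msr_list(client, fw);
+ *   }
+ */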
+
+#endif /* ifndef __LIBMSRLISTHELPER_H__ */
diff --git a/drivers/staging/media/atomisp/include/media/lm3554.h b/drivers/staging/media/atomisp/include/media/lm3554.h
new file mode 100644
index 000000000000..03a916ade531
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/media/lm3554.h
@@ -0,0 +1,130 @@
+/*
+ * include/media/lm3554.h
+ *
+ * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef _LM3554_H_
+#define _LM3554_H_
+
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+
+#define LM3554_ID 3554
+
+#define v4l2_queryctrl_entry_integer(_id, _name,\
+ _minimum, _maximum, _step, \
+ _default_value, _flags) \
+ {\
+ .id = (_id), \
+ .type = V4L2_CTRL_TYPE_INTEGER, \
+ .name = _name, \
+ .minimum = (_minimum), \
+ .maximum = (_maximum), \
+ .step = (_step), \
+ .default_value = (_default_value),\
+ .flags = (_flags),\
+ }
+#define v4l2_queryctrl_entry_boolean(_id, _name,\
+ _default_value, _flags) \
+ {\
+ .id = (_id), \
+ .type = V4L2_CTRL_TYPE_BOOLEAN, \
+ .name = _name, \
+ .minimum = 0, \
+ .maximum = 1, \
+ .step = 1, \
+ .default_value = (_default_value),\
+ .flags = (_flags),\
+ }
+
+#define s_ctrl_id_entry_integer(_id, _name, \
+ _minimum, _maximum, _step, \
+ _default_value, _flags, \
+ _s_ctrl, _g_ctrl) \
+ {\
+ .qc = v4l2_queryctrl_entry_integer(_id, _name,\
+ _minimum, _maximum, _step,\
+ _default_value, _flags), \
+ .s_ctrl = _s_ctrl, \
+ .g_ctrl = _g_ctrl, \
+ }
+
+#define s_ctrl_id_entry_boolean(_id, _name, \
+ _default_value, _flags, \
+ _s_ctrl, _g_ctrl) \
+ {\
+ .qc = v4l2_queryctrl_entry_boolean(_id, _name,\
+ _default_value, _flags), \
+ .s_ctrl = _s_ctrl, \
+ .g_ctrl = _g_ctrl, \
+ }
+
+/* Value settings for Flash Time-out Duration */
+#define LM3554_DEFAULT_TIMEOUT 512U
+#define LM3554_MIN_TIMEOUT 32U
+#define LM3554_MAX_TIMEOUT 1024U
+#define LM3554_TIMEOUT_STEPSIZE 32U
+
+/* Flash modes */
+#define LM3554_MODE_SHUTDOWN 0
+#define LM3554_MODE_INDICATOR 1
+#define LM3554_MODE_TORCH 2
+#define LM3554_MODE_FLASH 3
+
+/* timer delay time */
+#define LM3554_TIMER_DELAY 5
+
+/* Percentage <-> value macros */
+#define LM3554_MIN_PERCENT 0U
+#define LM3554_MAX_PERCENT 100U
+#define LM3554_CLAMP_PERCENTAGE(val) \
+ clamp(val, LM3554_MIN_PERCENT, LM3554_MAX_PERCENT)
+
+#define LM3554_VALUE_TO_PERCENT(v, step) (((((unsigned long)(v)) * (step)) + 50) / 100)
+#define LM3554_PERCENT_TO_VALUE(p, step) (((((unsigned long)(p)) * 100) + (step >> 1)) / (step))
+
+/* Product-specific limits
+ * TODO: get these from platform data */
+#define LM3554_FLASH_MAX_LVL 0x0F /* 1191mA */
+
+/* Flash brightness, input is percentage, output is [0..15] */
+#define LM3554_FLASH_STEP \
+ ((100ul * (LM3554_MAX_PERCENT) + ((LM3554_FLASH_MAX_LVL) >> 1)) / ((LM3554_FLASH_MAX_LVL)))
+#define LM3554_FLASH_DEFAULT_BRIGHTNESS \
+ LM3554_VALUE_TO_PERCENT(13, LM3554_FLASH_STEP)
+
+/* Torch brightness, input is percentage, output is [0..7] */
+#define LM3554_TORCH_STEP 1250
+#define LM3554_TORCH_DEFAULT_BRIGHTNESS \
+ LM3554_VALUE_TO_PERCENT(2, LM3554_TORCH_STEP)
+
+/* Indicator brightness, input is percentage, output is [0..3] */
+#define LM3554_INDICATOR_STEP 2500
+#define LM3554_INDICATOR_DEFAULT_BRIGHTNESS \
+ LM3554_VALUE_TO_PERCENT(1, LM3554_INDICATOR_STEP)
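+
+/* Worked example of the macros above, using integer arithmetic:
+ * with LM3554_FLASH_MAX_LVL = 15, LM3554_FLASH_STEP evaluates to
+ * (100 * 100 + 7) / 15 = 667, so the default flash brightness is
+ * LM3554_VALUE_TO_PERCENT(13, 667) = (13 * 667 + 50) / 100 = 87 (%),
+ * and LM3554_PERCENT_TO_VALUE(87, 667) = (8700 + 333) / 667 = 13
+ * recovers the register value.
+ */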
+
+/*
+ * lm3554_platform_data - Flash controller platform data
+ */
+struct lm3554_platform_data {
+ int gpio_torch;
+ int gpio_strobe;
+ int gpio_reset;
+
+ unsigned int current_limit;
+ unsigned int envm_tx2;
+ unsigned int tx2_polarity;
+};
+
+#endif /* _LM3554_H_ */
diff --git a/drivers/staging/media/atomisp/include/mmu/isp_mmu.h b/drivers/staging/media/atomisp/include/mmu/isp_mmu.h
new file mode 100644
index 000000000000..d9662c515236
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/mmu/isp_mmu.h
@@ -0,0 +1,168 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+/*
+ * ISP MMU driver for classic two-level page tables
+ */
+#ifndef __ISP_MMU_H__
+#define __ISP_MMU_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+/*
+ * Do not change these values; the ISP page size must be the same as the
+ * kernel's page size.
+ */
+#define ISP_PAGE_OFFSET 12
+#define ISP_PAGE_SIZE BIT(ISP_PAGE_OFFSET)
+#define ISP_PAGE_MASK (~(phys_addr_t)(ISP_PAGE_SIZE - 1))
+
+#define ISP_L1PT_OFFSET 22
+#define ISP_L1PT_MASK (~((1U << ISP_L1PT_OFFSET) - 1))
+
+#define ISP_L2PT_OFFSET 12
+#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))
+
+#define ISP_L1PT_PTES 1024
+#define ISP_L2PT_PTES 1024
+
+#define ISP_PTR_TO_L1_IDX(x) ((((x) & ISP_L1PT_MASK)) \
+ >> ISP_L1PT_OFFSET)
+
+#define ISP_PTR_TO_L2_IDX(x) ((((x) & ISP_L2PT_MASK)) \
+ >> ISP_L2PT_OFFSET)
+
+#define ISP_PAGE_ALIGN(x) (((x) + (ISP_PAGE_SIZE - 1)) \
+ & ISP_PAGE_MASK)
+
+/* Recombine L1 index, L2 index and page offset into an ISP virtual address. */
+#define ISP_PT_TO_VIRT(l1_idx, l2_idx, offset) \
+ (((l1_idx) << ISP_L1PT_OFFSET) | \
+ ((l2_idx) << ISP_L2PT_OFFSET) | \
+ (offset))
+
+#define pgnr_to_size(pgnr) ((pgnr) << ISP_PAGE_OFFSET)
+#define size_to_pgnr_ceil(size) (((size) + (1 << ISP_PAGE_OFFSET) - 1)\
+ >> ISP_PAGE_OFFSET)
+#define size_to_pgnr_bottom(size) ((size) >> ISP_PAGE_OFFSET)
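+
+/* Worked example: for the ISP virtual address 0x12345678,
+ * ISP_PTR_TO_L1_IDX() keeps bits 31..22 and yields 0x048,
+ * ISP_PTR_TO_L2_IDX() keeps bits 21..12 and yields 0x345, and the
+ * in-page offset is the low 12 bits, 0x678;
+ * ISP_PT_TO_VIRT(0x048, 0x345, 0x678) recombines them to 0x12345678.
+ */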
+
+struct isp_mmu;
+
+struct isp_mmu_client {
+ /*
+ * const value
+ *
+ * @name:
+ * driver name
+ * @pte_valid_mask:
+ * must have exactly one valid bit set, i.e. the value
+ * must be a power of 2.
+ */
+ char *name;
+ unsigned int pte_valid_mask;
+ unsigned int null_pte;
+
+ /*
+ * get page directory base address (physical address).
+ *
+ * must be provided.
+ */
+ unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);
+ /*
+ * callback to flush tlb.
+ *
+ * tlb_flush_range will at least flush TLBs containing
+ * address mapping from addr to addr + size.
+ *
+ * tlb_flush_all will flush all TLBs.
+ *
+ * tlb_flush_all must be provided. If tlb_flush_range is
+ * not valid, it is set to tlb_flush_all by default.
+ */
+ void (*tlb_flush_range)(struct isp_mmu *mmu,
+ unsigned int addr, unsigned int size);
+ void (*tlb_flush_all)(struct isp_mmu *mmu);
+ unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
+ phys_addr_t phys);
+ phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
+ unsigned int pte);
+
+};
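+
+/* Minimal sketch of a hypothetical client (see sh_mmu_mrfld for the real
+ * Merrifield implementation); only the mandatory hooks are filled in,
+ * and all example_* names are placeholders:
+ *
+ *   static struct isp_mmu_client example_mmu = {
+ *       .name           = "Example ISP MMU",
+ *       .pte_valid_mask = 0x1,
+ *       .get_pd_base    = example_get_pd_base,
+ *       .tlb_flush_all  = example_tlb_flush_all,
+ *       .phys_to_pte    = example_phys_to_pte,
+ *       .pte_to_phys    = example_pte_to_phys,
+ *   };
+ */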
+
+struct isp_mmu {
+ struct isp_mmu_client *driver;
+ unsigned int l1_pte;
+ int l2_pgt_refcount[ISP_L1PT_PTES];
+ phys_addr_t base_address;
+
+ struct mutex pt_mutex;
+};
+
+/* flags for PDE and PTE */
+#define ISP_PTE_VALID_MASK(mmu) \
+ ((mmu)->driver->pte_valid_mask)
+
+#define ISP_PTE_VALID(mmu, pte) \
+ ((pte) & ISP_PTE_VALID_MASK(mmu))
+
+#define NULL_PAGE ((phys_addr_t)(-1) & ISP_PAGE_MASK)
+#define PAGE_VALID(page) ((page) != NULL_PAGE)
+
+/*
+ * init mmu with specific mmu driver.
+ */
+int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver);
+/*
+ * cleanup all mmu related things.
+ */
+void isp_mmu_exit(struct isp_mmu *mmu);
+
+/*
+ * setup/remove address mapping for pgnr continuous physical pages
+ * and isp_virt.
+ *
+ * map/unmap are protected by a mutex, so the caller does not have
+ * to do any lock/unlock operation.
+ *
+ * map/unmap do not flush the TLB; the caller needs to deal with
+ * this itself.
+ */
+int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
+ phys_addr_t phys, unsigned int pgnr);
+
+void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
+ unsigned int pgnr);
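+
+/* Usage sketch, assuming the usual 0-on-success return convention:
+ * map pgnr pages and flush the TLB afterwards, as the contract above
+ * requires.
+ *
+ *   if (!isp_mmu_map(mmu, isp_virt, phys, pgnr))
+ *       isp_mmu_flush_tlb_range(mmu, isp_virt, pgnr_to_size(pgnr));
+ */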
+
+static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu)
+{
+ if (mmu->driver && mmu->driver->tlb_flush_all)
+ mmu->driver->tlb_flush_all(mmu);
+}
+
+#define isp_mmu_flush_tlb isp_mmu_flush_tlb_all
+
+static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu,
+ unsigned int start, unsigned int size)
+{
+ if (mmu->driver && mmu->driver->tlb_flush_range)
+ mmu->driver->tlb_flush_range(mmu, start, size);
+}
+
+#endif /* __ISP_MMU_H__ */
diff --git a/drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h b/drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h
new file mode 100644
index 000000000000..662e98f41da2
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Merrifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __SH_MMU_MRFLD_H__
+#define __SH_MMU_MRFLD_H__
+
+extern struct isp_mmu_client sh_mmu_mrfld;
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp-regs.h
new file mode 100644
index 000000000000..cc489a331a7c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp-regs.h
@@ -0,0 +1,199 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef ATOMISP_REGS_H
+#define ATOMISP_REGS_H
+
+/* common register definitions */
+#define PUNIT_PORT 0x04
+#define CCK_PORT 0x14
+
+#define PCICMDSTS 0x01
+#define INTR 0x0f
+#define MSI_CAPID 0x24
+#define MSI_ADDRESS 0x25
+#define MSI_DATA 0x26
+#define INTR_CTL 0x27
+
+#define PCI_MSI_CAPID 0x90
+#define PCI_MSI_ADDR 0x94
+#define PCI_MSI_DATA 0x98
+#define PCI_INTERRUPT_CTRL 0x9C
+#define PCI_I_CONTROL 0xfc
+
+/* MRFLD specific register definitions */
+#define MRFLD_CSI_AFE 0x39
+#define MRFLD_CSI_CONTROL 0x3a
+#define MRFLD_CSI_RCOMP 0x3d
+
+#define MRFLD_PCI_PMCS 0x84
+#define MRFLD_PCI_CSI_ACCESS_CTRL_VIOL 0xd4
+#define MRFLD_PCI_CSI_AFE_HS_CONTROL 0xdc
+#define MRFLD_PCI_CSI_AFE_RCOMP_CONTROL 0xe0
+#define MRFLD_PCI_CSI_CONTROL 0xe8
+#define MRFLD_PCI_CSI_AFE_TRIM_CONTROL 0xe4
+#define MRFLD_PCI_CSI_DEADLINE_CONTROL 0xec
+#define MRFLD_PCI_CSI_RCOMP_CONTROL 0xf4
+
+/* Select Arasan (legacy)/Intel input system */
+#define MRFLD_PCI_CSI_CONTROL_PARPATHEN BIT(24)
+/* Enable CSI interface (ANN B0/K0) */
+#define MRFLD_PCI_CSI_CONTROL_CSI_READY BIT(25)
+
+/*
+ * Enables the combining of adjacent 32-byte read requests to the same
+ * cache line. When cleared, each 32-byte read request is sent as a
+ * separate request on the IB interface.
+ */
+#define MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING 0x1
+
+/*
+ * Register: MRFLD_PCI_CSI_RCOMP_CONTROL
+ * If cleared, the high speed clock going to the digital logic is gated when
+ * RCOMP update is happening. The clock is gated for a minimum of 100 nsec.
+ * If this bit is set, then the high speed clock is not gated during the
+ * update cycle.
+ */
+#define MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE 0x800000
+
+/*
+ * Enables the combining of adjacent 32-byte write requests to the same
+ * cache line. When cleared, each 32-byte write request is sent as a
+ * separate request on the IB interface.
+ */
+#define MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING 0x2
+
+#define MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK 0xc
+
+#define MRFLD_PCI_CSI1_HSRXCLKTRIM 0x2
+#define MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT 16
+#define MRFLD_PCI_CSI2_HSRXCLKTRIM 0x3
+#define MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT 24
+#define MRFLD_PCI_CSI3_HSRXCLKTRIM 0x2
+#define MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT 28
+#define MRFLD_PCI_CSI_HSRXCLKTRIM_MASK 0xf
+
+/*
+ * This is an IUNIT MMIO register; it is used to select the CSI
+ * receiver backend.
+ * 1: SH CSI backend
+ * 0: Arasan CSI backend
+ */
+#define MRFLD_CSI_RECEIVER_SELECTION_REG 0x8081c
+
+#define MRFLD_INTR_CLEAR_REG 0x50c
+#define MRFLD_INTR_STATUS_REG 0x508
+#define MRFLD_INTR_ENABLE_REG 0x510
+
+#define MRFLD_MAX_ZOOM_FACTOR 1024
+
+/* MRFLD ISP POWER related */
+#define MRFLD_ISPSSPM0 0x39
+#define MRFLD_ISPSSPM0_ISPSSC_OFFSET 0
+#define MRFLD_ISPSSPM0_ISPSSS_OFFSET 24
+#define MRFLD_ISPSSPM0_ISPSSC_MASK 0x3
+#define MRFLD_ISPSSPM0_IUNIT_POWER_ON 0
+#define MRFLD_ISPSSPM0_IUNIT_POWER_OFF 0x3
+#define MRFLD_ISPSSDVFS 0x13F
+#define MRFLD_BIT0 0x0001
+#define MRFLD_BIT1 0x0002
+
+/* MRFLD CSI lane configuration related */
+#define MRFLD_PORT_CONFIG_NUM 8
+#define MRFLD_PORT_NUM 3
+#define MRFLD_PORT1_ENABLE_SHIFT 0
+#define MRFLD_PORT2_ENABLE_SHIFT 1
+#define MRFLD_PORT3_ENABLE_SHIFT 2
+#define MRFLD_PORT1_LANES_SHIFT 3
+#define MRFLD_PORT2_LANES_SHIFT 7
+#define MRFLD_PORT3_LANES_SHIFT 8
+#define MRFLD_PORT_CONFIG_MASK 0x000f03ff
+#define MRFLD_PORT_CONFIGCODE_SHIFT 16
+#define MRFLD_ALL_CSI_PORTS_OFF_MASK 0x7
+
+#define CHV_PORT3_LANES_SHIFT 9
+#define CHV_PORT_CONFIG_MASK 0x1f07ff
+
+#define ISPSSPM1 0x3a
+#define ISP_FREQ_STAT_MASK (0x1f << ISP_FREQ_STAT_OFFSET)
+#define ISP_REQ_FREQ_MASK 0x1f
+#define ISP_FREQ_VALID_MASK (0x1 << ISP_FREQ_VALID_OFFSET)
+#define ISP_FREQ_STAT_OFFSET 0x18
+#define ISP_REQ_GUAR_FREQ_OFFSET 0x8
+#define ISP_REQ_FREQ_OFFSET 0x0
+#define ISP_FREQ_VALID_OFFSET 0x7
+#define ISP_FREQ_RULE_ANY 0x0
+
+#define ISP_FREQ_457MHZ 0x1C9
+#define ISP_FREQ_400MHZ 0x190
+#define ISP_FREQ_356MHZ 0x164
+#define ISP_FREQ_320MHZ 0x140
+#define ISP_FREQ_266MHZ 0x10a
+#define ISP_FREQ_200MHZ 0xc8
+#define ISP_FREQ_100MHZ 0x64
+
+#define HPLL_FREQ_800MHZ 0x320
+#define HPLL_FREQ_1600MHZ 0x640
+#define HPLL_FREQ_2000MHZ 0x7D0
+
+#define CCK_FUSE_REG_0 0x08
+#define CCK_FUSE_HPLL_FREQ_MASK 0x03
+
+/* ISP2401 CSI2+ receiver delay settings */
+#define CSI2_PORT_A_BASE 0xC0000
+#define CSI2_PORT_B_BASE 0xC2000
+#define CSI2_PORT_C_BASE 0xC4000
+
+#define CSI2_LANE_CL_BASE 0x418
+#define CSI2_LANE_D0_BASE 0x420
+#define CSI2_LANE_D1_BASE 0x428
+#define CSI2_LANE_D2_BASE 0x430
+#define CSI2_LANE_D3_BASE 0x438
+
+#define CSI2_REG_RX_CSI_DLY_CNT_TERMEN 0
+#define CSI2_REG_RX_CSI_DLY_CNT_SETTLE 0x4
+
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC0418
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC041C
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC0420
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC0424
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC0428
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC042C
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE2 0xC0430
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE2 0xC0434
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE3 0xC0438
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE3 0xC043C
+
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC2418
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC241C
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC2420
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC2424
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC2428
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC242C
+
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC4418
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC441C
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC4420
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC4424
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC4428
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC442C
+
+#define DMA_BURST_SIZE_REG 0xCD408
+
+#define ISP_DFS_TRY_TIMES 2
+
+#endif /* ATOMISP_REGS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c
new file mode 100644
index 000000000000..8d575eb0a73f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c
@@ -0,0 +1,605 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+/*
+ * This file implements loadable acceleration firmware API,
+ * including ioctls to map and unmap acceleration parameters and buffers.
+ */
+
+#include <linux/init.h>
+#include <media/v4l2-event.h>
+
+#include "atomisp_acc.h"
+#include "atomisp_internal.h"
+#include "atomisp_compat.h"
+#include "atomisp_cmd.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+#include "memory_access/memory_access.h"
+#include "ia_css.h"
+
+static const struct {
+ unsigned int flag;
+ enum atomisp_css_pipe_id pipe_id;
+} acc_flag_to_pipe[] = {
+ { ATOMISP_ACC_FW_LOAD_FL_PREVIEW, CSS_PIPE_ID_PREVIEW },
+ { ATOMISP_ACC_FW_LOAD_FL_COPY, CSS_PIPE_ID_COPY },
+ { ATOMISP_ACC_FW_LOAD_FL_VIDEO, CSS_PIPE_ID_VIDEO },
+ { ATOMISP_ACC_FW_LOAD_FL_CAPTURE, CSS_PIPE_ID_CAPTURE },
+ { ATOMISP_ACC_FW_LOAD_FL_ACC, CSS_PIPE_ID_ACC }
+};
+
+/*
+ * Allocate struct atomisp_acc_fw along with space for firmware.
+ * The returned struct atomisp_acc_fw is cleared (firmware region is not).
+ */
+static struct atomisp_acc_fw *acc_alloc_fw(unsigned int fw_size)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ acc_fw = kzalloc(sizeof(*acc_fw), GFP_KERNEL);
+ if (!acc_fw)
+ return NULL;
+
+ acc_fw->fw = vmalloc(fw_size);
+ if (!acc_fw->fw) {
+ kfree(acc_fw);
+ return NULL;
+ }
+
+ return acc_fw;
+}
+
+static void acc_free_fw(struct atomisp_acc_fw *acc_fw)
+{
+ vfree(acc_fw->fw);
+ kfree(acc_fw);
+}
+
+static struct atomisp_acc_fw *
+acc_get_fw(struct atomisp_sub_device *asd, unsigned int handle)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ list_for_each_entry(acc_fw, &asd->acc.fw, list)
+ if (acc_fw->handle == handle)
+ return acc_fw;
+
+ return NULL;
+}
+
+static struct atomisp_map *acc_get_map(struct atomisp_sub_device *asd,
+ unsigned long css_ptr, size_t length)
+{
+ struct atomisp_map *atomisp_map;
+
+ list_for_each_entry(atomisp_map, &asd->acc.memory_maps, list) {
+ if (atomisp_map->ptr == css_ptr &&
+ atomisp_map->length == length)
+ return atomisp_map;
+ }
+ return NULL;
+}
+
+static int acc_stop_acceleration(struct atomisp_sub_device *asd)
+{
+ int ret;
+
+ ret = atomisp_css_stop_acc_pipe(asd);
+ atomisp_css_destroy_acc_pipe(asd);
+
+ return ret;
+}
+
+void atomisp_acc_cleanup(struct atomisp_device *isp)
+{
+ int i;
+
+ for (i = 0; i < isp->num_of_streams; i++)
+ ida_destroy(&isp->asd[i].acc.ida);
+}
+
+void atomisp_acc_release(struct atomisp_sub_device *asd)
+{
+ struct atomisp_acc_fw *acc_fw, *ta;
+ struct atomisp_map *atomisp_map, *tm;
+
+ /* Stop acceleration if already running */
+ if (asd->acc.pipeline)
+ acc_stop_acceleration(asd);
+
+ /* Unload all loaded acceleration binaries */
+ list_for_each_entry_safe(acc_fw, ta, &asd->acc.fw, list) {
+ list_del(&acc_fw->list);
+ ida_free(&asd->acc.ida, acc_fw->handle);
+ acc_free_fw(acc_fw);
+ }
+
+ /* Free all mapped memory blocks */
+ list_for_each_entry_safe(atomisp_map, tm, &asd->acc.memory_maps, list) {
+ list_del(&atomisp_map->list);
+ hmm_free(atomisp_map->ptr);
+ kfree(atomisp_map);
+ }
+}
+
+int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_acc_fw_load_to_pipe *user_fw)
+{
+ static const unsigned int pipeline_flags =
+ ATOMISP_ACC_FW_LOAD_FL_PREVIEW | ATOMISP_ACC_FW_LOAD_FL_COPY |
+ ATOMISP_ACC_FW_LOAD_FL_VIDEO |
+ ATOMISP_ACC_FW_LOAD_FL_CAPTURE | ATOMISP_ACC_FW_LOAD_FL_ACC;
+
+ struct atomisp_acc_fw *acc_fw;
+ int handle;
+
+ if (!user_fw->data || user_fw->size < sizeof(*acc_fw->fw))
+ return -EINVAL;
+
+ /* Binary has to be enabled at least for one pipeline */
+ if (!(user_fw->flags & pipeline_flags))
+ return -EINVAL;
+
+ /* We do not support other flags yet */
+ if (user_fw->flags & ~pipeline_flags)
+ return -EINVAL;
+
+ if (user_fw->type < ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT ||
+ user_fw->type > ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE)
+ return -EINVAL;
+
+ if (asd->acc.pipeline || asd->acc.extension_mode)
+ return -EBUSY;
+
+ acc_fw = acc_alloc_fw(user_fw->size);
+ if (!acc_fw)
+ return -ENOMEM;
+
+ if (copy_from_user(acc_fw->fw, user_fw->data, user_fw->size)) {
+ acc_free_fw(acc_fw);
+ return -EFAULT;
+ }
+
+ handle = ida_alloc(&asd->acc.ida, GFP_KERNEL);
+ if (handle < 0) {
+ acc_free_fw(acc_fw);
+ return -ENOSPC;
+ }
+
+ user_fw->fw_handle = handle;
+ acc_fw->handle = handle;
+ acc_fw->flags = user_fw->flags;
+ acc_fw->type = user_fw->type;
+ acc_fw->fw->handle = handle;
+
+ /*
+ * Correct the ISP firmware type so that the firmware can be appended
+ * to the correct pipe properly.
+ */
+ if (acc_fw->fw->type == ia_css_isp_firmware) {
+ static const int type_to_css[] = {
+ [ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT] =
+ IA_CSS_ACC_OUTPUT,
+ [ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER] =
+ IA_CSS_ACC_VIEWFINDER,
+ [ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE] =
+ IA_CSS_ACC_STANDALONE,
+ };
+ acc_fw->fw->info.isp.type = type_to_css[acc_fw->type];
+ }
+
+ list_add_tail(&acc_fw->list, &asd->acc.fw);
+ return 0;
+}
+
+int atomisp_acc_load(struct atomisp_sub_device *asd,
+ struct atomisp_acc_fw_load *user_fw)
+{
+ struct atomisp_acc_fw_load_to_pipe ltp = {0};
+ int r;
+
+ ltp.flags = ATOMISP_ACC_FW_LOAD_FL_ACC;
+ ltp.type = ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE;
+ ltp.size = user_fw->size;
+ ltp.data = user_fw->data;
+ r = atomisp_acc_load_to_pipe(asd, &ltp);
+ user_fw->fw_handle = ltp.fw_handle;
+ return r;
+}
+
+int atomisp_acc_unload(struct atomisp_sub_device *asd, unsigned int *handle)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ if (asd->acc.pipeline || asd->acc.extension_mode)
+ return -EBUSY;
+
+ acc_fw = acc_get_fw(asd, *handle);
+ if (!acc_fw)
+ return -EINVAL;
+
+ list_del(&acc_fw->list);
+ ida_free(&asd->acc.ida, acc_fw->handle);
+ acc_free_fw(acc_fw);
+
+ return 0;
+}
+
+int atomisp_acc_start(struct atomisp_sub_device *asd, unsigned int *handle)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_acc_fw *acc_fw;
+ int ret;
+ unsigned int nbin;
+
+ if (asd->acc.pipeline || asd->acc.extension_mode)
+ return -EBUSY;
+
+ /* Invalidate caches. FIXME: should flush only necessary buffers */
+ wbinvd();
+
+ ret = atomisp_css_create_acc_pipe(asd);
+ if (ret)
+ return ret;
+
+ nbin = 0;
+ list_for_each_entry(acc_fw, &asd->acc.fw, list) {
+ if (*handle != 0 && *handle != acc_fw->handle)
+ continue;
+
+ if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE)
+ continue;
+
+ /* Add the binary into the pipeline */
+ ret = atomisp_css_load_acc_binary(asd, acc_fw->fw, nbin);
+ if (ret < 0) {
+ dev_err(isp->dev, "acc_load_binary failed\n");
+ goto err_stage;
+ }
+
+ ret = atomisp_css_set_acc_parameters(acc_fw);
+ if (ret < 0) {
+ dev_err(isp->dev, "acc_set_parameters failed\n");
+ goto err_stage;
+ }
+ nbin++;
+ }
+ if (nbin < 1) {
+ /* Refuse creating pipelines with no binaries */
+ dev_err(isp->dev, "%s: no acc binary available\n", __func__);
+ ret = -EINVAL;
+ goto err_stage;
+ }
+
+ ret = atomisp_css_start_acc_pipe(asd);
+ if (ret) {
+ dev_err(isp->dev, "%s: atomisp_acc_start_acc_pipe failed\n",
+ __func__);
+ goto err_stage;
+ }
+
+ return 0;
+
+err_stage:
+ atomisp_css_destroy_acc_pipe(asd);
+ return ret;
+}
+
+int atomisp_acc_wait(struct atomisp_sub_device *asd, unsigned int *handle)
+{
+ struct atomisp_device *isp = asd->isp;
+ int ret;
+
+ if (!asd->acc.pipeline)
+ return -ENOENT;
+
+ if (*handle && !acc_get_fw(asd, *handle))
+ return -EINVAL;
+
+ ret = atomisp_css_wait_acc_finish(asd);
+ if (acc_stop_acceleration(asd) == -EIO) {
+ atomisp_reset(isp);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle)
+{
+ struct v4l2_event event = { 0 };
+
+ event.type = V4L2_EVENT_ATOMISP_ACC_COMPLETE;
+ event.u.frame_sync.frame_sequence = atomic_read(&asd->sequence);
+ event.id = handle;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+int atomisp_acc_map(struct atomisp_sub_device *asd, struct atomisp_acc_map *map)
+{
+ struct atomisp_map *atomisp_map;
+ ia_css_ptr cssptr;
+ int pgnr;
+
+ if (map->css_ptr)
+ return -EINVAL;
+
+ if (asd->acc.pipeline)
+ return -EBUSY;
+
+ if (map->user_ptr) {
+ /* Buffer to map must be page-aligned */
+ if ((unsigned long)map->user_ptr & ~PAGE_MASK) {
+ dev_err(asd->isp->dev,
+ "%s: mapped buffer address %p is not page aligned\n",
+ __func__, map->user_ptr);
+ return -EINVAL;
+ }
+
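+		/* Pages backing the user buffer: e.g. a 10000-byte buffer
+		 * with 4 KiB pages (page size assumed for illustration)
+		 * needs DIV_ROUND_UP(10000, 4096) = 3 pages. */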
+ pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE);
+ cssptr = hrt_isp_css_mm_alloc_user_ptr(map->length,
+ map->user_ptr,
+ pgnr, HRT_USR_PTR,
+ (map->flags & ATOMISP_MAP_FLAG_CACHED));
+ } else {
+ /* Allocate private buffer. */
+ if (map->flags & ATOMISP_MAP_FLAG_CACHED)
+ cssptr = hrt_isp_css_mm_calloc_cached(map->length);
+ else
+ cssptr = hrt_isp_css_mm_calloc(map->length);
+ }
+
+ if (!cssptr)
+ return -ENOMEM;
+
+ atomisp_map = kmalloc(sizeof(*atomisp_map), GFP_KERNEL);
+ if (!atomisp_map) {
+ hmm_free(cssptr);
+ return -ENOMEM;
+ }
+ atomisp_map->ptr = cssptr;
+ atomisp_map->length = map->length;
+ list_add(&atomisp_map->list, &asd->acc.memory_maps);
+
+ dev_dbg(asd->isp->dev, "%s: userptr %p, css_address 0x%x, size %d\n",
+ __func__, map->user_ptr, cssptr, map->length);
+ map->css_ptr = cssptr;
+ return 0;
+}
+
+int atomisp_acc_unmap(struct atomisp_sub_device *asd,
+ struct atomisp_acc_map *map)
+{
+ struct atomisp_map *atomisp_map;
+
+ if (asd->acc.pipeline)
+ return -EBUSY;
+
+ atomisp_map = acc_get_map(asd, map->css_ptr, map->length);
+ if (!atomisp_map)
+ return -EINVAL;
+
+ list_del(&atomisp_map->list);
+ hmm_free(atomisp_map->ptr);
+ kfree(atomisp_map);
+ return 0;
+}
+
+int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd,
+ struct atomisp_acc_s_mapped_arg *arg)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ if (arg->memory >= ATOMISP_ACC_NR_MEMORY)
+ return -EINVAL;
+
+ if (asd->acc.pipeline)
+ return -EBUSY;
+
+ acc_fw = acc_get_fw(asd, arg->fw_handle);
+ if (!acc_fw)
+ return -EINVAL;
+
+ if (arg->css_ptr != 0 || arg->length != 0) {
+ /* Unless the parameter is cleared, check that it exists */
+ if (!acc_get_map(asd, arg->css_ptr, arg->length))
+ return -EINVAL;
+ }
+
+ acc_fw->args[arg->memory].length = arg->length;
+ acc_fw->args[arg->memory].css_ptr = arg->css_ptr;
+
+ dev_dbg(asd->isp->dev, "%s: mem %d, address %p, size %ld\n",
+ __func__, arg->memory, (void *)arg->css_ptr,
+ (unsigned long)arg->length);
+ return 0;
+}
+
+/*
+ * Appends the loaded acceleration binary extensions to the
+ * current ISP mode. Must be called just before sh_css_start().
+ */
+int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
+{
+ struct atomisp_acc_fw *acc_fw;
+ bool ext_loaded = false;
+ bool continuous = asd->continuous_mode->val &&
+ asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW;
+ int ret = 0, i = -1;
+ struct atomisp_device *isp = asd->isp;
+
+ if (asd->acc.pipeline || asd->acc.extension_mode)
+ return -EBUSY;
+
+ /* Invalidate caches. FIXME: should flush only necessary buffers */
+ wbinvd();
+
+ list_for_each_entry(acc_fw, &asd->acc.fw, list) {
+ if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
+ acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) {
+ /* QoS (ACC pipe) acceleration stages are currently
+ * allowed only in continuous mode. Skip them for
+ * all other modes. */
+ if (!continuous &&
+ acc_flag_to_pipe[i].flag ==
+ ATOMISP_ACC_FW_LOAD_FL_ACC)
+ continue;
+
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ ret = atomisp_css_load_acc_extension(asd,
+ acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id,
+ acc_fw->type);
+ if (ret)
+ goto error;
+
+ ext_loaded = true;
+ }
+ }
+
+ ret = atomisp_css_set_acc_parameters(acc_fw);
+ if (ret < 0)
+ goto error;
+ }
+
+ if (!ext_loaded)
+ return ret;
+
+ ret = atomisp_css_update_stream(asd);
+ if (ret) {
+ dev_err(isp->dev, "%s: update stream failed.\n", __func__);
+ goto error;
+ }
+
+ asd->acc.extension_mode = true;
+ return 0;
+
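+	/*
+	 * Unwind: first unload the extensions already loaded for the entry
+	 * that failed, then walk back through the list and unload the
+	 * extensions of all previously processed entries.
+	 */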
+error:
+ while (--i >= 0) {
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ atomisp_css_unload_acc_extension(asd, acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id);
+ }
+ }
+
+ list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) {
+ if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
+ acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
+ continue;
+
+ for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) {
+ if (!continuous &&
+ acc_flag_to_pipe[i].flag ==
+ ATOMISP_ACC_FW_LOAD_FL_ACC)
+ continue;
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ atomisp_css_unload_acc_extension(asd,
+ acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id);
+ }
+ }
+ }
+ return ret;
+}
+
+void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd)
+{
+ struct atomisp_acc_fw *acc_fw;
+ int i;
+
+ if (!asd->acc.extension_mode)
+ return;
+
+ list_for_each_entry_reverse(acc_fw, &asd->acc.fw, list) {
+ if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
+ acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
+ continue;
+
+ for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) {
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ atomisp_css_unload_acc_extension(asd,
+ acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id);
+ }
+ }
+ }
+
+ asd->acc.extension_mode = false;
+}
+
+int atomisp_acc_set_state(struct atomisp_sub_device *asd,
+ struct atomisp_acc_state *arg)
+{
+ struct atomisp_acc_fw *acc_fw;
+ bool enable = (arg->flags & ATOMISP_STATE_FLAG_ENABLE) != 0;
+ struct ia_css_pipe *pipe;
+ enum ia_css_err r;
+ int i;
+
+ if (!asd->acc.extension_mode)
+ return -EBUSY;
+
+ if (arg->flags & ~ATOMISP_STATE_FLAG_ENABLE)
+ return -EINVAL;
+
+ acc_fw = acc_get_fw(asd, arg->fw_handle);
+ if (!acc_fw)
+ return -EINVAL;
+
+ if (enable)
+ wbinvd();
+
+ for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) {
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ pipe = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ pipes[acc_flag_to_pipe[i].pipe_id];
+ r = ia_css_pipe_set_qos_ext_state(pipe, acc_fw->handle,
+ enable);
+ if (r != IA_CSS_SUCCESS)
+ return -EBADRQC;
+ }
+ }
+
+ if (enable)
+ acc_fw->flags |= ATOMISP_ACC_FW_LOAD_FL_ENABLE;
+ else
+ acc_fw->flags &= ~ATOMISP_ACC_FW_LOAD_FL_ENABLE;
+
+ return 0;
+}
+
+int atomisp_acc_get_state(struct atomisp_sub_device *asd,
+ struct atomisp_acc_state *arg)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ if (!asd->acc.extension_mode)
+ return -EBUSY;
+
+ acc_fw = acc_get_fw(asd, arg->fw_handle);
+ if (!acc_fw)
+ return -EINVAL;
+
+ arg->flags = acc_fw->flags;
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.h b/drivers/staging/media/atomisp/pci/atomisp_acc.h
new file mode 100644
index 000000000000..ba14181962f8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_acc.h
@@ -0,0 +1,119 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_ACC_H__
+#define __ATOMISP_ACC_H__
+
+#include "../../include/linux/atomisp.h"
+#include "atomisp_internal.h"
+
+#include "ia_css_types.h"
+
+/*
+ * Interface functions for AtomISP driver acceleration API implementation.
+ */
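+
+/*
+ * Typical call sequence (a sketch; the exact order depends on the ioctl
+ * caller):
+ *
+ *   atomisp_acc_load_to_pipe()  - load a firmware binary
+ *   atomisp_acc_map()           - map argument buffers into ISP memory
+ *   atomisp_acc_s_mapped_arg()  - bind mapped buffers to the binary
+ *   atomisp_acc_start()         - start the acceleration pipeline
+ *   atomisp_acc_wait()          - wait until acceleration finishes
+ *   atomisp_acc_unmap()         - release the mappings
+ *   atomisp_acc_unload()        - unload the binary
+ */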
+
+struct atomisp_sub_device;
+
+void atomisp_acc_cleanup(struct atomisp_device *isp);
+
+/*
+ * Free up any allocated resources.
+ * Must be called each time the device is closed.
+ * Note that there is no corresponding open() call;
+ * this function may be called sequentially multiple times.
+ * Must be called to free up resources before the driver is unloaded.
+ */
+void atomisp_acc_release(struct atomisp_sub_device *asd);
+
+/* Load acceleration binary. DEPRECATED. */
+int atomisp_acc_load(struct atomisp_sub_device *asd,
+ struct atomisp_acc_fw_load *fw);
+
+/* Load acceleration binary with specified properties */
+int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_acc_fw_load_to_pipe *fw);
+
+/* Unload specified acceleration binary */
+int atomisp_acc_unload(struct atomisp_sub_device *asd,
+ unsigned int *handle);
+
+/*
+ * Map a memory region into ISP memory space.
+ */
+int atomisp_acc_map(struct atomisp_sub_device *asd,
+ struct atomisp_acc_map *map);
+
+/*
+ * Unmap a mapped memory region.
+ */
+int atomisp_acc_unmap(struct atomisp_sub_device *asd,
+ struct atomisp_acc_map *map);
+
+/*
+ * Set acceleration binary argument to a previously mapped memory region.
+ */
+int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd,
+ struct atomisp_acc_s_mapped_arg *arg);
+
+/*
+ * Start acceleration.
+ * Returns immediately; acceleration is left running in the background.
+ * Specify either the acceleration binary or the pipeline to start.
+ */
+int atomisp_acc_start(struct atomisp_sub_device *asd,
+ unsigned int *handle);
+
+/*
+ * Wait until acceleration finishes.
+ * This MUST be called after each acceleration has been started.
+ * Specify either the acceleration binary or the pipeline handle.
+ */
+int atomisp_acc_wait(struct atomisp_sub_device *asd,
+ unsigned int *handle);
+
+/*
+ * Used by the ISR to notify that an ACC stage has finished.
+ * This is used internally only and is not exported as an IOCTL.
+ */
+void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle);
+
+/*
+ * Appends the loaded acceleration binary extensions to the
+ * current ISP mode. Must be called just before atomisp_css_start().
+ */
+int atomisp_acc_load_extensions(struct atomisp_sub_device *asd);
+
+/*
+ * Must be called after streaming is stopped:
+ * unloads any loaded acceleration extensions.
+ */
+void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd);
+
+/*
+ * Set acceleration firmware flags.
+ */
+int atomisp_acc_set_state(struct atomisp_sub_device *asd,
+ struct atomisp_acc_state *arg);
+
+/*
+ * Get acceleration firmware flags.
+ */
+int atomisp_acc_get_state(struct atomisp_sub_device *asd,
+ struct atomisp_acc_state *arg);
+
+#endif /* __ATOMISP_ACC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
new file mode 100644
index 000000000000..5be690f876c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -0,0 +1,6629 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+
+#include <asm/iosf_mbi.h>
+
+#include <media/v4l2-event.h>
+#include <media/videobuf-vmalloc.h>
+
+#define CREATE_TRACE_POINTS
+#include "atomisp_trace_event.h"
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp-regs.h"
+#include "atomisp_tables.h"
+#include "atomisp_acc.h"
+#include "atomisp_compat.h"
+#include "atomisp_subdev.h"
+#include "atomisp_dfs_tables.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+#include "sh_css_hrt.h"
+#include "sh_css_defs.h"
+#include "system_global.h"
+#include "sh_css_internal.h"
+#include "sh_css_sp.h"
+#include "gp_device.h"
+#include "device_access.h"
+#include "irq.h"
+
+#include "ia_css_types.h"
+#include "ia_css_stream.h"
+#include "error_support.h"
+#include "bits.h"
+
+/* We should never need to run the flash for more than 2 frames.
+ * At 15fps this means 133ms. We set the timeout a bit longer.
+ * Each flash driver is supposed to set its own timeout, but
+ * just in case someone else changed the timeout, we set it
+ * here to make sure we don't damage the flash hardware. */
+#define FLASH_TIMEOUT 800 /* ms */
+
+union host {
+ struct {
+ void *kernel_ptr;
+ void __user *user_ptr;
+ int size;
+ } scalar;
+ struct {
+ void *hmm_ptr;
+ } ptr;
+};
+
+/*
+ * get sensor (dis71430/ov2720) related info from the v4l2_subdev->priv data field.
+ * subdev->priv is set in mrst.c
+ */
+struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd)
+{
+ return (struct camera_mipi_info *)v4l2_get_subdev_hostdata(sd);
+}
+
+/*
+ * get struct atomisp_video_pipe from v4l2 video_device
+ */
+struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev)
+{
+ return (struct atomisp_video_pipe *)
+ container_of(dev, struct atomisp_video_pipe, vdev);
+}
+
+/*
+ * get struct atomisp_acc_pipe from v4l2 video_device
+ */
+struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev)
+{
+ return (struct atomisp_acc_pipe *)
+ container_of(dev, struct atomisp_acc_pipe, vdev);
+}
+
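+/*
+ * Query the sensor's current frame interval and convert it to frames per
+ * second, e.g. an interval of 1/30 s yields 30 fps. Returns 0 when the
+ * rate cannot be determined.
+ */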
+static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd)
+{
+ struct v4l2_subdev_frame_interval fi;
+ struct atomisp_device *isp = asd->isp;
+
+ unsigned short fps = 0;
+ int ret;
+
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, g_frame_interval, &fi);
+
+ if (!ret && fi.interval.numerator)
+ fps = fi.interval.denominator / fi.interval.numerator;
+
+ return fps;
+}
+
+/*
+ * The DFS procedure is as follows:
+ * 1. The target frequency is calculated according to FPS/resolution/ISP
+ * running mode.
+ * 2. The ratio is calculated using the formula 2 * HPLL / target
+ * frequency - 1, with proper rounding.
+ * 3. Set the ratio in ISPFREQ40, set FREQVALID to 1 and set
+ * ISPFREQGUAR40 to 200MHz in ISPSSPM1.
+ * 4. Wait for FREQVALID to be cleared by the P-Unit.
+ * 5. Wait for the ISPFREQSTAT40 field in ISPSSPM1 to turn to the ratio
+ * set in step 3.
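+ *
+ * A worked example of step 2 (HPLL value assumed for illustration): with
+ * HPLL at 1600 MHz and a target of 400 MHz, ratio = 2 * 1600 / 400 - 1 = 7,
+ * which is what the rounded integer expression below computes.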
+ */
+static int write_target_freq_to_hw(struct atomisp_device *isp,
+ unsigned int new_freq)
+{
+ unsigned int ratio, timeout, guar_ratio;
+ u32 isp_sspm1 = 0;
+ int i;
+
+ if (!isp->hpll_freq) {
+ dev_err(isp->dev, "failed to get hpll_freq. no change to freq\n");
+ return -EINVAL;
+ }
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ if (isp_sspm1 & ISP_FREQ_VALID_MASK) {
+ dev_dbg(isp->dev, "clearing ISPSSPM1 valid bit.\n");
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
+ isp_sspm1 & ~(1 << ISP_FREQ_VALID_OFFSET));
+ }
+
+ ratio = (2 * isp->hpll_freq + new_freq / 2) / new_freq - 1;
+ guar_ratio = (2 * isp->hpll_freq + 200 / 2) / 200 - 1;
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ isp_sspm1 &= ~(0x1F << ISP_REQ_FREQ_OFFSET);
+
+ for (i = 0; i < ISP_DFS_TRY_TIMES; i++) {
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
+ isp_sspm1
+ | ratio << ISP_REQ_FREQ_OFFSET
+ | 1 << ISP_FREQ_VALID_OFFSET
+ | guar_ratio << ISP_REQ_GUAR_FREQ_OFFSET);
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ timeout = 20;
+ while ((isp_sspm1 & ISP_FREQ_VALID_MASK) && timeout) {
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ dev_dbg(isp->dev, "waiting for ISPSSPM1 valid bit to be 0.\n");
+ udelay(100);
+ timeout--;
+ }
+
+ if (timeout != 0)
+ break;
+ }
+
+ if (timeout == 0) {
+ dev_err(isp->dev, "DFS failed due to HW error.\n");
+ return -EINVAL;
+ }
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ timeout = 10;
+ while (((isp_sspm1 >> ISP_FREQ_STAT_OFFSET) != ratio) && timeout) {
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ dev_dbg(isp->dev, "waiting for ISPSSPM1 status bit to be 0x%x.\n",
+ new_freq);
+ udelay(100);
+ timeout--;
+ }
+ if (timeout == 0) {
+ dev_err(isp->dev, "DFS target freq is rejected by HW.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_freq_scaling(struct atomisp_device *isp,
+ enum atomisp_dfs_mode mode,
+ bool force)
+{
+	/* FIXME! Only subdev[0] status is used for now */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+ const struct atomisp_dfs_config *dfs;
+ unsigned int new_freq;
+ struct atomisp_freq_scaling_rule curr_rules;
+ int i, ret;
+ unsigned short fps = 0;
+
+ if (isp->sw_contex.power_state != ATOM_ISP_POWER_UP) {
+ dev_err(isp->dev, "DFS cannot proceed due to no power.\n");
+ return -EINVAL;
+ }
+
+ if ((isp->pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) ==
+ ATOMISP_PCI_DEVICE_SOC_CHT && ATOMISP_USE_YUVPP(asd))
+ isp->dfs = &dfs_config_cht_soc;
+
+ dfs = isp->dfs;
+
+ if (dfs->lowest_freq == 0 || dfs->max_freq_at_vmin == 0 ||
+ dfs->highest_freq == 0 || dfs->dfs_table_size == 0 ||
+ !dfs->dfs_table) {
+ dev_err(isp->dev, "DFS configuration is invalid.\n");
+ return -EINVAL;
+ }
+
+ if (mode == ATOMISP_DFS_MODE_LOW) {
+ new_freq = dfs->lowest_freq;
+ goto done;
+ }
+
+ if (mode == ATOMISP_DFS_MODE_MAX) {
+ new_freq = dfs->highest_freq;
+ goto done;
+ }
+
+ fps = atomisp_get_sensor_fps(asd);
+ if (fps == 0)
+ return -EINVAL;
+
+ curr_rules.width = asd->fmt[asd->capture_pad].fmt.width;
+ curr_rules.height = asd->fmt[asd->capture_pad].fmt.height;
+ curr_rules.fps = fps;
+ curr_rules.run_mode = asd->run_mode->val;
+ /*
+	 * In continuous mode, the capture settings must already be applied
+	 * in preview mode, because there is no chance to do this when
+	 * image capture starts.
+ */
+ if (asd->continuous_mode->val) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ curr_rules.run_mode = ATOMISP_RUN_MODE_SDV;
+ else
+ curr_rules.run_mode =
+ ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE;
+ }
+
+	/* search for the target frequency by looping over the freq rules */
+ for (i = 0; i < dfs->dfs_table_size; i++) {
+ if (curr_rules.width != dfs->dfs_table[i].width &&
+ dfs->dfs_table[i].width != ISP_FREQ_RULE_ANY)
+ continue;
+ if (curr_rules.height != dfs->dfs_table[i].height &&
+ dfs->dfs_table[i].height != ISP_FREQ_RULE_ANY)
+ continue;
+ if (curr_rules.fps != dfs->dfs_table[i].fps &&
+ dfs->dfs_table[i].fps != ISP_FREQ_RULE_ANY)
+ continue;
+ if (curr_rules.run_mode != dfs->dfs_table[i].run_mode &&
+ dfs->dfs_table[i].run_mode != ISP_FREQ_RULE_ANY)
+ continue;
+ break;
+ }
+
+ if (i == dfs->dfs_table_size)
+ new_freq = dfs->max_freq_at_vmin;
+ else
+ new_freq = dfs->dfs_table[i].isp_freq;
+
+done:
+ dev_dbg(isp->dev, "DFS target frequency=%d.\n", new_freq);
+
+ if ((new_freq == isp->sw_contex.running_freq) && !force)
+ return 0;
+
+ dev_dbg(isp->dev, "Programming DFS frequency to %d\n", new_freq);
+
+ ret = write_target_freq_to_hw(isp, new_freq);
+ if (!ret) {
+ isp->sw_contex.running_freq = new_freq;
+ trace_ipu_pstate(new_freq, -1);
+ }
+ return ret;
+}
+
+/*
+ * reset and restore ISP
+ */
+int atomisp_reset(struct atomisp_device *isp)
+{
+ /* Reset ISP by power-cycling it */
+ int ret = 0;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+ atomisp_css_suspend(isp);
+ ret = atomisp_runtime_suspend(isp->dev);
+ if (ret < 0)
+ dev_err(isp->dev, "atomisp_runtime_suspend failed, %d\n", ret);
+ ret = atomisp_mrfld_power_down(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "can not disable ISP power\n");
+ } else {
+ ret = atomisp_mrfld_power_up(isp);
+ if (ret < 0)
+ dev_err(isp->dev, "can not enable ISP power\n");
+ ret = atomisp_runtime_resume(isp->dev);
+ if (ret < 0)
+ dev_err(isp->dev, "atomisp_runtime_resume failed, %d\n", ret);
+ }
+ ret = atomisp_css_resume(isp);
+ if (ret)
+ isp->isp_fatal_error = true;
+
+ return ret;
+}
+
+/*
+ * interrupt disable functions
+ */
+static void disable_isp_irq(enum hrt_isp_css_irq irq)
+{
+ irq_disable_channel(IRQ0_ID, irq);
+
+ if (irq != hrt_isp_css_irq_sp)
+ return;
+
+ cnd_sp_irq_enable(SP0_ID, false);
+}
+
+/*
+ * interrupt clearing function
+ */
+static void clear_isp_irq(enum hrt_isp_css_irq irq)
+{
+ irq_clear_all(IRQ0_ID);
+}
+
+void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev)
+{
+ u32 msg32;
+ u16 msg16;
+
+ pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32);
+ msg32 |= 1 << MSI_ENABLE_BIT;
+ pci_write_config_dword(dev, PCI_MSI_CAPID, msg32);
+
+ msg32 = (1 << INTR_IER) | (1 << INTR_IIR);
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32);
+
+ pci_read_config_word(dev, PCI_COMMAND, &msg16);
+ msg16 |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INTX_DISABLE);
+ pci_write_config_word(dev, PCI_COMMAND, msg16);
+}
+
+void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev)
+{
+ u32 msg32;
+ u16 msg16;
+
+ pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32);
+ msg32 &= ~(1 << MSI_ENABLE_BIT);
+ pci_write_config_dword(dev, PCI_MSI_CAPID, msg32);
+
+ msg32 = 0x0;
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32);
+
+ pci_read_config_word(dev, PCI_COMMAND, &msg16);
+ msg16 &= ~(PCI_COMMAND_MASTER);
+ pci_write_config_word(dev, PCI_COMMAND, msg16);
+}
+
+static void atomisp_sof_event(struct atomisp_sub_device *asd)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_FRAME_SYNC;
+ event.u.frame_sync.frame_sequence = atomic_read(&asd->sof_count);
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_FRAME_END;
+ event.u.frame_sync.frame_sequence = exp_id;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void atomisp_3a_stats_ready_event(struct atomisp_sub_device *asd,
+ uint8_t exp_id)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_ATOMISP_3A_STATS_READY;
+ event.u.frame_sync.frame_sequence = exp_id;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void atomisp_metadata_ready_event(struct atomisp_sub_device *asd,
+ enum atomisp_metadata_type md_type)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_ATOMISP_METADATA_READY;
+ event.u.data[0] = md_type;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void atomisp_reset_event(struct atomisp_sub_device *asd)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_ATOMISP_CSS_RESET;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void print_csi_rx_errors(enum mipi_port_id port,
+ struct atomisp_device *isp)
+{
+ u32 infos = 0;
+
+ atomisp_css_rx_get_irq_info(port, &infos);
+
+ dev_err(isp->dev, "CSI Receiver port %d errors:\n", port);
+ if (infos & CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+ dev_err(isp->dev, " buffer overrun");
+ if (infos & CSS_RX_IRQ_INFO_ERR_SOT)
+ dev_err(isp->dev, " start-of-transmission error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+ dev_err(isp->dev, " start-of-transmission sync error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_CONTROL)
+ dev_err(isp->dev, " control error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+ dev_err(isp->dev, " 2 or more ECC errors");
+ if (infos & CSS_RX_IRQ_INFO_ERR_CRC)
+ dev_err(isp->dev, " CRC mismatch");
+ if (infos & CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+ dev_err(isp->dev, " unknown error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+ dev_err(isp->dev, " frame sync error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+ dev_err(isp->dev, " frame data error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+ dev_err(isp->dev, " data timeout");
+ if (infos & CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+ dev_err(isp->dev, " unknown escape command entry");
+ if (infos & CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+ dev_err(isp->dev, " line sync error");
+}
+
+/* Clear irq reg */
+static void clear_irq_reg(struct atomisp_device *isp)
+{
+ u32 msg_ret;
+
+ pci_read_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, &msg_ret);
+ msg_ret |= 1 << INTR_IIR;
+ pci_write_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, msg_ret);
+}
+
+static struct atomisp_sub_device *
+__get_asd_from_port(struct atomisp_device *isp, enum mipi_port_id port)
+{
+ int i;
+
+	/* Check which ISP subdev to send the EOF event to */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ struct camera_mipi_info *mipi_info;
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED &&
+ __get_mipi_port(isp, mipi_info->port) == port) {
+ return asd;
+ }
+ }
+
+ return NULL;
+}
+
+/* interrupt handling function */
+irqreturn_t atomisp_isr(int irq, void *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)dev;
+ struct atomisp_sub_device *asd;
+ struct atomisp_css_event eof_event;
+ unsigned int irq_infos = 0;
+ unsigned long flags;
+ unsigned int i;
+ int err;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (isp->sw_contex.power_state != ATOM_ISP_POWER_UP ||
+ !isp->css_initialized) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return IRQ_HANDLED;
+ }
+ err = atomisp_css_irq_translate(isp, &irq_infos);
+ if (err) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return IRQ_NONE;
+ }
+
+ dev_dbg(isp->dev, "irq:0x%x\n", irq_infos);
+
+ clear_irq_reg(isp);
+
+ if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp))
+ goto out_nowake;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+ /*
+		 * SOF currently supports only one stream, so a SOF event is
+		 * valid only when exactly one stream is running.
+ */
+ if (irq_infos & CSS_IRQ_INFO_CSS_RECEIVER_SOF) {
+ atomic_inc(&asd->sof_count);
+ atomisp_sof_event(asd);
+
+			/* If sequence_temp and sequence are the same,
+			 * there were no frames lost, so we can increase
+			 * sequence_temp.
+			 * If not, processing of the frame is still in progress
+			 * and the driver needs to keep the old sequence_temp value.
+			 * NOTE: The assumption here is that the ISP will not
+			 * start processing the next frame from the sensor before
+			 * the old one is completely done. */
+ if (atomic_read(&asd->sequence) == atomic_read(
+ &asd->sequence_temp))
+ atomic_set(&asd->sequence_temp,
+ atomic_read(&asd->sof_count));
+ }
+ if (irq_infos & CSS_IRQ_INFO_EVENTS_READY)
+ atomic_set(&asd->sequence,
+ atomic_read(&asd->sequence_temp));
+ }
+
+ if (irq_infos & CSS_IRQ_INFO_CSS_RECEIVER_SOF)
+ irq_infos &= ~CSS_IRQ_INFO_CSS_RECEIVER_SOF;
+
+ if ((irq_infos & CSS_IRQ_INFO_INPUT_SYSTEM_ERROR) ||
+ (irq_infos & CSS_IRQ_INFO_IF_ERROR)) {
+ /* handle mipi receiver error */
+ u32 rx_infos;
+ enum mipi_port_id port;
+
+ for (port = MIPI_PORT0_ID; port <= MIPI_PORT2_ID;
+ port++) {
+ print_csi_rx_errors(port, isp);
+ atomisp_css_rx_get_irq_info(port, &rx_infos);
+ atomisp_css_rx_clear_irq_info(port, rx_infos);
+ }
+ }
+
+ if (irq_infos & IA_CSS_IRQ_INFO_ISYS_EVENTS_READY) {
+ while (ia_css_dequeue_isys_event(&eof_event.event) ==
+ IA_CSS_SUCCESS) {
+ /* EOF Event does not have the css_pipe returned */
+ asd = __get_asd_from_port(isp, eof_event.event.port);
+ if (!asd) {
+ dev_err(isp->dev, "%s:no subdev.event:%d", __func__,
+ eof_event.event.type);
+ continue;
+ }
+
+ atomisp_eof_event(asd, eof_event.event.exp_id);
+ dev_dbg(isp->dev, "%s EOF exp_id %d, asd %d\n",
+ __func__, eof_event.event.exp_id, asd->index);
+ }
+
+ irq_infos &= ~IA_CSS_IRQ_INFO_ISYS_EVENTS_READY;
+ if (irq_infos == 0)
+ goto out_nowake;
+ }
+
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ return IRQ_WAKE_THREAD;
+
+out_nowake:
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd)
+{
+ int i;
+
+ memset(asd->s3a_bufs_in_css, 0, sizeof(asd->s3a_bufs_in_css));
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
+ memset(asd->metadata_bufs_in_css[i], 0,
+ sizeof(asd->metadata_bufs_in_css[i]));
+ asd->dis_bufs_in_css = 0;
+ asd->video_out_capture.buffers_in_css = 0;
+ asd->video_out_vf.buffers_in_css = 0;
+ asd->video_out_preview.buffers_in_css = 0;
+ asd->video_out_video_capture.buffers_in_css = 0;
+}
+
+/* ISP2400 */
+bool atomisp_buffers_queued(struct atomisp_sub_device *asd)
+{
+	return asd->video_out_capture.buffers_in_css ||
+	       asd->video_out_vf.buffers_in_css ||
+	       asd->video_out_preview.buffers_in_css ||
+	       asd->video_out_video_capture.buffers_in_css;
+}
+
+/* ISP2401 */
+bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe)
+{
+	return pipe->buffers_in_css;
+}
+
+/* 0x100000 is the start of dmem inside SP */
+#define SP_DMEM_BASE 0x100000
+
+void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
+ unsigned int size)
+{
+ unsigned int data = 0;
+ unsigned int size32 = DIV_ROUND_UP(size, sizeof(u32));
+
+ dev_dbg(isp->dev, "atomisp_io_base:%p\n", atomisp_io_base);
+ dev_dbg(isp->dev, "%s, addr:0x%x, size: %d, size32: %d\n", __func__,
+ addr, size, size32);
+ if (size32 * 4 + addr > 0x4000) {
+ dev_err(isp->dev, "illegal size (%d) or addr (0x%x)\n",
+ size32, addr);
+ return;
+ }
+ addr += SP_DMEM_BASE;
+ do {
+ data = _hrt_master_port_uload_32(addr);
+
+ dev_dbg(isp->dev, "%s, \t [0x%x]:0x%x\n", __func__, addr, data);
+ addr += sizeof(unsigned int);
+ size32 -= 1;
+ } while (size32 > 0);
+}
+
+static struct videobuf_buffer *atomisp_css_frame_to_vbuf(
+ struct atomisp_video_pipe *pipe, struct atomisp_css_frame *frame)
+{
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct atomisp_css_frame *handle;
+ int i;
+
+ for (i = 0; pipe->capq.bufs[i]; i++) {
+ vm_mem = pipe->capq.bufs[i]->priv;
+ handle = vm_mem->vaddr;
+ if (handle && handle->data == frame->data)
+ return pipe->capq.bufs[i];
+ }
+
+ return NULL;
+}
+
+static void atomisp_flush_video_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe)
+{
+ unsigned long irqflags;
+ int i;
+
+ if (!pipe->users)
+ return;
+
+ for (i = 0; pipe->capq.bufs[i]; i++) {
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ if (pipe->capq.bufs[i]->state == VIDEOBUF_ACTIVE ||
+ pipe->capq.bufs[i]->state == VIDEOBUF_QUEUED) {
+ pipe->capq.bufs[i]->ts = ktime_get_ns();
+ pipe->capq.bufs[i]->field_count =
+ atomic_read(&asd->sequence) << 1;
+ dev_dbg(asd->isp->dev, "release buffers on device %s\n",
+ pipe->vdev.name);
+ if (pipe->capq.bufs[i]->state == VIDEOBUF_QUEUED)
+ list_del_init(&pipe->capq.bufs[i]->queue);
+ pipe->capq.bufs[i]->state = VIDEOBUF_ERROR;
+ wake_up(&pipe->capq.bufs[i]->done);
+ }
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ }
+}
+
+/* Returns queued buffers back to video-core */
+void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd)
+{
+ atomisp_flush_video_pipe(asd, &asd->video_out_capture);
+ atomisp_flush_video_pipe(asd, &asd->video_out_vf);
+ atomisp_flush_video_pipe(asd, &asd->video_out_preview);
+ atomisp_flush_video_pipe(asd, &asd->video_out_video_capture);
+}
+
+/* clean out the parameters that were not applied */
+void atomisp_flush_params_queue(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_css_params_with_list *param;
+
+ while (!list_empty(&pipe->per_frame_params)) {
+ param = list_entry(pipe->per_frame_params.next,
+ struct atomisp_css_params_with_list, list);
+ list_del(&param->list);
+ atomisp_free_css_parameters(&param->params);
+ kvfree(param);
+ }
+}
+
+/* Re-queue per-frame parameters */
+static void atomisp_recover_params_queue(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_css_params_with_list *param;
+ int i;
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ param = pipe->frame_params[i];
+ if (param)
+ list_add_tail(&param->list, &pipe->per_frame_params);
+ pipe->frame_params[i] = NULL;
+ }
+ atomisp_handle_parameter_and_buffer(pipe);
+}
+
+/* find atomisp_video_pipe with css pipe id, buffer type and atomisp run_mode */
+static struct atomisp_video_pipe *__atomisp_get_pipe(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id,
+ enum atomisp_css_buffer_type buf_type)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (css_pipe_id == CSS_PIPE_ID_COPY &&
+ isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1) {
+ switch (stream_id) {
+ case ATOMISP_INPUT_STREAM_PREVIEW:
+ return &asd->video_out_preview;
+ case ATOMISP_INPUT_STREAM_POSTVIEW:
+ return &asd->video_out_vf;
+ case ATOMISP_INPUT_STREAM_VIDEO:
+ return &asd->video_out_video_capture;
+ case ATOMISP_INPUT_STREAM_CAPTURE:
+ default:
+ return &asd->video_out_capture;
+ }
+ }
+
+	/* video is the same in online mode as in continuous capture mode */
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ /*
+ * Disable vf_pp and run CSS in still capture mode. In this
+ * mode, CSS does not cause extra latency with buffering, but
+ * scaling is not available.
+ */
+ return &asd->video_out_capture;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ /*
+ * Disable vf_pp and run CSS in video mode. This allows using
+ * ISP scaling but it has one frame delay due to CSS internal
+ * buffering.
+ */
+ return &asd->video_out_video_capture;
+ } else if (css_pipe_id == CSS_PIPE_ID_YUVPP) {
+ /*
+		 * For SoC cameras, the yuvpp pipe is used for capture/video/SDV/ZSL.
+ */
+ if (asd->continuous_mode->val) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ /* SDV case */
+ switch (buf_type) {
+ case CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+ return &asd->video_out_video_capture;
+ case CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
+ return &asd->video_out_preview;
+ case CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ return &asd->video_out_capture;
+ default:
+ return &asd->video_out_vf;
+ }
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ /* ZSL case */
+ switch (buf_type) {
+ case CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+ return &asd->video_out_preview;
+ case CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ return &asd->video_out_capture;
+ default:
+ return &asd->video_out_vf;
+ }
+ }
+ } else if (buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME) {
+ switch (asd->run_mode->val) {
+ case ATOMISP_RUN_MODE_VIDEO:
+ return &asd->video_out_video_capture;
+ case ATOMISP_RUN_MODE_PREVIEW:
+ return &asd->video_out_preview;
+ default:
+ return &asd->video_out_capture;
+ }
+ } else if (buf_type == CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ return &asd->video_out_preview;
+ else
+ return &asd->video_out_vf;
+ }
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ /* For online video or SDV video pipe. */
+ if (css_pipe_id == CSS_PIPE_ID_VIDEO ||
+ css_pipe_id == CSS_PIPE_ID_COPY) {
+ if (buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ return &asd->video_out_video_capture;
+ return &asd->video_out_preview;
+ }
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ /* For online preview or ZSL preview pipe. */
+ if (css_pipe_id == CSS_PIPE_ID_PREVIEW ||
+ css_pipe_id == CSS_PIPE_ID_COPY)
+ return &asd->video_out_preview;
+ }
+ /* For capture pipe. */
+ if (buf_type == CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ return &asd->video_out_vf;
+ return &asd->video_out_capture;
+}
+
+enum atomisp_metadata_type
+atomisp_get_metadata_type(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id) {
+ if (!asd->continuous_mode->val)
+ return ATOMISP_MAIN_METADATA;
+
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE) /* online capture pipe */
+ return ATOMISP_SEC_METADATA;
+ else
+ return ATOMISP_MAIN_METADATA;
+}
+
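+/*
+ * Dequeue one completed buffer of the given type from CSS, move it to the
+ * matching ready list or mark the videobuf done, and wake up any waiters.
+ * Flash status bookkeeping and per-frame parameter recycling are also
+ * handled here.
+ */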
+void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
+ enum atomisp_css_buffer_type buf_type,
+ enum atomisp_css_pipe_id css_pipe_id,
+ bool q_buffers, enum atomisp_input_stream_id stream_id)
+{
+ struct videobuf_buffer *vb = NULL;
+ struct atomisp_video_pipe *pipe = NULL;
+ struct atomisp_css_buffer buffer;
+ bool requeue = false;
+ int err;
+ unsigned long irqflags;
+ struct atomisp_css_frame *frame = NULL;
+ struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp;
+ struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp;
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp;
+ enum atomisp_metadata_type md_type;
+ struct atomisp_device *isp = asd->isp;
+ struct v4l2_control ctrl;
+ bool reset_wdt_timer = false;
+
+	if (buf_type != CSS_BUFFER_TYPE_METADATA &&
+ buf_type != CSS_BUFFER_TYPE_3A_STATISTICS &&
+ buf_type != CSS_BUFFER_TYPE_DIS_STATISTICS &&
+ buf_type != CSS_BUFFER_TYPE_OUTPUT_FRAME &&
+ buf_type != CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME &&
+ buf_type != CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME &&
+ buf_type != CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME &&
+ buf_type != CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
+ dev_err(isp->dev, "%s, unsupported buffer type: %d\n",
+ __func__, buf_type);
+ return;
+ }
+
+ memset(&buffer, 0, sizeof(struct atomisp_css_buffer));
+ buffer.css_buffer.type = buf_type;
+ err = atomisp_css_dequeue_buffer(asd, stream_id, css_pipe_id,
+ buf_type, &buffer);
+ if (err) {
+ dev_err(isp->dev,
+ "atomisp_css_dequeue_buffer failed: 0x%x\n", err);
+ return;
+ }
+
+ /* need to know the atomisp pipe for frame buffers */
+ pipe = __atomisp_get_pipe(asd, stream_id, css_pipe_id, buf_type);
+ if (!pipe) {
+ dev_err(isp->dev, "error getting atomisp pipe\n");
+ return;
+ }
+
+ switch (buf_type) {
+ case CSS_BUFFER_TYPE_3A_STATISTICS:
+ list_for_each_entry_safe(s3a_buf, _s3a_buf_tmp,
+ &asd->s3a_stats_in_css, list) {
+ if (s3a_buf->s3a_data ==
+ buffer.css_buffer.data.stats_3a) {
+ list_del_init(&s3a_buf->list);
+ list_add_tail(&s3a_buf->list,
+ &asd->s3a_stats_ready);
+ break;
+ }
+ }
+
+ asd->s3a_bufs_in_css[css_pipe_id]--;
+ atomisp_3a_stats_ready_event(asd, buffer.css_buffer.exp_id);
+ dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n",
+ __func__, s3a_buf->s3a_data->exp_id);
+ break;
+ case CSS_BUFFER_TYPE_METADATA:
+ if (error)
+ break;
+
+ md_type = atomisp_get_metadata_type(asd, css_pipe_id);
+ list_for_each_entry_safe(md_buf, _md_buf_tmp,
+ &asd->metadata_in_css[md_type], list) {
+ if (md_buf->metadata ==
+ buffer.css_buffer.data.metadata) {
+ list_del_init(&md_buf->list);
+ list_add_tail(&md_buf->list,
+ &asd->metadata_ready[md_type]);
+ break;
+ }
+ }
+ asd->metadata_bufs_in_css[stream_id][css_pipe_id]--;
+ atomisp_metadata_ready_event(asd, md_type);
+ dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n",
+ __func__, md_buf->metadata->exp_id);
+ break;
+ case CSS_BUFFER_TYPE_DIS_STATISTICS:
+ list_for_each_entry_safe(dis_buf, _dis_buf_tmp,
+ &asd->dis_stats_in_css, list) {
+ if (dis_buf->dis_data ==
+ buffer.css_buffer.data.stats_dvs) {
+ spin_lock_irqsave(&asd->dis_stats_lock,
+ irqflags);
+ list_del_init(&dis_buf->list);
+ list_add(&dis_buf->list, &asd->dis_stats);
+ asd->params.dis_proj_data_valid = true;
+ spin_unlock_irqrestore(&asd->dis_stats_lock,
+ irqflags);
+ break;
+ }
+ }
+ asd->dis_bufs_in_css--;
+ dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n",
+ __func__, dis_buf->dis_data->exp_id);
+ break;
+ case CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
+ case CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
+ if (atomisp_hw_is_isp2401)
+ reset_wdt_timer = true;
+
+ pipe->buffers_in_css--;
+ frame = buffer.css_buffer.data.frame;
+ if (!frame) {
+ WARN_ON(1);
+ break;
+ }
+ if (!frame->valid)
+ error = true;
+
+ /* FIXME:
+		 * YUVPP doesn't set postview exp_id correctly in SDV mode.
+ * This is a WORKAROUND to set exp_id. see HSDES-1503911606.
+ */
+ if (IS_BYT && buf_type == CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME &&
+ asd->continuous_mode->val && ATOMISP_USE_YUVPP(asd))
+ frame->exp_id = (asd->postview_exp_id++) %
+ (ATOMISP_MAX_EXP_ID + 1);
+
+ dev_dbg(isp->dev, "%s: vf frame with exp_id %d is ready\n",
+ __func__, frame->exp_id);
+ if (asd->params.flash_state == ATOMISP_FLASH_ONGOING) {
+ if (frame->flash_state
+ == CSS_FRAME_FLASH_STATE_PARTIAL)
+ dev_dbg(isp->dev, "%s thumb partially flashed\n",
+ __func__);
+ else if (frame->flash_state
+ == CSS_FRAME_FLASH_STATE_FULL)
+ dev_dbg(isp->dev, "%s thumb completely flashed\n",
+ __func__);
+ else
+ dev_dbg(isp->dev, "%s thumb no flash in this frame\n",
+ __func__);
+ }
+ vb = atomisp_css_frame_to_vbuf(pipe, frame);
+ WARN_ON(!vb);
+ if (vb)
+ pipe->frame_config_id[vb->i] = frame->isp_config_id;
+ if (css_pipe_id == IA_CSS_PIPE_ID_CAPTURE &&
+ asd->pending_capture_request > 0) {
+ err = atomisp_css_offline_capture_configure(asd,
+ asd->params.offline_parm.num_captures,
+ asd->params.offline_parm.skip_frames,
+ asd->params.offline_parm.offset);
+
+ asd->pending_capture_request--;
+
+ if (atomisp_hw_is_isp2401)
+ asd->re_trigger_capture = false;
+
+ dev_dbg(isp->dev, "Trigger capture again for new buffer. err=%d\n",
+ err);
+ } else if (atomisp_hw_is_isp2401) {
+ asd->re_trigger_capture = true;
+ }
+ break;
+ case CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ case CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+ if (atomisp_hw_is_isp2401)
+ reset_wdt_timer = true;
+
+ pipe->buffers_in_css--;
+ frame = buffer.css_buffer.data.frame;
+ if (!frame) {
+ WARN_ON(1);
+ break;
+ }
+
+ if (!frame->valid)
+ error = true;
+
+ /* FIXME:
+		 * YUVPP doesn't set preview exp_id correctly in ZSL mode.
+ * This is a WORKAROUND to set exp_id. see HSDES-1503911606.
+ */
+ if (IS_BYT && buf_type == CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME &&
+ asd->continuous_mode->val && ATOMISP_USE_YUVPP(asd))
+ frame->exp_id = (asd->preview_exp_id++) %
+ (ATOMISP_MAX_EXP_ID + 1);
+
+ dev_dbg(isp->dev, "%s: main frame with exp_id %d is ready\n",
+ __func__, frame->exp_id);
+ vb = atomisp_css_frame_to_vbuf(pipe, frame);
+ if (!vb) {
+ WARN_ON(1);
+ break;
+ }
+
+ /* free the parameters */
+ if (pipe->frame_params[vb->i]) {
+ if (asd->params.dvs_6axis ==
+ pipe->frame_params[vb->i]->params.dvs_6axis)
+ asd->params.dvs_6axis = NULL;
+ atomisp_free_css_parameters(
+ &pipe->frame_params[vb->i]->params);
+ kvfree(pipe->frame_params[vb->i]);
+ pipe->frame_params[vb->i] = NULL;
+ }
+
+ pipe->frame_config_id[vb->i] = frame->isp_config_id;
+ ctrl.id = V4L2_CID_FLASH_MODE;
+ if (asd->params.flash_state == ATOMISP_FLASH_ONGOING) {
+ if (frame->flash_state
+ == CSS_FRAME_FLASH_STATE_PARTIAL) {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_FLASH_PARTIAL;
+ dev_dbg(isp->dev, "%s partially flashed\n",
+ __func__);
+ } else if (frame->flash_state
+ == CSS_FRAME_FLASH_STATE_FULL) {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_FLASH_EXPOSED;
+ asd->params.num_flash_frames--;
+ dev_dbg(isp->dev, "%s completely flashed\n",
+ __func__);
+ } else {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_OK;
+ dev_dbg(isp->dev,
+ "%s no flash in this frame\n",
+ __func__);
+ }
+
+ /* Check if flashing sequence is done */
+ if (asd->frame_status[vb->i] ==
+ ATOMISP_FRAME_STATUS_FLASH_EXPOSED)
+ asd->params.flash_state = ATOMISP_FLASH_DONE;
+ } else if (isp->flash) {
+ if (v4l2_g_ctrl(isp->flash->ctrl_handler, &ctrl) ==
+ 0 && ctrl.value == ATOMISP_FLASH_MODE_TORCH) {
+ ctrl.id = V4L2_CID_FLASH_TORCH_INTENSITY;
+ if (v4l2_g_ctrl(isp->flash->ctrl_handler, &ctrl)
+ == 0 && ctrl.value > 0) {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_FLASH_EXPOSED;
+ } else {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_OK;
+ }
+ } else
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_OK;
+ } else {
+ asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK;
+ }
+
+ asd->params.last_frame_status = asd->frame_status[vb->i];
+
+ if (asd->continuous_mode->val) {
+ if (css_pipe_id == CSS_PIPE_ID_PREVIEW ||
+ css_pipe_id == CSS_PIPE_ID_VIDEO) {
+ asd->latest_preview_exp_id = frame->exp_id;
+ } else if (css_pipe_id ==
+ CSS_PIPE_ID_CAPTURE) {
+ if (asd->run_mode->val ==
+ ATOMISP_RUN_MODE_VIDEO)
+ dev_dbg(isp->dev, "SDV capture raw buffer id: %u\n",
+ frame->exp_id);
+ else
+ dev_dbg(isp->dev, "ZSL capture raw buffer id: %u\n",
+ frame->exp_id);
+ }
+ }
+ /*
+		 * Only when the raw buffer lock is enabled and we are in
+		 * continuous mode: in the preview/video pipe each buffer is
+		 * locked automatically, so record it here.
+ */
+ if (((css_pipe_id == CSS_PIPE_ID_PREVIEW) ||
+ (css_pipe_id == CSS_PIPE_ID_VIDEO)) &&
+ asd->enable_raw_buffer_lock->val &&
+ asd->continuous_mode->val) {
+ atomisp_set_raw_buffer_bitmap(asd, frame->exp_id);
+ WARN_ON(frame->exp_id > ATOMISP_MAX_EXP_ID);
+ }
+
+ if (asd->params.css_update_params_needed) {
+ atomisp_apply_css_parameters(asd,
+ &asd->params.css_param);
+ if (asd->params.css_param.update_flag.dz_config)
+ atomisp_css_set_dz_config(asd,
+ &asd->params.css_param.dz_config);
+			/* A new global dvs 6axis config should be blocked
+			 * here if there's a buffer with per-frame parameters
+			 * pending in the CSS frame buffer queue.
+			 * This is to avoid zooming vibration, since global
+			 * parameters take effect immediately while
+			 * per-frame parameters take effect only after the
+			 * previous buffers in CSS have been processed.
+ */
+ if (asd->params.dvs_6axis)
+ atomisp_css_set_dvs_6axis(asd,
+ asd->params.dvs_6axis);
+ else
+ asd->params.css_update_params_needed = false;
+			/* The update flag should not be cleared here
+			 * since it is still going to be used to make up
+			 * the following per-frame parameters.
+			 * This introduces more copy work, since each time
+			 * the global parameters are updated the whole
+			 * parameter set is applied.
+ * FIXME: A new set of parameter copy functions can
+ * be added to make up per-frame parameters based on
+ * solid structures stored in asd->params.css_param
+ * instead of using shadow pointers in update flag.
+ */
+ atomisp_css_update_isp_params(asd);
+ }
+ break;
+ default:
+ break;
+ }
+	if (vb) {
+ vb->ts = ktime_get_ns();
+ vb->field_count = atomic_read(&asd->sequence) << 1;
+		/* mark videobuffer done for dequeue */
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ vb->state = !error ? VIDEOBUF_DONE : VIDEOBUF_ERROR;
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+
+ /*
+		 * Frame capture done; wake up any process blocked on the
+		 * currently active buffer, which is possibly held by
+		 * videobuf_dqbuf().
+ */
+ wake_up(&vb->done);
+ }
+ if (atomisp_hw_is_isp2401)
+ atomic_set(&pipe->wdt_count, 0);
+
+ /*
+ * Requeue should only be done for 3a and dis buffers.
+ * Queue/dequeue order will change if driver recycles image buffers.
+ */
+	if (requeue) {
+ err = atomisp_css_queue_buffer(asd,
+ stream_id, css_pipe_id,
+ buf_type, &buffer);
+ if (err)
+ dev_err(isp->dev, "%s, q to css fails: %d\n",
+ __func__, err);
+ return;
+ }
+ if (!error && q_buffers)
+ atomisp_qbuffers_to_css(asd);
+
+ if (atomisp_hw_is_isp2401) {
+		/* If there are no buffers queued then
+		 * delete the wdt timer. */
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return;
+ if (!atomisp_buffers_queued_pipe(pipe))
+ atomisp_wdt_stop_pipe(pipe, false);
+ else if (reset_wdt_timer)
+ /* SOF irq should not reset wdt timer. */
+ atomisp_wdt_refresh_pipe(pipe,
+ ATOMISP_WDT_KEEP_CURRENT_DELAY);
+ }
+}
+
+void atomisp_delayed_init_work(struct work_struct *work)
+{
+ struct atomisp_sub_device *asd = container_of(work,
+ struct atomisp_sub_device,
+ delayed_init_work);
+ /*
+	 * SoC cameras use the yuvpp pipe and do not support continuous mode.
+ */
+ if (!ATOMISP_USE_YUVPP(asd)) {
+ struct v4l2_event event = {0};
+
+ atomisp_css_allocate_continuous_frames(false, asd);
+ atomisp_css_update_continuous_frames(asd);
+
+ event.type = V4L2_EVENT_ATOMISP_RAW_BUFFERS_ALLOC_DONE;
+ v4l2_event_queue(asd->subdev.devnode, &event);
+ }
+
+ /* signal streamon after delayed init is done */
+ asd->delayed_init = ATOMISP_DELAYED_INIT_DONE;
+ complete(&asd->init_done);
+}
+
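+/*
+ * Recover from a CSS hang: stop the sensors and the CSS pipelines,
+ * power-cycle the ISP via atomisp_reset(), then restart the pipelines and
+ * sensors and requeue any unprocessed per-frame parameters.
+ */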
+static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
+{
+ enum atomisp_css_pipe_id css_pipe_id;
+ bool stream_restart[MAX_STREAM_NUM] = {0};
+ bool depth_mode = false;
+ int i, ret, depth_cnt = 0;
+
+ if (!isp->sw_contex.file_input)
+ atomisp_css_irq_enable(isp,
+ CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);
+
+ BUG_ON(isp->num_of_streams > MAX_STREAM_NUM);
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ struct ia_css_pipeline *acc_pipeline;
+ struct ia_css_pipe *acc_pipe = NULL;
+
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED &&
+ !asd->stream_prepared)
+ continue;
+
+ /*
+		 * AtomISP::waitStageUpdate is blocked when a WDT timeout happens.
+		 * By calling acc_done() for all loaded fw_handles,
+		 * the HAL will be unblocked.
+ */
+ acc_pipe = asd->stream_env[i].pipes[CSS_PIPE_ID_ACC];
+ if (acc_pipe) {
+ acc_pipeline = ia_css_pipe_get_pipeline(acc_pipe);
+ if (acc_pipeline) {
+ struct ia_css_pipeline_stage *stage;
+
+ for (stage = acc_pipeline->stages; stage;
+ stage = stage->next) {
+ const struct ia_css_fw_info *fw;
+
+ fw = stage->firmware;
+ atomisp_acc_done(asd, fw->handle);
+ }
+ }
+ }
+
+ depth_cnt++;
+
+ if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED)
+ cancel_work_sync(&asd->delayed_init_work);
+
+ complete(&asd->init_done);
+ asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
+
+ stream_restart[asd->index] = true;
+
+ asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING;
+
+ /* stream off sensor */
+ ret = v4l2_subdev_call(
+ isp->inputs[asd->input_curr].
+ camera, video, s_stream, 0);
+ if (ret)
+ dev_warn(isp->dev,
+ "can't stop streaming on sensor!\n");
+
+ atomisp_acc_unload_extensions(asd);
+
+ atomisp_clear_css_buffer_counters(asd);
+
+ css_pipe_id = atomisp_get_css_pipe_id(asd);
+ atomisp_css_stop(asd, css_pipe_id, true);
+
+ asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+
+ asd->preview_exp_id = 1;
+ asd->postview_exp_id = 1;
+		/* notify the HAL of the CSS reset */
+ dev_dbg(isp->dev,
+ "send reset event to %s\n", asd->subdev.devnode->name);
+ atomisp_reset_event(asd);
+ }
+
+ /* clear irq */
+ disable_isp_irq(hrt_isp_css_irq_sp);
+ clear_isp_irq(hrt_isp_css_irq_sp);
+
+ /* Set the SRSE to 3 before resetting */
+ pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control |
+ MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
+
+ /* reset ISP and restore its state */
+ isp->isp_timeout = true;
+ atomisp_reset(isp);
+ isp->isp_timeout = false;
+
+ if (!isp_timeout) {
+ for (i = 0; i < isp->num_of_streams; i++) {
+ if (isp->asd[i].depth_mode->val)
+ return;
+ }
+ }
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ if (!stream_restart[i])
+ continue;
+
+ if (isp->inputs[asd->input_curr].type != FILE_INPUT)
+ atomisp_css_input_set_mode(asd,
+ CSS_INPUT_MODE_SENSOR);
+
+ css_pipe_id = atomisp_get_css_pipe_id(asd);
+ if (atomisp_css_start(asd, css_pipe_id, true))
+ dev_warn(isp->dev,
+ "start SP failed, so do not set streaming to be enable!\n");
+ else
+ asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED;
+
+ atomisp_csi2_configure(asd);
+ }
+
+ if (!isp->sw_contex.file_input) {
+ atomisp_css_irq_enable(isp, CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+ atomisp_css_valid_sof(isp));
+
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ } else {
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, true) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ }
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd;
+
+ asd = &isp->asd[i];
+
+ if (!stream_restart[i])
+ continue;
+
+ if (asd->continuous_mode->val &&
+ asd->delayed_init == ATOMISP_DELAYED_INIT_NOT_QUEUED) {
+ reinit_completion(&asd->init_done);
+ asd->delayed_init = ATOMISP_DELAYED_INIT_QUEUED;
+ queue_work(asd->delayed_init_workq,
+ &asd->delayed_init_work);
+ }
+ /*
+ * dequeueing buffers is not needed. CSS will recycle
+ * buffers that it has.
+ */
+ atomisp_flush_bufs_and_wakeup(asd);
+
+ /* Requeue unprocessed per-frame parameters. */
+ atomisp_recover_params_queue(&asd->video_out_capture);
+ atomisp_recover_params_queue(&asd->video_out_preview);
+ atomisp_recover_params_queue(&asd->video_out_video_capture);
+
+ if ((asd->depth_mode->val) &&
+ (depth_cnt == ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) {
+ depth_mode = true;
+ continue;
+ }
+
+ ret = v4l2_subdev_call(
+ isp->inputs[asd->input_curr].camera, video,
+ s_stream, 1);
+ if (ret)
+ dev_warn(isp->dev,
+ "can't start streaming on sensor!\n");
+ }
+
+ if (depth_mode) {
+ if (atomisp_stream_on_master_slave_sensor(isp, true))
+ dev_warn(isp->dev,
+ "master slave sensor stream on failed!\n");
+ }
+}
+
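+/*
+ * Watchdog worker: based on the accumulated timeout counts (global for
+ * ISP2400, per-pipe for ISP2401), either dump debug state and attempt CSS
+ * recovery, or (on ISP2401, after too many timeouts) give up and flag a
+ * fatal error.
+ */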
+void atomisp_wdt_work(struct work_struct *work)
+{
+ struct atomisp_device *isp = container_of(work, struct atomisp_device,
+ wdt_work);
+ int i;
+ unsigned int pipe_wdt_cnt[MAX_STREAM_NUM][4] = { {0} };
+ bool css_recover = false;
+
+ rt_mutex_lock(&isp->mutex);
+ if (!atomisp_streaming_count(isp)) {
+ atomic_set(&isp->wdt_work_queued, 0);
+ rt_mutex_unlock(&isp->mutex);
+ return;
+ }
+
+ if (!atomisp_hw_is_isp2401) {
+ dev_err(isp->dev, "timeout %d of %d\n",
+ atomic_read(&isp->wdt_count) + 1,
+ ATOMISP_ISP_MAX_TIMEOUT_COUNT);
+
+ if (atomic_inc_return(&isp->wdt_count) < ATOMISP_ISP_MAX_TIMEOUT_COUNT)
+ css_recover = true;
+ } else {
+ css_recover = true;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ pipe_wdt_cnt[i][0] +=
+ atomic_read(&asd->video_out_capture.wdt_count);
+ pipe_wdt_cnt[i][1] +=
+ atomic_read(&asd->video_out_vf.wdt_count);
+ pipe_wdt_cnt[i][2] +=
+ atomic_read(&asd->video_out_preview.wdt_count);
+ pipe_wdt_cnt[i][3] +=
+ atomic_read(&asd->video_out_video_capture.wdt_count);
+ css_recover =
+ (pipe_wdt_cnt[i][0] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT &&
+ pipe_wdt_cnt[i][1] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT &&
+ pipe_wdt_cnt[i][2] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT &&
+ pipe_wdt_cnt[i][3] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT)
+ ? true : false;
+ dev_err(isp->dev,
+ "pipe on asd%d timeout cnt: (%d, %d, %d, %d) of %d, recover = %d\n",
+ asd->index, pipe_wdt_cnt[i][0], pipe_wdt_cnt[i][1],
+ pipe_wdt_cnt[i][2], pipe_wdt_cnt[i][3],
+ ATOMISP_ISP_MAX_TIMEOUT_COUNT, css_recover);
+ }
+ }
+
+ if (css_recover) {
+ unsigned int old_dbglevel = dbg_level;
+
+ atomisp_css_debug_dump_sp_sw_debug_info();
+ atomisp_css_debug_dump_debug_info(__func__);
+ dbg_level = old_dbglevel;
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+ dev_err(isp->dev, "%s, vdev %s buffers in css: %d\n",
+ __func__,
+ asd->video_out_capture.vdev.name,
+ asd->video_out_capture.
+ buffers_in_css);
+ dev_err(isp->dev,
+ "%s, vdev %s buffers in css: %d\n",
+ __func__,
+ asd->video_out_vf.vdev.name,
+ asd->video_out_vf.
+ buffers_in_css);
+ dev_err(isp->dev,
+ "%s, vdev %s buffers in css: %d\n",
+ __func__,
+ asd->video_out_preview.vdev.name,
+ asd->video_out_preview.
+ buffers_in_css);
+ dev_err(isp->dev,
+ "%s, vdev %s buffers in css: %d\n",
+ __func__,
+ asd->video_out_video_capture.vdev.name,
+ asd->video_out_video_capture.
+ buffers_in_css);
+ dev_err(isp->dev,
+ "%s, s3a buffers in css preview pipe:%d\n",
+ __func__,
+ asd->s3a_bufs_in_css[CSS_PIPE_ID_PREVIEW]);
+ dev_err(isp->dev,
+ "%s, s3a buffers in css capture pipe:%d\n",
+ __func__,
+ asd->s3a_bufs_in_css[CSS_PIPE_ID_CAPTURE]);
+ dev_err(isp->dev,
+ "%s, s3a buffers in css video pipe:%d\n",
+ __func__,
+ asd->s3a_bufs_in_css[CSS_PIPE_ID_VIDEO]);
+ dev_err(isp->dev,
+ "%s, dis buffers in css: %d\n",
+ __func__, asd->dis_bufs_in_css);
+ dev_err(isp->dev,
+ "%s, metadata buffers in css preview pipe:%d\n",
+ __func__,
+ asd->metadata_bufs_in_css
+ [ATOMISP_INPUT_STREAM_GENERAL]
+ [CSS_PIPE_ID_PREVIEW]);
+ dev_err(isp->dev,
+ "%s, metadata buffers in css capture pipe:%d\n",
+ __func__,
+ asd->metadata_bufs_in_css
+ [ATOMISP_INPUT_STREAM_GENERAL]
+ [CSS_PIPE_ID_CAPTURE]);
+ dev_err(isp->dev,
+ "%s, metadata buffers in css video pipe:%d\n",
+ __func__,
+ asd->metadata_bufs_in_css
+ [ATOMISP_INPUT_STREAM_GENERAL]
+ [CSS_PIPE_ID_VIDEO]);
+ if (asd->enable_raw_buffer_lock->val) {
+ unsigned int j;
+
+ dev_err(isp->dev, "%s, raw_buffer_locked_count %d\n",
+ __func__, asd->raw_buffer_locked_count);
+ for (j = 0; j <= ATOMISP_MAX_EXP_ID / 32; j++)
+ dev_err(isp->dev, "%s, raw_buffer_bitmap[%d]: 0x%x\n",
+ __func__, j,
+ asd->raw_buffer_bitmap[j]);
+ }
+ }
+
+ /*sh_css_dump_sp_state();*/
+ /*sh_css_dump_isp_state();*/
+ } else {
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ if (asd->streaming ==
+ ATOMISP_DEVICE_STREAMING_ENABLED) {
+ atomisp_clear_css_buffer_counters(asd);
+ atomisp_flush_bufs_and_wakeup(asd);
+ complete(&asd->init_done);
+ }
+ if (atomisp_hw_is_isp2401)
+ atomisp_wdt_stop(asd, false);
+ }
+
+ if (!atomisp_hw_is_isp2401) {
+ atomic_set(&isp->wdt_count, 0);
+ } else {
+ isp->isp_fatal_error = true;
+ atomic_set(&isp->wdt_work_queued, 0);
+
+ rt_mutex_unlock(&isp->mutex);
+ return;
+ }
+ }
+
+ __atomisp_css_recover(isp, true);
+ if (atomisp_hw_is_isp2401) {
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+
+ atomisp_wdt_refresh(asd,
+ isp->sw_contex.file_input ?
+ ATOMISP_ISP_FILE_TIMEOUT_DURATION :
+ ATOMISP_ISP_TIMEOUT_DURATION);
+ }
+ }
+
+ dev_err(isp->dev, "timeout recovery handling done\n");
+ atomic_set(&isp->wdt_work_queued, 0);
+
+ rt_mutex_unlock(&isp->mutex);
+}
+
+void atomisp_css_flush(struct atomisp_device *isp)
+{
+ int i;
+
+ if (!atomisp_streaming_count(isp))
+ return;
+
+ /* Disable wdt */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ atomisp_wdt_stop(asd, true);
+ }
+
+	/* Start recovery */
+ __atomisp_css_recover(isp, false);
+ /* Restore wdt */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ if (asd->streaming !=
+ ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+
+ atomisp_wdt_refresh(asd,
+ isp->sw_contex.file_input ?
+ ATOMISP_ISP_FILE_TIMEOUT_DURATION :
+ ATOMISP_ISP_TIMEOUT_DURATION);
+ }
+ dev_dbg(isp->dev, "atomisp css flush done\n");
+}
+
+void atomisp_wdt(struct timer_list *t)
+{
+ struct atomisp_sub_device *asd;
+ struct atomisp_device *isp;
+
+ if (!atomisp_hw_is_isp2401) {
+ asd = from_timer(asd, t, wdt);
+ isp = asd->isp;
+ } else {
+ struct atomisp_video_pipe *pipe = from_timer(pipe, t, wdt);
+
+ asd = pipe->asd;
+ isp = asd->isp;
+
+ atomic_inc(&pipe->wdt_count);
+ dev_warn(isp->dev,
+ "[WARNING]asd %d pipe %s ISP timeout %d!\n",
+ asd->index, pipe->vdev.name,
+ atomic_read(&pipe->wdt_count));
+ }
+
+ if (atomic_read(&isp->wdt_work_queued)) {
+ dev_dbg(isp->dev, "ISP watchdog was put into workqueue\n");
+ return;
+ }
+ atomic_set(&isp->wdt_work_queued, 1);
+ queue_work(isp->wdt_work_queue, &isp->wdt_work);
+}
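+
+/*
+ * Illustrative sketch, added for clarity; not part of the original
+ * driver. It shows how the per-pipe (ISP2401) watchdog timer would
+ * typically be associated with the atomisp_wdt() callback above via the
+ * timer_list API; from_timer() in the callback depends on exactly this
+ * association. The helper name example_wdt_init() is hypothetical.
+ */
+static inline void example_wdt_init(struct atomisp_video_pipe *pipe)
+{
+ /* bind pipe->wdt to atomisp_wdt() so from_timer() can recover pipe */
+ timer_setup(&pipe->wdt, atomisp_wdt, 0);
+ atomic_set(&pipe->wdt_count, 0);
+}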
+
+/* ISP2400 */
+void atomisp_wdt_start(struct atomisp_sub_device *asd)
+{
+ atomisp_wdt_refresh(asd, ATOMISP_ISP_TIMEOUT_DURATION);
+}
+
+/* ISP2401 */
+void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe,
+ unsigned int delay)
+{
+ unsigned long next;
+
+ if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY)
+ pipe->wdt_duration = delay;
+
+ next = jiffies + pipe->wdt_duration;
+
+ /* Override next if it has been pushed beyond the "next" time */
+ if (atomisp_is_wdt_running(pipe) && time_after(pipe->wdt_expires, next))
+ next = pipe->wdt_expires;
+
+ pipe->wdt_expires = next;
+
+ if (atomisp_is_wdt_running(pipe))
+ dev_dbg(pipe->asd->isp->dev, "WDT will hit after %d ms (%s)\n",
+ ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name);
+ else
+ dev_dbg(pipe->asd->isp->dev, "WDT starts with %d ms period (%s)\n",
+ ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name);
+
+ mod_timer(&pipe->wdt, next);
+}
+
+void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay)
+{
+ if (!atomisp_hw_is_isp2401) {
+ unsigned long next;
+
+ if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY)
+ asd->wdt_duration = delay;
+
+ next = jiffies + asd->wdt_duration;
+
+ /* Override next if it has been pushed beyond the "next" time */
+ if (atomisp_is_wdt_running(asd) && time_after(asd->wdt_expires, next))
+ next = asd->wdt_expires;
+
+ asd->wdt_expires = next;
+
+ if (atomisp_is_wdt_running(asd))
+ dev_dbg(asd->isp->dev, "WDT will hit after %d ms\n",
+ ((int)(next - jiffies) * 1000 / HZ));
+ else
+ dev_dbg(asd->isp->dev, "WDT starts with %d ms period\n",
+ ((int)(next - jiffies) * 1000 / HZ));
+
+ mod_timer(&asd->wdt, next);
+ atomic_set(&asd->isp->wdt_count, 0);
+ } else {
+ dev_dbg(asd->isp->dev, "WDT refresh all:\n");
+ if (atomisp_is_wdt_running(&asd->video_out_capture))
+ atomisp_wdt_refresh_pipe(&asd->video_out_capture, delay);
+ if (atomisp_is_wdt_running(&asd->video_out_preview))
+ atomisp_wdt_refresh_pipe(&asd->video_out_preview, delay);
+ if (atomisp_is_wdt_running(&asd->video_out_vf))
+ atomisp_wdt_refresh_pipe(&asd->video_out_vf, delay);
+ if (atomisp_is_wdt_running(&asd->video_out_video_capture))
+ atomisp_wdt_refresh_pipe(&asd->video_out_video_capture, delay);
+ }
+}
+
+/* ISP2401 */
+void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync)
+{
+ if (!atomisp_is_wdt_running(pipe))
+ return;
+
+ dev_dbg(pipe->asd->isp->dev,
+ "WDT stop asd %d (%s)\n", pipe->asd->index, pipe->vdev.name);
+
+ if (sync) {
+ del_timer_sync(&pipe->wdt);
+ cancel_work_sync(&pipe->asd->isp->wdt_work);
+ } else {
+ del_timer(&pipe->wdt);
+ }
+}
+
+/* ISP 2401 */
+void atomisp_wdt_start_pipe(struct atomisp_video_pipe *pipe)
+{
+ atomisp_wdt_refresh_pipe(pipe, ATOMISP_ISP_TIMEOUT_DURATION);
+}
+
+void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync)
+{
+ dev_dbg(asd->isp->dev, "WDT stop:\n");
+
+ if (!atomisp_hw_is_isp2401) {
+ if (sync) {
+ del_timer_sync(&asd->wdt);
+ cancel_work_sync(&asd->isp->wdt_work);
+ } else {
+ del_timer(&asd->wdt);
+ }
+ } else {
+ atomisp_wdt_stop_pipe(&asd->video_out_capture, sync);
+ atomisp_wdt_stop_pipe(&asd->video_out_preview, sync);
+ atomisp_wdt_stop_pipe(&asd->video_out_vf, sync);
+ atomisp_wdt_stop_pipe(&asd->video_out_video_capture, sync);
+ }
+}
+
+void atomisp_setup_flash(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct v4l2_control ctrl;
+
+ if (!isp->flash)
+ return;
+
+ if (asd->params.flash_state != ATOMISP_FLASH_REQUESTED &&
+ asd->params.flash_state != ATOMISP_FLASH_DONE)
+ return;
+
+ if (asd->params.num_flash_frames) {
+ /* make sure the timeout is set before setting flash mode */
+ ctrl.id = V4L2_CID_FLASH_TIMEOUT;
+ ctrl.value = FLASH_TIMEOUT;
+
+ if (v4l2_s_ctrl(NULL, isp->flash->ctrl_handler, &ctrl)) {
+ dev_err(isp->dev, "flash timeout configure failed\n");
+ return;
+ }
+
+ atomisp_css_request_flash(asd);
+ asd->params.flash_state = ATOMISP_FLASH_ONGOING;
+ } else {
+ asd->params.flash_state = ATOMISP_FLASH_IDLE;
+ }
+}
+
+irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
+{
+ struct atomisp_device *isp = isp_ptr;
+ unsigned long flags;
+ bool frame_done_found[MAX_STREAM_NUM] = {0};
+ bool css_pipe_done[MAX_STREAM_NUM] = {0};
+ unsigned int i;
+ struct atomisp_sub_device *asd;
+
+ dev_dbg(isp->dev, ">%s\n", __func__);
+
+ spin_lock_irqsave(&isp->lock, flags);
+
+ if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp)) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ /*
+ * The standard CSS2.0 API specifies the following calling sequence for
+ * dequeuing ready buffers:
+ * while (ia_css_dequeue_event(...)) {
+ * switch (event.type) {
+ * ...
+ * ia_css_pipe_dequeue_buffer()
+ * }
+ * }
+ * That is, dequeue event and buffer are one after another.
+ *
+ * But the following implementation first dequeues all the events
+ * into a FIFO, then processes the events in the FIFO.
+ * This causes no issue in single stream mode, but it does cause
+ * problems in the multiple stream case: ia_css_pipe_dequeue_buffer()
+ * will not return the correct buffer for a specific pipe.
+ *
+ * This is because ia_css_pipe_dequeue_buffer() does not take an
+ * ia_css_pipe parameter.
+ *
+ * So, for CSS2.0, we change the approach: do not dequeue all the
+ * events at one time; instead, dequeue one and process one, then
+ * another.
+ */
+ rt_mutex_lock(&isp->mutex);
+ if (atomisp_css_isr_thread(isp, frame_done_found, css_pipe_done))
+ goto out;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+ atomisp_setup_flash(asd);
+ }
+out:
+ rt_mutex_unlock(&isp->mutex);
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED
+ && css_pipe_done[asd->index]
+ && isp->sw_contex.file_input)
+ v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 1);
+ /* FIXME! FIX ACC implementation */
+ if (asd->acc.pipeline && css_pipe_done[asd->index])
+ atomisp_css_acc_done(asd);
+ }
+ dev_dbg(isp->dev, "<%s\n", __func__);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * utils for buffer allocation/free
+ */
+
+int atomisp_get_frame_pgnr(struct atomisp_device *isp,
+ const struct atomisp_css_frame *frame, u32 *p_pgnr)
+{
+ if (!frame) {
+ dev_err(isp->dev, "%s: NULL frame pointer ERROR.\n", __func__);
+ return -EINVAL;
+ }
+
+ *p_pgnr = DIV_ROUND_UP(frame->data_bytes, PAGE_SIZE);
+ return 0;
+}
+
+/*
+ * Get internal fmt according to V4L2 fmt
+ */
+static enum atomisp_css_frame_format
+v4l2_fmt_to_sh_fmt(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_YUV420:
+ return CSS_FRAME_FORMAT_YUV420;
+ case V4L2_PIX_FMT_YVU420:
+ return CSS_FRAME_FORMAT_YV12;
+ case V4L2_PIX_FMT_YUV422P:
+ return CSS_FRAME_FORMAT_YUV422;
+ case V4L2_PIX_FMT_YUV444:
+ return CSS_FRAME_FORMAT_YUV444;
+ case V4L2_PIX_FMT_NV12:
+ return CSS_FRAME_FORMAT_NV12;
+ case V4L2_PIX_FMT_NV21:
+ return CSS_FRAME_FORMAT_NV21;
+ case V4L2_PIX_FMT_NV16:
+ return CSS_FRAME_FORMAT_NV16;
+ case V4L2_PIX_FMT_NV61:
+ return CSS_FRAME_FORMAT_NV61;
+ case V4L2_PIX_FMT_UYVY:
+ return CSS_FRAME_FORMAT_UYVY;
+ case V4L2_PIX_FMT_YUYV:
+ return CSS_FRAME_FORMAT_YUYV;
+ case V4L2_PIX_FMT_RGB24:
+ return CSS_FRAME_FORMAT_PLANAR_RGB888;
+ case V4L2_PIX_FMT_RGB32:
+ return CSS_FRAME_FORMAT_RGBA888;
+ case V4L2_PIX_FMT_RGB565:
+ return CSS_FRAME_FORMAT_RGB565;
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_CUSTOM_M10MO_RAW:
+ return CSS_FRAME_FORMAT_BINARY_8;
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ return CSS_FRAME_FORMAT_RAW;
+ default:
+ return -EINVAL;
+ }
+}
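+
+/*
+ * Note, added for clarity; not part of the original driver: the default
+ * branch above funnels -EINVAL back through the enum return type, so
+ * callers that validate the result must compare it against -EINVAL
+ * rather than assuming every return is a valid CSS_FRAME_FORMAT_* value.
+ */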
+
+/*
+ * raw format match between SH format and V4L2 format
+ */
+static int raw_output_format_match_input(u32 input, u32 output)
+{
+ if ((input == CSS_FORMAT_RAW_12) &&
+ ((output == V4L2_PIX_FMT_SRGGB12) ||
+ (output == V4L2_PIX_FMT_SGRBG12) ||
+ (output == V4L2_PIX_FMT_SBGGR12) ||
+ (output == V4L2_PIX_FMT_SGBRG12)))
+ return 0;
+
+ if ((input == CSS_FORMAT_RAW_10) &&
+ ((output == V4L2_PIX_FMT_SRGGB10) ||
+ (output == V4L2_PIX_FMT_SGRBG10) ||
+ (output == V4L2_PIX_FMT_SBGGR10) ||
+ (output == V4L2_PIX_FMT_SGBRG10)))
+ return 0;
+
+ if ((input == CSS_FORMAT_RAW_8) &&
+ ((output == V4L2_PIX_FMT_SRGGB8) ||
+ (output == V4L2_PIX_FMT_SGRBG8) ||
+ (output == V4L2_PIX_FMT_SBGGR8) ||
+ (output == V4L2_PIX_FMT_SGBRG8)))
+ return 0;
+
+ if ((input == CSS_FORMAT_RAW_16) && (output == V4L2_PIX_FMT_SBGGR16))
+ return 0;
+
+ return -EINVAL;
+}
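+
+/*
+ * Illustrative usage, added for clarity; not part of the original
+ * driver: a raw input only matches the V4L2 Bayer formats of the same
+ * bit depth, e.g.:
+ *
+ * raw_output_format_match_input(CSS_FORMAT_RAW_10,
+ * V4L2_PIX_FMT_SGRBG10) -> 0
+ * raw_output_format_match_input(CSS_FORMAT_RAW_10,
+ * V4L2_PIX_FMT_SGRBG12) -> -EINVAL
+ */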
+
+static u32 get_pixel_depth(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_YVU420:
+ return 12;
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ return 16;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_YUV444:
+ return 24;
+ case V4L2_PIX_FMT_RGB32:
+ return 32;
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_CUSTOM_M10MO_RAW:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ return 8;
+ default:
+ return 8 * 2; /* unknown formats default to 16-bit raw */
+ }
+}
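+
+/*
+ * Illustrative sketch, added for clarity; not part of the original
+ * driver: the depth returned above is in bits per pixel, so an unpadded
+ * line size follows directly from it. The helper name
+ * example_bytes_per_line() is hypothetical.
+ */
+static inline u32 example_bytes_per_line(u32 pixelformat, u32 width)
+{
+ /* round up so sub-byte remainders still get a full byte */
+ return DIV_ROUND_UP(width * get_pixel_depth(pixelformat), 8);
+}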
+
+bool atomisp_is_mbuscode_raw(uint32_t code)
+{
+ return code >= 0x3000 && code < 0x4000;
+}
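+
+/*
+ * Note, added for clarity; not part of the original driver: the
+ * 0x3000-0x3fff window used above corresponds to the Bayer block of the
+ * MEDIA_BUS_FMT_* code space, so e.g. MEDIA_BUS_FMT_SBGGR10_1X10 tests
+ * as raw while the YUV codes in the 0x2000 range do not.
+ */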
+
+/*
+ * ISP features control function
+ */
+
+/*
+ * Set ISP capture mode based on current settings
+ */
+static void atomisp_update_capture_mode(struct atomisp_sub_device *asd)
+{
+ if (asd->params.gdc_cac_en)
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_ADVANCED);
+ else if (asd->params.low_light)
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_LOW_LIGHT);
+ else if (asd->video_out_capture.sh_fmt == CSS_FRAME_FORMAT_RAW)
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_RAW);
+ else
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_PRIMARY);
+}
+
+/* ISP2401 */
+int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
+ struct atomisp_s_runmode *runmode)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct v4l2_ctrl *c;
+ int ret = 0;
+
+ if (!(runmode && (runmode->mode & RUNMODE_MASK)))
+ return -EINVAL;
+
+ mutex_lock(asd->ctrl_handler.lock);
+ c = v4l2_ctrl_find(isp->inputs[asd->input_curr].camera->ctrl_handler,
+ V4L2_CID_RUN_MODE);
+
+ if (c)
+ ret = v4l2_ctrl_s_ctrl(c, runmode->mode);
+
+ mutex_unlock(asd->ctrl_handler.lock);
+ return ret;
+}
+
+/*
+ * Function to enable/disable lens geometry distortion correction (GDC) and
+ * chromatic aberration correction (CAC)
+ */
+int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.gdc_cac_en;
+ return 0;
+ }
+
+ asd->params.gdc_cac_en = !!*value;
+ if (asd->params.gdc_cac_en) {
+ atomisp_css_set_morph_table(asd,
+ asd->params.css_param.morph_table);
+ } else {
+ atomisp_css_set_morph_table(asd, NULL);
+ }
+ asd->params.css_update_params_needed = true;
+ atomisp_update_capture_mode(asd);
+ return 0;
+}
+
+/*
+ * Function to enable/disable low light mode including ANR
+ */
+int atomisp_low_light(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.low_light;
+ return 0;
+ }
+
+ asd->params.low_light = (*value != 0);
+ atomisp_update_capture_mode(asd);
+ return 0;
+}
+
+/*
+ * Function to enable/disable extra noise reduction (XNR) in low light
+ * condition
+ */
+int atomisp_xnr(struct atomisp_sub_device *asd, int flag,
+ int *xnr_enable)
+{
+ if (flag == 0) {
+ *xnr_enable = asd->params.xnr_en;
+ return 0;
+ }
+
+ atomisp_css_capture_enable_xnr(asd, !!*xnr_enable);
+
+ return 0;
+}
+
+/*
+ * Function to configure bayer noise reduction
+ */
+int atomisp_nr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_nr_config *arg)
+{
+ if (flag == 0) {
+ /* Get nr config from current setup */
+ if (atomisp_css_get_nr_config(asd, arg))
+ return -EINVAL;
+ } else {
+ /* Set nr config to isp parameters */
+ memcpy(&asd->params.css_param.nr_config, arg,
+ sizeof(struct atomisp_css_nr_config));
+ atomisp_css_set_nr_config(asd, &asd->params.css_param.nr_config);
+ asd->params.css_update_params_needed = true;
+ }
+ return 0;
+}
+
+/*
+ * Function to configure temporal noise reduction (TNR)
+ */
+int atomisp_tnr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_tnr_config *config)
+{
+ if (flag == 0) {
+ /* Get tnr config from current setup */
+ if (atomisp_css_get_tnr_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set tnr config to isp parameters */
+ memcpy(&asd->params.css_param.tnr_config, config,
+ sizeof(struct atomisp_css_tnr_config));
+ atomisp_css_set_tnr_config(asd, &asd->params.css_param.tnr_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to configure black level compensation
+ */
+int atomisp_black_level(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ob_config *config)
+{
+ if (flag == 0) {
+ /* Get ob config from current setup */
+ if (atomisp_css_get_ob_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set ob config to isp parameters */
+ memcpy(&asd->params.css_param.ob_config, config,
+ sizeof(struct atomisp_css_ob_config));
+ atomisp_css_set_ob_config(asd, &asd->params.css_param.ob_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to configure edge enhancement
+ */
+int atomisp_ee(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ee_config *config)
+{
+ if (flag == 0) {
+ /* Get ee config from current setup */
+ if (atomisp_css_get_ee_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set ee config to isp parameters */
+ memcpy(&asd->params.css_param.ee_config, config,
+ sizeof(asd->params.css_param.ee_config));
+ atomisp_css_set_ee_config(asd, &asd->params.css_param.ee_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update Gamma table for gamma, brightness and contrast config
+ */
+int atomisp_gamma(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gamma_table *config)
+{
+ if (flag == 0) {
+ /* Get gamma table from current setup */
+ if (atomisp_css_get_gamma_table(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set gamma table to isp parameters */
+ memcpy(&asd->params.css_param.gamma_table, config,
+ sizeof(asd->params.css_param.gamma_table));
+ atomisp_css_set_gamma_table(asd, &asd->params.css_param.gamma_table);
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update Ctc table for Chroma Enhancement
+ */
+int atomisp_ctc(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ctc_table *config)
+{
+ if (flag == 0) {
+ /* Get ctc table from current setup */
+ if (atomisp_css_get_ctc_table(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set ctc table to isp parameters */
+ memcpy(&asd->params.css_param.ctc_table, config,
+ sizeof(asd->params.css_param.ctc_table));
+ atomisp_css_set_ctc_table(asd, &asd->params.css_param.ctc_table);
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update gamma correction parameters
+ */
+int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gc_config *config)
+{
+ if (flag == 0) {
+ /* Get gamma correction params from current setup */
+ if (atomisp_css_get_gc_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set gamma correction params to isp parameters */
+ memcpy(&asd->params.css_param.gc_config, config,
+ sizeof(asd->params.css_param.gc_config));
+ atomisp_css_set_gc_config(asd, &asd->params.css_param.gc_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update narrow gamma flag
+ */
+int atomisp_formats(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_formats_config *config)
+{
+ if (flag == 0) {
+ /* Get narrow gamma flag from current setup */
+ if (atomisp_css_get_formats_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set narrow gamma flag to isp parameters */
+ memcpy(&asd->params.css_param.formats_config, config,
+ sizeof(asd->params.css_param.formats_config));
+ atomisp_css_set_formats_config(asd, &asd->params.css_param.formats_config);
+ }
+
+ return 0;
+}
+
+void atomisp_free_internal_buffers(struct atomisp_sub_device *asd)
+{
+ atomisp_free_css_parameters(&asd->params.css_param);
+
+ if (asd->raw_output_frame) {
+ atomisp_css_frame_free(asd->raw_output_frame);
+ asd->raw_output_frame = NULL;
+ }
+}
+
+static void atomisp_update_grid_info(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id,
+ int source_pad)
+{
+ struct atomisp_device *isp = asd->isp;
+ int err;
+ u16 stream_id = atomisp_source_pad_to_stream_id(asd, source_pad);
+
+ if (atomisp_css_get_grid_info(asd, pipe_id, source_pad))
+ return;
+
+ /* We must free all buffers because they no longer match
+ the grid size. */
+ atomisp_css_free_stat_buffers(asd);
+
+ err = atomisp_alloc_css_stat_bufs(asd, stream_id);
+ if (err) {
+ dev_err(isp->dev, "stat_buf allocate error\n");
+ goto err;
+ }
+
+ if (atomisp_alloc_3a_output_buf(asd)) {
+ /* Failure for 3A buffers does not influence DIS buffers */
+ if (asd->params.s3a_output_bytes != 0) {
+ /* SOC sensors may legitimately have s3a_output_bytes == 0;
+ * this check avoids logging a false error in that case. */
+ dev_err(isp->dev, "Failed to allocate memory for 3A statistics\n");
+ }
+ goto err;
+ }
+
+ if (atomisp_alloc_dis_coef_buf(asd)) {
+ dev_err(isp->dev,
+ "Failed to allocate memory for DIS statistics\n");
+ goto err;
+ }
+
+ if (atomisp_alloc_metadata_output_buf(asd)) {
+ dev_err(isp->dev, "Failed to allocate memory for metadata\n");
+ goto err;
+ }
+
+ return;
+
+err:
+ atomisp_css_free_stat_buffers(asd);
+ return;
+}
+
+static void atomisp_curr_user_grid_info(struct atomisp_sub_device *asd,
+ struct atomisp_grid_info *info)
+{
+ memcpy(info, &asd->params.curr_grid_info.s3a_grid,
+ sizeof(struct atomisp_css_3a_grid_info));
+}
+
+int atomisp_compare_grid(struct atomisp_sub_device *asd,
+ struct atomisp_grid_info *atomgrid)
+{
+ struct atomisp_grid_info tmp = {0};
+
+ atomisp_curr_user_grid_info(asd, &tmp);
+ return memcmp(atomgrid, &tmp, sizeof(tmp));
+}
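+
+/*
+ * Note, added for clarity; not part of the original driver: a non-zero
+ * return means the caller's grid info is stale; callers such as
+ * atomisp_3a_stat() below translate that into -EAGAIN so that user
+ * space re-queries the grid size and retries.
+ */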
+
+/*
+ * Function to update the morph table used for GDC
+ */
+int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_morph_table *config)
+{
+ int ret;
+ int i;
+ struct atomisp_device *isp = asd->isp;
+
+ if (flag == 0) {
+ /* Get gdc table from current setup */
+ struct atomisp_css_morph_table tab = {0};
+
+ atomisp_css_get_morph_table(asd, &tab);
+
+ config->width = tab.width;
+ config->height = tab.height;
+
+ for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ ret = copy_to_user(config->coordinates_x[i],
+ tab.coordinates_x[i], tab.height *
+ tab.width * sizeof(*tab.coordinates_x[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy to User for x\n");
+ return -EFAULT;
+ }
+ ret = copy_to_user(config->coordinates_y[i],
+ tab.coordinates_y[i], tab.height *
+ tab.width * sizeof(*tab.coordinates_y[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy to User for y\n");
+ return -EFAULT;
+ }
+ }
+ } else {
+ struct atomisp_css_morph_table *tab =
+ asd->params.css_param.morph_table;
+
+ /* free first if we have one */
+ if (tab) {
+ atomisp_css_morph_table_free(tab);
+ asd->params.css_param.morph_table = NULL;
+ }
+
+ /* allocate new one */
+ tab = atomisp_css_morph_table_allocate(config->width,
+ config->height);
+
+ if (!tab) {
+ dev_err(isp->dev, "out of memory\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ ret = copy_from_user(tab->coordinates_x[i],
+ config->coordinates_x[i],
+ config->height * config->width *
+ sizeof(*config->coordinates_x[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy from User for x, ret %d\n",
+ ret);
+ atomisp_css_morph_table_free(tab);
+ return -EFAULT;
+ }
+ ret = copy_from_user(tab->coordinates_y[i],
+ config->coordinates_y[i],
+ config->height * config->width *
+ sizeof(*config->coordinates_y[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy from User for y, ret is %d\n",
+ ret);
+ atomisp_css_morph_table_free(tab);
+ return -EFAULT;
+ }
+ }
+ asd->params.css_param.morph_table = tab;
+ if (asd->params.gdc_cac_en)
+ atomisp_css_set_morph_table(asd, tab);
+ }
+
+ return 0;
+}
+
+int atomisp_macc_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_macc_config *config)
+{
+ struct atomisp_css_macc_table *macc_table;
+
+ switch (config->color_effect) {
+ case V4L2_COLORFX_NONE:
+ macc_table = &asd->params.css_param.macc_table;
+ break;
+ case V4L2_COLORFX_SKY_BLUE:
+ macc_table = &blue_macc_table;
+ break;
+ case V4L2_COLORFX_GRASS_GREEN:
+ macc_table = &green_macc_table;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_LOW:
+ macc_table = &skin_low_macc_table;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN:
+ macc_table = &skin_medium_macc_table;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_HIGH:
+ macc_table = &skin_high_macc_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (flag == 0) {
+ /* Get macc table from current setup */
+ memcpy(&config->table, macc_table,
+ sizeof(struct atomisp_css_macc_table));
+ } else {
+ memcpy(macc_table, &config->table,
+ sizeof(struct atomisp_css_macc_table));
+ if (config->color_effect == asd->params.color_effect)
+ atomisp_css_set_macc_table(asd, macc_table);
+ }
+
+ return 0;
+}
+
+int atomisp_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector)
+{
+ atomisp_css_video_set_dis_vector(asd, vector);
+
+ asd->params.dis_proj_data_valid = false;
+ asd->params.css_update_params_needed = true;
+ return 0;
+}
+
+/*
+ * Function to set/get image stabilization statistics
+ */
+int atomisp_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats)
+{
+ return atomisp_css_get_dis_stat(asd, stats);
+}
+
+/*
+ * Function to set the camera_profiles.xml current sensor pixel array size
+ */
+int atomisp_set_array_res(struct atomisp_sub_device *asd,
+ struct atomisp_resolution *config)
+{
+ dev_dbg(asd->isp->dev, ">%s start\n", __func__);
+ if (!config) {
+ dev_err(asd->isp->dev, "Set sensor array size is not valid\n");
+ return -EINVAL;
+ }
+
+ asd->sensor_array_res.width = config->width;
+ asd->sensor_array_res.height = config->height;
+ return 0;
+}
+
+/*
+ * Function to get DVS2 BQ resolution settings
+ */
+int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd,
+ struct atomisp_dvs2_bq_resolutions *bq_res)
+{
+ struct ia_css_pipe_config *pipe_cfg = NULL;
+ struct ia_css_stream_config *stream_cfg = NULL;
+ struct ia_css_stream_input_config *input_config = NULL;
+
+ struct ia_css_stream *stream =
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream;
+
+ if (!stream) {
+ dev_warn(asd->isp->dev, "stream is not created\n");
+ return -EAGAIN;
+ }
+
+ pipe_cfg = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[CSS_PIPE_ID_VIDEO];
+ stream_cfg = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config;
+ input_config = &stream_cfg->input_config;
+
+ if (!bq_res)
+ return -EINVAL;
+
+ /* the GDC output resolution */
+ bq_res->output_bq.width_bq = pipe_cfg->output_info[0].res.width / 2;
+ bq_res->output_bq.height_bq = pipe_cfg->output_info[0].res.height / 2;
+
+ bq_res->envelope_bq.width_bq = 0;
+ bq_res->envelope_bq.height_bq = 0;
+ /* the GDC input resolution */
+ if (!asd->continuous_mode->val) {
+ bq_res->source_bq.width_bq = bq_res->output_bq.width_bq +
+ pipe_cfg->dvs_envelope.width / 2;
+ bq_res->source_bq.height_bq = bq_res->output_bq.height_bq +
+ pipe_cfg->dvs_envelope.height / 2;
+ /*
+ * Bad pixels caused by spatial filter processing.
+ * The ISP filter resolution should be provided by CSS/FW, but for
+ * now there is no API to query it and the value is fixed, so it
+ * is hardcoded here.
+ */
+ bq_res->ispfilter_bq.width_bq = 12 / 2;
+ bq_res->ispfilter_bq.height_bq = 12 / 2;
+ /* spatial filter shift, always 4 pixels */
+ bq_res->gdc_shift_bq.width_bq = 4 / 2;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+
+ if (asd->params.video_dis_en) {
+ bq_res->envelope_bq.width_bq = pipe_cfg->dvs_envelope.width
+ / 2 - bq_res->ispfilter_bq.width_bq;
+ bq_res->envelope_bq.height_bq = pipe_cfg->dvs_envelope.height
+ / 2 - bq_res->ispfilter_bq.height_bq;
+ }
+ } else {
+ unsigned int w_padding;
+ unsigned int gdc_effective_input = 0;
+
+ /* For GDC:
+ * gdc_effective_input = effective_input + envelope
+ *
+ * From the comment and formula in BZ1786,
+ * we see the source_bq should be:
+ * effective_input / bayer_ds_ratio
+ */
+ bq_res->source_bq.width_bq =
+ (input_config->effective_res.width *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width + 1) / 2;
+ bq_res->source_bq.height_bq =
+ (input_config->effective_res.height *
+ pipe_cfg->bayer_ds_out_res.height /
+ input_config->effective_res.height + 1) / 2;
+
+ if (!asd->params.video_dis_en) {
+ /*
+ * We adjust the ispfilter_bq to:
+ * ispfilter_bq = 128/BDS
+ * the firmware team still needs to provide an official
+ * formula for SDV.
+ */
+ bq_res->ispfilter_bq.width_bq = 128 *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width / 2;
+ bq_res->ispfilter_bq.height_bq = 128 *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width / 2;
+
+ if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401)) {
+ /* No additional left padding for ISYS2401 */
+ bq_res->gdc_shift_bq.width_bq = 4 / 2;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+ } else {
+ /*
+ * For the w_padding and gdc_shift_bq calculation,
+ * please see BZ 1786 and 4358 for more info.
+ * Testing shows this formula works for now, but
+ * there is still no official formula.
+ *
+ * w_padding = ceiling(gdc_effective_input
+ * /128, 1) * 128 - effective_width
+ * gdc_shift_bq = w_padding/BDS/2 + ispfilter_bq/2
+ */
+ gdc_effective_input =
+ input_config->effective_res.width +
+ pipe_cfg->dvs_envelope.width;
+ w_padding = roundup(gdc_effective_input, 128) -
+ input_config->effective_res.width;
+ w_padding = w_padding *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width + 1;
+ w_padding = roundup(w_padding / 2, 1);
+
+ bq_res->gdc_shift_bq.width_bq = bq_res->ispfilter_bq.width_bq / 2
+ + w_padding;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+ }
+ } else {
+ unsigned int dvs_w, dvs_h, dvs_w_max, dvs_h_max;
+
+ bq_res->ispfilter_bq.width_bq = 8 / 2;
+ bq_res->ispfilter_bq.height_bq = 8 / 2;
+
+ if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401)) {
+ /* No additional left padding for ISYS2401 */
+ bq_res->gdc_shift_bq.width_bq = 4 / 2;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+ } else {
+ w_padding =
+ roundup(input_config->effective_res.width, 128) -
+ input_config->effective_res.width;
+ if (w_padding < 12)
+ w_padding = 12;
+ bq_res->gdc_shift_bq.width_bq = 4 / 2 +
+ ((w_padding - 12) *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width + 1) / 2;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+ }
+
+ dvs_w = pipe_cfg->bayer_ds_out_res.width -
+ pipe_cfg->output_info[0].res.width;
+ dvs_h = pipe_cfg->bayer_ds_out_res.height -
+ pipe_cfg->output_info[0].res.height;
+ dvs_w_max = rounddown(
+ pipe_cfg->output_info[0].res.width / 5,
+ ATOM_ISP_STEP_WIDTH);
+ dvs_h_max = rounddown(
+ pipe_cfg->output_info[0].res.height / 5,
+ ATOM_ISP_STEP_HEIGHT);
+ bq_res->envelope_bq.width_bq =
+ min((dvs_w / 2), (dvs_w_max / 2)) -
+ bq_res->ispfilter_bq.width_bq;
+ bq_res->envelope_bq.height_bq =
+ min((dvs_h / 2), (dvs_h_max / 2)) -
+ bq_res->ispfilter_bq.height_bq;
+ }
+ }
+
+ dev_dbg(asd->isp->dev,
+ "source_bq.width_bq %d, source_bq.height_bq %d,\nispfilter_bq.width_bq %d, ispfilter_bq.height_bq %d,\ngdc_shift_bq.width_bq %d, gdc_shift_bq.height_bq %d,\nenvelope_bq.width_bq %d, envelope_bq.height_bq %d,\noutput_bq.width_bq %d, output_bq.height_bq %d\n",
+ bq_res->source_bq.width_bq, bq_res->source_bq.height_bq,
+ bq_res->ispfilter_bq.width_bq, bq_res->ispfilter_bq.height_bq,
+ bq_res->gdc_shift_bq.width_bq, bq_res->gdc_shift_bq.height_bq,
+ bq_res->envelope_bq.width_bq, bq_res->envelope_bq.height_bq,
+ bq_res->output_bq.width_bq, bq_res->output_bq.height_bq);
+
+ return 0;
+}
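+
+/*
+ * Note, added for clarity; not part of the original driver: "bq" above
+ * stands for bayer-quad units, where one quad covers a 2x2 pixel block;
+ * hence the recurring "/ 2" when converting pixel dimensions to BQ
+ * (e.g. a 1920x1080 GDC output is 960x540 BQ).
+ */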
+
+int atomisp_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs)
+{
+ return atomisp_css_set_dis_coefs(asd, coefs);
+}
+
+/*
+ * Function to set/get 3A stat from isp
+ */
+int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_statistics *config)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_s3a_buf *s3a_buf;
+ unsigned long ret;
+
+ if (flag != 0)
+ return -EINVAL;
+
+ /* sanity check to avoid writing into unallocated memory. */
+ if (asd->params.s3a_output_bytes == 0)
+ return -EINVAL;
+
+ if (atomisp_compare_grid(asd, &config->grid_info) != 0) {
+ /* If the grid info in the argument differs from the current
+ grid info, we tell the caller to reset the grid size and
+ try again. */
+ return -EAGAIN;
+ }
+
+ if (list_empty(&asd->s3a_stats_ready)) {
+ dev_err(isp->dev, "3a statistics is not valid.\n");
+ return -EAGAIN;
+ }
+
+ s3a_buf = list_entry(asd->s3a_stats_ready.next,
+ struct atomisp_s3a_buf, list);
+ if (s3a_buf->s3a_map)
+ ia_css_translate_3a_statistics(
+ asd->params.s3a_user_stat, s3a_buf->s3a_map);
+ else
+ ia_css_get_3a_statistics(asd->params.s3a_user_stat,
+ s3a_buf->s3a_data);
+
+ config->exp_id = s3a_buf->s3a_data->exp_id;
+ config->isp_config_id = s3a_buf->s3a_data->isp_config_id;
+
+ ret = copy_to_user(config->data, asd->params.s3a_user_stat->data,
+ asd->params.s3a_output_bytes);
+ if (ret) {
+ dev_err(isp->dev, "copy to user failed: copied %lu bytes\n",
+ ret);
+ return -EFAULT;
+ }
+
+ /* Move to free buffer list */
+ list_del_init(&s3a_buf->list);
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ dev_dbg(isp->dev, "%s: finish getting exp_id %d 3a stat, isp_config_id %d\n",
+ __func__,
+ config->exp_id, config->isp_config_id);
+ return 0;
+}
+
+int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_metadata *md)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_stream_config *stream_config;
+ struct ia_css_stream_info *stream_info;
+ struct camera_mipi_info *mipi_info;
+ struct atomisp_metadata_buf *md_buf;
+ enum atomisp_metadata_type md_type = ATOMISP_MAIN_METADATA;
+ int ret, i;
+
+ if (flag != 0)
+ return -EINVAL;
+
+ stream_config = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_config;
+ stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_info;
+
+ /* We always return the resolution and stride even if there is
+ * no valid metadata. This allows the caller to get the information
+ * needed to allocate user-space buffers. */
+ md->width = stream_info->metadata_info.resolution.width;
+ md->height = stream_info->metadata_info.resolution.height;
+ md->stride = stream_info->metadata_info.stride;
+
+ /* sanity check to avoid writing into unallocated memory.
+ * This does not return an error because it is a valid way
+ * for applications to detect that metadata is not enabled. */
+ if (md->width == 0 || md->height == 0 || !md->data)
+ return 0;
+
+ /* This is done in the atomisp_buf_done() */
+ if (list_empty(&asd->metadata_ready[md_type])) {
+ dev_warn(isp->dev, "Metadata queue is empty now!\n");
+ return -EAGAIN;
+ }
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ if (!mipi_info)
+ return -EINVAL;
+
+ if (mipi_info->metadata_effective_width) {
+ for (i = 0; i < md->height; i++)
+ md->effective_width[i] =
+ mipi_info->metadata_effective_width[i];
+ }
+
+ md_buf = list_entry(asd->metadata_ready[md_type].next,
+ struct atomisp_metadata_buf, list);
+ md->exp_id = md_buf->metadata->exp_id;
+ if (md_buf->md_vptr) {
+ ret = copy_to_user(md->data,
+ md_buf->md_vptr,
+ stream_info->metadata_info.size);
+ } else {
+ hmm_load(md_buf->metadata->address,
+ asd->params.metadata_user[md_type],
+ stream_info->metadata_info.size);
+
+ ret = copy_to_user(md->data,
+ asd->params.metadata_user[md_type],
+ stream_info->metadata_info.size);
+ }
+ if (ret) {
+ dev_err(isp->dev, "copy to user failed: copied %d bytes\n",
+ ret);
+ return -EFAULT;
+ }
+
+ list_del_init(&md_buf->list);
+ list_add_tail(&md_buf->list, &asd->metadata[md_type]);
+
+ dev_dbg(isp->dev, "%s: HAL de-queued metadata type %d with exp_id %d\n",
+ __func__, md_type, md->exp_id);
+ return 0;
+}
+
+int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_metadata_with_type *md)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_stream_config *stream_config;
+ struct ia_css_stream_info *stream_info;
+ struct camera_mipi_info *mipi_info;
+ struct atomisp_metadata_buf *md_buf;
+ enum atomisp_metadata_type md_type;
+ int ret, i;
+
+ if (flag != 0)
+ return -EINVAL;
+
+ stream_config = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_config;
+ stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_info;
+
+ /* We always return the resolution and stride even if there is
+ * no valid metadata. This allows the caller to get the information
+ * needed to allocate user-space buffers. */
+ md->width = stream_info->metadata_info.resolution.width;
+ md->height = stream_info->metadata_info.resolution.height;
+ md->stride = stream_info->metadata_info.stride;
+
+ /* sanity check to avoid writing into unallocated memory.
+ * This does not return an error because it is a valid way
+ * for applications to detect that metadata is not enabled. */
+ if (md->width == 0 || md->height == 0 || !md->data)
+ return 0;
+
+ md_type = md->type;
+ if (md_type < 0 || md_type >= ATOMISP_METADATA_TYPE_NUM)
+ return -EINVAL;
+
+ /* This is done in the atomisp_buf_done() */
+ if (list_empty(&asd->metadata_ready[md_type])) {
+ dev_warn(isp->dev, "Metadata queue is empty now!\n");
+ return -EAGAIN;
+ }
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ if (!mipi_info)
+ return -EINVAL;
+
+ if (mipi_info->metadata_effective_width) {
+ for (i = 0; i < md->height; i++)
+ md->effective_width[i] =
+ mipi_info->metadata_effective_width[i];
+ }
+
+ md_buf = list_entry(asd->metadata_ready[md_type].next,
+ struct atomisp_metadata_buf, list);
+ md->exp_id = md_buf->metadata->exp_id;
+ if (md_buf->md_vptr) {
+ ret = copy_to_user(md->data,
+ md_buf->md_vptr,
+ stream_info->metadata_info.size);
+ } else {
+ hmm_load(md_buf->metadata->address,
+ asd->params.metadata_user[md_type],
+ stream_info->metadata_info.size);
+
+ ret = copy_to_user(md->data,
+ asd->params.metadata_user[md_type],
+ stream_info->metadata_info.size);
+ }
+ if (ret) {
+ dev_err(isp->dev, "copy to user failed: copied %d bytes\n",
+ ret);
+ return -EFAULT;
+ } else {
+ list_del_init(&md_buf->list);
+ list_add_tail(&md_buf->list, &asd->metadata[md_type]);
+ }
+ dev_dbg(isp->dev, "%s: HAL de-queued metadata type %d with exp_id %d\n",
+ __func__, md_type, md->exp_id);
+ return 0;
+}
+
+/*
+ * Function to calculate real zoom region for every pipe
+ */
+int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd,
+ struct ia_css_dz_config *dz_config,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct atomisp_resolution eff_res, out_res;
+ int w_offset, h_offset;
+
+ memset(&eff_res, 0, sizeof(eff_res));
+ memset(&out_res, 0, sizeof(out_res));
+
+ if (dz_config->dx || dz_config->dy)
+ return 0;
+
+ if (css_pipe_id != IA_CSS_PIPE_ID_PREVIEW
+ && css_pipe_id != IA_CSS_PIPE_ID_CAPTURE) {
+ dev_err(asd->isp->dev, "%s the set pipe no support crop region"
+ , __func__);
+ return -EINVAL;
+ }
+
+ eff_res.width =
+ stream_env->stream_config.input_config.effective_res.width;
+ eff_res.height =
+ stream_env->stream_config.input_config.effective_res.height;
+ if (eff_res.width == 0 || eff_res.height == 0) {
+ dev_err(asd->isp->dev, "%s err effective resolution"
+ , __func__);
+ return -EINVAL;
+ }
+
+ if (dz_config->zoom_region.resolution.width
+ == asd->sensor_array_res.width
+ || dz_config->zoom_region.resolution.height
+ == asd->sensor_array_res.height) {
+ /*no need crop region*/
+ dz_config->zoom_region.origin.x = 0;
+ dz_config->zoom_region.origin.y = 0;
+ dz_config->zoom_region.resolution.width = eff_res.width;
+ dz_config->zoom_region.resolution.height = eff_res.height;
+ return 0;
+ }
+
+ /* FIXME:
+ * This is not the correct implementation per Google's definition, due
+ * to a firmware limitation. The real crop region is mapped based on
+ * the max crop region calculated above.
+ */
+
+ if (!atomisp_hw_is_isp2401) {
+ dz_config->zoom_region.origin.x = dz_config->zoom_region.origin.x
+ * eff_res.width
+ / asd->sensor_array_res.width;
+ dz_config->zoom_region.origin.y = dz_config->zoom_region.origin.y
+ * eff_res.height
+ / asd->sensor_array_res.height;
+ dz_config->zoom_region.resolution.width = dz_config->zoom_region.resolution.width
+ * eff_res.width
+ / asd->sensor_array_res.width;
+ dz_config->zoom_region.resolution.height = dz_config->zoom_region.resolution.height
+ * eff_res.height
+ / asd->sensor_array_res.height;
+ /*
+ * Set same ratio of crop region resolution and current pipe output
+ * resolution
+ */
+ out_res.width = stream_env->pipe_configs[css_pipe_id].output_info[0].res.width;
+ out_res.height = stream_env->pipe_configs[css_pipe_id].output_info[0].res.height;
+ if (out_res.width == 0 || out_res.height == 0) {
+ dev_err(asd->isp->dev, "%s err current pipe output resolution"
+ , __func__);
+ return -EINVAL;
+ }
+ } else {
+ out_res.width = stream_env->pipe_configs[css_pipe_id].output_info[0].res.width;
+ out_res.height = stream_env->pipe_configs[css_pipe_id].output_info[0].res.height;
+ if (out_res.width == 0 || out_res.height == 0) {
+ dev_err(asd->isp->dev, "%s err current pipe output resolution"
+ , __func__);
+ return -EINVAL;
+ }
+
+ if (asd->sensor_array_res.width * out_res.height
+ < out_res.width * asd->sensor_array_res.height) {
+ h_offset = asd->sensor_array_res.height
+ - asd->sensor_array_res.width
+ * out_res.height / out_res.width;
+ h_offset = h_offset / 2;
+ if (dz_config->zoom_region.origin.y < h_offset)
+ dz_config->zoom_region.origin.y = 0;
+ else
+ dz_config->zoom_region.origin.y = dz_config->zoom_region.origin.y - h_offset;
+ w_offset = 0;
+ } else {
+ w_offset = asd->sensor_array_res.width
+ - asd->sensor_array_res.height
+ * out_res.width / out_res.height;
+ w_offset = w_offset / 2;
+ if (dz_config->zoom_region.origin.x < w_offset)
+ dz_config->zoom_region.origin.x = 0;
+ else
+ dz_config->zoom_region.origin.x = dz_config->zoom_region.origin.x - w_offset;
+ h_offset = 0;
+ }
+ dz_config->zoom_region.origin.x = dz_config->zoom_region.origin.x
+ * eff_res.width
+ / (asd->sensor_array_res.width - 2 * w_offset);
+ dz_config->zoom_region.origin.y = dz_config->zoom_region.origin.y
+ * eff_res.height
+ / (asd->sensor_array_res.height - 2 * h_offset);
+ dz_config->zoom_region.resolution.width = dz_config->zoom_region.resolution.width
+ * eff_res.width
+ / (asd->sensor_array_res.width - 2 * w_offset);
+ dz_config->zoom_region.resolution.height = dz_config->zoom_region.resolution.height
+ * eff_res.height
+ / (asd->sensor_array_res.height - 2 * h_offset);
+ }
+
+ if (out_res.width * dz_config->zoom_region.resolution.height
+ > dz_config->zoom_region.resolution.width * out_res.height) {
+ dz_config->zoom_region.resolution.height =
+ dz_config->zoom_region.resolution.width
+ * out_res.height / out_res.width;
+ } else {
+ dz_config->zoom_region.resolution.width =
+ dz_config->zoom_region.resolution.height
+ * out_res.width / out_res.height;
+ }
+ dev_dbg(asd->isp->dev,
+ "%s crop region:(%d,%d),(%d,%d) eff_res(%d, %d) array_size(%d,%d) out_res(%d, %d)\n",
+ __func__, dz_config->zoom_region.origin.x,
+ dz_config->zoom_region.origin.y,
+ dz_config->zoom_region.resolution.width,
+ dz_config->zoom_region.resolution.height,
+ eff_res.width, eff_res.height,
+ asd->sensor_array_res.width,
+ asd->sensor_array_res.height,
+ out_res.width, out_res.height);
+
+ if ((dz_config->zoom_region.origin.x +
+ dz_config->zoom_region.resolution.width
+ > eff_res.width) ||
+ (dz_config->zoom_region.origin.y +
+ dz_config->zoom_region.resolution.height
+ > eff_res.height))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Function to check the zoom region whether is effective
+ */
+static bool atomisp_check_zoom_region(
+ struct atomisp_sub_device *asd,
+ struct ia_css_dz_config *dz_config)
+{
+ struct atomisp_resolution config;
+ bool flag = false;
+ unsigned int w, h;
+
+ memset(&config, 0, sizeof(struct atomisp_resolution));
+
+ if (dz_config->dx && dz_config->dy)
+ return true;
+
+ config.width = asd->sensor_array_res.width;
+ config.height = asd->sensor_array_res.height;
+ w = dz_config->zoom_region.origin.x +
+ dz_config->zoom_region.resolution.width;
+ h = dz_config->zoom_region.origin.y +
+ dz_config->zoom_region.resolution.height;
+
+ if ((w <= config.width) && (h <= config.height) && w > 0 && h > 0)
+ flag = true;
+ else
+ /* an invalid zoom region was requested */
+ dev_err(asd->isp->dev,
+ "%s zoom region ERROR:dz_config:(%d,%d),(%d,%d)array_res(%d, %d)\n",
+ __func__, dz_config->zoom_region.origin.x,
+ dz_config->zoom_region.origin.y,
+ dz_config->zoom_region.resolution.width,
+ dz_config->zoom_region.resolution.height,
+ config.width, config.height);
+
+ return flag;
+}
+
+void atomisp_apply_css_parameters(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_params *css_param)
+{
+ if (css_param->update_flag.wb_config)
+ atomisp_css_set_wb_config(asd, &css_param->wb_config);
+
+ if (css_param->update_flag.ob_config)
+ atomisp_css_set_ob_config(asd, &css_param->ob_config);
+
+ if (css_param->update_flag.dp_config)
+ atomisp_css_set_dp_config(asd, &css_param->dp_config);
+
+ if (css_param->update_flag.nr_config)
+ atomisp_css_set_nr_config(asd, &css_param->nr_config);
+
+ if (css_param->update_flag.ee_config)
+ atomisp_css_set_ee_config(asd, &css_param->ee_config);
+
+ if (css_param->update_flag.tnr_config)
+ atomisp_css_set_tnr_config(asd, &css_param->tnr_config);
+
+ if (css_param->update_flag.a3a_config)
+ atomisp_css_set_3a_config(asd, &css_param->s3a_config);
+
+ if (css_param->update_flag.ctc_config)
+ atomisp_css_set_ctc_config(asd, &css_param->ctc_config);
+
+ if (css_param->update_flag.cnr_config)
+ atomisp_css_set_cnr_config(asd, &css_param->cnr_config);
+
+ if (css_param->update_flag.ecd_config)
+ atomisp_css_set_ecd_config(asd, &css_param->ecd_config);
+
+ if (css_param->update_flag.ynr_config)
+ atomisp_css_set_ynr_config(asd, &css_param->ynr_config);
+
+ if (css_param->update_flag.fc_config)
+ atomisp_css_set_fc_config(asd, &css_param->fc_config);
+
+ if (css_param->update_flag.macc_config)
+ atomisp_css_set_macc_config(asd, &css_param->macc_config);
+
+ if (css_param->update_flag.aa_config)
+ atomisp_css_set_aa_config(asd, &css_param->aa_config);
+
+ if (css_param->update_flag.anr_config)
+ atomisp_css_set_anr_config(asd, &css_param->anr_config);
+
+ if (css_param->update_flag.xnr_config)
+ atomisp_css_set_xnr_config(asd, &css_param->xnr_config);
+
+ if (css_param->update_flag.yuv2rgb_cc_config)
+ atomisp_css_set_yuv2rgb_cc_config(asd,
+ &css_param->yuv2rgb_cc_config);
+
+ if (css_param->update_flag.rgb2yuv_cc_config)
+ atomisp_css_set_rgb2yuv_cc_config(asd,
+ &css_param->rgb2yuv_cc_config);
+
+ if (css_param->update_flag.macc_table)
+ atomisp_css_set_macc_table(asd, &css_param->macc_table);
+
+ if (css_param->update_flag.xnr_table)
+ atomisp_css_set_xnr_table(asd, &css_param->xnr_table);
+
+ if (css_param->update_flag.r_gamma_table)
+ atomisp_css_set_r_gamma_table(asd, &css_param->r_gamma_table);
+
+ if (css_param->update_flag.g_gamma_table)
+ atomisp_css_set_g_gamma_table(asd, &css_param->g_gamma_table);
+
+ if (css_param->update_flag.b_gamma_table)
+ atomisp_css_set_b_gamma_table(asd, &css_param->b_gamma_table);
+
+ if (css_param->update_flag.anr_thres)
+ atomisp_css_set_anr_thres(asd, &css_param->anr_thres);
+
+ if (css_param->update_flag.shading_table)
+ atomisp_css_set_shading_table(asd, css_param->shading_table);
+
+ if (css_param->update_flag.morph_table && asd->params.gdc_cac_en)
+ atomisp_css_set_morph_table(asd, css_param->morph_table);
+
+ if (css_param->update_flag.dvs2_coefs) {
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(
+ &asd->params.curr_grid_info);
+
+ if (dvs_grid_info && dvs_grid_info->enable)
+ atomisp_css_set_dvs2_coefs(asd, css_param->dvs2_coeff);
+ }
+
+ if (css_param->update_flag.dvs_6axis_config)
+ atomisp_css_set_dvs_6axis(asd, css_param->dvs_6axis);
+
+ atomisp_css_set_isp_config_id(asd, css_param->isp_config_id);
+ /*
+ * These configurations are only used by ISP1.x, not ISP2.x,
+ * so do not handle them here; see the comments of ia_css_isp_config.
+ * 1 cc_config
+ * 2 ce_config
+ * 3 de_config
+ * 4 gc_config
+ * 5 gamma_table
+ * 6 ctc_table
+ * 7 dvs_coefs
+ */
+}
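+
+/*
+ * Note, added for clarity; not part of the original driver: the
+ * update_flag members double as "was this field supplied" markers and
+ * as pointers to the staged copies inside css_param;
+ * atomisp_cp_general_isp_parameters() below sets them, and the checks
+ * above only push a block to CSS when its flag is non-NULL.
+ */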
+
+static unsigned long copy_from_compatible(void *to, const void *from,
+ unsigned long n, bool from_user)
+{
+ if (from_user)
+ return copy_from_user(to, (void __user *)from, n);
+ else
+ memcpy(to, from, n);
+ return 0;
+}
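+
+/*
+ * Note, added for clarity; not part of the original driver:
+ * copy_from_compatible() follows the copy_from_user() convention that a
+ * non-zero return means the copy was incomplete, which lets the callers
+ * below map any non-zero result to -EFAULT for both user and kernel
+ * source pointers.
+ */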
+
+int atomisp_cp_general_isp_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ struct atomisp_parameters *cur_config = &css_param->update_flag;
+
+ if (!arg || !asd || !css_param)
+ return -EINVAL;
+
+ if (arg->wb_config && (from_user || !cur_config->wb_config)) {
+ if (copy_from_compatible(&css_param->wb_config, arg->wb_config,
+ sizeof(struct atomisp_css_wb_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.wb_config =
+ (struct atomisp_wb_config *)&css_param->wb_config;
+ }
+
+ if (arg->ob_config && (from_user || !cur_config->ob_config)) {
+ if (copy_from_compatible(&css_param->ob_config, arg->ob_config,
+ sizeof(struct atomisp_css_ob_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ob_config =
+ (struct atomisp_ob_config *)&css_param->ob_config;
+ }
+
+ if (arg->dp_config && (from_user || !cur_config->dp_config)) {
+ if (copy_from_compatible(&css_param->dp_config, arg->dp_config,
+ sizeof(struct atomisp_css_dp_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.dp_config =
+ (struct atomisp_dp_config *)&css_param->dp_config;
+ }
+
+ if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
+ if (arg->dz_config && (from_user || !cur_config->dz_config)) {
+ if (copy_from_compatible(&css_param->dz_config,
+ arg->dz_config,
+ sizeof(struct atomisp_css_dz_config),
+ from_user))
+ return -EFAULT;
+ if (!atomisp_check_zoom_region(asd,
+ &css_param->dz_config)) {
+ dev_err(asd->isp->dev, "crop region error!");
+ return -EINVAL;
+ }
+ css_param->update_flag.dz_config =
+ (struct atomisp_dz_config *)
+ &css_param->dz_config;
+ }
+ }
+
+ if (arg->nr_config && (from_user || !cur_config->nr_config)) {
+ if (copy_from_compatible(&css_param->nr_config, arg->nr_config,
+ sizeof(struct atomisp_css_nr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.nr_config =
+ (struct atomisp_nr_config *)&css_param->nr_config;
+ }
+
+ if (arg->ee_config && (from_user || !cur_config->ee_config)) {
+ if (copy_from_compatible(&css_param->ee_config, arg->ee_config,
+ sizeof(struct atomisp_css_ee_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ee_config =
+ (struct atomisp_ee_config *)&css_param->ee_config;
+ }
+
+ if (arg->tnr_config && (from_user || !cur_config->tnr_config)) {
+ if (copy_from_compatible(&css_param->tnr_config,
+ arg->tnr_config,
+ sizeof(struct atomisp_css_tnr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.tnr_config =
+ (struct atomisp_tnr_config *)
+ &css_param->tnr_config;
+ }
+
+ if (arg->a3a_config && (from_user || !cur_config->a3a_config)) {
+ if (copy_from_compatible(&css_param->s3a_config,
+ arg->a3a_config,
+ sizeof(struct atomisp_css_3a_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.a3a_config =
+ (struct atomisp_3a_config *)&css_param->s3a_config;
+ }
+
+ if (arg->ctc_config && (from_user || !cur_config->ctc_config)) {
+ if (copy_from_compatible(&css_param->ctc_config,
+ arg->ctc_config,
+ sizeof(struct atomisp_css_ctc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ctc_config =
+ (struct atomisp_ctc_config *)
+ &css_param->ctc_config;
+ }
+
+ if (arg->cnr_config && (from_user || !cur_config->cnr_config)) {
+ if (copy_from_compatible(&css_param->cnr_config,
+ arg->cnr_config,
+ sizeof(struct atomisp_css_cnr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.cnr_config =
+ (struct atomisp_cnr_config *)
+ &css_param->cnr_config;
+ }
+
+ if (arg->ecd_config && (from_user || !cur_config->ecd_config)) {
+ if (copy_from_compatible(&css_param->ecd_config,
+ arg->ecd_config,
+ sizeof(struct atomisp_css_ecd_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ecd_config =
+ (struct atomisp_ecd_config *)
+ &css_param->ecd_config;
+ }
+
+ if (arg->ynr_config && (from_user || !cur_config->ynr_config)) {
+ if (copy_from_compatible(&css_param->ynr_config,
+ arg->ynr_config,
+ sizeof(struct atomisp_css_ynr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ynr_config =
+ (struct atomisp_ynr_config *)
+ &css_param->ynr_config;
+ }
+
+ if (arg->fc_config && (from_user || !cur_config->fc_config)) {
+ if (copy_from_compatible(&css_param->fc_config,
+ arg->fc_config,
+ sizeof(struct atomisp_css_fc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.fc_config =
+ (struct atomisp_fc_config *)&css_param->fc_config;
+ }
+
+ if (arg->macc_config && (from_user || !cur_config->macc_config)) {
+ if (copy_from_compatible(&css_param->macc_config,
+ arg->macc_config,
+ sizeof(struct atomisp_css_macc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.macc_config =
+ (struct atomisp_macc_config *)
+ &css_param->macc_config;
+ }
+
+ if (arg->aa_config && (from_user || !cur_config->aa_config)) {
+ if (copy_from_compatible(&css_param->aa_config, arg->aa_config,
+ sizeof(struct atomisp_css_aa_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.aa_config =
+ (struct atomisp_aa_config *)&css_param->aa_config;
+ }
+
+ if (arg->anr_config && (from_user || !cur_config->anr_config)) {
+ if (copy_from_compatible(&css_param->anr_config,
+ arg->anr_config,
+ sizeof(struct atomisp_css_anr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.anr_config =
+ (struct atomisp_anr_config *)
+ &css_param->anr_config;
+ }
+
+ if (arg->xnr_config && (from_user || !cur_config->xnr_config)) {
+ if (copy_from_compatible(&css_param->xnr_config,
+ arg->xnr_config,
+ sizeof(struct atomisp_css_xnr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.xnr_config =
+ (struct atomisp_xnr_config *)
+ &css_param->xnr_config;
+ }
+
+ if (arg->yuv2rgb_cc_config &&
+ (from_user || !cur_config->yuv2rgb_cc_config)) {
+ if (copy_from_compatible(&css_param->yuv2rgb_cc_config,
+ arg->yuv2rgb_cc_config,
+ sizeof(struct atomisp_css_cc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.yuv2rgb_cc_config =
+ (struct atomisp_cc_config *)
+ &css_param->yuv2rgb_cc_config;
+ }
+
+ if (arg->rgb2yuv_cc_config &&
+ (from_user || !cur_config->rgb2yuv_cc_config)) {
+ if (copy_from_compatible(&css_param->rgb2yuv_cc_config,
+ arg->rgb2yuv_cc_config,
+ sizeof(struct atomisp_css_cc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.rgb2yuv_cc_config =
+ (struct atomisp_cc_config *)
+ &css_param->rgb2yuv_cc_config;
+ }
+
+ if (arg->macc_table && (from_user || !cur_config->macc_table)) {
+ if (copy_from_compatible(&css_param->macc_table,
+ arg->macc_table,
+ sizeof(struct atomisp_css_macc_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.macc_table =
+ (struct atomisp_macc_table *)
+ &css_param->macc_table;
+ }
+
+ if (arg->xnr_table && (from_user || !cur_config->xnr_table)) {
+ if (copy_from_compatible(&css_param->xnr_table,
+ arg->xnr_table,
+ sizeof(struct atomisp_css_xnr_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.xnr_table =
+ (struct atomisp_xnr_table *)&css_param->xnr_table;
+ }
+
+ if (arg->r_gamma_table && (from_user || !cur_config->r_gamma_table)) {
+ if (copy_from_compatible(&css_param->r_gamma_table,
+ arg->r_gamma_table,
+ sizeof(struct atomisp_css_rgb_gamma_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.r_gamma_table =
+ (struct atomisp_rgb_gamma_table *)
+ &css_param->r_gamma_table;
+ }
+
+ if (arg->g_gamma_table && (from_user || !cur_config->g_gamma_table)) {
+ if (copy_from_compatible(&css_param->g_gamma_table,
+ arg->g_gamma_table,
+ sizeof(struct atomisp_css_rgb_gamma_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.g_gamma_table =
+ (struct atomisp_rgb_gamma_table *)
+ &css_param->g_gamma_table;
+ }
+
+ if (arg->b_gamma_table && (from_user || !cur_config->b_gamma_table)) {
+ if (copy_from_compatible(&css_param->b_gamma_table,
+ arg->b_gamma_table,
+ sizeof(struct atomisp_css_rgb_gamma_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.b_gamma_table =
+ (struct atomisp_rgb_gamma_table *)
+ &css_param->b_gamma_table;
+ }
+
+ if (arg->anr_thres && (from_user || !cur_config->anr_thres)) {
+ if (copy_from_compatible(&css_param->anr_thres, arg->anr_thres,
+ sizeof(struct atomisp_css_anr_thres),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.anr_thres =
+ (struct atomisp_anr_thres *)&css_param->anr_thres;
+ }
+
+ if (from_user)
+ css_param->isp_config_id = arg->isp_config_id;
+ /*
+ * These configurations are only used by ISP1.x, not ISP2.x,
+ * so do not handle them here; see the comments of ia_css_isp_config.
+ * 1 cc_config
+ * 2 ce_config
+ * 3 de_config
+ * 4 gc_config
+ * 5 gamma_table
+ * 6 ctc_table
+ * 7 dvs_coefs
+ */
+ return 0;
+}
+
+int atomisp_cp_lsc_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *source_st,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ unsigned int i;
+ unsigned int len_table;
+ struct atomisp_css_shading_table *shading_table;
+ struct atomisp_css_shading_table *old_table;
+ struct atomisp_shading_table *st, dest_st;
+
+ if (!source_st)
+ return 0;
+
+ if (!css_param)
+ return -EINVAL;
+
+ if (!from_user && css_param->update_flag.shading_table)
+ return 0;
+
+ if (atomisp_hw_is_isp2401) {
+ if (copy_from_compatible(&dest_st, source_st,
+ sizeof(struct atomisp_shading_table),
+ from_user)) {
+ dev_err(asd->isp->dev, "copy shading table failed!");
+ return -EFAULT;
+ }
+ st = &dest_st;
+ } else {
+ st = source_st;
+ }
+
+ old_table = css_param->shading_table;
+
+ /* user config is to disable the shading table. */
+ if (!st->enable) {
+ /* Generate a minimum table with enable = 0. */
+ shading_table = atomisp_css_shading_table_alloc(1, 1);
+ if (!shading_table)
+ return -ENOMEM;
+ shading_table->enable = 0;
+ goto set_lsc;
+ }
+
+ /* Setting a new table. Validate first - all tables must be set */
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (!st->data[i]) {
+ dev_err(asd->isp->dev, "shading table validate failed");
+ return -EINVAL;
+ }
+ }
+
+ /* Shading table size per color */
+ if (!atomisp_hw_is_isp2401) {
+ if (st->width > ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ st->height > ISP2400_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) {
+ dev_err(asd->isp->dev, "shading table w/h validate failed!");
+ return -EINVAL;
+ }
+ } else {
+ if (st->width > ISP2401_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ st->height > ISP2401_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) {
+ dev_err(asd->isp->dev, "shading table w/h validate failed!");
+ return -EINVAL;
+ }
+ }
+
+ shading_table = atomisp_css_shading_table_alloc(st->width, st->height);
+ if (!shading_table)
+ return -ENOMEM;
+
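+	/*
+	 * Each of the ATOMISP_NUM_SC_COLORS planes holds width * height
+	 * entries of ATOMISP_SC_TYPE_SIZE bytes each.
+	 */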
+ len_table = st->width * st->height * ATOMISP_SC_TYPE_SIZE;
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (copy_from_compatible(shading_table->data[i],
+ st->data[i], len_table, from_user)) {
+ atomisp_css_shading_table_free(shading_table);
+ return -EFAULT;
+ }
+ }
+ shading_table->sensor_width = st->sensor_width;
+ shading_table->sensor_height = st->sensor_height;
+ shading_table->fraction_bits = st->fraction_bits;
+ shading_table->enable = st->enable;
+
+ /* No need to update shading table if it is the same */
+ if (old_table &&
+ old_table->sensor_width == shading_table->sensor_width &&
+ old_table->sensor_height == shading_table->sensor_height &&
+ old_table->width == shading_table->width &&
+ old_table->height == shading_table->height &&
+ old_table->fraction_bits == shading_table->fraction_bits &&
+ old_table->enable == shading_table->enable) {
+ bool data_is_same = true;
+
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (memcmp(shading_table->data[i], old_table->data[i],
+ len_table) != 0) {
+ data_is_same = false;
+ break;
+ }
+ }
+
+ if (data_is_same) {
+ atomisp_css_shading_table_free(shading_table);
+ return 0;
+ }
+ }
+
+set_lsc:
+ /* set LSC to CSS */
+ css_param->shading_table = shading_table;
+ css_param->update_flag.shading_table = (struct atomisp_shading_table *)shading_table;
+ asd->params.sc_en = shading_table;
+
+ if (old_table)
+ atomisp_css_shading_table_free(old_table);
+
+ return 0;
+}
+
+int atomisp_css_cp_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ struct atomisp_css_dvs_grid_info *cur =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ int dvs_hor_coef_bytes, dvs_ver_coef_bytes;
+ struct ia_css_dvs2_coefficients dvs2_coefs;
+
+ if (!coefs || !cur)
+ return 0;
+
+ if (!from_user && css_param->update_flag.dvs2_coefs)
+ return 0;
+
+ if (!atomisp_hw_is_isp2401) {
+ if (sizeof(*cur) != sizeof(coefs->grid) ||
+ memcmp(&coefs->grid, cur, sizeof(coefs->grid))) {
+ dev_err(asd->isp->dev, "dvs grid mis-match!\n");
+			/*
+			 * If the grid info in the argument differs from the
+			 * current grid info, tell the caller to reset the
+			 * grid size and try again.
+			 */
+ return -EAGAIN;
+ }
+
+ if (!coefs->hor_coefs.odd_real ||
+ !coefs->hor_coefs.odd_imag ||
+ !coefs->hor_coefs.even_real ||
+ !coefs->hor_coefs.even_imag ||
+ !coefs->ver_coefs.odd_real ||
+ !coefs->ver_coefs.odd_imag ||
+ !coefs->ver_coefs.even_real ||
+ !coefs->ver_coefs.even_imag)
+ return -EINVAL;
+
+ if (!css_param->dvs2_coeff) {
+ /* DIS coefficients. */
+ css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur);
+ if (!css_param->dvs2_coeff)
+ return -ENOMEM;
+ }
+
+ dvs_hor_coef_bytes = asd->params.dvs_hor_coef_bytes;
+ dvs_ver_coef_bytes = asd->params.dvs_ver_coef_bytes;
+ if (copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_real,
+ coefs->hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_imag,
+ coefs->hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_real,
+ coefs->hor_coefs.even_real, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_imag,
+ coefs->hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_real,
+ coefs->ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_imag,
+ coefs->ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_real,
+ coefs->ver_coefs.even_real, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_imag,
+ coefs->ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) {
+ ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
+ css_param->dvs2_coeff = NULL;
+ return -EFAULT;
+ }
+ } else {
+ if (copy_from_compatible(&dvs2_coefs, coefs,
+ sizeof(struct ia_css_dvs2_coefficients),
+ from_user)) {
+ dev_err(asd->isp->dev, "copy dvs2 coef failed");
+ return -EFAULT;
+ }
+
+ if (sizeof(*cur) != sizeof(dvs2_coefs.grid) ||
+ memcmp(&dvs2_coefs.grid, cur, sizeof(dvs2_coefs.grid))) {
+ dev_err(asd->isp->dev, "dvs grid mis-match!\n");
+			/*
+			 * If the grid info in the argument differs from the
+			 * current grid info, tell the caller to reset the
+			 * grid size and try again.
+			 */
+ return -EAGAIN;
+ }
+
+ if (!dvs2_coefs.hor_coefs.odd_real ||
+ !dvs2_coefs.hor_coefs.odd_imag ||
+ !dvs2_coefs.hor_coefs.even_real ||
+ !dvs2_coefs.hor_coefs.even_imag ||
+ !dvs2_coefs.ver_coefs.odd_real ||
+ !dvs2_coefs.ver_coefs.odd_imag ||
+ !dvs2_coefs.ver_coefs.even_real ||
+ !dvs2_coefs.ver_coefs.even_imag)
+ return -EINVAL;
+
+ if (!css_param->dvs2_coeff) {
+ /* DIS coefficients. */
+ css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur);
+ if (!css_param->dvs2_coeff)
+ return -ENOMEM;
+ }
+
+ dvs_hor_coef_bytes = asd->params.dvs_hor_coef_bytes;
+ dvs_ver_coef_bytes = asd->params.dvs_ver_coef_bytes;
+ if (copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_real,
+ dvs2_coefs.hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_imag,
+ dvs2_coefs.hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_real,
+ dvs2_coefs.hor_coefs.even_real, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_imag,
+ dvs2_coefs.hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_real,
+ dvs2_coefs.ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_imag,
+ dvs2_coefs.ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_real,
+ dvs2_coefs.ver_coefs.even_real, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_imag,
+ dvs2_coefs.ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) {
+ ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
+ css_param->dvs2_coeff = NULL;
+ return -EFAULT;
+ }
+ }
+
+ css_param->update_flag.dvs2_coefs =
+ (struct atomisp_dvs2_coefficients *)css_param->dvs2_coeff;
+ return 0;
+}
+
+int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd,
+ struct atomisp_dvs_6axis_config *source_6axis_config,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ struct atomisp_css_dvs_6axis_config *dvs_6axis_config;
+ struct atomisp_css_dvs_6axis_config *old_6axis_config;
+ struct ia_css_stream *stream =
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream;
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ int ret = -EFAULT;
+
+ if (!stream) {
+ dev_err(asd->isp->dev, "%s: internal error!", __func__);
+ return -EINVAL;
+ }
+
+ if (!source_6axis_config || !dvs_grid_info)
+ return 0;
+
+ if (!dvs_grid_info->enable)
+ return 0;
+
+ if (!from_user && css_param->update_flag.dvs_6axis_config)
+ return 0;
+
+	/* check whether the 6-axis config needs to be reallocated */
+ old_6axis_config = css_param->dvs_6axis;
+ dvs_6axis_config = old_6axis_config;
+
+ if (atomisp_hw_is_isp2401) {
+ struct atomisp_css_dvs_6axis_config t_6axis_config;
+
+ if (copy_from_compatible(&t_6axis_config, source_6axis_config,
+ sizeof(struct atomisp_dvs_6axis_config),
+ from_user)) {
+			dev_err(asd->isp->dev, "copy dvs 6axis config failed!");
+ return -EFAULT;
+ }
+
+ if (old_6axis_config &&
+ (old_6axis_config->width_y != t_6axis_config.width_y ||
+ old_6axis_config->height_y != t_6axis_config.height_y ||
+ old_6axis_config->width_uv != t_6axis_config.width_uv ||
+ old_6axis_config->height_uv != t_6axis_config.height_uv)) {
+ ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
+ css_param->dvs_6axis = NULL;
+
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config)
+ return -ENOMEM;
+ } else if (!dvs_6axis_config) {
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config)
+ return -ENOMEM;
+ }
+
+ dvs_6axis_config->exp_id = t_6axis_config.exp_id;
+
+ if (copy_from_compatible(dvs_6axis_config->xcoords_y,
+ t_6axis_config.xcoords_y,
+ t_6axis_config.width_y *
+ t_6axis_config.height_y *
+ sizeof(*dvs_6axis_config->xcoords_y),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_y,
+ t_6axis_config.ycoords_y,
+ t_6axis_config.width_y *
+ t_6axis_config.height_y *
+ sizeof(*dvs_6axis_config->ycoords_y),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->xcoords_uv,
+ t_6axis_config.xcoords_uv,
+ t_6axis_config.width_uv *
+ t_6axis_config.height_uv *
+ sizeof(*dvs_6axis_config->xcoords_uv),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_uv,
+ t_6axis_config.ycoords_uv,
+ t_6axis_config.width_uv *
+ t_6axis_config.height_uv *
+ sizeof(*dvs_6axis_config->ycoords_uv),
+ from_user))
+ goto error;
+ } else {
+ if (old_6axis_config &&
+ (old_6axis_config->width_y != source_6axis_config->width_y ||
+ old_6axis_config->height_y != source_6axis_config->height_y ||
+ old_6axis_config->width_uv != source_6axis_config->width_uv ||
+ old_6axis_config->height_uv != source_6axis_config->height_uv)) {
+ ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
+ css_param->dvs_6axis = NULL;
+
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config)
+ return -ENOMEM;
+ } else if (!dvs_6axis_config) {
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config)
+ return -ENOMEM;
+ }
+
+ dvs_6axis_config->exp_id = source_6axis_config->exp_id;
+
+ if (copy_from_compatible(dvs_6axis_config->xcoords_y,
+ source_6axis_config->xcoords_y,
+ source_6axis_config->width_y *
+ source_6axis_config->height_y *
+ sizeof(*source_6axis_config->xcoords_y),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_y,
+ source_6axis_config->ycoords_y,
+ source_6axis_config->width_y *
+ source_6axis_config->height_y *
+ sizeof(*source_6axis_config->ycoords_y),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->xcoords_uv,
+ source_6axis_config->xcoords_uv,
+ source_6axis_config->width_uv *
+ source_6axis_config->height_uv *
+ sizeof(*source_6axis_config->xcoords_uv),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_uv,
+ source_6axis_config->ycoords_uv,
+ source_6axis_config->width_uv *
+ source_6axis_config->height_uv *
+ sizeof(*source_6axis_config->ycoords_uv),
+ from_user))
+ goto error;
+ }
+ css_param->dvs_6axis = dvs_6axis_config;
+ css_param->update_flag.dvs_6axis_config =
+ (struct atomisp_dvs_6axis_config *)dvs_6axis_config;
+ return 0;
+
+error:
+ if (dvs_6axis_config)
+ ia_css_dvs2_6axis_config_free(dvs_6axis_config);
+ return ret;
+}
+
+int atomisp_cp_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_morph_table *source_morph_table,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ int ret = -EFAULT;
+ unsigned int i;
+ struct atomisp_css_morph_table *morph_table;
+ struct atomisp_css_morph_table *old_morph_table;
+
+ if (!source_morph_table)
+ return 0;
+
+ if (!from_user && css_param->update_flag.morph_table)
+ return 0;
+
+ old_morph_table = css_param->morph_table;
+
+ if (atomisp_hw_is_isp2401) {
+ struct atomisp_css_morph_table mtbl;
+
+ if (copy_from_compatible(&mtbl, source_morph_table,
+ sizeof(struct atomisp_morph_table),
+ from_user)) {
+ dev_err(asd->isp->dev, "copy morph table failed!");
+ return -EFAULT;
+ }
+
+ morph_table = atomisp_css_morph_table_allocate(
+ mtbl.width,
+ mtbl.height);
+ if (!morph_table)
+ return -ENOMEM;
+
+ for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ if (copy_from_compatible(morph_table->coordinates_x[i],
+ (__force void *)source_morph_table->coordinates_x[i],
+ mtbl.height * mtbl.width *
+ sizeof(*morph_table->coordinates_x[i]),
+ from_user))
+ goto error;
+
+ if (copy_from_compatible(morph_table->coordinates_y[i],
+ (__force void *)source_morph_table->coordinates_y[i],
+ mtbl.height * mtbl.width *
+ sizeof(*morph_table->coordinates_y[i]),
+ from_user))
+ goto error;
+ }
+ } else {
+ morph_table = atomisp_css_morph_table_allocate(
+ source_morph_table->width,
+ source_morph_table->height);
+ if (!morph_table)
+ return -ENOMEM;
+
+ for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ if (copy_from_compatible(morph_table->coordinates_x[i],
+ (__force void *)source_morph_table->coordinates_x[i],
+ source_morph_table->height * source_morph_table->width *
+ sizeof(*source_morph_table->coordinates_x[i]),
+ from_user))
+ goto error;
+
+ if (copy_from_compatible(morph_table->coordinates_y[i],
+ (__force void *)source_morph_table->coordinates_y[i],
+ source_morph_table->height * source_morph_table->width *
+ sizeof(*source_morph_table->coordinates_y[i]),
+ from_user))
+ goto error;
+ }
+ }
+
+ css_param->morph_table = morph_table;
+ if (old_morph_table)
+ atomisp_css_morph_table_free(old_morph_table);
+ css_param->update_flag.morph_table =
+ (struct atomisp_morph_table *)morph_table;
+ return 0;
+
+error:
+ if (morph_table)
+ atomisp_css_morph_table_free(morph_table);
+ return ret;
+}
+
+int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param)
+{
+ int ret;
+
+ ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_css_cp_dvs2_coefs(asd,
+ (struct ia_css_dvs2_coefficients *)arg->dvs2_coefs,
+ css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config,
+ css_param, false);
+ return ret;
+}
+
+void atomisp_free_css_parameters(struct atomisp_css_params *css_param)
+{
+ if (css_param->dvs_6axis) {
+ ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
+ css_param->dvs_6axis = NULL;
+ }
+ if (css_param->dvs2_coeff) {
+ ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
+ css_param->dvs2_coeff = NULL;
+ }
+ if (css_param->shading_table) {
+ ia_css_shading_table_free(css_param->shading_table);
+ css_param->shading_table = NULL;
+ }
+ if (css_param->morph_table) {
+ ia_css_morph_table_free(css_param->morph_table);
+ css_param->morph_table = NULL;
+ }
+}
+
+/*
+ * Check the parameter queue and the buffer queue for matching items and,
+ * when found, set the parameters to CSS and enqueue the buffer to CSS.
+ * A buffer in the waiting list that is not bound to a per-frame parameter
+ * is enqueued to CSS as soon as the per-frame setting buffers ahead of it
+ * have been enqueued.
+ */
+void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct videobuf_buffer *vb = NULL, *vb_tmp;
+ struct atomisp_css_params_with_list *param = NULL, *param_tmp;
+ struct videobuf_vmalloc_memory *vm_mem = NULL;
+ unsigned long irqflags;
+ bool need_to_enqueue_buffer = false;
+
+ if (atomisp_is_vf_pipe(pipe))
+ return;
+
+	/*
+	 * CSS/FW requires that parameters be set and buffers be enqueued
+	 * only after the ISP has started streaming.
+	 */
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return;
+
+ if (list_empty(&pipe->per_frame_params) ||
+ list_empty(&pipe->buffers_waiting_for_param))
+ return;
+
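+	/*
+	 * Walk the buffers waiting for parameters: a buffer whose
+	 * frame_request_config_id matches a cached per-frame parameter set,
+	 * or one with no request id at all, is moved to the active queue.
+	 */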
+ list_for_each_entry_safe(vb, vb_tmp,
+ &pipe->buffers_waiting_for_param, queue) {
+ if (pipe->frame_request_config_id[vb->i]) {
+ list_for_each_entry_safe(param, param_tmp,
+ &pipe->per_frame_params, list) {
+ if (pipe->frame_request_config_id[vb->i] !=
+ param->params.isp_config_id)
+ continue;
+
+ list_del(&param->list);
+ list_del(&vb->queue);
+ /*
+ * clear the request config id as the buffer
+ * will be handled and enqueued into CSS soon
+ */
+ pipe->frame_request_config_id[vb->i] = 0;
+ pipe->frame_params[vb->i] = param;
+ vm_mem = vb->priv;
+ BUG_ON(!vm_mem);
+ break;
+ }
+
+ if (vm_mem) {
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ list_add_tail(&vb->queue, &pipe->activeq);
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ vm_mem = NULL;
+ need_to_enqueue_buffer = true;
+ } else {
+				/* This is the end, stop looping */
+ break;
+ }
+ } else {
+ list_del(&vb->queue);
+ pipe->frame_params[vb->i] = NULL;
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ list_add_tail(&vb->queue, &pipe->activeq);
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ need_to_enqueue_buffer = true;
+ }
+ }
+
+ if (!need_to_enqueue_buffer)
+ return;
+
+ atomisp_qbuffers_to_css(asd);
+
+ if (!atomisp_hw_is_isp2401) {
+ if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd))
+ atomisp_wdt_start(asd);
+ } else {
+ if (atomisp_buffers_queued_pipe(pipe)) {
+ if (!atomisp_is_wdt_running(pipe))
+ atomisp_wdt_start_pipe(pipe);
+ else
+ atomisp_wdt_refresh_pipe(pipe,
+ ATOMISP_WDT_KEEP_CURRENT_DELAY);
+ }
+ }
+}
+
+/*
+ * Function to configure ISP parameters
+ */
+int atomisp_set_parameters(struct video_device *vdev,
+ struct atomisp_parameters *arg)
+{
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_css_params_with_list *param = NULL;
+ struct atomisp_css_params *css_param = &asd->params.css_param;
+ int ret;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(asd->isp->dev, "%s: internal error!\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(asd->isp->dev,
+ "%s: set parameter(per_frame_setting %d) for asd%d with isp_config_id %d of %s\n",
+ __func__, arg->per_frame_setting, asd->index,
+ arg->isp_config_id, vdev->name);
+
+ if (atomisp_hw_is_isp2401) {
+ if (atomisp_is_vf_pipe(pipe) && arg->per_frame_setting) {
+ dev_err(asd->isp->dev, "%s: vf pipe not support per_frame_setting",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ if (arg->per_frame_setting && !atomisp_is_vf_pipe(pipe)) {
+		/*
+		 * Per-frame setting is enabled; allocate a new parameter
+		 * buffer to cache the parameters. They are only set to CSS
+		 * once the frame buffers are ready.
+		 * Per-frame setting only works for the main output frame.
+		 */
+ param = kvzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param) {
+ dev_err(asd->isp->dev, "%s: failed to alloc params buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+ css_param = &param->params;
+ }
+
+ ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_css_cp_dvs2_coefs(asd,
+ (struct ia_css_dvs2_coefficients *)arg->dvs2_coefs,
+ css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config,
+ css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ if (!(arg->per_frame_setting && !atomisp_is_vf_pipe(pipe))) {
+ /* indicate to CSS that we have parameters to be updated */
+ asd->params.css_update_params_needed = true;
+ } else {
+ list_add_tail(&param->list, &pipe->per_frame_params);
+ atomisp_handle_parameter_and_buffer(pipe);
+ }
+
+ return 0;
+
+apply_parameter_failed:
+ if (css_param)
+ atomisp_free_css_parameters(css_param);
+ if (param)
+ kvfree(param);
+
+ return ret;
+}
+
+/*
+ * Function to set/get isp parameters to isp
+ */
+int atomisp_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_parm *config)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_pipe_config *vp_cfg =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ pipe_configs[IA_CSS_PIPE_ID_VIDEO];
+
+ /* Read parameter for 3A binary info */
+ if (flag == 0) {
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(
+ &asd->params.curr_grid_info);
+
+		if (!config) {
+ dev_err(isp->dev, "ERROR: NULL pointer in grid_info\n");
+ return -EINVAL;
+ }
+ atomisp_curr_user_grid_info(asd, &config->info);
+
+ /* We always return the resolution and stride even if there is
+ * no valid metadata. This allows the caller to get the
+ * information needed to allocate user-space buffers. */
+ config->metadata_config.metadata_height = asd->
+ stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
+ metadata_info.resolution.height;
+ config->metadata_config.metadata_stride = asd->
+ stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
+ metadata_info.stride;
+
+ /* update dvs grid info */
+ if (dvs_grid_info)
+ memcpy(&config->dvs_grid,
+ dvs_grid_info,
+ sizeof(struct atomisp_css_dvs_grid_info));
+
+ if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
+ config->dvs_envelop.width = 0;
+ config->dvs_envelop.height = 0;
+ return 0;
+ }
+
+ /* update dvs envelop info */
+ if (!asd->continuous_mode->val) {
+ config->dvs_envelop.width = vp_cfg->dvs_envelope.width;
+ config->dvs_envelop.height =
+ vp_cfg->dvs_envelope.height;
+ } else {
+ unsigned int dvs_w, dvs_h, dvs_w_max, dvs_h_max;
+
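+			/*
+			 * Continuous mode: the envelope is the spare room
+			 * between the bayer-downscaled sensor output and the
+			 * video output, capped at 20% of the output size.
+			 */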
+ dvs_w = vp_cfg->bayer_ds_out_res.width -
+ vp_cfg->output_info[0].res.width;
+ dvs_h = vp_cfg->bayer_ds_out_res.height -
+ vp_cfg->output_info[0].res.height;
+ dvs_w_max = rounddown(
+ vp_cfg->output_info[0].res.width / 5,
+ ATOM_ISP_STEP_WIDTH);
+ dvs_h_max = rounddown(
+ vp_cfg->output_info[0].res.height / 5,
+ ATOM_ISP_STEP_HEIGHT);
+
+ config->dvs_envelop.width = min(dvs_w, dvs_w_max);
+ config->dvs_envelop.height = min(dvs_h, dvs_h_max);
+ }
+
+ return 0;
+ }
+
+ memcpy(&asd->params.css_param.wb_config, &config->wb_config,
+ sizeof(struct atomisp_css_wb_config));
+ memcpy(&asd->params.css_param.ob_config, &config->ob_config,
+ sizeof(struct atomisp_css_ob_config));
+ memcpy(&asd->params.css_param.dp_config, &config->dp_config,
+ sizeof(struct atomisp_css_dp_config));
+ memcpy(&asd->params.css_param.de_config, &config->de_config,
+ sizeof(struct atomisp_css_de_config));
+ memcpy(&asd->params.css_param.dz_config, &config->dz_config,
+ sizeof(struct atomisp_css_dz_config));
+ memcpy(&asd->params.css_param.ce_config, &config->ce_config,
+ sizeof(struct atomisp_css_ce_config));
+ memcpy(&asd->params.css_param.nr_config, &config->nr_config,
+ sizeof(struct atomisp_css_nr_config));
+ memcpy(&asd->params.css_param.ee_config, &config->ee_config,
+ sizeof(struct atomisp_css_ee_config));
+ memcpy(&asd->params.css_param.tnr_config, &config->tnr_config,
+ sizeof(struct atomisp_css_tnr_config));
+
+ if (asd->params.color_effect == V4L2_COLORFX_NEGATIVE) {
+ asd->params.css_param.cc_config.matrix[3] = -config->cc_config.matrix[3];
+ asd->params.css_param.cc_config.matrix[4] = -config->cc_config.matrix[4];
+ asd->params.css_param.cc_config.matrix[5] = -config->cc_config.matrix[5];
+ asd->params.css_param.cc_config.matrix[6] = -config->cc_config.matrix[6];
+ asd->params.css_param.cc_config.matrix[7] = -config->cc_config.matrix[7];
+ asd->params.css_param.cc_config.matrix[8] = -config->cc_config.matrix[8];
+ }
+
+ if (asd->params.color_effect != V4L2_COLORFX_SEPIA &&
+ asd->params.color_effect != V4L2_COLORFX_BW) {
+ memcpy(&asd->params.css_param.cc_config, &config->cc_config,
+ sizeof(struct atomisp_css_cc_config));
+ atomisp_css_set_cc_config(asd, &asd->params.css_param.cc_config);
+ }
+
+ atomisp_css_set_wb_config(asd, &asd->params.css_param.wb_config);
+ atomisp_css_set_ob_config(asd, &asd->params.css_param.ob_config);
+ atomisp_css_set_de_config(asd, &asd->params.css_param.de_config);
+ atomisp_css_set_dz_config(asd, &asd->params.css_param.dz_config);
+ atomisp_css_set_ce_config(asd, &asd->params.css_param.ce_config);
+ atomisp_css_set_dp_config(asd, &asd->params.css_param.dp_config);
+ atomisp_css_set_nr_config(asd, &asd->params.css_param.nr_config);
+ atomisp_css_set_ee_config(asd, &asd->params.css_param.ee_config);
+ atomisp_css_set_tnr_config(asd, &asd->params.css_param.tnr_config);
+ asd->params.css_update_params_needed = true;
+
+ return 0;
+}
+
+/*
+ * Function to configure color effect of the image
+ */
+int atomisp_color_effect(struct atomisp_sub_device *asd, int flag,
+ __s32 *effect)
+{
+ struct atomisp_css_cc_config *cc_config = NULL;
+ struct atomisp_css_macc_table *macc_table = NULL;
+ struct atomisp_css_ctc_table *ctc_table = NULL;
+ int ret = 0;
+ struct v4l2_control control;
+ struct atomisp_device *isp = asd->isp;
+
+ if (flag == 0) {
+ *effect = asd->params.color_effect;
+ return 0;
+ }
+
+ control.id = V4L2_CID_COLORFX;
+ control.value = *effect;
+	ret = v4l2_s_ctrl(NULL,
+			  isp->inputs[asd->input_curr].camera->ctrl_handler,
+			  &control);
+	/*
+	 * If the color effect was set on the sensor successfully,
+	 * return 0 directly.
+	 */
+ if (!ret) {
+ asd->params.color_effect = (u32)*effect;
+ return 0;
+ }
+
+ if (*effect == asd->params.color_effect)
+ return 0;
+
+ /*
+ * isp_subdev->params.macc_en should be set to false.
+ */
+ asd->params.macc_en = false;
+
+ switch (*effect) {
+ case V4L2_COLORFX_NONE:
+ macc_table = &asd->params.css_param.macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ cc_config = &sepia_cc_config;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ cc_config = &nega_cc_config;
+ break;
+ case V4L2_COLORFX_BW:
+ cc_config = &mono_cc_config;
+ break;
+ case V4L2_COLORFX_SKY_BLUE:
+ macc_table = &blue_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_GRASS_GREEN:
+ macc_table = &green_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_LOW:
+ macc_table = &skin_low_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN:
+ macc_table = &skin_medium_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_HIGH:
+ macc_table = &skin_high_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_VIVID:
+ ctc_table = &vivid_ctc_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+ atomisp_update_capture_mode(asd);
+
+ if (cc_config)
+ atomisp_css_set_cc_config(asd, cc_config);
+ if (macc_table)
+ atomisp_css_set_macc_table(asd, macc_table);
+ if (ctc_table)
+ atomisp_css_set_ctc_table(asd, ctc_table);
+ asd->params.color_effect = (u32)*effect;
+ asd->params.css_update_params_needed = true;
+ return 0;
+}
+
+/*
+ * Function to configure bad pixel correction
+ */
+int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.bad_pixel_en;
+ return 0;
+ }
+ asd->params.bad_pixel_en = !!*value;
+
+ return 0;
+}
+
+/*
+ * Function to configure bad pixel correction params
+ */
+int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_dp_config *config)
+{
+ if (flag == 0) {
+ /* Get bad pixel from current setup */
+ if (atomisp_css_get_dp_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set bad pixel to isp parameters */
+ memcpy(&asd->params.css_param.dp_config, config,
+ sizeof(asd->params.css_param.dp_config));
+ atomisp_css_set_dp_config(asd, &asd->params.css_param.dp_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to enable/disable video image stabilization
+ */
+int atomisp_video_stable(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0)
+ *value = asd->params.video_dis_en;
+ else
+ asd->params.video_dis_en = !!*value;
+
+ return 0;
+}
+
+/*
+ * Function to configure fixed pattern noise
+ */
+int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.fpn_en;
+ return 0;
+ }
+
+ if (*value == 0) {
+ asd->params.fpn_en = false;
+ return 0;
+ }
+
+	/* TODO: add a function to get a black frame from the sensor with the shutter off */
+ return 0;
+}
+
+static unsigned int
+atomisp_bytesperline_to_padded_width(unsigned int bytesperline,
+ enum atomisp_css_frame_format format)
+{
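+	/*
+	 * The padded width is in pixels (elements), not bytes: divide
+	 * bytesperline by the per-pixel byte count, i.e. 2 for the packed
+	 * 16bpp formats and 4 for RGBA888. The remaining formats fall
+	 * through to the default and use bytesperline as-is.
+	 */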
+ switch (format) {
+ case CSS_FRAME_FORMAT_UYVY:
+ case CSS_FRAME_FORMAT_YUYV:
+ case CSS_FRAME_FORMAT_RAW:
+ case CSS_FRAME_FORMAT_RGB565:
+ return bytesperline / 2;
+ case CSS_FRAME_FORMAT_RGBA888:
+ return bytesperline / 4;
+ /* The following cases could be removed, but we leave them
+ in to document the formats that are included. */
+ case CSS_FRAME_FORMAT_NV11:
+ case CSS_FRAME_FORMAT_NV12:
+ case CSS_FRAME_FORMAT_NV16:
+ case CSS_FRAME_FORMAT_NV21:
+ case CSS_FRAME_FORMAT_NV61:
+ case CSS_FRAME_FORMAT_YV12:
+ case CSS_FRAME_FORMAT_YV16:
+ case CSS_FRAME_FORMAT_YUV420:
+ case CSS_FRAME_FORMAT_YUV420_16:
+ case CSS_FRAME_FORMAT_YUV422:
+ case CSS_FRAME_FORMAT_YUV422_16:
+ case CSS_FRAME_FORMAT_YUV444:
+ case CSS_FRAME_FORMAT_YUV_LINE:
+ case CSS_FRAME_FORMAT_PLANAR_RGB888:
+ case CSS_FRAME_FORMAT_QPLANE6:
+ case CSS_FRAME_FORMAT_BINARY_8:
+ default:
+ return bytesperline;
+ }
+}
+
+static int
+atomisp_v4l2_framebuffer_to_css_frame(const struct v4l2_framebuffer *arg,
+ struct atomisp_css_frame **result)
+{
+ struct atomisp_css_frame *res = NULL;
+ unsigned int padded_width;
+ enum atomisp_css_frame_format sh_format;
+ char *tmp_buf = NULL;
+ int ret = 0;
+
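+	/*
+	 * Bounce the user-space frame through a vmalloc'ed buffer, then
+	 * store it into ISP memory with hmm_store(), since the CSS frame
+	 * data lives in the ISP address space.
+	 */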
+ sh_format = v4l2_fmt_to_sh_fmt(arg->fmt.pixelformat);
+ padded_width = atomisp_bytesperline_to_padded_width(
+ arg->fmt.bytesperline, sh_format);
+
+	/* Note: the padded width of an atomisp_css_frame is in elements, not
+	   in bytes. The RAW frame we use here should always be a 16-bit RAW
+	   frame. This is why bytesperline/2 equals the padded width. */
+ if (atomisp_css_frame_allocate(&res, arg->fmt.width, arg->fmt.height,
+ sh_format, padded_width, 0)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tmp_buf = vmalloc(arg->fmt.sizeimage);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ if (copy_from_user(tmp_buf, (void __user __force *)arg->base,
+ arg->fmt.sizeimage)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (hmm_store(res->data, tmp_buf, arg->fmt.sizeimage)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ if (ret && res)
+ atomisp_css_frame_free(res);
+ if (tmp_buf)
+ vfree(tmp_buf);
+ if (ret == 0)
+ *result = res;
+ return ret;
+}
+
+/*
+ * Function to configure fixed pattern noise table
+ */
+int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd,
+ struct v4l2_framebuffer *arg)
+{
+ struct atomisp_css_frame *raw_black_frame = NULL;
+ int ret;
+
+ if (!arg)
+ return -EINVAL;
+
+ ret = atomisp_v4l2_framebuffer_to_css_frame(arg, &raw_black_frame);
+ if (ret)
+ return ret;
+ if (atomisp_css_set_black_frame(asd, raw_black_frame))
+ ret = -ENOMEM;
+
+ atomisp_css_frame_free(raw_black_frame);
+ return ret;
+}
+
+/*
+ * Function to configure false color correction
+ */
+int atomisp_false_color(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+	/* Get the false color setting from the current setup */
+ if (flag == 0) {
+ *value = asd->params.false_color;
+ return 0;
+ }
+
+	/* Set the false color setting in the isp parameters */
+ if (*value) {
+ atomisp_css_set_default_de_config(asd);
+ } else {
+ asd->params.css_param.de_config.pixelnoise = 0;
+ atomisp_css_set_de_config(asd, &asd->params.css_param.de_config);
+ }
+ asd->params.css_update_params_needed = true;
+ asd->params.false_color = *value;
+ return 0;
+}
+
+/*
+ * Function to configure false color correction params
+ */
+int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_de_config *config)
+{
+ if (flag == 0) {
+ /* Get false color from current setup */
+ if (atomisp_css_get_de_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set false color to isp parameters */
+ memcpy(&asd->params.css_param.de_config, config,
+ sizeof(asd->params.css_param.de_config));
+ atomisp_css_set_de_config(asd, &asd->params.css_param.de_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to configure white balance params
+ */
+int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_wb_config *config)
+{
+ if (flag == 0) {
+ /* Get white balance from current setup */
+ if (atomisp_css_get_wb_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set white balance to isp parameters */
+ memcpy(&asd->params.css_param.wb_config, config,
+ sizeof(asd->params.css_param.wb_config));
+ atomisp_css_set_wb_config(asd, &asd->params.css_param.wb_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_config *config)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ dev_dbg(isp->dev, ">%s %d\n", __func__, flag);
+
+ if (flag == 0) {
+		/* Get 3A config from the current setup */
+ if (atomisp_css_get_3a_config(asd, config))
+ return -EINVAL;
+ } else {
+		/* Set 3A config in the isp parameters */
+ memcpy(&asd->params.css_param.s3a_config, config,
+ sizeof(asd->params.css_param.s3a_config));
+ atomisp_css_set_3a_config(asd, &asd->params.css_param.s3a_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ dev_dbg(isp->dev, "<%s %d\n", __func__, flag);
+ return 0;
+}
+
+/*
+ * Function to setup digital zoom
+ */
+int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ u32 zoom;
+ struct atomisp_device *isp = asd->isp;
+
+ unsigned int max_zoom = MRFLD_MAX_ZOOM_FACTOR;
+
+ if (flag == 0) {
+ atomisp_css_get_zoom_factor(asd, &zoom);
+ *value = max_zoom - zoom;
+ } else {
+ if (*value < 0)
+ return -EINVAL;
+
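+		/*
+		 * Map the user value (0 = no zoom) onto the CSS zoom factor,
+		 * where max_zoom corresponds to no zoom; the min_t() clamp
+		 * keeps the resulting factor at least 1.
+		 */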
+ zoom = max_zoom - min_t(u32, max_zoom - 1, *value);
+ atomisp_css_set_zoom_factor(asd, zoom);
+
+ dev_dbg(isp->dev, "%s, zoom: %d\n", __func__, zoom);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to get sensor specific info for current resolution,
+ * which will be used for auto exposure conversion.
+ */
+int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd,
+ struct atomisp_sensor_mode_data *config)
+{
+ struct camera_mipi_info *mipi_info;
+ struct atomisp_device *isp = asd->isp;
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ if (!mipi_info)
+ return -EINVAL;
+
+ memcpy(config, &mipi_info->data, sizeof(*config));
+ return 0;
+}
+
+int atomisp_get_fmt(struct video_device *vdev, struct v4l2_format *f)
+{
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ f->fmt.pix = pipe->pix;
+
+ return 0;
+}
+
+static void __atomisp_update_stream_env(struct atomisp_sub_device *asd,
+ u16 stream_index, struct atomisp_input_stream_info *stream_info)
+{
+ int i;
+
+	/* assign the virtual channel id returned by the sensor driver query */
+ asd->stream_env[stream_index].ch_id = stream_info->ch_id;
+ asd->stream_env[stream_index].isys_configs = stream_info->isys_configs;
+ for (i = 0; i < stream_info->isys_configs; i++) {
+ asd->stream_env[stream_index].isys_info[i].input_format =
+ stream_info->isys_info[i].input_format;
+ asd->stream_env[stream_index].isys_info[i].width =
+ stream_info->isys_info[i].width;
+ asd->stream_env[stream_index].isys_info[i].height =
+ stream_info->isys_info[i].height;
+ }
+}
+
+static void __atomisp_init_stream_info(u16 stream_index,
+ struct atomisp_input_stream_info *stream_info)
+{
+ int i;
+
+ stream_info->enable = 1;
+ stream_info->stream = stream_index;
+ stream_info->ch_id = 0;
+ stream_info->isys_configs = 0;
+ for (i = 0; i < MAX_STREAMS_PER_CHANNEL; i++) {
+ stream_info->isys_info[i].input_format = 0;
+ stream_info->isys_info[i].width = 0;
+ stream_info->isys_info[i].height = 0;
+ }
+}
+
+/* This function looks up the closest available resolution. */
+int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f,
+ bool *res_overflow)
+{
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+
+ struct v4l2_mbus_framefmt *snr_mbus_fmt = &format.format;
+ const struct atomisp_format_bridge *fmt;
+ struct atomisp_input_stream_info *stream_info =
+ (struct atomisp_input_stream_info *)snr_mbus_fmt->reserved;
+ u16 stream_index;
+ int source_pad = atomisp_subdev_source_pad(vdev);
+ int ret;
+
+ if (!isp->inputs[asd->input_curr].camera)
+ return -EINVAL;
+
+ stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+ fmt = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+ if (!fmt) {
+ dev_err(isp->dev, "unsupported pixelformat!\n");
+ fmt = atomisp_output_fmts;
+ }
+
+ if (f->fmt.pix.width <= 0 || f->fmt.pix.height <= 0)
+ return -EINVAL;
+
+ snr_mbus_fmt->code = fmt->mbus_code;
+ snr_mbus_fmt->width = f->fmt.pix.width;
+ snr_mbus_fmt->height = f->fmt.pix.height;
+
+ __atomisp_init_stream_info(stream_index, stream_info);
+
+ dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
+ snr_mbus_fmt->width, snr_mbus_fmt->height);
+
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ pad, set_fmt, &pad_cfg, &format);
+ if (ret)
+ return ret;
+
+ dev_dbg(isp->dev, "try_mbus_fmt: got %ux%u\n",
+ snr_mbus_fmt->width, snr_mbus_fmt->height);
+
+ fmt = atomisp_get_format_bridge_from_mbus(snr_mbus_fmt->code);
+ if (!fmt) {
+ dev_err(isp->dev, "unknown sensor format 0x%8.8x\n",
+ snr_mbus_fmt->code);
+ return -EINVAL;
+ }
+
+ f->fmt.pix.pixelformat = fmt->pixelformat;
+
+	/*
+	 * If the format is JPEG or custom RAW, the width and height will not
+	 * satisfy the normal atomisp requirements, so there is no need to
+	 * check the conditions below. Just assign what the sensor driver
+	 * returns.
+	 */
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG ||
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW) {
+ f->fmt.pix.width = snr_mbus_fmt->width;
+ f->fmt.pix.height = snr_mbus_fmt->height;
+ return 0;
+ }
+
+ if (snr_mbus_fmt->width < f->fmt.pix.width
+ && snr_mbus_fmt->height < f->fmt.pix.height) {
+ f->fmt.pix.width = snr_mbus_fmt->width;
+ f->fmt.pix.height = snr_mbus_fmt->height;
+		/* Set the flag when the requested resolution is beyond the
+		 * max value supported by the sensor.
+		 */
+ if (res_overflow)
+ *res_overflow = true;
+ }
+
+	/* Clamp and align the resolution to the ISP limits */
+ f->fmt.pix.width = rounddown(
+ clamp_t(u32, f->fmt.pix.width, ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH), ATOM_ISP_STEP_WIDTH);
+ f->fmt.pix.height = rounddown(
+ clamp_t(u32, f->fmt.pix.height, ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT), ATOM_ISP_STEP_HEIGHT);
+
+ return 0;
+}
+
+static int
+atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f)
+{
+ u32 width = f->fmt.pix.width;
+ u32 height = f->fmt.pix.height;
+ u32 pixelformat = f->fmt.pix.pixelformat;
+ enum v4l2_field field = f->fmt.pix.field;
+ u32 depth;
+
+ if (!atomisp_get_format_bridge(pixelformat)) {
+ dev_err(isp->dev, "Wrong output pixelformat\n");
+ return -EINVAL;
+ }
+
+ depth = get_pixel_depth(pixelformat);
+
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (field != V4L2_FIELD_NONE) {
+ dev_err(isp->dev, "Wrong output field\n");
+ return -EINVAL;
+ }
+
+ f->fmt.pix.field = field;
+ f->fmt.pix.width = clamp_t(u32,
+ rounddown(width, (u32)ATOM_ISP_STEP_WIDTH),
+ ATOM_ISP_MIN_WIDTH, ATOM_ISP_MAX_WIDTH);
+ f->fmt.pix.height = clamp_t(u32, rounddown(height,
+ (u32)ATOM_ISP_STEP_HEIGHT),
+ ATOM_ISP_MIN_HEIGHT, ATOM_ISP_MAX_HEIGHT);
+ f->fmt.pix.bytesperline = (width * depth) >> 3;
+
+ return 0;
+}
+
+enum mipi_port_id __get_mipi_port(struct atomisp_device *isp,
+ enum atomisp_camera_port port)
+{
+ switch (port) {
+ case ATOMISP_CAMERA_PORT_PRIMARY:
+ return MIPI_PORT0_ID;
+ case ATOMISP_CAMERA_PORT_SECONDARY:
+ return MIPI_PORT1_ID;
+ case ATOMISP_CAMERA_PORT_TERTIARY:
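+		/*
+		 * Use the next MIPI port id if the hardware actually has a
+		 * third port; otherwise fall through and report the port as
+		 * unsupported.
+		 */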
+ if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID)
+ return MIPI_PORT1_ID + 1;
+		/* fall through to the default case */
+ default:
+ dev_err(isp->dev, "unsupported port: %d\n", port);
+ return MIPI_PORT0_ID;
+ }
+}
+
+static inline int atomisp_set_sensor_mipi_to_isp(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct camera_mipi_info *mipi_info)
+{
+ struct v4l2_control ctrl;
+ struct atomisp_device *isp = asd->isp;
+ const struct atomisp_in_fmt_conv *fc;
+ int mipi_freq = 0;
+ unsigned int input_format, bayer_order;
+
+ ctrl.id = V4L2_CID_LINK_FREQ;
+ if (v4l2_g_ctrl
+ (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl) == 0)
+ mipi_freq = ctrl.value;
+
+ if (asd->stream_env[stream_id].isys_configs == 1) {
+ input_format =
+ asd->stream_env[stream_id].isys_info[0].input_format;
+ atomisp_css_isys_set_format(asd, stream_id,
+ input_format, IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ } else if (asd->stream_env[stream_id].isys_configs == 2) {
+ atomisp_css_isys_two_stream_cfg_update_stream1(
+ asd, stream_id,
+ asd->stream_env[stream_id].isys_info[0].input_format,
+ asd->stream_env[stream_id].isys_info[0].width,
+ asd->stream_env[stream_id].isys_info[0].height);
+
+ atomisp_css_isys_two_stream_cfg_update_stream2(
+ asd, stream_id,
+ asd->stream_env[stream_id].isys_info[1].input_format,
+ asd->stream_env[stream_id].isys_info[1].width,
+ asd->stream_env[stream_id].isys_info[1].height);
+ }
+
+	/* Compatibility for sensors which provide no media bus code
+	 * in s_mbus_framefmt() and do not support pad formats. */
+ if (mipi_info->input_format != -1) {
+ bayer_order = mipi_info->raw_bayer_order;
+
+		/* The input stream config still needs to be configured */
+ /* TODO: Check if this is necessary */
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ mipi_info->input_format);
+ if (!fc)
+ return -EINVAL;
+ input_format = fc->css_stream_fmt;
+ } else {
+ struct v4l2_mbus_framefmt *sink;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+ fc = atomisp_find_in_fmt_conv(sink->code);
+ if (!fc)
+ return -EINVAL;
+ input_format = fc->css_stream_fmt;
+ bayer_order = fc->bayer_order;
+ }
+
+ atomisp_css_input_set_format(asd, stream_id, input_format);
+ atomisp_css_input_set_bayer_order(asd, stream_id, bayer_order);
+
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ mipi_info->metadata_format);
+ if (!fc)
+ return -EINVAL;
+ input_format = fc->css_stream_fmt;
+ atomisp_css_input_configure_port(asd,
+ __get_mipi_port(asd->isp, mipi_info->port),
+ mipi_info->num_lanes,
+ 0xffff4, mipi_freq,
+ input_format,
+ mipi_info->metadata_width,
+ mipi_info->metadata_height);
+ return 0;
+}
+
+static int __enable_continuous_mode(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ dev_dbg(isp->dev,
+ "continuous mode %d, raw buffers %d, stop preview %d\n",
+ enable, asd->continuous_raw_buffer_size->val,
+ !asd->continuous_viewfinder->val);
+
+ if (!atomisp_hw_is_isp2401)
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_PRIMARY);
+ else
+ atomisp_update_capture_mode(asd);
+
+ /* in case of ANR, force capture pipe to offline mode */
+ atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL,
+ asd->params.low_light ? false : !enable);
+ atomisp_css_preview_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL,
+ !enable);
+ atomisp_css_enable_continuous(asd, enable);
+ atomisp_css_enable_cvf(asd, asd->continuous_viewfinder->val);
+
+ if (atomisp_css_continuous_set_num_raw_frames(asd,
+ asd->continuous_raw_buffer_size->val)) {
+ dev_err(isp->dev, "css_continuous_set_num_raw_frames failed\n");
+ return -EINVAL;
+ }
+
+ if (!enable) {
+ atomisp_css_enable_raw_binning(asd, false);
+ atomisp_css_input_set_two_pixels_per_clock(asd, false);
+ }
+
+ if (isp->inputs[asd->input_curr].type != FILE_INPUT)
+ atomisp_css_input_set_mode(asd, CSS_INPUT_MODE_SENSOR);
+
+ return atomisp_update_run_mode(asd);
+}
+
+static int configure_pp_input_nop(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ return 0;
+}
+
+static int configure_output_nop(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format sh_fmt)
+{
+ return 0;
+}
+
+static int get_frame_info_nop(struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *finfo)
+{
+ return 0;
+}
+
+/*
+ * Resets CSS parameters that depend on input resolution.
+ *
+ * Update params like CSS RAW binning, 2ppc mode and pp_input
+ * which depend on input size, but are not automatically
+ * handled in CSS when the input resolution is changed.
+ */
+static int css_input_resolution_changed(struct atomisp_sub_device *asd,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf;
+ unsigned int i;
+
+ dev_dbg(asd->isp->dev, "css_input_resolution_changed to %ux%u\n",
+ ffmt->width, ffmt->height);
+
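+	/*
+	 * Two pixels per clock is used with the legacy input system only;
+	 * the ISP2401 new input system runs with it disabled.
+	 */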
+#if defined(ISP2401_NEW_INPUT_SYSTEM)
+ atomisp_css_input_set_two_pixels_per_clock(asd, false);
+#else
+ atomisp_css_input_set_two_pixels_per_clock(asd, true);
+#endif
+ if (asd->continuous_mode->val) {
+ /* Note for all checks: ffmt includes pad_w+pad_h */
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
+ (ffmt->width >= 2048 || ffmt->height >= 1536)) {
+ /*
+ * For preview pipe, enable only if resolution
+ * is >= 3M for ISP2400.
+ */
+ atomisp_css_enable_raw_binning(asd, true);
+ }
+ }
+	/*
+	 * If the sensor input changed, the metadata resolution changed with
+	 * it. Release all metadata buffers here so that they are re-allocated
+	 * next time in reqbufs.
+	 */
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i],
+ list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ }
+ return 0;
+
+ /*
+ * TODO: atomisp_css_preview_configure_pp_input() not
+ * reset due to CSS bug tracked as PSI BZ 115124
+ */
+}
+
+static int atomisp_set_fmt_to_isp(struct video_device *vdev,
+ struct atomisp_css_frame_info *output_info,
+ struct atomisp_css_frame_info *raw_output_info,
+ struct v4l2_pix_format *pix,
+ unsigned int source_pad)
+{
+ struct camera_mipi_info *mipi_info;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ const struct atomisp_format_bridge *format;
+ struct v4l2_rect *isp_sink_crop;
+ enum atomisp_css_pipe_id pipe_id;
+ struct v4l2_subdev_fh fh;
+ int (*configure_output)(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format sh_fmt) =
+ configure_output_nop;
+ int (*get_frame_info)(struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *finfo) =
+ get_frame_info_nop;
+ int (*configure_pp_input)(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height) =
+ configure_pp_input_nop;
+ u16 stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+ const struct atomisp_in_fmt_conv *fc;
+ int ret;
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ isp_sink_crop = atomisp_subdev_get_rect(
+ &asd->subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, V4L2_SEL_TGT_CROP);
+
+ format = atomisp_get_format_bridge(pix->pixelformat);
+ if (!format)
+ return -EINVAL;
+
+ if (isp->inputs[asd->input_curr].type != TEST_PATTERN &&
+ isp->inputs[asd->input_curr].type != FILE_INPUT) {
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ if (!mipi_info) {
+ dev_err(isp->dev, "mipi_info is NULL\n");
+ return -EINVAL;
+ }
+ if (atomisp_set_sensor_mipi_to_isp(asd, stream_index,
+ mipi_info))
+ return -EINVAL;
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ mipi_info->input_format);
+ if (!fc)
+ fc = atomisp_find_in_fmt_conv(
+ atomisp_subdev_get_ffmt(&asd->subdev,
+ NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK)->code);
+ if (!fc)
+ return -EINVAL;
+ if (format->sh_fmt == CSS_FRAME_FORMAT_RAW &&
+ raw_output_format_match_input(fc->css_stream_fmt,
+ pix->pixelformat))
+ return -EINVAL;
+ }
+
+ /*
+ * Configure viewfinder also when vfpp is disabled: the
+ * CSS still requires viewfinder configuration.
+ */
+ if (asd->fmt_auto->val ||
+ asd->vfpp->val != ATOMISP_VFPP_ENABLE) {
+ struct v4l2_rect vf_size = {0};
+ struct v4l2_mbus_framefmt vf_ffmt = {0};
+
+ if (pix->width < 640 || pix->height < 480) {
+ vf_size.width = pix->width;
+ vf_size.height = pix->height;
+ } else {
+ vf_size.width = 640;
+ vf_size.height = 480;
+ }
+
+ /* FIXME: proper format name for this one. See
+ atomisp_output_fmts[] in atomisp_v4l2.c */
+ vf_ffmt.code = V4L2_MBUS_FMT_CUSTOM_YUV420;
+
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_VF,
+ V4L2_SEL_TGT_COMPOSE, 0, &vf_size);
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_VF, &vf_ffmt);
+ asd->video_out_vf.sh_fmt = CSS_FRAME_FORMAT_NV12;
+
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ atomisp_css_video_configure_viewfinder(asd,
+ vf_size.width, vf_size.height, 0,
+ asd->video_out_vf.sh_fmt);
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO)
+ atomisp_css_video_configure_viewfinder(asd,
+ vf_size.width, vf_size.height, 0,
+ asd->video_out_vf.sh_fmt);
+ else
+ atomisp_css_capture_configure_viewfinder(asd,
+ vf_size.width, vf_size.height, 0,
+ asd->video_out_vf.sh_fmt);
+ } else if (source_pad != ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW ||
+ asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ atomisp_css_capture_configure_viewfinder(asd,
+ vf_size.width, vf_size.height, 0,
+ asd->video_out_vf.sh_fmt);
+ }
+ }
+
+ if (asd->continuous_mode->val) {
+ ret = __enable_continuous_mode(asd, true);
+ if (ret)
+ return -EINVAL;
+ }
+
+ atomisp_css_input_set_mode(asd, CSS_INPUT_MODE_SENSOR);
+ atomisp_css_disable_vf_pp(asd,
+ asd->vfpp->val != ATOMISP_VFPP_ENABLE);
+
+	/*
+	 * Select the CSS pipe and its configure/get_frame_info helpers.
+	 * The ISP2401 new input system needs to use the copy pipe.
+	 */
+ if (asd->copy_mode) {
+ pipe_id = CSS_PIPE_ID_COPY;
+ atomisp_css_capture_enable_online(asd, stream_index, false);
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+		/* video is the same in continuous capture and online modes */
+ configure_output = atomisp_css_video_configure_output;
+ get_frame_info = atomisp_css_video_get_output_frame_info;
+ pipe_id = CSS_PIPE_ID_VIDEO;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ if (!asd->continuous_mode->val) {
+ configure_output = atomisp_css_video_configure_output;
+ get_frame_info =
+ atomisp_css_video_get_output_frame_info;
+ pipe_id = CSS_PIPE_ID_VIDEO;
+ } else {
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
+ configure_output =
+ atomisp_css_video_configure_output;
+ get_frame_info =
+ atomisp_css_video_get_output_frame_info;
+ configure_pp_input =
+ atomisp_css_video_configure_pp_input;
+ pipe_id = CSS_PIPE_ID_VIDEO;
+ } else {
+ configure_output =
+ atomisp_css_capture_configure_output;
+ get_frame_info =
+ atomisp_css_capture_get_output_frame_info;
+ configure_pp_input =
+ atomisp_css_capture_configure_pp_input;
+ pipe_id = CSS_PIPE_ID_CAPTURE;
+
+ atomisp_update_capture_mode(asd);
+ atomisp_css_capture_enable_online(asd, stream_index, false);
+ }
+ }
+ } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) {
+ configure_output = atomisp_css_preview_configure_output;
+ get_frame_info = atomisp_css_preview_get_output_frame_info;
+ configure_pp_input = atomisp_css_preview_configure_pp_input;
+ pipe_id = CSS_PIPE_ID_PREVIEW;
+ } else {
+		/* CSS doesn't support low light mode on SOC cameras, so
+		 * disable it. FIXME: if this is done elsewhere, it corrupts
+		 * the colors of the thumbnail image.
+		 */
+ if (isp->inputs[asd->input_curr].type == SOC_CAMERA)
+ asd->params.low_light = false;
+
+ if (format->sh_fmt == CSS_FRAME_FORMAT_RAW) {
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_RAW);
+ atomisp_css_enable_dz(asd, false);
+ } else {
+ atomisp_update_capture_mode(asd);
+ }
+
+ if (!asd->continuous_mode->val)
+ /* in case of ANR, force capture pipe to offline mode */
+ atomisp_css_capture_enable_online(asd, stream_index,
+ asd->params.low_light ?
+ false : asd->params.online_process);
+
+ configure_output = atomisp_css_capture_configure_output;
+ get_frame_info = atomisp_css_capture_get_output_frame_info;
+ configure_pp_input = atomisp_css_capture_configure_pp_input;
+ pipe_id = CSS_PIPE_ID_CAPTURE;
+
+ if (!asd->params.online_process &&
+ !asd->continuous_mode->val) {
+ ret = atomisp_css_capture_get_output_raw_frame_info(asd,
+ raw_output_info);
+ if (ret)
+ return ret;
+ }
+ if (!asd->continuous_mode->val && asd->run_mode->val
+ != ATOMISP_RUN_MODE_STILL_CAPTURE) {
+ dev_err(isp->dev,
+ "Need to set the running mode first\n");
+ asd->run_mode->val = ATOMISP_RUN_MODE_STILL_CAPTURE;
+ }
+ }
+
+	/*
+	 * For SOC cameras, use the yuvpp pipe.
+	 */
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = CSS_PIPE_ID_YUVPP;
+
+ if (asd->copy_mode)
+ ret = atomisp_css_copy_configure_output(asd, stream_index,
+ pix->width, pix->height,
+ format->planar ? pix->bytesperline :
+ pix->bytesperline * 8 / format->depth,
+ format->sh_fmt);
+ else
+ ret = configure_output(asd, pix->width, pix->height,
+ format->planar ? pix->bytesperline :
+ pix->bytesperline * 8 / format->depth,
+ format->sh_fmt);
+ if (ret) {
+ dev_err(isp->dev, "configure_output %ux%u, format %8.8x\n",
+ pix->width, pix->height, format->sh_fmt);
+ return -EINVAL;
+ }
+
+ if (asd->continuous_mode->val &&
+ (configure_pp_input == atomisp_css_preview_configure_pp_input ||
+ configure_pp_input == atomisp_css_video_configure_pp_input)) {
+		/* For ISP 2.2, configure_pp_input is available in continuous
+		 * mode */
+ ret = configure_pp_input(asd, isp_sink_crop->width,
+ isp_sink_crop->height);
+ if (ret) {
+ dev_err(isp->dev, "configure_pp_input %ux%u\n",
+ isp_sink_crop->width,
+ isp_sink_crop->height);
+ return -EINVAL;
+ }
+ } else {
+ ret = configure_pp_input(asd, isp_sink_crop->width,
+ isp_sink_crop->height);
+ if (ret) {
+ dev_err(isp->dev, "configure_pp_input %ux%u\n",
+ isp_sink_crop->width, isp_sink_crop->height);
+ return -EINVAL;
+ }
+ }
+ if (asd->copy_mode)
+ ret = atomisp_css_copy_get_output_frame_info(asd, stream_index,
+ output_info);
+ else
+ ret = get_frame_info(asd, output_info);
+ if (ret) {
+ dev_err(isp->dev, "get_frame_info %ux%u (padded to %u)\n",
+ pix->width, pix->height, pix->bytesperline);
+ return -EINVAL;
+ }
+
+ atomisp_update_grid_info(asd, pipe_id, source_pad);
+
+ /* Free the raw_dump buffer first */
+ atomisp_css_frame_free(asd->raw_output_frame);
+ asd->raw_output_frame = NULL;
+
+ if (!asd->continuous_mode->val &&
+ !asd->params.online_process && !isp->sw_contex.file_input &&
+ atomisp_css_frame_allocate_from_info(&asd->raw_output_frame,
+ raw_output_info))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void atomisp_get_dis_envelop(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int *dvs_env_w, unsigned int *dvs_env_h)
+{
+ struct atomisp_device *isp = asd->isp;
+
+	/* If the subdev type is SOC camera, we do not need to set DVS */
+ if (isp->inputs[asd->input_curr].type == SOC_CAMERA)
+ asd->params.video_dis_en = false;
+
+ if (asd->params.video_dis_en &&
+ asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+		/* The envelope is 20% of the output resolution. */
+		/*
+		 * The DVS envelope cannot be rounded up: that would cause
+		 * ISP timeouts and color switch issues. E.g. a 1920x1080
+		 * output yields at most a 384x216 envelope.
+		 */
+ *dvs_env_w = rounddown(width / 5, ATOM_ISP_STEP_WIDTH);
+ *dvs_env_h = rounddown(height / 5, ATOM_ISP_STEP_HEIGHT);
+ }
+
+ asd->params.dis_proj_data_valid = false;
+ asd->params.css_update_params_needed = true;
+}
+
+static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
+ int source_pad, struct v4l2_format *f)
+{
+#if defined(ISP2401_NEW_INPUT_SYSTEM)
+ struct v4l2_mbus_framefmt *sink, *src;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK);
+ src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, source_pad);
+
+ if ((sink->code == src->code &&
+ sink->width == f->fmt.pix.width &&
+ sink->height == f->fmt.pix.height) ||
+ ((asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) &&
+ (asd->isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1)))
+ asd->copy_mode = true;
+ else
+#endif
+	/* copy_mode is only used with the new input system */
+ asd->copy_mode = false;
+
+ dev_dbg(asd->isp->dev, "copy_mode: %d\n", asd->copy_mode);
+}
+
+static int atomisp_set_fmt_to_snr(struct video_device *vdev,
+ struct v4l2_format *f, unsigned int pixelformat,
+ unsigned int padding_w, unsigned int padding_h,
+ unsigned int dvs_env_w, unsigned int dvs_env_h)
+{
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ const struct atomisp_format_bridge *format;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format vformat = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ struct v4l2_mbus_framefmt *ffmt = &vformat.format;
+ struct v4l2_mbus_framefmt *req_ffmt;
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_input_stream_info *stream_info =
+ (struct atomisp_input_stream_info *)ffmt->reserved;
+ u16 stream_index = ATOMISP_INPUT_STREAM_GENERAL;
+ int source_pad = atomisp_subdev_source_pad(vdev);
+ struct v4l2_subdev_fh fh;
+ int ret;
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+
+ format = atomisp_get_format_bridge(pixelformat);
+ if (!format)
+ return -EINVAL;
+
+ v4l2_fill_mbus_format(ffmt, &f->fmt.pix, format->mbus_code);
+ ffmt->height += padding_h + dvs_env_h;
+ ffmt->width += padding_w + dvs_env_w;
+
+ dev_dbg(isp->dev, "s_mbus_fmt: ask %ux%u (padding %ux%u, dvs %ux%u)\n",
+ ffmt->width, ffmt->height, padding_w, padding_h,
+ dvs_env_w, dvs_env_h);
+
+ __atomisp_init_stream_info(stream_index, stream_info);
+
+ req_ffmt = ffmt;
+
+ /* Disable dvs if resolution can't be supported by sensor */
+ if (asd->params.video_dis_en &&
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
+ vformat.which = V4L2_SUBDEV_FORMAT_TRY;
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ pad, set_fmt, &pad_cfg, &vformat);
+ if (ret)
+ return ret;
+ if (ffmt->width < req_ffmt->width ||
+ ffmt->height < req_ffmt->height) {
+ req_ffmt->height -= dvs_env_h;
+ req_ffmt->width -= dvs_env_w;
+ ffmt = req_ffmt;
+ dev_warn(isp->dev,
+ "cannot enable video dis due to sensor limitation\n");
+ asd->params.video_dis_en = false;
+ }
+ }
+ dev_dbg(isp->dev, "sensor width: %d, height: %d\n",
+ ffmt->width, ffmt->height);
+ vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad,
+ set_fmt, NULL, &vformat);
+ if (ret)
+ return ret;
+
+ __atomisp_update_stream_env(asd, stream_index, stream_info);
+
+ dev_dbg(isp->dev, "sensor width: %d, height: %d\n",
+ ffmt->width, ffmt->height);
+
+ if (ffmt->width < ATOM_ISP_STEP_WIDTH ||
+ ffmt->height < ATOM_ISP_STEP_HEIGHT)
+ return -EINVAL;
+
+ if (asd->params.video_dis_en &&
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO &&
+ (ffmt->width < req_ffmt->width || ffmt->height < req_ffmt->height)) {
+ dev_warn(isp->dev,
+ "cannot enable video dis due to sensor limitation\n");
+ asd->params.video_dis_en = false;
+ }
+
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, ffmt);
+
+ return css_input_resolution_changed(asd, ffmt);
+}
+
+int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
+{
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ const struct atomisp_format_bridge *format_bridge;
+ const struct atomisp_format_bridge *snr_format_bridge;
+ struct atomisp_css_frame_info output_info, raw_output_info;
+ struct v4l2_format snr_fmt = *f;
+ struct v4l2_format backup_fmt = *f, s_fmt = *f;
+ unsigned int dvs_env_w = 0, dvs_env_h = 0;
+ unsigned int padding_w = pad_w, padding_h = pad_h;
+ bool res_overflow = false, crop_needs_override = false;
+ struct v4l2_mbus_framefmt isp_sink_fmt;
+ struct v4l2_mbus_framefmt isp_source_fmt = {0};
+ struct v4l2_rect isp_sink_crop;
+ u16 source_pad = atomisp_subdev_source_pad(vdev);
+ struct v4l2_subdev_fh fh;
+ int ret;
+
+ dev_dbg(isp->dev,
+ "setting resolution %ux%u on pad %u for asd%d, bytesperline %u\n",
+ f->fmt.pix.width, f->fmt.pix.height, source_pad,
+ asd->index, f->fmt.pix.bytesperline);
+
+ if (source_pad >= ATOMISP_SUBDEV_PADS_NUM)
+ return -EINVAL;
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
+ dev_warn(isp->dev, "ISP does not support set format while at streaming!\n");
+ return -EBUSY;
+ }
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+ if (!format_bridge)
+ return -EINVAL;
+
+ pipe->sh_fmt = format_bridge->sh_fmt;
+ pipe->pix.pixelformat = f->fmt.pix.pixelformat;
+
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VF ||
+ (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW
+ && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)) {
+ if (asd->fmt_auto->val) {
+ struct v4l2_rect *capture_comp;
+ struct v4l2_rect r = {0};
+
+ r.width = f->fmt.pix.width;
+ r.height = f->fmt.pix.height;
+
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW)
+ capture_comp = atomisp_subdev_get_rect(
+ &asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_VIDEO,
+ V4L2_SEL_TGT_COMPOSE);
+ else
+ capture_comp = atomisp_subdev_get_rect(
+ &asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE,
+ V4L2_SEL_TGT_COMPOSE);
+
+ if (capture_comp->width < r.width
+ || capture_comp->height < r.height) {
+ r.width = capture_comp->width;
+ r.height = capture_comp->height;
+ }
+
+ atomisp_subdev_set_selection(
+ &asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE, source_pad,
+ V4L2_SEL_TGT_COMPOSE, 0, &r);
+
+ f->fmt.pix.width = r.width;
+ f->fmt.pix.height = r.height;
+ }
+
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
+ (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) &&
+ (asd->isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1)) {
+ /* For M10MO outputting YUV preview images. */
+ u16 video_index =
+ atomisp_source_pad_to_stream_id(asd,
+ ATOMISP_SUBDEV_PAD_SOURCE_VIDEO);
+
+ ret = atomisp_css_copy_get_output_frame_info(asd,
+ video_index, &output_info);
+ if (ret) {
+ dev_err(isp->dev,
+ "copy_get_output_frame_info ret %i\n", ret);
+ return -EINVAL;
+ }
+ if (!asd->yuvpp_mode) {
+ /*
+ * If viewfinder was configured into copy_mode,
+ * we switch to using yuvpp pipe instead.
+ */
+ asd->yuvpp_mode = true;
+ ret = atomisp_css_copy_configure_output(
+ asd, video_index, 0, 0, 0, 0);
+ if (ret) {
+ dev_err(isp->dev,
+ "failed to disable copy pipe\n");
+ return -EINVAL;
+ }
+ ret = atomisp_css_yuvpp_configure_output(
+ asd, video_index,
+ output_info.res.width,
+ output_info.res.height,
+ output_info.padded_width,
+ output_info.format);
+ if (ret) {
+ dev_err(isp->dev,
+ "failed to set up yuvpp pipe\n");
+ return -EINVAL;
+ }
+ atomisp_css_video_enable_online(asd, false);
+ atomisp_css_preview_enable_online(asd,
+ ATOMISP_INPUT_STREAM_GENERAL, false);
+ }
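+ /*
+ * For packed formats convert bytesperline from bytes back to a
+ * pixel count before configuring the viewfinder.
+ */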
+ atomisp_css_yuvpp_configure_viewfinder(asd, video_index,
+ f->fmt.pix.width, f->fmt.pix.height,
+ format_bridge->planar ? f->fmt.pix.bytesperline
+ : f->fmt.pix.bytesperline * 8
+ / format_bridge->depth, format_bridge->sh_fmt);
+ atomisp_css_yuvpp_get_viewfinder_frame_info(
+ asd, video_index, &output_info);
+ } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) {
+ atomisp_css_video_configure_viewfinder(asd,
+ f->fmt.pix.width, f->fmt.pix.height,
+ format_bridge->planar ? f->fmt.pix.bytesperline
+ : f->fmt.pix.bytesperline * 8
+ / format_bridge->depth, format_bridge->sh_fmt);
+ atomisp_css_video_get_viewfinder_frame_info(asd,
+ &output_info);
+ asd->copy_mode = false;
+ } else {
+ atomisp_css_capture_configure_viewfinder(asd,
+ f->fmt.pix.width, f->fmt.pix.height,
+ format_bridge->planar ? f->fmt.pix.bytesperline
+ : f->fmt.pix.bytesperline * 8
+ / format_bridge->depth, format_bridge->sh_fmt);
+ atomisp_css_capture_get_viewfinder_frame_info(asd,
+ &output_info);
+ asd->copy_mode = false;
+ }
+
+ goto done;
+ }
+ /*
+ * Check whether main resolution configured smaller
+ * than snapshot resolution. If so, force main resolution
+ * to be the same as snapshot resolution
+ */
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) {
+ struct v4l2_rect *r;
+
+ r = atomisp_subdev_get_rect(
+ &asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_VF, V4L2_SEL_TGT_COMPOSE);
+
+ if (r->width && r->height
+ && (r->width > f->fmt.pix.width
+ || r->height > f->fmt.pix.height))
+ dev_warn(isp->dev,
+ "Main resolution config is smaller than the VF resolution. Forcing it to equal the VF resolution.\n");
+ }
+
+ /* Pipeline configuration done through subdevs. Bail out now. */
+ if (!asd->fmt_auto->val)
+ goto set_fmt_to_isp;
+
+ /* get sensor resolution and format */
+ ret = atomisp_try_fmt(vdev, &snr_fmt, &res_overflow);
+ if (ret)
+ return ret;
+ f->fmt.pix.width = snr_fmt.fmt.pix.width;
+ f->fmt.pix.height = snr_fmt.fmt.pix.height;
+
+ snr_format_bridge =
+ atomisp_get_format_bridge(snr_fmt.fmt.pix.pixelformat);
+ if (!snr_format_bridge)
+ return -EINVAL;
+
+ atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK)->code =
+ snr_format_bridge->mbus_code;
+
+ isp_sink_fmt = *atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
+ isp_source_fmt.code = format_bridge->mbus_code;
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ source_pad, &isp_source_fmt);
+
+ if (!atomisp_subdev_format_conversion(asd, source_pad)) {
+ padding_w = 0;
+ padding_h = 0;
+ } else if (IS_BYT) {
+ padding_w = 12;
+ padding_h = 12;
+ }
+
+ /* construct resolution supported by isp */
+ if (res_overflow && !asd->continuous_mode->val) {
+ f->fmt.pix.width = rounddown(
+ clamp_t(u32, f->fmt.pix.width - padding_w,
+ ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH), ATOM_ISP_STEP_WIDTH);
+ f->fmt.pix.height = rounddown(
+ clamp_t(u32, f->fmt.pix.height - padding_h,
+ ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT), ATOM_ISP_STEP_HEIGHT);
+ }
+
+ atomisp_get_dis_envelop(asd, f->fmt.pix.width, f->fmt.pix.height,
+ &dvs_env_w, &dvs_env_h);
+
+ if (asd->continuous_mode->val) {
+ struct v4l2_rect *r;
+
+ r = atomisp_subdev_get_rect(
+ &asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE,
+ V4L2_SEL_TGT_COMPOSE);
+ /*
+ * ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE should have its resolutions
+ * properly set; otherwise it should not be used as the capture_pad.
+ */
+ if (r->width && r->height)
+ asd->capture_pad = ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE;
+ else
+ asd->capture_pad = source_pad;
+ } else {
+ asd->capture_pad = source_pad;
+ }
+ /*
+ * Set the format info on the sensor.
+ * In continuous mode, the resolution is set only if it is higher
+ * than the existing value. This is because the preview pipe is
+ * configured after the capture pipe and usually has a lower
+ * resolution than the capture pipe.
+ */
+ if (!asd->continuous_mode->val ||
+ isp_sink_fmt.width < (f->fmt.pix.width + padding_w + dvs_env_w) ||
+ isp_sink_fmt.height < (f->fmt.pix.height + padding_h +
+ dvs_env_h)) {
+ /*
+ * For JPEG or custom RAW formats the sensor returns a constant
+ * width and height. Because try_mbus_fmt has already been
+ * queried, f->fmt.pix.width and f->fmt.pix.height have been
+ * changed to that fixed width and height, so the correct
+ * resolution cannot be selected from them. Use the original
+ * width and height when calling set_mbus_fmt() so that the
+ * actual resolution is used.
+ */
+ s_fmt = *f;
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG ||
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW) {
+ s_fmt.fmt.pix.width = backup_fmt.fmt.pix.width;
+ s_fmt.fmt.pix.height = backup_fmt.fmt.pix.height;
+ }
+ ret = atomisp_set_fmt_to_snr(vdev, &s_fmt,
+ f->fmt.pix.pixelformat, padding_w,
+ padding_h, dvs_env_w, dvs_env_h);
+ if (ret)
+ return -EINVAL;
+
+ atomisp_csi_lane_config(isp);
+ crop_needs_override = true;
+ }
+
+ atomisp_check_copy_mode(asd, source_pad, &backup_fmt);
+ asd->yuvpp_mode = false; /* Reset variable */
+
+ isp_sink_crop = *atomisp_subdev_get_rect(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK,
+ V4L2_SEL_TGT_CROP);
+
+ /*
+ * Try to enable YUV downscaling if the ISP input is 10% (in
+ * either width or height) bigger than the desired result.
+ */
+ if (isp_sink_crop.width * 9 / 10 < f->fmt.pix.width ||
+ isp_sink_crop.height * 9 / 10 < f->fmt.pix.height ||
+ (atomisp_subdev_format_conversion(asd, source_pad) &&
+ ((asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ !asd->continuous_mode->val) ||
+ asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER))) {
+ /*
+ * For continuous mode, the preview size might be smaller than
+ * the still capture size. If the preview size still needs a
+ * crop, pick the larger of the preview and still capture crop
+ * sizes.
+ */
+ if (asd->continuous_mode->val
+ && source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW
+ && !crop_needs_override) {
+ isp_sink_crop.width =
+ max_t(unsigned int, f->fmt.pix.width,
+ isp_sink_crop.width);
+ isp_sink_crop.height =
+ max_t(unsigned int, f->fmt.pix.height,
+ isp_sink_crop.height);
+ } else {
+ isp_sink_crop.width = f->fmt.pix.width;
+ isp_sink_crop.height = f->fmt.pix.height;
+ }
+
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK,
+ V4L2_SEL_TGT_CROP,
+ V4L2_SEL_FLAG_KEEP_CONFIG,
+ &isp_sink_crop);
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ source_pad, V4L2_SEL_TGT_COMPOSE,
+ 0, &isp_sink_crop);
+ } else if (IS_MOFD) {
+ struct v4l2_rect main_compose = {0};
+
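+ /* fit the requested aspect ratio inside the sink crop rectangle */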
+ main_compose.width = isp_sink_crop.width;
+ main_compose.height =
+ DIV_ROUND_UP(main_compose.width * f->fmt.pix.height,
+ f->fmt.pix.width);
+ if (main_compose.height > isp_sink_crop.height) {
+ main_compose.height = isp_sink_crop.height;
+ main_compose.width =
+ DIV_ROUND_UP(main_compose.height *
+ f->fmt.pix.width,
+ f->fmt.pix.height);
+ }
+
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ source_pad,
+ V4L2_SEL_TGT_COMPOSE, 0,
+ &main_compose);
+ } else {
+ struct v4l2_rect sink_crop = {0};
+ struct v4l2_rect main_compose = {0};
+
+ main_compose.width = f->fmt.pix.width;
+ main_compose.height = f->fmt.pix.height;
+
+ /*
+ * WORKAROUND: this override is universally enabled in GMIN to
+ * work around a CTS failure (GMINL-539) which appears to be
+ * related to a hardware performance limitation. It is unclear
+ * why this particular code triggers the issue.
+ */
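+ /* crop the sink to match the output aspect ratio, aligned to the ISP step size */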
+ if (!atomisp_hw_is_isp2401 || crop_needs_override) {
+ if (isp_sink_crop.width * main_compose.height >
+ isp_sink_crop.height * main_compose.width) {
+ sink_crop.height = isp_sink_crop.height;
+ sink_crop.width = DIV_NEAREST_STEP(
+ sink_crop.height *
+ f->fmt.pix.width,
+ f->fmt.pix.height,
+ ATOM_ISP_STEP_WIDTH);
+ } else {
+ sink_crop.width = isp_sink_crop.width;
+ sink_crop.height = DIV_NEAREST_STEP(
+ sink_crop.width *
+ f->fmt.pix.height,
+ f->fmt.pix.width,
+ ATOM_ISP_STEP_HEIGHT);
+ }
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK,
+ V4L2_SEL_TGT_CROP,
+ V4L2_SEL_FLAG_KEEP_CONFIG,
+ &sink_crop);
+ }
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ source_pad,
+ V4L2_SEL_TGT_COMPOSE, 0,
+ &main_compose);
+ }
+
+set_fmt_to_isp:
+ ret = atomisp_set_fmt_to_isp(vdev, &output_info, &raw_output_info,
+ &f->fmt.pix, source_pad);
+ if (ret)
+ return -EINVAL;
+done:
+ pipe->pix.width = f->fmt.pix.width;
+ pipe->pix.height = f->fmt.pix.height;
+ pipe->pix.pixelformat = f->fmt.pix.pixelformat;
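+ /*
+ * For planar formats bytesperline covers only the first plane,
+ * while sizeimage is derived from the total bit depth; e.g. NV12
+ * (12 bits per pixel) with a padded width of 1280 gives
+ * bytesperline 1280 but 1920 bytes of image data per line.
+ */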
+ if (format_bridge->planar) {
+ pipe->pix.bytesperline = output_info.padded_width;
+ pipe->pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height *
+ DIV_ROUND_UP(format_bridge->depth *
+ output_info.padded_width, 8));
+ } else {
+ pipe->pix.bytesperline =
+ DIV_ROUND_UP(format_bridge->depth *
+ output_info.padded_width, 8);
+ pipe->pix.sizeimage =
+ PAGE_ALIGN(f->fmt.pix.height * pipe->pix.bytesperline);
+ }
+ if (f->fmt.pix.field == V4L2_FIELD_ANY)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ pipe->pix.field = f->fmt.pix.field;
+
+ f->fmt.pix = pipe->pix;
+ f->fmt.pix.priv = PAGE_ALIGN(pipe->pix.width *
+ pipe->pix.height * 2);
+
+ pipe->capq.field = f->fmt.pix.field;
+
+ /* No GFX throttling is needed in the video 480p case */
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ f->fmt.pix.width == 720 && f->fmt.pix.height == 480)
+ isp->need_gfx_throttle = false;
+ else
+ isp->need_gfx_throttle = true;
+
+ return 0;
+}
+
+int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f)
+{
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct v4l2_mbus_framefmt ffmt = {0};
+ const struct atomisp_format_bridge *format_bridge;
+ struct v4l2_subdev_fh fh;
+ int ret;
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ dev_dbg(isp->dev, "setting fmt %ux%u 0x%x for file inject\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat);
+ ret = atomisp_try_fmt_file(isp, f);
+ if (ret) {
+ dev_err(isp->dev, "atomisp_try_fmt_file err: %d\n", ret);
+ return ret;
+ }
+
+ format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+ if (!format_bridge) {
+ dev_dbg(isp->dev, "atomisp_get_format_bridge err! fmt:0x%x\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ pipe->pix = f->fmt.pix;
+ atomisp_css_input_set_mode(asd, CSS_INPUT_MODE_FIFO);
+ atomisp_css_input_configure_port(asd,
+ __get_mipi_port(isp, ATOMISP_CAMERA_PORT_PRIMARY), 2, 0xffff4,
+ 0, 0, 0, 0);
+ ffmt.width = f->fmt.pix.width;
+ ffmt.height = f->fmt.pix.height;
+ ffmt.code = format_bridge->mbus_code;
+
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, &ffmt);
+
+ return 0;
+}
+
+int atomisp_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *user_shading_table)
+{
+ struct atomisp_css_shading_table *shading_table;
+ struct atomisp_css_shading_table *free_table;
+ unsigned int len_table;
+ int i;
+ int ret = 0;
+
+ if (!user_shading_table)
+ return -EINVAL;
+
+ if (!user_shading_table->enable) {
+ atomisp_css_set_shading_table(asd, NULL);
+ asd->params.sc_en = false;
+ return 0;
+ }
+
+ /* If enabling, all tables must be set */
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (!user_shading_table->data[i])
+ return -EINVAL;
+ }
+
+ /* Shading table size per color */
+ if (!atomisp_hw_is_isp2401) {
+ if (user_shading_table->width > ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ user_shading_table->height > ISP2400_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR)
+ return -EINVAL;
+ } else {
+ if (user_shading_table->width > ISP2401_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ user_shading_table->height > ISP2401_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR)
+ return -EINVAL;
+ }
+
+ shading_table = atomisp_css_shading_table_alloc(
+ user_shading_table->width, user_shading_table->height);
+ if (!shading_table)
+ return -ENOMEM;
+
+ len_table = user_shading_table->width * user_shading_table->height *
+ ATOMISP_SC_TYPE_SIZE;
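+ /* each color plane holds width * height entries of ATOMISP_SC_TYPE_SIZE bytes */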
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ ret = copy_from_user(shading_table->data[i],
+ (void __user *)user_shading_table->data[i],
+ len_table);
+ if (ret) {
+ free_table = shading_table;
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ shading_table->sensor_width = user_shading_table->sensor_width;
+ shading_table->sensor_height = user_shading_table->sensor_height;
+ shading_table->fraction_bits = user_shading_table->fraction_bits;
+
+ free_table = asd->params.css_param.shading_table;
+ asd->params.css_param.shading_table = shading_table;
+ atomisp_css_set_shading_table(asd, shading_table);
+ asd->params.sc_en = true;
+
+out:
+ if (free_table)
+ atomisp_css_shading_table_free(free_table);
+
+ return ret;
+}
+
+/* Turn off the ISP DPHY */
+int atomisp_ospm_dphy_down(struct atomisp_device *isp)
+{
+ unsigned long flags;
+ u32 reg;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ /* if the ISP timed out, we can force a powerdown */
+ if (isp->isp_timeout)
+ goto done;
+
+ if (!atomisp_dev_users(isp))
+ goto done;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ isp->sw_contex.power_state = ATOM_ISP_POWER_DOWN;
+ spin_unlock_irqrestore(&isp->lock, flags);
+done:
+ /*
+ * The MRFLD IUNIT DPHY is located in an always-powered-on island.
+ * The MRFLD HW design requires all CSI ports to be disabled
+ * before powering down the IUNIT.
+ */
+ pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &reg);
+ reg |= MRFLD_ALL_CSI_PORTS_OFF_MASK;
+ pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, reg);
+ return 0;
+}
+
+/* Turn on the ISP DPHY */
+int atomisp_ospm_dphy_up(struct atomisp_device *isp)
+{
+ unsigned long flags;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&isp->lock, flags);
+ isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ return 0;
+}
+
+int atomisp_exif_makernote(struct atomisp_sub_device *asd,
+ struct atomisp_makernote_info *config)
+{
+ struct v4l2_control ctrl;
+ struct atomisp_device *isp = asd->isp;
+
+ ctrl.id = V4L2_CID_FOCAL_ABSOLUTE;
+ if (v4l2_g_ctrl
+ (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl)) {
+ dev_warn(isp->dev, "failed to g_ctrl for focal length\n");
+ return -EINVAL;
+ } else {
+ config->focal_length = ctrl.value;
+ }
+
+ ctrl.id = V4L2_CID_FNUMBER_ABSOLUTE;
+ if (v4l2_g_ctrl
+ (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl)) {
+ dev_warn(isp->dev, "failed to g_ctrl for f-number\n");
+ return -EINVAL;
+ } else {
+ config->f_number_curr = ctrl.value;
+ }
+
+ ctrl.id = V4L2_CID_FNUMBER_RANGE;
+ if (v4l2_g_ctrl
+ (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl)) {
+ dev_warn(isp->dev, "failed to g_ctrl for f number range\n");
+ return -EINVAL;
+ } else {
+ config->f_number_range = ctrl.value;
+ }
+
+ return 0;
+}
+
+int atomisp_offline_capture_configure(struct atomisp_sub_device *asd,
+ struct atomisp_cont_capture_conf *cvf_config)
+{
+ struct v4l2_ctrl *c;
+
+ /*
+ * In case of M10MO ZSL capture case, we need to issue a separate
+ * capture request to M10MO which will output captured jpeg image
+ */
+ c = v4l2_ctrl_find(
+ asd->isp->inputs[asd->input_curr].camera->ctrl_handler,
+ V4L2_CID_START_ZSL_CAPTURE);
+ if (c) {
+ int ret;
+
+ dev_dbg(asd->isp->dev, "%s trigger ZSL capture request\n",
+ __func__);
+ /* TODO: use the cvf_config */
+ ret = v4l2_ctrl_s_ctrl(c, 1);
+ if (ret)
+ return ret;
+
+ return v4l2_ctrl_s_ctrl(c, 0);
+ }
+
+ asd->params.offline_parm = *cvf_config;
+
+ if (asd->params.offline_parm.num_captures) {
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED) {
+ unsigned int init_raw_num;
+
+ if (asd->enable_raw_buffer_lock->val) {
+ init_raw_num =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN;
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ asd->params.video_dis_en)
+ init_raw_num +=
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+ } else {
+ init_raw_num =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES;
+ }
+
+ /* TODO: this can be removed once user-space
+ * has been updated to use control API */
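+ /*
+ * Reserve buffers for the requested captures plus the initial
+ * raw frames, capped at ATOMISP_CONT_RAW_FRAMES.
+ */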
+ asd->continuous_raw_buffer_size->val =
+ max_t(int,
+ asd->continuous_raw_buffer_size->val,
+ asd->params.offline_parm.
+ num_captures + init_raw_num);
+ asd->continuous_raw_buffer_size->val =
+ min_t(int, ATOMISP_CONT_RAW_FRAMES,
+ asd->continuous_raw_buffer_size->val);
+ }
+ asd->continuous_mode->val = true;
+ } else {
+ asd->continuous_mode->val = false;
+ __enable_continuous_mode(asd, false);
+ }
+
+ return 0;
+}
+
+/*
+ * set auto exposure metering window to camera sensor
+ */
+int atomisp_s_ae_window(struct atomisp_sub_device *asd,
+ struct atomisp_ae_window *arg)
+{
+ struct atomisp_device *isp = asd->isp;
+ /* Coverity CID 298071 - initialize struct */
+ struct v4l2_subdev_selection sel = { 0 };
+
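+ /* the AE window coordinates are inclusive, hence the +1 */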
+ sel.r.left = arg->x_left;
+ sel.r.top = arg->y_top;
+ sel.r.width = arg->x_right - arg->x_left + 1;
+ sel.r.height = arg->y_bottom - arg->y_top + 1;
+
+ if (v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ pad, set_selection, NULL, &sel)) {
+ dev_err(isp->dev, "failed to call sensor set_selection.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_flash_enable(struct atomisp_sub_device *asd, int num_frames)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (num_frames < 0) {
+ dev_dbg(isp->dev, "%s ERROR: num_frames: %d\n", __func__,
+ num_frames);
+ return -EINVAL;
+ }
+ /* a requested flash is still in progress. */
+ if (num_frames && asd->params.flash_state != ATOMISP_FLASH_IDLE) {
+ dev_dbg(isp->dev, "%s flash busy: %d frames left: %d\n",
+ __func__, asd->params.flash_state,
+ asd->params.num_flash_frames);
+ return -EBUSY;
+ }
+
+ asd->params.num_flash_frames = num_frames;
+ asd->params.flash_state = ATOMISP_FLASH_REQUESTED;
+ return 0;
+}
+
+int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd,
+ uint16_t source_pad)
+{
+ int stream_id;
+ struct atomisp_device *isp = asd->isp;
+
+ if (isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num == 1)
+ return ATOMISP_INPUT_STREAM_GENERAL;
+
+ switch (source_pad) {
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ stream_id = ATOMISP_INPUT_STREAM_CAPTURE;
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ stream_id = ATOMISP_INPUT_STREAM_POSTVIEW;
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ stream_id = ATOMISP_INPUT_STREAM_PREVIEW;
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
+ stream_id = ATOMISP_INPUT_STREAM_VIDEO;
+ break;
+ default:
+ stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+ }
+
+ return stream_id;
+}
+
+bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_sub_device *asd = pipe->asd;
+
+ if (pipe == &asd->video_out_vf)
+ return true;
+
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ pipe == &asd->video_out_preview)
+ return true;
+
+ return false;
+}
+
+static int __checking_exp_id(struct atomisp_sub_device *asd, int exp_id)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->enable_raw_buffer_lock->val) {
+ dev_warn(isp->dev, "%s Raw Buffer Lock is disable.\n", __func__);
+ return -EINVAL;
+ }
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) {
+ dev_err(isp->dev, "%s streaming %d invalid exp_id %d.\n",
+ __func__, exp_id, asd->streaming);
+ return -EINVAL;
+ }
+ if ((exp_id > ATOMISP_MAX_EXP_ID) || (exp_id <= 0)) {
+ dev_err(isp->dev, "%s exp_id %d invalid.\n", __func__, exp_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ memset(asd->raw_buffer_bitmap, 0, sizeof(asd->raw_buffer_bitmap));
+ asd->raw_buffer_locked_count = 0;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+}
+
+int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id)
+{
+ int *bitmap, bit;
+ unsigned long flags;
+
+ if (__checking_exp_id(asd, exp_id))
+ return -EINVAL;
+
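+ /* each 32-bit word tracks 32 exposure IDs; e.g. exp_id 40 maps to word 1, bit 8 */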
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ (*bitmap) |= (1 << bit);
+ asd->raw_buffer_locked_count++;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+
+ dev_dbg(asd->isp->dev, "%s: exp_id %d, raw_buffer_locked_count %d\n",
+ __func__, exp_id, asd->raw_buffer_locked_count);
+
+ /* Check whether the raw buffer after next is still locked */
+ exp_id += 2;
+ if (exp_id > ATOMISP_MAX_EXP_ID)
+ exp_id -= ATOMISP_MAX_EXP_ID;
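+ /* exp_id stays within [1, ATOMISP_MAX_EXP_ID] after wrapping */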
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ if ((*bitmap) & (1 << bit)) {
+ int ret;
+
+ /* WORKAROUND unlock the raw buffer compulsively */
+ ret = atomisp_css_exp_id_unlock(asd, exp_id);
+ if (ret) {
+ dev_err(asd->isp->dev,
+ "%s exp_id is wrapping back to %d but force unlock failed,, err %d.\n",
+ __func__, exp_id, ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ (*bitmap) &= ~(1 << bit);
+ asd->raw_buffer_locked_count--;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+ dev_warn(asd->isp->dev,
+ "%s exp_id is wrapping back to %d but it is still locked so force unlock it, raw_buffer_locked_count %d\n",
+ __func__, exp_id, asd->raw_buffer_locked_count);
+ }
+ return 0;
+}
+
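+/* returns 0 when the raw buffer for exp_id is locked, nonzero otherwise */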
+static int __is_raw_buffer_locked(struct atomisp_sub_device *asd, int exp_id)
+{
+ int *bitmap, bit;
+ unsigned long flags;
+ int ret;
+
+ if (__checking_exp_id(asd, exp_id))
+ return -EINVAL;
+
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ ret = ((*bitmap) & (1 << bit));
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+ return !ret;
+}
+
+static int __clear_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id)
+{
+ int *bitmap, bit;
+ unsigned long flags;
+
+ if (__is_raw_buffer_locked(asd, exp_id))
+ return -EINVAL;
+
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ (*bitmap) &= ~(1 << bit);
+ asd->raw_buffer_locked_count--;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+
+ dev_dbg(asd->isp->dev, "%s: exp_id %d, raw_buffer_locked_count %d\n",
+ __func__, exp_id, asd->raw_buffer_locked_count);
+ return 0;
+}
+
+int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int value = *exp_id;
+ int ret;
+
+ ret = __is_raw_buffer_locked(asd, value);
+ if (ret) {
+ dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret);
+ return -EINVAL;
+ }
+
+ dev_dbg(isp->dev, "%s exp_id %d\n", __func__, value);
+ ret = atomisp_css_exp_id_capture(asd, value);
+ if (ret) {
+ dev_err(isp->dev, "%s exp_id %d failed.\n", __func__, value);
+ return -EIO;
+ }
+ return 0;
+}
+
+int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int value = *exp_id;
+ int ret;
+
+ ret = __clear_raw_buffer_bitmap(asd, value);
+ if (ret) {
+ dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret);
+ return -EINVAL;
+ }
+
+ dev_dbg(isp->dev, "%s exp_id %d\n", __func__, value);
+ ret = atomisp_css_exp_id_unlock(asd, value);
+ if (ret)
+ dev_err(isp->dev, "%s exp_id %d failed, err %d.\n",
+ __func__, value, ret);
+
+ return ret;
+}
+
+int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
+ unsigned int *enable)
+{
+ bool value;
+
+ if (!enable)
+ return -EINVAL;
+
+ value = *enable > 0;
+
+ atomisp_en_dz_capt_pipe(asd, value);
+
+ return 0;
+}
+
+int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event)
+{
+ if (!event || asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return -EINVAL;
+
+ dev_dbg(asd->isp->dev, "%s: trying to inject a fake event 0x%x\n",
+ __func__, *event);
+
+ switch (*event) {
+ case V4L2_EVENT_FRAME_SYNC:
+ atomisp_sof_event(asd);
+ break;
+ case V4L2_EVENT_FRAME_END:
+ atomisp_eof_event(asd, 0);
+ break;
+ case V4L2_EVENT_ATOMISP_3A_STATS_READY:
+ atomisp_3a_stats_ready_event(asd, 0);
+ break;
+ case V4L2_EVENT_ATOMISP_METADATA_READY:
+ atomisp_metadata_ready_event(asd, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int atomisp_get_pipe_id(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_sub_device *asd = pipe->asd;
+
+ if (ATOMISP_USE_YUVPP(asd))
+ return CSS_PIPE_ID_YUVPP;
+ else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER)
+ return CSS_PIPE_ID_VIDEO;
+ else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT)
+ return CSS_PIPE_ID_CAPTURE;
+ else if (pipe == &asd->video_out_video_capture)
+ return CSS_PIPE_ID_VIDEO;
+ else if (pipe == &asd->video_out_vf)
+ return CSS_PIPE_ID_CAPTURE;
+ else if (pipe == &asd->video_out_preview) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ return CSS_PIPE_ID_VIDEO;
+ else
+ return CSS_PIPE_ID_PREVIEW;
+ } else if (pipe == &asd->video_out_capture) {
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+ else
+ return CSS_PIPE_ID_CAPTURE;
+ }
+
+ /* fall through: no matching pipe was found */
+ dev_warn(asd->isp->dev, "%s failed to find proper pipe\n",
+ __func__);
+ return CSS_PIPE_ID_CAPTURE;
+}
+
+int atomisp_get_invalid_frame_num(struct video_device *vdev,
+ int *invalid_frame_num)
+{
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ enum atomisp_css_pipe_id pipe_id;
+ struct ia_css_pipe_info p_info;
+ int ret;
+
+ if (asd->isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1) {
+ /* External ISP */
+ *invalid_frame_num = 0;
+ return 0;
+ }
+
+ pipe_id = atomisp_get_pipe_id(pipe);
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id]) {
+ dev_warn(asd->isp->dev,
+ "%s pipe %d has not been created yet, do SET_FMT first!\n",
+ __func__, pipe_id);
+ return -EINVAL;
+ }
+
+ ret = ia_css_pipe_get_info(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipes[pipe_id], &p_info);
+ if (ret == IA_CSS_SUCCESS) {
+ *invalid_frame_num = p_info.num_invalid_frames;
+ return 0;
+ } else {
+ dev_warn(asd->isp->dev, "%s get pipe infor failed %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
new file mode 100644
index 000000000000..b5af9da3b0fb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
@@ -0,0 +1,442 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_CMD_H__
+#define __ATOMISP_CMD_H__
+
+#include "../../include/linux/atomisp.h"
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "atomisp_internal.h"
+
+#include "ia_css_types.h"
+#include "ia_css.h"
+
+struct atomisp_device;
+struct atomisp_css_frame;
+
+#define MSI_ENABLE_BIT 16
+#define INTR_DISABLE_BIT 10
+#define BUS_MASTER_ENABLE 2
+#define MEMORY_SPACE_ENABLE 1
+#define INTR_IER 24
+#define INTR_IIR 16
+
+/* ISP2401 */
+#define RUNMODE_MASK (ATOMISP_RUN_MODE_VIDEO | ATOMISP_RUN_MODE_STILL_CAPTURE \
+ | ATOMISP_RUN_MODE_PREVIEW)
+
+/* FIXME: check if can go */
+extern int atomisp_punit_hpll_freq;
+
+/*
+ * Helper functions
+ */
+void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
+ unsigned int size);
+struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd);
+struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev);
+struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev);
+int atomisp_reset(struct atomisp_device *isp);
+void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd);
+void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd);
+/* ISP2400 */
+bool atomisp_buffers_queued(struct atomisp_sub_device *asd);
+/* ISP2401 */
+bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe);
+
+/* TODO: should be here instead of atomisp_helper.h
+extern void __iomem *atomisp_io_base;
+
+static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address)
+{
+ void __iomem *ret = atomisp_io_base + (address & 0x003FFFFF);
+ return ret;
+}
+*/
+
+/*
+ * Interrupt functions
+ */
+void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev);
+void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev);
+void atomisp_wdt_work(struct work_struct *work);
+void atomisp_wdt(struct timer_list *t);
+void atomisp_setup_flash(struct atomisp_sub_device *asd);
+irqreturn_t atomisp_isr(int irq, void *dev);
+irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr);
+const struct atomisp_format_bridge *get_atomisp_format_bridge_from_mbus(
+ u32 mbus_code);
+bool atomisp_is_mbuscode_raw(uint32_t code);
+int atomisp_get_frame_pgnr(struct atomisp_device *isp,
+ const struct atomisp_css_frame *frame, u32 *p_pgnr);
+void atomisp_delayed_init_work(struct work_struct *work);
+
+/*
+ * Get internal fmt according to V4L2 fmt
+ */
+
+bool atomisp_is_viewfinder_support(struct atomisp_device *isp);
+
+/*
+ * ISP features control function
+ */
+
+/*
+ * Function to set the sensor run mode when the
+ * ATOMISP_IOC_S_SENSOR_RUNMODE ioctl is called
+ */
+int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
+ struct atomisp_s_runmode *runmode);
+/*
+ * Function to enable/disable lens geometry distortion correction (GDC) and
+ * chromatic aberration correction (CAC)
+ */
+int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to enable/disable low light mode (including ANR)
+ */
+int atomisp_low_light(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to enable/disable extra noise reduction (XNR) in low light
+ * condition
+ */
+int atomisp_xnr(struct atomisp_sub_device *asd, int flag, int *arg);
+
+int atomisp_formats(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_formats_config *config);
+
+/*
+ * Function to configure noise reduction
+ */
+int atomisp_nr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_nr_config *config);
+
+/*
+ * Function to configure temporal noise reduction (TNR)
+ */
+int atomisp_tnr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_tnr_config *config);
+
+/*
+ * Function to configure black level compensation
+ */
+int atomisp_black_level(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ob_config *config);
+
+/*
+ * Function to configure edge enhancement
+ */
+int atomisp_ee(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ee_config *config);
+
+/*
+ * Function to update Gamma table for gamma, brightness and contrast config
+ */
+int atomisp_gamma(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gamma_table *config);
+/*
+ * Function to update Ctc table for Chroma Enhancement
+ */
+int atomisp_ctc(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ctc_table *config);
+
+/*
+ * Function to update gamma correction parameters
+ */
+int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gc_config *config);
+
+/*
+ * Function to update Gdc table for gdc
+ */
+int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_morph_table *config);
+
+/*
+ * Function to update table for macc
+ */
+int atomisp_macc_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_macc_config *config);
+/*
+ * Function to get DIS statistics.
+ */
+int atomisp_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats);
+
+/*
+ * Function to get DVS2 BQ resolution settings
+ */
+int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd,
+ struct atomisp_dvs2_bq_resolutions *bq_res);
+
+/*
+ * Function to set the DIS coefficients.
+ */
+int atomisp_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs);
+
+/*
+ * Function to set the DIS motion vector.
+ */
+int atomisp_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector);
+
+/*
+ * Function to set/get 3A stat from isp
+ */
+int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_statistics *config);
+
+/*
+ * Function to get metadata from isp
+ */
+int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_metadata *config);
+
+int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_metadata_with_type *config);
+
+int atomisp_set_parameters(struct video_device *vdev,
+ struct atomisp_parameters *arg);
+/*
+ * Function to set/get isp parameters to isp
+ */
+int atomisp_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_parm *config);
+
+/*
+ * Function to configure color effect of the image
+ */
+int atomisp_color_effect(struct atomisp_sub_device *asd, int flag,
+ __s32 *effect);
+
+/*
+ * Function to configure bad pixel correction
+ */
+int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to configure bad pixel correction params
+ */
+int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_dp_config *config);
+
+/*
+ * Function to enable/disable video image stabilization
+ */
+int atomisp_video_stable(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to configure fixed pattern noise
+ */
+int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to configure fixed pattern noise table
+ */
+int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd,
+ struct v4l2_framebuffer *config);
+
+/*
+ * Function to configure false color correction
+ */
+int atomisp_false_color(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to configure false color correction params
+ */
+int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_de_config *config);
+
+/*
+ * Function to configure white balance params
+ */
+int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_wb_config *config);
+
+int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_config *config);
+
+/*
+ * Function to setup digital zoom
+ */
+int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to set the current sensor pixel array size from camera_prefiles.xml
+ */
+int atomisp_set_array_res(struct atomisp_sub_device *asd,
+ struct atomisp_resolution *config);
+
+/*
+ * Function to calculate real zoom region for every pipe
+ */
+int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd,
+ struct atomisp_css_dz_config *dz_config,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+int atomisp_cp_general_isp_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_cp_lsc_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *source_st,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_css_cp_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_cp_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_morph_table *source_morph_table,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd,
+ struct atomisp_dvs_6axis_config *user_6axis_config,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param);
+
+int atomisp_compare_grid(struct atomisp_sub_device *asd,
+ struct atomisp_grid_info *atomgrid);
+
+int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd,
+ struct atomisp_sensor_mode_data *config);
+
+int atomisp_get_fmt(struct video_device *vdev, struct v4l2_format *f);
+
+/* This function looks up the closest available resolution. */
+int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f,
+ bool *res_overflow);
+
+int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f);
+int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f);
+
+int atomisp_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *shading_table);
+
+int atomisp_offline_capture_configure(struct atomisp_sub_device *asd,
+ struct atomisp_cont_capture_conf *cvf_config);
+
+int atomisp_ospm_dphy_down(struct atomisp_device *isp);
+int atomisp_ospm_dphy_up(struct atomisp_device *isp);
+int atomisp_exif_makernote(struct atomisp_sub_device *asd,
+ struct atomisp_makernote_info *config);
+
+void atomisp_free_internal_buffers(struct atomisp_sub_device *asd);
+
+int atomisp_s_ae_window(struct atomisp_sub_device *asd,
+ struct atomisp_ae_window *arg);
+
+int atomisp_flash_enable(struct atomisp_sub_device *asd,
+ int num_frames);
+
+int atomisp_freq_scaling(struct atomisp_device *vdev,
+ enum atomisp_dfs_mode mode,
+ bool force);
+
+void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
+ enum atomisp_css_buffer_type buf_type,
+ enum atomisp_css_pipe_id css_pipe_id,
+ bool q_buffers, enum atomisp_input_stream_id stream_id);
+
+void atomisp_css_flush(struct atomisp_device *isp);
+int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd,
+ uint16_t source_pad);
+
+/*
+ * Events. Only one event has to be exported for now.
+ */
+void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id);
+
+enum mipi_port_id __get_mipi_port(struct atomisp_device *isp,
+ enum atomisp_camera_port port);
+
+bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe);
+
+void atomisp_apply_css_parameters(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_params *css_param);
+void atomisp_free_css_parameters(struct atomisp_css_params *css_param);
+
+void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe);
+
+void atomisp_flush_params_queue(struct atomisp_video_pipe *asd);
+/*
+ * Functions for raw buffer operations, used once raw buffer
+ * lock/unlock has been enabled
+ */
+int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id);
+int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id);
+
+/*
+ * Function to update Raw Buffer bitmap
+ */
+int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id);
+void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd);
+
+/*
+ * Function to enable/disable zoom for capture pipe
+ */
+int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
+ unsigned int *enable);
+
+/*
+ * Function to get the metadata type by pipe id
+ */
+enum atomisp_metadata_type
+atomisp_get_metadata_type(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id);
+
+/*
+ * Function for HAL to inject a fake event to wake up poll thread
+ */
+int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event);
+
+/*
+ * Function for the HAL to query how many invalid frames appear at the
+ * beginning of the ISP pipeline output
+ */
+int atomisp_get_invalid_frame_num(struct video_device *vdev,
+ int *invalid_frame_num);
+
+int atomisp_mrfld_power_up(struct atomisp_device *isp);
+int atomisp_mrfld_power_down(struct atomisp_device *isp);
+int atomisp_runtime_suspend(struct device *dev);
+int atomisp_runtime_resume(struct device *dev);
+#endif /* __ATOMISP_CMD_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_common.h b/drivers/staging/media/atomisp/pci/atomisp_common.h
new file mode 100644
index 000000000000..65c9caf72b7b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_common.h
@@ -0,0 +1,74 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_COMMON_H__
+#define __ATOMISP_COMMON_H__
+
+#include "../../include/linux/atomisp.h"
+
+#include <linux/v4l2-mediabus.h>
+
+#include <media/videobuf-core.h>
+
+#include "atomisp_compat.h"
+
+#include "ia_css.h"
+
+extern int dbg_level;
+extern int dbg_func;
+extern int mipicsi_flag;
+extern int pad_w;
+extern int pad_h;
+
+#define CSS_DTRACE_VERBOSITY_LEVEL 5 /* Controls trace verbosity */
+#define CSS_DTRACE_VERBOSITY_TIMEOUT 9 /* Verbosity on ISP timeout */
+#define MRFLD_MAX_ZOOM_FACTOR 1024
+
+/* ISP2401 */
+#define ATOMISP_CSS_ISP_PIPE_VERSION_2_7 1
+
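+/* true for ISP2401-legacy and newer hardware revisions */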
+#define IS_ISP2401(isp) \
+ (((isp)->media_dev.hw_revision & ATOMISP_HW_REVISION_MASK) \
+ >= (ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT))
+
+struct atomisp_format_bridge {
+ unsigned int pixelformat;
+ unsigned int depth;
+ u32 mbus_code;
+ enum atomisp_css_frame_format sh_fmt;
+ unsigned char description[32]; /* the same as struct v4l2_fmtdesc */
+ bool planar;
+};
+
+struct atomisp_fmt {
+ u32 pixelformat;
+ u32 depth;
+ u32 bytesperline;
+ u32 framesize;
+ u32 imagesize;
+ u32 width;
+ u32 height;
+ u32 bayer_order;
+};
+
+struct atomisp_buffer {
+ struct videobuf_buffer vb;
+};
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h
new file mode 100644
index 000000000000..205c530e8090
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h
@@ -0,0 +1,663 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_COMPAT_H__
+#define __ATOMISP_COMPAT_H__
+
+#include "atomisp_compat_css20.h"
+
+#include "../../include/linux/atomisp.h"
+#include <media/videobuf-vmalloc.h>
+
+#define CSS_RX_IRQ_INFO_BUFFER_OVERRUN \
+ CSS_ID(CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+#define CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE \
+ CSS_ID(CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE)
+#define CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE \
+ CSS_ID(CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE)
+#define CSS_RX_IRQ_INFO_ECC_CORRECTED \
+ CSS_ID(CSS_RX_IRQ_INFO_ECC_CORRECTED)
+#define CSS_RX_IRQ_INFO_ERR_SOT \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_SOT)
+#define CSS_RX_IRQ_INFO_ERR_SOT_SYNC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+#define CSS_RX_IRQ_INFO_ERR_CONTROL \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_CONTROL)
+#define CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+#define CSS_RX_IRQ_INFO_ERR_CRC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_CRC)
+#define CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+#define CSS_RX_IRQ_INFO_ERR_FRAME_SYNC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+#define CSS_RX_IRQ_INFO_ERR_FRAME_DATA \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+#define CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+#define CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+#define CSS_RX_IRQ_INFO_ERR_LINE_SYNC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+#define CSS_RX_IRQ_INFO_INIT_TIMEOUT \
+ CSS_ID(CSS_RX_IRQ_INFO_INIT_TIMEOUT)
+
+#define CSS_IRQ_INFO_CSS_RECEIVER_SOF CSS_ID(CSS_IRQ_INFO_CSS_RECEIVER_SOF)
+#define CSS_IRQ_INFO_CSS_RECEIVER_EOF CSS_ID(CSS_IRQ_INFO_CSS_RECEIVER_EOF)
+#define CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW \
+ CSS_ID(CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW)
+#define CSS_EVENT_OUTPUT_FRAME_DONE CSS_EVENT(OUTPUT_FRAME_DONE)
+#define CSS_EVENT_SEC_OUTPUT_FRAME_DONE CSS_EVENT(SECOND_OUTPUT_FRAME_DONE)
+#define CSS_EVENT_VF_OUTPUT_FRAME_DONE CSS_EVENT(VF_OUTPUT_FRAME_DONE)
+#define CSS_EVENT_SEC_VF_OUTPUT_FRAME_DONE CSS_EVENT(SECOND_VF_OUTPUT_FRAME_DONE)
+#define CSS_EVENT_3A_STATISTICS_DONE CSS_EVENT(3A_STATISTICS_DONE)
+#define CSS_EVENT_DIS_STATISTICS_DONE CSS_EVENT(DIS_STATISTICS_DONE)
+#define CSS_EVENT_PIPELINE_DONE CSS_EVENT(PIPELINE_DONE)
+#define CSS_EVENT_METADATA_DONE CSS_EVENT(METADATA_DONE)
+#define CSS_EVENT_ACC_STAGE_COMPLETE CSS_EVENT(ACC_STAGE_COMPLETE)
+#define CSS_EVENT_TIMER CSS_EVENT(TIMER)
+
+#define CSS_BUFFER_TYPE_METADATA CSS_ID(CSS_BUFFER_TYPE_METADATA)
+#define CSS_BUFFER_TYPE_3A_STATISTICS CSS_ID(CSS_BUFFER_TYPE_3A_STATISTICS)
+#define CSS_BUFFER_TYPE_DIS_STATISTICS CSS_ID(CSS_BUFFER_TYPE_DIS_STATISTICS)
+#define CSS_BUFFER_TYPE_INPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_INPUT_FRAME)
+#define CSS_BUFFER_TYPE_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_OUTPUT_FRAME)
+#define CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
+#define CSS_BUFFER_TYPE_VF_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+#define CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
+#define CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME \
+ CSS_ID(CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME)
+
+#define CSS_FORMAT_RAW_8 CSS_FORMAT(RAW_8)
+#define CSS_FORMAT_RAW_10 CSS_FORMAT(RAW_10)
+#define CSS_FORMAT_RAW_12 CSS_FORMAT(RAW_12)
+#define CSS_FORMAT_RAW_16 CSS_FORMAT(RAW_16)
+
+#define CSS_CAPTURE_MODE_RAW CSS_ID(CSS_CAPTURE_MODE_RAW)
+#define CSS_CAPTURE_MODE_BAYER CSS_ID(CSS_CAPTURE_MODE_BAYER)
+#define CSS_CAPTURE_MODE_PRIMARY CSS_ID(CSS_CAPTURE_MODE_PRIMARY)
+#define CSS_CAPTURE_MODE_ADVANCED CSS_ID(CSS_CAPTURE_MODE_ADVANCED)
+#define CSS_CAPTURE_MODE_LOW_LIGHT CSS_ID(CSS_CAPTURE_MODE_LOW_LIGHT)
+
+#define CSS_MORPH_TABLE_NUM_PLANES CSS_ID(CSS_MORPH_TABLE_NUM_PLANES)
+
+#define CSS_FRAME_FORMAT_NV11 CSS_ID(CSS_FRAME_FORMAT_NV11)
+#define CSS_FRAME_FORMAT_NV12 CSS_ID(CSS_FRAME_FORMAT_NV12)
+#define CSS_FRAME_FORMAT_NV16 CSS_ID(CSS_FRAME_FORMAT_NV16)
+#define CSS_FRAME_FORMAT_NV21 CSS_ID(CSS_FRAME_FORMAT_NV21)
+#define CSS_FRAME_FORMAT_NV61 CSS_ID(CSS_FRAME_FORMAT_NV61)
+#define CSS_FRAME_FORMAT_YV12 CSS_ID(CSS_FRAME_FORMAT_YV12)
+#define CSS_FRAME_FORMAT_YV16 CSS_ID(CSS_FRAME_FORMAT_YV16)
+#define CSS_FRAME_FORMAT_YUV420 CSS_ID(CSS_FRAME_FORMAT_YUV420)
+#define CSS_FRAME_FORMAT_YUV420_16 CSS_ID(CSS_FRAME_FORMAT_YUV420_16)
+#define CSS_FRAME_FORMAT_YUV422 CSS_ID(CSS_FRAME_FORMAT_YUV422)
+#define CSS_FRAME_FORMAT_YUV422_16 CSS_ID(CSS_FRAME_FORMAT_YUV422_16)
+#define CSS_FRAME_FORMAT_UYVY CSS_ID(CSS_FRAME_FORMAT_UYVY)
+#define CSS_FRAME_FORMAT_YUYV CSS_ID(CSS_FRAME_FORMAT_YUYV)
+#define CSS_FRAME_FORMAT_YUV444 CSS_ID(CSS_FRAME_FORMAT_YUV444)
+#define CSS_FRAME_FORMAT_YUV_LINE CSS_ID(CSS_FRAME_FORMAT_YUV_LINE)
+#define CSS_FRAME_FORMAT_RAW CSS_ID(CSS_FRAME_FORMAT_RAW)
+#define CSS_FRAME_FORMAT_RGB565 CSS_ID(CSS_FRAME_FORMAT_RGB565)
+#define CSS_FRAME_FORMAT_PLANAR_RGB888 CSS_ID(CSS_FRAME_FORMAT_PLANAR_RGB888)
+#define CSS_FRAME_FORMAT_RGBA888 CSS_ID(CSS_FRAME_FORMAT_RGBA888)
+#define CSS_FRAME_FORMAT_QPLANE6 CSS_ID(CSS_FRAME_FORMAT_QPLANE6)
+#define CSS_FRAME_FORMAT_BINARY_8 CSS_ID(CSS_FRAME_FORMAT_BINARY_8)
+
+struct atomisp_device;
+struct atomisp_sub_device;
+struct video_device;
+enum atomisp_input_stream_id;
+
+struct atomisp_metadata_buf {
+ struct ia_css_metadata *metadata;
+ void *md_vptr;
+ struct list_head list;
+};
+
+void atomisp_css_debug_dump_sp_sw_debug_info(void);
+void atomisp_css_debug_dump_debug_info(const char *context);
+void atomisp_css_debug_set_dtrace_level(const unsigned int trace_level);
+
+void atomisp_store_uint32(hrt_address addr, uint32_t data);
+void atomisp_load_uint32(hrt_address addr, uint32_t *data);
+
+int atomisp_css_init(struct atomisp_device *isp);
+
+void atomisp_css_uninit(struct atomisp_device *isp);
+
+void atomisp_css_suspend(struct atomisp_device *isp);
+
+int atomisp_css_resume(struct atomisp_device *isp);
+
+void atomisp_css_init_struct(struct atomisp_sub_device *asd);
+
+int atomisp_css_irq_translate(struct atomisp_device *isp,
+ unsigned int *infos);
+
+void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
+ unsigned int *infos);
+
+void atomisp_css_rx_clear_irq_info(enum mipi_port_id port,
+ unsigned int infos);
+
+int atomisp_css_irq_enable(struct atomisp_device *isp,
+ enum atomisp_css_irq_info info, bool enable);
+
+int atomisp_q_video_buffer_to_css(struct atomisp_sub_device *asd,
+ struct videobuf_vmalloc_memory *vm_mem,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_buffer_type css_buf_type,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+int atomisp_q_s3a_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_s3a_buf *s3a_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+int atomisp_q_metadata_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_metadata_buf *metadata_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_dis_buf *dis_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+void atomisp_css_mmu_invalidate_cache(void);
+
+void atomisp_css_mmu_invalidate_tlb(void);
+
+int atomisp_css_start(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id, bool in_reset);
+
+void atomisp_css_update_isp_params(struct atomisp_sub_device *asd);
+void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd,
+ struct ia_css_pipe *pipe);
+
+int atomisp_css_queue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id pipe_id,
+ enum atomisp_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer);
+
+int atomisp_css_dequeue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id pipe_id,
+ enum atomisp_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer);
+
+int atomisp_css_allocate_stat_buffers(struct atomisp_sub_device *asd,
+ u16 stream_id,
+ struct atomisp_s3a_buf *s3a_buf,
+ struct atomisp_dis_buf *dis_buf,
+ struct atomisp_metadata_buf *md_buf);
+
+void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd);
+
+void atomisp_css_free_3a_buffer(struct atomisp_s3a_buf *s3a_buf);
+
+void atomisp_css_free_dis_buffer(struct atomisp_dis_buf *dis_buf);
+
+void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf
+ *metadata_buf);
+
+int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id,
+ int source_pad);
+
+int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd);
+
+int atomisp_alloc_dis_coef_buf(struct atomisp_sub_device *asd);
+
+int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd);
+
+void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd);
+
+void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd,
+ struct atomisp_css_buffer *isp_css_buffer,
+ struct ia_css_isp_dvs_statistics_map *dvs_map);
+
+int atomisp_css_dequeue_event(struct atomisp_css_event *current_event);
+
+void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
+ struct atomisp_css_event *current_event);
+
+int atomisp_css_isys_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt,
+ int isys_stream);
+
+void atomisp_css_isys_set_link(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ int link,
+ int isys_stream);
+
+void atomisp_css_isys_set_valid(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ bool valid,
+ int isys_stream);
+
+void atomisp_css_isys_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format format,
+ int isys_stream);
+
+int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt);
+
+int atomisp_css_isys_two_stream_cfg(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format);
+
+void atomisp_css_isys_two_stream_cfg_update_stream1(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format,
+ unsigned int width, unsigned int height);
+
+void atomisp_css_isys_two_stream_cfg_update_stream2(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_input_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt);
+
+void atomisp_css_input_set_binning_factor(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int bin_factor);
+
+void atomisp_css_input_set_bayer_order(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_bayer_order bayer_order);
+
+void atomisp_css_input_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format format);
+
+int atomisp_css_input_set_effective_resolution(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int width,
+ unsigned int height);
+
+void atomisp_css_video_set_dis_envelope(struct atomisp_sub_device *asd,
+ unsigned int dvs_w, unsigned int dvs_h);
+
+void atomisp_css_input_set_two_pixels_per_clock(
+ struct atomisp_sub_device *asd,
+ bool two_ppc);
+
+void atomisp_css_enable_raw_binning(struct atomisp_sub_device *asd,
+ bool enable);
+
+void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable);
+
+void atomisp_css_capture_set_mode(struct atomisp_sub_device *asd,
+ enum atomisp_css_capture_mode mode);
+
+void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
+ enum atomisp_css_input_mode mode);
+
+void atomisp_css_capture_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable);
+
+void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable);
+
+void atomisp_css_video_enable_online(struct atomisp_sub_device *asd,
+ bool enable);
+
+void atomisp_css_enable_continuous(struct atomisp_sub_device *asd,
+ bool enable);
+
+void atomisp_css_enable_cvf(struct atomisp_sub_device *asd,
+ bool enable);
+
+int atomisp_css_input_configure_port(struct atomisp_sub_device *asd,
+ enum mipi_port_id port,
+ unsigned int num_lanes,
+ unsigned int timeout,
+ unsigned int mipi_freq,
+ enum atomisp_input_format metadata_format,
+ unsigned int metadata_width,
+ unsigned int metadata_height);
+
+int atomisp_css_frame_allocate(struct atomisp_css_frame **frame,
+ unsigned int width, unsigned int height,
+ enum atomisp_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth);
+
+int atomisp_css_frame_allocate_from_info(struct atomisp_css_frame **frame,
+ const struct atomisp_css_frame_info *info);
+
+void atomisp_css_frame_free(struct atomisp_css_frame *frame);
+
+int atomisp_css_frame_map(struct atomisp_css_frame **frame,
+ const struct atomisp_css_frame_info *info,
+ const void __user *data, uint16_t attribute,
+ void *context);
+
+int atomisp_css_set_black_frame(struct atomisp_sub_device *asd,
+ const struct atomisp_css_frame *raw_black_frame);
+
+int atomisp_css_allocate_continuous_frames(bool init_time,
+ struct atomisp_sub_device *asd);
+
+void atomisp_css_update_continuous_frames(struct atomisp_sub_device *asd);
+
+void atomisp_create_pipes_stream(struct atomisp_sub_device *asd);
+void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd);
+
+int atomisp_css_stop(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id, bool in_reset);
+
+int atomisp_css_continuous_set_num_raw_frames(
+ struct atomisp_sub_device *asd,
+ int num_frames);
+
+void atomisp_css_disable_vf_pp(struct atomisp_sub_device *asd,
+ bool disable);
+
+int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_yuvpp_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_yuvpp_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_yuvpp_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_yuvpp_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_capture_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_video_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
+ u16 source_pad,
+ struct atomisp_css_frame_info *frame_info);
+
+int atomisp_css_video_configure_viewfinder(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_capture_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_video_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_capture_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_copy_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_capture_get_output_raw_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_preview_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_capture_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_video_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_preview_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_capture_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_video_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_offline_capture_configure(struct atomisp_sub_device *asd,
+ int num_captures, unsigned int skip, int offset);
+int atomisp_css_exp_id_capture(struct atomisp_sub_device *asd, int exp_id);
+int atomisp_css_exp_id_unlock(struct atomisp_sub_device *asd, int exp_id);
+
+int atomisp_css_capture_enable_xnr(struct atomisp_sub_device *asd,
+ bool enable);
+
+void atomisp_css_send_input_frame(struct atomisp_sub_device *asd,
+ unsigned short *data, unsigned int width,
+ unsigned int height);
+
+bool atomisp_css_isp_has_started(void);
+
+void atomisp_css_request_flash(struct atomisp_sub_device *asd);
+
+void atomisp_css_set_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_wb_config *wb_config);
+
+void atomisp_css_set_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ob_config *ob_config);
+
+void atomisp_css_set_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_dp_config *dp_config);
+
+void atomisp_css_set_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_de_config *de_config);
+
+void atomisp_css_set_dz_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_dz_config *dz_config);
+
+void atomisp_css_set_default_de_config(struct atomisp_sub_device *asd);
+
+void atomisp_css_set_ce_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ce_config *ce_config);
+
+void atomisp_css_set_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_nr_config *nr_config);
+
+void atomisp_css_set_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ee_config *ee_config);
+
+void atomisp_css_set_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_tnr_config *tnr_config);
+
+void atomisp_css_set_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *cc_config);
+
+void atomisp_css_set_macc_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_macc_table *macc_table);
+
+void atomisp_css_set_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_gamma_table *gamma_table);
+
+void atomisp_css_set_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_ctc_table *ctc_table);
+
+void atomisp_css_set_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_gc_config *gc_config);
+
+void atomisp_css_set_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_3a_config *s3a_config);
+
+void atomisp_css_video_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector);
+
+void atomisp_css_set_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs);
+
+int atomisp_css_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs);
+
+void atomisp_css_set_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int zoom);
+
+int atomisp_css_get_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_wb_config *config);
+
+int atomisp_css_get_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_ob_config *config);
+
+int atomisp_css_get_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_dp_config *config);
+
+int atomisp_css_get_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_de_config *config);
+
+int atomisp_css_get_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_nr_config *config);
+
+int atomisp_css_get_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_ee_config *config);
+
+int atomisp_css_get_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_tnr_config *config);
+
+int atomisp_css_get_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_ctc_table *config);
+
+int atomisp_css_get_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_gamma_table *config);
+
+int atomisp_css_get_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_gc_config *config);
+
+int atomisp_css_get_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_3a_config *config);
+
+int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_formats_config *formats_config);
+
+void atomisp_css_set_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_formats_config *formats_config);
+
+int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int *zoom);
+
+struct atomisp_css_shading_table *atomisp_css_shading_table_alloc(
+ unsigned int width, unsigned int height);
+
+void atomisp_css_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_shading_table *table);
+
+void atomisp_css_shading_table_free(struct atomisp_css_shading_table *table);
+
+struct atomisp_css_morph_table *atomisp_css_morph_table_allocate(
+ unsigned int width, unsigned int height);
+
+void atomisp_css_set_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_morph_table *table);
+
+void atomisp_css_get_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_morph_table *table);
+
+void atomisp_css_morph_table_free(struct atomisp_css_morph_table *table);
+
+void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp,
+ unsigned int overlap);
+
+int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats);
+
+int atomisp_css_update_stream(struct atomisp_sub_device *asd);
+
+int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd);
+
+int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd);
+
+int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd);
+
+void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd);
+
+int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ enum atomisp_css_pipe_id pipe_id,
+ unsigned int type);
+
+void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ enum atomisp_css_pipe_id pipe_id);
+
+int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd);
+
+void atomisp_css_acc_done(struct atomisp_sub_device *asd);
+
+int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ unsigned int index);
+
+void atomisp_css_unload_acc_binary(struct atomisp_sub_device *asd);
+
+struct atomisp_acc_fw;
+int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw);
+
+int atomisp_css_isr_thread(struct atomisp_device *isp,
+ bool *frame_done_found,
+ bool *css_pipe_done);
+
+bool atomisp_css_valid_sof(struct atomisp_device *isp);
+
+void atomisp_en_dz_capt_pipe(struct atomisp_sub_device *asd, bool enable);
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
new file mode 100644
index 000000000000..209bc9954a53
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -0,0 +1,4706 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include <media/videobuf-vmalloc.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+
+#include "mmu/isp_mmu.h"
+#include "mmu/sh_mmu_mrfld.h"
+#include "hmm/hmm_bo.h"
+#include "hmm/hmm.h"
+
+#include "atomisp_compat.h"
+#include "atomisp_internal.h"
+#include "atomisp_cmd.h"
+#include "atomisp-regs.h"
+#include "atomisp_fops.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_acc.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+#include <asm/intel-mid.h>
+
+#include "ia_css_debug.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_hrt.h"
+#include "ia_css_isys.h"
+
+#include <linux/pm_runtime.h>
+
+/* Assume max number of ACC stages */
+#define MAX_ACC_STAGES 20
+
+/* Ideally, this should come from CSS headers */
+#define NO_LINK -1
+
+/*
+ * To serialize MMIO accesses. This is due to ISP2400 silicon issue Sighting
+ * #4684168: if concurrent accesses happen, the system may hard hang.
+ */
+static DEFINE_SPINLOCK(mmio_lock);
+
+enum frame_info_type {
+ ATOMISP_CSS_VF_FRAME,
+ ATOMISP_CSS_SECOND_VF_FRAME,
+ ATOMISP_CSS_OUTPUT_FRAME,
+ ATOMISP_CSS_SECOND_OUTPUT_FRAME,
+ ATOMISP_CSS_RAW_FRAME,
+};
+
+struct bayer_ds_factor {
+ unsigned int numerator;
+ unsigned int denominator;
+};
+
+void atomisp_css_debug_dump_sp_sw_debug_info(void)
+{
+ ia_css_debug_dump_sp_sw_debug_info();
+}
+
+void atomisp_css_debug_dump_debug_info(const char *context)
+{
+ ia_css_debug_dump_debug_info(context);
+}
+
+void atomisp_css_debug_set_dtrace_level(const unsigned int trace_level)
+{
+ ia_css_debug_set_dtrace_level(trace_level);
+}
+
+unsigned int atomisp_css_debug_get_dtrace_level(void)
+{
+ return ia_css_debug_trace_level;
+}
+
+static void atomisp_css2_hw_store_8(hrt_address addr, uint8_t data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ _hrt_master_port_store_8(addr, data);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static void atomisp_css2_hw_store_16(hrt_address addr, uint16_t data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ _hrt_master_port_store_16(addr, data);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ _hrt_master_port_store_32(addr, data);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static uint8_t atomisp_css2_hw_load_8(hrt_address addr)
+{
+ unsigned long flags;
+ u8 ret;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ ret = _hrt_master_port_load_8(addr);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+ return ret;
+}
+
+static uint16_t atomisp_css2_hw_load_16(hrt_address addr)
+{
+ unsigned long flags;
+ u16 ret;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ ret = _hrt_master_port_load_16(addr);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+ return ret;
+}
+
+static uint32_t atomisp_css2_hw_load_32(hrt_address addr)
+{
+ unsigned long flags;
+ u32 ret;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ ret = _hrt_master_port_load_32(addr);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+ return ret;
+}
+
+static void atomisp_css2_hw_store(hrt_address addr,
+ const void *from, uint32_t n)
+{
+ unsigned long flags;
+ unsigned int i;
+ unsigned int _to = (unsigned int)addr;
+ const char *_from = (const char *)from;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ for (i = 0; i < n; i++, _to++, _from++)
+ _hrt_master_port_store_8(_to, *_from);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n)
+{
+ unsigned long flags;
+ unsigned int i;
+ char *_to = (char *)to;
+ unsigned int _from = (unsigned int)addr;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ for (i = 0; i < n; i++, _to++, _from++)
+ *_to = _hrt_master_port_load_8(_from);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static int atomisp_css2_dbg_print(const char *fmt, va_list args)
+{
+ vprintk(fmt, args);
+ return 0;
+}
+
+static int atomisp_css2_dbg_ftrace_print(const char *fmt, va_list args)
+{
+ ftrace_vprintk(fmt, args);
+ return 0;
+}
+
+static int atomisp_css2_err_print(const char *fmt, va_list args)
+{
+ vprintk(fmt, args);
+ return 0;
+}
+
+void atomisp_store_uint32(hrt_address addr, uint32_t data)
+{
+ atomisp_css2_hw_store_32(addr, data);
+}
+
+void atomisp_load_uint32(hrt_address addr, uint32_t *data)
+{
+ *data = atomisp_css2_hw_load_32(addr);
+}
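+
+/*
+ * Illustrative sketch (not called anywhere in this file): a read-modify-write
+ * of an ISP register should go through the wrappers above, so that each MMIO
+ * access is individually serialized by mmio_lock. The register address
+ * reg_addr below is hypothetical:
+ *
+ *	u32 val;
+ *
+ *	atomisp_load_uint32(reg_addr, &val);
+ *	atomisp_store_uint32(reg_addr, val | 0x1);
+ */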
+
+static int hmm_get_mmu_base_addr(unsigned int *mmu_base_addr)
+{
+ if (!sh_mmu_mrfld.get_pd_base) {
+ dev_err(atomisp_dev, "get mmu base address failed.\n");
+ return -EINVAL;
+ }
+
+ *mmu_base_addr = sh_mmu_mrfld.get_pd_base(&bo_device.mmu,
+ bo_device.mmu.base_address);
+ return 0;
+}
+
+static void atomisp_isp_parameters_clean_up(
+ struct atomisp_css_isp_config *config)
+{
+	/*
+	 * Set the config pointers to NULL so that stale configs are not
+	 * applied to the ISP again when some configs change and need to be
+	 * updated later.
+	 */
+ memset(config, 0, sizeof(*config));
+}
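+
+/*
+ * Illustrative flow for one-shot ISP parameters (a sketch, not driver code;
+ * the wb_config field is used only as an example): a caller sets a pointer in
+ * asd->params.config, the config is pushed to CSS, and the clean-up above
+ * NULLs the pointers so a stale config is not applied again later:
+ *
+ *	asd->params.config.wb_config = &asd->params.css_param.wb_config;
+ *	atomisp_css_update_isp_params(asd);
+ *
+ * atomisp_css_update_isp_params() applies the config and then calls the
+ * clean-up, so each pointer is consumed at most once.
+ */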
+
+static void __dump_pipe_config(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ unsigned int pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (stream_env->pipes[pipe_id]) {
+ struct ia_css_pipe_config *p_config;
+ struct ia_css_pipe_extra_config *pe_config;
+
+ p_config = &stream_env->pipe_configs[pipe_id];
+ pe_config = &stream_env->pipe_extra_configs[pipe_id];
+ dev_dbg(isp->dev, "dumping pipe[%d] config:\n", pipe_id);
+ dev_dbg(isp->dev,
+ "pipe_config.pipe_mode:%d.\n", p_config->mode);
+ dev_dbg(isp->dev,
+ "pipe_config.output_info[0] w=%d, h=%d.\n",
+ p_config->output_info[0].res.width,
+ p_config->output_info[0].res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.vf_pp_in_res w=%d, h=%d.\n",
+ p_config->vf_pp_in_res.width,
+ p_config->vf_pp_in_res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.capt_pp_in_res w=%d, h=%d.\n",
+ p_config->capt_pp_in_res.width,
+ p_config->capt_pp_in_res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.output.padded w=%d.\n",
+ p_config->output_info[0].padded_width);
+ dev_dbg(isp->dev,
+ "pipe_config.vf_output_info[0] w=%d, h=%d.\n",
+ p_config->vf_output_info[0].res.width,
+ p_config->vf_output_info[0].res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.bayer_ds_out_res w=%d, h=%d.\n",
+ p_config->bayer_ds_out_res.width,
+ p_config->bayer_ds_out_res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.envelope w=%d, h=%d.\n",
+ p_config->dvs_envelope.width,
+ p_config->dvs_envelope.height);
+ dev_dbg(isp->dev,
+ "pipe_config.dvs_frame_delay=%d.\n",
+ p_config->dvs_frame_delay);
+ dev_dbg(isp->dev,
+ "pipe_config.isp_pipe_version:%d.\n",
+ p_config->isp_pipe_version);
+ dev_dbg(isp->dev,
+ "pipe_config.acc_extension=%p.\n",
+ p_config->acc_extension);
+ dev_dbg(isp->dev,
+ "pipe_config.acc_stages=%p.\n",
+ p_config->acc_stages);
+ dev_dbg(isp->dev,
+ "pipe_config.num_acc_stages=%d.\n",
+ p_config->num_acc_stages);
+ dev_dbg(isp->dev,
+ "pipe_config.acc_num_execs=%d.\n",
+ p_config->acc_num_execs);
+ dev_dbg(isp->dev,
+ "pipe_config.default_capture_config.capture_mode=%d.\n",
+ p_config->default_capture_config.mode);
+ dev_dbg(isp->dev,
+ "pipe_config.enable_dz=%d.\n",
+ p_config->enable_dz);
+ dev_dbg(isp->dev,
+ "pipe_config.default_capture_config.enable_xnr=%d.\n",
+ p_config->default_capture_config.enable_xnr);
+ dev_dbg(isp->dev,
+ "dumping pipe[%d] extra config:\n", pipe_id);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_raw_binning:%d.\n",
+ pe_config->enable_raw_binning);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_yuv_ds:%d.\n",
+ pe_config->enable_yuv_ds);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_high_speed:%d.\n",
+ pe_config->enable_high_speed);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_dvs_6axis:%d.\n",
+ pe_config->enable_dvs_6axis);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_reduced_pipe:%d.\n",
+ pe_config->enable_reduced_pipe);
+ dev_dbg(isp->dev,
+ "pipe_(extra_)config.enable_dz:%d.\n",
+ p_config->enable_dz);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.disable_vf_pp:%d.\n",
+ pe_config->disable_vf_pp);
+ }
+}
+
+static void __dump_stream_config(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_stream_config *s_config;
+ int j;
+ bool valid_stream = false;
+
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ if (stream_env->pipes[j]) {
+ __dump_pipe_config(asd, stream_env, j);
+ valid_stream = true;
+ }
+ }
+ if (!valid_stream)
+ return;
+ s_config = &stream_env->stream_config;
+ dev_dbg(isp->dev, "stream_config.mode=%d.\n", s_config->mode);
+
+ if (s_config->mode == IA_CSS_INPUT_MODE_SENSOR ||
+ s_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ dev_dbg(isp->dev, "stream_config.source.port.port=%d.\n",
+ s_config->source.port.port);
+ dev_dbg(isp->dev, "stream_config.source.port.num_lanes=%d.\n",
+ s_config->source.port.num_lanes);
+ dev_dbg(isp->dev, "stream_config.source.port.timeout=%d.\n",
+ s_config->source.port.timeout);
+ dev_dbg(isp->dev, "stream_config.source.port.rxcount=0x%x.\n",
+ s_config->source.port.rxcount);
+ dev_dbg(isp->dev, "stream_config.source.port.compression.type=%d.\n",
+ s_config->source.port.compression.type);
+ dev_dbg(isp->dev,
+ "stream_config.source.port.compression.compressed_bits_per_pixel=%d.\n",
+ s_config->source.port.compression.
+ compressed_bits_per_pixel);
+ dev_dbg(isp->dev,
+ "stream_config.source.port.compression.uncompressed_bits_per_pixel=%d.\n",
+ s_config->source.port.compression.
+ uncompressed_bits_per_pixel);
+ } else if (s_config->mode == IA_CSS_INPUT_MODE_TPG) {
+ dev_dbg(isp->dev, "stream_config.source.tpg.id=%d.\n",
+ s_config->source.tpg.id);
+ dev_dbg(isp->dev, "stream_config.source.tpg.mode=%d.\n",
+ s_config->source.tpg.mode);
+ dev_dbg(isp->dev, "stream_config.source.tpg.x_mask=%d.\n",
+ s_config->source.tpg.x_mask);
+ dev_dbg(isp->dev, "stream_config.source.tpg.x_delta=%d.\n",
+ s_config->source.tpg.x_delta);
+ dev_dbg(isp->dev, "stream_config.source.tpg.y_mask=%d.\n",
+ s_config->source.tpg.y_mask);
+ dev_dbg(isp->dev, "stream_config.source.tpg.y_delta=%d.\n",
+ s_config->source.tpg.y_delta);
+ dev_dbg(isp->dev, "stream_config.source.tpg.xy_mask=%d.\n",
+ s_config->source.tpg.xy_mask);
+ } else if (s_config->mode == IA_CSS_INPUT_MODE_PRBS) {
+ dev_dbg(isp->dev, "stream_config.source.prbs.id=%d.\n",
+ s_config->source.prbs.id);
+ dev_dbg(isp->dev, "stream_config.source.prbs.h_blank=%d.\n",
+ s_config->source.prbs.h_blank);
+ dev_dbg(isp->dev, "stream_config.source.prbs.v_blank=%d.\n",
+ s_config->source.prbs.v_blank);
+ dev_dbg(isp->dev, "stream_config.source.prbs.seed=%d.\n",
+ s_config->source.prbs.seed);
+ dev_dbg(isp->dev, "stream_config.source.prbs.seed1=%d.\n",
+ s_config->source.prbs.seed1);
+ }
+
+ for (j = 0; j < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; j++) {
+		dev_dbg(isp->dev, "stream_config.isys_config[%d].input_res w=%d, h=%d.\n",
+ j,
+ s_config->isys_config[j].input_res.width,
+ s_config->isys_config[j].input_res.height);
+
+		dev_dbg(isp->dev, "stream_config.isys_config[%d].linked_isys_stream_id=%d\n",
+ j,
+ s_config->isys_config[j].linked_isys_stream_id);
+
+		dev_dbg(isp->dev, "stream_config.isys_config[%d].format=%d\n",
+ j,
+ s_config->isys_config[j].format);
+
+		dev_dbg(isp->dev, "stream_config.isys_config[%d].valid=%d.\n",
+ j,
+ s_config->isys_config[j].valid);
+ }
+
+ dev_dbg(isp->dev, "stream_config.input_config.input_res w=%d, h=%d.\n",
+ s_config->input_config.input_res.width,
+ s_config->input_config.input_res.height);
+
+ dev_dbg(isp->dev, "stream_config.input_config.effective_res w=%d, h=%d.\n",
+ s_config->input_config.effective_res.width,
+ s_config->input_config.effective_res.height);
+
+ dev_dbg(isp->dev, "stream_config.input_config.format=%d\n",
+ s_config->input_config.format);
+
+ dev_dbg(isp->dev, "stream_config.input_config.bayer_order=%d.\n",
+ s_config->input_config.bayer_order);
+
+ dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n",
+ s_config->pixels_per_clock);
+ dev_dbg(isp->dev, "stream_config.online=%d.\n", s_config->online);
+ dev_dbg(isp->dev, "stream_config.continuous=%d.\n",
+ s_config->continuous);
+ dev_dbg(isp->dev, "stream_config.disable_cont_viewfinder=%d.\n",
+ s_config->disable_cont_viewfinder);
+ dev_dbg(isp->dev, "stream_config.channel_id=%d.\n",
+ s_config->channel_id);
+ dev_dbg(isp->dev, "stream_config.init_num_cont_raw_buf=%d.\n",
+ s_config->init_num_cont_raw_buf);
+ dev_dbg(isp->dev, "stream_config.target_num_cont_raw_buf=%d.\n",
+ s_config->target_num_cont_raw_buf);
+ dev_dbg(isp->dev, "stream_config.left_padding=%d.\n",
+ s_config->left_padding);
+ dev_dbg(isp->dev, "stream_config.sensor_binning_factor=%d.\n",
+ s_config->sensor_binning_factor);
+ dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n",
+ s_config->pixels_per_clock);
+ dev_dbg(isp->dev, "stream_config.pack_raw_pixels=%d.\n",
+ s_config->pack_raw_pixels);
+ dev_dbg(isp->dev, "stream_config.flash_gpio_pin=%d.\n",
+ s_config->flash_gpio_pin);
+ dev_dbg(isp->dev, "stream_config.mipi_buffer_config.size_mem_words=%d.\n",
+ s_config->mipi_buffer_config.size_mem_words);
+ dev_dbg(isp->dev, "stream_config.mipi_buffer_config.contiguous=%d.\n",
+ s_config->mipi_buffer_config.contiguous);
+ dev_dbg(isp->dev, "stream_config.metadata_config.data_type=%d.\n",
+ s_config->metadata_config.data_type);
+ dev_dbg(isp->dev, "stream_config.metadata_config.resolution w=%d, h=%d.\n",
+ s_config->metadata_config.resolution.width,
+ s_config->metadata_config.resolution.height);
+}
+
+static int __destroy_stream(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env, bool force)
+{
+ struct atomisp_device *isp = asd->isp;
+ int i;
+ unsigned long timeout;
+
+ if (!stream_env->stream)
+ return 0;
+
+ if (!force) {
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ if (stream_env->update_pipe[i])
+ break;
+
+ if (i == IA_CSS_PIPE_ID_NUM)
+ return 0;
+ }
+
+ if (stream_env->stream_state == CSS_STREAM_STARTED
+ && ia_css_stream_stop(stream_env->stream) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "stop stream failed.\n");
+ return -EINVAL;
+ }
+
+ if (stream_env->stream_state == CSS_STREAM_STARTED) {
+ timeout = jiffies + msecs_to_jiffies(40);
+ while (1) {
+ if (ia_css_stream_has_stopped(stream_env->stream))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(isp->dev, "stop stream timeout.\n");
+ break;
+ }
+
+ usleep_range(100, 200);
+ }
+ }
+
+ stream_env->stream_state = CSS_STREAM_STOPPED;
+
+ if (ia_css_stream_destroy(stream_env->stream) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "destroy stream failed.\n");
+ return -EINVAL;
+ }
+ stream_env->stream_state = CSS_STREAM_UNINIT;
+ stream_env->stream = NULL;
+
+ return 0;
+}
+
+static int __destroy_streams(struct atomisp_sub_device *asd, bool force)
+{
+ int ret, i;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ ret = __destroy_stream(asd, &asd->stream_env[i], force);
+ if (ret)
+ return ret;
+ }
+ asd->stream_prepared = false;
+ return 0;
+}
+
+static int __create_stream(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env)
+{
+ int pipe_index = 0, i;
+ struct ia_css_pipe *multi_pipes[IA_CSS_PIPE_ID_NUM];
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (stream_env->pipes[i])
+ multi_pipes[pipe_index++] = stream_env->pipes[i];
+ }
+ if (pipe_index == 0)
+ return 0;
+
+ stream_env->stream_config.target_num_cont_raw_buf =
+ asd->continuous_raw_buffer_size->val;
+ stream_env->stream_config.channel_id = stream_env->ch_id;
+ stream_env->stream_config.ia_css_enable_raw_buffer_locking =
+ asd->enable_raw_buffer_lock->val;
+
+ __dump_stream_config(asd, stream_env);
+ if (ia_css_stream_create(&stream_env->stream_config,
+ pipe_index, multi_pipes, &stream_env->stream) != IA_CSS_SUCCESS)
+ return -EINVAL;
+ if (ia_css_stream_get_info(stream_env->stream,
+ &stream_env->stream_info) != IA_CSS_SUCCESS) {
+ ia_css_stream_destroy(stream_env->stream);
+ stream_env->stream = NULL;
+ return -EINVAL;
+ }
+
+ stream_env->stream_state = CSS_STREAM_CREATED;
+ return 0;
+}
+
+static int __create_streams(struct atomisp_sub_device *asd)
+{
+ int ret, i;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ ret = __create_stream(asd, &asd->stream_env[i]);
+ if (ret)
+ goto rollback;
+ }
+ asd->stream_prepared = true;
+ return 0;
+rollback:
+ for (i--; i >= 0; i--)
+ __destroy_stream(asd, &asd->stream_env[i], true);
+ return ret;
+}
+
+static int __destroy_stream_pipes(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ bool force)
+{
+ struct atomisp_device *isp = asd->isp;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (!stream_env->pipes[i] ||
+ !(force || stream_env->update_pipe[i]))
+ continue;
+ if (ia_css_pipe_destroy(stream_env->pipes[i])
+ != IA_CSS_SUCCESS) {
+			dev_err(isp->dev,
+				"destroy pipe[%d] failed, cannot recover.\n", i);
+ ret = -EINVAL;
+ }
+ stream_env->pipes[i] = NULL;
+ stream_env->update_pipe[i] = false;
+ }
+ return ret;
+}
+
+static int __destroy_pipes(struct atomisp_sub_device *asd, bool force)
+{
+ struct atomisp_device *isp = asd->isp;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ if (asd->stream_env[i].stream) {
+ dev_err(isp->dev,
+ "cannot destroy css pipes for stream[%d].\n",
+ i);
+ continue;
+ }
+
+ ret = __destroy_stream_pipes(asd, &asd->stream_env[i], force);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd)
+{
+ __destroy_streams(asd, true);
+ __destroy_pipes(asd, true);
+}
+
+static void __apply_additional_pipe_config(
+ struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (pipe_id < 0 || pipe_id >= IA_CSS_PIPE_ID_NUM) {
+ dev_err(isp->dev,
+ "wrong pipe_id for additional pipe config.\n");
+ return;
+ }
+
+ /* apply default pipe config */
+ stream_env->pipe_configs[pipe_id].isp_pipe_version = 2;
+	stream_env->pipe_configs[pipe_id].enable_dz = !asd->disable_dz->val;
+	/* apply isp 2.2 specific config for baytrail */
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_CAPTURE:
+		/*
+		 * Enable capture pp/dz manually, or digital zoom would
+		 * fail.
+		 */
+ if (stream_env->pipe_configs[pipe_id].
+ default_capture_config.mode == CSS_CAPTURE_MODE_RAW)
+ stream_env->pipe_configs[pipe_id].enable_dz = false;
+
+ if (atomisp_hw_is_isp2401) {
+			/*
+			 * The ISP defaults to ISP2.2; the camera HAL controls
+			 * whether ISP2.7 is used.
+			 */
+ if (asd->select_isp_version->val == ATOMISP_CSS_ISP_PIPE_VERSION_2_7)
+ stream_env->pipe_configs[pipe_id].isp_pipe_version = SH_CSS_ISP_PIPE_VERSION_2_7;
+ else
+ stream_env->pipe_configs[pipe_id].isp_pipe_version = SH_CSS_ISP_PIPE_VERSION_2_2;
+ }
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+		/*
+		 * Enable the reduced pipe so that the video_dz_2_min binary
+		 * is selected.
+		 */
+ stream_env->pipe_extra_configs[pipe_id]
+ .enable_reduced_pipe = true;
+ stream_env->pipe_configs[pipe_id]
+ .enable_dz = false;
+ if (ATOMISP_SOC_CAMERA(asd))
+ stream_env->pipe_configs[pipe_id].enable_dz = true;
+
+ if (asd->params.video_dis_en) {
+ stream_env->pipe_extra_configs[pipe_id]
+ .enable_dvs_6axis = true;
+ stream_env->pipe_configs[pipe_id]
+ .dvs_frame_delay =
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+ }
+ break;
+ case IA_CSS_PIPE_ID_PREVIEW:
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ case IA_CSS_PIPE_ID_COPY:
+ if (ATOMISP_SOC_CAMERA(asd))
+ stream_env->pipe_configs[pipe_id].enable_dz = true;
+ else
+ stream_env->pipe_configs[pipe_id].enable_dz = false;
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ stream_env->pipe_configs[pipe_id].mode = IA_CSS_PIPE_MODE_ACC;
+ stream_env->pipe_configs[pipe_id].enable_dz = false;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id)
+{
+ if (!asd)
+ return false;
+
+ if (pipe_id == CSS_PIPE_ID_ACC || pipe_id == CSS_PIPE_ID_YUVPP)
+ return true;
+
+ if (asd->vfpp) {
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ return true;
+ else
+ return false;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE)
+ return true;
+ else
+ return false;
+ }
+ }
+
+ if (!asd->run_mode)
+ return false;
+
+ if (asd->copy_mode && pipe_id == IA_CSS_PIPE_ID_COPY)
+ return true;
+
+ switch (asd->run_mode->val) {
+ case ATOMISP_RUN_MODE_STILL_CAPTURE:
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE)
+ return true;
+ else
+ return false;
+ case ATOMISP_RUN_MODE_PREVIEW:
+ if (!asd->continuous_mode->val) {
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ return true;
+ else
+ return false;
+ }
+ /* fall through to ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE */
+ case ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE:
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ return true;
+ else
+ return false;
+ case ATOMISP_RUN_MODE_VIDEO:
+ if (!asd->continuous_mode->val) {
+ if (pipe_id == IA_CSS_PIPE_ID_VIDEO ||
+ pipe_id == IA_CSS_PIPE_ID_YUVPP)
+ return true;
+ else
+ return false;
+ }
+ /* fall through to ATOMISP_RUN_MODE_SDV */
+ case ATOMISP_RUN_MODE_SDV:
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ return true;
+ else
+ return false;
+ }
+
+ return false;
+}
+
+static int __create_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_pipe_extra_config extra_config;
+ enum ia_css_err ret;
+
+ if (pipe_id >= IA_CSS_PIPE_ID_NUM)
+ return -EINVAL;
+
+ if (pipe_id != CSS_PIPE_ID_ACC &&
+ !stream_env->pipe_configs[pipe_id].output_info[0].res.width)
+ return 0;
+
+ if (pipe_id == CSS_PIPE_ID_ACC &&
+ !stream_env->pipe_configs[pipe_id].acc_extension)
+ return 0;
+
+ if (!is_pipe_valid_to_current_run_mode(asd, pipe_id))
+ return 0;
+
+ ia_css_pipe_extra_config_defaults(&extra_config);
+
+ __apply_additional_pipe_config(asd, stream_env, pipe_id);
+ if (!memcmp(&extra_config,
+ &stream_env->pipe_extra_configs[pipe_id],
+ sizeof(extra_config)))
+ ret = ia_css_pipe_create(
+ &stream_env->pipe_configs[pipe_id],
+ &stream_env->pipes[pipe_id]);
+ else
+ ret = ia_css_pipe_create_extra(
+ &stream_env->pipe_configs[pipe_id],
+ &stream_env->pipe_extra_configs[pipe_id],
+ &stream_env->pipes[pipe_id]);
+ if (ret != IA_CSS_SUCCESS)
+ dev_err(isp->dev, "create pipe[%d] error.\n", pipe_id);
+ return ret;
+}
+
+static int __create_pipes(struct atomisp_sub_device *asd)
+{
+ enum ia_css_err ret;
+ int i, j;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ ret = __create_pipe(asd, &asd->stream_env[i], j);
+ if (ret != IA_CSS_SUCCESS)
+ break;
+ }
+ if (j < IA_CSS_PIPE_ID_NUM)
+ goto pipe_err;
+ }
+ return 0;
+pipe_err:
+ for (; i >= 0; i--) {
+ for (j--; j >= 0; j--) {
+ if (asd->stream_env[i].pipes[j]) {
+ ia_css_pipe_destroy(asd->stream_env[i].pipes[j]);
+ asd->stream_env[i].pipes[j] = NULL;
+ }
+ }
+ j = IA_CSS_PIPE_ID_NUM;
+ }
+ return -EINVAL;
+}
+
+void atomisp_create_pipes_stream(struct atomisp_sub_device *asd)
+{
+ __create_pipes(asd);
+ __create_streams(asd);
+}
+
+int atomisp_css_update_stream(struct atomisp_sub_device *asd)
+{
+ int ret;
+ struct atomisp_device *isp = asd->isp;
+
+ if (__destroy_streams(asd, true) != IA_CSS_SUCCESS)
+ dev_warn(isp->dev, "destroy stream failed.\n");
+
+ if (__destroy_pipes(asd, true) != IA_CSS_SUCCESS)
+ dev_warn(isp->dev, "destroy pipe failed.\n");
+
+ ret = __create_pipes(asd);
+ if (ret != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "create pipe failed %d.\n", ret);
+ return -EIO;
+ }
+
+ ret = __create_streams(asd);
+ if (ret != IA_CSS_SUCCESS) {
+ dev_warn(isp->dev, "create stream failed %d.\n", ret);
+ __destroy_pipes(asd, true);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int atomisp_css_init(struct atomisp_device *isp)
+{
+ unsigned int mmu_base_addr;
+ int ret;
+ enum ia_css_err err;
+
+ ret = hmm_get_mmu_base_addr(&mmu_base_addr);
+ if (ret)
+ return ret;
+
+ /* Init ISP */
+ err = ia_css_init(isp->dev, &isp->css_env.isp_css_env, NULL,
+ (uint32_t)mmu_base_addr, IA_CSS_IRQ_TYPE_PULSE);
+ if (err != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "css init failed --- bad firmware?\n");
+ return -EINVAL;
+ }
+ ia_css_enable_isys_event_queue(true);
+
+ isp->css_initialized = true;
+ dev_dbg(isp->dev, "sh_css_init success\n");
+
+ return 0;
+}
+
+static inline int __set_css_print_env(struct atomisp_device *isp, int opt)
+{
+ int ret = 0;
+
+ if (opt == 0)
+ isp->css_env.isp_css_env.print_env.debug_print = NULL;
+ else if (opt == 1)
+ isp->css_env.isp_css_env.print_env.debug_print =
+ atomisp_css2_dbg_ftrace_print;
+ else if (opt == 2)
+ isp->css_env.isp_css_env.print_env.debug_print =
+ atomisp_css2_dbg_print;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
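+
+/*
+ * The opt values map as: 0 disables CSS debug prints, 1 routes them to
+ * ftrace, 2 routes them to printk; anything else is rejected. For example
+ * (illustrative only), __set_css_print_env(isp, 1) before loading firmware
+ * sends CSS traces to ftrace.
+ */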
+
+int atomisp_css_check_firmware_version(struct atomisp_device *isp)
+{
+ if (!sh_css_check_firmware_version(isp->dev, (void *)isp->firmware->data)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int atomisp_css_load_firmware(struct atomisp_device *isp)
+{
+ enum ia_css_err err;
+
+ /* set css env */
+ isp->css_env.isp_css_fw.data = (void *)isp->firmware->data;
+ isp->css_env.isp_css_fw.bytes = isp->firmware->size;
+
+ isp->css_env.isp_css_env.hw_access_env.store_8 =
+ atomisp_css2_hw_store_8;
+ isp->css_env.isp_css_env.hw_access_env.store_16 =
+ atomisp_css2_hw_store_16;
+ isp->css_env.isp_css_env.hw_access_env.store_32 =
+ atomisp_css2_hw_store_32;
+
+ isp->css_env.isp_css_env.hw_access_env.load_8 = atomisp_css2_hw_load_8;
+ isp->css_env.isp_css_env.hw_access_env.load_16 =
+ atomisp_css2_hw_load_16;
+ isp->css_env.isp_css_env.hw_access_env.load_32 =
+ atomisp_css2_hw_load_32;
+
+ isp->css_env.isp_css_env.hw_access_env.load = atomisp_css2_hw_load;
+ isp->css_env.isp_css_env.hw_access_env.store = atomisp_css2_hw_store;
+
+ __set_css_print_env(isp, dbg_func);
+
+ isp->css_env.isp_css_env.print_env.error_print = atomisp_css2_err_print;
+
+ /* load isp fw into ISP memory */
+ err = ia_css_load_firmware(isp->dev, &isp->css_env.isp_css_env,
+ &isp->css_env.isp_css_fw);
+ if (err != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "css load fw failed.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_unload_firmware(struct atomisp_device *isp)
+{
+ ia_css_unload_firmware();
+}
+
+void atomisp_css_uninit(struct atomisp_device *isp)
+{
+ struct atomisp_sub_device *asd;
+ unsigned int i;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ atomisp_isp_parameters_clean_up(&asd->params.config);
+ asd->params.css_update_params_needed = false;
+ }
+
+ isp->css_initialized = false;
+ ia_css_uninit();
+}
+
+void atomisp_css_suspend(struct atomisp_device *isp)
+{
+ isp->css_initialized = false;
+ ia_css_uninit();
+}
+
+int atomisp_css_resume(struct atomisp_device *isp)
+{
+ unsigned int mmu_base_addr;
+ int ret;
+
+ ret = hmm_get_mmu_base_addr(&mmu_base_addr);
+ if (ret) {
+ dev_err(isp->dev, "get base address error.\n");
+ return -EINVAL;
+ }
+
+ ret = ia_css_init(isp->dev, &isp->css_env.isp_css_env, NULL,
+ mmu_base_addr, IA_CSS_IRQ_TYPE_PULSE);
+ if (ret) {
+ dev_err(isp->dev, "re-init css failed.\n");
+ return -EINVAL;
+ }
+ ia_css_enable_isys_event_queue(true);
+
+ isp->css_initialized = true;
+ return 0;
+}
+
+int atomisp_css_irq_translate(struct atomisp_device *isp,
+ unsigned int *infos)
+{
+ int err;
+
+ err = ia_css_irq_translate(infos);
+ if (err != IA_CSS_SUCCESS) {
+ dev_warn(isp->dev,
+			 "%s: failed to translate irq (err = %d, infos = %d)\n",
+ __func__, err, *infos);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
+ unsigned int *infos)
+{
+#ifndef ISP2401_NEW_INPUT_SYSTEM
+ ia_css_isys_rx_get_irq_info(port, infos);
+#else
+ *infos = 0;
+#endif
+}
+
+void atomisp_css_rx_clear_irq_info(enum mipi_port_id port,
+ unsigned int infos)
+{
+#ifndef ISP2401_NEW_INPUT_SYSTEM
+ ia_css_isys_rx_clear_irq_info(port, infos);
+#endif
+}
+
+int atomisp_css_irq_enable(struct atomisp_device *isp,
+ enum atomisp_css_irq_info info, bool enable)
+{
+ dev_dbg(isp->dev, "%s: css irq info 0x%08x: %s.\n",
+ __func__, info,
+ enable ? "enable" : "disable");
+ if (ia_css_irq_enable(info, enable) != IA_CSS_SUCCESS) {
+ dev_warn(isp->dev, "%s:Invalid irq info.\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_init_struct(struct atomisp_sub_device *asd)
+{
+ int i, j;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ asd->stream_env[i].stream = NULL;
+ for (j = 0; j < IA_CSS_PIPE_MODE_NUM; j++) {
+ asd->stream_env[i].pipes[j] = NULL;
+ asd->stream_env[i].update_pipe[j] = false;
+ ia_css_pipe_config_defaults(
+ &asd->stream_env[i].pipe_configs[j]);
+ ia_css_pipe_extra_config_defaults(
+ &asd->stream_env[i].pipe_extra_configs[j]);
+ }
+ ia_css_stream_config_defaults(&asd->stream_env[i].stream_config);
+ }
+}
+
+int atomisp_q_video_buffer_to_css(struct atomisp_sub_device *asd,
+ struct videobuf_vmalloc_memory *vm_mem,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_buffer_type css_buf_type,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer css_buf = {0};
+ enum ia_css_err err;
+
+ css_buf.type = css_buf_type;
+ css_buf.data.frame = vm_mem->vaddr;
+
+ err = ia_css_pipe_enqueue_buffer(
+ stream_env->pipes[css_pipe_id], &css_buf);
+ if (err != IA_CSS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atomisp_q_metadata_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_metadata_buf *metadata_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer buffer = {0};
+ struct atomisp_device *isp = asd->isp;
+
+ buffer.type = IA_CSS_BUFFER_TYPE_METADATA;
+ buffer.data.metadata = metadata_buf->metadata;
+ if (ia_css_pipe_enqueue_buffer(stream_env->pipes[css_pipe_id],
+ &buffer)) {
+		dev_err(isp->dev, "failed to queue metadata buffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_q_s3a_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_s3a_buf *s3a_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer buffer = {0};
+ struct atomisp_device *isp = asd->isp;
+
+ buffer.type = IA_CSS_BUFFER_TYPE_3A_STATISTICS;
+ buffer.data.stats_3a = s3a_buf->s3a_data;
+ if (ia_css_pipe_enqueue_buffer(
+ stream_env->pipes[css_pipe_id],
+ &buffer)) {
+		dev_dbg(isp->dev, "failed to queue s3a stat buffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_dis_buf *dis_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer buffer = {0};
+ struct atomisp_device *isp = asd->isp;
+
+ buffer.type = IA_CSS_BUFFER_TYPE_DIS_STATISTICS;
+ buffer.data.stats_dvs = dis_buf->dis_data;
+ if (ia_css_pipe_enqueue_buffer(
+ stream_env->pipes[css_pipe_id],
+ &buffer)) {
+		dev_dbg(isp->dev, "failed to queue dvs stat buffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_mmu_invalidate_cache(void)
+{
+ ia_css_mmu_invalidate_cache();
+}
+
+void atomisp_css_mmu_invalidate_tlb(void)
+{
+ ia_css_mmu_invalidate_cache();
+}
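+
+/*
+ * Note: both wrappers above call ia_css_mmu_invalidate_cache(), which appears
+ * to be the single CSS 2.0 entry point covering both the cache and TLB
+ * invalidation cases.
+ */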
+
+int atomisp_css_start(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id, bool in_reset)
+{
+ struct atomisp_device *isp = asd->isp;
+ bool sp_is_started = false;
+ int ret = 0, i = 0;
+
+ if (in_reset) {
+ if (__destroy_streams(asd, true))
+ dev_warn(isp->dev, "destroy stream failed.\n");
+
+ if (__destroy_pipes(asd, true))
+ dev_warn(isp->dev, "destroy pipe failed.\n");
+
+ if (__create_pipes(asd)) {
+ dev_err(isp->dev, "create pipe error.\n");
+ return -EINVAL;
+ }
+ if (__create_streams(asd)) {
+ dev_err(isp->dev, "create stream error.\n");
+ ret = -EINVAL;
+ goto stream_err;
+ }
+		/* in_reset == true: extension firmware is reloaded after the recovery */
+ atomisp_acc_load_extensions(asd);
+ }
+
+	/*
+	 * For the dual stream case it is possible that:
+	 * 1. this stream is at the stage where set_fmt has been called but
+	 *    stream on has not;
+	 * 2. stream off has been called for the other stream, which triggers
+	 *    a css reset.
+	 *
+	 * In that case the stream created in set_fmt has been destroyed and
+	 * needs to be recreated on the next stream on.
+	 */
+	if (!asd->stream_prepared) {
+ if (__create_pipes(asd)) {
+ dev_err(isp->dev, "create pipe error.\n");
+ return -EINVAL;
+ }
+ if (__create_streams(asd)) {
+ dev_err(isp->dev, "create stream error.\n");
+ ret = -EINVAL;
+ goto stream_err;
+ }
+ }
+	/*
+	 * The SP can only be started once. If atomisp_streaming_count()
+	 * reports that some subdev is already streaming, the SP has been
+	 * started previously, so the start-SP procedure must be skipped.
+	 */
+ if (atomisp_streaming_count(isp)) {
+ dev_dbg(isp->dev, "skip start sp\n");
+ } else {
+ if (!sh_css_hrt_system_is_idle())
+ dev_err(isp->dev, "CSS HW not idle before starting SP\n");
+ if (ia_css_start_sp() != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "start sp error.\n");
+ ret = -EINVAL;
+ goto start_err;
+ } else {
+ sp_is_started = true;
+ }
+ }
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ if (asd->stream_env[i].stream) {
+ if (ia_css_stream_start(asd->stream_env[i]
+ .stream) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "stream[%d] start error.\n", i);
+ ret = -EINVAL;
+ goto start_err;
+ } else {
+ asd->stream_env[i].stream_state = CSS_STREAM_STARTED;
+ dev_dbg(isp->dev, "stream[%d] started.\n", i);
+ }
+ }
+ }
+
+ return 0;
+
+start_err:
+ __destroy_streams(asd, true);
+stream_err:
+ __destroy_pipes(asd, true);
+
+	/*
+	 * css 2.0 API limitation: ia_css_stop_sp() may only be called after
+	 * all pipes have been destroyed. The SP also cannot be stopped while
+	 * other streams are still in use.
+	 */
+ if ((atomisp_streaming_count(isp) == 0) && sp_is_started)
+ ia_css_stop_sp();
+
+ return ret;
+}
+
+void atomisp_css_update_isp_params(struct atomisp_sub_device *asd)
+{
+ /*
+ * FIXME!
+	 * For the ISP2401 new input system this API is under development;
+	 * calling it would cause a kernel panic.
+ *
+ * VIED BZ: 1458
+ *
+ * Check if it is Cherry Trail and also new input system
+ */
+ if (asd->copy_mode) {
+ dev_warn(asd->isp->dev,
+			 "%s: ia_css_stream_set_isp_config() not supported in copy mode!\n",
+ __func__);
+ return;
+ }
+
+ ia_css_stream_set_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &asd->params.config);
+ atomisp_isp_parameters_clean_up(&asd->params.config);
+}
+
+void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd,
+ struct ia_css_pipe *pipe)
+{
+ enum ia_css_err ret;
+
+ if (!pipe) {
+ atomisp_css_update_isp_params(asd);
+ return;
+ }
+
+ dev_dbg(asd->isp->dev,
+ "%s: apply parameter for ia_css_frame %p with isp_config_id %d on pipe %p.\n",
+ __func__, asd->params.config.output_frame,
+ asd->params.config.isp_config_id, pipe);
+
+ ret = ia_css_stream_set_isp_config_on_pipe(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &asd->params.config, pipe);
+ if (ret != IA_CSS_SUCCESS)
+ dev_warn(asd->isp->dev, "%s: ia_css_stream_set_isp_config_on_pipe failed %d\n",
+ __func__, ret);
+ atomisp_isp_parameters_clean_up(&asd->params.config);
+}
+
+int atomisp_css_queue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id pipe_id,
+ enum atomisp_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer)
+{
+ if (ia_css_pipe_enqueue_buffer(
+ asd->stream_env[stream_id].pipes[pipe_id],
+ &isp_css_buffer->css_buffer)
+ != IA_CSS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atomisp_css_dequeue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id pipe_id,
+ enum atomisp_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer)
+{
+ struct atomisp_device *isp = asd->isp;
+ enum ia_css_err err;
+
+ err = ia_css_pipe_dequeue_buffer(
+ asd->stream_env[stream_id].pipes[pipe_id],
+ &isp_css_buffer->css_buffer);
+ if (err != IA_CSS_SUCCESS) {
+ dev_err(isp->dev,
+ "ia_css_pipe_dequeue_buffer failed: 0x%x\n", err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_css_allocate_stat_buffers(struct atomisp_sub_device *asd,
+ u16 stream_id,
+ struct atomisp_s3a_buf *s3a_buf,
+ struct atomisp_dis_buf *dis_buf,
+ struct atomisp_metadata_buf *md_buf)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+
+ if (s3a_buf && asd->params.curr_grid_info.s3a_grid.enable) {
+ void *s3a_ptr;
+
+ s3a_buf->s3a_data = ia_css_isp_3a_statistics_allocate(
+ &asd->params.curr_grid_info.s3a_grid);
+ if (!s3a_buf->s3a_data) {
+ dev_err(isp->dev, "3a buf allocation failed.\n");
+ return -EINVAL;
+ }
+
+ s3a_ptr = hmm_vmap(s3a_buf->s3a_data->data_ptr, true);
+ s3a_buf->s3a_map = ia_css_isp_3a_statistics_map_allocate(
+ s3a_buf->s3a_data, s3a_ptr);
+ }
+
+ if (dis_buf && dvs_grid_info && dvs_grid_info->enable) {
+ void *dvs_ptr;
+
+ dis_buf->dis_data = ia_css_isp_dvs2_statistics_allocate(
+ dvs_grid_info);
+ if (!dis_buf->dis_data) {
+ dev_err(isp->dev, "dvs buf allocation failed.\n");
+ if (s3a_buf)
+ ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
+ return -EINVAL;
+ }
+
+ dvs_ptr = hmm_vmap(dis_buf->dis_data->data_ptr, true);
+ dis_buf->dvs_map = ia_css_isp_dvs_statistics_map_allocate(
+ dis_buf->dis_data, dvs_ptr);
+ }
+
+ if (asd->stream_env[stream_id].stream_info.
+ metadata_info.size && md_buf) {
+ md_buf->metadata = ia_css_metadata_allocate(
+ &asd->stream_env[stream_id].stream_info.metadata_info);
+ if (!md_buf->metadata) {
+ if (s3a_buf)
+ ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
+ if (dis_buf)
+ ia_css_isp_dvs2_statistics_free(dis_buf->dis_data);
+ dev_err(isp->dev, "metadata buf allocation failed.\n");
+ return -EINVAL;
+ }
+ md_buf->md_vptr = hmm_vmap(md_buf->metadata->address, false);
+ }
+
+ return 0;
+}
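+
+/*
+ * Illustrative pairing (a sketch, not part of the driver flow; the stream id
+ * 0 is hypothetical and the 3A grid is assumed to be enabled): a buffer
+ * allocated above is released with the matching free helper below:
+ *
+ *	struct atomisp_s3a_buf s3a = {};
+ *
+ *	if (!atomisp_css_allocate_stat_buffers(asd, 0, &s3a, NULL, NULL))
+ *		atomisp_css_free_3a_buffer(&s3a);
+ */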
+
+void atomisp_css_free_3a_buffer(struct atomisp_s3a_buf *s3a_buf)
+{
+ if (s3a_buf->s3a_data)
+ hmm_vunmap(s3a_buf->s3a_data->data_ptr);
+
+ ia_css_isp_3a_statistics_map_free(s3a_buf->s3a_map);
+ s3a_buf->s3a_map = NULL;
+ ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
+}
+
+void atomisp_css_free_dis_buffer(struct atomisp_dis_buf *dis_buf)
+{
+ if (dis_buf->dis_data)
+ hmm_vunmap(dis_buf->dis_data->data_ptr);
+
+ ia_css_isp_dvs_statistics_map_free(dis_buf->dvs_map);
+ dis_buf->dvs_map = NULL;
+ ia_css_isp_dvs2_statistics_free(dis_buf->dis_data);
+}
+
+void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf *metadata_buf)
+{
+ if (metadata_buf->md_vptr) {
+ hmm_vunmap(metadata_buf->metadata->address);
+ metadata_buf->md_vptr = NULL;
+ }
+ ia_css_metadata_free(metadata_buf->metadata);
+}
+
+void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd)
+{
+ struct atomisp_s3a_buf *s3a_buf, *_s3a_buf;
+ struct atomisp_dis_buf *dis_buf, *_dis_buf;
+ struct atomisp_metadata_buf *md_buf, *_md_buf;
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ unsigned int i;
+
+	/* 3A statistics use vmalloc, DIS uses kmalloc */
+ if (dvs_grid_info && dvs_grid_info->enable) {
+ ia_css_dvs2_coefficients_free(asd->params.css_param.dvs2_coeff);
+ ia_css_dvs2_statistics_free(asd->params.dvs_stat);
+ asd->params.css_param.dvs2_coeff = NULL;
+ asd->params.dvs_stat = NULL;
+ asd->params.dvs_hor_proj_bytes = 0;
+ asd->params.dvs_ver_proj_bytes = 0;
+ asd->params.dvs_hor_coef_bytes = 0;
+ asd->params.dvs_ver_coef_bytes = 0;
+ asd->params.dis_proj_data_valid = false;
+ list_for_each_entry_safe(dis_buf, _dis_buf,
+ &asd->dis_stats, list) {
+ atomisp_css_free_dis_buffer(dis_buf);
+ list_del(&dis_buf->list);
+ kfree(dis_buf);
+ }
+ list_for_each_entry_safe(dis_buf, _dis_buf,
+ &asd->dis_stats_in_css, list) {
+ atomisp_css_free_dis_buffer(dis_buf);
+ list_del(&dis_buf->list);
+ kfree(dis_buf);
+ }
+ }
+ if (asd->params.curr_grid_info.s3a_grid.enable) {
+ ia_css_3a_statistics_free(asd->params.s3a_user_stat);
+ asd->params.s3a_user_stat = NULL;
+ asd->params.s3a_output_bytes = 0;
+ list_for_each_entry_safe(s3a_buf, _s3a_buf,
+ &asd->s3a_stats, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+ list_for_each_entry_safe(s3a_buf, _s3a_buf,
+ &asd->s3a_stats_in_css, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+ list_for_each_entry_safe(s3a_buf, _s3a_buf,
+ &asd->s3a_stats_ready, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+ }
+
+ if (asd->params.css_param.dvs_6axis) {
+ ia_css_dvs2_6axis_config_free(asd->params.css_param.dvs_6axis);
+ asd->params.css_param.dvs_6axis = NULL;
+ }
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_for_each_entry_safe(md_buf, _md_buf,
+ &asd->metadata[i], list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ list_for_each_entry_safe(md_buf, _md_buf,
+ &asd->metadata_in_css[i], list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ list_for_each_entry_safe(md_buf, _md_buf,
+ &asd->metadata_ready[i], list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ }
+ asd->params.metadata_width_size = 0;
+ atomisp_free_metadata_output_buf(asd);
+}
+
+int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id,
+ int source_pad)
+{
+ struct ia_css_pipe_info p_info;
+ struct ia_css_grid_info old_info;
+ struct atomisp_device *isp = asd->isp;
+ int stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+ int md_width = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_config.metadata_config.resolution.width;
+
+ memset(&p_info, 0, sizeof(struct ia_css_pipe_info));
+ memset(&old_info, 0, sizeof(struct ia_css_grid_info));
+
+ if (ia_css_pipe_get_info(
+ asd->stream_env[stream_index].pipes[pipe_id],
+ &p_info) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "ia_css_pipe_get_info failed\n");
+ return -EINVAL;
+ }
+
+ memcpy(&old_info, &asd->params.curr_grid_info,
+ sizeof(struct ia_css_grid_info));
+ memcpy(&asd->params.curr_grid_info, &p_info.grid_info,
+ sizeof(struct ia_css_grid_info));
+ /*
+ * Record which css pipe enables s3a_grid.
+ * Currently only one css pipe needs it.
+ */
+ if (asd->params.curr_grid_info.s3a_grid.enable) {
+ if (asd->params.s3a_enabled_pipe != CSS_PIPE_ID_NUM)
+ dev_dbg(isp->dev, "css pipe %d enabled s3a grid replaced by: %d.\n",
+ asd->params.s3a_enabled_pipe, pipe_id);
+ asd->params.s3a_enabled_pipe = pipe_id;
+ }
+
+ /* If the grid info has not changed and the 3A and DIS statistics
+ * buffers are already allocated, or the buffer size would be zero,
+ * then there is no need to do anything. */
+ if (((!memcmp(&old_info, &asd->params.curr_grid_info, sizeof(old_info))
+ && asd->params.s3a_user_stat && asd->params.dvs_stat)
+ || asd->params.curr_grid_info.s3a_grid.width == 0
+ || asd->params.curr_grid_info.s3a_grid.height == 0)
+ && asd->params.metadata_width_size == md_width) {
+ dev_dbg(isp->dev,
+ "grid info change escape. memcmp=%d, s3a_user_stat=%d,dvs_stat=%d, s3a.width=%d, s3a.height=%d, metadata width =%d\n",
+ !memcmp(&old_info, &asd->params.curr_grid_info,
+ sizeof(old_info)),
+ !!asd->params.s3a_user_stat, !!asd->params.dvs_stat,
+ asd->params.curr_grid_info.s3a_grid.width,
+ asd->params.curr_grid_info.s3a_grid.height,
+ asd->params.metadata_width_size);
+ return -EINVAL;
+ }
+ asd->params.metadata_width_size = md_width;
+
+ return 0;
+}
+
+int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd)
+{
+ if (!asd->params.curr_grid_info.s3a_grid.width ||
+ !asd->params.curr_grid_info.s3a_grid.height)
+ return 0;
+
+ asd->params.s3a_user_stat = ia_css_3a_statistics_allocate(
+ &asd->params.curr_grid_info.s3a_grid);
+ if (!asd->params.s3a_user_stat)
+ return -ENOMEM;
+ /* 3A statistics. These can be big, so we use vmalloc. */
+ asd->params.s3a_output_bytes =
+ asd->params.curr_grid_info.s3a_grid.width *
+ asd->params.curr_grid_info.s3a_grid.height *
+ sizeof(*asd->params.s3a_user_stat->data);
+
+ return 0;
+}
+
+int atomisp_alloc_dis_coef_buf(struct atomisp_sub_device *asd)
+{
+ struct atomisp_css_dvs_grid_info *dvs_grid =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+
+ if (!dvs_grid)
+ return 0;
+
+ if (!dvs_grid->enable) {
+ dev_dbg(asd->isp->dev, "%s: dvs_grid not enabled.\n", __func__);
+ return 0;
+ }
+
+ /* DIS coefficients. */
+ asd->params.css_param.dvs2_coeff = ia_css_dvs2_coefficients_allocate(
+ dvs_grid);
+ if (!asd->params.css_param.dvs2_coeff)
+ return -ENOMEM;
+
+ asd->params.dvs_hor_coef_bytes = dvs_grid->num_hor_coefs *
+ sizeof(*asd->params.css_param.dvs2_coeff->hor_coefs.odd_real);
+
+ asd->params.dvs_ver_coef_bytes = dvs_grid->num_ver_coefs *
+ sizeof(*asd->params.css_param.dvs2_coeff->ver_coefs.odd_real);
+
+ /* DIS projections. */
+ asd->params.dis_proj_data_valid = false;
+ asd->params.dvs_stat = ia_css_dvs2_statistics_allocate(dvs_grid);
+ if (!asd->params.dvs_stat)
+ return -ENOMEM;
+
+ asd->params.dvs_hor_proj_bytes =
+ dvs_grid->aligned_height * dvs_grid->aligned_width *
+ sizeof(*asd->params.dvs_stat->hor_prod.odd_real);
+
+ asd->params.dvs_ver_proj_bytes =
+ dvs_grid->aligned_height * dvs_grid->aligned_width *
+ sizeof(*asd->params.dvs_stat->ver_prod.odd_real);
+
+ return 0;
+}
+
+int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd)
+{
+ int i;
+
+ /* We allocate the cpu-side buffer used for communication with user
+ * space */
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ asd->params.metadata_user[i] = kvmalloc(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_info.metadata_info.size, GFP_KERNEL);
+ if (!asd->params.metadata_user[i]) {
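+ /* Unwind: free the buffers allocated in earlier iterations. */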
+ while (--i >= 0) {
+ kvfree(asd->params.metadata_user[i]);
+ asd->params.metadata_user[i] = NULL;
+ }
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ if (asd->params.metadata_user[i]) {
+ kvfree(asd->params.metadata_user[i]);
+ asd->params.metadata_user[i] = NULL;
+ }
+ }
+}
+
+void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd,
+ struct atomisp_css_buffer *isp_css_buffer,
+ struct ia_css_isp_dvs_statistics_map *dvs_map)
+{
+ if (asd->params.dvs_stat) {
+ if (dvs_map)
+ ia_css_translate_dvs2_statistics(
+ asd->params.dvs_stat, dvs_map);
+ else
+ ia_css_get_dvs2_statistics(asd->params.dvs_stat,
+ isp_css_buffer->css_buffer.data.stats_dvs);
+ }
+}
+
+int atomisp_css_dequeue_event(struct atomisp_css_event *current_event)
+{
+ if (ia_css_dequeue_event(&current_event->event) != IA_CSS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
+ struct atomisp_css_event *current_event)
+{
+ /*
+ * FIXME!
+ * Pipe ID reported in CSS event is not correct for new system's
+ * copy pipe.
+ * VIED BZ: 1463
+ */
+ ia_css_temp_pipe_to_pipe_id(current_event->event.pipe,
+ &current_event->pipe);
+ if (asd && asd->copy_mode &&
+ current_event->pipe == IA_CSS_PIPE_ID_CAPTURE)
+ current_event->pipe = IA_CSS_PIPE_ID_COPY;
+}
+
+int atomisp_css_isys_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ if (isys_stream >= IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH)
+ return -EINVAL;
+
+ s_config->isys_config[isys_stream].input_res.width = ffmt->width;
+ s_config->isys_config[isys_stream].input_res.height = ffmt->height;
+ return 0;
+}
+
+int atomisp_css_input_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->input_config.input_res.width = ffmt->width;
+ s_config->input_config.input_res.height = ffmt->height;
+ return 0;
+}
+
+void atomisp_css_input_set_binning_factor(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int bin_factor)
+{
+ asd->stream_env[stream_id]
+ .stream_config.sensor_binning_factor = bin_factor;
+}
+
+void atomisp_css_input_set_bayer_order(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_bayer_order bayer_order)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+ s_config->input_config.bayer_order = bayer_order;
+}
+
+void atomisp_css_isys_set_link(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ int link,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[isys_stream].linked_isys_stream_id = link;
+}
+
+void atomisp_css_isys_set_valid(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ bool valid,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[isys_stream].valid = valid;
+}
+
+void atomisp_css_isys_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format format,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[isys_stream].format = format;
+}
+
+void atomisp_css_input_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format format)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->input_config.format = format;
+}
+
+int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ int i;
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+ /*
+ * Mark all isys configs as invalid.
+ * Currently we support only one stream per channel.
+ */
+ for (i = IA_CSS_STREAM_ISYS_STREAM_0;
+ i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++)
+ s_config->isys_config[i].valid = false;
+
+ atomisp_css_isys_set_resolution(asd, stream_id, ffmt,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ atomisp_css_isys_set_format(asd, stream_id,
+ s_config->input_config.format,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ atomisp_css_isys_set_link(asd, stream_id, NO_LINK,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ atomisp_css_isys_set_valid(asd, stream_id, true,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+
+ return 0;
+}
+
+int atomisp_css_isys_two_stream_cfg(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
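+ /*
+ * The second isys stream reuses stream 0's width but carries half of
+ * its lines and is linked back to stream 0; the two streams use the
+ * USER_DEF1/USER_DEF2 formats set below.
+ */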
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.width =
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.width;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.height =
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.height / 2;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].linked_isys_stream_id
+ = IA_CSS_STREAM_ISYS_STREAM_0;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].format =
+ ATOMISP_INPUT_FORMAT_USER_DEF1;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].format =
+ ATOMISP_INPUT_FORMAT_USER_DEF2;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].valid = true;
+ return 0;
+}
+
+void atomisp_css_isys_two_stream_cfg_update_stream1(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format,
+ unsigned int width, unsigned int height)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.width =
+ width;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.height =
+ height;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].format =
+ input_format;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].valid = true;
+}
+
+void atomisp_css_isys_two_stream_cfg_update_stream2(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format,
+ unsigned int width, unsigned int height)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.width =
+ width;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.height =
+ height;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].linked_isys_stream_id
+ = IA_CSS_STREAM_ISYS_STREAM_0;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].format =
+ input_format;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].valid = true;
+}
+
+int atomisp_css_input_set_effective_resolution(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int width, unsigned int height)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+ s_config->input_config.effective_res.width = width;
+ s_config->input_config.effective_res.height = height;
+ return 0;
+}
+
+void atomisp_css_video_set_dis_envelope(struct atomisp_sub_device *asd,
+ unsigned int dvs_w, unsigned int dvs_h)
+{
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.width = dvs_w;
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.height = dvs_h;
+}
+
+void atomisp_css_input_set_two_pixels_per_clock(
+ struct atomisp_sub_device *asd,
+ bool two_ppc)
+{
+ int i;
+
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.pixels_per_clock == (two_ppc ? 2 : 1))
+ return;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.pixels_per_clock = (two_ppc ? 2 : 1);
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[i] = true;
+}
+
+void atomisp_css_enable_raw_binning(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ unsigned int pipe;
+
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ pipe = IA_CSS_PIPE_ID_VIDEO;
+ else
+ pipe = IA_CSS_PIPE_ID_PREVIEW;
+
+ stream_env->pipe_extra_configs[pipe].enable_raw_binning = enable;
+ stream_env->update_pipe[pipe] = true;
+ if (enable)
+ stream_env->pipe_configs[pipe].output_info[0].padded_width =
+ stream_env->stream_config.input_config.effective_res.width;
+}
+
+void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable)
+{
+ int i;
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[i].enable_dz = enable;
+}
+
+void atomisp_css_capture_set_mode(struct atomisp_sub_device *asd,
+ enum atomisp_css_capture_mode mode)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+ if (stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE]
+ .default_capture_config.mode == mode)
+ return;
+
+ stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
+ default_capture_config.mode = mode;
+ stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
+}
+
+void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
+ enum atomisp_css_input_mode mode)
+{
+ int i;
+ struct atomisp_device *isp = asd->isp;
+ unsigned int size_mem_words;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
+ asd->stream_env[i].stream_config.mode = mode;
+
+ if (isp->inputs[asd->input_curr].type == TEST_PATTERN) {
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_config;
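+ /*
+ * Use the CSS test-pattern generator as the input source, with a
+ * checkerboard pattern; the mask/delta values below are the fixed
+ * TPG parameters this driver programs.
+ */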
+ s_config->mode = IA_CSS_INPUT_MODE_TPG;
+ s_config->source.tpg.mode = IA_CSS_TPG_MODE_CHECKERBOARD;
+ s_config->source.tpg.x_mask = (1 << 4) - 1;
+ s_config->source.tpg.x_delta = -2;
+ s_config->source.tpg.y_mask = (1 << 4) - 1;
+ s_config->source.tpg.y_delta = 3;
+ s_config->source.tpg.xy_mask = (1 << 8) - 1;
+ return;
+ }
+
+ if (mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ return;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ /*
+ * TODO: sensor needs to export the embedded_data_size_words
+ * information to atomisp for each setting.
+ * For now, use a large safe value.
+ */
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[i].stream_config;
+
+ if (s_config->input_config.input_res.width == 0)
+ continue;
+
+ if (ia_css_mipi_frame_calculate_size(
+ s_config->input_config.input_res.width,
+ s_config->input_config.input_res.height,
+ s_config->input_config.format,
+ true,
+ 0x13000,
+ &size_mem_words) != IA_CSS_SUCCESS) {
+ if (intel_mid_identify_cpu() ==
+ INTEL_MID_CPU_CHIP_TANGIER)
+ size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_2;
+ else
+ size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_1;
+ dev_warn(asd->isp->dev,
+ "ia_css_mipi_frame_calculate_size failed,applying pre-defined MIPI buffer size %u.\n",
+ size_mem_words);
+ }
+ s_config->mipi_buffer_config.size_mem_words = size_mem_words;
+ s_config->mipi_buffer_config.nof_mipi_buffers = 2;
+ }
+}
+
+void atomisp_css_capture_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+
+ if (stream_env->stream_config.online == !!enable)
+ return;
+
+ stream_env->stream_config.online = !!enable;
+ stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
+}
+
+void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ int i;
+
+ if (stream_env->stream_config.online != !!enable) {
+ stream_env->stream_config.online = !!enable;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ stream_env->update_pipe[i] = true;
+ }
+}
+
+void atomisp_css_video_enable_online(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_VIDEO];
+ int i;
+
+ if (stream_env->stream_config.online != enable) {
+ stream_env->stream_config.online = enable;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ stream_env->update_pipe[i] = true;
+ }
+}
+
+void atomisp_css_enable_continuous(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ int i;
+
+ /*
+ * For SOC cameras there is only one YUVPP pipe in any case,
+ * including ZSL/SDV/continuous viewfinder, so always set
+ * stream_config.continuous to 0.
+ */
+ if (ATOMISP_USE_YUVPP(asd)) {
+ stream_env->stream_config.continuous = 0;
+ stream_env->stream_config.online = 1;
+ return;
+ }
+
+ if (stream_env->stream_config.continuous != !!enable) {
+ stream_env->stream_config.continuous = !!enable;
+ stream_env->stream_config.pack_raw_pixels = true;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ stream_env->update_pipe[i] = true;
+ }
+}
+
+void atomisp_css_enable_cvf(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ int i;
+
+ if (stream_env->stream_config.disable_cont_viewfinder != !enable) {
+ stream_env->stream_config.disable_cont_viewfinder = !enable;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ stream_env->update_pipe[i] = true;
+ }
+}
+
+int atomisp_css_input_configure_port(
+ struct atomisp_sub_device *asd,
+ enum mipi_port_id port,
+ unsigned int num_lanes,
+ unsigned int timeout,
+ unsigned int mipi_freq,
+ enum atomisp_input_format metadata_format,
+ unsigned int metadata_width,
+ unsigned int metadata_height)
+{
+ int i;
+ struct atomisp_stream_env *stream_env;
+ /*
+ * Calculate rx_count as follows:
+ * Input: mipi_freq : CSI-2 bus frequency in Hz
+ * UI = 1 / (2 * mipi_freq) : period of one bit on the bus
+ * min = 85e-9 + 6 * UI : Limits for rx_count in seconds
+ * max = 145e-9 + 10 * UI
+ * rxcount0 = min / (4 / mipi_freq) : convert seconds to byte clocks
+ * rxcount = rxcount0 - 2 : adjust for better results
+ * The formula below is simplified version of the above with
+ * 10-bit fixed points for improved accuracy.
+ */
+ const unsigned int rxcount =
+ min(((mipi_freq / 46000) - 1280) >> 10, 0xffU) * 0x01010101U;
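+ /*
+ * Illustrative example (values not from the original source): for a
+ * 400 MHz bus, 400000000 / 46000 = 8695, 8695 - 1280 = 7415, and
+ * 7415 >> 10 = 7, so rxcount becomes 7 * 0x01010101 = 0x07070707,
+ * i.e. a per-lane count of 7 replicated into all four byte lanes.
+ */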
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ stream_env = &asd->stream_env[i];
+ stream_env->stream_config.source.port.port = port;
+ stream_env->stream_config.source.port.num_lanes = num_lanes;
+ stream_env->stream_config.source.port.timeout = timeout;
+ if (mipi_freq)
+ stream_env->stream_config.source.port.rxcount = rxcount;
+ stream_env->stream_config.
+ metadata_config.data_type = metadata_format;
+ stream_env->stream_config.
+ metadata_config.resolution.width = metadata_width;
+ stream_env->stream_config.
+ metadata_config.resolution.height = metadata_height;
+ }
+
+ return 0;
+}
+
+int atomisp_css_frame_allocate(struct atomisp_css_frame **frame,
+ unsigned int width, unsigned int height,
+ enum atomisp_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth)
+{
+ if (ia_css_frame_allocate(frame, width, height, format,
+ padded_width, raw_bit_depth) != IA_CSS_SUCCESS)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int atomisp_css_frame_allocate_from_info(struct atomisp_css_frame **frame,
+ const struct atomisp_css_frame_info *info)
+{
+ if (ia_css_frame_allocate_from_info(frame, info) != IA_CSS_SUCCESS)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void atomisp_css_frame_free(struct atomisp_css_frame *frame)
+{
+ ia_css_frame_free(frame);
+}
+
+int atomisp_css_frame_map(struct atomisp_css_frame **frame,
+ const struct atomisp_css_frame_info *info,
+ const void __user *data, uint16_t attribute,
+ void *context)
+{
+ if (ia_css_frame_map(frame, info, data, attribute, context)
+ != IA_CSS_SUCCESS)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int atomisp_css_set_black_frame(struct atomisp_sub_device *asd,
+ const struct atomisp_css_frame *raw_black_frame)
+{
+ if (sh_css_set_black_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ raw_black_frame) != IA_CSS_SUCCESS)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int atomisp_css_allocate_continuous_frames(bool init_time,
+ struct atomisp_sub_device *asd)
+{
+ if (ia_css_alloc_continuous_frame_remain(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream)
+ != IA_CSS_SUCCESS)
+ return -EINVAL;
+ return 0;
+}
+
+void atomisp_css_update_continuous_frames(struct atomisp_sub_device *asd)
+{
+ ia_css_update_continuous_frames(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream);
+}
+
+int atomisp_css_stop(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id, bool in_reset)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_s3a_buf *s3a_buf;
+ struct atomisp_dis_buf *dis_buf;
+ struct atomisp_metadata_buf *md_buf;
+ unsigned long irqflags;
+ unsigned int i;
+
+ /* if called from atomisp_reset(), force stream destruction */
+ if (__destroy_streams(asd, true))
+ dev_err(isp->dev, "destroy stream failed.\n");
+
+ /* if called from atomisp_reset(), force destruction of all pipes */
+ if (__destroy_pipes(asd, true))
+ dev_err(isp->dev, "destroy pipes failed.\n");
+
+ atomisp_init_raw_buffer_bitmap(asd);
+
+ /*
+ * The SP cannot be stopped while other streams are in use
+ */
+ if (atomisp_streaming_count(isp) == 0)
+ ia_css_stop_sp();
+
+ if (!in_reset) {
+ struct atomisp_stream_env *stream_env;
+ int i, j;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ stream_env = &asd->stream_env[i];
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ ia_css_pipe_config_defaults(
+ &stream_env->pipe_configs[j]);
+ ia_css_pipe_extra_config_defaults(
+ &stream_env->pipe_extra_configs[j]);
+ }
+ ia_css_stream_config_defaults(
+ &stream_env->stream_config);
+ }
+ atomisp_isp_parameters_clean_up(&asd->params.config);
+ asd->params.css_update_params_needed = false;
+ }
+
+ /* move stats buffers to free queue list */
+ while (!list_empty(&asd->s3a_stats_in_css)) {
+ s3a_buf = list_entry(asd->s3a_stats_in_css.next,
+ struct atomisp_s3a_buf, list);
+ list_del(&s3a_buf->list);
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ }
+ while (!list_empty(&asd->s3a_stats_ready)) {
+ s3a_buf = list_entry(asd->s3a_stats_ready.next,
+ struct atomisp_s3a_buf, list);
+ list_del(&s3a_buf->list);
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ }
+
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ while (!list_empty(&asd->dis_stats_in_css)) {
+ dis_buf = list_entry(asd->dis_stats_in_css.next,
+ struct atomisp_dis_buf, list);
+ list_del(&dis_buf->list);
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ }
+ asd->params.dis_proj_data_valid = false;
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ while (!list_empty(&asd->metadata_in_css[i])) {
+ md_buf = list_entry(asd->metadata_in_css[i].next,
+ struct atomisp_metadata_buf, list);
+ list_del(&md_buf->list);
+ list_add_tail(&md_buf->list, &asd->metadata[i]);
+ }
+ while (!list_empty(&asd->metadata_ready[i])) {
+ md_buf = list_entry(asd->metadata_ready[i].next,
+ struct atomisp_metadata_buf, list);
+ list_del(&md_buf->list);
+ list_add_tail(&md_buf->list, &asd->metadata[i]);
+ }
+ }
+
+ atomisp_flush_params_queue(&asd->video_out_capture);
+ atomisp_flush_params_queue(&asd->video_out_vf);
+ atomisp_flush_params_queue(&asd->video_out_preview);
+ atomisp_flush_params_queue(&asd->video_out_video_capture);
+ atomisp_free_css_parameters(&asd->params.css_param);
+ memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
+ return 0;
+}
+
+int atomisp_css_continuous_set_num_raw_frames(
+ struct atomisp_sub_device *asd,
+ int num_frames)
+{
+ if (asd->enable_raw_buffer_lock->val) {
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN;
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ asd->params.video_dis_en)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf +=
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+ } else {
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES;
+ }
+
+ if (asd->params.video_dis_en)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf +=
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.target_num_cont_raw_buf = num_frames;
+ return 0;
+}
+
+void atomisp_css_disable_vf_pp(struct atomisp_sub_device *asd,
+ bool disable)
+{
+ int i;
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_extra_configs[i].disable_vf_pp = !!disable;
+}
+
+static enum ia_css_pipe_mode __pipe_id_to_pipe_mode(
+ struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct camera_mipi_info *mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_COPY:
+ /* Currently only YUVPP mode supports YUV420_Legacy format.
+ * Revert this when other pipe modes can support
+ * YUV420_Legacy format.
+ */
+ if (mipi_info && mipi_info->input_format ==
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY)
+ return IA_CSS_PIPE_MODE_YUVPP;
+ return IA_CSS_PIPE_MODE_COPY;
+ case IA_CSS_PIPE_ID_PREVIEW:
+ return IA_CSS_PIPE_MODE_PREVIEW;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ return IA_CSS_PIPE_MODE_CAPTURE;
+ case IA_CSS_PIPE_ID_VIDEO:
+ return IA_CSS_PIPE_MODE_VIDEO;
+ case IA_CSS_PIPE_ID_ACC:
+ return IA_CSS_PIPE_MODE_ACC;
+ case IA_CSS_PIPE_ID_YUVPP:
+ return IA_CSS_PIPE_MODE_YUVPP;
+ default:
+ WARN_ON(1);
+ return IA_CSS_PIPE_MODE_PREVIEW;
+ }
+}
+
+static void __configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ struct ia_css_stream_config *s_config = &stream_env->stream_config;
+
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ stream_env->pipe_configs[pipe_id].output_info[0].res.width = width;
+ stream_env->pipe_configs[pipe_id].output_info[0].res.height = height;
+ stream_env->pipe_configs[pipe_id].output_info[0].format = format;
+ stream_env->pipe_configs[pipe_id].output_info[0].padded_width = min_width;
+
+ /* isp binary 2.2 specific setting */
+ if (width > s_config->input_config.effective_res.width ||
+ height > s_config->input_config.effective_res.height) {
+ s_config->input_config.effective_res.width = width;
+ s_config->input_config.effective_res.height = height;
+ }
+
+ dev_dbg(isp->dev, "configuring pipe[%d] output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+static void __configure_video_preview_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ struct ia_css_frame_info *css_output_info;
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ /*
+ * second_output serves as the video main output in SDV mode
+ * with a SOC camera; output serves as the video main output
+ * in normal video mode.
+ */
+ if (asd->continuous_mode->val)
+ css_output_info = &stream_env->pipe_configs[pipe_id].
+ output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ css_output_info = &stream_env->pipe_configs[pipe_id].
+ output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+
+ css_output_info->res.width = width;
+ css_output_info->res.height = height;
+ css_output_info->format = format;
+ css_output_info->padded_width = min_width;
+
+ /* isp binary 2.2 specific setting */
+ if (width > stream_config->input_config.effective_res.width ||
+ height > stream_config->input_config.effective_res.height) {
+ stream_config->input_config.effective_res.width = width;
+ stream_config->input_config.effective_res.height = height;
+ }
+
+ dev_dbg(isp->dev, "configuring pipe[%d] output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+/*
+ * For CSS2.1, capture pipe uses capture_pp_in_res to configure yuv
+ * downscaling input resolution.
+ */
+static void __configure_capture_pp_input(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+ struct ia_css_pipe_config *pipe_configs =
+ &stream_env->pipe_configs[pipe_id];
+ struct ia_css_pipe_extra_config *pipe_extra_configs =
+ &stream_env->pipe_extra_configs[pipe_id];
+ unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
+
+ if (width == 0 && height == 0)
+ return;
+
+ if (width * 9 / 10 < pipe_configs->output_info[0].res.width ||
+ height * 9 / 10 < pipe_configs->output_info[0].res.height)
+ return;
+ /* just copy the calculation used inside the CSS here */
+ hor_ds_factor = CEIL_DIV(width >> 1,
+ pipe_configs->output_info[0].res.width);
+ ver_ds_factor = CEIL_DIV(height >> 1,
+ pipe_configs->output_info[0].res.height);
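+ /*
+ * Illustrative numbers (not from the original source): for a
+ * 4096x3072 capture downscaled to a 1024x768 output,
+ * hor_ds_factor = CEIL_DIV(2048, 1024) = 2 and
+ * ver_ds_factor = CEIL_DIV(1536, 768) = 2, so the factors match
+ * and YUV downscaling can be enabled below.
+ */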
+
+ if ((asd->isp->media_dev.hw_revision <
+ (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) ||
+ IS_CHT) && hor_ds_factor != ver_ds_factor) {
+ dev_warn(asd->isp->dev,
+ "Cropping for capture due to FW limitation");
+ return;
+ }
+
+ pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ pipe_extra_configs->enable_yuv_ds = true;
+
+ pipe_configs->capt_pp_in_res.width =
+ stream_config->input_config.effective_res.width;
+ pipe_configs->capt_pp_in_res.height =
+ stream_config->input_config.effective_res.height;
+
+ dev_dbg(isp->dev, "configuring pipe[%d]capture pp input w=%d.h=%d.\n",
+ pipe_id, width, height);
+}
+
+/*
+ * For CSS2.1, the preview pipe can support bayer downscaling, yuv decimation
+ * and yuv downscaling, which need additional configuration.
+ */
+static void __configure_preview_pp_input(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int out_width, out_height, yuv_ds_in_width, yuv_ds_in_height;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+ struct ia_css_pipe_config *pipe_configs =
+ &stream_env->pipe_configs[pipe_id];
+ struct ia_css_pipe_extra_config *pipe_extra_configs =
+ &stream_env->pipe_extra_configs[pipe_id];
+ struct ia_css_resolution *bayer_ds_out_res =
+ &pipe_configs->bayer_ds_out_res;
+ struct ia_css_resolution *vf_pp_in_res =
+ &pipe_configs->vf_pp_in_res;
+ struct ia_css_resolution *effective_res =
+ &stream_config->input_config.effective_res;
+
+ const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} };
+ /*
+ * BZ201033: a YUV decimation factor of 4 causes a couple of the
+ * rightmost columns to be shaded. Remove this factor to work around
+ * the CSS bug.
+ * const unsigned int yuv_dec_fct[] = {4, 2};
+ */
+ const unsigned int yuv_dec_fct[] = { 2 };
+ unsigned int i;
+
+ if (width == 0 && height == 0)
+ return;
+
+ pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ out_width = pipe_configs->output_info[0].res.width;
+ out_height = pipe_configs->output_info[0].res.height;
+
+ /*
+ * The ISP could do bayer downscaling, yuv decimation and yuv
+ * downscaling:
+ * 1: Bayer Downscaling: between effective resolution and
+ * bayer_ds_res_out;
+ * 2: YUV Decimation: between bayer_ds_res_out and vf_pp_in_res;
+ * 3: YUV Downscaling: between vf_pp_in_res and final vf output
+ *
+ * Rule for Bayer Downscaling: support factor 2, 1.5 and 1.25
+ * Rule for YUV Decimation: support factor 2, 4
+ * Rule for YUV Downscaling: arbitrary value below 2
+ *
+ * General rule of factor distribution among these stages:
+ * 1: try to do Bayer downscaling first, if not in online mode.
+ * 2: use a factor of at most 2 for YUV downscaling.
+ * 3: leave the remainder to YUV decimation.
+ *
+ * Note:
+ * Do not configure bayer_ds_out_res if:
+ * online == 1 or continuous == 0 or raw_binning == 0
+ */
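+ /*
+ * Worked example (illustrative values only): with a 3264x2448
+ * effective resolution and a 1280x720 preview output, Bayer
+ * downscaling by 2 gives bayer_ds_out_res = 1632x1224; no YUV
+ * decimation factor fits (1632 < 2 * 1280), so vf_pp_in_res stays
+ * 1632x1224 and the remaining ~1.28x is left to YUV downscaling.
+ */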
+ if (stream_config->online || !stream_config->continuous ||
+ !pipe_extra_configs->enable_raw_binning) {
+ bayer_ds_out_res->width = 0;
+ bayer_ds_out_res->height = 0;
+ } else {
+ bayer_ds_out_res->width = effective_res->width;
+ bayer_ds_out_res->height = effective_res->height;
+
+ for (i = 0; i < ARRAY_SIZE(bds_fct); i++) {
+ if (effective_res->width >= out_width *
+ bds_fct[i].numerator / bds_fct[i].denominator &&
+ effective_res->height >= out_height *
+ bds_fct[i].numerator / bds_fct[i].denominator) {
+ bayer_ds_out_res->width =
+ effective_res->width *
+ bds_fct[i].denominator /
+ bds_fct[i].numerator;
+ bayer_ds_out_res->height =
+ effective_res->height *
+ bds_fct[i].denominator /
+ bds_fct[i].numerator;
+ break;
+ }
+ }
+ }
+ /*
+ * calculate the YUV decimation and YUV downscaling factors:
+ * YUV Downscaling factor must not exceed 2.
+ * YUV Decimation factor could be 2, 4.
+ */
+ /* first decide the yuv_ds input resolution */
+ if (bayer_ds_out_res->width == 0) {
+ yuv_ds_in_width = effective_res->width;
+ yuv_ds_in_height = effective_res->height;
+ } else {
+ yuv_ds_in_width = bayer_ds_out_res->width;
+ yuv_ds_in_height = bayer_ds_out_res->height;
+ }
+
+ vf_pp_in_res->width = yuv_ds_in_width;
+ vf_pp_in_res->height = yuv_ds_in_height;
+
+ /* find out the yuv decimation factor */
+ for (i = 0; i < ARRAY_SIZE(yuv_dec_fct); i++) {
+ if (yuv_ds_in_width >= out_width * yuv_dec_fct[i] &&
+ yuv_ds_in_height >= out_height * yuv_dec_fct[i]) {
+ vf_pp_in_res->width = yuv_ds_in_width / yuv_dec_fct[i];
+ vf_pp_in_res->height = yuv_ds_in_height / yuv_dec_fct[i];
+ break;
+ }
+ }
+
+ if (vf_pp_in_res->width == out_width &&
+ vf_pp_in_res->height == out_height) {
+ pipe_extra_configs->enable_yuv_ds = false;
+ vf_pp_in_res->width = 0;
+ vf_pp_in_res->height = 0;
+ } else {
+ pipe_extra_configs->enable_yuv_ds = true;
+ }
+
+ dev_dbg(isp->dev, "configuring pipe[%d]preview pp input w=%d.h=%d.\n",
+ pipe_id, width, height);
+}
+
+/*
+ * For CSS2.1, the offline video pipe can support bayer decimation and
+ * yuv downscaling, which need additional configuration.
+ */
+static void __configure_video_pp_input(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int out_width, out_height;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+ struct ia_css_pipe_config *pipe_configs =
+ &stream_env->pipe_configs[pipe_id];
+ struct ia_css_pipe_extra_config *pipe_extra_configs =
+ &stream_env->pipe_extra_configs[pipe_id];
+ struct ia_css_resolution *bayer_ds_out_res =
+ &pipe_configs->bayer_ds_out_res;
+ struct ia_css_resolution *effective_res =
+ &stream_config->input_config.effective_res;
+
+ const struct bayer_ds_factor bds_factors[] = {
+ {8, 1}, {6, 1}, {4, 1}, {3, 1}, {2, 1}, {3, 2}
+ };
+ unsigned int i;
+
+ if (width == 0 && height == 0)
+ return;
+
+ pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ pipe_extra_configs->enable_yuv_ds = false;
+
+ /*
+ * If DVS is enabled, the video binary will take care of the DVS
+ * envelope, and usually bayer_ds_out_res should be larger than 120%
+ * of the destination resolution; the extra 20% will be cropped as
+ * the DVS envelope. But if bayer_ds_out_res is less than 120% of
+ * the destination, the ISP can still work, though the DVS quality
+ * is not as good.
+ */
+ /* take at least 10% as the envelope */
+ if (asd->params.video_dis_en) {
+ out_width = pipe_configs->output_info[0].res.width * 110 / 100;
+ out_height = pipe_configs->output_info[0].res.height * 110 / 100;
+ } else {
+ out_width = pipe_configs->output_info[0].res.width;
+ out_height = pipe_configs->output_info[0].res.height;
+ }
+
+ /*
+ * calculate the bayer decimation factor:
+ * 1: only 1.5, 2, 4 and 8 are supported
+ * 2: do not configure bayer_ds_out_res if:
+ * online == 1 or continuous == 0 or raw_binning == 0
+ */
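+ /*
+ * Worked example (illustrative values only, for the continuous
+ * non-online case): a 1920x1080 output with DVS enabled targets
+ * 2112x1188 (110%); with a 3264x2448 effective resolution the only
+ * factor that fits is 3/2, giving bayer_ds_out_res = 2176x1632.
+ */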
+ if (stream_config->online || !stream_config->continuous) {
+ bayer_ds_out_res->width = 0;
+ bayer_ds_out_res->height = 0;
+ goto done;
+ }
+
+ pipe_extra_configs->enable_raw_binning = true;
+ bayer_ds_out_res->width = effective_res->width;
+ bayer_ds_out_res->height = effective_res->height;
+
+ for (i = 0; i < ARRAY_SIZE(bds_factors); i++) {
+ if (effective_res->width >= out_width *
+ bds_factors[i].numerator / bds_factors[i].denominator &&
+ effective_res->height >= out_height *
+ bds_factors[i].numerator / bds_factors[i].denominator) {
+ bayer_ds_out_res->width = effective_res->width *
+ bds_factors[i].denominator /
+ bds_factors[i].numerator;
+ bayer_ds_out_res->height = effective_res->height *
+ bds_factors[i].denominator /
+ bds_factors[i].numerator;
+ break;
+ }
+ }
+
+ /*
+ * DVS is cropped from BDS output, so we do not really need to set the
+ * envelope to 20% of output resolution here. Always set it to 12x12
+ * per firmware requirement.
+ */
+ pipe_configs->dvs_envelope.width = 12;
+ pipe_configs->dvs_envelope.height = 12;
+
+done:
+ if (pipe_id == IA_CSS_PIPE_ID_YUVPP)
+ stream_config->left_padding = -1;
+ else
+ stream_config->left_padding = 12;
+ dev_dbg(isp->dev, "configuring pipe[%d]video pp input w=%d.h=%d.\n",
+ pipe_id, width, height);
+}
+
+static void __configure_vf_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.width = width;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.height = height;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].format = format;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].padded_width =
+ min_width;
+ dev_dbg(isp->dev,
+ "configuring pipe[%d] vf output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+static void __configure_video_vf_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_frame_info *css_output_info;
+
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ /*
+ * second_vf_output serves as the video viewfinder in SDV mode
+ * with a SOC camera; vf_output serves as the video viewfinder
+ * in normal video mode.
+ */
+ if (asd->continuous_mode->val)
+ css_output_info = &stream_env->pipe_configs[pipe_id].
+ vf_output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ css_output_info = &stream_env->pipe_configs[pipe_id].
+ vf_output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+
+ css_output_info->res.width = width;
+ css_output_info->res.height = height;
+ css_output_info->format = format;
+ css_output_info->padded_width = min_width;
+ dev_dbg(isp->dev,
+ "configuring pipe[%d] vf output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+static int __get_frame_info(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info,
+ enum frame_info_type type,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ enum ia_css_err ret;
+ struct ia_css_pipe_info p_info;
+
+ /* FIXME! No need to destroy/recreate all streams */
+ if (__destroy_streams(asd, true))
+ dev_warn(isp->dev, "destroy stream failed.\n");
+
+ if (__destroy_pipes(asd, true))
+ dev_warn(isp->dev, "destroy pipe failed.\n");
+
+ if (__create_pipes(asd))
+ return -EINVAL;
+
+ if (__create_streams(asd))
+ goto stream_err;
+
+ ret = ia_css_pipe_get_info(
+ asd->stream_env[stream_index]
+ .pipes[pipe_id], &p_info);
+ if (ret == IA_CSS_SUCCESS) {
+ switch (type) {
+ case ATOMISP_CSS_VF_FRAME:
+ *info = p_info.vf_output_info[0];
+ dev_dbg(isp->dev, "getting vf frame info.\n");
+ break;
+ case ATOMISP_CSS_SECOND_VF_FRAME:
+ *info = p_info.vf_output_info[1];
+ dev_dbg(isp->dev, "getting second vf frame info.\n");
+ break;
+ case ATOMISP_CSS_OUTPUT_FRAME:
+ *info = p_info.output_info[0];
+ dev_dbg(isp->dev, "getting main frame info.\n");
+ break;
+ case ATOMISP_CSS_SECOND_OUTPUT_FRAME:
+ *info = p_info.output_info[1];
+ dev_dbg(isp->dev, "getting second main frame info.\n");
+ break;
+ case ATOMISP_CSS_RAW_FRAME:
+ *info = p_info.raw_output_info;
+ dev_dbg(isp->dev, "getting raw frame info.\n");
+ }
+ dev_dbg(isp->dev, "get frame info: w=%d, h=%d, num_invalid_frames %d.\n",
+ info->res.width, info->res.height, p_info.num_invalid_frames);
+ return 0;
+ }
+
+stream_err:
+ __destroy_pipes(asd, true);
+ return -EINVAL;
+}
+
+static unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd,
+ uint16_t source_pad)
+{
+ struct atomisp_device *isp = asd->isp;
+ /*
+ * For SOC cameras, use the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ return IA_CSS_PIPE_ID_YUVPP;
+
+ switch (source_pad) {
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
+ if (asd->yuvpp_mode)
+ return IA_CSS_PIPE_ID_YUVPP;
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO
+ || asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER)
+ return IA_CSS_PIPE_ID_VIDEO;
+ else
+ return IA_CSS_PIPE_ID_CAPTURE;
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+ return IA_CSS_PIPE_ID_CAPTURE;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ if (!atomisp_is_mbuscode_raw(
+ asd->fmt[asd->capture_pad].fmt.code))
+ return IA_CSS_PIPE_ID_CAPTURE;
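+ /* else fall through: raw formats use the preview handling below */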
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ if (asd->yuvpp_mode)
+ return IA_CSS_PIPE_ID_YUVPP;
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ return IA_CSS_PIPE_ID_VIDEO;
+ else
+ return IA_CSS_PIPE_ID_PREVIEW;
+ }
+ dev_warn(isp->dev,
+ "invalid source pad:%d, return default preview pipe index.\n",
+ source_pad);
+ return IA_CSS_PIPE_ID_PREVIEW;
+}
+
+int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
+ u16 source_pad,
+ struct atomisp_css_frame_info *frame_info)
+{
+ struct ia_css_pipe_info info;
+ int pipe_index = atomisp_get_pipe_index(asd, source_pad);
+ int stream_index;
+ struct atomisp_device *isp = asd->isp;
+
+ if (ATOMISP_SOC_CAMERA(asd))
+ stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+ else {
+ stream_index = (pipe_index == IA_CSS_PIPE_ID_YUVPP) ?
+ ATOMISP_INPUT_STREAM_VIDEO :
+ atomisp_source_pad_to_stream_id(asd, source_pad);
+ }
+
+ if (ia_css_pipe_get_info(asd->stream_env[stream_index]
+ .pipes[pipe_index], &info) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "ia_css_pipe_get_info failed\n");
+ return -EINVAL;
+ }
+
+ switch (source_pad) {
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ *frame_info = info.output_info[0];
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
+ if (ATOMISP_USE_YUVPP(asd) && asd->continuous_mode->val)
+ *frame_info = info.
+ output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ *frame_info = info.
+ output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ if (stream_index == ATOMISP_INPUT_STREAM_POSTVIEW)
+ *frame_info = info.output_info[0];
+ else
+ *frame_info = info.vf_output_info[0];
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ (pipe_index == IA_CSS_PIPE_ID_VIDEO ||
+ pipe_index == IA_CSS_PIPE_ID_YUVPP))
+ if (ATOMISP_USE_YUVPP(asd) && asd->continuous_mode->val)
+ *frame_info = info.
+ vf_output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ *frame_info = info.
+ vf_output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+ else if (ATOMISP_USE_YUVPP(asd) && asd->continuous_mode->val)
+ *frame_info =
+ info.output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ *frame_info =
+ info.output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+
+ break;
+ default:
+ frame_info = NULL;
+ break;
+ }
+ return frame_info ? 0 : -EINVAL;
+}
+
+int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum atomisp_css_frame_format format)
+{
+ asd->stream_env[stream_index].pipe_configs[IA_CSS_PIPE_ID_COPY].
+ default_capture_config.mode =
+ CSS_CAPTURE_MODE_RAW;
+
+ __configure_output(asd, stream_index, width, height, padded_width,
+ format, IA_CSS_PIPE_ID_COPY);
+ return 0;
+}
+
+int atomisp_css_yuvpp_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum atomisp_css_frame_format format)
+{
+ asd->stream_env[stream_index].pipe_configs[IA_CSS_PIPE_ID_YUVPP].
+ default_capture_config.mode =
+ CSS_CAPTURE_MODE_RAW;
+
+ __configure_output(asd, stream_index, width, height, padded_width,
+ format, IA_CSS_PIPE_ID_YUVPP);
+ return 0;
+}
+
+int atomisp_css_yuvpp_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ enum ia_css_pipe_id pipe_id = IA_CSS_PIPE_ID_YUVPP;
+
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.width = width;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.height = height;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].format = format;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].padded_width =
+ min_width;
+ return 0;
+}
+
+int atomisp_css_yuvpp_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info)
+{
+ return __get_frame_info(asd, stream_index, info,
+ ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_YUVPP);
+}
+
+int atomisp_css_yuvpp_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info)
+{
+ return __get_frame_info(asd, stream_index, info,
+ ATOMISP_CSS_VF_FRAME, IA_CSS_PIPE_ID_YUVPP);
+}
+
+int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ /*
+ * For SOC cameras, use the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ __configure_video_preview_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width,
+ height,
+ min_width, format, IA_CSS_PIPE_ID_YUVPP);
+ else
+ __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, IA_CSS_PIPE_ID_PREVIEW);
+ return 0;
+}
+
+int atomisp_css_capture_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ enum ia_css_pipe_id pipe_id;
+
+ /*
+ * For SOC cameras, use the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ else
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+
+ __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, pipe_id);
+ return 0;
+}
+
+int atomisp_css_video_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ /*
+ * For SOC cameras, use the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ __configure_video_preview_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width,
+ height,
+ min_width, format, IA_CSS_PIPE_ID_YUVPP);
+ else
+ __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, IA_CSS_PIPE_ID_VIDEO);
+ return 0;
+}
+
+int atomisp_css_video_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ /*
+ * For SOC cameras, video uses the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ __configure_video_vf_output(asd, width, height, min_width, format,
+ IA_CSS_PIPE_ID_YUVPP);
+ else
+ __configure_vf_output(asd, width, height, min_width, format,
+ IA_CSS_PIPE_ID_VIDEO);
+ return 0;
+}
+
+int atomisp_css_capture_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ enum ia_css_pipe_id pipe_id;
+
+ /*
+ * For SOC cameras, video uses the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ else
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+
+ __configure_vf_output(asd, width, height, min_width, format,
+ pipe_id);
+ return 0;
+}
+
+int atomisp_css_video_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+ enum frame_info_type frame_type = ATOMISP_CSS_VF_FRAME;
+
+ if (ATOMISP_USE_YUVPP(asd)) {
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ if (asd->continuous_mode->val)
+ frame_type = ATOMISP_CSS_SECOND_VF_FRAME;
+ } else {
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ }
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ frame_type, pipe_id);
+}
+
+int atomisp_css_capture_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ else
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_VF_FRAME, pipe_id);
+}
+
+int atomisp_css_capture_get_output_raw_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ if (ATOMISP_USE_YUVPP(asd))
+ return 0;
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_RAW_FRAME, IA_CSS_PIPE_ID_CAPTURE);
+}
+
+int atomisp_css_copy_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info)
+{
+ return __get_frame_info(asd, stream_index, info,
+ ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_COPY);
+}
+
+int atomisp_css_preview_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+ enum frame_info_type frame_type = ATOMISP_CSS_OUTPUT_FRAME;
+
+ if (ATOMISP_USE_YUVPP(asd)) {
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ if (asd->continuous_mode->val)
+ frame_type = ATOMISP_CSS_SECOND_OUTPUT_FRAME;
+ } else {
+ pipe_id = IA_CSS_PIPE_ID_PREVIEW;
+ }
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ frame_type, pipe_id);
+}
+
+int atomisp_css_capture_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ else
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_OUTPUT_FRAME, pipe_id);
+}
+
+int atomisp_css_video_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+ enum frame_info_type frame_type = ATOMISP_CSS_OUTPUT_FRAME;
+
+ if (ATOMISP_USE_YUVPP(asd)) {
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ if (asd->continuous_mode->val)
+ frame_type = ATOMISP_CSS_SECOND_OUTPUT_FRAME;
+ } else {
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ }
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ frame_type, pipe_id);
+}
+
+int atomisp_css_preview_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ __configure_preview_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_PREVIEW);
+
+ if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
+ capt_pp_in_res.width)
+ __configure_capture_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_CAPTURE);
+ return 0;
+}
+
+int atomisp_css_capture_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ __configure_capture_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_CAPTURE);
+ return 0;
+}
+
+int atomisp_css_video_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+ __configure_video_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_VIDEO);
+
+ if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
+ capt_pp_in_res.width)
+ __configure_capture_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_CAPTURE);
+ return 0;
+}
+
+int atomisp_css_offline_capture_configure(struct atomisp_sub_device *asd,
+ int num_captures, unsigned int skip, int offset)
+{
+ enum ia_css_err ret;
+
+	dev_dbg(asd->isp->dev, "%s num_captures:%d skip:%d offset:%d\n",
+ __func__, num_captures, skip, offset);
+
+ ret = ia_css_stream_capture(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ num_captures, skip, offset);
+ if (ret != IA_CSS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atomisp_css_exp_id_capture(struct atomisp_sub_device *asd, int exp_id)
+{
+ enum ia_css_err ret;
+
+ ret = ia_css_stream_capture_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ exp_id);
+ if (ret == IA_CSS_ERR_QUEUE_IS_FULL) {
+ /* capture cmd queue is full */
+ return -EBUSY;
+ } else if (ret != IA_CSS_SUCCESS) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int atomisp_css_exp_id_unlock(struct atomisp_sub_device *asd, int exp_id)
+{
+ enum ia_css_err ret;
+
+ ret = ia_css_unlock_raw_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ exp_id);
+ if (ret == IA_CSS_ERR_QUEUE_IS_FULL)
+ return -EAGAIN;
+ else if (ret != IA_CSS_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+int atomisp_css_capture_enable_xnr(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_CAPTURE]
+ .default_capture_config.enable_xnr = enable;
+ asd->params.capture_config.enable_xnr = enable;
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
+
+ return 0;
+}
+
+void atomisp_css_send_input_frame(struct atomisp_sub_device *asd,
+ unsigned short *data, unsigned int width,
+ unsigned int height)
+{
+ ia_css_stream_send_input_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ data, width, height);
+}
+
+bool atomisp_css_isp_has_started(void)
+{
+ return ia_css_isp_has_started();
+}
+
+void atomisp_css_request_flash(struct atomisp_sub_device *asd)
+{
+ ia_css_stream_request_flash(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream);
+}
+
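+/*
+ * The parameter setters below only record a pointer in asd->params.config;
+ * the new values are picked up later, when the ISP configuration is next
+ * applied to the CSS.
+ */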
+void atomisp_css_set_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_wb_config *wb_config)
+{
+ asd->params.config.wb_config = wb_config;
+}
+
+void atomisp_css_set_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ob_config *ob_config)
+{
+ asd->params.config.ob_config = ob_config;
+}
+
+void atomisp_css_set_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_dp_config *dp_config)
+{
+ asd->params.config.dp_config = dp_config;
+}
+
+void atomisp_css_set_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_de_config *de_config)
+{
+ asd->params.config.de_config = de_config;
+}
+
+void atomisp_css_set_dz_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_dz_config *dz_config)
+{
+ asd->params.config.dz_config = dz_config;
+}
+
+void atomisp_css_set_default_de_config(struct atomisp_sub_device *asd)
+{
+ asd->params.config.de_config = NULL;
+}
+
+void atomisp_css_set_ce_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ce_config *ce_config)
+{
+ asd->params.config.ce_config = ce_config;
+}
+
+void atomisp_css_set_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_nr_config *nr_config)
+{
+ asd->params.config.nr_config = nr_config;
+}
+
+void atomisp_css_set_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ee_config *ee_config)
+{
+ asd->params.config.ee_config = ee_config;
+}
+
+void atomisp_css_set_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_tnr_config *tnr_config)
+{
+ asd->params.config.tnr_config = tnr_config;
+}
+
+void atomisp_css_set_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *cc_config)
+{
+ asd->params.config.cc_config = cc_config;
+}
+
+void atomisp_css_set_macc_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_macc_table *macc_table)
+{
+ asd->params.config.macc_table = macc_table;
+}
+
+void atomisp_css_set_macc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_macc_config *macc_config)
+{
+ asd->params.config.macc_config = macc_config;
+}
+
+void atomisp_css_set_ecd_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ecd_config *ecd_config)
+{
+ asd->params.config.ecd_config = ecd_config;
+}
+
+void atomisp_css_set_ynr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ynr_config *ynr_config)
+{
+ asd->params.config.ynr_config = ynr_config;
+}
+
+void atomisp_css_set_fc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_fc_config *fc_config)
+{
+ asd->params.config.fc_config = fc_config;
+}
+
+void atomisp_css_set_ctc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ctc_config *ctc_config)
+{
+ asd->params.config.ctc_config = ctc_config;
+}
+
+void atomisp_css_set_cnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cnr_config *cnr_config)
+{
+ asd->params.config.cnr_config = cnr_config;
+}
+
+void atomisp_css_set_aa_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_aa_config *aa_config)
+{
+ asd->params.config.aa_config = aa_config;
+}
+
+void atomisp_css_set_baa_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_baa_config *baa_config)
+{
+ asd->params.config.baa_config = baa_config;
+}
+
+void atomisp_css_set_anr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_anr_config *anr_config)
+{
+ asd->params.config.anr_config = anr_config;
+}
+
+void atomisp_css_set_xnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_xnr_config *xnr_config)
+{
+ asd->params.config.xnr_config = xnr_config;
+}
+
+void atomisp_css_set_yuv2rgb_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *yuv2rgb_cc_config)
+{
+ asd->params.config.yuv2rgb_cc_config = yuv2rgb_cc_config;
+}
+
+void atomisp_css_set_rgb2yuv_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *rgb2yuv_cc_config)
+{
+ asd->params.config.rgb2yuv_cc_config = rgb2yuv_cc_config;
+}
+
+void atomisp_css_set_xnr_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_xnr_table *xnr_table)
+{
+ asd->params.config.xnr_table = xnr_table;
+}
+
+void atomisp_css_set_r_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *r_gamma_table)
+{
+ asd->params.config.r_gamma_table = r_gamma_table;
+}
+
+void atomisp_css_set_g_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *g_gamma_table)
+{
+ asd->params.config.g_gamma_table = g_gamma_table;
+}
+
+void atomisp_css_set_b_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *b_gamma_table)
+{
+ asd->params.config.b_gamma_table = b_gamma_table;
+}
+
+void atomisp_css_set_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_gamma_table *gamma_table)
+{
+ asd->params.config.gamma_table = gamma_table;
+}
+
+void atomisp_css_set_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_ctc_table *ctc_table)
+{
+ int i;
+ u16 *vamem_ptr = ctc_table->data.vamem_1;
+ int data_size = IA_CSS_VAMEM_1_CTC_TABLE_SIZE;
+ bool valid = false;
+
+ /* workaround: if ctc_table is all 0, do not apply it */
+ if (ctc_table->vamem_type == IA_CSS_VAMEM_TYPE_2) {
+ vamem_ptr = ctc_table->data.vamem_2;
+ data_size = IA_CSS_VAMEM_2_CTC_TABLE_SIZE;
+ }
+
+ for (i = 0; i < data_size; i++) {
+ if (*(vamem_ptr + i)) {
+ valid = true;
+ break;
+ }
+ }
+
+ if (valid)
+ asd->params.config.ctc_table = ctc_table;
+ else
+		dev_warn(asd->isp->dev, "Bypassing invalid ctc_table.\n");
+}
+
+void atomisp_css_set_anr_thres(struct atomisp_sub_device *asd,
+ struct atomisp_css_anr_thres *anr_thres)
+{
+ asd->params.config.anr_thres = anr_thres;
+}
+
+void atomisp_css_set_dvs_6axis(struct atomisp_sub_device *asd,
+ struct atomisp_css_dvs_6axis *dvs_6axis)
+{
+ asd->params.config.dvs_6axis_config = dvs_6axis;
+}
+
+void atomisp_css_set_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_gc_config *gc_config)
+{
+ asd->params.config.gc_config = gc_config;
+}
+
+void atomisp_css_set_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_3a_config *s3a_config)
+{
+ asd->params.config.s3a_config = s3a_config;
+}
+
+void atomisp_css_video_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector)
+{
+ if (!asd->params.config.motion_vector)
+ asd->params.config.motion_vector = &asd->params.css_param.motion_vector;
+
+ memset(asd->params.config.motion_vector,
+ 0, sizeof(struct ia_css_vector));
+ asd->params.css_param.motion_vector.x = vector->x;
+ asd->params.css_param.motion_vector.y = vector->y;
+}
+
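+/*
+ * Returns 0 when the user-supplied grid matches the current DVS grid,
+ * non-zero on mismatch or when the DVS grid is unavailable.
+ */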
+static int atomisp_compare_dvs_grid(struct atomisp_sub_device *asd,
+ struct atomisp_dvs_grid_info *atomgrid)
+{
+ struct atomisp_css_dvs_grid_info *cur =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+
+ if (!cur) {
+ dev_err(asd->isp->dev, "dvs grid not available!\n");
+ return -EINVAL;
+ }
+
+ if (sizeof(*cur) != sizeof(*atomgrid)) {
+		dev_err(asd->isp->dev, "dvs grid mismatch!\n");
+ return -EINVAL;
+ }
+
+ if (!cur->enable) {
+ dev_err(asd->isp->dev, "dvs not enabled!\n");
+ return -EINVAL;
+ }
+
+ return memcmp(atomgrid, cur, sizeof(*cur));
+}
+
+void atomisp_css_set_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs)
+{
+ asd->params.config.dvs2_coefs = coefs;
+}
+
+int atomisp_css_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs)
+{
+	/*
+	 * If the grid info in the argument differs from the current grid
+	 * info, tell the caller to reset the grid size and try again.
+	 */
+	if (atomisp_compare_dvs_grid(asd, &coefs->grid_info) != 0)
+		return -EAGAIN;
+
+ if (!coefs->hor_coefs.odd_real ||
+ !coefs->hor_coefs.odd_imag ||
+ !coefs->hor_coefs.even_real ||
+ !coefs->hor_coefs.even_imag ||
+ !coefs->ver_coefs.odd_real ||
+ !coefs->ver_coefs.odd_imag ||
+ !coefs->ver_coefs.even_real ||
+ !coefs->ver_coefs.even_imag ||
+ !asd->params.css_param.dvs2_coeff->hor_coefs.odd_real ||
+ !asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag ||
+ !asd->params.css_param.dvs2_coeff->hor_coefs.even_real ||
+ !asd->params.css_param.dvs2_coeff->hor_coefs.even_imag ||
+ !asd->params.css_param.dvs2_coeff->ver_coefs.odd_real ||
+ !asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag ||
+ !asd->params.css_param.dvs2_coeff->ver_coefs.even_real ||
+ !asd->params.css_param.dvs2_coeff->ver_coefs.even_imag)
+ return -EINVAL;
+
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_real,
+ coefs->hor_coefs.odd_real, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag,
+ coefs->hor_coefs.odd_imag, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_real,
+ coefs->hor_coefs.even_real, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_imag,
+ coefs->hor_coefs.even_imag, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_real,
+ coefs->ver_coefs.odd_real, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag,
+ coefs->ver_coefs.odd_imag, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_real,
+ coefs->ver_coefs.even_real, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_imag,
+ coefs->ver_coefs.even_imag, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+
+ asd->params.css_param.update_flag.dvs2_coefs =
+ (struct atomisp_dvs2_coefficients *)
+ asd->params.css_param.dvs2_coeff;
+ /* FIXME! */
+ /* asd->params.dis_proj_data_valid = false; */
+ asd->params.css_update_params_needed = true;
+
+ return 0;
+}
+
+void atomisp_css_set_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int zoom)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (zoom == asd->params.css_param.dz_config.dx &&
+ zoom == asd->params.css_param.dz_config.dy) {
+ dev_dbg(isp->dev, "same zoom scale. skipped.\n");
+ return;
+ }
+
+ memset(&asd->params.css_param.dz_config, 0,
+ sizeof(struct ia_css_dz_config));
+ asd->params.css_param.dz_config.dx = zoom;
+ asd->params.css_param.dz_config.dy = zoom;
+
+ asd->params.css_param.update_flag.dz_config =
+ (struct atomisp_dz_config *)&asd->params.css_param.dz_config;
+ asd->params.css_update_params_needed = true;
+}
+
+void atomisp_css_set_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_formats_config *formats_config)
+{
+ asd->params.config.formats_config = formats_config;
+}
+
+int atomisp_css_get_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_wb_config *config)
+{
+ struct atomisp_css_wb_config wb_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&wb_config, 0, sizeof(struct atomisp_css_wb_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.wb_config = &wb_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &wb_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_ob_config *config)
+{
+ struct atomisp_css_ob_config ob_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&ob_config, 0, sizeof(struct atomisp_css_ob_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.ob_config = &ob_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &ob_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_dp_config *config)
+{
+ struct atomisp_css_dp_config dp_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&dp_config, 0, sizeof(struct atomisp_css_dp_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.dp_config = &dp_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &dp_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_de_config *config)
+{
+ struct atomisp_css_de_config de_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&de_config, 0, sizeof(struct atomisp_css_de_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.de_config = &de_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &de_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_nr_config *config)
+{
+ struct atomisp_css_nr_config nr_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&nr_config, 0, sizeof(struct atomisp_css_nr_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+
+ isp_config.nr_config = &nr_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &nr_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_ee_config *config)
+{
+ struct atomisp_css_ee_config ee_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&ee_config, 0, sizeof(struct atomisp_css_ee_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.ee_config = &ee_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &ee_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_tnr_config *config)
+{
+ struct atomisp_css_tnr_config tnr_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&tnr_config, 0, sizeof(struct atomisp_css_tnr_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.tnr_config = &tnr_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &tnr_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_ctc_table *config)
+{
+ struct atomisp_css_ctc_table *tab;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ tab = vzalloc(sizeof(struct atomisp_css_ctc_table));
+ if (!tab)
+ return -ENOMEM;
+
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.ctc_table = tab;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, tab, sizeof(*tab));
+ vfree(tab);
+
+ return 0;
+}
+
+int atomisp_css_get_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_gamma_table *config)
+{
+ struct atomisp_css_gamma_table *tab;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ tab = vzalloc(sizeof(struct atomisp_css_gamma_table));
+ if (!tab)
+ return -ENOMEM;
+
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.gamma_table = tab;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, tab, sizeof(*tab));
+ vfree(tab);
+
+ return 0;
+}
+
+int atomisp_css_get_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_gc_config *config)
+{
+ struct atomisp_css_gc_config gc_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&gc_config, 0, sizeof(struct atomisp_css_gc_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.gc_config = &gc_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ /* Get gamma correction params from current setup */
+ memcpy(config, &gc_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_3a_config *config)
+{
+ struct atomisp_css_3a_config s3a_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&s3a_config, 0, sizeof(struct atomisp_css_3a_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.s3a_config = &s3a_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ /* Get white balance from current setup */
+ memcpy(config, &s3a_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_formats_config *config)
+{
+ struct atomisp_css_formats_config formats_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&formats_config, 0, sizeof(formats_config));
+ memset(&isp_config, 0, sizeof(isp_config));
+ isp_config.formats_config = &formats_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ /* Get narrow gamma from current setup */
+ memcpy(config, &formats_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int *zoom)
+{
+	struct ia_css_dz_config dz_config; /* digital zoom */
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&dz_config, 0, sizeof(struct ia_css_dz_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.dz_config = &dz_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ *zoom = dz_config.dx;
+
+ return 0;
+}
+
+/*
+ * Function to get image stabilization (DIS) statistics
+ */
+int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_dis_buf *dis_buf;
+ unsigned long flags;
+
+ if (!asd->params.dvs_stat->hor_prod.odd_real ||
+ !asd->params.dvs_stat->hor_prod.odd_imag ||
+ !asd->params.dvs_stat->hor_prod.even_real ||
+ !asd->params.dvs_stat->hor_prod.even_imag ||
+ !asd->params.dvs_stat->ver_prod.odd_real ||
+ !asd->params.dvs_stat->ver_prod.odd_imag ||
+ !asd->params.dvs_stat->ver_prod.even_real ||
+ !asd->params.dvs_stat->ver_prod.even_imag)
+ return -EINVAL;
+
+ /* isp needs to be streaming to get DIS statistics */
+ spin_lock_irqsave(&isp->lock, flags);
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+	/*
+	 * If the grid info in the argument differs from the current grid
+	 * info, tell the caller to reset the grid size and try again.
+	 */
+	if (atomisp_compare_dvs_grid(asd, &stats->dvs2_stat.grid_info) != 0)
+		return -EAGAIN;
+
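+	/*
+	 * Borrow the oldest DIS buffer from the list, extract its
+	 * statistics, and put it back before copying the results to
+	 * user space.
+	 */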
+ spin_lock_irqsave(&asd->dis_stats_lock, flags);
+ if (!asd->params.dis_proj_data_valid || list_empty(&asd->dis_stats)) {
+ spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
+ dev_err(isp->dev, "dis statistics is not valid.\n");
+ return -EAGAIN;
+ }
+
+ dis_buf = list_entry(asd->dis_stats.next,
+ struct atomisp_dis_buf, list);
+ list_del_init(&dis_buf->list);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
+
+ if (dis_buf->dvs_map)
+ ia_css_translate_dvs2_statistics(
+ asd->params.dvs_stat, dis_buf->dvs_map);
+ else
+ ia_css_get_dvs2_statistics(asd->params.dvs_stat,
+ dis_buf->dis_data);
+ stats->exp_id = dis_buf->dis_data->exp_id;
+
+ spin_lock_irqsave(&asd->dis_stats_lock, flags);
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
+
+ if (copy_to_user(stats->dvs2_stat.ver_prod.odd_real,
+ asd->params.dvs_stat->ver_prod.odd_real,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.ver_prod.odd_imag,
+ asd->params.dvs_stat->ver_prod.odd_imag,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.ver_prod.even_real,
+ asd->params.dvs_stat->ver_prod.even_real,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.ver_prod.even_imag,
+ asd->params.dvs_stat->ver_prod.even_imag,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.odd_real,
+ asd->params.dvs_stat->hor_prod.odd_real,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.odd_imag,
+ asd->params.dvs_stat->hor_prod.odd_imag,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.even_real,
+ asd->params.dvs_stat->hor_prod.even_real,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.even_imag,
+ asd->params.dvs_stat->hor_prod.even_imag,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct atomisp_css_shading_table *atomisp_css_shading_table_alloc(
+ unsigned int width, unsigned int height)
+{
+ return ia_css_shading_table_alloc(width, height);
+}
+
+void atomisp_css_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_shading_table *table)
+{
+ asd->params.config.shading_table = table;
+}
+
+void atomisp_css_shading_table_free(struct atomisp_css_shading_table *table)
+{
+ ia_css_shading_table_free(table);
+}
+
+struct atomisp_css_morph_table *atomisp_css_morph_table_allocate(
+ unsigned int width, unsigned int height)
+{
+ return ia_css_morph_table_allocate(width, height);
+}
+
+void atomisp_css_set_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_morph_table *table)
+{
+ asd->params.config.morph_table = table;
+}
+
+void atomisp_css_get_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_morph_table *table)
+{
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev,
+ "%s called after streamoff, skipping.\n", __func__);
+ return;
+ }
+ memset(table, 0, sizeof(struct atomisp_css_morph_table));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.morph_table = table;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+}
+
+void atomisp_css_morph_table_free(struct atomisp_css_morph_table *table)
+{
+ ia_css_morph_table_free(table);
+}
+
+void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp,
+ unsigned int overlap)
+{
+ /* CSS 2.0 doesn't support this API. */
+ dev_dbg(isp->dev, "set cont prev start time is not supported.\n");
+}
+
+void atomisp_css_acc_done(struct atomisp_sub_device *asd)
+{
+ complete(&asd->acc.acc_done);
+}
+
+int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd)
+{
+ int ret = 0;
+ struct atomisp_device *isp = asd->isp;
+
+ /* Unlock the isp mutex taken in IOCTL handler before sleeping! */
+ rt_mutex_unlock(&isp->mutex);
+ if (wait_for_completion_interruptible_timeout(&asd->acc.acc_done,
+ ATOMISP_ISP_TIMEOUT_DURATION) == 0) {
+ dev_err(isp->dev, "<%s: completion timeout\n", __func__);
+ atomisp_css_debug_dump_sp_sw_debug_info();
+ atomisp_css_debug_dump_debug_info(__func__);
+ ret = -EIO;
+ }
+ rt_mutex_lock(&isp->mutex);
+
+ return ret;
+}
+
+/* Set the ACC binary arguments */
+int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw)
+{
+ unsigned int mem;
+
+ for (mem = 0; mem < ATOMISP_ACC_NR_MEMORY; mem++) {
+ if (acc_fw->args[mem].length == 0)
+ continue;
+
+ ia_css_isp_param_set_css_mem_init(&acc_fw->fw->mem_initializers,
+ IA_CSS_PARAM_CLASS_PARAM, mem,
+ acc_fw->args[mem].css_ptr,
+ acc_fw->args[mem].length);
+ }
+
+ return 0;
+}
+
+/* Load acc binary extension */
+int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ enum atomisp_css_pipe_id pipe_id,
+ unsigned int type)
+{
+ struct atomisp_css_fw_info **hd;
+
+ fw->next = NULL;
+ hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[pipe_id].acc_extension);
+ while (*hd)
+ hd = &(*hd)->next;
+ *hd = fw;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[pipe_id] = true;
+ return 0;
+}
+
+/* Unload acc binary extension */
+void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ enum atomisp_css_pipe_id pipe_id)
+{
+ struct atomisp_css_fw_info **hd;
+
+ hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[pipe_id].acc_extension);
+ while (*hd && *hd != fw)
+ hd = &(*hd)->next;
+ if (!*hd) {
+ dev_err(asd->isp->dev, "did not find acc fw for removal\n");
+ return;
+ }
+ *hd = fw->next;
+ fw->next = NULL;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[pipe_id] = true;
+}
+
+int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_pipe_config *pipe_config;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+ if (stream_env->acc_stream) {
+ if (stream_env->acc_stream_state == CSS_STREAM_STARTED) {
+ if (ia_css_stream_stop(stream_env->acc_stream)
+ != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "stop acc_stream failed.\n");
+ return -EBUSY;
+ }
+ }
+
+ if (ia_css_stream_destroy(stream_env->acc_stream)
+ != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "destroy acc_stream failed.\n");
+ return -EBUSY;
+ }
+ stream_env->acc_stream = NULL;
+ }
+
+ pipe_config = &stream_env->pipe_configs[CSS_PIPE_ID_ACC];
+ ia_css_pipe_config_defaults(pipe_config);
+	asd->acc.acc_stages = kcalloc(MAX_ACC_STAGES, sizeof(void *),
+				      GFP_KERNEL);
+ if (!asd->acc.acc_stages)
+ return -ENOMEM;
+ pipe_config->acc_stages = asd->acc.acc_stages;
+ pipe_config->mode = IA_CSS_PIPE_MODE_ACC;
+ pipe_config->num_acc_stages = 0;
+
+ /*
+ * We delay the ACC pipeline creation to atomisp_css_start_acc_pipe,
+ * because pipe configuration will soon be changed by
+ * atomisp_css_load_acc_binary()
+ */
+ return 0;
+}
+
+int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_pipe_config *pipe_config =
+ &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC];
+
+ if (ia_css_pipe_create(pipe_config,
+ &stream_env->pipes[IA_CSS_PIPE_ID_ACC]) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "%s: ia_css_pipe_create failed\n",
+ __func__);
+ return -EBADE;
+ }
+
+ memset(&stream_env->acc_stream_config, 0,
+ sizeof(struct ia_css_stream_config));
+ if (ia_css_stream_create(&stream_env->acc_stream_config, 1,
+ &stream_env->pipes[IA_CSS_PIPE_ID_ACC],
+ &stream_env->acc_stream) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "%s: create acc_stream error.\n", __func__);
+ return -EINVAL;
+ }
+ stream_env->acc_stream_state = CSS_STREAM_CREATED;
+
+ init_completion(&asd->acc.acc_done);
+ asd->acc.pipeline = stream_env->pipes[IA_CSS_PIPE_ID_ACC];
+
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false);
+
+ if (ia_css_start_sp() != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "start sp error.\n");
+ return -EIO;
+ }
+
+ if (ia_css_stream_start(stream_env->acc_stream)
+ != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "acc_stream start error.\n");
+ return -EIO;
+ }
+
+ stream_env->acc_stream_state = CSS_STREAM_STARTED;
+ return 0;
+}
+
+int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+	if (stream_env->acc_stream_state == CSS_STREAM_STARTED) {
+ ia_css_stream_stop(stream_env->acc_stream);
+ stream_env->acc_stream_state = CSS_STREAM_STOPPED;
+ }
+ return 0;
+}
+
+void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+	if (stream_env->acc_stream) {
+ if (ia_css_stream_destroy(stream_env->acc_stream)
+ != IA_CSS_SUCCESS)
+ dev_warn(asd->isp->dev,
+ "destroy acc_stream failed.\n");
+ stream_env->acc_stream = NULL;
+ }
+
+ if (stream_env->pipes[IA_CSS_PIPE_ID_ACC]) {
+ if (ia_css_pipe_destroy(stream_env->pipes[IA_CSS_PIPE_ID_ACC])
+ != IA_CSS_SUCCESS)
+ dev_warn(asd->isp->dev,
+ "destroy ACC pipe failed.\n");
+ stream_env->pipes[IA_CSS_PIPE_ID_ACC] = NULL;
+ stream_env->update_pipe[IA_CSS_PIPE_ID_ACC] = false;
+ ia_css_pipe_config_defaults(
+ &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC]);
+ ia_css_pipe_extra_config_defaults(
+ &stream_env->pipe_extra_configs[IA_CSS_PIPE_ID_ACC]);
+ }
+ asd->acc.pipeline = NULL;
+
+	/*
+	 * CSS 2.0 API limitation: ia_css_stop_sp() may only be called after
+	 * all pipes have been destroyed.
+	 */
+ ia_css_stop_sp();
+
+ kfree(asd->acc.acc_stages);
+ asd->acc.acc_stages = NULL;
+
+ atomisp_freq_scaling(asd->isp, ATOMISP_DFS_MODE_LOW, false);
+}
+
+int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ unsigned int index)
+{
+ struct ia_css_pipe_config *pipe_config =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_ACC];
+
+ if (index >= MAX_ACC_STAGES) {
+ dev_dbg(asd->isp->dev, "%s: index(%d) out of range\n",
+ __func__, index);
+ return -ENOMEM;
+ }
+
+ pipe_config->acc_stages[index] = fw;
+ pipe_config->num_acc_stages = index + 1;
+ pipe_config->acc_num_execs = 1;
+
+ return 0;
+}
+
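+/*
+ * Find the subdev (and input stream id) that owns the given CSS pipe by
+ * scanning every stream of every active subdev; returns NULL when the
+ * pipe is not found.
+ */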
+static struct atomisp_sub_device *__get_atomisp_subdev(
+ struct ia_css_pipe *css_pipe,
+ struct atomisp_device *isp,
+ enum atomisp_input_stream_id *stream_id)
+{
+ int i, j, k;
+ struct atomisp_sub_device *asd;
+ struct atomisp_stream_env *stream_env;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED &&
+ !asd->acc.pipeline)
+ continue;
+ for (j = 0; j < ATOMISP_INPUT_STREAM_NUM; j++) {
+ stream_env = &asd->stream_env[j];
+ for (k = 0; k < IA_CSS_PIPE_ID_NUM; k++) {
+ if (stream_env->pipes[k] &&
+ stream_env->pipes[k] == css_pipe) {
+ *stream_id = j;
+ return asd;
+ }
+ }
+ }
+ }
+
+ return NULL;
+}
+
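+/*
+ * Drain the CSS event queue: dispatch buffer-done events to the owning
+ * subdev, turn FW assertions into watchdog-based recovery and report
+ * pipeline completion to the caller.
+ */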
+int atomisp_css_isr_thread(struct atomisp_device *isp,
+ bool *frame_done_found,
+ bool *css_pipe_done)
+{
+ enum atomisp_input_stream_id stream_id = 0;
+ struct atomisp_css_event current_event;
+ struct atomisp_sub_device *asd;
+ bool reset_wdt_timer[MAX_STREAM_NUM] = {false};
+ int i;
+
+ while (!atomisp_css_dequeue_event(&current_event)) {
+ if (current_event.event.type ==
+ IA_CSS_EVENT_TYPE_FW_ASSERT) {
+ /*
+ * Received FW assertion signal,
+ * trigger WDT to recover
+ */
+ dev_err(isp->dev,
+ "%s: ISP reports FW_ASSERT event! fw_assert_module_id %d fw_assert_line_no %d\n",
+ __func__,
+ current_event.event.fw_assert_module_id,
+ current_event.event.fw_assert_line_no);
+ for (i = 0; i < isp->num_of_streams; i++)
+ atomisp_wdt_stop(&isp->asd[i], 0);
+
+ if (!atomisp_hw_is_isp2401)
+ atomisp_wdt(&isp->asd[0].wdt);
+ else
+ queue_work(isp->wdt_work_queue, &isp->wdt_work);
+
+ return -EINVAL;
+ } else if (current_event.event.type == IA_CSS_EVENT_TYPE_FW_WARNING) {
+ dev_warn(isp->dev, "%s: ISP reports warning, code is %d, exp_id %d\n",
+ __func__, current_event.event.fw_warning,
+ current_event.event.exp_id);
+ continue;
+ }
+
+ asd = __get_atomisp_subdev(current_event.event.pipe,
+ isp, &stream_id);
+ if (!asd) {
+			if (current_event.event.type == CSS_EVENT_TIMER)
+				dev_dbg(isp->dev, "event: timer event.\n");
+			else
+				dev_warn(isp->dev, "%s: no subdev for event %d\n",
+					 __func__,
+					 current_event.event.type);
+ continue;
+ }
+
+ atomisp_css_temp_pipe_to_pipe_id(asd, &current_event);
+ switch (current_event.event.type) {
+ case CSS_EVENT_OUTPUT_FRAME_DONE:
+ frame_done_found[asd->index] = true;
+ atomisp_buf_done(asd, 0, CSS_BUFFER_TYPE_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+
+ if (!atomisp_hw_is_isp2401)
+ reset_wdt_timer[asd->index] = true; /* ISP running */
+
+ break;
+ case CSS_EVENT_SEC_OUTPUT_FRAME_DONE:
+ frame_done_found[asd->index] = true;
+ atomisp_buf_done(asd, 0, CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+
+ if (!atomisp_hw_is_isp2401)
+ reset_wdt_timer[asd->index] = true; /* ISP running */
+
+ break;
+ case CSS_EVENT_3A_STATISTICS_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_3A_STATISTICS,
+ current_event.pipe,
+ false, stream_id);
+ break;
+ case CSS_EVENT_METADATA_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_METADATA,
+ current_event.pipe,
+ false, stream_id);
+ break;
+ case CSS_EVENT_VF_OUTPUT_FRAME_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_VF_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+
+ if (!atomisp_hw_is_isp2401)
+ reset_wdt_timer[asd->index] = true; /* ISP running */
+
+ break;
+ case CSS_EVENT_SEC_VF_OUTPUT_FRAME_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+ if (!atomisp_hw_is_isp2401)
+ reset_wdt_timer[asd->index] = true; /* ISP running */
+
+ break;
+ case CSS_EVENT_DIS_STATISTICS_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_DIS_STATISTICS,
+ current_event.pipe,
+ false, stream_id);
+ break;
+ case CSS_EVENT_PIPELINE_DONE:
+ css_pipe_done[asd->index] = true;
+ break;
+ case CSS_EVENT_ACC_STAGE_COMPLETE:
+ atomisp_acc_done(asd, current_event.event.fw_handle);
+ break;
+ default:
+ dev_dbg(isp->dev, "unhandled css stored event: 0x%x\n",
+ current_event.event.type);
+ break;
+ }
+ }
+
+ if (atomisp_hw_is_isp2401)
+ return 0;
+
+ /* ISP2400: If there are no buffers queued then delete wdt timer. */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ if (!asd)
+ continue;
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+ if (!atomisp_buffers_queued(asd))
+ atomisp_wdt_stop(asd, false);
+ else if (reset_wdt_timer[i])
+ /* SOF irq should not reset wdt timer. */
+ atomisp_wdt_refresh(asd,
+ ATOMISP_WDT_KEEP_CURRENT_DELAY);
+ }
+
+ return 0;
+}
+
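+/*
+ * SOF events are considered valid only when no configured stream uses
+ * the buffered-sensor input mode.
+ */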
+bool atomisp_css_valid_sof(struct atomisp_device *isp)
+{
+ unsigned int i, j;
+
+ /* Loop for each css stream */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ /* Loop for each css vc stream */
+ for (j = 0; j < ATOMISP_INPUT_STREAM_NUM; j++) {
+ if (asd->stream_env[j].stream &&
+ asd->stream_env[j].stream_config.mode ==
+ IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int atomisp_css_debug_dump_isp_binary(void)
+{
+ ia_css_debug_dump_isp_binary();
+ return 0;
+}
+
+int atomisp_css_dump_sp_raw_copy_linecount(bool reduced)
+{
+ sh_css_dump_sp_raw_copy_linecount(reduced);
+ return 0;
+}
+
+int atomisp_css_dump_blob_infor(void)
+{
+ struct ia_css_blob_descr *bd = sh_css_blob_info;
+ unsigned int i, nm = sh_css_num_binaries;
+
+	if (nm == 0 || !bd)
+		return -EPERM;
+
+	for (i = 1; i < nm; i++)
+ dev_dbg(atomisp_dev, "Num%d binary id is %d, name is %s\n", i,
+ bd[i - 1].header.info.isp.sp.id, bd[i - 1].name);
+
+ return 0;
+}
+
+void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd,
+ uint32_t isp_config_id)
+{
+ asd->params.config.isp_config_id = isp_config_id;
+}
+
+void atomisp_css_set_isp_config_applied_frame(struct atomisp_sub_device *asd,
+ struct atomisp_css_frame *output_frame)
+{
+ asd->params.config.output_frame = output_frame;
+}
+
+int atomisp_get_css_dbgfunc(void)
+{
+ return dbg_func;
+}
+
+int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt)
+{
+ int ret;
+
+ ret = __set_css_print_env(isp, opt);
+ if (ret == 0)
+ dbg_func = opt;
+
+ return ret;
+}
+
+void atomisp_en_dz_capt_pipe(struct atomisp_sub_device *asd, bool enable)
+{
+ ia_css_en_dz_capt_pipe(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ enable);
+}
+
+struct atomisp_css_dvs_grid_info *atomisp_css_get_dvs_grid_info(
+ struct atomisp_css_grid_info *grid_info)
+{
+ if (!grid_info)
+ return NULL;
+
+#ifdef IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
+ return &grid_info->dvs_grid.dvs_grid_info;
+#else
+ return &grid_info->dvs_grid;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h
new file mode 100644
index 000000000000..7abd1ff35652
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h
@@ -0,0 +1,277 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_COMPAT_CSS20_H__
+#define __ATOMISP_COMPAT_CSS20_H__
+
+#include <media/v4l2-mediabus.h>
+
+#include "ia_css.h"
+#include "ia_css_types.h"
+#include "ia_css_acc_types.h"
+#include "sh_css_legacy.h"
+
+#define ATOMISP_CSS2_PIPE_MAX 2
+#define ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES 3
+#define ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN 4
+#define ATOMISP_CSS2_NUM_DVS_FRAME_DELAY 2
+
+#define atomisp_css_pipe_id ia_css_pipe_id
+#define atomisp_css_pipeline ia_css_pipe
+#define atomisp_css_buffer_type ia_css_buffer_type
+#define atomisp_css_dis_data ia_css_isp_dvs_statistics
+#define atomisp_css_irq_info ia_css_irq_info
+#define atomisp_css_isp_config ia_css_isp_config
+#define atomisp_css_bayer_order ia_css_bayer_order
+#define atomisp_css_capture_mode ia_css_capture_mode
+#define atomisp_css_input_mode ia_css_input_mode
+#define atomisp_css_frame ia_css_frame
+#define atomisp_css_frame_format ia_css_frame_format
+#define atomisp_css_frame_info ia_css_frame_info
+#define atomisp_css_dp_config ia_css_dp_config
+#define atomisp_css_wb_config ia_css_wb_config
+#define atomisp_css_cc_config ia_css_cc_config
+#define atomisp_css_nr_config ia_css_nr_config
+#define atomisp_css_ee_config ia_css_ee_config
+#define atomisp_css_ob_config ia_css_ob_config
+#define atomisp_css_de_config ia_css_de_config
+#define atomisp_css_dz_config ia_css_dz_config
+#define atomisp_css_ce_config ia_css_ce_config
+#define atomisp_css_gc_config ia_css_gc_config
+#define atomisp_css_tnr_config ia_css_tnr_config
+#define atomisp_css_cnr_config ia_css_cnr_config
+#define atomisp_css_ctc_config ia_css_ctc_config
+#define atomisp_css_3a_config ia_css_3a_config
+#define atomisp_css_ecd_config ia_css_ecd_config
+#define atomisp_css_ynr_config ia_css_ynr_config
+#define atomisp_css_fc_config ia_css_fc_config
+#define atomisp_css_aa_config ia_css_aa_config
+#define atomisp_css_baa_config ia_css_aa_config
+#define atomisp_css_anr_config ia_css_anr_config
+#define atomisp_css_xnr_config ia_css_xnr_config
+#define atomisp_css_macc_config ia_css_macc_config
+#define atomisp_css_gamma_table ia_css_gamma_table
+#define atomisp_css_ctc_table ia_css_ctc_table
+#define atomisp_css_macc_table ia_css_macc_table
+#define atomisp_css_xnr_table ia_css_xnr_table
+#define atomisp_css_rgb_gamma_table ia_css_rgb_gamma_table
+#define atomisp_css_anr_thres ia_css_anr_thres
+#define atomisp_css_dvs_6axis ia_css_dvs_6axis_config
+#define atomisp_css_grid_info ia_css_grid_info
+#define atomisp_css_3a_grid_info ia_css_3a_grid_info
+#define atomisp_css_dvs_grid_info ia_css_dvs_grid_info
+#define atomisp_css_shading_table ia_css_shading_table
+#define atomisp_css_morph_table ia_css_morph_table
+#define atomisp_css_dvs_6axis_config ia_css_dvs_6axis_config
+#define atomisp_css_fw_info ia_css_fw_info
+#define atomisp_css_formats_config ia_css_formats_config
+
+#define CSS_PIPE_ID_PREVIEW IA_CSS_PIPE_ID_PREVIEW
+#define CSS_PIPE_ID_COPY IA_CSS_PIPE_ID_COPY
+#define CSS_PIPE_ID_VIDEO IA_CSS_PIPE_ID_VIDEO
+#define CSS_PIPE_ID_CAPTURE IA_CSS_PIPE_ID_CAPTURE
+#define CSS_PIPE_ID_ACC IA_CSS_PIPE_ID_ACC
+#define CSS_PIPE_ID_YUVPP IA_CSS_PIPE_ID_YUVPP
+#define CSS_PIPE_ID_NUM IA_CSS_PIPE_ID_NUM
+
+#define CSS_INPUT_MODE_SENSOR IA_CSS_INPUT_MODE_BUFFERED_SENSOR
+#define CSS_INPUT_MODE_FIFO IA_CSS_INPUT_MODE_FIFO
+#define CSS_INPUT_MODE_TPG IA_CSS_INPUT_MODE_TPG
+#define CSS_INPUT_MODE_PRBS IA_CSS_INPUT_MODE_PRBS
+#define CSS_INPUT_MODE_MEMORY IA_CSS_INPUT_MODE_MEMORY
+
+#define CSS_IRQ_INFO_CSS_RECEIVER_ERROR IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR
+#define CSS_IRQ_INFO_EVENTS_READY IA_CSS_IRQ_INFO_EVENTS_READY
+#define CSS_IRQ_INFO_INPUT_SYSTEM_ERROR \
+ IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR
+#define CSS_IRQ_INFO_IF_ERROR IA_CSS_IRQ_INFO_IF_ERROR
+
+#define CSS_BUFFER_TYPE_NUM IA_CSS_BUFFER_TYPE_NUM
+
+#define CSS_FRAME_FLASH_STATE_NONE IA_CSS_FRAME_FLASH_STATE_NONE
+#define CSS_FRAME_FLASH_STATE_PARTIAL IA_CSS_FRAME_FLASH_STATE_PARTIAL
+#define CSS_FRAME_FLASH_STATE_FULL IA_CSS_FRAME_FLASH_STATE_FULL
+
+#define CSS_BAYER_ORDER_GRBG IA_CSS_BAYER_ORDER_GRBG
+#define CSS_BAYER_ORDER_RGGB IA_CSS_BAYER_ORDER_RGGB
+#define CSS_BAYER_ORDER_BGGR IA_CSS_BAYER_ORDER_BGGR
+#define CSS_BAYER_ORDER_GBRG IA_CSS_BAYER_ORDER_GBRG
+
+/*
+ * Hide IA_ naming difference in otherwise common CSS macros.
+ */
+#define CSS_ID(val) (IA_ ## val)
+#define CSS_EVENT(val) (IA_CSS_EVENT_TYPE_ ## val)
+#define CSS_FORMAT(val) (ATOMISP_INPUT_FORMAT_ ## val)
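+/* e.g. CSS_EVENT(PORT_EOF) expands to IA_CSS_EVENT_TYPE_PORT_EOF */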
+
+#define CSS_EVENT_PORT_EOF CSS_EVENT(PORT_EOF)
+#define CSS_EVENT_FRAME_TAGGED CSS_EVENT(FRAME_TAGGED)
+
+#define CSS_MIPI_FRAME_BUFFER_SIZE_1 0x60000
+#define CSS_MIPI_FRAME_BUFFER_SIZE_2 0x80000
+
+struct atomisp_device;
+struct atomisp_sub_device;
+
+#define MAX_STREAMS_PER_CHANNEL 2
+
+/*
+ * These indicate the CSS stream state; the corresponding stream handling
+ * is selected based on the current state.
+ */
+enum atomisp_css_stream_state {
+ CSS_STREAM_UNINIT,
+ CSS_STREAM_CREATED,
+ CSS_STREAM_STARTED,
+ CSS_STREAM_STOPPED,
+};
+
+/*
+ * A sensor or external ISP can send multiple streams with different MIPI
+ * data types in the same virtual channel. This information needs to come
+ * from the sensor or the external ISP.
+ */
+struct atomisp_css_isys_config_info {
+ unsigned int input_format;
+ unsigned int width;
+ unsigned int height;
+};
+
+struct atomisp_stream_env {
+ struct ia_css_stream *stream;
+ struct ia_css_stream_config stream_config;
+ struct ia_css_stream_info stream_info;
+ struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe *multi_pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe_config pipe_configs[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe_extra_config pipe_extra_configs[IA_CSS_PIPE_ID_NUM];
+ bool update_pipe[IA_CSS_PIPE_ID_NUM];
+ enum atomisp_css_stream_state stream_state;
+ struct ia_css_stream *acc_stream;
+ enum atomisp_css_stream_state acc_stream_state;
+ struct ia_css_stream_config acc_stream_config;
+ unsigned int ch_id; /* virtual channel ID */
+ unsigned int isys_configs;
+ struct atomisp_css_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL];
+};
+
+struct atomisp_css_env {
+ struct ia_css_env isp_css_env;
+ struct ia_css_fw isp_css_fw;
+};
+
+struct atomisp_s3a_buf {
+ struct ia_css_isp_3a_statistics *s3a_data;
+ struct ia_css_isp_3a_statistics_map *s3a_map;
+ struct list_head list;
+};
+
+struct atomisp_dis_buf {
+ struct atomisp_css_dis_data *dis_data;
+ struct ia_css_isp_dvs_statistics_map *dvs_map;
+ struct list_head list;
+};
+
+struct atomisp_css_buffer {
+ struct ia_css_buffer css_buffer;
+};
+
+struct atomisp_css_event {
+ enum atomisp_css_pipe_id pipe;
+ struct ia_css_event event;
+};
+
+void atomisp_css_set_macc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_macc_config *macc_config);
+
+void atomisp_css_set_ecd_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ecd_config *ecd_config);
+
+void atomisp_css_set_ynr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ynr_config *ynr_config);
+
+void atomisp_css_set_fc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_fc_config *fc_config);
+
+void atomisp_css_set_aa_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_aa_config *aa_config);
+
+void atomisp_css_set_baa_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_baa_config *baa_config);
+
+void atomisp_css_set_anr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_anr_config *anr_config);
+
+void atomisp_css_set_xnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_xnr_config *xnr_config);
+
+void atomisp_css_set_cnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cnr_config *cnr_config);
+
+void atomisp_css_set_ctc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ctc_config *ctc_config);
+
+void atomisp_css_set_yuv2rgb_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *yuv2rgb_cc_config);
+
+void atomisp_css_set_rgb2yuv_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *rgb2yuv_cc_config);
+
+void atomisp_css_set_xnr_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_xnr_table *xnr_table);
+
+void atomisp_css_set_r_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *r_gamma_table);
+
+void atomisp_css_set_g_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *g_gamma_table);
+
+void atomisp_css_set_b_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *b_gamma_table);
+
+void atomisp_css_set_anr_thres(struct atomisp_sub_device *asd,
+ struct atomisp_css_anr_thres *anr_thres);
+
+int atomisp_css_check_firmware_version(struct atomisp_device *isp);
+
+int atomisp_css_load_firmware(struct atomisp_device *isp);
+
+void atomisp_css_unload_firmware(struct atomisp_device *isp);
+
+void atomisp_css_set_dvs_6axis(struct atomisp_sub_device *asd,
+ struct atomisp_css_dvs_6axis *dvs_6axis);
+
+unsigned int atomisp_css_debug_get_dtrace_level(void);
+
+int atomisp_css_debug_dump_isp_binary(void);
+
+int atomisp_css_dump_sp_raw_copy_linecount(bool reduced);
+
+int atomisp_css_dump_blob_infor(void);
+
+void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd,
+ uint32_t isp_config_id);
+
+void atomisp_css_set_isp_config_applied_frame(struct atomisp_sub_device *asd,
+ struct atomisp_css_frame *output_frame);
+
+int atomisp_get_css_dbgfunc(void);
+
+int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt);
+struct atomisp_css_dvs_grid_info *atomisp_css_get_dvs_grid_info(
+ struct atomisp_css_grid_info *grid_info);
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c
new file mode 100644
index 000000000000..3079043f1fac
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c
@@ -0,0 +1,1177 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+#include <linux/videodev2.h>
+
+#include "atomisp_internal.h"
+#include "atomisp_compat.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_compat_ioctl32.h"
+
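+/*
+ * The get_/put_ helpers below translate between the native kernel layout
+ * of a structure and its 32-bit user-space counterpart, converting
+ * compat_uptr_t pointer fields with compat_ptr().
+ */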
+static int get_atomisp_histogram32(struct atomisp_histogram *kp,
+ struct atomisp_histogram32 __user *up)
+{
+ compat_uptr_t tmp;
+
+ if (!access_ok(up, sizeof(struct atomisp_histogram32)) ||
+ get_user(kp->num_elements, &up->num_elements) ||
+ get_user(tmp, &up->data))
+ return -EFAULT;
+
+ kp->data = compat_ptr(tmp);
+ return 0;
+}
+
+static int put_atomisp_histogram32(struct atomisp_histogram *kp,
+ struct atomisp_histogram32 __user *up)
+{
+ compat_uptr_t tmp = (compat_uptr_t)((uintptr_t)kp->data);
+
+ if (!access_ok(up, sizeof(struct atomisp_histogram32)) ||
+ put_user(kp->num_elements, &up->num_elements) ||
+ put_user(tmp, &up->data))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp,
+ struct v4l2_pix_format __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp,
+ struct v4l2_pix_format __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp,
+ struct v4l2_framebuffer32 __user *up)
+{
+ compat_uptr_t tmp;
+
+ if (!access_ok(up, sizeof(struct v4l2_framebuffer32)) ||
+ get_user(tmp, &up->base) ||
+ get_user(kp->capability, &up->capability) ||
+ get_user(kp->flags, &up->flags))
+ return -EFAULT;
+
+ kp->base = (void __force *)compat_ptr(tmp);
+	return get_v4l2_pix_format((struct v4l2_pix_format *)&kp->fmt,
+				   &up->fmt);
+}
+
+static int get_atomisp_dis_statistics32(struct atomisp_dis_statistics *kp,
+ struct atomisp_dis_statistics32 __user *up)
+{
+ compat_uptr_t hor_prod_odd_real;
+ compat_uptr_t hor_prod_odd_imag;
+ compat_uptr_t hor_prod_even_real;
+ compat_uptr_t hor_prod_even_imag;
+ compat_uptr_t ver_prod_odd_real;
+ compat_uptr_t ver_prod_odd_imag;
+ compat_uptr_t ver_prod_even_real;
+ compat_uptr_t ver_prod_even_imag;
+
+ if (!access_ok(up, sizeof(struct atomisp_dis_statistics32)) ||
+ copy_from_user(kp, up, sizeof(struct atomisp_dvs_grid_info)) ||
+ get_user(hor_prod_odd_real, &up->dvs2_stat.hor_prod.odd_real) ||
+ get_user(hor_prod_odd_imag, &up->dvs2_stat.hor_prod.odd_imag) ||
+ get_user(hor_prod_even_real, &up->dvs2_stat.hor_prod.even_real) ||
+ get_user(hor_prod_even_imag, &up->dvs2_stat.hor_prod.even_imag) ||
+ get_user(ver_prod_odd_real, &up->dvs2_stat.ver_prod.odd_real) ||
+ get_user(ver_prod_odd_imag, &up->dvs2_stat.ver_prod.odd_imag) ||
+ get_user(ver_prod_even_real, &up->dvs2_stat.ver_prod.even_real) ||
+ get_user(ver_prod_even_imag, &up->dvs2_stat.ver_prod.even_imag) ||
+ get_user(kp->exp_id, &up->exp_id))
+ return -EFAULT;
+
+ kp->dvs2_stat.hor_prod.odd_real = compat_ptr(hor_prod_odd_real);
+ kp->dvs2_stat.hor_prod.odd_imag = compat_ptr(hor_prod_odd_imag);
+ kp->dvs2_stat.hor_prod.even_real = compat_ptr(hor_prod_even_real);
+ kp->dvs2_stat.hor_prod.even_imag = compat_ptr(hor_prod_even_imag);
+ kp->dvs2_stat.ver_prod.odd_real = compat_ptr(ver_prod_odd_real);
+ kp->dvs2_stat.ver_prod.odd_imag = compat_ptr(ver_prod_odd_imag);
+ kp->dvs2_stat.ver_prod.even_real = compat_ptr(ver_prod_even_real);
+ kp->dvs2_stat.ver_prod.even_imag = compat_ptr(ver_prod_even_imag);
+ return 0;
+}
+
+static int put_atomisp_dis_statistics32(struct atomisp_dis_statistics *kp,
+ struct atomisp_dis_statistics32 __user *up)
+{
+ compat_uptr_t hor_prod_odd_real =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.odd_real);
+ compat_uptr_t hor_prod_odd_imag =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.odd_imag);
+ compat_uptr_t hor_prod_even_real =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.even_real);
+ compat_uptr_t hor_prod_even_imag =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.even_imag);
+ compat_uptr_t ver_prod_odd_real =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.odd_real);
+ compat_uptr_t ver_prod_odd_imag =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.odd_imag);
+ compat_uptr_t ver_prod_even_real =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.even_real);
+ compat_uptr_t ver_prod_even_imag =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.even_imag);
+
+ if (!access_ok(up, sizeof(struct atomisp_dis_statistics32)) ||
+ copy_to_user(up, kp, sizeof(struct atomisp_dvs_grid_info)) ||
+ put_user(hor_prod_odd_real, &up->dvs2_stat.hor_prod.odd_real) ||
+ put_user(hor_prod_odd_imag, &up->dvs2_stat.hor_prod.odd_imag) ||
+ put_user(hor_prod_even_real, &up->dvs2_stat.hor_prod.even_real) ||
+ put_user(hor_prod_even_imag, &up->dvs2_stat.hor_prod.even_imag) ||
+ put_user(ver_prod_odd_real, &up->dvs2_stat.ver_prod.odd_real) ||
+ put_user(ver_prod_odd_imag, &up->dvs2_stat.ver_prod.odd_imag) ||
+ put_user(ver_prod_even_real, &up->dvs2_stat.ver_prod.even_real) ||
+ put_user(ver_prod_even_imag, &up->dvs2_stat.ver_prod.even_imag) ||
+ put_user(kp->exp_id, &up->exp_id))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_dis_coefficients32(struct atomisp_dis_coefficients *kp,
+ struct atomisp_dis_coefficients32 __user *up)
+{
+ compat_uptr_t hor_coefs_odd_real;
+ compat_uptr_t hor_coefs_odd_imag;
+ compat_uptr_t hor_coefs_even_real;
+ compat_uptr_t hor_coefs_even_imag;
+ compat_uptr_t ver_coefs_odd_real;
+ compat_uptr_t ver_coefs_odd_imag;
+ compat_uptr_t ver_coefs_even_real;
+ compat_uptr_t ver_coefs_even_imag;
+
+ if (!access_ok(up, sizeof(struct atomisp_dis_coefficients32)) ||
+ copy_from_user(kp, up, sizeof(struct atomisp_dvs_grid_info)) ||
+ get_user(hor_coefs_odd_real, &up->hor_coefs.odd_real) ||
+ get_user(hor_coefs_odd_imag, &up->hor_coefs.odd_imag) ||
+ get_user(hor_coefs_even_real, &up->hor_coefs.even_real) ||
+ get_user(hor_coefs_even_imag, &up->hor_coefs.even_imag) ||
+ get_user(ver_coefs_odd_real, &up->ver_coefs.odd_real) ||
+ get_user(ver_coefs_odd_imag, &up->ver_coefs.odd_imag) ||
+ get_user(ver_coefs_even_real, &up->ver_coefs.even_real) ||
+ get_user(ver_coefs_even_imag, &up->ver_coefs.even_imag))
+ return -EFAULT;
+
+ kp->hor_coefs.odd_real = compat_ptr(hor_coefs_odd_real);
+ kp->hor_coefs.odd_imag = compat_ptr(hor_coefs_odd_imag);
+ kp->hor_coefs.even_real = compat_ptr(hor_coefs_even_real);
+ kp->hor_coefs.even_imag = compat_ptr(hor_coefs_even_imag);
+ kp->ver_coefs.odd_real = compat_ptr(ver_coefs_odd_real);
+ kp->ver_coefs.odd_imag = compat_ptr(ver_coefs_odd_imag);
+ kp->ver_coefs.even_real = compat_ptr(ver_coefs_even_real);
+ kp->ver_coefs.even_imag = compat_ptr(ver_coefs_even_imag);
+ return 0;
+}
+
+static int get_atomisp_dvs_6axis_config32(struct atomisp_dvs_6axis_config *kp,
+ struct atomisp_dvs_6axis_config32 __user *up)
+{
+ compat_uptr_t xcoords_y;
+ compat_uptr_t ycoords_y;
+ compat_uptr_t xcoords_uv;
+ compat_uptr_t ycoords_uv;
+
+ if (!access_ok(up, sizeof(struct atomisp_dvs_6axis_config32)) ||
+ get_user(kp->exp_id, &up->exp_id) ||
+ get_user(kp->width_y, &up->width_y) ||
+ get_user(kp->height_y, &up->height_y) ||
+ get_user(kp->width_uv, &up->width_uv) ||
+ get_user(kp->height_uv, &up->height_uv) ||
+ get_user(xcoords_y, &up->xcoords_y) ||
+ get_user(ycoords_y, &up->ycoords_y) ||
+ get_user(xcoords_uv, &up->xcoords_uv) ||
+ get_user(ycoords_uv, &up->ycoords_uv))
+ return -EFAULT;
+
+ kp->xcoords_y = (void __force *)compat_ptr(xcoords_y);
+ kp->ycoords_y = (void __force *)compat_ptr(ycoords_y);
+ kp->xcoords_uv = (void __force *)compat_ptr(xcoords_uv);
+ kp->ycoords_uv = (void __force *)compat_ptr(ycoords_uv);
+ return 0;
+}
+
+static int get_atomisp_3a_statistics32(struct atomisp_3a_statistics *kp,
+ struct atomisp_3a_statistics32 __user *up)
+{
+ compat_uptr_t data;
+ compat_uptr_t rgby_data;
+
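+ /* Only the leading grid_info member is binary-compatible; the data
+ * pointers are widened from compat_uptr_t separately.
+ */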
+ if (!access_ok(up, sizeof(struct atomisp_3a_statistics32)) ||
+ copy_from_user(kp, up, sizeof(struct atomisp_grid_info)) ||
+ get_user(rgby_data, &up->rgby_data) ||
+ get_user(data, &up->data) ||
+ get_user(kp->exp_id, &up->exp_id) ||
+ get_user(kp->isp_config_id, &up->isp_config_id))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ kp->rgby_data = compat_ptr(rgby_data);
+
+ return 0;
+}
+
+static int put_atomisp_3a_statistics32(struct atomisp_3a_statistics *kp,
+ struct atomisp_3a_statistics32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+ compat_uptr_t rgby_data = (compat_uptr_t)((uintptr_t)kp->rgby_data);
+
+ if (!access_ok(up, sizeof(struct atomisp_3a_statistics32)) ||
+ copy_to_user(up, kp, sizeof(struct atomisp_grid_info)) ||
+ put_user(rgby_data, &up->rgby_data) ||
+ put_user(data, &up->data) ||
+ put_user(kp->exp_id, &up->exp_id) ||
+ put_user(kp->isp_config_id, &up->isp_config_id))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_metadata_stat32(struct atomisp_metadata *kp,
+ struct atomisp_metadata32 __user *up)
+{
+ compat_uptr_t data;
+ compat_uptr_t effective_width;
+
+ if (!access_ok(up, sizeof(struct atomisp_metadata32)) ||
+ get_user(data, &up->data) ||
+ get_user(kp->width, &up->width) ||
+ get_user(kp->height, &up->height) ||
+ get_user(kp->stride, &up->stride) ||
+ get_user(kp->exp_id, &up->exp_id) ||
+ get_user(effective_width, &up->effective_width))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ kp->effective_width = (void __force *)compat_ptr(effective_width);
+ return 0;
+}
+
+static int put_atomisp_metadata_stat32(struct atomisp_metadata *kp,
+ struct atomisp_metadata32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+ compat_uptr_t effective_width =
+ (compat_uptr_t)((uintptr_t)kp->effective_width);
+ if (!access_ok(up, sizeof(struct atomisp_metadata32)) ||
+ put_user(data, &up->data) ||
+ put_user(kp->width, &up->width) ||
+ put_user(kp->height, &up->height) ||
+ put_user(kp->stride, &up->stride) ||
+ put_user(kp->exp_id, &up->exp_id) ||
+ put_user(effective_width, &up->effective_width))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int put_atomisp_metadata_by_type_stat32(
+ struct atomisp_metadata_with_type *kp,
+ struct atomisp_metadata_with_type32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+ compat_uptr_t effective_width =
+ (compat_uptr_t)((uintptr_t)kp->effective_width);
+ if (!access_ok(up, sizeof(struct atomisp_metadata_with_type32)) ||
+ put_user(data, &up->data) ||
+ put_user(kp->width, &up->width) ||
+ put_user(kp->height, &up->height) ||
+ put_user(kp->stride, &up->stride) ||
+ put_user(kp->exp_id, &up->exp_id) ||
+ put_user(effective_width, &up->effective_width) ||
+ put_user(kp->type, &up->type))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_metadata_by_type_stat32(
+ struct atomisp_metadata_with_type *kp,
+ struct atomisp_metadata_with_type32 __user *up)
+{
+ compat_uptr_t data;
+ compat_uptr_t effective_width;
+
+ if (!access_ok(up, sizeof(struct atomisp_metadata_with_type32)) ||
+ get_user(data, &up->data) ||
+ get_user(kp->width, &up->width) ||
+ get_user(kp->height, &up->height) ||
+ get_user(kp->stride, &up->stride) ||
+ get_user(kp->exp_id, &up->exp_id) ||
+ get_user(effective_width, &up->effective_width) ||
+ get_user(kp->type, &up->type))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ kp->effective_width = (void __force *)compat_ptr(effective_width);
+ return 0;
+}
+
+static int get_atomisp_morph_table32(struct atomisp_morph_table *kp,
+ struct atomisp_morph_table32 __user *up)
+{
+ unsigned int n = ATOMISP_MORPH_TABLE_NUM_PLANES;
+
+ if (!access_ok(up, sizeof(struct atomisp_morph_table32)) ||
+ get_user(kp->enabled, &up->enabled) ||
+ get_user(kp->width, &up->width) ||
+ get_user(kp->height, &up->height))
+ return -EFAULT;
+
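+ /* Widen each 32-bit plane pointer into its native pointer slot;
+ * get_user() reads only sizeof(compat_uptr_t) bytes per element.
+ */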
+ while (n-- > 0) {
+ uintptr_t *coord_kp = (uintptr_t *)&kp->coordinates_x[n];
+
+ if (get_user((*coord_kp), &up->coordinates_x[n]))
+ return -EFAULT;
+
+ coord_kp = (uintptr_t *)&kp->coordinates_y[n];
+ if (get_user((*coord_kp), &up->coordinates_y[n]))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int put_atomisp_morph_table32(struct atomisp_morph_table *kp,
+ struct atomisp_morph_table32 __user *up)
+{
+ unsigned int n = ATOMISP_MORPH_TABLE_NUM_PLANES;
+
+ if (!access_ok(up, sizeof(struct atomisp_morph_table32)) ||
+ put_user(kp->enabled, &up->enabled) ||
+ put_user(kp->width, &up->width) ||
+ put_user(kp->height, &up->height))
+ return -EFAULT;
+
+ while (n-- > 0) {
+ uintptr_t *coord_kp = (uintptr_t *)&kp->coordinates_x[n];
+
+ if (put_user((*coord_kp), &up->coordinates_x[n]))
+ return -EFAULT;
+
+ coord_kp = (uintptr_t *)&kp->coordinates_y[n];
+ if (put_user((*coord_kp), &up->coordinates_y[n]))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int get_atomisp_overlay32(struct atomisp_overlay *kp,
+ struct atomisp_overlay32 __user *up)
+{
+ compat_uptr_t frame;
+
+ if (!access_ok(up, sizeof(struct atomisp_overlay32)) ||
+ get_user(frame, &up->frame) ||
+ get_user(kp->bg_y, &up->bg_y) ||
+ get_user(kp->bg_u, &up->bg_u) ||
+ get_user(kp->bg_v, &up->bg_v) ||
+ get_user(kp->blend_input_perc_y, &up->blend_input_perc_y) ||
+ get_user(kp->blend_input_perc_u, &up->blend_input_perc_u) ||
+ get_user(kp->blend_input_perc_v, &up->blend_input_perc_v) ||
+ get_user(kp->blend_overlay_perc_y, &up->blend_overlay_perc_y) ||
+ get_user(kp->blend_overlay_perc_u, &up->blend_overlay_perc_u) ||
+ get_user(kp->blend_overlay_perc_v, &up->blend_overlay_perc_v) ||
+ get_user(kp->overlay_start_x, &up->overlay_start_x) ||
+ get_user(kp->overlay_start_y, &up->overlay_start_y))
+ return -EFAULT;
+
+ kp->frame = (void __force *)compat_ptr(frame);
+ return 0;
+}
+
+static int put_atomisp_overlay32(struct atomisp_overlay *kp,
+ struct atomisp_overlay32 __user *up)
+{
+ compat_uptr_t frame = (compat_uptr_t)((uintptr_t)kp->frame);
+
+ if (!access_ok(up, sizeof(struct atomisp_overlay32)) ||
+ put_user(frame, &up->frame) ||
+ put_user(kp->bg_y, &up->bg_y) ||
+ put_user(kp->bg_u, &up->bg_u) ||
+ put_user(kp->bg_v, &up->bg_v) ||
+ put_user(kp->blend_input_perc_y, &up->blend_input_perc_y) ||
+ put_user(kp->blend_input_perc_u, &up->blend_input_perc_u) ||
+ put_user(kp->blend_input_perc_v, &up->blend_input_perc_v) ||
+ put_user(kp->blend_overlay_perc_y, &up->blend_overlay_perc_y) ||
+ put_user(kp->blend_overlay_perc_u, &up->blend_overlay_perc_u) ||
+ put_user(kp->blend_overlay_perc_v, &up->blend_overlay_perc_v) ||
+ put_user(kp->overlay_start_x, &up->overlay_start_x) ||
+ put_user(kp->overlay_start_y, &up->overlay_start_y))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_calibration_group32(
+ struct atomisp_calibration_group *kp,
+ struct atomisp_calibration_group32 __user *up)
+{
+ compat_uptr_t calb_grp_values;
+
+ if (!access_ok(up, sizeof(struct atomisp_calibration_group32)) ||
+ get_user(kp->size, &up->size) ||
+ get_user(kp->type, &up->type) ||
+ get_user(calb_grp_values, &up->calb_grp_values))
+ return -EFAULT;
+
+ kp->calb_grp_values = (void __force *)compat_ptr(calb_grp_values);
+ return 0;
+}
+
+static int put_atomisp_calibration_group32(
+ struct atomisp_calibration_group *kp,
+ struct atomisp_calibration_group32 __user *up)
+{
+ compat_uptr_t calb_grp_values =
+ (compat_uptr_t)((uintptr_t)kp->calb_grp_values);
+
+ if (!access_ok(up, sizeof(struct atomisp_calibration_group32)) ||
+ put_user(kp->size, &up->size) ||
+ put_user(kp->type, &up->type) ||
+ put_user(calb_grp_values, &up->calb_grp_values))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_acc_fw_load32(struct atomisp_acc_fw_load *kp,
+ struct atomisp_acc_fw_load32 __user *up)
+{
+ compat_uptr_t data;
+
+ if (!access_ok(up, sizeof(struct atomisp_acc_fw_load32)) ||
+ get_user(kp->size, &up->size) ||
+ get_user(kp->fw_handle, &up->fw_handle) ||
+ get_user(data, &up->data))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ return 0;
+}
+
+static int put_atomisp_acc_fw_load32(struct atomisp_acc_fw_load *kp,
+ struct atomisp_acc_fw_load32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+
+ if (!access_ok(up, sizeof(struct atomisp_acc_fw_load32)) ||
+ put_user(kp->size, &up->size) ||
+ put_user(kp->fw_handle, &up->fw_handle) ||
+ put_user(data, &up->data))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_acc_fw_arg32(struct atomisp_acc_fw_arg *kp,
+ struct atomisp_acc_fw_arg32 __user *up)
+{
+ compat_uptr_t value;
+
+ if (!access_ok(up, sizeof(struct atomisp_acc_fw_arg32)) ||
+ get_user(kp->fw_handle, &up->fw_handle) ||
+ get_user(kp->index, &up->index) ||
+ get_user(value, &up->value) ||
+ get_user(kp->size, &up->size))
+ return -EFAULT;
+
+ kp->value = compat_ptr(value);
+ return 0;
+}
+
+static int put_atomisp_acc_fw_arg32(struct atomisp_acc_fw_arg *kp,
+ struct atomisp_acc_fw_arg32 __user *up)
+{
+ compat_uptr_t value = (compat_uptr_t)((uintptr_t)kp->value);
+
+ if (!access_ok(up, sizeof(struct atomisp_acc_fw_arg32)) ||
+ put_user(kp->fw_handle, &up->fw_handle) ||
+ put_user(kp->index, &up->index) ||
+ put_user(value, &up->value) ||
+ put_user(kp->size, &up->size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_v4l2_private_int_data32(struct v4l2_private_int_data *kp,
+ struct v4l2_private_int_data32 __user *up)
+{
+ compat_uptr_t data;
+
+ if (!access_ok(up, sizeof(struct v4l2_private_int_data32)) ||
+ get_user(kp->size, &up->size) ||
+ get_user(data, &up->data) ||
+ get_user(kp->reserved[0], &up->reserved[0]) ||
+ get_user(kp->reserved[1], &up->reserved[1]))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ return 0;
+}
+
+static int put_v4l2_private_int_data32(struct v4l2_private_int_data *kp,
+ struct v4l2_private_int_data32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+
+ if (!access_ok(up, sizeof(struct v4l2_private_int_data32)) ||
+ put_user(kp->size, &up->size) ||
+ put_user(data, &up->data) ||
+ put_user(kp->reserved[0], &up->reserved[0]) ||
+ put_user(kp->reserved[1], &up->reserved[1]))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_shading_table32(struct atomisp_shading_table *kp,
+ struct atomisp_shading_table32 __user *up)
+{
+ unsigned int n = ATOMISP_NUM_SC_COLORS;
+
+ if (!access_ok(up, sizeof(struct atomisp_shading_table32)) ||
+ get_user(kp->enable, &up->enable) ||
+ get_user(kp->sensor_width, &up->sensor_width) ||
+ get_user(kp->sensor_height, &up->sensor_height) ||
+ get_user(kp->width, &up->width) ||
+ get_user(kp->height, &up->height) ||
+ get_user(kp->fraction_bits, &up->fraction_bits))
+ return -EFAULT;
+
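+ /* Widen the per-color 32-bit data pointers into native pointers. */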
+ while (n-- > 0) {
+ uintptr_t *data_p = (uintptr_t *)&kp->data[n];
+
+ if (get_user((*data_p), &up->data[n]))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int get_atomisp_acc_map32(struct atomisp_acc_map *kp,
+ struct atomisp_acc_map32 __user *up)
+{
+ compat_uptr_t user_ptr;
+
+ if (!access_ok(up, sizeof(struct atomisp_acc_map32)) ||
+ get_user(kp->flags, &up->flags) ||
+ get_user(kp->length, &up->length) ||
+ get_user(user_ptr, &up->user_ptr) ||
+ get_user(kp->css_ptr, &up->css_ptr) ||
+ get_user(kp->reserved[0], &up->reserved[0]) ||
+ get_user(kp->reserved[1], &up->reserved[1]) ||
+ get_user(kp->reserved[2], &up->reserved[2]) ||
+ get_user(kp->reserved[3], &up->reserved[3]))
+ return -EFAULT;
+
+ kp->user_ptr = compat_ptr(user_ptr);
+ return 0;
+}
+
+static int put_atomisp_acc_map32(struct atomisp_acc_map *kp,
+ struct atomisp_acc_map32 __user *up)
+{
+ compat_uptr_t user_ptr = (compat_uptr_t)((uintptr_t)kp->user_ptr);
+
+ if (!access_ok(up, sizeof(struct atomisp_acc_map32)) ||
+ put_user(kp->flags, &up->flags) ||
+ put_user(kp->length, &up->length) ||
+ put_user(user_ptr, &up->user_ptr) ||
+ put_user(kp->css_ptr, &up->css_ptr) ||
+ put_user(kp->reserved[0], &up->reserved[0]) ||
+ put_user(kp->reserved[1], &up->reserved[1]) ||
+ put_user(kp->reserved[2], &up->reserved[2]) ||
+ put_user(kp->reserved[3], &up->reserved[3]))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_acc_s_mapped_arg32(struct atomisp_acc_s_mapped_arg *kp,
+ struct atomisp_acc_s_mapped_arg32 __user *up)
+{
+ if (!access_ok(up, sizeof(struct atomisp_acc_s_mapped_arg32)) ||
+ get_user(kp->fw_handle, &up->fw_handle) ||
+ get_user(kp->memory, &up->memory) ||
+ get_user(kp->length, &up->length) ||
+ get_user(kp->css_ptr, &up->css_ptr))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int put_atomisp_acc_s_mapped_arg32(struct atomisp_acc_s_mapped_arg *kp,
+ struct atomisp_acc_s_mapped_arg32 __user *up)
+{
+ if (!access_ok(up, sizeof(struct atomisp_acc_s_mapped_arg32)) ||
+ put_user(kp->fw_handle, &up->fw_handle) ||
+ put_user(kp->memory, &up->memory) ||
+ put_user(kp->length, &up->length) ||
+ put_user(kp->css_ptr, &up->css_ptr))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_parameters32(struct atomisp_parameters *kp,
+ struct atomisp_parameters32 __user *up)
+{
+ int n = offsetof(struct atomisp_parameters32, output_frame) /
+ sizeof(compat_uptr_t);
+ unsigned int size, offset = 0;
+ void __user *user_ptr;
+ unsigned int stp, mtp, dcp, dscp = 0;
+
+ if (!access_ok(up, sizeof(struct atomisp_parameters32)))
+ return -EFAULT;
+
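+ /*
+ * Both structures start with the same sequence of pointer fields, so
+ * every compat_uptr_t up to and including output_frame can be widened
+ * into the corresponding native pointer slot by index.
+ */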
+ while (n >= 0) {
+ compat_uptr_t __user *src = ((compat_uptr_t __user *)up) + n;
+ uintptr_t *dst = ((uintptr_t *)kp) + n;
+
+ if (get_user((*dst), src))
+ return -EFAULT;
+ n--;
+ }
+ if (get_user(kp->isp_config_id, &up->isp_config_id) ||
+ get_user(kp->per_frame_setting, &up->per_frame_setting) ||
+ get_user(stp, &up->shading_table) ||
+ get_user(mtp, &up->morph_table) ||
+ get_user(dcp, &up->dvs2_coefs) ||
+ get_user(dscp, &up->dvs_6axis_config))
+ return -EFAULT;
+
+ {
+ union {
+ struct atomisp_shading_table shading_table;
+ struct atomisp_morph_table morph_table;
+ struct atomisp_dis_coefficients dvs2_coefs;
+ struct atomisp_dvs_6axis_config dvs_6axis_config;
+ } karg;
+
+ size = sizeof(struct atomisp_shading_table) +
+ sizeof(struct atomisp_morph_table) +
+ sizeof(struct atomisp_dis_coefficients) +
+ sizeof(struct atomisp_dvs_6axis_config);
+ user_ptr = compat_alloc_user_space(size);
+ if (!user_ptr)
+ return -EFAULT;
+
+ /* handle shading table */
+ if (stp != 0) {
+ if (get_atomisp_shading_table32(&karg.shading_table,
+ (struct atomisp_shading_table32 __user *)
+ (uintptr_t)stp))
+ return -EFAULT;
+
+ kp->shading_table = (void __force *)user_ptr + offset;
+ offset = sizeof(struct atomisp_shading_table);
+ if (!kp->shading_table)
+ return -EFAULT;
+
+ if (copy_to_user((void __user *)kp->shading_table,
+ &karg.shading_table,
+ sizeof(struct atomisp_shading_table)))
+ return -EFAULT;
+ }
+
+ /* handle morph table */
+ if (mtp != 0) {
+ if (get_atomisp_morph_table32(&karg.morph_table,
+ (struct atomisp_morph_table32 __user *)
+ (uintptr_t)mtp))
+ return -EFAULT;
+
+ kp->morph_table = (void __force *)user_ptr + offset;
+ offset += sizeof(struct atomisp_morph_table);
+ if (!kp->morph_table)
+ return -EFAULT;
+
+ if (copy_to_user((void __user *)kp->morph_table,
+ &karg.morph_table,
+ sizeof(struct atomisp_morph_table)))
+ return -EFAULT;
+ }
+
+ /* handle dvs2 coefficients */
+ if (dcp != 0) {
+ if (get_atomisp_dis_coefficients32(&karg.dvs2_coefs,
+ (struct atomisp_dis_coefficients32 __user *)
+ (uintptr_t)dcp))
+ return -EFAULT;
+
+ kp->dvs2_coefs = (void __force *)user_ptr + offset;
+ offset += sizeof(struct atomisp_dis_coefficients);
+ if (!kp->dvs2_coefs)
+ return -EFAULT;
+
+ if (copy_to_user((void __user *)kp->dvs2_coefs,
+ &karg.dvs2_coefs,
+ sizeof(struct atomisp_dis_coefficients)))
+ return -EFAULT;
+ }
+ /* handle dvs 6axis configuration */
+ if (dscp != 0) {
+ if (get_atomisp_dvs_6axis_config32(&karg.dvs_6axis_config,
+ (struct atomisp_dvs_6axis_config32 __user *)
+ (uintptr_t)dscp))
+ return -EFAULT;
+
+ kp->dvs_6axis_config = (void __force *)user_ptr + offset;
+ offset += sizeof(struct atomisp_dvs_6axis_config);
+ if (!kp->dvs_6axis_config)
+ return -EFAULT;
+
+ if (copy_to_user((void __user *)kp->dvs_6axis_config,
+ &karg.dvs_6axis_config,
+ sizeof(struct atomisp_dvs_6axis_config)))
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static int get_atomisp_acc_fw_load_to_pipe32(
+ struct atomisp_acc_fw_load_to_pipe *kp,
+ struct atomisp_acc_fw_load_to_pipe32 __user *up)
+{
+ compat_uptr_t data;
+
+ if (!access_ok(up, sizeof(struct atomisp_acc_fw_load_to_pipe32)) ||
+ get_user(kp->flags, &up->flags) ||
+ get_user(kp->fw_handle, &up->fw_handle) ||
+ get_user(kp->size, &up->size) ||
+ get_user(kp->type, &up->type) ||
+ get_user(kp->reserved[0], &up->reserved[0]) ||
+ get_user(kp->reserved[1], &up->reserved[1]) ||
+ get_user(kp->reserved[2], &up->reserved[2]) ||
+ get_user(data, &up->data))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ return 0;
+}
+
+static int put_atomisp_acc_fw_load_to_pipe32(
+ struct atomisp_acc_fw_load_to_pipe *kp,
+ struct atomisp_acc_fw_load_to_pipe32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+
+ if (!access_ok(up, sizeof(struct atomisp_acc_fw_load_to_pipe32)) ||
+ put_user(kp->flags, &up->flags) ||
+ put_user(kp->fw_handle, &up->fw_handle) ||
+ put_user(kp->size, &up->size) ||
+ put_user(kp->type, &up->type) ||
+ put_user(kp->reserved[0], &up->reserved[0]) ||
+ put_user(kp->reserved[1], &up->reserved[1]) ||
+ put_user(kp->reserved[2], &up->reserved[2]) ||
+ put_user(data, &up->data))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_sensor_ae_bracketing_lut(
+ struct atomisp_sensor_ae_bracketing_lut *kp,
+ struct atomisp_sensor_ae_bracketing_lut32 __user *up)
+{
+ compat_uptr_t lut;
+
+ if (!access_ok(up, sizeof(struct atomisp_sensor_ae_bracketing_lut32)) ||
+ get_user(kp->lut_size, &up->lut_size) ||
+ get_user(lut, &up->lut))
+ return -EFAULT;
+
+ kp->lut = (void __force *)compat_ptr(lut);
+ return 0;
+}
+
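+/*
+ * Forward an ioctl to the native (64-bit) handler. For converted
+ * commands the caller passes a kernel pointer under KERNEL_DS.
+ */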
+static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret = -ENOIOCTLCMD;
+
+ if (file->f_op->unlocked_ioctl)
+ ret = file->f_op->unlocked_ioctl(file, cmd, arg);
+
+ return ret;
+}
+
+static long atomisp_do_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
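+ /* Kernel-side copy of whichever payload this command carries. */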
+ union {
+ struct atomisp_histogram his;
+ struct atomisp_dis_statistics dis_s;
+ struct atomisp_dis_coefficients dis_c;
+ struct atomisp_dvs_6axis_config dvs_c;
+ struct atomisp_3a_statistics s3a_s;
+ struct atomisp_morph_table mor_t;
+ struct v4l2_framebuffer v4l2_buf;
+ struct atomisp_overlay overlay;
+ struct atomisp_calibration_group cal_grp;
+ struct atomisp_acc_fw_load acc_fw_load;
+ struct atomisp_acc_fw_arg acc_fw_arg;
+ struct v4l2_private_int_data v4l2_pri_data;
+ struct atomisp_shading_table shd_tbl;
+ struct atomisp_acc_map acc_map;
+ struct atomisp_acc_s_mapped_arg acc_map_arg;
+ struct atomisp_parameters param;
+ struct atomisp_acc_fw_load_to_pipe acc_fw_to_pipe;
+ struct atomisp_metadata md;
+ struct atomisp_metadata_with_type md_with_type;
+ struct atomisp_sensor_ae_bracketing_lut lut;
+ } karg;
+ mm_segment_t old_fs;
+ void __user *up = compat_ptr(arg);
+ long err = -ENOIOCTLCMD;
+
+ /* First, convert the command. */
+ switch (cmd) {
+ case ATOMISP_IOC_G_HISTOGRAM32:
+ cmd = ATOMISP_IOC_G_HISTOGRAM;
+ break;
+ case ATOMISP_IOC_S_HISTOGRAM32:
+ cmd = ATOMISP_IOC_S_HISTOGRAM;
+ break;
+ case ATOMISP_IOC_G_DIS_STAT32:
+ cmd = ATOMISP_IOC_G_DIS_STAT;
+ break;
+ case ATOMISP_IOC_S_DIS_COEFS32:
+ cmd = ATOMISP_IOC_S_DIS_COEFS;
+ break;
+ case ATOMISP_IOC_S_DIS_VECTOR32:
+ cmd = ATOMISP_IOC_S_DIS_VECTOR;
+ break;
+ case ATOMISP_IOC_G_3A_STAT32:
+ cmd = ATOMISP_IOC_G_3A_STAT;
+ break;
+ case ATOMISP_IOC_G_ISP_GDC_TAB32:
+ cmd = ATOMISP_IOC_G_ISP_GDC_TAB;
+ break;
+ case ATOMISP_IOC_S_ISP_GDC_TAB32:
+ cmd = ATOMISP_IOC_S_ISP_GDC_TAB;
+ break;
+ case ATOMISP_IOC_S_ISP_FPN_TABLE32:
+ cmd = ATOMISP_IOC_S_ISP_FPN_TABLE;
+ break;
+ case ATOMISP_IOC_G_ISP_OVERLAY32:
+ cmd = ATOMISP_IOC_G_ISP_OVERLAY;
+ break;
+ case ATOMISP_IOC_S_ISP_OVERLAY32:
+ cmd = ATOMISP_IOC_S_ISP_OVERLAY;
+ break;
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32:
+ cmd = ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP;
+ break;
+ case ATOMISP_IOC_ACC_LOAD32:
+ cmd = ATOMISP_IOC_ACC_LOAD;
+ break;
+ case ATOMISP_IOC_ACC_S_ARG32:
+ cmd = ATOMISP_IOC_ACC_S_ARG;
+ break;
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32:
+ cmd = ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA;
+ break;
+ case ATOMISP_IOC_S_ISP_SHD_TAB32:
+ cmd = ATOMISP_IOC_S_ISP_SHD_TAB;
+ break;
+ case ATOMISP_IOC_ACC_DESTAB32:
+ cmd = ATOMISP_IOC_ACC_DESTAB;
+ break;
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32:
+ cmd = ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA;
+ break;
+ case ATOMISP_IOC_ACC_MAP32:
+ cmd = ATOMISP_IOC_ACC_MAP;
+ break;
+ case ATOMISP_IOC_ACC_UNMAP32:
+ cmd = ATOMISP_IOC_ACC_UNMAP;
+ break;
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG32:
+ cmd = ATOMISP_IOC_ACC_S_MAPPED_ARG;
+ break;
+ case ATOMISP_IOC_S_PARAMETERS32:
+ cmd = ATOMISP_IOC_S_PARAMETERS;
+ break;
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE32:
+ cmd = ATOMISP_IOC_ACC_LOAD_TO_PIPE;
+ break;
+ case ATOMISP_IOC_G_METADATA32:
+ cmd = ATOMISP_IOC_G_METADATA;
+ break;
+ case ATOMISP_IOC_G_METADATA_BY_TYPE32:
+ cmd = ATOMISP_IOC_G_METADATA_BY_TYPE;
+ break;
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32:
+ cmd = ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT;
+ break;
+ }
+
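+ /* Second, read the 32-bit payload into the kernel copy. */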
+ switch (cmd) {
+ case ATOMISP_IOC_G_HISTOGRAM:
+ case ATOMISP_IOC_S_HISTOGRAM:
+ err = get_atomisp_histogram32(&karg.his, up);
+ break;
+ case ATOMISP_IOC_G_DIS_STAT:
+ err = get_atomisp_dis_statistics32(&karg.dis_s, up);
+ break;
+ case ATOMISP_IOC_S_DIS_COEFS:
+ err = get_atomisp_dis_coefficients32(&karg.dis_c, up);
+ break;
+ case ATOMISP_IOC_S_DIS_VECTOR:
+ err = get_atomisp_dvs_6axis_config32(&karg.dvs_c, up);
+ break;
+ case ATOMISP_IOC_G_3A_STAT:
+ err = get_atomisp_3a_statistics32(&karg.s3a_s, up);
+ break;
+ case ATOMISP_IOC_G_ISP_GDC_TAB:
+ case ATOMISP_IOC_S_ISP_GDC_TAB:
+ err = get_atomisp_morph_table32(&karg.mor_t, up);
+ break;
+ case ATOMISP_IOC_S_ISP_FPN_TABLE:
+ err = get_v4l2_framebuffer32(&karg.v4l2_buf, up);
+ break;
+ case ATOMISP_IOC_G_ISP_OVERLAY:
+ case ATOMISP_IOC_S_ISP_OVERLAY:
+ err = get_atomisp_overlay32(&karg.overlay, up);
+ break;
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ err = get_atomisp_calibration_group32(&karg.cal_grp, up);
+ break;
+ case ATOMISP_IOC_ACC_LOAD:
+ err = get_atomisp_acc_fw_load32(&karg.acc_fw_load, up);
+ break;
+ case ATOMISP_IOC_ACC_S_ARG:
+ case ATOMISP_IOC_ACC_DESTAB:
+ err = get_atomisp_acc_fw_arg32(&karg.acc_fw_arg, up);
+ break;
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+ err = get_v4l2_private_int_data32(&karg.v4l2_pri_data, up);
+ break;
+ case ATOMISP_IOC_S_ISP_SHD_TAB:
+ err = get_atomisp_shading_table32(&karg.shd_tbl, up);
+ break;
+ case ATOMISP_IOC_ACC_MAP:
+ case ATOMISP_IOC_ACC_UNMAP:
+ err = get_atomisp_acc_map32(&karg.acc_map, up);
+ break;
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG:
+ err = get_atomisp_acc_s_mapped_arg32(&karg.acc_map_arg, up);
+ break;
+ case ATOMISP_IOC_S_PARAMETERS:
+ err = get_atomisp_parameters32(&karg.param, up);
+ break;
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE:
+ err = get_atomisp_acc_fw_load_to_pipe32(&karg.acc_fw_to_pipe,
+ up);
+ break;
+ case ATOMISP_IOC_G_METADATA:
+ err = get_atomisp_metadata_stat32(&karg.md, up);
+ break;
+ case ATOMISP_IOC_G_METADATA_BY_TYPE:
+ err = get_atomisp_metadata_by_type_stat32(&karg.md_with_type,
+ up);
+ break;
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
+ err = get_atomisp_sensor_ae_bracketing_lut(&karg.lut, up);
+ break;
+ }
+ if (err)
+ return err;
+
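+ /*
+ * Issue the native ioctl under KERNEL_DS so that its user-copy
+ * helpers accept the kernel pointer &karg.
+ */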
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = native_ioctl(file, cmd, (unsigned long)&karg);
+ set_fs(old_fs);
+ if (err)
+ return err;
+
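+ /* Finally, write any output payload back in the 32-bit layout. */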
+ switch (cmd) {
+ case ATOMISP_IOC_G_HISTOGRAM:
+ err = put_atomisp_histogram32(&karg.his, up);
+ break;
+ case ATOMISP_IOC_G_DIS_STAT:
+ err = put_atomisp_dis_statistics32(&karg.dis_s, up);
+ break;
+ case ATOMISP_IOC_G_3A_STAT:
+ err = put_atomisp_3a_statistics32(&karg.s3a_s, up);
+ break;
+ case ATOMISP_IOC_G_ISP_GDC_TAB:
+ err = put_atomisp_morph_table32(&karg.mor_t, up);
+ break;
+ case ATOMISP_IOC_G_ISP_OVERLAY:
+ err = put_atomisp_overlay32(&karg.overlay, up);
+ break;
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ err = put_atomisp_calibration_group32(&karg.cal_grp, up);
+ break;
+ case ATOMISP_IOC_ACC_LOAD:
+ err = put_atomisp_acc_fw_load32(&karg.acc_fw_load, up);
+ break;
+ case ATOMISP_IOC_ACC_S_ARG:
+ case ATOMISP_IOC_ACC_DESTAB:
+ err = put_atomisp_acc_fw_arg32(&karg.acc_fw_arg, up);
+ break;
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+ err = put_v4l2_private_int_data32(&karg.v4l2_pri_data, up);
+ break;
+ case ATOMISP_IOC_ACC_MAP:
+ case ATOMISP_IOC_ACC_UNMAP:
+ err = put_atomisp_acc_map32(&karg.acc_map, up);
+ break;
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG:
+ err = put_atomisp_acc_s_mapped_arg32(&karg.acc_map_arg, up);
+ break;
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE:
+ err = put_atomisp_acc_fw_load_to_pipe32(&karg.acc_fw_to_pipe,
+ up);
+ break;
+ case ATOMISP_IOC_G_METADATA:
+ err = put_atomisp_metadata_stat32(&karg.md, up);
+ break;
+ case ATOMISP_IOC_G_METADATA_BY_TYPE:
+ err = put_atomisp_metadata_by_type_stat32(&karg.md_with_type,
+ up);
+ break;
+ }
+
+ return err;
+}
+
+long atomisp_compat_ioctl32(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ long ret = -ENOIOCTLCMD;
+
+ if (!file->f_op->unlocked_ioctl)
+ return ret;
+
+ switch (cmd) {
+ case ATOMISP_IOC_G_XNR:
+ case ATOMISP_IOC_S_XNR:
+ case ATOMISP_IOC_G_NR:
+ case ATOMISP_IOC_S_NR:
+ case ATOMISP_IOC_G_TNR:
+ case ATOMISP_IOC_S_TNR:
+ case ATOMISP_IOC_G_BLACK_LEVEL_COMP:
+ case ATOMISP_IOC_S_BLACK_LEVEL_COMP:
+ case ATOMISP_IOC_G_EE:
+ case ATOMISP_IOC_S_EE:
+ case ATOMISP_IOC_S_DIS_VECTOR:
+ case ATOMISP_IOC_G_ISP_PARM:
+ case ATOMISP_IOC_S_ISP_PARM:
+ case ATOMISP_IOC_G_ISP_GAMMA:
+ case ATOMISP_IOC_S_ISP_GAMMA:
+ case ATOMISP_IOC_ISP_MAKERNOTE:
+ case ATOMISP_IOC_G_ISP_MACC:
+ case ATOMISP_IOC_S_ISP_MACC:
+ case ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION:
+ case ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION:
+ case ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION:
+ case ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION:
+ case ATOMISP_IOC_G_ISP_CTC:
+ case ATOMISP_IOC_S_ISP_CTC:
+ case ATOMISP_IOC_G_ISP_WHITE_BALANCE:
+ case ATOMISP_IOC_S_ISP_WHITE_BALANCE:
+ case ATOMISP_IOC_CAMERA_BRIDGE:
+ case ATOMISP_IOC_G_SENSOR_MODE_DATA:
+ case ATOMISP_IOC_S_EXPOSURE:
+ case ATOMISP_IOC_G_3A_CONFIG:
+ case ATOMISP_IOC_S_3A_CONFIG:
+ case ATOMISP_IOC_ACC_UNLOAD:
+ case ATOMISP_IOC_ACC_START:
+ case ATOMISP_IOC_ACC_WAIT:
+ case ATOMISP_IOC_ACC_ABORT:
+ case ATOMISP_IOC_G_ISP_GAMMA_CORRECTION:
+ case ATOMISP_IOC_S_ISP_GAMMA_CORRECTION:
+ case ATOMISP_IOC_S_CONT_CAPTURE_CONFIG:
+ case ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS:
+ case ATOMISP_IOC_EXT_ISP_CTRL:
+ case ATOMISP_IOC_EXP_ID_UNLOCK:
+ case ATOMISP_IOC_EXP_ID_CAPTURE:
+ case ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE:
+ case ATOMISP_IOC_G_FORMATS_CONFIG:
+ case ATOMISP_IOC_S_FORMATS_CONFIG:
+ case ATOMISP_IOC_S_EXPOSURE_WINDOW:
+ case ATOMISP_IOC_S_ACC_STATE:
+ case ATOMISP_IOC_G_ACC_STATE:
+ case ATOMISP_IOC_INJECT_A_FAKE_EVENT:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_INVALID_FRAME_NUM:
+ case ATOMISP_IOC_S_ARRAY_RESOLUTION:
+ case ATOMISP_IOC_S_SENSOR_RUNMODE:
+ case ATOMISP_IOC_G_UPDATE_EXPOSURE:
+ ret = native_ioctl(file, cmd, arg);
+ break;
+
+ case ATOMISP_IOC_G_HISTOGRAM32:
+ case ATOMISP_IOC_S_HISTOGRAM32:
+ case ATOMISP_IOC_G_DIS_STAT32:
+ case ATOMISP_IOC_S_DIS_COEFS32:
+ case ATOMISP_IOC_S_DIS_VECTOR32:
+ case ATOMISP_IOC_G_3A_STAT32:
+ case ATOMISP_IOC_G_ISP_GDC_TAB32:
+ case ATOMISP_IOC_S_ISP_GDC_TAB32:
+ case ATOMISP_IOC_S_ISP_FPN_TABLE32:
+ case ATOMISP_IOC_G_ISP_OVERLAY32:
+ case ATOMISP_IOC_S_ISP_OVERLAY32:
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32:
+ case ATOMISP_IOC_ACC_LOAD32:
+ case ATOMISP_IOC_ACC_S_ARG32:
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32:
+ case ATOMISP_IOC_S_ISP_SHD_TAB32:
+ case ATOMISP_IOC_ACC_DESTAB32:
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32:
+ case ATOMISP_IOC_ACC_MAP32:
+ case ATOMISP_IOC_ACC_UNMAP32:
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG32:
+ case ATOMISP_IOC_S_PARAMETERS32:
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE32:
+ case ATOMISP_IOC_G_METADATA32:
+ case ATOMISP_IOC_G_METADATA_BY_TYPE32:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32:
+ ret = atomisp_do_compat_ioctl(file, cmd, arg);
+ break;
+
+ default:
+ dev_warn(isp->dev,
+ "%s: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
+ __func__, _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd),
+ cmd);
+ break;
+ }
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
new file mode 100644
index 000000000000..7e59ccb88a2e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
@@ -0,0 +1,367 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef __ATOMISP_COMPAT_IOCTL32_H__
+#define __ATOMISP_COMPAT_IOCTL32_H__
+
+#include <linux/compat.h>
+#include <linux/videodev2.h>
+
+#include "atomisp_compat.h"
+
+struct atomisp_histogram32 {
+ unsigned int num_elements;
+ compat_uptr_t data;
+};
+
+struct atomisp_dvs2_stat_types32 {
+ compat_uptr_t odd_real; /* real part of the odd statistics */
+ compat_uptr_t odd_imag; /* imaginary part of the odd statistics */
+ compat_uptr_t even_real; /* real part of the even statistics */
+ compat_uptr_t even_imag; /* imaginary part of the even statistics */
+};
+
+struct atomisp_dvs2_coef_types32 {
+ compat_uptr_t odd_real; /* real part of the odd coefficients */
+ compat_uptr_t odd_imag; /* imaginary part of the odd coefficients */
+ compat_uptr_t even_real; /* real part of the even coefficients */
+ compat_uptr_t even_imag; /* imaginary part of the even coefficients */
+};
+
+struct atomisp_dvs2_statistics32 {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_stat_types32 hor_prod;
+ struct atomisp_dvs2_stat_types32 ver_prod;
+};
+
+struct atomisp_dis_statistics32 {
+ struct atomisp_dvs2_statistics32 dvs2_stat;
+ u32 exp_id;
+};
+
+struct atomisp_dis_coefficients32 {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_coef_types32 hor_coefs;
+ struct atomisp_dvs2_coef_types32 ver_coefs;
+};
+
+struct atomisp_3a_statistics32 {
+ struct atomisp_grid_info grid_info;
+ compat_uptr_t data;
+ compat_uptr_t rgby_data;
+ u32 exp_id;
+ u32 isp_config_id;
+};
+
+struct atomisp_metadata_with_type32 {
+ /* to specify which type of metadata to get */
+ enum atomisp_metadata_type type;
+ compat_uptr_t data;
+ u32 width;
+ u32 height;
+ u32 stride; /* in bytes */
+ u32 exp_id; /* exposure ID */
+ compat_uptr_t effective_width;
+};
+
+struct atomisp_metadata32 {
+ compat_uptr_t data;
+ u32 width;
+ u32 height;
+ u32 stride;
+ u32 exp_id;
+ compat_uptr_t effective_width;
+};
+
+struct atomisp_morph_table32 {
+ unsigned int enabled;
+ unsigned int height;
+ unsigned int width; /* number of valid elements per line */
+ compat_uptr_t coordinates_x[ATOMISP_MORPH_TABLE_NUM_PLANES];
+ compat_uptr_t coordinates_y[ATOMISP_MORPH_TABLE_NUM_PLANES];
+};
+
+struct v4l2_framebuffer32 {
+ __u32 capability;
+ __u32 flags;
+ compat_uptr_t base;
+ struct v4l2_pix_format fmt;
+};
+
+struct atomisp_overlay32 {
+ /* The frame containing the overlay data. The overlay frame width
+ * should be a multiple of 2*ISP_VEC_NELEMS and the overlay frame
+ * height a multiple of 2.
+ */
+ compat_uptr_t frame;
+ /* Y value of overlay background */
+ unsigned char bg_y;
+ /* U value of overlay background */
+ char bg_u;
+ /* V value of overlay background */
+ char bg_v;
+ /* the blending percent of input data for Y subpixels */
+ unsigned char blend_input_perc_y;
+ /* the blending percent of input data for U subpixels */
+ unsigned char blend_input_perc_u;
+ /* the blending percent of input data for V subpixels */
+ unsigned char blend_input_perc_v;
+ /* the blending percent of overlay data for Y subpixels */
+ unsigned char blend_overlay_perc_y;
+ /* the blending percent of overlay data for U subpixels */
+ unsigned char blend_overlay_perc_u;
+ /* the blending percent of overlay data for V subpixels */
+ unsigned char blend_overlay_perc_v;
+ /* the overlay start x pixel position on the output frame; it should
+ be a multiple of 2*ISP_VEC_NELEMS. */
+ unsigned int overlay_start_x;
+ /* the overlay start y pixel position on the output frame; it should
+ be a multiple of 2. */
+ unsigned int overlay_start_y;
+};
+
+struct atomisp_calibration_group32 {
+ unsigned int size;
+ unsigned int type;
+ compat_uptr_t calb_grp_values;
+};
+
+struct atomisp_acc_fw_load32 {
+ unsigned int size;
+ unsigned int fw_handle;
+ compat_uptr_t data;
+};
+
+struct atomisp_acc_fw_arg32 {
+ unsigned int fw_handle;
+ unsigned int index;
+ compat_uptr_t value;
+ compat_size_t size;
+};
+
+struct v4l2_private_int_data32 {
+ __u32 size;
+ compat_uptr_t data;
+ __u32 reserved[2];
+};
+
+struct atomisp_shading_table32 {
+ __u32 enable;
+ __u32 sensor_width;
+ __u32 sensor_height;
+ __u32 width;
+ __u32 height;
+ __u32 fraction_bits;
+
+ compat_uptr_t data[ATOMISP_NUM_SC_COLORS];
+};
+
+struct atomisp_acc_map32 {
+ __u32 flags; /* Flags, see list below */
+ __u32 length; /* Length of data in bytes */
+ compat_uptr_t user_ptr; /* Pointer into user space */
+ compat_ulong_t css_ptr; /* Pointer into CSS address space */
+ __u32 reserved[4]; /* Set to zero */
+};
+
+struct atomisp_acc_s_mapped_arg32 {
+ unsigned int fw_handle;
+ __u32 memory; /* one of enum atomisp_acc_memory */
+ compat_size_t length;
+ compat_ulong_t css_ptr;
+};
+
+struct atomisp_parameters32 {
+ compat_uptr_t wb_config; /* White Balance config */
+ compat_uptr_t cc_config; /* Color Correction config */
+ compat_uptr_t tnr_config; /* Temporal Noise Reduction */
+ compat_uptr_t ecd_config; /* Eigen Color Demosaicing */
+ compat_uptr_t ynr_config; /* Y(Luma) Noise Reduction */
+ compat_uptr_t fc_config; /* Fringe Control */
+ compat_uptr_t formats_config; /* Formats Control */
+ compat_uptr_t cnr_config; /* Chroma Noise Reduction */
+ compat_uptr_t macc_config; /* MACC */
+ compat_uptr_t ctc_config; /* Chroma Tone Control */
+ compat_uptr_t aa_config; /* Anti-Aliasing */
+ compat_uptr_t baa_config; /* Anti-Aliasing */
+ compat_uptr_t ce_config;
+ compat_uptr_t dvs_6axis_config;
+ compat_uptr_t ob_config; /* Objective Black config */
+ compat_uptr_t dp_config; /* Dead Pixel config */
+ compat_uptr_t nr_config; /* Noise Reduction config */
+ compat_uptr_t ee_config; /* Edge Enhancement config */
+ compat_uptr_t de_config; /* Demosaic config */
+ compat_uptr_t gc_config; /* Gamma Correction config */
+ compat_uptr_t anr_config; /* Advanced Noise Reduction */
+ compat_uptr_t a3a_config; /* 3A Statistics config */
+ compat_uptr_t xnr_config; /* eXtra Noise Reduction */
+ compat_uptr_t dz_config; /* Digital Zoom */
+ compat_uptr_t yuv2rgb_cc_config; /* Color Correction config */
+ compat_uptr_t rgb2yuv_cc_config; /* Color Correction config */
+ compat_uptr_t macc_table;
+ compat_uptr_t gamma_table;
+ compat_uptr_t ctc_table;
+ compat_uptr_t xnr_table;
+ compat_uptr_t r_gamma_table;
+ compat_uptr_t g_gamma_table;
+ compat_uptr_t b_gamma_table;
+ compat_uptr_t motion_vector; /* For 2-axis DVS */
+ compat_uptr_t shading_table;
+ compat_uptr_t morph_table;
+ compat_uptr_t dvs_coefs; /* DVS 1.0 coefficients */
+ compat_uptr_t dvs2_coefs; /* DVS 2.0 coefficients */
+ compat_uptr_t capture_config;
+ compat_uptr_t anr_thres;
+
+ compat_uptr_t lin_2500_config; /* Skylake: Linearization config */
+ compat_uptr_t obgrid_2500_config; /* Skylake: OBGRID config */
+ compat_uptr_t bnr_2500_config; /* Skylake: bayer denoise config */
+ compat_uptr_t shd_2500_config; /* Skylake: shading config */
+ compat_uptr_t dm_2500_config; /* Skylake: demosaic config */
+ compat_uptr_t rgbpp_2500_config; /* Skylake: RGBPP config */
+ compat_uptr_t dvs_stat_2500_config; /* Skylake: DVS STAT config */
+ compat_uptr_t lace_stat_2500_config; /* Skylake: LACE STAT config */
+ compat_uptr_t yuvp1_2500_config; /* Skylake: yuvp1 config */
+ compat_uptr_t yuvp2_2500_config; /* Skylake: yuvp2 config */
+ compat_uptr_t tnr_2500_config; /* Skylake: TNR config */
+ compat_uptr_t dpc_2500_config; /* Skylake: DPC config */
+ compat_uptr_t awb_2500_config; /* Skylake: auto white balance config */
+ compat_uptr_t
+ awb_fr_2500_config; /* Skylake: auto white balance filter response config */
+ compat_uptr_t anr_2500_config; /* Skylake: ANR config */
+ compat_uptr_t af_2500_config; /* Skylake: auto focus config */
+ compat_uptr_t ae_2500_config; /* Skylake: auto exposure config */
+ compat_uptr_t bds_2500_config; /* Skylake: bayer downscaler config */
+ compat_uptr_t
+ dvs_2500_config; /* Skylake: digital video stabilization config */
+ compat_uptr_t res_mgr_2500_config;
+
+ /*
+ * Output frame pointer the config is to be applied to (optional);
+ * set to NULL to have this config applied globally.
+ */
+ compat_uptr_t output_frame;
+ /*
+ * Unique ID to track which config was actually applied to a particular
+ * frame; the driver sends this ID back together with the output frame.
+ */
+ u32 isp_config_id;
+ u32 per_frame_setting;
+};
+
+struct atomisp_acc_fw_load_to_pipe32 {
+ __u32 flags; /* Flags, see below for valid values */
+ unsigned int fw_handle; /* Handle, filled by kernel. */
+ __u32 size; /* Firmware binary size */
+ compat_uptr_t data; /* Pointer to firmware */
+ __u32 type; /* Binary type */
+ __u32 reserved[3]; /* Set to zero */
+};
+
+struct atomisp_dvs_6axis_config32 {
+ u32 exp_id;
+ u32 width_y;
+ u32 height_y;
+ u32 width_uv;
+ u32 height_uv;
+ compat_uptr_t xcoords_y;
+ compat_uptr_t ycoords_y;
+ compat_uptr_t xcoords_uv;
+ compat_uptr_t ycoords_uv;
+};
+
+struct atomisp_sensor_ae_bracketing_lut32 {
+ compat_uptr_t lut;
+ unsigned int lut_size;
+};
+
+#define ATOMISP_IOC_G_HISTOGRAM32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram32)
+#define ATOMISP_IOC_S_HISTOGRAM32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram32)
+
+#define ATOMISP_IOC_G_DIS_STAT32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics32)
+#define ATOMISP_IOC_S_DIS_COEFS32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_coefficients32)
+
+#define ATOMISP_IOC_S_DIS_VECTOR32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs_6axis_config32)
+
+#define ATOMISP_IOC_G_3A_STAT32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 7, struct atomisp_3a_statistics32)
+
+#define ATOMISP_IOC_G_ISP_GDC_TAB32 \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table32)
+#define ATOMISP_IOC_S_ISP_GDC_TAB32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table32)
+
+#define ATOMISP_IOC_S_ISP_FPN_TABLE32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 17, struct v4l2_framebuffer32)
+
+#define ATOMISP_IOC_G_ISP_OVERLAY32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay32)
+#define ATOMISP_IOC_S_ISP_OVERLAY32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay32)
+
+#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group32)
+
+#define ATOMISP_IOC_ACC_LOAD32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load32)
+
+#define ATOMISP_IOC_ACC_S_ARG32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg32)
+
+#define ATOMISP_IOC_ACC_DESTAB32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg32)
+
+#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data32)
+
+#define ATOMISP_IOC_S_ISP_SHD_TAB32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table32)
+
+#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data32)
+
+#define ATOMISP_IOC_ACC_MAP32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32)
+
+#define ATOMISP_IOC_ACC_UNMAP32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32)
+
+#define ATOMISP_IOC_ACC_S_MAPPED_ARG32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg32)
+
+#define ATOMISP_IOC_ACC_LOAD_TO_PIPE32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe32)
+
+#define ATOMISP_IOC_S_PARAMETERS32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters32)
+
+#define ATOMISP_IOC_G_METADATA32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata32)
+
+#define ATOMISP_IOC_G_METADATA_BY_TYPE32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata_with_type32)
+
+#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_lut32)
+
+#endif /* __ATOMISP_COMPAT_IOCTL32_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
new file mode 100644
index 000000000000..a2638863206e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
@@ -0,0 +1,426 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include "atomisp_cmd.h"
+#include "atomisp_internal.h"
+#include "atomisp-regs.h"
+
+static struct v4l2_mbus_framefmt *
+__csi2_get_format(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which,
+ unsigned int pad)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
+ else
+ return &csi2->formats[pad];
+}
+
+/*
+ * csi2_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg : V4L2 subdev pad config
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct atomisp_in_fmt_conv *ic = atomisp_in_fmt_conv;
+ unsigned int i = 0;
+
+ while (ic->code) {
+ if (i == code->index) {
+ code->code = ic->code;
+ return 0;
+ }
+ i++, ic++;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * csi2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad config
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, cfg, fmt->which, fmt->pad);
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int which, uint16_t pad,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *actual_ffmt = __csi2_get_format(csi2, cfg, which, pad);
+
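+ /* A sink-pad format is clamped below and then propagated to the
+ * source pad; source-pad formats always mirror the sink pad.
+ */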
+ if (pad == CSI2_PAD_SINK) {
+ const struct atomisp_in_fmt_conv *ic;
+ struct v4l2_mbus_framefmt tmp_ffmt;
+
+ ic = atomisp_find_in_fmt_conv(ffmt->code);
+ if (ic)
+ actual_ffmt->code = ic->code;
+ else
+ actual_ffmt->code = atomisp_in_fmt_conv[0].code;
+
+ actual_ffmt->width = clamp_t(u32, ffmt->width, ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH);
+ actual_ffmt->height = clamp_t(u32, ffmt->height, ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT);
+
+ tmp_ffmt = *ffmt = *actual_ffmt;
+
+ return atomisp_csi2_set_ffmt(sd, cfg, which, CSI2_PAD_SOURCE,
+ &tmp_ffmt);
+ }
+
+ /* FIXME: DPCM decompression */
+ *actual_ffmt = *ffmt = *__csi2_get_format(csi2, cfg, which, CSI2_PAD_SINK);
+
+ return 0;
+}
+
+/*
+ * csi2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad config
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ return atomisp_csi2_set_ffmt(sd, cfg, fmt->which, fmt->pad,
+ &fmt->format);
+}
+
+/*
+ * csi2_set_stream - Enable/Disable streaming on the CSI2 module
+ * @sd: ISP CSI2 V4L2 subdevice
+ * @enable: Enable/disable stream (1/0)
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops csi2_core_ops = {
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+ .s_stream = csi2_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .enum_mbus_code = csi2_enum_mbus_code,
+ .get_fmt = csi2_get_format,
+ .set_fmt = csi2_set_format,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops csi2_ops = {
+ .core = &csi2_core_ops,
+ .video = &csi2_video_ops,
+ .pad = &csi2_pad_ops,
+};
+
+/*
+ * csi2_link_setup - Setup CSI2 connections.
+ * @entity : Pointer to media entity structure
+ * @local : Pointer to local pad array
+ * @remote : Pointer to remote pad array
+ * @flags : Link flags
+ * return -EINVAL or zero on success
+ */
+static int csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ u32 result = local->index | is_media_entity_v4l2_subdev(remote->entity);
+
+ switch (result) {
+ case CSI2_PAD_SOURCE | MEDIA_ENT_F_OLD_BASE:
+ /* not supported yet */
+ return -EINVAL;
+
+ case CSI2_PAD_SOURCE | MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_ISP_SUBDEV)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_ISP_SUBDEV;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_ISP_SUBDEV;
+ }
+ break;
+
+ default:
+ /* Link from camera to CSI2 is fixed... */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations csi2_media_ops = {
+ .link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * mipi_csi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to the CSI2 device structure.
+ * @port: CSI2 port number.
+ * return -ENOMEM or zero on success
+ */
+static int mipi_csi2_init_entities(struct atomisp_mipi_csi2_device *csi2,
+ int port)
+{
+ struct v4l2_subdev *sd = &csi2->subdev;
+ struct media_pad *pads = csi2->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ v4l2_subdev_init(sd, &csi2_ops);
+ snprintf(sd->name, sizeof(sd->name), "ATOM ISP CSI2-port%d", port);
+
+ v4l2_set_subdevdata(sd, csi2);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+
+ me->ops = &csi2_media_ops;
+ me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ ret = media_entity_pads_init(me, CSI2_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ csi2->formats[CSI2_PAD_SINK].code =
+ csi2->formats[CSI2_PAD_SOURCE].code =
+ atomisp_in_fmt_conv[0].code;
+
+ return 0;
+}
+
+void
+atomisp_mipi_csi2_unregister_entities(struct atomisp_mipi_csi2_device *csi2)
+{
+ media_entity_cleanup(&csi2->subdev.entity);
+ v4l2_device_unregister_subdev(&csi2->subdev);
+}
+
+int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ atomisp_mipi_csi2_unregister_entities(csi2);
+ return ret;
+}
+
+static const int LIMIT_SHIFT = 6; /* Limit numeric range into 31 bits */
+
+static int
+atomisp_csi2_configure_calc(const short int coeffs[2], int mipi_freq, int def)
+{
+ /* Delay counter accuracy, 1/0.0625 for ANN/CHT, 1/0.125 for BXT */
+ static const int accinv = 16; /* 1 / COUNT_ACC */
+ int r;
+
+ if (mipi_freq >> LIMIT_SHIFT <= 0)
+ return def;
+
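+ /*
+ * r = accinv * (coeffs[0] + coeffs[1] * UI), with UI = 5e8 / mipi_freq
+ * in ns. For example, coeffs = {95, -8} at 400 MHz (UI = 1.25 ns)
+ * gives 16 * (95 - 10) = 1360.
+ */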
+ r = accinv * coeffs[1] * (500000000 >> LIMIT_SHIFT);
+ r /= mipi_freq >> LIMIT_SHIFT;
+ r += accinv * coeffs[0];
+
+ return r;
+}
+
+static void atomisp_csi2_configure_isp2401(struct atomisp_sub_device *asd)
+{
+ /*
+ * The ISP2401 new input system CSI2+ receiver has several
+ * parameters affecting the receiver timings. These depend
+ * on the MIPI bus frequency F in Hz (sensor transmitter rate)
+ * as follows:
+ * register value = (A/1e9 + B * UI) / COUNT_ACC
+ * where
+ * UI = 1 / (2 * F) in seconds
+ * COUNT_ACC = counter accuracy in seconds
+ * For ANN and CHV, COUNT_ACC = 0.0625 ns
+ * For BXT, COUNT_ACC = 0.125 ns
+ * A and B are coefficients from the table below,
+ * depending on whether the register minimum or maximum value is
+ * calculated.
+ * Minimum Maximum
+ * Clock lane A B A B
+ * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
+ * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
+ * Data lanes
+ * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
+ *
+ * We use the minimum values in the calculations below.
+ */
+ static const short int coeff_clk_termen[] = { 0, 0 };
+ static const short int coeff_clk_settle[] = { 95, -8 };
+ static const short int coeff_dat_termen[] = { 0, 0 };
+ static const short int coeff_dat_settle[] = { 85, -2 };
+ static const int TERMEN_DEFAULT = 0;
+ static const int SETTLE_DEFAULT = 0x480;
+
+ static const hrt_address csi2_port_base[] = {
+ [ATOMISP_CAMERA_PORT_PRIMARY] = CSI2_PORT_A_BASE,
+ [ATOMISP_CAMERA_PORT_SECONDARY] = CSI2_PORT_B_BASE,
+ [ATOMISP_CAMERA_PORT_TERTIARY] = CSI2_PORT_C_BASE,
+ };
+ /* Number of lanes on each port, excluding clock lane */
+ static const unsigned char csi2_port_lanes[] = {
+ [ATOMISP_CAMERA_PORT_PRIMARY] = 4,
+ [ATOMISP_CAMERA_PORT_SECONDARY] = 2,
+ [ATOMISP_CAMERA_PORT_TERTIARY] = 2,
+ };
+ static const hrt_address csi2_lane_base[] = {
+ CSI2_LANE_CL_BASE,
+ CSI2_LANE_D0_BASE,
+ CSI2_LANE_D1_BASE,
+ CSI2_LANE_D2_BASE,
+ CSI2_LANE_D3_BASE,
+ };
+
+ int clk_termen;
+ int clk_settle;
+ int dat_termen;
+ int dat_settle;
+
+ struct v4l2_control ctrl;
+ struct atomisp_device *isp = asd->isp;
+ struct camera_mipi_info *mipi_info;
+ int mipi_freq = 0;
+ enum atomisp_camera_port port;
+
+ int n;
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ port = mipi_info->port;
+
+ ctrl.id = V4L2_CID_LINK_FREQ;
+ if (v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->ctrl_handler,
+ &ctrl) == 0)
+ mipi_freq = ctrl.value;
+
+ clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen,
+ mipi_freq, TERMEN_DEFAULT);
+ clk_settle = atomisp_csi2_configure_calc(coeff_clk_settle,
+ mipi_freq, SETTLE_DEFAULT);
+ dat_termen = atomisp_csi2_configure_calc(coeff_dat_termen,
+ mipi_freq, TERMEN_DEFAULT);
+ dat_settle = atomisp_csi2_configure_calc(coeff_dat_settle,
+ mipi_freq, SETTLE_DEFAULT);
+ for (n = 0; n < csi2_port_lanes[port] + 1; n++) {
+ hrt_address base = csi2_port_base[port] + csi2_lane_base[n];
+
+ atomisp_store_uint32(base + CSI2_REG_RX_CSI_DLY_CNT_TERMEN,
+ n == 0 ? clk_termen : dat_termen);
+ atomisp_store_uint32(base + CSI2_REG_RX_CSI_DLY_CNT_SETTLE,
+ n == 0 ? clk_settle : dat_settle);
+ }
+}
+
+void atomisp_csi2_configure(struct atomisp_sub_device *asd)
+{
+ if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401))
+ atomisp_csi2_configure_isp2401(asd);
+}
+
+/*
+ * atomisp_mipi_csi2_cleanup - Routine for module driver cleanup
+ */
+void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp)
+{
+}
+
+int atomisp_mipi_csi2_init(struct atomisp_device *isp)
+{
+ struct atomisp_mipi_csi2_device *csi2_port;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ csi2_port = &isp->csi2_port[i];
+ csi2_port->isp = isp;
+ ret = mipi_csi2_init_entities(csi2_port, i);
+ if (ret < 0)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ atomisp_mipi_csi2_cleanup(isp);
+ return ret;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
new file mode 100644
index 000000000000..739c26f0807a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
@@ -0,0 +1,58 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef __ATOMISP_CSI2_H__
+#define __ATOMISP_CSI2_H__
+
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ctrls.h>
+
+#define CSI2_PAD_SINK 0
+#define CSI2_PAD_SOURCE 1
+#define CSI2_PADS_NUM 2
+
+#define CSI2_OUTPUT_ISP_SUBDEV BIT(0)
+#define CSI2_OUTPUT_MEMORY BIT(1)
+
+struct atomisp_device;
+struct v4l2_device;
+struct atomisp_sub_device;
+
+struct atomisp_mipi_csi2_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CSI2_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
+
+ struct v4l2_ctrl_handler ctrls;
+ struct atomisp_device *isp;
+
+ u32 output; /* output direction */
+};
+
+int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int which, uint16_t pad,
+ struct v4l2_mbus_framefmt *ffmt);
+int atomisp_mipi_csi2_init(struct atomisp_device *isp);
+void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp);
+void atomisp_mipi_csi2_unregister_entities(
+ struct atomisp_mipi_csi2_device *csi2);
+int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_device *vdev);
+
+void atomisp_csi2_configure(struct atomisp_sub_device *asd);
+
+#endif /* __ATOMISP_CSI2_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h b/drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h
new file mode 100644
index 000000000000..9680f211d424
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef __ATOMISP_DFS_TABLES_H__
+#define __ATOMISP_DFS_TABLES_H__
+
+#include <linux/kernel.h>
+
+struct atomisp_freq_scaling_rule {
+ unsigned int width;
+ unsigned int height;
+ unsigned short fps;
+ unsigned int isp_freq;
+ unsigned int run_mode;
+};
+
+struct atomisp_dfs_config {
+ unsigned int lowest_freq;
+ unsigned int max_freq_at_vmin;
+ unsigned int highest_freq;
+ const struct atomisp_freq_scaling_rule *dfs_table;
+ unsigned int dfs_table_size;
+};
+
+extern const struct atomisp_dfs_config dfs_config_cht_soc;
+
+#endif /* __ATOMISP_DFS_TABLES_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
new file mode 100644
index 000000000000..4a6ea021ddcc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
@@ -0,0 +1,205 @@
+/*
+ * Support for atomisp driver sysfs interface
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+
+#include "atomisp_compat.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_drvfs.h"
+#include "hmm/hmm.h"
+
+/*
+ * _iunit_debug:
+ * dbglvl: iunit css driver trace level
+ * dbgfun: iunit css debug function selector
+ * dbgopt: iunit debug option:
+ * bit 0: binary list
+ * bit 1: running binary
+ * bit 2: memory statistic
+ */
+struct _iunit_debug {
+ struct device_driver *drv;
+ struct atomisp_device *isp;
+ unsigned int dbglvl;
+ unsigned int dbgfun;
+ unsigned int dbgopt;
+};
+
+#define OPTION_BIN_LIST BIT(0)
+#define OPTION_BIN_RUN BIT(1)
+#define OPTION_MEM_STAT BIT(2)
+#define OPTION_VALID (OPTION_BIN_LIST \
+ | OPTION_BIN_RUN \
+ | OPTION_MEM_STAT)
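+
+/*
+ * Usage sketch (illustrative; the exact sysfs path depends on the
+ * driver name, e.g. /sys/bus/pci/drivers/<atomisp driver>/dbgopt):
+ * writing the decimal value 3 selects OPTION_BIN_LIST | OPTION_BIN_RUN,
+ * i.e. dump both the binary list and the currently running binary.
+ */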
+
+static struct _iunit_debug iunit_debug = {
+ .dbglvl = 0,
+ .dbgopt = OPTION_BIN_LIST,
+};
+
+static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
+ unsigned int opt)
+{
+ int ret = 0;
+
+ if (opt & OPTION_VALID) {
+ if (opt & OPTION_BIN_LIST) {
+ ret = atomisp_css_dump_blob_infor();
+ if (ret) {
+ dev_err(atomisp_dev, "%s dump blob info err[ret:%d]\n",
+ __func__, ret);
+ goto opt_err;
+ }
+ }
+
+ if (opt & OPTION_BIN_RUN) {
+ if (atomisp_streaming_count(isp)) {
+ atomisp_css_dump_sp_raw_copy_linecount(true);
+ atomisp_css_debug_dump_isp_binary();
+ } else {
+ ret = -EPERM;
+ dev_err(atomisp_dev, "%s dump running bin err[ret:%d]\n",
+ __func__, ret);
+ goto opt_err;
+ }
+ }
+
+ if (opt & OPTION_MEM_STAT)
+ hmm_show_mem_stat(__func__, __LINE__);
+ } else {
+ ret = -EINVAL;
+ dev_err(atomisp_dev, "%s dump nothing[ret=%d]\n", __func__,
+ ret);
+ }
+
+opt_err:
+ return ret;
+}
+
+static ssize_t iunit_dbglvl_show(struct device_driver *drv, char *buf)
+{
+ iunit_debug.dbglvl = atomisp_css_debug_get_dtrace_level();
+ return sprintf(buf, "dtrace level:%u\n", iunit_debug.dbglvl);
+}
+
+static ssize_t iunit_dbglvl_store(struct device_driver *drv, const char *buf,
+ size_t size)
+{
+ if (kstrtouint(buf, 10, &iunit_debug.dbglvl) ||
+ iunit_debug.dbglvl < 1 || iunit_debug.dbglvl > 9)
+ return -ERANGE;
+ atomisp_css_debug_set_dtrace_level(iunit_debug.dbglvl);
+
+ return size;
+}
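+
+/*
+ * For example, "echo 5 > dbglvl" (in the driver's sysfs directory)
+ * selects css dtrace level 5; values outside 1..9 are rejected
+ * with -ERANGE.
+ */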
+
+static ssize_t iunit_dbgfun_show(struct device_driver *drv, char *buf)
+{
+ iunit_debug.dbgfun = atomisp_get_css_dbgfunc();
+ return sprintf(buf, "dbgfun opt:%u\n", iunit_debug.dbgfun);
+}
+
+static ssize_t iunit_dbgfun_store(struct device_driver *drv, const char *buf,
+ size_t size)
+{
+ unsigned int opt;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &opt);
+ if (ret)
+ return ret;
+
+ ret = atomisp_set_css_dbgfunc(iunit_debug.isp, opt);
+ if (ret)
+ return ret;
+
+ iunit_debug.dbgfun = opt;
+
+ return size;
+}
+
+static ssize_t iunit_dbgopt_show(struct device_driver *drv, char *buf)
+{
+ return sprintf(buf, "option:0x%x\n", iunit_debug.dbgopt);
+}
+
+static ssize_t iunit_dbgopt_store(struct device_driver *drv, const char *buf,
+ size_t size)
+{
+ unsigned int opt;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &opt);
+ if (ret)
+ return ret;
+
+ iunit_debug.dbgopt = opt;
+ ret = iunit_dump_dbgopt(iunit_debug.isp, iunit_debug.dbgopt);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static const struct driver_attribute iunit_drvfs_attrs[] = {
+ __ATTR(dbglvl, 0644, iunit_dbglvl_show, iunit_dbglvl_store),
+ __ATTR(dbgfun, 0644, iunit_dbgfun_show, iunit_dbgfun_store),
+ __ATTR(dbgopt, 0644, iunit_dbgopt_show, iunit_dbgopt_store),
+};
+
+static int iunit_drvfs_create_files(struct device_driver *drv)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++)
+ ret |= driver_create_file(drv, &iunit_drvfs_attrs[i]);
+
+ return ret;
+}
+
+static void iunit_drvfs_remove_files(struct device_driver *drv)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++)
+ driver_remove_file(drv, &iunit_drvfs_attrs[i]);
+}
+
+int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp)
+{
+ int ret;
+
+ iunit_debug.isp = isp;
+ iunit_debug.drv = drv;
+
+ ret = iunit_drvfs_create_files(iunit_debug.drv);
+ if (ret) {
+ dev_err(atomisp_dev, "drvfs_create_files error: %d\n", ret);
+ iunit_drvfs_remove_files(iunit_debug.drv);
+ }
+
+ return ret;
+}
+
+void atomisp_drvfs_exit(void)
+{
+ iunit_drvfs_remove_files(iunit_debug.drv);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.h b/drivers/staging/media/atomisp/pci/atomisp_drvfs.h
new file mode 100644
index 000000000000..7c99240d107a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.h
@@ -0,0 +1,24 @@
+/*
+ * Support for atomisp driver sysfs interface.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_DRVFS_H__
+#define __ATOMISP_DRVFS_H__
+
+int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp);
+void atomisp_drvfs_exit(void);
+
+#endif /* __ATOMISP_DRVFS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp_file.c
new file mode 100644
index 000000000000..4ab0390316cf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_file.c
@@ -0,0 +1,227 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+
+#include <media/videobuf-vmalloc.h>
+#include <linux/delay.h>
+
+#include "ia_css.h"
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_file.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+
+static void file_work(struct work_struct *work)
+{
+ struct atomisp_file_device *file_dev =
+ container_of(work, struct atomisp_file_device, work);
+ struct atomisp_device *isp = file_dev->isp;
+ /* only support file injection on subdev0 */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+ struct atomisp_video_pipe *out_pipe = &asd->video_in;
+ unsigned short *buf = videobuf_to_vmalloc(out_pipe->outq.bufs[0]);
+ struct v4l2_mbus_framefmt isp_sink_fmt;
+
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return;
+
+ dev_dbg(isp->dev, ">%s: ready to start streaming\n", __func__);
+ isp_sink_fmt = *atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
+ while (!atomisp_css_isp_has_started())
+ usleep_range(1000, 1500);
+
+ atomisp_css_send_input_frame(asd, buf, isp_sink_fmt.width,
+ isp_sink_fmt.height);
+ dev_dbg(isp->dev, "<%s: streaming done\n", __func__);
+}
+
+static int file_input_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = file_dev->isp;
+ /* only support file injection on subdev0 */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+
+ dev_dbg(isp->dev, "%s: enable %d\n", __func__, enable);
+ if (enable) {
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return 0;
+
+ queue_work(file_dev->work_queue, &file_dev->work);
+ return 0;
+ }
+ cancel_work_sync(&file_dev->work);
+ return 0;
+}
+
+static int file_input_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = file_dev->isp;
+ /* only support file injection on subdev0 */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+ struct v4l2_mbus_framefmt *isp_sink_fmt;
+
+ if (format->pad)
+ return -EINVAL;
+ isp_sink_fmt = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
+ fmt->width = isp_sink_fmt->width;
+ fmt->height = isp_sink_fmt->height;
+ fmt->code = isp_sink_fmt->code;
+
+ return 0;
+}
+
+static int file_input_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+
+ if (format->pad)
+ return -EINVAL;
+ file_input_get_fmt(sd, cfg, format);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ cfg->try_fmt = *fmt;
+ return 0;
+}
+
+static int file_input_log_status(struct v4l2_subdev *sd)
+{
+ /* stub: the fake file input subdev has no status to log */
+ return 0;
+}
+
+static int file_input_s_power(struct v4l2_subdev *sd, int on)
+{
+ /* stub: the fake file input subdev needs no power control */
+ return 0;
+}
+
+static int file_input_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* stub: the fake file input subdev enumerates no media bus codes */
+ return 0;
+}
+
+static int file_input_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ /* stub: the fake file input subdev enumerates no frame sizes */
+ return 0;
+}
+
+static int file_input_enum_frame_ival(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum
+ *fie)
+{
+ /* stub: the fake file input subdev enumerates no frame intervals */
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops file_input_video_ops = {
+ .s_stream = file_input_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops file_input_core_ops = {
+ .log_status = file_input_log_status,
+ .s_power = file_input_s_power,
+};
+
+static const struct v4l2_subdev_pad_ops file_input_pad_ops = {
+ .enum_mbus_code = file_input_enum_mbus_code,
+ .enum_frame_size = file_input_enum_frame_size,
+ .enum_frame_interval = file_input_enum_frame_ival,
+ .get_fmt = file_input_get_fmt,
+ .set_fmt = file_input_set_fmt,
+};
+
+static const struct v4l2_subdev_ops file_input_ops = {
+ .core = &file_input_core_ops,
+ .video = &file_input_video_ops,
+ .pad = &file_input_pad_ops,
+};
+
+void
+atomisp_file_input_unregister_entities(struct atomisp_file_device *file_dev)
+{
+ media_entity_cleanup(&file_dev->sd.entity);
+ v4l2_device_unregister_subdev(&file_dev->sd);
+}
+
+int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev,
+ struct v4l2_device *vdev)
+{
+ /* Register the subdev and video nodes. */
+ return v4l2_device_register_subdev(vdev, &file_dev->sd);
+}
+
+void atomisp_file_input_cleanup(struct atomisp_device *isp)
+{
+ struct atomisp_file_device *file_dev = &isp->file_dev;
+
+ if (file_dev->work_queue) {
+ destroy_workqueue(file_dev->work_queue);
+ file_dev->work_queue = NULL;
+ }
+}
+
+int atomisp_file_input_init(struct atomisp_device *isp)
+{
+ struct atomisp_file_device *file_dev = &isp->file_dev;
+ struct v4l2_subdev *sd = &file_dev->sd;
+ struct media_pad *pads = file_dev->pads;
+ struct media_entity *me = &sd->entity;
+
+ file_dev->isp = isp;
+ file_dev->work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
+ if (!file_dev->work_queue) {
+ dev_err(isp->dev, "Failed to initialize file inject workq\n");
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&file_dev->work, file_work);
+
+ v4l2_subdev_init(sd, &file_input_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strcpy(sd->name, "file_input_subdev");
+ v4l2_set_subdevdata(sd, file_dev);
+
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+
+ return media_entity_pads_init(me, 1, pads);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.h b/drivers/staging/media/atomisp/pci/atomisp_file.h
new file mode 100644
index 000000000000..e38f8bc389f1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_file.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_FILE_H__
+#define __ATOMISP_FILE_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+struct atomisp_device;
+
+struct atomisp_file_device {
+ struct v4l2_subdev sd;
+ struct atomisp_device *isp;
+ struct media_pad pads[1];
+
+ struct workqueue_struct *work_queue;
+ struct work_struct work;
+};
+
+void atomisp_file_input_cleanup(struct atomisp_device *isp);
+int atomisp_file_input_init(struct atomisp_device *isp);
+void atomisp_file_input_unregister_entities(
+ struct atomisp_file_device *file_dev);
+int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev,
+ struct v4l2_device *vdev);
+#endif /* __ATOMISP_FILE_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
new file mode 100644
index 000000000000..667d6f7d1d5e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -0,0 +1,1306 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-vmalloc.h>
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_compat.h"
+#include "atomisp_subdev.h"
+#include "atomisp_v4l2.h"
+#include "atomisp-regs.h"
+#include "hmm/hmm.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+#include "type_support.h"
+#include "device_access/device_access.h"
+#include "memory_access/memory_access.h"
+
+#include "atomisp_acc.h"
+
+#define ISP_LEFT_PAD 128 /* equal to 2*NWAY */
+
+/*
+ * input image data, and current frame resolution for test
+ */
+#define ISP_PARAM_MMAP_OFFSET 0xfffff000
+
+#define MAGIC_CHECK(is, should) \
+ do { \
+ if (unlikely((is) != (should))) { \
+ pr_err("magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ } \
+ } while (0)
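+
+/*
+ * Used below as MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS) to
+ * assert that a videobuf queue was initialized with the expected
+ * vmalloc queue ops before its buffers are mapped.
+ */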
+
+/*
+ * Videobuf ops
+ */
+static int atomisp_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ *size = pipe->pix.sizeimage;
+
+ return 0;
+}
+
+static int atomisp_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ vb->size = pipe->pix.sizeimage;
+ vb->width = pipe->pix.width;
+ vb->height = pipe->pix.height;
+ vb->field = field;
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+}
+
+static int atomisp_q_one_metadata_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_metadata_buf *metadata_buf;
+ enum atomisp_metadata_type md_type =
+ atomisp_get_metadata_type(asd, css_pipe_id);
+ struct list_head *metadata_list;
+
+ if (asd->metadata_bufs_in_css[stream_id][css_pipe_id] >=
+ ATOMISP_CSS_Q_DEPTH)
+ return 0; /* we have reached CSS queue depth */
+
+ if (!list_empty(&asd->metadata[md_type])) {
+ metadata_list = &asd->metadata[md_type];
+ } else if (!list_empty(&asd->metadata_ready[md_type])) {
+ metadata_list = &asd->metadata_ready[md_type];
+ } else {
+ dev_warn(asd->isp->dev, "%s: No metadata buffers available for type %d!\n",
+ __func__, md_type);
+ return -EINVAL;
+ }
+
+ metadata_buf = list_entry(metadata_list->next,
+ struct atomisp_metadata_buf, list);
+ list_del_init(&metadata_buf->list);
+
+ if (atomisp_q_metadata_buffer_to_css(asd, metadata_buf,
+ stream_id, css_pipe_id)) {
+ list_add(&metadata_buf->list, metadata_list);
+ return -EINVAL;
+ } else {
+ list_add_tail(&metadata_buf->list,
+ &asd->metadata_in_css[md_type]);
+ }
+ asd->metadata_bufs_in_css[stream_id][css_pipe_id]++;
+
+ return 0;
+}
+
+static int atomisp_q_one_s3a_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_s3a_buf *s3a_buf;
+ struct list_head *s3a_list;
+ unsigned int exp_id;
+
+ if (asd->s3a_bufs_in_css[css_pipe_id] >= ATOMISP_CSS_Q_DEPTH)
+ return 0; /* we have reached CSS queue depth */
+
+ if (!list_empty(&asd->s3a_stats)) {
+ s3a_list = &asd->s3a_stats;
+ } else if (!list_empty(&asd->s3a_stats_ready)) {
+ s3a_list = &asd->s3a_stats_ready;
+ } else {
+ dev_warn(asd->isp->dev, "%s: No s3a buffers available!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ s3a_buf = list_entry(s3a_list->next, struct atomisp_s3a_buf, list);
+ list_del_init(&s3a_buf->list);
+ exp_id = s3a_buf->s3a_data->exp_id;
+
+ hmm_flush_vmap(s3a_buf->s3a_data->data_ptr);
+ if (atomisp_q_s3a_buffer_to_css(asd, s3a_buf,
+ stream_id, css_pipe_id)) {
+ /* got from head, so return back to the head */
+ list_add(&s3a_buf->list, s3a_list);
+ return -EINVAL;
+ } else {
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats_in_css);
+ if (s3a_list == &asd->s3a_stats_ready)
+ dev_warn(asd->isp->dev, "%s: drop one s3a stat which has exp_id %d!\n",
+ __func__, exp_id);
+ }
+
+ asd->s3a_bufs_in_css[css_pipe_id]++;
+ return 0;
+}
+
+static int atomisp_q_one_dis_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_dis_buf *dis_buf;
+ unsigned long irqflags;
+
+ if (asd->dis_bufs_in_css >= ATOMISP_CSS_Q_DEPTH)
+ return 0; /* we have reached CSS queue depth */
+
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ if (list_empty(&asd->dis_stats)) {
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+ dev_warn(asd->isp->dev, "%s: No dis buffers available!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dis_buf = list_entry(asd->dis_stats.prev,
+ struct atomisp_dis_buf, list);
+ list_del_init(&dis_buf->list);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+
+ hmm_flush_vmap(dis_buf->dis_data->data_ptr);
+ if (atomisp_q_dis_buffer_to_css(asd, dis_buf,
+ stream_id, css_pipe_id)) {
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ /* got from tail, so return back to the tail */
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+ return -EINVAL;
+ } else {
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ list_add_tail(&dis_buf->list, &asd->dis_stats_in_css);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+ }
+
+ asd->dis_bufs_in_css++;
+
+ return 0;
+}
+
+int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_buffer_type css_buf_type,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct atomisp_css_params_with_list *param;
+ struct atomisp_css_dvs_grid_info *dvs_grid =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ unsigned long irqflags;
+ int err = 0;
+
+ while (pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) {
+ struct videobuf_buffer *vb;
+
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ if (list_empty(&pipe->activeq)) {
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ return -EINVAL;
+ }
+ vb = list_entry(pipe->activeq.next,
+ struct videobuf_buffer, queue);
+ list_del_init(&vb->queue);
+ vb->state = VIDEOBUF_ACTIVE;
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+
+ /*
+ * If there is a per_frame setting to apply on the buffer,
+ * do it before buffer en-queueing.
+ */
+ vm_mem = vb->priv;
+
+ param = pipe->frame_params[vb->i];
+ if (param) {
+ atomisp_makeup_css_parameters(asd,
+ &asd->params.css_param.update_flag,
+ &param->params);
+ atomisp_apply_css_parameters(asd, &param->params);
+
+ if (param->params.update_flag.dz_config &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
+ err = atomisp_calculate_real_zoom_region(asd,
+ &param->params.dz_config, css_pipe_id);
+ if (!err)
+ atomisp_css_set_dz_config(asd,
+ &param->params.dz_config);
+ }
+ atomisp_css_set_isp_config_applied_frame(asd,
+ vm_mem->vaddr);
+ atomisp_css_update_isp_params_on_pipe(asd,
+ asd->stream_env[stream_id].pipes[css_pipe_id]);
+ asd->params.dvs_6axis = (struct atomisp_css_dvs_6axis *)
+ param->params.dvs_6axis;
+
+ /*
+ * WORKAROUND:
+ * The camera HAL v3 can't guarantee that a zoom region is set
+ * in both the per-frame setting and the global setting at the
+ * same time, and currently only sets it in the per-frame
+ * setting. So when the per-frame setting includes a zoom
+ * region, copy it into the global setting as well.
+ */
+ if (param->params.update_flag.dz_config &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO
+ && !err) {
+ memcpy(&asd->params.css_param.dz_config,
+ &param->params.dz_config,
+ sizeof(struct ia_css_dz_config));
+ asd->params.css_param.update_flag.dz_config =
+ (struct atomisp_dz_config *)
+ &asd->params.css_param.dz_config;
+ asd->params.css_update_params_needed = true;
+ }
+ }
+ /* Enqueue buffer */
+ err = atomisp_q_video_buffer_to_css(asd, vm_mem, stream_id,
+ css_buf_type, css_pipe_id);
+ if (err) {
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ list_add_tail(&vb->queue, &pipe->activeq);
+ vb->state = VIDEOBUF_QUEUED;
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ dev_err(asd->isp->dev, "%s, css q fails: %d\n",
+ __func__, err);
+ return -EINVAL;
+ }
+ pipe->buffers_in_css++;
+
+ /* enqueue 3A/DIS/metadata buffers */
+ if (asd->params.curr_grid_info.s3a_grid.enable &&
+ css_pipe_id == asd->params.s3a_enabled_pipe &&
+ css_buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ atomisp_q_one_s3a_buffer(asd, stream_id,
+ css_pipe_id);
+
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
+ metadata_info.size &&
+ css_buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ atomisp_q_one_metadata_buffer(asd, stream_id,
+ css_pipe_id);
+
+ if (dvs_grid && dvs_grid->enable &&
+ css_pipe_id == CSS_PIPE_ID_VIDEO &&
+ css_buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ atomisp_q_one_dis_buffer(asd, stream_id,
+ css_pipe_id);
+ }
+
+ return 0;
+}
+
+static int atomisp_get_css_buf_type(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id,
+ uint16_t source_pad)
+{
+ if (ATOMISP_USE_YUVPP(asd)) {
+ /* when run ZSL case */
+ if (asd->continuous_mode->val &&
+ asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE)
+ return CSS_BUFFER_TYPE_OUTPUT_FRAME;
+ else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW)
+ return CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME;
+ else
+ return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
+ }
+
+ /*when run SDV case*/
+ if (asd->continuous_mode->val &&
+ asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE)
+ return CSS_BUFFER_TYPE_OUTPUT_FRAME;
+ else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW)
+ return CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME;
+ else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO)
+ return CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME;
+ else
+ return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
+ }
+
+ /*other case: default setting*/
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO ||
+ (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO))
+ return CSS_BUFFER_TYPE_OUTPUT_FRAME;
+ else
+ return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
+ }
+
+ if (pipe_id == CSS_PIPE_ID_COPY ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO ||
+ (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO))
+ return CSS_BUFFER_TYPE_OUTPUT_FRAME;
+ else
+ return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
+}
+
+static int atomisp_qbuffers_to_css_for_all_pipes(struct atomisp_sub_device *asd)
+{
+ enum atomisp_css_buffer_type buf_type;
+ enum atomisp_css_pipe_id css_capture_pipe_id = CSS_PIPE_ID_COPY;
+ enum atomisp_css_pipe_id css_preview_pipe_id = CSS_PIPE_ID_COPY;
+ enum atomisp_css_pipe_id css_video_pipe_id = CSS_PIPE_ID_COPY;
+ enum atomisp_input_stream_id input_stream_id;
+ struct atomisp_video_pipe *capture_pipe;
+ struct atomisp_video_pipe *preview_pipe;
+ struct atomisp_video_pipe *video_pipe;
+
+ capture_pipe = &asd->video_out_capture;
+ preview_pipe = &asd->video_out_preview;
+ video_pipe = &asd->video_out_video_capture;
+
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_preview_pipe_id,
+ atomisp_subdev_source_pad(&preview_pipe->vdev));
+ input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW;
+ atomisp_q_video_buffers_to_css(asd, preview_pipe,
+ input_stream_id,
+ buf_type, css_preview_pipe_id);
+
+ buf_type = atomisp_get_css_buf_type(asd, css_capture_pipe_id,
+ atomisp_subdev_source_pad(&capture_pipe->vdev));
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+ atomisp_q_video_buffers_to_css(asd, capture_pipe,
+ input_stream_id,
+ buf_type, css_capture_pipe_id);
+
+ buf_type = atomisp_get_css_buf_type(asd, css_video_pipe_id,
+ atomisp_subdev_source_pad(&video_pipe->vdev));
+ input_stream_id = ATOMISP_INPUT_STREAM_VIDEO;
+ atomisp_q_video_buffers_to_css(asd, video_pipe,
+ input_stream_id,
+ buf_type, css_video_pipe_id);
+ return 0;
+}
+
+/* queue all available buffers to css */
+int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd)
+{
+ enum atomisp_css_buffer_type buf_type;
+ enum atomisp_css_pipe_id css_capture_pipe_id = CSS_PIPE_ID_NUM;
+ enum atomisp_css_pipe_id css_preview_pipe_id = CSS_PIPE_ID_NUM;
+ enum atomisp_css_pipe_id css_video_pipe_id = CSS_PIPE_ID_NUM;
+ enum atomisp_input_stream_id input_stream_id;
+ struct atomisp_video_pipe *capture_pipe = NULL;
+ struct atomisp_video_pipe *vf_pipe = NULL;
+ struct atomisp_video_pipe *preview_pipe = NULL;
+ struct atomisp_video_pipe *video_pipe = NULL;
+ bool raw_mode = atomisp_is_mbuscode_raw(
+ asd->fmt[asd->capture_pad].fmt.code);
+
+ if (asd->isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num == 2 &&
+ !asd->yuvpp_mode)
+ return atomisp_qbuffers_to_css_for_all_pipes(asd);
+
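+ /*
+ * Map the run mode onto the CSS pipes that need buffers: the
+ * special vfpp modes use a single pipe; video mode feeds the
+ * video and preview pipes (plus capture and viewfinder in
+ * continuous mode); continuous still mode feeds the capture,
+ * viewfinder and preview pipes; plain preview mode feeds the
+ * preview pipe; still capture feeds the capture pipe plus, for
+ * non-raw formats, the viewfinder pipe.
+ */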
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ video_pipe = &asd->video_out_video_capture;
+ css_video_pipe_id = CSS_PIPE_ID_VIDEO;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ preview_pipe = &asd->video_out_capture;
+ css_preview_pipe_id = CSS_PIPE_ID_CAPTURE;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ if (asd->continuous_mode->val) {
+ capture_pipe = &asd->video_out_capture;
+ vf_pipe = &asd->video_out_vf;
+ css_capture_pipe_id = CSS_PIPE_ID_CAPTURE;
+ }
+ video_pipe = &asd->video_out_video_capture;
+ preview_pipe = &asd->video_out_preview;
+ css_video_pipe_id = CSS_PIPE_ID_VIDEO;
+ css_preview_pipe_id = CSS_PIPE_ID_VIDEO;
+ } else if (asd->continuous_mode->val) {
+ capture_pipe = &asd->video_out_capture;
+ vf_pipe = &asd->video_out_vf;
+ preview_pipe = &asd->video_out_preview;
+
+ css_preview_pipe_id = CSS_PIPE_ID_PREVIEW;
+ css_capture_pipe_id = CSS_PIPE_ID_CAPTURE;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ preview_pipe = &asd->video_out_preview;
+ css_preview_pipe_id = CSS_PIPE_ID_PREVIEW;
+ } else {
+ /* ATOMISP_RUN_MODE_STILL_CAPTURE */
+ capture_pipe = &asd->video_out_capture;
+ if (!raw_mode)
+ vf_pipe = &asd->video_out_vf;
+ css_capture_pipe_id = CSS_PIPE_ID_CAPTURE;
+ }
+
+#ifdef ISP2401_NEW_INPUT_SYSTEM
+ if (asd->copy_mode) {
+ css_capture_pipe_id = CSS_PIPE_ID_COPY;
+ css_preview_pipe_id = CSS_PIPE_ID_COPY;
+ css_video_pipe_id = CSS_PIPE_ID_COPY;
+ }
+#endif
+
+ if (asd->yuvpp_mode) {
+ capture_pipe = &asd->video_out_capture;
+ video_pipe = &asd->video_out_video_capture;
+ preview_pipe = &asd->video_out_preview;
+ css_capture_pipe_id = CSS_PIPE_ID_COPY;
+ css_video_pipe_id = CSS_PIPE_ID_YUVPP;
+ css_preview_pipe_id = CSS_PIPE_ID_YUVPP;
+ }
+
+ if (capture_pipe) {
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_capture_pipe_id,
+ atomisp_subdev_source_pad(&capture_pipe->vdev));
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+
+ /*
+ * use yuvpp pipe for SOC camera.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ css_capture_pipe_id = CSS_PIPE_ID_YUVPP;
+
+ atomisp_q_video_buffers_to_css(asd, capture_pipe,
+ input_stream_id,
+ buf_type, css_capture_pipe_id);
+ }
+
+ if (vf_pipe) {
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_capture_pipe_id,
+ atomisp_subdev_source_pad(&vf_pipe->vdev));
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_POSTVIEW].stream)
+ input_stream_id = ATOMISP_INPUT_STREAM_POSTVIEW;
+ else
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+
+ /*
+ * use yuvpp pipe for SOC camera.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ css_capture_pipe_id = CSS_PIPE_ID_YUVPP;
+ atomisp_q_video_buffers_to_css(asd, vf_pipe,
+ input_stream_id,
+ buf_type, css_capture_pipe_id);
+ }
+
+ if (preview_pipe) {
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_preview_pipe_id,
+ atomisp_subdev_source_pad(&preview_pipe->vdev));
+ if (ATOMISP_SOC_CAMERA(asd) && css_preview_pipe_id == CSS_PIPE_ID_YUVPP)
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+ /* else for ext isp use case */
+ else if (css_preview_pipe_id == CSS_PIPE_ID_YUVPP)
+ input_stream_id = ATOMISP_INPUT_STREAM_VIDEO;
+ else if (asd->stream_env[ATOMISP_INPUT_STREAM_PREVIEW].stream)
+ input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW;
+ else
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+
+ /*
+ * use yuvpp pipe for SOC camera.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ css_preview_pipe_id = CSS_PIPE_ID_YUVPP;
+
+ atomisp_q_video_buffers_to_css(asd, preview_pipe,
+ input_stream_id,
+ buf_type, css_preview_pipe_id);
+ }
+
+ if (video_pipe) {
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_video_pipe_id,
+ atomisp_subdev_source_pad(&video_pipe->vdev));
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_VIDEO].stream)
+ input_stream_id = ATOMISP_INPUT_STREAM_VIDEO;
+ else
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+
+ /*
+ * use yuvpp pipe for SOC camera.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ css_video_pipe_id = CSS_PIPE_ID_YUVPP;
+
+ atomisp_q_video_buffers_to_css(asd, video_pipe,
+ input_stream_id,
+ buf_type, css_video_pipe_id);
+ }
+
+ return 0;
+}
+
+static void atomisp_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ /*
+ * A frame buffer should be put on the waiting list when it meets
+ * either of the following conditions:
+ * 1. It is not a main output frame, and it has a per-frame
+ * parameter to go with it.
+ * 2. It is not a main output frame, and the waiting buffer list is
+ * not empty; to keep the FIFO order of frame buffer processing,
+ * it must wait on the list until the previous per-frame
+ * parameter buffers have been enqueued.
+ */
+ if (!atomisp_is_vf_pipe(pipe) &&
+ (pipe->frame_request_config_id[vb->i] ||
+ !list_empty(&pipe->buffers_waiting_for_param)))
+ list_add_tail(&vb->queue, &pipe->buffers_waiting_for_param);
+ else
+ list_add_tail(&vb->queue, &pipe->activeq);
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+static void atomisp_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ atomisp_videobuf_free_buf(vb);
+}
+
+static int atomisp_buf_setup_output(struct videobuf_queue *vq,
+ unsigned int *count, unsigned int *size)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ *size = pipe->pix.sizeimage;
+
+ return 0;
+}
+
+static int atomisp_buf_prepare_output(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ vb->size = pipe->pix.sizeimage;
+ vb->width = pipe->pix.width;
+ vb->height = pipe->pix.height;
+ vb->field = field;
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+}
+
+static void atomisp_buf_queue_output(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ list_add_tail(&vb->queue, &pipe->activeq_out);
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+static void atomisp_buf_release_output(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ videobuf_vmalloc_free(vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static const struct videobuf_queue_ops videobuf_qops = {
+ .buf_setup = atomisp_buf_setup,
+ .buf_prepare = atomisp_buf_prepare,
+ .buf_queue = atomisp_buf_queue,
+ .buf_release = atomisp_buf_release,
+};
+
+static const struct videobuf_queue_ops videobuf_qops_output = {
+ .buf_setup = atomisp_buf_setup_output,
+ .buf_prepare = atomisp_buf_prepare_output,
+ .buf_queue = atomisp_buf_queue_output,
+ .buf_release = atomisp_buf_release_output,
+};
+
+static int atomisp_init_pipe(struct atomisp_video_pipe *pipe)
+{
+ /* init locks */
+ spin_lock_init(&pipe->irq_lock);
+
+ videobuf_queue_vmalloc_init(&pipe->capq, &videobuf_qops, NULL,
+ &pipe->irq_lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_NONE,
+ sizeof(struct atomisp_buffer), pipe,
+ NULL); /* ext_lock: NULL */
+
+ videobuf_queue_vmalloc_init(&pipe->outq, &videobuf_qops_output, NULL,
+ &pipe->irq_lock,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ V4L2_FIELD_NONE,
+ sizeof(struct atomisp_buffer), pipe,
+ NULL); /* ext_lock: NULL */
+
+ INIT_LIST_HEAD(&pipe->activeq);
+ INIT_LIST_HEAD(&pipe->activeq_out);
+ INIT_LIST_HEAD(&pipe->buffers_waiting_for_param);
+ INIT_LIST_HEAD(&pipe->per_frame_params);
+ memset(pipe->frame_request_config_id, 0,
+ VIDEO_MAX_FRAME * sizeof(unsigned int));
+ memset(pipe->frame_params, 0,
+ VIDEO_MAX_FRAME *
+ sizeof(struct atomisp_css_params_with_list *));
+
+ return 0;
+}
+
+static void atomisp_dev_init_struct(struct atomisp_device *isp)
+{
+ unsigned int i;
+
+ isp->sw_contex.file_input = false;
+ isp->need_gfx_throttle = true;
+ isp->isp_fatal_error = false;
+ isp->mipi_frame_size = 0;
+
+ for (i = 0; i < isp->input_cnt; i++)
+ isp->inputs[i].asd = NULL;
+ /*
+ * For Merrifield, frequency is scalable.
+ * After boot-up, the default frequency is 200MHz.
+ */
+ isp->sw_contex.running_freq = ISP_FREQ_200MHZ;
+}
+
+static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd)
+{
+ v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_STILL_CAPTURE);
+ memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
+ asd->params.color_effect = V4L2_COLORFX_NONE;
+ asd->params.bad_pixel_en = true;
+ asd->params.gdc_cac_en = false;
+ asd->params.video_dis_en = false;
+ asd->params.sc_en = false;
+ asd->params.fpn_en = false;
+ asd->params.xnr_en = false;
+ asd->params.false_color = 0;
+ asd->params.online_process = 1;
+ asd->params.yuv_ds_en = 0;
+ /* s3a grid not enabled for any pipe */
+ asd->params.s3a_enabled_pipe = CSS_PIPE_ID_NUM;
+
+ asd->params.offline_parm.num_captures = 1;
+ asd->params.offline_parm.skip_frames = 0;
+ asd->params.offline_parm.offset = 0;
+ asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
+ /* Add for channel */
+ asd->input_curr = 0;
+
+ asd->mipi_frame_size = 0;
+ asd->copy_mode = false;
+ asd->yuvpp_mode = false;
+
+ asd->stream_prepared = false;
+ asd->high_speed_mode = false;
+ asd->sensor_array_res.height = 0;
+ asd->sensor_array_res.width = 0;
+ atomisp_css_init_struct(asd);
+}
+
+/*
+ * file operation functions
+ */
+static unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd)
+{
+ return asd->video_out_preview.users +
+ asd->video_out_vf.users +
+ asd->video_out_capture.users +
+ asd->video_out_video_capture.users +
+ asd->video_acc.users +
+ asd->video_in.users;
+}
+
+unsigned int atomisp_dev_users(struct atomisp_device *isp)
+{
+ unsigned int i, sum;
+
+ for (i = 0, sum = 0; i < isp->num_of_streams; i++)
+ sum += atomisp_subdev_users(&isp->asd[i]);
+
+ return sum;
+}
+
+static int atomisp_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = NULL;
+ struct atomisp_acc_pipe *acc_pipe = NULL;
+ struct atomisp_sub_device *asd;
+ bool acc_node = false;
+ int ret;
+
+ dev_dbg(isp->dev, "open device %s\n", vdev->name);
+
+ rt_mutex_lock(&isp->mutex);
+
+ acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC");
+ if (acc_node) {
+ acc_pipe = atomisp_to_acc_pipe(vdev);
+ asd = acc_pipe->asd;
+ } else {
+ pipe = atomisp_to_video_pipe(vdev);
+ asd = pipe->asd;
+ }
+ asd->subdev.devnode = vdev;
+ /* Deferred firmware loading case. */
+ if (isp->css_env.isp_css_fw.bytes == 0) {
+ dev_err(isp->dev, "Deferred firmware load.\n");
+ isp->firmware = atomisp_load_firmware(isp);
+ if (!isp->firmware) {
+ dev_err(isp->dev, "Failed to load ISP firmware.\n");
+ ret = -ENOENT;
+ goto error;
+ }
+ ret = atomisp_css_load_firmware(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to init css.\n");
+ goto error;
+ }
+ /* No need to keep FW in memory anymore. */
+ release_firmware(isp->firmware);
+ isp->firmware = NULL;
+ isp->css_env.isp_css_fw.data = NULL;
+ }
+
+ if (acc_node && acc_pipe->users) {
+ dev_dbg(isp->dev, "acc node already opened\n");
+ rt_mutex_unlock(&isp->mutex);
+ return -EBUSY;
+ } else if (acc_node) {
+ goto dev_init;
+ }
+
+ if (!isp->input_cnt) {
+ dev_err(isp->dev, "no camera attached\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * atomisp does not allow multiple open
+ */
+ if (pipe->users) {
+ dev_dbg(isp->dev, "video node already opened\n");
+ rt_mutex_unlock(&isp->mutex);
+ return -EBUSY;
+ }
+
+ ret = atomisp_init_pipe(pipe);
+ if (ret)
+ goto error;
+
+dev_init:
+ if (atomisp_dev_users(isp)) {
+ dev_dbg(isp->dev, "skip init isp in open\n");
+ goto init_subdev;
+ }
+
+ /* runtime power management, turn on ISP */
+ ret = pm_runtime_get_sync(vdev->v4l2_dev->dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "Failed to power on device\n");
+ goto error;
+ }
+
+ if (dypool_enable) {
+ ret = hmm_pool_register(dypool_pgnr, HMM_POOL_TYPE_DYNAMIC);
+ if (ret)
+ dev_err(isp->dev, "Failed to register dynamic memory pool.\n");
+ }
+
+ /* Init ISP */
+ if (atomisp_css_init(isp)) {
+ ret = -EINVAL;
+ /* Need to clean up CSS init if it fails. */
+ goto css_error;
+ }
+
+ atomisp_dev_init_struct(isp);
+
+ ret = v4l2_subdev_call(isp->flash, core, s_power, 1);
+ if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD) {
+ dev_err(isp->dev, "Failed to power-on flash\n");
+ goto css_error;
+ }
+
+init_subdev:
+ if (atomisp_subdev_users(asd))
+ goto done;
+
+ atomisp_subdev_init_struct(asd);
+
+done:
+
+ if (acc_node)
+ acc_pipe->users++;
+ else
+ pipe->users++;
+ rt_mutex_unlock(&isp->mutex);
+ return 0;
+
+css_error:
+ atomisp_css_uninit(isp);
+error:
+ hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC);
+ pm_runtime_put(vdev->v4l2_dev->dev);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe;
+ struct atomisp_acc_pipe *acc_pipe;
+ struct atomisp_sub_device *asd;
+ bool acc_node;
+ struct v4l2_requestbuffers req;
+ struct v4l2_subdev_fh fh;
+ struct v4l2_rect clear_compose = {0};
+ int ret = 0;
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ req.count = 0;
+ if (!isp)
+ return -EBADF;
+
+ mutex_lock(&isp->streamoff_mutex);
+ rt_mutex_lock(&isp->mutex);
+
+ dev_dbg(isp->dev, "release device %s\n", vdev->name);
+ acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC");
+ if (acc_node) {
+ acc_pipe = atomisp_to_acc_pipe(vdev);
+ asd = acc_pipe->asd;
+ } else {
+ pipe = atomisp_to_video_pipe(vdev);
+ asd = pipe->asd;
+ }
+ asd->subdev.devnode = vdev;
+ if (acc_node) {
+ acc_pipe->users--;
+ goto subdev_uninit;
+ }
+ pipe->users--;
+
+ if (pipe->capq.streaming)
+ dev_warn(isp->dev,
+ "%s: ISP still streaming while closing!",
+ __func__);
+
+ if (pipe->capq.streaming &&
+ __atomisp_streamoff(file, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
+ dev_err(isp->dev,
+ "atomisp_streamoff failed on release, driver bug");
+ goto done;
+ }
+
+ if (pipe->users)
+ goto done;
+
+ if (__atomisp_reqbufs(file, NULL, &req)) {
+ dev_err(isp->dev,
+ "atomisp_reqbufs failed on release, driver bug");
+ goto done;
+ }
+
+ if (pipe->outq.bufs[0]) {
+ mutex_lock(&pipe->outq.vb_lock);
+ videobuf_queue_cancel(&pipe->outq);
+ mutex_unlock(&pipe->outq.vb_lock);
+ }
+
+ /*
+ * A little trick here:
+ * The file injection input resolution is recorded in the sink pad,
+ * so it cannot be cleared when releasing a single device node.
+ * The sink pad setting can only be cleared when all device nodes
+ * have been released.
+ */
+ if (!isp->sw_contex.file_input && asd->fmt_auto->val) {
+ struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
+
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
+ }
+subdev_uninit:
+ if (atomisp_subdev_users(asd))
+ goto done;
+
+ /* clear the sink pad for file input */
+ if (isp->sw_contex.file_input && asd->fmt_auto->val) {
+ struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
+
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
+ }
+
+ atomisp_css_free_stat_buffers(asd);
+ atomisp_free_internal_buffers(asd);
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, s_power, 0);
+ if (ret)
+ dev_warn(isp->dev, "Failed to power-off sensor\n");
+
+ /* clear the asd field to show this camera is not used */
+ isp->inputs[asd->input_curr].asd = NULL;
+ asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+
+ if (atomisp_dev_users(isp))
+ goto done;
+
+ atomisp_acc_release(asd);
+
+ atomisp_destroy_pipes_stream_force(asd);
+ atomisp_css_uninit(isp);
+
+ if (defer_fw_load) {
+ atomisp_css_unload_firmware(isp);
+ isp->css_env.isp_css_fw.data = NULL;
+ isp->css_env.isp_css_fw.bytes = 0;
+ }
+
+ hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC);
+
+ ret = v4l2_subdev_call(isp->flash, core, s_power, 0);
+ if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD)
+ dev_warn(isp->dev, "Failed to power-off flash\n");
+
+ if (pm_runtime_put_sync(vdev->v4l2_dev->dev) < 0)
+ dev_err(isp->dev, "Failed to power off device\n");
+
+done:
+ if (!acc_node) {
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ atomisp_subdev_source_pad(vdev),
+ V4L2_SEL_TGT_COMPOSE, 0,
+ &clear_compose);
+ }
+ rt_mutex_unlock(&isp->mutex);
+ mutex_unlock(&isp->streamoff_mutex);
+
+ return 0;
+}
+
+/*
+ * Memory helper functions for image frames and private parameters
+ */
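+/*
+ * Note: an hmm allocation is not guaranteed to be physically
+ * contiguous, so do_isp_mm_remap() below looks up the pfn of each
+ * page individually and remaps the buffer page by page.
+ */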
+static int do_isp_mm_remap(struct atomisp_device *isp,
+ struct vm_area_struct *vma,
+ ia_css_ptr isp_virt, u32 host_virt, u32 pgnr)
+{
+ u32 pfn;
+
+ while (pgnr) {
+ pfn = hmm_virt_to_phys(isp_virt) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, host_virt, pfn,
+ PAGE_SIZE, PAGE_SHARED)) {
+ dev_err(isp->dev, "remap_pfn_range err.\n");
+ return -EAGAIN;
+ }
+
+ isp_virt += PAGE_SIZE;
+ host_virt += PAGE_SIZE;
+ pgnr--;
+ }
+
+ return 0;
+}
+
+static int frame_mmap(struct atomisp_device *isp,
+ const struct atomisp_css_frame *frame, struct vm_area_struct *vma)
+{
+ ia_css_ptr isp_virt;
+ u32 host_virt;
+ u32 pgnr;
+
+ if (!frame) {
+ dev_err(isp->dev, "%s: NULL frame pointer.\n", __func__);
+ return -EINVAL;
+ }
+
+ host_virt = vma->vm_start;
+ isp_virt = frame->data;
+ atomisp_get_frame_pgnr(isp, frame, &pgnr);
+
+ if (do_isp_mm_remap(isp, vma, isp_virt, host_virt, pgnr))
+ return -EAGAIN;
+
+ return 0;
+}
+
+int atomisp_videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct vm_area_struct *vma)
+{
+ u32 offset = vma->vm_pgoff << PAGE_SHIFT;
+ int ret = -EINVAL, i;
+ struct atomisp_device *isp =
+ ((struct atomisp_video_pipe *)(q->priv_data))->isp;
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct videobuf_mapping *map;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+ if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
+ dev_err(isp->dev, "map appl bug: PROT_WRITE and MAP_SHARED are required\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&q->vb_lock);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ struct videobuf_buffer *buf = q->bufs[i];
+
+ if (!buf)
+ continue;
+
+ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+ if (!map) {
+ mutex_unlock(&q->vb_lock);
+ return -ENOMEM;
+ }
+
+ buf->map = map;
+ map->q = q;
+
+ buf->baddr = vma->vm_start;
+
+ if (buf->memory == V4L2_MEMORY_MMAP &&
+ buf->boff == offset) {
+ vm_mem = buf->priv;
+ ret = frame_mmap(isp, vm_mem->vaddr, vma);
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ break;
+ }
+ }
+ mutex_unlock(&q->vb_lock);
+
+ return ret;
+}
+
+/*
+ * The input frame contains left and right padding that needs to be
+ * removed. There is always ISP_LEFT_PAD padding on the left side,
+ * and there is also padding on the right (padded_width - width).
+ */
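+/*
+ * Illustrative example: with width == 1920 and padded_width == 2112,
+ * each of the 'height' rows is read starting ISP_LEFT_PAD past the
+ * start of the line and written back densely, so per row the source
+ * offset advances by 2112 while the destination advances by 1920.
+ */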
+static int remove_pad_from_frame(struct atomisp_device *isp,
+ struct atomisp_css_frame *in_frame, __u32 width, __u32 height)
+{
+ unsigned int i;
+ unsigned short *buffer;
+ int ret = 0;
+ ia_css_ptr load = in_frame->data;
+ ia_css_ptr store = load;
+
+ buffer = kmalloc_array(width, sizeof(load), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ load += ISP_LEFT_PAD;
+ for (i = 0; i < height; i++) {
+ ret = hmm_load(load, buffer, width * sizeof(load));
+ if (ret < 0)
+ goto remove_pad_error;
+
+ ret = hmm_store(store, buffer, width * sizeof(store));
+ if (ret < 0)
+ goto remove_pad_error;
+
+ load += in_frame->info.padded_width;
+ store += width;
+ }
+
+remove_pad_error:
+ kfree(buffer);
+ return ret;
+}
+
+static int atomisp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_css_frame *raw_virt_addr;
+ u32 start = vma->vm_start;
+ u32 end = vma->vm_end;
+ u32 size = end - start;
+ u32 origin_size, new_size;
+ int ret;
+
+ if (!(vma->vm_flags & (VM_WRITE | VM_READ)))
+ return -EACCES;
+
+ rt_mutex_lock(&isp->mutex);
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ /*
+ * Map a private buffer. Set VM_SHARED in the flags since we
+ * need to map the buffer page by page; without VM_SHARED,
+ * remap_pfn_range() treats this kind of mapping as invalid.
+ */
+ vma->vm_flags |= VM_SHARED;
+ ret = hmm_mmap(vma, vma->vm_pgoff << PAGE_SHIFT);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+ }
+
+ /* mmap for ISP offline raw data */
+ if (atomisp_subdev_source_pad(vdev)
+ == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE &&
+ vma->vm_pgoff == (ISP_PARAM_MMAP_OFFSET >> PAGE_SHIFT)) {
+ new_size = pipe->pix.width * pipe->pix.height * 2;
+ if (asd->params.online_process != 0) {
+ ret = -EINVAL;
+ goto error;
+ }
+ raw_virt_addr = asd->raw_output_frame;
+ if (!raw_virt_addr) {
+ dev_err(isp->dev, "Failed to request RAW frame\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = remove_pad_from_frame(isp, raw_virt_addr,
+ pipe->pix.width, pipe->pix.height);
+ if (ret < 0) {
+ dev_err(isp->dev, "remove pad failed.\n");
+ goto error;
+ }
+ origin_size = raw_virt_addr->data_bytes;
+ raw_virt_addr->data_bytes = new_size;
+
+ if (size != PAGE_ALIGN(new_size)) {
+ dev_err(isp->dev, "incorrect size for mmap ISP Raw Frame\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (frame_mmap(isp, raw_virt_addr, vma)) {
+ dev_err(isp->dev, "frame_mmap failed.\n");
+ raw_virt_addr->data_bytes = origin_size;
+ ret = -EAGAIN;
+ goto error;
+ }
+ raw_virt_addr->data_bytes = origin_size;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ rt_mutex_unlock(&isp->mutex);
+ return 0;
+ }
+
+ /*
+ * mmap for normal frames
+ */
+ if (size != pipe->pix.sizeimage) {
+ dev_err(isp->dev, "incorrect size for mmap ISP frames\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ return atomisp_videobuf_mmap_mapper(&pipe->capq, vma);
+
+error:
+ rt_mutex_unlock(&isp->mutex);
+
+ return ret;
+}
+
+static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ return videobuf_mmap_mapper(&pipe->outq, vma);
+}
+
+static __poll_t atomisp_poll(struct file *file,
+ struct poll_table_struct *pt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ rt_mutex_lock(&isp->mutex);
+ if (pipe->capq.streaming != 1) {
+ rt_mutex_unlock(&isp->mutex);
+ return EPOLLERR;
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ return videobuf_poll_stream(file, &pipe->capq, pt);
+}
+
+const struct v4l2_file_operations atomisp_fops = {
+ .owner = THIS_MODULE,
+ .open = atomisp_open,
+ .release = atomisp_release,
+ .mmap = atomisp_mmap,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ /*
+ * There are problems with this code. Disable this for now.
+ .compat_ioctl32 = atomisp_compat_ioctl32,
+ */
+#endif
+ .poll = atomisp_poll,
+};
+
+const struct v4l2_file_operations atomisp_file_fops = {
+ .owner = THIS_MODULE,
+ .open = atomisp_open,
+ .release = atomisp_release,
+ .mmap = atomisp_file_mmap,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ /*
+ * There are problems with this code. Disable this for now.
+ .compat_ioctl32 = atomisp_compat_ioctl32,
+ */
+#endif
+ .poll = atomisp_poll,
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.h b/drivers/staging/media/atomisp/pci/atomisp_fops.h
new file mode 100644
index 000000000000..e05e8f3a4442
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.h
@@ -0,0 +1,50 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_FOPS_H__
+#define __ATOMISP_FOPS_H__
+#include "atomisp_subdev.h"
+
+int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_buffer_type css_buf_type,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+unsigned int atomisp_dev_users(struct atomisp_device *isp);
+unsigned int atomisp_sub_dev_users(struct atomisp_sub_device *asd);
+
+/*
+ * Memory helper functions for image frames and private parameters
+ */
+
+int atomisp_videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct vm_area_struct *vma);
+
+int atomisp_qbuf_to_css(struct atomisp_device *isp,
+ struct atomisp_video_pipe *pipe,
+ struct videobuf_buffer *vb);
+
+int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd);
+
+extern const struct v4l2_file_operations atomisp_fops;
+
+extern bool defer_fw_load;
+
+#endif /* __ATOMISP_FOPS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
new file mode 100644
index 000000000000..b096b7d30463
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -0,0 +1,1081 @@
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include "../../include/linux/atomisp_platform.h"
+#include "../../include/linux/atomisp_gmin_platform.h"
+
+#define MAX_SUBDEVS 8
+
+enum clock_rate {
+ VLV2_CLK_XTAL_25_0MHz = 0,
+ VLV2_CLK_PLL_19P2MHZ = 1
+};
+
+#define CLK_RATE_19_2MHZ 19200000
+#define CLK_RATE_25_0MHZ 25000000
+
+/* X-Powers AXP288 register set */
+#define ALDO1_SEL_REG 0x28
+#define ALDO1_CTRL3_REG 0x13
+#define ALDO1_2P8V 0x16
+#define ALDO1_CTRL3_SHIFT 0x05
+
+#define ELDO_CTRL_REG 0x12
+
+#define ELDO1_SEL_REG 0x19
+#define ELDO1_1P8V 0x16
+#define ELDO1_CTRL_SHIFT 0x00
+
+#define ELDO2_SEL_REG 0x1a
+#define ELDO2_1P8V 0x16
+#define ELDO2_CTRL_SHIFT 0x01
+
+/* TI SND9039 PMIC register set */
+#define LDO9_REG 0x49
+#define LDO10_REG 0x4a
+#define LDO11_REG 0x4b
+
+#define LDO_2P8V_ON 0x2f /* 0x2e selects 2.85V ... */
+#define LDO_2P8V_OFF 0x2e /* ... bottom bit is "enabled" */
+
+#define LDO_1P8V_ON 0x59 /* 0x58 selects 1.80V ... */
+#define LDO_1P8V_OFF 0x58 /* ... bottom bit is "enabled" */
+
+/* CRYSTAL COVE PMIC register set */
+#define CRYSTAL_1P8V_REG 0x57
+#define CRYSTAL_2P8V_REG 0x5d
+#define CRYSTAL_ON 0x63
+#define CRYSTAL_OFF 0x62
+
+struct gmin_subdev {
+ struct v4l2_subdev *subdev;
+ int clock_num;
+ enum clock_rate clock_src;
+ bool clock_on;
+ struct clk *pmc_clk;
+ struct gpio_desc *gpio0;
+ struct gpio_desc *gpio1;
+ struct regulator *v1p8_reg;
+ struct regulator *v2p8_reg;
+ struct regulator *v1p2_reg;
+ struct regulator *v2p8_vcm_reg;
+ enum atomisp_camera_port csi_port;
+ unsigned int csi_lanes;
+ enum atomisp_input_format csi_fmt;
+ enum atomisp_bayer_order csi_bayer;
+ bool v1p8_on;
+ bool v2p8_on;
+ bool v1p2_on;
+ bool v2p8_vcm_on;
+
+ u8 pwm_i2c_addr;
+
+ /* For PMIC AXP */
+ int eldo1_sel_reg, eldo1_1p8v, eldo1_ctrl_shift;
+ int eldo2_sel_reg, eldo2_1p8v, eldo2_ctrl_shift;
+};
+
+static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS];
+
+/* ACPI HIDs for the PMICs that could be used by this driver */
+#define PMIC_ACPI_AXP "INT33F4:00" /* XPower AXP288 PMIC */
+#define PMIC_ACPI_TI "INT33F5:00" /* Dollar Cove TI PMIC */
+#define PMIC_ACPI_CRYSTALCOVE "INT33FD:00" /* Crystal Cove PMIC */
+
+#define PMIC_PLATFORM_TI "intel_soc_pmic_chtdc_ti"
+
+static enum {
+ PMIC_UNSET = 0,
+ PMIC_REGULATOR,
+ PMIC_AXP,
+ PMIC_TI,
+ PMIC_CRYSTALCOVE
+} pmic_id;
+
+static const char *pmic_name[] = {
+ [PMIC_UNSET] = "unset",
+ [PMIC_REGULATOR] = "regulator driver",
+ [PMIC_AXP] = "XPower AXP288 PMIC",
+ [PMIC_TI] = "Dollar Cove TI PMIC",
+ [PMIC_CRYSTALCOVE] = "Crystal Cove PMIC",
+};
+
+/* The atomisp uses type==0 for the end-of-list marker, so leave space. */
+static struct intel_v4l2_subdev_table pdata_subdevs[MAX_SUBDEVS + 1];
+
+static const struct atomisp_platform_data pdata = {
+ .subdevs = pdata_subdevs,
+};
+
+/*
+ * Something of a hack. The ECS E7 board drives camera 2.8v from an
+ * external regulator instead of the PMIC. There's a gmin_CamV2P8
+ * config variable that specifies the GPIO to handle this particular
+ * case, but this needs a broader architecture for handling camera
+ * power.
+ */
+enum { V2P8_GPIO_UNSET = -2, V2P8_GPIO_NONE = -1 };
+static int v2p8_gpio = V2P8_GPIO_UNSET;
+
+/*
+ * Something of a hack. The CHT RVP board drives camera 1.8v from an
+ * external regulator instead of the PMIC, just like the ECS E7 board; see the
+ * comments above.
+ */
+enum { V1P8_GPIO_UNSET = -2, V1P8_GPIO_NONE = -1 };
+static int v1p8_gpio = V1P8_GPIO_UNSET;
+
+static LIST_HEAD(vcm_devices);
+static DEFINE_MUTEX(vcm_lock);
+
+static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev);
+
+/*
+ * Legacy/stub behavior copied from upstream platform_camera.c. The
+ * atomisp driver relies on these values being non-NULL in a few
+ * places, even though they are hard-coded in all current
+ * implementations.
+ */
+const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void)
+{
+ static const struct atomisp_camera_caps caps = {
+ .sensor_num = 1,
+ .sensor = {
+ { .stream_num = 1, },
+ },
+ };
+ return &caps;
+}
+EXPORT_SYMBOL_GPL(atomisp_get_default_camera_caps);
+
+const struct atomisp_platform_data *atomisp_get_platform_data(void)
+{
+ return &pdata;
+}
+EXPORT_SYMBOL_GPL(atomisp_get_platform_data);
+
+int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
+ struct camera_sensor_platform_data *plat_data,
+ enum intel_v4l2_subdev_type type)
+{
+ int i;
+ struct i2c_board_info *bi;
+ struct gmin_subdev *gs;
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+
+ dev_info(&client->dev, "register atomisp i2c module type %d\n", type);
+
+ /* The Windows driver model (and thus most BIOSes by default)
+ * uses ACPI runtime power management for camera devices, but
+ * we don't. Disable it, or else the rails will be needlessly
+ * tickled during suspend/resume. This has caused power and
+ * performance issues on multiple devices.
+ */
+ adev->power.flags.power_resources = 0;
+
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (!pdata.subdevs[i].type)
+ break;
+
+ /* No free slot: i reached the end-of-list sentinel at MAX_SUBDEVS */
+ if (i == MAX_SUBDEVS)
+ return -ENOMEM;
+
+ /* Note subtlety of initialization order: at the point where
+ * this registration API gets called, the platform data
+ * callbacks have probably already been invoked, so the
+ * gmin_subdev struct is already initialized for us.
+ */
+ gs = find_gmin_subdev(subdev);
+
+ pdata.subdevs[i].type = type;
+ pdata.subdevs[i].port = gs->csi_port;
+ pdata.subdevs[i].subdev = subdev;
+ pdata.subdevs[i].v4l2_subdev.i2c_adapter_id = client->adapter->nr;
+
+ /* Convert i2c_client to i2c_board_info */
+ bi = &pdata.subdevs[i].v4l2_subdev.board_info;
+ memcpy(bi->type, client->name, I2C_NAME_SIZE);
+ bi->flags = client->flags;
+ bi->addr = client->addr;
+ bi->irq = client->irq;
+ bi->platform_data = plat_data;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atomisp_register_i2c_module);
+
+struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter,
+ struct i2c_board_info *board_info)
+{
+ int i;
+
+ for (i = 0; i < MAX_SUBDEVS && pdata.subdevs[i].type; i++) {
+ struct intel_v4l2_subdev_table *sd = &pdata.subdevs[i];
+
+ if (sd->v4l2_subdev.i2c_adapter_id == adapter->nr &&
+ sd->v4l2_subdev.board_info.addr == board_info->addr)
+ return sd->subdev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(atomisp_gmin_find_subdev);
+
+int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd)
+{
+ int i, j;
+
+ if (!sd)
+ return 0;
+
+ for (i = 0; i < MAX_SUBDEVS; i++) {
+ if (pdata.subdevs[i].subdev == sd) {
+ for (j = i + 1; j <= MAX_SUBDEVS; j++)
+ pdata.subdevs[j - 1] = pdata.subdevs[j];
+ }
+ if (gmin_subdevs[i].subdev == sd) {
+ if (gmin_subdevs[i].gpio0)
+ gpiod_put(gmin_subdevs[i].gpio0);
+ gmin_subdevs[i].gpio0 = NULL;
+ if (gmin_subdevs[i].gpio1)
+ gpiod_put(gmin_subdevs[i].gpio1);
+ gmin_subdevs[i].gpio1 = NULL;
+ if (pmic_id == PMIC_REGULATOR) {
+ regulator_put(gmin_subdevs[i].v1p8_reg);
+ regulator_put(gmin_subdevs[i].v2p8_reg);
+ regulator_put(gmin_subdevs[i].v1p2_reg);
+ regulator_put(gmin_subdevs[i].v2p8_vcm_reg);
+ }
+ gmin_subdevs[i].subdev = NULL;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atomisp_gmin_remove_subdev);
+
+struct gmin_cfg_var {
+ const char *name, *val;
+};
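+
+/*
+ * Each entry maps a "<ACPI device name>_<Var>" key to an ASCII value,
+ * mirroring the EFI variable scheme handled by gmin_get_config_var().
+ */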
+
+static struct gmin_cfg_var ffrd8_vars[] = {
+ { "INTCF1B:00_ImxId", "0x134" },
+ { "INTCF1B:00_CsiPort", "1" },
+ { "INTCF1B:00_CsiLanes", "4" },
+ { "INTCF1B:00_CamClk", "0" },
+ {},
+};
+
+/* Cribbed from MCG defaults in the mt9m114 driver, not actually verified
+ * vs. T100 hardware
+ */
+static struct gmin_cfg_var t100_vars[] = {
+ { "INT33F0:00_CsiPort", "0" },
+ { "INT33F0:00_CsiLanes", "1" },
+ { "INT33F0:00_CamClk", "1" },
+ {},
+};
+
+static struct gmin_cfg_var mrd7_vars[] = {
+ {"INT33F8:00_CamType", "1"},
+ {"INT33F8:00_CsiPort", "1"},
+ {"INT33F8:00_CsiLanes", "2"},
+ {"INT33F8:00_CsiFmt", "13"},
+ {"INT33F8:00_CsiBayer", "0"},
+ {"INT33F8:00_CamClk", "0"},
+ {"INT33F9:00_CamType", "1"},
+ {"INT33F9:00_CsiPort", "0"},
+ {"INT33F9:00_CsiLanes", "1"},
+ {"INT33F9:00_CsiFmt", "13"},
+ {"INT33F9:00_CsiBayer", "0"},
+ {"INT33F9:00_CamClk", "1"},
+ {},
+};
+
+static struct gmin_cfg_var ecs7_vars[] = {
+ {"INT33BE:00_CsiPort", "1"},
+ {"INT33BE:00_CsiLanes", "2"},
+ {"INT33BE:00_CsiFmt", "13"},
+ {"INT33BE:00_CsiBayer", "2"},
+ {"INT33BE:00_CamClk", "0"},
+ {"INT33F0:00_CsiPort", "0"},
+ {"INT33F0:00_CsiLanes", "1"},
+ {"INT33F0:00_CsiFmt", "13"},
+ {"INT33F0:00_CsiBayer", "0"},
+ {"INT33F0:00_CamClk", "1"},
+ {"gmin_V2P8GPIO", "402"},
+ {},
+};
+
+static struct gmin_cfg_var i8880_vars[] = {
+ {"XXOV2680:00_CsiPort", "1"},
+ {"XXOV2680:00_CsiLanes", "1"},
+ {"XXOV2680:00_CamClk", "0"},
+ {"XXGC0310:00_CsiPort", "0"},
+ {"XXGC0310:00_CsiLanes", "1"},
+ {"XXGC0310:00_CamClk", "1"},
+ {},
+};
+
+static struct gmin_cfg_var asus_vars[] = {
+ {"OVTI2680:00_CsiPort", "1"},
+ {"OVTI2680:00_CsiLanes", "1"},
+ {"OVTI2680:00_CsiFmt", "15"},
+ {"OVTI2680:00_CsiBayer", "0"},
+ {"OVTI2680:00_CamClk", "1"},
+ {},
+};
+
+static const struct dmi_system_id gmin_vars[] = {
+ {
+ .ident = "BYT-T FFD8",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ },
+ .driver_data = ffrd8_vars,
+ },
+ {
+ .ident = "T100TA",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T100TA"),
+ },
+ .driver_data = t100_vars,
+ },
+ {
+ .ident = "MRD7",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "TABLET"),
+ DMI_MATCH(DMI_BOARD_VERSION, "MRD 7"),
+ },
+ .driver_data = mrd7_vars,
+ },
+ {
+ .ident = "ST70408",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "ST70408"),
+ },
+ .driver_data = ecs7_vars,
+ },
+ {
+ .ident = "VTA0803",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VTA0803"),
+ },
+ .driver_data = i8880_vars,
+ },
+ {
+ .ident = "T101HA",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T101HA"),
+ },
+ .driver_data = asus_vars,
+ },
+ {}
+};
+
+#define GMIN_CFG_VAR_EFI_GUID EFI_GUID(0xecb54cd9, 0xe5ae, 0x4fdc, \
+ 0xa9, 0x71, 0xe8, 0x77, \
+ 0x75, 0x60, 0x68, 0xf7)
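+
+/*
+ * Example (a sketch; exact behaviour depends on the firmware): with
+ * efivarfs mounted, the CsiPort variable for an INT33F0 sensor would
+ * appear as
+ * /sys/firmware/efi/efivars/INT33F0:00_CsiPort-ecb54cd9-e5ae-4fdc-a971-e877756068f7
+ * holding an ASCII value such as "0".
+ */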
+
+#define CFG_VAR_NAME_MAX 64
+
+#define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */
+static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME];
+
+static int gmin_i2c_match_one(struct device *dev, const void *data)
+{
+ const char *name = data;
+ struct i2c_client *client;
+
+ if (dev->type != &i2c_client_type)
+ return 0;
+
+ client = to_i2c_client(dev);
+
+ return (!strcmp(name, client->name));
+}
+
+static struct i2c_client *gmin_i2c_dev_exists(struct device *dev, char *name,
+ struct i2c_client **client)
+{
+ struct device *d;
+
+ while ((d = bus_find_device(&i2c_bus_type, NULL, name,
+ gmin_i2c_match_one))) {
+ *client = to_i2c_client(d);
+ dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n",
+ (*client)->name, (*client)->addr,
+ (*client)->adapter->nr);
+ return *client;
+ }
+
+ return NULL;
+}
+
+static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg,
+ u32 value, u32 mask)
+{
+ int ret;
+
+ /*
+ * FIXME: Right now, the intel_pmic driver just writes values
+ * directly to the regmap, instead of properly implementing the
+ * i2c_transfer() mechanism. Let's use the same interface here,
+ * as otherwise we may face issues.
+ */
+
+ dev_dbg(dev,
+ "I2C write, addr: 0x%02x, reg: 0x%02x, value: 0x%02x, mask: 0x%02x\n",
+ i2c_addr, reg, value, mask);
+
+ ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg,
+ value, mask);
+
+ if (ret == -EOPNOTSUPP) {
+ dev_err(dev,
+ "ACPI didn't mapped the OpRegion needed to access I2C address 0x%02x.\n"
+ "Need to compile the Kernel using CONFIG_*_PMIC_OPREGION settings\n",
+ i2c_addr);
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
+{
+ int i, ret;
+ struct device *dev;
+ struct i2c_client *power = NULL, *client = v4l2_get_subdevdata(subdev);
+
+ if (!client)
+ return NULL;
+
+ dev = &client->dev;
+
+ if (!pmic_id) {
+ if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power))
+ pmic_id = PMIC_TI;
+ else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power))
+ pmic_id = PMIC_AXP;
+ else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_CRYSTALCOVE, &power))
+ pmic_id = PMIC_CRYSTALCOVE;
+ else
+ pmic_id = PMIC_REGULATOR;
+ }
+
+ for (i = 0; i < MAX_SUBDEVS && gmin_subdevs[i].subdev; i++)
+ ;
+ if (i >= MAX_SUBDEVS)
+ return NULL;
+
+ if (power) {
+ gmin_subdevs[i].pwm_i2c_addr = power->addr;
+ dev_info(dev,
+ "gmin: power management provided via %s (i2c addr 0x%02x)\n",
+ pmic_name[pmic_id], power->addr);
+ } else {
+ dev_info(dev, "gmin: power management provided via %s\n",
+ pmic_name[pmic_id]);
+ }
+
+ gmin_subdevs[i].subdev = subdev;
+ gmin_subdevs[i].clock_num = gmin_get_var_int(dev, false, "CamClk", 0);
+ /* WA: CHT requires the XTAL clock, as the PLL is not stable. */
+ gmin_subdevs[i].clock_src = gmin_get_var_int(dev, false, "ClkSrc",
+ VLV2_CLK_PLL_19P2MHZ);
+ gmin_subdevs[i].csi_port = gmin_get_var_int(dev, false, "CsiPort", 0);
+ gmin_subdevs[i].csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1);
+
+ /* get PMC clock with clock framework */
+ snprintf(gmin_pmc_clk_name,
+ sizeof(gmin_pmc_clk_name),
+ "%s_%d", "pmc_plt_clk", gmin_subdevs[i].clock_num);
+
+ gmin_subdevs[i].pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
+ if (IS_ERR(gmin_subdevs[i].pmc_clk)) {
+ ret = PTR_ERR(gmin_subdevs[i].pmc_clk);
+
+ dev_err(dev,
+ "Failed to get clk from %s : %d\n",
+ gmin_pmc_clk_name,
+ ret);
+
+ return NULL;
+ }
+
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Since the common
+ * clock framework does not allow disabling a
+ * clock that has not been enabled, we need to
+ * enable the clock first.
+ */
+ ret = clk_prepare_enable(gmin_subdevs[i].pmc_clk);
+ if (!ret)
+ clk_disable_unprepare(gmin_subdevs[i].pmc_clk);
+
+ gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
+ if (IS_ERR(gmin_subdevs[i].gpio0))
+ gmin_subdevs[i].gpio0 = NULL;
+
+ gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
+ if (IS_ERR(gmin_subdevs[i].gpio1))
+ gmin_subdevs[i].gpio1 = NULL;
+
+ switch (pmic_id) {
+ case PMIC_REGULATOR:
+ gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX");
+ gmin_subdevs[i].v2p8_reg = regulator_get(dev, "V2P8SX");
+
+ gmin_subdevs[i].v1p2_reg = regulator_get(dev, "V1P2A");
+ gmin_subdevs[i].v2p8_vcm_reg = regulator_get(dev, "VPROG4B");
+
+ /* Note: ideally we would initialize v[12]p8_on to the
+ * output of regulator_is_enabled(), but sadly that
+ * API is broken with the current drivers, returning
+ * "1" for a regulator that will then emit a
+ * "unbalanced disable" WARNing if we try to disable
+ * it.
+ */
+ break;
+
+ case PMIC_AXP:
+ gmin_subdevs[i].eldo1_1p8v = gmin_get_var_int(dev, false,
+ "eldo1_1p8v",
+ ELDO1_1P8V);
+ gmin_subdevs[i].eldo1_sel_reg = gmin_get_var_int(dev, false,
+ "eldo1_sel_reg",
+ ELDO1_SEL_REG);
+ gmin_subdevs[i].eldo1_ctrl_shift = gmin_get_var_int(dev, false,
+ "eldo1_ctrl_shift",
+ ELDO1_CTRL_SHIFT);
+ gmin_subdevs[i].eldo2_1p8v = gmin_get_var_int(dev, false,
+ "eldo2_1p8v",
+ ELDO2_1P8V);
+ gmin_subdevs[i].eldo2_sel_reg = gmin_get_var_int(dev, false,
+ "eldo2_sel_reg",
+ ELDO2_SEL_REG);
+ gmin_subdevs[i].eldo2_ctrl_shift = gmin_get_var_int(dev, false,
+ "eldo2_ctrl_shift",
+ ELDO2_CTRL_SHIFT);
+ /* power may be NULL when the PMIC was identified on an earlier call */
+ if (power)
+ gmin_subdevs[i].pwm_i2c_addr = power->addr;
+ break;
+
+ default:
+ break;
+ }
+
+ return &gmin_subdevs[i];
+}
+
+static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
+{
+ int i;
+
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (gmin_subdevs[i].subdev == subdev)
+ return &gmin_subdevs[i];
+ return gmin_subdev_add(subdev);
+}
+
+static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs,
+ int sel_reg, u8 setting,
+ int ctrl_reg, int shift, bool on)
+{
+ int ret;
+ int val;
+
+ ret = gmin_i2c_write(dev, gs->pwm_i2c_addr, sel_reg, setting, 0xff);
+ if (ret)
+ return ret;
+
+ val = on ? 1 << shift : 0;
+
+ /* The enable bit lives in ctrl_reg, not in the voltage-select register */
+ ret = gmin_i2c_write(dev, gs->pwm_i2c_addr, ctrl_reg, val, 1 << shift);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs)
+{
+ int ret;
+
+ ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
+ ELDO_CTRL_REG, gs->eldo2_ctrl_shift, true);
+ if (ret)
+ return ret;
+
+ /*
+ * This sleep comes out of the gc2235 driver, which is the
+ * only one I currently see that wants to set both 1.8v rails.
+ */
+ usleep_range(110, 150);
+
+ ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p8v,
+ ELDO_CTRL_REG, gs->eldo1_ctrl_shift, true);
+ if (ret)
+ return ret;
+
+ ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
+ ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false);
+ return ret;
+}
+
+static int axp_v1p8_off(struct device *dev, struct gmin_subdev *gs)
+{
+ int ret;
+
+ ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p8v,
+ ELDO_CTRL_REG, gs->eldo1_ctrl_shift, false);
+ if (ret)
+ return ret;
+
+ ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
+ ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false);
+ return ret;
+}
+
+static int gmin_gpio0_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
+ if (gs) {
+ gpiod_set_value(gs->gpio0, on);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int gmin_gpio1_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
+ if (gs) {
+ gpiod_set_value(gs->gpio1, on);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int gmin_v1p2_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
+ if (!gs || gs->v1p2_on == on)
+ return 0;
+ gs->v1p2_on = on;
+
+ /* use regulator for PMIC */
+ if (gs->v1p2_reg) {
+ if (on)
+ return regulator_enable(gs->v1p2_reg);
+ else
+ return regulator_disable(gs->v1p2_reg);
+ }
+
+ /* TODO:v1p2 may need to extend to other PMICs */
+
+ return -EINVAL;
+}
+
+static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ int ret;
+ struct device *dev;
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int value;
+
+ dev = &client->dev;
+
+ if (v1p8_gpio == V1P8_GPIO_UNSET) {
+ v1p8_gpio = gmin_get_var_int(dev, true,
+ "V1P8GPIO", V1P8_GPIO_NONE);
+ if (v1p8_gpio != V1P8_GPIO_NONE) {
+ pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n",
+ v1p8_gpio);
+ ret = gpio_request(v1p8_gpio, "camera_v1p8_en");
+ if (!ret)
+ ret = gpio_direction_output(v1p8_gpio, 0);
+ if (ret)
+ pr_err("V1P8 GPIO initialization failed\n");
+ }
+ }
+
+ if (!gs || gs->v1p8_on == on)
+ return 0;
+ gs->v1p8_on = on;
+
+ if (v1p8_gpio >= 0)
+ gpio_set_value(v1p8_gpio, on);
+
+ if (gs->v1p8_reg) {
+ regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
+ if (on)
+ return regulator_enable(gs->v1p8_reg);
+ else
+ return regulator_disable(gs->v1p8_reg);
+ }
+
+ switch (pmic_id) {
+ case PMIC_AXP:
+ if (on)
+ return axp_v1p8_on(subdev->dev, gs);
+ else
+ return axp_v1p8_off(subdev->dev, gs);
+ case PMIC_TI:
+ value = on ? LDO_1P8V_ON : LDO_1P8V_OFF;
+
+ return gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ LDO10_REG, value, 0xff);
+ case PMIC_CRYSTALCOVE:
+ value = on ? CRYSTAL_ON : CRYSTAL_OFF;
+
+ return gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ CRYSTAL_1P8V_REG, value, 0xff);
+ default:
+ dev_err(subdev->dev, "Couldn't set power mode for v1p2\n");
+ }
+
+ return -EINVAL;
+}
+
+static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ int ret;
+ struct device *dev;
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int value;
+
+ dev = &client->dev;
+
+ if (v2p8_gpio == V2P8_GPIO_UNSET) {
+ v2p8_gpio = gmin_get_var_int(dev, true,
+ "V2P8GPIO", V2P8_GPIO_NONE);
+ if (v2p8_gpio != V2P8_GPIO_NONE) {
+ pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n",
+ v2p8_gpio);
+ ret = gpio_request(v2p8_gpio, "camera_v2p8");
+ if (!ret)
+ ret = gpio_direction_output(v2p8_gpio, 0);
+ if (ret)
+ pr_err("V2P8 GPIO initialization failed\n");
+ }
+ }
+
+ if (!gs || gs->v2p8_on == on)
+ return 0;
+ gs->v2p8_on = on;
+
+ if (v2p8_gpio >= 0)
+ gpio_set_value(v2p8_gpio, on);
+
+ if (gs->v2p8_reg) {
+ regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
+ if (on)
+ return regulator_enable(gs->v2p8_reg);
+ else
+ return regulator_disable(gs->v2p8_reg);
+ }
+
+ switch (pmic_id) {
+ case PMIC_AXP:
+ return axp_regulator_set(subdev->dev, gs, ALDO1_SEL_REG,
+ ALDO1_2P8V, ALDO1_CTRL3_REG,
+ ALDO1_CTRL3_SHIFT, on);
+ case PMIC_TI:
+ value = on ? LDO_2P8V_ON : LDO_2P8V_OFF;
+
+ return gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ LDO9_REG, value, 0xff);
+ case PMIC_CRYSTALCOVE:
+ value = on ? CRYSTAL_ON : CRYSTAL_OFF;
+
+ return gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ CRYSTAL_2P8V_REG, value, 0xff);
+ default:
+ dev_err(subdev->dev, "Couldn't set power mode for v1p2\n");
+ }
+
+ return -EINVAL;
+}
+
+static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ int ret = 0;
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+
+ if (gs->clock_on == !!on)
+ return 0;
+
+ if (on) {
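+ /*
+ * clock_src is VLV2_CLK_PLL_19P2MHZ (1) or VLV2_CLK_XTAL_25_0MHz (0),
+ * so a non-zero value selects the 19.2 MHz PLL rate.
+ */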
+ ret = clk_set_rate(gs->pmc_clk,
+ gs->clock_src ? CLK_RATE_19_2MHZ : CLK_RATE_25_0MHZ);
+
+ if (ret)
+ dev_err(&client->dev, "unable to set PMC rate %d\n",
+ gs->clock_src);
+
+ ret = clk_prepare_enable(gs->pmc_clk);
+ if (ret == 0)
+ gs->clock_on = true;
+ } else {
+ clk_disable_unprepare(gs->pmc_clk);
+ gs->clock_on = false;
+ }
+
+ return ret;
+}
+
+static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct gmin_subdev *gs = find_gmin_subdev(sd);
+
+ if (!client || !gs)
+ return -ENODEV;
+
+ return camera_sensor_csi(sd, gs->csi_port, gs->csi_lanes,
+ gs->csi_fmt, gs->csi_bayer, flag);
+}
+
+static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev,
+ char *camera_module)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ struct camera_vcm_control *vcm;
+
+ if (!client || !gs)
+ return NULL;
+
+ if (!camera_module)
+ return NULL;
+
+ mutex_lock(&vcm_lock);
+ list_for_each_entry(vcm, &vcm_devices, list) {
+ if (!strcmp(camera_module, vcm->camera_module)) {
+ mutex_unlock(&vcm_lock);
+ return vcm;
+ }
+ }
+
+ mutex_unlock(&vcm_lock);
+ return NULL;
+}
+
+static struct camera_sensor_platform_data gmin_plat = {
+ .gpio0_ctrl = gmin_gpio0_ctrl,
+ .gpio1_ctrl = gmin_gpio1_ctrl,
+ .v1p8_ctrl = gmin_v1p8_ctrl,
+ .v2p8_ctrl = gmin_v2p8_ctrl,
+ .v1p2_ctrl = gmin_v1p2_ctrl,
+ .flisclk_ctrl = gmin_flisclk_ctrl,
+ .csi_cfg = gmin_csi_cfg,
+ .get_vcm_ctrl = gmin_get_vcm_ctrl,
+};
+
+struct camera_sensor_platform_data *gmin_camera_platform_data(
+ struct v4l2_subdev *subdev,
+ enum atomisp_input_format csi_format,
+ enum atomisp_bayer_order csi_bayer)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
+ gs->csi_fmt = csi_format;
+ gs->csi_bayer = csi_bayer;
+
+ return &gmin_plat;
+}
+EXPORT_SYMBOL_GPL(gmin_camera_platform_data);
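+
+/*
+ * Typical usage (a sketch, with sensor-specific values assumed): a sensor
+ * driver fetches its platform callbacks during probe and then registers
+ * itself, e.g.:
+ *
+ *	pdata = gmin_camera_platform_data(sd, ATOMISP_INPUT_FORMAT_RAW_10,
+ *					  atomisp_bayer_order_bggr);
+ *	atomisp_register_i2c_module(sd, pdata, RAW_CAMERA);
+ */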
+
+int atomisp_gmin_register_vcm_control(struct camera_vcm_control *vcmCtrl)
+{
+ if (!vcmCtrl)
+ return -EINVAL;
+
+ mutex_lock(&vcm_lock);
+ list_add_tail(&vcmCtrl->list, &vcm_devices);
+ mutex_unlock(&vcm_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atomisp_gmin_register_vcm_control);
+
+static int gmin_get_hardcoded_var(struct gmin_cfg_var *varlist,
+ const char *var8, char *out, size_t *out_len)
+{
+ struct gmin_cfg_var *gv;
+
+ for (gv = varlist; gv->name; gv++) {
+ size_t vl;
+
+ if (strcmp(var8, gv->name))
+ continue;
+
+ vl = strlen(gv->val);
+ if (vl > *out_len - 1)
+ return -ENOSPC;
+
+ strcpy(out, gv->val);
+ *out_len = vl;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Retrieves a device-specific configuration variable. The dev
+ * argument should be a device with an ACPI companion, as all
+ * configuration is based on firmware ID.
+ */
+static int gmin_get_config_var(struct device *maindev,
+ bool is_gmin,
+ const char *var,
+ char *out, size_t *out_len)
+{
+ char var8[CFG_VAR_NAME_MAX];
+ efi_char16_t var16[CFG_VAR_NAME_MAX];
+ struct efivar_entry *ev;
+ const struct dmi_system_id *id;
+ int i, ret;
+ struct device *dev = maindev;
+
+ if (!is_gmin && ACPI_COMPANION(dev))
+ dev = &ACPI_COMPANION(dev)->dev;
+
+ if (!is_gmin)
+ ret = snprintf(var8, sizeof(var8), "%s_%s", dev_name(dev), var);
+ else
+ ret = snprintf(var8, sizeof(var8), "gmin_%s", var);
+
+ if (ret < 0 || ret >= sizeof(var8) - 1)
+ return -EINVAL;
+
+ /* First check a hard-coded list of board-specific variables.
+ * Some device firmwares lack the ability to set EFI variables at
+ * runtime.
+ */
+ id = dmi_first_match(gmin_vars);
+ if (id) {
+ dev_info(maindev, "Found DMI entry for '%s'\n", var8);
+ return gmin_get_hardcoded_var(id->driver_data, var8, out,
+ out_len);
+ }
+
+ /* Our variable names are ASCII by construction, but EFI names
+ * are wide chars. Convert and zero-pad.
+ */
+ memset(var16, 0, sizeof(var16));
+ for (i = 0; i < sizeof(var8) && var8[i]; i++)
+ var16[i] = var8[i];
+
+ /* Not sure this API usage is kosher; efivar_entry_get()'s
+ * implementation simply uses VariableName and VendorGuid from
+ * the struct and ignores the rest, but it seems like there
+ * ought to be an "official" efivar_entry registered
+ * somewhere?
+ */
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+ memcpy(&ev->var.VariableName, var16, sizeof(var16));
+ ev->var.VendorGuid = GMIN_CFG_VAR_EFI_GUID;
+ ev->var.DataSize = *out_len;
+
+ ret = efivar_entry_get(ev, &ev->var.Attributes,
+ &ev->var.DataSize, ev->var.Data);
+ if (ret == 0) {
+ memcpy(out, ev->var.Data, ev->var.DataSize);
+ *out_len = ev->var.DataSize;
+ dev_info(maindev, "found EFI entry for '%s'\n", var8);
+ } else if (is_gmin) {
+ dev_warn(maindev, "Failed to find gmin variable %s\n", var8);
+ } else {
+ dev_warn(maindev, "Failed to find variable %s\n", var8);
+ }
+
+ kfree(ev);
+
+ return ret;
+}
+
+int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def)
+{
+ char val[CFG_VAR_NAME_MAX];
+ size_t len = sizeof(val) - 1; /* reserve room for the NUL added below */
+ long result;
+ int ret;
+
+ ret = gmin_get_config_var(dev, is_gmin, var, val, &len);
+ if (!ret) {
+ val[len] = 0;
+ ret = kstrtol(val, 0, &result);
+ }
+
+ return ret ? def : result;
+}
+EXPORT_SYMBOL_GPL(gmin_get_var_int);
+
+int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
+ u32 lanes, u32 format, u32 bayer_order, int flag)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *csi = NULL;
+
+ if (flag) {
+ csi = kzalloc(sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+ csi->port = port;
+ csi->num_lanes = lanes;
+ csi->input_format = format;
+ csi->raw_bayer_order = bayer_order;
+ v4l2_set_subdev_hostdata(sd, (void *)csi);
+ csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
+ csi->metadata_effective_width = NULL;
+ dev_info(&client->dev,
+ "camera pdata: port: %d lanes: %d order: %8.8x\n",
+ port, lanes, bayer_order);
+ } else {
+ csi = v4l2_get_subdev_hostdata(sd);
+ kfree(csi);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(camera_sensor_csi);
+
+/* PCI quirk: The BYT ISP advertises PCI runtime PM but it doesn't
+ * work. Disable it so the kernel framework doesn't hang the device
+ * while trying. The driver itself makes direct calls to the PUNIT to manage
+ * ISP power.
+ */
+static void isp_pm_cap_fixup(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "Disabling PCI power management on camera ISP\n");
+ dev->pm_cap = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup);
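+/* 0x0f38 is the BYT ISP, ATOMISP_PCI_DEVICE_SOC_BYT in atomisp_internal.h. */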
+
+MODULE_DESCRIPTION("Ancillary routines for binding ACPI devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/pci/atomisp_helper.h b/drivers/staging/media/atomisp/pci/atomisp_helper.h
new file mode 100644
index 000000000000..56035063f81d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_helper.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef _atomisp_helper_h_
+#define _atomisp_helper_h_
+extern void __iomem *atomisp_io_base;
+
+static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address)
+{
+ void __iomem *ret = atomisp_io_base + (address & 0x003FFFFF);
+ return ret;
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
new file mode 100644
index 000000000000..26539f3ffb9b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -0,0 +1,307 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef __ATOMISP_INTERNAL_H__
+#define __ATOMISP_INTERNAL_H__
+
+#include "../../include/linux/atomisp_platform.h"
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/pm_qos.h>
+#include <linux/idr.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-subdev.h>
+
+/* ISP2400*/
+#include "ia_css_types.h"
+#include "sh_css_legacy.h"
+
+#include "atomisp_csi2.h"
+#include "atomisp_file.h"
+#include "atomisp_subdev.h"
+#include "atomisp_tpg.h"
+#include "atomisp_compat.h"
+
+#include "gp_device.h"
+#include "irq.h"
+#include <linux/vmalloc.h>
+
+#define V4L2_EVENT_FRAME_END 5
+
+#define IS_HWREVISION(isp, rev) \
+ (((isp)->media_dev.hw_revision & ATOMISP_HW_REVISION_MASK) == \
+ ((rev) << ATOMISP_HW_REVISION_SHIFT))
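+/*
+ * e.g. IS_HWREVISION(isp, ATOMISP_HW_REVISION_ISP2401) (revision
+ * constants assumed to come from atomisp_platform.h).
+ */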
+
+#define MAX_STREAM_NUM 2
+
+#define ATOMISP_PCI_DEVICE_SOC_MASK 0xfff8
+/* MRFLD with 0x1178: ISP freq can burst to 457MHz */
+#define ATOMISP_PCI_DEVICE_SOC_MRFLD 0x1178
+/* MRFLD with 0x1179: max ISP freq limited to 400MHz */
+#define ATOMISP_PCI_DEVICE_SOC_MRFLD_1179 0x1179
+/* MRFLD with 0x117a: max ISP freq is 400MHz and max freq at Vmin is 200MHz */
+#define ATOMISP_PCI_DEVICE_SOC_MRFLD_117A 0x117a
+#define ATOMISP_PCI_DEVICE_SOC_BYT 0x0f38
+#define ATOMISP_PCI_DEVICE_SOC_ANN 0x1478
+#define ATOMISP_PCI_DEVICE_SOC_CHT 0x22b8
+
+#define ATOMISP_PCI_REV_MRFLD_A0_MAX 0
+#define ATOMISP_PCI_REV_BYT_A0_MAX 4
+
+#define ATOM_ISP_STEP_WIDTH 2
+#define ATOM_ISP_STEP_HEIGHT 2
+
+#define ATOM_ISP_MIN_WIDTH 4
+#define ATOM_ISP_MIN_HEIGHT 4
+#define ATOM_ISP_MAX_WIDTH UINT_MAX
+#define ATOM_ISP_MAX_HEIGHT UINT_MAX
+
+/* sub-QCIF resolution */
+#define ATOM_RESOLUTION_SUBQCIF_WIDTH 128
+#define ATOM_RESOLUTION_SUBQCIF_HEIGHT 96
+
+#define ATOM_ISP_MAX_WIDTH_TMP 1280
+#define ATOM_ISP_MAX_HEIGHT_TMP 720
+
+#define ATOM_ISP_I2C_BUS_1 4
+#define ATOM_ISP_I2C_BUS_2 5
+
+#define ATOM_ISP_POWER_DOWN 0
+#define ATOM_ISP_POWER_UP 1
+
+#define ATOM_ISP_MAX_INPUTS 4
+
+#define ATOMISP_SC_TYPE_SIZE 2
+
+#define ATOMISP_ISP_TIMEOUT_DURATION (2 * HZ)
+#define ATOMISP_EXT_ISP_TIMEOUT_DURATION (6 * HZ)
+#define ATOMISP_ISP_FILE_TIMEOUT_DURATION (60 * HZ)
+#define ATOMISP_WDT_KEEP_CURRENT_DELAY 0
+#define ATOMISP_ISP_MAX_TIMEOUT_COUNT 2
+#define ATOMISP_CSS_STOP_TIMEOUT_US 200000
+
+#define ATOMISP_CSS_Q_DEPTH 3
+#define ATOMISP_CSS_EVENTS_MAX 16
+#define ATOMISP_CONT_RAW_FRAMES 15
+#define ATOMISP_METADATA_QUEUE_DEPTH_FOR_HAL 8
+#define ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL 8
+
+#define ATOMISP_DELAYED_INIT_NOT_QUEUED 0
+#define ATOMISP_DELAYED_INIT_QUEUED 1
+#define ATOMISP_DELAYED_INIT_DONE 2
+
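+/*
+ * 38% of the frame height, forced even by the mask; e.g. for 1080 lines:
+ * 1080 * 38 / 100 = 410, and 410 & 0xfffffe = 410.
+ */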
+#define ATOMISP_CALC_CSS_PREV_OVERLAP(lines) \
+ ((lines) * 38 / 100 & 0xfffffe)
+
+/*
+ * Define how fast the CPU should be able to serve ISP interrupts.
+ * The bigger the value, the higher the risk that the ISP is not
+ * triggered sufficiently fast for it to process the image during
+ * vertical blanking time, increasing the risk of dropped frames.
+ * 1000 us is a reasonable value considering that the processing
+ * time is typically ~2000 us.
+ */
+#define ATOMISP_MAX_ISR_LATENCY 1000
+
+/* Add new YUVPP pipe for SOC sensor. */
+#define ATOMISP_CSS_SUPPORT_YUVPP 1
+
+#define ATOMISP_CSS_OUTPUT_SECOND_INDEX 1
+#define ATOMISP_CSS_OUTPUT_DEFAULT_INDEX 0
+
+/*
+ * ATOMISP_SOC_CAMERA
+ * This is to differentiate between ext-isp and soc camera in
+ * Moorefield/Baytrail platform.
+ */
+#define ATOMISP_SOC_CAMERA(asd) \
+ (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA \
+ && asd->isp->inputs[asd->input_curr].camera_caps-> \
+ sensor[asd->sensor_curr].stream_num == 1)
+
+#define ATOMISP_USE_YUVPP(asd) \
+ (ATOMISP_SOC_CAMERA(asd) && ATOMISP_CSS_SUPPORT_YUVPP && \
+ !asd->copy_mode)
+
+#define ATOMISP_DEPTH_SENSOR_STREAMON_COUNT 2
+
+#define ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR 0
+#define ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR 1
+
+/* ISP2401 */
+#define ATOMISP_ION_DEVICE_FD_OFFSET 16
+#define ATOMISP_ION_SHARED_FD_MASK (0xFFFF)
+#define ATOMISP_ION_DEVICE_FD_MASK (~ATOMISP_ION_SHARED_FD_MASK)
+#define ION_FD_UNSET (-1)
+
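+/*
+ * Divide n by d, rounding the result to the nearest multiple of 'step'.
+ * Worked example: DIV_NEAREST_STEP(7, 2, 2) = round_down((14 + 4) / 4, 2)
+ * = round_down(4, 2) = 4, i.e. 3.5 rounded to the nearest even value.
+ */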
+#define DIV_NEAREST_STEP(n, d, step) \
+ round_down((2 * (n) + (d) * (step)) / (2 * (d)), (step))
+
+struct atomisp_input_subdev {
+ unsigned int type;
+ enum atomisp_camera_port port;
+ struct v4l2_subdev *camera;
+ struct v4l2_subdev *motor;
+ struct v4l2_frmsizeenum frame_size;
+
+ /*
+ * Shows which stream this resource is used by,
+ * in ISP multiple-stream mode.
+ */
+ struct atomisp_sub_device *asd;
+
+ const struct atomisp_camera_caps *camera_caps;
+ int sensor_index;
+};
+
+enum atomisp_dfs_mode {
+ ATOMISP_DFS_MODE_AUTO = 0,
+ ATOMISP_DFS_MODE_LOW,
+ ATOMISP_DFS_MODE_MAX,
+};
+
+struct atomisp_regs {
+ /* PCI config space info */
+ u16 pcicmdsts;
+ u32 ispmmadr;
+ u32 msicap;
+ u32 msi_addr;
+ u16 msi_data;
+ u8 intr;
+ u32 interrupt_control;
+ u32 pmcs;
+ u32 cg_dis;
+ u32 i_control;
+
+ /* I-Unit PHY related info */
+ u32 csi_rcomp_config;
+ u32 csi_afe_dly;
+ u32 csi_control;
+
+ /* New for MRFLD */
+ u32 csi_afe_rcomp_config;
+ u32 csi_afe_hs_control;
+ u32 csi_deadline_control;
+ u32 csi_access_viol;
+};
+
+struct atomisp_sw_contex {
+ bool file_input;
+ int power_state;
+ int running_freq;
+};
+
+#define ATOMISP_DEVICE_STREAMING_DISABLED 0
+#define ATOMISP_DEVICE_STREAMING_ENABLED 1
+#define ATOMISP_DEVICE_STREAMING_STOPPING 2
+
+/*
+ * ci device struct
+ */
+struct atomisp_device {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct atomisp_platform_data *pdata;
+ void *mmu_l1_base;
+ const struct firmware *firmware;
+
+ struct pm_qos_request pm_qos;
+ s32 max_isr_latency;
+
+ /*
+ * ISP modules
+ * Multiple streams are represented by multiple
+ * atomisp_sub_device instances
+ */
+ struct atomisp_sub_device *asd;
+ /*
+ * This will be assigned dynamically.
+ * For Merr/BTY (ISP2400), 2 streams are supported.
+ */
+ unsigned int num_of_streams;
+
+ struct atomisp_mipi_csi2_device csi2_port[ATOMISP_CAMERA_NR_PORTS];
+ struct atomisp_tpg_device tpg;
+ struct atomisp_file_device file_dev;
+
+ /* Purpose of mutex is to protect and serialize use of isp data
+ * structures and css API calls. */
+ struct rt_mutex mutex;
+ /*
+ * Serialise streamoff: mutex is dropped during streamoff to
+ * cancel the watchdog queue. MUST be acquired BEFORE
+ * "mutex".
+ */
+ struct mutex streamoff_mutex;
+
+ unsigned int input_cnt;
+ struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS];
+ struct v4l2_subdev *flash;
+ struct v4l2_subdev *motor;
+
+ struct atomisp_regs saved_regs;
+ struct atomisp_sw_contex sw_contex;
+ struct atomisp_css_env css_env;
+
+ /* isp timeout status flag */
+ bool isp_timeout;
+ bool isp_fatal_error;
+ struct workqueue_struct *wdt_work_queue;
+ struct work_struct wdt_work;
+
+ /* ISP2400 */
+ atomic_t wdt_count;
+
+ atomic_t wdt_work_queued;
+
+ spinlock_t lock; /* Just for streaming below */
+
+ bool need_gfx_throttle;
+
+ unsigned int mipi_frame_size;
+ const struct atomisp_dfs_config *dfs;
+ unsigned int hpll_freq;
+
+ bool css_initialized;
+};
+
+#define v4l2_dev_to_atomisp_device(dev) \
+ container_of(dev, struct atomisp_device, v4l2_dev)
+
+extern struct device *atomisp_dev;
+
+#define atomisp_is_wdt_running(a) timer_pending(&(a)->wdt)
+
+/* ISP2401 */
+void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe,
+ unsigned int delay);
+void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay);
+
+/* ISP2400 */
+void atomisp_wdt_start(struct atomisp_sub_device *asd);
+
+/* ISP2401 */
+void atomisp_wdt_start_pipe(struct atomisp_video_pipe *pipe);
+void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync);
+
+void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync);
+
+#endif /* __ATOMISP_INTERNAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
new file mode 100644
index 000000000000..a5e71e5b714e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -0,0 +1,3094 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf-vmalloc.h>
+
+#include "atomisp_acc.h"
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp-regs.h"
+#include "atomisp_compat.h"
+
+#include "sh_css_hrt.h"
+
+#include "gp_device.h"
+#include "device_access.h"
+#include "irq.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+static const char *DRIVER = "atomisp"; /* max size 15 */
+static const char *CARD = "ATOM ISP"; /* max size 31 */
+
+/*
+ * FIXME: The ISP should not know beforehand all CIDs supported by the
+ * sensor. Instead, it needs to propagate unknown CIDs to the sensor.
+ */
+static struct v4l2_queryctrl ci_v4l2_controls[] = {
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Automatic White Balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x00,
+ },
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x00,
+ },
+ {
+ .id = V4L2_CID_GAMMA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gamma",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x00,
+ },
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 1,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_COLORFX,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Image Color Effect",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_COLORFX_CBCR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Image Color Effect CbCr",
+ .minimum = 0,
+ .maximum = 0xffff,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Bad Pixel Correction",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "GDC/CAC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_VIDEO_STABLIZATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Video Stablization",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_FIXED_PATTERN_NR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Fixed Pattern Noise Reduction",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "False Color Correction",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_REQUEST_FLASH,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Request flash frames",
+ .minimum = 0,
+ .maximum = 10,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_LOW_LIGHT,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Low light mode",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Horizontal binning factor",
+ .minimum = 0,
+ .maximum = 10,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Vertical binning factor",
+ .minimum = 0,
+ .maximum = 10,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_2A_STATUS,
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .name = "AE and AWB status",
+ .minimum = 0,
+ .maximum = V4L2_2A_STATUS_AE_READY | V4L2_2A_STATUS_AWB_READY,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .minimum = -4,
+ .maximum = 4,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE_ZONE_NUM,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "one-time exposure zone number",
+ .minimum = 0x0,
+ .maximum = 0xffff,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure auto priority",
+ .minimum = V4L2_EXPOSURE_AUTO,
+ .maximum = V4L2_EXPOSURE_APERTURE_PRIORITY,
+ .step = 1,
+ .default_value = V4L2_EXPOSURE_AUTO,
+ },
+ {
+ .id = V4L2_CID_SCENE_MODE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "scene mode",
+ .minimum = 0,
+ .maximum = 13,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ISO_SENSITIVITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "iso",
+ .minimum = -4,
+ .maximum = 4,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ISO_SENSITIVITY_AUTO,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "iso mode",
+ .minimum = V4L2_ISO_SENSITIVITY_MANUAL,
+ .maximum = V4L2_ISO_SENSITIVITY_AUTO,
+ .step = 1,
+ .default_value = V4L2_ISO_SENSITIVITY_AUTO,
+ },
+ {
+ .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "white balance",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE_METERING,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "metering",
+ .minimum = 0,
+ .maximum = 3,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_3A_LOCK,
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .name = "3a lock",
+ .minimum = 0,
+ .maximum = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE
+ | V4L2_LOCK_FOCUS,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern",
+ .minimum = 0,
+ .maximum = 0xffff,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN_COLOR_R,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern Solid Color R",
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN_COLOR_GR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern Solid Color GR",
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN_COLOR_GB,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern Solid Color GB",
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN_COLOR_B,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern Solid Color B",
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+};
+
+static const u32 ctrls_num = ARRAY_SIZE(ci_v4l2_controls);
+
+/*
+ * supported V4L2 fmts and resolutions
+ */
+const struct atomisp_format_bridge atomisp_output_fmts[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV420,
+ .sh_fmt = CSS_FRAME_FORMAT_YUV420,
+ .description = "YUV420, planar",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YVU420,
+ .sh_fmt = CSS_FRAME_FORMAT_YV12,
+ .description = "YVU420, planar",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV422P,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV422P,
+ .sh_fmt = CSS_FRAME_FORMAT_YUV422,
+ .description = "YUV422, planar",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV444,
+ .depth = 24,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV444,
+ .sh_fmt = CSS_FRAME_FORMAT_YUV444,
+ .description = "YUV444"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV12,
+ .sh_fmt = CSS_FRAME_FORMAT_NV12,
+ .description = "NV12, Y-plane, CbCr interleaved",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV21,
+ .sh_fmt = CSS_FRAME_FORMAT_NV21,
+ .description = "NV21, Y-plane, CbCr interleaved",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV16,
+ .sh_fmt = CSS_FRAME_FORMAT_NV16,
+ .description = "NV16, Y-plane, CbCr interleaved",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUYV,
+ .sh_fmt = CSS_FRAME_FORMAT_YUYV,
+ .description = "YUYV, interleaved"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .sh_fmt = CSS_FRAME_FORMAT_UYVY,
+ .description = "UYVY, interleaved"
+ }, { /* This one is for parallel sensors! DO NOT USE! */
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .sh_fmt = CSS_FRAME_FORMAT_UYVY,
+ .description = "UYVY, interleaved"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR16,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_SBGGR16,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 16"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGBRG8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGRBG8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SRGGB8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGBRG12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGRBG12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SRGGB12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_RGB32,
+ .sh_fmt = CSS_FRAME_FORMAT_RGBA888,
+ .description = "32 RGB 8-8-8-8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_BGR565_2X8_LE,
+ .sh_fmt = CSS_FRAME_FORMAT_RGB565,
+ .description = "16 RGB 5-6-5"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_JPEG,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_BINARY_8,
+ .description = "JPEG"
+ },
+#if 0
+ {
+ /* This is a custom format being used by M10MO to send the RAW data */
+ .pixelformat = V4L2_PIX_FMT_CUSTOM_M10MO_RAW,
+ .depth = 8,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_M10MO_RAW,
+ .sh_fmt = CSS_FRAME_FORMAT_BINARY_8,
+ .description = "Custom RAW for M10MO"
+ },
+#endif
+};
+
+const struct atomisp_format_bridge *atomisp_get_format_bridge(
+ unsigned int pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
+ if (atomisp_output_fmts[i].pixelformat == pixelformat)
+ return &atomisp_output_fmts[i];
+ }
+
+ return NULL;
+}
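+
+/* e.g. atomisp_get_format_bridge(V4L2_PIX_FMT_NV12) returns the NV12 entry above. */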
+
+const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(
+ u32 mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
+ if (mbus_code == atomisp_output_fmts[i].mbus_code)
+ return &atomisp_output_fmts[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * v4l2 ioctls
+ * return ISP capabilities
+ */
+static int atomisp_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ strscpy(cap->driver, DRIVER, sizeof(cap->driver) - 1);
+ strscpy(cap->card, CARD, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
+ pci_name(isp->pdev));
+
+ return 0;
+}
+
+/*
+ * enum input are used to check primary/secondary camera
+ */
+static int atomisp_enum_input(struct file *file, void *fh,
+ struct v4l2_input *input)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int index = input->index;
+ struct v4l2_subdev *motor;
+
+ if (index >= isp->input_cnt)
+ return -EINVAL;
+
+ if (!isp->inputs[index].camera)
+ return -EINVAL;
+
+ memset(input, 0, sizeof(struct v4l2_input));
+ strncpy(input->name, isp->inputs[index].camera->name,
+ sizeof(input->name) - 1);
+
+ /*
+ * HACK: append actuator's name to sensor's
+ * As userspace currently can't talk directly to subdev nodes, this
+ * ioctl is the only way to enumerate inputs + possible external
+ * actuators for 3A tuning purposes.
+ */
+ if (!atomisp_hw_is_isp2401)
+ motor = isp->inputs[index].motor;
+ else
+ motor = isp->motor;
+
+ if (motor && strlen(motor->name) > 0) {
+ const int cur_len = strlen(input->name);
+ const int max_size = sizeof(input->name) - cur_len - 1;
+
+ if (max_size > 1) {
+ input->name[cur_len] = '+';
+ strncpy(&input->name[cur_len + 1],
+ motor->name, max_size - 1);
+ }
+ }
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->index = index;
+ input->reserved[0] = isp->inputs[index].type;
+ input->reserved[1] = isp->inputs[index].port;
+
+ return 0;
+}
+
+static unsigned int atomisp_subdev_streaming_count(
+ struct atomisp_sub_device *asd)
+{
+ return asd->video_out_preview.capq.streaming
+ + asd->video_out_capture.capq.streaming
+ + asd->video_out_video_capture.capq.streaming
+ + asd->video_out_vf.capq.streaming
+ + asd->video_in.capq.streaming;
+}
+
+unsigned int atomisp_streaming_count(struct atomisp_device *isp)
+{
+ unsigned int i, sum;
+
+ for (i = 0, sum = 0; i < isp->num_of_streams; i++)
+ sum += isp->asd[i].streaming ==
+ ATOMISP_DEVICE_STREAMING_ENABLED;
+
+ return sum;
+}
+
+unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp)
+{
+ unsigned int i;
+
+ for (i = 0; i < isp->num_of_streams; i++)
+ if (isp->asd[i].acc.pipeline)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * get input are used to get current primary/secondary camera
+ */
+static int atomisp_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+
+ rt_mutex_lock(&isp->mutex);
+ *input = asd->input_curr;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+}
+
+/*
+ * set input are used to set current primary/secondary camera
+ */
+static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_subdev *camera = NULL;
+ struct v4l2_subdev *motor;
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) {
+ dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * Check whether the requested camera is:
+ * 1: already in use
+ * 2: if in use, used by another stream
+ */
+ if (isp->inputs[input].asd && isp->inputs[input].asd != asd) {
+ dev_err(isp->dev,
+ "%s, camera is already used by stream: %d\n", __func__,
+ isp->inputs[input].asd->index);
+ ret = -EBUSY;
+ goto error;
+ }
+
+ camera = isp->inputs[input].camera;
+ if (!camera) {
+ dev_err(isp->dev, "%s, no camera\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (atomisp_subdev_streaming_count(asd)) {
+ dev_err(isp->dev,
+ "ISP is still streaming, stop first\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* power off the currently owned sensor, as it is not used this time */
+ if (isp->inputs[asd->input_curr].asd == asd &&
+ asd->input_curr != input) {
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, s_power, 0);
+ if (ret)
+ dev_warn(isp->dev,
+ "Failed to power-off sensor\n");
+ /* clear the asd field to show this camera is not used */
+ isp->inputs[asd->input_curr].asd = NULL;
+ }
+
+ /* power on the new sensor */
+ ret = v4l2_subdev_call(isp->inputs[input].camera, core, s_power, 1);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power-on sensor\n");
+ goto error;
+ }
+ /*
+ * Some sensor drivers reset the run mode during power-on, so force an
+ * update of the run mode on the sensor after power-on.
+ */
+ atomisp_update_run_mode(asd);
+
+ /* select operating sensor */
+ ret = v4l2_subdev_call(isp->inputs[input].camera, video, s_routing,
+ 0, isp->inputs[input].sensor_index, 0);
+ if (ret && (ret != -ENOIOCTLCMD)) {
+ dev_err(isp->dev, "Failed to select sensor\n");
+ goto error;
+ }
+
+ if (!atomisp_hw_is_isp2401) {
+ motor = isp->inputs[input].motor;
+ } else {
+ motor = isp->motor;
+ if (motor)
+ ret = v4l2_subdev_call(motor, core, s_power, 1);
+ }
+
+ if (!isp->sw_contex.file_input && motor)
+ ret = v4l2_subdev_call(motor, core, init, 1);
+
+ asd->input_curr = input;
+ /* mark this camera is used by the current stream */
+ isp->inputs[input].asd = asd;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+
+error:
+ rt_mutex_unlock(&isp->mutex);
+
+ return ret;
+}
+
+static int atomisp_enum_fmt_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_subdev_mbus_code_enum code = { 0 };
+ unsigned int i, fi = 0;
+ int rval;
+
+ rt_mutex_lock(&isp->mutex);
+ rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad,
+ enum_mbus_code, NULL, &code);
+ if (rval == -ENOIOCTLCMD) {
+ dev_warn(isp->dev,
+ "enum_mbus_code pad op not supported. Please fix your sensor driver!\n");
+ // rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ // video, enum_mbus_fmt, 0, &code.code);
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ if (rval)
+ return rval;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
+ const struct atomisp_format_bridge *format =
+ &atomisp_output_fmts[i];
+
+ /*
+ * Is the atomisp-supported format valid for the
+ * sensor (configuration)? If not, skip it.
+ */
+ if (format->sh_fmt == CSS_FRAME_FORMAT_RAW
+ && format->mbus_code != code.code)
+ continue;
+
+ /* Found a match. Now let's pick f->index'th one. */
+ if (fi < f->index) {
+ fi++;
+ continue;
+ }
+
+ strlcpy(f->description, format->description,
+ sizeof(f->description));
+ f->pixelformat = format->pixelformat;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int atomisp_g_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_get_fmt(vdev, f);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_g_fmt_file(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ rt_mutex_lock(&isp->mutex);
+ f->fmt.pix = pipe->pix;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+}
+
+/* This function looks up the closest available resolution. */
+static int atomisp_try_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_try_fmt(vdev, f, NULL);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_s_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ if (isp->isp_fatal_error) {
+ ret = -EIO;
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+ }
+ ret = atomisp_set_fmt(vdev, f);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_s_fmt_file(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_set_fmt_file(vdev, f);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+/*
+ * Free videobuf buffer private data.
+ */
+void atomisp_videobuf_free_buf(struct videobuf_buffer *vb)
+{
+ struct videobuf_vmalloc_memory *vm_mem;
+
+ if (!vb)
+ return;
+
+ vm_mem = vb->priv;
+ if (vm_mem && vm_mem->vaddr) {
+ atomisp_css_frame_free(vm_mem->vaddr);
+ vm_mem->vaddr = NULL;
+ }
+}
+
+/*
+ * This function is used to free the video buffer queue.
+ */
+static void atomisp_videobuf_free_queue(struct videobuf_queue *q)
+{
+ int i;
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ atomisp_videobuf_free_buf(q->bufs[i]);
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+}
+
+int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
+ uint16_t stream_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf;
+ struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf;
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf;
+ int count;
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ unsigned int i;
+
+ if (list_empty(&asd->s3a_stats) &&
+ asd->params.curr_grid_info.s3a_grid.enable) {
+ count = ATOMISP_CSS_Q_DEPTH +
+ ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL;
+ dev_dbg(isp->dev, "allocating %d 3a buffers\n", count);
+ while (count--) {
+ s3a_buf = kzalloc(sizeof(struct atomisp_s3a_buf), GFP_KERNEL);
+ if (!s3a_buf)
+ goto error;
+
+ if (atomisp_css_allocate_stat_buffers(
+ asd, stream_id, s3a_buf, NULL, NULL)) {
+ kfree(s3a_buf);
+ goto error;
+ }
+
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ }
+ }
+
+ if (list_empty(&asd->dis_stats) && dvs_grid_info &&
+ dvs_grid_info->enable) {
+ count = ATOMISP_CSS_Q_DEPTH + 1;
+ dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
+ while (count--) {
+ dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
+ /*
+ * any s3a buffers already allocated are on asd->s3a_stats
+ * and are freed by the error path; freeing s3a_buf here
+ * would be a double free
+ */
+ if (!dis_buf)
+ goto error;
+ if (atomisp_css_allocate_stat_buffers(
+ asd, stream_id, NULL, dis_buf, NULL)) {
+ kfree(dis_buf);
+ goto error;
+ }
+
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ }
+ }
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ if (list_empty(&asd->metadata[i]) &&
+ list_empty(&asd->metadata_ready[i]) &&
+ list_empty(&asd->metadata_in_css[i])) {
+ count = ATOMISP_CSS_Q_DEPTH +
+ ATOMISP_METADATA_QUEUE_DEPTH_FOR_HAL;
+ dev_dbg(isp->dev, "allocating %d metadata buffers for type %d\n",
+ count, i);
+ while (count--) {
+ md_buf = kzalloc(sizeof(struct atomisp_metadata_buf),
+ GFP_KERNEL);
+ if (!md_buf)
+ goto error;
+
+ if (atomisp_css_allocate_stat_buffers(
+ asd, stream_id, NULL, NULL, md_buf)) {
+ kfree(md_buf);
+ goto error;
+ }
+ list_add_tail(&md_buf->list, &asd->metadata[i]);
+ }
+ }
+ }
+ return 0;
+
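+/* Roll back: free every statistics buffer allocated before the failure. */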
+error:
+ dev_err(isp->dev, "failed to allocate statistics buffers\n");
+
+ list_for_each_entry_safe(dis_buf, _dis_buf, &asd->dis_stats, list) {
+ atomisp_css_free_dis_buffer(dis_buf);
+ list_del(&dis_buf->list);
+ kfree(dis_buf);
+ }
+
+ list_for_each_entry_safe(s3a_buf, _s3a_buf, &asd->s3a_stats, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i],
+ list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ }
+ return -ENOMEM;
+}
+
+/*
+ * Initiate Memory Mapping or User Pointer I/O
+ */
+int __atomisp_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_css_frame_info frame_info;
+ struct atomisp_css_frame *frame;
+ struct videobuf_vmalloc_memory *vm_mem;
+ u16 source_pad = atomisp_subdev_source_pad(vdev);
+ u16 stream_id = atomisp_source_pad_to_stream_id(asd, source_pad);
+ int ret = 0, i = 0;
+
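+ /*
+ * A request with count == 0 frees all previously allocated buffers
+ * (standard VIDIOC_REQBUFS semantics).
+ */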
+ if (req->count == 0) {
+ mutex_lock(&pipe->capq.vb_lock);
+ if (!list_empty(&pipe->capq.stream))
+ videobuf_queue_cancel(&pipe->capq);
+
+ atomisp_videobuf_free_queue(&pipe->capq);
+ mutex_unlock(&pipe->capq.vb_lock);
+ /* clear request config id */
+ memset(pipe->frame_request_config_id, 0,
+ VIDEO_MAX_FRAME * sizeof(unsigned int));
+ memset(pipe->frame_params, 0,
+ VIDEO_MAX_FRAME *
+ sizeof(struct atomisp_css_params_with_list *));
+ return 0;
+ }
+
+ ret = videobuf_reqbufs(&pipe->capq, req);
+ if (ret)
+ return ret;
+
+ atomisp_alloc_css_stat_bufs(asd, stream_id);
+
+ /*
+ * For the user pointer type, buffers are not really allocated here;
+ * they are set up in the QBUF operation through the v4l2_buffer
+ * structure.
+ */
+ if (req->memory == V4L2_MEMORY_USERPTR)
+ return 0;
+
+ ret = atomisp_get_css_frame_info(asd, source_pad, &frame_info);
+ if (ret)
+ return ret;
+
+ /*
+ * Allocate the real frame here for selected node using our
+ * memory management function
+ */
+ for (i = 0; i < req->count; i++) {
+ if (atomisp_css_frame_allocate_from_info(&frame, &frame_info))
+ goto error;
+ vm_mem = pipe->capq.bufs[i]->priv;
+ vm_mem->vaddr = frame;
+ }
+
+ return ret;
+
+error:
+ while (i--) {
+ vm_mem = pipe->capq.bufs[i]->priv;
+ atomisp_css_frame_free(vm_mem->vaddr);
+ }
+
+ if (asd->vf_frame)
+ atomisp_css_frame_free(asd->vf_frame);
+
+ return -ENOMEM;
+}
+
+int atomisp_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ ret = __atomisp_reqbufs(file, fh, req);
+ rt_mutex_unlock(&isp->mutex);
+
+ return ret;
+}
+
+static int atomisp_reqbufs_file(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ if (req->count == 0) {
+ mutex_lock(&pipe->outq.vb_lock);
+ atomisp_videobuf_free_queue(&pipe->outq);
+ mutex_unlock(&pipe->outq.vb_lock);
+ return 0;
+ }
+
+ return videobuf_reqbufs(&pipe->outq, req);
+}
+
+/* the application queries the status of a buffer */
+static int atomisp_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ return videobuf_querybuf(&pipe->capq, buf);
+}
+
+static int atomisp_querybuf_file(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ return videobuf_querybuf(&pipe->outq, buf);
+}
+
+/*
+ * Applications call the VIDIOC_QBUF ioctl to enqueue an empty (capturing) or
+ * filled (output) buffer in the driver's incoming queue.
+ */
+static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ static const int NOFLUSH_FLAGS = V4L2_BUF_FLAG_NO_CACHE_INVALIDATE |
+ V4L2_BUF_FLAG_NO_CACHE_CLEAN;
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct videobuf_buffer *vb;
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct atomisp_css_frame_info frame_info;
+ struct atomisp_css_frame *handle = NULL;
+ u32 length;
+ u32 pgnr;
+ int ret = 0;
+
+ rt_mutex_lock(&isp->mutex);
+ if (isp->isp_fatal_error) {
+ ret = -EIO;
+ goto error;
+ }
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) {
+ dev_err(isp->dev, "%s: reject, as ISP at stopping.\n",
+ __func__);
+ ret = -EIO;
+ goto error;
+ }
+
+ if (!buf || buf->index >= VIDEO_MAX_FRAME ||
+ !pipe->capq.bufs[buf->index]) {
+ dev_err(isp->dev, "Invalid index for qbuf.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * For userptr type frames, we convert the user space address to a
+ * physical address and reprogram our page table properly.
+ */
+ if (buf->memory == V4L2_MEMORY_USERPTR) {
+ struct hrt_userbuffer_attr attributes;
+
+ vb = pipe->capq.bufs[buf->index];
+ vm_mem = vb->priv;
+ if (!vm_mem) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ length = vb->bsize;
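+ /* round the buffer size up to a whole number of pages */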
+ pgnr = (length + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+ if (vb->baddr == buf->m.userptr && vm_mem->vaddr)
+ goto done;
+
+ if (atomisp_get_css_frame_info(asd,
+ atomisp_subdev_source_pad(vdev), &frame_info)) {
+ ret = -EIO;
+ goto error;
+ }
+
+ attributes.pgnr = pgnr;
+ attributes.type = HRT_USR_PTR;
+#ifdef CONFIG_ION
+ if (!atomisp_hw_is_isp2401) {
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_ION)
+ attributes.type = HRT_USR_ION;
+ } else {
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_ION) {
+ attributes.type = HRT_USR_ION;
+ if (asd->ion_dev_fd->val != ION_FD_UNSET) {
+ dev_dbg(isp->dev, "ION buffer queued, share_fd=%lddev_fd=%d.\n",
+ buf->m.userptr, asd->ion_dev_fd->val);
+ /*
+ * Make sure the shared fd we just got
+ * from user space isn't larger than
+ * the space we have for it.
+ */
+ if ((buf->m.userptr &
+ (ATOMISP_ION_DEVICE_FD_MASK)) != 0) {
+ dev_err(isp->dev,
+ "Error: v4l2 buffer fd:0X%0lX > 0XFFFF.\n",
+ buf->m.userptr);
+ ret = -EINVAL;
+ goto error;
+ }
+ buf->m.userptr |= asd->ion_dev_fd->val <<
+ ATOMISP_ION_DEVICE_FD_OFFSET;
+ } else {
+ dev_err(isp->dev, "v4l2 buffer type is ION, \
+ but no dev fd set from userspace.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ }
+#endif
+ ret = atomisp_css_frame_map(&handle, &frame_info,
+ (void __user *)buf->m.userptr,
+ 0, &attributes);
+ if (ret) {
+ dev_err(isp->dev, "Failed to map user buffer\n");
+ goto error;
+ }
+
+ if (vm_mem->vaddr) {
+ mutex_lock(&pipe->capq.vb_lock);
+ atomisp_css_frame_free(vm_mem->vaddr);
+ vm_mem->vaddr = NULL;
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ mutex_unlock(&pipe->capq.vb_lock);
+ }
+
+ vm_mem->vaddr = handle;
+
+ buf->flags &= ~V4L2_BUF_FLAG_MAPPED;
+ buf->flags |= V4L2_BUF_FLAG_QUEUED;
+ buf->flags &= ~V4L2_BUF_FLAG_DONE;
+ } else if (buf->memory == V4L2_MEMORY_MMAP) {
+ buf->flags |= V4L2_BUF_FLAG_MAPPED;
+ buf->flags |= V4L2_BUF_FLAG_QUEUED;
+ buf->flags &= ~V4L2_BUF_FLAG_DONE;
+ }
+
+done:
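+ /*
+ * Unless userspace set both no-cache-maintenance flags, write back
+ * and invalidate all CPU caches (wbinvd) so the ISP sees coherent
+ * buffer contents.
+ */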
+ if ((buf->flags & NOFLUSH_FLAGS) != NOFLUSH_FLAGS)
+ wbinvd();
+
+ if (!atomisp_is_vf_pipe(pipe) &&
+ (buf->reserved2 & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING)) {
+ /* this buffer will have a per-frame parameter */
+ pipe->frame_request_config_id[buf->index] = buf->reserved2 &
+ ~ATOMISP_BUFFER_HAS_PER_FRAME_SETTING;
+ dev_dbg(isp->dev,
+ "This buffer requires per_frame setting which has isp_config_id %d\n",
+ pipe->frame_request_config_id[buf->index]);
+ } else {
+ pipe->frame_request_config_id[buf->index] = 0;
+ }
+
+ pipe->frame_params[buf->index] = NULL;
+
+ rt_mutex_unlock(&isp->mutex);
+
+ ret = videobuf_qbuf(&pipe->capq, buf);
+ rt_mutex_lock(&isp->mutex);
+ if (ret)
+ goto error;
+
+ /* TODO: do this better; this is not the best way to queue to css */
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
+ if (!list_empty(&pipe->buffers_waiting_for_param)) {
+ atomisp_handle_parameter_and_buffer(pipe);
+ } else {
+ atomisp_qbuffers_to_css(asd);
+
+ if (!atomisp_hw_is_isp2401) {
+ if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd))
+ atomisp_wdt_start(asd);
+ } else {
+ if (!atomisp_is_wdt_running(pipe) &&
+ atomisp_buffers_queued_pipe(pipe))
+ atomisp_wdt_start_pipe(pipe);
+ }
+ }
+ }
+
+ /*
+ * Workaround: Due to the design of HALv3, sometimes in ZSL or SDV
+ * mode HAL needs to capture multiple images within one streaming
+ * cycle, but the capture count cannot be determined by HAL.
+ * So HAL only sets the capture count to 1 and queues multiple
+ * buffers. The atomisp driver needs to check this case and
+ * re-trigger CSS to do the capture when a new buffer is queued.
+ */
+ if (asd->continuous_mode->val &&
+ atomisp_subdev_source_pad(vdev)
+ == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE &&
+ pipe->capq.streaming &&
+ !asd->enable_raw_buffer_lock->val &&
+ asd->params.offline_parm.num_captures == 1) {
+ if (!atomisp_hw_is_isp2401) {
+ asd->pending_capture_request++;
+ dev_dbg(isp->dev, "Add one pending capture request.\n");
+ } else {
+ if (asd->re_trigger_capture) {
+ ret = atomisp_css_offline_capture_configure(asd,
+ asd->params.offline_parm.num_captures,
+ asd->params.offline_parm.skip_frames,
+ asd->params.offline_parm.offset);
+ asd->re_trigger_capture = false;
+ dev_dbg(isp->dev, "%s Trigger capture again ret=%d\n",
+ __func__, ret);
+
+ } else {
+ asd->pending_capture_request++;
+ asd->re_trigger_capture = false;
+ dev_dbg(isp->dev, "Add one pending capture request.\n");
+ }
+ }
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ dev_dbg(isp->dev, "qbuf buffer %d (%s) for asd%d\n", buf->index,
+ vdev->name, asd->index);
+
+ return ret;
+
+error:
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_qbuf_file(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ if (isp->isp_fatal_error) {
+ ret = -EIO;
+ goto error;
+ }
+
+ if (!buf || buf->index >= VIDEO_MAX_FRAME ||
+ !pipe->outq.bufs[buf->index]) {
+ dev_err(isp->dev, "Invalid index for qbuf.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (buf->memory != V4L2_MEMORY_MMAP) {
+ dev_err(isp->dev, "Unsupported memory method\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ dev_err(isp->dev, "Unsupported buffer type\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ return videobuf_qbuf(&pipe->outq, buf);
+
+error:
+ rt_mutex_unlock(&isp->mutex);
+
+ return ret;
+}
+
+static int __get_frame_exp_id(struct atomisp_video_pipe *pipe,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct atomisp_css_frame *handle;
+ int i;
+
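+ /*
+ * Scan the capture queue for the buffer with a matching index and
+ * return the exposure ID of the CSS frame backing it.
+ */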
+ for (i = 0; pipe->capq.bufs[i]; i++) {
+ vm_mem = pipe->capq.bufs[i]->priv;
+ handle = vm_mem->vaddr;
+ if (buf->index == pipe->capq.bufs[i]->i && handle)
+ return handle->exp_id;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Applications call the VIDIOC_DQBUF ioctl to dequeue a filled (capturing) or
+ * displayed (output) buffer from the driver's outgoing queue.
+ */
+static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret = 0;
+
+ rt_mutex_lock(&isp->mutex);
+
+ if (isp->isp_fatal_error) {
+ rt_mutex_unlock(&isp->mutex);
+ return -EIO;
+ }
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) {
+ rt_mutex_unlock(&isp->mutex);
+ dev_err(isp->dev, "%s: reject, as ISP at stopping.\n",
+ __func__);
+ return -EIO;
+ }
+
+ rt_mutex_unlock(&isp->mutex);
+
+ ret = videobuf_dqbuf(&pipe->capq, buf, file->f_flags & O_NONBLOCK);
+ if (ret) {
+ dev_dbg(isp->dev, "<%s: %d\n", __func__, ret);
+ return ret;
+ }
+ rt_mutex_lock(&isp->mutex);
+ buf->bytesused = pipe->pix.sizeimage;
+ buf->reserved = asd->frame_status[buf->index];
+
+ /*
+ * Hack:
+ * Currently frame_status is an enum that uses no more than the
+ * lower 8 bits, so use bits [31:16] for exp_id, which is only in
+ * the range of 1~255.
+ */
+ buf->reserved &= 0x0000ffff;
+ if (!(buf->flags & V4L2_BUF_FLAG_ERROR))
+ buf->reserved |= __get_frame_exp_id(pipe, buf) << 16;
+ buf->reserved2 = pipe->frame_config_id[buf->index];
+ rt_mutex_unlock(&isp->mutex);
+
+ dev_dbg(isp->dev,
+ "dqbuf buffer %d (%s) for asd%d with exp_id %d, isp_config_id %d\n",
+ buf->index, vdev->name, asd->index, buf->reserved >> 16,
+ buf->reserved2);
+ return 0;
+}
+
+enum atomisp_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd)
+{
+ if (ATOMISP_USE_YUVPP(asd))
+ return CSS_PIPE_ID_YUVPP;
+
+ if (asd->continuous_mode->val) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ return CSS_PIPE_ID_VIDEO;
+ else
+ return CSS_PIPE_ID_PREVIEW;
+ }
+
+ /*
+ * Disable vf_pp and run CSS in video mode. This allows using ISP
+ * scaling but it has one frame delay due to CSS internal buffering.
+ */
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER)
+ return CSS_PIPE_ID_VIDEO;
+
+ /*
+ * Disable vf_pp and run CSS in still capture mode. In this mode
+ * CSS does not cause extra latency with buffering, but scaling
+ * is not available.
+ */
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT)
+ return CSS_PIPE_ID_CAPTURE;
+
+ switch (asd->run_mode->val) {
+ case ATOMISP_RUN_MODE_PREVIEW:
+ return CSS_PIPE_ID_PREVIEW;
+ case ATOMISP_RUN_MODE_VIDEO:
+ return CSS_PIPE_ID_VIDEO;
+ case ATOMISP_RUN_MODE_STILL_CAPTURE:
+ /* fall through */
+ default:
+ return CSS_PIPE_ID_CAPTURE;
+ }
+}
+
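+/*
+ * Return how many video nodes must be streaming before the sensor
+ * itself is started (the count depends on which CSS binary will run).
+ */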
+static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1) {
+ if (asd->high_speed_mode)
+ return 1;
+ else
+ return 2;
+ }
+
+ if (asd->vfpp->val != ATOMISP_VFPP_ENABLE ||
+ asd->copy_mode)
+ return 1;
+
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
+ (asd->run_mode->val == ATOMISP_RUN_MODE_STILL_CAPTURE &&
+ !atomisp_is_mbuscode_raw(
+ asd->fmt[
+ asd->capture_pad].fmt.code) &&
+ !asd->continuous_mode->val))
+ return 2;
+ else
+ return 1;
+}
+
+int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp,
+ bool isp_timeout)
+{
+ unsigned int master = -1, slave = -1, delay_slave = 0;
+ int i, ret;
+
+ /*
+ * The ISP only supports 2 streams for now, so ignore the multiple
+ * master/slave case to reduce the delay between the 2 stream_on calls.
+ */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ int sensor_index = isp->asd[i].input_curr;
+
+ if (isp->inputs[sensor_index].camera_caps->
+ sensor[isp->asd[i].sensor_curr].is_slave)
+ slave = sensor_index;
+ else
+ master = sensor_index;
+ }
+
+ if (master == -1 || slave == -1) {
+ master = ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR;
+ slave = ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR;
+ dev_warn(isp->dev,
+ "depth mode use default master=%s.slave=%s.\n",
+ isp->inputs[master].camera->name,
+ isp->inputs[slave].camera->name);
+ }
+
+ ret = v4l2_subdev_call(isp->inputs[master].camera, core,
+ ioctl, ATOMISP_IOC_G_DEPTH_SYNC_COMP,
+ &delay_slave);
+ if (ret)
+ dev_warn(isp->dev,
+ "get depth sensor %s compensation delay failed.\n",
+ isp->inputs[master].camera->name);
+
+ ret = v4l2_subdev_call(isp->inputs[master].camera,
+ video, s_stream, 1);
+ if (ret) {
+ dev_err(isp->dev, "depth mode master sensor %s stream-on failed.\n",
+ isp->inputs[master].camera->name);
+ return -EINVAL;
+ }
+
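+ /* delay the slave stream-on by the compensation time reported above */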
+ if (delay_slave != 0)
+ udelay(delay_slave);
+
+ ret = v4l2_subdev_call(isp->inputs[slave].camera,
+ video, s_stream, 1);
+ if (ret) {
+ dev_err(isp->dev, "depth mode slave sensor %s stream-on failed.\n",
+ isp->inputs[slave].camera->name);
+ v4l2_subdev_call(isp->inputs[master].camera, video, s_stream, 0);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* FIXME! ISP2400 */
+static void __wdt_on_master_slave_sensor(struct atomisp_device *isp,
+ unsigned int wdt_duration)
+{
+ if (atomisp_buffers_queued(&isp->asd[0]))
+ atomisp_wdt_refresh(&isp->asd[0], wdt_duration);
+ if (atomisp_buffers_queued(&isp->asd[1]))
+ atomisp_wdt_refresh(&isp->asd[1], wdt_duration);
+}
+
+/* FIXME! ISP2401 */
+static void __wdt_on_master_slave_sensor_pipe(struct atomisp_video_pipe *pipe,
+ unsigned int wdt_duration,
+ bool enable)
+{
+ static struct atomisp_video_pipe *pipe0;
+
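+ /*
+ * The disable call (enable == false) only caches the pipe; the
+ * following enable call then refreshes the watchdog for both the
+ * cached and the current pipe.
+ */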
+ if (enable) {
+ if (atomisp_buffers_queued_pipe(pipe0))
+ atomisp_wdt_refresh_pipe(pipe0, wdt_duration);
+ if (atomisp_buffers_queued_pipe(pipe))
+ atomisp_wdt_refresh_pipe(pipe, wdt_duration);
+ } else {
+ pipe0 = pipe;
+ }
+}
+
+static void atomisp_pause_buffer_event(struct atomisp_device *isp)
+{
+ struct v4l2_event event = {0};
+ int i;
+
+ event.type = V4L2_EVENT_ATOMISP_PAUSE_BUFFER;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ int sensor_index = isp->asd[i].input_curr;
+
+ if (isp->inputs[sensor_index].camera_caps->
+ sensor[isp->asd[i].sensor_curr].is_slave) {
+ v4l2_event_queue(isp->asd[i].subdev.devnode, &event);
+ break;
+ }
+ }
+}
+
+/*
+ * Input system HW workaround.
+ * Input system address translation corrupts burst during invalidate.
+ * The SW workaround for this is to set the burst length manually to
+ * 128 in case of a 13MPx snapshot and to 1 otherwise.
+ */
+static void atomisp_dma_burst_len_cfg(struct atomisp_sub_device *asd)
+{
+ struct v4l2_mbus_framefmt *sink;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
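+ /* 4096x3072 is roughly a 13MPx frame; only then use the long burst */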
+ if (sink->width * sink->height >= 4096 * 3072)
+ atomisp_store_uint32(DMA_BURST_SIZE_REG, 0x7F);
+ else
+ atomisp_store_uint32(DMA_BURST_SIZE_REG, 0x00);
+}
+
+/*
+ * This ioctl starts the capture during streaming I/O.
+ */
+static int atomisp_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ enum atomisp_css_pipe_id css_pipe_id;
+ unsigned int sensor_start_stream;
+ unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION;
+ int ret = 0;
+ unsigned long irqflags;
+
+ dev_dbg(isp->dev, "Start stream on pad %d for asd%d\n",
+ atomisp_subdev_source_pad(vdev), asd->index);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_dbg(isp->dev, "unsupported v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ rt_mutex_lock(&isp->mutex);
+ if (isp->isp_fatal_error) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (pipe->capq.streaming)
+ goto out;
+
+ /* Input system HW workaround */
+ atomisp_dma_burst_len_cfg(asd);
+
+ /*
+ * The number of streaming video nodes is based on which
+ * binary is going to be run.
+ */
+ sensor_start_stream = atomisp_sensor_start_stream(asd);
+
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ if (list_empty(&pipe->capq.stream)) {
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ dev_dbg(isp->dev, "no buffer in the queue\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+
+ ret = videobuf_streamon(&pipe->capq);
+ if (ret)
+ goto out;
+
+ /* Reset pending capture request count. */
+ asd->pending_capture_request = 0;
+ if (atomisp_hw_is_isp2401)
+ asd->re_trigger_capture = false;
+
+ if ((atomisp_subdev_streaming_count(asd) > sensor_start_stream) &&
+ (!isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl)) {
+ /* trigger still capture */
+ if (asd->continuous_mode->val &&
+ atomisp_subdev_source_pad(vdev)
+ == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ dev_dbg(isp->dev, "SDV last video raw buffer id: %u\n",
+ asd->latest_preview_exp_id);
+ else
+ dev_dbg(isp->dev, "ZSL last preview raw buffer id: %u\n",
+ asd->latest_preview_exp_id);
+
+ if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) {
+ flush_work(&asd->delayed_init_work);
+ rt_mutex_unlock(&isp->mutex);
+ if (wait_for_completion_interruptible(
+ &asd->init_done) != 0)
+ return -ERESTARTSYS;
+ rt_mutex_lock(&isp->mutex);
+ }
+
+ /* handle per_frame_setting parameter and buffers */
+ atomisp_handle_parameter_and_buffer(pipe);
+
+ /*
+ * Only ZSL/SDV capture requests reach here; raise
+ * the ISP freq to the highest possible to minimize
+ * the S2S latency.
+ */
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false);
+ /*
+ * When asd->enable_raw_buffer_lock->val is true,
+ * an extra IOCTL is needed to call
+ * atomisp_css_exp_id_capture and trigger the real capture.
+ */
+ if (!asd->enable_raw_buffer_lock->val) {
+ ret = atomisp_css_offline_capture_configure(asd,
+ asd->params.offline_parm.num_captures,
+ asd->params.offline_parm.skip_frames,
+ asd->params.offline_parm.offset);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (asd->depth_mode->val)
+ atomisp_pause_buffer_event(isp);
+ }
+ }
+ atomisp_qbuffers_to_css(asd);
+ goto out;
+ }
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
+ atomisp_qbuffers_to_css(asd);
+ goto start_sensor;
+ }
+
+ css_pipe_id = atomisp_get_css_pipe_id(asd);
+
+ ret = atomisp_acc_load_extensions(asd);
+ if (ret < 0) {
+ dev_err(isp->dev, "acc extension failed to load\n");
+ goto out;
+ }
+
+ if (asd->params.css_update_params_needed) {
+ atomisp_apply_css_parameters(asd, &asd->params.css_param);
+ if (asd->params.css_param.update_flag.dz_config)
+ atomisp_css_set_dz_config(asd,
+ &asd->params.css_param.dz_config);
+ atomisp_css_update_isp_params(asd);
+ asd->params.css_update_params_needed = false;
+ memset(&asd->params.css_param.update_flag, 0,
+ sizeof(struct atomisp_parameters));
+ }
+ asd->params.dvs_6axis = NULL;
+
+ ret = atomisp_css_start(asd, css_pipe_id, false);
+ if (ret)
+ goto out;
+
+ asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED;
+ atomic_set(&asd->sof_count, -1);
+ atomic_set(&asd->sequence, -1);
+ atomic_set(&asd->sequence_temp, -1);
+ if (isp->sw_contex.file_input)
+ wdt_duration = ATOMISP_ISP_FILE_TIMEOUT_DURATION;
+
+ asd->params.dis_proj_data_valid = false;
+ asd->latest_preview_exp_id = 0;
+ asd->postview_exp_id = 1;
+ asd->preview_exp_id = 1;
+
+ /* handle per_frame_setting parameter and buffers */
+ atomisp_handle_parameter_and_buffer(pipe);
+
+ atomisp_qbuffers_to_css(asd);
+
+ /* Only start sensor when the last streaming instance started */
+ if (atomisp_subdev_streaming_count(asd) < sensor_start_stream)
+ goto out;
+
+start_sensor:
+ if (isp->flash) {
+ asd->params.num_flash_frames = 0;
+ asd->params.flash_state = ATOMISP_FLASH_IDLE;
+ atomisp_setup_flash(asd);
+ }
+
+ if (!isp->sw_contex.file_input) {
+ atomisp_css_irq_enable(isp, CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+ atomisp_css_valid_sof(isp));
+ atomisp_csi2_configure(asd);
+ /*
+ * set freq to max when streaming count > 1, which indicates
+ * a dual camera setup is running
+ */
+ if (atomisp_streaming_count(isp) > 1) {
+ if (atomisp_freq_scaling(isp,
+ ATOMISP_DFS_MODE_MAX, false) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ } else {
+ if (atomisp_freq_scaling(isp,
+ ATOMISP_DFS_MODE_AUTO, false) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ }
+ } else {
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ }
+
+ if (asd->depth_mode->val && atomisp_streaming_count(isp) ==
+ ATOMISP_DEPTH_SENSOR_STREAMON_COUNT) {
+ ret = atomisp_stream_on_master_slave_sensor(isp, false);
+ if (ret) {
+ dev_err(isp->dev, "master slave sensor stream on failed!\n");
+ goto out;
+ }
+ if (!atomisp_hw_is_isp2401) {
+ __wdt_on_master_slave_sensor(isp, wdt_duration);
+ } else {
+ __wdt_on_master_slave_sensor_pipe(pipe, wdt_duration, true);
+ }
+ goto start_delay_wq;
+ } else if (asd->depth_mode->val && (atomisp_streaming_count(isp) <
+ ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) {
+ if (atomisp_hw_is_isp2401)
+ __wdt_on_master_slave_sensor_pipe(pipe, wdt_duration, false);
+ goto start_delay_wq;
+ }
+
+ /* Enable the CSI interface on ANN B0/K0 */
+ if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
+ pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control |
+ MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ }
+
+ /* stream on the sensor */
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 1);
+ if (ret) {
+ asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!atomisp_hw_is_isp2401) {
+ if (atomisp_buffers_queued(asd))
+ atomisp_wdt_refresh(asd, wdt_duration);
+ } else {
+ if (atomisp_buffers_queued_pipe(pipe))
+ atomisp_wdt_refresh_pipe(pipe, wdt_duration);
+ }
+
+start_delay_wq:
+ if (asd->continuous_mode->val) {
+ struct v4l2_mbus_framefmt *sink;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
+ reinit_completion(&asd->init_done);
+ asd->delayed_init = ATOMISP_DELAYED_INIT_QUEUED;
+ queue_work(asd->delayed_init_workq, &asd->delayed_init_work);
+ atomisp_css_set_cont_prev_start_time(isp,
+ ATOMISP_CALC_CSS_PREV_OVERLAP(sink->height));
+ } else {
+ asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
+ }
+out:
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_video_pipe *capture_pipe = NULL;
+ struct atomisp_video_pipe *vf_pipe = NULL;
+ struct atomisp_video_pipe *preview_pipe = NULL;
+ struct atomisp_video_pipe *video_pipe = NULL;
+ struct videobuf_buffer *vb, *_vb;
+ enum atomisp_css_pipe_id css_pipe_id;
+ int ret;
+ unsigned long flags;
+ bool first_streamoff = false;
+
+ dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n",
+ atomisp_subdev_source_pad(vdev), asd->index);
+
+ BUG_ON(!rt_mutex_is_locked(&isp->mutex));
+ BUG_ON(!mutex_is_locked(&isp->streamoff_mutex));
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_dbg(isp->dev, "unsupported v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * do only videobuf_streamoff for capture & vf pipes in
+ * case of continuous capture
+ */
+ if ((asd->continuous_mode->val ||
+ isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) &&
+ atomisp_subdev_source_pad(vdev) !=
+ ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
+ atomisp_subdev_source_pad(vdev) !=
+ ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
+ if (isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) {
+ v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 0);
+ } else if (atomisp_subdev_source_pad(vdev)
+ == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) {
+ /* stop continuous still capture if needed */
+ if (asd->params.offline_parm.num_captures == -1)
+ atomisp_css_offline_capture_configure(asd,
+ 0, 0, 0);
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, false);
+ }
+ /*
+ * Currently there is no way to flush buffers queued to css.
+ * When doing videobuf_streamoff, active buffers will be
+ * marked as VIDEOBUF_NEEDS_INIT. HAL will be able to use
+ * these buffers again, and these buffers might be queued to
+ * css more than once! Warn here if HAL has not dequeued all
+ * buffers before calling streamoff.
+ */
+ if (pipe->buffers_in_css != 0) {
+ WARN(1, "%s: buffers of vdev %s still in CSS!\n",
+ __func__, pipe->vdev.name);
+
+ /*
+ * Buffers that remained in css may be dequeued during the
+ * next stream-on, which causes serious issues as the
+ * buffers have already become invalid after the previous
+ * stream-off.
+ *
+ * There is no way to flush the buffers except to reset the whole css.
+ */
+ dev_warn(isp->dev, "Reset CSS to clean up css buffers.\n");
+ atomisp_css_flush(isp);
+ }
+
+ return videobuf_streamoff(&pipe->capq);
+ }
+
+ if (!pipe->capq.streaming)
+ return 0;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
+ asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING;
+ first_streamoff = true;
+ }
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ if (first_streamoff) {
+ /* if other streams are running, we should not disable the watchdog */
+ rt_mutex_unlock(&isp->mutex);
+ atomisp_wdt_stop(asd, true);
+
+ /*
+ * we must stop sending pixels into GP_FIFO before stopping
+ * the pipeline.
+ */
+ if (isp->sw_contex.file_input)
+ v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 0);
+
+ rt_mutex_lock(&isp->mutex);
+ atomisp_acc_unload_extensions(asd);
+ }
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (atomisp_subdev_streaming_count(asd) == 1)
+ asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ if (!first_streamoff) {
+ ret = videobuf_streamoff(&pipe->capq);
+ if (ret)
+ return ret;
+ goto stopsensor;
+ }
+
+ atomisp_clear_css_buffer_counters(asd);
+
+ if (!isp->sw_contex.file_input)
+ atomisp_css_irq_enable(isp, CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+ false);
+
+ if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) {
+ cancel_work_sync(&asd->delayed_init_work);
+ asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
+ }
+ if (first_streamoff) {
+ css_pipe_id = atomisp_get_css_pipe_id(asd);
+ ret = atomisp_css_stop(asd, css_pipe_id, false);
+ }
+ /* wake up any waiters blocked on the pipe queues */
+ if (asd->video_out_capture.users) {
+ capture_pipe = &asd->video_out_capture;
+ wake_up_interruptible(&capture_pipe->capq.wait);
+ }
+ if (asd->video_out_vf.users) {
+ vf_pipe = &asd->video_out_vf;
+ wake_up_interruptible(&vf_pipe->capq.wait);
+ }
+ if (asd->video_out_preview.users) {
+ preview_pipe = &asd->video_out_preview;
+ wake_up_interruptible(&preview_pipe->capq.wait);
+ }
+ if (asd->video_out_video_capture.users) {
+ video_pipe = &asd->video_out_video_capture;
+ wake_up_interruptible(&video_pipe->capq.wait);
+ }
+ ret = videobuf_streamoff(&pipe->capq);
+ if (ret)
+ return ret;
+
+ /*
+ * No CSS buffer cleanup (atomisp_flush_bufs_in_css()) is needed
+ * here, as the ISP will be reset anyway.
+ */
+
+ spin_lock_irqsave(&pipe->irq_lock, flags);
+ list_for_each_entry_safe(vb, _vb, &pipe->activeq, queue) {
+ vb->state = VIDEOBUF_PREPARED;
+ list_del(&vb->queue);
+ }
+ list_for_each_entry_safe(vb, _vb, &pipe->buffers_waiting_for_param, queue) {
+ vb->state = VIDEOBUF_PREPARED;
+ list_del(&vb->queue);
+ pipe->frame_request_config_id[vb->i] = 0;
+ }
+ spin_unlock_irqrestore(&pipe->irq_lock, flags);
+
+ atomisp_subdev_cleanup_pending_events(asd);
+stopsensor:
+ if (atomisp_subdev_streaming_count(asd) + 1
+ != atomisp_sensor_start_stream(asd))
+ return 0;
+
+ if (!isp->sw_contex.file_input)
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 0);
+
+ if (isp->flash) {
+ asd->params.num_flash_frames = 0;
+ asd->params.flash_state = ATOMISP_FLASH_IDLE;
+ }
+
+ /* if other streams are running, isp should not be powered off */
+ if (atomisp_streaming_count(isp)) {
+ atomisp_css_flush(isp);
+ return 0;
+ }
+
+ /* Disable the CSI interface on ANN B0/K0 */
+ if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
+ pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control &
+ ~MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ }
+
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false))
+ dev_warn(isp->dev, "DFS failed.\n");
+ /*
+ * ISP workaround: the ISP needs to be reset.
+ * Is this the correct time to reset the ISP, when the first node
+ * does streamoff?
+ */
+ if (isp->sw_contex.power_state == ATOM_ISP_POWER_UP) {
+ unsigned int i;
+ bool recreate_streams[MAX_STREAM_NUM] = {0};
+
+ if (isp->isp_timeout)
+ dev_err(isp->dev, "%s: Resetting with WA activated",
+ __func__);
+ /*
+ * It is possible that the other asd stream is at the stage
+ * where v4l2_setfmt has just been called on it, which will
+ * have created a css stream on that stream. But at this
+ * point, there is no way to destroy the css stream created
+ * on that stream.
+ *
+ * So force stream destruction here.
+ */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ if (isp->asd[i].stream_prepared) {
+ atomisp_destroy_pipes_stream_force(&isp->
+ asd[i]);
+ recreate_streams[i] = true;
+ }
+ }
+
+ /* disable PUNIT/ISP acknowledge/handshake - SRSE=3 */
+ pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control |
+ MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
+ dev_err(isp->dev, "atomisp_reset");
+ atomisp_reset(isp);
+ for (i = 0; i < isp->num_of_streams; i++) {
+ if (recreate_streams[i])
+ atomisp_create_pipes_stream(&isp->asd[i]);
+ }
+ isp->isp_timeout = false;
+ }
+ return ret;
+}
+
+static int atomisp_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int rval;
+
+ mutex_lock(&isp->streamoff_mutex);
+ rt_mutex_lock(&isp->mutex);
+ rval = __atomisp_streamoff(file, fh, type);
+ rt_mutex_unlock(&isp->mutex);
+ mutex_unlock(&isp->streamoff_mutex);
+
+ return rval;
+}
+
+/*
+ * To get the current value of a control,
+ * applications initialize the id field of a struct v4l2_control and
+ * call this ioctl with a pointer to this structure.
+ */
+static int atomisp_g_ctrl(struct file *file, void *fh,
+ struct v4l2_control *control)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ctrls_num; i++) {
+ if (ci_v4l2_controls[i].id == control->id) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ rt_mutex_lock(&isp->mutex);
+
+ switch (control->id) {
+ case V4L2_CID_IRIS_ABSOLUTE:
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ case V4L2_CID_2A_STATUS:
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ case V4L2_CID_EXPOSURE:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_SCENE_MODE:
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_SHARPNESS:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_EXPOSURE_ZONE_NUM:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TEST_PATTERN_COLOR_R:
+ case V4L2_CID_TEST_PATTERN_COLOR_GR:
+ case V4L2_CID_TEST_PATTERN_COLOR_GB:
+ case V4L2_CID_TEST_PATTERN_COLOR_B:
+ rt_mutex_unlock(&isp->mutex);
+ return v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->
+ ctrl_handler, control);
+ case V4L2_CID_COLORFX:
+ ret = atomisp_color_effect(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION:
+ ret = atomisp_bad_pixel(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC:
+ ret = atomisp_gdc_cac(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_VIDEO_STABLIZATION:
+ ret = atomisp_video_stable(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FIXED_PATTERN_NR:
+ ret = atomisp_fixed_pattern(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION:
+ ret = atomisp_false_color(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_LOW_LIGHT:
+ ret = atomisp_low_light(asd, 0, &control->value);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+/*
+ * To change the value of a control,
+ * applications initialize the id and value fields of a struct v4l2_control
+ * and call this ioctl.
+ */
+static int atomisp_s_ctrl(struct file *file, void *fh,
+ struct v4l2_control *control)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ctrls_num; i++) {
+ if (ci_v4l2_controls[i].id == control->id) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ rt_mutex_lock(&isp->mutex);
+ switch (control->id) {
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ case V4L2_CID_EXPOSURE:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
+ case V4L2_CID_SCENE_MODE:
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_SHARPNESS:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_COLORFX_CBCR:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TEST_PATTERN_COLOR_R:
+ case V4L2_CID_TEST_PATTERN_COLOR_GR:
+ case V4L2_CID_TEST_PATTERN_COLOR_GB:
+ case V4L2_CID_TEST_PATTERN_COLOR_B:
+ rt_mutex_unlock(&isp->mutex);
+ return v4l2_s_ctrl(NULL,
+ isp->inputs[asd->input_curr].camera->
+ ctrl_handler, control);
+ case V4L2_CID_COLORFX:
+ ret = atomisp_color_effect(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION:
+ ret = atomisp_bad_pixel(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC:
+ ret = atomisp_gdc_cac(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_VIDEO_STABLIZATION:
+ ret = atomisp_video_stable(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FIXED_PATTERN_NR:
+ ret = atomisp_fixed_pattern(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION:
+ ret = atomisp_false_color(asd, 1, &control->value);
+ break;
+ case V4L2_CID_REQUEST_FLASH:
+ ret = atomisp_flash_enable(asd, control->value);
+ break;
+ case V4L2_CID_ATOMISP_LOW_LIGHT:
+ ret = atomisp_low_light(asd, 1, &control->value);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+/*
+ * To query the attributes of a control,
+ * applications set the id field of a struct v4l2_queryctrl and call
+ * this ioctl with a pointer to this structure. The driver fills in
+ * the rest of the structure.
+ */
+static int atomisp_queryctl(struct file *file, void *fh,
+ struct v4l2_queryctrl *qc)
+{
+ int i, ret = -EINVAL;
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ switch (qc->id) {
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_FOCUS_STATUS:
+ if (!atomisp_hw_is_isp2401) {
+ return v4l2_queryctrl(isp->inputs[asd->input_curr].camera->
+ ctrl_handler, qc);
+ }
+ /* ISP2401 */
+ if (isp->motor)
+ return v4l2_queryctrl(isp->motor->ctrl_handler, qc);
+ else
+ return v4l2_queryctrl(isp->inputs[asd->input_curr].
+ camera->ctrl_handler, qc);
+ }
+
+ if (qc->id & V4L2_CTRL_FLAG_NEXT_CTRL)
+ return ret;
+
+ for (i = 0; i < ctrls_num; i++) {
+ if (ci_v4l2_controls[i].id == qc->id) {
+ memcpy(qc, &ci_v4l2_controls[i],
+ sizeof(struct v4l2_queryctrl));
+ qc->reserved[0] = 0;
+ ret = 0;
+ break;
+ }
+ }
+ if (ret != 0)
+ qc->flags = V4L2_CTRL_FLAG_DISABLED;
+
+ return ret;
+}
+
+static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct v4l2_subdev *motor;
+ struct v4l2_control ctrl;
+ int i;
+ int ret = 0;
+
+ if (!atomisp_hw_is_isp2401)
+ motor = isp->inputs[asd->input_curr].motor;
+ else
+ motor = isp->motor;
+
+ for (i = 0; i < c->count; i++) {
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ switch (ctrl.id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_IRIS_ABSOLUTE:
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ case V4L2_CID_BIN_FACTOR_VERT:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TEST_PATTERN_COLOR_R:
+ case V4L2_CID_TEST_PATTERN_COLOR_GR:
+ case V4L2_CID_TEST_PATTERN_COLOR_GB:
+ case V4L2_CID_TEST_PATTERN_COLOR_B:
+ /*
+ * Exposure-related controls are handled by the sensor
+ * driver
+ */
+ ret =
+ v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->
+ ctrl_handler, &ctrl);
+ break;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_FOCUS_STATUS:
+ case V4L2_CID_FOCUS_AUTO:
+ if (motor)
+ ret = v4l2_g_ctrl(motor->ctrl_handler, &ctrl);
+ break;
+ case V4L2_CID_FLASH_STATUS:
+ case V4L2_CID_FLASH_INTENSITY:
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ case V4L2_CID_FLASH_TIMEOUT:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_MODE:
+ case V4L2_CID_FLASH_STATUS_REGISTER:
+ if (isp->flash)
+ ret =
+ v4l2_g_ctrl(isp->flash->ctrl_handler,
+ &ctrl);
+ break;
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_digital_zoom(asd, 0, &ctrl.value);
+ rt_mutex_unlock(&isp->mutex);
+ break;
+ case V4L2_CID_G_SKIP_FRAMES:
+ ret = v4l2_subdev_call(
+ isp->inputs[asd->input_curr].camera,
+ sensor, g_skip_frames, (u32 *)&ctrl.value);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ c->controls[i].value = ctrl.value;
+ }
+ return ret;
+}
+
+/* This ioctl allows the application to get multiple controls by class */
+static int atomisp_g_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct v4l2_control ctrl;
+ int i, ret = 0;
+
+ /*
+ * input_lock is not needed for the camera-related IOCTLs;
+ * taking input_lock would degrade the FPS of 3A.
+ */
+ ret = atomisp_camera_g_ext_ctrls(file, fh, c);
+ if (ret != -EINVAL)
+ return ret;
+
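+ /* the batched camera path rejected the set; handle each control individually */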
+ for (i = 0; i < c->count; i++) {
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ ret = atomisp_g_ctrl(file, fh, &ctrl);
+ c->controls[i].value = ctrl.value;
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct v4l2_subdev *motor;
+ struct v4l2_control ctrl;
+ int i;
+ int ret = 0;
+
+
+ if (!atomisp_hw_is_isp2401)
+ motor = isp->inputs[asd->input_curr].motor;
+ else
+ motor = isp->motor;
+
+ for (i = 0; i < c->count; i++) {
+ struct v4l2_ctrl *ctr;
+
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ switch (ctrl.id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_IRIS_ABSOLUTE:
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ case V4L2_CID_VCM_TIMEING:
+ case V4L2_CID_VCM_SLEW:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TEST_PATTERN_COLOR_R:
+ case V4L2_CID_TEST_PATTERN_COLOR_GR:
+ case V4L2_CID_TEST_PATTERN_COLOR_GB:
+ case V4L2_CID_TEST_PATTERN_COLOR_B:
+ ret = v4l2_s_ctrl(NULL,
+ isp->inputs[asd->input_curr].camera->
+ ctrl_handler, &ctrl);
+ break;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_FOCUS_STATUS:
+ case V4L2_CID_FOCUS_AUTO:
+ if (motor)
+ ret = v4l2_s_ctrl(NULL, motor->ctrl_handler,
+ &ctrl);
+ else
+ ret = v4l2_s_ctrl(NULL,
+ isp->inputs[asd->input_curr].
+ camera->ctrl_handler, &ctrl);
+ break;
+ case V4L2_CID_FLASH_STATUS:
+ case V4L2_CID_FLASH_INTENSITY:
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ case V4L2_CID_FLASH_TIMEOUT:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_MODE:
+ case V4L2_CID_FLASH_STATUS_REGISTER:
+ rt_mutex_lock(&isp->mutex);
+ if (isp->flash) {
+ ret =
+ v4l2_s_ctrl(NULL, isp->flash->ctrl_handler,
+ &ctrl);
+ /*
+ * When the flash mode is changed we need to
+ * reset the flash state.
+ */
+ if (ctrl.id == V4L2_CID_FLASH_MODE) {
+ asd->params.flash_state =
+ ATOMISP_FLASH_IDLE;
+ asd->params.num_flash_frames = 0;
+ }
+ }
+ rt_mutex_unlock(&isp->mutex);
+ break;
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_digital_zoom(asd, 1, &ctrl.value);
+ rt_mutex_unlock(&isp->mutex);
+ break;
+ default:
+ ctr = v4l2_ctrl_find(&asd->ctrl_handler, ctrl.id);
+ if (ctr)
+ ret = v4l2_ctrl_s_ctrl(ctr, ctrl.value);
+ else
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ c->controls[i].value = ctrl.value;
+ }
+ return ret;
+}
+
+/* This ioctl allows the application to set multiple controls by class */
+static int atomisp_s_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct v4l2_control ctrl;
+ int i, ret = 0;
+
+ /*
+ * input_lock is not needed for the camera-related IOCTLs;
+ * taking input_lock would degrade the FPS of 3A.
+ */
+ ret = atomisp_camera_s_ext_ctrls(file, fh, c);
+ if (ret != -EINVAL)
+ return ret;
+
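+ /* the batched camera path rejected the set; handle each control individually */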
+ for (i = 0; i < c->count; i++) {
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ ret = atomisp_s_ctrl(file, fh, &ctrl);
+ c->controls[i].value = ctrl.value;
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * vidioc_g/s_param are used to switch the ISP running mode
+ */
+static int atomisp_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(isp->dev, "unsupport v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ rt_mutex_lock(&isp->mutex);
+ parm->parm.capture.capturemode = asd->run_mode->val;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+}
+
+static int atomisp_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ int mode;
+ int rval;
+ int fps;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(isp->dev, "unsupport v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ rt_mutex_lock(&isp->mutex);
+
+ asd->high_speed_mode = false;
+ switch (parm->parm.capture.capturemode) {
+ case CI_MODE_NONE: {
+ struct v4l2_subdev_frame_interval fi = {0};
+
+ fi.interval = parm->parm.capture.timeperframe;
+
+ rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_frame_interval, &fi);
+ if (!rval)
+ parm->parm.capture.timeperframe = fi.interval;
+
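+ /*
+ * timeperframe is a fraction (fps = denominator / numerator);
+ * above 30 fps the sensor is considered to run in high speed mode.
+ */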
+ if (fi.interval.numerator != 0) {
+ fps = fi.interval.denominator / fi.interval.numerator;
+ if (fps > 30)
+ asd->high_speed_mode = true;
+ }
+
+ goto out;
+ }
+ case CI_MODE_VIDEO:
+ mode = ATOMISP_RUN_MODE_VIDEO;
+ break;
+ case CI_MODE_STILL_CAPTURE:
+ mode = ATOMISP_RUN_MODE_STILL_CAPTURE;
+ break;
+ case CI_MODE_CONTINUOUS:
+ mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE;
+ break;
+ case CI_MODE_PREVIEW:
+ mode = ATOMISP_RUN_MODE_PREVIEW;
+ break;
+ default:
+ rval = -EINVAL;
+ goto out;
+ }
+
+ rval = v4l2_ctrl_s_ctrl(asd->run_mode, mode);
+
+out:
+ rt_mutex_unlock(&isp->mutex);
+
+ return rval == -ENOIOCTLCMD ? 0 : rval;
+}
+
+static int atomisp_s_parm_file(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ dev_err(isp->dev, "unsupport v4l2 buf type for output\n");
+ return -EINVAL;
+ }
+
+ rt_mutex_lock(&isp->mutex);
+ isp->sw_contex.file_input = true;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+}
+
+static long atomisp_vidioc_default(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd;
+ struct v4l2_subdev *motor;
+ bool acc_node;
+ int err;
+
+ acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC");
+ if (acc_node)
+ asd = atomisp_to_acc_pipe(vdev)->asd;
+ else
+ asd = atomisp_to_video_pipe(vdev)->asd;
+
+ if (!atomisp_hw_is_isp2401)
+ motor = isp->inputs[asd->input_curr].motor;
+ else
+ motor = isp->motor;
+
+ switch (cmd) {
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_S_EXPOSURE:
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_EXT_ISP_CTRL:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
+ case ATOMISP_IOC_S_SENSOR_EE_CONFIG:
+ case ATOMISP_IOC_G_UPDATE_EXPOSURE:
+ /* we do not need to take isp->mutex for these IOCTLs */
+ break;
+ default:
+ rt_mutex_lock(&isp->mutex);
+ break;
+ }
+ switch (cmd) {
+ case ATOMISP_IOC_S_SENSOR_RUNMODE:
+ if (atomisp_hw_is_isp2401)
+ err = atomisp_set_sensor_runmode(asd, arg);
+ else
+ err = -EINVAL;
+ break;
+
+ case ATOMISP_IOC_G_XNR:
+ err = atomisp_xnr(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_XNR:
+ err = atomisp_xnr(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_NR:
+ err = atomisp_nr(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_NR:
+ err = atomisp_nr(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_TNR:
+ err = atomisp_tnr(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_TNR:
+ err = atomisp_tnr(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_BLACK_LEVEL_COMP:
+ err = atomisp_black_level(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_BLACK_LEVEL_COMP:
+ err = atomisp_black_level(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_EE:
+ err = atomisp_ee(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_EE:
+ err = atomisp_ee(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_DIS_STAT:
+ err = atomisp_get_dis_stat(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS:
+ err = atomisp_get_dvs2_bq_resolutions(asd, arg);
+ break;
+
+ case ATOMISP_IOC_S_DIS_COEFS:
+ err = atomisp_css_cp_dvs2_coefs(asd, arg,
+ &asd->params.css_param, true);
+ if (!err && arg)
+ asd->params.css_update_params_needed = true;
+ break;
+
+ case ATOMISP_IOC_S_DIS_VECTOR:
+ err = atomisp_cp_dvs_6axis_config(asd, arg,
+ &asd->params.css_param, true);
+ if (!err && arg)
+ asd->params.css_update_params_needed = true;
+ break;
+
+ case ATOMISP_IOC_G_ISP_PARM:
+ err = atomisp_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_PARM:
+ err = atomisp_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_3A_STAT:
+ err = atomisp_3a_stat(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_GAMMA:
+ err = atomisp_gamma(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_GAMMA:
+ err = atomisp_gamma(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_GDC_TAB:
+ err = atomisp_gdc_cac_table(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_GDC_TAB:
+ err = atomisp_gdc_cac_table(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_MACC:
+ err = atomisp_macc_table(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_MACC:
+ err = atomisp_macc_table(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION:
+ err = atomisp_bad_pixel_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION:
+ err = atomisp_bad_pixel_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION:
+ err = atomisp_false_color_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION:
+ err = atomisp_false_color_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_CTC:
+ err = atomisp_ctc(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_CTC:
+ err = atomisp_ctc(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_WHITE_BALANCE:
+ err = atomisp_white_balance_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_WHITE_BALANCE:
+ err = atomisp_white_balance_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_3A_CONFIG:
+ err = atomisp_3a_config_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_3A_CONFIG:
+ err = atomisp_3a_config_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_FPN_TABLE:
+ err = atomisp_fixed_pattern_table(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ISP_MAKERNOTE:
+ err = atomisp_exif_makernote(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_SENSOR_MODE_DATA:
+ err = atomisp_get_sensor_mode_data(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+ if (motor)
+ err = v4l2_subdev_call(motor, core, ioctl, cmd, arg);
+ else
+ err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, ioctl, cmd, arg);
+ break;
+
+ case ATOMISP_IOC_S_EXPOSURE:
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
+ err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, ioctl, cmd, arg);
+ break;
+ case ATOMISP_IOC_G_UPDATE_EXPOSURE:
+ if (atomisp_hw_is_isp2401)
+ err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, ioctl, cmd, arg);
+ else
+ err = -EINVAL;
+ break;
+
+ case ATOMISP_IOC_ACC_LOAD:
+ err = atomisp_acc_load(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE:
+ err = atomisp_acc_load_to_pipe(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_UNLOAD:
+ err = atomisp_acc_unload(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_START:
+ err = atomisp_acc_start(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_WAIT:
+ err = atomisp_acc_wait(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_MAP:
+ err = atomisp_acc_map(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_UNMAP:
+ err = atomisp_acc_unmap(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG:
+ err = atomisp_acc_s_mapped_arg(asd, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_SHD_TAB:
+ err = atomisp_set_shading_table(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_GAMMA_CORRECTION:
+ err = atomisp_gamma_correction(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_GAMMA_CORRECTION:
+ err = atomisp_gamma_correction(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_S_PARAMETERS:
+ err = atomisp_set_parameters(vdev, arg);
+ break;
+
+ case ATOMISP_IOC_S_CONT_CAPTURE_CONFIG:
+ err = atomisp_offline_capture_configure(asd, arg);
+ break;
+ case ATOMISP_IOC_G_METADATA:
+ err = atomisp_get_metadata(asd, 0, arg);
+ break;
+ case ATOMISP_IOC_G_METADATA_BY_TYPE:
+ err = atomisp_get_metadata_by_type(asd, 0, arg);
+ break;
+ case ATOMISP_IOC_EXT_ISP_CTRL:
+ err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, ioctl, cmd, arg);
+ break;
+ case ATOMISP_IOC_EXP_ID_UNLOCK:
+ err = atomisp_exp_id_unlock(asd, arg);
+ break;
+ case ATOMISP_IOC_EXP_ID_CAPTURE:
+ err = atomisp_exp_id_capture(asd, arg);
+ break;
+ case ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE:
+ err = atomisp_enable_dz_capt_pipe(asd, arg);
+ break;
+ case ATOMISP_IOC_G_FORMATS_CONFIG:
+ err = atomisp_formats(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_FORMATS_CONFIG:
+ err = atomisp_formats(asd, 1, arg);
+ break;
+ case ATOMISP_IOC_S_EXPOSURE_WINDOW:
+ err = atomisp_s_ae_window(asd, arg);
+ break;
+ case ATOMISP_IOC_S_ACC_STATE:
+ err = atomisp_acc_set_state(asd, arg);
+ break;
+ case ATOMISP_IOC_G_ACC_STATE:
+ err = atomisp_acc_get_state(asd, arg);
+ break;
+ case ATOMISP_IOC_INJECT_A_FAKE_EVENT:
+ err = atomisp_inject_a_fake_event(asd, arg);
+ break;
+ case ATOMISP_IOC_G_INVALID_FRAME_NUM:
+ err = atomisp_get_invalid_frame_num(vdev, arg);
+ break;
+ case ATOMISP_IOC_S_ARRAY_RESOLUTION:
+ err = atomisp_set_array_res(asd, arg);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
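+	/*
+	 * The sensor-facing commands below are dispatched without
+	 * isp->mutex held (the lock is only taken for ISP-handled
+	 * commands earlier in this function), so this switch skips the
+	 * unlock for exactly those cases.
+	 */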
+ switch (cmd) {
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_S_EXPOSURE:
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_EXT_ISP_CTRL:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
+ case ATOMISP_IOC_G_UPDATE_EXPOSURE:
+ break;
+ default:
+ rt_mutex_unlock(&isp->mutex);
+ break;
+ }
+ return err;
+}
+
+const struct v4l2_ioctl_ops atomisp_ioctl_ops = {
+ .vidioc_querycap = atomisp_querycap,
+ .vidioc_enum_input = atomisp_enum_input,
+ .vidioc_g_input = atomisp_g_input,
+ .vidioc_s_input = atomisp_s_input,
+ .vidioc_queryctrl = atomisp_queryctl,
+ .vidioc_s_ctrl = atomisp_s_ctrl,
+ .vidioc_g_ctrl = atomisp_g_ctrl,
+ .vidioc_s_ext_ctrls = atomisp_s_ext_ctrls,
+ .vidioc_g_ext_ctrls = atomisp_g_ext_ctrls,
+ .vidioc_enum_fmt_vid_cap = atomisp_enum_fmt_cap,
+ .vidioc_try_fmt_vid_cap = atomisp_try_fmt_cap,
+ .vidioc_g_fmt_vid_cap = atomisp_g_fmt_cap,
+ .vidioc_s_fmt_vid_cap = atomisp_s_fmt_cap,
+ .vidioc_reqbufs = atomisp_reqbufs,
+ .vidioc_querybuf = atomisp_querybuf,
+ .vidioc_qbuf = atomisp_qbuf,
+ .vidioc_dqbuf = atomisp_dqbuf,
+ .vidioc_streamon = atomisp_streamon,
+ .vidioc_streamoff = atomisp_streamoff,
+ .vidioc_default = atomisp_vidioc_default,
+ .vidioc_s_parm = atomisp_s_parm,
+ .vidioc_g_parm = atomisp_g_parm,
+};
+
+const struct v4l2_ioctl_ops atomisp_file_ioctl_ops = {
+ .vidioc_querycap = atomisp_querycap,
+ .vidioc_g_fmt_vid_out = atomisp_g_fmt_file,
+ .vidioc_s_fmt_vid_out = atomisp_s_fmt_file,
+ .vidioc_s_parm = atomisp_s_parm_file,
+ .vidioc_reqbufs = atomisp_reqbufs_file,
+ .vidioc_querybuf = atomisp_querybuf_file,
+ .vidioc_qbuf = atomisp_qbuf_file,
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
new file mode 100644
index 000000000000..5f3f2ec2539b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
@@ -0,0 +1,66 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_IOCTL_H__
+#define __ATOMISP_IOCTL_H__
+
+#include "ia_css.h"
+
+struct atomisp_device;
+struct atomisp_video_pipe;
+
+extern const struct atomisp_format_bridge atomisp_output_fmts[];
+
+const struct atomisp_format_bridge *
+atomisp_get_format_bridge(unsigned int pixelformat);
+
+const struct atomisp_format_bridge *
+atomisp_get_format_bridge_from_mbus(u32 mbus_code);
+
+int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
+ uint16_t stream_id);
+
+int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type);
+int __atomisp_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req);
+
+int atomisp_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req);
+
+enum atomisp_css_pipe_id
+atomisp_get_css_pipe_id(struct atomisp_sub_device *asd);
+
+void atomisp_videobuf_free_buf(struct videobuf_buffer *vb);
+
+extern const struct v4l2_file_operations atomisp_file_fops;
+
+extern const struct v4l2_ioctl_ops atomisp_ioctl_ops;
+
+extern const struct v4l2_ioctl_ops atomisp_file_ioctl_ops;
+
+unsigned int atomisp_streaming_count(struct atomisp_device *isp);
+
+unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp);
+/* compat_ioctl for 32bit userland app and 64bit kernel */
+long atomisp_compat_ioctl32(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp,
+ bool isp_timeout);
+#endif /* __ATOMISP_IOCTL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
new file mode 100644
index 000000000000..46590129cbe3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -0,0 +1,1456 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_compat.h"
+#include "atomisp_internal.h"
+
+const struct atomisp_in_fmt_conv atomisp_in_fmt_conv[] = {
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_BGGR, CSS_FORMAT_RAW_8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_GBRG, CSS_FORMAT_RAW_8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_GRBG, CSS_FORMAT_RAW_8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_RGGB, CSS_FORMAT_RAW_8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_BGGR, CSS_FORMAT_RAW_10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_GBRG, CSS_FORMAT_RAW_10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_GRBG, CSS_FORMAT_RAW_10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_RGGB, CSS_FORMAT_RAW_10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_BGGR, CSS_FORMAT_RAW_12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_GBRG, CSS_FORMAT_RAW_12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_GRBG, CSS_FORMAT_RAW_12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_RGGB, CSS_FORMAT_RAW_12 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0, ATOMISP_INPUT_FORMAT_YUV422_8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0, ATOMISP_INPUT_FORMAT_YUV422_8 },
+ { MEDIA_BUS_FMT_JPEG_1X8, 8, 8, CSS_FRAME_FORMAT_BINARY_8, 0, ATOMISP_INPUT_FORMAT_BINARY_8 },
+ { V4L2_MBUS_FMT_CUSTOM_NV12, 12, 12, CSS_FRAME_FORMAT_NV12, 0, CSS_FRAME_FORMAT_NV12 },
+ { V4L2_MBUS_FMT_CUSTOM_NV21, 12, 12, CSS_FRAME_FORMAT_NV21, 0, CSS_FRAME_FORMAT_NV21 },
+ { V4L2_MBUS_FMT_CUSTOM_YUV420, 12, 12, ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY, 0, ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY },
+#if 0
+ { V4L2_MBUS_FMT_CUSTOM_M10MO_RAW, 8, 8, CSS_FRAME_FORMAT_BINARY_8, 0, ATOMISP_INPUT_FORMAT_BINARY_8 },
+#endif
+ /* no valid V4L2 MBUS code for metadata format, so leave it 0. */
+ { 0, 0, 0, ATOMISP_INPUT_FORMAT_EMBEDDED, 0, ATOMISP_INPUT_FORMAT_EMBEDDED },
+ {}
+};
+
+static const struct {
+ u32 code;
+ u32 compressed;
+} compressed_codes[] = {
+ { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 },
+};
+
+u32 atomisp_subdev_uncompressed_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(compressed_codes); i++)
+ if (code == compressed_codes[i].compressed)
+ return compressed_codes[i].code;
+
+ return code;
+}
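+
+/*
+ * Example (illustrative): MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 maps back to
+ * MEDIA_BUS_FMT_SBGGR10_1X10 via compressed_codes[] above, while any
+ * code not listed there is returned unchanged.
+ */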
+
+bool atomisp_subdev_is_compressed(u32 code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++)
+ if (code == atomisp_in_fmt_conv[i].code)
+ return atomisp_in_fmt_conv[i].bpp !=
+ atomisp_in_fmt_conv[i].depth;
+
+ return false;
+}
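+
+/*
+ * A code counts as compressed when its bus width (bpp) is smaller than
+ * its uncompressed depth. Note that every entry currently listed in
+ * atomisp_in_fmt_conv[] has bpp == depth, so all of them report false.
+ */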
+
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv(u32 code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++)
+ if (code == atomisp_in_fmt_conv[i].code)
+ return atomisp_in_fmt_conv + i;
+
+ return NULL;
+}
+
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ enum atomisp_input_format atomisp_in_fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++)
+ if (atomisp_in_fmt_conv[i].atomisp_in_fmt == atomisp_in_fmt)
+ return atomisp_in_fmt_conv + i;
+
+ return NULL;
+}
+
+bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd,
+ unsigned int source_pad)
+{
+ struct v4l2_mbus_framefmt *sink, *src;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+ src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, source_pad);
+
+ return atomisp_is_mbuscode_raw(sink->code)
+ && !atomisp_is_mbuscode_raw(src->code);
+}
+
+uint16_t atomisp_subdev_source_pad(struct video_device *vdev)
+{
+ struct media_link *link;
+ u16 ret = 0;
+
+ list_for_each_entry(link, &vdev->entity.links, list) {
+ if (link->source) {
+ ret = link->source->index;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * V4L2 subdev operations
+ */
+
+/*
+ * isp_subdev_ioctl - ISP subdev private ioctls
+ * @sd: ISP V4L2 subdevice
+ * @cmd: ioctl command
+ * @arg: ioctl argument
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static long isp_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ return 0;
+}
+
+/*
+ * isp_subdev_set_power - Power on/off the ISP subdev
+ * @sd: ISP V4L2 subdevice
+ * @on: power on/off
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int isp_subdev_set_power(struct v4l2_subdev *sd, int on)
+{
+ return 0;
+}
+
+static int isp_subdev_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+
+ if (sub->type != V4L2_EVENT_FRAME_SYNC &&
+ sub->type != V4L2_EVENT_FRAME_END &&
+ sub->type != V4L2_EVENT_ATOMISP_3A_STATS_READY &&
+ sub->type != V4L2_EVENT_ATOMISP_METADATA_READY &&
+ sub->type != V4L2_EVENT_ATOMISP_PAUSE_BUFFER &&
+ sub->type != V4L2_EVENT_ATOMISP_CSS_RESET &&
+ sub->type != V4L2_EVENT_ATOMISP_RAW_BUFFERS_ALLOC_DONE &&
+ sub->type != V4L2_EVENT_ATOMISP_ACC_COMPLETE)
+ return -EINVAL;
+
+ if (sub->type == V4L2_EVENT_FRAME_SYNC &&
+ !atomisp_css_valid_sof(isp))
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 16, NULL);
+}
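+
+/*
+ * Userspace subscribes through VIDIOC_SUBSCRIBE_EVENT on the subdev
+ * node; an illustrative sketch:
+ *
+ *	struct v4l2_event_subscription sub = {
+ *		.type = V4L2_EVENT_FRAME_SYNC,
+ *	};
+ *	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ *
+ * Up to 16 events per subscription are queued before the oldest ones
+ * are overwritten.
+ */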
+
+static int isp_subdev_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+/*
+ * isp_subdev_enum_mbus_code - Handle pixel format enumeration
+ * @sd: pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad config
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ *
+ * Return -EINVAL or zero on success.
+ */
+static int isp_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(atomisp_in_fmt_conv) - 1)
+ return -EINVAL;
+
+ code->code = atomisp_in_fmt_conv[code->index].code;
+
+ return 0;
+}
+
+static int isp_subdev_validate_rect(struct v4l2_subdev *sd, uint32_t pad,
+ uint32_t target)
+{
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK:
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return 0;
+ }
+ break;
+ default:
+ switch (target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ return 0;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
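+
+/*
+ * Illustration: only cropping is supported on the sink pad and only
+ * composing on the source pads, so e.g.
+ *
+ *	isp_subdev_validate_rect(sd, ATOMISP_SUBDEV_PAD_SINK,
+ *				 V4L2_SEL_TGT_COMPOSE)
+ *
+ * returns -EINVAL.
+ */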
+
+struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which, uint32_t pad,
+ uint32_t target)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY) {
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return v4l2_subdev_get_try_crop(sd, cfg, pad);
+ case V4L2_SEL_TGT_COMPOSE:
+ return v4l2_subdev_get_try_compose(sd, cfg, pad);
+ }
+ }
+
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return &isp_sd->fmt[pad].crop;
+ case V4L2_SEL_TGT_COMPOSE:
+ return &isp_sd->fmt[pad].compose;
+ }
+
+ return NULL;
+}
+
+struct v4l2_mbus_framefmt *
+atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
+			struct v4l2_subdev_pad_config *cfg, uint32_t which,
+			uint32_t pad)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(sd, cfg, pad);
+
+ return &isp_sd->fmt[pad].fmt;
+}
+
+static void isp_get_fmt_rect(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_mbus_framefmt **ffmt,
+ struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
+ struct v4l2_rect *comp[ATOMISP_SUBDEV_PADS_NUM])
+{
+ unsigned int i;
+
+ for (i = 0; i < ATOMISP_SUBDEV_PADS_NUM; i++) {
+ ffmt[i] = atomisp_subdev_get_ffmt(sd, cfg, which, i);
+ crop[i] = atomisp_subdev_get_rect(sd, cfg, which, i,
+ V4L2_SEL_TGT_CROP);
+ comp[i] = atomisp_subdev_get_rect(sd, cfg, which, i,
+ V4L2_SEL_TGT_COMPOSE);
+ }
+}
+
+static void isp_subdev_propagate(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which, uint32_t pad, uint32_t target,
+ uint32_t flags)
+{
+ struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM];
+ struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
+ *comp[ATOMISP_SUBDEV_PADS_NUM];
+
+ if (flags & V4L2_SEL_FLAG_KEEP_CONFIG)
+ return;
+
+ isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp);
+
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK: {
+ struct v4l2_rect r = {0};
+
+ /* Only crop target supported on sink pad. */
+ r.width = ffmt[pad]->width;
+ r.height = ffmt[pad]->height;
+
+ atomisp_subdev_set_selection(sd, cfg, which, pad,
+ target, flags, &r);
+ break;
+ }
+ }
+}
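+
+/*
+ * Propagation only acts on the sink pad: a new sink format resets the
+ * sink crop rectangle to the full frame, and atomisp_subdev_set_selection()
+ * then pushes the result down into the source compose rectangles.
+ */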
+
+static int isp_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_rect *rec;
+ int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target);
+
+ if (rval)
+ return rval;
+
+ rec = atomisp_subdev_get_rect(sd, cfg, sel->which, sel->pad,
+ sel->target);
+ if (!rec)
+ return -EINVAL;
+
+ sel->r = *rec;
+ return 0;
+}
+
+static const char *const atomisp_pad_str[] = {
+	"ATOMISP_SUBDEV_PAD_SINK",
+	"ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE",
+	"ATOMISP_SUBDEV_PAD_SOURCE_VF",
+	"ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW",
+	"ATOMISP_SUBDEV_PAD_SOURCE_VIDEO",
+};
+
+int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which, uint32_t pad, uint32_t target,
+ u32 flags, struct v4l2_rect *r)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+ struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM];
+ u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode);
+ struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
+ *comp[ATOMISP_SUBDEV_PADS_NUM];
+ enum atomisp_input_stream_id stream_id;
+ unsigned int i;
+ unsigned int padding_w = pad_w;
+ unsigned int padding_h = pad_h;
+
+ stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad);
+
+ isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp);
+
+ dev_dbg(isp->dev,
+ "sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n",
+ atomisp_pad_str[pad], target == V4L2_SEL_TGT_CROP
+ ? "V4L2_SEL_TGT_CROP" : "V4L2_SEL_TGT_COMPOSE",
+ r->left, r->top, r->width, r->height,
+ which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+ : "V4L2_SUBDEV_FORMAT_ACTIVE", flags);
+
+ r->width = rounddown(r->width, ATOM_ISP_STEP_WIDTH);
+ r->height = rounddown(r->height, ATOM_ISP_STEP_HEIGHT);
+
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK: {
+ /* Only crop target supported on sink pad. */
+ unsigned int dvs_w, dvs_h;
+
+ crop[pad]->width = ffmt[pad]->width;
+ crop[pad]->height = ffmt[pad]->height;
+
+		/*
+		 * Workaround for BYT 1080p perfectshot: the maximum
+		 * resolution of the ov2722 front camera is 1932x1092,
+		 * so a padding width greater than 12 cannot be used.
+		 */
+ if (!strncmp(isp->inputs[isp_sd->input_curr].camera->name,
+ "ov2722", 6) && crop[pad]->height == 1092) {
+ padding_w = 12;
+ padding_h = 12;
+ }
+
+ if (isp->inputs[isp_sd->input_curr].type == SOC_CAMERA) {
+ padding_w = 0;
+ padding_h = 0;
+ }
+
+		if (atomisp_subdev_format_conversion(isp_sd,
+						     isp_sd->capture_pad) &&
+		    crop[pad]->width && crop[pad]->height) {
+			crop[pad]->width -= padding_w;
+			crop[pad]->height -= padding_h;
+		}
+
+		/* If the subdev type is SOC camera, we do not need to set DVS. */
+ if (isp->inputs[isp_sd->input_curr].type == SOC_CAMERA)
+ isp_sd->params.video_dis_en = 0;
+
+ if (isp_sd->params.video_dis_en &&
+ isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ !isp_sd->continuous_mode->val) {
+			/*
+			 * This resolution contains 20% of DVS slack
+			 * (of the desired captured image before
+			 * scaling, or 1/6 of what we get from the
+			 * sensor) in both width and height. Remove it.
+			 */
+ crop[pad]->width = roundup(crop[pad]->width * 5 / 6,
+ ATOM_ISP_STEP_WIDTH);
+ crop[pad]->height = roundup(crop[pad]->height * 5 / 6,
+ ATOM_ISP_STEP_HEIGHT);
+ }
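+
+		/*
+		 * Worked example (illustrative numbers): a 1920x1080 sink
+		 * crop shrinks to 1600x900 here; the removed 320x180
+		 * (crop / 5) is handed to CSS below as the DVS envelope.
+		 */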
+
+ crop[pad]->width = min(crop[pad]->width, r->width);
+ crop[pad]->height = min(crop[pad]->height, r->height);
+
+ if (!(flags & V4L2_SEL_FLAG_KEEP_CONFIG)) {
+ for (i = ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE;
+ i < ATOMISP_SUBDEV_PADS_NUM; i++) {
+ struct v4l2_rect tmp = *crop[pad];
+
+ atomisp_subdev_set_selection(
+ sd, cfg, which, i, V4L2_SEL_TGT_COMPOSE,
+ flags, &tmp);
+ }
+ }
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ break;
+
+ if (isp_sd->params.video_dis_en &&
+ isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ !isp_sd->continuous_mode->val) {
+ dvs_w = rounddown(crop[pad]->width / 5,
+ ATOM_ISP_STEP_WIDTH);
+ dvs_h = rounddown(crop[pad]->height / 5,
+ ATOM_ISP_STEP_HEIGHT);
+ } else if (!isp_sd->params.video_dis_en &&
+ isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ /*
+ * For CSS2.0, digital zoom needs to set dvs envelope to 12
+ * when dvs is disabled.
+ */
+ dvs_w = dvs_h = 12;
+ } else
+ dvs_w = dvs_h = 0;
+
+ atomisp_css_video_set_dis_envelope(isp_sd, dvs_w, dvs_h);
+ atomisp_css_input_set_effective_resolution(isp_sd, stream_id,
+ crop[pad]->width, crop[pad]->height);
+
+ break;
+ }
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: {
+ /* Only compose target is supported on source pads. */
+
+ if (isp_sd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ /* Scaling is disabled in this mode */
+ r->width = crop[ATOMISP_SUBDEV_PAD_SINK]->width;
+ r->height = crop[ATOMISP_SUBDEV_PAD_SINK]->height;
+ }
+
+ if (crop[ATOMISP_SUBDEV_PAD_SINK]->width == r->width
+ && crop[ATOMISP_SUBDEV_PAD_SINK]->height == r->height)
+ isp_sd->params.yuv_ds_en = false;
+ else
+ isp_sd->params.yuv_ds_en = true;
+
+ comp[pad]->width = r->width;
+ comp[pad]->height = r->height;
+
+ if (r->width == 0 || r->height == 0 ||
+ crop[ATOMISP_SUBDEV_PAD_SINK]->width == 0 ||
+ crop[ATOMISP_SUBDEV_PAD_SINK]->height == 0)
+ break;
+ /*
+		 * Crop the sensor input if the aspect ratio of the required
+		 * resolution differs from the sensor output aspect ratio:
+ *
+ * ratio = width / height
+ *
+ * if ratio_output < ratio_sensor:
+ * effect_width = sensor_height * out_width / out_height;
+ * effect_height = sensor_height;
+ * else
+ * effect_width = sensor_width;
+ * effect_height = sensor_width * out_height / out_width;
+ *
+ */
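+		/*
+		 * Worked example (illustrative): sink crop 1920x1080 (16:9)
+		 * with a requested 1440x1080 (4:3) output gives
+		 * 1440 * 1080 < 1920 * 1080, so the effective input becomes
+		 * 1080 * 1440 / 1080 = 1440 wide at the full 1080 height.
+		 */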
+ if (r->width * crop[ATOMISP_SUBDEV_PAD_SINK]->height <
+ crop[ATOMISP_SUBDEV_PAD_SINK]->width * r->height)
+ atomisp_css_input_set_effective_resolution(isp_sd,
+ stream_id,
+ rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
+ height * r->width / r->height,
+ ATOM_ISP_STEP_WIDTH),
+ crop[ATOMISP_SUBDEV_PAD_SINK]->height);
+ else
+ atomisp_css_input_set_effective_resolution(isp_sd,
+ stream_id,
+ crop[ATOMISP_SUBDEV_PAD_SINK]->width,
+ rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
+ width * r->height / r->width,
+ ATOM_ISP_STEP_WIDTH));
+
+ break;
+ }
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ comp[pad]->width = r->width;
+ comp[pad]->height = r->height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set format dimensions on non-sink pads as well. */
+ if (pad != ATOMISP_SUBDEV_PAD_SINK) {
+ ffmt[pad]->width = comp[pad]->width;
+ ffmt[pad]->height = comp[pad]->height;
+ }
+
+ if (!atomisp_subdev_get_rect(sd, cfg, which, pad, target))
+ return -EINVAL;
+ *r = *atomisp_subdev_get_rect(sd, cfg, which, pad, target);
+
+ dev_dbg(isp->dev, "sel actual: l %d t %d w %d h %d\n",
+ r->left, r->top, r->width, r->height);
+
+ return 0;
+}
+
+static int isp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target);
+
+ if (rval)
+ return rval;
+
+ return atomisp_subdev_set_selection(sd, cfg, sel->which, sel->pad,
+ sel->target, sel->flags, &sel->r);
+}
+
+static int atomisp_get_sensor_bin_factor(struct atomisp_sub_device *asd)
+{
+ struct v4l2_control ctrl = {0};
+ struct atomisp_device *isp = asd->isp;
+ int hbin, vbin;
+ int ret;
+
+ if (isp->inputs[asd->input_curr].type == FILE_INPUT ||
+ isp->inputs[asd->input_curr].type == TEST_PATTERN)
+ return 0;
+
+ ctrl.id = V4L2_CID_BIN_FACTOR_HORZ;
+ ret =
+ v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->ctrl_handler,
+ &ctrl);
+ hbin = ctrl.value;
+ ctrl.id = V4L2_CID_BIN_FACTOR_VERT;
+ ret |=
+ v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->ctrl_handler,
+ &ctrl);
+ vbin = ctrl.value;
+
+	/*
+	 * The ISP needs to know the binning factor from the sensor.
+	 * If the horizontal and vertical binning factors differ, or the
+	 * sensor does not support the binning factor CIDs, the ISP
+	 * applies the default value of 0.
+	 */
+ if (ret || hbin != vbin)
+ hbin = 0;
+
+ return hbin;
+}
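+
+/*
+ * Example (illustrative): a sensor reporting 2x2 binning yields 2;
+ * mismatched factors (e.g. 2x1) or missing binning controls yield 0.
+ */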
+
+void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ u32 pad, struct v4l2_mbus_framefmt *ffmt)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+ struct v4l2_mbus_framefmt *__ffmt =
+ atomisp_subdev_get_ffmt(sd, cfg, which, pad);
+ u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode);
+ enum atomisp_input_stream_id stream_id;
+
+ dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n",
+ atomisp_pad_str[pad], ffmt->width, ffmt->height, ffmt->code,
+ which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+ : "V4L2_SUBDEV_FORMAT_ACTIVE");
+
+ stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad);
+
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK: {
+ const struct atomisp_in_fmt_conv *fc =
+ atomisp_find_in_fmt_conv(ffmt->code);
+
+ if (!fc) {
+ fc = atomisp_in_fmt_conv;
+ ffmt->code = fc->code;
+ dev_dbg(isp->dev, "using 0x%8.8x instead\n",
+ ffmt->code);
+ }
+
+ *__ffmt = *ffmt;
+
+ isp_subdev_propagate(sd, cfg, which, pad,
+ V4L2_SEL_TGT_CROP, 0);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ atomisp_css_input_set_resolution(isp_sd,
+ stream_id, ffmt);
+ atomisp_css_input_set_binning_factor(isp_sd,
+ stream_id,
+ atomisp_get_sensor_bin_factor(isp_sd));
+ atomisp_css_input_set_bayer_order(isp_sd, stream_id,
+ fc->bayer_order);
+ atomisp_css_input_set_format(isp_sd, stream_id,
+ fc->css_stream_fmt);
+ atomisp_css_set_default_isys_config(isp_sd, stream_id,
+ ffmt);
+ }
+
+ break;
+ }
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
+ __ffmt->code = ffmt->code;
+ break;
+ }
+}
+
+/*
+ * isp_subdev_get_format - Retrieve the video format on a pad
+ * @sd : ISP V4L2 subdevice
+ * @cfg: V4L2 subdev pad config
+ * @fmt: format container; the pad number is taken from fmt->pad
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int isp_subdev_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format = *atomisp_subdev_get_ffmt(sd, cfg, fmt->which, fmt->pad);
+
+ return 0;
+}
+
+/*
+ * isp_subdev_set_format - Set the video format on a pad
+ * @sd : ISP V4L2 subdevice
+ * @cfg: V4L2 subdev pad config
+ * @fmt: format to apply; the pad number is taken from fmt->pad
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int isp_subdev_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ atomisp_subdev_set_ffmt(sd, cfg, fmt->which, fmt->pad, &fmt->format);
+
+ return 0;
+}
+
+/* V4L2 subdev core operations */
+static const struct v4l2_subdev_core_ops isp_subdev_v4l2_core_ops = {
+ .ioctl = isp_subdev_ioctl, .s_power = isp_subdev_set_power,
+ .subscribe_event = isp_subdev_subscribe_event,
+ .unsubscribe_event = isp_subdev_unsubscribe_event,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops isp_subdev_v4l2_pad_ops = {
+ .enum_mbus_code = isp_subdev_enum_mbus_code,
+ .get_fmt = isp_subdev_get_format,
+ .set_fmt = isp_subdev_set_format,
+ .get_selection = isp_subdev_get_selection,
+ .set_selection = isp_subdev_set_selection,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops isp_subdev_v4l2_ops = {
+ .core = &isp_subdev_v4l2_core_ops,
+ .pad = &isp_subdev_v4l2_pad_ops,
+};
+
+static void isp_subdev_init_params(struct atomisp_sub_device *asd)
+{
+ unsigned int i;
+
+ /* parameters initialization */
+ INIT_LIST_HEAD(&asd->s3a_stats);
+ INIT_LIST_HEAD(&asd->s3a_stats_in_css);
+ INIT_LIST_HEAD(&asd->s3a_stats_ready);
+ INIT_LIST_HEAD(&asd->dis_stats);
+ INIT_LIST_HEAD(&asd->dis_stats_in_css);
+ spin_lock_init(&asd->dis_stats_lock);
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ INIT_LIST_HEAD(&asd->metadata[i]);
+ INIT_LIST_HEAD(&asd->metadata_in_css[i]);
+ INIT_LIST_HEAD(&asd->metadata_ready[i]);
+ }
+}
+
+/*
+ * isp_subdev_link_setup - Setup isp subdev connections
+ * @entity: isp subdev media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * Return -EINVAL or zero on success.
+ */
+static int isp_subdev_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+ unsigned int i;
+
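+	/*
+	 * The case labels below combine the local pad index (low bits)
+	 * with a media entity function constant (high bits), so a single
+	 * switch can match on the (pad, remote entity type) pair.
+	 */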
+ switch (local->index | is_media_entity_v4l2_subdev(remote->entity)) {
+ case ATOMISP_SUBDEV_PAD_SINK | MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN:
+ /* Read from the sensor CSI2-ports. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ isp_sd->input = ATOMISP_SUBDEV_INPUT_NONE;
+ break;
+ }
+
+ if (isp_sd->input != ATOMISP_SUBDEV_INPUT_NONE)
+ return -EBUSY;
+
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ if (remote->entity != &isp->csi2_port[i].subdev.entity)
+ continue;
+
+ isp_sd->input = ATOMISP_SUBDEV_INPUT_CSI2_PORT1 + i;
+ return 0;
+ }
+
+ return -EINVAL;
+
+ case ATOMISP_SUBDEV_PAD_SINK | MEDIA_ENT_F_OLD_BASE:
+ /* read from memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (isp_sd->input >= ATOMISP_SUBDEV_INPUT_CSI2_PORT1 &&
+ isp_sd->input < (ATOMISP_SUBDEV_INPUT_CSI2_PORT1
+ + ATOMISP_CAMERA_NR_PORTS))
+ return -EBUSY;
+ isp_sd->input = ATOMISP_SUBDEV_INPUT_MEMORY;
+ } else {
+ if (isp_sd->input == ATOMISP_SUBDEV_INPUT_MEMORY)
+ isp_sd->input = ATOMISP_SUBDEV_INPUT_NONE;
+ }
+ break;
+
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW | MEDIA_ENT_F_OLD_BASE:
+ /* always write to memory */
+ break;
+
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF | MEDIA_ENT_F_OLD_BASE:
+ /* always write to memory */
+ break;
+
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE | MEDIA_ENT_F_OLD_BASE:
+ /* always write to memory */
+ break;
+
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO | MEDIA_ENT_F_OLD_BASE:
+ /* always write to memory */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations isp_subdev_media_ops = {
+ .link_setup = isp_subdev_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+ /* .set_power = v4l2_subdev_set_power, */
+};
+
+static int __atomisp_update_run_mode(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct v4l2_ctrl *ctrl = asd->run_mode;
+ struct v4l2_ctrl *c;
+ s32 mode;
+
+ if (ctrl->val != ATOMISP_RUN_MODE_VIDEO &&
+ asd->continuous_mode->val)
+ mode = ATOMISP_RUN_MODE_PREVIEW;
+ else
+ mode = ctrl->val;
+
+ c = v4l2_ctrl_find(
+ isp->inputs[asd->input_curr].camera->ctrl_handler,
+ V4L2_CID_RUN_MODE);
+
+ if (c)
+ return v4l2_ctrl_s_ctrl(c, mode);
+
+ return 0;
+}
+
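+/*
+ * Locked wrapper for external callers: __atomisp_update_run_mode()
+ * expects asd->ctrl_handler.lock to be held, which is also why s_ctrl()
+ * below (invoked with the handler lock already taken by the control
+ * framework) calls the __ variant directly.
+ */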
+int atomisp_update_run_mode(struct atomisp_sub_device *asd)
+{
+ int rval;
+
+ mutex_lock(asd->ctrl_handler.lock);
+ rval = __atomisp_update_run_mode(asd);
+ mutex_unlock(asd->ctrl_handler.lock);
+
+ return rval;
+}
+
+static int s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct atomisp_sub_device *asd = container_of(
+ ctrl->handler, struct atomisp_sub_device, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_RUN_MODE:
+ return __atomisp_update_run_mode(asd);
+ case V4L2_CID_DEPTH_MODE:
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_DISABLED) {
+ dev_err(asd->isp->dev,
+ "ISP is streaming, it is not supported to change the depth mode\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = &s_ctrl,
+};
+
+static const struct v4l2_ctrl_config ctrl_fmt_auto = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FMT_AUTO,
+ .name = "Automatic format guessing",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
+static const char *const ctrl_run_mode_menu[] = {
+ NULL,
+ "Video",
+ "Still capture",
+ "Continuous capture",
+ "Preview",
+};
+
+static const struct v4l2_ctrl_config ctrl_run_mode = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_RUN_MODE,
+ .name = "Atomisp run mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .def = 1,
+ .max = 4,
+ .qmenu = ctrl_run_mode_menu,
+};
+
+static const char *const ctrl_vfpp_mode_menu[] = {
+ "Enable", /* vfpp always enabled */
+ "Disable to scaler mode", /* CSS into video mode and disable */
+ "Disable to low latency mode", /* CSS into still mode and disable */
+};
+
+static const struct v4l2_ctrl_config ctrl_vfpp = {
+ .id = V4L2_CID_VFPP,
+ .name = "Atomisp vf postprocess",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .def = 0,
+ .max = 2,
+ .qmenu = ctrl_vfpp_mode_menu,
+};
+
+/*
+ * Control for ISP continuous mode
+ *
+ * When enabled, capture processing is possible without
+ * stopping the preview pipeline. When disabled, ISP needs
+ * to be restarted between preview and capture.
+ */
+static const struct v4l2_ctrl_config ctrl_continuous_mode = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_ATOMISP_CONTINUOUS_MODE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Continuous mode",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/*
+ * Control for continuous mode raw buffer size
+ *
+ * The size of the RAW ringbuffer sets limit on how much
+ * back in time application can go when requesting capture
+ * frames to be rendered, and how many frames can be rendered
+ * in a burst at full sensor rate.
+ *
+ * Note: this setting has a big impact on memory consumption of
+ * the CSS subsystem.
+ */
+static const struct v4l2_ctrl_config ctrl_continuous_raw_buffer_size = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Continuous raw ringbuffer size",
+ .min = 1,
+ .max = 100, /* depends on CSS version, runtime checked */
+ .step = 1,
+ .def = 3,
+};
+
+/*
+ * Control for enabling continuous viewfinder
+ *
+ * When enabled, and the ISP is in continuous mode (see ctrl_continuous_mode),
+ * preview pipeline continues concurrently with capture
+ * processing. When disabled, and continuous mode is used,
+ * preview is paused while captures are processed, but
+ * full pipeline restart is not needed.
+ *
+ * By setting this to disabled, capture processing is
+ * essentially given priority over preview, and the effective
+ * capture output rate may be higher than with continuous
+ * viewfinder enabled.
+ */
+static const struct v4l2_ctrl_config ctrl_continuous_viewfinder = {
+ .id = V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Continuous viewfinder",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/*
+ * Control for enabling Lock&Unlock Raw Buffer mechanism
+ *
+ * When enabled, raw buffers can be locked and unlocked: the application
+ * can hold a raw buffer's exp_id and unlock the buffer when it is no
+ * longer needed.
+ * Note: make sure to set this configuration before creating a stream.
+ */
+static const struct v4l2_ctrl_config ctrl_enable_raw_buffer_lock = {
+ .id = V4L2_CID_ENABLE_RAW_BUFFER_LOCK,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Lock Unlock Raw Buffer",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/*
+ * Control to disable digital zoom of the whole stream
+ *
+ * When it is true, the pipe configuration enable_dz will be set to false.
+ * This can improve performance by disabling the pp binary.
+ *
+ * Note: make sure to set this configuration before creating a stream.
+ */
+static const struct v4l2_ctrl_config ctrl_disable_dz = {
+ .id = V4L2_CID_DISABLE_DZ,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Disable digital zoom",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/*
+ * Control for ISP depth mode
+ *
+ * When enabled, the ISP deals with dual streams and the sensors run in
+ * slave/master mode; the slave sensor produces no output until the
+ * master sensor is streamed on.
+ */
+static const struct v4l2_ctrl_config ctrl_depth_mode = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_DEPTH_MODE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Depth mode",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/*
+ * Control for selecting the ISP version
+ *
+ * When enabled, ISP2.7 will be used; when disabled, the ISP defaults to
+ * ISP2.2.
+ * Note: make sure to set this configuration before creating a stream.
+ */
+static const struct v4l2_ctrl_config ctrl_select_isp_version = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_ATOMISP_SELECT_ISP_VERSION,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Select Isp version",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+#if 0 /* #ifdef CONFIG_ION */
+/*
+ * Control for ISP ion device fd
+ *
+ * userspace will open ion device and pass the fd to kernel.
+ * this fd will be used to map shared fd to buffer.
+ */
+/* V4L2_CID_ATOMISP_ION_DEVICE_FD is not defined */
+static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Ion Device Fd",
+ .min = -1,
+ .max = 1024,
+ .step = 1,
+ .def = ION_FD_UNSET
+};
+#endif
+
+static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
+{
+ pipe->type = buf_type;
+ pipe->asd = asd;
+ pipe->isp = asd->isp;
+ spin_lock_init(&pipe->irq_lock);
+ INIT_LIST_HEAD(&pipe->activeq);
+ INIT_LIST_HEAD(&pipe->activeq_out);
+ INIT_LIST_HEAD(&pipe->buffers_waiting_for_param);
+ INIT_LIST_HEAD(&pipe->per_frame_params);
+ memset(pipe->frame_request_config_id,
+ 0, VIDEO_MAX_FRAME * sizeof(unsigned int));
+ memset(pipe->frame_params,
+ 0, VIDEO_MAX_FRAME *
+ sizeof(struct atomisp_css_params_with_list *));
+}
+
+static void atomisp_init_acc_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_acc_pipe *pipe)
+{
+ pipe->asd = asd;
+ pipe->isp = asd->isp;
+ INIT_LIST_HEAD(&asd->acc.fw);
+ INIT_LIST_HEAD(&asd->acc.memory_maps);
+ ida_init(&asd->acc.ida);
+}
+
+/*
+ * isp_subdev_init_entities - Initialize V4L2 subdev and media entity
+ * @asd: ISP sub-device
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
+{
+ struct v4l2_subdev *sd = &asd->subdev;
+ struct media_pad *pads = asd->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ asd->input = ATOMISP_SUBDEV_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &isp_subdev_v4l2_ops);
+ sprintf(sd->name, "ATOMISP_SUBDEV_%d", asd->index);
+ v4l2_set_subdevdata(sd, asd);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[ATOMISP_SUBDEV_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW].flags = MEDIA_PAD_FL_SOURCE;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE_VF].flags = MEDIA_PAD_FL_SOURCE;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
+
+ asd->fmt[ATOMISP_SUBDEV_PAD_SINK].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_VF].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_VIDEO].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ me->ops = &isp_subdev_media_ops;
+ me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ ret = media_entity_pads_init(me, ATOMISP_SUBDEV_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ atomisp_init_subdev_pipe(asd, &asd->video_in,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ atomisp_init_subdev_pipe(asd, &asd->video_out_preview,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ atomisp_init_subdev_pipe(asd, &asd->video_out_vf,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ atomisp_init_subdev_pipe(asd, &asd->video_out_capture,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ atomisp_init_subdev_pipe(asd, &asd->video_out_video_capture,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ atomisp_init_acc_pipe(asd, &asd->video_acc);
+
+ ret = atomisp_video_init(&asd->video_in, "MEMORY");
+ if (ret < 0)
+ return ret;
+
+ ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE");
+ if (ret < 0)
+ return ret;
+
+ ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER");
+ if (ret < 0)
+ return ret;
+
+ ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW");
+ if (ret < 0)
+ return ret;
+
+ ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO");
+ if (ret < 0)
+ return ret;
+
+ atomisp_acc_init(&asd->video_acc, "ACC");
+
+ ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, 1);
+ if (ret)
+ return ret;
+
+ asd->fmt_auto = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_fmt_auto, NULL);
+ asd->run_mode = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_run_mode, NULL);
+ asd->vfpp = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_vfpp, NULL);
+ asd->continuous_mode = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_continuous_mode, NULL);
+ asd->continuous_viewfinder = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_continuous_viewfinder,
+ NULL);
+ asd->continuous_raw_buffer_size =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_continuous_raw_buffer_size,
+ NULL);
+
+ asd->enable_raw_buffer_lock =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_enable_raw_buffer_lock,
+ NULL);
+ asd->depth_mode =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_depth_mode,
+ NULL);
+ asd->disable_dz =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_disable_dz,
+ NULL);
+ if (atomisp_hw_is_isp2401) {
+ asd->select_isp_version = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_select_isp_version,
+ NULL);
+#if 0 /* #ifdef CONFIG_ION */
+ asd->ion_dev_fd = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_ion_dev_fd,
+ NULL);
+#endif
+ }
+
+ /* Make controls visible on subdev as well. */
+ asd->subdev.ctrl_handler = &asd->ctrl_handler;
+ spin_lock_init(&asd->raw_buffer_bitmap_lock);
+ return asd->ctrl_handler.error;
+}
+
+int atomisp_create_pads_links(struct atomisp_device *isp)
+{
+ struct atomisp_sub_device *asd;
+ int i, j, ret = 0;
+
+ isp->num_of_streams = 2;
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ for (j = 0; j < isp->num_of_streams; j++) {
+ ret =
+ media_create_pad_link(&isp->csi2_port[i].subdev.
+ entity, CSI2_PAD_SOURCE,
+ &isp->asd[j].subdev.entity,
+ ATOMISP_SUBDEV_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ for (i = 0; i < isp->input_cnt - 2; i++) {
+ ret = media_create_pad_link(&isp->inputs[i].camera->entity, 0,
+ &isp->csi2_port[isp->inputs[i].
+ port].subdev.entity,
+ CSI2_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ ret = media_create_pad_link(&asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW,
+ &asd->video_out_preview.vdev.entity,
+ 0, 0);
+ if (ret < 0)
+ return ret;
+ ret = media_create_pad_link(&asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SOURCE_VF,
+ &asd->video_out_vf.vdev.entity, 0,
+ 0);
+ if (ret < 0)
+ return ret;
+ ret = media_create_pad_link(&asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE,
+ &asd->video_out_capture.vdev.entity,
+ 0, 0);
+ if (ret < 0)
+ return ret;
+ ret = media_create_pad_link(&asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SOURCE_VIDEO,
+ &asd->video_out_video_capture.vdev.
+ entity, 0, 0);
+ if (ret < 0)
+ return ret;
+ /*
+		 * File input is only supported on subdev0, so do not
+		 * create pad links for subdevs other than subdev0.
+ */
+ if (asd->index)
+ return 0;
+ ret = media_create_pad_link(&asd->video_in.vdev.entity,
+ 0, &asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static void atomisp_subdev_cleanup_entities(struct atomisp_sub_device *asd)
+{
+ v4l2_ctrl_handler_free(&asd->ctrl_handler);
+
+ media_entity_cleanup(&asd->subdev.entity);
+}
+
+void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd)
+{
+ struct v4l2_fh *fh, *fh_tmp;
+ struct v4l2_event event;
+ unsigned int i, pending_event;
+
+ list_for_each_entry_safe(fh, fh_tmp,
+ &asd->subdev.devnode->fh_list, list) {
+ pending_event = v4l2_event_pending(fh);
+ for (i = 0; i < pending_event; i++)
+ v4l2_event_dequeue(fh, &event, 1);
+ }
+}
+
+void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd)
+{
+ atomisp_subdev_cleanup_entities(asd);
+ v4l2_device_unregister_subdev(&asd->subdev);
+ atomisp_video_unregister(&asd->video_in);
+ atomisp_video_unregister(&asd->video_out_preview);
+ atomisp_video_unregister(&asd->video_out_vf);
+ atomisp_video_unregister(&asd->video_out_capture);
+ atomisp_video_unregister(&asd->video_out_video_capture);
+ atomisp_acc_unregister(&asd->video_acc);
+}
+
+int atomisp_subdev_register_entities(struct atomisp_sub_device *asd,
+ struct v4l2_device *vdev)
+{
+ int ret;
+ u32 device_caps;
+
+ /*
+ * FIXME: check if all device caps are properly initialized.
+ * Should any of those use V4L2_CAP_META_OUTPUT? Probably yes.
+ */
+
+ device_caps = V4L2_CAP_IO_MC |
+ V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING;
+
+ /* Register the subdev and video node. */
+
+ ret = v4l2_device_register_subdev(vdev, &asd->subdev);
+ if (ret < 0)
+ goto error;
+
+ asd->video_out_capture.vdev.v4l2_dev = vdev;
+ asd->video_out_capture.vdev.device_caps = device_caps |
+ V4L2_CAP_VIDEO_OUTPUT;
+ ret = video_register_device(&asd->video_out_capture.vdev,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0)
+ goto error;
+
+ asd->video_out_vf.vdev.v4l2_dev = vdev;
+ asd->video_out_vf.vdev.device_caps = device_caps |
+ V4L2_CAP_VIDEO_OUTPUT;
+ ret = video_register_device(&asd->video_out_vf.vdev,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0)
+ goto error;
+ asd->video_out_preview.vdev.v4l2_dev = vdev;
+ asd->video_out_preview.vdev.device_caps = device_caps |
+ V4L2_CAP_VIDEO_OUTPUT;
+ ret = video_register_device(&asd->video_out_preview.vdev,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0)
+ goto error;
+ asd->video_out_video_capture.vdev.v4l2_dev = vdev;
+ asd->video_out_video_capture.vdev.device_caps = device_caps |
+ V4L2_CAP_VIDEO_OUTPUT;
+ ret = video_register_device(&asd->video_out_video_capture.vdev,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0)
+ goto error;
+ asd->video_acc.vdev.v4l2_dev = vdev;
+ asd->video_acc.vdev.device_caps = device_caps |
+ V4L2_CAP_VIDEO_OUTPUT;
+ ret = video_register_device(&asd->video_acc.vdev,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0)
+ goto error;
+
+ /*
+	 * File input is only supported on subdev0, so do not create
+	 * a video node for subdevs other than subdev0.
+ */
+ if (asd->index)
+ return 0;
+
+ asd->video_in.vdev.v4l2_dev = vdev;
+ asd->video_in.vdev.device_caps = device_caps |
+ V4L2_CAP_VIDEO_CAPTURE;
+ ret = video_register_device(&asd->video_in.vdev,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ atomisp_subdev_unregister_entities(asd);
+ return ret;
+}
+
+/*
+ * atomisp_subdev_init - ISP Subdevice initialization.
+ * @isp: the ATOM ISP device
+ *
+ * TODO: Get the initialization values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int atomisp_subdev_init(struct atomisp_device *isp)
+{
+ struct atomisp_sub_device *asd;
+ int i, ret = 0;
+
+ /*
+	 * CSS2.0 running on ISP2400 supports
+	 * multiple streams.
+ */
+ isp->num_of_streams = 2;
+	isp->asd = devm_kcalloc(isp->dev, isp->num_of_streams,
+				sizeof(struct atomisp_sub_device),
+				GFP_KERNEL);
+ if (!isp->asd)
+ return -ENOMEM;
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ spin_lock_init(&asd->lock);
+ asd->isp = isp;
+ isp_subdev_init_params(asd);
+ asd->index = i;
+ ret = isp_subdev_init_entities(asd);
+ if (ret < 0) {
+ atomisp_subdev_cleanup_entities(asd);
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
new file mode 100644
index 000000000000..b0d561224beb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
@@ -0,0 +1,466 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef __ATOMISP_SUBDEV_H__
+#define __ATOMISP_SUBDEV_H__
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf-core.h>
+
+#include "atomisp_common.h"
+#include "atomisp_compat.h"
+#include "atomisp_v4l2.h"
+
+#include "ia_css.h"
+
+/* EXP_ID's range is 1 ~ 250 */
+#define ATOMISP_MAX_EXP_ID (250)
+enum atomisp_subdev_input_entity {
+ ATOMISP_SUBDEV_INPUT_NONE,
+ ATOMISP_SUBDEV_INPUT_MEMORY,
+ ATOMISP_SUBDEV_INPUT_CSI2,
+	/*
+	 * The following CSI2 port values must stay contiguous: the code
+	 * addresses them as ATOMISP_SUBDEV_INPUT_CSI2_PORT1 + i.
+	 */
+ ATOMISP_SUBDEV_INPUT_CSI2_PORT1,
+ ATOMISP_SUBDEV_INPUT_CSI2_PORT2,
+ ATOMISP_SUBDEV_INPUT_CSI2_PORT3,
+};
+
+#define ATOMISP_SUBDEV_PAD_SINK 0
+/* capture output for still frames */
+#define ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE 1
+/* viewfinder output for downscaled capture output */
+#define ATOMISP_SUBDEV_PAD_SOURCE_VF 2
+/* preview output for display */
+#define ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW 3
+/* main output for video pipeline */
+#define ATOMISP_SUBDEV_PAD_SOURCE_VIDEO 4
+#define ATOMISP_SUBDEV_PADS_NUM 5
+
+struct atomisp_in_fmt_conv {
+ u32 code;
+ u8 bpp; /* bits per pixel */
+ u8 depth; /* uncompressed */
+ enum atomisp_input_format atomisp_in_fmt;
+ enum atomisp_css_bayer_order bayer_order;
+ enum atomisp_input_format css_stream_fmt;
+};
+
+struct atomisp_sub_device;
+
+struct atomisp_video_pipe {
+ struct video_device vdev;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+ struct videobuf_queue capq;
+ struct videobuf_queue outq;
+ struct list_head activeq;
+ struct list_head activeq_out;
+ /*
+	 * The buffers waiting for per-frame parameters; this is only valid
+	 * in per-frame setting mode.
+ */
+ struct list_head buffers_waiting_for_param;
+	/* the linked list storing per-frame parameters */
+ struct list_head per_frame_params;
+
+ unsigned int buffers_in_css;
+
+ /* irq_lock is used to protect video buffer state change operations and
+ * also to make activeq, activeq_out, capq and outq list
+ * operations atomic. */
+ spinlock_t irq_lock;
+ unsigned int users;
+
+ struct atomisp_device *isp;
+ struct v4l2_pix_format pix;
+ u32 sh_fmt;
+
+ struct atomisp_sub_device *asd;
+
+ /*
+	 * This frame_config_id is obtained from CSS when dequeueing buffers
+	 * from CSS; it indicates which parameters have been applied.
+ */
+ unsigned int frame_config_id[VIDEO_MAX_FRAME];
+ /*
+	 * This config id is set when the camera HAL enqueues a buffer; a
+	 * non-zero value indicates which parameters need to be applied.
+ */
+ unsigned int frame_request_config_id[VIDEO_MAX_FRAME];
+ struct atomisp_css_params_with_list *frame_params[VIDEO_MAX_FRAME];
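+	/*
+	 * Per-frame flow (illustrative sketch, inferred from the fields
+	 * above): the HAL enqueues buffer i with
+	 * frame_request_config_id[i] = N, the driver attaches the matching
+	 * parameter set via frame_params[i], and on dequeue the applied
+	 * set is reported back through frame_config_id[i].
+	 */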
+
+ /*
+ * move wdt from asd struct to create wdt for each pipe
+ */
+ /* ISP2401 */
+ struct timer_list wdt;
+ unsigned int wdt_duration; /* in jiffies */
+ unsigned long wdt_expires;
+ atomic_t wdt_count;
+};
+
+struct atomisp_acc_pipe {
+ struct video_device vdev;
+ unsigned int users;
+ bool running;
+ struct atomisp_sub_device *asd;
+ struct atomisp_device *isp;
+};
+
+struct atomisp_pad_format {
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+};
+
+/* Internal states for flash process */
+enum atomisp_flash_state {
+ ATOMISP_FLASH_IDLE,
+ ATOMISP_FLASH_REQUESTED,
+ ATOMISP_FLASH_ONGOING,
+ ATOMISP_FLASH_DONE
+};
+
+/*
+ * This structure is used to cache the CSS parameters; it aligns to
+ * struct ia_css_isp_config but omits unsupported and deprecated parts.
+ */
+struct atomisp_css_params {
+ struct ia_css_wb_config wb_config;
+ struct ia_css_cc_config cc_config;
+ struct ia_css_tnr_config tnr_config;
+ struct ia_css_ecd_config ecd_config;
+ struct ia_css_ynr_config ynr_config;
+ struct ia_css_fc_config fc_config;
+ struct ia_css_formats_config formats_config;
+ struct ia_css_cnr_config cnr_config;
+ struct ia_css_macc_config macc_config;
+ struct ia_css_ctc_config ctc_config;
+ struct ia_css_aa_config aa_config;
+ struct ia_css_aa_config baa_config;
+ struct ia_css_ce_config ce_config;
+ struct ia_css_ob_config ob_config;
+ struct ia_css_dp_config dp_config;
+ struct ia_css_de_config de_config;
+ struct ia_css_gc_config gc_config;
+ struct ia_css_nr_config nr_config;
+ struct ia_css_ee_config ee_config;
+ struct ia_css_anr_config anr_config;
+ struct ia_css_3a_config s3a_config;
+ struct ia_css_xnr_config xnr_config;
+ struct ia_css_dz_config dz_config;
+ struct ia_css_cc_config yuv2rgb_cc_config;
+ struct ia_css_cc_config rgb2yuv_cc_config;
+ struct ia_css_macc_table macc_table;
+ struct ia_css_gamma_table gamma_table;
+ struct ia_css_ctc_table ctc_table;
+
+ struct ia_css_xnr_table xnr_table;
+ struct ia_css_rgb_gamma_table r_gamma_table;
+ struct ia_css_rgb_gamma_table g_gamma_table;
+ struct ia_css_rgb_gamma_table b_gamma_table;
+
+ struct ia_css_vector motion_vector;
+ struct ia_css_anr_thres anr_thres;
+
+ struct ia_css_dvs_6axis_config *dvs_6axis;
+ struct ia_css_dvs2_coefficients *dvs2_coeff;
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_morph_table *morph_table;
+
+ /*
+	 * Used to store the user pointer address of the frame. The driver
+	 * needs to translate it to an ia_css_frame * and pass it to CSS.
+ */
+ void *output_frame;
+ u32 isp_config_id;
+
+ /* Indicates which parameters need to be updated. */
+ struct atomisp_parameters update_flag;
+};
+
+struct atomisp_subdev_params {
+	/* FIXME: Determines whether raw capture buffers are being passed to
+	 * user space. Unimplemented for now. */
+ int online_process;
+ int yuv_ds_en;
+ unsigned int color_effect;
+ bool gdc_cac_en;
+ bool macc_en;
+ bool bad_pixel_en;
+ bool video_dis_en;
+ bool sc_en;
+ bool fpn_en;
+ bool xnr_en;
+ bool low_light;
+ int false_color;
+ unsigned int histogram_elenum;
+
+ /* Current grid info */
+ struct atomisp_css_grid_info curr_grid_info;
+ enum atomisp_css_pipe_id s3a_enabled_pipe;
+
+ int s3a_output_bytes;
+
+ bool dis_proj_data_valid;
+
+ struct ia_css_dz_config dz_config; /** Digital Zoom */
+ struct ia_css_capture_config capture_config;
+
+ struct atomisp_css_isp_config config;
+
+ /* current configurations */
+ struct atomisp_css_params css_param;
+
+ /*
+ * Intermediate buffers used to communicate data between
+ * CSS and user space.
+ */
+ struct ia_css_3a_statistics *s3a_user_stat;
+
+ void *metadata_user[ATOMISP_METADATA_TYPE_NUM];
+ u32 metadata_width_size;
+
+ struct ia_css_dvs2_statistics *dvs_stat;
+ struct atomisp_css_dvs_6axis *dvs_6axis;
+ u32 exp_id;
+ int dvs_hor_coef_bytes;
+ int dvs_ver_coef_bytes;
+ int dvs_ver_proj_bytes;
+ int dvs_hor_proj_bytes;
+
+ /* Flash */
+ int num_flash_frames;
+ enum atomisp_flash_state flash_state;
+ enum atomisp_frame_status last_frame_status;
+
+ /* continuous capture */
+ struct atomisp_cont_capture_conf offline_parm;
+ /* Flag to check if driver needs to update params to css */
+ bool css_update_params_needed;
+};
+
+struct atomisp_css_params_with_list {
+ /* parameters for CSS */
+ struct atomisp_css_params params;
+ struct list_head list;
+};
+
+struct atomisp_acc_fw {
+ struct atomisp_css_fw_info *fw;
+ unsigned int handle;
+ unsigned int flags;
+ unsigned int type;
+ struct {
+ size_t length;
+ unsigned long css_ptr;
+ } args[ATOMISP_ACC_NR_MEMORY];
+ struct list_head list;
+};
+
+struct atomisp_map {
+ ia_css_ptr ptr;
+ size_t length;
+ struct list_head list;
+ /* FIXME: should keep book which maps are currently used
+ * by binaries and not allow releasing those
+ * which are in use. Implement by reference counting.
+ */
+};
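A minimal sketch of the reference counting this FIXME asks for, based on the kernel's kref API; the wrapper struct and helper names are invented for illustration:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct atomisp_map_ref {
		struct atomisp_map map;
		struct kref refcount;	/* one reference per binary using the map */
	};

	static void atomisp_map_free(struct kref *kref)
	{
		struct atomisp_map_ref *ref =
			container_of(kref, struct atomisp_map_ref, refcount);

		list_del(&ref->map.list);	/* unlink, then release */
		kfree(ref);
	}

	static void atomisp_map_get(struct atomisp_map_ref *ref)
	{
		kref_get(&ref->refcount);	/* a binary starts using the map */
	}

	static void atomisp_map_put(struct atomisp_map_ref *ref)
	{
		kref_put(&ref->refcount, atomisp_map_free);	/* last put frees */
	}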
+
+struct atomisp_sub_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[ATOMISP_SUBDEV_PADS_NUM];
+ struct atomisp_pad_format fmt[ATOMISP_SUBDEV_PADS_NUM];
+ u16 capture_pad; /* main capture pad; defines much of isp config */
+
+ enum atomisp_subdev_input_entity input;
+ unsigned int output;
+ struct atomisp_video_pipe video_in;
+ struct atomisp_video_pipe video_out_capture; /* capture output */
+ struct atomisp_video_pipe video_out_vf; /* viewfinder output */
+ struct atomisp_video_pipe video_out_preview; /* preview output */
+ struct atomisp_acc_pipe video_acc;
+ /* video pipe main output */
+ struct atomisp_video_pipe video_out_video_capture;
+ /* struct isp_subdev_params params; */
+ spinlock_t lock;
+ struct atomisp_device *isp;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *fmt_auto;
+ struct v4l2_ctrl *run_mode;
+ struct v4l2_ctrl *depth_mode;
+ struct v4l2_ctrl *vfpp;
+ struct v4l2_ctrl *continuous_mode;
+ struct v4l2_ctrl *continuous_raw_buffer_size;
+ struct v4l2_ctrl *continuous_viewfinder;
+ struct v4l2_ctrl *enable_raw_buffer_lock;
+
+ /* ISP2401 */
+ struct v4l2_ctrl *ion_dev_fd;
+ struct v4l2_ctrl *select_isp_version;
+
+ struct v4l2_ctrl *disable_dz;
+
+ struct {
+ struct list_head fw;
+ struct list_head memory_maps;
+ struct atomisp_css_pipeline *pipeline;
+ bool extension_mode;
+ struct ida ida;
+ struct completion acc_done;
+ void *acc_stages;
+ } acc;
+
+ struct atomisp_subdev_params params;
+
+ struct atomisp_stream_env stream_env[ATOMISP_INPUT_STREAM_NUM];
+
+ struct v4l2_pix_format dvs_envelop;
+ unsigned int s3a_bufs_in_css[CSS_PIPE_ID_NUM];
+ unsigned int dis_bufs_in_css;
+
+ unsigned int metadata_bufs_in_css
+ [ATOMISP_INPUT_STREAM_NUM][CSS_PIPE_ID_NUM];
+ /* The list of free and available metadata buffers for CSS */
+ struct list_head metadata[ATOMISP_METADATA_TYPE_NUM];
+ /* The list of metadata buffers which have been en-queued to CSS */
+ struct list_head metadata_in_css[ATOMISP_METADATA_TYPE_NUM];
+ /* The list of metadata buffers which are ready for userspace to get */
+ struct list_head metadata_ready[ATOMISP_METADATA_TYPE_NUM];
+
+ /* The list of free and available s3a stat buffers for CSS */
+ struct list_head s3a_stats;
+ /* The list of s3a stat buffers which have been en-queued to CSS */
+ struct list_head s3a_stats_in_css;
+ /* The list of s3a stat buffers which are ready for userspace to get */
+ struct list_head s3a_stats_ready;
+
+ struct list_head dis_stats;
+ struct list_head dis_stats_in_css;
+ spinlock_t dis_stats_lock;
+
+ struct atomisp_css_frame *vf_frame; /* TODO: needed? */
+ struct atomisp_css_frame *raw_output_frame;
+ enum atomisp_frame_status frame_status[VIDEO_MAX_FRAME];
+
+ /* This field specifies which camera (v4l2 input) is selected. */
+ int input_curr;
+ /* This field specifies which sensor is selected when multiple
+ * sensors are connected to the same MIPI port. */
+ int sensor_curr;
+
+ atomic_t sof_count;
+ atomic_t sequence; /* Sequence value that is assigned to buffer. */
+ atomic_t sequence_temp;
+
+ unsigned int streaming; /* Hold both mutex and lock to change this */
+ bool stream_prepared; /* whether css stream is created */
+
+ /* subdev index: will be used to show which subdev is holding the
+ * resource, like which camera is used by which subdev
+ */
+ unsigned int index;
+
+ /* delayed memory allocation for css */
+ struct completion init_done;
+ struct workqueue_struct *delayed_init_workq;
+ unsigned int delayed_init;
+ struct work_struct delayed_init_work;
+
+ unsigned int latest_preview_exp_id; /* CSS ZSL/SDV raw buffer id */
+
+ unsigned int mipi_frame_size;
+
+ bool copy_mode; /* CSI2+ use copy mode */
+ bool yuvpp_mode; /* CSI2+ yuvpp pipe */
+
+ int raw_buffer_bitmap[ATOMISP_MAX_EXP_ID / 32 +
+ 1]; /* Record each Raw Buffer lock status */
+ int raw_buffer_locked_count;
+ spinlock_t raw_buffer_bitmap_lock;
+
+ /* ISP 2400 */
+ struct timer_list wdt;
+ unsigned int wdt_duration; /* in jiffies */
+ unsigned long wdt_expires;
+
+ /* ISP2401 */
+ bool re_trigger_capture;
+
+ struct atomisp_resolution sensor_array_res;
+ bool high_speed_mode; /* Indicate whether now is a high speed mode */
+ int pending_capture_request; /* Indicates the number of pending capture requests. */
+
+ unsigned int preview_exp_id;
+ unsigned int postview_exp_id;
+};
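The raw_buffer_bitmap member above packs one lock bit per exposure ID into an array of ints, 32 IDs per slot. Roughly, marking and testing an exp_id would look like the following, assuming callers hold raw_buffer_bitmap_lock (helper names invented):

	static void raw_buffer_mark_locked(struct atomisp_sub_device *asd, int exp_id)
	{
		/* Caller must hold asd->raw_buffer_bitmap_lock. */
		asd->raw_buffer_bitmap[exp_id / 32] |= 1 << (exp_id % 32);
		asd->raw_buffer_locked_count++;
	}

	static bool raw_buffer_is_locked(struct atomisp_sub_device *asd, int exp_id)
	{
		return asd->raw_buffer_bitmap[exp_id / 32] & (1 << (exp_id % 32));
	}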
+
+extern const struct atomisp_in_fmt_conv atomisp_in_fmt_conv[];
+
+u32 atomisp_subdev_uncompressed_code(u32 code);
+bool atomisp_subdev_is_compressed(u32 code);
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv(u32 code);
+
+/* ISP2400 */
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ enum atomisp_input_format atomisp_in_fmt);
+
+/* ISP2401 */
+const struct atomisp_in_fmt_conv
+*atomisp_find_in_fmt_conv_by_atomisp_in_fmt(enum atomisp_input_format
+ atomisp_in_fmt);
+
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_compressed(u32 code);
+bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd,
+ unsigned int source_pad);
+uint16_t atomisp_subdev_source_pad(struct video_device *vdev);
+
+/* Get pointer to appropriate format */
+struct v4l2_mbus_framefmt
+*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ uint32_t pad);
+struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which, uint32_t pad,
+ uint32_t target);
+int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which, uint32_t pad, uint32_t target,
+ u32 flags, struct v4l2_rect *r);
+/* Actually set the format */
+void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ u32 pad, struct v4l2_mbus_framefmt *ffmt);
+
+int atomisp_update_run_mode(struct atomisp_sub_device *asd);
+
+void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd);
+
+void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd);
+int atomisp_subdev_register_entities(struct atomisp_sub_device *asd,
+ struct v4l2_device *vdev);
+int atomisp_subdev_init(struct atomisp_device *isp);
+void atomisp_subdev_cleanup(struct atomisp_device *isp);
+int atomisp_create_pads_links(struct atomisp_device *isp);
+
+#endif /* __ATOMISP_SUBDEV_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_tables.h b/drivers/staging/media/atomisp/pci/atomisp_tables.h
new file mode 100644
index 000000000000..22eac8a25dba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_tables.h
@@ -0,0 +1,187 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef __ATOMISP_TABLES_H__
+#define __ATOMISP_TABLES_H__
+
+#include "sh_css_params.h"
+
+/* Sepia image effect table */
+static struct atomisp_css_cc_config sepia_cc_config = {
+ .fraction_bits = 8,
+ .matrix = {141, 18, 68, -40, -5, -19, 35, 4, 16},
+};
+
+/* Negative image effect table */
+static struct atomisp_css_cc_config nega_cc_config = {
+ .fraction_bits = 8,
+ .matrix = {255, 29, 120, 0, 374, 342, 0, 672, -301},
+};
+
+/* Mono image effect table */
+static struct atomisp_css_cc_config mono_cc_config = {
+ .fraction_bits = 8,
+ .matrix = {255, 29, 120, 0, 0, 0, 0, 0, 0},
+};
+
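For reference: with .fraction_bits = 8, each matrix entry in these cc tables is a signed fixed-point value scaled by 2^8 = 256, so the leading sepia coefficient 141 corresponds to 141/256 ≈ 0.55. The MACC tables below evidently use 8192 (= 2^13) as the unity coefficient, which is why the hue axes left untouched by an effect are filled with 8192.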
+/* Skin whitening image effect table */
+static struct atomisp_css_macc_table skin_low_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 7168, 0, 2048, 8192,
+ 5120, -1024, 2048, 8192,
+ 8192, 2048, -1024, 5120,
+ 8192, 2048, 0, 7168,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+static struct atomisp_css_macc_table skin_medium_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 5120, 0, 6144, 8192,
+ 3072, -1024, 2048, 6144,
+ 6144, 2048, -1024, 3072,
+ 8192, 6144, 0, 5120,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+static struct atomisp_css_macc_table skin_high_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 4096, 0, 8192, 8192,
+ 0, -2048, 4096, 6144,
+ 6144, 4096, -2048, 0,
+ 8192, 8192, 0, 4096,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+/* Blue enhancement image effect table */
+static struct atomisp_css_macc_table blue_macc_table = {
+ .data = {
+ 9728, -3072, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 9728, 0, -3072, 8192,
+ 12800, 1536, -3072, 8192,
+ 11264, 0, 0, 11264,
+ 9728, -3072, 0, 11264
+ }
+};
+
+/* Green enhancement image effect table */
+static struct atomisp_css_macc_table green_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 10240, 4096, 0, 8192,
+ 10240, 4096, 0, 12288,
+ 12288, 0, 0, 12288,
+ 14336, -2048, 4096, 8192,
+ 10240, 0, 4096, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+static struct atomisp_css_ctc_table vivid_ctc_table = {
+ .data.vamem_2 = {
+ 0, 384, 837, 957, 1011, 1062, 1083, 1080,
+ 1078, 1077, 1053, 1039, 1012, 992, 969, 951,
+ 929, 906, 886, 866, 845, 823, 809, 790,
+ 772, 758, 741, 726, 711, 701, 688, 675,
+ 666, 656, 648, 639, 633, 626, 618, 612,
+ 603, 594, 582, 572, 557, 545, 529, 516,
+ 504, 491, 480, 467, 459, 447, 438, 429,
+ 419, 412, 404, 397, 389, 382, 376, 368,
+ 363, 357, 351, 345, 340, 336, 330, 326,
+ 321, 318, 312, 308, 304, 300, 297, 294,
+ 291, 286, 284, 281, 278, 275, 271, 268,
+ 261, 257, 251, 245, 240, 235, 232, 225,
+ 223, 218, 213, 209, 206, 204, 199, 197,
+ 193, 189, 186, 185, 183, 179, 177, 175,
+ 172, 170, 169, 167, 164, 164, 162, 160,
+ 158, 157, 156, 154, 154, 152, 151, 150,
+ 149, 148, 146, 147, 146, 144, 143, 143,
+ 142, 141, 140, 141, 139, 138, 138, 138,
+ 137, 136, 136, 135, 134, 134, 134, 133,
+ 132, 132, 131, 130, 131, 130, 129, 128,
+ 129, 127, 127, 127, 127, 125, 125, 125,
+ 123, 123, 122, 120, 118, 115, 114, 111,
+ 110, 108, 106, 105, 103, 102, 100, 99,
+ 97, 97, 96, 95, 94, 93, 93, 91,
+ 91, 91, 90, 90, 89, 89, 88, 88,
+ 89, 88, 88, 87, 87, 87, 87, 86,
+ 87, 87, 86, 87, 86, 86, 84, 84,
+ 82, 80, 78, 76, 74, 72, 70, 68,
+ 67, 65, 62, 60, 58, 56, 55, 54,
+ 53, 51, 49, 49, 47, 45, 45, 45,
+ 41, 40, 39, 39, 34, 33, 34, 32,
+ 25, 23, 24, 20, 13, 9, 12, 0,
+ 0
+ }
+};
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp_tpg.c
new file mode 100644
index 000000000000..97176b54d1ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_tpg.c
@@ -0,0 +1,163 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include "atomisp_internal.h"
+#include "atomisp_tpg.h"
+
+static int tpg_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+static int tpg_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ /* stub; intentionally left unimplemented */
+ return 0;
+}
+
+static int tpg_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+
+ if (format->pad)
+ return -EINVAL;
+ /* only raw8 grbg is supported by TPG */
+ fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ return 0;
+ }
+ return 0;
+}
+
+static int tpg_log_status(struct v4l2_subdev *sd)
+{
+ /* stub; intentionally left unimplemented */
+ return 0;
+}
+
+static int tpg_s_power(struct v4l2_subdev *sd, int on)
+{
+ return 0;
+}
+
+static int tpg_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* stub; intentionally left unimplemented */
+ return 0;
+}
+
+static int tpg_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ /* stub; intentionally left unimplemented */
+ return 0;
+}
+
+static int tpg_enum_frame_ival(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ /* stub; intentionally left unimplemented */
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops tpg_video_ops = {
+ .s_stream = tpg_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops tpg_core_ops = {
+ .log_status = tpg_log_status,
+ .s_power = tpg_s_power,
+};
+
+static const struct v4l2_subdev_pad_ops tpg_pad_ops = {
+ .enum_mbus_code = tpg_enum_mbus_code,
+ .enum_frame_size = tpg_enum_frame_size,
+ .enum_frame_interval = tpg_enum_frame_ival,
+ .get_fmt = tpg_get_fmt,
+ .set_fmt = tpg_set_fmt,
+};
+
+static const struct v4l2_subdev_ops tpg_ops = {
+ .core = &tpg_core_ops,
+ .video = &tpg_video_ops,
+ .pad = &tpg_pad_ops,
+};
+
+void atomisp_tpg_unregister_entities(struct atomisp_tpg_device *tpg)
+{
+ media_entity_cleanup(&tpg->sd.entity);
+ v4l2_device_unregister_subdev(&tpg->sd);
+}
+
+int atomisp_tpg_register_entities(struct atomisp_tpg_device *tpg,
+ struct v4l2_device *vdev)
+{
+ int ret;
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &tpg->sd);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ atomisp_tpg_unregister_entities(tpg);
+ return ret;
+}
+
+void atomisp_tpg_cleanup(struct atomisp_device *isp)
+{
+}
+
+int atomisp_tpg_init(struct atomisp_device *isp)
+{
+ struct atomisp_tpg_device *tpg = &isp->tpg;
+ struct v4l2_subdev *sd = &tpg->sd;
+ struct media_pad *pads = tpg->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ tpg->isp = isp;
+ v4l2_subdev_init(sd, &tpg_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strcpy(sd->name, "tpg_subdev");
+ v4l2_set_subdevdata(sd, tpg);
+
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+
+ ret = media_entity_pads_init(me, 1, pads);
+ if (ret < 0)
+ goto fail;
+ return 0;
+fail:
+ atomisp_tpg_cleanup(isp);
+ return ret;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_tpg.h b/drivers/staging/media/atomisp/pci/atomisp_tpg.h
new file mode 100644
index 000000000000..cf492d757773
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_tpg.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_TPG_H__
+#define __ATOMISP_TPG_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+struct atomisp_tpg_device {
+ struct v4l2_subdev sd;
+ struct atomisp_device *isp;
+ struct media_pad pads[1];
+};
+
+void atomisp_tpg_cleanup(struct atomisp_device *isp);
+int atomisp_tpg_init(struct atomisp_device *isp);
+void atomisp_tpg_unregister_entities(struct atomisp_tpg_device *tpg);
+int atomisp_tpg_register_entities(struct atomisp_tpg_device *tpg,
+ struct v4l2_device *vdev);
+
+#endif /* __ATOMISP_TPG_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_trace_event.h b/drivers/staging/media/atomisp/pci/atomisp_trace_event.h
new file mode 100644
index 000000000000..4d7a6794ee66
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_trace_event.h
@@ -0,0 +1,127 @@
+/*
+ * Support Camera Imaging tracer core.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM atomisp
+
+#if !defined(ATOMISP_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define ATOMISP_TRACE_EVENT_H
+
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+TRACE_EVENT(camera_meminfo,
+
+ TP_PROTO(const char *name, int uptr_size, int counter, int sys_size,
+ int sys_res_size, int cam_sys_use, int cam_dyc_use,
+ int cam_res_use),
+
+ TP_ARGS(name, uptr_size, counter, sys_size, sys_res_size, cam_sys_use,
+ cam_dyc_use, cam_res_use),
+
+ TP_STRUCT__entry(
+ __array(char, name, 24)
+ __field(int, uptr_size)
+ __field(int, counter)
+ __field(int, sys_size)
+ __field(int, sys_res_size)
+ __field(int, cam_res_use)
+ __field(int, cam_dyc_use)
+ __field(int, cam_sys_use)
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->name, name, 24);
+ __entry->uptr_size = uptr_size;
+ __entry->counter = counter;
+ __entry->sys_size = sys_size;
+ __entry->sys_res_size = sys_res_size;
+ __entry->cam_res_use = cam_res_use;
+ __entry->cam_dyc_use = cam_dyc_use;
+ __entry->cam_sys_use = cam_sys_use;
+ ),
+
+ TP_printk(
+ "<%s> User ptr memory:%d pages,\tISP private memory used:%d pages:\tsysFP system size:%d,\treserved size:%d\tcamFP sysUse:%d,\tdycUse:%d,\tresUse:%d.\n",
+ __entry->name, __entry->uptr_size, __entry->counter,
+ __entry->sys_size, __entry->sys_res_size, __entry->cam_sys_use,
+ __entry->cam_dyc_use, __entry->cam_res_use)
+ );
+
+TRACE_EVENT(camera_debug,
+
+ TP_PROTO(const char *name, char *info, const int line),
+
+ TP_ARGS(name, info, line),
+
+ TP_STRUCT__entry(
+ __array(char, name, 24)
+ __array(char, info, 24)
+ __field(int, line)
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->name, name, 24);
+ strlcpy(__entry->info, info, 24);
+ __entry->line = line;
+ ),
+
+ TP_printk("<%s>-<%d> %s\n", __entry->name, __entry->line,
+ __entry->info)
+ );
+
+TRACE_EVENT(ipu_cstate,
+
+ TP_PROTO(int cstate),
+
+ TP_ARGS(cstate),
+
+ TP_STRUCT__entry(
+ __field(int, cstate)
+ ),
+
+ TP_fast_assign(
+ __entry->cstate = cstate;
+ ),
+
+ TP_printk("cstate=%d", __entry->cstate)
+ );
+
+TRACE_EVENT(ipu_pstate,
+
+ TP_PROTO(int freq, int util),
+
+ TP_ARGS(freq, util),
+
+ TP_STRUCT__entry(
+ __field(int, freq)
+ __field(int, util)
+ ),
+
+ TP_fast_assign(
+ __entry->freq = freq;
+ __entry->util = util;
+ ),
+
+ TP_printk("freq=%d util=%d", __entry->freq, __entry->util)
+ );
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE atomisp_trace_event
+/* This part must be outside protection */
+#include <trace/define_trace.h>
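Each TRACE_EVENT() above expands into a trace_<name>() helper once a single .c file defines CREATE_TRACE_POINTS before including this header. A usage sketch (the argument values are made up for illustration):

	#define CREATE_TRACE_POINTS
	#include "atomisp_trace_event.h"

	/* ... later, at a point of interest in the driver: */
	trace_camera_debug(__func__, "stream on", __LINE__);
	trace_ipu_pstate(400, 75);	/* assumed units: MHz and percent */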
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
new file mode 100644
index 000000000000..98cff13d9f93
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -0,0 +1,1997 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/interrupt.h>
+
+#include <asm/iosf_mbi.h>
+
+#include "../../include/linux/atomisp_gmin_platform.h"
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_file.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_internal.h"
+#include "atomisp_acc.h"
+#include "atomisp-regs.h"
+#include "atomisp_dfs_tables.h"
+#include "atomisp_drvfs.h"
+#include "hmm/hmm.h"
+#include "atomisp_trace_event.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+#include "device_access.h"
+
+/* Timeouts to wait for all subdevs to be registered */
+#define SUBDEV_WAIT_TIMEOUT 50 /* ms */
+#define SUBDEV_WAIT_TIMEOUT_MAX_COUNT 40 /* up to 2 seconds */
+
+/* G-Min addition: pull this in from intel_mid_pm.h */
+#define CSTATE_EXIT_LATENCY_C1 1
+
+static uint skip_fwload;
+module_param(skip_fwload, uint, 0644);
+MODULE_PARM_DESC(skip_fwload, "Skip atomisp firmware load");
+
+/* set reserved memory pool size in pages */
+static unsigned int repool_pgnr;
+module_param(repool_pgnr, uint, 0644);
+MODULE_PARM_DESC(repool_pgnr,
+ "Set the reserved memory pool size in page (default:0)");
+
+/* set dynamic memory pool size in pages */
+unsigned int dypool_pgnr = UINT_MAX;
+module_param(dypool_pgnr, uint, 0644);
+MODULE_PARM_DESC(dypool_pgnr,
+ "Set the dynamic memory pool size in page (default:0)");
+
+bool dypool_enable;
+module_param(dypool_enable, bool, 0644);
+MODULE_PARM_DESC(dypool_enable,
+ "dynamic memory pool enable/disable (default:disable)");
+
+/* memory optimization: deferred firmware loading */
+bool defer_fw_load;
+module_param(defer_fw_load, bool, 0644);
+MODULE_PARM_DESC(defer_fw_load,
+ "Defer FW loading until device is opened (default:disable)");
+
+/* cross-component debug message flag */
+int dbg_level;
+module_param(dbg_level, int, 0644);
+MODULE_PARM_DESC(dbg_level, "debug message on/off (default:off)");
+
+/* log function switch */
+int dbg_func = 2;
+module_param(dbg_func, int, 0644);
+MODULE_PARM_DESC(dbg_func,
+ "log function switch non/trace_printk/printk (default:printk)");
+
+int mipicsi_flag;
+module_param(mipicsi_flag, int, 0644);
+MODULE_PARM_DESC(mipicsi_flag, "mipi csi compression predictor algorithm");
+
+/*
+ * Set to 16x16 since this is the number of extra lines and pixels the
+ * sensor exports. If these were kept at the previous 10x8, incorrect
+ * resolutions were requested from the sensor driver in YUV downscaling
+ * modes, with strange outcomes as a result. The proper way to do this
+ * would be to have a list of tables that specify the sensor res, MIPI
+ * rec, output res and ISP output res. However, since we do not have
+ * this yet, the chosen solution is the next best thing.
+ */
+int pad_w = 16;
+module_param(pad_w, int, 0644);
+MODULE_PARM_DESC(pad_w, "extra data for ISP processing");
+
+int pad_h = 16;
+module_param(pad_h, int, 0644);
+MODULE_PARM_DESC(pad_h, "extra data for ISP processing");
+
+/*
+ * FIXME: this is a hack to make it easier to support the ISP2401 variant.
+ * As a given system will either be ISP2401 or not, we can just use
+ * a boolean, in order to replace existing #ifdef ISP2401 everywhere.
+ *
+ * Once this driver gets into a better shape, however, the best would
+ * be to replace this with something stored inside atomisp allocated
+ * structures.
+ */
+bool atomisp_hw_is_isp2401;
+
+/* Types of atomisp hardware */
+#define HW_IS_ISP2400 0
+#define HW_IS_ISP2401 1
+
+struct device *atomisp_dev;
+
+void __iomem *atomisp_io_base;
+
+static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_457MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+/* Merrifield and Moorefield DFS rules */
+static const struct atomisp_dfs_config dfs_config_merr = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_400MHZ,
+ .highest_freq = ISP_FREQ_457MHZ,
+ .dfs_table = dfs_rules_merr,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_merr),
+};
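These tables are meant to be scanned in order, matching a stream's run mode, resolution and frame rate, with ISP_FREQ_RULE_ANY acting as a wildcard; that ordering is why the more specific rules in the tables below come before the catch-all entries. A simplified first-match lookup, not the driver's actual helper, might read:

	static unsigned int dfs_lookup_freq(const struct atomisp_dfs_config *dfs,
					    int width, int height, int fps,
					    int run_mode)
	{
		unsigned int i;

		for (i = 0; i < dfs->dfs_table_size; i++) {
			const struct atomisp_freq_scaling_rule *r =
				&dfs->dfs_table[i];

			/* ISP_FREQ_RULE_ANY matches any value. */
			if ((r->width == ISP_FREQ_RULE_ANY || r->width == width) &&
			    (r->height == ISP_FREQ_RULE_ANY || r->height == height) &&
			    (r->fps == ISP_FREQ_RULE_ANY || r->fps == fps) &&
			    r->run_mode == run_mode)
				return r->isp_freq;
		}
		return dfs->lowest_freq;	/* nothing matched */
	}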
+
+static const struct atomisp_freq_scaling_rule dfs_rules_merr_1179[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_merr_1179 = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_400MHZ,
+ .highest_freq = ISP_FREQ_400MHZ,
+ .dfs_table = dfs_rules_merr_1179,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_merr_1179),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_merr_117a[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .fps = 30,
+ .isp_freq = ISP_FREQ_266MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = 1080,
+ .height = 1920,
+ .fps = 30,
+ .isp_freq = ISP_FREQ_266MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .fps = 45,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = 1080,
+ .height = 1920,
+ .fps = 45,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = 60,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_200MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_200MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static struct atomisp_dfs_config dfs_config_merr_117a = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_200MHZ,
+ .highest_freq = ISP_FREQ_400MHZ,
+ .dfs_table = dfs_rules_merr_117a,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_merr_117a),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_byt[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_byt = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_400MHZ,
+ .highest_freq = ISP_FREQ_400MHZ,
+ .dfs_table = dfs_rules_byt,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_byt),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_byt_cr[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_byt_cr = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_320MHZ,
+ .highest_freq = ISP_FREQ_320MHZ,
+ .dfs_table = dfs_rules_byt_cr,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_byt_cr),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_cht[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_cht_soc[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_cht = {
+ .lowest_freq = ISP_FREQ_100MHZ,
+ .max_freq_at_vmin = ISP_FREQ_356MHZ,
+ .highest_freq = ISP_FREQ_356MHZ,
+ .dfs_table = dfs_rules_cht,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_cht),
+};
+
+/* This one should also be visible to atomisp_cmd.c */
+const struct atomisp_dfs_config dfs_config_cht_soc = {
+ .lowest_freq = ISP_FREQ_100MHZ,
+ .max_freq_at_vmin = ISP_FREQ_356MHZ,
+ .highest_freq = ISP_FREQ_356MHZ,
+ .dfs_table = dfs_rules_cht_soc,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_cht_soc),
+};
+
+int atomisp_video_init(struct atomisp_video_pipe *video, const char *name)
+{
+ int ret;
+ const char *direction;
+
+ switch (video->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ direction = "output";
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ video->vdev.fops = &atomisp_fops;
+ video->vdev.ioctl_ops = &atomisp_ioctl_ops;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ direction = "input";
+ video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ video->vdev.fops = &atomisp_file_fops;
+ video->vdev.ioctl_ops = &atomisp_file_ioctl_ops;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = media_entity_pads_init(&video->vdev.entity, 1, &video->pad);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the video device. */
+ snprintf(video->vdev.name, sizeof(video->vdev.name),
+ "ATOMISP ISP %s %s", name, direction);
+ video->vdev.release = video_device_release_empty;
+ video_set_drvdata(&video->vdev, video->isp);
+
+ return 0;
+}
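For reference, a hypothetical call site initializing one of the capture pipes declared in struct atomisp_sub_device could look like:

	atomisp_video_init(&asd->video_out_capture, "CAPTURE");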
+
+void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name)
+{
+ video->vdev.fops = &atomisp_fops;
+ video->vdev.ioctl_ops = &atomisp_ioctl_ops;
+
+ /* Initialize the video device. */
+ snprintf(video->vdev.name, sizeof(video->vdev.name),
+ "ATOMISP ISP %s", name);
+ video->vdev.release = video_device_release_empty;
+ video_set_drvdata(&video->vdev, video->isp);
+}
+
+void atomisp_video_unregister(struct atomisp_video_pipe *video)
+{
+ if (video_is_registered(&video->vdev)) {
+ media_entity_cleanup(&video->vdev.entity);
+ video_unregister_device(&video->vdev);
+ }
+}
+
+void atomisp_acc_unregister(struct atomisp_acc_pipe *video)
+{
+ if (video_is_registered(&video->vdev))
+ video_unregister_device(&video->vdev);
+}
+
+static int atomisp_save_iunit_reg(struct atomisp_device *isp)
+{
+ struct pci_dev *dev = isp->pdev;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ pci_read_config_word(dev, PCI_COMMAND, &isp->saved_regs.pcicmdsts);
+ /* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */
+ pci_read_config_dword(dev, PCI_MSI_CAPID, &isp->saved_regs.msicap);
+ pci_read_config_dword(dev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr);
+ pci_read_config_word(dev, PCI_MSI_DATA, &isp->saved_regs.msi_data);
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr);
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL,
+ &isp->saved_regs.interrupt_control);
+
+ pci_read_config_dword(dev, MRFLD_PCI_PMCS,
+ &isp->saved_regs.pmcs);
+ /* Ensure read/write combining is enabled. */
+ pci_read_config_dword(dev, PCI_I_CONTROL,
+ &isp->saved_regs.i_control);
+ isp->saved_regs.i_control |=
+ MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING |
+ MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ &isp->saved_regs.csi_access_viol);
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ &isp->saved_regs.csi_rcomp_config);
+ /*
+ * Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE.
+ * ANN/CHV: RCOMP updates do not happen when using CSI2+ path
+ * and sensor sending "continuous clock".
+ * TNG/ANN/CHV: MIPI packets are lost if the HS entry sequence
+ * is missed, and IUNIT can hang.
+ * For both issues, setting this bit is a workaround.
+ */
+ isp->saved_regs.csi_rcomp_config |=
+ MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE;
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ &isp->saved_regs.csi_afe_dly);
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_CONTROL,
+ &isp->saved_regs.csi_control);
+ if (isp->media_dev.hw_revision >=
+ (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT))
+ isp->saved_regs.csi_control |=
+ MRFLD_PCI_CSI_CONTROL_PARPATHEN;
+ /*
+ * On CHT, the CSI_READY bit should be set before stream on.
+ */
+ if (IS_CHT && (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)))
+ isp->saved_regs.csi_control |=
+ MRFLD_PCI_CSI_CONTROL_CSI_READY;
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ &isp->saved_regs.csi_afe_rcomp_config);
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ &isp->saved_regs.csi_afe_hs_control);
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ &isp->saved_regs.csi_deadline_control);
+ return 0;
+}
+
+static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp)
+{
+ struct pci_dev *dev = isp->pdev;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ pci_write_config_word(dev, PCI_COMMAND, isp->saved_regs.pcicmdsts);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
+ isp->saved_regs.ispmmadr);
+ pci_write_config_dword(dev, PCI_MSI_CAPID, isp->saved_regs.msicap);
+ pci_write_config_dword(dev, PCI_MSI_ADDR, isp->saved_regs.msi_addr);
+ pci_write_config_word(dev, PCI_MSI_DATA, isp->saved_regs.msi_data);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, isp->saved_regs.intr);
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL,
+ isp->saved_regs.interrupt_control);
+ pci_write_config_dword(dev, PCI_I_CONTROL,
+ isp->saved_regs.i_control);
+
+ pci_write_config_dword(dev, MRFLD_PCI_PMCS,
+ isp->saved_regs.pmcs);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ isp->saved_regs.csi_access_viol);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ isp->saved_regs.csi_rcomp_config);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ isp->saved_regs.csi_afe_dly);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ isp->saved_regs.csi_afe_rcomp_config);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ isp->saved_regs.csi_afe_hs_control);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ isp->saved_regs.csi_deadline_control);
+
+ /*
+ * For MRFLD, software/firmware needs to write a 1 to bit 0 of
+ * the register at CSI_RECEIVER_SELECTION_REG to enable the SH
+ * CSI backend. Writing 0 would enable the Arasan CSI backend,
+ * which has bugs (like sightings 4567697 and 4567699) and
+ * will be removed in B0.
+ */
+ atomisp_store_uint32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
+ return 0;
+}
+
+static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
+{
+ struct pci_dev *dev = isp->pdev;
+ u32 irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (isp->sw_contex.power_state == ATOM_ISP_POWER_DOWN) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ dev_dbg(isp->dev, "<%s %d.\n", __func__, __LINE__);
+ return 0;
+ }
+ /*
+ * MRFLD HAS requirement: the i-unit cannot be powered off while
+ * the ISP has an unserviced IRQ.
+ * So, check here whether there is any pending IRQ and, if so,
+ * wait for it to be serviced.
+ */
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ irq = irq & 1 << INTR_IIR;
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ if (!(irq & (1 << INTR_IIR)))
+ goto done;
+
+ atomisp_store_uint32(MRFLD_INTR_CLEAR_REG, 0xFFFFFFFF);
+ atomisp_load_uint32(MRFLD_INTR_STATUS_REG, &irq);
+ if (irq != 0) {
+ dev_err(isp->dev,
+ "%s: fail to clear isp interrupt status reg=0x%x\n",
+ __func__, irq);
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return -EAGAIN;
+ } else {
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ irq = irq & 1 << INTR_IIR;
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ if (!(irq & (1 << INTR_IIR))) {
+ atomisp_store_uint32(MRFLD_INTR_ENABLE_REG, 0x0);
+ goto done;
+ }
+ dev_err(isp->dev,
+ "%s: error in iunit interrupt. status reg=0x%x\n",
+ __func__, irq);
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return -EAGAIN;
+ }
+done:
+ /*
+ * MRFLD WORKAROUND:
+ * Before powering off the IUNIT, clear the pending interrupts
+ * and disable the interrupt. The driver should avoid writing 0
+ * to IIR, as that could block subsequent interrupt messages.
+ * HW sighting: 4568410.
+ */
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ irq &= ~(1 << INTR_IER);
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ atomisp_msi_irq_uninit(isp, dev);
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ return 0;
+}
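The INTR_IIR handling above suggests that bit is write-1-to-clear: the code reads PCI_INTERRUPT_CTRL, masks everything but the IIR bit and writes the result back to acknowledge a pending interrupt, then re-reads the register to confirm nothing new arrived before clearing INTR_IER at the end.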
+
+/*
+ * WA for DDR DVFS enable/disable:
+ * by default, the ISP will force DDR DVFS to 1600MHz before disabling DVFS.
+ */
+static void punit_ddr_dvfs_enable(bool enable)
+{
+ int door_bell = 1 << 8;
+ int max_wait = 30;
+ int reg;
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
+ if (enable) {
+ reg &= ~(MRFLD_BIT0 | MRFLD_BIT1);
+ } else {
+ reg |= (MRFLD_BIT1 | door_bell);
+ reg &= ~(MRFLD_BIT0);
+ }
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSDVFS, reg);
+
+ /* Check Req_ACK to see freq status, wait until door_bell is cleared */
+ while ((reg & door_bell) && max_wait--) {
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
+ usleep_range(100, 500);
+ }
+
+ if (max_wait == -1)
+ pr_info("DDR DVFS, door bell is not cleared within 3ms\n");
+}
+
+static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
+{
+ unsigned long timeout;
+ u32 val = enable ? MRFLD_ISPSSPM0_IUNIT_POWER_ON :
+ MRFLD_ISPSSPM0_IUNIT_POWER_OFF;
+
+ dev_dbg(isp->dev, "IUNIT power-%s.\n", enable ? "on" : "off");
+
+ /* WA: Enable DVFS */
+ if (IS_CHT && enable)
+ punit_ddr_dvfs_enable(true);
+
+ /*
+ * FIXME: WA for ECS28A. With this sleep, the CTS test
+ * android.hardware.camera2.cts.CameraDeviceTest#testCameraDeviceAbort
+ * passes, with no impact on other platforms.
+ */
+ if (IS_BYT && enable)
+ msleep(10);
+
+ /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
+ iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0,
+ val, MRFLD_ISPSSPM0_ISPSSC_MASK);
+
+ /* WA: Enable DVFS */
+ if (IS_CHT && !enable)
+ punit_ddr_dvfs_enable(true);
+
+ /*
+ * There should be no IUNIT access while power-down is
+ * in progress. HW sighting: 4567865.
+ * Wait up to 50 ms for the IUNIT to shut down.
+ * And we do the same for power on.
+ */
+ timeout = jiffies + msecs_to_jiffies(50);
+ do {
+ u32 tmp;
+
+ /* Wait until ISPSSPM0 bit[25:24] shows the right value */
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &tmp);
+ tmp = (tmp & MRFLD_ISPSSPM0_ISPSSC_MASK) >> MRFLD_ISPSSPM0_ISPSSS_OFFSET;
+ if (tmp == val) {
+ trace_ipu_cstate(enable);
+ return 0;
+ }
+
+ if (time_after(jiffies, timeout))
+ break;
+
+ /* FIXME: experienced value for delay */
+ usleep_range(100, 150);
+ } while (1);
+
+ if (enable)
+ msleep(10);
+
+ dev_err(isp->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+ return -EBUSY;
+}
+
+/* Workaround for pmu_nc_set_power_state not ready in MRFLD */
+int atomisp_mrfld_power_down(struct atomisp_device *isp)
+{
+// FIXME: at least with ISP2401, enabling this code causes the driver to break
+ return 0 && atomisp_mrfld_power(isp, false);
+}
+
+/* Workaround for pmu_nc_set_power_state not ready in MRFLD */
+int atomisp_mrfld_power_up(struct atomisp_device *isp)
+{
+// FIXME: at least with ISP2401, enabling this code causes the driver to break
+ return 0 && atomisp_mrfld_power(isp, true);
+}
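Note the 0 && in both wrappers above: since && short-circuits, atomisp_mrfld_power() is never actually called, yet the call keeps being compiled and type-checked, so resolving the FIXME only requires deleting the 0 &&.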
+
+int atomisp_runtime_suspend(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ int ret;
+
+ ret = atomisp_mrfld_pre_power_down(isp);
+ if (ret)
+ return ret;
+
+ /* Turn off the ISP d-phy */
+ ret = atomisp_ospm_dphy_down(isp);
+ if (ret)
+ return ret;
+ cpu_latency_qos_update_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
+ return atomisp_mrfld_power_down(isp);
+}
+
+int atomisp_runtime_resume(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ int ret;
+
+ ret = atomisp_mrfld_power_up(isp);
+ if (ret)
+ return ret;
+
+ cpu_latency_qos_update_request(&isp->pm_qos, isp->max_isr_latency);
+ if (isp->sw_contex.power_state == ATOM_ISP_POWER_DOWN) {
+ /* Turn on the ISP d-phy */
+ ret = atomisp_ospm_dphy_up(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power up ISP!.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Restore register values for iUnit and iUnitPHY registers */
+ if (isp->saved_regs.pcicmdsts)
+ atomisp_restore_iunit_reg(isp);
+
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
+ return 0;
+}
+
+static int __maybe_unused atomisp_suspend(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ /* FIXME: only has one isp_subdev at present */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+ unsigned long flags;
+ int ret;
+
+ /*
+ * FIXME: Suspend is not supported by sensors. Abort if any video
+ * node was opened.
+ */
+ if (atomisp_dev_users(isp))
+ return -EBUSY;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_DISABLED) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ dev_err(isp->dev, "atomisp cannot suspend at this time.\n");
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ ret = atomisp_mrfld_pre_power_down(isp);
+ if (ret)
+ return ret;
+
+ /* Turn off the ISP d-phy */
+ ret = atomisp_ospm_dphy_down(isp);
+ if (ret) {
+ dev_err(isp->dev, "fail to power off ISP\n");
+ return ret;
+ }
+ cpu_latency_qos_update_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
+ return atomisp_mrfld_power_down(isp);
+}
+
+static int __maybe_unused atomisp_resume(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ int ret;
+
+ ret = atomisp_mrfld_power_up(isp);
+ if (ret)
+ return ret;
+
+ cpu_latency_qos_update_request(&isp->pm_qos, isp->max_isr_latency);
+
+ /* Turn on the ISP d-phy */
+ ret = atomisp_ospm_dphy_up(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power up ISP!.\n");
+ return -EINVAL;
+ }
+
+ /* Restore register values for iUnit and iUnitPHY registers */
+ if (isp->saved_regs.pcicmdsts)
+ atomisp_restore_iunit_reg(isp);
+
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
+ return 0;
+}
+
+int atomisp_csi_lane_config(struct atomisp_device *isp)
+{
+ static const struct {
+ u8 code;
+ u8 lanes[MRFLD_PORT_NUM];
+ } portconfigs[] = {
+ /* Tangier/Merrifield available lane configurations */
+ { 0x00, { 4, 1, 0 } }, /* 00000 */
+ { 0x01, { 3, 1, 0 } }, /* 00001 */
+ { 0x02, { 2, 1, 0 } }, /* 00010 */
+ { 0x03, { 1, 1, 0 } }, /* 00011 */
+ { 0x04, { 2, 1, 2 } }, /* 00100 */
+ { 0x08, { 3, 1, 1 } }, /* 01000 */
+ { 0x09, { 2, 1, 1 } }, /* 01001 */
+ { 0x0a, { 1, 1, 1 } }, /* 01010 */
+
+ /* Anniedale/Moorefield only configurations */
+ { 0x10, { 4, 2, 0 } }, /* 10000 */
+ { 0x11, { 3, 2, 0 } }, /* 10001 */
+ { 0x12, { 2, 2, 0 } }, /* 10010 */
+ { 0x13, { 1, 2, 0 } }, /* 10011 */
+ { 0x14, { 2, 2, 2 } }, /* 10100 */
+ { 0x18, { 3, 2, 1 } }, /* 11000 */
+ { 0x19, { 2, 2, 1 } }, /* 11001 */
+ { 0x1a, { 1, 2, 1 } }, /* 11010 */
+ };
+
+ unsigned int i, j;
+ u8 sensor_lanes[MRFLD_PORT_NUM] = { 0 };
+ u32 csi_control;
+ int nportconfigs;
+ u32 port_config_mask;
+ int port3_lanes_shift;
+
+ if (isp->media_dev.hw_revision <
+ ATOMISP_HW_REVISION_ISP2401_LEGACY <<
+ ATOMISP_HW_REVISION_SHIFT) {
+ /* Merrifield */
+ port_config_mask = MRFLD_PORT_CONFIG_MASK;
+ port3_lanes_shift = MRFLD_PORT3_LANES_SHIFT;
+ } else {
+ /* Moorefield / Cherryview */
+ port_config_mask = CHV_PORT_CONFIG_MASK;
+ port3_lanes_shift = CHV_PORT3_LANES_SHIFT;
+ }
+
+ if (isp->media_dev.hw_revision <
+ ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) {
+ /* Merrifield / Moorefield legacy input system */
+ nportconfigs = MRFLD_PORT_CONFIG_NUM;
+ } else {
+ /* Moorefield / Cherryview new input system */
+ nportconfigs = ARRAY_SIZE(portconfigs);
+ }
+
+ for (i = 0; i < isp->input_cnt; i++) {
+ struct camera_mipi_info *mipi_info;
+
+ if (isp->inputs[i].type != RAW_CAMERA &&
+ isp->inputs[i].type != SOC_CAMERA)
+ continue;
+
+ mipi_info = atomisp_to_sensor_mipi_info(isp->inputs[i].camera);
+ if (!mipi_info)
+ continue;
+
+ switch (mipi_info->port) {
+ case ATOMISP_CAMERA_PORT_PRIMARY:
+ sensor_lanes[0] = mipi_info->num_lanes;
+ break;
+ case ATOMISP_CAMERA_PORT_SECONDARY:
+ sensor_lanes[1] = mipi_info->num_lanes;
+ break;
+ case ATOMISP_CAMERA_PORT_TERTIARY:
+ sensor_lanes[2] = mipi_info->num_lanes;
+ break;
+ default:
+ dev_err(isp->dev,
+ "%s: invalid port: %d for the %dth sensor\n",
+ __func__, mipi_info->port, i);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < nportconfigs; i++) {
+ for (j = 0; j < MRFLD_PORT_NUM; j++)
+ if (sensor_lanes[j] &&
+ sensor_lanes[j] != portconfigs[i].lanes[j])
+ break;
+
+ if (j == MRFLD_PORT_NUM)
+ break; /* Found matching setting */
+ }
+
+ if (i >= nportconfigs) {
+ dev_err(isp->dev,
+ "%s: could not find the CSI port setting for %d-%d-%d\n",
+ __func__,
+ sensor_lanes[0], sensor_lanes[1], sensor_lanes[2]);
+ return -EINVAL;
+ }
+
+ pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &csi_control);
+ csi_control &= ~port_config_mask;
+ csi_control |= (portconfigs[i].code << MRFLD_PORT_CONFIGCODE_SHIFT)
+ | (portconfigs[i].lanes[0] ? 0 : (1 << MRFLD_PORT1_ENABLE_SHIFT))
+ | (portconfigs[i].lanes[1] ? 0 : (1 << MRFLD_PORT2_ENABLE_SHIFT))
+ | (portconfigs[i].lanes[2] ? 0 : (1 << MRFLD_PORT3_ENABLE_SHIFT))
+ | (((1 << portconfigs[i].lanes[0]) - 1) << MRFLD_PORT1_LANES_SHIFT)
+ | (((1 << portconfigs[i].lanes[1]) - 1) << MRFLD_PORT2_LANES_SHIFT)
+ | (((1 << portconfigs[i].lanes[2]) - 1) << port3_lanes_shift);
+
+ pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, csi_control);
+
+ dev_dbg(isp->dev,
+ "%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%08X\n",
+ __func__, portconfigs[i].lanes[0], portconfigs[i].lanes[1],
+ portconfigs[i].lanes[2], csi_control);
+
+ return 0;
+}
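The per-port lane fields written to CSI_CONTROL are (1 << nlanes) - 1 masks. For example, portconfig code 0x04 with lanes {2, 1, 2} yields the mask 0b11 for ports 1 and 3 and 0b1 for port 2; a port with zero configured lanes gets its MRFLD_PORTn_ENABLE_SHIFT bit set instead.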
+
+static int atomisp_subdev_probe(struct atomisp_device *isp)
+{
+ const struct atomisp_platform_data *pdata;
+ struct intel_v4l2_subdev_table *subdevs;
+ int ret, raw_index = -1, count;
+
+ pdata = atomisp_get_platform_data();
+ if (!pdata) {
+ dev_err(isp->dev, "no platform data available\n");
+ return 0;
+ }
+
+ /* FIXME: should return -EPROBE_DEFER if not all subdevs were probed */
+ for (count = 0; count < SUBDEV_WAIT_TIMEOUT_MAX_COUNT; count++) {
+ int camera_count = 0;
+
+ for (subdevs = pdata->subdevs; subdevs->type; ++subdevs) {
+ if (subdevs->type == RAW_CAMERA ||
+     subdevs->type == SOC_CAMERA)
+ camera_count++;
+ }
+ if (camera_count)
+ break;
+ msleep(SUBDEV_WAIT_TIMEOUT);
+ }
+ /* Wait a bit longer to give the subdev init code time to finish */
+ msleep(5 * SUBDEV_WAIT_TIMEOUT);
+
+ /* FIXME: should, instead, use I2C probe */
+
+ for (subdevs = pdata->subdevs; subdevs->type; ++subdevs) {
+ struct v4l2_subdev *subdev;
+ struct i2c_board_info *board_info =
+ &subdevs->v4l2_subdev.board_info;
+ struct i2c_adapter *adapter =
+ i2c_get_adapter(subdevs->v4l2_subdev.i2c_adapter_id);
+ int sensor_num, i;
+
+ dev_info(isp->dev, "Probing Subdev %s\n", board_info->type);
+
+ if (!adapter) {
+ dev_err(isp->dev,
+ "Failed to find i2c adapter for subdev %s\n",
+ board_info->type);
+ break;
+ }
+
+ /* In G-Min, the sensor devices will already be probed
+ * (via ACPI) and registered; do not create new ones. */
+ subdev = atomisp_gmin_find_subdev(adapter, board_info);
+ if (!subdev) {
+ dev_warn(isp->dev, "Subdev %s not found\n",
+ board_info->type);
+ continue;
+ }
+ ret = v4l2_device_register_subdev(&isp->v4l2_dev, subdev);
+ if (ret) {
+ dev_warn(isp->dev, "Subdev %s detection fail\n",
+ board_info->type);
+ continue;
+ }
+
+ dev_info(isp->dev, "Subdev %s successfully register\n",
+ board_info->type);
+
+ switch (subdevs->type) {
+ case RAW_CAMERA:
+ raw_index = isp->input_cnt;
+ dev_dbg(isp->dev, "raw_index: %d\n", raw_index);
+ /* fall through */
+ case SOC_CAMERA:
+ dev_dbg(isp->dev, "SOC_INDEX: %d\n", isp->input_cnt);
+ if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) {
+ dev_warn(isp->dev,
+ "too many atomisp inputs, ignored\n");
+ break;
+ }
+
+ isp->inputs[isp->input_cnt].type = subdevs->type;
+ isp->inputs[isp->input_cnt].port = subdevs->port;
+ isp->inputs[isp->input_cnt].camera = subdev;
+ isp->inputs[isp->input_cnt].sensor_index = 0;
+ /*
+ * Initialize the subdev frame size; later we can
+ * judge whether frame_size stores an effective value
+ * via pixel_format.
+ */
+ isp->inputs[isp->input_cnt].frame_size.pixel_format = 0;
+ isp->inputs[isp->input_cnt].camera_caps =
+ atomisp_get_default_camera_caps();
+ sensor_num = isp->inputs[isp->input_cnt]
+ .camera_caps->sensor_num;
+ isp->input_cnt++;
+ for (i = 1; i < sensor_num; i++) {
+ if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) {
+ dev_warn(isp->dev,
+ "atomisp inputs out of range\n");
+ break;
+ }
+ isp->inputs[isp->input_cnt] =
+ isp->inputs[isp->input_cnt - 1];
+ isp->inputs[isp->input_cnt].sensor_index = i;
+ isp->input_cnt++;
+ }
+ break;
+ case CAMERA_MOTOR:
+ if (isp->motor) {
+ dev_warn(isp->dev,
+ "too many atomisp motors, ignored %s\n",
+ board_info->type);
+ continue;
+ }
+ isp->motor = subdev;
+ break;
+ case LED_FLASH:
+ case XENON_FLASH:
+ if (isp->flash) {
+ dev_warn(isp->dev,
+ "too many atomisp flash devices, ignored %s\n",
+ board_info->type);
+ continue;
+ }
+ isp->flash = subdev;
+ break;
+ default:
+ dev_dbg(isp->dev, "unknown subdev probed\n");
+ break;
+ }
+ }
+
+ /*
+ * HACK: Currently VCM belongs to primary sensor only, but correct
+ * approach must be to acquire from platform code which sensor
+ * owns it.
+ */
+ if (isp->motor && raw_index >= 0)
+ isp->inputs[raw_index].motor = isp->motor;
+
+ /* Proceed even if no modules were detected (e.g. COS mode with no modules). */
+ if (!isp->input_cnt)
+ dev_warn(isp->dev, "no camera attached or fail to detect\n");
+ else
+ dev_info(isp->dev, "detected %d camera sensors\n",
+ isp->input_cnt);
+
+ return atomisp_csi_lane_config(isp);
+}
+
+static void atomisp_unregister_entities(struct atomisp_device *isp)
+{
+ unsigned int i;
+ struct v4l2_subdev *sd, *next;
+
+ for (i = 0; i < isp->num_of_streams; i++)
+ atomisp_subdev_unregister_entities(&isp->asd[i]);
+ atomisp_tpg_unregister_entities(&isp->tpg);
+ atomisp_file_input_unregister_entities(&isp->file_dev);
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++)
+ atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
+
+ list_for_each_entry_safe(sd, next, &isp->v4l2_dev.subdevs, list)
+ v4l2_device_unregister_subdev(sd);
+
+ v4l2_device_unregister(&isp->v4l2_dev);
+ media_device_unregister(&isp->media_dev);
+}
+
+static int atomisp_register_entities(struct atomisp_device *isp)
+{
+ int ret = 0;
+ unsigned int i;
+
+ isp->media_dev.dev = isp->dev;
+
+ strlcpy(isp->media_dev.model, "Intel Atom ISP",
+ sizeof(isp->media_dev.model));
+
+ media_device_init(&isp->media_dev);
+ isp->v4l2_dev.mdev = &isp->media_dev;
+ ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
+ __func__, ret);
+ goto v4l2_device_failed;
+ }
+
+ ret = atomisp_subdev_probe(isp);
+ if (ret < 0)
+ goto csi_and_subdev_probe_failed;
+
+ /* Register internal entities */
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ ret = atomisp_mipi_csi2_register_entities(&isp->csi2_port[i],
+ &isp->v4l2_dev);
+ if (ret == 0)
+ continue;
+
+ /* error case */
+ dev_err(isp->dev, "failed to register the CSI port: %d\n", i);
+ /* deregister all registered CSI ports */
+ while (i--)
+ atomisp_mipi_csi2_unregister_entities(
+ &isp->csi2_port[i]);
+
+ goto csi_and_subdev_probe_failed;
+ }
+
+	ret = atomisp_file_input_register_entities(&isp->file_dev,
+						   &isp->v4l2_dev);
+ if (ret < 0) {
+		dev_err(isp->dev, "atomisp_file_input_register_entities failed\n");
+ goto file_input_register_failed;
+ }
+
+ ret = atomisp_tpg_register_entities(&isp->tpg, &isp->v4l2_dev);
+ if (ret < 0) {
+		dev_err(isp->dev, "atomisp_tpg_register_entities failed\n");
+ goto tpg_register_failed;
+ }
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ ret = atomisp_subdev_register_entities(asd, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev,
+				"atomisp_subdev_register_entities failed\n");
+ for (; i > 0; i--)
+ atomisp_subdev_unregister_entities(
+ &isp->asd[i - 1]);
+ goto subdev_register_failed;
+ }
+ }
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ init_completion(&asd->init_done);
+
+ asd->delayed_init_workq =
+ alloc_workqueue(isp->v4l2_dev.name, WQ_CPU_INTENSIVE,
+ 1);
+ if (!asd->delayed_init_workq) {
+ dev_err(isp->dev,
+ "Failed to initialize delayed init workq\n");
+ ret = -ENOMEM;
+
+ for (; i > 0; i--)
+ destroy_workqueue(isp->asd[i - 1].
+ delayed_init_workq);
+ goto wq_alloc_failed;
+ }
+ INIT_WORK(&asd->delayed_init_work, atomisp_delayed_init_work);
+ }
+
+ for (i = 0; i < isp->input_cnt; i++) {
+ if (isp->inputs[i].port >= ATOMISP_CAMERA_NR_PORTS) {
+ dev_err(isp->dev, "isp->inputs port %d not supported\n",
+ isp->inputs[i].port);
+ ret = -EINVAL;
+ goto link_failed;
+ }
+ }
+
+	dev_dbg(isp->dev,
+		"FILE_INPUT enabled, camera_cnt: %d\n", isp->input_cnt);
+ isp->inputs[isp->input_cnt].type = FILE_INPUT;
+ isp->inputs[isp->input_cnt].port = -1;
+ isp->inputs[isp->input_cnt].camera_caps =
+ atomisp_get_default_camera_caps();
+ isp->inputs[isp->input_cnt++].camera = &isp->file_dev.sd;
+
+ if (isp->input_cnt < ATOM_ISP_MAX_INPUTS) {
+ dev_dbg(isp->dev,
+ "TPG detected, camera_cnt: %d\n", isp->input_cnt);
+ isp->inputs[isp->input_cnt].type = TEST_PATTERN;
+ isp->inputs[isp->input_cnt].port = -1;
+ isp->inputs[isp->input_cnt].camera_caps =
+ atomisp_get_default_camera_caps();
+ isp->inputs[isp->input_cnt++].camera = &isp->tpg.sd;
+ } else {
+ dev_warn(isp->dev, "too many atomisp inputs, TPG ignored.\n");
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+ if (ret < 0)
+ goto link_failed;
+
+ return media_device_register(&isp->media_dev);
+
+link_failed:
+	for (i = 0; i < isp->num_of_streams; i++)
+		destroy_workqueue(isp->asd[i].delayed_init_workq);
+wq_alloc_failed:
+	for (i = 0; i < isp->num_of_streams; i++)
+		atomisp_subdev_unregister_entities(&isp->asd[i]);
+subdev_register_failed:
+ atomisp_tpg_unregister_entities(&isp->tpg);
+tpg_register_failed:
+ atomisp_file_input_unregister_entities(&isp->file_dev);
+file_input_register_failed:
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++)
+ atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
+csi_and_subdev_probe_failed:
+ v4l2_device_unregister(&isp->v4l2_dev);
+v4l2_device_failed:
+ media_device_unregister(&isp->media_dev);
+ media_device_cleanup(&isp->media_dev);
+ return ret;
+}
+
+static int atomisp_initialize_modules(struct atomisp_device *isp)
+{
+ int ret;
+
+ ret = atomisp_mipi_csi2_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "mipi csi2 initialization failed\n");
+ goto error_mipi_csi2;
+ }
+
+ ret = atomisp_file_input_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev,
+ "file input device initialization failed\n");
+ goto error_file_input;
+ }
+
+ ret = atomisp_tpg_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "tpg initialization failed\n");
+ goto error_tpg;
+ }
+
+ ret = atomisp_subdev_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "ISP subdev initialization failed\n");
+ goto error_isp_subdev;
+ }
+
+ return 0;
+
+error_isp_subdev:
+error_tpg:
+ atomisp_tpg_cleanup(isp);
+error_file_input:
+ atomisp_file_input_cleanup(isp);
+error_mipi_csi2:
+ atomisp_mipi_csi2_cleanup(isp);
+ return ret;
+}
+
+static void atomisp_uninitialize_modules(struct atomisp_device *isp)
+{
+ atomisp_tpg_cleanup(isp);
+ atomisp_file_input_cleanup(isp);
+ atomisp_mipi_csi2_cleanup(isp);
+}
+
+const struct firmware *
+atomisp_load_firmware(struct atomisp_device *isp)
+{
+ const struct firmware *fw;
+ int rc;
+ char *fw_path = NULL;
+
+ if (skip_fwload)
+ return NULL;
+
+ if ((isp->media_dev.hw_revision >> ATOMISP_HW_REVISION_SHIFT)
+ == ATOMISP_HW_REVISION_ISP2401)
+ fw_path = "shisp_2401a0_v21.bin";
+
+ if (isp->media_dev.hw_revision ==
+ ((ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT)
+ | ATOMISP_HW_STEPPING_A0))
+ fw_path = "shisp_2401a0_legacy_v21.bin";
+
+ if (isp->media_dev.hw_revision ==
+ ((ATOMISP_HW_REVISION_ISP2400 << ATOMISP_HW_REVISION_SHIFT)
+ | ATOMISP_HW_STEPPING_B0))
+ fw_path = "shisp_2400b0_v21.bin";
+
+ if (!fw_path) {
+ dev_err(isp->dev, "Unsupported hw_revision 0x%x\n",
+ isp->media_dev.hw_revision);
+ return NULL;
+ }
+
+ rc = request_firmware(&fw, fw_path, isp->dev);
+ if (rc) {
+ dev_err(isp->dev,
+ "atomisp: Error %d while requesting firmware %s\n",
+ rc, fw_path);
+ return NULL;
+ }
+
+ return fw;
+}
+
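+/*
+ * Illustrative note on the matching above: media_dev.hw_revision packs
+ * the ISP revision in the bits above ATOMISP_HW_REVISION_SHIFT and the
+ * stepping in the low bits, so e.g. ISP2400 stepping B0, i.e.
+ * (ATOMISP_HW_REVISION_ISP2400 << ATOMISP_HW_REVISION_SHIFT) |
+ * ATOMISP_HW_STEPPING_B0, selects shisp_2400b0_v21.bin.
+ */
+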
+/*
+ * Check the flags the driver was compiled with against the PCI
+ * device. Returns false when the probed hardware does not match the
+ * ISP variant the driver was built for.
+ */
+static bool is_valid_device(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ unsigned int a0_max_id = 0;
+ const char *name;
+ const char *product;
+
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+
+ switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD:
+ a0_max_id = ATOMISP_PCI_REV_MRFLD_A0_MAX;
+ atomisp_hw_is_isp2401 = false;
+ name = "Merrifield";
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_BYT:
+ a0_max_id = ATOMISP_PCI_REV_BYT_A0_MAX;
+ atomisp_hw_is_isp2401 = false;
+ name = "Baytrail";
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_ANN:
+ name = "Anniedale";
+ atomisp_hw_is_isp2401 = true;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_CHT:
+ name = "Cherrytrail";
+ atomisp_hw_is_isp2401 = true;
+ break;
+ default:
+		dev_err(&dev->dev, "%s: unknown device ID %04x:%04x\n",
+ product, id->vendor, id->device);
+ return false;
+ }
+
+	if (dev->revision <= a0_max_id) {
+		dev_err(&dev->dev, "%s revision %d is not supported\n",
+ name, dev->revision);
+ return false;
+ }
+
+ /*
+ * FIXME:
+	 * remove this check once the driver becomes generic
+ */
+
+#if defined(ISP2400)
+ if (atomisp_hw_is_isp2401) {
+ dev_err(&dev->dev, "Support for %s (ISP2401) was disabled at compile time\n",
+ name);
+ return false;
+ }
+#else
+ if (!atomisp_hw_is_isp2401) {
+ dev_err(&dev->dev, "Support for %s (ISP2400) was disabled at compile time\n",
+ name);
+ return false;
+ }
+#endif
+
+ dev_info(&dev->dev, "Detected %s version %d (ISP240%c) on %s\n",
+ name, dev->revision,
+ atomisp_hw_is_isp2401 ? '1' : '0',
+ product);
+
+ return true;
+}
+
+static int init_atomisp_wdts(struct atomisp_device *isp)
+{
+ int i, err;
+
+ atomic_set(&isp->wdt_work_queued, 0);
+ isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
+ if (!isp->wdt_work_queue) {
+ dev_err(isp->dev, "Failed to initialize wdt work queue\n");
+ err = -ENOMEM;
+ goto alloc_fail;
+ }
+ INIT_WORK(&isp->wdt_work, atomisp_wdt_work);
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+		struct atomisp_sub_device *asd = &isp->asd[i];
+
+		if (!atomisp_hw_is_isp2401) {
+			timer_setup(&asd->wdt, atomisp_wdt, 0);
+		} else {
+			timer_setup(&asd->video_out_capture.wdt,
+				    atomisp_wdt, 0);
+			timer_setup(&asd->video_out_preview.wdt,
+				    atomisp_wdt, 0);
+			timer_setup(&asd->video_out_vf.wdt, atomisp_wdt, 0);
+			timer_setup(&asd->video_out_video_capture.wdt,
+				    atomisp_wdt, 0);
+		}
+ }
+ return 0;
+alloc_fail:
+ return err;
+}
+
+#define ATOM_ISP_PCI_BAR 0
+
+static int atomisp_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ const struct atomisp_platform_data *pdata;
+ struct atomisp_device *isp;
+ unsigned int start;
+ void __iomem *base;
+ int err, val;
+ u32 irq;
+
+ if (!is_valid_device(dev, id))
+ return -ENODEV;
+
+ /* Pointer to struct device. */
+ atomisp_dev = &dev->dev;
+
+ pdata = atomisp_get_platform_data();
+ if (!pdata)
+ dev_warn(&dev->dev, "no platform data available\n");
+
+ err = pcim_enable_device(dev);
+ if (err) {
+ dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n",
+ err);
+ return err;
+ }
+
+ start = pci_resource_start(dev, ATOM_ISP_PCI_BAR);
+ dev_dbg(&dev->dev, "start: 0x%x\n", start);
+
+ err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev));
+ if (err) {
+		dev_err(&dev->dev, "Failed to remap I/O memory (%d)\n",
+ err);
+ goto ioremap_fail;
+ }
+
+ base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR];
+ dev_dbg(&dev->dev, "base: %p\n", base);
+
+ atomisp_io_base = base;
+
+ dev_dbg(&dev->dev, "atomisp_io_base: %p\n", atomisp_io_base);
+
+	isp = devm_kzalloc(&dev->dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp) {
+ err = -ENOMEM;
+ goto atomisp_dev_alloc_fail;
+ }
+ isp->pdev = dev;
+ isp->dev = &dev->dev;
+ isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
+ isp->saved_regs.ispmmadr = start;
+
+ rt_mutex_init(&isp->mutex);
+ mutex_init(&isp->streamoff_mutex);
+ spin_lock_init(&isp->lock);
+
+ /* This is not a true PCI device on SoC, so the delay is not needed. */
+ isp->pdev->d3_delay = 0;
+
+ switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD:
+ isp->media_dev.hw_revision =
+ (ATOMISP_HW_REVISION_ISP2400
+ << ATOMISP_HW_REVISION_SHIFT) |
+ ATOMISP_HW_STEPPING_B0;
+
+ switch (id->device) {
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_1179:
+ isp->dfs = &dfs_config_merr_1179;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_117A:
+			isp->dfs = &dfs_config_merr_117a;
+			break;
+ default:
+ isp->dfs = &dfs_config_merr;
+ break;
+ }
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_BYT:
+ isp->media_dev.hw_revision =
+ (ATOMISP_HW_REVISION_ISP2400
+ << ATOMISP_HW_REVISION_SHIFT) |
+ ATOMISP_HW_STEPPING_B0;
+#ifdef FIXME
+ if (INTEL_MID_BOARD(3, TABLET, BYT, BLK, PRO, CRV2) ||
+ INTEL_MID_BOARD(3, TABLET, BYT, BLK, ENG, CRV2)) {
+ isp->dfs = &dfs_config_byt_cr;
+ isp->hpll_freq = HPLL_FREQ_2000MHZ;
+ } else
+#endif
+ {
+ isp->dfs = &dfs_config_byt;
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ }
+		/*
+		 * HPLL frequency is known to be device-specific, but we
+		 * don't have specs yet for exactly how it varies. Default
+		 * to BYT-CR but let provisioning set it via an EFI
+		 * variable.
+		 */
+ isp->hpll_freq = gmin_get_var_int(&dev->dev, false, "HpllFreq",
+ HPLL_FREQ_2000MHZ);
+
+		/*
+		 * For BYT/CHT we put the ISP into D3cold to avoid PCI
+		 * register accesses in power off. Set d3cold_delay to 0
+		 * since the default 100 ms is not necessary.
+		 */
+ isp->pdev->d3cold_delay = 0;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_ANN:
+ isp->media_dev.hw_revision = (
+#ifdef ISP2401_NEW_INPUT_SYSTEM
+ ATOMISP_HW_REVISION_ISP2401
+#else
+ ATOMISP_HW_REVISION_ISP2401_LEGACY
+#endif
+ << ATOMISP_HW_REVISION_SHIFT);
+ isp->media_dev.hw_revision |= isp->pdev->revision < 2 ?
+ ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
+ isp->dfs = &dfs_config_merr;
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_CHT:
+ isp->media_dev.hw_revision = (
+#ifdef ISP2401_NEW_INPUT_SYSTEM
+ ATOMISP_HW_REVISION_ISP2401
+#else
+ ATOMISP_HW_REVISION_ISP2401_LEGACY
+#endif
+ << ATOMISP_HW_REVISION_SHIFT);
+ isp->media_dev.hw_revision |= isp->pdev->revision < 2 ?
+ ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
+
+ isp->dfs = &dfs_config_cht;
+ isp->pdev->d3cold_delay = 0;
+
+ iosf_mbi_read(CCK_PORT, MBI_REG_READ, CCK_FUSE_REG_0, &val);
+ switch (val & CCK_FUSE_HPLL_FREQ_MASK) {
+ case 0x00:
+ isp->hpll_freq = HPLL_FREQ_800MHZ;
+ break;
+ case 0x01:
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ break;
+ case 0x02:
+ isp->hpll_freq = HPLL_FREQ_2000MHZ;
+ break;
+ default:
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+			dev_warn(isp->dev,
+				 "reading HPLL from cck failed, defaulting to 1600 MHz\n");
+ }
+ break;
+ default:
+		dev_err(&dev->dev, "unsupported IUNIT device\n");
+ err = -ENODEV;
+ goto atomisp_dev_alloc_fail;
+ }
+
+ dev_info(&dev->dev, "ISP HPLL frequency base = %d MHz\n",
+ isp->hpll_freq);
+
+ isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;
+
+ /* Load isp firmware from user space */
+ if (!defer_fw_load) {
+ isp->firmware = atomisp_load_firmware(isp);
+ if (!isp->firmware) {
+ err = -ENOENT;
+ dev_dbg(&dev->dev, "Firmware load failed\n");
+ goto load_fw_fail;
+ }
+
+ err = atomisp_css_check_firmware_version(isp);
+ if (err) {
+ dev_dbg(&dev->dev, "Firmware version check failed\n");
+ goto fw_validation_fail;
+ }
+ } else {
+ dev_info(&dev->dev, "Firmware load will be deferred\n");
+ }
+
+ pci_set_master(dev);
+ pci_set_drvdata(dev, isp);
+
+ err = pci_enable_msi(dev);
+ if (err) {
+ dev_err(&dev->dev, "Failed to enable msi (%d)\n", err);
+ goto enable_msi_fail;
+ }
+
+ atomisp_msi_irq_init(isp, dev);
+
+ cpu_latency_qos_add_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+	/*
+	 * For MRFLD, software/firmware needs to write 1 to bit 0 of
+	 * the register at CSI_RECEIVER_SELECTION_REG to enable the SH
+	 * CSI backend. Writing 0 would enable the Arasan CSI backend,
+	 * which has bugs (like sightings 4567697 and 4567699) and will
+	 * be removed in B0.
+	 */
+ atomisp_store_uint32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
+
+ if ((id->device & ATOMISP_PCI_DEVICE_SOC_MASK) ==
+ ATOMISP_PCI_DEVICE_SOC_MRFLD) {
+ u32 csi_afe_trim;
+
+ /*
+ * Workaround for imbalance data eye issue which is observed
+ * on TNG B0.
+ */
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ &csi_afe_trim);
+ csi_afe_trim &= ~((MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
+ MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
+ MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
+ MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT));
+ csi_afe_trim |= (MRFLD_PCI_CSI1_HSRXCLKTRIM <<
+ MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI2_HSRXCLKTRIM <<
+ MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI3_HSRXCLKTRIM <<
+ MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ csi_afe_trim);
+ }
+
+ err = atomisp_initialize_modules(isp);
+ if (err < 0) {
+		dev_err(&dev->dev, "atomisp_initialize_modules failed (%d)\n",
+			err);
+ goto initialize_modules_fail;
+ }
+
+ err = atomisp_register_entities(isp);
+ if (err < 0) {
+ dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n",
+ err);
+ goto register_entities_fail;
+ }
+ err = atomisp_create_pads_links(isp);
+ if (err < 0)
+ goto register_entities_fail;
+ /* init atomisp wdts */
+ if (init_atomisp_wdts(isp) != 0)
+ goto wdt_work_queue_fail;
+
+	/* Save the iunit context only once after all values are initialized. */
+ atomisp_save_iunit_reg(isp);
+
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_allow(&dev->dev);
+
+ hmm_init_mem_stat(repool_pgnr, dypool_enable, dypool_pgnr);
+ err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
+ if (err) {
+ dev_err(&dev->dev, "Failed to register reserved memory pool.\n");
+ goto hmm_pool_fail;
+ }
+
+ /* Init ISP memory management */
+ hmm_init();
+
+ err = devm_request_threaded_irq(&dev->dev, dev->irq,
+ atomisp_isr, atomisp_isr_thread,
+ IRQF_SHARED, "isp_irq", isp);
+ if (err) {
+ dev_err(&dev->dev, "Failed to request irq (%d)\n", err);
+ goto request_irq_fail;
+ }
+
+ /* Load firmware into ISP memory */
+ if (!defer_fw_load) {
+ err = atomisp_css_load_firmware(isp);
+ if (err) {
+ dev_err(&dev->dev, "Failed to init css.\n");
+ goto css_init_fail;
+ }
+ } else {
+ dev_dbg(&dev->dev, "Skip css init.\n");
+ }
+ /* Clear FW image from memory */
+ release_firmware(isp->firmware);
+ isp->firmware = NULL;
+ isp->css_env.isp_css_fw.data = NULL;
+
+ atomisp_drvfs_init(&dev->driver->driver, isp);
+
+ return 0;
+
+css_init_fail:
+ devm_free_irq(&dev->dev, dev->irq, isp);
+request_irq_fail:
+ hmm_cleanup();
+ hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
+hmm_pool_fail:
+ destroy_workqueue(isp->wdt_work_queue);
+wdt_work_queue_fail:
+ atomisp_acc_cleanup(isp);
+ atomisp_unregister_entities(isp);
+register_entities_fail:
+ atomisp_uninitialize_modules(isp);
+initialize_modules_fail:
+ cpu_latency_qos_remove_request(&isp->pm_qos);
+ atomisp_msi_irq_uninit(isp, dev);
+ pci_disable_msi(dev);
+enable_msi_fail:
+fw_validation_fail:
+ release_firmware(isp->firmware);
+load_fw_fail:
+ /*
+ * Switch off ISP, as keeping it powered on would prevent
+ * reaching S0ix states.
+ *
+ * The following lines have been copied from atomisp suspend path
+ */
+
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+	irq &= 1 << INTR_IIR;
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ irq &= ~(1 << INTR_IER);
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ atomisp_msi_irq_uninit(isp, dev);
+
+ atomisp_ospm_dphy_down(isp);
+
+ /* Address later when we worry about the ...field chips */
+ if (IS_ENABLED(CONFIG_PM) && atomisp_mrfld_power_down(isp))
+ dev_err(&dev->dev, "Failed to switch off ISP\n");
+
+atomisp_dev_alloc_fail:
+ pcim_iounmap_regions(dev, 1 << ATOM_ISP_PCI_BAR);
+
+ioremap_fail:
+ return err;
+}
+
+static void atomisp_pci_remove(struct pci_dev *dev)
+{
+	struct atomisp_device *isp = pci_get_drvdata(dev);
+
+ dev_info(&dev->dev, "Removing atomisp driver\n");
+
+ atomisp_drvfs_exit();
+
+ atomisp_acc_cleanup(isp);
+
+ atomisp_css_unload_firmware(isp);
+ hmm_cleanup();
+
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_get_noresume(&dev->dev);
+ cpu_latency_qos_remove_request(&isp->pm_qos);
+
+ atomisp_msi_irq_uninit(isp, dev);
+ atomisp_unregister_entities(isp);
+
+ destroy_workqueue(isp->wdt_work_queue);
+ atomisp_file_input_cleanup(isp);
+
+ release_firmware(isp->firmware);
+
+ hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
+}
+
+static const struct pci_device_id atomisp_pci_tbl[] = {
+ /* Merrifield */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_MRFLD)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_MRFLD_1179)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_MRFLD_117A)},
+ /* Baytrail */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_BYT)},
+ /* Anniedale (Merrifield+ / Moorefield) */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_ANN)},
+ /* Cherrytrail */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_CHT)},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, atomisp_pci_tbl);
+
+static const struct dev_pm_ops atomisp_pm_ops = {
+ .runtime_suspend = atomisp_runtime_suspend,
+ .runtime_resume = atomisp_runtime_resume,
+ .suspend = atomisp_suspend,
+ .resume = atomisp_resume,
+};
+
+static struct pci_driver atomisp_pci_driver = {
+ .driver = {
+ .pm = &atomisp_pm_ops,
+ },
+ .name = "atomisp-isp2",
+ .id_table = atomisp_pci_tbl,
+ .probe = atomisp_pci_probe,
+ .remove = atomisp_pci_remove,
+};
+
+module_pci_driver(atomisp_pci_driver);
+
+MODULE_AUTHOR("Wen Wang <wen.w.wang@intel.com>");
+MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ATOM Platform ISP Driver");
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
new file mode 100644
index 000000000000..87881fa6a698
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef __ATOMISP_V4L2_H__
+#define __ATOMISP_V4L2_H__
+
+struct atomisp_video_pipe;
+struct atomisp_acc_pipe;
+struct v4l2_device;
+struct atomisp_device;
+struct firmware;
+
+int atomisp_video_init(struct atomisp_video_pipe *video, const char *name);
+void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name);
+void atomisp_video_unregister(struct atomisp_video_pipe *video);
+void atomisp_acc_unregister(struct atomisp_acc_pipe *video);
+const struct firmware *atomisp_load_firmware(struct atomisp_device *isp);
+int atomisp_csi_lane_config(struct atomisp_device *isp);
+
+#endif /* __ATOMISP_V4L2_H__ */
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
new file mode 100644
index 000000000000..789a2e68cab8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
@@ -0,0 +1,376 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_CIRCBUF_H
+#define _IA_CSS_CIRCBUF_H
+
+#include <sp.h>
+#include <type_support.h>
+#include <math_support.h>
+#include <assert_support.h>
+#include <platform_support.h>
+#include "ia_css_circbuf_comm.h"
+#include "ia_css_circbuf_desc.h"
+
+/****************************************************************
+ *
+ * Data structures.
+ *
+ ****************************************************************/
+/**
+ * @brief Data structure for the circular buffer.
+ */
+typedef struct ia_css_circbuf_s ia_css_circbuf_t;
+struct ia_css_circbuf_s {
+ ia_css_circbuf_desc_t *desc; /* Pointer to the descriptor of the circbuf */
+ ia_css_circbuf_elem_t *elems; /* an array of elements */
+};
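+
+/*
+ * Typical usage, as an illustrative sketch only (the caller owns both
+ * the element array and the descriptor):
+ *
+ *	ia_css_circbuf_desc_t desc;
+ *	ia_css_circbuf_elem_t elems[8];
+ *	ia_css_circbuf_t cb;
+ *	uint32_t val;
+ *
+ *	ia_css_circbuf_desc_init(&desc, 8);
+ *	ia_css_circbuf_create(&cb, elems, &desc);
+ *	if (!ia_css_circbuf_is_full(&cb))
+ *		ia_css_circbuf_push(&cb, 42);
+ *	if (!ia_css_circbuf_is_empty(&cb))
+ *		val = ia_css_circbuf_pop(&cb);
+ */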
+
+/**
+ * @brief Create the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param elems An array of elements.
+ * @param desc The descriptor; set its size with ia_css_circbuf_desc_init().
+ */
+void ia_css_circbuf_create(
+ ia_css_circbuf_t *cb,
+ ia_css_circbuf_elem_t *elems,
+ ia_css_circbuf_desc_t *desc);
+
+/**
+ * @brief Destroy the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ */
+void ia_css_circbuf_destroy(
+ ia_css_circbuf_t *cb);
+
+/**
+ * @brief Pop a value out of the circular buffer.
+ * Get a value at the head of the circular buffer.
+ * The user should call "ia_css_circbuf_is_empty()"
+ *	to avoid accessing an empty buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the pop-out value.
+ */
+uint32_t ia_css_circbuf_pop(
+ ia_css_circbuf_t *cb);
+
+/**
+ * @brief Extract a value out of the circular buffer.
+ *	Get a value at an arbitrary position in the circular
+ *	buffer. The user should call "ia_css_circbuf_is_empty()"
+ *	to avoid accessing an empty buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param offset The offset from "start" to the target position.
+ *
+ * @return the extracted value.
+ */
+uint32_t ia_css_circbuf_extract(
+ ia_css_circbuf_t *cb,
+ int offset);
+
+/****************************************************************
+ *
+ * Inline functions.
+ *
+ ****************************************************************/
+/**
+ * @brief Set the "val" field in the element.
+ *
+ * @param elem The pointer to the element.
+ * @param val The value to be set.
+ */
+static inline void ia_css_circbuf_elem_set_val(
+ ia_css_circbuf_elem_t *elem,
+ uint32_t val)
+{
+ OP___assert(elem);
+
+ elem->val = val;
+}
+
+/**
+ * @brief Initialize the element.
+ *
+ * @param elem The pointer to the element.
+ */
+static inline void ia_css_circbuf_elem_init(
+ ia_css_circbuf_elem_t *elem)
+{
+ OP___assert(elem);
+ ia_css_circbuf_elem_set_val(elem, 0);
+}
+
+/**
+ * @brief Copy an element.
+ *
+ * @param src The element as the copy source.
+ * @param dest The element as the copy destination.
+ */
+static inline void ia_css_circbuf_elem_cpy(
+ ia_css_circbuf_elem_t *src,
+ ia_css_circbuf_elem_t *dest)
+{
+ OP___assert(src);
+ OP___assert(dest);
+
+ ia_css_circbuf_elem_set_val(dest, src->val);
+}
+
+/**
+ * @brief Get position in the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param base The base position.
+ * @param offset The offset.
+ *
+ * @return the position at offset.
+ */
+static inline uint8_t ia_css_circbuf_get_pos_at_offset(
+ ia_css_circbuf_t *cb,
+ u32 base,
+ int offset)
+{
+ u8 dest;
+
+ OP___assert(cb);
+ OP___assert(cb->desc);
+ OP___assert(cb->desc->size > 0);
+
+	/* step 1: adjust the offset */
+	while (offset < 0)
+		offset += cb->desc->size;
+
+ /* step 2: shift and round by the upper limit */
+ dest = OP_std_modadd(base, offset, cb->desc->size);
+
+ return dest;
+}
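+
+/*
+ * Worked example (illustrative): with size == 8, base == 6 and
+ * offset == -2, the loop first maps the offset to 6, and
+ * OP_std_modadd(6, 6, 8) yields (6 + 6) % 8 == 4, i.e. the position
+ * two slots before 6, with wrap-around.
+ */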
+
+/**
+ * @brief Get the offset between two positions in the circular buffer.
+ * Get the offset from the source position to the terminal position,
+ * along the direction in which the new elements come in.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param src_pos The source position.
+ * @param dest_pos The terminal position.
+ *
+ * @return the offset.
+ */
+static inline int ia_css_circbuf_get_offset(
+ ia_css_circbuf_t *cb,
+ u32 src_pos,
+ uint32_t dest_pos)
+{
+ int offset;
+
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ offset = (int)(dest_pos - src_pos);
+ offset += (offset < 0) ? cb->desc->size : 0;
+
+ return offset;
+}
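+
+/*
+ * Worked example (illustrative): with size == 8, src_pos == 6 and
+ * dest_pos == 1, dest_pos - src_pos == -5, so size is added and the
+ * returned offset is 3: dest_pos is three insertions ahead of src_pos.
+ */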
+
+/**
+ * @brief Get the maximum number of elements.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the maximum number of elements.
+ *
+ * TODO: Test this API.
+ */
+static inline uint32_t ia_css_circbuf_get_size(
+ ia_css_circbuf_t *cb)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ return cb->desc->size;
+}
+
+/**
+ * @brief Get the number of available elements.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the number of available elements.
+ */
+static inline uint32_t ia_css_circbuf_get_num_elems(
+ ia_css_circbuf_t *cb)
+{
+ int num;
+
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ num = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end);
+
+ return (uint32_t)num;
+}
+
+/**
+ * @brief Test if the circular buffer is empty.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return
+ * - true when it is empty.
+ * - false when it is not empty.
+ */
+static inline bool ia_css_circbuf_is_empty(
+ ia_css_circbuf_t *cb)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ return ia_css_circbuf_desc_is_empty(cb->desc);
+}
+
+/**
+ * @brief Test if the circular buffer is full.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return
+ * - true when it is full.
+ * - false when it is not full.
+ */
+static inline bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ return ia_css_circbuf_desc_is_full(cb->desc);
+}
+
+/**
+ * @brief Write a new element into the circular buffer.
+ * Write a new element WITHOUT checking whether the
+ *	circular buffer is full or not, so it can overwrite
+ *	the oldest element when the buffer is full (an assert
+ *	below guards against this in debug builds).
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param elem The new element.
+ */
+static inline void ia_css_circbuf_write(
+ ia_css_circbuf_t *cb,
+ ia_css_circbuf_elem_t elem)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+	/* Cannot continue as the queue is full */
+ assert(!ia_css_circbuf_is_full(cb));
+
+ ia_css_circbuf_elem_cpy(&elem, &cb->elems[cb->desc->end]);
+
+ cb->desc->end = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, 1);
+}
+
+/**
+ * @brief Push a value in the circular buffer.
+ * Put a new value at the tail of the circular buffer.
+ * The user should call "ia_css_circbuf_is_full()"
+ *	to avoid accessing a full buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param val The value to be pushed in.
+ */
+static inline void ia_css_circbuf_push(
+ ia_css_circbuf_t *cb,
+ uint32_t val)
+{
+ ia_css_circbuf_elem_t elem;
+
+ OP___assert(cb);
+
+ /* set up an element */
+ ia_css_circbuf_elem_init(&elem);
+ ia_css_circbuf_elem_set_val(&elem, val);
+
+ /* write the element into the buffer */
+ ia_css_circbuf_write(cb, elem);
+}
+
+/**
+ * @brief Get the number of free elements.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return: The number of free elements.
+ */
+static inline uint32_t ia_css_circbuf_get_free_elems(
+ ia_css_circbuf_t *cb)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ return ia_css_circbuf_desc_get_free_elems(cb->desc);
+}
+
+/**
+ * @brief Peek an element in Circular Buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param offset Offset to the element.
+ *
+ * @return the element's value.
+ */
+uint32_t ia_css_circbuf_peek(
+ ia_css_circbuf_t *cb,
+ int offset);
+
+/**
+ * @brief Get an element in Circular Buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param offset Offset to the element.
+ *
+ * @return the element's value.
+ */
+uint32_t ia_css_circbuf_peek_from_start(
+ ia_css_circbuf_t *cb,
+ int offset);
+
+/**
+ * @brief Increase the size of a circular buffer.
+ *	Use with CAUTION: this was added only to support/fix an
+ *	issue with increasing the size for the tagger.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param sz_delta delta increase for the new size
+ * @param elems (optional) pointers to new additional elements
+ *		the cb element array size will not be increased dynamically,
+ *		but new elements should be added at the end of the existing
+ *		cb element array, which is of max_size >= new size
+ *
+ * @return true on successfully increasing the size
+ * false on failure
+ */
+bool ia_css_circbuf_increase_size(
+ ia_css_circbuf_t *cb,
+ unsigned int sz_delta,
+ ia_css_circbuf_elem_t *elems);
+
+#endif /*_IA_CSS_CIRCBUF_H */
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h
new file mode 100644
index 000000000000..09b049b3bd15
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h
@@ -0,0 +1,58 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_CIRCBUF_COMM_H
+#define _IA_CSS_CIRCBUF_COMM_H
+
+#include <type_support.h> /* uint8_t, uint32_t */
+
+#define IA_CSS_CIRCBUF_PADDING 1 /* The circular buffer is implemented in lock-less manner, wherein
+ * the head and tail can advance independently without any locks.
+ * But to achieve this, an extra buffer element is required to detect
+ * queue full & empty conditions, wherein the tail trails the head for
+ * full and is equal to head for empty condition. This causes 1 buffer
+ * not being available for use.
+ */
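+
+/*
+ * Worked example (illustrative): with size == 4, start == 0 and
+ * end == 3, ia_css_circbuf_desc_is_full() reports full because
+ * (end + 1) % size == start; only size - 1 == 3 slots ever hold data,
+ * which is the one-element padding described above.
+ */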
+
+/****************************************************************
+ *
+ * Portable Data structures
+ *
+ ****************************************************************/
+/**
+ * @brief Data structure for the circular buffer descriptor.
+ */
+typedef struct ia_css_circbuf_desc_s ia_css_circbuf_desc_t;
+struct ia_css_circbuf_desc_s {
+ u8 size; /* the maximum number of elements*/
+ u8 step; /* number of bytes per element */
+ u8 start; /* index of the oldest element */
+ u8 end; /* index at which to write the new element */
+};
+
+#define SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT \
+ (4 * sizeof(uint8_t))
+
+/**
+ * @brief Data structure for the circular buffer element.
+ */
+typedef struct ia_css_circbuf_elem_s ia_css_circbuf_elem_t;
+struct ia_css_circbuf_elem_s {
+ u32 val; /* the value stored in the element */
+};
+
+#define SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT \
+ (sizeof(uint32_t))
+
+#endif /*_IA_CSS_CIRCBUF_COMM_H*/
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h
new file mode 100644
index 000000000000..47c488cec8ad
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h
@@ -0,0 +1,173 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_CIRCBUF_DESC_H_
+#define _IA_CSS_CIRCBUF_DESC_H_
+
+#include <type_support.h>
+#include <math_support.h>
+#include <platform_support.h>
+#include <sp.h>
+#include "ia_css_circbuf_comm.h"
+/****************************************************************
+ *
+ * Inline functions.
+ *
+ ****************************************************************/
+/**
+ * @brief Test if the circular buffer is empty.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ *
+ * @return
+ * - true when it is empty.
+ * - false when it is not empty.
+ */
+static inline bool ia_css_circbuf_desc_is_empty(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ OP___assert(cb_desc);
+ return (cb_desc->end == cb_desc->start);
+}
+
+/**
+ * @brief Test if the circular buffer descriptor is full.
+ *
+ * @param cb_desc The pointer to the circular buffer
+ * descriptor.
+ *
+ * @return
+ * - true when it is full.
+ * - false when it is not full.
+ */
+static inline bool ia_css_circbuf_desc_is_full(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ OP___assert(cb_desc);
+ return (OP_std_modadd(cb_desc->end, 1, cb_desc->size) == cb_desc->start);
+}
+
+/**
+ * @brief Initialize the circular buffer descriptor
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor
+ * @param size The size of the circular buffer
+ */
+static inline void ia_css_circbuf_desc_init(
+ ia_css_circbuf_desc_t *cb_desc,
+ int8_t size)
+{
+ OP___assert(cb_desc);
+ cb_desc->size = size;
+}
+
+/**
+ * @brief Get a position in the circular buffer descriptor.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ * @param base The base position.
+ * @param offset The offset.
+ *
+ * @return the position in the circular buffer descriptor.
+ */
+static inline uint8_t ia_css_circbuf_desc_get_pos_at_offset(
+ ia_css_circbuf_desc_t *cb_desc,
+ u32 base,
+ int offset)
+{
+ u8 dest;
+
+ OP___assert(cb_desc);
+ OP___assert(cb_desc->size > 0);
+
+ /* step 1: adjust the offset */
+	while (offset < 0)
+		offset += cb_desc->size;
+
+ /* step 2: shift and round by the upper limit */
+ dest = OP_std_modadd(base, offset, cb_desc->size);
+
+ return dest;
+}
+
+/**
+ * @brief Get the offset between two positions in the circular buffer
+ * descriptor.
+ * Get the offset from the source position to the terminal position,
+ * along the direction in which the new elements come in.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ * @param src_pos The source position.
+ * @param dest_pos The terminal position.
+ *
+ * @return the offset.
+ */
+static inline int ia_css_circbuf_desc_get_offset(
+ ia_css_circbuf_desc_t *cb_desc,
+ u32 src_pos,
+ uint32_t dest_pos)
+{
+ int offset;
+
+ OP___assert(cb_desc);
+
+ offset = (int)(dest_pos - src_pos);
+ offset += (offset < 0) ? cb_desc->size : 0;
+
+ return offset;
+}
+
+/**
+ * @brief Get the number of available elements.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ *
+ * @return The number of available elements.
+ */
+static inline uint32_t ia_css_circbuf_desc_get_num_elems(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ int num;
+
+ OP___assert(cb_desc);
+
+ num = ia_css_circbuf_desc_get_offset(cb_desc,
+ cb_desc->start,
+ cb_desc->end);
+
+ return (uint32_t)num;
+}
+
+/**
+ * @brief Get the number of free elements.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ *
+ * @return: The number of free elements.
+ */
+static inline uint32_t ia_css_circbuf_desc_get_free_elems(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ u32 num;
+
+ OP___assert(cb_desc);
+
+ num = ia_css_circbuf_desc_get_offset(cb_desc,
+ cb_desc->start,
+ cb_desc->end);
+
+ return (cb_desc->size - num);
+}
+#endif /*_IA_CSS_CIRCBUF_DESC_H_ */
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c b/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
new file mode 100644
index 000000000000..78e98268e188
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
@@ -0,0 +1,320 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_circbuf.h"
+
+#include <assert_support.h>
+
+/**********************************************************************
+ *
+ * Forward declarations.
+ *
+ **********************************************************************/
+/*
+ * @brief Read the oldest element from the circular buffer.
+ *	Read the oldest element WITHOUT checking whether the
+ *	circular buffer is empty or not. The oldest element is
+ *	also removed from the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the oldest element.
+ */
+static inline ia_css_circbuf_elem_t
+ia_css_circbuf_read(ia_css_circbuf_t *cb);
+
+/*
+ * @brief Shift a chunk of elements in the circular buffer.
+ * A chunk of elements (i.e. the ones from the "start" position
+ * to the "chunk_src" position) are shifted in the circular buffer,
+ * along the direction of new elements coming.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param chunk_src The position at which the first element in the chunk is.
+ * @param chunk_dest The position to which the first element in the chunk would be shifted.
+ */
+static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
+ u32 chunk_src,
+ uint32_t chunk_dest);
+
+/*
+ * @brief Get the "val" field in the element.
+ *
+ * @param elem The pointer to the element.
+ *
+ * @return the "val" field.
+ */
+static inline uint32_t
+ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem);
+
+/**********************************************************************
+ *
+ * Non-inline functions.
+ *
+ **********************************************************************/
+/*
+ * @brief Create the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+void
+ia_css_circbuf_create(ia_css_circbuf_t *cb,
+ ia_css_circbuf_elem_t *elems,
+ ia_css_circbuf_desc_t *desc)
+{
+ u32 i;
+
+ OP___assert(desc);
+
+ cb->desc = desc;
+ /* Initialize to defaults */
+ cb->desc->start = 0;
+ cb->desc->end = 0;
+ cb->desc->step = 0;
+
+ for (i = 0; i < cb->desc->size; i++)
+ ia_css_circbuf_elem_init(&elems[i]);
+
+ cb->elems = elems;
+}
+
+/*
+ * @brief Destroy the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+void ia_css_circbuf_destroy(ia_css_circbuf_t *cb)
+{
+ cb->desc = NULL;
+
+ cb->elems = NULL;
+}
+
+/*
+ * @brief Pop a value out of the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb)
+{
+ u32 ret;
+ ia_css_circbuf_elem_t elem;
+
+ assert(!ia_css_circbuf_is_empty(cb));
+
+ /* read an element from the buffer */
+ elem = ia_css_circbuf_read(cb);
+ ret = ia_css_circbuf_elem_get_val(&elem);
+ return ret;
+}
+
+/*
+ * @brief Extract a value out of the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset)
+{
+ int max_offset;
+ u32 val;
+ u32 pos;
+ u32 src_pos;
+ u32 dest_pos;
+
+	/* get the maximum offset */
+ max_offset = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end);
+ max_offset--;
+
+ /*
+ * Step 1: When the target element is at the "start" position.
+ */
+ if (offset == 0) {
+ val = ia_css_circbuf_pop(cb);
+ return val;
+ }
+
+ /*
+ * Step 2: When the target element is out of the range.
+ */
+ if (offset > max_offset) {
+ val = 0;
+ return val;
+ }
+
+ /*
+ * Step 3: When the target element is between the "start" and
+ * "end" position.
+ */
+ /* get the position of the target element */
+ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset);
+
+ /* get the value from the target element */
+ val = ia_css_circbuf_elem_get_val(&cb->elems[pos]);
+
+ /* shift the elements */
+ src_pos = ia_css_circbuf_get_pos_at_offset(cb, pos, -1);
+ dest_pos = pos;
+ ia_css_circbuf_shift_chunk(cb, src_pos, dest_pos);
+
+ return val;
+}
+
+/*
+ * @brief Peek an element from the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset)
+{
+ int pos;
+
+ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, offset);
+
+ /* get the value at the position */
+ return cb->elems[pos].val;
+}
+
+/*
+ * @brief Get the value of an element from the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset)
+{
+ int pos;
+
+ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset);
+
+ /* get the value at the position */
+ return cb->elems[pos].val;
+}
+
+/* @brief Increase the size of a circular buffer.
+ * Use with CAUTION: this was added only to support/fix an
+ * issue with increasing the size for the tagger.
+ * Please refer to "ia_css_circbuf.h" for details.
+ */
+bool ia_css_circbuf_increase_size(
+ ia_css_circbuf_t *cb,
+ unsigned int sz_delta,
+ ia_css_circbuf_elem_t *elems)
+{
+ u8 curr_size;
+ u8 curr_end;
+ unsigned int i = 0;
+
+ if (!cb || sz_delta == 0)
+ return false;
+
+ curr_size = cb->desc->size;
+ curr_end = cb->desc->end;
+	/*
+	 * We assume cb was predefined as global to allow the increase
+	 * in size.
+	 */
+ /* FM: are we sure this cannot cause size to become too big? */
+ if (((uint8_t)(cb->desc->size + (uint8_t)sz_delta) > cb->desc->size) &&
+ ((uint8_t)sz_delta == sz_delta))
+ cb->desc->size += (uint8_t)sz_delta;
+ else
+ return false; /* overflow in size */
+
+	/*
+	 * If elems are passed, update them; else we assume it has been
+	 * taken care of before calling this function.
+	 */
+ if (elems) {
+		/*
+		 * The cb element array size will not be increased
+		 * dynamically, but pointers to new elements can be added
+		 * at the end of the existing predefined cb element array
+		 * of size >= new size, if not already added.
+		 */
+ for (i = curr_size; i < cb->desc->size; i++)
+ cb->elems[i] = elems[i - curr_size];
+ }
+ /* Fix Start / End */
+ if (curr_end < cb->desc->start) {
+ if (curr_end == 0) {
+ /* Easily fix End */
+ cb->desc->end = curr_size;
+ } else {
+ /* Move elements and fix Start*/
+ ia_css_circbuf_shift_chunk(cb,
+ curr_size - 1,
+ curr_size + sz_delta - 1);
+ }
+ }
+
+ return true;
+}
+
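+/*
+ * Worked example for the start/end fix-up in
+ * ia_css_circbuf_increase_size() above (illustrative): with size == 4,
+ * start == 2 and end == 0 the live elements wrap past the end of the
+ * array, so after growing, end is moved to the old size (the first
+ * newly added slot) so that subsequent writes land in the new free
+ * region between the old end and start.
+ */
+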
+/****************************************************************
+ *
+ * Inline functions.
+ *
+ ****************************************************************/
+/*
+ * @brief Get the "val" field in the element.
+ * Refer to "Forward declarations" for details.
+ */
+static inline uint32_t
+ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem)
+{
+ return elem->val;
+}
+
+/*
+ * @brief Read the oldest element from the circular buffer.
+ * Refer to "Forward declarations" for details.
+ */
+static inline ia_css_circbuf_elem_t
+ia_css_circbuf_read(ia_css_circbuf_t *cb)
+{
+ ia_css_circbuf_elem_t elem;
+
+ /* get the element from the target position */
+ elem = cb->elems[cb->desc->start];
+
+ /* clear the target position */
+ ia_css_circbuf_elem_init(&cb->elems[cb->desc->start]);
+
+ /* adjust the "start" position */
+ cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, 1);
+ return elem;
+}
+
+/*
+ * @brief Shift a chunk of elements in the circular buffer.
+ * Refer to "Forward declarations" for details.
+ */
+static inline void
+ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
+ u32 chunk_src, uint32_t chunk_dest)
+{
+ int chunk_offset;
+ int chunk_sz;
+ int i;
+
+ /* get the chunk offset and size */
+ chunk_offset = ia_css_circbuf_get_offset(cb,
+ chunk_src, chunk_dest);
+ chunk_sz = ia_css_circbuf_get_offset(cb, cb->desc->start, chunk_src) + 1;
+
+ /* shift each element to its terminal position */
+ for (i = 0; i < chunk_sz; i++) {
+ /* copy the element from the source to the destination */
+ ia_css_circbuf_elem_cpy(&cb->elems[chunk_src],
+ &cb->elems[chunk_dest]);
+
+ /* clear the source position */
+ ia_css_circbuf_elem_init(&cb->elems[chunk_src]);
+
+ /* adjust the source/terminal positions */
+ chunk_src = ia_css_circbuf_get_pos_at_offset(cb, chunk_src, -1);
+ chunk_dest = ia_css_circbuf_get_pos_at_offset(cb, chunk_dest, -1);
+ }
+
+ /* adjust the index "start" */
+ cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start,
+ chunk_offset);
+}
diff --git a/drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h b/drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h
new file mode 100644
index 000000000000..8cf3b0e0cc39
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h
@@ -0,0 +1,83 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_REFCOUNT_H_
+#define _IA_CSS_REFCOUNT_H_
+
+#include <type_support.h>
+#include <system_types.h>
+#include <ia_css_err.h>
+
+typedef void (*clear_func)(hrt_vaddress ptr);
+
+/*! \brief Function for initializing refcount list
+ *
+ * \param[in] size Size of the refcount list.
+ * \return ia_css_err
+ */
+enum ia_css_err ia_css_refcount_init(uint32_t size);
+
+/*! \brief Function for de-initializing refcount list
+ *
+ * \return None
+ */
+void ia_css_refcount_uninit(void);
+
+/*! \brief Function for increasing reference by 1.
+ *
+ * \param[in] id ID of the object.
+ * \param[in] ptr Data of the object (ptr).
+ * \return hrt_vaddress (saved address)
+ */
+hrt_vaddress ia_css_refcount_increment(s32 id, hrt_vaddress ptr);
+
+/*! \brief Function for decreasing the reference count by 1.
+ *
+ * \param[in] id ID of the object.
+ * \param[in] ptr Data of the object (ptr).
+ * \return
+ *	- true, if it is successful.
+ *	- false, otherwise.
+ */
+bool ia_css_refcount_decrement(s32 id, hrt_vaddress ptr);
+
+/*! \brief Function to check if reference count is 1.
+ *
+ * \param[in] ptr Data of the object (ptr).
+ * \return
+ *	- true, if the reference count is 1.
+ *	- false, otherwise.
+ */
+bool ia_css_refcount_is_single(hrt_vaddress ptr);
+
+/*! \brief Function to clear reference list objects.
+ *
+ * \param[in] id ID of the object.
+ * \param[in] clear_func function to be run to free reference objects.
+ *
+ * \return None
+ */
+void ia_css_refcount_clear(s32 id,
+ clear_func clear_func_ptr);
+
+/*! \brief Function to verify if object is valid
+ *
+ * \param[in] ptr Data of the object (ptr)
+ * \return
+ *	- true, if valid
+ *	- false, if invalid
+ */
+bool ia_css_refcount_is_valid(hrt_vaddress ptr);
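+
+/*
+ * Typical usage, as an illustrative sketch ("my_id" and "addr" are
+ * hypothetical):
+ *
+ *	ia_css_refcount_init(16);
+ *	addr = ia_css_refcount_increment(my_id, addr);
+ *	...
+ *	ia_css_refcount_decrement(my_id, addr);
+ *	ia_css_refcount_uninit();
+ *
+ * The buffer behind "addr" is freed (hmm_free) once its count drops
+ * to zero.
+ */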
+
+#endif /* _IA_CSS_REFCOUNT_H_ */
diff --git a/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c b/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c
new file mode 100644
index 000000000000..e39cc2132953
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c
@@ -0,0 +1,275 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_refcount.h"
+#include "memory_access/memory_access.h"
+#include "sh_css_defs.h"
+
+#include "platform_support.h"
+
+#include "assert_support.h"
+
+#include "ia_css_debug.h"
+
+/*
+ * TODO: enable for other memory as well;
+ *	 now only for hrt_vaddress
+ */
+struct ia_css_refcount_entry {
+ u32 count;
+ hrt_vaddress data;
+ s32 id;
+};
+
+struct ia_css_refcount_list {
+ u32 size;
+ struct ia_css_refcount_entry *items;
+};
+
+static struct ia_css_refcount_list myrefcount;
+
+static struct ia_css_refcount_entry *refcount_find_entry(hrt_vaddress ptr,
+ bool firstfree)
+{
+ u32 i;
+
+ if (ptr == 0)
+ return NULL;
+ if (!myrefcount.items) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+				    "refcount_find_entry(): Ref count not initialized!\n");
+ return NULL;
+ }
+
+	for (i = 0; i < myrefcount.size; i++) {
+		if (myrefcount.items[i].data == 0) {
+			if (firstfree) {
+				/* for new entry */
+				return &myrefcount.items[i];
+			}
+		}
+		if (myrefcount.items[i].data == ptr) {
+			/* found entry */
+			return &myrefcount.items[i];
+		}
+	}
+ return NULL;
+}
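+
+/*
+ * Note (illustrative): the entry table is scanned linearly, so lookup
+ * cost grows with the size passed to ia_css_refcount_init(); a free
+ * slot is recognized by a zero data address.
+ */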
+
+enum ia_css_err ia_css_refcount_init(uint32_t size)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if (size == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_init(): Size of 0 for Ref count init!\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (myrefcount.items) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_init(): Ref count is already initialized\n");
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ myrefcount.items =
+ sh_css_malloc(sizeof(struct ia_css_refcount_entry) * size);
+ if (!myrefcount.items)
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ if (err == IA_CSS_SUCCESS) {
+ memset(myrefcount.items, 0,
+ sizeof(struct ia_css_refcount_entry) * size);
+ myrefcount.size = size;
+ }
+ return err;
+}
+
+void ia_css_refcount_uninit(void)
+{
+ struct ia_css_refcount_entry *entry;
+ u32 i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_uninit() entry\n");
+ for (i = 0; i < myrefcount.size; i++) {
+		/*
+		 * The driver verifier tool has issues with &arr[i] and
+		 * prefers arr + i; as these are actually equivalent, the
+		 * line below uses + i.
+		 */
+ entry = myrefcount.items + i;
+ if (entry->data != mmgr_NULL) {
+ /* ia_css_debug_dtrace(IA_CSS_DBG_TRACE,
+ "ia_css_refcount_uninit: freeing (%x)\n",
+ entry->data);*/
+ hmm_free(entry->data);
+ entry->data = mmgr_NULL;
+ entry->count = 0;
+ entry->id = 0;
+ }
+ }
+ sh_css_free(myrefcount.items);
+ myrefcount.items = NULL;
+ myrefcount.size = 0;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_uninit() leave\n");
+}
+
+hrt_vaddress ia_css_refcount_increment(s32 id, hrt_vaddress ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ if (ptr == mmgr_NULL)
+ return ptr;
+
+ entry = refcount_find_entry(ptr, false);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_increment(%x) 0x%x\n", id, ptr);
+
+ if (!entry) {
+ entry = refcount_find_entry(ptr, true);
+ assert(entry);
+ if (!entry)
+ return mmgr_NULL;
+ entry->id = id;
+ }
+
+ if (entry->id != id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_refcount_increment(): Ref count IDS do not match!\n");
+ return mmgr_NULL;
+ }
+
+	if (entry->data == ptr) {
+		entry->count += 1;
+	} else if (entry->data == mmgr_NULL) {
+		entry->data = ptr;
+		entry->count = 1;
+	} else {
+		return mmgr_NULL;
+	}
+
+ return ptr;
+}
+
+bool ia_css_refcount_decrement(s32 id, hrt_vaddress ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_decrement(%x) 0x%x\n", id, ptr);
+
+ if (ptr == mmgr_NULL)
+ return false;
+
+ entry = refcount_find_entry(ptr, false);
+
+ if (entry) {
+ if (entry->id != id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_refcount_decrement(): Ref count IDS do not match!\n");
+ return false;
+ }
+ if (entry->count > 0) {
+ entry->count -= 1;
+ if (entry->count == 0) {
+ /* ia_css_debug_dtrace(IA_CSS_DBEUG_TRACE,
+ "ia_css_refcount_decrement: freeing\n");*/
+ hmm_free(ptr);
+ entry->data = mmgr_NULL;
+ entry->id = 0;
+ }
+ return true;
+ }
+ }
+
+	/*
+	 * SHOULD NOT HAPPEN: ptr not managed by refcount, or no longer
+	 * valid.
+	 */
+ if (entry)
+ IA_CSS_ERROR("id %x, ptr 0x%x entry %p entry->id %x entry->count %d\n",
+ id, ptr, entry, entry->id, entry->count);
+ else
+ IA_CSS_ERROR("entry NULL\n");
+ assert(false);
+
+ return false;
+}
+
+bool ia_css_refcount_is_single(hrt_vaddress ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ if (ptr == mmgr_NULL)
+ return false;
+
+ entry = refcount_find_entry(ptr, false);
+
+ if (entry)
+ return (entry->count == 1);
+
+ return true;
+}
+
+void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr)
+{
+ struct ia_css_refcount_entry *entry;
+ u32 i;
+ u32 count = 0;
+
+ assert(clear_func_ptr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_clear(%x)\n",
+ id);
+
+ for (i = 0; i < myrefcount.size; i++) {
+		/*
+		 * The driver verifier tool has issues with &arr[i] and
+		 * prefers arr + i; as these are actually equivalent, the
+		 * line below uses + i.
+		 */
+ entry = myrefcount.items + i;
+ if ((entry->data != mmgr_NULL) && (entry->id == id)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_clear: %x: 0x%x\n",
+ id, entry->data);
+ if (clear_func_ptr) {
+ /* clear using provided function */
+ clear_func_ptr(entry->data);
+ } else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_clear: using hmm_free: no clear_func\n");
+ hmm_free(entry->data);
+ }
+
+ if (entry->count != 0) {
+ IA_CSS_WARNING("Ref count for entry %x is not zero!", entry->id);
+ }
+
+ assert(entry->count == 0);
+
+ entry->data = mmgr_NULL;
+ entry->count = 0;
+ entry->id = 0;
+ count++;
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_clear(%x): cleared %d\n", id,
+ count);
+}
+
+bool ia_css_refcount_is_valid(hrt_vaddress ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ if (ptr == mmgr_NULL)
+ return false;
+
+ entry = refcount_find_entry(ptr, false);
+
+	return entry != NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/bits.h b/drivers/staging/media/atomisp/pci/bits.h
new file mode 100644
index 000000000000..c6d2a5cba213
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/bits.h
@@ -0,0 +1,104 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_BITS_H
+#define _HRT_BITS_H
+
+#include "defs.h"
+
+#define _hrt_ones(n) HRTCAT(_hrt_ones_, n)
+#define _hrt_ones_0x0 0x00000000U
+#define _hrt_ones_0x1 0x00000001U
+#define _hrt_ones_0x2 0x00000003U
+#define _hrt_ones_0x3 0x00000007U
+#define _hrt_ones_0x4 0x0000000FU
+#define _hrt_ones_0x5 0x0000001FU
+#define _hrt_ones_0x6 0x0000003FU
+#define _hrt_ones_0x7 0x0000007FU
+#define _hrt_ones_0x8 0x000000FFU
+#define _hrt_ones_0x9 0x000001FFU
+#define _hrt_ones_0xA 0x000003FFU
+#define _hrt_ones_0xB 0x000007FFU
+#define _hrt_ones_0xC 0x00000FFFU
+#define _hrt_ones_0xD 0x00001FFFU
+#define _hrt_ones_0xE 0x00003FFFU
+#define _hrt_ones_0xF 0x00007FFFU
+#define _hrt_ones_0x10 0x0000FFFFU
+#define _hrt_ones_0x11 0x0001FFFFU
+#define _hrt_ones_0x12 0x0003FFFFU
+#define _hrt_ones_0x13 0x0007FFFFU
+#define _hrt_ones_0x14 0x000FFFFFU
+#define _hrt_ones_0x15 0x001FFFFFU
+#define _hrt_ones_0x16 0x003FFFFFU
+#define _hrt_ones_0x17 0x007FFFFFU
+#define _hrt_ones_0x18 0x00FFFFFFU
+#define _hrt_ones_0x19 0x01FFFFFFU
+#define _hrt_ones_0x1A 0x03FFFFFFU
+#define _hrt_ones_0x1B 0x07FFFFFFU
+#define _hrt_ones_0x1C 0x0FFFFFFFU
+#define _hrt_ones_0x1D 0x1FFFFFFFU
+#define _hrt_ones_0x1E 0x3FFFFFFFU
+#define _hrt_ones_0x1F 0x7FFFFFFFU
+#define _hrt_ones_0x20 0xFFFFFFFFU
+
+#define _hrt_ones_0 _hrt_ones_0x0
+#define _hrt_ones_1 _hrt_ones_0x1
+#define _hrt_ones_2 _hrt_ones_0x2
+#define _hrt_ones_3 _hrt_ones_0x3
+#define _hrt_ones_4 _hrt_ones_0x4
+#define _hrt_ones_5 _hrt_ones_0x5
+#define _hrt_ones_6 _hrt_ones_0x6
+#define _hrt_ones_7 _hrt_ones_0x7
+#define _hrt_ones_8 _hrt_ones_0x8
+#define _hrt_ones_9 _hrt_ones_0x9
+#define _hrt_ones_10 _hrt_ones_0xA
+#define _hrt_ones_11 _hrt_ones_0xB
+#define _hrt_ones_12 _hrt_ones_0xC
+#define _hrt_ones_13 _hrt_ones_0xD
+#define _hrt_ones_14 _hrt_ones_0xE
+#define _hrt_ones_15 _hrt_ones_0xF
+#define _hrt_ones_16 _hrt_ones_0x10
+#define _hrt_ones_17 _hrt_ones_0x11
+#define _hrt_ones_18 _hrt_ones_0x12
+#define _hrt_ones_19 _hrt_ones_0x13
+#define _hrt_ones_20 _hrt_ones_0x14
+#define _hrt_ones_21 _hrt_ones_0x15
+#define _hrt_ones_22 _hrt_ones_0x16
+#define _hrt_ones_23 _hrt_ones_0x17
+#define _hrt_ones_24 _hrt_ones_0x18
+#define _hrt_ones_25 _hrt_ones_0x19
+#define _hrt_ones_26 _hrt_ones_0x1A
+#define _hrt_ones_27 _hrt_ones_0x1B
+#define _hrt_ones_28 _hrt_ones_0x1C
+#define _hrt_ones_29 _hrt_ones_0x1D
+#define _hrt_ones_30 _hrt_ones_0x1E
+#define _hrt_ones_31 _hrt_ones_0x1F
+#define _hrt_ones_32 _hrt_ones_0x20
+
+#define _hrt_mask(b, n) \
+ (_hrt_ones(n) << (b))
+#define _hrt_get_bits(w, b, n) \
+ (((w) >> (b)) & _hrt_ones(n))
+#define _hrt_set_bits(w, b, n, v) \
+ (((w) & ~_hrt_mask(b, n)) | (((v) & _hrt_ones(n)) << (b)))
+#define _hrt_get_bit(w, b) \
+ (((w) >> (b)) & 1)
+#define _hrt_set_bit(w, b, v) \
+ (((w) & (~(1 << (b)))) | (((v) & 1) << (b)))
+#define _hrt_set_lower_half(w, v) \
+ _hrt_set_bits(w, 0, 16, v)
+#define _hrt_set_upper_half(w, v) \
+ _hrt_set_bits(w, 16, 16, v)
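+
+/* Worked examples (illustrative, assuming 32-bit unsigned arithmetic):
+ *
+ * _hrt_ones(4) expands to _hrt_ones_0x4, i.e. 0x0000000FU
+ * _hrt_mask(4, 8) == 0x00000FF0
+ * _hrt_get_bits(0xABCD1234, 8, 8) == 0x12
+ * _hrt_set_bits(0x00000000, 4, 8, 0x5A) == 0x000005A0
+ */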
+
+#endif /* _HRT_BITS_H */
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h
new file mode 100644
index 000000000000..551e8d7c5003
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h
@@ -0,0 +1,297 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_BINARYDESC_H__
+#define __IA_CSS_PIPE_BINARYDESC_H__
+
+#include <ia_css_types.h> /* ia_css_pipe */
+#include <ia_css_frame_public.h> /* ia_css_frame_info */
+#include <ia_css_binary.h> /* ia_css_binary_descr */
+
+/* @brief Get a binary descriptor for copy.
+ *
+ * @param[in] pipe
+ * @param[out] copy_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_copy_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *copy_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for vfpp.
+ *
+ * @param[in] pipe
+ * @param[out] vf_pp_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_vfpp_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *vf_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get numerator and denominator of bayer downscaling factor.
+ *
+ * @param[in] bds_factor: The bayer downscaling factor.
+ * (= The bds_factor member in the sh_css_bds_factor structure.)
+ * @param[out] bds_factor_numerator: The numerator of the bayer downscaling factor.
+ * (= The numerator member in the sh_css_bds_factor structure.)
+ * @param[out] bds_factor_denominator: The denominator of the bayer downscaling factor.
+ * (= The denominator member in the sh_css_bds_factor structure.)
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
+ unsigned int bds_factor,
+ unsigned int *bds_factor_numerator,
+ unsigned int *bds_factor_denominator);
+
+/* @brief Get a binary descriptor for preview stage.
+ *
+ * @param[in] pipe
+ * @param[out] preview_descr
+ * @param[in/out] in_info
+ * @param[in/out] bds_out_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipe_get_preview_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *preview_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for video stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] video_descr
+ * @param[in/out] in_info
+ * @param[in/out] bds_out_info
+ * @param[in/out] vf_info
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipe_get_video_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *video_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ int stream_config_left_padding);
+
+/* @brief Get a binary descriptor for yuv scaler stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] yuv_scaler_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] internal_out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_yuvscaler_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *yuv_scaler_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *internal_out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for capture pp stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] capture_pp_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_capturepp_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *capture_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for primary capture.
+ *
+ * @param[in] pipe
+ * @param[out] prim_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_primary_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *prim_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int stage_idx);
+
+/* @brief Get a binary descriptor for pre gdc stage.
+ *
+ * @param[in] pipe
+ * @param[out] pre_gdc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_pre_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for gdc stage.
+ *
+ * @param[in] pipe
+ * @param[out] gdc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for post gdc.
+ *
+ * @param[in] pipe
+ * @param[out] post_gdc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_post_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *post_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for de.
+ *
+ * @param[in] pipe
+ * @param[out] pre_de_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_pre_de_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_de_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for pre anr stage.
+ *
+ * @param[in] pipe
+ * @param[out] pre_anr_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_pre_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for ANR stage.
+ *
+ * @param[in] pipe
+ * @param[out] anr_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for post anr stage.
+ *
+ * @param[in] pipe
+ * @param[out] post_anr_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_post_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *post_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for ldc stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] ldc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_ldc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *ldc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Calculates the required BDS factor
+ *
+ * @param[in] input_res
+ * @param[in] output_res
+ * @param[out] bds_factor
+ * @return IA_CSS_SUCCESS or error code upon error.
+ */
+enum ia_css_err binarydesc_calculate_bds_factor(
+ struct ia_css_resolution input_res,
+ struct ia_css_resolution output_res,
+ unsigned int *bds_factor);
+
+#endif /* __IA_CSS_PIPE_BINARYDESC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h
new file mode 100644
index 000000000000..e58c9190310d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h
@@ -0,0 +1,51 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_STAGEDESC_H__
+#define __IA_CSS_PIPE_STAGEDESC_H__
+
+#include <ia_css_acc_types.h> /* ia_css_fw_info */
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+#include "ia_css_pipeline.h"
+#include "ia_css_pipeline_common.h"
+
+void ia_css_pipe_get_generic_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame);
+
+void ia_css_pipe_get_firmwares_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame,
+ const struct ia_css_fw_info *fw,
+ unsigned int mode);
+
+void ia_css_pipe_get_acc_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_fw_info *fw);
+
+void ia_css_pipe_get_sp_func_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_frame *out_frame,
+ enum ia_css_pipeline_stage_sp_func sp_func,
+ unsigned int max_input_width);
+
+#endif /* __IA_CSS_PIPE_STAGEDESC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h
new file mode 100644
index 000000000000..ad60210abe95
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_UTIL_H__
+#define __IA_CSS_PIPE_UTIL_H__
+
+#include <ia_css_types.h>
+#include <ia_css_frame_public.h>
+
+/* @brief Get Input format bits per pixel based on stream configuration of this
+ * pipe.
+ *
+ * @param[in] pipe
+ * @return bits per pixel for the underlying stream
+ *
+ */
+unsigned int ia_css_pipe_util_pipe_input_format_bpp(
+ const struct ia_css_pipe *const pipe);
+
+void ia_css_pipe_util_create_output_frames(
+ struct ia_css_frame *frames[]);
+
+void ia_css_pipe_util_set_output_frames(
+ struct ia_css_frame *frames[],
+ unsigned int idx,
+ struct ia_css_frame *frame);
+
+#endif /* __IA_CSS_PIPE_UTIL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c
new file mode 100644
index 000000000000..c6b07d65ce3e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c
@@ -0,0 +1,873 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_pipe_binarydesc.h"
+#include "ia_css_frame_format.h"
+#include "ia_css_pipe.h"
+#include "ia_css_pipe_util.h"
+#include "ia_css_util.h"
+#include "ia_css_debug.h"
+#include "sh_css_params.h"
+#include <assert_support.h>
+/* HRT_GDC_N */
+#include "gdc_device.h"
+#include <linux/kernel.h>
+
+/* This module provides binary descriptions used to find a binary. Since
+ * every stage is associated with a binary, it implicitly helps with the
+ * stage description. Apart from providing a binary description, this
+ * module also populates the frame infos when required. */
+
+/* Generic descriptor for offline binaries. Internal function. */
+static void pipe_binarydesc_get_offline(
+ struct ia_css_pipe const *const pipe,
+ const int mode,
+ struct ia_css_binary_descr *descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info[],
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ /* in_info, out_info, vf_info can be NULL */
+ assert(pipe);
+ assert(descr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "pipe_binarydesc_get_offline() enter:\n");
+
+ descr->mode = mode;
+ descr->online = false;
+ descr->continuous = pipe->stream->config.continuous;
+ descr->striped = false;
+ descr->two_ppc = false;
+ descr->enable_yuv_ds = false;
+ descr->enable_high_speed = false;
+ descr->enable_dvs_6axis = false;
+ descr->enable_reduced_pipe = false;
+ descr->enable_dz = true;
+ descr->enable_xnr = false;
+ descr->enable_dpc = false;
+ descr->enable_luma_only = false;
+ descr->enable_tnr = false;
+ descr->enable_capture_pp_bli = false;
+ descr->enable_fractional_ds = false;
+ descr->dvs_env.width = 0;
+ descr->dvs_env.height = 0;
+ descr->stream_format = pipe->stream->config.input_config.format;
+ descr->in_info = in_info;
+ descr->bds_out_info = NULL;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ descr->out_info[i] = out_info[i];
+ descr->vf_info = vf_info;
+ descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
+ descr->stream_config_left_padding = -1;
+}
+
+void ia_css_pipe_get_copy_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *copy_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ unsigned int i;
+ /* vf_info can be NULL; out_info must not be, it is dereferenced below */
+ assert(pipe);
+ assert(in_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_COPY,
+ copy_descr, in_info, out_infos, vf_info);
+ copy_descr->online = true;
+ copy_descr->continuous = false;
+ copy_descr->two_ppc = (pipe->stream->config.pixels_per_clock == 2);
+ copy_descr->enable_dz = false;
+ copy_descr->isp_pipe_version = IA_CSS_PIPE_VERSION_1;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_vfpp_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *vf_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ unsigned int i;
+ /* out_info can be NULL ??? */
+ assert(pipe);
+ assert(in_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ in_info->raw_bit_depth = 0;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_VF_PP,
+ vf_pp_descr, in_info, out_infos, NULL);
+ vf_pp_descr->enable_fractional_ds = true;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+static struct sh_css_bds_factor bds_factors_list[] = {
+ {1, 1, SH_CSS_BDS_FACTOR_1_00},
+ {5, 4, SH_CSS_BDS_FACTOR_1_25},
+ {3, 2, SH_CSS_BDS_FACTOR_1_50},
+ {2, 1, SH_CSS_BDS_FACTOR_2_00},
+ {9, 4, SH_CSS_BDS_FACTOR_2_25},
+ {5, 2, SH_CSS_BDS_FACTOR_2_50},
+ {3, 1, SH_CSS_BDS_FACTOR_3_00},
+ {4, 1, SH_CSS_BDS_FACTOR_4_00},
+ {9, 2, SH_CSS_BDS_FACTOR_4_50},
+ {5, 1, SH_CSS_BDS_FACTOR_5_00},
+ {6, 1, SH_CSS_BDS_FACTOR_6_00},
+ {8, 1, SH_CSS_BDS_FACTOR_8_00}
+};
+
+enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
+ unsigned int bds_factor,
+ unsigned int *bds_factor_numerator,
+ unsigned int *bds_factor_denominator)
+{
+ unsigned int i;
+
+ /* Loop over all bds factors until a match is found */
+ for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
+ if (bds_factors_list[i].bds_factor == bds_factor) {
+ *bds_factor_numerator = bds_factors_list[i].numerator;
+ *bds_factor_denominator = bds_factors_list[i].denominator;
+ return IA_CSS_SUCCESS;
+ }
+ }
+
+ /* Throw an error since bds_factor cannot be found
+ in bds_factors_list */
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+}
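+
+/* Example (illustrative): calling this with SH_CSS_BDS_FACTOR_2_25 sets
+ * *bds_factor_numerator = 9 and *bds_factor_denominator = 4, i.e. a
+ * bayer downscale of 2.25.
+ */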
+
+enum ia_css_err binarydesc_calculate_bds_factor(
+ struct ia_css_resolution input_res,
+ struct ia_css_resolution output_res,
+ unsigned int *bds_factor)
+{
+ unsigned int i;
+ unsigned int in_w = input_res.width,
+ in_h = input_res.height,
+ out_w = output_res.width, out_h = output_res.height;
+
+ unsigned int max_bds_factor = 8;
+ unsigned int max_rounding_margin = 2;
+ /* delta in pixels to account for rounding margin in the calculation */
+ unsigned int delta = max_bds_factor * max_rounding_margin;
+
+ /* Assert if the resolutions are not set */
+ assert(in_w != 0 && in_h != 0);
+ assert(out_w != 0 && out_h != 0);
+
+ /* Loop over all bds factors until a match is found */
+ for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
+ unsigned int num = bds_factors_list[i].numerator;
+ unsigned int den = bds_factors_list[i].denominator;
+
+ /* See width-wise and height-wise if this bds_factor
+ * satisfies the condition */
+ bool cond = (out_w * num / den + delta > in_w) &&
+ (out_w * num / den <= in_w) &&
+ (out_h * num / den + delta > in_h) &&
+ (out_h * num / den <= in_h);
+
+ if (cond) {
+ *bds_factor = bds_factors_list[i].bds_factor;
+ return IA_CSS_SUCCESS;
+ }
+ }
+
+ /* Throw an error since a suitable bds_factor cannot be found */
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+}
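+
+/* Worked example (illustrative): for input_res 1920x1080 and output_res
+ * 960x540, delta = 8 * 2 = 16 and the {2, 1} entry matches, since
+ * 960 * 2 <= 1920 < 960 * 2 + 16 (and likewise for the height), so
+ * *bds_factor is set to SH_CSS_BDS_FACTOR_2_00.
+ */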
+
+enum ia_css_err ia_css_pipe_get_preview_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *preview_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ enum ia_css_err err;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ int mode = IA_CSS_BINARY_MODE_PREVIEW;
+ unsigned int i;
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ assert(vf_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ /*
+ * Set up the info of the input frame with
+ * the ISP required resolution
+ */
+ in_info->res = pipe->config.input_effective_res;
+ in_info->padded_width = in_info->res.width;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+
+ if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
+ mode = IA_CSS_BINARY_MODE_COPY;
+ else
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, mode,
+ preview_descr, in_info, out_infos, vf_info);
+ if (pipe->stream->config.online) {
+ preview_descr->online = pipe->stream->config.online;
+ preview_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ }
+ preview_descr->stream_format = pipe->stream->config.input_config.format;
+
+ /* TODO: Remove this when bds_out_info is available! */
+ *bds_out_info = *in_info;
+
+ if (pipe->extra_config.enable_raw_binning) {
+ if (pipe->config.bayer_ds_out_res.width != 0 &&
+ pipe->config.bayer_ds_out_res.height != 0) {
+ bds_out_info->res.width =
+ pipe->config.bayer_ds_out_res.width;
+ bds_out_info->res.height =
+ pipe->config.bayer_ds_out_res.height;
+ bds_out_info->padded_width =
+ pipe->config.bayer_ds_out_res.width;
+ err =
+ binarydesc_calculate_bds_factor(in_info->res,
+ bds_out_info->res,
+ &preview_descr->required_bds_factor);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ bds_out_info->res.width = in_info->res.width / 2;
+ bds_out_info->res.height = in_info->res.height / 2;
+ bds_out_info->padded_width = in_info->padded_width / 2;
+ preview_descr->required_bds_factor =
+ SH_CSS_BDS_FACTOR_2_00;
+ }
+ } else {
+ /* TODO: Remove this when bds_out_info is available! */
+ bds_out_info->res.width = in_info->res.width;
+ bds_out_info->res.height = in_info->res.height;
+ bds_out_info->padded_width = in_info->padded_width;
+ preview_descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
+ }
+ pipe->required_bds_factor = preview_descr->required_bds_factor;
+
+ /* bayer ds and fractional ds cannot be enabled at the same time,
+ so we disable bds_out_info when fractional ds is used */
+ if (!pipe->extra_config.enable_fractional_ds)
+ preview_descr->bds_out_info = bds_out_info;
+ else
+ preview_descr->bds_out_info = NULL;
+ /*
+ ----Preview binary-----
+ --in-->|--out->|vf_veceven|--|--->vf
+ -----------------------
+ * Preview binary normally doesn't have a vf_port but
+ * instead it has an output port. However, the output is
+ * generated by vf_veceven module in which we might have
+ * a downscaling (by 1x, 2x, or 4x). Because the resolution
+ * might change, we need two different info, namely out_info
+ * & vf_info. In fill_binary_info we use out&vf info to
+ * calculate vf decimation factor.
+ */
+ *out_info = *vf_info;
+
+ /* In case of preview_ds binary, we can do any fractional amount
+ * of downscale, so there is no DS needed in vf_veceven. Therefore,
+ * out and vf infos will be the same. Otherwise, we set out resolution
+ * equal to in resolution. */
+ if (!pipe->extra_config.enable_fractional_ds) {
+ /* TODO: Change this when bds_out_info is available! */
+ out_info->res.width = bds_out_info->res.width;
+ out_info->res.height = bds_out_info->res.height;
+ out_info->padded_width = bds_out_info->padded_width;
+ }
+ preview_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+
+ preview_descr->enable_dpc = pipe->config.enable_dpc;
+
+ preview_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_pipe_get_video_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *video_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ int stream_config_left_padding)
+{
+ int mode = IA_CSS_BINARY_MODE_VIDEO;
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool stream_dz_config = false;
+
+ /* vf_info can be NULL */
+ assert(pipe);
+ assert(in_info);
+ /* assert(vf_info != NULL); */
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* The solution below is not optimal; we should move to using
+ * ia_css_pipe_get_copy_binarydesc(). For now this fixes things: this
+ * code used to be here but was removed in gerrit 8908 because it was
+ * wrong for Skycam; however, the 240x still needs it.
+ */
+ if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
+ mode = IA_CSS_BINARY_MODE_COPY;
+
+ in_info->res = pipe->config.input_effective_res;
+ in_info->padded_width = in_info->res.width;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, mode,
+ video_descr, in_info, out_infos, vf_info);
+
+ if (pipe->stream->config.online) {
+ video_descr->online = pipe->stream->config.online;
+ video_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ }
+
+ if (mode == IA_CSS_BINARY_MODE_VIDEO) {
+ stream_dz_config =
+ ((pipe->stream->isp_params_configs->dz_config.dx !=
+ HRT_GDC_N)
+ || (pipe->stream->isp_params_configs->dz_config.dy !=
+ HRT_GDC_N));
+
+ video_descr->enable_dz = pipe->config.enable_dz
+ || stream_dz_config;
+ video_descr->dvs_env = pipe->config.dvs_envelope;
+ video_descr->enable_yuv_ds = pipe->extra_config.enable_yuv_ds;
+ video_descr->enable_high_speed =
+ pipe->extra_config.enable_high_speed;
+ video_descr->enable_dvs_6axis =
+ pipe->extra_config.enable_dvs_6axis;
+ video_descr->enable_reduced_pipe =
+ pipe->extra_config.enable_reduced_pipe;
+ video_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ video_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+ video_descr->enable_dpc =
+ pipe->config.enable_dpc;
+ video_descr->enable_luma_only =
+ pipe->config.enable_luma_only;
+ video_descr->enable_tnr =
+ pipe->config.enable_tnr;
+
+ if (pipe->extra_config.enable_raw_binning) {
+ if (pipe->config.bayer_ds_out_res.width != 0 &&
+ pipe->config.bayer_ds_out_res.height != 0) {
+ bds_out_info->res.width =
+ pipe->config.bayer_ds_out_res.width;
+ bds_out_info->res.height =
+ pipe->config.bayer_ds_out_res.height;
+ bds_out_info->padded_width =
+ pipe->config.bayer_ds_out_res.width;
+ err =
+ binarydesc_calculate_bds_factor(
+ in_info->res, bds_out_info->res,
+ &video_descr->required_bds_factor);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ bds_out_info->res.width =
+ in_info->res.width / 2;
+ bds_out_info->res.height =
+ in_info->res.height / 2;
+ bds_out_info->padded_width =
+ in_info->padded_width / 2;
+ video_descr->required_bds_factor =
+ SH_CSS_BDS_FACTOR_2_00;
+ }
+ } else {
+ bds_out_info->res.width = in_info->res.width;
+ bds_out_info->res.height = in_info->res.height;
+ bds_out_info->padded_width = in_info->padded_width;
+ video_descr->required_bds_factor =
+ SH_CSS_BDS_FACTOR_1_00;
+ }
+
+ pipe->required_bds_factor = video_descr->required_bds_factor;
+
+ /* bayer ds and fractional ds cannot be enabled
+ at the same time, so we disable bds_out_info when
+ fractional ds is used */
+ if (!pipe->extra_config.enable_fractional_ds)
+ video_descr->bds_out_info = bds_out_info;
+ else
+ video_descr->bds_out_info = NULL;
+
+ video_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+ video_descr->stream_config_left_padding = stream_config_left_padding;
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+void ia_css_pipe_get_yuvscaler_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *yuv_scaler_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *internal_out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame_info *this_vf_info = NULL;
+
+ assert(pipe);
+ assert(in_info);
+ /* Note: if the following assert fails, the number of ports has been
+ * changed; in that case an additional initializer must be added
+ * a few lines below after which this assert can be updated.
+ */
+ assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == 2);
+ IA_CSS_ENTER_PRIVATE("");
+
+ in_info->padded_width = in_info->res.width;
+ in_info->raw_bit_depth = 0;
+ ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
+ out_infos[0] = out_info;
+ out_infos[1] = internal_out_info;
+ /* add initializers here if
+ * assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == ...);
+ * fails
+ */
+
+ if (vf_info) {
+ this_vf_info = (vf_info->res.width == 0 &&
+ vf_info->res.height == 0) ? NULL : vf_info;
+ }
+
+ pipe_binarydesc_get_offline(pipe,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ yuv_scaler_descr,
+ in_info, out_infos, this_vf_info);
+
+ yuv_scaler_descr->enable_fractional_ds = true;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_capturepp_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *capture_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(vf_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* the in_info is only used for resolution to enable
+ bayer down scaling. */
+ if (pipe->out_yuv_ds_input_info.res.width)
+ *in_info = pipe->out_yuv_ds_input_info;
+ else
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_YUV420;
+ in_info->raw_bit_depth = 0;
+ ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
+
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ capture_pp_descr,
+ in_info, out_infos, vf_info);
+
+ capture_pp_descr->enable_capture_pp_bli =
+ pipe->config.default_capture_config.enable_capture_pp_bli;
+ capture_pp_descr->enable_fractional_ds = true;
+ capture_pp_descr->enable_xnr =
+ pipe->config.default_capture_config.enable_xnr != 0;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+/* lookup table for high quality primary binaries */
+static unsigned int primary_hq_binary_modes[NUM_PRIMARY_HQ_STAGES] = {
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE0,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE1,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE2,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE3,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE4,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE5
+};
+
+void ia_css_pipe_get_primary_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *prim_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int stage_idx)
+{
+ enum ia_css_pipe_version pipe_version = pipe->config.isp_pipe_version;
+ int mode;
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ assert(stage_idx < NUM_PRIMARY_HQ_STAGES);
+ /* vf_info can be NULL - example video_binarydescr */
+ /*assert(vf_info != NULL);*/
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1)
+ mode = primary_hq_binary_modes[stage_idx];
+ else
+ mode = IA_CSS_BINARY_MODE_PRIMARY;
+
+ if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
+ mode = IA_CSS_BINARY_MODE_COPY;
+
+ in_info->res = pipe->config.input_effective_res;
+ in_info->padded_width = in_info->res.width;
+
+#if !defined(HAS_NO_PACKED_RAW_PIXELS)
+ if (pipe->stream->config.pack_raw_pixels)
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
+ else
+#endif
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, mode,
+ prim_descr, in_info, out_infos, vf_info);
+
+ if (pipe->stream->config.online &&
+ pipe->stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
+ prim_descr->online = true;
+ prim_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ prim_descr->stream_format = pipe->stream->config.input_config.format;
+ }
+ if (mode == IA_CSS_BINARY_MODE_PRIMARY) {
+ prim_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ prim_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+ prim_descr->enable_luma_only =
+ pipe->config.enable_luma_only;
+ /* We have both striped and non-striped primary binaries. If
+ * continuous viewfinder is required, we must select a striped one;
+ * otherwise we prefer a non-striped one since it has better
+ * performance. */
+ if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1)
+ prim_descr->striped = false;
+ else if (!atomisp_hw_is_isp2401) {
+ prim_descr->striped = prim_descr->continuous &&
+ (!pipe->stream->stop_copy_preview || !pipe->stream->disable_cont_vf);
+ } else {
+ prim_descr->striped = prim_descr->continuous && !pipe->stream->disable_cont_vf;
+
+ if ((pipe->config.default_capture_config.enable_xnr != 0) &&
+ pipe->extra_config.enable_dvs_6axis)
+ prim_descr->enable_xnr = true;
+ }
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_pre_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
+ pre_gdc_descr, in_info, out_infos, NULL);
+ pre_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_QPLANE6;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_GDC,
+ gdc_descr, in_info, out_infos, NULL);
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_post_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *post_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ assert(vf_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_YUV420_16;
+ in_info->raw_bit_depth = 16;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP,
+ post_gdc_descr, in_info, out_infos, vf_info);
+
+ post_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_pre_de_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_de_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
+ pre_de_descr, in_info, out_infos, NULL);
+ else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2) {
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_DE,
+ pre_de_descr, in_info, out_infos, NULL);
+ }
+
+ if (pipe->stream->config.online) {
+ pre_de_descr->online = true;
+ pre_de_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ pre_de_descr->stream_format = pipe->stream->config.input_config.format;
+ }
+ pre_de_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_pre_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
+ pre_anr_descr, in_info, out_infos, NULL);
+
+ if (pipe->stream->config.online) {
+ pre_anr_descr->online = true;
+ pre_anr_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ pre_anr_descr->stream_format = pipe->stream->config.input_config.format;
+ }
+ pre_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ANR_ELEMENT_BITS;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_ANR,
+ anr_descr, in_info, out_infos, NULL);
+
+ anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_post_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *post_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ assert(vf_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ANR_ELEMENT_BITS;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP,
+ post_anr_descr, in_info, out_infos, vf_info);
+
+ post_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_ldc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *ldc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (!atomisp_hw_is_isp2401) {
+ *in_info = *out_info;
+ } else {
+ if (pipe->out_yuv_ds_input_info.res.width)
+ *in_info = pipe->out_yuv_ds_input_info;
+ else
+ *in_info = *out_info;
+ }
+
+ in_info->format = IA_CSS_FRAME_FORMAT_YUV420;
+ in_info->raw_bit_depth = 0;
+ ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
+
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_CAPTURE_PP,
+ ldc_descr, in_info, out_infos, NULL);
+ ldc_descr->enable_dvs_6axis =
+ pipe->extra_config.enable_dvs_6axis;
+ IA_CSS_LEAVE_PRIVATE("");
+}
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c
new file mode 100644
index 000000000000..43f63cc20f49
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c
@@ -0,0 +1,118 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_pipe_stagedesc.h"
+#include "assert_support.h"
+#include "ia_css_debug.h"
+
+void ia_css_pipe_get_generic_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame)
+{
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("stage_desc = %p, binary = %p, out_frame = %p, in_frame = %p, vf_frame = %p",
+ stage_desc, binary, out_frame, in_frame, vf_frame);
+
+ assert(stage_desc && binary && binary->info);
+ if (!stage_desc || !binary || !binary->info) {
+ IA_CSS_ERROR("invalid arguments");
+ goto ERR;
+ }
+
+ stage_desc->binary = binary;
+ stage_desc->firmware = NULL;
+ stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
+ stage_desc->max_input_width = 0;
+ stage_desc->mode = binary->info->sp.pipeline.mode;
+ stage_desc->in_frame = in_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = out_frame[i];
+ }
+ stage_desc->vf_frame = vf_frame;
+ERR:
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_firmwares_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame,
+ const struct ia_css_fw_info *fw,
+ unsigned int mode)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_get_firmwares_stage_desc() enter:\n");
+ stage_desc->binary = binary;
+ stage_desc->firmware = fw;
+ stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
+ stage_desc->max_input_width = 0;
+ stage_desc->mode = mode;
+ stage_desc->in_frame = in_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = out_frame[i];
+ }
+ stage_desc->vf_frame = vf_frame;
+}
+
+void ia_css_pipe_get_acc_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_fw_info *fw)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_get_acc_stage_desc() enter:\n");
+ stage_desc->binary = binary;
+ stage_desc->firmware = fw;
+ stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
+ stage_desc->max_input_width = 0;
+ stage_desc->mode = IA_CSS_BINARY_MODE_VF_PP;
+ stage_desc->in_frame = NULL;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = NULL;
+ }
+ stage_desc->vf_frame = NULL;
+}
+
+void ia_css_pipe_get_sp_func_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_frame *out_frame,
+ enum ia_css_pipeline_stage_sp_func sp_func,
+ unsigned int max_input_width)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_get_sp_func_stage_desc() enter:\n");
+ stage_desc->binary = NULL;
+ stage_desc->firmware = NULL;
+ stage_desc->sp_func = sp_func;
+ stage_desc->max_input_width = max_input_width;
+ stage_desc->mode = (unsigned int)-1;
+ stage_desc->in_frame = NULL;
+ stage_desc->out_frame[0] = out_frame;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = NULL;
+ }
+ stage_desc->vf_frame = NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c
new file mode 100644
index 000000000000..cc0631550724
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c
@@ -0,0 +1,50 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_pipe_util.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_pipe.h"
+#include "ia_css_util.h"
+#include "assert_support.h"
+
+unsigned int ia_css_pipe_util_pipe_input_format_bpp(
+ const struct ia_css_pipe *const pipe)
+{
+ assert(pipe);
+ assert(pipe->stream);
+
+ return ia_css_util_input_format_bpp(pipe->stream->config.input_config.format,
+ pipe->stream->config.pixels_per_clock == 2);
+}
+
+void ia_css_pipe_util_create_output_frames(
+ struct ia_css_frame *frames[])
+{
+ unsigned int i;
+
+ assert(frames);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ frames[i] = NULL;
+ }
+}
+
+void ia_css_pipe_util_set_output_frames(
+ struct ia_css_frame *frames[],
+ unsigned int idx,
+ struct ia_css_frame *frame)
+{
+ assert(idx < IA_CSS_BINARY_MAX_OUTPUT_PORTS);
+
+ frames[idx] = frame;
+}
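+
+/* Typical call pattern (illustrative sketch; out_frame is a hypothetical
+ * caller-owned frame):
+ *
+ * struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ *
+ * ia_css_pipe_util_create_output_frames(out_frames);
+ * ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ *
+ * This leaves every other output port NULL, which is what the stage and
+ * binary descriptor helpers expect for unused ports.
+ */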
diff --git a/drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h b/drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h
new file mode 100644
index 000000000000..75333166ed9b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h
@@ -0,0 +1,141 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_UTIL_H__
+#define __IA_CSS_UTIL_H__
+
+#include <ia_css_err.h>
+#include <error_support.h>
+#include <type_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_stream_public.h>
+#include <ia_css_stream_format.h>
+
+/* @brief convert "errno" error code to "ia_css_err" error code
+ *
+ * @param[in] "errno" error code
+ * @return "ia_css_err" error code
+ *
+ */
+enum ia_css_err ia_css_convert_errno(
+ int in_err);
+
+/* @brief check vf frame info.
+ *
+ * @param[in] info
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_util_check_vf_info(
+ const struct ia_css_frame_info *const info);
+
+/* @brief check input configuration.
+ *
+ * @param[in] stream_config
+ * @param[in] must_be_raw
+ * @param[in] must_be_yuv
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_util_check_input(
+ const struct ia_css_stream_config *const stream_config,
+ bool must_be_raw,
+ bool must_be_yuv);
+
+/* @brief check vf and out frame info.
+ *
+ * @param[in] out_info
+ * @param[in] vf_info
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_util_check_vf_out_info(
+ const struct ia_css_frame_info *const out_info,
+ const struct ia_css_frame_info *const vf_info);
+
+/* @brief check width and height
+ *
+ * @param[in] width
+ * @param[in] height
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_util_check_res(
+ unsigned int width,
+ unsigned int height);
+
+/* ISP2401 */
+/* @brief compare resolutions (less or equal)
+ *
+ * @param[in] a resolution
+ * @param[in] b resolution
+ * @return true if both dimensions of a are less or
+ * equal than those of b, false otherwise
+ *
+ */
+bool ia_css_util_res_leq(
+ struct ia_css_resolution a,
+ struct ia_css_resolution b);
+
+/* ISP2401 */
+/**
+ * @brief Check if resolution is zero
+ *
+ * @param[in] resolution The resolution to check
+ *
+ * @returns true if resolution is zero
+ */
+bool ia_css_util_resolution_is_zero(
+ const struct ia_css_resolution resolution);
+
+/* ISP2401 */
+/**
+ * @brief Check if resolution is even
+ *
+ * @param[in] resolution The resolution to check
+ *
+ * @returns true if resolution is even
+ */
+bool ia_css_util_resolution_is_even(
+ const struct ia_css_resolution resolution);
+
+/* @brief get the bits per pixel of an input format
+ *
+ * @param[in] stream_format
+ * @param[in] two_ppc
+ * @return bits per pixel based on given parameters.
+ *
+ */
+unsigned int ia_css_util_input_format_bpp(
+ enum atomisp_input_format stream_format,
+ bool two_ppc);
+
+/* @brief check if input format is raw
+ *
+ * @param[in] stream_format
+ * @return true if the input format is raw or false otherwise
+ *
+ */
+bool ia_css_util_is_input_format_raw(
+ enum atomisp_input_format stream_format);
+
+/* @brief check if input format is yuv
+ *
+ * @param[in] stream_format
+ * @return true if the input format is yuv or false otherwise
+ *
+ */
+bool ia_css_util_is_input_format_yuv(
+ enum atomisp_input_format stream_format);
+
+#endif /* __IA_CSS_UTIL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/camera/util/src/util.c b/drivers/staging/media/atomisp/pci/camera/util/src/util.c
new file mode 100644
index 000000000000..217fe9cb54ff
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/util/src/util.c
@@ -0,0 +1,225 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_util.h"
+#include <ia_css_frame.h>
+#include <assert_support.h>
+#include <math_support.h>
+
+/* for ia_css_binary_max_vf_width() */
+#include "ia_css_binary.h"
+
+enum ia_css_err ia_css_convert_errno(
+ int in_err)
+{
+ enum ia_css_err out_err;
+
+ switch (in_err) {
+ case 0:
+ out_err = IA_CSS_SUCCESS;
+ break;
+ case EINVAL:
+ out_err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ break;
+ case ENODATA:
+ out_err = IA_CSS_ERR_QUEUE_IS_EMPTY;
+ break;
+ case ENOSYS:
+ case ENOTSUP:
+ out_err = IA_CSS_ERR_INTERNAL_ERROR;
+ break;
+ case ENOBUFS:
+ out_err = IA_CSS_ERR_QUEUE_IS_FULL;
+ break;
+ default:
+ out_err = IA_CSS_ERR_INTERNAL_ERROR;
+ break;
+ }
+ return out_err;
+}
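+
+/* Example (illustrative): ia_css_convert_errno(EINVAL) returns
+ * IA_CSS_ERR_INVALID_ARGUMENTS; any code not listed above maps to
+ * IA_CSS_ERR_INTERNAL_ERROR.
+ */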
+
+/* MW: Table look-up ??? */
+unsigned int ia_css_util_input_format_bpp(
+ enum atomisp_input_format format,
+ bool two_ppc)
+{
+ unsigned int rval = 0;
+
+ switch (format) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ rval = 8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ rval = 10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ rval = 16;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ rval = 4;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ rval = 5;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ /* FIXME: 65 looks suspect; RGB565 is 16 bits per pixel */
+ rval = 65;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ rval = 6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ rval = 7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ rval = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ if (two_ppc)
+ rval = 14;
+ else
+ rval = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ if (two_ppc)
+ rval = 16;
+ else
+ rval = 12;
+ break;
+ default:
+ rval = 0;
+ break;
+ }
+ return rval;
+}
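
The returned value is bits per sample, and two_ppc only changes the result
for RAW_14 and RAW_16, which are clamped to 12 bits in one-pixel-per-clock
mode. Reading a few cases straight off the switch above:

	/* Expected results, taken directly from the switch above: */
	ia_css_util_input_format_bpp(ATOMISP_INPUT_FORMAT_RAW_10, false); /* 10 */
	ia_css_util_input_format_bpp(ATOMISP_INPUT_FORMAT_RAW_16, true);  /* 16 */
	ia_css_util_input_format_bpp(ATOMISP_INPUT_FORMAT_RAW_16, false); /* 12 */
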
+
+enum ia_css_err ia_css_util_check_vf_info(
+ const struct ia_css_frame_info *const info)
+{
+ enum ia_css_err err;
+ unsigned int max_vf_width;
+
+ assert(info);
+ err = ia_css_frame_check_info(info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ max_vf_width = ia_css_binary_max_vf_width();
+ if (max_vf_width != 0 && info->res.width > max_vf_width * 2)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_util_check_vf_out_info(
+ const struct ia_css_frame_info *const out_info,
+ const struct ia_css_frame_info *const vf_info)
+{
+ enum ia_css_err err;
+
+ assert(out_info);
+ assert(vf_info);
+
+ err = ia_css_frame_check_info(out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_util_check_vf_info(vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_util_check_res(unsigned int width, unsigned int height)
+{
+	/* height may be an odd number for jpeg/embedded data from ISYS2401 */
+	if (width == 0 || height == 0 || IS_ODD(width))
+		return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_SUCCESS;
+}
+
+/* ISP2401 */
+bool ia_css_util_res_leq(struct ia_css_resolution a, struct ia_css_resolution b)
+{
+ return a.width <= b.width && a.height <= b.height;
+}
+
+/* ISP2401 */
+bool ia_css_util_resolution_is_zero(const struct ia_css_resolution resolution)
+{
+ return (resolution.width == 0) || (resolution.height == 0);
+}
+
+/* ISP2401 */
+bool ia_css_util_resolution_is_even(const struct ia_css_resolution resolution)
+{
+ return IS_EVEN(resolution.height) && IS_EVEN(resolution.width);
+}
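
Together, the three ISP2401 resolution helpers cover the usual validation
needs; a small illustrative combination (resolution_ok() is a hypothetical
wrapper, not part of this file):

	/* Hypothetical: accept only non-zero, fully even resolutions that
	 * fit inside a given maximum. */
	static bool resolution_ok(struct ia_css_resolution res,
				  struct ia_css_resolution max)
	{
		return !ia_css_util_resolution_is_zero(res) &&
		       ia_css_util_resolution_is_even(res) &&
		       ia_css_util_res_leq(res, max);
	}
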
+
+bool ia_css_util_is_input_format_raw(enum atomisp_input_format format)
+{
+	/* raw_14 and raw_16 are not supported as input formats to the ISP;
+	 * they can only be copied to a frame in memory using the
+	 * copy binary.
+	 */
+	return ((format == ATOMISP_INPUT_FORMAT_RAW_6) ||
+		(format == ATOMISP_INPUT_FORMAT_RAW_7) ||
+		(format == ATOMISP_INPUT_FORMAT_RAW_8) ||
+		(format == ATOMISP_INPUT_FORMAT_RAW_10) ||
+		(format == ATOMISP_INPUT_FORMAT_RAW_12));
+}
+
+bool ia_css_util_is_input_format_yuv(enum atomisp_input_format format)
+{
+ return format == ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY ||
+ format == ATOMISP_INPUT_FORMAT_YUV420_8 ||
+ format == ATOMISP_INPUT_FORMAT_YUV420_10 ||
+ format == ATOMISP_INPUT_FORMAT_YUV420_16 ||
+ format == ATOMISP_INPUT_FORMAT_YUV422_8 ||
+ format == ATOMISP_INPUT_FORMAT_YUV422_10 ||
+ format == ATOMISP_INPUT_FORMAT_YUV422_16;
+}
+
+enum ia_css_err ia_css_util_check_input(
+ const struct ia_css_stream_config *const stream_config,
+ bool must_be_raw,
+ bool must_be_yuv)
+{
+ assert(stream_config);
+
+ if (!stream_config)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (stream_config->input_config.effective_res.width == 0 ||
+ stream_config->input_config.effective_res.height == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (must_be_raw &&
+ !ia_css_util_is_input_format_raw(stream_config->input_config.format))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (must_be_yuv &&
+ !ia_css_util_is_input_format_yuv(stream_config->input_config.format))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ return IA_CSS_SUCCESS;
+}
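
A typical caller runs ia_css_util_check_input() on a stream configuration
before selecting binaries; a sketch assuming a stream object that holds a
struct ia_css_stream_config named config (the surrounding pipe code is
illustrative, not from this file):

	/* e.g. a bayer capture pipe requires RAW sensor input */
	enum ia_css_err err =
		ia_css_util_check_input(&stream->config, true, false);
	if (err != IA_CSS_SUCCESS)
		return err;
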
diff --git a/drivers/staging/media/atomisp/pci/cell_params.h b/drivers/staging/media/atomisp/pci/cell_params.h
new file mode 100644
index 000000000000..0eabc59ff5af
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/cell_params.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _cell_params_h
+#define _cell_params_h
+
+#define SP_PMEM_LOG_WIDTH_BITS 6 /* width of PC: 64 bits, 8 bytes */
+#define SP_ICACHE_TAG_BITS 4 /* size of tag */
+#define SP_ICACHE_SET_BITS 8 /* 256 sets */
+#define SP_ICACHE_BLOCKS_PER_SET_BITS 1 /* 2-way associative */
+#define SP_ICACHE_BLOCK_ADDRESS_BITS 11 /* 2048 lines capacity */
+
+#define SP_ICACHE_ADDRESS_BITS \
+ (SP_ICACHE_TAG_BITS + SP_ICACHE_BLOCK_ADDRESS_BITS)
+
+#define SP_PMEM_DEPTH BIT(SP_ICACHE_ADDRESS_BITS)
+
+#define SP_FIFO_0_DEPTH 0
+#define SP_FIFO_1_DEPTH 0
+#define SP_FIFO_2_DEPTH 0
+#define SP_FIFO_3_DEPTH 0
+#define SP_FIFO_4_DEPTH 0
+#define SP_FIFO_5_DEPTH 0
+#define SP_FIFO_6_DEPTH 0
+#define SP_FIFO_7_DEPTH 0
+
+#define SP_SLV_BUS_MAXBURSTSIZE 1
+
+#endif /* _cell_params_h */
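
For reference, the derived values work out as: SP_ICACHE_ADDRESS_BITS =
4 + 11 = 15, so SP_PMEM_DEPTH = BIT(15) = 32768 instruction words; at the
2^6 = 64-bit (8-byte) word width defined above, that is 256 KiB of SP
program memory.
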
diff --git a/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_configs.c
new file mode 100644
index 000000000000..3ef556a64825
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_configs.c
@@ -0,0 +1,385 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_iterator(
+ const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_iterator() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
+ }
+ if (size) {
+ ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_iterator() leave:\n");
+}
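
Every generated ia_css_configure_<kernel>() below repeats this same shape:
read the kernel's config-blob offset and size from the binary's
memory-offset map, and only when the binary actually uses the kernel
(size != 0) encode the host-side configuration into the binary's DMEM
parameter copy. The shared part could be written as a helper along these
lines (a sketch only; the generator emits the code inline per kernel):

	/* Hypothetical helper capturing the shared pattern of the
	 * generated configure functions: */
	static void configure_dmem_section(const struct ia_css_binary *binary,
					   unsigned int offset,
					   unsigned int size,
					   void (*encode)(void *dst,
							  const void *cfg,
							  unsigned int size),
					   const void *cfg)
	{
		if (!size) /* this binary does not use the kernel */
			return;
		encode(&binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG]
			[IA_CSS_ISP_DMEM].address[offset], cfg, size);
	}
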
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_copy_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_copy_output() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
+ }
+ if (size) {
+ ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_copy_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_crop(
+ const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_crop() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
+ }
+ if (size) {
+ ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_crop() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_fpn(
+ const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_fpn() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
+ }
+ if (size) {
+ ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_fpn() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_dvs(
+ const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_dvs() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
+ }
+ if (size) {
+ ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_dvs() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_qplane(
+ const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_qplane() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
+ }
+ if (size) {
+ ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_qplane() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output0(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output0() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
+ }
+ if (size) {
+ ia_css_output0_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output0() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output1(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output1() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
+ }
+ if (size) {
+ ia_css_output1_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output1() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
+ }
+ if (size) {
+ ia_css_output_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_raw(
+ const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_raw() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
+ }
+ if (size) {
+ ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_raw() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_tnr(
+ const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_tnr() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
+ }
+ if (size) {
+ ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_tnr() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_ref(
+ const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_ref() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
+ }
+ if (size) {
+ ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_ref() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_vf(
+ const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_vf() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
+ }
+ if (size) {
+ ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_vf() leave:\n");
+}
diff --git a/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_params.c
new file mode 100644
index 000000000000..2b90a7075b9b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_params.c
@@ -0,0 +1,3419 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define IA_CSS_INCLUDE_PARAMETERS
+#include "sh_css_params.h"
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h"
+#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h"
+#include "isp/kernels/bh/bh_2/ia_css_bh.host.h"
+#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h"
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h"
+#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/de/de_2/ia_css_de2.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h"
+#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h"
+#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h"
+#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/ob/ob2/ia_css_ob2.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h"
+#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h"
+#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h"
+#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h"
+#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h"
+#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+#include "isp/kernels/bnlm/ia_css_bnlm.host.h"
+#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h"
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_params.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_aa(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.aa.size;
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset;
+
+ if (size) {
+ struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ t->strength = params->aa_config.strength;
+ }
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+}
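
ia_css_process_aa() is the simplest instance of the generated process
functions: look up the parameter blob's offset/size for one
(kernel, memory) pair, write or encode the host parameters into the stage
binary's parameter copy when the binary uses the kernel, then mark the
parameters dirty. (Note that aa sets its dirty flags unconditionally,
while the functions below set them only when size != 0.) The dirty-flag
bookkeeping that every function repeats could be condensed as follows
(hypothetical helper, not part of this patch):

	/* Hypothetical: flag a stage's per-memory parameter copy as
	 * changed; mem is one of the IA_CSS_ISP_* memory indices. */
	static void mark_stage_params_dirty(struct ia_css_isp_parameters *params,
					    unsigned int pipe_id,
					    const struct ia_css_pipeline_stage *stage,
					    unsigned int mem)
	{
		params->isp_params_changed = true;
		params->isp_mem_params_changed[pipe_id][stage->stage_num][mem] =
		    true;
	}
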
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.anr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr() enter:\n");
+
+ ia_css_anr_encode((struct sh_css_isp_anr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->anr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr2(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr2() enter:\n");
+
+ ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->anr_thres,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr2() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bh(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bh.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ ia_css_bh_encode((struct sh_css_isp_bh_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_cnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_cnr() enter:\n");
+
+ ia_css_cnr_encode((struct sh_css_isp_cnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_cnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_crop(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.crop.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_crop() enter:\n");
+
+ ia_css_crop_encode((struct sh_css_isp_crop_isp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->crop_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_crop() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_csc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.csc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_csc() enter:\n");
+
+ ia_css_csc_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_csc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_dp(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n");
+
+ ia_css_dp_encode((struct sh_css_isp_dp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dp_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bnr() enter:\n");
+
+ ia_css_bnr_encode((struct sh_css_isp_bnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_de(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.de.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.de.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n");
+
+ ia_css_de_encode((struct sh_css_isp_de_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->de_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ecd(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ecd() enter:\n");
+
+ ia_css_ecd_encode((struct sh_css_isp_ecd_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ecd_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ecd() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_formats(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.formats.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_formats() enter:\n");
+
+ ia_css_formats_encode((struct sh_css_isp_formats_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->formats_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_formats() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fpn(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_fpn() enter:\n");
+
+ ia_css_fpn_encode((struct sh_css_isp_fpn_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fpn_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_fpn() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_gc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.gc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_encode((struct sh_css_isp_gc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->gc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->gc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+ }
+}
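
A kernel may program more than one ISP memory, each section with its own
offset table and dirty flag; gc above is a two-section example:

	/* gc sections, as encoded above:
	 *   dmem.gc   -> sh_css_isp_gc_params       (scalar controls)
	 *   vamem1.gc -> sh_css_isp_gc_vamem_params (gamma look-up table)
	 * bh (dmem + hmem0) and ob (dmem + vmem) follow the same split. */
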
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ce(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ce.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n");
+
+ ia_css_ce_encode((struct sh_css_isp_ce_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ce_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yuv2rgb(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yuv2rgb() enter:\n");
+
+ ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yuv2rgb_cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yuv2rgb() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_rgb2yuv(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_rgb2yuv() enter:\n");
+
+ ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->rgb2yuv_cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_rgb2yuv() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_r_gamma(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_r_gamma() enter:\n");
+
+ ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->r_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_r_gamma() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_g_gamma(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_g_gamma() enter:\n");
+
+ ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->g_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_g_gamma() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_b_gamma(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_b_gamma() enter:\n");
+
+ ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset],
+ &params->b_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_b_gamma() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_uds(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.uds.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset;
+
+ if (size) {
+ struct sh_css_sp_uds_params *p;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_uds() enter:\n");
+
+ p = (struct sh_css_sp_uds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->crop_pos = params->uds_config.crop_pos;
+ p->uds = params->uds_config.uds;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_uds() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_raa(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.raa.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_raa() enter:\n");
+
+ ia_css_raa_encode((struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->raa_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_raa() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_s3a(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_s3a() enter:\n");
+
+ ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_s3a() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ob(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ob.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_encode((struct sh_css_isp_ob_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ob_config,
+ &params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.ob.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->ob_config,
+ &params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_output(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.output.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.output.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_output() enter:\n");
+
+ ia_css_output_encode((struct sh_css_isp_output_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->output_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_output() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n");
+
+ ia_css_sc_encode((struct sh_css_isp_sc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->sc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bds(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bds.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset;
+
+ if (size) {
+ struct sh_css_isp_bds_params *p;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bds() enter:\n");
+
+ p = (struct sh_css_isp_bds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->baf_strength = params->bds_config.strength;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bds() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_tnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_tnr() enter:\n");
+
+ ia_css_tnr_encode((struct sh_css_isp_tnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->tnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_tnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_macc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.macc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_macc() enter:\n");
+
+ ia_css_macc_encode((struct sh_css_isp_macc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->macc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_macc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horicoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horicoef() enter:\n");
+
+ ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horicoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertcoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertcoef() enter:\n");
+
+ ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertcoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horiproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horiproj() enter:\n");
+
+ ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horiproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertproj() enter:\n");
+
+ ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horicoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horicoef() enter:\n");
+
+ ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horicoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertcoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertcoef() enter:\n");
+
+ ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertcoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horiproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horiproj() enter:\n");
+
+ ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horiproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertproj() enter:\n");
+
+ ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_wb(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.wb.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n");
+
+ ia_css_wb_encode((struct sh_css_isp_wb_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->wb_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_nr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.nr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n");
+
+ ia_css_nr_encode((struct sh_css_isp_ynr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yee(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yee.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yee() enter:\n");
+
+ ia_css_yee_encode((struct sh_css_isp_yee_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yee_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yee() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ynr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ynr() enter:\n");
+
+ ia_css_ynr_encode((struct sh_css_isp_yee2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ynr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ynr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n");
+
+ ia_css_fc_encode((struct sh_css_isp_fc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ctc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_encode((struct sh_css_isp_ctc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ctc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->ctc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr_table(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr_table() enter:\n");
+
+ ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->xnr_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr_table() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr() enter:\n");
+
+ ia_css_xnr_encode((struct sh_css_isp_xnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr3(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr3() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+void (*ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params) = {
+ ia_css_process_aa,
+ ia_css_process_anr,
+ ia_css_process_anr2,
+ ia_css_process_bh,
+ ia_css_process_cnr,
+ ia_css_process_crop,
+ ia_css_process_csc,
+ ia_css_process_dp,
+ ia_css_process_bnr,
+ ia_css_process_de,
+ ia_css_process_ecd,
+ ia_css_process_formats,
+ ia_css_process_fpn,
+ ia_css_process_gc,
+ ia_css_process_ce,
+ ia_css_process_yuv2rgb,
+ ia_css_process_rgb2yuv,
+ ia_css_process_r_gamma,
+ ia_css_process_g_gamma,
+ ia_css_process_b_gamma,
+ ia_css_process_uds,
+ ia_css_process_raa,
+ ia_css_process_s3a,
+ ia_css_process_ob,
+ ia_css_process_output,
+ ia_css_process_sc,
+ ia_css_process_bds,
+ ia_css_process_tnr,
+ ia_css_process_macc,
+ ia_css_process_sdis_horicoef,
+ ia_css_process_sdis_vertcoef,
+ ia_css_process_sdis_horiproj,
+ ia_css_process_sdis_vertproj,
+ ia_css_process_sdis2_horicoef,
+ ia_css_process_sdis2_vertcoef,
+ ia_css_process_sdis2_horiproj,
+ ia_css_process_sdis2_vertproj,
+ ia_css_process_wb,
+ ia_css_process_nr,
+ ia_css_process_yee,
+ ia_css_process_ynr,
+ ia_css_process_fc,
+ ia_css_process_ctc,
+ ia_css_process_xnr_table,
+ ia_css_process_xnr,
+ ia_css_process_xnr3,
+};
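+
+/*
+ * Illustrative sketch (assumed caller-side code, not part of the generated
+ * table): the table is indexed by parameter ID, so a caller that wants to
+ * run every kernel's process function for one pipeline stage can loop over
+ * IA_CSS_NUM_PARAMETER_IDS:
+ *
+ *	unsigned int id;
+ *
+ *	for (id = 0; id < IA_CSS_NUM_PARAMETER_IDS; id++)
+ *		ia_css_kernel_process_param[id](pipe_id, stage, params);
+ */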
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_dp_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dp_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_dp_config() enter: config=%p\n",
+ config);
+
+ *config = params->dp_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_dp_config() leave\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dp_config = *config;
+ params->config_changed[IA_CSS_DP_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_dp_config() leave: return_void\n");
+}
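+
+/*
+ * Illustrative round trip for the generated get/set pairs (hypothetical
+ * caller; the local variable and the field update are assumptions): the
+ * setter copies the caller's struct into the parameter store and flags the
+ * kernel as changed, while the getter copies the stored value back out:
+ *
+ *	struct ia_css_dp_config dp;
+ *
+ *	ia_css_get_dp_config(params, &dp);	// read current config
+ *	dp.gain = new_gain;			// hypothetical field update
+ *	ia_css_set_dp_config(params, &dp);	// store + mark IA_CSS_DP_ID
+ */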
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_wb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_wb_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_wb_config() enter: config=%p\n",
+ config);
+
+ *config = params->wb_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_wb_config() leave\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->wb_config = *config;
+ params->config_changed[IA_CSS_WB_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_wb_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_tnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_tnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_tnr_config() enter: config=%p\n",
+ config);
+
+ *config = params->tnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_tnr_config() leave\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->tnr_config = *config;
+ params->config_changed[IA_CSS_TNR_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_tnr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ob_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ob_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ob_config() enter: config=%p\n",
+ config);
+
+ *config = params->ob_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ob_config() leave\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ob_config = *config;
+ params->config_changed[IA_CSS_OB_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ob_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_de_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_de_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_de_config() enter: config=%p\n",
+ config);
+
+ *config = params->de_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_de_config() leave\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->de_config = *config;
+ params->config_changed[IA_CSS_DE_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_de_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr_config() enter: config=%p\n",
+ config);
+
+ *config = params->anr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr_config() leave\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_config = *config;
+ params->config_changed[IA_CSS_ANR_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_anr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr2_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_thres *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr2_config() enter: config=%p\n",
+ config);
+
+ *config = params->anr_thres;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr2_config() leave\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_thres = *config;
+ params->config_changed[IA_CSS_ANR2_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_anr2_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ce_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ce_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ce_config() enter: config=%p\n",
+ config);
+
+ *config = params->ce_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ce_config() leave\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ce_config = *config;
+ params->config_changed[IA_CSS_CE_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ce_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ecd_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ecd_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ecd_config() enter: config=%p\n",
+ config);
+
+ *config = params->ecd_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ecd_config() leave\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ecd_config = *config;
+ params->config_changed[IA_CSS_ECD_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ecd_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ynr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ynr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ynr_config() enter: config=%p\n",
+ config);
+
+ *config = params->ynr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ynr_config() leave\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ynr_config = *config;
+ params->config_changed[IA_CSS_YNR_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ynr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_fc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_fc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_fc_config() enter: config=%p\n",
+ config);
+
+ *config = params->fc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_fc_config() leave\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->fc_config = *config;
+ params->config_changed[IA_CSS_FC_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_fc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_cnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_cnr_config() enter: config=%p\n",
+ config);
+
+ *config = params->cnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_cnr_config() leave\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cnr_config = *config;
+ params->config_changed[IA_CSS_CNR_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_cnr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_macc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_macc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_macc_config() enter: config=%p\n",
+ config);
+
+ *config = params->macc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_macc_config() leave\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->macc_config = *config;
+ params->config_changed[IA_CSS_MACC_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_macc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ctc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ctc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ctc_config() enter: config=%p\n",
+ config);
+
+ *config = params->ctc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ctc_config() leave\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ctc_config = *config;
+ params->config_changed[IA_CSS_CTC_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ctc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_aa_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_aa_config() enter: config=%p\n",
+ config);
+
+ *config = params->aa_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_aa_config() leave\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n");
+ params->aa_config = *config;
+ params->config_changed[IA_CSS_AA_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_aa_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_yuv2rgb_config() enter: config=%p\n",
+ config);
+
+ *config = params->yuv2rgb_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_yuv2rgb_config() leave\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->yuv2rgb_cc_config = *config;
+ params->config_changed[IA_CSS_YUV2RGB_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_yuv2rgb_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_rgb2yuv_config() enter: config=%p\n",
+ config);
+
+ *config = params->rgb2yuv_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_rgb2yuv_config() leave\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->rgb2yuv_cc_config = *config;
+ params->config_changed[IA_CSS_RGB2YUV_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_rgb2yuv_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_csc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_csc_config() enter: config=%p\n",
+ config);
+
+ *config = params->cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_csc_config() leave\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cc_config = *config;
+ params->config_changed[IA_CSS_CSC_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_csc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_nr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_nr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_nr_config() enter: config=%p\n",
+ config);
+
+ *config = params->nr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_nr_config() leave\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->nr_config = *config;
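+ /* nr_config also feeds the bayer-NR kernel, so BNR is flagged as well. */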
+ params->config_changed[IA_CSS_BNR_ID] = true;
+ params->config_changed[IA_CSS_NR_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_nr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_gc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_gc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_gc_config() enter: config=%p\n",
+ config);
+
+ *config = params->gc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_gc_config() leave\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->gc_config = *config;
+ params->config_changed[IA_CSS_GC_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_gc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horicoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horicoef_config() leave\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_horicoef_config() enter:\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
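+ /* All four SDIS kernels decode from the shared dvs_coefs storage, so
+ * each SDIS setter marks every SDIS parameter ID as changed. */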
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_horicoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertcoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertcoef_config() leave\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_vertcoef_config() enter:\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_vertcoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horiproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horiproj_config() leave\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_horiproj_config() enter:\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_horiproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertproj_config() leave\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_vertproj_config() enter:\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_vertproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horicoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horicoef_config() leave\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_horicoef_config() enter:\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
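+ /* All four SDIS2 kernels decode from the shared dvs2_coefs storage, so
+ * each SDIS2 setter marks every SDIS2 parameter ID as changed. */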
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_horicoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertcoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertcoef_config() leave\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_vertcoef_config() enter:\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_vertcoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horiproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horiproj_config() leave\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_horiproj_config() enter:\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_horiproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertproj_config() leave\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_vertproj_config() enter:\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_vertproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_r_gamma_config() enter: config=%p\n",
+ config);
+
+ *config = params->r_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_r_gamma_config() leave\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->r_gamma_table = *config;
+ params->config_changed[IA_CSS_R_GAMMA_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_r_gamma_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_g_gamma_config() enter: config=%p\n",
+ config);
+
+ *config = params->g_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_g_gamma_config() leave\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->g_gamma_table = *config;
+ params->config_changed[IA_CSS_G_GAMMA_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_g_gamma_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_b_gamma_config() enter: config=%p\n",
+ config);
+
+ *config = params->b_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_b_gamma_config() leave\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->b_gamma_table = *config;
+ params->config_changed[IA_CSS_B_GAMMA_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_b_gamma_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_table_config() enter: config=%p\n",
+ config);
+
+ *config = params->xnr_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_table_config() leave\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_xnr_table_config() enter:\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_table = *config;
+ params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_xnr_table_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_formats_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_formats_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_formats_config() enter: config=%p\n",
+ config);
+
+ *config = params->formats_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_formats_config() leave\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->formats_config = *config;
+ params->config_changed[IA_CSS_FORMATS_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_formats_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_config() enter: config=%p\n",
+ config);
+
+ *config = params->xnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_config() leave\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_config = *config;
+ params->config_changed[IA_CSS_XNR_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_xnr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr3_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr3_config() enter: config=%p\n",
+ config);
+
+ *config = params->xnr3_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr3_config() leave\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr3_config = *config;
+ params->config_changed[IA_CSS_XNR3_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_xnr3_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_s3a_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_3a_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_s3a_config() enter: config=%p\n",
+ config);
+
+ *config = params->s3a_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_s3a_config() leave\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->s3a_config = *config;
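+ /* s3a_config also drives the bayer-histogram kernel, so BH is flagged. */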
+ params->config_changed[IA_CSS_BH_ID] = true;
+ params->config_changed[IA_CSS_S3A_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_s3a_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_output_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_output_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_output_config() enter: config=%p\n",
+ config);
+
+ *config = params->output_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_output_config() leave\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->output_config = *config;
+ params->config_changed[IA_CSS_OUTPUT_ID] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_output_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_get_dp_config(params, config->dp_config);
+ ia_css_get_wb_config(params, config->wb_config);
+ ia_css_get_tnr_config(params, config->tnr_config);
+ ia_css_get_ob_config(params, config->ob_config);
+ ia_css_get_de_config(params, config->de_config);
+ ia_css_get_anr_config(params, config->anr_config);
+ ia_css_get_anr2_config(params, config->anr_thres);
+ ia_css_get_ce_config(params, config->ce_config);
+ ia_css_get_ecd_config(params, config->ecd_config);
+ ia_css_get_ynr_config(params, config->ynr_config);
+ ia_css_get_fc_config(params, config->fc_config);
+ ia_css_get_cnr_config(params, config->cnr_config);
+ ia_css_get_macc_config(params, config->macc_config);
+ ia_css_get_ctc_config(params, config->ctc_config);
+ ia_css_get_aa_config(params, config->aa_config);
+ ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_get_csc_config(params, config->cc_config);
+ ia_css_get_nr_config(params, config->nr_config);
+ ia_css_get_gc_config(params, config->gc_config);
+ ia_css_get_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_get_r_gamma_config(params, config->r_gamma_table);
+ ia_css_get_g_gamma_config(params, config->g_gamma_table);
+ ia_css_get_b_gamma_config(params, config->b_gamma_table);
+ ia_css_get_xnr_table_config(params, config->xnr_table);
+ ia_css_get_formats_config(params, config->formats_config);
+ ia_css_get_xnr_config(params, config->xnr_config);
+ ia_css_get_xnr3_config(params, config->xnr3_config);
+ ia_css_get_s3a_config(params, config->s3a_config);
+ ia_css_get_output_config(params, config->output_config);
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_set_dp_config(params, config->dp_config);
+ ia_css_set_wb_config(params, config->wb_config);
+ ia_css_set_tnr_config(params, config->tnr_config);
+ ia_css_set_ob_config(params, config->ob_config);
+ ia_css_set_de_config(params, config->de_config);
+ ia_css_set_anr_config(params, config->anr_config);
+ ia_css_set_anr2_config(params, config->anr_thres);
+ ia_css_set_ce_config(params, config->ce_config);
+ ia_css_set_ecd_config(params, config->ecd_config);
+ ia_css_set_ynr_config(params, config->ynr_config);
+ ia_css_set_fc_config(params, config->fc_config);
+ ia_css_set_cnr_config(params, config->cnr_config);
+ ia_css_set_macc_config(params, config->macc_config);
+ ia_css_set_ctc_config(params, config->ctc_config);
+ ia_css_set_aa_config(params, config->aa_config);
+ ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_set_csc_config(params, config->cc_config);
+ ia_css_set_nr_config(params, config->nr_config);
+ ia_css_set_gc_config(params, config->gc_config);
+ ia_css_set_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_set_r_gamma_config(params, config->r_gamma_table);
+ ia_css_set_g_gamma_config(params, config->g_gamma_table);
+ ia_css_set_b_gamma_config(params, config->b_gamma_table);
+ ia_css_set_xnr_table_config(params, config->xnr_table);
+ ia_css_set_formats_config(params, config->formats_config);
+ ia_css_set_xnr_config(params, config->xnr_config);
+ ia_css_set_xnr3_config(params, config->xnr3_config);
+ ia_css_set_s3a_config(params, config->s3a_config);
+ ia_css_set_output_config(params, config->output_config);
+}
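+
+/*
+ * Usage sketch (illustrative, not part of the generated API): the setters
+ * above return early on a NULL config pointer, so a caller can fill in
+ * only the kernels it wants to update and leave the rest NULL, e.g.:
+ *
+ *	struct ia_css_isp_config cfg = { 0 };
+ *
+ *	cfg.wb_config = &my_wb_config;    (my_wb_config: hypothetical)
+ *	ia_css_set_configs(params, &cfg); (only WB is updated)
+ */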
diff --git a/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_states.c b/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_states.c
new file mode 100644
index 000000000000..42e0344c677d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_states.c
@@ -0,0 +1,223 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_states.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
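+/*
+ * Note on the generated functions below: each ia_css_initialize_<kernel>_state()
+ * looks up the kernel's state size and offset in the binary's memory map
+ * and, when the kernel is present (size != 0), clears or seeds that
+ * section of the binary's VMEM/DMEM state copy.
+ */
+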
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_aa_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_aa_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.aa.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset;
+
+ if (size)
+ memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ 0, size);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_aa_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset;
+
+ if (size) {
+ ia_css_init_cnr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr2_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr2_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset;
+
+ if (size) {
+ ia_css_init_cnr2_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr2_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_dp_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_dp_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.dp.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset;
+
+ if (size) {
+ ia_css_init_dp_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_dp_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_de_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_de_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.de.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.de.offset;
+
+ if (size) {
+ ia_css_init_de_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_de_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_tnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_tnr_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->dmem.tnr.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_tnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ref_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ref_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->dmem.ref.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset;
+
+ if (size) {
+ ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ref_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ynr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ynr_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.ynr.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset;
+
+ if (size) {
+ ia_css_init_ynr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ynr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+void (*ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(
+ const struct ia_css_binary *binary) = {
+ ia_css_initialize_aa_state,
+ ia_css_initialize_cnr_state,
+ ia_css_initialize_cnr2_state,
+ ia_css_initialize_dp_state,
+ ia_css_initialize_de_state,
+ ia_css_initialize_tnr_state,
+ ia_css_initialize_ref_state,
+ ia_css_initialize_ynr_state,
+};
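+
+/*
+ * Dispatch sketch (illustrative): a caller is expected to walk this table
+ * to initialize every kernel state of a binary, e.g.:
+ *
+ *	unsigned int id;
+ *
+ *	for (id = 0; id < IA_CSS_NUM_STATE_IDS; id++)
+ *		ia_css_kernel_init_state[id](binary);
+ */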
diff --git a/drivers/staging/media/atomisp/pci/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h b/drivers/staging/media/atomisp/pci/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h
new file mode 100644
index 000000000000..5c636effba48
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h
@@ -0,0 +1,68 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_
+#define _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_
+
+/*
+ * These are the indices of each interrupt in the interrupt
+ * controller's registers. These can be used as the irq_id
+ * argument to the hrt functions in irq_controller.h.
+ *
+ * The definitions are taken from <system>_defs.h
+ */
+typedef enum hrt_isp_css_irq {
+ hrt_isp_css_irq_gpio_pin_0 = HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_1 = HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_2 = HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_3 = HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_4 = HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_5 = HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_6 = HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_7 = HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_8 = HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_9 = HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_10 = HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_11 = HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID,
+ hrt_isp_css_irq_sp = HIVE_GP_DEV_IRQ_SP_BIT_ID,
+ hrt_isp_css_irq_isp = HIVE_GP_DEV_IRQ_ISP_BIT_ID,
+ hrt_isp_css_irq_isys = HIVE_GP_DEV_IRQ_ISYS_BIT_ID,
+ hrt_isp_css_irq_isel = HIVE_GP_DEV_IRQ_ISEL_BIT_ID,
+ hrt_isp_css_irq_ifmt = HIVE_GP_DEV_IRQ_IFMT_BIT_ID,
+ hrt_isp_css_irq_sp_stream_mon = HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID,
+ hrt_isp_css_irq_isp_stream_mon = HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID,
+ hrt_isp_css_irq_mod_stream_mon = HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID,
+ hrt_isp_css_irq_isp_pmem_error = HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_isp_bamem_error = HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_isp_dmem_error = HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_sp_icache_mem_error = HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_sp_dmem_error = HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_mmu_cache_mem_error = HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_gp_timer_0 = HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID,
+ hrt_isp_css_irq_gp_timer_1 = HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID,
+ hrt_isp_css_irq_sw_pin_0 = HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID,
+ hrt_isp_css_irq_sw_pin_1 = HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID,
+ hrt_isp_css_irq_dma = HIVE_GP_DEV_IRQ_DMA_BIT_ID,
+ hrt_isp_css_irq_sp_stream_mon_b = HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID,
+	/* this must (obviously) be the last one in the enum */
+ hrt_isp_css_irq_num_irqs
+} hrt_isp_css_irq_t;
+
+typedef enum hrt_isp_css_irq_status {
+ hrt_isp_css_irq_status_error,
+ hrt_isp_css_irq_status_more_irqs,
+ hrt_isp_css_irq_status_success
+} hrt_isp_css_irq_status_t;
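+
+/*
+ * Polling sketch (illustrative; hrt_irq_controller_get_id() is an assumed
+ * name standing in for the real accessors declared in irq_controller.h):
+ *
+ *	unsigned int id;
+ *	hrt_isp_css_irq_status_t status;
+ *
+ *	do {
+ *		status = hrt_irq_controller_get_id(&id);
+ *		if (status == hrt_isp_css_irq_status_error)
+ *			break;
+ *		service_irq((hrt_isp_css_irq_t)id); (service_irq: hypothetical)
+ *	} while (status == hrt_isp_css_irq_status_more_irqs);
+ */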
+
+#endif /* _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_ */
diff --git a/drivers/staging/media/atomisp/pci/css_2400_system/hrt/isp2400_mamoiada_params.h b/drivers/staging/media/atomisp/pci/css_2400_system/hrt/isp2400_mamoiada_params.h
new file mode 100644
index 000000000000..edc4d4ff1846
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2400_system/hrt/isp2400_mamoiada_params.h
@@ -0,0 +1,228 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Version */
+#define RTL_VERSION
+
+/* Cell name */
+#define ISP_CELL_TYPE isp2400_mamoiada
+#define ISP_VMEM simd_vmem
+#define _HRT_ISP_VMEM isp2400_mamoiada_simd_vmem
+
+/* instruction pipeline depth */
+#define ISP_BRANCHDELAY 5
+
+/* bus */
+#define ISP_BUS_WIDTH 32
+#define ISP_BUS_ADDR_WIDTH 32
+#define ISP_BUS_BURST_SIZE 1
+
+/* data-path */
+#define ISP_SCALAR_WIDTH 32
+#define ISP_SLICE_NELEMS 4
+#define ISP_VEC_NELEMS 64
+#define ISP_VEC_ELEMBITS 14
+#define ISP_VEC_ELEM8BITS 16
+#define ISP_CLONE_DATAPATH_IS_16 1
+
+/* memories */
+#define ISP_DMEM_DEPTH 4096
+#define ISP_DMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_DEPTH 3072
+#define ISP_VMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_ELEMBITS 14
+#define ISP_VMEM_ELEM_PRECISION 14
+#define ISP_PMEM_DEPTH 2048
+#define ISP_PMEM_WIDTH 640
+#define ISP_VAMEM_ADDRESS_BITS 12
+#define ISP_VAMEM_ELEMBITS 12
+#define ISP_VAMEM_DEPTH 2048
+#define ISP_VAMEM_ALIGNMENT 2
+#define ISP_VA_ADDRESS_WIDTH 896
+#define ISP_VEC_VALSU_LATENCY ISP_VEC_NELEMS
+#define ISP_HIST_ADDRESS_BITS 12
+#define ISP_HIST_ALIGNMENT 4
+#define ISP_HIST_COMP_IN_PREC 12
+#define ISP_HIST_DEPTH 1024
+#define ISP_HIST_WIDTH 24
+#define ISP_HIST_COMPONENTS 4
+
+/* program counter */
+#define ISP_PC_WIDTH 13
+
+/* Template switches */
+#define ISP_SHIELD_INPUT_DMEM 0
+#define ISP_SHIELD_OUTPUT_DMEM 1
+#define ISP_SHIELD_INPUT_VMEM 0
+#define ISP_SHIELD_OUTPUT_VMEM 0
+#define ISP_SHIELD_INPUT_PMEM 1
+#define ISP_SHIELD_OUTPUT_PMEM 1
+#define ISP_SHIELD_INPUT_HIST 1
+#define ISP_SHIELD_OUTPUT_HIST 1
+/* When the LUT is selected, shielding is always on */
+#define ISP_SHIELD_INPUT_VAMEM 1
+#define ISP_SHIELD_OUTPUT_VAMEM 1
+
+#define ISP_HAS_IRQ 1
+#define ISP_HAS_SOFT_RESET 1
+#define ISP_HAS_VEC_DIV 0
+#define ISP_HAS_VFU_W_2O 1
+#define ISP_HAS_DEINT3 1
+#define ISP_HAS_LUT 1
+#define ISP_HAS_HIST 1
+#define ISP_HAS_VALSU 1
+#define ISP_HAS_3rdVALSU 1
+#define ISP_VRF1_HAS_2P 1
+
+#define ISP_SRU_GUARDING 1
+#define ISP_VLSU_GUARDING 1
+
+#define ISP_VRF_RAM 1
+#define ISP_SRF_RAM 1
+
+#define ISP_SPLIT_VMUL_VADD_IS 0
+#define ISP_RFSPLIT_FPGA 0
+
+/* RSN or Bus pipelining */
+#define ISP_RSN_PIPE 1
+#define ISP_VSF_BUS_PIPE 0
+
+/* extra slave port to vmem */
+#define ISP_IF_VMEM 0
+#define ISP_GDC_VMEM 0
+
+/* Streaming ports */
+#define ISP_IF 1
+#define ISP_IF_B 1
+#define ISP_GDC 1
+#define ISP_SCL 1
+#define ISP_GPFIFO 1
+#define ISP_SP 1
+
+/* Removing Issue Slot(s) */
+#define ISP_HAS_NOT_SIMD_IS2 0
+#define ISP_HAS_NOT_SIMD_IS3 0
+#define ISP_HAS_NOT_SIMD_IS4 0
+#define ISP_HAS_NOT_SIMD_IS4_VADD 0
+#define ISP_HAS_NOT_SIMD_IS5 0
+#define ISP_HAS_NOT_SIMD_IS6 0
+#define ISP_HAS_NOT_SIMD_IS7 0
+#define ISP_HAS_NOT_SIMD_IS8 0
+
+/* ICache */
+#define ISP_ICACHE 1
+#define ISP_ICACHE_ONLY 0
+#define ISP_ICACHE_PREFETCH 1
+#define ISP_ICACHE_INDEX_BITS 8
+#define ISP_ICACHE_SET_BITS 5
+#define ISP_ICACHE_BLOCKS_PER_SET_BITS 1
+
+/* Experimental Flags */
+#define ISP_EXP_1 0
+#define ISP_EXP_2 0
+#define ISP_EXP_3 0
+#define ISP_EXP_4 0
+#define ISP_EXP_5 0
+#define ISP_EXP_6 0
+
+/* Derived values */
+#define ISP_LOG2_PMEM_WIDTH 10
+#define ISP_VEC_WIDTH 896
+#define ISP_SLICE_WIDTH 56
+#define ISP_VMEM_WIDTH 896
+#define ISP_VMEM_ALIGN 128
+#define ISP_SIMDLSU 1
+#define ISP_LSU_IMM_BITS 12
+
+/* Convenient shortcuts for software */
+#define ISP_NWAY ISP_VEC_NELEMS
+#define NBITS ISP_VEC_ELEMBITS
+
+#define _isp_ceil_div(a, b) (((a) + (b) - 1) / (b))
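+/*
+ * Example: _isp_ceil_div(ISP_VEC_NELEMS, ISP_SLICE_NELEMS) expands to
+ * (64 + 4 - 1) / 4 = 16 slices per vector; when the operands do not
+ * divide evenly the result rounds up, e.g. _isp_ceil_div(10, 4) == 3.
+ */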
+
+#define ISP_VEC_ALIGN ISP_VMEM_ALIGN
+
+/* HRT specific vector support */
+#define isp2400_mamoiada_vector_alignment ISP_VEC_ALIGN
+#define isp2400_mamoiada_vector_elem_bits ISP_VMEM_ELEMBITS
+#define isp2400_mamoiada_vector_elem_precision ISP_VMEM_ELEM_PRECISION
+#define isp2400_mamoiada_vector_num_elems ISP_VEC_NELEMS
+
+/* register file sizes */
+#define ISP_RF0_SIZE 64
+#define ISP_RF1_SIZE 16
+#define ISP_RF2_SIZE 64
+#define ISP_RF3_SIZE 4
+#define ISP_RF4_SIZE 64
+#define ISP_RF5_SIZE 16
+#define ISP_RF6_SIZE 16
+#define ISP_RF7_SIZE 16
+#define ISP_RF8_SIZE 16
+#define ISP_RF9_SIZE 16
+#define ISP_RF10_SIZE 16
+#define ISP_RF11_SIZE 16
+#define ISP_VRF1_SIZE 24
+#define ISP_VRF2_SIZE 24
+#define ISP_VRF3_SIZE 24
+#define ISP_VRF4_SIZE 24
+#define ISP_VRF5_SIZE 24
+#define ISP_VRF6_SIZE 24
+#define ISP_VRF7_SIZE 24
+#define ISP_VRF8_SIZE 24
+#define ISP_SRF1_SIZE 4
+#define ISP_SRF2_SIZE 64
+#define ISP_SRF3_SIZE 64
+#define ISP_SRF4_SIZE 32
+#define ISP_SRF5_SIZE 64
+#define ISP_FRF0_SIZE 16
+#define ISP_FRF1_SIZE 4
+#define ISP_FRF2_SIZE 16
+#define ISP_FRF3_SIZE 4
+#define ISP_FRF4_SIZE 4
+#define ISP_FRF5_SIZE 8
+#define ISP_FRF6_SIZE 4
+/* register file read latency */
+#define ISP_VRF1_READ_LAT 1
+#define ISP_VRF2_READ_LAT 1
+#define ISP_VRF3_READ_LAT 1
+#define ISP_VRF4_READ_LAT 1
+#define ISP_VRF5_READ_LAT 1
+#define ISP_VRF6_READ_LAT 1
+#define ISP_VRF7_READ_LAT 1
+#define ISP_VRF8_READ_LAT 1
+#define ISP_SRF1_READ_LAT 1
+#define ISP_SRF2_READ_LAT 1
+#define ISP_SRF3_READ_LAT 1
+#define ISP_SRF4_READ_LAT 1
+#define ISP_SRF5_READ_LAT 1
+/* immediate sizes */
+#define ISP_IS1_IMM_BITS 14
+#define ISP_IS2_IMM_BITS 13
+#define ISP_IS3_IMM_BITS 14
+#define ISP_IS4_IMM_BITS 14
+#define ISP_IS5_IMM_BITS 9
+#define ISP_IS6_IMM_BITS 16
+#define ISP_IS7_IMM_BITS 9
+#define ISP_IS8_IMM_BITS 16
+#define ISP_IS9_IMM_BITS 11
+/* fifo depths */
+#define ISP_IF_FIFO_DEPTH 0
+#define ISP_IF_B_FIFO_DEPTH 0
+#define ISP_DMA_FIFO_DEPTH 0
+#define ISP_OF_FIFO_DEPTH 0
+#define ISP_GDC_FIFO_DEPTH 0
+#define ISP_SCL_FIFO_DEPTH 0
+#define ISP_GPFIFO_FIFO_DEPTH 0
+#define ISP_SP_FIFO_DEPTH 0
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h
new file mode 100644
index 000000000000..4de5bb81bd23
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_GLOBAL_H_INCLUDED__
+#define __CSI_RX_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+typedef enum {
+ CSI_MIPI_PACKET_TYPE_UNDEFINED = 0,
+ CSI_MIPI_PACKET_TYPE_LONG,
+ CSI_MIPI_PACKET_TYPE_SHORT,
+ CSI_MIPI_PACKET_TYPE_RESERVED,
+ N_CSI_MIPI_PACKET_TYPE
+} csi_mipi_packet_type_t;
+
+typedef struct csi_rx_backend_lut_entry_s csi_rx_backend_lut_entry_t;
+struct csi_rx_backend_lut_entry_s {
+ u32 long_packet_entry;
+ u32 short_packet_entry;
+};
+
+typedef struct csi_rx_backend_cfg_s csi_rx_backend_cfg_t;
+struct csi_rx_backend_cfg_s {
+ /* LUT entry for the packet */
+ csi_rx_backend_lut_entry_t lut_entry;
+
+ /* can be derived from the Data Type */
+ csi_mipi_packet_type_t csi_mipi_packet_type;
+
+ struct {
+ bool comp_enable;
+ u32 virtual_channel;
+ u32 data_type;
+ u32 comp_scheme;
+ u32 comp_predictor;
+ u32 comp_bit_idx;
+ } csi_mipi_cfg;
+};
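+
+/*
+ * Fill-in sketch (values are illustrative assumptions, not taken from a
+ * real sensor setup; 0x2b is the MIPI CSI-2 data type code for RAW10):
+ *
+ *	csi_rx_backend_cfg_t cfg = {
+ *		.lut_entry = { .long_packet_entry = 0, .short_packet_entry = 0 },
+ *		.csi_mipi_packet_type = CSI_MIPI_PACKET_TYPE_LONG,
+ *		.csi_mipi_cfg = {
+ *			.comp_enable = false,
+ *			.virtual_channel = 0,
+ *			.data_type = 0x2b,
+ *			.comp_scheme = 0,
+ *			.comp_predictor = 0,
+ *			.comp_bit_idx = 0,
+ *		},
+ *	};
+ */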
+
+typedef struct csi_rx_frontend_cfg_s csi_rx_frontend_cfg_t;
+struct csi_rx_frontend_cfg_s {
+ u32 active_lanes;
+};
+
+extern const u32 N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID];
+extern const u32 N_LONG_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID];
+extern const u32 N_CSI_RX_FE_CTRL_DLANES[N_CSI_RX_FRONTEND_ID];
+/* sid_width for CSI_RX_BACKEND<N>_ID */
+extern const u32 N_CSI_RX_BE_SID_WIDTH[N_CSI_RX_BACKEND_ID];
+
+#endif /* __CSI_RX_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_configs.c
new file mode 100644
index 000000000000..29d85407cac4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_configs.c
@@ -0,0 +1,386 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
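+/*
+ * Note on the generated functions below: each ia_css_configure_<kernel>()
+ * looks up the kernel's configuration size and offset in the binary's
+ * memory map and, when the kernel is present (size != 0), copies the
+ * caller's DMEM configuration into the binary's parameter copy.
+ */
+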
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_iterator(
+ const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_iterator() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
+ }
+ if (size) {
+ ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_iterator() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_copy_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_copy_output() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
+ }
+ if (size) {
+ ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_copy_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_crop(
+ const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_crop() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
+ }
+ if (size) {
+ ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_crop() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_fpn(
+ const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_fpn() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
+ }
+ if (size) {
+ ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_fpn() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_dvs(
+ const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_dvs() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
+ }
+ if (size) {
+ ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_dvs() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_qplane(
+ const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_qplane() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
+ }
+ if (size) {
+ ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_qplane() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output0(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output0() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
+ }
+ if (size) {
+ ia_css_output0_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output0() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output1(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output1() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
+ }
+ if (size) {
+ ia_css_output1_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output1() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
+ }
+ if (size) {
+ ia_css_output_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_raw(
+ const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_raw() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
+ }
+ if (size) {
+ ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_raw() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_tnr(
+ const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_tnr() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
+ }
+ if (size) {
+ ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_tnr() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_ref(
+ const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_ref() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
+ }
+ if (size) {
+ ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_ref() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_vf(
+ const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_vf() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
+ }
+ if (size) {
+ ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_vf() leave:\n");
+}
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_params.c
new file mode 100644
index 000000000000..68297296885e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_params.c
@@ -0,0 +1,3366 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define IA_CSS_INCLUDE_PARAMETERS
+#include "sh_css_params.h"
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h"
+#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h"
+#include "isp/kernels/bh/bh_2/ia_css_bh.host.h"
+#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h"
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h"
+#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/de/de_2/ia_css_de2.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h"
+#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h"
+#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h"
+#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/ob/ob2/ia_css_ob2.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h"
+#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h"
+#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h"
+#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h"
+#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h"
+#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+#include "isp/kernels/bnlm/ia_css_bnlm.host.h"
+#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h"
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_params.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
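+/*
+ * Note on the generated functions below: each ia_css_process_<kernel>()
+ * encodes one kernel's host-side parameters into a pipeline stage's ISP
+ * memory copy (when the binary reserves space for that kernel) and
+ * raises the per-stage "changed" flags so the new values are flushed to
+ * the ISP.
+ */
+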
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_aa(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.aa.size;
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset;
+
+ if (size) {
+ struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ t->strength = params->aa_config.strength;
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.anr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr() enter:\n");
+
+ ia_css_anr_encode((struct sh_css_isp_anr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->anr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr2(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr2() enter:\n");
+
+ ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->anr_thres,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr2() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bh(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bh.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ ia_css_bh_encode((struct sh_css_isp_bh_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_cnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_cnr() enter:\n");
+
+ ia_css_cnr_encode((struct sh_css_isp_cnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_cnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_crop(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.crop.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_crop() enter:\n");
+
+ ia_css_crop_encode((struct sh_css_isp_crop_isp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->crop_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_crop() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_csc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.csc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_csc() enter:\n");
+
+ ia_css_csc_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_csc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_dp(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n");
+
+ ia_css_dp_encode((struct sh_css_isp_dp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dp_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bnr() enter:\n");
+
+ ia_css_bnr_encode((struct sh_css_isp_bnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_de(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.de.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.de.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n");
+
+ ia_css_de_encode((struct sh_css_isp_de_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->de_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ecd(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ecd() enter:\n");
+
+ ia_css_ecd_encode((struct sh_css_isp_ecd_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ecd_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ecd() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_formats(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.formats.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_formats() enter:\n");
+
+ ia_css_formats_encode((struct sh_css_isp_formats_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->formats_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_formats() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fpn(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_fpn() enter:\n");
+
+ ia_css_fpn_encode((struct sh_css_isp_fpn_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fpn_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_fpn() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_gc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.gc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_encode((struct sh_css_isp_gc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->gc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->gc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ce(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ce.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n");
+
+ ia_css_ce_encode((struct sh_css_isp_ce_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ce_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yuv2rgb(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yuv2rgb() enter:\n");
+
+ ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yuv2rgb_cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yuv2rgb() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_rgb2yuv(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_rgb2yuv() enter:\n");
+
+ ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->rgb2yuv_cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_rgb2yuv() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_r_gamma(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_r_gamma() enter:\n");
+
+ ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->r_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_r_gamma() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_g_gamma(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_g_gamma() enter:\n");
+
+ ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->g_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_g_gamma() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_b_gamma(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_b_gamma() enter:\n");
+
+ ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset],
+ &params->b_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_b_gamma() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_uds(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.uds.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset;
+
+ if (size) {
+ struct sh_css_sp_uds_params *p;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_uds() enter:\n");
+
+ p = (struct sh_css_sp_uds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->crop_pos = params->uds_config.crop_pos;
+ p->uds = params->uds_config.uds;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_uds() leave:\n");
+ }
+ }
+}
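+
+/*
+ * NOTE: ia_css_process_uds() above (and ia_css_process_bds() further
+ * down) deviate slightly from the encoder-call template: their parameter
+ * layout is a plain field copy, so the generator inlines the assignments
+ * instead of calling a *_encode() helper.
+ */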
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_raa(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.raa.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_raa() enter:\n");
+
+ ia_css_raa_encode((struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->raa_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_raa() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_s3a(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_s3a() enter:\n");
+
+ ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_s3a() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ob(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ob.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_encode((struct sh_css_isp_ob_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ob_config,
+ &params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.ob.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->ob_config,
+ &params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+ }
+}
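+
+/*
+ * NOTE: kernels whose parameters live in more than one ISP memory get
+ * one guarded block per memory. ia_css_process_ob() above writes both a
+ * DMEM image (ia_css_ob_encode) and a VMEM image (ia_css_ob_vmem_encode);
+ * ia_css_process_ctc() and ia_css_process_xnr3() below follow the same
+ * shape.
+ */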
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_output(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.output.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.output.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_output() enter:\n");
+
+ ia_css_output_encode((struct sh_css_isp_output_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->output_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_output() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n");
+
+ ia_css_sc_encode((struct sh_css_isp_sc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->sc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bds(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bds.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset;
+
+ if (size) {
+ struct sh_css_isp_bds_params *p;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bds() enter:\n");
+
+ p = (struct sh_css_isp_bds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->baf_strength = params->bds_config.strength;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bds() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_tnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_tnr() enter:\n");
+
+ ia_css_tnr_encode((struct sh_css_isp_tnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->tnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_tnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_macc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.macc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_macc() enter:\n");
+
+ ia_css_macc_encode((struct sh_css_isp_macc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->macc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_macc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horicoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horicoef() enter:\n");
+
+ ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horicoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertcoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertcoef() enter:\n");
+
+ ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertcoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horiproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horiproj() enter:\n");
+
+ ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horiproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertproj() enter:\n");
+
+ ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horicoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horicoef() enter:\n");
+
+ ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horicoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertcoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertcoef() enter:\n");
+
+ ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertcoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horiproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horiproj() enter:\n");
+
+ ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horiproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertproj() enter:\n");
+
+ ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_wb(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.wb.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n");
+
+ ia_css_wb_encode((struct sh_css_isp_wb_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->wb_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_nr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.nr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n");
+
+ ia_css_nr_encode((struct sh_css_isp_ynr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yee(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yee.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yee() enter:\n");
+
+ ia_css_yee_encode((struct sh_css_isp_yee_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yee_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yee() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ynr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ynr() enter:\n");
+
+ ia_css_ynr_encode((struct sh_css_isp_yee2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ynr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ynr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n");
+
+ ia_css_fc_encode((struct sh_css_isp_fc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ctc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_encode((struct sh_css_isp_ctc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ctc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->ctc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr_table(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr_table() enter:\n");
+
+ ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->xnr_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr_table() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr() enter:\n");
+
+ ia_css_xnr_encode((struct sh_css_isp_xnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr3(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr3() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_vmem_encode((struct sh_css_isp_xnr3_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr3() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+void (*ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params) = {
+ ia_css_process_aa,
+ ia_css_process_anr,
+ ia_css_process_anr2,
+ ia_css_process_bh,
+ ia_css_process_cnr,
+ ia_css_process_crop,
+ ia_css_process_csc,
+ ia_css_process_dp,
+ ia_css_process_bnr,
+ ia_css_process_de,
+ ia_css_process_ecd,
+ ia_css_process_formats,
+ ia_css_process_fpn,
+ ia_css_process_gc,
+ ia_css_process_ce,
+ ia_css_process_yuv2rgb,
+ ia_css_process_rgb2yuv,
+ ia_css_process_r_gamma,
+ ia_css_process_g_gamma,
+ ia_css_process_b_gamma,
+ ia_css_process_uds,
+ ia_css_process_raa,
+ ia_css_process_s3a,
+ ia_css_process_ob,
+ ia_css_process_output,
+ ia_css_process_sc,
+ ia_css_process_bds,
+ ia_css_process_tnr,
+ ia_css_process_macc,
+ ia_css_process_sdis_horicoef,
+ ia_css_process_sdis_vertcoef,
+ ia_css_process_sdis_horiproj,
+ ia_css_process_sdis_vertproj,
+ ia_css_process_sdis2_horicoef,
+ ia_css_process_sdis2_vertcoef,
+ ia_css_process_sdis2_horiproj,
+ ia_css_process_sdis2_vertproj,
+ ia_css_process_wb,
+ ia_css_process_nr,
+ ia_css_process_yee,
+ ia_css_process_ynr,
+ ia_css_process_fc,
+ ia_css_process_ctc,
+ ia_css_process_xnr_table,
+ ia_css_process_xnr,
+ ia_css_process_xnr3,
+};
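+
+/*
+ * NOTE: this table is indexed by parameter ID, so the entry order above
+ * must stay in sync with the IA_CSS_*_ID enumeration that defines
+ * IA_CSS_NUM_PARAMETER_IDS. A minimal sketch of how a caller might drive
+ * it -- the loop is illustrative only and not part of this patch:
+ *
+ *	unsigned int id;
+ *
+ *	for (id = 0; id < IA_CSS_NUM_PARAMETER_IDS; id++)
+ *		ia_css_kernel_process_param[id](pipe_id, stage, params);
+ */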
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_dp_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dp_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_dp_config() enter: config=%p\n",
+ config);
+
+ *config = params->dp_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_dp_config() leave\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dp_config = *config;
+ params->config_changed[IA_CSS_DP_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_dp_config() leave: return_void\n");
+}
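+
+/*
+ * NOTE: the gen_get_function()/gen_set_function() pairs from here on are
+ * also templated. A getter copies the cached struct out of
+ * ia_css_isp_parameters; a setter copies the caller's struct in and sets
+ * config_changed[<ID>] (placeholder), flagging the kernel as dirty so
+ * the matching ia_css_process_*() hook re-encodes it on the next
+ * parameter update. Both return silently on a NULL config pointer.
+ */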
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_wb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_wb_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_wb_config() enter: config=%p\n",
+ config);
+
+ *config = params->wb_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_wb_config() leave\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->wb_config = *config;
+ params->config_changed[IA_CSS_WB_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_wb_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_tnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_tnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_tnr_config() enter: config=%p\n",
+ config);
+
+ *config = params->tnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_tnr_config() leave\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->tnr_config = *config;
+ params->config_changed[IA_CSS_TNR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_tnr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ob_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ob_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ob_config() enter: config=%p\n",
+ config);
+
+ *config = params->ob_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ob_config() leave\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ob_config = *config;
+ params->config_changed[IA_CSS_OB_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ob_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_de_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_de_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_de_config() enter: config=%p\n",
+ config);
+
+ *config = params->de_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_de_config() leave\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->de_config = *config;
+ params->config_changed[IA_CSS_DE_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_de_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr_config() enter: config=%p\n",
+ config);
+
+ *config = params->anr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr_config() leave\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_config = *config;
+ params->config_changed[IA_CSS_ANR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_anr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr2_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_thres *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr2_config() enter: config=%p\n",
+ config);
+
+ *config = params->anr_thres;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr2_config() leave\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_thres = *config;
+ params->config_changed[IA_CSS_ANR2_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_anr2_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ce_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ce_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ce_config() enter: config=%p\n",
+ config);
+
+ *config = params->ce_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ce_config() leave\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ce_config = *config;
+ params->config_changed[IA_CSS_CE_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ce_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ecd_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ecd_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ecd_config() enter: config=%p\n",
+ config);
+
+ *config = params->ecd_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ecd_config() leave\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ecd_config = *config;
+ params->config_changed[IA_CSS_ECD_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ecd_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ynr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ynr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ynr_config() enter: config=%p\n",
+ config);
+
+ *config = params->ynr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ynr_config() leave\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ynr_config = *config;
+ params->config_changed[IA_CSS_YNR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ynr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_fc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_fc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_fc_config() enter: config=%p\n",
+ config);
+
+ *config = params->fc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_fc_config() leave\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->fc_config = *config;
+ params->config_changed[IA_CSS_FC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_fc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_cnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_cnr_config() enter: config=%p\n",
+ config);
+
+ *config = params->cnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_cnr_config() leave\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cnr_config = *config;
+ params->config_changed[IA_CSS_CNR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_cnr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_macc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_macc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_macc_config() enter: config=%p\n",
+ config);
+
+ *config = params->macc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_macc_config() leave\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->macc_config = *config;
+ params->config_changed[IA_CSS_MACC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_macc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ctc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ctc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ctc_config() enter: config=%p\n",
+ config);
+
+ *config = params->ctc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ctc_config() leave\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ctc_config = *config;
+ params->config_changed[IA_CSS_CTC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ctc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_aa_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_aa_config() enter: config=%p\n",
+ config);
+
+ *config = params->aa_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_aa_config() leave\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n");
+ params->aa_config = *config;
+ params->config_changed[IA_CSS_AA_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_aa_config() leave: return_void\n");
+}
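+
+/*
+ * NOTE: ia_css_get_aa_config()/ia_css_set_aa_config() above omit the
+ * per-config debug dump found in the surrounding pairs, presumably
+ * because the aa kernel has no *_debug_dtrace() helper of its own.
+ */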
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_yuv2rgb_config() enter: config=%p\n",
+ config);
+
+ *config = params->yuv2rgb_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_yuv2rgb_config() leave\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->yuv2rgb_cc_config = *config;
+ params->config_changed[IA_CSS_YUV2RGB_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_yuv2rgb_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_rgb2yuv_config() enter: config=%p\n",
+ config);
+
+ *config = params->rgb2yuv_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_rgb2yuv_config() leave\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->rgb2yuv_cc_config = *config;
+ params->config_changed[IA_CSS_RGB2YUV_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_rgb2yuv_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_csc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_csc_config() enter: config=%p\n",
+ config);
+
+ *config = params->cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_csc_config() leave\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cc_config = *config;
+ params->config_changed[IA_CSS_CSC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_csc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_nr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_nr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_nr_config() enter: config=%p\n",
+ config);
+
+ *config = params->nr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_nr_config() leave\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->nr_config = *config;
+ params->config_changed[IA_CSS_BNR_ID] = true;
+ params->config_changed[IA_CSS_NR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_nr_config() leave: return_void\n");
+}
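+
+/*
+ * NOTE: a single ia_css_nr_config feeds two kernels, so
+ * ia_css_set_nr_config() above marks both IA_CSS_BNR_ID and
+ * IA_CSS_NR_ID as changed in one call.
+ */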
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_gc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_gc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_gc_config() enter: config=%p\n",
+ config);
+
+ *config = params->gc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_gc_config() leave\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->gc_config = *config;
+ params->config_changed[IA_CSS_GC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_gc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horicoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horicoef_config() leave\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_horicoef_config() enter:\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_horicoef_config() leave: return_void\n");
+}
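+
+/*
+ * NOTE: ia_css_set_sdis_horicoef_config() above and the vertcoef/
+ * horiproj/vertproj setters below all store into the same
+ * params->dvs_coefs field, so each of them marks all four SDIS parameter
+ * IDs as changed; the sdis2_* setters do the same for params->dvs2_coefs.
+ */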
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertcoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertcoef_config() leave\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_vertcoef_config() enter:\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_vertcoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horiproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horiproj_config() leave\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_horiproj_config() enter:\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_horiproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertproj_config() leave\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_vertproj_config() enter:\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_vertproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horicoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horicoef_config() leave\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_horicoef_config() enter:\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_horicoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertcoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertcoef_config() leave\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_vertcoef_config() enter:\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_vertcoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horiproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horiproj_config() leave\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_horiproj_config() enter:\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_horiproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertproj_config() leave\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_vertproj_config() enter:\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_vertproj_config() leave: return_void\n");
+}
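+
+/*
+ * Editorial note (not generated code): the sdis_* setters above all
+ * store to the same params->dvs_coefs field, and the sdis2_* setters
+ * all store to params->dvs2_coefs; each raises all four of its
+ * family's config_changed flags. The split into hori/vert coef/proj
+ * accessors mirrors the per-kernel parameter IDs, not distinct storage.
+ */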
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_r_gamma_config() enter: config=%p\n",
+ config);
+
+ *config = params->r_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_r_gamma_config() leave\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->r_gamma_table = *config;
+ params->config_changed[IA_CSS_R_GAMMA_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_r_gamma_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_g_gamma_config() enter: config=%p\n",
+ config);
+
+ *config = params->g_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_g_gamma_config() leave\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->g_gamma_table = *config;
+ params->config_changed[IA_CSS_G_GAMMA_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_g_gamma_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_b_gamma_config() enter: config=%p\n",
+ config);
+
+ *config = params->b_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_b_gamma_config() leave\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->b_gamma_table = *config;
+ params->config_changed[IA_CSS_B_GAMMA_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_b_gamma_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_table_config() enter: config=%p\n",
+ config);
+
+ *config = params->xnr_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_table_config() leave\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_xnr_table_config() enter:\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_table = *config;
+ params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_xnr_table_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_formats_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_formats_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_formats_config() enter: config=%p\n",
+ config);
+
+ *config = params->formats_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_formats_config() leave\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->formats_config = *config;
+ params->config_changed[IA_CSS_FORMATS_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_formats_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_config() enter: config=%p\n",
+ config);
+
+ *config = params->xnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_config() leave\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_config = *config;
+ params->config_changed[IA_CSS_XNR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_xnr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr3_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr3_config() enter: config=%p\n",
+ config);
+
+ *config = params->xnr3_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr3_config() leave\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr3_config = *config;
+ params->config_changed[IA_CSS_XNR3_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_xnr3_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_s3a_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_3a_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_s3a_config() enter: config=%p\n",
+ config);
+
+ *config = params->s3a_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_s3a_config() leave\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->s3a_config = *config;
+ params->config_changed[IA_CSS_BH_ID] = true;
+ params->config_changed[IA_CSS_S3A_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_s3a_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_output_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_output_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_output_config() enter: config=%p\n",
+ config);
+
+ *config = params->output_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_output_config() leave\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->output_config = *config;
+ params->config_changed[IA_CSS_OUTPUT_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_output_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_get_dp_config(params, config->dp_config);
+ ia_css_get_wb_config(params, config->wb_config);
+ ia_css_get_tnr_config(params, config->tnr_config);
+ ia_css_get_ob_config(params, config->ob_config);
+ ia_css_get_de_config(params, config->de_config);
+ ia_css_get_anr_config(params, config->anr_config);
+ ia_css_get_anr2_config(params, config->anr_thres);
+ ia_css_get_ce_config(params, config->ce_config);
+ ia_css_get_ecd_config(params, config->ecd_config);
+ ia_css_get_ynr_config(params, config->ynr_config);
+ ia_css_get_fc_config(params, config->fc_config);
+ ia_css_get_cnr_config(params, config->cnr_config);
+ ia_css_get_macc_config(params, config->macc_config);
+ ia_css_get_ctc_config(params, config->ctc_config);
+ ia_css_get_aa_config(params, config->aa_config);
+ ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_get_csc_config(params, config->cc_config);
+ ia_css_get_nr_config(params, config->nr_config);
+ ia_css_get_gc_config(params, config->gc_config);
+ ia_css_get_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_get_r_gamma_config(params, config->r_gamma_table);
+ ia_css_get_g_gamma_config(params, config->g_gamma_table);
+ ia_css_get_b_gamma_config(params, config->b_gamma_table);
+ ia_css_get_xnr_table_config(params, config->xnr_table);
+ ia_css_get_formats_config(params, config->formats_config);
+ ia_css_get_xnr_config(params, config->xnr_config);
+ ia_css_get_xnr3_config(params, config->xnr3_config);
+ ia_css_get_s3a_config(params, config->s3a_config);
+ ia_css_get_output_config(params, config->output_config);
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_set_dp_config(params, config->dp_config);
+ ia_css_set_wb_config(params, config->wb_config);
+ ia_css_set_tnr_config(params, config->tnr_config);
+ ia_css_set_ob_config(params, config->ob_config);
+ ia_css_set_de_config(params, config->de_config);
+ ia_css_set_anr_config(params, config->anr_config);
+ ia_css_set_anr2_config(params, config->anr_thres);
+ ia_css_set_ce_config(params, config->ce_config);
+ ia_css_set_ecd_config(params, config->ecd_config);
+ ia_css_set_ynr_config(params, config->ynr_config);
+ ia_css_set_fc_config(params, config->fc_config);
+ ia_css_set_cnr_config(params, config->cnr_config);
+ ia_css_set_macc_config(params, config->macc_config);
+ ia_css_set_ctc_config(params, config->ctc_config);
+ ia_css_set_aa_config(params, config->aa_config);
+ ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_set_csc_config(params, config->cc_config);
+ ia_css_set_nr_config(params, config->nr_config);
+ ia_css_set_gc_config(params, config->gc_config);
+ ia_css_set_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_set_r_gamma_config(params, config->r_gamma_table);
+ ia_css_set_g_gamma_config(params, config->g_gamma_table);
+ ia_css_set_b_gamma_config(params, config->b_gamma_table);
+ ia_css_set_xnr_table_config(params, config->xnr_table);
+ ia_css_set_formats_config(params, config->formats_config);
+ ia_css_set_xnr_config(params, config->xnr_config);
+ ia_css_set_xnr3_config(params, config->xnr3_config);
+ ia_css_set_s3a_config(params, config->s3a_config);
+ ia_css_set_output_config(params, config->output_config);
+}
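+
+/*
+ * Illustrative sketch (not generated code): how a caller is expected to
+ * pair the two global accessors above. Every per-kernel getter/setter
+ * ignores NULL members, so a zero-initialized ia_css_isp_config can
+ * carry only the table of interest. The function name below is
+ * hypothetical.
+ */
+#if 0
+static void example_update_xnr(struct ia_css_isp_parameters *params)
+{
+ struct ia_css_isp_config config = { 0 };
+ struct ia_css_xnr_config xnr;
+
+ config.xnr_config = &xnr;
+ ia_css_get_configs(params, &config); /* read the current value */
+ /* ...adjust fields of xnr here... */
+ ia_css_set_configs(params, &config); /* store + set config_changed */
+}
+#endif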
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_states.c b/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_states.c
new file mode 100644
index 000000000000..c54787f3fc24
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_states.c
@@ -0,0 +1,223 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_states.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_aa_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_aa_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.aa.size;
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset;
+
+ if (size)
+ memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ 0, size);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_aa_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset;
+
+ if (size) {
+ ia_css_init_cnr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr2_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr2_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset;
+
+ if (size) {
+ ia_css_init_cnr2_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr2_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_dp_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_dp_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.dp.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset;
+
+ if (size) {
+ ia_css_init_dp_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_dp_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_de_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_de_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.de.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.de.offset;
+
+ if (size) {
+ ia_css_init_de_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_de_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_tnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_tnr_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->dmem.tnr.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_tnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ref_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ref_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->dmem.ref.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset;
+
+ if (size) {
+ ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ref_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ynr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ynr_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.ynr.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset;
+
+ if (size) {
+ ia_css_init_ynr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ynr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+void (*ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(
+ const struct ia_css_binary *binary) = {
+ ia_css_initialize_aa_state,
+ ia_css_initialize_cnr_state,
+ ia_css_initialize_cnr2_state,
+ ia_css_initialize_dp_state,
+ ia_css_initialize_de_state,
+ ia_css_initialize_tnr_state,
+ ia_css_initialize_ref_state,
+ ia_css_initialize_ynr_state,
+};
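+
+/*
+ * Illustrative sketch (not generated code): the table above is meant to
+ * be walked once per binary to (re)initialize every kernel state. The
+ * helper name below is hypothetical.
+ */
+#if 0
+static void example_init_all_states(const struct ia_css_binary *binary)
+{
+ unsigned int id;
+
+ for (id = 0; id < IA_CSS_NUM_STATE_IDS; id++)
+ ia_css_kernel_init_state[id](binary);
+}
+#endif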
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c
new file mode 100644
index 000000000000..50080565d0d6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+const u32 N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = {
+ 4, /* 4 entries at CSI_RX_BACKEND0_ID */
+ 4, /* 4 entries at CSI_RX_BACKEND1_ID */
+ 4 /* 4 entries at CSI_RX_BACKEND2_ID */
+};
+
+const u32 N_LONG_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = {
+ 8, /* 8 entries at CSI_RX_BACKEND0_ID */
+ 4, /* 4 entries at CSI_RX_BACKEND1_ID */
+ 4 /* 4 entries at CSI_RX_BACKEND2_ID */
+};
+
+const u32 N_CSI_RX_FE_CTRL_DLANES[N_CSI_RX_FRONTEND_ID] = {
+ N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FRONTEND0_ID */
+ N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FRONTEND1_ID */
+ N_CSI_RX_DLANE_ID /* 4 dlanes for CSI_RX_FRONTEND2_ID */
+};
+
+/* sid_width for CSI_RX_BACKEND<N>_ID */
+const u32 N_CSI_RX_BE_SID_WIDTH[N_CSI_RX_BACKEND_ID] = {
+ 3,
+ 2,
+ 2
+};
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h
new file mode 100644
index 000000000000..a86de89b2cfc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h
@@ -0,0 +1,62 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_LOCAL_H_INCLUDED__
+#define __CSI_RX_LOCAL_H_INCLUDED__
+
+#include "csi_rx_global.h"
+#define N_CSI_RX_BE_MIPI_COMP_FMT_REG 4
+#define N_CSI_RX_BE_MIPI_CUSTOM_PEC 12
+#define N_CSI_RX_BE_SHORT_PKT_LUT 4
+#define N_CSI_RX_BE_LONG_PKT_LUT 8
+typedef struct csi_rx_fe_ctrl_state_s csi_rx_fe_ctrl_state_t;
+typedef struct csi_rx_fe_ctrl_lane_s csi_rx_fe_ctrl_lane_t;
+typedef struct csi_rx_be_ctrl_state_s csi_rx_be_ctrl_state_t;
+/*mipi_backend_custom_mode_pixel_extraction_config*/
+typedef struct csi_rx_be_ctrl_pec_s csi_rx_be_ctrl_pec_t;
+
+struct csi_rx_fe_ctrl_lane_s {
+ hrt_data termen;
+ hrt_data settle;
+};
+
+struct csi_rx_fe_ctrl_state_s {
+ hrt_data enable;
+ hrt_data nof_enable_lanes;
+ hrt_data error_handling;
+ hrt_data status;
+ hrt_data status_dlane_hs;
+ hrt_data status_dlane_lp;
+ csi_rx_fe_ctrl_lane_t clane;
+ csi_rx_fe_ctrl_lane_t dlane[N_CSI_RX_DLANE_ID];
+};
+
+struct csi_rx_be_ctrl_state_s {
+ hrt_data enable;
+ hrt_data status;
+ hrt_data comp_format_reg[N_CSI_RX_BE_MIPI_COMP_FMT_REG];
+ hrt_data raw16;
+ hrt_data raw18;
+ hrt_data force_raw8;
+ hrt_data irq_status;
+ hrt_data custom_mode_enable;
+ hrt_data custom_mode_data_state;
+ hrt_data pec[N_CSI_RX_BE_MIPI_CUSTOM_PEC];
+ hrt_data custom_mode_valid_eop_config;
+ hrt_data global_lut_disregard_reg;
+ hrt_data packet_status_stall;
+ hrt_data short_packet_lut_entry[N_CSI_RX_BE_SHORT_PKT_LUT];
+ hrt_data long_packet_lut_entry[N_CSI_RX_BE_LONG_PKT_LUT];
+};
+#endif /* __CSI_RX_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h
new file mode 100644
index 000000000000..3fa3c3a487ab
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h
@@ -0,0 +1,305 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_PRIVATE_H_INCLUDED__
+#define __CSI_RX_PRIVATE_H_INCLUDED__
+
+#include "rx_csi_defs.h"
+#include "mipi_backend_defs.h"
+#include "csi_rx.h"
+
+#include "device_access.h" /* ia_css_device_load_uint32 */
+
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline hrt_data csi_rx_fe_ctrl_reg_load(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_CSI_RX_FRONTEND_ID);
+ assert(CSI_RX_FE_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(CSI_RX_FE_CTRL_BASE[ID] +
+ reg * sizeof(hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+static inline void csi_rx_fe_ctrl_reg_store(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_CSI_RX_FRONTEND_ID);
+ assert(CSI_RX_FE_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(CSI_RX_FE_CTRL_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+}
+
+/**
+ * @brief Load the register value.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline hrt_data csi_rx_be_ctrl_reg_load(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_CSI_RX_BACKEND_ID);
+ assert(CSI_RX_BE_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(CSI_RX_BE_CTRL_BASE[ID] +
+ reg * sizeof(hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+static inline void csi_rx_be_ctrl_reg_store(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_CSI_RX_BACKEND_ID);
+ assert(CSI_RX_BE_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+}
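+
+/*
+ * Illustrative sketch (not generated code): the DLI above is a plain
+ * load/store interface, so read-modify-write sequences live in the
+ * caller. The helper name and mask parameter below are hypothetical.
+ */
+#if 0
+static inline void example_be_set_bits(const csi_rx_backend_ID_t ID,
+ const hrt_address reg, const hrt_data mask)
+{
+ hrt_data val = csi_rx_be_ctrl_reg_load(ID, reg);
+
+ csi_rx_be_ctrl_reg_store(ID, reg, val | mask);
+}
+#endif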
+
+/* end of DLI */
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the state of the csi rx fe dlane process.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_fe_ctrl_get_dlane_state(
+ const csi_rx_frontend_ID_t ID,
+ const u32 lane,
+ csi_rx_fe_ctrl_lane_t *dlane_state)
+{
+ dlane_state->termen =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_TERMEN_DLANE_REG_IDX(lane));
+ dlane_state->settle =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_SETTLE_DLANE_REG_IDX(lane));
+}
+
+/**
+ * @brief Get the csi rx fe state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_fe_ctrl_get_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state)
+{
+ u32 i;
+
+ state->enable =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_ENABLE_REG_IDX);
+ state->nof_enable_lanes =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_NOF_ENABLED_LANES_REG_IDX);
+ state->error_handling =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_ERROR_HANDLING_REG_IDX);
+ state->status =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_REG_IDX);
+ state->status_dlane_hs =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX);
+ state->status_dlane_lp =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX);
+ state->clane.termen =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_TERMEN_CLANE_REG_IDX);
+ state->clane.settle =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_SETTLE_CLANE_REG_IDX);
+
+ /*
+ * Get the values of the register-set per
+ * dlane.
+ */
+ for (i = 0; i < N_CSI_RX_FE_CTRL_DLANES[ID]; i++) {
+ csi_rx_fe_ctrl_get_dlane_state(
+ ID,
+ i,
+ &state->dlane[i]);
+ }
+}
+
+/**
+ * @brief dump the csi rx fe state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_fe_ctrl_dump_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state)
+{
+ u32 i;
+
+ ia_css_print("CSI RX FE STATE Controller %d Enable state 0x%x\n", ID,
+ state->enable);
+ ia_css_print("CSI RX FE STATE Controller %d No Of enable lanes 0x%x\n", ID,
+ state->nof_enable_lanes);
+ ia_css_print("CSI RX FE STATE Controller %d Error handling 0x%x\n", ID,
+ state->error_handling);
+ ia_css_print("CSI RX FE STATE Controller %d Status 0x%x\n", ID, state->status);
+ ia_css_print("CSI RX FE STATE Controller %d Status Dlane HS 0x%x\n", ID,
+ state->status_dlane_hs);
+ ia_css_print("CSI RX FE STATE Controller %d Status Dlane LP 0x%x\n", ID,
+ state->status_dlane_lp);
+ ia_css_print("CSI RX FE STATE Controller %d Status term enable LP 0x%x\n", ID,
+ state->clane.termen);
+ ia_css_print("CSI RX FE STATE Controller %d Status term settle LP 0x%x\n", ID,
+ state->clane.settle);
+
+ /*
+ * Dump the values of the register-set per
+ * dlane.
+ */
+ for (i = 0; i < N_CSI_RX_FE_CTRL_DLANES[ID]; i++) {
+ ia_css_print("CSI RX FE STATE Controller %d DLANE ID %d termen 0x%x\n", ID, i,
+ state->dlane[i].termen);
+ ia_css_print("CSI RX FE STATE Controller %d DLANE ID %d settle 0x%x\n", ID, i,
+ state->dlane[i].settle);
+ }
+}
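+
+/*
+ * Illustrative sketch (not generated code): the NCI get/dump pair above
+ * is intended to be used together when tracing a frontend; the helper
+ * name below is hypothetical.
+ */
+#if 0
+static inline void example_trace_csi_rx_fe(const csi_rx_frontend_ID_t ID)
+{
+ csi_rx_fe_ctrl_state_t state;
+
+ csi_rx_fe_ctrl_get_state(ID, &state);
+ csi_rx_fe_ctrl_dump_state(ID, &state);
+}
+#endif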
+
+/**
+ * @brief Get the csi rx be state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_be_ctrl_get_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state)
+{
+ u32 i;
+
+ state->enable =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_ENABLE_REG_IDX);
+
+ state->status =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_STATUS_REG_IDX);
+
+ for (i = 0; i < N_CSI_RX_BE_MIPI_COMP_FMT_REG ; i++) {
+ state->comp_format_reg[i] =
+ csi_rx_be_ctrl_reg_load(ID,
+ _HRT_MIPI_BACKEND_COMP_FORMAT_REG0_IDX + i);
+ }
+
+ state->raw16 =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_IDX);
+
+ state->raw18 =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_IDX);
+ state->force_raw8 =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_FORCE_RAW8_REG_IDX);
+ state->irq_status =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_IRQ_STATUS_REG_IDX);
+#if 0 /* device access error for these registers */
+ /* TODO: root-cause this failure */
+ state->custom_mode_enable =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_EN_REG_IDX);
+
+ state->custom_mode_data_state =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_DATA_STATE_REG_IDX);
+ for (i = 0; i < N_CSI_RX_BE_MIPI_CUSTOM_PEC ; i++) {
+ state->pec[i] =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P0_REG_IDX + i);
+ }
+ state->custom_mode_valid_eop_config =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_REG_IDX);
+#endif
+ state->global_lut_disregard_reg =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_IDX);
+ state->packet_status_stall =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_IDX);
+ /*
+ * Get the values of the register-set per
+ * lut.
+ */
+ for (i = 0; i < N_SHORT_PACKET_LUT_ENTRIES[ID]; i++) {
+ state->short_packet_lut_entry[i] =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_IDX + i);
+ }
+ for (i = 0; i < N_LONG_PACKET_LUT_ENTRIES[ID]; i++) {
+ state->long_packet_lut_entry[i] =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_LP_LUT_ENTRY_0_REG_IDX + i);
+ }
+}
+
+/**
+ * @brief Dump the csi rx be state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_be_ctrl_dump_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state)
+{
+ u32 i;
+
+ ia_css_print("CSI RX BE STATE Controller %d Enable 0x%x\n", ID, state->enable);
+ ia_css_print("CSI RX BE STATE Controller %d Status 0x%x\n", ID, state->status);
+
+ for (i = 0; i < N_CSI_RX_BE_MIPI_COMP_FMT_REG ; i++) {
+ ia_css_print("CSI RX BE STATE Controller %d comp format reg vc%d value 0x%x\n",
+ ID, i, state->status);
+ }
+ ia_css_print("CSI RX BE STATE Controller %d RAW16 0x%x\n", ID, state->raw16);
+ ia_css_print("CSI RX BE STATE Controller %d RAW18 0x%x\n", ID, state->raw18);
+ ia_css_print("CSI RX BE STATE Controller %d Force RAW8 0x%x\n", ID,
+ state->force_raw8);
+ ia_css_print("CSI RX BE STATE Controller %d IRQ state 0x%x\n", ID,
+ state->irq_status);
+#if 0 /* TODO: getting device access error for these registers */
+ for (i = 0; i < N_CSI_RX_BE_MIPI_CUSTOM_PEC; i++) {
+ ia_css_print("CSI RX BE STATE Controller %d PEC ID %d custom pec 0x%x\n", ID, i,
+ state->pec[i]);
+ }
+#endif
+ ia_css_print("CSI RX BE STATE Controller %d Global LUT disregard reg 0x%x\n",
+ ID, state->global_lut_disregard_reg);
+ ia_css_print("CSI RX BE STATE Controller %d packet stall reg 0x%x\n", ID,
+ state->packet_status_stall);
+ /*
+ * Dump the values of the register-set per
+ * lut.
+ */
+ for (i = 0; i < N_SHORT_PACKET_LUT_ENTRIES[ID]; i++) {
+ ia_css_print("CSI RX BE STATE Controller ID %d Short packat entry %d shart packet lut id 0x%x\n",
+ ID, i,
+ state->short_packet_lut_entry[i]);
+ }
+ for (i = 0; i < N_LONG_PACKET_LUT_ENTRIES[ID]; i++) {
+ ia_css_print("CSI RX BE STATE Controller ID %d Long packat entry %d Long packet lut id 0x%x\n",
+ ID, i,
+ state->long_packet_lut_entry[i]);
+ }
+}
+
+/* end of NCI */
+
+#endif /* __CSI_RX_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c
new file mode 100644
index 000000000000..8b06b2410d1d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include "system_global.h"
+
+const u32 N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID] = {
+ 8, /* IBUF_CTRL0_ID supports at most 8 processes */
+ 4, /* IBUF_CTRL1_ID supports at most 4 processes */
+ 4 /* IBUF_CTRL2_ID supports at most 4 processes */
+};
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h
new file mode 100644
index 000000000000..ea40284623d1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h
@@ -0,0 +1,58 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_LOCAL_H_INCLUDED__
+#define __IBUF_CTRL_LOCAL_H_INCLUDED__
+
+#include "ibuf_ctrl_global.h"
+
+typedef struct ibuf_ctrl_proc_state_s ibuf_ctrl_proc_state_t;
+typedef struct ibuf_ctrl_state_s ibuf_ctrl_state_t;
+
+struct ibuf_ctrl_proc_state_s {
+ hrt_data num_items;
+ hrt_data num_stores;
+ hrt_data dma_channel;
+ hrt_data dma_command;
+ hrt_data ibuf_st_addr;
+ hrt_data ibuf_stride;
+ hrt_data ibuf_end_addr;
+ hrt_data dest_st_addr;
+ hrt_data dest_stride;
+ hrt_data dest_end_addr;
+ hrt_data sync_frame;
+ hrt_data sync_command;
+ hrt_data store_command;
+ hrt_data shift_returned_items;
+ hrt_data elems_ibuf;
+ hrt_data elems_dest;
+ hrt_data cur_stores;
+ hrt_data cur_acks;
+ hrt_data cur_s2m_ibuf_addr;
+ hrt_data cur_dma_ibuf_addr;
+ hrt_data cur_dma_dest_addr;
+ hrt_data cur_isp_dest_addr;
+ hrt_data dma_cmds_send;
+ hrt_data main_cntrl_state;
+ hrt_data dma_sync_state;
+ hrt_data isp_sync_state;
+};
+
+struct ibuf_ctrl_state_s {
+ hrt_data recalc_words;
+ hrt_data arbiters;
+ ibuf_ctrl_proc_state_t proc_state[N_STREAM2MMIO_SID_ID];
+};
+
+#endif /* __IBUF_CTRL_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_private.h
new file mode 100644
index 000000000000..a0800a5df68a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_private.h
@@ -0,0 +1,267 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_PRIVATE_H_INCLUDED__
+#define __IBUF_CTRL_PRIVATE_H_INCLUDED__
+
+#include "ibuf_ctrl_public.h"
+
+#include "device_access.h" /* ia_css_device_load_uint32 */
+
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the ibuf-controller state.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_get_state(
+ const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state)
+{
+ u32 i;
+
+ state->recalc_words =
+ ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_RECALC_WORDS_STATUS);
+ state->arbiters =
+ ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_ARBITERS_STATUS);
+
+ /*
+ * Get the values of the register-set per
+ * ibuf-controller process.
+ */
+ for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
+ ibuf_ctrl_get_proc_state(
+ ID,
+ i,
+ &state->proc_state[i]);
+ }
+}
+
+/**
+ * @brief Get the state of the ibuf-controller process.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_get_proc_state(
+ const ibuf_ctrl_ID_t ID,
+ const u32 proc_id,
+ ibuf_ctrl_proc_state_t *state)
+{
+ hrt_address reg_bank_offset;
+
+ reg_bank_offset =
+ _IBUF_CNTRL_PROC_REG_ALIGN * (1 + proc_id);
+
+ state->num_items =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_ITEMS_PER_STORE);
+
+ state->num_stores =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_STORES_PER_FRAME);
+
+ state->dma_channel =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CHANNEL);
+
+ state->dma_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CMD);
+
+ state->ibuf_st_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_START_ADDRESS);
+
+ state->ibuf_stride =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_STRIDE);
+
+ state->ibuf_end_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_END_ADDRESS);
+
+ state->dest_st_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_START_ADDRESS);
+
+ state->dest_stride =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_STRIDE);
+
+ state->dest_end_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_END_ADDRESS);
+
+ state->sync_frame =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SYNC_FRAME);
+
+ state->sync_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_SYNC_CMD);
+
+ state->store_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_STORE_CMD);
+
+ state->shift_returned_items =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SHIFT_ITEMS);
+
+ state->elems_ibuf =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_IBUF);
+
+ state->elems_dest =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_DEST);
+
+ state->cur_stores =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_STORES);
+
+ state->cur_acks =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ACKS);
+
+ state->cur_s2m_ibuf_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_S2M_IBUF_ADDR);
+
+ state->cur_dma_ibuf_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_IBUF_ADDR);
+
+ state->cur_dma_dest_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_DEST_ADDR);
+
+ state->cur_isp_dest_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ISP_DEST_ADDR);
+
+ state->dma_cmds_send =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND);
+
+ state->main_cntrl_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_MAIN_CNTRL_STATE);
+
+ state->dma_sync_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_SYNC_STATE);
+
+ state->isp_sync_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ISP_SYNC_STATE);
+}
+
+/**
+ * @brief Dump the ibuf-controller state.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_dump_state(
+ const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state)
+{
+ u32 i;
+
+ ia_css_print("IBUF controller ID %d recalculate words 0x%x\n", ID,
+ state->recalc_words);
+ ia_css_print("IBUF controller ID %d arbiters 0x%x\n", ID, state->arbiters);
+
+ /*
+ * Dump the values of the register-set per
+ * ibuf-controller process.
+ */
+ for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
+ ia_css_print("IBUF controller ID %d Process ID %d num_items 0x%x\n", ID, i,
+ state->proc_state[i].num_items);
+ ia_css_print("IBUF controller ID %d Process ID %d num_stores 0x%x\n", ID, i,
+ state->proc_state[i].num_stores);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_channel 0x%x\n", ID, i,
+ state->proc_state[i].dma_channel);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_command 0x%x\n", ID, i,
+ state->proc_state[i].dma_command);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_st_addr 0x%x\n", ID, i,
+ state->proc_state[i].ibuf_st_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_stride 0x%x\n", ID, i,
+ state->proc_state[i].ibuf_stride);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_end_addr 0x%x\n", ID, i,
+ state->proc_state[i].ibuf_end_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_st_addr 0x%x\n", ID, i,
+ state->proc_state[i].dest_st_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_stride 0x%x\n", ID, i,
+ state->proc_state[i].dest_stride);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_end_addr 0x%x\n", ID, i,
+ state->proc_state[i].dest_end_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d sync_frame 0x%x\n", ID, i,
+ state->proc_state[i].sync_frame);
+ ia_css_print("IBUF controller ID %d Process ID %d sync_command 0x%x\n", ID, i,
+ state->proc_state[i].sync_command);
+ ia_css_print("IBUF controller ID %d Process ID %d store_command 0x%x\n", ID, i,
+ state->proc_state[i].store_command);
+ ia_css_print("IBUF controller ID %d Process ID %d shift_returned_items 0x%x\n",
+ ID, i,
+ state->proc_state[i].shift_returned_items);
+ ia_css_print("IBUF controller ID %d Process ID %d elems_ibuf 0x%x\n", ID, i,
+ state->proc_state[i].elems_ibuf);
+ ia_css_print("IBUF controller ID %d Process ID %d elems_dest 0x%x\n", ID, i,
+ state->proc_state[i].elems_dest);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_stores 0x%x\n", ID, i,
+ state->proc_state[i].cur_stores);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_acks 0x%x\n", ID, i,
+ state->proc_state[i].cur_acks);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_s2m_ibuf_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_s2m_ibuf_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_dma_ibuf_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_dma_ibuf_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_dma_dest_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_dma_dest_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_isp_dest_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_isp_dest_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_cmds_send 0x%x\n", ID, i,
+ state->proc_state[i].dma_cmds_send);
+ ia_css_print("IBUF controller ID %d Process ID %d main_cntrl_state 0x%x\n", ID,
+ i,
+ state->proc_state[i].main_cntrl_state);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_sync_state 0x%x\n", ID, i,
+ state->proc_state[i].dma_sync_state);
+ ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i,
+ state->proc_state[i].isp_sync_state);
+ }
+}
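+
+/*
+ * Illustrative sketch (not generated code): typical NCI usage when
+ * debugging an input-buffer controller; the helper name below is
+ * hypothetical.
+ */
+#if 0
+static void example_trace_ibuf_ctrl(const ibuf_ctrl_ID_t ID)
+{
+ ibuf_ctrl_state_t state;
+
+ ibuf_ctrl_get_state(ID, &state);
+ ibuf_ctrl_dump_state(ID, &state);
+}
+#endif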
+
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C hrt_data ibuf_ctrl_reg_load(
+ const ibuf_ctrl_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_IBUF_CTRL_ID);
+ assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(IBUF_CTRL_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_reg_store(
+ const ibuf_ctrl_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_IBUF_CTRL_ID);
+ assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+}
+
+/* end of DLI */
+
+#endif /* __IBUF_CTRL_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c
new file mode 100644
index 000000000000..36c026cbd7cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "isys_dma.h"
+#include "assert_support.h"
+
+#ifndef __INLINE_ISYS2401_DMA__
+/*
+ * Include definitions for isys dma register access functions. isys_dma.h
+ * includes declarations of these functions by including isys_dma_public.h.
+ */
+#include "isys_dma_private.h"
+#endif
+
+const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID] = {
+ N_ISYS2401_DMA_CHANNEL
+};
+
+void isys2401_dma_set_max_burst_size(
+ const isys2401_dma_ID_t dma_id,
+ uint32_t max_burst_size)
+{
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert((max_burst_size > 0x00) && (max_burst_size <= 0xFF));
+
+ isys2401_dma_reg_store(dma_id,
+ DMA_DEV_INFO_REG_IDX(_DMA_V2_DEV_INTERF_MAX_BURST_IDX, HIVE_DMA_BUS_DDR_CONN),
+ (max_burst_size - 1));
+}
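+
+/*
+ * Illustrative note (not generated code): the device register encodes
+ * the burst size minus one, which is why the helper subtracts 1 before
+ * storing. A caller passes the desired size directly, e.g. (assuming
+ * the usual single-instance ISYS2401_DMA0_ID):
+ *
+ * isys2401_dma_set_max_burst_size(ISYS2401_DMA0_ID, 128);
+ */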
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_local.h
new file mode 100644
index 000000000000..5c694a26386e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_LOCAL_H_INCLUDED__
+#define __ISYS_DMA_LOCAL_H_INCLUDED__
+
+#include "isys_dma_global.h"
+
+#endif /* __ISYS_DMA_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h
new file mode 100644
index 000000000000..a1a222372ed3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h
@@ -0,0 +1,61 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_PRIVATE_H_INCLUDED__
+#define __ISYS_DMA_PRIVATE_H_INCLUDED__
+
+#include "isys_dma_public.h"
+#include "device_access.h"
+#include "assert_support.h"
+#include "dma.h"
+#include "dma_v2_defs.h"
+#include "print_support.h"
+
+STORAGE_CLASS_ISYS2401_DMA_C void isys2401_dma_reg_store(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ unsigned int reg_loc;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(ISYS2401_DMA_BASE[dma_id] != (hrt_address)-1);
+
+ reg_loc = ISYS2401_DMA_BASE[dma_id] + (reg * sizeof(hrt_data));
+
+ ia_css_print("isys dma store at addr(0x%x) val(%u)\n", reg_loc,
+ (unsigned int)value);
+ ia_css_device_store_uint32(reg_loc, value);
+}
+
+STORAGE_CLASS_ISYS2401_DMA_C hrt_data isys2401_dma_reg_load(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg)
+{
+ unsigned int reg_loc;
+ hrt_data value;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(ISYS2401_DMA_BASE[dma_id] != (hrt_address)-1);
+
+ reg_loc = ISYS2401_DMA_BASE[dma_id] + (reg * sizeof(hrt_data));
+
+ value = ia_css_device_load_uint32(reg_loc);
+ ia_css_print("isys dma load from addr(0x%x) val(%u)\n", reg_loc,
+ (unsigned int)value);
+
+ return value;
+}
+
+#endif /* __ISYS_DMA_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c
new file mode 100644
index 000000000000..567c926bd47f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <system_local.h>
+#include "device_access.h"
+#include "assert_support.h"
+#include "ia_css_debug.h"
+#include "isys_irq.h"
+
+#ifndef __INLINE_ISYS2401_IRQ__
+/*
+ * Include definitions for isys irq private functions. isys_irq.h includes
+ * declarations of these functions by including isys_irq_public.h.
+ */
+#include "isys_irq_private.h"
+#endif
+
+/* Public interface */
+STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_status_enable(
+ const isys_irq_ID_t isys_irqc_id)
+{
+ assert(isys_irqc_id < N_ISYS_IRQ_ID);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Setting irq mask for port %u\n",
+ isys_irqc_id);
+ isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_MASK_REG_IDX,
+ ISYS_IRQ_MASK_REG_VALUE);
+ isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_CLEAR_REG_IDX,
+ ISYS_IRQ_CLEAR_REG_VALUE);
+ isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_ENABLE_REG_IDX,
+ ISYS_IRQ_ENABLE_REG_VALUE);
+}
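isys_irqc_status_enable() programs the mask, clear and enable registers in one call, so bring-up code only has to walk the controllers. A minimal sketch, assuming ISYS_IRQ0_ID is the first value of the isys_irq_ID_t enumeration:

#include "isys_irq.h"

/* Unmask, clear and enable every isys IRQ controller. */
static void isys_irq_enable_all(void)
{
	isys_irq_ID_t id;

	for (id = ISYS_IRQ0_ID; id < N_ISYS_IRQ_ID; id++)
		isys_irqc_status_enable(id);
}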
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
new file mode 100644
index 000000000000..4fd05b29dfdb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_IRQ_LOCAL_H__
+#define __ISYS_IRQ_LOCAL_H__
+
+#include <type_support.h>
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+typedef struct isys_irqc_state_s isys_irqc_state_t;
+
+struct isys_irqc_state_s {
+ hrt_data edge;
+ hrt_data mask;
+ hrt_data status;
+ hrt_data enable;
+ hrt_data level_no;
+ /*hrt_data clear; */ /* write-only register */
+};
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __ISYS_IRQ_LOCAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
new file mode 100644
index 000000000000..c519e6f06462
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
@@ -0,0 +1,106 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_IRQ_PRIVATE_H__
+#define __ISYS_IRQ_PRIVATE_H__
+
+#include "isys_irq_global.h"
+#include "isys_irq_local.h"
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+/* -------------------------------------------------------+
+ | Native command interface (NCI) |
+ + -------------------------------------------------------*/
+
+/**
+ * @brief Get the isys irq status.
+ * Refer to "isys_irq.h" for details.
+ */
+STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_get(
+ const isys_irq_ID_t isys_irqc_id,
+ isys_irqc_state_t *state)
+{
+ state->edge = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_EDGE_REG_IDX);
+ state->mask = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_MASK_REG_IDX);
+ state->status = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_STATUS_REG_IDX);
+ state->enable = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_ENABLE_REG_IDX);
+ state->level_no = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_LEVEL_NO_REG_IDX);
+ /*
+ ** Invalid to read/load from write-only register 'clear'
+ ** state->clear = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_CLEAR_REG_IDX);
+ */
+}
+
+/**
+ * @brief Dump the isys irq status.
+ * Refer to "isys_irq.h" for details.
+ */
+STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_dump(
+ const isys_irq_ID_t isys_irqc_id,
+ const isys_irqc_state_t *state)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "isys irq controller id %d\n\tstatus:0x%x\n\tedge:0x%x\n\tmask:0x%x\n\tenable:0x%x\n\tlevel_not_pulse:0x%x\n",
+ isys_irqc_id,
+ state->status, state->edge, state->mask, state->enable, state->level_no);
+}
+
+/* end of NCI */
+
+/* -------------------------------------------------------+
+ | Device level interface (DLI) |
+ + -------------------------------------------------------*/
+
+/* Support functions */
+STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_reg_store(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx,
+ const hrt_data value)
+{
+ unsigned int reg_addr;
+
+ assert(isys_irqc_id < N_ISYS_IRQ_ID);
+ assert(reg_idx <= ISYS_IRQ_LEVEL_NO_REG_IDX);
+
+ reg_addr = ISYS_IRQ_BASE[isys_irqc_id] + (reg_idx * sizeof(hrt_data));
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "isys irq store at addr(0x%x) val(%u)\n", reg_addr, (unsigned int)value);
+
+ ia_css_device_store_uint32(reg_addr, value);
+}
+
+STORAGE_CLASS_ISYS2401_IRQ_C hrt_data isys_irqc_reg_load(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx)
+{
+ unsigned int reg_addr;
+ hrt_data value;
+
+ assert(isys_irqc_id < N_ISYS_IRQ_ID);
+ assert(reg_idx <= ISYS_IRQ_LEVEL_NO_REG_IDX);
+
+ reg_addr = ISYS_IRQ_BASE[isys_irqc_id] + (reg_idx * sizeof(hrt_data));
+ value = ia_css_device_load_uint32(reg_addr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "isys irq load from addr(0x%x) val(%u)\n", reg_addr, (unsigned int)value);
+
+ return value;
+}
+
+/* end of DLI */
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __ISYS_IRQ_PRIVATE_H__ */
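The NCI pair above is meant to be used together: snapshot the readable registers, then trace them. A minimal sketch, assuming ISYS_IRQ0_ID is a valid isys_irq_ID_t on this system:

#include "isys_irq.h"

/* Snapshot and trace the state of one isys IRQ controller. */
static void isys_irq_debug_dump(void)
{
	isys_irqc_state_t state;

	isys_irqc_state_get(ISYS_IRQ0_ID, &state);
	isys_irqc_state_dump(ISYS_IRQ0_ID, &state);
}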
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c
new file mode 100644
index 000000000000..67570138ba24
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "isys_stream2mmio.h"
+
+const stream2mmio_sid_ID_t N_STREAM2MMIO_SID_PROCS[N_STREAM2MMIO_ID] = {
+ N_STREAM2MMIO_SID_ID,
+ STREAM2MMIO_SID4_ID,
+ STREAM2MMIO_SID4_ID
+};
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h
new file mode 100644
index 000000000000..1449c19abc86
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__
+
+#include "isys_stream2mmio_global.h"
+
+typedef struct stream2mmio_state_s stream2mmio_state_t;
+typedef struct stream2mmio_sid_state_s stream2mmio_sid_state_t;
+
+struct stream2mmio_sid_state_s {
+ hrt_data rcv_ack;
+ hrt_data pix_width_id;
+ hrt_data start_addr;
+ hrt_data end_addr;
+ hrt_data strides;
+ hrt_data num_items;
+ hrt_data block_when_no_cmd;
+};
+
+struct stream2mmio_state_s {
+ stream2mmio_sid_state_t sid_state[N_STREAM2MMIO_SID_ID];
+};
+#endif /* __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h
new file mode 100644
index 000000000000..e5aae5c022eb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h
@@ -0,0 +1,167 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__
+
+#include "isys_stream2mmio_public.h"
+#include "device_access.h" /* ia_css_device_load_uint32 */
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+#define STREAM2MMIO_COMMAND_REG_ID 0
+#define STREAM2MMIO_ACKNOWLEDGE_REG_ID 1
+#define STREAM2MMIO_PIX_WIDTH_ID_REG_ID 2
+#define STREAM2MMIO_START_ADDR_REG_ID 3 /* master port address, NOT bytes */
+#define STREAM2MMIO_END_ADDR_REG_ID 4 /* master port address, NOT bytes */
+#define STREAM2MMIO_STRIDE_REG_ID 5 /* stride in master port words; increment is per packet for long SIDs, unused for short SIDs */
+#define STREAM2MMIO_NUM_ITEMS_REG_ID 6 /* number of packets for the store_packets cmd, number of words for the store_words cmd */
+#define STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID 7 /* if this register is 1, the input is stalled while there is no pending command for this SID */
+#define STREAM2MMIO_REGS_PER_SID 8
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the stream2mmio-controller state.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_get_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state)
+{
+ stream2mmio_sid_ID_t i;
+
+ /*
+	 * Get the values of the register set of each
+	 * stream2mmio-controller SID.
+ */
+ for (i = STREAM2MMIO_SID0_ID; i < N_STREAM2MMIO_SID_PROCS[ID]; i++) {
+ stream2mmio_get_sid_state(ID, i, &state->sid_state[i]);
+ }
+}
+
+/**
+ * @brief Get the state of one stream2mmio-controller SID.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_get_sid_state(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ stream2mmio_sid_state_t *state)
+{
+ state->rcv_ack =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_ACKNOWLEDGE_REG_ID);
+
+ state->pix_width_id =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_PIX_WIDTH_ID_REG_ID);
+
+ state->start_addr =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_START_ADDR_REG_ID);
+
+ state->end_addr =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_END_ADDR_REG_ID);
+
+ state->strides =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_STRIDE_REG_ID);
+
+ state->num_items =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_NUM_ITEMS_REG_ID);
+
+ state->block_when_no_cmd =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID);
+}
+
+/**
+ * @brief Dump the state of one stream2mmio-controller SID.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_print_sid_state(
+ stream2mmio_sid_state_t *state)
+{
+ ia_css_print("\t \t Receive acks 0x%x\n", state->rcv_ack);
+ ia_css_print("\t \t Pixel width 0x%x\n", state->pix_width_id);
+	ia_css_print("\t \t Start addr 0x%x\n", state->start_addr);
+	ia_css_print("\t \t End addr 0x%x\n", state->end_addr);
+ ia_css_print("\t \t Strides 0x%x\n", state->strides);
+ ia_css_print("\t \t Num Items 0x%x\n", state->num_items);
+ ia_css_print("\t \t block when no cmd 0x%x\n", state->block_when_no_cmd);
+}
+
+/**
+ * @brief Dump the stream2mmio-controller state.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_dump_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state)
+{
+ stream2mmio_sid_ID_t i;
+
+ /*
+	 * Print the state of the register set of each
+	 * stream2mmio-controller SID.
+ */
+ for (i = STREAM2MMIO_SID0_ID; i < N_STREAM2MMIO_SID_PROCS[ID]; i++) {
+		ia_css_print("STREAM2MMIO ID %d SID %d\n", ID, i);
+ stream2mmio_print_sid_state(&state->sid_state[i]);
+ }
+}
+
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C hrt_data stream2mmio_reg_load(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ const uint32_t reg_idx)
+{
+ u32 reg_bank_offset;
+
+ assert(ID < N_STREAM2MMIO_ID);
+
+ reg_bank_offset = STREAM2MMIO_REGS_PER_SID * sid_id;
+ return ia_css_device_load_uint32(STREAM2MMIO_CTRL_BASE[ID] +
+ (reg_bank_offset + reg_idx) * sizeof(hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_reg_store(
+ const stream2mmio_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_STREAM2MMIO_ID);
+ assert(STREAM2MMIO_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(STREAM2MMIO_CTRL_BASE[ID] +
+ reg * sizeof(hrt_data), value);
+}
+
+/* end of DLI */
+
+#endif /* __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ */
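The register file above is banked: each SID owns STREAM2MMIO_REGS_PER_SID consecutive words, which is why stream2mmio_reg_load() scales sid_id by that constant before adding reg_idx. A minimal sketch of dumping one SID through the NCI helpers, assuming STREAM2MMIO0_ID and STREAM2MMIO_SID0_ID are valid IDs here:

#include "isys_stream2mmio.h"

/* Snapshot and print the register set of SID 0 of controller 0. */
static void s2m_debug_one_sid(void)
{
	stream2mmio_sid_state_t sid_state;

	stream2mmio_get_sid_state(STREAM2MMIO0_ID, STREAM2MMIO_SID0_ID,
				  &sid_state);
	stream2mmio_print_sid_state(&sid_state);
}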
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h
new file mode 100644
index 000000000000..24f4da9aef40
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h
@@ -0,0 +1,50 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_LOCAL_H_INCLUDED__
+#define __PIXELGEN_LOCAL_H_INCLUDED__
+
+#include "pixelgen_global.h"
+
+typedef struct pixelgen_ctrl_state_s pixelgen_ctrl_state_t;
+struct pixelgen_ctrl_state_s {
+ hrt_data com_enable;
+ hrt_data prbs_rstval0;
+ hrt_data prbs_rstval1;
+ hrt_data syng_sid;
+ hrt_data syng_free_run;
+ hrt_data syng_pause;
+ hrt_data syng_nof_frames;
+ hrt_data syng_nof_pixels;
+ hrt_data syng_nof_line;
+ hrt_data syng_hblank_cyc;
+ hrt_data syng_vblank_cyc;
+ hrt_data syng_stat_hcnt;
+ hrt_data syng_stat_vcnt;
+ hrt_data syng_stat_fcnt;
+ hrt_data syng_stat_done;
+ hrt_data tpg_mode;
+ hrt_data tpg_hcnt_mask;
+ hrt_data tpg_vcnt_mask;
+ hrt_data tpg_xycnt_mask;
+ hrt_data tpg_hcnt_delta;
+ hrt_data tpg_vcnt_delta;
+ hrt_data tpg_r1;
+ hrt_data tpg_g1;
+ hrt_data tpg_b1;
+ hrt_data tpg_r2;
+ hrt_data tpg_g2;
+ hrt_data tpg_b2;
+};
+#endif /* __PIXELGEN_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h
new file mode 100644
index 000000000000..65ea23604479
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h
@@ -0,0 +1,182 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_PRIVATE_H_INCLUDED__
+#define __PIXELGEN_PRIVATE_H_INCLUDED__
+#include "pixelgen_public.h"
+#include "PixelGen_SysBlock_defs.h"
+#include "device_access.h" /* ia_css_device_load_uint32 */
+#include "assert_support.h" /* assert */
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the pixelgen state.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_get_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state)
+{
+ state->com_enable =
+ pixelgen_ctrl_reg_load(ID, _PXG_COM_ENABLE_REG_IDX);
+ state->prbs_rstval0 =
+ pixelgen_ctrl_reg_load(ID, _PXG_PRBS_RSTVAL_REG0_IDX);
+ state->prbs_rstval1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_PRBS_RSTVAL_REG1_IDX);
+ state->syng_sid =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_SID_REG_IDX);
+ state->syng_free_run =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_FREE_RUN_REG_IDX);
+ state->syng_pause =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_PAUSE_REG_IDX);
+ state->syng_nof_frames =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_FRAME_REG_IDX);
+ state->syng_nof_pixels =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_PIXEL_REG_IDX);
+ state->syng_nof_line =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_LINE_REG_IDX);
+ state->syng_hblank_cyc =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_HBLANK_CYC_REG_IDX);
+ state->syng_vblank_cyc =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_VBLANK_CYC_REG_IDX);
+ state->syng_stat_hcnt =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_HCNT_REG_IDX);
+ state->syng_stat_vcnt =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_VCNT_REG_IDX);
+ state->syng_stat_fcnt =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_FCNT_REG_IDX);
+ state->syng_stat_done =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_DONE_REG_IDX);
+ state->tpg_mode =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_MODE_REG_IDX);
+ state->tpg_hcnt_mask =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_HCNT_MASK_REG_IDX);
+ state->tpg_vcnt_mask =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_VCNT_MASK_REG_IDX);
+ state->tpg_xycnt_mask =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_XYCNT_MASK_REG_IDX);
+ state->tpg_hcnt_delta =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_HCNT_DELTA_REG_IDX);
+ state->tpg_vcnt_delta =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_VCNT_DELTA_REG_IDX);
+ state->tpg_r1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_R1_REG_IDX);
+ state->tpg_g1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_G1_REG_IDX);
+ state->tpg_b1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_B1_REG_IDX);
+ state->tpg_r2 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_R2_REG_IDX);
+ state->tpg_g2 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_G2_REG_IDX);
+ state->tpg_b2 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_B2_REG_IDX);
+}
+
+/**
+ * @brief Dump the pixelgen state.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_dump_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state)
+{
+ ia_css_print("Pixel Generator ID %d Enable 0x%x\n", ID, state->com_enable);
+	ia_css_print("Pixel Generator ID %d PRBS reset value 0 0x%x\n", ID,
+ state->prbs_rstval0);
+	ia_css_print("Pixel Generator ID %d PRBS reset value 1 0x%x\n", ID,
+ state->prbs_rstval1);
+ ia_css_print("Pixel Generator ID %d SYNC SID 0x%x\n", ID, state->syng_sid);
+ ia_css_print("Pixel Generator ID %d syng free run 0x%x\n", ID,
+ state->syng_free_run);
+ ia_css_print("Pixel Generator ID %d syng pause 0x%x\n", ID, state->syng_pause);
+ ia_css_print("Pixel Generator ID %d syng no of frames 0x%x\n", ID,
+ state->syng_nof_frames);
+ ia_css_print("Pixel Generator ID %d syng no of pixels 0x%x\n", ID,
+ state->syng_nof_pixels);
+ ia_css_print("Pixel Generator ID %d syng no of line 0x%x\n", ID,
+ state->syng_nof_line);
+ ia_css_print("Pixel Generator ID %d syng hblank cyc 0x%x\n", ID,
+ state->syng_hblank_cyc);
+ ia_css_print("Pixel Generator ID %d syng vblank cyc 0x%x\n", ID,
+ state->syng_vblank_cyc);
+ ia_css_print("Pixel Generator ID %d syng stat hcnt 0x%x\n", ID,
+ state->syng_stat_hcnt);
+ ia_css_print("Pixel Generator ID %d syng stat vcnt 0x%x\n", ID,
+ state->syng_stat_vcnt);
+ ia_css_print("Pixel Generator ID %d syng stat fcnt 0x%x\n", ID,
+ state->syng_stat_fcnt);
+ ia_css_print("Pixel Generator ID %d syng stat done 0x%x\n", ID,
+ state->syng_stat_done);
+	ia_css_print("Pixel Generator ID %d tpg mode 0x%x\n", ID, state->tpg_mode);
+ ia_css_print("Pixel Generator ID %d tpg hcnt mask 0x%x\n", ID,
+ state->tpg_hcnt_mask);
+	ia_css_print("Pixel Generator ID %d tpg vcnt mask 0x%x\n", ID,
+		     state->tpg_vcnt_mask);
+ ia_css_print("Pixel Generator ID %d tpg xycnt mask 0x%x\n", ID,
+ state->tpg_xycnt_mask);
+ ia_css_print("Pixel Generator ID %d tpg hcnt delta 0x%x\n", ID,
+ state->tpg_hcnt_delta);
+ ia_css_print("Pixel Generator ID %d tpg vcnt delta 0x%x\n", ID,
+ state->tpg_vcnt_delta);
+ ia_css_print("Pixel Generator ID %d tpg r1 0x%x\n", ID, state->tpg_r1);
+ ia_css_print("Pixel Generator ID %d tpg g1 0x%x\n", ID, state->tpg_g1);
+ ia_css_print("Pixel Generator ID %d tpg b1 0x%x\n", ID, state->tpg_b1);
+ ia_css_print("Pixel Generator ID %d tpg r2 0x%x\n", ID, state->tpg_r2);
+ ia_css_print("Pixel Generator ID %d tpg g2 0x%x\n", ID, state->tpg_g2);
+ ia_css_print("Pixel Generator ID %d tpg b2 0x%x\n", ID, state->tpg_b2);
+}
+
+/* end of NCI */
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C hrt_data pixelgen_ctrl_reg_load(
+ const pixelgen_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_PIXELGEN_ID);
+ assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(PIXELGEN_CTRL_BASE[ID] + reg * sizeof(
+ hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "pixelgen_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store(
+ const pixelgen_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_PIXELGEN_ID);
+ assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+}
+
+/* end of DLI */
+#endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */
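As a usage sketch of the DLI accessors above: enabling the built-in test-pattern generator comes down to two stores through pixelgen_ctrl_reg_store(), using the _PXG_* indices and enable values from "PixelGen_SysBlock_defs.h" (included above). PIXELGEN0_ID is assumed to be a valid pixelgen_ID_t and the TPG mode value 0 is a placeholder:

#include "pixelgen.h"

/* Select a TPG mode (0 is a placeholder), then enable the TPG subblock. */
static void pixelgen_enable_tpg(void)
{
	pixelgen_ctrl_reg_store(PIXELGEN0_ID, _PXG_TPG_MODE_REG_IDX, 0);
	pixelgen_ctrl_reg_store(PIXELGEN0_ID, _PXG_COM_ENABLE_REG_IDX,
				_PXG_TPG_ENABLE_REG_VAL);
}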
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h
new file mode 100644
index 000000000000..ce53ba4837ea
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h
@@ -0,0 +1,113 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _PixelGen_SysBlock_defs_h
+#define _PixelGen_SysBlock_defs_h
+
+/* Parameters and User_Parameters for HSS */
+#define _PXG_PPC Ppc
+#define _PXG_PIXEL_BITS PixelWidth
+#define _PXG_MAX_NOF_SID MaxNofSids
+#define _PXG_DATA_BITS DataWidth
+#define _PXG_CNT_BITS CntWidth
+#define _PXG_FIFODEPTH FifoDepth
+#define _PXG_DBG Dbg_device_not_included
+
+/* ID's and Address */
+#define _PXG_ADRRESS_ALIGN_REG 4
+
+#define _PXG_COM_ENABLE_REG_IDX 0
+#define _PXG_PRBS_RSTVAL_REG0_IDX 1
+#define _PXG_PRBS_RSTVAL_REG1_IDX 2
+#define _PXG_SYNG_SID_REG_IDX 3
+#define _PXG_SYNG_FREE_RUN_REG_IDX 4
+#define _PXG_SYNG_PAUSE_REG_IDX 5
+#define _PXG_SYNG_NOF_FRAME_REG_IDX 6
+#define _PXG_SYNG_NOF_PIXEL_REG_IDX 7
+#define _PXG_SYNG_NOF_LINE_REG_IDX 8
+#define _PXG_SYNG_HBLANK_CYC_REG_IDX 9
+#define _PXG_SYNG_VBLANK_CYC_REG_IDX 10
+#define _PXG_SYNG_STAT_HCNT_REG_IDX 11
+#define _PXG_SYNG_STAT_VCNT_REG_IDX 12
+#define _PXG_SYNG_STAT_FCNT_REG_IDX 13
+#define _PXG_SYNG_STAT_DONE_REG_IDX 14
+#define _PXG_TPG_MODE_REG_IDX 15
+#define _PXG_TPG_HCNT_MASK_REG_IDX 16
+#define _PXG_TPG_VCNT_MASK_REG_IDX 17
+#define _PXG_TPG_XYCNT_MASK_REG_IDX 18
+#define _PXG_TPG_HCNT_DELTA_REG_IDX 19
+#define _PXG_TPG_VCNT_DELTA_REG_IDX 20
+#define _PXG_TPG_R1_REG_IDX 21
+#define _PXG_TPG_G1_REG_IDX 22
+#define _PXG_TPG_B1_REG_IDX 23
+#define _PXG_TPG_R2_REG_IDX 24
+#define _PXG_TPG_G2_REG_IDX 25
+#define _PXG_TPG_B2_REG_IDX 26
+/* */
+#define _PXG_SYNG_PAUSE_CYCLES 0
+/* Subblock ID's */
+#define _PXG_DISABLE_IDX 0
+#define _PXG_PRBS_IDX 0
+#define _PXG_TPG_IDX 1
+#define _PXG_SYNG_IDX 2
+#define _PXG_SMUX_IDX 3
+/* Register Widths */
+#define _PXG_COM_ENABLE_REG_WIDTH 2
+#define _PXG_COM_SRST_REG_WIDTH 4
+#define _PXG_PRBS_RSTVAL_REG0_WIDTH 31
+#define _PXG_PRBS_RSTVAL_REG1_WIDTH 31
+
+#define _PXG_SYNG_SID_REG_WIDTH 3
+
+#define _PXG_SYNG_FREE_RUN_REG_WIDTH 1
+#define _PXG_SYNG_PAUSE_REG_WIDTH 1
+/*
+#define _PXG_SYNG_NOF_FRAME_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_NOF_PIXEL_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_NOF_LINE_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_HBLANK_CYC_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_VBLANK_CYC_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_STAT_HCNT_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_STAT_VCNT_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_STAT_FCNT_REG_WIDTH <sync_gen_cnt_width>
+*/
+#define _PXG_SYNG_STAT_DONE_REG_WIDTH 1
+#define _PXG_TPG_MODE_REG_WIDTH 2
+/*
+#define _PXG_TPG_HCNT_MASK_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_TPG_VCNT_MASK_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_TPG_XYCNT_MASK_REG_WIDTH <pixel_width>
+*/
+#define _PXG_TPG_HCNT_DELTA_REG_WIDTH 4
+#define _PXG_TPG_VCNT_DELTA_REG_WIDTH 4
+/*
+#define _PXG_TPG_R1_REG_WIDTH <pixel_width>
+#define _PXG_TPG_G1_REG_WIDTH <pixel_width>
+#define _PXG_TPG_B1_REG_WIDTH <pixel_width>
+#define _PXG_TPG_R2_REG_WIDTH <pixel_width>
+#define _PXG_TPG_G2_REG_WIDTH <pixel_width>
+#define _PXG_TPG_B2_REG_WIDTH <pixel_width>
+*/
+#define _PXG_FIFO_DEPTH 2
+/* MISC */
+#define _PXG_ENABLE_REG_VAL 1
+#define _PXG_PRBS_ENABLE_REG_VAL 1
+#define _PXG_TPG_ENABLE_REG_VAL 2
+#define _PXG_SYNG_ENABLE_REG_VAL 4
+#define _PXG_FIFO_ENABLE_REG_VAL 8
+#define _PXG_PXL_BITS 14
+#define _PXG_INVALID_FLAG 0xDEADBEEF
+#define _PXG_CAFE_FLAG 0xCAFEBABE
+
+#endif /* _PixelGen_SysBlock_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h
new file mode 100644
index 000000000000..5975b094a9d0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h
@@ -0,0 +1,134 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ibuf_cntrl_defs_h_
+#define _ibuf_cntrl_defs_h_
+
+#include <stream2mmio_defs.h>
+#include <dma_v2_defs.h>
+
+#define _IBUF_CNTRL_REG_ALIGN 4
+/* alignment of the register banks; the first bank holds the shared configuration and status registers: */
+#define _IBUF_CNTRL_PROC_REG_ALIGN 32
+
+/* the actual number of configuration registers per proc: */
+#define _IBUF_CNTRL_CONFIG_REGS_PER_PROC 18
+/* the actual number of shared configuration registers: */
+#define _IBUF_CNTRL_CONFIG_REGS_NO_PROC 0
+
+/* the actual number of status registers per proc */
+#define _IBUF_CNTRL_STATUS_REGS_PER_PROC (_IBUF_CNTRL_CONFIG_REGS_PER_PROC + 10)
+/* the actual number of shared status registers */
+#define _IBUF_CNTRL_STATUS_REGS_NO_PROC (_IBUF_CNTRL_CONFIG_REGS_NO_PROC + 2)
+
+/* time out bits, maximum time out value is 2^_IBUF_CNTRL_TIME_OUT_BITS - 1 */
+#define _IBUF_CNTRL_TIME_OUT_BITS 5
+
+/* command token definition */
+#define _IBUF_CNTRL_CMD_TOKEN_LSB 0
+#define _IBUF_CNTRL_CMD_TOKEN_MSB 1
+
+/* Str2MMIO defines */
+#define _IBUF_CNTRL_STREAM2MMIO_CMD_TOKEN_MSB _STREAM2MMIO_CMD_TOKEN_CMD_MSB
+#define _IBUF_CNTRL_STREAM2MMIO_CMD_TOKEN_LSB _STREAM2MMIO_CMD_TOKEN_CMD_LSB
+#define _IBUF_CNTRL_STREAM2MMIO_NUM_ITEMS_BITS _STREAM2MMIO_PACK_NUM_ITEMS_BITS
+#define _IBUF_CNTRL_STREAM2MMIO_ACK_EOF_BIT _STREAM2MMIO_PACK_ACK_EOF_BIT
+#define _IBUF_CNTRL_STREAM2MMIO_ACK_TOKEN_VALID_BIT _STREAM2MMIO_ACK_TOKEN_VALID_BIT
+
+/* acknowledge token definition */
+#define _IBUF_CNTRL_ACK_TOKEN_STORES_IDX 0
+#define _IBUF_CNTRL_ACK_TOKEN_STORES_BITS 15
+#define _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX (_IBUF_CNTRL_ACK_TOKEN_STORES_BITS + _IBUF_CNTRL_ACK_TOKEN_STORES_IDX)
+#define _IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS _STREAM2MMIO_PACK_NUM_ITEMS_BITS
+#define _IBUF_CNTRL_ACK_TOKEN_LSB _IBUF_CNTRL_ACK_TOKEN_STORES_IDX
+#define _IBUF_CNTRL_ACK_TOKEN_MSB (_IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS + _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX - 1)
+/* bit 31 indicates a valid ack: */
+#define _IBUF_CNTRL_ACK_TOKEN_VALID_BIT (_IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS + _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX)
+
+/* shared registers: */
+#define _IBUF_CNTRL_RECALC_WORDS_STATUS 0
+#define _IBUF_CNTRL_ARBITERS_STATUS 1
+
+#define _IBUF_CNTRL_SET_CRUN 2 /* NO PHYSICAL REGISTER!! Only used in HSS model */
+
+/* register addresses for each proc: */
+#define _IBUF_CNTRL_CMD 0
+#define _IBUF_CNTRL_ACK 1
+
+/* number of items (packets or words) per store: */
+#define _IBUF_CNTRL_NUM_ITEMS_PER_STORE 2
+
+/* number of stores per frame/buffer: */
+#define _IBUF_CNTRL_NUM_STORES_PER_FRAME 3
+
+/* the channel and command in the DMA */
+#define _IBUF_CNTRL_DMA_CHANNEL 4
+#define _IBUF_CNTRL_DMA_CMD 5
+
+/* the start address and stride of the buffers */
+#define _IBUF_CNTRL_BUFFER_START_ADDRESS 6
+#define _IBUF_CNTRL_BUFFER_STRIDE 7
+#define _IBUF_CNTRL_BUFFER_END_ADDRESS 8
+
+/* destination start address, stride and end address; should be the same as in the DMA */
+#define _IBUF_CNTRL_DEST_START_ADDRESS 9
+#define _IBUF_CNTRL_DEST_STRIDE 10
+#define _IBUF_CNTRL_DEST_END_ADDRESS 11
+
+/* send a frame sync or not, default 1 */
+#define _IBUF_CNTRL_SYNC_FRAME 12
+
+/* str2mmio cmds */
+#define _IBUF_CNTRL_STR2MMIO_SYNC_CMD 13
+#define _IBUF_CNTRL_STR2MMIO_STORE_CMD 14
+
+/* number of elements per word */
+#define _IBUF_CNTRL_SHIFT_ITEMS 15
+#define _IBUF_CNTRL_ELEMS_P_WORD_IBUF 16
+#define _IBUF_CNTRL_ELEMS_P_WORD_DEST 17
+
+/* STATUS */
+/* current frame and stores in buffer */
+#define _IBUF_CNTRL_CUR_STORES 18
+#define _IBUF_CNTRL_CUR_ACKS 19
+
+/* current buffer and destination address for DMA cmd's */
+#define _IBUF_CNTRL_CUR_S2M_IBUF_ADDR 20
+#define _IBUF_CNTRL_CUR_DMA_IBUF_ADDR 21
+#define _IBUF_CNTRL_CUR_DMA_DEST_ADDR 22
+#define _IBUF_CNTRL_CUR_ISP_DEST_ADDR 23
+
+#define _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND 24
+
+#define _IBUF_CNTRL_MAIN_CNTRL_STATE 25
+#define _IBUF_CNTRL_DMA_SYNC_STATE 26
+#define _IBUF_CNTRL_ISP_SYNC_STATE 27
+
+/* Commands: */
+#define _IBUF_CNTRL_CMD_STORE_FRAME_IDX 0
+#define _IBUF_CNTRL_CMD_ONLINE_IDX 1
+
+/* initialize, copy st_addr to cur_addr etc */
+#define _IBUF_CNTRL_CMD_INITIALIZE 0
+
+/* store an online frame (sync with ISP; use the configured start, stride and end addresses): */
+#define _IBUF_CNTRL_CMD_STORE_ONLINE_FRAME ((1 << _IBUF_CNTRL_CMD_STORE_FRAME_IDX) | (1 << _IBUF_CNTRL_CMD_ONLINE_IDX))
+
+/* store an offline frame (no sync with ISP; requires the start address as a 2nd token, no end address): */
+#define _IBUF_CNTRL_CMD_STORE_OFFLINE_FRAME BIT(_IBUF_CNTRL_CMD_STORE_FRAME_IDX)
+
+/* false command token; must differ from the commands. Uses the online bit, not the store-frame bit: */
+#define _IBUF_CNTRL_FALSE_ACK 2
+
+#endif
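The acknowledge token above packs a store count and an item count below a valid bit; the IDX/BITS macros are meant for exactly this kind of field extraction. A minimal decoding sketch (illustrative helpers, not part of the patch):

#include <linux/types.h>
#include "ibuf_cntrl_defs.h"

/* Extract the store-count field from an ibuf acknowledge token. */
static inline u32 ibuf_ack_stores(u32 ack_token)
{
	return (ack_token >> _IBUF_CNTRL_ACK_TOKEN_STORES_IDX) &
	       ((1U << _IBUF_CNTRL_ACK_TOKEN_STORES_BITS) - 1);
}

/* Test the bit that flags a valid acknowledge (bit 31 per the comment above). */
static inline bool ibuf_ack_valid(u32 ack_token)
{
	return (ack_token >> _IBUF_CNTRL_ACK_TOKEN_VALID_BIT) & 1U;
}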
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h
new file mode 100644
index 000000000000..84fe95c16404
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h
@@ -0,0 +1,205 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_common_defs_h_
+#define _css_receiver_2400_common_defs_h_
+#ifndef _mipi_backend_common_defs_h_
+#define _mipi_backend_common_defs_h_
+
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH 16
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH 2
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH (_HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_WIDTH 32 /* use 32 to be compatible with the streaming monitor; MSBs of the interface are tied to '0' */
+
+/* Definition of data format ID at the interface CSS_receiver capture/acquisition units */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8 24 /* 01 1000 YUV420 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10 25 /* 01 1001 YUV420 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8L 26 /* 01 1010 YUV420 8-bit legacy */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_8 30 /* 01 1110 YUV422 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_10 31 /* 01 1111 YUV422 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB444 32 /* 10 0000 RGB444 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB555 33 /* 10 0001 RGB555 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB565 34 /* 10 0010 RGB565 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB666 35 /* 10 0011 RGB666 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB888 36 /* 10 0100 RGB888 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW6 40 /* 10 1000 RAW6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW7 41 /* 10 1001 RAW7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW8 42 /* 10 1010 RAW8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW10 43 /* 10 1011 RAW10 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW12 44 /* 10 1100 RAW12 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW14 45 /* 10 1101 RAW14 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_2 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_3 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_4 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_5 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_6 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_7 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_8 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_Emb 18 /* 01 0010 embedded 8-bit non-image data */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH1 8 /* 00 1000 Generic Short Packet Code 1 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH2 9 /* 00 1001 Generic Short Packet Code 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH3 10 /* 00 1010 Generic Short Packet Code 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH4 11 /* 00 1011 Generic Short Packet Code 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH5 12 /* 00 1100 Generic Short Packet Code 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH6 13 /* 00 1101 Generic Short Packet Code 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH7 14 /* 00 1110 Generic Short Packet Code 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH8 15 /* 00 1111 Generic Short Packet Code 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8_CSPS 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10_CSPS 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+/* reserved MIPI positions are used for these */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW16 46
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18 47
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_2 37
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_3 38
+
+//_HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 63
+#define _HRT_MIPI_BACKEND_FMT_TYPE_CUSTOM 63
+
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_WIDTH 6
+
+/* Definition of format_types at the interface CSS --> input_selector */
+/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! */
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h21
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 44
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? Option 3 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding
+
+/* definition of the data-FIFO state machine used to decode the different data types */
+#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9
+#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7
+#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7
+
+#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */
+
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6
+
+/* packet bit definition */
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These Custom Defs are defined based on design-time config in "mipi_backend_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+/*
+#define BE_CUST_EN_IDX 0 // 2bits
+#define BE_CUST_EN_DATAID_IDX 2 // 6bits MIPI DATA ID
+#define BE_CUST_EN_WIDTH 8
+#define BE_CUST_MODE_ALL 1 // Enable Custom Decoding for all DATA IDs
+#define BE_CUST_MODE_ONE 3 // Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID
+
+// Data State config = {get_bits(6bits), valid(1bit)} //
+#define BE_CUST_DATA_STATE_S0_IDX 0 // 7bits
+#define BE_CUST_DATA_STATE_S1_IDX 8 //7 // 7bits
+#define BE_CUST_DATA_STATE_S2_IDX 16//14 // 7bits /
+#define BE_CUST_DATA_STATE_WIDTH 24//21
+#define BE_CUST_DATA_STATE_VALID_IDX 0 // 1bits
+#define BE_CUST_DATA_STATE_GETBITS_IDX 1 // 6bits
+
+// Pixel Extractor config
+#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 // 6bits
+#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 6//5 // 5bits
+#define BE_CUST_PIX_EXT_PIX_MASK_IDX 11//10 // 18bits
+#define BE_CUST_PIX_EXT_PIX_EN_IDX 29 //28 // 1bits
+
+#define BE_CUST_PIX_EXT_WIDTH 30//29
+
+// Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)}
+#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 // 4bits
+#define BE_CUST_PIX_VALID_EOP_P1_IDX 4 // 4bits
+#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 // 4bits
+#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 // 4bits
+#define BE_CUST_PIX_VALID_EOP_WIDTH 16
+#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 // Normal (NO less get_bits case) Valid - 1bits
+#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 // Normal (NO less get_bits case) EoP - 1bits
+#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 // Especial (less get_bits case) Valid - 1bits
+#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 // Especial (less get_bits case) EoP - 1bits
+
+*/
+
+#endif /* _mipi_backend_common_defs_h_ */
+#endif /* _css_receiver_2400_common_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h
new file mode 100644
index 000000000000..45f20b524368
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h
@@ -0,0 +1,208 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _mipi_backend_defs_h
+#define _mipi_backend_defs_h
+
+#include "mipi_backend_common_defs.h"
+
+#define MIPI_BACKEND_REG_ALIGN 4 // assuming 32 bit control bus width
+
+#define _HRT_MIPI_BACKEND_NOF_IRQS 3 // sid_lut
+
+// SH Backend Register IDs
+#define _HRT_MIPI_BACKEND_ENABLE_REG_IDX 0
+#define _HRT_MIPI_BACKEND_STATUS_REG_IDX 1
+//#define _HRT_MIPI_BACKEND_HIGH_PREC_REG_IDX 2
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG0_IDX 2
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG1_IDX 3
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG2_IDX 4
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG3_IDX 5
+#define _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_IDX 6
+#define _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_IDX 7
+#define _HRT_MIPI_BACKEND_FORCE_RAW8_REG_IDX 8
+#define _HRT_MIPI_BACKEND_IRQ_STATUS_REG_IDX 9
+#define _HRT_MIPI_BACKEND_IRQ_CLEAR_REG_IDX 10
+////
+#define _HRT_MIPI_BACKEND_CUST_EN_REG_IDX 11
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_REG_IDX 12
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P0_REG_IDX 13
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P1_REG_IDX 14
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P2_REG_IDX 15
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P3_REG_IDX 16
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P0_REG_IDX 17
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P1_REG_IDX 18
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P2_REG_IDX 19
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P3_REG_IDX 20
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P0_REG_IDX 21
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P1_REG_IDX 22
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P2_REG_IDX 23
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P3_REG_IDX 24
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_REG_IDX 25
+////
+#define _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_IDX 26
+#define _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_IDX 27
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENABLE_REG_IDX 28
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_IDX 28
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_1_REG_IDX 29
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_2_REG_IDX 30
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_3_REG_IDX 31
+
+#define _HRT_MIPI_BACKEND_NOF_REGISTERS 32 // excluding the LP LUT entries
+
+#define _HRT_MIPI_BACKEND_LP_LUT_ENTRY_0_REG_IDX 32
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+#define _HRT_MIPI_BACKEND_ENABLE_REG_WIDTH 1
+#define _HRT_MIPI_BACKEND_STATUS_REG_WIDTH 1
+//#define _HRT_MIPI_BACKEND_HIGH_PREC_REG_WIDTH 1
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG_WIDTH 32
+#define _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_WIDTH 7
+#define _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_WIDTH 9
+#define _HRT_MIPI_BACKEND_FORCE_RAW8_REG_WIDTH 8
+#define _HRT_MIPI_BACKEND_IRQ_STATUS_REG_WIDTH _HRT_MIPI_BACKEND_NOF_IRQS
+#define _HRT_MIPI_BACKEND_IRQ_CLEAR_REG_WIDTH 0
+#define _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_WIDTH 1
+#define _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_WIDTH (1 + 2 + 6)
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENABLE_REG_WIDTH 1
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_WIDTH 7
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_1_REG_WIDTH 7
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_2_REG_WIDTH 7
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_3_REG_WIDTH 7
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define _HRT_MIPI_BACKEND_NOF_SP_LUT_ENTRIES 4
+
+//#define _HRT_MIPI_BACKEND_MAX_NOF_LP_LUT_ENTRIES 16 // to satisfy hss model static array declaration
+
+#define _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH 2
+#define _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH 6
+#define _HRT_MIPI_BACKEND_PACKET_ID_WIDTH (_HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH + _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH)
+
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_LSB 0
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_MSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_LSB + (pix_width) - 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_VAL_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_MSB(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_LSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_VAL_BIT(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_MSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_LSB(pix_width) + (pix_width) - 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_VAL_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_MSB(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_SOP_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_VAL_BIT(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_EOP_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_SOP_BIT(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_WIDTH(pix_width) (_HRT_MIPI_BACKEND_STREAMING_EOP_BIT(pix_width) + 1)
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These Custom Defs are defined based on design-time config in "mipi_backend_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+#define _HRT_MIPI_BACKEND_CUST_EN_IDX 0 /* 2bits */
+#define _HRT_MIPI_BACKEND_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */
+#define _HRT_MIPI_BACKEND_CUST_EN_HIGH_PREC_IDX 8 // 1 bit
+#define _HRT_MIPI_BACKEND_CUST_EN_WIDTH 9
+#define _HRT_MIPI_BACKEND_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */
+#define _HRT_MIPI_BACKEND_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */
+
+#define _HRT_MIPI_BACKEND_CUST_EN_OPTION_IDX 1
+
+/* Data State config = {get_bits(6bits), valid(1bit)} */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S0_IDX 0 /* 7bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S1_IDX 8 /* 7bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S2_IDX 16 /* was 14 7bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_WIDTH 24 /* was 21*/
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */
+
+/* Pixel Extractor config */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 6bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_ALIGN_IDX 6 /* 5bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_MASK_IDX 11 /* was 10 18bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_EN_IDX 29 /* was 28 1bits */
+
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_WIDTH 30 /* was 29 */
+
+/* Pixel Valid & EoP config = {[eop,valid](special), [eop,valid](normal)} */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_WIDTH 16
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (no fewer-get_bits) case: Valid - 1 bit */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (no fewer-get_bits) case: EoP - 1 bit */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Special (fewer-get_bits) case: Valid - 1 bit */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Special (fewer-get_bits) case: EoP - 1 bit */
+
+/*************************************************************************************************/
+/* MIPI backend output streaming interface definition */
+/* These parameters define the fields within the streaming bus. They should also be used by the */
+/* subsequent block, i.e. stream2mmio. */
+/*************************************************************************************************/
+/* The backend -> stream2mmio pipe should be design-time configurable in: */
+/* PixWidth - number of bits per pixel */
+/* PPC - pixels per clock */
+/* NumSids - max number of source IDs (ifc's); derived from that: */
+/* SidWidth - number of bits required for the SID field */
+/* To keep this configurability, the macros below take these as parameters. */
+/*************************************************************************************************/
+
+#define HRT_MIPI_BACKEND_STREAM_EOP_BIT 0
+#define HRT_MIPI_BACKEND_STREAM_SOP_BIT 1
+#define HRT_MIPI_BACKEND_STREAM_EOF_BIT 2
+#define HRT_MIPI_BACKEND_STREAM_SOF_BIT 3
+#define HRT_MIPI_BACKEND_STREAM_CHID_LS_BIT 4
+#define HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT(sid_width) (HRT_MIPI_BACKEND_STREAM_CHID_LS_BIT + (sid_width) - 1)
+#define HRT_MIPI_BACKEND_STREAM_PIX_VAL_BIT(sid_width, p) (HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT(sid_width) + 1 + (p))
+
+#define HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width, ppc, pix_width, p) (HRT_MIPI_BACKEND_STREAM_PIX_VAL_BIT(sid_width, ppc) + ((pix_width) * (p)))
+#define HRT_MIPI_BACKEND_STREAM_PIX_MS_BIT(sid_width, ppc, pix_width, p) (HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width, ppc, pix_width, p) + (pix_width) - 1)
+
+#if 0
+//#define HRT_MIPI_BACKEND_STREAM_PIX_BITS 14
+//#define HRT_MIPI_BACKEND_STREAM_CHID_BITS 4
+//#define HRT_MIPI_BACKEND_STREAM_PPC 4
+#endif
+
+#define HRT_MIPI_BACKEND_STREAM_BITS(sid_width, ppc, pix_width) (HRT_MIPI_BACKEND_STREAM_PIX_MS_BIT(sid_width, ppc, pix_width, ((ppc) - 1)) + 1)
+
+/* SP and LP LUT BIT POSITIONS */
+#define HRT_MIPI_BACKEND_LUT_PKT_DISREGARD_BIT 0 // 0
+#define HRT_MIPI_BACKEND_LUT_SID_LS_BIT (HRT_MIPI_BACKEND_LUT_PKT_DISREGARD_BIT + 1) // 1
+#define HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) (HRT_MIPI_BACKEND_LUT_SID_LS_BIT + (sid_width) - 1) // 1 + (4) - 1 = 4
+#define HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_LS_BIT(sid_width) (HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) + 1) // 5
+#define HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) (HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_LS_BIT(sid_width) + _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH - 1) // 6
+#define HRT_MIPI_BACKEND_LUT_MIPI_FMT_LS_BIT(sid_width) (HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) + 1) // 7
+#define HRT_MIPI_BACKEND_LUT_MIPI_FMT_MS_BIT(sid_width) (HRT_MIPI_BACKEND_LUT_MIPI_FMT_LS_BIT(sid_width) + _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH - 1) // 12
+
+/* #define HRT_MIPI_BACKEND_SP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) + 1 // 7 */
+
+#define HRT_MIPI_BACKEND_SP_LUT_BITS(sid_width) (HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) + 1)
+#define HRT_MIPI_BACKEND_LP_LUT_BITS(sid_width) (HRT_MIPI_BACKEND_LUT_MIPI_FMT_MS_BIT(sid_width) + 1) // 13
+
+// temp solution
+//#define HRT_MIPI_BACKEND_STREAM_PIXA_VAL_BIT HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT + 1 // 8
+//#define HRT_MIPI_BACKEND_STREAM_PIXB_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXA_VAL_BIT + 1 // 9
+//#define HRT_MIPI_BACKEND_STREAM_PIXC_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXB_VAL_BIT + 1 // 10
+//#define HRT_MIPI_BACKEND_STREAM_PIXD_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXC_VAL_BIT + 1 // 11
+//#define HRT_MIPI_BACKEND_STREAM_PIXA_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXD_VAL_BIT + 1 // 12
+//#define HRT_MIPI_BACKEND_STREAM_PIXA_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXA_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 25
+//#define HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXA_MS_BIT + 1 // 26
+//#define HRT_MIPI_BACKEND_STREAM_PIXB_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 39
+//#define HRT_MIPI_BACKEND_STREAM_PIXC_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXB_MS_BIT + 1 // 40
+//#define HRT_MIPI_BACKEND_STREAM_PIXC_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXC_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 53
+//#define HRT_MIPI_BACKEND_STREAM_PIXD_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXC_MS_BIT + 1 // 54
+//#define HRT_MIPI_BACKEND_STREAM_PIXD_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXD_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 67
+
+// vc hidden in pixb data (passed as raw12 through the pipe)
+#define HRT_MIPI_BACKEND_STREAM_VC_LS_BIT(sid_width, ppc, pix_width) (HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width, ppc, pix_width, 1) + 10) // HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT + 10 // 36
+#define HRT_MIPI_BACKEND_STREAM_VC_MS_BIT(sid_width, ppc, pix_width) (HRT_MIPI_BACKEND_STREAM_VC_LS_BIT(sid_width, ppc, pix_width) + 1) // 37
+
+#endif /* _mipi_backend_defs_h */
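As a sanity check on the parameterized stream macros in this header, here is a minimal standalone sketch. It restates the macros locally (so the snippet compiles on its own) and asserts the bit positions for sid_width = 4, ppc = 4, pix_width = 14, the legacy configuration the commented-out "temp solution" block refers to; all EX_-prefixed names are local to the example, not part of the patch.

    #include <assert.h>

    /* Local restatement of the patch's stream-layout macros. */
    #define EX_CHID_LS_BIT 4
    #define EX_CHID_MS_BIT(sid_width) (EX_CHID_LS_BIT + (sid_width) - 1)
    #define EX_PIX_VAL_BIT(sid_width, p) (EX_CHID_MS_BIT(sid_width) + 1 + (p))
    #define EX_PIX_LS_BIT(sid_width, ppc, pix_width, p) \
            (EX_PIX_VAL_BIT(sid_width, ppc) + ((pix_width) * (p)))
    #define EX_PIX_MS_BIT(sid_width, ppc, pix_width, p) \
            (EX_PIX_LS_BIT(sid_width, ppc, pix_width, p) + (pix_width) - 1)

    int main(void)
    {
            assert(EX_CHID_MS_BIT(4) == 7);               /* chid: bits 4..7         */
            assert(EX_PIX_VAL_BIT(4, 0) == 8);            /* valid flags: bits 8..11 */
            assert(EX_PIX_LS_BIT(4, 4, 14, 0) == 12);     /* pixel A: bits 12..25    */
            assert(EX_PIX_MS_BIT(4, 4, 14, 1) == 39);     /* pixel B: bits 26..39    */
            assert(EX_PIX_MS_BIT(4, 4, 14, 3) + 1 == 68); /* total stream width      */
            return 0;
    }

The computed positions match the numbers in the "temp solution" comments (pixel B at bits 26..39, VC at bits 36..37), a useful cross-check that the parameterization preserves the old fixed layout.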
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h
new file mode 100644
index 000000000000..a8d0dbd7f6d7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h
@@ -0,0 +1,169 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _csi_rx_defs_h
+#define _csi_rx_defs_h
+
+//#include "rx_csi_common_defs.h"
+
+#define MIPI_PKT_DATA_WIDTH 32
+//#define CLK_CROSSING_FIFO_DEPTH 16
+#define _CSI_RX_REG_ALIGN 4
+
+// Number of IRQs (see below for the definition of each IRQ bit)
+#define CSI_RX_NOF_IRQS_BYTE_DOMAIN 11
+#define CSI_RX_NOF_IRQS_ISP_DOMAIN 15 // CSI_RX_NOF_IRQS_BYTE_DOMAIN + the remaining Dphy_rx IRQs already in the ISP clock domain
+
+// REGISTER DESCRIPTION
+//#define _HRT_CSI_RX_SOFTRESET_REG_IDX 0
+#define _HRT_CSI_RX_ENABLE_REG_IDX 0
+#define _HRT_CSI_RX_NOF_ENABLED_LANES_REG_IDX 1
+#define _HRT_CSI_RX_ERROR_HANDLING_REG_IDX 2
+#define _HRT_CSI_RX_STATUS_REG_IDX 3
+#define _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX 4
+#define _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX 5
+//#define _HRT_CSI_RX_IRQ_CONFIG_REG_IDX 6
+#define _HRT_CSI_RX_DLY_CNT_TERMEN_CLANE_REG_IDX 6
+#define _HRT_CSI_RX_DLY_CNT_SETTLE_CLANE_REG_IDX 7
+#define _HRT_CSI_RX_DLY_CNT_TERMEN_DLANE_REG_IDX(lane_idx) (8 + (2 * lane_idx))
+#define _HRT_CSI_RX_DLY_CNT_SETTLE_DLANE_REG_IDX(lane_idx) (8 + (2 * lane_idx) + 1)
+
+#define _HRT_CSI_RX_NOF_REGISTERS(nof_dlanes) (8 + 2 * (nof_dlanes))
+
+//#define _HRT_CSI_RX_SOFTRESET_REG_WIDTH 1
+#define _HRT_CSI_RX_ENABLE_REG_WIDTH 1
+#define _HRT_CSI_RX_NOF_ENABLED_LANES_REG_WIDTH 3
+#define _HRT_CSI_RX_ERROR_HANDLING_REG_WIDTH 4
+#define _HRT_CSI_RX_STATUS_REG_WIDTH 1
+#define _HRT_CSI_RX_STATUS_DLANE_HS_REG_WIDTH 8
+#define _HRT_CSI_RX_STATUS_DLANE_LP_REG_WIDTH 24
+#define _HRT_CSI_RX_IRQ_CONFIG_REG_WIDTH (CSI_RX_NOF_IRQS_ISP_DOMAIN)
+#define _HRT_CSI_RX_DLY_CNT_REG_WIDTH 24
+//#define _HRT_CSI_RX_IRQ_STATUS_REG_WIDTH NOF_IRQS
+//#define _HRT_CSI_RX_IRQ_CLEAR_REG_WIDTH 0
+
+#define ONE_LANE_ENABLED 0
+#define TWO_LANES_ENABLED 1
+#define THREE_LANES_ENABLED 2
+#define FOUR_LANES_ENABLED 3
+
+// Error handling reg bit positions
+#define ERR_DECISION_BIT 0
+#define DISC_RESERVED_SP_BIT 1
+#define DISC_RESERVED_LP_BIT 2
+#define DIS_INCOMP_PKT_CHK_BIT 3
+
+#define _HRT_CSI_RX_IRQ_CONFIG_REG_VAL_POSEDGE 0
+#define _HRT_CSI_RX_IRQ_CONFIG_REG_VAL_ORIGINAL 1
+
+// Interrupt bits
+#define _HRT_RX_CSI_IRQ_SINGLE_PH_ERROR_CORRECTED 0
+#define _HRT_RX_CSI_IRQ_MULTIPLE_PH_ERROR_DETECTED 1
+#define _HRT_RX_CSI_IRQ_PAYLOAD_CHECKSUM_ERROR 2
+#define _HRT_RX_CSI_IRQ_FIFO_FULL_ERROR 3
+#define _HRT_RX_CSI_IRQ_RESERVED_SP_DETECTED 4
+#define _HRT_RX_CSI_IRQ_RESERVED_LP_DETECTED 5
+//#define _HRT_RX_CSI_IRQ_PREMATURE_SOP 6
+#define _HRT_RX_CSI_IRQ_INCOMPLETE_PACKET 6
+#define _HRT_RX_CSI_IRQ_FRAME_SYNC_ERROR 7
+#define _HRT_RX_CSI_IRQ_LINE_SYNC_ERROR 8
+#define _HRT_RX_CSI_IRQ_DLANE_HS_SOT_ERROR 9
+#define _HRT_RX_CSI_IRQ_DLANE_HS_SOT_SYNC_ERROR 10
+
+#define _HRT_RX_CSI_IRQ_DLANE_ESC_ERROR 11
+#define _HRT_RX_CSI_IRQ_DLANE_TRIGGERESC 12
+#define _HRT_RX_CSI_IRQ_DLANE_ULPSESC 13
+#define _HRT_RX_CSI_IRQ_CLANE_ULPSCLKNOT 14
+
+/* OLD ARASAN FRONTEND IRQs
+#define _HRT_RX_CSI_IRQ_OVERRUN_BIT 0
+#define _HRT_RX_CSI_IRQ_RESERVED_BIT 1
+#define _HRT_RX_CSI_IRQ_SLEEP_MODE_ENTRY_BIT 2
+#define _HRT_RX_CSI_IRQ_SLEEP_MODE_EXIT_BIT 3
+#define _HRT_RX_CSI_IRQ_ERR_SOT_HS_BIT 4
+#define _HRT_RX_CSI_IRQ_ERR_SOT_SYNC_HS_BIT 5
+#define _HRT_RX_CSI_IRQ_ERR_CONTROL_BIT 6
+#define _HRT_RX_CSI_IRQ_ERR_ECC_DOUBLE_BIT 7
+#define _HRT_RX_CSI_IRQ_ERR_ECC_CORRECTED_BIT 8
+#define _HRT_RX_CSI_IRQ_ERR_ECC_NO_CORRECTION_BIT 9
+#define _HRT_RX_CSI_IRQ_ERR_CRC_BIT 10
+#define _HRT_RX_CSI_IRQ_ERR_ID_BIT 11
+#define _HRT_RX_CSI_IRQ_ERR_FRAME_SYNC_BIT 12
+#define _HRT_RX_CSI_IRQ_ERR_FRAME_DATA_BIT 13
+#define _HRT_RX_CSI_IRQ_DATA_TIMEOUT_BIT 14
+#define _HRT_RX_CSI_IRQ_ERR_ESCAPE_BIT 15
+#define _HRT_RX_CSI_IRQ_ERR_LINE_SYNC_BIT 16
+*/
+
+// Bit description for reg _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE0 0
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE1 1
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE2 2
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE3 3
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE0 4
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE1 5
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE2 6
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE3 7
+
+// Bit description for reg _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE0 0
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE1 1
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE2 2
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE3 3
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE0 4
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE0 5
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE0 6
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE0 7
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE1 8
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE1 9
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE1 10
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE1 11
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE2 12
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE2 13
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE2 14
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE2 15
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE3 16
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE3 17
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE3 18
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE3 19
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE0 20
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE1 21
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE2 22
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE3 23
+
+/*********************************************************/
+/*** Relevant declarations from rx_csi_common_defs.h *****/
+/*********************************************************/
+/* packet bit definition */
+#define _HRT_RX_CSI_PKT_SOP_BITPOS 32
+#define _HRT_RX_CSI_PKT_EOP_BITPOS 33
+#define _HRT_RX_CSI_PKT_PAYLOAD_BITPOS 0
+#define _HRT_RX_CSI_PH_CH_ID_BITPOS 22
+#define _HRT_RX_CSI_PH_FMT_ID_BITPOS 16
+#define _HRT_RX_CSI_PH_DATA_FIELD_BITPOS 0
+
+#define _HRT_RX_CSI_PKT_SOP_BITS 1
+#define _HRT_RX_CSI_PKT_EOP_BITS 1
+#define _HRT_RX_CSI_PKT_PAYLOAD_BITS 32
+#define _HRT_RX_CSI_PH_CH_ID_BITS 2
+#define _HRT_RX_CSI_PH_FMT_ID_BITS 6
+#define _HRT_RX_CSI_PH_DATA_FIELD_BITS 16
+
+/* Definition of data format ID at the interface CSS_receiver units */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+
+#endif /* _csi_rx_defs_h */
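The DLANE_LP status bits above follow a regular per-lane pattern: escape errors in bits 0..3 (one per lane), four TRIGGERESC flags per lane in groups of four starting at bit 4, and ULPSESC per lane starting at bit 20. That regularity allows a table-free decoder. A minimal sketch, assuming a raw 24-bit value read from the _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX register; the helper names are illustrative, not part of the driver:

    #include <stdbool.h>
    #include <stdint.h>

    static bool lp_esc_err(uint32_t status, unsigned int lane)
    {
            return (status >> lane) & 1;                 /* bits 0..3   */
    }

    static bool lp_triggeresc(uint32_t status, unsigned int lane, unsigned int esc)
    {
            return (status >> (4 + 4 * lane + esc)) & 1; /* bits 4..19  */
    }

    static bool lp_ulpsesc(uint32_t status, unsigned int lane)
    {
            return (status >> (20 + lane)) & 1;          /* bits 20..23 */
    }

For instance, lp_triggeresc(status, 2, 3) tests bit 15, matching _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE2 above.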
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h
new file mode 100644
index 000000000000..a3940d246890
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h
@@ -0,0 +1,68 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _STREAM2MMMIO_DEFS_H
+#define _STREAM2MMMIO_DEFS_H
+
+#include <mipi_backend_defs.h>
+
+#define _STREAM2MMIO_REG_ALIGN 4
+
+#define _STREAM2MMIO_COMMAND_REG_ID 0
+#define _STREAM2MMIO_ACKNOWLEDGE_REG_ID 1
+#define _STREAM2MMIO_PIX_WIDTH_ID_REG_ID 2
+#define _STREAM2MMIO_START_ADDR_REG_ID 3 /* master port address, NOT byte */
+#define _STREAM2MMIO_END_ADDR_REG_ID 4 /* master port address, NOT byte */
+#define _STREAM2MMIO_STRIDE_REG_ID 5 /* stride in master port words; increment is per packet for long SIDs, stride is not used for short SIDs */
+#define _STREAM2MMIO_NUM_ITEMS_REG_ID 6 /* number of packets for store packets cmd, number of words for store_words cmd */
+#define _STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID 7 /* if this register is 1, input will be stalled if there is no pending command for this sid */
+#define _STREAM2MMIO_REGS_PER_SID 8
+
+#define _STREAM2MMIO_SID_REG_OFFSET 8
+#define _STREAM2MMIO_MAX_NOF_SIDS 64 /* value used in hss model */
+
+/* command token definition */
+#define _STREAM2MMIO_CMD_TOKEN_CMD_LSB 0 /* bits 1-0 are the command field */
+#define _STREAM2MMIO_CMD_TOKEN_CMD_MSB 1
+
+#define _STREAM2MMIO_CMD_TOKEN_WIDTH (_STREAM2MMIO_CMD_TOKEN_CMD_MSB + 1 - _STREAM2MMIO_CMD_TOKEN_CMD_LSB)
+
+#define _STREAM2MMIO_CMD_TOKEN_STORE_WORDS 0 /* command for storing a number of output words indicated by reg _STREAM2MMIO_NUM_ITEMS */
+#define _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS 1 /* command for storing a number of packets indicated by reg _STREAM2MMIO_NUM_ITEMS */
+#define _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME 2 /* command for waiting for a frame start */
+
+/* acknowledges from packer module */
+/* fields: eof - indicates whether last (short) packet received was an eof packet */
+/* eop - indicates whether command has ended due to packet end or due to no of words requested has been received */
+/* count - indicates number of words stored */
+#define _STREAM2MMIO_PACK_NUM_ITEMS_BITS 16
+#define _STREAM2MMIO_PACK_ACK_EOP_BIT _STREAM2MMIO_PACK_NUM_ITEMS_BITS
+#define _STREAM2MMIO_PACK_ACK_EOF_BIT (_STREAM2MMIO_PACK_ACK_EOP_BIT + 1)
+
+/* acknowledge token definition */
+#define _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_LSB 0 /* bits 15-0 hold the number of items */
+#define _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_MSB (_STREAM2MMIO_PACK_NUM_ITEMS_BITS - 1)
+#define _STREAM2MMIO_ACK_TOKEN_EOP_BIT _STREAM2MMIO_PACK_ACK_EOP_BIT
+#define _STREAM2MMIO_ACK_TOKEN_EOF_BIT _STREAM2MMIO_PACK_ACK_EOF_BIT
+#define _STREAM2MMIO_ACK_TOKEN_VALID_BIT (_STREAM2MMIO_ACK_TOKEN_EOF_BIT + 1) /* this bit indicates a valid ack */
+/* if there is no valid ack, a read */
+/* on the ack register returns 0 */
+#define _STREAM2MMIO_ACK_TOKEN_WIDTH (_STREAM2MMIO_ACK_TOKEN_VALID_BIT + 1)
+
+/* commands for packer module */
+#define _STREAM2MMIO_PACK_CMD_STORE_WORDS 0
+#define _STREAM2MMIO_PACK_CMD_STORE_LONG_PACKET 1
+#define _STREAM2MMIO_PACK_CMD_STORE_SHORT_PACKET 2
+
+#endif /* _STREAM2MMIO_DEFS_H */
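To make the acknowledge-token layout concrete: the item count occupies bits 15..0, EoP is bit 16, EoF bit 17, and the valid flag bit 18, giving the 19-bit token width. A hedged sketch of a decoder under those definitions; the struct and function names are invented for the example:

    #include <stdbool.h>
    #include <stdint.h>

    struct s2m_ack {
            uint16_t num_items; /* words stored                    */
            bool eop;           /* command ended on end-of-packet  */
            bool eof;           /* last short packet was an EOF    */
    };

    /* Decode a 19-bit acknowledge token; returns false when the token is
     * invalid (a read with no pending ack yields 0, so VALID is clear). */
    static bool s2m_ack_decode(uint32_t token, struct s2m_ack *ack)
    {
            if (!((token >> 18) & 1))         /* _STREAM2MMIO_ACK_TOKEN_VALID_BIT */
                    return false;
            ack->num_items = token & 0xffff;  /* bits 15..0 */
            ack->eop = (token >> 16) & 1;
            ack->eof = (token >> 17) & 1;
            return true;
    }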
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h
new file mode 100644
index 000000000000..dc8d091c6769
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_GLOBAL_H_INCLUDED__
+#define __IBUF_CTRL_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#include <ibuf_cntrl_defs.h> /* _IBUF_CNTRL_RECALC_WORDS_STATUS,
+ * _IBUF_CNTRL_ARBITERS_STATUS,
+ * _IBUF_CNTRL_PROC_REG_ALIGN,
+ * etc.
+ */
+
+/* The definition of the contents of the main controller state register is
+ * lacking in ibuf_cntrl_defs.h, so define these here:
+ */
+#define _IBUF_CNTRL_MAIN_CNTRL_FSM_MASK 0xf
+#define _IBUF_CNTRL_MAIN_CNTRL_FSM_NEXT_COMMAND_CHECK 0x9
+#define _IBUF_CNTRL_MAIN_CNTRL_MEM_INP_BUF_ALLOC BIT(8)
+#define _IBUF_CNTRL_DMA_SYNC_WAIT_FOR_SYNC 1
+#define _IBUF_CNTRL_DMA_SYNC_FSM_WAIT_FOR_ACK (0x3 << 1)
+
+typedef struct ib_buffer_s ib_buffer_t;
+struct ib_buffer_s {
+ u32 start_addr; /* start address of the buffer in the
+ * "input-buffer hardware block"
+ */
+
+ u32 stride; /* stride per buffer line (in bytes) */
+ u32 lines; /* lines in the buffer */
+};
+
+typedef struct ibuf_ctrl_cfg_s ibuf_ctrl_cfg_t;
+struct ibuf_ctrl_cfg_s {
+ bool online;
+
+ struct {
+ /* DMA configuration */
+ u32 channel;
+ u32 cmd; /* must be _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND */
+
+ /* DMA reconfiguration */
+ u32 shift_returned_items;
+ u32 elems_per_word_in_ibuf;
+ u32 elems_per_word_in_dest;
+ } dma_cfg;
+
+ ib_buffer_t ib_buffer;
+
+ struct {
+ u32 stride;
+ u32 start_addr;
+ u32 lines;
+ } dest_buf_cfg;
+
+ u32 items_per_store;
+ u32 stores_per_frame;
+
+ struct {
+ u32 sync_cmd; /* must be _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME */
+ u32 store_cmd; /* must be _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS */
+ } stream2mmio_cfg;
+};
+
+extern const u32 N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID];
+
+#endif /* __IBUF_CTRL_GLOBAL_H_INCLUDED__ */
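For orientation, a sketch of how a client of this header might fill in ibuf_ctrl_cfg_t. It assumes the header and its dependencies resolve on the include path; every numeric value is an illustrative placeholder, while the two command fields are the ones the struct comments mandate:

    #include <ibuf_ctrl_global.h>

    /* Illustrative only: all sizes/addresses below are placeholders. */
    static const ibuf_ctrl_cfg_t example_cfg = {
            .online = true,
            .dma_cfg = {
                    .channel = 0,
                    .cmd = _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND, /* mandated above */
                    .shift_returned_items = 0,
                    .elems_per_word_in_ibuf = 4,
                    .elems_per_word_in_dest = 2,
            },
            .ib_buffer = { .start_addr = 0x0, .stride = 2560, .lines = 64 },
            .dest_buf_cfg = { .stride = 2560, .start_addr = 0x0, .lines = 64 },
            .items_per_store = 16,
            .stores_per_frame = 120,
            .stream2mmio_cfg = {
                    .sync_cmd  = _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME,    /* mandated */
                    .store_cmd = _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS, /* mandated */
            },
    };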
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h
new file mode 100644
index 000000000000..2ca4d5210a38
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h
@@ -0,0 +1,89 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_GLOBAL_H_INCLUDED__
+#define __ISYS_DMA_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define HIVE_ISYS2401_DMA_IBUF_DDR_CONN 0
+#define HIVE_ISYS2401_DMA_IBUF_VMEM_CONN 1
+#define _DMA_V2_ZERO_EXTEND 0
+#define _DMA_V2_SIGN_EXTEND 1
+
+#define _DMA_ZERO_EXTEND _DMA_V2_ZERO_EXTEND
+#define _DMA_SIGN_EXTEND _DMA_V2_SIGN_EXTEND
+
+/********************************************************
+ *
+ * DMA Port.
+ *
+ * The DMA port definition for the input system
+ * 2401 DMA is a duplicate of the DMA port
+ * definition for the CSS system DMA. It is duplicated
+ * here only as a temporary step until the device library
+ * is available. The device library is supposed to provide
+ * the capability of reusing the control interface of the
+ * same device prototypes. The refactor team will work on
+ * this, right?
+ *
+ ********************************************************/
+typedef struct isys2401_dma_port_cfg_s isys2401_dma_port_cfg_t;
+struct isys2401_dma_port_cfg_s {
+ u32 stride;
+ u32 elements;
+ u32 cropping;
+ u32 width;
+};
+/* end of DMA Port */
+
+/************************************************
+ *
+ * DMA Device.
+ *
+ * The DMA device definition for the input system
+ * 2401 DMA is a duplicate of the DMA device
+ * definition for the CSS system DMA. It is duplicated
+ * here only as a temporary step until the device library
+ * is available. The device library is supposed to provide
+ * the capability of reusing the control interface of the
+ * same device prototypes. The refactor team will work on
+ * this, right?
+ *
+ ************************************************/
+typedef enum {
+ isys2401_dma_ibuf_to_ddr_connection = HIVE_ISYS2401_DMA_IBUF_DDR_CONN,
+ isys2401_dma_ibuf_to_vmem_connection = HIVE_ISYS2401_DMA_IBUF_VMEM_CONN
+} isys2401_dma_connection;
+
+typedef enum {
+ isys2401_dma_zero_extension = _DMA_ZERO_EXTEND,
+ isys2401_dma_sign_extension = _DMA_SIGN_EXTEND
+} isys2401_dma_extension;
+
+typedef struct isys2401_dma_cfg_s isys2401_dma_cfg_t;
+struct isys2401_dma_cfg_s {
+ isys2401_dma_channel channel;
+ isys2401_dma_connection connection;
+ isys2401_dma_extension extension;
+ u32 height;
+};
+
+/* end of DMA Device */
+
+/* isys2401_dma_channel limits per DMA ID */
+extern const isys2401_dma_channel
+N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID];
+
+#endif /* __ISYS_DMA_GLOBAL_H_INCLUDED__ */
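A similar hedged sketch for the DMA side: configuring an IBUF-to-DDR transfer with zero extension. The numeric values are placeholders; only the enum choices come from this header:

    #include <isys_dma_global.h>

    static const isys2401_dma_port_cfg_t example_port = {
            .stride = 4096, /* placeholder: destination line stride */
            .elements = 16,
            .cropping = 0,
            .width = 640,
    };

    static const isys2401_dma_cfg_t example_dma = {
            .channel = 0, /* placeholder; bounded by N_ISYS2401_DMA_CHANNEL_PROCS */
            .connection = isys2401_dma_ibuf_to_ddr_connection,
            .extension = isys2401_dma_zero_extension,
            .height = 480,
    };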
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
new file mode 100644
index 000000000000..41d051db3987
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_IRQ_GLOBAL_H__
+#define __ISYS_IRQ_GLOBAL_H__
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+/* Register offset/index from base location */
+#define ISYS_IRQ_EDGE_REG_IDX (0)
+#define ISYS_IRQ_MASK_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 1)
+#define ISYS_IRQ_STATUS_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 2)
+#define ISYS_IRQ_CLEAR_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 3)
+#define ISYS_IRQ_ENABLE_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 4)
+#define ISYS_IRQ_LEVEL_NO_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 5)
+
+/* Register values */
+#define ISYS_IRQ_MASK_REG_VALUE (0xFFFF)
+#define ISYS_IRQ_CLEAR_REG_VALUE (0xFFFF)
+#define ISYS_IRQ_ENABLE_REG_VALUE (0xFFFF)
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __ISYS_IRQ_GLOBAL_H__ */
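The six IRQ registers above are consecutive indices off the block base. Assuming one 32-bit register per index (a 4-byte stride, which this header does not state explicitly but which matches the *_REG_ALIGN convention of the neighbouring blocks), the byte offset of a register is simply:

    #include <stdint.h>

    #define EX_ISYS_IRQ_REG_BYTES 4 /* assumed 32-bit register stride */

    /* Hypothetical helper: byte offset of a register from the block base. */
    static inline uint32_t isys_irq_reg_offset(uint32_t reg_idx)
    {
            return reg_idx * EX_ISYS_IRQ_REG_BYTES;
    }

    /* e.g. isys_irq_reg_offset(ISYS_IRQ_CLEAR_REG_IDX) == 0x0c */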
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h
new file mode 100644
index 000000000000..bcb46b293b6a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+typedef struct stream2mmio_cfg_s stream2mmio_cfg_t;
+struct stream2mmio_cfg_s {
+ u32 bits_per_pixel;
+ u32 enable_blocking;
+};
+
+/* Stream2MMIO limits per ID*/
+/*
+ * Stream2MMIO 0 has 8 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
+ *
+ * Stream2MMIO 1 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ *
+ * Stream2MMIO 2 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ */
+extern const stream2mmio_sid_ID_t N_STREAM2MMIO_SID_PROCS[N_STREAM2MMIO_ID];
+
+#endif /* __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h
new file mode 100644
index 000000000000..cde599c5d0d2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h
@@ -0,0 +1,90 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_GLOBAL_H_INCLUDED__
+#define __PIXELGEN_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+/**
+ * Pixel-generator. ("pixelgen_global.h")
+ */
+/*
+ * Duplicates "sync_generator_cfg_t" in "input_system_global.h".
+ */
+typedef struct sync_generator_cfg_s sync_generator_cfg_t;
+struct sync_generator_cfg_s {
+ u32 hblank_cycles;
+ u32 vblank_cycles;
+ u32 pixels_per_clock;
+ u32 nr_of_frames;
+ u32 pixels_per_line;
+ u32 lines_per_frame;
+};
+
+typedef enum {
+ PIXELGEN_TPG_MODE_RAMP = 0,
+ PIXELGEN_TPG_MODE_CHBO,
+ PIXELGEN_TPG_MODE_MONO,
+ N_PIXELGEN_TPG_MODE
+} pixelgen_tpg_mode_t;
+
+/*
+ * "pixelgen_tpg_cfg_t" duplicates parts of
+ * "tpg_cfg_t" in "input_system_global.h".
+ */
+typedef struct pixelgen_tpg_cfg_s pixelgen_tpg_cfg_t;
+struct pixelgen_tpg_cfg_s {
+ pixelgen_tpg_mode_t mode; /* CHBO, MONO */
+
+ struct {
+		/* used by CHBO and MONO */
+ u32 R1;
+ u32 G1;
+ u32 B1;
+
+		/* used by CHBO only */
+ u32 R2;
+ u32 G2;
+ u32 B2;
+ } color_cfg;
+
+ struct {
+ u32 h_mask; /* horizontal mask */
+ u32 v_mask; /* vertical mask */
+ u32 hv_mask; /* horizontal+vertical mask? */
+ } mask_cfg;
+
+ struct {
+ s32 h_delta; /* horizontal delta? */
+ s32 v_delta; /* vertical delta? */
+ } delta_cfg;
+
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+/*
+ * "pixelgen_prbs_cfg_t" duplicates parts of
+ * prbs_cfg_t" in "input_system_global.h".
+ */
+typedef struct pixelgen_prbs_cfg_s pixelgen_prbs_cfg_t;
+struct pixelgen_prbs_cfg_s {
+ s32 seed0;
+ s32 seed1;
+
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+/* end of Pixel-generator: TPG. ("pixelgen_global.h") */
+#endif /* __PIXELGEN_GLOBAL_H_INCLUDED__ */
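A hedged sketch of a checkerboard (CHBO) test-pattern configuration built from these types. All numbers are illustrative placeholders; legal ranges depend on the pixel-generator hardware and are not documented in this header:

    #include <pixelgen_global.h>

    static const pixelgen_tpg_cfg_t tpg_checkerboard = {
            .mode = PIXELGEN_TPG_MODE_CHBO,
            .color_cfg = {
                    .R1 = 0x3ff, .G1 = 0x3ff, .B1 = 0x3ff, /* first square colour  */
                    .R2 = 0x000, .G2 = 0x000, .B2 = 0x000, /* second square colour */
            },
            .mask_cfg = { .h_mask = 0x10, .v_mask = 0x10, .hv_mask = 0x0 },
            .delta_cfg = { .h_delta = 0, .v_delta = 0 },
            .sync_gen_cfg = {
                    .hblank_cycles = 100,
                    .vblank_cycles = 100,
                    .pixels_per_clock = 1,
                    .nr_of_frames = 1,
                    .pixels_per_line = 640,
                    .lines_per_frame = 480,
            },
    };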
diff --git a/drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h b/drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h
new file mode 100644
index 000000000000..99d292164efc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h
@@ -0,0 +1,198 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_common_defs_h_
+#define _css_receiver_2400_common_defs_h_
+#ifndef _mipi_backend_common_defs_h_
+#define _mipi_backend_common_defs_h_
+
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH 16
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH 2
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH (_HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_WIDTH 32 /* use 32 to be compatible with the streaming monitor; the MSBs of the interface are tied to '0' */
+
+/* Definition of data format ID at the interface CSS_receiver capture/acquisition units */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8 24 /* 01 1000 YUV420 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10 25 /* 01 1001 YUV420 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8L 26 /* 01 1010 YUV420 8-bit legacy */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_8 30 /* 01 1110 YUV422 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_10 31 /* 01 1111 YUV422 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB444 32 /* 10 0000 RGB444 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB555 33 /* 10 0001 RGB555 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB565 34 /* 10 0010 RGB565 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB666 35 /* 10 0011 RGB666 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB888 36 /* 10 0100 RGB888 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW6 40 /* 10 1000 RAW6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW7 41 /* 10 1001 RAW7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW8 42 /* 10 1010 RAW8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW10 43 /* 10 1011 RAW10 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW12 44 /* 10 1100 RAW12 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW14 45 /* 10 1101 RAW14 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_2 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_3 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_4 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_5 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_6 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_7 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_8 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_Emb 18 /* 01 0010 embedded eight bit non image data */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH1 8 /* 00 1000 Generic Short Packet Code 1 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH2 9 /* 00 1001 Generic Short Packet Code 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH3 10 /* 00 1010 Generic Short Packet Code 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH4 11 /* 00 1011 Generic Short Packet Code 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH5 12 /* 00 1100 Generic Short Packet Code 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH6 13 /* 00 1101 Generic Short Packet Code 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH7 14 /* 00 1110 Generic Short Packet Code 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH8 15 /* 00 1111 Generic Short Packet Code 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8_CSPS 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10_CSPS 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+/* used reserved mipi positions for these */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW16 46
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18 47
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_2 37
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_3 38
+
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_WIDTH 6
+
+/* Definition of format_types at the interface CSS --> input_selector*/
+/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! */
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h21
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 44
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? Option 3 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding
+
+/* definition for state machine of data FIFO for decode different type of data */
+#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9
+#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7
+#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7
+
+#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */
+
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6
+
+/* packet bit definition */
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These Custom Defs are defined based on design-time config in "csi_be_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+#define BE_CUST_EN_IDX 0 /* 2bits */
+#define BE_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */
+#define BE_CUST_EN_WIDTH 8
+#define BE_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */
+#define BE_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */
+
+/* Data State config = {get_bits(6bits), valid(1bit)} */
+#define BE_CUST_DATA_STATE_S0_IDX 0 /* 7bits */
+#define BE_CUST_DATA_STATE_S1_IDX 7 /* 7bits */
+#define BE_CUST_DATA_STATE_S2_IDX 14 /* 7bits */
+#define BE_CUST_DATA_STATE_WIDTH 21
+#define BE_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */
+#define BE_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */
+
+/* Pixel Extractor config */
+#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 5 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_MASK_IDX 10 /* 18bits */
+#define BE_CUST_PIX_EXT_PIX_EN_IDX 28 /* 1bits */
+#define BE_CUST_PIX_EXT_WIDTH 29
+
+/* Pixel Valid & EoP config = {[eop,valid](special), [eop,valid](normal)} */
+#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_WIDTH 16
+#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (no reduced get_bits case) Valid - 1 bit */
+#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (no reduced get_bits case) EoP - 1 bit */
+#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Special (reduced get_bits case) Valid - 1 bit */
+#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Special (reduced get_bits case) EoP - 1 bit */
+
+#endif /* _mipi_backend_common_defs_h_ */
+#endif /* _css_receiver_2400_common_defs_h_ */
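The packet-header positions above (SOP at bit 32, channel ID at bit 22, format ID at bit 16, the 16-bit data field at bit 0) describe a word wider than 32 bits, so a decoder has to work on a 64-bit container. A minimal sketch, assuming the header word is the one with SOP set; the struct and function names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    struct csi_ph {
            uint8_t ch_id;       /* virtual channel, 2 bits        */
            uint8_t fmt_id;      /* data format ID, 6 bits         */
            uint16_t data_field; /* word count / short-packet data */
    };

    static bool csi_ph_decode(uint64_t word, struct csi_ph *ph)
    {
            if (!((word >> 32) & 1))  /* _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX */
                    return false;     /* not a start-of-packet word */
            ph->ch_id = (word >> 22) & 0x3;
            ph->fmt_id = (word >> 16) & 0x3f;
            ph->data_field = word & 0xffff;
            return true;
    }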
diff --git a/drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h b/drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h
new file mode 100644
index 000000000000..f4b2b41b6d94
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h
@@ -0,0 +1,256 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_defs_h_
+#define _css_receiver_2400_defs_h_
+
+#include "css_receiver_2400_common_defs.h"
+
+#define CSS_RECEIVER_DATA_WIDTH 8
+#define CSS_RECEIVER_RX_TRIG 4
+#define CSS_RECEIVER_RF_WORD 32
+#define CSS_RECEIVER_IMG_PROC_RF_ADDR 10
+#define CSS_RECEIVER_CSI_RF_ADDR 4
+#define CSS_RECEIVER_DATA_OUT 12
+#define CSS_RECEIVER_CHN_NO 2
+#define CSS_RECEIVER_DWORD_CNT 11
+#define CSS_RECEIVER_FORMAT_TYP 5
+#define CSS_RECEIVER_HRESPONSE 2
+#define CSS_RECEIVER_STATE_WIDTH 3
+#define CSS_RECEIVER_FIFO_DAT 32
+#define CSS_RECEIVER_CNT_VAL 2
+#define CSS_RECEIVER_PRED10_VAL 10
+#define CSS_RECEIVER_PRED12_VAL 12
+#define CSS_RECEIVER_CNT_WIDTH 8
+#define CSS_RECEIVER_WORD_CNT 16
+#define CSS_RECEIVER_PIXEL_LEN 6
+#define CSS_RECEIVER_PIXEL_CNT 5
+#define CSS_RECEIVER_COMP_8_BIT 8
+#define CSS_RECEIVER_COMP_7_BIT 7
+#define CSS_RECEIVER_COMP_6_BIT 6
+
+#define CSI_CONFIG_WIDTH 4
+
+/* division of gen_short data, ch_id and fmt_type over streaming data interface */
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB 0
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH - 1)
+
+#define _HRT_CSS_RECEIVER_2400_REG_ALIGN 4
+#define _HRT_CSS_RECEIVER_2400_BYTES_PER_PKT 4
+
+#define hrt_css_receiver_2400_4_lane_port_offset 0x100
+#define hrt_css_receiver_2400_1_lane_port_offset 0x200
+#define hrt_css_receiver_2400_2_lane_port_offset 0x300
+#define hrt_css_receiver_2400_backend_port_offset 0x100
+
+#define _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX 3
+#define _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX 4
+#define _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX 14
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX 15
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX 16
+#define _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX 17
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX 18
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX 19
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX 21
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX 22
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX 23
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX 24
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX 25
+#define _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX 26
+#define _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX 27
+#define _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX 28
+
+/* Interrupt bits for IRQ_STATUS and IRQ_ENABLE registers */
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT 2
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT 3
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT 4
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT 5
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT 6
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT 7
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT 8
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT 9
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT 10
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT 11
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT 12
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT 13
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT 14
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT 15
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT 16
+
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_CAUSE_ "Fifo Overrun"
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_CAUSE_ "Reserved"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_CAUSE_ "Sleep mode entry"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_CAUSE_ "Sleep mode exit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_CAUSE_ "Error high speed SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_CAUSE_ "Error high speed sync SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_CAUSE_ "Error control"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_CAUSE_ "Error correction double bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_CAUSE_ "Error correction single bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_CAUSE_ "No error"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_CAUSE_ "Error cyclic redundancy check"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_CAUSE_ "Error id"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_CAUSE_ "Error frame sync"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_CAUSE_ "Error frame data"
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_CAUSE_ "Data time-out"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_CAUSE_ "Error escape"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_CAUSE_ "Error line sync"
+
+/* Bits for CSI2_DEVICE_READY register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DEVICE_READY_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_INIT_TIME_OUT_ERR_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_OVER_RUN_ERR_IDX 3
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_SOT_SYNC_ERR_IDX 4
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_RECEIVE_DATA_TIME_OUT_ERR_IDX 5
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_ECC_TWO_BIT_ERR_IDX 6
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_DATA_ID_ERR_IDX 7
+
+/* Bits for CSI2_FUNC_PROG register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS 19
+
+/* Bits for INIT_COUNT register */
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_IDX 0
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_BITS 16
+
+/* Bits for COUNT registers */
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_BITS 8
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_BITS 8
+
+/* Bits for RAW16_18_DATAID register */
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_BITS 6
+
+/* Bits for COMP_FORMAT register, this selects the compression data format */
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS 8
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_IDX (_HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX + _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS)
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_BITS 8
+
+/* Bits for COMP_PREDICT register, this selects the predictor algorithm */
+#define _HRT_CSS_RECEIVER_2400_PREDICT_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_PREDICT_1 1
+#define _HRT_CSS_RECEIVER_2400_PREDICT_2 2
+
+/* Number of bits used for the delay registers */
+#define _HRT_CSS_RECEIVER_2400_DELAY_BITS 8
+
+/* Bits for COMP_SCHEME register, this selects the compression scheme for a VC */
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD1_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD2_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD3_BITS_IDX 10
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD4_BITS_IDX 15
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD5_BITS_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD6_BITS_IDX 25
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD7_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD8_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_BITS_BITS 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_BITS 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_IDX 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_BITS 2
+
+/* BITS for backend RAW16 and RAW 18 registers */
+
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_BITS 1
+
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_BITS 1
+
+/* These hsync and vsync values are for HSS simulation only */
+#define _HRT_CSS_RECEIVER_2400_HSYNC_VAL BIT(16)
+#define _HRT_CSS_RECEIVER_2400_VSYNC_VAL BIT(17)
+
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_WIDTH 28
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB 0
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_EOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT + 1)
+
+// SH Backend Register IDs
+#define _HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_DATA_STATE_REG_IDX 14 /* Data State 0,1,2 config */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P0_REG_IDX 15 /* Pixel Extractor config for Data State 0 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P1_REG_IDX 16 /* Pixel Extractor config for Data State 0 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P2_REG_IDX 17 /* Pixel Extractor config for Data State 0 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P3_REG_IDX 18 /* Pixel Extractor config for Data State 0 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P0_REG_IDX 19 /* Pixel Extractor config for Data State 1 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P1_REG_IDX 20 /* Pixel Extractor config for Data State 1 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P2_REG_IDX 21 /* Pixel Extractor config for Data State 1 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P3_REG_IDX 22 /* Pixel Extractor config for Data State 1 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P0_REG_IDX 23 /* Pixel Extractor config for Data State 2 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P1_REG_IDX 24 /* Pixel Extractor config for Data State 2 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P2_REG_IDX 25 /* Pixel Extractor config for Data State 2 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P3_REG_IDX 26 /* Pixel Extractor config for Data State 2 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_VALID_EOP_REG_IDX 27 /* Pixel Valid & EoP config for Pix 0,1,2,3 */
+
+#define _HRT_CSS_RECEIVER_2400_BE_NOF_REGISTERS 28
+
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_HE 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_RCF 1
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PF 2
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SM 3
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PD 4
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SD 5
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_OT 6
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_BC 7
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_WIDTH 8
+
+#endif /* _css_receiver_2400_defs_h_ */
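Putting the gen_short LSB/MSB macros together: data occupies bits 0..15, fmt_type bits 16..18, ch_id bits 19..20, and the word is zero-padded to 32 bits for the streaming monitor. A small decoding sketch; the function name is illustrative:

    #include <stdint.h>

    static void gen_short_unpack(uint32_t word, uint16_t *data,
                                 uint8_t *fmt_type, uint8_t *ch_id)
    {
            *data = word & 0xffff;          /* bits 0..15  */
            *fmt_type = (word >> 16) & 0x7; /* bits 16..18 */
            *ch_id = (word >> 19) & 0x3;    /* bits 19..20 */
    }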
diff --git a/drivers/staging/media/atomisp/pci/css_trace.h b/drivers/staging/media/atomisp/pci/css_trace.h
new file mode 100644
index 000000000000..32520c21c324
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_trace.h
@@ -0,0 +1,278 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSS_TRACE_H_
+#define __CSS_TRACE_H_
+
+#include <type_support.h>
+#include "sh_css_internal.h" /* for SH_CSS_MAX_SP_THREADS */
+
+/*
+ structs and constants for tracing
+*/
+
+/* one tracer item: major, minor and counter. The counter value can be used for GP data */
+struct trace_item_t {
+ u8 major;
+ u8 minor;
+ u16 counter;
+};
+
+#define MAX_SCRATCH_DATA 4
+#define MAX_CMD_DATA 2
+
+/* trace header: holds the version and the topology of the tracer. */
+struct trace_header_t {
+ /* 1st dword: descriptor */
+ u8 version;
+ u8 max_threads;
+ u16 max_tracer_points;
+ /* 2nd field: command + data */
+ /* 2nd dword */
+ u32 command;
+ /* 3rd & 4th dword */
+ u32 data[MAX_CMD_DATA];
+ /* 3rd field: debug pointer */
+ /* 5th & 6th dword: debug pointer mechanism */
+ u32 debug_ptr_signature;
+ u32 debug_ptr_value;
+ /* Rest of the header: status & scratch data */
+ u8 thr_status_byte[SH_CSS_MAX_SP_THREADS];
+ u16 thr_status_word[SH_CSS_MAX_SP_THREADS];
+ u32 thr_status_dword[SH_CSS_MAX_SP_THREADS];
+ u32 scratch_debug[MAX_SCRATCH_DATA];
+};
+
+/* offsets for master_port read/write */
+#define HDR_HDR_OFFSET 0 /* offset of the header */
+#define HDR_COMMAND_OFFSET offsetof(struct trace_header_t, command)
+#define HDR_DATA_OFFSET offsetof(struct trace_header_t, data)
+#define HDR_DEBUG_SIGNATURE_OFFSET offsetof(struct trace_header_t, debug_ptr_signature)
+#define HDR_DEBUG_POINTER_OFFSET offsetof(struct trace_header_t, debug_ptr_value)
+#define HDR_STATUS_OFFSET offsetof(struct trace_header_t, thr_status_byte)
+#define HDR_STATUS_OFFSET_BYTE offsetof(struct trace_header_t, thr_status_byte)
+#define HDR_STATUS_OFFSET_WORD offsetof(struct trace_header_t, thr_status_word)
+#define HDR_STATUS_OFFSET_DWORD offsetof(struct trace_header_t, thr_status_dword)
+#define HDR_STATUS_OFFSET_SCRATCH offsetof(struct trace_header_t, scratch_debug)
+
+/*
+Trace version history:
+ 1: initial version, hdr = descr, command & ptr.
+ 2: added ISP + 24-bit fields.
+ 3: added thread ID.
+ 4: added status in header.
+*/
+#define TRACER_VER 4
+
+#define TRACE_BUFF_ADDR 0xA000
+#define TRACE_BUFF_SIZE 0x1000 /* 4K allocated */
+
+#define TRACE_ENABLE_SP0 0
+#define TRACE_ENABLE_SP1 0
+#define TRACE_ENABLE_ISP 0
+
+enum TRACE_CORE_ID {
+ TRACE_SP0_ID,
+ TRACE_SP1_ID,
+ TRACE_ISP_ID
+};
+
+/* TODO: add timing format? */
+enum TRACE_DUMP_FORMAT {
+ TRACE_DUMP_FORMAT_POINT_NO_TID,
+ TRACE_DUMP_FORMAT_VALUE24,
+ TRACE_DUMP_FORMAT_VALUE24_TIMING,
+ TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA,
+ TRACE_DUMP_FORMAT_POINT
+};
+
+/* currently divided as follows:*/
+#if (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 3)
+/* can be divided as needed */
+#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE / 4)
+#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE / 4)
+#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE / 2)
+#elif (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 2)
+#if TRACE_ENABLE_SP0
+#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE / 2)
+#else
+#define TRACE_SP0_SIZE (0)
+#endif
+#if TRACE_ENABLE_SP1
+#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE / 2)
+#else
+#define TRACE_SP1_SIZE (0)
+#endif
+#if TRACE_ENABLE_ISP
+#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE / 2)
+#else
+#define TRACE_ISP_SIZE (0)
+#endif
+#elif (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 1)
+#if TRACE_ENABLE_SP0
+#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE)
+#else
+#define TRACE_SP0_SIZE (0)
+#endif
+#if TRACE_ENABLE_SP1
+#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE)
+#else
+#define TRACE_SP1_SIZE (0)
+#endif
+#if TRACE_ENABLE_ISP
+#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE)
+#else
+#define TRACE_ISP_SIZE (0)
+#endif
+#else
+#define TRACE_SP0_SIZE (0)
+#define TRACE_SP1_SIZE (0)
+#define TRACE_ISP_SIZE (0)
+#endif
+
+#define TRACE_SP0_ADDR (TRACE_BUFF_ADDR)
+#define TRACE_SP1_ADDR (TRACE_SP0_ADDR + TRACE_SP0_SIZE)
+#define TRACE_ISP_ADDR (TRACE_SP1_ADDR + TRACE_SP1_SIZE)
+
+/* check if it's a legal division */
+#if (TRACE_BUFF_SIZE < TRACE_SP0_SIZE + TRACE_SP1_SIZE + TRACE_ISP_SIZE)
+#error trace sizes are not divided correctly and are above limit
+#endif
+
+#define TRACE_SP0_HEADER_ADDR (TRACE_SP0_ADDR)
+#define TRACE_SP0_HEADER_SIZE (sizeof(struct trace_header_t))
+#define TRACE_SP0_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_SP0_DATA_ADDR (TRACE_SP0_HEADER_ADDR + TRACE_SP0_HEADER_SIZE)
+#define TRACE_SP0_DATA_SIZE (TRACE_SP0_SIZE - TRACE_SP0_HEADER_SIZE)
+#define TRACE_SP0_MAX_POINTS (TRACE_SP0_DATA_SIZE / TRACE_SP0_ITEM_SIZE)
+
+#define TRACE_SP1_HEADER_ADDR (TRACE_SP1_ADDR)
+#define TRACE_SP1_HEADER_SIZE (sizeof(struct trace_header_t))
+#define TRACE_SP1_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_SP1_DATA_ADDR (TRACE_SP1_HEADER_ADDR + TRACE_SP1_HEADER_SIZE)
+#define TRACE_SP1_DATA_SIZE (TRACE_SP1_SIZE - TRACE_SP1_HEADER_SIZE)
+#define TRACE_SP1_MAX_POINTS (TRACE_SP1_DATA_SIZE / TRACE_SP1_ITEM_SIZE)
+
+#define TRACE_ISP_HEADER_ADDR (TRACE_ISP_ADDR)
+#define TRACE_ISP_HEADER_SIZE (sizeof(struct trace_header_t))
+#define TRACE_ISP_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_ISP_DATA_ADDR (TRACE_ISP_HEADER_ADDR + TRACE_ISP_HEADER_SIZE)
+#define TRACE_ISP_DATA_SIZE (TRACE_ISP_SIZE - TRACE_ISP_HEADER_SIZE)
+#define TRACE_ISP_MAX_POINTS (TRACE_ISP_DATA_SIZE / TRACE_ISP_ITEM_SIZE)
+
+
+/* common majors */
+/* SP0 */
+#define MAJOR_MAIN 1
+#define MAJOR_ISP_STAGE_ENTRY 2
+#define MAJOR_DMA_PRXY 3
+#define MAJOR_START_ISP 4
+/* SP1 */
+#define MAJOR_OBSERVER_ISP0_EVENT 21
+#define MAJOR_OBSERVER_OUTPUT_FORM_EVENT 22
+#define MAJOR_OBSERVER_OUTPUT_SCAL_EVENT 23
+#define MAJOR_OBSERVER_IF_ACK 24
+#define MAJOR_OBSERVER_SP0_EVENT 25
+#define MAJOR_OBSERVER_SP_TERMINATE_EVENT 26
+#define MAJOR_OBSERVER_DMA_ACK 27
+#define MAJOR_OBSERVER_ACC_ACK 28
+
+#define DEBUG_PTR_SIGNATURE 0xABCD /* signature for the debug parameter pointer */
+
+/* command codes (1st byte) */
+typedef enum {
+ CMD_SET_ONE_MAJOR = 1, /* mask in one major. 2nd byte in the command is the major code */
+ CMD_UNSET_ONE_MAJOR = 2, /* mask out one major. 2nd byte in the command is the major code */
+ CMD_SET_ALL_MAJORS = 3, /* set the major print mask. the full mask is in the data DWORD */
+ CMD_SET_VERBOSITY = 4 /* set verbosity level */
+} DBG_commands;
+
+/* command signature */
+#define CMD_SIGNATURE 0xAABBCC00
+
+/* shared macros in traces infrastructure */
+/* increment the pointer cyclically */
+#define DBG_NEXT_ITEM(x, max_items) ((((x) + 1) >= (max_items)) ? 0 : (x) + 1)
+#define DBG_PREV_ITEM(x, max_items) ((x) ? (x) - 1 : (max_items) - 1)
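+/*
+ * E.g. DBG_NEXT_ITEM(max_items - 1, max_items) wraps to 0 and
+ * DBG_PREV_ITEM(0, max_items) wraps to max_items - 1.
+ */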
+
+#define FIELD_MASK(width) (((1 << (width)) - 1))
+#define FIELD_PACK(value, mask, offset) (((value) & (mask)) << (offset))
+#define FIELD_UNPACK(value, mask, offset) (((value) >> (offset)) & (mask))
+
+#define FIELD_VALUE_OFFSET (0)
+#define FIELD_VALUE_WIDTH (16)
+#define FIELD_VALUE_MASK FIELD_MASK(FIELD_VALUE_WIDTH)
+#define FIELD_VALUE_PACK(f) FIELD_PACK(f, FIELD_VALUE_MASK, FIELD_VALUE_OFFSET)
+#define FIELD_VALUE_UNPACK(f) FIELD_UNPACK(f, FIELD_VALUE_MASK, FIELD_VALUE_OFFSET)
+
+#define FIELD_MINOR_OFFSET (FIELD_VALUE_OFFSET + FIELD_VALUE_WIDTH)
+#define FIELD_MINOR_WIDTH (8)
+#define FIELD_MINOR_MASK FIELD_MASK(FIELD_MINOR_WIDTH)
+#define FIELD_MINOR_PACK(f) FIELD_PACK(f, FIELD_MINOR_MASK, FIELD_MINOR_OFFSET)
+#define FIELD_MINOR_UNPACK(f) FIELD_UNPACK(f, FIELD_MINOR_MASK, FIELD_MINOR_OFFSET)
+
+#define FIELD_MAJOR_OFFSET (FIELD_MINOR_OFFSET + FIELD_MINOR_WIDTH)
+#define FIELD_MAJOR_WIDTH (5)
+#define FIELD_MAJOR_MASK FIELD_MASK(FIELD_MAJOR_WIDTH)
+#define FIELD_MAJOR_PACK(f) FIELD_PACK(f, FIELD_MAJOR_MASK, FIELD_MAJOR_OFFSET)
+#define FIELD_MAJOR_UNPACK(f) FIELD_UNPACK(f, FIELD_MAJOR_MASK, FIELD_MAJOR_OFFSET)
+
+/* for quick traces - only insertion, compatible with the regular point */
+#define FIELD_FULL_MAJOR_WIDTH (8)
+#define FIELD_FULL_MAJOR_MASK FIELD_MASK(FIELD_FULL_MAJOR_WIDTH)
+#define FIELD_FULL_MAJOR_PACK(f) FIELD_PACK(f, FIELD_FULL_MAJOR_MASK, FIELD_MAJOR_OFFSET)
+
+/* The following two fields are used only when the FIELD_TID value is 111b.
+ * It means we do not want to use a thread ID, but a format. In this case,
+ * the two MSBs of the major field indicate the format.
+ */
+#define FIELD_MAJOR_W_FMT_OFFSET FIELD_MAJOR_OFFSET
+#define FIELD_MAJOR_W_FMT_WIDTH (3)
+#define FIELD_MAJOR_W_FMT_MASK FIELD_MASK(FIELD_MAJOR_W_FMT_WIDTH)
+#define FIELD_MAJOR_W_FMT_PACK(f) FIELD_PACK(f, FIELD_MAJOR_W_FMT_MASK, FIELD_MAJOR_W_FMT_OFFSET)
+#define FIELD_MAJOR_W_FMT_UNPACK(f) FIELD_UNPACK(f, FIELD_MAJOR_W_FMT_MASK, FIELD_MAJOR_W_FMT_OFFSET)
+
+#define FIELD_FORMAT_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_W_FMT_WIDTH)
+#define FIELD_FORMAT_WIDTH (2)
+#define FIELD_FORMAT_MASK FIELD_MASK(FIELD_FORMAT_WIDTH)
+#define FIELD_FORMAT_PACK(f) FIELD_PACK(f, FIELD_FORMAT_MASK, FIELD_FORMAT_OFFSET)
+#define FIELD_FORMAT_UNPACK(f) FIELD_UNPACK(f, FIELD_FORMAT_MASK, FIELD_FORMAT_OFFSET)
+
+#define FIELD_TID_SEL_FORMAT_PAT (7)
+
+#define FIELD_TID_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_WIDTH)
+#define FIELD_TID_WIDTH (3)
+#define FIELD_TID_MASK FIELD_MASK(FIELD_TID_WIDTH)
+#define FIELD_TID_PACK(f) FIELD_PACK(f, FIELD_TID_MASK, FIELD_TID_OFFSET)
+#define FIELD_TID_UNPACK(f) FIELD_UNPACK(f, FIELD_TID_MASK, FIELD_TID_OFFSET)
+
+#define FIELD_VALUE_24_OFFSET (0)
+#define FIELD_VALUE_24_WIDTH (24)
+#define FIELD_VALUE_24_MASK FIELD_MASK(FIELD_VALUE_24_WIDTH)
+#define FIELD_VALUE_24_PACK(f) FIELD_PACK(f, FIELD_VALUE_24_MASK, FIELD_VALUE_24_OFFSET)
+#define FIELD_VALUE_24_UNPACK(f) FIELD_UNPACK(f, FIELD_VALUE_24_MASK, FIELD_VALUE_24_OFFSET)
+
+#define PACK_TRACEPOINT(tid, major, minor, value) \
+ (FIELD_TID_PACK(tid) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value))
+
+#define PACK_QUICK_TRACEPOINT(major, minor) \
+ (FIELD_FULL_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor))
+
+#define PACK_FORMATTED_TRACEPOINT(format, major, minor, value) \
+ (FIELD_TID_PACK(FIELD_TID_SEL_FORMAT_PAT) | FIELD_FORMAT_PACK(format) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value))
+
+#define PACK_TRACE_VALUE24(major, value) \
+ (FIELD_TID_PACK(FIELD_TID_SEL_FORMAT_PAT) | FIELD_MAJOR_PACK(major) | FIELD_VALUE_24_PACK(value))
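+
+/*
+ * Illustrative round trip (values hypothetical): for
+ * pt = PACK_TRACEPOINT(1, MAJOR_MAIN, 0x12, 0xBEEF),
+ * FIELD_TID_UNPACK(pt) == 1, FIELD_MAJOR_UNPACK(pt) == MAJOR_MAIN,
+ * FIELD_MINOR_UNPACK(pt) == 0x12 and FIELD_VALUE_UNPACK(pt) == 0xBEEF.
+ */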
+
+#endif /* __CSS_TRACE_H_ */
diff --git a/drivers/staging/media/atomisp/pci/defs.h b/drivers/staging/media/atomisp/pci/defs.h
new file mode 100644
index 000000000000..47505f41790c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/defs.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_DEFS_H_
+#define _HRT_DEFS_H_
+
+#ifndef HRTCAT
+#define _HRTCAT(m, n) m##n
+#define HRTCAT(m, n) _HRTCAT(m, n)
+#endif
+
+#ifndef HRTSTR
+#define _HRTSTR(x) #x
+#define HRTSTR(x) _HRTSTR(x)
+#endif
+
+#ifndef HRTMIN
+#define HRTMIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef HRTMAX
+#define HRTMAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#endif /* _HRT_DEFS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/dma_v2_defs.h b/drivers/staging/media/atomisp/pci/dma_v2_defs.h
new file mode 100644
index 000000000000..8741b8347dd4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/dma_v2_defs.h
@@ -0,0 +1,199 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _dma_v2_defs_h
+#define _dma_v2_defs_h
+
+#define _DMA_V2_NUM_CHANNELS_ID MaxNumChannels
+#define _DMA_V2_CONNECTIONS_ID Connections
+#define _DMA_V2_DEV_ELEM_WIDTHS_ID DevElemWidths
+#define _DMA_V2_DEV_FIFO_DEPTH_ID DevFifoDepth
+#define _DMA_V2_DEV_FIFO_RD_LAT_ID DevFifoRdLat
+#define _DMA_V2_DEV_FIFO_LAT_BYPASS_ID DevFifoRdLatBypass
+#define _DMA_V2_DEV_NO_BURST_ID DevNoBurst
+#define _DMA_V2_DEV_RD_ACCEPT_ID DevRdAccept
+#define _DMA_V2_DEV_SRMD_ID DevSRMD
+#define _DMA_V2_DEV_HAS_CRUN_ID CRunMasters
+#define _DMA_V2_CTRL_ACK_FIFO_DEPTH_ID CtrlAckFifoDepth
+#define _DMA_V2_CMD_FIFO_DEPTH_ID CommandFifoDepth
+#define _DMA_V2_CMD_FIFO_RD_LAT_ID CommandFifoRdLat
+#define _DMA_V2_CMD_FIFO_LAT_BYPASS_ID CommandFifoRdLatBypass
+#define _DMA_V2_NO_PACK_ID has_no_pack
+
+#define _DMA_V2_REG_ALIGN 4
+#define _DMA_V2_REG_ADDR_BITS 2
+
+/* Command word */
+#define _DMA_V2_CMD_IDX 0
+#define _DMA_V2_CMD_BITS 6
+#define _DMA_V2_CHANNEL_IDX (_DMA_V2_CMD_IDX + _DMA_V2_CMD_BITS)
+#define _DMA_V2_CHANNEL_BITS 5
+
+/* The command to set a parameter contains the PARAM field next */
+#define _DMA_V2_PARAM_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_PARAM_BITS 4
+
+/* Commands to read, write or init specific blocks contain these
+ three values */
+#define _DMA_V2_SPEC_DEV_A_XB_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_SPEC_DEV_A_XB_BITS 8
+#define _DMA_V2_SPEC_DEV_B_XB_IDX (_DMA_V2_SPEC_DEV_A_XB_IDX + _DMA_V2_SPEC_DEV_A_XB_BITS)
+#define _DMA_V2_SPEC_DEV_B_XB_BITS 8
+#define _DMA_V2_SPEC_YB_IDX (_DMA_V2_SPEC_DEV_B_XB_IDX + _DMA_V2_SPEC_DEV_B_XB_BITS)
+#define _DMA_V2_SPEC_YB_BITS (32 - _DMA_V2_SPEC_DEV_B_XB_BITS - _DMA_V2_SPEC_DEV_A_XB_BITS - _DMA_V2_CMD_BITS - _DMA_V2_CHANNEL_BITS)
+
+/* */
+#define _DMA_V2_CMD_CTRL_IDX 4
+#define _DMA_V2_CMD_CTRL_BITS 4
+
+/* Packing setup word */
+#define _DMA_V2_CONNECTION_IDX 0
+#define _DMA_V2_CONNECTION_BITS 4
+#define _DMA_V2_EXTENSION_IDX (_DMA_V2_CONNECTION_IDX + _DMA_V2_CONNECTION_BITS)
+#define _DMA_V2_EXTENSION_BITS 1
+
+/* Elements packing word */
+#define _DMA_V2_ELEMENTS_IDX 0
+#define _DMA_V2_ELEMENTS_BITS 8
+#define _DMA_V2_LEFT_CROPPING_IDX (_DMA_V2_ELEMENTS_IDX + _DMA_V2_ELEMENTS_BITS)
+#define _DMA_V2_LEFT_CROPPING_BITS 8
+
+#define _DMA_V2_WIDTH_IDX 0
+#define _DMA_V2_WIDTH_BITS 16
+
+#define _DMA_V2_HEIGHT_IDX 0
+#define _DMA_V2_HEIGHT_BITS 16
+
+#define _DMA_V2_STRIDE_IDX 0
+#define _DMA_V2_STRIDE_BITS 32
+
+/* Command IDs */
+#define _DMA_V2_MOVE_B2A_COMMAND 0
+#define _DMA_V2_MOVE_B2A_BLOCK_COMMAND 1
+#define _DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND 2
+#define _DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND 3
+#define _DMA_V2_MOVE_A2B_COMMAND 4
+#define _DMA_V2_MOVE_A2B_BLOCK_COMMAND 5
+#define _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND 6
+#define _DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND 7
+#define _DMA_V2_INIT_A_COMMAND 8
+#define _DMA_V2_INIT_A_BLOCK_COMMAND 9
+#define _DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND 10
+#define _DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND 11
+#define _DMA_V2_INIT_B_COMMAND 12
+#define _DMA_V2_INIT_B_BLOCK_COMMAND 13
+#define _DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND 14
+#define _DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND 15
+#define _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_CONFIG_CHANNEL_COMMAND 32
+#define _DMA_V2_SET_CHANNEL_PARAM_COMMAND 33
+#define _DMA_V2_SET_CRUN_COMMAND 62
+
+/* Channel Parameter IDs */
+#define _DMA_V2_PACKING_SETUP_PARAM 0
+#define _DMA_V2_STRIDE_A_PARAM 1
+#define _DMA_V2_ELEM_CROPPING_A_PARAM 2
+#define _DMA_V2_WIDTH_A_PARAM 3
+#define _DMA_V2_STRIDE_B_PARAM 4
+#define _DMA_V2_ELEM_CROPPING_B_PARAM 5
+#define _DMA_V2_WIDTH_B_PARAM 6
+#define _DMA_V2_HEIGHT_PARAM 7
+#define _DMA_V2_QUEUED_CMDS 8
+
+/* Parameter Constants */
+#define _DMA_V2_ZERO_EXTEND 0
+#define _DMA_V2_SIGN_EXTEND 1
+
+/* SLAVE address map */
+#define _DMA_V2_SEL_FSM_CMD 0
+#define _DMA_V2_SEL_CH_REG 1
+#define _DMA_V2_SEL_CONN_GROUP 2
+#define _DMA_V2_SEL_DEV_INTERF 3
+
+#define _DMA_V2_ADDR_SEL_COMP_IDX 12
+#define _DMA_V2_ADDR_SEL_COMP_BITS 4
+#define _DMA_V2_ADDR_SEL_CH_REG_IDX 2
+#define _DMA_V2_ADDR_SEL_CH_REG_BITS 6
+#define _DMA_V2_ADDR_SEL_PARAM_IDX (_DMA_V2_ADDR_SEL_CH_REG_BITS + _DMA_V2_ADDR_SEL_CH_REG_IDX)
+#define _DMA_V2_ADDR_SEL_PARAM_BITS 4
+
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_IDX 2
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_BITS 6
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX (_DMA_V2_ADDR_SEL_GROUP_COMP_BITS + _DMA_V2_ADDR_SEL_GROUP_COMP_IDX)
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS 4
+
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX 2
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS 6
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX (_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX + _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS)
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS 4
+
+#define _DMA_V2_FSM_GROUP_CMD_IDX 0
+#define _DMA_V2_FSM_GROUP_ADDR_SRC_IDX 1
+#define _DMA_V2_FSM_GROUP_ADDR_DEST_IDX 2
+#define _DMA_V2_FSM_GROUP_CMD_CTRL_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_PACK_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_REQ_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_WR_IDX 7
+
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX 7
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX 8
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX 9
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX 10
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX 11
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX 12
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX 13
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX 14
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX 15
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_CMD_CTRL_IDX 15
+
+#define _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX 3
+
+#define _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_REQ_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_BURST_IDX 4
+
+#define _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_WR_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_BURST_IDX 4
+
+#define _DMA_V2_DEV_INTERF_REQ_SIDE_STATUS_IDX 0
+#define _DMA_V2_DEV_INTERF_SEND_SIDE_STATUS_IDX 1
+#define _DMA_V2_DEV_INTERF_FIFO_STATUS_IDX 2
+#define _DMA_V2_DEV_INTERF_REQ_ONLY_COMPLETE_BURST_IDX 3
+#define _DMA_V2_DEV_INTERF_MAX_BURST_IDX 4
+#define _DMA_V2_DEV_INTERF_CHK_ADDR_ALIGN 5
+
+#endif /* _dma_v2_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/gdc_v2_defs.h b/drivers/staging/media/atomisp/pci/gdc_v2_defs.h
new file mode 100644
index 000000000000..3cc627aa6b09
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/gdc_v2_defs.h
@@ -0,0 +1,163 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef HRT_GDC_v2_defs_h_
+#define HRT_GDC_v2_defs_h_
+
+#define HRT_GDC_IS_V2
+
+#define HRT_GDC_N 1024 /* Top-level design constant, equal to the number of entries in the LUT */
+#define HRT_GDC_FRAC_BITS 10 /* Number of fractional bits in the GDC block, driven by the size of the LUT */
+
+#define HRT_GDC_BLI_FRAC_BITS 4 /* Number of fractional bits for the bi-linear interpolation type */
+#define HRT_GDC_BLI_COEF_ONE BIT(HRT_GDC_BLI_FRAC_BITS)
+
+#define HRT_GDC_BCI_COEF_BITS 14 /* 14 bits per coefficient */
+/* We represent signed 10-bit coefficients; the supported range is
+ * [-256, .., +256] in 14-bit signed notation. We need all ten bits
+ * (the MSB must be zero); "-s" is inserted to solve this issue, and
+ * therefore "1" is equal to +256.
+ */
+#define HRT_GDC_BCI_COEF_ONE (1 << (HRT_GDC_BCI_COEF_BITS - 2))
+#define HRT_GDC_BCI_COEF_MASK ((1 << HRT_GDC_BCI_COEF_BITS) - 1)
+
+/* 1024 addresses, 4 coefficients per address, 2 bytes per coefficient */
+#define HRT_GDC_LUT_BYTES (HRT_GDC_N * 4 * 2)
+
+#define _HRT_GDC_REG_ALIGN 4
+
+// 31 30 29 25 24 0
+// |-----|---|--------|------------------------|
+// | CMD | C | Reg_ID | Value |
+
+// There are just two commands possible for the GDC block:
+// 1 - Configure reg
+// 0 - Data token
+
+// C - Reserved bit
+// Used in protocol to indicate whether it is C-run or other type of runs
+// In case of C-run, this bit has a value of 1, for all the other runs, it is 0.
+
+// Reg_ID - Address of the register to be configured
+
+// Value - Value to store to the addressed register, maximum of 25 bits
+
+// Configure reg command is not followed by any other token.
+// The address of the register and the data to be filled in is contained in the same token
+
+// When the first data token is received, it must be:
+// 1. FRX and FRY (device configured in one of the scaling modes) ***DEFAULT MODE***, or,
+// 2. P0'X (device configured in one of the tetragon modes)
+// After the first data token is received, a pre-defined number of tokens with the following meaning follows:
+// 1. two tokens: SRC address ; DST address
+// 2. nine tokens: P0'Y, .., P3'Y ; SRC address ; DST address
+
+#define HRT_GDC_CONFIG_CMD 1
+#define HRT_GDC_DATA_CMD 0
+
+#define HRT_GDC_CMD_POS 31
+#define HRT_GDC_CMD_BITS 1
+#define HRT_GDC_CRUN_POS 30
+#define HRT_GDC_REG_ID_POS 25
+#define HRT_GDC_REG_ID_BITS 5
+#define HRT_GDC_DATA_POS 0
+#define HRT_GDC_DATA_BITS 25
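+
+/*
+ * Sketch (this header provides no builder macro): a "configure reg"
+ * token can be assembled as
+ *   (HRT_GDC_CONFIG_CMD << HRT_GDC_CMD_POS) |
+ *   (reg_id << HRT_GDC_REG_ID_POS) | (value << HRT_GDC_DATA_POS)
+ * where reg_id is one of the HRT_GDC_*_IDX register indices below.
+ */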
+
+#define HRT_GDC_FRYIPXFRX_BITS 26
+#define HRT_GDC_P0X_BITS 23
+
+#define HRT_GDC_MAX_OXDIM (8192 - 64)
+#define HRT_GDC_MAX_OYDIM 4095
+#define HRT_GDC_MAX_IXDIM (8192 - 64)
+#define HRT_GDC_MAX_IYDIM 4095
+#define HRT_GDC_MAX_DS_FAC 16
+#define HRT_GDC_MAX_DX (HRT_GDC_MAX_DS_FAC * HRT_GDC_N - 1)
+#define HRT_GDC_MAX_DY HRT_GDC_MAX_DX
+
+/* GDC lookup table entries are 10-bit values, but they are
+ stored two by two as 32-bit values, yielding 16 bits per entry.
+ A GDC lookup table contains 64 * 4 elements */
+
+#define HRT_GDC_PERF_1_1_pix 0
+#define HRT_GDC_PERF_2_1_pix 1
+#define HRT_GDC_PERF_1_2_pix 2
+#define HRT_GDC_PERF_2_2_pix 3
+
+#define HRT_GDC_NND_MODE 0
+#define HRT_GDC_BLI_MODE 1
+#define HRT_GDC_BCI_MODE 2
+#define HRT_GDC_LUT_MODE 3
+
+#define HRT_GDC_SCAN_STB 0
+#define HRT_GDC_SCAN_STR 1
+
+#define HRT_GDC_MODE_SCALING 0
+#define HRT_GDC_MODE_TETRAGON 1
+
+#define HRT_GDC_LUT_COEFF_OFFSET 16
+#define HRT_GDC_FRY_BIT_OFFSET 16
+// FRYIPXFRX is the only register where we store two values in one field,
+// to save one token in the scaling protocol.
+// Like this, we have three tokens in the scaling protocol,
+// Otherwise, we would have had four.
+// The register bit-map is:
+// 31 26 25 16 15 10 9 0
+// |------|----------|------|----------|
+// | XXXX | FRY | IPX | FRX |
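+// Illustrative packing (the IPX shift is assumed to equal
+// HRT_GDC_FRAC_BITS, which is not spelled out as a macro here):
+// fryipxfrx = (fry << HRT_GDC_FRY_BIT_OFFSET) | (ipx << HRT_GDC_FRAC_BITS) | frx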
+
+#define HRT_GDC_CE_FSM0_POS 0
+#define HRT_GDC_CE_FSM0_LEN 2
+#define HRT_GDC_CE_OPY_POS 2
+#define HRT_GDC_CE_OPY_LEN 14
+#define HRT_GDC_CE_OPX_POS 16
+#define HRT_GDC_CE_OPX_LEN 16
+// CHK_ENGINE register bit-map:
+// 31 16 15 2 1 0
+// |----------------|-----------|----|
+// | OPX | OPY |FSM0|
+// However, for the time being at least,
+// this implementation is meaningless in the hss model,
+// so we just return 0.
+
+#define HRT_GDC_CHK_ENGINE_IDX 0
+#define HRT_GDC_WOIX_IDX 1
+#define HRT_GDC_WOIY_IDX 2
+#define HRT_GDC_BPP_IDX 3
+#define HRT_GDC_FRYIPXFRX_IDX 4
+#define HRT_GDC_OXDIM_IDX 5
+#define HRT_GDC_OYDIM_IDX 6
+#define HRT_GDC_SRC_ADDR_IDX 7
+#define HRT_GDC_SRC_END_ADDR_IDX 8
+#define HRT_GDC_SRC_WRAP_ADDR_IDX 9
+#define HRT_GDC_SRC_STRIDE_IDX 10
+#define HRT_GDC_DST_ADDR_IDX 11
+#define HRT_GDC_DST_STRIDE_IDX 12
+#define HRT_GDC_DX_IDX 13
+#define HRT_GDC_DY_IDX 14
+#define HRT_GDC_P0X_IDX 15
+#define HRT_GDC_P0Y_IDX 16
+#define HRT_GDC_P1X_IDX 17
+#define HRT_GDC_P1Y_IDX 18
+#define HRT_GDC_P2X_IDX 19
+#define HRT_GDC_P2Y_IDX 20
+#define HRT_GDC_P3X_IDX 21
+#define HRT_GDC_P3Y_IDX 22
+#define HRT_GDC_PERF_POINT_IDX 23 // 1x1 ; 1x2 ; 2x1 ; 2x2 pixels per cc
+#define HRT_GDC_INTERP_TYPE_IDX 24 // NND ; BLI ; BCI ; LUT
+#define HRT_GDC_SCAN_IDX 25 // 0 = STB (Slide To Bottom) ; 1 = STR (Slide To Right)
+#define HRT_GDC_PROC_MODE_IDX 26 // 0 = Scaling ; 1 = Tetragon
+
+#define HRT_GDC_LUT_IDX 32
+
+#endif /* HRT_GDC_v2_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/gp_timer_defs.h b/drivers/staging/media/atomisp/pci/gp_timer_defs.h
new file mode 100644
index 000000000000..ffd7b38fce9d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/gp_timer_defs.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gp_timer_defs_h
+#define _gp_timer_defs_h
+
+#define _HRT_GP_TIMER_REG_ALIGN 4
+
+#define HIVE_GP_TIMER_RESET_REG_IDX 0
+#define HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX 1
+#define HIVE_GP_TIMER_ENABLE_REG_IDX(timer) (HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX + 1 + timer)
+#define HIVE_GP_TIMER_VALUE_REG_IDX(timer, timers) (HIVE_GP_TIMER_ENABLE_REG_IDX(timers) + timer)
+#define HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer, timers) (HIVE_GP_TIMER_VALUE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer, timers) (HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq, timers) (HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timers, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq, timers, irqs) (HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irqs, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq, timers, irqs) (HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irqs, timers, irqs) + irq)
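+
+/*
+ * Worked example (assuming 8 timers): register 0 is the reset, 1 the
+ * overall enable, 2..9 the per-timer enables, 10..17 the timer values,
+ * 18..25 the count types and 26..33 the signal selects; the IRQ
+ * trigger, select and enable registers follow in the same fashion.
+ */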
+
+#define HIVE_GP_TIMER_COUNT_TYPE_HIGH 0
+#define HIVE_GP_TIMER_COUNT_TYPE_LOW 1
+#define HIVE_GP_TIMER_COUNT_TYPE_POSEDGE 2
+#define HIVE_GP_TIMER_COUNT_TYPE_NEGEDGE 3
+#define HIVE_GP_TIMER_COUNT_TYPES 4
+
+#endif /* _gp_timer_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/gpio_block_defs.h b/drivers/staging/media/atomisp/pci/gpio_block_defs.h
new file mode 100644
index 000000000000..96286a141b00
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/gpio_block_defs.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gpio_block_defs_h_
+#define _gpio_block_defs_h_
+
+#define _HRT_GPIO_BLOCK_REG_ALIGN 4
+
+/* R/W registers */
+#define _gpio_block_reg_do_e 0
+#define _gpio_block_reg_do_select 1
+#define _gpio_block_reg_do_0 2
+#define _gpio_block_reg_do_1 3
+#define _gpio_block_reg_do_pwm_cnt_0 4
+#define _gpio_block_reg_do_pwm_cnt_1 5
+#define _gpio_block_reg_do_pwm_cnt_2 6
+#define _gpio_block_reg_do_pwm_cnt_3 7
+#define _gpio_block_reg_do_pwm_main_cnt 8
+#define _gpio_block_reg_do_pwm_enable 9
+#define _gpio_block_reg_di_debounce_sel 10
+#define _gpio_block_reg_di_debounce_cnt_0 11
+#define _gpio_block_reg_di_debounce_cnt_1 12
+#define _gpio_block_reg_di_debounce_cnt_2 13
+#define _gpio_block_reg_di_debounce_cnt_3 14
+#define _gpio_block_reg_di_active_level 15
+
+/* read-only registers */
+#define _gpio_block_reg_di 16
+
+#endif /* _gpio_block_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_2401_irq_types_hrt.h b/drivers/staging/media/atomisp/pci/hive_isp_css_2401_irq_types_hrt.h
new file mode 100644
index 000000000000..0760b95818f6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_2401_irq_types_hrt.h
@@ -0,0 +1,68 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_
+#define _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_
+
+/*
+ * These are the indices of each interrupt in the interrupt
+ * controller's registers. They can be used as the irq_id
+ * argument to the hrt functions in irq_controller.h.
+ *
+ * The definitions are taken from <system>_defs.h
+ */
+typedef enum hrt_isp_css_irq {
+ hrt_isp_css_irq_gpio_pin_0 = HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_1 = HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_2 = HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_3 = HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_4 = HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_5 = HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_6 = HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_7 = HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_8 = HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_9 = HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_10 = HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_11 = HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID,
+ hrt_isp_css_irq_sp = HIVE_GP_DEV_IRQ_SP_BIT_ID,
+ hrt_isp_css_irq_isp = HIVE_GP_DEV_IRQ_ISP_BIT_ID,
+ hrt_isp_css_irq_isys = HIVE_GP_DEV_IRQ_ISYS_BIT_ID,
+ hrt_isp_css_irq_isel = HIVE_GP_DEV_IRQ_ISEL_BIT_ID,
+ hrt_isp_css_irq_ifmt = HIVE_GP_DEV_IRQ_IFMT_BIT_ID,
+ hrt_isp_css_irq_sp_stream_mon = HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID,
+ hrt_isp_css_irq_isp_stream_mon = HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID,
+ hrt_isp_css_irq_mod_stream_mon = HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID,
+ hrt_isp_css_irq_is2401 = HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_isp_bamem_error = HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_isp_dmem_error = HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_sp_icache_mem_error = HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_sp_dmem_error = HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_mmu_cache_mem_error = HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_gp_timer_0 = HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID,
+ hrt_isp_css_irq_gp_timer_1 = HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID,
+ hrt_isp_css_irq_sw_pin_0 = HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID,
+ hrt_isp_css_irq_sw_pin_1 = HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID,
+ hrt_isp_css_irq_dma = HIVE_GP_DEV_IRQ_DMA_BIT_ID,
+ hrt_isp_css_irq_sp_stream_mon_b = HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID,
+ /* this must (obviously) be the last one in the enum */
+ hrt_isp_css_irq_num_irqs
+} hrt_isp_css_irq_t;
+
+typedef enum hrt_isp_css_irq_status {
+ hrt_isp_css_irq_status_error,
+ hrt_isp_css_irq_status_more_irqs,
+ hrt_isp_css_irq_status_success
+} hrt_isp_css_irq_status_t;
+
+#endif /* _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h
new file mode 100644
index 000000000000..7580cf5c9624
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h
@@ -0,0 +1,81 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_GLOBAL_H_INCLUDED__
+#define __DEBUG_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define DEBUG_BUF_SIZE 1024
+#define DEBUG_BUF_MASK (DEBUG_BUF_SIZE - 1)
+
+#define DEBUG_DATA_ENABLE_ADDR 0x00
+#define DEBUG_DATA_BUF_MODE_ADDR 0x04
+#define DEBUG_DATA_HEAD_ADDR 0x08
+#define DEBUG_DATA_TAIL_ADDR 0x0C
+#define DEBUG_DATA_BUF_ADDR 0x10
+
+#define DEBUG_DATA_ENABLE_DDR_ADDR 0x00
+#define DEBUG_DATA_BUF_MODE_DDR_ADDR HIVE_ISP_DDR_WORD_BYTES
+#define DEBUG_DATA_HEAD_DDR_ADDR (2 * HIVE_ISP_DDR_WORD_BYTES)
+#define DEBUG_DATA_TAIL_DDR_ADDR (3 * HIVE_ISP_DDR_WORD_BYTES)
+#define DEBUG_DATA_BUF_DDR_ADDR (4 * HIVE_ISP_DDR_WORD_BYTES)
+
+#define DEBUG_BUFFER_ISP_DMEM_ADDR 0x0
+
+/*
+ * Enable HAS_WATCHDOG_SP_THREAD_DEBUG for additional SP thread and
+ * pipe information on watchdog output
+ * #undef HAS_WATCHDOG_SP_THREAD_DEBUG
+ * #define HAS_WATCHDOG_SP_THREAD_DEBUG
+ */
+
+/*
+ * The linear buffer mode accepts data until the first
+ * overflow and then stops accepting new data.
+ * The circular buffer mode accepts data as long as there is
+ * space and discards new data once the buffer is full.
+ */
+typedef enum {
+ DEBUG_BUFFER_MODE_LINEAR = 0,
+ DEBUG_BUFFER_MODE_CIRCULAR,
+ N_DEBUG_BUFFER_MODE
+} debug_buf_mode_t;
+
+struct debug_data_s {
+ u32 enable;
+ u32 bufmode;
+ u32 head;
+ u32 tail;
+ u32 buf[DEBUG_BUF_SIZE];
+};
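+
+/*
+ * Sketch (not part of this header) of a circular-mode writer using the
+ * fields above; DEBUG_BUF_SIZE is a power of two, so wrapping is a
+ * simple mask, and the word is dropped when the buffer is full:
+ *
+ *	u32 next = (d->head + 1) & DEBUG_BUF_MASK;
+ *	if (next != d->tail) {
+ *		d->buf[d->head] = word;
+ *		d->head = next;
+ *	}
+ */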
+
+/* thread.sp.c does not have a notion of HIVE_ISP_DDR_WORD_BYTES;
+ still, one point of control is needed for debug purposes */
+
+#ifdef HIVE_ISP_DDR_WORD_BYTES
+struct debug_data_ddr_s {
+ u32 enable;
+ s8 padding1[HIVE_ISP_DDR_WORD_BYTES - sizeof(uint32_t)];
+ u32 bufmode;
+ s8 padding2[HIVE_ISP_DDR_WORD_BYTES - sizeof(uint32_t)];
+ u32 head;
+ s8 padding3[HIVE_ISP_DDR_WORD_BYTES - sizeof(uint32_t)];
+ u32 tail;
+ s8 padding4[HIVE_ISP_DDR_WORD_BYTES - sizeof(uint32_t)];
+ u32 buf[DEBUG_BUF_SIZE];
+};
+#endif
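+
+/*
+ * Note that every field of debug_data_ddr_s is padded out to one DDR
+ * word, so the fields land exactly on the DEBUG_DATA_*_DDR_ADDR
+ * offsets defined above.
+ */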
+
+#endif /* __DEBUG_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h
new file mode 100644
index 000000000000..85d509f5b923
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h
@@ -0,0 +1,254 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_GLOBAL_H_INCLUDED__
+#define __DMA_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define IS_DMA_VERSION_2
+
+#define HIVE_ISP_NUM_DMA_CONNS 3
+#define HIVE_ISP_NUM_DMA_CHANNELS 32
+
+#define N_DMA_CHANNEL_ID HIVE_ISP_NUM_DMA_CHANNELS
+
+#include "dma_v2_defs.h"
+
+/*
+ * Command token bit mappings
+ *
+ * transfer / config
+ * param id[4] channel id[5] cmd id[6]
+ * | b14 .. b11 | b10 ... b6 | b5 ... b0 |
+ *
+ *
+ * fast transfer:
+ * height[5] width[8] width[8] channel id[5] cmd id[6]
+ * | b31 .. b27 | b26 .. b19 | b18 .. b11 | b10 ... b6 | b5 ... b0 |
+ *
+ */
+
+#define _DMA_PACKING_SETUP_PARAM _DMA_V2_PACKING_SETUP_PARAM
+#define _DMA_HEIGHT_PARAM _DMA_V2_HEIGHT_PARAM
+#define _DMA_STRIDE_A_PARAM _DMA_V2_STRIDE_A_PARAM
+#define _DMA_ELEM_CROPPING_A_PARAM _DMA_V2_ELEM_CROPPING_A_PARAM
+#define _DMA_WIDTH_A_PARAM _DMA_V2_WIDTH_A_PARAM
+#define _DMA_STRIDE_B_PARAM _DMA_V2_STRIDE_B_PARAM
+#define _DMA_ELEM_CROPPING_B_PARAM _DMA_V2_ELEM_CROPPING_B_PARAM
+#define _DMA_WIDTH_B_PARAM _DMA_V2_WIDTH_B_PARAM
+
+#define _DMA_ZERO_EXTEND _DMA_V2_ZERO_EXTEND
+#define _DMA_SIGN_EXTEND _DMA_V2_SIGN_EXTEND
+
+typedef unsigned int dma_channel;
+
+typedef enum {
+ dma_isp_to_bus_connection = HIVE_DMA_ISP_BUS_CONN,
+ dma_isp_to_ddr_connection = HIVE_DMA_ISP_DDR_CONN,
+ dma_bus_to_ddr_connection = HIVE_DMA_BUS_DDR_CONN,
+} dma_connection;
+
+typedef enum {
+ dma_zero_extension = _DMA_ZERO_EXTEND,
+ dma_sign_extension = _DMA_SIGN_EXTEND
+} dma_extension;
+
+#define DMA_PROP_SHIFT(val, param) ((val) << _DMA_V2_ ## param ## _IDX)
+#define DMA_PROP_MASK(param) ((1U << _DMA_V2_ ## param ## _BITS) - 1)
+#define DMA_PACK(val, param) DMA_PROP_SHIFT((val) & DMA_PROP_MASK(param), param)
+
+#define DMA_PACK_COMMAND(cmd) DMA_PACK(cmd, CMD)
+#define DMA_PACK_CHANNEL(ch) DMA_PACK(ch, CHANNEL)
+#define DMA_PACK_PARAM(par) DMA_PACK(par, PARAM)
+#define DMA_PACK_EXTENSION(ext) DMA_PACK(ext, EXTENSION)
+#define DMA_PACK_LEFT_CROPPING(lc) DMA_PACK(lc, LEFT_CROPPING)
+#define DMA_PACK_WIDTH_A(w) DMA_PACK(w, SPEC_DEV_A_XB)
+#define DMA_PACK_WIDTH_B(w) DMA_PACK(w, SPEC_DEV_B_XB)
+#define DMA_PACK_HEIGHT(h) DMA_PACK(h, SPEC_YB)
+
+#define DMA_PACK_CMD_CHANNEL(cmd, ch) (DMA_PACK_COMMAND(cmd) | DMA_PACK_CHANNEL(ch))
+#define DMA_PACK_SETUP(conn, ext) ((conn) | DMA_PACK_EXTENSION(ext))
+#define DMA_PACK_CROP_ELEMS(elems, crop) ((elems) | DMA_PACK_LEFT_CROPPING(crop))
+
+#define hive_dma_snd(dma_id, token) OP_std_snd(dma_id, (unsigned int)(token))
+
+#define DMA_PACK_BLOCK_CMD(cmd, ch, width_a, width_b, height) \
+ (DMA_PACK_COMMAND(cmd) | \
+ DMA_PACK_CHANNEL(ch) | \
+ DMA_PACK_WIDTH_A(width_a) | \
+ DMA_PACK_WIDTH_B(width_b) | \
+ DMA_PACK_HEIGHT(height))
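+
+/*
+ * Illustrative expansion: DMA_PACK_CMD_CHANNEL(_DMA_V2_MOVE_B2A_COMMAND, 3)
+ * yields (0 << 0) | (3 << 6) == 0xC0, i.e. the command in bits b5..b0
+ * and the channel in bits b10..b6 of the token layout shown above.
+ */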
+
+#define hive_dma_move_data(dma_id, read, channel, addr_a, addr_b, to_is_var, from_is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(read ? _DMA_V2_MOVE_B2A_COMMAND : _DMA_V2_MOVE_A2B_COMMAND, channel)); \
+ hive_dma_snd(dma_id, read ? (unsigned int)(addr_b) : (unsigned int)(addr_a)); \
+ hive_dma_snd(dma_id, read ? (unsigned int)(addr_a) : (unsigned int)(addr_b)); \
+ hive_dma_snd(dma_id, to_is_var); \
+ hive_dma_snd(dma_id, from_is_var); \
+}
+
+#define hive_dma_move_data_no_ack(dma_id, read, channel, addr_a, addr_b, to_is_var, from_is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(read ? _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND : _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND, channel)); \
+ hive_dma_snd(dma_id, read ? (unsigned int)(addr_b) : (unsigned int)(addr_a)); \
+ hive_dma_snd(dma_id, read ? (unsigned int)(addr_a) : (unsigned int)(addr_b)); \
+ hive_dma_snd(dma_id, to_is_var); \
+ hive_dma_snd(dma_id, from_is_var); \
+}
+
+#define hive_dma_move_b2a_data(dma_id, channel, to_addr, from_addr, to_is_var, from_is_var) \
+{ \
+ hive_dma_move_data(dma_id, true, channel, to_addr, from_addr, to_is_var, from_is_var) \
+}
+
+#define hive_dma_move_a2b_data(dma_id, channel, from_addr, to_addr, from_is_var, to_is_var) \
+{ \
+ hive_dma_move_data(dma_id, false, channel, from_addr, to_addr, from_is_var, to_is_var) \
+}
+
+#define hive_dma_set_data(dma_id, channel, address, value, is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_INIT_A_COMMAND, channel)); \
+ hive_dma_snd(dma_id, value); \
+ hive_dma_snd(dma_id, address); \
+ hive_dma_snd(dma_id, is_var); \
+}
+
+#define hive_dma_clear_data(dma_id, channel, address, is_var) hive_dma_set_data(dma_id, channel, address, 0, is_var)
+
+#define hive_dma_configure(dma_id, channel, connection, extension, height, \
+ stride_A, elems_A, cropping_A, width_A, \
+ stride_B, elems_B, cropping_B, width_B) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_CONFIG_CHANNEL_COMMAND, channel)); \
+ hive_dma_snd(dma_id, DMA_PACK_SETUP(connection, extension)); \
+ hive_dma_snd(dma_id, stride_A); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_A, cropping_A)); \
+ hive_dma_snd(dma_id, width_A); \
+ hive_dma_snd(dma_id, stride_B); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_B, cropping_B)); \
+ hive_dma_snd(dma_id, width_B); \
+ hive_dma_snd(dma_id, height); \
+}
+
+#define hive_dma_execute(dma_id, channel, cmd, to_addr, from_addr_value, to_is_var, from_is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(cmd, channel)); \
+ hive_dma_snd(dma_id, to_addr); \
+ hive_dma_snd(dma_id, from_addr_value); \
+ hive_dma_snd(dma_id, to_is_var); \
+ if ((cmd & DMA_CLEAR_CMDBIT) == 0) { \
+ hive_dma_snd(dma_id, from_is_var); \
+ } \
+}
+
+#define hive_dma_configure_fast(dma_id, channel, connection, extension, elems_A, elems_B) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_CONFIG_CHANNEL_COMMAND, channel)); \
+ hive_dma_snd(dma_id, DMA_PACK_SETUP(connection, extension)); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_A, 0)); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_B, 0)); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, 1); \
+}
+
+#define hive_dma_set_parameter(dma_id, channel, param, value) \
+{ \
+ hive_dma_snd(dma_id, _DMA_V2_SET_CHANNEL_PARAM_COMMAND | DMA_PACK_CHANNEL(channel) | DMA_PACK_PARAM(param)); \
+ hive_dma_snd(dma_id, value); \
+}
+
+#define DMA_SPECIFIC_CMDBIT 0x01
+#define DMA_CHECK_CMDBIT 0x02
+#define DMA_RW_CMDBIT 0x04
+#define DMA_CLEAR_CMDBIT 0x08
+#define DMA_ACK_CMDBIT 0x10
+#define DMA_CFG_CMDBIT 0x20
+#define DMA_PARAM_CMDBIT 0x01
+
+/* Write complete check not necessary if there's no ack */
+#define DMA_NOACK_CMD (DMA_ACK_CMDBIT | DMA_CHECK_CMDBIT)
+#define DMA_CFG_CMD (DMA_CFG_CMDBIT)
+#define DMA_CFGPARAM_CMD (DMA_CFG_CMDBIT | DMA_PARAM_CMDBIT)
+
+#define DMA_CMD_NEEDS_ACK(cmd) ((cmd & DMA_NOACK_CMD) == 0)
+#define DMA_CMD_IS_TRANSFER(cmd) ((cmd & DMA_CFG_CMDBIT) == 0)
+#define DMA_CMD_IS_WR(cmd) ((cmd & DMA_RW_CMDBIT) != 0)
+#define DMA_CMD_IS_RD(cmd) ((cmd & DMA_RW_CMDBIT) == 0)
+#define DMA_CMD_IS_CLR(cmd) ((cmd & DMA_CLEAR_CMDBIT) != 0)
+#define DMA_CMD_IS_CFG(cmd) ((cmd & DMA_CFG_CMDBIT) != 0)
+#define DMA_CMD_IS_PARAMCFG(cmd) ((cmd & DMA_CFGPARAM_CMD) == DMA_CFGPARAM_CMD)
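+
+/*
+ * E.g. for DMA_TRANSFER_A2B_NOACK below, DMA_CMD_IS_WR() and
+ * DMA_CMD_IS_TRANSFER() are true while DMA_CMD_NEEDS_ACK() is false.
+ */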
+
+/* As a matter of convention */
+#define DMA_TRANSFER_READ DMA_TRANSFER_B2A
+#define DMA_TRANSFER_WRITE DMA_TRANSFER_A2B
+/* store/load from the PoV of the system (memory) */
+#define DMA_TRANSFER_STORE DMA_TRANSFER_B2A
+#define DMA_TRANSFER_LOAD DMA_TRANSFER_A2B
+#define DMA_TRANSFER_CLEAR DMA_TRANSFER_CLEAR_A
+
+typedef enum {
+ DMA_TRANSFER_CLEAR_A = DMA_CLEAR_CMDBIT, /* 8 */
+ DMA_TRANSFER_CLEAR_B = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT, /* 12 */
+ DMA_TRANSFER_A2B = DMA_RW_CMDBIT, /* 4 */
+ DMA_TRANSFER_B2A = 0, /* 0 */
+ DMA_TRANSFER_CLEAR_A_NOACK = DMA_CLEAR_CMDBIT | DMA_NOACK_CMD, /* 26 */
+ DMA_TRANSFER_CLEAR_B_NOACK = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_NOACK_CMD, /* 30 */
+ DMA_TRANSFER_A2B_NOACK = DMA_RW_CMDBIT | DMA_NOACK_CMD, /* 22 */
+ DMA_TRANSFER_B2A_NOACK = DMA_NOACK_CMD, /* 18 */
+ DMA_FASTTRANSFER_CLEAR_A = DMA_CLEAR_CMDBIT | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_CLEAR_B = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_A2B = DMA_RW_CMDBIT | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_B2A = DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_CLEAR_A_NOACK = DMA_CLEAR_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_CLEAR_B_NOACK = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_A2B_NOACK = DMA_RW_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_B2A_NOACK = DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+} dma_transfer_type_t;
+
+typedef enum {
+ DMA_CONFIG_SETUP = _DMA_V2_PACKING_SETUP_PARAM,
+ DMA_CONFIG_HEIGHT = _DMA_V2_HEIGHT_PARAM,
+ DMA_CONFIG_STRIDE_A_ = _DMA_V2_STRIDE_A_PARAM,
+ DMA_CONFIG_CROP_ELEM_A = _DMA_V2_ELEM_CROPPING_A_PARAM,
+ DMA_CONFIG_WIDTH_A = _DMA_V2_WIDTH_A_PARAM,
+ DMA_CONFIG_STRIDE_B_ = _DMA_V2_STRIDE_B_PARAM,
+ DMA_CONFIG_CROP_ELEM_B = _DMA_V2_ELEM_CROPPING_B_PARAM,
+ DMA_CONFIG_WIDTH_B = _DMA_V2_WIDTH_B_PARAM,
+} dma_config_type_t;
+
+struct dma_port_config {
+ u8 crop, elems;
+ u16 width;
+ u32 stride;
+};
+
+/* Descriptor for dma configuration */
+struct dma_channel_config {
+ u8 connection;
+ u8 extension;
+ u8 height;
+ struct dma_port_config a, b;
+};
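+
+/*
+ * Illustrative use (dma_id, ch and cfg are hypothetical): a channel
+ * descriptor maps onto hive_dma_configure() as
+ *   hive_dma_configure(dma_id, ch, cfg.connection, cfg.extension,
+ *                      cfg.height,
+ *                      cfg.a.stride, cfg.a.elems, cfg.a.crop, cfg.a.width,
+ *                      cfg.b.stride, cfg.b.elems, cfg.b.crop, cfg.b.width);
+ */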
+
+#endif /* __DMA_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h
new file mode 100644
index 000000000000..4df7a405cdcf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __EVENT_FIFO_GLOBAL_H
+#define __EVENT_FIFO_GLOBAL_H
+
+/*#error "event_global.h: No global event information permitted"*/
+
+#endif /* __EVENT_FIFO_GLOBAL_H */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h
new file mode 100644
index 000000000000..f43bf0ad2468
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_GLOBAL_H_INCLUDED__
+#define __FIFO_MONITOR_GLOBAL_H_INCLUDED__
+
+#define IS_FIFO_MONITOR_VERSION_2
+
+/*
+#define HIVE_ISP_CSS_STREAM_SWITCH_NONE 0
+#define HIVE_ISP_CSS_STREAM_SWITCH_SP 1
+#define HIVE_ISP_CSS_STREAM_SWITCH_ISP 2
+ *
+ * Actually, "HIVE_ISP_CSS_STREAM_SWITCH_SP = 1" and "HIVE_ISP_CSS_STREAM_SWITCH_ISP = 0"
+ * in "hive_isp_css_stream_switch_hrt.h"
+ */
+#define HIVE_ISP_CSS_STREAM_SWITCH_ISP 0
+#define HIVE_ISP_CSS_STREAM_SWITCH_SP 1
+#define HIVE_ISP_CSS_STREAM_SWITCH_NONE 2
+
+#endif /* __FIFO_MONITOR_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h
new file mode 100644
index 000000000000..f3ce9e9f1ad4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h
@@ -0,0 +1,89 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_GLOBAL_H_INCLUDED__
+#define __GDC_GLOBAL_H_INCLUDED__
+
+#define IS_GDC_VERSION_2
+
+#include <type_support.h>
+#include "gdc_v2_defs.h"
+
+/*
+ * Storage addresses for packed data transfer
+ */
+#define GDC_PARAM_ICX_LEFT_ROUNDED_IDX 0
+#define GDC_PARAM_OXDIM_FLOORED_IDX 1
+#define GDC_PARAM_OXDIM_LAST_IDX 2
+#define GDC_PARAM_WOIX_LAST_IDX 3
+#define GDC_PARAM_IY_TOPLEFT_IDX 4
+#define GDC_PARAM_CHUNK_CNT_IDX 5
+/*#define GDC_PARAM_ELEMENTS_PER_XMEM_ADDR_IDX 6 */ /* Derived from bpp */
+#define GDC_PARAM_BPP_IDX 6
+#define GDC_PARAM_BLOCK_HEIGHT_IDX 7
+/*#define GDC_PARAM_DMA_CHANNEL_STRIDE_A_IDX 8*/ /* The DMA stride == the GDC buffer stride */
+#define GDC_PARAM_WOIX_IDX 8
+#define GDC_PARAM_DMA_CHANNEL_STRIDE_B_IDX 9
+#define GDC_PARAM_DMA_CHANNEL_WIDTH_A_IDX 10
+#define GDC_PARAM_DMA_CHANNEL_WIDTH_B_IDX 11
+#define GDC_PARAM_VECTORS_PER_LINE_IN_IDX 12
+#define GDC_PARAM_VECTORS_PER_LINE_OUT_IDX 13
+#define GDC_PARAM_VMEM_IN_DIMY_IDX 14
+#define GDC_PARAM_COMMAND_IDX 15
+#define N_GDC_PARAM 16
+
+/* Because of the packed parameter transfer max(params) == max(fragments) */
+#define N_GDC_FRAGMENTS N_GDC_PARAM
+
+/* The GDC is capable of higher internal precision than the parameter data structures */
+#define HRT_GDC_COORD_SCALE_BITS 6
+#define HRT_GDC_COORD_SCALE BIT(HRT_GDC_COORD_SCALE_BITS)
+
+typedef enum {
+ GDC_CH0_ID = 0,
+ N_GDC_CHANNEL_ID
+} gdc_channel_ID_t;
+
+typedef enum {
+ gdc_8_bpp = 8,
+ gdc_10_bpp = 10,
+ gdc_12_bpp = 12,
+ gdc_14_bpp = 14
+} gdc_bits_per_pixel_t;
+
+typedef struct gdc_scale_param_mem_s {
+ u16 params[N_GDC_PARAM];
+ u16 ipx_start_array[N_GDC_PARAM];
+ u16 ibuf_offset[N_GDC_PARAM];
+ u16 obuf_offset[N_GDC_PARAM];
+} gdc_scale_param_mem_t;
+
+typedef struct gdc_warp_param_mem_s {
+ u32 origin_x;
+ u32 origin_y;
+ u32 in_addr_offset;
+ u32 in_block_width;
+ u32 in_block_height;
+ u32 p0_x;
+ u32 p0_y;
+ u32 p1_x;
+ u32 p1_y;
+ u32 p2_x;
+ u32 p2_y;
+ u32 p3_x;
+ u32 p3_y;
+ u32 padding[3];
+} gdc_warp_param_mem_t;
+
+#endif /* __GDC_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h
new file mode 100644
index 000000000000..1c1b0667a53b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h
@@ -0,0 +1,84 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_GLOBAL_H_INCLUDED__
+#define __GP_DEVICE_GLOBAL_H_INCLUDED__
+
+#define IS_GP_DEVICE_VERSION_2
+
+#define _REG_GP_IRQ_REQ0_ADDR 0x08
+#define _REG_GP_IRQ_REQ1_ADDR 0x0C
+/* The SP sends SW interrupt info to this register */
+#define _REG_GP_IRQ_REQUEST0_ADDR _REG_GP_IRQ_REQ0_ADDR
+#define _REG_GP_IRQ_REQUEST1_ADDR _REG_GP_IRQ_REQ1_ADDR
+
+/* The SP configures FIFO switches in these registers */
+#define _REG_GP_SWITCH_IF_ADDR 0x40
+#define _REG_GP_SWITCH_GDC1_ADDR 0x44
+#define _REG_GP_SWITCH_GDC2_ADDR 0x48
+/* @ INPUT_FORMATTER_BASE -> GP_DEVICE_BASE */
+#define _REG_GP_IFMT_input_switch_lut_reg0 0x00030800
+#define _REG_GP_IFMT_input_switch_lut_reg1 0x00030804
+#define _REG_GP_IFMT_input_switch_lut_reg2 0x00030808
+#define _REG_GP_IFMT_input_switch_lut_reg3 0x0003080C
+#define _REG_GP_IFMT_input_switch_lut_reg4 0x00030810
+#define _REG_GP_IFMT_input_switch_lut_reg5 0x00030814
+#define _REG_GP_IFMT_input_switch_lut_reg6 0x00030818
+#define _REG_GP_IFMT_input_switch_lut_reg7 0x0003081C
+#define _REG_GP_IFMT_input_switch_fsync_lut 0x00030820
+#define _REG_GP_IFMT_srst 0x00030824
+#define _REG_GP_IFMT_slv_reg_srst 0x00030828
+#define _REG_GP_IFMT_input_switch_ch_id_fmt_type 0x0003082C
+
+/* @ GP_DEVICE_BASE */
+#define _REG_GP_SYNCGEN_ENABLE_ADDR 0x00090000
+#define _REG_GP_SYNCGEN_FREE_RUNNING_ADDR 0x00090004
+#define _REG_GP_SYNCGEN_PAUSE_ADDR 0x00090008
+#define _REG_GP_NR_FRAMES_ADDR 0x0009000C
+#define _REG_GP_SYNGEN_NR_PIX_ADDR 0x00090010
+#define _REG_GP_SYNGEN_NR_LINES_ADDR 0x00090014
+#define _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR 0x00090018
+#define _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR 0x0009001C
+#define _REG_GP_ISEL_SOF_ADDR 0x00090020
+#define _REG_GP_ISEL_EOF_ADDR 0x00090024
+#define _REG_GP_ISEL_SOL_ADDR 0x00090028
+#define _REG_GP_ISEL_EOL_ADDR 0x0009002C
+#define _REG_GP_ISEL_LFSR_ENABLE_ADDR 0x00090030
+#define _REG_GP_ISEL_LFSR_ENABLE_B_ADDR 0x00090034
+#define _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR 0x00090038
+#define _REG_GP_ISEL_TPG_ENABLE_ADDR 0x0009003C
+#define _REG_GP_ISEL_TPG_ENABLE_B_ADDR 0x00090040
+#define _REG_GP_ISEL_HOR_CNT_MASK_ADDR 0x00090044
+#define _REG_GP_ISEL_VER_CNT_MASK_ADDR 0x00090048
+#define _REG_GP_ISEL_XY_CNT_MASK_ADDR 0x0009004C
+#define _REG_GP_ISEL_HOR_CNT_DELTA_ADDR 0x00090050
+#define _REG_GP_ISEL_VER_CNT_DELTA_ADDR 0x00090054
+#define _REG_GP_ISEL_TPG_MODE_ADDR 0x00090058
+#define _REG_GP_ISEL_TPG_RED1_ADDR 0x0009005C
+#define _REG_GP_ISEL_TPG_GREEN1_ADDR 0x00090060
+#define _REG_GP_ISEL_TPG_BLUE1_ADDR 0x00090064
+#define _REG_GP_ISEL_TPG_RED2_ADDR 0x00090068
+#define _REG_GP_ISEL_TPG_GREEN2_ADDR 0x0009006C
+#define _REG_GP_ISEL_TPG_BLUE2_ADDR 0x00090070
+#define _REG_GP_ISEL_CH_ID_ADDR 0x00090074
+#define _REG_GP_ISEL_FMT_TYPE_ADDR 0x00090078
+#define _REG_GP_ISEL_DATA_SEL_ADDR 0x0009007C
+#define _REG_GP_ISEL_SBAND_SEL_ADDR 0x00090080
+#define _REG_GP_ISEL_SYNC_SEL_ADDR 0x00090084
+#define _REG_GP_SYNCGEN_HOR_CNT_ADDR 0x00090088
+#define _REG_GP_SYNCGEN_VER_CNT_ADDR 0x0009008C
+#define _REG_GP_SYNCGEN_FRAME_CNT_ADDR 0x00090090
+#define _REG_GP_SOFT_RESET_ADDR 0x00090094
+
+#endif /* __GP_DEVICE_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h
new file mode 100644
index 000000000000..ee636ad6c5b3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_GLOBAL_H_INCLUDED__
+#define __GP_TIMER_GLOBAL_H_INCLUDED__
+
+#include "hive_isp_css_defs.h" /*HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ */
+
+/* from gp_timer_defs.h*/
+#define GP_TIMER_COUNT_TYPE_HIGH 0
+#define GP_TIMER_COUNT_TYPE_LOW 1
+#define GP_TIMER_COUNT_TYPE_POSEDGE 2
+#define GP_TIMER_COUNT_TYPE_NEGEDGE 3
+#define GP_TIMER_COUNT_TYPE_TYPES 4
+
+/* timer - 3 is selected */
+#define GP_TIMER_SEL 3
+
+/*HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ is selected*/
+#define GP_TIMER_SIGNAL_SELECT HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ
+
+#endif /* __GP_TIMER_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h
new file mode 100644
index 000000000000..a82ca2a8cada
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_GLOBAL_H_INCLUDED__
+#define __GPIO_GLOBAL_H_INCLUDED__
+
+#define IS_GPIO_VERSION_1
+
+#include <gpio_block_defs.h>
+
+/* pqiao: the following part is only defined in hive_isp_css_defs.h
+ * in the FPGA system; port it here.
+ */
+
+/* GPIO pin defines */
+/*#define HIVE_GPIO_CAMERA_BOARD_RESET_PIN_NR 0
+#define HIVE_GPIO_LCD_CLOCK_SELECT_PIN_NR 7
+#define HIVE_GPIO_HDMI_CLOCK_SELECT_PIN_NR 8
+#define HIVE_GPIO_LCD_VERT_FLIP_PIN_NR 8
+#define HIVE_GPIO_LCD_HOR_FLIP_PIN_NR 9
+#define HIVE_GPIO_AS3683_GPIO_P0_PIN_NR 1
+#define HIVE_GPIO_AS3683_DATA_P1_PIN_NR 2
+#define HIVE_GPIO_AS3683_CLK_P2_PIN_NR 3
+#define HIVE_GPIO_AS3683_T1_F0_PIN_NR 4
+#define HIVE_GPIO_AS3683_SFL_F1_PIN_NR 5
+#define HIVE_GPIO_AS3683_STROBE_F2_PIN_NR 6
+#define HIVE_GPIO_MAX1577_EN1_PIN_NR 1
+#define HIVE_GPIO_MAX1577_EN2_PIN_NR 2
+#define HIVE_GPIO_MAX8685A_EN_PIN_NR 3
+#define HIVE_GPIO_MAX8685A_TRIG_PIN_NR 4*/
+
+#define HIVE_GPIO_STROBE_TRIGGER_PIN 2
+
+#endif /* __GPIO_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h
new file mode 100644
index 000000000000..e4b9daa2d062
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_GLOBAL_H_INCLUDED__
+#define __HMEM_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define IS_HMEM_VERSION_1
+
+#include "isp.h"
+
+/*
+#define ISP_HIST_ADDRESS_BITS 12
+#define ISP_HIST_ALIGNMENT 4
+#define ISP_HIST_COMP_IN_PREC 12
+#define ISP_HIST_DEPTH 1024
+#define ISP_HIST_WIDTH 24
+#define ISP_HIST_COMPONENTS 4
+*/
+#define ISP_HIST_ALIGNMENT_LOG2 2
+
+#define HMEM_SIZE_LOG2 (ISP_HIST_ADDRESS_BITS - ISP_HIST_ALIGNMENT_LOG2)
+#define HMEM_SIZE ISP_HIST_DEPTH
+
+#define HMEM_UNIT_SIZE (HMEM_SIZE / ISP_HIST_COMPONENTS)
+#define HMEM_UNIT_COUNT ISP_HIST_COMPONENTS
+
+#define HMEM_RANGE_LOG2 ISP_HIST_WIDTH
+#define HMEM_RANGE BIT(HMEM_RANGE_LOG2)
+
+typedef u32 hmem_data_t;
+
+#endif /* __HMEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c
new file mode 100644
index 000000000000..d911aec24185
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c
@@ -0,0 +1,71 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "debug.h"
+
+#ifndef __INLINE_DEBUG__
+#include "debug_private.h"
+#endif /* __INLINE_DEBUG__ */
+
+#include "memory_access.h"
+
+#define __INLINE_SP__
+#include "sp.h"
+
+#include "assert_support.h"
+
+/* The address of the remote copy */
+hrt_address debug_buffer_address = (hrt_address)-1;
+hrt_vaddress debug_buffer_ddr_address = (hrt_vaddress)-1;
+/* The local copy */
+static debug_data_t debug_data;
+debug_data_t *debug_data_ptr = &debug_data;
+
+void debug_buffer_init(const hrt_address addr)
+{
+ debug_buffer_address = addr;
+
+ debug_data.head = 0;
+ debug_data.tail = 0;
+}
+
+void debug_buffer_ddr_init(const hrt_vaddress addr)
+{
+ debug_buf_mode_t mode = DEBUG_BUFFER_MODE_LINEAR;
+ u32 enable = 1;
+ u32 head = 0;
+ u32 tail = 0;
+ /* set the ddr queue */
+ debug_buffer_ddr_address = addr;
+ mmgr_store(addr + DEBUG_DATA_BUF_MODE_DDR_ADDR,
+ &mode, sizeof(debug_buf_mode_t));
+ mmgr_store(addr + DEBUG_DATA_HEAD_DDR_ADDR,
+ &head, sizeof(uint32_t));
+ mmgr_store(addr + DEBUG_DATA_TAIL_DDR_ADDR,
+ &tail, sizeof(uint32_t));
+ mmgr_store(addr + DEBUG_DATA_ENABLE_DDR_ADDR,
+ &enable, sizeof(uint32_t));
+
+ /* set the local copy */
+ debug_data.head = 0;
+ debug_data.tail = 0;
+}
+
+void debug_buffer_setmode(const debug_buf_mode_t mode)
+{
+ assert(debug_buffer_address != ((hrt_address)-1));
+
+ sp_dmem_store_uint32(SP0_ID,
+ debug_buffer_address + DEBUG_DATA_BUF_MODE_ADDR, mode);
+}
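+
+/*
+ * Usage sketch (illustrative only; the exact init order depends on the
+ * SP firmware): point the host at the SP copy of the queue, pick a
+ * mode, then drain pending words:
+ *
+ *	debug_buffer_init(sp_debug_buffer_addr);
+ *	debug_buffer_setmode(DEBUG_BUFFER_MODE_LINEAR);
+ *	while (!is_debug_buffer_empty())
+ *		process(debug_dequeue());
+ *
+ * sp_debug_buffer_addr and process() are placeholders, not part of
+ * this interface.
+ */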
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h
new file mode 100644
index 000000000000..4c95eda694f7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_LOCAL_H_INCLUDED__
+#define __DEBUG_LOCAL_H_INCLUDED__
+
+#include "debug_global.h"
+
+#endif /* __DEBUG_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h
new file mode 100644
index 000000000000..8447e33d1c04
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h
@@ -0,0 +1,126 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_PRIVATE_H_INCLUDED__
+#define __DEBUG_PRIVATE_H_INCLUDED__
+
+#include "debug_public.h"
+
+#include "sp.h"
+
+#define __INLINE_ISP__
+#include "isp.h"
+
+#include "memory_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_DEBUG_C bool is_debug_buffer_empty(void)
+{
+ return (debug_data_ptr->head == debug_data_ptr->tail);
+}
+
+STORAGE_CLASS_DEBUG_C hrt_data debug_dequeue(void)
+{
+ hrt_data value = 0;
+
+ assert(debug_buffer_address != ((hrt_address) - 1));
+
+ debug_synch_queue();
+
+ if (!is_debug_buffer_empty()) {
+ value = debug_data_ptr->buf[debug_data_ptr->head];
+ debug_data_ptr->head = (debug_data_ptr->head + 1) & DEBUG_BUF_MASK;
+ sp_dmem_store_uint32(SP0_ID, debug_buffer_address + DEBUG_DATA_HEAD_ADDR,
+ debug_data_ptr->head);
+ }
+
+ return value;
+}
+
+STORAGE_CLASS_DEBUG_C void debug_synch_queue(void)
+{
+ u32 remote_tail = sp_dmem_load_uint32(SP0_ID,
+ debug_buffer_address + DEBUG_DATA_TAIL_ADDR);
+	/*
+	 * We could move the remote head after the upload, but we would
+	 * have to limit the upload w.r.t. the local head. This is easier.
+	 */
+ if (remote_tail > debug_data_ptr->tail) {
+ size_t delta = remote_tail - debug_data_ptr->tail;
+
+ sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ } else if (remote_tail < debug_data_ptr->tail) {
+ size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail;
+
+ sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR,
+ (void *)&debug_data_ptr->buf[0],
+ remote_tail * sizeof(uint32_t));
+ } /* else we are up to date */
+ debug_data_ptr->tail = remote_tail;
+}
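+
+/*
+ * Wrap-around example (assuming, say, DEBUG_BUF_SIZE = 1024): if the
+ * local tail is 1000 and the remote tail has advanced to 8, the ring
+ * wrapped, so two loads are issued: 24 words from offset 1000 up to
+ * the end of the buffer, then 8 words from offset 0. Only the tail is
+ * synchronized here; the head is published by debug_dequeue().
+ */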
+
+STORAGE_CLASS_DEBUG_C void debug_synch_queue_isp(void)
+{
+ u32 remote_tail = isp_dmem_load_uint32(ISP0_ID,
+ DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_TAIL_ADDR);
+	/*
+	 * We could move the remote head after the upload, but we would
+	 * have to limit the upload w.r.t. the local head. This is easier.
+	 */
+ if (remote_tail > debug_data_ptr->tail) {
+ size_t delta = remote_tail - debug_data_ptr->tail;
+
+ isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ } else if (remote_tail < debug_data_ptr->tail) {
+ size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail;
+
+ isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR,
+ (void *)&debug_data_ptr->buf[0],
+ remote_tail * sizeof(uint32_t));
+ } /* else we are up to date */
+ debug_data_ptr->tail = remote_tail;
+}
+
+STORAGE_CLASS_DEBUG_C void debug_synch_queue_ddr(void)
+{
+ u32 remote_tail;
+
+ mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_TAIL_DDR_ADDR, &remote_tail,
+ sizeof(uint32_t));
+	/*
+	 * We could move the remote head after the upload, but we would
+	 * have to limit the upload w.r.t. the local head. This is easier.
+	 */
+ if (remote_tail > debug_data_ptr->tail) {
+ size_t delta = remote_tail - debug_data_ptr->tail;
+
+ mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ } else if (remote_tail < debug_data_ptr->tail) {
+ size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail;
+
+ mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR,
+ (void *)&debug_data_ptr->buf[0],
+ remote_tail * sizeof(uint32_t));
+ } /* else we are up to date */
+ debug_data_ptr->tail = remote_tail;
+}
+
+#endif /* __DEBUG_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c
new file mode 100644
index 000000000000..87df1da1164e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c
@@ -0,0 +1,299 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+
+#include "dma.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_DMA__
+#include "dma_private.h"
+#endif /* __INLINE_DMA__ */
+
+void dma_get_state(const dma_ID_t ID, dma_state_t *state)
+{
+ int i;
+ hrt_data tmp;
+
+ assert(ID < N_DMA_ID);
+ assert(state);
+
+ tmp = dma_reg_load(ID, DMA_COMMAND_FSM_REG_IDX);
+	/*
+	 * reg[3:0]   : flags - error [3], stall, run, idle [0]
+	 * reg[9:4]   : command
+	 * reg[14:10] : channel
+	 * reg[23:15] : param
+	 */
+ state->fsm_command_idle = tmp & 0x1;
+ state->fsm_command_run = tmp & 0x2;
+ state->fsm_command_stalling = tmp & 0x4;
+ state->fsm_command_error = tmp & 0x8;
+ state->last_command_channel = (tmp >> 10 & 0x1F);
+ state->last_command_param = (tmp >> 15 & 0x0F);
+ tmp = (tmp >> 4) & 0x3F;
+	/*
+	 * state->last_command = (dma_commands_t)tmp;
+	 * would break if the enumerator were made non-linear.
+	 * AM: the list below does not cover all the cases and is not
+	 * correct either, therefore just dump the raw command value.
+	 */
+	state->last_command = tmp;
+
+ /*
+ if (tmp == 0)
+ state->last_command = DMA_COMMAND_READ;
+ if (tmp == 1)
+ state->last_command = DMA_COMMAND_WRITE;
+ if (tmp == 2)
+ state->last_command = DMA_COMMAND_SET_CHANNEL;
+ if (tmp == 3)
+ state->last_command = DMA_COMMAND_SET_PARAM;
+ if (tmp == 4)
+ state->last_command = DMA_COMMAND_READ_SPECIFIC;
+ if (tmp == 5)
+ state->last_command = DMA_COMMAND_WRITE_SPECIFIC;
+ if (tmp == 8)
+ state->last_command = DMA_COMMAND_INIT;
+ if (tmp == 12)
+ state->last_command = DMA_COMMAND_INIT_SPECIFIC;
+ if (tmp == 15)
+ state->last_command = DMA_COMMAND_RST;
+ */
+
+ /* No sub-fields, idx = 0 */
+ state->current_command = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_CMD_IDX));
+ state->current_addr_a = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_A_IDX));
+ state->current_addr_b = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_B_IDX));
+
+ tmp = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_idle = tmp & 0x1;
+ state->fsm_ctrl_run = tmp & 0x2;
+ state->fsm_ctrl_stalling = tmp & 0x4;
+ state->fsm_ctrl_error = tmp & 0x8;
+ tmp = tmp >> 4;
+ /* state->fsm_ctrl_state = (dma_ctrl_states_t)tmp; */
+ if (tmp == 0)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_IDLE;
+ if (tmp == 1)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_REQ_RCV;
+ if (tmp == 2)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_RCV;
+ if (tmp == 3)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_RCV_REQ;
+ if (tmp == 4)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_INIT;
+ state->fsm_ctrl_source_dev = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_source_addr = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_source_stride = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_source_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_source_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_source_dev = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_dest_dev = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_dest_addr = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_dest_stride = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_source_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_dest_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_dest_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_source_elems = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_dest_elems = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_extension = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+
+ tmp = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_PACK_STATE_IDX,
+ _DMA_FSM_GROUP_FSM_PACK_IDX));
+ state->pack_idle = tmp & 0x1;
+ state->pack_run = tmp & 0x2;
+ state->pack_stalling = tmp & 0x4;
+ state->pack_error = tmp & 0x8;
+ state->pack_cnt_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX,
+ _DMA_FSM_GROUP_FSM_PACK_IDX));
+ state->pack_src_cnt_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX,
+ _DMA_FSM_GROUP_FSM_PACK_IDX));
+ state->pack_dest_cnt_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX,
+ _DMA_FSM_GROUP_FSM_PACK_IDX));
+
+ tmp = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_REQ_STATE_IDX,
+ _DMA_FSM_GROUP_FSM_REQ_IDX));
+ /* state->read_state = (dma_rw_states_t)tmp; */
+ if (tmp == 0)
+ state->read_state = DMA_RW_STATE_IDLE;
+ if (tmp == 1)
+ state->read_state = DMA_RW_STATE_REQ;
+ if (tmp == 2)
+ state->read_state = DMA_RW_STATE_NEXT_LINE;
+ if (tmp == 3)
+ state->read_state = DMA_RW_STATE_UNLOCK_CHANNEL;
+ state->read_cnt_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX,
+ _DMA_FSM_GROUP_FSM_REQ_IDX));
+ state->read_cnt_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX,
+ _DMA_FSM_GROUP_FSM_REQ_IDX));
+
+ tmp = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_WR_STATE_IDX,
+ _DMA_FSM_GROUP_FSM_WR_IDX));
+ /* state->write_state = (dma_rw_states_t)tmp; */
+ if (tmp == 0)
+ state->write_state = DMA_RW_STATE_IDLE;
+ if (tmp == 1)
+ state->write_state = DMA_RW_STATE_REQ;
+ if (tmp == 2)
+ state->write_state = DMA_RW_STATE_NEXT_LINE;
+ if (tmp == 3)
+ state->write_state = DMA_RW_STATE_UNLOCK_CHANNEL;
+ state->write_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX,
+ _DMA_FSM_GROUP_FSM_WR_IDX));
+ state->write_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX,
+ _DMA_FSM_GROUP_FSM_WR_IDX));
+
+ for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) {
+ dma_port_state_t *port = &state->port_states[i];
+
+ tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(0, i));
+ port->req_cs = ((tmp & 0x1) != 0);
+ port->req_we_n = ((tmp & 0x2) != 0);
+ port->req_run = ((tmp & 0x4) != 0);
+ port->req_ack = ((tmp & 0x8) != 0);
+
+ tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(1, i));
+ port->send_cs = ((tmp & 0x1) != 0);
+ port->send_we_n = ((tmp & 0x2) != 0);
+ port->send_run = ((tmp & 0x4) != 0);
+ port->send_ack = ((tmp & 0x8) != 0);
+
+ tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(2, i));
+ if (tmp & 0x1)
+ port->fifo_state = DMA_FIFO_STATE_WILL_BE_FULL;
+ if (tmp & 0x2)
+ port->fifo_state = DMA_FIFO_STATE_FULL;
+ if (tmp & 0x4)
+ port->fifo_state = DMA_FIFO_STATE_EMPTY;
+ port->fifo_counter = tmp >> 3;
+ }
+
+ for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) {
+ dma_channel_state_t *ch = &state->channel_states[i];
+
+ ch->connection = DMA_GET_CONNECTION(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_PACKING_SETUP_PARAM)));
+ ch->sign_extend = DMA_GET_EXTENSION(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_PACKING_SETUP_PARAM)));
+ ch->height = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_HEIGHT_PARAM));
+ ch->stride_a = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_STRIDE_A_PARAM));
+ ch->elems_a = DMA_GET_ELEMENTS(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_ELEM_CROPPING_A_PARAM)));
+ ch->cropping_a = DMA_GET_CROPPING(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_ELEM_CROPPING_A_PARAM)));
+ ch->width_a = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_WIDTH_A_PARAM));
+ ch->stride_b = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_STRIDE_B_PARAM));
+ ch->elems_b = DMA_GET_ELEMENTS(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_ELEM_CROPPING_B_PARAM)));
+ ch->cropping_b = DMA_GET_CROPPING(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_ELEM_CROPPING_B_PARAM)));
+ ch->width_b = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_WIDTH_B_PARAM));
+ }
+}
+
+void
+dma_set_max_burst_size(const dma_ID_t ID, dma_connection conn,
+ uint32_t max_burst_size)
+{
+ assert(ID < N_DMA_ID);
+ assert(max_burst_size > 0);
+ dma_reg_store(ID, DMA_DEV_INFO_REG_IDX(_DMA_DEV_INTERF_MAX_BURST_IDX, conn),
+ max_burst_size - 1);
+}
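+
+/*
+ * Example (illustrative; DMA0_ID is assumed to be a valid dma_ID_t and
+ * conn a valid dma_connection):
+ *
+ *	dma_set_max_burst_size(DMA0_ID, conn, 16);
+ *
+ * programs a maximum burst of 16 on that connection; note the device
+ * register holds max_burst_size - 1, so 15 is actually written.
+ */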
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h
new file mode 100644
index 000000000000..d7db964d5cec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h
@@ -0,0 +1,207 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_LOCAL_H_INCLUDED__
+#define __DMA_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+#include "dma_global.h"
+
+#include <defs.h> /* HRTCAT() */
+#include <bits.h> /* _hrt_get_bits() */
+#include <hive_isp_css_defs.h> /* HIVE_DMA_NUM_CHANNELS */
+#include <dma_v2_defs.h>
+
+#define _DMA_FSM_GROUP_CMD_IDX _DMA_V2_FSM_GROUP_CMD_IDX
+#define _DMA_FSM_GROUP_ADDR_A_IDX _DMA_V2_FSM_GROUP_ADDR_SRC_IDX
+#define _DMA_FSM_GROUP_ADDR_B_IDX _DMA_V2_FSM_GROUP_ADDR_DEST_IDX
+
+#define _DMA_FSM_GROUP_CMD_CTRL_IDX _DMA_V2_FSM_GROUP_CMD_CTRL_IDX
+
+#define _DMA_FSM_GROUP_FSM_CTRL_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX
+
+#define _DMA_FSM_GROUP_FSM_PACK_IDX _DMA_V2_FSM_GROUP_FSM_PACK_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_STATE_IDX _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX
+
+#define _DMA_FSM_GROUP_FSM_REQ_IDX _DMA_V2_FSM_GROUP_FSM_REQ_IDX
+#define _DMA_FSM_GROUP_FSM_REQ_STATE_IDX _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX
+#define _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX
+
+#define _DMA_FSM_GROUP_FSM_WR_IDX _DMA_V2_FSM_GROUP_FSM_WR_IDX
+#define _DMA_FSM_GROUP_FSM_WR_STATE_IDX _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX
+#define _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX
+
+#define _DMA_DEV_INTERF_MAX_BURST_IDX _DMA_V2_DEV_INTERF_MAX_BURST_IDX
+
+/*
+ * Macros to compute the DMA parameter register indices
+ */
+#define DMA_SEL_COMP(comp) (((comp) & _hrt_ones(_DMA_V2_ADDR_SEL_COMP_BITS)) << _DMA_V2_ADDR_SEL_COMP_IDX)
+#define DMA_SEL_CH(ch) (((ch) & _hrt_ones(_DMA_V2_ADDR_SEL_CH_REG_BITS)) << _DMA_V2_ADDR_SEL_CH_REG_IDX)
+#define DMA_SEL_PARAM(param) (((param) & _hrt_ones(_DMA_V2_ADDR_SEL_PARAM_BITS)) << _DMA_V2_ADDR_SEL_PARAM_IDX)
+/* CG = Connection Group */
+#define DMA_SEL_CG_INFO(info) (((info) & _hrt_ones(_DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS)) << _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX)
+#define DMA_SEL_CG_COMP(comp) (((comp) & _hrt_ones(_DMA_V2_ADDR_SEL_GROUP_COMP_BITS)) << _DMA_V2_ADDR_SEL_GROUP_COMP_IDX)
+#define DMA_SEL_DEV_INFO(info) (((info) & _hrt_ones(_DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS)) << _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX)
+#define DMA_SEL_DEV_ID(dev) (((dev) & _hrt_ones(_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS)) << _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX)
+
+#define DMA_COMMAND_FSM_REG_IDX (DMA_SEL_COMP(_DMA_V2_SEL_FSM_CMD) >> 2)
+#define DMA_CHANNEL_PARAM_REG_IDX(ch, param) ((DMA_SEL_COMP(_DMA_V2_SEL_CH_REG) | DMA_SEL_CH(ch) | DMA_SEL_PARAM(param)) >> 2)
+#define DMA_CG_INFO_REG_IDX(info_id, comp_id) ((DMA_SEL_COMP(_DMA_V2_SEL_CONN_GROUP) | DMA_SEL_CG_INFO(info_id) | DMA_SEL_CG_COMP(comp_id)) >> 2)
+#define DMA_DEV_INFO_REG_IDX(info_id, dev_id) ((DMA_SEL_COMP(_DMA_V2_SEL_DEV_INTERF) | DMA_SEL_DEV_INFO(info_id) | DMA_SEL_DEV_ID(dev_id)) >> 2)
+#define DMA_RST_REG_IDX (DMA_SEL_COMP(_DMA_V2_SEL_RESET) >> 2)
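+
+/*
+ * The DMA_SEL_* macros above build a byte address within the DMA slave
+ * port; the final '>> 2' turns that into a 32-bit register index, which
+ * dma_reg_load()/dma_reg_store() scale back by sizeof(hrt_data).
+ */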
+
+#define DMA_GET_CONNECTION(val) _hrt_get_bits(val, _DMA_V2_CONNECTION_IDX, _DMA_V2_CONNECTION_BITS)
+#define DMA_GET_EXTENSION(val) _hrt_get_bits(val, _DMA_V2_EXTENSION_IDX, _DMA_V2_EXTENSION_BITS)
+#define DMA_GET_ELEMENTS(val) _hrt_get_bits(val, _DMA_V2_ELEMENTS_IDX, _DMA_V2_ELEMENTS_BITS)
+#define DMA_GET_CROPPING(val) _hrt_get_bits(val, _DMA_V2_LEFT_CROPPING_IDX, _DMA_V2_LEFT_CROPPING_BITS)
+
+typedef enum {
+ DMA_CTRL_STATE_IDLE,
+ DMA_CTRL_STATE_REQ_RCV,
+ DMA_CTRL_STATE_RCV,
+ DMA_CTRL_STATE_RCV_REQ,
+ DMA_CTRL_STATE_INIT,
+ N_DMA_CTRL_STATES
+} dma_ctrl_states_t;
+
+typedef enum {
+ DMA_COMMAND_READ,
+ DMA_COMMAND_WRITE,
+ DMA_COMMAND_SET_CHANNEL,
+ DMA_COMMAND_SET_PARAM,
+ DMA_COMMAND_READ_SPECIFIC,
+ DMA_COMMAND_WRITE_SPECIFIC,
+ DMA_COMMAND_INIT,
+ DMA_COMMAND_INIT_SPECIFIC,
+ DMA_COMMAND_RST,
+ N_DMA_COMMANDS
+} dma_commands_t;
+
+typedef enum {
+ DMA_RW_STATE_IDLE,
+ DMA_RW_STATE_REQ,
+ DMA_RW_STATE_NEXT_LINE,
+ DMA_RW_STATE_UNLOCK_CHANNEL,
+ N_DMA_RW_STATES
+} dma_rw_states_t;
+
+typedef enum {
+ DMA_FIFO_STATE_WILL_BE_FULL,
+ DMA_FIFO_STATE_FULL,
+ DMA_FIFO_STATE_EMPTY,
+ N_DMA_FIFO_STATES
+} dma_fifo_states_t;
+
+/* typedef struct dma_state_s dma_state_t; */
+typedef struct dma_channel_state_s dma_channel_state_t;
+typedef struct dma_port_state_s dma_port_state_t;
+
+struct dma_port_state_s {
+ bool req_cs;
+ bool req_we_n;
+ bool req_run;
+ bool req_ack;
+ bool send_cs;
+ bool send_we_n;
+ bool send_run;
+ bool send_ack;
+ dma_fifo_states_t fifo_state;
+ int fifo_counter;
+};
+
+struct dma_channel_state_s {
+ int connection;
+ bool sign_extend;
+ int height;
+ int stride_a;
+ int elems_a;
+ int cropping_a;
+ int width_a;
+ int stride_b;
+ int elems_b;
+ int cropping_b;
+ int width_b;
+};
+
+struct dma_state_s {
+ bool fsm_command_idle;
+ bool fsm_command_run;
+ bool fsm_command_stalling;
+ bool fsm_command_error;
+ dma_commands_t last_command;
+ int last_command_channel;
+ int last_command_param;
+ dma_commands_t current_command;
+ int current_addr_a;
+ int current_addr_b;
+ bool fsm_ctrl_idle;
+ bool fsm_ctrl_run;
+ bool fsm_ctrl_stalling;
+ bool fsm_ctrl_error;
+ dma_ctrl_states_t fsm_ctrl_state;
+ int fsm_ctrl_source_dev;
+ int fsm_ctrl_source_addr;
+ int fsm_ctrl_source_stride;
+ int fsm_ctrl_source_width;
+ int fsm_ctrl_source_height;
+ int fsm_ctrl_pack_source_dev;
+ int fsm_ctrl_pack_dest_dev;
+ int fsm_ctrl_dest_addr;
+ int fsm_ctrl_dest_stride;
+ int fsm_ctrl_pack_source_width;
+ int fsm_ctrl_pack_dest_height;
+ int fsm_ctrl_pack_dest_width;
+ int fsm_ctrl_pack_source_elems;
+ int fsm_ctrl_pack_dest_elems;
+ int fsm_ctrl_pack_extension;
+ int pack_idle;
+ int pack_run;
+ int pack_stalling;
+ int pack_error;
+ int pack_cnt_height;
+ int pack_src_cnt_width;
+ int pack_dest_cnt_width;
+ dma_rw_states_t read_state;
+ int read_cnt_height;
+ int read_cnt_width;
+ dma_rw_states_t write_state;
+ int write_height;
+ int write_width;
+ dma_port_state_t port_states[HIVE_ISP_NUM_DMA_CONNS];
+ dma_channel_state_t channel_states[HIVE_DMA_NUM_CHANNELS];
+};
+
+#endif /* __DMA_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h
new file mode 100644
index 000000000000..ebb75da72e18
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_PRIVATE_H_INCLUDED__
+#define __DMA_PRIVATE_H_INCLUDED__
+
+#include "dma_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_DMA_C void dma_reg_store(const dma_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_DMA_ID);
+ assert(DMA_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(DMA_BASE[ID] + reg * sizeof(hrt_data), value);
+}
+
+STORAGE_CLASS_DMA_C hrt_data dma_reg_load(const dma_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_DMA_ID);
+ assert(DMA_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(DMA_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __DMA_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c
new file mode 100644
index 000000000000..777670948d6f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c
@@ -0,0 +1,19 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "event_fifo.h"
+
+#ifndef __INLINE_EVENT__
+#include "event_fifo_private.h"
+#endif /* __INLINE_EVENT__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h
new file mode 100644
index 000000000000..39a9dd697096
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h
@@ -0,0 +1,61 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _EVENT_FIFO_LOCAL_H
+#define _EVENT_FIFO_LOCAL_H
+
+/*
+ * All events come from connections mapped on the system
+ * bus but do not use a global IRQ
+ */
+#include "event_fifo_global.h"
+
+typedef enum {
+ SP0_EVENT_ID,
+ ISP0_EVENT_ID,
+ STR2MIPI_EVENT_ID,
+ N_EVENT_ID
+} event_ID_t;
+
+#define EVENT_QUERY_BIT 0
+
+/* Events are read from FIFO */
+static const hrt_address event_source_addr[N_EVENT_ID] = {
+ 0x0000000000380000ULL,
+ 0x0000000000380004ULL,
+ 0xffffffffffffffffULL
+};
+
+/* Reads from the FIFO are blocking; query data availability first */
+static const hrt_address event_source_query_addr[N_EVENT_ID] = {
+ 0x0000000000380010ULL,
+ 0x0000000000380014ULL,
+ 0xffffffffffffffffULL
+};
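+
+/*
+ * The STR2MIPI connection is send-only: it has no readable source FIFO,
+ * so its source and source-query slots above hold the unmapped address
+ * (all ones), which the asserts in event_fifo_private.h reject.
+ */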
+
+/* Events are written to FIFO */
+static const hrt_address event_sink_addr[N_EVENT_ID] = {
+ 0x0000000000380008ULL,
+ 0x000000000038000CULL,
+ 0x0000000000090104ULL
+};
+
+/* Writes to the FIFO are blocking; query for free space first */
+static const hrt_address event_sink_query_addr[N_EVENT_ID] = {
+ 0x0000000000380018ULL,
+ 0x000000000038001CULL,
+ 0x000000000009010CULL
+};
+
+#endif /* _EVENT_FIFO_LOCAL_H */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h
new file mode 100644
index 000000000000..3b6cc27ecb28
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h
@@ -0,0 +1,77 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __EVENT_FIFO_PRIVATE_H
+#define __EVENT_FIFO_PRIVATE_H
+
+#include "event_fifo_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+#include <bits.h> /* _hrt_get_bits() */
+
+STORAGE_CLASS_EVENT_C void event_wait_for(const event_ID_t ID)
+{
+ assert(ID < N_EVENT_ID);
+ assert(event_source_addr[ID] != ((hrt_address) - 1));
+ (void)ia_css_device_load_uint32(event_source_addr[ID]);
+}
+
+STORAGE_CLASS_EVENT_C void cnd_event_wait_for(const event_ID_t ID,
+ const bool cnd)
+{
+ if (cnd) {
+ event_wait_for(ID);
+ }
+}
+
+STORAGE_CLASS_EVENT_C hrt_data event_receive_token(const event_ID_t ID)
+{
+ assert(ID < N_EVENT_ID);
+ assert(event_source_addr[ID] != ((hrt_address) - 1));
+ return ia_css_device_load_uint32(event_source_addr[ID]);
+}
+
+STORAGE_CLASS_EVENT_C void event_send_token(const event_ID_t ID,
+ const hrt_data token)
+{
+ assert(ID < N_EVENT_ID);
+ assert(event_sink_addr[ID] != ((hrt_address) - 1));
+ ia_css_device_store_uint32(event_sink_addr[ID], token);
+}
+
+STORAGE_CLASS_EVENT_C bool is_event_pending(const event_ID_t ID)
+{
+ hrt_data value;
+
+ assert(ID < N_EVENT_ID);
+ assert(event_source_query_addr[ID] != ((hrt_address) - 1));
+ value = ia_css_device_load_uint32(event_source_query_addr[ID]);
+ return !_hrt_get_bit(value, EVENT_QUERY_BIT);
+}
+
+STORAGE_CLASS_EVENT_C bool can_event_send_token(const event_ID_t ID)
+{
+ hrt_data value;
+
+ assert(ID < N_EVENT_ID);
+ assert(event_sink_query_addr[ID] != ((hrt_address) - 1));
+ value = ia_css_device_load_uint32(event_sink_query_addr[ID]);
+ return !_hrt_get_bit(value, EVENT_QUERY_BIT);
+}
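+
+/*
+ * Non-blocking sketch (illustrative): reads and writes on the event
+ * FIFOs block, so poll the query registers first:
+ *
+ *	if (can_event_send_token(SP0_EVENT_ID))
+ *		event_send_token(SP0_EVENT_ID, token);
+ *	if (is_event_pending(SP0_EVENT_ID))
+ *		token = event_receive_token(SP0_EVENT_ID);
+ */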
+
+#endif /* __EVENT_FIFO_PRIVATE_H */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c
new file mode 100644
index 000000000000..82f7c43bcb0a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c
@@ -0,0 +1,569 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "fifo_monitor.h"
+
+#include <type_support.h>
+#include "device_access.h"
+
+#include <bits.h>
+
+#include "gp_device.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_FIFO_MONITOR__
+#define STORAGE_CLASS_FIFO_MONITOR_DATA static const
+#else
+#define STORAGE_CLASS_FIFO_MONITOR_DATA const
+#endif /* __INLINE_FIFO_MONITOR__ */
+
+STORAGE_CLASS_FIFO_MONITOR_DATA unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH] = {
+ _REG_GP_SWITCH_IF_ADDR,
+ _REG_GP_SWITCH_GDC1_ADDR,
+ _REG_GP_SWITCH_GDC2_ADDR
+};
+
+#ifndef __INLINE_FIFO_MONITOR__
+#include "fifo_monitor_private.h"
+#endif /* __INLINE_FIFO_MONITOR__ */
+
+static inline bool fifo_monitor_status_valid(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id);
+
+static inline bool fifo_monitor_status_accept(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id);
+
+void fifo_channel_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_channel_t channel_id,
+ fifo_channel_state_t *state)
+{
+ assert(channel_id < N_FIFO_CHANNEL);
+ assert(state);
+
+ switch (channel_id) {
+ case FIFO_CHANNEL_ISP0_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_SP); /* ISP_STR_MON_PORT_ISP2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_SP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_ISP); /* ISP_STR_MON_PORT_SP2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_ISP);
+ break;
+ case FIFO_CHANNEL_SP0_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_ISP); /* ISP_STR_MON_PORT_SP2ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_ISP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_SP); /* ISP_STR_MON_PORT_ISP2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_SP);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_IF0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_A); /* ISP_STR_MON_PORT_ISP2PIFA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_IF0_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_A); /* ISP_STR_MON_PORT_PIFA2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_IF1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_B); /* ISP_STR_MON_PORT_ISP2PIFA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_IF1_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B); /* ISP_STR_MON_PORT_PIFB2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_DMA0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_DMA); /* ISP_STR_MON_PORT_ISP2DMA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_DMA);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_ISP); /* MOD_STR_MON_PORT_ISP2DMA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_ISP);
+ break;
+ case FIFO_CHANNEL_DMA0_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2ISP); /* MOD_STR_MON_PORT_DMA2ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2ISP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_DMA); /* ISP_STR_MON_PORT_DMA2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_DMA);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_GDC0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GDC); /* ISP_STR_MON_PORT_ISP2GDC1 */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GDC);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_GDC); /* MOD_STR_MON_PORT_CELLS2GDC1 */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_GDC);
+ break;
+ case FIFO_CHANNEL_GDC0_TO_ISP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_GDC); /* MOD_STR_MON_PORT_GDC12CELLS */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_GDC);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GDC); /* ISP_STR_MON_PORT_GDC12ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GDC);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_GDC1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_ISP2GDC2);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_ISP2GDC2);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ break;
+ case FIFO_CHANNEL_GDC1_TO_ISP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_GDC22ISP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_GDC22ISP);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_HOST0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GPD); /* ISP_STR_MON_PORT_ISP2GPD */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GPD);
+ {
+ hrt_data value = ia_css_device_load_uint32(0x0000000000380014ULL);
+
+ state->fifo_valid = !_hrt_get_bit(value, 0);
+ state->sink_accept = false; /* no monitor connected */
+ }
+ break;
+ case FIFO_CHANNEL_HOST0_TO_ISP0: {
+ hrt_data value = ia_css_device_load_uint32(0x000000000038001CULL);
+
+ state->fifo_valid = false; /* no monitor connected */
+ state->sink_accept = !_hrt_get_bit(value, 0);
+ }
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GPD); /* ISP_STR_MON_PORT_FA2ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GPD);
+ break;
+ case FIFO_CHANNEL_SP0_TO_IF0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_A); /* SP_STR_MON_PORT_SP2PIFA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_IF0_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_PIF_A); /* SP_STR_MON_PORT_PIFA2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_SP0_TO_IF1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_B); /* SP_STR_MON_PORT_SP2PIFB */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_IF1_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B);
+		state->fifo_valid = fifo_monitor_status_valid(ID,
+				    HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+				    SP_STR_MON_PORT_RCV_PIF_B); /* SP_STR_MON_PORT_PIFB2SP */
+		state->sink_accept = fifo_monitor_status_accept(ID,
+				     HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+				     SP_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_SP0_TO_IF2:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_SIF); /* SP_STR_MON_PORT_SP2SIF */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_SIF);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_SIF); /* MOD_STR_MON_PORT_SP2SIF */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_SIF);
+ break;
+ case FIFO_CHANNEL_IF2_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_SIF); /* MOD_STR_MON_PORT_SIF2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_SIF);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_SIF); /* SP_STR_MON_PORT_SIF2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_SIF);
+ break;
+ case FIFO_CHANNEL_SP0_TO_DMA0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_DMA); /* SP_STR_MON_PORT_SP2DMA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_DMA);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_SP); /* MOD_STR_MON_PORT_SP2DMA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_SP);
+ break;
+ case FIFO_CHANNEL_DMA0_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2SP); /* MOD_STR_MON_PORT_DMA2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2SP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_DMA); /* SP_STR_MON_PORT_DMA2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_DMA);
+ break;
+ case FIFO_CHANNEL_SP0_TO_GDC0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC1);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC1);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC1);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC1);
+ break;
+ case FIFO_CHANNEL_GDC0_TO_SP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC12CELLS);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC12CELLS);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC12SP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC12SP);
+ break;
+ case FIFO_CHANNEL_SP0_TO_GDC1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC2);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC2);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ break;
+ case FIFO_CHANNEL_GDC1_TO_SP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC22SP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC22SP);
+ break;
+ case FIFO_CHANNEL_SP0_TO_HOST0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_GPD); /* SP_STR_MON_PORT_SP2GPD */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_GPD);
+ {
+ hrt_data value = ia_css_device_load_uint32(0x0000000000380010ULL);
+
+ state->fifo_valid = !_hrt_get_bit(value, 0);
+ state->sink_accept = false; /* no monitor connected */
+ }
+ break;
+ case FIFO_CHANNEL_HOST0_TO_SP0: {
+ hrt_data value = ia_css_device_load_uint32(0x0000000000380018ULL);
+
+ state->fifo_valid = false; /* no monitor connected */
+ state->sink_accept = !_hrt_get_bit(value, 0);
+ }
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_GPD); /* SP_STR_MON_PORT_FA2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_GPD);
+ break;
+ case FIFO_CHANNEL_SP0_TO_STREAM2MEM0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_MC); /* SP_STR_MON_PORT_SP2MC */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_MC);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_MC); /* MOD_STR_MON_PORT_SP2MC */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_MC);
+ break;
+ case FIFO_CHANNEL_STREAM2MEM0_TO_SP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+				    MOD_STR_MON_PORT_SND_MC); /* MOD_STR_MON_PORT_MC2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_MC);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+				   SP_STR_MON_PORT_RCV_MC); /* SP_STR_MON_PORT_MC2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_MC);
+ break;
+ case FIFO_CHANNEL_SP0_TO_INPUT_SYSTEM0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SP2ISYS);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SP2ISYS);
+ state->fifo_valid = false;
+ state->sink_accept = false;
+ break;
+ case FIFO_CHANNEL_INPUT_SYSTEM0_TO_SP0:
+ state->fifo_valid = false;
+ state->sink_accept = false;
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_ISYS2SP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_ISYS2SP);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+void fifo_switch_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ fifo_switch_state_t *state)
+{
+ hrt_data data = (hrt_data)-1;
+
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(switch_id < N_FIFO_SWITCH);
+ assert(state);
+
+ (void)ID;
+
+ data = gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
+
+ state->is_none = (data == HIVE_ISP_CSS_STREAM_SWITCH_NONE);
+ state->is_sp = (data == HIVE_ISP_CSS_STREAM_SWITCH_SP);
+ state->is_isp = (data == HIVE_ISP_CSS_STREAM_SWITCH_ISP);
+}
+
+void fifo_monitor_get_state(
+ const fifo_monitor_ID_t ID,
+ fifo_monitor_state_t *state)
+{
+ fifo_channel_t ch_id;
+ fifo_switch_t sw_id;
+
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(state);
+
+ for (ch_id = 0; ch_id < N_FIFO_CHANNEL; ch_id++) {
+ fifo_channel_get_state(ID, ch_id,
+ &state->fifo_channels[ch_id]);
+ }
+
+ for (sw_id = 0; sw_id < N_FIFO_SWITCH; sw_id++) {
+ fifo_switch_get_state(ID, sw_id,
+ &state->fifo_switches[sw_id]);
+ }
+}
+
+static inline bool fifo_monitor_status_valid(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id)
+{
+ hrt_data data = fifo_monitor_reg_load(ID, reg);
+
+ return (data >> (((port_id * 2) + _hive_str_mon_valid_offset))) & 0x1;
+}
+
+static inline bool fifo_monitor_status_accept(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id)
+{
+ hrt_data data = fifo_monitor_reg_load(ID, reg);
+
+ return (data >> (((port_id * 2) + _hive_str_mon_accept_offset))) & 0x1;
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h
new file mode 100644
index 000000000000..a557ff8a416f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h
@@ -0,0 +1,99 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_LOCAL_H_INCLUDED__
+#define __FIFO_MONITOR_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+#include "fifo_monitor_global.h"
+
+#include "hive_isp_css_defs.h" /* ISP_STR_MON_PORT_SND_SP, ... */
+
+#define _hive_str_mon_valid_offset 0
+#define _hive_str_mon_accept_offset 1
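+
+/*
+ * Each monitored port occupies two adjacent bits in its stream status
+ * register: bit (2 * port_id) is 'valid' and bit (2 * port_id + 1) is
+ * 'accept', which is what fifo_monitor_status_valid()/_accept() extract.
+ */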
+
+#define FIFO_CHANNEL_SP_VALID_MASK 0x55555555
+#define FIFO_CHANNEL_SP_VALID_B_MASK 0x00000055
+#define FIFO_CHANNEL_ISP_VALID_MASK 0x15555555
+#define FIFO_CHANNEL_MOD_VALID_MASK 0x55555555
+
+typedef enum fifo_switch {
+ FIFO_SWITCH_IF,
+ FIFO_SWITCH_GDC0,
+ FIFO_SWITCH_GDC1,
+ N_FIFO_SWITCH
+} fifo_switch_t;
+
+typedef enum fifo_channel {
+ FIFO_CHANNEL_ISP0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_IF0,
+ FIFO_CHANNEL_IF0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_IF1,
+ FIFO_CHANNEL_IF1_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_DMA0,
+ FIFO_CHANNEL_DMA0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_GDC0,
+ FIFO_CHANNEL_GDC0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_GDC1,
+ FIFO_CHANNEL_GDC1_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_HOST0,
+ FIFO_CHANNEL_HOST0_TO_ISP0,
+ FIFO_CHANNEL_SP0_TO_IF0,
+ FIFO_CHANNEL_IF0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_IF1,
+ FIFO_CHANNEL_IF1_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_IF2,
+ FIFO_CHANNEL_IF2_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_DMA0,
+ FIFO_CHANNEL_DMA0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_GDC0,
+ FIFO_CHANNEL_GDC0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_GDC1,
+ FIFO_CHANNEL_GDC1_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_HOST0,
+ FIFO_CHANNEL_HOST0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_STREAM2MEM0,
+ FIFO_CHANNEL_STREAM2MEM0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_INPUT_SYSTEM0,
+ FIFO_CHANNEL_INPUT_SYSTEM0_TO_SP0,
+ /*
+ * No clue what this is
+ *
+ FIFO_CHANNEL_SP0_TO_IRQ0,
+ FIFO_CHANNEL_IRQ0_TO_SP0,
+ */
+ N_FIFO_CHANNEL
+} fifo_channel_t;
+
+struct fifo_channel_state_s {
+ bool src_valid;
+ bool fifo_accept;
+ bool fifo_valid;
+ bool sink_accept;
+};
+
+/* The switch is tri-state */
+struct fifo_switch_state_s {
+ bool is_none;
+ bool is_isp;
+ bool is_sp;
+};
+
+struct fifo_monitor_state_s {
+ struct fifo_channel_state_s fifo_channels[N_FIFO_CHANNEL];
+ struct fifo_switch_state_s fifo_switches[N_FIFO_SWITCH];
+};
+
+#endif /* __FIFO_MONITOR_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h
new file mode 100644
index 000000000000..abaef8672ae2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h
@@ -0,0 +1,80 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_PRIVATE_H_INCLUDED__
+#define __FIFO_MONITOR_PRIVATE_H_INCLUDED__
+
+#include "fifo_monitor_public.h"
+
+#define __INLINE_GP_DEVICE__
+#include "gp_device.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+#ifdef __INLINE_FIFO_MONITOR__
+extern const unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH];
+#endif
+
+STORAGE_CLASS_FIFO_MONITOR_C void fifo_switch_set(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ const hrt_data sel)
+{
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address) - 1);
+ assert(switch_id < N_FIFO_SWITCH);
+ (void)ID;
+
+ gp_device_reg_store(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id], sel);
+
+ return;
+}
+
+STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_switch_get(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id)
+{
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address) - 1);
+ assert(switch_id < N_FIFO_SWITCH);
+ (void)ID;
+
+ return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
+}
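+
+/*
+ * Usage sketch (illustrative only): route one of the FIFO switches,
+ * e.g. the GDC0 switch, by writing a selector value; what 'sel'
+ * encodes is defined by the switch hardware, not by these helpers.
+ *
+ * fifo_switch_set(FIFO_MONITOR0_ID, FIFO_SWITCH_GDC0, sel);
+ * sel = fifo_switch_get(FIFO_MONITOR0_ID, FIFO_SWITCH_GDC0);
+ */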
+
+STORAGE_CLASS_FIFO_MONITOR_C void fifo_monitor_reg_store(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(FIFO_MONITOR_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+ return;
+}
+
+STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_monitor_reg_load(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] +
+ reg * sizeof(hrt_data));
+}
+
+#endif /* __FIFO_MONITOR_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c
new file mode 100644
index 000000000000..65c5296163dd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c
@@ -0,0 +1,125 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* The name "gdc.h is already taken" */
+#include "gdc_device.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+/*
+ * Local function declarations
+ */
+static inline void gdc_reg_store(
+ const gdc_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+static inline hrt_data gdc_reg_load(
+ const gdc_ID_t ID,
+ const unsigned int reg);
+
+#ifndef __INLINE_GDC__
+#include "gdc_private.h"
+#endif /* __INLINE_GDC__ */
+
+/*
+ * Exported function implementations
+ */
+void gdc_lut_store(
+ const gdc_ID_t ID,
+ const int data[4][HRT_GDC_N])
+{
+ unsigned int i, lut_offset = HRT_GDC_LUT_IDX;
+
+ assert(ID < N_GDC_ID);
+ assert(HRT_GDC_LUT_COEFF_OFFSET <= (4 * sizeof(hrt_data)));
+
+ for (i = 0; i < HRT_GDC_N; i++) {
+ hrt_data entry_0 = data[0][i] & HRT_GDC_BCI_COEF_MASK;
+ hrt_data entry_1 = data[1][i] & HRT_GDC_BCI_COEF_MASK;
+ hrt_data entry_2 = data[2][i] & HRT_GDC_BCI_COEF_MASK;
+ hrt_data entry_3 = data[3][i] & HRT_GDC_BCI_COEF_MASK;
+
+ hrt_data word_0 = entry_0 |
+ (entry_1 << HRT_GDC_LUT_COEFF_OFFSET);
+ hrt_data word_1 = entry_2 |
+ (entry_3 << HRT_GDC_LUT_COEFF_OFFSET);
+
+ gdc_reg_store(ID, lut_offset++, word_0);
+ gdc_reg_store(ID, lut_offset++, word_1);
+ }
+ return;
+}
+
+/*
+ * Input LUT format:
+ * c0[0-1023], c1[0-1023], c2[0-1023], c3[0-1023]
+ *
+ * Output LUT format (interleaved):
+ * c0[0], c1[0], c2[0], c3[0], c0[1], c1[1], c2[1], c3[1], ....
+ * c0[1023], c1[1023], c2[1023], c3[1023]
+ *
+ * The first format needs c0[0] and c1[0] (which are 1024 words apart)
+ * to program the GDC LUT registers. This makes it difficult to do
+ * piecemeal reads in the SP-side gdc_lut_store.
+ *
+ * Interleaved format allows use of contiguous bytes to store into
+ * gdc LUT registers.
+ *
+ * See gdc_lut_store() definition in host/gdc.c vs sp/gdc_private.h
+ *
+ */
+void gdc_lut_convert_to_isp_format(const int in_lut[4][HRT_GDC_N],
+ int out_lut[4][HRT_GDC_N])
+{
+ unsigned int i;
+ int *out = (int *)out_lut;
+
+ for (i = 0; i < HRT_GDC_N; i++) {
+ out[0] = in_lut[0][i];
+ out[1] = in_lut[1][i];
+ out[2] = in_lut[2][i];
+ out[3] = in_lut[3][i];
+ out += 4;
+ }
+}
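+
+/*
+ * Worked example of the interleaving above: iteration i copies the four
+ * coefficients of tap i next to each other, so, viewing out_lut as a
+ * flat array,
+ *
+ * out[4 * i + c] == in_lut[c][i] for c in [0, 3]
+ */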
+
+int gdc_get_unity(
+ const gdc_ID_t ID)
+{
+ assert(ID < N_GDC_ID);
+ (void)ID;
+ return (int)(1UL << HRT_GDC_FRAC_BITS);
+}
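+
+/*
+ * Note: the returned value is the fixed-point representation of 1.0
+ * (1 << HRT_GDC_FRAC_BITS); a scaling factor f would be expressed as
+ * (int)(f * gdc_get_unity(ID)) in this format.
+ */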
+
+/*
+ * Local function implementations
+ */
+static inline void gdc_reg_store(
+ const gdc_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ ia_css_device_store_uint32(GDC_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+static inline hrt_data gdc_reg_load(
+ const gdc_ID_t ID,
+ const unsigned int reg)
+{
+ return ia_css_device_load_uint32(GDC_BASE[ID] + reg * sizeof(hrt_data));
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h
new file mode 100644
index 000000000000..0c6de867e012
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_LOCAL_H_INCLUDED__
+#define __GDC_LOCAL_H_INCLUDED__
+
+#include "gdc_global.h"
+
+#endif /* __GDC_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h
new file mode 100644
index 000000000000..f7dec75adf78
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_PRIVATE_H_INCLUDED__
+#define __GDC_PRIVATE_H_INCLUDED__
+
+#include "gdc_public.h"
+
+#endif /* __GDC_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c
new file mode 100644
index 000000000000..5f20ac0b492e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c
@@ -0,0 +1,108 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "assert_support.h"
+#include "gp_device.h"
+
+#ifndef __INLINE_GP_DEVICE__
+#include "gp_device_private.h"
+#endif /* __INLINE_GP_DEVICE__ */
+
+void gp_device_get_state(
+ const gp_device_ID_t ID,
+ gp_device_state_t *state)
+{
+ assert(ID < N_GP_DEVICE_ID);
+ assert(state);
+
+ state->syncgen_enable = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_ENABLE_ADDR);
+ state->syncgen_free_running = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_FREE_RUNNING_ADDR);
+ state->syncgen_pause = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_PAUSE_ADDR);
+ state->nr_frames = gp_device_reg_load(ID,
+ _REG_GP_NR_FRAMES_ADDR);
+ state->syngen_nr_pix = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_NR_PIX_ADDR);
+ state->syngen_nr_lines = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_NR_LINES_ADDR);
+ state->syngen_hblank_cycles = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR);
+ state->syngen_vblank_cycles = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR);
+ state->isel_sof = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SOF_ADDR);
+ state->isel_eof = gp_device_reg_load(ID,
+ _REG_GP_ISEL_EOF_ADDR);
+ state->isel_sol = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SOL_ADDR);
+ state->isel_eol = gp_device_reg_load(ID,
+ _REG_GP_ISEL_EOL_ADDR);
+ state->isel_lfsr_enable = gp_device_reg_load(ID,
+ _REG_GP_ISEL_LFSR_ENABLE_ADDR);
+ state->isel_lfsr_enable_b = gp_device_reg_load(ID,
+ _REG_GP_ISEL_LFSR_ENABLE_B_ADDR);
+ state->isel_lfsr_reset_value = gp_device_reg_load(ID,
+ _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR);
+ state->isel_tpg_enable = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_ENABLE_ADDR);
+ state->isel_tpg_enable_b = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_ENABLE_B_ADDR);
+ state->isel_hor_cnt_mask = gp_device_reg_load(ID,
+ _REG_GP_ISEL_HOR_CNT_MASK_ADDR);
+ state->isel_ver_cnt_mask = gp_device_reg_load(ID,
+ _REG_GP_ISEL_VER_CNT_MASK_ADDR);
+ state->isel_xy_cnt_mask = gp_device_reg_load(ID,
+ _REG_GP_ISEL_XY_CNT_MASK_ADDR);
+ state->isel_hor_cnt_delta = gp_device_reg_load(ID,
+ _REG_GP_ISEL_HOR_CNT_DELTA_ADDR);
+ state->isel_ver_cnt_delta = gp_device_reg_load(ID,
+ _REG_GP_ISEL_VER_CNT_DELTA_ADDR);
+ state->isel_tpg_mode = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_MODE_ADDR);
+ state->isel_tpg_red1 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_RED1_ADDR);
+ state->isel_tpg_green1 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_GREEN1_ADDR);
+ state->isel_tpg_blue1 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_BLUE1_ADDR);
+ state->isel_tpg_red2 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_RED2_ADDR);
+ state->isel_tpg_green2 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_GREEN2_ADDR);
+ state->isel_tpg_blue2 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_BLUE2_ADDR);
+ state->isel_ch_id = gp_device_reg_load(ID,
+ _REG_GP_ISEL_CH_ID_ADDR);
+ state->isel_fmt_type = gp_device_reg_load(ID,
+ _REG_GP_ISEL_FMT_TYPE_ADDR);
+ state->isel_data_sel = gp_device_reg_load(ID,
+ _REG_GP_ISEL_DATA_SEL_ADDR);
+ state->isel_sband_sel = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SBAND_SEL_ADDR);
+ state->isel_sync_sel = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SYNC_SEL_ADDR);
+ state->syncgen_hor_cnt = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_HOR_CNT_ADDR);
+ state->syncgen_ver_cnt = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_VER_CNT_ADDR);
+ state->syncgen_frame_cnt = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_FRAME_CNT_ADDR);
+ state->soft_reset = gp_device_reg_load(ID,
+ _REG_GP_SOFT_RESET_ADDR);
+ return;
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h
new file mode 100644
index 000000000000..113d5ed32d42
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h
@@ -0,0 +1,143 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_LOCAL_H_INCLUDED__
+#define __GP_DEVICE_LOCAL_H_INCLUDED__
+
+#include "gp_device_global.h"
+
+/* @ GP_REGS_BASE -> GP_DEVICE_BASE */
+#define _REG_GP_SDRAM_WAKEUP_ADDR 0x00
+#define _REG_GP_IDLE_ADDR 0x04
+/* #define _REG_GP_IRQ_REQ0_ADDR 0x08 */
+/* #define _REG_GP_IRQ_REQ1_ADDR 0x0C */
+#define _REG_GP_SP_STREAM_STAT_ADDR 0x10
+#define _REG_GP_SP_STREAM_STAT_B_ADDR 0x14
+#define _REG_GP_ISP_STREAM_STAT_ADDR 0x18
+#define _REG_GP_MOD_STREAM_STAT_ADDR 0x1C
+#define _REG_GP_SP_STREAM_STAT_IRQ_COND_ADDR 0x20
+#define _REG_GP_SP_STREAM_STAT_B_IRQ_COND_ADDR 0x24
+#define _REG_GP_ISP_STREAM_STAT_IRQ_COND_ADDR 0x28
+#define _REG_GP_MOD_STREAM_STAT_IRQ_COND_ADDR 0x2C
+#define _REG_GP_SP_STREAM_STAT_IRQ_ENABLE_ADDR 0x30
+#define _REG_GP_SP_STREAM_STAT_B_IRQ_ENABLE_ADDR 0x34
+#define _REG_GP_ISP_STREAM_STAT_IRQ_ENABLE_ADDR 0x38
+#define _REG_GP_MOD_STREAM_STAT_IRQ_ENABLE_ADDR 0x3C
+/*
+#define _REG_GP_SWITCH_IF_ADDR 0x40
+#define _REG_GP_SWITCH_GDC1_ADDR 0x44
+#define _REG_GP_SWITCH_GDC2_ADDR 0x48
+*/
+#define _REG_GP_SLV_REG_RST_ADDR 0x50
+#define _REG_GP_SWITCH_ISYS2401_ADDR 0x54
+
+/* @ INPUT_FORMATTER_BASE -> GP_DEVICE_BASE */
+/*
+#define _REG_GP_IFMT_input_switch_lut_reg0 0x00030800
+#define _REG_GP_IFMT_input_switch_lut_reg1 0x00030804
+#define _REG_GP_IFMT_input_switch_lut_reg2 0x00030808
+#define _REG_GP_IFMT_input_switch_lut_reg3 0x0003080C
+#define _REG_GP_IFMT_input_switch_lut_reg4 0x00030810
+#define _REG_GP_IFMT_input_switch_lut_reg5 0x00030814
+#define _REG_GP_IFMT_input_switch_lut_reg6 0x00030818
+#define _REG_GP_IFMT_input_switch_lut_reg7 0x0003081C
+#define _REG_GP_IFMT_input_switch_fsync_lut 0x00030820
+#define _REG_GP_IFMT_srst 0x00030824
+#define _REG_GP_IFMT_slv_reg_srst 0x00030828
+#define _REG_GP_IFMT_input_switch_ch_id_fmt_type 0x0003082C
+*/
+/* @ GP_DEVICE_BASE */
+/*
+#define _REG_GP_SYNCGEN_ENABLE_ADDR 0x00090000
+#define _REG_GP_SYNCGEN_FREE_RUNNING_ADDR 0x00090004
+#define _REG_GP_SYNCGEN_PAUSE_ADDR 0x00090008
+#define _REG_GP_NR_FRAMES_ADDR 0x0009000C
+#define _REG_GP_SYNGEN_NR_PIX_ADDR 0x00090010
+#define _REG_GP_SYNGEN_NR_LINES_ADDR 0x00090014
+#define _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR 0x00090018
+#define _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR 0x0009001C
+#define _REG_GP_ISEL_SOF_ADDR 0x00090020
+#define _REG_GP_ISEL_EOF_ADDR 0x00090024
+#define _REG_GP_ISEL_SOL_ADDR 0x00090028
+#define _REG_GP_ISEL_EOL_ADDR 0x0009002C
+#define _REG_GP_ISEL_LFSR_ENABLE_ADDR 0x00090030
+#define _REG_GP_ISEL_LFSR_ENABLE_B_ADDR 0x00090034
+#define _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR 0x00090038
+#define _REG_GP_ISEL_TPG_ENABLE_ADDR 0x0009003C
+#define _REG_GP_ISEL_TPG_ENABLE_B_ADDR 0x00090040
+#define _REG_GP_ISEL_HOR_CNT_MASK_ADDR 0x00090044
+#define _REG_GP_ISEL_VER_CNT_MASK_ADDR 0x00090048
+#define _REG_GP_ISEL_XY_CNT_MASK_ADDR 0x0009004C
+#define _REG_GP_ISEL_HOR_CNT_DELTA_ADDR 0x00090050
+#define _REG_GP_ISEL_VER_CNT_DELTA_ADDR 0x00090054
+#define _REG_GP_ISEL_TPG_MODE_ADDR 0x00090058
+#define _REG_GP_ISEL_TPG_RED1_ADDR 0x0009005C
+#define _REG_GP_ISEL_TPG_GREEN1_ADDR 0x00090060
+#define _REG_GP_ISEL_TPG_BLUE1_ADDR 0x00090064
+#define _REG_GP_ISEL_TPG_RED2_ADDR 0x00090068
+#define _REG_GP_ISEL_TPG_GREEN2_ADDR 0x0009006C
+#define _REG_GP_ISEL_TPG_BLUE2_ADDR 0x00090070
+#define _REG_GP_ISEL_CH_ID_ADDR 0x00090074
+#define _REG_GP_ISEL_FMT_TYPE_ADDR 0x00090078
+#define _REG_GP_ISEL_DATA_SEL_ADDR 0x0009007C
+#define _REG_GP_ISEL_SBAND_SEL_ADDR 0x00090080
+#define _REG_GP_ISEL_SYNC_SEL_ADDR 0x00090084
+#define _REG_GP_SYNCGEN_HOR_CNT_ADDR 0x00090088
+#define _REG_GP_SYNCGEN_VER_CNT_ADDR 0x0009008C
+#define _REG_GP_SYNCGEN_FRAME_CNT_ADDR 0x00090090
+#define _REG_GP_SOFT_RESET_ADDR 0x00090094
+*/
+
+struct gp_device_state_s {
+ int syncgen_enable;
+ int syncgen_free_running;
+ int syncgen_pause;
+ int nr_frames;
+ int syngen_nr_pix;
+ int syngen_nr_lines;
+ int syngen_hblank_cycles;
+ int syngen_vblank_cycles;
+ int isel_sof;
+ int isel_eof;
+ int isel_sol;
+ int isel_eol;
+ int isel_lfsr_enable;
+ int isel_lfsr_enable_b;
+ int isel_lfsr_reset_value;
+ int isel_tpg_enable;
+ int isel_tpg_enable_b;
+ int isel_hor_cnt_mask;
+ int isel_ver_cnt_mask;
+ int isel_xy_cnt_mask;
+ int isel_hor_cnt_delta;
+ int isel_ver_cnt_delta;
+ int isel_tpg_mode;
+ int isel_tpg_red1;
+ int isel_tpg_green1;
+ int isel_tpg_blue1;
+ int isel_tpg_red2;
+ int isel_tpg_green2;
+ int isel_tpg_blue2;
+ int isel_ch_id;
+ int isel_fmt_type;
+ int isel_data_sel;
+ int isel_sband_sel;
+ int isel_sync_sel;
+ int syncgen_hor_cnt;
+ int syncgen_ver_cnt;
+ int syncgen_frame_cnt;
+ int soft_reset;
+};
+
+#endif /* __GP_DEVICE_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h
new file mode 100644
index 000000000000..cdc1b12a9e8a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_PRIVATE_H_INCLUDED__
+#define __GP_DEVICE_PRIVATE_H_INCLUDED__
+
+#include "gp_device_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_GP_DEVICE_C void gp_device_reg_store(
+ const gp_device_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value)
+{
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ ia_css_device_store_uint32(GP_DEVICE_BASE[ID] + reg_addr, value);
+ return;
+}
+
+STORAGE_CLASS_GP_DEVICE_C hrt_data gp_device_reg_load(
+ const gp_device_ID_t ID,
+ const hrt_address reg_addr)
+{
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr);
+}
+
+#endif /* __GP_DEVICE_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c
new file mode 100644
index 000000000000..4a856f1f14bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c
@@ -0,0 +1,70 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h> /* uint32_t */
+#include "gp_timer.h" /* system_local.h,
+ gp_timer_public.h */
+
+#ifndef __INLINE_GP_TIMER__
+#include "gp_timer_private.h" /*device_access.h*/
+#endif /* __INLINE_GP_TIMER__ */
+#include "system_local.h"
+
+/* FIXME: not sure if reg_load(), reg_store() should be API.
+ */
+static uint32_t
+gp_timer_reg_load(uint32_t reg);
+
+static void
+gp_timer_reg_store(uint32_t reg, uint32_t value);
+
+static uint32_t
+gp_timer_reg_load(uint32_t reg)
+{
+ return ia_css_device_load_uint32(
+ GP_TIMER_BASE +
+ (reg * sizeof(uint32_t)));
+}
+
+static void
+gp_timer_reg_store(uint32_t reg, uint32_t value)
+{
+ ia_css_device_store_uint32((GP_TIMER_BASE +
+ (reg * sizeof(uint32_t))),
+ value);
+}
+
+void gp_timer_init(gp_timer_ID_t ID)
+{
+ /* set overall enable */
+ gp_timer_reg_store(_REG_GP_TIMER_OVERALL_ENABLE, 1);
+
+ /* set enable */
+ gp_timer_reg_store(_REG_GP_TIMER_ENABLE_ID(ID), 1);
+
+ /* set signal select */
+ gp_timer_reg_store(_REG_GP_TIMER_SIGNAL_SELECT_ID(ID), GP_TIMER_SIGNAL_SELECT);
+
+ /* set count type */
+ gp_timer_reg_store(_REG_GP_TIMER_COUNT_TYPE_ID(ID), GP_TIMER_COUNT_TYPE_LOW);
+
+ /* reset gp timer */
+ gp_timer_reg_store(_REG_GP_TIMER_RESET_REG, 0xFF);
+}
+
+uint32_t
+gp_timer_read(gp_timer_ID_t ID)
+{
+ return gp_timer_reg_load(_REG_GP_TIMER_VALUE_ID(ID));
+}
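+
+/*
+ * Usage sketch (GP_TIMER0_ID is assumed to exist for the platform):
+ * initialize a timer once, then sample it twice to measure elapsed
+ * ticks; the tick source is set by GP_TIMER_SIGNAL_SELECT above.
+ *
+ * gp_timer_init(GP_TIMER0_ID);
+ * t0 = gp_timer_read(GP_TIMER0_ID);
+ * ... workload ...
+ * ticks = gp_timer_read(GP_TIMER0_ID) - t0;
+ */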
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h
new file mode 100644
index 000000000000..4d5961c78c16
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_LOCAL_H_INCLUDED__
+#define __GP_TIMER_LOCAL_H_INCLUDED__
+
+#include "gp_timer_global.h" /*GP_TIMER_SEL
+ GP_TIMER_SIGNAL_SELECT*/
+
+#include "gp_timer_defs.h" /*HIVE_GP_TIMER_xxx registers*/
+#include "hive_isp_css_defs.h" /*HIVE_GP_TIMER_NUM_COUNTERS
+ HIVE_GP_TIMER_NUM_IRQS*/
+
+#define _REG_GP_TIMER_RESET_REG HIVE_GP_TIMER_RESET_REG_IDX
+#define _REG_GP_TIMER_OVERALL_ENABLE HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX
+
+/*
+ * Register offsets for timers [1,7] can be obtained
+ * by adding (GP_TIMERx_ID * sizeof(uint32_t))
+ */
+#define _REG_GP_TIMER_ENABLE_ID(timer_id) HIVE_GP_TIMER_ENABLE_REG_IDX(timer_id)
+#define _REG_GP_TIMER_VALUE_ID(timer_id) HIVE_GP_TIMER_VALUE_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS)
+#define _REG_GP_TIMER_COUNT_TYPE_ID(timer_id) HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS)
+#define _REG_GP_TIMER_SIGNAL_SELECT_ID(timer_id) HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS)
+
+#define _REG_GP_TIMER_IRQ_TRIGGER_VALUE_ID(irq_id) HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS)
+
+#define _REG_GP_TIMER_IRQ_TIMER_SELECT_ID(irq_id) \
+ HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS, HIVE_GP_TIMER_NUM_IRQS)
+
+#define _REG_GP_TIMER_IRQ_ENABLE_ID(irq_id) \
+ HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS, HIVE_GP_TIMER_NUM_IRQS)
+
+#endif /*__GP_TIMER_LOCAL_H_INCLUDED__*/
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h
new file mode 100644
index 000000000000..705be5e5cc70
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_PRIVATE_H_INCLUDED__
+#define __GP_TIMER_PRIVATE_H_INCLUDED__
+
+#include "gp_timer_public.h"
+#include "device_access.h"
+#include "assert_support.h"
+
+#endif /* __GP_TIMER_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_local.h
new file mode 100644
index 000000000000..f4652b79734d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_LOCAL_H_INCLUDED__
+#define __GPIO_LOCAL_H_INCLUDED__
+
+#include "gpio_global.h"
+
+#endif /* __GPIO_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h
new file mode 100644
index 000000000000..56b442040ad9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_PRIVATE_H_INCLUDED__
+#define __GPIO_PRIVATE_H_INCLUDED__
+
+#include "gpio_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_GPIO_C void gpio_reg_store(
+ const gpio_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ OP___assert(ID < N_GPIO_ID);
+ OP___assert(GPIO_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(GPIO_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_GPIO_C hrt_data gpio_reg_load(
+ const gpio_ID_t ID,
+ const unsigned int reg)
+{
+ OP___assert(ID < N_GPIO_ID);
+ OP___assert(GPIO_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(GPIO_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __GPIO_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c
new file mode 100644
index 000000000000..e48f180c9507
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c
@@ -0,0 +1,19 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "hmem.h"
+
+#ifndef __INLINE_HMEM__
+#include "hmem_private.h"
+#endif /* __INLINE_HMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h
new file mode 100644
index 000000000000..499f55f07253
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_LOCAL_H_INCLUDED__
+#define __HMEM_LOCAL_H_INCLUDED__
+
+#include "hmem_global.h"
+
+#endif /* __HMEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h
new file mode 100644
index 000000000000..270d04cc9d09
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_PRIVATE_H_INCLUDED__
+#define __HMEM_PRIVATE_H_INCLUDED__
+
+#include "hmem_public.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_HMEM_C size_t sizeof_hmem(
+ const hmem_ID_t ID)
+{
+ assert(ID < N_HMEM_ID);
+ (void)ID;
+ return HMEM_SIZE * sizeof(hmem_data_t);
+}
+
+#endif /* __HMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
new file mode 100644
index 000000000000..0c90c5ed659b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
@@ -0,0 +1,241 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+
+#include "input_formatter.h"
+#include <type_support.h>
+#include "gp_device.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_INPUT_FORMATTER__
+#include "input_formatter_private.h"
+#endif /* __INLINE_INPUT_FORMATTER__ */
+
+const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID] = {
+ INPUT_FORMATTER0_SRST_OFFSET,
+ INPUT_FORMATTER1_SRST_OFFSET,
+ INPUT_FORMATTER2_SRST_OFFSET,
+ INPUT_FORMATTER3_SRST_OFFSET
+};
+
+const hrt_data HIVE_IF_SRST_MASK[N_INPUT_FORMATTER_ID] = {
+ INPUT_FORMATTER0_SRST_MASK,
+ INPUT_FORMATTER1_SRST_MASK,
+ INPUT_FORMATTER2_SRST_MASK,
+ INPUT_FORMATTER3_SRST_MASK
+};
+
+const u8 HIVE_IF_SWITCH_CODE[N_INPUT_FORMATTER_ID] = {
+ HIVE_INPUT_SWITCH_SELECT_IF_PRIM,
+ HIVE_INPUT_SWITCH_SELECT_IF_PRIM,
+ HIVE_INPUT_SWITCH_SELECT_IF_SEC,
+ HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM
+};
+
+/* MW: Should be part of system_global.h, where we have the main enumeration */
+static const bool HIVE_IF_BIN_COPY[N_INPUT_FORMATTER_ID] = {
+ false, false, false, true
+};
+
+void input_formatter_rst(
+ const input_formatter_ID_t ID)
+{
+ hrt_address addr;
+ hrt_data rst;
+
+ assert(ID < N_INPUT_FORMATTER_ID);
+
+ addr = HIVE_IF_SRST_ADDRESS[ID];
+ rst = HIVE_IF_SRST_MASK[ID];
+
+ /* TEMPORARY HACK: THIS RESET BREAKS THE METADATA FEATURE
+ * WHICH USES THE STREAM2MEMORY BLOCK.
+ * MUST BE FIXED PROPERLY
+ */
+ if (!HIVE_IF_BIN_COPY[ID]) {
+ input_formatter_reg_store(ID, addr, rst);
+ }
+
+ return;
+}
+
+unsigned int input_formatter_get_alignment(
+ const input_formatter_ID_t ID)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+
+ return input_formatter_alignment[ID];
+}
+
+void input_formatter_set_fifo_blocking_mode(
+ const input_formatter_ID_t ID,
+ const bool enable)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+
+ /* cnd_input_formatter_reg_store() */
+ if (!HIVE_IF_BIN_COPY[ID]) {
+ input_formatter_reg_store(ID,
+ HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS, enable);
+ }
+ return;
+}
+
+void input_formatter_get_switch_state(
+ const input_formatter_ID_t ID,
+ input_formatter_switch_state_t *state)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(state);
+
+ /* We'll change this into an intelligent function to get switch info per IF */
+ (void)ID;
+
+ state->if_input_switch_lut_reg[0] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg0);
+ state->if_input_switch_lut_reg[1] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg1);
+ state->if_input_switch_lut_reg[2] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg2);
+ state->if_input_switch_lut_reg[3] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg3);
+ state->if_input_switch_lut_reg[4] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg4);
+ state->if_input_switch_lut_reg[5] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg5);
+ state->if_input_switch_lut_reg[6] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg6);
+ state->if_input_switch_lut_reg[7] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg7);
+ state->if_input_switch_fsync_lut = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_fsync_lut);
+ state->if_input_switch_ch_id_fmt_type = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_ch_id_fmt_type);
+
+ return;
+}
+
+void input_formatter_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_state_t *state)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(state);
+ /*
+ state->reset = input_formatter_reg_load(ID,
+ HIVE_IF_RESET_ADDRESS);
+ */
+ state->start_line = input_formatter_reg_load(ID,
+ HIVE_IF_START_LINE_ADDRESS);
+ state->start_column = input_formatter_reg_load(ID,
+ HIVE_IF_START_COLUMN_ADDRESS);
+ state->cropped_height = input_formatter_reg_load(ID,
+ HIVE_IF_CROPPED_HEIGHT_ADDRESS);
+ state->cropped_width = input_formatter_reg_load(ID,
+ HIVE_IF_CROPPED_WIDTH_ADDRESS);
+ state->ver_decimation = input_formatter_reg_load(ID,
+ HIVE_IF_VERTICAL_DECIMATION_ADDRESS);
+ state->hor_decimation = input_formatter_reg_load(ID,
+ HIVE_IF_HORIZONTAL_DECIMATION_ADDRESS);
+ state->hor_deinterleaving = input_formatter_reg_load(ID,
+ HIVE_IF_H_DEINTERLEAVING_ADDRESS);
+ state->left_padding = input_formatter_reg_load(ID,
+ HIVE_IF_LEFTPADDING_WIDTH_ADDRESS);
+ state->eol_offset = input_formatter_reg_load(ID,
+ HIVE_IF_END_OF_LINE_OFFSET_ADDRESS);
+ state->vmem_start_address = input_formatter_reg_load(ID,
+ HIVE_IF_VMEM_START_ADDRESS_ADDRESS);
+ state->vmem_end_address = input_formatter_reg_load(ID,
+ HIVE_IF_VMEM_END_ADDRESS_ADDRESS);
+ state->vmem_increment = input_formatter_reg_load(ID,
+ HIVE_IF_VMEM_INCREMENT_ADDRESS);
+ state->is_yuv420 = input_formatter_reg_load(ID,
+ HIVE_IF_YUV_420_FORMAT_ADDRESS);
+ state->vsync_active_low = input_formatter_reg_load(ID,
+ HIVE_IF_VSYNCK_ACTIVE_LOW_ADDRESS);
+ state->hsync_active_low = input_formatter_reg_load(ID,
+ HIVE_IF_HSYNCK_ACTIVE_LOW_ADDRESS);
+ state->allow_fifo_overflow = input_formatter_reg_load(ID,
+ HIVE_IF_ALLOW_FIFO_OVERFLOW_ADDRESS);
+ state->block_fifo_when_no_req = input_formatter_reg_load(ID,
+ HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS);
+ state->ver_deinterleaving = input_formatter_reg_load(ID,
+ HIVE_IF_V_DEINTERLEAVING_ADDRESS);
+ /* FSM */
+ state->fsm_sync_status = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_SYNC_STATUS);
+ state->fsm_sync_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_SYNC_COUNTER);
+ state->fsm_crop_status = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_CROP_STATUS);
+ state->fsm_crop_line_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_CROP_LINE_COUNTER);
+ state->fsm_crop_pixel_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_CROP_PIXEL_COUNTER);
+ state->fsm_deinterleaving_index = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DEINTERLEAVING_IDX);
+ state->fsm_dec_h_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DECIMATION_H_COUNTER);
+ state->fsm_dec_v_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DECIMATION_V_COUNTER);
+ state->fsm_dec_block_v_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DECIMATION_BLOCK_V_COUNTER);
+ state->fsm_padding_status = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_PADDING_STATUS);
+ state->fsm_padding_elem_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_PADDING_ELEMENT_COUNTER);
+ state->fsm_vector_support_error = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_VECTOR_SUPPORT_ERROR);
+ state->fsm_vector_buffer_full = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_VECTOR_SUPPORT_BUFF_FULL);
+ state->vector_support = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_VECTOR_SUPPORT);
+ state->sensor_data_lost = input_formatter_reg_load(ID,
+ HIVE_IF_FIFO_SENSOR_STATUS);
+
+ return;
+}
+
+void input_formatter_bin_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_bin_state_t *state)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(state);
+
+ state->reset = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_SOFT_RESET_REG_ADDRESS);
+ state->input_endianness = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_INPUT_ENDIANNESS_REG_ADDRESS);
+ state->output_endianness = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_OUTPUT_ENDIANNESS_REG_ADDRESS);
+ state->bitswap = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_BIT_SWAPPING_REG_ADDRESS);
+ state->block_synch = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_BLOCK_SYNC_LEVEL_REG_ADDRESS);
+ state->packet_synch = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_PACKET_SYNC_LEVEL_REG_ADDRESS);
+ state->readpostwrite_synch = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ADDRESS);
+ state->is_2ppc = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ADDRESS);
+ state->en_status_update = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS);
+ return;
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h
new file mode 100644
index 000000000000..ee2c8372421c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h
@@ -0,0 +1,121 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_LOCAL_H_INCLUDED__
+#define __INPUT_FORMATTER_LOCAL_H_INCLUDED__
+
+#include "input_formatter_global.h"
+
+#include "isp.h" /* ISP_VEC_ALIGN */
+
+typedef struct input_formatter_switch_state_s input_formatter_switch_state_t;
+typedef struct input_formatter_state_s input_formatter_state_t;
+typedef struct input_formatter_bin_state_s input_formatter_bin_state_t;
+
+#define HIVE_IF_FSM_SYNC_STATUS 0x100
+#define HIVE_IF_FSM_SYNC_COUNTER 0x104
+#define HIVE_IF_FSM_DEINTERLEAVING_IDX 0x114
+#define HIVE_IF_FSM_DECIMATION_H_COUNTER 0x118
+#define HIVE_IF_FSM_DECIMATION_V_COUNTER 0x11C
+#define HIVE_IF_FSM_DECIMATION_BLOCK_V_COUNTER 0x120
+#define HIVE_IF_FSM_PADDING_STATUS 0x124
+#define HIVE_IF_FSM_PADDING_ELEMENT_COUNTER 0x128
+#define HIVE_IF_FSM_VECTOR_SUPPORT_ERROR 0x12C
+#define HIVE_IF_FSM_VECTOR_SUPPORT_BUFF_FULL 0x130
+#define HIVE_IF_FSM_VECTOR_SUPPORT 0x134
+#define HIVE_IF_FIFO_SENSOR_STATUS 0x138
+
+/*
+ * The switch LUT encoding defines a sink for each
+ * single channel ID + channel format type combination. Conversely,
+ * the sink (i.e. an input formatter) can be reached
+ * from multiple channel & format type combinations.
+ *
+ * LUT[0,1] channel=0, format type {0,1,...31}
+ * LUT[2,3] channel=1, format type {0,1,...31}
+ * LUT[4,5] channel=2, format type {0,1,...31}
+ * LUT[6,7] channel=3, format type {0,1,...31}
+ *
+ * Each register holds 16 2-bit fields encoding the sink
+ * {0,1,2,3}; "0" means unconnected.
+ *
+ * The single FSYNCH register uses four 3-bit fields of 1-hot
+ * encoded sink information; "0" means unconnected.
+ *
+ * The encoding is redundant. The FSYNCH setting connects
+ * a channel to a sink. After that, the LUTs belonging to
+ * that channel can be directed to another sink, so the data
+ * can end up somewhere other than the sync signal.
+ */
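+/*
+ * Decoding sketch that follows from the layout above: the sink for a
+ * given (channel, fmt_type) pair in the if_input_switch_lut_reg[]
+ * values below would be
+ *
+ * reg = if_input_switch_lut_reg[2 * channel + (fmt_type / 16)];
+ * sink = (reg >> (2 * (fmt_type % 16))) & 0x3;
+ *
+ * with sink == 0 meaning unconnected.
+ */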
+struct input_formatter_switch_state_s {
+ int if_input_switch_lut_reg[8];
+ int if_input_switch_fsync_lut;
+ int if_input_switch_ch_id_fmt_type;
+ bool if_input_switch_map[HIVE_SWITCH_N_CHANNELS][HIVE_SWITCH_N_FORMATTYPES];
+};
+
+struct input_formatter_state_s {
+ /* int reset; */
+ int start_line;
+ int start_column;
+ int cropped_height;
+ int cropped_width;
+ int ver_decimation;
+ int hor_decimation;
+ int ver_deinterleaving;
+ int hor_deinterleaving;
+ int left_padding;
+ int eol_offset;
+ int vmem_start_address;
+ int vmem_end_address;
+ int vmem_increment;
+ int is_yuv420;
+ int vsync_active_low;
+ int hsync_active_low;
+ int allow_fifo_overflow;
+ int block_fifo_when_no_req;
+ int fsm_sync_status;
+ int fsm_sync_counter;
+ int fsm_crop_status;
+ int fsm_crop_line_counter;
+ int fsm_crop_pixel_counter;
+ int fsm_deinterleaving_index;
+ int fsm_dec_h_counter;
+ int fsm_dec_v_counter;
+ int fsm_dec_block_v_counter;
+ int fsm_padding_status;
+ int fsm_padding_elem_counter;
+ int fsm_vector_support_error;
+ int fsm_vector_buffer_full;
+ int vector_support;
+ int sensor_data_lost;
+};
+
+struct input_formatter_bin_state_s {
+ u32 reset;
+ u32 input_endianness;
+ u32 output_endianness;
+ u32 bitswap;
+ u32 block_synch;
+ u32 packet_synch;
+ u32 readpostwrite_synch;
+ u32 is_2ppc;
+ u32 en_status_update;
+};
+
+static const unsigned int input_formatter_alignment[N_INPUT_FORMATTER_ID] = {
+ ISP_VEC_ALIGN, ISP_VEC_ALIGN, HIVE_ISP_CTRL_DATA_BYTES
+};
+
+#endif /* __INPUT_FORMATTER_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h
new file mode 100644
index 000000000000..bdca709219a4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_PRIVATE_H_INCLUDED__
+#define __INPUT_FORMATTER_PRIVATE_H_INCLUDED__
+
+#include "input_formatter_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_INPUT_FORMATTER_C void input_formatter_reg_store(
+ const input_formatter_ID_t ID,
+ const hrt_address reg_addr,
+ const hrt_data value)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ ia_css_device_store_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr, value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_FORMATTER_C hrt_data input_formatter_reg_load(
+ const input_formatter_ID_t ID,
+ const unsigned int reg_addr)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr);
+}
+
+#endif /* __INPUT_FORMATTER_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
new file mode 100644
index 000000000000..2114cf4f3fda
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
@@ -0,0 +1,1849 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+
+#include "input_system.h"
+#include <type_support.h>
+#include "gp_device.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_INPUT_SYSTEM__
+#include "input_system_private.h"
+#endif /* __INLINE_INPUT_SYSTEM__ */
+
+#define ZERO (0x0)
+#define ONE (1U)
+
+static const ib_buffer_t IB_BUFFER_NULL = {0, 0, 0};
+
+static input_system_error_t input_system_configure_channel(
+ const channel_cfg_t channel);
+
+static input_system_error_t input_system_configure_channel_sensor(
+ const channel_cfg_t channel);
+
+static input_system_error_t input_buffer_configuration(void);
+
+static input_system_error_t configuration_to_registers(void);
+
+static void receiver_rst(const rx_ID_t ID);
+static void input_system_network_rst(const input_system_ID_t ID);
+
+static void capture_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ib_buffer_t *const cfg);
+
+static void acquisition_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ib_buffer_t *const cfg);
+
+static void ctrl_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ctrl_unit_cfg_t *const cfg);
+
+static void input_system_network_configure(
+ const input_system_ID_t ID,
+ const input_system_network_cfg_t *const cfg);
+
+// MW: "CSI" was previously named "rx", short for "receiver"
+static input_system_error_t set_csi_cfg(
+ csi_cfg_t *const lhs,
+ const csi_cfg_t *const rhs,
+ input_system_config_flags_t *const flags);
+
+static input_system_error_t set_source_type(
+ input_system_source_t *const lhs,
+ const input_system_source_t rhs,
+ input_system_config_flags_t *const flags);
+
+static input_system_error_t input_system_multiplexer_cfg(
+ input_system_multiplex_t *const lhs,
+ const input_system_multiplex_t rhs,
+ input_system_config_flags_t *const flags);
+
+static inline void capture_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ capture_unit_state_t *state);
+
+static inline void acquisition_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ acquisition_unit_state_t *state);
+
+static inline void ctrl_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ ctrl_unit_state_t *state);
+
+static inline void mipi_port_get_state(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ mipi_port_state_t *state);
+
+static inline void rx_channel_get_state(
+ const rx_ID_t ID,
+ const unsigned int ch_id,
+ rx_channel_state_t *state);
+
+static void gp_device_rst(const gp_device_ID_t ID);
+
+static void input_selector_cfg_for_sensor(const gp_device_ID_t ID);
+
+static void input_switch_rst(const gp_device_ID_t ID);
+
+static void input_switch_cfg(
+ const gp_device_ID_t ID,
+ const input_switch_cfg_t *const cfg
+);
+
+void input_system_get_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state)
+{
+ sub_system_ID_t sub_id;
+
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(state);
+
+ state->str_multicastA_sel = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX);
+ state->str_multicastB_sel = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_B_IDX);
+ state->str_multicastC_sel = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_C_IDX);
+ state->str_mux_sel = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX);
+ state->str_mon_status = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_STRMON_STAT_IDX);
+ state->str_mon_irq_cond = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_STRMON_COND_IDX);
+ state->str_mon_irq_en = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX);
+ state->isys_srst = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_SRST_IDX);
+ state->isys_slv_reg_srst = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_SLV_REG_SRST_IDX);
+ state->str_deint_portA_cnt = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_REG_PORT_A_IDX);
+ state->str_deint_portB_cnt = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_REG_PORT_B_IDX);
+
+ for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID;
+ sub_id++) {
+ capture_unit_get_state(ID, sub_id,
+ &state->capture_unit[sub_id - CAPTURE_UNIT0_ID]);
+ }
+ for (sub_id = ACQUISITION_UNIT0_ID;
+ sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
+ acquisition_unit_get_state(ID, sub_id,
+ &state->acquisition_unit[sub_id - ACQUISITION_UNIT0_ID]);
+ }
+ for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID;
+ sub_id++) {
+ ctrl_unit_get_state(ID, sub_id,
+ &state->ctrl_unit_state[sub_id - CTRL_UNIT0_ID]);
+ }
+
+ return;
+}
+
+void receiver_get_state(
+ const rx_ID_t ID,
+ receiver_state_t *state)
+{
+ enum mipi_port_id port_id;
+ unsigned int ch_id;
+
+ assert(ID < N_RX_ID);
+ assert(state);
+
+ state->fs_to_ls_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX);
+ state->ls_to_data_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX);
+ state->data_to_le_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX);
+ state->le_to_fe_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX);
+ state->fe_to_fs_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX);
+ state->le_to_fs_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX);
+ state->is_two_ppc = (bool)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX);
+ state->backend_rst = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX);
+ state->raw18 = (uint16_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_RAW18_REG_IDX);
+ state->force_raw8 = (bool)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX);
+ state->raw16 = (uint16_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_RAW16_REG_IDX);
+
+ for (port_id = (enum mipi_port_id)0; port_id < N_MIPI_PORT_ID; port_id++) {
+ mipi_port_get_state(ID, port_id,
+ &state->mipi_port_state[port_id]);
+ }
+ for (ch_id = 0U; ch_id < N_RX_CHANNEL_ID; ch_id++) {
+ rx_channel_get_state(ID, ch_id,
+ &state->rx_channel_state[ch_id]);
+ }
+
+ state->be_gsp_acc_ovl = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX);
+ state->be_srst = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_SRST_REG_IDX);
+ state->be_is_two_ppc = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX);
+ state->be_comp_format0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX);
+ state->be_comp_format1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX);
+ state->be_comp_format2 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX);
+ state->be_comp_format3 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX);
+ state->be_sel = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_SEL_REG_IDX);
+ state->be_raw16_config = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX);
+ state->be_raw18_config = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX);
+ state->be_force_raw8 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX);
+ state->be_irq_status = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX);
+ state->be_irq_clear = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX);
+
+ return;
+}
+
+bool is_mipi_format_yuv420(
+ const mipi_format_t mipi_format)
+{
+ bool is_yuv420 = (
+ (mipi_format == MIPI_FORMAT_YUV420_8) ||
+ (mipi_format == MIPI_FORMAT_YUV420_10) ||
+ (mipi_format == MIPI_FORMAT_YUV420_8_SHIFT) ||
+ (mipi_format == MIPI_FORMAT_YUV420_10_SHIFT));
+ /* MIPI_FORMAT_YUV420_8_LEGACY is not YUV420 */
+
+ return is_yuv420;
+}
+
+void receiver_set_compression(
+ const rx_ID_t ID,
+ const unsigned int cfg_ID,
+ const mipi_compressor_t comp,
+ const mipi_predictor_t pred)
+{
+ const unsigned int field_id = cfg_ID % N_MIPI_FORMAT_CUSTOM;
+ const unsigned int ch_id = cfg_ID / N_MIPI_FORMAT_CUSTOM;
+ hrt_data val;
+ hrt_address addr = 0;
+ hrt_data reg;
+
+ assert(ID < N_RX_ID);
+ assert(cfg_ID < N_MIPI_COMPRESSOR_CONTEXT);
+ assert(field_id < N_MIPI_FORMAT_CUSTOM);
+ assert(ch_id < N_RX_CHANNEL_ID);
+ assert(comp < N_MIPI_COMPRESSOR_METHODS);
+ assert(pred < N_MIPI_PREDICTOR_TYPES);
+
+ val = (((uint8_t)pred) << 3) | comp;
+
+ switch (ch_id) {
+ case 0:
+ addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX :
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
+ break;
+ case 1:
+ addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX :
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
+ break;
+ case 2:
+ addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX :
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
+ break;
+ case 3:
+ addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX :
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
+ break;
+ default:
+ /* should not happen */
+ assert(false);
+ return;
+ }
+
+	reg = (field_id < 6) ? (val << (field_id * 5))
+			     : (val << ((field_id - 6) * 5));
+ receiver_reg_store(ID, addr, reg);
+
+ return;
+}
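+
+/*
+ * Editor's sketch (not part of the original code): each 5-bit field in the
+ * scheme registers carries the compression method in bits [2:0] and the
+ * predictor in bits [4:3], six fields per register. With hypothetical values
+ * comp == 3 and pred == 1 at field_id == 7, the function above computes:
+ *
+ *	val = (1 << 3) | 3;		// 0x0b
+ *	reg = val << ((7 - 6) * 5);	// 0x160, written to the ..._REG1 index
+ */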
+
+void receiver_port_enable(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const bool cnd)
+{
+ hrt_data reg = receiver_port_reg_load(ID, port_ID,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
+
+ if (cnd) {
+ reg |= 0x01;
+ } else {
+ reg &= ~0x01;
+ }
+
+ receiver_port_reg_store(ID, port_ID,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, reg);
+ return;
+}
+
+bool is_receiver_port_enabled(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID)
+{
+ hrt_data reg = receiver_port_reg_load(ID, port_ID,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
+ return ((reg & 0x01) != 0);
+}
+
+void receiver_irq_enable(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const rx_irq_info_t irq_info)
+{
+ receiver_port_reg_store(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, irq_info);
+ return;
+}
+
+rx_irq_info_t receiver_get_irq_info(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID)
+{
+ return receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
+}
+
+void receiver_irq_clear(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const rx_irq_info_t irq_info)
+{
+ receiver_port_reg_store(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info);
+ return;
+}
+
+static inline void capture_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ capture_unit_state_t *state)
+{
+ assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <= CAPTURE_UNIT2_ID));
+ assert(state);
+
+ state->StartMode = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_START_MODE_REG_ID);
+ state->Start_Addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_START_ADDR_REG_ID);
+ state->Mem_Region_Size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_MEM_REGION_SIZE_REG_ID);
+ state->Num_Mem_Regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_NUM_MEM_REGIONS_REG_ID);
+// AM: Illegal read from following registers.
+ /* state->Init = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_INIT_REG_ID);
+ state->Start = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_START_REG_ID);
+ state->Stop = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_STOP_REG_ID);
+ */
+ state->Packet_Length = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_PACKET_LENGTH_REG_ID);
+ state->Received_Length = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_RECEIVED_LENGTH_REG_ID);
+ state->Received_Short_Packets = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_RECEIVED_SHORT_PACKETS_REG_ID);
+ state->Received_Long_Packets = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_RECEIVED_LONG_PACKETS_REG_ID);
+ state->Last_Command = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_LAST_COMMAND_REG_ID);
+ state->Next_Command = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_NEXT_COMMAND_REG_ID);
+ state->Last_Acknowledge = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_LAST_ACKNOWLEDGE_REG_ID);
+ state->Next_Acknowledge = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_NEXT_ACKNOWLEDGE_REG_ID);
+ state->FSM_State_Info = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_FSM_STATE_INFO_REG_ID);
+
+ return;
+}
+
+static inline void acquisition_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ acquisition_unit_state_t *state)
+{
+ assert(sub_id == ACQUISITION_UNIT0_ID);
+ assert(state);
+
+ state->Start_Addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_START_ADDR_REG_ID);
+ state->Mem_Region_Size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_MEM_REGION_SIZE_REG_ID);
+ state->Num_Mem_Regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_NUM_MEM_REGIONS_REG_ID);
+// AM: Illegal read from following registers.
+ /* state->Init = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_INIT_REG_ID);
+ */
+ state->Received_Short_Packets = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_RECEIVED_SHORT_PACKETS_REG_ID);
+ state->Received_Long_Packets = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_RECEIVED_LONG_PACKETS_REG_ID);
+ state->Last_Command = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_LAST_COMMAND_REG_ID);
+ state->Next_Command = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_NEXT_COMMAND_REG_ID);
+ state->Last_Acknowledge = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_LAST_ACKNOWLEDGE_REG_ID);
+ state->Next_Acknowledge = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_NEXT_ACKNOWLEDGE_REG_ID);
+ state->FSM_State_Info = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_FSM_STATE_INFO_REG_ID);
+ state->Int_Cntr_Info = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_INT_CNTR_INFO_REG_ID);
+
+ return;
+}
+
+static inline void ctrl_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ ctrl_unit_state_t *state)
+{
+ assert(sub_id == CTRL_UNIT0_ID);
+ assert(state);
+
+ state->captA_start_addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_A_REG_ID);
+ state->captB_start_addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_B_REG_ID);
+ state->captC_start_addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_C_REG_ID);
+ state->captA_mem_region_size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID);
+ state->captB_mem_region_size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID);
+ state->captC_mem_region_size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID);
+ state->captA_num_mem_regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID);
+ state->captB_num_mem_regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID);
+ state->captC_num_mem_regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID);
+ state->acq_start_addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_START_ADDR_REG_ID);
+ state->acq_mem_region_size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID);
+ state->acq_num_mem_regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID);
+// AM: Illegal read from following registers.
+ /* state->ctrl_init = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_INIT_REG_ID);
+ */
+ state->last_cmd = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_LAST_COMMAND_REG_ID);
+ state->next_cmd = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_NEXT_COMMAND_REG_ID);
+ state->last_ack = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID);
+ state->next_ack = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID);
+ state->top_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_FSM_STATE_INFO_REG_ID);
+ state->captA_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID);
+ state->captB_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID);
+ state->captC_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID);
+ state->acq_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID);
+ state->capt_reserve_one_mem_region = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID);
+
+ return;
+}
+
+static inline void mipi_port_get_state(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ mipi_port_state_t *state)
+{
+ int i;
+
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(state);
+
+ state->device_ready = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
+ state->irq_status = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
+ state->irq_enable = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
+ state->timeout_count = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX);
+ state->init_count = (uint16_t)receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX);
+ state->raw16_18 = (uint16_t)receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX);
+ state->sync_count = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX);
+ state->rx_count = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX);
+
+ for (i = 0; i < MIPI_4LANE_CFG ; i++) {
+ state->lane_sync_count[i] = (uint8_t)((state->sync_count) >> (i * 8));
+ state->lane_rx_count[i] = (uint8_t)((state->rx_count) >> (i * 8));
+ }
+
+ return;
+}
+
+static inline void rx_channel_get_state(
+ const rx_ID_t ID,
+ const unsigned int ch_id,
+ rx_channel_state_t *state)
+{
+ int i;
+
+ assert(ID < N_RX_ID);
+ assert(ch_id < N_RX_CHANNEL_ID);
+ assert(state);
+
+ switch (ch_id) {
+ case 0:
+ state->comp_scheme0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX);
+ state->comp_scheme1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
+ break;
+ case 1:
+ state->comp_scheme0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX);
+ state->comp_scheme1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
+ break;
+ case 2:
+ state->comp_scheme0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX);
+ state->comp_scheme1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
+ break;
+ case 3:
+ state->comp_scheme0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX);
+ state->comp_scheme1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
+ break;
+ }
+
+ /* See Table 7.1.17,..., 7.1.24 */
+ for (i = 0; i < 6; i++) {
+ u8 val = (uint8_t)((state->comp_scheme0) >> (i * 5)) & 0x1f;
+
+ state->comp[i] = (mipi_compressor_t)(val & 0x07);
+ state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
+ }
+	/* Fields 6 and up are packed in comp_scheme1, mirroring receiver_set_compression(). */
+	for (i = 6; i < N_MIPI_FORMAT_CUSTOM; i++) {
+		u8 val = (uint8_t)((state->comp_scheme1) >> ((i - 6) * 5)) & 0x1f;
+
+ state->comp[i] = (mipi_compressor_t)(val & 0x07);
+ state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
+ }
+
+ return;
+}
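+
+/*
+ * Editor's sketch (illustrative only): the two loops above invert the packing
+ * performed by receiver_set_compression(). For a hypothetical 5-bit field
+ * value val == 0x0b:
+ *
+ *	comp = val & 0x07;		// 3, bits [2:0]
+ *	pred = (val & 0x18) >> 3;	// 1, bits [4:3]
+ */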
+
+// MW: "2400" in the name is not good, but this is to avoid a naming conflict
+static input_system_cfg2400_t config;
+
+static void receiver_rst(
+ const rx_ID_t ID)
+{
+ enum mipi_port_id port_id;
+
+ assert(ID < N_RX_ID);
+
+// Disable all ports.
+ for (port_id = MIPI_PORT0_ID; port_id < N_MIPI_PORT_ID; port_id++) {
+ receiver_port_enable(ID, port_id, false);
+ }
+
+ // AM: Additional actions for stopping receiver?
+
+ return;
+}
+
+//Single function to reset all the devices mapped via GP_DEVICE.
+static void gp_device_rst(const gp_device_ID_t ID)
+{
+ assert(ID < N_GP_DEVICE_ID);
+
+ gp_device_reg_store(ID, _REG_GP_SYNCGEN_ENABLE_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_FREE_RUNNING_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_PAUSE_ADDR, ONE);
+ // gp_device_reg_store(ID, _REG_GP_NR_FRAMES_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_LINES_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR, ZERO);
+// AM: Following calls cause strange warnings. Probably they should not be initialized.
+// gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ZERO);
+// gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ZERO);
+// gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ZERO);
+// gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_B_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_B_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_MASK_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_MASK_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_XY_CNT_MASK_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_DELTA_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_DELTA_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_MODE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED1_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN1_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE1_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED2_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN2_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE2_ADDR, ZERO);
+ //gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO);
+ //gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_HOR_CNT_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_VER_CNT_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_FRAME_CNT_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR,
+ ZERO); // AM: Maybe this soft reset is not safe.
+
+ return;
+}
+
+static void input_selector_cfg_for_sensor(const gp_device_ID_t ID)
+{
+ assert(ID < N_GP_DEVICE_ID);
+
+ gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR, ZERO);
+
+ return;
+}
+
+static void input_switch_rst(const gp_device_ID_t ID)
+{
+ int addr;
+
+ assert(ID < N_GP_DEVICE_ID);
+
+ // Initialize the data&hsync LUT.
+ for (addr = _REG_GP_IFMT_input_switch_lut_reg0;
+ addr <= _REG_GP_IFMT_input_switch_lut_reg7; addr += SIZEOF_HRT_REG) {
+ gp_device_reg_store(ID, addr, ZERO);
+ }
+
+ // Initialize the vsync LUT.
+ gp_device_reg_store(ID,
+ _REG_GP_IFMT_input_switch_fsync_lut,
+ ZERO);
+
+ return;
+}
+
+static void input_switch_cfg(
+ const gp_device_ID_t ID,
+ const input_switch_cfg_t *const cfg)
+{
+ int addr_offset;
+
+ assert(ID < N_GP_DEVICE_ID);
+ assert(cfg);
+
+ // Initialize the data&hsync LUT.
+ for (addr_offset = 0; addr_offset < N_RX_CHANNEL_ID * 2; addr_offset++) {
+ assert(addr_offset * SIZEOF_HRT_REG + _REG_GP_IFMT_input_switch_lut_reg0 <=
+ _REG_GP_IFMT_input_switch_lut_reg7);
+ gp_device_reg_store(ID,
+ _REG_GP_IFMT_input_switch_lut_reg0 + addr_offset * SIZEOF_HRT_REG,
+ cfg->hsync_data_reg[addr_offset]);
+ }
+
+ // Initialize the vsync LUT.
+ gp_device_reg_store(ID,
+ _REG_GP_IFMT_input_switch_fsync_lut,
+ cfg->vsync_data_reg);
+
+ return;
+}
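+
+/*
+ * Editor's note on the LUT layout, inferred from this function and from
+ * input_system_configure_channel(): each CSI port owns two consecutive
+ * data&hsync LUT registers (offsets port * 2 and port * 2 + 1), and the
+ * vsync LUT packs 3 bits per port:
+ *
+ *	cfg.vsync_data_reg |= (channel_vsync & 0x7) << (port * 3);
+ */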
+
+static void input_system_network_rst(const input_system_ID_t ID)
+{
+ unsigned int sub_id;
+
+ // Reset all 3 multicasts.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX,
+ INPUT_SYSTEM_DISCARD_ALL);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_B_IDX,
+ INPUT_SYSTEM_DISCARD_ALL);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_C_IDX,
+ INPUT_SYSTEM_DISCARD_ALL);
+
+ // Reset stream mux.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ N_INPUT_SYSTEM_MULTIPLEX);
+
+ // Reset 3 capture units.
+ for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID;
+ sub_id++) {
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_INIT_REG_ID,
+ 1U << CAPT_INIT_RST_REG_BIT);
+ }
+
+ // Reset acquisition unit.
+ for (sub_id = ACQUISITION_UNIT0_ID;
+ sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_INIT_REG_ID,
+ 1U << ACQ_INIT_RST_REG_BIT);
+ }
+
+ // DMA unit reset is not needed.
+
+ // Reset controller units.
+ // NB: In future we need to keep part of ctrl_state for split capture and
+ for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID;
+ sub_id++) {
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_INIT_REG_ID,
+ 1U); //AM: Is there any named constant?
+ }
+
+ return;
+}
+
+// Function that resets current configuration.
+input_system_error_t input_system_configuration_reset(void)
+{
+ unsigned int i;
+
+ receiver_rst(RX0_ID);
+
+ input_system_network_rst(INPUT_SYSTEM0_ID);
+
+ gp_device_rst(INPUT_SYSTEM0_ID);
+
+ input_switch_rst(INPUT_SYSTEM0_ID);
+
+ //target_rst();
+
+ // Reset IRQ_CTRLs.
+
+ // Reset configuration data structures.
+ for (i = 0; i < N_CHANNELS; i++) {
+ config.ch_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.target_isp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.target_sp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.target_strm2mem_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ }
+
+ for (i = 0; i < N_CSI_PORTS; i++) {
+ config.csi_buffer_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.multicast[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ }
+
+ config.source_type_flags = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.unallocated_ib_mem_words = IB_CAPACITY_IN_WORDS;
+ //config.acq_allocated_ib_mem_words = 0;
+
+	// Mark the start of the session configuration.
+ config.session_flags = INPUT_SYSTEM_CFG_FLAG_REQUIRED;
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// MW: Comments are good, but Doxygen is required; place it at the declaration.
+// Function that appends the channel to current configuration.
+static input_system_error_t input_system_configure_channel(
+ const channel_cfg_t channel)
+{
+ input_system_error_t error = INPUT_SYSTEM_ERR_NO_ERROR;
+ // Check if channel is not already configured.
+ if (config.ch_flags[channel.ch_id] & INPUT_SYSTEM_CFG_FLAG_SET) {
+ return INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET;
+ } else {
+ switch (channel.source_type) {
+ case INPUT_SYSTEM_SOURCE_SENSOR:
+ error = input_system_configure_channel_sensor(channel);
+ break;
+		case INPUT_SYSTEM_SOURCE_TPG:
+		case INPUT_SYSTEM_SOURCE_PRBS:
+		case INPUT_SYSTEM_SOURCE_FIFO:
+		default:
+			return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+
+		if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+			return error;
+
+		// Input switch channel configurations must be combined into one unified config.
+		config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2] =
+			channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[0];
+		config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2 + 1] =
+			channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[1];
+ config.input_switch_cfg.vsync_data_reg |=
+ (channel.target_cfg.input_switch_channel_cfg.vsync_data_reg & 0x7) <<
+ (channel.source_cfg.csi_cfg.csi_port * 3);
+
+ // Other targets are just copied and marked as set.
+ config.target_isp[channel.source_cfg.csi_cfg.csi_port] =
+ channel.target_cfg.target_isp_cfg;
+ config.target_sp[channel.source_cfg.csi_cfg.csi_port] =
+ channel.target_cfg.target_sp_cfg;
+ config.target_strm2mem[channel.source_cfg.csi_cfg.csi_port] =
+ channel.target_cfg.target_strm2mem_cfg;
+ config.target_isp_flags[channel.source_cfg.csi_cfg.csi_port] |=
+ INPUT_SYSTEM_CFG_FLAG_SET;
+ config.target_sp_flags[channel.source_cfg.csi_cfg.csi_port] |=
+ INPUT_SYSTEM_CFG_FLAG_SET;
+ config.target_strm2mem_flags[channel.source_cfg.csi_cfg.csi_port] |=
+ INPUT_SYSTEM_CFG_FLAG_SET;
+
+ config.ch_flags[channel.ch_id] = INPUT_SYSTEM_CFG_FLAG_SET;
+ }
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// Function that partitions the input buffer space by determining the addresses.
+static input_system_error_t input_buffer_configuration(void)
+{
+ u32 current_address = 0;
+ u32 unallocated_memory = IB_CAPACITY_IN_WORDS;
+
+ ib_buffer_t candidate_buffer_acq = IB_BUFFER_NULL;
+ u32 size_requested;
+ input_system_config_flags_t acq_already_specified = INPUT_SYSTEM_CFG_FLAG_RESET;
+ input_system_csi_port_t port;
+
+ for (port = INPUT_SYSTEM_PORT_A; port < N_INPUT_SYSTEM_PORTS; port++) {
+ csi_cfg_t source = config.csi_value[port];//.csi_cfg;
+
+ if (config.csi_flags[port] & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // Check and set csi buffer in input buffer.
+ switch (source.buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+ case INPUT_SYSTEM_XMEM_ACQUIRE:
+ config.csi_buffer_flags[port] =
+ INPUT_SYSTEM_CFG_FLAG_BLOCKED; // Well, not used.
+ break;
+
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
+ case INPUT_SYSTEM_SRAM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_CAPTURE:
+ size_requested = source.csi_buffer.mem_reg_size *
+ source.csi_buffer.nof_mem_regs;
+ if (source.csi_buffer.mem_reg_size > 0
+ && source.csi_buffer.nof_mem_regs > 0
+ && size_requested <= unallocated_memory
+ ) {
+ config.csi_buffer[port].mem_reg_addr = current_address;
+ config.csi_buffer[port].mem_reg_size = source.csi_buffer.mem_reg_size;
+ config.csi_buffer[port].nof_mem_regs = source.csi_buffer.nof_mem_regs;
+ current_address += size_requested;
+ unallocated_memory -= size_requested;
+ config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_SET;
+ } else {
+ config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ break;
+
+ default:
+ config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+
+			// Check whether an acquisition buffer is specified, but set it later since it has to be unique.
+ switch (source.buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+ case INPUT_SYSTEM_SRAM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_CAPTURE:
+ // Nothing to do.
+ break;
+
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
+ case INPUT_SYSTEM_XMEM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_ACQUIRE:
+ if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_RESET) {
+ size_requested = source.acquisition_buffer.mem_reg_size
+ * source.acquisition_buffer.nof_mem_regs;
+ if (source.acquisition_buffer.mem_reg_size > 0
+ && source.acquisition_buffer.nof_mem_regs > 0
+ && size_requested <= unallocated_memory
+ ) {
+ candidate_buffer_acq = source.acquisition_buffer;
+ acq_already_specified = INPUT_SYSTEM_CFG_FLAG_SET;
+ }
+ } else {
+					// Check if the specified acquisition buffer is the same as the one specified before.
+ if (source.acquisition_buffer.mem_reg_size != candidate_buffer_acq.mem_reg_size
+ || source.acquisition_buffer.nof_mem_regs != candidate_buffer_acq.nof_mem_regs
+ ) {
+ config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ break;
+
+			default:
+				return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+ } else {
+ config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ }
+	} // end of for (port)
+
+ // Set the acquisition buffer at the end.
+ size_requested = candidate_buffer_acq.mem_reg_size *
+ candidate_buffer_acq.nof_mem_regs;
+ if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_SET
+ && size_requested <= unallocated_memory) {
+ config.acquisition_buffer_unique.mem_reg_addr = current_address;
+ config.acquisition_buffer_unique.mem_reg_size =
+ candidate_buffer_acq.mem_reg_size;
+ config.acquisition_buffer_unique.nof_mem_regs =
+ candidate_buffer_acq.nof_mem_regs;
+ current_address += size_requested;
+ unallocated_memory -= size_requested;
+ config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_SET;
+
+ assert(current_address <= IB_CAPACITY_IN_WORDS);
+ }
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
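+
+/*
+ * Editor's walk-through with hypothetical sizes: if port A requests
+ * mem_reg_size == 64 with nof_mem_regs == 2 and port B requests 128 x 1,
+ * the linear allocator above produces
+ *
+ *	csi_buffer[A].mem_reg_addr == 0;	// occupies 128 words
+ *	csi_buffer[B].mem_reg_addr == 128;	// occupies 128 words
+ *	acquisition_buffer_unique.mem_reg_addr == 256;	// if one was requested
+ *
+ * provided the running total never exceeds IB_CAPACITY_IN_WORDS.
+ */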
+
+static void capture_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ib_buffer_t *const cfg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <=
+ CAPTURE_UNIT2_ID)); // Commented part is always true.
+ assert(cfg);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_START_ADDR_REG_ID,
+ cfg->mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_MEM_REGION_SIZE_REG_ID,
+ cfg->mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_NUM_MEM_REGIONS_REG_ID,
+ cfg->nof_mem_regs);
+
+ return;
+}
+
+static void acquisition_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ib_buffer_t *const cfg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_id == ACQUISITION_UNIT0_ID);
+ assert(cfg);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_START_ADDR_REG_ID,
+ cfg->mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_NUM_MEM_REGIONS_REG_ID,
+ cfg->nof_mem_regs);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_MEM_REGION_SIZE_REG_ID,
+ cfg->mem_reg_size);
+
+ return;
+}
+
+static void ctrl_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ctrl_unit_cfg_t *const cfg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_id == CTRL_UNIT0_ID);
+ assert(cfg);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_A_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT0_ID].nof_mem_regs);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_B_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT1_ID].nof_mem_regs);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_C_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT2_ID].nof_mem_regs);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_START_ADDR_REG_ID,
+ cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID,
+ cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID,
+ cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].nof_mem_regs);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID,
+ 0);
+ return;
+}
+
+static void input_system_network_configure(
+ const input_system_ID_t ID,
+ const input_system_network_cfg_t *const cfg)
+{
+ u32 sub_id;
+
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(cfg);
+
+ // Set all 3 multicasts.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX,
+ cfg->multicast_cfg[CAPTURE_UNIT0_ID]);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_B_IDX,
+ cfg->multicast_cfg[CAPTURE_UNIT1_ID]);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_C_IDX,
+ cfg->multicast_cfg[CAPTURE_UNIT2_ID]);
+
+ // Set stream mux.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ cfg->mux_cfg);
+
+ // Set capture units.
+ for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID;
+ sub_id++) {
+ capture_unit_configure(ID,
+ sub_id,
+ &cfg->ctrl_unit_cfg[ID].buffer_mipi[sub_id - CAPTURE_UNIT0_ID]);
+ }
+
+ // Set acquisition units.
+ for (sub_id = ACQUISITION_UNIT0_ID;
+ sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
+ acquisition_unit_configure(ID,
+ sub_id,
+ &cfg->ctrl_unit_cfg[sub_id - ACQUISITION_UNIT0_ID].buffer_acquire[sub_id -
+ ACQUISITION_UNIT0_ID]);
+ }
+
+ // No DMA configuration needed. Ctrl_unit will fully control it.
+
+ // Set controller units.
+ for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID;
+ sub_id++) {
+ ctrl_unit_configure(ID,
+ sub_id,
+ &cfg->ctrl_unit_cfg[sub_id - CTRL_UNIT0_ID]);
+ }
+
+ return;
+}
+
+static input_system_error_t configuration_to_registers(void)
+{
+ input_system_network_cfg_t input_system_network_cfg;
+ int i;
+
+ assert(config.source_type_flags & INPUT_SYSTEM_CFG_FLAG_SET);
+
+ switch (config.source_type) {
+ case INPUT_SYSTEM_SOURCE_SENSOR:
+
+		// Determine the stream multicast settings based on the buffering mode of csi_cfg_t.
+		// AM: This should be moved to an earlier function call, e.g. into
+		// the commit function.
+ for (i = MIPI_PORT0_ID; i < N_MIPI_PORT_ID; i++) {
+ if (config.csi_flags[i] & INPUT_SYSTEM_CFG_FLAG_SET) {
+ switch (config.csi_value[i].buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+ config.multicast[i] = INPUT_SYSTEM_CSI_BACKEND;
+ break;
+
+ case INPUT_SYSTEM_XMEM_CAPTURE:
+ case INPUT_SYSTEM_SRAM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_BUFFERING:
+ config.multicast[i] = INPUT_SYSTEM_INPUT_BUFFER;
+ break;
+
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
+ config.multicast[i] = INPUT_SYSTEM_MULTICAST;
+ break;
+
+ case INPUT_SYSTEM_XMEM_ACQUIRE:
+ config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
+ break;
+
+ default:
+ config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+ } else {
+ config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
+ }
+
+ input_system_network_cfg.multicast_cfg[i] = config.multicast[i];
+
+ } // for
+
+ input_system_network_cfg.mux_cfg = config.multiplexer;
+
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
+ CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT0_ID] =
+ config.csi_buffer[MIPI_PORT0_ID];
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
+ CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT1_ID] =
+ config.csi_buffer[MIPI_PORT1_ID];
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
+ CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT2_ID] =
+ config.csi_buffer[MIPI_PORT2_ID];
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
+ CTRL_UNIT0_ID].buffer_acquire[ACQUISITION_UNIT0_ID -
+ ACQUISITION_UNIT0_ID] =
+ config.acquisition_buffer_unique;
+
+ // First set input network around CSI receiver.
+ input_system_network_configure(INPUT_SYSTEM0_ID, &input_system_network_cfg);
+
+ // Set the CSI receiver.
+ //...
+ break;
+
+ case INPUT_SYSTEM_SOURCE_TPG:
+
+ break;
+
+ case INPUT_SYSTEM_SOURCE_PRBS:
+
+ break;
+
+ case INPUT_SYSTEM_SOURCE_FIFO:
+ break;
+
+	default:
+		return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+
+ } // end of switch (source_type)
+
+ // Set input selector.
+ input_selector_cfg_for_sensor(INPUT_SYSTEM0_ID);
+
+ // Set input switch.
+ input_switch_cfg(INPUT_SYSTEM0_ID, &config.input_switch_cfg);
+
+ // Set input formatters.
+ // AM: IF are set dynamically.
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
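+
+/*
+ * Editor's summary of the buffering-mode to multicast mapping implemented in
+ * the sensor branch above:
+ *
+ *	FIFO_CAPTURE			-> INPUT_SYSTEM_CSI_BACKEND
+ *	XMEM_CAPTURE, SRAM_BUFFERING,
+ *	XMEM_BUFFERING			-> INPUT_SYSTEM_INPUT_BUFFER
+ *	FIFO_CAPTURE_WITH_COUNTING	-> INPUT_SYSTEM_MULTICAST
+ *	XMEM_ACQUIRE, unconfigured	-> INPUT_SYSTEM_DISCARD_ALL
+ */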
+
+// Function that applies the whole configuration.
+input_system_error_t input_system_configuration_commit(void)
+{
+ // The last configuration step is to configure the input buffer.
+ input_system_error_t error = input_buffer_configuration();
+
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR) {
+ return error;
+ }
+
+ // Translate the whole configuration into registers.
+ error = configuration_to_registers();
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR) {
+ return error;
+ }
+
+ // Translate the whole configuration into ctrl commands etc.
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// FIFO
+
+input_system_error_t input_system_csi_fifo_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_FIFO_CAPTURE;
+ channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = 0;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+input_system_error_t input_system_csi_fifo_channel_with_counting_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 csi_mem_reg_size,
+ u32 csi_nof_mem_regs,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode =
+ INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+// SRAM
+
+input_system_error_t input_system_csi_sram_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 csi_mem_reg_size,
+ u32 csi_nof_mem_regs,
+ // uint32_t acq_mem_reg_size,
+ // uint32_t acq_nof_mem_regs,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_SRAM_BUFFERING;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = 0;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+// XMEM
+
+// Collects all parameters and puts them in channel_cfg_t.
+input_system_error_t input_system_csi_xmem_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 csi_mem_reg_size,
+ u32 csi_nof_mem_regs,
+ u32 acq_mem_reg_size,
+ u32 acq_nof_mem_regs,
+ target_cfg2400_t target,
+ uint32_t nof_xmem_buffers
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_BUFFERING;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
+ channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_xmem_buffers;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 acq_mem_reg_size,
+ u32 acq_nof_mem_regs,
+ target_cfg2400_t target)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_ACQUIRE;
+ channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
+ channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+input_system_error_t input_system_csi_xmem_capture_only_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ u32 csi_mem_reg_size,
+ u32 csi_nof_mem_regs,
+ u32 acq_mem_reg_size,
+ u32 acq_nof_mem_regs,
+ target_cfg2400_t target)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ //channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ //channel.source_cfg.csi_cfg.backend_ch = backend_ch;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_CAPTURE;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
+ channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+// Non-CSI
+
+input_system_error_t input_system_prbs_channel_cfg(
+ u32 ch_id,
+	u32 nof_frames, //not used yet
+ u32 seed,
+ u32 sync_gen_width,
+ u32 sync_gen_height,
+ u32 sync_gen_hblank_cycles,
+ u32 sync_gen_vblank_cycles,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ (void)nof_frames;
+
+ channel.ch_id = ch_id;
+ channel.source_type = INPUT_SYSTEM_SOURCE_PRBS;
+
+ channel.source_cfg.prbs_cfg.seed = seed;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.width = sync_gen_width;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.height = sync_gen_height;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.hblank_cycles = sync_gen_hblank_cycles;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.vblank_cycles = sync_gen_vblank_cycles;
+
+ channel.target_cfg = target;
+
+ return input_system_configure_channel(channel);
+}
+
+input_system_error_t input_system_tpg_channel_cfg(
+ u32 ch_id,
+	u32 nof_frames, //not used yet
+ u32 x_mask,
+ u32 y_mask,
+ u32 x_delta,
+ u32 y_delta,
+ u32 xy_mask,
+ u32 sync_gen_width,
+ u32 sync_gen_height,
+ u32 sync_gen_hblank_cycles,
+ u32 sync_gen_vblank_cycles,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ (void)nof_frames;
+
+ channel.ch_id = ch_id;
+ channel.source_type = INPUT_SYSTEM_SOURCE_TPG;
+
+ channel.source_cfg.tpg_cfg.x_mask = x_mask;
+ channel.source_cfg.tpg_cfg.y_mask = y_mask;
+ channel.source_cfg.tpg_cfg.x_delta = x_delta;
+ channel.source_cfg.tpg_cfg.y_delta = y_delta;
+ channel.source_cfg.tpg_cfg.xy_mask = xy_mask;
+ channel.source_cfg.tpg_cfg.sync_gen_cfg.width = sync_gen_width;
+ channel.source_cfg.tpg_cfg.sync_gen_cfg.height = sync_gen_height;
+ channel.source_cfg.tpg_cfg.sync_gen_cfg.hblank_cycles = sync_gen_hblank_cycles;
+ channel.source_cfg.tpg_cfg.sync_gen_cfg.vblank_cycles = sync_gen_vblank_cycles;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+// MW: Don't use system-specific names (even in system-specific files): "cfg2400" -> cfg
+input_system_error_t input_system_gpfifo_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames, //not used yet
+
+ target_cfg2400_t target)
+{
+ channel_cfg_t channel;
+
+ (void)nof_frames;
+
+ channel.ch_id = ch_id;
+ channel.source_type = INPUT_SYSTEM_SOURCE_FIFO;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Private specialized functions for channel setting.
+//
+///////////////////////////////////////////////////////////////////////////
+
+// Fills in the parameters of config.csi_value[port].
+static input_system_error_t input_system_configure_channel_sensor(
+ const channel_cfg_t channel)
+{
+ const u32 port = channel.source_cfg.csi_cfg.csi_port;
+ input_system_error_t status = INPUT_SYSTEM_ERR_NO_ERROR;
+
+ input_system_multiplex_t mux;
+
+ if (port >= N_INPUT_SYSTEM_PORTS)
+ return INPUT_SYSTEM_ERR_GENERIC;
+
+ //check if port > N_INPUT_SYSTEM_MULTIPLEX
+
+ status = set_source_type(&config.source_type, channel.source_type,
+ &config.source_type_flags);
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+
+ // Check for conflicts on source (implicitly on multicast, capture unit and input buffer).
+
+ status = set_csi_cfg(&config.csi_value[port], &channel.source_cfg.csi_cfg,
+ &config.csi_flags[port]);
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+
+ switch (channel.source_cfg.csi_cfg.buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+
+ // Check for conflicts on mux.
+ mux = INPUT_SYSTEM_MIPI_PORT0 + port;
+ status = input_system_multiplexer_cfg(&config.multiplexer, mux,
+ &config.multiplexer_flags);
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+ config.multicast[port] = INPUT_SYSTEM_CSI_BACKEND;
+
+ // Shared resource, so it should be blocked.
+ //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+
+ break;
+ case INPUT_SYSTEM_SRAM_BUFFERING:
+
+ // Check for conflicts on mux.
+ mux = INPUT_SYSTEM_ACQUISITION_UNIT;
+ status = input_system_multiplexer_cfg(&config.multiplexer, mux,
+ &config.multiplexer_flags);
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+ config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER;
+
+ // Shared resource, so it should be blocked.
+ //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+
+ break;
+ case INPUT_SYSTEM_XMEM_BUFFERING:
+
+ // Check for conflicts on mux.
+ mux = INPUT_SYSTEM_ACQUISITION_UNIT;
+ status = input_system_multiplexer_cfg(&config.multiplexer, mux,
+ &config.multiplexer_flags);
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+ config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER;
+
+ // Shared resource, so it should be blocked.
+ //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+
+ break;
+	case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
+	case INPUT_SYSTEM_XMEM_CAPTURE:
+	case INPUT_SYSTEM_XMEM_ACQUIRE:
+	default:
+		return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// Test flags and set structure.
+static input_system_error_t set_source_type(
+ input_system_source_t *const lhs,
+ const input_system_source_t rhs,
+ input_system_config_flags_t *const flags)
+{
+ // MW: Not enough asserts
+ assert(lhs);
+ assert(flags);
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // Check for consistency with already set value.
+ if ((*lhs) == (rhs)) {
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+ } else {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ // Check the value (individually).
+ if (rhs >= N_INPUT_SYSTEM_SOURCE) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ // Set the value.
+ *lhs = rhs;
+
+ *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
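+
+/*
+ * Editor's note: set_source_type(), set_csi_cfg() and
+ * input_system_multiplexer_cfg() below share the same set-once flag protocol:
+ * BLOCKED always conflicts, SET accepts only an identical value, and
+ * otherwise the value is validated, stored, and flagged SET. A minimal
+ * caller sketch, assuming a fresh flags word:
+ *
+ *	input_system_config_flags_t f = INPUT_SYSTEM_CFG_FLAG_RESET;
+ *	input_system_source_t src;
+ *
+ *	set_source_type(&src, INPUT_SYSTEM_SOURCE_SENSOR, &f);	// stored, SET
+ *	set_source_type(&src, INPUT_SYSTEM_SOURCE_SENSOR, &f);	// same value, OK
+ *	set_source_type(&src, INPUT_SYSTEM_SOURCE_TPG, &f);	// CONFLICT
+ */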
+
+// Test flags and set structure.
+static input_system_error_t set_csi_cfg(
+ csi_cfg_t *const lhs,
+ const csi_cfg_t *const rhs,
+ input_system_config_flags_t *const flags)
+{
+ u32 memory_required;
+ u32 acq_memory_required;
+
+ assert(lhs);
+ assert(flags);
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+
+ if (*flags & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // check for consistency with already set value.
+ if (/*lhs->backend_ch == rhs.backend_ch
+ &&*/ lhs->buffering_mode == rhs->buffering_mode
+ && lhs->csi_buffer.mem_reg_size == rhs->csi_buffer.mem_reg_size
+ && lhs->csi_buffer.nof_mem_regs == rhs->csi_buffer.nof_mem_regs
+ && lhs->acquisition_buffer.mem_reg_size == rhs->acquisition_buffer.mem_reg_size
+ && lhs->acquisition_buffer.nof_mem_regs == rhs->acquisition_buffer.nof_mem_regs
+ && lhs->nof_xmem_buffers == rhs->nof_xmem_buffers
+ ) {
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+ } else {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ // Check the value (individually).
+ // no check for backend_ch
+ // no check for nof_xmem_buffers
+ memory_required = rhs->csi_buffer.mem_reg_size * rhs->csi_buffer.nof_mem_regs;
+ acq_memory_required = rhs->acquisition_buffer.mem_reg_size *
+ rhs->acquisition_buffer.nof_mem_regs;
+ if (rhs->buffering_mode >= N_INPUT_SYSTEM_BUFFERING_MODE
+ ||
+ // Check if required memory is available in input buffer (SRAM).
+ (memory_required + acq_memory_required) > config.unallocated_ib_mem_words
+
+ ) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ // Set the value.
+ //lhs[port]->backend_ch = rhs.backend_ch;
+ lhs->buffering_mode = rhs->buffering_mode;
+ lhs->nof_xmem_buffers = rhs->nof_xmem_buffers;
+
+ lhs->csi_buffer.mem_reg_size = rhs->csi_buffer.mem_reg_size;
+ lhs->csi_buffer.nof_mem_regs = rhs->csi_buffer.nof_mem_regs;
+ lhs->acquisition_buffer.mem_reg_size = rhs->acquisition_buffer.mem_reg_size;
+ lhs->acquisition_buffer.nof_mem_regs = rhs->acquisition_buffer.nof_mem_regs;
+	// ALX: NB: Here we just set the buffer parameters; the buffer is not
+	// allocated yet (no addresses determined). That will be done during commit.
+
+ // FIXIT: acq_memory_required is not deducted, since it can be allocated multiple times.
+ config.unallocated_ib_mem_words -= memory_required;
+//assert(config.unallocated_ib_mem_words >=0);
+ *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// Test flags and set structure.
+static input_system_error_t input_system_multiplexer_cfg(
+ input_system_multiplex_t *const lhs,
+ const input_system_multiplex_t rhs,
+ input_system_config_flags_t *const flags)
+{
+ assert(lhs);
+ assert(flags);
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // Check for consistency with already set value.
+ if ((*lhs) == (rhs)) {
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+ } else {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ // Check the value (individually).
+ if (rhs >= N_INPUT_SYSTEM_MULTIPLEX) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+ // Set the value.
+ *lhs = rhs;
+
+ *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c
new file mode 100644
index 000000000000..fdc99cc6eae4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c
@@ -0,0 +1,451 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "assert_support.h"
+#include "irq.h"
+
+#ifndef __INLINE_GP_DEVICE__
+#define __INLINE_GP_DEVICE__
+#endif
+#include "gp_device.h" /* _REG_GP_IRQ_REQUEST_ADDR */
+
+#include "platform_support.h" /* hrt_sleep() */
+
+static inline void irq_wait_for_write_complete(
+ const irq_ID_t ID);
+
+static inline bool any_irq_channel_enabled(
+ const irq_ID_t ID);
+
+static inline irq_ID_t virq_get_irq_id(
+ const virq_id_t irq_ID,
+ unsigned int *channel_ID);
+
+#ifndef __INLINE_IRQ__
+#include "irq_private.h"
+#endif /* __INLINE_IRQ__ */
+
+static unsigned short IRQ_N_CHANNEL[N_IRQ_ID] = {
+ IRQ0_ID_N_CHANNEL,
+ IRQ1_ID_N_CHANNEL,
+ IRQ2_ID_N_CHANNEL,
+ IRQ3_ID_N_CHANNEL
+};
+
+static unsigned short IRQ_N_ID_OFFSET[N_IRQ_ID + 1] = {
+ IRQ0_ID_OFFSET,
+ IRQ1_ID_OFFSET,
+ IRQ2_ID_OFFSET,
+ IRQ3_ID_OFFSET,
+ IRQ_END_OFFSET
+};
+
+static virq_id_t IRQ_NESTING_ID[N_IRQ_ID] = {
+ N_virq_id,
+ virq_ifmt,
+ virq_isys,
+ virq_isel
+};
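+
+/*
+ * Editor's note: IRQ0 is the top-level controller; the other controllers are
+ * nested, and their output pins show up on IRQ0 as the virtual channels
+ * listed above (virq_ifmt, virq_isys, virq_isel). N_virq_id marks IRQ0
+ * itself as unnested. cnd_virq_enable_channel() uses this table to raise or
+ * drop the parent pin together with the nested channel.
+ */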
+
+void irq_clear_all(
+ const irq_ID_t ID)
+{
+ hrt_data mask = 0xFFFFFFFF;
+
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_N_CHANNEL[ID] <= HRT_DATA_WIDTH);
+
+ if (IRQ_N_CHANNEL[ID] < HRT_DATA_WIDTH) {
+ mask = ~((~(hrt_data)0) >> IRQ_N_CHANNEL[ID]);
+ }
+
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, mask);
+ return;
+}
+
+/*
+ * Do we want the user to be able to set the signalling method?
+ */
+void irq_enable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq_id)
+{
+ unsigned int mask = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
+ unsigned int enable = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+ unsigned int edge_in = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_REG_IDX);
+ unsigned int me = 1U << irq_id;
+
+ assert(ID < N_IRQ_ID);
+ assert(irq_id < IRQ_N_CHANNEL[ID]);
+
+ mask |= me;
+ enable |= me;
+ edge_in |= me; /* rising edge */
+
+	/* to avoid mishaps, the configuration must be applied in the following order */
+
+ /* mask this interrupt */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask & ~me);
+ /* rising edge at input */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_REG_IDX, edge_in);
+ /* enable interrupt to output */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable);
+ /* clear current irq only */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me);
+ /* unmask interrupt from input */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask);
+
+ irq_wait_for_write_complete(ID);
+
+ return;
+}
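+
+/*
+ * Usage sketch (editor's note, hypothetical channel number): enabling
+ * channel 3 on the top-level controller walks bit (1U << 3) through the
+ * mask/edge/enable/clear/unmask sequence above:
+ *
+ *	irq_enable_channel(IRQ0_ID, 3);
+ *	irq_enable_pulse(IRQ0_ID, false);	// hypothetical: leave pulse mode off
+ */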
+
+void irq_enable_pulse(
+ const irq_ID_t ID,
+ bool pulse)
+{
+ unsigned int edge_out = 0x0;
+
+ if (pulse) {
+ edge_out = 0xffffffff;
+ }
+ /* output is given as edge, not pulse */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX, edge_out);
+ return;
+}
+
+void irq_disable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq_id)
+{
+ unsigned int mask = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
+ unsigned int enable = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+ unsigned int me = 1U << irq_id;
+
+ assert(ID < N_IRQ_ID);
+ assert(irq_id < IRQ_N_CHANNEL[ID]);
+
+ mask &= ~me;
+ enable &= ~me;
+
+ /* enable interrupt to output */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable);
+ /* unmask interrupt from input */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask);
+ /* clear current irq only */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me);
+
+ irq_wait_for_write_complete(ID);
+
+ return;
+}
+
+enum hrt_isp_css_irq_status irq_get_channel_id(
+ const irq_ID_t ID,
+ unsigned int *irq_id)
+{
+ unsigned int irq_status = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ unsigned int idx;
+ enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success;
+
+ assert(ID < N_IRQ_ID);
+ assert(irq_id);
+
+ /* find the first irq bit */
+ for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) {
+ if (irq_status & (1U << idx))
+ break;
+ }
+ if (idx == IRQ_N_CHANNEL[ID])
+ return hrt_isp_css_irq_status_error;
+
+ /* now check whether there are more bits set */
+ if (irq_status != (1U << idx))
+ status = hrt_isp_css_irq_status_more_irqs;
+
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx);
+
+ irq_wait_for_write_complete(ID);
+
+ if (irq_id)
+ *irq_id = (unsigned int)idx;
+
+ return status;
+}
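+
+/*
+ * Drain sketch (editor's note): the return status lets a caller keep popping
+ * channels until the controller is empty:
+ *
+ *	enum hrt_isp_css_irq_status s;
+ *	unsigned int id;
+ *
+ *	do {
+ *		s = irq_get_channel_id(IRQ0_ID, &id);
+ *		if (s != hrt_isp_css_irq_status_error)
+ *			; // handle channel "id"
+ *	} while (s == hrt_isp_css_irq_status_more_irqs);
+ */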
+
+static const hrt_address IRQ_REQUEST_ADDR[N_IRQ_SW_CHANNEL_ID] = {
+ _REG_GP_IRQ_REQUEST0_ADDR,
+ _REG_GP_IRQ_REQUEST1_ADDR
+};
+
+void irq_raise(
+ const irq_ID_t ID,
+ const irq_sw_channel_id_t irq_id)
+{
+ hrt_address addr;
+
+ OP___assert(ID == IRQ0_ID);
+ OP___assert(IRQ_BASE[ID] != (hrt_address)-1);
+ OP___assert(irq_id < N_IRQ_SW_CHANNEL_ID);
+
+ (void)ID;
+
+ addr = IRQ_REQUEST_ADDR[irq_id];
+ /* The SW IRQ pins are remapped to offset zero */
+ gp_device_reg_store(GP_DEVICE0_ID,
+ (unsigned int)addr, 1);
+ gp_device_reg_store(GP_DEVICE0_ID,
+ (unsigned int)addr, 0);
+ return;
+}
+
+void irq_controller_get_state(
+ const irq_ID_t ID,
+ irq_controller_state_t *state)
+{
+ assert(ID < N_IRQ_ID);
+ assert(state);
+
+ state->irq_edge = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_REG_IDX);
+ state->irq_mask = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
+ state->irq_status = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ state->irq_enable = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+ state->irq_level_not_pulse = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX);
+ return;
+}
+
+bool any_virq_signal(void)
+{
+ unsigned int irq_status = irq_reg_load(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+
+ return (irq_status != 0);
+}
+
+void cnd_virq_enable_channel(
+ const virq_id_t irq_ID,
+ const bool en)
+{
+ irq_ID_t i;
+ unsigned int channel_ID;
+ irq_ID_t ID = virq_get_irq_id(irq_ID, &channel_ID);
+
+ assert(ID < N_IRQ_ID);
+
+ for (i = IRQ1_ID; i < N_IRQ_ID; i++) {
+ /* It is not allowed to enable the pin of a nested IRQ directly */
+ assert(irq_ID != IRQ_NESTING_ID[i]);
+ }
+
+ if (en) {
+ irq_enable_channel(ID, channel_ID);
+ if (IRQ_NESTING_ID[ID] != N_virq_id) {
+ /* Single level nesting, otherwise we'd need to recurse */
+ irq_enable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
+ }
+ } else {
+ irq_disable_channel(ID, channel_ID);
+ if ((IRQ_NESTING_ID[ID] != N_virq_id) && !any_irq_channel_enabled(ID)) {
+ /* Only disable the top if the nested ones are empty */
+ irq_disable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
+ }
+ }
+ return;
+}
+
+void virq_clear_all(void)
+{
+ irq_ID_t irq_id;
+
+ for (irq_id = (irq_ID_t)0; irq_id < N_IRQ_ID; irq_id++) {
+ irq_clear_all(irq_id);
+ }
+ return;
+}
+
+enum hrt_isp_css_irq_status virq_get_channel_signals(
+ virq_info_t *irq_info)
+{
+ enum hrt_isp_css_irq_status irq_status = hrt_isp_css_irq_status_error;
+ irq_ID_t ID;
+
+ assert(irq_info);
+
+ for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
+ if (any_irq_channel_enabled(ID)) {
+ hrt_data irq_data = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+
+ if (irq_data != 0) {
+ /* The error condition is an IRQ pulse received with no IRQ status written */
+ irq_status = hrt_isp_css_irq_status_success;
+ }
+
+ irq_info->irq_status_reg[ID] |= irq_data;
+
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, irq_data);
+
+ irq_wait_for_write_complete(ID);
+ }
+ }
+
+ return irq_status;
+}
+
+void virq_clear_info(
+ virq_info_t *irq_info)
+{
+ irq_ID_t ID;
+
+ assert(irq_info);
+
+ for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
+ irq_info->irq_status_reg[ID] = 0;
+ }
+ return;
+}
+
+enum hrt_isp_css_irq_status virq_get_channel_id(
+ virq_id_t *irq_id)
+{
+ unsigned int irq_status = irq_reg_load(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ unsigned int idx;
+ enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success;
+ irq_ID_t ID;
+
+ assert(irq_id);
+
+ /* find the first irq bit on device 0 */
+ for (idx = 0; idx < IRQ_N_CHANNEL[IRQ0_ID]; idx++) {
+ if (irq_status & (1U << idx))
+ break;
+ }
+
+ if (idx == IRQ_N_CHANNEL[IRQ0_ID]) {
+ return hrt_isp_css_irq_status_error;
+ }
+
+ /* Check whether there are more bits set on device 0 */
+ if (irq_status != (1U << idx)) {
+ status = hrt_isp_css_irq_status_more_irqs;
+ }
+
+ /* Check whether we have an IRQ on one of the nested devices */
+ for (ID = N_IRQ_ID - 1 ; ID > (irq_ID_t)0; ID--) {
+ if (IRQ_NESTING_ID[ID] == (virq_id_t)idx) {
+ break;
+ }
+ }
+
+ /* If we have a nested IRQ, load its state and discard the device 0 state */
+ if (ID != IRQ0_ID) {
+ irq_status = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ /* find the first irq bit on device "id" */
+ for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) {
+ if (irq_status & (1U << idx))
+ break;
+ }
+
+ if (idx == IRQ_N_CHANNEL[ID]) {
+ return hrt_isp_css_irq_status_error;
+ }
+
+ /* Likewise, check whether more bits are set on this device */
+ if (irq_status != (1U << idx)) {
+ status = hrt_isp_css_irq_status_more_irqs;
+ } else {
+ /* If this device is empty, clear the state on device 0 */
+ irq_reg_store(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << IRQ_NESTING_ID[ID]);
+ }
+ } /* if (ID != IRQ0_ID) */
+
+ /* Clear the IRQ on the detected device; if there is no nested IRQ, this is device 0 */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx);
+
+ irq_wait_for_write_complete(ID);
+
+ idx += IRQ_N_ID_OFFSET[ID];
+ if (irq_id)
+ *irq_id = (virq_id_t)idx;
+
+ return status;
+}
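+
+/*
+ * Illustrative ISR sketch, not part of the driver: resolve and clear
+ * virtual IRQs one at a time until no pending bits remain (dispatch()
+ * is a hypothetical per-IRQ handler):
+ *
+ *	virq_id_t id;
+ *	enum hrt_isp_css_irq_status st;
+ *
+ *	do {
+ *		st = virq_get_channel_id(&id);
+ *		if (st != hrt_isp_css_irq_status_error)
+ *			dispatch(id);
+ *	} while (st == hrt_isp_css_irq_status_more_irqs);
+ */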
+
+static inline void irq_wait_for_write_complete(
+ const irq_ID_t ID)
+{
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
+ (void)ia_css_device_load_uint32(IRQ_BASE[ID] +
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX * sizeof(hrt_data));
+}
+
+static inline bool any_irq_channel_enabled(
+ const irq_ID_t ID)
+{
+ hrt_data en_reg;
+
+ assert(ID < N_IRQ_ID);
+
+ en_reg = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+
+ return (en_reg != 0);
+}
+
+static inline irq_ID_t virq_get_irq_id(
+ const virq_id_t irq_ID,
+ unsigned int *channel_ID)
+{
+ irq_ID_t ID;
+
+ assert(channel_ID);
+
+ for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
+ if (irq_ID < IRQ_N_ID_OFFSET[ID + 1]) {
+ break;
+ }
+ }
+
+ *channel_ID = (unsigned int)irq_ID - IRQ_N_ID_OFFSET[ID];
+
+ return ID;
+}
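+
+/*
+ * Worked example, illustrative only: with IRQ_N_ID_OFFSET laid out in
+ * steps of 32 ({0, 32, 64, 96, 128}, following the offsets in
+ * irq_local.h), a virtual IRQ number of 69 is not below the IRQ2 offset
+ * (64) but is below the IRQ3 offset (96), so the loop stops at
+ * ID == IRQ2_ID and *channel_ID becomes 69 - 64 = 5.
+ */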
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h
new file mode 100644
index 000000000000..86028fde2a94
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h
@@ -0,0 +1,134 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_LOCAL_H_INCLUDED__
+#define __IRQ_LOCAL_H_INCLUDED__
+
+#include "irq_global.h"
+
+#include <irq_controller_defs.h>
+
+/* IRQ0_ID */
+#include "hive_isp_css_defs.h"
+#define HIVE_GP_DEV_IRQ_NUM_IRQS 32
+/* IRQ1_ID */
+#include "input_formatter_subsystem_defs.h"
+#define HIVE_IFMT_IRQ_NUM_IRQS 5
+/* IRQ2_ID */
+#include "input_system_defs.h"
+/* IRQ3_ID */
+#include "input_selector_defs.h"
+
+#define IRQ_ID_OFFSET 32
+#define IRQ0_ID_OFFSET 0
+#define IRQ1_ID_OFFSET IRQ_ID_OFFSET
+#define IRQ2_ID_OFFSET (2 * IRQ_ID_OFFSET)
+#define IRQ3_ID_OFFSET (3 * IRQ_ID_OFFSET)
+#define IRQ_END_OFFSET (4 * IRQ_ID_OFFSET)
+
+#define IRQ0_ID_N_CHANNEL HIVE_GP_DEV_IRQ_NUM_IRQS
+#define IRQ1_ID_N_CHANNEL HIVE_IFMT_IRQ_NUM_IRQS
+#define IRQ2_ID_N_CHANNEL HIVE_ISYS_IRQ_NUM_BITS
+#define IRQ3_ID_N_CHANNEL HIVE_ISEL_IRQ_NUM_IRQS
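+
+/*
+ * Layout sketch, for illustration: the virtual IRQ space is partitioned
+ * into fixed blocks of IRQ_ID_OFFSET (32) entries per controller:
+ *
+ *	  0..31		IRQ0 (gp device)
+ *	 32..63		IRQ1 (input formatter)
+ *	 64..95		IRQ2 (input system)
+ *	 96..127	IRQ3 (input selector)
+ *
+ * Only the first IRQn_ID_N_CHANNEL entries of each block are valid.
+ */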
+
+typedef struct virq_info_s virq_info_t;
+typedef struct irq_controller_state_s irq_controller_state_t;
+
+typedef enum {
+ virq_gpio_pin_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID,
+ virq_gpio_pin_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID,
+ virq_gpio_pin_2 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID,
+ virq_gpio_pin_3 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID,
+ virq_gpio_pin_4 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID,
+ virq_gpio_pin_5 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID,
+ virq_gpio_pin_6 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID,
+ virq_gpio_pin_7 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID,
+ virq_gpio_pin_8 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID,
+ virq_gpio_pin_9 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID,
+ virq_gpio_pin_10 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID,
+ virq_gpio_pin_11 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID,
+ virq_sp = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_BIT_ID,
+ virq_isp = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_BIT_ID,
+ virq_isys = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISYS_BIT_ID,
+ virq_isel = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISEL_BIT_ID,
+ virq_ifmt = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_IFMT_BIT_ID,
+ virq_sp_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID,
+ virq_isp_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID,
+ virq_mod_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID,
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+ virq_isp_pmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID,
+#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+ virq_isys_2401 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID,
+#else
+#error "irq_local.h: 2400_SYSTEM must be one of {2400, 2401 }"
+#endif
+ virq_isp_bamem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID,
+ virq_isp_dmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID,
+ virq_sp_icache_mem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID,
+ virq_sp_dmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID,
+ virq_mmu_cache_mem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID,
+ virq_gp_timer_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID,
+ virq_gp_timer_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID,
+ virq_sw_pin_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID,
+ virq_sw_pin_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID,
+ virq_dma = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_DMA_BIT_ID,
+ virq_sp_stream_mon_b = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID,
+
+ virq_ifmt0_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID,
+ virq_ifmt1_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID,
+ virq_ifmt2_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_SEC_BIT_ID,
+ virq_ifmt3_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_MEM_CPY_BIT_ID,
+ virq_ifmt_sideband_changed = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID,
+
+ virq_isys_sof = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_SOF_BIT_ID,
+ virq_isys_eof = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_EOF_BIT_ID,
+ virq_isys_sol = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_SOL_BIT_ID,
+ virq_isys_eol = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_EOL_BIT_ID,
+ virq_isys_csi = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID,
+ virq_isys_csi_be = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID,
+ virq_isys_capt0_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP,
+ virq_isys_capt0_id_late_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP,
+ virq_isys_capt1_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP,
+ virq_isys_capt1_id_late_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP,
+ virq_isys_capt2_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP,
+ virq_isys_capt2_id_late_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP,
+ virq_isys_acq_sop_mismatch = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH,
+ virq_isys_ctrl_capt0 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPA,
+ virq_isys_ctrl_capt1 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPB,
+ virq_isys_ctrl_capt2 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPC,
+ virq_isys_cio_to_ahb = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CIO2AHB,
+ virq_isys_dma = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_DMA_BIT_ID,
+ virq_isys_fifo_monitor = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_STREAM_MON_BIT_ID,
+
+ virq_isel_sof = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID,
+ virq_isel_eof = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID,
+ virq_isel_sol = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID,
+ virq_isel_eol = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID,
+
+ N_virq_id = IRQ_END_OFFSET
+} virq_id_t;
+
+struct virq_info_s {
+ hrt_data irq_status_reg[N_IRQ_ID];
+};
+
+struct irq_controller_state_s {
+ unsigned int irq_edge;
+ unsigned int irq_mask;
+ unsigned int irq_status;
+ unsigned int irq_enable;
+ unsigned int irq_level_not_pulse;
+};
+
+#endif /* __IRQ_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h
new file mode 100644
index 000000000000..8a947aefd851
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_PRIVATE_H_INCLUDED__
+#define __IRQ_PRIVATE_H_INCLUDED__
+
+#include "irq_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_IRQ_C void irq_reg_store(
+ const irq_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(IRQ_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_IRQ_C hrt_data irq_reg_load(
+ const irq_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(IRQ_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __IRQ_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c
new file mode 100644
index 000000000000..7de7d08f4757
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c
@@ -0,0 +1,128 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <system_global.h>
+#include "isp.h"
+
+#ifndef __INLINE_ISP__
+#include "isp_private.h"
+#endif /* __INLINE_ISP__ */
+
+#include "assert_support.h"
+#include "platform_support.h" /* hrt_sleep() */
+
+void cnd_isp_irq_enable(
+ const isp_ID_t ID,
+ const bool cnd)
+{
+ if (cnd) {
+ isp_ctrl_setbit(ID, ISP_IRQ_READY_REG, ISP_IRQ_READY_BIT);
+ /* Enabling the IRQ immediately triggers an interrupt; clear it */
+ isp_ctrl_setbit(ID, ISP_IRQ_CLEAR_REG, ISP_IRQ_CLEAR_BIT);
+ } else {
+ isp_ctrl_clearbit(ID, ISP_IRQ_READY_REG,
+ ISP_IRQ_READY_BIT);
+ }
+ return;
+}
+
+void isp_get_state(
+ const isp_ID_t ID,
+ isp_state_t *state,
+ isp_stall_t *stall)
+{
+ hrt_data sc = isp_ctrl_load(ID, ISP_SC_REG);
+
+ assert(state);
+ assert(stall);
+
+#if defined(_hrt_sysmem_ident_address)
+ /* Patch to avoid compiler unused symbol warning in C_RUN build */
+ (void)__hrt_sysmem_ident_address;
+ (void)_hrt_sysmem_map_var;
+#endif
+
+ state->pc = isp_ctrl_load(ID, ISP_PC_REG);
+ state->status_register = sc;
+ state->is_broken = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_BROKEN_BIT);
+ state->is_idle = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT);
+ state->is_sleeping = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT);
+ state->is_stalling = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_STALLING_BIT);
+ stall->stat_ctrl =
+ !isp_ctrl_getbit(ID, ISP_CTRL_SINK_REG, ISP_CTRL_SINK_BIT);
+ stall->pmem =
+ !isp_ctrl_getbit(ID, ISP_PMEM_SINK_REG, ISP_PMEM_SINK_BIT);
+ stall->dmem =
+ !isp_ctrl_getbit(ID, ISP_DMEM_SINK_REG, ISP_DMEM_SINK_BIT);
+ stall->vmem =
+ !isp_ctrl_getbit(ID, ISP_VMEM_SINK_REG, ISP_VMEM_SINK_BIT);
+ stall->fifo0 =
+ !isp_ctrl_getbit(ID, ISP_FIFO0_SINK_REG, ISP_FIFO0_SINK_BIT);
+ stall->fifo1 =
+ !isp_ctrl_getbit(ID, ISP_FIFO1_SINK_REG, ISP_FIFO1_SINK_BIT);
+ stall->fifo2 =
+ !isp_ctrl_getbit(ID, ISP_FIFO2_SINK_REG, ISP_FIFO2_SINK_BIT);
+ stall->fifo3 =
+ !isp_ctrl_getbit(ID, ISP_FIFO3_SINK_REG, ISP_FIFO3_SINK_BIT);
+ stall->fifo4 =
+ !isp_ctrl_getbit(ID, ISP_FIFO4_SINK_REG, ISP_FIFO4_SINK_BIT);
+ stall->fifo5 =
+ !isp_ctrl_getbit(ID, ISP_FIFO5_SINK_REG, ISP_FIFO5_SINK_BIT);
+ stall->fifo6 =
+ !isp_ctrl_getbit(ID, ISP_FIFO6_SINK_REG, ISP_FIFO6_SINK_BIT);
+ stall->vamem1 =
+ !isp_ctrl_getbit(ID, ISP_VAMEM1_SINK_REG, ISP_VAMEM1_SINK_BIT);
+ stall->vamem2 =
+ !isp_ctrl_getbit(ID, ISP_VAMEM2_SINK_REG, ISP_VAMEM2_SINK_BIT);
+ stall->vamem3 =
+ !isp_ctrl_getbit(ID, ISP_VAMEM3_SINK_REG, ISP_VAMEM3_SINK_BIT);
+ stall->hmem =
+ !isp_ctrl_getbit(ID, ISP_HMEM_SINK_REG, ISP_HMEM_SINK_BIT);
+ /*
+ stall->icache_master =
+ !isp_ctrl_getbit(ID, ISP_ICACHE_MT_SINK_REG,
+ ISP_ICACHE_MT_SINK_BIT);
+ */
+ return;
+}
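+
+/*
+ * Note, an inference from the reads above: the *_SINK bits apparently
+ * read 1 while the port can accept data, so each stall flag is the
+ * inverted sink bit; a cleared sink bit marks the stalling port.
+ */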
+
+/* ISP functions to control the ISP state from the host, even in crun. */
+
+/* Inspect readiness of an ISP indexed by ID */
+unsigned int isp_is_ready(isp_ID_t ID)
+{
+ assert(ID < N_ISP_ID);
+ return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT);
+}
+
+/* Inspect sleeping of an ISP indexed by ID */
+unsigned int isp_is_sleeping(isp_ID_t ID)
+{
+ assert(ID < N_ISP_ID);
+ return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT);
+}
+
+/* To be called by the host immediately before starting ISP ID. */
+void isp_start(isp_ID_t ID)
+{
+ assert(ID < N_ISP_ID);
+}
+
+/* Wake up ISP ID. */
+void isp_wake(isp_ID_t ID)
+{
+ assert(ID < N_ISP_ID);
+ isp_ctrl_setbit(ID, ISP_SC_REG, ISP_START_BIT);
+ hrt_sleep();
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
new file mode 100644
index 000000000000..b04da7f1f98c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
@@ -0,0 +1,57 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_LOCAL_H_INCLUDED__
+#define __ISP_LOCAL_H_INCLUDED__
+
+#include <stdbool.h>
+
+#include "isp_global.h"
+
+#include <isp2400_support.h>
+
+#define HIVE_ISP_VMEM_MASK ((1U << ISP_VMEM_ELEMBITS) - 1)
+
+typedef struct isp_state_s isp_state_t;
+typedef struct isp_stall_s isp_stall_t;
+
+struct isp_state_s {
+ int pc;
+ int status_register;
+ bool is_broken;
+ bool is_idle;
+ bool is_sleeping;
+ bool is_stalling;
+};
+
+struct isp_stall_s {
+ bool fifo0;
+ bool fifo1;
+ bool fifo2;
+ bool fifo3;
+ bool fifo4;
+ bool fifo5;
+ bool fifo6;
+ bool stat_ctrl;
+ bool dmem;
+ bool vmem;
+ bool vamem1;
+ bool vamem2;
+ bool vamem3;
+ bool hmem;
+ bool pmem;
+ bool icache_master;
+};
+
+#endif /* __ISP_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h
new file mode 100644
index 000000000000..a6ab10711255
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h
@@ -0,0 +1,160 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_PRIVATE_H_INCLUDED__
+#define __ISP_PRIVATE_H_INCLUDED__
+
+#ifdef HRT_MEMORY_ACCESS
+#include <hrt/api.h>
+#endif
+
+#include "isp_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+#include "type_support.h"
+
+STORAGE_CLASS_ISP_C void isp_ctrl_store(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_CTRL_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store_uint32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+#else
+ hrt_master_port_store_32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C hrt_data isp_ctrl_load(
+ const isp_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_CTRL_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ return ia_css_device_load_uint32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data));
+#else
+ return hrt_master_port_uload_32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data));
+#endif
+}
+
+STORAGE_CLASS_ISP_C bool isp_ctrl_getbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit)
+{
+ hrt_data val = isp_ctrl_load(ID, reg);
+
+ return (val & (1UL << bit)) != 0;
+}
+
+STORAGE_CLASS_ISP_C void isp_ctrl_setbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit)
+{
+ hrt_data data = isp_ctrl_load(ID, reg);
+
+ isp_ctrl_store(ID, reg, (data | (1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_ctrl_clearbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit)
+{
+ hrt_data data = isp_ctrl_load(ID, reg);
+
+ isp_ctrl_store(ID, reg, (data & ~(1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_dmem_store(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const void *data,
+ const size_t size)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store(ISP_DMEM_BASE[ID] + addr, data, size);
+#else
+ hrt_master_port_store(ISP_DMEM_BASE[ID] + addr, data, size);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_dmem_load(
+ const isp_ID_t ID,
+ const unsigned int addr,
+ void *data,
+ const size_t size)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_load(ISP_DMEM_BASE[ID] + addr, data, size);
+#else
+ hrt_master_port_load(ISP_DMEM_BASE[ID] + addr, data, size);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_dmem_store_uint32(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const uint32_t data)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store_uint32(ISP_DMEM_BASE[ID] + addr, data);
+#else
+ hrt_master_port_store_32(ISP_DMEM_BASE[ID] + addr, data);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C uint32_t isp_dmem_load_uint32(
+ const isp_ID_t ID,
+ const unsigned int addr)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+#if !defined(HRT_MEMORY_ACCESS)
+ return ia_css_device_load_uint32(ISP_DMEM_BASE[ID] + addr);
+#else
+ return hrt_master_port_uload_32(ISP_DMEM_BASE[ID] + addr);
+#endif
+}
+
+STORAGE_CLASS_ISP_C uint32_t isp_2w_cat_1w(
+ const u16 x0,
+ const u16 x1)
+{
+ u32 out = ((uint32_t)(x1 & HIVE_ISP_VMEM_MASK) << ISP_VMEM_ELEMBITS)
+ | (x0 & HIVE_ISP_VMEM_MASK);
+ return out;
+}
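+
+/*
+ * Worked example, illustrative only: assuming ISP_VMEM_ELEMBITS == 14,
+ * HIVE_ISP_VMEM_MASK is 0x3fff and isp_2w_cat_1w(0x0123, 0x0456)
+ * yields (0x0456 << 14) | 0x0123 == 0x1158123.
+ */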
+
+#endif /* __ISP_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c
new file mode 100644
index 000000000000..a17b32b6d414
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* The name "mmu.h" is already taken */
+#include "mmu_device.h"
+
+void mmu_set_page_table_base_index(
+ const mmu_ID_t ID,
+ const hrt_data base_index)
+{
+ mmu_reg_store(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX, base_index);
+ return;
+}
+
+hrt_data mmu_get_page_table_base_index(
+ const mmu_ID_t ID)
+{
+ return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX);
+}
+
+void mmu_invalidate_cache(
+ const mmu_ID_t ID)
+{
+ mmu_reg_store(ID, _HRT_MMU_INVALIDATE_TLB_REG_IDX, 1);
+ return;
+}
+
+void mmu_invalidate_cache_all(void)
+{
+ mmu_ID_t mmu_id;
+
+ for (mmu_id = (mmu_ID_t)0; mmu_id < N_MMU_ID; mmu_id++) {
+ mmu_invalidate_cache(mmu_id);
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h
new file mode 100644
index 000000000000..7c3ad157189f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MMU_LOCAL_H_INCLUDED__
+#define __MMU_LOCAL_H_INCLUDED__
+
+#include "mmu_global.h"
+
+#endif /* __MMU_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c
new file mode 100644
index 000000000000..f084b316e373
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c
@@ -0,0 +1,81 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "sp.h"
+
+#ifndef __INLINE_SP__
+#include "sp_private.h"
+#endif /* __INLINE_SP__ */
+
+#include "assert_support.h"
+
+void cnd_sp_irq_enable(
+ const sp_ID_t ID,
+ const bool cnd)
+{
+ if (cnd) {
+ sp_ctrl_setbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
+ /* Enabling the IRQ immediately triggers an interrupt; clear it */
+ sp_ctrl_setbit(ID, SP_IRQ_CLEAR_REG, SP_IRQ_CLEAR_BIT);
+ } else {
+ sp_ctrl_clearbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
+ }
+}
+
+void sp_get_state(
+ const sp_ID_t ID,
+ sp_state_t *state,
+ sp_stall_t *stall)
+{
+ hrt_data sc = sp_ctrl_load(ID, SP_SC_REG);
+
+ assert(state);
+ assert(stall);
+
+ state->pc = sp_ctrl_load(ID, SP_PC_REG);
+ state->status_register = sc;
+ state->is_broken = (sc & (1U << SP_BROKEN_BIT)) != 0;
+ state->is_idle = (sc & (1U << SP_IDLE_BIT)) != 0;
+ state->is_sleeping = (sc & (1U << SP_SLEEPING_BIT)) != 0;
+ state->is_stalling = (sc & (1U << SP_STALLING_BIT)) != 0;
+ stall->fifo0 =
+ !sp_ctrl_getbit(ID, SP_FIFO0_SINK_REG, SP_FIFO0_SINK_BIT);
+ stall->fifo1 =
+ !sp_ctrl_getbit(ID, SP_FIFO1_SINK_REG, SP_FIFO1_SINK_BIT);
+ stall->fifo2 =
+ !sp_ctrl_getbit(ID, SP_FIFO2_SINK_REG, SP_FIFO2_SINK_BIT);
+ stall->fifo3 =
+ !sp_ctrl_getbit(ID, SP_FIFO3_SINK_REG, SP_FIFO3_SINK_BIT);
+ stall->fifo4 =
+ !sp_ctrl_getbit(ID, SP_FIFO4_SINK_REG, SP_FIFO4_SINK_BIT);
+ stall->fifo5 =
+ !sp_ctrl_getbit(ID, SP_FIFO5_SINK_REG, SP_FIFO5_SINK_BIT);
+ stall->fifo6 =
+ !sp_ctrl_getbit(ID, SP_FIFO6_SINK_REG, SP_FIFO6_SINK_BIT);
+ stall->fifo7 =
+ !sp_ctrl_getbit(ID, SP_FIFO7_SINK_REG, SP_FIFO7_SINK_BIT);
+ stall->fifo8 =
+ !sp_ctrl_getbit(ID, SP_FIFO8_SINK_REG, SP_FIFO8_SINK_BIT);
+ stall->fifo9 =
+ !sp_ctrl_getbit(ID, SP_FIFO9_SINK_REG, SP_FIFO9_SINK_BIT);
+ stall->fifoa =
+ !sp_ctrl_getbit(ID, SP_FIFOA_SINK_REG, SP_FIFOA_SINK_BIT);
+ stall->dmem =
+ !sp_ctrl_getbit(ID, SP_DMEM_SINK_REG, SP_DMEM_SINK_BIT);
+ stall->control_master =
+ !sp_ctrl_getbit(ID, SP_CTRL_MT_SINK_REG, SP_CTRL_MT_SINK_BIT);
+ stall->icache_master =
+ !sp_ctrl_getbit(ID, SP_ICACHE_MT_SINK_REG,
+ SP_ICACHE_MT_SINK_BIT);
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h
new file mode 100644
index 000000000000..0e477b497c98
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h
@@ -0,0 +1,101 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_LOCAL_H_INCLUDED__
+#define __SP_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+#include "sp_global.h"
+
+struct sp_state_s {
+ int pc;
+ int status_register;
+ bool is_broken;
+ bool is_idle;
+ bool is_sleeping;
+ bool is_stalling;
+};
+
+struct sp_stall_s {
+ bool fifo0;
+ bool fifo1;
+ bool fifo2;
+ bool fifo3;
+ bool fifo4;
+ bool fifo5;
+ bool fifo6;
+ bool fifo7;
+ bool fifo8;
+ bool fifo9;
+ bool fifoa;
+ bool dmem;
+ bool control_master;
+ bool icache_master;
+};
+
+#define sp_address_of(var) (HIVE_ADDR_ ## var)
+
+/*
+ * deprecated
+ */
+#define store_sp_int(var, value) \
+ sp_dmem_store_uint32(SP0_ID, (unsigned int)sp_address_of(var), \
+ (uint32_t)(value))
+
+#define store_sp_ptr(var, value) \
+ sp_dmem_store_uint32(SP0_ID, (unsigned int)sp_address_of(var), \
+ (uint32_t)(value))
+
+#define load_sp_uint(var) \
+ sp_dmem_load_uint32(SP0_ID, (unsigned int)sp_address_of(var))
+
+#define load_sp_array_uint8(array_name, index) \
+ sp_dmem_load_uint8(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint8_t))
+
+#define load_sp_array_uint16(array_name, index) \
+ sp_dmem_load_uint16(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint16_t))
+
+#define load_sp_array_uint(array_name, index) \
+ sp_dmem_load_uint32(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint32_t))
+
+#define store_sp_var(var, data, bytes) \
+ sp_dmem_store(SP0_ID, (unsigned int)sp_address_of(var), data, bytes)
+
+#define store_sp_array_uint8(array_name, index, value) \
+ sp_dmem_store_uint8(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint8_t), value)
+
+#define store_sp_array_uint16(array_name, index, value) \
+ sp_dmem_store_uint16(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint16_t), value)
+
+#define store_sp_array_uint(array_name, index, value) \
+ sp_dmem_store_uint32(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint32_t), value)
+
+#define store_sp_var_with_offset(var, offset, data, bytes) \
+ sp_dmem_store(SP0_ID, (unsigned int)sp_address_of(var) + \
+ offset, data, bytes)
+
+#define load_sp_var(var, data, bytes) \
+ sp_dmem_load(SP0_ID, (unsigned int)sp_address_of(var), data, bytes)
+
+#define load_sp_var_with_offset(var, offset, data, bytes) \
+ sp_dmem_load(SP0_ID, (unsigned int)sp_address_of(var) + offset, \
+ data, bytes)
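+
+/*
+ * Usage sketch, illustrative only; "host_sp_com" stands for a
+ * hypothetical SP symbol with a generated HIVE_ADDR_host_sp_com
+ * address, and "cmd"/"val" are placeholder variables:
+ *
+ *	store_sp_array_uint(host_sp_com, 0, cmd);
+ *	val = load_sp_uint(host_sp_com);
+ */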
+
+#endif /* __SP_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h
new file mode 100644
index 000000000000..e3e24fac126e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h
@@ -0,0 +1,166 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_PRIVATE_H_INCLUDED__
+#define __SP_PRIVATE_H_INCLUDED__
+
+#include "sp_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_SP_C void sp_ctrl_store(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(SP_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_SP_C hrt_data sp_ctrl_load(
+ const sp_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+STORAGE_CLASS_SP_C bool sp_ctrl_getbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit)
+{
+ hrt_data val = sp_ctrl_load(ID, reg);
+
+ return (val & (1UL << bit)) != 0;
+}
+
+STORAGE_CLASS_SP_C void sp_ctrl_setbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit)
+{
+ hrt_data data = sp_ctrl_load(ID, reg);
+
+ sp_ctrl_store(ID, reg, (data | (1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_ctrl_clearbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit)
+{
+ hrt_data data = sp_ctrl_load(ID, reg);
+
+ sp_ctrl_store(ID, reg, (data & ~(1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const void *data,
+ const size_t size)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store(SP_DMEM_BASE[ID] + addr, data, size);
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_load(
+ const sp_ID_t ID,
+ const hrt_address addr,
+ void *data,
+ const size_t size)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ ia_css_device_load(SP_DMEM_BASE[ID] + addr, data, size);
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store_uint8(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint8_t data)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ ia_css_device_store_uint8(SP_DMEM_BASE[SP0_ID] + addr, data);
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store_uint16(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint16_t data)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ ia_css_device_store_uint16(SP_DMEM_BASE[SP0_ID] + addr, data);
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store_uint32(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint32_t data)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ ia_css_device_store_uint32(SP_DMEM_BASE[SP0_ID] + addr, data);
+ return;
+}
+
+STORAGE_CLASS_SP_C uint8_t sp_dmem_load_uint8(
+ const sp_ID_t ID,
+ const hrt_address addr)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ return ia_css_device_load_uint8(SP_DMEM_BASE[SP0_ID] + addr);
+}
+
+STORAGE_CLASS_SP_C uint16_t sp_dmem_load_uint16(
+ const sp_ID_t ID,
+ const hrt_address addr)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ return ia_css_device_load_uint16(SP_DMEM_BASE[SP0_ID] + addr);
+}
+
+STORAGE_CLASS_SP_C uint32_t sp_dmem_load_uint32(
+ const sp_ID_t ID,
+ const hrt_address addr)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ return ia_css_device_load_uint32(SP_DMEM_BASE[SP0_ID] + addr);
+}
+
+#endif /* __SP_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c
new file mode 100644
index 000000000000..aaea74389443
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c
@@ -0,0 +1,74 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "timed_ctrl.h"
+
+#ifndef __INLINE_TIMED_CTRL__
+#include "timed_ctrl_private.h"
+#endif /* __INLINE_TIMED_CTRL__ */
+
+#include "assert_support.h"
+
+void timed_ctrl_snd_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ hrt_address addr,
+ hrt_data value)
+{
+ OP___assert(ID == TIMED_CTRL0_ID);
+ OP___assert(TIMED_CTRL_BASE[ID] != (hrt_address)-1);
+
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, mask);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, condition);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, counter);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, (hrt_data)addr);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, value);
+}
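+
+/*
+ * Note, an inference from the code above: all five stores target the
+ * same command register; the timed controller appears to consume them
+ * as one queued command {mask, condition, counter, address, value},
+ * writing "value" to "address" once the condition and counter match.
+ */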
+
+/* pqiao TODO: make sure the following commands get the
+   correct BASE address for both csim and android */
+
+void timed_ctrl_snd_sp_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const sp_ID_t SP_ID,
+ hrt_address offset,
+ hrt_data value)
+{
+ OP___assert(SP_ID < N_SP_ID);
+ OP___assert(SP_DMEM_BASE[SP_ID] != (hrt_address)-1);
+
+ timed_ctrl_snd_commnd(ID, mask, condition, counter,
+ SP_DMEM_BASE[SP_ID] + offset, value);
+}
+
+void timed_ctrl_snd_gpio_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const gpio_ID_t GPIO_ID,
+ hrt_address offset,
+ hrt_data value)
+{
+ OP___assert(GPIO_ID < N_GPIO_ID);
+ OP___assert(GPIO_BASE[GPIO_ID] != (hrt_address)-1);
+
+ timed_ctrl_snd_commnd(ID, mask, condition, counter,
+ GPIO_BASE[GPIO_ID] + offset, value);
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h
new file mode 100644
index 000000000000..e570813af28d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_LOCAL_H_INCLUDED__
+#define __TIMED_CTRL_LOCAL_H_INCLUDED__
+
+#include "timed_ctrl_global.h"
+
+#endif /* __TIMED_CTRL_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h
new file mode 100644
index 000000000000..3c137badbd43
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_PRIVATE_H_INCLUDED__
+#define __TIMED_CTRL_PRIVATE_H_INCLUDED__
+
+#include "timed_ctrl_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_TIMED_CTRL_C void timed_ctrl_reg_store(
+ const timed_ctrl_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ OP___assert(ID < N_TIMED_CTRL_ID);
+ OP___assert(TIMED_CTRL_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(TIMED_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+}
+
+#endif /* __TIMED_CTRL_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h
new file mode 100644
index 000000000000..c4e99afe0d29
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VAMEM_LOCAL_H_INCLUDED__
+#define __VAMEM_LOCAL_H_INCLUDED__
+
+#include "vamem_global.h"
+
+#endif /* __VAMEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c
new file mode 100644
index 000000000000..0c6830ae7344
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c
@@ -0,0 +1,276 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "isp.h"
+#include "vmem.h"
+#include "vmem_local.h"
+
+#if !defined(HRT_MEMORY_ACCESS)
+#include "ia_css_device_access.h"
+#endif
+#include "assert_support.h"
+#include "platform_support.h" /* hrt_sleep() */
+
+typedef unsigned long long hive_uedge;
+typedef hive_uedge *hive_wide;
+
+/* Copied from SDK: sim_semantics.c */
+
+/* subword bits move like this: MSB[____xxxx____]LSB -> MSB[00000000xxxx]LSB */
+#define SUBWORD(w, start, end) (((w) & (((1ULL << ((end) - 1)) - 1) << 1 | 1)) >> (start))
+
+/* inverse subword bits move like this: MSB[xxxx____xxxx]LSB -> MSB[xxxx0000xxxx]LSB */
+#define INV_SUBWORD(w, start, end) ((w) & (~(((1ULL << ((end) - 1)) - 1) << 1 | 1) | ((1ULL << (start)) - 1)))
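+
+/*
+ * Worked examples, illustrative only:
+ *	SUBWORD(0xABCD, 4, 12)     == 0xBC   (bits [4..12) moved down)
+ *	INV_SUBWORD(0xABCD, 4, 12) == 0xA00D (bits [4..12) cleared)
+ * The masks are written as (((1ULL << (end - 1)) - 1) << 1 | 1) instead
+ * of the equivalent ((1ULL << end) - 1) so that end == 64 does not
+ * shift by the full word width, which would be undefined behaviour.
+ */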
+
+#define uedge_bits (8 * sizeof(hive_uedge))
+#define move_lower_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, 0, src_bit)
+#define move_upper_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, src_bit, uedge_bits)
+#define move_word(target, target_bit, src) move_subword(target, target_bit, src, 0, uedge_bits)
+
+static void
+move_subword(
+ hive_uedge *target,
+ unsigned int target_bit,
+ hive_uedge src,
+ unsigned int src_start,
+ unsigned int src_end)
+{
+ unsigned int start_elem = target_bit / uedge_bits;
+ unsigned int start_bit = target_bit % uedge_bits;
+ unsigned int subword_width = src_end - src_start;
+
+ hive_uedge src_subword = SUBWORD(src, src_start, src_end);
+
+ if (subword_width + start_bit > uedge_bits) { /* overlap */
+ hive_uedge old_val1;
+ hive_uedge old_val0 = INV_SUBWORD(target[start_elem], start_bit, uedge_bits);
+
+ target[start_elem] = old_val0 | (src_subword << start_bit);
+ old_val1 = INV_SUBWORD(target[start_elem + 1], 0,
+ subword_width + start_bit - uedge_bits);
+ target[start_elem + 1] = old_val1 | (src_subword >> (uedge_bits - start_bit));
+ } else {
+ hive_uedge old_val = INV_SUBWORD(target[start_elem], start_bit,
+ start_bit + subword_width);
+
+ target[start_elem] = old_val | (src_subword << start_bit);
+ }
+}
+
+static void
+hive_sim_wide_unpack(
+ hive_wide vector,
+ hive_wide elem,
+ hive_uint elem_bits,
+ hive_uint index)
+{
+ /* pointers into wide_type: */
+ unsigned int start_elem = (elem_bits * index) / uedge_bits;
+ unsigned int start_bit = (elem_bits * index) % uedge_bits;
+ unsigned int end_elem = (elem_bits * (index + 1) - 1) / uedge_bits;
+ unsigned int end_bit = ((elem_bits * (index + 1) - 1) % uedge_bits) + 1;
+
+ if (elem_bits == uedge_bits) {
+ /* easy case for speedup: */
+ elem[0] = vector[index];
+ } else if (start_elem == end_elem) {
+ /* only one (<=64 bits) element needs to be (partly) copied: */
+ move_subword(elem, 0, vector[start_elem], start_bit, end_bit);
+ } else {
+ /* general case: handles edge spanning cases (includes >64bit elements) */
+ unsigned int bits_written = 0;
+ unsigned int i;
+
+ move_upper_bits(elem, bits_written, vector[start_elem], start_bit);
+ bits_written += (64 - start_bit);
+ for (i = start_elem + 1; i < end_elem; i++) {
+ move_word(elem, bits_written, vector[i]);
+ bits_written += uedge_bits;
+ }
+ move_lower_bits(elem, bits_written, vector[end_elem], end_bit);
+ }
+}
+
+static void
+hive_sim_wide_pack(
+ hive_wide vector,
+ hive_wide elem,
+ hive_uint elem_bits,
+ hive_uint index)
+{
+ /* pointers into wide_type: */
+ unsigned int start_elem = (elem_bits * index) / uedge_bits;
+
+ /* easy case for speedup: */
+ if (elem_bits == uedge_bits) {
+ vector[start_elem] = elem[0];
+ } else if (elem_bits > uedge_bits) {
+ unsigned int bits_to_write = elem_bits;
+ unsigned int start_bit = elem_bits * index;
+ unsigned int i = 0;
+
+ for (; bits_to_write > uedge_bits;
+ bits_to_write -= uedge_bits, i++, start_bit += uedge_bits) {
+ move_word(vector, start_bit, elem[i]);
+ }
+ move_lower_bits(vector, start_bit, elem[i], bits_to_write);
+ } else {
+ /* only one element needs to be (partly) copied: */
+ move_lower_bits(vector, elem_bits * index, elem[0], elem_bits);
+ }
+}
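+
+/*
+ * Worked example, illustrative only: unpacking element 4 of a vector of
+ * 14-bit elements reads bits [56..70), which straddle 64-bit words 0
+ * and 1; the general path copies 8 bits from word 0 and the remaining
+ * 6 bits from word 1 via move_subword().
+ */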
+
+static void load_vector(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from)
+{
+ unsigned int i;
+ hive_uedge *data;
+ unsigned int size = sizeof(short) * ISP_NWAY;
+
+ VMEM_ARRAY(v, 2 * ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */
+ assert(ISP_BAMEM_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size);
+#else
+ hrt_master_port_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size);
+#endif
+ data = (hive_uedge *)v;
+ for (i = 0; i < ISP_NWAY; i++) {
+ hive_uedge elem = 0;
+
+ hive_sim_wide_unpack(data, &elem, ISP_VEC_ELEMBITS, i);
+ to[i] = elem;
+ }
+ hrt_sleep(); /* Spend at least one cycle per vector */
+}
+
+static void store_vector(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from)
+{
+ unsigned int i;
+ unsigned int size = sizeof(short) * ISP_NWAY;
+
+ VMEM_ARRAY(v, 2 * ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */
+ //load_vector (&v[1][0], &to[ISP_NWAY]); /* Fetch the next vector, since it will be overwritten. */
+ hive_uedge *data = (hive_uedge *)v;
+
+ for (i = 0; i < ISP_NWAY; i++) {
+ hive_sim_wide_pack(data, (hive_wide)&from[i], ISP_VEC_ELEMBITS, i);
+ }
+ assert(ISP_BAMEM_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size);
+#else
+ //hrt_mem_store (ISP, VMEM, (unsigned)to, &v, siz); /* This will overwrite the next vector as well */
+ hrt_master_port_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size);
+#endif
+ hrt_sleep(); /* Spend at least one cycle per vector */
+}
+
+void isp_vmem_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned int elems) /* In t_vmem_elem */
+{
+ unsigned int c;
+ const t_vmem_elem *vp = from;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)from % ISP_VEC_ALIGN == 0);
+ assert(elems % ISP_NWAY == 0);
+ for (c = 0; c < elems; c += ISP_NWAY) {
+ load_vector(ID, &to[c], vp);
+ vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
+ }
+}
+
+void isp_vmem_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned int elems) /* In t_vmem_elem */
+{
+ unsigned int c;
+ t_vmem_elem *vp = to;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)to % ISP_VEC_ALIGN == 0);
+ assert(elems % ISP_NWAY == 0);
+ for (c = 0; c < elems; c += ISP_NWAY) {
+ store_vector(ID, vp, &from[c]);
+ vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
+ }
+}
+
+void isp_vmem_2d_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned int height,
+ unsigned int width,
+ unsigned int stride_to, /* In t_vmem_elem */
+ unsigned int stride_from /* In t_vmem_elem */)
+{
+ unsigned int h;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)from % ISP_VEC_ALIGN == 0);
+ assert(width % ISP_NWAY == 0);
+ assert(stride_from % ISP_NWAY == 0);
+ for (h = 0; h < height; h++) {
+ unsigned int c;
+ const t_vmem_elem *vp = from;
+
+ for (c = 0; c < width; c += ISP_NWAY) {
+ load_vector(ID, &to[stride_to * h + c], vp);
+ vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
+ }
+ from = (const t_vmem_elem *)((const char *)from + stride_from / ISP_NWAY *
+ ISP_VEC_ALIGN);
+ }
+}
+
+void isp_vmem_2d_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned int height,
+ unsigned int width,
+ unsigned int stride_to, /* In t_vmem_elem */
+ unsigned int stride_from /* In t_vmem_elem */)
+{
+ unsigned int h;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)to % ISP_VEC_ALIGN == 0);
+ assert(width % ISP_NWAY == 0);
+ assert(stride_to % ISP_NWAY == 0);
+ for (h = 0; h < height; h++) {
+ unsigned int c;
+ t_vmem_elem *vp = to;
+
+ for (c = 0; c < width; c += ISP_NWAY) {
+ store_vector(ID, vp, &from[stride_from * h + c]);
+ vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
+ }
+ to = (t_vmem_elem *)((char *)to + stride_to / ISP_NWAY * ISP_VEC_ALIGN);
+ }
+}
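+
+/*
+ * Note, illustrative only: strides are expressed in t_vmem_elem units
+ * and must be whole vectors (multiples of ISP_NWAY); each row therefore
+ * advances the VMEM pointer by (stride / ISP_NWAY) * ISP_VEC_ALIGN
+ * bytes, one aligned vector per ISP_NWAY elements.
+ */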
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
new file mode 100644
index 000000000000..a42cce42f29d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
@@ -0,0 +1,57 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_LOCAL_H_INCLUDED__
+#define __VMEM_LOCAL_H_INCLUDED__
+
+#include "type_support.h"
+#include "vmem_global.h"
+
+typedef u16 t_vmem_elem;
+
+#define VMEM_ARRAY(x, s) t_vmem_elem x[(s) / ISP_NWAY][ISP_NWAY]
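+
+/*
+ * Example, illustrative only: VMEM_ARRAY(buf, 2 * ISP_NWAY) declares
+ * t_vmem_elem buf[2][ISP_NWAY]; the first index selects a vector and
+ * the second a lane within that vector.
+ */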
+
+void isp_vmem_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned int elems); /* In t_vmem_elem */
+
+void isp_vmem_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned int elems); /* In t_vmem_elem */
+
+void isp_vmem_2d_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned int height,
+ unsigned int width,
+ unsigned int stride_to, /* In t_vmem_elem */
+ unsigned int stride_from /* In t_vmem_elem */);
+
+void isp_vmem_2d_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned int height,
+ unsigned int width,
+ unsigned int stride_to, /* In t_vmem_elem */
+ unsigned int stride_from /* In t_vmem_elem */);
+
+#endif /* __VMEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h
new file mode 100644
index 000000000000..f48d1281b5a7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_PRIVATE_H_INCLUDED__
+#define __VMEM_PRIVATE_H_INCLUDED__
+
+#include "vmem_public.h"
+
+#endif /* __VMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h
new file mode 100644
index 000000000000..163521c53d4b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h
@@ -0,0 +1,114 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_GLOBAL_H_INCLUDED__
+#define __INPUT_FORMATTER_GLOBAL_H_INCLUDED__
+
+#define IS_INPUT_FORMATTER_VERSION2
+#define IS_INPUT_SWITCH_VERSION2
+
+#include <type_support.h>
+#include <system_types.h>
+#include "if_defs.h"
+#include "str2mem_defs.h"
+#include "input_switch_2400_defs.h"
+
+#define _HIVE_INPUT_SWITCH_GET_FSYNC_REG_LSB(ch_id) ((ch_id) * 3)
+
+#define HIVE_SWITCH_N_CHANNELS 4
+#define HIVE_SWITCH_N_FORMATTYPES 32
+#define HIVE_SWITCH_N_SWITCH_CODE 4
+#define HIVE_SWITCH_M_CHANNELS 0x00000003
+#define HIVE_SWITCH_M_FORMATTYPES 0x0000001f
+#define HIVE_SWITCH_M_SWITCH_CODE 0x00000003
+#define HIVE_SWITCH_M_FSYNC 0x00000007
+
+#define HIVE_SWITCH_ENCODE_FSYNC(x) \
+ (1U << (((x) - 1) & HIVE_SWITCH_M_CHANNELS))
+
+#define _HIVE_INPUT_SWITCH_GET_LUT_FIELD(reg, bit_index) \
+ (((reg) >> (bit_index)) & HIVE_SWITCH_M_SWITCH_CODE)
+#define _HIVE_INPUT_SWITCH_SET_LUT_FIELD(reg, bit_index, val) \
+ (((reg) & ~(HIVE_SWITCH_M_SWITCH_CODE << (bit_index))) | (((hrt_data)(val) & HIVE_SWITCH_M_SWITCH_CODE) << (bit_index)))
+#define _HIVE_INPUT_SWITCH_GET_FSYNC_FIELD(reg, bit_index) \
+ (((reg) >> (bit_index)) & HIVE_SWITCH_M_FSYNC)
+#define _HIVE_INPUT_SWITCH_SET_FSYNC_FIELD(reg, bit_index, val) \
+ (((reg) & ~(HIVE_SWITCH_M_FSYNC << (bit_index))) | (((hrt_data)(val) & HIVE_SWITCH_M_FSYNC) << (bit_index)))
+
+typedef struct input_formatter_cfg_s input_formatter_cfg_t;
+
+/* Hardware registers */
+/*#define HIVE_IF_RESET_ADDRESS 0x000*/ /* deprecated */
+#define HIVE_IF_START_LINE_ADDRESS 0x004
+#define HIVE_IF_START_COLUMN_ADDRESS 0x008
+#define HIVE_IF_CROPPED_HEIGHT_ADDRESS 0x00C
+#define HIVE_IF_CROPPED_WIDTH_ADDRESS 0x010
+#define HIVE_IF_VERTICAL_DECIMATION_ADDRESS 0x014
+#define HIVE_IF_HORIZONTAL_DECIMATION_ADDRESS 0x018
+#define HIVE_IF_H_DEINTERLEAVING_ADDRESS 0x01C
+#define HIVE_IF_LEFTPADDING_WIDTH_ADDRESS 0x020
+#define HIVE_IF_END_OF_LINE_OFFSET_ADDRESS 0x024
+#define HIVE_IF_VMEM_START_ADDRESS_ADDRESS 0x028
+#define HIVE_IF_VMEM_END_ADDRESS_ADDRESS 0x02C
+#define HIVE_IF_VMEM_INCREMENT_ADDRESS 0x030
+#define HIVE_IF_YUV_420_FORMAT_ADDRESS 0x034
+#define HIVE_IF_VSYNCK_ACTIVE_LOW_ADDRESS 0x038
+#define HIVE_IF_HSYNCK_ACTIVE_LOW_ADDRESS 0x03C
+#define HIVE_IF_ALLOW_FIFO_OVERFLOW_ADDRESS 0x040
+#define HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS 0x044
+#define HIVE_IF_V_DEINTERLEAVING_ADDRESS 0x048
+#define HIVE_IF_FSM_CROP_PIXEL_COUNTER 0x110
+#define HIVE_IF_FSM_CROP_LINE_COUNTER 0x10C
+#define HIVE_IF_FSM_CROP_STATUS 0x108
+
+/* Registers only for simulation */
+#define HIVE_IF_CRUN_MODE_ADDRESS 0x04C
+#define HIVE_IF_DUMP_OUTPUT_ADDRESS 0x050
+
+/* Follow the DMA syntax, "cmd" last */
+#define IF_PACK(val, cmd) (((val) & 0x0fff) | ((cmd) /*& 0xf000*/))
+
+#define HIVE_STR2MEM_SOFT_RESET_REG_ADDRESS (_STR2MEM_SOFT_RESET_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_INPUT_ENDIANNESS_REG_ADDRESS (_STR2MEM_INPUT_ENDIANNESS_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_OUTPUT_ENDIANNESS_REG_ADDRESS (_STR2MEM_OUTPUT_ENDIANNESS_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_BIT_SWAPPING_REG_ADDRESS (_STR2MEM_BIT_SWAPPING_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_BLOCK_SYNC_LEVEL_REG_ADDRESS (_STR2MEM_BLOCK_SYNC_LEVEL_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_PACKET_SYNC_LEVEL_REG_ADDRESS (_STR2MEM_PACKET_SYNC_LEVEL_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ADDRESS (_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ADDRESS (_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS (_STR2MEM_EN_STAT_UPDATE_ID * _STR2MEM_REG_ALIGN)
+
+/*
+ * This data structure is shared between host and SP
+ */
+struct input_formatter_cfg_s {
+ u32 start_line;
+ u32 start_column;
+ u32 left_padding;
+ u32 cropped_height;
+ u32 cropped_width;
+ u32 deinterleaving;
+ u32 buf_vecs;
+ u32 buf_start_index;
+ u32 buf_increment;
+ u32 buf_eol_offset;
+ u32 is_yuv420_format;
+ u32 block_no_reqs;
+};
+
+extern const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID];
+extern const hrt_data HIVE_IF_SRST_MASK[N_INPUT_FORMATTER_ID];
+extern const u8 HIVE_IF_SWITCH_CODE[N_INPUT_FORMATTER_ID];
+
+#endif /* __INPUT_FORMATTER_GLOBAL_H_INCLUDED__ */
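
The fsync/LUT field macros above implement a standard read-modify-write: clear the field at bit_index, then OR in the masked new value, with the LSB helper giving 3 bits per channel. A minimal host-side sketch, assuming the caller has already read the register into reg (the actual register I/O is elided here):

    /* Sketch: update the fsync field for one channel in a cached
     * register value, using the header's own LSB helper. */
    hrt_data switch_set_fsync(hrt_data reg, unsigned int ch_id,
                              unsigned int val)
    {
            return _HIVE_INPUT_SWITCH_SET_FSYNC_FIELD(reg,
                    _HIVE_INPUT_SWITCH_GET_FSYNC_REG_LSB(ch_id), val);
    }
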
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h
new file mode 100644
index 000000000000..64554d80dc0b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_GLOBAL_H_INCLUDED__
+#define __IRQ_GLOBAL_H_INCLUDED__
+
+#include <system_types.h>
+
+#define IS_IRQ_VERSION_2
+#define IS_IRQ_MAP_VERSION_2
+
+/* We cannot include the (hrt host ID) file defining the "CSS_RECEIVER" property without side effects */
+#ifndef HAS_NO_RX
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+/*#define CSS_RECEIVER testbench_isp_inp_sys_csi_receiver*/
+#include "hive_isp_css_irq_types_hrt.h" /* enum hrt_isp_css_irq */
+#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/*#define CSS_RECEIVER testbench_isp_is_2400_inp_sys_csi_receiver*/
+#include "hive_isp_css_2401_irq_types_hrt.h" /* enum hrt_isp_css_irq */
+#else
+#error "irq_global.h: 2400_SYSTEM must be one of {2400, 2401 }"
+#endif
+#endif
+
+/* The IRQ is not mapped uniformly on its related interfaces */
+#define IRQ_SW_CHANNEL_OFFSET hrt_isp_css_irq_sw_pin_0
+
+typedef enum {
+ IRQ_SW_CHANNEL0_ID = hrt_isp_css_irq_sw_pin_0 - IRQ_SW_CHANNEL_OFFSET,
+ IRQ_SW_CHANNEL1_ID = hrt_isp_css_irq_sw_pin_1 - IRQ_SW_CHANNEL_OFFSET,
+ N_IRQ_SW_CHANNEL_ID
+} irq_sw_channel_id_t;
+
+#endif /* __IRQ_GLOBAL_H_INCLUDED__ */
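
Subtracting IRQ_SW_CHANNEL_OFFSET rebases the software-channel IDs to start at 0 even though the sw pins sit higher up in the system IRQ map. Going back the other way is a plain addition; a small sketch (enum hrt_isp_css_irq comes from the headers included above):

    /* Sketch: recover the raw IRQ pin for a zero-based SW channel ID. */
    static inline enum hrt_isp_css_irq
    irq_sw_channel_to_pin(irq_sw_channel_id_t id)
    {
            return (enum hrt_isp_css_irq)(id + IRQ_SW_CHANNEL_OFFSET);
    }
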
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h
new file mode 100644
index 000000000000..1a8547d58435
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h
@@ -0,0 +1,109 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_GLOBAL_H_INCLUDED__
+#define __ISP_GLOBAL_H_INCLUDED__
+
+#include <system_types.h>
+
+#if defined(HAS_ISP_2401_MAMOIADA)
+#define IS_ISP_2401_MAMOIADA
+
+#include "isp2401_mamoiada_params.h"
+#elif defined(HAS_ISP_2400_MAMOIADA)
+#define IS_ISP_2400_MAMOIADA
+
+#include "isp2400_mamoiada_params.h"
+#else
+#error "isp_global_h: ISP_2400_MAMOIDA must be one of {2400, 2401 }"
+#endif
+
+#define ISP_PMEM_WIDTH_LOG2 ISP_LOG2_PMEM_WIDTH
+#define ISP_PMEM_SIZE ISP_PMEM_DEPTH
+
+#define ISP_NWAY_LOG2 6
+#define ISP_VEC_NELEMS_LOG2 ISP_NWAY_LOG2
+
+#ifdef PIPE_GENERATION
+#define PIPEMEM(x) MEM(x)
+#define ISP_NWAY BIT(ISP_NWAY_LOG2)
+#else
+#define PIPEMEM(x)
+#endif
+
+/* The number of data bytes in a vector disregarding the reduced precision */
+#define ISP_VEC_BYTES (ISP_VEC_NELEMS * sizeof(uint16_t))
+
+/* ISP SC Registers */
+#define ISP_SC_REG 0x00
+#define ISP_PC_REG 0x07
+#define ISP_IRQ_READY_REG 0x00
+#define ISP_IRQ_CLEAR_REG 0x00
+
+/* ISP SC Register bits */
+#define ISP_RST_BIT 0x00
+#define ISP_START_BIT 0x01
+#define ISP_BREAK_BIT 0x02
+#define ISP_RUN_BIT 0x03
+#define ISP_BROKEN_BIT 0x04
+#define ISP_IDLE_BIT 0x05 /* READY */
+#define ISP_SLEEPING_BIT 0x06
+#define ISP_STALLING_BIT 0x07
+#define ISP_IRQ_CLEAR_BIT 0x08
+#define ISP_IRQ_READY_BIT 0x0A
+#define ISP_IRQ_SLEEPING_BIT 0x0B
+
+/* ISP Register bits */
+#define ISP_CTRL_SINK_BIT 0x00
+#define ISP_PMEM_SINK_BIT 0x01
+#define ISP_DMEM_SINK_BIT 0x02
+#define ISP_FIFO0_SINK_BIT 0x03
+#define ISP_FIFO1_SINK_BIT 0x04
+#define ISP_FIFO2_SINK_BIT 0x05
+#define ISP_FIFO3_SINK_BIT 0x06
+#define ISP_FIFO4_SINK_BIT 0x07
+#define ISP_FIFO5_SINK_BIT 0x08
+#define ISP_FIFO6_SINK_BIT 0x09
+#define ISP_VMEM_SINK_BIT 0x0A
+#define ISP_VAMEM1_SINK_BIT 0x0B
+#define ISP_VAMEM2_SINK_BIT 0x0C
+#define ISP_VAMEM3_SINK_BIT 0x0D
+#define ISP_HMEM_SINK_BIT 0x0E
+
+#define ISP_CTRL_SINK_REG 0x08
+#define ISP_PMEM_SINK_REG 0x08
+#define ISP_DMEM_SINK_REG 0x08
+#define ISP_FIFO0_SINK_REG 0x08
+#define ISP_FIFO1_SINK_REG 0x08
+#define ISP_FIFO2_SINK_REG 0x08
+#define ISP_FIFO3_SINK_REG 0x08
+#define ISP_FIFO4_SINK_REG 0x08
+#define ISP_FIFO5_SINK_REG 0x08
+#define ISP_FIFO6_SINK_REG 0x08
+#define ISP_VMEM_SINK_REG 0x08
+#define ISP_VAMEM1_SINK_REG 0x08
+#define ISP_VAMEM2_SINK_REG 0x08
+#define ISP_VAMEM3_SINK_REG 0x08
+#define ISP_HMEM_SINK_REG 0x08
+
+/* ISP2401 */
+#define BAMEM VMEM
+#define XNR3_DOWN_BAMEM_BASE_ADDRESS (0x16880)
+#define XNR3_UP_BAMEM_BASE_ADDRESS (0x12880)
+#define bmem_ldrow(fu, pid, offset, data) bmem_ldrow_s(fu, pid, offset, data)
+#define bmem_strow(fu, pid, offset, data) bmem_strow_s(fu, pid, offset, data)
+#define bmem_ldblk(fu, pid, offset, data) bmem_ldblk_s(fu, pid, offset, data)
+#define bmem_stblk(fu, pid, offset, data) bmem_stblk_s(fu, pid, offset, data)
+
+#endif /* __ISP_GLOBAL_H_INCLUDED__ */
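
The *_BIT constants above are bit positions, not masks, so callers test them with a shift. A sketch of an idle check, assuming the ia_css_device_load_uint32() accessor declared in device_access.h later in this patch; the base address and the 4-byte register stride are placeholders, not values from this header:

    /* Sketch: test the ISP idle (READY) flag in the status/control reg. */
    static bool isp_is_idle(hrt_address isp_ctrl_base)
    {
            u32 sc = ia_css_device_load_uint32(isp_ctrl_base +
                            ISP_SC_REG * 4 /* assumed register stride */);

            return (sc >> ISP_IDLE_BIT) & 1;
    }
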
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h
new file mode 100644
index 000000000000..83ca418c8ff2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MMU_GLOBAL_H_INCLUDED__
+#define __MMU_GLOBAL_H_INCLUDED__
+
+#define IS_MMU_VERSION_2
+
+#include <mmu_defs.h>
+
+#endif /* __MMU_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h
new file mode 100644
index 000000000000..6ec4e590e3b4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h
@@ -0,0 +1,93 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_GLOBAL_H_INCLUDED__
+#define __SP_GLOBAL_H_INCLUDED__
+
+#include <system_types.h>
+
+#if defined(HAS_SP_2401)
+#define IS_SP_2401
+/* 2401 uses 2400 */
+#include <scalar_processor_2400_params.h>
+#elif defined(HAS_SP_2400)
+#define IS_SP_2400
+
+#include <scalar_processor_2400_params.h>
+#else
+#error "sp_global.h: SP_2400 must be one of {2400, 2401 }"
+#endif
+
+#define SP_PMEM_WIDTH_LOG2 SP_PMEM_LOG_WIDTH_BITS
+#define SP_PMEM_SIZE SP_PMEM_DEPTH
+
+#define SP_DMEM_SIZE 0x4000
+
+/* SP Registers */
+#define SP_PC_REG 0x09
+#define SP_SC_REG 0x00
+#define SP_START_ADDR_REG 0x01
+#define SP_ICACHE_ADDR_REG 0x05
+#define SP_IRQ_READY_REG 0x00
+#define SP_IRQ_CLEAR_REG 0x00
+#define SP_ICACHE_INV_REG 0x00
+#define SP_CTRL_SINK_REG 0x0A
+
+/* SP Register bits */
+#define SP_RST_BIT 0x00
+#define SP_START_BIT 0x01
+#define SP_BREAK_BIT 0x02
+#define SP_RUN_BIT 0x03
+#define SP_BROKEN_BIT 0x04
+#define SP_IDLE_BIT 0x05 /* READY */
+#define SP_SLEEPING_BIT 0x06
+#define SP_STALLING_BIT 0x07
+#define SP_IRQ_CLEAR_BIT 0x08
+#define SP_IRQ_READY_BIT 0x0A
+#define SP_IRQ_SLEEPING_BIT 0x0B
+
+#define SP_ICACHE_INV_BIT 0x0C
+#define SP_IPREFETCH_EN_BIT 0x0D
+
+#define SP_FIFO0_SINK_BIT 0x00
+#define SP_FIFO1_SINK_BIT 0x01
+#define SP_FIFO2_SINK_BIT 0x02
+#define SP_FIFO3_SINK_BIT 0x03
+#define SP_FIFO4_SINK_BIT 0x04
+#define SP_FIFO5_SINK_BIT 0x05
+#define SP_FIFO6_SINK_BIT 0x06
+#define SP_FIFO7_SINK_BIT 0x07
+#define SP_FIFO8_SINK_BIT 0x08
+#define SP_FIFO9_SINK_BIT 0x09
+#define SP_FIFOA_SINK_BIT 0x0A
+#define SP_DMEM_SINK_BIT 0x0B
+#define SP_CTRL_MT_SINK_BIT 0x0C
+#define SP_ICACHE_MT_SINK_BIT 0x0D
+
+#define SP_FIFO0_SINK_REG 0x0A
+#define SP_FIFO1_SINK_REG 0x0A
+#define SP_FIFO2_SINK_REG 0x0A
+#define SP_FIFO3_SINK_REG 0x0A
+#define SP_FIFO4_SINK_REG 0x0A
+#define SP_FIFO5_SINK_REG 0x0A
+#define SP_FIFO6_SINK_REG 0x0A
+#define SP_FIFO7_SINK_REG 0x0A
+#define SP_FIFO8_SINK_REG 0x0A
+#define SP_FIFO9_SINK_REG 0x0A
+#define SP_FIFOA_SINK_REG 0x0A
+#define SP_DMEM_SINK_REG 0x0A
+#define SP_CTRL_MT_SINK_REG 0x0A
+#define SP_ICACHE_MT_SINK_REG 0x0A
+
+#endif /* __SP_GLOBAL_H_INCLUDED__ */
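
The SP follows the same convention: SP_SC_REG carries the run/idle state as bit positions. A busy-wait sketch under the same base-address and stride assumptions as the ISP example above; a real caller would bound the loop or sleep:

    /* Sketch: spin until the SP reports idle (READY). */
    static void sp_wait_for_idle(hrt_address sp_ctrl_base)
    {
            while (!((ia_css_device_load_uint32(sp_ctrl_base + SP_SC_REG * 4)
                      >> SP_IDLE_BIT) & 1))
                    ;
    }
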
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h
new file mode 100644
index 000000000000..f185859e3084
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_GLOBAL_H_INCLUDED__
+#define __TIMED_CTRL_GLOBAL_H_INCLUDED__
+
+#define IS_TIMED_CTRL_VERSION_1
+
+#include "timed_controller_defs.h"
+
+/**
+ * Order of the input bits for the timed controller, taken from the
+ * ISP_CSS_2401 System Architecture Description; valid for both
+ * 2400 and 2401.
+ *
+ * Check before reusing on other systems.
+ */
+#define HIVE_TIMED_CTRL_GPIO_PIN_0_BIT_ID 0
+#define HIVE_TIMED_CTRL_GPIO_PIN_1_BIT_ID 1
+#define HIVE_TIMED_CTRL_GPIO_PIN_2_BIT_ID 2
+#define HIVE_TIMED_CTRL_GPIO_PIN_3_BIT_ID 3
+#define HIVE_TIMED_CTRL_GPIO_PIN_4_BIT_ID 4
+#define HIVE_TIMED_CTRL_GPIO_PIN_5_BIT_ID 5
+#define HIVE_TIMED_CTRL_GPIO_PIN_6_BIT_ID 6
+#define HIVE_TIMED_CTRL_GPIO_PIN_7_BIT_ID 7
+#define HIVE_TIMED_CTRL_GPIO_PIN_8_BIT_ID 8
+#define HIVE_TIMED_CTRL_GPIO_PIN_9_BIT_ID 9
+#define HIVE_TIMED_CTRL_GPIO_PIN_10_BIT_ID 10
+#define HIVE_TIMED_CTRL_GPIO_PIN_11_BIT_ID 11
+#define HIVE_TIMED_CTRL_IRQ_SP_BIT_ID 12
+#define HIVE_TIMED_CTRL_IRQ_ISP_BIT_ID 13
+#define HIVE_TIMED_CTRL_IRQ_INPUT_SYSTEM_BIT_ID 14
+#define HIVE_TIMED_CTRL_IRQ_INPUT_SELECTOR_BIT_ID 15
+#define HIVE_TIMED_CTRL_IRQ_IF_BLOCK_BIT_ID 16
+#define HIVE_TIMED_CTRL_IRQ_GP_TIMER_0_BIT_ID 17
+#define HIVE_TIMED_CTRL_IRQ_GP_TIMER_1_BIT_ID 18
+#define HIVE_TIMED_CTRL_CSI_SOL_BIT_ID 19
+#define HIVE_TIMED_CTRL_CSI_EOL_BIT_ID 20
+#define HIVE_TIMED_CTRL_CSI_SOF_BIT_ID 21
+#define HIVE_TIMED_CTRL_CSI_EOF_BIT_ID 22
+#define HIVE_TIMED_CTRL_IRQ_IS_STREAMING_MONITOR_BIT_ID 23
+
+#endif /* __TIMED_CTRL_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h
new file mode 100644
index 000000000000..92b783fed82c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VAMEM_GLOBAL_H_INCLUDED__
+#define __VAMEM_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define IS_VAMEM_VERSION_2
+
+/* (log) stepsize of linear interpolation */
+#define VAMEM_INTERP_STEP_LOG2 4
+#define VAMEM_INTERP_STEP BIT(VAMEM_INTERP_STEP_LOG2)
+/* (physical) size of the tables */
+#define VAMEM_TABLE_UNIT_SIZE ((1 << (ISP_VAMEM_ADDRESS_BITS - VAMEM_INTERP_STEP_LOG2)) + 1)
+/* (logical) size of the tables */
+#define VAMEM_TABLE_UNIT_STEP ((VAMEM_TABLE_UNIT_SIZE - 1) << 1)
+/* Number of tables */
+#define VAMEM_TABLE_UNIT_COUNT (ISP_VAMEM_DEPTH / VAMEM_TABLE_UNIT_STEP)
+
+typedef u16 vamem_data_t;
+
+#endif /* __VAMEM_GLOBAL_H_INCLUDED__ */
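
Worked numbers make the relation between these macros concrete. With VAMEM_INTERP_STEP_LOG2 = 4 and, purely for illustration, ISP_VAMEM_ADDRESS_BITS = 12 (that value is system-specific and defined elsewhere):

    VAMEM_INTERP_STEP      = 1 << 4               = 16
    VAMEM_TABLE_UNIT_SIZE  = (1 << (12 - 4)) + 1  = 257  /* physical entries */
    VAMEM_TABLE_UNIT_STEP  = (257 - 1) << 1       = 512  /* logical range per table */
    VAMEM_TABLE_UNIT_COUNT = ISP_VAMEM_DEPTH / 512

The extra +1 entry stores the closing endpoint of the last interpolation interval, which is why the logical step is computed from UNIT_SIZE - 1.
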
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h
new file mode 100644
index 000000000000..7867cd137f3f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_GLOBAL_H_INCLUDED__
+#define __VMEM_GLOBAL_H_INCLUDED__
+
+#include "isp.h"
+
+#define VMEM_SIZE ISP_VMEM_DEPTH
+#define VMEM_ELEMBITS ISP_VMEM_ELEMBITS
+#define VMEM_ALIGN ISP_VMEM_ALIGN
+
+#ifndef PIPE_GENERATION
+typedef tvector *pvector;
+#endif
+
+#endif /* __VMEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_defs.h b/drivers/staging/media/atomisp/pci/hive_isp_css_defs.h
new file mode 100644
index 000000000000..52676f3610ba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_defs.h
@@ -0,0 +1,411 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_defs_h__
+#define _hive_isp_css_defs_h__
+
+#define HIVE_ISP_CTRL_DATA_WIDTH 32
+#define HIVE_ISP_CTRL_ADDRESS_WIDTH 32
+#define HIVE_ISP_CTRL_MAX_BURST_SIZE 1
+#define HIVE_ISP_DDR_ADDRESS_WIDTH 36
+
+#define HIVE_ISP_HOST_MAX_BURST_SIZE 8 /* host supports bursts in order to prevent repeating DDRAM accesses */
+#define HIVE_ISP_NUM_GPIO_PINS 12
+
+/* This list of vector num_elems/elem_bits pairs is valid both as a C
+ initializer and in the DMA parameter list */
+#define HIVE_ISP_DDR_DMA_SPECS {{32, 8}, {16, 16}, {18, 14}, {25, 10}, {21, 12}}
+#define HIVE_ISP_DDR_WORD_BITS 256
+#define HIVE_ISP_DDR_WORD_BYTES (HIVE_ISP_DDR_WORD_BITS / 8)
+#define HIVE_ISP_DDR_BYTES (512 * 1024 * 1024) /* hss only */
+#define HIVE_ISP_DDR_BYTES_RTL (127 * 1024 * 1024) /* RTL only */
+#define HIVE_ISP_DDR_SMALL_BYTES (128 * 256 / 8)
+#define HIVE_ISP_PAGE_SHIFT 12
+#define HIVE_ISP_PAGE_SIZE BIT(HIVE_ISP_PAGE_SHIFT)
+
+#define CSS_DDR_WORD_BITS HIVE_ISP_DDR_WORD_BITS
+#define CSS_DDR_WORD_BYTES HIVE_ISP_DDR_WORD_BYTES
+
+/* If HIVE_ISP_DDR_BASE_OFFSET is set to a non-zero value, the wide bus just before the DDRAM gets an extra dummy port */
+/* onto which address range 0 .. HIVE_ISP_DDR_BASE_OFFSET-1 maps. This effectively offsets the DDRAM from the system's perspective */
+#define HIVE_ISP_DDR_BASE_OFFSET 0x120000000 /* 0x200000 */
+
+#define HIVE_DMA_ISP_BUS_CONN 0
+#define HIVE_DMA_ISP_DDR_CONN 1
+#define HIVE_DMA_BUS_DDR_CONN 2
+#define HIVE_DMA_ISP_MASTER master_port0
+#define HIVE_DMA_BUS_MASTER master_port1
+#define HIVE_DMA_DDR_MASTER master_port2
+
+#define HIVE_DMA_NUM_CHANNELS 32 /* old value was 8 */
+#define HIVE_DMA_CMD_FIFO_DEPTH 24 /* old value was 12 */
+
+#define HIVE_IF_PIXEL_WIDTH 12
+
+#define HIVE_MMU_TLB_SETS 8
+#define HIVE_MMU_TLB_SET_BLOCKS 8
+#define HIVE_MMU_TLB_BLOCK_ELEMENTS 8
+#define HIVE_MMU_PAGE_TABLE_LEVELS 2
+#define HIVE_MMU_PAGE_BYTES HIVE_ISP_PAGE_SIZE
+
+#define HIVE_ISP_CH_ID_BITS 2
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#define HIVE_ISP_ISEL_SEL_BITS 2
+
+#define HIVE_GP_REGS_SDRAM_WAKEUP_IDX 0
+#define HIVE_GP_REGS_IDLE_IDX 1
+#define HIVE_GP_REGS_IRQ_0_IDX 2
+#define HIVE_GP_REGS_IRQ_1_IDX 3
+#define HIVE_GP_REGS_SP_STREAM_STAT_IDX 4
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IDX 5
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IDX 6
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IDX 7
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_COND_IDX 8
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_COND_IDX 9
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_COND_IDX 10
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_COND_IDX 11
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_ENABLE_IDX 12
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_ENABLE_IDX 13
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_ENABLE_IDX 14
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_ENABLE_IDX 15
+#define HIVE_GP_REGS_SWITCH_PRIM_IF_IDX 16
+#define HIVE_GP_REGS_SWITCH_GDC1_IDX 17
+#define HIVE_GP_REGS_SWITCH_GDC2_IDX 18
+#define HIVE_GP_REGS_SRST_IDX 19
+#define HIVE_GP_REGS_SLV_REG_SRST_IDX 20
+
+/* Bit numbers of the soft reset register */
+#define HIVE_GP_REGS_SRST_ISYS_CBUS 0
+#define HIVE_GP_REGS_SRST_ISEL_CBUS 1
+#define HIVE_GP_REGS_SRST_IFMT_CBUS 2
+#define HIVE_GP_REGS_SRST_GPDEV_CBUS 3
+#define HIVE_GP_REGS_SRST_GPIO 4
+#define HIVE_GP_REGS_SRST_TC 5
+#define HIVE_GP_REGS_SRST_GPTIMER 6
+#define HIVE_GP_REGS_SRST_FACELLFIFOS 7
+#define HIVE_GP_REGS_SRST_D_OSYS 8
+#define HIVE_GP_REGS_SRST_IFT_SEC_PIPE 9
+#define HIVE_GP_REGS_SRST_GDC1 10
+#define HIVE_GP_REGS_SRST_GDC2 11
+#define HIVE_GP_REGS_SRST_VEC_BUS 12
+#define HIVE_GP_REGS_SRST_ISP 13
+#define HIVE_GP_REGS_SRST_SLV_GRP_BUS 14
+#define HIVE_GP_REGS_SRST_DMA 15
+#define HIVE_GP_REGS_SRST_SF_ISP_SP 16
+#define HIVE_GP_REGS_SRST_SF_PIF_CELLS 17
+#define HIVE_GP_REGS_SRST_SF_SIF_SP 18
+#define HIVE_GP_REGS_SRST_SF_MC_SP 19
+#define HIVE_GP_REGS_SRST_SF_ISYS_SP 20
+#define HIVE_GP_REGS_SRST_SF_DMA_CELLS 21
+#define HIVE_GP_REGS_SRST_SF_GDC1_CELLS 22
+#define HIVE_GP_REGS_SRST_SF_GDC2_CELLS 23
+#define HIVE_GP_REGS_SRST_SP 24
+#define HIVE_GP_REGS_SRST_OCP2CIO 25
+#define HIVE_GP_REGS_SRST_NBUS 26
+#define HIVE_GP_REGS_SRST_HOST12BUS 27
+#define HIVE_GP_REGS_SRST_WBUS 28
+#define HIVE_GP_REGS_SRST_IC_OSYS 29
+#define HIVE_GP_REGS_SRST_WBUS_IC 30
+
+/* Bit numbers of the slave register soft reset register */
+#define HIVE_GP_REGS_SLV_REG_SRST_DMA 0
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC1 1
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC2 2
+
+/* order of the input bits for the irq controller */
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_IRQ_SP_BIT_ID 12
+#define HIVE_GP_DEV_IRQ_ISP_BIT_ID 13
+#define HIVE_GP_DEV_IRQ_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_IRQ_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_IRQ_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID 17
+#define HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID 18
+#define HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID 19
+#define HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID 20
+#define HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID 21
+#define HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID 22
+#define HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID 23
+#define HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID 24
+#define HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID 25
+#define HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID 26
+#define HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID 27
+#define HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID 28
+#define HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID 29
+#define HIVE_GP_DEV_IRQ_DMA_BIT_ID 30
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID 31
+
+#define HIVE_GP_REGS_NUM_SW_IRQ_REGS 2
+
+/* order of the input bits for the timed controller */
+#define HIVE_GP_DEV_TC_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_TC_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_TC_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_TC_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_TC_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_TC_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_TC_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_TC_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_TC_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_TC_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_TC_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_TC_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_TC_SP_BIT_ID 12
+#define HIVE_GP_DEV_TC_ISP_BIT_ID 13
+#define HIVE_GP_DEV_TC_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_TC_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_TC_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_TC_GP_TIMER_0_BIT_ID 17
+#define HIVE_GP_DEV_TC_GP_TIMER_1_BIT_ID 18
+#define HIVE_GP_DEV_TC_MIPI_SOL_BIT_ID 19
+#define HIVE_GP_DEV_TC_MIPI_EOL_BIT_ID 20
+#define HIVE_GP_DEV_TC_MIPI_SOF_BIT_ID 21
+#define HIVE_GP_DEV_TC_MIPI_EOF_BIT_ID 22
+#define HIVE_GP_DEV_TC_INPSYS_SM 23
+
+/* definitions for the gp_timer block */
+#define HIVE_GP_TIMER_0 0
+#define HIVE_GP_TIMER_1 1
+#define HIVE_GP_TIMER_2 2
+#define HIVE_GP_TIMER_3 3
+#define HIVE_GP_TIMER_4 4
+#define HIVE_GP_TIMER_5 5
+#define HIVE_GP_TIMER_6 6
+#define HIVE_GP_TIMER_7 7
+#define HIVE_GP_TIMER_NUM_COUNTERS 8
+
+#define HIVE_GP_TIMER_IRQ_0 0
+#define HIVE_GP_TIMER_IRQ_1 1
+#define HIVE_GP_TIMER_NUM_IRQS 2
+
+#define HIVE_GP_TIMER_GPIO_0_BIT_ID 0
+#define HIVE_GP_TIMER_GPIO_1_BIT_ID 1
+#define HIVE_GP_TIMER_GPIO_2_BIT_ID 2
+#define HIVE_GP_TIMER_GPIO_3_BIT_ID 3
+#define HIVE_GP_TIMER_GPIO_4_BIT_ID 4
+#define HIVE_GP_TIMER_GPIO_5_BIT_ID 5
+#define HIVE_GP_TIMER_GPIO_6_BIT_ID 6
+#define HIVE_GP_TIMER_GPIO_7_BIT_ID 7
+#define HIVE_GP_TIMER_GPIO_8_BIT_ID 8
+#define HIVE_GP_TIMER_GPIO_9_BIT_ID 9
+#define HIVE_GP_TIMER_GPIO_10_BIT_ID 10
+#define HIVE_GP_TIMER_GPIO_11_BIT_ID 11
+#define HIVE_GP_TIMER_INP_SYS_IRQ 12
+#define HIVE_GP_TIMER_ISEL_IRQ 13
+#define HIVE_GP_TIMER_IFMT_IRQ 14
+#define HIVE_GP_TIMER_SP_STRMON_IRQ 15
+#define HIVE_GP_TIMER_SP_B_STRMON_IRQ 16
+#define HIVE_GP_TIMER_ISP_STRMON_IRQ 17
+#define HIVE_GP_TIMER_MOD_STRMON_IRQ 18
+#define HIVE_GP_TIMER_ISP_BAMEM_ERROR_IRQ 20
+#define HIVE_GP_TIMER_ISP_DMEM_ERROR_IRQ 21
+#define HIVE_GP_TIMER_SP_ICACHE_MEM_ERROR_IRQ 22
+#define HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ 23
+#define HIVE_GP_TIMER_SP_OUT_RUN_DP 24
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 25
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 26
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I2 27
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I3 28
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I4 29
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I5 30
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I6 31
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I7 32
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I8 33
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I9 34
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I10 35
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 36
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 37
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 38
+#define HIVE_GP_TIMER_ISP_OUT_RUN_DP 39
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 40
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 41
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 42
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 43
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I1 44
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I2 45
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I3 46
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I4 47
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I5 48
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I6 49
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 50
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I4_I0 51
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I5_I0 52
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I6_I0 53
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I7_I0 54
+#define HIVE_GP_TIMER_MIPI_SOL_BIT_ID 55
+#define HIVE_GP_TIMER_MIPI_EOL_BIT_ID 56
+#define HIVE_GP_TIMER_MIPI_SOF_BIT_ID 57
+#define HIVE_GP_TIMER_MIPI_EOF_BIT_ID 58
+#define HIVE_GP_TIMER_INPSYS_SM 59
+
+/* port definitions for the streaming monitors */
+/* port definitions of the SP streaming monitor, which monitors the status of streaming ports at the SP side of the streaming FIFOs */
+#define SP_STR_MON_PORT_SP2SIF 0
+#define SP_STR_MON_PORT_SIF2SP 1
+#define SP_STR_MON_PORT_SP2MC 2
+#define SP_STR_MON_PORT_MC2SP 3
+#define SP_STR_MON_PORT_SP2DMA 4
+#define SP_STR_MON_PORT_DMA2SP 5
+#define SP_STR_MON_PORT_SP2ISP 6
+#define SP_STR_MON_PORT_ISP2SP 7
+#define SP_STR_MON_PORT_SP2GPD 8
+#define SP_STR_MON_PORT_FA2SP 9
+#define SP_STR_MON_PORT_SP2ISYS 10
+#define SP_STR_MON_PORT_ISYS2SP 11
+#define SP_STR_MON_PORT_SP2PIFA 12
+#define SP_STR_MON_PORT_PIFA2SP 13
+#define SP_STR_MON_PORT_SP2PIFB 14
+#define SP_STR_MON_PORT_PIFB2SP 15
+
+#define SP_STR_MON_PORT_B_SP2GDC1 0
+#define SP_STR_MON_PORT_B_GDC12SP 1
+#define SP_STR_MON_PORT_B_SP2GDC2 2
+#define SP_STR_MON_PORT_B_GDC22SP 3
+
+/* previously used SP streaming monitor port identifiers, kept for backward compatibility */
+#define SP_STR_MON_PORT_SND_SIF SP_STR_MON_PORT_SP2SIF
+#define SP_STR_MON_PORT_RCV_SIF SP_STR_MON_PORT_SIF2SP
+#define SP_STR_MON_PORT_SND_MC SP_STR_MON_PORT_SP2MC
+#define SP_STR_MON_PORT_RCV_MC SP_STR_MON_PORT_MC2SP
+#define SP_STR_MON_PORT_SND_DMA SP_STR_MON_PORT_SP2DMA
+#define SP_STR_MON_PORT_RCV_DMA SP_STR_MON_PORT_DMA2SP
+#define SP_STR_MON_PORT_SND_ISP SP_STR_MON_PORT_SP2ISP
+#define SP_STR_MON_PORT_RCV_ISP SP_STR_MON_PORT_ISP2SP
+#define SP_STR_MON_PORT_SND_GPD SP_STR_MON_PORT_SP2GPD
+#define SP_STR_MON_PORT_RCV_GPD SP_STR_MON_PORT_FA2SP
+/* Deprecated */
+#define SP_STR_MON_PORT_SND_PIF SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIFB SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIFB SP_STR_MON_PORT_PIFB2SP
+
+#define SP_STR_MON_PORT_SND_PIF_A SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF_A SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIF_B SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIF_B SP_STR_MON_PORT_PIFB2SP
+
+/* port definitions of the ISP streaming monitor, which monitors the status of streaming ports at the ISP side of the streaming FIFOs */
+#define ISP_STR_MON_PORT_ISP2PIFA 0
+#define ISP_STR_MON_PORT_PIFA2ISP 1
+#define ISP_STR_MON_PORT_ISP2PIFB 2
+#define ISP_STR_MON_PORT_PIFB2ISP 3
+#define ISP_STR_MON_PORT_ISP2DMA 4
+#define ISP_STR_MON_PORT_DMA2ISP 5
+#define ISP_STR_MON_PORT_ISP2GDC1 6
+#define ISP_STR_MON_PORT_GDC12ISP 7
+#define ISP_STR_MON_PORT_ISP2GDC2 8
+#define ISP_STR_MON_PORT_GDC22ISP 9
+#define ISP_STR_MON_PORT_ISP2GPD 10
+#define ISP_STR_MON_PORT_FA2ISP 11
+#define ISP_STR_MON_PORT_ISP2SP 12
+#define ISP_STR_MON_PORT_SP2ISP 13
+
+/* previously used ISP streaming monitor port identifiers, kept for backward compatibility */
+#define ISP_STR_MON_PORT_SND_PIF_A ISP_STR_MON_PORT_ISP2PIFA
+#define ISP_STR_MON_PORT_RCV_PIF_A ISP_STR_MON_PORT_PIFA2ISP
+#define ISP_STR_MON_PORT_SND_PIF_B ISP_STR_MON_PORT_ISP2PIFB
+#define ISP_STR_MON_PORT_RCV_PIF_B ISP_STR_MON_PORT_PIFB2ISP
+#define ISP_STR_MON_PORT_SND_DMA ISP_STR_MON_PORT_ISP2DMA
+#define ISP_STR_MON_PORT_RCV_DMA ISP_STR_MON_PORT_DMA2ISP
+#define ISP_STR_MON_PORT_SND_GDC ISP_STR_MON_PORT_ISP2GDC1
+#define ISP_STR_MON_PORT_RCV_GDC ISP_STR_MON_PORT_GDC12ISP
+#define ISP_STR_MON_PORT_SND_GPD ISP_STR_MON_PORT_ISP2GPD
+#define ISP_STR_MON_PORT_RCV_GPD ISP_STR_MON_PORT_FA2ISP
+#define ISP_STR_MON_PORT_SND_SP ISP_STR_MON_PORT_ISP2SP
+#define ISP_STR_MON_PORT_RCV_SP ISP_STR_MON_PORT_SP2ISP
+
+/* port definitions of the MOD streaming monitor, which monitors the status of streaming ports at the module side of the streaming FIFOs */
+
+#define MOD_STR_MON_PORT_PIFA2CELLS 0
+#define MOD_STR_MON_PORT_CELLS2PIFA 1
+#define MOD_STR_MON_PORT_PIFB2CELLS 2
+#define MOD_STR_MON_PORT_CELLS2PIFB 3
+#define MOD_STR_MON_PORT_SIF2SP 4
+#define MOD_STR_MON_PORT_SP2SIF 5
+#define MOD_STR_MON_PORT_MC2SP 6
+#define MOD_STR_MON_PORT_SP2MC 7
+#define MOD_STR_MON_PORT_DMA2ISP 8
+#define MOD_STR_MON_PORT_ISP2DMA 9
+#define MOD_STR_MON_PORT_DMA2SP 10
+#define MOD_STR_MON_PORT_SP2DMA 11
+#define MOD_STR_MON_PORT_GDC12CELLS 12
+#define MOD_STR_MON_PORT_CELLS2GDC1 13
+#define MOD_STR_MON_PORT_GDC22CELLS 14
+#define MOD_STR_MON_PORT_CELLS2GDC2 15
+
+#define MOD_STR_MON_PORT_SND_PIF_A 0
+#define MOD_STR_MON_PORT_RCV_PIF_A 1
+#define MOD_STR_MON_PORT_SND_PIF_B 2
+#define MOD_STR_MON_PORT_RCV_PIF_B 3
+#define MOD_STR_MON_PORT_SND_SIF 4
+#define MOD_STR_MON_PORT_RCV_SIF 5
+#define MOD_STR_MON_PORT_SND_MC 6
+#define MOD_STR_MON_PORT_RCV_MC 7
+#define MOD_STR_MON_PORT_SND_DMA2ISP 8
+#define MOD_STR_MON_PORT_RCV_DMA_FR_ISP 9
+#define MOD_STR_MON_PORT_SND_DMA2SP 10
+#define MOD_STR_MON_PORT_RCV_DMA_FR_SP 11
+#define MOD_STR_MON_PORT_SND_GDC 12
+#define MOD_STR_MON_PORT_RCV_GDC 13
+
+/* testbench signals: */
+
+/* testbench GP adapter register ids */
+#define HIVE_TESTBENCH_GPIO_DATA_OUT_REG_IDX 0
+#define HIVE_TESTBENCH_GPIO_DIR_OUT_REG_IDX 1
+#define HIVE_TESTBENCH_IRQ_REG_IDX 2
+#define HIVE_TESTBENCH_SDRAM_WAKEUP_REG_IDX 3
+#define HIVE_TESTBENCH_IDLE_REG_IDX 4
+#define HIVE_TESTBENCH_GPIO_DATA_IN_REG_IDX 5
+#define HIVE_TESTBENCH_MIPI_BFM_EN_REG_IDX 6
+#define HIVE_TESTBENCH_CSI_CONFIG_REG_IDX 7
+#define HIVE_TESTBENCH_DDR_STALL_EN_REG_IDX 8
+
+#define HIVE_TESTBENCH_ISP_PMEM_ERROR_IRQ_REG_IDX 9
+#define HIVE_TESTBENCH_ISP_BAMEM_ERROR_IRQ_REG_IDX 10
+#define HIVE_TESTBENCH_ISP_DMEM_ERROR_IRQ_REG_IDX 11
+#define HIVE_TESTBENCH_SP_ICACHE_MEM_ERROR_IRQ_REG_IDX 12
+#define HIVE_TESTBENCH_SP_DMEM_ERROR_IRQ_REG_IDX 13
+
+/* Signal monitor input bit ids */
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_0_BIT_ID 0
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_1_BIT_ID 1
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_2_BIT_ID 2
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_3_BIT_ID 3
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_4_BIT_ID 4
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_5_BIT_ID 5
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_6_BIT_ID 6
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_7_BIT_ID 7
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_8_BIT_ID 8
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_9_BIT_ID 9
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_10_BIT_ID 10
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_11_BIT_ID 11
+#define HIVE_TESTBENCH_SIG_MON_IRQ_PIN_BIT_ID 12
+#define HIVE_TESTBENCH_SIG_MON_SDRAM_WAKEUP_PIN_BIT_ID 13
+#define HIVE_TESTBENCH_SIG_MON_IDLE_PIN_BIT_ID 14
+
+#define ISP2400_DEBUG_NETWORK 1
+
+#endif /* _hive_isp_css_defs_h__ */
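
The HIVE_GP_DEV_IRQ_*_BIT_ID values above are bit positions in the IRQ controller's 32-bit status word, so decoding a raw status is shift-and-mask. A hedged sketch (how the status word is read is system-specific and elided):

    /* Sketch: test individual sources in a raw IRQ status word. */
    static bool irq_status_has_sp(u32 status)
    {
            return (status >> HIVE_GP_DEV_IRQ_SP_BIT_ID) & 1;
    }

    static bool irq_status_has_sw_pin(u32 status, unsigned int pin)
    {
            /* pin is 0 or 1; the two SW pins are adjacent in the map */
            return (status >> (HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID + pin)) & 1;
    }
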
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
new file mode 100644
index 000000000000..4cb7e4c952c5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
@@ -0,0 +1,73 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ASSERT_SUPPORT_H_INCLUDED__
+#define __ASSERT_SUPPORT_H_INCLUDED__
+
+/**
+ * The following macro can help to test the size of a struct at compile
+ * time rather than at run-time. It does not work for all compilers; see
+ * below.
+ *
+ * Depending on the value of 'condition', the following macro is expanded to:
+ * - condition==true:
+ * an expression containing an array declaration with negative size,
+ * usually resulting in a compilation error
+ * - condition==false:
+ * (void) 1; // C statement with no effect
+ *
+ * example:
+ * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != SIZE_OF_HOST_SP_QUEUES_STRUCT);
+ *
+ * verify that the macro indeed triggers a compilation error with your compiler:
+ * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != (sizeof(struct host_sp_queues)+1) );
+ *
+ * Not all compilers will trigger an error with this macro; the kernel's
+ * BUILD_BUG_ON() (see <linux/build_bug.h>) offers alternative methods.
+ */
+#define COMPILATION_ERROR_IF(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
+
+/* Compile time assertion */
+#ifndef CT_ASSERT
+#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd) ? 1 : -1]))
+#endif /* CT_ASSERT */
+
+#include <linux/bug.h>
+
+/* TODO: it would be cleaner to use this:
+ * #define assert(cnd) BUG_ON(cnd)
+ * but that causes many compiler warnings (==errors) under Android
+ * because it seems that the BUG_ON() macro is not seen as a check by
+ * gcc like the BUG() macro is. */
+#define assert(cnd) \
+ do { \
+ if (!(cnd)) \
+ BUG(); \
+ } while (0)
+
+#ifndef PIPE_GENERATION
+/* Deprecated OP___assert; this is still used in ~1000 places
+ * in the code and will be removed over time.
+ * The implementation for the pipe generation tool is in support.isp.h */
+#define OP___assert(cnd) assert(cnd)
+
+static inline void compile_time_assert(unsigned int cond)
+{
+ /* Call undefined function if cond is false */
+ void _compile_time_assert(void);
+
+ if (!cond)
+	_compile_time_assert();
+}
+#endif /* PIPE_GENERATION */
+
+#endif /* __ASSERT_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h
new file mode 100644
index 000000000000..76856db58626
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __BITOP_SUPPORT_H_INCLUDED__
+#define __BITOP_SUPPORT_H_INCLUDED__
+
+#define bitop_setbit(a, b) ((a) |= (1UL << (b)))
+
+#define bitop_getbit(a, b) (((a) & (1UL << (b))) != 0)
+
+#define bitop_clearbit(a, b) ((a) &= ~(1UL << (b)))
+
+#endif /* __BITOP_SUPPORT_H_INCLUDED__ */
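
These helpers modify their first argument in place, so it must be an lvalue. A short usage sketch:

    unsigned long mask = 0;

    bitop_setbit(mask, 3);              /* mask == 0x8 */
    if (bitop_getbit(mask, 3))
            bitop_clearbit(mask, 3);    /* mask == 0x0 again */
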
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h
new file mode 100644
index 000000000000..badb15705710
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_H_INCLUDED__
+#define __CSI_RX_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "csi_rx_local.h"
+
+#ifndef __INLINE_CSI_RX__
+#include "csi_rx_public.h"
+#else /* __INLINE_CSI_RX__ */
+#include "csi_rx_private.h"
+#endif /* __INLINE_CSI_RX__ */
+
+#endif /* __CSI_RX_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h
new file mode 100644
index 000000000000..ba11b956eb1a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_H_INCLUDED__
+#define __DEBUG_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the debug device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "system_local.h"
+#include "debug_local.h"
+
+#ifndef __INLINE_DEBUG__
+#define STORAGE_CLASS_DEBUG_H extern
+#define STORAGE_CLASS_DEBUG_C
+#include "debug_public.h"
+#else /* __INLINE_DEBUG__ */
+#define STORAGE_CLASS_DEBUG_H static inline
+#define STORAGE_CLASS_DEBUG_C static inline
+#include "debug_private.h"
+#endif /* __INLINE_DEBUG__ */
+
+#endif /* __DEBUG_H_INCLUDED__ */
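
The STORAGE_CLASS toggle used here (and by the dma.h, event_fifo.h, fifo_monitor.h, gdc_device.h, gp_device.h, gp_timer.h and gpio.h wrappers that follow) lets one public header serve both builds: extern prototypes normally, static inline definitions when __INLINE_DEBUG__ is set. A sketch of how a public/private header pair would consume the macros; debug_dump() is a hypothetical name, not a function from the real debug_public.h:

    /* In debug_public.h: prototype or inline body, the wrapper decides. */
    STORAGE_CLASS_DEBUG_H void debug_dump(unsigned int id);

    /* In debug_private.h, seen only when __INLINE_DEBUG__ is defined,
     * where STORAGE_CLASS_DEBUG_C expands to "static inline": */
    STORAGE_CLASS_DEBUG_C void debug_dump(unsigned int id)
    {
            /* cell-specific register accesses would go here */
    }
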
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h
new file mode 100644
index 000000000000..be031d41de7c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h
@@ -0,0 +1,177 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEVICE_ACCESS_H_INCLUDED__
+#define __DEVICE_ACCESS_H_INCLUDED__
+
+/*!
+ * \brief
+ * Define the public interface for physical system
+ * access functions to SRAM and registers. Access
+ * types are limited to those defined in <stdint.h>.
+ * All accesses are aligned.
+ *
+ * The address representation is private to the system
+ * and represented as/stored in "hrt_address".
+ *
+ * The global system address can differ by an offset:
+ * the device base address. This offset must be added
+ * by the implementation of the access functions.
+ *
+ * "store" is a transfer to the device
+ * "load" is a transfer from the device
+ */
+
+#include <type_support.h>
+
+/*
+ * User provided file that defines the system address types:
+ * - hrt_address a type that can hold the (sub)system address range
+ */
+#include "system_types.h"
+/*
+ * We cannot assume that the global system address size is the size of
+ * a pointer because a (say) 64-bit host can be simulated in a 32-bit
+ * environment. Only if the host environment were modelled exactly as the
+ * target could we use a pointer. Even then, prototyping may need to be done
+ * before the target environment is available. As we cannot wait for that,
+ * we are stuck with integer addresses.
+ */
+
+/*typedef char *sys_address;*/
+typedef hrt_address sys_address;
+
+/*! Set the (sub)system base address
+
+ \param base_addr[in] The offset on which the (sub)system is located
+ in the global address map
+
+ \return none
+ */
+void device_set_base_address(
+ const sys_address base_addr);
+
+/*! Get the (sub)system base address
+
+ \return base_address
+ */
+sys_address device_get_base_address(void);
+
+/*! Read an 8-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+uint8_t ia_css_device_load_uint8(
+ const hrt_address addr);
+
+/*! Read a 16-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+uint16_t ia_css_device_load_uint16(
+ const hrt_address addr);
+
+/*! Read a 32-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+uint32_t ia_css_device_load_uint32(
+ const hrt_address addr);
+
+/*! Read a 64-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+uint64_t ia_css_device_load_uint64(
+ const hrt_address addr);
+
+/*! Write an 8-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+void ia_css_device_store_uint8(
+ const hrt_address addr,
+ const uint8_t data);
+
+/*! Write a 16-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+void ia_css_device_store_uint16(
+ const hrt_address addr,
+ const uint16_t data);
+
+/*! Write a 32-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+void ia_css_device_store_uint32(
+ const hrt_address addr,
+ const uint32_t data);
+
+/*! Write a 64-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+void ia_css_device_store_uint64(
+ const hrt_address addr,
+ const uint64_t data);
+
+/*! Read an array of bytes from device registers or memory in the device
+
+ \param addr[in] Local address
+ \param data[out] pointer to the destination array
+ \param size[in] number of bytes to read
+
+ \return none
+ */
+void ia_css_device_load(
+ const hrt_address addr,
+ void *data,
+ const size_t size);
+
+/*! Write an array of bytes to device registers or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] pointer to the source array
+ \param size[in] number of bytes to write
+
+ \return none
+ */
+void ia_css_device_store(
+ const hrt_address addr,
+ const void *data,
+ const size_t size);
+
+#endif /* __DEVICE_ACCESS_H_INCLUDED__ */
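
The intended call sequence is: set the subsystem's base offset once, then use the typed accessors with device-local addresses (the implementation adds the base). A minimal sketch; the base address and register offset below are placeholders:

    /* Sketch: program one 32-bit register and read it back. */
    void example_device_io(void)
    {
            device_set_base_address((sys_address)0x10000);  /* placeholder */

            ia_css_device_store_uint32(0x004, 1);   /* device-local address */
            if (ia_css_device_load_uint32(0x004) != 1)
                    ;       /* handle readback mismatch */
    }
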
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h
new file mode 100644
index 000000000000..b6c464a530ea
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_H_INCLUDED__
+#define __DMA_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the DMA device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "system_local.h"
+#include "dma_local.h"
+
+#ifndef __INLINE_DMA__
+#define STORAGE_CLASS_DMA_H extern
+#define STORAGE_CLASS_DMA_C
+#include "dma_public.h"
+#else /* __INLINE_DMA__ */
+#define STORAGE_CLASS_DMA_H static inline
+#define STORAGE_CLASS_DMA_C static inline
+#include "dma_private.h"
+#endif /* __INLINE_DMA__ */
+
+#endif /* __DMA_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/error_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/error_support.h
new file mode 100644
index 000000000000..4f0d259bf7ed
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/error_support.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ERROR_SUPPORT_H_INCLUDED__
+#define __ERROR_SUPPORT_H_INCLUDED__
+
+#include <linux/errno.h>
+/*
+ * Put here everything __KERNEL__ specific not covered in
+ * "errno.h"
+ */
+#define ENOTSUP 252
+
+#define verifexit(cond, error_tag) \
+do { \
+ if (!(cond)) { \
+ goto EXIT; \
+ } \
+} while (0)
+
+#define verifjmpexit(cond) \
+do { \
+ if (!(cond)) { \
+ goto EXIT; \
+ } \
+} while (0)
+
+#endif /* __ERROR_SUPPORT_H_INCLUDED__ */
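
Both macros jump to a label that the caller must define, and verifexit() silently ignores its error_tag argument, so the error code has to be set at the label. A usage sketch:

    /* Sketch: the caller provides the EXIT label the macros jump to. */
    int example_verif(const void *ptr)
    {
            verifexit(ptr != NULL, EINVAL); /* error_tag is unused */
            /* ... use ptr ... */
            return 0;
    EXIT:
            return -EINVAL; /* setting the error code is the caller's job */
    }
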
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h
new file mode 100644
index 000000000000..8bfe348772f4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __EVENT_FIFO_H
+#define __EVENT_FIFO_H
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the IRQ device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "event_fifo_local.h"
+
+#ifndef __INLINE_EVENT__
+#define STORAGE_CLASS_EVENT_H extern
+#define STORAGE_CLASS_EVENT_C
+#include "event_fifo_public.h"
+#else /* __INLINE_EVENT__ */
+#define STORAGE_CLASS_EVENT_H static inline
+#define STORAGE_CLASS_EVENT_C static inline
+#include "event_fifo_private.h"
+#endif /* __INLINE_EVENT__ */
+
+#endif /* __EVENT_FIFO_H */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h
new file mode 100644
index 000000000000..1743caa006d0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_H_INCLUDED__
+#define __FIFO_MONITOR_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "fifo_monitor_local.h"
+
+#ifndef __INLINE_FIFO_MONITOR__
+#define STORAGE_CLASS_FIFO_MONITOR_H extern
+#define STORAGE_CLASS_FIFO_MONITOR_C
+#include "fifo_monitor_public.h"
+#else /* __INLINE_FIFO_MONITOR__ */
+#define STORAGE_CLASS_FIFO_MONITOR_H static inline
+#define STORAGE_CLASS_FIFO_MONITOR_C static inline
+#include "fifo_monitor_private.h"
+#endif /* __INLINE_FIFO_MONITOR__ */
+
+#endif /* __FIFO_MONITOR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h
new file mode 100644
index 000000000000..4f8d7fbc8e7f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_DEVICE_H_INCLUDED__
+#define __GDC_DEVICE_H_INCLUDED__
+
+/* The file gdc.h already exists */
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the GDC device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "gdc_local.h"
+
+#ifndef __INLINE_GDC__
+#define STORAGE_CLASS_GDC_H extern
+#define STORAGE_CLASS_GDC_C
+#include "gdc_public.h"
+#else /* __INLINE_GDC__ */
+#define STORAGE_CLASS_GDC_H static inline
+#define STORAGE_CLASS_GDC_C static inline
+#include "gdc_private.h"
+#endif /* __INLINE_GDC__ */
+
+#endif /* __GDC_DEVICE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h
new file mode 100644
index 000000000000..665557bae7a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_H_INCLUDED__
+#define __GP_DEVICE_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "gp_device_local.h"
+
+#ifndef __INLINE_GP_DEVICE__
+#define STORAGE_CLASS_GP_DEVICE_H extern
+#define STORAGE_CLASS_GP_DEVICE_C
+#include "gp_device_public.h"
+#else /* __INLINE_GP_DEVICE__ */
+#define STORAGE_CLASS_GP_DEVICE_H static inline
+#define STORAGE_CLASS_GP_DEVICE_C static inline
+#include "gp_device_private.h"
+#endif /* __INLINE_GP_DEVICE__ */
+
+#endif /* __GP_DEVICE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h
new file mode 100644
index 000000000000..cd26c9d16a35
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_H_INCLUDED__
+#define __GP_TIMER_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the GP timer device(s). It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - . system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h" /*GP_TIMER_BASE address */
+#include "gp_timer_local.h" /*GP_TIMER register offsets */
+
+#ifndef __INLINE_GP_TIMER__
+#define STORAGE_CLASS_GP_TIMER_H extern
+#define STORAGE_CLASS_GP_TIMER_C
+#include "gp_timer_public.h" /* functions*/
+#else /* __INLINE_GP_TIMER__ */
+#define STORAGE_CLASS_GP_TIMER_H static inline
+#define STORAGE_CLASS_GP_TIMER_C static inline
+#include "gp_timer_private.h" /* inline functions*/
+#endif /* __INLINE_GP_TIMER__ */
+
+#endif /* __GP_TIMER_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/gpio.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gpio.h
new file mode 100644
index 000000000000..ad79c03e59f4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gpio.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_H_INCLUDED__
+#define __GPIO_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the GPIO device(s). It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - . system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "gpio_local.h"
+
+#ifndef __INLINE_GPIO__
+#define STORAGE_CLASS_GPIO_H extern
+#define STORAGE_CLASS_GPIO_C
+#include "gpio_public.h"
+#else /* __INLINE_GPIO__ */
+#define STORAGE_CLASS_GPIO_H static inline
+#define STORAGE_CLASS_GPIO_C static inline
+#include "gpio_private.h"
+#endif /* __INLINE_GPIO__ */
+
+#endif /* __GPIO_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h
new file mode 100644
index 000000000000..f87fd6b2ba23
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_H_INCLUDED__
+#define __HMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the HMEM device. It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - . system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "hmem_local.h"
+
+#ifndef __INLINE_HMEM__
+#define STORAGE_CLASS_HMEM_H extern
+#define STORAGE_CLASS_HMEM_C
+#include "hmem_public.h"
+#else /* __INLINE_HMEM__ */
+#define STORAGE_CLASS_HMEM_H static inline
+#define STORAGE_CLASS_HMEM_C static inline
+#include "hmem_private.h"
+#endif /* __INLINE_HMEM__ */
+
+#endif /* __HMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
new file mode 100644
index 000000000000..f7cd4d7b96e5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
@@ -0,0 +1,135 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_PUBLIC_H_INCLUDED__
+#define __CSI_RX_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the csi rx frontend state.
+ * Get the state of the csi rx frontend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx fe controller.
+ * @param[out] state Pointer to the register-state.
+ */
+void csi_rx_fe_ctrl_get_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state);
+/**
+ * @brief Dump the csi rx frontend state.
+ * Dump the state of the csi rx frontend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx fe controller.
+ * @param[in] state Pointer to the register-state.
+ */
+void csi_rx_fe_ctrl_dump_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state);
+/**
+ * @brief Get the state of the csi rx fe dlane.
+ * Get the state of the register set per dlane process.
+ *
+ * @param[in] id The global unique ID of the input-buffer controller.
+ * @param[in] lane The lane ID.
+ * @param[out] state Pointer to the dlane state.
+ */
+void csi_rx_fe_ctrl_get_dlane_state(
+ const csi_rx_frontend_ID_t ID,
+ const u32 lane,
+ csi_rx_fe_ctrl_lane_t *dlane_state);
+/**
+ * @brief Get the csi rx backend state.
+ * Get the state of the csi rx backend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx be controller.
+ * @param[out] state Pointer to the register-state.
+ */
+void csi_rx_be_ctrl_get_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state);
+/**
+ * @brief Dump the csi rx backend state.
+ * Dump the state of the csi rx backend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx be controller.
+ * @param[in] state Pointer to the register-state.
+ */
+void csi_rx_be_ctrl_dump_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state);
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the csi rx fe.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+hrt_data csi_rx_fe_ctrl_reg_load(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg);
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the csi rx fe.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+void csi_rx_fe_ctrl_reg_store(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the csi rx be.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+hrt_data csi_rx_be_ctrl_reg_load(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg);
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the csi rx be.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+void csi_rx_be_ctrl_reg_store(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/* end of DLI */
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
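The NCI pair above is normally used together: snapshot the register state into a local structure, then pretty-print it. A minimal sketch, assuming CSI_RX_FRONTEND0_ID is a valid csi_rx_frontend_ID_t enumerator on the 2401 input system:

	#ifdef USE_INPUT_SYSTEM_VERSION_2401
	static void csi_rx_fe_debug_dump(void)
	{
		csi_rx_fe_ctrl_state_t state;

		/* snapshot the frontend registers, then print the snapshot */
		csi_rx_fe_ctrl_get_state(CSI_RX_FRONTEND0_ID, &state);
		csi_rx_fe_ctrl_dump_state(CSI_RX_FRONTEND0_ID, &state);
	}
	#endif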
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h
new file mode 100644
index 000000000000..79a8446658ee
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h
@@ -0,0 +1,98 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_PUBLIC_H_INCLUDED__
+#define __DEBUG_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! \brief
+ *
+ * Simple queuing trace buffer for debug data,
+ * instantiable in SP DMEM.
+ *
+ * The buffer has a remote and a local store
+ * which contain duplicate data (when in sync).
+ * The buffers are automatically synced when the
+ * user dequeues, or manually using the sync function.
+ *
+ * An alternative (storage-efficient) implementation
+ * could manage the buffers to contain unique data.
+ *
+ * The buffer empty status is computed from local
+ * state, which does not reflect the presence of data
+ * in the remote buffer (unless the alternative
+ * implementation is followed).
+ */
+
+typedef struct debug_data_s debug_data_t;
+typedef struct debug_data_ddr_s debug_data_ddr_t;
+
+extern debug_data_t *debug_data_ptr;
+extern hrt_address debug_buffer_address;
+extern hrt_vaddress debug_buffer_ddr_address;
+
+/*! Check the empty state of the local debug data buffer
+
+ \return isEmpty(buffer)
+ */
+STORAGE_CLASS_DEBUG_H bool is_debug_buffer_empty(void);
+
+/*! Dequeue a token from the debug data buffer
+
+ \return isEmpty(buffer)?0:buffer[head]
+ */
+STORAGE_CLASS_DEBUG_H hrt_data debug_dequeue(void);
+
+/*! Synchronise the remote buffer to the local buffer
+
+ \return none
+ */
+STORAGE_CLASS_DEBUG_H void debug_synch_queue(void);
+
+/*! Synchronise the remote buffer to the local buffer
+
+ \return none
+ */
+STORAGE_CLASS_DEBUG_H void debug_synch_queue_isp(void);
+
+/*! Synchronise the remote buffer to the local buffer
+
+ \return none
+ */
+STORAGE_CLASS_DEBUG_H void debug_synch_queue_ddr(void);
+
+/*! Set the offset/address of the (remote) debug buffer
+
+ \return none
+ */
+void debug_buffer_init(
+ const hrt_address addr);
+
+/*! Set the offset/address of the (remote) debug buffer
+
+ \return none
+ */
+void debug_buffer_ddr_init(
+ const hrt_vaddress addr);
+
+/*! Set the (remote) operating mode of the debug buffer
+
+ \return none
+ */
+void debug_buffer_setmode(
+ const debug_buf_mode_t mode);
+
+#endif /* __DEBUG_PUBLIC_H_INCLUDED__ */
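Because the empty test only reflects local state, a consumer should sync before draining. A minimal sketch, with the consume_token() callback as a hypothetical stand-in for whatever the host does with each trace word:

	static void debug_drain(void (*consume_token)(hrt_data))
	{
		/* pull the remote buffer contents into the local duplicate */
		debug_synch_queue();

		/* the empty test sees only local state, hence the sync above */
		while (!is_debug_buffer_empty())
			consume_token(debug_dequeue());
	}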
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h
new file mode 100644
index 000000000000..385b978b703b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h
@@ -0,0 +1,72 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_PUBLIC_H_INCLUDED__
+#define __DMA_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+typedef struct dma_state_s dma_state_t;
+
+/*! Read the control registers of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param state[out] DMA state structure
+
+ \return none, state = DMA[ID].state
+ */
+void dma_get_state(
+ const dma_ID_t ID,
+ dma_state_t *state);
+
+/*! Write to a control register of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, DMA[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_DMA_H void dma_reg_store(
+ const dma_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param reg[in] register index
+
+ \return DMA[ID].ctrl[reg]
+ */
+STORAGE_CLASS_DMA_H hrt_data dma_reg_load(
+ const dma_ID_t ID,
+ const unsigned int reg);
+
+/*! Set maximum burst size of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param conn[in] Connection to set max burst size for
+ \param max_burst_size[in] Maximum burst size in words
+
+ \return none
+*/
+void
+dma_set_max_burst_size(
+ dma_ID_t ID,
+ dma_connection conn,
+ uint32_t max_burst_size);
+
+#endif /* __DMA_PUBLIC_H_INCLUDED__ */
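A short usage sketch of the interface above: read back one control register, then cap the burst size. DMA0_ID, the register index, and the burst value are illustrative placeholders, not values defined by this patch:

	static void dma_tune(dma_connection conn)
	{
		const unsigned int reg = 0;	/* illustrative register index */
		hrt_data ctrl;

		ctrl = dma_reg_load(DMA0_ID, reg);	/* DMA[ID].ctrl[reg] */
		dma_reg_store(DMA0_ID, reg, ctrl);	/* write it back unchanged */

		/* burst size is in words; 8 is an arbitrary example value */
		dma_set_max_burst_size(DMA0_ID, conn, 8);
	}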
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h
new file mode 100644
index 000000000000..a84b74b3bc1e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __EVENT_FIFO_PUBLIC_H
+#define __EVENT_FIFO_PUBLIC_H
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! Blocking wait on an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return none, dequeue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H void event_wait_for(
+ const event_ID_t ID);
+
+/*! Conditional blocking wait for an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+ \param cnd[in] predicate
+
+ \return none, if(cnd) dequeue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H void cnd_event_wait_for(
+ const event_ID_t ID,
+ const bool cnd);
+
+/*! Blocking read from an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return dequeue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H hrt_data event_receive_token(
+ const event_ID_t ID);
+
+/*! Blocking write to an event sink EVENT[ID]
+
+ \param ID[in] EVENT identifier
+ \param token[in] token to be written on the event
+
+ \return none, enqueue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H void event_send_token(
+ const event_ID_t ID,
+ const hrt_data token);
+
+/*! Query an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return !isempty(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H bool is_event_pending(
+ const event_ID_t ID);
+
+/*! Query an event sink EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return !isfull(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H bool can_event_send_token(
+ const event_ID_t ID);
+
+#endif /* __EVENT_FIFO_PUBLIC_H */
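The two queries make non-blocking variants of the blocking calls easy to build; a minimal sketch of a forwarder that moves one token between two event queues only when neither side can block:

	static void event_forward_one(const event_ID_t src, const event_ID_t dst)
	{
		/* move a token only when the read cannot block (source has a
		 * pending token) and the write cannot block (sink has room) */
		if (is_event_pending(src) && can_event_send_token(dst))
			event_send_token(dst, event_receive_token(src));
	}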
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h
new file mode 100644
index 000000000000..e451d6f2a70d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h
@@ -0,0 +1,110 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_PUBLIC_H_INCLUDED__
+#define __FIFO_MONITOR_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+typedef struct fifo_channel_state_s fifo_channel_state_t;
+typedef struct fifo_switch_state_s fifo_switch_state_t;
+typedef struct fifo_monitor_state_s fifo_monitor_state_t;
+
+/*! Set a fifo switch multiplex
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param switch_id[in] fifo switch identifier
+ \param sel[in] fifo switch selector
+
+ \return none, fifo_switch[switch_id].sel = sel
+ */
+STORAGE_CLASS_FIFO_MONITOR_H void fifo_switch_set(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ const hrt_data sel);
+
+/*! Get a fifo switch multiplex
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param switch_id[in] fifo switch identifier
+
+ \return fifo_switch[switch_id].sel
+ */
+STORAGE_CLASS_FIFO_MONITOR_H hrt_data fifo_switch_get(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id);
+
+/*! Read the state of FIFO_MONITOR[ID]
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param state[out] fifo monitor state structure
+
+ \return none, state = FIFO_MONITOR[ID].state
+ */
+void fifo_monitor_get_state(
+ const fifo_monitor_ID_t ID,
+ fifo_monitor_state_t *state);
+
+/*! Read the state of a fifo channel
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param channel_id[in] fifo channel identifier
+ \param state[out] fifo channel state structure
+
+ \return none, state = fifo_channel[channel_id].state
+ */
+void fifo_channel_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_channel_t channel_id,
+ fifo_channel_state_t *state);
+
+/*! Read the state of a fifo switch
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param switch_id[in] fifo switch identifier
+ \param state[out] fifo switch state structure
+
+ \return none, state = fifo_switch[switch_id].state
+ */
+void fifo_switch_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ fifo_switch_state_t *state);
+
+/*! Write to a control register of FIFO_MONITOR[ID]
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, FIFO_MONITOR[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_FIFO_MONITOR_H void fifo_monitor_reg_store(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of FIFO_MONITOR[ID]
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param reg[in] register index
+
+ \return FIFO_MONITOR[ID].ctrl[reg]
+ */
+STORAGE_CLASS_FIFO_MONITOR_H hrt_data fifo_monitor_reg_load(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg);
+
+#endif /* __FIFO_MONITOR_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h
new file mode 100644
index 000000000000..fc6f42e76fbe
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h
@@ -0,0 +1,59 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_PUBLIC_H_INCLUDED__
+#define __GDC_PUBLIC_H_INCLUDED__
+
+/*! Write the bicubic interpolation table of GDC[ID]
+
+ \param ID[in] GDC identifier
+ \param data[in] The data matrix to be written
+
+ \pre
+ - data must point to a matrix[4][HRT_GDC_N]
+
+ \implementation dependent
+ - The value of "HRT_GDC_N" is device specific
+ - The LUT should not be partially written
+ - The LUT format is a quadri-phase interpolation
+ table. The layout is device specific
+ - The range of the values data[n][m] is device
+ specific
+
+ \return none, GDC[ID].lut[0...3][0...HRT_GDC_N-1] = data
+ */
+void gdc_lut_store(
+ const gdc_ID_t ID,
+ const int data[4][HRT_GDC_N]);
+
+/*! Convert the bicubic interpolation table of GDC[ID] to the ISP-specific format
+
+ \param ID[in] GDC identifier
+ \param in_lut[in] The data matrix to be converted
+ \param out_lut[out] The data matrix as the output of conversion
+ */
+void gdc_lut_convert_to_isp_format(
+ const int in_lut[4][HRT_GDC_N],
+ int out_lut[4][HRT_GDC_N]);
+
+/*! Return the integer representation of 1.0 of GDC[ID]
+
+ \param ID[in] GDC identifier
+
+ \return unity
+ */
+int gdc_get_unity(
+ const gdc_ID_t ID);
+
+#endif /* __GDC_PUBLIC_H_INCLUDED__ */
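A plausible host-side load sequence for the functions above, assuming host_lut already holds valid coefficients in the device-specific range: convert once, then write the LUT in full, since a partial write is not allowed:

	static void gdc_load_lut(const gdc_ID_t ID,
				 const int host_lut[4][HRT_GDC_N])
	{
		int isp_lut[4][HRT_GDC_N];

		/* convert on the host, then store the complete table */
		gdc_lut_convert_to_isp_format(host_lut, isp_lut);
		gdc_lut_store(ID, isp_lut);
	}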
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h
new file mode 100644
index 000000000000..7cc0799d49ed
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h
@@ -0,0 +1,58 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_PUBLIC_H_INCLUDED__
+#define __GP_DEVICE_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+typedef struct gp_device_state_s gp_device_state_t;
+
+/*! Read the state of GP_DEVICE[ID]
+
+ \param ID[in] GP_DEVICE identifier
+ \param state[out] gp device state structure
+
+ \return none, state = GP_DEVICE[ID].state
+ */
+void gp_device_get_state(
+ const gp_device_ID_t ID,
+ gp_device_state_t *state);
+
+/*! Write to a control register of GP_DEVICE[ID]
+
+ \param ID[in] GP_DEVICE identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, GP_DEVICE[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_GP_DEVICE_H void gp_device_reg_store(
+ const gp_device_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value);
+
+/*! Read from a control register of GP_DEVICE[ID]
+
+ \param ID[in] GP_DEVICE identifier
+ \param reg_addr[in] register byte address
+
+ \return GP_DEVICE[ID].ctrl[reg]
+ */
+STORAGE_CLASS_GP_DEVICE_H hrt_data gp_device_reg_load(
+ const gp_device_ID_t ID,
+ const hrt_address reg_addr);
+
+#endif /* __GP_DEVICE_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h
new file mode 100644
index 000000000000..2ddb8c40a5b2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_PUBLIC_H_INCLUDED__
+#define __GP_TIMER_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+/*! Initialize the selected timer
+ \param ID timer_id
+*/
+extern void
+gp_timer_init(gp_timer_ID_t ID);
+
+/*! Read the current value of the selected (platform-dependent) timer.
+ \param ID timer_id
+ \return uint32_t 32-bit timer value
+*/
+extern uint32_t
+gp_timer_read(gp_timer_ID_t ID);
+
+#endif /* __GP_TIMER_PUBLIC_H_INCLUDED__ */
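Since gp_timer_read() returns a free-running 32-bit count, an elapsed-time measurement is just unsigned subtraction. A minimal sketch, assuming GP_TIMER0_ID is a valid gp_timer_ID_t enumerator:

	static uint32_t gp_timer_elapsed(void)
	{
		uint32_t t0, t1;

		gp_timer_init(GP_TIMER0_ID);
		t0 = gp_timer_read(GP_TIMER0_ID);
		/* ... section under measurement ... */
		t1 = gp_timer_read(GP_TIMER0_ID);

		return t1 - t0;	/* unsigned arithmetic absorbs one wrap-around */
	}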
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gpio_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gpio_public.h
new file mode 100644
index 000000000000..d21aab3a179d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gpio_public.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_PUBLIC_H_INCLUDED__
+#define __GPIO_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+/*! Write to a control register of GPIO[ID]
+
+ \param ID[in] GPIO identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, GPIO[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_GPIO_H void gpio_reg_store(
+ const gpio_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value);
+
+/*! Read from a control register of GPIO[ID]
+
+ \param ID[in] GPIO identifier
+ \param reg_addr[in] register byte address
+
+ \return GPIO[ID].ctrl[reg]
+ */
+STORAGE_CLASS_GPIO_H hrt_data gpio_reg_load(
+ const gpio_ID_t ID,
+ const unsigned int reg_addr);
+
+#endif /* __GPIO_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h
new file mode 100644
index 000000000000..567fbc1d35e7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_PUBLIC_H_INCLUDED__
+#define __HMEM_PUBLIC_H_INCLUDED__
+
+#include <linux/types.h> /* size_t */
+
+/*! Return the size of HMEM[ID]
+
+ \param ID[in] HMEM identifier
+
+ \Note: The size is the byte size of the area the memory occupies
+ in the address map, i.e. disregarding internal structure
+
+ \return sizeof(HMEM[ID])
+ */
+STORAGE_CLASS_HMEM_H size_t sizeof_hmem(
+ const hmem_ID_t ID);
+
+#endif /* __HMEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ibuf_ctrl_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ibuf_ctrl_public.h
new file mode 100644
index 000000000000..6b17a6b651b7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ibuf_ctrl_public.h
@@ -0,0 +1,93 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_PUBLIC_H_INCLUDED__
+#define __IBUF_CTRL_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the ibuf-controller state.
+ * Get the state of the ibuf-controller register-set.
+ *
+ * @param[in] id The global unique ID of the input-buffer controller.
+ * @param[out] state Pointer to the register-state.
+ */
+STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_state(
+ const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state);
+
+/**
+ * @brief Get the state of the ibuf-controller process.
+ * Get the state of the register set per ibuf-controller process.
+ *
+ * @param[in] id The global unique ID of the input-buffer controller.
+ * @param[in] proc_id The process ID.
+ * @param[out] state Pointer to the process state.
+ */
+STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_proc_state(
+ const ibuf_ctrl_ID_t ID,
+ const u32 proc_id,
+ ibuf_ctrl_proc_state_t *state);
+/**
+ * @brief Dump the ibuf-controller state.
+ * Dump the state of the ibuf-controller register-set.
+ *
+ * @param[in] id The global unique ID of the input-buffer controller.
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_dump_state(
+ const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state);
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the ibuf-controller.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_IBUF_CTRL_H hrt_data ibuf_ctrl_reg_load(
+ const ibuf_ctrl_ID_t ID,
+ const hrt_address reg);
+
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the ibuf-controller.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_reg_store(
+ const ibuf_ctrl_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/* end of DLI */
+
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* __IBUF_CTRL_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h
new file mode 100644
index 000000000000..e5758cb8bedd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h
@@ -0,0 +1,115 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_PUBLIC_H_INCLUDED__
+#define __INPUT_FORMATTER_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! Reset INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+
+ \return none, reset(INPUT_FORMATTER[ID])
+ */
+void input_formatter_rst(
+ const input_formatter_ID_t ID);
+
+/*! Set the blocking mode of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param enable[in] blocking enable flag
+
+ \use
+ - In HW, the capture unit will deliver an infinite stream of frames,
+ and the input formatter will synchronise on the first SOF. In simulation
+ there is only a fixed number of frames, presented only once. By
+ enabling blocking, the input formatter will wait on the first presented
+ frame, thus avoiding a race in the simulation setup.
+
+ \return none, INPUT_FORMATTER[ID].blocking_mode = enable
+ */
+void input_formatter_set_fifo_blocking_mode(
+ const input_formatter_ID_t ID,
+ const bool enable);
+
+/*! Return the data alignment of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+
+ \return alignment(INPUT_FORMATTER[ID].data)
+ */
+unsigned int input_formatter_get_alignment(
+ const input_formatter_ID_t ID);
+
+/*! Read the source switch state into INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param state[out] input formatter switch state structure
+
+ \return none, state = INPUT_FORMATTER[ID].switch_state
+ */
+void input_formatter_get_switch_state(
+ const input_formatter_ID_t ID,
+ input_formatter_switch_state_t *state);
+
+/*! Read the control registers of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param state[out] input formatter state structure
+
+ \return none, state = INPUT_FORMATTER[ID].state
+ */
+void input_formatter_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_state_t *state);
+
+/*! Read the control registers of bin copy INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param state[out] input formatter state structure
+
+ \return none, state = INPUT_FORMATTER[ID].state
+ */
+void input_formatter_bin_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_bin_state_t *state);
+
+/*! Write to a control register of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, INPUT_FORMATTER[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_FORMATTER_H void input_formatter_reg_store(
+ const input_formatter_ID_t ID,
+ const hrt_address reg_addr,
+ const hrt_data value);
+
+/*! Read from a control register of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param reg_addr[in] register byte address
+
+ \return INPUT_FORMATTER[ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_FORMATTER_H hrt_data input_formatter_reg_load(
+ const input_formatter_ID_t ID,
+ const unsigned int reg_addr);
+
+#endif /* __INPUT_FORMATTER_PUBLIC_H_INCLUDED__ */
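Following the \use note on input_formatter_set_fifo_blocking_mode(), a setup path would enable blocking only when frames are finite. A hedged sketch; the on_simulator flag is assumed to come from the platform layer:

	static void if_setup(const input_formatter_ID_t ID, bool on_simulator)
	{
		input_formatter_rst(ID);
		/* block on the first presented frame only where the frame
		 * stream is finite, i.e. in simulation */
		input_formatter_set_fifo_blocking_mode(ID, on_simulator);
	}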
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h
new file mode 100644
index 000000000000..dfe2aa9ff257
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h
@@ -0,0 +1,184 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_PUBLIC_H_INCLUDED__
+#define __IRQ_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! Read the control registers of IRQ[ID]
+
+ \param ID[in] IRQ identifier
+ \param state[out] irq controller state structure
+
+ \return none, state = IRQ[ID].state
+ */
+void irq_controller_get_state(
+ const irq_ID_t ID,
+ irq_controller_state_t *state);
+
+/*! Write to a control register of IRQ[ID]
+
+ \param ID[in] IRQ identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, IRQ[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_IRQ_H void irq_reg_store(
+ const irq_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of IRQ[ID]
+
+ \param ID[in] IRQ identifier
+ \param reg[in] register index
+
+ \return IRQ[ID].ctrl[reg]
+ */
+STORAGE_CLASS_IRQ_H hrt_data irq_reg_load(
+ const irq_ID_t ID,
+ const unsigned int reg);
+
+/*! Enable an IRQ channel of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq[in] IRQ (channel) identifier
+
+ \return none, enable(IRQ[ID].channel[irq_ID])
+ */
+void irq_enable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq_ID);
+
+/*! Enable or disable pulse interrupts for IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param pulse[in] enable/disable pulse interrupts
+
+ \return none
+ */
+void irq_enable_pulse(
+ const irq_ID_t ID,
+ bool pulse);
+
+/*! Disable an IRQ channel of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq[in] IRQ (channel) identifier
+
+ \return none, disable(IRQ[ID].channel[irq_ID])
+ */
+void irq_disable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq);
+
+/*! Clear the state of all IRQ channels of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+
+ \return none, clear(IRQ[ID].channel[])
+ */
+void irq_clear_all(
+ const irq_ID_t ID);
+
+/*! Return the ID of a signalling IRQ channel of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq_id[out] active IRQ (channel) identifier
+
+ \Note: This function operates like strtok(); based on the return
+ state the user is informed whether there are additional signalling
+ channels
+
+ \return state(IRQ[ID])
+ */
+enum hrt_isp_css_irq_status irq_get_channel_id(
+ const irq_ID_t ID,
+ unsigned int *irq_id);
+
+/*! Raise an interrupt on channel irq_id of device IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq_id[in] IRQ (channel) identifier
+
+ \return none, signal(IRQ[ID].channel[irq_id])
+ */
+void irq_raise(
+ const irq_ID_t ID,
+ const irq_sw_channel_id_t irq_id);
+
+/*! Test if any IRQ channel of the virtual super IRQ has raised a signal
+
+ \return any(VIRQ.channel[irq_ID] != 0)
+ */
+bool any_virq_signal(void);
+
+/*! Enable an IRQ channel of the virtual super IRQ
+
+ \param irq[in] IRQ (channel) identifier
+ \param en[in] predicate channel enable
+
+ \return none, VIRQ.channel[irq_ID].enable = en
+ */
+void cnd_virq_enable_channel(
+ const virq_id_t irq_ID,
+ const bool en);
+
+/*! Clear the state of all IRQ channels of the virtual super IRQ
+
+ \return none, clear(VIRQ.channel[])
+ */
+void virq_clear_all(void);
+
+/*! Clear the IRQ info state of the virtual super IRQ
+
+ \param irq_info[in/out] The IRQ (channel) state
+
+ \return none
+ */
+void virq_clear_info(
+ virq_info_t *irq_info);
+
+/*! Return the ID of a signalling IRQ channel of the virtual super IRQ
+
+ \param irq_id[out] active IRQ (channel) identifier
+
+ \Note: This function operates like strtok(); based on the return
+ state the user is informed whether there are additional signalling
+ channels
+
+ \return state(IRQ[...])
+ */
+enum hrt_isp_css_irq_status virq_get_channel_id(
+ virq_id_t *irq_id);
+
+/*! Return the IDs of all signaling IRQ channels of the virtual super IRQ
+
+ \param irq_info[out] all active IRQ (channel) identifiers
+
+ \Note: Unlike "irq_get_channel_id()" this function returns all
+ channel signaling info. The new info is OR'd with the current
+ info state. N.B. this is the same as repeatedly calling the function
+ "irq_get_channel_id()" in a (non-blocked) handler routine
+
+ \return error(state(IRQ[...]))
+ */
+enum hrt_isp_css_irq_status virq_get_channel_signals(
+ virq_info_t *irq_info);
+
+#endif /* __IRQ_PUBLIC_H_INCLUDED__ */
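Putting the virtual-IRQ calls together: a handler guards with any_virq_signal() and then iterates virq_get_channel_id() strtok()-style until no channels remain. A minimal sketch, assuming hrt_isp_css_irq_status_more_irqs is the status value that reports further pending channels:

	static void virq_dispatch(void (*handle)(virq_id_t))
	{
		virq_id_t irq_id;
		enum hrt_isp_css_irq_status status;

		if (!any_virq_signal())
			return;

		/* each call yields one signalling channel; the status says
		 * whether more channels are still pending */
		do {
			status = virq_get_channel_id(&irq_id);
			handle(irq_id);
		} while (status == hrt_isp_css_irq_status_more_irqs);
	}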
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h
new file mode 100644
index 000000000000..0da2937b900e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h
@@ -0,0 +1,185 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_PUBLIC_H_INCLUDED__
+#define __ISP_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! Enable or disable the program complete irq signal of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param cnd[in] predicate
+
+ \return none, if(cnd) enable(ISP[ID].irq) else disable(ISP[ID].irq)
+ */
+void cnd_isp_irq_enable(
+ const isp_ID_t ID,
+ const bool cnd);
+
+/*! Read the state of cell ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param state[out] isp state structure
+ \param stall[out] isp stall conditions
+
+ \return none, state = ISP[ID].state, stall = ISP[ID].stall
+ */
+void isp_get_state(
+ const isp_ID_t ID,
+ isp_state_t *state,
+ isp_stall_t *stall);
+
+/*! Write to the status and control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, ISP[ID].sc[reg] = value
+ */
+STORAGE_CLASS_ISP_H void isp_ctrl_store(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from the status and control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+
+ \return ISP[ID].sc[reg]
+ */
+STORAGE_CLASS_ISP_H hrt_data isp_ctrl_load(
+ const isp_ID_t ID,
+ const unsigned int reg);
+
+/*! Get the status of a bitfield in the control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be checked
+
+ \return (ISP[ID].sc[reg] & (1<<bit)) != 0
+ */
+STORAGE_CLASS_ISP_H bool isp_ctrl_getbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit);
+
+/*! Set a bitfield in the control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be set
+
+ \return none, ISP[ID].sc[reg] |= (1<<bit)
+ */
+STORAGE_CLASS_ISP_H void isp_ctrl_setbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit);
+
+/*! Clear a bitfield in the control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be cleared
+
+ \return none, ISP[ID].sc[reg] &= ~(1<<bit)
+ */
+STORAGE_CLASS_ISP_H void isp_ctrl_clearbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit);
+
+/*! Write to the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+ \param size[in] The size(in bytes) of the data to be written
+
+ \return none, ISP[ID].dmem[addr...addr+size-1] = data
+ */
+STORAGE_CLASS_ISP_H void isp_dmem_store(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const void *data,
+ const size_t size);
+
+/*! Read from the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+ \param data[out] The data to be read
+ \param size[in] The size(in bytes) of the data to be read
+
+ \return none, data = ISP[ID].dmem[addr...addr+size-1]
+ */
+STORAGE_CLASS_ISP_H void isp_dmem_load(
+ const isp_ID_t ID,
+ const unsigned int addr,
+ void *data,
+ const size_t size);
+
+/*! Write a 32-bit datum to the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, ISP[ID].dmem[addr] = data
+ */
+STORAGE_CLASS_ISP_H void isp_dmem_store_uint32(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const uint32_t data);
+
+/*! Load a 32-bit datum from the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+
+ \return none, data = ISP[ID].dmem[addr]
+ */
+STORAGE_CLASS_ISP_H uint32_t isp_dmem_load_uint32(
+ const isp_ID_t ID,
+ const unsigned int addr);
+
+/*! Concatenate the LSW and MSW into a double precision word
+
+ \param x0[in] Integer containing the LSW
+ \param x1[in] Integer containing the MSW
+
+ \return x0 | (x1 << bits_per_vector_element)
+ */
+STORAGE_CLASS_ISP_H uint32_t isp_2w_cat_1w(
+ const u16 x0,
+ const u16 x1);
+
+unsigned int isp_is_ready(isp_ID_t ID);
+
+unsigned int isp_is_sleeping(isp_ID_t ID);
+
+void isp_start(isp_ID_t ID);
+
+void isp_wake(isp_ID_t ID);
+
+#endif /* __ISP_PUBLIC_H_INCLUDED__ */
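A hedged sketch of the host-side launch sequence these primitives support: publish an argument word in DMEM, start the cell, then poll for completion. The argument address is a placeholder that the ISP program would have to agree on:

	static void isp_run_once(const isp_ID_t ID, unsigned int arg_addr,
				 uint32_t arg)
	{
		isp_dmem_store_uint32(ID, arg_addr, arg); /* pass the argument */
		isp_start(ID);

		while (!isp_is_ready(ID))
			;	/* busy-wait; bound or sleep in production code */
	}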
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
new file mode 100644
index 000000000000..734634aedadf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_PUBLIC_H_INCLUDED__
+#define __ISYS_DMA_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "system_types.h"
+#include "type_support.h"
+
+STORAGE_CLASS_ISYS2401_DMA_H void isys2401_dma_reg_store(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg,
+ const hrt_data value);
+
+STORAGE_CLASS_ISYS2401_DMA_H hrt_data isys2401_dma_reg_load(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg);
+
+void isys2401_dma_set_max_burst_size(
+ const isys2401_dma_ID_t dma_id,
+ uint32_t max_burst_size);
+
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+
+#endif /* __ISYS_DMA_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
new file mode 100644
index 000000000000..d561783e47a3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_IRQ_PUBLIC_H__
+#define __ISYS_IRQ_PUBLIC_H__
+
+#include "isys_irq_global.h"
+#include "isys_irq_local.h"
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_state_get(
+ const isys_irq_ID_t isys_irqc_id,
+ isys_irqc_state_t *state);
+
+STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_state_dump(
+ const isys_irq_ID_t isys_irqc_id,
+ const isys_irqc_state_t *state);
+
+STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_reg_store(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx,
+ const hrt_data value);
+
+STORAGE_CLASS_ISYS2401_IRQ_H hrt_data isys_irqc_reg_load(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx);
+
+STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_status_enable(
+ const isys_irq_ID_t isys_irqc_id);
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __ISYS_IRQ_PUBLIC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h
new file mode 100644
index 000000000000..2d1859f2c656
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_PUBLIC_H_INCLUDED__
+#define __ISYS_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/*! Read the state of INPUT_SYSTEM[ID]
+ \param ID[in] INPUT_SYSTEM identifier
+ \param state[out] pointer to input system state structure
+ \return input system error code, state = INPUT_SYSTEM[ID].state
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H input_system_err_t input_system_get_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state);
+/*! Dump the state of INPUT_SYSTEM[ID]
+ \param ID[in] INPUT_SYSTEM identifier
+ \param state[in] pointer to input system state structure
+ \return none
+ \depends on a host-supplied print function registered as part of ia_css_init()
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void input_system_dump_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state);
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* __ISYS_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h
new file mode 100644
index 000000000000..87a8d512713e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h
@@ -0,0 +1,101 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the stream2mmio-controller state.
+ * Get the state of the stream2mmio-controller register-set.
+ *
+ * @param[in] id The global unique ID of the stream2mmio controller.
+ * @param[out] state Pointer to the register-state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state);
+
+/**
+ * @brief Get the state of a stream2mmio-controller SID.
+ * Get the state of the register set per SID process.
+ *
+ * @param[in] id The global unique ID of the stream2mmio controller.
+ * @param[in] sid_id The SID ID.
+ * @param[out] state Pointer to the SID state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_sid_state(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ stream2mmio_sid_state_t *state);
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the stream2mmio-controller.
+ *
+ * @param[in] ID The global unique ID for the stream2mmio-controller instance.
+ * @param[in] sid_id The SID in question.
+ * @param[in] reg_idx The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_STREAM2MMIO_H hrt_data stream2mmio_reg_load(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ const uint32_t reg_idx);
+
+/**
+ * @brief Dump the SID processor state.
+ * Dump the state of the SID register-set.
+ *
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_print_sid_state(
+ stream2mmio_sid_state_t *state);
+/**
+ * @brief Dump the stream2mmio state.
+ * Dump the state of the stream2mmio register-set.
+ *
+ * @param[in] id The global unique ID of the stream2mmio controller
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_dump_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state);
+/**
+ * @brief Store a value to a register.
+ * Store a value to a register of the stream2mmio-controller.
+ *
+ * @param[in] ID The globally unique ID of the stream2mmio-controller instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_reg_store(
+ const stream2mmio_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/* end of DLI */
+
+#endif /* __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__ */
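A minimal host-side sketch of the NCI and DLI calls declared above. The identifiers STREAM2MMIO0_ID and STREAM2MMIO_SID0_ID and the register index 0 are illustrative assumptions, not names defined by this header:

static void example_stream2mmio_inspect(void)
{
	stream2mmio_state_t state;
	stream2mmio_sid_state_t sid_state;
	hrt_data reg_val;

	/* NCI: snapshot the whole controller, then a single SID */
	stream2mmio_get_state(STREAM2MMIO0_ID, &state);
	stream2mmio_get_sid_state(STREAM2MMIO0_ID, STREAM2MMIO_SID0_ID,
				  &sid_state);

	/* DLI: raw register read for the same SID (index assumed) */
	reg_val = stream2mmio_reg_load(STREAM2MMIO0_ID, STREAM2MMIO_SID0_ID, 0);
	(void)reg_val;
}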
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h
new file mode 100644
index 000000000000..278f9cd85a00
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h
@@ -0,0 +1,94 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MMU_PUBLIC_H_INCLUDED__
+#define __MMU_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+#include "device_access.h"
+#include "assert_support.h"
+
+/*! Set the page table base index of MMU[ID]
+
+ \param ID[in] MMU identifier
+ \param base_index[in] page table base index
+
+ \return none, MMU[ID].page_table_base_index = base_index
+ */
+void mmu_set_page_table_base_index(
+ const mmu_ID_t ID,
+ const hrt_data base_index);
+
+/*! Get the page table base index of MMU[ID]
+
+ \param ID[in] MMU identifier
+
+ \return MMU[ID].page_table_base_index
+ */
+hrt_data mmu_get_page_table_base_index(
+ const mmu_ID_t ID);
+
+/*! Invalidate the page table cache of MMU[ID]
+
+ \param ID[in] MMU identifier
+
+ \return none
+ */
+void mmu_invalidate_cache(
+ const mmu_ID_t ID);
+
+/*! Invalidate the page table cache of all MMUs
+
+ \return none
+ */
+void mmu_invalidate_cache_all(void);
+
+/*! Write to a control register of MMU[ID]
+
+ \param ID[in] MMU identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, MMU[ID].ctrl[reg] = value
+ */
+static inline void mmu_reg_store(
+ const mmu_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(MMU_BASE[ID] + reg * sizeof(hrt_data), value);
+}
+
+/*! Read from a control register of MMU[ID]
+
+ \param ID[in] MMU identifier
+ \param reg[in] register index
+
+ \return MMU[ID].ctrl[reg]
+ */
+static inline hrt_data mmu_reg_load(
+ const mmu_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(MMU_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __MMU_PUBLIC_H_INCLUDED__ */
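A minimal sketch of the intended call order when repointing an MMU: set the new page table base, then invalidate the translations cached from the old table. MMU0_ID is an assumed identifier for illustration:

static void example_mmu_switch_page_table(hrt_data new_base_index)
{
	/* Point the MMU at the new page table ... */
	mmu_set_page_table_base_index(MMU0_ID, new_base_index);
	/* ... then drop any translations cached from the old table */
	mmu_invalidate_cache(MMU0_ID);
}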
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
new file mode 100644
index 000000000000..ba67a276ce55
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_PUBLIC_H_INCLUDED__
+#define __PIXELGEN_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the pixelgen state.
+ * Get the state of the pixelgen register-set.
+ *
+ * @param[in] ID The globally unique ID of the pixelgen controller.
+ * @param[out] state Pointer to the register-state.
+ */
+STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_get_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state);
+/**
+ * @brief Dump the pixelgen state.
+ * Dump the state of the pixelgen register-set.
+ *
+ * @param[in] ID The globally unique ID of the pixelgen controller.
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_dump_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state);
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load a register value.
+ * Load the value of a register of the pixelgen.
+ *
+ * @param[in] ID The globally unique ID of the pixelgen instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_PIXELGEN_H hrt_data pixelgen_ctrl_reg_load(
+ const pixelgen_ID_t ID,
+ const hrt_address reg);
+/**
+ * @brief Store a value to a register.
+ * Store a value to a register of the pixelgen.
+ *
+ * @param[in] ID The globally unique ID of the pixelgen.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ */
+STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
+ const pixelgen_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/* end of DLI */
+
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h
new file mode 100644
index 000000000000..b8db5469b592
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h
@@ -0,0 +1,223 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_PUBLIC_H_INCLUDED__
+#define __SP_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+typedef struct sp_state_s sp_state_t;
+typedef struct sp_stall_s sp_stall_t;
+
+/*! Enable or disable the program complete irq signal of SP[ID]
+
+ \param ID[in] SP identifier
+ \param cnd[in] predicate
+
+ \return none, if(cnd) enable(SP[ID].irq) else disable(SP[ID].irq)
+ */
+void cnd_sp_irq_enable(
+ const sp_ID_t ID,
+ const bool cnd);
+
+/*! Read the state of cell SP[ID]
+
+ \param ID[in] SP identifier
+ \param state[out] sp state structure
+ \param stall[out] sp stall conditions
+
+ \return none, state = SP[ID].state, stall = SP[ID].stall
+ */
+void sp_get_state(
+ const sp_ID_t ID,
+ sp_state_t *state,
+ sp_stall_t *stall);
+
+/*! Write to the status and control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, SP[ID].sc[reg] = value
+ */
+STORAGE_CLASS_SP_H void sp_ctrl_store(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from the status and control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+
+ \return SP[ID].sc[reg]
+ */
+STORAGE_CLASS_SP_H hrt_data sp_ctrl_load(
+ const sp_ID_t ID,
+ const hrt_address reg);
+
+/*! Get the status of a bitfield in the control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be checked
+
+ \return (SP[ID].sc[reg] & (1<<bit)) != 0
+ */
+STORAGE_CLASS_SP_H bool sp_ctrl_getbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit);
+
+/*! Set a bitfield in the control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be set
+
+ \return none, SP[ID].sc[reg] |= (1<<bit)
+ */
+STORAGE_CLASS_SP_H void sp_ctrl_setbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit);
+
+/*! Clear a bitfield in the control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be cleared
+
+ \return none, SP[ID].sc[reg] &= ~(1<<bit)
+ */
+STORAGE_CLASS_SP_H void sp_ctrl_clearbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit);
+
+/*! Write to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+ \param size[in] The size(in bytes) of the data to be written
+
+ \return none, SP[ID].dmem[addr...addr+size-1] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const void *data,
+ const size_t size);
+
+/*! Read from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[out] The buffer receiving the data
+ \param size[in] The size(in bytes) of the data to be read
+
+ \return none, data = SP[ID].dmem[addr...addr+size-1]
+ */
+STORAGE_CLASS_SP_H void sp_dmem_load(
+ const sp_ID_t ID,
+ const hrt_address addr,
+ void *data,
+ const size_t size);
+
+/*! Write an 8-bit datum to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, SP[ID].dmem[addr] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store_uint8(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint8_t data);
+
+/*! Write a 16-bit datum to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, SP[ID].dmem[addr] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store_uint16(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint16_t data);
+
+/*! Write a 32-bit datum to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, SP[ID].dmem[addr] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store_uint32(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint32_t data);
+
+/*! Load an 8-bit datum from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+
+ \return SP[ID].dmem[addr]
+ */
+STORAGE_CLASS_SP_H uint8_t sp_dmem_load_uint8(
+ const sp_ID_t ID,
+ const hrt_address addr);
+
+/*! Load a 16-bit datum from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+
+ \return SP[ID].dmem[addr]
+ */
+STORAGE_CLASS_SP_H uint16_t sp_dmem_load_uint16(
+ const sp_ID_t ID,
+ const hrt_address addr);
+
+/*! Load a 32-bit datum from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+
+ \return SP[ID].dmem[addr]
+ */
+STORAGE_CLASS_SP_H uint32_t sp_dmem_load_uint32(
+ const sp_ID_t ID,
+ const hrt_address addr);
+
+#endif /* __SP_PUBLIC_H_INCLUDED__ */
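A hypothetical sketch tying the DMEM and control-register accessors together: hand a parameter to the SP through DMEM, then raise a bit in its status/control register. SP0_ID, the register index and the bit assignment are assumptions for illustration:

static void example_sp_kick(hrt_address param_addr, uint32_t param)
{
	sp_dmem_store_uint32(SP0_ID, param_addr, param);
	/* Assumed: control register 0, bit 0 acts as the start/run bit */
	sp_ctrl_setbit(SP0_ID, 0, 0);
	/* Poll the same bit until the cell deasserts it */
	while (sp_ctrl_getbit(SP0_ID, 0, 0))
		;
}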
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h
new file mode 100644
index 000000000000..afd5a59489cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_PUBLIC_H_INCLUDED__
+#define __TAG_PUBLIC_H_INCLUDED__
+
+/**
+ * @brief Creates the tag description from the given parameters.
+ * @param[in] num_captures
+ * @param[in] skip
+ * @param[in] offset
+ * @param[out] tag_descr
+ */
+void
+sh_css_create_tag_descr(int num_captures,
+ unsigned int skip,
+ int offset,
+ unsigned int exp_id,
+ struct sh_css_tag_descr *tag_descr);
+
+/**
+ * @brief Encodes the members of tag description into a 32-bit value.
+ * @param[in] tag Pointer to the tag description
+ * @return (unsigned int) Encoded 32-bit tag-info
+ */
+unsigned int
+sh_css_encode_tag_descr(struct sh_css_tag_descr *tag);
+
+#endif /* __TAG_PUBLIC_H_INCLUDED__ */
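A minimal sketch of the two calls above: build a descriptor for three captures, skipping every other frame from offset 0, then pack it into its 32-bit wire form. The exp_id of 0 is an illustrative assumption:

static unsigned int example_make_tag(void)
{
	struct sh_css_tag_descr descr;

	sh_css_create_tag_descr(3 /* num_captures */, 1 /* skip */,
				0 /* offset */, 0 /* exp_id */, &descr);
	return sh_css_encode_tag_descr(&descr);
}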
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h
new file mode 100644
index 000000000000..5f9277adb2ab
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h
@@ -0,0 +1,59 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_PUBLIC_H_INCLUDED__
+#define __TIMED_CTRL_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+/*! Write to a control register of TIMED_CTRL[ID]
+
+ \param ID[in] TIMED_CTRL identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, TIMED_CTRL[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_TIMED_CTRL_H void timed_ctrl_reg_store(
+ const timed_ctrl_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value);
+
+void timed_ctrl_snd_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ hrt_address addr,
+ hrt_data value);
+
+void timed_ctrl_snd_sp_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const sp_ID_t SP_ID,
+ hrt_address offset,
+ hrt_data value);
+
+void timed_ctrl_snd_gpio_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const gpio_ID_t GPIO_ID,
+ hrt_address offset,
+ hrt_data value);
+
+#endif /* __TIMED_CTRL_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h
new file mode 100644
index 000000000000..577b9b8449e8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h
@@ -0,0 +1,18 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VAMEM_PUBLIC_H_INCLUDED__
+#define __VAMEM_PUBLIC_H_INCLUDED__
+
+#endif /* __VAMEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h
new file mode 100644
index 000000000000..e9801c0fe147
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_PUBLIC_H_INCLUDED__
+#define __VMEM_PUBLIC_H_INCLUDED__
+
+#include "isp.h" /* tmemvectoru */
+
+#endif /* __VMEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/ibuf_ctrl.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/ibuf_ctrl.h
new file mode 100644
index 000000000000..f9cf7b586045
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/ibuf_ctrl.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_H_INCLUDED__
+#define __IBUF_CTRL_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "ibuf_ctrl_local.h"
+
+#ifndef __INLINE_IBUF_CTRL__
+#define STORAGE_CLASS_IBUF_CTRL_H extern
+#define STORAGE_CLASS_IBUF_CTRL_C
+#include "ibuf_ctrl_public.h"
+#else /* __INLINE_IBUF_CTRL__ */
+#define STORAGE_CLASS_IBUF_CTRL_H static inline
+#define STORAGE_CLASS_IBUF_CTRL_C static inline
+#include "ibuf_ctrl_private.h"
+#endif /* __INLINE_IBUF_CTRL__ */
+
+#endif /* __IBUF_CTRL_H_INCLUDED__ */
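The STORAGE_CLASS_* indirection above is the pattern every one of these wrapper headers follows. A sketch with a hypothetical device "foo" shows the two expansions; the names foo_reset, foo_reg_store and FOO_RESET_REG are invented for illustration:

/* foo_public.h -- one declaration serves two build modes */
STORAGE_CLASS_FOO_H void foo_reset(const foo_ID_t ID);

/* Without __INLINE_FOO__, STORAGE_CLASS_FOO_H expands to "extern" and
 * the body below lives in foo.c; with __INLINE_FOO__, both macros
 * expand to "static inline" and foo_private.h is pulled into every
 * includer, so calls compile down to the register access directly. */
STORAGE_CLASS_FOO_C void foo_reset(const foo_ID_t ID)
{
	foo_reg_store(ID, FOO_RESET_REG, 1);
}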
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h
new file mode 100644
index 000000000000..377996e0536d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_H_INCLUDED__
+#define __INPUT_FORMATTER_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "input_formatter_local.h"
+
+#ifndef __INLINE_INPUT_FORMATTER__
+#define STORAGE_CLASS_INPUT_FORMATTER_H extern
+#define STORAGE_CLASS_INPUT_FORMATTER_C
+#include "input_formatter_public.h"
+#else /* __INLINE_INPUT_FORMATTER__ */
+#define STORAGE_CLASS_INPUT_FORMATTER_H static inline
+#define STORAGE_CLASS_INPUT_FORMATTER_C static inline
+#include "input_formatter_private.h"
+#endif /* __INLINE_INPUT_FORMATTER__ */
+
+#endif /* __INPUT_FORMATTER_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h
new file mode 100644
index 000000000000..33ab8a85909e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_H_INCLUDED__
+#define __INPUT_SYSTEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "input_system_local.h"
+
+#ifndef __INLINE_INPUT_SYSTEM__
+#define STORAGE_CLASS_INPUT_SYSTEM_H extern
+#define STORAGE_CLASS_INPUT_SYSTEM_C
+#include "input_system_public.h"
+#else /* __INLINE_INPUT_SYSTEM__ */
+#define STORAGE_CLASS_INPUT_SYSTEM_H static inline
+#define STORAGE_CLASS_INPUT_SYSTEM_C static inline
+#include "input_system_private.h"
+#endif /* __INLINE_INPUT_SYSTEM__ */
+
+#endif /* __INPUT_SYSTEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h
new file mode 100644
index 000000000000..133dd9014fef
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_H_INCLUDED__
+#define __IRQ_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the IRQ device. It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "irq_local.h"
+
+#ifndef __INLINE_IRQ__
+#define STORAGE_CLASS_IRQ_H extern
+#define STORAGE_CLASS_IRQ_C
+#include "irq_public.h"
+#else /* __INLINE_IRQ__ */
+#define STORAGE_CLASS_IRQ_H static inline
+#define STORAGE_CLASS_IRQ_C static inline
+#include "irq_private.h"
+#endif /* __INLINE_IRQ__ */
+
+#endif /* __IRQ_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h
new file mode 100644
index 000000000000..749610b8a831
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_H_INCLUDED__
+#define __ISP_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the ISP cell. It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "isp_local.h"
+
+#ifndef __INLINE_ISP__
+#define STORAGE_CLASS_ISP_H extern
+#define STORAGE_CLASS_ISP_C
+#include "isp_public.h"
+#else /* __INLINE_ISP__ */
+#define STORAGE_CLASS_ISP_H static inline
+#define STORAGE_CLASS_ISP_C static inline
+#include "isp_private.h"
+#endif /* __INLINE_ISP__ */
+
+#endif /* __ISP_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_dma.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_dma.h
new file mode 100644
index 000000000000..dbdd17115018
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_dma.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_H_INCLUDED__
+#define __ISYS_DMA_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "isys_dma_local.h"
+
+#ifndef __INLINE_ISYS2401_DMA__
+#define STORAGE_CLASS_ISYS2401_DMA_H extern
+#define STORAGE_CLASS_ISYS2401_DMA_C
+#include "isys_dma_public.h"
+#else /* __INLINE_ISYS2401_DMA__ */
+#define STORAGE_CLASS_ISYS2401_DMA_H static inline
+#define STORAGE_CLASS_ISYS2401_DMA_C static inline
+#include "isys_dma_private.h"
+#endif /* __INLINE_ISYS2401_DMA__ */
+
+#endif /* __ISYS_DMA_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
new file mode 100644
index 000000000000..d3f64cfd0b7d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ISYS_IRQ_H__
+#define __IA_CSS_ISYS_IRQ_H__
+
+#include <type_support.h>
+#include <system_local.h>
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+#ifndef __INLINE_ISYS2401_IRQ__
+
+#define STORAGE_CLASS_ISYS2401_IRQ_H extern
+#define STORAGE_CLASS_ISYS2401_IRQ_C extern
+#include "isys_irq_public.h"
+
+#else /* __INLINE_ISYS2401_IRQ__ */
+
+#define STORAGE_CLASS_ISYS2401_IRQ_H static inline
+#define STORAGE_CLASS_ISYS2401_IRQ_C static inline
+#include "isys_irq_private.h"
+
+#endif /* __INLINE_ISYS2401_IRQ__ */
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __IA_CSS_ISYS_IRQ_H__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h
new file mode 100644
index 000000000000..e2ebeb14e7c2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "isys_stream2mmio_local.h"
+
+#ifndef __INLINE_STREAM2MMIO__
+#define STORAGE_CLASS_STREAM2MMIO_H extern
+#define STORAGE_CLASS_STREAM2MMIO_C
+#include "isys_stream2mmio_public.h"
+#else /* __INLINE_STREAM2MMIO__ */
+#define STORAGE_CLASS_STREAM2MMIO_H static inline
+#define STORAGE_CLASS_STREAM2MMIO_C static inline
+#include "isys_stream2mmio_private.h"
+#endif /* __INLINE_STREAM2MMIO__ */
+
+#endif /* __ISYS_STREAM2MMIO_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
new file mode 100644
index 000000000000..0b9519c8961c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
@@ -0,0 +1,153 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MATH_SUPPORT_H
+#define __MATH_SUPPORT_H
+
+#include <linux/kernel.h> /* Override the definition of max/min from the linux kernel */
+
+#define IS_ODD(a) ((a) & 0x1)
+#define IS_EVEN(a) (!IS_ODD(a))
+
+/* force a value to a lower even value */
+#define EVEN_FLOOR(x) ((x) & ~1)
+
+/* ISP2401 */
+/* If the number is odd, find the next even number */
+#define EVEN_CEIL(x) ((IS_ODD(x)) ? ((x) + 1) : (x))
+
+/* A => B */
+#define IMPLIES(a, b) (!(a) || (b))
+
+/* for preprocessor and array sizing use MIN and MAX
+ otherwise use min and max */
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#define ROUND_DIV(a, b) (((b) != 0) ? ((a) + ((b) >> 1)) / (b) : 0)
+#define CEIL_DIV(a, b) (((b) != 0) ? ((a) + (b) - 1) / (b) : 0)
+#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b))
+#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1))
+#define CEIL_SHIFT(a, b) (((a) + (1 << (b)) - 1) >> (b))
+#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b))
+#define ROUND_HALF_DOWN_DIV(a, b) (((b) != 0) ? ((a) + (b / 2) - 1) / (b) : 0)
+#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b))
+
+/* To find the next power-of-2 number from x */
+#define bit2(x) ((x) | ((x) >> 1))
+#define bit4(x) (bit2(x) | (bit2(x) >> 2))
+#define bit8(x) (bit4(x) | (bit4(x) >> 4))
+#define bit16(x) (bit8(x) | (bit8(x) >> 8))
+#define bit32(x) (bit16(x) | (bit16(x) >> 16))
+#define NEXT_POWER_OF_2(x) (bit32((x) - 1) + 1)
+
+/* min and max should not be macros, as a macro evaluates its arguments
+ twice. If you really need a macro (e.g. for CPP or for initializing an
+ array), use MIN() and MAX(); otherwise use min() and max().
+*/
+
+#if !defined(PIPE_GENERATION)
+
+/*
+These macro versions are added back because we mix types when using the
+inline functions, which makes corner cases of calculations incorrect due to
+conversions between signed and unsigned variables, or to overflows.
+Before the inline functions were added, max, min and ceil_div were macros,
+so those are restored here.
+
+The other math utility functions are left out, as they are newly added.
+*/
+
+#define ceil_div(a, b) (CEIL_DIV(a, b))
+
+static inline unsigned int ceil_mul(unsigned int a, unsigned int b)
+{
+ return CEIL_MUL(a, b);
+}
+
+static inline unsigned int ceil_mul2(unsigned int a, unsigned int b)
+{
+ return CEIL_MUL2(a, b);
+}
+
+static inline unsigned int ceil_shift(unsigned int a, unsigned int b)
+{
+ return CEIL_SHIFT(a, b);
+}
+
+static inline unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
+{
+ return CEIL_SHIFT_MUL(a, b);
+}
+
+/* ISP2401 */
+static inline unsigned int round_half_down_div(unsigned int a, unsigned int b)
+{
+ return ROUND_HALF_DOWN_DIV(a, b);
+}
+
+/* ISP2401 */
+static inline unsigned int round_half_down_mul(unsigned int a, unsigned int b)
+{
+ return ROUND_HALF_DOWN_MUL(a, b);
+}
+
+/* @brief Next Power of Two
+ *
+ * @param[in] unsigned number
+ *
+ * @return next power of two
+ *
+ * This function rounds input to the nearest power of 2 (2^x)
+ * towards infinity
+ *
+ * Input Range: 0 .. 2^(8*sizeof(int)-1)
+ *
+ * IF input is a power of 2
+ * out = in
+ * OTHERWISE
+ * out = 2^(ceil(log2(in)))
+ *
+ */
+
+static inline unsigned int ceil_pow2(unsigned int a)
+{
+ if (a == 0) {
+ return 1;
+ }
+ /* IF input is already a power of two*/
+ else if ((!((a) & ((a) - 1)))) {
+ return a;
+ } else {
+ unsigned int v = a;
+
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return (v + 1);
+ }
+}
+
+#endif /* !defined(PIPE_GENERATION) */
+
+/*
+ * For SP and ISP, SDK provides the definition of OP_std_modadd.
+ * We need it only for host
+ */
+#define OP_std_modadd(base, offset, size) ((base + offset) % (size))
+
+#endif /* __MATH_SUPPORT_H */
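To make the macro-versus-inline warning above concrete, a small sketch; all values are compile-time examples:

static int example_min_max_pitfall(void)
{
	int i = 0;
	int a[4] = { 3, 1, 4, 1 };
	/* MAX(a[i++], 2) would increment i twice, since the macro
	 * evaluates its argument twice - use MAX()/MIN() only for
	 * constant expressions (array sizing, preprocessor arithmetic). */
	int bound = MAX(4, 2);           /* fine: constants only */
	int cells = CEIL_DIV(10, 4);     /* 3: (10 + 4 - 1) / 4 */
	int pow2  = ceil_pow2(10);       /* 16: next power of two */

	return bound + cells + pow2 + a[i];
}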
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_access/memory_access.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_access/memory_access.h
new file mode 100644
index 000000000000..dc63ff0c9c6a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_access/memory_access.h
@@ -0,0 +1,174 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015-2017, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MEMORY_ACCESS_H_INCLUDED__
+#define __MEMORY_ACCESS_H_INCLUDED__
+
+/*!
+ * \brief
+ * Define the public interface for virtual memory
+ * access functions. Access types are limited to
+ * those defined in <stdint.h>
+ *
+ * The address representation is private to the system
+ * and represented as "hrt_vaddress" rather than a
+ * pointer, as the memory allocation cannot be accessed
+ * by dereferencing but requires load and store access
+ * functions
+ *
+ * The page table selection or virtual memory context;
+ * The page table base index; Is implicit. This page
+ * table base index must be set by the implementation
+ * of the access function
+ *
+ * "store" is a transfer to the system
+ * "load" is a transfer from the system
+ *
+ * Allocation properties can be specified by setting
+ * attributes (see below) in case of multiple physical
+ * memories the memory ID is encoded on the attribute
+ *
+ * Allocations in the same physical memory, but in a
+ * different (set of) page tables can be shared through
+ * a page table information mapping function
+ */
+
+#include <type_support.h>
+#include "platform_support.h" /* for __func__ */
+
+/*
+ * User provided file that defines the (sub)system address types:
+ * - hrt_vaddress a type that can hold the (sub)system virtual address range
+ */
+#include "system_types.h"
+
+/*
+ * The MMU base address is a physical address, thus the same type is used
+ * as for the device base address
+ */
+#include "device_access.h"
+
+#include "hmm/hmm.h"
+
+/*!
+ * \brief
+ * Bit masks for specialised allocation functions
+ * the default is "uncached", "not contiguous",
+ * "not page aligned" and "not cleared"
+ *
+ * Forcing alignment (usually) returns a pointer
+ * at an alignment boundary that is offset from
+ * the allocated pointer. Without storing this
+ * pointer/offset, we cannot free it. The memory
+ * manager is responsible for the bookkeeping, e.g.
+ * the allocation function creates a sentinel
+ * within the allocation referenceable from the
+ * returned pointer/address.
+ */
+#define MMGR_ATTRIBUTE_MASK 0x000f
+#define MMGR_ATTRIBUTE_CACHED 0x0001
+#define MMGR_ATTRIBUTE_CONTIGUOUS 0x0002
+#define MMGR_ATTRIBUTE_PAGEALIGN 0x0004
+#define MMGR_ATTRIBUTE_CLEARED 0x0008
+#define MMGR_ATTRIBUTE_UNUSED 0xfff0
+
+/* #define MMGR_ATTRIBUTE_DEFAULT (MMGR_ATTRIBUTE_CACHED) */
+#define MMGR_ATTRIBUTE_DEFAULT 0
+
+extern const hrt_vaddress mmgr_NULL;
+extern const hrt_vaddress mmgr_EXCEPTION;
+
+/*! Return the address of an allocation in memory
+
+ \param size[in] Size in bytes of the allocation
+
+ \return vaddress
+ */
+hrt_vaddress mmgr_malloc(const size_t size);
+
+/*! Return the address of a zero initialised allocation in memory
+
+ \param N[in] Number of elements
+ \param size[in] Size in bytes of one element; the total size is N*size
+
+ \return vaddress
+ */
+hrt_vaddress mmgr_calloc(const size_t N, const size_t size);
+
+/*! Return the address of an allocation in memory
+
+ \param size[in] Size in bytes of the allocation
+ \param attribute[in] Bit vector specifying the properties
+ of the allocation including zero initialisation
+
+ \return vaddress
+ */
+
+hrt_vaddress mmgr_alloc_attr(const size_t size, const uint16_t attribute);
+
+/*! Return the address of a mapped existing allocation in memory
+
+ \param ptr[in] Pointer to an allocation in a different
+ virtual memory page table, but the same
+ physical memory
+ \param size[in] Size of the memory of the pointer
+ \param attribute[in] Bit vector specifying the properties
+ of the allocation
+ \param context Pointer of a context provided by
+ client/driver for additional parameters
+ needed by the implementation
+ \Note
+ This interface is tentative, limited to the desired function;
+ the actual interface may require further parameters
+
+ \return vaddress
+ */
+hrt_vaddress mmgr_mmap(
+ const void __user *ptr,
+ const size_t size,
+ u16 attribute,
+ void *context);
+
+/*! Zero initialise an allocation in memory
+
+ \param vaddr[in] Address of an allocation
+ \param size[in] Size in bytes of the area to be cleared
+
+ \return none
+ */
+void mmgr_clear(hrt_vaddress vaddr, const size_t size);
+
+/*! Read an array of bytes from a virtual memory address
+
+ \param vaddr[in] Address of an allocation
+ \param data[out] pointer to the destination array
+ \param size[in] number of bytes to read
+
+ \return none
+ */
+void mmgr_load(const hrt_vaddress vaddr, void *data, const size_t size);
+
+/*! Write an array of bytes to device registers or memory in the device
+
+ \param vaddr[in] Address of an allocation
+ \param data[in] pointer to the source array
+ \param size[in] number of bytes to write
+
+ \return none
+ */
+void mmgr_store(const hrt_vaddress vaddr, const void *data, const size_t size);
+
+#endif /* __MEMORY_ACCESS_H_INCLUDED__ */
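A sketch of the allocation/transfer flow these declarations imply: attribute flags combine with OR, and data moves only through mmgr_store()/mmgr_load() since an hrt_vaddress cannot be dereferenced. The buffer size is illustrative:

static void example_mmgr_roundtrip(void)
{
	uint8_t host_buf[64] = { 0 };
	hrt_vaddress vbuf;

	vbuf = mmgr_alloc_attr(sizeof(host_buf),
			       MMGR_ATTRIBUTE_PAGEALIGN |
			       MMGR_ATTRIBUTE_CLEARED);
	if (vbuf == mmgr_NULL)
		return;

	mmgr_store(vbuf, host_buf, sizeof(host_buf));  /* host -> system */
	mmgr_load(vbuf, host_buf, sizeof(host_buf));   /* system -> host */
}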
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_realloc.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_realloc.h
new file mode 100644
index 000000000000..546b93ca1186
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_realloc.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef __MEMORY_REALLOC_H_INCLUDED__
+#define __MEMORY_REALLOC_H_INCLUDED__
+
+/*!
+ * \brief
+ * Define the internal reallocation of private css memory
+ *
+ */
+
+#include <type_support.h>
+/*
+ * User provided file that defines the (sub)system address types:
+ * - hrt_vaddress a type that can hold the (sub)system virtual address range
+ */
+#include "system_types.h"
+#include "ia_css_err.h"
+
+bool reallocate_buffer(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err);
+
+#endif /*__MEMORY_REALLOC_H_INCLUDED__*/
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h
new file mode 100644
index 000000000000..38db1ecef3c8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MISC_SUPPORT_H_INCLUDED__
+#define __MISC_SUPPORT_H_INCLUDED__
+
+/* suppress compiler warnings on unused variables */
+#ifndef NOT_USED
+#define NOT_USED(a) ((void)(a))
+#endif
+
+/* Calculate the total bytes for pow(2) byte alignment */
+#define tot_bytes_for_pow2_align(pow2, cur_bytes) (((cur_bytes) + ((pow2) - 1)) & ~((pow2) - 1))
+
+#endif /* __MISC_SUPPORT_H_INCLUDED__ */
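A worked example of the alignment macro, assuming a 64-byte alignment requirement: 100 bytes rounds up to 128, since (100 + 63) & ~63 == 128.

static unsigned int example_pad_to_64(unsigned int nbytes)
{
	/* e.g. example_pad_to_64(100) returns 128 */
	return tot_bytes_for_pow2_align(64, nbytes);
}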
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h
new file mode 100644
index 000000000000..1a36cb493fd8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MMU_DEVICE_H_INCLUDED__
+#define __MMU_DEVICE_H_INCLUDED__
+
+/* The name "mmu.h" is already taken, hence this file is named mmu_device.h */
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the MMU device. It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "mmu_local.h"
+
+#include "mmu_public.h"
+
+#endif /* __MMU_DEVICE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h
new file mode 100644
index 000000000000..74335fdeff7d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_H_INCLUDED__
+#define __PIXELGEN_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "pixelgen_local.h"
+
+#ifndef __INLINE_PIXELGEN__
+#define STORAGE_CLASS_PIXELGEN_H extern
+#define STORAGE_CLASS_PIXELGEN_C
+#include "pixelgen_public.h"
+#else /* __INLINE_PIXELGEN__ */
+#define STORAGE_CLASS_PIXELGEN_H static inline
+#define STORAGE_CLASS_PIXELGEN_C static inline
+#include "pixelgen_private.h"
+#endif /* __INLINE_PIXELGEN__ */
+
+#endif /* __PIXELGEN_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h
new file mode 100644
index 000000000000..525c34882fd7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PLATFORM_SUPPORT_H_INCLUDED__
+#define __PLATFORM_SUPPORT_H_INCLUDED__
+
+/**
+* @file
+* Platform specific includes and functionality.
+*/
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/* For definition of hrt_sleep() */
+#include "hive_isp_css_custom_host_hrt.h"
+
+#define UINT16_MAX USHRT_MAX
+#define UINT32_MAX UINT_MAX
+#define UCHAR_MAX (255)
+
+#define CSS_ALIGN(d, a) d __attribute__((aligned(a)))
+
+#endif /* __PLATFORM_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
new file mode 100644
index 000000000000..f5fcf6b1d667
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PRINT_SUPPORT_H_INCLUDED__
+#define __PRINT_SUPPORT_H_INCLUDED__
+
+#include <stdarg.h>
+
+extern int (*sh_css_printf)(const char *fmt, va_list args);
+/* depends on host supplied print function in ia_css_init() */
+static inline void ia_css_print(const char *fmt, ...)
+{
+ va_list ap;
+
+ if (sh_css_printf) {
+ va_start(ap, fmt);
+ sh_css_printf(fmt, ap);
+ va_end(ap);
+ }
+}
+
+/* Start adding support for bxt tracing functions for poc. From
+ * bxt_sandbox/support/print_support.h. */
+/* TODO: support these macros in userspace. */
+#define PWARN(format, ...) ia_css_print("warning: " format, ##__VA_ARGS__)
+#define PRINT(format, ...) ia_css_print(format, ##__VA_ARGS__)
+#define PERROR(format, ...) ia_css_print("error: " format, ##__VA_ARGS__)
+#define PDEBUG(format, ...) ia_css_print("debug: " format, ##__VA_ARGS__)
+
+#endif /* __PRINT_SUPPORT_H_INCLUDED__ */
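A sketch of the wiring the comment above assumes: the host supplies a vprintf-style callback before ia_css_print() produces output (in the driver this happens during ia_css_init()). The kernel's vprintk() matches the pointer type, so it serves as the callback here:

#include <linux/printk.h>

static void example_enable_css_printing(void)
{
	sh_css_printf = vprintk;
	ia_css_print("CSS printing enabled: %d\n", 1);
}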
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h
new file mode 100644
index 000000000000..1bcadd838161
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __QUEUE_H_INCLUDED__
+#define __QUEUE_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and is system agnostic
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: cell specific interfaces
+ * - private: cell specific inline implementations
+ * - global: inter cell constants and identifiers
+ * - local: cell specific constants and identifiers
+ *
+ */
+
+#include "queue_local.h"
+
+#ifndef __INLINE_QUEUE__
+#define STORAGE_CLASS_QUEUE_H extern
+#define STORAGE_CLASS_QUEUE_C
+/* #include "queue_public.h" */
+#include "ia_css_queue.h"
+#else /* __INLINE_QUEUE__ */
+#define STORAGE_CLASS_QUEUE_H static inline
+#define STORAGE_CLASS_QUEUE_C static inline
+#include "queue_private.h"
+#endif /* __INLINE_QUEUE__ */
+
+#endif /* __QUEUE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h
new file mode 100644
index 000000000000..129446600067
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __RESOURCE_H_INCLUDED__
+#define __RESOURCE_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses a RESOURCE manager. It defines the API to DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "system_local.h"
+#include "resource_local.h"
+
+#ifndef __INLINE_RESOURCE__
+#define STORAGE_CLASS_RESOURCE_H extern
+#define STORAGE_CLASS_RESOURCE_C
+#include "resource_public.h"
+#else /* __INLINE_RESOURCE__ */
+#define STORAGE_CLASS_RESOURCE_H static inline
+#define STORAGE_CLASS_RESOURCE_C static inline
+#include "resource_private.h"
+#endif /* __INLINE_RESOURCE__ */
+
+#endif /* __RESOURCE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h
new file mode 100644
index 000000000000..194cd64a7da8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_H_INCLUDED__
+#define __SP_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the SP cell. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "sp_local.h"
+
+#ifndef __INLINE_SP__
+#define STORAGE_CLASS_SP_H extern
+#define STORAGE_CLASS_SP_C
+#include "sp_public.h"
+#else /* __INLINE_SP__ */
+#define STORAGE_CLASS_SP_H static inline
+#define STORAGE_CLASS_SP_C static inline
+#include "sp_private.h"
+#endif /* __INLINE_SP__ */
+
+#endif /* __SP_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/string_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/string_support.h
new file mode 100644
index 000000000000..84efbbe78650
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/string_support.h
@@ -0,0 +1,165 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __STRING_SUPPORT_H_INCLUDED__
+#define __STRING_SUPPORT_H_INCLUDED__
+#include <platform_support.h>
+#include <type_support.h>
+
+/*
+ * For all non microsoft cases, we need the following functions
+ */
+
+/* @brief Copy from src_buf to dest_buf.
+ *
+ * @param[out] dest_buf. Destination buffer to copy to
+ * @param[in] dest_size. The size of the destination buffer in bytes
+ * @param[in] src_buf. The source buffer
+ * @param[in] src_size. The size of the source buffer in bytes
+ * @return Returns 0 on success
+ * @return Returns EINVAL on invalid arguments
+ * @return Returns ERANGE when the destination is too small
+ */
+static inline int memcpy_s(
+ void *dest_buf,
+ size_t dest_size,
+ const void *src_buf,
+ size_t src_size)
+{
+ if ((!src_buf) || (!dest_buf)) {
+ /* Invalid arguments*/
+ return EINVAL;
+ }
+
+ if ((dest_size < src_size) || (src_size == 0)) {
+		/* Destination too small, or nothing to copy */
+ return ERANGE;
+ }
+
+ memcpy(dest_buf, src_buf, src_size);
+ return 0;
+}
+
+/* @brief Get the length of the string, excluding the null terminator
+ *
+ * @param[in] src_str. The source string
+ * @param[in] max_len. Look only for max_len bytes in the string
+ * @return Returns the string length, excluding the null terminator
+ * @return Returns max_len if there is no null character in the first max_len bytes
+ * @return Returns 0 if src_str is NULL
+ */
+static inline size_t strnlen_s(
+ const char *src_str,
+ size_t max_len)
+{
+ size_t ix;
+
+ if (!src_str) {
+ /* Invalid arguments*/
+ return 0;
+ }
+
+ for (ix = 0; ix < max_len && src_str[ix] != '\0'; ix++)
+ ;
+
+	/* If no null terminator was found, ix == max_len here */
+ return ix;
+}
+
+/* @brief Copy string from src_str to dest_str
+ *
+ * @param[out] dest_str. Destination buffer to copy to
+ * @param[in] dest_size. The size of the destination buffer in bytes
+ * @param[in] src_str. The source buffer
+ * @param[in] src_size. The size of the source buffer in bytes
+ * @return Returns 0 on success
+ * @return Returns EINVAL on invalid arguments
+ * @return Returns ERANGE on destination size too small
+ */
+static inline int strncpy_s(
+ char *dest_str,
+ size_t dest_size,
+ const char *src_str,
+ size_t src_size)
+{
+ size_t len;
+
+ if (!dest_str) {
+ /* Invalid arguments*/
+ return EINVAL;
+ }
+
+ if ((!src_str) || (dest_size == 0)) {
+ /* Invalid arguments*/
+ dest_str[0] = '\0';
+ return EINVAL;
+ }
+
+ len = strnlen_s(src_str, src_size);
+
+ if (len >= dest_size) {
+ /* Destination too small*/
+ dest_str[0] = '\0';
+ return ERANGE;
+ }
+
+ /* dest_str is big enough for the len */
+ strncpy(dest_str, src_str, len);
+ dest_str[len] = '\0';
+ return 0;
+}
+
+/* @brief Copy string from src_str to dest_str
+ *
+ * @param[out] dest_str. Destination buffer to copy to
+ * @param[in] dest_size. The size of the destination buffer in bytes
+ * @param[in] src_str. The source buffer
+ * @return Returns 0 on success
+ * @return Returns EINVAL on invalid arguments
+ * @return Returns ERANGE on destination size too small
+ */
+static inline int strcpy_s(
+ char *dest_str,
+ size_t dest_size,
+ const char *src_str)
+{
+ size_t len;
+
+ if (!dest_str) {
+ /* Invalid arguments*/
+ return EINVAL;
+ }
+
+ if ((!src_str) || (dest_size == 0)) {
+ /* Invalid arguments*/
+ dest_str[0] = '\0';
+ return EINVAL;
+ }
+
+ len = strnlen_s(src_str, dest_size);
+
+ if (len >= dest_size) {
+ /* Destination too small*/
+ dest_str[0] = '\0';
+ return ERANGE;
+ }
+
+ /* dest_str is big enough for the len */
+ strncpy(dest_str, src_str, len);
+ dest_str[len] = '\0';
+ return 0;
+}
+
+#endif /* __STRING_SUPPORT_H_INCLUDED__ */
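
Editor's note: together these helpers form a small bounds-checked string API in the spirit of C11 Annex K, returning positive errno-style codes rather than the kernel's usual negative convention. A short usage sketch (the function, buffer, and sensor name here are hypothetical):

static int copy_sensor_name(char *out, size_t out_size)
{
	int err;

	/* strcpy_s() bounds the copy by the destination size and
	 * NUL-terminates on success; note the positive error codes.
	 */
	err = strcpy_s(out, out_size, "imx219");
	if (err == ERANGE)
		return -ENOSPC;	/* destination buffer too small */
	if (err == EINVAL)
		return -EINVAL;	/* NULL pointer or zero-sized buffer */
	return 0;
}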
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/system_types.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/system_types.h
new file mode 100644
index 000000000000..764fda8dd214
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/system_types.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef __SYSTEM_TYPES_H_INCLUDED__
+#define __SYSTEM_TYPES_H_INCLUDED__
+
+/**
+* @file
+* Platform specific types.
+*/
+
+#include "system_local.h"
+
+#endif /* __SYSTEM_TYPES_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h
new file mode 100644
index 000000000000..1f0a5d948316
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_H_INCLUDED__
+#define __TAG_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and is system agnostic
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: cell specific interfaces
+ * - private: cell specific inline implementations
+ * - global: inter cell constants and identifiers
+ * - local: cell specific constants and identifiers
+ *
+ */
+
+#include "tag_local.h"
+
+#ifndef __INLINE_TAG__
+#define STORAGE_CLASS_TAG_H extern
+#define STORAGE_CLASS_TAG_C
+#include "tag_public.h"
+#else /* __INLINE_TAG__ */
+#define STORAGE_CLASS_TAG_H static inline
+#define STORAGE_CLASS_TAG_C static inline
+#include "tag_private.h"
+#endif /* __INLINE_TAG__ */
+
+#endif /* __TAG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h
new file mode 100644
index 000000000000..403abcb828bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_H_INCLUDED__
+#define __TIMED_CTRL_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "timed_ctrl_local.h"
+
+#ifndef __INLINE_TIMED_CTRL__
+#define STORAGE_CLASS_TIMED_CTRL_H extern
+#define STORAGE_CLASS_TIMED_CTRL_C
+#include "timed_ctrl_public.h"
+#else /* __INLINE_TIMED_CTRL__ */
+#define STORAGE_CLASS_TIMED_CTRL_H static inline
+#define STORAGE_CLASS_TIMED_CTRL_C static inline
+#include "timed_ctrl_private.h"
+#endif /* __INLINE_TIMED_CTRL__ */
+
+#endif /* __TIMED_CTRL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h
new file mode 100644
index 000000000000..bc77537fa73a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TYPE_SUPPORT_H_INCLUDED__
+#define __TYPE_SUPPORT_H_INCLUDED__
+
+/**
+* @file
+* Platform specific types.
+*
+* Per the DLI spec, types belong in "type_support.h", while
+* "platform_support.h" holds unclassified platform specific
+* definitions that are still to be refactored.
+*/
+
+#define IA_CSS_UINT8_T_BITS 8
+#define IA_CSS_UINT16_T_BITS 16
+#define IA_CSS_UINT32_T_BITS 32
+#define IA_CSS_INT32_T_BITS 32
+#define IA_CSS_UINT64_T_BITS 64
+
+#ifndef CHAR_BIT
+#define CHAR_BIT (8)
+#endif
+
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/errno.h>
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#endif /* __TYPE_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h
new file mode 100644
index 000000000000..9918ca398138
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VAMEM_H_INCLUDED__
+#define __VAMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the VAMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "vamem_local.h"
+#include "vamem_public.h"
+
+#endif /* __VAMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h
new file mode 100644
index 000000000000..873e01e6d054
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_H_INCLUDED__
+#define __VMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the VMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "vmem_local.h"
+
+#ifndef __INLINE_VMEM__
+#define STORAGE_CLASS_VMEM_H extern
+#define STORAGE_CLASS_VMEM_C
+#include "vmem_public.h"
+#else /* __INLINE_VMEM__ */
+#define STORAGE_CLASS_VMEM_H static inline
+#define STORAGE_CLASS_VMEM_C static inline
+#include "vmem_private.h"
+#endif /* __INLINE_VMEM__ */
+
+#endif /* __VMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h
new file mode 100644
index 000000000000..9f4060319b4b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __QUEUE_LOCAL_H_INCLUDED__
+#define __QUEUE_LOCAL_H_INCLUDED__
+
+#include "queue_global.h"
+
+#endif /* __QUEUE_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h
new file mode 100644
index 000000000000..2b396955cdad
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h
@@ -0,0 +1,18 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __QUEUE_PRIVATE_H_INCLUDED__
+#define __QUEUE_PRIVATE_H_INCLUDED__
+
+#endif /* __QUEUE_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c
new file mode 100644
index 000000000000..a7089ee7462a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c
@@ -0,0 +1,91 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "tag.h"
+#include <platform_support.h> /* NULL */
+#include <assert_support.h>
+#include "tag_local.h"
+
+/*
+ * @brief Creates the tag description from the given parameters.
+ * @param[in] num_captures
+ * @param[in] skip
+ * @param[in]	offset
+ * @param[in]	exp_id
+ * @param[out]	tag_descr
+ */
+void
+sh_css_create_tag_descr(int num_captures,
+ unsigned int skip,
+ int offset,
+ unsigned int exp_id,
+ struct sh_css_tag_descr *tag_descr)
+{
+ assert(tag_descr);
+
+ tag_descr->num_captures = num_captures;
+ tag_descr->skip = skip;
+ tag_descr->offset = offset;
+ tag_descr->exp_id = exp_id;
+}
+
+/*
+ * @brief Encodes the members of tag description into a 32-bit value.
+ * @param[in] tag Pointer to the tag description
+ * @return (unsigned int) Encoded 32-bit tag-info
+ */
+unsigned int
+sh_css_encode_tag_descr(struct sh_css_tag_descr *tag)
+{
+ int num_captures;
+ unsigned int num_captures_sign;
+ unsigned int skip;
+ int offset;
+ unsigned int offset_sign;
+ unsigned int exp_id;
+ unsigned int encoded_tag;
+
+ assert(tag);
+
+ if (tag->num_captures < 0) {
+ num_captures = -tag->num_captures;
+ num_captures_sign = 1;
+ } else {
+ num_captures = tag->num_captures;
+ num_captures_sign = 0;
+ }
+ skip = tag->skip;
+ if (tag->offset < 0) {
+ offset = -tag->offset;
+ offset_sign = 1;
+ } else {
+ offset = tag->offset;
+ offset_sign = 0;
+ }
+ exp_id = tag->exp_id;
+
+ if (exp_id != 0) {
+ /* we encode either an exp_id or capture data */
+ assert((num_captures == 0) && (skip == 0) && (offset == 0));
+
+ encoded_tag = TAG_EXP | (exp_id & 0xFF) << TAG_EXP_ID_SHIFT;
+ } else {
+ encoded_tag = TAG_CAP
+ | ((num_captures_sign & 0x00000001) << TAG_NUM_CAPTURES_SIGN_SHIFT)
+ | ((offset_sign & 0x00000001) << TAG_OFFSET_SIGN_SHIFT)
+ | ((num_captures & 0x000000FF) << TAG_NUM_CAPTURES_SHIFT)
+ | ((skip & 0x000000FF) << TAG_OFFSET_SHIFT)
+ | ((offset & 0x000000FF) << TAG_SKIP_SHIFT);
+ }
+ return encoded_tag;
+}
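
Editor's note: the capture branch above shifts skip by TAG_OFFSET_SHIFT and offset by TAG_SKIP_SHIFT; whether those two macros are deliberately cross-used or quietly swapped is undocumented, so the encoding is left untouched here and only flagged. A worked example using the constants from tag_global.h (values chosen for illustration):

/* num_captures = 2, skip = 1, offset = -1, exp_id = 0:
 *
 *   TAG_CAP                = 0x00000001
 *   offset_sign (1)  << 7  = 0x00000080
 *   num_captures (2) << 8  = 0x00000200
 *   skip (1)         << 16 = 0x00010000
 *   |offset| (1)     << 24 = 0x01000000
 *                            ----------
 *   encoded_tag            = 0x01010281
 */
struct sh_css_tag_descr d;

sh_css_create_tag_descr(2, 1, -1, 0, &d);
/* sh_css_encode_tag_descr(&d) == 0x01010281 */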
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h
new file mode 100644
index 000000000000..01a8977c189e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_LOCAL_H_INCLUDED__
+#define __TAG_LOCAL_H_INCLUDED__
+
+#include "tag_global.h"
+
+#define SH_CSS_MINIMUM_TAG_ID (-1)
+
+#endif /* __TAG_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h
new file mode 100644
index 000000000000..0570a95ec5bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h
@@ -0,0 +1,18 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_PRIVATE_H_INCLUDED__
+#define __TAG_PRIVATE_H_INCLUDED__
+
+#endif /* __TAG_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h
new file mode 100644
index 000000000000..ce0d99418538
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h
@@ -0,0 +1,18 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __QUEUE_GLOBAL_H_INCLUDED__
+#define __QUEUE_GLOBAL_H_INCLUDED__
+
+#endif /* __QUEUE_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h
new file mode 100644
index 000000000000..549c0d2b7970
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SW_EVENT_GLOBAL_H_INCLUDED__
+#define __SW_EVENT_GLOBAL_H_INCLUDED__
+
+#define MAX_NR_OF_PAYLOADS_PER_SW_EVENT 4
+
+enum ia_css_psys_sw_event {
+ IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED, /* from host to SP */
+ IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED, /* from SP to host */
+ IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, /* from SP to host, one way only */
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ IA_CSS_PSYS_SW_EVENT_STOP_STREAM,
+ IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY,
+ IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER,
+ IA_CSS_PSYS_SW_EVENT_STAGE_ENABLE_DISABLE /* for extension state change enable/disable */
+};
+
+enum ia_css_isys_sw_event {
+ IA_CSS_ISYS_SW_EVENT_EVENT_DEQUEUED
+};
+
+#endif /* __SW_EVENT_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h
new file mode 100644
index 000000000000..9db8766b3a7b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h
@@ -0,0 +1,56 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_GLOBAL_H_INCLUDED__
+#define __TAG_GLOBAL_H_INCLUDED__
+
+/* offsets for encoding/decoding the tag into an uint32_t */
+
+#define TAG_CAP 1
+#define TAG_EXP 2
+
+#define TAG_NUM_CAPTURES_SIGN_SHIFT 6
+#define TAG_OFFSET_SIGN_SHIFT 7
+#define TAG_NUM_CAPTURES_SHIFT 8
+#define TAG_OFFSET_SHIFT 16
+#define TAG_SKIP_SHIFT 24
+
+#define TAG_EXP_ID_SHIFT 8
+
+/* Data structure containing the tagging information which is used in
+ * continuous mode to specify which frames should be captured.
+ * num_captures The number of RAW frames to be processed to
+ * YUV. Setting this to -1 will make continuous
+ * capture run until it is stopped.
+ * skip Skip N frames in between captures. This can be
+ * used to select a slower capture frame rate than
+ * the sensor output frame rate.
+ * offset Start the RAW-to-YUV processing at RAW buffer
+ * with this offset. This allows the user to
+ * process RAW frames that were captured in the
+ * past or future.
+ * exp_id Exposure id of the RAW frame to tag.
+ *
+ * NOTE: either exp_id is 0, or all the other fields are 0
+ * (so this could have been a union)
+ */
+
+struct sh_css_tag_descr {
+ int num_captures;
+ unsigned int skip;
+ int offset;
+ unsigned int exp_id;
+};
+
+#endif /* __TAG_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h b/drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h
new file mode 100644
index 000000000000..a22b771f61f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_streaming_to_mipi_types_hrt_h_
+#define _hive_isp_css_streaming_to_mipi_types_hrt_h_
+
+#include <streaming_to_mipi_defs.h>
+
+#define _HIVE_ISP_CH_ID_MASK ((1U << HIVE_ISP_CH_ID_BITS) - 1)
+#define _HIVE_ISP_FMT_TYPE_MASK ((1U << HIVE_ISP_FMT_TYPE_BITS) - 1)
+
+#define _HIVE_STR_TO_MIPI_FMT_TYPE_LSB (HIVE_STR_TO_MIPI_CH_ID_LSB + HIVE_ISP_CH_ID_BITS)
+#define _HIVE_STR_TO_MIPI_DATA_B_LSB (HIVE_STR_TO_MIPI_DATA_A_LSB + HIVE_IF_PIXEL_WIDTH)
+
+#endif /* _hive_isp_css_streaming_to_mipi_types_hrt_h_ */
diff --git a/drivers/staging/media/atomisp/pci/hive_types.h b/drivers/staging/media/atomisp/pci/hive_types.h
new file mode 100644
index 000000000000..9715893c8a36
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_types.h
@@ -0,0 +1,128 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_HIVE_TYPES_H
+#define _HRT_HIVE_TYPES_H
+
+#include "version.h"
+#include "defs.h"
+
+#ifndef HRTCAT3
+#define _HRTCAT3(m, n, o) m##n##o
+#define HRTCAT3(m, n, o) _HRTCAT3(m, n, o)
+#endif
+
+#ifndef HRTCAT4
+#define _HRTCAT4(m, n, o, p) m##n##o##p
+#define HRTCAT4(m, n, o, p) _HRTCAT4(m, n, o, p)
+#endif
+
+#ifndef HRTMIN
+#define HRTMIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef HRTMAX
+#define HRTMAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+/* boolean data type */
+typedef unsigned int hive_bool;
+#define hive_false 0
+#define hive_true 1
+
+typedef char hive_int8;
+typedef short hive_int16;
+typedef int hive_int32;
+typedef long long hive_int64;
+
+typedef unsigned char hive_uint8;
+typedef unsigned short hive_uint16;
+typedef unsigned int hive_uint32;
+typedef unsigned long long hive_uint64;
+
+/* by default assume 32 bit master port (both data and address) */
+#ifndef HRT_DATA_WIDTH
+#define HRT_DATA_WIDTH 32
+#endif
+#ifndef HRT_ADDRESS_WIDTH
+#define HRT_ADDRESS_WIDTH 32
+#endif
+
+#define HRT_DATA_BYTES (HRT_DATA_WIDTH / 8)
+#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH / 8)
+
+#if HRT_DATA_WIDTH == 64
+typedef hive_uint64 hrt_data;
+#elif HRT_DATA_WIDTH == 32
+typedef hive_uint32 hrt_data;
+#else
+#error data width not supported
+#endif
+
+#if HRT_ADDRESS_WIDTH == 64
+typedef hive_uint64 hrt_address;
+#elif HRT_ADDRESS_WIDTH == 32
+typedef hive_uint32 hrt_address;
+#else
+#error address width not supported
+#endif
+
+/* The SP side representation of an HMM virtual address */
+typedef hive_uint32 hrt_vaddress;
+
+/* use 64 bit addresses in simulation, where possible */
+typedef hive_uint64 hive_sim_address;
+
+/* below is for csim, not for hrt, rename and move this elsewhere */
+
+typedef unsigned int hive_uint;
+typedef hive_uint32 hive_address;
+typedef hive_address hive_slave_address;
+typedef hive_address hive_mem_address;
+
+/* MMIO devices */
+typedef hive_uint hive_mmio_id;
+typedef hive_mmio_id hive_slave_id;
+typedef hive_mmio_id hive_port_id;
+typedef hive_mmio_id hive_master_id;
+typedef hive_mmio_id hive_mem_id;
+typedef hive_mmio_id hive_dev_id;
+typedef hive_mmio_id hive_fifo_id;
+
+typedef hive_uint hive_hier_id;
+typedef hive_hier_id hive_device_id;
+typedef hive_device_id hive_proc_id;
+typedef hive_device_id hive_cell_id;
+typedef hive_device_id hive_host_id;
+typedef hive_device_id hive_bus_id;
+typedef hive_device_id hive_bridge_id;
+typedef hive_device_id hive_fifo_adapter_id;
+typedef hive_device_id hive_custom_device_id;
+
+typedef hive_uint hive_slot_id;
+typedef hive_uint hive_fu_id;
+typedef hive_uint hive_reg_file_id;
+typedef hive_uint hive_reg_id;
+
+/* Streaming devices */
+typedef hive_uint hive_outport_id;
+typedef hive_uint hive_inport_id;
+
+typedef hive_uint hive_msink_id;
+
+/* HRT specific */
+typedef char *hive_program;
+typedef char *hive_function;
+
+#endif /* _HRT_HIVE_TYPES_H */
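
Editor's note: the two-level HRTCAT definitions above are the standard indirection that forces macro arguments to be expanded before ## pastes them together; a single-level m##n##o would paste the literal argument names instead. For example (CELL is a hypothetical macro, not part of this header):

#define CELL sp
/* Expands CELL first, then pastes, yielding the identifier
 * sp_dmem_base. A one-level paste would yield CELL_dmem_base.
 */
#define CELL_DMEM_BASE HRTCAT3(CELL, _dmem, _base)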
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
new file mode 100644
index 000000000000..cd70307ffd57
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -0,0 +1,733 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+/*
+ * This file contains the entry functions for memory management of the ISP driver
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/highmem.h> /* for kmap */
+#include <linux/io.h> /* for page_to_phys */
+#include <linux/sysfs.h>
+
+#include "hmm/hmm.h"
+#include "hmm/hmm_pool.h"
+#include "hmm/hmm_bo.h"
+
+#include "atomisp_internal.h"
+#include "asm/cacheflush.h"
+#include "mmu/isp_mmu.h"
+#include "mmu/sh_mmu_mrfld.h"
+
+struct hmm_bo_device bo_device;
+struct hmm_pool dynamic_pool;
+struct hmm_pool reserved_pool;
+static ia_css_ptr dummy_ptr;
+static bool hmm_initialized;
+struct _hmm_mem_stat hmm_mem_stat;
+
+/*
+ * p: private
+ * s: shared
+ * u: user
+ * i: ion
+ */
+static const char hmm_bo_type_string[] = "psui";
+
+static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
+ char *buf, struct list_head *bo_list, bool active)
+{
+ ssize_t ret = 0;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+ int i;
+ long total[HMM_BO_LAST] = { 0 };
+ long count[HMM_BO_LAST] = { 0 };
+ int index1 = 0;
+ int index2 = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
+ if (ret <= 0)
+ return 0;
+
+ index1 += ret;
+
+ spin_lock_irqsave(&bo_device.list_lock, flags);
+ list_for_each_entry(bo, bo_list, list) {
+ if ((active && (bo->status & HMM_BO_ALLOCED)) ||
+ (!active && !(bo->status & HMM_BO_ALLOCED))) {
+ ret = scnprintf(buf + index1, PAGE_SIZE - index1,
+ "%c %d\n",
+ hmm_bo_type_string[bo->type], bo->pgnr);
+
+ total[bo->type] += bo->pgnr;
+ count[bo->type]++;
+ if (ret > 0)
+ index1 += ret;
+ }
+ }
+ spin_unlock_irqrestore(&bo_device.list_lock, flags);
+
+ for (i = 0; i < HMM_BO_LAST; i++) {
+ if (count[i]) {
+ ret = scnprintf(buf + index1 + index2,
+ PAGE_SIZE - index1 - index2,
+ "%ld %c buffer objects: %ld KB\n",
+ count[i], hmm_bo_type_string[i],
+ total[i] * 4);
+ if (ret > 0)
+ index2 += ret;
+ }
+ }
+
+ /* Add trailing zero, not included by scnprintf */
+ return index1 + index2 + 1;
+}
+
+static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
+}
+
+static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
+}
+
+static ssize_t reserved_pool_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret = 0;
+
+ struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
+ unsigned long flags;
+
+ if (!pinfo || !pinfo->initialized)
+ return 0;
+
+ spin_lock_irqsave(&pinfo->list_lock, flags);
+ ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
+ pinfo->index, pinfo->pgnr);
+ spin_unlock_irqrestore(&pinfo->list_lock, flags);
+
+ if (ret > 0)
+ ret++; /* Add trailing zero, not included by scnprintf */
+
+ return ret;
+}
+
+static ssize_t dynamic_pool_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret = 0;
+
+ struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
+ unsigned long flags;
+
+ if (!pinfo || !pinfo->initialized)
+ return 0;
+
+ spin_lock_irqsave(&pinfo->list_lock, flags);
+ ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
+ pinfo->pgnr, pinfo->pool_size);
+ spin_unlock_irqrestore(&pinfo->list_lock, flags);
+
+ if (ret > 0)
+ ret++; /* Add trailing zero, not included by scnprintf */
+
+ return ret;
+}
+
+static DEVICE_ATTR_RO(active_bo);
+static DEVICE_ATTR_RO(free_bo);
+static DEVICE_ATTR_RO(reserved_pool);
+static DEVICE_ATTR_RO(dynamic_pool);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &dev_attr_active_bo.attr,
+ &dev_attr_free_bo.attr,
+ &dev_attr_reserved_pool.attr,
+ &dev_attr_dynamic_pool.attr,
+ NULL
+};
+
+static struct attribute_group atomisp_attribute_group[] = {
+ {.attrs = sysfs_attrs_ctrl },
+};
+
+int hmm_init(void)
+{
+ int ret;
+
+ ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
+ ISP_VM_START, ISP_VM_SIZE);
+ if (ret)
+ dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");
+
+ hmm_initialized = true;
+
+	/*
+	 * hmm uses 0 (NULL) to mark an invalid ISP virtual address, and
+	 * ISP_VM_START is defined as 0 as well, so allocate one piece
+	 * of dummy memory up front: it consumes address 0 and keeps any
+	 * later hmm_alloc() from returning 0 for a valid allocation.
+	 */
+ dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);
+
+ if (!ret) {
+ ret = sysfs_create_group(&atomisp_dev->kobj,
+ atomisp_attribute_group);
+ if (ret)
+ dev_err(atomisp_dev,
+ "%s Failed to create sysfs\n", __func__);
+ }
+
+ return ret;
+}
+
+void hmm_cleanup(void)
+{
+ sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);
+
+ /* free dummy memory first */
+ hmm_free(dummy_ptr);
+ dummy_ptr = 0;
+
+ hmm_bo_device_exit(&bo_device);
+ hmm_initialized = false;
+}
+
+ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
+ int from_highmem, const void __user *userptr, bool cached)
+{
+ unsigned int pgnr;
+ struct hmm_buffer_object *bo;
+ int ret;
+
+ /*
+ * Check if we are initialized. In the ideal world we wouldn't need
+ * this but we can tackle it once the driver is a lot cleaner
+ */
+
+ if (!hmm_initialized)
+ hmm_init();
+ /* Get page number from size */
+ pgnr = size_to_pgnr_ceil(bytes);
+
+ /* Buffer object structure init */
+ bo = hmm_bo_alloc(&bo_device, pgnr);
+ if (!bo) {
+ dev_err(atomisp_dev, "hmm_bo_create failed.\n");
+ goto create_bo_err;
+ }
+
+ /* Allocate pages for memory */
+ ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
+ if (ret) {
+ dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
+ goto alloc_page_err;
+ }
+
+	/* Bind the virtual address and the pages together */
+ ret = hmm_bo_bind(bo);
+ if (ret) {
+ dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
+ goto bind_err;
+ }
+
+ hmm_mem_stat.tol_cnt += pgnr;
+
+ return bo->start;
+
+bind_err:
+ hmm_bo_free_pages(bo);
+alloc_page_err:
+ hmm_bo_unref(bo);
+create_bo_err:
+ return 0;
+}
+
+void hmm_free(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ WARN_ON(!virt);
+
+ bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
+
+ if (!bo) {
+ dev_err(atomisp_dev,
+			"cannot find buffer object starting at address 0x%x\n",
+ (unsigned int)virt);
+ return;
+ }
+
+ hmm_mem_stat.tol_cnt -= bo->pgnr;
+
+ hmm_bo_unbind(bo);
+ hmm_bo_free_pages(bo);
+ hmm_bo_unref(bo);
+}
+
+static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
+{
+ if (!bo) {
+ dev_err(atomisp_dev,
+			"cannot find buffer object containing address 0x%x\n",
+ ptr);
+ return -EINVAL;
+ }
+
+ if (!hmm_bo_page_allocated(bo)) {
+ dev_err(atomisp_dev,
+ "buffer object has no page allocated.\n");
+ return -EINVAL;
+ }
+
+ if (!hmm_bo_allocated(bo)) {
+ dev_err(atomisp_dev,
+ "buffer object has no virtual address space allocated.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Read function in ISP memory management */
+static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
+ unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *src, *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ des = (char *)data;
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ src = (char *)kmap(bo->page_obj[idx].page) + offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len; /* update virt for next loop */
+
+ if (des) {
+ memcpy(des, src, len);
+ des += len;
+ }
+
+ clflush_cache_range(src, len);
+
+ kunmap(bo->page_obj[idx].page);
+ }
+
+ return 0;
+}
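
/* Editor's note -- worked example of the page-walk arithmetic above
 * (numbers are illustrative): with bo->start == 0x10000,
 * virt == 0x12345 and PAGE_SIZE == 4096:
 *
 *   idx    = (0x12345 - 0x10000) >> PAGE_SHIFT = 0x2345 >> 12 = 2
 *   offset = 0x2345 - (2 << 12)                = 0x345
 *
 * so the first iteration copies PAGE_SIZE - 0x345 bytes out of page 2
 * of the bo; every following iteration starts at offset 0 of the next
 * page until 'bytes' is exhausted.
 */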
+
+/* Read function in ISP memory management */
+static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *src = bo->vmap_addr;
+
+ src += (virt - bo->start);
+ memcpy(data, src, bytes);
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(src, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (!vptr)
+ return load_and_flush_by_kmap(virt, data, bytes);
+ else
+ vptr = vptr + (virt - bo->start);
+
+ memcpy(data, vptr, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ }
+
+ return 0;
+}
+
+/* Read function in ISP memory management */
+int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ if (!data) {
+ dev_err(atomisp_dev,
+ "hmm_load NULL argument\n");
+ return -EINVAL;
+ }
+ return load_and_flush(virt, data, bytes);
+}
+
+/* Flush hmm data from the data cache */
+int hmm_flush(ia_css_ptr virt, unsigned int bytes)
+{
+ return load_and_flush(virt, NULL, bytes);
+}
+
+/* Write function in ISP memory management */
+int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *src, *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *dst = bo->vmap_addr;
+
+ dst += (virt - bo->start);
+ memcpy(dst, data, bytes);
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(dst, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (vptr) {
+ vptr = vptr + (virt - bo->start);
+
+ memcpy(vptr, data, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ return 0;
+ }
+ }
+
+ src = (char *)data;
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ if (in_atomic())
+ des = (char *)kmap_atomic(bo->page_obj[idx].page);
+ else
+ des = (char *)kmap(bo->page_obj[idx].page);
+
+ if (!des) {
+ dev_err(atomisp_dev,
+ "kmap buffer object page failed: pg_idx = %d\n",
+ idx);
+ return -EINVAL;
+ }
+
+ des += offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len;
+
+ memcpy(des, src, len);
+
+ src += len;
+
+ clflush_cache_range(des, len);
+
+ if (in_atomic())
+ /*
+ * Note: kunmap_atomic requires return addr from
+ * kmap_atomic, not the page. See linux/highmem.h
+ */
+ kunmap_atomic(des - offset);
+ else
+ kunmap(bo->page_obj[idx].page);
+ }
+
+ return 0;
+}
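
/* Editor's note: a minimal, hypothetical round-trip through the hmm
 * API as wired up above; the size and test pattern are made up for
 * illustration and this function does not exist in the driver.
 */
static int hmm_selftest(void)
{
	u32 in = 0xcafef00d, out = 0;
	ia_css_ptr isp_addr;

	isp_addr = hmm_alloc(sizeof(in), HMM_BO_PRIVATE, 0, NULL,
			     HMM_UNCACHED);
	if (!isp_addr)
		return -ENOMEM;

	hmm_store(isp_addr, &in, sizeof(in));	/* IA -> ISP address space */
	hmm_load(isp_addr, &out, sizeof(out));	/* ISP -> IA address space */

	hmm_free(isp_addr);
	return in == out ? 0 : -EIO;
}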
+
+/* memset function in ISP memory management */
+int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *dst = bo->vmap_addr;
+
+ dst += (virt - bo->start);
+ memset(dst, c, bytes);
+
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(dst, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (vptr) {
+ vptr = vptr + (virt - bo->start);
+ memset(vptr, c, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ return 0;
+ }
+ }
+
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ des = (char *)kmap(bo->page_obj[idx].page) + offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len;
+
+ memset(des, c, len);
+
+ clflush_cache_range(des, len);
+
+ kunmap(bo->page_obj[idx].page);
+ }
+
+ return 0;
+}
+
+/* Virtual address to physical address convert */
+phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
+{
+ unsigned int idx, offset;
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+			"cannot find buffer object containing address 0x%x\n",
+ virt);
+ return -1;
+ }
+
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ return page_to_phys(bo->page_obj[idx].page) + offset;
+}
+
+int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_start(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+			"cannot find buffer object starting at address 0x%x\n",
+ virt);
+ return -EINVAL;
+ }
+
+ return hmm_bo_mmap(vma, bo);
+}
+
+/* Map ISP virtual address into IA virtual address */
+void *hmm_vmap(ia_css_ptr virt, bool cached)
+{
+ struct hmm_buffer_object *bo;
+ void *ptr;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+			"cannot find buffer object containing address 0x%x\n",
+ virt);
+ return NULL;
+ }
+
+ ptr = hmm_bo_vmap(bo, cached);
+ if (ptr)
+ return ptr + (virt - bo->start);
+ else
+ return NULL;
+}
+
+/* Flush the memory which is mapped as cached memory through hmm_vmap */
+void hmm_flush_vmap(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_warn(atomisp_dev,
+			 "cannot find buffer object containing address 0x%x\n",
+ virt);
+ return;
+ }
+
+ hmm_bo_flush_vmap(bo);
+}
+
+void hmm_vunmap(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_warn(atomisp_dev,
+			 "cannot find buffer object containing address 0x%x\n",
+ virt);
+ return;
+ }
+
+ hmm_bo_vunmap(bo);
+}
+
+int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)
+{
+#if 0 // Just use the "normal" pool
+ switch (pool_type) {
+ case HMM_POOL_TYPE_RESERVED:
+ reserved_pool.pops = &reserved_pops;
+ return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
+ pool_size);
+ case HMM_POOL_TYPE_DYNAMIC:
+ dynamic_pool.pops = &dynamic_pops;
+ return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
+ pool_size);
+ default:
+ dev_err(atomisp_dev, "invalid pool type.\n");
+ return -EINVAL;
+ }
+#else
+ return 0;
+#endif
+}
+
+void hmm_pool_unregister(enum hmm_pool_type pool_type)
+{
+#if 0 // Just use the "normal" pool
+ switch (pool_type) {
+ case HMM_POOL_TYPE_RESERVED:
+ if (reserved_pool.pops && reserved_pool.pops->pool_exit)
+ reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
+ break;
+ case HMM_POOL_TYPE_DYNAMIC:
+ if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
+ dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
+ break;
+ default:
+ dev_err(atomisp_dev, "invalid pool type.\n");
+ break;
+ }
+#endif
+}
+
+void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
+{
+ return hmm_vmap(ptr, cached);
+ /* vmunmap will be done in hmm_bo_release() */
+}
+
+ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
+ if (bo)
+ return bo->start;
+
+ dev_err(atomisp_dev,
+		"cannot find buffer object whose kernel virtual address is %p\n",
+ ptr);
+ return 0;
+}
+
+void hmm_show_mem_stat(const char *func, const int line)
+{
+ trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
+ hmm_mem_stat.tol_cnt,
+ hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
+ hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
+ hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
+}
+
+void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
+{
+ hmm_mem_stat.res_size = res_pgnr;
+ /* If reserved mem pool is not enabled, set its "mem stat" values as -1. */
+ if (hmm_mem_stat.res_size == 0) {
+ hmm_mem_stat.res_size = -1;
+ hmm_mem_stat.res_cnt = -1;
+ }
+
+ /* If dynamic memory pool is not enabled, set its "mem stat" values as -1. */
+ if (!dyc_en) {
+ hmm_mem_stat.dyc_size = -1;
+ hmm_mem_stat.dyc_thr = -1;
+ } else {
+ hmm_mem_stat.dyc_size = 0;
+ hmm_mem_stat.dyc_thr = dyc_pgnr;
+ }
+ hmm_mem_stat.usr_size = 0;
+ hmm_mem_stat.sys_size = 0;
+ hmm_mem_stat.tol_cnt = 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
new file mode 100644
index 000000000000..492b76c29490
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -0,0 +1,1511 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+/*
+ * This file contains functions for buffer object structure management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gfp.h> /* for GFP_ATOMIC */
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/hugetlb.h>
+#include <linux/highmem.h>
+#include <linux/slab.h> /* for kmalloc */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/current.h>
+#include <linux/sched/signal.h>
+#include <linux/file.h>
+
+#include <asm/set_memory.h>
+
+#include "atomisp_internal.h"
+#include "hmm/hmm_common.h"
+#include "hmm/hmm_pool.h"
+#include "hmm/hmm_bo.h"
+
+static unsigned int order_to_nr(unsigned int order)
+{
+ return 1U << order;
+}
+
+static unsigned int nr_to_order_bottom(unsigned int nr)
+{
+ return fls(nr) - 1;
+}
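
/* Editor's note -- example of the order helpers (illustrative
 * numbers): for a request of nr = 12 pages, fls(12) == 4, so
 * nr_to_order_bottom(12) == 3 and order_to_nr(3) == 8, the largest
 * power-of-two block that still fits inside the request; the
 * remaining 4 pages can then be covered by further, smaller blocks.
 */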
+
+static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
+ unsigned int pgnr)
+{
+ check_bodev_null_return(bdev, -EINVAL);
+ var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
+ "hmm_bo_device not inited yet.\n");
+ /* prevent zero size buffer object */
+ if (pgnr == 0) {
+ dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
+ return -EINVAL;
+ }
+
+ memset(bo, 0, sizeof(*bo));
+ mutex_init(&bo->mutex);
+
+ /* init the bo->list HEAD as an element of entire_bo_list */
+ INIT_LIST_HEAD(&bo->list);
+
+ bo->bdev = bdev;
+ bo->vmap_addr = NULL;
+ bo->status = HMM_BO_FREE;
+ bo->start = bdev->start;
+ bo->pgnr = pgnr;
+ bo->end = bo->start + pgnr_to_size(pgnr);
+ bo->prev = NULL;
+ bo->next = NULL;
+
+ return 0;
+}
+
+static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
+ struct rb_node *node, unsigned int pgnr)
+{
+ struct hmm_buffer_object *this, *ret_bo, *temp_bo;
+
+ this = rb_entry(node, struct hmm_buffer_object, node);
+ if (this->pgnr == pgnr ||
+ (this->pgnr > pgnr && !this->node.rb_left)) {
+ goto remove_bo_and_return;
+ } else {
+ if (this->pgnr < pgnr) {
+ if (!this->node.rb_right)
+ return NULL;
+ ret_bo = __bo_search_and_remove_from_free_rbtree(
+ this->node.rb_right, pgnr);
+ } else {
+ ret_bo = __bo_search_and_remove_from_free_rbtree(
+ this->node.rb_left, pgnr);
+ }
+ if (!ret_bo) {
+ if (this->pgnr > pgnr)
+ goto remove_bo_and_return;
+ else
+ return NULL;
+ }
+ return ret_bo;
+ }
+
+remove_bo_and_return:
+ /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
+ * 1. check if 'this->next' is NULL:
+ * yes: erase 'this' node and rebalance rbtree, return 'this'.
+ */
+ if (!this->next) {
+ rb_erase(&this->node, &this->bdev->free_rbtree);
+ return this;
+ }
+ /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
+ * 2. check if 'this->next->next' is NULL:
+ * yes: change the related 'next/prev' pointer,
+ * return 'this->next' but the rbtree stays unchanged.
+ */
+ temp_bo = this->next;
+ this->next = temp_bo->next;
+ if (temp_bo->next)
+ temp_bo->next->prev = this;
+ temp_bo->next = NULL;
+ temp_bo->prev = NULL;
+ return temp_bo;
+}
+
+static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
+ ia_css_ptr start)
+{
+ struct rb_node *n = root->rb_node;
+ struct hmm_buffer_object *bo;
+
+ do {
+ bo = rb_entry(n, struct hmm_buffer_object, node);
+
+ if (bo->start > start) {
+ if (!n->rb_left)
+ return NULL;
+ n = n->rb_left;
+ } else if (bo->start < start) {
+ if (!n->rb_right)
+ return NULL;
+ n = n->rb_right;
+ } else {
+ return bo;
+ }
+ } while (n);
+
+ return NULL;
+}
+
+static struct hmm_buffer_object *__bo_search_by_addr_in_range(
+ struct rb_root *root, unsigned int start)
+{
+ struct rb_node *n = root->rb_node;
+ struct hmm_buffer_object *bo;
+
+ do {
+ bo = rb_entry(n, struct hmm_buffer_object, node);
+
+ if (bo->start > start) {
+ if (!n->rb_left)
+ return NULL;
+ n = n->rb_left;
+ } else {
+ if (bo->end > start)
+ return bo;
+ if (!n->rb_right)
+ return NULL;
+ n = n->rb_right;
+ }
+ } while (n);
+
+ return NULL;
+}
+
+static void __bo_insert_to_free_rbtree(struct rb_root *root,
+ struct hmm_buffer_object *bo)
+{
+ struct rb_node **new = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hmm_buffer_object *this;
+ unsigned int pgnr = bo->pgnr;
+
+ while (*new) {
+ parent = *new;
+ this = container_of(*new, struct hmm_buffer_object, node);
+
+ if (pgnr < this->pgnr) {
+ new = &((*new)->rb_left);
+ } else if (pgnr > this->pgnr) {
+ new = &((*new)->rb_right);
+ } else {
+ bo->prev = this;
+ bo->next = this->next;
+ if (this->next)
+ this->next->prev = bo;
+ this->next = bo;
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
+ return;
+ }
+ }
+
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
+
+ rb_link_node(&bo->node, parent, new);
+ rb_insert_color(&bo->node, root);
+}
+
+static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
+ struct hmm_buffer_object *bo)
+{
+ struct rb_node **new = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hmm_buffer_object *this;
+ unsigned int start = bo->start;
+
+ while (*new) {
+ parent = *new;
+ this = container_of(*new, struct hmm_buffer_object, node);
+
+ if (start < this->start)
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ kref_init(&bo->kref);
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
+
+ rb_link_node(&bo->node, parent, new);
+ rb_insert_color(&bo->node, root);
+}
+
+static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
+ struct hmm_buffer_object *bo,
+ unsigned int pgnr)
+{
+ struct hmm_buffer_object *new_bo;
+ unsigned long flags;
+ int ret;
+
+ new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
+ if (!new_bo) {
+ dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
+ return NULL;
+ }
+ ret = __bo_init(bdev, new_bo, pgnr);
+ if (ret) {
+ dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
+ kmem_cache_free(bdev->bo_cache, new_bo);
+ return NULL;
+ }
+
+ new_bo->start = bo->start;
+ new_bo->end = new_bo->start + pgnr_to_size(pgnr);
+ bo->start = new_bo->end;
+ bo->pgnr = bo->pgnr - pgnr;
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_add_tail(&new_bo->list, &bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ return new_bo;
+}
+
+static void __bo_take_off_handling(struct hmm_buffer_object *bo)
+{
+ struct hmm_bo_device *bdev = bo->bdev;
+ /* There are 4 situations when we take a known bo off the free rbtree:
+ * 1. if bo->prev == NULL && bo->next == NULL, bo is a rbtree node
+ * and does not have a linked list after bo; to take off this bo,
+ * we just need to erase bo directly and rebalance the free rbtree
+ */
+ if (!bo->prev && !bo->next) {
+ rb_erase(&bo->node, &bdev->free_rbtree);
+ /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node
+ * and has a linked list; to take off this bo we need to erase bo
+ * first, then insert bo->next into the free rbtree and rebalance
+ * the free rbtree
+ */
+ } else if (!bo->prev && bo->next) {
+ bo->next->prev = NULL;
+ rb_erase(&bo->node, &bdev->free_rbtree);
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
+ bo->next = NULL;
+ /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
+ * node, bo is the last element of the linked list after the rbtree
+ * node; to take off this bo, we just need to set the "prev/next"
+ * pointers to NULL, and the free rbtree stays unchanged
+ */
+ } else if (bo->prev && !bo->next) {
+ bo->prev->next = NULL;
+ bo->prev = NULL;
+ /* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
+ * node, bo is in the middle of the linked list after the rbtree node;
+ * to take off this bo, we just relink its neighbours' "prev/next"
+ * pointers, and the free rbtree stays unchanged
+ */
+ } else if (bo->prev && bo->next) {
+ bo->next->prev = bo->prev;
+ bo->prev->next = bo->next;
+ bo->next = NULL;
+ bo->prev = NULL;
+ }
+}
+
+static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
+ struct hmm_buffer_object *next_bo)
+{
+ struct hmm_bo_device *bdev;
+ unsigned long flags;
+
+ bdev = bo->bdev;
+ next_bo->start = bo->start;
+ next_bo->pgnr = next_bo->pgnr + bo->pgnr;
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ kmem_cache_free(bo->bdev->bo_cache, bo);
+
+ return next_bo;
+}
+
+/*
+ * hmm_bo_device functions.
+ */
+int hmm_bo_device_init(struct hmm_bo_device *bdev,
+ struct isp_mmu_client *mmu_driver,
+ unsigned int vaddr_start,
+ unsigned int size)
+{
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+ int ret;
+
+ check_bodev_null_return(bdev, -EINVAL);
+
+ ret = isp_mmu_init(&bdev->mmu, mmu_driver);
+ if (ret) {
+ dev_err(atomisp_dev, "isp_mmu_init failed.\n");
+ return ret;
+ }
+
+ bdev->start = vaddr_start;
+ bdev->pgnr = size_to_pgnr_ceil(size);
+ bdev->size = pgnr_to_size(bdev->pgnr);
+
+ spin_lock_init(&bdev->list_lock);
+ mutex_init(&bdev->rbtree_mutex);
+
+ bdev->flag = HMM_BO_DEVICE_INITED;
+
+ INIT_LIST_HEAD(&bdev->entire_bo_list);
+ bdev->allocated_rbtree = RB_ROOT;
+ bdev->free_rbtree = RB_ROOT;
+
+ bdev->bo_cache = kmem_cache_create("bo_cache",
+ sizeof(struct hmm_buffer_object), 0, 0, NULL);
+ if (!bdev->bo_cache) {
+ dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
+ isp_mmu_exit(&bdev->mmu);
+ return -ENOMEM;
+ }
+
+ bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
+ if (!bo) {
+ dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
+ isp_mmu_exit(&bdev->mmu);
+ return -ENOMEM;
+ }
+
+ ret = __bo_init(bdev, bo, bdev->pgnr);
+ if (ret) {
+ dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
+ kmem_cache_free(bdev->bo_cache, bo);
+ isp_mmu_exit(&bdev->mmu);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_add_tail(&bo->list, &bdev->entire_bo_list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ return 0;
+}
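+
+/*
+ * A minimal lifetime sketch for a bo device (illustrative; 'mmu_driver'
+ * and the address range are placeholders supplied by the caller):
+ *
+ *	struct hmm_bo_device bdev;
+ *	struct hmm_buffer_object *bo;
+ *
+ *	hmm_bo_device_init(&bdev, mmu_driver, vaddr_start, size);
+ *	bo = hmm_bo_alloc(&bdev, pgnr);
+ *	...
+ *	hmm_bo_unref(bo);
+ *	hmm_bo_device_exit(&bdev);
+ *
+ * hmm_bo_release() is normally reached through hmm_bo_unref() once the
+ * kref drops to zero, rather than being called directly.
+ */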
+
+struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
+ unsigned int pgnr)
+{
+ struct hmm_buffer_object *bo, *new_bo;
+ struct rb_root *root = &bdev->free_rbtree;
+
+ check_bodev_null_return(bdev, NULL);
+ var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
+ "hmm_bo_device not inited yet.\n");
+
+ if (pgnr == 0) {
+ dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
+ return NULL;
+ }
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
+ __func__);
+ return NULL;
+ }
+
+ if (bo->pgnr > pgnr) {
+ new_bo = __bo_break_up(bdev, bo, pgnr);
+ if (!new_bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
+ __func__);
+ return NULL;
+ }
+
+ __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return new_bo;
+ }
+
+ __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return bo;
+}
+
+void hmm_bo_release(struct hmm_buffer_object *bo)
+{
+ struct hmm_bo_device *bdev = bo->bdev;
+ struct hmm_buffer_object *next_bo, *prev_bo;
+
+ mutex_lock(&bdev->rbtree_mutex);
+
+ /*
+ * FIX ME:
+ *
+ * how to destroy the bo when it is still MMAPED?
+ *
+ * ideally, this will not happen as hmm_bo_release
+ * will only be called when kref reaches 0, and in mmap
+ * operation the hmm_bo_ref will eventually be called.
+ * so, if this happens, something has gone wrong.
+ */
+ if (bo->status & HMM_BO_MMAPED) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
+ return;
+ }
+
+ if (bo->status & HMM_BO_BINDED) {
+ dev_warn(atomisp_dev, "the bo is still binded, unbind it first...\n");
+ hmm_bo_unbind(bo);
+ }
+
+ if (bo->status & HMM_BO_PAGE_ALLOCED) {
+ dev_warn(atomisp_dev, "the pages is not freed, free pages first\n");
+ hmm_bo_free_pages(bo);
+ }
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
+ hmm_bo_vunmap(bo);
+ }
+
+ rb_erase(&bo->node, &bdev->allocated_rbtree);
+
+ prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
+ next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
+
+ if (bo->list.prev != &bdev->entire_bo_list &&
+ prev_bo->end == bo->start &&
+ (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
+ __bo_take_off_handling(prev_bo);
+ bo = __bo_merge(prev_bo, bo);
+ }
+
+ if (bo->list.next != &bdev->entire_bo_list &&
+ next_bo->start == bo->end &&
+ (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
+ __bo_take_off_handling(next_bo);
+ bo = __bo_merge(bo, next_bo);
+ }
+
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return;
+}
+
+void hmm_bo_device_exit(struct hmm_bo_device *bdev)
+{
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
+
+ check_bodev_null_return_void(bdev);
+
+ /*
+ * release all allocated bos even if they are in use;
+ * all bos will be merged back into one big bo
+ */
+ while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
+ hmm_bo_release(
+ rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
+
+ dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
+ __func__);
+
+ /* free all bos to release all ISP virtual memory */
+ while (!list_empty(&bdev->entire_bo_list)) {
+ bo = list_to_hmm_bo(bdev->entire_bo_list.next);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ kmem_cache_free(bdev->bo_cache, bo);
+ }
+
+ dev_dbg(atomisp_dev, "%s: finished to free all bos!\n", __func__);
+
+ kmem_cache_destroy(bdev->bo_cache);
+
+ isp_mmu_exit(&bdev->mmu);
+}
+
+int hmm_bo_device_inited(struct hmm_bo_device *bdev)
+{
+ check_bodev_null_return(bdev, -EINVAL);
+
+ return bdev->flag == HMM_BO_DEVICE_INITED;
+}
+
+int hmm_bo_allocated(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return(bo, 0);
+
+ return bo->status & HMM_BO_ALLOCED;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_start(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr)
+{
+ struct hmm_buffer_object *bo;
+
+ check_bodev_null_return(bdev, NULL);
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
+ __func__, vaddr);
+ return NULL;
+ }
+ mutex_unlock(&bdev->rbtree_mutex);
+
+ return bo;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_in_range(
+ struct hmm_bo_device *bdev, unsigned int vaddr)
+{
+ struct hmm_buffer_object *bo;
+
+ check_bodev_null_return(bdev, NULL);
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n",
+ __func__, vaddr);
+ return NULL;
+ }
+ mutex_unlock(&bdev->rbtree_mutex);
+
+ return bo;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
+ struct hmm_bo_device *bdev, const void *vaddr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ check_bodev_null_return(bdev, NULL);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->entire_bo_list) {
+ bo = list_to_hmm_bo(pos);
+ /* skip bo which has no vm_node allocated */
+ if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
+ continue;
+ if (bo->vmap_addr == vaddr)
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return NULL;
+found:
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return bo;
+}
+
+static void free_private_bo_pages(struct hmm_buffer_object *bo,
+ struct hmm_pool *dypool,
+ struct hmm_pool *repool,
+ int free_pgnr)
+{
+ int i, ret;
+
+ for (i = 0; i < free_pgnr; i++) {
+ switch (bo->page_obj[i].type) {
+ case HMM_PAGE_TYPE_RESERVED:
+ if (repool->pops
+ && repool->pops->pool_free_pages) {
+ repool->pops->pool_free_pages(repool->pool_info,
+ &bo->page_obj[i]);
+ hmm_mem_stat.res_cnt--;
+ }
+ break;
+ /*
+ * HMM_PAGE_TYPE_GENERAL indicates that the pages are from system
+ * memory, so when freeing them they should be put back into the
+ * dynamic pool.
+ */
+ case HMM_PAGE_TYPE_DYNAMIC:
+ case HMM_PAGE_TYPE_GENERAL:
+ if (dypool->pops
+ && dypool->pops->pool_inited
+ && dypool->pops->pool_inited(dypool->pool_info)) {
+ if (dypool->pops->pool_free_pages)
+ dypool->pops->pool_free_pages(
+ dypool->pool_info,
+ &bo->page_obj[i]);
+ break;
+ }
+
+ /*
+ * if the dynamic memory pool doesn't exist, the pages need
+ * to be freed back to the system directly.
+ */
+ default:
+ ret = set_pages_wb(bo->page_obj[i].page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret = %d\n",
+ ret);
+ /*
+ * W/A: set_pages_wb seldom returns -EFAULT, indicating that
+ * the page address is not in the valid range
+ * (0xffff880000000000~0xffffc7ffffffffff); __free_pages would
+ * then panic. It is not known why the page address becomes
+ * invalid; it may be memory corruption caused by low memory.
+ */
+ if (!ret) {
+ __free_pages(bo->page_obj[i].page, 0);
+ hmm_mem_stat.sys_size--;
+ }
+ break;
+ }
+ }
+
+ return;
+}
+
+/* Allocate pages which will be used only by the ISP */
+static int alloc_private_pages(struct hmm_buffer_object *bo,
+ int from_highmem,
+ bool cached,
+ struct hmm_pool *dypool,
+ struct hmm_pool *repool)
+{
+ int ret;
+ unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
+ struct page *pages;
+ gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
+ int i, j;
+ int failure_number = 0;
+ bool reduce_order = false;
+ bool lack_mem = true;
+
+ if (from_highmem)
+ gfp |= __GFP_HIGHMEM;
+
+ pgnr = bo->pgnr;
+
+ bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
+ GFP_KERNEL);
+ if (unlikely(!bo->page_obj))
+ return -ENOMEM;
+
+ i = 0;
+ alloc_pgnr = 0;
+
+ /*
+ * get physical pages from dynamic pages pool.
+ */
+ if (dypool->pops && dypool->pops->pool_alloc_pages) {
+ alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
+ bo->page_obj, pgnr,
+ cached);
+ hmm_mem_stat.dyc_size -= alloc_pgnr;
+
+ if (alloc_pgnr == pgnr)
+ return 0;
+ }
+
+ pgnr -= alloc_pgnr;
+ i += alloc_pgnr;
+
+ /*
+ * get physical pages from reserved pages pool for atomisp.
+ */
+ if (repool->pops && repool->pops->pool_alloc_pages) {
+ alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
+ &bo->page_obj[i], pgnr,
+ cached);
+ hmm_mem_stat.res_cnt += alloc_pgnr;
+ if (alloc_pgnr == pgnr)
+ return 0;
+ }
+
+ pgnr -= alloc_pgnr;
+ i += alloc_pgnr;
+
+ while (pgnr) {
+ order = nr_to_order_bottom(pgnr);
+ /*
+ * if we are short of memory, we set order to the minimum
+ * every time.
+ */
+ if (lack_mem)
+ order = HMM_MIN_ORDER;
+ else if (order > HMM_MAX_ORDER)
+ order = HMM_MAX_ORDER;
+retry:
+ /*
+ * When order > HMM_MIN_ORDER, for performance reasons we don't
+ * want alloc_pages() to sleep. In case it fails and falls back
+ * to HMM_MIN_ORDER, or in case the requested order is originally
+ * the minimum value, we can allow alloc_pages() to sleep for
+ * robustness' sake.
+ *
+ * REVISIT: why is __GFP_FS necessary?
+ */
+ if (order == HMM_MIN_ORDER) {
+ gfp &= ~GFP_NOWAIT;
+ gfp |= __GFP_RECLAIM | __GFP_FS;
+ }
+
+ pages = alloc_pages(gfp, order);
+ if (unlikely(!pages)) {
+ /*
+ * in the low memory case, if page allocation fails, retry
+ * with an order-0 allocation. if order 0 fails too, there
+ * is no memory left.
+ */
+ if (order == HMM_MIN_ORDER) {
+ dev_err(atomisp_dev,
+ "%s: cannot allocate pages\n",
+ __func__);
+ goto cleanup;
+ }
+ order = HMM_MIN_ORDER;
+ failure_number++;
+ reduce_order = true;
+ /*
+ * if allocation fails twice in a row, assume we are
+ * short of memory now.
+ */
+ if (failure_number == 2) {
+ lack_mem = true;
+ failure_number = 0;
+ }
+ goto retry;
+ } else {
+ blk_pgnr = order_to_nr(order);
+
+ if (!cached) {
+ /*
+ * set memory to uncacheable -- UC_MINUS
+ */
+ ret = set_pages_uc(pages, blk_pgnr);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "set page uncacheablefailed.\n");
+
+ __free_pages(pages, order);
+
+ goto cleanup;
+ }
+ }
+
+ for (j = 0; j < blk_pgnr; j++) {
+ bo->page_obj[i].page = pages + j;
+ bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
+ }
+
+ pgnr -= blk_pgnr;
+ hmm_mem_stat.sys_size += blk_pgnr;
+
+ /*
+ * if order is not reduced this time, clear
+ * failure_number.
+ */
+ if (reduce_order)
+ reduce_order = false;
+ else
+ failure_number = 0;
+ }
+ }
+
+ return 0;
+cleanup:
+ alloc_pgnr = i;
+ free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
+
+ kfree(bo->page_obj);
+
+ return -ENOMEM;
+}
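+
+/*
+ * A worked example of the retry strategy above (illustrative, assuming
+ * nr_to_order_bottom() returns the largest order whose block does not
+ * exceed pgnr): for pgnr = 7 that would be order 2 (4 pages), then
+ * order 1 (2 pages), then order 0. Note that lack_mem is initialized
+ * to true, so as written every iteration actually requests
+ * HMM_MIN_ORDER and allows alloc_pages() to sleep.
+ */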
+
+static void free_private_pages(struct hmm_buffer_object *bo,
+ struct hmm_pool *dypool,
+ struct hmm_pool *repool)
+{
+ free_private_bo_pages(bo, dypool, repool, bo->pgnr);
+
+ kfree(bo->page_obj);
+}
+
+/*
+ * Hacked from the kernel function __get_user_pages in mm/memory.c
+ *
+ * Handles buffers allocated by other kernel-space drivers and mmapped into
+ * user space; ignores the VM_PFNMAP and VM_IO flags in the VMA structure.
+ *
+ * Gets physical pages from a user-space virtual address and fills the page
+ * list.
+ */
+static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ int i, ret;
+ unsigned long vm_flags;
+
+ if (nr_pages <= 0)
+ return 0;
+
+ VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
+ /*
+ * Require read or write permissions.
+ * If FOLL_FORCE is set, we only require the "MAY" flags.
+ */
+ vm_flags = (gup_flags & FOLL_WRITE) ?
+ (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ vm_flags &= (gup_flags & FOLL_FORCE) ?
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ i = 0;
+
+ do {
+ struct vm_area_struct *vma;
+
+ vma = find_vma(mm, start);
+ if (!vma) {
+ dev_err(atomisp_dev, "find_vma failed\n");
+ return i ? : -EFAULT;
+ }
+
+ if (is_vm_hugetlb_page(vma)) {
+ /*
+ i = follow_hugetlb_page(mm, vma, pages, vmas,
+ &start, &nr_pages, i, gup_flags);
+ */
+ continue;
+ }
+
+ do {
+ struct page *page;
+ unsigned long pfn;
+
+ /*
+ * If we have a pending SIGKILL, don't keep faulting
+ * pages and potentially allocating memory.
+ */
+ if (unlikely(fatal_signal_pending(current))) {
+ dev_err(atomisp_dev,
+ "fatal_signal_pending in %s\n",
+ __func__);
+ return i ? i : -ERESTARTSYS;
+ }
+
+ ret = follow_pfn(vma, start, &pfn);
+ if (ret) {
+ dev_err(atomisp_dev, "follow_pfn() failed\n");
+ return i ? : -EFAULT;
+ }
+
+ page = pfn_to_page(pfn);
+ if (IS_ERR(page))
+ return i ? i : PTR_ERR(page);
+ if (pages) {
+ pages[i] = page;
+ get_page(page);
+ flush_anon_page(vma, page, start);
+ flush_dcache_page(page);
+ }
+ if (vmas)
+ vmas[i] = vma;
+ i++;
+ start += PAGE_SIZE;
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+ } while (nr_pages);
+
+ return i;
+}
+
+static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int flags = FOLL_TOUCH;
+
+ if (pages)
+ flags |= FOLL_GET;
+ if (write)
+ flags |= FOLL_WRITE;
+ if (force)
+ flags |= FOLL_FORCE;
+
+ return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+}
+
+/*
+ * Convert user space virtual address into pages list
+ */
+static int alloc_user_pages(struct hmm_buffer_object *bo,
+ const void __user *userptr, bool cached)
+{
+ int page_nr;
+ int i;
+ struct vm_area_struct *vma;
+ struct page **pages;
+
+ pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(!pages))
+ return -ENOMEM;
+
+ bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
+ GFP_KERNEL);
+ if (unlikely(!bo->page_obj)) {
+ kfree(pages);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&bo->mutex);
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, (unsigned long)userptr);
+ up_read(&current->mm->mmap_sem);
+ if (!vma) {
+ dev_err(atomisp_dev, "find_vma failed\n");
+ kfree(bo->page_obj);
+ kfree(pages);
+ mutex_lock(&bo->mutex);
+ return -EFAULT;
+ }
+ mutex_lock(&bo->mutex);
+ /*
+ * Handle frame buffers allocated by another kernel-space driver
+ * and mapped into user space
+ */
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+ page_nr = get_pfnmap_pages(current, current->mm,
+ (unsigned long)userptr,
+ (int)(bo->pgnr), 1, 0,
+ pages, NULL);
+ bo->mem_type = HMM_BO_MEM_TYPE_PFN;
+ } else {
+ /* Handle frame buffers allocated in user space */
+ mutex_unlock(&bo->mutex);
+ page_nr = get_user_pages_fast((unsigned long)userptr,
+ (int)(bo->pgnr), 1, pages);
+ mutex_lock(&bo->mutex);
+ bo->mem_type = HMM_BO_MEM_TYPE_USER;
+ }
+
+ /* can be written by caller, not forced */
+ if (page_nr != bo->pgnr) {
+ dev_err(atomisp_dev,
+ "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
+ bo->pgnr, page_nr);
+ goto out_of_mem;
+ }
+
+ for (i = 0; i < bo->pgnr; i++) {
+ bo->page_obj[i].page = pages[i];
+ bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
+ }
+ hmm_mem_stat.usr_size += bo->pgnr;
+ kfree(pages);
+
+ return 0;
+
+out_of_mem:
+ for (i = 0; i < page_nr; i++)
+ put_page(pages[i]);
+ kfree(pages);
+ kfree(bo->page_obj);
+
+ return -ENOMEM;
+}
+
+static void free_user_pages(struct hmm_buffer_object *bo)
+{
+ int i;
+
+ for (i = 0; i < bo->pgnr; i++)
+ put_page(bo->page_obj[i].page);
+ hmm_mem_stat.usr_size -= bo->pgnr;
+
+ kfree(bo->page_obj);
+}
+
+/*
+ * allocate/free physical pages for the bo.
+ *
+ * type indicates where the pages come from; currently we have 3 types
+ * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
+ *
+ * from_highmem is only valid when type is HMM_BO_PRIVATE; the allocation
+ * will try to use highmem if from_highmem is set.
+ *
+ * userptr is only valid when type is HMM_BO_USER; it is the start
+ * address in the user-space task's address space.
+ *
+ * from_highmem and userptr are both ignored when type is HMM_BO_SHARE.
+ */
+int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
+ enum hmm_bo_type type, int from_highmem,
+ const void __user *userptr, bool cached)
+{
+ int ret = -EINVAL;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+ check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ /*
+ * TO DO:
+ * add HMM_BO_USER type
+ */
+ if (type == HMM_BO_PRIVATE) {
+ ret = alloc_private_pages(bo, from_highmem,
+ cached, &dynamic_pool, &reserved_pool);
+ } else if (type == HMM_BO_USER) {
+ ret = alloc_user_pages(bo, userptr, cached);
+ } else {
+ dev_err(atomisp_dev, "invalid buffer type.\n");
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto alloc_err;
+
+ bo->type = type;
+
+ bo->status |= HMM_BO_PAGE_ALLOCED;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+alloc_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "alloc pages err...\n");
+ return ret;
+status_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object has already page allocated.\n");
+ return -EINVAL;
+}
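+
+/*
+ * Illustrative call sequence for backing a bo with private pages and
+ * mapping it into the ISP MMU (a sketch only; error handling elided):
+ *
+ *	ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
+ *	if (!ret)
+ *		ret = hmm_bo_bind(bo);
+ *	...
+ *	hmm_bo_unbind(bo);
+ *	hmm_bo_free_pages(bo);
+ */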
+
+/*
+ * free physical pages of the bo.
+ */
+void hmm_bo_free_pages(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
+
+ /* clear the flag anyway. */
+ bo->status &= (~HMM_BO_PAGE_ALLOCED);
+
+ if (bo->type == HMM_BO_PRIVATE)
+ free_private_pages(bo, &dynamic_pool, &reserved_pool);
+ else if (bo->type == HMM_BO_USER)
+ free_user_pages(bo);
+ else
+ dev_err(atomisp_dev, "invalid buffer type.\n");
+ mutex_unlock(&bo->mutex);
+
+ return;
+
+status_err2:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object not page allocated yet.\n");
+}
+
+int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return(bo, 0);
+
+ return bo->status & HMM_BO_PAGE_ALLOCED;
+}
+
+/*
+ * get physical page info of the bo.
+ */
+int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
+ struct hmm_page_object **page_obj, int *pgnr)
+{
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ *page_obj = bo->page_obj;
+ *pgnr = bo->pgnr;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+status_err:
+ dev_err(atomisp_dev,
+ "buffer object not page allocated yet.\n");
+ mutex_unlock(&bo->mutex);
+ return -EINVAL;
+}
+
+/*
+ * bind the physical pages to a virtual address space.
+ */
+int hmm_bo_bind(struct hmm_buffer_object *bo)
+{
+ int ret;
+ unsigned int virt;
+ struct hmm_bo_device *bdev;
+ unsigned int i;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo,
+ HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
+ status_err1);
+
+ check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
+
+ bdev = bo->bdev;
+
+ virt = bo->start;
+
+ for (i = 0; i < bo->pgnr; i++) {
+ ret =
+ isp_mmu_map(&bdev->mmu, virt,
+ page_to_phys(bo->page_obj[i].page), 1);
+ if (ret)
+ goto map_err;
+ virt += (1 << PAGE_SHIFT);
+ }
+
+ /*
+ * flush TLB here.
+ *
+ * theoretically, we do not need to flush the TLB as we did not
+ * change any existing address mappings, but for Silicon Hive's MMU
+ * this is really needed. Presumably, when fetching PTEs (page table
+ * entries) into the TLB, the MMU fetches additional INVALID PTEs
+ * automatically for performance reasons. E.g., we only set up one
+ * page address mapping, meaning one PTE is updated, but the MMU
+ * fetches 4 PTEs at a time, so the additional 3 PTEs are invalid.
+ */
+ if (bo->start != 0x0)
+ isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
+ (bo->pgnr << PAGE_SHIFT));
+
+ bo->status |= HMM_BO_BINDED;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+map_err:
+ /* unbind the physical pages from the related virtual address space */
+ virt = bo->start;
+ for ( ; i > 0; i--) {
+ isp_mmu_unmap(&bdev->mmu, virt, 1);
+ virt += pgnr_to_size(1);
+ }
+
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "setup MMU address mapping failed.\n");
+ return ret;
+
+status_err2:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "buffer object already binded.\n");
+ return -EINVAL;
+status_err1:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object vm_node or page not allocated.\n");
+ return -EINVAL;
+}
+
+/*
+ * unbind the physical pages from the related virtual address space.
+ */
+void hmm_bo_unbind(struct hmm_buffer_object *bo)
+{
+ unsigned int virt;
+ struct hmm_bo_device *bdev;
+ unsigned int i;
+
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo,
+ HMM_BO_PAGE_ALLOCED |
+ HMM_BO_ALLOCED |
+ HMM_BO_BINDED, status_err);
+
+ bdev = bo->bdev;
+
+ virt = bo->start;
+
+ for (i = 0; i < bo->pgnr; i++) {
+ isp_mmu_unmap(&bdev->mmu, virt, 1);
+ virt += pgnr_to_size(1);
+ }
+
+ /*
+ * flush TLB as the address mapping has been removed and
+ * related TLBs should be invalidated.
+ */
+ isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
+ (bo->pgnr << PAGE_SHIFT));
+
+ bo->status &= (~HMM_BO_BINDED);
+
+ mutex_unlock(&bo->mutex);
+
+ return;
+
+status_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer vm or page not allocated or not binded yet.\n");
+}
+
+int hmm_bo_binded(struct hmm_buffer_object *bo)
+{
+ int ret;
+
+ check_bo_null_return(bo, 0);
+
+ mutex_lock(&bo->mutex);
+
+ ret = bo->status & HMM_BO_BINDED;
+
+ mutex_unlock(&bo->mutex);
+
+ return ret;
+}
+
+void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
+{
+ struct page **pages;
+ int i;
+
+ check_bo_null_return(bo, NULL);
+
+ mutex_lock(&bo->mutex);
+ if (((bo->status & HMM_BO_VMAPED) && !cached) ||
+ ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
+ mutex_unlock(&bo->mutex);
+ return bo->vmap_addr;
+ }
+
+ /* the cached status needs to change, so vunmap first */
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ vunmap(bo->vmap_addr);
+ bo->vmap_addr = NULL;
+ bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
+ }
+
+ pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
+ if (unlikely(!pages)) {
+ mutex_unlock(&bo->mutex);
+ return NULL;
+ }
+
+ for (i = 0; i < bo->pgnr; i++)
+ pages[i] = bo->page_obj[i].page;
+
+ bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
+ cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
+ if (unlikely(!bo->vmap_addr)) {
+ kfree(pages);
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "vmap failed...\n");
+ return NULL;
+ }
+ bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
+
+ kfree(pages);
+
+ mutex_unlock(&bo->mutex);
+ return bo->vmap_addr;
+}
+
+void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+ if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
+ mutex_unlock(&bo->mutex);
+ return;
+ }
+
+ clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
+ mutex_unlock(&bo->mutex);
+}
+
+void hmm_bo_vunmap(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ vunmap(bo->vmap_addr);
+ bo->vmap_addr = NULL;
+ bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
+ }
+
+ mutex_unlock(&bo->mutex);
+ return;
+}
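+
+/*
+ * Illustrative CPU access pattern using the vmap helpers above ('data'
+ * and 'len' are placeholders; error handling elided):
+ *
+ *	void *va = hmm_bo_vmap(bo, true);
+ *
+ *	memcpy(va, data, len);
+ *	hmm_bo_flush_vmap(bo);
+ *	hmm_bo_vunmap(bo);
+ *
+ * hmm_bo_flush_vmap() only acts on cached mappings, so it pairs with
+ * hmm_bo_vmap(bo, true).
+ */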
+
+void hmm_bo_ref(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ kref_get(&bo->kref);
+}
+
+static void kref_hmm_bo_release(struct kref *kref)
+{
+ if (!kref)
+ return;
+
+ hmm_bo_release(kref_to_hmm_bo(kref));
+}
+
+void hmm_bo_unref(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ kref_put(&bo->kref, kref_hmm_bo_release);
+}
+
+static void hmm_bo_vm_open(struct vm_area_struct *vma)
+{
+ struct hmm_buffer_object *bo =
+ (struct hmm_buffer_object *)vma->vm_private_data;
+
+ check_bo_null_return_void(bo);
+
+ hmm_bo_ref(bo);
+
+ mutex_lock(&bo->mutex);
+
+ bo->status |= HMM_BO_MMAPED;
+
+ bo->mmap_count++;
+
+ mutex_unlock(&bo->mutex);
+}
+
+static void hmm_bo_vm_close(struct vm_area_struct *vma)
+{
+ struct hmm_buffer_object *bo =
+ (struct hmm_buffer_object *)vma->vm_private_data;
+
+ check_bo_null_return_void(bo);
+
+ hmm_bo_unref(bo);
+
+ mutex_lock(&bo->mutex);
+
+ bo->mmap_count--;
+
+ if (!bo->mmap_count) {
+ bo->status &= (~HMM_BO_MMAPED);
+ vma->vm_private_data = NULL;
+ }
+
+ mutex_unlock(&bo->mutex);
+}
+
+static const struct vm_operations_struct hmm_bo_vm_ops = {
+ .open = hmm_bo_vm_open,
+ .close = hmm_bo_vm_close,
+};
+
+/*
+ * mmap the bo to user space.
+ */
+int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
+{
+ unsigned int start, end;
+ unsigned int virt;
+ unsigned int pgnr, i;
+ unsigned int pfn;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ pgnr = bo->pgnr;
+ start = vma->vm_start;
+ end = vma->vm_end;
+
+ /*
+ * the vma's virtual address space size and the buffer object's
+ * size must be the same.
+ */
+ if ((start + pgnr_to_size(pgnr)) != end) {
+ dev_warn(atomisp_dev,
+ "vma's address space size not equal to buffer object's size");
+ return -EINVAL;
+ }
+
+ virt = vma->vm_start;
+ for (i = 0; i < pgnr; i++) {
+ pfn = page_to_pfn(bo->page_obj[i].page);
+ if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
+ dev_warn(atomisp_dev,
+ "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
+ virt, pfn, 1);
+ return -EINVAL;
+ }
+ virt += PAGE_SIZE;
+ }
+
+ vma->vm_private_data = bo;
+
+ vma->vm_ops = &hmm_bo_vm_ops;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+ /*
+ * call hmm_bo_vm_open explicitly.
+ */
+ hmm_bo_vm_open(vma);
+
+ return 0;
+
+status_err:
+ dev_err(atomisp_dev, "buffer page not allocated yet.\n");
+ return -EINVAL;
+}
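+
+/*
+ * hmm_bo_mmap() is meant to be called from a driver's ->mmap file
+ * operation; a hypothetical handler could look like this ('my_mmap'
+ * and the private_data usage are illustrative only):
+ *
+ *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
+ *	{
+ *		struct hmm_buffer_object *bo = file->private_data;
+ *
+ *		return hmm_bo_mmap(vma, bo);
+ *	}
+ */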
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c
new file mode 100644
index 000000000000..1a87af68a924
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c
@@ -0,0 +1,233 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+/*
+ * This file contains functions for dynamic memory pool management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <asm/set_memory.h>
+
+#include "atomisp_internal.h"
+
+#include "hmm/hmm_pool.h"
+
+/*
+ * dynamic memory pool ops.
+ */
+static unsigned int get_pages_from_dynamic_pool(void *pool,
+ struct hmm_page_object *page_obj,
+ unsigned int size, bool cached)
+{
+ struct hmm_page *hmm_page;
+ unsigned long flags;
+ unsigned int i = 0;
+ struct hmm_dynamic_pool_info *dypool_info = pool;
+
+ if (!dypool_info)
+ return 0;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ if (dypool_info->initialized) {
+ while (!list_empty(&dypool_info->pages_list)) {
+ hmm_page = list_entry(dypool_info->pages_list.next,
+ struct hmm_page, list);
+
+ list_del(&hmm_page->list);
+ dypool_info->pgnr--;
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ page_obj[i].page = hmm_page->page;
+ page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
+ kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
+
+ if (i == size)
+ return i;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ return i;
+}
+
+static void free_pages_to_dynamic_pool(void *pool,
+ struct hmm_page_object *page_obj)
+{
+ struct hmm_page *hmm_page;
+ unsigned long flags;
+ int ret;
+ struct hmm_dynamic_pool_info *dypool_info = pool;
+
+ if (!dypool_info)
+ return;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ if (!dypool_info->initialized) {
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
+ return;
+
+ if (dypool_info->pgnr >= dypool_info->pool_size) {
+ /* free page directly back to system */
+ ret = set_pages_wb(page_obj->page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret=%d\n", ret);
+ /*
+ * W/A: set_pages_wb seldom returns -EFAULT, indicating that
+ * the page address is not in the valid range
+ * (0xffff880000000000~0xffffc7ffffffffff); __free_pages would
+ * then panic. It is not known why the page address becomes
+ * invalid; it may be memory corruption caused by low memory.
+ */
+ if (!ret) {
+ __free_pages(page_obj->page, 0);
+ hmm_mem_stat.sys_size--;
+ }
+ return;
+ }
+ hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
+ GFP_KERNEL);
+ if (!hmm_page) {
+ /* free page directly */
+ ret = set_pages_wb(page_obj->page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret=%d\n", ret);
+ if (!ret) {
+ __free_pages(page_obj->page, 0);
+ hmm_mem_stat.sys_size--;
+ }
+ return;
+ }
+
+ hmm_page->page = page_obj->page;
+
+ /*
+ * add to pages_list of pages_pool
+ */
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ list_add_tail(&hmm_page->list, &dypool_info->pages_list);
+ dypool_info->pgnr++;
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+ hmm_mem_stat.dyc_size++;
+}
+
+static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
+{
+ struct hmm_dynamic_pool_info *dypool_info;
+
+ if (pool_size == 0)
+ return 0;
+
+ dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info),
+ GFP_KERNEL);
+ if (unlikely(!dypool_info))
+ return -ENOMEM;
+
+ dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
+ sizeof(struct hmm_page), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dypool_info->pgptr_cache) {
+ kfree(dypool_info);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&dypool_info->pages_list);
+ spin_lock_init(&dypool_info->list_lock);
+ dypool_info->initialized = true;
+ dypool_info->pool_size = pool_size;
+ dypool_info->pgnr = 0;
+
+ *pool = dypool_info;
+
+ return 0;
+}
+
+static void hmm_dynamic_pool_exit(void **pool)
+{
+ struct hmm_dynamic_pool_info *dypool_info = *pool;
+ struct hmm_page *hmm_page;
+ unsigned long flags;
+ int ret;
+
+ if (!dypool_info)
+ return;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ if (!dypool_info->initialized) {
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+ return;
+ }
+ dypool_info->initialized = false;
+
+ while (!list_empty(&dypool_info->pages_list)) {
+ hmm_page = list_entry(dypool_info->pages_list.next,
+ struct hmm_page, list);
+
+ list_del(&hmm_page->list);
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ /* can sleep, so cannot be called while holding the spinlock */
+ ret = set_pages_wb(hmm_page->page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err...ret=%d\n", ret);
+ if (!ret) {
+ __free_pages(hmm_page->page, 0);
+ hmm_mem_stat.dyc_size--;
+ hmm_mem_stat.sys_size--;
+ }
+ kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ kmem_cache_destroy(dypool_info->pgptr_cache);
+
+ kfree(dypool_info);
+
+ *pool = NULL;
+}
+
+static int hmm_dynamic_pool_inited(void *pool)
+{
+ struct hmm_dynamic_pool_info *dypool_info = pool;
+
+ if (!dypool_info)
+ return 0;
+
+ return dypool_info->initialized;
+}
+
+struct hmm_pool_ops dynamic_pops = {
+ .pool_init = hmm_dynamic_pool_init,
+ .pool_exit = hmm_dynamic_pool_exit,
+ .pool_alloc_pages = get_pages_from_dynamic_pool,
+ .pool_free_pages = free_pages_to_dynamic_pool,
+ .pool_inited = hmm_dynamic_pool_inited,
+};
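+
+/*
+ * These ops are reached through a struct hmm_pool rather than called
+ * directly; hmm_bo.c uses the pattern (illustrative excerpt):
+ *
+ *	if (dypool->pops && dypool->pops->pool_alloc_pages)
+ *		alloc_pgnr = dypool->pops->pool_alloc_pages(
+ *				dypool->pool_info, page_obj, pgnr, cached);
+ */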
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c
new file mode 100644
index 000000000000..d739ed914d65
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c
@@ -0,0 +1,252 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+/*
+ * This file contains functions for reserved memory pool management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <asm/set_memory.h>
+
+#include "atomisp_internal.h"
+#include "hmm/hmm_pool.h"
+
+/*
+ * reserved memory pool ops.
+ */
+static unsigned int get_pages_from_reserved_pool(void *pool,
+ struct hmm_page_object *page_obj,
+ unsigned int size, bool cached)
+{
+ unsigned long flags;
+ unsigned int i = 0;
+ unsigned int repool_pgnr;
+ int j;
+ struct hmm_reserved_pool_info *repool_info = pool;
+
+ if (!repool_info)
+ return 0;
+
+ spin_lock_irqsave(&repool_info->list_lock, flags);
+ if (repool_info->initialized) {
+ repool_pgnr = repool_info->index;
+
+ for (j = repool_pgnr - 1; j >= 0; j--) {
+ page_obj[i].page = repool_info->pages[j];
+ page_obj[i].type = HMM_PAGE_TYPE_RESERVED;
+ i++;
+ repool_info->index--;
+ if (i == size)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+ return i;
+}
+
+static void free_pages_to_reserved_pool(void *pool,
+ struct hmm_page_object *page_obj)
+{
+ unsigned long flags;
+ struct hmm_reserved_pool_info *repool_info = pool;
+
+ if (!repool_info)
+ return;
+
+ spin_lock_irqsave(&repool_info->list_lock, flags);
+
+ if (repool_info->initialized &&
+ repool_info->index < repool_info->pgnr &&
+ page_obj->type == HMM_PAGE_TYPE_RESERVED) {
+ repool_info->pages[repool_info->index++] = page_obj->page;
+ }
+
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+}
+
+static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
+ unsigned int pool_size)
+{
+ struct hmm_reserved_pool_info *pool_info;
+
+ pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info),
+ GFP_KERNEL);
+ if (unlikely(!pool_info))
+ return -ENOMEM;
+
+ pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,
+ GFP_KERNEL);
+ if (unlikely(!pool_info->pages)) {
+ kfree(pool_info);
+ return -ENOMEM;
+ }
+
+ pool_info->index = 0;
+ pool_info->pgnr = 0;
+ spin_lock_init(&pool_info->list_lock);
+ pool_info->initialized = true;
+
+ *repool_info = pool_info;
+
+ return 0;
+}
+
+static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)
+{
+ int ret;
+ unsigned int blk_pgnr;
+ unsigned int pgnr = pool_size;
+ unsigned int order = 0;
+ unsigned int i = 0;
+ int fail_number = 0;
+ struct page *pages;
+ int j;
+ struct hmm_reserved_pool_info *repool_info;
+
+ if (pool_size == 0)
+ return 0;
+
+ ret = hmm_reserved_pool_setup(&repool_info, pool_size);
+ if (ret) {
+ dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n");
+ return ret;
+ }
+
+ pgnr = pool_size;
+
+ i = 0;
+ order = MAX_ORDER;
+
+ while (pgnr) {
+ blk_pgnr = 1U << order;
+ while (blk_pgnr > pgnr) {
+ order--;
+ blk_pgnr >>= 1U;
+ }
+ BUG_ON(order > MAX_ORDER);
+
+ pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
+ if (unlikely(!pages)) {
+ if (order == 0) {
+ fail_number++;
+ dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n",
+ __func__, fail_number);
+ /* if allocation fails ALLOC_PAGE_FAIL_NUM
+ * times in a row, goto end
+ */
+
+ /* FIXME: is this back-off mechanism OK? */
+ if (fail_number == ALLOC_PAGE_FAIL_NUM)
+ goto end;
+ } else {
+ order--;
+ }
+ } else {
+ blk_pgnr = 1U << order;
+
+ ret = set_pages_uc(pages, blk_pgnr);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "set pages uncached failed\n");
+ __free_pages(pages, order);
+ goto end;
+ }
+
+ for (j = 0; j < blk_pgnr; j++)
+ repool_info->pages[i++] = pages + j;
+
+ repool_info->index += blk_pgnr;
+ repool_info->pgnr += blk_pgnr;
+
+ pgnr -= blk_pgnr;
+
+ fail_number = 0;
+ }
+ }
+
+end:
+ repool_info->initialized = true;
+
+ *pool = repool_info;
+
+ dev_info(atomisp_dev,
+ "hmm_reserved_pool init successfully,hmm_reserved_pool is with %d pages.\n",
+ repool_info->pgnr);
+ return 0;
+}
+
+static void hmm_reserved_pool_exit(void **pool)
+{
+ unsigned long flags;
+ int i, ret;
+ unsigned int pgnr;
+ struct hmm_reserved_pool_info *repool_info = *pool;
+
+ if (!repool_info)
+ return;
+
+ spin_lock_irqsave(&repool_info->list_lock, flags);
+ if (!repool_info->initialized) {
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+ return;
+ }
+ pgnr = repool_info->pgnr;
+ repool_info->index = 0;
+ repool_info->pgnr = 0;
+ repool_info->initialized = false;
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+
+ for (i = 0; i < pgnr; i++) {
+ ret = set_pages_wb(repool_info->pages[i], 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err...ret=%d\n", ret);
+ /*
+ * W/A: set_pages_wb seldom returns -EFAULT, indicating that
+ * the page address is not in the valid range
+ * (0xffff880000000000~0xffffc7ffffffffff); __free_pages would
+ * then panic. It is not known why the page address becomes
+ * invalid; it may be memory corruption caused by low memory.
+ */
+ if (!ret)
+ __free_pages(repool_info->pages[i], 0);
+ }
+
+ kfree(repool_info->pages);
+ kfree(repool_info);
+
+ *pool = NULL;
+}
+
+static int hmm_reserved_pool_inited(void *pool)
+{
+ struct hmm_reserved_pool_info *repool_info = pool;
+
+ if (!repool_info)
+ return 0;
+
+ return repool_info->initialized;
+}
+
+struct hmm_pool_ops reserved_pops = {
+ .pool_init = hmm_reserved_pool_init,
+ .pool_exit = hmm_reserved_pool_exit,
+ .pool_alloc_pages = get_pages_from_reserved_pool,
+ .pool_free_pages = free_pages_to_reserved_pool,
+ .pool_inited = hmm_reserved_pool_inited,
+};
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_vm.c b/drivers/staging/media/atomisp/pci/hmm/hmm_vm.c
new file mode 100644
index 000000000000..976a2cb51354
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_vm.c
@@ -0,0 +1,212 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+/*
+ * This file contains functions for ISP virtual address management in the
+ * ISP driver
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+
+#include "atomisp_internal.h"
+#include "mmu/isp_mmu.h"
+#include "hmm/hmm_vm.h"
+#include "hmm/hmm_common.h"
+
+static unsigned int vm_node_end(unsigned int start, unsigned int pgnr)
+{
+ return start + pgnr_to_size(pgnr);
+}
+
+static int addr_in_vm_node(unsigned int addr,
+ struct hmm_vm_node *node)
+{
+ return (addr >= node->start) && (addr < (node->start + node->size));
+}
+
+int hmm_vm_init(struct hmm_vm *vm, unsigned int start,
+ unsigned int size)
+{
+ if (!vm)
+ return -1;
+
+ vm->start = start;
+ vm->pgnr = size_to_pgnr_ceil(size);
+ vm->size = pgnr_to_size(vm->pgnr);
+
+ INIT_LIST_HEAD(&vm->vm_node_list);
+ spin_lock_init(&vm->lock);
+ vm->cache = kmem_cache_create("atomisp_vm", sizeof(struct hmm_vm_node),
+ 0, 0, NULL);
+
+ return vm->cache ? 0 : -ENOMEM;
+}
+
+void hmm_vm_clean(struct hmm_vm *vm)
+{
+ struct hmm_vm_node *node, *tmp;
+ struct list_head new_head;
+
+ if (!vm)
+ return;
+
+ spin_lock(&vm->lock);
+ list_replace_init(&vm->vm_node_list, &new_head);
+ spin_unlock(&vm->lock);
+
+ list_for_each_entry_safe(node, tmp, &new_head, list) {
+ list_del(&node->list);
+ kmem_cache_free(vm->cache, node);
+ }
+
+ kmem_cache_destroy(vm->cache);
+}
+
+static struct hmm_vm_node *alloc_hmm_vm_node(unsigned int pgnr,
+ struct hmm_vm *vm)
+{
+ struct hmm_vm_node *node;
+
+ node = kmem_cache_alloc(vm->cache, GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ INIT_LIST_HEAD(&node->list);
+ node->pgnr = pgnr;
+ node->size = pgnr_to_size(pgnr);
+ node->vm = vm;
+
+ return node;
+}
+
+struct hmm_vm_node *hmm_vm_alloc_node(struct hmm_vm *vm, unsigned int pgnr)
+{
+ struct list_head *head;
+ struct hmm_vm_node *node, *cur, *next;
+ unsigned int vm_start, vm_end;
+ unsigned int addr;
+ unsigned int size;
+
+ if (!vm)
+ return NULL;
+
+ vm_start = vm->start;
+ vm_end = vm_node_end(vm->start, vm->pgnr);
+ size = pgnr_to_size(pgnr);
+
+ addr = vm_start;
+ head = &vm->vm_node_list;
+
+ node = alloc_hmm_vm_node(pgnr, vm);
+ if (!node) {
+ dev_err(atomisp_dev, "no memory to allocate hmm vm node.\n");
+ return NULL;
+ }
+
+ spin_lock(&vm->lock);
+ /*
+ * if list is empty, the loop code will not be executed.
+ */
+ list_for_each_entry(cur, head, list) {
+ /* Add a gap between vm areas so that overflows are not hidden */
+ addr = PAGE_ALIGN(vm_node_end(cur->start, cur->pgnr) + 1);
+
+ if (list_is_last(&cur->list, head)) {
+ if (addr + size > vm_end) {
+ /* vm area does not have space anymore */
+ spin_unlock(&vm->lock);
+ kmem_cache_free(vm->cache, node);
+ dev_err(atomisp_dev,
+ "no enough virtual address space.\n");
+ return NULL;
+ }
+
+ /* We still have vm space to add new node to tail */
+ break;
+ }
+
+ next = list_entry(cur->list.next, struct hmm_vm_node, list);
+ if ((next->start - addr) > size)
+ break;
+ }
+ node->start = addr;
+ node->vm = vm;
+ list_add(&node->list, &cur->list);
+ spin_unlock(&vm->lock);
+
+ return node;
+}
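+
+/*
+ * A worked example of the placement above (illustrative, 4 KiB pages):
+ * with one existing node covering [0x1000, 0x3000), a new request
+ * computes addr = PAGE_ALIGN(0x3000 + 1) = 0x4000, leaving a one-page
+ * guard gap so writes overflowing the first node do not silently
+ * corrupt the new one.
+ */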
+
+void hmm_vm_free_node(struct hmm_vm_node *node)
+{
+ struct hmm_vm *vm;
+
+ if (!node)
+ return;
+
+ vm = node->vm;
+
+ spin_lock(&vm->lock);
+ list_del(&node->list);
+ spin_unlock(&vm->lock);
+
+ kmem_cache_free(vm->cache, node);
+}
+
+struct hmm_vm_node *hmm_vm_find_node_start(struct hmm_vm *vm, unsigned int addr)
+{
+ struct hmm_vm_node *node;
+
+ if (!vm)
+ return NULL;
+
+ spin_lock(&vm->lock);
+
+ list_for_each_entry(node, &vm->vm_node_list, list) {
+ if (node->start == addr) {
+ spin_unlock(&vm->lock);
+ return node;
+ }
+ }
+
+ spin_unlock(&vm->lock);
+ return NULL;
+}
+
+struct hmm_vm_node *hmm_vm_find_node_in_range(struct hmm_vm *vm,
+ unsigned int addr)
+{
+ struct hmm_vm_node *node;
+
+ if (!vm)
+ return NULL;
+
+ spin_lock(&vm->lock);
+
+ list_for_each_entry(node, &vm->vm_node_list, list) {
+ if (addr_in_vm_node(addr, node)) {
+ spin_unlock(&vm->lock);
+ return node;
+ }
+ }
+
+ spin_unlock(&vm->lock);
+ return NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/hrt/hive_isp_css_custom_host_hrt.h b/drivers/staging/media/atomisp/pci/hrt/hive_isp_css_custom_host_hrt.h
new file mode 100644
index 000000000000..c6893d712d61
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hrt/hive_isp_css_custom_host_hrt.h
@@ -0,0 +1,106 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#ifndef _hive_isp_css_custom_host_hrt_h_
+#define _hive_isp_css_custom_host_hrt_h_
+
+#include <linux/delay.h>
+#include "atomisp_helper.h"
+
+/*
+ * The _hrt_master_port_store/load/uload macros use __force-attributed
+ * casts to intentionally dereference the __iomem-attributed (noderef)
+ * pointer returned by atomisp_get_io_virt_addr
+ */
+#define _hrt_master_port_store_8(a, d) \
+ (*((s8 __force *)atomisp_get_io_virt_addr(a)) = (d))
+
+#define _hrt_master_port_store_16(a, d) \
+ (*((s16 __force *)atomisp_get_io_virt_addr(a)) = (d))
+
+#define _hrt_master_port_store_32(a, d) \
+ (*((s32 __force *)atomisp_get_io_virt_addr(a)) = (d))
+
+#define _hrt_master_port_load_8(a) \
+ (*(s8 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_load_16(a) \
+ (*(s16 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_load_32(a) \
+ (*(s32 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_uload_8(a) \
+ (*(u8 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_uload_16(a) \
+ (*(u16 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_uload_32(a) \
+ (*(u32 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_store_8_volatile(a, d) _hrt_master_port_store_8(a, d)
+#define _hrt_master_port_store_16_volatile(a, d) _hrt_master_port_store_16(a, d)
+#define _hrt_master_port_store_32_volatile(a, d) _hrt_master_port_store_32(a, d)
+
+#define _hrt_master_port_load_8_volatile(a) _hrt_master_port_load_8(a)
+#define _hrt_master_port_load_16_volatile(a) _hrt_master_port_load_16(a)
+#define _hrt_master_port_load_32_volatile(a) _hrt_master_port_load_32(a)
+
+#define _hrt_master_port_uload_8_volatile(a) _hrt_master_port_uload_8(a)
+#define _hrt_master_port_uload_16_volatile(a) _hrt_master_port_uload_16(a)
+#define _hrt_master_port_uload_32_volatile(a) _hrt_master_port_uload_32(a)
+
+static inline void hrt_sleep(void)
+{
+ udelay(1);
+}
+
+static inline u32 _hrt_mem_store(u32 to, const void *from, size_t n)
+{
+ unsigned int i;
+ u32 _to = to;
+ const char *_from = (const char *)from;
+
+ for (i = 0; i < n; i++, _to++, _from++)
+ _hrt_master_port_store_8(_to, *_from);
+ return _to;
+}
+
+static inline void *_hrt_mem_load(u32 from, void *to, size_t n)
+{
+ unsigned int i;
+ char *_to = (char *)to;
+ u32 _from = from;
+
+ for (i = 0; i < n; i++, _to++, _from++)
+ *_to = _hrt_master_port_load_8(_from);
+ return _to;
+}
+
+static inline u32 _hrt_mem_set(u32 to, int c, size_t n)
+{
+ unsigned int i;
+ u32 _to = to;
+
+ for (i = 0; i < n; i++, _to++)
+ _hrt_master_port_store_8(_to, c);
+ return _to;
+}
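+
+/*
+ * Illustrative round trip through the helpers above ('isp_addr' is a
+ * placeholder ISP-side address):
+ *
+ *	u8 out[4];
+ *	static const u8 in[4] = { 1, 2, 3, 4 };
+ *
+ *	_hrt_mem_store(isp_addr, in, sizeof(in));
+ *	_hrt_mem_load(isp_addr, out, sizeof(out));
+ */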
+
+#endif /* _hive_isp_css_custom_host_hrt_h_ */
diff --git a/drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.c b/drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.c
new file mode 100644
index 000000000000..236f27b50386
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.c
@@ -0,0 +1,124 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#include "atomisp_internal.h"
+
+#include "hive_isp_css_mm_hrt.h"
+#include "hmm/hmm.h"
+
+#define __page_align(size) (((size) + (PAGE_SIZE - 1)) & (~(PAGE_SIZE - 1)))
+
+static void __user *my_userptr;
+static unsigned int my_num_pages;
+static enum hrt_userptr_type my_usr_type;
+
+void hrt_isp_css_mm_set_user_ptr(void __user *userptr,
+ unsigned int num_pages,
+ enum hrt_userptr_type type)
+{
+ my_userptr = userptr;
+ my_num_pages = num_pages;
+ my_usr_type = type;
+}
+
+static ia_css_ptr __hrt_isp_css_mm_alloc(size_t bytes,
+ const void __user *userptr,
+ unsigned int num_pages,
+ enum hrt_userptr_type type,
+ bool cached)
+{
+#ifdef CONFIG_ION
+ if (type == HRT_USR_ION)
+ return hmm_alloc(bytes, HMM_BO_ION, 0,
+ userptr, cached);
+
+#endif
+ if (type == HRT_USR_PTR) {
+ if (!userptr)
+ return hmm_alloc(bytes, HMM_BO_PRIVATE, 0,
+ NULL, cached);
+ else {
+ if (num_pages < ((__page_align(bytes)) >> PAGE_SHIFT))
+ dev_err(atomisp_dev,
+ "user space memory size is less than the expected size..\n");
+ else if (num_pages > ((__page_align(bytes))
+ >> PAGE_SHIFT))
+ dev_err(atomisp_dev,
+ "user space memory size is large than the expected size..\n");
+
+ return hmm_alloc(bytes, HMM_BO_USER, 0,
+ userptr, cached);
+ }
+ } else {
+ dev_err(atomisp_dev, "user ptr type is incorrect.\n");
+ return 0;
+ }
+}
+
+ia_css_ptr hrt_isp_css_mm_alloc(size_t bytes)
+{
+ return __hrt_isp_css_mm_alloc(bytes, my_userptr,
+ my_num_pages, my_usr_type, false);
+}
+
+ia_css_ptr hrt_isp_css_mm_alloc_user_ptr(size_t bytes,
+ const void __user *userptr,
+ unsigned int num_pages,
+ enum hrt_userptr_type type,
+ bool cached)
+{
+ return __hrt_isp_css_mm_alloc(bytes, userptr, num_pages,
+ type, cached);
+}
+
+ia_css_ptr hrt_isp_css_mm_alloc_cached(size_t bytes)
+{
+ if (!my_userptr)
+ return hmm_alloc(bytes, HMM_BO_PRIVATE, 0, NULL,
+ HMM_CACHED);
+ else {
+ if (my_num_pages < ((__page_align(bytes)) >> PAGE_SHIFT))
+ dev_err(atomisp_dev,
+ "user space memory size is less than the expected size..\n");
+ else if (my_num_pages > ((__page_align(bytes)) >> PAGE_SHIFT))
+ dev_err(atomisp_dev,
+ "user space memory size is large than the expected size..\n");
+
+ return hmm_alloc(bytes, HMM_BO_USER, 0,
+ my_userptr, HMM_CACHED);
+ }
+}
+
+ia_css_ptr hrt_isp_css_mm_calloc(size_t bytes)
+{
+ ia_css_ptr ptr = hrt_isp_css_mm_alloc(bytes);
+
+ if (ptr)
+ hmm_set(ptr, 0, bytes);
+ return ptr;
+}
+
+ia_css_ptr hrt_isp_css_mm_calloc_cached(size_t bytes)
+{
+ ia_css_ptr ptr = hrt_isp_css_mm_alloc_cached(bytes);
+
+ if (ptr)
+ hmm_set(ptr, 0, bytes);
+ return ptr;
+}
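+
+/*
+ * Illustrative userptr flow (the pointer and page count come from the
+ * caller; the values here are placeholders):
+ *
+ *	hrt_isp_css_mm_set_user_ptr(userptr, num_pages, HRT_USR_PTR);
+ *	ptr = hrt_isp_css_mm_alloc(bytes);
+ *	hrt_isp_css_mm_set_user_ptr(NULL, 0, HRT_USR_PTR);
+ *
+ * Clearing the stored pointer afterwards keeps later allocations from
+ * accidentally reusing it.
+ */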
diff --git a/drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.h b/drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.h
new file mode 100644
index 000000000000..818ecf90b1f5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.h
@@ -0,0 +1,57 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+
+#ifndef _hive_isp_css_mm_hrt_h_
+#define _hive_isp_css_mm_hrt_h_
+
+#include <hmm/hmm.h>
+#include <hrt/hive_isp_css_custom_host_hrt.h>
+
+#define HRT_BUF_FLAG_CACHED BIT(0)
+
+enum hrt_userptr_type {
+ HRT_USR_PTR = 0,
+#ifdef CONFIG_ION
+ HRT_USR_ION,
+#endif
+};
+
+struct hrt_userbuffer_attr {
+ enum hrt_userptr_type type;
+ unsigned int pgnr;
+};
+
+void hrt_isp_css_mm_set_user_ptr(void __user *userptr,
+ unsigned int num_pages, enum hrt_userptr_type);
+
+/* Allocate memory, returns a virtual address */
+ia_css_ptr hrt_isp_css_mm_alloc(size_t bytes);
+ia_css_ptr hrt_isp_css_mm_alloc_user_ptr(size_t bytes,
+ const void __user *userptr,
+ unsigned int num_pages,
+ enum hrt_userptr_type,
+ bool cached);
+ia_css_ptr hrt_isp_css_mm_alloc_cached(size_t bytes);
+
+/* allocate memory and initialize with zeros,
+ returns a virtual address */
+ia_css_ptr hrt_isp_css_mm_calloc(size_t bytes);
+ia_css_ptr hrt_isp_css_mm_calloc_cached(size_t bytes);
+
+#endif /* _hive_isp_css_mm_hrt_h_ */
diff --git a/drivers/staging/media/atomisp/pci/ia_css.h b/drivers/staging/media/atomisp/pci/ia_css.h
new file mode 100644
index 000000000000..e44df6916d90
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css.h
@@ -0,0 +1,57 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_H_
+#define _IA_CSS_H_
+
+/* @file
+ * This file is the starting point of the CSS-API. It includes all CSS-API
+ * header files.
+ */
+
+#include "ia_css_3a.h"
+#include "ia_css_acc_types.h"
+#include "ia_css_buffer.h"
+#include "ia_css_control.h"
+#include "ia_css_device_access.h"
+#include "ia_css_dvs.h"
+#include "ia_css_env.h"
+#include "ia_css_err.h"
+#include "ia_css_event_public.h"
+#include "ia_css_firmware.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_input_port.h"
+#include "ia_css_irq.h"
+#include "ia_css_metadata.h"
+#include "ia_css_mipi.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_prbs.h"
+#include "ia_css_properties.h"
+#include "ia_css_stream_format.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_tpg.h"
+#include "ia_css_version.h"
+#include "ia_css_mmu.h"
+#include "ia_css_morph.h"
+#include "ia_css_shading.h"
+#include "ia_css_timer.h"
+
+/*
+ Please do not add code to this file. Public functionality is to be
+ exposed in a function/data type specific header file.
+ Please add to the appropriate header file or create a new one.
+ */
+
+#endif /* _IA_CSS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_3a.h b/drivers/staging/media/atomisp/pci/ia_css_3a.h
new file mode 100644
index 000000000000..a79941a2e0f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_3a.h
@@ -0,0 +1,189 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_3A_H
+#define __IA_CSS_3A_H
+
+/* @file
+ * This file contains types used for 3A statistics
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_err.h"
+#include "system_global.h"
+
+enum ia_css_3a_tables {
+ IA_CSS_S3A_TBL_HI,
+ IA_CSS_S3A_TBL_LO,
+ IA_CSS_RGBY_TBL,
+ IA_CSS_NUM_3A_TABLES
+};
+
+/* Structure that holds 3A statistics in the ISP internal
+ * format. Use ia_css_get_3a_statistics() to translate
+ * this to the format used on the host (3A library).
+ * */
+struct ia_css_isp_3a_statistics {
+ union {
+ struct {
+ ia_css_ptr s3a_tbl;
+ } dmem;
+ struct {
+ ia_css_ptr s3a_tbl_hi;
+ ia_css_ptr s3a_tbl_lo;
+ } vmem;
+ } data;
+ struct {
+ ia_css_ptr rgby_tbl;
+ } data_hmem;
+ u32 exp_id; /** exposure id, to match statistics to a frame,
+ see ia_css_event_public.h for more detail. */
+ u32 isp_config_id;/** Unique ID to track which config was actually applied to a particular frame */
+ ia_css_ptr data_ptr; /** pointer to base of all data */
+ u32 size; /** total size of all data */
+ u32 dmem_size;
+ u32 vmem_size; /** both lo and hi have this size */
+ u32 hmem_size;
+};
+
+#define SIZE_OF_DMEM_STRUCT \
+ (SIZE_OF_IA_CSS_PTR)
+
+#define SIZE_OF_VMEM_STRUCT \
+ (2 * SIZE_OF_IA_CSS_PTR)
+
+#define SIZE_OF_DATA_UNION \
+ (MAX(SIZE_OF_DMEM_STRUCT, SIZE_OF_VMEM_STRUCT))
+
+#define SIZE_OF_DATA_HMEM_STRUCT \
+ (SIZE_OF_IA_CSS_PTR)
+
+#define SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT \
+ (SIZE_OF_DATA_UNION + \
+ SIZE_OF_DATA_HMEM_STRUCT + \
+ sizeof(uint32_t) + \
+ sizeof(uint32_t) + \
+ SIZE_OF_IA_CSS_PTR + \
+ 4 * sizeof(uint32_t))
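+
+/* The SIZE_OF_* macros above mirror struct ia_css_isp_3a_statistics
+ * field by field so that host and firmware can agree on its size
+ * without sharing compiler settings. A sketch of a compile-time
+ * consistency check (C11 _Static_assert shown for illustration):
+ *
+ * _Static_assert(sizeof(struct ia_css_isp_3a_statistics) ==
+ *                SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT,
+ *                "host/SP 3A statistics layout mismatch");
+ */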
+
+/* Map with host-side pointers to ISP-format statistics.
+ * These pointers can either be copies of ISP data or memory mapped
+ * ISP pointers.
+ * All of the data behind these pointers is allocated contiguously, the
+ * allocated pointer is stored in the data_ptr field. The other fields
+ * point into this one block of data.
+ */
+struct ia_css_isp_3a_statistics_map {
+ void *data_ptr; /** Pointer to start of memory */
+ struct ia_css_3a_output *dmem_stats;
+ u16 *vmem_stats_hi;
+ u16 *vmem_stats_lo;
+ struct ia_css_bh_table *hmem_stats;
+ u32 size; /** total size in bytes of data_ptr */
+ u32 data_allocated; /** indicate whether data_ptr
+ was allocated or not. */
+};
+
+/* @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
+ * @param[out] host_stats Host buffer.
+ * @param[in] isp_stats ISP buffer.
+ * @return error value if temporary memory cannot be allocated
+ *
+ * This copies 3a statistics from an ISP pointer to a host pointer and then
+ * translates some of the statistics, details depend on which ISP binary is
+ * used.
+ * Always use this function, never copy the buffer directly.
+ */
+enum ia_css_err
+ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics *isp_stats);
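+
+/* Example (hypothetical caller; "grid" and handle_error() are
+ * illustrative, the allocators are declared further down in this file):
+ *
+ * struct ia_css_3a_statistics *host =
+ *         ia_css_3a_statistics_allocate(grid);
+ * struct ia_css_isp_3a_statistics *isp =
+ *         ia_css_isp_3a_statistics_allocate(grid);
+ * ...the ISP fills "isp" and signals IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE...
+ * if (ia_css_get_3a_statistics(host, isp) != IA_CSS_SUCCESS)
+ *         handle_error();
+ */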
+
+/* @brief Translate 3A statistics from ISP format to host format.
+ * @param[out] host_stats host-format statistics
+ * @param[in] isp_stats ISP-format statistics
+ * @return None
+ *
+ * This function translates statistics from the internal ISP-format to
+ * the host-format. This function does not include an additional copy
+ * step.
+ * */
+void
+ia_css_translate_3a_statistics(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics_map *isp_stats);
+
+/* Convenience functions for alloc/free of certain datatypes */
+
+/* @brief Allocate memory for the 3a statistics on the ISP
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated 3a statistics buffer on the ISP
+*/
+struct ia_css_isp_3a_statistics *
+ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
+
+/* @brief Free the 3a statistics memory on the isp
+ * @param[in] me Pointer to the 3a statistics buffer on the ISP.
+ * @return None
+*/
+void
+ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me);
+
+/* @brief Allocate memory for the 3a statistics on the host
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated 3a statistics buffer on the host
+*/
+struct ia_css_3a_statistics *
+ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
+
+/* @brief Free the 3a statistics memory on the host
+ * @param[in] me Pointer to the 3a statistics buffer on the host.
+ * @return None
+ */
+void
+ia_css_3a_statistics_free(struct ia_css_3a_statistics *me);
+
+/* @brief Allocate a 3a statistics map structure
+ * @param[in]	isp_stats	pointer to ISP 3a statistics struct
+ * @param[in] data_ptr host-side pointer to ISP 3a statistics.
+ * @return Pointer to the allocated 3a statistics map
+ *
+ * This function allocates the ISP 3a statistics map structure
+ * and uses the data_ptr as base pointer to set the appropriate
+ * pointers to all relevant subsets of the 3a statistics (dmem,
+ * vmem, hmem).
+ * If the data_ptr is NULL, this function will allocate the host-side
+ * memory. This information is stored in the struct and used in the
+ * ia_css_isp_3a_statistics_map_free() function to determine whether
+ * the memory should be freed or not.
+ * Note that this function does not allocate or map any ISP
+ * memory.
+*/
+struct ia_css_isp_3a_statistics_map *
+ia_css_isp_3a_statistics_map_allocate(
+ const struct ia_css_isp_3a_statistics *isp_stats,
+ void *data_ptr);
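+
+/* Example of the memory-mapped path (hypothetical; "mapped" stands for
+ * a host-side mapping of the ISP statistics pages):
+ *
+ * struct ia_css_isp_3a_statistics_map *map =
+ *         ia_css_isp_3a_statistics_map_allocate(isp_stats, mapped);
+ * if (map) {
+ *         ia_css_translate_3a_statistics(host_stats, map);
+ *         ia_css_isp_3a_statistics_map_free(map);
+ * }
+ */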
+
+/* @brief Free the 3a statistics map
+ * @param[in] me Pointer to the 3a statistics map
+ * @return None
+ *
+ * This function frees the map struct. If the data_ptr inside it
+ * was allocated inside ia_css_isp_3a_statistics_map_allocate(), it
+ * will be freed in this function. Otherwise it will not be freed.
+ */
+void
+ia_css_isp_3a_statistics_map_free(struct ia_css_isp_3a_statistics_map *me);
+
+#endif /* __IA_CSS_3A_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
new file mode 100644
index 000000000000..d281846eeba5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
@@ -0,0 +1,476 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_ACC_TYPES_H
+#define _IA_CSS_ACC_TYPES_H
+
+/* @file
+ * This file contains types used for acceleration
+ */
+
+#include <system_types.h> /* HAS_IRQ_MAP_VERSION_# */
+#include <type_support.h>
+#include <platform_support.h>
+#include <debug_global.h>
+
+#include "ia_css_types.h"
+#include "ia_css_frame_format.h"
+
+/* Should be included without the path.
+ However, that requires adding the path to numerous makefiles
+ that have nothing to do with isp parameters.
+ */
+#include "runtime/isp_param/interface/ia_css_isp_param_types.h"
+
+/* Types for the acceleration API.
+ * These should be moved to sh_css_internal.h once the old acceleration
+ * argument handling has been completed.
+ * After that, interpretation of these structures is no longer needed
+ * in the kernel and HAL.
+*/
+
+/* Type of acceleration.
+ */
+enum ia_css_acc_type {
+ IA_CSS_ACC_NONE, /** Normal binary */
+ IA_CSS_ACC_OUTPUT, /** Accelerator stage on output frame */
+ IA_CSS_ACC_VIEWFINDER, /** Accelerator stage on viewfinder frame */
+ IA_CSS_ACC_STANDALONE, /** Stand-alone acceleration */
+};
+
+/* Cells types
+ */
+enum ia_css_cell_type {
+ IA_CSS_SP0 = 0,
+ IA_CSS_SP1,
+ IA_CSS_ISP,
+ MAX_NUM_OF_CELLS
+};
+
+/* Firmware types.
+ */
+enum ia_css_fw_type {
+ ia_css_sp_firmware, /** Firmware for the SP */
+ ia_css_isp_firmware, /** Firmware for the ISP */
+ ia_css_bootloader_firmware, /** Firmware for the BootLoader */
+	ia_css_acc_firmware		/** Firmware for accelerations */
+};
+
+struct ia_css_blob_descr;
+
+/* Blob descriptor.
+ * This structure describes an SP or ISP blob.
+ * It describes the text, data and bss sections as well as position in a
+ * firmware file.
+ * For convenience, it contains dynamic data after loading.
+ */
+struct ia_css_blob_info {
+ /** Static blob data */
+ u32 offset; /** Blob offset in fw file */
+ struct ia_css_isp_param_memory_offsets
+ memory_offsets; /** offset wrt hdr in bytes */
+ u32 prog_name_offset; /** offset wrt hdr in bytes */
+ u32 size; /** Size of blob */
+	u32 padding_size; /** total cumulative number of bytes added due to section alignment */
+ u32 icache_source; /** Position of icache in blob */
+ u32 icache_size; /** Size of icache section */
+ u32 icache_padding;/** bytes added due to icache section alignment */
+ u32 text_source; /** Position of text in blob */
+ u32 text_size; /** Size of text section */
+ u32 text_padding; /** bytes added due to text section alignment */
+ u32 data_source; /** Position of data in blob */
+ u32 data_target; /** Start of data in SP dmem */
+	u32 data_size;		/** Size of data section */
+ u32 data_padding; /** bytes added due to data section alignment */
+ u32 bss_target; /** Start position of bss in SP dmem */
+ u32 bss_size; /** Size of bss section */
+ /** Dynamic data filled by loader */
+ CSS_ALIGN(const void *code,
+ 8); /** Code section absolute pointer within fw, code = icache + text */
+ CSS_ALIGN(const void *data,
+ 8); /** Data section absolute pointer within fw, data = data + bss */
+};
+
+struct ia_css_binary_input_info {
+ u32 min_width;
+ u32 min_height;
+ u32 max_width;
+ u32 max_height;
+ u32 source; /* memory, sensor, variable */
+};
+
+struct ia_css_binary_output_info {
+ u32 min_width;
+ u32 min_height;
+ u32 max_width;
+ u32 max_height;
+ u32 num_chunks;
+ u32 variable_format;
+};
+
+struct ia_css_binary_internal_info {
+ u32 max_width;
+ u32 max_height;
+};
+
+struct ia_css_binary_bds_info {
+ u32 supported_bds_factors;
+};
+
+struct ia_css_binary_dvs_info {
+ u32 max_envelope_width;
+ u32 max_envelope_height;
+};
+
+struct ia_css_binary_vf_dec_info {
+ u32 is_variable;
+ u32 max_log_downscale;
+};
+
+struct ia_css_binary_s3a_info {
+ u32 s3atbl_use_dmem;
+ u32 fixed_s3a_deci_log;
+};
+
+/* DPC related binary info */
+struct ia_css_binary_dpc_info {
+ u32 bnr_lite; /** bnr lite enable flag */
+};
+
+struct ia_css_binary_iterator_info {
+ u32 num_stripes;
+ u32 row_stripes_height;
+ u32 row_stripes_overlap_lines;
+};
+
+struct ia_css_binary_address_info {
+ u32 isp_addresses; /* Address in ISP dmem */
+ u32 main_entry; /* Address of entry fct */
+ u32 in_frame; /* Address in ISP dmem */
+ u32 out_frame; /* Address in ISP dmem */
+ u32 in_data; /* Address in ISP dmem */
+ u32 out_data; /* Address in ISP dmem */
+ u32 sh_dma_cmd_ptr; /* In ISP dmem */
+};
+
+struct ia_css_binary_uds_info {
+ u16 bpp;
+ u16 use_bci;
+ u16 use_str;
+ u16 woix;
+ u16 woiy;
+ u16 extra_out_vecs;
+ u16 vectors_per_line_in;
+ u16 vectors_per_line_out;
+ u16 vectors_c_per_line_in;
+ u16 vectors_c_per_line_out;
+ u16 vmem_gdc_in_block_height_y;
+ u16 vmem_gdc_in_block_height_c;
+ /* uint16_t padding; */
+};
+
+struct ia_css_binary_pipeline_info {
+ u32 mode;
+ u32 isp_pipe_version;
+ u32 pipelining;
+ u32 c_subsampling;
+ u32 top_cropping;
+ u32 left_cropping;
+ u32 variable_resolution;
+};
+
+struct ia_css_binary_block_info {
+ u32 block_width;
+ u32 block_height;
+ u32 output_block_height;
+};
+
+/* Structure describing an ISP binary.
+ * It describes the capabilities of a binary, like the maximum resolution,
+ * support features, dma channels, uds features, etc.
+ * This part is to be used by the SP.
+ * Future refactoring should move binary properties to ia_css_binary_xinfo,
+ * thereby making the SP code more binary independent.
+ */
+struct ia_css_binary_info {
+ CSS_ALIGN(u32 id, 8); /* IA_CSS_BINARY_ID_* */
+ struct ia_css_binary_pipeline_info pipeline;
+ struct ia_css_binary_input_info input;
+ struct ia_css_binary_output_info output;
+ struct ia_css_binary_internal_info internal;
+ struct ia_css_binary_bds_info bds;
+ struct ia_css_binary_dvs_info dvs;
+ struct ia_css_binary_vf_dec_info vf_dec;
+ struct ia_css_binary_s3a_info s3a;
+ struct ia_css_binary_dpc_info dpc_bnr; /** DPC related binary info */
+ struct ia_css_binary_iterator_info iterator;
+ struct ia_css_binary_address_info addresses;
+ struct ia_css_binary_uds_info uds;
+ struct ia_css_binary_block_info block;
+ struct ia_css_isp_param_isp_segments mem_initializers;
+ /* MW: Packing (related) bools in an integer ?? */
+ struct {
+ /* ISP2401 */
+ u8 luma_only;
+ u8 input_yuv;
+ u8 input_raw;
+
+ u8 reduced_pipe;
+ u8 vf_veceven;
+ u8 dis;
+ u8 dvs_envelope;
+ u8 uds;
+ u8 dvs_6axis;
+ u8 block_output;
+ u8 streaming_dma;
+ u8 ds;
+ u8 bayer_fir_6db;
+ u8 raw_binning;
+ u8 continuous;
+ u8 s3a;
+ u8 fpnr;
+ u8 sc;
+ u8 macc;
+ u8 output;
+ u8 ref_frame;
+ u8 tnr;
+ u8 xnr;
+ u8 params;
+ u8 ca_gdc;
+ u8 isp_addresses;
+ u8 in_frame;
+ u8 out_frame;
+ u8 high_speed;
+ u8 dpc;
+ u8 padding[2];
+ } enable;
+ struct {
+ /* DMA channel ID: [0,...,HIVE_ISP_NUM_DMA_CHANNELS> */
+ u8 ref_y_channel;
+ u8 ref_c_channel;
+ u8 tnr_channel;
+ u8 tnr_out_channel;
+ u8 dvs_coords_channel;
+ u8 output_channel;
+ u8 c_channel;
+ u8 vfout_channel;
+ u8 vfout_c_channel;
+ u8 vfdec_bits_per_pixel;
+ u8 claimed_by_isp;
+ u8 padding[2];
+ } dma;
+};
+
+/* Structure describing an ISP binary.
+ * It describes the capabilities of a binary, like the maximum resolution,
+ * support features, dma channels, uds features, etc.
+ */
+struct ia_css_binary_xinfo {
+ /* Part that is of interest to the SP. */
+ struct ia_css_binary_info sp;
+
+ /* Rest of the binary info, only interesting to the host. */
+ enum ia_css_acc_type type;
+
+ CSS_ALIGN(s32 num_output_formats, 8);
+ enum ia_css_frame_format output_formats[IA_CSS_FRAME_FORMAT_NUM];
+
+ CSS_ALIGN(s32 num_vf_formats, 8); /** number of supported vf formats */
+ enum ia_css_frame_format
+ vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /** types of supported vf formats */
+ u8 num_output_pins;
+ ia_css_ptr xmem_addr;
+
+ CSS_ALIGN(const struct ia_css_blob_descr *blob, 8);
+ CSS_ALIGN(u32 blob_index, 8);
+ CSS_ALIGN(union ia_css_all_memory_offsets mem_offsets, 8);
+ CSS_ALIGN(struct ia_css_binary_xinfo *next, 8);
+};
+
+/* Structure describing the Bootloader (an ISP binary).
+ * It contains several addresses, either in ddr, isp_dmem or
+ * the entry function in icache.
+ */
+struct ia_css_bl_info {
+ u32 num_dma_cmds; /** Number of cmds sent by CSS */
+ u32 dma_cmd_list; /** Dma command list sent by CSS */
+ u32 sw_state; /** Polled from css */
+ /* Entry functions */
+ u32 bl_entry; /** The SP entry function */
+};
+
+/* Structure describing the SP binary.
+ * It contains several addresses, either in ddr, sp_dmem or
+ * the entry function in pmem.
+ */
+struct ia_css_sp_info {
+ u32 init_dmem_data; /** data sect config, stored to dmem */
+ u32 per_frame_data; /** Per frame data, stored to dmem */
+ u32 group; /** Per pipeline data, loaded by dma */
+ u32 output; /** SP output data, loaded by dmem */
+ u32 host_sp_queue; /** Host <-> SP queues */
+ u32 host_sp_com;/** Host <-> SP commands */
+ u32 isp_started; /** Polled from sensor thread, csim only */
+ u32 sw_state; /** Polled from css */
+ u32 host_sp_queues_initialized; /** Polled from the SP */
+ u32 sleep_mode; /** different mode to halt SP */
+ u32 invalidate_tlb; /** inform SP to invalidate mmu TLB */
+
+ /* ISP2400 */
+ u32 stop_copy_preview; /** suspend copy and preview pipe when capture */
+
+ u32 debug_buffer_ddr_address; /** inform SP the address
+ of DDR debug queue */
+ u32 perf_counter_input_system_error; /** input system perf
+ counter array */
+#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
+ u32 debug_wait; /** thread/pipe post mortem debug */
+ u32 debug_stage; /** thread/pipe post mortem debug */
+ u32 debug_stripe; /** thread/pipe post mortem debug */
+#endif
+ u32 threads_stack; /** sp thread's stack pointers */
+ u32 threads_stack_size; /** sp thread's stack sizes */
+ u32 curr_binary_id; /** current binary id */
+ u32 raw_copy_line_count; /** raw copy line counter */
+ u32 ddr_parameter_address; /** acc param ddrptr, sp dmem */
+ u32 ddr_parameter_size; /** acc param size, sp dmem */
+ /* Entry functions */
+ u32 sp_entry; /** The SP entry function */
+ u32 tagger_frames_addr; /** Base address of tagger state */
+};
+
+/* The following #if is there because this header file is also included
+   by SP and ISP code but they do not need this data and HIVECC has
+   alignment issues with the firmware structs/unions.
+   A more permanent solution will be to refactor this include.
+*/
+
+/* Accelerator firmware information.
+ */
+struct ia_css_acc_info {
+ u32 per_frame_data; /** Dummy for now */
+};
+
+/* Firmware information.
+ */
+union ia_css_fw_union {
+ struct ia_css_binary_xinfo isp; /** ISP info */
+ struct ia_css_sp_info sp; /** SP info */
+ struct ia_css_bl_info bl; /** Bootloader info */
+ struct ia_css_acc_info acc; /** Accelerator info */
+};
+
+/* Firmware information.
+ */
+struct ia_css_fw_info {
+ size_t header_size; /** size of fw header */
+
+ CSS_ALIGN(u32 type, 8);
+ union ia_css_fw_union info; /** Binary info */
+ struct ia_css_blob_info blob; /** Blob info */
+ /* Dynamic part */
+ struct ia_css_fw_info *next;
+
+ CSS_ALIGN(u32 loaded, 8); /** Firmware has been loaded */
+ CSS_ALIGN(const u8 *isp_code, 8); /** ISP pointer to code */
+ /** Firmware handle between user space and kernel */
+ CSS_ALIGN(u32 handle, 8);
+ /** Sections to copy from/to ISP */
+ struct ia_css_isp_param_css_segments mem_initializers;
+ /** Initializer for local ISP memories */
+};
+
+struct ia_css_blob_descr {
+ const unsigned char *blob;
+ struct ia_css_fw_info header;
+ const char *name;
+ union ia_css_all_memory_offsets mem_offsets;
+};
+
+struct ia_css_acc_fw;
+
+/* Structure describing the SP binary of a stand-alone accelerator.
+ */
+struct ia_css_acc_sp {
+ void (*init)(struct ia_css_acc_fw *); /** init for crun */
+ u32 sp_prog_name_offset; /** program name offset wrt hdr in bytes */
+ u32 sp_blob_offset; /** blob offset wrt hdr in bytes */
+ void *entry; /** Address of sp entry point */
+ u32 *css_abort; /** SP dmem abort flag */
+ void *isp_code; /** SP dmem address holding xmem
+ address of isp code */
+ struct ia_css_fw_info fw; /** SP fw descriptor */
+ const u8 *code; /** ISP pointer of allocated SP code */
+};
+
+/* Acceleration firmware descriptor.
+ * This descriptor describes either SP code (stand-alone), or
+ * ISP code (a separate pipeline stage).
+ */
+struct ia_css_acc_fw_hdr {
+ enum ia_css_acc_type type; /** Type of accelerator */
+ u32 isp_prog_name_offset; /** program name offset wrt
+ header in bytes */
+ u32 isp_blob_offset; /** blob offset wrt header
+ in bytes */
+ u32 isp_size; /** Size of isp blob */
+ const u8 *isp_code; /** ISP pointer to code */
+ struct ia_css_acc_sp sp; /** Standalone sp code */
+ /** Firmware handle between user space and kernel */
+ u32 handle;
+ struct ia_css_data parameters; /** Current SP parameters */
+};
+
+/* Firmware structure.
+ * This contains the header and actual blobs.
+ * For standalone, it contains SP and ISP blob.
+ * For a pipeline stage accelerator, it contains ISP code only.
+ * Since its members are variable size, their offsets are described in the
+ * header and computed using the access macros below.
+ */
+struct ia_css_acc_fw {
+ struct ia_css_acc_fw_hdr header; /** firmware header */
+ /*
+ int8_t isp_progname[]; **< ISP program name
+ int8_t sp_progname[]; **< SP program name, stand-alone only
+ uint8_t sp_code[]; **< SP blob, stand-alone only
+ uint8_t isp_code[]; **< ISP blob
+ */
+};
+
+/* Access macros for firmware */
+#define IA_CSS_ACC_OFFSET(t, f, n) ((t)((uint8_t *)(f) + (f->header.n)))
+#define IA_CSS_ACC_SP_PROG_NAME(f) IA_CSS_ACC_OFFSET(const char *, f, \
+ sp.sp_prog_name_offset)
+#define IA_CSS_ACC_ISP_PROG_NAME(f) IA_CSS_ACC_OFFSET(const char *, f, \
+ isp_prog_name_offset)
+#define IA_CSS_ACC_SP_CODE(f) IA_CSS_ACC_OFFSET(uint8_t *, f, \
+ sp.sp_blob_offset)
+#define IA_CSS_ACC_SP_DATA(f) (IA_CSS_ACC_SP_CODE(f) + \
+ (f)->header.sp.fw.blob.data_source)
+#define IA_CSS_ACC_ISP_CODE(f) IA_CSS_ACC_OFFSET(uint8_t *, f, \
+ isp_blob_offset)
+#define IA_CSS_ACC_ISP_SIZE(f) ((f)->header.isp_size)
+
+/* Binary name follows header immediately */
+#define IA_CSS_EXT_ISP_PROG_NAME(f) ((const char *)(f) + (f)->blob.prog_name_offset)
+#define IA_CSS_EXT_ISP_MEM_OFFSETS(f) \
+ ((const struct ia_css_memory_offsets *)((const char *)(f) + (f)->blob.mem_offsets))
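+
+/* Example of walking a loaded accelerator firmware image with the
+ * macros above (hypothetical; "fw" points at a complete blob):
+ *
+ * struct ia_css_acc_fw *fw = ...;
+ * const char *name = IA_CSS_ACC_ISP_PROG_NAME(fw);
+ * const uint8_t *isp_blob = IA_CSS_ACC_ISP_CODE(fw);
+ * uint32_t isp_bytes = IA_CSS_ACC_ISP_SIZE(fw);
+ *
+ * Each macro resolves to fw plus an offset stored in the header, which
+ * is why the variable-sized members need no fixed positions.
+ */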
+
+enum ia_css_sp_sleep_mode {
+ SP_DISABLE_SLEEP_MODE = 0,
+ SP_SLEEP_AFTER_FRAME = 1 << 0,
+ SP_SLEEP_AFTER_IRQ = 1 << 1
+};
+#endif /* _IA_CSS_ACC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_buffer.h b/drivers/staging/media/atomisp/pci/ia_css_buffer.h
new file mode 100644
index 000000000000..38e1f4791029
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_buffer.h
@@ -0,0 +1,85 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BUFFER_H
+#define __IA_CSS_BUFFER_H
+
+/* @file
+ * This file contains datastructures and types for buffers used in CSS
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_timer.h"
+
+/* Enumeration of buffer types. Buffers can be queued and de-queued
+ * to hand them over between IA and ISP.
+ */
+enum ia_css_buffer_type {
+ IA_CSS_BUFFER_TYPE_INVALID = -1,
+ IA_CSS_BUFFER_TYPE_3A_STATISTICS = 0,
+ IA_CSS_BUFFER_TYPE_DIS_STATISTICS,
+ IA_CSS_BUFFER_TYPE_LACE_STATISTICS,
+ IA_CSS_BUFFER_TYPE_INPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_CUSTOM_INPUT,
+ IA_CSS_BUFFER_TYPE_CUSTOM_OUTPUT,
+ IA_CSS_BUFFER_TYPE_METADATA,
+ IA_CSS_BUFFER_TYPE_PARAMETER_SET,
+ IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET,
+ IA_CSS_NUM_DYNAMIC_BUFFER_TYPE,
+ IA_CSS_NUM_BUFFER_TYPE
+};
+
+/* Driver API is not SP/ISP visible, 64 bit types not supported on hivecc */
+
+/* Buffer structure. This is a container structure that enables content
+ * independent buffer queues and access functions.
+ */
+struct ia_css_buffer {
+ enum ia_css_buffer_type type; /** Buffer type. */
+ unsigned int exp_id;
+	/** exposure id for this buffer; 0 = not available;
+ see ia_css_event_public.h for more detail. */
+ union {
+ struct ia_css_isp_3a_statistics
+ *stats_3a; /** 3A statistics & optionally RGBY statistics. */
+ struct ia_css_isp_dvs_statistics *stats_dvs; /** DVS statistics. */
+ struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs; /** SKC DVS statistics. */
+ struct ia_css_frame *frame; /** Frame buffer. */
+ struct ia_css_acc_param *custom_data; /** Custom buffer. */
+ struct ia_css_metadata *metadata; /** Sensor metadata. */
+ } data; /** Buffer data pointer. */
+ u64 driver_cookie; /** cookie for the driver */
+ struct ia_css_time_meas
+ timing_data; /** timing data (readings from the timer) */
+ struct ia_css_clock_tick
+	isys_eof_clock_tick; /** ISYS's end of frame timer tick */
+};
+
+/* @brief Dequeue param buffers from sp2host_queue
+ *
+ * @return None
+ *
+ * This function must be called in every driver interrupt handler to prevent
+ * overflow of sp2host_queue.
+ */
+void
+ia_css_dequeue_param_buffers(void);
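+
+/* Sketch of the intended call site (hypothetical handler name; the
+ * real driver wires this into its interrupt handling path):
+ *
+ * irqreturn_t css_irq_handler(int irq, void *dev)
+ * {
+ *         ...read and clear the CSS interrupt status...
+ *         ia_css_dequeue_param_buffers();
+ *         return IRQ_HANDLED;
+ * }
+ */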
+
+#endif /* __IA_CSS_BUFFER_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_control.h b/drivers/staging/media/atomisp/pci/ia_css_control.h
new file mode 100644
index 000000000000..248040b3ec07
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_control.h
@@ -0,0 +1,131 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CONTROL_H
+#define __IA_CSS_CONTROL_H
+
+/* @file
+ * This file contains functionality for starting and controlling CSS
+ */
+
+#include <type_support.h>
+#include <ia_css_env.h>
+#include <ia_css_firmware.h>
+#include <ia_css_irq.h>
+
+/* @brief Initialize the CSS API.
+ * @param[in]	dev		The device structure.
+ * @param[in]	env		Environment, provides functions to access the
+ * environment in which the CSS code runs. This is
+ * used for host side memory access and message
+ * printing. May not be NULL.
+ * @param[in] fw Firmware package containing the firmware for all
+ * predefined ISP binaries.
+ *				If fw is NULL, the firmware must be loaded
+ *				beforehand through a call to ia_css_load_firmware().
+ * @param[in] l1_base Base index (isp2400)
+ * of the L1 page table. This is a physical
+ * address or index.
+ * @param[in] irq_type The type of interrupt to be used (edge or level)
+ * @return Returns IA_CSS_ERR_INTERNAL_ERROR in case of any
+ * errors and IA_CSS_SUCCESS otherwise.
+ *
+ * This function initializes the API which includes allocating and initializing
+ * internal data structures. This also interprets the firmware package. All
+ * contents of this firmware package are copied into local data structures, so
+ * the fw pointer can be freed after this function completes.
+ */
+enum ia_css_err ia_css_init(struct device *dev,
+ const struct ia_css_env *env,
+ const struct ia_css_fw *fw,
+ u32 l1_base,
+ enum ia_css_irq_type irq_type);
+
+/* @brief Un-initialize the CSS API.
+ * @return None
+ *
+ * This function deallocates all memory that has been allocated by the CSS API.
+ * Exception: if you explicitly loaded firmware through ia_css_load_firmware
+ * you need to call ia_css_unload_firmware to deallocate the memory reserved
+ * for the firmware.
+ * After this function is called, no other CSS functions should be called
+ * with the exception of ia_css_init which will re-initialize the CSS code,
+ * ia_css_unload_firmware to unload the firmware or ia_css_load_firmware
+ * to load new firmware
+ */
+void
+ia_css_uninit(void);
+
+/* @brief Enable use of a separate queue for ISYS events.
+ *
+ * @param[in] enable: enable or disable use of separate ISYS event queues.
+ * @return error if called when SP is running.
+ *
+ * @deprecated{This is a temporary function that allows drivers to migrate to
+ * the use of the separate ISYS event queue. Once all drivers support this, it
+ * will be made the default and this function will be removed.
+ * This function should only be called when the SP is not running, calling it
+ * when the SP is running will result in an error value being returned. }
+ */
+enum ia_css_err
+ia_css_enable_isys_event_queue(bool enable);
+
+/* @brief Test whether the ISP has started.
+ *
+ * @return Boolean flag true if the ISP has started or false otherwise.
+ *
+ * Temporary function to poll whether the ISP has been started. Once it has,
+ * the sensor can also be started. */
+bool
+ia_css_isp_has_started(void);
+
+/* @brief Test whether the SP has initialized.
+ *
+ * @return Boolean flag true if the SP has initialized or false otherwise.
+ *
+ * Temporary function to poll whether the SP has been initialized. Once it has,
+ * we can enqueue buffers. */
+bool
+ia_css_sp_has_initialized(void);
+
+/* @brief Test whether the SP has terminated.
+ *
+ * @return Boolean flag true if the SP has terminated or false otherwise.
+ *
+ * Temporary function to poll whether the SP has been terminated. Once it has,
+ * we can switch mode. */
+bool
+ia_css_sp_has_terminated(void);
+
+/* @brief start SP hardware
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * It will boot the SP hardware and start multi-threading infrastructure.
+ * All threads will be started and then block on a semaphore. This function should
+ * be called before any ia_css_stream_start().
+ */
+enum ia_css_err
+ia_css_start_sp(void);
+
+/* @brief stop SP hardware
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function will terminate all threads and shut down SP. It should be
+ * called after all ia_css_stream_stop().
+ */
+enum ia_css_err
+ia_css_stop_sp(void);
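+
+/* Typical bring-up/tear-down order for the control functions above
+ * (hypothetical caller, error handling omitted; "env", "fw", "l1_base"
+ * and "irq_type" are filled in by the driver):
+ *
+ * ia_css_init(dev, &env, fw, l1_base, irq_type);
+ * ia_css_start_sp();
+ * while (!ia_css_sp_has_initialized())
+ *         ;  // or sleep and poll with a timeout
+ * ...create streams, queue and dequeue buffers...
+ * ia_css_stop_sp();
+ * while (!ia_css_sp_has_terminated())
+ *         ;
+ * ia_css_uninit();
+ */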
+
+#endif /* __IA_CSS_CONTROL_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_device_access.c b/drivers/staging/media/atomisp/pci/ia_css_device_access.c
new file mode 100644
index 000000000000..6ad8687cf08b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_device_access.c
@@ -0,0 +1,95 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_device_access.h"
+#include <type_support.h> /* for uint*, size_t */
+#include <system_types.h> /* for hrt_address */
+#include <ia_css_env.h> /* for ia_css_hw_access_env */
+#include <assert_support.h> /* for assert */
+
+static struct ia_css_hw_access_env my_env;
+
+void
+ia_css_device_access_init(const struct ia_css_hw_access_env *env)
+{
+ assert(env);
+
+ my_env = *env;
+}
+
+uint8_t
+ia_css_device_load_uint8(const hrt_address addr)
+{
+ return my_env.load_8(addr);
+}
+
+uint16_t
+ia_css_device_load_uint16(const hrt_address addr)
+{
+ return my_env.load_16(addr);
+}
+
+uint32_t
+ia_css_device_load_uint32(const hrt_address addr)
+{
+ return my_env.load_32(addr);
+}
+
+uint64_t
+ia_css_device_load_uint64(const hrt_address addr)
+{
+ assert(0);
+
+ (void)addr;
+ return 0;
+}
+
+void
+ia_css_device_store_uint8(const hrt_address addr, const uint8_t data)
+{
+ my_env.store_8(addr, data);
+}
+
+void
+ia_css_device_store_uint16(const hrt_address addr, const uint16_t data)
+{
+ my_env.store_16(addr, data);
+}
+
+void
+ia_css_device_store_uint32(const hrt_address addr, const uint32_t data)
+{
+ my_env.store_32(addr, data);
+}
+
+void
+ia_css_device_store_uint64(const hrt_address addr, const uint64_t data)
+{
+ assert(0);
+
+ (void)addr;
+ (void)data;
+}
+
+void
+ia_css_device_load(const hrt_address addr, void *data, const size_t size)
+{
+ my_env.load(addr, data, (uint32_t)size);
+}
+
+void
+ia_css_device_store(const hrt_address addr, const void *data, const size_t size)
+{
+ my_env.store(addr, data, (uint32_t)size);
+}
diff --git a/drivers/staging/media/atomisp/pci/ia_css_device_access.h b/drivers/staging/media/atomisp/pci/ia_css_device_access.h
new file mode 100644
index 000000000000..b2bf7d540b62
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_device_access.h
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_DEVICE_ACCESS_H
+#define _IA_CSS_DEVICE_ACCESS_H
+
+/* @file
+ * File containing internal functions for the CSS-API to access the CSS device.
+ */
+
+#include <type_support.h> /* for uint*, size_t */
+#include <system_types.h> /* for hrt_address */
+#include <ia_css_env.h> /* for ia_css_hw_access_env */
+
+void
+ia_css_device_access_init(const struct ia_css_hw_access_env *env);
+
+uint8_t
+ia_css_device_load_uint8(const hrt_address addr);
+
+uint16_t
+ia_css_device_load_uint16(const hrt_address addr);
+
+uint32_t
+ia_css_device_load_uint32(const hrt_address addr);
+
+uint64_t
+ia_css_device_load_uint64(const hrt_address addr);
+
+void
+ia_css_device_store_uint8(const hrt_address addr, const uint8_t data);
+
+void
+ia_css_device_store_uint16(const hrt_address addr, const uint16_t data);
+
+void
+ia_css_device_store_uint32(const hrt_address addr, const uint32_t data);
+
+void
+ia_css_device_store_uint64(const hrt_address addr, const uint64_t data);
+
+void
+ia_css_device_load(const hrt_address addr, void *data, const size_t size);
+
+void
+ia_css_device_store(const hrt_address addr, const void *data,
+ const size_t size);
+
+#endif /* _IA_CSS_DEVICE_ACCESS_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_dvs.h b/drivers/staging/media/atomisp/pci/ia_css_dvs.h
new file mode 100644
index 000000000000..e647f73c3bd6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_dvs.h
@@ -0,0 +1,297 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DVS_H
+#define __IA_CSS_DVS_H
+
+/* @file
+ * This file contains types for DVS statistics
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_err.h"
+#include "ia_css_stream_public.h"
+
+enum dvs_statistics_type {
+ DVS_STATISTICS,
+ DVS2_STATISTICS,
+ SKC_DVS_STATISTICS
+};
+
+/* Structure that holds DVS statistics in the ISP internal
+ * format. Use ia_css_get_dvs_statistics() to translate
+ * this to the format used on the host (DVS engine).
+ * */
+struct ia_css_isp_dvs_statistics {
+ ia_css_ptr hor_proj;
+ ia_css_ptr ver_proj;
+ u32 hor_size;
+ u32 ver_size;
+ u32 exp_id; /** see ia_css_event_public.h for more detail */
+ ia_css_ptr data_ptr; /* base pointer containing all memory */
+ u32 size; /* size of allocated memory in data_ptr */
+};
+
+/* Structure that holds SKC DVS statistics in the ISP internal
+ * format. Use ia_css_dvs_statistics_get() to translate this to
+ * the format used on the host.
+ * */
+struct ia_css_isp_skc_dvs_statistics;
+
+#define SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT \
+ ((3 * SIZE_OF_IA_CSS_PTR) + \
+ (4 * sizeof(uint32_t)))
+
+/* Map with host-side pointers to ISP-format statistics.
+ * These pointers can either be copies of ISP data or memory mapped
+ * ISP pointers.
+ * All of the data behind these pointers is allocated contiguously, the
+ * allocated pointer is stored in the data_ptr field. The other fields
+ * point into this one block of data.
+ */
+struct ia_css_isp_dvs_statistics_map {
+ void *data_ptr;
+ s32 *hor_proj;
+ s32 *ver_proj;
+ u32 size; /* total size in bytes */
+ u32 data_allocated; /* indicate whether data was allocated */
+};
+
+union ia_css_dvs_statistics_isp {
+ struct ia_css_isp_dvs_statistics *p_dvs_statistics_isp;
+ struct ia_css_isp_skc_dvs_statistics *p_skc_dvs_statistics_isp;
+};
+
+union ia_css_dvs_statistics_host {
+ struct ia_css_dvs_statistics *p_dvs_statistics_host;
+ struct ia_css_dvs2_statistics *p_dvs2_statistics_host;
+ struct ia_css_skc_dvs_statistics *p_skc_dvs_statistics_host;
+};
+
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return error value if temporary memory cannot be allocated
+ *
+ * This may include a translation step as well depending
+ * on the ISP version.
+ * Always use this function, never copy the buffer directly.
+ * Note that this function uses the mem_load function from the CSS
+ * environment struct.
+ * In certain environments this may be slow. In those cases it is
+ * advised to map the ISP memory into a host-side pointer and use
+ * the ia_css_translate_dvs_statistics() function instead.
+ */
+enum ia_css_err
+ia_css_get_dvs_statistics(struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
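+
+/* Example (hypothetical caller; "grid" and handle_error() are
+ * illustrative, the allocators are declared further down in this file):
+ *
+ * struct ia_css_dvs_statistics *host =
+ *         ia_css_dvs_statistics_allocate(grid);
+ * struct ia_css_isp_dvs_statistics *isp =
+ *         ia_css_isp_dvs_statistics_allocate(grid);
+ * ...the ISP produces a DVS statistics buffer into "isp"...
+ * if (ia_css_get_dvs_statistics(host, isp) != IA_CSS_SUCCESS)
+ *         handle_error();
+ */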
+
+/* @brief Translate DVS statistics from ISP format to host format
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return None
+ *
+ * This function translates the dvs statistics from the ISP-internal
+ * format to the format used by the DVS library on the CPU.
+ * This function takes a host-side pointer as input. This can either
+ * point to a copy of the data or be a memory mapped pointer to the
+ * ISP memory pages.
+ */
+void
+ia_css_translate_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+/* @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return error value if temporary memory cannot be allocated
+ *
+ * This may include a translation step as well depending
+ * on the ISP version.
+ * Always use this function, never copy the buffer directly.
+ * Note that this function uses the mem_load function from the CSS
+ * environment struct.
+ * In certain environments this may be slow. In those cases it is
+ * advised to map the ISP memory into a host-side pointer and use
+ * the ia_css_translate_dvs2_statistics() function instead.
+ */
+enum ia_css_err
+ia_css_get_dvs2_statistics(struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+/* @brief Translate DVS2 statistics from ISP format to host format
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return None
+ *
+ * This function translates the dvs2 statistics from the ISP-internal
+ * format to the format used by the DVS2 library on the CPU.
+ * This function takes a host-side pointer as input. This can either
+ * point to a copy of the data or be a memory mapped pointer to the
+ * ISP memory pages.
+ */
+void
+ia_css_translate_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
+ * @param[in] type - DVS statistics type
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return None
+ */
+void
+ia_css_dvs_statistics_get(enum dvs_statistics_type type,
+ union ia_css_dvs_statistics_host *host_stats,
+ const union ia_css_dvs_statistics_isp *isp_stats);
+
+/* @brief Allocate the DVS statistics memory on the ISP
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS statistics buffer on the ISP
+*/
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS statistics memory on the ISP
+ * @param[in] me Pointer to the DVS statistics buffer on the ISP.
+ * @return None
+*/
+void
+ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me);
+
+/* @brief Allocate the DVS 2.0 statistics memory
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS statistics buffer on the ISP
+*/
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS 2.0 statistics memory
+ * @param[in] me Pointer to the DVS statistics buffer on the ISP.
+ * @return None
+*/
+void
+ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me);
+
+/* @brief Allocate the DVS statistics memory on the host
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS statistics buffer on the host
+*/
+struct ia_css_dvs_statistics *
+ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS statistics memory on the host
+ * @param[in] me Pointer to the DVS statistics buffer on the host.
+ * @return None
+*/
+void
+ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me);
+
+/* @brief Allocate the DVS coefficients memory
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS coefficients buffer
+*/
+struct ia_css_dvs_coefficients *
+ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS coefficients memory
+ * @param[in] me Pointer to the DVS coefficients buffer.
+ * @return None
+ */
+void
+ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me);
+
+/* @brief Allocate the DVS 2.0 statistics memory on the host
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS 2.0 statistics buffer on the host
+ */
+struct ia_css_dvs2_statistics *
+ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS 2.0 statistics memory
+ * @param[in] me Pointer to the DVS 2.0 statistics buffer on the host.
+ * @return None
+*/
+void
+ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me);
+
+/* @brief Allocate the DVS 2.0 coefficients memory
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS 2.0 coefficients buffer
+*/
+struct ia_css_dvs2_coefficients *
+ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS 2.0 coefficients memory
+ * @param[in] me Pointer to the DVS 2.0 coefficients buffer.
+ * @return None
+*/
+void
+ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me);
+
+/* @brief Allocate the DVS 2.0 6-axis config memory
+ * @param[in] stream The stream.
+ * @return Pointer to the allocated DVS 6axis configuration buffer
+*/
+struct ia_css_dvs_6axis_config *
+ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream);
+
+/* @brief Free the DVS 2.0 6-axis config memory
+ * @param[in] dvs_6axis_config Pointer to the DVS 6axis configuration buffer
+ * @return None
+ */
+void
+ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config);
+
+/* @brief Allocate a dvs statistics map structure
+ * @param[in]	isp_stats	pointer to ISP dvs statistics struct
+ * @param[in] data_ptr host-side pointer to ISP dvs statistics.
+ * @return Pointer to the allocated dvs statistics map
+ *
+ * This function allocates the ISP dvs statistics map structure
+ * and uses the data_ptr as base pointer to set the appropriate
+ * pointers to all relevant subsets of the dvs statistics (dmem,
+ * vmem, hmem).
+ * If the data_ptr is NULL, this function will allocate the host-side
+ * memory. This information is stored in the struct and used in the
+ * ia_css_isp_dvs_statistics_map_free() function to determine whether
+ * the memory should be freed or not.
+ * Note that this function does not allocate or map any ISP
+ * memory.
+*/
+struct ia_css_isp_dvs_statistics_map *
+ia_css_isp_dvs_statistics_map_allocate(
+ const struct ia_css_isp_dvs_statistics *isp_stats,
+ void *data_ptr);
+
+/* @brief Free the dvs statistics map
+ * @param[in] me Pointer to the dvs statistics map
+ * @return None
+ *
+ * This function frees the map struct. If the data_ptr inside it
+ * was allocated inside ia_css_isp_dvs_statistics_map_allocate(), it
+ * will be freed in this function. Otherwise it will not be freed.
+ */
+void
+ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me);
+
+/* @brief Allocate memory for the SKC DVS statistics on the ISP
+ * @return		Pointer to the allocated SKC DVS statistics buffer on the ISP
+*/
+struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void);
+
+#endif /* __IA_CSS_DVS_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_env.h b/drivers/staging/media/atomisp/pci/ia_css_env.h
new file mode 100644
index 000000000000..8b0218ee658d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_env.h
@@ -0,0 +1,94 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ENV_H
+#define __IA_CSS_ENV_H
+
+#include <type_support.h>
+#include <stdarg.h> /* va_list */
+#include "ia_css_types.h"
+#include "ia_css_acc_types.h"
+
+/* @file
+ * This file contains prototypes for functions that need to be provided to the
+ * CSS-API host-code by the environment in which the CSS-API code runs.
+ */
+
+/* Memory allocation attributes, for use in ia_css_css_mem_env. */
+enum ia_css_mem_attr {
+ IA_CSS_MEM_ATTR_CACHED = 1 << 0,
+ IA_CSS_MEM_ATTR_ZEROED = 1 << 1,
+ IA_CSS_MEM_ATTR_PAGEALIGN = 1 << 2,
+ IA_CSS_MEM_ATTR_CONTIGUOUS = 1 << 3,
+};
+
+/* Environment with function pointers for local IA memory allocation.
+ * This provides the CSS code with environment specific functionality
+ * for memory allocation of small local buffers such as local data structures.
+ * This is never expected to allocate more than one page of memory (4K bytes).
+ */
+struct ia_css_cpu_mem_env {
+ void (*flush)(struct ia_css_acc_fw *fw);
+ /** Flush function to flush the cache for given accelerator. */
+};
+
+/* Environment with function pointers to access the CSS hardware. This includes
+ * registers and local memories.
+ */
+struct ia_css_hw_access_env {
+ void (*store_8)(hrt_address addr, uint8_t data);
+ /** Store an 8 bit value into an address in the CSS HW address space.
+ The address must be an 8 bit aligned address. */
+ void (*store_16)(hrt_address addr, uint16_t data);
+ /** Store a 16 bit value into an address in the CSS HW address space.
+ The address must be a 16 bit aligned address. */
+ void (*store_32)(hrt_address addr, uint32_t data);
+ /** Store a 32 bit value into an address in the CSS HW address space.
+ The address must be a 32 bit aligned address. */
+ uint8_t (*load_8)(hrt_address addr);
+ /** Load an 8 bit value from an address in the CSS HW address
+ space. The address must be an 8 bit aligned address. */
+ uint16_t (*load_16)(hrt_address addr);
+ /** Load a 16 bit value from an address in the CSS HW address
+ space. The address must be a 16 bit aligned address. */
+ uint32_t (*load_32)(hrt_address addr);
+ /** Load a 32 bit value from an address in the CSS HW address
+ space. The address must be a 32 bit aligned address. */
+ void (*store)(hrt_address addr, const void *data, uint32_t bytes);
+ /** Store a number of bytes into a byte-aligned address in the CSS HW address space. */
+ void (*load)(hrt_address addr, void *data, uint32_t bytes);
+ /** Load a number of bytes from a byte-aligned address in the CSS HW address space. */
+};
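+
+/* Sketch of how a Linux host could populate this environment with MMIO
+ * accessors (hypothetical helpers; "css_base" is an ioremap()ed base of
+ * the CSS register window and hrt_address is an offset into it):
+ *
+ * static void css_store_8(hrt_address addr, uint8_t data)
+ * {
+ *         writeb(data, css_base + addr);
+ * }
+ * ...analogous store_16/store_32 and load_8/load_16/load_32 helpers...
+ *
+ * static const struct ia_css_hw_access_env css_hw_env = {
+ *         .store_8 = css_store_8,
+ *         ...
+ * };
+ */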
+
+/* Environment with function pointers to print error and debug messages.
+ */
+struct ia_css_print_env {
+ int (*debug_print)(const char *fmt, va_list args);
+ /** Print a debug message. */
+ int (*error_print)(const char *fmt, va_list args);
+ /** Print an error message.*/
+};
+
+/* Environment structure. This includes function pointers to access several
+ * features provided by the environment in which the CSS API is used.
+ * This is used to run the camera IP in multiple platforms such as Linux,
+ * Windows and several simulation environments.
+ */
+struct ia_css_env {
+ struct ia_css_cpu_mem_env cpu_mem_env; /** local flush. */
+ struct ia_css_hw_access_env hw_access_env; /** CSS HW access functions */
+ struct ia_css_print_env print_env; /** Message printing env. */
+};
+
+#endif /* __IA_CSS_ENV_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_err.h b/drivers/staging/media/atomisp/pci/ia_css_err.h
new file mode 100644
index 000000000000..375952a7782e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_err.h
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ERR_H
+#define __IA_CSS_ERR_H
+
+/* @file
+ * This file contains possible return values for most
+ * functions in the CSS-API.
+ */
+
+/* Errors, these values are used as the return value for most
+ * functions in this API.
+ */
+enum ia_css_err {
+ IA_CSS_SUCCESS,
+ IA_CSS_ERR_INTERNAL_ERROR,
+ IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY,
+ IA_CSS_ERR_INVALID_ARGUMENTS,
+ IA_CSS_ERR_SYSTEM_NOT_IDLE,
+ IA_CSS_ERR_MODE_HAS_NO_VIEWFINDER,
+ IA_CSS_ERR_QUEUE_IS_FULL,
+ IA_CSS_ERR_QUEUE_IS_EMPTY,
+ IA_CSS_ERR_RESOURCE_NOT_AVAILABLE,
+ IA_CSS_ERR_RESOURCE_LIST_TO_SMALL,
+ IA_CSS_ERR_RESOURCE_ITEMS_STILL_ALLOCATED,
+ IA_CSS_ERR_RESOURCE_EXHAUSTED,
+ IA_CSS_ERR_RESOURCE_ALREADY_ALLOCATED,
+ IA_CSS_ERR_VERSION_MISMATCH,
+ IA_CSS_ERR_NOT_SUPPORTED
+};
+
+/* FW warnings. This enum contains a value for each warning with which
+ * the SP FW can indicate a potential performance issue.
+ */
+enum ia_css_fw_warning {
+ IA_CSS_FW_WARNING_NONE,
+ IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the ISys queue.
+ This warning can be avoided by de-queuing ISYS buffers more timely. */
+ IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the PSys queue.
+ This warning can be avoided by de-queuing PSYS buffers more timely. */
+ IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /* < CSS system delayed because of insufficient available buffers.
+ This warning can be avoided by unlocking locked frame-buffers more timely. */
+ IA_CSS_FW_WARNING_EXP_ID_LOCKED, /* < Exposure ID skipped because the frame associated to it was still locked.
+ This warning can be avoided by unlocking locked frame-buffers more timely. */
+ IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /* < Exposure ID cannot be found on the circular buffer.
+ This warning can be avoided by unlocking locked frame-buffers more timely. */
+ IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /* < Frame and param pair mismatched in tagger.
+ This warning can be avoided by providing a param set for each frame. */
+};
+
+#endif /* __IA_CSS_ERR_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_event_public.h b/drivers/staging/media/atomisp/pci/ia_css_event_public.h
new file mode 100644
index 000000000000..5c0470fa4a74
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_event_public.h
@@ -0,0 +1,196 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EVENT_PUBLIC_H
+#define __IA_CSS_EVENT_PUBLIC_H
+
+/* @file
+ * This file contains CSS-API events functionality
+ */
+
+#include <type_support.h> /* uint8_t */
+#include <ia_css_err.h> /* ia_css_err */
+#include <ia_css_types.h> /* ia_css_pipe */
+#include <ia_css_timer.h> /* ia_css_timer */
+
+/* The event type distinguishes the kinds of events that
+ * can be generated by the CSS system.
+ *
+ * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
+ * 1) "enum ia_css_event_type" (ia_css_event_public.h)
+ * 2) "enum sh_css_sp_event_type" (sh_css_internal.h)
+ * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c)
+ * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
+ */
+enum ia_css_event_type {
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE = 1 << 0,
+ /** Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE = 1 << 1,
+ /** Second output frame ready. */
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE = 1 << 2,
+ /** Viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE = 1 << 3,
+ /** Second viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE = 1 << 4,
+ /** Indication that 3A statistics are available. */
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE = 1 << 5,
+ /** Indication that DIS statistics are available. */
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE = 1 << 6,
+ /** Pipeline Done event, sent after last pipeline stage. */
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED = 1 << 7,
+ /** Frame tagged. */
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE = 1 << 8,
+ /** Input frame ready. */
+ IA_CSS_EVENT_TYPE_METADATA_DONE = 1 << 9,
+ /** Metadata ready. */
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE = 1 << 10,
+ /** Indication that LACE statistics are available. */
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE = 1 << 11,
+ /** Extension stage complete. */
+ IA_CSS_EVENT_TYPE_TIMER = 1 << 12,
+ /** Timer event for measuring the SP side latencies. It contains the
+ 32-bit timer value from the SP */
+ IA_CSS_EVENT_TYPE_PORT_EOF = 1 << 13,
+ /** End Of Frame event, sent when in buffered sensor mode. */
+ IA_CSS_EVENT_TYPE_FW_WARNING = 1 << 14,
+	/** Performance warning encountered by FW */
+ IA_CSS_EVENT_TYPE_FW_ASSERT = 1 << 15,
+ /** Assertion hit by FW */
+};
+
+#define IA_CSS_EVENT_TYPE_NONE 0
+
+/* IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
+ * The other events (such as PORT_EOF) cannot be enabled/disabled
+ * and are hence excluded from this macro.
+ */
+#define IA_CSS_EVENT_TYPE_ALL \
+ (IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE | \
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE | \
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE | \
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED | \
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_METADATA_DONE | \
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE | \
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE)
+
+/* The event struct, container for the event type and its related values.
+ * Depending on the event type, either pipe or port will be filled.
+ * Pipeline related events (like buffer/frame events) will return a valid and filled pipe handle.
+ * For non-pipeline-related events (i.e. stream-specific events such as the EOF
+ * event), the port will be filled.
+ */
+struct ia_css_event {
+ struct ia_css_pipe *pipe;
+ /** Pipe handle on which event happened, NULL for non pipe related
+ events. */
+ enum ia_css_event_type type;
+ /** Type of Event, always valid/filled. */
+ u8 port;
+ /** Port number for EOF event (not valid for other events). */
+ u8 exp_id;
+ /** Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
+ The exposure ID is unique only within a logical stream and it is
+ only generated on systems that have an input system (such as 2400
+ and 2401).
+ Most outputs produced by the CSS are tagged with an exposure ID.
+ This allows users of the CSS API to keep track of which buffer
+ was generated from which sensor output frame. This includes:
+ EOF event, output frames, 3A statistics, DVS statistics and
+ sensor metadata.
+ Exposure IDs start at IA_CSS_MIN_EXPOSURE_ID, increment by one
+ until IA_CSS_MAX_EXPOSURE_ID is reached, after that they wrap
+ around to IA_CSS_MIN_EXPOSURE_ID again.
+ Note that in case frames are dropped, this will not be reflected
+	in the exposure IDs. Therefore applications should not use this
+ to detect frame drops. */
+ u32 fw_handle;
+ /** Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
+ events). */
+ enum ia_css_fw_warning fw_warning;
+ /** Firmware warning code, only for WARNING events. */
+ u8 fw_assert_module_id;
+ /** Firmware module id, only for ASSERT events, should be logged by driver. */
+ u16 fw_assert_line_no;
+ /** Firmware line number, only for ASSERT events, should be logged by driver. */
+ clock_value_t timer_data;
+ /** For storing the full 32-bit of the timer value. Valid only for TIMER
+ event */
+ u8 timer_code;
+ /** For storing the code of the TIMER event. Valid only for
+ TIMER event */
+ u8 timer_subcode;
+ /** For storing the subcode of the TIMER event. Valid only
+ for TIMER event */
+};
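+
+/* Wrap-safe exposure-ID distance, a minimal sketch (exp_id_distance is a
+ * hypothetical helper; the MIN/MAX constants are the ones referred to in the
+ * exp_id description above):
+ *
+ *	static inline unsigned int exp_id_distance(unsigned int from,
+ *						   unsigned int to)
+ *	{
+ *		unsigned int span = IA_CSS_MAX_EXPOSURE_ID -
+ *				    IA_CSS_MIN_EXPOSURE_ID + 1;
+ *
+ *		return (to >= from) ? to - from : span - (from - to);
+ *	}
+ */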
+
+/* @brief Dequeue a PSYS event from the CSS system.
+ *
+ * @param[out] event Pointer to the event struct which will be filled by
+ * this function if an event is available.
+ * @return IA_CSS_ERR_QUEUE_IS_EMPTY if no events are
+ * available or
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * This function dequeues an event from the PSYS event queue. The queue is
+ * between the Host CPU and the CSS system. This function can be
+ * called after an interrupt has been generated signalling that a new event
+ * is available. It can also be used in a polling-like fashion, where the
+ * IA_CSS_ERR_QUEUE_IS_EMPTY return value indicates that no event was available.
+ */
+enum ia_css_err
+ia_css_dequeue_psys_event(struct ia_css_event *event);
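+
+/* A minimal polling sketch (illustrative only; handle_output_frame is a
+ * hypothetical caller-side handler):
+ *
+ *	struct ia_css_event event;
+ *
+ *	while (ia_css_dequeue_psys_event(&event) == IA_CSS_SUCCESS) {
+ *		if (event.type == IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE)
+ *			handle_output_frame(event.pipe);
+ *	}
+ */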
+
+/* @brief Dequeue an event from the CSS system.
+ *
+ * @param[out] event Pointer to the event struct which will be filled by
+ * this function if an event is available.
+ * @return IA_CSS_ERR_QUEUE_IS_EMPTY if no events are
+ * available or
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * deprecated{Use ia_css_dequeue_psys_event instead}.
+ * Unless the isys event queue is explicitly enabled, this function will
+ * dequeue both isys (EOF) and psys events (all others).
+ */
+enum ia_css_err
+ia_css_dequeue_event(struct ia_css_event *event);
+
+/* @brief Dequeue an ISYS event from the CSS system.
+ *
+ * @param[out] event Pointer to the event struct which will be filled by
+ * this function if an event is available.
+ * @return IA_CSS_ERR_QUEUE_IS_EMPTY if no events are
+ * available or
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * This function dequeues an event from the ISYS event queue. The queue is
+ * between host and the CSS system.
+ * Unlike the ia_css_dequeue_event() function, this function can be called
+ * directly from an interrupt service routine (ISR) and it is safe to call
+ * this function in parallel with other CSS API functions (but only one
+ * call to this function should be in flight at any point in time).
+ *
+ * The reason for having the ISYS events separate is to prevent them from
+ * incurring additional latency due to locks being held by other CSS API
+ * functions.
+ */
+enum ia_css_err
+ia_css_dequeue_isys_event(struct ia_css_event *event);
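+
+/* Illustrative ISR-context usage (a sketch; note_eof is a hypothetical
+ * caller-side bookkeeping function, and only one caller may be in flight):
+ *
+ *	struct ia_css_event event;
+ *
+ *	while (ia_css_dequeue_isys_event(&event) == IA_CSS_SUCCESS)
+ *		note_eof(event.port, event.exp_id);
+ */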
+
+#endif /* __IA_CSS_EVENT_PUBLIC_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_firmware.h b/drivers/staging/media/atomisp/pci/ia_css_firmware.h
new file mode 100644
index 000000000000..50817162703b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_firmware.h
@@ -0,0 +1,64 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FIRMWARE_H
+#define __IA_CSS_FIRMWARE_H
+
+/* @file
+ * This file contains firmware loading/unloading support functionality
+ */
+
+#include "ia_css_err.h"
+#include "ia_css_env.h"
+
+/* CSS firmware package structure.
+ */
+struct ia_css_fw {
+ void *data; /** pointer to the firmware data */
+ unsigned int bytes; /** length in bytes of firmware data */
+};
+
+/* @brief Loads the firmware
+ * @param[in] env Environment, provides functions to access the
+ * environment in which the CSS code runs. This is
+ * used for host side memory access and message
+ * printing.
+ * @param[in] fw Firmware package containing the firmware for all
+ * predefined ISP binaries.
+ * @return Returns IA_CSS_ERR_INTERNAL_ERROR in case of any
+ * errors and IA_CSS_SUCCESS otherwise.
+ *
+ * This function interprets the firmware package. All
+ * contents of this firmware package are copied into local data structures, so
+ * the fw pointer can be freed after this function completes.
+ *
+ * The rationale for this function is that it can be called before ia_css_init(),
+ * which speeds up ia_css_init() (ia_css_init() is called each time a stream is
+ * created, but the firmware only needs to be loaded once).
+ */
+enum ia_css_err
+ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
+ const struct ia_css_fw *fw);
+
+/* @brief Unloads the firmware
+ * @return None
+ *
+ * This function unloads the firmware loaded by ia_css_load_firmware.
+ * Calling it when no firmware is loaded is harmless but has no effect.
+ * Use this to deallocate all memory associated with the firmware.
+ */
+void
+ia_css_unload_firmware(void);
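+
+/* Illustrative load/unload pairing (a sketch; fw_blob, fw_blob_size and the
+ * populated dev/env are assumed to come from the caller):
+ *
+ *	struct ia_css_fw fw = { .data = fw_blob, .bytes = fw_blob_size };
+ *
+ *	if (ia_css_load_firmware(dev, &env, &fw) != IA_CSS_SUCCESS)
+ *		return -EINVAL;
+ *	// fw_blob can be freed now: its contents have been copied.
+ *	...
+ *	ia_css_unload_firmware();
+ */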
+
+#endif /* __IA_CSS_FIRMWARE_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_frac.h b/drivers/staging/media/atomisp/pci/ia_css_frac.h
new file mode 100644
index 000000000000..59720370cb8e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_frac.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_FRAC_H
+#define _IA_CSS_FRAC_H
+
+/* @file
+ * This file contains typedefs used for fractional numbers
+ */
+
+#include <type_support.h>
+
+/* Fixed point types.
+ * NOTE: the 16 bit fixed point types actually occupy 32 bits
+ * to save on extension operations in the ISP code.
+ */
+/* Unsigned fixed point value, 0 integer bits, 16 fractional bits */
+typedef u32 ia_css_u0_16;
+/* Unsigned fixed point value, 5 integer bits, 11 fractional bits */
+typedef u32 ia_css_u5_11;
+/* Unsigned fixed point value, 8 integer bits, 8 fractional bits */
+typedef u32 ia_css_u8_8;
+/* Signed fixed point value, 0 integer bits, 15 fractional bits */
+typedef s32 ia_css_s0_15;
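+
+/* Illustrative interpretation (a sketch, not part of the API): an
+ * ia_css_u8_8 value v represents v / 256.0, so 1.5 is encoded as 0x0180;
+ * an ia_css_u0_16 value w represents w / 65536.0. Conversions are plain
+ * scaling, e.g. (u32)(f * (1 << 16)) for a float f in [0, 1).
+ */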
+
+#endif /* _IA_CSS_FRAC_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_frame_format.h b/drivers/staging/media/atomisp/pci/ia_css_frame_format.h
new file mode 100644
index 000000000000..2f177edc36ac
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_frame_format.h
@@ -0,0 +1,101 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FRAME_FORMAT_H
+#define __IA_CSS_FRAME_FORMAT_H
+
+/* @file
+ * This file contains information about formats supported in the ISP
+ */
+
+/* Frame formats, some of these come from fourcc.org, others are
+ better explained by video4linux2. The NV11 seems to be described only
+ on MSDN pages, but even those seem to be gone now.
+ Frames can come in many forms, the main categories are RAW, RGB and YUV
+ (or YCbCr). The YUV frames come in 4 flavors, determined by how the U and V
+ values are subsampled:
+ 1. YUV420: hor = 2, ver = 2
+ 2. YUV411: hor = 4, ver = 1
+ 3. YUV422: hor = 2, ver = 1
+ 4. YUV444: hor = 1, ver = 1
+
+ Warning: not all frame formats are supported as input or output to/from ISP.
+ Some of these formats are therefore not defined in the output table module.
+ Modifications in below frame format enum can require modifications in the
+ output table module.
+
+ Warning2: Throughout the CSS code assumptions are made on the order
+ of formats in this enumeration type, or some sort of copy is maintained.
+ The following files are identified:
+ - FileSupport.h
+ - css/isp/kernels/fc/fc_1.0/formats.isp.c
+ - css/isp/kernels/output/output_1.0/output_table.isp.c
+ - css/isp/kernels/output/sc_output_1.0/formats.hive.c
+ - css/isp/modes/interface/isp_formats.isp.h
+ - css/bxt_sandbox/psyspoc/interface/ia_css_pg_info.h
+ - css/bxt_sandbox/psysapi/data/interface/ia_css_program_group_data.h
+ - css/bxt_sandbox/isysapi/interface/ia_css_isysapi_fw_types.h
+*/
+enum ia_css_frame_format {
+ IA_CSS_FRAME_FORMAT_NV11 = 0, /** 12 bit YUV 411, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12, /** 12 bit YUV 420, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12_16, /** 16 bit YUV 420, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12_TILEY, /** 12 bit YUV 420, Intel proprietary tiled format, TileY */
+ IA_CSS_FRAME_FORMAT_NV16, /** 16 bit YUV 422, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV21, /** 12 bit YUV 420, Y, VU plane */
+ IA_CSS_FRAME_FORMAT_NV61, /** 16 bit YUV 422, Y, VU plane */
+ IA_CSS_FRAME_FORMAT_YV12, /** 12 bit YUV 420, Y, V, U plane */
+ IA_CSS_FRAME_FORMAT_YV16, /** 16 bit YUV 422, Y, V, U plane */
+ IA_CSS_FRAME_FORMAT_YUV420, /** 12 bit YUV 420, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV420_16, /** yuv420, 16 bits per subpixel */
+ IA_CSS_FRAME_FORMAT_YUV422, /** 16 bit YUV 422, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV422_16, /** yuv422, 16 bits per subpixel */
+ IA_CSS_FRAME_FORMAT_UYVY, /** 16 bit YUV 422, UYVY interleaved */
+ IA_CSS_FRAME_FORMAT_YUYV, /** 16 bit YUV 422, YUYV interleaved */
+ IA_CSS_FRAME_FORMAT_YUV444, /** 24 bit YUV 444, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV_LINE, /** Internal format, 2 y lines followed
+ by a uvinterleaved line */
+ IA_CSS_FRAME_FORMAT_RAW, /** RAW, 1 plane */
+ IA_CSS_FRAME_FORMAT_RGB565, /** 16 bit RGB, 1 plane. Each 3 sub
+ pixels are packed into one 16 bit
+ value, 5 bits for R, 6 bits for G
+ and 5 bits for B. */
+ IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /** 24 bit RGB, 3 planes */
+ IA_CSS_FRAME_FORMAT_RGBA888, /** 32 bit RGBA, 1 plane, A=Alpha
+ (alpha is unused) */
+ IA_CSS_FRAME_FORMAT_QPLANE6, /** Internal, for advanced ISP */
+ IA_CSS_FRAME_FORMAT_BINARY_8, /** byte stream, used for jpeg. For
+ frames of this type, we set the
+ height to 1 and the width to the
+ number of allocated bytes. */
+ IA_CSS_FRAME_FORMAT_MIPI, /** MIPI frame, 1 plane */
+ IA_CSS_FRAME_FORMAT_RAW_PACKED, /** RAW, 1 plane, packed */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8, /** 8 bit per Y/U/V.
+ Y odd line; UYVY
+ interleaved even line */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /** Legacy YUV420. UY odd
+ line; VY even line */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 /** 10 bit per Y/U/V. Y odd
+ line; UYVY interleaved
+ even line */
+};
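+
+/* Worked example of the subsampling above (illustrative): a 640x480
+ * IA_CSS_FRAME_FORMAT_YUV420 frame has a 640x480 Y plane and 320x240 U and
+ * V planes (hor = 2, ver = 2), i.e. 640 * 480 * 3 / 2 = 460800 bytes at
+ * 8 bits per sample.
+ */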
+
+/* NOTE: IA_CSS_FRAME_FORMAT_NUM was purposely defined outside of enum type ia_css_frame_format, */
+/* because of issues this would cause with the Clockwork code checking tool. */
+#define IA_CSS_FRAME_FORMAT_NUM (IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 + 1)
+
+/* Number of valid output frame formats for ISP */
+#define IA_CSS_FRAME_OUT_FORMAT_NUM (IA_CSS_FRAME_FORMAT_RGBA888 + 1)
+
+#endif /* __IA_CSS_FRAME_FORMAT_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_frame_public.h b/drivers/staging/media/atomisp/pci/ia_css_frame_public.h
new file mode 100644
index 000000000000..69e9143e5418
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_frame_public.h
@@ -0,0 +1,353 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FRAME_PUBLIC_H
+#define __IA_CSS_FRAME_PUBLIC_H
+
+/* @file
+ * This file contains structs to describe various frame-formats supported by the ISP.
+ */
+
+#include <type_support.h>
+#include "ia_css_err.h"
+#include "ia_css_types.h"
+#include "ia_css_frame_format.h"
+#include "ia_css_buffer.h"
+
+/* For RAW input, the bayer order needs to be specified separately. There
+ * are 4 possible orders. The name is constructed by taking the first two
+ * colors on the first line and the first two colors from the second line.
+ */
+enum ia_css_bayer_order {
+ IA_CSS_BAYER_ORDER_GRBG, /** GRGRGRGRGR .. BGBGBGBGBG */
+ IA_CSS_BAYER_ORDER_RGGB, /** RGRGRGRGRG .. GBGBGBGBGB */
+ IA_CSS_BAYER_ORDER_BGGR, /** BGBGBGBGBG .. GRGRGRGRGR */
+ IA_CSS_BAYER_ORDER_GBRG, /** GBGBGBGBGB .. RGRGRGRGRG */
+};
+
+#define IA_CSS_BAYER_ORDER_NUM (IA_CSS_BAYER_ORDER_GBRG + 1)
+
+/* Frame plane structure. This describes one plane in an image
+ * frame buffer.
+ */
+struct ia_css_frame_plane {
+ unsigned int height; /** height of a plane in lines */
+ unsigned int width; /** width of a line, in DMA elements, note that
+ for RGB565 the three subpixels are stored in
+ one element. For all other formats this is
+ the number of subpixels per line. */
+ unsigned int stride; /** stride of a line in bytes */
+ unsigned int offset; /** offset in bytes to start of frame data.
+ offset is wrt data field in ia_css_frame */
+};
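+
+/* Addressing sketch (illustrative): the first byte of line y in a plane
+ * lives at frame->data + plane->offset + y * plane->stride; the stride may
+ * exceed the line width in bytes because of padding.
+ */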
+
+/* Binary "plane". This is used to story binary streams such as jpeg
+ * images. This is not actually a real plane.
+ */
+struct ia_css_frame_binary_plane {
+ unsigned int size; /** number of bytes in the stream */
+ struct ia_css_frame_plane data; /** plane */
+};
+
+/* Container for planar YUV frames. This contains 3 planes.
+ */
+struct ia_css_frame_yuv_planes {
+ struct ia_css_frame_plane y; /** Y plane */
+ struct ia_css_frame_plane u; /** U plane */
+ struct ia_css_frame_plane v; /** V plane */
+};
+
+/* Container for semi-planar YUV frames.
+ */
+struct ia_css_frame_nv_planes {
+ struct ia_css_frame_plane y; /** Y plane */
+ struct ia_css_frame_plane uv; /** UV plane */
+};
+
+/* Container for planar RGB frames. Each color has its own plane.
+ */
+struct ia_css_frame_rgb_planes {
+ struct ia_css_frame_plane r; /** Red plane */
+ struct ia_css_frame_plane g; /** Green plane */
+ struct ia_css_frame_plane b; /** Blue plane */
+};
+
+/* Container for 6-plane frames. These frames are used internally
+ * in the advanced ISP only.
+ */
+struct ia_css_frame_plane6_planes {
+ struct ia_css_frame_plane r; /** Red plane */
+ struct ia_css_frame_plane r_at_b; /** Red at blue plane */
+ struct ia_css_frame_plane gr; /** Red-green plane */
+ struct ia_css_frame_plane gb; /** Blue-green plane */
+ struct ia_css_frame_plane b; /** Blue plane */
+ struct ia_css_frame_plane b_at_r; /** Blue at red plane */
+};
+
+/* Crop info struct - stores the lines to be cropped in isp */
+struct ia_css_crop_info {
+ /* the final start column and start line
+ * sum of lines to be cropped + bayer offset
+ */
+ unsigned int start_column;
+ unsigned int start_line;
+};
+
+/* Frame info struct. This describes the contents of an image frame buffer.
+ */
+struct ia_css_frame_info {
+ struct ia_css_resolution res; /** Frame resolution (valid data) */
+ unsigned int padded_width; /** stride of line in memory (in pixels) */
+ enum ia_css_frame_format format; /** format of the frame data */
+ unsigned int raw_bit_depth; /** number of valid bits per pixel,
+ only valid for RAW bayer frames */
+ enum ia_css_bayer_order raw_bayer_order; /** bayer order, only valid
+ for RAW bayer frames */
+ /* the params below are computed based on bayer_order
+ * we can remove the raw_bayer_order if it is redundant
+ * keeping it for now as bxt and fpn code seem to use it
+ */
+ struct ia_css_crop_info crop_info;
+};
+
+#define IA_CSS_BINARY_DEFAULT_FRAME_INFO \
+(struct ia_css_frame_info) { \
+ .format = IA_CSS_FRAME_FORMAT_NUM, \
+ .raw_bayer_order = IA_CSS_BAYER_ORDER_NUM, \
+}
+
+/**
+ * Specifies the DVS loop delay in "frame periods"
+ */
+enum ia_css_frame_delay {
+ IA_CSS_FRAME_DELAY_0, /** Frame delay = 0 */
+ IA_CSS_FRAME_DELAY_1, /** Frame delay = 1 */
+ IA_CSS_FRAME_DELAY_2 /** Frame delay = 2 */
+};
+
+enum ia_css_frame_flash_state {
+ IA_CSS_FRAME_FLASH_STATE_NONE,
+ IA_CSS_FRAME_FLASH_STATE_PARTIAL,
+ IA_CSS_FRAME_FLASH_STATE_FULL
+};
+
+/* Frame structure. This structure describes an image buffer or frame.
+ * This is the main structure used for all input and output images.
+ */
+struct ia_css_frame {
+ struct ia_css_frame_info info; /** info struct describing the frame */
+ ia_css_ptr data; /** pointer to start of image data */
+ unsigned int data_bytes; /** size of image data in bytes */
+ /* LA: move this to ia_css_buffer */
+ /*
+ * -1 if data address is static during life time of pipeline
+ * >=0 if data address can change per pipeline/frame iteration
+ * index to dynamic data: ia_css_frame_in, ia_css_frame_out
+ * ia_css_frame_out_vf
+ * index to host-sp queue id: queue_0, queue_1 etc.
+ */
+ int dynamic_queue_id;
+ /*
+ * if it is dynamic frame, buf_type indicates which buffer type it
+ * should use for event generation. we have this because in vf_pp
+ * binary, we use output port, but we expect VF_OUTPUT_DONE event
+ */
+ enum ia_css_buffer_type buf_type;
+ enum ia_css_frame_flash_state flash_state;
+ unsigned int exp_id;
+ /** exposure id, see ia_css_event_public.h for more detail */
+ u32 isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
+ bool valid; /** First video output frame is not valid */
+ bool contiguous; /** memory is allocated physically contiguously */
+ union {
+ unsigned int _initialisation_dummy;
+ struct ia_css_frame_plane raw;
+ struct ia_css_frame_plane rgb;
+ struct ia_css_frame_rgb_planes planar_rgb;
+ struct ia_css_frame_plane yuyv;
+ struct ia_css_frame_yuv_planes yuv;
+ struct ia_css_frame_nv_planes nv;
+ struct ia_css_frame_plane6_planes plane6;
+ struct ia_css_frame_binary_plane binary;
+ } planes; /** frame planes, select the right one based on
+ info.format */
+};
+
+#define DEFAULT_FRAME \
+(struct ia_css_frame) { \
+ .info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .dynamic_queue_id = SH_CSS_INVALID_QUEUE_ID, \
+ .buf_type = IA_CSS_BUFFER_TYPE_INVALID, \
+ .flash_state = IA_CSS_FRAME_FLASH_STATE_NONE, \
+}
+
+/* @brief Fill a frame with zeros
+ *
+ * @param frame The frame.
+ * @return None
+ *
+ * Fill a frame with pixel values of zero
+ */
+void ia_css_frame_zero(struct ia_css_frame *frame);
+
+/* @brief Allocate a CSS frame structure
+ *
+ * @param frame The allocated frame.
+ * @param width The width (in pixels) of the frame.
+ * @param height The height (in lines) of the frame.
+ * @param format The frame format.
+ * @param stride The padded stride, in pixels.
+ * @param raw_bit_depth The raw bit depth, in bits.
+ * @return The error code.
+ *
+ * Allocate a CSS frame structure. The memory for the frame data will be
+ * allocated in the CSS address space.
+ */
+enum ia_css_err
+ia_css_frame_allocate(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int stride,
+ unsigned int raw_bit_depth);
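+
+/* Illustrative call (the parameter values are examples only):
+ *
+ *	struct ia_css_frame *frame;
+ *	enum ia_css_err err;
+ *
+ *	err = ia_css_frame_allocate(&frame, 1920, 1080,
+ *				    IA_CSS_FRAME_FORMAT_NV12, 1920, 0);
+ *	if (err != IA_CSS_SUCCESS)
+ *		return err;
+ */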
+
+/* @brief Allocate a CSS frame structure using a frame info structure.
+ *
+ * @param frame The allocated frame.
+ * @param[in] info The frame info structure.
+ * @return The error code.
+ *
+ * Allocate a frame using the resolution and format from a frame info struct.
+ * This is a convenience function, implemented on top of
+ * ia_css_frame_allocate().
+ */
+enum ia_css_err
+ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info);
+/* @brief Free a CSS frame structure.
+ *
+ * @param[in] frame Pointer to the frame.
+ * @return None
+ *
+ * Free a CSS frame structure. This will free both the frame structure
+ * and the pixel data pointer contained within the frame structure.
+ */
+void
+ia_css_frame_free(struct ia_css_frame *frame);
+
+/* @brief Allocate a contiguous CSS frame structure
+ *
+ * @param frame The allocated frame.
+ * @param width The width (in pixels) of the frame.
+ * @param height The height (in lines) of the frame.
+ * @param format The frame format.
+ * @param stride The padded stride, in pixels.
+ * @param raw_bit_depth The raw bit depth, in bits.
+ * @return The error code.
+ *
+ * Contiguous frame allocation, only for the FPGA display driver, which needs
+ * physically contiguous memory.
+ * Deprecated.
+ */
+enum ia_css_err
+ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int stride,
+ unsigned int raw_bit_depth);
+
+/* @brief Allocate a contiguous CSS frame from a frame info structure.
+ *
+ * @param frame The allocated frame.
+ * @param[in] info The frame info structure.
+ * @return The error code.
+ *
+ * Allocate a frame using the resolution and format from a frame info struct.
+ * This is a convenience function, implemented on top of
+ * ia_css_frame_allocate_contiguous().
+ * Only for FPGA display driver which needs physically contiguous memory.
+ * Deprecated.
+ */
+enum ia_css_err
+ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info);
+
+/* @brief Allocate a CSS frame structure using a frame info structure.
+ *
+ * @param frame The allocated frame.
+ * @param[in] info The frame info structure.
+ * @return The error code.
+ *
+ * Allocate an empty CSS frame with no data buffer using the parameters
+ * in the frame info.
+ */
+enum ia_css_err
+ia_css_frame_create_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info);
+
+/* @brief Set a mapped data buffer to a CSS frame
+ *
+ * @param[in] frame Valid CSS frame pointer
+ * @param[in] mapped_data Mapped data buffer to be assigned to the CSS frame
+ * @param[in] data_size_bytes Size of the mapped_data in bytes
+ * @return The error code.
+ *
+ * Sets a mapped data buffer to this frame. This function can be called multiple
+ * times with different buffers, or with NULL to reset the data pointer. This API
+ * will not attempt to free the mapped_data; it is the caller's responsibility to
+ * free the mapped_data buffer. However, if ia_css_frame_free() is called while
+ * the frame holds a valid data buffer, that buffer will be freed along with the frame.
+ */
+enum ia_css_err
+ia_css_frame_set_data(struct ia_css_frame *frame,
+ const ia_css_ptr mapped_data,
+ size_t data_size_bytes);
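+
+/* Illustrative attach/detach pairing (a sketch; mapped_data and its size are
+ * assumed to come from the caller's own mapping, and 0 is used as the NULL
+ * ia_css_ptr value):
+ *
+ *	err = ia_css_frame_set_data(frame, mapped_data, data_size_bytes);
+ *	...
+ *	err = ia_css_frame_set_data(frame, 0, 0); // detach before the caller frees it
+ */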
+
+/* @brief Map an existing frame data pointer to a CSS frame.
+ *
+ * @param frame Pointer to the frame to be initialized
+ * @param[in] info The frame info.
+ * @param[in] data Pointer to the allocated frame data.
+ * @param[in] attribute Attributes to be passed to mmgr_mmap.
+ * @param[in]	context	Pointer to a context to be passed to mmgr_mmap.
+ * @return		The error code.
+ *
+ * This function maps a pre-allocated pointer into a CSS frame. This can be
+ * used when an upper software layer is responsible for allocating the frame
+ * data and it wants to share that frame pointer with the CSS code.
+ * This function will fill the CSS frame structure just like
+ * ia_css_frame_allocate() does, but instead of allocating the memory, it will
+ * map the pre-allocated memory into the CSS address space.
+ */
+enum ia_css_err
+ia_css_frame_map(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info,
+ const void __user *data,
+ u16 attribute,
+ void *context);
+
+/* @brief Unmap a CSS frame structure.
+ *
+ * @param[in] frame Pointer to the CSS frame.
+ * @return None
+ *
+ * This function unmaps the frame data pointer within a CSS frame and
+ * then frees the CSS frame structure. Use this for frame pointers created
+ * using ia_css_frame_map().
+ */
+void
+ia_css_frame_unmap(struct ia_css_frame *frame);
+
+#endif /* __IA_CSS_FRAME_PUBLIC_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_host_data.h b/drivers/staging/media/atomisp/pci/ia_css_host_data.h
new file mode 100644
index 000000000000..bc82e97d24cb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_host_data.h
@@ -0,0 +1,45 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_HOST_DATA_H
+#define __SH_CSS_HOST_DATA_H
+
+#include <ia_css_types.h> /* ia_css_pipe */
+
+/**
+ * @brief Allocate structure ia_css_host_data.
+ *
+ * @param[in] size Size of the requested host data
+ *
+ * @return
+ * - NULL, can't allocate requested size
+ * - pointer to structure, field address points to host data with size bytes
+ */
+struct ia_css_host_data *
+ia_css_host_data_allocate(size_t size);
+
+/**
+ * @brief Free structure ia_css_host_data.
+ *
+ * @param[in] me	Pointer to the structure. If NULL is passed, the
+ *			function returns without error. Otherwise a valid
+ *			pointer to a structure must be passed and the
+ *			related memory is freed.
+ *
+ * @return None
+ */
+void ia_css_host_data_free(struct ia_css_host_data *me);
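+
+/* Illustrative allocate/free pairing:
+ *
+ *	struct ia_css_host_data *data = ia_css_host_data_allocate(256);
+ *
+ *	if (!data)
+ *		return -ENOMEM;
+ *	...
+ *	ia_css_host_data_free(data);	// passing NULL would also be safe
+ */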
+
+#endif /* __SH_CSS_HOST_DATA_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_input_port.h b/drivers/staging/media/atomisp/pci/ia_css_input_port.h
new file mode 100644
index 000000000000..ad9ca5449369
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_input_port.h
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* For MIPI_PORT0_ID to MIPI_PORT2_ID */
+#include "system_global.h"
+
+#ifndef __IA_CSS_INPUT_PORT_H
+#define __IA_CSS_INPUT_PORT_H
+
+/* @file
+ * This file contains information about the possible input ports for CSS
+ */
+
+/* Backward compatible for CSS API 2.0 only
+ * TO BE REMOVED when all drivers move to CSS API 2.1
+ */
+#define IA_CSS_CSI2_PORT_4LANE MIPI_PORT0_ID
+#define IA_CSS_CSI2_PORT_1LANE MIPI_PORT1_ID
+#define IA_CSS_CSI2_PORT_2LANE MIPI_PORT2_ID
+
+/* The CSI2 interface supports 2 types of compression or can
+ * be run without compression.
+ */
+enum ia_css_csi2_compression_type {
+ IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /** No compression */
+ IA_CSS_CSI2_COMPRESSION_TYPE_1, /** Compression scheme 1 */
+ IA_CSS_CSI2_COMPRESSION_TYPE_2 /** Compression scheme 2 */
+};
+
+struct ia_css_csi2_compression {
+ enum ia_css_csi2_compression_type type;
+ /** Compression used */
+ unsigned int compressed_bits_per_pixel;
+ /** Compressed bits per pixel (only when compression is enabled) */
+ unsigned int uncompressed_bits_per_pixel;
+ /** Uncompressed bits per pixel (only when compression is enabled) */
+};
+
+/* Input port structure.
+ */
+struct ia_css_input_port {
+ enum mipi_port_id port; /** Physical CSI-2 port */
+ unsigned int num_lanes; /** Number of lanes used (4-lane port only) */
+ unsigned int timeout; /** Timeout value */
+ unsigned int rxcount; /** Register value, should include all lanes */
+ struct ia_css_csi2_compression compression; /** Compression used */
+};
+
+#endif /* __IA_CSS_INPUT_PORT_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_irq.h b/drivers/staging/media/atomisp/pci/ia_css_irq.h
new file mode 100644
index 000000000000..7716373553e0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_irq.h
@@ -0,0 +1,235 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_IRQ_H
+#define __IA_CSS_IRQ_H
+
+/* @file
+ * This file contains information for Interrupts/IRQs from CSS
+ */
+
+#include "ia_css_err.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_input_port.h"
+
+/* Interrupt types, these enumerate all supported interrupt types.
+ */
+enum ia_css_irq_type {
+ IA_CSS_IRQ_TYPE_EDGE, /** Edge (level) sensitive interrupt */
+ IA_CSS_IRQ_TYPE_PULSE /** Pulse-shaped interrupt */
+};
+
+/* Interrupt request type.
+ * When the CSS hardware generates an interrupt, a function in this API
+ * needs to be called to retrieve information about the interrupt.
+ * This interrupt type is part of this information and indicates what
+ * type of information the interrupt signals.
+ *
+ * Note that one interrupt can carry multiple interrupt types. For
+ * example: the online video ISP will generate only 2 interrupts, one to
+ * signal that the statistics (3a and DIS) are ready and one to signal
+ * that all output frames are done (output and viewfinder).
+ *
+ * DEPRECATED: this interface is not portable; it should only define user
+ * (SW) interrupts
+ */
+enum ia_css_irq_info {
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR = 1 << 0,
+ /** the css receiver has encountered an error */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW = 1 << 1,
+ /** the FIFO in the csi receiver has overflown */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF = 1 << 2,
+ /** the css receiver received the start of frame */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF = 1 << 3,
+ /** the css receiver received the end of frame */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL = 1 << 4,
+ /** the css receiver received the start of line */
+ IA_CSS_IRQ_INFO_PSYS_EVENTS_READY = 1 << 5,
+ /** One or more events are available in the PSYS event queue */
+ IA_CSS_IRQ_INFO_EVENTS_READY = IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
+ /** deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
+ * same functionality.} */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL = 1 << 6,
+ /** the css receiver received the end of line */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = 1 << 7,
+ /** the css receiver received a change in side band signals */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0 = 1 << 8,
+ /** generic short packets (0) */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1 = 1 << 9,
+ /** generic short packets (1) */
+ IA_CSS_IRQ_INFO_IF_PRIM_ERROR = 1 << 10,
+ /** the primary input formatter (A) has encountered an error */
+ IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR = 1 << 11,
+ /** the primary input formatter (B) has encountered an error */
+ IA_CSS_IRQ_INFO_IF_SEC_ERROR = 1 << 12,
+ /** the secondary input formatter has encountered an error */
+ IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR = 1 << 13,
+ /** the stream-to-memory device has encountered an error */
+ IA_CSS_IRQ_INFO_SW_0 = 1 << 14,
+ /** software interrupt 0 */
+ IA_CSS_IRQ_INFO_SW_1 = 1 << 15,
+ /** software interrupt 1 */
+ IA_CSS_IRQ_INFO_SW_2 = 1 << 16,
+ /** software interrupt 2 */
+ IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = 1 << 17,
+ /** ISP binary statistics are ready */
+ IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = 1 << 18,
+	/** the input system is in error */
+	IA_CSS_IRQ_INFO_IF_ERROR                   = 1 << 19,
+	/** the input formatter is in error */
+	IA_CSS_IRQ_INFO_DMA_ERROR                  = 1 << 20,
+	/** the DMA is in error */
+ IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = 1 << 21,
+ /** end-of-frame events are ready in the isys_event queue */
+};
+
+/* CSS receiver error types. Whenever the CSS receiver has encountered
+ * an error, this enumeration is used to indicate which errors have occurred.
+ *
+ * Note that multiple error flags can be enabled at once and that this is in
+ * fact common (whenever an error occurs, it usually results in multiple
+ * errors).
+ *
+ * DEPRECATED: This interface is not portable, different systems have
+ * different receiver types, or possibly none in case of test systems.
+ */
+enum ia_css_rx_irq_info {
+ IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = 1U << 0, /** buffer overrun */
+ IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /** entering sleep mode */
+ IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = 1U << 2, /** exited sleep mode */
+ IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = 1U << 3, /** ECC corrected */
+ IA_CSS_RX_IRQ_INFO_ERR_SOT = 1U << 4,
+ /** Start of transmission */
+ IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = 1U << 5, /** SOT sync (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_CONTROL = 1U << 6, /** Control (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = 1U << 7, /** Double ECC */
+ IA_CSS_RX_IRQ_INFO_ERR_CRC = 1U << 8, /** CRC error */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = 1U << 9, /** Unknown ID */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = 1U << 10,/** Frame sync error */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = 1U << 11,/** Frame data error */
+ IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/** Timeout occurred */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1U << 13,/** Unknown escape seq. */
+ IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = 1U << 14,/** Line Sync error */
+ IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT = 1U << 15,
+};
+
+/* Interrupt info structure. This structure contains information about an
+ * interrupt. This needs to be used after an interrupt is received on the IA
+ * to perform the correct action.
+ */
+struct ia_css_irq {
+ enum ia_css_irq_info type; /** Interrupt type. */
+ unsigned int sw_irq_0_val; /** In case of SW interrupt 0, value. */
+ unsigned int sw_irq_1_val; /** In case of SW interrupt 1, value. */
+ unsigned int sw_irq_2_val; /** In case of SW interrupt 2, value. */
+ struct ia_css_pipe *pipe;
+ /** The image pipe that generated the interrupt. */
+};
+
+/* @brief Obtain interrupt information.
+ *
+ * @param[out] info Pointer to the interrupt info. The interrupt
+ *			information will be written to this structure.
+ * @return	If an error is encountered while retrieving the interrupt
+ *		info and no interrupt could be translated successfully, this
+ * will return IA_CSS_INTERNAL_ERROR. Otherwise
+ * IA_CSS_SUCCESS.
+ *
+ * This function is expected to be executed after an interrupt has been sent
+ * to the IA from the CSS. This function returns information about the interrupt
+ * which is needed by the IA code to properly handle the interrupt. This
+ * information includes the image pipe, buffer type etc.
+ */
+enum ia_css_err
+ia_css_irq_translate(unsigned int *info);
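+
+/* Illustrative top-half flow (a sketch; the dispatch helper is
+ * hypothetical):
+ *
+ *	unsigned int info;
+ *
+ *	if (ia_css_irq_translate(&info) != IA_CSS_SUCCESS)
+ *		return IRQ_NONE;
+ *	if (info & IA_CSS_IRQ_INFO_PSYS_EVENTS_READY)
+ *		schedule_psys_event_work();	// hypothetical
+ *	return IRQ_HANDLED;
+ */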
+
+/* @brief Get CSI receiver error info.
+ *
+ * @param[out] irq_bits Pointer to the interrupt bits. The interrupt
+ *			bits will be written to this location.
+ * This will be the error bits that are enabled in the CSI
+ * receiver error register.
+ * @return None
+ *
+ * This function should be used whenever a CSI receiver error interrupt is
+ * generated. It provides the detailed information (bits) on the exact error
+ * that occurred.
+ *
+ *@deprecated {this function is DEPRECATED since it only works on CSI port 1.
+ * Use the function below instead and specify the appropriate port.}
+ */
+void
+ia_css_rx_get_irq_info(unsigned int *irq_bits);
+
+/* @brief Get CSI receiver error info.
+ *
+ * @param[in] port Input port identifier.
+ * @param[out] irq_bits Pointer to the interrupt bits. The interrupt
+ *			bits will be written to this location.
+ * This will be the error bits that are enabled in the CSI
+ * receiver error register.
+ * @return None
+ *
+ * This function should be used whenever a CSI receiver error interrupt is
+ * generated. It provides the detailed information (bits) on the exact error
+ * that occurred.
+ */
+void
+ia_css_rx_port_get_irq_info(enum mipi_port_id port, unsigned int *irq_bits);
+
+/* @brief Clear CSI receiver error info.
+ *
+ * @param[in] irq_bits The bits that should be cleared from the CSI receiver
+ * interrupt bits register.
+ * @return None
+ *
+ * This function should be called after ia_css_rx_get_irq_info has been called
+ * and the error bits have been interpreted. It is advised to pass the bits
+ * obtained from that function as the argument to this function to make sure
+ * no new error bits get overwritten.
+ *
+ * @deprecated{this function is DEPRECATED since it only works on CSI port 1.
+ * Use the function below instead and specify the appropriate port.}
+ */
+void
+ia_css_rx_clear_irq_info(unsigned int irq_bits);
+
+/* @brief Clear CSI receiver error info.
+ *
+ * @param[in] port Input port identifier.
+ * @param[in] irq_bits The bits that should be cleared from the CSI receiver
+ * interrupt bits register.
+ * @return None
+ *
+ * This function should be called after ia_css_rx_port_get_irq_info has been
+ * called and the error bits have been interpreted. It is advised to pass the
+ * bits obtained from that function as the argument to this function to make
+ * sure no new error bits get overwritten.
+ */
+void
+ia_css_rx_port_clear_irq_info(enum mipi_port_id port, unsigned int irq_bits);
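+
+/* Illustrative get-then-clear pairing, per the advice above (the logging
+ * helper is hypothetical):
+ *
+ *	unsigned int bits;
+ *
+ *	ia_css_rx_port_get_irq_info(MIPI_PORT1_ID, &bits);
+ *	log_rx_errors(bits);
+ *	ia_css_rx_port_clear_irq_info(MIPI_PORT1_ID, bits);
+ */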
+
+/* @brief Enable or disable specific interrupts.
+ *
+ * @param[in] type The interrupt type that will be enabled/disabled.
+ * @param[in] enable enable or disable.
+ * @return Returns IA_CSS_INTERNAL_ERROR if this interrupt
+ * type cannot be enabled/disabled which is true for
+ * CSS internal interrupts. Otherwise returns
+ * IA_CSS_SUCCESS.
+ */
+enum ia_css_err
+ia_css_irq_enable(enum ia_css_irq_info type, bool enable);
+
+#endif /* __IA_CSS_IRQ_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h
new file mode 100644
index 000000000000..6545efd24cbe
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h
@@ -0,0 +1,183 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifdef IA_CSS_INCLUDE_CONFIGURATIONS
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/qplane/qplane_2/ia_css_qplane.host.h"
+#include "isp/kernels/raw/raw_1.0/ia_css_raw.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+
+/* ISP2401 */
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/vf/vf_1.0/ia_css_vf.host.h"
+#include "isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h"
+#include "isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h"
+#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_CONFIG_H
+#define _IA_CSS_ISP_CONFIG_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_configuration_ids {
+ IA_CSS_ITERATOR_CONFIG_ID,
+ IA_CSS_COPY_OUTPUT_CONFIG_ID,
+ IA_CSS_CROP_CONFIG_ID,
+ IA_CSS_FPN_CONFIG_ID,
+ IA_CSS_DVS_CONFIG_ID,
+ IA_CSS_QPLANE_CONFIG_ID,
+ IA_CSS_OUTPUT0_CONFIG_ID,
+ IA_CSS_OUTPUT1_CONFIG_ID,
+ IA_CSS_OUTPUT_CONFIG_ID,
+ IA_CSS_RAW_CONFIG_ID,
+ IA_CSS_TNR_CONFIG_ID,
+ IA_CSS_REF_CONFIG_ID,
+ IA_CSS_VF_CONFIG_ID,
+
+ /* ISP 2401 */
+ IA_CSS_SC_CONFIG_ID,
+
+ IA_CSS_NUM_CONFIGURATION_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_config_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter iterator;
+ struct ia_css_isp_parameter copy_output;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter dvs;
+ struct ia_css_isp_parameter qplane;
+ struct ia_css_isp_parameter output0;
+ struct ia_css_isp_parameter output1;
+ struct ia_css_isp_parameter output;
+
+ /* ISP2401 */
+ struct ia_css_isp_parameter sc;
+
+ struct ia_css_isp_parameter raw;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ struct ia_css_isp_parameter vf;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_CONFIGURATIONS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_iterator(
+ const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_copy_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_crop(
+ const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_fpn(
+ const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_dvs(
+ const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_qplane(
+ const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output0(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output1(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_raw(
+ const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_tnr(
+ const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_ref(
+ const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_vf(
+ const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem);
+
+#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
+
+#endif /* _IA_CSS_ISP_CONFIG_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_params.h b/drivers/staging/media/atomisp/pci/ia_css_isp_params.h
new file mode 100644
index 000000000000..b8b3c48492ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_params.h
@@ -0,0 +1,394 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_PARAM_H
+#define _IA_CSS_ISP_PARAM_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_parameter_ids {
+ IA_CSS_AA_ID,
+ IA_CSS_ANR_ID,
+ IA_CSS_ANR2_ID,
+ IA_CSS_BH_ID,
+ IA_CSS_CNR_ID,
+ IA_CSS_CROP_ID,
+ IA_CSS_CSC_ID,
+ IA_CSS_DP_ID,
+ IA_CSS_BNR_ID,
+ IA_CSS_DE_ID,
+ IA_CSS_ECD_ID,
+ IA_CSS_FORMATS_ID,
+ IA_CSS_FPN_ID,
+ IA_CSS_GC_ID,
+ IA_CSS_CE_ID,
+ IA_CSS_YUV2RGB_ID,
+ IA_CSS_RGB2YUV_ID,
+ IA_CSS_R_GAMMA_ID,
+ IA_CSS_G_GAMMA_ID,
+ IA_CSS_B_GAMMA_ID,
+ IA_CSS_UDS_ID,
+ IA_CSS_RAA_ID,
+ IA_CSS_S3A_ID,
+ IA_CSS_OB_ID,
+ IA_CSS_OUTPUT_ID,
+ IA_CSS_SC_ID,
+ IA_CSS_BDS_ID,
+ IA_CSS_TNR_ID,
+ IA_CSS_MACC_ID,
+ IA_CSS_SDIS_HORICOEF_ID,
+ IA_CSS_SDIS_VERTCOEF_ID,
+ IA_CSS_SDIS_HORIPROJ_ID,
+ IA_CSS_SDIS_VERTPROJ_ID,
+ IA_CSS_SDIS2_HORICOEF_ID,
+ IA_CSS_SDIS2_VERTCOEF_ID,
+ IA_CSS_SDIS2_HORIPROJ_ID,
+ IA_CSS_SDIS2_VERTPROJ_ID,
+ IA_CSS_WB_ID,
+ IA_CSS_NR_ID,
+ IA_CSS_YEE_ID,
+ IA_CSS_YNR_ID,
+ IA_CSS_FC_ID,
+ IA_CSS_CTC_ID,
+ IA_CSS_XNR_TABLE_ID,
+ IA_CSS_XNR_ID,
+ IA_CSS_XNR3_ID,
+ IA_CSS_NUM_PARAMETER_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter anr;
+ struct ia_css_isp_parameter bh;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter csc;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter bnr;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ecd;
+ struct ia_css_isp_parameter formats;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter ce;
+ struct ia_css_isp_parameter yuv2rgb;
+ struct ia_css_isp_parameter rgb2yuv;
+ struct ia_css_isp_parameter uds;
+ struct ia_css_isp_parameter raa;
+ struct ia_css_isp_parameter s3a;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter output;
+ struct ia_css_isp_parameter sc;
+ struct ia_css_isp_parameter bds;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter macc;
+ struct ia_css_isp_parameter sdis_horiproj;
+ struct ia_css_isp_parameter sdis_vertproj;
+ struct ia_css_isp_parameter sdis2_horiproj;
+ struct ia_css_isp_parameter sdis2_vertproj;
+ struct ia_css_isp_parameter wb;
+ struct ia_css_isp_parameter nr;
+ struct ia_css_isp_parameter yee;
+ struct ia_css_isp_parameter ynr;
+ struct ia_css_isp_parameter fc;
+ struct ia_css_isp_parameter ctc;
+ struct ia_css_isp_parameter xnr;
+ struct ia_css_isp_parameter xnr3;
+ struct ia_css_isp_parameter get;
+ struct ia_css_isp_parameter put;
+ } dmem;
+ struct {
+ struct ia_css_isp_parameter anr2;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter sdis_horicoef;
+ struct ia_css_isp_parameter sdis_vertcoef;
+ struct ia_css_isp_parameter sdis2_horicoef;
+ struct ia_css_isp_parameter sdis2_vertcoef;
+
+ /* ISP2401 */
+ struct ia_css_isp_parameter xnr3;
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter bh;
+ } hmem0;
+ struct {
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter g_gamma;
+ struct ia_css_isp_parameter xnr_table;
+ } vamem1;
+ struct {
+ struct ia_css_isp_parameter r_gamma;
+ struct ia_css_isp_parameter ctc;
+ } vamem0;
+ struct {
+ struct ia_css_isp_parameter b_gamma;
+ } vamem2;
+};
+
+#if defined(IA_CSS_INCLUDE_PARAMETERS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+struct ia_css_pipeline_stage; /* forward declaration */
+
+extern void (*ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config);
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config);
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config);
+
+#endif /* IA_CSS_INCLUDE_PARAMETER */
+#endif /* _IA_CSS_ISP_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_states.h b/drivers/staging/media/atomisp/pci/ia_css_isp_states.h
new file mode 100644
index 000000000000..cc9cdcd0e2be
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_states.h
@@ -0,0 +1,73 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define IA_CSS_INCLUDE_STATES
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_STATE_H
+#define _IA_CSS_ISP_STATE_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_state_ids {
+ IA_CSS_AA_STATE_ID,
+ IA_CSS_CNR_STATE_ID,
+ IA_CSS_CNR2_STATE_ID,
+ IA_CSS_DP_STATE_ID,
+ IA_CSS_DE_STATE_ID,
+ IA_CSS_TNR_STATE_ID,
+ IA_CSS_REF_STATE_ID,
+ IA_CSS_YNR_STATE_ID,
+ IA_CSS_NUM_STATE_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_state_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter cnr2;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ynr;
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_STATES)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+extern void (*ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(
+ const struct ia_css_binary *binary);
+
+#endif /* IA_CSS_INCLUDE_STATES */
+
+#endif /* _IA_CSS_ISP_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_memory_access.c b/drivers/staging/media/atomisp/pci/ia_css_memory_access.c
new file mode 100644
index 000000000000..8d1356047448
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_memory_access.c
@@ -0,0 +1,85 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015-2017, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <system_types.h>
+#include <assert_support.h>
+#include <memory_access.h>
+#include <ia_css_env.h>
+#include <hrt/hive_isp_css_mm_hrt.h>
+
+const hrt_vaddress mmgr_NULL = (hrt_vaddress)0;
+const hrt_vaddress mmgr_EXCEPTION = (hrt_vaddress)-1;
+
+hrt_vaddress
+mmgr_malloc(const size_t size)
+{
+ return mmgr_alloc_attr(size, 0);
+}
+
+hrt_vaddress mmgr_alloc_attr(const size_t size, const uint16_t attrs)
+{
+ u16 masked_attrs = attrs & MMGR_ATTRIBUTE_MASK;
+
+ WARN_ON(attrs & MMGR_ATTRIBUTE_CONTIGUOUS);
+
+ if (masked_attrs & MMGR_ATTRIBUTE_CLEARED) {
+ if (masked_attrs & MMGR_ATTRIBUTE_CACHED)
+ return (ia_css_ptr) hrt_isp_css_mm_calloc_cached(size);
+ else
+ return (ia_css_ptr) hrt_isp_css_mm_calloc(size);
+ } else {
+ if (masked_attrs & MMGR_ATTRIBUTE_CACHED)
+ return (ia_css_ptr) hrt_isp_css_mm_alloc_cached(size);
+ else
+ return (ia_css_ptr) hrt_isp_css_mm_alloc(size);
+ }
+}
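+
+/* Illustrative use of the attribute flags handled above (size and flag
+ * combination are examples only, not recommendations):
+ *
+ *	hrt_vaddress buf = mmgr_alloc_attr(4096,
+ *					   MMGR_ATTRIBUTE_CLEARED |
+ *					   MMGR_ATTRIBUTE_CACHED);
+ *
+ * This dispatches to hrt_isp_css_mm_calloc_cached() per the branches above.
+ */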
+
+hrt_vaddress
+mmgr_calloc(const size_t N, const size_t size)
+{
+ return mmgr_alloc_attr(size * N, MMGR_ATTRIBUTE_CLEARED);
+}
+
+void mmgr_clear(hrt_vaddress vaddr, const size_t size)
+{
+ if (vaddr)
+ hmm_set(vaddr, 0, size);
+}
+
+void mmgr_load(const hrt_vaddress vaddr, void *data, const size_t size)
+{
+ if (vaddr && data)
+ hmm_load(vaddr, data, size);
+}
+
+void
+mmgr_store(const hrt_vaddress vaddr, const void *data, const size_t size)
+{
+ if (vaddr && data)
+ hmm_store(vaddr, data, size);
+}
+
+hrt_vaddress
+mmgr_mmap(const void __user *ptr, const size_t size,
+ u16 attribute, void *context)
+{
+ struct hrt_userbuffer_attr *userbuffer_attr = context;
+
+ return hrt_isp_css_mm_alloc_user_ptr(
+ size, ptr, userbuffer_attr->pgnr,
+ userbuffer_attr->type,
+ attribute & HRT_BUF_FLAG_CACHED);
+}
diff --git a/drivers/staging/media/atomisp/pci/ia_css_metadata.h b/drivers/staging/media/atomisp/pci/ia_css_metadata.h
new file mode 100644
index 000000000000..0212d71b3355
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_metadata.h
@@ -0,0 +1,72 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_METADATA_H
+#define __IA_CSS_METADATA_H
+
+/* @file
+ * This file contains the structures for processing sensor metadata.
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_stream_format.h"
+
+/* Metadata configuration. This data structure contains necessary info
+ * to process sensor metadata.
+ */
+struct ia_css_metadata_config {
+ enum atomisp_input_format data_type; /** Data type of CSI-2 embedded
+ data. The default value is ATOMISP_INPUT_FORMAT_EMBEDDED. For
+ certain sensors, the user can choose a non-default data type for embedded
+ data. */
+ struct ia_css_resolution resolution; /** Resolution */
+};
+
+struct ia_css_metadata_info {
+ struct ia_css_resolution resolution; /** Resolution */
+ u32 stride; /** Stride in bytes */
+ u32 size; /** Total size in bytes */
+};
+
+struct ia_css_metadata {
+ struct ia_css_metadata_info info; /** Layout info */
+ ia_css_ptr address; /** CSS virtual address */
+ u32 exp_id;
+ /** Exposure ID, see ia_css_event_public.h for more detail */
+};
+
+#define SIZE_OF_IA_CSS_METADATA_STRUCT sizeof(struct ia_css_metadata)
+
+/* @brief Allocate a metadata buffer.
+ * @param[in] metadata_info Metadata info struct, contains details on metadata buffers.
+ * @return Pointer of metadata buffer or NULL (if error)
+ *
+ * This function allocates a metadata buffer according to the properties
+ * specified in the metadata_info struct.
+ */
+struct ia_css_metadata *
+ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info);
+
+/* @brief Free a metadata buffer.
+ *
+ * @param[in] metadata Pointer of metadata buffer.
+ * @return None
+ *
+ * This function frees a metadata buffer.
+ */
+void
+ia_css_metadata_free(struct ia_css_metadata *metadata);
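+
+/* Illustrative allocation/free pairing; md_info is a hypothetical pointer to
+ * a previously filled struct ia_css_metadata_info:
+ *
+ *	struct ia_css_metadata *md = ia_css_metadata_allocate(md_info);
+ *
+ *	if (md) {
+ *		... consume md->address and md->info ...
+ *		ia_css_metadata_free(md);
+ *	}
+ */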
+
+#endif /* __IA_CSS_METADATA_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_mipi.h b/drivers/staging/media/atomisp/pci/ia_css_mipi.h
new file mode 100644
index 000000000000..c02138ee2511
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_mipi.h
@@ -0,0 +1,82 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MIPI_H
+#define __IA_CSS_MIPI_H
+
+/* @file
+ * This file contains MIPI support functionality
+ */
+
+#include <type_support.h>
+#include "ia_css_err.h"
+#include "ia_css_stream_format.h"
+#include "ia_css_input_port.h"
+
+/* Backward compatibility for CSS API 2.0 only.
+ * TO BE REMOVED when all drivers move to CSS API 2.1.
+ */
+/* @brief Specify a CSS MIPI frame buffer.
+ *
+ * @param[in] size_mem_words The frame size in memory words (32B).
+ * @param[in] contiguous Allocate memory physically contiguously or not.
+ * @return The error code.
+ *
+ * \deprecated{Use ia_css_mipi_buffer_config instead.}
+ *
+ * Specifies a CSS MIPI frame buffer: size in memory words (32B).
+ */
+enum ia_css_err
+ia_css_mipi_frame_specify(const unsigned int size_mem_words,
+ const bool contiguous);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+/* @brief Register size of a CSS MIPI frame for check during capturing.
+ *
+ * @param[in] port CSI-2 port this check is registered.
+ * @param[in] size_mem_words The frame size in memory words (32B).
+ * @return The error code in case of failure, e.g. when MAX_NOF_ENTRIES is reached.
+ *
+ * Register the size of a CSS MIPI frame to check during capturing. Up to
+ * IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES entries per port are allowed. Entries
+ * are reset when the stream is stopped.
+ */
+enum ia_css_err
+ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
+ const unsigned int size_mem_words);
+#endif
+
+/* @brief Calculate the size of a mipi frame.
+ *
+ * @param[in] width The width (in pixels) of the frame.
+ * @param[in] height The height (in lines) of the frame.
+ * @param[in] format The frame (MIPI) format.
+ * @param[in] hasSOLandEOL Whether the (MIPI) frame contains (optional) SOL and EOL packets.
+ * @param[in] embedded_data_size_words Embedded data size in memory words.
+ * @param[out] size_mem_words The mipi frame size in memory words (32B).
+ * @return The error code.
+ *
+ * Calculate the size of a mipi frame, based on the resolution and format.
+ */
+enum ia_css_err
+ia_css_mipi_frame_calculate_size(const unsigned int width,
+ const unsigned int height,
+ const enum atomisp_input_format format,
+ const bool hasSOLandEOL,
+ const unsigned int embedded_data_size_words,
+ unsigned int *size_mem_words);
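+
+/* Illustrative call, assuming a 1920x1080 RAW10 frame without SOL/EOL
+ * packets or embedded data (format constant from atomisp_platform.h):
+ *
+ *	unsigned int words = 0;
+ *	enum ia_css_err err;
+ *
+ *	err = ia_css_mipi_frame_calculate_size(1920, 1080,
+ *					       ATOMISP_INPUT_FORMAT_RAW_10,
+ *					       false, 0, &words);
+ */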
+
+#endif /* __IA_CSS_MIPI_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_mmu.h b/drivers/staging/media/atomisp/pci/ia_css_mmu.h
new file mode 100644
index 000000000000..13c21056bfbf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_mmu.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MMU_H
+#define __IA_CSS_MMU_H
+
+/* @file
+ * This file contains one support function for invalidating the CSS MMU cache
+ */
+
+/* @brief Invalidate the MMU internal cache.
+ * @return None
+ *
+ * This function triggers an invalidation of the translation look-aside
+ * buffer (TLB) inside the CSS MMU. This function should be called
+ * every time the page tables used by the MMU change.
+ */
+void
+ia_css_mmu_invalidate_cache(void);
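+
+/* Illustrative ordering sketch; rewrite_isp_page_tables() is a hypothetical
+ * placeholder for whatever host code updates the MMU page tables:
+ *
+ *	rewrite_isp_page_tables();
+ *	ia_css_mmu_invalidate_cache();
+ */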
+
+#endif /* __IA_CSS_MMU_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_mmu_private.h b/drivers/staging/media/atomisp/pci/ia_css_mmu_private.h
new file mode 100644
index 000000000000..1021e4f380a5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_mmu_private.h
@@ -0,0 +1,29 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MMU_PRIVATE_H
+#define __IA_CSS_MMU_PRIVATE_H
+
+#include "system_local.h"
+
+/*
+ * This function sets the L1 pagetable address.
+ * After power-up of the ISP the L1 pagetable can be set.
+ * Once being set the L1 pagetable is protected against
+ * further modifications.
+ */
+void
+sh_css_mmu_set_page_table_base_index(hrt_data base_index);
+
+#endif /* __IA_CSS_MMU_PRIVATE_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_morph.h b/drivers/staging/media/atomisp/pci/ia_css_morph.h
new file mode 100644
index 000000000000..de409638d009
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_morph.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MORPH_H
+#define __IA_CSS_MORPH_H
+
+/* @file
+ * This file contains support for the morphing table
+ */
+
+#include <ia_css_types.h>
+
+/* @brief Allocate a morphing table
+ * @param[in] width Width of the morphing table.
+ * @param[in] height Height of the morphing table.
+ * @return Pointer to the morphing table
+*/
+struct ia_css_morph_table *
+ia_css_morph_table_allocate(unsigned int width, unsigned int height);
+
+/* @brief Free the morph table
+ * @param[in] me Pointer to the morph table.
+ * @return None
+*/
+void
+ia_css_morph_table_free(struct ia_css_morph_table *me);
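+
+/* Illustrative allocate/free pairing; the dimensions are examples only:
+ *
+ *	struct ia_css_morph_table *tab = ia_css_morph_table_allocate(64, 48);
+ *
+ *	if (tab) {
+ *		... fill and apply the table ...
+ *		ia_css_morph_table_free(tab);
+ *	}
+ */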
+
+#endif /* __IA_CSS_MORPH_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_pipe.h b/drivers/staging/media/atomisp/pci/ia_css_pipe.h
new file mode 100644
index 000000000000..91653952f1a7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_pipe.h
@@ -0,0 +1,189 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_H__
+#define __IA_CSS_PIPE_H__
+
+#include <type_support.h>
+#include "ia_css_stream.h"
+#include "ia_css_frame.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_binary.h"
+#include "sh_css_legacy.h"
+
+#define PIPE_ENTRY_EMPTY_TOKEN (~0U)
+#define PIPE_ENTRY_RESERVED_TOKEN (0x1)
+
+struct ia_css_preview_settings {
+ struct ia_css_binary copy_binary;
+ struct ia_css_binary preview_binary;
+ struct ia_css_binary vf_pp_binary;
+
+ /* 2401 only for these two - do we in fact use them for anything real */
+ struct ia_css_frame *delay_frames[MAX_NUM_DELAY_FRAMES];
+ struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES];
+
+ struct ia_css_pipe *copy_pipe;
+ struct ia_css_pipe *capture_pipe;
+ struct ia_css_pipe *acc_pipe;
+};
+
+#define IA_CSS_DEFAULT_PREVIEW_SETTINGS \
+(struct ia_css_preview_settings) { \
+ .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .preview_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .vf_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+}
+
+struct ia_css_capture_settings {
+ struct ia_css_binary copy_binary;
+ /* we extend primary binary to multiple stages because in ISP2.6.1
+ * the computation load is too high to fit in one single binary. */
+ struct ia_css_binary primary_binary[MAX_NUM_PRIMARY_STAGES];
+ unsigned int num_primary_stage;
+ struct ia_css_binary pre_isp_binary;
+ struct ia_css_binary anr_gdc_binary;
+ struct ia_css_binary post_isp_binary;
+ struct ia_css_binary capture_pp_binary;
+ struct ia_css_binary vf_pp_binary;
+ struct ia_css_binary capture_ldc_binary;
+ struct ia_css_binary *yuv_scaler_binary;
+ struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+ bool *is_output_stage;
+ unsigned int num_yuv_scaler;
+};
+
+#define IA_CSS_DEFAULT_CAPTURE_SETTINGS \
+(struct ia_css_capture_settings) { \
+ .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .primary_binary = {IA_CSS_BINARY_DEFAULT_SETTINGS}, \
+ .pre_isp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .anr_gdc_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .post_isp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .capture_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .vf_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .capture_ldc_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+}
+
+struct ia_css_video_settings {
+ struct ia_css_binary copy_binary;
+ struct ia_css_binary video_binary;
+ struct ia_css_binary vf_pp_binary;
+ struct ia_css_binary *yuv_scaler_binary;
+ struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+ struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES];
+ struct ia_css_frame *vf_pp_in_frame;
+ struct ia_css_pipe *copy_pipe;
+ struct ia_css_pipe *capture_pipe;
+ bool *is_output_stage;
+ unsigned int num_yuv_scaler;
+};
+
+#define IA_CSS_DEFAULT_VIDEO_SETTINGS \
+(struct ia_css_video_settings) { \
+ .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .video_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .vf_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+}
+
+struct ia_css_yuvpp_settings {
+ struct ia_css_binary copy_binary;
+ struct ia_css_binary *yuv_scaler_binary;
+ struct ia_css_binary *vf_pp_binary;
+ bool *is_output_stage;
+ unsigned int num_yuv_scaler;
+ unsigned int num_vf_pp;
+ unsigned int num_output;
+};
+
+#define IA_CSS_DEFAULT_YUVPP_SETTINGS \
+(struct ia_css_yuvpp_settings) { \
+ .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+}
+
+struct osys_object;
+
+struct ia_css_pipe {
+ /* TODO: Remove stop_requested and use stop_requested in the pipeline */
+ bool stop_requested;
+ struct ia_css_pipe_config config;
+ struct ia_css_pipe_extra_config extra_config;
+ struct ia_css_pipe_info info;
+ enum ia_css_pipe_id mode;
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_pipeline pipeline;
+ struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info bds_output_info;
+ struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info out_yuv_ds_input_info;
+ struct ia_css_frame_info vf_yuv_ds_input_info;
+ struct ia_css_fw_info *output_stage; /* extra output stage */
+ struct ia_css_fw_info *vf_stage; /* extra vf_stage */
+ unsigned int required_bds_factor;
+ unsigned int dvs_frame_delay;
+ int num_invalid_frames;
+ bool enable_viewfinder[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_stream *stream;
+ struct ia_css_frame in_frame_struct;
+ struct ia_css_frame out_frame_struct;
+ struct ia_css_frame vf_frame_struct;
+ struct ia_css_frame *continuous_frames[NUM_CONTINUOUS_FRAMES];
+ struct ia_css_metadata *cont_md_buffers[NUM_CONTINUOUS_FRAMES];
+ union {
+ struct ia_css_preview_settings preview;
+ struct ia_css_video_settings video;
+ struct ia_css_capture_settings capture;
+ struct ia_css_yuvpp_settings yuvpp;
+ } pipe_settings;
+ hrt_vaddress scaler_pp_lut;
+ struct osys_object *osys_obj;
+
+ /* This number is unique per pipe for each instance of the CSS. It is
+ * also reused as the pipeline number. There is a 1-1 mapping between
+ * pipe_num and the SP thread id. Current logic limits pipe_num to
+ * SH_CSS_MAX_SP_THREADS */
+ unsigned int pipe_num;
+};
+
+#define IA_CSS_DEFAULT_PIPE \
+(struct ia_css_pipe) { \
+ .config = DEFAULT_PIPE_CONFIG, \
+ .info = DEFAULT_PIPE_INFO, \
+ .mode = IA_CSS_PIPE_ID_ACC, /* (pipe_id) */ \
+ .pipeline = DEFAULT_PIPELINE, \
+ .output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .bds_output_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .vf_output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .out_yuv_ds_input_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .vf_yuv_ds_input_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .required_bds_factor = SH_CSS_BDS_FACTOR_1_00, \
+ .dvs_frame_delay = 1, \
+ .enable_viewfinder = {true}, \
+ .in_frame_struct = DEFAULT_FRAME, \
+ .out_frame_struct = DEFAULT_FRAME, \
+ .vf_frame_struct = DEFAULT_FRAME, \
+ .pipe_settings = { \
+ .preview = IA_CSS_DEFAULT_PREVIEW_SETTINGS \
+ }, \
+ .pipe_num = PIPE_ENTRY_EMPTY_TOKEN, \
+}
+
+void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map);
+
+enum ia_css_err
+sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
+ struct ia_css_isp_parameters *params,
+ bool commit, struct ia_css_pipe *pipe);
+
+#endif /* __IA_CSS_PIPE_H__ */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h b/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h
new file mode 100644
index 000000000000..e782978a5ce7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h
@@ -0,0 +1,569 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_PUBLIC_H
+#define __IA_CSS_PIPE_PUBLIC_H
+
+/* @file
+ * This file contains the public interface for CSS pipes.
+ */
+
+#include <type_support.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_buffer.h>
+/* ISP2401 */
+#include <ia_css_acc_types.h>
+
+enum {
+ IA_CSS_PIPE_OUTPUT_STAGE_0 = 0,
+ IA_CSS_PIPE_OUTPUT_STAGE_1,
+ IA_CSS_PIPE_MAX_OUTPUT_STAGE,
+};
+
+/* Enumeration of pipe modes. This mode can be used to create
+ * an image pipe for this mode. These pipes can be combined
+ * to configure and run streams on the ISP.
+ *
+ * For example, one can create a preview and capture pipe to
+ * create a continuous capture stream.
+ */
+enum ia_css_pipe_mode {
+ IA_CSS_PIPE_MODE_PREVIEW, /** Preview pipe */
+ IA_CSS_PIPE_MODE_VIDEO, /** Video pipe */
+ IA_CSS_PIPE_MODE_CAPTURE, /** Still capture pipe */
+ IA_CSS_PIPE_MODE_ACC, /** Accelerated pipe */
+ IA_CSS_PIPE_MODE_COPY, /** Copy pipe, only used for embedded/image data copying */
+ IA_CSS_PIPE_MODE_YUVPP, /** YUV post processing pipe, used for all use cases with YUV input,
+ for SoC sensor and external ISP */
+};
+
+/* Temporary define */
+#define IA_CSS_PIPE_MODE_NUM (IA_CSS_PIPE_MODE_YUVPP + 1)
+
+/**
+ * Enumeration of pipe versions.
+ * the order should match with definition in sh_css_defs.h
+ */
+enum ia_css_pipe_version {
+ IA_CSS_PIPE_VERSION_1 = 1, /** ISP1.0 pipe */
+ IA_CSS_PIPE_VERSION_2_2 = 2, /** ISP2.2 pipe */
+ IA_CSS_PIPE_VERSION_2_6_1 = 3, /** ISP2.6.1 pipe */
+ IA_CSS_PIPE_VERSION_2_7 = 4 /** ISP2.7 pipe */
+};
+
+/**
+ * Pipe configuration structure.
+ * Resolution properties are filled by Driver, kernel configurations are
+ * set by AIC
+ */
+struct ia_css_pipe_config {
+ enum ia_css_pipe_mode mode;
+ /** mode, indicates which mode the pipe should use. */
+ enum ia_css_pipe_version isp_pipe_version;
+ /** pipe version, indicates which imaging pipeline the pipe should use. */
+ struct ia_css_resolution input_effective_res;
+ /** input effective resolution */
+ struct ia_css_resolution bayer_ds_out_res;
+ /** bayer down scaling */
+ struct ia_css_resolution capt_pp_in_res;
+ /** capture post processing input resolution */
+ struct ia_css_resolution vf_pp_in_res;
+ /** ISP2401: view finder post processing input resolution */
+ struct ia_css_resolution output_system_in_res;
+ /** For IPU3 only: use output_system_in_res to specify the input resolution
+ that OSYS will receive; this resolution is equal to the output resolution
+ of GDC. If not set, CSS will set output_system_in_res to the main OSYS
+ output pin resolution. All other IPUs may ignore this property */
+ struct ia_css_resolution dvs_crop_out_res;
+ /** dvs crop, video only, not in use yet. Use dvs_envelope below. */
+ struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /** output of YUV scaling */
+ struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /** output of VF YUV scaling */
+ struct ia_css_fw_info *acc_extension;
+ /** Pipeline extension accelerator */
+ struct ia_css_fw_info **acc_stages;
+ /** Standalone accelerator stages */
+ u32 num_acc_stages;
+ /** Number of standalone accelerator stages */
+ struct ia_css_capture_config default_capture_config;
+ /** Default capture config for initial capture pipe configuration. */
+ struct ia_css_resolution dvs_envelope; /** temporary */
+ enum ia_css_frame_delay dvs_frame_delay;
+ /** indicates the DVS loop delay in frame periods */
+ int acc_num_execs;
+ /** For acceleration pipes only: determine how many times the pipe
+ should be run. Setting this to -1 means it will run until
+ stopped. */
+ bool enable_dz;
+ /** Digital zoom enable. If this is set to false, setting a zoom factor
+ will have no effect. In some use cases disabling digital zoom provides
+ better performance. */
+ bool enable_dpc;
+ /** "Defect Pixel Correction" enable. Setting this to false disables DPC
+ for the pipeline. In some use cases this provides better performance. */
+ bool enable_vfpp_bci;
+ /** Enabling BCI mode will cause yuv_scale binary to be picked up
+ instead of vf_pp. This only applies to viewfinder post
+ processing stages. */
+
+/* ISP2401 */
+ bool enable_luma_only;
+ /** Enabling of monochrome mode for a pipeline. If enabled, only luma
+ processing will be done. */
+ bool enable_tnr;
+ /** Enabling of TNR (temporal noise reduction). This is only applicable to
+ video pipes. Non-video pipes should always set this parameter to false. */
+
+ struct ia_css_isp_config *p_isp_config;
+ /** Pointer to ISP configuration */
+ struct ia_css_resolution gdc_in_buffer_res;
+ /** GDC in buffer resolution. */
+ struct ia_css_point gdc_in_buffer_offset;
+ /** GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
+
+/* ISP2401 */
+ struct ia_css_coordinate internal_frame_origin_bqs_on_sctbl;
+ /** Origin of internal frame positioned on shading table at shading correction in ISP.
+ NOTE: Shading table is larger than or equal to internal frame.
+ Shading table has shading gains and internal frame has bayer data.
+ The origin of internal frame is used in shading correction in ISP
+ to retrieve shading gains which correspond to bayer data. */
+};
+
+/**
+ * Default settings for newly created pipe configurations.
+ */
+#define DEFAULT_PIPE_CONFIG \
+(struct ia_css_pipe_config) { \
+ .mode = IA_CSS_PIPE_MODE_PREVIEW, \
+ .isp_pipe_version = 1, \
+ .output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .vf_output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .default_capture_config = DEFAULT_CAPTURE_CONFIG, \
+ .dvs_frame_delay = IA_CSS_FRAME_DELAY_1, \
+ .acc_num_execs = -1, \
+}
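+
+/* Illustrative override of the defaults above (the chosen field values are
+ * examples only, not recommendations):
+ *
+ *	struct ia_css_pipe_config cfg = DEFAULT_PIPE_CONFIG;
+ *
+ *	cfg.mode = IA_CSS_PIPE_MODE_VIDEO;
+ *	cfg.enable_dz = false;
+ */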
+
+/* Pipe info, this struct describes properties of a pipe after its stream has
+ * been created.
+ * ~~~** DO NOT ADD NEW FIELDS **~~~ This structure will be deprecated.
+ * - On behalf of the CSS-API Committee.
+ */
+struct ia_css_pipe_info {
+ struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /** Info about output resolution. This contains the stride which
+ should be used for memory allocation. */
+ struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /** Info about viewfinder output resolution (optional). This contains
+ the stride that should be used for memory allocation. */
+ struct ia_css_frame_info raw_output_info;
+ /** Raw output resolution. This indicates the resolution of the
+ RAW bayer output for pipes that support this. Currently, only the
+ still capture pipes support this feature. When this resolution is
+ smaller than the input resolution, cropping will be performed by
+ the ISP. The first cropping that will be performed is on the upper
+ left corner where we crop 8 lines and 8 columns to remove the
+ pixels normally used to initialize the ISP filters.
+ This is why the raw output resolution should normally be set to
+ the input resolution - 8x8. */
+ /* ISP2401 */
+ struct ia_css_resolution output_system_in_res_info;
+ /** For IPU3 only. Info about the output system input resolution, which is
+ considered to be the GDC output resolution. */
+ struct ia_css_shading_info shading_info;
+ /** After an image pipe is created, this field will contain the info
+ for the shading correction. */
+ struct ia_css_grid_info grid_info;
+ /** After an image pipe is created, this field will contain the grid
+ info for 3A and DVS. */
+ int num_invalid_frames;
+ /** The very first frames in a started stream do not contain valid data.
+ In this field, the CSS-firmware communicates to the host-driver how
+ many initial frames will contain invalid data; this allows the
+ host-driver to discard those initial invalid frames and start its
+ output at the first valid frame. */
+};
+
+/**
+ * Defaults for ia_css_pipe_info structs.
+ */
+#define DEFAULT_PIPE_INFO \
+(struct ia_css_pipe_info) { \
+ .output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .vf_output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .raw_output_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .shading_info = DEFAULT_SHADING_INFO, \
+ .grid_info = DEFAULT_GRID_INFO, \
+}
+
+/* @brief Load default pipe configuration
+ * @param[out] pipe_config The pipe configuration.
+ * @return None
+ *
+ * This function will load the default pipe configuration:
+@code
+ struct ia_css_pipe_config def_config = {
+ IA_CSS_PIPE_MODE_PREVIEW, // mode
+ 1, // isp_pipe_version
+ {0, 0}, // bayer_ds_out_res
+ {0, 0}, // capt_pp_in_res
+ {0, 0}, // vf_pp_in_res
+ {0, 0}, // dvs_crop_out_res
+ {{0, 0}, 0, 0, 0, 0}, // output_info
+ {{0, 0}, 0, 0, 0, 0}, // second_output_info
+ {{0, 0}, 0, 0, 0, 0}, // vf_output_info
+ {{0, 0}, 0, 0, 0, 0}, // second_vf_output_info
+ NULL, // acc_extension
+ NULL, // acc_stages
+ 0, // num_acc_stages
+ {
+ IA_CSS_CAPTURE_MODE_RAW, // mode
+ false, // enable_xnr
+ false // enable_raw_output
+ }, // default_capture_config
+ {0, 0}, // dvs_envelope
+ 1, // dvs_frame_delay
+ -1, // acc_num_execs
+ true, // enable_dz
+ NULL, // p_isp_config
+ };
+@endcode
+ */
+void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config);
+
+/* @brief Create a pipe
+ * @param[in] config The pipe configuration.
+ * @param[out] pipe The pipe.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * This function will create a pipe with the given
+ * configuration.
+ */
+enum ia_css_err
+ia_css_pipe_create(const struct ia_css_pipe_config *config,
+ struct ia_css_pipe **pipe);
+
+/* @brief Destroy a pipe
+ * @param[in] pipe The pipe.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * This function will destroy a given pipe.
+ */
+enum ia_css_err
+ia_css_pipe_destroy(struct ia_css_pipe *pipe);
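+
+/* Illustrative create/destroy sketch using the defaults defined above:
+ *
+ *	struct ia_css_pipe_config cfg = DEFAULT_PIPE_CONFIG;
+ *	struct ia_css_pipe *pipe = NULL;
+ *
+ *	if (ia_css_pipe_create(&cfg, &pipe) == IA_CSS_SUCCESS) {
+ *		... use the pipe ...
+ *		ia_css_pipe_destroy(pipe);
+ *	}
+ */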
+
+/* @brief Provides information about a pipe
+ * @param[in] pipe The pipe.
+ * @param[out] pipe_info The pipe information.
+ * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS.
+ *
+ * This function will provide information about a given pipe.
+ */
+enum ia_css_err
+ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
+ struct ia_css_pipe_info *pipe_info);
+
+/* @brief Configure a pipe with filter coefficients.
+ * @param[in] pipe The pipe.
+ * @param[in] config The pointer to ISP configuration.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function configures the filter coefficients for an image
+ * pipe.
+ */
+enum ia_css_err
+ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config);
+
+/* @brief Controls when the Event generator raises an IRQ to the Host.
+ *
+ * @param[in] pipe The pipe.
+ * @param[in] or_mask Bitwise OR of enum ia_css_event_irq_mask_type. Each pipe
+ related event that is part of this mask will directly
+ raise an IRQ to the Host when the event occurs in the
+ CSS.
+ * @param[in] and_mask Bitwise OR of enum ia_css_event_irq_mask_type. An event
+ IRQ for the Host is only raised after all pipe related
+ events have occurred at least once for all the active
+ pipes. Events are remembered and don't need to occur
+ at the same moment in time. There is no control over
+ the order of these events. Once an IRQ has been raised
+ all remembered events are reset.
+ * @return IA_CSS_SUCCESS.
+ *
+ Controls when the Event generator in the CSS raises an IRQ to the Host.
+ The main purpose of this function is to reduce the amount of interrupts
+ between the CSS and the Host. This will help saving power as it wakes up the
+ Host less often. In case both or_mask and and_mask are
+ IA_CSS_EVENT_TYPE_NONE for all pipes, no event IRQ's will be raised. An
+ exception holds for IA_CSS_EVENT_TYPE_PORT_EOF, for this event an IRQ is always
+ raised.
+ Note that events are still queued and the Host can poll for them. The
+ or_mask and and_mask may be active at the same time\n
+ \n
+ Default values, for all pipe id's, after ia_css_init:\n
+ or_mask = IA_CSS_EVENT_TYPE_ALL\n
+ and_mask = IA_CSS_EVENT_TYPE_NONE\n
+ \n
+ Examples\n
+ \code
+ ia_css_pipe_set_irq_mask(h_pipe,
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE |
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE ,
+ IA_CSS_EVENT_TYPE_NONE);
+ \endcode
+ The event generator will only raise an interrupt to the Host when there are
+ 3A or DIS statistics available from the preview pipe. It will not generate
+ an interrupt for any other event of the preview pipe e.g when there is an
+ output frame available.
+
+ \code
+ ia_css_pipe_set_irq_mask(h_pipe_preview,
+ IA_CSS_EVENT_TYPE_NONE,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE |
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE );
+
+ ia_css_pipe_set_irq_mask(h_pipe_capture,
+ IA_CSS_EVENT_TYPE_NONE,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE );
+ \endcode
+ The event generator will only raise an interrupt to the Host when there is
+ both a frame done and 3A event available from the preview pipe AND when there
+ is a frame done available from the capture pipe. Note that these events
+ may occur at different moments in time. Also the order of the events is not
+ relevant.
+
+ \code
+ ia_css_pipe_set_irq_mask(h_pipe_preview,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,
+ IA_CSS_EVENT_TYPE_ALL );
+
+ ia_css_pipe_set_irq_mask(h_pipe_capture,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,
+ IA_CSS_EVENT_TYPE_ALL );
+ \endcode
+ The event generator will only raise an interrupt to the Host when there is an
+ output frame from the preview pipe OR an output frame from the capture pipe.
+ All other events (3A, VF output, pipeline done) will not raise an interrupt
+ to the Host. These events are not lost but always stored in the event queue.
+ */
+enum ia_css_err
+ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
+ unsigned int or_mask,
+ unsigned int and_mask);
+
+/* @brief Reads the current event IRQ mask from the CSS.
+ *
+ * @param[in] pipe The pipe.
+ * @param[out] or_mask Current or_mask. The bits in this mask are a bitwise OR
+ of enum ia_css_event_irq_mask_type. Pointer may be NULL.
+ * @param[out] and_mask Current and_mask. The bits in this mask are a bitwise OR
+ of enum ia_css_event_irq_mask_type. Pointer may be NULL.
+ * @return IA_CSS_SUCCESS.
+ *
+ Reads the current event IRQ mask from the CSS. Reading returns the actual
+ values as used by the SP and not any mirrored values stored at the Host.\n
+\n
+Precondition:\n
+SP must be running.\n
+
+*/
+enum ia_css_err
+ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
+ unsigned int *or_mask,
+ unsigned int *and_mask);
+
+/* @brief Queue a buffer for an image pipe.
+ *
+ * @param[in] pipe The pipe that will own the buffer.
+ * @param[in] buffer Pointer to the buffer.
+ * Note that the caller remains owner of the buffer
+ * structure. Only the data pointer within it will
+ * be passed into the internal queues.
+ * @return IA_CSS_INTERNAL_ERROR in case of unexpected errors,
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * This function adds a buffer (which has a certain buffer type) to the queue
+ * for this type. This queue is owned by the image pipe. After this function
+ * completes successfully, the buffer is now owned by the image pipe and should
+ * no longer be accessed by any other code until it gets dequeued. The image
+ * pipe will dequeue buffers from this queue, use them and return them to the
+ * host code via an interrupt. Buffers will be consumed in the same order they
+ * get queued, but may be returned to the host out of order.
+ */
+enum ia_css_err
+ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
+ const struct ia_css_buffer *buffer);
+
+/* @brief Dequeue a buffer from an image pipe.
+ *
+ * @param[in] pipe The pipeline that the buffer queue belongs to.
+ * @param[in,out] buffer The buffer is used to look up the type, which
+ * determines which internal queue to use.
+ * The resulting buffer pointer is written into the data
+ * field.
+ * @return IA_CSS_ERR_NO_BUFFER if the queue is empty or
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * This function dequeues a buffer from a buffer queue. The queue is indicated
+ * by the buffer type argument. This function can be called after an interrupt
+ * has been generated that signalled that a new buffer was available and can
+ * be used in a polling-like situation where the NO_BUFFER return value is used
+ * to determine whether a buffer was available or not.
+ */
+enum ia_css_err
+ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
+ struct ia_css_buffer *buffer);
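+
+/* Illustrative ownership handshake; buf is a caller-owned descriptor whose
+ * type field is assumed to be set to the queue of interest beforehand:
+ *
+ *	if (ia_css_pipe_enqueue_buffer(pipe, &buf) == IA_CSS_SUCCESS) {
+ *		... wait for the buffer-done interrupt ...
+ *		while (ia_css_pipe_dequeue_buffer(pipe, &buf) ==
+ *		       IA_CSS_SUCCESS)
+ *			... consume the returned buffer ...
+ *	}
+ */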
+
+/* @brief Set the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ * @param[in] pipe Pipe handle.
+ * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle)
+ * @param[in] enable Enable Flag (1 to enable ; 0 to disable)
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ * IA_CSS_ERR_RESOURCE_NOT_AVAILABLE : Inactive QOS Pipe
+ * (No active stream with this pipe)
+ *
+ * This function will request state change (enable or disable) for the Extension
+ * stage (firmware handle) in the given pipe.
+ *
+ * Note:
+ * 1. Extension can be enabled/disabled only on QOS Extensions
+ * 2. Extension can be enabled/disabled only with an active QOS Pipe
+ * 3. Initial(Default) state of QOS Extensions is Disabled
+ * 4. State change cannot be guaranteed immediately OR on frame boundary
+ *
+ */
+enum ia_css_err
+ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe,
+ u32 fw_handle,
+ bool enable);
+
+/* @brief Get the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ * @param[in] pipe Pipe handle.
+ * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle)
+ * @param[out] *enable Enable Flag
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ * IA_CSS_ERR_RESOURCE_NOT_AVAILABLE : Inactive QOS Pipe
+ * (No active stream with this pipe)
+ *
+ * This function will query the state of the Extension stage (firmware handle)
+ * in the given Pipe.
+ *
+ * Note:
+ * 1. Extension state can be queried only on QOS Extensions
+ * 2. Extension can be enabled/disabled only with an active QOS Pipe
+ * 3. Initial(Default) state of QOS Extensions is Disabled.
+ *
+ */
+enum ia_css_err
+ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe,
+ u32 fw_handle,
+ bool *enable);
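+
+/* Illustrative toggle-and-verify sketch; fw_handle is a hypothetical handle
+ * obtained from the loaded extension firmware (ia_css_fw_info.handle):
+ *
+ *	bool enabled = false;
+ *
+ *	if (ia_css_pipe_set_qos_ext_state(pipe, fw_handle, true) ==
+ *	    IA_CSS_SUCCESS)
+ *		ia_css_pipe_get_qos_ext_state(pipe, fw_handle, &enabled);
+ */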
+
+/* ISP2401 */
+/* @brief Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
+ * @param[in] pipe Pipe handle.
+ * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle).
+ * @param[in] css_seg Parameter memory descriptors for CSS segments.
+ * @param[in] isp_seg Parameter memory descriptors for ISP segments.
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ * IA_CSS_ERR_RESOURCE_NOT_AVAILABLE : Inactive QOS Pipe
+ * (No active stream with this pipe)
+ *
+ * \deprecated{This interface is used to temporarily support a late-developed,
+ * specific use-case on a specific IPU2 platform. It will not be supported or
+ * maintained on IPU3 or further.}
+ */
+enum ia_css_err
+ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
+ uint32_t fw_handle,
+ struct ia_css_isp_param_css_segments *css_seg,
+ struct ia_css_isp_param_isp_segments *isp_seg);
+
+/* @brief Get selected configuration settings
+ * @param[in] pipe The pipe.
+ * @param[out] config Configuration settings.
+ * @return None
+ */
+void
+ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config);
+
+/* @brief Set the scaler LUT on this pipe. A copy of the LUT is made in the
+ * internal address space, so the LUT can be freed by the caller.
+ * @param[in] pipe Pipe handle.
+ * @param[in] lut Look-up table
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ *
+ * Note:
+ * 1) Note that both GDC's are programmed with the same table.
+ * 2) Current implementation ignores the pipe and overrides the
+ * global lut. This will be fixed in the future
+ * 3) This function must be called before stream start
+ *
+ */
+enum ia_css_err
+ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
+ const void *lut);
+/* @brief Check whether the pipe supports DVS statistics.
+ * @param[in] pipe_info The pipe info.
+ * @return true - has DVS statistics ability
+ * false - otherwise
+ */
+bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info);
+
+/* ISP2401 */
+/* @brief Override the frame format set on the output pins.
+ * @param[in] pipe Pipe handle.
+ * @param[in] output_pin Pin index to set the format on
+ * 0 - main output pin
+ * 1 - display output pin
+ * @param[in] format Format to set
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ * IA_CSS_ERR_INTERNAL_ERROR : Pipe misses binary info
+ *
+ * Note:
+ * 1) This is an optional function to override the formats set in the pipe.
+ * 2) Only overriding with IA_CSS_FRAME_FORMAT_NV12_TILEY is currently allowed.
+ * 3) This function is only to be used on pipes that use the output system.
+ * 4) If this function is used, it MUST be called after ia_css_pipe_create.
+ * 5) If this function is used, this function MUST be called before ia_css_stream_start.
+ */
+enum ia_css_err
+ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
+ int output_pin,
+ enum ia_css_frame_format format);
+
+#endif /* __IA_CSS_PIPE_PUBLIC_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_prbs.h b/drivers/staging/media/atomisp/pci/ia_css_prbs.h
new file mode 100644
index 000000000000..037fc4f77c77
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_prbs.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PRBS_H
+#define __IA_CSS_PRBS_H
+
+/* @file
+ * This file contains support for Pseudo Random Bit Sequence (PRBS) inputs
+ */
+
+/* Enumerate the PRBS IDs.
+ */
+enum ia_css_prbs_id {
+ IA_CSS_PRBS_ID0,
+ IA_CSS_PRBS_ID1,
+ IA_CSS_PRBS_ID2
+};
+
+/**
+ * Maximum number of PRBS IDs.
+ *
+ * Make sure the value of this define is updated to reflect the correct
+ * number of entries in the ia_css_prbs_id enum whenever you add or delete
+ * an item in that enum.
+ */
+#define N_CSS_PRBS_IDS (IA_CSS_PRBS_ID2 + 1)
+
+/**
+ * PRBS configuration structure.
+ *
+ * Seed for the Pseudo Random Bit Sequence.
+ *
+ * @deprecated{This interface is deprecated, it is not portable -> move to input system API}
+ */
+struct ia_css_prbs_config {
+ enum ia_css_prbs_id id;
+ unsigned int h_blank; /** horizontal blank */
+ unsigned int v_blank; /** vertical blank */
+ int seed; /** random seed for the 1st 2-pixel-components/clock */
+ int seed1; /** random seed for the 2nd 2-pixel-components/clock */
+};
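+
+/* Illustrative static configuration; blanking and seed values are examples
+ * only and carry no recommendation:
+ *
+ *	struct ia_css_prbs_config prbs = {
+ *		.id      = IA_CSS_PRBS_ID0,
+ *		.h_blank = 100,
+ *		.v_blank = 100,
+ *		.seed    = 0x1234,
+ *		.seed1   = 0x4321,
+ *	};
+ */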
+
+#endif /* __IA_CSS_PRBS_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_properties.h b/drivers/staging/media/atomisp/pci/ia_css_properties.h
new file mode 100644
index 000000000000..9a167306611c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_properties.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PROPERTIES_H
+#define __IA_CSS_PROPERTIES_H
+
+/* @file
+ * This file contains support for retrieving properties of some hardware in the CSS system
+ */
+
+#include <type_support.h> /* bool */
+#include <ia_css_types.h> /* ia_css_vamem_type */
+
+struct ia_css_properties {
+ int gdc_coord_one;
+ bool l1_base_is_index; /** Indicate whether the L1 page base
+ is a page index or a byte address. */
+ enum ia_css_vamem_type vamem_type;
+};
+
+/* @brief Get hardware properties
+ * @param[in,out] properties The hardware properties
+ * @return None
+ *
+ * This function returns a set of hardware properties.
+ */
+void
+ia_css_get_properties(struct ia_css_properties *properties);
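+
+/* Illustrative query; the struct is filled in place by the call:
+ *
+ *	struct ia_css_properties props;
+ *
+ *	ia_css_get_properties(&props);
+ *	... inspect props.gdc_coord_one / props.vamem_type ...
+ */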
+
+#endif /* __IA_CSS_PROPERTIES_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_shading.h b/drivers/staging/media/atomisp/pci/ia_css_shading.h
new file mode 100644
index 000000000000..588f53d32b72
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_shading.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SHADING_H
+#define __IA_CSS_SHADING_H
+
+/* @file
+ * This file contains support for setting the shading table for CSS
+ */
+
+#include <ia_css_types.h>
+
+/* @brief Shading table
+ * @param[in] width Width of the shading table.
+ * @param[in] height Height of the shading table.
+ * @return Pointer to the shading table
+*/
+struct ia_css_shading_table *
+ia_css_shading_table_alloc(unsigned int width,
+ unsigned int height);
+
+/* @brief Free shading table
+ * @param[in] table Pointer to the shading table.
+ * @return None
+*/
+void
+ia_css_shading_table_free(struct ia_css_shading_table *table);
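+
+/* Illustrative allocate/free pairing; table dimensions are examples only:
+ *
+ *	struct ia_css_shading_table *tbl =
+ *		ia_css_shading_table_alloc(17, 13);
+ *
+ *	if (tbl) {
+ *		... fill gains and hand the table to CSS ...
+ *		ia_css_shading_table_free(tbl);
+ *	}
+ */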
+
+#endif /* __IA_CSS_SHADING_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream.h b/drivers/staging/media/atomisp/pci/ia_css_stream.h
new file mode 100644
index 000000000000..5690fe832f41
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream.h
@@ -0,0 +1,111 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_STREAM_H_
+#define _IA_CSS_STREAM_H_
+
+#include <type_support.h>
+#include <system_local.h>
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#include <input_system.h>
+#endif
+#include "ia_css_types.h"
+#include "ia_css_stream_public.h"
+
+/**
+ * structure to hold all internal stream related information
+ */
+struct ia_css_stream {
+ struct ia_css_stream_config config;
+ struct ia_css_stream_info info;
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ rx_cfg_t csi_rx_config;
+#endif
+ bool reconfigure_css_rx;
+ struct ia_css_pipe *last_pipe;
+ int num_pipes;
+ struct ia_css_pipe **pipes;
+ struct ia_css_pipe *continuous_pipe;
+ struct ia_css_isp_parameters *isp_params_configs;
+ struct ia_css_isp_parameters *per_frame_isp_params_configs;
+
+ bool cont_capt;
+ bool disable_cont_vf;
+
+ /* ISP2401 */
+ bool stop_copy_preview;
+ bool started;
+};
+
+/* @brief Get the binary in the stream that performs the shading correction.
+ *
+ * @param[in] stream: The stream.
+ * @return The binary that performs the shading correction.
+ *
+ */
+struct ia_css_binary *
+ia_css_stream_get_shading_correction_binary(const struct ia_css_stream *stream);
+
+struct ia_css_binary *
+ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream);
+
+struct ia_css_binary *
+ia_css_stream_get_3a_binary(const struct ia_css_stream *stream);
+
+unsigned int
+ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream);
+
+bool
+sh_css_params_set_binning_factor(struct ia_css_stream *stream,
+ unsigned int sensor_binning);
+
+void
+sh_css_invalidate_params(struct ia_css_stream *stream);
+
+/* The following functions are used for testing purposes only */
+const struct ia_css_fpn_table *
+ia_css_get_fpn_table(struct ia_css_stream *stream);
+
+/* @brief Get a pointer to the shading table.
+ *
+ * @param[in] stream: The stream.
+ * @return The pointer to the shading table.
+ *
+ */
+struct ia_css_shading_table *
+ia_css_get_shading_table(struct ia_css_stream *stream);
+
+void
+ia_css_get_isp_dis_coefficients(struct ia_css_stream *stream,
+ short *horizontal_coefficients,
+ short *vertical_coefficients);
+
+void
+ia_css_get_isp_dvs2_coefficients(struct ia_css_stream *stream,
+ short *hor_coefs_odd_real,
+ short *hor_coefs_odd_imag,
+ short *hor_coefs_even_real,
+ short *hor_coefs_even_imag,
+ short *ver_coefs_odd_real,
+ short *ver_coefs_odd_imag,
+ short *ver_coefs_even_real,
+ short *ver_coefs_even_imag);
+
+enum ia_css_err
+ia_css_stream_isp_parameters_init(struct ia_css_stream *stream);
+
+void
+ia_css_stream_isp_parameters_uninit(struct ia_css_stream *stream);
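+
+/* Illustrative init/uninit pairing for a stream s (hypothetical pointer):
+ *
+ *	if (ia_css_stream_isp_parameters_init(s) == IA_CSS_SUCCESS) {
+ *		... run the stream ...
+ *		ia_css_stream_isp_parameters_uninit(s);
+ *	}
+ */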
+
+#endif /*_IA_CSS_STREAM_H_*/
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream_format.h b/drivers/staging/media/atomisp/pci/ia_css_stream_format.h
new file mode 100644
index 000000000000..4cd29833584f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream_format.h
@@ -0,0 +1,29 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_STREAM_FORMAT_H
+#define __IA_CSS_STREAM_FORMAT_H
+
+/* @file
+ * This file contains formats usable for ISP streaming input
+ */
+
+#include <type_support.h> /* bool */
+#include "../../../include/linux/atomisp_platform.h"
+
+unsigned int ia_css_util_input_format_bpp(
+ enum atomisp_input_format format,
+ bool two_ppc);
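+
+/* Illustrative call; the format constant is assumed to come from
+ * atomisp_platform.h, and two_ppc selects two-pixels-per-clock mode:
+ *
+ *	unsigned int bpp =
+ *		ia_css_util_input_format_bpp(ATOMISP_INPUT_FORMAT_RAW_10,
+ *					     false);
+ */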
+
+#endif /* __IA_CSS_STREAM_FORMAT_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream_public.h b/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
new file mode 100644
index 000000000000..fe11c8bf3cdc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
@@ -0,0 +1,585 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_STREAM_PUBLIC_H
+#define __IA_CSS_STREAM_PUBLIC_H
+
+/* @file
+ * This file contains support for configuring and controlling streams
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_metadata.h"
+#include "ia_css_tpg.h"
+#include "ia_css_prbs.h"
+#include "ia_css_input_port.h"
+
+/* Input modes, these enumerate all supported input modes.
+ * Note that not all ISP modes support all input modes.
+ */
+enum ia_css_input_mode {
+ IA_CSS_INPUT_MODE_SENSOR, /** data from sensor */
+ IA_CSS_INPUT_MODE_FIFO, /** data from input-fifo */
+ IA_CSS_INPUT_MODE_TPG, /** data from test-pattern generator */
+ IA_CSS_INPUT_MODE_PRBS, /** data from pseudo-random bit stream */
+ IA_CSS_INPUT_MODE_MEMORY, /** data from a frame in memory */
+ IA_CSS_INPUT_MODE_BUFFERED_SENSOR /** data is sent through mipi buffer */
+};
+
+/* Structure of the MIPI buffer configuration
+ */
+struct ia_css_mipi_buffer_config {
+ unsigned int size_mem_words; /** The frame size in the system memory
+ words (32B) */
+ bool contiguous; /** Allocated memory physically
+ contiguously or not. \deprecated{Will be false always.}*/
+ unsigned int nof_mipi_buffers; /** The number of MIPI buffers required for this
+ stream */
+};
+
+enum {
+ IA_CSS_STREAM_ISYS_STREAM_0 = 0,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX = IA_CSS_STREAM_ISYS_STREAM_0,
+ IA_CSS_STREAM_ISYS_STREAM_1,
+ IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH
+};
+
+/* This is input data configuration for one MIPI data type. We can have
+ * multiple of this in one virtual channel.
+ */
+struct ia_css_stream_isys_stream_config {
+ struct ia_css_resolution input_res; /** Resolution of input data */
+ enum atomisp_input_format format; /** Format of input stream. This data
+ format will be mapped to MIPI data
+ type internally. */
+ int linked_isys_stream_id; /** default value is -1, other value means
+ current isys_stream shares the same buffer with
+ indicated isys_stream*/
+ bool valid; /** indicate whether other fields have valid value */
+};
+
+struct ia_css_stream_input_config {
+ struct ia_css_resolution input_res; /** Resolution of input data */
+ struct ia_css_resolution effective_res; /** Resolution of input data.
+ Used for CSS 2400/1 System and deprecated for other
+ systems (replaced by input_effective_res in
+ ia_css_pipe_config) */
+ enum atomisp_input_format format; /** Format of input stream. This data
+ format will be mapped to MIPI data
+ type internally. */
+ enum ia_css_bayer_order bayer_order; /** Bayer order for RAW streams */
+};
+
+/* Input stream description. This describes how input will flow into the
+ * CSS. This is used to program the CSS hardware.
+ */
+struct ia_css_stream_config {
+ enum ia_css_input_mode mode; /** Input mode */
+ union {
+ struct ia_css_input_port port; /** Port, for sensor only. */
+ struct ia_css_tpg_config tpg; /** TPG configuration */
+ struct ia_css_prbs_config prbs; /** PRBS configuration */
+ } source; /** Source of input data */
+ unsigned int channel_id; /** Channel on which input data
+ will arrive. Use this field
+ to specify virtual channel id.
+ Valid values are: 0, 1, 2, 3 */
+ struct ia_css_stream_isys_stream_config
+ isys_config[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
+ struct ia_css_stream_input_config input_config;
+
+	/* Currently, the Android and Windows platforms interpret the binning_factor
+	 * parameter differently. On Android, the binning factor is expressed in the
+	 * form 2^N * 2^N, whereas on Windows the binning factor is N*N.
+	 * To use the Windows method of specification, the caller has to define the
+	 * macro USE_WINDOWS_BINNING_FACTOR. This is for backward compatibility only
+	 * and will be deprecated. In the future, all platforms will use the N*N method.
+	 */
+ /* ISP2401 */
+ unsigned int sensor_binning_factor; /** Binning factor used by sensor
+ to produce image data. This is
+ used for shading correction. */
+ unsigned int pixels_per_clock; /** Number of pixels per clock, which can be
+ 1, 2 or 4. */
+ bool online; /** offline will activate RAW copy on SP, use this for
+ continuous capture. */
+ /* ISYS2401 usage: ISP receives data directly from sensor, no copy. */
+ unsigned int init_num_cont_raw_buf; /** initial number of raw buffers to
+ allocate */
+ unsigned int target_num_cont_raw_buf; /** total number of raw buffers to
+ allocate */
+ bool pack_raw_pixels; /** Pack pixels in the raw buffers */
+ bool continuous; /** Use SP copy feature to continuously capture frames
+ to system memory and run pipes in offline mode */
+ bool disable_cont_viewfinder; /** disable continuous viewfinder for ZSL use case */
+ s32 flash_gpio_pin; /** pin on which the flash is connected, -1 for no flash */
+ int left_padding; /** The number of input-formatter left-paddings, -1 for default from binary.*/
+ struct ia_css_mipi_buffer_config
+ mipi_buffer_config; /** mipi buffer configuration */
+ struct ia_css_metadata_config
+ metadata_config; /** Metadata configuration. */
+ bool ia_css_enable_raw_buffer_locking; /** Enable Raw Buffer Locking for HALv3 Support */
+ bool lock_all;
+ /** Lock all RAW buffers (true) or lock only buffers processed by
+ video or preview pipe (false).
+ This setting needs to be enabled to allow raw buffer locking
+ without continuous viewfinder. */
+};
+
+struct ia_css_stream;
+
+/* Stream info, this struct describes properties of a stream after it has been
+ * created.
+ */
+struct ia_css_stream_info {
+ struct ia_css_metadata_info metadata_info;
+ /** Info about the metadata layout, this contains the stride. */
+};
+
+/* @brief Load default stream configuration
+ * @param[in,out] stream_config The stream configuration.
+ * @return None
+ *
+ * This function will reset the stream configuration to the default state:
+@code
+ memset(stream_config, 0, sizeof(*stream_config));
+ stream_config->online = true;
+ stream_config->left_padding = -1;
+@endcode
+ */
+void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config);
+
+/*
+ * create the internal structures and fill in the configuration data and pipes
+ */
+
+/* @brief Creates a stream
+* @param[in] stream_config The stream configuration.
+* @param[in] num_pipes The number of pipes to incorporate in the stream.
+* @param[in] pipes The pipes.
+* @param[out] stream The stream.
+* @return IA_CSS_SUCCESS or the error code.
+*
+* This function will create a stream with a given configuration and given pipes.
+*/
+enum ia_css_err
+ia_css_stream_create(const struct ia_css_stream_config *stream_config,
+ int num_pipes,
+ struct ia_css_pipe *pipes[],
+ struct ia_css_stream **stream);
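+
+/* A minimal bring-up sketch combining ia_css_stream_config_defaults() and
+ * ia_css_stream_create(); the pipe is assumed to have been created
+ * beforehand through the ia_css_pipe API:
+@code
+ struct ia_css_stream_config config;
+ struct ia_css_stream *stream;
+
+ ia_css_stream_config_defaults(&config);
+ config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ if (ia_css_stream_create(&config, 1, &pipe, &stream) != IA_CSS_SUCCESS)
+	return;
+@endcode
+ */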
+
+/* @brief Destroys a stream
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * This function will destroy a given stream.
+ */
+enum ia_css_err
+ia_css_stream_destroy(struct ia_css_stream *stream);
+
+/* @brief Provides information about a stream
+ * @param[in] stream The stream.
+ * @param[out] stream_info The information about the stream.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * This function will return information about a given stream.
+ */
+enum ia_css_err
+ia_css_stream_get_info(const struct ia_css_stream *stream,
+ struct ia_css_stream_info *stream_info);
+
+/* @brief load (rebuild) a stream that was unloaded.
+ * @param[in] stream The stream
+ * @return IA_CSS_SUCCESS or the error code
+ *
+ * Rebuild a stream, including allocating structs, setting configuration and
+ * building the required pipes.
+ */
+enum ia_css_err
+ia_css_stream_load(struct ia_css_stream *stream);
+
+/* @brief Starts the stream.
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * The dynamic data in the buffers is not used and needs to be queued with a
+ * separate call to ia_css_pipe_enqueue_buffer.
+ * NOTE: this function will only send a start event to the corresponding
+ * thread; it no longer starts the SP itself.
+ */
+enum ia_css_err
+ia_css_stream_start(struct ia_css_stream *stream);
+
+/* @brief Stop the stream.
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * NOTE: this function will send a stop event to the pipes belonging to this
+ * stream, but will not terminate their threads.
+ */
+enum ia_css_err
+ia_css_stream_stop(struct ia_css_stream *stream);
+
+/* @brief Check if a stream has stopped
+ * @param[in] stream The stream.
+ * @return boolean flag
+ *
+ * This function will check if the stream has stopped and return the corresponding boolean flag.
+ */
+bool
+ia_css_stream_has_stopped(struct ia_css_stream *stream);
+
+/* @brief destroy a stream according to the stream seed previously saved in the seed array.
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS (no other errors are generated now)
+ *
+ * Destroy the stream and all the pipes related to it.
+ */
+enum ia_css_err
+ia_css_stream_unload(struct ia_css_stream *stream);
+
+/* @brief Returns stream format
+ * @param[in] stream The stream.
+ * @return format of the stream
+ *
+ * This function will return the stream format.
+ */
+enum atomisp_input_format
+ia_css_stream_get_format(const struct ia_css_stream *stream);
+
+/* @brief Check if the stream is configured for 2 pixels per clock
+ * @param[in] stream The stream.
+ * @return boolean flag
+ *
+ * This function will check if the stream is configured for 2 pixels per clock
+ * and return the corresponding boolean flag.
+ */
+bool
+ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
+
+/* @brief Sets the output frame stride (at the last pipe)
+ * @param[in] stream The stream
+ * @param[in] output_padded_width - the output buffer stride.
+ * @return ia_css_err
+ *
+ * This function will set the output frame stride (at the last pipe).
+ */
+enum ia_css_err
+ia_css_stream_set_output_padded_width(struct ia_css_stream *stream,
+ unsigned int output_padded_width);
+
+/* @brief Return max number of continuous RAW frames.
+ * @param[in] stream The stream.
+ * @param[out] buffer_depth The maximum number of continuous RAW frames.
+ * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
+ *
+ * This function will return the maximum number of continuous RAW frames
+ * the system can support.
+ */
+enum ia_css_err
+ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream,
+ int *buffer_depth);
+
+/* @brief Set nr of continuous RAW frames to use.
+ *
+ * @param[in] stream The stream.
+ * @param[in] buffer_depth Number of frames to set.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * Set the number of continuous frames to use during continuous modes.
+ */
+enum ia_css_err
+ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth);
+
+/* @brief Get number of continuous RAW frames to use.
+ * @param[in] stream The stream.
+ * @param[out] buffer_depth The number of frames to use
+ * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
+ *
+ * Get the currently set number of continuous frames
+ * to use during continuous modes.
+ */
+enum ia_css_err
+ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
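+
+/* A sketch of how the buffer-depth functions above combine: query the
+ * maximum, clamp a driver-chosen requested_depth against it, then apply:
+@code
+ int depth, max_depth;
+
+ if (ia_css_stream_get_max_buffer_depth(stream, &max_depth) == IA_CSS_SUCCESS) {
+	depth = requested_depth > max_depth ? max_depth : requested_depth;
+	ia_css_stream_set_buffer_depth(stream, depth);
+ }
+@endcode
+ */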
+
+/* ===== CAPTURE ===== */
+
+/* @brief Configure the continuous capture
+ *
+ * @param[in] stream The stream.
+ * @param[in] num_captures The number of RAW frames to be processed to
+ * YUV. Setting this to -1 will make continuous
+ * capture run until it is stopped.
+ * This number will also be used to allocate RAW
+ * buffers. To allow the viewfinder to also
+ * keep operating, 2 extra buffers will always be
+ * allocated.
+ * If the offset is negative and the skip setting
+ * is greater than 0, additional buffers may be
+ * needed.
+ * @param[in] skip Skip N frames in between captures. This can be
+ * used to select a slower capture frame rate than
+ * the sensor output frame rate.
+ * @param[in] offset Start the RAW-to-YUV processing at RAW buffer
+ * with this offset. This allows the user to
+ * process RAW frames that were captured in the
+ * past or future.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * For example, to capture the current frame plus the 2 previous
+ * frames and 2 subsequent frames, you would call
+ * ia_css_stream_capture(stream, 5, 0, -2).
+ */
+enum ia_css_err
+ia_css_stream_capture(struct ia_css_stream *stream,
+ int num_captures,
+ unsigned int skip,
+ int offset);
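+
+/* The example from the description above, spelled out as a call: capture
+ * the current frame plus the 2 previous and the 2 subsequent frames,
+ * skipping no frames in between:
+@code
+ enum ia_css_err err = ia_css_stream_capture(stream, 5, 0, -2);
+@endcode
+ */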
+
+/* @brief Specify which raw frame to tag based on exp_id found in frame info
+ *
+ * @param[in] stream The stream.
+ * @param[in] exp_id The exposure id of the raw frame to tag.
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function allows the user to tag a raw frame based on the exposure id
+ * found in the viewfinder frames' frame info.
+ */
+enum ia_css_err
+ia_css_stream_capture_frame(struct ia_css_stream *stream,
+ unsigned int exp_id);
+
+/* ===== VIDEO ===== */
+
+/* @brief Send streaming data into the css input FIFO
+ *
+ * @param[in] stream The stream.
+ * @param[in]	data	Pointer to the pixels to be sent.
+ * @param[in] width Width of the input frame.
+ * @param[in] height Height of the input frame.
+ * @return None
+ *
+ * Send streaming data into the css input FIFO. This is for testing purposes
+ * only. This uses the channel ID and input format as set by the user with
+ * the regular functions for this.
+ * This function blocks until the entire frame has been written into the
+ * input FIFO.
+ *
+ * Note:
+ * For higher flexibility the ia_css_stream_send_input_frame is replaced by
+ * three separate functions:
+ * 1) ia_css_stream_start_input_frame
+ * 2) ia_css_stream_send_input_line
+ * 3) ia_css_stream_end_input_frame
+ * In this way it is possible to stream multiple frames on different
+ * channel IDs on a line basis. This makes it possible to simulate
+ * line-interleaved Stereo 3D muxed on 1 mipi port.
+ * These 3 functions are for testing purposes only and can be used in
+ * conjunction with ia_css_stream_send_input_frame.
+ */
+void
+ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height);
+
+/* @brief Start an input frame on the CSS input FIFO.
+ *
+ * @param[in] stream The stream.
+ * @return None
+ *
+ * Starts the streaming of a mipi frame by sending an SoF for channel channel_id.
+ * It will use the input_format and two_pixels_per_clock as provided by
+ * the user.
+ * For the "correct" use-case, input_format and two_pixels_per_clock must match
+ * with the values as set by the user with the regular functions.
+ * To simulate an error, the user can provide "incorrect" values for
+ * input_format and/or two_pixels_per_clock.
+ */
+void
+ia_css_stream_start_input_frame(const struct ia_css_stream *stream);
+
+/* @brief Send a line of input data into the CSS input FIFO.
+ *
+ * @param[in] stream The stream.
+ * @param[in] data Array of the first line of image data.
+ * @param[in]	width	The width (in pixels) of the first line.
+ * @param[in] data2 Array of the second line of image data.
+ * @param[in]	width2	The width (in pixels) of the second line.
+ * @return None
+ *
+ * Sends one frame line: an SoL, followed by width bytes of data, followed
+ * by width2 bytes of data2, followed by an EoL.
+ * It will use the input_format and two_pixels_per_clock settings as provided
+ * with the ia_css_stream_start_input_frame function call.
+ *
+ * This function blocks until the entire line has been written into the
+ * input FIFO.
+ */
+void
+ia_css_stream_send_input_line(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2);
+
+/* @brief Send a line of input embedded data into the CSS input FIFO.
+ *
+ * @param[in] stream Pointer of the stream.
+ * @param[in] format Format of the embedded data.
+ * @param[in] data Pointer of the embedded data line.
+ * @param[in] width The width (in pixels) of the line.
+ * @return None
+ *
+ * Sends one embedded data line to the input fifo: an SoL, followed by
+ * width bytes of data, followed by an EoL.
+ * It will use the two_pixels_per_clock settings as provided with the
+ * ia_css_stream_start_input_frame function call.
+ *
+ * This function blocks until the entire line has been written into the
+ * input FIFO.
+ */
+void
+ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
+ enum atomisp_input_format format,
+ const unsigned short *data,
+ unsigned int width);
+
+/* @brief End an input frame on the CSS input FIFO.
+ *
+ * @param[in] stream The stream.
+ * @return None
+ *
+ * Send the end-of-frame signal into the CSS input FIFO.
+ */
+void
+ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
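+
+/* A sketch of the line-based variant described above, streaming one test
+ * frame of height lines; line_data, line_width and height describe an
+ * assumed frame in memory, and passing NULL/0 for the unused second line
+ * is also an assumption:
+@code
+ unsigned int i;
+
+ ia_css_stream_start_input_frame(stream);
+ for (i = 0; i < height; i++)
+	ia_css_stream_send_input_line(stream, line_data[i], line_width,
+				      NULL, 0);
+ ia_css_stream_end_input_frame(stream);
+@endcode
+ */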
+
+/* @brief send a request flash command to SP
+ *
+ * @param[in] stream The stream.
+ * @return None
+ *
+ * The driver needs to call this function to send a flash request command
+ * to the SP; the SP is responsible for switching the flash on/off at the
+ * proper time. Due to the SP multi-threading environment, this request may
+ * have a one-frame delay; the driver needs to check the flashed flag in the
+ * frame info to determine which frame was actually flashed.
+ */
+void
+ia_css_stream_request_flash(struct ia_css_stream *stream);
+
+/* @brief Configure a stream with filter coefficients.
+ * @deprecated {Replaced by
+ * ia_css_pipe_set_isp_config_on_pipe()}
+ *
+ * @param[in] stream The stream.
+ * @param[in] config The set of filter coefficients.
+ * @param[in] pipe Pipe to be updated when set isp config, NULL means to
+ * update all pipes in the stream.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function configures the filter coefficients for an image
+ * stream. For image pipes that do not execute any ISP filters, this
+ * function will have no effect.
+ * It is safe to call this function while the image stream is running,
+ * in fact this is the expected behavior most of the time. Proper
+ * resource locking and double buffering is in place to allow for this.
+ */
+enum ia_css_err
+ia_css_stream_set_isp_config_on_pipe(struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe);
+
+/* @brief Configure a stream with filter coefficients.
+ * @deprecated {Replaced by
+ * ia_css_pipe_set_isp_config()}
+ * @param[in] stream The stream.
+ * @param[in] config The set of filter coefficients.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function configures the filter coefficients for an image
+ * stream. For image pipes that do not execute any ISP filters, this
+ * function will have no effect. All pipes of a stream will be updated.
+ * See ::ia_css_stream_set_isp_config_on_pipe() for the per-pipe alternative.
+ * It is safe to call this function while the image stream is running,
+ * in fact this is the expected behavior most of the time. Proper
+ * resource locking and double buffering is in place to allow for this.
+ */
+enum ia_css_err
+ia_css_stream_set_isp_config(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config);
+
+/* @brief Get selected configuration settings
+ * @param[in] stream The stream.
+ * @param[out] config Configuration settings.
+ * @return None
+ */
+void
+ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
+ struct ia_css_isp_config *config);
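+
+/* A read-modify-write sketch using the get/set pair above; dz is assumed
+ * to be a caller-owned struct ia_css_dz_config that stays valid while the
+ * CSS uses it:
+@code
+ struct ia_css_isp_config isp_config;
+
+ ia_css_stream_get_isp_config(stream, &isp_config);
+ isp_config.dz_config = &dz;
+ ia_css_stream_set_isp_config(stream, &isp_config);
+@endcode
+ */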
+
+/* @brief allocate continuous raw frames for continuous capture
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or error code.
+ *
+ * Because this allocation takes a long time (around 120 ms per frame),
+ * the allocation and update steps are split so that the driver can call
+ * this function without locking. This function is the allocation part;
+ * the next one is the update part.
+ */
+enum ia_css_err
+ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
+
+/* @brief allocate continuous raw frames for continuous capture
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or error code.
+ *
+ * Because this allocation takes a long time (around 120 ms per frame),
+ * the allocation and update steps are split so that the driver can call
+ * this function without locking. This function is the update part.
+ */
+enum ia_css_err
+ia_css_update_continuous_frames(struct ia_css_stream *stream);
+
+/* @brief ia_css_unlock_raw_frame: unlock a raw frame (HALv3 support)
+ * @param[in] stream The stream.
+ * @param[in] exp_id exposure id that uniquely identifies the locked Raw Frame Buffer
+ * @return ia_css_err IA_CSS_SUCCESS or error code
+ *
+ * As part of the HALv3 feature requirements, the SP locks a raw buffer until
+ * the application releases its reference to it (raw buffers are managed by the
+ * SP). This function allows the application to explicitly unlock that buffer.
+ */
+enum ia_css_err
+ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id);
+
+/* @brief ia_css_en_dz_capt_pipe: enable/disable digital zoom for the capture pipe
+ * @param[in] stream The stream.
+ * @param[in] enable true to enable, false to disable
+ * @return None
+ *
+ * Enables or disables digital zoom for capture pipe in provided stream, if capture pipe
+ * exists. This function sets enable_zoom flag in CAPTURE_PP stage of the capture pipe.
+ * In process_zoom_and_motion(), decision to enable or disable zoom for every stage depends
+ * on this flag.
+ */
+void
+ia_css_en_dz_capt_pipe(struct ia_css_stream *stream, bool enable);
+#endif /* __IA_CSS_STREAM_PUBLIC_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_timer.h b/drivers/staging/media/atomisp/pci/ia_css_timer.h
new file mode 100644
index 000000000000..a37cfa60ad35
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_timer.h
@@ -0,0 +1,68 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TIMER_H
+#define __IA_CSS_TIMER_H
+
+/* @file
+ * Timer interface definitions
+ */
+#include <type_support.h> /* for uint32_t */
+#include "ia_css_err.h"
+
+/* @brief timer reading definition */
+typedef u32 clock_value_t;
+
+/* @brief 32 bit clock tick,(timestamp based on timer-value of CSS-internal timer)*/
+struct ia_css_clock_tick {
+ clock_value_t ticks; /** measured time in ticks.*/
+};
+
+/* @brief TIMER event codes */
+enum ia_css_tm_event {
+ IA_CSS_TM_EVENT_AFTER_INIT,
+ /** Timer Event after Initialization */
+ IA_CSS_TM_EVENT_MAIN_END,
+ /** Timer Event after end of Main */
+ IA_CSS_TM_EVENT_THREAD_START,
+ /** Timer Event after thread start */
+ IA_CSS_TM_EVENT_FRAME_PROC_START,
+ /** Timer Event after Frame Process Start */
+ IA_CSS_TM_EVENT_FRAME_PROC_END
+ /** Timer Event after Frame Process End */
+};
+
+/* @brief code measurement common struct */
+struct ia_css_time_meas {
+ clock_value_t start_timer_value; /** measured time in ticks */
+ clock_value_t end_timer_value; /** measured time in ticks */
+};
+
+/**@brief SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT checks to ensure correct alignment for struct ia_css_clock_tick. */
+#define SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT sizeof(clock_value_t)
+/* @brief checks to ensure correct alignment for ia_css_time_meas. */
+#define SIZE_OF_IA_CSS_TIME_MEAS_STRUCT (sizeof(clock_value_t) \
+ + sizeof(clock_value_t))
+
+/* @brief API to fetch the timer count directly
+ *
+ * @param[out] curr_ts Measured count value
+ * @return IA_CSS_SUCCESS on success
+ *
+ */
+enum ia_css_err
+ia_css_timer_get_current_tick(
+ struct ia_css_clock_tick *curr_ts);
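+
+/* A sketch of measuring an interval with this API, filling in the
+ * ia_css_time_meas struct defined above; do_work() stands for the code
+ * being measured:
+@code
+ struct ia_css_clock_tick t0, t1;
+ struct ia_css_time_meas meas;
+
+ if (ia_css_timer_get_current_tick(&t0) != IA_CSS_SUCCESS)
+	return;
+ do_work();
+ if (ia_css_timer_get_current_tick(&t1) != IA_CSS_SUCCESS)
+	return;
+ meas.start_timer_value = t0.ticks;
+ meas.end_timer_value = t1.ticks;
+@endcode
+ */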
+
+#endif /* __IA_CSS_TIMER_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_tpg.h b/drivers/staging/media/atomisp/pci/ia_css_tpg.h
new file mode 100644
index 000000000000..79c4e1b3b48f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_tpg.h
@@ -0,0 +1,78 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TPG_H
+#define __IA_CSS_TPG_H
+
+/* @file
+ * This file contains support for the test pattern generator (TPG)
+ */
+
+/* Enumerate the TPG IDs.
+ */
+enum ia_css_tpg_id {
+ IA_CSS_TPG_ID0,
+ IA_CSS_TPG_ID1,
+ IA_CSS_TPG_ID2
+};
+
+/**
+ * Maximum number of TPG IDs.
+ *
+ * Make sure the value of this define gets changed to reflect the correct
+ * number of ia_css_tpg_id enum if you add/delete an item in the enum.
+ */
+#define N_CSS_TPG_IDS (IA_CSS_TPG_ID2 + 1)
+
+/* Enumerate the TPG modes.
+ */
+enum ia_css_tpg_mode {
+ IA_CSS_TPG_MODE_RAMP,
+ IA_CSS_TPG_MODE_CHECKERBOARD,
+ IA_CSS_TPG_MODE_FRAME_BASED_COLOR,
+ IA_CSS_TPG_MODE_MONO
+};
+
+/* @brief Configure the test pattern generator.
+ *
+ * Configure the Test Pattern Generator, the way these values are used to
+ * generate the pattern can be seen in the HRT extension for the test pattern
+ * generator:
+ * devices/test_pat_gen/hrt/include/test_pat_gen.h: hrt_calc_tpg_data().
+ *
+ * This interface is deprecated since it is not portable; use the input system API instead.
+ *
+@code
+unsigned int test_pattern_value(unsigned int x, unsigned int y)
+{
+ unsigned int x_val, y_val;
+	if (x_delta > 0)
+		x_val = (x << x_delta) & x_mask;
+	else
+		x_val = (x >> -x_delta) & x_mask;
+	if (y_delta > 0)
+		y_val = (y << y_delta) & y_mask;
+	else
+		y_val = (y >> -y_delta) & y_mask;
+ return (x_val + y_val) & xy_mask;
+}
+@endcode
+ */
+struct ia_css_tpg_config {
+ enum ia_css_tpg_id id;
+ enum ia_css_tpg_mode mode;
+ unsigned int x_mask;
+ int x_delta;
+ unsigned int y_mask;
+ int y_delta;
+ unsigned int xy_mask;
+};
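+
+/* A checkerboard configuration sketch; the mask and delta values below are
+ * purely illustrative inputs to the test_pattern_value() formula above:
+@code
+ struct ia_css_tpg_config tpg = {
+	.id = IA_CSS_TPG_ID0,
+	.mode = IA_CSS_TPG_MODE_CHECKERBOARD,
+	.x_mask = (1 << 4) - 1,
+	.x_delta = -2,
+	.y_mask = (1 << 4) - 1,
+	.y_delta = -2,
+	.xy_mask = (1 << 8) - 1,
+ };
+@endcode
+ */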
+
+#endif /* __IA_CSS_TPG_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_types.h b/drivers/staging/media/atomisp/pci/ia_css_types.h
new file mode 100644
index 000000000000..d3584756e34e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_types.h
@@ -0,0 +1,605 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_TYPES_H
+#define _IA_CSS_TYPES_H
+
+/* @file
+ * This file contains types used for the ia_css parameters.
+ * These types are in a separate file because they are expected
+ * to be used in software layers that do not access the CSS API
+ * directly but still need to forward parameters for it.
+ */
+
+#include <type_support.h>
+
+#include "ia_css_frac.h"
+
+#include "isp/kernels/aa/aa_2/ia_css_aa2_types.h"
+#include "isp/kernels/anr/anr_1.0/ia_css_anr_types.h"
+#include "isp/kernels/anr/anr_2/ia_css_anr2_types.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h"
+#include "isp/kernels/csc/csc_1.0/ia_css_csc_types.h"
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp_types.h"
+#include "isp/kernels/de/de_1.0/ia_css_de_types.h"
+#include "isp/kernels/de/de_2/ia_css_de2_types.h"
+#include "isp/kernels/fc/fc_1.0/ia_css_formats_types.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h"
+#include "isp/kernels/gc/gc_1.0/ia_css_gc_types.h"
+#include "isp/kernels/gc/gc_2/ia_css_gc2_types.h"
+#include "isp/kernels/macc/macc_1.0/ia_css_macc_types.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob_types.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h"
+#include "isp/kernels/sc/sc_1.0/ia_css_sc_types.h"
+#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h"
+#include "isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h"
+#include "isp/kernels/wb/wb_1.0/ia_css_wb_types.h"
+#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h"
+#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h"
+
+/* ISP2401 */
+#include "isp/kernels/tnr/tnr3/ia_css_tnr3_types.h"
+
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h"
+#include "isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h"
+#include "isp/kernels/output/output_1.0/ia_css_output_types.h"
+
+#define IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
+/** Should be removed after the driver adaptation is done */
+
+#define IA_CSS_VERSION_MAJOR 2
+#define IA_CSS_VERSION_MINOR 0
+#define IA_CSS_VERSION_REVISION 2
+
+#define IA_CSS_MORPH_TABLE_NUM_PLANES 6
+
+/* Min and max exposure IDs. These macros are here to allow
+ * the drivers to get this information. Changing these macros
+ * constitutes a CSS API change. */
+#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1 /** Minimum exposure ID */
+#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /** Maximum exposure ID */
+
+/* opaque types */
+struct ia_css_isp_parameters;
+struct ia_css_pipe;
+struct ia_css_memory_offsets;
+struct ia_css_config_memory_offsets;
+struct ia_css_state_memory_offsets;
+
+/* Virtual address within the CSS address space. */
+typedef u32 ia_css_ptr;
+
+/* Generic resolution structure.
+ */
+struct ia_css_resolution {
+ u32 width; /** Width */
+ u32 height; /** Height */
+};
+
+/* Generic coordinate structure.
+ */
+struct ia_css_coordinate {
+ s32 x; /** Value of a coordinate on the horizontal axis */
+ s32 y; /** Value of a coordinate on the vertical axis */
+};
+
+/* Vector with signed values. This is used to indicate motion for
+ * Digital Image Stabilization.
+ */
+struct ia_css_vector {
+ s32 x; /** horizontal motion (in pixels) */
+ s32 y; /** vertical motion (in pixels) */
+};
+
+/* Short hands */
+#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
+#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
+
+/* CSS data descriptor */
+struct ia_css_data {
+ ia_css_ptr address; /** CSS virtual address */
+ u32 size; /** Disabled if 0 */
+};
+
+/* Host data descriptor */
+struct ia_css_host_data {
+ char *address; /** Host address */
+ u32 size; /** Disabled if 0 */
+};
+
+/* ISP data descriptor */
+struct ia_css_isp_data {
+ u32 address; /** ISP address */
+ u32 size; /** Disabled if 0 */
+};
+
+/* Shading Correction types. */
+enum ia_css_shading_correction_type {
+ IA_CSS_SHADING_CORRECTION_NONE, /** Shading Correction is not processed in the pipe. */
+ IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
+
+ /** More shading correction types can be added in the future. */
+};
+
+/* Shading Correction information. */
+struct ia_css_shading_info {
+ enum ia_css_shading_correction_type type; /** Shading Correction type. */
+
+ union { /* Shading Correction information of each Shading Correction types. */
+
+ /* Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
+ *
+ * This structure contains the information necessary to generate
+ * the shading table required in the isp.
+ * This structure is filled in the css,
+ * and the driver needs to get it to generate the shading table.
+ *
+ * Before the shading correction is applied, NxN-filter and/or scaling
+ * are applied in the isp, depending on the isp binaries.
+ * Then, these should be considered in generating the shading table.
+ * - Bad pixels on left/top sides generated by NxN-filter
+ * (Bad pixels are NOT considered currently,
+ * because they are subtle.)
+ * - Down-scaling/Up-scaling factor
+ *
+ * Shading correction is applied to the area
+ * which has real sensor data and margin.
+ * Then, the shading table should cover the area including margin.
+ * This structure has this information.
+ * - Origin coordinate of bayer (real sensor data)
+ * on the shading table
+ *
+ * ------------------------ISP 2401-----------------------
+ *
+	 * The following describes the shading table directly required by the ISP.
+	 * This structure is filled in the CSS, and the driver needs to read it to generate the shading table.
+ *
+ * The shading correction is applied to the bayer area which contains sensor data and padding data.
+ * The shading table should cover this bayer area.
+ *
+ * The shading table size directly required from ISP is expressed by these parameters.
+ * 1. uint32_t num_hor_grids;
+ * 2. uint32_t num_ver_grids;
+ * 3. uint32_t bqs_per_grid_cell;
+ *
+ * In some isp binaries, the bayer scaling is applied before the shading correction is applied.
+ * Then, this scaling factor should be considered in generating the shading table.
+ * The scaling factor is expressed by these parameters.
+ * 4. uint32_t bayer_scale_hor_ratio_in;
+ * 5. uint32_t bayer_scale_hor_ratio_out;
+ * 6. uint32_t bayer_scale_ver_ratio_in;
+ * 7. uint32_t bayer_scale_ver_ratio_out;
+ *
+ * The sensor data size inputted to ISP is expressed by this parameter.
+ * This is the size BEFORE the bayer scaling is applied.
+ * 8. struct ia_css_resolution isp_input_sensor_data_res_bqs;
+ *
+ * The origin of the sensor data area positioned on the shading table at the shading correction
+ * is expressed by this parameter.
+ * The size of this area assumes the size AFTER the bayer scaling is applied
+	 * to the isp_input_sensor_data_res_bqs.
+ * 9. struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
+ *
+ * ****** Definitions of the shading table and the sensor data at the shading correction ******
+ *
+ * (0,0)--------------------- TW -------------------------------
+ * | shading table |
+ * | (ox,oy)---------- W -------------------------- |
+ * | | sensor data | |
+ * | | | |
+ * TH H sensor data center | |
+ * | | (cx,cy) | |
+ * | | | |
+ * | | | |
+ * | | | |
+ * | ------------------------------------------- |
+ * | |
+ * ----------------------------------------------------------
+ *
+ * Example of still mode for output 1080p:
+ *
+ * num_hor_grids = 66
+ * num_ver_grids = 37
+ * bqs_per_grid_cell = 16
+ * bayer_scale_hor_ratio_in = 1
+ * bayer_scale_hor_ratio_out = 1
+ * bayer_scale_ver_ratio_in = 1
+ * bayer_scale_ver_ratio_out = 1
+	 *	isp_input_sensor_data_res_bqs = {966, 546}
+ * sensor_data_origin_bqs_on_sctbl = {61, 15}
+ *
+ * TW, TH [bqs]: width and height of shading table
+ * TW = (num_hor_grids - 1) * bqs_per_grid_cell = (66 - 1) * 16 = 1040
+ * TH = (num_ver_grids - 1) * bqs_per_grid_cell = (37 - 1) * 16 = 576
+ *
+ * W, H [bqs]: width and height of sensor data at shading correction
+ * W = sensor_data_res_bqs.width
+ * = isp_input_sensor_data_res_bqs.width
+ * * bayer_scale_hor_ratio_out / bayer_scale_hor_ratio_in + 0.5 = 966
+ * H = sensor_data_res_bqs.height
+ * = isp_input_sensor_data_res_bqs.height
+ * * bayer_scale_ver_ratio_out / bayer_scale_ver_ratio_in + 0.5 = 546
+ *
+ * (ox, oy) [bqs]: origin of sensor data positioned on shading table at shading correction
+ * ox = sensor_data_origin_bqs_on_sctbl.x = 61
+ * oy = sensor_data_origin_bqs_on_sctbl.y = 15
+ *
+ * (cx, cy) [bqs]: center of sensor data positioned on shading table at shading correction
+ * cx = ox + W/2 = 61 + 966/2 = 544
+ * cy = oy + H/2 = 15 + 546/2 = 288
+ *
+ * ****** Relation between the shading table and the sensor data ******
+ *
+ * The origin of the sensor data should be on the shading table.
+ * 0 <= ox < TW, 0 <= oy < TH
+ *
+ * ****** How to center the shading table on the sensor data ******
+ *
+ * To center the shading table on the sensor data,
+ * CSS decides the shading table size so that a certain grid point is positioned
+ * on the center of the sensor data at the shading correction.
+ * CSS expects the shading center is set on this grid point
+ * when the shading table data is calculated in AIC.
+ *
+ * W, H [bqs]: width and height of sensor data at shading correction
+ * W = sensor_data_res_bqs.width
+ * H = sensor_data_res_bqs.height
+ *
+ * (cx, cy) [bqs]: center of sensor data positioned on shading table at shading correction
+ * cx = sensor_data_origin_bqs_on_sctbl.x + W/2
+ * cy = sensor_data_origin_bqs_on_sctbl.y + H/2
+ *
+ * CSS decides the shading table size and the sensor data position
+ * so that the (cx, cy) satisfies this condition.
+ * mod(cx, bqs_per_grid_cell) = 0
+ * mod(cy, bqs_per_grid_cell) = 0
+ *
+ * ****** How to change the sensor data size by processes in the driver and ISP ******
+ *
+ * 1. sensor data size: Physical sensor size
+ * (The struct ia_css_shading_info does not have this information.)
+ * 2. process: Driver applies the sensor cropping/binning/scaling to physical sensor size.
+ * 3. sensor data size: ISP input size (== shading_info.isp_input_sensor_data_res_bqs)
+ * (ISP assumes the ISP input sensor data is centered on the physical sensor.)
+ * 4. process: ISP applies the bayer scaling by the factor of shading_info.bayer_scale_*.
+ * 5. sensor data size: Scaling factor * ISP input size (== shading_info.sensor_data_res_bqs)
+ * 6. process: ISP applies the shading correction.
+ *
+ * ISP block: SC1
+ * ISP1: SC1 is used.
+ * ISP2: SC1 is used.
+ */
+ struct {
+ /* ISP2400 */
+ u32 enable; /** Shading correction enabled.
+ 0:disabled, 1:enabled */
+
+ /* ISP2401 */
+ u32 num_hor_grids; /** Number of data points per line per color on shading table. */
+ u32 num_ver_grids; /** Number of lines of data points per color on shading table. */
+ u32 bqs_per_grid_cell; /** Grid cell size in BQ unit.
+ NOTE: bqs = size in BQ(Bayer Quad) unit.
+ 1BQ means {Gr,R,B,Gb} (2x2 pixels).
+ Horizontal 1 bqs corresponds to horizontal 2 pixels.
+ Vertical 1 bqs corresponds to vertical 2 pixels. */
+ u32 bayer_scale_hor_ratio_in;
+ u32 bayer_scale_hor_ratio_out;
+
+ /** Horizontal ratio of bayer scaling between input width and output width,
+ for the scaling which should be done before shading correction.
+ output_width = input_width * bayer_scale_hor_ratio_out
+ / bayer_scale_hor_ratio_in + 0.5 */
+ u32 bayer_scale_ver_ratio_in;
+ u32 bayer_scale_ver_ratio_out;
+
+		/** Vertical ratio of bayer scaling
+		    between input height and output height, for the scaling
+		    which should be done before shading correction.
+		    output_height = input_height * bayer_scale_ver_ratio_out
+				    / bayer_scale_ver_ratio_in + 0.5 */
+ /* ISP2400 */
+ u32 sc_bayer_origin_x_bqs_on_shading_table;
+ /** X coordinate (in bqs) of bayer origin on shading table.
+ This indicates the left-most pixel of bayer
+ (not include margin) inputted to the shading correction.
+ This corresponds to the left-most pixel of bayer
+ inputted to isp from sensor. */
+ /* ISP2400 */
+ u32 sc_bayer_origin_y_bqs_on_shading_table;
+ /** Y coordinate (in bqs) of bayer origin on shading table.
+ This indicates the top pixel of bayer
+ (not include margin) inputted to the shading correction.
+ This corresponds to the top pixel of bayer
+ inputted to isp from sensor. */
+
+ /* ISP2401 */
+ struct ia_css_resolution isp_input_sensor_data_res_bqs;
+ /** Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
+ NOTE: This is NOT the size of the physical sensor size.
+ CSS requests the driver that ISP inputs sensor data
+ by the size of isp_input_sensor_data_res_bqs.
+ The driver sends the sensor data to ISP,
+ after the adequate cropping/binning/scaling
+ are applied to the physical sensor data area.
+ ISP assumes the area of isp_input_sensor_data_res_bqs
+ is centered on the physical sensor. */
+ /* ISP2401 */
+ struct ia_css_resolution sensor_data_res_bqs;
+ /** Sensor data size (in bqs) at shading correction.
+ This is the size AFTER bayer scaling. */
+ /* ISP2401 */
+ struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
+ /** Origin of sensor data area positioned on shading table at shading correction.
+ The coordinate x,y should be positive values. */
+ } type_1;
+
+	/** More structures can be added here when more shading correction types
+	    are added in the future. */
+ } info;
+};
+
+/* Default Shading Correction information of Shading Correction Type 1. */
+#define DEFAULT_SHADING_INFO_TYPE_1 \
+(struct ia_css_shading_info) { \
+ .type = IA_CSS_SHADING_CORRECTION_TYPE_1, \
+ .info = { \
+ .type_1 = { \
+ .bayer_scale_hor_ratio_in = 1, \
+ .bayer_scale_hor_ratio_out = 1, \
+ .bayer_scale_ver_ratio_in = 1, \
+ .bayer_scale_ver_ratio_out = 1, \
+ } \
+ } \
+}
+
+/* Default Shading Correction information. */
+#define DEFAULT_SHADING_INFO DEFAULT_SHADING_INFO_TYPE_1
+
+/* structure that describes the 3A and DIS grids */
+struct ia_css_grid_info {
+ /* \name ISP input size
+ * that is visible for user
+ * @{
+ */
+ u32 isp_in_width;
+ u32 isp_in_height;
+ /* @}*/
+
+ struct ia_css_3a_grid_info s3a_grid; /** 3A grid info */
+ union ia_css_dvs_grid_u dvs_grid;
+ /** All types of DVS statistics grid info union */
+
+ enum ia_css_vamem_type vamem_type;
+};
+
+/* defaults for ia_css_grid_info structs */
+#define DEFAULT_GRID_INFO \
+(struct ia_css_grid_info) { \
+ .dvs_grid = DEFAULT_DVS_GRID_INFO, \
+ .vamem_type = IA_CSS_VAMEM_TYPE_1 \
+}
+
+/* Morphing table, used for geometric distortion and chromatic aberration
+ * correction (GDCAC, also called GDC).
+ * This table describes the imperfections introduced by the lens, the
+ * advanced ISP can correct for these imperfections using this table.
+ */
+struct ia_css_morph_table {
+ u32 enable; /** To disable GDC, set this field to false. The
+ coordinates fields can be set to NULL in this case. */
+ u32 height; /** Table height */
+ u32 width; /** Table width */
+ u16 *coordinates_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ /** X coordinates that describe the sensor imperfection */
+ u16 *coordinates_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ /** Y coordinates that describe the sensor imperfection */
+};
+
+struct ia_css_dvs_6axis_config {
+ unsigned int exp_id;
+ /** Exposure ID, see ia_css_event_public.h for more detail */
+ u32 width_y;
+ u32 height_y;
+ u32 width_uv;
+ u32 height_uv;
+ u32 *xcoords_y;
+ u32 *ycoords_y;
+ u32 *xcoords_uv;
+ u32 *ycoords_uv;
+};
+
+/**
+ * This specifies the coordinates (x,y)
+ */
+struct ia_css_point {
+ s32 x; /** x coordinate */
+ s32 y; /** y coordinate */
+};
+
+/**
+ * This specifies the region
+ */
+struct ia_css_region {
+ struct ia_css_point origin; /** Starting point coordinates for the region */
+ struct ia_css_resolution resolution; /** Region resolution */
+};
+
+/**
+ * Digital zoom:
+ * This feature is currently available only for video, but will become
+ * available for preview and capture as well.
+ * Set the digital zoom factor; the factor is specified as a divisor, so the
+ * actual zoom factor will be 64/x.
+ * Setting dx or dy to 0 disables digital zoom for that direction.
+ * New API change for digital zoom: (added struct ia_css_region zoom_region)
+ * zoom_region specifies the origin of the zoom region and the width and
+ * height of that region.
+ * origin: the coordinate (x,y) within the effective input resolution
+ * of the stream, where x >= 0 and y >= 0. (0,0) maps to the upper left of the
+ * effective input resolution.
+ * resolution: the resolution of the zoom region,
+ * where x + width <= effective input width
+ * and y + height <= effective input height
+ */
+struct ia_css_dz_config {
+ u32 dx; /** Horizontal zoom factor */
+ u32 dy; /** Vertical zoom factor */
+ struct ia_css_region zoom_region; /** region for zoom */
+};
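+
+/* Since the actual zoom factor is 64/x, dx = dy = 32 requests 64/32 = 2x
+ * zoom. A centered 2x region sketch, with eff_w and eff_h standing in for
+ * the effective input width and height:
+@code
+ struct ia_css_dz_config dz = {
+	.dx = 32,
+	.dy = 32,
+	.zoom_region = {
+		.origin = { .x = eff_w / 4, .y = eff_h / 4 },
+		.resolution = { .width = eff_w / 2, .height = eff_h / 2 },
+	},
+ };
+@endcode
+ */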
+
+/* The still capture mode, this can be RAW (simply copy sensor input to DDR),
+ * Primary ISP, the Advanced ISP (GDC) or the low-light ISP (ANR).
+ */
+enum ia_css_capture_mode {
+ IA_CSS_CAPTURE_MODE_RAW, /** no processing, copy data only */
+ IA_CSS_CAPTURE_MODE_BAYER, /** bayer processing, up to demosaic */
+ IA_CSS_CAPTURE_MODE_PRIMARY, /** primary ISP */
+ IA_CSS_CAPTURE_MODE_ADVANCED, /** advanced ISP (GDC) */
+ IA_CSS_CAPTURE_MODE_LOW_LIGHT /** low light ISP (ANR) */
+};
+
+struct ia_css_capture_config {
+ enum ia_css_capture_mode mode; /** Still capture mode */
+ u32 enable_xnr; /** Enable/disable XNR */
+ u32 enable_raw_output;
+ bool enable_capture_pp_bli; /** Enable capture_pp_bli mode */
+};
+
+/* default settings for ia_css_capture_config structs */
+#define DEFAULT_CAPTURE_CONFIG \
+(struct ia_css_capture_config) { \
+ .mode = IA_CSS_CAPTURE_MODE_PRIMARY, \
+}
+
+/* ISP filter configuration. This is a collection of configurations
+ * for each of the ISP filters (modules).
+ *
+ * NOTE! The contents of all pointers are copied on get or set, with the
+ * exception of the shading and morph tables. For these we only copy the
+ * pointer, so the caller must make sure the memory contents of these pointers
+ * remain valid as long as they are used by the CSS. This will be fixed in the
+ * future by copying the contents instead of just the pointer.
+ *
+ * Comment:
+ * ["ISP block", 1&2] : ISP block is used both for ISP1 and ISP2.
+ * ["ISP block", 1only] : ISP block is used only for ISP1.
+ * ["ISP block", 2only] : ISP block is used only for ISP2.
+ */
+struct ia_css_isp_config {
+ struct ia_css_wb_config *wb_config; /** White Balance
+ [WB1, 1&2] */
+ struct ia_css_cc_config *cc_config; /** Color Correction
+ [CSC1, 1only] */
+ struct ia_css_tnr_config *tnr_config; /** Temporal Noise Reduction
+ [TNR1, 1&2] */
+ struct ia_css_ecd_config *ecd_config; /** Eigen Color Demosaicing
+ [DE2, 2only] */
+ struct ia_css_ynr_config *ynr_config; /** Y(Luma) Noise Reduction
+ [YNR2&YEE2, 2only] */
+ struct ia_css_fc_config *fc_config; /** Fringe Control
+ [FC2, 2only] */
+ struct ia_css_formats_config
+ *formats_config; /** Formats Control for main output
+ [FORMATS, 1&2] */
+ struct ia_css_cnr_config *cnr_config; /** Chroma Noise Reduction
+ [CNR2, 2only] */
+ struct ia_css_macc_config *macc_config; /** MACC
+ [MACC2, 2only] */
+ struct ia_css_ctc_config *ctc_config; /** Chroma Tone Control
+ [CTC2, 2only] */
+ struct ia_css_aa_config *aa_config; /** YUV Anti-Aliasing
+ [AA2, 2only]
+ (not used currently) */
+ struct ia_css_aa_config *baa_config; /** Bayer Anti-Aliasing
+ [BAA2, 1&2] */
+ struct ia_css_ce_config *ce_config; /** Chroma Enhancement
+ [CE1, 1only] */
+ struct ia_css_dvs_6axis_config *dvs_6axis_config;
+	struct ia_css_ob_config *ob_config;  /** Optical Black
+ [OB1, 1&2] */
+ struct ia_css_dp_config *dp_config; /** Defect Pixel Correction
+ [DPC1/DPC2, 1&2] */
+ struct ia_css_nr_config *nr_config; /** Noise Reduction
+ [BNR1&YNR1&CNR1, 1&2]*/
+ struct ia_css_ee_config *ee_config; /** Edge Enhancement
+ [YEE1, 1&2] */
+ struct ia_css_de_config *de_config; /** Demosaic
+ [DE1, 1only] */
+ struct ia_css_gc_config *gc_config; /** Gamma Correction (for YUV)
+ [GC1, 1only] */
+ struct ia_css_anr_config *anr_config; /** Advanced Noise Reduction */
+ struct ia_css_3a_config *s3a_config; /** 3A Statistics config */
+ struct ia_css_xnr_config *xnr_config; /** eXtra Noise Reduction */
+ struct ia_css_dz_config *dz_config; /** Digital Zoom */
+ struct ia_css_cc_config *yuv2rgb_cc_config; /** Color Correction
+ [CCM2, 2only] */
+ struct ia_css_cc_config *rgb2yuv_cc_config; /** Color Correction
+ [CSC2, 2only] */
+ struct ia_css_macc_table *macc_table; /** MACC
+ [MACC1/MACC2, 1&2]*/
+ struct ia_css_gamma_table *gamma_table; /** Gamma Correction (for YUV)
+ [GC1, 1only] */
+ struct ia_css_ctc_table *ctc_table; /** Chroma Tone Control
+ [CTC1, 1only] */
+
+ /* \deprecated */
+ struct ia_css_xnr_table *xnr_table; /** eXtra Noise Reduction
+ [XNR1, 1&2] */
+ struct ia_css_rgb_gamma_table *r_gamma_table;/** sRGB Gamma Correction
+ [GC2, 2only] */
+ struct ia_css_rgb_gamma_table *g_gamma_table;/** sRGB Gamma Correction
+ [GC2, 2only] */
+ struct ia_css_rgb_gamma_table *b_gamma_table;/** sRGB Gamma Correction
+ [GC2, 2only] */
+ struct ia_css_vector *motion_vector; /** For 2-axis DVS */
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_morph_table *morph_table;
+ struct ia_css_dvs_coefficients *dvs_coefs; /** DVS 1.0 coefficients */
+ struct ia_css_dvs2_coefficients *dvs2_coefs; /** DVS 2.0 coefficients */
+ struct ia_css_capture_config *capture_config;
+ struct ia_css_anr_thres *anr_thres;
+ /* @deprecated{Old shading settings, see bugzilla bz675 for details} */
+ struct ia_css_shading_settings *shading_settings;
+ struct ia_css_xnr3_config *xnr3_config; /** eXtreme Noise Reduction v3 */
+ /* comment from Lasse: Be aware how this feature will affect coordinate
+ * normalization in different parts of the system. (e.g. face detection,
+ * touch focus, 3A statistics and windows of interest, shading correction,
+ * DVS, GDC) from IQ tool level and application level down-to ISP FW level.
+ * the risk for regression is not in the individual blocks, but how they
+ * integrate together. */
+ struct ia_css_output_config
+ *output_config; /** Main Output Mirroring, flipping */
+
+ struct ia_css_scaler_config
+ *scaler_config; /** Skylake: scaler config (optional) */
+ struct ia_css_formats_config
+ *formats_config_display;/** Formats control for viewfinder/display output (optional)
+ [OSYS, n/a] */
+ struct ia_css_output_config
+ *output_config_display; /** Viewfinder/display output mirroring, flipping (optional) */
+
+ struct ia_css_frame
+ *output_frame; /** Output frame the config is to be applied to (optional) */
+ u32 isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
+};
+
+#endif /* _IA_CSS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_version.h b/drivers/staging/media/atomisp/pci/ia_css_version.h
new file mode 100644
index 000000000000..1e88901e0b82
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_version.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_VERSION_H
+#define __IA_CSS_VERSION_H
+
+/* @file
+ * This file contains functions to retrieve CSS-API version information
+ */
+
+#include <ia_css_err.h>
+
+/* a common size for the version arrays */
+#define MAX_VERSION_SIZE 500
+
+/* @brief Retrieves the current CSS version
+ * @param[out] version A pointer to a buffer where to put the generated
+ * version string. NULL is ignored.
+ * @param[in] max_size Size of the version buffer. If version string
+ * would be larger than max_size, an error is
+ * returned by this function.
+ *
+ * This function generates and returns the version string. If FW is loaded, it
+ * attaches the FW version.
+ */
+enum ia_css_err
+ia_css_get_version(char *version, int max_size);
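+
+/* A usage sketch with a buffer sized by the MAX_VERSION_SIZE define above;
+ * the printk() call is just for illustration:
+@code
+ char version[MAX_VERSION_SIZE];
+
+ if (ia_css_get_version(version, sizeof(version)) == IA_CSS_SUCCESS)
+	printk(KERN_INFO "CSS version: %s\n", version);
+@endcode
+ */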
+
+#endif /* __IA_CSS_VERSION_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_version_data.h b/drivers/staging/media/atomisp/pci/ia_css_version_data.h
new file mode 100644
index 000000000000..f630fa5d55cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_version_data.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+//
+// This file contains the version data for the CSS
+//
+// === Do not change - automatically generated ===
+//
+
+#ifndef __IA_CSS_VERSION_DATA_H
+#define __IA_CSS_VERSION_DATA_H
+
+#define ISP2400_CSS_VERSION_STRING "REL:20150521_21.4_0539; API:2.1.15.3; GIT:irci_candrpv_0415_20150504_35b345#35b345be52ac575f8934abb3a88fea26a94e7343; SDK:/nfs/iir/disks/iir_hivepackages_003/iir_hivepkgs_disk017/Css_Mizuchi/packages/Css_Mizuchi/int_css_mizuchi_20140829_1053; USER:viedifw; "
+#define ISP2401_CSS_VERSION_STRING "REL:20150911_37.5_1652; API:2.1.20.9; GIT:irci___#ebf437d53a8951bb7ff6d13fdb7270dab393a92a; SDK:; USER:viedifw; "
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/if_defs.h b/drivers/staging/media/atomisp/pci/if_defs.h
new file mode 100644
index 000000000000..7d39e45796ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/if_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IF_DEFS_H
+#define _IF_DEFS_H
+
+#define HIVE_IF_FRAME_REQUEST 0xA000
+#define HIVE_IF_LINES_REQUEST 0xB000
+#define HIVE_IF_VECTORS_REQUEST 0xC000
+
+#endif /* _IF_DEFS_H */
diff --git a/drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h
new file mode 100644
index 000000000000..176456da961f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _if_subsystem_defs_h__
+#define _if_subsystem_defs_h__
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_1 1
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_2 2
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_3 3
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_4 4
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_5 5
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_6 6
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_7 7
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_FSYNC_LUT_REG 8
+#define HIVE_IFMT_GP_REGS_SRST_IDX 9
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IDX 10
+
+#define HIVE_IFMT_GP_REGS_CH_ID_FMT_TYPE_IDX 11
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_BASE HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0
+
+/* order of the input bits for the ifmt irq controller */
+#define HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID 0
+#define HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID 1
+#define HIVE_IFMT_IRQ_IFT_SEC_BIT_ID 2
+#define HIVE_IFMT_IRQ_MEM_CPY_BIT_ID 3
+#define HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID 4
+
+/* order of the input bits for the ifmt Soft reset register */
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SRST_MEM_CPY_BIT_IDX 3
+
+/* order of the input bits for the ifmt Soft reset register */
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_MEM_CPY_BIT_IDX 3
+
+#endif /* _if_subsystem_defs_h__ */
diff --git a/drivers/staging/media/atomisp/pci/input_selector_defs.h b/drivers/staging/media/atomisp/pci/input_selector_defs.h
new file mode 100644
index 000000000000..1dd8ea3cd6d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_selector_defs.h
@@ -0,0 +1,88 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_selector_defs_h
+#define _input_selector_defs_h
+
+#ifndef HIVE_ISP_ISEL_SEL_BITS
+#define HIVE_ISP_ISEL_SEL_BITS 2
+#endif
+
+#ifndef HIVE_ISP_CH_ID_BITS
+#define HIVE_ISP_CH_ID_BITS 2
+#endif
+
+#ifndef HIVE_ISP_FMT_TYPE_BITS
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#endif
+
+/* gp_register register id's -- Outputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_ENABLE_IDX 0
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FREE_RUNNING_IDX 1
+#define HIVE_ISEL_GP_REGS_SYNCGEN_PAUSE_IDX 2
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_FRAMES_IDX 3
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_PIX_IDX 4
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_LINES_IDX 5
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HBLANK_CYCLES_IDX 6
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VBLANK_CYCLES_IDX 7
+
+#define HIVE_ISEL_GP_REGS_SOF_IDX 8
+#define HIVE_ISEL_GP_REGS_EOF_IDX 9
+#define HIVE_ISEL_GP_REGS_SOL_IDX 10
+#define HIVE_ISEL_GP_REGS_EOL_IDX 11
+
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE 12
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE_PORT_B 13
+#define HIVE_ISEL_GP_REGS_PRBS_LFSR_RESET_VALUE 14
+
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE 15
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE_PORT_B 16
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_MASK_IDX 17
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_MASK_IDX 18
+#define HIVE_ISEL_GP_REGS_TPG_XY_CNT_MASK_IDX 19
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_DELTA_IDX 20
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_DELTA_IDX 21
+#define HIVE_ISEL_GP_REGS_TPG_MODE_IDX 22
+#define HIVE_ISEL_GP_REGS_TPG_R1_IDX 23
+#define HIVE_ISEL_GP_REGS_TPG_G1_IDX 24
+#define HIVE_ISEL_GP_REGS_TPG_B1_IDX 25
+#define HIVE_ISEL_GP_REGS_TPG_R2_IDX 26
+#define HIVE_ISEL_GP_REGS_TPG_G2_IDX 27
+#define HIVE_ISEL_GP_REGS_TPG_B2_IDX 28
+
+#define HIVE_ISEL_GP_REGS_CH_ID_IDX 29
+#define HIVE_ISEL_GP_REGS_FMT_TYPE_IDX 30
+#define HIVE_ISEL_GP_REGS_DATA_SEL_IDX 31
+#define HIVE_ISEL_GP_REGS_SBAND_SEL_IDX 32
+#define HIVE_ISEL_GP_REGS_SYNC_SEL_IDX 33
+#define HIVE_ISEL_GP_REGS_SRST_IDX 37
+
+#define HIVE_ISEL_GP_REGS_SRST_SYNCGEN_BIT 0
+#define HIVE_ISEL_GP_REGS_SRST_PRBS_BIT 1
+#define HIVE_ISEL_GP_REGS_SRST_TPG_BIT 2
+#define HIVE_ISEL_GP_REGS_SRST_FIFO_BIT 3
+
+/* gp_register register id's -- Inputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HOR_CNT_IDX 34
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VER_CNT_IDX 35
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FRAMES_CNT_IDX 36
+
+/* irq sources isel irq controller */
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID 0
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID 1
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID 2
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID 3
+#define HIVE_ISEL_IRQ_NUM_IRQS 4
+
+#endif /* _input_selector_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/input_switch_2400_defs.h b/drivers/staging/media/atomisp/pci/input_switch_2400_defs.h
new file mode 100644
index 000000000000..2d5baae30522
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_switch_2400_defs.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_switch_2400_defs_h
+#define _input_switch_2400_defs_h
+
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type) (((ch_id) * 2) + ((fmt_type) >= 16))
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type) (((fmt_type) % 16) * 2)
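+
+/* Worked example (illustrative only): the macros above imply one 2-bit LUT
+ * entry per fmt_type, 16 entries per register, two registers per channel.
+ * For ch_id = 1 and fmt_type = 18: _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(1, 18)
+ * = (1 * 2) + (18 >= 16) = 3, and _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(18)
+ * = (18 % 16) * 2 = 4, i.e. the entry lives in LUT register 3 at bit 4.
+ */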
+
+#define HIVE_INPUT_SWITCH_SELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_SELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_SELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM 3
+#define HIVE_INPUT_SWITCH_VSELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_VSELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_VSELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_VSELECT_STR_TO_MEM 4
+
+#endif /* _input_switch_2400_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h b/drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h
new file mode 100644
index 000000000000..fcfa8c4971be
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h
@@ -0,0 +1,243 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_system_ctrl_defs_h
+#define _input_system_ctrl_defs_h
+
+#define _INPUT_SYSTEM_CTRL_REG_ALIGN 4 /* assuming 32 bit control bus width */
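+/* With this alignment, the byte address of register N on the slave bus is
+ * assumed to be N * _INPUT_SYSTEM_CTRL_REG_ALIGN relative to the block base.
+ */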
+
+/* --------------------------------------------------*/
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+// Number of registers
+#define ISYS_CTRL_NOF_REGS 23
+
+// Register IDs of MMIO slave accessible registers
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_ID 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_ID 1
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_ID 2
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID 3
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID 4
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID 5
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID 6
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID 7
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID 8
+#define ISYS_CTRL_ACQ_START_ADDR_REG_ID 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID 10
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID 11
+#define ISYS_CTRL_INIT_REG_ID 12
+#define ISYS_CTRL_LAST_COMMAND_REG_ID 13
+#define ISYS_CTRL_NEXT_COMMAND_REG_ID 14
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID 15
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID 16
+#define ISYS_CTRL_FSM_STATE_INFO_REG_ID 17
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID 18
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID 19
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID 20
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID 21
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID 22
+
+/* register reset value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_RSTVAL 3
+#define ISYS_CTRL_ACQ_START_ADDR_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ISYS_CTRL_INIT_REG_RSTVAL 0
+#define ISYS_CTRL_LAST_COMMAND_REG_RSTVAL 15 // 0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_NEXT_COMMAND_REG_RSTVAL 15 // 0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_RSTVAL 15 // 0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_RSTVAL 15 // 0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_RSTVAL 0
+
+/* register width value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_START_ADDR_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ISYS_CTRL_INIT_REG_WIDTH 3
+#define ISYS_CTRL_LAST_COMMAND_REG_WIDTH 32 /* slave data width */
+#define ISYS_CTRL_NEXT_COMMAND_REG_WIDTH 32
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_WIDTH 1
+
+/* bit definitions */
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+
+/*
+Command tokens:
+
+InpSysCaptFramesAcq  1/0 [3:0]   - 'b0000
+                         [7:4]   - CaptPortId:
+                                   'b0000 - CaptA
+                                   'b0001 - CaptB
+                                   'b0010 - CaptC
+                         [31:16] - NOF_frames
+
+InpSysCaptFrameExt   2/0 [3:0]   - 'b0001
+                         [7:4]   - CaptPortId:
+                                   'b0000 - CaptA
+                                   'b0001 - CaptB
+                                   'b0010 - CaptC
+                     2/1 [31:0]  - external capture address
+
+InpSysAcqFrame       2/0 [3:0]   - 'b0010
+                         [31:4]  - NOF_ext_mem_words
+                     2/1 [31:0]  - external memory read start address
+
+InpSysOverruleON     1/0 [3:0]   - 'b0011
+                         [7:4]   - overrule port id (opid):
+                                   'b0000 - CaptA
+                                   'b0001 - CaptB
+                                   'b0010 - CaptC
+                                   'b0011 - Acq
+                                   'b0100 - DMA
+
+InpSysOverruleOFF    1/0 [3:0]   - 'b0100
+                         [7:4]   - overrule port id (opid), as above
+
+InpSysOverruleCmd    2/0 [3:0]   - 'b0101
+                         [7:4]   - overrule port id (opid), as above
+                     2/1 [31:0]  - command token value for port opid
+
+Acknowledge tokens:
+
+InpSysAckCFA         1/0 [3:0]   - 'b0000
+                         [7:4]   - CaptPortId:
+                                   'b0000 - CaptA
+                                   'b0001 - CaptB
+                                   'b0010 - CaptC
+                         [31:16] - NOF_frames
+
+InpSysAckCFE         1/0 [3:0]   - 'b0001
+                         [7:4]   - CaptPortId, as above
+
+InpSysAckAF          1/0 [3:0]   - 'b0010
+
+InpSysAckOverruleON  1/0 [3:0]   - 'b0011
+                         [7:4]   - overrule port id (opid), as above
+
+InpSysAckOverruleOFF 1/0 [3:0]   - 'b0100
+                         [7:4]   - overrule port id (opid), as above
+
+InpSysAckOverrule    2/0 [3:0]   - 'b0101
+                         [7:4]   - overrule port id (opid), as above
+                     2/1 [31:0]  - acknowledge token value from port opid
+*/
+
+/* Command and acknowledge tokens IDs */
+#define ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID 0 /* 0000b */
+#define ISYS_CTRL_CAPT_FRAME_EXT_TOKEN_ID 1 /* 0001b */
+#define ISYS_CTRL_ACQ_FRAME_TOKEN_ID 2 /* 0010b */
+#define ISYS_CTRL_OVERRULE_ON_TOKEN_ID 3 /* 0011b */
+#define ISYS_CTRL_OVERRULE_OFF_TOKEN_ID 4 /* 0100b */
+#define ISYS_CTRL_OVERRULE_TOKEN_ID 5 /* 0101b */
+
+#define ISYS_CTRL_ACK_CFA_TOKEN_ID 0
+#define ISYS_CTRL_ACK_CFE_TOKEN_ID 1
+#define ISYS_CTRL_ACK_AF_TOKEN_ID 2
+#define ISYS_CTRL_ACK_OVERRULE_ON_TOKEN_ID 3
+#define ISYS_CTRL_ACK_OVERRULE_OFF_TOKEN_ID 4
+#define ISYS_CTRL_ACK_OVERRULE_TOKEN_ID 5
+#define ISYS_CTRL_ACK_DEVICE_ERROR_TOKEN_ID 6
+
+#define ISYS_CTRL_TOKEN_ID_MSB 3
+#define ISYS_CTRL_TOKEN_ID_LSB 0
+#define ISYS_CTRL_PORT_ID_TOKEN_MSB 7
+#define ISYS_CTRL_PORT_ID_TOKEN_LSB 4
+#define ISYS_CTRL_NOF_CAPT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_CAPT_TOKEN_LSB 16
+#define ISYS_CTRL_NOF_EXT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_EXT_TOKEN_LSB 8
+
+#define ISYS_CTRL_TOKEN_ID_IDX 0
+#define ISYS_CTRL_TOKEN_ID_BITS (ISYS_CTRL_TOKEN_ID_MSB - ISYS_CTRL_TOKEN_ID_LSB + 1)
+#define ISYS_CTRL_PORT_ID_IDX (ISYS_CTRL_TOKEN_ID_IDX + ISYS_CTRL_TOKEN_ID_BITS)
+#define ISYS_CTRL_PORT_ID_BITS (ISYS_CTRL_PORT_ID_TOKEN_MSB - ISYS_CTRL_PORT_ID_TOKEN_LSB + 1)
+#define ISYS_CTRL_NOF_CAPT_IDX ISYS_CTRL_NOF_CAPT_TOKEN_LSB
+#define ISYS_CTRL_NOF_CAPT_BITS (ISYS_CTRL_NOF_CAPT_TOKEN_MSB - ISYS_CTRL_NOF_CAPT_TOKEN_LSB + 1)
+#define ISYS_CTRL_NOF_EXT_IDX ISYS_CTRL_NOF_EXT_TOKEN_LSB
+#define ISYS_CTRL_NOF_EXT_BITS (ISYS_CTRL_NOF_EXT_TOKEN_MSB - ISYS_CTRL_NOF_EXT_TOKEN_LSB + 1)
+
+#define ISYS_CTRL_PORT_ID_CAPT_A 0 /* device ID for capture unit A */
+#define ISYS_CTRL_PORT_ID_CAPT_B 1 /* device ID for capture unit B */
+#define ISYS_CTRL_PORT_ID_CAPT_C 2 /* device ID for capture unit C */
+#define ISYS_CTRL_PORT_ID_ACQUISITION 3 /* device ID for acquisition unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_A 4 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_B 5 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_C 6 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_ACQ 7 /* device ID for dma unit */
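+
+/* Illustrative sketch (hypothetical helper, not part of the hardware
+ * interface): assuming the IDX/BITS definitions above give bit offsets
+ * within one 32-bit token word, a first command word could be assembled as:
+ *
+ *   #define ISYS_CTRL_MAKE_CAPT_TOKEN(token_id, port_id, nof_frames) \
+ *	(((token_id)   << ISYS_CTRL_TOKEN_ID_IDX) | \
+ *	 ((port_id)    << ISYS_CTRL_PORT_ID_IDX)  | \
+ *	 ((nof_frames) << ISYS_CTRL_NOF_CAPT_IDX))
+ *
+ * e.g. ISYS_CTRL_MAKE_CAPT_TOKEN(ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID,
+ * ISYS_CTRL_PORT_ID_CAPT_A, 1) would encode "capture one frame on CaptA",
+ * matching the InpSysCaptFramesAcq layout documented above.
+ */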
+
+#define ISYS_CTRL_NO_ACQ_ACK 16 /* no ack from acquisition unit */
+#define ISYS_CTRL_NO_DMA_ACK 0
+#define ISYS_CTRL_NO_CAPT_ACK 16
+
+#endif /* _input_system_ctrl_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/input_system_defs.h b/drivers/staging/media/atomisp/pci/input_system_defs.h
new file mode 100644
index 000000000000..ae62163034a6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_defs.h
@@ -0,0 +1,126 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_system_defs_h
+#define _input_system_defs_h
+
+/* csi controller modes */
+#define HIVE_CSI_CONFIG_MAIN 0
+#define HIVE_CSI_CONFIG_STEREO1 4
+#define HIVE_CSI_CONFIG_STEREO2 8
+
+/* general purpose register IDs */
+
+/* Stream Multicast select modes */
+#define HIVE_ISYS_GPREG_MULTICAST_A_IDX 0
+#define HIVE_ISYS_GPREG_MULTICAST_B_IDX 1
+#define HIVE_ISYS_GPREG_MULTICAST_C_IDX 2
+
+/* Stream Mux select modes */
+#define HIVE_ISYS_GPREG_MUX_IDX 3
+
+/* streaming monitor status and control */
+#define HIVE_ISYS_GPREG_STRMON_STAT_IDX 4
+#define HIVE_ISYS_GPREG_STRMON_COND_IDX 5
+#define HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX 6
+#define HIVE_ISYS_GPREG_SRST_IDX 7
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_IDX 8
+#define HIVE_ISYS_GPREG_REG_PORT_A_IDX 9
+#define HIVE_ISYS_GPREG_REG_PORT_B_IDX 10
+
+/* Bit numbers of the soft reset register */
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_A_BIT 3
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_B_BIT 4
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_C_BIT 5
+#define HIVE_ISYS_GPREG_SRST_CAPT_A_BIT 6
+#define HIVE_ISYS_GPREG_SRST_CAPT_B_BIT 7
+#define HIVE_ISYS_GPREG_SRST_CAPT_C_BIT 8
+#define HIVE_ISYS_GPREG_SRST_ACQ_BIT 9
+/* For ISYS_CTRL, 5 bits are defined to allow a soft reset per sub-controller and of the top-ctrl */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_BIT 10 /* LSB of the 5-bit vector */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_A_BIT 10
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_B_BIT 11
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_C_BIT 12
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_ACQ_BIT 13
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_TOP_BIT 14
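+/* e.g. (illustrative) writing 0x1f << HIVE_ISYS_GPREG_SRST_ISYS_CTRL_BIT to
+ * the soft reset register would assert the reset of all four sub-controllers
+ * and the top controller at once.
+ */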
+/* -- */
+#define HIVE_ISYS_GPREG_SRST_STR_MUX_BIT 15
+#define HIVE_ISYS_GPREG_SRST_CIO2AHB_BIT 16
+#define HIVE_ISYS_GPREG_SRST_GEN_SHORT_FIFO_BIT 17
+#define HIVE_ISYS_GPREG_SRST_WIDE_BUS_BIT 18 // includes CIO conv
+#define HIVE_ISYS_GPREG_SRST_DMA_BIT 19
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_A_BIT 20
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_B_BIT 21
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_C_BIT 22
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_ACQ_BIT 23
+#define HIVE_ISYS_GPREG_SRST_CSI_BE_OUT_BIT 24
+
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_A_BIT 0
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_B_BIT 1
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_C_BIT 2
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ACQ_BIT 3
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_DMA_BIT 4
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ISYS_CTRL_BIT 5
+
+/* streaming monitor port id's */
+#define HIVE_ISYS_STR_MON_PORT_CAPA 0
+#define HIVE_ISYS_STR_MON_PORT_CAPB 1
+#define HIVE_ISYS_STR_MON_PORT_CAPC 2
+#define HIVE_ISYS_STR_MON_PORT_ACQ 3
+#define HIVE_ISYS_STR_MON_PORT_CSS_GENSH 4
+#define HIVE_ISYS_STR_MON_PORT_SF_GENSH 5
+#define HIVE_ISYS_STR_MON_PORT_SP2ISYS 6
+#define HIVE_ISYS_STR_MON_PORT_ISYS2SP 7
+#define HIVE_ISYS_STR_MON_PORT_PIXA 8
+#define HIVE_ISYS_STR_MON_PORT_PIXB 9
+
+/* interrupt bit ID's */
+#define HIVE_ISYS_IRQ_CSI_SOF_BIT_ID 0
+#define HIVE_ISYS_IRQ_CSI_EOF_BIT_ID 1
+#define HIVE_ISYS_IRQ_CSI_SOL_BIT_ID 2
+#define HIVE_ISYS_IRQ_CSI_EOL_BIT_ID 3
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID 4
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID 5
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP 6
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP 7
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_A_UNDEF_PH 7*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP 8
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP 9
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_B_UNDEF_PH 10*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP 10
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP 11
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_C_UNDEF_PH 13*/
+#define HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH 12
+/*#define HIVE_ISYS_IRQ_ACQ_UNIT_UNDEF_PH 15*/
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPA 13
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPB 14
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPC 15
+#define HIVE_ISYS_IRQ_CIO2AHB 16
+#define HIVE_ISYS_IRQ_DMA_BIT_ID 17
+#define HIVE_ISYS_IRQ_STREAM_MON_BIT_ID 18
+#define HIVE_ISYS_IRQ_NUM_BITS 19
+
+/* DMA */
+#define HIVE_ISYS_DMA_CHANNEL 0
+#define HIVE_ISYS_DMA_IBUF_DDR_CONN 0
+#define HIVE_ISYS_DMA_HEIGHT 1
+#define HIVE_ISYS_DMA_ELEMS 1 /* both master buses of same width */
+#define HIVE_ISYS_DMA_STRIDE 0 /* no stride required as height is fixed to 1 */
+#define HIVE_ISYS_DMA_CROP 0 /* no cropping */
+#define HIVE_ISYS_DMA_EXTENSION 0 /* no extension as elem width is same on both side */
+
+#endif /* _input_system_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/input_system_global.h b/drivers/staging/media/atomisp/pci/input_system_global.h
new file mode 100644
index 000000000000..e75c2f29042d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_global.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#ifdef ISP2401
+# include "isp2401_input_system_global.h"
+#else
+# include "isp2400_input_system_global.h"
+#endif
diff --git a/drivers/staging/media/atomisp/pci/input_system_local.h b/drivers/staging/media/atomisp/pci/input_system_local.h
new file mode 100644
index 000000000000..8533a1e017e4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_local.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#ifdef ISP2401
+# include "isp2401_input_system_local.h"
+#else
+# include "isp2400_input_system_local.h"
+#endif
diff --git a/drivers/staging/media/atomisp/pci/input_system_private.h b/drivers/staging/media/atomisp/pci/input_system_private.h
new file mode 100644
index 000000000000..69c63a79a30c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_private.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#ifdef ISP2401
+# include "isp2401_input_system_private.h"
+#else
+# include "isp2400_input_system_private.h"
+#endif
diff --git a/drivers/staging/media/atomisp/pci/input_system_public.h b/drivers/staging/media/atomisp/pci/input_system_public.h
new file mode 100644
index 000000000000..17682c86bceb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_public.h
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#ifndef ISP2401
+# include "isp2400_input_system_public.h"
+#endif
diff --git a/drivers/staging/media/atomisp/pci/irq_controller_defs.h b/drivers/staging/media/atomisp/pci/irq_controller_defs.h
new file mode 100644
index 000000000000..efb3d7e135bd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/irq_controller_defs.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _irq_controller_defs_h
+#define _irq_controller_defs_h
+
+#define _HRT_IRQ_CONTROLLER_EDGE_REG_IDX 0
+#define _HRT_IRQ_CONTROLLER_MASK_REG_IDX 1
+#define _HRT_IRQ_CONTROLLER_STATUS_REG_IDX 2
+#define _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX 3
+#define _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX 4
+#define _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX 5
+#define _HRT_IRQ_CONTROLLER_STR_OUT_ENABLE_REG_IDX 6
+
+#define _HRT_IRQ_CONTROLLER_REG_ALIGN 4
+
+#endif /* _irq_controller_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c
new file mode 100644
index 000000000000..9cdfe50b2835
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+
+#include "ia_css_aa2.host.h"
+
+/* YUV Anti-Aliasing configuration. */
+const struct ia_css_aa_config default_aa_config = {
+ 8191 /* default should be 0 */
+};
+
+/* Bayer Anti-Aliasing configuration. */
+const struct ia_css_aa_config default_baa_config = {
+ 8191 /* default should be 0 */
+};
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h
new file mode 100644
index 000000000000..71587d85ff2d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_AA_HOST_H
+#define __IA_CSS_AA_HOST_H
+
+#include "ia_css_aa2_types.h"
+#include "ia_css_aa2_param.h"
+
+/* YUV Anti-Aliasing configuration. */
+extern const struct ia_css_aa_config default_aa_config;
+
+/* Bayer Anti-Aliasing configuration. */
+extern const struct ia_css_aa_config default_baa_config;
+
+#endif /* __IA_CSS_AA_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h
new file mode 100644
index 000000000000..3c699bae2f55
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_AA_PARAM_H
+#define __IA_CSS_AA_PARAM_H
+
+#include "type_support.h"
+
+struct sh_css_isp_aa_params {
+ s32 strength;
+};
+
+#endif /* __IA_CSS_AA_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h
new file mode 100644
index 000000000000..cc6a444ac716
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_AA2_TYPES_H
+#define __IA_CSS_AA2_TYPES_H
+
+/* @file
+ * CSS-API header file for Anti-Aliasing parameters.
+ */
+
+/* Anti-Aliasing configuration.
+ *
+ * This structure is used both for YUV AA and Bayer AA.
+ *
+ * 1. YUV Anti-Aliasing
+ * struct ia_css_aa_config *aa_config
+ *
+ * ISP block: AA2
+ * (ISP1: AA2 is not used.)
+ * ISP2: AA2 should be used, but it is currently unused.
+ *
+ * 2. Bayer Anti-Aliasing
+ * struct ia_css_aa_config *baa_config
+ *
+ * ISP block: BAA2
+ * ISP1: BAA2 is used.
+ * ISP2: BAA2 is used.
+ */
+struct ia_css_aa_config {
+ u16 strength; /** Strength of the filter.
+ u0.13, [0,8191],
+ default/ineffective 0 */
+};
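+
+/* Note (illustrative): u0.13 denotes an unsigned fixed-point value with 13
+ * fractional bits, so the register range [0, 8191] corresponds to
+ * [0.0, 8191/8192] in real terms; e.g. strength = 4096 represents 0.5.
+ */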
+
+#endif /* __IA_CSS_AA2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c
new file mode 100644
index 000000000000..c190483dc2b3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c
@@ -0,0 +1,61 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_anr.host.h"
+
+const struct ia_css_anr_config default_anr_config = {
+ 10,
+ {
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4,
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4,
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4,
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4
+ },
+ {10, 20, 30}
+};
+
+void
+ia_css_anr_encode(
+ struct sh_css_isp_anr_params *to,
+ const struct ia_css_anr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->threshold = from->threshold;
+}
+
+void
+ia_css_anr_dump(
+ const struct sh_css_isp_anr_params *anr,
+ unsigned int level)
+{
+	if (!anr)
+		return;
+	ia_css_debug_dtrace(level, "Advanced Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "anr_threshold", anr->threshold);
+}
+
+void
+ia_css_anr_debug_dtrace(
+ const struct ia_css_anr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d\n",
+ config->threshold);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h
new file mode 100644
index 000000000000..3855f54765e3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR_HOST_H
+#define __IA_CSS_ANR_HOST_H
+
+#include "ia_css_anr_types.h"
+#include "ia_css_anr_param.h"
+
+extern const struct ia_css_anr_config default_anr_config;
+
+void
+ia_css_anr_encode(
+ struct sh_css_isp_anr_params *to,
+ const struct ia_css_anr_config *from,
+ unsigned int size);
+
+void
+ia_css_anr_dump(
+ const struct sh_css_isp_anr_params *anr,
+ unsigned int level);
+
+void
+ia_css_anr_debug_dtrace(
+	const struct ia_css_anr_config *config,
+	unsigned int level);
+
+#endif /* __IA_CSS_ANR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h
new file mode 100644
index 000000000000..6bf834cb47d9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR_PARAM_H
+#define __IA_CSS_ANR_PARAM_H
+
+#include "type_support.h"
+
+/* ANR (Advanced Noise Reduction) */
+struct sh_css_isp_anr_params {
+ s32 threshold;
+};
+
+#endif /* __IA_CSS_ANR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
new file mode 100644
index 000000000000..d3fa0193ae07
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR_TYPES_H
+#define __IA_CSS_ANR_TYPES_H
+
+/* @file
+ * CSS-API header file for Advanced Noise Reduction kernel v1
+ */
+
+/* Application specific DMA settings */
+#define ANR_BPP 10
+#define ANR_ELEMENT_BITS ((CEIL_DIV(ANR_BPP, 8)) * 8)
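+/* e.g. with ANR_BPP = 10: CEIL_DIV(10, 8) = 2, so ANR_ELEMENT_BITS = 16,
+ * i.e. each 10-bit element is rounded up to a whole number of bytes for DMA.
+ */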
+
+/* Advanced Noise Reduction configuration.
+ * This is also known as Low-Light.
+ */
+struct ia_css_anr_config {
+ s32 threshold; /** Threshold */
+ s32 thresholds[4 * 4 * 4];
+ s32 factors[3];
+};
+
+#endif /* __IA_CSS_ANR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c
new file mode 100644
index 000000000000..feee073b5099
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_anr2.host.h"
+
+void
+ia_css_anr2_vmem_encode(
+ struct ia_css_isp_anr2_params *to,
+ const struct ia_css_anr_thres *from,
+ size_t size)
+{
+ unsigned int i;
+
+ (void)size;
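+	/* Reshape the flat host-side threshold array into the 2D VMEM
+	 * layout: row i receives the i-th group of ISP_VEC_NELEMS values.
+	 */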
+ for (i = 0; i < ANR_PARAM_SIZE; i++) {
+ unsigned int j;
+
+ for (j = 0; j < ISP_VEC_NELEMS; j++) {
+ to->data[i][j] = from->data[i * ISP_VEC_NELEMS + j];
+ }
+ }
+}
+
+void
+ia_css_anr2_debug_dtrace(
+ const struct ia_css_anr_thres *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h
new file mode 100644
index 000000000000..e99108682f5d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR2_HOST_H
+#define __IA_CSS_ANR2_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_anr2_types.h"
+#include "ia_css_anr2_param.h"
+#include "ia_css_anr2_table.host.h"
+
+void
+ia_css_anr2_vmem_encode(
+ struct ia_css_isp_anr2_params *to,
+ const struct ia_css_anr_thres *from,
+ size_t size);
+
+void
+ia_css_anr2_debug_dtrace(
+	const struct ia_css_anr_thres *config,
+	unsigned int level);
+
+#endif /* __IA_CSS_ANR2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h
new file mode 100644
index 000000000000..47a0fb08cfcc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR2_PARAM_H
+#define __IA_CSS_ANR2_PARAM_H
+
+#include "vmem.h"
+#include "ia_css_anr2_types.h"
+
+/* Advanced Noise Reduction (ANR) thresholds */
+
+struct ia_css_isp_anr2_params {
+	VMEM_ARRAY(data, ANR_PARAM_SIZE * ISP_VEC_NELEMS);
+};
+
+#endif /* __IA_CSS_ANR2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c
new file mode 100644
index 000000000000..070e90e3e2b5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c
@@ -0,0 +1,55 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+#include "ia_css_types.h"
+#include "ia_css_anr2_table.host.h"
+
+#if 1
+const struct ia_css_anr_thres default_anr_thres = {
+ {
+ 128, 384, 640, 896, 896, 640, 384, 128, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 128, 384, 640, 896, 896, 640, 384, 128,
+ 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20,
+ 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40,
+ 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60,
+ 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50,
+ 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100,
+ 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150,
+ 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30,
+ 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60,
+ 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90,
+ 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40,
+ 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80,
+ 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120
+ }
+};
+#else
+const struct ia_css_anr_thres default_anr_thres = {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ }
+};
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h
new file mode 100644
index 000000000000..534119e064c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR2_TABLE_HOST_H
+#define __IA_CSS_ANR2_TABLE_HOST_H
+
+#include "ia_css_anr2_types.h"
+
+extern const struct ia_css_anr_thres default_anr_thres;
+
+#endif /* __IA_CSS_ANR2_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h
new file mode 100644
index 000000000000..200df3829fc7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR2_TYPES_H
+#define __IA_CSS_ANR2_TYPES_H
+
+/* @file
+ * CSS-API header file for Advanced Noise Reduction kernel v2
+ */
+
+#include "type_support.h"
+
+#define ANR_PARAM_SIZE 13
+
+/* Advanced Noise Reduction (ANR) thresholds */
+struct ia_css_anr_thres {
+	s16 data[ANR_PARAM_SIZE * 64];
+};
+
+#endif /* __IA_CSS_ANR2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c
new file mode 100644
index 000000000000..6c7aa51ec079
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c
@@ -0,0 +1,66 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#if !defined(HAS_NO_HMEM)
+
+#include "memory_access.h"
+#include "ia_css_types.h"
+#include "sh_css_internal.h"
+#include "assert_support.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_bh.host.h"
+
+void
+ia_css_bh_hmem_decode(
+ struct ia_css_3a_rgby_output *out_ptr,
+ const struct ia_css_bh_table *hmem_buf)
+{
+ int i;
+
+ /*
+ * No weighted histogram, hence no grid definition
+ */
+ if (!hmem_buf)
+ return;
+ assert(sizeof_hmem(HMEM0_ID) == sizeof(*hmem_buf));
+
+ /* Deinterleave */
+ for (i = 0; i < HMEM_UNIT_SIZE; i++) {
+ out_ptr[i].r = hmem_buf->hmem[BH_COLOR_R][i];
+ out_ptr[i].g = hmem_buf->hmem[BH_COLOR_G][i];
+ out_ptr[i].b = hmem_buf->hmem[BH_COLOR_B][i];
+ out_ptr[i].y = hmem_buf->hmem[BH_COLOR_Y][i];
+ /* sh_css_print ("hmem[%d] = %d, %d, %d, %d\n",
+ i, out_ptr[i].r, out_ptr[i].g, out_ptr[i].b, out_ptr[i].y); */
+ }
+}
+
+void
+ia_css_bh_encode(
+ struct sh_css_isp_bh_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ (void)size;
+ /* coefficients to calculate Y */
+ to->y_coef_r =
+ uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_g =
+ uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_b =
+ uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT);
+}
+
+#endif /* !defined(HAS_NO_HMEM) */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h
new file mode 100644
index 000000000000..ccd83169fe22
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BH_HOST_H
+#define __IA_CSS_BH_HOST_H
+
+#include "ia_css_bh_param.h"
+#include "s3a/s3a_1.0/ia_css_s3a_types.h"
+
+void
+ia_css_bh_hmem_decode(
+ struct ia_css_3a_rgby_output *out_ptr,
+ const struct ia_css_bh_table *hmem_buf);
+
+void
+ia_css_bh_encode(
+ struct sh_css_isp_bh_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size);
+
+#endif /* __IA_CSS_BH_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h
new file mode 100644
index 000000000000..692a855ba012
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_HB_PARAM_H
+#define __IA_CSS_HB_PARAM_H
+
+#include "type_support.h"
+
+#ifndef PIPE_GENERATION
+#define __INLINE_HMEM__
+#include "hmem.h"
+#endif
+
+#include "ia_css_bh_types.h"
+
+/* AE (3A Support) */
+struct sh_css_isp_bh_params {
+ /* coefficients to calculate Y */
+ s32 y_coef_r;
+ s32 y_coef_g;
+ s32 y_coef_b;
+};
+
+/* This should be hmem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_bh_hmem_params {
+ u32 bh[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
+};
+
+#endif /* __IA_CSS_HB_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h
new file mode 100644
index 000000000000..8b2a53a26b75
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BH_TYPES_H
+#define __IA_CSS_BH_TYPES_H
+
+/* Number of elements in the BH table.
+ * Should be consistent with hmem.h
+ */
+#define IA_CSS_HMEM_BH_TABLE_SIZE ISP_HIST_DEPTH
+#define IA_CSS_HMEM_BH_UNIT_SIZE (ISP_HIST_DEPTH / ISP_HIST_COMPONENTS)
+
+#define BH_COLOR_R (0)
+#define BH_COLOR_G (1)
+#define BH_COLOR_B (2)
+#define BH_COLOR_Y (3)
+#define BH_COLOR_NUM (4)
+
+/* BH table */
+struct ia_css_bh_table {
+ u32 hmem[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
+};
+
+#endif /* __IA_CSS_BH_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c
new file mode 100644
index 000000000000..6888a7363710
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c
@@ -0,0 +1,196 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "type_support.h"
+#include "ia_css_bnlm.host.h"
+
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h" /* ia_css_debug_dtrace() */
+#endif
+#include <assert_support.h>
+
+#define BNLM_DIV_LUT_SIZE (12)
+static const s32 div_lut_nearests[BNLM_DIV_LUT_SIZE] = {
+ 0, 454, 948, 1484, 2070, 2710, 3412, 4184, 5035, 5978, 7025, 8191
+};
+
+static const s32 div_lut_slopes[BNLM_DIV_LUT_SIZE] = {
+	-7760, -6960, -6216, -5536, -4912, -4344, -3832, -3360, -2936, -2552, -2208, -2208
+};
+
+static const s32 div_lut_intercepts[BNLM_DIV_LUT_SIZE] = {
+ 8184, 7752, 7336, 6928, 6536, 6152, 5776, 5416, 5064, 4728, 4408, 4408
+};
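+
+/* The three tables above appear to describe a 12-segment piecewise-linear
+ * curve (segment thresholds, slopes and intercepts): nearests and slopes are
+ * packed into the division LUT by bnlm_lut_encode() below, while the
+ * intercepts are copied separately in ia_css_bnlm_vmem_encode().
+ */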
+
+/* Encodes a look-up table from BNLM public parameters to VMEM parameters.
+ * Inputs:
+ *	lut_thr : array containing the threshold values for the LUT
+ *	lut_val : array containing the output values related to lut_thr
+ *	lut_size: size of the lut_val array
+ * Output:
+ *	lut     : bnlm_lut struct receiving the encoded VMEM look-up table
+ */
+static inline void
+bnlm_lut_encode(struct bnlm_lut *lut, const int32_t *lut_thr,
+ const int32_t *lut_val, const uint32_t lut_size)
+{
+ u32 blk, i;
+ const u32 block_size = 16;
+ const u32 total_blocks = ISP_VEC_NELEMS / block_size;
+
+ /* Create VMEM LUTs from the threshold and value arrays.
+ *
+ * Min size of the LUT is 2 entries.
+ *
+ * Max size of the LUT is 16 entries, so that the LUT can fit into a
+ * single group of 16 elements inside a vector.
+ * Then these elements are copied into other groups inside the same
+ * vector. If the LUT size is less than 16, then remaining elements are
+ * set to 0.
+ */
+ assert((lut_size >= 2) && (lut_size <= block_size));
+ /* array lut_thr has (lut_size-1) entries */
+ for (i = 0; i < lut_size - 2; i++) {
+ /* Check if the lut_thr is monotonically increasing */
+ assert(lut_thr[i] <= lut_thr[i + 1]);
+ }
+
+ /* Initialize */
+ for (i = 0; i < total_blocks * block_size; i++) {
+ lut->thr[0][i] = 0;
+ lut->val[0][i] = 0;
+ }
+
+ /* Copy all data */
+ for (i = 0; i < lut_size - 1; i++) {
+ lut->thr[0][i] = lut_thr[i];
+ lut->val[0][i] = lut_val[i];
+ }
+ lut->val[0][i] = lut_val[i]; /* val has one more element than thr */
+
+ /* Copy data from first block to all blocks */
+ for (blk = 1; blk < total_blocks; blk++) {
+ u32 blk_offset = blk * block_size;
+
+ for (i = 1; i < lut_size; i++) {
+ lut->thr[0][blk_offset + i] = lut->thr[0][i];
+ lut->val[0][blk_offset + i] = lut->val[0][i];
+ }
+ }
+}
+
+/*
+ * - Encodes BNLM public parameters into VMEM parameters
+ * - Generates VMEM parameters which will be needed internally by the ISP
+ */
+void
+ia_css_bnlm_vmem_encode(
+ struct bnlm_vmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size)
+{
+ int i;
+ (void)size;
+
+ /* Initialize LUTs in VMEM parameters */
+ bnlm_lut_encode(&to->mu_root_lut, from->mu_root_lut_thr, from->mu_root_lut_val,
+ 16);
+ bnlm_lut_encode(&to->sad_norm_lut, from->sad_norm_lut_thr,
+ from->sad_norm_lut_val, 16);
+ bnlm_lut_encode(&to->sig_detail_lut, from->sig_detail_lut_thr,
+ from->sig_detail_lut_val, 16);
+ bnlm_lut_encode(&to->sig_rad_lut, from->sig_rad_lut_thr, from->sig_rad_lut_val,
+ 16);
+ bnlm_lut_encode(&to->rad_pow_lut, from->rad_pow_lut_thr, from->rad_pow_lut_val,
+ 16);
+ bnlm_lut_encode(&to->nl_0_lut, from->nl_0_lut_thr, from->nl_0_lut_val, 16);
+ bnlm_lut_encode(&to->nl_1_lut, from->nl_1_lut_thr, from->nl_1_lut_val, 16);
+ bnlm_lut_encode(&to->nl_2_lut, from->nl_2_lut_thr, from->nl_2_lut_val, 16);
+ bnlm_lut_encode(&to->nl_3_lut, from->nl_3_lut_thr, from->nl_3_lut_val, 16);
+
+ /* Initialize arrays in VMEM parameters */
+ memset(to->nl_th, 0, sizeof(to->nl_th));
+ to->nl_th[0][0] = from->nl_th[0];
+ to->nl_th[0][1] = from->nl_th[1];
+ to->nl_th[0][2] = from->nl_th[2];
+
+ memset(to->match_quality_max_idx, 0, sizeof(to->match_quality_max_idx));
+ to->match_quality_max_idx[0][0] = from->match_quality_max_idx[0];
+ to->match_quality_max_idx[0][1] = from->match_quality_max_idx[1];
+ to->match_quality_max_idx[0][2] = from->match_quality_max_idx[2];
+ to->match_quality_max_idx[0][3] = from->match_quality_max_idx[3];
+
+ bnlm_lut_encode(&to->div_lut, div_lut_nearests, div_lut_slopes,
+ BNLM_DIV_LUT_SIZE);
+ memset(to->div_lut_intercepts, 0, sizeof(to->div_lut_intercepts));
+ for (i = 0; i < BNLM_DIV_LUT_SIZE; i++) {
+ to->div_lut_intercepts[0][i] = div_lut_intercepts[i];
+ }
+
+ memset(to->power_of_2, 0, sizeof(to->power_of_2));
+ for (i = 0; i < (ISP_VEC_ELEMBITS - 1); i++) {
+ to->power_of_2[0][i] = 1 << i;
+ }
+}
+
+/* - Encodes BNLM public parameters into DMEM parameters */
+void
+ia_css_bnlm_encode(
+ struct bnlm_dmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size)
+{
+ (void)size;
+ to->rad_enable = from->rad_enable;
+ to->rad_x_origin = from->rad_x_origin;
+ to->rad_y_origin = from->rad_y_origin;
+ to->avg_min_th = from->avg_min_th;
+ to->max_min_th = from->max_min_th;
+
+ to->exp_coeff_a = from->exp_coeff_a;
+ to->exp_coeff_b = from->exp_coeff_b;
+ to->exp_coeff_c = from->exp_coeff_c;
+ to->exp_exponent = from->exp_exponent;
+}
+
+/* Prints debug traces for BNLM public parameters */
+void
+ia_css_bnlm_debug_trace(
+ const struct ia_css_bnlm_config *config,
+ unsigned int level)
+{
+ if (!config)
+ return;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(level, "BNLM:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_enable", config->rad_enable);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_x_origin",
+ config->rad_x_origin);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_y_origin",
+ config->rad_y_origin);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "avg_min_th", config->avg_min_th);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "max_min_th", config->max_min_th);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_a",
+ config->exp_coeff_a);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_b",
+ config->exp_coeff_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_c",
+ config->exp_coeff_c);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_exponent",
+ config->exp_exponent);
+
+ /* ToDo: print traces for LUTs */
+#endif /* IA_CSS_NO_DEBUG */
+}
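The div_lut_* tables at the top of this file describe a piecewise-linear approximation of a reciprocal: div_lut_nearests supplies the segment thresholds, div_lut_slopes the per-segment slopes (both packed into the vector LUT by bnlm_lut_encode()), and div_lut_intercepts the per-segment offsets copied separately in ia_css_bnlm_vmem_encode(). A minimal host-side sketch of how such a segment LUT evaluates is shown below; the function name, the Q13 scaling (suggested by the [0, 8191] range of the tables), and the rounding behavior are assumptions for illustration, not the ISP firmware's algorithm.

    #include <stdint.h>

    /* Hypothetical reference for evaluating a piecewise-linear LUT in the
     * div_lut form: thr[] holds segment thresholds, slope[] and icept[] the
     * per-segment line parameters. The >> 13 scaling is an assumption based
     * on the [0, 8191] range of the tables above.
     */
    static int32_t plin_lut_eval(int32_t x, const int32_t *thr,
                                 const int32_t *slope, const int32_t *icept,
                                 int n_seg)
    {
        int i = 0;

        /* Pick the last segment whose threshold does not exceed x. */
        while (i < n_seg - 1 && x >= thr[i + 1])
            i++;

        /* y = intercept + slope * x, in Q13 fixed point (assumed). */
        return icept[i] + (int32_t)(((int64_t)slope[i] * x) >> 13);
    }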
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h
new file mode 100644
index 000000000000..a57933bfb974
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNLM_HOST_H
+#define __IA_CSS_BNLM_HOST_H
+
+#include "ia_css_bnlm_types.h"
+#include "ia_css_bnlm_param.h"
+
+void
+ia_css_bnlm_vmem_encode(
+ struct bnlm_vmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size);
+
+void
+ia_css_bnlm_encode(
+ struct bnlm_dmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_bnlm_debug_trace(
+ const struct ia_css_bnlm_config *config,
+ unsigned int level);
+#endif
+
+#endif /* __IA_CSS_BNLM_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h
new file mode 100644
index 000000000000..c7d5cadf5fd4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h
@@ -0,0 +1,64 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNLM_PARAM_H
+#define __IA_CSS_BNLM_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+struct bnlm_lut {
+ VMEM_ARRAY(thr, ISP_VEC_NELEMS); /* thresholds */
+ VMEM_ARRAY(val, ISP_VEC_NELEMS); /* values */
+};
+
+struct bnlm_vmem_params {
+ VMEM_ARRAY(nl_th, ISP_VEC_NELEMS);
+ VMEM_ARRAY(match_quality_max_idx, ISP_VEC_NELEMS);
+ struct bnlm_lut mu_root_lut;
+ struct bnlm_lut sad_norm_lut;
+ struct bnlm_lut sig_detail_lut;
+ struct bnlm_lut sig_rad_lut;
+ struct bnlm_lut rad_pow_lut;
+ struct bnlm_lut nl_0_lut;
+ struct bnlm_lut nl_1_lut;
+ struct bnlm_lut nl_2_lut;
+ struct bnlm_lut nl_3_lut;
+
+ /* LUTs used for division approximation */
+ struct bnlm_lut div_lut;
+
+ VMEM_ARRAY(div_lut_intercepts, ISP_VEC_NELEMS);
+
+ /* 240x does not have an ISP instruction to left-shift each element of a
+ * vector by a different shift value. Hence the shift is simulated by
+ * multiplying the elements by the required 2^shift. */
+ VMEM_ARRAY(power_of_2, ISP_VEC_NELEMS);
+};
+
+/* BNLM ISP parameters */
+struct bnlm_dmem_params {
+ bool rad_enable;
+ s32 rad_x_origin;
+ s32 rad_y_origin;
+ s32 avg_min_th;
+ s32 max_min_th;
+
+ s32 exp_coeff_a;
+ u32 exp_coeff_b;
+ s32 exp_coeff_c;
+ u32 exp_exponent;
+};
+
+#endif /* __IA_CSS_BNLM_PARAM_H */
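The power_of_2 comment above explains the workaround; the identity it relies on, val << sh == val * (1 << sh) for results that fit the type, is what lets a per-element variable shift be expressed as a vector multiply. A standalone sketch (names are illustrative, not from the driver):

    #include <assert.h>
    #include <stdint.h>

    /* Emulate a variable left shift with a multiply, the trick the
     * power_of_2[] table enables on hardware without a per-element
     * variable-shift instruction.
     */
    static int32_t shift_via_multiply(int32_t val, const int32_t *power_of_2,
                                      unsigned int sh)
    {
        return val * power_of_2[sh];
    }

    int main(void)
    {
        int32_t pow2[31];
        unsigned int i;

        for (i = 0; i < 31; i++)
            pow2[i] = 1 << i;

        assert(shift_via_multiply(5, pow2, 7) == (5 << 7));
        return 0;
    }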
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h
new file mode 100644
index 000000000000..8dd1b1766c64
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h
@@ -0,0 +1,106 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNLM_TYPES_H
+#define __IA_CSS_BNLM_TYPES_H
+
+/* @file
+ * CSS-API header file for Bayer Non-Linear Mean parameters.
+ */
+
+#include "type_support.h" /* int32_t */
+
+/* Bayer Non-Linear Mean configuration
+ *
+ * \brief BNLM public parameters.
+ * \details Struct with all parameters for the BNLM kernel that can be set
+ * from the CSS API.
+ *
+ * ISP2.6.1: BNLM is used.
+ */
+struct ia_css_bnlm_config {
+ bool rad_enable; /** Enable a radial dependency in a weight calculation */
+ s32 rad_x_origin; /** Initial x coordinate for a radius calculation */
+ s32 rad_y_origin; /** Initial y coordinate for a radius calculation */
+ /* a threshold for average of weights if this < Th, do not denoise pixel */
+ s32 avg_min_th;
+ /* minimum weight for denoising if max < th, do not denoise pixel */
+ s32 max_min_th;
+
+ /**@{*/
+ /* Coefficients for the approximation, of the form (1 + x / N)^N,
+ * that fit the first-order exp() to the default exp_lut in the BNLM sheet.
+ */
+ s32 exp_coeff_a;
+ u32 exp_coeff_b;
+ s32 exp_coeff_c;
+ u32 exp_exponent;
+ /**@}*/
+
+ s32 nl_th[3]; /** Detail thresholds */
+
+ /* Index for n-th maximum candidate weight for each detail group */
+ s32 match_quality_max_idx[4];
+
+ /**@{*/
+ /* A lookup table for 1/sqrt(1+mu) approximation */
+ s32 mu_root_lut_thr[15];
+ s32 mu_root_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* A lookup table for SAD normalization */
+ s32 sad_norm_lut_thr[15];
+ s32 sad_norm_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* A lookup table that models a weight's dependency on textures */
+ s32 sig_detail_lut_thr[15];
+ s32 sig_detail_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* A lookup table that models a weight's dependency on a pixel's radial distance */
+ s32 sig_rad_lut_thr[15];
+ s32 sig_rad_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* A lookup table to control denoise power depending on a pixel's radial distance */
+ s32 rad_pow_lut_thr[15];
+ s32 rad_pow_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* Non-linear transfer functions to calculate the blending coefficient depending on the detail group */
+ /* detail group 0 */
+ /**@{*/
+ s32 nl_0_lut_thr[15];
+ s32 nl_0_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* detail group 1 */
+ s32 nl_1_lut_thr[15];
+ s32 nl_1_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* detail group 2 */
+ s32 nl_2_lut_thr[15];
+ s32 nl_2_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* detail group 3 */
+ s32 nl_3_lut_thr[15];
+ s32 nl_3_lut_val[16];
+ /**@}*/
+ /**@}*/
+};
+
+#endif /* __IA_CSS_BNLM_TYPES_H */
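The exp_coeff_a/b/c and exp_exponent fields parameterize the (1 + x / N)^N first-order approximation of exp() mentioned in the comment above. How a, b, and c scale x in fixed point is not defined in this header, so the sketch below only illustrates the core identity, with N = 2^k evaluated by repeated squaring:

    #include <stdio.h>

    /* (1 + x/N)^N with N = 2^k, computed by squaring k times; this is the
     * classic first-order exp() approximation the exp_* fields refer to.
     * Floating point only; the driver's fixed-point mapping is not shown
     * in this header.
     */
    static double approx_exp_pow2(double x, unsigned int k)
    {
        double y = 1.0 + x / (double)(1u << k);

        while (k--)
            y *= y;
        return y;
    }

    int main(void)
    {
        /* With k = 10: (1 + 1/1024)^1024 ~= 2.7170, vs e ~= 2.7183. */
        printf("%f\n", approx_exp_pow2(1.0, 10));
        return 0;
    }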
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c
new file mode 100644
index 000000000000..a5e20596539d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c
@@ -0,0 +1,131 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "type_support.h"
+#include "ia_css_bnr2_2.host.h"
+
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h" /* ia_css_debug_dtrace() */
+#endif
+
+/* Default kernel parameters. */
+const struct ia_css_bnr2_2_config default_bnr2_2_config = {
+ 200,
+ 200,
+ 200,
+ 0,
+ 0,
+ 0,
+ 200,
+ 200,
+ 200,
+ 0,
+ 0,
+ 0,
+ 0,
+ 4096,
+ 8191,
+ 128,
+ 1,
+ 0,
+ 0,
+ 0,
+ 8191,
+ 0,
+ 8191
+};
+
+void
+ia_css_bnr2_2_encode(
+ struct sh_css_isp_bnr2_2_params *to,
+ const struct ia_css_bnr2_2_config *from,
+ size_t size)
+{
+ (void)size;
+ to->d_var_gain_r = from->d_var_gain_r;
+ to->d_var_gain_g = from->d_var_gain_g;
+ to->d_var_gain_b = from->d_var_gain_b;
+ to->d_var_gain_slope_r = from->d_var_gain_slope_r;
+ to->d_var_gain_slope_g = from->d_var_gain_slope_g;
+ to->d_var_gain_slope_b = from->d_var_gain_slope_b;
+
+ to->n_var_gain_r = from->n_var_gain_r;
+ to->n_var_gain_g = from->n_var_gain_g;
+ to->n_var_gain_b = from->n_var_gain_b;
+ to->n_var_gain_slope_r = from->n_var_gain_slope_r;
+ to->n_var_gain_slope_g = from->n_var_gain_slope_g;
+ to->n_var_gain_slope_b = from->n_var_gain_slope_b;
+
+ to->dir_thres = from->dir_thres;
+ to->dir_thres_w = from->dir_thres_w;
+ to->var_offset_coef = from->var_offset_coef;
+
+ to->dir_gain = from->dir_gain;
+ to->detail_gain = from->detail_gain;
+ to->detail_gain_divisor = from->detail_gain_divisor;
+ to->detail_level_offset = from->detail_level_offset;
+
+ to->d_var_th_min = from->d_var_th_min;
+ to->d_var_th_max = from->d_var_th_max;
+ to->n_var_th_min = from->n_var_th_min;
+ to->n_var_th_max = from->n_var_th_max;
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_bnr2_2_debug_dtrace(
+ const struct ia_css_bnr2_2_config *bnr,
+ unsigned int level)
+{
+ if (!bnr)
+ return;
+
+ ia_css_debug_dtrace(level, "Bayer Noise Reduction 2.2:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_r", bnr->d_var_gain_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_g", bnr->d_var_gain_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_b", bnr->d_var_gain_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_r",
+ bnr->d_var_gain_slope_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_g",
+ bnr->d_var_gain_slope_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_b",
+ bnr->d_var_gain_slope_b);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_r", bnr->n_var_gain_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_g", bnr->n_var_gain_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_b", bnr->n_var_gain_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_r",
+ bnr->n_var_gain_slope_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_g",
+ bnr->n_var_gain_slope_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_b",
+ bnr->n_var_gain_slope_b);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres", bnr->dir_thres);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres_w", bnr->dir_thres_w);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "var_offset_coef",
+ bnr->var_offset_coef);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_gain", bnr->dir_gain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain", bnr->detail_gain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain_divisor",
+ bnr->detail_gain_divisor);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_level_offset",
+ bnr->detail_level_offset);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_th_min", bnr->d_var_th_min);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_th_max", bnr->d_var_th_max);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_min", bnr->n_var_th_min);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_max", bnr->n_var_th_max);
+}
+#endif /* IA_CSS_NO_DEBUG */
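The positional initializer of default_bnr2_2_config above is easy to misread against the 23 fields of struct ia_css_bnr2_2_config (declared later in this patch). For reference, an equivalent designated-initializer form pairing each value with its field in declaration order:

    /* Equivalent to the positional default_bnr2_2_config above; zero-valued
     * fields are spelled out so the mapping can be audited.
     */
    static const struct ia_css_bnr2_2_config default_bnr2_2_config_alt = {
        .d_var_gain_r        = 200,
        .d_var_gain_g        = 200,
        .d_var_gain_b        = 200,
        .d_var_gain_slope_r  = 0,
        .d_var_gain_slope_g  = 0,
        .d_var_gain_slope_b  = 0,
        .n_var_gain_r        = 200,
        .n_var_gain_g        = 200,
        .n_var_gain_b        = 200,
        .n_var_gain_slope_r  = 0,
        .n_var_gain_slope_g  = 0,
        .n_var_gain_slope_b  = 0,
        .dir_thres           = 0,
        .dir_thres_w         = 4096,
        .var_offset_coef     = 8191,
        .dir_gain            = 128,
        .detail_gain         = 1,
        .detail_gain_divisor = 0,
        .detail_level_offset = 0,
        .d_var_th_min        = 0,
        .d_var_th_max        = 8191,
        .n_var_th_min        = 0,
        .n_var_th_max        = 8191,
    };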
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h
new file mode 100644
index 000000000000..a021733dcdf7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef __IA_CSS_BNR2_2_HOST_H
+#define __IA_CSS_BNR2_2_HOST_H
+
+#include "ia_css_bnr2_2_types.h"
+#include "ia_css_bnr2_2_param.h"
+
+extern const struct ia_css_bnr2_2_config default_bnr2_2_config;
+
+void
+ia_css_bnr2_2_encode(
+ struct sh_css_isp_bnr2_2_params *to,
+ const struct ia_css_bnr2_2_config *from,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_bnr2_2_debug_dtrace(
+ const struct ia_css_bnr2_2_config *config,
+ unsigned int level);
+#endif
+
+#endif /* __IA_CSS_BNR2_2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h
new file mode 100644
index 000000000000..698fdc0b13fa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNR2_2_PARAM_H
+#define __IA_CSS_BNR2_2_PARAM_H
+
+#include "type_support.h"
+
+/* BNR (Bayer Noise Reduction) ISP parameters */
+struct sh_css_isp_bnr2_2_params {
+ s32 d_var_gain_r;
+ s32 d_var_gain_g;
+ s32 d_var_gain_b;
+ s32 d_var_gain_slope_r;
+ s32 d_var_gain_slope_g;
+ s32 d_var_gain_slope_b;
+ s32 n_var_gain_r;
+ s32 n_var_gain_g;
+ s32 n_var_gain_b;
+ s32 n_var_gain_slope_r;
+ s32 n_var_gain_slope_g;
+ s32 n_var_gain_slope_b;
+ s32 dir_thres;
+ s32 dir_thres_w;
+ s32 var_offset_coef;
+ s32 dir_gain;
+ s32 detail_gain;
+ s32 detail_gain_divisor;
+ s32 detail_level_offset;
+ s32 d_var_th_min;
+ s32 d_var_th_max;
+ s32 n_var_th_min;
+ s32 n_var_th_max;
+};
+
+#endif /* __IA_CSS_BNR2_2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
new file mode 100644
index 000000000000..ee9569891747
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
@@ -0,0 +1,71 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNR2_2_TYPES_H
+#define __IA_CSS_BNR2_2_TYPES_H
+
+/* @file
+ * CSS-API header file for Bayer Noise Reduction parameters.
+ */
+
+#include "type_support.h" /* int32_t */
+
+/* Bayer Noise Reduction 2.2 configuration
+ *
+ * \brief BNR2_2 public parameters.
+ * \details Struct with all parameters for the BNR2.2 kernel that can be set
+ * from the CSS API.
+ *
+ * ISP2.6.1: BNR2.2 is used.
+ */
+struct ia_css_bnr2_2_config {
+ /**@{*/
+ /* Directional variance gain for R/G/B components in dark region */
+ s32 d_var_gain_r;
+ s32 d_var_gain_g;
+ s32 d_var_gain_b;
+ /**@}*/
+ /**@{*/
+ /* Slope of Directional variance gain between dark and bright region */
+ s32 d_var_gain_slope_r;
+ s32 d_var_gain_slope_g;
+ s32 d_var_gain_slope_b;
+ /**@}*/
+ /**@{*/
+ /* Non-Directional variance gain for R/G/B components in dark region */
+ s32 n_var_gain_r;
+ s32 n_var_gain_g;
+ s32 n_var_gain_b;
+ /**@}*/
+ /**@{*/
+ /* Slope of Non-Directional variance gain between dark and bright region */
+ s32 n_var_gain_slope_r;
+ s32 n_var_gain_slope_g;
+ s32 n_var_gain_slope_b;
+ /**@}*/
+
+ s32 dir_thres; /** Threshold for directional filtering */
+ s32 dir_thres_w; /** Threshold width for directional filtering */
+ s32 var_offset_coef; /** Variance offset coefficient */
+ s32 dir_gain; /** Gain for directional coefficient */
+ s32 detail_gain; /** Gain for low contrast texture control */
+ s32 detail_gain_divisor; /** Gain divisor for low contrast texture control */
+ s32 detail_level_offset; /** Bias value for low contrast texture control */
+ s32 d_var_th_min; /** Minimum clipping value for directional variance */
+ s32 d_var_th_max; /** Maximum clipping value for directional variance */
+ s32 n_var_th_min; /** Minimum clipping value for non-directional variance */
+ s32 n_var_th_max; /** Maximum clipping value for non-directional variance */
+};
+
+#endif /* __IA_CSS_BNR2_2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
new file mode 100644
index 000000000000..5efb0ce7f323
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
@@ -0,0 +1,64 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_bnr.host.h"
+
+void
+ia_css_bnr_encode(
+ struct sh_css_isp_bnr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ /* BNR (Bayer Noise Reduction) */
+ to->threshold_low =
+ uDIGIT_FITTING(from->direction, 16, SH_CSS_BAYER_BITS);
+ to->threshold_width_log2 = uFRACTION_BITS_FITTING(8);
+ to->threshold_width =
+ 1 << to->threshold_width_log2;
+ to->gain_all =
+ uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT);
+ to->gain_dir =
+ uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT);
+ to->clip = uDIGIT_FITTING(16384U, 16, SH_CSS_BAYER_BITS);
+}
+
+void
+ia_css_bnr_dump(
+ const struct sh_css_isp_bnr_params *bnr,
+ unsigned int level)
+{
+ if (!bnr)
+ 	return;
+ ia_css_debug_dtrace(level, "Bayer Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_gain_all", bnr->gain_all);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_gain_dir", bnr->gain_dir);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_threshold_low",
+ bnr->threshold_low);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_threshold_width_log2",
+ bnr->threshold_width_log2);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_threshold_width",
+ bnr->threshold_width);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_clip", bnr->clip);
+}
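ia_css_bnr_encode() above relies on uDIGIT_FITTING() to rescale 16-bit fixed-point user values to the precisions the ISP expects (SH_CSS_BAYER_BITS, SH_CSS_BNR_GAIN_SHIFT). The macro itself lives in sh_css_defs.h and is not part of this hunk; the stand-in below is a plausible shift-based reading, offered as an assumption for illustration only:

    /* Hypothetical stand-in for uDIGIT_FITTING(): rescale an unsigned
     * fixed-point value from in_bits of fractional precision to out_bits.
     * The real macro is defined in sh_css_defs.h and may differ in detail.
     */
    static inline unsigned int digit_fitting(unsigned int v,
                                             unsigned int in_bits,
                                             unsigned int out_bits)
    {
        return in_bits >= out_bits ? v >> (in_bits - out_bits)
                                   : v << (out_bits - in_bits);
    }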
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h
new file mode 100644
index 000000000000..4c29b47b8177
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNR_HOST_H
+#define __IA_CSS_BNR_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ynr/ynr_1.0/ia_css_ynr_types.h"
+#include "ia_css_bnr_param.h"
+
+void
+ia_css_bnr_encode(
+ struct sh_css_isp_bnr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned int size);
+
+void
+ia_css_bnr_dump(
+ const struct sh_css_isp_bnr_params *bnr,
+ unsigned int level);
+
+#endif /* __IA_CSS_BNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h
new file mode 100644
index 000000000000..52f21ce8f4d2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNR_PARAM_H
+#define __IA_CSS_BNR_PARAM_H
+
+#include "type_support.h"
+
+/* BNR (Bayer Noise Reduction) */
+struct sh_css_isp_bnr_params {
+ s32 gain_all;
+ s32 gain_dir;
+ s32 threshold_low;
+ s32 threshold_width_log2;
+ s32 threshold_width;
+ s32 clip;
+};
+
+#endif /* __IA_CSS_BNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c
new file mode 100644
index 000000000000..c50afa6bf8a6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_cnr.host.h"
+
+/* Keep the interface here; it is not enabled yet because the host does
+ * not know the size of the individual state.
+ */
+void
+ia_css_init_cnr_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h
new file mode 100644
index 000000000000..87250ca5842c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR_HOST_H
+#define __IA_CSS_CNR_HOST_H
+
+#include "ia_css_cnr_param.h"
+
+void
+ia_css_init_cnr_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ * state,
+ size_t size);
+
+#endif /* __IA_CSS_CNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h
new file mode 100644
index 000000000000..c1af207cbf9a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR_PARAM_H
+#define __IA_CSS_CNR_PARAM_H
+
+#include "type_support.h"
+
+/* CNR (Chroma Noise Reduction) */
+/* Reuse YNR1 param structure */
+#include "../../ynr/ynr_1.0/ia_css_ynr_param.h"
+
+#endif /* __IA_CSS_CNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c
new file mode 100644
index 000000000000..610871d213bb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c
@@ -0,0 +1,73 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_cnr2.host.h"
+
+const struct ia_css_cnr_config default_cnr_config = {
+ 0,
+ 0,
+ 100,
+ 100,
+ 100,
+ 50,
+ 50,
+ 50
+};
+
+void
+ia_css_cnr_encode(
+ struct sh_css_isp_cnr_params *to,
+ const struct ia_css_cnr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->coring_u = from->coring_u;
+ to->coring_v = from->coring_v;
+ to->sense_gain_vy = from->sense_gain_vy;
+ to->sense_gain_vu = from->sense_gain_vu;
+ to->sense_gain_vv = from->sense_gain_vv;
+ to->sense_gain_hy = from->sense_gain_hy;
+ to->sense_gain_hu = from->sense_gain_hu;
+ to->sense_gain_hv = from->sense_gain_hv;
+}
+
+void
+ia_css_cnr_debug_dtrace(
+ const struct ia_css_cnr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.coring_u=%d, config.coring_v=%d, config.sense_gain_vy=%d, config.sense_gain_hy=%d, config.sense_gain_vu=%d, config.sense_gain_hu=%d, config.sense_gain_vv=%d, config.sense_gain_hv=%d\n",
+ config->coring_u, config->coring_v,
+ config->sense_gain_vy, config->sense_gain_hy,
+ config->sense_gain_vu, config->sense_gain_hu,
+ config->sense_gain_vv, config->sense_gain_hv);
+}
+
+void
+ia_css_init_cnr2_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h
new file mode 100644
index 000000000000..d322359feedf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR2_HOST_H
+#define __IA_CSS_CNR2_HOST_H
+
+#include "ia_css_cnr2_types.h"
+#include "ia_css_cnr2_param.h"
+
+extern const struct ia_css_cnr_config default_cnr_config;
+
+void
+ia_css_cnr_encode(
+ struct sh_css_isp_cnr_params *to,
+ const struct ia_css_cnr_config *from,
+ unsigned int size);
+
+void
+ia_css_cnr_dump(
+ const struct sh_css_isp_cnr_params *cnr,
+ unsigned int level);
+
+void
+ia_css_cnr_debug_dtrace(
+ const struct ia_css_cnr_config *config,
+ unsigned int level);
+
+void
+ia_css_init_cnr2_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ * state,
+ size_t size);
+#endif /* __IA_CSS_CNR2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h
new file mode 100644
index 000000000000..0d2fb2897720
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR2_PARAM_H
+#define __IA_CSS_CNR2_PARAM_H
+
+#include "type_support.h"
+
+/* CNR (Chroma Noise Reduction) */
+struct sh_css_isp_cnr_params {
+ s32 coring_u;
+ s32 coring_v;
+ s32 sense_gain_vy;
+ s32 sense_gain_vu;
+ s32 sense_gain_vv;
+ s32 sense_gain_hy;
+ s32 sense_gain_hu;
+ s32 sense_gain_hv;
+};
+
+#endif /* __IA_CSS_CNR2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
new file mode 100644
index 000000000000..35fc2e77eb3d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR2_TYPES_H
+#define __IA_CSS_CNR2_TYPES_H
+
+/* @file
+ * CSS-API header file for Chroma Noise Reduction (CNR) parameters.
+ */
+
+/* Chroma Noise Reduction configuration.
+ *
+ * A small edge sensitivity means stronger smoothing and better NR performance.
+ * If you see blurred color on vertical edges,
+ * set higher values on sense_gain_h*.
+ * If you see blurred color on horizontal edges,
+ * set higher values on sense_gain_v*.
+ *
+ * ISP block: CNR2
+ * (ISP1: CNR1 is used.)
+ * (ISP2: CNR1 is used for Preview/Video.)
+ * ISP2: CNR2 is used for Still.
+ */
+struct ia_css_cnr_config {
+ u16 coring_u; /** Coring level of U.
+ u0.13, [0,8191], default/ineffective 0 */
+ u16 coring_v; /** Coring level of V.
+ u0.13, [0,8191], default/ineffective 0 */
+ u16 sense_gain_vy; /** Sensitivity of horizontal edge of Y.
+ u13.0, [0,8191], default 100, ineffective 8191 */
+ u16 sense_gain_vu; /** Sensitivity of horizontal edge of U.
+ u13.0, [0,8191], default 100, ineffective 8191 */
+ u16 sense_gain_vv; /** Sensitivity of horizontal edge of V.
+ u13.0, [0,8191], default 100, ineffective 8191 */
+ u16 sense_gain_hy; /** Sensitivity of vertical edge of Y.
+ u13.0, [0,8191], default 50, ineffective 8191 */
+ u16 sense_gain_hu; /** Sensitivity of vertical edge of U.
+ u13.0, [0,8191], default 50, ineffective 8191 */
+ u16 sense_gain_hv; /** Sensitivity of vertical edge of V.
+ u13.0, [0,8191], default 50, ineffective 8191 */
+};
+
+#endif /* __IA_CSS_CNR2_TYPES_H */
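The struct comment gives a concrete tuning recipe: blurred color on vertical edges calls for higher sense_gain_h*, blurred color on horizontal edges for higher sense_gain_v*. A sketch of the first case as a helper; the value 200 is an arbitrary example within the documented [0, 8191] range, not a recommendation:

    #include "ia_css_cnr2.host.h"

    /* Raise the horizontal-edge sensitivities when vertical edges show
     * color blur, per the tuning advice in ia_css_cnr2_types.h.
     */
    static void cnr_reduce_vertical_edge_blur(struct ia_css_cnr_config *cfg)
    {
        cfg->sense_gain_hy = 200;
        cfg->sense_gain_hu = 200;
        cfg->sense_gain_hv = 200;
    }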
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c
new file mode 100644
index 000000000000..e64e26089a4d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "ia_css_conversion.host.h"
+
+const struct ia_css_conversion_config default_conversion_config = {
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+void
+ia_css_conversion_encode(
+ struct sh_css_isp_conversion_params *to,
+ const struct ia_css_conversion_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->en = from->en;
+ to->dummy0 = from->dummy0;
+ to->dummy1 = from->dummy1;
+ to->dummy2 = from->dummy2;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h
new file mode 100644
index 000000000000..c136d5e03511
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h
@@ -0,0 +1,29 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CONVERSION_HOST_H
+#define __IA_CSS_CONVERSION_HOST_H
+
+#include "ia_css_conversion_types.h"
+#include "ia_css_conversion_param.h"
+
+extern const struct ia_css_conversion_config default_conversion_config;
+
+void
+ia_css_conversion_encode(
+ struct sh_css_isp_conversion_params *to,
+ const struct ia_css_conversion_config *from,
+ unsigned int size);
+
+#endif /* __IA_CSS_CONVERSION_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h
new file mode 100644
index 000000000000..3a6ede394bdc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CONVERSION_PARAM_H
+#define __IA_CSS_CONVERSION_PARAM_H
+
+#include "type_support.h"
+
+/* CONVERSION */
+struct sh_css_isp_conversion_params {
+ u32 en;
+ u32 dummy0;
+ u32 dummy1;
+ u32 dummy2;
+};
+
+#endif /* __IA_CSS_CONVERSION_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
new file mode 100644
index 000000000000..79a626fe3a29
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CONVERSION_TYPES_H
+#define __IA_CSS_CONVERSION_TYPES_H
+
+/**
+ * Conversion Kernel parameters.
+ * Deinterleave bayer quad into isys format
+ *
+ * ISP block: CONVERSION
+ *
+ */
+struct ia_css_conversion_config {
+ u32 en; /** Enable flag for the conversion kernel */
+ u32 dummy0; /** Unused placeholder parameter 0 */
+ u32 dummy1; /** Unused placeholder parameter 1 */
+ u32 dummy2; /** Unused placeholder parameter 2 */
+};
+
+#endif /* __IA_CSS_CONVERSION_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
new file mode 100644
index 000000000000..6e29b7eeb3ed
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_copy_output.host.h"
+#include "ia_css_binary.h"
+#include "type_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+static const struct ia_css_copy_output_configuration default_config = {
+ .enable = false,
+};
+
+void
+ia_css_copy_output_config(
+ struct sh_css_isp_copy_output_isp_config *to,
+ const struct ia_css_copy_output_configuration *from,
+ unsigned int size)
+{
+ (void)size;
+ to->enable = from->enable;
+}
+
+void
+ia_css_copy_output_configure(
+ const struct ia_css_binary *binary,
+ bool enable)
+{
+ struct ia_css_copy_output_configuration config = default_config;
+
+ config.enable = enable;
+
+ ia_css_configure_copy_output(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h
new file mode 100644
index 000000000000..6f42abdec9bb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_COPY_OUTPUT_HOST_H
+#define __IA_CSS_COPY_OUTPUT_HOST_H
+
+#include "type_support.h"
+#include "ia_css_binary.h"
+
+#include "ia_css_copy_output_param.h"
+
+void
+ia_css_copy_output_config(
+ struct sh_css_isp_copy_output_isp_config *to,
+ const struct ia_css_copy_output_configuration *from,
+ unsigned int size);
+
+void
+ia_css_copy_output_configure(
+ const struct ia_css_binary *binary,
+ bool enable);
+
+#endif /* __IA_CSS_COPY_OUTPUT_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h
new file mode 100644
index 000000000000..587d0c62c936
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_COPY_PARAM_H
+#define __IA_CSS_COPY_PARAM_H
+
+struct ia_css_copy_output_configuration {
+ bool enable;
+};
+
+struct sh_css_isp_copy_output_isp_config {
+ u32 enable;
+};
+
+#endif /* __IA_CSS_COPY_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
new file mode 100644
index 000000000000..c6a3bd4fbf80
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
@@ -0,0 +1,64 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <assert_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_frame.h>
+#include <ia_css_binary.h>
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+#include "ia_css_crop.host.h"
+
+static const struct ia_css_crop_configuration default_config = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_crop_encode(
+ struct sh_css_isp_crop_isp_params *to,
+ const struct ia_css_crop_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->crop_pos = from->crop_pos;
+}
+
+void
+ia_css_crop_config(
+ struct sh_css_isp_crop_isp_config *to,
+ const struct ia_css_crop_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, from->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+
+ /* Assume divisibility here; may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_crop_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_crop_configuration config = default_config;
+
+ config.info = info;
+
+ ia_css_configure_crop(binary, &config);
+}
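ia_css_crop_config() above derives width_a_over_b as the ratio between the vector width of port A (ISP_VEC_NELEMS) and the element count the DMA configuration reports for port B, and asserts that the division is exact. Restated as a standalone helper:

    #include <assert.h>

    /* The port-width ratio computed in ia_css_crop_config(); the code
     * assumes, and asserts, that elems_b evenly divides elems_a.
     */
    static unsigned int width_a_over_b(unsigned int elems_a,
                                       unsigned int elems_b)
    {
        assert(elems_b != 0 && elems_a % elems_b == 0);
        return elems_a / elems_b;
    }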
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h
new file mode 100644
index 000000000000..2e451a872d2a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CROP_HOST_H
+#define __IA_CSS_CROP_HOST_H
+
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+
+#include "ia_css_crop_types.h"
+#include "ia_css_crop_param.h"
+
+void
+ia_css_crop_encode(
+ struct sh_css_isp_crop_isp_params *to,
+ const struct ia_css_crop_config *from,
+ unsigned int size);
+
+void
+ia_css_crop_config(
+ struct sh_css_isp_crop_isp_config *to,
+ const struct ia_css_crop_configuration *from,
+ unsigned int size);
+
+void
+ia_css_crop_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+#endif /* __IA_CSS_CROP_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
new file mode 100644
index 000000000000..35835929d252
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CROP_PARAM_H
+#define __IA_CSS_CROP_PARAM_H
+
+#include <type_support.h>
+#include "dma.h"
+#include "sh_css_internal.h" /* sh_css_crop_pos */
+
+/* Crop frame */
+struct sh_css_isp_crop_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+};
+
+struct sh_css_isp_crop_isp_params {
+ struct sh_css_crop_pos crop_pos;
+};
+
+#endif /* __IA_CSS_CROP_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
new file mode 100644
index 000000000000..5c166be6c5e8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CROP_TYPES_H
+#define __IA_CSS_CROP_TYPES_H
+
+/* Crop frame
+ *
+ * ISP block: crop frame
+ */
+
+#include <ia_css_frame_public.h>
+#include "sh_css_uds.h" /* sh_css_crop_pos */
+
+struct ia_css_crop_config {
+ struct sh_css_crop_pos crop_pos;
+};
+
+struct ia_css_crop_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_CROP_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c
new file mode 100644
index 000000000000..ea81e1d3e445
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c
@@ -0,0 +1,127 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+
+#include "ia_css_csc.host.h"
+
+const struct ia_css_cc_config default_cc_config = {
+ 8,
+ {255, 29, 120, 0, -374, -342, 0, -672, 301},
+};
+
+void
+ia_css_encode_cc(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size)
+{
+ (void)size;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() enter:\n");
+#endif
+
+ to->m_shift = (int16_t)from->fraction_bits;
+ to->m00 = (int16_t)from->matrix[0];
+ to->m01 = (int16_t)from->matrix[1];
+ to->m02 = (int16_t)from->matrix[2];
+ to->m10 = (int16_t)from->matrix[3];
+ to->m11 = (int16_t)from->matrix[4];
+ to->m12 = (int16_t)from->matrix[5];
+ to->m20 = (int16_t)from->matrix[6];
+ to->m21 = (int16_t)from->matrix[7];
+ to->m22 = (int16_t)from->matrix[8];
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() leave:\n");
+#endif
+}
+
+void
+ia_css_csc_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size)
+{
+ ia_css_encode_cc(to, from, size);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_cc_dump(
+ const struct sh_css_isp_csc_params *csc,
+ unsigned int level,
+ const char *name)
+{
+ if (!csc)
+ 	return;
+ ia_css_debug_dtrace(level, "%s\n", name);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m_shift",
+ csc->m_shift);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m00",
+ csc->m00);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m01",
+ csc->m01);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m02",
+ csc->m02);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m10",
+ csc->m10);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m11",
+ csc->m11);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m12",
+ csc->m12);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m20",
+ csc->m20);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m21",
+ csc->m21);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m22",
+ csc->m22);
+}
+
+void
+ia_css_csc_dump(
+ const struct sh_css_isp_csc_params *csc,
+ unsigned int level)
+{
+ ia_css_cc_dump(csc, level, "Color Space Conversion");
+}
+
+void
+ia_css_cc_config_debug_dtrace(
+ const struct ia_css_cc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.m[0]=%d, config.m[1]=%d, config.m[2]=%d, config.m[3]=%d, config.m[4]=%d, config.m[5]=%d, config.m[6]=%d, config.m[7]=%d, config.m[8]=%d\n",
+ config->matrix[0],
+ config->matrix[1], config->matrix[2],
+ config->matrix[3], config->matrix[4],
+ config->matrix[5], config->matrix[6],
+ config->matrix[7], config->matrix[8]);
+}
+#endif
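sh_css_isp_csc_params is a 3x3 fixed-point matrix plus a shared shift (m_shift, taken from fraction_bits; 8 in default_cc_config). The obvious way such parameters are applied, out = (M * in) >> m_shift, is sketched below; the rounding and clipping performed by the ISP binaries are not visible in this file, so treat this as an illustration rather than the firmware algorithm:

    #include <stdint.h>

    /* Apply a 3x3 fixed-point color matrix with a shared right shift.
     * Plausible reference only; the ISP's rounding/clipping is assumed.
     */
    static void csc_apply(const int16_t m[9], unsigned int shift,
                          const int16_t in[3], int16_t out[3])
    {
        int r;

        for (r = 0; r < 3; r++) {
            int32_t acc = (int32_t)m[3 * r]     * in[0] +
                          (int32_t)m[3 * r + 1] * in[1] +
                          (int32_t)m[3 * r + 2] * in[2];

            out[r] = (int16_t)(acc >> shift);
        }
    }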
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h
new file mode 100644
index 000000000000..347ccd864577
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CSC_HOST_H
+#define __IA_CSS_CSC_HOST_H
+
+#include "ia_css_csc_types.h"
+#include "ia_css_csc_param.h"
+
+extern const struct ia_css_cc_config default_cc_config;
+
+void
+ia_css_encode_cc(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size);
+
+void
+ia_css_csc_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_cc_dump(
+ const struct sh_css_isp_csc_params *csc, unsigned int level,
+ const char *name);
+
+void
+ia_css_csc_dump(
+ const struct sh_css_isp_csc_params *csc,
+ unsigned int level);
+
+void
+ia_css_cc_config_debug_dtrace(
+ const struct ia_css_cc_config *config,
+ unsigned int level);
+
+#define ia_css_csc_debug_dtrace ia_css_cc_config_debug_dtrace
+#endif
+
+#endif /* __IA_CSS_CSC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h
new file mode 100644
index 000000000000..53e270df2db7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CSC_PARAM_H
+#define __IA_CSS_CSC_PARAM_H
+
+#include "type_support.h"
+/* CSC (Color Space Conversion) */
+struct sh_css_isp_csc_params {
+ u16 m_shift;
+ s16 m00;
+ s16 m01;
+ s16 m02;
+ s16 m10;
+ s16 m11;
+ s16 m12;
+ s16 m20;
+ s16 m21;
+ s16 m22;
+};
+
+#endif /* __IA_CSS_CSC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
new file mode 100644
index 000000000000..d49203d322bd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
@@ -0,0 +1,78 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CSC_TYPES_H
+#define __IA_CSS_CSC_TYPES_H
+
+/* @file
+ * CSS-API header file for Color Space Conversion parameters.
+ */
+
+/* Color Correction configuration.
+ *
+ * This structure is used for 3 cases.
+ * ("YCgCo" is the output format of Demosaic.)
+ *
+ * 1. Color Space Conversion (YCgCo to YUV) for ISP1.
+ * ISP block: CSC1 (Color Space Conversion)
+ * struct ia_css_cc_config *cc_config
+ *
+ * 2. Color Correction Matrix (YCgCo to RGB) for ISP2.
+ * ISP block: CCM2 (Color Correction Matrix)
+ * struct ia_css_cc_config *yuv2rgb_cc_config
+ *
+ * 3. Color Space Conversion (RGB to YUV) for ISP2.
+ * ISP block: CSC2 (Color Space Conversion)
+ * struct ia_css_cc_config *rgb2yuv_cc_config
+ *
+ * default/ineffective:
+ * 1. YCgCo -> YUV
+ * 1 0.174 0.185
+ * 0 -0.66252 -0.66874
+ * 0 -0.83738 0.58131
+ *
+ * fraction_bits = 12
+ * 4096 713 758
+ * 0 -2714 -2739
+ * 0 -3430 2381
+ *
+ * 2. YCgCo -> RGB
+ * 1 -1 1
+ * 1 1 0
+ * 1 -1 -1
+ *
+ * fraction_bits = 12
+ * 4096 -4096 4096
+ * 4096 4096 0
+ * 4096 -4096 -4096
+ *
+ * 3. RGB -> YUV
+ * 0.299 0.587 0.114
+ * -0.16874 -0.33126 0.5
+ * 0.5 -0.41869 -0.08131
+ *
+ * fraction_bits = 13
+ * 2449 4809 934
+ * -1382 -2714 4096
+ * 4096 -3430 -666
+ */
+struct ia_css_cc_config {
+ u32 fraction_bits; /** Fractional bits of matrix.
+ u8.0, [0,13] */
+ s32 matrix[3 * 3]; /** Conversion matrix.
+ s[13-fraction_bits].[fraction_bits],
+ [-8192,8191] */
+};
+
+#endif /* __IA_CSS_CSC_TYPES_H */
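The fixed-point tables in the comment above follow directly from the real-valued matrices: each entry is round(coefficient * 2^fraction_bits), kept within the documented s[13-fraction_bits].[fraction_bits] range. A standalone sketch (not driver code; build with -lm) that reproduces the RGB -> YUV table:

```c
#include <math.h>
#include <stdio.h>

/* Encode a real coefficient as s[13-f].[f]: round(c * 2^f),
 * clamped to the documented range [-8192, 8191]. */
static int encode_coef(double c, unsigned int fraction_bits)
{
	long v = lround(c * (double)(1 << fraction_bits));

	if (v < -8192)
		v = -8192;
	if (v > 8191)
		v = 8191;
	return (int)v;
}

int main(void)
{
	/* Case 3 above: RGB -> YUV with fraction_bits = 13. */
	const double rgb2yuv[9] = {
		0.299,    0.587,    0.114,
		-0.16874, -0.33126, 0.5,
		0.5,      -0.41869, -0.08131,
	};
	int i;

	for (i = 0; i < 9; i++)
		printf("%6d%c", encode_coef(rgb2yuv[i], 13),
		       (i % 3 == 2) ? '\n' : ' ');
	/* Prints 2449 4809 934 / -1382 -2714 4096 / 4096 -3430 -666,
	 * matching the fixed-point table in the comment above. */
	return 0;
}
```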
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c
new file mode 100644
index 000000000000..e80f42ab0e6a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c
@@ -0,0 +1,121 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "ia_css_ctc1_5.host.h"
+
+static void ctc_gradient(
+ int *dydx, int *shift,
+ int y1, int y0, int x1, int x0)
+{
+ int frc_bits = max(IA_CSS_CTC_COEF_SHIFT, 16);
+ int dy = y1 - y0;
+ int dx = x1 - x0;
+ int dydx_int;
+ int dydx_frc;
+ int sft;
+ /* max_dydx = the maximum gradient = the maximum y (gain) */
+ int max_dydx = (1 << IA_CSS_CTC_COEF_SHIFT) - 1;
+
+ if (dx == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ctc_gradient() error, illegal division operation\n");
+ return;
+ } else {
+ dydx_int = dy / dx;
+ dydx_frc = ((dy - dydx_int * dx) << frc_bits) / dx;
+ }
+
+ assert(y0 >= 0 && y0 <= max_dydx);
+ assert(y1 >= 0 && y1 <= max_dydx);
+ assert(x0 < x1);
+ assert(dydx);
+ assert(shift);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() enter:\n");
+
+ /* search "sft" which meets this condition:
+ (1 << (IA_CSS_CTC_COEF_SHIFT - 1))
+ <= (((float)dy / (float)dx) * (1 << sft))
+ <= ((1 << IA_CSS_CTC_COEF_SHIFT) - 1) */
+ for (sft = 0; sft <= IA_CSS_CTC_COEF_SHIFT; sft++) {
+ int tmp_dydx = (dydx_int << sft)
+ + (dydx_frc >> (frc_bits - sft));
+ if (tmp_dydx <= max_dydx) {
+ *dydx = tmp_dydx;
+ *shift = sft;
+ }
+ if (tmp_dydx >= max_dydx)
+ break;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() leave:\n");
+}
+
+void
+ia_css_ctc_encode(
+ struct sh_css_isp_ctc_params *to,
+ const struct ia_css_ctc_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->y0 = from->y0;
+ to->y1 = from->y1;
+ to->y2 = from->y2;
+ to->y3 = from->y3;
+ to->y4 = from->y4;
+ to->y5 = from->y5;
+
+ to->ce_gain_exp = from->ce_gain_exp;
+
+ to->x1 = from->x1;
+ to->x2 = from->x2;
+ to->x3 = from->x3;
+ to->x4 = from->x4;
+
+ ctc_gradient(&to->dydx0,
+ &to->dydx0_shift,
+ from->y1, from->y0,
+ from->x1, 0);
+
+ ctc_gradient(&to->dydx1,
+ &to->dydx1_shift,
+ from->y2, from->y1,
+ from->x2, from->x1);
+
+ ctc_gradient(&to->dydx2,
+ &to->dydx2_shift,
+ from->y3, from->y2,
+ from->x3, from->x2);
+
+ ctc_gradient(&to->dydx3,
+ &to->dydx3_shift,
+ from->y4, from->y3,
+ from->x4, from->x3);
+
+ ctc_gradient(&to->dydx4,
+ &to->dydx4_shift,
+ from->y5, from->y4,
+ SH_CSS_BAYER_MAXVAL, from->x4);
+}
+
+void
+ia_css_ctc_dump(
+ const struct sh_css_isp_ctc_params *ctc,
+ unsigned int level);
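The loop in ctc_gradient() above searches for the largest shift in [0, IA_CSS_CTC_COEF_SHIFT] at which the scaled gradient still fits under (1 << IA_CSS_CTC_COEF_SHIFT) - 1, so the coefficient keeps as much precision as possible. A standalone sketch of the same search with the driver's constants inlined (frc_bits is 16, since max(13, 16) evaluates to 16):

```c
#include <stdio.h>

#define COEF_SHIFT 13	/* IA_CSS_CTC_COEF_SHIFT in the driver */

/* Mirror of the search in ctc_gradient(): find the largest sft in
 * [0, COEF_SHIFT] such that (dy/dx) << sft still fits below
 * (1 << COEF_SHIFT) - 1, and return that scaled gradient. */
static void gradient(int *dydx, int *shift, int dy, int dx)
{
	const int frc_bits = 16;
	int dydx_int = dy / dx;
	int dydx_frc = ((dy - dydx_int * dx) << frc_bits) / dx;
	int max_dydx = (1 << COEF_SHIFT) - 1;
	int sft;

	for (sft = 0; sft <= COEF_SHIFT; sft++) {
		int tmp = (dydx_int << sft) + (dydx_frc >> (frc_bits - sft));

		if (tmp <= max_dydx) {
			*dydx = tmp;
			*shift = sft;
		}
		if (tmp >= max_dydx)
			break;
	}
}

int main(void)
{
	int dydx, shift;

	gradient(&dydx, &shift, 512, 1024);	/* true slope 0.5 */
	/* Expect dydx = 4096, shift = 13: 4096 / 2^13 = 0.5 exactly. */
	printf("dydx=%d shift=%d (%g)\n",
	       dydx, shift, dydx / (double)(1 << shift));
	return 0;
}
```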
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h
new file mode 100644
index 000000000000..f3c40a49f7c0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC1_5_HOST_H
+#define __IA_CSS_CTC1_5_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_ctc1_5_param.h"
+
+void
+ia_css_ctc_encode(
+ struct sh_css_isp_ctc_params *to,
+ const struct ia_css_ctc_config *from,
+ unsigned int size);
+
+void
+ia_css_ctc_dump(
+ const struct sh_css_isp_ctc_params *ctc,
+ unsigned int level);
+
+#endif /* __IA_CSS_CTC1_5_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h
new file mode 100644
index 000000000000..95cf34ef4ed2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC1_5_PARAM_H
+#define __IA_CSS_CTC1_5_PARAM_H
+
+#include "type_support.h"
+#include "ctc/ctc_1.0/ia_css_ctc_param.h" /* vamem params */
+
+/* CTC (Color Tone Control) */
+struct sh_css_isp_ctc_params {
+ s32 y0;
+ s32 y1;
+ s32 y2;
+ s32 y3;
+ s32 y4;
+ s32 y5;
+ s32 ce_gain_exp;
+ s32 x1;
+ s32 x2;
+ s32 x3;
+ s32 x4;
+ s32 dydx0;
+ s32 dydx0_shift;
+ s32 dydx1;
+ s32 dydx1_shift;
+ s32 dydx2;
+ s32 dydx2_shift;
+ s32 dydx3;
+ s32 dydx3_shift;
+ s32 dydx4;
+ s32 dydx4_shift;
+};
+
+#endif /* __IA_CSS_CTC1_5_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c
new file mode 100644
index 000000000000..b247dc6bec6a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c
@@ -0,0 +1,157 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "assert_support.h"
+
+#include "ia_css_ctc2.host.h"
+
+#define INEFFECTIVE_VAL 4096
+#define BASIC_VAL 819
+
+/* Default configuration of parameters for CTC2 */
+const struct ia_css_ctc2_config default_ctc2_config = {
+ INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
+ INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
+ BASIC_VAL * 2, BASIC_VAL * 4, BASIC_VAL * 6,
+ BASIC_VAL * 8, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
+ BASIC_VAL >> 1, BASIC_VAL
+};
+
+/* (dydx) = ctc2_slope(y1, y0, x1, x0)
+ * -----------------------------------------------
+ * Calculation of the slope of a line: ((y1 - y0) << 8) / (x1 - x0)
+ *
+ * Note: y1, y0, x1 & x0 must lie within the range 0 <-> 8191
+ */
+static int ctc2_slope(int y1, int y0, int x1, int x0)
+{
+ const int shift_val = 8;
+ const int max_slope = (1 << IA_CSS_CTC_COEF_SHIFT) - 1;
+ int dy = y1 - y0;
+ int dx = x1 - x0;
+ int rounding = (dx + 1) >> 1;
+ int dy_shift = dy << shift_val;
+ int slope, dydx;
+
+ /* Protect the parameter values and avoid zero division */
+ assert(y0 >= 0 && y0 <= max_slope);
+ assert(y1 >= 0 && y1 <= max_slope);
+ assert(x0 >= 0 && x0 <= max_slope);
+ assert(x1 > 0 && x1 <= max_slope);
+ assert(dx > 0);
+
+ if (dy < 0)
+ rounding = -rounding;
+ slope = (int)(dy_shift + rounding) / dx;
+
+ /* The slope must lie within the range
+ * (-max_slope - 1) <= dydx <= max_slope
+ */
+ if (slope <= -max_slope - 1) {
+ dydx = -max_slope - 1;
+ } else if (slope >= max_slope) {
+ dydx = max_slope;
+ } else {
+ dydx = slope;
+ }
+
+ return dydx;
+}
+
+/* (void) = ia_css_ctc2_vmem_encode(*to, *from)
+ * -----------------------------------------------
+ * VMEM Encode Function to translate Y parameters from userspace into ISP space
+ */
+void ia_css_ctc2_vmem_encode(struct ia_css_isp_ctc2_vmem_params *to,
+ const struct ia_css_ctc2_config *from,
+ size_t size)
+{
+ unsigned int i, j;
+ const unsigned int shffl_blck = 4;
+ const unsigned int length_zeros = 11;
+ short dydx0, dydx1, dydx2, dydx3, dydx4;
+
+ (void)size;
+ /*
+ * Calculation of slopes of lines interconnecting
+ * 0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0
+ */
+ dydx0 = ctc2_slope(from->y_y1, from->y_y0,
+ from->y_x1, 0);
+ dydx1 = ctc2_slope(from->y_y2, from->y_y1,
+ from->y_x2, from->y_x1);
+ dydx2 = ctc2_slope(from->y_y3, from->y_y2,
+ from->y_x3, from->y_x2);
+ dydx3 = ctc2_slope(from->y_y4, from->y_y3,
+ from->y_x4, from->y_x3);
+ dydx4 = ctc2_slope(from->y_y5, from->y_y4,
+ SH_CSS_BAYER_MAXVAL, from->y_x4);
+
+ /* Fill 3 arrays with:
+ * - Luma kneepoints 0, y_x1, y_x2, y_x3, y_x4
+ * - Luma input gain values y_y0, y_y1, y_y2, y_y3, y_y4
+ * - Calculated slopes dydx0, dydx1, dydx2, dydx3, dydx4
+ *
+ * - Each 64-element array is divided in blocks of 16 elements:
+ * the 5 parameters + zeros in the remaining 11 positions
+ * - All blocks of the same array will contain the same data
+ */
+ for (i = 0; i < shffl_blck; i++) {
+ to->y_x[0][(i << shffl_blck)] = 0;
+ to->y_x[0][(i << shffl_blck) + 1] = from->y_x1;
+ to->y_x[0][(i << shffl_blck) + 2] = from->y_x2;
+ to->y_x[0][(i << shffl_blck) + 3] = from->y_x3;
+ to->y_x[0][(i << shffl_blck) + 4] = from->y_x4;
+
+ to->y_y[0][(i << shffl_blck)] = from->y_y0;
+ to->y_y[0][(i << shffl_blck) + 1] = from->y_y1;
+ to->y_y[0][(i << shffl_blck) + 2] = from->y_y2;
+ to->y_y[0][(i << shffl_blck) + 3] = from->y_y3;
+ to->y_y[0][(i << shffl_blck) + 4] = from->y_y4;
+
+ to->e_y_slope[0][(i << shffl_blck)] = dydx0;
+ to->e_y_slope[0][(i << shffl_blck) + 1] = dydx1;
+ to->e_y_slope[0][(i << shffl_blck) + 2] = dydx2;
+ to->e_y_slope[0][(i << shffl_blck) + 3] = dydx3;
+ to->e_y_slope[0][(i << shffl_blck) + 4] = dydx4;
+
+ for (j = 0; j < length_zeros; j++) {
+ to->y_x[0][(i << shffl_blck) + 5 + j] = 0;
+ to->y_y[0][(i << shffl_blck) + 5 + j] = 0;
+ to->e_y_slope[0][(i << shffl_blck) + 5 + j] = 0;
+ }
+ }
+}
+
+/* (void) = ia_css_ctc2_encode(*to, *from)
+ * -----------------------------------------------
+ * DMEM Encode Function to translate UV parameters from userspace into ISP space
+ */
+void ia_css_ctc2_encode(struct ia_css_isp_ctc2_dmem_params *to,
+ struct ia_css_ctc2_config *from,
+ size_t size)
+{
+ (void)size;
+
+ to->uv_y0 = from->uv_y0;
+ to->uv_y1 = from->uv_y1;
+ to->uv_x0 = from->uv_x0;
+ to->uv_x1 = from->uv_x1;
+
+ /*Slope Calculation*/
+ to->uv_dydx = ctc2_slope(from->uv_y1, from->uv_y0,
+ from->uv_x1, from->uv_x0);
+}
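ctc2_slope() above produces a u.8 slope with symmetric rounding (half of dx, negated for negative dy) and clamps the result to [-(max_slope + 1), max_slope]. A standalone mirror of that arithmetic:

```c
#include <stdio.h>

#define COEF_SHIFT 13	/* IA_CSS_CTC_COEF_SHIFT in the driver headers */

/* Mirror of ctc2_slope(): slope = round((dy << 8) / dx), clamped to
 * [-(max + 1), max] with max = (1 << COEF_SHIFT) - 1 = 8191. */
static int slope(int y1, int y0, int x1, int x0)
{
	const int max_slope = (1 << COEF_SHIFT) - 1;
	int dy = y1 - y0;
	int dx = x1 - x0;
	int rounding = (dx + 1) >> 1;
	int s;

	if (dy < 0)
		rounding = -rounding;
	s = ((dy << 8) + rounding) / dx;

	if (s < -max_slope - 1)
		return -max_slope - 1;
	if (s > max_slope)
		return max_slope;
	return s;
}

int main(void)
{
	/* Slope 2.0 between two kneepoints: (4096 - 2048) / (2048 - 1024),
	 * encoded with 8 fractional bits as 2 << 8 = 512. */
	printf("%d\n", slope(4096, 2048, 2048, 1024));
	return 0;
}
```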
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h
new file mode 100644
index 000000000000..3733aee24dcd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC2_HOST_H
+#define __IA_CSS_CTC2_HOST_H
+
+#include "ia_css_ctc2_param.h"
+#include "ia_css_ctc2_types.h"
+
+extern const struct ia_css_ctc2_config default_ctc2_config;
+
+/*Encode Functions to translate parameters from userspace into ISP space*/
+
+void ia_css_ctc2_vmem_encode(struct ia_css_isp_ctc2_vmem_params *to,
+ const struct ia_css_ctc2_config *from,
+ size_t size);
+
+void ia_css_ctc2_encode(struct ia_css_isp_ctc2_dmem_params *to,
+ struct ia_css_ctc2_config *from,
+ size_t size);
+
+#endif /* __IA_CSS_CTC2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
new file mode 100644
index 000000000000..224bdb199942
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC2_PARAM_H
+#define __IA_CSS_CTC2_PARAM_H
+
+#define IA_CSS_CTC_COEF_SHIFT 13
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+/* CTC (Chroma Tone Control) ISP parameters */
+
+/*VMEM Luma params*/
+struct ia_css_isp_ctc2_vmem_params {
+ /* Kneepoints by Y(Luma): 0.0, y_x1, y_x2, y_x3, y_x4 */
+ VMEM_ARRAY(y_x, ISP_VEC_NELEMS);
+ /* Gains by Y(Luma) at Y = 0.0, y_x1, y_x2, y_x3, y_x4 */
+ VMEM_ARRAY(y_y, ISP_VEC_NELEMS);
+ /* Slopes of lines interconnecting
+ * 0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0 */
+ VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS);
+};
+
+/*DMEM Chroma params*/
+struct ia_css_isp_ctc2_dmem_params {
+ /* Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
+ s32 uv_y0;
+ s32 uv_y1;
+
+ /* Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
+ s32 uv_x0;
+ s32 uv_x1;
+
+ /* Slope of line interconnecting uv_x0 -> uv_x1*/
+ s32 uv_dydx;
+};
+
+#endif /* __IA_CSS_CTC2_PARAM_H */
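ia_css_ctc2_vmem_encode() in the host file above fills each of these VMEM arrays as identical 16-element blocks: five parameters followed by eleven zeros. A standalone sketch of that layout, assuming ISP_VEC_NELEMS is 64 and using the kneepoints from default_ctc2_config:

```c
#include <stdio.h>

#define VEC_NELEMS 64	/* ISP_VEC_NELEMS, assumed for illustration */

int main(void)
{
	/* Kneepoints 0, y_x1..y_x4 from default_ctc2_config above
	 * (BASIC_VAL * 2, * 4, * 6, * 8). */
	const short params[5] = { 0, 1638, 3276, 4914, 6552 };
	short y_x[VEC_NELEMS];
	int i, j;

	/* Four identical blocks of 16: 5 parameters + 11 zeros. The
	 * driver writes (i << shffl_blck) with shffl_blck == 4, so the
	 * block count doubles as the shift that selects each block. */
	for (i = 0; i < 4; i++) {
		for (j = 0; j < 5; j++)
			y_x[(i << 4) + j] = params[j];
		for (j = 5; j < 16; j++)
			y_x[(i << 4) + j] = 0;
	}

	for (i = 0; i < 16; i++)	/* first block */
		printf("%d ", y_x[i]);
	printf("\n");	/* 0 1638 3276 4914 6552 0 0 0 0 0 0 0 0 0 0 0 */
	return 0;
}
```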
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
new file mode 100644
index 000000000000..9d5dadf70f1a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC2_TYPES_H
+#define __IA_CSS_CTC2_TYPES_H
+
+/* Chroma Tone Control configuration.
+ *
+ * ISP block: CTC2 (CTC by polygonal approximation)
+ * (ISP1: CTC1 (CTC by look-up table) is used.)
+ * ISP2: CTC2 is used.
+ * ISP261: CTC2 (CTC by Fast Approximate Distance)
+ */
+struct ia_css_ctc2_config {
+ /** Gains by Y(Luma) at Y = 0.0, y_x1, y_x2, y_x3, y_x4 and 1.0
+ * --default/ineffective value: 4096(0.5f)
+ */
+ s32 y_y0;
+ s32 y_y1;
+ s32 y_y2;
+ s32 y_y3;
+ s32 y_y4;
+ s32 y_y5;
+ /* 1st-4th kneepoints by Y(Luma) --default/ineffective value: n/a
+ * requirement: 0.0 < y_x1 < y_x2 < y_x3 < y_x4 < 1.0
+ */
+ s32 y_x1;
+ s32 y_x2;
+ s32 y_x3;
+ s32 y_x4;
+ /* Gains by UV(Chroma) under thresholds uv_x0 and uv_x1
+ * --default/ineffective value: 4096(0.5f)
+ */
+ s32 uv_y0;
+ s32 uv_y1;
+ /* Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
+ * --default/ineffective value: n/a
+ */
+ s32 uv_x0;
+ s32 uv_x1;
+};
+
+#endif /* __IA_CSS_CTC2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c
new file mode 100644
index 000000000000..26311b0a23f9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c
@@ -0,0 +1,58 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ia_css_ctc.host.h"
+
+const struct ia_css_ctc_config default_ctc_config = {
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ 1,
+ SH_CSS_BAYER_MAXVAL / 5, /* To be implemented */
+ SH_CSS_BAYER_MAXVAL * 2 / 5, /* To be implemented */
+ SH_CSS_BAYER_MAXVAL * 3 / 5, /* To be implemented */
+ SH_CSS_BAYER_MAXVAL * 4 / 5, /* To be implemented */
+};
+
+void
+ia_css_ctc_vamem_encode(
+ struct sh_css_isp_ctc_vamem_params *to,
+ const struct ia_css_ctc_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->ctc, &from->data, sizeof(to->ctc));
+}
+
+void
+ia_css_ctc_debug_dtrace(
+ const struct ia_css_ctc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.ce_gain_exp=%d, config.y0=%d, config.x1=%d, config.y1=%d, config.x2=%d, config.y2=%d, config.x3=%d, config.y3=%d, config.x4=%d, config.y4=%d\n",
+ config->ce_gain_exp, config->y0,
+ config->x1, config->y1,
+ config->x2, config->y2,
+ config->x3, config->y3,
+ config->x4, config->y4);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h
new file mode 100644
index 000000000000..e4ad676361dd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC_HOST_H
+#define __IA_CSS_CTC_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_ctc_param.h"
+#include "ia_css_ctc_table.host.h"
+
+extern const struct ia_css_ctc_config default_ctc_config;
+
+void
+ia_css_ctc_vamem_encode(
+ struct sh_css_isp_ctc_vamem_params *to,
+ const struct ia_css_ctc_table *from,
+ unsigned int size);
+
+void
+ia_css_ctc_debug_dtrace(
+ const struct ia_css_ctc_config *config, unsigned int level);
+
+#endif /* __IA_CSS_CTC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h
new file mode 100644
index 000000000000..6e541a0ebaa9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC_PARAM_H
+#define __IA_CSS_CTC_PARAM_H
+
+#include "type_support.h"
+#include <system_global.h>
+
+#include "ia_css_ctc_types.h"
+
+#ifndef PIPE_GENERATION
+#if defined(HAS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_CTC_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_CTC_TABLE_SIZE IA_CSS_VAMEM_2_CTC_TABLE_SIZE
+#elif defined(HAS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_CTC_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_CTC_TABLE_SIZE IA_CSS_VAMEM_1_CTC_TABLE_SIZE
+#else
+#error "VAMEM should be {VERSION1, VERSION2}"
+#endif
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_CTC_TABLE_SIZE 0
+#endif
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_ctc_vamem_params {
+ u16 ctc[SH_CSS_ISP_CTC_TABLE_SIZE];
+};
+
+#endif /* __IA_CSS_CTC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
new file mode 100644
index 000000000000..adb146c03a73
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
@@ -0,0 +1,214 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <string_support.h> /* memcpy */
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_ctc_table.host.h"
+
+struct ia_css_ctc_table default_ctc_table;
+
+#if defined(HAS_VAMEM_VERSION_2)
+
+static const uint16_t
+default_ctc_table_data[IA_CSS_VAMEM_2_CTC_TABLE_SIZE] = {
+ 0, 384, 837, 957, 1011, 1062, 1083, 1080,
+ 1078, 1077, 1053, 1039, 1012, 992, 969, 951,
+ 929, 906, 886, 866, 845, 823, 809, 790,
+ 772, 758, 741, 726, 711, 701, 688, 675,
+ 666, 656, 648, 639, 633, 626, 618, 612,
+ 603, 594, 582, 572, 557, 545, 529, 516,
+ 504, 491, 480, 467, 459, 447, 438, 429,
+ 419, 412, 404, 397, 389, 382, 376, 368,
+ 363, 357, 351, 345, 340, 336, 330, 326,
+ 321, 318, 312, 308, 304, 300, 297, 294,
+ 291, 286, 284, 281, 278, 275, 271, 268,
+ 261, 257, 251, 245, 240, 235, 232, 225,
+ 223, 218, 213, 209, 206, 204, 199, 197,
+ 193, 189, 186, 185, 183, 179, 177, 175,
+ 172, 170, 169, 167, 164, 164, 162, 160,
+ 158, 157, 156, 154, 154, 152, 151, 150,
+ 149, 148, 146, 147, 146, 144, 143, 143,
+ 142, 141, 140, 141, 139, 138, 138, 138,
+ 137, 136, 136, 135, 134, 134, 134, 133,
+ 132, 132, 131, 130, 131, 130, 129, 128,
+ 129, 127, 127, 127, 127, 125, 125, 125,
+ 123, 123, 122, 120, 118, 115, 114, 111,
+ 110, 108, 106, 105, 103, 102, 100, 99,
+ 97, 97, 96, 95, 94, 93, 93, 91,
+ 91, 91, 90, 90, 89, 89, 88, 88,
+ 89, 88, 88, 87, 87, 87, 87, 86,
+ 87, 87, 86, 87, 86, 86, 84, 84,
+ 82, 80, 78, 76, 74, 72, 70, 68,
+ 67, 65, 62, 60, 58, 56, 55, 54,
+ 53, 51, 49, 49, 47, 45, 45, 45,
+ 41, 40, 39, 39, 34, 33, 34, 32,
+ 25, 23, 24, 20, 13, 9, 12, 0,
+ 0
+};
+
+#elif defined(HAS_VAMEM_VERSION_1)
+
+/* Default Parameters */
+static const uint16_t
+default_ctc_table_data[IA_CSS_VAMEM_1_CTC_TABLE_SIZE] = {
+ 0, 0, 256, 384, 384, 497, 765, 806,
+ 837, 851, 888, 901, 957, 981, 993, 1001,
+ 1011, 1029, 1028, 1039, 1062, 1059, 1073, 1080,
+ 1083, 1085, 1085, 1098, 1080, 1084, 1085, 1093,
+ 1078, 1073, 1070, 1069, 1077, 1066, 1072, 1063,
+ 1053, 1044, 1046, 1053, 1039, 1028, 1025, 1024,
+ 1012, 1013, 1016, 996, 992, 990, 990, 980,
+ 969, 968, 961, 955, 951, 949, 933, 930,
+ 929, 925, 921, 916, 906, 901, 895, 893,
+ 886, 877, 872, 869, 866, 861, 857, 849,
+ 845, 838, 836, 832, 823, 821, 815, 813,
+ 809, 805, 796, 793, 790, 785, 784, 778,
+ 772, 768, 766, 763, 758, 752, 749, 745,
+ 741, 740, 736, 730, 726, 724, 723, 718,
+ 711, 709, 706, 704, 701, 698, 691, 689,
+ 688, 683, 683, 678, 675, 673, 671, 669,
+ 666, 663, 661, 660, 656, 656, 653, 650,
+ 648, 647, 646, 643, 639, 638, 637, 635,
+ 633, 632, 629, 627, 626, 625, 622, 621,
+ 618, 618, 614, 614, 612, 609, 606, 606,
+ 603, 600, 600, 597, 594, 591, 590, 586,
+ 582, 581, 578, 575, 572, 569, 563, 560,
+ 557, 554, 551, 548, 545, 539, 536, 533,
+ 529, 527, 524, 519, 516, 513, 510, 507,
+ 504, 501, 498, 493, 491, 488, 485, 484,
+ 480, 476, 474, 471, 467, 466, 464, 460,
+ 459, 455, 453, 449, 447, 446, 443, 441,
+ 438, 435, 432, 432, 429, 427, 426, 422,
+ 419, 418, 416, 414, 412, 410, 408, 406,
+ 404, 402, 401, 398, 397, 395, 393, 390,
+ 389, 388, 387, 384, 382, 380, 378, 377,
+ 376, 375, 372, 370, 368, 368, 366, 364,
+ 363, 361, 360, 358, 357, 355, 354, 352,
+ 351, 350, 349, 346, 345, 344, 344, 342,
+ 340, 339, 337, 337, 336, 335, 333, 331,
+ 330, 329, 328, 326, 326, 324, 324, 322,
+ 321, 320, 318, 318, 318, 317, 315, 313,
+ 312, 311, 311, 310, 308, 307, 306, 306,
+ 304, 304, 302, 301, 300, 300, 299, 297,
+ 297, 296, 296, 294, 294, 292, 291, 291,
+ 291, 290, 288, 287, 286, 286, 287, 285,
+ 284, 283, 282, 282, 281, 281, 279, 278,
+ 278, 278, 276, 276, 275, 274, 274, 273,
+ 271, 270, 269, 268, 268, 267, 265, 262,
+ 261, 260, 260, 259, 257, 254, 252, 252,
+ 251, 251, 249, 246, 245, 244, 243, 242,
+ 240, 239, 239, 237, 235, 235, 233, 231,
+ 232, 230, 229, 226, 225, 224, 225, 224,
+ 223, 220, 219, 219, 218, 217, 217, 214,
+ 213, 213, 212, 211, 209, 209, 209, 208,
+ 206, 205, 204, 203, 204, 203, 201, 200,
+ 199, 197, 198, 198, 197, 195, 194, 194,
+ 193, 192, 192, 191, 189, 190, 189, 188,
+ 186, 187, 186, 185, 185, 184, 183, 181,
+ 183, 182, 181, 180, 179, 178, 178, 178,
+ 177, 176, 175, 176, 175, 174, 174, 173,
+ 172, 173, 172, 171, 170, 170, 169, 169,
+ 169, 168, 167, 166, 167, 167, 166, 165,
+ 164, 164, 164, 163, 164, 163, 162, 163,
+ 162, 161, 160, 161, 160, 160, 160, 159,
+ 158, 157, 158, 158, 157, 157, 156, 156,
+ 156, 156, 155, 155, 154, 154, 154, 154,
+ 154, 153, 152, 153, 152, 152, 151, 152,
+ 151, 152, 151, 150, 150, 149, 149, 150,
+ 149, 149, 148, 148, 148, 149, 148, 147,
+ 146, 146, 147, 146, 147, 146, 145, 146,
+ 146, 145, 144, 145, 144, 145, 144, 144,
+ 143, 143, 143, 144, 143, 142, 142, 142,
+ 142, 142, 142, 141, 141, 141, 141, 140,
+ 140, 141, 140, 140, 141, 140, 139, 139,
+ 139, 140, 139, 139, 138, 138, 137, 139,
+ 138, 138, 138, 137, 138, 137, 137, 137,
+ 137, 136, 137, 136, 136, 136, 136, 135,
+ 136, 135, 135, 135, 135, 136, 135, 135,
+ 134, 134, 133, 135, 134, 134, 134, 133,
+ 134, 133, 134, 133, 133, 132, 133, 133,
+ 132, 133, 132, 132, 132, 132, 131, 131,
+ 131, 132, 131, 131, 130, 131, 130, 132,
+ 131, 130, 130, 129, 130, 129, 130, 129,
+ 129, 129, 130, 129, 128, 128, 128, 128,
+ 129, 128, 128, 127, 127, 128, 128, 127,
+ 127, 126, 126, 127, 127, 126, 126, 126,
+ 127, 126, 126, 126, 125, 125, 126, 125,
+ 125, 124, 124, 124, 125, 125, 124, 124,
+ 123, 124, 124, 123, 123, 122, 122, 122,
+ 122, 122, 121, 120, 120, 119, 118, 118,
+ 118, 117, 117, 116, 115, 115, 115, 114,
+ 114, 113, 113, 112, 111, 111, 111, 110,
+ 110, 109, 109, 108, 108, 108, 107, 107,
+ 106, 106, 105, 105, 105, 104, 104, 103,
+ 103, 102, 102, 102, 102, 101, 101, 100,
+ 100, 99, 99, 99, 99, 99, 99, 98,
+ 97, 98, 97, 97, 97, 96, 96, 95,
+ 96, 95, 96, 95, 95, 94, 94, 95,
+ 94, 94, 94, 93, 93, 92, 93, 93,
+ 93, 93, 92, 92, 91, 92, 92, 92,
+ 91, 91, 90, 90, 91, 91, 91, 90,
+ 90, 90, 90, 91, 90, 90, 90, 89,
+ 89, 89, 90, 89, 89, 89, 89, 89,
+ 88, 89, 89, 88, 88, 88, 88, 87,
+ 89, 88, 88, 88, 88, 88, 87, 88,
+ 88, 88, 87, 87, 87, 87, 87, 88,
+ 87, 87, 87, 87, 87, 87, 88, 87,
+ 87, 87, 87, 86, 86, 87, 87, 87,
+ 87, 86, 86, 86, 87, 87, 86, 87,
+ 86, 86, 86, 87, 87, 86, 86, 86,
+ 86, 86, 87, 87, 86, 85, 85, 85,
+ 84, 85, 85, 84, 84, 83, 83, 82,
+ 82, 82, 81, 81, 80, 79, 79, 79,
+ 78, 77, 77, 76, 76, 76, 75, 74,
+ 74, 74, 73, 73, 72, 71, 71, 71,
+ 70, 70, 69, 69, 68, 68, 67, 67,
+ 67, 66, 66, 65, 65, 64, 64, 63,
+ 62, 62, 62, 61, 60, 60, 59, 59,
+ 58, 58, 57, 57, 56, 56, 56, 55,
+ 55, 54, 55, 55, 54, 53, 53, 52,
+ 53, 53, 52, 51, 51, 50, 51, 50,
+ 49, 49, 50, 49, 49, 48, 48, 47,
+ 47, 48, 46, 45, 45, 45, 46, 45,
+ 45, 44, 45, 45, 45, 43, 42, 42,
+ 41, 43, 41, 40, 40, 39, 40, 41,
+ 39, 39, 39, 39, 39, 38, 35, 35,
+ 34, 37, 36, 34, 33, 33, 33, 35,
+ 34, 32, 32, 31, 32, 30, 29, 26,
+ 25, 25, 27, 26, 23, 23, 23, 25,
+ 24, 24, 22, 21, 20, 19, 16, 14,
+ 13, 13, 13, 10, 9, 7, 7, 7,
+ 12, 12, 12, 7, 0, 0, 0, 0
+};
+
+#else
+#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
+#endif
+
+void
+ia_css_config_ctc_table(void)
+{
+#if defined(HAS_VAMEM_VERSION_2)
+ memcpy(default_ctc_table.data.vamem_2, default_ctc_table_data,
+ sizeof(default_ctc_table_data));
+ default_ctc_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+#else
+ memcpy(default_ctc_table.data.vamem_1, default_ctc_table_data,
+ sizeof(default_ctc_table_data));
+ default_ctc_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h
new file mode 100644
index 000000000000..a350dec8b4ad
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC_TABLE_HOST_H
+#define __IA_CSS_CTC_TABLE_HOST_H
+
+#include "ia_css_ctc_types.h"
+
+extern struct ia_css_ctc_table default_ctc_table;
+
+void ia_css_config_ctc_table(void);
+
+#endif /* __IA_CSS_CTC_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
new file mode 100644
index 000000000000..f6f5ec28827f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
@@ -0,0 +1,110 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC_TYPES_H
+#define __IA_CSS_CTC_TYPES_H
+
+#include <linux/bitops.h>
+
+/* @file
+ * CSS-API header file for Chroma Tone Control parameters.
+ */
+
+/* Fractional bits for CTC gain (used only for ISP1).
+ *
+ * IA_CSS_CTC_COEF_SHIFT(=13) includes not only the fractional bits
+ * of gain(=8), but also the bits(=5) to convert chroma
+ * from 13bit precision to 8bit precision.
+ *
+ * Gain (struct ia_css_ctc_table) : u5.8
+ * Input(Chroma) : s0.12 (13bit precision)
+ * Output(Chroma): s0.7 (8bit precision)
+ * Output = (Input * Gain) >> IA_CSS_CTC_COEF_SHIFT
+ */
+#define IA_CSS_CTC_COEF_SHIFT 13
+
+/* Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2 10
+/* Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE BIT(IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2)
+
+/* Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2 8
+/* Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE ((1U << IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2) + 1)
+
+enum ia_css_vamem_type {
+ IA_CSS_VAMEM_TYPE_1,
+ IA_CSS_VAMEM_TYPE_2
+};
+
+/* Chroma Tone Control configuration.
+ *
+ * ISP block: CTC2 (CTC by polygonal line approximation)
+ * (ISP1: CTC1 (CTC by look-up table) is used.)
+ * ISP2: CTC2 is used.
+ */
+struct ia_css_ctc_config {
+ u16 y0; /** 1st kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y1; /** 2nd kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y2; /** 3rd kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y3; /** 4th kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y4; /** 5th kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y5; /** 6th kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 ce_gain_exp; /** Common exponent of y-axis gain.
+ u8.0, [0,13],
+ default/ineffective 1 */
+ u16 x1; /** 2nd kneepoint luma.
+ u0.13, [0,8191], constraints: 0<x1<x2,
+ default/ineffective 1024 */
+ u16 x2; /** 3rd kneepoint luma.
+ u0.13, [0,8191], constraints: x1<x2<x3,
+ default/ineffective 2048 */
+ u16 x3; /** 4th kneepoint luma.
+ u0.13, [0,8191], constraints: x2<x3<x4,
+ default/ineffective 6144 */
+ u16 x4; /** 5th kneepoint luma.
+ u0.13, [0,8191], constraints: x3<x4<8191,
+ default/ineffective 7168 */
+};
+
+union ia_css_ctc_data {
+ u16 vamem_1[IA_CSS_VAMEM_1_CTC_TABLE_SIZE];
+ u16 vamem_2[IA_CSS_VAMEM_2_CTC_TABLE_SIZE];
+};
+
+/* CTC table, used for Chroma Tone Control.
+ *
+ * ISP block: CTC1 (CTC by look-up table)
+ * ISP1: CTC1 is used.
+ * (ISP2: CTC2 (CTC by polygonal line approximation) is used.)
+ */
+struct ia_css_ctc_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_ctc_data data;
+};
+
+#endif /* __IA_CSS_CTC_TYPES_H */
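The IA_CSS_CTC_COEF_SHIFT comment above folds two conversions into a single shift: dropping the 8 fractional gain bits and reducing chroma from 13-bit to 8-bit precision. A standalone arithmetic check of the documented formula:

```c
#include <stdio.h>

#define IA_CSS_CTC_COEF_SHIFT 13

int main(void)
{
	int gain = 1 << 8;	/* u5.8 gain of 1.0 */
	int chroma_in = 4095;	/* s0.12 input, near +0.5 of full scale */

	/* Output = (Input * Gain) >> IA_CSS_CTC_COEF_SHIFT: the shift
	 * removes the 8 gain fraction bits plus 5 more bits, taking the
	 * chroma from 13-bit down to 8-bit precision. */
	int chroma_out = (chroma_in * gain) >> IA_CSS_CTC_COEF_SHIFT;

	printf("in=%d out=%d\n", chroma_in, chroma_out);	/* out = 127 */
	return 0;
}
```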
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
new file mode 100644
index 000000000000..8e4218cb70cd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
@@ -0,0 +1,78 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+#include "ia_css_de.host.h"
+
+const struct ia_css_de_config default_de_config = {
+ 0,
+ 0,
+ 0
+};
+
+void
+ia_css_de_encode(
+ struct sh_css_isp_de_params *to,
+ const struct ia_css_de_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->pixelnoise =
+ uDIGIT_FITTING(from->pixelnoise, 16, SH_CSS_BAYER_BITS);
+ to->c1_coring_threshold =
+ uDIGIT_FITTING(from->c1_coring_threshold, 16,
+ SH_CSS_BAYER_BITS);
+ to->c2_coring_threshold =
+ uDIGIT_FITTING(from->c2_coring_threshold, 16,
+ SH_CSS_BAYER_BITS);
+}
+
+void
+ia_css_de_dump(
+ const struct sh_css_isp_de_params *de,
+ unsigned int level)
+{
+ if (!de)
+ return;
+ ia_css_debug_dtrace(level, "Demosaic:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "de_pixelnoise", de->pixelnoise);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "de_c1_coring_threshold",
+ de->c1_coring_threshold);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "de_c2_coring_threshold",
+ de->c2_coring_threshold);
+}
+
+void
+ia_css_de_debug_dtrace(
+ const struct ia_css_de_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.pixelnoise=%d, config.c1_coring_threshold=%d, config.c2_coring_threshold=%d\n",
+ config->pixelnoise,
+ config->c1_coring_threshold, config->c2_coring_threshold);
+}
+
+void
+ia_css_init_de_state(
+ void/*struct sh_css_isp_de_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h
new file mode 100644
index 000000000000..baae1d9809da
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE_HOST_H
+#define __IA_CSS_DE_HOST_H
+
+#include "ia_css_de_types.h"
+#include "ia_css_de_param.h"
+
+extern const struct ia_css_de_config default_de_config;
+
+void
+ia_css_de_encode(
+ struct sh_css_isp_de_params *to,
+ const struct ia_css_de_config *from,
+ unsigned int size);
+
+void
+ia_css_de_dump(
+ const struct sh_css_isp_de_params *de,
+ unsigned int level);
+
+void
+ia_css_de_debug_dtrace(
+ const struct ia_css_de_config *config,
+ unsigned int level);
+
+void
+ia_css_init_de_state(
+ void/*struct sh_css_isp_de_vmem_state*/ * state,
+ size_t size);
+
+#endif /* __IA_CSS_DE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h
new file mode 100644
index 000000000000..c85a57e194cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE_PARAM_H
+#define __IA_CSS_DE_PARAM_H
+
+#include "type_support.h"
+
+/* DE (Demosaic) */
+struct sh_css_isp_de_params {
+ s32 pixelnoise;
+ s32 c1_coring_threshold;
+ s32 c2_coring_threshold;
+};
+
+#endif /* __IA_CSS_DE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h
new file mode 100644
index 000000000000..a4b446904570
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE_TYPES_H
+#define __IA_CSS_DE_TYPES_H
+
+/* @file
+ * CSS-API header file for Demosaic (bayer-to-YCgCo) parameters.
+ */
+
+/* Demosaic (bayer-to-YCgCo) configuration.
+ *
+ * ISP block: DE1
+ * ISP1: DE1 is used.
+ * (ISP2: DE2 is used.)
+ */
+struct ia_css_de_config {
+ ia_css_u0_16 pixelnoise; /** Pixel noise used in moire elimination.
+ u0.16, [0,65535],
+ default 0, ineffective 0 */
+ ia_css_u0_16 c1_coring_threshold; /** Coring threshold for C1.
+ This is the same as nr_config.threshold_cb.
+ u0.16, [0,65535],
+ default 128(0.001953125), ineffective 0 */
+ ia_css_u0_16 c2_coring_threshold; /** Coring threshold for C2.
+ This is the same as nr_config.threshold_cr.
+ u0.16, [0,65535],
+ default 128(0.001953125), ineffective 0 */
+};
+
+#endif /* __IA_CSS_DE_TYPES_H */
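The u0.16 fields in this header are plain fractions of full scale, value / 2^16, which is where the documented default of 128(0.001953125) comes from. A one-line standalone check:

```c
#include <stdio.h>

int main(void)
{
	/* u0.16: value / 2^16. Default c1/c2 coring threshold is 128. */
	printf("%.9f\n", 128.0 / 65536.0);	/* 0.001953125 */
	return 0;
}
```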
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c
new file mode 100644
index 000000000000..ade23d53f6bb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_de2.host.h"
+
+const struct ia_css_ecd_config default_ecd_config = {
+ (1 << (ISP_VEC_ELEMBITS - 1)) * 2 / 3, /* 2/3 */
+ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1.0 */
+ 0, /* 0.0 */
+};
+
+void
+ia_css_ecd_encode(
+ struct sh_css_isp_ecd_params *to,
+ const struct ia_css_ecd_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->zip_strength = from->zip_strength;
+ to->fc_strength = from->fc_strength;
+ to->fc_debias = from->fc_debias;
+}
+
+void
+ia_css_ecd_dump(
+ const struct sh_css_isp_ecd_params *ecd,
+ unsigned int level);
+
+void
+ia_css_ecd_debug_dtrace(
+ const struct ia_css_ecd_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.zip_strength=%d, config.fc_strength=%d, config.fc_debias=%d\n",
+ config->zip_strength,
+ config->fc_strength, config->fc_debias);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h
new file mode 100644
index 000000000000..f3749e514505
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE2_HOST_H
+#define __IA_CSS_DE2_HOST_H
+
+#include "ia_css_de2_types.h"
+#include "ia_css_de2_param.h"
+
+extern const struct ia_css_ecd_config default_ecd_config;
+
+void
+ia_css_ecd_encode(
+ struct sh_css_isp_ecd_params *to,
+ const struct ia_css_ecd_config *from,
+ unsigned int size);
+
+void
+ia_css_ecd_dump(
+ const struct sh_css_isp_ecd_params *ecd,
+ unsigned int level);
+
+void
+ia_css_ecd_debug_dtrace(
+ const struct ia_css_ecd_config *config, unsigned int level);
+
+#endif /* __IA_CSS_DE2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h
new file mode 100644
index 000000000000..868dfaaf78c7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE2_PARAM_H
+#define __IA_CSS_DE2_PARAM_H
+
+#include "type_support.h"
+
+/* Reuse DE1 params and extend them */
+#include "../de_1.0/ia_css_de_param.h"
+
+/* DE (Demosaic) */
+struct sh_css_isp_ecd_params {
+ s32 zip_strength;
+ s32 fc_strength;
+ s32 fc_debias;
+};
+
+#endif /* __IA_CSS_DE2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h
new file mode 100644
index 000000000000..24700d256bfd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE2_TYPES_H
+#define __IA_CSS_DE2_TYPES_H
+
+/* @file
+ * CSS-API header file for Demosaicing parameters.
+ */
+
+/* Eigen Color Demosaicing configuration.
+ *
+ * ISP block: DE2
+ * (ISP1: DE1 is used.)
+ * ISP2: DE2 is used.
+ */
+struct ia_css_ecd_config {
+ u16 zip_strength; /** Strength of zipper reduction.
+ u0.13, [0,8191],
+ default 5489(0.67), ineffective 0 */
+ u16 fc_strength; /** Strength of false color reduction.
+ u0.13, [0,8191],
+ default 8191(almost 1.0), ineffective 0 */
+ u16 fc_debias; /** Prevent color change
+ on noise or Gr/Gb imbalance.
+ u0.13, [0,8191],
+ default 0, ineffective 0 */
+};
+
+#endif /* __IA_CSS_DE2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c
new file mode 100644
index 000000000000..9fb37447831c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c
@@ -0,0 +1,131 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_dp.host.h"
+
+/* We use a different set of DPC configuration parameters when
+ * DPC is used before OBC and NORM. Currently these parameters
+ * are used in use cases that select both BDS and DPC.
+ */
+const struct ia_css_dp_config default_dp_10bpp_config = {
+ 1024,
+ 2048,
+ 32768,
+ 32768,
+ 32768,
+ 32768
+};
+
+const struct ia_css_dp_config default_dp_config = {
+ 8192,
+ 2048,
+ 32768,
+ 32768,
+ 32768,
+ 32768
+};
+
+void
+ia_css_dp_encode(
+ struct sh_css_isp_dp_params *to,
+ const struct ia_css_dp_config *from,
+ unsigned int size)
+{
+ int gain = from->gain;
+ int gr = from->gr;
+ int r = from->r;
+ int b = from->b;
+ int gb = from->gb;
+
+ (void)size;
+ to->threshold_single =
+ SH_CSS_BAYER_MAXVAL;
+ to->threshold_2adjacent =
+ uDIGIT_FITTING(from->threshold, 16, SH_CSS_BAYER_BITS);
+ to->gain =
+ uDIGIT_FITTING(from->gain, 8, SH_CSS_DP_GAIN_SHIFT);
+
+ to->coef_rr_gr =
+ uDIGIT_FITTING(gain * gr / r, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_rr_gb =
+ uDIGIT_FITTING(gain * gb / r, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_bb_gb =
+ uDIGIT_FITTING(gain * gb / b, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_bb_gr =
+ uDIGIT_FITTING(gain * gr / b, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gr_rr =
+ uDIGIT_FITTING(gain * r / gr, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gr_bb =
+ uDIGIT_FITTING(gain * b / gr, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gb_bb =
+ uDIGIT_FITTING(gain * b / gb, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gb_rr =
+ uDIGIT_FITTING(gain * r / gb, 8, SH_CSS_DP_GAIN_SHIFT);
+}
+
+void
+ia_css_dp_dump(
+ const struct sh_css_isp_dp_params *dp,
+ unsigned int level)
+{
+ if (!dp)
+ return;
+ ia_css_debug_dtrace(level, "Defect Pixel Correction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dp_threshold_single_w_2adj_on",
+ dp->threshold_single);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dp_threshold_2adj_w_2adj_on",
+ dp->threshold_2adjacent);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dp_gain", dp->gain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_rr_gr", dp->coef_rr_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_rr_gb", dp->coef_rr_gb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_bb_gb", dp->coef_bb_gb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_bb_gr", dp->coef_bb_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gr_rr", dp->coef_gr_rr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gr_bb", dp->coef_gr_bb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gb_bb", dp->coef_gb_bb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gb_rr", dp->coef_gb_rr);
+}
+
+void
+ia_css_dp_debug_dtrace(
+ const struct ia_css_dp_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d, config.gain=%d\n",
+ config->threshold, config->gain);
+}
+
+void
+ia_css_init_dp_state(
+ void/*struct sh_css_isp_dp_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
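ia_css_dp_encode() above derives its eight cross-channel coefficients as gain * (channel A) / (channel B) in u.8 arithmetic and refits each to SH_CSS_DP_GAIN_SHIFT fractional bits. A standalone sketch of one coefficient; fit_u8() is a hypothetical stand-in for uDIGIT_FITTING (defined in sh_css_frac.h; its saturation handling is omitted here), and the shift value 10 is illustrative only:

```c
#include <stdio.h>

/* Hypothetical stand-in for uDIGIT_FITTING(v, 8, shift): refit a u.8
 * fixed-point value to `shift` fractional bits (no saturation). */
static int fit_u8(int v, int shift)
{
	return shift >= 8 ? v << (shift - 8) : v >> (8 - shift);
}

int main(void)
{
	/* All gains in u.8: 256 == 1.0. Red channel twice as strong. */
	int gain = 256, gr = 256, r = 512;
	int dp_gain_shift = 10;	/* illustrative value only */

	/* coef_rr_gr = gain * gr / r, as in ia_css_dp_encode(). */
	int coef_rr_gr = fit_u8(gain * gr / r, dp_gain_shift);

	printf("coef_rr_gr = %d (~%.2f)\n", coef_rr_gr,
	       coef_rr_gr / (double)(1 << dp_gain_shift));	/* ~0.50 */
	return 0;
}
```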
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h
new file mode 100644
index 000000000000..009541fafda0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DP_HOST_H
+#define __IA_CSS_DP_HOST_H
+
+#include "ia_css_dp_types.h"
+#include "ia_css_dp_param.h"
+
+extern const struct ia_css_dp_config default_dp_config;
+
+/* ISP2401 */
+extern const struct ia_css_dp_config default_dp_10bpp_config;
+
+void
+ia_css_dp_encode(
+ struct sh_css_isp_dp_params *to,
+ const struct ia_css_dp_config *from,
+ unsigned int size);
+
+void
+ia_css_dp_dump(
+ const struct sh_css_isp_dp_params *dp,
+ unsigned int level);
+
+void
+ia_css_dp_debug_dtrace(
+ const struct ia_css_dp_config *config,
+ unsigned int level);
+
+void
+ia_css_init_dp_state(
+ void/*struct sh_css_isp_dp_vmem_state*/ * state,
+ size_t size);
+
+#endif /* __IA_CSS_DP_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h
new file mode 100644
index 000000000000..8567a620696a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DP_PARAM_H
+#define __IA_CSS_DP_PARAM_H
+
+#include "type_support.h"
+#include "bnr/bnr_1.0/ia_css_bnr_param.h"
+
+/* DP (Defect Pixel Correction) */
+struct sh_css_isp_dp_params {
+ s32 threshold_single;
+ s32 threshold_2adjacent;
+ s32 gain;
+ s32 coef_rr_gr;
+ s32 coef_rr_gb;
+ s32 coef_bb_gb;
+ s32 coef_bb_gr;
+ s32 coef_gr_rr;
+ s32 coef_gr_bb;
+ s32 coef_gb_bb;
+ s32 coef_gb_rr;
+};
+
+#endif /* __IA_CSS_DP_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
new file mode 100644
index 000000000000..e96f83e5d47c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DP_TYPES_H
+#define __IA_CSS_DP_TYPES_H
+
+/* @file
+ * CSS-API header file for Defect Pixel Correction (DPC) parameters.
+ */
+
+/* Defect Pixel Correction configuration.
+ *
+ * ISP block: DPC1 (DPC after WB)
+ * DPC2 (DPC before WB)
+ * ISP1: DPC1 is used.
+ * ISP2: DPC2 is used.
+ */
+struct ia_css_dp_config {
+ ia_css_u0_16 threshold; /** The threshold of defect pixel correction,
+ representing the permissible difference of
+ intensity between one pixel and its
+ surrounding pixels. Smaller values result
+ in more frequent pixel corrections.
+ u0.16, [0,65535],
+ default 8192, ineffective 65535 */
+ ia_css_u8_8 gain; /** Sensitivity of the correction; if the value
+ is set too large, the ISP will miss many
+ real defects.
+ u8.8, [0,65535],
+ default 4096, ineffective 65535 */
+ u32 gr; /* unsigned <integer_bits>.<16-integer_bits> */
+ u32 r; /* unsigned <integer_bits>.<16-integer_bits> */
+ u32 b; /* unsigned <integer_bits>.<16-integer_bits> */
+ u32 gb; /* unsigned <integer_bits>.<16-integer_bits> */
+};
+
+#endif /* __IA_CSS_DP_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c
new file mode 100644
index 000000000000..4dfad4ace20b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c
@@ -0,0 +1,65 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_dpc2.host.h"
+#include "assert_support.h"
+
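+/* ia_css_dpc2_encode() below sanity-checks the fixed-point inputs before
+ * copying them through. With the ONE_FP constants from
+ * ia_css_dpc2_types.h this means, e.g., metric2 in [32, 8192)
+ * (METRIC2_ONE_FP = 1 << 5 = 32) and each white-balance gain in
+ * (0, 8192) (WBGAIN_ONE_FP = 1 << 9 = 512, upper bound 16 * 512).
+ */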
+void
+ia_css_dpc2_encode(
+ struct ia_css_isp_dpc2_params *to,
+ const struct ia_css_dpc2_config *from,
+ size_t size)
+{
+ (void)size;
+
+ assert((from->metric1 >= 0) && (from->metric1 <= METRIC1_ONE_FP));
+ assert((from->metric3 >= 0) && (from->metric3 <= METRIC3_ONE_FP));
+ assert((from->metric2 >= METRIC2_ONE_FP) &&
+ (from->metric2 < 256 * METRIC2_ONE_FP));
+ assert((from->wb_gain_gr > 0) && (from->wb_gain_gr < 16 * WBGAIN_ONE_FP));
+ assert((from->wb_gain_r > 0) && (from->wb_gain_r < 16 * WBGAIN_ONE_FP));
+ assert((from->wb_gain_b > 0) && (from->wb_gain_b < 16 * WBGAIN_ONE_FP));
+ assert((from->wb_gain_gb > 0) && (from->wb_gain_gb < 16 * WBGAIN_ONE_FP));
+
+ to->metric1 = from->metric1;
+ to->metric2 = from->metric2;
+ to->metric3 = from->metric3;
+
+ to->wb_gain_gr = from->wb_gain_gr;
+ to->wb_gain_r = from->wb_gain_r;
+ to->wb_gain_b = from->wb_gain_b;
+ to->wb_gain_gb = from->wb_gain_gb;
+}
+
+/* TODO: AM: This needs a proper implementation. */
+void
+ia_css_init_dpc2_state(
+ void *state,
+ size_t size)
+{
+ (void)state;
+ (void)size;
+}
+
+#ifndef IA_CSS_NO_DEBUG
+/* TODO: AM: This needs a proper implementation. */
+void
+ia_css_dpc2_debug_dtrace(
+ const struct ia_css_dpc2_config *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h
new file mode 100644
index 000000000000..a31ef0e5047b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DPC2_HOST_H
+#define __IA_CSS_DPC2_HOST_H
+
+#include "ia_css_dpc2_types.h"
+#include "ia_css_dpc2_param.h"
+
+void
+ia_css_dpc2_encode(
+ struct ia_css_isp_dpc2_params *to,
+ const struct ia_css_dpc2_config *from,
+ size_t size);
+
+void
+ia_css_init_dpc2_state(
+ void *state,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_dpc2_debug_dtrace(
+ const struct ia_css_dpc2_config *config,
+ unsigned int level);
+#endif
+
+#endif /* __IA_CSS_DPC2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h
new file mode 100644
index 000000000000..6df06fb249aa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h
@@ -0,0 +1,51 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DPC2_PARAM_H
+#define __IA_CSS_DPC2_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* for VMEM_ARRAY*/
+
+/* 4 planes : GR, R, B, GB */
+#define NUM_PLANES 4
+
+/* TODO: move this to test setup */
+#define MAX_FRAME_SIMDWIDTH 30
+
+/* 3 lines state per color plane input_line_state */
+#define DPC2_STATE_INPUT_BUFFER_HEIGHT (3 * NUM_PLANES)
+/* Each plane has width equal to half frame line */
+#define DPC2_STATE_INPUT_BUFFER_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane for local deviation state*/
+#define DPC2_STATE_LOCAL_DEVIATION_BUFFER_HEIGHT (1 * NUM_PLANES)
+/* Each plane has width equal to half frame line */
+#define DPC2_STATE_LOCAL_DEVIATION_BUFFER_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* MINMAX state buffer stores 1 full input line (GR-R color line) */
+#define DPC2_STATE_SECOND_MINMAX_BUFFER_HEIGHT 1
+#define DPC2_STATE_SECOND_MINMAX_BUFFER_WIDTH MAX_FRAME_SIMDWIDTH
+
+struct ia_css_isp_dpc2_params {
+ s32 metric1;
+ s32 metric2;
+ s32 metric3;
+ s32 wb_gain_gr;
+ s32 wb_gain_r;
+ s32 wb_gain_b;
+ s32 wb_gain_gb;
+};
+
+#endif /* __IA_CSS_DPC2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h
new file mode 100644
index 000000000000..f78451be8d6a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h
@@ -0,0 +1,59 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DPC2_TYPES_H
+#define __IA_CSS_DPC2_TYPES_H
+
+/* @file
+ * CSS-API header file for Defect Pixel Correction 2 (DPC2) parameters.
+ */
+
+#include "type_support.h"
+
+/**@{*/
+/* Floating point constants for different metrics. */
+#define METRIC1_ONE_FP BIT(12)
+#define METRIC2_ONE_FP BIT(5)
+#define METRIC3_ONE_FP BIT(12)
+#define WBGAIN_ONE_FP BIT(9)
+/**@}*/
+
+/**@{*/
+/* Defect Pixel Correction 2 configuration.
+ *
+ * \brief DPC2 public parameters.
+ * \details Struct with all parameters for the Defect Pixel Correction 2
+ * kernel that can be set from the CSS API.
+ *
+ * ISP block: DPC1 (DPC after WB)
+ * DPC2 (DPC before WB)
+ * ISP1: DPC1 is used.
+ * ISP2: DPC2 is used.
+ *
+ */
+struct ia_css_dpc2_config {
+ /**@{*/
+ s32 metric1;
+ s32 metric2;
+ s32 metric3;
+ s32 wb_gain_gr;
+ s32 wb_gain_r;
+ s32 wb_gain_b;
+ s32 wb_gain_gb;
+ /**@}*/
+};
+
+/**@}*/
+
+#endif /* __IA_CSS_DPC2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
new file mode 100644
index 000000000000..d2c3e8edf626
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
@@ -0,0 +1,301 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_frame_public.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+#include "ia_css_types.h"
+#include "ia_css_host_data.h"
+#include "sh_css_param_dvs.h"
+#include "sh_css_params.h"
+#include "ia_css_binary.h"
+#include "ia_css_debug.h"
+#include "memory_access.h"
+#include "assert_support.h"
+
+#include "ia_css_dvs.host.h"
+
+static const struct ia_css_dvs_configuration default_config = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_dvs_config(
+ struct sh_css_isp_dvs_isp_config *to,
+ const struct ia_css_dvs_configuration *from,
+ unsigned int size)
+{
+ (void)size;
+ to->num_horizontal_blocks =
+ DVS_NUM_BLOCKS_X(from->info->res.width);
+ to->num_vertical_blocks =
+ DVS_NUM_BLOCKS_Y(from->info->res.height);
+}
+
+void
+ia_css_dvs_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_dvs_configuration config = default_config;
+
+ config.info = info;
+
+ ia_css_configure_dvs(binary, &config);
+}
+
+static void
+convert_coords_to_ispparams(
+ struct ia_css_host_data *gdc_warp_table,
+ const struct ia_css_dvs_6axis_config *config,
+ unsigned int i_stride,
+ unsigned int o_width,
+ unsigned int o_height,
+ unsigned int uv_flag)
+{
+ unsigned int i, j;
+ gdc_warp_param_mem_t s = { 0 };
+ unsigned int x00, x01, x10, x11,
+ y00, y01, y10, y11;
+
+ unsigned int xmin, ymin, xmax, ymax;
+ unsigned int topleft_x, topleft_y, bottom_x, bottom_y,
+ topleft_x_frac, topleft_y_frac;
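+ /* Interpolation envelope: +1 for bilinear or +3 for bicubic mode
+ * (see DVS_GDC_*_INTERP_ENVELOPE in ia_css_dvs.host.h). */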
+ unsigned int dvs_interp_envelope = (DVS_GDC_INTERP_METHOD == HRT_GDC_BLI_MODE ?
+ DVS_GDC_BLI_INTERP_ENVELOPE : DVS_GDC_BCI_INTERP_ENVELOPE);
+
+ /* Number of blocks per height and width. The width is rounded up
+ * to a whole number of blocks; for the Y0Y1 block (uv_flag == 0)
+ * it is additionally rounded up to an even value. */
+ unsigned int num_blocks_y = (uv_flag ? DVS_NUM_BLOCKS_Y_CHROMA(o_height)
+ : DVS_NUM_BLOCKS_Y(o_height));
+ unsigned int num_blocks_x = (uv_flag ? DVS_NUM_BLOCKS_X_CHROMA(o_width)
+ : DVS_NUM_BLOCKS_X(o_width));
+
+ unsigned int in_stride = i_stride * DVS_INPUT_BYTES_PER_PIXEL;
+ unsigned int width, height;
+ unsigned int *xbuff = NULL;
+ unsigned int *ybuff = NULL;
+ struct gdc_warp_param_mem_s *ptr;
+
+ assert(config);
+ assert(gdc_warp_table);
+ assert(gdc_warp_table->address);
+
+ ptr = (struct gdc_warp_param_mem_s *)gdc_warp_table->address;
+
+ ptr += (2 * uv_flag); /* format is Y0 Y1 UV, so UV starts at 3rd position */
+
+ if (uv_flag == 0) {
+ xbuff = config->xcoords_y;
+ ybuff = config->ycoords_y;
+ width = config->width_y;
+ height = config->height_y;
+ } else {
+ xbuff = config->xcoords_uv;
+ ybuff = config->ycoords_uv;
+ width = config->width_uv;
+ height = config->height_uv;
+ }
+
+ IA_CSS_LOG("blockdim_x %d blockdim_y %d",
+ DVS_BLOCKDIM_X, DVS_BLOCKDIM_Y_LUMA >> uv_flag);
+ IA_CSS_LOG("num_blocks_x %d num_blocks_y %d", num_blocks_x, num_blocks_y);
+ IA_CSS_LOG("width %d height %d", width, height);
+
+ /* The width and height of the provided morphing table should be
+ * 1 more than the number of blocks. */
+ assert(width == num_blocks_x + 1);
+ assert(height == num_blocks_y + 1);
+
+ for (j = 0; j < num_blocks_y; j++) {
+ for (i = 0; i < num_blocks_x; i++) {
+ x00 = xbuff[j * width + i];
+ x01 = xbuff[j * width + (i + 1)];
+ x10 = xbuff[(j + 1) * width + i];
+ x11 = xbuff[(j + 1) * width + (i + 1)];
+
+ y00 = ybuff[j * width + i];
+ y01 = ybuff[j * width + (i + 1)];
+ y10 = ybuff[(j + 1) * width + i];
+ y11 = ybuff[(j + 1) * width + (i + 1)];
+
+ xmin = min(x00, x10);
+ xmax = max(x01, x11);
+ ymin = min(y00, y01);
+ ymax = max(y10, y11);
+
+ /* Assert that right column's X is greater */
+ assert(x01 >= xmin);
+ assert(x11 >= xmin);
+ /* Assert that bottom row's Y is greater */
+ assert(y10 >= ymin);
+ assert(y11 >= ymin);
+
+ topleft_y = ymin >> DVS_COORD_FRAC_BITS;
+ topleft_x = ((xmin >> DVS_COORD_FRAC_BITS)
+ >> XMEM_ALIGN_LOG2)
+ << (XMEM_ALIGN_LOG2);
+ s.in_addr_offset = topleft_y * in_stride + topleft_x;
+
+ /* similar to topleft_y calculation, but round up if ymax
+ * has any fraction bits */
+ bottom_y = CEIL_DIV(ymax, 1 << DVS_COORD_FRAC_BITS);
+ s.in_block_height = bottom_y - topleft_y + dvs_interp_envelope;
+
+ bottom_x = CEIL_DIV(xmax, 1 << DVS_COORD_FRAC_BITS);
+ s.in_block_width = bottom_x - topleft_x + dvs_interp_envelope;
+
+ topleft_x_frac = topleft_x << (DVS_COORD_FRAC_BITS);
+ topleft_y_frac = topleft_y << (DVS_COORD_FRAC_BITS);
+
+ s.p0_x = x00 - topleft_x_frac;
+ s.p1_x = x01 - topleft_x_frac;
+ s.p2_x = x10 - topleft_x_frac;
+ s.p3_x = x11 - topleft_x_frac;
+
+ s.p0_y = y00 - topleft_y_frac;
+ s.p1_y = y01 - topleft_y_frac;
+ s.p2_y = y10 - topleft_y_frac;
+ s.p3_y = y11 - topleft_y_frac;
+
+ // block should fit within the boundingbox.
+ assert(s.p0_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p1_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p2_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p3_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p0_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+ assert(s.p1_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+ assert(s.p2_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+ assert(s.p3_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+
+ // block size should be greater than zero.
+ assert(s.p0_x < s.p1_x);
+ assert(s.p2_x < s.p3_x);
+ assert(s.p0_y < s.p2_y);
+ assert(s.p1_y < s.p3_y);
+
+#if 0
+ printf("j: %d\ti:%d\n", j, i);
+ printf("offset: %d\n", s.in_addr_offset);
+ printf("p0_x: %d\n", s.p0_x);
+ printf("p0_y: %d\n", s.p0_y);
+ printf("p1_x: %d\n", s.p1_x);
+ printf("p1_y: %d\n", s.p1_y);
+ printf("p2_x: %d\n", s.p2_x);
+ printf("p2_y: %d\n", s.p2_y);
+ printf("p3_x: %d\n", s.p3_x);
+ printf("p3_y: %d\n", s.p3_y);
+
+ printf("p0_x_nofrac[0]: %d\n", s.p0_x >> DVS_COORD_FRAC_BITS);
+ printf("p0_y_nofrac[1]: %d\n", s.p0_y >> DVS_COORD_FRAC_BITS);
+ printf("p1_x_nofrac[2]: %d\n", s.p1_x >> DVS_COORD_FRAC_BITS);
+ printf("p1_y_nofrac[3]: %d\n", s.p1_y >> DVS_COORD_FRAC_BITS);
+ printf("p2_x_nofrac[0]: %d\n", s.p2_x >> DVS_COORD_FRAC_BITS);
+ printf("p2_y_nofrac[1]: %d\n", s.p2_y >> DVS_COORD_FRAC_BITS);
+ printf("p3_x_nofrac[2]: %d\n", s.p3_x >> DVS_COORD_FRAC_BITS);
+ printf("p3_y_nofrac[3]: %d\n", s.p3_y >> DVS_COORD_FRAC_BITS);
+ printf("\n");
+#endif
+
+ *ptr = s;
+
+ /* Storage format: Y0 Y1 UV0 Y2 Y3 UV1.
+ * For chroma (uv_flag set), advance by 3 to the next UV entry.
+ * For luma, advance by 1, or by 2 after an odd column so the
+ * interleaved UV entry is skipped. */
+ if (uv_flag)
+ ptr += 3;
+ else
+ ptr += (1 + (i & 1));
+ }
+ }
+}
+
+struct ia_css_host_data *
+convert_allocate_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info)
+{
+ unsigned int i_stride;
+ unsigned int o_width;
+ unsigned int o_height;
+ struct ia_css_host_data *me;
+ struct gdc_warp_param_mem_s *isp_data_ptr;
+
+ assert(binary);
+ assert(dvs_6axis_config);
+ assert(dvs_in_frame_info);
+
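+ /* The table layout is Y0 Y1 UV per block pair (see
+ * convert_coords_to_ispparams()): every two luma entries share
+ * one chroma entry, which is presumably why the allocation below
+ * is (DVS_6AXIS_BYTES(binary) / 2) * 3, i.e. 1.5 times the
+ * single-plane table size. */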
+ me = ia_css_host_data_allocate((size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3));
+
+ if (!me)
+ return NULL;
+
+ /* DVS only supports input frames of YUV420 or NV12; fail for all
+ * other cases. */
+ assert((dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_NV12)
+ || (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420));
+
+ isp_data_ptr = (struct gdc_warp_param_mem_s *)me->address;
+
+ i_stride = dvs_in_frame_info->padded_width;
+
+ o_width = binary->out_frame_info[0].res.width;
+ o_height = binary->out_frame_info[0].res.height;
+
+ /* Y plane */
+ convert_coords_to_ispparams(me, dvs_6axis_config,
+ i_stride, o_width, o_height, 0);
+
+ if (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420) {
+ /* YUV420 has half the stride for the U/V plane */
+ i_stride /= 2;
+ }
+
+ /* UV plane (packed inside the y plane) */
+ convert_coords_to_ispparams(me, dvs_6axis_config,
+ i_stride, o_width / 2, o_height / 2, 1);
+
+ return me;
+}
+
+enum ia_css_err
+store_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info,
+ hrt_vaddress ddr_addr_y) {
+ struct ia_css_host_data *me;
+
+ assert(dvs_6axis_config);
+ assert(ddr_addr_y != mmgr_NULL);
+ assert(dvs_in_frame_info);
+
+ me = convert_allocate_dvs_6axis_config(dvs_6axis_config,
+ binary,
+ dvs_in_frame_info);
+
+ if (!me)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ ia_css_params_store_ia_css_host_data(
+ ddr_addr_y,
+ me);
+ ia_css_host_data_free(me);
+
+ return IA_CSS_SUCCESS;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h
new file mode 100644
index 000000000000..d711170cf7cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DVS_HOST_H
+#define __IA_CSS_DVS_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+#include "sh_css_params.h"
+
+#include "ia_css_types.h"
+#include "ia_css_dvs_types.h"
+#include "ia_css_dvs_param.h"
+
+/* For bilinear interpolation, we need to add +1 to the input block
+ * height calculation. For bicubic interpolation, we need to add +3
+ * instead. */
+#define DVS_GDC_BLI_INTERP_ENVELOPE 1
+#define DVS_GDC_BCI_INTERP_ENVELOPE 3
+
+void
+ia_css_dvs_config(
+ struct sh_css_isp_dvs_isp_config *to,
+ const struct ia_css_dvs_configuration *from,
+ unsigned int size);
+
+void
+ia_css_dvs_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+convert_dvs_6axis_config(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_binary *binary);
+
+struct ia_css_host_data *
+convert_allocate_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info);
+
+enum ia_css_err
+store_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info,
+ hrt_vaddress ddr_addr_y);
+
+#endif /* __IA_CSS_DVS_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
new file mode 100644
index 000000000000..f8842dae943b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DVS_PARAM_H
+#define __IA_CSS_DVS_PARAM_H
+
+#include <type_support.h>
+
+#if !defined(ENABLE_TPROXY) && !defined(ENABLE_CRUN_FOR_TD) && !defined(PARAMBIN_GENERATION)
+#include "dma.h"
+#endif /* !ENABLE_TPROXY && !ENABLE_CRUN_FOR_TD && !PARAMBIN_GENERATION */
+
+#include "uds/uds_1.0/ia_css_uds_param.h"
+
+/* DVS reference frame */
+struct sh_css_isp_dvs_isp_config {
+ u32 num_horizontal_blocks;
+ u32 num_vertical_blocks;
+};
+
+#endif /* __IA_CSS_DVS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
new file mode 100644
index 000000000000..a1a14d93ef29
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
@@ -0,0 +1,29 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DVS_TYPES_H
+#define __IA_CSS_DVS_TYPES_H
+
+/* DVS frame
+ *
+ * ISP block: dvs frame
+ */
+
+#include "ia_css_frame_public.h"
+
+struct ia_css_dvs_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_DVS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
new file mode 100644
index 000000000000..03e1998b0464
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
@@ -0,0 +1,338 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+
+#include "type_support.h"
+#include "assert_support.h"
+#include "math_support.h" /* for min and max */
+
+#include "ia_css_eed1_8.host.h"
+
+/* WARNING1: The number of inv points should be less than or equal to 16,
+ * due to an implementation limitation. See the kernel design document
+ * for more details.
+ * WARNING2: Do not modify the number of inv points without correcting
+ * the EED1_8 kernel implementation assumptions.
+ */
+#define NUMBER_OF_CHGRINV_POINTS 15
+#define NUMBER_OF_TCINV_POINTS 9
+#define NUMBER_OF_FCINV_POINTS 9
+
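+/* Piecewise lookup tables used for the 1/x approximation described in
+ * ia_css_eed1_8_vmem_encode() below: *_x appears to hold segment start
+ * points, with *_a, *_b and *_c per-segment coefficients/flags. The
+ * exact semantics are defined by the EED1_8 kernel implementation (see
+ * the KFS referenced below).
+ */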
+static const s16 chgrinv_x[NUMBER_OF_CHGRINV_POINTS] = {
+ 0, 16, 64, 144, 272, 448, 672, 976,
+ 1376, 1888, 2528, 3312, 4256, 5376, 6688
+};
+
+static const s16 chgrinv_a[NUMBER_OF_CHGRINV_POINTS] = {
+ -7171, -256, -29, -3456, -1071, -475, -189, -102,
+ -48, -38, -10, -9, -7, -6, 0
+};
+
+static const s16 chgrinv_b[NUMBER_OF_CHGRINV_POINTS] = {
+ 8191, 1021, 256, 114, 60, 37, 24, 17,
+ 12, 9, 6, 5, 4, 3, 2
+};
+
+static const s16 chgrinv_c[NUMBER_OF_CHGRINV_POINTS] = {
+ 1, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0
+};
+
+static const s16 tcinv_x[NUMBER_OF_TCINV_POINTS] = {
+ 0, 4, 11, 23, 42, 68, 102, 148, 205
+};
+
+static const s16 tcinv_a[NUMBER_OF_TCINV_POINTS] = {
+ -6364, -631, -126, -34, -13, -6, -4452, -2156, 0
+};
+
+static const s16 tcinv_b[NUMBER_OF_TCINV_POINTS] = {
+ 8191, 1828, 726, 352, 197, 121, 80, 55, 40
+};
+
+static const s16 tcinv_c[NUMBER_OF_TCINV_POINTS] = {
+ 1, 1, 1, 1, 1, 1, 0, 0, 0
+};
+
+static const s16 fcinv_x[NUMBER_OF_FCINV_POINTS] = {
+ 0, 80, 216, 456, 824, 1344, 2040, 2952, 4096
+};
+
+static const s16 fcinv_a[NUMBER_OF_FCINV_POINTS] = {
+ -5244, -486, -86, -2849, -961, -400, -180, -86, 0
+};
+
+static const s16 fcinv_b[NUMBER_OF_FCINV_POINTS] = {
+ 8191, 1637, 607, 287, 159, 98, 64, 44, 32
+};
+
+static const s16 fcinv_c[NUMBER_OF_FCINV_POINTS] = {
+ 1, 1, 1, 0, 0, 0, 0, 0, 0
+};
+
+void
+ia_css_eed1_8_vmem_encode(
+ struct eed1_8_vmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size)
+{
+ unsigned int i, j, base;
+ const unsigned int total_blocks = 4;
+ const unsigned int shuffle_block = 16;
+
+ (void)size;
+
+ /* Init */
+ for (i = 0; i < ISP_VEC_NELEMS; i++) {
+ to->e_dew_enh_x[0][i] = 0;
+ to->e_dew_enh_y[0][i] = 0;
+ to->e_dew_enh_a[0][i] = 0;
+ to->e_dew_enh_f[0][i] = 0;
+ to->chgrinv_x[0][i] = 0;
+ to->chgrinv_a[0][i] = 0;
+ to->chgrinv_b[0][i] = 0;
+ to->chgrinv_c[0][i] = 0;
+ to->tcinv_x[0][i] = 0;
+ to->tcinv_a[0][i] = 0;
+ to->tcinv_b[0][i] = 0;
+ to->tcinv_c[0][i] = 0;
+ to->fcinv_x[0][i] = 0;
+ to->fcinv_a[0][i] = 0;
+ to->fcinv_b[0][i] = 0;
+ to->fcinv_c[0][i] = 0;
+ }
+
+ /* Constraints on dew_enhance_seg_x and dew_enhance_seg_y:
+ * - values should be greater than or equal to 0.
+ * - values should be ascending.
+ * - the value at index zero must be 0.
+ */
+
+ /* Checking constraints: */
+ /* TODO: investigate if an assert is the right way to report that
+ * the constraints are violated.
+ */
+ for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
+ assert(from->dew_enhance_seg_x[j] > -1);
+ assert(from->dew_enhance_seg_y[j] > -1);
+ }
+
+ for (j = 1; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
+ assert(from->dew_enhance_seg_x[j] > from->dew_enhance_seg_x[j - 1]);
+ assert(from->dew_enhance_seg_y[j] > from->dew_enhance_seg_y[j - 1]);
+ }
+
+ assert(from->dew_enhance_seg_x[0] == 0);
+ assert(from->dew_enhance_seg_y[0] == 0);
+
+ /* Constraints on chgrinv_x, tcinv_x and fcinv_x:
+ * - values should be greater than or equal to 0.
+ * - values should be ascending.
+ * - the value at index zero must be 0.
+ */
+ assert(chgrinv_x[0] == 0);
+ assert(tcinv_x[0] == 0);
+ assert(fcinv_x[0] == 0);
+
+ for (j = 1; j < NUMBER_OF_CHGRINV_POINTS; j++) {
+ assert(chgrinv_x[j] > chgrinv_x[j - 1]);
+ }
+
+ for (j = 1; j < NUMBER_OF_TCINV_POINTS; j++) {
+ assert(tcinv_x[j] > tcinv_x[j - 1]);
+ }
+
+ for (j = 1; j < NUMBER_OF_FCINV_POINTS; j++) {
+ assert(fcinv_x[j] > fcinv_x[j - 1]);
+ }
+
+ /* The 1/x calculation is implemented using the OP_vec_shuffle16
+ * operation. A 64-element vector is split into 4 blocks of 16
+ * elements; each array is copied into the vector 4 times (starting
+ * at offsets 0, 16, 32 and 48). All array elements are copied or
+ * initialised as described in the KFS; the remaining elements of a
+ * vector are set to 0.
+ */
+ /* TODO: guard this code with above assumptions */
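+ /* The clamps below keep x in [0, 8191] and y/slope in
+ * [-8192, 8191], i.e. 13-bit magnitudes, presumably matching the
+ * ISP vector element range. */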
+ for (i = 0; i < total_blocks; i++) {
+ base = shuffle_block * i;
+
+ for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
+ to->e_dew_enh_x[0][base + j] = min_t(int, max_t(int,
+ from->dew_enhance_seg_x[j], 0),
+ 8191);
+ to->e_dew_enh_y[0][base + j] = min_t(int, max_t(int,
+ from->dew_enhance_seg_y[j], -8192),
+ 8191);
+ }
+
+ for (j = 0; j < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); j++) {
+ to->e_dew_enh_a[0][base + j] = min_t(int, max_t(int,
+ from->dew_enhance_seg_slope[j],
+ -8192), 8191);
+ /* Convert dew_enhance_seg_exp to flag:
+ * 0 -> 0
+ * 1...13 -> 1
+ */
+ to->e_dew_enh_f[0][base + j] = (min_t(int, max_t(int,
+ from->dew_enhance_seg_exp[j],
+ 0), 13) > 0);
+ }
+
+ /* Hard-coded to 0, in order to be able to handle out of
+ * range input in the same way as the other segments.
+ * See KFS for more details.
+ */
+ to->e_dew_enh_a[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0;
+ to->e_dew_enh_f[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0;
+
+ for (j = 0; j < NUMBER_OF_CHGRINV_POINTS; j++) {
+ to->chgrinv_x[0][base + j] = chgrinv_x[j];
+ to->chgrinv_a[0][base + j] = chgrinv_a[j];
+ to->chgrinv_b[0][base + j] = chgrinv_b[j];
+ to->chgrinv_c[0][base + j] = chgrinv_c[j];
+ }
+
+ for (j = 0; j < NUMBER_OF_TCINV_POINTS; j++) {
+ to->tcinv_x[0][base + j] = tcinv_x[j];
+ to->tcinv_a[0][base + j] = tcinv_a[j];
+ to->tcinv_b[0][base + j] = tcinv_b[j];
+ to->tcinv_c[0][base + j] = tcinv_c[j];
+ }
+
+ for (j = 0; j < NUMBER_OF_FCINV_POINTS; j++) {
+ to->fcinv_x[0][base + j] = fcinv_x[j];
+ to->fcinv_a[0][base + j] = fcinv_a[j];
+ to->fcinv_b[0][base + j] = fcinv_b[j];
+ to->fcinv_c[0][base + j] = fcinv_c[j];
+ }
+ }
+}
+
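+/* Pack the dmem parameters. Pairs such as fcthres, aerel, derel,
+ * coring, gain and margin are stored as a region-0 (dark) base value
+ * plus a (bright - dark) difference, presumably so the ISP can blend
+ * between the dark and bright region settings.
+ */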
+void
+ia_css_eed1_8_encode(
+ struct eed1_8_dmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size)
+{
+ int i;
+ int max_exp = 0;
+
+ (void)size;
+
+ to->rbzp_strength = from->rbzp_strength;
+
+ to->fcstrength = from->fcstrength;
+ to->fcthres_0 = from->fcthres_0;
+ to->fc_sat_coef = from->fc_sat_coef;
+ to->fc_coring_prm = from->fc_coring_prm;
+ to->fc_slope = from->fcthres_1 - from->fcthres_0;
+
+ to->aerel_thres0 = from->aerel_thres0;
+ to->aerel_gain0 = from->aerel_gain0;
+ to->aerel_thres_diff = from->aerel_thres1 - from->aerel_thres0;
+ to->aerel_gain_diff = from->aerel_gain1 - from->aerel_gain0;
+
+ to->derel_thres0 = from->derel_thres0;
+ to->derel_gain0 = from->derel_gain0;
+ to->derel_thres_diff = (from->derel_thres1 - from->derel_thres0);
+ to->derel_gain_diff = (from->derel_gain1 - from->derel_gain0);
+
+ to->coring_pos0 = from->coring_pos0;
+ to->coring_pos_diff = (from->coring_pos1 - from->coring_pos0);
+ to->coring_neg0 = from->coring_neg0;
+ to->coring_neg_diff = (from->coring_neg1 - from->coring_neg0);
+
+ /* Note: this is (ISP_VEC_ELEMBITS - 1).
+ * TODO: the testbench currently does not support using
+ * ISP_VEC_ELEMBITS; investigate how to fix this.
+ */
+ to->gain_exp = (13 - from->gain_exp);
+ to->gain_pos0 = from->gain_pos0;
+ to->gain_pos_diff = (from->gain_pos1 - from->gain_pos0);
+ to->gain_neg0 = from->gain_neg0;
+ to->gain_neg_diff = (from->gain_neg1 - from->gain_neg0);
+
+ to->margin_pos0 = from->pos_margin0;
+ to->margin_pos_diff = (from->pos_margin1 - from->pos_margin0);
+ to->margin_neg0 = from->neg_margin0;
+ to->margin_neg_diff = (from->neg_margin1 - from->neg_margin0);
+
+ /* Encode the DEW enhance shift (e_dew_enh_asr) from the largest
+ * segment exponent, clamped to [0, 13]. */
+ for (i = 0; i < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); i++)
+ max_exp = max(max_exp, from->dew_enhance_seg_exp[i]);
+ to->e_dew_enh_asr = 13 - min(max(max_exp, 0), 13);
+
+ to->dedgew_max = from->dedgew_max;
+}
+
+void
+ia_css_init_eed1_8_state(
+ void *state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_eed1_8_debug_dtrace(
+ const struct ia_css_eed1_8_config *eed,
+ unsigned int level)
+{
+ if (!eed)
+ return;
+
+ ia_css_debug_dtrace(level, "Edge Enhancing Demosaic 1.8:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rbzp_strength",
+ eed->rbzp_strength);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcstrength", eed->fcstrength);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcthres_0", eed->fcthres_0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcthres_1", eed->fcthres_1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fc_sat_coef", eed->fc_sat_coef);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fc_coring_prm",
+ eed->fc_coring_prm);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_thres0", eed->aerel_thres0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_gain0", eed->aerel_gain0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_thres1", eed->aerel_thres1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_gain1", eed->aerel_gain1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_thres0", eed->derel_thres0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_gain0", eed->derel_gain0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_thres1", eed->derel_thres1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_gain1", eed->derel_gain1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_pos0", eed->coring_pos0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_pos1", eed->coring_pos1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_neg0", eed->coring_neg0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_neg1", eed->coring_neg1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_exp", eed->gain_exp);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_pos0", eed->gain_pos0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_pos1", eed->gain_pos1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_neg0", eed->gain_neg0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_neg1", eed->gain_neg1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "pos_margin0", eed->pos_margin0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "pos_margin1", eed->pos_margin1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "neg_margin0", eed->neg_margin0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "neg_margin1", eed->neg_margin1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dedgew_max", eed->dedgew_max);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h
new file mode 100644
index 000000000000..05f817125d3c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EED1_8_HOST_H
+#define __IA_CSS_EED1_8_HOST_H
+
+#include "ia_css_eed1_8_types.h"
+#include "ia_css_eed1_8_param.h"
+
+void
+ia_css_eed1_8_vmem_encode(
+ struct eed1_8_vmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size);
+
+void
+ia_css_eed1_8_encode(
+ struct eed1_8_dmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size);
+
+void
+ia_css_init_eed1_8_state(
+ void *state,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_eed1_8_debug_dtrace(
+ const struct ia_css_eed1_8_config *config,
+ unsigned int level);
+#endif
+
+#endif /* __IA_CSS_EED1_8_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
new file mode 100644
index 000000000000..880454d4dcf5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
@@ -0,0 +1,153 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EED1_8_PARAM_H
+#define __IA_CSS_EED1_8_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+#include "ia_css_eed1_8_types.h" /* IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS */
+
+/* Configuration parameters: */
+
+/* Enable median for false color correction
+ * 0: Do not use median
+ * 1: Use median
+ * Default: 1
+ */
+#define EED1_8_FC_ENABLE_MEDIAN 1
+
+/* Coring Threshold minima
+ * Used in Tint color suppression.
+ * Default: 1
+ */
+#define EED1_8_CORINGTHMIN 1
+
+/* State buffer size definitions. TODO: check if this is the correct place. */
+/* 4 planes : GR, R, B, GB */
+#define NUM_PLANES 4
+
+/* 5 lines state per color plane input_line_state */
+#define EED1_8_STATE_INPUT_BUFFER_HEIGHT (5 * NUM_PLANES)
+
+/* Each plane has width equal to half frame line */
+#define EED1_8_STATE_INPUT_BUFFER_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane LD_H state */
+#define EED1_8_STATE_LD_H_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_LD_H_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane LD_V state */
+#define EED1_8_STATE_LD_V_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_LD_V_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line (single plane) state for D_Hr state */
+#define EED1_8_STATE_D_HR_HEIGHT 1
+#define EED1_8_STATE_D_HR_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line (single plane) state for D_Hb state */
+#define EED1_8_STATE_D_HB_HEIGHT 1
+#define EED1_8_STATE_D_HB_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 2 lines (single plane) state for D_Vr state */
+#define EED1_8_STATE_D_VR_HEIGHT 2
+#define EED1_8_STATE_D_VR_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 2 line (single plane) state for D_Vb state */
+#define EED1_8_STATE_D_VB_HEIGHT 2
+#define EED1_8_STATE_D_VB_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 2 lines state for R and B (= 2 planes) rb_zipped_state */
+#define EED1_8_STATE_RB_ZIPPED_HEIGHT (2 * 2)
+#define EED1_8_STATE_RB_ZIPPED_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+#if EED1_8_FC_ENABLE_MEDIAN
+/* 1 full input line (GR-R color line) for Yc state */
+#define EED1_8_STATE_YC_HEIGHT 1
+#define EED1_8_STATE_YC_WIDTH MAX_FRAME_SIMDWIDTH
+
+/* 1 line state per color plane Cg_state */
+#define EED1_8_STATE_CG_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_CG_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane Co_state */
+#define EED1_8_STATE_CO_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_CO_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 full input line (GR-R color line) for AbsK state */
+#define EED1_8_STATE_ABSK_HEIGHT 1
+#define EED1_8_STATE_ABSK_WIDTH MAX_FRAME_SIMDWIDTH
+#endif
+
+struct eed1_8_vmem_params {
+ VMEM_ARRAY(e_dew_enh_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS);
+ VMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(e_dew_enh_f, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_c, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_c, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_c, ISP_VEC_NELEMS);
+};
+
+/* EED (Edge Enhancing Demosaic) ISP parameters */
+struct eed1_8_dmem_params {
+ s32 rbzp_strength;
+
+ s32 fcstrength;
+ s32 fcthres_0;
+ s32 fc_sat_coef;
+ s32 fc_coring_prm;
+ s32 fc_slope;
+
+ s32 aerel_thres0;
+ s32 aerel_gain0;
+ s32 aerel_thres_diff;
+ s32 aerel_gain_diff;
+
+ s32 derel_thres0;
+ s32 derel_gain0;
+ s32 derel_thres_diff;
+ s32 derel_gain_diff;
+
+ s32 coring_pos0;
+ s32 coring_pos_diff;
+ s32 coring_neg0;
+ s32 coring_neg_diff;
+
+ s32 gain_exp;
+ s32 gain_pos0;
+ s32 gain_pos_diff;
+ s32 gain_neg0;
+ s32 gain_neg_diff;
+
+ s32 margin_pos0;
+ s32 margin_pos_diff;
+ s32 margin_neg0;
+ s32 margin_neg_diff;
+
+ s32 e_dew_enh_asr;
+ s32 dedgew_max;
+};
+
+#endif /* __IA_CSS_EED1_8_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h
new file mode 100644
index 000000000000..b8fdb7a51a12
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h
@@ -0,0 +1,87 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EED1_8_TYPES_H
+#define __IA_CSS_EED1_8_TYPES_H
+
+/* @file
+ * CSS-API header file for Edge Enhanced Demosaic parameters.
+ */
+
+#include "type_support.h"
+
+/**
+ * \brief EED1_8 public parameters.
+ * \details Struct with all parameters for the EED1.8 kernel that can be set
+ * from the CSS API.
+ */
+
+/* parameter list is based on ISP261 CSS API public parameter list_all.xlsx from 28-01-2015 */
+
+/* Number of segment boundary points (number of segments + 1) used in
+ * edge reliability enhancement.
+ * Ineffective: N/A
+ * Default: 9
+ */
+#define IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS 9
+
+/* Edge Enhanced Demosaic configuration
+ *
+ * ISP2.6.1: EED1_8 is used.
+ */
+
+struct ia_css_eed1_8_config {
+ s32 rbzp_strength; /** Strength of zipper reduction. */
+
+ s32 fcstrength; /** Strength of false color reduction. */
+ s32 fcthres_0; /** Threshold to prevent chroma coring due to noise or green disparity in dark region. */
+ s32 fcthres_1; /** Threshold to prevent chroma coring due to noise or green disparity in bright region. */
+ s32 fc_sat_coef; /** How much color saturation to maintain in high color saturation region. */
+ s32 fc_coring_prm; /** Chroma coring coefficient for tint color suppression. */
+
+ s32 aerel_thres0; /** Threshold for Non-Directional Reliability at dark region. */
+ s32 aerel_gain0; /** Gain for Non-Directional Reliability at dark region. */
+ s32 aerel_thres1; /** Threshold for Non-Directional Reliability at bright region. */
+ s32 aerel_gain1; /** Gain for Non-Directional Reliability at bright region. */
+
+ s32 derel_thres0; /** Threshold for Directional Reliability at dark region. */
+ s32 derel_gain0; /** Gain for Directional Reliability at dark region. */
+ s32 derel_thres1; /** Threshold for Directional Reliability at bright region. */
+ s32 derel_gain1; /** Gain for Directional Reliability at bright region. */
+
+ s32 coring_pos0; /** Positive Edge Coring Threshold in dark region. */
+ s32 coring_pos1; /** Positive Edge Coring Threshold in bright region. */
+ s32 coring_neg0; /** Negative Edge Coring Threshold in dark region. */
+ s32 coring_neg1; /** Negative Edge Coring Threshold in bright region. */
+
+ s32 gain_exp; /** Common Exponent of Gain. */
+ s32 gain_pos0; /** Gain for Positive Edge in dark region. */
+ s32 gain_pos1; /** Gain for Positive Edge in bright region. */
+ s32 gain_neg0; /** Gain for Negative Edge in dark region. */
+ s32 gain_neg1; /** Gain for Negative Edge in bright region. */
+
+ s32 pos_margin0; /** Margin for Positive Edge in dark region. */
+ s32 pos_margin1; /** Margin for Positive Edge in bright region. */
+ s32 neg_margin0; /** Margin for Negative Edge in dark region. */
+ s32 neg_margin1; /** Margin for Negative Edge in bright region. */
+
+ s32 dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: X. */
+ s32 dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: Y. */
+ s32 dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS -
+ 1)]; /** Segment data for directional edge weight: Slope. */
+ s32 dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS -
+ 1)]; /** Segment data for directional edge weight: Exponent. */
+ s32 dedgew_max; /** Max Weight for Directional Edge. */
+};
+
+#endif /* __IA_CSS_EED1_8_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c
new file mode 100644
index 000000000000..0b96b9618ab6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_formats.host.h"
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+
+/*#include "sh_css_frac.h"*/
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+
+const struct ia_css_formats_config default_formats_config = {
+ 1
+};
+
+void
+ia_css_formats_encode(
+ struct sh_css_isp_formats_params *to,
+ const struct ia_css_formats_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->video_full_range_flag = from->video_full_range_flag;
+}
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_dump(
+ const struct sh_css_isp_formats_params *formats,
+ unsigned int level)
+{
+ if (!formats)
+ return;
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "video_full_range_flag", formats->video_full_range_flag);
+}
+#endif
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_debug_dtrace(
+ const struct ia_css_formats_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.video_full_range_flag=%d\n",
+ config->video_full_range_flag);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h
new file mode 100644
index 000000000000..0aac424d9d54
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FORMATS_HOST_H
+#define __IA_CSS_FORMATS_HOST_H
+
+#include "ia_css_formats_types.h"
+#include "ia_css_formats_param.h"
+
+extern const struct ia_css_formats_config default_formats_config;
+
+void
+ia_css_formats_encode(
+ struct sh_css_isp_formats_params *to,
+ const struct ia_css_formats_config *from,
+ unsigned int size);
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_dump(
+ const struct sh_css_isp_formats_params *formats,
+ unsigned int level);
+#endif
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_debug_dtrace(
+ const struct ia_css_formats_config *formats,
+ unsigned int level);
+#endif /*IA_CSS_NO_DEBUG*/
+
+#endif /* __IA_CSS_FORMATS_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h
new file mode 100644
index 000000000000..8f36af1a5ae6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FORMATS_PARAM_H
+#define __IA_CSS_FORMATS_PARAM_H
+
+#include "type_support.h"
+
+/* FORMATS (Format conversion) */
+struct sh_css_isp_formats_params {
+ s32 video_full_range_flag;
+};
+
+#endif /* __IA_CSS_FORMATS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
new file mode 100644
index 000000000000..7cfebaf05dc2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FORMATS_TYPES_H
+#define __IA_CSS_FORMATS_TYPES_H
+
+/* @file
+ * CSS-API header file for output format parameters.
+ */
+
+#include "type_support.h"
+
+/* Formats configuration.
+ *
+ * ISP block: FORMATS
+ * ISP1: FORMATS is used.
+ * ISP2: FORMATS is used.
+ */
+struct ia_css_formats_config {
+ u32 video_full_range_flag; /** selects the range of YUV output.
+ u8.0, [0,1],
+ default 1, ineffective n/a\n
+ 1 - full range, luma 0-255, chroma 0-255\n
+ 0 - reduced range, luma 16-235, chroma 16-240 */
+};
+
+#endif /* __IA_CSS_FORMATS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h
new file mode 100644
index 000000000000..adfd4b37171c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FIXEDBDS_PARAM_H
+#define __IA_CSS_FIXEDBDS_PARAM_H
+
+#include "type_support.h"
+
+/* ISP2401 */
+#define BDS_UNIT 8
+#define FRAC_LOG 3
+#define FRAC_ACC BIT(FRAC_LOG)
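+/* FRAC_ACC = BIT(3) = 8, so the check below enforces that the
+ * fractional accuracy matches BDS_UNIT. */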
+#if FRAC_ACC != BDS_UNIT
+#error "FRAC_ACC and BDS_UNIT need to be merged into one define"
+#endif
+
+struct sh_css_isp_bds_params {
+ int baf_strength;
+};
+
+#endif /* __IA_CSS_FIXEDBDS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h
new file mode 100644
index 000000000000..6485834704da
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FIXEDBDS_TYPES_H
+#define __IA_CSS_FIXEDBDS_TYPES_H
+
+struct sh_css_bds_factor {
+ unsigned int numerator;
+ unsigned int denominator;
+ unsigned int bds_factor;
+};
+
+#endif /* __IA_CSS_FIXEDBDS_TYPES_H */
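struct sh_css_bds_factor above pairs a rational downscale ratio with a packed bds_factor code. As a minimal sketch of the relationship — assuming the code is simply the ratio expressed in 1/BDS_UNIT (i.e. 1/8) steps from ia_css_fixedbds_param.h; bds_pack_factor is a hypothetical helper, not driver API:

/* Hypothetical: pack numerator/denominator into 1/BDS_UNIT steps. */
static unsigned int bds_pack_factor(unsigned int num, unsigned int den)
{
	return num * BDS_UNIT / den;	/* e.g. 3/2 -> 12 == 1.5 * 8 */
}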
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
new file mode 100644
index 000000000000..7b55dfea359a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
@@ -0,0 +1,89 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <assert_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_frame.h>
+#include <ia_css_binary.h>
+#include <ia_css_types.h>
+#include <sh_css_defs.h>
+#include <ia_css_debug.h>
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+#include "ia_css_fpn.host.h"
+
+void
+ia_css_fpn_encode(
+ struct sh_css_isp_fpn_params *to,
+ const struct ia_css_fpn_table *from,
+ unsigned int size)
+{
+ (void)size;
+ to->shift = from->shift;
+ to->enabled = from->data != NULL;
+}
+
+void
+ia_css_fpn_dump(
+ const struct sh_css_isp_fpn_params *fpn,
+ unsigned int level)
+{
+ if (!fpn)
+	return;
+ ia_css_debug_dtrace(level, "Fixed Pattern Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "fpn_shift", fpn->shift);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "fpn_enabled", fpn->enabled);
+}
+
+void
+ia_css_fpn_config(
+ struct sh_css_isp_fpn_isp_config *to,
+ const struct ia_css_fpn_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, from->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+
+ /* Assume divisibility here; this may need to be generalized to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_fpn_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ const struct ia_css_fpn_configuration config = {
+ &my_info
+ };
+
+ my_info.res.width = CEIL_DIV(info->res.width, 2); /* Packed by 2x */
+ my_info.res.height = info->res.height;
+ my_info.padded_width = CEIL_DIV(info->padded_width, 2); /* Packed by 2x */
+ my_info.format = info->format;
+ my_info.raw_bit_depth = FPN_BITS_PER_PIXEL;
+ my_info.raw_bayer_order = info->raw_bayer_order;
+ my_info.crop_info = info->crop_info;
+
+ ia_css_configure_fpn(binary, &config);
+}
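ia_css_fpn_config() above programs width_a_over_b as the ISP vector width divided by the DMA port width, and asserts that the division is exact. A worked sketch with illustrative values (the real numbers come from ISP_VEC_NELEMS and the DMA configuration, not from this patch):

/* Illustration: a 64-element vector over a 32-element port gives 2. */
static unsigned int width_ratio(unsigned int elems_a, unsigned int elems_b)
{
	assert(elems_a % elems_b == 0);	/* same divisibility rule as above */
	return elems_a / elems_b;	/* e.g. 64 / 32 == 2 */
}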
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h
new file mode 100644
index 000000000000..02e85570bd1c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FPN_HOST_H
+#define __IA_CSS_FPN_HOST_H
+
+#include "ia_css_binary.h"
+#include "ia_css_fpn_types.h"
+#include "ia_css_fpn_param.h"
+
+void
+ia_css_fpn_encode(
+ struct sh_css_isp_fpn_params *to,
+ const struct ia_css_fpn_table *from,
+ unsigned int size);
+
+void
+ia_css_fpn_dump(
+ const struct sh_css_isp_fpn_params *fpn,
+ unsigned int level);
+
+void
+ia_css_fpn_config(
+ struct sh_css_isp_fpn_isp_config *to,
+ const struct ia_css_fpn_configuration *from,
+ unsigned int size);
+
+void
+ia_css_fpn_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info);
+
+#endif /* __IA_CSS_FPN_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h
new file mode 100644
index 000000000000..f103ddd882fd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FPN_PARAM_H
+#define __IA_CSS_FPN_PARAM_H
+
+#include "type_support.h"
+
+#include "dma.h"
+
+#define FPN_BITS_PER_PIXEL 16
+
+/* FPNR (Fixed Pattern Noise Reduction) */
+struct sh_css_isp_fpn_params {
+ s32 shift;
+ s32 enabled;
+};
+
+struct sh_css_isp_fpn_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+};
+
+#endif /* __IA_CSS_FPN_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
new file mode 100644
index 000000000000..95552a0e3c45
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
@@ -0,0 +1,52 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FPN_TYPES_H
+#define __IA_CSS_FPN_TYPES_H
+
+/* @file
+* CSS-API header file for Fixed Pattern Noise parameters.
+*/
+
+/* Fixed Pattern Noise table.
+ *
+ * This contains the fixed pattern noise values
+ * obtained from a black frame capture.
+ *
+ * "shift" should be set as the smallest value
+ * which satisfies the requirement the maximum data is less than 64.
+ *
+ * ISP block: FPN1
+ * ISP1: FPN1 is used.
+ * ISP2: FPN1 is used.
+ */
+
+struct ia_css_fpn_table {
+ s16 *data; /** Table content (fixed pattern noise).
+ u0.[13-shift], [0,63] */
+ u32 width; /** Table width (in pixels).
+ This is the input frame width. */
+ u32 height; /** Table height (in pixels).
+ This is the input frame height. */
+ u32 shift; /** Common exponent of table content.
+ u8.0, [0,13] */
+ u32 enabled; /** FPN is enabled.
+ bool */
+};
+
+struct ia_css_fpn_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_FPN_TYPES_H */
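The "shift" rule documented above — the smallest shift that brings every table entry below 64 — can be computed directly from the black-frame peak. fpn_min_shift is a hypothetical helper, shown only to make the rule concrete:

static unsigned int fpn_min_shift(unsigned int max_sample)
{
	unsigned int shift = 0;

	while ((max_sample >> shift) >= 64)
		shift++;
	return shift;	/* e.g. a peak of 800 -> shift 4, since 800 >> 4 == 50 */
}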
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c
new file mode 100644
index 000000000000..1a489c93eb97
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c
@@ -0,0 +1,118 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+#include "sh_css_frac.h"
+#include "vamem.h"
+
+#include "ia_css_gc.host.h"
+
+const struct ia_css_gc_config default_gc_config = {
+ 0,
+ 0
+};
+
+const struct ia_css_ce_config default_ce_config = {
+ 0,
+ 255
+};
+
+void
+ia_css_gc_encode(
+ struct sh_css_isp_gc_params *to,
+ const struct ia_css_gc_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->gain_k1 =
+ uDIGIT_FITTING((int)from->gain_k1, 16,
+ IA_CSS_GAMMA_GAIN_K_SHIFT);
+ to->gain_k2 =
+ uDIGIT_FITTING((int)from->gain_k2, 16,
+ IA_CSS_GAMMA_GAIN_K_SHIFT);
+}
+
+void
+ia_css_ce_encode(
+ struct sh_css_isp_ce_params *to,
+ const struct ia_css_ce_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->uv_level_min = from->uv_level_min;
+ to->uv_level_max = from->uv_level_max;
+}
+
+void
+ia_css_gc_vamem_encode(
+ struct sh_css_isp_gc_vamem_params *to,
+ const struct ia_css_gamma_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_gc_dump(
+ const struct sh_css_isp_gc_params *gc,
+ unsigned int level)
+{
+ if (!gc)
+	return;
+ ia_css_debug_dtrace(level, "Gamma Correction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "gamma_gain_k1", gc->gain_k1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "gamma_gain_k2", gc->gain_k2);
+}
+
+void
+ia_css_ce_dump(
+ const struct sh_css_isp_ce_params *ce,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level, "Chroma Enhancement:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ce_uv_level_min", ce->uv_level_min);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ce_uv_level_max", ce->uv_level_max);
+}
+
+void
+ia_css_gc_debug_dtrace(
+ const struct ia_css_gc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.gain_k1=%d, config.gain_k2=%d\n",
+ config->gain_k1, config->gain_k2);
+}
+
+void
+ia_css_ce_debug_dtrace(
+ const struct ia_css_ce_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.uv_level_min=%d, config.uv_level_max=%d\n",
+ config->uv_level_min, config->uv_level_max);
+}
+#endif
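ia_css_gc_encode() above refits the u0.16 gains to IA_CSS_GAMMA_GAIN_K_SHIFT (13) fractional bits. Assuming uDIGIT_FITTING(v, a, b) reduces precision by right-shifting a - b bits (a common reading of sh_css_defs.h; treat this as an assumption), the default gain of 19000 works out as follows:

/* Assumed equivalence: 19000 (u0.16, ~0.29) >> (16 - 13) == 2375 (u0.13). */
static int refit_gain_k(int gain_u0_16)
{
	return gain_u0_16 >> (16 - IA_CSS_GAMMA_GAIN_K_SHIFT);
}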
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h
new file mode 100644
index 000000000000..2fb2927b07f1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h
@@ -0,0 +1,65 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC_HOST_H
+#define __IA_CSS_GC_HOST_H
+
+#include "ia_css_gc_param.h"
+#include "ia_css_gc_table.host.h"
+
+extern const struct ia_css_gc_config default_gc_config;
+extern const struct ia_css_ce_config default_ce_config;
+
+void
+ia_css_gc_encode(
+ struct sh_css_isp_gc_params *to,
+ const struct ia_css_gc_config *from,
+ unsigned int size);
+
+void
+ia_css_gc_vamem_encode(
+ struct sh_css_isp_gc_vamem_params *to,
+ const struct ia_css_gamma_table *from,
+ unsigned int size);
+
+void
+ia_css_ce_encode(
+ struct sh_css_isp_ce_params *to,
+ const struct ia_css_ce_config *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_gc_dump(
+ const struct sh_css_isp_gc_params *gc,
+ unsigned int level);
+
+void
+ia_css_ce_dump(
+ const struct sh_css_isp_ce_params *ce,
+ unsigned int level);
+
+void
+ia_css_gc_debug_dtrace(
+ const struct ia_css_gc_config *config,
+ unsigned int level);
+
+void
+ia_css_ce_debug_dtrace(
+ const struct ia_css_ce_config *config,
+ unsigned int level);
+
+#endif
+
+#endif /* __IA_CSS_GC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h
new file mode 100644
index 000000000000..beeba6c9be6a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h
@@ -0,0 +1,61 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC_PARAM_H
+#define __IA_CSS_GC_PARAM_H
+
+#include "type_support.h"
+#ifndef PIPE_GENERATION
+#ifdef __ISP
+#define __INLINE_VAMEM__
+#endif
+#include "vamem.h"
+#include "ia_css_gc_types.h"
+
+#if defined(IS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_GAMMA_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_GC_TABLE_SIZE IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE
+#elif defined(IS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_GAMMA_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_GC_TABLE_SIZE IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE
+#else
+#error "Undefined vamem version"
+#endif
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_GC_TABLE_SIZE 0
+#endif
+
+#define GAMMA_OUTPUT_BITS 8
+#define GAMMA_OUTPUT_MAX_VAL ((1 << GAMMA_OUTPUT_BITS) - 1)
+
+/* GC (Gamma Correction) */
+struct sh_css_isp_gc_params {
+ s32 gain_k1;
+ s32 gain_k2;
+};
+
+/* CE (Chroma Enhancement) */
+struct sh_css_isp_ce_params {
+ s32 uv_level_min;
+ s32 uv_level_max;
+};
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_gc_vamem_params {
+ u16 gc[SH_CSS_ISP_GC_TABLE_SIZE];
+};
+
+#endif /* __IA_CSS_GC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
new file mode 100644
index 000000000000..15cf0575aac5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
@@ -0,0 +1,213 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <string_support.h> /* memcpy */
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_gc_table.host.h"
+
+#if defined(HAS_VAMEM_VERSION_2)
+
+struct ia_css_gamma_table default_gamma_table;
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE] = {
+ 0, 4, 8, 12, 17, 21, 27, 32,
+ 38, 44, 49, 55, 61, 66, 71, 76,
+ 80, 84, 88, 92, 95, 98, 102, 105,
+ 108, 110, 113, 116, 118, 121, 123, 126,
+ 128, 130, 132, 135, 137, 139, 141, 143,
+ 145, 146, 148, 150, 152, 153, 155, 156,
+ 158, 160, 161, 162, 164, 165, 166, 168,
+ 169, 170, 171, 172, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 184,
+ 185, 186, 187, 188, 189, 189, 190, 191,
+ 192, 192, 193, 194, 195, 195, 196, 197,
+ 197, 198, 198, 199, 200, 200, 201, 201,
+ 202, 203, 203, 204, 204, 205, 205, 206,
+ 206, 207, 207, 208, 208, 209, 209, 210,
+ 210, 210, 211, 211, 212, 212, 213, 213,
+ 214, 214, 214, 215, 215, 216, 216, 216,
+ 217, 217, 218, 218, 218, 219, 219, 220,
+ 220, 220, 221, 221, 222, 222, 222, 223,
+ 223, 223, 224, 224, 225, 225, 225, 226,
+ 226, 226, 227, 227, 227, 228, 228, 228,
+ 229, 229, 229, 230, 230, 230, 231, 231,
+ 231, 232, 232, 232, 233, 233, 233, 234,
+ 234, 234, 234, 235, 235, 235, 236, 236,
+ 236, 237, 237, 237, 237, 238, 238, 238,
+ 239, 239, 239, 239, 240, 240, 240, 241,
+ 241, 241, 241, 242, 242, 242, 242, 243,
+ 243, 243, 243, 244, 244, 244, 245, 245,
+ 245, 245, 246, 246, 246, 246, 247, 247,
+ 247, 247, 248, 248, 248, 248, 249, 249,
+ 249, 249, 250, 250, 250, 250, 251, 251,
+ 251, 251, 252, 252, 252, 252, 253, 253,
+ 253, 253, 254, 254, 254, 254, 255, 255,
+ 255
+};
+
+#elif defined(HAS_VAMEM_VERSION_1)
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 16,
+ 17, 18, 19, 20, 21, 23, 24, 25,
+ 27, 28, 29, 31, 32, 33, 35, 36,
+ 38, 39, 41, 42, 44, 45, 47, 48,
+ 49, 51, 52, 54, 55, 57, 58, 60,
+ 61, 62, 64, 65, 66, 68, 69, 70,
+ 71, 72, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 93, 94,
+ 95, 96, 97, 98, 98, 99, 100, 101,
+ 102, 102, 103, 104, 105, 105, 106, 107,
+ 108, 108, 109, 110, 110, 111, 112, 112,
+ 113, 114, 114, 115, 116, 116, 117, 118,
+ 118, 119, 120, 120, 121, 121, 122, 123,
+ 123, 124, 125, 125, 126, 126, 127, 127, /* 128 */
+ 128, 129, 129, 130, 130, 131, 131, 132,
+ 132, 133, 134, 134, 135, 135, 136, 136,
+ 137, 137, 138, 138, 139, 139, 140, 140,
+ 141, 141, 142, 142, 143, 143, 144, 144,
+ 145, 145, 145, 146, 146, 147, 147, 148,
+ 148, 149, 149, 150, 150, 150, 151, 151,
+ 152, 152, 152, 153, 153, 154, 154, 155,
+ 155, 155, 156, 156, 156, 157, 157, 158,
+ 158, 158, 159, 159, 160, 160, 160, 161,
+ 161, 161, 162, 162, 162, 163, 163, 163,
+ 164, 164, 164, 165, 165, 165, 166, 166,
+ 166, 167, 167, 167, 168, 168, 168, 169,
+ 169, 169, 170, 170, 170, 170, 171, 171,
+ 171, 172, 172, 172, 172, 173, 173, 173,
+ 174, 174, 174, 174, 175, 175, 175, 176,
+ 176, 176, 176, 177, 177, 177, 177, 178, /* 256 */
+ 178, 178, 178, 179, 179, 179, 179, 180,
+ 180, 180, 180, 181, 181, 181, 181, 182,
+ 182, 182, 182, 182, 183, 183, 183, 183,
+ 184, 184, 184, 184, 184, 185, 185, 185,
+ 185, 186, 186, 186, 186, 186, 187, 187,
+ 187, 187, 187, 188, 188, 188, 188, 188,
+ 189, 189, 189, 189, 189, 190, 190, 190,
+ 190, 190, 191, 191, 191, 191, 191, 192,
+ 192, 192, 192, 192, 192, 193, 193, 193,
+ 193, 193, 194, 194, 194, 194, 194, 194,
+ 195, 195, 195, 195, 195, 195, 196, 196,
+ 196, 196, 196, 196, 197, 197, 197, 197,
+ 197, 197, 198, 198, 198, 198, 198, 198,
+ 198, 199, 199, 199, 199, 199, 199, 200,
+ 200, 200, 200, 200, 200, 200, 201, 201,
+ 201, 201, 201, 201, 201, 202, 202, 202, /* 384 */
+ 202, 202, 202, 202, 203, 203, 203, 203,
+ 203, 203, 203, 204, 204, 204, 204, 204,
+ 204, 204, 204, 205, 205, 205, 205, 205,
+ 205, 205, 205, 206, 206, 206, 206, 206,
+ 206, 206, 206, 207, 207, 207, 207, 207,
+ 207, 207, 207, 208, 208, 208, 208, 208,
+ 208, 208, 208, 209, 209, 209, 209, 209,
+ 209, 209, 209, 209, 210, 210, 210, 210,
+ 210, 210, 210, 210, 210, 211, 211, 211,
+ 211, 211, 211, 211, 211, 211, 212, 212,
+ 212, 212, 212, 212, 212, 212, 212, 213,
+ 213, 213, 213, 213, 213, 213, 213, 213,
+ 214, 214, 214, 214, 214, 214, 214, 214,
+ 214, 214, 215, 215, 215, 215, 215, 215,
+ 215, 215, 215, 216, 216, 216, 216, 216,
+ 216, 216, 216, 216, 216, 217, 217, 217, /* 512 */
+ 217, 217, 217, 217, 217, 217, 217, 218,
+ 218, 218, 218, 218, 218, 218, 218, 218,
+ 218, 219, 219, 219, 219, 219, 219, 219,
+ 219, 219, 219, 220, 220, 220, 220, 220,
+ 220, 220, 220, 220, 220, 221, 221, 221,
+ 221, 221, 221, 221, 221, 221, 221, 221,
+ 222, 222, 222, 222, 222, 222, 222, 222,
+ 222, 222, 223, 223, 223, 223, 223, 223,
+ 223, 223, 223, 223, 223, 224, 224, 224,
+ 224, 224, 224, 224, 224, 224, 224, 224,
+ 225, 225, 225, 225, 225, 225, 225, 225,
+ 225, 225, 225, 226, 226, 226, 226, 226,
+ 226, 226, 226, 226, 226, 226, 226, 227,
+ 227, 227, 227, 227, 227, 227, 227, 227,
+ 227, 227, 228, 228, 228, 228, 228, 228,
+ 228, 228, 228, 228, 228, 228, 229, 229,
+ 229, 229, 229, 229, 229, 229, 229, 229,
+ 229, 229, 230, 230, 230, 230, 230, 230,
+ 230, 230, 230, 230, 230, 230, 231, 231,
+ 231, 231, 231, 231, 231, 231, 231, 231,
+ 231, 231, 231, 232, 232, 232, 232, 232,
+ 232, 232, 232, 232, 232, 232, 232, 233,
+ 233, 233, 233, 233, 233, 233, 233, 233,
+ 233, 233, 233, 233, 234, 234, 234, 234,
+ 234, 234, 234, 234, 234, 234, 234, 234,
+ 234, 235, 235, 235, 235, 235, 235, 235,
+ 235, 235, 235, 235, 235, 235, 236, 236,
+ 236, 236, 236, 236, 236, 236, 236, 236,
+ 236, 236, 236, 236, 237, 237, 237, 237,
+ 237, 237, 237, 237, 237, 237, 237, 237,
+ 237, 237, 238, 238, 238, 238, 238, 238,
+ 238, 238, 238, 238, 238, 238, 238, 238,
+ 239, 239, 239, 239, 239, 239, 239, 239,
+ 239, 239, 239, 239, 239, 239, 240, 240,
+ 240, 240, 240, 240, 240, 240, 240, 240,
+ 240, 240, 240, 240, 241, 241, 241, 241,
+ 241, 241, 241, 241, 241, 241, 241, 241,
+ 241, 241, 241, 242, 242, 242, 242, 242,
+ 242, 242, 242, 242, 242, 242, 242, 242,
+ 242, 242, 243, 243, 243, 243, 243, 243,
+ 243, 243, 243, 243, 243, 243, 243, 243,
+ 243, 244, 244, 244, 244, 244, 244, 244,
+ 244, 244, 244, 244, 244, 244, 244, 244,
+ 245, 245, 245, 245, 245, 245, 245, 245,
+ 245, 245, 245, 245, 245, 245, 245, 246,
+ 246, 246, 246, 246, 246, 246, 246, 246,
+ 246, 246, 246, 246, 246, 246, 246, 247,
+ 247, 247, 247, 247, 247, 247, 247, 247,
+ 247, 247, 247, 247, 247, 247, 247, 248,
+ 248, 248, 248, 248, 248, 248, 248, 248,
+ 248, 248, 248, 248, 248, 248, 248, 249,
+ 249, 249, 249, 249, 249, 249, 249, 249,
+ 249, 249, 249, 249, 249, 249, 249, 250,
+ 250, 250, 250, 250, 250, 250, 250, 250,
+ 250, 250, 250, 250, 250, 250, 250, 251,
+ 251, 251, 251, 251, 251, 251, 251, 251,
+ 251, 251, 251, 251, 251, 251, 251, 252,
+ 252, 252, 252, 252, 252, 252, 252, 252,
+ 252, 252, 252, 252, 252, 252, 252, 253,
+ 253, 253, 253, 253, 253, 253, 253, 253,
+ 253, 253, 253, 253, 253, 253, 253, 253,
+ 254, 254, 254, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 254, 254, 254,
+ 255, 255, 255, 255, 255, 255, 255, 255
+};
+
+#else
+#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
+#endif
+
+void
+ia_css_config_gamma_table(void)
+{
+#if defined(HAS_VAMEM_VERSION_2)
+ memcpy(default_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+#else
+ memcpy(default_gamma_table.data.vamem_1, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h
new file mode 100644
index 000000000000..9686623d9cdd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC_TABLE_HOST_H
+#define __IA_CSS_GC_TABLE_HOST_H
+
+#include "ia_css_gc_types.h"
+
+extern struct ia_css_gamma_table default_gamma_table;
+
+void ia_css_config_gamma_table(void);
+
+#endif /* __IA_CSS_GC_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
new file mode 100644
index 000000000000..c896c138b569
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
@@ -0,0 +1,97 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC_TYPES_H
+#define __IA_CSS_GC_TYPES_H
+
+/* @file
+* CSS-API header file for Gamma Correction parameters.
+*/
+
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: Needed for ia_css_vamem_type */
+
+/* Fractional bits for GAMMA gain */
+#define IA_CSS_GAMMA_GAIN_K_SHIFT 13
+
+/* Number of elements in the gamma table. */
+#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2 10
+#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE BIT(IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2)
+
+/* Number of elements in the gamma table. */
+#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2 8
+#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE ((1U << IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2) + 1)
+
+/* Gamma table, used for Y(Luma) Gamma Correction.
+ *
+ * ISP block: GC1 (YUV Gamma Correction)
+ * ISP1: GC1 is used.
+ * (ISP2: GC2(sRGB Gamma Correction) is used.)
+ */
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
+ IA_CSS_VAMEM_TYPE_2(ISP2400) */
+union ia_css_gc_data {
+ u16 vamem_1[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE];
+ /** Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
+ u16 vamem_2[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE];
+ /** Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
+};
+
+struct ia_css_gamma_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_gc_data data;
+};
+
+/* Gamma Correction configuration (used only for YUV Gamma Correction).
+ *
+ * ISP block: GC1 (YUV Gamma Correction)
+ * ISP1: GC1 is used.
+ * (ISP2: GC2 (sRGB Gamma Correction) is used.)
+ */
+struct ia_css_gc_config {
+ u16 gain_k1; /** Gain to adjust U after YUV Gamma Correction.
+ u0.16, [0,65535],
+ default/ineffective 19000(0.29) */
+ u16 gain_k2; /** Gain to adjust V after YUV Gamma Correction.
+ u0.16, [0,65535],
+ default/ineffective 19000(0.29) */
+};
+
+/* Chroma Enhancement configuration.
+ *
+ * This parameter specifies range of chroma output level.
+ * The standard range is [0,255] or [16,240].
+ *
+ * ISP block: CE1
+ * ISP1: CE1 is used.
+ * (ISP2: CE1 is not used.)
+ */
+struct ia_css_ce_config {
+ u8 uv_level_min; /** Minimum of chroma output level.
+ u0.8, [0,255], default/ineffective 0 */
+ u8 uv_level_max; /** Maximum of chroma output level.
+ u0.8, [0,255], default/ineffective 255 */
+};
+
+/* Multi-Axes Color Correction (MACC) configuration.
+ *
+ * ISP block: MACC2 (MACC by matrix and exponent(ia_css_macc_config))
+ * (ISP1: MACC1 (MACC by only matrix) is used.)
+ * ISP2: MACC2 is used.
+ */
+struct ia_css_macc_config {
+ u8 exp; /** Common exponent of ia_css_macc_table.
+ u8.0, [0,13], default 1, ineffective 1 */
+};
+
+#endif /* __IA_CSS_GC_TYPES_H */
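IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE above is (1 << 8) + 1 == 257; the extra entry supplies an upper endpoint so an interpolated lookup never indexes past the table (our reading — the patch does not state this). A hypothetical lookup illustrating that layout, assuming a 12-bit input split into an 8-bit index and a 4-bit fraction:

static u16 gamma_lookup_vamem2(const u16 *t, unsigned int in12)
{
	unsigned int i = in12 >> 4;	/* index, 0..255 */
	unsigned int f = in12 & 0xf;	/* 4-bit fraction */

	/* t must hold 257 entries so t[i + 1] is always valid */
	return t[i] + (((t[i + 1] - t[i]) * f) >> 4);
}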
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c
new file mode 100644
index 000000000000..29a1e013a9aa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c
@@ -0,0 +1,109 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+#include "csc/csc_1.0/ia_css_csc.host.h"
+#include "vamem.h"
+
+#include "ia_css_gc2.host.h"
+
+const struct ia_css_cc_config default_yuv2rgb_cc_config = {
+ 12,
+ {4096, -4096, 4096, 4096, 4096, 0, 4096, -4096, -4096}
+};
+
+const struct ia_css_cc_config default_rgb2yuv_cc_config = {
+ 13,
+ {2449, 4809, 934, -1382, -2714, 4096, 4096, -3430, -666}
+};
+
+void
+ia_css_yuv2rgb_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size)
+{
+ ia_css_encode_cc(to, from, size);
+}
+
+void
+ia_css_rgb2yuv_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size)
+{
+ ia_css_encode_cc(to, from, size);
+}
+
+void
+ia_css_r_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+void
+ia_css_g_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+void
+ia_css_b_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_yuv2rgb_dump(
+ const struct sh_css_isp_csc_params *yuv2rgb,
+ unsigned int level)
+{
+ ia_css_cc_dump(yuv2rgb, level, "YUV to RGB Conversion");
+}
+
+void
+ia_css_rgb2yuv_dump(
+ const struct sh_css_isp_csc_params *rgb2yuv,
+ unsigned int level)
+{
+ ia_css_cc_dump(rgb2yuv, level, "RGB to YUV Conversion");
+}
+
+void
+ia_css_rgb_gamma_table_debug_dtrace(
+ const struct ia_css_rgb_gamma_table *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+#endif
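default_rgb2yuv_cc_config above uses 13 fractional bits, so 8192 represents 1.0; its first row is recognizably the BT.601 luma vector (0.299 * 8192 ~= 2449, 0.587 * 8192 ~= 4809, 0.114 * 8192 ~= 934, summing to exactly 8192). A small illustrative check, not driver code:

static int rgb2yuv_luma(int r, int g, int b)
{
	/* 2449 + 4809 + 934 == 8192, so white (255,255,255) maps to 255 */
	return (2449 * r + 4809 * g + 934 * b) >> 13;
}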
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h
new file mode 100644
index 000000000000..ca7d54576471
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC2_HOST_H
+#define __IA_CSS_GC2_HOST_H
+
+#include "ia_css_gc2_types.h"
+#include "ia_css_gc2_param.h"
+#include "ia_css_gc2_table.host.h"
+
+extern const struct ia_css_cc_config default_yuv2rgb_cc_config;
+extern const struct ia_css_cc_config default_rgb2yuv_cc_config;
+
+void
+ia_css_yuv2rgb_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size);
+
+void
+ia_css_rgb2yuv_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size);
+
+void
+ia_css_r_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size);
+
+void
+ia_css_g_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size);
+
+void
+ia_css_b_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_yuv2rgb_dump(
+ const struct sh_css_isp_csc_params *yuv2rgb,
+ unsigned int level);
+
+void
+ia_css_rgb2yuv_dump(
+ const struct sh_css_isp_csc_params *rgb2yuv,
+ unsigned int level);
+
+void
+ia_css_rgb_gamma_table_debug_dtrace(
+ const struct ia_css_rgb_gamma_table *config,
+ unsigned int level);
+
+#define ia_css_yuv2rgb_debug_dtrace ia_css_cc_config_debug_dtrace
+#define ia_css_rgb2yuv_debug_dtrace ia_css_cc_config_debug_dtrace
+#define ia_css_r_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace
+#define ia_css_g_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace
+#define ia_css_b_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace
+
+#endif
+
+#endif /* __IA_CSS_GC2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h
new file mode 100644
index 000000000000..458c72a45eef
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC2_PARAM_H
+#define __IA_CSS_GC2_PARAM_H
+
+#include "type_support.h"
+/* Extend GC1 */
+#include "ia_css_gc2_types.h"
+#include "gc/gc_1.0/ia_css_gc_param.h"
+#include "csc/csc_1.0/ia_css_csc_param.h"
+
+#ifndef PIPE_GENERATION
+#if defined(IS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE
+#elif defined(IS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE
+#else
+#error "Undefined vamem version"
+#endif
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE 0
+#endif
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_rgb_gamma_vamem_params {
+ u16 gc[SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE];
+};
+
+#endif /* __IA_CSS_GC2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
new file mode 100644
index 000000000000..d2fe0052fb00
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
@@ -0,0 +1,131 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <string_support.h> /* memcpy */
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_gc2_table.host.h"
+
+struct ia_css_rgb_gamma_table default_r_gamma_table;
+struct ia_css_rgb_gamma_table default_g_gamma_table;
+struct ia_css_rgb_gamma_table default_b_gamma_table;
+
+/* Identical default gamma table for R, G, and B. */
+
+#if defined(HAS_VAMEM_VERSION_2)
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE] = {
+ 0, 72, 144, 216, 288, 360, 426, 486,
+ 541, 592, 641, 687, 730, 772, 812, 850,
+ 887, 923, 958, 991, 1024, 1055, 1086, 1117,
+ 1146, 1175, 1203, 1230, 1257, 1284, 1310, 1335,
+ 1360, 1385, 1409, 1433, 1457, 1480, 1502, 1525,
+ 1547, 1569, 1590, 1612, 1632, 1653, 1674, 1694,
+ 1714, 1734, 1753, 1772, 1792, 1811, 1829, 1848,
+ 1866, 1884, 1902, 1920, 1938, 1955, 1973, 1990,
+ 2007, 2024, 2040, 2057, 2074, 2090, 2106, 2122,
+ 2138, 2154, 2170, 2185, 2201, 2216, 2231, 2247,
+ 2262, 2277, 2291, 2306, 2321, 2335, 2350, 2364,
+ 2378, 2393, 2407, 2421, 2435, 2449, 2462, 2476,
+ 2490, 2503, 2517, 2530, 2543, 2557, 2570, 2583,
+ 2596, 2609, 2622, 2634, 2647, 2660, 2673, 2685,
+ 2698, 2710, 2722, 2735, 2747, 2759, 2771, 2783,
+ 2795, 2807, 2819, 2831, 2843, 2855, 2867, 2878,
+ 2890, 2901, 2913, 2924, 2936, 2947, 2958, 2970,
+ 2981, 2992, 3003, 3014, 3025, 3036, 3047, 3058,
+ 3069, 3080, 3091, 3102, 3112, 3123, 3134, 3144,
+ 3155, 3165, 3176, 3186, 3197, 3207, 3217, 3228,
+ 3238, 3248, 3258, 3268, 3279, 3289, 3299, 3309,
+ 3319, 3329, 3339, 3349, 3358, 3368, 3378, 3388,
+ 3398, 3407, 3417, 3427, 3436, 3446, 3455, 3465,
+ 3474, 3484, 3493, 3503, 3512, 3521, 3531, 3540,
+ 3549, 3559, 3568, 3577, 3586, 3595, 3605, 3614,
+ 3623, 3632, 3641, 3650, 3659, 3668, 3677, 3686,
+ 3694, 3703, 3712, 3721, 3730, 3739, 3747, 3756,
+ 3765, 3773, 3782, 3791, 3799, 3808, 3816, 3825,
+ 3833, 3842, 3850, 3859, 3867, 3876, 3884, 3893,
+ 3901, 3909, 3918, 3926, 3934, 3942, 3951, 3959,
+ 3967, 3975, 3984, 3992, 4000, 4008, 4016, 4024,
+ 4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088,
+ 4095
+};
+#elif defined(HAS_VAMEM_VERSION_1)
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE] = {
+ 0, 72, 144, 216, 288, 360, 426, 486,
+ 541, 592, 641, 687, 730, 772, 812, 850,
+ 887, 923, 958, 991, 1024, 1055, 1086, 1117,
+ 1146, 1175, 1203, 1230, 1257, 1284, 1310, 1335,
+ 1360, 1385, 1409, 1433, 1457, 1480, 1502, 1525,
+ 1547, 1569, 1590, 1612, 1632, 1653, 1674, 1694,
+ 1714, 1734, 1753, 1772, 1792, 1811, 1829, 1848,
+ 1866, 1884, 1902, 1920, 1938, 1955, 1973, 1990,
+ 2007, 2024, 2040, 2057, 2074, 2090, 2106, 2122,
+ 2138, 2154, 2170, 2185, 2201, 2216, 2231, 2247,
+ 2262, 2277, 2291, 2306, 2321, 2335, 2350, 2364,
+ 2378, 2393, 2407, 2421, 2435, 2449, 2462, 2476,
+ 2490, 2503, 2517, 2530, 2543, 2557, 2570, 2583,
+ 2596, 2609, 2622, 2634, 2647, 2660, 2673, 2685,
+ 2698, 2710, 2722, 2735, 2747, 2759, 2771, 2783,
+ 2795, 2807, 2819, 2831, 2843, 2855, 2867, 2878,
+ 2890, 2901, 2913, 2924, 2936, 2947, 2958, 2970,
+ 2981, 2992, 3003, 3014, 3025, 3036, 3047, 3058,
+ 3069, 3080, 3091, 3102, 3112, 3123, 3134, 3144,
+ 3155, 3165, 3176, 3186, 3197, 3207, 3217, 3228,
+ 3238, 3248, 3258, 3268, 3279, 3289, 3299, 3309,
+ 3319, 3329, 3339, 3349, 3358, 3368, 3378, 3388,
+ 3398, 3407, 3417, 3427, 3436, 3446, 3455, 3465,
+ 3474, 3484, 3493, 3503, 3512, 3521, 3531, 3540,
+ 3549, 3559, 3568, 3577, 3586, 3595, 3605, 3614,
+ 3623, 3632, 3641, 3650, 3659, 3668, 3677, 3686,
+ 3694, 3703, 3712, 3721, 3730, 3739, 3747, 3756,
+ 3765, 3773, 3782, 3791, 3799, 3808, 3816, 3825,
+ 3833, 3842, 3850, 3859, 3867, 3876, 3884, 3893,
+ 3901, 3909, 3918, 3926, 3934, 3942, 3951, 3959,
+ 3967, 3975, 3984, 3992, 4000, 4008, 4016, 4024,
+ 4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088
+};
+#else
+#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
+#endif
+
+void
+ia_css_config_rgb_gamma_tables(void)
+{
+#if defined(HAS_VAMEM_VERSION_2)
+ default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+ default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+ default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+ memcpy(default_r_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_g_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_b_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+#else
+ memcpy(default_r_gamma_table.data.vamem_1, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_g_gamma_table.data.vamem_1, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_b_gamma_table.data.vamem_1, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+ default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+ default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h
new file mode 100644
index 000000000000..8686e6e3586c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC2_TABLE_HOST_H
+#define __IA_CSS_GC2_TABLE_HOST_H
+
+#include "ia_css_gc2_types.h"
+
+extern struct ia_css_rgb_gamma_table default_r_gamma_table;
+extern struct ia_css_rgb_gamma_table default_g_gamma_table;
+extern struct ia_css_rgb_gamma_table default_b_gamma_table;
+
+void ia_css_config_rgb_gamma_tables(void);
+
+#endif /* __IA_CSS_GC2_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h
new file mode 100644
index 000000000000..30780394ed7f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC2_TYPES_H
+#define __IA_CSS_GC2_TYPES_H
+
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: needed for ia_css_vamem_type */
+
+/* @file
+* CSS-API header file for Gamma Correction parameters.
+*/
+
+/* sRGB Gamma table, used for sRGB Gamma Correction.
+ *
+ * ISP block: GC2 (sRGB Gamma Correction)
+ * (ISP1: GC1(YUV Gamma Correction) is used.)
+ * ISP2: GC2 is used.
+ */
+
+/* Number of elements in the sRGB gamma table. */
+#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2 8
+#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE BIT(IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2)
+
+/* Number of elements in the sRGB gamma table. */
+#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2 8
+#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE ((1U << IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2) + 1)
+
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
+ IA_CSS_VAMEM_TYPE_2(ISP2400) */
+union ia_css_rgb_gamma_data {
+ u16 vamem_1[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE];
+ /** RGB Gamma table on vamem type1. This table is not used,
+ because sRGB Gamma Correction is not implemented for ISP2300. */
+ u16 vamem_2[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE];
+ /** RGB Gamma table on vamem type2. u0.12, [0,4095] */
+};
+
+struct ia_css_rgb_gamma_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_rgb_gamma_data data;
+};
+
+#endif /* __IA_CSS_GC2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c
new file mode 100644
index 000000000000..643b7d9095e6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c
@@ -0,0 +1,41 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/* Release Version: irci_ecr-master_20150911_0724 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_hdr.host.h"
+
+void
+ia_css_hdr_init_config(
+ struct sh_css_isp_hdr_params *to,
+ const struct ia_css_hdr_config *from,
+ unsigned int size)
+{
+ int i;
+ (void)size;
+
+ for (i = 0; i < HDR_NUM_INPUT_FRAMES - 1; i++) {
+ to->irradiance.match_shift[i] = from->irradiance.match_shift[i];
+ to->irradiance.match_mul[i] = from->irradiance.match_mul[i];
+ to->irradiance.thr_low[i] = from->irradiance.thr_low[i];
+ to->irradiance.thr_high[i] = from->irradiance.thr_high[i];
+ to->irradiance.thr_coeff[i] = from->irradiance.thr_coeff[i];
+ to->irradiance.thr_shift[i] = from->irradiance.thr_shift[i];
+ }
+ to->irradiance.test_irr = from->irradiance.test_irr;
+ to->irradiance.weight_bpp = from->irradiance.weight_bpp;
+
+ to->deghost.test_deg = from->deghost.test_deg;
+ to->exclusion.test_excl = from->exclusion.test_excl;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h
new file mode 100644
index 000000000000..ecc8bea3542b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h
@@ -0,0 +1,31 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/* Release Version: irci_ecr-master_20150911_0724 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_HDR_HOST_H
+#define __IA_CSS_HDR_HOST_H
+
+#include "ia_css_hdr_param.h"
+#include "ia_css_hdr_types.h"
+
+extern const struct ia_css_hdr_config default_hdr_config;
+
+void
+ia_css_hdr_init_config(
+ struct sh_css_isp_hdr_params *to,
+ const struct ia_css_hdr_config *from,
+ unsigned int size);
+
+#endif /* __IA_CSS_HDR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h
new file mode 100644
index 000000000000..47651cdf94b7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h
@@ -0,0 +1,59 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/* Release Version: irci_ecr-master_20150911_0724 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_HDR_PARAMS_H
+#define __IA_CSS_HDR_PARAMS_H
+
+#include "type_support.h"
+
+#define HDR_NUM_INPUT_FRAMES (3)
+
+/* HDR irradiance map parameters on ISP. */
+struct sh_css_hdr_irradiance_params {
+ s32 test_irr;
+ /* Histogram matching shift parameter */
+ s32 match_shift[HDR_NUM_INPUT_FRAMES - 1];
+ /* Histogram matching multiplication parameter */
+ s32 match_mul[HDR_NUM_INPUT_FRAMES - 1];
+ /* Weight map soft threshold low bound parameter */
+ s32 thr_low[HDR_NUM_INPUT_FRAMES - 1];
+ /* Weight map soft threshold high bound parameter */
+ s32 thr_high[HDR_NUM_INPUT_FRAMES - 1];
+ /* Soft threshold linear function coefficient */
+ s32 thr_coeff[HDR_NUM_INPUT_FRAMES - 1];
+ /* Soft threshold precision shift parameter */
+ s32 thr_shift[HDR_NUM_INPUT_FRAMES - 1];
+ s32 weight_bpp; /* Weight map bits per pixel */
+};
+
+/* HDR deghosting parameters on ISP */
+struct sh_css_hdr_deghost_params {
+ s32 test_deg;
+};
+
+/* HDR exclusion parameters on ISP */
+struct sh_css_hdr_exclusion_params {
+ s32 test_excl;
+};
+
+/* HDR ISP parameters */
+struct sh_css_isp_hdr_params {
+ struct sh_css_hdr_irradiance_params irradiance;
+ struct sh_css_hdr_deghost_params deghost;
+ struct sh_css_hdr_exclusion_params exclusion;
+};
+
+#endif /* __IA_CSS_HDR_PARAMS_H */
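Each per-pair array above holds HDR_NUM_INPUT_FRAMES - 1 == 2 entries: three bracketed exposures give two adjacent frame pairs to match, which is also the loop bound in ia_css_hdr_init_config(). A sketch of filling one such array (hdr_set_match_shift is hypothetical, and the pair ordering is our assumption):

static void hdr_set_match_shift(struct sh_css_isp_hdr_params *p,
				s32 pair01, s32 pair12)
{
	p->irradiance.match_shift[0] = pair01;	/* assumed: frames 0 and 1 */
	p->irradiance.match_shift[1] = pair12;	/* assumed: frames 1 and 2 */
}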
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
new file mode 100644
index 000000000000..7c2f8f213bef
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
@@ -0,0 +1,70 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/* Release Version: irci_ecr-master_20150911_0724 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_HDR_TYPES_H
+#define __IA_CSS_HDR_TYPES_H
+
+#define IA_CSS_HDR_MAX_NUM_INPUT_FRAMES (3)
+
+/**
+ * \brief HDR Irradiance Parameters
+ * \details Currently HDR parameters are used only for testing purposes
+ */
+struct ia_css_hdr_irradiance_params {
+ int test_irr; /** Test parameter */
+ /** Histogram matching shift parameter */
+ int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];
+ /** Histogram matching multiplication parameter */
+ int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];
+ /** Weight map soft threshold low bound parameter */
+ int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];
+ /** Weight map soft threshold high bound parameter */
+ int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];
+ /** Soft threshold linear function coefficient */
+ int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];
+ /** Soft threshold precision shift parameter */
+ int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1];
+ int weight_bpp; /** Weight map bits per pixel */
+};
+
+/**
+ * \brief HDR Deghosting Parameters
+ * \details Currently HDR parameters are used only for testing purposes
+ */
+struct ia_css_hdr_deghost_params {
+ int test_deg; /** Test parameter */
+};
+
+/**
+ * \brief HDR Exclusion Parameters
+ * \details Currently HDR parameters are used only for testing purposes
+ */
+struct ia_css_hdr_exclusion_params {
+ int test_excl; /** Test parameter */
+};
+
+/**
+ * \brief HDR public parameters.
+ * \details Struct with all parameters for HDR that can be set from
+ * the CSS API. Currently, only test parameters are defined.
+ */
+struct ia_css_hdr_config {
+ struct ia_css_hdr_irradiance_params irradiance; /** HDR irradiance parameters */
+ struct ia_css_hdr_deghost_params deghost; /** HDR deghosting parameters */
+ struct ia_css_hdr_exclusion_params exclusion; /** HDR exclusion parameters */
+};
+
+#endif /* __IA_CSS_HDR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
new file mode 100644
index 000000000000..bf71a7f661e6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
@@ -0,0 +1,93 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_bayer_io.host.h"
+#include "dma.h"
+#include "math_support.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "ia_css_isp_params.h"
+#include "ia_css_frame.h"
+
+void
+ia_css_bayer_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args)
+{
+ const struct ia_css_frame *in_frame = args->in_frame;
+ const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
+ &args->out_frame;
+ const struct ia_css_frame_info *in_frame_info = (in_frame) ? &in_frame->info :
+ &binary->in_frame_info;
+
+ const unsigned int ddr_bits_per_element = sizeof(short) * 8;
+ const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS,
+ ddr_bits_per_element);
+ unsigned int size_get = 0, size_put = 0;
+ unsigned int offset = 0;
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.get.offset;
+ }
+
+ if (size_get) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_bayer_io_config() get part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, in_frame_info);
+ /* The base_address of the input frame will be set in the ISP */
+ to->width = in_frame_info->res.width;
+ to->height = in_frame_info->res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_bayer_io_config() get part leave:\n");
+#endif
+ }
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_put = binary->info->mem_offsets.offsets.param->dmem.put.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.put.offset;
+ }
+
+ if (size_put) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_bayer_io_config() put part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, &out_frames[0]->info);
+ to->base_address = out_frames[0]->data;
+ to->width = out_frames[0]->info.res.width;
+ to->height = out_frames[0]->info.res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_bayer_io_config() put part leave:\n");
+#endif
+ }
+}
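
[Editor's note: the ddr_elems_per_word arithmetic above packs 16-bit Bayer elements into DDR words of HIVE_ISP_DDR_WORD_BITS bits. A minimal stand-alone sketch of that computation follows; the 256-bit word width is an assumption for illustration, the driver takes the real value from the hive ISP headers and ceil_div() from math_support.h.]

#include <stdio.h>

/* Assumed DDR word width for illustration only; the driver gets
 * the real value from the hive ISP headers. */
#define HIVE_ISP_DDR_WORD_BITS 256

/* Same rounding-up division the driver's ceil_div() performs. */
static unsigned int ceil_div(unsigned int a, unsigned int b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	const unsigned int ddr_bits_per_element = sizeof(short) * 8; /* 16 */
	const unsigned int ddr_elems_per_word =
		ceil_div(HIVE_ISP_DDR_WORD_BITS, ddr_bits_per_element);

	/* 256-bit word / 16-bit elements -> 16 elements per DDR word */
	printf("%u elements per DDR word\n", ddr_elems_per_word);
	return 0;
}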
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h
new file mode 100644
index 000000000000..f9db75a089af
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __BAYER_IO_HOST_H
+#define __BAYER_IO_HOST_H
+
+#include "ia_css_bayer_io_param.h"
+#include "ia_css_bayer_io_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+
+void
+ia_css_bayer_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args);
+
+#endif /* __BAYER_IO_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h
new file mode 100644
index 000000000000..77cfed002e14
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BAYER_IO_PARAM
+#define __IA_CSS_BAYER_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif /* __IA_CSS_BAYER_IO_PARAM */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h
new file mode 100644
index 000000000000..59b58f30af11
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BAYER_IO_TYPES_H
+#define __IA_CSS_BAYER_IO_TYPES_H
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif /* __IA_CSS_BAYER_IO_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h
new file mode 100644
index 000000000000..22aedcc4470f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h
@@ -0,0 +1,20 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_COMMON_IO_PARAM
+#define __IA_CSS_COMMON_IO_PARAM
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif /* __IA_CSS_COMMON_IO_PARAM */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h
new file mode 100644
index 000000000000..e49bd95f77da
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h
@@ -0,0 +1,29 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_COMMON_IO_TYPES
+#define __IA_CSS_COMMON_IO_TYPES
+
+#define MAX_IO_DMA_CHANNELS 3
+
+struct ia_css_common_io_config {
+ unsigned int base_address;
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+ unsigned int ddr_elems_per_word;
+ unsigned int dma_channel[MAX_IO_DMA_CHANNELS];
+};
+
+#endif /* __IA_CSS_COMMON_IO_TYPES */
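
[Editor's note: both the bayer and yuv444 host files fill this struct directly into the binary's DMEM parameter space. A stand-alone sketch of populating it follows; the frame numbers are invented for illustration.]

#include <stdio.h>

#define MAX_IO_DMA_CHANNELS 3

/* Mirrors ia_css_common_io_config above. */
struct ia_css_common_io_config {
	unsigned int base_address;
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int ddr_elems_per_word;
	unsigned int dma_channel[MAX_IO_DMA_CHANNELS];
};

int main(void)
{
	struct ia_css_common_io_config io = {
		.base_address = 0,       /* input side: patched later by the ISP */
		.width = 1920,
		.height = 1080,
		.stride = 3840,          /* bytes per line at 16 bits per element */
		.ddr_elems_per_word = 16 /* 256-bit DDR word / 16-bit elements */
	};

	printf("%ux%u stride=%u\n", io.width, io.height, io.stride);
	return 0;
}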
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
new file mode 100644
index 000000000000..ba490c5fc18e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
@@ -0,0 +1,93 @@
+/*
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#include "ia_css_yuv444_io.host.h"
+#include "dma.h"
+#include "math_support.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "ia_css_isp_params.h"
+#include "ia_css_frame.h"
+
+void
+ia_css_yuv444_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args)
+{
+ const struct ia_css_frame *in_frame = args->in_frame;
+ const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
+ &args->out_frame;
+ const struct ia_css_frame_info *in_frame_info = (in_frame) ? &in_frame->info :
+ &binary->in_frame_info;
+
+ const unsigned int ddr_bits_per_element = sizeof(short) * 8;
+ const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS,
+ ddr_bits_per_element);
+ unsigned int size_get = 0, size_put = 0;
+ unsigned int offset = 0;
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.get.offset;
+ }
+
+ if (size_get) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_yuv444_io_config() get part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, in_frame_info);
+ // The base_address of the input frame will be set in the ISP
+ to->width = in_frame_info->res.width;
+ to->height = in_frame_info->res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_yuv444_io_config() get part leave:\n");
+#endif
+ }
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_put = binary->info->mem_offsets.offsets.param->dmem.put.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.put.offset;
+ }
+
+ if (size_put) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_yuv444_io_config() put part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, &out_frames[0]->info);
+ to->base_address = out_frames[0]->data;
+ to->width = out_frames[0]->info.res.width;
+ to->height = out_frames[0]->info.res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_yuv444_io_config() put part leave:\n");
+#endif
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h
new file mode 100644
index 000000000000..556e53e05607
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h
@@ -0,0 +1,28 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __YUV444_IO_HOST_H
+#define __YUV444_IO_HOST_H
+
+#include "ia_css_yuv444_io_param.h"
+#include "ia_css_yuv444_io_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+
+void
+ia_css_yuv444_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args);
+
+#endif /* __YUV444_IO_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h
new file mode 100644
index 000000000000..1cc2aff57ef3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h
@@ -0,0 +1,20 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_YUV444_IO_PARAM
+#define __IA_CSS_YUV444_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif /* __IA_CSS_YUV444_IO_PARAM */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h
new file mode 100644
index 000000000000..990299a0d2c7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h
@@ -0,0 +1,20 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_YUV444_IO_TYPES
+#define __IA_CSS_YUV444_IO_TYPES
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif /* __IA_CSS_YUV444_IO_TYPES */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
new file mode 100644
index 000000000000..49c1b3e3370d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
@@ -0,0 +1,80 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_iterator.host.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+#include "ia_css_err.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+static const struct ia_css_iterator_configuration default_config = {
+ .input_info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_iterator_config(
+ struct sh_css_isp_iterator_isp_config *to,
+ const struct ia_css_iterator_configuration *from,
+ unsigned int size)
+{
+ (void)size;
+ ia_css_frame_info_to_frame_sp_info(&to->input_info, from->input_info);
+ ia_css_frame_info_to_frame_sp_info(&to->internal_info, from->internal_info);
+ ia_css_frame_info_to_frame_sp_info(&to->output_info, from->output_info);
+ ia_css_frame_info_to_frame_sp_info(&to->vf_info, from->vf_info);
+ ia_css_resolution_to_sp_resolution(&to->dvs_envelope, from->dvs_envelope);
+}
+
+enum ia_css_err
+ia_css_iterator_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info) {
+ struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ struct ia_css_iterator_configuration config = default_config;
+
+ config.input_info = &binary->in_frame_info;
+ config.internal_info = &binary->internal_frame_info;
+ config.output_info = &binary->out_frame_info[0];
+ config.vf_info = &binary->vf_frame_info;
+ config.dvs_envelope = &binary->dvs_envelope;
+
+ /* Use in_info instead of binary->in_frame_info.
+ * They can differ in padded width in case of scaling, e.g. for capture_pp.
+ * TODO: find out why this is.
+ */
+ if (in_info)
+ config.input_info = in_info;
+ if (binary->out_frame_info[0].res.width == 0)
+ config.output_info = &binary->out_frame_info[1];
+ my_info = *config.output_info;
+ config.output_info = &my_info;
+ /* We do this only for the preview pipe: fill_binary_info assigns the
+ * vf_out res to the out res, but ISP-internal processing needs the
+ * original out res. The video pipe has two output pins --- out and
+ * vf_out --- so it already keeps both resolutions. */
+ if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
+ binary->vf_downscale_log2 > 0)
+ {
+ /* TODO: Remove this after preview output decimation is fixed
+ * by configuring out&vf info files properly */
+ my_info.padded_width <<= binary->vf_downscale_log2;
+ my_info.res.width <<= binary->vf_downscale_log2;
+ my_info.res.height <<= binary->vf_downscale_log2;
+ }
+
+ ia_css_configure_iterator(binary, &config);
+
+ return IA_CSS_SUCCESS;
+}
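
[Editor's note: the preview-pipe branch above undoes viewfinder decimation by shifting the output resolution back up by vf_downscale_log2. A small sketch of that arithmetic follows; the struct is a simplified stand-in for ia_css_frame_info and the numbers are made up for illustration.]

#include <stdio.h>

/* Simplified stand-in for the resolution fields of ia_css_frame_info. */
struct frame_res {
	unsigned int width, height, padded_width;
};

int main(void)
{
	struct frame_res my_info = { 320, 180, 384 }; /* decimated vf output */
	unsigned int vf_downscale_log2 = 2;           /* 4x decimation */

	/* Same restore the iterator config applies for the preview pipe. */
	my_info.padded_width <<= vf_downscale_log2;
	my_info.width <<= vf_downscale_log2;
	my_info.height <<= vf_downscale_log2;

	/* 320x180 -> 1280x720: the original (pre-decimation) out res */
	printf("%ux%u (padded %u)\n", my_info.width, my_info.height,
	       my_info.padded_width);
	return 0;
}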
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h
new file mode 100644
index 000000000000..c5e8d58e0fe1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ITERATOR_HOST_H
+#define __IA_CSS_ITERATOR_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+#include "ia_css_err.h"
+#include "ia_css_iterator_param.h"
+
+void
+ia_css_iterator_config(
+ struct sh_css_isp_iterator_isp_config *to,
+ const struct ia_css_iterator_configuration *from,
+ unsigned int size);
+
+enum ia_css_err
+ia_css_iterator_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info);
+
+#endif /* __IA_CSS_ITERATOR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h
new file mode 100644
index 000000000000..d308126e41d3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ITERATOR_PARAM_H
+#define __IA_CSS_ITERATOR_PARAM_H
+
+#include "ia_css_types.h" /* ia_css_resolution */
+#include "ia_css_frame_public.h" /* ia_css_frame_info */
+#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
+
+struct ia_css_iterator_configuration {
+ const struct ia_css_frame_info *input_info;
+ const struct ia_css_frame_info *internal_info;
+ const struct ia_css_frame_info *output_info;
+ const struct ia_css_frame_info *vf_info;
+ const struct ia_css_resolution *dvs_envelope;
+};
+
+struct sh_css_isp_iterator_isp_config {
+ struct ia_css_frame_sp_info input_info;
+ struct ia_css_frame_sp_info internal_info;
+ struct ia_css_frame_sp_info output_info;
+ struct ia_css_frame_sp_info vf_info;
+ struct ia_css_sp_resolution dvs_envelope;
+};
+
+#endif /* __IA_CSS_ITERATOR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
new file mode 100644
index 000000000000..7a6abe0c5b7d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
@@ -0,0 +1,74 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+
+#include "ia_css_macc1_5.host.h"
+
+const struct ia_css_macc1_5_config default_macc1_5_config = {
+ 1
+};
+
+void
+ia_css_macc1_5_encode(
+ struct sh_css_isp_macc1_5_params *to,
+ const struct ia_css_macc1_5_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->exp = from->exp;
+}
+
+void
+ia_css_macc1_5_vmem_encode(
+ struct sh_css_isp_macc1_5_vmem_params *params,
+ const struct ia_css_macc1_5_table *from,
+ unsigned int size)
+{
+ unsigned int i, j, k, idx;
+ unsigned int idx_map[] = {
+ 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
+ };
+
+ (void)size;
+
+ for (k = 0; k < 4; k++)
+ for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) {
+ idx = idx_map[i] + (k * IA_CSS_MACC_NUM_AXES);
+ j = 4 * i;
+
+ params->data[0][(idx)] = from->data[j];
+ params->data[1][(idx)] = from->data[j + 1];
+ params->data[2][(idx)] = from->data[j + 2];
+ params->data[3][(idx)] = from->data[j + 3];
+ }
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_macc1_5_debug_dtrace(
+ const struct ia_css_macc1_5_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.exp=%d\n",
+ config->exp);
+}
+#endif
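
[Editor's note: the vmem encode above scatters the 16 axes through idx_map, presumably to match the ISP's vector-memory lane ordering, and replicates the pattern across four lanes (k). A minimal sketch of the index arithmetic follows, with plain arrays standing in for the VMEM_ARRAY storage.]

#include <stdio.h>

#define NUM_AXES 16

int main(void)
{
	static const unsigned int idx_map[NUM_AXES] = {
		0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
	};
	unsigned int i, k;

	/* Print where axis i of lane k ends up in the vmem row,
	 * mirroring idx = idx_map[i] + (k * IA_CSS_MACC_NUM_AXES). */
	for (k = 0; k < 4; k++)
		for (i = 0; i < NUM_AXES; i++)
			printf("lane %u axis %2u -> slot %2u\n",
			       k, i, idx_map[i] + k * NUM_AXES);
	return 0;
}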
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h
new file mode 100644
index 000000000000..ae9ede2b685a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC1_5_HOST_H
+#define __IA_CSS_MACC1_5_HOST_H
+
+#include "ia_css_macc1_5_param.h"
+#include "ia_css_macc1_5_table.host.h"
+
+extern const struct ia_css_macc1_5_config default_macc1_5_config;
+
+void
+ia_css_macc1_5_encode(
+ struct sh_css_isp_macc1_5_params *to,
+ const struct ia_css_macc1_5_config *from,
+ unsigned int size);
+
+void
+ia_css_macc1_5_vmem_encode(
+ struct sh_css_isp_macc1_5_vmem_params *params,
+ const struct ia_css_macc1_5_table *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_macc1_5_debug_dtrace(
+ const struct ia_css_macc1_5_config *config,
+ unsigned int level);
+#endif
+#endif /* __IA_CSS_MACC1_5_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h
new file mode 100644
index 000000000000..497ad89ab728
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC1_5_PARAM_H
+#define __IA_CSS_MACC1_5_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h"
+#include "ia_css_macc1_5_types.h"
+
+/* MACC */
+struct sh_css_isp_macc1_5_params {
+ s32 exp;
+};
+
+struct sh_css_isp_macc1_5_vmem_params {
+ VMEM_ARRAY(data, IA_CSS_MACC_NUM_COEFS * ISP_NWAY);
+};
+
+#endif /* __IA_CSS_MACC1_5_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c
new file mode 100644
index 000000000000..c094f3df10aa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+#include "ia_css_types.h"
+#include "ia_css_macc1_5_table.host.h"
+
+/* Multi-Axes Color Correction table for ISP2.
+ * 64 values = 2x2 matrix for 16 areas, [s1.12]
+ * ineffective: 16 of the "identity 2x2 matrix" {4096,0,0,4096}
+ */
+const struct ia_css_macc1_5_table default_macc1_5_table = {
+ {
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096
+ }
+};
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h
new file mode 100644
index 000000000000..10a50aa82be8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC1_5_TABLE_HOST_H
+#define __IA_CSS_MACC1_5_TABLE_HOST_H
+
+#include "macc/macc1_5/ia_css_macc1_5_types.h"
+
+extern const struct ia_css_macc1_5_table default_macc1_5_table;
+
+#endif /* __IA_CSS_MACC1_5_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
new file mode 100644
index 000000000000..9aa352cbcffc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
@@ -0,0 +1,73 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC1_5_TYPES_H
+#define __IA_CSS_MACC1_5_TYPES_H
+
+/* @file
+* CSS-API header file for Multi-Axis Color Correction (MACC) algorithm parameters.
+*/
+
+/* Multi-Axis Color Correction configuration
+ *
+ * ISP2.6.1: MACC1_5 is used.
+ */
+
+/* Number of axes in the MACC table. */
+#define IA_CSS_MACC_NUM_AXES 16
+/* Number of coefficients per MACC axes. */
+#define IA_CSS_MACC_NUM_COEFS 4
+
+/* Multi-Axes Color Correction (MACC) table.
+ *
+ * ISP block: MACC (MACC by only matrix)
+ * MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
+ * ISP1: MACC is used.
+ * ISP2: MACC1_5 is used.
+ *
+ * [MACC]
+ * OutU = (data00 * InU + data01 * InV) >> 13
+ * OutV = (data10 * InU + data11 * InV) >> 13
+ *
+ * default/ineffective:
+ * OutU = (8192 * InU + 0 * InV) >> 13
+ * OutV = ( 0 * InU + 8192 * InV) >> 13
+ *
+ * [MACC1_5]
+ * OutU = (data00 * InU + data01 * InV) >> (13 - exp)
+ * OutV = (data10 * InU + data11 * InV) >> (13 - exp)
+ *
+ * default/ineffective: (exp=1)
+ * OutU = (4096 * InU + 0 * InV) >> (13 - 1)
+ * OutV = ( 0 * InU + 4096 * InV) >> (13 - 1)
+ */
+struct ia_css_macc1_5_table {
+ s16 data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
+ /** 16 of 2x2 matrix
+ MACC1_5: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
+ default/ineffective: (s1.12)
+ 16 of "identity 2x2 matrix" {4096,0,0,4096} */
+};
+
+/* Multi-Axes Color Correction (MACC) configuration.
+ *
+ * ISP block: MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
+ * ISP2: MACC1_5 is used.
+ */
+struct ia_css_macc1_5_config {
+ u8 exp; /** Common exponent of ia_css_macc_table.
+ u8.0, [0,13], default 1, ineffective 1 */
+};
+
+#endif /* __IA_CSS_MACC1_5_TYPES_H */
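
[Editor's note: to make the fixed-point formula above concrete: with the default exp=1 and the identity coefficients {4096,0,0,4096}, the MACC1_5 transform reduces to a pass-through. A worked sketch follows; an arithmetic right shift for negative values is assumed, as on the ISP.]

#include <stdio.h>

/* MACC1_5: OutU = (d00*InU + d01*InV) >> (13 - exp), per the header above. */
static int macc1_5(int d00, int d01, int in_u, int in_v, int exp)
{
	return (d00 * in_u + d01 * in_v) >> (13 - exp);
}

int main(void)
{
	int in_u = 100, in_v = -50, exp = 1;

	/* Identity row {4096, 0}: (4096*100 + 0) >> 12 == 100 */
	printf("OutU = %d\n", macc1_5(4096, 0, in_u, in_v, exp));
	/* Identity row {0, 4096}: (0 + 4096*-50) >> 12 == -50 */
	printf("OutV = %d\n", macc1_5(0, 4096, in_u, in_v, exp));
	return 0;
}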
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c
new file mode 100644
index 000000000000..0b1d1bf5e8a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_macc.host.h"
+
+const struct ia_css_macc_config default_macc_config = {
+ 1,
+};
+
+void
+ia_css_macc_encode(
+ struct sh_css_isp_macc_params *to,
+ const struct ia_css_macc_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->exp = from->exp;
+}
+
+void
+ia_css_macc_dump(
+ const struct sh_css_isp_macc_params *macc,
+ unsigned int level);
+
+void
+ia_css_macc_debug_dtrace(
+ const struct ia_css_macc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.exp=%d\n",
+ config->exp);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h
new file mode 100644
index 000000000000..0e13e9cb0547
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC_HOST_H
+#define __IA_CSS_MACC_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_macc_param.h"
+#include "ia_css_macc_table.host.h"
+
+extern const struct ia_css_macc_config default_macc_config;
+
+void
+ia_css_macc_encode(
+ struct sh_css_isp_macc_params *to,
+ const struct ia_css_macc_config *from,
+ unsigned int size);
+
+void
+ia_css_macc_dump(
+ const struct sh_css_isp_macc_params *macc,
+ unsigned int level);
+
+void
+ia_css_macc_debug_dtrace(
+ const struct ia_css_macc_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_MACC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h
new file mode 100644
index 000000000000..3b4e440c3c30
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC_PARAM_H
+#define __IA_CSS_MACC_PARAM_H
+
+#include "type_support.h"
+
+/* MACC */
+struct sh_css_isp_macc_params {
+ s32 exp;
+};
+
+#endif /* __IA_CSS_MACC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
new file mode 100644
index 000000000000..f9a430da54b8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
@@ -0,0 +1,51 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+#include "ia_css_types.h"
+#include "ia_css_macc_table.host.h"
+
+/* Multi-Axes Color Correction table for ISP1.
+ * 64 values = 2x2 matrix for 16 areas, [s2.13]
+ * ineffective: 16 of the "identity 2x2 matrix" {8192,0,0,8192}
+ */
+const struct ia_css_macc_table default_macc_table = {
+ {
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192
+ }
+};
+
+/* Multi-Axes Color Correction table for ISP2.
+ * 64 values = 2x2 matrix for 16 areas, [s1.12]
+ * ineffective: 16 of the "identity 2x2 matrix" {4096,0,0,4096}
+ */
+const struct ia_css_macc_table default_macc2_table = {
+ {
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096
+ }
+};
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h
new file mode 100644
index 000000000000..96d62c9912b8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC_TABLE_HOST_H
+#define __IA_CSS_MACC_TABLE_HOST_H
+
+#include "ia_css_macc_types.h"
+
+extern const struct ia_css_macc_table default_macc_table;
+extern const struct ia_css_macc_table default_macc2_table;
+
+#endif /* __IA_CSS_MACC_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
new file mode 100644
index 000000000000..093302f08bca
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC_TYPES_H
+#define __IA_CSS_MACC_TYPES_H
+
+/* @file
+* CSS-API header file for Multi-Axis Color Correction (MACC) parameters.
+*/
+
+/* Number of axes in the MACC table. */
+#define IA_CSS_MACC_NUM_AXES 16
+/* Number of coefficients per MACC axes. */
+#define IA_CSS_MACC_NUM_COEFS 4
+
+/* Multi-Axis Color Correction (MACC) table.
+ *
+ * ISP block: MACC1 (MACC by only matrix)
+ * MACC2 (MACC by matrix and exponent(ia_css_macc_config))
+ * ISP1: MACC1 is used.
+ * ISP2: MACC2 is used.
+ *
+ * [MACC1]
+ * OutU = (data00 * InU + data01 * InV) >> 13
+ * OutV = (data10 * InU + data11 * InV) >> 13
+ *
+ * default/ineffective:
+ * OutU = (8192 * InU + 0 * InV) >> 13
+ * OutV = ( 0 * InU + 8192 * InV) >> 13
+ *
+ * [MACC2]
+ * OutU = (data00 * InU + data01 * InV) >> (13 - exp)
+ * OutV = (data10 * InU + data11 * InV) >> (13 - exp)
+ *
+ * default/ineffective: (exp=1)
+ * OutU = (4096 * InU + 0 * InV) >> (13 - 1)
+ * OutV = ( 0 * InU + 4096 * InV) >> (13 - 1)
+ */
+
+struct ia_css_macc_table {
+ s16 data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
+ /** 16 of 2x2 matrix
+ MACC1: s2.13, [-65536,65535]
+ default/ineffective:
+ 16 of "identity 2x2 matrix" {8192,0,0,8192}
+ MACC2: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
+ default/ineffective: (s1.12)
+ 16 of "identity 2x2 matrix" {4096,0,0,4096} */
+};
+
+#endif /* __IA_CSS_MACC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c
new file mode 100644
index 000000000000..102dc6feb6d1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c
@@ -0,0 +1,15 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_norm.host.h"
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h
new file mode 100644
index 000000000000..42b5143ef78f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_NORM_HOST_H
+#define __IA_CSS_NORM_HOST_H
+
+#include "ia_css_norm_param.h"
+
+#endif /* __IA_CSS_NORM_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h
new file mode 100644
index 000000000000..d432e2e39df6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h
@@ -0,0 +1,18 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_NORM_PARAM_H
+#define __IA_CSS_NORM_PARAM_H
+
+#endif /* __IA_CSS_NORM_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c
new file mode 100644
index 000000000000..f7403ce16c99
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c
@@ -0,0 +1,76 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "sh_css_frac.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "isp.h"
+#include "ia_css_ob2.host.h"
+
+const struct ia_css_ob2_config default_ob2_config = {
+ 0,
+ 0,
+ 0,
+ 0
+};
+
+void
+ia_css_ob2_encode(
+ struct sh_css_isp_ob2_params *to,
+ const struct ia_css_ob2_config *from,
+ unsigned int size)
+{
+ (void)size;
+
+ /* Black levels are of type ia_css_u0_16 */
+ to->blacklevel_gr = uDIGIT_FITTING(from->level_gr, 16, SH_CSS_BAYER_BITS);
+ to->blacklevel_r = uDIGIT_FITTING(from->level_r, 16, SH_CSS_BAYER_BITS);
+ to->blacklevel_b = uDIGIT_FITTING(from->level_b, 16, SH_CSS_BAYER_BITS);
+ to->blacklevel_gb = uDIGIT_FITTING(from->level_gb, 16, SH_CSS_BAYER_BITS);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ob2_dump(
+ const struct sh_css_isp_ob2_params *ob2,
+ unsigned int level)
+{
+ if (!ob2)
+ return;
+
+ ia_css_debug_dtrace(level, "Optical Black 2:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_gr", ob2->blacklevel_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_r", ob2->blacklevel_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_b", ob2->blacklevel_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_gb", ob2->blacklevel_gb);
+}
+
+void
+ia_css_ob2_debug_dtrace(
+ const struct ia_css_ob2_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.level_gr=%d, config.level_r=%d, config.level_b=%d, config.level_gb=%d, ",
+ config->level_gr, config->level_r,
+ config->level_b, config->level_gb);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h
new file mode 100644
index 000000000000..936f6a08a174
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB2_HOST_H
+#define __IA_CSS_OB2_HOST_H
+
+#include "ia_css_ob2_types.h"
+#include "ia_css_ob2_param.h"
+
+extern const struct ia_css_ob2_config default_ob2_config;
+
+void
+ia_css_ob2_encode(
+ struct sh_css_isp_ob2_params *to,
+ const struct ia_css_ob2_config *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ob2_dump(
+ const struct sh_css_isp_ob2_params *ob2,
+ unsigned int level);
+
+void
+ia_css_ob2_debug_dtrace(
+ const struct ia_css_ob2_config *config, unsigned int level);
+#endif
+
+#endif /* __IA_CSS_OB2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h
new file mode 100644
index 000000000000..c728f8791ef4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB2_PARAM_H
+#define __IA_CSS_OB2_PARAM_H
+
+#include "type_support.h"
+
+/* OB2 (Optical Black) */
+struct sh_css_isp_ob2_params {
+ s32 blacklevel_gr;
+ s32 blacklevel_r;
+ s32 blacklevel_b;
+ s32 blacklevel_gb;
+};
+
+#endif /* __IA_CSS_OB2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h
new file mode 100644
index 000000000000..0ccc09f6eb0f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB2_TYPES_H
+#define __IA_CSS_OB2_TYPES_H
+
+/* @file
+* CSS-API header file for Optical Black algorithm parameters.
+*/
+
+/* Optical Black configuration
+ *
+ * ISP2.6.1: OB2 is used.
+ */
+
+#include "ia_css_frac.h"
+
+struct ia_css_ob2_config {
+ ia_css_u0_16 level_gr; /** Black level for GR pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_r; /** Black level for R pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_b; /** Black level for B pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_gb; /** Black level for GB pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+};
+
+#endif /* __IA_CSS_OB2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c
new file mode 100644
index 000000000000..6367d94275fb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c
@@ -0,0 +1,154 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "isp.h"
+
+#include "ia_css_ob.host.h"
+
+const struct ia_css_ob_config default_ob_config = {
+ IA_CSS_OB_MODE_NONE,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+};
+
+/* TODO: include ob.isp.h to pick up ISP knowledge and
+ add asserts for the platform restrictions */
+
+void
+ia_css_ob_configure(
+ struct sh_css_isp_ob_stream_config *config,
+ unsigned int isp_pipe_version,
+ unsigned int raw_bit_depth)
+{
+ config->isp_pipe_version = isp_pipe_version;
+ config->raw_bit_depth = raw_bit_depth;
+}
+
+void
+ia_css_ob_encode(
+ struct sh_css_isp_ob_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned int size)
+{
+ unsigned int ob_bit_depth
+ = config->isp_pipe_version == 2 ? SH_CSS_BAYER_BITS : config->raw_bit_depth;
+ unsigned int scale = 16 - ob_bit_depth;
+
+ (void)size;
+ switch (from->mode) {
+ case IA_CSS_OB_MODE_FIXED:
+ to->blacklevel_gr = from->level_gr >> scale;
+ to->blacklevel_r = from->level_r >> scale;
+ to->blacklevel_b = from->level_b >> scale;
+ to->blacklevel_gb = from->level_gb >> scale;
+ to->area_start_bq = 0;
+ to->area_length_bq = 0;
+ to->area_length_bq_inverse = 0;
+ break;
+ case IA_CSS_OB_MODE_RASTER:
+ to->blacklevel_gr = 0;
+ to->blacklevel_r = 0;
+ to->blacklevel_b = 0;
+ to->blacklevel_gb = 0;
+ to->area_start_bq = from->start_position;
+ to->area_length_bq =
+ (from->end_position - from->start_position) + 1;
+ to->area_length_bq_inverse = AREA_LENGTH_UNIT / to->area_length_bq;
+ break;
+ default:
+ to->blacklevel_gr = 0;
+ to->blacklevel_r = 0;
+ to->blacklevel_b = 0;
+ to->blacklevel_gb = 0;
+ to->area_start_bq = 0;
+ to->area_length_bq = 0;
+ to->area_length_bq_inverse = 0;
+ break;
+ }
+}
+
+void
+ia_css_ob_vmem_encode(
+ struct sh_css_isp_ob_vmem_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned int size)
+{
+ struct sh_css_isp_ob_params tmp;
+ struct sh_css_isp_ob_params *ob = &tmp;
+
+ (void)size;
+ ia_css_ob_encode(&tmp, from, config, sizeof(tmp));
+
+ {
+ unsigned int i;
+ unsigned int sp_obarea_start_bq = ob->area_start_bq;
+ unsigned int sp_obarea_length_bq = ob->area_length_bq;
+ unsigned int low = sp_obarea_start_bq;
+ unsigned int high = low + sp_obarea_length_bq;
+ u16 all_ones = ~0;
+
+ for (i = 0; i < OBAREA_MASK_SIZE; i++) {
+ if (i >= low && i < high)
+ to->vmask[i / ISP_VEC_NELEMS][i % ISP_VEC_NELEMS] = all_ones;
+ else
+ to->vmask[i / ISP_VEC_NELEMS][i % ISP_VEC_NELEMS] = 0;
+ }
+ }
+}
+
+void
+ia_css_ob_dump(
+ const struct sh_css_isp_ob_params *ob,
+ unsigned int level)
+{
+ if (!ob)
+ return;
+
+ ia_css_debug_dtrace(level, "Optical Black:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_gr", ob->blacklevel_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_r", ob->blacklevel_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_b", ob->blacklevel_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_gb", ob->blacklevel_gb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "obarea_start_bq", ob->area_start_bq);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "obarea_length_bq", ob->area_length_bq);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "obarea_length_bq_inverse",
+ ob->area_length_bq_inverse);
+}
+
+void
+ia_css_ob_debug_dtrace(
+ const struct ia_css_ob_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.mode=%d, config.level_gr=%d, config.level_r=%d, config.level_b=%d, config.level_gb=%d, config.start_position=%d, config.end_position=%d\n",
+ config->mode,
+ config->level_gr, config->level_r,
+ config->level_b, config->level_gb,
+ config->start_position, config->end_position);
+}
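
[Editor's note: in raster mode above, area_length_bq_inverse precomputes AREA_LENGTH_UNIT / length so the ISP can later average the OB rows with a multiply and shift instead of a divide. A small sketch of that raster-mode arithmetic follows; the row numbers are invented for illustration.]

#include <stdio.h>

#define AREA_LENGTH_UNIT (1 << 12) /* matches ia_css_ob_param.h */

int main(void)
{
	unsigned int start = 2, end = 9; /* raster OB area, inclusive rows */
	unsigned int length = (end - start) + 1;
	unsigned int inverse = AREA_LENGTH_UNIT / length;

	/* An ISP-side mean of a row sum then becomes
	 * (sum * inverse) >> 12 rather than sum / length. */
	printf("length_bq=%u inverse=%u\n", length, inverse);
	return 0;
}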
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h
new file mode 100644
index 000000000000..d767c5856880
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB_HOST_H
+#define __IA_CSS_OB_HOST_H
+
+#include "ia_css_ob_types.h"
+#include "ia_css_ob_param.h"
+
+extern const struct ia_css_ob_config default_ob_config;
+
+void
+ia_css_ob_configure(
+ struct sh_css_isp_ob_stream_config *config,
+ unsigned int isp_pipe_version,
+ unsigned int raw_bit_depth);
+
+void
+ia_css_ob_encode(
+ struct sh_css_isp_ob_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned int size);
+
+void
+ia_css_ob_vmem_encode(
+ struct sh_css_isp_ob_vmem_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned int size);
+
+void
+ia_css_ob_dump(
+ const struct sh_css_isp_ob_params *ob,
+ unsigned int level);
+
+void
+ia_css_ob_debug_dtrace(
+ const struct ia_css_ob_config *config, unsigned int level);
+
+#endif /* __IA_CSS_OB_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h
new file mode 100644
index 000000000000..f5c3e14a1a8a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB_PARAM_H
+#define __IA_CSS_OB_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h"
+
+#define OBAREA_MASK_SIZE 64
+#define OBAREA_LENGTHBQ_INVERSE_SHIFT 12
+
+/* AREA_LENGTH_UNIT is dependent on NWAY, requires rewrite */
+#define AREA_LENGTH_UNIT BIT(12)
+
+/* OB (Optical Black) */
+struct sh_css_isp_ob_stream_config {
+ unsigned int isp_pipe_version;
+ unsigned int raw_bit_depth;
+};
+
+struct sh_css_isp_ob_params {
+ s32 blacklevel_gr;
+ s32 blacklevel_r;
+ s32 blacklevel_b;
+ s32 blacklevel_gb;
+ s32 area_start_bq;
+ s32 area_length_bq;
+ s32 area_length_bq_inverse;
+};
+
+struct sh_css_isp_ob_vmem_params {
+ VMEM_ARRAY(vmask, OBAREA_MASK_SIZE);
+};
+
+#endif /* __IA_CSS_OB_PARAM_H */
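
The area_length_bq_inverse field exists so that the ISP can average over the
optical-black area without a per-pixel division: the host precomputes a
fixed-point reciprocal of the area length, scaled by AREA_LENGTH_UNIT
(1 << 12), and the ISP multiplies by it and shifts right by
OBAREA_LENGTHBQ_INVERSE_SHIFT. A minimal sketch of that precomputation,
assuming a non-zero raster-area length (the helper name is ours, not the
driver's):

	/* Sketch: "sum / area_length_bq" becomes
	 * "(sum * area_length_bq_inverse) >> OBAREA_LENGTHBQ_INVERSE_SHIFT".
	 */
	static inline s32 ob_area_length_inverse(s32 area_length_bq)
	{
		return area_length_bq ? AREA_LENGTH_UNIT / area_length_bq : 0;
	}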
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
new file mode 100644
index 000000000000..317b24e240d8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
@@ -0,0 +1,68 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB_TYPES_H
+#define __IA_CSS_OB_TYPES_H
+
+/* @file
+ * CSS-API header file for Optical Black level parameters.
+ */
+
+#include "ia_css_frac.h"
+
+/* Optical black mode.
+ */
+enum ia_css_ob_mode {
+ IA_CSS_OB_MODE_NONE, /** OB has no effect. */
+ IA_CSS_OB_MODE_FIXED, /** Fixed OB */
+ IA_CSS_OB_MODE_RASTER /** Raster OB */
+};
+
+/* Optical Black level configuration.
+ *
+ * ISP block: OB1
+ * ISP1: OB1 is used.
+ * ISP2: OB1 is used.
+ */
+struct ia_css_ob_config {
+ enum ia_css_ob_mode mode; /** Mode (None / Fixed / Raster).
+ enum, [0,2],
+ default 1, ineffective 0 */
+ ia_css_u0_16 level_gr; /** Black level for GR pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_r; /** Black level for R pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_b; /** Black level for B pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_gb; /** Black level for GB pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ u16 start_position; /** Start position of OB area
+ (used for Raster Mode only).
+ u16.0, [0,63],
+ default/ineffective 0 */
+ u16 end_position; /** End position of OB area
+ (used for Raster Mode only).
+ u16.0, [0,63],
+ default/ineffective 0 */
+};
+
+#endif /* __IA_CSS_OB_TYPES_H */
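
Only a subset of the fields is consumed per mode: the four per-channel levels
apply to IA_CSS_OB_MODE_FIXED, while start_position/end_position apply to
IA_CSS_OB_MODE_RASTER. A hypothetical fixed-mode configuration (the values
are purely illustrative, not recommended tunings):

	/* Subtract a flat black level of 4096 (u0.16, i.e. 1/16 of full
	 * scale) from all four Bayer channels; raster-only fields stay 0.
	 */
	static const struct ia_css_ob_config example_fixed_ob = {
		.mode     = IA_CSS_OB_MODE_FIXED,
		.level_gr = 4096,
		.level_r  = 4096,
		.level_b  = 4096,
		.level_gb = 4096,
	};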
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c
new file mode 100644
index 000000000000..df4cb9c362a4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c
@@ -0,0 +1,163 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_frame.h"
+#include "ia_css_debug.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "ia_css_output.host.h"
+#include "isp.h"
+
+#include "assert_support.h"
+
+const struct ia_css_output_config default_output_config = {
+ 0,
+ 0
+};
+
+static const struct ia_css_output_configuration default_output_configuration = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+static const struct ia_css_output0_configuration default_output0_configuration
+ = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+static const struct ia_css_output1_configuration default_output1_configuration
+ = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_output_encode(
+ struct sh_css_isp_output_params *to,
+ const struct ia_css_output_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->enable_hflip = from->enable_hflip;
+ to->enable_vflip = from->enable_vflip;
+}
+
+void
+ia_css_output_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, from->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->height = from->info ? from->info->res.height : 0;
+ to->enable = from->info != NULL;
+ ia_css_frame_info_to_frame_sp_info(&to->info, from->info);
+
+ /* Assume divisibility here; may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_output0_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output0_configuration *from,
+ unsigned int size)
+{
+ ia_css_output_config(
+ to, (const struct ia_css_output_configuration *)from, size);
+}
+
+void
+ia_css_output1_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output1_configuration *from,
+ unsigned int size)
+{
+ ia_css_output_config(
+ to, (const struct ia_css_output_configuration *)from, size);
+}
+
+void
+ia_css_output_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ if (info) {
+ struct ia_css_output_configuration config =
+ default_output_configuration;
+
+ config.info = info;
+
+ ia_css_configure_output(binary, &config);
+ }
+}
+
+void
+ia_css_output0_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ if (info) {
+ struct ia_css_output0_configuration config =
+ default_output0_configuration;
+
+ config.info = info;
+
+ ia_css_configure_output0(binary, &config);
+ }
+}
+
+void
+ia_css_output1_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ if (info) {
+ struct ia_css_output1_configuration config =
+ default_output1_configuration;
+
+ config.info = info;
+
+ ia_css_configure_output1(binary, &config);
+ }
+}
+
+void
+ia_css_output_dump(
+ const struct sh_css_isp_output_params *output,
+ unsigned int level)
+{
+ if (!output)
+ return;
+ ia_css_debug_dtrace(level, "Horizontal Output Flip:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "enable", output->enable_hflip);
+ ia_css_debug_dtrace(level, "Vertical Output Flip:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "enable", output->enable_vflip);
+}
+
+void
+ia_css_output_debug_dtrace(
+ const struct ia_css_output_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.enable_hflip=%d\n",
+ config->enable_hflip);
+ ia_css_debug_dtrace(level,
+ "config.enable_vflip=%d\n",
+ config->enable_vflip);
+}
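
Note how ia_css_output_config() fixes the ratio between the ISP vector width
and the DMA port width and asserts that the division is exact: if
ISP_VEC_NELEMS is 64 (the value is build-specific) and the DMA port carries
32 elements per word, width_a_over_b is 2; a port width that does not divide
the vector width trips the assert, which is why the in-code comment flags
fixed point as a possible generalization.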
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h
new file mode 100644
index 000000000000..3d8f61c225cf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h
@@ -0,0 +1,75 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OUTPUT_HOST_H
+#define __IA_CSS_OUTPUT_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+
+#include "ia_css_output_types.h"
+#include "ia_css_output_param.h"
+
+extern const struct ia_css_output_config default_output_config;
+
+void
+ia_css_output_encode(
+ struct sh_css_isp_output_params *to,
+ const struct ia_css_output_config *from,
+ unsigned int size);
+
+void
+ia_css_output_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output_configuration *from,
+ unsigned int size);
+
+void
+ia_css_output0_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output0_configuration *from,
+ unsigned int size);
+
+void
+ia_css_output1_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output1_configuration *from,
+ unsigned int size);
+
+void
+ia_css_output_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+ia_css_output0_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+ia_css_output1_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+ia_css_output_dump(
+ const struct sh_css_isp_output_params *output,
+ unsigned int level);
+
+void
+ia_css_output_debug_dtrace(
+ const struct ia_css_output_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_OUTPUT_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h
new file mode 100644
index 000000000000..3a63eee58cb6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OUTPUT_PARAM_H
+#define __IA_CSS_OUTPUT_PARAM_H
+
+#include <type_support.h>
+#include "dma.h"
+#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
+
+/* output frame */
+struct sh_css_isp_output_isp_config {
+ u32 width_a_over_b;
+ u32 height;
+ u32 enable;
+ struct ia_css_frame_sp_info info;
+ struct dma_port_config port_b;
+};
+
+struct sh_css_isp_output_params {
+ u8 enable_hflip;
+ u8 enable_vflip;
+};
+
+#endif /* __IA_CSS_OUTPUT_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h
new file mode 100644
index 000000000000..3248bc3fd6c3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OUTPUT_TYPES_H
+#define __IA_CSS_OUTPUT_TYPES_H
+
+/* @file
+ * CSS-API header file for parameters of output frames.
+ */
+
+/* Output frame
+ *
+ * ISP block: output frame
+ */
+
+//#include "ia_css_frame_public.h"
+struct ia_css_frame_info;
+
+struct ia_css_output_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+struct ia_css_output0_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+struct ia_css_output1_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+struct ia_css_output_config {
+ u8 enable_hflip; /** enable horizontal output mirroring */
+ u8 enable_vflip; /** enable vertical output mirroring */
+};
+
+#endif /* __IA_CSS_OUTPUT_TYPES_H */
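
Since ia_css_output_encode() is a plain copy of these two flags into the ISP
parameter struct, mirroring the output frame reduces to a few lines. A usage
sketch (variable names are ours):

	struct ia_css_output_config cfg = default_output_config;
	struct sh_css_isp_output_params params;

	cfg.enable_hflip = 1;	/* mirror horizontally */
	cfg.enable_vflip = 0;
	ia_css_output_encode(&params, &cfg, sizeof(params));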
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
new file mode 100644
index 000000000000..3de108b56005
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
@@ -0,0 +1,61 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_frame.h"
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+#include "ia_css_qplane.host.h"
+
+static const struct ia_css_qplane_configuration default_config = {
+ .pipe = (struct sh_css_sp_pipeline *)NULL,
+};
+
+void
+ia_css_qplane_config(
+ struct sh_css_isp_qplane_isp_config *to,
+ const struct ia_css_qplane_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, from->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+
+ /* Assume divisibility here; may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+
+ to->inout_port_config = from->pipe->inout_port_config;
+ to->format = from->info->format;
+}
+
+void
+ia_css_qplane_configure(
+ const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_qplane_configuration config = default_config;
+
+ config.pipe = pipe;
+ config.info = info;
+
+ ia_css_configure_qplane(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h
new file mode 100644
index 000000000000..ad6d7ca783e4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_QPLANE_HOST_H
+#define __IA_CSS_QPLANE_HOST_H
+
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+
+#if 0
+/* Cannot be included, since sh_css_internal.h is too generic
+ * e.g. for FW generation.
+*/
+#include "sh_css_internal.h" /* sh_css_sp_pipeline */
+#endif
+
+#include "ia_css_qplane_types.h"
+#include "ia_css_qplane_param.h"
+
+void
+ia_css_qplane_config(
+ struct sh_css_isp_qplane_isp_config *to,
+ const struct ia_css_qplane_configuration *from,
+ unsigned int size);
+
+void
+ia_css_qplane_configure(
+ const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+#endif /* __IA_CSS_QPLANE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h
new file mode 100644
index 000000000000..87898d2df2de
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_QPLANE_PARAM_H
+#define __IA_CSS_QPLANE_PARAM_H
+
+#include <type_support.h>
+#include "dma.h"
+
+/* qplane channel */
+struct sh_css_isp_qplane_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+ u32 inout_port_config;
+ u32 input_needs_raw_binning;
+ u32 format; /* enum ia_css_frame_format */
+};
+
+#endif /* __IA_CSS_QPLANE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
new file mode 100644
index 000000000000..b7ecd8f40c1c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_QPLANE_TYPES_H
+#define __IA_CSS_QPLANE_TYPES_H
+
+#include <ia_css_frame_public.h>
+#include "sh_css_internal.h"
+
+/* qplane frame
+ *
+ * ISP block: qplane frame
+ */
+
+struct ia_css_qplane_configuration {
+ const struct sh_css_sp_pipeline *pipe;
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_QPLANE_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
new file mode 100644
index 000000000000..1a85f20770c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
@@ -0,0 +1,135 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_frame.h"
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+#include "isp/modes/interface/isp_types.h"
+
+#include "ia_css_raw.host.h"
+
+static const struct ia_css_raw_configuration default_config = {
+ .pipe = (struct sh_css_sp_pipeline *)NULL,
+};
+
+static inline unsigned
+sh_css_elems_bytes_from_info(unsigned int raw_bit_depth)
+{
+ return CEIL_DIV(raw_bit_depth, 8);
+}
+
+/* MW: These are MIPI / ISYS properties, not camera function properties */
+static enum sh_stream_format
+css2isp_stream_format(enum atomisp_input_format from) {
+ switch (from)
+ {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ return sh_stream_format_yuv420_legacy;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ return sh_stream_format_yuv420;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ return sh_stream_format_yuv422;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ return sh_stream_format_rgb;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ return sh_stream_format_raw;
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ default:
+ return sh_stream_format_raw;
+ }
+}
+
+void
+ia_css_raw_config(
+ struct sh_css_isp_raw_isp_config *to,
+ const struct ia_css_raw_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+ const struct ia_css_frame_info *in_info = from->in_info;
+ const struct ia_css_frame_info *internal_info = from->internal_info;
+
+ (void)size;
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* Only the 2401 input system uses the input width */
+ in_info = internal_info;
+#else
+ /* In some cases, in_info is NULL */
+ if (in_info)
+ (void)internal_info;
+ else
+ in_info = internal_info;
+#endif
+ ia_css_dma_configure_from_info(&to->port_b, in_info);
+
+ /* Assume divisibility here; may need to generalize to fixed point. */
+ assert((in_info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) ||
+ (elems_a % to->port_b.elems == 0));
+
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->inout_port_config = from->pipe->inout_port_config;
+ to->format = in_info->format;
+ to->required_bds_factor = from->pipe->required_bds_factor;
+ to->two_ppc = from->two_ppc;
+ to->stream_format = css2isp_stream_format(from->stream_format);
+ to->deinterleaved = from->deinterleaved;
+#if (defined(USE_INPUT_SYSTEM_VERSION_2401) || defined(CONFIG_CSI2_PLUS))
+ to->start_column = in_info->crop_info.start_column;
+ to->start_line = in_info->crop_info.start_line;
+ to->enable_left_padding = from->enable_left_padding;
+#endif
+}
+
+void
+ia_css_raw_configure(
+ const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *internal_info,
+ bool two_ppc,
+ bool deinterleaved)
+{
+ u8 enable_left_padding = binary->left_padding ? 1 : 0;
+ struct ia_css_raw_configuration config = default_config;
+
+ config.pipe = pipe;
+ config.in_info = in_info;
+ config.internal_info = internal_info;
+ config.two_ppc = two_ppc;
+ config.stream_format = binary->input_format;
+ config.deinterleaved = deinterleaved;
+ config.enable_left_padding = enable_left_padding;
+
+ ia_css_configure_raw(binary, &config);
+}
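
Two details of the raw kernel are worth spelling out. First,
sh_css_elems_bytes_from_info() rounds the bit depth up to whole bytes, so a
RAW10 stream yields CEIL_DIV(10, 8) = 2 bytes per element. Second,
css2isp_stream_format() deliberately collapses every RAW_6..RAW_16 variant
(and the BINARY_8/default case) onto sh_stream_format_raw; per-depth handling
is driven by raw_bit_depth rather than by the stream format itself.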
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h
new file mode 100644
index 000000000000..36a4079aa24a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_RAW_HOST_H
+#define __IA_CSS_RAW_HOST_H
+
+#include "ia_css_binary.h"
+
+#include "ia_css_raw_types.h"
+#include "ia_css_raw_param.h"
+
+void
+ia_css_raw_config(
+ struct sh_css_isp_raw_isp_config *to,
+ const struct ia_css_raw_configuration *from,
+ unsigned int size);
+
+void
+ia_css_raw_configure(
+ const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *internal_info,
+ bool two_ppc,
+ bool deinterleaved);
+
+#endif /* __IA_CSS_RAW_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h
new file mode 100644
index 000000000000..a1a314272a77
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_RAW_PARAM_H
+#define __IA_CSS_RAW_PARAM_H
+
+#include "type_support.h"
+
+#include "dma.h"
+
+/* Raw channel */
+struct sh_css_isp_raw_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+ u32 inout_port_config;
+ u32 input_needs_raw_binning;
+ u32 format; /* enum ia_css_frame_format */
+ u32 required_bds_factor;
+ u32 two_ppc;
+ u32 stream_format; /* enum sh_stream_format */
+ u32 deinterleaved;
+ u32 start_column; /*left crop offset*/
+ u32 start_line; /*top crop offset*/
+ u8 enable_left_padding; /*need this for multiple binary case*/
+};
+
+#endif /* __IA_CSS_RAW_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
new file mode 100644
index 000000000000..7838f59a2986
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_RAW_TYPES_H
+#define __IA_CSS_RAW_TYPES_H
+
+#include <ia_css_frame_public.h>
+#include "sh_css_internal.h"
+
+/* Raw frame
+ *
+ * ISP block: Raw frame
+ */
+
+struct ia_css_raw_configuration {
+ const struct sh_css_sp_pipeline *pipe;
+ const struct ia_css_frame_info *in_info;
+ const struct ia_css_frame_info *internal_info;
+ bool two_ppc;
+ enum atomisp_input_format stream_format;
+ bool deinterleaved;
+ u8 enable_left_padding;
+};
+
+#endif /* __IA_CSS_RAW_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
new file mode 100644
index 000000000000..2045b974ec8a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#if !defined(HAS_NO_HMEM)
+
+#include "memory_access.h"
+#include "ia_css_types.h"
+#include "sh_css_internal.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_raa.host.h"
+
+void
+ia_css_raa_encode(
+ struct sh_css_isp_aa_params *to,
+ const struct ia_css_aa_config *from,
+ unsigned int size)
+{
+ (void)size;
+ (void)to;
+ (void)from;
+}
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h
new file mode 100644
index 000000000000..d4df1dc540a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_RAA_HOST_H
+#define __IA_CSS_RAA_HOST_H
+
+#include "aa/aa_2/ia_css_aa2_types.h"
+#include "aa/aa_2/ia_css_aa2_param.h"
+
+void
+ia_css_raa_encode(
+ struct sh_css_isp_aa_params *to,
+ const struct ia_css_aa_config *from,
+ unsigned int size);
+
+#endif /* __IA_CSS_RAA_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
new file mode 100644
index 000000000000..c3f43fd327d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
@@ -0,0 +1,76 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <assert_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_frame.h>
+#include <ia_css_binary.h>
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+#include "ia_css_ref.host.h"
+
+void
+ia_css_ref_config(
+ struct sh_css_isp_ref_isp_config *to,
+ const struct ia_css_ref_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS, i;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, &from->ref_frames[0]->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->dvs_frame_delay = from->dvs_frame_delay;
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) {
+ if (from->ref_frames[i]) {
+ to->ref_frame_addr_y[i] = from->ref_frames[i]->data +
+ from->ref_frames[i]->planes.yuv.y.offset;
+ to->ref_frame_addr_c[i] = from->ref_frames[i]->data +
+ from->ref_frames[i]->planes.yuv.u.offset;
+ } else {
+ to->ref_frame_addr_y[i] = 0;
+ to->ref_frame_addr_c[i] = 0;
+ }
+ }
+
+ /* Assume divisibility here; may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_ref_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame **ref_frames,
+ const u32 dvs_frame_delay)
+{
+ struct ia_css_ref_configuration config;
+ unsigned int i;
+
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++)
+ config.ref_frames[i] = ref_frames[i];
+ config.dvs_frame_delay = dvs_frame_delay;
+ ia_css_configure_ref(binary, &config);
+}
+
+void
+ia_css_init_ref_state(
+ struct sh_css_isp_ref_dmem_state *state,
+ unsigned int size)
+{
+ (void)size;
+ assert(MAX_NUM_VIDEO_DELAY_FRAMES >= 2);
+ state->ref_in_buf_idx = 0;
+ state->ref_out_buf_idx = 1;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h
new file mode 100644
index 000000000000..4f48a8cfc604
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_REF_HOST_H
+#define __IA_CSS_REF_HOST_H
+
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+
+#include "ia_css_ref_types.h"
+#include "ia_css_ref_param.h"
+#include "ia_css_ref_state.h"
+
+void
+ia_css_ref_config(
+ struct sh_css_isp_ref_isp_config *to,
+ const struct ia_css_ref_configuration *from,
+ unsigned int size);
+
+void
+ia_css_ref_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame **ref_frames,
+ const u32 dvs_frame_delay);
+
+void
+ia_css_init_ref_state(
+ struct sh_css_isp_ref_dmem_state *state,
+ unsigned int size);
+#endif /* __IA_CSS_REF_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
new file mode 100644
index 000000000000..0a0498c17fba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_REF_PARAM_H
+#define __IA_CSS_REF_PARAM_H
+
+#include <type_support.h>
+#include "sh_css_defs.h"
+#include "dma.h"
+
+/* Reference frame */
+struct ia_css_ref_configuration {
+ const struct ia_css_frame *ref_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+ u32 dvs_frame_delay;
+};
+
+struct sh_css_isp_ref_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+ hrt_vaddress ref_frame_addr_y[MAX_NUM_VIDEO_DELAY_FRAMES];
+ hrt_vaddress ref_frame_addr_c[MAX_NUM_VIDEO_DELAY_FRAMES];
+ u32 dvs_frame_delay;
+};
+
+#endif /* __IA_CSS_REF_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h
new file mode 100644
index 000000000000..1d30ccc2c638
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_REF_STATE_H
+#define __IA_CSS_REF_STATE_H
+
+#include "type_support.h"
+
+/* REF (temporal noise reduction) */
+struct sh_css_isp_ref_dmem_state {
+ s32 ref_in_buf_idx;
+ s32 ref_out_buf_idx;
+};
+
+#endif /* __IA_CSS_REF_STATE_H */
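
The two indices implement double buffering of the temporal-noise-reduction
delay frames: the binary reads the previous reference frame at
ref_in_buf_idx while writing the new one at ref_out_buf_idx, and
ia_css_init_ref_state() seeds them to 0 and 1. A hypothetical per-frame
rotation (the rotation itself happens on the ISP side and is not part of
this patch):

	/* Hypothetical sketch, assuming exactly two delay frames. */
	static void ref_state_advance(struct sh_css_isp_ref_dmem_state *state)
	{
		s32 tmp = state->ref_in_buf_idx;

		state->ref_in_buf_idx = state->ref_out_buf_idx;
		state->ref_out_buf_idx = tmp;
	}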
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
new file mode 100644
index 000000000000..156d6cd8cf3a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_REF_TYPES_H
+#define __IA_CSS_REF_TYPES_H
+
+/* Reference frame
+ *
+ * ISP block: reference frame
+ */
+
+#include <ia_css_frame_public.h>
+
+#endif /* __IA_CSS_REF_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
new file mode 100644
index 000000000000..d093565d9eb8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
@@ -0,0 +1,386 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "sh_css_frac.h"
+#include "assert_support.h"
+
+#include "bh/bh_2/ia_css_bh.host.h"
+#include "ia_css_s3a.host.h"
+
+const struct ia_css_3a_config default_3a_config = {
+ 25559,
+ 32768,
+ 7209,
+ 65535,
+ 0,
+ 65535,
+ {-3344, -6104, -19143, 19143, 6104, 3344, 0},
+ {1027, 0, -9219, 16384, -9219, 1027, 0}
+};
+
+static unsigned int s3a_raw_bit_depth;
+
+void
+ia_css_s3a_configure(unsigned int raw_bit_depth)
+{
+ s3a_raw_bit_depth = raw_bit_depth;
+}
+
+static void
+ia_css_ae_encode(
+ struct sh_css_isp_ae_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ (void)size;
+ /* coefficients to calculate Y */
+ to->y_coef_r =
+ uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_g =
+ uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_b =
+ uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT);
+}
+
+static void
+ia_css_awb_encode(
+ struct sh_css_isp_awb_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ (void)size;
+ /* AWB level gate */
+ to->lg_high_raw =
+ uDIGIT_FITTING(from->awb_lg_high_raw, 16, s3a_raw_bit_depth);
+ to->lg_low =
+ uDIGIT_FITTING(from->awb_lg_low, 16, SH_CSS_BAYER_BITS);
+ to->lg_high =
+ uDIGIT_FITTING(from->awb_lg_high, 16, SH_CSS_BAYER_BITS);
+}
+
+static void
+ia_css_af_encode(
+ struct sh_css_isp_af_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ unsigned int i;
+ (void)size;
+
+ /* af fir coefficients */
+ for (i = 0; i < 7; ++i) {
+ to->fir1[i] =
+ sDIGIT_FITTING(from->af_fir1_coef[i], 15,
+ SH_CSS_AF_FIR_SHIFT);
+ to->fir2[i] =
+ sDIGIT_FITTING(from->af_fir2_coef[i], 15,
+ SH_CSS_AF_FIR_SHIFT);
+ }
+}
+
+void
+ia_css_s3a_encode(
+ struct sh_css_isp_s3a_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ (void)size;
+
+ ia_css_ae_encode(&to->ae, from, sizeof(to->ae));
+ ia_css_awb_encode(&to->awb, from, sizeof(to->awb));
+ ia_css_af_encode(&to->af, from, sizeof(to->af));
+}
+
+#if 0
+void
+ia_css_process_s3a(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ short dmem_offset = stage->binary->info->mem_offsets->dmem.s3a;
+
+ assert(params);
+
+ if (dmem_offset >= 0) {
+ ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
+ &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset],
+ &params->s3a_config);
+ ia_css_bh_encode((struct sh_css_isp_bh_params *)
+ &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset],
+ &params->s3a_config);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM0] =
+ true;
+ }
+
+ params->isp_params_changed = true;
+}
+#endif
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ae_dump(
+ const struct sh_css_isp_ae_params *ae,
+ unsigned int level)
+{
+ if (!ae)
+ return;
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ae_y_coef_r", ae->y_coef_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ae_y_coef_g", ae->y_coef_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ae_y_coef_b", ae->y_coef_b);
+}
+
+void
+ia_css_awb_dump(
+ const struct sh_css_isp_awb_params *awb,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "awb_lg_high_raw", awb->lg_high_raw);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "awb_lg_low", awb->lg_low);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "awb_lg_high", awb->lg_high);
+}
+
+void
+ia_css_af_dump(
+ const struct sh_css_isp_af_params *af,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[0]", af->fir1[0]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[1]", af->fir1[1]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[2]", af->fir1[2]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[3]", af->fir1[3]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[4]", af->fir1[4]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[5]", af->fir1[5]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[6]", af->fir1[6]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[0]", af->fir2[0]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[1]", af->fir2[1]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[2]", af->fir2[2]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[3]", af->fir2[3]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[4]", af->fir2[4]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[5]", af->fir2[5]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[6]", af->fir2[6]);
+}
+
+void
+ia_css_s3a_dump(
+ const struct sh_css_isp_s3a_params *s3a,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level, "S3A Support:\n");
+ ia_css_ae_dump(&s3a->ae, level);
+ ia_css_awb_dump(&s3a->awb, level);
+ ia_css_af_dump(&s3a->af, level);
+}
+
+void
+ia_css_s3a_debug_dtrace(
+ const struct ia_css_3a_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.ae_y_coef_r=%d, config.ae_y_coef_g=%d, config.ae_y_coef_b=%d, config.awb_lg_high_raw=%d, config.awb_lg_low=%d, config.awb_lg_high=%d\n",
+ config->ae_y_coef_r, config->ae_y_coef_g,
+ config->ae_y_coef_b, config->awb_lg_high_raw,
+ config->awb_lg_low, config->awb_lg_high);
+}
+#endif
+
+void
+ia_css_s3a_hmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_bh_table *hmem_buf)
+{
+#if defined(HAS_NO_HMEM)
+ (void)host_stats;
+ (void)hmem_buf;
+#else
+ struct ia_css_3a_rgby_output *out_ptr;
+ int i;
+
+ /* pixel counts(BQ) for 3A area */
+ int count_for_3a;
+ int sum_r, diff;
+
+ assert(host_stats);
+ assert(host_stats->rgby_data);
+ assert(hmem_buf);
+
+ count_for_3a = host_stats->grid.width * host_stats->grid.height
+ * host_stats->grid.bqs_per_grid_cell
+ * host_stats->grid.bqs_per_grid_cell;
+
+ out_ptr = host_stats->rgby_data;
+
+ ia_css_bh_hmem_decode(out_ptr, hmem_buf);
+
+ /*
+ * Calculate the sum of the R histogram; it should not be
+ * less than count_for_3a.
+ */
+ sum_r = 0;
+ for (i = 0; i < HMEM_UNIT_SIZE; i++)
+ sum_r += out_ptr[i].r;
+ if (sum_r < count_for_3a) {
+ /* histogram is invalid */
+ return;
+ }
+
+ /* Verify for sum of histogram of R/G/B/Y */
+#if 0
+ {
+ int sum_g = 0;
+ int sum_b = 0;
+ int sum_y = 0;
+
+ for (i = 0; i < HMEM_UNIT_SIZE; i++) {
+ sum_g += out_ptr[i].g;
+ sum_b += out_ptr[i].b;
+ sum_y += out_ptr[i].y;
+ }
+ if (sum_g != sum_r || sum_b != sum_r || sum_y != sum_r) {
+ /* histogram is invalid */
+ return;
+ }
+ }
+#endif
+
+ /*
+ * Limit the histogram area only to 3A area.
+ * In DSP, the histogram of 0 is incremented for pixels
+ * which are outside of 3A area. That amount should be subtracted here.
+ * hist[0] = hist[0] - ((sum of all hist[]) - (pixel count for 3A area))
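+ * e.g. with a 64x48 grid of 8x8-BQ cells, count_for_3a = 64*48*8*8
+ * = 196608; if sum_r = 200000, then diff = 3392 is subtracted from
+ * hist[0] of each channel.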
+ */
+ diff = sum_r - count_for_3a;
+ out_ptr[0].r -= diff;
+ out_ptr[0].g -= diff;
+ out_ptr[0].b -= diff;
+ out_ptr[0].y -= diff;
+#endif
+}
+
+void
+ia_css_s3a_dmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_3a_output *isp_stats)
+{
+ int isp_width, host_width, height, i;
+ struct ia_css_3a_output *host_ptr;
+
+ assert(host_stats);
+ assert(host_stats->data);
+ assert(isp_stats);
+
+ isp_width = host_stats->grid.aligned_width;
+ host_width = host_stats->grid.width;
+ height = host_stats->grid.height;
+ host_ptr = host_stats->data;
+
+ /* Getting 3A statistics from DMEM does not involve any
+ * transformation (unlike the VMEM version); we just copy the data
+ * using a different output width. */
+ for (i = 0; i < height; i++) {
+ memcpy(host_ptr, isp_stats, host_width * sizeof(*host_ptr));
+ isp_stats += isp_width;
+ host_ptr += host_width;
+ }
+}
+
+/* MW: this is an ISP function */
+static inline int
+merge_hi_lo_14(unsigned short hi, unsigned short lo)
+{
+ int val = (int)((((unsigned int)hi << 14) & 0xfffc000) |
+ ((unsigned int)lo & 0x3fff));
+ return val;
+}
+
+void
+ia_css_s3a_vmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const u16 *isp_stats_hi,
+ const u16 *isp_stats_lo)
+{
+ int out_width, out_height, chunk, rest, kmax, y, x, k, elm_start, elm, ofs;
+ const u16 *hi, *lo;
+ struct ia_css_3a_output *output;
+
+ assert(host_stats);
+ assert(host_stats->data);
+ assert(isp_stats_hi);
+ assert(isp_stats_lo);
+
+ output = host_stats->data;
+ out_width = host_stats->grid.width;
+ out_height = host_stats->grid.height;
+ hi = isp_stats_hi;
+ lo = isp_stats_lo;
+
+ chunk = ISP_VEC_NELEMS >> host_stats->grid.deci_factor_log2;
+ chunk = max(chunk, 1);
+
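+ /*
+ * Layout in the hi/lo buffers: within each output row (rows are
+ * ISP_S3ATBL_HI_LO_STRIDE elements apart), the eight statistics
+ * fields are interleaved per chunk: element (elm + chunk * f)
+ * holds field f, in the order ae_y, awb_cnt, awb_gr, awb_r,
+ * awb_b, awb_gb, af_hpf1, af_hpf2. Each value is split into
+ * 14-bit halves across isp_stats_hi and isp_stats_lo.
+ */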
+ for (y = 0; y < out_height; y++) {
+ elm_start = y * ISP_S3ATBL_HI_LO_STRIDE;
+ rest = out_width;
+ x = 0;
+ while (x < out_width) {
+ kmax = (rest > chunk) ? chunk : rest;
+ ofs = y * out_width + x;
+ elm = elm_start + x * sizeof(*output) / sizeof(int32_t);
+ for (k = 0; k < kmax; k++, elm++) {
+ output[ofs + k].ae_y = merge_hi_lo_14(
+ hi[elm + chunk * 0], lo[elm + chunk * 0]);
+ output[ofs + k].awb_cnt = merge_hi_lo_14(
+ hi[elm + chunk * 1], lo[elm + chunk * 1]);
+ output[ofs + k].awb_gr = merge_hi_lo_14(
+ hi[elm + chunk * 2], lo[elm + chunk * 2]);
+ output[ofs + k].awb_r = merge_hi_lo_14(
+ hi[elm + chunk * 3], lo[elm + chunk * 3]);
+ output[ofs + k].awb_b = merge_hi_lo_14(
+ hi[elm + chunk * 4], lo[elm + chunk * 4]);
+ output[ofs + k].awb_gb = merge_hi_lo_14(
+ hi[elm + chunk * 5], lo[elm + chunk * 5]);
+ output[ofs + k].af_hpf1 = merge_hi_lo_14(
+ hi[elm + chunk * 6], lo[elm + chunk * 6]);
+ output[ofs + k].af_hpf2 = merge_hi_lo_14(
+ hi[elm + chunk * 7], lo[elm + chunk * 7]);
+ }
+ x += chunk;
+ rest -= chunk;
+ }
+ }
+}
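
merge_hi_lo_14() reassembles a value that the ISP split into two 14-bit
halves: the low word contributes bits 0..13 and the high word bits 14..27.
For example, merge_hi_lo_14(0x1, 0x2345) = (0x1 << 14) | 0x2345 = 0x6345
= 25413. Because the masks keep 14 bits from each half, the result is at
most 28 bits and always fits the s32 fields of struct ia_css_3a_output.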
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h
new file mode 100644
index 000000000000..13d19dab1f1d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h
@@ -0,0 +1,77 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_S3A_HOST_H
+#define __IA_CSS_S3A_HOST_H
+
+#include "ia_css_s3a_types.h"
+#include "ia_css_s3a_param.h"
+#include "bh/bh_2/ia_css_bh.host.h"
+
+extern const struct ia_css_3a_config default_3a_config;
+
+void
+ia_css_s3a_configure(
+ unsigned int raw_bit_depth);
+
+void
+ia_css_s3a_encode(
+ struct sh_css_isp_s3a_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ae_dump(
+ const struct sh_css_isp_ae_params *ae,
+ unsigned int level);
+
+void
+ia_css_awb_dump(
+ const struct sh_css_isp_awb_params *awb,
+ unsigned int level);
+
+void
+ia_css_af_dump(
+ const struct sh_css_isp_af_params *af,
+ unsigned int level);
+
+void
+ia_css_s3a_dump(
+ const struct sh_css_isp_s3a_params *s3a,
+ unsigned int level);
+
+void
+ia_css_s3a_debug_dtrace(
+ const struct ia_css_3a_config *config,
+ unsigned int level);
+#endif
+
+void
+ia_css_s3a_hmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_bh_table *hmem_buf);
+
+void
+ia_css_s3a_dmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_3a_output *isp_stats);
+
+void
+ia_css_s3a_vmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const u16 *isp_stats_hi,
+ const u16 *isp_stats_lo);
+
+#endif /* __IA_CSS_S3A_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h
new file mode 100644
index 000000000000..041101767ff2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_S3A_PARAM_H
+#define __IA_CSS_S3A_PARAM_H
+
+#include "type_support.h"
+
+/* AE (3A Support) */
+struct sh_css_isp_ae_params {
+ /* coefficients to calculate Y */
+ s32 y_coef_r;
+ s32 y_coef_g;
+ s32 y_coef_b;
+};
+
+/* AWB (3A Support) */
+struct sh_css_isp_awb_params {
+ s32 lg_high_raw;
+ s32 lg_low;
+ s32 lg_high;
+};
+
+/* AF (3A Support) */
+struct sh_css_isp_af_params {
+ s32 fir1[7];
+ s32 fir2[7];
+};
+
+/* S3A (3A Support) */
+struct sh_css_isp_s3a_params {
+ /* coefficients to calculate Y */
+ struct sh_css_isp_ae_params ae;
+
+ /* AWB level gate */
+ struct sh_css_isp_awb_params awb;
+
+ /* af fir coefficients */
+ struct sh_css_isp_af_params af;
+};
+
+#endif /* __IA_CSS_S3A_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
new file mode 100644
index 000000000000..5a5b277ca0eb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
@@ -0,0 +1,221 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_S3A_TYPES_H
+#define __IA_CSS_S3A_TYPES_H
+
+/* @file
+ * CSS-API header file for 3A statistics parameters.
+ */
+
+#include <ia_css_frac.h>
+
+#if (defined(SYSTEM_css_skycam_c0_system)) && (!defined(PIPE_GENERATION))
+#include "../../../../components/stats_3a/src/stats_3a_public.h"
+#endif
+
+/* 3A configuration. This configures the 3A statistics collection
+ * module.
+ */
+
+/* 3A statistics grid
+ *
+ * ISP block: S3A1 (3A Support for 3A ver.1 (Histogram is not used for AE))
+ * S3A2 (3A Support for 3A ver.2 (Histogram is used for AE))
+ * ISP1: S3A1 is used.
+ * ISP2: S3A2 is used.
+ */
+struct ia_css_3a_grid_info {
+#if defined(SYSTEM_css_skycam_c0_system)
+ u32 ae_enable; /** ae enabled in binary,
+ 0:disabled, 1:enabled */
+ struct ae_public_config_grid_config
+ ae_grd_info; /** see description in ae_public.h*/
+
+ u32 awb_enable; /** awb enabled in binary,
+ 0:disabled, 1:enabled */
+ struct awb_public_config_grid_config
+ awb_grd_info; /** see description in awb_public.h*/
+
+ u32 af_enable; /** af enabled in binary,
+ 0:disabled, 1:enabled */
+ struct af_public_grid_config af_grd_info; /** see description in af_public.h*/
+
+ u32 awb_fr_enable; /** awb_fr enabled in binary,
+ 0:disabled, 1:enabled */
+ struct awb_fr_public_grid_config
+ awb_fr_grd_info;/** see description in awb_fr_public.h*/
+
+ u32 elem_bit_depth; /** TODO: Taken from BYT; need input from AIQ
+ if needed for SKC
+ Bit depth of element used
+ to calculate 3A statistics.
+ This is 13, which is the normalized
+ bayer bit depth in DSP. */
+
+#else
+ u32 enable; /** 3A statistics enabled.
+ 0:disabled, 1:enabled */
+ u32 use_dmem; /** DMEM or VMEM determines layout.
+ 0:3A statistics are stored to VMEM,
+ 1:3A statistics are stored to DMEM */
+ u32 has_histogram; /** Statistics include histogram.
+ 0:no histogram, 1:has histogram */
+ u32 width; /** Width of 3A grid table.
+ (= Horizontal number of grid cells
+ in table, which cells have effective
+ statistics.) */
+ u32 height; /** Height of 3A grid table.
+ (= Vertical number of grid cells
+ in table, which cells have effective
+ statistics.) */
+ u32 aligned_width; /** Horizontal stride (for alloc).
+ (= Horizontal number of grid cells
+ in table, which means
+ the allocated width.) */
+ u32 aligned_height; /** Vertical stride (for alloc).
+ (= Vertical number of grid cells
+ in table, which means
+ the allocated height.) */
+ u32 bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
+ (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
+ Valid values are 8,16,32,64. */
+ u32 deci_factor_log2; /** log2 of bqs_per_grid_cell. */
+ u32 elem_bit_depth; /** Bit depth of element used
+ to calculate 3A statistics.
+ This is 13, which is the normalized
+ bayer bit depth in DSP. */
+#endif
+};
+
+/* This struct should be split into 3, for AE, AWB and AF.
+ * However, that will require driver / 3A library modifications.
+ */
+
+/* 3A configuration. This configures the 3A statistics collection
+ * module.
+ *
+ * ae_y_*: Coefficients to calculate luminance from bayer.
+ * awb_lg_*: Thresholds to check the saturated bayer pixels for AWB.
+ * Condition of effective pixel for AWB level gate check:
+ * bayer(sensor) <= awb_lg_high_raw &&
+ *     bayer(when AWB statistics are calculated) >= awb_lg_low &&
+ *     bayer(when AWB statistics are calculated) <= awb_lg_high
+ * af_fir*: Coefficients of high pass filter to calculate AF statistics.
+ *
+ * ISP block: S3A1(ae_y_* for AE/AF, awb_lg_* for AWB)
+ * S3A2(ae_y_* for AF, awb_lg_* for AWB)
+ * SDVS1(ae_y_*)
+ * SDVS2(ae_y_*)
+ * ISP1: S3A1 and SDVS1 are used.
+ * ISP2: S3A2 and SDVS2 are used.
+ */
+struct ia_css_3a_config {
+ ia_css_u0_16 ae_y_coef_r; /** Weight of R for Y.
+ u0.16, [0,65535],
+ default/ineffective 25559 */
+ ia_css_u0_16 ae_y_coef_g; /** Weight of G for Y.
+ u0.16, [0,65535],
+ default/ineffective 32768 */
+ ia_css_u0_16 ae_y_coef_b; /** Weight of B for Y.
+ u0.16, [0,65535],
+ default/ineffective 7209 */
+ ia_css_u0_16 awb_lg_high_raw; /** AWB level gate high for raw.
+ u0.16, [0,65535],
+ default 65472(=1023*64),
+ ineffective 65535 */
+ ia_css_u0_16 awb_lg_low; /** AWB level gate low.
+ u0.16, [0,65535],
+ default 64(=1*64),
+ ineffective 0 */
+ ia_css_u0_16 awb_lg_high; /** AWB level gate high.
+ u0.16, [0,65535],
+ default 65535,
+ ineffective 65535 */
+ ia_css_s0_15 af_fir1_coef[7]; /** AF FIR coefficients of fir1.
+ s0.15, [-32768,32767],
+ default/ineffective
+ -6689,-12207,-32768,32767,12207,6689,0 */
+ ia_css_s0_15 af_fir2_coef[7]; /** AF FIR coefficients of fir2.
+ s0.15, [-32768,32767],
+ default/ineffective
+ 2053,0,-18437,32767,-18437,2053,0 */
+};
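+
+/* Illustrative sketch (not part of the CSS API): AE luminance is a weighted
+ * sum of the bayer components in u0.16 fixed point. With the defaults the
+ * weights approximate Y = 0.39*R + 0.50*G + 0.11*B (they sum to 65536):
+ *
+ *   u32 y = ((u32)r * cfg->ae_y_coef_r +
+ *            (u32)g * cfg->ae_y_coef_g +
+ *            (u32)b * cfg->ae_y_coef_b) >> 16;
+ */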
+
+/* 3A statistics. This structure describes the data stored
+ * in each 3A grid point.
+ *
+ * ISP block: S3A1 (3A Support for 3A ver.1) (Histogram is not used for AE)
+ * S3A2 (3A Support for 3A ver.2) (Histogram is used for AE)
+ * - ae_y is used only for S3A1.
+ * - awb_* and af_* are used both for S3A1 and S3A2.
+ * ISP1: S3A1 is used.
+ * ISP2: S3A2 is used.
+ */
+struct ia_css_3a_output {
+ s32 ae_y; /** Sum of Y in a statistics window, for AE.
+ (u19.13) */
+ s32 awb_cnt; /** Number of effective pixels
+ in a statistics window.
+ Pixels passed by the AWB level gate check are
+ judged as "effective". (u32) */
+ s32 awb_gr; /** Sum of Gr in a statistics window, for AWB.
+ All Gr pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ s32 awb_r; /** Sum of R in a statistics window, for AWB.
+ All R pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ s32 awb_b; /** Sum of B in a statistics window, for AWB.
+ All B pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ s32 awb_gb; /** Sum of Gb in a statistics window, for AWB.
+ All Gb pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ s32 af_hpf1; /** Sum of |Y| following high pass filter af_fir1
+ within a statistics window, for AF. (u19.13) */
+ s32 af_hpf2; /** Sum of |Y| following high pass filter af_fir2
+ within a statistics window, for AF. (u19.13) */
+};
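+
+/* Illustrative sketch (not part of the CSS API): the sums are u19.13 fixed
+ * point, so a rough per-window mean in the same format can be derived by
+ * dividing by the pixel count, e.g.:
+ *
+ *   s32 avg_r = out->awb_cnt ? out->awb_r / out->awb_cnt : 0;
+ *
+ * (approximate, since awb_r sums all R pixels, not only effective ones).
+ */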
+
+/* 3A Statistics. This structure describes the statistics that are generated
+ * using the provided configuration (ia_css_3a_config).
+ */
+struct ia_css_3a_statistics {
+ struct ia_css_3a_grid_info
+ grid; /** grid info contains the dimensions of the 3A grid */
+ struct ia_css_3a_output
+ *data; /** the pointer to 3a_output[grid.width * grid.height]
+ containing the 3A statistics */
+ struct ia_css_3a_rgby_output *rgby_data;/** the pointer to 3a_rgby_output[256]
+ containing the histogram */
+};
+
+/* Histogram (Statistics for AE).
+ *
+ * 4 histograms(r,g,b,y),
+ * 256 bins for each histogram, unsigned 24bit value for each bin.
+ * struct ia_css_3a_rgby_output data[256];
+ *
+ * ISP block: HIST2
+ * (ISP1: HIST2 is not used.)
+ * ISP2: HIST2 is used.
+ */
+struct ia_css_3a_rgby_output {
+ u32 r; /** Number of R of one bin of the histogram R. (u24) */
+ u32 g; /** Number of G of one bin of the histogram G. (u24) */
+ u32 b; /** Number of B of one bin of the histogram B. (u24) */
+ u32 y; /** Number of Y of one bin of the histogram Y. (u24) */
+};
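+
+/* Illustrative sketch (not part of the CSS API): rgby_data points to 256 of
+ * these bins, so the total count per channel can be recovered as:
+ *
+ *   u32 i, total_y = 0;
+ *   for (i = 0; i < 256; i++)
+ *       total_y += rgby_data[i].y;
+ */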
+
+#endif /* __IA_CSS_S3A_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
new file mode 100644
index 000000000000..43954ed6d106
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
@@ -0,0 +1,158 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+#include "ia_css_sc.host.h"
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+/* ISP2401 */
+static void
+ia_css_configure_sc(
+ const struct ia_css_binary *binary,
+ const struct ia_css_sc_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_sc() enter:\n");
+
+ {
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.sc.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.sc.offset;
+ }
+ if (size) {
+ ia_css_sc_config((struct sh_css_isp_sc_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_configure_sc() leave:\n");
+}
+
+void
+ia_css_sc_encode(
+ struct sh_css_isp_sc_params *to,
+ struct ia_css_shading_table **from,
+ unsigned int size)
+{
+ (void)size;
+ to->gain_shift = (*from)->fraction_bits;
+}
+
+void
+ia_css_sc_dump(
+ const struct sh_css_isp_sc_params *sc,
+ unsigned int level)
+{
+	if (!sc)
+		return;
+ ia_css_debug_dtrace(level, "Shading Correction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "sc_gain_shift", sc->gain_shift);
+}
+
+/* ISP2401 */
+void
+ia_css_sc_config(
+ struct sh_css_isp_sc_isp_config *to,
+ const struct ia_css_sc_configuration *from,
+ unsigned int size)
+{
+ u32 internal_org_x_bqs = from->internal_frame_origin_x_bqs_on_sctbl;
+ u32 internal_org_y_bqs = from->internal_frame_origin_y_bqs_on_sctbl;
+ u32 slice, rest, i;
+
+ (void)size;
+
+	/* internal_frame_origin_x_bqs_on_sctbl is split into
+	 * SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES slices of at most
+	 * ISP_SLICE_NELEMS bqs each. */
+ rest = internal_org_x_bqs;
+ for (i = 0; i < SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES; i++) {
+ slice = min(rest, ((uint32_t)ISP_SLICE_NELEMS));
+ rest = rest - slice;
+ to->interped_gain_hor_slice_bqs[i] = slice;
+ }
+
+ to->internal_frame_origin_y_bqs_on_sctbl = internal_org_y_bqs;
+}
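+
+/* Worked example (assuming ISP_SLICE_NELEMS == 4): an x origin of 13 bqs is
+ * decomposed as interped_gain_hor_slice_bqs = { 4, 4, 4, 1, 0, 0, 0, 0 };
+ * the slices sum to the original origin and each slice is at most
+ * ISP_SLICE_NELEMS bqs. */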
+
+/* ISP2401 */
+void
+ia_css_sc_configure(
+ const struct ia_css_binary *binary,
+ u32 internal_frame_origin_x_bqs_on_sctbl,
+    u32 internal_frame_origin_y_bqs_on_sctbl)
+{
+ const struct ia_css_sc_configuration config = {
+ internal_frame_origin_x_bqs_on_sctbl,
+ internal_frame_origin_y_bqs_on_sctbl
+ };
+
+ ia_css_configure_sc(binary, &config);
+}
+
+/* ------ deprecated(bz675) : from ------ */
+/* The @parameter{} directive (in *.pipe) generates the process/get/set
+   functions for parameters that are used in the isp kernels.
+   The ia_css_shading_settings structure, however, only has a parameter that
+   is used in the css and none that is used in the isp kernels, so
+   @parameter{} is not used to generate get/set functions for it. (michie) */
+void
+sh_css_get_shading_settings(const struct ia_css_isp_parameters *params,
+ struct ia_css_shading_settings *settings)
+{
+ if (!settings)
+ return;
+ assert(params);
+
+	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+			    "sh_css_get_shading_settings() enter: settings=%p\n", settings);
+
+ *settings = params->shading_settings;
+
+	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+			    "sh_css_get_shading_settings() leave: settings.enable_shading_table_conversion=%d\n",
+			    settings->enable_shading_table_conversion);
+}
+
+void
+sh_css_set_shading_settings(struct ia_css_isp_parameters *params,
+ const struct ia_css_shading_settings *settings)
+{
+ if (!settings)
+ return;
+ assert(params);
+
+	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+			    "sh_css_set_shading_settings() enter: settings.enable_shading_table_conversion=%d\n",
+			    settings->enable_shading_table_conversion);
+
+ params->shading_settings = *settings;
+ params->shading_settings_changed = true;
+
+	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+			    "sh_css_set_shading_settings() leave: return_void\n");
+}
+
+/* ------ deprecated(bz675) : to ------ */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
new file mode 100644
index 000000000000..efbe40b399dd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
@@ -0,0 +1,77 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SC_HOST_H
+#define __IA_CSS_SC_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_sc_types.h"
+#include "ia_css_sc_param.h"
+
+void
+ia_css_sc_encode(
+ struct sh_css_isp_sc_params *to,
+ struct ia_css_shading_table **from,
+ unsigned int size);
+
+void
+ia_css_sc_dump(
+ const struct sh_css_isp_sc_params *sc,
+ unsigned int level);
+
+/* @brief Configure the shading correction.
+ * @param[out] to Parameters used in the shading correction kernel in the isp.
+ * @param[in] from Parameters passed from the host.
+ * @param[in] size Size of the sh_css_isp_sc_isp_config structure.
+ *
+ * This function passes the parameters for the shading correction from the host to the isp.
+ */
+/* ISP2401 */
+void
+ia_css_sc_config(
+ struct sh_css_isp_sc_isp_config *to,
+ const struct ia_css_sc_configuration *from,
+ unsigned int size);
+
+/* @brief Configure the shading correction.
+ * @param[in] binary The binary, which has the shading correction.
+ * @param[in] internal_frame_origin_x_bqs_on_sctbl
+ * X coordinate (in bqs) of the origin of the internal frame on the shading table.
+ * @param[in] internal_frame_origin_y_bqs_on_sctbl
+ * Y coordinate (in bqs) of the origin of the internal frame on the shading table.
+ *
+ * This function calls the ia_css_configure_sc() function.
+ * (The ia_css_configure_sc() function is automatically generated in ia_css_isp.configs.c.)
+ * The ia_css_configure_sc() function calls the ia_css_sc_config() function
+ * to pass the parameters for the shading correction from the host to the isp.
+ */
+/* ISP2401 */
+void
+ia_css_sc_configure(
+ const struct ia_css_binary *binary,
+ u32 internal_frame_origin_x_bqs_on_sctbl,
+    u32 internal_frame_origin_y_bqs_on_sctbl);
+
+/* ------ deprecated(bz675) : from ------ */
+void
+sh_css_get_shading_settings(const struct ia_css_isp_parameters *params,
+ struct ia_css_shading_settings *settings);
+
+void
+sh_css_set_shading_settings(struct ia_css_isp_parameters *params,
+ const struct ia_css_shading_settings *settings);
+/* ------ deprecated(bz675) : to ------ */
+
+#endif /* __IA_CSS_SC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h
new file mode 100644
index 000000000000..38a625821987
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SC_PARAM_H
+#define __IA_CSS_SC_PARAM_H
+
+#include "type_support.h"
+
+/* SC (Shading Correction) */
+struct sh_css_isp_sc_params {
+ s32 gain_shift;
+};
+
+/* Number of horizontal slice times for interpolated gain:
+ *
+ * The start position of the internal frame does not match the start position
+ * of the shading table, so a vector of shading gains (interpolated
+ * horizontally and vertically) must be shifted to match a vector on the
+ * internal frame. vec_slice is applied to 2 adjacent vectors of shading
+ * gains, and at most 8 such shifts are needed:
+ * max grid cell bqs to support the shading table centering: N = 32
+ * CEIL_DIV(N - 1, ISP_SLICE_NELEMS) = CEIL_DIV(31, 4) = 8
+ */
+#define SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES 8
+
+struct sh_css_isp_sc_isp_config {
+ u32 interped_gain_hor_slice_bqs[SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES];
+ u32 internal_frame_origin_y_bqs_on_sctbl;
+};
+
+#endif /* __IA_CSS_SC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
new file mode 100644
index 000000000000..41f3ee7158ff
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
@@ -0,0 +1,134 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SC_TYPES_H
+#define __IA_CSS_SC_TYPES_H
+
+/* @file
+* CSS-API header file for Lens Shading Correction (SC) parameters.
+*/
+
+/* Number of color planes in the shading table. */
+#define IA_CSS_SC_NUM_COLORS 4
+
+/* The 4 colors that a shading table consists of.
+ * For each color we store a grid of values.
+ */
+enum ia_css_sc_color {
+ IA_CSS_SC_COLOR_GR, /** Green on a green-red line */
+ IA_CSS_SC_COLOR_R, /** Red */
+ IA_CSS_SC_COLOR_B, /** Blue */
+ IA_CSS_SC_COLOR_GB /** Green on a green-blue line */
+};
+
+/* Lens Shading Correction table.
+ *
+ * This describes the color shading artefacts
+ * introduced by lens imperfections. To correct artefacts,
+ * bayer values should be multiplied by gains in this table.
+ *
+ *------------ deprecated(bz675) : from ---------------------------
+ * When shading_settings.enable_shading_table_conversion is set to 0,
+ * this shading table is sent directly to the isp. The table should contain
+ * the data based on the ia_css_shading_info information filled in the css.
+ * So, the driver needs to get the ia_css_shading_info information
+ * from the css before generating the shading table.
+ *
+ * When shading_settings.enable_shading_table_conversion is set to 1,
+ * this shading table is converted in the legacy way in the css
+ * before it is sent to the isp.
+ * The driver does not need to get the ia_css_shading_info information.
+ *
+ * NOTE:
+ * The shading table conversion will be removed from the css in the near
+ * future, because it does not support bayer scaling by the sensor.
+ * Also, the shading table should be generated in only one place (AIC).
+ * For the time being, to support old drivers which assume the conversion
+ * is done in the css, shading_settings.enable_shading_table_conversion
+ * is set to 1 by default.
+ *------------ deprecated(bz675) : to ---------------------------
+ *
+ * ISP block: SC1
+ * ISP1: SC1 is used.
+ * ISP2: SC1 is used.
+ */
+struct ia_css_shading_table {
+ u32 enable; /** Set to false for no shading correction.
+ The data field can be NULL when enable == true */
+ /* ------ deprecated(bz675) : from ------ */
+ u32 sensor_width; /** Native sensor width in pixels. */
+	u32 sensor_height;	/** Native sensor height in lines.
+		When shading_settings.enable_shading_table_conversion is set
+		to 0, sensor_width and sensor_height are NOT used.
+		They are used only in the legacy shading table conversion
+		in the css, when enable_shading_table_conversion is set
+		to 1. */
+ /* ------ deprecated(bz675) : to ------ */
+ u32 width; /** Number of data points per line per color.
+ u8.0, [0,81] */
+ u32 height; /** Number of lines of data points per color.
+ u8.0, [0,61] */
+ u32 fraction_bits; /** Bits of fractional part in the data
+ points.
+ u8.0, [0,13] */
+ u16 *data[IA_CSS_SC_NUM_COLORS];
+ /** Table data, one array for each color.
+ Use ia_css_sc_color to index this array.
+ u[13-fraction_bits].[fraction_bits], [0,8191] */
+};
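+
+/* Illustrative sketch (not part of the CSS API): applying one gain from the
+ * table to a bayer value. Gains are u[13-fraction_bits].[fraction_bits]:
+ *
+ *   corrected = (pixel * table->data[IA_CSS_SC_COLOR_R][cell])
+ *               >> table->fraction_bits;
+ */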
+
+/* ------ deprecated(bz675) : from ------ */
+/* Shading Correction settings.
+ *
+ * NOTE:
+ * This structure should be removed when the shading table conversion is
+ * removed from the css.
+ */
+struct ia_css_shading_settings {
+	u32 enable_shading_table_conversion;	/** Set to 0
+		to disable the conversion of the shading table
+		in the css. (default 1)
+		0: The shading table is sent directly to the isp.
+		The shading table should contain the data based on the
+		ia_css_shading_info information filled in the css.
+		1: The shading table is converted in the css to fit
+		the shading table definition required by the isp.
+		NOTE:
+		Previously, the shading table was always converted in the css
+		before it was sent to the isp, and this config was not defined.
+		Currently, the driver is supposed to pass the shading table
+		which should be sent directly to the isp.
+		However, some drivers may still pass a shading table which
+		needs the conversion, without setting this config to 1.
+		To support such an unexpected case for the time being,
+		enable_shading_table_conversion is set to 1 by default
+		in the css. */
+};
+
+/* ------ deprecated(bz675) : to ------ */
+
+/* Shading Correction configuration.
+ *
+ * NOTE: The shading table size is larger than or equal to the internal frame size.
+ */
+/* ISP2401 */
+struct ia_css_sc_configuration {
+ u32 internal_frame_origin_x_bqs_on_sctbl; /** Origin X (in bqs) of internal frame on shading table. */
+ u32 internal_frame_origin_y_bqs_on_sctbl; /** Origin Y (in bqs) of internal frame on shading table. */
+ /** NOTE: bqs = size in BQ(Bayer Quad) unit.
+ 1BQ means {Gr,R,B,Gb}(2x2 pixels).
+ Horizontal 1 bqs corresponds to horizontal 2 pixels.
+ Vertical 1 bqs corresponds to vertical 2 pixels. */
+};
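+
+/* Illustrative conversion (a sketch, not part of the CSS API): a pixel
+ * origin is converted to bqs by halving each axis, e.g. an internal frame
+ * origin of (64, 32) pixels on the shading table is (32, 16) bqs. */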
+
+#endif /* __IA_CSS_SC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h
new file mode 100644
index 000000000000..c03936fb0550
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h
@@ -0,0 +1,101 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_SDIS_COMMON_HOST_H
+#define _IA_CSS_SDIS_COMMON_HOST_H
+
+#define ISP_MAX_SDIS_HOR_PROJ_NUM_ISP \
+ __ISP_SDIS_HOR_PROJ_NUM_ISP(ISP_MAX_INTERNAL_WIDTH, ISP_MAX_INTERNAL_HEIGHT, \
+ SH_CSS_DIS_DECI_FACTOR_LOG2, ISP_PIPE_VERSION)
+#define ISP_MAX_SDIS_VER_PROJ_NUM_ISP \
+ __ISP_SDIS_VER_PROJ_NUM_ISP(ISP_MAX_INTERNAL_WIDTH, \
+ SH_CSS_DIS_DECI_FACTOR_LOG2)
+
+#define _ISP_SDIS_HOR_COEF_NUM_VECS \
+ __ISP_SDIS_HOR_COEF_NUM_VECS(ISP_INTERNAL_WIDTH)
+#define ISP_MAX_SDIS_HOR_COEF_NUM_VECS \
+ __ISP_SDIS_HOR_COEF_NUM_VECS(ISP_MAX_INTERNAL_WIDTH)
+#define ISP_MAX_SDIS_VER_COEF_NUM_VECS \
+ __ISP_SDIS_VER_COEF_NUM_VECS(ISP_MAX_INTERNAL_HEIGHT)
+
+/* SDIS Coefficients: */
+/* The ISP uses vectors to store the coefficients, so we round
+ the number of coefficients up to vectors. */
+#define __ISP_SDIS_HOR_COEF_NUM_VECS(in_width) _ISP_VECS(_ISP_BQS(in_width))
+#define __ISP_SDIS_VER_COEF_NUM_VECS(in_height) _ISP_VECS(_ISP_BQS(in_height))
+
+/* SDIS Projections:
+ * SDIS1: Horizontal projections are calculated for each line.
+ * Vertical projections are calculated for each column.
+ * SDIS2: Projections are calculated for each grid cell.
+ * Grid cells that do not fall completely within the image are not
+ * valid. The host needs to use the bigger one for the stride but
+ * should only return the valid ones to the 3A. */
+#define __ISP_SDIS_HOR_PROJ_NUM_ISP(in_width, in_height, deci_factor_log2, \
+ isp_pipe_version) \
+ ((isp_pipe_version == 1) ? \
+ CEIL_SHIFT(_ISP_BQS(in_height), deci_factor_log2) : \
+ CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2))
+
+#define __ISP_SDIS_VER_PROJ_NUM_ISP(in_width, deci_factor_log2) \
+ CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2)
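+
+/* Worked example (a sketch): for an internal frame of 1280x720 bqs with
+ * deci_factor_log2 == 6, pipe version 1 yields
+ * CEIL_SHIFT(720, 6) == 12 horizontal projections, while pipe version 2
+ * yields CEIL_SHIFT(1280, 6) == 20. */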
+
+#define SH_CSS_DIS_VER_NUM_COEF_TYPES(b) \
+ (((b)->info->sp.pipeline.isp_pipe_version == 2) ? \
+ IA_CSS_DVS2_NUM_COEF_TYPES : \
+ IA_CSS_DVS_NUM_COEF_TYPES)
+
+#ifndef PIPE_GENERATION
+#if defined(__ISP) || defined(MK_FIRMWARE)
+
+/* The array cannot be 2-dimensional, since the driver DDR allocation does not know the stride */
+struct sh_css_isp_sdis_hori_proj_tbl {
+ s32 tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_HOR_PROJ_NUM_ISP];
+#if DVS2_PROJ_MARGIN > 0
+ s32 margin[DVS2_PROJ_MARGIN];
+#endif
+};
+
+struct sh_css_isp_sdis_vert_proj_tbl {
+ s32 tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_VER_PROJ_NUM_ISP];
+#if DVS2_PROJ_MARGIN > 0
+ s32 margin[DVS2_PROJ_MARGIN];
+#endif
+};
+
+struct sh_css_isp_sdis_hori_coef_tbl {
+ VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES],
+		   ISP_MAX_SDIS_HOR_COEF_NUM_VECS * ISP_NWAY);
+};
+
+struct sh_css_isp_sdis_vert_coef_tbl {
+ VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES],
+		   ISP_MAX_SDIS_VER_COEF_NUM_VECS * ISP_NWAY);
+};
+
+#endif /* defined(__ISP) || defined (MK_FIRMWARE) */
+#endif /* PIPE_GENERATION */
+
+#ifndef PIPE_GENERATION
+struct s_sdis_config {
+ unsigned int horicoef_vectors;
+ unsigned int vertcoef_vectors;
+ unsigned int horiproj_num;
+ unsigned int vertproj_num;
+};
+
+extern struct s_sdis_config sdis_config;
+#endif
+
+#endif /* _IA_CSS_SDIS_COMMON_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h
new file mode 100644
index 000000000000..e257841bba67
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h
@@ -0,0 +1,220 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS_COMMON_TYPES_H
+#define __IA_CSS_SDIS_COMMON_TYPES_H
+
+/* @file
+* CSS-API header file for DVS statistics parameters.
+*/
+
+#include <type_support.h>
+
+/* DVS statistics grid dimensions in number of cells.
+ */
+
+struct ia_css_dvs_grid_dim {
+ u32 width; /** Width of DVS grid table in cells */
+ u32 height; /** Height of DVS grid table in cells */
+};
+
+/* DVS statistics dimensions in number of cells for
+ * grid, coefficient and projection.
+ */
+
+struct ia_css_sdis_info {
+ struct {
+ struct ia_css_dvs_grid_dim dim; /* Dimensions */
+ struct ia_css_dvs_grid_dim pad; /* Padded dimensions */
+ } grid, coef, proj;
+ u32 deci_factor_log2;
+};
+
+/* DVS statistics grid
+ *
+ * ISP block: SDVS1 (DIS/DVS Support for DIS/DVS ver.1 (2-axes))
+ * SDVS2 (DVS Support for DVS ver.2 (6-axes))
+ * ISP1: SDVS1 is used.
+ * ISP2: SDVS2 is used.
+ */
+struct ia_css_dvs_grid_res {
+	u32 width;	    /** Width of DVS grid table.
+				(= Horizontal number of grid cells
+				in the table whose cells hold
+				effective statistics.)
+				For DVS1, this is equal to
+				the number of vertical statistics. */
+	u32 aligned_width;  /** Stride of each grid line.
+				(= Horizontal number of grid cells
+				in the table, i.e. the allocated
+				width.) */
+	u32 height;	    /** Height of DVS grid table.
+				(= Vertical number of grid cells
+				in the table whose cells hold
+				effective statistics.)
+				For DVS1, this is equal to
+				the number of horizontal statistics. */
+	u32 aligned_height;/** Stride of each grid column.
+				(= Vertical number of grid cells
+				in the table, i.e. the allocated
+				height.) */
+};
+
+/* TODO: use ia_css_dvs_grid_res in here.
+ * However, that implies driver I/F changes
+ */
+struct ia_css_dvs_grid_info {
+ u32 enable; /** DVS statistics enabled.
+ 0:disabled, 1:enabled */
+	u32 width;	    /** Width of DVS grid table.
+				(= Horizontal number of grid cells
+				in the table whose cells hold
+				effective statistics.)
+				For DVS1, this is equal to
+				the number of vertical statistics. */
+	u32 aligned_width;  /** Stride of each grid line.
+				(= Horizontal number of grid cells
+				in the table, i.e. the allocated
+				width.) */
+	u32 height;	    /** Height of DVS grid table.
+				(= Vertical number of grid cells
+				in the table whose cells hold
+				effective statistics.)
+				For DVS1, this is equal to
+				the number of horizontal statistics. */
+	u32 aligned_height;/** Stride of each grid column.
+				(= Vertical number of grid cells
+				in the table, i.e. the allocated
+				height.) */
+	u32 bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
+				(1BQ means {Gr,R,B,Gb}(2x2 pixels).)
+				For DVS1, the only valid value is 64.
+				For DVS2, currently the only valid
+				value is also 64. */
+ u32 num_hor_coefs; /** Number of horizontal coefficients. */
+ u32 num_ver_coefs; /** Number of vertical coefficients. */
+};
+
+/* Number of DVS statistics levels
+ */
+#define IA_CSS_DVS_STAT_NUM_OF_LEVELS 3
+
+/* DVS statistics generated by accelerator global configuration
+ */
+struct dvs_stat_public_dvs_global_cfg {
+ unsigned char kappa;
+ /** DVS statistics global configuration - kappa */
+ unsigned char match_shift;
+ /** DVS statistics global configuration - match_shift */
+ unsigned char ybin_mode;
+ /** DVS statistics global configuration - y binning mode */
+};
+
+/* DVS statistics generated by accelerator level grid
+ * configuration
+ */
+struct dvs_stat_public_dvs_level_grid_cfg {
+ unsigned char grid_width;
+ /** DVS statistics grid width */
+ unsigned char grid_height;
+ /** DVS statistics grid height */
+ unsigned char block_width;
+ /** DVS statistics block width */
+ unsigned char block_height;
+ /** DVS statistics block height */
+};
+
+/* DVS statistics generated by accelerator level grid start
+ * configuration
+ */
+struct dvs_stat_public_dvs_level_grid_start {
+ unsigned short x_start;
+ /** DVS statistics level x start */
+ unsigned short y_start;
+ /** DVS statistics level y start */
+ unsigned char enable;
+ /** DVS statistics level enable */
+};
+
+/* DVS statistics generated by accelerator level grid end
+ * configuration
+ */
+struct dvs_stat_public_dvs_level_grid_end {
+ unsigned short x_end;
+ /** DVS statistics level x end */
+ unsigned short y_end;
+ /** DVS statistics level y end */
+};
+
+/* DVS statistics generated by accelerator Feature Extraction
+ * Region Of Interest (FE-ROI) configuration
+ */
+struct dvs_stat_public_dvs_level_fe_roi_cfg {
+ unsigned char x_start;
+ /** DVS statistics fe-roi level x start */
+ unsigned char y_start;
+ /** DVS statistics fe-roi level y start */
+ unsigned char x_end;
+ /** DVS statistics fe-roi level x end */
+ unsigned char y_end;
+ /** DVS statistics fe-roi level y end */
+};
+
+/* DVS statistics generated by accelerator public configuration
+ */
+struct dvs_stat_public_dvs_grd_cfg {
+ struct dvs_stat_public_dvs_level_grid_cfg grd_cfg;
+ /** DVS statistics level grid configuration */
+ struct dvs_stat_public_dvs_level_grid_start grd_start;
+ /** DVS statistics level grid start configuration */
+ struct dvs_stat_public_dvs_level_grid_end grd_end;
+ /** DVS statistics level grid end configuration */
+};
+
+/* DVS statistics grid generated by accelerator
+ */
+struct ia_css_dvs_stat_grid_info {
+ struct dvs_stat_public_dvs_global_cfg dvs_gbl_cfg;
+ /** DVS statistics global configuration (kappa, match, binning) */
+ struct dvs_stat_public_dvs_grd_cfg grd_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
+ /** DVS statistics grid configuration (blocks and grids) */
+ struct dvs_stat_public_dvs_level_fe_roi_cfg
+ fe_roi_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
+ /** DVS statistics FE ROI (region of interest) configuration */
+};
+
+/* DVS statistics generated by accelerator default grid info
+ */
+#define DEFAULT_DVS_GRID_INFO \
+(union ia_css_dvs_grid_u) { \
+ .dvs_stat_grid_info = (struct ia_css_dvs_stat_grid_info) { \
+ .fe_roi_cfg = { \
+ [1] = (struct dvs_stat_public_dvs_level_fe_roi_cfg) { \
+ .x_start = 4 \
+ } \
+ } \
+ } \
+}
+
+/* Union that holds all types of DVS statistics grid info in
+ * CSS format
+ */
+union ia_css_dvs_grid_u {
+ struct ia_css_dvs_stat_grid_info dvs_stat_grid_info;
+ /** DVS statistics produced by accelerator grid info */
+ struct ia_css_dvs_grid_info dvs_grid_info;
+ /** DVS (DVS1/DVS2) grid info */
+};
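+
+/* Illustrative usage (hypothetical variable): the default expands to a fully
+ * zeroed union except for fe_roi_cfg[1].x_start:
+ *
+ *   union ia_css_dvs_grid_u grid = DEFAULT_DVS_GRID_INFO;
+ */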
+
+#endif /* __IA_CSS_SDIS_COMMON_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
new file mode 100644
index 000000000000..418481e016f7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
@@ -0,0 +1,437 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "memory_access.h"
+#include "assert_support.h"
+#include "ia_css_debug.h"
+#include "ia_css_sdis_types.h"
+#include "sdis/common/ia_css_sdis_common.host.h"
+#include "ia_css_sdis.host.h"
+
+const struct ia_css_dvs_coefficients default_sdis_config = {
+ .grid = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .hor_coefs = NULL,
+ .ver_coefs = NULL
+};
+
+static void
+fill_row(short *private, const short *public, unsigned int width,
+ unsigned int padding)
+{
+ assert((int)width >= 0);
+ assert((int)padding >= 0);
+ memcpy(private, public, width * sizeof(short));
+ memset(&private[width], 0, padding * sizeof(short));
+}
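+
+/* fill_row() copies width coefficients and zero-fills padding entries so
+ * that each row occupies a full vector stride; e.g. width 60 with a
+ * 64-element stride leaves 4 trailing zeros. */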
+
+void ia_css_sdis_horicoef_vmem_encode(
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size)
+{
+ unsigned int aligned_width = from->grid.aligned_width *
+ from->grid.bqs_per_grid_cell;
+ unsigned int width = from->grid.num_hor_coefs;
+ int padding = aligned_width - width;
+ unsigned int stride = size / IA_CSS_DVS_NUM_COEF_TYPES / sizeof(short);
+	unsigned int total_bytes =
+		aligned_width * IA_CSS_DVS_NUM_COEF_TYPES * sizeof(short);
+ short *public = from->hor_coefs;
+ short *private = (short *)to;
+ unsigned int type;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+	assert(size % (IA_CSS_DVS_NUM_COEF_TYPES * ISP_VEC_NELEMS *
+		       sizeof(short)) == 0);
+
+ for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) {
+ fill_row(&private[type * stride], &public[type * width], width, padding);
+ }
+}
+
+void ia_css_sdis_vertcoef_vmem_encode(
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size)
+{
+ unsigned int aligned_height = from->grid.aligned_height *
+ from->grid.bqs_per_grid_cell;
+ unsigned int height = from->grid.num_ver_coefs;
+ int padding = aligned_height - height;
+ unsigned int stride = size / IA_CSS_DVS_NUM_COEF_TYPES / sizeof(short);
+	unsigned int total_bytes =
+		aligned_height * IA_CSS_DVS_NUM_COEF_TYPES * sizeof(short);
+ short *public = from->ver_coefs;
+ short *private = (short *)to;
+ unsigned int type;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+	assert(size % (IA_CSS_DVS_NUM_COEF_TYPES * ISP_VEC_NELEMS *
+		       sizeof(short)) == 0);
+
+ for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) {
+ fill_row(&private[type * stride], &public[type * height], height, padding);
+ }
+}
+
+void ia_css_sdis_horiproj_encode(
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_sdis_vertproj_encode(
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_get_isp_dis_coefficients(
+ struct ia_css_stream *stream,
+ short *horizontal_coefficients,
+ short *vertical_coefficients)
+{
+ struct ia_css_isp_parameters *params;
+ unsigned int hor_num_isp, ver_num_isp;
+ unsigned int hor_num_3a, ver_num_3a;
+ int i;
+ struct ia_css_binary *dvs_binary;
+
+ IA_CSS_ENTER("void");
+
+ assert(horizontal_coefficients);
+ assert(vertical_coefficients);
+
+ params = stream->isp_params_configs;
+
+ /* Only video pipe supports DVS */
+ dvs_binary = ia_css_stream_get_dvs_binary(stream);
+ if (!dvs_binary)
+ return;
+
+ hor_num_isp = dvs_binary->dis.coef.pad.width;
+ ver_num_isp = dvs_binary->dis.coef.pad.height;
+ hor_num_3a = dvs_binary->dis.coef.dim.width;
+ ver_num_3a = dvs_binary->dis.coef.dim.height;
+
+ for (i = 0; i < IA_CSS_DVS_NUM_COEF_TYPES; i++) {
+ fill_row(&horizontal_coefficients[i * hor_num_isp],
+ &params->dvs_coefs.hor_coefs[i * hor_num_3a], hor_num_3a,
+ hor_num_isp - hor_num_3a);
+ }
+ for (i = 0; i < SH_CSS_DIS_VER_NUM_COEF_TYPES(dvs_binary); i++) {
+ fill_row(&vertical_coefficients[i * ver_num_isp],
+ &params->dvs_coefs.ver_coefs[i * ver_num_3a], ver_num_3a,
+ ver_num_isp - ver_num_3a);
+ }
+
+ IA_CSS_LEAVE("void");
+}
+
+size_t
+ia_css_sdis_hor_coef_tbl_bytes(
+ const struct ia_css_binary *binary)
+{
+ if (binary->info->sp.pipeline.isp_pipe_version == 1)
+ return sizeof(short) * IA_CSS_DVS_NUM_COEF_TYPES * binary->dis.coef.pad.width;
+ else
+ return sizeof(short) * IA_CSS_DVS2_NUM_COEF_TYPES * binary->dis.coef.pad.width;
+}
+
+size_t
+ia_css_sdis_ver_coef_tbl_bytes(
+ const struct ia_css_binary *binary)
+{
+ return sizeof(short) * SH_CSS_DIS_VER_NUM_COEF_TYPES(binary) *
+ binary->dis.coef.pad.height;
+}
+
+void
+ia_css_sdis_init_info(
+ struct ia_css_sdis_info *dis,
+ unsigned int sc_3a_dis_width,
+ unsigned int sc_3a_dis_padded_width,
+ unsigned int sc_3a_dis_height,
+ unsigned int isp_pipe_version,
+ unsigned int enabled)
+{
+ if (!enabled) {
+ *dis = (struct ia_css_sdis_info) { };
+ return;
+ }
+
+ dis->deci_factor_log2 = SH_CSS_DIS_DECI_FACTOR_LOG2;
+
+ dis->grid.dim.width =
+ _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->grid.dim.height =
+ _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->grid.pad.width =
+ CEIL_SHIFT(_ISP_BQS(sc_3a_dis_padded_width), SH_CSS_DIS_DECI_FACTOR_LOG2);
+ dis->grid.pad.height =
+ CEIL_SHIFT(_ISP_BQS(sc_3a_dis_height), SH_CSS_DIS_DECI_FACTOR_LOG2);
+
+ dis->coef.dim.width =
+ (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) <<
+ SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->coef.dim.height =
+ (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2) <<
+ SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->coef.pad.width =
+ __ISP_SDIS_HOR_COEF_NUM_VECS(sc_3a_dis_padded_width) * ISP_VEC_NELEMS;
+ dis->coef.pad.height =
+ __ISP_SDIS_VER_COEF_NUM_VECS(sc_3a_dis_height) * ISP_VEC_NELEMS;
+ if (isp_pipe_version == 1) {
+ dis->proj.dim.width =
+ _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->proj.dim.height =
+ _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ } else {
+ dis->proj.dim.width =
+ (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) *
+ (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2);
+ dis->proj.dim.height =
+ (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) *
+ (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2);
+ }
+ dis->proj.pad.width =
+ __ISP_SDIS_HOR_PROJ_NUM_ISP(sc_3a_dis_padded_width,
+ sc_3a_dis_height,
+ SH_CSS_DIS_DECI_FACTOR_LOG2,
+ isp_pipe_version);
+ dis->proj.pad.height =
+ __ISP_SDIS_VER_PROJ_NUM_ISP(sc_3a_dis_padded_width,
+ SH_CSS_DIS_DECI_FACTOR_LOG2);
+}
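+
+/* Worked example (assuming SH_CSS_DIS_DECI_FACTOR_LOG2 == 6): a DIS width of
+ * 1280 bqs gives grid.dim.width = 1280 >> 6 = 20 and
+ * coef.dim.width = 20 << 6 = 1280, i.e. the coefficient count is the width
+ * rounded down to whole grid cells. */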
+
+void ia_css_sdis_clear_coefficients(
+ struct ia_css_dvs_coefficients *dvs_coefs)
+{
+ dvs_coefs->hor_coefs = NULL;
+ dvs_coefs->ver_coefs = NULL;
+}
+
+enum ia_css_err
+ia_css_get_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats) {
+ struct ia_css_isp_dvs_statistics_map *map;
+ enum ia_css_err ret = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);
+
+ assert(host_stats);
+ assert(isp_stats);
+
+ map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
+	if (map) {
+ mmgr_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_dvs_statistics(host_stats, map);
+ ia_css_isp_dvs_statistics_map_free(map);
+	} else {
+ IA_CSS_ERROR("out of memory");
+ ret = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+void
+ia_css_translate_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats)
+{
+ unsigned int hor_num_isp, ver_num_isp, hor_num_dvs, ver_num_dvs, i;
+ s32 *hor_ptr_dvs, *ver_ptr_dvs, *hor_ptr_isp, *ver_ptr_isp;
+
+ assert(host_stats);
+ assert(host_stats->hor_proj);
+ assert(host_stats->ver_proj);
+ assert(isp_stats);
+ assert(isp_stats->hor_proj);
+ assert(isp_stats->ver_proj);
+
+ IA_CSS_ENTER("hproj=%p, vproj=%p, haddr=%p, vaddr=%p",
+ host_stats->hor_proj, host_stats->ver_proj,
+ isp_stats->hor_proj, isp_stats->ver_proj);
+
+ hor_num_isp = host_stats->grid.aligned_height;
+ ver_num_isp = host_stats->grid.aligned_width;
+ hor_ptr_isp = isp_stats->hor_proj;
+ ver_ptr_isp = isp_stats->ver_proj;
+ hor_num_dvs = host_stats->grid.height;
+ ver_num_dvs = host_stats->grid.width;
+ hor_ptr_dvs = host_stats->hor_proj;
+ ver_ptr_dvs = host_stats->ver_proj;
+
+ for (i = 0; i < IA_CSS_DVS_NUM_COEF_TYPES; i++) {
+ memcpy(hor_ptr_dvs, hor_ptr_isp, hor_num_dvs * sizeof(int32_t));
+ hor_ptr_isp += hor_num_isp;
+ hor_ptr_dvs += hor_num_dvs;
+
+ memcpy(ver_ptr_dvs, ver_ptr_isp, ver_num_dvs * sizeof(int32_t));
+ ver_ptr_isp += ver_num_isp;
+ ver_ptr_dvs += ver_num_dvs;
+ }
+
+ IA_CSS_LEAVE("void");
+}
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_isp_dvs_statistics *me;
+ int hor_size, ver_size;
+
+ assert(grid);
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ if (!grid->enable)
+ return NULL;
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ hor_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES *
+ grid->aligned_height,
+ HIVE_ISP_DDR_WORD_BYTES);
+ ver_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES *
+ grid->aligned_width,
+ HIVE_ISP_DDR_WORD_BYTES);
+
+ me->size = hor_size + ver_size;
+ me->data_ptr = mmgr_malloc(me->size);
+ if (me->data_ptr == mmgr_NULL)
+ goto err;
+ me->hor_size = hor_size;
+ me->hor_proj = me->data_ptr;
+ me->ver_size = ver_size;
+ me->ver_proj = me->data_ptr + hor_size;
+
+ IA_CSS_LEAVE("return=%p", me);
+
+ return me;
+err:
+ ia_css_isp_dvs_statistics_free(me);
+
+ IA_CSS_LEAVE("return=%p", NULL);
+
+ return NULL;
+}
+
+struct ia_css_isp_dvs_statistics_map *
+ia_css_isp_dvs_statistics_map_allocate(
+ const struct ia_css_isp_dvs_statistics *isp_stats,
+ void *data_ptr)
+{
+ struct ia_css_isp_dvs_statistics_map *me;
+ /* Windows compiler does not like adding sizes to a void *
+ * so we use a local char * instead. */
+ char *base_ptr;
+
+ me = sh_css_malloc(sizeof(*me));
+ if (!me) {
+ IA_CSS_LOG("cannot allocate memory");
+ goto err;
+ }
+
+ me->data_ptr = data_ptr;
+ me->data_allocated = !data_ptr;
+
+ if (!me->data_ptr) {
+ me->data_ptr = sh_css_malloc(isp_stats->size);
+ if (!me->data_ptr) {
+ IA_CSS_LOG("cannot allocate memory");
+ goto err;
+ }
+ }
+ base_ptr = me->data_ptr;
+
+ me->size = isp_stats->size;
+	/* Pointer arithmetic on a void * is not valid C, so the offsets are
+	 * computed on a char * and the results stored in the void * fields. */
+ me->hor_proj = (void *)base_ptr;
+ me->ver_proj = (void *)(base_ptr + isp_stats->hor_size);
+
+ return me;
+err:
+ if (me)
+ sh_css_free(me);
+ return NULL;
+}
+
+void
+ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me)
+{
+ if (me) {
+ if (me->data_allocated)
+ sh_css_free(me->data_ptr);
+ sh_css_free(me);
+ }
+}
+
+void
+ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me)
+{
+ if (me) {
+ hmm_free(me->data_ptr);
+ sh_css_free(me);
+ }
+}
+
+void ia_css_sdis_horicoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis_vertcoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis_horiproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis_vertproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h
new file mode 100644
index 000000000000..b1b0cb8ea175
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h
@@ -0,0 +1,101 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS_HOST_H
+#define __IA_CSS_SDIS_HOST_H
+
+#include "ia_css_sdis_types.h"
+#include "ia_css_binary.h"
+#include "ia_css_stream.h"
+#include "sh_css_params.h"
+
+extern const struct ia_css_dvs_coefficients default_sdis_config;
+
+/* Opaque here, since size is binary dependent. */
+struct sh_css_isp_sdis_hori_coef_tbl;
+struct sh_css_isp_sdis_vert_coef_tbl;
+struct sh_css_isp_sdis_hori_proj_tbl;
+struct sh_css_isp_sdis_vert_proj_tbl;
+
+void ia_css_sdis_horicoef_vmem_encode(
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis_vertcoef_vmem_encode(
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis_horiproj_encode(
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis_vertproj_encode(
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size);
+
+void ia_css_get_isp_dis_coefficients(
+ struct ia_css_stream *stream,
+ short *horizontal_coefficients,
+ short *vertical_coefficients);
+
+enum ia_css_err
+ia_css_get_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+void
+ia_css_translate_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid);
+
+void
+ia_css_isp_dvs_statistics_free(
+ struct ia_css_isp_dvs_statistics *me);
+
+size_t ia_css_sdis_hor_coef_tbl_bytes(const struct ia_css_binary *binary);
+size_t ia_css_sdis_ver_coef_tbl_bytes(const struct ia_css_binary *binary);
+
+void
+ia_css_sdis_init_info(
+ struct ia_css_sdis_info *dis,
+ unsigned int sc_3a_dis_width,
+ unsigned int sc_3a_dis_padded_width,
+ unsigned int sc_3a_dis_height,
+ unsigned int isp_pipe_version,
+ unsigned int enabled);
+
+void ia_css_sdis_clear_coefficients(
+ struct ia_css_dvs_coefficients *dvs_coefs);
+
+void ia_css_sdis_horicoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level);
+
+void ia_css_sdis_vertcoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level);
+
+void ia_css_sdis_horiproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level);
+
+void ia_css_sdis_vertproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level);
+
+#endif /* __IA_CSS_SDIS_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
new file mode 100644
index 000000000000..5542fa5555b4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
@@ -0,0 +1,55 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS_TYPES_H
+#define __IA_CSS_SDIS_TYPES_H
+
+/* @file
+* CSS-API header file for DVS statistics parameters.
+*/
+
+/* Number of DVS coefficient types */
+#define IA_CSS_DVS_NUM_COEF_TYPES 6
+
+#ifndef PIPE_GENERATION
+#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
+#endif
+
+/* DVS 1.0 Coefficients.
+ * This structure describes the coefficients that are needed for the dvs statistics.
+ */
+
+struct ia_css_dvs_coefficients {
+ struct ia_css_dvs_grid_info
+ grid;/** grid info contains the dimensions of the dvs grid */
+ s16 *hor_coefs; /** the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the horizontal coefficients */
+ s16 *ver_coefs; /** the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the vertical coefficients */
+};
+
+/* DVS 1.0 Statistics.
+ * This structure describes the statistics that are generated using the provided coefficients.
+ */
+
+struct ia_css_dvs_statistics {
+ struct ia_css_dvs_grid_info
+ grid;/** grid info contains the dimensions of the dvs grid */
+	s32 *hor_proj;	/** the pointer to int32_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
+			     containing the horizontal projections */
+	s32 *ver_proj;	/** the pointer to int32_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
+			     containing the vertical projections */
+};
+
+#endif /* __IA_CSS_SDIS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
new file mode 100644
index 000000000000..20fa7d924d58
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
@@ -0,0 +1,350 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <assert_support.h>
+#include "memory_access.h"
+#include "ia_css_debug.h"
+#include "ia_css_sdis2.host.h"
+
+const struct ia_css_dvs2_coefficients default_sdis2_config = {
+ .grid = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .hor_coefs = { NULL, NULL, NULL, NULL },
+ .ver_coefs = { NULL, NULL, NULL, NULL },
+};
+
+static void
+fill_row(short *private, const short *public, unsigned int width,
+ unsigned int padding)
+{
+ memcpy(private, public, width * sizeof(short));
+ memset(&private[width], 0, padding * sizeof(short));
+}
+
+void ia_css_sdis2_horicoef_vmem_encode(
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size)
+{
+ unsigned int aligned_width = from->grid.aligned_width *
+ from->grid.bqs_per_grid_cell;
+ unsigned int width = from->grid.num_hor_coefs;
+ int padding = aligned_width - width;
+ unsigned int stride = size / IA_CSS_DVS2_NUM_COEF_TYPES / sizeof(short);
+	unsigned int total_bytes =
+		aligned_width * IA_CSS_DVS2_NUM_COEF_TYPES * sizeof(short);
+ short *private = (short *)to;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+	assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES * ISP_VEC_NELEMS *
+		       sizeof(short)) == 0);
+ fill_row(&private[0 * stride], from->hor_coefs.odd_real, width, padding);
+ fill_row(&private[1 * stride], from->hor_coefs.odd_imag, width, padding);
+ fill_row(&private[2 * stride], from->hor_coefs.even_real, width, padding);
+ fill_row(&private[3 * stride], from->hor_coefs.even_imag, width, padding);
+}
+
+void ia_css_sdis2_vertcoef_vmem_encode(
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size)
+{
+ unsigned int aligned_height = from->grid.aligned_height *
+ from->grid.bqs_per_grid_cell;
+ unsigned int height = from->grid.num_ver_coefs;
+ int padding = aligned_height - height;
+ unsigned int stride = size / IA_CSS_DVS2_NUM_COEF_TYPES / sizeof(short);
+	unsigned int total_bytes =
+		aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES * sizeof(short);
+ short *private = (short *)to;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+	assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES * ISP_VEC_NELEMS *
+		       sizeof(short)) == 0);
+ fill_row(&private[0 * stride], from->ver_coefs.odd_real, height, padding);
+ fill_row(&private[1 * stride], from->ver_coefs.odd_imag, height, padding);
+ fill_row(&private[2 * stride], from->ver_coefs.even_real, height, padding);
+ fill_row(&private[3 * stride], from->ver_coefs.even_imag, height, padding);
+}
+
+void ia_css_sdis2_horiproj_encode(
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_sdis2_vertproj_encode(
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_get_isp_dvs2_coefficients(
+ struct ia_css_stream *stream,
+ short *hor_coefs_odd_real,
+ short *hor_coefs_odd_imag,
+ short *hor_coefs_even_real,
+ short *hor_coefs_even_imag,
+ short *ver_coefs_odd_real,
+ short *ver_coefs_odd_imag,
+ short *ver_coefs_even_real,
+ short *ver_coefs_even_imag)
+{
+ struct ia_css_isp_parameters *params;
+ unsigned int hor_num_3a, ver_num_3a;
+ unsigned int hor_num_isp, ver_num_isp;
+ struct ia_css_binary *dvs_binary;
+
+ IA_CSS_ENTER("void");
+
+ assert(stream);
+ assert(hor_coefs_odd_real);
+ assert(hor_coefs_odd_imag);
+ assert(hor_coefs_even_real);
+ assert(hor_coefs_even_imag);
+ assert(ver_coefs_odd_real);
+ assert(ver_coefs_odd_imag);
+ assert(ver_coefs_even_real);
+ assert(ver_coefs_even_imag);
+
+ params = stream->isp_params_configs;
+
+ /* Only video pipe supports DVS */
+ dvs_binary = ia_css_stream_get_dvs_binary(stream);
+ if (!dvs_binary)
+ return;
+
+ hor_num_3a = dvs_binary->dis.coef.dim.width;
+ ver_num_3a = dvs_binary->dis.coef.dim.height;
+ hor_num_isp = dvs_binary->dis.coef.pad.width;
+ ver_num_isp = dvs_binary->dis.coef.pad.height;
+
+ memcpy(hor_coefs_odd_real, params->dvs2_coefs.hor_coefs.odd_real,
+ hor_num_3a * sizeof(short));
+ memcpy(hor_coefs_odd_imag, params->dvs2_coefs.hor_coefs.odd_imag,
+ hor_num_3a * sizeof(short));
+ memcpy(hor_coefs_even_real, params->dvs2_coefs.hor_coefs.even_real,
+ hor_num_3a * sizeof(short));
+ memcpy(hor_coefs_even_imag, params->dvs2_coefs.hor_coefs.even_imag,
+ hor_num_3a * sizeof(short));
+ memcpy(ver_coefs_odd_real, params->dvs2_coefs.ver_coefs.odd_real,
+ ver_num_3a * sizeof(short));
+ memcpy(ver_coefs_odd_imag, params->dvs2_coefs.ver_coefs.odd_imag,
+ ver_num_3a * sizeof(short));
+ memcpy(ver_coefs_even_real, params->dvs2_coefs.ver_coefs.even_real,
+ ver_num_3a * sizeof(short));
+ memcpy(ver_coefs_even_imag, params->dvs2_coefs.ver_coefs.even_imag,
+ ver_num_3a * sizeof(short));
+
+ IA_CSS_LEAVE("void");
+}
+
+void ia_css_sdis2_clear_coefficients(
+ struct ia_css_dvs2_coefficients *dvs2_coefs)
+{
+ dvs2_coefs->hor_coefs.odd_real = NULL;
+ dvs2_coefs->hor_coefs.odd_imag = NULL;
+ dvs2_coefs->hor_coefs.even_real = NULL;
+ dvs2_coefs->hor_coefs.even_imag = NULL;
+ dvs2_coefs->ver_coefs.odd_real = NULL;
+ dvs2_coefs->ver_coefs.odd_imag = NULL;
+ dvs2_coefs->ver_coefs.even_real = NULL;
+ dvs2_coefs->ver_coefs.even_imag = NULL;
+}
+
+enum ia_css_err
+ia_css_get_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats) {
+ struct ia_css_isp_dvs_statistics_map *map;
+ enum ia_css_err ret = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);
+
+ assert(host_stats);
+ assert(isp_stats);
+
+ map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
+	if (map) {
+ mmgr_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_dvs2_statistics(host_stats, map);
+ ia_css_isp_dvs_statistics_map_free(map);
+	} else {
+ IA_CSS_ERROR("out of memory");
+ ret = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+void
+ia_css_translate_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats)
+{
+ unsigned int size_bytes, table_width, table_size, height;
+ unsigned int src_offset = 0, dst_offset = 0;
+ s32 *htemp_ptr, *vtemp_ptr;
+
+ assert(host_stats);
+ assert(host_stats->hor_prod.odd_real);
+ assert(host_stats->hor_prod.odd_imag);
+ assert(host_stats->hor_prod.even_real);
+ assert(host_stats->hor_prod.even_imag);
+ assert(host_stats->ver_prod.odd_real);
+ assert(host_stats->ver_prod.odd_imag);
+ assert(host_stats->ver_prod.even_real);
+ assert(host_stats->ver_prod.even_imag);
+ assert(isp_stats);
+ assert(isp_stats->hor_proj);
+ assert(isp_stats->ver_proj);
+
+ IA_CSS_ENTER("hor_coefs.odd_real=%p, hor_coefs.odd_imag=%p, hor_coefs.even_real=%p, hor_coefs.even_imag=%p, ver_coefs.odd_real=%p, ver_coefs.odd_imag=%p, ver_coefs.even_real=%p, ver_coefs.even_imag=%p, haddr=%p, vaddr=%p",
+ host_stats->hor_prod.odd_real, host_stats->hor_prod.odd_imag,
+ host_stats->hor_prod.even_real, host_stats->hor_prod.even_imag,
+ host_stats->ver_prod.odd_real, host_stats->ver_prod.odd_imag,
+ host_stats->ver_prod.even_real, host_stats->ver_prod.even_imag,
+ isp_stats->hor_proj, isp_stats->ver_proj);
+
+ /* Host side: reflecting the true width in bytes */
+ size_bytes = host_stats->grid.aligned_width * sizeof(*htemp_ptr);
+
+ /* DDR side: need to be aligned to the system bus width */
+ /* statistics table width in terms of 32-bit words*/
+ table_width = CEIL_MUL(size_bytes,
+ HIVE_ISP_DDR_WORD_BYTES) / sizeof(*htemp_ptr);
+ table_size = table_width * host_stats->grid.aligned_height;
+
+ htemp_ptr = isp_stats->hor_proj; /* horizontal stats */
+ vtemp_ptr = isp_stats->ver_proj; /* vertical stats */
+ for (height = 0; height < host_stats->grid.aligned_height; height++) {
+ /* hor stats */
+ memcpy(host_stats->hor_prod.odd_real + dst_offset,
+ &htemp_ptr[0 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->hor_prod.odd_imag + dst_offset,
+ &htemp_ptr[1 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->hor_prod.even_real + dst_offset,
+ &htemp_ptr[2 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->hor_prod.even_imag + dst_offset,
+ &htemp_ptr[3 * table_size + src_offset], size_bytes);
+
+ /* ver stats */
+ memcpy(host_stats->ver_prod.odd_real + dst_offset,
+ &vtemp_ptr[0 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->ver_prod.odd_imag + dst_offset,
+ &vtemp_ptr[1 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->ver_prod.even_real + dst_offset,
+ &vtemp_ptr[2 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->ver_prod.even_imag + dst_offset,
+ &vtemp_ptr[3 * table_size + src_offset], size_bytes);
+
+ src_offset += table_width; /* aligned table width */
+ dst_offset += host_stats->grid.aligned_width;
+ }
+
+ IA_CSS_LEAVE("void");
+}
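+
+/*
+ * Editor's note: a minimal sketch of the DDR row-stride arithmetic used
+ * above. The 32-byte DDR word and the 57-element aligned width are
+ * illustrative assumptions, not values taken from this patch.
+ */
+#define EX_CEIL_MUL(a, b)	(((a) + (b) - 1) / (b) * (b))
+
+static unsigned int example_dvs2_table_width(unsigned int aligned_width)
+{
+	/* host-side row size in bytes (s32 statistics) */
+	unsigned int size_bytes = aligned_width * sizeof(s32);
+
+	/* DDR rows are padded up to the bus word size, assumed 32 bytes */
+	return EX_CEIL_MUL(size_bytes, 32) / sizeof(s32);
+}
+
+/*
+ * With aligned_width = 57: 57 * 4 = 228 bytes, padded to 256 bytes, i.e.
+ * a 64-word stride. That is why src_offset above advances by table_width
+ * while dst_offset advances by aligned_width.
+ */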
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs2_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_isp_dvs_statistics *me;
+ int size;
+
+ assert(grid);
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ if (!grid->enable)
+ return NULL;
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+	/* In the ISP 2 SDIS DMA model, every row of the projection table
+	 * must be aligned to HIVE_ISP_DDR_WORD_BYTES.
+	 */
+ size = CEIL_MUL(sizeof(int) * grid->aligned_width, HIVE_ISP_DDR_WORD_BYTES)
+ * grid->aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES;
+
+ me->size = 2 * size;
+ me->data_ptr = mmgr_malloc(me->size);
+ if (me->data_ptr == mmgr_NULL)
+ goto err;
+ me->hor_proj = me->data_ptr;
+ me->hor_size = size;
+ me->ver_proj = me->data_ptr + size;
+ me->ver_size = size;
+
+ IA_CSS_LEAVE("return=%p", me);
+ return me;
+err:
+ ia_css_isp_dvs2_statistics_free(me);
+ IA_CSS_LEAVE("return=%p", NULL);
+
+ return NULL;
+}
+
+void
+ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me)
+{
+ if (me) {
+ hmm_free(me->data_ptr);
+ sh_css_free(me);
+ }
+}
+
+void ia_css_sdis2_horicoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis2_vertcoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis2_horiproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis2_vertproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h
new file mode 100644
index 000000000000..a966a6bcb692
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h
@@ -0,0 +1,95 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS2_HOST_H
+#define __IA_CSS_SDIS2_HOST_H
+
+#include "ia_css_sdis2_types.h"
+#include "ia_css_binary.h"
+#include "ia_css_stream.h"
+#include "sh_css_params.h"
+
+extern const struct ia_css_dvs2_coefficients default_sdis2_config;
+
+/* Opaque here, since size is binary dependent. */
+struct sh_css_isp_sdis_hori_coef_tbl;
+struct sh_css_isp_sdis_vert_coef_tbl;
+struct sh_css_isp_sdis_hori_proj_tbl;
+struct sh_css_isp_sdis_vert_proj_tbl;
+
+void ia_css_sdis2_horicoef_vmem_encode(
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis2_vertcoef_vmem_encode(
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis2_horiproj_encode(
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis2_vertproj_encode(
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size);
+
+void ia_css_get_isp_dvs2_coefficients(
+ struct ia_css_stream *stream,
+ short *hor_coefs_odd_real,
+ short *hor_coefs_odd_imag,
+ short *hor_coefs_even_real,
+ short *hor_coefs_even_imag,
+ short *ver_coefs_odd_real,
+ short *ver_coefs_odd_imag,
+ short *ver_coefs_even_real,
+ short *ver_coefs_even_imag);
+
+void ia_css_sdis2_clear_coefficients(
+ struct ia_css_dvs2_coefficients *dvs2_coefs);
+
+enum ia_css_err
+ia_css_get_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+void
+ia_css_translate_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs2_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid);
+
+void
+ia_css_isp_dvs2_statistics_free(
+ struct ia_css_isp_dvs_statistics *me);
+
+void ia_css_sdis2_horicoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level);
+
+void ia_css_sdis2_vertcoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level);
+
+void ia_css_sdis2_horiproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level);
+
+void ia_css_sdis2_vertproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level);
+
+#endif /* __IA_CSS_SDIS2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
new file mode 100644
index 000000000000..e8ae135bfd6a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
@@ -0,0 +1,75 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS2_TYPES_H
+#define __IA_CSS_SDIS2_TYPES_H
+
+/* @file
+* CSS-API header file for DVS statistics parameters.
+*/
+
+/* Number of DVS coefficient types */
+#define IA_CSS_DVS2_NUM_COEF_TYPES 4
+
+#ifndef PIPE_GENERATION
+#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
+#endif
+
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
+ * arrays that contain the coefficients for each type.
+ */
+struct ia_css_dvs2_coef_types {
+ s16 *odd_real; /** real part of the odd coefficients*/
+ s16 *odd_imag; /** imaginary part of the odd coefficients*/
+ s16 *even_real;/** real part of the even coefficients*/
+ s16 *even_imag;/** imaginary part of the even coefficients*/
+};
+
+/* DVS 2.0 Coefficients. This structure describes the coefficients that are
+ * needed for the dvs statistics. e.g. hor_coefs.odd_real is a pointer to
+ * int16_t[grid.num_hor_coefs] containing the horizontal odd real
+ * coefficients.
+ */
+struct ia_css_dvs2_coefficients {
+	struct ia_css_dvs_grid_info grid;        /** grid info contains the dimensions of the dvs grid */
+	struct ia_css_dvs2_coef_types hor_coefs; /** struct with pointers to the horizontal coefficients */
+	struct ia_css_dvs2_coef_types ver_coefs; /** struct with pointers to the vertical coefficients */
+};
+
+/* DVS 2.0 Statistic types. This structure contains 4 pointers to
+ * arrays that contain the statistics for each type.
+ */
+struct ia_css_dvs2_stat_types {
+ s32 *odd_real; /** real part of the odd statistics*/
+ s32 *odd_imag; /** imaginary part of the odd statistics*/
+ s32 *even_real;/** real part of the even statistics*/
+ s32 *even_imag;/** imaginary part of the even statistics*/
+};
+
+/* DVS 2.0 Statistics. This structure describes the statistics that are
+ * generated using the provided coefficients. e.g. hor_prod.odd_real is a
+ * pointer to int32_t[grid.aligned_height][grid.aligned_width] containing
+ * the horizontal odd real statistics. Valid statistics data area is
+ * int32_t[0..grid.height-1][0..grid.width-1]
+ */
+struct ia_css_dvs2_statistics {
+	struct ia_css_dvs_grid_info grid;       /** grid info contains the dimensions of the dvs grid */
+	struct ia_css_dvs2_stat_types hor_prod; /** struct with pointers to the horizontal statistics */
+	struct ia_css_dvs2_stat_types ver_prod; /** struct with pointers to the vertical statistics */
+};
+
+#endif /* __IA_CSS_SDIS2_TYPES_H */
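+
+/*
+ * Editor's note: a hedged sketch of how a caller might populate
+ * struct ia_css_dvs2_coefficients per the layout described above. It
+ * assumes the grid carries num_hor_coefs/num_ver_coefs (the counts named
+ * in the comment on ia_css_dvs2_coefficients) and uses plain kcalloc()
+ * from <linux/slab.h>; the driver manages these buffers elsewhere.
+ */
+static int example_alloc_dvs2_coefs(struct ia_css_dvs2_coefficients *c,
+				    const struct ia_css_dvs_grid_info *grid)
+{
+	c->grid = *grid;
+	c->hor_coefs.odd_real = kcalloc(grid->num_hor_coefs,
+					sizeof(s16), GFP_KERNEL);
+	c->ver_coefs.odd_real = kcalloc(grid->num_ver_coefs,
+					sizeof(s16), GFP_KERNEL);
+	/* ...repeat for odd_imag/even_real/even_imag on both axes... */
+	if (!c->hor_coefs.odd_real || !c->ver_coefs.odd_real)
+		return -ENOMEM;
+	return 0;
+}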
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c
new file mode 100644
index 000000000000..69921c27bfae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c
@@ -0,0 +1,74 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_debug.h"
+#include "ia_css_tdf.host.h"
+
+static const s16 g_pyramid[8][8] = {
+ {128, 384, 640, 896, 896, 640, 384, 128},
+ {384, 1152, 1920, 2688, 2688, 1920, 1152, 384},
+ {640, 1920, 3200, 4480, 4480, 3200, 1920, 640},
+ {896, 2688, 4480, 6272, 6272, 4480, 2688, 896},
+ {896, 2688, 4480, 6272, 6272, 4480, 2688, 896},
+ {640, 1920, 3200, 4480, 4480, 3200, 1920, 640},
+ {384, 1152, 1920, 2688, 2688, 1920, 1152, 384},
+ {128, 384, 640, 896, 896, 640, 384, 128}
+};
+
+void
+ia_css_tdf_vmem_encode(
+ struct ia_css_isp_tdf_vmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size)
+{
+ unsigned int i;
+ (void)size;
+
+ for (i = 0; i < ISP_VEC_NELEMS; i++) {
+ to->pyramid[0][i] = g_pyramid[i / 8][i % 8];
+ to->threshold_flat[0][i] = from->thres_flat_table[i];
+ to->threshold_detail[0][i] = from->thres_detail_table[i];
+ }
+}
+
+void
+ia_css_tdf_encode(
+ struct ia_css_isp_tdf_dmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size)
+{
+ (void)size;
+ to->Epsilon_0 = from->epsilon_0;
+ to->Epsilon_1 = from->epsilon_1;
+ to->EpsScaleText = from->eps_scale_text;
+ to->EpsScaleEdge = from->eps_scale_edge;
+ to->Sepa_flat = from->sepa_flat;
+ to->Sepa_Edge = from->sepa_edge;
+ to->Blend_Flat = from->blend_flat;
+ to->Blend_Text = from->blend_text;
+ to->Blend_Edge = from->blend_edge;
+ to->Shading_Gain = from->shading_gain;
+ to->Shading_baseGain = from->shading_base_gain;
+ to->LocalY_Gain = from->local_y_gain;
+ to->LocalY_baseGain = from->local_y_base_gain;
+}
+
+void
+ia_css_tdf_debug_dtrace(
+ const struct ia_css_tdf_config *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h
new file mode 100644
index 000000000000..bc6e1653e354
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TDF_HOST_H
+#define __IA_CSS_TDF_HOST_H
+
+#include "ia_css_tdf_types.h"
+#include "ia_css_tdf_param.h"
+
+void
+ia_css_tdf_vmem_encode(
+ struct ia_css_isp_tdf_vmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size);
+
+void
+ia_css_tdf_encode(
+ struct ia_css_isp_tdf_dmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size);
+
+void
+ia_css_tdf_debug_dtrace(
+	const struct ia_css_tdf_config *config, unsigned int level);
+
+#endif /* __IA_CSS_TDF_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h
new file mode 100644
index 000000000000..a93891448cde
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TDF_PARAM_H
+#define __IA_CSS_TDF_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+struct ia_css_isp_tdf_vmem_params {
+ VMEM_ARRAY(pyramid, ISP_VEC_NELEMS);
+ VMEM_ARRAY(threshold_flat, ISP_VEC_NELEMS);
+ VMEM_ARRAY(threshold_detail, ISP_VEC_NELEMS);
+};
+
+struct ia_css_isp_tdf_dmem_params {
+ s32 Epsilon_0;
+ s32 Epsilon_1;
+ s32 EpsScaleText;
+ s32 EpsScaleEdge;
+ s32 Sepa_flat;
+ s32 Sepa_Edge;
+ s32 Blend_Flat;
+ s32 Blend_Text;
+ s32 Blend_Edge;
+ s32 Shading_Gain;
+ s32 Shading_baseGain;
+ s32 LocalY_Gain;
+ s32 LocalY_baseGain;
+};
+
+#endif /* __IA_CSS_TDF_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
new file mode 100644
index 000000000000..e4263afee7da
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
@@ -0,0 +1,52 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TDF_TYPES_H
+#define __IA_CSS_TDF_TYPES_H
+
+/* @file
+* CSS-API header file for Transform Domain Filter parameters.
+*/
+
+#include "type_support.h"
+
+/* Transform Domain Filter configuration
+ *
+ * \brief TDF public parameters.
+ * \details Struct with all parameters for the TDF kernel that can be set
+ * from the CSS API.
+ *
+ * ISP2.6.1: TDF is used.
+ */
+struct ia_css_tdf_config {
+ s32 thres_flat_table[64]; /** Final optimized strength table of NR for flat region. */
+ s32 thres_detail_table[64]; /** Final optimized strength table of NR for detail region. */
+ s32 epsilon_0; /** Coefficient to control variance for dark area (for flat region). */
+ s32 epsilon_1; /** Coefficient to control variance for bright area (for flat region). */
+ s32 eps_scale_text; /** Epsilon scaling coefficient for texture region. */
+ s32 eps_scale_edge; /** Epsilon scaling coefficient for edge region. */
+ s32 sepa_flat; /** Threshold to judge flat (edge < m_Flat_thre). */
+ s32 sepa_edge; /** Threshold to judge edge (edge > m_Edge_thre). */
+ s32 blend_flat; /** Blending ratio at flat region. */
+ s32 blend_text; /** Blending ratio at texture region. */
+ s32 blend_edge; /** Blending ratio at edge region. */
+ s32 shading_gain; /** Gain of Shading control. */
+ s32 shading_base_gain; /** Base Gain of Shading control. */
+ s32 local_y_gain; /** Gain of local luminance control. */
+ s32 local_y_base_gain; /** Base gain of local luminance control. */
+ s32 rad_x_origin; /** Initial x coord. for radius computation. */
+ s32 rad_y_origin; /** Initial y coord. for radius computation. */
+};
+
+#endif /* __IA_CSS_TDF_TYPES_H */
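+
+/*
+ * Editor's note: a hedged sketch of the three-way region split implied by
+ * sepa_flat/sepa_edge above. "edge" stands for the kernel's internal edge
+ * measure, which this patch does not expose.
+ */
+enum example_tdf_region { EX_TDF_FLAT, EX_TDF_TEXTURE, EX_TDF_EDGE };
+
+static enum example_tdf_region
+example_tdf_classify(s32 edge, const struct ia_css_tdf_config *cfg)
+{
+	if (edge < cfg->sepa_flat)
+		return EX_TDF_FLAT;	/* blended with blend_flat */
+	if (edge > cfg->sepa_edge)
+		return EX_TDF_EDGE;	/* blended with blend_edge */
+	return EX_TDF_TEXTURE;		/* blended with blend_text */
+}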
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
new file mode 100644
index 000000000000..349f0800bbe6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_TNR3_TYPES_H
+#define _IA_CSS_TNR3_TYPES_H
+
+/* @file
+* CSS-API header file for Temporal Noise Reduction v3 (TNR3) kernel
+*/
+
+/**
+ * \brief Number of piecewise linear segments.
+ * \details The parameters to TNR3 are specified as a piecewise linear segment.
+ * The number of such segments is fixed at 3.
+ */
+#define TNR3_NUM_SEGMENTS 3
+
+/* Temporal Noise Reduction v3 (TNR3) configuration.
+ * The parameters to this kernel are fourfold:
+ * 1. Three piecewise linear graphs (one for each plane) with three segments
+ * each. Each line graph has Luma values on the x axis and sigma values for
+ * each plane on the y axis. The three linear segments may have different
+ * slopes, and the Luma value at which the slope may change is called a
+ * "knee" point. As there are three such segments, four points need to be
+ * specified, on both the Luma axis and the per-plane Sigma axis. On the
+ * Luma axis two points are fixed (namely 0 and the maximum Luma value,
+ * which depends on the ISP bit depth); the other two points are the knee
+ * points. The four points on the per-plane sigma axis are also specified
+ * at the interface. A sketch of evaluating this curve follows at the end
+ * of this header.
+ * 2. One rounding adjustment parameter for each plane
+ * 3. One maximum feedback threshold value for each plane
+ * 4. Selection of the reference frame buffer to be used for noise reduction.
+ */
+struct ia_css_tnr3_kernel_config {
+ unsigned int maxfb_y; /** Maximum Feedback Gain for Y */
+ unsigned int maxfb_u; /** Maximum Feedback Gain for U */
+ unsigned int maxfb_v; /** Maximum Feedback Gain for V */
+ unsigned int round_adj_y; /** Rounding Adjust for Y */
+ unsigned int round_adj_u; /** Rounding Adjust for U */
+ unsigned int round_adj_v; /** Rounding Adjust for V */
+	unsigned int knee_y[TNR3_NUM_SEGMENTS - 1];  /** Knee points */
+	unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for Y at points Y0, Y1, Y2, Y3 */
+	unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for U at points U0, U1, U2, U3 */
+	unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for V at points V0, V1, V2, V3 */
+	unsigned int ref_buf_select;                 /** Selection of the reference buffer */
+};
+
+#endif
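+
+/*
+ * Editor's note: a sketch of evaluating the piecewise-linear sigma curve
+ * described above, for the Y plane. The four x coordinates matching
+ * sigma_y[0..3] are 0, the two knee points, and max_luma; linear
+ * interpolation between them and strictly increasing knee points are
+ * assumptions of this sketch.
+ */
+static unsigned int
+example_tnr3_sigma_y(const struct ia_css_tnr3_kernel_config *cfg,
+		     unsigned int luma, unsigned int max_luma)
+{
+	unsigned int x[TNR3_NUM_SEGMENTS + 1] = {
+		0, cfg->knee_y[0], cfg->knee_y[1], max_luma
+	};
+	unsigned int i;
+
+	for (i = 0; i < TNR3_NUM_SEGMENTS; i++) {
+		if (luma <= x[i + 1]) {
+			int dy = (int)cfg->sigma_y[i + 1] -
+				 (int)cfg->sigma_y[i];
+
+			return cfg->sigma_y[i] + dy * (int)(luma - x[i]) /
+			       (int)(x[i + 1] - x[i]);
+		}
+	}
+	return cfg->sigma_y[TNR3_NUM_SEGMENTS];
+}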
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
new file mode 100644
index 000000000000..ecbd3042951a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
@@ -0,0 +1,120 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "ia_css_frame.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+#include "assert_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+#include "ia_css_tnr.host.h"
+
+const struct ia_css_tnr_config default_tnr_config = {
+ 32768,
+ 32,
+ 32,
+};
+
+void
+ia_css_tnr_encode(
+ struct sh_css_isp_tnr_params *to,
+ const struct ia_css_tnr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->coef =
+ uDIGIT_FITTING(from->gain, 16, SH_CSS_TNR_COEF_SHIFT);
+ to->threshold_Y =
+ uDIGIT_FITTING(from->threshold_y, 16, SH_CSS_ISP_YUV_BITS);
+ to->threshold_C =
+ uDIGIT_FITTING(from->threshold_uv, 16, SH_CSS_ISP_YUV_BITS);
+}
+
+void
+ia_css_tnr_dump(
+ const struct sh_css_isp_tnr_params *tnr,
+ unsigned int level)
+{
+	if (!tnr)
+		return;
+ ia_css_debug_dtrace(level, "Temporal Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "tnr_coef", tnr->coef);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "tnr_threshold_Y", tnr->threshold_Y);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "tnr_threshold_C", tnr->threshold_C);
+}
+
+void
+ia_css_tnr_debug_dtrace(
+ const struct ia_css_tnr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.gain=%d, config.threshold_y=%d, config.threshold_uv=%d\n",
+ config->gain,
+ config->threshold_y, config->threshold_uv);
+}
+
+void
+ia_css_tnr_config(
+ struct sh_css_isp_tnr_isp_config *to,
+ const struct ia_css_tnr_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+ unsigned int i;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, &from->tnr_frames[0]->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->frame_height = from->tnr_frames[0]->info.res.height;
+ for (i = 0; i < NUM_TNR_FRAMES; i++) {
+ to->tnr_frame_addr[i] = from->tnr_frames[i]->data +
+ from->tnr_frames[i]->planes.yuyv.offset;
+ }
+
+	/* Assume divisibility here, may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_tnr_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame **frames)
+{
+ struct ia_css_tnr_configuration config;
+ unsigned int i;
+
+ for (i = 0; i < NUM_TNR_FRAMES; i++)
+ config.tnr_frames[i] = frames[i];
+
+ ia_css_configure_tnr(binary, &config);
+}
+
+void
+ia_css_init_tnr_state(
+ struct sh_css_isp_tnr_dmem_state *state,
+ size_t size)
+{
+ (void)size;
+
+ assert(NUM_TNR_FRAMES >= 2);
+ assert(sizeof(*state) == size);
+ state->tnr_in_buf_idx = 0;
+ state->tnr_out_buf_idx = 1;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h
new file mode 100644
index 000000000000..3dbf962089d0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h
@@ -0,0 +1,56 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TNR_HOST_H
+#define __IA_CSS_TNR_HOST_H
+
+#include "ia_css_binary.h"
+#include "ia_css_tnr_state.h"
+#include "ia_css_tnr_types.h"
+#include "ia_css_tnr_param.h"
+
+extern const struct ia_css_tnr_config default_tnr_config;
+
+void
+ia_css_tnr_encode(
+ struct sh_css_isp_tnr_params *to,
+ const struct ia_css_tnr_config *from,
+ unsigned int size);
+
+void
+ia_css_tnr_dump(
+ const struct sh_css_isp_tnr_params *tnr,
+ unsigned int level);
+
+void
+ia_css_tnr_debug_dtrace(
+ const struct ia_css_tnr_config *config,
+ unsigned int level);
+
+void
+ia_css_tnr_config(
+ struct sh_css_isp_tnr_isp_config *to,
+ const struct ia_css_tnr_configuration *from,
+ unsigned int size);
+
+void
+ia_css_tnr_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame **frames);
+
+void
+ia_css_init_tnr_state(
+ struct sh_css_isp_tnr_dmem_state *state,
+ size_t size);
+#endif /* __IA_CSS_TNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h
new file mode 100644
index 000000000000..1973766d8e41
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TNR_PARAM_H
+#define __IA_CSS_TNR_PARAM_H
+
+#include "type_support.h"
+#include "sh_css_defs.h"
+#include "dma.h"
+
+/* TNR (Temporal Noise Reduction) */
+struct sh_css_isp_tnr_params {
+ s32 coef;
+ s32 threshold_Y;
+ s32 threshold_C;
+};
+
+struct ia_css_tnr_configuration {
+ const struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES];
+};
+
+struct sh_css_isp_tnr_isp_config {
+ u32 width_a_over_b;
+ u32 frame_height;
+ struct dma_port_config port_b;
+ hrt_vaddress tnr_frame_addr[NUM_TNR_FRAMES];
+};
+
+#endif /* __IA_CSS_TNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h
new file mode 100644
index 000000000000..901aa1e298e0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TNR_STATE_H
+#define __IA_CSS_TNR_STATE_H
+
+#include "type_support.h"
+
+/* TNR (temporal noise reduction) */
+struct sh_css_isp_tnr_dmem_state {
+ u32 tnr_in_buf_idx;
+ u32 tnr_out_buf_idx;
+};
+
+#endif /* __IA_CSS_TNR_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
new file mode 100644
index 000000000000..98b0daeeab39
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
@@ -0,0 +1,57 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TNR_TYPES_H
+#define __IA_CSS_TNR_TYPES_H
+
+/* @file
+* CSS-API header file for Temporal Noise Reduction (TNR) parameters.
+*/
+
+/* Temporal Noise Reduction (TNR) configuration.
+ *
+ * When the difference between the current frame and the previous frame is
+ * less than or equal to the threshold, TNR works and the current frame is
+ * mixed with the previous frame.
+ * When the difference is greater than the threshold, motion is assumed to
+ * be detected; TNR then does not work and the current frame is output as
+ * is.
+ * Therefore, when threshold_y and threshold_uv are set to 0, TNR is
+ * effectively disabled.
+ *
+ * ISP block: TNR1
+ * ISP1: TNR1 is used.
+ * ISP2: TNR1 is used.
+ */
+
+struct ia_css_tnr_config {
+	ia_css_u0_16 gain;         /** Interpolation ratio of current frame
+	                               and previous frame.
+	                               gain=0.0 -> previous frame is output.
+	                               gain=1.0 -> current frame is output.
+	                               u0.16, [0,65535],
+	                               default 32768(0.5), ineffective 65535(almost 1.0) */
+ ia_css_u0_16 threshold_y; /** Threshold to enable interpolation of Y.
+ If difference between current frame and
+ previous frame is greater than threshold_y,
+ TNR for Y is disabled.
+ u0.16, [0,65535], default/ineffective 0 */
+ ia_css_u0_16 threshold_uv; /** Threshold to enable interpolation of
+ U/V.
+ If difference between current frame and
+ previous frame is greater than threshold_uv,
+ TNR for UV is disabled.
+ u0.16, [0,65535], default/ineffective 0 */
+};
+
+#endif /* __IA_CSS_TNR_TYPES_H */
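+
+/*
+ * Editor's note: a hedged per-pixel sketch of the TNR1 policy described
+ * above, for the Y plane only, assuming 16-bit sample values. The real
+ * kernel runs on the ISP; the mixing arithmetic here is illustrative.
+ */
+static u16 example_tnr1_y(u16 cur, u16 prev,
+			  const struct ia_css_tnr_config *cfg)
+{
+	u16 diff = cur > prev ? cur - prev : prev - cur;
+
+	if (diff > cfg->threshold_y)
+		return cur;	/* motion detected: output current frame as is */
+
+	/* gain is u0.16: the default 32768 mixes the two frames 50/50 */
+	return (u16)(((u32)cur * cfg->gain +
+		      (u32)prev * (65536 - cfg->gain)) >> 16);
+}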
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h
new file mode 100644
index 000000000000..26b7b5bc9391
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_UDS_PARAM_H
+#define __IA_CSS_UDS_PARAM_H
+
+#include "sh_css_uds.h"
+
+/* uds (Up and Down scaling) */
+struct ia_css_uds_config {
+ struct sh_css_crop_pos crop_pos;
+ struct sh_css_uds_info uds;
+};
+
+struct sh_css_sp_uds_params {
+ struct sh_css_crop_pos crop_pos;
+ struct sh_css_uds_info uds;
+};
+
+#endif /* __IA_CSS_UDS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
new file mode 100644
index 000000000000..be274d680caf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
@@ -0,0 +1,138 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_vf.host.h"
+#include <assert_support.h>
+#include <ia_css_err.h>
+#include <ia_css_frame.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_pipeline.h>
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+#include "isp.h"
+
+void
+ia_css_vf_config(
+ struct sh_css_isp_vf_isp_config *to,
+ const struct ia_css_vf_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ to->vf_downscale_bits = from->vf_downscale_bits;
+ to->enable = from->info != NULL;
+
+ if (from->info) {
+ ia_css_frame_info_to_frame_sp_info(&to->info, from->info);
+ ia_css_dma_configure_from_info(&to->dma.port_b, from->info);
+ to->dma.width_a_over_b = elems_a / to->dma.port_b.elems;
+
+		/* Assume divisibility here, may need to generalize to fixed point. */
+ assert(elems_a % to->dma.port_b.elems == 0);
+ }
+}
+
+/* compute the log2 of the downscale factor needed to get closest
+ * to the requested viewfinder resolution on the upper side. The output cannot
+ * be smaller than the requested viewfinder resolution.
+ */
+enum ia_css_err
+sh_css_vf_downscale_log2(
+	const struct ia_css_frame_info *out_info,
+	const struct ia_css_frame_info *vf_info,
+	unsigned int *downscale_log2)
+{
+	unsigned int ds_log2 = 0;
+	unsigned int out_width;
+
+	if (!out_info || !vf_info)
+		return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ out_width = out_info->res.width;
+
+ if (out_width == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* downscale until width smaller than the viewfinder width. We don't
+ * test for the height since the vmem buffers only put restrictions on
+ * the width of a line, not on the number of lines in a frame.
+ */
+	while (out_width >= vf_info->res.width) {
+		ds_log2++;
+		out_width /= 2;
+	}
+ /* now width is smaller, so we go up one step */
+ if ((ds_log2 > 0) && (out_width < ia_css_binary_max_vf_width()))
+ ds_log2--;
+ /* TODO: use actual max input resolution of vf_pp binary */
+ if ((out_info->res.width >> ds_log2) >= 2 * ia_css_binary_max_vf_width())
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ *downscale_log2 = ds_log2;
+ return IA_CSS_SUCCESS;
+}
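+
+/*
+ * Editor's note: worked example for the loop above, assuming out_width
+ * 1920 and a requested viewfinder width of 640. The loop halves
+ * 1920 -> 960 -> 480 (ds_log2 = 2); 480 would undershoot the request,
+ * so the step back to ds_log2 = 1 selects the 960-wide output, the
+ * closest downscale that is not smaller than the viewfinder resolution.
+ */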
+
+static enum ia_css_err
+configure_kernel(
+	const struct ia_css_binary_info *info,
+	const struct ia_css_frame_info *out_info,
+	const struct ia_css_frame_info *vf_info,
+	unsigned int *downscale_log2,
+	struct ia_css_vf_configuration *config)
+{
+	enum ia_css_err err;
+	unsigned int vf_log_ds = 0;
+
+	/* First compute value */
+	if (vf_info) {
+		err = sh_css_vf_downscale_log2(out_info, vf_info, &vf_log_ds);
+		if (err != IA_CSS_SUCCESS)
+			return err;
+	}
+ vf_log_ds = min(vf_log_ds, info->vf_dec.max_log_downscale);
+ *downscale_log2 = vf_log_ds;
+
+ /* Then store it in isp config section */
+ config->vf_downscale_bits = vf_log_ds;
+ return IA_CSS_SUCCESS;
+}
+
+static void
+configure_dma(
+ struct ia_css_vf_configuration *config,
+ const struct ia_css_frame_info *vf_info)
+{
+ config->info = vf_info;
+}
+
+enum ia_css_err
+ia_css_vf_configure(
+	const struct ia_css_binary *binary,
+	const struct ia_css_frame_info *out_info,
+	struct ia_css_frame_info *vf_info,
+	unsigned int *downscale_log2)
+{
+	enum ia_css_err err;
+	struct ia_css_vf_configuration config;
+	const struct ia_css_binary_info *info = &binary->info->sp;
+
+	err = configure_kernel(info, out_info, vf_info, downscale_log2, &config);
+	if (err != IA_CSS_SUCCESS)
+		return err;
+	configure_dma(&config, vf_info);
+
+ if (vf_info)
+ vf_info->raw_bit_depth = info->dma.vfdec_bits_per_pixel;
+ ia_css_configure_vf(binary, &config);
+
+ return IA_CSS_SUCCESS;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h
new file mode 100644
index 000000000000..9cc594f9a840
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_VF_HOST_H
+#define __IA_CSS_VF_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+
+#include "ia_css_vf_types.h"
+#include "ia_css_vf_param.h"
+
+/* compute the log2 of the downscale factor needed to get closest
+ * to the requested viewfinder resolution on the upper side. The output cannot
+ * be smaller than the requested viewfinder resolution.
+ */
+enum ia_css_err
+sh_css_vf_downscale_log2(
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2);
+
+void
+ia_css_vf_config(
+ struct sh_css_isp_vf_isp_config *to,
+ const struct ia_css_vf_configuration *from,
+ unsigned int size);
+
+enum ia_css_err
+ia_css_vf_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2);
+
+#endif /* __IA_CSS_VF_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
new file mode 100644
index 000000000000..171a98508a88
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_VF_PARAM_H
+#define __IA_CSS_VF_PARAM_H
+
+#include "type_support.h"
+#include "dma.h"
+#include "gc/gc_1.0/ia_css_gc_param.h" /* GAMMA_OUTPUT_BITS */
+#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
+#include "ia_css_vf_types.h"
+
+#define VFDEC_BITS_PER_PIXEL GAMMA_OUTPUT_BITS
+
+/* Viewfinder decimation */
+struct sh_css_isp_vf_isp_config {
+ u32 vf_downscale_bits; /** Log VF downscale value */
+ u32 enable;
+ struct ia_css_frame_sp_info info;
+ struct {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+ } dma;
+};
+
+#endif /* __IA_CSS_VF_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
new file mode 100644
index 000000000000..a4d39e2e9d8e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_VF_TYPES_H
+#define __IA_CSS_VF_TYPES_H
+
+/* Viewfinder decimation
+ *
+ * ISP block: vfeven_horizontal_downscale
+ */
+
+#include <ia_css_frame_public.h>
+#include <type_support.h>
+
+struct ia_css_vf_configuration {
+ u32 vf_downscale_bits; /** Log VF downscale value */
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_VF_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c
new file mode 100644
index 000000000000..d07c500eb542
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c
@@ -0,0 +1,86 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "sh_css_frac.h"
+
+#include "ia_css_wb.host.h"
+
+const struct ia_css_wb_config default_wb_config = {
+ 1,
+ 32768,
+ 32768,
+ 32768,
+ 32768
+};
+
+void
+ia_css_wb_encode(
+ struct sh_css_isp_wb_params *to,
+ const struct ia_css_wb_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->gain_shift =
+ uISP_REG_BIT - from->integer_bits;
+ to->gain_gr =
+ uDIGIT_FITTING(from->gr, 16 - from->integer_bits,
+ to->gain_shift);
+ to->gain_r =
+ uDIGIT_FITTING(from->r, 16 - from->integer_bits,
+ to->gain_shift);
+ to->gain_b =
+ uDIGIT_FITTING(from->b, 16 - from->integer_bits,
+ to->gain_shift);
+ to->gain_gb =
+ uDIGIT_FITTING(from->gb, 16 - from->integer_bits,
+ to->gain_shift);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_wb_dump(
+ const struct sh_css_isp_wb_params *wb,
+ unsigned int level)
+{
+	if (!wb)
+		return;
+ ia_css_debug_dtrace(level, "White Balance:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_shift", wb->gain_shift);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_gr", wb->gain_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_r", wb->gain_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_b", wb->gain_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_gb", wb->gain_gb);
+}
+
+void
+ia_css_wb_debug_dtrace(
+ const struct ia_css_wb_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.integer_bits=%d, config.gr=%d, config.r=%d, config.b=%d, config.gb=%d\n",
+ config->integer_bits,
+ config->gr, config->r,
+ config->b, config->gb);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h
new file mode 100644
index 000000000000..545dea39c2e0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_WB_HOST_H
+#define __IA_CSS_WB_HOST_H
+
+#include "ia_css_wb_types.h"
+#include "ia_css_wb_param.h"
+
+extern const struct ia_css_wb_config default_wb_config;
+
+void
+ia_css_wb_encode(
+ struct sh_css_isp_wb_params *to,
+ const struct ia_css_wb_config *from,
+ unsigned int size);
+
+void
+ia_css_wb_dump(
+ const struct sh_css_isp_wb_params *wb,
+ unsigned int level);
+
+void
+ia_css_wb_debug_dtrace(
+ const struct ia_css_wb_config *wb,
+ unsigned int level);
+
+#endif /* __IA_CSS_WB_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h
new file mode 100644
index 000000000000..dcf548da55cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h
@@ -0,0 +1,29 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_WB_PARAM_H
+#define __IA_CSS_WB_PARAM_H
+
+#include "type_support.h"
+
+/* WB (White Balance) */
+struct sh_css_isp_wb_params {
+ s32 gain_shift;
+ s32 gain_gr;
+ s32 gain_r;
+ s32 gain_b;
+ s32 gain_gb;
+};
+
+#endif /* __IA_CSS_WB_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
new file mode 100644
index 000000000000..59cbd71ef332
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_WB_TYPES_H
+#define __IA_CSS_WB_TYPES_H
+
+/* @file
+* CSS-API header file for White Balance parameters.
+*/
+
+/* White Balance configuration (Gain Adjust).
+ *
+ * ISP block: WB1
+ * ISP1: WB1 is used.
+ * ISP2: WB1 is used.
+ */
+struct ia_css_wb_config {
+ u32 integer_bits; /** Common exponent of gains.
+ u8.0, [0,3],
+ default 1, ineffective 1 */
+ u32 gr; /** Significand of Gr gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+ u32 r; /** Significand of R gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+ u32 b; /** Significand of B gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+ u32 gb; /** Significand of Gb gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+};
+
+#endif /* __IA_CSS_WB_TYPES_H */
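+
+/*
+ * Editor's note: per the format notes above, the effective gain is
+ * significand / 2^(16 - integer_bits); the defaults integer_bits = 1 and
+ * gr = 32768 give 32768 / 2^15 = 1.0. A sketch in integer math:
+ */
+static inline u32 example_wb_gain_milli(u32 significand, u32 integer_bits)
+{
+	/* returns the gain scaled by 1000 to stay in integer arithmetic */
+	return (significand * 1000) >> (16 - integer_bits);
+}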
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
new file mode 100644
index 000000000000..e04c604ba612
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
@@ -0,0 +1,65 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_xnr.host.h"
+
+const struct ia_css_xnr_config default_xnr_config = {
+ /* default threshold 6400 translates to 25 on ISP. */
+ 6400
+};
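+
+/*
+ * Editor's note: the "6400 translates to 25" above is the uDIGIT_FITTING()
+ * shift from u0.16 down to SH_CSS_ISP_YUV_BITS (8 bits):
+ * 6400 >> (16 - 8) = 25.
+ */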
+
+void
+ia_css_xnr_table_vamem_encode(
+ struct sh_css_isp_xnr_vamem_params *to,
+ const struct ia_css_xnr_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->xnr, &from->data, sizeof(to->xnr));
+}
+
+void
+ia_css_xnr_encode(
+ struct sh_css_isp_xnr_params *to,
+ const struct ia_css_xnr_config *from,
+ unsigned int size)
+{
+ (void)size;
+
+ to->threshold =
+ (uint16_t)uDIGIT_FITTING(from->threshold, 16, SH_CSS_ISP_YUV_BITS);
+}
+
+void
+ia_css_xnr_table_debug_dtrace(
+ const struct ia_css_xnr_table *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void
+ia_css_xnr_debug_dtrace(
+ const struct ia_css_xnr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d\n", config->threshold);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h
new file mode 100644
index 000000000000..31833b78739f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR_HOST_H
+#define __IA_CSS_XNR_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_xnr_param.h"
+#include "ia_css_xnr_table.host.h"
+
+extern const struct ia_css_xnr_config default_xnr_config;
+
+void
+ia_css_xnr_table_vamem_encode(
+ struct sh_css_isp_xnr_vamem_params *to,
+ const struct ia_css_xnr_table *from,
+ unsigned int size);
+
+void
+ia_css_xnr_encode(
+ struct sh_css_isp_xnr_params *to,
+ const struct ia_css_xnr_config *from,
+ unsigned int size);
+
+void
+ia_css_xnr_table_debug_dtrace(
+ const struct ia_css_xnr_table *s3a,
+ unsigned int level);
+
+void
+ia_css_xnr_debug_dtrace(
+ const struct ia_css_xnr_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_XNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
new file mode 100644
index 000000000000..72a5c5fd10e7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
@@ -0,0 +1,50 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR_PARAM_H
+#define __IA_CSS_XNR_PARAM_H
+
+#include "type_support.h"
+#include <system_global.h>
+
+#ifndef PIPE_GENERATION
+#if defined(HAS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_XNR_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_XNR_TABLE_SIZE IA_CSS_VAMEM_2_XNR_TABLE_SIZE
+#elif defined(HAS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_XNR_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_XNR_TABLE_SIZE IA_CSS_VAMEM_1_XNR_TABLE_SIZE
+#else
+#error "Unknown vamem type"
+#endif
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_XNR_TABLE_SIZE 0
+#endif
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_xnr_vamem_params {
+ u16 xnr[SH_CSS_ISP_XNR_TABLE_SIZE];
+};
+
+struct sh_css_isp_xnr_params {
+ /* XNR threshold.
+ * type:u0.16 but actual valid range is:[0,255]
+ * valid range is dependent on SH_CSS_ISP_YUV_BITS (currently 8bits)
+ * default: 25 */
+ u16 threshold;
+};
+
+#endif /* __IA_CSS_XNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
new file mode 100644
index 000000000000..78653b2666a4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
@@ -0,0 +1,81 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <string_support.h> /* memcpy */
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_xnr_table.host.h"
+
+struct ia_css_xnr_table default_xnr_table;
+
+#if defined(HAS_VAMEM_VERSION_2)
+
+static const uint16_t
+default_xnr_table_data[IA_CSS_VAMEM_2_XNR_TABLE_SIZE] = {
+ /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */
+ 8191 >> 1, 4096 >> 1, 2730 >> 1, 2048 >> 1, 1638 >> 1, 1365 >> 1, 1170 >> 1, 1024 >> 1, 910 >> 1, 819 >> 1, 744 >> 1, 682 >> 1, 630 >> 1, 585 >> 1,
+ 546 >> 1, 512 >> 1,
+
+ /* 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 */
+ 481 >> 1, 455 >> 1, 431 >> 1, 409 >> 1, 390 >> 1, 372 >> 1, 356 >> 1, 341 >> 1, 327 >> 1, 315 >> 1, 303 >> 1, 292 >> 1, 282 >> 1, 273 >> 1, 264 >> 1,
+ 256 >> 1,
+
+ /* 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 */
+ 248 >> 1, 240 >> 1, 234 >> 1, 227 >> 1, 221 >> 1, 215 >> 1, 210 >> 1, 204 >> 1, 199 >> 1, 195 >> 1, 190 >> 1, 186 >> 1, 182 >> 1, 178 >> 1, 174 >> 1,
+ 170 >> 1,
+
+ /* 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 */
+ 167 >> 1, 163 >> 1, 160 >> 1, 157 >> 1, 154 >> 1, 151 >> 1, 148 >> 1, 146 >> 1, 143 >> 1, 141 >> 1, 138 >> 1, 136 >> 1, 134 >> 1, 132 >> 1, 130 >> 1, 128 >> 1
+};
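+
+/* The table above is a reciprocal look-up: entry i (1-based) appears to be
+ * min(8191, 8192 / i) >> 1, i.e. 1/i in u0.13 halved to fit the vamem
+ * element. A host-side sketch that would regenerate it (illustrative only,
+ * not used by the driver):
+ *
+ *	static void xnr_fill_table(u16 *t, unsigned int n)
+ *	{
+ *		unsigned int i;
+ *
+ *		for (i = 1; i <= n; i++)
+ *			t[i - 1] = min(8191U, 8192U / i) >> 1;
+ *	}
+ */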
+
+#elif defined(HAS_VAMEM_VERSION_1)
+
+static const uint16_t
+default_xnr_table_data[IA_CSS_VAMEM_1_XNR_TABLE_SIZE] = {
+ /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */
+ 8191 >> 1, 4096 >> 1, 2730 >> 1, 2048 >> 1, 1638 >> 1, 1365 >> 1, 1170 >> 1, 1024 >> 1, 910 >> 1, 819 >> 1, 744 >> 1, 682 >> 1, 630 >> 1, 585 >> 1,
+ 546 >> 1, 512 >> 1,
+
+ /* 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 */
+ 481 >> 1, 455 >> 1, 431 >> 1, 409 >> 1, 390 >> 1, 372 >> 1, 356 >> 1, 341 >> 1, 327 >> 1, 315 >> 1, 303 >> 1, 292 >> 1, 282 >> 1, 273 >> 1, 264 >> 1,
+ 256 >> 1,
+
+ /* 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 */
+ 248 >> 1, 240 >> 1, 234 >> 1, 227 >> 1, 221 >> 1, 215 >> 1, 210 >> 1, 204 >> 1, 199 >> 1, 195 >> 1, 190 >> 1, 186 >> 1, 182 >> 1, 178 >> 1, 174 >> 1,
+ 170 >> 1,
+
+ /* 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 */
+ 167 >> 1, 163 >> 1, 160 >> 1, 157 >> 1, 154 >> 1, 151 >> 1, 148 >> 1, 146 >> 1, 143 >> 1, 141 >> 1, 138 >> 1, 136 >> 1, 134 >> 1, 132 >> 1, 130 >> 1, 128 >> 1
+};
+
+#else
+#error "sh_css_params.c: VAMEM version must \
+be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
+#endif
+
+void
+ia_css_config_xnr_table(void)
+{
+#if defined(HAS_VAMEM_VERSION_2)
+ memcpy(default_xnr_table.data.vamem_2, default_xnr_table_data,
+ sizeof(default_xnr_table_data));
+ default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+#else
+ memcpy(default_xnr_table.data.vamem_1, default_xnr_table_data,
+ sizeof(default_xnr_table_data));
+ default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h
new file mode 100644
index 000000000000..130086713a7f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR_TABLE_HOST_H
+#define __IA_CSS_XNR_TABLE_HOST_H
+
+extern struct ia_css_xnr_table default_xnr_table;
+
+void ia_css_config_xnr_table(void);
+
+#endif /* __IA_CSS_XNR_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
new file mode 100644
index 000000000000..22189c936f64
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
@@ -0,0 +1,70 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR_TYPES_H
+#define __IA_CSS_XNR_TYPES_H
+
+/* @file
+ * CSS-API header file for Extra Noise Reduction (XNR) parameters.
+ */
+
+/* XNR table.
+ *
+ * NOTE: The driver does not need to set this table,
+ * because the default values are set inside the css.
+ *
+ * This table contains coefficients used for division in XNR.
+ *
+ * u0.12, [0,4095],
+ * {4095, 2048, 1365, .........., 65, 64}
+ * ({1/1, 1/2, 1/3, ............., 1/63, 1/64})
+ *
+ * ISP block: XNR1
+ * ISP1: XNR1 is used.
+ * ISP2: XNR1 is used.
+ *
+ */
+
+/* Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2 6
+/* Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE BIT(IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2)
+
+/* Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2 6
+/* Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE BIT(IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2)
+
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
+ IA_CSS_VAMEM_TYPE_2(ISP2400) */
+union ia_css_xnr_data {
+ u16 vamem_1[IA_CSS_VAMEM_1_XNR_TABLE_SIZE];
+ /** Coefficients table on vamem type1. u0.12, [0,4095] */
+ u16 vamem_2[IA_CSS_VAMEM_2_XNR_TABLE_SIZE];
+ /** Coefficients table on vamem type2. u0.12, [0,4095] */
+};
+
+struct ia_css_xnr_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_xnr_data data;
+};
+
+struct ia_css_xnr_config {
+ /* XNR threshold.
+ * type:u0.16 valid range:[0,65535]
+ * default: 6400 */
+ u16 threshold;
+};
+
+#endif /* __IA_CSS_XNR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
new file mode 100644
index 000000000000..a9db6366d20b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
@@ -0,0 +1,248 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "type_support.h"
+#include "math_support.h"
+#include "sh_css_defs.h"
+#include "ia_css_types.h"
+#include "assert_support.h"
+#include "ia_css_xnr3.host.h"
+
+/* Maximum value for alpha on ISP interface */
+#define XNR_MAX_ALPHA ((1 << (ISP_VEC_ELEMBITS - 1)) - 1)
+
+/* Minimum value for sigma on host interface. Lower values translate to
+ * max_alpha.
+ */
+#define XNR_MIN_SIGMA (IA_CSS_XNR3_SIGMA_SCALE / 100)
+
+/*
+ * division look-up table
+ * Refers to XNR3.0.5
+ */
+#define XNR3_LOOK_UP_TABLE_POINTS 16
+
+static const s16 x[XNR3_LOOK_UP_TABLE_POINTS] = {
+ 1024, 1164, 1320, 1492, 1680, 1884, 2108, 2352,
+ 2616, 2900, 3208, 3540, 3896, 4276, 4684, 5120
+};
+
+static const s16 a[XNR3_LOOK_UP_TABLE_POINTS] = {
+	-7213, -5580, -4371, -3421, -2722, -2159, -6950, -5585,
+	-4529, -3697, -3010, -2485, -2070, -1727, -1428, 0
+};
+
+static const s16 b[XNR3_LOOK_UP_TABLE_POINTS] = {
+ 4096, 3603, 3178, 2811, 2497, 2226, 1990, 1783,
+ 1603, 1446, 1307, 1185, 1077, 981, 895, 819
+};
+
+static const s16 c[XNR3_LOOK_UP_TABLE_POINTS] = {
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+/*
+ * Default kernel parameters. In general, the default is bypass mode or as
+ * close to the ineffective values as possible. Due to the chroma
+ * down+upsampling, a perfect bypass is not possible for the xnr3 filter
+ * itself; instead, the 'blending' parameter is used to create one.
+ */
+const struct ia_css_xnr3_config default_xnr3_config = {
+ /* sigma */
+ { 0, 0, 0, 0, 0, 0 },
+ /* coring */
+ { 0, 0, 0, 0 },
+ /* blending */
+ { 0 }
+};
+
+/*
+ * Compute an alpha value for the ISP kernel from sigma value on the host
+ * parameter interface as: alpha_scale * 1/(sigma/sigma_scale)
+ */
+static int32_t
+compute_alpha(int sigma)
+{
+ s32 alpha;
+ int offset = sigma / 2;
+
+ if (sigma < XNR_MIN_SIGMA) {
+ alpha = XNR_MAX_ALPHA;
+ } else {
+ alpha = ((IA_CSS_XNR3_SIGMA_SCALE * XNR_ALPHA_SCALE_FACTOR) + offset) / sigma;
+
+ if (alpha > XNR_MAX_ALPHA)
+ alpha = XNR_MAX_ALPHA;
+ }
+
+ return alpha;
+}
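+
+/* Worked example (using IA_CSS_XNR3_SIGMA_SCALE == 1 << 10 and
+ * XNR_ALPHA_SCALE_FACTOR == 1 << 5, as defined in ia_css_xnr3_types.h and
+ * ia_css_xnr3_param.h):
+ *
+ *	compute_alpha(1024) -> (1024 * 32 + 512) / 1024 == 32   (1/1.0)
+ *	compute_alpha(512)  -> (1024 * 32 + 256) / 512  == 64   (1/0.5)
+ *	compute_alpha(5)    -> XNR_MAX_ALPHA (sigma < XNR_MIN_SIGMA == 10)
+ */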
+
+/*
+ * Compute the scaled coring value for the ISP kernel from the value on the
+ * host parameter interface.
+ */
+static int32_t
+compute_coring(int coring)
+{
+ s32 isp_coring;
+ s32 isp_scale = XNR_CORING_SCALE_FACTOR;
+ s32 host_scale = IA_CSS_XNR3_CORING_SCALE;
+ s32 offset = host_scale / 2; /* fixed-point 0.5 */
+
+ /* Convert from public host-side scale factor to isp-side scale
+ * factor. Clip to [0, isp_scale-1).
+ */
+ isp_coring = ((coring * isp_scale) + offset) / host_scale;
+ return min(max(isp_coring, 0), isp_scale - 1);
+}
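+
+/* Worked example (assuming ISP_VEC_ELEMBITS == 14, so that
+ * XNR_CORING_SCALE_FACTOR == 1 << 13 == 8192; IA_CSS_XNR3_CORING_SCALE
+ * is 1 << 15 == 32768):
+ *
+ *	compute_coring(32768 / 2) -> (16384 * 8192 + 16384) / 32768 == 4096
+ *
+ * i.e. 0.5 on the host side maps to 0.5 in the ISP representation,
+ * clipped to [0, 8191].
+ */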
+
+/*
+ * Compute the scaled blending strength for the ISP kernel from the value on
+ * the host parameter interface.
+ */
+static int32_t
+compute_blending(int strength)
+{
+ s32 isp_strength;
+ s32 isp_scale = XNR_BLENDING_SCALE_FACTOR;
+ s32 host_scale = IA_CSS_XNR3_BLENDING_SCALE;
+ s32 offset = host_scale / 2; /* fixed-point 0.5 */
+
+ /* Convert from public host-side scale factor to isp-side scale
+ * factor. The blending factor is positive on the host side, but
+ * negative on the ISP side because +1.0 cannot be represented
+ * exactly as s0.11 fixed point, but -1.0 can.
+ */
+ isp_strength = -(((strength * isp_scale) + offset) / host_scale);
+ return MAX(MIN(isp_strength, 0), -XNR_BLENDING_SCALE_FACTOR);
+}
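+
+/* Worked example (same ISP_VEC_ELEMBITS == 14 assumption, so
+ * XNR_BLENDING_SCALE_FACTOR == 8192; IA_CSS_XNR3_BLENDING_SCALE == 2048):
+ *
+ *	compute_blending(2048) -> -((2048 * 8192 + 1024) / 2048) == -8192
+ *
+ * i.e. a host-side strength of 1.0 becomes -1.0 in the ISP's signed
+ * fixed-point encoding.
+ */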
+
+void
+ia_css_xnr3_encode(
+ struct sh_css_isp_xnr3_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned int size)
+{
+ int kernel_size = XNR_FILTER_SIZE;
+	/* The adjust factor is the next power of 2
+	 * w.r.t. the kernel size. */
+ int adjust_factor = ceil_pow2(kernel_size);
+ s32 max_diff = (1 << (ISP_VEC_ELEMBITS - 1)) - 1;
+ s32 min_diff = -(1 << (ISP_VEC_ELEMBITS - 1));
+
+ s32 alpha_y0 = compute_alpha(from->sigma.y0);
+ s32 alpha_y1 = compute_alpha(from->sigma.y1);
+ s32 alpha_u0 = compute_alpha(from->sigma.u0);
+ s32 alpha_u1 = compute_alpha(from->sigma.u1);
+ s32 alpha_v0 = compute_alpha(from->sigma.v0);
+ s32 alpha_v1 = compute_alpha(from->sigma.v1);
+ s32 alpha_ydiff = (alpha_y1 - alpha_y0) * adjust_factor / kernel_size;
+ s32 alpha_udiff = (alpha_u1 - alpha_u0) * adjust_factor / kernel_size;
+ s32 alpha_vdiff = (alpha_v1 - alpha_v0) * adjust_factor / kernel_size;
+
+ s32 coring_u0 = compute_coring(from->coring.u0);
+ s32 coring_u1 = compute_coring(from->coring.u1);
+ s32 coring_v0 = compute_coring(from->coring.v0);
+ s32 coring_v1 = compute_coring(from->coring.v1);
+ s32 coring_udiff = (coring_u1 - coring_u0) * adjust_factor / kernel_size;
+ s32 coring_vdiff = (coring_v1 - coring_v0) * adjust_factor / kernel_size;
+
+ s32 blending = compute_blending(from->blending.strength);
+
+ (void)size;
+
+ /* alpha's are represented in qN.5 format */
+ to->alpha.y0 = alpha_y0;
+ to->alpha.u0 = alpha_u0;
+ to->alpha.v0 = alpha_v0;
+ to->alpha.ydiff = min(max(alpha_ydiff, min_diff), max_diff);
+ to->alpha.udiff = min(max(alpha_udiff, min_diff), max_diff);
+ to->alpha.vdiff = min(max(alpha_vdiff, min_diff), max_diff);
+
+ /* coring parameters are expressed in q1.NN format */
+ to->coring.u0 = coring_u0;
+ to->coring.v0 = coring_v0;
+ to->coring.udiff = min(max(coring_udiff, min_diff), max_diff);
+ to->coring.vdiff = min(max(coring_vdiff, min_diff), max_diff);
+
+ /* blending strength is expressed in q1.NN format */
+ to->blending.strength = blending;
+}
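+
+/* Worked example for the slope terms (XNR_FILTER_SIZE == 5, so
+ * adjust_factor == ceil_pow2(5) == 8): with alpha_y0 == 32 and
+ * alpha_y1 == 64, alpha_ydiff == (64 - 32) * 8 / 5 == 51, which is then
+ * clipped to [min_diff, max_diff].
+ */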
+
+/* ISP2401 */
+/* (void) = ia_css_xnr3_vmem_encode(*to, *from)
+ * -----------------------------------------------
+ * VMEM encode function: translates the UV parameters from the host
+ * representation into the ISP representation.
+ */
+void
+ia_css_xnr3_vmem_encode(
+ struct sh_css_isp_xnr3_vmem_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned int size)
+{
+ unsigned int i, j, base;
+ const unsigned int total_blocks = 4;
+ const unsigned int shuffle_block = 16;
+
+ (void)from;
+ (void)size;
+
+ /* Init */
+ for (i = 0; i < ISP_VEC_NELEMS; i++) {
+ to->x[0][i] = 0;
+ to->a[0][i] = 0;
+ to->b[0][i] = 0;
+ to->c[0][i] = 0;
+ }
+
+ /* Constraints on "x":
+ * - values should be greater or equal to 0.
+ * - values should be ascending.
+ */
+ assert(x[0] >= 0);
+
+ for (j = 1; j < XNR3_LOOK_UP_TABLE_POINTS; j++) {
+ assert(x[j] >= 0);
+ assert(x[j] > x[j - 1]);
+ }
+
+	/* The implementation of calculating 1/x is based on the availability
+	 * of the OP_vec_shuffle16 operation.
+	 * A 64-element vector is split up into 4 blocks of 16 elements. Each
+	 * array is copied to a vector 4 times (starting at 0, 16, 32 and 48).
+	 * All array elements are copied or initialised as described in the
+	 * KFS. The remaining elements of a vector are set to 0.
+	 */
+ /* TODO: guard this code with above assumptions */
+ for (i = 0; i < total_blocks; i++) {
+ base = shuffle_block * i;
+
+ for (j = 0; j < XNR3_LOOK_UP_TABLE_POINTS; j++) {
+ to->x[0][base + j] = x[j];
+ to->a[0][base + j] = a[j];
+ to->b[0][base + j] = b[j];
+ to->c[0][base + j] = c[j];
+ }
+ }
+}
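+
+/* Resulting VMEM layout (with ISP_VEC_NELEMS == 64, as the comment above
+ * assumes): each row holds four back-to-back copies of the 16-point table,
+ * so an OP_vec_shuffle16 index 0..15 resolves within any 16-lane block:
+ *
+ *	to->x[0] = { x[0]..x[15], x[0]..x[15], x[0]..x[15], x[0]..x[15] }
+ *	(likewise for a, b and c)
+ */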
+
+/* Dummy function, added because the tool expects it. */
+void
+ia_css_xnr3_debug_dtrace(
+ const struct ia_css_xnr3_config *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h
new file mode 100644
index 000000000000..959533ec29c6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR3_HOST_H
+#define __IA_CSS_XNR3_HOST_H
+
+#include "ia_css_xnr3_param.h"
+#include "ia_css_xnr3_types.h"
+
+extern const struct ia_css_xnr3_config default_xnr3_config;
+
+void
+ia_css_xnr3_encode(
+ struct sh_css_isp_xnr3_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned int size);
+
+/* ISP2401 */
+void
+ia_css_xnr3_vmem_encode(
+ struct sh_css_isp_xnr3_vmem_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned int size);
+
+void
+ia_css_xnr3_debug_dtrace(
+ const struct ia_css_xnr3_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_XNR3_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h
new file mode 100644
index 000000000000..7d108669e19a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h
@@ -0,0 +1,83 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR3_PARAM_H
+#define __IA_CSS_XNR3_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* ISP2401: needed for VMEM_ARRAY */
+
+/* Scaling factor of the alpha values: which fixed-point value represents 1.0?
+ * It must be chosen such that 1/min_sigma still fits in an ISP vector
+ * element. */
+#define XNR_ALPHA_SCALE_LOG2 5
+#define XNR_ALPHA_SCALE_FACTOR BIT(XNR_ALPHA_SCALE_LOG2)
+
+/* Scaling factor of the coring values on the ISP. */
+#define XNR_CORING_SCALE_LOG2 (ISP_VEC_ELEMBITS - 1)
+#define XNR_CORING_SCALE_FACTOR BIT(XNR_CORING_SCALE_LOG2)
+
+/* Scaling factor of the blending strength on the ISP. */
+#define XNR_BLENDING_SCALE_LOG2 (ISP_VEC_ELEMBITS - 1)
+#define XNR_BLENDING_SCALE_FACTOR BIT(XNR_BLENDING_SCALE_LOG2)
+
+/* XNR3 filter size. Must be 11x11, 9x9 or 5x5. */
+#define XNR_FILTER_SIZE 5
+
+/* XNR3 alpha (1/sigma) parameters on the ISP, expressed as a base (0) value
+ * for dark areas, and a scaled diff towards the value for bright areas. */
+struct sh_css_xnr3_alpha_params {
+ s32 y0;
+ s32 u0;
+ s32 v0;
+ s32 ydiff;
+ s32 udiff;
+ s32 vdiff;
+};
+
+/* XNR3 coring parameters on the ISP, expressed as a base (0) value
+ * for dark areas, and a scaled diff towards the value for bright areas. */
+struct sh_css_xnr3_coring_params {
+ s32 u0;
+ s32 v0;
+ s32 udiff;
+ s32 vdiff;
+};
+
+/* XNR3 blending strength on the ISP. */
+struct sh_css_xnr3_blending_params {
+ s32 strength;
+};
+
+/* XNR3 ISP parameters */
+struct sh_css_isp_xnr3_params {
+ struct sh_css_xnr3_alpha_params alpha;
+ struct sh_css_xnr3_coring_params coring;
+ struct sh_css_xnr3_blending_params blending;
+};
+
+/* ISP2401 */
+/*
+ * STRUCT sh_css_isp_xnr3_vmem_params
+ * -----------------------------------------------
+ * ISP VMEM parameters
+ */
+struct sh_css_isp_xnr3_vmem_params {
+ VMEM_ARRAY(x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(c, ISP_VEC_NELEMS);
+};
+
+#endif /*__IA_CSS_XNR3_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
new file mode 100644
index 000000000000..6963bef3c07d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
@@ -0,0 +1,97 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR3_TYPES_H
+#define __IA_CSS_XNR3_TYPES_H
+
+/* @file
+ * CSS-API header file for Extra Noise Reduction (XNR) parameters.
+ */
+
+/**
+ * \brief Scale of the XNR sigma parameters.
+ * \details The define specifies which fixed-point value represents 1.0.
+ */
+#define IA_CSS_XNR3_SIGMA_SCALE BIT(10)
+
+/**
+ * \brief Scale of the XNR coring parameters.
+ * \details The define specifies which fixed-point value represents 1.0.
+ */
+#define IA_CSS_XNR3_CORING_SCALE BIT(15)
+
+/**
+ * \brief Scale of the XNR blending parameter.
+ * \details The define specifies which fixed-point value represents 1.0.
+ */
+#define IA_CSS_XNR3_BLENDING_SCALE BIT(11)
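+
+/* Conversion example (illustrative only): host-side parameters are
+ * fractions of 1.0 scaled by the defines above, e.g.:
+ *
+ *	cfg.sigma.y0          = IA_CSS_XNR3_SIGMA_SCALE / 4;      0.25 -> 256
+ *	cfg.blending.strength = IA_CSS_XNR3_BLENDING_SCALE;       1.0 -> 2048
+ */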
+
+/**
+ * \brief XNR3 Sigma Parameters.
+ * \details Sigma parameters define the strength of the XNR filter.
+ * A higher number means stronger filtering. There are two values for each of
+ * the three YUV planes: one for dark areas and one for bright areas. All
+ * sigma parameters are fixed-point values between 0.0 and 1.0, scaled with
+ * IA_CSS_XNR3_SIGMA_SCALE.
+ */
+struct ia_css_xnr3_sigma_params {
+ int y0; /** Sigma for Y range similarity in dark area */
+ int y1; /** Sigma for Y range similarity in bright area */
+ int u0; /** Sigma for U range similarity in dark area */
+ int u1; /** Sigma for U range similarity in bright area */
+ int v0; /** Sigma for V range similarity in dark area */
+ int v1; /** Sigma for V range similarity in bright area */
+};
+
+/**
+ * \brief XNR3 Coring Parameters
+ * \details Coring parameters define the "coring" strength, which is a soft
+ * thresholding technique to avoid false coloring. There are two values for
+ * each of the two chroma planes: one for dark areas and one for bright areas.
+ * All coring parameters are fixed-point values between 0.0 and 1.0, scaled
+ * with IA_CSS_XNR3_CORING_SCALE. The ineffective value is 0.
+ */
+struct ia_css_xnr3_coring_params {
+ int u0; /** Coring threshold of U channel in dark area */
+ int u1; /** Coring threshold of U channel in bright area */
+ int v0; /** Coring threshold of V channel in dark area */
+ int v1; /** Coring threshold of V channel in bright area */
+};
+
+/**
+ * \brief XNR3 Blending Parameters
+ * \details Blending parameters define the blending strength of filtered
+ * output pixels with the original chroma pixels from before xnr3. The
+ * blending strength is a fixed-point value between 0.0 and 1.0 (inclusive),
+ * scaled with IA_CSS_XNR3_BLENDING_SCALE.
+ * A higher number applies xnr filtering more strongly. A value of 1.0
+ * disables the blending and returns the xnr3 filtered output, while a
+ * value of 0.0 bypasses the entire xnr3 filter.
+ */
+struct ia_css_xnr3_blending_params {
+ int strength; /** Blending strength */
+};
+
+/**
+ * \brief XNR3 public parameters.
+ * \details Struct with all parameters for the XNR3 kernel that can be set
+ * from the CSS API.
+ */
+struct ia_css_xnr3_config {
+ struct ia_css_xnr3_sigma_params sigma; /** XNR3 sigma parameters */
+ struct ia_css_xnr3_coring_params coring; /** XNR3 coring parameters */
+ struct ia_css_xnr3_blending_params blending; /** XNR3 blending parameters */
+};
+
+#endif /* __IA_CSS_XNR3_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c
new file mode 100644
index 000000000000..a1d0e915636d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c
@@ -0,0 +1,217 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "ia_css_ynr.host.h"
+
+const struct ia_css_nr_config default_nr_config = {
+ 16384,
+ 8192,
+ 1280,
+ 0,
+ 0
+};
+
+const struct ia_css_ee_config default_ee_config = {
+ 8192,
+ 128,
+ 2048
+};
+
+void
+ia_css_nr_encode(
+ struct sh_css_isp_ynr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ /* YNR (Y Noise Reduction) */
+ to->threshold =
+ uDIGIT_FITTING(8192U, 16, SH_CSS_BAYER_BITS);
+ to->gain_all =
+ uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT);
+ to->gain_dir =
+ uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT);
+ to->threshold_cb =
+ uDIGIT_FITTING(from->threshold_cb, 16, SH_CSS_BAYER_BITS);
+ to->threshold_cr =
+ uDIGIT_FITTING(from->threshold_cr, 16, SH_CSS_BAYER_BITS);
+}
+
+void
+ia_css_yee_encode(
+ struct sh_css_isp_yee_params *to,
+ const struct ia_css_yee_config *from,
+ unsigned int size)
+{
+ int asiWk1 = (int)from->ee.gain;
+ int asiWk2 = asiWk1 / 8;
+ int asiWk3 = asiWk1 / 4;
+
+ (void)size;
+ /* YEE (Y Edge Enhancement) */
+ to->dirthreshold_s =
+ min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS)
+ << 1),
+ SH_CSS_BAYER_MAXVAL);
+ to->dirthreshold_g =
+ min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS)
+ << 4),
+ SH_CSS_BAYER_MAXVAL);
+ to->dirthreshold_width_log2 =
+ uFRACTION_BITS_FITTING(8);
+ to->dirthreshold_width =
+ 1 << to->dirthreshold_width_log2;
+ to->detailgain =
+ uDIGIT_FITTING(from->ee.detail_gain, 11,
+ SH_CSS_YEE_DETAIL_GAIN_SHIFT);
+ to->coring_s =
+ (uDIGIT_FITTING(56U, 16, SH_CSS_BAYER_BITS) *
+ from->ee.threshold) >> 8;
+ to->coring_g =
+ (uDIGIT_FITTING(224U, 16, SH_CSS_BAYER_BITS) *
+ from->ee.threshold) >> 8;
+ /* 8; // *1.125 ->[s4.8] */
+ to->scale_plus_s =
+ (asiWk1 + asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+ /* 8; // ( * -.25)->[s4.8] */
+ to->scale_plus_g =
+ (0 - asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+ /* 8; // *0.875 ->[s4.8] */
+ to->scale_minus_s =
+ (asiWk1 - asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+ /* 8; // ( *.25 ) ->[s4.8] */
+ to->scale_minus_g =
+ (asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+ to->clip_plus_s =
+ uDIGIT_FITTING(32760U, 16, SH_CSS_BAYER_BITS);
+ to->clip_plus_g = 0;
+ to->clip_minus_s =
+ uDIGIT_FITTING(504U, 16, SH_CSS_BAYER_BITS);
+ to->clip_minus_g =
+ uDIGIT_FITTING(32256U, 16, SH_CSS_BAYER_BITS);
+ to->Yclip = SH_CSS_BAYER_MAXVAL;
+}
+
+void
+ia_css_nr_dump(
+ const struct sh_css_isp_ynr_params *ynr,
+ unsigned int level)
+{
+	if (!ynr)
+		return;
+ ia_css_debug_dtrace(level,
+ "Y Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_threshold", ynr->threshold);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_gain_all", ynr->gain_all);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_gain_dir", ynr->gain_dir);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_threshold_cb", ynr->threshold_cb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_threshold_cr", ynr->threshold_cr);
+}
+
+void
+ia_css_yee_dump(
+ const struct sh_css_isp_yee_params *yee,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "Y Edge Enhancement:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_s",
+ yee->dirthreshold_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_g",
+ yee->dirthreshold_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_width_log2",
+ yee->dirthreshold_width_log2);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_width",
+ yee->dirthreshold_width);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_detailgain",
+ yee->detailgain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_coring_s",
+ yee->coring_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_coring_g",
+ yee->coring_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_plus_s",
+ yee->scale_plus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_plus_g",
+ yee->scale_plus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_minus_s",
+ yee->scale_minus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_minus_g",
+ yee->scale_minus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_plus_s",
+ yee->clip_plus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_plus_g",
+ yee->clip_plus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_minus_s",
+ yee->clip_minus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_minus_g",
+ yee->clip_minus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_Yclip",
+ yee->Yclip);
+}
+
+void
+ia_css_nr_debug_dtrace(
+ const struct ia_css_nr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.direction=%d, config.bnr_gain=%d, config.ynr_gain=%d, config.threshold_cb=%d, config.threshold_cr=%d\n",
+ config->direction,
+ config->bnr_gain, config->ynr_gain,
+ config->threshold_cb, config->threshold_cr);
+}
+
+void
+ia_css_ee_debug_dtrace(
+ const struct ia_css_ee_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d, config.gain=%d, config.detail_gain=%d\n",
+ config->threshold, config->gain, config->detail_gain);
+}
+
+void
+ia_css_init_ynr_state(
+ void/*struct sh_css_isp_ynr_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h
new file mode 100644
index 000000000000..20165093a298
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR_HOST_H
+#define __IA_CSS_YNR_HOST_H
+
+#include "ia_css_ynr_types.h"
+#include "ia_css_ynr_param.h"
+
+extern const struct ia_css_nr_config default_nr_config;
+extern const struct ia_css_ee_config default_ee_config;
+
+void
+ia_css_nr_encode(
+ struct sh_css_isp_ynr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned int size);
+
+void
+ia_css_yee_encode(
+ struct sh_css_isp_yee_params *to,
+ const struct ia_css_yee_config *from,
+ unsigned int size);
+
+void
+ia_css_nr_dump(
+ const struct sh_css_isp_ynr_params *ynr,
+ unsigned int level);
+
+void
+ia_css_yee_dump(
+ const struct sh_css_isp_yee_params *yee,
+ unsigned int level);
+
+void
+ia_css_nr_debug_dtrace(
+ const struct ia_css_nr_config *config,
+ unsigned int level);
+
+void
+ia_css_ee_debug_dtrace(
+ const struct ia_css_ee_config *config,
+ unsigned int level);
+
+void
+ia_css_init_ynr_state(
+ void/*struct sh_css_isp_ynr_vmem_state*/ * state,
+ size_t size);
+#endif /* __IA_CSS_YNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h
new file mode 100644
index 000000000000..8f104bcb4d0f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR_PARAM_H
+#define __IA_CSS_YNR_PARAM_H
+
+#include "type_support.h"
+
+/* YNR (Y Noise Reduction) */
+struct sh_css_isp_ynr_params {
+ s32 threshold;
+ s32 gain_all;
+ s32 gain_dir;
+ s32 threshold_cb;
+ s32 threshold_cr;
+};
+
+/* YEE (Y Edge Enhancement) */
+struct sh_css_isp_yee_params {
+ s32 dirthreshold_s;
+ s32 dirthreshold_g;
+ s32 dirthreshold_width_log2;
+ s32 dirthreshold_width;
+ s32 detailgain;
+ s32 coring_s;
+ s32 coring_g;
+ s32 scale_plus_s;
+ s32 scale_plus_g;
+ s32 scale_minus_s;
+ s32 scale_minus_g;
+ s32 clip_plus_s;
+ s32 clip_plus_g;
+ s32 clip_minus_s;
+ s32 clip_minus_g;
+ s32 Yclip;
+};
+
+#endif /* __IA_CSS_YNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
new file mode 100644
index 000000000000..1a62e1dbfc6f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
@@ -0,0 +1,80 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR_TYPES_H
+#define __IA_CSS_YNR_TYPES_H
+
+/* @file
+ * CSS-API header file for Noise Reduction (BNR) and YCC Noise Reduction
+ * (YNR, CNR).
+ */
+
+/* Configuration used by Bayer Noise Reduction (BNR) and
+ * YCC Noise Reduction (YNR,CNR).
+ *
+ * ISP block: BNR1, YNR1, CNR1
+ * ISP1: BNR1,YNR1,CNR1 are used.
+ * ISP2: BNR1,YNR1,CNR1 are used for Preview/Video.
+ * BNR1,YNR2,CNR2 are used for Still.
+ */
+struct ia_css_nr_config {
+ ia_css_u0_16 bnr_gain; /** Strength of noise reduction (BNR).
+ u0.16, [0,65535],
+ default 14336(0.21875), ineffective 0 */
+ ia_css_u0_16 ynr_gain; /** Strength of noise reduction (YNR).
+ u0.16, [0,65535],
+ default 14336(0.21875), ineffective 0 */
+ ia_css_u0_16 direction; /** Sensitivity of edge (BNR).
+ u0.16, [0,65535],
+ default 512(0.0078125), ineffective 0 */
+ ia_css_u0_16 threshold_cb; /** Coring threshold for Cb (CNR).
+ This is the same as
+ de_config.c1_coring_threshold.
+ u0.16, [0,65535],
+ default 0(0), ineffective 0 */
+ ia_css_u0_16 threshold_cr; /** Coring threshold for Cr (CNR).
+ This is the same as
+ de_config.c2_coring_threshold.
+ u0.16, [0,65535],
+ default 0(0), ineffective 0 */
+};
+
+/* Edge Enhancement (sharpen) configuration.
+ *
+ * ISP block: YEE1
+ * ISP1: YEE1 is used.
+ * ISP2: YEE1 is used for Preview/Video.
+ * (YEE2 is used for Still.)
+ */
+struct ia_css_ee_config {
+ ia_css_u5_11 gain; /** The strength of sharpness.
+ u5.11, [0,65535],
+ default 8192(4.0), ineffective 0 */
+	ia_css_u8_8 threshold;	/** The threshold that separates noise from
+					edges.
+					u8.8, [0,65535],
+					default 256(1.0), ineffective 65535 */
+ ia_css_u5_11 detail_gain; /** The strength of sharpness in pell-mell
+ area.
+ u5.11, [0,65535],
+ default 2048(1.0), ineffective 0 */
+};
+
+/* YNR and YEE (sharpen) configuration.
+ */
+struct ia_css_yee_config {
+ struct ia_css_nr_config nr; /** The NR configuration. */
+ struct ia_css_ee_config ee; /** The EE configuration. */
+};
+
+#endif /* __IA_CSS_YNR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c
new file mode 100644
index 000000000000..9a3cd59c4507
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c
@@ -0,0 +1,118 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ia_css_ynr2.host.h"
+
+const struct ia_css_ynr_config default_ynr_config = {
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+const struct ia_css_fc_config default_fc_config = {
+ 1,
+ 0, /* 0 -> ineffective */
+ 0, /* 0 -> ineffective */
+ 0, /* 0 -> ineffective */
+ 0, /* 0 -> ineffective */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */
+ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */
+ (int16_t)-(1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */
+ (int16_t)-(1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */
+};
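+
+/* With ISP_VEC_ELEMBITS == 14 (an assumption; the usual value for this
+ * ISP), the initializers above evaluate to 4096 (0.5 in u0.13),
+ * 8191 (the largest positive value, almost 1.0) and -8192 (-1.0).
+ */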
+
+void
+ia_css_ynr_encode(
+ struct sh_css_isp_yee2_params *to,
+ const struct ia_css_ynr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->edge_sense_gain_0 = from->edge_sense_gain_0;
+ to->edge_sense_gain_1 = from->edge_sense_gain_1;
+ to->corner_sense_gain_0 = from->corner_sense_gain_0;
+ to->corner_sense_gain_1 = from->corner_sense_gain_1;
+}
+
+void
+ia_css_fc_encode(
+ struct sh_css_isp_fc_params *to,
+ const struct ia_css_fc_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->gain_exp = from->gain_exp;
+
+ to->coring_pos_0 = from->coring_pos_0;
+ to->coring_pos_1 = from->coring_pos_1;
+ to->coring_neg_0 = from->coring_neg_0;
+ to->coring_neg_1 = from->coring_neg_1;
+
+ to->gain_pos_0 = from->gain_pos_0;
+ to->gain_pos_1 = from->gain_pos_1;
+ to->gain_neg_0 = from->gain_neg_0;
+ to->gain_neg_1 = from->gain_neg_1;
+
+ to->crop_pos_0 = from->crop_pos_0;
+ to->crop_pos_1 = from->crop_pos_1;
+ to->crop_neg_0 = from->crop_neg_0;
+ to->crop_neg_1 = from->crop_neg_1;
+}
+
+void
+ia_css_ynr_dump(
+ const struct sh_css_isp_yee2_params *yee2,
+ unsigned int level);
+
+void
+ia_css_fc_dump(
+ const struct sh_css_isp_fc_params *fc,
+ unsigned int level);
+
+void
+ia_css_fc_debug_dtrace(
+ const struct ia_css_fc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.gain_exp=%d, config.coring_pos_0=%d, config.coring_pos_1=%d, config.coring_neg_0=%d, config.coring_neg_1=%d, config.gain_pos_0=%d, config.gain_pos_1=%d, config.gain_neg_0=%d, config.gain_neg_1=%d, config.crop_pos_0=%d, config.crop_pos_1=%d, config.crop_neg_0=%d, config.crop_neg_1=%d\n",
+ config->gain_exp,
+ config->coring_pos_0, config->coring_pos_1,
+ config->coring_neg_0, config->coring_neg_1,
+ config->gain_pos_0, config->gain_pos_1,
+ config->gain_neg_0, config->gain_neg_1,
+ config->crop_pos_0, config->crop_pos_1,
+ config->crop_neg_0, config->crop_neg_1);
+}
+
+void
+ia_css_ynr_debug_dtrace(
+ const struct ia_css_ynr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.edge_sense_gain_0=%d, config.edge_sense_gain_1=%d, config.corner_sense_gain_0=%d, config.corner_sense_gain_1=%d\n",
+ config->edge_sense_gain_0, config->edge_sense_gain_1,
+ config->corner_sense_gain_0, config->corner_sense_gain_1);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h
new file mode 100644
index 000000000000..38204f8c5735
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h
@@ -0,0 +1,56 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR2_HOST_H
+#define __IA_CSS_YNR2_HOST_H
+
+#include "ia_css_ynr2_types.h"
+#include "ia_css_ynr2_param.h"
+
+extern const struct ia_css_ynr_config default_ynr_config;
+extern const struct ia_css_fc_config default_fc_config;
+
+void
+ia_css_ynr_encode(
+ struct sh_css_isp_yee2_params *to,
+ const struct ia_css_ynr_config *from,
+ unsigned int size);
+
+void
+ia_css_fc_encode(
+ struct sh_css_isp_fc_params *to,
+ const struct ia_css_fc_config *from,
+ unsigned int size);
+
+void
+ia_css_ynr_dump(
+ const struct sh_css_isp_yee2_params *yee2,
+ unsigned int level);
+
+void
+ia_css_fc_dump(
+ const struct sh_css_isp_fc_params *fc,
+ unsigned int level);
+
+void
+ia_css_fc_debug_dtrace(
+ const struct ia_css_fc_config *config,
+ unsigned int level);
+
+void
+ia_css_ynr_debug_dtrace(
+ const struct ia_css_ynr_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_YNR2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h
new file mode 100644
index 000000000000..7479bce598d5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR2_PARAM_H
+#define __IA_CSS_YNR2_PARAM_H
+
+#include "type_support.h"
+
+/* YNR (Y Noise Reduction), YEE (Y Edge Enhancement) */
+struct sh_css_isp_yee2_params {
+ s32 edge_sense_gain_0;
+ s32 edge_sense_gain_1;
+ s32 corner_sense_gain_0;
+ s32 corner_sense_gain_1;
+};
+
+/* Fringe Control */
+struct sh_css_isp_fc_params {
+ s32 gain_exp;
+ u16 coring_pos_0;
+ u16 coring_pos_1;
+ u16 coring_neg_0;
+ u16 coring_neg_1;
+ s32 gain_pos_0;
+ s32 gain_pos_1;
+ s32 gain_neg_0;
+ s32 gain_neg_1;
+ s32 crop_pos_0;
+ s32 crop_pos_1;
+ s32 crop_neg_0;
+ s32 crop_neg_1;
+};
+
+#endif /* __IA_CSS_YNR2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
new file mode 100644
index 000000000000..36e4bb61b38c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
@@ -0,0 +1,93 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR2_TYPES_H
+#define __IA_CSS_YNR2_TYPES_H
+
+/* @file
+ * CSS-API header file for Y(Luma) Noise Reduction.
+ */
+
+/* Y(Luma) Noise Reduction configuration.
+ *
+ * ISP block: YNR2 & YEE2
+ * (ISP1: YNR1 and YEE1 are used.)
+ * (ISP2: YNR1 and YEE1 are used for Preview/Video.)
+ * ISP2: YNR2 and YEE2 are used for Still.
+ */
+struct ia_css_ynr_config {
+ u16 edge_sense_gain_0; /** Sensitivity of edge in dark area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+ u16 edge_sense_gain_1; /** Sensitivity of edge in bright area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+ u16 corner_sense_gain_0; /** Sensitivity of corner in dark area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+ u16 corner_sense_gain_1; /** Sensitivity of corner in bright area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+};
+
+/* Fringe Control configuration.
+ *
+ * ISP block: FC2 (FC2 is used with YNR2/YEE2.)
+ * (ISP1: FC2 is not used.)
+ * (ISP2: FC2 is not for Preview/Video.)
+ * ISP2: FC2 is used for Still.
+ */
+struct ia_css_fc_config {
+ u8 gain_exp; /** Common exponent of gains.
+ u8.0, [0,13],
+ default 1, ineffective 0 */
+ u16 coring_pos_0; /** Coring threshold for positive edge in dark area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ u16 coring_pos_1; /** Coring threshold for positive edge in bright area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ u16 coring_neg_0; /** Coring threshold for negative edge in dark area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ u16 coring_neg_1; /** Coring threshold for negative edge in bright area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ u16 gain_pos_0; /** Gain for positive edge in dark area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ u16 gain_pos_1; /** Gain for positive edge in bright area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ u16 gain_neg_0; /** Gain for negative edge in dark area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ u16 gain_neg_1; /** Gain for negative edge in bright area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ u16 crop_pos_0; /** Limit for positive edge in dark area.
+ u0.13, [0,8191],
+ default/ineffective 8191(almost 1.0) */
+ u16 crop_pos_1; /** Limit for positive edge in bright area.
+ u0.13, [0,8191],
+ default/ineffective 8191(almost 1.0) */
+ s16 crop_neg_0; /** Limit for negative edge in dark area.
+ s0.13, [-8192,0],
+ default/ineffective -8192(-1.0) */
+ s16 crop_neg_1; /** Limit for negative edge in bright area.
+ s0.13, [-8192,0],
+ default/ineffective -8192(-1.0) */
+};
+
+#endif /* __IA_CSS_YNR2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h b/drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h
new file mode 100644
index 000000000000..5774c905d8e1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h
@@ -0,0 +1,37 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef _INPUT_BUF_ISP_H_
+#define _INPUT_BUF_ISP_H_
+
+/* Temporary include, since IA_CSS_BINARY_MODE_COPY is still needed */
+#include "sh_css_defs.h"
+#include "isp_const.h" /* MAX_VECTORS_PER_INPUT_LINE */
+
+#define INPUT_BUF_HEIGHT 2 /* double buffer */
+#define INPUT_BUF_LINES 2
+
+#ifndef ENABLE_CONTINUOUS
+#define ENABLE_CONTINUOUS 0
+#endif
+
+/* In continuous mode, the input buffer must be a fixed size for all binaries
+ * and at a fixed address since it will be used by the SP. */
+#define EXTRA_INPUT_VECTORS 2 /* For left padding */
+#define MAX_VECTORS_PER_INPUT_LINE_CONT (CEIL_DIV(SH_CSS_MAX_SENSOR_WIDTH, ISP_NWAY) + EXTRA_INPUT_VECTORS)
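+
+/* Worked example (the numbers are assumptions for illustration): with
+ * SH_CSS_MAX_SENSOR_WIDTH == 4608 and ISP_NWAY == 64 this yields
+ * CEIL_DIV(4608, 64) + 2 == 72 + 2 == 74 vectors per input line. */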
+
+/* The input buffer should be on a fixed address in vmem, for continuous capture */
+#define INPUT_BUF_ADDR 0x0
+
+#endif /* _INPUT_BUF_ISP_H_ */
diff --git a/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h
new file mode 100644
index 000000000000..fc392c7fb18b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h
@@ -0,0 +1,180 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef _COMMON_ISP_CONST_H_
+#define _COMMON_ISP_CONST_H_
+
+/*#include "isp.h"*/ /* ISP_VEC_NELEMS */
+
+/* Binary independent constants */
+
+#ifndef NO_HOIST
+# define NO_HOIST HIVE_ATTRIBUTE((no_hoist))
+#endif
+
+#define NO_HOIST_CSE HIVE_ATTRIBUTE((no_hoist, no_cse))
+
+#define UNION struct /* Union constructors not allowed in C++ */
+
+#define XMEM_WIDTH_BITS HIVE_ISP_DDR_WORD_BITS
+#define XMEM_SHORTS_PER_WORD (HIVE_ISP_DDR_WORD_BITS / 16)
+#define XMEM_INTS_PER_WORD (HIVE_ISP_DDR_WORD_BITS / 32)
+#define XMEM_POW2_BYTES_PER_WORD HIVE_ISP_DDR_WORD_BYTES
+
+#define BITS8_ELEMENTS_PER_XMEM_ADDR CEIL_DIV(XMEM_WIDTH_BITS, 8)
+#define BITS16_ELEMENTS_PER_XMEM_ADDR CEIL_DIV(XMEM_WIDTH_BITS, 16)
+
+#if ISP_VEC_NELEMS == 64
+#define ISP_NWAY_LOG2 6
+#elif ISP_VEC_NELEMS == 32
+#define ISP_NWAY_LOG2 5
+#elif ISP_VEC_NELEMS == 16
+#define ISP_NWAY_LOG2 4
+#elif ISP_VEC_NELEMS == 8
+#define ISP_NWAY_LOG2 3
+#else
+#error "isp_const.h ISP_VEC_NELEMS must be one of {8, 16, 32, 64}"
+#endif
+
+/* *****************************
+ * ISP input/output buffer sizes
+ * ****************************/
+/* input image */
+#define INPUT_BUF_DMA_HEIGHT 2
+#define INPUT_BUF_HEIGHT 2 /* double buffer */
+#define OUTPUT_BUF_DMA_HEIGHT 2
+#define OUTPUT_BUF_HEIGHT 2 /* double buffer */
+#define OUTPUT_NUM_TRANSFERS 4
+
+/* GDC accelerator: Up/Down Scaling */
+/* These should be moved to the gdc_defs.h in the device */
+#define UDS_SCALING_N HRT_GDC_N
+/* AB: This should cover the zooming up to 16MP */
+#define UDS_MAX_OXDIM 5000
+/* We support at most 2 planes with different parameters
+   - luma and chroma (YUV420) */
+#define UDS_MAX_PLANES 2
+#define UDS_BLI_BLOCK_HEIGHT 2
+#define UDS_BCI_BLOCK_HEIGHT 4
+#define UDS_BLI_INTERP_ENVELOPE 1
+#define UDS_BCI_INTERP_ENVELOPE 3
+#define UDS_MAX_ZOOM_FAC 64
+/* Make it always one FPGA vector.
+ Four FPGA vectors are required and
+ four of them fit in one ASIC vector.*/
+#define UDS_MAX_CHUNKS 16
+
+#define ISP_LEFT_PADDING _ISP_LEFT_CROP_EXTRA(ISP_LEFT_CROPPING)
+#define ISP_LEFT_PADDING_VECS CEIL_DIV(ISP_LEFT_PADDING, ISP_VEC_NELEMS)
+/* In the continuous case, the cropping of the current binary does not matter
+   for the buffer calculation; the cropping of the SP copy is used instead. */
+#define ISP_LEFT_PADDING_CONT _ISP_LEFT_CROP_EXTRA(SH_CSS_MAX_LEFT_CROPPING)
+#define ISP_LEFT_PADDING_VECS_CONT CEIL_DIV(ISP_LEFT_PADDING_CONT, ISP_VEC_NELEMS)
+
+#define CEIL_ROUND_DIV_STRIPE(width, stripe, padding) \
+ CEIL_MUL(padding + CEIL_DIV(width - padding, stripe), ((ENABLE_RAW_BINNING || ENABLE_FIXED_BAYER_DS) ? 4 : 2))
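+
+/* Worked example (illustrative): CEIL_ROUND_DIV_STRIPE(width = 51,
+ * stripe = 2, padding = 1) first takes 1 + CEIL_DIV(50, 2) == 26 vectors
+ * per stripe, then rounds up to a multiple of 2 (or 4 when raw binning or
+ * fixed bayer downscaling is enabled), giving 26. */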
+
+/* output (Y,U,V) image, 4:2:0 */
+#define MAX_VECTORS_PER_LINE \
+ CEIL_ROUND_DIV_STRIPE(CEIL_DIV(ISP_MAX_INTERNAL_WIDTH, ISP_VEC_NELEMS), \
+ ISP_NUM_STRIPES, \
+ ISP_LEFT_PADDING_VECS)
+
+/*
+ * 'ITERATOR_VECTOR_INCREMENT' explanation:
+ * when striping an even number of iterations, one of the stripes is
+ * one iteration wider than the other to account for overlap,
+ * so the calculation for the output buffer vmem size is:
+ * ((width[vectors] / num_of_stripes) + 2[vectors])
+ */
+#define MAX_VECTORS_PER_OUTPUT_LINE \
+ CEIL_DIV(CEIL_DIV(ISP_MAX_OUTPUT_WIDTH, ISP_NUM_STRIPES) + ISP_LEFT_PADDING, ISP_VEC_NELEMS)
+
+/* Must be even due to interlaced bayer input */
+#define MAX_VECTORS_PER_INPUT_LINE CEIL_MUL((CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS) + ISP_LEFT_PADDING_VECS), 2)
+#define MAX_VECTORS_PER_INPUT_STRIPE CEIL_ROUND_DIV_STRIPE(MAX_VECTORS_PER_INPUT_LINE, \
+ ISP_NUM_STRIPES, \
+ ISP_LEFT_PADDING_VECS)
+
+/* Add 2 for left cropping */
+#define MAX_SP_RAW_COPY_VECTORS_PER_INPUT_LINE (CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS) + 2)
+
+#define MAX_VECTORS_PER_BUF_LINE \
+ (MAX_VECTORS_PER_LINE + DUMMY_BUF_VECTORS)
+#define MAX_VECTORS_PER_BUF_INPUT_LINE \
+ (MAX_VECTORS_PER_INPUT_STRIPE + DUMMY_BUF_VECTORS)
+#define MAX_OUTPUT_Y_FRAME_WIDTH \
+ (MAX_VECTORS_PER_LINE * ISP_VEC_NELEMS)
+#define MAX_OUTPUT_Y_FRAME_SIMDWIDTH \
+ MAX_VECTORS_PER_LINE
+#define MAX_OUTPUT_C_FRAME_WIDTH \
+ (MAX_OUTPUT_Y_FRAME_WIDTH / 2)
+#define MAX_OUTPUT_C_FRAME_SIMDWIDTH \
+ CEIL_DIV(MAX_OUTPUT_C_FRAME_WIDTH, ISP_VEC_NELEMS)
+
+/* should be even */
+#define NO_CHUNKING (OUTPUT_NUM_CHUNKS == 1)
+
+#define MAX_VECTORS_PER_CHUNK \
+ (NO_CHUNKING ? MAX_VECTORS_PER_LINE \
+ : 2 * CEIL_DIV(MAX_VECTORS_PER_LINE, \
+ 2 * OUTPUT_NUM_CHUNKS))
+
+#define MAX_C_VECTORS_PER_CHUNK \
+ (MAX_VECTORS_PER_CHUNK / 2)
+
+/* should be even */
+#define MAX_VECTORS_PER_OUTPUT_CHUNK \
+ (NO_CHUNKING ? MAX_VECTORS_PER_OUTPUT_LINE \
+ : 2 * CEIL_DIV(MAX_VECTORS_PER_OUTPUT_LINE, \
+ 2 * OUTPUT_NUM_CHUNKS))
+
+#define MAX_C_VECTORS_PER_OUTPUT_CHUNK \
+ (MAX_VECTORS_PER_OUTPUT_CHUNK / 2)
+
+/* should be even */
+#define MAX_VECTORS_PER_INPUT_CHUNK \
+ (INPUT_NUM_CHUNKS == 1 ? MAX_VECTORS_PER_INPUT_STRIPE \
+ : 2 * CEIL_DIV(MAX_VECTORS_PER_INPUT_STRIPE, \
+ 2 * OUTPUT_NUM_CHUNKS))
+
+#define DEFAULT_C_SUBSAMPLING 2
+
+/****** DMA buffer properties */
+
+#define RAW_BUF_LINES ((ENABLE_RAW_BINNING || ENABLE_FIXED_BAYER_DS) ? 4 : 2)
+
+#define RAW_BUF_STRIDE \
+ (BINARY_ID == SH_CSS_BINARY_ID_POST_ISP ? MAX_VECTORS_PER_INPUT_CHUNK : \
+ ISP_NUM_STRIPES > 1 ? MAX_VECTORS_PER_INPUT_STRIPE + _ISP_EXTRA_PADDING_VECS : \
+ !ENABLE_CONTINUOUS ? MAX_VECTORS_PER_INPUT_LINE : \
+ MAX_VECTORS_PER_INPUT_CHUNK)
+
+/* [isp vmem] table size[vectors] per line per color (GR,R,B,GB),
+ multiples of NWAY */
+#define ISP2400_SCTBL_VECTORS_PER_LINE_PER_COLOR \
+ CEIL_DIV(ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+#define ISP2401_SCTBL_VECTORS_PER_LINE_PER_COLOR \
+ CEIL_DIV(ISP2401_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+/* [isp vmem] table size[vectors] per line for 4colors (GR,R,B,GB),
+ multiples of NWAY */
+#define SCTBL_VECTORS_PER_LINE \
+ (SCTBL_VECTORS_PER_LINE_PER_COLOR * IA_CSS_SC_NUM_COLORS)
+
+/*************/
+
+/* Format for fixed primaries */
+
+#define ISP_FIXED_PRIMARY_FORMAT IA_CSS_FRAME_FORMAT_NV12
+
+#endif /* _COMMON_ISP_CONST_H_ */
diff --git a/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h
new file mode 100644
index 000000000000..6bdf8451e7d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h
@@ -0,0 +1,79 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef _ISP_TYPES_H_
+#define _ISP_TYPES_H_
+
+/* Workaround: hivecc complains about "tag "sh_css_3a_output" already declared"
+ without this extra decl. */
+struct ia_css_3a_output;
+
+/* Input stream formats, these correspond to the MIPI formats and the way
+ * the CSS receiver sends these to the input formatter.
+ * The bit depth of each pixel element is stored in the global variable
+ * isp_bits_per_pixel.
+ * NOTE: for rgb565 we set isp_bits_per_pixel to 565; for all other rgb
+ * formats it is the actual depth (4 for 444, 8 for 888, etc.).
+ */
+enum sh_stream_format {
+ sh_stream_format_yuv420_legacy,
+ sh_stream_format_yuv420,
+ sh_stream_format_yuv422,
+ sh_stream_format_rgb,
+ sh_stream_format_raw,
+ sh_stream_format_binary, /* bytestream such as jpeg */
+};
+
+struct s_isp_frames {
+	/* Global variables that are written to by either the SP or the host;
+	   every ISP binary needs these. */
+ /* output frame */
+ char *xmem_base_addr_y;
+ char *xmem_base_addr_uv;
+ char *xmem_base_addr_u;
+ char *xmem_base_addr_v;
+ /* 2nd output frame */
+ char *xmem_base_addr_second_out_y;
+ char *xmem_base_addr_second_out_u;
+ char *xmem_base_addr_second_out_v;
+ /* input yuv frame */
+ char *xmem_base_addr_y_in;
+ char *xmem_base_addr_u_in;
+ char *xmem_base_addr_v_in;
+ /* input raw frame */
+ char *xmem_base_addr_raw;
+ /* output raw frame */
+ char *xmem_base_addr_raw_out;
+ /* viewfinder output (vf_veceven) */
+ char *xmem_base_addr_vfout_y;
+ char *xmem_base_addr_vfout_u;
+ char *xmem_base_addr_vfout_v;
+ /* overlay frame (for vf_pp) */
+ char *xmem_base_addr_overlay_y;
+ char *xmem_base_addr_overlay_u;
+ char *xmem_base_addr_overlay_v;
+ /* pre-gdc output frame (gdc input) */
+ char *xmem_base_addr_qplane_r;
+ char *xmem_base_addr_qplane_ratb;
+ char *xmem_base_addr_qplane_gr;
+ char *xmem_base_addr_qplane_gb;
+ char *xmem_base_addr_qplane_b;
+ char *xmem_base_addr_qplane_batr;
+ /* YUV as input, used by postisp binary */
+ char *xmem_base_addr_yuv_16_y;
+ char *xmem_base_addr_yuv_16_u;
+ char *xmem_base_addr_yuv_16_v;
+};
+
+#endif /* _ISP_TYPES_H_ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
new file mode 100644
index 000000000000..759141c9310a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
@@ -0,0 +1,155 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+
+#define IS_INPUT_SYSTEM_VERSION_2
+
+#include <type_support.h>
+
+// CSI receiver has 3 ports.
+#define N_CSI_PORTS (3)
+// AM: Use the previous define for this.
+
+// MIPI allows up to 4 channels.
+#define N_CHANNELS (4)
+// 12 KB = 256 bit x 384 words
+#define IB_CAPACITY_IN_WORDS (384)
+
+typedef enum {
+ MIPI_0LANE_CFG = 0,
+ MIPI_1LANE_CFG = 1,
+ MIPI_2LANE_CFG = 2,
+ MIPI_3LANE_CFG = 3,
+ MIPI_4LANE_CFG = 4
+} mipi_lane_cfg_t;
+
+typedef enum {
+ INPUT_SYSTEM_SOURCE_SENSOR = 0,
+ INPUT_SYSTEM_SOURCE_FIFO,
+ INPUT_SYSTEM_SOURCE_TPG,
+ INPUT_SYSTEM_SOURCE_PRBS,
+ INPUT_SYSTEM_SOURCE_MEMORY,
+ N_INPUT_SYSTEM_SOURCE
+} input_system_source_t;
+
+/* internal routing configuration */
+typedef enum {
+ INPUT_SYSTEM_DISCARD_ALL = 0,
+ INPUT_SYSTEM_CSI_BACKEND = 1,
+ INPUT_SYSTEM_INPUT_BUFFER = 2,
+ INPUT_SYSTEM_MULTICAST = 3,
+ N_INPUT_SYSTEM_CONNECTION
+} input_system_connection_t;
+
+typedef enum {
+ INPUT_SYSTEM_MIPI_PORT0,
+ INPUT_SYSTEM_MIPI_PORT1,
+ INPUT_SYSTEM_MIPI_PORT2,
+ INPUT_SYSTEM_ACQUISITION_UNIT,
+ N_INPUT_SYSTEM_MULTIPLEX
+} input_system_multiplex_t;
+
+typedef enum {
+ INPUT_SYSTEM_SINK_MEMORY = 0,
+ INPUT_SYSTEM_SINK_ISP,
+ INPUT_SYSTEM_SINK_SP,
+ N_INPUT_SYSTEM_SINK
+} input_system_sink_t;
+
+typedef enum {
+ INPUT_SYSTEM_FIFO_CAPTURE = 0,
+ INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING,
+ INPUT_SYSTEM_SRAM_BUFFERING,
+ INPUT_SYSTEM_XMEM_BUFFERING,
+ INPUT_SYSTEM_XMEM_CAPTURE,
+ INPUT_SYSTEM_XMEM_ACQUIRE,
+ N_INPUT_SYSTEM_BUFFERING_MODE
+} buffering_mode_t;
+
+typedef struct input_system_cfg_s input_system_cfg_t;
+typedef struct sync_generator_cfg_s sync_generator_cfg_t;
+typedef struct tpg_cfg_s tpg_cfg_t;
+typedef struct prbs_cfg_s prbs_cfg_t;
+
+/* MW: uint16_t should be sufficient */
+struct input_system_cfg_s {
+ u32 no_side_band;
+ u32 fmt_type;
+ u32 ch_id;
+ u32 input_mode;
+};
+
+struct sync_generator_cfg_s {
+ u32 width;
+ u32 height;
+ u32 hblank_cycles;
+ u32 vblank_cycles;
+};
+
+/* MW: tpg & prbs are exclusive */
+struct tpg_cfg_s {
+ u32 x_mask;
+ u32 y_mask;
+ u32 x_delta;
+ u32 y_delta;
+ u32 xy_mask;
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+struct prbs_cfg_s {
+ u32 seed;
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+struct gpfifo_cfg_s {
+// TBD.
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+typedef struct gpfifo_cfg_s gpfifo_cfg_t;
+
+// ALX: Commented out to pass compilation.
+//typedef struct input_system_cfg_s input_system_cfg_t;
+
+struct ib_buffer_s {
+ u32 mem_reg_size;
+ u32 nof_mem_regs;
+ u32 mem_reg_addr;
+};
+
+typedef struct ib_buffer_s ib_buffer_t;
+
+struct csi_cfg_s {
+ u32 csi_port;
+ buffering_mode_t buffering_mode;
+ ib_buffer_t csi_buffer;
+ ib_buffer_t acquisition_buffer;
+ u32 nof_xmem_buffers;
+};
+
+typedef struct csi_cfg_s csi_cfg_t;
+
+typedef enum {
+ INPUT_SYSTEM_CFG_FLAG_RESET = 0,
+ INPUT_SYSTEM_CFG_FLAG_SET = 1U << 0,
+ INPUT_SYSTEM_CFG_FLAG_BLOCKED = 1U << 1,
+ INPUT_SYSTEM_CFG_FLAG_REQUIRED = 1U << 2,
+ INPUT_SYSTEM_CFG_FLAG_CONFLICT = 1U << 3 // To mark a conflicting configuration.
+} input_system_cfg_flag_t;
+
+typedef u32 input_system_config_flags_t;
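+
+/* Usage sketch (illustrative only): the flags above are single-bit masks
+ * OR-ed into an input_system_config_flags_t word, e.g.
+ *
+ *	input_system_config_flags_t f = INPUT_SYSTEM_CFG_FLAG_RESET;
+ *	f |= INPUT_SYSTEM_CFG_FLAG_SET | INPUT_SYSTEM_CFG_FLAG_REQUIRED;
+ *	if (f & INPUT_SYSTEM_CFG_FLAG_CONFLICT)
+ *		reject_configuration();	// hypothetical error path
+ */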
+
+#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
new file mode 100644
index 000000000000..3c0e2efb08ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
@@ -0,0 +1,539 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#include "input_system_global.h"
+
+#include "input_system_defs.h" /* HIVE_ISYS_GPREG_MULTICAST_A_IDX,... */
+#include "css_receiver_2400_defs.h" /* _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX, _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX,... */
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+#include "isp_capture_defs.h"
+#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/* Same name, but keep the distinction; it is a different device */
+#include "isp_capture_defs.h"
+#else
+#error "input_system_local.h: 2400_SYSTEM must be one of {2400, 2401 }"
+#endif
+#include "isp_acquisition_defs.h"
+#include "input_system_ctrl_defs.h"
+
+typedef enum {
+ INPUT_SYSTEM_ERR_NO_ERROR = 0,
+ INPUT_SYSTEM_ERR_GENERIC,
+ INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET,
+ INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE,
+ INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED,
+ N_INPUT_SYSTEM_ERR
+} input_system_error_t;
+
+typedef enum {
+ INPUT_SYSTEM_PORT_A = 0,
+ INPUT_SYSTEM_PORT_B,
+ INPUT_SYSTEM_PORT_C,
+ N_INPUT_SYSTEM_PORTS
+} input_system_csi_port_t;
+
+typedef struct ctrl_unit_cfg_s ctrl_unit_cfg_t;
+typedef struct input_system_network_cfg_s input_system_network_cfg_t;
+typedef struct target_cfg2400_s target_cfg2400_t;
+typedef struct channel_cfg_s channel_cfg_t;
+typedef struct backend_channel_cfg_s backend_channel_cfg_t;
+typedef struct input_system_cfg2400_s input_system_cfg2400_t;
+typedef struct mipi_port_state_s mipi_port_state_t;
+typedef struct rx_channel_state_s rx_channel_state_t;
+typedef struct input_switch_cfg_channel_s input_switch_cfg_channel_t;
+typedef struct input_switch_cfg_s input_switch_cfg_t;
+
+struct ctrl_unit_cfg_s {
+ ib_buffer_t buffer_mipi[N_CAPTURE_UNIT_ID];
+ ib_buffer_t buffer_acquire[N_ACQUISITION_UNIT_ID];
+};
+
+struct input_system_network_cfg_s {
+ input_system_connection_t multicast_cfg[N_CAPTURE_UNIT_ID];
+ input_system_multiplex_t mux_cfg;
+ ctrl_unit_cfg_t ctrl_unit_cfg[N_CTRL_UNIT_ID];
+};
+
+typedef struct {
+// TBD.
+ u32 dummy_parameter;
+} target_isp_cfg_t;
+
+typedef struct {
+// TBD.
+ u32 dummy_parameter;
+} target_sp_cfg_t;
+
+typedef struct {
+// TBD.
+ u32 dummy_parameter;
+} target_strm2mem_cfg_t;
+
+struct input_switch_cfg_channel_s {
+ u32 hsync_data_reg[2];
+ u32 vsync_data_reg;
+};
+
+struct target_cfg2400_s {
+ input_switch_cfg_channel_t input_switch_channel_cfg;
+ target_isp_cfg_t target_isp_cfg;
+ target_sp_cfg_t target_sp_cfg;
+ target_strm2mem_cfg_t target_strm2mem_cfg;
+};
+
+struct backend_channel_cfg_s {
+ u32 fmt_control_word_1; // Format config.
+ u32 fmt_control_word_2;
+ u32 no_side_band;
+};
+
+typedef union {
+ csi_cfg_t csi_cfg;
+ tpg_cfg_t tpg_cfg;
+ prbs_cfg_t prbs_cfg;
+ gpfifo_cfg_t gpfifo_cfg;
+} source_cfg_t;
+
+struct input_switch_cfg_s {
+ u32 hsync_data_reg[N_RX_CHANNEL_ID * 2];
+ u32 vsync_data_reg;
+};
+
+// Configuration of a channel.
+struct channel_cfg_s {
+ u32 ch_id;
+ backend_channel_cfg_t backend_ch;
+ input_system_source_t source_type;
+ source_cfg_t source_cfg;
+ target_cfg2400_t target_cfg;
+};
+
+// Complete configuration for input system.
+struct input_system_cfg2400_s {
+ input_system_source_t source_type;
+ input_system_config_flags_t source_type_flags;
+ //channel_cfg_t channel[N_CHANNELS];
+ input_system_config_flags_t ch_flags[N_CHANNELS];
+ // This is the place where the buffers' settings are collected, as given.
+ csi_cfg_t csi_value[N_CSI_PORTS];
+ input_system_config_flags_t csi_flags[N_CSI_PORTS];
+
+	// Possibly another struct for ib.
+	// These buffers are set at the end, based on all the configurations.
+ ib_buffer_t csi_buffer[N_CSI_PORTS];
+ input_system_config_flags_t csi_buffer_flags[N_CSI_PORTS];
+ ib_buffer_t acquisition_buffer_unique;
+ input_system_config_flags_t acquisition_buffer_unique_flags;
+	u32 unallocated_ib_mem_words; // Used for checking. DEFAULT = IB_CAPACITY_IN_WORDS.
+ //uint32_t acq_allocated_ib_mem_words;
+
+ input_system_connection_t multicast[N_CSI_PORTS];
+ input_system_multiplex_t multiplexer;
+ input_system_config_flags_t multiplexer_flags;
+
+ tpg_cfg_t tpg_value;
+ input_system_config_flags_t tpg_flags;
+ prbs_cfg_t prbs_value;
+ input_system_config_flags_t prbs_flags;
+ gpfifo_cfg_t gpfifo_value;
+ input_system_config_flags_t gpfifo_flags;
+
+ input_switch_cfg_t input_switch_cfg;
+
+ target_isp_cfg_t target_isp[N_CHANNELS];
+ input_system_config_flags_t target_isp_flags[N_CHANNELS];
+ target_sp_cfg_t target_sp[N_CHANNELS];
+ input_system_config_flags_t target_sp_flags[N_CHANNELS];
+ target_strm2mem_cfg_t target_strm2mem[N_CHANNELS];
+ input_system_config_flags_t target_strm2mem_flags[N_CHANNELS];
+
+ input_system_config_flags_t session_flags;
+
+};
+
+/*
+ * For each MIPI port
+ */
+#define _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX
+#define _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX
+#define _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX
+#define _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX
+#define _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX
+/* new regs for each MIPI port w.r.t. 2300 */
+#define _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX
+#define _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX
+#define _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX
+
+/* _HRT_CSS_RECEIVER_2400_COMP_FORMAT_REG_IDX is not defined per MIPI port but per channel */
+/* _HRT_CSS_RECEIVER_2400_COMP_PREDICT_REG_IDX is not defined per MIPI port but per channel */
+#define _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX
+#define _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX
+#define _HRT_CSS_RECEIVER_RAW18_REG_IDX _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX
+#define _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX
+#define _HRT_CSS_RECEIVER_RAW16_REG_IDX _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX
+
+/* Previously MIPI port regs, now 2x2 logical channel regs */
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC0_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC0_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC1_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC1_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC2_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC2_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC3_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC3_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX
+
+/* Second backend is at offset 0x0700 w.r.t. the first port at offset 0x0100 */
+#define _HRT_CSS_BE_OFFSET 448
+#define _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_SRST_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_SEL_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX + _HRT_CSS_BE_OFFSET)
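+
+/* Worked offset (cf. receiver_reg_load() in the private header): register
+ * indices are word indices, so _HRT_CSS_BE_OFFSET (448) shifts an access by
+ * 448 * sizeof(hrt_data) = 448 * 4 = 0x700 bytes, matching the 0x0700 byte
+ * offset quoted above.
+ */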
+
+#define _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT
+#define _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT
+#define _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT
+#define _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT
+#define _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT
+
+#define _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX
+#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_IDX _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX
+#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_BITS _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS
+
+typedef struct capture_unit_state_s capture_unit_state_t;
+typedef struct acquisition_unit_state_s acquisition_unit_state_t;
+typedef struct ctrl_unit_state_s ctrl_unit_state_t;
+
+/*
+ * In 2300 ports can be configured independently and stream
+ * formats need to be specified. In 2400, there are only 8
+ * supported configurations but the HW is fused to support
+ * only a single one.
+ *
+ * In 2300 the compressed format types are programmed by the
+ * user. In 2400 all stream formats are encoded on the stream.
+ *
+ * Use the enum to check validity of a user configuration
+ */
+typedef enum {
+ MONO_4L_1L_0L = 0,
+ MONO_3L_1L_0L,
+ MONO_2L_1L_0L,
+ MONO_1L_1L_0L,
+ STEREO_2L_1L_2L,
+ STEREO_3L_1L_1L,
+ STEREO_2L_1L_1L,
+ STEREO_1L_1L_1L,
+ N_RX_MODE
+} rx_mode_t;
+
+typedef enum {
+ MIPI_PREDICTOR_NONE = 0,
+ MIPI_PREDICTOR_TYPE1,
+ MIPI_PREDICTOR_TYPE2,
+ N_MIPI_PREDICTOR_TYPES
+} mipi_predictor_t;
+
+typedef enum {
+ MIPI_COMPRESSOR_NONE = 0,
+ MIPI_COMPRESSOR_10_6_10,
+ MIPI_COMPRESSOR_10_7_10,
+ MIPI_COMPRESSOR_10_8_10,
+ MIPI_COMPRESSOR_12_6_12,
+ MIPI_COMPRESSOR_12_7_12,
+ MIPI_COMPRESSOR_12_8_12,
+ N_MIPI_COMPRESSOR_METHODS
+} mipi_compressor_t;
+
+typedef enum {
+ MIPI_FORMAT_RGB888 = 0,
+ MIPI_FORMAT_RGB555,
+ MIPI_FORMAT_RGB444,
+ MIPI_FORMAT_RGB565,
+ MIPI_FORMAT_RGB666,
+ MIPI_FORMAT_RAW8, /* 5 */
+ MIPI_FORMAT_RAW10,
+ MIPI_FORMAT_RAW6,
+ MIPI_FORMAT_RAW7,
+ MIPI_FORMAT_RAW12,
+ MIPI_FORMAT_RAW14, /* 10 */
+ MIPI_FORMAT_YUV420_8,
+ MIPI_FORMAT_YUV420_10,
+ MIPI_FORMAT_YUV422_8,
+ MIPI_FORMAT_YUV422_10,
+ MIPI_FORMAT_CUSTOM0, /* 15 */
+ MIPI_FORMAT_YUV420_8_LEGACY,
+ MIPI_FORMAT_EMBEDDED,
+ MIPI_FORMAT_CUSTOM1,
+ MIPI_FORMAT_CUSTOM2,
+ MIPI_FORMAT_CUSTOM3, /* 20 */
+ MIPI_FORMAT_CUSTOM4,
+ MIPI_FORMAT_CUSTOM5,
+ MIPI_FORMAT_CUSTOM6,
+ MIPI_FORMAT_CUSTOM7,
+ MIPI_FORMAT_YUV420_8_SHIFT, /* 25 */
+ MIPI_FORMAT_YUV420_10_SHIFT,
+ MIPI_FORMAT_RAW16,
+ MIPI_FORMAT_RAW18,
+ N_MIPI_FORMAT,
+} mipi_format_t;
+
+#define MIPI_FORMAT_JPEG MIPI_FORMAT_CUSTOM0
+#define MIPI_FORMAT_BINARY_8 MIPI_FORMAT_CUSTOM0
+#define N_MIPI_FORMAT_CUSTOM 8
+
+/* The number of stores for compressed format types */
+#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM)
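+/* I.e. N_MIPI_COMPRESSOR_CONTEXT = 4 channels x 8 custom formats = 32. */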
+
+typedef enum {
+ RX_IRQ_INFO_BUFFER_OVERRUN = 1UL << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT,
+ RX_IRQ_INFO_INIT_TIMEOUT = 1UL << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT,
+ RX_IRQ_INFO_ENTER_SLEEP_MODE = 1UL << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT,
+ RX_IRQ_INFO_EXIT_SLEEP_MODE = 1UL << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT,
+ RX_IRQ_INFO_ECC_CORRECTED = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT,
+ RX_IRQ_INFO_ERR_SOT = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT,
+ RX_IRQ_INFO_ERR_SOT_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT,
+ RX_IRQ_INFO_ERR_CONTROL = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT,
+ RX_IRQ_INFO_ERR_ECC_DOUBLE = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT,
+ /* RX_IRQ_INFO_NO_ERR = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT, */
+ RX_IRQ_INFO_ERR_CRC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT,
+ RX_IRQ_INFO_ERR_UNKNOWN_ID = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT,
+ RX_IRQ_INFO_ERR_FRAME_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT,
+ RX_IRQ_INFO_ERR_FRAME_DATA = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT,
+ RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1UL << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT,
+ RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT,
+ RX_IRQ_INFO_ERR_LINE_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT,
+} rx_irq_info_t;
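+
+/* Usage sketch (illustrative; uses the public receiver API declared in
+ * isp2400_input_system_public.h):
+ *
+ *	rx_irq_info_t info = receiver_get_irq_info(RX0_ID, MIPI_PORT0_ID);
+ *	if (info & RX_IRQ_INFO_ERR_CRC)
+ *		receiver_irq_clear(RX0_ID, MIPI_PORT0_ID, RX_IRQ_INFO_ERR_CRC);
+ */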
+
+typedef struct rx_cfg_s rx_cfg_t;
+
+/*
+ * Applied per port
+ */
+struct rx_cfg_s {
+ rx_mode_t mode; /* The HW config */
+ enum mipi_port_id port; /* The port ID to apply the control on */
+ unsigned int timeout;
+ unsigned int initcount;
+ unsigned int synccount;
+ unsigned int rxcount;
+ mipi_predictor_t comp; /* Just for backward compatibility */
+ bool is_two_ppc;
+};
+
+/* NOTE: The base already has an offset of 0x0100 */
+static const hrt_address MIPI_PORT_OFFSET[N_MIPI_PORT_ID] = {
+ 0x00000000UL,
+ 0x00000100UL,
+ 0x00000200UL
+};
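+
+/* Address sketch (base values from isp2400_system_local.h): RX_BASE[RX0_ID]
+ * is 0x80100, so port N's registers start at RX_BASE + MIPI_PORT_OFFSET[N],
+ * e.g. MIPI_PORT1_ID -> 0x80100 + 0x100 = 0x80200.
+ */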
+
+static const mipi_lane_cfg_t MIPI_PORT_MAXLANES[N_MIPI_PORT_ID] = {
+ MIPI_4LANE_CFG,
+ MIPI_1LANE_CFG,
+ MIPI_2LANE_CFG
+};
+
+static const bool MIPI_PORT_ACTIVE[N_RX_MODE][N_MIPI_PORT_ID] = {
+ {true, true, false},
+ {true, true, false},
+ {true, true, false},
+ {true, true, false},
+ {true, true, true},
+ {true, true, true},
+ {true, true, true},
+ {true, true, true}
+};
+
+static const mipi_lane_cfg_t MIPI_PORT_LANES[N_RX_MODE][N_MIPI_PORT_ID] = {
+ {MIPI_4LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_2LANE_CFG},
+ {MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG},
+ {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG},
+ {MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG}
+};
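+
+/* Reading the tables above: the rx_mode_t names encode the lane counts of
+ * ports A/B/C in order, e.g. STEREO_2L_1L_2L means port A uses 2 lanes,
+ * port B 1 lane and port C 2 lanes, with all three ports active. A
+ * hypothetical validity check over a user configuration could be:
+ *
+ *	valid = MIPI_PORT_ACTIVE[cfg->mode][cfg->port] &&
+ *		nof_lanes <= MIPI_PORT_LANES[cfg->mode][cfg->port];
+ */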
+
+static const hrt_address SUB_SYSTEM_OFFSET[N_SUB_SYSTEM_ID] = {
+ 0x00001000UL,
+ 0x00002000UL,
+ 0x00003000UL,
+ 0x00004000UL,
+ 0x00005000UL,
+ 0x00009000UL,
+ 0x0000A000UL,
+ 0x0000B000UL,
+ 0x0000C000UL
+};
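+
+/* The offsets above follow the sub_system_ID_t order from system_global.h:
+ * 0x1000/0x2000/0x3000 capture units A/B/C, 0x4000 acquisition, 0x5000 DMA,
+ * 0x9000 ctrl, 0xA000 GP regs, 0xB000 FIFO, 0xC000 IRQ, all relative to
+ * INPUT_SYSTEM_BASE.
+ */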
+
+struct capture_unit_state_s {
+ int Packet_Length;
+ int Received_Length;
+ int Received_Short_Packets;
+ int Received_Long_Packets;
+ int Last_Command;
+ int Next_Command;
+ int Last_Acknowledge;
+ int Next_Acknowledge;
+ int FSM_State_Info;
+ int StartMode;
+ int Start_Addr;
+ int Mem_Region_Size;
+ int Num_Mem_Regions;
+ /* int Init; write-only registers
+ int Start;
+ int Stop; */
+};
+
+struct acquisition_unit_state_s {
+ /* int Init; write-only register */
+ int Received_Short_Packets;
+ int Received_Long_Packets;
+ int Last_Command;
+ int Next_Command;
+ int Last_Acknowledge;
+ int Next_Acknowledge;
+ int FSM_State_Info;
+ int Int_Cntr_Info;
+ int Start_Addr;
+ int Mem_Region_Size;
+ int Num_Mem_Regions;
+};
+
+struct ctrl_unit_state_s {
+ int last_cmd;
+ int next_cmd;
+ int last_ack;
+ int next_ack;
+ int top_fsm_state;
+ int captA_fsm_state;
+ int captB_fsm_state;
+ int captC_fsm_state;
+ int acq_fsm_state;
+ int captA_start_addr;
+ int captB_start_addr;
+ int captC_start_addr;
+ int captA_mem_region_size;
+ int captB_mem_region_size;
+ int captC_mem_region_size;
+ int captA_num_mem_regions;
+ int captB_num_mem_regions;
+ int captC_num_mem_regions;
+ int acq_start_addr;
+ int acq_mem_region_size;
+ int acq_num_mem_regions;
+ /* int ctrl_init; write only register */
+ int capt_reserve_one_mem_region;
+};
+
+struct input_system_state_s {
+ int str_multicastA_sel;
+ int str_multicastB_sel;
+ int str_multicastC_sel;
+ int str_mux_sel;
+ int str_mon_status;
+ int str_mon_irq_cond;
+ int str_mon_irq_en;
+ int isys_srst;
+ int isys_slv_reg_srst;
+ int str_deint_portA_cnt;
+ int str_deint_portB_cnt;
+ struct capture_unit_state_s capture_unit[N_CAPTURE_UNIT_ID];
+ struct acquisition_unit_state_s acquisition_unit[N_ACQUISITION_UNIT_ID];
+ struct ctrl_unit_state_s ctrl_unit_state[N_CTRL_UNIT_ID];
+};
+
+struct mipi_port_state_s {
+ int device_ready;
+ int irq_status;
+ int irq_enable;
+ u32 timeout_count;
+ u16 init_count;
+ u16 raw16_18;
+	u32 sync_count; /* 4 x uint8_t */
+	u32 rx_count; /* 4 x uint8_t */
+ u8 lane_sync_count[MIPI_4LANE_CFG];
+ u8 lane_rx_count[MIPI_4LANE_CFG];
+};
+
+struct rx_channel_state_s {
+ u32 comp_scheme0;
+ u32 comp_scheme1;
+ mipi_predictor_t pred[N_MIPI_FORMAT_CUSTOM];
+ mipi_compressor_t comp[N_MIPI_FORMAT_CUSTOM];
+};
+
+struct receiver_state_s {
+ u8 fs_to_ls_delay;
+ u8 ls_to_data_delay;
+ u8 data_to_le_delay;
+ u8 le_to_fe_delay;
+ u8 fe_to_fs_delay;
+ u8 le_to_fs_delay;
+ bool is_two_ppc;
+ int backend_rst;
+ u16 raw18;
+ bool force_raw8;
+ u16 raw16;
+ struct mipi_port_state_s mipi_port_state[N_MIPI_PORT_ID];
+ struct rx_channel_state_s rx_channel_state[N_RX_CHANNEL_ID];
+ int be_gsp_acc_ovl;
+ int be_srst;
+ int be_is_two_ppc;
+ int be_comp_format0;
+ int be_comp_format1;
+ int be_comp_format2;
+ int be_comp_format3;
+ int be_sel;
+ int be_raw16_config;
+ int be_raw18_config;
+ int be_force_raw8;
+ int be_irq_status;
+ int be_irq_clear;
+};
+
+#endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h
new file mode 100644
index 000000000000..0ce9cbc0063e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h
@@ -0,0 +1,122 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+
+#include "input_system_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_INPUT_SYSTEM_C void input_system_reg_store(
+ const input_system_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_reg_load(
+ const input_system_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg * sizeof(
+ hrt_data));
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store(
+ const rx_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(RX_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_reg_load(
+ const rx_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg *
+ sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const hrt_address reg)
+{
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg *
+ sizeof(hrt_data));
+}
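+
+/* Usage sketch (illustrative): a per-port register read resolves to one
+ * MMIO load at a byte address built from three components, e.g.
+ *
+ *	hrt_data v = receiver_port_reg_load(RX0_ID, MIPI_PORT1_ID,
+ *				_HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
+ *
+ * loads from RX_BASE[RX0_ID] + MIPI_PORT_OFFSET[MIPI_PORT1_ID] +
+ * _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX * sizeof(hrt_data).
+ */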
+
+STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] +
+ reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] +
+ SUB_SYSTEM_OFFSET[sub_ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
new file mode 100644
index 000000000000..d0de27abb95a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
@@ -0,0 +1,369 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_PUBLIC_H_INCLUDED__
+#define __INPUT_SYSTEM_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#include "isys_public.h"
+#else
+
+typedef struct input_system_state_s input_system_state_t;
+typedef struct receiver_state_s receiver_state_t;
+
+/*! Read the state of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param state[out] input system state structure
+
+ \return none, state = INPUT_SYSTEM[ID].state
+ */
+void input_system_get_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state);
+
+/*! Read the state of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param state[out] receiver state structure
+
+ \return none, state = RECEIVER[ID].state
+ */
+void receiver_get_state(
+ const rx_ID_t ID,
+ receiver_state_t *state);
+
+/*! Flag whether a MIPI format is YUV420
+
+ \param mipi_format[in] MIPI format
+
+ \return mipi_format == YUV420
+ */
+bool is_mipi_format_yuv420(
+ const mipi_format_t mipi_format);
+
+/*! Set compression parameters for cfg[cfg_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param cfg_ID[in] Configuration identifier
+ \param comp[in] Compression method
+ \param pred[in] Predictor method
+
+ \NOTE: the storage of compression configuration is
+ implementation specific. The config can be
+ carried either on MIPI ports or on MIPI channels
+
+ \return none, RECEIVER[ID].cfg[cfg_ID] = {comp, pred}
+ */
+void receiver_set_compression(
+ const rx_ID_t ID,
+ const unsigned int cfg_ID,
+ const mipi_compressor_t comp,
+ const mipi_predictor_t pred);
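+
+/* Usage sketch (illustrative; cfg slot 0 chosen arbitrarily): program lossy
+ * 10-8-10 compression with type-1 prediction on the first receiver:
+ *
+ *	receiver_set_compression(RX0_ID, 0, MIPI_COMPRESSOR_10_8_10,
+ *				 MIPI_PREDICTOR_TYPE1);
+ */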
+
+/*! Enable PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param cnd[in] irq predicate
+
+ \return None, enable(RECEIVER[ID].PORT[port_ID])
+ */
+void receiver_port_enable(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const bool cnd);
+
+/*! Flag if PORT[port_ID] of RECEIVER[ID] is enabled
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+
+ \return enable(RECEIVER[ID].PORT[port_ID]) == true
+ */
+bool is_receiver_port_enabled(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID);
+
+/*! Enable the IRQ channels of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param irq_info[in] irq channels
+
+ \return None, enable(RECEIVER[ID].PORT[port_ID].irq_info)
+ */
+void receiver_irq_enable(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const rx_irq_info_t irq_info);
+
+/*! Return the IRQ status of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+
+ \return RECEIVER[ID].PORT[port_ID].irq_info
+ */
+rx_irq_info_t receiver_get_irq_info(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID);
+
+/*! Clear the IRQ status of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param irq_info[in] irq status
+
+ \return None, clear(RECEIVER[ID].PORT[port_ID].irq_info)
+ */
+void receiver_irq_clear(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const rx_irq_info_t irq_info);
+
+/*! Write to a control register of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, INPUT_SYSTEM[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void input_system_reg_store(
+ const input_system_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param reg[in] register index
+
+ \return INPUT_SYSTEM[ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data input_system_reg_load(
+ const input_system_ID_t ID,
+ const hrt_address reg);
+
+/*! Write to a control register of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, RECEIVER[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void receiver_reg_store(
+ const rx_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param reg[in] register index
+
+ \return RECEIVER[ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data receiver_reg_load(
+ const rx_ID_t ID,
+ const hrt_address reg);
+
+/*! Write to a control register of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, RECEIVER[ID].PORT[port_ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void receiver_port_reg_store(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in]			RECEIVER identifier
+ \param port_ID[in]		mipi PORT identifier
+ \param reg[in]			register index
+
+ \return RECEIVER[ID].PORT[port_ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data receiver_port_reg_load(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const hrt_address reg);
+
+/*! Write to a control register of SUB_SYSTEM[sub_ID] of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param sub_ID[in]		sub system identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, INPUT_SYSTEM[ID].SUB_SYSTEM[sub_ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void input_system_sub_system_reg_store(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register SUB_SYSTEM[sub_ID] of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param sub_ID[in]		sub system identifier
+ \param reg[in]			register index
+
+ \return INPUT_SYSTEM[ID].SUB_SYSTEM[sub_ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data input_system_sub_system_reg_load(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg);
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Functions for configuration phase on input system.
+//
+///////////////////////////////////////////////////////////////////////////
+
+// Function that resets current configuration.
+// remove the argument since it should be private.
+input_system_error_t input_system_configuration_reset(void);
+
+// Function that commits current configuration.
+// remove the argument since it should be private.
+input_system_error_t input_system_configuration_commit(void);
+
+///////////////////////////////////////////////////////////////////////////
+//
+// User functions:
+// (encoded generic function)
+// - no checking
+// - decoding name and arguments into the generic (channel) configuration
+// function.
+//
+///////////////////////////////////////////////////////////////////////////
+
+// FIFO channel config function user
+
+input_system_error_t input_system_csi_fifo_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ target_cfg2400_t target
+);
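+
+/* Flow sketch (illustrative; arguments are caller-supplied and error
+ * handling is elided): the configuration phase brackets the per-channel
+ * cfg calls between a reset and a commit:
+ *
+ *	input_system_configuration_reset();
+ *	input_system_csi_fifo_channel_cfg(ch_id, port, backend_ch, target);
+ *	input_system_configuration_commit();
+ */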
+
+input_system_error_t input_system_csi_fifo_channel_with_counting_cfg(
+ u32 ch_id,
+ u32 nof_frame,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 mem_region_size,
+ u32 nof_mem_regions,
+ target_cfg2400_t target
+);
+
+// SRAM channel config function user
+
+input_system_error_t input_system_csi_sram_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 csi_mem_region_size,
+ u32 csi_nof_mem_regions,
+ target_cfg2400_t target
+);
+
+// XMEM channel config function user
+
+input_system_error_t input_system_csi_xmem_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 mem_region_size,
+ u32 nof_mem_regions,
+ u32 acq_mem_region_size,
+ u32 acq_nof_mem_regions,
+ target_cfg2400_t target,
+ uint32_t nof_xmem_buffers
+);
+
+input_system_error_t input_system_csi_xmem_capture_only_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ u32 csi_mem_region_size,
+ u32 csi_nof_mem_regions,
+ u32 acq_mem_region_size,
+ u32 acq_nof_mem_regions,
+ target_cfg2400_t target
+);
+
+input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 acq_mem_region_size,
+ u32 acq_nof_mem_regions,
+ target_cfg2400_t target
+);
+
+// Non-CSI channel config function user
+
+input_system_error_t input_system_prbs_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ u32 seed,
+ u32 sync_gen_width,
+ u32 sync_gen_height,
+ u32 sync_gen_hblank_cycles,
+ u32 sync_gen_vblank_cycles,
+ target_cfg2400_t target
+);
+
+input_system_error_t input_system_tpg_channel_cfg(
+ u32 ch_id,
+	u32 nof_frames, // not used yet
+ u32 x_mask,
+ u32 y_mask,
+ u32 x_delta,
+ u32 y_delta,
+ u32 xy_mask,
+ u32 sync_gen_width,
+ u32 sync_gen_height,
+ u32 sync_gen_hblank_cycles,
+ u32 sync_gen_vblank_cycles,
+ target_cfg2400_t target
+);
+
+input_system_error_t input_system_gpfifo_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ target_cfg2400_t target
+);
+#endif /* #ifdef USE_INPUT_SYSTEM_VERSION_2401 */
+
+#endif /* __INPUT_SYSTEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_support.h b/drivers/staging/media/atomisp/pci/isp2400_support.h
new file mode 100644
index 000000000000..e9106d1e6a63
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_support.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp2400_support_h
+#define _isp2400_support_h
+
+#ifndef ISP2400_VECTOR_TYPES
+/* These typedefs make it possible to include hive header files
+   in the host code, which is useful in crun */
+typedef char *tmemvectors, *tmemvectoru, *tvector;
+#endif
+
+#define hrt_isp_vamem1_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem1), addr, val)
+#define hrt_isp_vamem2_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem2), addr, val)
+
+#define hrt_isp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _base_dmem)
+#define hrt_isp_vmem(cell) HRT_PROC_TYPE_PROP(cell, _simd_vmem)
+
+#define hrt_isp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_dmem(cell))
+#define hrt_isp_vmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_vmem(cell))
+
+#if ISP_HAS_HIST
+#define hrt_isp_hist(cell) HRT_PROC_TYPE_PROP(cell, _simd_histogram)
+#define hrt_isp_hist_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_hist(cell))
+#endif
+
+#endif /* _isp2400_support_h */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_system_global.h b/drivers/staging/media/atomisp/pci/isp2400_system_global.h
new file mode 100644
index 000000000000..06fce25f2034
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_system_global.h
@@ -0,0 +1,348 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
+#define __SYSTEM_GLOBAL_H_INCLUDED__
+
+#include <hive_isp_css_defs.h>
+#include <type_support.h>
+
+/*
+ * The longest allowed (uninterruptible) bus transfer; it does not
+ * take stalling into account
+ */
+#define HIVE_ISP_MAX_BURST_LENGTH 1024
+
+/*
+ * Maximum allowed burst length in words for the ISP DMA
+ */
+#define ISP_DMA_MAX_BURST_LENGTH 128
+
+/*
+ * Create a list of HAS and IS properties that defines the system
+ *
+ * The configuration assumes the following
+ * - The system is heterogeneous: multiple cell and device classes
+ * - The cell and device instances are homogeneous, each device type
+ * belongs to the same class
+ * - Device instances supporting a subset of the class capabilities are
+ * allowed
+ *
+ * We could manage different device classes through the enumerated
+ * lists (C) or the use of classes (C++), but that is presently not
+ * fully supported
+ *
+ * N.B. the 3 input formatters are of 2 different classes
+ */
+
+/*
+ * Since this file is visible everywhere and the system definition
+ * macros are not, detect the separate definitions for {host, SP, ISP}
+ *
+ * The 2401 system has the nice property that it uses a vanilla 2400 SP
+ * so the SP will believe it is a 2400 system rather than 2401...
+ */
+//#if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) || defined(__scalar_processor_2401)
+#if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada)
+#define IS_ISP_2401_MAMOIADA_SYSTEM
+#define HAS_ISP_2401_MAMOIADA
+#define HAS_SP_2400
+//#elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) || defined(__scalar_processor_2400)
+#elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada)
+#define IS_ISP_2400_MAMOIADA_SYSTEM
+#define HAS_ISP_2400_MAMOIADA
+#define HAS_SP_2400
+#else
+#error "system_global.h: 2400_SYSTEM must be one of {2400, 2401 }"
+#endif
+
+#define USE_INPUT_SYSTEM_VERSION_2
+
+#define HAS_MMU_VERSION_2
+#define HAS_DMA_VERSION_2
+#define HAS_GDC_VERSION_2
+#define HAS_VAMEM_VERSION_2
+#define HAS_HMEM_VERSION_1
+#define HAS_BAMEM_VERSION_2
+#define HAS_IRQ_VERSION_2
+#define HAS_IRQ_MAP_VERSION_2
+#define HAS_INPUT_FORMATTER_VERSION_2
+/* 2401: HAS_INPUT_SYSTEM_VERSION_2401 */
+#define HAS_INPUT_SYSTEM_VERSION_2
+#define HAS_BUFFERED_SENSOR
+#define HAS_FIFO_MONITORS_VERSION_2
+/* #define HAS_GP_REGS_VERSION_2 */
+#define HAS_GP_DEVICE_VERSION_2
+#define HAS_GPIO_VERSION_1
+#define HAS_TIMED_CTRL_VERSION_1
+#define HAS_RX_VERSION_2
+
+#define DMA_DDR_TO_VAMEM_WORKAROUND
+#define DMA_DDR_TO_HMEM_WORKAROUND
+
+/*
+ * Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply
+ */
+#define HRT_VADDRESS_WIDTH 32
+//#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property*/
+#define HRT_DATA_WIDTH 32
+
+#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
+#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8)
+
+/* The main bus connecting all devices */
+#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
+#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
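+/* E.g. with HRT_DATA_WIDTH 32: SIZEOF_HRT_REG = 32 >> 3 = 4 bytes. */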
+
+/* per-frame parameter handling support */
+#define SH_CSS_ENABLE_PER_FRAME_PARAMS
+
+typedef u32 hrt_bus_align_t;
+
+/*
+ * Enumerate the devices, device access through the API is by ID, through the DLI by address
+ * The enumerator terminators are used to size the wiring arrays and as an exception value.
+ */
+typedef enum {
+ DDR0_ID = 0,
+ N_DDR_ID
+} ddr_ID_t;
+
+typedef enum {
+ ISP0_ID = 0,
+ N_ISP_ID
+} isp_ID_t;
+
+typedef enum {
+ SP0_ID = 0,
+ N_SP_ID
+} sp_ID_t;
+
+#if defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+#elif defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+#else
+#error "system_global.h: SYSTEM must be one of {2400, 2401}"
+#endif
+
+typedef enum {
+ DMA0_ID = 0,
+ N_DMA_ID
+} dma_ID_t;
+
+typedef enum {
+ GDC0_ID = 0,
+ GDC1_ID,
+ N_GDC_ID
+} gdc_ID_t;
+
+#define N_GDC_ID_CPP 2 // this extra define is needed because we want to use it also in the preprocessor, and that doesn't work with enums.
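+/* Why a macro rather than the enum: the preprocessor replaces unknown
+ * identifiers with 0, so "#if N_GDC_ID == 2" would always be false, while
+ * "#if N_GDC_ID_CPP == 2" evaluates as intended.
+ */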
+
+typedef enum {
+ VAMEM0_ID = 0,
+ VAMEM1_ID,
+ VAMEM2_ID,
+ N_VAMEM_ID
+} vamem_ID_t;
+
+typedef enum {
+ BAMEM0_ID = 0,
+ N_BAMEM_ID
+} bamem_ID_t;
+
+typedef enum {
+ HMEM0_ID = 0,
+ N_HMEM_ID
+} hmem_ID_t;
+
+/*
+typedef enum {
+ IRQ0_ID = 0,
+ N_IRQ_ID
+} irq_ID_t;
+*/
+
+typedef enum {
+ IRQ0_ID = 0, // GP IRQ block
+ IRQ1_ID, // Input formatter
+ IRQ2_ID, // input system
+ IRQ3_ID, // input selector
+ N_IRQ_ID
+} irq_ID_t;
+
+typedef enum {
+ FIFO_MONITOR0_ID = 0,
+ N_FIFO_MONITOR_ID
+} fifo_monitor_ID_t;
+
+/*
+ * Deprecated: Since all gp_reg instances are different
+ * and put in the address maps of other devices we cannot
+ * enumerate them as that assumes the instances are the
+ * same.
+ *
+ * We define a single GP_DEVICE containing all gp_regs
+ * w.r.t. a single base address
+ *
+typedef enum {
+ GP_REGS0_ID = 0,
+ N_GP_REGS_ID
+} gp_regs_ID_t;
+ */
+typedef enum {
+ GP_DEVICE0_ID = 0,
+ N_GP_DEVICE_ID
+} gp_device_ID_t;
+
+typedef enum {
+ GP_TIMER0_ID = 0,
+ GP_TIMER1_ID,
+ GP_TIMER2_ID,
+ GP_TIMER3_ID,
+ GP_TIMER4_ID,
+ GP_TIMER5_ID,
+ GP_TIMER6_ID,
+ GP_TIMER7_ID,
+ N_GP_TIMER_ID
+} gp_timer_ID_t;
+
+typedef enum {
+ GPIO0_ID = 0,
+ N_GPIO_ID
+} gpio_ID_t;
+
+typedef enum {
+ TIMED_CTRL0_ID = 0,
+ N_TIMED_CTRL_ID
+} timed_ctrl_ID_t;
+
+typedef enum {
+ INPUT_FORMATTER0_ID = 0,
+ INPUT_FORMATTER1_ID,
+ INPUT_FORMATTER2_ID,
+ INPUT_FORMATTER3_ID,
+ N_INPUT_FORMATTER_ID
+} input_formatter_ID_t;
+
+/* The IF RST is outside the IF */
+#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
+#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
+#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
+#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
+
+#define INPUT_FORMATTER0_SRST_MASK 0x0001
+#define INPUT_FORMATTER1_SRST_MASK 0x0002
+#define INPUT_FORMATTER2_SRST_MASK 0x0004
+#define INPUT_FORMATTER3_SRST_MASK 0x0008
+
+typedef enum {
+ INPUT_SYSTEM0_ID = 0,
+ N_INPUT_SYSTEM_ID
+} input_system_ID_t;
+
+typedef enum {
+ RX0_ID = 0,
+ N_RX_ID
+} rx_ID_t;
+
+enum mipi_port_id {
+ MIPI_PORT0_ID = 0,
+ MIPI_PORT1_ID,
+ MIPI_PORT2_ID,
+ N_MIPI_PORT_ID
+};
+
+#define N_RX_CHANNEL_ID 4
+
+/* Generic port enumeration with an internal port type ID */
+typedef enum {
+ CSI_PORT0_ID = 0,
+ CSI_PORT1_ID,
+ CSI_PORT2_ID,
+ TPG_PORT0_ID,
+ PRBS_PORT0_ID,
+ FIFO_PORT0_ID,
+ MEMORY_PORT0_ID,
+ N_INPUT_PORT_ID
+} input_port_ID_t;
+
+typedef enum {
+ CAPTURE_UNIT0_ID = 0,
+ CAPTURE_UNIT1_ID,
+ CAPTURE_UNIT2_ID,
+ ACQUISITION_UNIT0_ID,
+ DMA_UNIT0_ID,
+ CTRL_UNIT0_ID,
+ GPREGS_UNIT0_ID,
+ FIFO_UNIT0_ID,
+ IRQ_UNIT0_ID,
+ N_SUB_SYSTEM_ID
+} sub_system_ID_t;
+
+#define N_CAPTURE_UNIT_ID 3
+#define N_ACQUISITION_UNIT_ID 1
+#define N_CTRL_UNIT_ID 1
+
+enum ia_css_isp_memories {
+ IA_CSS_ISP_PMEM0 = 0,
+ IA_CSS_ISP_DMEM0,
+ IA_CSS_ISP_VMEM0,
+ IA_CSS_ISP_VAMEM0,
+ IA_CSS_ISP_VAMEM1,
+ IA_CSS_ISP_VAMEM2,
+ IA_CSS_ISP_HMEM0,
+ IA_CSS_SP_DMEM0,
+ IA_CSS_DDR,
+ N_IA_CSS_MEMORIES
+};
+
+#define IA_CSS_NUM_MEMORIES 9
+/* For driver compatibility */
+#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+
+#if 0
+typedef enum {
+ dev_chn, /* device channels, external resource */
+ ext_mem, /* external memories */
+ int_mem, /* internal memories */
+ int_chn /* internal channels, user defined */
+} resource_type_t;
+
+/* if this enum is extended with other memory resources, pls also extend the function resource_to_memptr() */
+typedef enum {
+ vied_nci_dev_chn_dma_ext0,
+ int_mem_vmem0,
+ int_mem_dmem0
+} resource_id_t;
+
+/* enum listing the different memories within a program group.
+ This enum is used in the mem_ptr_t type */
+typedef enum {
+ buf_mem_invalid = 0,
+ buf_mem_vmem_prog0,
+ buf_mem_dmem_prog0
+} buf_mem_t;
+
+#endif
+#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_system_local.h b/drivers/staging/media/atomisp/pci/isp2400_system_local.h
new file mode 100644
index 000000000000..ee38059d6ceb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_system_local.h
@@ -0,0 +1,325 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SYSTEM_LOCAL_H_INCLUDED__
+#define __SYSTEM_LOCAL_H_INCLUDED__
+
+#ifdef HRT_ISP_CSS_CUSTOM_HOST
+#ifndef HRT_USE_VIR_ADDRS
+#define HRT_USE_VIR_ADDRS
+#endif
+/* This interface is deprecated */
+/*#include "hive_isp_css_custom_host_hrt.h"*/
+#endif
+
+#include "system_global.h"
+
+/* HRT assumes 32 by default (see Linux/include/hive_types.h); override it here since this system differs */
+#undef HRT_ADDRESS_WIDTH
+#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */
+
+/* This interface is deprecated */
+#include "hive_types.h"
+
+/*
+ * Cell specific address maps
+ */
+#if HRT_ADDRESS_WIDTH == 64
+
+#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
+
+/* DDR */
+static const hrt_address DDR_BASE[N_DDR_ID] = {
+ (hrt_address)0x0000000120000000ULL
+};
+
+/* ISP */
+static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ (hrt_address)0x0000000000020000ULL
+};
+
+static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ (hrt_address)0x0000000000200000ULL
+};
+
+static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ (hrt_address)0x0000000000100000ULL
+};
+
+static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
+ (hrt_address)0x00000000001C0000ULL,
+ (hrt_address)0x00000000001D0000ULL,
+ (hrt_address)0x00000000001E0000ULL
+};
+
+static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
+ (hrt_address)0x00000000001F0000ULL
+};
+
+/* SP */
+static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ (hrt_address)0x0000000000010000ULL
+};
+
+static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ (hrt_address)0x0000000000300000ULL
+};
+
+static const hrt_address SP_PMEM_BASE[N_SP_ID] = {
+ (hrt_address)0x00000000000B0000ULL
+};
+
+/* MMU */
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) || defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+static const hrt_address MMU_BASE[N_MMU_ID] = {
+ (hrt_address)0x0000000000070000ULL,
+ (hrt_address)0x00000000000A0000ULL
+};
+#else
+#error "system_local.h: SYSTEM must be one of {2400, 2401 }"
+#endif
+
+/* DMA */
+static const hrt_address DMA_BASE[N_DMA_ID] = {
+ (hrt_address)0x0000000000040000ULL
+};
+
+/* IRQ */
+static const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ (hrt_address)0x0000000000000500ULL,
+ (hrt_address)0x0000000000030A00ULL,
+ (hrt_address)0x000000000008C000ULL,
+ (hrt_address)0x0000000000090200ULL
+};
+/*
+ (hrt_address)0x0000000000000500ULL};
+ */
+
+/* GDC */
+static const hrt_address GDC_BASE[N_GDC_ID] = {
+ (hrt_address)0x0000000000050000ULL,
+ (hrt_address)0x0000000000060000ULL
+};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ (hrt_address)0x0000000000000000ULL
+};
+
+/*
+static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ (hrt_address)0x0000000000000000ULL};
+
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ (hrt_address)0x0000000000090000ULL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ (hrt_address)0x0000000000000000ULL
+};
+
+/* GP TIMER: all timer registers are intertwined,
+ * so having multiple base addresses for
+ * different timers does not help */
+static const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x0000000000000600ULL;
+/* GPIO */
+static const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ (hrt_address)0x0000000000000400ULL
+};
+
+/* TIMED_CTRL */
+static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ (hrt_address)0x0000000000000100ULL
+};
+
+/* INPUT_FORMATTER */
+static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ (hrt_address)0x0000000000030000ULL,
+ (hrt_address)0x0000000000030200ULL,
+ (hrt_address)0x0000000000030400ULL,
+ (hrt_address)0x0000000000030600ULL
+}; /* memcpy() */
+
+/* INPUT_SYSTEM */
+static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ (hrt_address)0x0000000000080000ULL
+};
+/* (hrt_address)0x0000000000081000ULL, */ /* capture A */
+/* (hrt_address)0x0000000000082000ULL, */ /* capture B */
+/* (hrt_address)0x0000000000083000ULL, */ /* capture C */
+/* (hrt_address)0x0000000000084000ULL, */ /* Acquisition */
+/* (hrt_address)0x0000000000085000ULL, */ /* DMA */
+/* (hrt_address)0x0000000000089000ULL, */ /* ctrl */
+/* (hrt_address)0x000000000008A000ULL, */ /* GP regs */
+/* (hrt_address)0x000000000008B000ULL, */ /* FIFO */
+/* (hrt_address)0x000000000008C000ULL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+static const hrt_address RX_BASE[N_RX_ID] = {
+ (hrt_address)0x0000000000080100ULL
+};
+
+#elif HRT_ADDRESS_WIDTH == 32
+
+#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */
+
+/* DDR: Attention, this value is not defined in 32-bit */
+static const hrt_address DDR_BASE[N_DDR_ID] = {
+ (hrt_address)0x00000000UL
+};
+
+/* ISP */
+static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ (hrt_address)0x00020000UL
+};
+
+static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ (hrt_address)0x00200000UL
+};
+
+static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ (hrt_address)0x100000UL
+};
+
+static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
+ (hrt_address)0xffffffffUL,
+ (hrt_address)0xffffffffUL,
+ (hrt_address)0xffffffffUL
+};
+
+static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
+ (hrt_address)0xffffffffUL
+};
+
+/* SP */
+static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ (hrt_address)0x00010000UL
+};
+
+static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ (hrt_address)0x00300000UL
+};
+
+static const hrt_address SP_PMEM_BASE[N_SP_ID] = {
+ (hrt_address)0x000B0000UL
+};
+
+/* MMU */
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) || defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+static const hrt_address MMU_BASE[N_MMU_ID] = {
+ (hrt_address)0x00070000UL,
+ (hrt_address)0x000A0000UL
+};
+#else
+#error "system_local.h: SYSTEM must be one of {2400, 2401 }"
+#endif
+
+/* DMA */
+static const hrt_address DMA_BASE[N_DMA_ID] = {
+ (hrt_address)0x00040000UL
+};
+
+/* IRQ */
+static const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ (hrt_address)0x00000500UL,
+ (hrt_address)0x00030A00UL,
+ (hrt_address)0x0008C000UL,
+ (hrt_address)0x00090200UL
+};
+/*
+ (hrt_address)0x00000500UL};
+ */
+
+/* GDC */
+static const hrt_address GDC_BASE[N_GDC_ID] = {
+ (hrt_address)0x00050000UL,
+ (hrt_address)0x00060000UL
+};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ (hrt_address)0x00000000UL
+};
+
+/*
+static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ (hrt_address)0x00000000UL};
+
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ (hrt_address)0x00090000UL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ (hrt_address)0x00000000UL
+};
+
+/* GP TIMER: all timer registers are intertwined,
+ * so having multiple base addresses for
+ * different timers does not help */
+static const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x00000600UL;
+
+/* GPIO */
+static const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ (hrt_address)0x00000400UL
+};
+
+/* TIMED_CTRL */
+static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ (hrt_address)0x00000100UL
+};
+
+/* INPUT_FORMATTER */
+static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ (hrt_address)0x00030000UL,
+ (hrt_address)0x00030200UL,
+ (hrt_address)0x00030400UL
+};
+/* (hrt_address)0x00030600UL, */ /* memcpy() */
+
+/* INPUT_SYSTEM */
+static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ (hrt_address)0x00080000UL
+};
+/* (hrt_address)0x00081000UL, */ /* capture A */
+/* (hrt_address)0x00082000UL, */ /* capture B */
+/* (hrt_address)0x00083000UL, */ /* capture C */
+/* (hrt_address)0x00084000UL, */ /* Acquisition */
+/* (hrt_address)0x00085000UL, */ /* DMA */
+/* (hrt_address)0x00089000UL, */ /* ctrl */
+/* (hrt_address)0x0008A000UL, */ /* GP regs */
+/* (hrt_address)0x0008B000UL, */ /* FIFO */
+/* (hrt_address)0x0008C000UL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+static const hrt_address RX_BASE[N_RX_ID] = {
+ (hrt_address)0x00080100UL
+};
+
+#else
+#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
+#endif
+
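+/*
+ * Illustrative sketch (editorial, not part of the original sources):
+ * the N_*_ID-sized tables above are indexed by the device-ID enums
+ * from system_global.h, and a register address is formed as
+ * base + register index * register size. Assuming SIZEOF_HRT_REG is
+ * provided by system_global.h:
+ */
+static inline hrt_address mmu_reg_addr(const mmu_ID_t ID, const unsigned int reg)
+{
+ /* caller guarantees ID < N_MMU_ID */
+ return MMU_BASE[ID] + reg * SIZEOF_HRT_REG;
+}
+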
+#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
new file mode 100644
index 000000000000..9c882fe134f4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
@@ -0,0 +1,205 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+
+#define IS_INPUT_SYSTEM_VERSION_VERSION_2401
+
+/* CSI receiver has 3 ports. */
+#define N_CSI_PORTS (3)
+
+#include "isys_dma.h" /* isys2401_dma_channel,
+ * isys2401_dma_cfg_t
+ */
+
+#include "ibuf_ctrl.h" /* ibuf_cfg_t,
+ * ibuf_ctrl_cfg_t
+ */
+
+#include "isys_stream2mmio.h" /* stream2mmio_cfg_t */
+
+#include "csi_rx.h" /* csi_rx_frontend_cfg_t,
+ * csi_rx_backend_cfg_t,
+ * csi_rx_backend_lut_entry_t
+ */
+#include "pixelgen.h"
+
+/* maximum number of simultaneously supported virtual channels */
+#define INPUT_SYSTEM_N_STREAM_ID 6
+
+typedef enum {
+ INPUT_SYSTEM_ERR_NO_ERROR = 0,
+ INPUT_SYSTEM_ERR_CREATE_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_CONFIGURE_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_OPEN_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_TRANSFER_FAIL,
+ INPUT_SYSTEM_ERR_CREATE_INPUT_PORT_FAIL,
+ INPUT_SYSTEM_ERR_CONFIGURE_INPUT_PORT_FAIL,
+ INPUT_SYSTEM_ERR_OPEN_INPUT_PORT_FAIL,
+ N_INPUT_SYSTEM_ERR
+} input_system_err_t;
+
+typedef enum {
+ INPUT_SYSTEM_SOURCE_TYPE_UNDEFINED = 0,
+ INPUT_SYSTEM_SOURCE_TYPE_SENSOR,
+ INPUT_SYSTEM_SOURCE_TYPE_TPG,
+ INPUT_SYSTEM_SOURCE_TYPE_PRBS,
+ N_INPUT_SYSTEM_SOURCE_TYPE
+} input_system_source_type_t;
+
+typedef enum {
+ INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME,
+ INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST,
+} input_system_polling_mode_t;
+
+typedef struct input_system_channel_s input_system_channel_t;
+struct input_system_channel_s {
+ stream2mmio_ID_t stream2mmio_id;
+ stream2mmio_sid_ID_t stream2mmio_sid_id;
+
+ ibuf_ctrl_ID_t ibuf_ctrl_id;
+ ib_buffer_t ib_buffer;
+
+ isys2401_dma_ID_t dma_id;
+ isys2401_dma_channel dma_channel;
+};
+
+typedef struct input_system_channel_cfg_s input_system_channel_cfg_t;
+struct input_system_channel_cfg_s {
+ stream2mmio_cfg_t stream2mmio_cfg;
+ ibuf_ctrl_cfg_t ibuf_ctrl_cfg;
+ isys2401_dma_cfg_t dma_cfg;
+ isys2401_dma_port_cfg_t dma_src_port_cfg;
+ isys2401_dma_port_cfg_t dma_dest_port_cfg;
+};
+
+typedef struct input_system_input_port_s input_system_input_port_t;
+struct input_system_input_port_s {
+ input_system_source_type_t source_type;
+
+ struct {
+ csi_rx_frontend_ID_t frontend_id;
+ csi_rx_backend_ID_t backend_id;
+ csi_mipi_packet_type_t packet_type;
+ csi_rx_backend_lut_entry_t backend_lut_entry;
+ } csi_rx;
+
+ struct {
+ csi_mipi_packet_type_t packet_type;
+ csi_rx_backend_lut_entry_t backend_lut_entry;
+ } metadata;
+
+ struct {
+ pixelgen_ID_t pixelgen_id;
+ } pixelgen;
+};
+
+typedef struct input_system_input_port_cfg_s input_system_input_port_cfg_t;
+struct input_system_input_port_cfg_s {
+ struct {
+ csi_rx_frontend_cfg_t frontend_cfg;
+ csi_rx_backend_cfg_t backend_cfg;
+ csi_rx_backend_cfg_t md_backend_cfg;
+ } csi_rx_cfg;
+
+ struct {
+ pixelgen_tpg_cfg_t tpg_cfg;
+ pixelgen_prbs_cfg_t prbs_cfg;
+ } pixelgen_cfg;
+};
+
+typedef struct input_system_cfg_s input_system_cfg_t;
+struct input_system_cfg_s {
+ input_system_input_port_ID_t input_port_id;
+
+ input_system_source_type_t mode;
+
+ /* ISP2401 */
+ input_system_polling_mode_t polling_mode;
+
+ bool online;
+ bool raw_packed;
+ s8 linked_isys_stream_id;
+
+ struct {
+ bool comp_enable;
+ s32 active_lanes;
+ s32 fmt_type;
+ s32 ch_id;
+ s32 comp_predictor;
+ s32 comp_scheme;
+ } csi_port_attr;
+
+ pixelgen_tpg_cfg_t tpg_port_attr;
+
+ pixelgen_prbs_cfg_t prbs_port_attr;
+
+ struct {
+ s32 align_req_in_bytes;
+ s32 bits_per_pixel;
+ s32 pixels_per_line;
+ s32 lines_per_frame;
+ } input_port_resolution;
+
+ struct {
+ s32 left_padding;
+ s32 max_isp_input_width;
+ } output_port_attr;
+
+ struct {
+ bool enable;
+ s32 fmt_type;
+ s32 align_req_in_bytes;
+ s32 bits_per_pixel;
+ s32 pixels_per_line;
+ s32 lines_per_frame;
+ } metadata;
+};
+
+typedef struct virtual_input_system_stream_s virtual_input_system_stream_t;
+struct virtual_input_system_stream_s {
+ u32 id; /* Used when multiple MIPI data types and/or virtual
+  * channels are used. Must be unique within one CSI RX
+  * and lower than SH_CSS_MAX_ISYS_CHANNEL_NODES. */
+ u8 enable_metadata;
+ input_system_input_port_t input_port;
+ input_system_channel_t channel;
+ input_system_channel_t md_channel; /* metadata channel */
+ u8 online;
+ s8 linked_isys_stream_id;
+ u8 valid;
+
+ /* ISP2401 */
+ input_system_polling_mode_t polling_mode;
+ s32 subscr_index;
+};
+
+typedef struct virtual_input_system_stream_cfg_s
+ virtual_input_system_stream_cfg_t;
+struct virtual_input_system_stream_cfg_s {
+ u8 enable_metadata;
+ input_system_input_port_cfg_t input_port_cfg;
+ input_system_channel_cfg_t channel_cfg;
+ input_system_channel_cfg_t md_channel_cfg;
+ u8 valid;
+};
+
+#define ISP_INPUT_BUF_START_ADDR 0
+#define NUM_OF_INPUT_BUF 2
+#define NUM_OF_LINES_PER_BUF 2
+#define LINES_OF_ISP_INPUT_BUF (NUM_OF_INPUT_BUF * NUM_OF_LINES_PER_BUF)
+#define ISP_INPUT_BUF_STRIDE SH_CSS_MAX_SENSOR_WIDTH
+
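+/*
+ * Illustrative sketch (editorial, not part of the original sources):
+ * a minimal input_system_cfg_t for a two-lane sensor on CSI port 0
+ * might be filled in as below; the attribute values are hypothetical.
+ */
+static inline void input_system_cfg_example(input_system_cfg_t *cfg)
+{
+ cfg->input_port_id = INPUT_SYSTEM_CSI_PORT0_ID;
+ cfg->mode = INPUT_SYSTEM_SOURCE_TYPE_SENSOR;
+ cfg->polling_mode = INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME;
+ cfg->online = true;
+ cfg->raw_packed = false;
+ cfg->csi_port_attr.active_lanes = 2; /* hypothetical value */
+ cfg->csi_port_attr.comp_enable = false;
+}
+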
+#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
new file mode 100644
index 000000000000..f199423e28da
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
@@ -0,0 +1,106 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+
+#include "type_support.h"
+#include "input_system_global.h"
+
+#include "ibuf_ctrl.h"
+#include "csi_rx.h"
+#include "pixelgen.h"
+#include "isys_stream2mmio.h"
+#include "isys_irq.h"
+
+typedef input_system_err_t input_system_error_t;
+
+typedef enum {
+ MIPI_FORMAT_SHORT1 = 0x08,
+ MIPI_FORMAT_SHORT2,
+ MIPI_FORMAT_SHORT3,
+ MIPI_FORMAT_SHORT4,
+ MIPI_FORMAT_SHORT5,
+ MIPI_FORMAT_SHORT6,
+ MIPI_FORMAT_SHORT7,
+ MIPI_FORMAT_SHORT8,
+ MIPI_FORMAT_EMBEDDED = 0x12,
+ MIPI_FORMAT_YUV420_8 = 0x18,
+ MIPI_FORMAT_YUV420_10,
+ MIPI_FORMAT_YUV420_8_LEGACY,
+ MIPI_FORMAT_YUV420_8_SHIFT = 0x1C,
+ MIPI_FORMAT_YUV420_10_SHIFT,
+ MIPI_FORMAT_YUV422_8 = 0x1E,
+ MIPI_FORMAT_YUV422_10,
+ MIPI_FORMAT_RGB444 = 0x20,
+ MIPI_FORMAT_RGB555,
+ MIPI_FORMAT_RGB565,
+ MIPI_FORMAT_RGB666,
+ MIPI_FORMAT_RGB888,
+ MIPI_FORMAT_RAW6 = 0x28,
+ MIPI_FORMAT_RAW7,
+ MIPI_FORMAT_RAW8,
+ MIPI_FORMAT_RAW10,
+ MIPI_FORMAT_RAW12,
+ MIPI_FORMAT_RAW14,
+ MIPI_FORMAT_CUSTOM0 = 0x30,
+ MIPI_FORMAT_CUSTOM1,
+ MIPI_FORMAT_CUSTOM2,
+ MIPI_FORMAT_CUSTOM3,
+ MIPI_FORMAT_CUSTOM4,
+ MIPI_FORMAT_CUSTOM5,
+ MIPI_FORMAT_CUSTOM6,
+ MIPI_FORMAT_CUSTOM7,
+ /* MIPI_FORMAT_RAW16 and MIPI_FORMAT_RAW18 are not supported by the 2401 */
+ N_MIPI_FORMAT
+} mipi_format_t;
+
+#define N_MIPI_FORMAT_CUSTOM 8
+
+/* The number of stores for compressed format types */
+#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM)
+#define UNCOMPRESSED_BITS_PER_PIXEL_10 10
+#define UNCOMPRESSED_BITS_PER_PIXEL_12 12
+#define COMPRESSED_BITS_PER_PIXEL_6 6
+#define COMPRESSED_BITS_PER_PIXEL_7 7
+#define COMPRESSED_BITS_PER_PIXEL_8 8
+enum mipi_compressor {
+ MIPI_COMPRESSOR_NONE = 0,
+ MIPI_COMPRESSOR_10_6_10,
+ MIPI_COMPRESSOR_10_7_10,
+ MIPI_COMPRESSOR_10_8_10,
+ MIPI_COMPRESSOR_12_6_12,
+ MIPI_COMPRESSOR_12_7_12,
+ MIPI_COMPRESSOR_12_8_12,
+ N_MIPI_COMPRESSOR_METHODS
+};
+
+typedef enum {
+ MIPI_PREDICTOR_NONE = 0,
+ MIPI_PREDICTOR_TYPE1,
+ MIPI_PREDICTOR_TYPE2,
+ N_MIPI_PREDICTOR_TYPES
+} mipi_predictor_t;
+
+typedef struct input_system_state_s input_system_state_t;
+struct input_system_state_s {
+ ibuf_ctrl_state_t ibuf_ctrl_state[N_IBUF_CTRL_ID];
+ csi_rx_fe_ctrl_state_t csi_rx_fe_ctrl_state[N_CSI_RX_FRONTEND_ID];
+ csi_rx_be_ctrl_state_t csi_rx_be_ctrl_state[N_CSI_RX_BACKEND_ID];
+ pixelgen_ctrl_state_t pixelgen_ctrl_state[N_PIXELGEN_ID];
+ stream2mmio_state_t stream2mmio_state[N_STREAM2MMIO_ID];
+ isys_irqc_state_t isys_irqc_state[N_ISYS_IRQ_ID];
+};
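+
+/*
+ * Illustrative sketch (editorial, not part of the original sources):
+ * the COMPRESSED_BITS_PER_PIXEL_* macros pair with enum mipi_compressor;
+ * mapping a compression scheme to its on-the-wire bits per pixel:
+ */
+static inline unsigned int mipi_compressed_bpp(enum mipi_compressor c)
+{
+ switch (c) {
+ case MIPI_COMPRESSOR_10_6_10:
+ case MIPI_COMPRESSOR_12_6_12:
+  return COMPRESSED_BITS_PER_PIXEL_6;
+ case MIPI_COMPRESSOR_10_7_10:
+ case MIPI_COMPRESSOR_12_7_12:
+  return COMPRESSED_BITS_PER_PIXEL_7;
+ case MIPI_COMPRESSOR_10_8_10:
+ case MIPI_COMPRESSOR_12_8_12:
+  return COMPRESSED_BITS_PER_PIXEL_8;
+ default:
+  return 0; /* MIPI_COMPRESSOR_NONE: data is not compressed */
+ }
+}
+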
+#endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
new file mode 100644
index 000000000000..3f60f59ae51f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
@@ -0,0 +1,129 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+
+#include "input_system_public.h"
+
+STORAGE_CLASS_INPUT_SYSTEM_C input_system_err_t input_system_get_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state)
+{
+ u32 i;
+
+ (void)(ID);
+
+ /* get the states of all CSI RX frontend devices */
+ for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) {
+ csi_rx_fe_ctrl_get_state(
+ (csi_rx_frontend_ID_t)i,
+ &state->csi_rx_fe_ctrl_state[i]);
+ }
+
+ /* get the states of all CSI RX backend devices */
+ for (i = 0; i < N_CSI_RX_BACKEND_ID; i++) {
+ csi_rx_be_ctrl_get_state(
+ (csi_rx_backend_ID_t)i,
+ &state->csi_rx_be_ctrl_state[i]);
+ }
+
+ /* get the states of all pixelgen devices */
+ for (i = 0; i < N_PIXELGEN_ID; i++) {
+ pixelgen_ctrl_get_state(
+ (pixelgen_ID_t)i,
+ &state->pixelgen_ctrl_state[i]);
+ }
+
+ /* get the states of all stream2mmio devices */
+ for (i = 0; i < N_STREAM2MMIO_ID; i++) {
+ stream2mmio_get_state(
+ (stream2mmio_ID_t)i,
+ &state->stream2mmio_state[i]);
+ }
+
+ /* get the states of all ibuf-controller devices */
+ for (i = 0; i < N_IBUF_CTRL_ID; i++) {
+ ibuf_ctrl_get_state(
+ (ibuf_ctrl_ID_t)i,
+ &state->ibuf_ctrl_state[i]);
+ }
+
+ /* get the states of all isys irq controllers */
+ for (i = 0; i < N_ISYS_IRQ_ID; i++) {
+ isys_irqc_state_get((isys_irq_ID_t)i, &state->isys_irqc_state[i]);
+ }
+
+ /* TODO: get the states of all ISYS2401 DMA devices */
+ for (i = 0; i < N_ISYS2401_DMA_ID; i++) {
+ }
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C void input_system_dump_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state)
+{
+ u32 i;
+
+ (void)(ID);
+
+ /* dump the states of all CSI RX frontend devices */
+ for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) {
+ csi_rx_fe_ctrl_dump_state(
+ (csi_rx_frontend_ID_t)i,
+ &state->csi_rx_fe_ctrl_state[i]);
+ }
+
+ /* dump the states of all CSI RX backend devices */
+ for (i = 0; i < N_CSI_RX_BACKEND_ID; i++) {
+ csi_rx_be_ctrl_dump_state(
+ (csi_rx_backend_ID_t)i,
+ &state->csi_rx_be_ctrl_state[i]);
+ }
+
+ /* dump the states of all pixelgen devices */
+ for (i = 0; i < N_PIXELGEN_ID; i++) {
+ pixelgen_ctrl_dump_state(
+ (pixelgen_ID_t)i,
+ &state->pixelgen_ctrl_state[i]);
+ }
+
+ /* dump the states of all st2mmio devices */
+ for (i = 0; i < N_STREAM2MMIO_ID; i++) {
+ stream2mmio_dump_state(
+ (stream2mmio_ID_t)i,
+ &state->stream2mmio_state[i]);
+ }
+
+ /* dump the states of all ibuf-controller devices */
+ for (i = 0; i < N_IBUF_CTRL_ID; i++) {
+ ibuf_ctrl_dump_state(
+ (ibuf_ctrl_ID_t)i,
+ &state->ibuf_ctrl_state[i]);
+ }
+
+ /* dump the states of all isys irq controllers */
+ for (i = 0; i < N_ISYS_IRQ_ID; i++) {
+ isys_irqc_state_dump((isys_irq_ID_t)i, &state->isys_irqc_state[i]);
+ }
+
+ /* TODO: dump the states of all ISYS2401 DMA devices */
+ for (i = 0; i < N_ISYS2401_DMA_ID; i++) {
+ }
+}
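+
+/*
+ * Usage sketch (editorial, not part of the original sources): capture
+ * and dump the complete input system state for debugging;
+ * INPUT_SYSTEM0_ID comes from system_global.h.
+ */
+static inline void input_system_debug_dump(void)
+{
+ input_system_state_t state;
+
+ if (input_system_get_state(INPUT_SYSTEM0_ID, &state) ==
+     INPUT_SYSTEM_ERR_NO_ERROR)
+  input_system_dump_state(INPUT_SYSTEM0_ID, &state);
+}
+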
+#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_mamoiada_params.h b/drivers/staging/media/atomisp/pci/isp2401_mamoiada_params.h
new file mode 100644
index 000000000000..42f821473826
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2401_mamoiada_params.h
@@ -0,0 +1,228 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Version */
+#define RTL_VERSION
+
+/* Cell name */
+#define ISP_CELL_TYPE isp2401_mamoiada
+#define ISP_VMEM simd_vmem
+#define _HRT_ISP_VMEM isp2401_mamoiada_simd_vmem
+
+/* instruction pipeline depth */
+#define ISP_BRANCHDELAY 5
+
+/* bus */
+#define ISP_BUS_WIDTH 32
+#define ISP_BUS_ADDR_WIDTH 32
+#define ISP_BUS_BURST_SIZE 1
+
+/* data-path */
+#define ISP_SCALAR_WIDTH 32
+#define ISP_SLICE_NELEMS 4
+#define ISP_VEC_NELEMS 64
+#define ISP_VEC_ELEMBITS 14
+#define ISP_VEC_ELEM8BITS 16
+#define ISP_CLONE_DATAPATH_IS_16 1
+
+/* memories */
+#define ISP_DMEM_DEPTH 4096
+#define ISP_DMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_DEPTH 3072
+#define ISP_VMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_ELEMBITS 14
+#define ISP_VMEM_ELEM_PRECISION 14
+#define ISP_PMEM_DEPTH 2048
+#define ISP_PMEM_WIDTH 640
+#define ISP_VAMEM_ADDRESS_BITS 12
+#define ISP_VAMEM_ELEMBITS 12
+#define ISP_VAMEM_DEPTH 2048
+#define ISP_VAMEM_ALIGNMENT 2
+#define ISP_VA_ADDRESS_WIDTH 896
+#define ISP_VEC_VALSU_LATENCY ISP_VEC_NELEMS
+#define ISP_HIST_ADDRESS_BITS 12
+#define ISP_HIST_ALIGNMENT 4
+#define ISP_HIST_COMP_IN_PREC 12
+#define ISP_HIST_DEPTH 1024
+#define ISP_HIST_WIDTH 24
+#define ISP_HIST_COMPONENTS 4
+
+/* program counter */
+#define ISP_PC_WIDTH 13
+
+/* Template switches */
+#define ISP_SHIELD_INPUT_DMEM 0
+#define ISP_SHIELD_OUTPUT_DMEM 1
+#define ISP_SHIELD_INPUT_VMEM 0
+#define ISP_SHIELD_OUTPUT_VMEM 0
+#define ISP_SHIELD_INPUT_PMEM 1
+#define ISP_SHIELD_OUTPUT_PMEM 1
+#define ISP_SHIELD_INPUT_HIST 1
+#define ISP_SHIELD_OUTPUT_HIST 1
+/* When the LUT is selected, shielding is always on */
+#define ISP_SHIELD_INPUT_VAMEM 1
+#define ISP_SHIELD_OUTPUT_VAMEM 1
+
+#define ISP_HAS_IRQ 1
+#define ISP_HAS_SOFT_RESET 1
+#define ISP_HAS_VEC_DIV 0
+#define ISP_HAS_VFU_W_2O 1
+#define ISP_HAS_DEINT3 1
+#define ISP_HAS_LUT 1
+#define ISP_HAS_HIST 1
+#define ISP_HAS_VALSU 1
+#define ISP_HAS_3rdVALSU 1
+#define ISP_VRF1_HAS_2P 1
+
+#define ISP_SRU_GUARDING 1
+#define ISP_VLSU_GUARDING 1
+
+#define ISP_VRF_RAM 1
+#define ISP_SRF_RAM 1
+
+#define ISP_SPLIT_VMUL_VADD_IS 0
+#define ISP_RFSPLIT_FPGA 0
+
+/* RSN or Bus pipelining */
+#define ISP_RSN_PIPE 1
+#define ISP_VSF_BUS_PIPE 0
+
+/* extra slave port to vmem */
+#define ISP_IF_VMEM 0
+#define ISP_GDC_VMEM 0
+
+/* Streaming ports */
+#define ISP_IF 1
+#define ISP_IF_B 1
+#define ISP_GDC 1
+#define ISP_SCL 1
+#define ISP_GPFIFO 1
+#define ISP_SP 1
+
+/* Removing Issue Slot(s) */
+#define ISP_HAS_NOT_SIMD_IS2 0
+#define ISP_HAS_NOT_SIMD_IS3 0
+#define ISP_HAS_NOT_SIMD_IS4 0
+#define ISP_HAS_NOT_SIMD_IS4_VADD 0
+#define ISP_HAS_NOT_SIMD_IS5 0
+#define ISP_HAS_NOT_SIMD_IS6 0
+#define ISP_HAS_NOT_SIMD_IS7 0
+#define ISP_HAS_NOT_SIMD_IS8 0
+
+/* ICache */
+#define ISP_ICACHE 1
+#define ISP_ICACHE_ONLY 0
+#define ISP_ICACHE_PREFETCH 1
+#define ISP_ICACHE_INDEX_BITS 8
+#define ISP_ICACHE_SET_BITS 5
+#define ISP_ICACHE_BLOCKS_PER_SET_BITS 1
+
+/* Experimental Flags */
+#define ISP_EXP_1 0
+#define ISP_EXP_2 0
+#define ISP_EXP_3 0
+#define ISP_EXP_4 0
+#define ISP_EXP_5 0
+#define ISP_EXP_6 0
+
+/* Derived values */
+#define ISP_LOG2_PMEM_WIDTH 10
+#define ISP_VEC_WIDTH 896
+#define ISP_SLICE_WIDTH 56
+#define ISP_VMEM_WIDTH 896
+#define ISP_VMEM_ALIGN 128
+#define ISP_SIMDLSU 1
+#define ISP_LSU_IMM_BITS 12
+
+/* convenient shortcuts for software */
+#define ISP_NWAY ISP_VEC_NELEMS
+#define NBITS ISP_VEC_ELEMBITS
+
+#define _isp_ceil_div(a, b) (((a) + (b) - 1) / (b))
+
+#define ISP_VEC_ALIGN ISP_VMEM_ALIGN
+
+/* HRT specific vector support */
+#define isp2401_mamoiada_vector_alignment ISP_VEC_ALIGN
+#define isp2401_mamoiada_vector_elem_bits ISP_VMEM_ELEMBITS
+#define isp2401_mamoiada_vector_elem_precision ISP_VMEM_ELEM_PRECISION
+#define isp2401_mamoiada_vector_num_elems ISP_VEC_NELEMS
+
+/* register file sizes */
+#define ISP_RF0_SIZE 64
+#define ISP_RF1_SIZE 16
+#define ISP_RF2_SIZE 64
+#define ISP_RF3_SIZE 4
+#define ISP_RF4_SIZE 64
+#define ISP_RF5_SIZE 16
+#define ISP_RF6_SIZE 16
+#define ISP_RF7_SIZE 16
+#define ISP_RF8_SIZE 16
+#define ISP_RF9_SIZE 16
+#define ISP_RF10_SIZE 16
+#define ISP_RF11_SIZE 16
+#define ISP_VRF1_SIZE 32
+#define ISP_VRF2_SIZE 32
+#define ISP_VRF3_SIZE 32
+#define ISP_VRF4_SIZE 32
+#define ISP_VRF5_SIZE 32
+#define ISP_VRF6_SIZE 32
+#define ISP_VRF7_SIZE 32
+#define ISP_VRF8_SIZE 32
+#define ISP_SRF1_SIZE 4
+#define ISP_SRF2_SIZE 64
+#define ISP_SRF3_SIZE 64
+#define ISP_SRF4_SIZE 32
+#define ISP_SRF5_SIZE 64
+#define ISP_FRF0_SIZE 16
+#define ISP_FRF1_SIZE 4
+#define ISP_FRF2_SIZE 16
+#define ISP_FRF3_SIZE 4
+#define ISP_FRF4_SIZE 4
+#define ISP_FRF5_SIZE 8
+#define ISP_FRF6_SIZE 4
+/* register file read latency */
+#define ISP_VRF1_READ_LAT 1
+#define ISP_VRF2_READ_LAT 1
+#define ISP_VRF3_READ_LAT 1
+#define ISP_VRF4_READ_LAT 1
+#define ISP_VRF5_READ_LAT 1
+#define ISP_VRF6_READ_LAT 1
+#define ISP_VRF7_READ_LAT 1
+#define ISP_VRF8_READ_LAT 1
+#define ISP_SRF1_READ_LAT 1
+#define ISP_SRF2_READ_LAT 1
+#define ISP_SRF3_READ_LAT 1
+#define ISP_SRF4_READ_LAT 1
+#define ISP_SRF5_READ_LAT 1
+/* immediate sizes */
+#define ISP_IS1_IMM_BITS 14
+#define ISP_IS2_IMM_BITS 13
+#define ISP_IS3_IMM_BITS 14
+#define ISP_IS4_IMM_BITS 14
+#define ISP_IS5_IMM_BITS 9
+#define ISP_IS6_IMM_BITS 16
+#define ISP_IS7_IMM_BITS 9
+#define ISP_IS8_IMM_BITS 16
+#define ISP_IS9_IMM_BITS 11
+/* fifo depths */
+#define ISP_IF_FIFO_DEPTH 0
+#define ISP_IF_B_FIFO_DEPTH 0
+#define ISP_DMA_FIFO_DEPTH 0
+#define ISP_OF_FIFO_DEPTH 0
+#define ISP_GDC_FIFO_DEPTH 0
+#define ISP_SCL_FIFO_DEPTH 0
+#define ISP_GPFIFO_FIFO_DEPTH 0
+#define ISP_SP_FIFO_DEPTH 0
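+
+/*
+ * Worked example (editorial, not part of the original sources):
+ * _isp_ceil_div() is the rounding-up division used when sizing vector
+ * work, e.g. the number of ISP_VEC_NELEMS-wide vectors needed to
+ * cover a 1920-pixel line:
+ *
+ * _isp_ceil_div(1920, ISP_VEC_NELEMS) = (1920 + 63) / 64 = 30
+ */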
diff --git a/drivers/staging/media/atomisp/pci/isp2401_system_global.h b/drivers/staging/media/atomisp/pci/isp2401_system_global.h
new file mode 100644
index 000000000000..213b6ee52208
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2401_system_global.h
@@ -0,0 +1,457 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
+#define __SYSTEM_GLOBAL_H_INCLUDED__
+
+#include <hive_isp_css_defs.h>
+#include <type_support.h>
+
+/*
+ * The longest allowed (uninterruptible) bus transfer; this does not
+ * take stalling into account.
+ */
+#define HIVE_ISP_MAX_BURST_LENGTH 1024
+
+/*
+ * Maximum allowed burst length in words for the ISP DMA
+ * This value is set to 2 to prevent the ISP DMA from blocking
+ * the bus for too long; as the input system can only buffer
+ * 2 lines on Moorefield and Cherrytrail, the input system buffers
+ * may overflow if blocked for too long (BZ 2726).
+ */
+#define ISP_DMA_MAX_BURST_LENGTH 2
+
+/*
+ * Create a list of HAS and IS properties that defines the system
+ *
+ * The configuration assumes the following:
+ * - The system is heterogeneous; there are multiple cell and device classes
+ * - The cell and device instances are homogeneous, each device type
+ * belongs to the same class
+ * - Device instances supporting a subset of the class capabilities are
+ * allowed
+ *
+ * We could manage different device classes through the enumerated
+ * lists (C) or the use of classes (C++), but that is presently not
+ * fully supported
+ *
+ * N.B. the 3 input formatters are of 2 different classes
+ */
+
+#define USE_INPUT_SYSTEM_VERSION_2401
+
+/*
+ * Since this file is visible everywhere and the system definition
+ * macros are not, detect the separate definitions for {host, SP, ISP}
+ *
+ * The 2401 system has the nice property that it uses a vanilla 2400 SP
+ * so the SP will believe it is a 2400 system rather than 2401...
+ */
+/* #if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) || defined(__scalar_processor_2401) */
+#if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada)
+#define IS_ISP_2401_MAMOIADA_SYSTEM
+#define HAS_ISP_2401_MAMOIADA
+#define HAS_SP_2400
+/* #elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) || defined(__scalar_processor_2400)*/
+#elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada)
+#define IS_ISP_2400_MAMOIADA_SYSTEM
+#define HAS_ISP_2400_MAMOIADA
+#define HAS_SP_2400
+#else
+#error "system_global.h: 2400_SYSTEM must be one of {2400, 2401 }"
+#endif
+
+#define HAS_MMU_VERSION_2
+#define HAS_DMA_VERSION_2
+#define HAS_GDC_VERSION_2
+#define HAS_VAMEM_VERSION_2
+#define HAS_HMEM_VERSION_1
+#define HAS_BAMEM_VERSION_2
+#define HAS_IRQ_VERSION_2
+#define HAS_IRQ_MAP_VERSION_2
+#define HAS_INPUT_FORMATTER_VERSION_2
+/* 2401: HAS_INPUT_SYSTEM_VERSION_3 */
+/* 2400: HAS_INPUT_SYSTEM_VERSION_2 */
+#define HAS_INPUT_SYSTEM_VERSION_2
+#define HAS_INPUT_SYSTEM_VERSION_2401
+#define HAS_BUFFERED_SENSOR
+#define HAS_FIFO_MONITORS_VERSION_2
+/* #define HAS_GP_REGS_VERSION_2 */
+#define HAS_GP_DEVICE_VERSION_2
+#define HAS_GPIO_VERSION_1
+#define HAS_TIMED_CTRL_VERSION_1
+#define HAS_RX_VERSION_2
+#define HAS_NO_INPUT_FORMATTER
+/*#define HAS_NO_PACKED_RAW_PIXELS*/
+/*#define HAS_NO_DVS_6AXIS_CONFIG_UPDATE*/
+
+#define DMA_DDR_TO_VAMEM_WORKAROUND
+#define DMA_DDR_TO_HMEM_WORKAROUND
+
+/*
+ * Semi global. "HRT" is accessible from SP, but
+ * the HRT types do not fully apply
+ */
+#define HRT_VADDRESS_WIDTH 32
+/* Surprise, this is a local property */
+/*#define HRT_ADDRESS_WIDTH 64 */
+#define HRT_DATA_WIDTH 32
+
+#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
+#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8)
+
+/* The main bus connecting all devices */
+#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
+#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
+
+#define CSI2P_DISABLE_ISYS2401_ONLINE_MODE
+
+/* per-frame parameter handling support */
+#define SH_CSS_ENABLE_PER_FRAME_PARAMS
+
+typedef u32 hrt_bus_align_t;
+
+/*
+ * Enumerate the devices, device access through the API is by ID,
+ * through the DLI by address. The enumerator terminators are used
+ * to size the wiring arrays and as an exception value.
+ */
+typedef enum {
+ DDR0_ID = 0,
+ N_DDR_ID
+} ddr_ID_t;
+
+typedef enum {
+ ISP0_ID = 0,
+ N_ISP_ID
+} isp_ID_t;
+
+typedef enum {
+ SP0_ID = 0,
+ N_SP_ID
+} sp_ID_t;
+
+#if defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+#elif defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+#else
+#error "system_global.h: SYSTEM must be one of {2400, 2401}"
+#endif
+
+typedef enum {
+ DMA0_ID = 0,
+ N_DMA_ID
+} dma_ID_t;
+
+typedef enum {
+ GDC0_ID = 0,
+ GDC1_ID,
+ N_GDC_ID
+} gdc_ID_t;
+
+/*
+ * This extra define is needed because we want to use it also in the
+ * preprocessor, and that doesn't work with enums.
+ */
+#define N_GDC_ID_CPP 2
+
+typedef enum {
+ VAMEM0_ID = 0,
+ VAMEM1_ID,
+ VAMEM2_ID,
+ N_VAMEM_ID
+} vamem_ID_t;
+
+typedef enum {
+ BAMEM0_ID = 0,
+ N_BAMEM_ID
+} bamem_ID_t;
+
+typedef enum {
+ HMEM0_ID = 0,
+ N_HMEM_ID
+} hmem_ID_t;
+
+typedef enum {
+ ISYS_IRQ0_ID = 0, /* port a */
+ ISYS_IRQ1_ID, /* port b */
+ ISYS_IRQ2_ID, /* port c */
+ N_ISYS_IRQ_ID
+} isys_irq_ID_t;
+
+typedef enum {
+ IRQ0_ID = 0, /* GP IRQ block */
+ IRQ1_ID, /* Input formatter */
+ IRQ2_ID, /* input system */
+ IRQ3_ID, /* input selector */
+ N_IRQ_ID
+} irq_ID_t;
+
+typedef enum {
+ FIFO_MONITOR0_ID = 0,
+ N_FIFO_MONITOR_ID
+} fifo_monitor_ID_t;
+
+/*
+ * Deprecated: Since all gp_reg instances are different
+ * and put in the address maps of other devices we cannot
+ * enumerate them as that assumes the instances are the
+ * same.
+ *
+ * We define a single GP_DEVICE containing all gp_regs
+ * w.r.t. a single base address
+ *
+typedef enum {
+ GP_REGS0_ID = 0,
+ N_GP_REGS_ID
+} gp_regs_ID_t;
+ */
+typedef enum {
+ GP_DEVICE0_ID = 0,
+ N_GP_DEVICE_ID
+} gp_device_ID_t;
+
+typedef enum {
+ GP_TIMER0_ID = 0,
+ GP_TIMER1_ID,
+ GP_TIMER2_ID,
+ GP_TIMER3_ID,
+ GP_TIMER4_ID,
+ GP_TIMER5_ID,
+ GP_TIMER6_ID,
+ GP_TIMER7_ID,
+ N_GP_TIMER_ID
+} gp_timer_ID_t;
+
+typedef enum {
+ GPIO0_ID = 0,
+ N_GPIO_ID
+} gpio_ID_t;
+
+typedef enum {
+ TIMED_CTRL0_ID = 0,
+ N_TIMED_CTRL_ID
+} timed_ctrl_ID_t;
+
+typedef enum {
+ INPUT_FORMATTER0_ID = 0,
+ INPUT_FORMATTER1_ID,
+ INPUT_FORMATTER2_ID,
+ INPUT_FORMATTER3_ID,
+ N_INPUT_FORMATTER_ID
+} input_formatter_ID_t;
+
+/* The IF RST is outside the IF */
+#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
+#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
+#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
+#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
+
+#define INPUT_FORMATTER0_SRST_MASK 0x0001
+#define INPUT_FORMATTER1_SRST_MASK 0x0002
+#define INPUT_FORMATTER2_SRST_MASK 0x0004
+#define INPUT_FORMATTER3_SRST_MASK 0x0008
+
+typedef enum {
+ INPUT_SYSTEM0_ID = 0,
+ N_INPUT_SYSTEM_ID
+} input_system_ID_t;
+
+typedef enum {
+ RX0_ID = 0,
+ N_RX_ID
+} rx_ID_t;
+
+enum mipi_port_id {
+ MIPI_PORT0_ID = 0,
+ MIPI_PORT1_ID,
+ MIPI_PORT2_ID,
+ N_MIPI_PORT_ID
+};
+
+#define N_RX_CHANNEL_ID 4
+
+/* Generic port enumeration with an internal port type ID */
+typedef enum {
+ CSI_PORT0_ID = 0,
+ CSI_PORT1_ID,
+ CSI_PORT2_ID,
+ TPG_PORT0_ID,
+ PRBS_PORT0_ID,
+ FIFO_PORT0_ID,
+ MEMORY_PORT0_ID,
+ N_INPUT_PORT_ID
+} input_port_ID_t;
+
+typedef enum {
+ CAPTURE_UNIT0_ID = 0,
+ CAPTURE_UNIT1_ID,
+ CAPTURE_UNIT2_ID,
+ ACQUISITION_UNIT0_ID,
+ DMA_UNIT0_ID,
+ CTRL_UNIT0_ID,
+ GPREGS_UNIT0_ID,
+ FIFO_UNIT0_ID,
+ IRQ_UNIT0_ID,
+ N_SUB_SYSTEM_ID
+} sub_system_ID_t;
+
+#define N_CAPTURE_UNIT_ID 3
+#define N_ACQUISITION_UNIT_ID 1
+#define N_CTRL_UNIT_ID 1
+
+/*
+ * Input-buffer Controller.
+ */
+typedef enum {
+ IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */
+ IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */
+ IBUF_CTRL2_ID, /* map to ISYS2401_IBUF_CNTRL_C */
+ N_IBUF_CTRL_ID
+} ibuf_ctrl_ID_t;
+/* end of Input-buffer Controller */
+
+/*
+ * Stream2MMIO.
+ */
+typedef enum {
+ STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */
+ STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */
+ STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */
+ N_STREAM2MMIO_ID
+} stream2mmio_ID_t;
+
+typedef enum {
+ /*
+ * Stream2MMIO 0 has 8 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
+ *
+ * Stream2MMIO 1 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ *
+ * Stream2MMIO 2 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ */
+ STREAM2MMIO_SID0_ID = 0,
+ STREAM2MMIO_SID1_ID,
+ STREAM2MMIO_SID2_ID,
+ STREAM2MMIO_SID3_ID,
+ STREAM2MMIO_SID4_ID,
+ STREAM2MMIO_SID5_ID,
+ STREAM2MMIO_SID6_ID,
+ STREAM2MMIO_SID7_ID,
+ N_STREAM2MMIO_SID_ID
+} stream2mmio_sid_ID_t;
+/* end of Stream2MMIO */
+
+/**
+ * Input System 2401: CSI-MIPI receiver.
+ */
+typedef enum {
+ CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */
+ CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */
+ CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */
+ N_CSI_RX_BACKEND_ID
+} csi_rx_backend_ID_t;
+
+typedef enum {
+ CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */
+ CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */
+ CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */
+#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1)
+} csi_rx_frontend_ID_t;
+
+typedef enum {
+ CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */
+ CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */
+ CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */
+ CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */
+ N_CSI_RX_DLANE_ID
+} csi_rx_fe_dlane_ID_t;
+/* end of CSI-MIPI receiver */
+
+typedef enum {
+ ISYS2401_DMA0_ID = 0,
+ N_ISYS2401_DMA_ID
+} isys2401_dma_ID_t;
+
+/**
+ * Pixel-generator. ("system_global.h")
+ */
+typedef enum {
+ PIXELGEN0_ID = 0,
+ PIXELGEN1_ID,
+ PIXELGEN2_ID,
+ N_PIXELGEN_ID
+} pixelgen_ID_t;
+/* end of pixel-generator. ("system_global.h") */
+
+typedef enum {
+ INPUT_SYSTEM_CSI_PORT0_ID = 0,
+ INPUT_SYSTEM_CSI_PORT1_ID,
+ INPUT_SYSTEM_CSI_PORT2_ID,
+
+ INPUT_SYSTEM_PIXELGEN_PORT0_ID,
+ INPUT_SYSTEM_PIXELGEN_PORT1_ID,
+ INPUT_SYSTEM_PIXELGEN_PORT2_ID,
+
+ N_INPUT_SYSTEM_INPUT_PORT_ID
+} input_system_input_port_ID_t;
+
+#define N_INPUT_SYSTEM_CSI_PORT 3
+
+typedef enum {
+ ISYS2401_DMA_CHANNEL_0 = 0,
+ ISYS2401_DMA_CHANNEL_1,
+ ISYS2401_DMA_CHANNEL_2,
+ ISYS2401_DMA_CHANNEL_3,
+ ISYS2401_DMA_CHANNEL_4,
+ ISYS2401_DMA_CHANNEL_5,
+ ISYS2401_DMA_CHANNEL_6,
+ ISYS2401_DMA_CHANNEL_7,
+ ISYS2401_DMA_CHANNEL_8,
+ ISYS2401_DMA_CHANNEL_9,
+ ISYS2401_DMA_CHANNEL_10,
+ ISYS2401_DMA_CHANNEL_11,
+ N_ISYS2401_DMA_CHANNEL
+} isys2401_dma_channel;
+
+enum ia_css_isp_memories {
+ IA_CSS_ISP_PMEM0 = 0,
+ IA_CSS_ISP_DMEM0,
+ IA_CSS_ISP_VMEM0,
+ IA_CSS_ISP_VAMEM0,
+ IA_CSS_ISP_VAMEM1,
+ IA_CSS_ISP_VAMEM2,
+ IA_CSS_ISP_HMEM0,
+ IA_CSS_SP_DMEM0,
+ IA_CSS_DDR,
+ N_IA_CSS_MEMORIES
+};
+
+#define IA_CSS_NUM_MEMORIES 9
+/* For driver compatibility */
+#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+
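+/*
+ * Illustrative sketch (editorial, not part of the original sources):
+ * the N_* enumerator terminators double as array sizes and loop
+ * bounds, e.g. visiting every ISP memory:
+ */
+static inline void for_each_isp_memory(void (*fn)(enum ia_css_isp_memories))
+{
+ unsigned int i;
+
+ for (i = 0; i < N_IA_CSS_MEMORIES; i++)
+  fn((enum ia_css_isp_memories)i);
+}
+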
+#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_system_local.h b/drivers/staging/media/atomisp/pci/isp2401_system_local.h
new file mode 100644
index 000000000000..4bd95b818494
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2401_system_local.h
@@ -0,0 +1,406 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SYSTEM_LOCAL_H_INCLUDED__
+#define __SYSTEM_LOCAL_H_INCLUDED__
+
+#ifdef HRT_ISP_CSS_CUSTOM_HOST
+#ifndef HRT_USE_VIR_ADDRS
+#define HRT_USE_VIR_ADDRS
+#endif
+/* This interface is deprecated */
+/*#include "hive_isp_css_custom_host_hrt.h"*/
+#endif
+
+#include "system_global.h"
+
+#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */
+
+/* This interface is deprecated */
+#include "hive_types.h"
+
+/*
+ * Cell specific address maps
+ */
+#if HRT_ADDRESS_WIDTH == 64
+
+#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
+
+/* DDR */
+static const hrt_address DDR_BASE[N_DDR_ID] = {
+ 0x0000000120000000ULL
+};
+
+/* ISP */
+static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ 0x0000000000020000ULL
+};
+
+static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ 0x0000000000200000ULL
+};
+
+static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ 0x0000000000100000ULL
+};
+
+static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
+ 0x00000000001C0000ULL,
+ 0x00000000001D0000ULL,
+ 0x00000000001E0000ULL
+};
+
+static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
+ 0x00000000001F0000ULL
+};
+
+/* SP */
+static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ 0x0000000000010000ULL
+};
+
+static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ 0x0000000000300000ULL
+};
+
+/* MMU */
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) || defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+static const hrt_address MMU_BASE[N_MMU_ID] = {
+ 0x0000000000070000ULL,
+ 0x00000000000A0000ULL
+};
+#else
+#error "system_local.h: SYSTEM must be one of {2400, 2401 }"
+#endif
+
+/* DMA */
+static const hrt_address DMA_BASE[N_DMA_ID] = {
+ 0x0000000000040000ULL
+};
+
+static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
+ 0x00000000000CA000ULL
+};
+
+/* IRQ */
+static const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ 0x0000000000000500ULL,
+ 0x0000000000030A00ULL,
+ 0x000000000008C000ULL,
+ 0x0000000000090200ULL
+};
+
+/* GDC */
+static const hrt_address GDC_BASE[N_GDC_ID] = {
+ 0x0000000000050000ULL,
+ 0x0000000000060000ULL
+};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ 0x0000000000000000ULL
+};
+
+/*
+static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ 0x0000000000000000ULL};
+
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x0000000000090000ULL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x0000000000000000ULL
+};
+
+/*
+ * GP TIMER: all timer registers are intertwined, so having multiple
+ * base addresses for different timers does not help.
+ */
+static const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x0000000000000600ULL;
+
+/* GPIO */
+static const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ 0x0000000000000400ULL
+};
+
+/* TIMED_CTRL */
+static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ 0x0000000000000100ULL
+};
+
+/* INPUT_FORMATTER */
+static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ 0x0000000000030000ULL,
+ 0x0000000000030200ULL,
+ 0x0000000000030400ULL,
+ 0x0000000000030600ULL
+}; /* the 4th (last) IF instance is used for memcpy() */
+
+/* INPUT_SYSTEM */
+static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ 0x0000000000080000ULL
+};
+/* 0x0000000000081000ULL, */ /* capture A */
+/* 0x0000000000082000ULL, */ /* capture B */
+/* 0x0000000000083000ULL, */ /* capture C */
+/* 0x0000000000084000ULL, */ /* Acquisition */
+/* 0x0000000000085000ULL, */ /* DMA */
+/* 0x0000000000089000ULL, */ /* ctrl */
+/* 0x000000000008A000ULL, */ /* GP regs */
+/* 0x000000000008B000ULL, */ /* FIFO */
+/* 0x000000000008C000ULL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+static const hrt_address RX_BASE[N_RX_ID] = {
+ 0x0000000000080100ULL
+};
+
+/* IBUF_CTRL, part of the Input System 2401 */
+static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
+ 0x00000000000C1800ULL, /* ibuf controller A */
+ 0x00000000000C3800ULL, /* ibuf controller B */
+ 0x00000000000C5800ULL /* ibuf controller C */
+};
+
+/* ISYS IRQ Controllers, part of the Input System 2401 */
+static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
+ 0x00000000000C1400ULL, /* port a */
+ 0x00000000000C3400ULL, /* port b */
+ 0x00000000000C5400ULL /* port c */
+};
+
+/* CSI FE, part of the Input System 2401 */
+static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
+ 0x00000000000C0400ULL, /* csi fe controller A */
+ 0x00000000000C2400ULL, /* csi fe controller B */
+ 0x00000000000C4400ULL /* csi fe controller C */
+};
+
+/* CSI BE, part of the Input System 2401 */
+static const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
+ 0x00000000000C0800ULL, /* csi be controller A */
+ 0x00000000000C2800ULL, /* csi be controller B */
+ 0x00000000000C4800ULL /* csi be controller C */
+};
+
+/* PIXEL Generator, part of the Input System 2401 */
+static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
+ 0x00000000000C1000ULL, /* pixel gen controller A */
+ 0x00000000000C3000ULL, /* pixel gen controller B */
+ 0x00000000000C5000ULL /* pixel gen controller C */
+};
+
+/* Stream2MMIO, part of the Input System 2401 */
+static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
+ 0x00000000000C0C00ULL, /* stream2mmio controller A */
+ 0x00000000000C2C00ULL, /* stream2mmio controller B */
+ 0x00000000000C4C00ULL /* stream2mmio controller C */
+};
+#elif HRT_ADDRESS_WIDTH == 32
+
+#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */
+
+/* DDR: attention, this value is not defined for the 32-bit case */
+static const hrt_address DDR_BASE[N_DDR_ID] = {
+ 0x00000000UL
+};
+
+/* ISP */
+static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ 0x00020000UL
+};
+
+static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ 0xffffffffUL
+};
+
+static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ 0xffffffffUL
+};
+
+static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
+ 0xffffffffUL,
+ 0xffffffffUL,
+ 0xffffffffUL
+};
+
+static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
+ 0xffffffffUL
+};
+
+/* SP */
+static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ 0x00010000UL
+};
+
+static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ 0x00300000UL
+};
+
+/* MMU */
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) || defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+static const hrt_address MMU_BASE[N_MMU_ID] = {
+ 0x00070000UL,
+ 0x000A0000UL
+};
+#else
+#error "system_local.h: SYSTEM must be one of {2400, 2401 }"
+#endif
+
+/* DMA */
+static const hrt_address DMA_BASE[N_DMA_ID] = {
+ 0x00040000UL
+};
+
+static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
+ 0x000CA000UL
+};
+
+/* IRQ */
+static const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ 0x00000500UL,
+ 0x00030A00UL,
+ 0x0008C000UL,
+ 0x00090200UL
+};
+
+/* GDC */
+static const hrt_address GDC_BASE[N_GDC_ID] = {
+ 0x00050000UL,
+ 0x00060000UL
+};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ 0x00000000UL
+};
+
+/*
+static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ 0x00000000UL};
+
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x00090000UL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x00000000UL
+};
+
+/*
+ * GP TIMER: all timer registers are intertwined, so having multiple
+ * base addresses for different timers does not help.
+ */
+static const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x00000600UL;
+
+/* GPIO */
+static const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ 0x00000400UL
+};
+
+/* TIMED_CTRL */
+static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ 0x00000100UL
+};
+
+/* INPUT_FORMATTER */
+static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ 0x00030000UL,
+ 0x00030200UL,
+ 0x00030400UL
+};
+/* 0x00030600UL, */ /* memcpy() */
+
+/* INPUT_SYSTEM */
+static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ 0x00080000UL
+};
+/* 0x00081000UL, */ /* capture A */
+/* 0x00082000UL, */ /* capture B */
+/* 0x00083000UL, */ /* capture C */
+/* 0x00084000UL, */ /* Acquisition */
+/* 0x00085000UL, */ /* DMA */
+/* 0x00089000UL, */ /* ctrl */
+/* 0x0008A000UL, */ /* GP regs */
+/* 0x0008B000UL, */ /* FIFO */
+/* 0x0008C000UL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+static const hrt_address RX_BASE[N_RX_ID] = {
+ 0x00080100UL
+};
+
+/* IBUF_CTRL, part of the Input System 2401 */
+static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
+ 0x000C1800UL, /* ibuf controller A */
+ 0x000C3800UL, /* ibuf controller B */
+ 0x000C5800UL /* ibuf controller C */
+};
+
+/* ISYS IRQ Controllers, part of the Input System 2401 */
+static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
+ 0x000C1400UL, /* port a */
+ 0x000C3400UL, /* port b */
+ 0x000C5400UL /* port c */
+};
+
+/* CSI FE, part of the Input System 2401 */
+static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
+ 0x000C0400UL, /* csi fe controller A */
+ 0x000C2400UL, /* csi fe controller B */
+ 0x000C4400UL /* csi fe controller C */
+};
+
+/* CSI BE, part of the Input System 2401 */
+static const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
+ 0x000C0800UL, /* csi be controller A */
+ 0x000C2800UL, /* csi be controller B */
+ 0x000C4800UL /* csi be controller C */
+};
+
+/* PIXEL Generator, part of the Input System 2401 */
+static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
+ 0x000C1000UL, /* pixel gen controller A */
+ 0x000C3000UL, /* pixel gen controller B */
+ 0x000C5000UL /* pixel gen controller C */
+};
+
+/* Stream2MMIO, part of the Input System 2401 */
+static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
+ 0x000C0C00UL, /* stream2mmio controller A */
+ 0x000C2C00UL, /* stream2mmio controller B */
+ 0x000C4C00UL /* stream2mmio controller C */
+};
+
+#else
+#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
+#endif
+
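+/*
+ * Example (editorial note): the per-port Input System 2401 tables
+ * above are indexed by their corresponding *_ID_t enums, e.g. the IRQ
+ * controller base of CSI port b is ISYS_IRQ_BASE[ISYS_IRQ1_ID].
+ */
+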
+#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp_acquisition_defs.h b/drivers/staging/media/atomisp/pci/isp_acquisition_defs.h
new file mode 100644
index 000000000000..5bdc16c71e82
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp_acquisition_defs.h
@@ -0,0 +1,229 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp_acquisition_defs_h
+#define _isp_acquisition_defs_h
+
+#define _ISP_ACQUISITION_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_ACQUISITION_BYTES_PER_ELEM 4
+
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_IRQS 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define MEM2STREAM_FSM_STATE_BITS 2
+#define ACQ_SYNCHRONIZER_FSM_STATE_BITS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_REGS 12
+
+// Register IDs of MMIO slave accessible registers
+#define ACQ_START_ADDR_REG_ID 0
+#define ACQ_MEM_REGION_SIZE_REG_ID 1
+#define ACQ_NUM_MEM_REGIONS_REG_ID 2
+#define ACQ_INIT_REG_ID 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_ID 4
+#define ACQ_RECEIVED_LONG_PACKETS_REG_ID 5
+#define ACQ_LAST_COMMAND_REG_ID 6
+#define ACQ_NEXT_COMMAND_REG_ID 7
+#define ACQ_LAST_ACKNOWLEDGE_REG_ID 8
+#define ACQ_NEXT_ACKNOWLEDGE_REG_ID 9
+#define ACQ_FSM_STATE_INFO_REG_ID 10
+#define ACQ_INT_CNTR_INFO_REG_ID 11
+
+// Register width
+#define ACQ_START_ADDR_REG_WIDTH 9
+#define ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ACQ_INIT_REG_WIDTH 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define ACQ_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define ACQ_LAST_COMMAND_REG_WIDTH 32
+#define ACQ_NEXT_COMMAND_REG_WIDTH 32
+#define ACQ_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_FSM_STATE_INFO_REG_WIDTH ((MEM2STREAM_FSM_STATE_BITS * 3) + (ACQ_SYNCHRONIZER_FSM_STATE_BITS * 3))
+#define ACQ_INT_CNTR_INFO_REG_WIDTH 32
+
+/* register reset value */
+#define ACQ_START_ADDR_REG_RSTVAL 0
+#define ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ACQ_INIT_REG_RSTVAL 0
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define ACQ_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define ACQ_LAST_COMMAND_REG_RSTVAL 0
+#define ACQ_NEXT_COMMAND_REG_RSTVAL 0
+#define ACQ_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ACQ_INT_CNTR_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define ACQ_INIT_RST_REG_BIT 0
+#define ACQ_INIT_RESYNC_BIT 2
+#define ACQ_INIT_RST_IDX ACQ_INIT_RST_REG_BIT
+#define ACQ_INIT_RST_BITS 1
+#define ACQ_INIT_RESYNC_IDX ACQ_INIT_RESYNC_BIT
+#define ACQ_INIT_RESYNC_BITS 1
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define ACQ_TOKEN_ID_LSB 0
+#define ACQ_TOKEN_ID_MSB 3
+#define ACQ_TOKEN_WIDTH (ACQ_TOKEN_ID_MSB - ACQ_TOKEN_ID_LSB + 1) // 4
+#define ACQ_TOKEN_ID_IDX 0
+#define ACQ_TOKEN_ID_BITS ACQ_TOKEN_WIDTH
+#define ACQ_INIT_CMD_INIT_IDX 4
+#define ACQ_INIT_CMD_INIT_BITS 3
+#define ACQ_CMD_START_ADDR_IDX 4
+#define ACQ_CMD_START_ADDR_BITS 9
+#define ACQ_CMD_NOFWORDS_IDX 13
+#define ACQ_CMD_NOFWORDS_BITS 9
+#define ACQ_MEM_REGION_ID_IDX 22
+#define ACQ_MEM_REGION_ID_BITS 9
+#define ACQ_PACKET_LENGTH_TOKEN_MSB 21
+#define ACQ_PACKET_LENGTH_TOKEN_LSB 13
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_MSB 9
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_LSB 4
+#define ACQ_PACKET_CH_ID_TOKEN_MSB 11
+#define ACQ_PACKET_CH_ID_TOKEN_LSB 10
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_MSB 12 /* only for capt_end_of_packet_written */
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_LSB 4 /* only for capt_end_of_packet_written */
+
+/* Command tokens IDs */
+#define ACQ_READ_REGION_AUTO_INCR_TOKEN_ID 0 //0000b
+#define ACQ_READ_REGION_TOKEN_ID 1 //0001b
+#define ACQ_READ_REGION_SOP_TOKEN_ID 2 //0010b
+#define ACQ_INIT_TOKEN_ID 8 //1000b
+
+/* Acknowledge token IDs */
+#define ACQ_READ_REGION_ACK_TOKEN_ID 0 //0000b
+#define ACQ_END_OF_PACKET_TOKEN_ID 4 //0100b
+#define ACQ_END_OF_REGION_TOKEN_ID 5 //0101b
+#define ACQ_SOP_MISMATCH_TOKEN_ID 6 //0110b
+#define ACQ_UNDEF_PH_TOKEN_ID 7 //0111b
+
+#define ACQ_TOKEN_MEMREGIONID_MSB 30
+#define ACQ_TOKEN_MEMREGIONID_LSB 22
+#define ACQ_TOKEN_NOFWORDS_MSB 21
+#define ACQ_TOKEN_NOFWORDS_LSB 13
+#define ACQ_TOKEN_STARTADDR_MSB 12
+#define ACQ_TOKEN_STARTADDR_LSB 4
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define WORD_COUNT_WIDTH 16
+#define PKT_CODE_WIDTH 6
+#define CHN_NO_WIDTH 2
+#define ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+#define EOF_CODE 1
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define ACQ_START_OF_FRAME 0
+#define ACQ_END_OF_FRAME 1
+#define ACQ_START_OF_LINE 2
+#define ACQ_END_OF_LINE 3
+#define ACQ_LINE_PAYLOAD 4
+#define ACQ_GEN_SH_PKT 5
+
+/* bit definition */
+#define ACQ_PKT_TYPE_IDX 16
+#define ACQ_PKT_TYPE_BITS 6
+#define ACQ_PKT_SOP_IDX 32
+#define ACQ_WORD_CNT_IDX 0
+#define ACQ_WORD_CNT_BITS 16
+#define ACQ_PKT_INFO_IDX 16
+#define ACQ_PKT_INFO_BITS 8
+#define ACQ_HEADER_DATA_IDX 0
+#define ACQ_HEADER_DATA_BITS 16
+#define ACQ_ACK_TOKEN_ID_IDX ACQ_TOKEN_ID_IDX
+#define ACQ_ACK_TOKEN_ID_BITS ACQ_TOKEN_ID_BITS
+#define ACQ_ACK_NOFWORDS_IDX 13
+#define ACQ_ACK_NOFWORDS_BITS 9
+#define ACQ_ACK_PKT_LEN_IDX 4
+#define ACQ_ACK_PKT_LEN_BITS 16
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+#define ACQ_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define ACQ_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define ACQ_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define ACQ_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define ACQ_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define ACQ_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define ACQ_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define ACQ_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define ACQ_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define ACQ_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define ACQ_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define ACQ_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define ACQ_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define ACQ_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define ACQ_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define ACQ_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define ACQ_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define ACQ_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define ACQ_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define ACQ_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define ACQ_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define ACQ_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define ACQ_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define ACQ_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define ACQ_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */
+#define ACQ_SOF_DATA 0 /* 00 0000 frame start */
+#define ACQ_EOF_DATA 1 /* 00 0001 frame end */
+#define ACQ_SOL_DATA 2 /* 00 0010 line start */
+#define ACQ_EOL_DATA 3 /* 00 0011 line end */
+#define ACQ_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define ACQ_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define ACQ_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define ACQ_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define ACQ_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define ACQ_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define ACQ_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define ACQ_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define ACQ_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_RESERVED_DATA_TYPE_MIN 56
+#define ACQ_RESERVED_DATA_TYPE_MAX 63
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define ACQ_YUV_RESERVED_DATA_TYPE 27
+#define ACQ_RGB_RESERVED_DATA_TYPE_MIN 37
+#define ACQ_RGB_RESERVED_DATA_TYPE_MAX 39
+#define ACQ_RAW_RESERVED_DATA_TYPE_MIN 46
+#define ACQ_RAW_RESERVED_DATA_TYPE_MAX 47
+
+/* --------------------------------------------------*/
+
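+/*
+ * Illustrative sketch (editorial, not part of the original sources):
+ * the *_IDX/*_BITS pairs above describe bit fields within a token
+ * word; a generic extraction helper, assuming u32 from type_support.h:
+ */
+static inline unsigned int acq_token_field(u32 token, unsigned int idx,
+ unsigned int bits)
+{
+ return (token >> idx) & ((1U << bits) - 1);
+}
+/* e.g. acq_token_field(tok, ACQ_TOKEN_ID_IDX, ACQ_TOKEN_ID_BITS) */
+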
+#endif /* _isp_acquisition_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/isp_capture_defs.h b/drivers/staging/media/atomisp/pci/isp_capture_defs.h
new file mode 100644
index 000000000000..5ab796e5a53f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp_capture_defs.h
@@ -0,0 +1,278 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp_capture_defs_h
+#define _isp_capture_defs_h
+
+#define _ISP_CAPTURE_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_CAPTURE_BITS_PER_ELEM 32 /* only for data, not SOP */
+#define _ISP_CAPTURE_BYTES_PER_ELEM (_ISP_CAPTURE_BITS_PER_ELEM / 8)
+#define _ISP_CAPTURE_BYTES_PER_WORD 32 /* 256/8 */
+#define _ISP_CAPTURE_ELEM_PER_WORD (_ISP_CAPTURE_BYTES_PER_WORD / _ISP_CAPTURE_BYTES_PER_ELEM)
+
+/* --------------------------------------------------*/
+
+#define NOF_IRQS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+// Number of registers
+#define CAPT_NOF_REGS 16
+
+// Register IDs of MMIO slave accessible registers
+#define CAPT_START_MODE_REG_ID 0
+#define CAPT_START_ADDR_REG_ID 1
+#define CAPT_MEM_REGION_SIZE_REG_ID 2
+#define CAPT_NUM_MEM_REGIONS_REG_ID 3
+#define CAPT_INIT_REG_ID 4
+#define CAPT_START_REG_ID 5
+#define CAPT_STOP_REG_ID 6
+
+#define CAPT_PACKET_LENGTH_REG_ID 7
+#define CAPT_RECEIVED_LENGTH_REG_ID 8
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_ID 9
+#define CAPT_RECEIVED_LONG_PACKETS_REG_ID 10
+#define CAPT_LAST_COMMAND_REG_ID 11
+#define CAPT_NEXT_COMMAND_REG_ID 12
+#define CAPT_LAST_ACKNOWLEDGE_REG_ID 13
+#define CAPT_NEXT_ACKNOWLEDGE_REG_ID 14
+#define CAPT_FSM_STATE_INFO_REG_ID 15
+
+// Register width
+#define CAPT_START_MODE_REG_WIDTH 1
+
+#define CAPT_START_REG_WIDTH 1
+#define CAPT_STOP_REG_WIDTH 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define CAPT_WRITE2MEM_FSM_STATE_BITS 2
+#define CAPT_SYNCHRONIZER_FSM_STATE_BITS 3
+
+#define CAPT_PACKET_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define CAPT_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define CAPT_LAST_COMMAND_REG_WIDTH 32
+#define CAPT_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_FSM_STATE_INFO_REG_WIDTH ((CAPT_WRITE2MEM_FSM_STATE_BITS * 3) + (CAPT_SYNCHRONIZER_FSM_STATE_BITS * 3))
+
+/* register reset value */
+#define CAPT_START_MODE_REG_RSTVAL 0
+#define CAPT_START_ADDR_REG_RSTVAL 0
+#define CAPT_MEM_REGION_SIZE_REG_RSTVAL 128
+#define CAPT_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define CAPT_INIT_REG_RSTVAL 0
+
+#define CAPT_START_REG_RSTVAL 0
+#define CAPT_STOP_REG_RSTVAL 0
+
+#define CAPT_PACKET_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define CAPT_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define CAPT_LAST_COMMAND_REG_RSTVAL 0
+#define CAPT_NEXT_COMMAND_REG_RSTVAL 0
+#define CAPT_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_FSM_STATE_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define CAPT_INIT_RST_REG_BIT 0
+#define CAPT_INIT_FLUSH_BIT 1
+#define CAPT_INIT_RESYNC_BIT 2
+#define CAPT_INIT_RESTART_BIT 3
+#define CAPT_INIT_RESTART_MEM_ADDR_LSB 4
+
+#define CAPT_INIT_RST_REG_IDX CAPT_INIT_RST_REG_BIT
+#define CAPT_INIT_RST_REG_BITS 1
+#define CAPT_INIT_FLUSH_IDX CAPT_INIT_FLUSH_BIT
+#define CAPT_INIT_FLUSH_BITS 1
+#define CAPT_INIT_RESYNC_IDX CAPT_INIT_RESYNC_BIT
+#define CAPT_INIT_RESYNC_BITS 1
+#define CAPT_INIT_RESTART_IDX CAPT_INIT_RESTART_BIT
+#define CAPT_INIT_RESTART_BITS 1
+#define CAPT_INIT_RESTART_MEM_ADDR_IDX CAPT_INIT_RESTART_MEM_ADDR_LSB
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define CAPT_TOKEN_ID_LSB 0
+#define CAPT_TOKEN_ID_MSB 3
+#define CAPT_TOKEN_WIDTH (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) /* 4 */
+
+/* Command tokens IDs */
+#define CAPT_START_TOKEN_ID 0 /* 0000b */
+#define CAPT_STOP_TOKEN_ID 1 /* 0001b */
+#define CAPT_FREEZE_TOKEN_ID 2 /* 0010b */
+#define CAPT_RESUME_TOKEN_ID 3 /* 0011b */
+#define CAPT_INIT_TOKEN_ID 8 /* 1000b */
+
+#define CAPT_START_TOKEN_BIT 0
+#define CAPT_STOP_TOKEN_BIT 0
+#define CAPT_FREEZE_TOKEN_BIT 0
+#define CAPT_RESUME_TOKEN_BIT 0
+#define CAPT_INIT_TOKEN_BIT 0
+
+/* Acknowledge token IDs */
+#define CAPT_END_OF_PACKET_RECEIVED_TOKEN_ID 0 /* 0000b */
+#define CAPT_END_OF_PACKET_WRITTEN_TOKEN_ID 1 /* 0001b */
+#define CAPT_END_OF_REGION_WRITTEN_TOKEN_ID 2 /* 0010b */
+#define CAPT_FLUSH_DONE_TOKEN_ID 3 /* 0011b */
+#define CAPT_PREMATURE_SOP_TOKEN_ID 4 /* 0100b */
+#define CAPT_MISSING_SOP_TOKEN_ID 5 /* 0101b */
+#define CAPT_UNDEF_PH_TOKEN_ID 6 /* 0110b */
+#define CAPT_STOP_ACK_TOKEN_ID 7 /* 0111b */
+
+#define CAPT_PACKET_LENGTH_TOKEN_MSB 19
+#define CAPT_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB 20
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB 25
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB 20
+#define CAPT_PACKET_CH_ID_TOKEN_MSB 27
+#define CAPT_PACKET_CH_ID_TOKEN_LSB 26
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB 29
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB 21
+
+/* bit definition */
+#define CAPT_CMD_IDX CAPT_TOKEN_ID_LSB
+#define CAPT_CMD_BITS (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1)
+#define CAPT_SOP_IDX 32
+#define CAPT_SOP_BITS 1
+#define CAPT_PKT_INFO_IDX 16
+#define CAPT_PKT_INFO_BITS 8
+#define CAPT_PKT_TYPE_IDX 0
+#define CAPT_PKT_TYPE_BITS 6
+#define CAPT_HEADER_DATA_IDX 0
+#define CAPT_HEADER_DATA_BITS 16
+#define CAPT_PKT_DATA_IDX 0
+#define CAPT_PKT_DATA_BITS 32
+#define CAPT_WORD_CNT_IDX 0
+#define CAPT_WORD_CNT_BITS 16
+#define CAPT_ACK_TOKEN_ID_IDX 0
+#define CAPT_ACK_TOKEN_ID_BITS 4
+//#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+//#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+//#define CAPT_ACK_PKT_INFO_IDX 20
+//#define CAPT_ACK_PKT_INFO_BITS 8
+//#define CAPT_ACK_MEM_REG_ID1_IDX 20 /* for capt_end_of_packet_written */
+//#define CAPT_ACK_MEM_REG_ID2_IDX 4 /* for capt_end_of_region_written */
+#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_SUPER_PKT_LEN_IDX CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_SUPER_PKT_LEN_BITS (CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB - CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_INFO_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_INFO_BITS (CAPT_PACKET_CH_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_MEM_REGION_ID_IDX CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB
+#define CAPT_ACK_MEM_REGION_ID_BITS (CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB - CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_TYPE_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_TYPE_BITS (CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_INIT_TOKEN_INIT_IDX 4
+#define CAPT_INIT_TOKEN_INIT_BITS 22
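+
+/* Illustrative only: the *_IDX/*_BITS pairs above act as generic field
+ * descriptors. A hypothetical extractor built on them could look like:
+ *
+ * #define CAPT_FIELD(token, name) \
+ * (((token) >> CAPT_##name##_IDX) & ((1U << CAPT_##name##_BITS) - 1))
+ *
+ * ack_id = CAPT_FIELD(ack_token, ACK_TOKEN_ID);
+ *
+ * CAPT_FIELD and ack_token are illustrative names, not part of this
+ * interface. */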
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define CAPT_WORD_COUNT_WIDTH 16
+#define CAPT_PKT_CODE_WIDTH 6
+#define CAPT_CHN_NO_WIDTH 2
+#define CAPT_ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define CAPT_START_OF_FRAME 0
+#define CAPT_END_OF_FRAME 1
+#define CAPT_START_OF_LINE 2
+#define CAPT_END_OF_LINE 3
+#define CAPT_LINE_PAYLOAD 4
+#define CAPT_GEN_SH_PKT 5
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+#define CAPT_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define CAPT_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define CAPT_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define CAPT_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define CAPT_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define CAPT_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define CAPT_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define CAPT_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define CAPT_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define CAPT_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define CAPT_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define CAPT_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define CAPT_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define CAPT_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define CAPT_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define CAPT_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define CAPT_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define CAPT_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define CAPT_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define CAPT_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define CAPT_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define CAPT_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define CAPT_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define CAPT_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define CAPT_Emb_DATA 18 /* 01 0010 embedded 8-bit non-image data */
+#define CAPT_SOF_DATA 0 /* 00 0000 frame start */
+#define CAPT_EOF_DATA 1 /* 00 0001 frame end */
+#define CAPT_SOL_DATA 2 /* 00 0010 line start */
+#define CAPT_EOL_DATA 3 /* 00 0011 line end */
+#define CAPT_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define CAPT_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define CAPT_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define CAPT_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define CAPT_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define CAPT_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define CAPT_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define CAPT_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define CAPT_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_RESERVED_DATA_TYPE_MIN 56
+#define CAPT_RESERVED_DATA_TYPE_MAX 63
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define CAPT_YUV_RESERVED_DATA_TYPE 27
+#define CAPT_RGB_RESERVED_DATA_TYPE_MIN 37
+#define CAPT_RGB_RESERVED_DATA_TYPE_MAX 39
+#define CAPT_RAW_RESERVED_DATA_TYPE_MIN 46
+#define CAPT_RAW_RESERVED_DATA_TYPE_MAX 47
+
+/* --------------------------------------------------*/
+/* Capture Unit State */
+/* --------------------------------------------------*/
+#define CAPT_FREE_RUN 0
+#define CAPT_NO_SYNC 1
+#define CAPT_SYNC_SWP 2
+#define CAPT_SYNC_MWP 3
+#define CAPT_SYNC_WAIT 4
+#define CAPT_FREEZE 5
+#define CAPT_RUN 6
+
+/* --------------------------------------------------*/
+
+#endif /* _isp_capture_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/memory_realloc.c b/drivers/staging/media/atomisp/pci/memory_realloc.c
new file mode 100644
index 000000000000..e640d5daf502
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/memory_realloc.c
@@ -0,0 +1,81 @@
+/*
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#include "memory_realloc.h"
+#include "ia_css_debug.h"
+#include "ia_css_refcount.h"
+#include "memory_access.h"
+
+static bool realloc_isp_css_mm_buf(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err,
+ uint16_t mmgr_attribute);
+
+bool reallocate_buffer(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err)
+{
+ bool ret;
+ u16 mmgr_attribute = MMGR_ATTRIBUTE_DEFAULT;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ret = realloc_isp_css_mm_buf(curr_buf,
+ curr_size, needed_size, force, err, mmgr_attribute);
+
+ IA_CSS_LEAVE_PRIVATE("ret=%d", ret);
+ return ret;
+}
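+
+/*
+ * Hypothetical usage sketch; buf, size and need are illustrative names,
+ * not part of this file:
+ *
+ * enum ia_css_err err;
+ *
+ * if (reallocate_buffer(&buf, &size, need, false, &err))
+ * ... buffer was replaced, check err for allocation failure ...
+ * else
+ * ... existing buffer was large enough and is kept ...
+ */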
+
+static bool realloc_isp_css_mm_buf(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err,
+ uint16_t mmgr_attribute)
+{
+ s32 id;
+
+ *err = IA_CSS_SUCCESS;
+ /* Possible optimization: add a function sh_css_isp_css_mm_realloc()
+ * and implement on top of hmm. */
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (ia_css_refcount_is_single(*curr_buf) && !force &&
+ *curr_size >= needed_size) {
+ IA_CSS_LEAVE_PRIVATE("false");
+ return false;
+ }
+
+ id = IA_CSS_REFCOUNT_PARAM_BUFFER;
+ ia_css_refcount_decrement(id, *curr_buf);
+ *curr_buf = ia_css_refcount_increment(id, mmgr_alloc_attr(needed_size,
+ mmgr_attribute));
+
+ if (!*curr_buf) {
+ *err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ *curr_size = 0;
+ } else {
+ *curr_size = needed_size;
+ }
+ IA_CSS_LEAVE_PRIVATE("true");
+ return true;
+}
diff --git a/drivers/staging/media/atomisp/pci/mmu/isp_mmu.c b/drivers/staging/media/atomisp/pci/mmu/isp_mmu.c
new file mode 100644
index 000000000000..8930fd629dc3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/mmu/isp_mmu.c
@@ -0,0 +1,566 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+/*
+ * ISP MMU management wrap code
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/mm.h> /* for GFP_ATOMIC */
+#include <linux/slab.h> /* for kmalloc */
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
+#include "atomisp_internal.h"
+#include "mmu/isp_mmu.h"
+
+/*
+ * 64-bit x86 processor physical address layout:
+ * 0 - 0x7fffffff DDR RAM (2GB)
+ * 0x80000000 - 0xffffffff MMIO (2GB)
+ * 0x100000000 - 0x3fffffffffff DDR RAM (64TB)
+ * So if the system has more than 2GB DDR memory, the lower 2GB occupies the
+ * physical address 0 - 0x7fffffff and the rest will start from 0x100000000.
+ * We have to make sure memory is allocated from the lower 2GB for devices
+ * that are only 32-bit capable (e.g. the ISP MMU).
+ *
+ * For any confusion, contact bin.gao@intel.com.
+ */
+#define NR_PAGES_2GB (SZ_2G / PAGE_SIZE)
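+
+/*
+ * The constraint above is satisfied by allocating page tables with
+ * GFP_DMA32 (see alloc_page_table() below), which keeps them within the
+ * 32-bit addressable range the ISP MMU can reach.
+ */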
+
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+ unsigned int end_isp_virt);
+
+static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
+{
+ unsigned int *pt_virt = phys_to_virt(pt);
+
+ return *(pt_virt + idx);
+}
+
+static void atomisp_set_pte(phys_addr_t pt,
+ unsigned int idx, unsigned int pte)
+{
+ unsigned int *pt_virt = phys_to_virt(pt);
+ *(pt_virt + idx) = pte;
+}
+
+static void *isp_pt_phys_to_virt(phys_addr_t phys)
+{
+ return phys_to_virt(phys);
+}
+
+static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
+ unsigned int pte)
+{
+ return mmu->driver->pte_to_phys(mmu, pte);
+}
+
+static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);
+
+ return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
+}
+
+/*
+ * Allocate an uncacheable page table.
+ * Return its physical address.
+ */
+static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
+{
+ int i;
+ phys_addr_t page;
+ void *virt;
+
+ virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+
+ if (!virt)
+ return (phys_addr_t)NULL_PAGE;
+
+ /*
+ * we need an uncacheable page table.
+ */
+#ifdef CONFIG_X86
+ set_memory_uc((unsigned long)virt, 1);
+#endif
+
+ page = virt_to_phys(virt);
+
+ for (i = 0; i < 1024; i++) {
+ /* NEED CHECK */
+ atomisp_set_pte(page, i, mmu->driver->null_pte);
+ }
+
+ return page;
+}
+
+static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
+{
+ void *virt;
+
+ page &= ISP_PAGE_MASK;
+ /*
+ * reset the page to write-back before freeing it
+ */
+ virt = phys_to_virt(page);
+
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)virt, 1);
+#endif
+
+ free_page((unsigned long)virt);
+}
+
+static void mmu_remap_error(struct isp_mmu *mmu,
+ phys_addr_t l1_pt, unsigned int l1_idx,
+ phys_addr_t l2_pt, unsigned int l2_idx,
+ unsigned int isp_virt, phys_addr_t old_phys,
+ phys_addr_t new_phys)
+{
+ dev_err(atomisp_dev, "address remap:\n\n"
+ "\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\told: isp_virt = 0x%x, phys = 0x%llx\n"
+ "\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
+ isp_pt_phys_to_virt(l1_pt),
+ (u64)l1_pt, l1_idx,
+ isp_pt_phys_to_virt(l2_pt),
+ (u64)l2_pt, l2_idx, isp_virt,
+ (u64)old_phys, isp_virt,
+ (u64)new_phys);
+}
+
+static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
+ phys_addr_t l1_pt, unsigned int l1_idx,
+ phys_addr_t l2_pt, unsigned int l2_idx,
+ unsigned int isp_virt, unsigned int pte)
+{
+ dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
+ "\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
+ isp_pt_phys_to_virt(l1_pt),
+ (u64)l1_pt, l1_idx,
+ isp_pt_phys_to_virt(l2_pt),
+ (u64)l2_pt, l2_idx, isp_virt,
+ pte);
+}
+
+static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
+ phys_addr_t l1_pt, unsigned int l1_idx,
+ unsigned int isp_virt, unsigned int pte)
+{
+ dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
+ "\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
+ isp_pt_phys_to_virt(l1_pt),
+ (u64)l1_pt, l1_idx, (unsigned int)isp_virt,
+ pte);
+}
+
+static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
+{
+ dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
+ "L1PT = 0x%x\n", (unsigned int)pte);
+}
+
+/*
+ * Update L2 page table according to isp virtual address and page physical
+ * address
+ */
+static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int l1_idx, phys_addr_t l2_pt,
+ unsigned int start, unsigned int end, phys_addr_t phys)
+{
+ unsigned int ptr;
+ unsigned int idx;
+ unsigned int pte;
+
+ l2_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+ phys &= ISP_PAGE_MASK;
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L2_IDX(ptr);
+
+ pte = atomisp_get_pte(l2_pt, idx);
+
+ if (ISP_PTE_VALID(mmu, pte)) {
+ mmu_remap_error(mmu, l1_pt, l1_idx,
+ l2_pt, idx, ptr, pte, phys);
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -EINVAL;
+ }
+
+ pte = isp_pgaddr_to_pte_valid(mmu, phys);
+
+ atomisp_set_pte(l2_pt, idx, pte);
+ mmu->l2_pgt_refcount[l1_idx]++;
+ ptr += (1U << ISP_L2PT_OFFSET);
+ phys += (1U << ISP_L2PT_OFFSET);
+ } while (ptr < end && idx < ISP_L2PT_PTES - 1);
+
+ return 0;
+}
+
+/*
+ * Update L1 page table according to isp virtual address and page physical
+ * address
+ */
+static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int start, unsigned int end,
+ phys_addr_t phys)
+{
+ phys_addr_t l2_pt;
+ unsigned int ptr, l1_aligned;
+ unsigned int idx;
+ unsigned int l2_pte;
+ int ret;
+
+ l1_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+ phys &= ISP_PAGE_MASK;
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L1_IDX(ptr);
+
+ l2_pte = atomisp_get_pte(l1_pt, idx);
+
+ if (!ISP_PTE_VALID(mmu, l2_pte)) {
+ l2_pt = alloc_page_table(mmu);
+ if (l2_pt == NULL_PAGE) {
+ dev_err(atomisp_dev,
+ "alloc page table fail.\n");
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -ENOMEM;
+ }
+
+ l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);
+
+ atomisp_set_pte(l1_pt, idx, l2_pte);
+ mmu->l2_pgt_refcount[idx] = 0;
+ }
+
+ l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
+
+ l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
+
+ if (l1_aligned < end) {
+ ret = mmu_l2_map(mmu, l1_pt, idx,
+ l2_pt, ptr, l1_aligned, phys);
+ phys += (l1_aligned - ptr);
+ ptr = l1_aligned;
+ } else {
+ ret = mmu_l2_map(mmu, l1_pt, idx,
+ l2_pt, ptr, end, phys);
+ phys += (end - ptr);
+ ptr = end;
+ }
+
+ if (ret) {
+ dev_err(atomisp_dev, "setup mapping in L2PT fail.\n");
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -EINVAL;
+ }
+ } while (ptr < end && idx < ISP_L1PT_PTES);
+
+ return 0;
+}
+
+/*
+ * Update page table according to isp virtual address and page physical
+ * address
+ */
+static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
+ phys_addr_t phys, unsigned int pgnr)
+{
+ unsigned int start, end;
+ phys_addr_t l1_pt;
+ int ret;
+
+ mutex_lock(&mmu->pt_mutex);
+ if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
+ /*
+ * allocate 1 new page for L1 page table
+ */
+ l1_pt = alloc_page_table(mmu);
+ if (l1_pt == NULL_PAGE) {
+ dev_err(atomisp_dev, "alloc page table fail.\n");
+ mutex_unlock(&mmu->pt_mutex);
+ return -ENOMEM;
+ }
+
+ /*
+ * setup L1 page table physical addr to MMU
+ */
+ mmu->base_address = l1_pt;
+ mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
+ memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
+ }
+
+ l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
+
+ start = (isp_virt) & ISP_PAGE_MASK;
+ end = start + (pgnr << ISP_PAGE_OFFSET);
+ phys &= ISP_PAGE_MASK;
+
+ ret = mmu_l1_map(mmu, l1_pt, start, end, phys);
+
+ if (ret)
+ dev_err(atomisp_dev, "setup mapping in L1PT fail.\n");
+
+ mutex_unlock(&mmu->pt_mutex);
+ return ret;
+}
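+
+/*
+ * Illustrative sketch of the two-level walk used above (not functional
+ * code): an ISP virtual address is decomposed by the ISP_PTR_TO_L1_IDX()
+ * and ISP_PTR_TO_L2_IDX() macros from mmu/isp_mmu.h, roughly:
+ *
+ * l1_idx = ISP_PTR_TO_L1_IDX(isp_virt); selects an L2 page table
+ * l2_idx = ISP_PTR_TO_L2_IDX(isp_virt); selects the PTE within it
+ * offset = isp_virt & ~ISP_PAGE_MASK; offset within the page
+ *
+ * The exact field widths (ISP_L1PT_OFFSET/ISP_L2PT_OFFSET) come from the
+ * header and are not restated here.
+ */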
+
+/*
+ * Free L2 page table according to isp virtual address and page physical
+ * address
+ */
+static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int l1_idx, phys_addr_t l2_pt,
+ unsigned int start, unsigned int end)
+{
+ unsigned int ptr;
+ unsigned int idx;
+ unsigned int pte;
+
+ l2_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L2_IDX(ptr);
+
+ pte = atomisp_get_pte(l2_pt, idx);
+
+ if (!ISP_PTE_VALID(mmu, pte))
+ mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
+ l2_pt, idx, ptr, pte);
+
+ atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
+ mmu->l2_pgt_refcount[l1_idx]--;
+ ptr += (1U << ISP_L2PT_OFFSET);
+ } while (ptr < end && idx < ISP_L2PT_PTES - 1);
+
+ if (mmu->l2_pgt_refcount[l1_idx] == 0) {
+ free_page_table(mmu, l2_pt);
+ atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
+ }
+}
+
+/*
+ * Free L1 page table according to isp virtual address and page physical
+ * address
+ */
+static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int start, unsigned int end)
+{
+ phys_addr_t l2_pt;
+ unsigned int ptr, l1_aligned;
+ unsigned int idx;
+ unsigned int l2_pte;
+
+ l1_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L1_IDX(ptr);
+
+ l2_pte = atomisp_get_pte(l1_pt, idx);
+
+ if (!ISP_PTE_VALID(mmu, l2_pte)) {
+ mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
+ /* skip to the next L1 entry; continuing without
+ * advancing ptr would spin forever */
+ ptr = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
+ continue;
+ }
+
+ l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
+
+ l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
+
+ if (l1_aligned < end) {
+ mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
+ ptr = l1_aligned;
+ } else {
+ mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
+ ptr = end;
+ }
+ /*
+ * use the same L2 page next time, so we don't
+ * need to invalidate and free this PT.
+ */
+ /* atomisp_set_pte(l1_pt, idx, NULL_PTE); */
+ } while (ptr < end && idx < ISP_L1PT_PTES);
+}
+
+/*
+ * Free page table according to isp virtual address and page physical
+ * address
+ */
+static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
+ unsigned int pgnr)
+{
+ unsigned int start, end;
+ phys_addr_t l1_pt;
+
+ mutex_lock(&mmu->pt_mutex);
+ if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
+ mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
+ mutex_unlock(&mmu->pt_mutex);
+ return;
+ }
+
+ l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
+
+ start = (isp_virt) & ISP_PAGE_MASK;
+ end = start + (pgnr << ISP_PAGE_OFFSET);
+
+ mmu_l1_unmap(mmu, l1_pt, start, end);
+ mutex_unlock(&mmu->pt_mutex);
+}
+
+/*
+ * Free page tables according to isp start virtual address and end virtual
+ * address.
+ */
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+ unsigned int end_isp_virt)
+{
+ unsigned int pgnr;
+ unsigned int start, end;
+
+ start = (start_isp_virt) & ISP_PAGE_MASK;
+ end = (end_isp_virt) & ISP_PAGE_MASK;
+ pgnr = (end - start) >> ISP_PAGE_OFFSET;
+ mmu_unmap(mmu, start, pgnr);
+}
+
+int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
+ phys_addr_t phys, unsigned int pgnr)
+{
+ return mmu_map(mmu, isp_virt, phys, pgnr);
+}
+
+void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
+ unsigned int pgnr)
+{
+ mmu_unmap(mmu, isp_virt, pgnr);
+}
+
+static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
+ unsigned int start,
+ unsigned int size)
+{
+ isp_mmu_flush_tlb(mmu);
+}
+
+/* MMU init for internal structure */
+int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
+{
+ if (!mmu) /* error */
+ return -EINVAL;
+ if (!driver) /* error */
+ return -EINVAL;
+
+ if (!driver->name)
+ dev_warn(atomisp_dev, "NULL name for MMU driver...\n");
+
+ mmu->driver = driver;
+
+ if (!driver->tlb_flush_all) {
+ dev_err(atomisp_dev, "tlb_flush_all operation not provided.\n");
+ return -EINVAL;
+ }
+
+ if (!driver->tlb_flush_range)
+ driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;
+
+ if (!driver->pte_valid_mask) {
+ dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
+ return -EINVAL;
+ }
+
+ mmu->l1_pte = driver->null_pte;
+
+ mutex_init(&mmu->pt_mutex);
+
+ return 0;
+}
+
+/* Free L1 and L2 page tables */
+void isp_mmu_exit(struct isp_mmu *mmu)
+{
+ unsigned int idx;
+ unsigned int pte;
+ phys_addr_t l1_pt, l2_pt;
+
+ if (!mmu)
+ return;
+
+ if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
+ dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
+ (unsigned int)mmu->l1_pte);
+ return;
+ }
+
+ l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
+
+ for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
+ pte = atomisp_get_pte(l1_pt, idx);
+
+ if (ISP_PTE_VALID(mmu, pte)) {
+ l2_pt = isp_pte_to_pgaddr(mmu, pte);
+
+ free_page_table(mmu, l2_pt);
+ }
+ }
+
+ free_page_table(mmu, l1_pt);
+}
diff --git a/drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c b/drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c
new file mode 100644
index 000000000000..031d7fa00510
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c
@@ -0,0 +1,77 @@
+/*
+ * Support for Merrifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2012 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ */
+#include "type_support.h"
+#include "mmu/isp_mmu.h"
+#include "mmu/sh_mmu_mrfld.h"
+#include "memory_access/memory_access.h"
+#include "atomisp_compat.h"
+
+#define MERR_VALID_PTE_MASK 0x80000000
+
+/*
+ * include SH header file here
+ */
+
+static unsigned int sh_phys_to_pte(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ return phys >> ISP_PAGE_OFFSET;
+}
+
+static phys_addr_t sh_pte_to_phys(struct isp_mmu *mmu,
+ unsigned int pte)
+{
+ unsigned int mask = mmu->driver->pte_valid_mask;
+
+ return (phys_addr_t)((pte & ~mask) << ISP_PAGE_OFFSET);
+}
+
+static unsigned int sh_get_pd_base(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ unsigned int pte = sh_phys_to_pte(mmu, phys);
+
+ return HOST_ADDRESS(pte);
+}
+
+/*
+ * callback to flush tlb.
+ *
+ * tlb_flush_range will at least flush TLBs containing
+ * address mapping from addr to addr + size.
+ *
+ * tlb_flush_all will flush all TLBs.
+ *
+ * tlb_flush_all must be provided. If tlb_flush_range is
+ * not provided, it is set to tlb_flush_all by default.
+ */
+static void sh_tlb_flush(struct isp_mmu *mmu)
+{
+ atomisp_css_mmu_invalidate_cache();
+}
+
+struct isp_mmu_client sh_mmu_mrfld = {
+ .name = "Silicon Hive ISP3000 MMU",
+ .pte_valid_mask = MERR_VALID_PTE_MASK,
+ .null_pte = ~MERR_VALID_PTE_MASK,
+ .get_pd_base = sh_get_pd_base,
+ .tlb_flush_all = sh_tlb_flush,
+ .phys_to_pte = sh_phys_to_pte,
+ .pte_to_phys = sh_pte_to_phys,
+};
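+
+/*
+ * Hypothetical usage sketch: this client is handed to the generic MMU
+ * wrapper, e.g.
+ *
+ * struct isp_mmu mmu;
+ * int ret = isp_mmu_init(&mmu, &sh_mmu_mrfld);
+ *
+ * isp_mmu_init() (in mmu/isp_mmu.c) validates tlb_flush_all and
+ * pte_valid_mask, and substitutes a full TLB flush when tlb_flush_range
+ * is absent, as it is here.
+ */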
diff --git a/drivers/staging/media/atomisp/pci/mmu_defs.h b/drivers/staging/media/atomisp/pci/mmu_defs.h
new file mode 100644
index 000000000000..c038f39ffd25
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/mmu_defs.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _mmu_defs_h
+#define _mmu_defs_h
+
+#define _HRT_MMU_INVALIDATE_TLB_REG_IDX 0
+#define _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX 1
+
+#define _HRT_MMU_REG_ALIGN 4
+
+#endif /* _mmu_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h b/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h
new file mode 100644
index 000000000000..26a3fc4d48e8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h
@@ -0,0 +1,228 @@
+/*
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef _IA_CSS_BINARY_H_
+#define _IA_CSS_BINARY_H_
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_err.h"
+#include "ia_css_stream_format.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_frame_public.h"
+#include "sh_css_metrics.h"
+#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h"
+
+/* The binary mode is used in pre-processor expressions so we cannot
+ * use an enum here. */
+#define IA_CSS_BINARY_MODE_COPY 0
+#define IA_CSS_BINARY_MODE_PREVIEW 1
+#define IA_CSS_BINARY_MODE_PRIMARY 2
+#define IA_CSS_BINARY_MODE_VIDEO 3
+#define IA_CSS_BINARY_MODE_PRE_ISP 4
+#define IA_CSS_BINARY_MODE_GDC 5
+#define IA_CSS_BINARY_MODE_POST_ISP 6
+#define IA_CSS_BINARY_MODE_ANR 7
+#define IA_CSS_BINARY_MODE_CAPTURE_PP 8
+#define IA_CSS_BINARY_MODE_VF_PP 9
+#define IA_CSS_BINARY_MODE_PRE_DE 10
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE0 11
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE1 12
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE2 13
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE3 14
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE4 15
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE5 16
+#define IA_CSS_BINARY_NUM_MODES 17
+
+#define MAX_NUM_PRIMARY_STAGES 6
+#define NUM_PRIMARY_HQ_STAGES 6 /* number of primary stages for ISP2.6.1 high quality pipe */
+#define NUM_PRIMARY_STAGES 1 /* number of primary stages for ISP1/ISP2.2 pipe */
+
+/* Indicate where binaries can read input from */
+#define IA_CSS_BINARY_INPUT_SENSOR 0
+#define IA_CSS_BINARY_INPUT_MEMORY 1
+#define IA_CSS_BINARY_INPUT_VARIABLE 2
+
+/* Should be included without the path.
+ However, that requires adding the path to numerous makefiles
+ that have nothing to do with isp parameters.
+ */
+#include "runtime/isp_param/interface/ia_css_isp_param_types.h"
+
+/* now these ports only include output ports but not vf output ports */
+enum {
+ IA_CSS_BINARY_OUTPUT_PORT_0 = 0,
+ IA_CSS_BINARY_OUTPUT_PORT_1 = 1,
+ IA_CSS_BINARY_MAX_OUTPUT_PORTS = 2
+};
+
+struct ia_css_cas_binary_descr {
+ unsigned int num_stage;
+ unsigned int num_output_stage;
+ struct ia_css_frame_info *in_info;
+ struct ia_css_frame_info *internal_out_info;
+ struct ia_css_frame_info *out_info;
+ struct ia_css_frame_info *vf_info;
+ bool *is_output_stage;
+};
+
+struct ia_css_binary_descr {
+ int mode;
+ bool online;
+ bool continuous;
+ bool striped;
+ bool two_ppc;
+ bool enable_yuv_ds;
+ bool enable_high_speed;
+ bool enable_dvs_6axis;
+ bool enable_reduced_pipe;
+ bool enable_dz;
+ bool enable_xnr;
+ bool enable_fractional_ds;
+ bool enable_dpc;
+
+ /* ISP2401 */
+ bool enable_luma_only;
+ bool enable_tnr;
+
+ bool enable_capture_pp_bli;
+ struct ia_css_resolution dvs_env;
+ enum atomisp_input_format stream_format;
+ struct ia_css_frame_info *in_info; /* the info of the input-frame with the
+ ISP required resolution. */
+ struct ia_css_frame_info *bds_out_info;
+ struct ia_css_frame_info *out_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame_info *vf_info;
+ unsigned int isp_pipe_version;
+ unsigned int required_bds_factor;
+ int stream_config_left_padding;
+};
+
+struct ia_css_binary {
+ const struct ia_css_binary_xinfo *info;
+ enum atomisp_input_format input_format;
+ struct ia_css_frame_info in_frame_info;
+ struct ia_css_frame_info internal_frame_info;
+ struct ia_css_frame_info out_frame_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_resolution effective_in_frame_res;
+ struct ia_css_frame_info vf_frame_info;
+ int input_buf_vectors;
+ int deci_factor_log2;
+ int vf_downscale_log2;
+ int s3atbl_width;
+ int s3atbl_height;
+ int s3atbl_isp_width;
+ int s3atbl_isp_height;
+ unsigned int morph_tbl_width;
+ unsigned int morph_tbl_aligned_width;
+ unsigned int morph_tbl_height;
+ int sctbl_width_per_color;
+ int sctbl_aligned_width_per_color;
+ int sctbl_height;
+ int sctbl_legacy_width_per_color;
+ int sctbl_legacy_height;
+ struct ia_css_sdis_info dis;
+ struct ia_css_resolution dvs_envelope;
+ bool online;
+ unsigned int uds_xc;
+ unsigned int uds_yc;
+ unsigned int left_padding;
+ struct sh_css_binary_metrics metrics;
+ struct ia_css_isp_param_host_segments mem_params;
+ struct ia_css_isp_param_css_segments css_params;
+};
+
+#define IA_CSS_BINARY_DEFAULT_SETTINGS \
+(struct ia_css_binary) { \
+ .input_format = ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY, \
+ .in_frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .internal_frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .out_frame_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .vf_frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+}
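+
+/* Illustrative use of the compound-literal default above (the variable
+ * name is hypothetical):
+ *
+ * struct ia_css_binary binary = IA_CSS_BINARY_DEFAULT_SETTINGS;
+ *
+ * Members not named in the initializer are zero-initialized, so only the
+ * input format and the frame-info members carry non-trivial defaults. */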
+
+enum ia_css_err
+ia_css_binary_init_infos(void);
+
+enum ia_css_err
+ia_css_binary_uninit(void);
+
+enum ia_css_err
+ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
+ bool online,
+ bool two_ppc,
+ enum atomisp_input_format stream_format,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *bds_out_info,
+ const struct ia_css_frame_info *out_info[],
+ const struct ia_css_frame_info *vf_info,
+ struct ia_css_binary *binary,
+ struct ia_css_resolution *dvs_env,
+ int stream_config_left_padding,
+ bool accelerator);
+
+enum ia_css_err
+ia_css_binary_find(struct ia_css_binary_descr *descr,
+ struct ia_css_binary *binary);
+
+/* @brief Get the shading information of the specified shading correction type.
+ *
+ * @param[in] binary: The isp binary which has the shading correction.
+ * @param[in] type: The shading correction type.
+ * @param[in] required_bds_factor: The bayer downscaling factor required in the pipe.
+ * @param[in] stream_config: The stream configuration.
+ * @param[out] shading_info: The shading information.
+ * The shading information necessary as API is stored in the shading_info.
+ * The driver needs to get this information to generate
+ * the shading table directly required from ISP.
+ * @param[out] pipe_config: The pipe configuration.
+ * The shading information related to ISP (but, not necessary as API) is stored in the pipe_config.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err
+ia_css_binary_get_shading_info(const struct ia_css_binary *binary,
+ enum ia_css_shading_correction_type type,
+ unsigned int required_bds_factor,
+ const struct ia_css_stream_config *stream_config,
+ struct ia_css_shading_info *shading_info,
+ struct ia_css_pipe_config *pipe_config);
+
+enum ia_css_err
+ia_css_binary_3a_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe);
+
+void
+ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe);
+
+void
+ia_css_binary_dvs_stat_grid_info(
+ const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe);
+
+unsigned
+ia_css_binary_max_vf_width(void);
+
+void
+ia_css_binary_destroy_isp_parameters(struct ia_css_binary *binary);
+
+void
+ia_css_binary_get_isp_binaries(struct ia_css_binary_xinfo **binaries,
+ uint32_t *num_isp_binaries);
+
+#endif /* _IA_CSS_BINARY_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
new file mode 100644
index 000000000000..2a23b7c6aeeb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
@@ -0,0 +1,1852 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <math_support.h>
+#include <gdc_device.h> /* HR_GDC_N */
+#include "isp.h" /* ISP_VEC_NELEMS */
+
+#include "ia_css_binary.h"
+#include "ia_css_debug.h"
+#include "ia_css_util.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_internal.h"
+#include "sh_css_sp.h"
+#include "sh_css_firmware.h"
+#include "sh_css_defs.h"
+#include "sh_css_legacy.h"
+
+#include "vf/vf_1.0/ia_css_vf.host.h"
+#include "sc/sc_1.0/ia_css_sc.host.h"
+#include "sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h" /* FRAC_ACC */
+
+#include "camera/pipe/interface/ia_css_pipe_binarydesc.h"
+
+#include "memory_access.h"
+
+#include "assert_support.h"
+
+#define IMPLIES(a, b) (!(a) || (b)) /* A => B */
+
+static struct ia_css_binary_xinfo *all_binaries; /* ISP binaries only (no SP) */
+static struct ia_css_binary_xinfo
+ *binary_infos[IA_CSS_BINARY_NUM_MODES] = { NULL, };
+
+static void
+ia_css_binary_dvs_env(const struct ia_css_binary_info *info,
+ const struct ia_css_resolution *dvs_env,
+ struct ia_css_resolution *binary_dvs_env)
+{
+ if (info->enable.dvs_envelope) {
+ assert(dvs_env);
+ binary_dvs_env->width = max(dvs_env->width, SH_CSS_MIN_DVS_ENVELOPE);
+ binary_dvs_env->height = max(dvs_env->height, SH_CSS_MIN_DVS_ENVELOPE);
+ }
+}
+
+static void
+ia_css_binary_internal_res(const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *bds_out_info,
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_binary_info *info,
+ struct ia_css_resolution *internal_res)
+{
+ unsigned int isp_tmp_internal_width = 0,
+ isp_tmp_internal_height = 0;
+ bool binary_supports_yuv_ds = info->enable.ds & 2;
+ struct ia_css_resolution binary_dvs_env;
+
+ binary_dvs_env.width = 0;
+ binary_dvs_env.height = 0;
+ ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env);
+
+ if (binary_supports_yuv_ds) {
+ if (in_info) {
+ isp_tmp_internal_width = in_info->res.width
+ + info->pipeline.left_cropping + binary_dvs_env.width;
+ isp_tmp_internal_height = in_info->res.height
+ + info->pipeline.top_cropping + binary_dvs_env.height;
+ }
+ } else if ((bds_out_info) && (out_info) &&
+ /* TODO: hack to make the video_us case work. This should be
+ reverted once a proper solution exists in the ISP */
+ (bds_out_info->res.width >= out_info->res.width)) {
+ isp_tmp_internal_width = bds_out_info->padded_width;
+ isp_tmp_internal_height = bds_out_info->res.height;
+ } else {
+ if (out_info) {
+ isp_tmp_internal_width = out_info->padded_width;
+ isp_tmp_internal_height = out_info->res.height;
+ }
+ }
+
+ /* We first calculate the resolutions used by the ISP. After that,
+ * we use those resolutions to compute sizes for tables etc. */
+ internal_res->width = __ISP_INTERNAL_WIDTH(isp_tmp_internal_width,
+ (int)binary_dvs_env.width,
+ info->pipeline.left_cropping, info->pipeline.mode,
+ info->pipeline.c_subsampling,
+ info->output.num_chunks, info->pipeline.pipelining);
+ internal_res->height = __ISP_INTERNAL_HEIGHT(isp_tmp_internal_height,
+ info->pipeline.top_cropping,
+ binary_dvs_env.height);
+}
+
+/* ISP2400 */
+/* Computation results of the origin coordinate of bayer on the shading table. */
+struct sh_css_shading_table_bayer_origin_compute_results {
+ u32 bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of bayer scaling. */
+ u32 bayer_scale_hor_ratio_out; /* Horizontal ratio (out) of bayer scaling. */
+ u32 bayer_scale_ver_ratio_in; /* Vertical ratio (in) of bayer scaling. */
+ u32 bayer_scale_ver_ratio_out; /* Vertical ratio (out) of bayer scaling. */
+ u32 sc_bayer_origin_x_bqs_on_shading_table; /* X coordinate (in bqs) of bayer origin on shading table. */
+ u32 sc_bayer_origin_y_bqs_on_shading_table; /* Y coordinate (in bqs) of bayer origin on shading table. */
+};
+
+/* ISP2401 */
+/* Requirements for the shading correction. */
+struct sh_css_binary_sc_requirements {
+ /* Bayer scaling factor, for the scaling which is applied before shading correction. */
+ u32 bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of scaling applied BEFORE shading correction. */
+ u32 bayer_scale_hor_ratio_out; /* Horizontal ratio (out) of scaling applied BEFORE shading correction. */
+ u32 bayer_scale_ver_ratio_in; /* Vertical ratio (in) of scaling applied BEFORE shading correction. */
+ u32 bayer_scale_ver_ratio_out; /* Vertical ratio (out) of scaling applied BEFORE shading correction. */
+
+ /* ISP internal frame is composed of the real sensor data and the padding data. */
+ u32 sensor_data_origin_x_bqs_on_internal; /* X origin (in bqs) of sensor data on internal frame
+ at shading correction. */
+ u32 sensor_data_origin_y_bqs_on_internal; /* Y origin (in bqs) of sensor data on internal frame
+ at shading correction. */
+};
+
+/* Get the requirements for the shading correction. */
+static enum ia_css_err
+#ifndef ISP2401
+ia_css_binary_compute_shading_table_bayer_origin(
+ const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct sh_css_shading_table_bayer_origin_compute_results *res) /* [out] */
+#else
+sh_css_binary_get_sc_requirements(
+ const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct sh_css_binary_sc_requirements *scr) /* [out] */
+#endif
+{
+ enum ia_css_err err;
+
+#ifndef ISP2401
+ /* Numerator and denominator of the fixed bayer downscaling factor.
+ (numerator >= denominator) */
+#else
+ /* Numerator and denominator of the fixed bayer downscaling factor. (numerator >= denominator) */
+#endif
+ unsigned int bds_num, bds_den;
+
+#ifndef ISP2401
+ /* Horizontal/Vertical ratio of bayer scaling
+ between input area and output area. */
+ unsigned int bs_hor_ratio_in;
+ unsigned int bs_hor_ratio_out;
+ unsigned int bs_ver_ratio_in;
+ unsigned int bs_ver_ratio_out;
+#else
+ /* Horizontal/Vertical ratio of bayer scaling between input area and output area. */
+ unsigned int bs_hor_ratio_in, bs_hor_ratio_out, bs_ver_ratio_in, bs_ver_ratio_out;
+#endif
+
+ /* Left padding set by InputFormatter. */
+#ifndef ISP2401
+ unsigned int left_padding_bqs; /* in bqs */
+#else
+ unsigned int left_padding_bqs;
+#endif
+
+#ifndef ISP2401
+ /* Flag for the NEED_BDS_FACTOR_2_00 macro defined in isp kernels. */
+ unsigned int need_bds_factor_2_00;
+
+ /* Left padding adjusted inside the isp. */
+ unsigned int left_padding_adjusted_bqs; /* in bqs */
+
+ /* Bad pixels caused by filters.
+ NxN-filter (before/after bayer scaling) moves the image position
+ to right/bottom directions by a few pixels.
+ It causes bad pixels at left/top sides,
+ and effective bayer size decreases. */
+ unsigned int bad_bqs_on_left_before_bs; /* in bqs */
+ unsigned int bad_bqs_on_left_after_bs; /* in bqs */
+ unsigned int bad_bqs_on_top_before_bs; /* in bqs */
+ unsigned int bad_bqs_on_top_after_bs; /* in bqs */
+
+ /* Get the numerator and denominator of bayer downscaling factor. */
+ err = sh_css_bds_factor_get_numerator_denominator
+ (required_bds_factor, &bds_num, &bds_den);
+ if (err != IA_CSS_SUCCESS)
+#else
+ /* Flags corresponding to NEED_BDS_FACTOR_2_00/NEED_BDS_FACTOR_1_50/NEED_BDS_FACTOR_1_25 macros
+ * defined in isp kernels. */
+ unsigned int need_bds_factor_2_00, need_bds_factor_1_50, need_bds_factor_1_25;
+
+ /* Left padding adjusted inside the isp kernels. */
+ unsigned int left_padding_adjusted_bqs;
+
+ /* Top padding padded inside the isp kernel for bayer downscaling binaries. */
+ unsigned int top_padding_bqs;
+
+ /* Bayer downscaling factor 1.0 by fixed-point. */
+ int bds_frac_acc = FRAC_ACC; /* FRAC_ACC is defined in ia_css_fixedbds_param.h. */
+
+ /* Right/Down shift amount caused by filters applied BEFORE shading correction. */
+ unsigned int right_shift_bqs_before_bs; /* right shift before bayer scaling */
+ unsigned int right_shift_bqs_after_bs; /* right shift after bayer scaling */
+ unsigned int down_shift_bqs_before_bs; /* down shift before bayer scaling */
+ unsigned int down_shift_bqs_after_bs; /* down shift after bayer scaling */
+
+ /* Origin of the real sensor data area on the internal frame at shading correction. */
+ unsigned int sensor_data_origin_x_bqs_on_internal;
+ unsigned int sensor_data_origin_y_bqs_on_internal;
+
+ IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p",
+ binary, required_bds_factor, stream_config);
+
+ /* Get the numerator and denominator of the required bayer downscaling factor. */
+ err = sh_css_bds_factor_get_numerator_denominator(required_bds_factor, &bds_num, &bds_den);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+#endif
+ return err;
+#ifdef ISP2401
+}
+#endif
+
+#ifndef ISP2401
+/* Set the horizontal/vertical ratio of bayer scaling
+between input area and output area. */
+#else
+IA_CSS_LOG("bds_num=%d, bds_den=%d", bds_num, bds_den);
+
+/* Set the horizontal/vertical ratio of bayer scaling between input area and output area. */
+#endif
+bs_hor_ratio_in = bds_num;
+bs_hor_ratio_out = bds_den;
+bs_ver_ratio_in = bds_num;
+bs_ver_ratio_out = bds_den;
+
+#ifndef ISP2401
+/* Set the left padding set by InputFormatter. (ifmtr.c) */
+#else
+/* Set the left padding set by InputFormatter. (ia_css_ifmtr_configure() in ifmtr.c) */
+#endif
+if (stream_config->left_padding == -1)
+ left_padding_bqs = _ISP_BQS(binary->left_padding);
+else
+#ifndef ISP2401
+ left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS
+ - _ISP_BQS(stream_config->left_padding));
+#else
+ left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS - _ISP_BQS(stream_config->left_padding));
+#endif
+
+#ifndef ISP2401
+/* Set the left padding adjusted inside the isp.
+When bds_factor 2.00 is needed, some padding is added to left_padding
+inside the isp, before bayer downscaling. (raw.isp.c)
+(Hopefully, left_crop/left_padding/top_crop should be defined in css
+appropriately, depending on bds_factor.)
+*/
+#else
+IA_CSS_LOG("stream.left_padding=%d, binary.left_padding=%d, left_padding_bqs=%d",
+ stream_config->left_padding, binary->left_padding, left_padding_bqs);
+
+/* Set the left padding adjusted inside the isp kernels.
+ * When the bds_factor isn't 1.00, the left padding size is adjusted inside the isp,
+ * before bayer downscaling. (scaled_hor_plane_index(), raw_compute_hphase() in raw.isp.c)
+ */
+#endif
+need_bds_factor_2_00 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_8_00))) != 0);
+
+#ifndef ISP2401
+if (need_bds_factor_2_00 && binary->info->sp.pipeline.left_cropping > 0)
+ left_padding_adjusted_bqs = left_padding_bqs + ISP_VEC_NELEMS;
+else
+#else
+need_bds_factor_1_50 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_25) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00))) != 0);
+
+need_bds_factor_1_25 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_25) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00))) != 0);
+
+if (binary->info->sp.pipeline.left_cropping > 0 &&
+ (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25))
+{
+ /*
+ * downscale 2.0 -> first_vec_adjusted_bqs = 128
+ * downscale 1.5 -> first_vec_adjusted_bqs = 96
+ * downscale 1.25 -> first_vec_adjusted_bqs = 80
+ */
+ unsigned int first_vec_adjusted_bqs
+ = ISP_VEC_NELEMS * bs_hor_ratio_in / bs_hor_ratio_out;
+ left_padding_adjusted_bqs = first_vec_adjusted_bqs
+ - _ISP_BQS(binary->info->sp.pipeline.left_cropping);
+} else
+#endif
+ left_padding_adjusted_bqs = left_padding_bqs;
+
+#ifndef ISP2401
+/* Currently, the bad pixels caused by filters before bayer scaling
+are NOT considered, because their effect is subtle.
+If some large filter is used in the future,
+the bad pixels will need to be considered.
+
+Currently, when bds_factor isn't 1.00, a 3x3 anti-alias filter is applied
+to each color plane (Gr/R/B/Gb) before bayer downscaling.
+This filter moves each color plane to the right/bottom
+by at most 1 pixel, depending on the downscaling factor.
+*/
+bad_bqs_on_left_before_bs = 0;
+bad_bqs_on_top_before_bs = 0;
+#else
+IA_CSS_LOG("supported_bds_factors=%d, need_bds_factor:2_00=%d, 1_50=%d, 1_25=%d",
+ binary->info->sp.bds.supported_bds_factors,
+ need_bds_factor_2_00, need_bds_factor_1_50, need_bds_factor_1_25);
+IA_CSS_LOG("left_cropping=%d, left_padding_adjusted_bqs=%d",
+ binary->info->sp.pipeline.left_cropping, left_padding_adjusted_bqs);
+
+/* Set the top padding padded inside the isp kernel for bayer downscaling binaries.
+ * When the bds_factor isn't 1.00, the top padding is padded inside the isp
+ * before bayer downscaling, because the top cropping size (input margin) is not enough.
+ * (calculate_input_line(), raw_compute_vphase(), dma_read_raw() in raw.isp.c)
+ * NOTE: In dma_read_raw(), the factor passed to raw_compute_vphase() is obtained from get_bds_factor_for_dma_read().
+ * This factor is BDS_FPVAL_100/BDS_FPVAL_125/BDS_FPVAL_150/BDS_FPVAL_200.
+ */
+top_padding_bqs = 0;
+if (binary->info->sp.pipeline.top_cropping > 0 &&
+ (required_bds_factor == SH_CSS_BDS_FACTOR_1_25 ||
+ required_bds_factor == SH_CSS_BDS_FACTOR_1_50 ||
+ required_bds_factor == SH_CSS_BDS_FACTOR_2_00))
+{
+ /* Calculation from calculate_input_line() and raw_compute_vphase() in raw.isp.c. */
+ /* top cropping (in bqs) */
+ int top_cropping_bqs = _ISP_BQS(binary->info->sp.pipeline.top_cropping);
+ /* downscaling factor by fixed-point */
+ int factor = bds_num * bds_frac_acc / bds_den;
+ /* top padding by fixed-point (in bqs) */
+ int top_padding_bqsxfrac_acc =
+ (top_cropping_bqs * factor - top_cropping_bqs * bds_frac_acc)
+ + (2 * bds_frac_acc - factor);
+
+ top_padding_bqs = (unsigned int)((top_padding_bqsxfrac_acc + bds_frac_acc / 2 -
+ 1) / bds_frac_acc);
+}
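+
+/*
+ * Worked example (assuming FRAC_ACC == 8, as another comment in this file
+ * notes): for bds factor 2.00, factor = 2 * 8 = 16, so
+ * top_padding_bqsxfrac_acc = 8 * top_cropping_bqs and the rounded division
+ * yields top_padding_bqs == top_cropping_bqs.
+ */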
+
+IA_CSS_LOG("top_cropping=%d, top_padding_bqs=%d", binary->info->sp.pipeline.top_cropping, top_padding_bqs);
+
+/* Set the right/down shift amount caused by filters applied BEFORE bayer scaling,
+ * where bayer scaling runs BEFORE shading correction.
+ *
+ * When the bds_factor isn't 1.00, 3x3 anti-alias filter is applied to each color plane(Gr/R/B/Gb)
+ * before bayer downscaling.
+ * This filter shifts each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel.
+ */
+right_shift_bqs_before_bs = 0;
+down_shift_bqs_before_bs = 0;
+#endif
+
+#ifndef ISP2401
+/* Currently, the bad pixels caused by filters after bayer scaling
+are NOT considered, because their effect is subtle.
+If some large filter is used in the future,
+the bad pixels will need to be considered.
+
+Currently, when DPC&BNR is processed between bayer scaling and
+shading correction, DPC&BNR moves each color plane to the
+right/bottom by 1 pixel.
+*/
+bad_bqs_on_left_after_bs = 0;
+bad_bqs_on_top_after_bs = 0;
+#else
+if (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25)
+{
+ right_shift_bqs_before_bs = 1;
+ down_shift_bqs_before_bs = 1;
+}
+
+IA_CSS_LOG("right_shift_bqs_before_bs=%d, down_shift_bqs_before_bs=%d",
+ right_shift_bqs_before_bs, down_shift_bqs_before_bs);
+
+/* Set the right/down shift amount caused by filters applied AFTER bayer scaling,
+ * where bayer scaling runs BEFORE shading correction.
+ *
+ * When DPC&BNR is processed between bayer scaling and shading correction,
+ * DPC&BNR moves each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel.
+ */
+right_shift_bqs_after_bs = 0;
+down_shift_bqs_after_bs = 0;
+#endif
+
+#ifndef ISP2401
+/* Calculate the origin of bayer (real sensor data area)
+located on the shading table during the shading correction. */
+res->sc_bayer_origin_x_bqs_on_shading_table
+= ((left_padding_adjusted_bqs + bad_bqs_on_left_before_bs)
+ * bs_hor_ratio_out + bs_hor_ratio_in / 2) / bs_hor_ratio_in
++ bad_bqs_on_left_after_bs;
+/* "+ bs_hor_ratio_in/2": rounding for division by bs_hor_ratio_in */
+res->sc_bayer_origin_y_bqs_on_shading_table
+= (bad_bqs_on_top_before_bs
+ * bs_ver_ratio_out + bs_ver_ratio_in / 2) / bs_ver_ratio_in
++ bad_bqs_on_top_after_bs;
+/* "+ bs_ver_ratio_in/2": rounding for division by bs_ver_ratio_in */
+
+res->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in;
+res->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out;
+res->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in;
+res->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out;
+#else
+if (binary->info->mem_offsets.offsets.param->dmem.dp.size != 0) /* if DPC&BNR is enabled in the binary */
+{
+ right_shift_bqs_after_bs = 1;
+ down_shift_bqs_after_bs = 1;
+}
+
+IA_CSS_LOG("right_shift_bqs_after_bs=%d, down_shift_bqs_after_bs=%d",
+ right_shift_bqs_after_bs, down_shift_bqs_after_bs);
+
+/* Set the origin of the sensor data area on the internal frame at shading correction. */
+{
+ unsigned int bs_frac = bds_frac_acc; /* scaling factor 1.0 in fixed point */
+ unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
+
+ bs_out = bs_hor_ratio_out * bs_frac;
+ bs_in = bs_hor_ratio_in * bs_frac;
+ sensor_data_origin_x_bqs_on_internal
+ = ((left_padding_adjusted_bqs + right_shift_bqs_before_bs) * bs_out + bs_in / 2) / bs_in
+ + right_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
+
+ bs_out = bs_ver_ratio_out * bs_frac;
+ bs_in = bs_ver_ratio_in * bs_frac;
+ sensor_data_origin_y_bqs_on_internal
+ = ((top_padding_bqs + down_shift_bqs_before_bs) * bs_out + bs_in / 2) / bs_in
+ + down_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
+}
+
+scr->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in;
+scr->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out;
+scr->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in;
+scr->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out;
+scr->sensor_data_origin_x_bqs_on_internal = (uint32_t)sensor_data_origin_x_bqs_on_internal;
+scr->sensor_data_origin_y_bqs_on_internal = (uint32_t)sensor_data_origin_y_bqs_on_internal;
+
+IA_CSS_LOG("sc_requirements: %d, %d, %d, %d, %d, %d",
+ scr->bayer_scale_hor_ratio_in, scr->bayer_scale_hor_ratio_out,
+ scr->bayer_scale_ver_ratio_in, scr->bayer_scale_ver_ratio_out,
+ scr->sensor_data_origin_x_bqs_on_internal, scr->sensor_data_origin_y_bqs_on_internal);
+#endif
+
+#ifdef ISP2401
+IA_CSS_LEAVE_ERR_PRIVATE(err);
+#endif
+return err;
+}
+
+/* Get the shading information of Shading Correction Type 1. */
+static enum ia_css_err
+ia_css_binary_get_shading_info_type_1(const struct ia_css_binary
+ *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+#ifndef ISP2401
+ struct ia_css_shading_info *info) /* [out] */
+#else
+ struct ia_css_shading_info *shading_info, /* [out] */
+ struct ia_css_pipe_config *pipe_config) /* [out] */
+#endif
+{
+ enum ia_css_err err;
+#ifndef ISP2401
+ struct sh_css_shading_table_bayer_origin_compute_results res;
+#else
+ struct sh_css_binary_sc_requirements scr;
+#endif
+
+#ifndef ISP2401
+ assert(binary);
+ assert(info);
+#else
+ u32 in_width_bqs, in_height_bqs, internal_width_bqs, internal_height_bqs;
+ u32 num_hor_grids, num_ver_grids, bqs_per_grid_cell, tbl_width_bqs, tbl_height_bqs;
+ u32 sensor_org_x_bqs_on_internal, sensor_org_y_bqs_on_internal, sensor_width_bqs, sensor_height_bqs;
+ u32 sensor_center_x_bqs_on_internal, sensor_center_y_bqs_on_internal;
+ u32 left, right, upper, lower;
+ u32 adjust_left, adjust_right, adjust_upper, adjust_lower, adjust_width_bqs, adjust_height_bqs;
+ u32 internal_org_x_bqs_on_tbl, internal_org_y_bqs_on_tbl;
+ u32 sensor_org_x_bqs_on_tbl, sensor_org_y_bqs_on_tbl;
+#endif
+
+#ifndef ISP2401
+ info->type = IA_CSS_SHADING_CORRECTION_TYPE_1;
+#else
+ assert(binary);
+ assert(stream_config);
+ assert(shading_info);
+ assert(pipe_config);
+#endif
+
+#ifndef ISP2401
+ info->info.type_1.enable = binary->info->sp.enable.sc;
+ info->info.type_1.num_hor_grids = binary->sctbl_width_per_color;
+ info->info.type_1.num_ver_grids = binary->sctbl_height;
+ info->info.type_1.bqs_per_grid_cell = (1 << binary->deci_factor_log2);
+#else
+ IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p",
+ binary, required_bds_factor, stream_config);
+#endif
+
+ /* Initialize by default values. */
+#ifndef ISP2401
+ info->info.type_1.bayer_scale_hor_ratio_in = 1;
+ info->info.type_1.bayer_scale_hor_ratio_out = 1;
+ info->info.type_1.bayer_scale_ver_ratio_in = 1;
+ info->info.type_1.bayer_scale_ver_ratio_out = 1;
+ info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = 0;
+ info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = 0;
+
+ err = ia_css_binary_compute_shading_table_bayer_origin(
+ binary,
+ required_bds_factor,
+ stream_config,
+ &res);
+ if (err != IA_CSS_SUCCESS)
+#else
+ *shading_info = DEFAULT_SHADING_INFO_TYPE_1;
+
+ err = sh_css_binary_get_sc_requirements(binary, required_bds_factor, stream_config, &scr);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+#endif
+ return err;
+#ifdef ISP2401
+}
+
+IA_CSS_LOG("binary: id=%d, sctbl=%dx%d, deci=%d",
+ binary->info->sp.id, binary->sctbl_width_per_color, binary->sctbl_height, binary->deci_factor_log2);
+IA_CSS_LOG("binary: in=%dx%d, in_padded_w=%d, int=%dx%d, int_padded_w=%d, out=%dx%d, out_padded_w=%d",
+ binary->in_frame_info.res.width, binary->in_frame_info.res.height, binary->in_frame_info.padded_width,
+ binary->internal_frame_info.res.width, binary->internal_frame_info.res.height,
+ binary->internal_frame_info.padded_width,
+ binary->out_frame_info[0].res.width, binary->out_frame_info[0].res.height,
+ binary->out_frame_info[0].padded_width);
+
+/* Set the input size from sensor, which includes left/top crop size. */
+in_width_bqs = _ISP_BQS(binary->in_frame_info.res.width);
+in_height_bqs = _ISP_BQS(binary->in_frame_info.res.height);
+
+/* Frame size internally used in ISP, including sensor data and padding.
+ * This is the frame size to which shading correction is applied.
+ */
+internal_width_bqs = _ISP_BQS(binary->internal_frame_info.res.width);
+internal_height_bqs = _ISP_BQS(binary->internal_frame_info.res.height);
+
+/* Shading table. */
+num_hor_grids = binary->sctbl_width_per_color;
+num_ver_grids = binary->sctbl_height;
+bqs_per_grid_cell = (1 << binary->deci_factor_log2);
+tbl_width_bqs = (num_hor_grids - 1) * bqs_per_grid_cell;
+tbl_height_bqs = (num_ver_grids - 1) * bqs_per_grid_cell;
+#endif
+
+#ifndef ISP2401
+info->info.type_1.bayer_scale_hor_ratio_in = res.bayer_scale_hor_ratio_in;
+info->info.type_1.bayer_scale_hor_ratio_out = res.bayer_scale_hor_ratio_out;
+info->info.type_1.bayer_scale_ver_ratio_in = res.bayer_scale_ver_ratio_in;
+info->info.type_1.bayer_scale_ver_ratio_out = res.bayer_scale_ver_ratio_out;
+info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = res.sc_bayer_origin_x_bqs_on_shading_table;
+info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = res.sc_bayer_origin_y_bqs_on_shading_table;
+#else
+IA_CSS_LOG("tbl_width_bqs=%d, tbl_height_bqs=%d", tbl_width_bqs, tbl_height_bqs);
+#endif
+
+#ifdef ISP2401
+/* Real sensor data area on the internal frame at shading correction.
+ * Filters and scaling are applied to the internal frame before shading correction, depending on the binary.
+ */
+sensor_org_x_bqs_on_internal = scr.sensor_data_origin_x_bqs_on_internal;
+sensor_org_y_bqs_on_internal = scr.sensor_data_origin_y_bqs_on_internal;
+{
+ unsigned int bs_frac = 8; /* scaling factor 1.0 in fixed point (8 == FRAC_ACC macro in ISP) */
+ unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
+
+ bs_out = scr.bayer_scale_hor_ratio_out * bs_frac;
+ bs_in = scr.bayer_scale_hor_ratio_in * bs_frac;
+ sensor_width_bqs = (in_width_bqs * bs_out + bs_in / 2) / bs_in; /* "+ bs_in/2": rounding */
+
+ bs_out = scr.bayer_scale_ver_ratio_out * bs_frac;
+ bs_in = scr.bayer_scale_ver_ratio_in * bs_frac;
+ sensor_height_bqs = (in_height_bqs * bs_out + bs_in / 2) / bs_in; /* "+ bs_in/2": rounding */
+}
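+
+/* Illustrative example (hypothetical numbers): for in_width_bqs = 1000 and
+ * a 3:2 horizontal bayer scale (bs_out = 2 * 8 = 16, bs_in = 3 * 8 = 24),
+ * sensor_width_bqs = (1000 * 16 + 12) / 24 = 667, i.e. 2000/3 rounded to
+ * the nearest integer rather than truncated to 666.
+ */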
+
+/* Center of the sensor data on the internal frame at shading correction. */
+sensor_center_x_bqs_on_internal = sensor_org_x_bqs_on_internal + sensor_width_bqs / 2;
+sensor_center_y_bqs_on_internal = sensor_org_y_bqs_on_internal + sensor_height_bqs / 2;
+
+/* Size of left/right/upper/lower sides of the sensor center on the internal frame. */
+left = sensor_center_x_bqs_on_internal;
+right = internal_width_bqs - sensor_center_x_bqs_on_internal;
+upper = sensor_center_y_bqs_on_internal;
+lower = internal_height_bqs - sensor_center_y_bqs_on_internal;
+
+/* Align the size of left/right/upper/lower sides to a multiple of the grid cell size. */
+adjust_left = CEIL_MUL(left, bqs_per_grid_cell);
+adjust_right = CEIL_MUL(right, bqs_per_grid_cell);
+adjust_upper = CEIL_MUL(upper, bqs_per_grid_cell);
+adjust_lower = CEIL_MUL(lower, bqs_per_grid_cell);
+
+/* Shading table should cover the adjusted frame size. */
+adjust_width_bqs = adjust_left + adjust_right;
+adjust_height_bqs = adjust_upper + adjust_lower;
+
+IA_CSS_LOG("adjust_width_bqs=%d, adjust_height_bqs=%d", adjust_width_bqs, adjust_height_bqs);
+
+if (adjust_width_bqs > tbl_width_bqs || adjust_height_bqs > tbl_height_bqs)
+{
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+}
+
+/* Origin of the internal frame on the shading table. */
+internal_org_x_bqs_on_tbl = adjust_left - left;
+internal_org_y_bqs_on_tbl = adjust_upper - upper;
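+
+/* Illustrative example (hypothetical numbers): with left = 100 bqs and
+ * bqs_per_grid_cell = 32, CEIL_MUL gives adjust_left = 128, so the internal
+ * frame starts 28 bqs into the shading table and the sensor center falls
+ * exactly on a grid line of the table.
+ */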
+
+/* Origin of the real sensor data area on the shading table. */
+sensor_org_x_bqs_on_tbl = internal_org_x_bqs_on_tbl + sensor_org_x_bqs_on_internal;
+sensor_org_y_bqs_on_tbl = internal_org_y_bqs_on_tbl + sensor_org_y_bqs_on_internal;
+
+/* The shading information needed by the API is stored in shading_info. */
+shading_info->info.type_1.num_hor_grids = num_hor_grids;
+shading_info->info.type_1.num_ver_grids = num_ver_grids;
+shading_info->info.type_1.bqs_per_grid_cell = bqs_per_grid_cell;
+
+shading_info->info.type_1.bayer_scale_hor_ratio_in = scr.bayer_scale_hor_ratio_in;
+shading_info->info.type_1.bayer_scale_hor_ratio_out = scr.bayer_scale_hor_ratio_out;
+shading_info->info.type_1.bayer_scale_ver_ratio_in = scr.bayer_scale_ver_ratio_in;
+shading_info->info.type_1.bayer_scale_ver_ratio_out = scr.bayer_scale_ver_ratio_out;
+
+shading_info->info.type_1.isp_input_sensor_data_res_bqs.width = in_width_bqs;
+shading_info->info.type_1.isp_input_sensor_data_res_bqs.height = in_height_bqs;
+
+shading_info->info.type_1.sensor_data_res_bqs.width = sensor_width_bqs;
+shading_info->info.type_1.sensor_data_res_bqs.height = sensor_height_bqs;
+
+shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x = (int32_t)sensor_org_x_bqs_on_tbl;
+shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y = (int32_t)sensor_org_y_bqs_on_tbl;
+
+/* The ISP-related shading information (not needed by the API) is stored in pipe_config. */
+pipe_config->internal_frame_origin_bqs_on_sctbl.x = (int32_t)internal_org_x_bqs_on_tbl;
+pipe_config->internal_frame_origin_bqs_on_sctbl.y = (int32_t)internal_org_y_bqs_on_tbl;
+
+IA_CSS_LOG("shading_info: grids=%dx%d, cell=%d, scale=%d,%d,%d,%d, input=%dx%d, data=%dx%d, origin=(%d,%d)",
+ shading_info->info.type_1.num_hor_grids,
+ shading_info->info.type_1.num_ver_grids,
+ shading_info->info.type_1.bqs_per_grid_cell,
+ shading_info->info.type_1.bayer_scale_hor_ratio_in,
+ shading_info->info.type_1.bayer_scale_hor_ratio_out,
+ shading_info->info.type_1.bayer_scale_ver_ratio_in,
+ shading_info->info.type_1.bayer_scale_ver_ratio_out,
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.width,
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.height,
+ shading_info->info.type_1.sensor_data_res_bqs.width,
+ shading_info->info.type_1.sensor_data_res_bqs.height,
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x,
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y);
+
+IA_CSS_LOG("pipe_config: origin=(%d,%d)",
+ pipe_config->internal_frame_origin_bqs_on_sctbl.x,
+ pipe_config->internal_frame_origin_bqs_on_sctbl.y);
+
+IA_CSS_LEAVE_ERR_PRIVATE(err);
+#endif
+return err;
+}
+
+enum ia_css_err
+ia_css_binary_get_shading_info(const struct ia_css_binary *binary, /* [in] */
+ enum ia_css_shading_correction_type type, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct ia_css_shading_info *shading_info, /* [out] */
+ struct ia_css_pipe_config *pipe_config) /* [out] */
+{
+ enum ia_css_err err;
+
+ assert(binary);
+ assert(shading_info);
+
+ IA_CSS_ENTER_PRIVATE("binary=%p, type=%d, required_bds_factor=%d, stream_config=%p",
+ binary, type, required_bds_factor, stream_config);
+
+ if (type == IA_CSS_SHADING_CORRECTION_TYPE_1)
+#ifndef ISP2401
+ err = ia_css_binary_get_shading_info_type_1(binary, required_bds_factor, stream_config,
+ shading_info);
+#else
+ err = ia_css_binary_get_shading_info_type_1(binary, required_bds_factor, stream_config,
+ shading_info, pipe_config);
+#endif
+
+	/* Other function calls can be added here when other shading correction types are added in the future. */
+
+ else
+ err = IA_CSS_ERR_NOT_SUPPORTED;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void sh_css_binary_common_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info)
+{
+ assert(binary);
+ assert(info);
+
+ info->isp_in_width = binary->internal_frame_info.res.width;
+ info->isp_in_height = binary->internal_frame_info.res.height;
+
+ info->vamem_type = IA_CSS_VAMEM_TYPE_2;
+}
+
+void
+ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_dvs_grid_info *dvs_info;
+
+ (void)pipe;
+ assert(binary);
+ assert(info);
+
+ dvs_info = &info->dvs_grid.dvs_grid_info;
+
+ /* for DIS, we use a division instead of a ceil_div. If this is smaller
+ * than the 3a grid size, it indicates that the outer values are not
+ * valid for DIS.
+ */
+ dvs_info->enable = binary->info->sp.enable.dis;
+ dvs_info->width = binary->dis.grid.dim.width;
+ dvs_info->height = binary->dis.grid.dim.height;
+ dvs_info->aligned_width = binary->dis.grid.pad.width;
+ dvs_info->aligned_height = binary->dis.grid.pad.height;
+ dvs_info->bqs_per_grid_cell = 1 << binary->dis.deci_factor_log2;
+ dvs_info->num_hor_coefs = binary->dis.coef.dim.width;
+ dvs_info->num_ver_coefs = binary->dis.coef.dim.height;
+
+ sh_css_binary_common_grid_info(binary, info);
+}
+
+void
+ia_css_binary_dvs_stat_grid_info(
+ const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe)
+{
+ (void)pipe;
+ sh_css_binary_common_grid_info(binary, info);
+ return;
+}
+
+enum ia_css_err
+ia_css_binary_3a_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe) {
+ struct ia_css_3a_grid_info *s3a_info;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("binary=%p, info=%p, pipe=%p",
+ binary, info, pipe);
+
+ assert(binary);
+ assert(info);
+ s3a_info = &info->s3a_grid;
+
+ /* 3A statistics grid */
+ s3a_info->enable = binary->info->sp.enable.s3a;
+ s3a_info->width = binary->s3atbl_width;
+ s3a_info->height = binary->s3atbl_height;
+ s3a_info->aligned_width = binary->s3atbl_isp_width;
+ s3a_info->aligned_height = binary->s3atbl_isp_height;
+ s3a_info->bqs_per_grid_cell = (1 << binary->deci_factor_log2);
+ s3a_info->deci_factor_log2 = binary->deci_factor_log2;
+ s3a_info->elem_bit_depth = SH_CSS_BAYER_BITS;
+ s3a_info->use_dmem = binary->info->sp.s3a.s3atbl_use_dmem;
+#if defined(HAS_NO_HMEM)
+ s3a_info->has_histogram = 1;
+#else
+ s3a_info->has_histogram = 0;
+#endif
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void
+binary_init_pc_histogram(struct sh_css_pc_histogram *histo)
+{
+ assert(histo);
+
+ histo->length = 0;
+ histo->run = NULL;
+ histo->stall = NULL;
+}
+
+static void
+binary_init_metrics(struct sh_css_binary_metrics *metrics,
+ const struct ia_css_binary_info *info)
+{
+ assert(metrics);
+ assert(info);
+
+ metrics->mode = info->pipeline.mode;
+ metrics->id = info->id;
+ metrics->next = NULL;
+ binary_init_pc_histogram(&metrics->isp_histogram);
+ binary_init_pc_histogram(&metrics->sp_histogram);
+}
+
+/* move to host part of output module */
+static bool
+binary_supports_output_format(const struct ia_css_binary_xinfo *info,
+ enum ia_css_frame_format format)
+{
+ int i;
+
+ assert(info);
+
+ for (i = 0; i < info->num_output_formats; i++) {
+ if (info->output_formats[i] == format)
+ return true;
+ }
+ return false;
+}
+
+#ifdef ISP2401
+static bool
+binary_supports_input_format(const struct ia_css_binary_xinfo *info,
+ enum atomisp_input_format format)
+{
+ assert(info);
+ (void)format;
+
+ return true;
+}
+#endif
+
+static bool
+binary_supports_vf_format(const struct ia_css_binary_xinfo *info,
+ enum ia_css_frame_format format)
+{
+ int i;
+
+ assert(info);
+
+ for (i = 0; i < info->num_vf_formats; i++) {
+ if (info->vf_formats[i] == format)
+ return true;
+ }
+ return false;
+}
+
+/* move to host part of bds module */
+static bool
+supports_bds_factor(u32 supported_factors,
+ uint32_t bds_factor)
+{
+ return ((supported_factors & PACK_BDS_FACTOR(bds_factor)) != 0);
+}
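+
+/* Example (hypothetical factors): PACK_BDS_FACTOR() turns a BDS factor into
+ * its single bit in the mask, so a binary that advertises only the 1.00 and
+ * 1.50 factors in supported_bds_factors makes this check fail for a
+ * requested 1.25 factor, and that candidate is skipped.
+ */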
+
+static enum ia_css_err
+binary_init_info(struct ia_css_binary_xinfo *info, unsigned int i,
+ bool *binary_found) {
+ const unsigned char *blob = sh_css_blob_info[i].blob;
+ unsigned int size = sh_css_blob_info[i].header.blob.size;
+
+ if ((!info) || (!binary_found))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ *info = sh_css_blob_info[i].header.info.isp;
+ *binary_found = blob;
+ info->blob_index = i;
+ /* we don't have this binary, skip it */
+ if (!size)
+ return IA_CSS_SUCCESS;
+
+ info->xmem_addr = sh_css_load_blob(blob, size);
+ if (!info->xmem_addr)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return IA_CSS_SUCCESS;
+}
+
+/* When binaries are put at the beginning, they will only
+ * be selected if no other primary matches.
+ */
+enum ia_css_err
+ia_css_binary_init_infos(void) {
+ unsigned int i;
+ unsigned int num_of_isp_binaries = sh_css_num_binaries - NUM_OF_SPS - NUM_OF_BLS;
+
+ if (num_of_isp_binaries == 0)
+ return IA_CSS_SUCCESS;
+
+ all_binaries = sh_css_malloc(num_of_isp_binaries *
+ sizeof(*all_binaries));
+ if (!all_binaries)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ for (i = 0; i < num_of_isp_binaries; i++)
+ {
+ enum ia_css_err ret;
+ struct ia_css_binary_xinfo *binary = &all_binaries[i];
+ bool binary_found;
+
+ ret = binary_init_info(binary, i, &binary_found);
+ if (ret != IA_CSS_SUCCESS)
+ return ret;
+ if (!binary_found)
+ continue;
+ /* Prepend new binary information */
+ binary->next = binary_infos[binary->sp.pipeline.mode];
+ binary_infos[binary->sp.pipeline.mode] = binary;
+ binary->blob = &sh_css_blob_info[i];
+ binary->mem_offsets = sh_css_blob_info[i].mem_offsets;
+ }
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_binary_uninit(void) {
+ unsigned int i;
+ struct ia_css_binary_xinfo *b;
+
+ for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++)
+ {
+ for (b = binary_infos[i]; b; b = b->next) {
+ if (b->xmem_addr)
+ hmm_free(b->xmem_addr);
+ b->xmem_addr = mmgr_NULL;
+ }
+ binary_infos[i] = NULL;
+ }
+ sh_css_free(all_binaries);
+ return IA_CSS_SUCCESS;
+}
+
+/* @brief Compute decimation factor for 3A statistics and shading correction.
+ *
+ * @param[in] width Frame width in pixels.
+ * @param[in] height Frame height in pixels.
+ * @return Log2 of decimation factor (= grid cell size) in bayer quads.
+ */
+static int
+binary_grid_deci_factor_log2(int width, int height)
+{
+	/* 3A/Shading decimation factor specification (as of August 2008)
+	 * ------------------------------------------------------------------
+	 * [Image Width (BQ)] [Decimation Factor (BQ)] [Resulting grid cells]
+	#ifndef ISP2401
+	 * 1280 and over              32                 40 and over
+	 * 640 to 1279                16                 40 to 80
+	 * up to 639                   8                 up to 80
+	#else
+	 * from 1280                  32                 from 40
+	 * from 640 to 1279           16                 from 40 to 80
+	 * to 639                      8                 to 80
+	#endif
+ * ------------------------------------------------------------------
+ */
+ /* Maximum and minimum decimation factor by the specification */
+#define MAX_SPEC_DECI_FACT_LOG2 5
+#define MIN_SPEC_DECI_FACT_LOG2 3
+	/* The smallest frame width in bayer quads for which the specification allows decimation factor (log2) 5 or 4 */
+#define DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ 1280
+#define DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ 640
+
+ int smallest_factor; /* the smallest factor (log2) where the number of cells does not exceed the limitation */
+ int spec_factor; /* the factor (log2) which satisfies the specification */
+
+ /* Currently supported maximum width and height are 5120(=80*64) and 3840(=60*64). */
+ assert(ISP_BQ_GRID_WIDTH(width,
+ MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_WIDTH);
+ assert(ISP_BQ_GRID_HEIGHT(height,
+ MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_HEIGHT);
+
+ /* Compute the smallest factor. */
+ smallest_factor = MAX_SPEC_DECI_FACT_LOG2;
+ while (ISP_BQ_GRID_WIDTH(width,
+ smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_WIDTH &&
+ ISP_BQ_GRID_HEIGHT(height, smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_HEIGHT
+ && smallest_factor > MIN_SPEC_DECI_FACT_LOG2)
+ smallest_factor--;
+
+ /* Get the factor by the specification. */
+ if (_ISP_BQS(width) >= DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ)
+ spec_factor = 5;
+ else if (_ISP_BQS(width) >= DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ)
+ spec_factor = 4;
+ else
+ spec_factor = 3;
+
+ /* If smallest_factor is smaller than or equal to spec_factor, choose spec_factor to follow the specification.
+ If smallest_factor is larger than spec_factor, choose smallest_factor.
+
+ ex. width=2560, height=1920
+ smallest_factor=4, spec_factor=5
+ smallest_factor < spec_factor -> return spec_factor
+
+ ex. width=300, height=3000
+ smallest_factor=5, spec_factor=3
+ smallest_factor > spec_factor -> return smallest_factor
+ */
+ return max(smallest_factor, spec_factor);
+
+#undef MAX_SPEC_DECI_FACT_LOG2
+#undef MIN_SPEC_DECI_FACT_LOG2
+#undef DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ
+#undef DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ
+}
+
+static int
+binary_in_frame_padded_width(int in_frame_width,
+ int isp_internal_width,
+ int dvs_env_width,
+ int stream_config_left_padding,
+ int left_cropping,
+ bool need_scaling)
+{
+	int rval;
+	int nr_of_left_paddings;	/* number of padding pixels on the left of an image line */
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* the output image line of Input System 2401 does not have the left paddings */
+ nr_of_left_paddings = 0;
+#else
+ /* in other cases, the left padding pixels are always 128 */
+ nr_of_left_paddings = 2 * ISP_VEC_NELEMS;
+#endif
+ if (need_scaling) {
+ /* In SDV use-case, we need to match left-padding of
+ * primary and the video binary. */
+ if (stream_config_left_padding != -1) {
+			/* Unlike before, we do left & right padding. */
+ rval =
+ CEIL_MUL(in_frame_width + nr_of_left_paddings,
+ 2 * ISP_VEC_NELEMS);
+ } else {
+			/* Unlike before, we do left & right padding. */
+ in_frame_width += dvs_env_width;
+ rval =
+ CEIL_MUL(in_frame_width +
+ (left_cropping ? nr_of_left_paddings : 0),
+ 2 * ISP_VEC_NELEMS);
+ }
+ } else {
+ rval = isp_internal_width;
+ }
+
+ return rval;
+}
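+
+/* Illustrative example, assuming ISP_VEC_NELEMS = 64 so that the left
+ * padding is 2 * 64 = 128 pixels: scaling a 1920-wide input with an
+ * explicit stream left padding yields CEIL_MUL(1920 + 128, 128) = 2048,
+ * while the non-scaling case simply returns isp_internal_width.
+ */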
+
+enum ia_css_err
+ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
+ bool online,
+ bool two_ppc,
+ enum atomisp_input_format stream_format,
+ const struct ia_css_frame_info *in_info, /* can be NULL */
+ const struct ia_css_frame_info *bds_out_info, /* can be NULL */
+ const struct ia_css_frame_info *out_info[], /* can be NULL */
+ const struct ia_css_frame_info *vf_info, /* can be NULL */
+ struct ia_css_binary *binary,
+ struct ia_css_resolution *dvs_env,
+ int stream_config_left_padding,
+ bool accelerator) {
+ const struct ia_css_binary_info *info = &xinfo->sp;
+ unsigned int dvs_env_width = 0,
+ dvs_env_height = 0,
+ vf_log_ds = 0,
+ s3a_log_deci = 0,
+ bits_per_pixel = 0,
+ /* Resolution at SC/3A/DIS kernel. */
+ sc_3a_dis_width = 0,
+ /* Resolution at SC/3A/DIS kernel. */
+ sc_3a_dis_padded_width = 0,
+ /* Resolution at SC/3A/DIS kernel. */
+ sc_3a_dis_height = 0,
+ isp_internal_width = 0,
+ isp_internal_height = 0,
+ s3a_isp_width = 0;
+
+ bool need_scaling = false;
+ struct ia_css_resolution binary_dvs_env, internal_res;
+ enum ia_css_err err;
+ unsigned int i;
+ const struct ia_css_frame_info *bin_out_info = NULL;
+
+ assert(info);
+ assert(binary);
+
+ binary->info = xinfo;
+ if (!accelerator)
+ {
+ /* binary->css_params has been filled by accelerator itself. */
+ err = ia_css_isp_param_allocate_isp_parameters(
+ &binary->mem_params, &binary->css_params,
+ &info->mem_initializers);
+ if (err != IA_CSS_SUCCESS) {
+ return err;
+ }
+ }
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ {
+ if (out_info[i] && (out_info[i]->res.width != 0)) {
+ bin_out_info = out_info[i];
+ break;
+ }
+ }
+ if (in_info && bin_out_info)
+ {
+ need_scaling = (in_info->res.width != bin_out_info->res.width) ||
+ (in_info->res.height != bin_out_info->res.height);
+ }
+
+	/* binary_dvs_env has to be equal to or larger than SH_CSS_MIN_DVS_ENVELOPE */
+ binary_dvs_env.width = 0;
+ binary_dvs_env.height = 0;
+ ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env);
+ dvs_env_width = binary_dvs_env.width;
+ dvs_env_height = binary_dvs_env.height;
+ binary->dvs_envelope.width = dvs_env_width;
+ binary->dvs_envelope.height = dvs_env_height;
+
+ /* internal resolution calculation */
+ internal_res.width = 0;
+ internal_res.height = 0;
+ ia_css_binary_internal_res(in_info, bds_out_info, bin_out_info, dvs_env,
+ info, &internal_res);
+ isp_internal_width = internal_res.width;
+ isp_internal_height = internal_res.height;
+
+ /* internal frame info */
+	if (bin_out_info)
+		binary->internal_frame_info.format = bin_out_info->format;
+ binary->internal_frame_info.res.width = isp_internal_width;
+ binary->internal_frame_info.padded_width = CEIL_MUL(isp_internal_width, 2 * ISP_VEC_NELEMS);
+ binary->internal_frame_info.res.height = isp_internal_height;
+ binary->internal_frame_info.raw_bit_depth = bits_per_pixel;
+
+ if (in_info)
+ {
+ binary->effective_in_frame_res.width = in_info->res.width;
+ binary->effective_in_frame_res.height = in_info->res.height;
+
+ bits_per_pixel = in_info->raw_bit_depth;
+
+ /* input info */
+ binary->in_frame_info.res.width = in_info->res.width +
+ info->pipeline.left_cropping;
+ binary->in_frame_info.res.height = in_info->res.height +
+ info->pipeline.top_cropping;
+
+ binary->in_frame_info.res.width += dvs_env_width;
+ binary->in_frame_info.res.height += dvs_env_height;
+
+ binary->in_frame_info.padded_width =
+ binary_in_frame_padded_width(in_info->res.width,
+ isp_internal_width,
+ dvs_env_width,
+ stream_config_left_padding,
+ info->pipeline.left_cropping,
+ need_scaling);
+
+ binary->in_frame_info.format = in_info->format;
+ binary->in_frame_info.raw_bayer_order = in_info->raw_bayer_order;
+ binary->in_frame_info.crop_info = in_info->crop_info;
+ }
+
+ if (online)
+ {
+ bits_per_pixel = ia_css_util_input_format_bpp(
+ stream_format, two_ppc);
+ }
+ binary->in_frame_info.raw_bit_depth = bits_per_pixel;
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ {
+ if (out_info[i]) {
+ binary->out_frame_info[i].res.width = out_info[i]->res.width;
+ binary->out_frame_info[i].res.height = out_info[i]->res.height;
+ binary->out_frame_info[i].padded_width = out_info[i]->padded_width;
+ if (info->pipeline.mode == IA_CSS_BINARY_MODE_COPY) {
+ binary->out_frame_info[i].raw_bit_depth = bits_per_pixel;
+ } else {
+ /* Only relevant for RAW format.
+ * At the moment, all outputs are raw, 16 bit per pixel, except for copy.
+ * To do this cleanly, the binary should specify in its info
+ * the bit depth per output channel.
+ */
+ binary->out_frame_info[i].raw_bit_depth = 16;
+ }
+ binary->out_frame_info[i].format = out_info[i]->format;
+ }
+ }
+
+ if (vf_info && (vf_info->res.width != 0))
+ {
+ err = ia_css_vf_configure(binary, bin_out_info,
+ (struct ia_css_frame_info *)vf_info, &vf_log_ds);
+ if (err != IA_CSS_SUCCESS) {
+ if (!accelerator) {
+ ia_css_isp_param_destroy_isp_parameters(
+ &binary->mem_params,
+ &binary->css_params);
+ }
+ return err;
+ }
+ }
+ binary->vf_downscale_log2 = vf_log_ds;
+
+ binary->online = online;
+ binary->input_format = stream_format;
+
+ /* viewfinder output info */
+ if ((vf_info) && (vf_info->res.width != 0))
+ {
+ unsigned int vf_out_vecs, vf_out_width, vf_out_height;
+
+ binary->vf_frame_info.format = vf_info->format;
+ if (!bin_out_info)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ vf_out_vecs = __ISP_VF_OUTPUT_WIDTH_VECS(bin_out_info->padded_width,
+ vf_log_ds);
+ vf_out_width = _ISP_VF_OUTPUT_WIDTH(vf_out_vecs);
+ vf_out_height = _ISP_VF_OUTPUT_HEIGHT(bin_out_info->res.height,
+ vf_log_ds);
+
+ /* For preview mode, output pin is used instead of vf. */
+ if (info->pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW) {
+ binary->out_frame_info[0].res.width =
+ (bin_out_info->res.width >> vf_log_ds);
+ binary->out_frame_info[0].padded_width = vf_out_width;
+ binary->out_frame_info[0].res.height = vf_out_height;
+
+ binary->vf_frame_info.res.width = 0;
+ binary->vf_frame_info.padded_width = 0;
+ binary->vf_frame_info.res.height = 0;
+ } else {
+ /* we also store the raw downscaled width. This is
+ * used for digital zoom in preview to zoom only on
+ * the width that we actually want to keep, not on
+ * the aligned width. */
+ binary->vf_frame_info.res.width =
+ (bin_out_info->res.width >> vf_log_ds);
+ binary->vf_frame_info.padded_width = vf_out_width;
+ binary->vf_frame_info.res.height = vf_out_height;
+ }
+ } else
+ {
+ binary->vf_frame_info.res.width = 0;
+ binary->vf_frame_info.padded_width = 0;
+ binary->vf_frame_info.res.height = 0;
+ }
+
+ if (info->enable.ca_gdc)
+ {
+ binary->morph_tbl_width =
+ _ISP_MORPH_TABLE_WIDTH(isp_internal_width);
+ binary->morph_tbl_aligned_width =
+ _ISP_MORPH_TABLE_ALIGNED_WIDTH(isp_internal_width);
+ binary->morph_tbl_height =
+ _ISP_MORPH_TABLE_HEIGHT(isp_internal_height);
+ } else
+ {
+ binary->morph_tbl_width = 0;
+ binary->morph_tbl_aligned_width = 0;
+ binary->morph_tbl_height = 0;
+ }
+
+ sc_3a_dis_width = binary->in_frame_info.res.width;
+ sc_3a_dis_padded_width = binary->in_frame_info.padded_width;
+ sc_3a_dis_height = binary->in_frame_info.res.height;
+ if (bds_out_info && in_info &&
+ bds_out_info->res.width != in_info->res.width)
+ {
+ /* TODO: Next, "internal_frame_info" should be derived from
+ * bds_out. So this part will change once it is in place! */
+ sc_3a_dis_width = bds_out_info->res.width + info->pipeline.left_cropping;
+ sc_3a_dis_padded_width = isp_internal_width;
+ sc_3a_dis_height = isp_internal_height;
+ }
+
+ s3a_isp_width = _ISP_S3A_ELEMS_ISP_WIDTH(sc_3a_dis_padded_width,
+ info->pipeline.left_cropping);
+ if (info->s3a.fixed_s3a_deci_log)
+ {
+ s3a_log_deci = info->s3a.fixed_s3a_deci_log;
+ } else
+ {
+ s3a_log_deci = binary_grid_deci_factor_log2(s3a_isp_width,
+ sc_3a_dis_height);
+ }
+ binary->deci_factor_log2 = s3a_log_deci;
+
+ if (info->enable.s3a)
+ {
+ binary->s3atbl_width =
+ _ISP_S3ATBL_WIDTH(sc_3a_dis_width,
+ s3a_log_deci);
+ binary->s3atbl_height =
+ _ISP_S3ATBL_HEIGHT(sc_3a_dis_height,
+ s3a_log_deci);
+ binary->s3atbl_isp_width =
+ _ISP_S3ATBL_ISP_WIDTH(s3a_isp_width,
+ s3a_log_deci);
+ binary->s3atbl_isp_height =
+ _ISP_S3ATBL_ISP_HEIGHT(sc_3a_dis_height,
+ s3a_log_deci);
+ } else
+ {
+ binary->s3atbl_width = 0;
+ binary->s3atbl_height = 0;
+ binary->s3atbl_isp_width = 0;
+ binary->s3atbl_isp_height = 0;
+ }
+
+ if (info->enable.sc)
+ {
+ if (!atomisp_hw_is_isp2401) {
+ binary->sctbl_width_per_color = _ISP2400_SCTBL_WIDTH_PER_COLOR(sc_3a_dis_padded_width, s3a_log_deci);
+ binary->sctbl_aligned_width_per_color = ISP2400_SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR;
+ binary->sctbl_height = _ISP2400_SCTBL_HEIGHT(sc_3a_dis_height, s3a_log_deci);
+ } else {
+ binary->sctbl_width_per_color = _ISP2401_SCTBL_WIDTH_PER_COLOR(isp_internal_width, s3a_log_deci);
+ binary->sctbl_aligned_width_per_color = ISP2401_SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR;
+ binary->sctbl_height = _ISP2401_SCTBL_HEIGHT(isp_internal_height, s3a_log_deci);
+ binary->sctbl_legacy_width_per_color = _ISP_SCTBL_LEGACY_WIDTH_PER_COLOR(sc_3a_dis_padded_width, s3a_log_deci);
+ binary->sctbl_legacy_height = _ISP_SCTBL_LEGACY_HEIGHT(sc_3a_dis_height, s3a_log_deci);
+ }
+ } else
+ {
+ binary->sctbl_width_per_color = 0;
+ binary->sctbl_aligned_width_per_color = 0;
+ binary->sctbl_height = 0;
+ if (atomisp_hw_is_isp2401) {
+ binary->sctbl_legacy_width_per_color = 0;
+ binary->sctbl_legacy_height = 0;
+ }
+ }
+ ia_css_sdis_init_info(&binary->dis,
+ sc_3a_dis_width,
+ sc_3a_dis_padded_width,
+ sc_3a_dis_height,
+ info->pipeline.isp_pipe_version,
+ info->enable.dis);
+ if (info->pipeline.left_cropping)
+ binary->left_padding = 2 * ISP_VEC_NELEMS - info->pipeline.left_cropping;
+ else
+ binary->left_padding = 0;
+
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_binary_find(struct ia_css_binary_descr *descr,
+ struct ia_css_binary *binary) {
+ int mode;
+ bool online;
+ bool two_ppc;
+ enum atomisp_input_format stream_format;
+ const struct ia_css_frame_info *req_in_info,
+ *req_bds_out_info,
+ *req_out_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS],
+ *req_bin_out_info = NULL,
+ *req_vf_info;
+
+ struct ia_css_binary_xinfo *xcandidate;
+#ifndef ISP2401
+ bool need_ds, need_dz, need_dvs, need_xnr, need_dpc;
+#else
+ bool need_ds, need_dz, need_dvs, need_xnr, need_dpc, need_tnr;
+#endif
+ bool striped;
+ bool enable_yuv_ds;
+ bool enable_high_speed;
+ bool enable_dvs_6axis;
+ bool enable_reduced_pipe;
+ bool enable_capture_pp_bli;
+#ifdef ISP2401
+ bool enable_luma_only;
+#endif
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ bool continuous;
+ unsigned int isp_pipe_version;
+ struct ia_css_resolution dvs_env, internal_res;
+ unsigned int i;
+
+ assert(descr);
+	/* MW: used after an error check, may accept NULL, but doubtful */
+ assert(binary);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n",
+ descr, descr->mode,
+ binary);
+
+ mode = descr->mode;
+ online = descr->online;
+ two_ppc = descr->two_ppc;
+ stream_format = descr->stream_format;
+ req_in_info = descr->in_info;
+ req_bds_out_info = descr->bds_out_info;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ {
+ req_out_info[i] = descr->out_info[i];
+ if (req_out_info[i] && (req_out_info[i]->res.width != 0))
+ req_bin_out_info = req_out_info[i];
+ }
+ if (!req_bin_out_info)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+#ifndef ISP2401
+ req_vf_info = descr->vf_info;
+#else
+
+ if ((descr->vf_info) && (descr->vf_info->res.width == 0))
+ /* width==0 means that there is no vf pin (e.g. in SkyCam preview case) */
+ req_vf_info = NULL;
+ else
+ req_vf_info = descr->vf_info;
+#endif
+
+ need_xnr = descr->enable_xnr;
+ need_ds = descr->enable_fractional_ds;
+ need_dz = false;
+ need_dvs = false;
+ need_dpc = descr->enable_dpc;
+#ifdef ISP2401
+ need_tnr = descr->enable_tnr;
+#endif
+ enable_yuv_ds = descr->enable_yuv_ds;
+ enable_high_speed = descr->enable_high_speed;
+ enable_dvs_6axis = descr->enable_dvs_6axis;
+ enable_reduced_pipe = descr->enable_reduced_pipe;
+ enable_capture_pp_bli = descr->enable_capture_pp_bli;
+#ifdef ISP2401
+ enable_luma_only = descr->enable_luma_only;
+#endif
+ continuous = descr->continuous;
+ striped = descr->striped;
+ isp_pipe_version = descr->isp_pipe_version;
+
+ dvs_env.width = 0;
+ dvs_env.height = 0;
+ internal_res.width = 0;
+ internal_res.height = 0;
+
+ if (mode == IA_CSS_BINARY_MODE_VIDEO)
+ {
+ dvs_env = descr->dvs_env;
+ need_dz = descr->enable_dz;
+ /* Video is the only mode that has a nodz variant. */
+ need_dvs = dvs_env.width || dvs_env.height;
+ }
+
+	/* print a map of the available binaries */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "BINARY INFO:\n");
+ for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++)
+ {
+ xcandidate = binary_infos[i];
+ if (xcandidate) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%d:\n", i);
+ while (xcandidate) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " Name:%s Type:%d Cont:%d\n",
+ xcandidate->blob->name, xcandidate->type,
+ xcandidate->sp.enable.continuous);
+ xcandidate = xcandidate->next;
+ }
+ }
+ }
+
+ /* printf("sh_css_binary_find: pipe version %d\n", isp_pipe_version); */
+ for (xcandidate = binary_infos[mode]; xcandidate;
+ xcandidate = xcandidate->next)
+ {
+ struct ia_css_binary_info *candidate = &xcandidate->sp;
+ /* printf("sh_css_binary_find: evaluating candidate:
+ * %d\n",candidate->id); */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n",
+ candidate, candidate->pipeline.mode, candidate->id);
+
+ /*
+ * MW: Only a limited set of jointly configured binaries can
+ * be used in a continuous preview/video mode unless it is
+ * the copy mode and runs on SP.
+ */
+ if (!candidate->enable.continuous &&
+ continuous && (mode != IA_CSS_BINARY_MODE_COPY)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n",
+ __LINE__, candidate->enable.continuous,
+ continuous, mode,
+ IA_CSS_BINARY_MODE_COPY);
+ continue;
+ }
+ if (striped && candidate->iterator.num_stripes == 1) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: binary is not striped\n",
+ __LINE__);
+ continue;
+ }
+
+ if (candidate->pipeline.isp_pipe_version != isp_pipe_version &&
+ (mode != IA_CSS_BINARY_MODE_COPY) &&
+ (mode != IA_CSS_BINARY_MODE_CAPTURE_PP) &&
+ (mode != IA_CSS_BINARY_MODE_VF_PP)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d != %d)\n",
+ __LINE__,
+ candidate->pipeline.isp_pipe_version, isp_pipe_version);
+ continue;
+ }
+ if (!candidate->enable.reduced_pipe && enable_reduced_pipe) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__,
+ candidate->enable.reduced_pipe,
+ enable_reduced_pipe);
+ continue;
+ }
+ if (!candidate->enable.dvs_6axis && enable_dvs_6axis) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__,
+ candidate->enable.dvs_6axis,
+ enable_dvs_6axis);
+ continue;
+ }
+ if (candidate->enable.high_speed && !enable_high_speed) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__,
+ candidate->enable.high_speed,
+ enable_high_speed);
+ continue;
+ }
+ if (!candidate->enable.xnr && need_xnr) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__,
+ candidate->enable.xnr,
+ need_xnr);
+ continue;
+ }
+ if (!(candidate->enable.ds & 2) && enable_yuv_ds) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__,
+ ((candidate->enable.ds & 2) != 0),
+ enable_yuv_ds);
+ continue;
+ }
+ if ((candidate->enable.ds & 2) && !enable_yuv_ds) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__,
+ ((candidate->enable.ds & 2) != 0),
+ enable_yuv_ds);
+ continue;
+ }
+
+ if (mode == IA_CSS_BINARY_MODE_VIDEO &&
+ candidate->enable.ds && need_ds)
+ need_dz = false;
+
+ /* when we require vf output, we need to have vf_veceven */
+ if ((req_vf_info) && !(candidate->enable.vf_veceven ||
+ /* or variable vf vec even */
+ candidate->vf_dec.is_variable ||
+ /* or more than one output pin. */
+ xcandidate->num_output_pins > 1)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n",
+ __LINE__, req_vf_info,
+ candidate->enable.vf_veceven,
+ candidate->vf_dec.is_variable,
+ xcandidate->num_output_pins, 1);
+ continue;
+ }
+ if (!candidate->enable.dvs_envelope && need_dvs) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__,
+ candidate->enable.dvs_envelope, (int)need_dvs);
+ continue;
+ }
+ /* internal_res check considers input, output, and dvs envelope sizes */
+ ia_css_binary_internal_res(req_in_info, req_bds_out_info,
+ req_bin_out_info, &dvs_env, candidate, &internal_res);
+ if (internal_res.width > candidate->internal.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.width,
+ candidate->internal.max_width);
+ continue;
+ }
+ if (internal_res.height > candidate->internal.max_height) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.height,
+ candidate->internal.max_height);
+ continue;
+ }
+ if (!candidate->enable.ds && need_ds && !(xcandidate->num_output_pins > 1)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.ds, (int)need_ds);
+ continue;
+ }
+ if (!candidate->enable.uds && !candidate->enable.dvs_6axis && need_dz) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n",
+ __LINE__, candidate->enable.uds,
+ candidate->enable.dvs_6axis, (int)need_dz);
+ continue;
+ }
+ if (online && candidate->input.source == IA_CSS_BINARY_INPUT_MEMORY) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_MEMORY);
+ continue;
+ }
+ if (!online && candidate->input.source == IA_CSS_BINARY_INPUT_SENSOR) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_SENSOR);
+ continue;
+ }
+ if (req_bin_out_info->res.width < candidate->output.min_width ||
+ req_bin_out_info->res.width > candidate->output.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n",
+ __LINE__,
+ req_bin_out_info->padded_width,
+ candidate->output.min_width,
+ req_bin_out_info->padded_width,
+ candidate->output.max_width);
+ continue;
+ }
+ if (xcandidate->num_output_pins > 1 &&
+ /* in case we have a second output pin, */
+ req_vf_info) { /* and we need vf output. */
+ if (req_vf_info->res.width > candidate->output.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__,
+ req_vf_info->res.width,
+ candidate->output.max_width);
+ continue;
+ }
+ }
+ if (req_in_info->padded_width > candidate->input.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, req_in_info->padded_width,
+ candidate->input.max_width);
+ continue;
+ }
+ if (!binary_supports_output_format(xcandidate, req_bin_out_info->format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d\n",
+ __LINE__,
+ binary_supports_output_format(xcandidate, req_bin_out_info->format));
+ continue;
+ }
+#ifdef ISP2401
+ if (!binary_supports_input_format(xcandidate, descr->stream_format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d\n",
+ __LINE__,
+ binary_supports_input_format(xcandidate, req_in_info->format));
+ continue;
+ }
+#endif
+ if (xcandidate->num_output_pins > 1 &&
+ /* in case we have a second output pin, */
+ req_vf_info && /* and we need vf output. */
+ /* check if the required vf format
+ is supported. */
+ !binary_supports_output_format(xcandidate, req_vf_info->format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1,
+ req_vf_info,
+ binary_supports_output_format(xcandidate, req_vf_info->format));
+ continue;
+ }
+
+ /* Check if vf_veceven supports the requested vf format */
+ if (xcandidate->num_output_pins == 1 &&
+ req_vf_info && candidate->enable.vf_veceven &&
+ !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1,
+ req_vf_info, candidate->enable.vf_veceven,
+ binary_supports_vf_format(xcandidate, req_vf_info->format));
+ continue;
+ }
+
+ /* Check if vf_veceven supports the requested vf width */
+ if (xcandidate->num_output_pins == 1 &&
+ req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
+ if (req_vf_info->res.width > candidate->output.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__,
+ req_vf_info->res.width,
+ candidate->output.max_width);
+ continue;
+ }
+ }
+
+ if (!supports_bds_factor(candidate->bds.supported_bds_factors,
+ descr->required_bds_factor)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->bds.supported_bds_factors,
+ descr->required_bds_factor);
+ continue;
+ }
+
+ if (!candidate->enable.dpc && need_dpc) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->enable.dpc,
+ descr->enable_dpc);
+ continue;
+ }
+
+ if (candidate->uds.use_bci && enable_capture_pp_bli) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->uds.use_bci,
+ descr->enable_capture_pp_bli);
+ continue;
+ }
+
+#ifdef ISP2401
+ if (candidate->enable.luma_only != enable_luma_only) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d != %d\n",
+ __LINE__, candidate->enable.luma_only,
+ descr->enable_luma_only);
+ continue;
+ }
+
+ if (!candidate->enable.tnr && need_tnr) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.tnr,
+ descr->enable_tnr);
+ continue;
+ }
+
+#endif
+ /* reconfigure any variable properties of the binary */
+ err = ia_css_binary_fill_info(xcandidate, online, two_ppc,
+ stream_format, req_in_info,
+ req_bds_out_info,
+ req_out_info, req_vf_info,
+ binary, &dvs_env,
+ descr->stream_config_left_padding,
+ false);
+
+ if (err != IA_CSS_SUCCESS)
+ break;
+ binary_init_metrics(&binary->metrics, &binary->info->sp);
+ break;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() selected = %p, mode = %d ID = %d\n",
+ xcandidate, xcandidate ? xcandidate->sp.pipeline.mode : 0, xcandidate ? xcandidate->sp.id : 0);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() leave: return_err=%d\n", err);
+
+ return err;
+}
+
+unsigned
+ia_css_binary_max_vf_width(void)
+{
+ /* This is (should be) true for IPU1 and IPU2 */
+ /* For IPU3 (SkyCam) this pointer is guaranteed to be NULL simply because such a binary does not exist */
+ if (binary_infos[IA_CSS_BINARY_MODE_VF_PP])
+ return binary_infos[IA_CSS_BINARY_MODE_VF_PP]->sp.output.max_width;
+ return 0;
+}
+
+void
+ia_css_binary_destroy_isp_parameters(struct ia_css_binary *binary)
+{
+ if (binary) {
+ ia_css_isp_param_destroy_isp_parameters(&binary->mem_params,
+ &binary->css_params);
+ }
+}
+
+void
+ia_css_binary_get_isp_binaries(struct ia_css_binary_xinfo **binaries,
+ uint32_t *num_isp_binaries)
+{
+ assert(binaries);
+
+ if (num_isp_binaries)
+ *num_isp_binaries = 0;
+
+ *binaries = all_binaries;
+ if (all_binaries && num_isp_binaries) {
+ /* -1 to account for sp binary which is not stored in all_binaries */
+ if (sh_css_num_binaries > 0)
+ *num_isp_binaries = sh_css_num_binaries - 1;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h
new file mode 100644
index 000000000000..78e433fa3466
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h
@@ -0,0 +1,177 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_BUFQ_H
+#define _IA_CSS_BUFQ_H
+
+#include <type_support.h>
+#include "ia_css_bufq_comm.h"
+#include "ia_css_buffer.h"
+#include "ia_css_err.h"
+#define BUFQ_EVENT_SIZE 4
+
+/**
+ * @brief Query the internal queue ID.
+ *
+ * @param[in]  buf_type  The buffer type.
+ * @param[in]  thread_id The thread ID.
+ * @param[out] val       The internal queue ID mapped to this buffer type.
+ *
+ * @return
+ * true, if the query succeeds;
+ * false, if the query fails.
+ */
+bool ia_css_query_internal_queue_id(
+ enum ia_css_buffer_type buf_type,
+ unsigned int thread_id,
+ enum sh_css_queue_id *val
+);
+
+/**
+ * @brief Map a buffer type to an internal queue ID.
+ *
+ * @param[in] thread_id Thread in which the buffer type has to be mapped or unmapped.
+ * @param[in] buf_type  Buffer type.
+ * @param[in] map       Boolean flag to specify map or unmap.
+ * @return none
+ */
+void ia_css_queue_map(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type,
+ bool map
+);
+
+/**
+ * @brief Initialize buffer type to a queue id mapping
+ * @return none
+ */
+void ia_css_queue_map_init(void);
+
+/**
+ * @brief Initializes the bufq module.
+ * It creates instances of:
+ * - the host to SP buffer queues, a list with a predefined size of
+ *   MxN queues, where M is the number of threads and N is the number of queues per thread
+ * - the SP to host buffer queues, a list with N queues
+ * - the host to SP event communication queue
+ * - the SP to host event communication queue
+ * - the queue for tagger commands
+ * @return none
+ */
+void ia_css_bufq_init(void);
+
+/**
+ * @brief Enqueues an item into the host to SP buffer queue.
+ *
+ * @param[in] thread_index Thread in which the item is to be enqueued.
+ * @param[in] queue_id     Index of the queue in the specified thread.
+ * @param[in] item         Object to enqueue.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ */
+enum ia_css_err ia_css_bufq_enqueue_buffer(
+ int thread_index,
+ int queue_id,
+ uint32_t item);
+
+/**
+ * @brief Dequeues an item from the SP to host buffer queue.
+ *
+ * @param[in]  queue_id Index of the queue in the list from which
+ *                      the item has to be read.
+ * @param[out] item     Object to be dequeued into.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ */
+enum ia_css_err ia_css_bufq_dequeue_buffer(
+ int queue_id,
+ uint32_t *item);
+
+/**
+ * @brief Enqueue an event item into the host to SP event communication queue.
+ *
+ * @param[in] evt_id        The event ID.
+ * @param[in] evt_payload_0 The first event payload byte.
+ * @param[in] evt_payload_1 The second event payload byte.
+ * @param[in] evt_payload_2 The third event payload byte.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ */
+enum ia_css_err ia_css_bufq_enqueue_psys_event(
+ u8 evt_id,
+ u8 evt_payload_0,
+ u8 evt_payload_1,
+ uint8_t evt_payload_2
+);
+
+/**
+ * @brief Dequeue an item from SP to host communication event queue.
+ *
+ * @param[out] item Object to be dequeued into.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ */
+enum ia_css_err ia_css_bufq_dequeue_psys_event(
+ u8 item[BUFQ_EVENT_SIZE]
+
+);
+
+/**
+ * @brief Enqueue an event item into the host to SP EOF event queue.
+ *
+ * @param[in] evt_id The event ID.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_bufq_enqueue_isys_event(
+ uint8_t evt_id);
+
+/**
+ * @brief Dequeue an item from the SP to host EOF event communication queue.
+ *
+ * @param[out] item Object to be dequeued into.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ */
+enum ia_css_err ia_css_bufq_dequeue_isys_event(
+ u8 item[BUFQ_EVENT_SIZE]);
+
+/**
+ * @brief Enqueue a tagger command item into the tagger command queue.
+ *
+ * @param[in] item Object to enqueue.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ */
+enum ia_css_err ia_css_bufq_enqueue_tag_cmd(
+ uint32_t item);
+
+/**
+ * @brief Uninitializes the bufq module.
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ */
+enum ia_css_err ia_css_bufq_deinit(void);
+
+/**
+ * @brief Dump queue states.
+ *
+ * @return None
+ */
+void ia_css_bufq_dump_queue_info(void);
+
+#endif /* _IA_CSS_BUFQ_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h
new file mode 100644
index 000000000000..508209711edc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h
@@ -0,0 +1,50 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_BUFQ_COMM_H
+#define _IA_CSS_BUFQ_COMM_H
+
+#include "system_global.h"
+
+enum sh_css_queue_id {
+ SH_CSS_INVALID_QUEUE_ID = -1,
+ SH_CSS_QUEUE_A_ID = 0,
+ SH_CSS_QUEUE_B_ID,
+ SH_CSS_QUEUE_C_ID,
+ SH_CSS_QUEUE_D_ID,
+ SH_CSS_QUEUE_E_ID,
+ SH_CSS_QUEUE_F_ID,
+ SH_CSS_QUEUE_G_ID,
+#if defined(HAS_NO_INPUT_SYSTEM)
+ /* input frame queue for skycam */
+ SH_CSS_QUEUE_H_ID,
+#endif
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ SH_CSS_QUEUE_H_ID, /* for metadata */
+#endif
+
+#if defined(HAS_NO_INPUT_SYSTEM) || defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#define SH_CSS_MAX_NUM_QUEUES (SH_CSS_QUEUE_H_ID + 1)
+#else
+#define SH_CSS_MAX_NUM_QUEUES (SH_CSS_QUEUE_G_ID + 1)
+#endif
+
+};
+
+#define SH_CSS_MAX_DYNAMIC_BUFFERS_PER_THREAD SH_CSS_MAX_NUM_QUEUES
+/* for now we statically assign queues 0 & 1 to parameter sets */
+#define IA_CSS_PARAMETER_SET_QUEUE_ID SH_CSS_QUEUE_A_ID
+#define IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID SH_CSS_QUEUE_B_ID
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
new file mode 100644
index 000000000000..7e01df257150
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
@@ -0,0 +1,566 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "assert_support.h" /* assert */
+#include "ia_css_buffer.h"
+#include "sp.h"
+#include "ia_css_bufq.h" /* Bufq API's */
+#include "ia_css_queue.h" /* ia_css_queue_t */
+#include "sw_event_global.h" /* Event IDs.*/
+#include "ia_css_eventq.h" /* ia_css_eventq_recv()*/
+#include "ia_css_debug.h" /* ia_css_debug_dtrace*/
+#include "sh_css_internal.h" /* sh_css_queue_type */
+#include "sp_local.h" /* sp_address_of */
+#include "ia_css_util.h" /* ia_css_convert_errno()*/
+#include "sh_css_firmware.h" /* sh_css_sp_fw*/
+
+#define BUFQ_DUMP_FILE_NAME_PREFIX_SIZE 256
+
+static char prefix[BUFQ_DUMP_FILE_NAME_PREFIX_SIZE] = {0};
+
+/*********************************************************/
+/* Global Queue objects used by CSS */
+/*********************************************************/
+
+struct sh_css_queues {
+ /* Host2SP buffer queue */
+ ia_css_queue_t host2sp_buffer_queue_handles
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+ /* SP2Host buffer queue */
+ ia_css_queue_t sp2host_buffer_queue_handles
+ [SH_CSS_MAX_NUM_QUEUES];
+
+ /* Host2SP event queue */
+ ia_css_queue_t host2sp_psys_event_queue_handle;
+
+ /* SP2Host event queue */
+ ia_css_queue_t sp2host_psys_event_queue_handle;
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ /* Host2SP ISYS event queue */
+ ia_css_queue_t host2sp_isys_event_queue_handle;
+
+ /* SP2Host ISYS event queue */
+ ia_css_queue_t sp2host_isys_event_queue_handle;
+#endif
+ /* Tagger command queue */
+ ia_css_queue_t host2sp_tag_cmd_queue_handle;
+};
+
+/*******************************************************
+*** Static variables
+********************************************************/
+static struct sh_css_queues css_queues;
+
+static int
+buffer_type_to_queue_id_map[SH_CSS_MAX_SP_THREADS][IA_CSS_NUM_DYNAMIC_BUFFER_TYPE];
+static bool queue_availability[SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+
+/*******************************************************
+*** Static functions
+********************************************************/
+static void map_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type
+);
+static void unmap_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type
+);
+
+static ia_css_queue_t *bufq_get_qhandle(
+ enum sh_css_queue_type type,
+ enum sh_css_queue_id id,
+ int thread
+);
+
+/*******************************************************
+*** Public functions
+********************************************************/
+void ia_css_queue_map_init(void)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++)
+ queue_availability[i][j] = true;
+ }
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE; j++)
+ buffer_type_to_queue_id_map[i][j] = SH_CSS_INVALID_QUEUE_ID;
+ }
+}
+
+void ia_css_queue_map(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type,
+ bool map)
+{
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ assert(thread_id < SH_CSS_MAX_SP_THREADS);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_queue_map() enter: buf_type=%d, thread_id=%d\n", buf_type, thread_id);
+
+ if (map)
+ map_buffer_type_to_queue_id(thread_id, buf_type);
+ else
+ unmap_buffer_type_to_queue_id(thread_id, buf_type);
+}
+
+/*
+ * @brief Query the internal queue ID.
+ */
+bool ia_css_query_internal_queue_id(
+ enum ia_css_buffer_type buf_type,
+ unsigned int thread_id,
+ enum sh_css_queue_id *val)
+{
+ IA_CSS_ENTER("buf_type=%d, thread_id=%d, val = %p", buf_type, thread_id, val);
+
+ if ((!val) || (thread_id >= SH_CSS_MAX_SP_THREADS) ||
+ (buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE)) {
+ IA_CSS_LEAVE("return_val = false");
+ return false;
+ }
+
+ *val = buffer_type_to_queue_id_map[thread_id][buf_type];
+ if ((*val == SH_CSS_INVALID_QUEUE_ID) || (*val >= SH_CSS_MAX_NUM_QUEUES)) {
+ IA_CSS_LOG("INVALID queue ID MAP = %d\n", *val);
+ IA_CSS_LEAVE("return_val = false");
+ return false;
+ }
+ IA_CSS_LEAVE("return_val = true");
+ return true;
+}
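+
+/* Typical usage (illustrative sketch only, not a call site in this file):
+ *
+ *	enum sh_css_queue_id qid;
+ *
+ *	if (ia_css_query_internal_queue_id(buf_type, thread_id, &qid))
+ *		(void)ia_css_bufq_enqueue_buffer(thread_id, qid, payload);
+ */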
+
+/*******************************************************
+*** Static functions
+********************************************************/
+static void map_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type)
+{
+ unsigned int i;
+
+ assert(thread_id < SH_CSS_MAX_SP_THREADS);
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ assert(buffer_type_to_queue_id_map[thread_id][buf_type] ==
+ SH_CSS_INVALID_QUEUE_ID);
+
+ /* queue 0 is reserved for parameters because it doesn't depend on events */
+ if (buf_type == IA_CSS_BUFFER_TYPE_PARAMETER_SET) {
+ assert(queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID]);
+ queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID] = false;
+ buffer_type_to_queue_id_map[thread_id][buf_type] =
+ IA_CSS_PARAMETER_SET_QUEUE_ID;
+ return;
+ }
+
+ /* queue 1 is reserved for per frame parameters because it doesn't depend on events */
+ if (buf_type == IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET) {
+ assert(queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID]);
+ queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID] = false;
+ buffer_type_to_queue_id_map[thread_id][buf_type] =
+ IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID;
+ return;
+ }
+
+ for (i = SH_CSS_QUEUE_C_ID; i < SH_CSS_MAX_NUM_QUEUES; i++) {
+ if (queue_availability[thread_id][i]) {
+ queue_availability[thread_id][i] = false;
+ buffer_type_to_queue_id_map[thread_id][buf_type] = i;
+ break;
+ }
+ }
+
+ assert(i != SH_CSS_MAX_NUM_QUEUES);
+ return;
+}
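+
+/* Example (illustrative): queues A and B are pre-reserved above for the
+ * (per-frame) parameter sets, so the first dynamic buffer type mapped on a
+ * thread gets SH_CSS_QUEUE_C_ID, the next SH_CSS_QUEUE_D_ID, and so on;
+ * running out of queues trips the assert at the end of
+ * map_buffer_type_to_queue_id().
+ */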
+
+static void unmap_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type)
+{
+ int queue_id;
+
+ assert(thread_id < SH_CSS_MAX_SP_THREADS);
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ assert(buffer_type_to_queue_id_map[thread_id][buf_type] !=
+ SH_CSS_INVALID_QUEUE_ID);
+
+ queue_id = buffer_type_to_queue_id_map[thread_id][buf_type];
+ buffer_type_to_queue_id_map[thread_id][buf_type] = SH_CSS_INVALID_QUEUE_ID;
+ queue_availability[thread_id][queue_id] = true;
+}
+
+static ia_css_queue_t *bufq_get_qhandle(
+ enum sh_css_queue_type type,
+ enum sh_css_queue_id id,
+ int thread)
+{
+ ia_css_queue_t *q = NULL;
+
+ switch (type) {
+ case sh_css_host2sp_buffer_queue:
+ if ((thread >= SH_CSS_MAX_SP_THREADS) || (thread < 0) ||
+ (id == SH_CSS_INVALID_QUEUE_ID))
+ break;
+ q = &css_queues.host2sp_buffer_queue_handles[thread][id];
+ break;
+ case sh_css_sp2host_buffer_queue:
+ if (id == SH_CSS_INVALID_QUEUE_ID)
+ break;
+ q = &css_queues.sp2host_buffer_queue_handles[id];
+ break;
+ case sh_css_host2sp_psys_event_queue:
+ q = &css_queues.host2sp_psys_event_queue_handle;
+ break;
+ case sh_css_sp2host_psys_event_queue:
+ q = &css_queues.sp2host_psys_event_queue_handle;
+ break;
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ case sh_css_host2sp_isys_event_queue:
+ q = &css_queues.host2sp_isys_event_queue_handle;
+ break;
+ case sh_css_sp2host_isys_event_queue:
+ q = &css_queues.sp2host_isys_event_queue_handle;
+ break;
+#endif
+ case sh_css_host2sp_tag_cmd_queue:
+ q = &css_queues.host2sp_tag_cmd_queue_handle;
+ break;
+ default:
+ break;
+ }
+
+ return q;
+}
+
+/* Local function to initialize a buffer queue. This reduces
+ * the chances of copy-paste errors or typos.
+ */
+static inline void
+init_bufq(unsigned int desc_offset,
+ unsigned int elems_offset,
+ ia_css_queue_t *handle)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int q_base_addr;
+ ia_css_queue_remote_t remoteq;
+
+ fw = &sh_css_sp_fw;
+ q_base_addr = fw->info.sp.host_sp_queue;
+
+ /* Setup queue location as SP and proc id as SP0_ID */
+ remoteq.location = IA_CSS_QUEUE_LOC_SP;
+ remoteq.proc_id = SP0_ID;
+ remoteq.cb_desc_addr = q_base_addr + desc_offset;
+ remoteq.cb_elems_addr = q_base_addr + elems_offset;
+ /* Initialize the queue instance and obtain handle */
+ ia_css_queue_remote_init(handle, &remoteq);
+}
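+
+/*
+ * Address sketch (illustrative): for every queue, init_bufq() resolves the
+ * remote circular buffer inside SP DMEM as
+ *
+ * cb_desc_addr = fw->info.sp.host_sp_queue + desc_offset;
+ * cb_elems_addr = fw->info.sp.host_sp_queue + elems_offset;
+ *
+ * where both offsets are offsetof() values into struct host_sp_queues, as
+ * shown by the callers below.
+ */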
+
+void ia_css_bufq_init(void)
+{
+ int i, j;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* Setup all the local queue descriptors for Host2SP Buffer Queues */
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) {
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ host2sp_buffer_queues_desc[i][j]),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_buffer_queues_elems[i][j]),
+ &css_queues.host2sp_buffer_queue_handles[i][j]);
+ }
+ }
+
+ /* Setup all the local queue descriptors for SP2Host Buffer Queues */
+ for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) {
+ init_bufq(offsetof(struct host_sp_queues, sp2host_buffer_queues_desc[i]),
+ offsetof(struct host_sp_queues, sp2host_buffer_queues_elems[i]),
+ &css_queues.sp2host_buffer_queue_handles[i]);
+ }
+
+ /* Host2SP event queue*/
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ host2sp_psys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_psys_event_queue_elems),
+ &css_queues.host2sp_psys_event_queue_handle);
+
+ /* SP2Host event queue */
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ sp2host_psys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, sp2host_psys_event_queue_elems),
+ &css_queues.sp2host_psys_event_queue_handle);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ /* Host2SP ISYS event queue */
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ host2sp_isys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_isys_event_queue_elems),
+ &css_queues.host2sp_isys_event_queue_handle);
+
+ /* SP2Host ISYS event queue*/
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ sp2host_isys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, sp2host_isys_event_queue_elems),
+ &css_queues.sp2host_isys_event_queue_handle);
+
+ /* Host2SP tagger command queue */
+ init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_elems),
+ &css_queues.host2sp_tag_cmd_queue_handle);
+#endif
+
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+enum ia_css_err ia_css_bufq_enqueue_buffer(
+ int thread_index,
+ int queue_id,
+ uint32_t item)
+{
+ enum ia_css_err return_err = IA_CSS_SUCCESS;
+ ia_css_queue_t *q;
+ int error;
+
+ IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id);
+ if ((thread_index >= SH_CSS_MAX_SP_THREADS) || (thread_index < 0) ||
+ (queue_id == SH_CSS_INVALID_QUEUE_ID))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* Get the queue for communication */
+ q = bufq_get_qhandle(sh_css_host2sp_buffer_queue,
+ queue_id,
+ thread_index);
+ if (q) {
+ error = ia_css_queue_enqueue(q, item);
+ return_err = ia_css_convert_errno(error);
+ } else {
+ IA_CSS_ERROR("queue is not initialized");
+ return_err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+}
+
+enum ia_css_err ia_css_bufq_dequeue_buffer(
+ int queue_id,
+ uint32_t *item)
+{
+ enum ia_css_err return_err;
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id);
+ if ((!item) ||
+ (queue_id <= SH_CSS_INVALID_QUEUE_ID) ||
+ (queue_id >= SH_CSS_MAX_NUM_QUEUES)
+ )
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ q = bufq_get_qhandle(sh_css_sp2host_buffer_queue,
+ queue_id,
+ -1);
+ if (q) {
+ error = ia_css_queue_dequeue(q, item);
+ return_err = ia_css_convert_errno(error);
+ } else {
+ IA_CSS_ERROR("queue is not initialized");
+ return_err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+}
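+
+/*
+ * Illustrative round trip (not compiled in; the thread index, queue ID and
+ * buffer token are only examples): the host hands a buffer to SP thread 0
+ * and later drains the matching return queue.
+ *
+ * u32 token;
+ *
+ * ia_css_bufq_enqueue_buffer(0, SH_CSS_QUEUE_C_ID, 0xcafe);
+ * ... SP processes the buffer and posts it back ...
+ * ia_css_bufq_dequeue_buffer(SH_CSS_QUEUE_C_ID, &token);
+ */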
+
+enum ia_css_err ia_css_bufq_enqueue_psys_event(
+ u8 evt_id,
+ u8 evt_payload_0,
+ u8 evt_payload_1,
+ uint8_t evt_payload_2)
+{
+ enum ia_css_err return_err;
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("evt_id=%d", evt_id);
+ q = bufq_get_qhandle(sh_css_host2sp_psys_event_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ error = ia_css_eventq_send(q,
+ evt_id, evt_payload_0, evt_payload_1, evt_payload_2);
+
+ return_err = ia_css_convert_errno(error);
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+}
+
+enum ia_css_err ia_css_bufq_dequeue_psys_event(
+ u8 item[BUFQ_EVENT_SIZE])
+{
+ int error = 0;
+ ia_css_queue_t *q;
+
+ /* No ENTER/LEAVE in this function since this is polled
+ * by some test apps. Enabling logging here floods the log
+ * files which may cause timeouts. */
+ if (!item)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ q = bufq_get_qhandle(sh_css_sp2host_psys_event_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ error = ia_css_eventq_recv(q, item);
+
+ return ia_css_convert_errno(error);
+}
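+
+/*
+ * Illustrative polling sketch (not compiled in): test code typically spins on
+ * this dequeue until an event arrives, which is exactly why the ENTER/LEAVE
+ * tracing is omitted above.
+ *
+ * u8 evt[BUFQ_EVENT_SIZE];
+ *
+ * while (ia_css_bufq_dequeue_psys_event(evt) != IA_CSS_SUCCESS)
+ * ;
+ */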
+
+enum ia_css_err ia_css_bufq_dequeue_isys_event(
+ u8 item[BUFQ_EVENT_SIZE])
+{
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ int error = 0;
+ ia_css_queue_t *q;
+
+ /* No ENTER/LEAVE in this function since this is polled
+ * by some test apps. Enabling logging here floods the log
+ * files which may cause timeouts. */
+ if (!item)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ q = bufq_get_qhandle(sh_css_sp2host_isys_event_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ error = ia_css_eventq_recv(q, item);
+ return ia_css_convert_errno(error);
+#else
+ (void)item;
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+#endif
+}
+
+enum ia_css_err ia_css_bufq_enqueue_isys_event(uint8_t evt_id)
+{
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ enum ia_css_err return_err;
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("event_id=%d", evt_id);
+ q = bufq_get_qhandle(sh_css_host2sp_isys_event_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ error = ia_css_eventq_send(q, evt_id, 0, 0, 0);
+ return_err = ia_css_convert_errno(error);
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+#else
+ (void)evt_id;
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+#endif
+}
+
+enum ia_css_err ia_css_bufq_enqueue_tag_cmd(
+ uint32_t item)
+{
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ enum ia_css_err return_err;
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("item=%d", item);
+ q = bufq_get_qhandle(sh_css_host2sp_tag_cmd_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ error = ia_css_queue_enqueue(q, item);
+
+ return_err = ia_css_convert_errno(error);
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+#else
+ (void)item;
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+#endif
+}
+
+enum ia_css_err ia_css_bufq_deinit(void)
+{
+ return IA_CSS_SUCCESS;
+}
+
+static void bufq_dump_queue_info(const char *prefix, ia_css_queue_t *qhandle)
+{
+ u32 free = 0, used = 0;
+
+ assert(prefix && qhandle);
+ ia_css_queue_get_used_space(qhandle, &used);
+ ia_css_queue_get_free_space(qhandle, &free);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: used=%u free=%u\n",
+ prefix, used, free);
+}
+
+void ia_css_bufq_dump_queue_info(void)
+{
+ int i, j;
+ char prefix[BUFQ_DUMP_FILE_NAME_PREFIX_SIZE] = {0};
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Queue Information:\n");
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) {
+ snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
+ "host2sp_buffer_queue[%u][%u]", i, j);
+ bufq_dump_queue_info(prefix,
+ &css_queues.host2sp_buffer_queue_handles[i][j]);
+ }
+ }
+
+ for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) {
+ snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
+ "sp2host_buffer_queue[%u]", i);
+ bufq_dump_queue_info(prefix,
+ &css_queues.sp2host_buffer_queue_handles[i]);
+ }
+ bufq_dump_queue_info("host2sp_psys_event",
+ &css_queues.host2sp_psys_event_queue_handle);
+ bufq_dump_queue_info("sp2host_psys_event",
+ &css_queues.sp2host_psys_event_queue_handle);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ bufq_dump_queue_info("host2sp_isys_event",
+ &css_queues.host2sp_isys_event_queue_handle);
+ bufq_dump_queue_info("sp2host_isys_event",
+ &css_queues.sp2host_isys_event_queue_handle);
+ bufq_dump_queue_info("host2sp_tag_cmd",
+ &css_queues.host2sp_tag_cmd_queue_handle);
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
new file mode 100644
index 000000000000..61d612ec3a05
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
@@ -0,0 +1,502 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_DEBUG_H_
+#define _IA_CSS_DEBUG_H_
+
+/*! \file */
+
+#include <type_support.h>
+#include <stdarg.h>
+#include "ia_css_types.h"
+#include "ia_css_binary.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_metadata.h"
+#include "sh_css_internal.h"
+/* ISP2500 */
+#include "ia_css_pipe.h"
+
+/* available levels */
+/*! Level for tracing errors */
+#define IA_CSS_DEBUG_ERROR 1
+/*! Level for tracing warnings */
+#define IA_CSS_DEBUG_WARNING 3
+/*! Level for tracing debug messages */
+#define IA_CSS_DEBUG_VERBOSE 5
+/*! Level for tracing trace messages, e.g. ia_css public function calls */
+#define IA_CSS_DEBUG_TRACE 6
+/*! Level for tracing trace messages, e.g. ia_css private function calls */
+#define IA_CSS_DEBUG_TRACE_PRIVATE 7
+/*! Level for tracing parameter messages e.g. in and out params of functions */
+#define IA_CSS_DEBUG_PARAM 8
+/*! Level for tracing info messages */
+#define IA_CSS_DEBUG_INFO 9
+/* Global variable which controls the verbosity levels of the debug tracing */
+extern unsigned int ia_css_debug_trace_level;
+
+/*! @brief Enum defining the different isp parameters to dump.
+ * Values can be combined to dump a combination of sets.
+ */
+enum ia_css_debug_enable_param_dump {
+ IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /** FPN table */
+ IA_CSS_DEBUG_DUMP_OB = 1 << 1, /** OB table */
+ IA_CSS_DEBUG_DUMP_SC = 1 << 2, /** Shading table */
+ IA_CSS_DEBUG_DUMP_WB = 1 << 3, /** White balance */
+ IA_CSS_DEBUG_DUMP_DP = 1 << 4, /** Defect Pixel */
+ IA_CSS_DEBUG_DUMP_BNR = 1 << 5, /** Bayer Noise Reductions */
+ IA_CSS_DEBUG_DUMP_S3A = 1 << 6, /** 3A Statistics */
+ IA_CSS_DEBUG_DUMP_DE = 1 << 7, /** De Mosaicing */
+ IA_CSS_DEBUG_DUMP_YNR = 1 << 8, /** Luma Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CSC = 1 << 9, /** Color Space Conversion */
+ IA_CSS_DEBUG_DUMP_GC = 1 << 10, /** Gamma Correction */
+ IA_CSS_DEBUG_DUMP_TNR = 1 << 11, /** Temporal Noise Reduction */
+ IA_CSS_DEBUG_DUMP_ANR = 1 << 12, /** Advanced Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CE = 1 << 13, /** Chroma Enhancement */
+ IA_CSS_DEBUG_DUMP_ALL = 1 << 14 /** Dump all device parameters */
+};
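+
+/*
+ * The values are single bits, so callers can OR them together to dump several
+ * parameter sets at once; an illustrative call (stream is a hypothetical
+ * stream pointer):
+ *
+ * ia_css_debug_dump_isp_params(stream,
+ * IA_CSS_DEBUG_DUMP_WB | IA_CSS_DEBUG_DUMP_GC);
+ */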
+
+#define IA_CSS_ERROR(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, \
+ "%s() %d: error: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+#define IA_CSS_WARNING(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_WARNING, \
+ "%s() %d: warning: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/* Logging macros for public functions (API functions) */
+#define IA_CSS_ENTER(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s(): enter: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Use this macro for small functions that do not call other functions. */
+#define IA_CSS_ENTER_LEAVE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s(): enter: leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#define IA_CSS_LEAVE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s(): leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Shorthand for returning an enum ia_css_err return value */
+#define IA_CSS_LEAVE_ERR(__err) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s() %d: leave: return_err=%d\n", __func__, __LINE__, __err)
+
+/* Use this macro for logging other than enter/leave.
+ * Note that this macro always uses the PRIVATE logging level.
+ */
+#define IA_CSS_LOG(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Logging macros for non-API functions. These have a lower trace level */
+#define IA_CSS_ENTER_PRIVATE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): enter: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#define IA_CSS_LEAVE_PRIVATE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Shorthand for returning an enum ia_css_err return value */
+#define IA_CSS_LEAVE_ERR_PRIVATE(__err) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s() %d: leave: return_err=%d\n", __func__, __LINE__, __err)
+
+/* Use this macro for small functions that do not call other functions. */
+#define IA_CSS_ENTER_LEAVE_PRIVATE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): enter: leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/*! @brief Function for tracing to the provided printf function in the
+ * environment.
+ * @param[in] level Level of the message.
+ * @param[in] fmt printf like format string
+ * @param[in] args arguments for the format string
+ */
+static inline void
+ia_css_debug_vdtrace(unsigned int level, const char *fmt, va_list args)
+{
+ if (ia_css_debug_trace_level >= level)
+ sh_css_vprint(fmt, args);
+}
+
+__printf(2, 3)
+void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...);
+
+/*! @brief Dump sp thread's stack contents
+ * SP thread's stack contents are set to 0xcafecafe. This function dumps the
+ * stack to inspect if the stack's boundaries are compromised.
+ * @return None
+ */
+void ia_css_debug_dump_sp_stack_info(void);
+
+/*! @brief Function to set the global dtrace verbosity level.
+ * @param[in] trace_level Maximum level of the messages to be traced.
+ * @return None
+ */
+void ia_css_debug_set_dtrace_level(
+ const unsigned int trace_level);
+
+/*! @brief Function to get the global dtrace verbosity level.
+ * @return global dtrace verbosity level
+ */
+unsigned int ia_css_debug_get_dtrace_level(void);
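+
+/*
+ * Illustrative sketch: temporarily raising the verbosity around an operation
+ * and restoring it afterwards.
+ *
+ * unsigned int old_level = ia_css_debug_get_dtrace_level();
+ *
+ * ia_css_debug_set_dtrace_level(IA_CSS_DEBUG_TRACE_PRIVATE);
+ * ... traced operation ...
+ * ia_css_debug_set_dtrace_level(old_level);
+ */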
+
+/*! @brief Dump input formatter state.
+ * Dumps the input formatter state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_if_state(void);
+
+/*! @brief Dump isp hardware state.
+ * Dumps the isp hardware state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_isp_state(void);
+
+/*! @brief Dump sp hardware state.
+ * Dumps the sp hardware state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_sp_state(void);
+
+/* ISP2401 */
+/*! @brief Dump GAC hardware state.
+ * Dumps the GAC ACB hardware registers; may be useful for
+ * detecting a GAC that has hung.
+ * @return None
+ */
+void ia_css_debug_dump_gac_state(void);
+
+/*! @brief Dump dma controller state.
+ * Dumps the dma controller state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_dma_state(void);
+
+/*! @brief Dump internal sp software state.
+ * Dumps the sp software state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_sp_sw_debug_info(void);
+
+/*! @brief Dump all related hardware state to the trace output
+ * @param[in] context String to identify context in output.
+ * @return None
+ */
+void ia_css_debug_dump_debug_info(
+ const char *context);
+
+#if SP_DEBUG != SP_DEBUG_NONE
+void ia_css_debug_print_sp_debug_state(
+ const struct sh_css_sp_debug_state *state);
+#endif
+
+/*! @brief Dump all related binary info data
+ * @param[in] bi Binary info struct.
+ * @return None
+ */
+void ia_css_debug_binary_print(
+ const struct ia_css_binary *bi);
+
+void ia_css_debug_sp_dump_mipi_fifo_high_water(void);
+
+/*! @brief Dump isp gdc fifo state to the trace output
+ * Dumps the isp gdc fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_isp_gdc_fifo_state(void);
+
+/*! @brief Dump dma isp fifo state
+ * Dumps the dma isp fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_dma_isp_fifo_state(void);
+
+/*! @brief Dump dma sp fifo state
+ * Dumps the dma sp fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_dma_sp_fifo_state(void);
+
+/*! @brief Dump pif A isp fifo state
+ * Dumps the primary input formatter state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_pif_a_isp_fifo_state(void);
+
+/*! @brief Dump pif B isp fifo state
+ * Dumps the primary input formatter state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_pif_b_isp_fifo_state(void);
+
+/*! @brief Dump stream-to-memory sp fifo state
+ * Dumps the stream-to-memory block state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_str2mem_sp_fifo_state(void);
+
+/*! @brief Dump isp sp fifo state
+ * Dumps the isp sp fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_isp_sp_fifo_state(void);
+
+/*! @brief Dump all fifo state info to the output
+ * Dumps all fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_all_fifo_state(void);
+
+/*! @brief Dump the rx state to the output
+ * Dumps the rx state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_rx_state(void);
+
+/*! @brief Dump the input system state to the output
+ * Dumps the input system state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_isys_state(void);
+
+/*! @brief Dump the frame info to the trace output
+ * Dumps the frame info to tracing output.
+ * @param[in] frame pointer to struct ia_css_frame
+ * @param[in] descr description output along with the frame info
+ * @return None
+ */
+void ia_css_debug_frame_print(
+ const struct ia_css_frame *frame,
+ const char *descr);
+
+/*! @brief Function to enable sp sleep mode.
+ * Function that enables sp sleep mode
+ * @param[in] mode indicates when to put sp to sleep
+ * @return None
+ */
+void ia_css_debug_enable_sp_sleep_mode(enum ia_css_sp_sleep_mode mode);
+
+/*! @brief Function to wake up sp when in sleep mode.
+ * After sp has been put to sleep, use this function to let it continue
+ * to run again.
+ * @return None
+ */
+void ia_css_debug_wake_up_sp(void);
+
+/*! @brief Function to dump isp parameters.
+ * Dump isp parameters to tracing output
+ * @param[in] stream pointer to ia_css_stream struct
+ * @param[in] enable flag indicating which parameters to dump.
+ * @return None
+ */
+void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
+ unsigned int enable);
+
+/*! @brief Function to dump some sp performance counters.
+ * Dump sp performance counters, currently input system errors.
+ * @return None
+ */
+void ia_css_debug_dump_perf_counters(void);
+
+#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
+void sh_css_dump_thread_wait_info(void);
+void sh_css_dump_pipe_stage_info(void);
+void sh_css_dump_pipe_stripe_info(void);
+#endif
+
+void ia_css_debug_dump_isp_binary(void);
+
+void sh_css_dump_sp_raw_copy_linecount(bool reduced);
+
+/*! @brief Dump the resolution info to the trace output
+ * Dumps the resolution info to the trace output.
+ * @param[in] res pointer to struct ia_css_resolution
+ * @param[in] label description of resolution output
+ * @return None
+ */
+void ia_css_debug_dump_resolution(
+ const struct ia_css_resolution *res,
+ const char *label);
+
+/*! @brief Dump the frame info to the trace output
+ * Dumps the frame info to the trace output.
+ * @param[in] info pointer to struct ia_css_frame_info
+ * @param[in] label description of frame_info output
+ * @return None
+ */
+void ia_css_debug_dump_frame_info(
+ const struct ia_css_frame_info *info,
+ const char *label);
+
+/*! @brief Dump the capture config info to the trace output
+ * Dumps the capture config info to the trace output.
+ * @param[in] config pointer to struct ia_css_capture_config
+ * @return None
+ */
+void ia_css_debug_dump_capture_config(
+ const struct ia_css_capture_config *config);
+
+/*! @brief Dump the pipe extra config info to the trace output
+ * Dumps the pipe extra config info to the trace output.
+ * @param[in] extra_config pointer to struct ia_css_pipe_extra_config
+ * @return None
+ */
+void ia_css_debug_dump_pipe_extra_config(
+ const struct ia_css_pipe_extra_config *extra_config);
+
+/*! @brief Dump the pipe config info to the trace output
+ * Dumps the pipe config info to the trace output.
+ * @param[in] config pointer to struct ia_css_pipe_config
+ * @return None
+ */
+void ia_css_debug_dump_pipe_config(
+ const struct ia_css_pipe_config *config);
+
+/*! @brief Dump the stream config source info to the trace output
+ * Dumps the stream config source info to the trace output.
+ * @param[in] config pointer to struct ia_css_stream_config
+ * @return None
+ */
+void ia_css_debug_dump_stream_config_source(
+ const struct ia_css_stream_config *config);
+
+/*! @brief Dump the mipi buffer config info to the trace output
+ * Dumps the mipi buffer config info to the trace output.
+ * @param[in] config pointer to struct ia_css_mipi_buffer_config
+ * @return None
+ */
+void ia_css_debug_dump_mipi_buffer_config(
+ const struct ia_css_mipi_buffer_config *config);
+
+/*! @brief Dump the metadata config info to the trace output
+ * Dumps the metadata config info to the trace output.
+ * @param[in] config pointer to struct ia_css_metadata_config
+ * @return None
+ */
+void ia_css_debug_dump_metadata_config(
+ const struct ia_css_metadata_config *config);
+
+/*! @brief Dump the stream config info to the trace output
+ * Dumps the stream config info to the trace output.
+ * @param[in] config pointer to struct ia_css_stream_config
+ * @param[in] num_pipes number of pipes for the stream
+ * @return None
+ */
+void ia_css_debug_dump_stream_config(
+ const struct ia_css_stream_config *config,
+ int num_pipes);
+
+/*! @brief Dump the state of the SP tagger
+ * Dumps the internal state of the SP tagger
+ * @return None
+ */
+void ia_css_debug_tagger_state(void);
+
+/**
+ * @brief Initialize the debug mode.
+ *
+ * WARNING:
+ * This API should be called ONLY once in debug mode.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool ia_css_debug_mode_init(void);
+
+/**
+ * @brief Disable the DMA channel.
+ *
+ * @param[in] dma_ID The ID of the target DMA.
+ * @param[in] channel_id The ID of the target DMA channel.
+ * @param[in] request_type The type of the DMA request.
+ * For example:
+ * - "0" indicates the writing request.
+ * - "1" indicates the reading request.
+ *
+ * This is part of the DMA API; see dma.h.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool ia_css_debug_mode_disable_dma_channel(
+ int dma_ID,
+ int channel_id,
+ int request_type);
+/**
+ * @brief Enable the DMA channel.
+ *
+ * @param[in] dma_ID The ID of the target DMA.
+ * @param[in] channel_id The ID of the target DMA channel.
+ * @param[in] request_type The type of the DMA request.
+ * For example:
+ * - "0" indicates the writing request.
+ * - "1" indicates the reading request.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool ia_css_debug_mode_enable_dma_channel(
+ int dma_ID,
+ int channel_id,
+ int request_type);
+
+/**
+ * @brief Dump tracer data.
+ * [Currently supported only for SKC]
+ *
+ * @return
+ * - none.
+ */
+void ia_css_debug_dump_trace(void);
+
+/* ISP2401 */
+/**
+ * @brief Program counter dumping (in loop)
+ *
+ * @param[in] id The ID of the SP
+ * @param[in] num_of_dumps The number of dumps
+ *
+ * @return
+ * - none
+ */
+void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps);
+
+/* ISP2500 */
+/*! @brief Dump all states for ISP hang case.
+ * Dumps the ISP previous and current configurations,
+ * the GAC statuses and the SP0/1 statuses.
+ *
+ * @param[in] pipe The current pipe
+ *
+ * @return None
+ */
+void ia_css_debug_dump_hang_status(
+ struct ia_css_pipe *pipe);
+
+/*! @brief External command handler
+ * External command handler
+ *
+ * @return None
+ */
+void ia_css_debug_ext_command_handler(void);
+
+#endif /* _IA_CSS_DEBUG_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h
new file mode 100644
index 000000000000..27136381857f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h
@@ -0,0 +1,15 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* TODO: Move debug-related code from ia_css_internal.h into this file. */
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h
new file mode 100644
index 000000000000..e9964bb421d6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h
@@ -0,0 +1,67 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_DEBUG_PIPE_H_
+#define _IA_CSS_DEBUG_PIPE_H_
+
+/*! \file */
+
+#include <ia_css_frame_public.h>
+#include <ia_css_stream_public.h>
+#include "ia_css_pipeline.h"
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_prologue(void);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_epilogue(void);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ * @param[in] stage Pipeline stage.
+ * @param[in] id Pipe id.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_stage(
+ struct ia_css_pipeline_stage *stage,
+ enum ia_css_pipe_id id);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ * @param[in] out_frame Output frame of SP raw copy.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_sp_raw_copy(
+ struct ia_css_frame *out_frame);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ * @param[in] stream_config info about sensor and input formatter.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_stream_config(
+ const struct ia_css_stream_config *stream_config);
+
+#endif /* _IA_CSS_DEBUG_PIPE_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
new file mode 100644
index 000000000000..6fadc20104bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
@@ -0,0 +1,3540 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "debug.h"
+#include "memory_access.h"
+
+#ifndef __INLINE_INPUT_SYSTEM__
+#define __INLINE_INPUT_SYSTEM__
+#endif
+#ifndef __INLINE_IBUF_CTRL__
+#define __INLINE_IBUF_CTRL__
+#endif
+#ifndef __INLINE_CSI_RX__
+#define __INLINE_CSI_RX__
+#endif
+#ifndef __INLINE_PIXELGEN__
+#define __INLINE_PIXELGEN__
+#endif
+#ifndef __INLINE_STREAM2MMIO__
+#define __INLINE_STREAM2MMIO__
+#endif
+
+#include "ia_css_debug.h"
+#include "ia_css_debug_pipe.h"
+#include "ia_css_irq.h"
+#include "ia_css_stream.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_params.h"
+#include "ia_css_bufq.h"
+/* ISP2401 */
+#include "ia_css_queue.h"
+
+#include "ia_css_isp_params.h"
+
+#include "system_local.h"
+#include "assert_support.h"
+#include "print_support.h"
+#include "string_support.h"
+
+#include "fifo_monitor.h"
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "input_formatter.h"
+#endif
+#include "dma.h"
+#include "irq.h"
+#include "gp_device.h"
+#include "sp.h"
+#include "isp.h"
+#include "type_support.h"
+#include "math_support.h" /* CEIL_DIV */
+#if defined(HAS_INPUT_FORMATTER_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#include "input_system.h" /* input_formatter_reg_load */
+#endif
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#include "ia_css_tagger_common.h"
+#endif
+
+#include "sh_css_internal.h"
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "ia_css_isys.h"
+#endif
+#include "sh_css_sp.h" /* sh_css_sp_get_debug_state() */
+
+#include "css_trace.h" /* tracer */
+
+#include "device_access.h" /* for ia_css_device_load_uint32 */
+
+/* Include all kernel host interfaces for ISP1 */
+#include "anr/anr_1.0/ia_css_anr.host.h"
+#include "cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "csc/csc_1.0/ia_css_csc.host.h"
+#include "de/de_1.0/ia_css_de.host.h"
+#include "dp/dp_1.0/ia_css_dp.host.h"
+#include "bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "gc/gc_1.0/ia_css_gc.host.h"
+#include "ob/ob_1.0/ia_css_ob.host.h"
+#include "s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "sc/sc_1.0/ia_css_sc.host.h"
+#include "tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "uds/uds_1.0/ia_css_uds_param.h"
+#include "wb/wb_1.0/ia_css_wb.host.h"
+#include "ynr/ynr_1.0/ia_css_ynr.host.h"
+
+/* Include additional kernel host interfaces for ISP2 */
+#include "aa/aa_2/ia_css_aa2.host.h"
+#include "anr/anr_2/ia_css_anr2.host.h"
+#include "cnr/cnr_2/ia_css_cnr2.host.h"
+#include "de/de_2/ia_css_de2.host.h"
+#include "gc/gc_2/ia_css_gc2.host.h"
+#include "ynr/ynr_2/ia_css_ynr2.host.h"
+
+/* Global variable to store the dtrace verbosity level */
+unsigned int ia_css_debug_trace_level = IA_CSS_DEBUG_WARNING;
+
+#define DPG_START "ia_css_debug_pipe_graph_dump_start "
+#define DPG_END " ia_css_debug_pipe_graph_dump_end\n"
+
+#define ENABLE_LINE_MAX_LENGTH (25)
+
+#ifdef ISP2401
+#define DBG_EXT_CMD_TRACE_PNTS_DUMP BIT(8)
+#define DBG_EXT_CMD_PUB_CFG_DUMP BIT(9)
+#define DBG_EXT_CMD_GAC_REG_DUMP BIT(10)
+#define DBG_EXT_CMD_GAC_ACB_REG_DUMP BIT(11)
+#define DBG_EXT_CMD_FIFO_DUMP BIT(12)
+#define DBG_EXT_CMD_QUEUE_DUMP BIT(13)
+#define DBG_EXT_CMD_DMA_DUMP BIT(14)
+#define DBG_EXT_CMD_MASK 0xAB0000CD
+
+#endif
+/*
+ * TODO: SH_CSS_MAX_SP_THREADS is not the maximum number of SP threads;
+ * future rework should fix this and remove the MAX_THREAD_NUM define.
+ */
+#define MAX_THREAD_NUM (SH_CSS_MAX_SP_THREADS + SH_CSS_MAX_SP_INTERNAL_THREADS)
+
+static struct pipe_graph_class {
+ bool do_init;
+ int height;
+ int width;
+ int eff_height;
+ int eff_width;
+ enum atomisp_input_format stream_format;
+} pg_inst = {true, 0, 0, 0, 0, N_ATOMISP_INPUT_FORMAT};
+
+static const char *const queue_id_to_str[] = {
+ /* [SH_CSS_QUEUE_A_ID] =*/ "queue_A",
+ /* [SH_CSS_QUEUE_B_ID] =*/ "queue_B",
+ /* [SH_CSS_QUEUE_C_ID] =*/ "queue_C",
+ /* [SH_CSS_QUEUE_D_ID] =*/ "queue_D",
+ /* [SH_CSS_QUEUE_E_ID] =*/ "queue_E",
+ /* [SH_CSS_QUEUE_F_ID] =*/ "queue_F",
+ /* [SH_CSS_QUEUE_G_ID] =*/ "queue_G",
+ /* [SH_CSS_QUEUE_H_ID] =*/ "queue_H"
+};
+
+static const char *const pipe_id_to_str[] = {
+ /* [IA_CSS_PIPE_ID_PREVIEW] =*/ "preview",
+ /* [IA_CSS_PIPE_ID_COPY] =*/ "copy",
+ /* [IA_CSS_PIPE_ID_VIDEO] =*/ "video",
+ /* [IA_CSS_PIPE_ID_CAPTURE] =*/ "capture",
+ /* [IA_CSS_PIPE_ID_YUVPP] =*/ "yuvpp",
+ /* [IA_CSS_PIPE_ID_ACC] =*/ "accelerator"
+};
+
+static char dot_id_input_bin[SH_CSS_MAX_BINARY_NAME + 10];
+static char ring_buffer[200];
+
+void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ ia_css_debug_vdtrace(level, fmt, ap);
+ va_end(ap);
+}
+
+static void debug_dump_long_array_formatted(
+ const sp_ID_t sp_id,
+ hrt_address stack_sp_addr,
+ unsigned int stack_size)
+{
+ unsigned int i;
+ u32 val;
+ u32 addr = (uint32_t)stack_sp_addr;
+ u32 stack_size_words = CEIL_DIV(stack_size, sizeof(uint32_t));
+
+ /* When the size is not a multiple of four, the last word is only
+ * relevant for the remaining bytes */
+ for (i = 0; i < stack_size_words; i++) {
+ val = sp_dmem_load_uint32(sp_id, (hrt_address)addr);
+ if ((i % 8) == 0)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n");
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "0x%08x ", val);
+ addr += sizeof(uint32_t);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n");
+}
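+
+/*
+ * Word-count sketch: CEIL_DIV() rounds the byte size up to whole 32-bit
+ * words, e.g. a 10-byte stack dumps CEIL_DIV(10, 4) = 3 words, with the last
+ * word only partially meaningful, as noted above.
+ */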
+
+static void debug_dump_sp_stack_info(
+ const sp_ID_t sp_id)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_threads_stack;
+ unsigned int HIVE_ADDR_sp_threads_stack_size;
+ u32 stack_sizes[MAX_THREAD_NUM];
+ u32 stack_sp_addr[MAX_THREAD_NUM];
+ unsigned int i;
+
+ fw = &sh_css_sp_fw;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "sp_id(%u) stack info\n", sp_id);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "from objects stack_addr_offset:0x%x stack_size_offset:0x%x\n",
+ fw->info.sp.threads_stack,
+ fw->info.sp.threads_stack_size);
+
+ HIVE_ADDR_sp_threads_stack = fw->info.sp.threads_stack;
+ HIVE_ADDR_sp_threads_stack_size = fw->info.sp.threads_stack_size;
+
+ if (fw->info.sp.threads_stack == 0 ||
+ fw->info.sp.threads_stack_size == 0)
+ return;
+
+ (void)HIVE_ADDR_sp_threads_stack;
+ (void)HIVE_ADDR_sp_threads_stack_size;
+
+ sp_dmem_load(sp_id,
+ (unsigned int)sp_address_of(sp_threads_stack),
+ &stack_sp_addr, sizeof(stack_sp_addr));
+ sp_dmem_load(sp_id,
+ (unsigned int)sp_address_of(sp_threads_stack_size),
+ &stack_sizes, sizeof(stack_sizes));
+
+ for (i = 0 ; i < MAX_THREAD_NUM; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "thread: %u stack_addr: 0x%08x stack_size: %u\n",
+ i, stack_sp_addr[i], stack_sizes[i]);
+ debug_dump_long_array_formatted(sp_id, (hrt_address)stack_sp_addr[i],
+ stack_sizes[i]);
+ }
+}
+
+void ia_css_debug_dump_sp_stack_info(void)
+{
+ debug_dump_sp_stack_info(SP0_ID);
+}
+
+void ia_css_debug_set_dtrace_level(const unsigned int trace_level)
+{
+ ia_css_debug_trace_level = trace_level;
+ return;
+}
+
+unsigned int ia_css_debug_get_dtrace_level(void)
+{
+ return ia_css_debug_trace_level;
+}
+
+static const char *debug_stream_format2str(const enum atomisp_input_format
+ stream_format)
+{
+ switch (stream_format) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ return "yuv420-8-legacy";
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ return "yuv420-8";
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ return "yuv420-10";
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ return "yuv420-16";
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ return "yuv422-8";
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ return "yuv422-10";
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ return "yuv422-16";
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ return "rgb444";
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ return "rgb555";
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ return "rgb565";
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ return "rgb666";
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ return "rgb888";
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ return "raw6";
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ return "raw7";
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ return "raw8";
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ return "raw10";
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ return "raw12";
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ return "raw14";
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ return "raw16";
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ return "binary8";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT1:
+ return "generic-short1";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT2:
+ return "generic-short2";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT3:
+ return "generic-short3";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT4:
+ return "generic-short4";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT5:
+ return "generic-short5";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT6:
+ return "generic-short6";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT7:
+ return "generic-short7";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT8:
+ return "generic-short8";
+ case ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT:
+ return "yuv420-8-shift";
+ case ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT:
+ return "yuv420-10-shift";
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ return "embedded-8";
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ return "user-def-8-type-1";
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ return "user-def-8-type-2";
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ return "user-def-8-type-3";
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ return "user-def-8-type-4";
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ return "user-def-8-type-5";
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ return "user-def-8-type-6";
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ return "user-def-8-type-7";
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ return "user-def-8-type-8";
+
+ default:
+ assert(!"Unknown stream format");
+ return "unknown-stream-format";
+ }
+}
+
+static const char *debug_frame_format2str(const enum ia_css_frame_format
+ frame_format)
+{
+ switch (frame_format) {
+ case IA_CSS_FRAME_FORMAT_NV11:
+ return "NV11";
+ case IA_CSS_FRAME_FORMAT_NV12:
+ return "NV12";
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ return "NV12_16";
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ return "NV12_TILEY";
+ case IA_CSS_FRAME_FORMAT_NV16:
+ return "NV16";
+ case IA_CSS_FRAME_FORMAT_NV21:
+ return "NV21";
+ case IA_CSS_FRAME_FORMAT_NV61:
+ return "NV61";
+ case IA_CSS_FRAME_FORMAT_YV12:
+ return "YV12";
+ case IA_CSS_FRAME_FORMAT_YV16:
+ return "YV16";
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ return "YUV420";
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ return "YUV420_16";
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ return "YUV422";
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ return "YUV422_16";
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ return "UYVY";
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ return "YUYV";
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ return "YUV444";
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ return "YUV_LINE";
+ case IA_CSS_FRAME_FORMAT_RAW:
+ return "RAW";
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ return "RGB565";
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ return "PLANAR_RGB888";
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ return "RGBA888";
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ return "QPLANE6";
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ return "BINARY_8";
+ case IA_CSS_FRAME_FORMAT_MIPI:
+ return "MIPI";
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ return "RAW_PACKED";
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ return "CSI_MIPI_YUV420_8";
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ return "CSI_MIPI_LEGACY_YUV420_8";
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10:
+ return "CSI_MIPI_YUV420_10";
+
+ default:
+ assert(!"Unknown frame format");
+ return "unknown-frame-format";
+ }
+}
+
+static void debug_print_sp_state(const sp_state_t *state, const char *cell)
+{
+ assert(cell);
+ assert(state);
+
+ ia_css_debug_dtrace(2, "%s state:\n", cell);
+ ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
+ ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
+ state->status_register);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
+ state->is_sleeping);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
+ state->is_stalling);
+ return;
+}
+
+static void debug_print_isp_state(const isp_state_t *state, const char *cell)
+{
+ assert(state);
+ assert(cell);
+
+ ia_css_debug_dtrace(2, "%s state:\n", cell);
+ ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
+ ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
+ state->status_register);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
+ state->is_sleeping);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
+ state->is_stalling);
+ return;
+}
+
+void ia_css_debug_dump_isp_state(void)
+{
+ isp_state_t state;
+ isp_stall_t stall;
+
+ isp_get_state(ISP0_ID, &state, &stall);
+
+ debug_print_isp_state(&state, "ISP");
+
+ if (state.is_stalling) {
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "[0] if_prim_a_FIFO stalled", stall.fifo0);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "[1] if_prim_b_FIFO stalled", stall.fifo1);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[2] dma_FIFO stalled",
+ stall.fifo2);
+#if defined(HAS_ISP_2400_MAMOIADA) || defined(HAS_ISP_2401_MAMOIADA) || defined(IS_ISP_2500_SYSTEM)
+
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[3] gdc0_FIFO stalled",
+ stall.fifo3);
+#if !defined(IS_ISP_2500_SYSTEM)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[4] gdc1_FIFO stalled",
+ stall.fifo4);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[5] gpio_FIFO stalled",
+ stall.fifo5);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[6] sp_FIFO stalled",
+ stall.fifo6);
+#else
+#error "ia_css_debug: ISP cell must be one of {2400_MAMOIADA,, 2401_MAMOIADA, 2500_SKYCAM}"
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "status & control stalled",
+ stall.stat_ctrl);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled",
+ stall.dmem);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vmem stalled",
+ stall.vmem);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem1 stalled",
+ stall.vamem1);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem2 stalled",
+ stall.vamem2);
+#if defined(HAS_ISP_2400_MAMOIADA) || defined(HAS_ISP_2401_MAMOIADA)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem3 stalled",
+ stall.vamem3);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "hmem stalled",
+ stall.hmem);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "pmem stalled",
+ stall.pmem);
+#endif
+ }
+ return;
+}
+
+void ia_css_debug_dump_sp_state(void)
+{
+ sp_state_t state;
+ sp_stall_t stall;
+
+ sp_get_state(SP0_ID, &state, &stall);
+ debug_print_sp_state(&state, "SP");
+ if (state.is_stalling) {
+#if defined(HAS_SP_2400) || defined(IS_ISP_2500_SYSTEM)
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isys_FIFO stalled",
+ stall.fifo0);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "if_sec_FIFO stalled",
+ stall.fifo1);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "str_to_mem_FIFO stalled", stall.fifo2);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dma_FIFO stalled",
+ stall.fifo3);
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "if_prim_a_FIFO stalled", stall.fifo4);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isp_FIFO stalled",
+ stall.fifo5);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gp_FIFO stalled",
+ stall.fifo6);
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "if_prim_b_FIFO stalled", stall.fifo7);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc0_FIFO stalled",
+ stall.fifo8);
+#if !defined(IS_ISP_2500_SYSTEM)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc1_FIFO stalled",
+ stall.fifo9);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "irq FIFO stalled",
+ stall.fifoa);
+#else
+#error "ia_css_debug: SP cell must be one of {SP2400, SP2500}"
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled",
+ stall.dmem);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "control master stalled",
+ stall.control_master);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "i-cache master stalled",
+ stall.icache_master);
+ }
+ ia_css_debug_dump_trace();
+ return;
+}
+
+static void debug_print_fifo_channel_state(const fifo_channel_state_t *state,
+ const char *descr)
+{
+ assert(state);
+ assert(descr);
+
+ ia_css_debug_dtrace(2, "FIFO channel: %s\n", descr);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "source valid",
+ state->src_valid);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo accept",
+ state->fifo_accept);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo valid",
+ state->fifo_valid);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "sink accept",
+ state->sink_accept);
+ return;
+}
+
+#if !defined(HAS_NO_INPUT_FORMATTER) && defined(USE_INPUT_SYSTEM_VERSION_2)
+void ia_css_debug_dump_pif_a_isp_fifo_state(void)
+{
+ fifo_channel_state_t pif_to_isp, isp_to_pif;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_IF0_TO_ISP0, &pif_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_IF0, &isp_to_pif);
+ debug_print_fifo_channel_state(&pif_to_isp, "Primary IF A to ISP");
+ debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF A");
+}
+
+void ia_css_debug_dump_pif_b_isp_fifo_state(void)
+{
+ fifo_channel_state_t pif_to_isp, isp_to_pif;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_IF1_TO_ISP0, &pif_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_IF1, &isp_to_pif);
+ debug_print_fifo_channel_state(&pif_to_isp, "Primary IF B to ISP");
+ debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF B");
+}
+
+void ia_css_debug_dump_str2mem_sp_fifo_state(void)
+{
+ fifo_channel_state_t s2m_to_sp, sp_to_s2m;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_STREAM2MEM0_TO_SP0, &s2m_to_sp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_SP0_TO_STREAM2MEM0, &sp_to_s2m);
+ debug_print_fifo_channel_state(&s2m_to_sp, "Stream-to-memory to SP");
+ debug_print_fifo_channel_state(&sp_to_s2m, "SP to stream-to-memory");
+}
+
+static void debug_print_if_state(input_formatter_state_t *state, const char *id)
+{
+ unsigned int val;
+
+#if defined(HAS_INPUT_FORMATTER_VERSION_1)
+ const char *st_reset = (state->reset ? "Active" : "Not active");
+#endif
+ const char *st_vsync_active_low =
+ (state->vsync_active_low ? "low" : "high");
+ const char *st_hsync_active_low =
+ (state->hsync_active_low ? "low" : "high");
+
+ const char *fsm_sync_status_str = "unknown";
+ const char *fsm_crop_status_str = "unknown";
+ const char *fsm_padding_status_str = "unknown";
+
+ int st_stline = state->start_line;
+ int st_stcol = state->start_column;
+ int st_crpht = state->cropped_height;
+ int st_crpwd = state->cropped_width;
+ int st_verdcm = state->ver_decimation;
+ int st_hordcm = state->hor_decimation;
+ int st_ver_deinterleaving = state->ver_deinterleaving;
+ int st_hor_deinterleaving = state->hor_deinterleaving;
+ int st_leftpd = state->left_padding;
+ int st_eoloff = state->eol_offset;
+ int st_vmstartaddr = state->vmem_start_address;
+ int st_vmendaddr = state->vmem_end_address;
+ int st_vmincr = state->vmem_increment;
+ int st_yuv420 = state->is_yuv420;
+ int st_allow_fifo_overflow = state->allow_fifo_overflow;
+ int st_block_fifo_when_no_req = state->block_fifo_when_no_req;
+
+ ia_css_debug_dtrace(2, "InputFormatter State (%s):\n", id);
+
+ ia_css_debug_dtrace(2, "\tConfiguration:\n");
+
+#if defined(HAS_INPUT_FORMATTER_VERSION_1)
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "Software reset", st_reset);
+#endif
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start line", st_stline);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start column", st_stcol);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped height", st_crpht);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped width", st_crpwd);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Ver decimation", st_verdcm);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Hor decimation", st_hordcm);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Ver deinterleaving", st_ver_deinterleaving);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Hor deinterleaving", st_hor_deinterleaving);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Left padding", st_leftpd);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "EOL offset (bytes)", st_eoloff);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
+ "VMEM start address", st_vmstartaddr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
+ "VMEM end address", st_vmendaddr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
+ "VMEM increment", st_vmincr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "YUV 420 format", st_yuv420);
+ ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n",
+ "Vsync", st_vsync_active_low);
+ ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n",
+ "Hsync", st_hsync_active_low);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Allow FIFO overflow", st_allow_fifo_overflow);
+ /* Flag that tells whether the IF gives backpressure on frames */
+ /*
+ * FYI, this is only on the frame request (indicate), when the IF has
+ * synch'd on a frame it will always give back pressure
+ */
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Block when no request", st_block_fifo_when_no_req);
+
+#if defined(HAS_INPUT_FORMATTER_VERSION_2)
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "IF_BLOCKED_FIFO_NO_REQ_ADDRESS",
+ input_formatter_reg_load(INPUT_FORMATTER0_ID,
+ HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS)
+ );
+
+ ia_css_debug_dtrace(2, "\t%-32s:\n", "InputSwitch State");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg0",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg0));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg1",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg1));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg2",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg2));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg3",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg3));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg4",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg4));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg5",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg5));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg6",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg6));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg7",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg7));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_fsync_lut",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_fsync_lut));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_srst",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_srst));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_slv_reg_srst",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_slv_reg_srst));
+#endif
+
+ ia_css_debug_dtrace(2, "\tFSM Status:\n");
+
+ val = state->fsm_sync_status;
+
+ if (val > 7) {
+ fsm_sync_status_str = "ERROR";
+ } else {
+ switch (val & 0x7) {
+ case 0:
+ fsm_sync_status_str = "idle";
+ break;
+ case 1:
+ fsm_sync_status_str = "request frame";
+ break;
+ case 2:
+ fsm_sync_status_str = "request lines";
+ break;
+ case 3:
+ fsm_sync_status_str = "request vectors";
+ break;
+ case 4:
+ fsm_sync_status_str = "send acknowledge";
+ break;
+ default:
+ fsm_sync_status_str = "unknown";
+ break;
+ }
+ }
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n",
+ "FSM Synchronization Status", val,
+ fsm_sync_status_str);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Synchronization Counter",
+ state->fsm_sync_counter);
+
+ val = state->fsm_crop_status;
+
+ if (val > 7) {
+ fsm_crop_status_str = "ERROR";
+ } else {
+ switch (val & 0x7) {
+ case 0:
+ fsm_crop_status_str = "idle";
+ break;
+ case 1:
+ fsm_crop_status_str = "wait line";
+ break;
+ case 2:
+ fsm_crop_status_str = "crop line";
+ break;
+ case 3:
+ fsm_crop_status_str = "crop pixel";
+ break;
+ case 4:
+ fsm_crop_status_str = "pass pixel";
+ break;
+ case 5:
+ fsm_crop_status_str = "pass line";
+ break;
+ case 6:
+ fsm_crop_status_str = "lost line";
+ break;
+ default:
+ fsm_crop_status_str = "unknown";
+ break;
+ }
+ }
+ ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n",
+ "FSM Crop Status", val, fsm_crop_status_str);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Crop Line Counter",
+ state->fsm_crop_line_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Crop Pixel Counter",
+ state->fsm_crop_pixel_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Deinterleaving idx buffer",
+ state->fsm_deinterleaving_index);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM H decimation counter",
+ state->fsm_dec_h_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM V decimation counter",
+ state->fsm_dec_v_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM block V decimation counter",
+ state->fsm_dec_block_v_counter);
+
+ val = state->fsm_padding_status;
+
+ if (val > 7) {
+ fsm_padding_status_str = "ERROR";
+ } else {
+ switch (val & 0x7) {
+ case 0:
+ fsm_padding_status_str = "idle";
+ break;
+ case 1:
+ fsm_padding_status_str = "left pad";
+ break;
+ case 2:
+ fsm_padding_status_str = "write";
+ break;
+ case 3:
+ fsm_padding_status_str = "right pad";
+ break;
+ case 4:
+ fsm_padding_status_str = "send end of line";
+ break;
+ default:
+ fsm_padding_status_str = "unknown";
+ break;
+ }
+ }
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", "FSM Padding Status",
+ val, fsm_padding_status_str);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Padding element idx counter",
+ state->fsm_padding_elem_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support error",
+ state->fsm_vector_support_error);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support buf full",
+ state->fsm_vector_buffer_full);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support",
+ state->vector_support);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Fifo sensor data lost",
+ state->sensor_data_lost);
+ return;
+}
+
+static void debug_print_if_bin_state(input_formatter_bin_state_t *state)
+{
+ ia_css_debug_dtrace(2, "Stream-to-memory state:\n");
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "reset", state->reset);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "input endianness",
+ state->input_endianness);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "output endianness",
+ state->output_endianness);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "bitswap", state->bitswap);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "block_synch",
+ state->block_synch);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "packet_synch",
+ state->packet_synch);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "readpostwrite_sync",
+ state->readpostwrite_synch);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "is_2ppc", state->is_2ppc);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "en_status_update",
+ state->en_status_update);
+}
+
+void ia_css_debug_dump_if_state(void)
+{
+ input_formatter_state_t if_state;
+ input_formatter_bin_state_t if_bin_state;
+
+ input_formatter_get_state(INPUT_FORMATTER0_ID, &if_state);
+ debug_print_if_state(&if_state, "Primary IF A");
+ ia_css_debug_dump_pif_a_isp_fifo_state();
+
+ input_formatter_get_state(INPUT_FORMATTER1_ID, &if_state);
+ debug_print_if_state(&if_state, "Primary IF B");
+ ia_css_debug_dump_pif_b_isp_fifo_state();
+
+ input_formatter_bin_get_state(INPUT_FORMATTER3_ID, &if_bin_state);
+ debug_print_if_bin_state(&if_bin_state);
+ ia_css_debug_dump_str2mem_sp_fifo_state();
+}
+#endif
+
+void ia_css_debug_dump_dma_state(void)
+{
+	/* note: the variable below is made static as it is quite large;
+	   if it were not static it would end up on the stack, which could
+	   overflow the limited kernel stack available to drivers
+	 */
+ static dma_state_t state;
+ int i, ch_id;
+
+ const char *fsm_cmd_st_lbl = "FSM Command flag state";
+ const char *fsm_ctl_st_lbl = "FSM Control flag state";
+	/* default all state strings so a missed case cannot pass NULL to %s */
+	const char *fsm_ctl_state = "UNKNOWN";
+	const char *fsm_ctl_flag = "UNKNOWN";
+	const char *fsm_pack_st = "UNKNOWN";
+	const char *fsm_read_st = "UNKNOWN";
+	const char *fsm_write_st = "UNKNOWN";
+ char last_cmd_str[64];
+
+ dma_get_state(DMA0_ID, &state);
+ /* Print header for DMA dump status */
+ ia_css_debug_dtrace(2, "DMA dump status:\n");
+
+ /* Print FSM command flag state */
+ if (state.fsm_command_idle)
+ ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "IDLE");
+ if (state.fsm_command_run)
+ ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "RUN");
+ if (state.fsm_command_stalling)
+ ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
+ "STALL");
+ if (state.fsm_command_error)
+ ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
+ "ERROR");
+
+ /* Print last command along with the channel */
+ ch_id = state.last_command_channel;
+
+ switch (state.last_command) {
+ case DMA_COMMAND_READ:
+ snprintf(last_cmd_str, 64,
+ "Read 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_WRITE:
+ snprintf(last_cmd_str, 64,
+ "Write 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_SET_CHANNEL:
+ snprintf(last_cmd_str, 64, "Set Channel [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_SET_PARAM:
+ snprintf(last_cmd_str, 64,
+ "Set Param: %d [Channel: %d]",
+ state.last_command_param, ch_id);
+ break;
+ case DMA_COMMAND_READ_SPECIFIC:
+ snprintf(last_cmd_str, 64,
+ "Read Specific 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_WRITE_SPECIFIC:
+ snprintf(last_cmd_str, 64,
+ "Write Specific 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_INIT:
+ snprintf(last_cmd_str, 64,
+ "Init 2D Block on Device A [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_INIT_SPECIFIC:
+ snprintf(last_cmd_str, 64,
+ "Init Specific 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_RST:
+ snprintf(last_cmd_str, 64, "DMA SW Reset");
+ break;
+ case N_DMA_COMMANDS:
+ snprintf(last_cmd_str, 64, "UNKNOWN");
+ break;
+ default:
+ snprintf(last_cmd_str, 64,
+ "unknown [Channel: %d]", ch_id);
+ break;
+ }
+ ia_css_debug_dtrace(2, "\t%-32s: (0x%X : %s)\n",
+ "last command received", state.last_command,
+ last_cmd_str);
+
+ /* Print DMA registers */
+ ia_css_debug_dtrace(2, "\t%-32s\n",
+ "DMA registers, connection group 0");
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Command",
+ state.current_command);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address A",
+ state.current_addr_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address B",
+ state.current_addr_b);
+
+ if (state.fsm_ctrl_idle)
+ fsm_ctl_flag = "IDLE";
+ else if (state.fsm_ctrl_run)
+ fsm_ctl_flag = "RUN";
+ else if (state.fsm_ctrl_stalling)
+ fsm_ctl_flag = "STAL";
+ else if (state.fsm_ctrl_error)
+ fsm_ctl_flag = "ERROR";
+ else
+ fsm_ctl_flag = "UNKNOWN";
+
+ switch (state.fsm_ctrl_state) {
+ case DMA_CTRL_STATE_IDLE:
+ fsm_ctl_state = "Idle state";
+ break;
+ case DMA_CTRL_STATE_REQ_RCV:
+ fsm_ctl_state = "Req Rcv state";
+ break;
+ case DMA_CTRL_STATE_RCV:
+ fsm_ctl_state = "Rcv state";
+ break;
+ case DMA_CTRL_STATE_RCV_REQ:
+ fsm_ctl_state = "Rcv Req state";
+ break;
+ case DMA_CTRL_STATE_INIT:
+ fsm_ctl_state = "Init state";
+ break;
+ case N_DMA_CTRL_STATES:
+ fsm_ctl_state = "Unknown";
+ break;
+ }
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s -> %s\n", fsm_ctl_st_lbl,
+ fsm_ctl_flag, fsm_ctl_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source dev",
+ state.fsm_ctrl_source_dev);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source addr",
+ state.fsm_ctrl_source_addr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source stride",
+ state.fsm_ctrl_source_stride);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source width",
+ state.fsm_ctrl_source_width);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source height",
+ state.fsm_ctrl_source_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source dev",
+ state.fsm_ctrl_pack_source_dev);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest dev",
+ state.fsm_ctrl_pack_dest_dev);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest addr",
+ state.fsm_ctrl_dest_addr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest stride",
+ state.fsm_ctrl_dest_stride);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source width",
+ state.fsm_ctrl_pack_source_width);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest height",
+ state.fsm_ctrl_pack_dest_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest width",
+ state.fsm_ctrl_pack_dest_width);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source elems",
+ state.fsm_ctrl_pack_source_elems);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest elems",
+ state.fsm_ctrl_pack_dest_elems);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack extension",
+ state.fsm_ctrl_pack_extension);
+
+ if (state.pack_idle)
+ fsm_pack_st = "IDLE";
+ if (state.pack_run)
+ fsm_pack_st = "RUN";
+ if (state.pack_stalling)
+ fsm_pack_st = "STALL";
+ if (state.pack_error)
+ fsm_pack_st = "ERROR";
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Pack flag state",
+ fsm_pack_st);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack cnt height",
+ state.pack_cnt_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack src cnt width",
+ state.pack_src_cnt_width);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack dest cnt width",
+ state.pack_dest_cnt_width);
+
+ if (state.read_state == DMA_RW_STATE_IDLE)
+ fsm_read_st = "Idle state";
+ if (state.read_state == DMA_RW_STATE_REQ)
+ fsm_read_st = "Req state";
+ if (state.read_state == DMA_RW_STATE_NEXT_LINE)
+ fsm_read_st = "Next line";
+ if (state.read_state == DMA_RW_STATE_UNLOCK_CHANNEL)
+ fsm_read_st = "Unlock channel";
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Read state",
+ fsm_read_st);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt height",
+ state.read_cnt_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt width",
+ state.read_cnt_width);
+
+ if (state.write_state == DMA_RW_STATE_IDLE)
+ fsm_write_st = "Idle state";
+ if (state.write_state == DMA_RW_STATE_REQ)
+ fsm_write_st = "Req state";
+ if (state.write_state == DMA_RW_STATE_NEXT_LINE)
+ fsm_write_st = "Next line";
+ if (state.write_state == DMA_RW_STATE_UNLOCK_CHANNEL)
+ fsm_write_st = "Unlock channel";
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Write state",
+ fsm_write_st);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write height",
+ state.write_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write width",
+ state.write_width);
+
+ for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) {
+ dma_port_state_t *port = &state.port_states[i];
+
+ ia_css_debug_dtrace(2, "\tDMA device interface %d\n", i);
+ ia_css_debug_dtrace(2, "\t\tDMA internal side state\n");
+ ia_css_debug_dtrace(2,
+ "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n",
+ port->req_cs, port->req_we_n, port->req_run,
+ port->req_ack);
+ ia_css_debug_dtrace(2, "\t\tMaster Output side state\n");
+ ia_css_debug_dtrace(2,
+ "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n",
+ port->send_cs, port->send_we_n,
+ port->send_run, port->send_ack);
+ ia_css_debug_dtrace(2, "\t\tFifo state\n");
+ if (port->fifo_state == DMA_FIFO_STATE_WILL_BE_FULL)
+ ia_css_debug_dtrace(2, "\t\t\tFiFo will be full\n");
+ else if (port->fifo_state == DMA_FIFO_STATE_FULL)
+ ia_css_debug_dtrace(2, "\t\t\tFifo Full\n");
+ else if (port->fifo_state == DMA_FIFO_STATE_EMPTY)
+ ia_css_debug_dtrace(2, "\t\t\tFifo Empty\n");
+ else
+ ia_css_debug_dtrace(2, "\t\t\tFifo state unknown\n");
+
+ ia_css_debug_dtrace(2, "\t\tFifo counter %d\n\n",
+ port->fifo_counter);
+ }
+
+ for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) {
+ dma_channel_state_t *ch = &state.channel_states[i];
+
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "DMA channel register",
+ i);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Connection",
+ ch->connection);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Sign extend",
+ ch->sign_extend);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev A",
+ ch->stride_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev A",
+ ch->elems_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev A",
+ ch->cropping_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev A",
+ ch->width_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev B",
+ ch->stride_b);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev B",
+ ch->elems_b);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev B",
+ ch->cropping_b);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev B",
+ ch->width_b);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Height", ch->height);
+ }
+ ia_css_debug_dtrace(2, "\n");
+ return;
+}
+
+void ia_css_debug_dump_dma_sp_fifo_state(void)
+{
+ fifo_channel_state_t dma_to_sp, sp_to_dma;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_DMA0_TO_SP0, &dma_to_sp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_SP0_TO_DMA0, &sp_to_dma);
+ debug_print_fifo_channel_state(&dma_to_sp, "DMA to SP");
+ debug_print_fifo_channel_state(&sp_to_dma, "SP to DMA");
+ return;
+}
+
+void ia_css_debug_dump_dma_isp_fifo_state(void)
+{
+ fifo_channel_state_t dma_to_isp, isp_to_dma;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_DMA0_TO_ISP0, &dma_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_DMA0, &isp_to_dma);
+ debug_print_fifo_channel_state(&dma_to_isp, "DMA to ISP");
+ debug_print_fifo_channel_state(&isp_to_dma, "ISP to DMA");
+ return;
+}
+
+void ia_css_debug_dump_isp_sp_fifo_state(void)
+{
+ fifo_channel_state_t sp_to_isp, isp_to_sp;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_SP0_TO_ISP0, &sp_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_SP0, &isp_to_sp);
+ debug_print_fifo_channel_state(&sp_to_isp, "SP to ISP");
+ debug_print_fifo_channel_state(&isp_to_sp, "ISP to SP");
+ return;
+}
+
+void ia_css_debug_dump_isp_gdc_fifo_state(void)
+{
+ fifo_channel_state_t gdc_to_isp, isp_to_gdc;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_GDC0_TO_ISP0, &gdc_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_GDC0, &isp_to_gdc);
+ debug_print_fifo_channel_state(&gdc_to_isp, "GDC to ISP");
+ debug_print_fifo_channel_state(&isp_to_gdc, "ISP to GDC");
+ return;
+}
+
+void ia_css_debug_dump_all_fifo_state(void)
+{
+ int i;
+ fifo_monitor_state_t state;
+
+ fifo_monitor_get_state(FIFO_MONITOR0_ID, &state);
+
+	for (i = 0; i < N_FIFO_CHANNEL; i++) {
+		char descr[32];
+
+		snprintf(descr, sizeof(descr), "fifo channel %d", i);
+		debug_print_fifo_channel_state(&state.fifo_channels[i],
+					       descr);
+	}
+ return;
+}
+
+static void debug_binary_info_print(const struct ia_css_binary_xinfo *info)
+{
+ assert(info);
+ ia_css_debug_dtrace(2, "id = %d\n", info->sp.id);
+ ia_css_debug_dtrace(2, "mode = %d\n", info->sp.pipeline.mode);
+ ia_css_debug_dtrace(2, "max_input_width = %d\n", info->sp.input.max_width);
+ ia_css_debug_dtrace(2, "min_output_width = %d\n",
+ info->sp.output.min_width);
+ ia_css_debug_dtrace(2, "max_output_width = %d\n",
+ info->sp.output.max_width);
+ ia_css_debug_dtrace(2, "top_cropping = %d\n", info->sp.pipeline.top_cropping);
+ ia_css_debug_dtrace(2, "left_cropping = %d\n", info->sp.pipeline.left_cropping);
+ ia_css_debug_dtrace(2, "xmem_addr = %d\n", info->xmem_addr);
+ ia_css_debug_dtrace(2, "enable_vf_veceven = %d\n",
+ info->sp.enable.vf_veceven);
+ ia_css_debug_dtrace(2, "enable_dis = %d\n", info->sp.enable.dis);
+ ia_css_debug_dtrace(2, "enable_uds = %d\n", info->sp.enable.uds);
+ ia_css_debug_dtrace(2, "enable ds = %d\n", info->sp.enable.ds);
+ ia_css_debug_dtrace(2, "s3atbl_use_dmem = %d\n", info->sp.s3a.s3atbl_use_dmem);
+ return;
+}
+
+void ia_css_debug_binary_print(const struct ia_css_binary *bi)
+{
+ unsigned int i;
+
+ debug_binary_info_print(bi->info);
+ ia_css_debug_dtrace(2,
+ "input: %dx%d, format = %d, padded width = %d\n",
+ bi->in_frame_info.res.width,
+ bi->in_frame_info.res.height,
+ bi->in_frame_info.format,
+ bi->in_frame_info.padded_width);
+ ia_css_debug_dtrace(2,
+ "internal :%dx%d, format = %d, padded width = %d\n",
+ bi->internal_frame_info.res.width,
+ bi->internal_frame_info.res.height,
+ bi->internal_frame_info.format,
+ bi->internal_frame_info.padded_width);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (bi->out_frame_info[i].res.width != 0) {
+ ia_css_debug_dtrace(2,
+ "out%d: %dx%d, format = %d, padded width = %d\n",
+ i,
+ bi->out_frame_info[i].res.width,
+ bi->out_frame_info[i].res.height,
+ bi->out_frame_info[i].format,
+ bi->out_frame_info[i].padded_width);
+ }
+ }
+ ia_css_debug_dtrace(2,
+ "vf out: %dx%d, format = %d, padded width = %d\n",
+ bi->vf_frame_info.res.width,
+ bi->vf_frame_info.res.height,
+ bi->vf_frame_info.format,
+ bi->vf_frame_info.padded_width);
+ ia_css_debug_dtrace(2, "online = %d\n", bi->online);
+ ia_css_debug_dtrace(2, "input_buf_vectors = %d\n",
+ bi->input_buf_vectors);
+ ia_css_debug_dtrace(2, "deci_factor_log2 = %d\n", bi->deci_factor_log2);
+ ia_css_debug_dtrace(2, "vf_downscale_log2 = %d\n",
+ bi->vf_downscale_log2);
+ ia_css_debug_dtrace(2, "dis_deci_factor_log2 = %d\n",
+ bi->dis.deci_factor_log2);
+ ia_css_debug_dtrace(2, "dis hor coef num = %d\n",
+ bi->dis.coef.pad.width);
+ ia_css_debug_dtrace(2, "dis ver coef num = %d\n",
+ bi->dis.coef.pad.height);
+ ia_css_debug_dtrace(2, "dis hor proj num = %d\n",
+ bi->dis.proj.pad.height);
+ ia_css_debug_dtrace(2, "sctbl_width_per_color = %d\n",
+ bi->sctbl_width_per_color);
+ ia_css_debug_dtrace(2, "s3atbl_width = %d\n", bi->s3atbl_width);
+ ia_css_debug_dtrace(2, "s3atbl_height = %d\n", bi->s3atbl_height);
+ return;
+}
+
+void ia_css_debug_frame_print(const struct ia_css_frame *frame,
+ const char *descr)
+{
+ char *data = NULL;
+
+ assert(frame);
+ assert(descr);
+
+ data = (char *)HOST_ADDRESS(frame->data);
+ ia_css_debug_dtrace(2, "frame %s (%p):\n", descr, frame);
+ ia_css_debug_dtrace(2, " resolution = %dx%d\n",
+ frame->info.res.width, frame->info.res.height);
+ ia_css_debug_dtrace(2, " padded width = %d\n",
+ frame->info.padded_width);
+ ia_css_debug_dtrace(2, " format = %d\n", frame->info.format);
+ ia_css_debug_dtrace(2, " is contiguous = %s\n",
+ frame->contiguous ? "yes" : "no");
+ switch (frame->info.format) {
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ ia_css_debug_dtrace(2, " Y = %p\n",
+ data + frame->planes.nv.y.offset);
+ ia_css_debug_dtrace(2, " UV = %p\n",
+ data + frame->planes.nv.uv.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ ia_css_debug_dtrace(2, " YUYV = %p\n",
+ data + frame->planes.yuyv.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ ia_css_debug_dtrace(2, " Y = %p\n",
+ data + frame->planes.yuv.y.offset);
+ ia_css_debug_dtrace(2, " U = %p\n",
+ data + frame->planes.yuv.u.offset);
+ ia_css_debug_dtrace(2, " V = %p\n",
+ data + frame->planes.yuv.v.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ ia_css_debug_dtrace(2, " RAW PACKED = %p\n",
+ data + frame->planes.raw.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW:
+ ia_css_debug_dtrace(2, " RAW = %p\n",
+ data + frame->planes.raw.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ ia_css_debug_dtrace(2, " RGB = %p\n",
+ data + frame->planes.rgb.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ ia_css_debug_dtrace(2, " R = %p\n",
+ data + frame->planes.plane6.r.offset);
+ ia_css_debug_dtrace(2, " RatB = %p\n",
+ data + frame->planes.plane6.r_at_b.offset);
+ ia_css_debug_dtrace(2, " Gr = %p\n",
+ data + frame->planes.plane6.gr.offset);
+ ia_css_debug_dtrace(2, " Gb = %p\n",
+ data + frame->planes.plane6.gb.offset);
+ ia_css_debug_dtrace(2, " B = %p\n",
+ data + frame->planes.plane6.b.offset);
+ ia_css_debug_dtrace(2, " BatR = %p\n",
+ data + frame->planes.plane6.b_at_r.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ ia_css_debug_dtrace(2, " Binary data = %p\n",
+ data + frame->planes.binary.data.offset);
+ break;
+ default:
+ ia_css_debug_dtrace(2, " unknown frame type\n");
+ break;
+ }
+ return;
+}
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
+ *state)
+{
+#endif
+
+#if SP_DEBUG == SP_DEBUG_DUMP
+
+ assert(state);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current SP software counter: %d\n",
+ state->debug[0]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer queue head: 0x%x\n",
+ state->debug[1]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer queue tail: 0x%x\n",
+ state->debug[2]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a buffer queue head: 0x%x\n",
+ state->debug[3]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a buffer queue tail: 0x%x\n",
+ state->debug[4]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full output buffer queue head: 0x%x\n",
+ state->debug[5]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full output buffer queue tail: 0x%x\n",
+ state->debug[6]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full s3a buffer queue head: 0x%x\n",
+ state->debug[7]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full s3a buffer queue tail: 0x%x\n",
+ state->debug[8]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue head: 0x%x\n",
+ state->debug[9]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue tail: 0x%x\n",
+ state->debug[10]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "num of stages of current pipeline: 0x%x\n",
+ state->debug[11]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 1: 0x%x\n",
+ state->debug[12]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 2: 0x%x\n",
+ state->debug[13]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current stage out_vf buffer idx: 0x%x\n",
+ state->debug[14]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current stage output buffer idx: 0x%x\n",
+ state->debug[15]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current stage s3a buffer idx: 0x%x\n",
+ state->debug[16]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first char of current stage name: 0x%x\n",
+ state->debug[17]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "current SP thread id: 0x%x\n",
+ state->debug[18]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer address 1: 0x%x\n",
+ state->debug[19]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer address 2: 0x%x\n",
+ state->debug[20]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty out_vf buffer address 1: 0x%x\n",
+ state->debug[21]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty out_vf buffer address 2: 0x%x\n",
+ state->debug[22]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_hi buffer address 1: 0x%x\n",
+ state->debug[23]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_hi buffer address 2: 0x%x\n",
+ state->debug[24]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_lo buffer address 1: 0x%x\n",
+ state->debug[25]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_lo buffer address 2: 0x%x\n",
+ state->debug[26]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_hor buffer address 1: 0x%x\n",
+ state->debug[27]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_hor buffer address 2: 0x%x\n",
+ state->debug[28]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_ver buffer address 1: 0x%x\n",
+ state->debug[29]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_ver buffer address 2: 0x%x\n",
+ state->debug[30]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty param buffer address: 0x%x\n",
+ state->debug[31]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect frame address: 0x%x\n",
+ state->debug[32]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect frame container address: 0x%x\n",
+ state->debug[33]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect frame container payload: 0x%x\n",
+ state->debug[34]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_hi address: 0x%x\n",
+ state->debug[35]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_hi container address: 0x%x\n",
+ state->debug[36]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_hi container payload: 0x%x\n",
+ state->debug[37]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_lo address: 0x%x\n",
+ state->debug[38]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_lo container address: 0x%x\n",
+ state->debug[39]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_lo container payload: 0x%x\n",
+ state->debug[40]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of calling flash start function: 0x%x\n",
+ state->debug[41]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of calling flash close function: 0x%x\n",
+ state->debug[42]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "number of flashed frame: 0x%x\n",
+ state->debug[43]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "flash in use flag: 0x%x\n",
+ state->debug[44]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of update frame flashed flag: 0x%x\n",
+ state->debug[46]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of active threads: 0x%x\n",
+ state->debug[45]);
+
+#elif SP_DEBUG == SP_DEBUG_COPY
+
+ /* Remember last_index because we only want to print new entries */
+ static int last_index;
+	int sp_index;
+	int n;
+
+	assert(state);
+	sp_index = state->index;
+ if (sp_index < last_index) {
+ /* SP has been reset */
+ last_index = 0;
+ }
+
+ if (last_index == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "copy-trace init: sp_dbg_if_start_line=%d, sp_dbg_if_start_column=%d, sp_dbg_if_cropped_height=%d, sp_debg_if_cropped_width=%d\n",
+ state->if_start_line,
+ state->if_start_column,
+ state->if_cropped_height,
+ state->if_cropped_width);
+ }
+
+ if ((last_index + SH_CSS_SP_DBG_TRACE_DEPTH) < sp_index) {
+ /* last index can be multiple rounds behind */
+ /* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */
+ last_index = sp_index - SH_CSS_SP_DBG_TRACE_DEPTH;
+ }
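+	/*
+	 * Worked example (the depth value is illustrative only): with
+	 * SH_CSS_SP_DBG_TRACE_DEPTH == 32, last_index == 10 and
+	 * sp_index == 100, entries 10..67 have already been overwritten
+	 * in the ring; only 68..99 are still valid, so last_index is
+	 * advanced to 100 - 32 = 68 before printing.
+	 */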
+
+ for (n = last_index; n < sp_index; n++) {
+ int i = n % SH_CSS_SP_DBG_TRACE_DEPTH;
+
+ if (state->trace[i].frame != 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "copy-trace: frame=%d, line=%d, pixel_distance=%d, mipi_used_dword=%d, sp_index=%d\n",
+ state->trace[i].frame,
+ state->trace[i].line,
+ state->trace[i].pixel_distance,
+ state->trace[i].mipi_used_dword,
+ state->trace[i].sp_index);
+ }
+ }
+
+ last_index = sp_index;
+
+#elif SP_DEBUG == SP_DEBUG_TRACE
+
+ /*
+	 * This is just an example of how TRACE_FILE_ID (see ia_css_debug.sp.h)
+	 * is mapped onto the file name string.
+ *
+ * Adjust this to your trace case!
+ */
+ static char const *const id2filename[8] = {
+ "param_buffer.sp.c | tagger.sp.c | pipe_data.sp.c",
+ "isp_init.sp.c",
+ "sp_raw_copy.hive.c",
+ "dma_configure.sp.c",
+ "sp.hive.c",
+ "event_proxy_sp.hive.c",
+ "circular_buffer.sp.c",
+ "frame_buffer.sp.c"
+ };
+
+#if 1
+ /* Example SH_CSS_SP_DBG_NR_OF_TRACES==1 */
+ /* Adjust this to your trace case */
+ static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = {
+ "default"
+ };
+#else
+ /* Example SH_CSS_SP_DBG_NR_OF_TRACES==4 */
+ /* Adjust this to your trace case */
+ static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = {
+ "copy", "preview/video", "capture", "acceleration"
+ };
+#endif
+
+ /* Remember host_index_last because we only want to print new entries */
+ static int host_index_last[SH_CSS_SP_DBG_NR_OF_TRACES] = { 0 };
+ int t, n;
+
+ assert(state);
+
+ for (t = 0; t < SH_CSS_SP_DBG_NR_OF_TRACES; t++) {
+ int sp_index_last = state->index_last[t];
+
+ if (sp_index_last < host_index_last[t]) {
+ /* SP has been reset */
+ host_index_last[t] = 0;
+ }
+
+ if ((host_index_last[t] + SH_CSS_SP_DBG_TRACE_DEPTH) <
+ sp_index_last) {
+ /* last index can be multiple rounds behind */
+ /* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "Warning: trace %s has gap of %d traces\n",
+ trace_name[t],
+ (sp_index_last -
+ (host_index_last[t] +
+ SH_CSS_SP_DBG_TRACE_DEPTH)));
+
+ host_index_last[t] =
+ sp_index_last - SH_CSS_SP_DBG_TRACE_DEPTH;
+ }
+
+ for (n = host_index_last[t]; n < sp_index_last; n++) {
+ int i = n % SH_CSS_SP_DBG_TRACE_DEPTH;
+ int l = state->trace[t][i].location &
+ ((1 << SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS) - 1);
+ int fid = state->trace[t][i].location >>
+ SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS;
+ int ts = state->trace[t][i].time_stamp;
+
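+			/*
+			 * location packs a file id (the index into
+			 * id2filename[]) in its upper bits and a line number
+			 * in its lower bits. E.g. if
+			 * SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS were 13 (a
+			 * value assumed here purely for illustration),
+			 * location 0x6123 would decode to fid 3
+			 * ("dma_configure.sp.c") and l 0x123.
+			 */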
+ if (ts) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "%05d trace=%s, file=%s:%d, data=0x%08x\n",
+ ts,
+ trace_name[t],
+ id2filename[fid], l,
+ state->trace[t][i].data);
+ }
+ }
+ host_index_last[t] = sp_index_last;
+ }
+
+#elif SP_DEBUG == SP_DEBUG_MINIMAL
+ int i;
+ int base = 0;
+ int limit = SH_CSS_NUM_SP_DEBUG;
+ int step = 1;
+
+ assert(state);
+
+ for (i = base; i < limit; i += step) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "sp_dbg_trace[%d] = %d\n",
+ i, state->debug[i]);
+ }
+#endif
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+ return;
+}
+#endif
+
+#if defined(HAS_INPUT_FORMATTER_VERSION_2) && !defined(HAS_NO_INPUT_FORMATTER)
+static void debug_print_rx_mipi_port_state(mipi_port_state_t *state)
+{
+ int i;
+ unsigned int bits, infos;
+
+ assert(state);
+
+ bits = state->irq_status;
+ infos = ia_css_isys_rx_translate_irq_infos(bits);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: (irq reg = 0x%X)\n",
+ "receiver errors", bits);
+
+ if (infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+ ia_css_debug_dtrace(2, "\t\t\tbuffer overrun\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
+ ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+ ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission sync error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
+ ia_css_debug_dtrace(2, "\t\t\tcontrol error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+ ia_css_debug_dtrace(2, "\t\t\t2 or more ECC errors\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
+ ia_css_debug_dtrace(2, "\t\t\tCRC mismatch\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+ ia_css_debug_dtrace(2, "\t\t\tunknown error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+ ia_css_debug_dtrace(2, "\t\t\tframe sync error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+ ia_css_debug_dtrace(2, "\t\t\tframe data error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+ ia_css_debug_dtrace(2, "\t\t\tdata timeout\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+ ia_css_debug_dtrace(2, "\t\t\tunknown escape command entry\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+ ia_css_debug_dtrace(2, "\t\t\tline sync error\n");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "device_ready", state->device_ready);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_status", state->irq_status);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_enable", state->irq_enable);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "timeout_count", state->timeout_count);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "init_count", state->init_count);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16_18", state->raw16_18);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "sync_count", state->sync_count);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "rx_count", state->rx_count);
+
+ for (i = 0; i < MIPI_4LANE_CFG; i++) {
+ ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
+ "lane_sync_count[", i, "]",
+ state->lane_sync_count[i]);
+ }
+
+ for (i = 0; i < MIPI_4LANE_CFG; i++) {
+ ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
+ "lane_rx_count[", i, "]",
+ state->lane_rx_count[i]);
+ }
+
+ return;
+}
+
+static void debug_print_rx_channel_state(rx_channel_state_t *state)
+{
+ int i;
+
+ assert(state);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "compression_scheme0", state->comp_scheme0);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "compression_scheme1", state->comp_scheme1);
+
+ for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
+ ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
+ "MIPI Predictor ", i, state->pred[i]);
+ }
+
+ for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
+ ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
+ "MIPI Compressor ", i, state->comp[i]);
+ }
+
+ return;
+}
+
+static void debug_print_rx_state(receiver_state_t *state)
+{
+ int i;
+
+ assert(state);
+ ia_css_debug_dtrace(2, "CSI Receiver State:\n");
+
+ ia_css_debug_dtrace(2, "\tConfiguration:\n");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "fs_to_ls_delay", state->fs_to_ls_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "ls_to_data_delay", state->ls_to_data_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "data_to_le_delay", state->data_to_le_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "le_to_fe_delay", state->le_to_fe_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "fe_to_fs_delay", state->fe_to_fs_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "le_to_fs_delay", state->le_to_fs_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "is_two_ppc", state->is_two_ppc);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "backend_rst", state->backend_rst);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw18", state->raw18);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "force_raw8", state->force_raw8);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16", state->raw16);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_gsp_acc_ovl", state->be_gsp_acc_ovl);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_srst", state->be_srst);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_is_two_ppc", state->be_is_two_ppc);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_comp_format0", state->be_comp_format0);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_comp_format1", state->be_comp_format1);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_comp_format2", state->be_comp_format2);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_comp_format3", state->be_comp_format3);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_sel", state->be_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_raw16_config", state->be_raw16_config);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_raw18_config", state->be_raw18_config);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_force_raw8", state->be_force_raw8);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_irq_status", state->be_irq_status);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_irq_clear", state->be_irq_clear);
+
+ /* mipi port state */
+ for (i = 0; i < N_MIPI_PORT_ID; i++) {
+ ia_css_debug_dtrace(2, "\tMIPI Port %d State:\n", i);
+
+ debug_print_rx_mipi_port_state(&state->mipi_port_state[i]);
+ }
+ /* end of mipi port state */
+
+ /* rx channel state */
+ for (i = 0; i < N_RX_CHANNEL_ID; i++) {
+ ia_css_debug_dtrace(2, "\tRX Channel %d State:\n", i);
+
+ debug_print_rx_channel_state(&state->rx_channel_state[i]);
+ }
+ /* end of rx channel state */
+
+ return;
+}
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+void ia_css_debug_dump_rx_state(void)
+{
+#if defined(HAS_INPUT_FORMATTER_VERSION_2) && !defined(HAS_NO_INPUT_FORMATTER)
+ receiver_state_t state;
+
+ receiver_get_state(RX0_ID, &state);
+ debug_print_rx_state(&state);
+#endif
+}
+#endif
+
+void ia_css_debug_dump_sp_sw_debug_info(void)
+{
+#if SP_DEBUG != SP_DEBUG_NONE
+ struct sh_css_sp_debug_state state;
+
+ sh_css_sp_get_debug_state(&state);
+ ia_css_debug_print_sp_debug_state(&state);
+#endif
+ ia_css_bufq_dump_queue_info();
+ ia_css_pipeline_dump_thread_map_info();
+ return;
+}
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+static void debug_print_isys_capture_unit_state(capture_unit_state_t *state)
+{
+ assert(state);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Packet_Length", state->Packet_Length);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Length", state->Received_Length);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Short_Packets",
+ state->Received_Short_Packets);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Long_Packets",
+ state->Received_Long_Packets);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Last_Command", state->Last_Command);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Next_Command", state->Next_Command);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Last_Acknowledge", state->Last_Acknowledge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Next_Acknowledge", state->Next_Acknowledge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM_State_Info", state->FSM_State_Info);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "StartMode", state->StartMode);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Start_Addr", state->Start_Addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Mem_Region_Size", state->Mem_Region_Size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Num_Mem_Regions", state->Num_Mem_Regions);
+ return;
+}
+
+static void debug_print_isys_acquisition_unit_state(
+ acquisition_unit_state_t *state)
+{
+ assert(state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Short_Packets",
+ state->Received_Short_Packets);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Long_Packets",
+ state->Received_Long_Packets);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Last_Command", state->Last_Command);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Next_Command", state->Next_Command);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Last_Acknowledge", state->Last_Acknowledge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Next_Acknowledge", state->Next_Acknowledge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM_State_Info", state->FSM_State_Info);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Int_Cntr_Info", state->Int_Cntr_Info);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Start_Addr", state->Start_Addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Mem_Region_Size", state->Mem_Region_Size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Num_Mem_Regions", state->Num_Mem_Regions);
+}
+
+static void debug_print_isys_ctrl_unit_state(ctrl_unit_state_t *state)
+{
+ assert(state);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_cmd", state->last_cmd);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_cmd", state->next_cmd);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_ack", state->last_ack);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_ack", state->next_ack);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "top_fsm_state", state->top_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captA_fsm_state", state->captA_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captB_fsm_state", state->captB_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captC_fsm_state", state->captC_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "acq_fsm_state", state->acq_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captA_start_addr", state->captA_start_addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captB_start_addr", state->captB_start_addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captC_start_addr", state->captC_start_addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captA_mem_region_size",
+ state->captA_mem_region_size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captB_mem_region_size",
+ state->captB_mem_region_size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captC_mem_region_size",
+ state->captC_mem_region_size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captA_num_mem_regions",
+ state->captA_num_mem_regions);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captB_num_mem_regions",
+ state->captB_num_mem_regions);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captC_num_mem_regions",
+ state->captC_num_mem_regions);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "acq_start_addr", state->acq_start_addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "acq_mem_region_size", state->acq_mem_region_size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "acq_num_mem_regions", state->acq_num_mem_regions);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "capt_reserve_one_mem_region",
+ state->capt_reserve_one_mem_region);
+
+ return;
+}
+
+static void debug_print_isys_state(input_system_state_t *state)
+{
+ int i;
+
+ assert(state);
+ ia_css_debug_dtrace(2, "InputSystem State:\n");
+
+ /* configuration */
+ ia_css_debug_dtrace(2, "\tConfiguration:\n");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_multiCastA_sel", state->str_multicastA_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_multicastB_sel", state->str_multicastB_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_multicastC_sel", state->str_multicastC_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_mux_sel", state->str_mux_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_mon_status", state->str_mon_status);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_mon_irq_cond", state->str_mon_irq_cond);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_mon_irq_en", state->str_mon_irq_en);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "isys_srst", state->isys_srst);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "isys_slv_reg_srst", state->isys_slv_reg_srst);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_deint_portA_cnt", state->str_deint_portA_cnt);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_deint_portB_cnd", state->str_deint_portB_cnt);
+ /* end of configuration */
+
+ /* capture unit state */
+ for (i = 0; i < N_CAPTURE_UNIT_ID; i++) {
+ capture_unit_state_t *capture_unit_state;
+
+ ia_css_debug_dtrace(2, "\tCaptureUnit %d State:\n", i);
+
+ capture_unit_state = &state->capture_unit[i];
+ debug_print_isys_capture_unit_state(capture_unit_state);
+ }
+ /* end of capture unit state */
+
+ /* acquisition unit state */
+ for (i = 0; i < N_ACQUISITION_UNIT_ID; i++) {
+ acquisition_unit_state_t *acquisition_unit_state;
+
+ ia_css_debug_dtrace(2, "\tAcquisitionUnit %d State:\n", i);
+
+ acquisition_unit_state = &state->acquisition_unit[i];
+ debug_print_isys_acquisition_unit_state(acquisition_unit_state);
+ }
+ /* end of acquisition unit state */
+
+ /* control unit state */
+ for (i = 0; i < N_CTRL_UNIT_ID; i++) {
+ ia_css_debug_dtrace(2, "\tControlUnit %d State:\n", i);
+
+ debug_print_isys_ctrl_unit_state(&state->ctrl_unit_state[i]);
+ }
+ /* end of control unit state */
+}
+
+void ia_css_debug_dump_isys_state(void)
+{
+ input_system_state_t state;
+
+ input_system_get_state(INPUT_SYSTEM0_ID, &state);
+ debug_print_isys_state(&state);
+
+ return;
+}
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+void ia_css_debug_dump_isys_state(void)
+{
+	/* Android compilation fails if this is made a local variable:
+	   stack size on Android is limited to 2k and this structure
+	   is around 3.5K. A malloc could be done instead of the static
+	   allocation, but if this call is made too often it would
+	   fragment memory, hence the fixed allocation */
+ static input_system_state_t state;
+
+ input_system_get_state(INPUT_SYSTEM0_ID, &state);
+ input_system_dump_state(INPUT_SYSTEM0_ID, &state);
+}
+#endif
+
+void ia_css_debug_dump_debug_info(const char *context)
+{
+ if (!context)
+ context = "No Context provided";
+
+ ia_css_debug_dtrace(2, "CSS Debug Info dump [Context = %s]\n", context);
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ ia_css_debug_dump_rx_state();
+#endif
+#if !defined(HAS_NO_INPUT_FORMATTER) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ ia_css_debug_dump_if_state();
+#endif
+ ia_css_debug_dump_isp_state();
+ ia_css_debug_dump_isp_sp_fifo_state();
+ ia_css_debug_dump_isp_gdc_fifo_state();
+ ia_css_debug_dump_sp_state();
+ ia_css_debug_dump_perf_counters();
+
+#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
+ sh_css_dump_thread_wait_info();
+ sh_css_dump_pipe_stage_info();
+ sh_css_dump_pipe_stripe_info();
+#endif
+ ia_css_debug_dump_dma_isp_fifo_state();
+ ia_css_debug_dump_dma_sp_fifo_state();
+ ia_css_debug_dump_dma_state();
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ ia_css_debug_dump_isys_state();
+
+ {
+ irq_controller_state_t state;
+
+ irq_controller_get_state(IRQ2_ID, &state);
+
+ ia_css_debug_dtrace(2, "\t%-32s:\n",
+ "Input System IRQ Controller State");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_edge", state.irq_edge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_mask", state.irq_mask);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_status", state.irq_status);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_enable", state.irq_enable);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_level_not_pulse",
+ state.irq_level_not_pulse);
+ }
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+ ia_css_debug_dump_isys_state();
+#endif
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ ia_css_debug_tagger_state();
+#endif
+ return;
+}
+
+/* This function is for debug use only: it can make the SP go to a sleep
+   state after each frame, so that the user can dump the stable SP dmem.
+   It must be called after ia_css_start_sp()
+   and before sh_css_init_buffer_queues()
+*/
+void ia_css_debug_enable_sp_sleep_mode(enum ia_css_sp_sleep_mode mode)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_sleep_mode;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode;
+
+	(void)HIVE_ADDR_sp_sleep_mode; /* Suppress warnings in CRUN */
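+	/* sp_address_of(var) expands to the local HIVE_ADDR_<var>
+	 * variable, which is why the seemingly unused variable above
+	 * is needed for the store below. */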
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_sleep_mode),
+ (uint32_t)mode);
+}
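+
+/*
+ * A minimal usage sketch (the mode value and the dmem-dump step are
+ * placeholders, not part of this API; see enum ia_css_sp_sleep_mode for
+ * the actual modes):
+ *
+ *	ia_css_start_sp();
+ *	ia_css_debug_enable_sp_sleep_mode(mode);
+ *	sh_css_init_buffer_queues();
+ *	... run a frame, then dump the now-stable SP dmem ...
+ *	ia_css_debug_wake_up_sp();	(restarts the SP afterwards)
+ */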
+
+void ia_css_debug_wake_up_sp(void)
+{
+ /*hrt_ctl_start(SP); */
+ sp_ctrl_setbit(SP0_ID, SP_SC_REG, SP_START_BIT);
+}
+
+#if !defined(IS_ISP_2500_SYSTEM)
+#define FIND_DMEM_PARAMS_TYPE(stream, kernel, type) \
+ (struct HRTCAT(HRTCAT(sh_css_isp_, type), _params) *) \
+ findf_dmem_params(stream, offsetof(struct ia_css_memory_offsets, dmem.kernel))
+
+#define FIND_DMEM_PARAMS(stream, kernel) FIND_DMEM_PARAMS_TYPE(stream, kernel, kernel)
+
+/* Find a stage that supports the kernel and return the parameters for that kernel */
+static char *
+findf_dmem_params(struct ia_css_stream *stream, short idx)
+{
+ int i;
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+ struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe);
+ struct ia_css_pipeline_stage *stage;
+
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+			struct ia_css_binary *binary = stage->binary;
+			short *offsets;
+			short dmem_offset;
+			const struct ia_css_host_data *isp_data;
+
+			/* firmware-only stages have no binary; skip them */
+			if (!binary)
+				continue;
+			offsets = (short *)&binary->info->mem_offsets.offsets.param->dmem;
+			dmem_offset = offsets[idx];
+			isp_data = ia_css_isp_param_get_mem_init(&binary->mem_params,
+				IA_CSS_PARAM_CLASS_PARAM, IA_CSS_ISP_DMEM0);
+ if (dmem_offset < 0)
+ continue;
+ return &isp_data->address[dmem_offset];
+ }
+ }
+ return NULL;
+}
+#endif
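+
+/*
+ * Usage sketch for the lookup above, mirroring the calls in
+ * ia_css_debug_dump_isp_params() below (the wb kernel is one example; the
+ * struct name follows from the macro's HRTCAT expansion):
+ *
+ *	struct sh_css_isp_wb_params *wb = FIND_DMEM_PARAMS(stream, wb);
+ *
+ *	if (wb)
+ *		ia_css_wb_dump(wb, IA_CSS_DEBUG_VERBOSE);
+ *
+ * FIND_DMEM_PARAMS_TYPE() covers kernels whose parameter struct name
+ * differs from the offset name, e.g. FIND_DMEM_PARAMS_TYPE(stream, nr, ynr).
+ */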
+
+void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
+ unsigned int enable)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "ISP PARAMETERS:\n");
+#if defined(IS_ISP_2500_SYSTEM)
+ (void)enable;
+ (void)stream;
+#else
+
+ assert(stream);
+ if ((enable & IA_CSS_DEBUG_DUMP_FPN)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_fpn_dump(FIND_DMEM_PARAMS(stream, fpn), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_OB)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_ob_dump(FIND_DMEM_PARAMS(stream, ob), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_SC)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_sc_dump(FIND_DMEM_PARAMS(stream, sc), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_WB)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_wb_dump(FIND_DMEM_PARAMS(stream, wb), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_DP)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_dp_dump(FIND_DMEM_PARAMS(stream, dp), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_BNR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_bnr_dump(FIND_DMEM_PARAMS(stream, bnr), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_S3A)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_s3a_dump(FIND_DMEM_PARAMS(stream, s3a), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_DE)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_de_dump(FIND_DMEM_PARAMS(stream, de), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_YNR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_nr_dump(FIND_DMEM_PARAMS_TYPE(stream, nr, ynr), IA_CSS_DEBUG_VERBOSE);
+ ia_css_yee_dump(FIND_DMEM_PARAMS(stream, yee), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_CSC)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_csc_dump(FIND_DMEM_PARAMS(stream, csc), IA_CSS_DEBUG_VERBOSE);
+ ia_css_yuv2rgb_dump(FIND_DMEM_PARAMS_TYPE(stream, yuv2rgb, csc),
+ IA_CSS_DEBUG_VERBOSE);
+ ia_css_rgb2yuv_dump(FIND_DMEM_PARAMS_TYPE(stream, rgb2yuv, csc),
+ IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_GC)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_gc_dump(FIND_DMEM_PARAMS(stream, gc), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_TNR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_tnr_dump(FIND_DMEM_PARAMS(stream, tnr), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_ANR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_anr_dump(FIND_DMEM_PARAMS(stream, anr), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_CE)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_ce_dump(FIND_DMEM_PARAMS(stream, ce), IA_CSS_DEBUG_VERBOSE);
+ }
+#endif
+}
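+
+/*
+ * Typical calls, as a sketch: dump every parameter class for a stream at
+ * verbose level, or OR individual IA_CSS_DEBUG_DUMP_* flags together:
+ *
+ *	ia_css_debug_dump_isp_params(stream, IA_CSS_DEBUG_DUMP_ALL);
+ *	ia_css_debug_dump_isp_params(stream,
+ *				     IA_CSS_DEBUG_DUMP_WB | IA_CSS_DEBUG_DUMP_GC);
+ */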
+
+void sh_css_dump_sp_raw_copy_linecount(bool reduced)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_raw_copy_line_count;
+ s32 raw_copy_line_count;
+ static s32 prev_raw_copy_line_count = -1;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_raw_copy_line_count =
+ fw->info.sp.raw_copy_line_count;
+
+ (void)HIVE_ADDR_raw_copy_line_count;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(raw_copy_line_count),
+ &raw_copy_line_count,
+ sizeof(raw_copy_line_count));
+
+ /* only indicate if copy loop is active */
+ if (reduced)
+ raw_copy_line_count = (raw_copy_line_count < 0) ? raw_copy_line_count : 1;
+ /* do the handling */
+ if (prev_raw_copy_line_count != raw_copy_line_count) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "sh_css_dump_sp_raw_copy_linecount() line_count=%d\n",
+ raw_copy_line_count);
+ prev_raw_copy_line_count = raw_copy_line_count;
+ }
+}
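+
+/*
+ * With reduced == true any non-negative line count collapses to 1, so the
+ * trace only reports whether the copy loop is active, and only transitions
+ * are printed; a (hypothetical) polling loop can therefore call
+ * sh_css_dump_sp_raw_copy_linecount(true) at a high rate without flooding
+ * the log.
+ */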
+
+void ia_css_debug_dump_isp_binary(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_pipeline_sp_curr_binary_id;
+ u32 curr_binary_id;
+ static u32 prev_binary_id = 0xFFFFFFFF;
+ static u32 sample_count;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_pipeline_sp_curr_binary_id = fw->info.sp.curr_binary_id;
+
+ (void)HIVE_ADDR_pipeline_sp_curr_binary_id;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(pipeline_sp_curr_binary_id),
+ &curr_binary_id,
+ sizeof(curr_binary_id));
+
+ /* do the handling */
+ sample_count++;
+ if (prev_binary_id != curr_binary_id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "sh_css_dump_isp_binary() pipe_id=%d, binary_id=%d, sample_count=%d\n",
+ (curr_binary_id >> 16),
+ (curr_binary_id & 0x0ffff),
+ sample_count);
+ sample_count = 0;
+ prev_binary_id = curr_binary_id;
+ }
+}
+
+void ia_css_debug_dump_perf_counters(void)
+{
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ const struct ia_css_fw_info *fw;
+ int i;
+ unsigned int HIVE_ADDR_ia_css_isys_sp_error_cnt;
+	/* 3 capture units and 1 acquisition unit */
+	s32 ia_css_sp_input_system_error_cnt[N_MIPI_PORT_ID + 1];
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "Input System Error Counters:\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_ia_css_isys_sp_error_cnt =
+ fw->info.sp.perf_counter_input_system_error;
+
+ (void)HIVE_ADDR_ia_css_isys_sp_error_cnt;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_isys_sp_error_cnt),
+ &ia_css_sp_input_system_error_cnt,
+ sizeof(ia_css_sp_input_system_error_cnt));
+
+ for (i = 0; i < N_MIPI_PORT_ID + 1; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\tport[%d] = %d\n",
+ i, ia_css_sp_input_system_error_cnt[i]);
+ }
+#endif
+}
+
+/*
+
+void sh_css_init_ddr_debug_queue(void)
+{
+ hrt_vaddress ddr_debug_queue_addr =
+ mmgr_malloc(sizeof(debug_data_ddr_t));
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_debug_buffer_ddr_address;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_debug_buffer_ddr_address =
+ fw->info.sp.debug_buffer_ddr_address;
+
+ (void)HIVE_ADDR_debug_buffer_ddr_address;
+
+ debug_buffer_ddr_init(ddr_debug_queue_addr);
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(debug_buffer_ddr_address),
+ (uint32_t)(ddr_debug_queue_addr));
+}
+
+void sh_css_load_ddr_debug_queue(void)
+{
+ debug_synch_queue_ddr();
+}
+
+void ia_css_debug_dump_ddr_debug_queue(void)
+{
+ int i;
+ sh_css_load_ddr_debug_queue();
+ for (i = 0; i < DEBUG_BUF_SIZE; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "ddr_debug_queue[%d] = 0x%x\n",
+ i, debug_data_ptr->buf[i]);
+ }
+}
+*/
+
+/*
+ * @brief Initialize the debug mode.
+ * Refer to "ia_css_debug.h" for more details.
+ */
+bool ia_css_debug_mode_init(void)
+{
+ bool rc;
+
+ rc = sh_css_sp_init_dma_sw_reg(0);
+ return rc;
+}
+
+/*
+ * @brief Disable the DMA channel.
+ * Refer to "ia_css_debug.h" for more details.
+ */
+bool
+ia_css_debug_mode_disable_dma_channel(int dma_id,
+ int channel_id, int request_type)
+{
+ bool rc;
+
+ rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, false);
+
+ return rc;
+}
+
+/*
+ * @brief Enable the DMA channel.
+ * Refer to "ia_css_debug.h" for more details.
+ */
+bool
+ia_css_debug_mode_enable_dma_channel(int dma_id,
+ int channel_id, int request_type)
+{
+ bool rc;
+
+ rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, true);
+
+ return rc;
+}
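+
+/*
+ * Sketch of the intended sequence (the channel and request_type values
+ * are placeholders; see ia_css_debug.h for the request type encoding):
+ *
+ *	ia_css_debug_mode_init();
+ *	ia_css_debug_mode_disable_dma_channel(DMA0_ID, ch, req);
+ *	... reproduce the problem with the channel masked ...
+ *	ia_css_debug_mode_enable_dma_channel(DMA0_ID, ch, req);
+ */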
+
+static
+void dtrace_dot(const char *fmt, ...)
+{
+ va_list ap;
+
+ assert(fmt);
+ va_start(ap, fmt);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_START);
+ ia_css_debug_vdtrace(IA_CSS_DEBUG_INFO, fmt, ap);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_END);
+ va_end(ap);
+}
+
+#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
+void sh_css_dump_thread_wait_info(void)
+{
+ const struct ia_css_fw_info *fw;
+ int i;
+ unsigned int HIVE_ADDR_sp_thread_wait;
+ s32 sp_thread_wait[MAX_THREAD_NUM];
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "SEM WAITS:\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_thread_wait =
+ fw->info.sp.debug_wait;
+
+ (void)HIVE_ADDR_sp_thread_wait;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_thread_wait),
+ &sp_thread_wait,
+ sizeof(sp_thread_wait));
+ for (i = 0; i < MAX_THREAD_NUM; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "\twait[%d] = 0x%X\n",
+ i, sp_thread_wait[i]);
+ }
+}
+
+void sh_css_dump_pipe_stage_info(void)
+{
+ const struct ia_css_fw_info *fw;
+ int i;
+ unsigned int HIVE_ADDR_sp_pipe_stage;
+ s32 sp_pipe_stage[MAX_THREAD_NUM];
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STAGE:\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_pipe_stage =
+ fw->info.sp.debug_stage;
+
+ (void)HIVE_ADDR_sp_pipe_stage;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_pipe_stage),
+ &sp_pipe_stage,
+ sizeof(sp_pipe_stage));
+ for (i = 0; i < MAX_THREAD_NUM; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "\tstage[%d] = %d\n",
+ i, sp_pipe_stage[i]);
+ }
+}
+
+void sh_css_dump_pipe_stripe_info(void)
+{
+ const struct ia_css_fw_info *fw;
+ int i;
+ unsigned int HIVE_ADDR_sp_pipe_stripe;
+ s32 sp_pipe_stripe[MAX_THREAD_NUM];
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STRIPE:\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_pipe_stripe =
+ fw->info.sp.debug_stripe;
+
+ (void)HIVE_ADDR_sp_pipe_stripe;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_pipe_stripe),
+ &sp_pipe_stripe,
+ sizeof(sp_pipe_stripe));
+ for (i = 0; i < MAX_THREAD_NUM; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "\tstripe[%d] = %d\n",
+ i, sp_pipe_stripe[i]);
+ }
+}
+#endif
+
+static void
+ia_css_debug_pipe_graph_dump_frame(
+ struct ia_css_frame *frame,
+ enum ia_css_pipe_id id,
+ char const *blob_name,
+ char const *frame_name,
+ bool in_frame)
+{
+ char bufinfo[100];
+
+ if (frame->dynamic_queue_id == SH_CSS_INVALID_QUEUE_ID) {
+ snprintf(bufinfo, sizeof(bufinfo), "Internal");
+ } else {
+ snprintf(bufinfo, sizeof(bufinfo), "Queue: %s %s",
+ pipe_id_to_str[id],
+ queue_id_to_str[frame->dynamic_queue_id]);
+ }
+ dtrace_dot(
+ "node [shape = box, fixedsize=true, width=2, height=0.7]; \"%p\" [label = \"%s\\n%d(%d) x %d, %dbpp\\n%s\"];",
+ frame,
+ debug_frame_format2str(frame->info.format),
+ frame->info.res.width,
+ frame->info.padded_width,
+ frame->info.res.height,
+ frame->info.raw_bit_depth,
+ bufinfo);
+
+ if (in_frame) {
+ dtrace_dot(
+ "\"%p\"->\"%s(pipe%d)\" [label = %s_frame];",
+ frame,
+ blob_name, id, frame_name);
+ } else {
+ dtrace_dot(
+ "\"%s(pipe%d)\"->\"%p\" [label = %s_frame];",
+ blob_name, id,
+ frame,
+ frame_name);
+ }
+}
+
+void
+ia_css_debug_pipe_graph_dump_prologue(void)
+{
+ dtrace_dot("digraph sh_css_pipe_graph {");
+ dtrace_dot("rankdir=LR;");
+
+ dtrace_dot("fontsize=9;");
+ dtrace_dot("label = \"\\nEnable options: rp=reduced pipe, vfve=vf_veceven, dvse=dvs_envelope, dvs6=dvs_6axis, bo=block_out, fbds=fixed_bayer_ds, bf6=bayer_fir_6db, rawb=raw_binning, cont=continuous, disc=dis_crop\\n"
+ "dp2a=dp_2adjacent, outp=output, outt=out_table, reff=ref_frame, par=params, gam=gamma, cagdc=ca_gdc, ispa=isp_addresses, inf=in_frame, outf=out_frame, hs=high_speed, inpc=input_chunking\"");
+}
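+
+/*
+ * The text emitted between the DPG_START/DPG_END markers is plain
+ * Graphviz dot syntax; a typical (hypothetical) workflow is to extract
+ * the marked lines from the kernel log into a file and render it, e.g.:
+ *
+ *	dot -Tpng pipe_graph.dot -o pipe_graph.png
+ */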
+
+void ia_css_debug_pipe_graph_dump_epilogue(void)
+{
+ if (strlen(ring_buffer) > 0) {
+ dtrace_dot(ring_buffer);
+ }
+
+ if (pg_inst.stream_format != N_ATOMISP_INPUT_FORMAT) {
+ /* An input stream format has been set so assume we have
+ * an input system and sensor
+ */
+
+ dtrace_dot(
+ "node [shape = doublecircle, fixedsize=true, width=2.5]; \"input_system\" [label = \"Input system\"];");
+
+ dtrace_dot(
+ "\"input_system\"->\"%s\" [label = \"%s\"];",
+ dot_id_input_bin, debug_stream_format2str(pg_inst.stream_format));
+
+ dtrace_dot(
+ "node [shape = doublecircle, fixedsize=true, width=2.5]; \"sensor\" [label = \"Sensor\"];");
+
+ dtrace_dot(
+ "\"sensor\"->\"input_system\" [label = \"%s\\n%d x %d\\n(%d x %d)\"];",
+ debug_stream_format2str(pg_inst.stream_format),
+ pg_inst.width, pg_inst.height,
+ pg_inst.eff_width, pg_inst.eff_height);
+ }
+
+ dtrace_dot("}");
+
+ /* Reset temp strings */
+ memset(dot_id_input_bin, 0, sizeof(dot_id_input_bin));
+ memset(ring_buffer, 0, sizeof(ring_buffer));
+
+ pg_inst.do_init = true;
+ pg_inst.width = 0;
+ pg_inst.height = 0;
+ pg_inst.eff_width = 0;
+ pg_inst.eff_height = 0;
+ pg_inst.stream_format = N_ATOMISP_INPUT_FORMAT;
+}
+
+void
+ia_css_debug_pipe_graph_dump_stage(
+ struct ia_css_pipeline_stage *stage,
+ enum ia_css_pipe_id id)
+{
+ char blob_name[SH_CSS_MAX_BINARY_NAME + 10] = "<unknown type>";
+ char const *bin_type = "<unknown type>";
+ int i;
+
+ assert(stage);
+ if (stage->sp_func != IA_CSS_PIPELINE_NO_FUNC)
+ return;
+
+ if (pg_inst.do_init) {
+ ia_css_debug_pipe_graph_dump_prologue();
+ pg_inst.do_init = false;
+ }
+
+ if (stage->binary) {
+ bin_type = "binary";
+ if (stage->binary->info->blob)
+ snprintf(blob_name, sizeof(blob_name), "%s_stage%d",
+ stage->binary->info->blob->name, stage->stage_num);
+ } else if (stage->firmware) {
+ bin_type = "firmware";
+ strncpy_s(blob_name, sizeof(blob_name),
+ IA_CSS_EXT_ISP_PROG_NAME(stage->firmware), sizeof(blob_name));
+ }
+
+ /* Guard in case of binaries that don't have any binary_info */
+ if (stage->binary_info) {
+ char enable_info1[100];
+ char enable_info2[100];
+ char enable_info3[100];
+ char enable_info[200];
+ struct ia_css_binary_info *bi = stage->binary_info;
+
+ /* Split it in 2 function-calls to keep the amount of
+ * parameters per call "reasonable"
+ */
+ snprintf(enable_info1, sizeof(enable_info1),
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ bi->enable.reduced_pipe ? "rp," : "",
+ bi->enable.vf_veceven ? "vfve," : "",
+ bi->enable.dis ? "dis," : "",
+ bi->enable.dvs_envelope ? "dvse," : "",
+ bi->enable.uds ? "uds," : "",
+ bi->enable.dvs_6axis ? "dvs6," : "",
+ bi->enable.block_output ? "bo," : "",
+ bi->enable.ds ? "ds," : "",
+ bi->enable.bayer_fir_6db ? "bf6," : "",
+ bi->enable.raw_binning ? "rawb," : "",
+ bi->enable.continuous ? "cont," : "",
+ bi->enable.s3a ? "s3a," : "",
+ bi->enable.fpnr ? "fpnr," : "",
+ bi->enable.sc ? "sc," : ""
+ );
+
+ snprintf(enable_info2, sizeof(enable_info2),
+ "%s%s%s%s%s%s%s%s%s%s%s",
+ bi->enable.macc ? "macc," : "",
+ bi->enable.output ? "outp," : "",
+ bi->enable.ref_frame ? "reff," : "",
+ bi->enable.tnr ? "tnr," : "",
+ bi->enable.xnr ? "xnr," : "",
+ bi->enable.params ? "par," : "",
+ bi->enable.ca_gdc ? "cagdc," : "",
+ bi->enable.isp_addresses ? "ispa," : "",
+ bi->enable.in_frame ? "inf," : "",
+ bi->enable.out_frame ? "outf," : "",
+ bi->enable.high_speed ? "hs," : ""
+ );
+
+ /* And merge them into one string */
+ snprintf(enable_info, sizeof(enable_info), "%s%s",
+ enable_info1, enable_info2);
+ {
+ int l, p;
+ char *ei = enable_info;
+
+ l = strlen(ei);
+
+ /* Replace last ',' with \0 if present */
+ if (l && enable_info[l - 1] == ',')
+ enable_info[--l] = '\0';
+
+ if (l > ENABLE_LINE_MAX_LENGTH) {
+ /* Too big for one line, find last comma */
+ p = ENABLE_LINE_MAX_LENGTH;
+ while (ei[p] != ',')
+ p--;
+ /* Last comma found, copy till that comma */
+ strncpy_s(enable_info1,
+ sizeof(enable_info1),
+ ei, p);
+ enable_info1[p] = '\0';
+
+ ei += p + 1;
+ l = strlen(ei);
+
+ if (l <= ENABLE_LINE_MAX_LENGTH) {
+ /* The 2nd line fits */
+ /* we cannot use ei as argument because
+ * it is not guaranteed dword aligned
+ */
+ strncpy_s(enable_info2,
+ sizeof(enable_info2),
+ ei, l);
+ enable_info2[l] = '\0';
+ snprintf(enable_info, sizeof(enable_info), "%s\\n%s",
+ enable_info1, enable_info2);
+
+ } else {
+ /* 2nd line is still too long */
+ p = ENABLE_LINE_MAX_LENGTH;
+ while (ei[p] != ',')
+ p--;
+ strncpy_s(enable_info2,
+ sizeof(enable_info2),
+ ei, p);
+ enable_info2[p] = '\0';
+ ei += p + 1;
+ l = strlen(ei);
+
+ if (l <= ENABLE_LINE_MAX_LENGTH) {
+ /* The 3rd line fits */
+ /* we cannot use ei as argument because
+ * it is not guaranteed dword aligned
+ */
+ strcpy_s(enable_info3,
+ sizeof(enable_info3), ei);
+ enable_info3[l] = '\0';
+ snprintf(enable_info, sizeof(enable_info),
+ "%s\\n%s\\n%s",
+ enable_info1, enable_info2,
+ enable_info3);
+ } else {
+ /* 3rd line is still too long; stop
+ * splitting and dump the whole
+ * remainder on it
+ */
+ strcpy_s(enable_info3,
+ sizeof(enable_info3), ei);
+ snprintf(enable_info, sizeof(enable_info),
+ "%s\\n%s\\n%s",
+ enable_info1, enable_info2,
+ enable_info3);
+ }
+ }
+ }
+ }
+
+ dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, label=\"%s\\n%s\\n\\n%s\"]; \"%s(pipe%d)\"",
+ bin_type, blob_name, enable_info, blob_name, id);
+ } else {
+ dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, label=\"%s\\n%s\\n\"]; \"%s(pipe%d)\"",
+ bin_type, blob_name, blob_name, id);
+ }
+
+ if (stage->stage_num == 0) {
+ /*
+ * There are some implicit assumptions about which bin is the
+ * input binary, e.g. which one is connected to the input system.
+ * Priority:
+ * 1) sp_raw_copy bin has highest priority
+ * 2) First stage==0 binary of preview, video or capture
+ */
+ if (strlen(dot_id_input_bin) == 0) {
+ snprintf(dot_id_input_bin, sizeof(dot_id_input_bin),
+ "%s(pipe%d)", blob_name, id);
+ }
+ }
+
+ if (stage->args.in_frame) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.in_frame, id, blob_name,
+ "in", true);
+ }
+
+ for (i = 0; i < NUM_TNR_FRAMES; i++) {
+ if (stage->args.tnr_frames[i]) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.tnr_frames[i], id,
+ blob_name, "tnr_frame", true);
+ }
+ }
+
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) {
+ if (stage->args.delay_frames[i]) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.delay_frames[i], id,
+ blob_name, "delay_frame", true);
+ }
+ }
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (stage->args.out_frame[i]) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.out_frame[i], id, blob_name,
+ "out", false);
+ }
+ }
+
+ if (stage->args.out_vf_frame) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.out_vf_frame, id, blob_name,
+ "out_vf", false);
+ }
+}
+
+void
+ia_css_debug_pipe_graph_dump_sp_raw_copy(
+ struct ia_css_frame *out_frame)
+{
+ assert(out_frame);
+ if (pg_inst.do_init) {
+ ia_css_debug_pipe_graph_dump_prologue();
+ pg_inst.do_init = false;
+ }
+
+ dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, label=\"%s\\n%s\"]; \"%s(pipe%d)\"",
+ "sp-binary", "sp_raw_copy", "sp_raw_copy", 1);
+
+ snprintf(ring_buffer, sizeof(ring_buffer),
+ "node [shape = box, fixedsize=true, width=2, height=0.7]; \"%p\" [label = \"%s\\n%d(%d) x %d\\nRingbuffer\"];",
+ out_frame,
+ debug_frame_format2str(out_frame->info.format),
+ out_frame->info.res.width,
+ out_frame->info.padded_width,
+ out_frame->info.res.height);
+
+ dtrace_dot(ring_buffer);
+
+ dtrace_dot(
+ "\"%s(pipe%d)\"->\"%p\" [label = out_frame];",
+ "sp_raw_copy", 1, out_frame);
+
+ snprintf(dot_id_input_bin, sizeof(dot_id_input_bin), "%s(pipe%d)",
+ "sp_raw_copy", 1);
+}
+
+void
+ia_css_debug_pipe_graph_dump_stream_config(
+ const struct ia_css_stream_config *stream_config)
+{
+ pg_inst.width = stream_config->input_config.input_res.width;
+ pg_inst.height = stream_config->input_config.input_res.height;
+ pg_inst.eff_width = stream_config->input_config.effective_res.width;
+ pg_inst.eff_height = stream_config->input_config.effective_res.height;
+ pg_inst.stream_format = stream_config->input_config.format;
+}
+
+void
+ia_css_debug_dump_resolution(
+ const struct ia_css_resolution *res,
+ const char *label)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: =%d x =%d\n",
+ label, res->width, res->height);
+}
+
+void
+ia_css_debug_dump_frame_info(
+ const struct ia_css_frame_info *info,
+ const char *label)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", label);
+ ia_css_debug_dump_resolution(&info->res, "res");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "padded_width: %d\n",
+ info->padded_width);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n", info->format);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bit_depth: %d\n",
+ info->raw_bit_depth);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bayer_order: %d\n",
+ info->raw_bayer_order);
+}
+
+void
+ia_css_debug_dump_capture_config(
+ const struct ia_css_capture_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_xnr: %d\n",
+ config->enable_xnr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_raw_output: %d\n",
+ config->enable_raw_output);
+}
+
+void
+ia_css_debug_dump_pipe_extra_config(
+ const struct ia_css_pipe_extra_config *extra_config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__);
+ if (extra_config) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_raw_binning: %d\n",
+ extra_config->enable_raw_binning);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_yuv_ds: %d\n",
+ extra_config->enable_yuv_ds);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_high_speed: %d\n",
+ extra_config->enable_high_speed);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_dvs_6axis: %d\n",
+ extra_config->enable_dvs_6axis);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_reduced_pipe: %d\n",
+ extra_config->enable_reduced_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_fractional_ds: %d\n",
+ extra_config->enable_fractional_ds);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "disable_vf_pp: %d\n",
+ extra_config->disable_vf_pp);
+ }
+}
+
+void
+ia_css_debug_dump_pipe_config(
+ const struct ia_css_pipe_config *config)
+{
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("config = %p", config);
+ if (!config) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "isp_pipe_version: %d\n",
+ config->isp_pipe_version);
+ ia_css_debug_dump_resolution(&config->bayer_ds_out_res,
+ "bayer_ds_out_res");
+ ia_css_debug_dump_resolution(&config->capt_pp_in_res,
+ "capt_pp_in_res");
+ ia_css_debug_dump_resolution(&config->vf_pp_in_res, "vf_pp_in_res");
+
+ if (atomisp_hw_is_isp2401) {
+ ia_css_debug_dump_resolution(&config->output_system_in_res,
+ "output_system_in_res");
+ }
+ ia_css_debug_dump_resolution(&config->dvs_crop_out_res,
+ "dvs_crop_out_res");
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ ia_css_debug_dump_frame_info(&config->output_info[i], "output_info");
+ ia_css_debug_dump_frame_info(&config->vf_output_info[i],
+ "vf_output_info");
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "acc_extension: %p\n",
+ config->acc_extension);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_acc_stages: %d\n",
+ config->num_acc_stages);
+ ia_css_debug_dump_capture_config(&config->default_capture_config);
+ ia_css_debug_dump_resolution(&config->dvs_envelope, "dvs_envelope");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "dvs_frame_delay: %d\n",
+ config->dvs_frame_delay);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "acc_num_execs: %d\n",
+ config->acc_num_execs);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_dz: %d\n",
+ config->enable_dz);
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void
+ia_css_debug_dump_stream_config_source(
+ const struct ia_css_stream_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ switch (config->mode) {
+ case IA_CSS_INPUT_MODE_SENSOR:
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.port\n");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "port: %d\n",
+ config->source.port.port);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_lanes: %d\n",
+ config->source.port.num_lanes);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "timeout: %d\n",
+ config->source.port.timeout);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "compression: %d\n",
+ config->source.port.compression.type);
+ break;
+ case IA_CSS_INPUT_MODE_TPG:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.tpg\n");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "id: %d\n",
+ config->source.tpg.id);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n",
+ config->source.tpg.mode);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x_mask: 0x%x\n",
+ config->source.tpg.x_mask);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x_delta: %d\n",
+ config->source.tpg.x_delta);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "y_mask: 0x%x\n",
+ config->source.tpg.y_mask);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "y_delta: %d\n",
+ config->source.tpg.y_delta);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "xy_mask: 0x%x\n",
+ config->source.tpg.xy_mask);
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.prbs\n");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "id: %d\n",
+ config->source.prbs.id);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "h_blank: %d\n",
+ config->source.prbs.h_blank);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "v_blank: %d\n",
+ config->source.prbs.v_blank);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed: 0x%x\n",
+ config->source.prbs.seed);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed1: 0x%x\n",
+ config->source.prbs.seed1);
+ break;
+ default:
+ case IA_CSS_INPUT_MODE_FIFO:
+ case IA_CSS_INPUT_MODE_MEMORY:
+ break;
+ }
+}
+
+void
+ia_css_debug_dump_mipi_buffer_config(
+ const struct ia_css_mipi_buffer_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "size_mem_words: %d\n",
+ config->size_mem_words);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "nof_mipi_buffers: %d\n",
+ config->nof_mipi_buffers);
+}
+
+void
+ia_css_debug_dump_metadata_config(
+ const struct ia_css_metadata_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "data_type: %d\n",
+ config->data_type);
+ ia_css_debug_dump_resolution(&config->resolution, "resolution");
+}
+
+void
+ia_css_debug_dump_stream_config(
+ const struct ia_css_stream_config *config,
+ int num_pipes)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_pipes: %d\n", num_pipes);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
+ ia_css_debug_dump_stream_config_source(config);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "channel_id: %d\n",
+ config->channel_id);
+ ia_css_debug_dump_resolution(&config->input_config.input_res, "input_res");
+ ia_css_debug_dump_resolution(&config->input_config.effective_res,
+ "effective_res");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n",
+ config->input_config.format);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "bayer_order: %d\n",
+ config->input_config.bayer_order);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sensor_binning_factor: %d\n",
+ config->sensor_binning_factor);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pixels_per_clock: %d\n",
+ config->pixels_per_clock);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "online: %d\n",
+ config->online);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "init_num_cont_raw_buf: %d\n",
+ config->init_num_cont_raw_buf);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "target_num_cont_raw_buf: %d\n",
+ config->target_num_cont_raw_buf);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pack_raw_pixels: %d\n",
+ config->pack_raw_pixels);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "continuous: %d\n",
+ config->continuous);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "flash_gpio_pin: %d\n",
+ config->flash_gpio_pin);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "left_padding: %d\n",
+ config->left_padding);
+ ia_css_debug_dump_mipi_buffer_config(&config->mipi_buffer_config);
+ ia_css_debug_dump_metadata_config(&config->metadata_config);
+}
+
+/*
+ Trace support.
+
+ This tracer uses a buffer to trace the flow of the FW and to dump misc values (see below for details).
+ Currently, support is only for SKC.
+ To enable support for other platforms:
+ - Allocate a buffer for tracing in DMEM. The longer the better.
+ - Use the DBG_init routine in sp.hive.c to initialize the tracer with the selected address and size.
+ - Add trace points in the SP code wherever needed.
+ - Enable the dump below with the required address and required adjustments.
+ Dump is called at the end of ia_css_debug_dump_sp_state().
+*/
+
+/*
+ dump_trace(): dump the trace points from DMEM2.
+ For every trace point, the following are printed: index, major:minor and the 16-bit attached value.
+ The routine looks for the first 0, and then prints from it cyclically.
+ Data format in DMEM2:
+ first 4 DWORDS: header
+ DWORD 0: data description
+ byte 0: version
+ byte 1: number of threads (for future use)
+ byte 2+3: number of TPs
+ DWORD 1: command byte + data (for future use)
+ byte 0: command
+ byte 1-3: command signature
+ DWORD 2-3: additional data (for future use)
+ Following data is 4-byte oriented:
+ byte 0: major
+ byte 1: minor
+ byte 2-3: data
+*/
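+/*
+ Worked example (illustrative, assuming little-endian packing of the layout
+ above; the byte order within the 16-bit value is also an assumption): a
+ 32-bit trace word whose bytes are {0x05, 0x02, 0x34, 0x12} decodes as
+ major = 5, minor = 2, value = 0x1234.
+*/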
+#if TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP
+#ifndef ISP2401
+static void debug_dump_one_trace(TRACE_CORE_ID proc_id)
+#else
+static void debug_dump_one_trace(enum TRACE_CORE_ID proc_id)
+#endif
+{
+#if defined(HAS_TRACER_V2)
+ u32 start_addr;
+ u32 start_addr_data;
+ u32 item_size;
+ /* the ISP2400 and ISP2401 runtime paths below both need these */
+ u32 tmp;
+ u8 tid_val;
+#ifndef ISP2401
+ TRACE_DUMP_FORMAT dump_format;
+#else
+ enum TRACE_DUMP_FORMAT dump_format;
+#endif
+ int i, j, max_trace_points, point_num, limit = -1;
+ /* using a static buffer here as the driver has issues allocating memory */
+ static u32 trace_read_buf[TRACE_BUFF_SIZE] = {0};
+ static struct trace_header_t header;
+ u8 *header_arr;
+
+ /* read the header and parse it */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "~~~ Tracer ");
+ switch (proc_id) {
+ case TRACE_SP0_ID:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP0");
+ start_addr = TRACE_SP0_ADDR;
+ start_addr_data = TRACE_SP0_DATA_ADDR;
+ item_size = TRACE_SP0_ITEM_SIZE;
+ max_trace_points = TRACE_SP0_MAX_POINTS;
+ break;
+ case TRACE_SP1_ID:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP1");
+ start_addr = TRACE_SP1_ADDR;
+ start_addr_data = TRACE_SP1_DATA_ADDR;
+ item_size = TRACE_SP1_ITEM_SIZE;
+ max_trace_points = TRACE_SP1_MAX_POINTS;
+ break;
+ case TRACE_ISP_ID:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ISP");
+ start_addr = TRACE_ISP_ADDR;
+ start_addr_data = TRACE_ISP_DATA_ADDR;
+ item_size = TRACE_ISP_ITEM_SIZE;
+ max_trace_points = TRACE_ISP_MAX_POINTS;
+ break;
+ default:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "\t\ttraces are not supported for this processor ID - exiting\n");
+ return;
+ }
+
+ if (!atomisp_hw_is_isp2401) {
+ tmp = ia_css_device_load_uint32(start_addr);
+ point_num = (tmp >> 16) & 0xFFFF;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n", tmp & 0xFF,
+ point_num);
+ } else {
+ /* Loading byte-by-byte as using the master routine had issues */
+ header_arr = (uint8_t *)&header;
+ for (i = 0; i < (int)sizeof(struct trace_header_t); i++)
+ header_arr[i] = ia_css_device_load_uint8(start_addr + (i));
+
+ point_num = header.max_tracer_points;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n", header.version,
+ point_num);
+
+ tmp = header.version;
+ }
+ if ((tmp & 0xFF) != TRACER_VER) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tUnknown version - exiting\n");
+ return;
+ }
+ if (point_num > max_trace_points) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tToo many points - exiting\n");
+ return;
+ }
+ /* copy the TPs and find the first 0 */
+ for (i = 0; i < point_num; i++) {
+ trace_read_buf[i] = ia_css_device_load_uint32(start_addr_data +
+ (i * item_size));
+ if ((limit == (-1)) && (trace_read_buf[i] == 0))
+ limit = i;
+ }
+ if (atomisp_hw_is_isp2401) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Status:\n");
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "\tT%d: %3d (%02x) %6d (%04x) %10d (%08x)\n", i,
+ header.thr_status_byte[i], header.thr_status_byte[i],
+ header.thr_status_word[i], header.thr_status_word[i],
+ header.thr_status_dword[i], header.thr_status_dword[i]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Scratch:\n");
+ for (i = 0; i < MAX_SCRATCH_DATA; i++)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%10d (%08x) ",
+ header.scratch_debug[i], header.scratch_debug[i]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\n");
+ }
+ /* two 0s in the beginning: empty buffer */
+ if ((trace_read_buf[0] == 0) && (trace_read_buf[1] == 0)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tEmpty tracer - exiting\n");
+ return;
+ }
+ /* no overrun: start from 0 */
+ if ((limit == point_num - 1) ||
+ /* first 0 is at the end - border case */
+ (trace_read_buf[limit + 1] ==
+ 0)) /* did not make a full cycle after the memset */
+ limit = 0;
+ /* overrun: limit is the first non-zero after the first zero */
+ else
+ limit++;
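+ /*
+ * Worked example (illustrative): with point_num = 8 and a buffer
+ * {a, b, 0, c, d, e, f, g}, the first zero is at index 2, so the
+ * oldest entry c at index 3 is printed first, wrapping around to b.
+ */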
+
+ /* print the TPs */
+ for (i = 0; i < point_num; i++) {
+ j = (limit + i) % point_num;
+ if (trace_read_buf[j]) {
+ if (!atomisp_hw_is_isp2401) {
+ dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]);
+ } else {
+ tid_val = FIELD_TID_UNPACK(trace_read_buf[j]);
+ dump_format = TRACE_DUMP_FORMAT_POINT;
+
+ /*
+ * When the tid value is 111b, the data is interpreted differently:
+ * the tid value is ignored and the major field contains 2 bits (MSB)
+ * for the format type.
+ */
+ if (tid_val == FIELD_TID_SEL_FORMAT_PAT) {
+ dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]);
+ }
+ }
+ switch (dump_format) {
+ case TRACE_DUMP_FORMAT_POINT:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %d\n",
+ j, FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_MINOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_UNPACK(trace_read_buf[j]));
+ break;
+ /* ISP2400 */
+ case TRACE_DUMP_FORMAT_VALUE24_HEX:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x H\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ /* ISP2400 */
+ case TRACE_DUMP_FORMAT_VALUE24_DEC:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %d D\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ /* ISP2401 */
+ case TRACE_DUMP_FORMAT_POINT_NO_TID:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %x (%d)\n",
+ j,
+ FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
+ FIELD_MINOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_UNPACK(trace_read_buf[j]));
+ break;
+ /* ISP2401 */
+ case TRACE_DUMP_FORMAT_VALUE24:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x (%d)\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ case TRACE_DUMP_FORMAT_VALUE24_TIMING:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing %x\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ case TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing delta %x\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ default:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "no such trace dump format %d",
+ dump_format);
+ break;
+ }
+ }
+ }
+#else
+ (void)proc_id;
+#endif /* HAS_TRACER_V2 */
+}
+#endif /* TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP */
+
+void ia_css_debug_dump_trace(void)
+{
+#if TRACE_ENABLE_SP0
+ debug_dump_one_trace(TRACE_SP0_ID);
+#endif
+#if TRACE_ENABLE_SP1
+ debug_dump_one_trace(TRACE_SP1_ID);
+#endif
+#if TRACE_ENABLE_ISP
+ debug_dump_one_trace(TRACE_ISP_ID);
+#endif
+}
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+/* Tagger state dump function. The tagger is only available when the CSS
+ * contains an input system (2400 or 2401). */
+void ia_css_debug_tagger_state(void)
+{
+ unsigned int i;
+ unsigned int HIVE_ADDR_tagger_frames;
+ ia_css_tagger_buf_sp_elem_t tbuf_frames[MAX_CB_ELEMS_FOR_TAGGER];
+
+ HIVE_ADDR_tagger_frames = sh_css_sp_fw.info.sp.tagger_frames_addr;
+
+ /* This variable is not used in crun */
+ (void)HIVE_ADDR_tagger_frames;
+
+ /* 2400 and 2401 only have 1 SP, so the tagger lives on SP0 */
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(tagger_frames),
+ tbuf_frames,
+ sizeof(tbuf_frames));
+
+ ia_css_debug_dtrace(2, "Tagger Info:\n");
+ for (i = 0; i < MAX_CB_ELEMS_FOR_TAGGER; i++) {
+ ia_css_debug_dtrace(2, "\t tagger frame[%d]: exp_id=%d, marked=%d, locked=%d\n",
+ i, tbuf_frames[i].exp_id, tbuf_frames[i].mark, tbuf_frames[i].lock);
+ }
+}
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+/* ISP2401 */
+void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps)
+{
+ unsigned int pc;
+ unsigned int i;
+ hrt_data sc = sp_ctrl_load(id, SP_SC_REG);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Status reg: 0x%X\n", id, sc);
+ sc = sp_ctrl_load(id, SP_CTRL_SINK_REG);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Stall reg: 0x%X\n", id, sc);
+ for (i = 0; i < num_of_dumps; i++) {
+ pc = sp_ctrl_load(id, SP_PC_REG);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d PC: 0x%X\n", id, pc);
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h b/drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h
new file mode 100644
index 000000000000..1fcd0fadcac8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_EVENT_H
+#define _IA_CSS_EVENT_H
+
+#include <type_support.h>
+#include "sw_event_global.h" /*event macros.TODO : Change File Name..???*/
+
+bool ia_css_event_encode(
+ u8 *in,
+ u8 nr,
+ uint32_t *out);
+
+void ia_css_event_decode(
+ u32 event,
+ uint8_t *payload);
+
+#endif /*_IA_CSS_EVENT_H*/
diff --git a/drivers/staging/media/atomisp/pci/runtime/event/src/event.c b/drivers/staging/media/atomisp/pci/runtime/event/src/event.c
new file mode 100644
index 000000000000..c4578470ad8c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/event/src/event.c
@@ -0,0 +1,112 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "sh_css_sp.h"
+
+#include "dma.h" /* N_DMA_CHANNEL_ID */
+
+#include <type_support.h>
+#include "ia_css_binary.h"
+#include "sh_css_hrt.h"
+#include "sh_css_defs.h"
+#include "sh_css_internal.h"
+#include "ia_css_debug.h"
+#include "ia_css_debug_internal.h"
+#include "sh_css_legacy.h"
+
+#include "gdc_device.h" /* HRT_GDC_N */
+
+/*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */
+
+#include "memory_access.h"
+
+#include "assert_support.h"
+#include "platform_support.h" /* hrt_sleep() */
+
+#include "ia_css_queue.h" /* host_sp_enqueue_XXX */
+#include "ia_css_event.h" /* ia_css_event_encode */
+/*
+ * @brief Encode the information into the software-event.
+ * Refer to "sw_event_public.h" for details.
+ */
+bool ia_css_event_encode(
+ u8 *in,
+ u8 nr,
+ uint32_t *out)
+{
+ bool ret;
+ u32 nr_of_bits;
+ u32 i;
+
+ assert(in);
+ assert(out);
+ OP___assert(nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT);
+
+ /* initialize the output */
+ *out = 0;
+
+ /* get the number of bits per information */
+ nr_of_bits = sizeof(uint32_t) * 8 / nr;
+
+ /* compress all inputs into a single output */
+ for (i = 0; i < nr; i++) {
+ *out <<= nr_of_bits;
+ *out |= in[i];
+ }
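+ /* e.g. in = {0x12, 0x34, 0x56, 0x78} with nr = 4 packs to
+ * *out = 0x12345678 (in[0] ends up in the most significant bits) */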
+
+ /* get the return value */
+ ret = (nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT);
+
+ return ret;
+}
+
+void ia_css_event_decode(
+ u32 event,
+ uint8_t *payload)
+{
+ assert(payload[1] == 0);
+ assert(payload[2] == 0);
+ assert(payload[3] == 0);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_event_decode() enter:\n");
+
+ /* First decode according to the common case.
+ * In case of a PORT_EOF event we overwrite this with
+ * the specific values.
+ * This is somewhat ugly but probably somewhat efficient
+ * (and it avoids some code duplication).
+ */
+ payload[0] = event & 0xff; /*event_code */
+ payload[1] = (event >> 8) & 0xff;
+ payload[2] = (event >> 16) & 0xff;
+ payload[3] = 0;
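+ /* e.g. event = 0x00332211 yields payload = {0x11, 0x22, 0x33, 0x00}
+ * in this common case */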
+
+ switch (payload[0]) {
+ case SH_CSS_SP_EVENT_PORT_EOF:
+ payload[2] = 0;
+ payload[3] = (event >> 24) & 0xff;
+ break;
+
+ case SH_CSS_SP_EVENT_ACC_STAGE_COMPLETE:
+ case SH_CSS_SP_EVENT_TIMER:
+ case SH_CSS_SP_EVENT_FRAME_TAGGED:
+ case SH_CSS_SP_EVENT_FW_WARNING:
+ case SH_CSS_SP_EVENT_FW_ASSERT:
+ payload[3] = (event >> 24) & 0xff;
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h b/drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h
new file mode 100644
index 000000000000..8602398ede52
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_EVENTQ_H
+#define _IA_CSS_EVENTQ_H
+
+#include "ia_css_queue.h" /* queue APIs */
+
+/**
+ * @brief The host receives an event from the SP.
+ *
+ * @param[in] eventq_handle The event queue handle.
+ * @param[out] payload The received event payload.
+ * @return 0 - Successfully dequeued.
+ * @return EINVAL - Invalid argument.
+ * @return ENODATA - Queue is empty.
+ */
+int ia_css_eventq_recv(
+ ia_css_queue_t *eventq_handle,
+ uint8_t *payload);
+
+/**
+ * @brief The host sends an event to the SP.
+ * The caller of this API is blocked until the event
+ * has been sent.
+ *
+ * @param[in] eventq_handle The event queue handle.
+ * @param[in] evt_id The event ID.
+ * @param[in] evt_payload_0 The first event payload byte.
+ * @param[in] evt_payload_1 The second event payload byte.
+ * @param[in] evt_payload_2 The third event payload byte.
+ * @return 0 - Successfully enqueued.
+ * @return EINVAL - Invalid argument.
+ * @return ENOBUFS - Queue is full.
+ */
+int ia_css_eventq_send(
+ ia_css_queue_t *eventq_handle,
+ u8 evt_id,
+ u8 evt_payload_0,
+ u8 evt_payload_1,
+ uint8_t evt_payload_2);
+#endif /* _IA_CSS_EVENTQ_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c b/drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c
new file mode 100644
index 000000000000..0460f102d36f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c
@@ -0,0 +1,77 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "assert_support.h"
+#include "ia_css_queue.h" /* sp2host_dequeue_irq_event() */
+#include "ia_css_eventq.h"
+#include "ia_css_event.h" /* ia_css_event_encode()
+ ia_css_event_decode()
+ */
+#include "platform_support.h" /* hrt_sleep() */
+
+int ia_css_eventq_recv(
+ ia_css_queue_t *eventq_handle,
+ uint8_t *payload)
+{
+ u32 sp_event;
+ int error;
+
+ /* dequeue the IRQ event */
+ error = ia_css_queue_dequeue(eventq_handle, &sp_event);
+
+ /* check whether the IRQ event is available or not */
+ if (!error)
+ ia_css_event_decode(sp_event, payload);
+ return error;
+}
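+
+/*
+ * Typical usage (illustrative): callers poll ia_css_eventq_recv() until it
+ * stops returning ENODATA, then dispatch on payload[0], the event code
+ * filled in by ia_css_event_decode().
+ */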
+
+/*
+ * @brief The host sends an event to the SP.
+ * Refer to "sh_css_sp.h" for details.
+ */
+int ia_css_eventq_send(
+ ia_css_queue_t *eventq_handle,
+ u8 evt_id,
+ u8 evt_payload_0,
+ u8 evt_payload_1,
+ uint8_t evt_payload_2)
+{
+ u8 tmp[4];
+ u32 sw_event;
+ int error = ENOSYS;
+
+ /*
+ * Pack the event ID and the three payload bytes
+ * into a single 32-bit software event.
+ */
+ tmp[0] = evt_id;
+ tmp[1] = evt_payload_0;
+ tmp[2] = evt_payload_1;
+ tmp[3] = evt_payload_2;
+ ia_css_event_encode(tmp, 4, &sw_event);
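+ /* with this encoding, e.g. evt_id = 0x01 and payloads
+ * {0x0a, 0x0b, 0x0c} pack to sw_event = 0x010a0b0c */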
+
+ /* queue the software event (busy-waiting) */
+ for ( ; ; ) {
+ error = ia_css_queue_enqueue(eventq_handle, sw_event);
+ if (error != ENOBUFS) {
+ /* We were able to successfully send the event
+ * or had a real failure; return the status. */
+ break;
+ }
+ /* Wait until the queue is no longer full, then try again */
+ hrt_sleep();
+ }
+ return error;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
new file mode 100644
index 000000000000..613fa33ab930
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
@@ -0,0 +1,163 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FRAME_H__
+#define __IA_CSS_FRAME_H__
+
+/* ISP2401 */
+#include <ia_css_types.h>
+
+#include <ia_css_frame_format.h>
+#include <ia_css_frame_public.h>
+#include "dma.h"
+
+/*********************************************************************
+**** Frame INFO APIs
+**********************************************************************/
+/* @brief Sets the given width and minimum padded width in the frame info
+ *
+ * @param[in] info The frame info to be updated
+ * @param[in] width The width to be set
+ * @param[in] min_padded_width The minimum padded width to be set
+ * @return
+ */
+void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int min_padded_width);
+
+/* @brief Sets the given format in the frame info
+ *
+ * @param[in] info The frame info to be updated
+ * @param[in] format The format to be set
+ * @return
+ */
+void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
+ enum ia_css_frame_format format);
+
+/* @brief Initializes the frame info with the given parameters
+ *
+ * @param[in] info The frame info to be initialized
+ * @param[in] width The width to be set
+ * @param[in] height The height to be set
+ * @param[in] format The format to be set
+ * @param[in] aligned The minimum padded width to be set
+ * @return
+ */
+void ia_css_frame_info_init(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int aligned);
+
+/* @brief Checks whether two frame infos have the same resolution
+ *
+ * @param[in] info_a The first frame info to be compared
+ * @param[in] info_b The second frame info to be compared
+ * @return Returns true if the resolutions are equal
+ */
+bool ia_css_frame_info_is_same_resolution(
+ const struct ia_css_frame_info *info_a,
+ const struct ia_css_frame_info *info_b);
+
+/* @brief Checks whether the frame info is valid
+ *
+ * @param[in] info The frame info to be checked
+ * @return The error code.
+ */
+enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
+
+/*********************************************************************
+**** Frame APIs
+**********************************************************************/
+
+/* @brief Initializes the planes depending on the frame format
+ *
+ * @param[in] frame The frame whose planes are to be initialized
+ * @return The error code.
+ */
+enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
+
+/* @brief Frees an array of frames
+ *
+ * @param[in] num_frames The number of frames in the array to be freed
+ * @param[in] **frames_array The array of frames to be freed
+ * @return
+ */
+void ia_css_frame_free_multiple(unsigned int num_frames,
+ struct ia_css_frame **frames_array);
+
+/* @brief Allocate a CSS frame structure of the given size in bytes.
+ *
+ * @param[out] frame The allocated frame.
+ * @param[in] size_bytes The frame size in bytes.
+ * @param[in] contiguous Allocate memory physically contiguously or not.
+ * @return The error code.
+ *
+ * Allocate a frame using the given size in bytes.
+ * The frame structure is partially null initialized.
+ */
+enum ia_css_err ia_css_frame_allocate_with_buffer_size(
+ struct ia_css_frame **frame,
+ const unsigned int size_bytes,
+ const bool contiguous);
+
+/* @brief Checks whether two frames are of the same type
+ *
+ * @param[in] frame_a The first frame to be compared
+ * @param[in] frame_b The second frame to be compared
+ * @return Returns true if the frames are of the same type
+ */
+bool ia_css_frame_is_same_type(
+ const struct ia_css_frame *frame_a,
+ const struct ia_css_frame *frame_b);
+
+/* @brief Configure a DMA port from frame info
+ *
+ * @param[in] config The DMA port configuration
+ * @param[in] info The frame info
+ * @return
+ */
+void ia_css_dma_configure_from_info(
+ struct dma_port_config *config,
+ const struct ia_css_frame_info *info);
+
+/* ISP2401 */
+/* @brief Finds the cropping resolution
+ * This function finds the maximum cropping resolution in an input image,
+ * keeping the aspect ratio of the given output resolution. It calculates the
+ * coordinates for cropping from the center and returns the starting pixel
+ * location of the region in the input image, as well as the dimensions of
+ * the cropping resolution.
+ *
+ * @param
+ * @param[in] in_res Resolution of input image
+ * @param[in] out_res Resolution of output image
+ * @param[out] crop_res Crop resolution of input image
+ * @return Returns IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS on error
+ */
+enum ia_css_err
+ia_css_frame_find_crop_resolution(const struct ia_css_resolution *in_res,
+ const struct ia_css_resolution *out_res,
+ struct ia_css_resolution *crop_res);
+
+#endif /* __IA_CSS_FRAME_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h
new file mode 100644
index 000000000000..8861d07193bd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h
@@ -0,0 +1,115 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FRAME_COMM_H__
+#define __IA_CSS_FRAME_COMM_H__
+
+#include "type_support.h"
+#include "platform_support.h"
+#include "runtime/bufq/interface/ia_css_bufq_comm.h"
+#include <system_types.h> /* hrt_vaddress */
+
+/*
+ * These structs are derived from structs defined in ia_css_types.h
+ * (just take out the "_sp" from the struct name to get the "original")
+ * All the fields that are not needed by the SP are removed.
+ */
+struct ia_css_frame_sp_plane {
+ unsigned int offset; /* offset in bytes to start of frame data */
+ /* offset is wrt data in sh_css_sp_sp_frame */
+};
+
+struct ia_css_frame_sp_binary_plane {
+ unsigned int size;
+ struct ia_css_frame_sp_plane data;
+};
+
+struct ia_css_frame_sp_yuv_planes {
+ struct ia_css_frame_sp_plane y;
+ struct ia_css_frame_sp_plane u;
+ struct ia_css_frame_sp_plane v;
+};
+
+struct ia_css_frame_sp_nv_planes {
+ struct ia_css_frame_sp_plane y;
+ struct ia_css_frame_sp_plane uv;
+};
+
+struct ia_css_frame_sp_rgb_planes {
+ struct ia_css_frame_sp_plane r;
+ struct ia_css_frame_sp_plane g;
+ struct ia_css_frame_sp_plane b;
+};
+
+struct ia_css_frame_sp_plane6 {
+ struct ia_css_frame_sp_plane r;
+ struct ia_css_frame_sp_plane r_at_b;
+ struct ia_css_frame_sp_plane gr;
+ struct ia_css_frame_sp_plane gb;
+ struct ia_css_frame_sp_plane b;
+ struct ia_css_frame_sp_plane b_at_r;
+};
+
+struct ia_css_sp_resolution {
+ u16 width; /* width of valid data in pixels */
+ u16 height; /* height of valid data in lines */
+};
+
+/*
+ * Frame info struct. This describes the contents of an image frame buffer.
+ */
+struct ia_css_frame_sp_info {
+ struct ia_css_sp_resolution res;
+ u16 padded_width; /* stride of line in memory
+ (in pixels) */
+ unsigned char format; /* format of the frame data */
+ unsigned char raw_bit_depth; /* number of valid bits per pixel,
+ only valid for RAW bayer frames */
+ unsigned char raw_bayer_order; /* bayer order, only valid
+ for RAW bayer frames */
+ unsigned char padding[3]; /* Extend to 32 bit multiple */
+};
+
+struct ia_css_buffer_sp {
+ union {
+ hrt_vaddress xmem_addr;
+ enum sh_css_queue_id queue_id;
+ } buf_src;
+ enum ia_css_buffer_type buf_type;
+};
+
+struct ia_css_frame_sp {
+ struct ia_css_frame_sp_info info;
+ struct ia_css_buffer_sp buf_attr;
+ union {
+ struct ia_css_frame_sp_plane raw;
+ struct ia_css_frame_sp_plane rgb;
+ struct ia_css_frame_sp_rgb_planes planar_rgb;
+ struct ia_css_frame_sp_plane yuyv;
+ struct ia_css_frame_sp_yuv_planes yuv;
+ struct ia_css_frame_sp_nv_planes nv;
+ struct ia_css_frame_sp_plane6 plane6;
+ struct ia_css_frame_sp_binary_plane binary;
+ } planes;
+};
+
+void ia_css_frame_info_to_frame_sp_info(
+ struct ia_css_frame_sp_info *sp_info,
+ const struct ia_css_frame_info *info);
+
+void ia_css_resolution_to_sp_resolution(
+ struct ia_css_sp_resolution *sp_info,
+ const struct ia_css_resolution *info);
+
+#endif /*__IA_CSS_FRAME_COMM_H__*/
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
new file mode 100644
index 000000000000..fcd8b06034f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
@@ -0,0 +1,989 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_frame.h"
+#include <math_support.h>
+#include "assert_support.h"
+#include "ia_css_debug.h"
+#include "isp.h"
+#include "sh_css_internal.h"
+#include "memory_access.h"
+
+#define NV12_TILEY_TILE_WIDTH 128
+#define NV12_TILEY_TILE_HEIGHT 32
+
+/**************************************************************************
+** Static function declarations
+**************************************************************************/
+static void frame_init_plane(struct ia_css_frame_plane *plane,
+ unsigned int width,
+ unsigned int stride,
+ unsigned int height,
+ unsigned int offset);
+
+static void frame_init_single_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel);
+
+static void frame_init_raw_single_plane(
+ struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bits_per_pixel);
+
+static void frame_init_mipi_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel);
+
+static void frame_init_nv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ unsigned int bytes_per_element);
+
+static void frame_init_yuv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ bool swap_uv,
+ unsigned int bytes_per_element);
+
+static void frame_init_rgb_planes(struct ia_css_frame *frame,
+ unsigned int bytes_per_element);
+
+static void frame_init_qplane6_planes(struct ia_css_frame *frame);
+
+static enum ia_css_err frame_allocate_buffer_data(struct ia_css_frame *frame);
+
+static enum ia_css_err frame_allocate_with_data(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool contiguous);
+
+static struct ia_css_frame *frame_create(unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool contiguous,
+ bool valid);
+
+static unsigned
+ia_css_elems_bytes_from_info(
+ const struct ia_css_frame_info *info);
+
+/**************************************************************************
+** CSS API functions, exposed by ia_css.h
+**************************************************************************/
+
+void ia_css_frame_zero(struct ia_css_frame *frame)
+{
+ assert(frame);
+ mmgr_clear(frame->data, frame->data_bytes);
+}
+
+enum ia_css_err ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if (!frame || !info)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_from_info() enter:\n");
+ err =
+ ia_css_frame_allocate(frame, info->res.width, info->res.height,
+ info->format, info->padded_width,
+ info->raw_bit_depth);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_from_info() leave:\n");
+ return err;
+}
+
+enum ia_css_err ia_css_frame_allocate(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if (!frame || width == 0 || height == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate() enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n",
+ width, height, format, padded_width, raw_bit_depth);
+
+ err = frame_allocate_with_data(frame, width, height, format,
+ padded_width, raw_bit_depth, false);
+
+ if ((*frame) && err == IA_CSS_SUCCESS)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n", *frame,
+ (*frame)->data);
+ else
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n",
+ (void *)-1, (unsigned int)-1);
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_map(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info,
+ const void __user *data,
+ u16 attribute,
+ void *context)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *me;
+
+ assert(frame);
+
+ /* Create the frame structure */
+ err = ia_css_frame_create_from_info(&me, info);
+
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ /* use mmgr_mmap to map */
+ me->data = (ia_css_ptr) mmgr_mmap(data,
+ me->data_bytes,
+ attribute, context);
+ if (me->data == mmgr_NULL)
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (err != IA_CSS_SUCCESS) {
+ sh_css_free(me);
+ me = NULL;
+ }
+
+ *frame = me;
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_create_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *me;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_create_from_info() enter:\n");
+ if (!frame || !info) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_create_from_info() leave: invalid arguments\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ me = frame_create(info->res.width,
+ info->res.height,
+ info->format,
+ info->padded_width,
+ info->raw_bit_depth,
+ false,
+ false);
+ if (!me) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_create_from_info() leave: frame create failed\n");
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ err = ia_css_frame_init_planes(me);
+
+ if (err != IA_CSS_SUCCESS) {
+ sh_css_free(me);
+ me = NULL;
+ }
+
+ *frame = me;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_create_from_info() leave:\n");
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_set_data(struct ia_css_frame *frame,
+ const ia_css_ptr mapped_data,
+ size_t data_bytes)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_set_data() enter:\n");
+ if (!frame) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_set_data() leave: NULL frame\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* If we are setting valid data,
+ * make sure that there is enough
+ * room for the expected frame format.
+ */
+ if ((mapped_data != mmgr_NULL) && (frame->data_bytes > data_bytes)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_set_data() leave: invalid arguments\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ frame->data = mapped_data;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_frame_set_data() leave:\n");
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_contiguous() "
+ "enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n",
+ width, height, format, padded_width, raw_bit_depth);
+
+ err = frame_allocate_with_data(frame, width, height, format,
+ padded_width, raw_bit_depth, true);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_contiguous() leave: frame=%p\n",
+ frame ? *frame : (void *)-1);
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_allocate_contiguous_from_info(
+ struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(frame);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_contiguous_from_info() enter:\n");
+ err = ia_css_frame_allocate_contiguous(frame,
+ info->res.width,
+ info->res.height,
+ info->format,
+ info->padded_width,
+ info->raw_bit_depth);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_contiguous_from_info() leave:\n");
+ return err;
+}
+
+void ia_css_frame_free(struct ia_css_frame *frame)
+{
+ IA_CSS_ENTER_PRIVATE("frame = %p", frame);
+
+ if (frame) {
+ hmm_free(frame->data);
+ sh_css_free(frame);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/**************************************************************************
+** Module public functions
+**************************************************************************/
+
+enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info)
+{
+ assert(info);
+ if (info->res.width == 0 || info->res.height == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame)
+{
+ assert(frame);
+
+ switch (frame->info.format) {
+ case IA_CSS_FRAME_FORMAT_MIPI:
+ frame_init_mipi_plane(frame, &frame->planes.raw,
+ frame->info.res.height,
+ frame->info.padded_width,
+ frame->info.raw_bit_depth <= 8 ? 1 : 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ frame_init_raw_single_plane(frame, &frame->planes.raw,
+ frame->info.res.height,
+ frame->info.padded_width,
+ frame->info.raw_bit_depth);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW:
+ frame_init_single_plane(frame, &frame->planes.raw,
+ frame->info.res.height,
+ frame->info.padded_width,
+ frame->info.raw_bit_depth <= 8 ? 1 : 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ frame_init_single_plane(frame, &frame->planes.rgb,
+ frame->info.res.height,
+ frame->info.padded_width, 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ frame_init_single_plane(frame, &frame->planes.rgb,
+ frame->info.res.height,
+ frame->info.padded_width * 4, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ frame_init_rgb_planes(frame, 1);
+ break;
+ /* yuyv and uyvy have the same frame layout, only the data
+ * positioning differs.
+ */
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ frame_init_single_plane(frame, &frame->planes.yuyv,
+ frame->info.res.height,
+ frame->info.padded_width * 2, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ /* Needs 3 extra lines to allow vf_pp prefetching */
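+ /* e.g. a 1080-line frame is allocated 1080 * 3 / 2 + 3 = 1623 lines */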
+ frame_init_single_plane(frame, &frame->planes.yuyv,
+ frame->info.res.height * 3 / 2 + 3,
+ frame->info.padded_width, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_NV11:
+ frame_init_nv_planes(frame, 4, 1, 1);
+ break;
+ /* nv12 and nv21 have the same frame layout, only the data
+ * positioning differs.
+ */
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ frame_init_nv_planes(frame, 2, 2, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ frame_init_nv_planes(frame, 2, 2, 2);
+ break;
+ /* nv16 and nv61 have the same frame layout, only the data
+ * positioning differs.
+ */
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ frame_init_nv_planes(frame, 2, 1, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ frame_init_yuv_planes(frame, 2, 2, false, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ frame_init_yuv_planes(frame, 2, 1, false, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ frame_init_yuv_planes(frame, 1, 1, false, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ frame_init_yuv_planes(frame, 2, 2, false, 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ frame_init_yuv_planes(frame, 2, 1, false, 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_YV12:
+ frame_init_yuv_planes(frame, 2, 2, true, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YV16:
+ frame_init_yuv_planes(frame, 2, 1, true, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ frame_init_qplane6_planes(frame);
+ break;
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ frame_init_single_plane(frame, &frame->planes.binary.data,
+ frame->info.res.height,
+ frame->info.padded_width, 1);
+ frame->planes.binary.size = 0;
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ return IA_CSS_SUCCESS;
+}
+
+void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int min_padded_width)
+{
+ unsigned int align;
+
+ IA_CSS_ENTER_PRIVATE("info = %p,width = %d, minimum padded width = %d",
+ info, width, min_padded_width);
+ if (!info) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ if (min_padded_width > width)
+ align = min_padded_width;
+ else
+ align = width;
+
+ info->res.width = width;
+ /* frames with a U and V plane of 8 bits per pixel need to have
+ all planes aligned; this means double the alignment for the
+ Y plane if the horizontal decimation is 2. */
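+ /* Worked example (illustrative, assuming HIVE_ISP_DDR_WORD_BYTES is
+ * 32): a 1930-pixel-wide YUV420 frame then gets padded_width =
+ * CEIL_MUL(1930, 64) = 1984 */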
+ if (info->format == IA_CSS_FRAME_FORMAT_YUV420 ||
+ info->format == IA_CSS_FRAME_FORMAT_YV12 ||
+ info->format == IA_CSS_FRAME_FORMAT_NV12 ||
+ info->format == IA_CSS_FRAME_FORMAT_NV21 ||
+ info->format == IA_CSS_FRAME_FORMAT_BINARY_8 ||
+ info->format == IA_CSS_FRAME_FORMAT_YUV_LINE)
+ info->padded_width =
+ CEIL_MUL(align, 2 * HIVE_ISP_DDR_WORD_BYTES);
+ else if (info->format == IA_CSS_FRAME_FORMAT_NV12_TILEY)
+ info->padded_width = CEIL_MUL(align, NV12_TILEY_TILE_WIDTH);
+ else if (info->format == IA_CSS_FRAME_FORMAT_RAW ||
+ info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED)
+ info->padded_width = CEIL_MUL(align, 2 * ISP_VEC_NELEMS);
+ else
+ info->padded_width = CEIL_MUL(align, HIVE_ISP_DDR_WORD_BYTES);
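+ /* Worked example (editorial illustration, assuming the usual
+ * 32-byte HIVE_ISP_DDR_WORD_BYTES): a 1922-pixel-wide YUV420 frame
+ * is padded to CEIL_MUL(1922, 64) = 1984, so the half-width U and V
+ * planes (992 pixels) stay DDR-word aligned as well.
+ */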
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
+ enum ia_css_frame_format format)
+{
+ assert(info);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_info_set_format() enter:\n");
+ info->format = format;
+}
+
+void ia_css_frame_info_init(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int aligned)
+{
+ IA_CSS_ENTER_PRIVATE("info = %p, width = %d, height = %d, format = %d, aligned = %d",
+ info, width, height, format, aligned);
+ if (!info) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ info->res.height = height;
+ info->format = format;
+ ia_css_frame_info_set_width(info, width, aligned);
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_frame_free_multiple(unsigned int num_frames,
+ struct ia_css_frame **frames_array)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_frames; i++) {
+ if (frames_array[i]) {
+ ia_css_frame_free(frames_array[i]);
+ frames_array[i] = NULL;
+ }
+ }
+}
+
+enum ia_css_err ia_css_frame_allocate_with_buffer_size(
+ struct ia_css_frame **frame,
+ const unsigned int buffer_size_bytes,
+ const bool contiguous)
+{
+ /* Body copied from frame_allocate_with_data(). */
+ enum ia_css_err err;
+ struct ia_css_frame *me = frame_create(0, 0,
+ IA_CSS_FRAME_FORMAT_NUM,/* Not valid format yet */
+ 0, 0, contiguous, false);
+
+ if (!me)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ /* Get the data size */
+ me->data_bytes = buffer_size_bytes;
+
+ err = frame_allocate_buffer_data(me);
+
+ if (err != IA_CSS_SUCCESS) {
+ sh_css_free(me);
+ me = NULL;
+ }
+
+ *frame = me;
+
+ return err;
+}
+
+bool ia_css_frame_info_is_same_resolution(
+ const struct ia_css_frame_info *info_a,
+ const struct ia_css_frame_info *info_b)
+{
+ if (!info_a || !info_b)
+ return false;
+ return (info_a->res.width == info_b->res.width) &&
+ (info_a->res.height == info_b->res.height);
+}
+
+bool ia_css_frame_is_same_type(const struct ia_css_frame *frame_a,
+ const struct ia_css_frame *frame_b)
+{
+ bool is_equal = false;
+ const struct ia_css_frame_info *info_a, *info_b;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_is_same_type() enter:\n");
+
+ if (!frame_a || !frame_b)
+ return false;
+ info_a = &frame_a->info;
+ info_b = &frame_b->info;
+ if (info_a->format != info_b->format)
+ return false;
+ if (info_a->padded_width != info_b->padded_width)
+ return false;
+ is_equal = ia_css_frame_info_is_same_resolution(info_a, info_b);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_is_same_type() leave:\n");
+
+ return is_equal;
+}
+
+void
+ia_css_dma_configure_from_info(
+ struct dma_port_config *config,
+ const struct ia_css_frame_info *info)
+{
+ unsigned int is_raw_packed = info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED;
+ unsigned int bits_per_pixel = is_raw_packed ? info->raw_bit_depth :
+ ia_css_elems_bytes_from_info(info) * 8;
+ unsigned int pix_per_ddrword = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
+ unsigned int words_per_line = CEIL_DIV(info->padded_width, pix_per_ddrword);
+ unsigned int elems_b = pix_per_ddrword;
+
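+ /* Illustration (editorial, with an assumed 256-bit DDR word): a
+ * RAW_PACKED frame with raw_bit_depth = 10 and padded_width = 1600
+ * gives pix_per_ddrword = 256 / 10 = 25, words_per_line =
+ * CEIL_DIV(1600, 25) = 64 and a stride of 64 * 32 = 2048 bytes.
+ */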
+ config->stride = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
+ config->elems = (uint8_t)elems_b;
+ config->width = (uint16_t)info->res.width;
+ config->crop = 0;
+ assert(config->width <= info->padded_width);
+}
+
+/**************************************************************************
+** Static functions
+**************************************************************************/
+
+static void frame_init_plane(struct ia_css_frame_plane *plane,
+ unsigned int width,
+ unsigned int stride,
+ unsigned int height,
+ unsigned int offset)
+{
+ plane->height = height;
+ plane->width = width;
+ plane->stride = stride;
+ plane->offset = offset;
+}
+
+static void frame_init_single_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel)
+{
+ unsigned int stride;
+
+ stride = subpixels_per_line * bytes_per_pixel;
+ /* The frame height must be an even number (required by the
+ * ISYS2401 hardware); an odd height is rounded up to even.
+ * Images are not impacted by this rounding, it is only needed
+ * for jpeg/embedded data. As long as buffer allocation and
+ * release both use data_bytes, there is no memory leak. */
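+ /* E.g. for an odd height of 1081, data_bytes covers 1082 lines
+ * while plane->height keeps the caller's 1081. (Illustrative
+ * numbers.) */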
+ frame->data_bytes = stride * CEIL_MUL2(height, 2);
+ frame_init_plane(plane, subpixels_per_line, stride, height, 0);
+}
+
+static void frame_init_raw_single_plane(
+ struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bits_per_pixel)
+{
+ unsigned int stride;
+
+ assert(frame);
+
+ stride = HIVE_ISP_DDR_WORD_BYTES *
+ CEIL_DIV(subpixels_per_line,
+ HIVE_ISP_DDR_WORD_BITS / bits_per_pixel);
+ frame->data_bytes = stride * height;
+ frame_init_plane(plane, subpixels_per_line, stride, height, 0);
+}
+
+static void frame_init_mipi_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel)
+{
+ unsigned int stride;
+
+ stride = subpixels_per_line * bytes_per_pixel;
+ frame->data_bytes = 8 * 1024 * 1024; /* fixed 8 MiB MIPI frame buffer */
+ frame->valid = false;
+ frame->contiguous = true;
+ frame_init_plane(plane, subpixels_per_line, stride, height, 0);
+}
+
+static void frame_init_nv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ unsigned int bytes_per_element)
+{
+ unsigned int y_width = frame->info.padded_width;
+ unsigned int y_height = frame->info.res.height;
+ unsigned int uv_width;
+ unsigned int uv_height;
+ unsigned int y_bytes;
+ unsigned int uv_bytes;
+ unsigned int y_stride;
+ unsigned int uv_stride;
+
+ assert(horizontal_decimation != 0 && vertical_decimation != 0);
+
+ uv_width = 2 * (y_width / horizontal_decimation);
+ uv_height = y_height / vertical_decimation;
+
+ if (frame->info.format == IA_CSS_FRAME_FORMAT_NV12_TILEY) {
+ y_width = CEIL_MUL(y_width, NV12_TILEY_TILE_WIDTH);
+ uv_width = CEIL_MUL(uv_width, NV12_TILEY_TILE_WIDTH);
+ y_height = CEIL_MUL(y_height, NV12_TILEY_TILE_HEIGHT);
+ uv_height = CEIL_MUL(uv_height, NV12_TILEY_TILE_HEIGHT);
+ }
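+ /* Illustration (editorial): plain NV12 (2x2 decimation, 1 byte per
+ * element) at padded_width 1280 and height 720 yields a 1280 * 720
+ * byte Y plane plus a (2 * 1280 / 2) * (720 / 2) = 1280 * 360 byte
+ * UV plane, i.e. the usual 1.5 bytes per pixel overall.
+ */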
+
+ y_stride = y_width * bytes_per_element;
+ uv_stride = uv_width * bytes_per_element;
+ y_bytes = y_stride * y_height;
+ uv_bytes = uv_stride * uv_height;
+
+ frame->data_bytes = y_bytes + uv_bytes;
+ frame_init_plane(&frame->planes.nv.y, y_width, y_stride, y_height, 0);
+ frame_init_plane(&frame->planes.nv.uv, uv_width,
+ uv_stride, uv_height, y_bytes);
+}
+
+static void frame_init_yuv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ bool swap_uv,
+ unsigned int bytes_per_element)
+{
+ unsigned int y_width = frame->info.padded_width,
+ y_height = frame->info.res.height,
+ uv_width = y_width / horizontal_decimation,
+ uv_height = y_height / vertical_decimation,
+ y_stride, y_bytes, uv_bytes, uv_stride;
+
+ y_stride = y_width * bytes_per_element;
+ uv_stride = uv_width * bytes_per_element;
+ y_bytes = y_stride * y_height;
+ uv_bytes = uv_stride * uv_height;
+
+ frame->data_bytes = y_bytes + 2 * uv_bytes;
+ frame_init_plane(&frame->planes.yuv.y, y_width, y_stride, y_height, 0);
+ if (swap_uv) {
+ frame_init_plane(&frame->planes.yuv.v, uv_width, uv_stride,
+ uv_height, y_bytes);
+ frame_init_plane(&frame->planes.yuv.u, uv_width, uv_stride,
+ uv_height, y_bytes + uv_bytes);
+ } else {
+ frame_init_plane(&frame->planes.yuv.u, uv_width, uv_stride,
+ uv_height, y_bytes);
+ frame_init_plane(&frame->planes.yuv.v, uv_width, uv_stride,
+ uv_height, y_bytes + uv_bytes);
+ }
+}
+
+static void frame_init_rgb_planes(struct ia_css_frame *frame,
+ unsigned int bytes_per_element)
+{
+ unsigned int width = frame->info.res.width,
+ height = frame->info.res.height, stride, bytes;
+
+ stride = width * bytes_per_element;
+ bytes = stride * height;
+ frame->data_bytes = 3 * bytes;
+ frame_init_plane(&frame->planes.planar_rgb.r, width, stride, height, 0);
+ frame_init_plane(&frame->planes.planar_rgb.g,
+ width, stride, height, 1 * bytes);
+ frame_init_plane(&frame->planes.planar_rgb.b,
+ width, stride, height, 2 * bytes);
+}
+
+static void frame_init_qplane6_planes(struct ia_css_frame *frame)
+{
+ unsigned int width = frame->info.padded_width / 2,
+ height = frame->info.res.height / 2, bytes, stride;
+
+ stride = width * 2;
+ bytes = stride * height;
+
+ frame->data_bytes = 6 * bytes;
+ frame_init_plane(&frame->planes.plane6.r,
+ width, stride, height, 0 * bytes);
+ frame_init_plane(&frame->planes.plane6.r_at_b,
+ width, stride, height, 1 * bytes);
+ frame_init_plane(&frame->planes.plane6.gr,
+ width, stride, height, 2 * bytes);
+ frame_init_plane(&frame->planes.plane6.gb,
+ width, stride, height, 3 * bytes);
+ frame_init_plane(&frame->planes.plane6.b,
+ width, stride, height, 4 * bytes);
+ frame_init_plane(&frame->planes.plane6.b_at_r,
+ width, stride, height, 5 * bytes);
+}
+
+static enum ia_css_err frame_allocate_buffer_data(struct ia_css_frame *frame)
+{
+#ifdef ISP2401
+ IA_CSS_ENTER_LEAVE_PRIVATE("frame->data_bytes=%d\n", frame->data_bytes);
+#endif
+ frame->data = mmgr_alloc_attr(frame->data_bytes,
+ frame->contiguous ?
+ MMGR_ATTRIBUTE_CONTIGUOUS :
+ MMGR_ATTRIBUTE_DEFAULT);
+
+ if (frame->data == mmgr_NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err frame_allocate_with_data(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool contiguous)
+{
+ enum ia_css_err err;
+ struct ia_css_frame *me = frame_create(width,
+ height,
+ format,
+ padded_width,
+ raw_bit_depth,
+ contiguous,
+ true);
+
+ if (!me)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ err = ia_css_frame_init_planes(me);
+
+ if (err == IA_CSS_SUCCESS)
+ err = frame_allocate_buffer_data(me);
+
+ if (err != IA_CSS_SUCCESS) {
+ sh_css_free(me);
+#ifndef ISP2401
+ return err;
+#else
+ me = NULL;
+#endif
+ }
+
+ *frame = me;
+
+ return err;
+}
+
+static struct ia_css_frame *frame_create(unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool contiguous,
+ bool valid)
+{
+ struct ia_css_frame *me = sh_css_malloc(sizeof(*me));
+
+ if (!me)
+ return NULL;
+
+ memset(me, 0, sizeof(*me));
+ me->info.res.width = width;
+ me->info.res.height = height;
+ me->info.format = format;
+ me->info.padded_width = padded_width;
+ me->info.raw_bit_depth = raw_bit_depth;
+ me->contiguous = contiguous;
+ me->valid = valid;
+ me->data_bytes = 0;
+ me->data = mmgr_NULL;
+ /* Indicate that this is not a valid frame. */
+ me->dynamic_queue_id = (int)SH_CSS_INVALID_QUEUE_ID;
+ me->buf_type = IA_CSS_BUFFER_TYPE_INVALID;
+
+ return me;
+}
+
+static unsigned int
+ia_css_elems_bytes_from_info(const struct ia_css_frame_info *info)
+{
+ if (info->format == IA_CSS_FRAME_FORMAT_RGB565)
+ return 2; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_YUV420_16)
+ return 2; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_YUV422_16)
+ return 2; /* bytes per pixel */
+ /* Note: NV12_16 is essentially a 2 bytes per pixel format, but this
+ * return value is only used to configure the DMA for the output
+ * buffer. At least on SKC that configuration is overwritten by
+ * isp_output_init.sp.c except for the number of elements (elems).
+ * NV12_16 is implemented as a double buffer of 8-bit elements, so
+ * elems must be configured as if each element were 1 byte wide. */
+ if (info->format == IA_CSS_FRAME_FORMAT_NV12_16)
+ return 1; /* bytes per pixel */
+
+ if (info->format == IA_CSS_FRAME_FORMAT_RAW ||
+ info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) {
+ if (info->raw_bit_depth)
+ return CEIL_DIV(info->raw_bit_depth, 8);
+ return 2; /* bytes per pixel */
+ }
+ if (info->format == IA_CSS_FRAME_FORMAT_PLANAR_RGB888)
+ return 3; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_RGBA888)
+ return 4; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_QPLANE6)
+ return 2; /* bytes per pixel */
+ return 1; /* Default is 1 byte per pixel */
+}
+
+void ia_css_frame_info_to_frame_sp_info(
+ struct ia_css_frame_sp_info *to,
+ const struct ia_css_frame_info *from)
+{
+ ia_css_resolution_to_sp_resolution(&to->res, &from->res);
+ to->padded_width = (uint16_t)from->padded_width;
+ to->format = (uint8_t)from->format;
+ to->raw_bit_depth = (uint8_t)from->raw_bit_depth;
+ to->raw_bayer_order = from->raw_bayer_order;
+}
+
+void ia_css_resolution_to_sp_resolution(
+ struct ia_css_sp_resolution *to,
+ const struct ia_css_resolution *from)
+{
+ to->width = (uint16_t)from->width;
+ to->height = (uint16_t)from->height;
+}
+
+/* ISP2401 */
+enum ia_css_err
+ia_css_frame_find_crop_resolution(const struct ia_css_resolution *in_res,
+ const struct ia_css_resolution *out_res,
+ struct ia_css_resolution *crop_res)
+{
+ u32 wd_even_ceil, ht_even_ceil;
+ u32 in_ratio, out_ratio;
+
+ if ((!in_res) || (!out_res) || (!crop_res))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ IA_CSS_ENTER_PRIVATE("in(%ux%u) -> out(%ux%u)", in_res->width,
+ in_res->height, out_res->width, out_res->height);
+
+ if ((in_res->width == 0)
+ || (in_res->height == 0)
+ || (out_res->width == 0)
+ || (out_res->height == 0))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if ((out_res->width > in_res->width) ||
+ (out_res->height > in_res->height))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* If the aspect ratio (width/height) of out_res is higher than
+ * that of in_res, we crop vertically; otherwise we crop
+ * horizontally.
+ */
+ in_ratio = in_res->width * out_res->height;
+ out_ratio = out_res->width * in_res->height;
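+ /* Illustration (editorial): a 1920x1200 input cropped for a
+ * 1920x1080 output has out_ratio (1920 * 1200) > in_ratio
+ * (1920 * 1080), so we keep the full width and crop the height to
+ * ROUND_DIV(1080 * 1920, 1920) = 1080 lines.
+ */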
+
+ if (in_ratio == out_ratio) {
+ crop_res->width = in_res->width;
+ crop_res->height = in_res->height;
+ } else if (out_ratio > in_ratio) {
+ crop_res->width = in_res->width;
+ crop_res->height = ROUND_DIV(out_res->height * crop_res->width,
+ out_res->width);
+ } else {
+ crop_res->height = in_res->height;
+ crop_res->width = ROUND_DIV(out_res->width * crop_res->height,
+ out_res->height);
+ }
+
+ /* Round the new (cropped) width and height to even numbers.
+ * binarydesc_calculate_bds_factor assumes we use as much of the
+ * input as possible, which only matters when the last step yields
+ * an odd number: take the next even number if it still falls
+ * within the input, otherwise take the previous even number.
+ */
+ wd_even_ceil = EVEN_CEIL(crop_res->width);
+ ht_even_ceil = EVEN_CEIL(crop_res->height);
+ if ((wd_even_ceil > in_res->width) || (ht_even_ceil > in_res->height)) {
+ crop_res->width = EVEN_FLOOR(crop_res->width);
+ crop_res->height = EVEN_FLOOR(crop_res->height);
+ } else {
+ crop_res->width = wd_even_ceil;
+ crop_res->height = ht_even_ceil;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("in(%ux%u) -> out(%ux%u)", crop_res->width,
+ crop_res->height, out_res->width, out_res->height);
+ return IA_CSS_SUCCESS;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h b/drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h
new file mode 100644
index 000000000000..d4b0b2361176
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_IFMTR_H__
+#define __IA_CSS_IFMTR_H__
+
+#include <type_support.h>
+#include <ia_css_stream_public.h>
+#include <ia_css_binary.h>
+
+extern bool ifmtr_set_if_blocking_mode_reset;
+
+unsigned int ia_css_ifmtr_lines_needed_for_bayer_order(
+ const struct ia_css_stream_config *config);
+
+unsigned int ia_css_ifmtr_columns_needed_for_bayer_order(
+ const struct ia_css_stream_config *config);
+
+enum ia_css_err ia_css_ifmtr_configure(struct ia_css_stream_config *config,
+ struct ia_css_binary *binary);
+
+#endif /* __IA_CSS_IFMTR_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
new file mode 100644
index 000000000000..7a18eae8c638
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
@@ -0,0 +1,552 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+#include <linux/kernel.h>
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+
+#include "ia_css_ifmtr.h"
+#include <math_support.h>
+#include "sh_css_internal.h"
+#include "input_formatter.h"
+#include "assert_support.h"
+#include "sh_css_sp.h"
+#include "isp/modes/interface/input_buf.isp.h"
+
+/************************************************************
+ * Static function declarations
+ ************************************************************/
+static enum ia_css_err ifmtr_start_column(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_column);
+
+static enum ia_css_err ifmtr_input_start_line(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_line);
+
+static void ifmtr_set_if_blocking_mode(
+ const input_formatter_cfg_t *const config_a,
+ const input_formatter_cfg_t *const config_b);
+
+/************************************************************
+ * Public functions
+ ************************************************************/
+
+/* The ISP expects GRBG bayer order; we skip one line and/or one column
+ * to correct for it when the input bayer order differs.
+ */
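+/* E.g. a BGGR stream becomes GRBG after skipping its first line, while
+ * an RGGB stream becomes GRBG after skipping its first column.
+ * (Editorial illustration.)
+ */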
+unsigned int ia_css_ifmtr_lines_needed_for_bayer_order(
+ const struct ia_css_stream_config *config)
+{
+ assert(config);
+ if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_BGGR)
+ || (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG))
+ return 1;
+
+ return 0;
+}
+
+unsigned int ia_css_ifmtr_columns_needed_for_bayer_order(
+ const struct ia_css_stream_config *config)
+{
+ assert(config);
+ if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_RGGB)
+ || (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG))
+ return 1;
+
+ return 0;
+}
+
+enum ia_css_err ia_css_ifmtr_configure(struct ia_css_stream_config *config,
+ struct ia_css_binary *binary)
+{
+ unsigned int start_line, start_column = 0,
+ cropped_height,
+ cropped_width,
+ num_vectors,
+ buffer_height = 2,
+ buffer_width,
+ two_ppc,
+ vmem_increment = 0,
+ deinterleaving = 0,
+ deinterleaving_b = 0,
+ width_a = 0,
+ width_b = 0,
+ bits_per_pixel,
+ vectors_per_buffer,
+ vectors_per_line = 0,
+ buffers_per_line = 0,
+ buf_offset_a = 0,
+ buf_offset_b = 0,
+ line_width = 0,
+ width_b_factor = 1, start_column_b,
+ left_padding = 0;
+ input_formatter_cfg_t if_a_config, if_b_config;
+ enum atomisp_input_format input_format;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ u8 if_config_index;
+
+ /* Determine which input formatter config set is targeted;
+ * the index equals the CSI-2 port used.
+ */
+ enum mipi_port_id port;
+
+ if (binary) {
+ cropped_height = binary->in_frame_info.res.height;
+ cropped_width = binary->in_frame_info.res.width;
+ /* This should correspond to the input buffer definition for
+ ISP binaries in input_buf.isp.h */
+ if (binary->info->sp.enable.continuous &&
+ binary->info->sp.pipeline.mode != IA_CSS_BINARY_MODE_COPY)
+ buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
+ else
+ buffer_width = binary->info->sp.input.max_width;
+ input_format = binary->input_format;
+ } else {
+ /* sp raw copy pipe (IA_CSS_PIPE_MODE_COPY): binary is NULL */
+ cropped_height = config->input_config.input_res.height;
+ cropped_width = config->input_config.input_res.width;
+ buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
+ input_format = config->input_config.format;
+ }
+ two_ppc = config->pixels_per_clock == 2;
+ if (config->mode == IA_CSS_INPUT_MODE_SENSOR
+ || config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ port = config->source.port.port;
+ if_config_index = (uint8_t)(port - MIPI_PORT0_ID);
+ } else if (config->mode == IA_CSS_INPUT_MODE_MEMORY) {
+ if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+ } else {
+ if_config_index = 0;
+ }
+
+ assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS
+ || if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED);
+
+ /* TODO: check to see if input is RAW and if current mode interprets
+ * RAW data in any particular bayer order. copy binary with output
+ * format other than raw should not result in dropping lines and/or
+ * columns.
+ */
+ err = ifmtr_input_start_line(config, cropped_height, &start_line);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ifmtr_start_column(config, cropped_width, &start_column);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ if (config->left_padding == -1) {
+ if (!binary)
+ /* sp raw copy pipe: set left_padding value */
+ left_padding = 0;
+ else
+ left_padding = binary->left_padding;
+ } else {
+ left_padding = 2 * ISP_VEC_NELEMS - config->left_padding;
+ }
+
+ if (left_padding) {
+ num_vectors = CEIL_DIV(cropped_width + left_padding,
+ ISP_VEC_NELEMS);
+ } else {
+ num_vectors = CEIL_DIV(cropped_width, ISP_VEC_NELEMS);
+ num_vectors *= buffer_height;
+ /* TODO: in case of left padding, num_vectors is vectors per
+ * line; otherwise it is vectors per line * buffer_height.
+ */
+ }
+
+ start_column_b = start_column;
+
+ bits_per_pixel = input_formatter_get_alignment(INPUT_FORMATTER0_ID)
+ * 8 / ISP_VEC_NELEMS;
+ switch (input_format) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ if (two_ppc) {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ deinterleaving_b = 1;
+ /* half lines */
+ width_a = cropped_width * deinterleaving / 2;
+ width_b_factor = 2;
+ /* full lines */
+ width_b = width_a * width_b_factor;
+ buffer_width *= deinterleaving * 2;
+ /* Patch from bayer to yuv */
+ num_vectors *= deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ vectors_per_line = num_vectors / buffer_height;
+ /* Even lines are half size */
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID) /
+ 2;
+ start_column /= 2;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 3;
+ width_a = cropped_width * deinterleaving / 2;
+ buffer_width = buffer_width * deinterleaving / 2;
+ /* Patch from bayer to yuv */
+ num_vectors = num_vectors / 2 * deinterleaving;
+ start_column = start_column * deinterleaving / 2;
+ }
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ if (two_ppc) {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ width_a = width_b = cropped_width * deinterleaving / 2;
+ buffer_width *= deinterleaving * 2;
+ num_vectors *= deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ vectors_per_line = num_vectors / buffer_height;
+ /* Even lines are half size */
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID) /
+ 2;
+ start_column *= deinterleaving;
+ start_column /= 2;
+ start_column_b = start_column;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ width_a = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving * 2;
+ num_vectors *= deinterleaving;
+ start_column *= deinterleaving;
+ }
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ if (two_ppc) {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ width_a = width_b = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving * 2;
+ num_vectors *= deinterleaving;
+ start_column *= deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ start_column_b = start_column;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 2;
+ width_a = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving;
+ num_vectors *= deinterleaving;
+ start_column *= deinterleaving;
+ }
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ num_vectors *= 2;
+ if (two_ppc) {
+ deinterleaving = 2; /* BR in if_a, G in if_b */
+ deinterleaving_b = 1; /* BR in if_a, G in if_b */
+ buffers_per_line = 4;
+ start_column_b = start_column;
+ start_column *= deinterleaving;
+ start_column_b *= deinterleaving_b;
+ } else {
+ deinterleaving = 3; /* BGR */
+ buffers_per_line = 3;
+ start_column *= deinterleaving;
+ }
+ vmem_increment = 1;
+ width_a = cropped_width * deinterleaving;
+ width_b = cropped_width * deinterleaving_b;
+ buffer_width *= buffers_per_line;
+ /* Patch from bayer to rgb */
+ num_vectors = num_vectors / 2 * deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ if (two_ppc) {
+ int crop_col = (start_column % 2) == 1;
+
+ vmem_increment = 2;
+ deinterleaving = 1;
+ width_a = width_b = cropped_width / 2;
+
+ /* When two_ppc is enabled AND we need to crop one extra
+ * column, if_a crops by one extra and we swap the
+ * output offsets to interleave the bayer pattern in
+ * the correct order.
+ */
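+ /* E.g. start_column = 5 gives crop_col = 1: if_a starts
+ * at column 3 (5/2 + 1), if_b at column 2, and
+ * buf_offset_a/b become 1/0 to restore the interleave.
+ * (Editorial illustration.)
+ */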
+ buf_offset_a = crop_col ? 1 : 0;
+ buf_offset_b = crop_col ? 0 : 1;
+ start_column_b = start_column / 2;
+ start_column = start_column / 2 + crop_col;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 2;
+ if (!binary || (config->continuous &&
+ binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)) {
+ /* !binary -> sp raw copy pipe, no deinterleaving */
+ deinterleaving = 1;
+ }
+ width_a = cropped_width;
+ /* Must be multiple of deinterleaving */
+ num_vectors = CEIL_MUL(num_vectors, deinterleaving);
+ }
+ buffer_height *= 2;
+ if ((!binary) || config->continuous)
+ /* !binary -> sp raw copy pipe */
+ buffer_height *= 2;
+ vectors_per_line = CEIL_DIV(cropped_width, ISP_VEC_NELEMS);
+ vectors_per_line = CEIL_MUL(vectors_per_line, deinterleaving);
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ if (two_ppc) {
+ num_vectors *= 2;
+ vmem_increment = 1;
+ deinterleaving = 2;
+ width_a = width_b = cropped_width;
+ /* B buffer is one line further */
+ buf_offset_b = buffer_width / ISP_VEC_NELEMS;
+ bits_per_pixel *= 2;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 2;
+ width_a = cropped_width;
+ start_column /= deinterleaving;
+ }
+ buffer_height *= 2;
+ break;
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT1:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT2:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT3:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT4:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT5:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT6:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT7:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT8:
+ case ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT:
+ case ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT:
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ break;
+ }
+ if (width_a == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (two_ppc)
+ left_padding /= 2;
+
+ /* Default values */
+ if (left_padding)
+ vectors_per_line = num_vectors;
+ if (!vectors_per_line) {
+ vectors_per_line = CEIL_MUL(num_vectors / buffer_height,
+ deinterleaving);
+ line_width = 0;
+ }
+ if (!line_width)
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID);
+ if (!buffers_per_line)
+ buffers_per_line = deinterleaving;
+ line_width = CEIL_MUL(line_width,
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID)
+ * vmem_increment);
+
+ vectors_per_buffer = buffer_height * buffer_width / ISP_VEC_NELEMS;
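+ /* Illustration (editorial, assuming 64-element ISP vectors): with
+ * buffer_height = 2 and buffer_width = 1792,
+ * vectors_per_buffer = 2 * 1792 / 64 = 56.
+ */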
+
+ if (config->mode == IA_CSS_INPUT_MODE_TPG &&
+ ((binary && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_VIDEO) ||
+ (!binary))) {
+ /* !binary -> sp raw copy pipe */
+ /* workaround for TPG in video mode */
+ start_line = 0;
+ start_column = 0;
+ cropped_height -= start_line;
+ width_a -= start_column;
+ }
+
+ if_a_config.start_line = start_line;
+ if_a_config.start_column = start_column;
+ if_a_config.left_padding = left_padding / deinterleaving;
+ if_a_config.cropped_height = cropped_height;
+ if_a_config.cropped_width = width_a;
+ if_a_config.deinterleaving = deinterleaving;
+ if_a_config.buf_vecs = vectors_per_buffer;
+ if_a_config.buf_start_index = buf_offset_a;
+ if_a_config.buf_increment = vmem_increment;
+ if_a_config.buf_eol_offset =
+ buffer_width * bits_per_pixel / 8 - line_width;
+ if_a_config.is_yuv420_format =
+ (input_format == ATOMISP_INPUT_FORMAT_YUV420_8)
+ || (input_format == ATOMISP_INPUT_FORMAT_YUV420_10)
+ || (input_format == ATOMISP_INPUT_FORMAT_YUV420_16);
+ if_a_config.block_no_reqs = (config->mode != IA_CSS_INPUT_MODE_SENSOR);
+
+ if (two_ppc) {
+ if (deinterleaving_b) {
+ deinterleaving = deinterleaving_b;
+ width_b = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving;
+ /* Patch from bayer to rgb */
+ num_vectors = num_vectors / 2 *
+ deinterleaving * width_b_factor;
+ vectors_per_line = num_vectors / buffer_height;
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID);
+ }
+ if_b_config.start_line = start_line;
+ if_b_config.start_column = start_column_b;
+ if_b_config.left_padding = left_padding / deinterleaving;
+ if_b_config.cropped_height = cropped_height;
+ if_b_config.cropped_width = width_b;
+ if_b_config.deinterleaving = deinterleaving;
+ if_b_config.buf_vecs = vectors_per_buffer;
+ if_b_config.buf_start_index = buf_offset_b;
+ if_b_config.buf_increment = vmem_increment;
+ if_b_config.buf_eol_offset =
+ buffer_width * bits_per_pixel / 8 - line_width;
+ if_b_config.is_yuv420_format =
+ input_format == ATOMISP_INPUT_FORMAT_YUV420_8
+ || input_format == ATOMISP_INPUT_FORMAT_YUV420_10
+ || input_format == ATOMISP_INPUT_FORMAT_YUV420_16;
+ if_b_config.block_no_reqs =
+ (config->mode != IA_CSS_INPUT_MODE_SENSOR);
+
+ if (if_config_index != SH_CSS_IF_CONFIG_NOT_NEEDED) {
+ assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS);
+
+ ifmtr_set_if_blocking_mode(&if_a_config, &if_b_config);
+ /* Set the ifconfigs to SP group */
+ sh_css_sp_set_if_configs(&if_a_config, &if_b_config,
+ if_config_index);
+ }
+ } else {
+ if (if_config_index != SH_CSS_IF_CONFIG_NOT_NEEDED) {
+ assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS);
+
+ ifmtr_set_if_blocking_mode(&if_a_config, NULL);
+ /* Set the ifconfigs to SP group */
+ sh_css_sp_set_if_configs(&if_a_config, NULL,
+ if_config_index);
+ }
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+bool ifmtr_set_if_blocking_mode_reset = true;
+
+/************************************************************
+ * Static functions
+ ************************************************************/
+static void ifmtr_set_if_blocking_mode(
+ const input_formatter_cfg_t *const config_a,
+ const input_formatter_cfg_t *const config_b)
+{
+ int i;
+ bool block[] = { false, false, false, false };
+
+ assert(N_INPUT_FORMATTER_ID <= (ARRAY_SIZE(block)));
+
+ block[INPUT_FORMATTER0_ID] = (bool)config_a->block_no_reqs;
+ if (config_b)
+ block[INPUT_FORMATTER1_ID] = (bool)config_b->block_no_reqs;
+
+ /* TODO: the following could cause issues when streams are
+ * started after each other; the IF should not be
+ * reconfigured/reset from the host.
+ */
+ if (ifmtr_set_if_blocking_mode_reset) {
+ ifmtr_set_if_blocking_mode_reset = false;
+ for (i = 0; i < N_INPUT_FORMATTER_ID; i++) {
+ input_formatter_ID_t id = (input_formatter_ID_t)i;
+
+ input_formatter_rst(id);
+ input_formatter_set_fifo_blocking_mode(id, block[id]);
+ }
+ }
+
+}
+
+static enum ia_css_err ifmtr_start_column(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_column)
+{
+ unsigned int in = config->input_config.input_res.width, start,
+ for_bayer = ia_css_ifmtr_columns_needed_for_bayer_order(config);
+
+ if (bin_in + 2 * for_bayer > in)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* On the hardware, we want to use the middle of the input, so the
+ * start column is half the number of excess columns. */
+ start = (in - bin_in) / 2;
+ /* Round the start column down to an even number to keep the bayer
+ * phase intact. */
+ start &= ~0x1;
+
+ /* Now add one column (if needed) to correct for the bayer
+ * order.
+ */
+ start += for_bayer;
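+ /* Illustration (editorial): in = 1936 and bin_in = 1920 give
+ * start = 8; a bayer correction of 1 then moves the start column
+ * to 9.
+ */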
+ *start_column = start;
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err ifmtr_input_start_line(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_line)
+{
+ unsigned int in = config->input_config.input_res.height, start,
+ for_bayer = ia_css_ifmtr_lines_needed_for_bayer_order(config);
+
+ if (bin_in + 2 * for_bayer > in)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* On the hardware, we want to use the middle of the input, so the
+ * start line is half the number of excess lines. On the simulator,
+ * we cannot handle extra lines at the end of the frame.
+ */
+ start = (in - bin_in) / 2;
+ /* Round the start line down to an even number to keep the bayer
+ * phase intact.
+ */
+ start &= ~0x1;
+
+ /* now we add the one line (if needed) to correct for the bayer order */
+ start += for_bayer;
+ *start_line = start;
+ return IA_CSS_SUCCESS;
+}
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h b/drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h
new file mode 100644
index 000000000000..d2dd231b6296
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_INPUTFIFO_H
+#define _IA_CSS_INPUTFIFO_H
+
+#include <sp.h>
+#include <isp.h>
+
+#include "ia_css_stream_format.h"
+
+/* SP access */
+void ia_css_inputfifo_send_input_frame(
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int ch_id,
+ enum atomisp_input_format input_format,
+ bool two_ppc);
+
+void ia_css_inputfifo_start_frame(
+ unsigned int ch_id,
+ enum atomisp_input_format input_format,
+ bool two_ppc);
+
+void ia_css_inputfifo_send_line(
+ unsigned int ch_id,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2);
+
+void ia_css_inputfifo_send_embedded_line(
+ unsigned int ch_id,
+ enum atomisp_input_format data_type,
+ const unsigned short *data,
+ unsigned int width);
+
+void ia_css_inputfifo_end_frame(
+ unsigned int ch_id);
+
+#endif /* _IA_CSS_INPUTFIFO_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c b/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c
new file mode 100644
index 000000000000..e5a339fb52f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c
@@ -0,0 +1,538 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "platform_support.h"
+
+#include "ia_css_inputfifo.h"
+
+#include "device_access.h"
+
+#define __INLINE_SP__
+#include "sp.h"
+#define __INLINE_ISP__
+#include "isp.h"
+#define __INLINE_IRQ__
+#include "irq.h"
+#define __INLINE_FIFO_MONITOR__
+#include "fifo_monitor.h"
+
+#define __INLINE_EVENT__
+#include "event_fifo.h"
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "input_system.h" /* MIPI_PREDICTOR_NONE,... */
+#endif
+
+#include "assert_support.h"
+
+/* System independent */
+#include "sh_css_internal.h"
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "ia_css_isys.h"
+#endif
+
+#define HBLANK_CYCLES (187)
+#define MARKER_CYCLES (6)
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include <hive_isp_css_streaming_to_mipi_types_hrt.h>
+#endif
+
+/* The data type is used to send special cases:
+ * yuv420: odd lines (1, 3 etc) are twice as wide as even
+ * lines (0, 2, 4 etc).
+ * rgb: for two pixels per clock, the R and B values are sent
+ * to output_0 while only G is sent to output_1. This means
+ * that output_1 only gets half the number of values of output_0.
+ * WARNING: This type should also be used for Legacy YUV420.
+ * regular: used for all other data types (RAW, YUV422, etc)
+ */
+enum inputfifo_mipi_data_type {
+ inputfifo_mipi_data_type_regular,
+ inputfifo_mipi_data_type_yuv420,
+ inputfifo_mipi_data_type_yuv420_legacy,
+ inputfifo_mipi_data_type_rgb,
+};
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+static unsigned int inputfifo_curr_ch_id, inputfifo_curr_fmt_type;
+#endif
+struct inputfifo_instance {
+ unsigned int ch_id;
+ enum atomisp_input_format input_format;
+ bool two_ppc;
+ bool streaming;
+ unsigned int hblank_cycles;
+ unsigned int marker_cycles;
+ unsigned int fmt_type;
+ enum inputfifo_mipi_data_type type;
+};
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+/*
+ * Maintain a basic streaming-to-MIPI administration with ch_id as index.
+ * ch_id maps to the "MIPI virtual channel ID" and can have values 0..3.
+ */
+#define INPUTFIFO_NR_OF_S2M_CHANNELS (4)
+static struct inputfifo_instance
+ inputfifo_inst_admin[INPUTFIFO_NR_OF_S2M_CHANNELS];
+
+/* Streaming to MIPI */
+static unsigned int inputfifo_wrap_marker(unsigned int marker)
+{
+ return marker |
+ (inputfifo_curr_ch_id << HIVE_STR_TO_MIPI_CH_ID_LSB) |
+ (inputfifo_curr_fmt_type << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
+}
+
+static inline void
+_sh_css_fifo_snd(unsigned int token)
+{
+ while (!can_event_send_token(STR2MIPI_EVENT_ID))
+ hrt_sleep();
+ event_send_token(STR2MIPI_EVENT_ID, token);
+}
+
+static void inputfifo_send_data_a(unsigned int data)
+{
+ unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
+ (data << HIVE_STR_TO_MIPI_DATA_A_LSB);
+ _sh_css_fifo_snd(token);
+}
+
+static void inputfifo_send_data_b(unsigned int data)
+{
+ unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
+ (data << _HIVE_STR_TO_MIPI_DATA_B_LSB);
+ _sh_css_fifo_snd(token);
+}
+
+static void inputfifo_send_data(unsigned int a, unsigned int b)
+{
+ unsigned int token = ((1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
+ (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
+ (a << HIVE_STR_TO_MIPI_DATA_A_LSB) |
+ (b << _HIVE_STR_TO_MIPI_DATA_B_LSB));
+ _sh_css_fifo_snd(token);
+}
+
+static void inputfifo_send_sol(void)
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_SOL_BIT);
+
+ _sh_css_fifo_snd(token);
+}
+
+static void inputfifo_send_eol(void)
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_EOL_BIT);
+ _sh_css_fifo_snd(token);
+}
+
+static void inputfifo_send_sof(void)
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_SOF_BIT);
+
+ _sh_css_fifo_snd(token);
+}
+
+static void inputfifo_send_eof(void)
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_EOF_BIT);
+ _sh_css_fifo_snd(token);
+}
+
+static void inputfifo_send_ch_id_and_fmt_type(unsigned int ch_id,
+ unsigned int fmt_type)
+{
+ hrt_data token;
+
+ inputfifo_curr_ch_id = ch_id & _HIVE_ISP_CH_ID_MASK;
+ inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
+ /* We send a zero marker; this wraps the ch_id and
+ * fmt_type automatically.
+ */
+ token = inputfifo_wrap_marker(0);
+ _sh_css_fifo_snd(token);
+}
+
+static void inputfifo_send_empty_token(void)
+{
+ hrt_data token = inputfifo_wrap_marker(0);
+
+ _sh_css_fifo_snd(token);
+}
+
+static void inputfifo_start_frame(unsigned int ch_id, unsigned int fmt_type)
+{
+ inputfifo_send_ch_id_and_fmt_type(ch_id, fmt_type);
+ inputfifo_send_sof();
+}
+
+static void inputfifo_end_frame(
+ unsigned int marker_cycles)
+{
+ unsigned int i;
+
+ for (i = 0; i < marker_cycles; i++)
+ inputfifo_send_empty_token();
+ inputfifo_send_eof();
+}
+
+static void inputfifo_send_line2(
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2,
+ unsigned int hblank_cycles,
+ unsigned int marker_cycles,
+ unsigned int two_ppc,
+ enum inputfifo_mipi_data_type type)
+{
+ unsigned int i, is_rgb = 0, is_legacy = 0;
+
+ assert(data);
+ assert((data2) || (width2 == 0));
+ if (type == inputfifo_mipi_data_type_rgb)
+ is_rgb = 1;
+
+ if (type == inputfifo_mipi_data_type_yuv420_legacy)
+ is_legacy = 1;
+
+ for (i = 0; i < hblank_cycles; i++)
+ inputfifo_send_empty_token();
+ inputfifo_send_sol();
+ for (i = 0; i < marker_cycles; i++)
+ inputfifo_send_empty_token();
+ for (i = 0; i < width; i++, data++) {
+ /* for RGB in two_ppc, we only actually send 2 pixels per
+ * clock in the even pixels (0, 2 etc). In the other cycles,
+ * we only send 1 pixel, to data[0].
+ */
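+ /* E.g. the first six subpixels of an RGB line go out as the
+ * pair (0,1), single 2, pair (3,4), single 5: every third
+ * value is sent alone. (Editorial illustration.)
+ */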
+ unsigned int send_two_pixels = two_ppc;
+
+ if ((is_rgb || is_legacy) && (i % 3 == 2))
+ send_two_pixels = 0;
+ if (send_two_pixels) {
+ if (i + 1 == width) {
+ /* for jpg (binary) copy, this can occur
+ * if the file contains an odd number of bytes.
+ */
+ inputfifo_send_data(
+ data[0], 0);
+ } else {
+ inputfifo_send_data(
+ data[0], data[1]);
+ }
+ /* Additional increment because we send 2 pixels */
+ data++;
+ i++;
+ } else if (two_ppc && is_legacy) {
+ inputfifo_send_data_b(data[0]);
+ } else {
+ inputfifo_send_data_a(data[0]);
+ }
+ }
+
+ for (i = 0; i < width2; i++, data2++) {
+ /* for RGB in two_ppc, we only actually send 2 pixels per
+ * clock in the even pixels (0, 2 etc). In the other cycles,
+ * we only send 1 pixel, to data2[0].
+ */
+ unsigned int send_two_pixels = two_ppc;
+
+ if ((is_rgb || is_legacy) && (i % 3 == 2))
+ send_two_pixels = 0;
+ if (send_two_pixels) {
+ if (i + 1 == width2) {
+ /* for jpg (binary) copy, this can occur
+ * if the file contains an odd number of bytes.
+ */
+ inputfifo_send_data(
+ data2[0], 0);
+ } else {
+ inputfifo_send_data(
+ data2[0], data2[1]);
+ }
+ /* Additional increment because we send 2 pixels */
+ data2++;
+ i++;
+ } else if (two_ppc && is_legacy) {
+ inputfifo_send_data_b(data2[0]);
+ } else {
+ inputfifo_send_data_a(data2[0]);
+ }
+ }
+ for (i = 0; i < hblank_cycles; i++)
+ inputfifo_send_empty_token();
+ inputfifo_send_eol();
+}
+
+static void
+inputfifo_send_line(const unsigned short *data,
+ unsigned int width,
+ unsigned int hblank_cycles,
+ unsigned int marker_cycles,
+ unsigned int two_ppc,
+ enum inputfifo_mipi_data_type type)
+{
+ assert(data);
+ inputfifo_send_line2(data, width, NULL, 0,
+ hblank_cycles,
+ marker_cycles,
+ two_ppc,
+ type);
+}
+
+/* Send a frame of data into the input network via the GP FIFO.
+ * Parameters:
+ * - data: array of 16 bit values that contains all data for the frame.
+ * - width: width of a line in number of subpixels, for yuv420 it is the
+ * number of Y components per line.
+ * - height: height of the frame in number of lines.
+ * - ch_id: channel ID.
+ * - fmt_type: format type.
+ * - hblank_cycles: length of horizontal blanking in cycles.
+ * - marker_cycles: number of empty cycles after start-of-line and before
+ * end-of-frame.
+ * - two_ppc: boolean, describes whether to send one or two pixels per clock
+ * cycle. In this mode, we send pixels N and N+1 in the same cycle,
+ * to IF_PRIM_A and IF_PRIM_B respectively. The caller must make
+ * sure the input data has been formatted correctly for this.
+ * For example, for RGB formats this means that unused values
+ * must be inserted.
+ * - yuv420: boolean, describes whether (non-legacy) yuv420 data is used. In
+ * this mode, the odd lines (1,3,5 etc) are half as long as the
+ * even lines (2,4,6 etc).
+ * Note that the first line is odd (1) and the second line is even
+ * (2).
+ *
+ * This function does not do any reordering of pixels, the caller must make
+ * sure the data is in the right format. Please refer to the CSS receiver
+ * documentation for details on the data formats.
+ */
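+/* Worked example (editorial): a 4-line non-legacy yuv420 frame with
+ * width = 640 sends lines of 640, 1280, 640 and 1280 subpixels; the odd
+ * (chroma-carrying) lines are twice as wide as the even ones.
+ */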
+
+static void inputfifo_send_frame(
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int ch_id,
+ unsigned int fmt_type,
+ unsigned int hblank_cycles,
+ unsigned int marker_cycles,
+ unsigned int two_ppc,
+ enum inputfifo_mipi_data_type type)
+{
+ unsigned int i;
+
+ assert(data);
+ inputfifo_start_frame(ch_id, fmt_type);
+
+ for (i = 0; i < height; i++) {
+ if ((type == inputfifo_mipi_data_type_yuv420) &&
+ (i & 1) == 1) {
+ inputfifo_send_line(data, 2 * width,
+ hblank_cycles,
+ marker_cycles,
+ two_ppc, type);
+ data += 2 * width;
+ } else {
+ inputfifo_send_line(data, width,
+ hblank_cycles,
+ marker_cycles,
+ two_ppc, type);
+ data += width;
+ }
+ }
+ inputfifo_end_frame(marker_cycles);
+}
+
+static enum inputfifo_mipi_data_type inputfifo_determine_type(
+ enum atomisp_input_format input_format)
+{
+ enum inputfifo_mipi_data_type type;
+
+ type = inputfifo_mipi_data_type_regular;
+ if (input_format == ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) {
+ type = inputfifo_mipi_data_type_yuv420_legacy;
+ } else if (input_format == ATOMISP_INPUT_FORMAT_YUV420_8 ||
+ input_format == ATOMISP_INPUT_FORMAT_YUV420_10 ||
+ input_format == ATOMISP_INPUT_FORMAT_YUV420_16) {
+ type = inputfifo_mipi_data_type_yuv420;
+ } else if (input_format >= ATOMISP_INPUT_FORMAT_RGB_444 &&
+ input_format <= ATOMISP_INPUT_FORMAT_RGB_888) {
+ type = inputfifo_mipi_data_type_rgb;
+ }
+ return type;
+}
+
+static struct inputfifo_instance *inputfifo_get_inst(
+ unsigned int ch_id)
+{
+ return &inputfifo_inst_admin[ch_id];
+}
+
+void ia_css_inputfifo_send_input_frame(
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int ch_id,
+ enum atomisp_input_format input_format,
+ bool two_ppc)
+{
+ unsigned int fmt_type, hblank_cycles, marker_cycles;
+ enum inputfifo_mipi_data_type type;
+
+ assert(data);
+ hblank_cycles = HBLANK_CYCLES;
+ marker_cycles = MARKER_CYCLES;
+ ia_css_isys_convert_stream_format_to_mipi_format(input_format,
+ MIPI_PREDICTOR_NONE,
+ &fmt_type);
+
+ type = inputfifo_determine_type(input_format);
+
+ inputfifo_send_frame(data, width, height,
+ ch_id, fmt_type, hblank_cycles, marker_cycles,
+ two_ppc, type);
+}
+
+void ia_css_inputfifo_start_frame(
+ unsigned int ch_id,
+ enum atomisp_input_format input_format,
+ bool two_ppc)
+{
+ struct inputfifo_instance *s2mi;
+
+ s2mi = inputfifo_get_inst(ch_id);
+
+ s2mi->ch_id = ch_id;
+ ia_css_isys_convert_stream_format_to_mipi_format(input_format,
+ MIPI_PREDICTOR_NONE,
+ &s2mi->fmt_type);
+ s2mi->two_ppc = two_ppc;
+ s2mi->type = inputfifo_determine_type(input_format);
+ s2mi->hblank_cycles = HBLANK_CYCLES;
+ s2mi->marker_cycles = MARKER_CYCLES;
+ s2mi->streaming = true;
+
+ inputfifo_start_frame(ch_id, s2mi->fmt_type);
+}
+
+void ia_css_inputfifo_send_line(
+ unsigned int ch_id,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2)
+{
+ struct inputfifo_instance *s2mi;
+
+ assert(data);
+ assert((data2) || (width2 == 0));
+ s2mi = inputfifo_get_inst(ch_id);
+
+ /* Set global variables that indicate channel_id and format_type */
+ inputfifo_curr_ch_id = (s2mi->ch_id) & _HIVE_ISP_CH_ID_MASK;
+ inputfifo_curr_fmt_type = (s2mi->fmt_type) & _HIVE_ISP_FMT_TYPE_MASK;
+
+ inputfifo_send_line2(data, width, data2, width2,
+ s2mi->hblank_cycles,
+ s2mi->marker_cycles,
+ s2mi->two_ppc,
+ s2mi->type);
+}
+
+void ia_css_inputfifo_send_embedded_line(
+ unsigned int ch_id,
+ enum atomisp_input_format data_type,
+ const unsigned short *data,
+ unsigned int width)
+{
+ struct inputfifo_instance *s2mi;
+ unsigned int fmt_type;
+
+ assert(data);
+ s2mi = inputfifo_get_inst(ch_id);
+ ia_css_isys_convert_stream_format_to_mipi_format(data_type,
+ MIPI_PREDICTOR_NONE, &fmt_type);
+
+ /* Set format_type for metadata line. */
+ inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
+
+ inputfifo_send_line(data, width, s2mi->hblank_cycles, s2mi->marker_cycles,
+ s2mi->two_ppc, inputfifo_mipi_data_type_regular);
+}
+
+void ia_css_inputfifo_end_frame(
+ unsigned int ch_id)
+{
+ struct inputfifo_instance *s2mi;
+
+ s2mi = inputfifo_get_inst(ch_id);
+
+ /* Set global variables that indicate channel_id and format_type */
+ inputfifo_curr_ch_id = (s2mi->ch_id) & _HIVE_ISP_CH_ID_MASK;
+ inputfifo_curr_fmt_type = (s2mi->fmt_type) & _HIVE_ISP_FMT_TYPE_MASK;
+
+ /* Call existing HRT function */
+ inputfifo_end_frame(s2mi->marker_cycles);
+
+ s2mi->streaming = false;
+}
+#endif /* #if !defined(HAS_NO_INPUT_SYSTEM) */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h b/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h
new file mode 100644
index 000000000000..2769183a8956
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h
@@ -0,0 +1,102 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_ISP_PARAM_H_
+#define _IA_CSS_ISP_PARAM_H_
+
+#include <ia_css_err.h>
+#include "ia_css_isp_param_types.h"
+
+/* Set functions for parameter memory descriptors */
+void
+ia_css_isp_param_set_mem_init(
+ struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ char *address, size_t size);
+
+void
+ia_css_isp_param_set_css_mem_init(
+ struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ hrt_vaddress address, size_t size);
+
+void
+ia_css_isp_param_set_isp_mem_init(
+ struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ u32 address, size_t size);
+
+/* Get functions for parameter memory descriptors */
+const struct ia_css_host_data *
+ia_css_isp_param_get_mem_init(
+ const struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem);
+
+const struct ia_css_data *
+ia_css_isp_param_get_css_mem_init(
+ const struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem);
+
+const struct ia_css_isp_data *
+ia_css_isp_param_get_isp_mem_init(
+ const struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem);
+
+/* Initialize the memory interface sizes and addresses */
+void
+ia_css_init_memory_interface(
+ struct ia_css_isp_param_css_segments *isp_mem_if,
+ const struct ia_css_isp_param_host_segments *mem_params,
+ const struct ia_css_isp_param_css_segments *css_params);
+
+/* Allocate memory parameters */
+enum ia_css_err
+ia_css_isp_param_allocate_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params,
+ const struct ia_css_isp_param_isp_segments *mem_initializers);
+
+/* Destroy memory parameters */
+void
+ia_css_isp_param_destroy_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params);
+
+/* Load fw parameters */
+void
+ia_css_isp_param_load_fw_params(
+ const char *fw,
+ union ia_css_all_memory_offsets *mem_offsets,
+ const struct ia_css_isp_param_memory_offsets *memory_offsets,
+ bool init);
+
+/* Copy host parameter images to ddr */
+enum ia_css_err
+ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ struct ia_css_isp_param_css_segments *ddr,
+ const struct ia_css_isp_param_host_segments *host,
+ enum ia_css_param_class pclass);
+
+/* Enable a pipeline by setting the control field in the isp dmem parameters */
+void
+ia_css_isp_param_enable_pipeline(
+ const struct ia_css_isp_param_host_segments *mem_params);
+
+#endif /* _IA_CSS_ISP_PARAM_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h b/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h
new file mode 100644
index 000000000000..5d23b2f57719
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h
@@ -0,0 +1,81 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_ISP_PARAM_TYPES_H_
+#define _IA_CSS_ISP_PARAM_TYPES_H_
+
+#include "ia_css_types.h"
+#include <platform_support.h>
+#include <system_global.h>
+
+/* Short hands */
+#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
+#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
+
+/* The driver depends on this, to be removed later. */
+#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+
+/* Explicit member numbering to avoid fish type checker bug */
+enum ia_css_param_class {
+ IA_CSS_PARAM_CLASS_PARAM = 0, /* Late binding parameters, like 3A */
+ IA_CSS_PARAM_CLASS_CONFIG = 1, /* Pipe config time parameters, like resolution */
+ IA_CSS_PARAM_CLASS_STATE = 2, /* State parameters, like tnr buffer index */
+#if 0 /* Not yet implemented */
+ IA_CSS_PARAM_CLASS_FRAME = 3, /* Frame time parameters, like output buffer */
+#endif
+};
+
+#define IA_CSS_NUM_PARAM_CLASSES (IA_CSS_PARAM_CLASS_STATE + 1)
+
+/* ISP parameter descriptor */
+struct ia_css_isp_parameter {
+ u32 offset; /* Offset in isp_<mem>_parameters, etc. */
+ u32 size; /* Disabled if 0 */
+};
+
+/* Address/size of each parameter class in each isp memory, host memory pointers */
+struct ia_css_isp_param_host_segments {
+ struct ia_css_host_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES];
+};
+
+/* Address/size of each parameter class in each isp memory, css memory pointers */
+struct ia_css_isp_param_css_segments {
+ struct ia_css_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES];
+};
+
+/* Address/size of each parameter class in each isp memory, isp memory pointers */
+struct ia_css_isp_param_isp_segments {
+ struct ia_css_isp_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES];
+};
+
+/* Memory offsets in binary info */
+struct ia_css_isp_param_memory_offsets {
+ u32 offsets[IA_CSS_NUM_PARAM_CLASSES]; /* offset w.r.t. hdr in bytes */
+};
+
+/* Offsets for ISP kernel parameters per isp memory.
+ * Only relevant for standard ISP binaries, not ACC or SP.
+ */
+union ia_css_all_memory_offsets {
+ struct {
+ CSS_ALIGN(struct ia_css_memory_offsets *param, 8);
+ CSS_ALIGN(struct ia_css_config_memory_offsets *config, 8);
+ CSS_ALIGN(struct ia_css_state_memory_offsets *state, 8);
+ } offsets;
+ struct {
+ CSS_ALIGN(void *ptr, 8);
+ } array[IA_CSS_NUM_PARAM_CLASSES];
+};
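+
+/* A minimal indexing sketch (hypothetical helper, not part of the
+ * original interface): all three segment mirrors above share the same
+ * [class][memory] grid, so a descriptor is always addressed the same way.
+ */
+static inline u32 example_dmem_param_size(
+ const struct ia_css_isp_param_host_segments *host)
+{
+ return host->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].size;
+}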
+
+#endif /* _IA_CSS_ISP_PARAM_TYPES_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c b/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c
new file mode 100644
index 000000000000..443e412d05ad
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c
@@ -0,0 +1,216 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "memory_access.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_param.h"
+
+/* Set functions for parameter memory descriptors */
+
+void
+ia_css_isp_param_set_mem_init(
+ struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ char *address, size_t size)
+{
+ mem_init->params[pclass][mem].address = address;
+ mem_init->params[pclass][mem].size = (uint32_t)size;
+}
+
+void
+ia_css_isp_param_set_css_mem_init(
+ struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ hrt_vaddress address, size_t size)
+{
+ mem_init->params[pclass][mem].address = address;
+ mem_init->params[pclass][mem].size = (uint32_t)size;
+}
+
+void
+ia_css_isp_param_set_isp_mem_init(
+ struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ u32 address, size_t size)
+{
+ mem_init->params[pclass][mem].address = address;
+ mem_init->params[pclass][mem].size = (uint32_t)size;
+}
+
+/* Get functions for parameter memory descriptors */
+const struct ia_css_host_data *
+ia_css_isp_param_get_mem_init(
+ const struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem)
+{
+ return &mem_init->params[pclass][mem];
+}
+
+const struct ia_css_data *
+ia_css_isp_param_get_css_mem_init(
+ const struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem)
+{
+ return &mem_init->params[pclass][mem];
+}
+
+const struct ia_css_isp_data *
+ia_css_isp_param_get_isp_mem_init(
+ const struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem)
+{
+ return &mem_init->params[pclass][mem];
+}
+
+void
+ia_css_init_memory_interface(
+ struct ia_css_isp_param_css_segments *isp_mem_if,
+ const struct ia_css_isp_param_host_segments *mem_params,
+ const struct ia_css_isp_param_css_segments *css_params)
+{
+ unsigned int pclass, mem;
+
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ memset(isp_mem_if->params[pclass], 0, sizeof(isp_mem_if->params[pclass]));
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ if (!mem_params->params[pclass][mem].address)
+ continue;
+ isp_mem_if->params[pclass][mem].size = mem_params->params[pclass][mem].size;
+ if (pclass != IA_CSS_PARAM_CLASS_PARAM)
+ isp_mem_if->params[pclass][mem].address =
+ css_params->params[pclass][mem].address;
+ }
+ }
+}
+
+enum ia_css_err
+ia_css_isp_param_allocate_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params,
+ const struct ia_css_isp_param_isp_segments *mem_initializers)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int mem, pclass;
+
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ u32 size = 0;
+
+ if (mem_initializers)
+ size = mem_initializers->params[pclass][mem].size;
+ mem_params->params[pclass][mem].size = size;
+ mem_params->params[pclass][mem].address = NULL;
+ css_params->params[pclass][mem].size = size;
+ css_params->params[pclass][mem].address = 0x0;
+ if (size) {
+ mem_params->params[pclass][mem].address = sh_css_calloc(1, size);
+ if (!mem_params->params[pclass][mem].address) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto cleanup;
+ }
+ if (pclass != IA_CSS_PARAM_CLASS_PARAM) {
+ css_params->params[pclass][mem].address = mmgr_malloc(size);
+ if (!css_params->params[pclass][mem].address) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto cleanup;
+ }
+ }
+ }
+ }
+ }
+ return err;
+cleanup:
+ ia_css_isp_param_destroy_isp_parameters(mem_params, css_params);
+ return err;
+}
+
+void
+ia_css_isp_param_destroy_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params)
+{
+ unsigned int mem, pclass;
+
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ if (mem_params->params[pclass][mem].address)
+ sh_css_free(mem_params->params[pclass][mem].address);
+ if (css_params->params[pclass][mem].address)
+ hmm_free(css_params->params[pclass][mem].address);
+ mem_params->params[pclass][mem].address = NULL;
+ css_params->params[pclass][mem].address = 0x0;
+ }
+ }
+}
+
+void
+ia_css_isp_param_load_fw_params(
+ const char *fw,
+ union ia_css_all_memory_offsets *mem_offsets,
+ const struct ia_css_isp_param_memory_offsets *memory_offsets,
+ bool init)
+{
+ unsigned int pclass;
+
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ mem_offsets->array[pclass].ptr = NULL;
+ if (init)
+ mem_offsets->array[pclass].ptr = (void *)(fw + memory_offsets->offsets[pclass]);
+ }
+}
+
+enum ia_css_err
+ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ struct ia_css_isp_param_css_segments *ddr,
+ const struct ia_css_isp_param_host_segments *host,
+ enum ia_css_param_class pclass)
+{
+ unsigned int mem;
+
+ for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) {
+ size_t size = host->params[pclass][mem].size;
+ hrt_vaddress ddr_mem_ptr = ddr->params[pclass][mem].address;
+ char *host_mem_ptr = host->params[pclass][mem].address;
+
+ if (size != ddr->params[pclass][mem].size)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ if (!size)
+ continue;
+ mmgr_store(ddr_mem_ptr, host_mem_ptr, size);
+ }
+ return IA_CSS_SUCCESS;
+}
+
+void
+ia_css_isp_param_enable_pipeline(
+ const struct ia_css_isp_param_host_segments *mem_params)
+{
+ /* By protocol, bit 0 of the mandatory uint32_t first field of the
+ input parameters is a disable bit */
+ short dmem_offset = 0;
+
+ if (mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].size == 0)
+ return;
+
+ *(uint32_t *)
+ &mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].address[dmem_offset]
+ = 0x0;
+}
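+
+/* A minimal lifecycle sketch (hypothetical helper): allocate both
+ * parameter mirrors from a binary's memory initializers, push the CONFIG
+ * class to DDR (the PARAM class gets its DDR buffer elsewhere), then tear
+ * everything down again.
+ */
+static enum ia_css_err example_isp_param_roundtrip(
+ const struct ia_css_isp_param_isp_segments *mem_initializers)
+{
+ struct ia_css_isp_param_host_segments host = { 0 };
+ struct ia_css_isp_param_css_segments ddr = { 0 };
+ enum ia_css_err err;
+
+ err = ia_css_isp_param_allocate_isp_parameters(&host, &ddr,
+ mem_initializers);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ /* ... fill host.params[IA_CSS_PARAM_CLASS_CONFIG][mem].address ... */
+ err = ia_css_isp_param_copy_isp_mem_if_to_ddr(&ddr, &host,
+ IA_CSS_PARAM_CLASS_CONFIG);
+ ia_css_isp_param_destroy_isp_parameters(&host, &ddr);
+ return err;
+}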
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
new file mode 100644
index 000000000000..e2aca35452c0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
@@ -0,0 +1,184 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ISYS_H__
+#define __IA_CSS_ISYS_H__
+
+#include <type_support.h>
+#include <input_system.h>
+#include <ia_css_input_port.h>
+#include <ia_css_stream_format.h>
+#include <ia_css_stream_public.h>
+#include <system_global.h>
+#include "ia_css_isys_comm.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/**
+ * Virtual Input System. (Input System 2401)
+ */
+typedef input_system_cfg_t ia_css_isys_descr_t;
+/* end of Virtual Input System */
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+input_system_error_t ia_css_isys_init(void);
+void ia_css_isys_uninit(void);
+enum mipi_port_id ia_css_isys_port_to_mipi_port(
+ enum mipi_port_id api_port);
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+/**
+ * @brief Register one (virtual) stream. This is used to track when all
+ * virtual streams are configured inside the input system. The CSI RX is
+ * only started when all registered streams are configured.
+ *
+ * @param[in] port CSI port
+ * @param[in] isys_stream_id Stream handle generated with ia_css_isys_generate_stream_id()
+ * Must be lower than SH_CSS_MAX_ISYS_CHANNEL_NODES
+ * @return IA_CSS_SUCCESS if successful, IA_CSS_ERR_INTERNAL_ERROR if
+ * there is already a stream registered with the same handle
+ */
+enum ia_css_err ia_css_isys_csi_rx_register_stream(
+ enum mipi_port_id port,
+ uint32_t isys_stream_id);
+
+/**
+ * @brief Unregister one (virtual) stream. This is used to track when all
+ * virtual streams are configured inside the input system. The CSI RX is
+ * only started when all registered streams are configured.
+ *
+ * @param[in] port CSI port
+ * @param[in] isys_stream_id Stream handle generated with ia_css_isys_generate_stream_id()
+ * Must be lower than SH_CSS_MAX_ISYS_CHANNEL_NODES
+ * @return IA_CSS_SUCCESS if successful, IA_CSS_ERR_INTERNAL_ERROR if
+ * there is no stream registered with that handle
+ */
+enum ia_css_err ia_css_isys_csi_rx_unregister_stream(
+ enum mipi_port_id port,
+ uint32_t isys_stream_id);
+
+enum ia_css_err ia_css_isys_convert_compressed_format(
+ struct ia_css_csi2_compression *comp,
+ struct input_system_cfg_s *cfg);
+unsigned int ia_css_csi2_calculate_input_system_alignment(
+ enum atomisp_input_format fmt_type);
+#endif
+
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+/* CSS Receiver */
+void ia_css_isys_rx_configure(
+ const rx_cfg_t *config,
+ const enum ia_css_input_mode input_mode);
+
+void ia_css_isys_rx_disable(void);
+
+void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port);
+
+unsigned int ia_css_isys_rx_get_interrupt_reg(enum mipi_port_id port);
+void ia_css_isys_rx_get_irq_info(enum mipi_port_id port,
+ unsigned int *irq_infos);
+void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
+ unsigned int irq_infos);
+unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
+
+#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+/* @brief Translate format and compression to format type.
+ *
+ * @param[in] input_format The input format.
+ * @param[in] compression The compression scheme.
+ * @param[out] fmt_type Pointer to the resulting format type.
+ * @return Error code.
+ *
+ * Translate an input format and mipi compression pair to the fmt_type.
+ * This is normally done by the sensor, but when using the input fifo, this
+ * format type must be submitted correctly by the application.
+ */
+enum ia_css_err ia_css_isys_convert_stream_format_to_mipi_format(
+ enum atomisp_input_format input_format,
+ mipi_predictor_t compression,
+ unsigned int *fmt_type);
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/**
+ * Virtual Input System. (Input System 2401)
+ */
+ia_css_isys_error_t ia_css_isys_stream_create(
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_h isys_stream,
+ uint32_t isys_stream_id);
+
+void ia_css_isys_stream_destroy(
+ ia_css_isys_stream_h isys_stream);
+
+ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
+ ia_css_isys_stream_h isys_stream,
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_cfg_t *isys_stream_cfg);
+
+void ia_css_isys_csi_rx_lut_rmgr_init(void);
+
+void ia_css_isys_csi_rx_lut_rmgr_uninit(void);
+
+bool ia_css_isys_csi_rx_lut_rmgr_acquire(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+void ia_css_isys_csi_rx_lut_rmgr_release(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+void ia_css_isys_ibuf_rmgr_init(void);
+
+void ia_css_isys_ibuf_rmgr_uninit(void);
+
+bool ia_css_isys_ibuf_rmgr_acquire(
+ u32 size,
+ uint32_t *start_addr);
+
+void ia_css_isys_ibuf_rmgr_release(
+ uint32_t *start_addr);
+
+void ia_css_isys_dma_channel_rmgr_init(void);
+
+void ia_css_isys_dma_channel_rmgr_uninit(void);
+
+bool ia_css_isys_dma_channel_rmgr_acquire(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+void ia_css_isys_dma_channel_rmgr_release(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+void ia_css_isys_stream2mmio_sid_rmgr_init(void);
+
+void ia_css_isys_stream2mmio_sid_rmgr_uninit(void);
+
+bool ia_css_isys_stream2mmio_sid_rmgr_acquire(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+void ia_css_isys_stream2mmio_sid_rmgr_release(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+/* end of Virtual Input System */
+#endif
+
+#endif /* __IA_CSS_ISYS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
new file mode 100644
index 000000000000..6ad7a0cd5146
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ISYS_COMM_H
+#define __IA_CSS_ISYS_COMM_H
+
+#include <type_support.h>
+#include <input_system.h>
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#include <platform_support.h> /* inline */
+#include <input_system_global.h>
+#include <ia_css_stream_public.h> /* IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH */
+
+#define SH_CSS_NODES_PER_THREAD 2
+#define SH_CSS_MAX_ISYS_CHANNEL_NODES (SH_CSS_MAX_SP_THREADS * SH_CSS_NODES_PER_THREAD)
+
+/*
+ * a) ia_css_isys_stream_h & ia_css_isys_stream_cfg_t come from host.
+ *
+ * b) Here it is better to use actual structures for the stream handle
+ * instead of opaque handles. Otherwise, we would need another
+ * communication channel to interpret the opaque handle (this handle is
+ * maintained by the host and needs to be populated to the SP for every
+ * stream open).
+ */
+typedef virtual_input_system_stream_t *ia_css_isys_stream_h;
+typedef virtual_input_system_stream_cfg_t ia_css_isys_stream_cfg_t;
+
+/*
+ * Error check type for the ISYS APIs.
+ */
+typedef bool ia_css_isys_error_t;
+
+static inline uint32_t ia_css_isys_generate_stream_id(
+ u32 sp_thread_id,
+ uint32_t stream_id)
+{
+ return sp_thread_id * IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH + stream_id;
+}
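+
+/* The inverse mapping, as a sketch (hypothetical helper): recover the SP
+ * thread and the per-channel stream index from a generated stream ID.
+ */
+static inline void example_decode_stream_id(
+ u32 isys_stream_id,
+ u32 *sp_thread_id,
+ u32 *stream_id)
+{
+ *sp_thread_id = isys_stream_id / IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH;
+ *stream_id = isys_stream_id % IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH;
+}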
+
+#endif /* USE_INPUT_SYSTEM_VERSION_2401*/
+#endif /* __IA_CSS_ISYS_COMM_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
new file mode 100644
index 000000000000..8f2ce2c057eb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
@@ -0,0 +1,167 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "bitop_support.h"
+#include "ia_css_pipeline.h" /* ia_css_pipeline_get_pipe_io_status() */
+#include "sh_css_internal.h" /* sh_css_sp_pipeline_io_status
+ * SH_CSS_MAX_SP_THREADS
+ */
+#include "csi_rx_rmgr.h"
+
+static isys_csi_rx_rsrc_t isys_csi_rx_rsrc[N_CSI_RX_BACKEND_ID];
+
+void ia_css_isys_csi_rx_lut_rmgr_init(void)
+{
+ memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc));
+}
+
+void ia_css_isys_csi_rx_lut_rmgr_uninit(void)
+{
+ memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc));
+}
+
+bool ia_css_isys_csi_rx_lut_rmgr_acquire(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ bool retval = false;
+ u32 max_num_packets_of_type;
+ u32 num_active_of_type;
+ isys_csi_rx_rsrc_t *cur_rsrc = NULL;
+ u16 i;
+
+ assert(backend < N_CSI_RX_BACKEND_ID);
+ assert((packet_type == CSI_MIPI_PACKET_TYPE_LONG) ||
+ (packet_type == CSI_MIPI_PACKET_TYPE_SHORT));
+ assert(entry);
+
+ if ((backend < N_CSI_RX_BACKEND_ID) && (entry)) {
+ cur_rsrc = &isys_csi_rx_rsrc[backend];
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
+ max_num_packets_of_type = N_LONG_PACKET_LUT_ENTRIES[backend];
+ num_active_of_type = cur_rsrc->num_long_packets;
+ } else {
+ max_num_packets_of_type = N_SHORT_PACKET_LUT_ENTRIES[backend];
+ num_active_of_type = cur_rsrc->num_short_packets;
+ }
+
+ if (num_active_of_type < max_num_packets_of_type) {
+ for (i = 0; i < max_num_packets_of_type; i++) {
+ if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
+ bitop_setbit(cur_rsrc->active_table, i);
+
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
+ entry->long_packet_entry = i;
+ entry->short_packet_entry = 0;
+ cur_rsrc->num_long_packets++;
+ } else {
+ entry->long_packet_entry = 0;
+ entry->short_packet_entry = i;
+ cur_rsrc->num_short_packets++;
+ }
+ cur_rsrc->num_active++;
+ retval = true;
+ break;
+ }
+ }
+ }
+ }
+ return retval;
+}
+
+void ia_css_isys_csi_rx_lut_rmgr_release(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ u32 max_num_packets;
+ isys_csi_rx_rsrc_t *cur_rsrc = NULL;
+ u32 packet_entry = 0;
+
+ assert(backend < N_CSI_RX_BACKEND_ID);
+ assert(entry);
+ assert((packet_type == CSI_MIPI_PACKET_TYPE_LONG) ||
+ (packet_type == CSI_MIPI_PACKET_TYPE_SHORT));
+
+ if ((backend < N_CSI_RX_BACKEND_ID) && (entry)) {
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
+ max_num_packets = N_LONG_PACKET_LUT_ENTRIES[backend];
+ packet_entry = entry->long_packet_entry;
+ } else {
+ max_num_packets = N_SHORT_PACKET_LUT_ENTRIES[backend];
+ packet_entry = entry->short_packet_entry;
+ }
+
+ cur_rsrc = &isys_csi_rx_rsrc[backend];
+ if ((packet_entry < max_num_packets) && (cur_rsrc->num_active > 0)) {
+ if (bitop_getbit(cur_rsrc->active_table, packet_entry) == 1) {
+ bitop_clearbit(cur_rsrc->active_table, packet_entry);
+
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG)
+ cur_rsrc->num_long_packets--;
+ else
+ cur_rsrc->num_short_packets--;
+ cur_rsrc->num_active--;
+ }
+ }
+ }
+}
+
+enum ia_css_err ia_css_isys_csi_rx_register_stream(
+ enum mipi_port_id port,
+ uint32_t isys_stream_id)
+{
+ enum ia_css_err retval = IA_CSS_ERR_INTERNAL_ERROR;
+
+ if ((port < N_INPUT_SYSTEM_CSI_PORT) &&
+ (isys_stream_id < SH_CSS_MAX_ISYS_CHANNEL_NODES)) {
+ struct sh_css_sp_pipeline_io_status *pipe_io_status;
+
+ pipe_io_status = ia_css_pipeline_get_pipe_io_status();
+ if (bitop_getbit(pipe_io_status->active[port], isys_stream_id) == 0) {
+ bitop_setbit(pipe_io_status->active[port], isys_stream_id);
+ pipe_io_status->running[port] = 0;
+ retval = IA_CSS_SUCCESS;
+ }
+ }
+ return retval;
+}
+
+enum ia_css_err ia_css_isys_csi_rx_unregister_stream(
+ enum mipi_port_id port,
+ uint32_t isys_stream_id)
+{
+ enum ia_css_err retval = IA_CSS_ERR_INTERNAL_ERROR;
+
+ if ((port < N_INPUT_SYSTEM_CSI_PORT) &&
+ (isys_stream_id < SH_CSS_MAX_ISYS_CHANNEL_NODES)) {
+ struct sh_css_sp_pipeline_io_status *pipe_io_status;
+
+ pipe_io_status = ia_css_pipeline_get_pipe_io_status();
+ if (bitop_getbit(pipe_io_status->active[port], isys_stream_id) == 1) {
+ bitop_clearbit(pipe_io_status->active[port], isys_stream_id);
+ retval = IA_CSS_SUCCESS;
+ }
+ }
+ return retval;
+}
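+
+/* A registration round trip, as a sketch (hypothetical helper; port and
+ * thread/stream numbers are arbitrary): a handle from
+ * ia_css_isys_generate_stream_id() is marked active on a port and then
+ * released again.
+ */
+static enum ia_css_err example_stream_registration(enum mipi_port_id port)
+{
+ u32 handle = ia_css_isys_generate_stream_id(0, 0);
+ enum ia_css_err err;
+
+ err = ia_css_isys_csi_rx_register_stream(port, handle);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ /* ... configure the virtual stream on this port ... */
+ return ia_css_isys_csi_rx_unregister_stream(port, handle);
+}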
+#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h
new file mode 100644
index 000000000000..79d7c4b29bf4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_RMGR_H_INCLUDED__
+#define __CSI_RX_RMGR_H_INCLUDED__
+
+typedef struct isys_csi_rx_rsrc_s isys_csi_rx_rsrc_t;
+struct isys_csi_rx_rsrc_s {
+ u32 active_table;
+ u32 num_active;
+ u16 num_long_packets;
+ u16 num_short_packets;
+};
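+
+/* Bookkeeping sketch (hypothetical helper): active_table is a bitmask
+ * with one bit per LUT entry, driven by the bitop_*() helpers in
+ * csi_rx_rmgr.c, while num_active mirrors its population count. An
+ * equivalent busy test, for illustration:
+ */
+static inline int example_lut_entry_is_busy(
+ const isys_csi_rx_rsrc_t *rsrc, unsigned int entry)
+{
+ return (rsrc->active_table >> entry) & 1;
+}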
+
+#endif /* __CSI_RX_RMGR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c
new file mode 100644
index 000000000000..9055ed387673
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c
@@ -0,0 +1,121 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "ibuf_ctrl_rmgr.h"
+
+static ibuf_rsrc_t ibuf_rsrc;
+
+static ibuf_handle_t *getHandle(uint16_t index)
+{
+ ibuf_handle_t *handle = NULL;
+
+ if (index < MAX_IBUF_HANDLES)
+ handle = &ibuf_rsrc.handles[index];
+ return handle;
+}
+
+void ia_css_isys_ibuf_rmgr_init(void)
+{
+ memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc));
+ ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE;
+}
+
+void ia_css_isys_ibuf_rmgr_uninit(void)
+{
+ memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc));
+ ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE;
+}
+
+bool ia_css_isys_ibuf_rmgr_acquire(
+ u32 size,
+ uint32_t *start_addr)
+{
+ bool retval = false;
+ bool input_buffer_found = false;
+ u32 aligned_size;
+ ibuf_handle_t *handle = NULL;
+ u16 i;
+
+ assert(start_addr);
+ assert(size > 0);
+
+ aligned_size = (size + (IBUF_ALIGN - 1)) & ~(IBUF_ALIGN - 1);
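+ /* Round up to the next multiple of IBUF_ALIGN: with IBUF_ALIGN == 8,
+ * a 13-byte request becomes 16 bytes and a 16-byte request stays 16.
+ */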
+
+ /* Check whether there is an available unused handle with a size
+ * that fulfills the request.
+ */
+ if (ibuf_rsrc.num_active < ibuf_rsrc.num_allocated) {
+ for (i = 0; i < ibuf_rsrc.num_allocated; i++) {
+ handle = getHandle(i);
+ if (!handle->active) {
+ if (handle->size >= aligned_size) {
+ handle->active = true;
+ input_buffer_found = true;
+ ibuf_rsrc.num_active++;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!input_buffer_found) {
+ /* There were no available handles that fulfilled the
+ * request. Allocate a new handle with the requested size.
+ */
+ if ((ibuf_rsrc.num_allocated < MAX_IBUF_HANDLES) &&
+ (ibuf_rsrc.free_size >= aligned_size)) {
+ handle = getHandle(ibuf_rsrc.num_allocated);
+ handle->start_addr = ibuf_rsrc.free_start_addr;
+ handle->size = aligned_size;
+ handle->active = true;
+
+ ibuf_rsrc.free_start_addr += aligned_size;
+ ibuf_rsrc.free_size -= aligned_size;
+ ibuf_rsrc.num_active++;
+ ibuf_rsrc.num_allocated++;
+
+ input_buffer_found = true;
+ }
+ }
+
+ if (input_buffer_found && handle) {
+ *start_addr = handle->start_addr;
+ retval = true;
+ }
+
+ return retval;
+}
+
+void ia_css_isys_ibuf_rmgr_release(
+ uint32_t *start_addr)
+{
+ u16 i;
+ ibuf_handle_t *handle = NULL;
+
+ assert(start_addr);
+
+ for (i = 0; i < ibuf_rsrc.num_allocated; i++) {
+ handle = getHandle(i);
+ if (handle->active && handle->start_addr == *start_addr) {
+ handle->active = false;
+ ibuf_rsrc.num_active--;
+ break;
+ }
+ }
+}
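+
+/* A minimal usage sketch (hypothetical helper; the 2 KiB request size is
+ * arbitrary): reserve an aligned region of the input buffer, program the
+ * controller with it, then return it.
+ */
+static bool example_ibuf_roundtrip(void)
+{
+ u32 addr;
+
+ if (!ia_css_isys_ibuf_rmgr_acquire(2048, &addr))
+ return false;
+ /* ... hand "addr" to the ibuf controller ... */
+ ia_css_isys_ibuf_rmgr_release(&addr);
+ return true;
+}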
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h
new file mode 100644
index 000000000000..7155e2c6e05c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_RMGR_H_INCLUDED__
+#define __IBUF_CTRL_RMGR_H_INCLUDED__
+
+#define MAX_IBUF_HANDLES 24
+#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
+#define IBUF_ALIGN 8
+
+typedef struct ibuf_handle_s ibuf_handle_t;
+struct ibuf_handle_s {
+ u32 start_addr;
+ u32 size;
+ bool active;
+};
+
+typedef struct ibuf_rsrc_s ibuf_rsrc_t;
+struct ibuf_rsrc_s {
+ u32 free_start_addr;
+ u32 free_size;
+ u16 num_active;
+ u16 num_allocated;
+ ibuf_handle_t handles[MAX_IBUF_HANDLES];
+};
+
+#endif /* __IBUF_CTRL_RMGR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
new file mode 100644
index 000000000000..930fa7a0ff53
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
@@ -0,0 +1,87 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "bitop_support.h"
+#include "isys_dma_rmgr.h"
+
+static isys_dma_rsrc_t isys_dma_rsrc[N_ISYS2401_DMA_ID];
+
+void ia_css_isys_dma_channel_rmgr_init(void)
+{
+ memset(isys_dma_rsrc, 0, sizeof(isys_dma_rsrc));
+}
+
+void ia_css_isys_dma_channel_rmgr_uninit(void)
+{
+ memset(isys_dma_rsrc, 0, sizeof(isys_dma_rsrc));
+}
+
+bool ia_css_isys_dma_channel_rmgr_acquire(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ bool retval = false;
+ isys2401_dma_channel i;
+ isys2401_dma_channel max_dma_channel;
+ isys_dma_rsrc_t *cur_rsrc = NULL;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(channel);
+
+ max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id];
+ cur_rsrc = &isys_dma_rsrc[dma_id];
+
+ if (cur_rsrc->num_active < max_dma_channel) {
+ for (i = ISYS2401_DMA_CHANNEL_0; i < N_ISYS2401_DMA_CHANNEL; i++) {
+ if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
+ bitop_setbit(cur_rsrc->active_table, i);
+ *channel = i;
+ cur_rsrc->num_active++;
+ retval = true;
+ break;
+ }
+ }
+ }
+
+ return retval;
+}
+
+void ia_css_isys_dma_channel_rmgr_release(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ isys2401_dma_channel max_dma_channel;
+ isys_dma_rsrc_t *cur_rsrc = NULL;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(channel);
+
+ max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id];
+ cur_rsrc = &isys_dma_rsrc[dma_id];
+
+ if ((*channel < max_dma_channel) && (cur_rsrc->num_active > 0)) {
+ if (bitop_getbit(cur_rsrc->active_table, *channel) == 1) {
+ bitop_clearbit(cur_rsrc->active_table, *channel);
+ cur_rsrc->num_active--;
+ }
+ }
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h
new file mode 100644
index 000000000000..e3d07ac390fc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_RMGR_H_INCLUDED__
+#define __ISYS_DMA_RMGR_H_INCLUDED__
+
+typedef struct isys_dma_rsrc_s isys_dma_rsrc_t;
+struct isys_dma_rsrc_s {
+ u32 active_table;
+ u16 num_active;
+};
+
+#endif /* __ISYS_DMA_RMGR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
new file mode 100644
index 000000000000..b923233ec5b0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
@@ -0,0 +1,123 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "input_system.h"
+
+#ifdef HAS_INPUT_SYSTEM_VERSION_2
+#include "ia_css_isys.h"
+#include "platform_support.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#include "isys_dma.h" /* isys2401_dma_set_max_burst_size() */
+#include "isys_irq.h"
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+input_system_error_t ia_css_isys_init(void)
+{
+ backend_channel_cfg_t backend_ch0;
+ backend_channel_cfg_t backend_ch1;
+ target_cfg2400_t targetB;
+ target_cfg2400_t targetC;
+ u32 acq_mem_region_size = 24;
+ u32 acq_nof_mem_regions = 2;
+ input_system_error_t error = INPUT_SYSTEM_ERR_NO_ERROR;
+
+ memset(&backend_ch0, 0, sizeof(backend_channel_cfg_t));
+ memset(&backend_ch1, 0, sizeof(backend_channel_cfg_t));
+ memset(&targetB, 0, sizeof(targetB));
+ memset(&targetC, 0, sizeof(targetC));
+
+ error = input_system_configuration_reset();
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_csi_xmem_channel_cfg(
+ 0, /*ch_id */
+ INPUT_SYSTEM_PORT_A, /*port */
+ backend_ch0, /*backend_ch */
+ 32, /*mem_region_size */
+ 6, /*nof_mem_regions */
+ acq_mem_region_size, /*acq_mem_region_size */
+ acq_nof_mem_regions, /*acq_nof_mem_regions */
+ targetB, /*target */
+ 3); /*nof_xmem_buffers */
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_csi_xmem_channel_cfg(
+ 1, /*ch_id */
+ INPUT_SYSTEM_PORT_B, /*port */
+ backend_ch0, /*backend_ch */
+ 16, /*mem_region_size */
+ 3, /*nof_mem_regions */
+ acq_mem_region_size, /*acq_mem_region_size */
+ acq_nof_mem_regions, /*acq_nof_mem_regions */
+ targetB, /*target */
+ 3); /*nof_xmem_buffers */
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_csi_xmem_channel_cfg(
+ 2, /*ch_id */
+ INPUT_SYSTEM_PORT_C, /*port */
+ backend_ch1, /*backend_ch */
+ 32, /*mem_region_size */
+ 3, /*nof_mem_regions */
+ acq_mem_region_size, /*acq_mem_region_size */
+ acq_nof_mem_regions, /*acq_nof_mem_regions */
+ targetC, /*target */
+ 2); /*nof_xmem_buffers */
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_configuration_commit();
+
+ return error;
+}
+#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+input_system_error_t ia_css_isys_init(void)
+{
+ ia_css_isys_csi_rx_lut_rmgr_init();
+ ia_css_isys_ibuf_rmgr_init();
+ ia_css_isys_dma_channel_rmgr_init();
+ ia_css_isys_stream2mmio_sid_rmgr_init();
+
+ isys2401_dma_set_max_burst_size(ISYS2401_DMA0_ID,
+ 1 /* Non Burst DMA transactions */);
+
+ /* Enable 2401 input system IRQ status for driver to retrieve */
+ isys_irqc_status_enable(ISYS_IRQ0_ID);
+ isys_irqc_status_enable(ISYS_IRQ1_ID);
+ isys_irqc_status_enable(ISYS_IRQ2_ID);
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+void ia_css_isys_uninit(void)
+{
+}
+#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+void ia_css_isys_uninit(void)
+{
+ ia_css_isys_csi_rx_lut_rmgr_uninit();
+ ia_css_isys_ibuf_rmgr_uninit();
+ ia_css_isys_dma_channel_rmgr_uninit();
+ ia_css_isys_stream2mmio_sid_rmgr_uninit();
+}
+#endif
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
new file mode 100644
index 000000000000..53355a55d05d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
@@ -0,0 +1,89 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "bitop_support.h"
+#include "isys_stream2mmio_rmgr.h"
+
+static isys_stream2mmio_rsrc_t isys_stream2mmio_rsrc[N_STREAM2MMIO_ID];
+
+void ia_css_isys_stream2mmio_sid_rmgr_init(void)
+{
+ memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc));
+}
+
+void ia_css_isys_stream2mmio_sid_rmgr_uninit(void)
+{
+ memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc));
+}
+
+bool ia_css_isys_stream2mmio_sid_rmgr_acquire(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ bool retval = false;
+ stream2mmio_sid_ID_t max_sid;
+ isys_stream2mmio_rsrc_t *cur_rsrc = NULL;
+ stream2mmio_sid_ID_t i;
+
+ assert(stream2mmio < N_STREAM2MMIO_ID);
+ assert(sid);
+
+ if ((stream2mmio < N_STREAM2MMIO_ID) && (sid)) {
+ max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio];
+ cur_rsrc = &isys_stream2mmio_rsrc[stream2mmio];
+
+ if (cur_rsrc->num_active < max_sid) {
+ for (i = STREAM2MMIO_SID0_ID; i < max_sid; i++) {
+ if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
+ bitop_setbit(cur_rsrc->active_table, i);
+ *sid = i;
+ cur_rsrc->num_active++;
+ retval = true;
+ break;
+ }
+ }
+ }
+ }
+ return retval;
+}
+
+void ia_css_isys_stream2mmio_sid_rmgr_release(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ stream2mmio_sid_ID_t max_sid;
+ isys_stream2mmio_rsrc_t *cur_rsrc = NULL;
+
+ assert(stream2mmio < N_STREAM2MMIO_ID);
+ assert(sid);
+
+ if ((stream2mmio < N_STREAM2MMIO_ID) && (sid)) {
+ max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio];
+ cur_rsrc = &isys_stream2mmio_rsrc[stream2mmio];
+ if ((*sid < max_sid) && (cur_rsrc->num_active > 0)) {
+ if (bitop_getbit(cur_rsrc->active_table, *sid) == 1) {
+ bitop_clearbit(cur_rsrc->active_table, *sid);
+ cur_rsrc->num_active--;
+ }
+ }
+ }
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h
new file mode 100644
index 000000000000..b55cf02c8bce
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__
+
+typedef struct isys_stream2mmio_rsrc_s isys_stream2mmio_rsrc_t;
+struct isys_stream2mmio_rsrc_s {
+ u32 active_table;
+ u16 num_active;
+};
+
+#endif /* __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
new file mode 100644
index 000000000000..43665ddff8ea
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
@@ -0,0 +1,600 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define __INLINE_INPUT_SYSTEM__
+#include "input_system.h"
+#include "assert_support.h"
+#include "ia_css_isys.h"
+#include "ia_css_irq.h"
+#include "sh_css_internal.h"
+
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port)
+{
+ hrt_data bits = receiver_port_reg_load(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
+
+ bits |= (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT) |
+#if defined(HAS_RX_VERSION_2)
+ (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT) |
+#endif
+ (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT) |
+ /*(1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT) | */
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT);
+ /*(1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT); */
+
+ receiver_port_reg_store(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits);
+
+ /*
+ * The CSI is nested into the Iunit IRQ's
+ */
+ ia_css_irq_enable(IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR, true);
+
+ return;
+}
+
+/* This function converts between the enum used on the CSS API and the
+ * internal DLI enum type.
+ * We do not use an array for this since we cannot use named array
+ * initializers in Windows. Without that there is no easy way to guarantee
+ * that the array values would be in the correct order.
+ * */
+enum mipi_port_id ia_css_isys_port_to_mipi_port(enum mipi_port_id api_port)
+{
+ /* In this module the validity of the input variable should
+ * have been checked already, so we do not check for erroneous
+ * values. */
+ enum mipi_port_id port = MIPI_PORT0_ID;
+
+ if (api_port == MIPI_PORT1_ID)
+ port = MIPI_PORT1_ID;
+ else if (api_port == MIPI_PORT2_ID)
+ port = MIPI_PORT2_ID;
+
+ return port;
+}
+
+unsigned int ia_css_isys_rx_get_interrupt_reg(enum mipi_port_id port)
+{
+ return receiver_port_reg_load(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
+}
+
+void ia_css_rx_get_irq_info(unsigned int *irq_infos)
+{
+ ia_css_rx_port_get_irq_info(MIPI_PORT1_ID, irq_infos);
+}
+
+void ia_css_rx_port_get_irq_info(enum mipi_port_id api_port,
+ unsigned int *irq_infos)
+{
+ enum mipi_port_id port = ia_css_isys_port_to_mipi_port(api_port);
+
+ ia_css_isys_rx_get_irq_info(port, irq_infos);
+}
+
+void ia_css_isys_rx_get_irq_info(enum mipi_port_id port,
+ unsigned int *irq_infos)
+{
+ unsigned int bits;
+
+ assert(irq_infos);
+ bits = ia_css_isys_rx_get_interrupt_reg(port);
+ *irq_infos = ia_css_isys_rx_translate_irq_infos(bits);
+}
+
+/* Translate register bits to CSS API enum mask */
+unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits)
+{
+ unsigned int infos = 0;
+
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN;
+#if defined(HAS_RX_VERSION_2)
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT;
+#endif
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ECC_CORRECTED;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_SOT;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_CONTROL;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_CRC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC;
+
+ return infos;
+}
+
+void ia_css_rx_clear_irq_info(unsigned int irq_infos)
+{
+ ia_css_rx_port_clear_irq_info(MIPI_PORT1_ID, irq_infos);
+}
+
+void ia_css_rx_port_clear_irq_info(enum mipi_port_id api_port,
+ unsigned int irq_infos)
+{
+ enum mipi_port_id port = ia_css_isys_port_to_mipi_port(api_port);
+
+ ia_css_isys_rx_clear_irq_info(port, irq_infos);
+}
+
+void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
+ unsigned int irq_infos)
+{
+ hrt_data bits = receiver_port_reg_load(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
+
+ /* MW: Why do we remap the receiver bitmap */
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT;
+#if defined(HAS_RX_VERSION_2)
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT;
+#endif
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ECC_CORRECTED)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT;
+
+ receiver_port_reg_store(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits);
+
+ return;
+}
+#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+enum ia_css_err ia_css_isys_convert_stream_format_to_mipi_format(
+ enum atomisp_input_format input_format,
+ mipi_predictor_t compression,
+ unsigned int *fmt_type)
+{
+ assert(fmt_type);
+ /*
+ * Custom (user defined) modes. Used for compressed
+ * MIPI transfers
+ *
+ * Checkpatch thinks the indent before "if" is suspect
+ * I think the only suspect part is the missing "else"
+ * because of the return.
+ */
+ if (compression != MIPI_PREDICTOR_NONE) {
+ switch (input_format) {
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ *fmt_type = 6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ *fmt_type = 7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ *fmt_type = 8;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ *fmt_type = 10;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ *fmt_type = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ *fmt_type = 14;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ *fmt_type = 16;
+ break;
+ default:
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ return IA_CSS_SUCCESS;
+ }
+ /*
+ * This mapping comes from the Arasan CSS function spec
+ * (CSS_func_spec1.08_ahb_sep29_08.pdf).
+ *
+ * MW: For some reason the mapping is not 1-to-1
+ */
+ switch (input_format) {
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ *fmt_type = MIPI_FORMAT_RGB888;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ *fmt_type = MIPI_FORMAT_RGB555;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ *fmt_type = MIPI_FORMAT_RGB444;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ *fmt_type = MIPI_FORMAT_RGB565;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ *fmt_type = MIPI_FORMAT_RGB666;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ *fmt_type = MIPI_FORMAT_RAW8;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ *fmt_type = MIPI_FORMAT_RAW10;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ *fmt_type = MIPI_FORMAT_RAW6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ *fmt_type = MIPI_FORMAT_RAW7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ *fmt_type = MIPI_FORMAT_RAW12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ *fmt_type = MIPI_FORMAT_RAW14;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ *fmt_type = MIPI_FORMAT_YUV420_8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ *fmt_type = MIPI_FORMAT_YUV420_10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ *fmt_type = MIPI_FORMAT_YUV422_8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ *fmt_type = MIPI_FORMAT_YUV422_10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ *fmt_type = MIPI_FORMAT_YUV420_8_LEGACY;
+ break;
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ *fmt_type = MIPI_FORMAT_EMBEDDED;
+ break;
+#ifndef USE_INPUT_SYSTEM_VERSION_2401
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ /* This is not specified by Arasan, so we use
+ * 17 for now.
+ */
+ *fmt_type = MIPI_FORMAT_RAW16;
+ break;
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ *fmt_type = MIPI_FORMAT_BINARY_8;
+ break;
+#else
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ *fmt_type = MIPI_FORMAT_CUSTOM0;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ *fmt_type = MIPI_FORMAT_CUSTOM1;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ *fmt_type = MIPI_FORMAT_CUSTOM2;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ *fmt_type = MIPI_FORMAT_CUSTOM3;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ *fmt_type = MIPI_FORMAT_CUSTOM4;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ *fmt_type = MIPI_FORMAT_CUSTOM5;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ *fmt_type = MIPI_FORMAT_CUSTOM6;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ *fmt_type = MIPI_FORMAT_CUSTOM7;
+ break;
+#endif
+
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ default:
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ return IA_CSS_SUCCESS;
+}
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(
+ enum ia_css_csi2_compression_type type)
+{
+ mipi_predictor_t predictor = MIPI_PREDICTOR_NONE;
+
+ switch (type) {
+ case IA_CSS_CSI2_COMPRESSION_TYPE_1:
+ predictor = MIPI_PREDICTOR_TYPE1 - 1;
+ break;
+ case IA_CSS_CSI2_COMPRESSION_TYPE_2:
+ predictor = MIPI_PREDICTOR_TYPE2 - 1;
+ break;
+ default:
+ break;
+ }
+ return predictor;
+}
+
+enum ia_css_err ia_css_isys_convert_compressed_format(
+ struct ia_css_csi2_compression *comp,
+ struct input_system_cfg_s *cfg)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(comp);
+ assert(cfg);
+
+ if (comp->type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) {
+ /* Compression register bit slicing:
+ 4 bits for each user defined data type
+ 3 bits indicate the compression scheme
+ 000 no compression
+ 001 10-6-10
+ 010 10-7-10
+ 011 10-8-10
+ 100 12-6-12
+ 101 12-7-12
+ 110 12-8-12
+ 1 bit indicates the predictor
+ */
+ if (comp->uncompressed_bits_per_pixel == UNCOMPRESSED_BITS_PER_PIXEL_10) {
+ switch (comp->compressed_bits_per_pixel) {
+ case COMPRESSED_BITS_PER_PIXEL_6:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_6_10;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_7:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_7_10;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_8:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_8_10;
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ } else if (comp->uncompressed_bits_per_pixel ==
+ UNCOMPRESSED_BITS_PER_PIXEL_12) {
+ switch (comp->compressed_bits_per_pixel) {
+ case COMPRESSED_BITS_PER_PIXEL_6:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_6_12;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_7:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_7_12;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_8:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_8_12;
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ } else
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ cfg->csi_port_attr.comp_predictor =
+ sh_css_csi2_compression_type_2_mipi_predictor(comp->type);
+ cfg->csi_port_attr.comp_enable = true;
+ } else /* No compression */
+ cfg->csi_port_attr.comp_enable = false;
+ return err;
+}
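+
+/* A worked example (hypothetical helper): 10 uncompressed bits per pixel
+ * compressed to 8 with predictor type 1 selects the 10-8-10 scheme,
+ * i.e. "011" in the register layout described above.
+ */
+static enum ia_css_err example_select_10_8_10(struct input_system_cfg_s *cfg)
+{
+ struct ia_css_csi2_compression comp = {
+ .type = IA_CSS_CSI2_COMPRESSION_TYPE_1,
+ .compressed_bits_per_pixel = COMPRESSED_BITS_PER_PIXEL_8,
+ .uncompressed_bits_per_pixel = UNCOMPRESSED_BITS_PER_PIXEL_10,
+ };
+
+ return ia_css_isys_convert_compressed_format(&comp, cfg);
+}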
+
+unsigned int ia_css_csi2_calculate_input_system_alignment(
+ enum atomisp_input_format fmt_type)
+{
+ unsigned int memory_alignment_in_bytes = HIVE_ISP_DDR_WORD_BYTES;
+
+ switch (fmt_type) {
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ memory_alignment_in_bytes = 2 * ISP_VEC_NELEMS;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ /* Planar YUV formats need to have all planes aligned; this means
+ * double the alignment for the Y plane if the horizontal decimation is 2. */
+ memory_alignment_in_bytes = 2 * HIVE_ISP_DDR_WORD_BYTES;
+ break;
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ default:
+ memory_alignment_in_bytes = HIVE_ISP_DDR_WORD_BYTES;
+ break;
+ }
+ return memory_alignment_in_bytes;
+}
+
+#endif
+
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+void ia_css_isys_rx_configure(const rx_cfg_t *config,
+ const enum ia_css_input_mode input_mode)
+{
+#if defined(HAS_RX_VERSION_2)
+ bool port_enabled[N_MIPI_PORT_ID];
+ bool any_port_enabled = false;
+ enum mipi_port_id port;
+
+ if ((!config)
+ || (config->mode >= N_RX_MODE)
+ || (config->port >= N_MIPI_PORT_ID)) {
+ assert(0);
+ return;
+ }
+ for (port = (enum mipi_port_id)0; port < N_MIPI_PORT_ID; port++) {
+ if (is_receiver_port_enabled(RX0_ID, port))
+ any_port_enabled = true;
+ }
+ /* AM: Check whether this is a problem with multiple
+ * streams. MS: This is the case. */
+
+ port = config->port;
+ receiver_port_enable(RX0_ID, port, false);
+
+ port = config->port;
+
+ /* AM: Check whether this is a problem with multiple streams. */
+ if (MIPI_PORT_LANES[config->mode][port] != MIPI_0LANE_CFG) {
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX,
+ config->timeout);
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX,
+ config->initcount);
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX,
+ config->synccount);
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX,
+ config->rxcount);
+
+ port_enabled[port] = true;
+
+ if (input_mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ /* MW: A bit of a hack, straight wiring of the capture
+ * units, assuming they are linearly enumerated. */
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX
+ + (unsigned int)port,
+ INPUT_SYSTEM_CSI_BACKEND);
+ /* MW: Like the integration test example, we overwrite
+ * the GPREG_MUX register */
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ (input_system_multiplex_t)port);
+ } else {
+ /*
+ * AM: A bit of a hack, wiring the input system.
+ */
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX
+ + (unsigned int)port,
+ INPUT_SYSTEM_INPUT_BUFFER);
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ INPUT_SYSTEM_ACQUISITION_UNIT);
+ }
+ }
+ /*
+ * The 2ppc is shared for all ports, so we cannot
+ * disable->configure->enable individual ports
+ */
+ /* AM: Check whether this is a problem with multiple streams. */
+ /* MS: 2ppc should be a property per binary and should be
+ * enabled/disabled per binary.
+ * Currently it is implemented as a system wide setting due
+ * to effort and risks. */
+ if (!any_port_enabled) {
+ receiver_reg_store(RX0_ID,
+ _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX,
+ config->is_two_ppc);
+ receiver_reg_store(RX0_ID, _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX,
+ config->is_two_ppc);
+ }
+ receiver_port_enable(RX0_ID, port, true);
+ /* TODO: JB: need to add the define used below to mizuchi */
+ /* sh_css_sw_hive_isp_css_2400_system_20121224_0125\css
+ * \hrt\input_system_defs.h
+ * #define INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG 0X207
+ */
+ /* TODO: need better name for define
+ * input_system_reg_store(INPUT_SYSTEM0_ID,
+ * INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG, 1);
+ */
+ input_system_reg_store(INPUT_SYSTEM0_ID, 0x207, 1);
+#else
+#error "rx.c: RX version must be one of {RX_VERSION_2}"
+#endif
+
+ return;
+}
+
+void ia_css_isys_rx_disable(void)
+{
+ enum mipi_port_id port;
+
+ for (port = (enum mipi_port_id)0; port < N_MIPI_PORT_ID; port++) {
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX,
+ false);
+ }
+ return;
+}
+#endif /* if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
new file mode 100644
index 000000000000..9a795a21d3e6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
@@ -0,0 +1,892 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "ia_css_isys.h"
+#include "ia_css_debug.h"
+#include "math_support.h"
+#include "string_support.h"
+#include "virtual_isys.h"
+#include "isp.h"
+#include "sh_css_defs.h"
+
+/*************************************************
+ *
+ * Forward Declarations
+ *
+ *************************************************/
+
+static bool create_input_system_channel(
+ input_system_cfg_t *cfg,
+ bool metadata,
+ input_system_channel_t *channel);
+
+static void destroy_input_system_channel(
+ input_system_channel_t *channel);
+
+static bool create_input_system_input_port(
+ input_system_cfg_t *cfg,
+ input_system_input_port_t *input_port);
+
+static void destroy_input_system_input_port(
+ input_system_input_port_t *input_port);
+
+static bool calculate_input_system_channel_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ input_system_channel_cfg_t *channel_cfg,
+ bool metadata);
+
+static bool calculate_input_system_input_port_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ input_system_input_port_cfg_t *input_port_cfg);
+
+static bool acquire_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+static void release_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+static bool acquire_ib_buffer(
+ s32 bits_per_pixel,
+ s32 pixels_per_line,
+ s32 lines_per_frame,
+ s32 align_in_bytes,
+ bool online,
+ ib_buffer_t *buf);
+
+static void release_ib_buffer(
+ ib_buffer_t *buf);
+
+static bool acquire_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+static void release_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+static bool acquire_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+static void release_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+static bool calculate_tpg_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ pixelgen_tpg_cfg_t *cfg);
+
+static bool calculate_prbs_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ pixelgen_prbs_cfg_t *cfg);
+
+static bool calculate_fe_cfg(
+ const input_system_cfg_t *isys_cfg,
+ csi_rx_frontend_cfg_t *cfg);
+
+static bool calculate_be_cfg(
+ const input_system_input_port_t *input_port,
+ const input_system_cfg_t *isys_cfg,
+ bool metadata,
+ csi_rx_backend_cfg_t *cfg);
+
+static bool calculate_stream2mmio_cfg(
+ const input_system_cfg_t *isys_cfg,
+ bool metadata,
+ stream2mmio_cfg_t *cfg);
+
+static bool calculate_ibuf_ctrl_cfg(
+ const input_system_channel_t *channel,
+ const input_system_input_port_t *input_port,
+ const input_system_cfg_t *isys_cfg,
+ ibuf_ctrl_cfg_t *cfg);
+
+static bool calculate_isys2401_dma_cfg(
+ const input_system_channel_t *channel,
+ const input_system_cfg_t *isys_cfg,
+ isys2401_dma_cfg_t *cfg);
+
+static bool calculate_isys2401_dma_port_cfg(
+ const input_system_cfg_t *isys_cfg,
+ bool raw_packed,
+ bool metadata,
+ isys2401_dma_port_cfg_t *cfg);
+
+static csi_mipi_packet_type_t get_csi_mipi_packet_type(
+ int32_t data_type);
+
+static int32_t calculate_stride(
+ s32 bits_per_pixel,
+ s32 pixels_per_line,
+ bool raw_packed,
+ int32_t align_in_bytes);
+
+/* end of Forward Declarations */
+
+/**************************************************
+ *
+ * Public Methods
+ *
+ **************************************************/
+ia_css_isys_error_t ia_css_isys_stream_create(
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_h isys_stream,
+ uint32_t isys_stream_id)
+{
+ ia_css_isys_error_t rc;
+
+ if (!isys_stream_descr || !isys_stream ||
+ isys_stream_id >= SH_CSS_MAX_ISYS_CHANNEL_NODES)
+ return false;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_create() enter:\n");
+
+ /* Reset isys_stream to 0 */
+ memset(isys_stream, 0, sizeof(*isys_stream));
+ isys_stream->enable_metadata = isys_stream_descr->metadata.enable;
+ isys_stream->id = isys_stream_id;
+
+ isys_stream->linked_isys_stream_id = isys_stream_descr->linked_isys_stream_id;
+ rc = create_input_system_input_port(isys_stream_descr,
+ &isys_stream->input_port);
+ if (rc == false)
+ return false;
+
+ rc = create_input_system_channel(isys_stream_descr, false,
+ &isys_stream->channel);
+ if (rc == false) {
+ destroy_input_system_input_port(&isys_stream->input_port);
+ return false;
+ }
+
+#ifdef ISP2401
+ /*
+ * Early polling is required for timestamp accuracy in certain cases.
+ * The ISYS HW polling is started on
+ * ia_css_isys_stream_capture_indication() instead of
+ * ia_css_pipeline_sp_wait_for_isys_stream_N() as isp processing of
+ * capture takes longer than getting an ISYS frame
+ */
+ isys_stream->polling_mode = isys_stream_descr->polling_mode;
+
+#endif
+ /* create metadata channel */
+ if (isys_stream_descr->metadata.enable) {
+ rc = create_input_system_channel(isys_stream_descr, true,
+ &isys_stream->md_channel);
+ if (rc == false) {
+ destroy_input_system_input_port(&isys_stream->input_port);
+ destroy_input_system_channel(&isys_stream->channel);
+ return false;
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_create() leave:\n");
+
+ return true;
+}
+
+void ia_css_isys_stream_destroy(
+ ia_css_isys_stream_h isys_stream)
+{
+ destroy_input_system_input_port(&isys_stream->input_port);
+ destroy_input_system_channel(&isys_stream->channel);
+ if (isys_stream->enable_metadata) {
+ /* Destroy the metadata channel only if it is allocated */
+ destroy_input_system_channel(&isys_stream->md_channel);
+ }
+}
+
+ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
+ ia_css_isys_stream_h isys_stream,
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_cfg_t *isys_stream_cfg)
+{
+ ia_css_isys_error_t rc;
+
+ if (!isys_stream_cfg ||
+ !isys_stream_descr ||
+ !isys_stream)
+ return false;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_calculate_cfg() enter:\n");
+
+ rc = calculate_input_system_channel_cfg(
+ &isys_stream->channel,
+ &isys_stream->input_port,
+ isys_stream_descr,
+ &isys_stream_cfg->channel_cfg,
+ false);
+ if (rc == false)
+ return false;
+
+ /* configure metadata channel */
+ if (isys_stream_descr->metadata.enable) {
+ isys_stream_cfg->enable_metadata = true;
+ rc = calculate_input_system_channel_cfg(
+ &isys_stream->md_channel,
+ &isys_stream->input_port,
+ isys_stream_descr,
+ &isys_stream_cfg->md_channel_cfg,
+ true);
+ if (rc == false)
+ return false;
+ }
+
+ rc = calculate_input_system_input_port_cfg(
+ &isys_stream->channel,
+ &isys_stream->input_port,
+ isys_stream_descr,
+ &isys_stream_cfg->input_port_cfg);
+ if (rc == false)
+ return false;
+
+ isys_stream->valid = 1;
+ isys_stream_cfg->valid = 1;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_calculate_cfg() leave:\n");
+ return rc;
+}
+
+/* end of Public Methods */
+
+/**************************************************
+ *
+ * Private Methods
+ *
+ **************************************************/
+static bool create_input_system_channel(
+ input_system_cfg_t *cfg,
+ bool metadata,
+ input_system_channel_t *me)
+{
+ bool rc = true;
+
+ me->dma_id = ISYS2401_DMA0_ID;
+
+ switch (cfg->input_port_id) {
+ case INPUT_SYSTEM_CSI_PORT0_ID:
+ case INPUT_SYSTEM_PIXELGEN_PORT0_ID:
+ me->stream2mmio_id = STREAM2MMIO0_ID;
+ me->ibuf_ctrl_id = IBUF_CTRL0_ID;
+ break;
+
+ case INPUT_SYSTEM_CSI_PORT1_ID:
+ case INPUT_SYSTEM_PIXELGEN_PORT1_ID:
+ me->stream2mmio_id = STREAM2MMIO1_ID;
+ me->ibuf_ctrl_id = IBUF_CTRL1_ID;
+ break;
+
+ case INPUT_SYSTEM_CSI_PORT2_ID:
+ case INPUT_SYSTEM_PIXELGEN_PORT2_ID:
+ me->stream2mmio_id = STREAM2MMIO2_ID;
+ me->ibuf_ctrl_id = IBUF_CTRL2_ID;
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ if (!rc)
+ return false;
+
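+ /*
+ * Acquire the SID, input-buffer space and DMA channel in order;
+ * on any failure, release what was already acquired and bail out.
+ */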
+ if (!acquire_sid(me->stream2mmio_id, &me->stream2mmio_sid_id)) {
+ return false;
+ }
+
+ if (!acquire_ib_buffer(
+ metadata ? cfg->metadata.bits_per_pixel :
+ cfg->input_port_resolution.bits_per_pixel,
+ metadata ? cfg->metadata.pixels_per_line :
+ cfg->input_port_resolution.pixels_per_line,
+ metadata ? cfg->metadata.lines_per_frame :
+ cfg->input_port_resolution.lines_per_frame,
+ metadata ? cfg->metadata.align_req_in_bytes :
+ cfg->input_port_resolution.align_req_in_bytes,
+ cfg->online,
+ &me->ib_buffer)) {
+ release_sid(me->stream2mmio_id, &me->stream2mmio_sid_id);
+ return false;
+ }
+
+ if (!acquire_dma_channel(me->dma_id, &me->dma_channel)) {
+ release_sid(me->stream2mmio_id, &me->stream2mmio_sid_id);
+ release_ib_buffer(&me->ib_buffer);
+ return false;
+ }
+
+ return true;
+}
+
+static void destroy_input_system_channel(
+ input_system_channel_t *me)
+{
+ release_sid(me->stream2mmio_id,
+ &me->stream2mmio_sid_id);
+
+ release_ib_buffer(&me->ib_buffer);
+
+ release_dma_channel(me->dma_id, &me->dma_channel);
+}
+
+static bool create_input_system_input_port(
+ input_system_cfg_t *cfg,
+ input_system_input_port_t *me)
+{
+ csi_mipi_packet_type_t packet_type;
+ bool rc = true;
+
+ switch (cfg->input_port_id) {
+ case INPUT_SYSTEM_CSI_PORT0_ID:
+ me->csi_rx.frontend_id = CSI_RX_FRONTEND0_ID;
+ me->csi_rx.backend_id = CSI_RX_BACKEND0_ID;
+
+ packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
+ me->csi_rx.packet_type = packet_type;
+
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ packet_type,
+ &me->csi_rx.backend_lut_entry);
+ break;
+ case INPUT_SYSTEM_PIXELGEN_PORT0_ID:
+ me->pixelgen.pixelgen_id = PIXELGEN0_ID;
+ break;
+ case INPUT_SYSTEM_CSI_PORT1_ID:
+ me->csi_rx.frontend_id = CSI_RX_FRONTEND1_ID;
+ me->csi_rx.backend_id = CSI_RX_BACKEND1_ID;
+
+ packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
+ me->csi_rx.packet_type = packet_type;
+
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ packet_type,
+ &me->csi_rx.backend_lut_entry);
+ break;
+ case INPUT_SYSTEM_PIXELGEN_PORT1_ID:
+ me->pixelgen.pixelgen_id = PIXELGEN1_ID;
+
+ break;
+ case INPUT_SYSTEM_CSI_PORT2_ID:
+ me->csi_rx.frontend_id = CSI_RX_FRONTEND2_ID;
+ me->csi_rx.backend_id = CSI_RX_BACKEND2_ID;
+
+ packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
+ me->csi_rx.packet_type = packet_type;
+
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ packet_type,
+ &me->csi_rx.backend_lut_entry);
+ break;
+ case INPUT_SYSTEM_PIXELGEN_PORT2_ID:
+ me->pixelgen.pixelgen_id = PIXELGEN2_ID;
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ me->source_type = cfg->mode;
+
+ /* for metadata */
+ me->metadata.packet_type = CSI_MIPI_PACKET_TYPE_UNDEFINED;
+ if (rc && cfg->metadata.enable) {
+ me->metadata.packet_type = get_csi_mipi_packet_type(
+ cfg->metadata.fmt_type);
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ me->metadata.packet_type,
+ &me->metadata.backend_lut_entry);
+ }
+
+ return rc;
+}
+
+static void destroy_input_system_input_port(
+ input_system_input_port_t *me)
+{
+ if (me->source_type == INPUT_SYSTEM_SOURCE_TYPE_SENSOR) {
+ release_be_lut_entry(
+ me->csi_rx.backend_id,
+ me->csi_rx.packet_type,
+ &me->csi_rx.backend_lut_entry);
+ }
+
+ if (me->metadata.packet_type != CSI_MIPI_PACKET_TYPE_UNDEFINED) {
+ /* Free the backend LUT allocated for metadata */
+ release_be_lut_entry(
+ me->csi_rx.backend_id,
+ me->metadata.packet_type,
+ &me->metadata.backend_lut_entry);
+ }
+}
+
+static bool calculate_input_system_channel_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ input_system_channel_cfg_t *channel_cfg,
+ bool metadata)
+{
+ bool rc;
+
+ rc = calculate_stream2mmio_cfg(isys_cfg, metadata,
+ &channel_cfg->stream2mmio_cfg);
+ if (!rc)
+ return false;
+
+ rc = calculate_ibuf_ctrl_cfg(
+ channel,
+ input_port,
+ isys_cfg,
+ &channel_cfg->ibuf_ctrl_cfg);
+ if (!rc)
+ return false;
+ if (metadata)
+ channel_cfg->ibuf_ctrl_cfg.stores_per_frame =
+ isys_cfg->metadata.lines_per_frame;
+
+ rc = calculate_isys2401_dma_cfg(
+ channel,
+ isys_cfg,
+ &channel_cfg->dma_cfg);
+ if (!rc)
+ return false;
+
+ rc = calculate_isys2401_dma_port_cfg(
+ isys_cfg,
+ false,
+ metadata,
+ &channel_cfg->dma_src_port_cfg);
+ if (!rc)
+ return false;
+
+ rc = calculate_isys2401_dma_port_cfg(
+ isys_cfg,
+ isys_cfg->raw_packed,
+ metadata,
+ &channel_cfg->dma_dest_port_cfg);
+ if (!rc)
+ return false;
+
+ return true;
+}
+
+static bool calculate_input_system_input_port_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ input_system_input_port_cfg_t *input_port_cfg)
+{
+ bool rc;
+
+ switch (input_port->source_type) {
+ case INPUT_SYSTEM_SOURCE_TYPE_SENSOR:
+ rc = calculate_fe_cfg(
+ isys_cfg,
+ &input_port_cfg->csi_rx_cfg.frontend_cfg);
+
+ rc &= calculate_be_cfg(
+ input_port,
+ isys_cfg,
+ false,
+ &input_port_cfg->csi_rx_cfg.backend_cfg);
+
+ if (rc && isys_cfg->metadata.enable)
+ rc &= calculate_be_cfg(input_port, isys_cfg, true,
+ &input_port_cfg->csi_rx_cfg.md_backend_cfg);
+ break;
+ case INPUT_SYSTEM_SOURCE_TYPE_TPG:
+ rc = calculate_tpg_cfg(
+ channel,
+ input_port,
+ isys_cfg,
+ &input_port_cfg->pixelgen_cfg.tpg_cfg);
+ break;
+ case INPUT_SYSTEM_SOURCE_TYPE_PRBS:
+ rc = calculate_prbs_cfg(
+ channel,
+ input_port,
+ isys_cfg,
+ &input_port_cfg->pixelgen_cfg.prbs_cfg);
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool acquire_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ return ia_css_isys_stream2mmio_sid_rmgr_acquire(stream2mmio, sid);
+}
+
+static void release_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ ia_css_isys_stream2mmio_sid_rmgr_release(stream2mmio, sid);
+}
+
+/* See also: ia_css_dma_configure_from_info() */
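+/*
+ * Worked example (illustrative, assuming HIVE_ISP_DDR_WORD_BITS == 256
+ * and HIVE_ISP_DDR_WORD_BYTES == 32): for bits_per_pixel = 10,
+ * raw_packed = false, pixels_per_line = 1920 and align_in_bytes = 32,
+ * bits_per_pixel rounds up to 16, pixels_per_word = 256 / 16 = 16,
+ * words_per_line = ceil(1920 / 16) = 120, so the returned stride is
+ * 32 * 120 = 3840 bytes per line.
+ */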
+static int32_t calculate_stride(
+ s32 bits_per_pixel,
+ s32 pixels_per_line,
+ bool raw_packed,
+ int32_t align_in_bytes)
+{
+ s32 bytes_per_line;
+ s32 pixels_per_word;
+ s32 words_per_line;
+ s32 pixels_per_line_padded;
+
+ pixels_per_line_padded = CEIL_MUL(pixels_per_line, align_in_bytes);
+
+ if (!raw_packed)
+ bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);
+
+ pixels_per_word = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
+ words_per_line = ceil_div(pixels_per_line_padded, pixels_per_word);
+ bytes_per_line = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
+
+ return bytes_per_line;
+}
+
+static bool acquire_ib_buffer(
+ s32 bits_per_pixel,
+ s32 pixels_per_line,
+ s32 lines_per_frame,
+ s32 align_in_bytes,
+ bool online,
+ ib_buffer_t *buf)
+{
+ buf->stride = calculate_stride(bits_per_pixel, pixels_per_line, false,
+ align_in_bytes);
+ if (online)
+ buf->lines = 4; /* use double buffering for online use cases */
+ else
+ buf->lines = 2;
+
+ (void)(lines_per_frame);
+ return ia_css_isys_ibuf_rmgr_acquire(buf->stride * buf->lines,
+ &buf->start_addr);
+}
+
+static void release_ib_buffer(
+ ib_buffer_t *buf)
+{
+ ia_css_isys_ibuf_rmgr_release(&buf->start_addr);
+}
+
+static bool acquire_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ return ia_css_isys_dma_channel_rmgr_acquire(dma_id, channel);
+}
+
+static void release_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ ia_css_isys_dma_channel_rmgr_release(dma_id, channel);
+}
+
+static bool acquire_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ return ia_css_isys_csi_rx_lut_rmgr_acquire(backend, packet_type, entry);
+}
+
+static void release_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ ia_css_isys_csi_rx_lut_rmgr_release(backend, packet_type, entry);
+}
+
+static bool calculate_tpg_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ pixelgen_tpg_cfg_t *cfg)
+{
+ (void)channel;
+ (void)input_port;
+
+ memcpy_s(
+ (void *)cfg,
+ sizeof(pixelgen_tpg_cfg_t),
+ (void *)(&isys_cfg->tpg_port_attr),
+ sizeof(pixelgen_tpg_cfg_t));
+ return true;
+}
+
+static bool calculate_prbs_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ pixelgen_prbs_cfg_t *cfg)
+{
+ (void)channel;
+ (void)input_port;
+
+ memcpy_s(
+ (void *)cfg,
+ sizeof(pixelgen_prbs_cfg_t),
+ (void *)(&isys_cfg->prbs_port_attr),
+ sizeof(pixelgen_prbs_cfg_t));
+ return true;
+}
+
+static bool calculate_fe_cfg(
+ const input_system_cfg_t *isys_cfg,
+ csi_rx_frontend_cfg_t *cfg)
+{
+ cfg->active_lanes = isys_cfg->csi_port_attr.active_lanes;
+ return true;
+}
+
+static bool calculate_be_cfg(
+ const input_system_input_port_t *input_port,
+ const input_system_cfg_t *isys_cfg,
+ bool metadata,
+ csi_rx_backend_cfg_t *cfg)
+{
+ memcpy_s(
+ (void *)(&cfg->lut_entry),
+ sizeof(csi_rx_backend_lut_entry_t),
+ metadata ? (void *)(&input_port->metadata.backend_lut_entry) :
+ (void *)(&input_port->csi_rx.backend_lut_entry),
+ sizeof(csi_rx_backend_lut_entry_t));
+
+ cfg->csi_mipi_cfg.virtual_channel = isys_cfg->csi_port_attr.ch_id;
+ if (metadata) {
+ cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(
+ isys_cfg->metadata.fmt_type);
+ cfg->csi_mipi_cfg.comp_enable = false;
+ cfg->csi_mipi_cfg.data_type = isys_cfg->metadata.fmt_type;
+ } else {
+ cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(
+ isys_cfg->csi_port_attr.fmt_type);
+ cfg->csi_mipi_cfg.data_type = isys_cfg->csi_port_attr.fmt_type;
+ cfg->csi_mipi_cfg.comp_enable = isys_cfg->csi_port_attr.comp_enable;
+ cfg->csi_mipi_cfg.comp_scheme = isys_cfg->csi_port_attr.comp_scheme;
+ cfg->csi_mipi_cfg.comp_predictor = isys_cfg->csi_port_attr.comp_predictor;
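+ /*
+ * The compression bit index is the offset of the data type from
+ * MIPI_FORMAT_CUSTOM0, i.e. its index within the user-defined
+ * format range.
+ */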
+ cfg->csi_mipi_cfg.comp_bit_idx = cfg->csi_mipi_cfg.data_type -
+ MIPI_FORMAT_CUSTOM0;
+ }
+
+ return true;
+}
+
+static bool calculate_stream2mmio_cfg(
+ const input_system_cfg_t *isys_cfg,
+ bool metadata,
+ stream2mmio_cfg_t *cfg
+)
+{
+ cfg->bits_per_pixel = metadata ? isys_cfg->metadata.bits_per_pixel :
+ isys_cfg->input_port_resolution.bits_per_pixel;
+
+ cfg->enable_blocking =
+ ((isys_cfg->mode == INPUT_SYSTEM_SOURCE_TYPE_TPG) ||
+ (isys_cfg->mode == INPUT_SYSTEM_SOURCE_TYPE_PRBS));
+
+ return true;
+}
+
+static bool calculate_ibuf_ctrl_cfg(
+ const input_system_channel_t *channel,
+ const input_system_input_port_t *input_port,
+ const input_system_cfg_t *isys_cfg,
+ ibuf_ctrl_cfg_t *cfg)
+{
+ const s32 bits_per_byte = 8;
+ s32 bits_per_pixel;
+ s32 bytes_per_pixel;
+ s32 left_padding;
+
+ (void)input_port;
+
+ bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
+ bytes_per_pixel = ceil_div(bits_per_pixel, bits_per_byte);
+
+ left_padding = CEIL_MUL(isys_cfg->output_port_attr.left_padding, ISP_VEC_NELEMS)
+ * bytes_per_pixel;
+
+ cfg->online = isys_cfg->online;
+
+ cfg->dma_cfg.channel = channel->dma_channel;
+ cfg->dma_cfg.cmd = _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND;
+
+ cfg->dma_cfg.shift_returned_items = 0;
+ cfg->dma_cfg.elems_per_word_in_ibuf = 0;
+ cfg->dma_cfg.elems_per_word_in_dest = 0;
+
+ cfg->ib_buffer.start_addr = channel->ib_buffer.start_addr;
+ cfg->ib_buffer.stride = channel->ib_buffer.stride;
+ cfg->ib_buffer.lines = channel->ib_buffer.lines;
+
+ /*
+ * zhengjie.lu@intel.com:
+ * "dest_buf_cfg" should be part of the input system output
+ * port configuration.
+ *
+ * TODO: move "dest_buf_cfg" to the input system output
+ * port configuration.
+ */
+
+ /* The input_buf address is only available in sched mode; this
+ * buffer is allocated in the ISP. In crun mode the address can be
+ * passed in after ISP allocation. */
+ if (cfg->online) {
+ cfg->dest_buf_cfg.start_addr = ISP_INPUT_BUF_START_ADDR + left_padding;
+ cfg->dest_buf_cfg.stride = bytes_per_pixel
+ * isys_cfg->output_port_attr.max_isp_input_width;
+ cfg->dest_buf_cfg.lines = LINES_OF_ISP_INPUT_BUF;
+ } else if (isys_cfg->raw_packed) {
+ cfg->dest_buf_cfg.stride = calculate_stride(bits_per_pixel,
+ isys_cfg->input_port_resolution.pixels_per_line,
+ isys_cfg->raw_packed,
+ isys_cfg->input_port_resolution.align_req_in_bytes);
+ } else {
+ cfg->dest_buf_cfg.stride = channel->ib_buffer.stride;
+ }
+
+ /*
+ * zhengjie.lu@intel.com:
+ * "items_per_store" is hard coded as "1", which is ONLY valid
+ * when the CSI-MIPI long packet is transferred.
+ *
+ * TODO: After the 1st stage of MERR+, make the proper solution to
+ * configure "items_per_store" so that it can also handle the CSI-MIPI
+ * short packet.
+ */
+ cfg->items_per_store = 1;
+
+ cfg->stores_per_frame = isys_cfg->input_port_resolution.lines_per_frame;
+
+ cfg->stream2mmio_cfg.sync_cmd = _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME;
+
+ /* TODO: Define conditions as when to use store words vs store packets */
+ cfg->stream2mmio_cfg.store_cmd = _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS;
+
+ return true;
+}
+
+static bool calculate_isys2401_dma_cfg(
+ const input_system_channel_t *channel,
+ const input_system_cfg_t *isys_cfg,
+ isys2401_dma_cfg_t *cfg)
+{
+ cfg->channel = channel->dma_channel;
+
+ /* Only online/sensor mode goes to VMEM;
+ * offline/buffered_sensor, tpg and prbs will go to DDR. */
+ if (isys_cfg->online)
+ cfg->connection = isys2401_dma_ibuf_to_vmem_connection;
+ else
+ cfg->connection = isys2401_dma_ibuf_to_ddr_connection;
+
+ cfg->extension = isys2401_dma_zero_extension;
+ cfg->height = 1;
+
+ return true;
+}
+
+/* See also: ia_css_dma_configure_from_info() */
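+/*
+ * "elements" is the number of pixels that fit in one DDR word and
+ * "width" is the line stride expressed in DDR words; both follow the
+ * same rounding rules as calculate_stride() above.
+ */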
+static bool calculate_isys2401_dma_port_cfg(
+ const input_system_cfg_t *isys_cfg,
+ bool raw_packed,
+ bool metadata,
+ isys2401_dma_port_cfg_t *cfg)
+{
+ s32 bits_per_pixel;
+ s32 pixels_per_line;
+ s32 align_req_in_bytes;
+
+ /* TODO: Move metadata away from isys_cfg to application layer */
+ if (metadata) {
+ bits_per_pixel = isys_cfg->metadata.bits_per_pixel;
+ pixels_per_line = isys_cfg->metadata.pixels_per_line;
+ align_req_in_bytes = isys_cfg->metadata.align_req_in_bytes;
+ } else {
+ bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
+ pixels_per_line = isys_cfg->input_port_resolution.pixels_per_line;
+ align_req_in_bytes = isys_cfg->input_port_resolution.align_req_in_bytes;
+ }
+
+ cfg->stride = calculate_stride(bits_per_pixel, pixels_per_line, raw_packed,
+ align_req_in_bytes);
+
+ if (!raw_packed)
+ bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);
+
+ cfg->elements = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
+ cfg->cropping = 0;
+ cfg->width = CEIL_DIV(cfg->stride, HIVE_ISP_DDR_WORD_BYTES);
+
+ return true;
+}
+
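+/*
+ * In the MIPI CSI-2 spec, data types 0x00..0x0f are short packets and
+ * everything above is a long packet; the checks below express that
+ * same split in terms of this driver's mipi_format enumeration, with
+ * MIPI_FORMAT_SHORT8 as the last short-packet type.
+ */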
+static csi_mipi_packet_type_t get_csi_mipi_packet_type(
+ int32_t data_type)
+{
+ csi_mipi_packet_type_t packet_type;
+
+ packet_type = CSI_MIPI_PACKET_TYPE_RESERVED;
+
+ if (data_type >= 0 && data_type <= MIPI_FORMAT_SHORT8)
+ packet_type = CSI_MIPI_PACKET_TYPE_SHORT;
+
+ if (data_type > MIPI_FORMAT_SHORT8 && data_type <= N_MIPI_FORMAT)
+ packet_type = CSI_MIPI_PACKET_TYPE_LONG;
+
+ return packet_type;
+}
+
+/* end of Private Methods */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h
new file mode 100644
index 000000000000..b675907791ad
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VIRTUAL_ISYS_H_INCLUDED__
+#define __VIRTUAL_ISYS_H_INCLUDED__
+
+/* cmd for storing a number of packets indicated by reg _STREAM2MMIO_NUM_ITEMS*/
+#define _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS 1
+
+/* command for waiting for a frame start */
+#define _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME 2
+
+#endif /* __VIRTUAL_ISYS_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
new file mode 100644
index 000000000000..6a41efee5635
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
@@ -0,0 +1,286 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPELINE_H__
+#define __IA_CSS_PIPELINE_H__
+
+#include "sh_css_internal.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_pipeline_common.h"
+
+#define IA_CSS_PIPELINE_NUM_MAX (20)
+
+/* Pipeline stage to be executed on SP/ISP */
+struct ia_css_pipeline_stage {
+ unsigned int stage_num;
+ struct ia_css_binary *binary; /* built-in binary */
+ struct ia_css_binary_info *binary_info;
+ const struct ia_css_fw_info *firmware; /* acceleration binary */
+ /* SP function for SP stage */
+ enum ia_css_pipeline_stage_sp_func sp_func;
+ unsigned int max_input_width; /* For SP raw copy */
+ struct sh_css_binary_args args;
+ int mode;
+ bool out_frame_allocated[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ bool vf_frame_allocated;
+ struct ia_css_pipeline_stage *next;
+ bool enable_zoom;
+};
+
+/* Pipeline of n stages to be executed on SP/ISP per stage */
+struct ia_css_pipeline {
+ enum ia_css_pipe_id pipe_id;
+ u8 pipe_num;
+ bool stop_requested;
+ struct ia_css_pipeline_stage *stages;
+ struct ia_css_pipeline_stage *current_stage;
+ unsigned int num_stages;
+ struct ia_css_frame in_frame;
+ struct ia_css_frame out_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ unsigned int dvs_frame_delay;
+ unsigned int inout_port_config;
+ int num_execs;
+ bool acquire_isp_each_stage;
+ u32 pipe_qos_config;
+};
+
+#define DEFAULT_PIPELINE \
+(struct ia_css_pipeline) { \
+ .pipe_id = IA_CSS_PIPE_ID_PREVIEW, \
+ .in_frame = DEFAULT_FRAME, \
+ .out_frame = {DEFAULT_FRAME}, \
+ .vf_frame = {DEFAULT_FRAME}, \
+ .dvs_frame_delay = IA_CSS_FRAME_DELAY_1, \
+ .num_execs = -1, \
+ .acquire_isp_each_stage = true, \
+ .pipe_qos_config = QOS_INVALID \
+}
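+
+/*
+ * Typical use (illustrative): value-initialize a pipeline before
+ * ia_css_pipeline_create() fills in the real parameters:
+ *
+ *	struct ia_css_pipeline my_pipeline = DEFAULT_PIPELINE;
+ */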
+
+/* Stage descriptor used to create a new stage in the pipeline */
+struct ia_css_pipeline_stage_desc {
+ struct ia_css_binary *binary;
+ const struct ia_css_fw_info *firmware;
+ enum ia_css_pipeline_stage_sp_func sp_func;
+ unsigned int max_input_width;
+ unsigned int mode;
+ struct ia_css_frame *in_frame;
+ struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame;
+};
+
+/* @brief initialize the pipeline module
+ *
+ * @return None
+ *
+ * Initializes the pipeline module. This API has to be called
+ * before any operation on the pipeline module is done
+ */
+void ia_css_pipeline_init(void);
+
+/* @brief initialize the pipeline structure with default values
+ *
+ * @param[out] pipeline structure to be initialized with defaults
+ * @param[in] pipe_id
+ * @param[in] pipe_num Number that uniquely identifies a pipeline.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * Initializes the pipeline structure with a set of default values.
+ * This API is expected to be used when a pipeline structure is allocated
+ * externally and needs sane defaults
+ */
+enum ia_css_err ia_css_pipeline_create(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay);
+
+/* @brief destroy a pipeline
+ *
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
+
+/* @brief Starts a pipeline
+ *
+ * @param[in] pipe_id
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
+ struct ia_css_pipeline *pipeline);
+
+/* @brief Request to stop a pipeline
+ *
+ * @param[in] pipeline
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
+
+/* @brief Check whether pipeline has stopped
+ *
+ * @param[in] pipeline
+ * @return true if the pipeline has stopped
+ *
+ */
+bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
+
+/* @brief Remove all stages from the pipeline and reset it to a newly created state
+ *
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline);
+
+/* @brief Add a stage to pipeline.
+ *
+ * @param pipeline Pointer to the pipeline to be added to.
+ * @param[in] stage_desc The description of the stage
+ * @param[out] stage The newly created stage.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * Add a new stage to a non-NULL pipeline.
+ * The stage consists of an ISP binary or firmware and input and output
+ * arguments.
+*/
+enum ia_css_err ia_css_pipeline_create_and_add_stage(
+ struct ia_css_pipeline *pipeline,
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **stage);
+
+/* @brief Finalize the stages in a pipeline
+ *
+ * @param pipeline Pointer to the pipeline to be added to.
+ * @return None
+ *
+ * This API is expected to be called after adding all stages
+*/
+void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
+ bool continuous);
+
+/* @brief gets a stage from the pipeline
+ *
+ * @param[in] pipeline
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage);
+
+/* @brief Gets the pipeline stage corresponding to a firmware handle from the pipeline
+ *
+ * @param[in] pipeline
+ * @param[in] fw_handle
+ * @param[out] stage Pointer to Stage
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline
+ *pipeline,
+ u32 fw_handle,
+ struct ia_css_pipeline_stage **stage);
+
+/* @brief Gets the firmware handle corresponding to the stage num from the pipeline
+ *
+ * @param[in] pipeline
+ * @param[in] stage_num
+ * @param[out] fw_handle
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline
+ *pipeline,
+ u32 stage_num,
+ uint32_t *fw_handle);
+
+/* @brief gets the output stage from the pipeline
+ *
+ * @param[in] pipeline
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_get_output_stage(
+ struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage);
+
+/* @brief Checks whether the pipeline uses params
+ *
+ * @param[in] pipeline
+ * @return true if the pipeline uses params
+ *
+ */
+bool ia_css_pipeline_uses_params(struct ia_css_pipeline *pipeline);
+
+/**
+ * @brief get the SP thread ID.
+ *
+ * @param[in] key The query key, typical use is pipe_num.
+ * @param[out] val The query value.
+ *
+ * @return
+ * true, if the query succeeds;
+ * false, if the query fails.
+ */
+bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val);
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+/**
+ * @brief Get the pipeline io status
+ *
+ * @param[in] None
+ * @return
+ * Pointer to pipe_io_status
+ */
+struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void);
+#endif
+
+/**
+ * @brief Map an SP thread to this pipeline
+ *
+ * @param[in] pipe_num
+ * @param[in] map true for mapping and false for unmapping sp threads.
+ *
+ */
+void ia_css_pipeline_map(unsigned int pipe_num, bool map);
+
+/**
+ * @brief Checks whether the pipeline is mapped to SP threads
+ *
+ * @param[in] key The query key, typical use is pipe_num.
+ *
+ * @return
+ * true, if the pipeline is mapped to SP threads;
+ * false, if the pipeline is not mapped to SP threads.
+ */
+bool ia_css_pipeline_is_mapped(unsigned int key);
+
+/**
+ * @brief Print pipeline thread mapping
+ *
+ * @param[in] none
+ *
+ * @return None
+ */
+void ia_css_pipeline_dump_thread_map_info(void);
+
+#endif /*__IA_CSS_PIPELINE_H__*/
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h
new file mode 100644
index 000000000000..b96a5b146096
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPELINE_COMMON_H__
+#define __IA_CSS_PIPELINE_COMMON_H__
+
+enum ia_css_pipeline_stage_sp_func {
+ IA_CSS_PIPELINE_RAW_COPY = 0,
+ IA_CSS_PIPELINE_BIN_COPY = 1,
+ IA_CSS_PIPELINE_ISYS_COPY = 2,
+ IA_CSS_PIPELINE_NO_FUNC = 3,
+};
+
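+/* Number of dedicated stage functions above; IA_CSS_PIPELINE_NO_FUNC is not counted. */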
+#define IA_CSS_PIPELINE_NUM_STAGE_FUNCS 3
+
+#endif /*__IA_CSS_PIPELINE_COMMON_H__*/
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
new file mode 100644
index 000000000000..8b9982de8deb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
@@ -0,0 +1,786 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_debug.h"
+#include "sw_event_global.h" /* encode_sw_event */
+#include "sp.h" /* cnd_sp_irq_enable() */
+#include "assert_support.h"
+#include "memory_access.h"
+#include "sh_css_sp.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_param.h"
+#include "ia_css_bufq.h"
+
+#define PIPELINE_NUM_UNMAPPED (~0U)
+#define PIPELINE_SP_THREAD_EMPTY_TOKEN (0x0)
+#define PIPELINE_SP_THREAD_RESERVED_TOKEN (0x1)
+
+/*******************************************************
+*** Static variables
+********************************************************/
+static unsigned int pipeline_num_to_sp_thread_map[IA_CSS_PIPELINE_NUM_MAX];
+static unsigned int pipeline_sp_thread_list[SH_CSS_MAX_SP_THREADS];
+
+/*******************************************************
+*** Static functions
+********************************************************/
+static void pipeline_init_sp_thread_map(void);
+static void pipeline_map_num_to_sp_thread(unsigned int pipe_num);
+static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num);
+static void pipeline_init_defaults(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay);
+
+static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage);
+static enum ia_css_err pipeline_stage_create(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **new_stage);
+static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline);
+static void ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me,
+ bool continuous);
+
+/*******************************************************
+*** Public functions
+********************************************************/
+void ia_css_pipeline_init(void)
+{
+ pipeline_init_sp_thread_map();
+}
+
+enum ia_css_err ia_css_pipeline_create(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay)
+{
+ assert(pipeline);
+ IA_CSS_ENTER_PRIVATE("pipeline = %p, pipe_id = %d, pipe_num = %d, dvs_frame_delay = %d",
+ pipeline, pipe_id, pipe_num, dvs_frame_delay);
+ if (!pipeline) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipeline_init_defaults(pipeline, pipe_id, pipe_num, dvs_frame_delay);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+void ia_css_pipeline_map(unsigned int pipe_num, bool map)
+{
+ assert(pipe_num < IA_CSS_PIPELINE_NUM_MAX);
+ IA_CSS_ENTER_PRIVATE("pipe_num = %d, map = %d", pipe_num, map);
+
+ if (pipe_num >= IA_CSS_PIPELINE_NUM_MAX) {
+ IA_CSS_ERROR("Invalid pipe number");
+ IA_CSS_LEAVE_PRIVATE("void");
+ return;
+ }
+ if (map)
+ pipeline_map_num_to_sp_thread(pipe_num);
+ else
+ pipeline_unmap_num_to_sp_thread(pipe_num);
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/* @brief destroy a pipeline
+ *
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline)
+{
+ assert(pipeline);
+ IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline);
+
+ if (!pipeline) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("void");
+ return;
+ }
+
+ IA_CSS_LOG("pipe_num = %d", pipeline->pipe_num);
+
+ /* Free the pipeline number */
+ ia_css_pipeline_clean(pipeline);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/* Start a pipeline; it runs asynchronously on the SP. */
+void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
+ struct ia_css_pipeline *pipeline)
+{
+ u8 pipe_num = 0;
+ unsigned int thread_id;
+
+ assert(pipeline);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_start() enter: pipe_id=%d, pipeline=%p\n",
+ pipe_id, pipeline);
+ pipeline->pipe_id = pipe_id;
+ sh_css_sp_init_pipeline(pipeline, pipe_id, pipe_num,
+ false, false, false, true, SH_CSS_BDS_FACTOR_1_00,
+ SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD,
+ IA_CSS_INPUT_MODE_MEMORY, NULL, NULL,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ (enum mipi_port_id)0,
+#endif
+ NULL, NULL);
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ if (!sh_css_sp_is_running()) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_start() error,leaving\n");
+ /* queues are invalid*/
+ return;
+ }
+ ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id,
+ 0,
+ 0);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_start() leave: return_void\n");
+}
+
+/*
+ * @brief Query the SP thread ID.
+ * Refer to "sh_css_internal.h" for details.
+ */
+bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val)
+{
+ IA_CSS_ENTER("key=%d, val=%p", key, val);
+
+ if ((!val) || (key >= IA_CSS_PIPELINE_NUM_MAX) || (key >= IA_CSS_PIPE_ID_NUM)) {
+ IA_CSS_LEAVE("return value = false");
+ return false;
+ }
+
+ *val = pipeline_num_to_sp_thread_map[key];
+
+ if (*val == (unsigned int)PIPELINE_NUM_UNMAPPED) {
+ IA_CSS_LOG("unmapped pipeline number");
+ IA_CSS_LEAVE("return value = false");
+ return false;
+ }
+ IA_CSS_LEAVE("return value = true");
+ return true;
+}
+
+void ia_css_pipeline_dump_thread_map_info(void)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "pipeline_num_to_sp_thread_map:\n");
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "pipe_num: %u, tid: 0x%x\n", i, pipeline_num_to_sp_thread_map[i]);
+ }
+}
+
+enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+
+ assert(pipeline);
+
+ if (!pipeline)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_request_stop() enter: pipeline=%p\n",
+ pipeline);
+ pipeline->stop_requested = true;
+
+ /* Send a stop event to the SP. */
+ /* This needs improvement: stop all the pipes available
+ * in the stream. */
+ ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id);
+ if (!sh_css_sp_is_running()) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_request_stop() leaving\n");
+ /* queues are invalid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_STOP_STREAM,
+ (uint8_t)thread_id,
+ 0,
+ 0);
+ sh_css_sp_uninit_pipeline(pipeline->pipe_num);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_request_stop() leave: return_err=%d\n",
+ err);
+ return err;
+}
+
+void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline)
+{
+ struct ia_css_pipeline_stage *s;
+
+ assert(pipeline);
+ IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline);
+
+ if (!pipeline) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("void");
+ return;
+ }
+ s = pipeline->stages;
+
+ while (s) {
+ struct ia_css_pipeline_stage *next = s->next;
+
+ pipeline_stage_destroy(s);
+ s = next;
+ }
+ pipeline_init_defaults(pipeline, pipeline->pipe_id, pipeline->pipe_num,
+ pipeline->dvs_frame_delay);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/* @brief Add a stage to pipeline.
+ *
+ * @param pipeline Pointer to the pipeline to be added to.
+ * @param[in] stage_desc The description of the stage
+ * @param[out] stage The newly created stage.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * Add a new stage to a non-NULL pipeline.
+ * The stage consists of an ISP binary or firmware and input and
+ * output arguments.
+*/
+enum ia_css_err ia_css_pipeline_create_and_add_stage(
+ struct ia_css_pipeline *pipeline,
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *last, *new_stage = NULL;
+ enum ia_css_err err;
+
+ /* other arguments can be NULL */
+ assert(pipeline);
+ assert(stage_desc);
+ last = pipeline->stages;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() enter:\n");
+ if (!stage_desc->binary && !stage_desc->firmware
+ && (stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() done: Invalid args\n");
+
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ /* Find the last stage */
+ while (last && last->next)
+ last = last->next;
+
+ /* If in_frame is not set, we use the out_frame from the previous
+ * stage; if there is no previous stage, it's an error.
+ */
+ if ((stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC)
+ && (!stage_desc->in_frame)
+ && (!stage_desc->firmware)
+ && (!stage_desc->binary->online)) {
+ /* Do this only for ISP stages*/
+ if (last && last->args.out_frame[0])
+ stage_desc->in_frame = last->args.out_frame[0];
+
+ if (!stage_desc->in_frame)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ /* Create the new stage */
+ err = pipeline_stage_create(stage_desc, &new_stage);
+ if (err != IA_CSS_SUCCESS) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() done: stage_create_failed\n");
+ return err;
+ }
+
+ if (last)
+ last->next = new_stage;
+ else
+ pipeline->stages = new_stage;
+
+ /* Output the new stage */
+ if (stage)
+ *stage = new_stage;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() done:\n");
+ return IA_CSS_SUCCESS;
+}
+
+void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
+ bool continuous)
+{
+ unsigned int i = 0;
+ struct ia_css_pipeline_stage *stage;
+
+ assert(pipeline);
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ stage->stage_num = i;
+ i++;
+ }
+ pipeline->num_stages = i;
+
+ ia_css_pipeline_set_zoom_stage(pipeline);
+ ia_css_pipeline_configure_inout_port(pipeline, continuous);
+}
+
+enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *s;
+
+ assert(pipeline);
+ assert(stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_get_stage() enter:\n");
+ for (s = pipeline->stages; s; s = s->next) {
+ if (s->mode == mode) {
+ *stage = s;
+ return IA_CSS_SUCCESS;
+ }
+ }
+ return IA_CSS_ERR_INTERNAL_ERROR;
+}
+
+enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline
+ *pipeline,
+ u32 fw_handle,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *s;
+
+ assert(pipeline);
+ assert(stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ for (s = pipeline->stages; s; s = s->next) {
+ if ((s->firmware) && (s->firmware->handle == fw_handle)) {
+ *stage = s;
+ return IA_CSS_SUCCESS;
+ }
+ }
+ return IA_CSS_ERR_INTERNAL_ERROR;
+}
+
+enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline
+ *pipeline,
+ u32 stage_num,
+ uint32_t *fw_handle)
+{
+ struct ia_css_pipeline_stage *s;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ if ((!pipeline) || (!fw_handle))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ for (s = pipeline->stages; s; s = s->next) {
+ if ((s->stage_num == stage_num) && (s->firmware)) {
+ *fw_handle = s->firmware->handle;
+ return IA_CSS_SUCCESS;
+ }
+ }
+ return IA_CSS_ERR_INTERNAL_ERROR;
+}
+
+enum ia_css_err ia_css_pipeline_get_output_stage(
+ struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *s;
+
+ assert(pipeline);
+ assert(stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_get_output_stage() enter:\n");
+
+ *stage = NULL;
+ /* First find acceleration firmware at end of pipe */
+ for (s = pipeline->stages; s; s = s->next) {
+ if (s->firmware && s->mode == mode &&
+ s->firmware->info.isp.sp.enable.output)
+ *stage = s;
+ }
+ if (*stage)
+ return IA_CSS_SUCCESS;
+ /* If no firmware, find binary in pipe */
+ return ia_css_pipeline_get_stage(pipeline, mode, stage);
+}
+
+bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipeline)
+{
+ /* Android compilation fails if this is made a local variable:
+ * stack size on Android is limited to 2 KB and this structure is
+ * around 2.5 KB. malloc could be used instead of a static, but if
+ * this call is made too often it would fragment memory, whereas
+ * this is a fixed allocation. */
+ static struct sh_css_sp_group sp_group;
+ unsigned int thread_id;
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_group;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_group = fw->info.sp.group;
+
+ ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id);
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_group),
+ &sp_group, sizeof(struct sh_css_sp_group));
+ return sp_group.pipe[thread_id].num_stages == 0;
+}
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void)
+{
+ return(&sh_css_sp_group.pipe_io_status);
+}
+#endif
+
+bool ia_css_pipeline_is_mapped(unsigned int key)
+{
+ bool ret = false;
+
+ IA_CSS_ENTER_PRIVATE("key = %d", key);
+
+ if ((key >= IA_CSS_PIPELINE_NUM_MAX) || (key >= IA_CSS_PIPE_ID_NUM)) {
+ IA_CSS_ERROR("Invalid key!!");
+ IA_CSS_LEAVE_PRIVATE("return = %d", false);
+ return false;
+ }
+
+ ret = (bool)(pipeline_num_to_sp_thread_map[key] != (unsigned int)
+ PIPELINE_NUM_UNMAPPED);
+
+ IA_CSS_LEAVE_PRIVATE("return = %d", ret);
+ return ret;
+}
+
+/*******************************************************
+*** Static functions
+********************************************************/
+
+/* Pipeline:
+ * To organize the several different binaries for each type of mode,
+ * we use a pipeline. A pipeline contains a number of stages, each with
+ * their own binary and frame pointers.
+ * When stages are added to a pipeline, output frames that are not passed
+ * from outside are automatically allocated.
+ * When input frames are not passed from outside, each stage will use the
+ * output frame of the previous stage as input (the full resolution output,
+ * not the viewfinder output).
+ * Pipelines must be cleaned and re-created when settings of the binaries
+ * change.
+ */
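+
+/*
+ * Typical (illustrative) call sequence for the public API above; the
+ * binary, pipe number and frame-delay values are placeholders:
+ *
+ *	struct ia_css_pipeline p;
+ *	struct ia_css_pipeline_stage *stage;
+ *	struct ia_css_pipeline_stage_desc desc = { .binary = some_binary };
+ *
+ *	ia_css_pipeline_create(&p, IA_CSS_PIPE_ID_PREVIEW, pipe_num, 1);
+ *	ia_css_pipeline_create_and_add_stage(&p, &desc, &stage);
+ *	ia_css_pipeline_finalize_stages(&p, false);
+ *	ia_css_pipeline_start(IA_CSS_PIPE_ID_PREVIEW, &p);
+ */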
+static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage)
+{
+ unsigned int i;
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (stage->out_frame_allocated[i]) {
+ ia_css_frame_free(stage->args.out_frame[i]);
+ stage->args.out_frame[i] = NULL;
+ }
+ }
+ if (stage->vf_frame_allocated) {
+ ia_css_frame_free(stage->args.out_vf_frame);
+ stage->args.out_vf_frame = NULL;
+ }
+ sh_css_free(stage);
+}
+
+static void pipeline_init_sp_thread_map(void)
+{
+ unsigned int i;
+
+ for (i = 1; i < SH_CSS_MAX_SP_THREADS; i++)
+ pipeline_sp_thread_list[i] = PIPELINE_SP_THREAD_EMPTY_TOKEN;
+
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++)
+ pipeline_num_to_sp_thread_map[i] = PIPELINE_NUM_UNMAPPED;
+}
+
+static void pipeline_map_num_to_sp_thread(unsigned int pipe_num)
+{
+ unsigned int i;
+ bool found_sp_thread = false;
+
+ /* pipe is not mapped to any thread */
+ assert(pipeline_num_to_sp_thread_map[pipe_num]
+ == (unsigned int)PIPELINE_NUM_UNMAPPED);
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ if (pipeline_sp_thread_list[i] ==
+ PIPELINE_SP_THREAD_EMPTY_TOKEN) {
+ pipeline_sp_thread_list[i] =
+ PIPELINE_SP_THREAD_RESERVED_TOKEN;
+ pipeline_num_to_sp_thread_map[pipe_num] = i;
+ found_sp_thread = true;
+ break;
+ }
+ }
+
+ /* Make sure a mapping is found */
+ /* I could do:
+ assert(i < SH_CSS_MAX_SP_THREADS);
+
+ But the below is more descriptive.
+ */
+ assert(found_sp_thread);
+}
+
+static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num)
+{
+ unsigned int thread_id;
+
+ assert(pipeline_num_to_sp_thread_map[pipe_num]
+ != (unsigned int)PIPELINE_NUM_UNMAPPED);
+
+ thread_id = pipeline_num_to_sp_thread_map[pipe_num];
+ pipeline_num_to_sp_thread_map[pipe_num] = PIPELINE_NUM_UNMAPPED;
+ pipeline_sp_thread_list[thread_id] = PIPELINE_SP_THREAD_EMPTY_TOKEN;
+}
+
+static enum ia_css_err pipeline_stage_create(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **new_stage)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage *stage = NULL;
+ struct ia_css_binary *binary;
+ struct ia_css_frame *vf_frame;
+ struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ const struct ia_css_fw_info *firmware;
+ unsigned int i;
+
+ /* Verify input parameters*/
+ if (!(stage_desc->in_frame) && !(stage_desc->firmware)
+ && (stage_desc->binary) && !(stage_desc->binary->online)) {
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+
+ binary = stage_desc->binary;
+ firmware = stage_desc->firmware;
+ vf_frame = stage_desc->vf_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ out_frame[i] = stage_desc->out_frame[i];
+ }
+
+ stage = sh_css_malloc(sizeof(*stage));
+ if (!stage) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ memset(stage, 0, sizeof(*stage));
+
+ if (firmware) {
+ stage->binary = NULL;
+ stage->binary_info =
+ (struct ia_css_binary_info *)&firmware->info.isp;
+ } else {
+ stage->binary = binary;
+ if (binary)
+ stage->binary_info =
+ (struct ia_css_binary_info *)binary->info;
+ else
+ stage->binary_info = NULL;
+ }
+
+ stage->firmware = firmware;
+ stage->sp_func = stage_desc->sp_func;
+ stage->max_input_width = stage_desc->max_input_width;
+ stage->mode = stage_desc->mode;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ stage->out_frame_allocated[i] = false;
+ stage->vf_frame_allocated = false;
+ stage->next = NULL;
+ sh_css_binary_args_reset(&stage->args);
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (!(out_frame[i]) && (binary)
+ && (binary->out_frame_info[i].res.width)) {
+ err = ia_css_frame_allocate_from_info(&out_frame[i],
+ &binary->out_frame_info[i]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ stage->out_frame_allocated[i] = true;
+ }
+ }
+ /* The VF frame is not needed in case of need_pp.
+ * However, the capture binary needs a vf frame to write to.
+ */
+ if (!vf_frame) {
+ if ((binary && binary->vf_frame_info.res.width) ||
+ (firmware && firmware->info.isp.sp.enable.vf_veceven)
+ ) {
+ err = ia_css_frame_allocate_from_info(&vf_frame,
+ &binary->vf_frame_info);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ stage->vf_frame_allocated = true;
+ }
+ } else if (vf_frame && binary && binary->vf_frame_info.res.width
+ && !firmware) {
+ /* only mark as allocated if buffer pointer available */
+ if (vf_frame->data != mmgr_NULL)
+ stage->vf_frame_allocated = true;
+ }
+
+ stage->args.in_frame = stage_desc->in_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ stage->args.out_frame[i] = out_frame[i];
+ stage->args.out_vf_frame = vf_frame;
+ *new_stage = stage;
+ return err;
+ERR:
+ if (stage)
+ pipeline_stage_destroy(stage);
+ return err;
+}
+
+static void pipeline_init_defaults(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay)
+{
+ unsigned int i;
+
+ pipeline->pipe_id = pipe_id;
+ pipeline->stages = NULL;
+ pipeline->stop_requested = false;
+ pipeline->current_stage = NULL;
+ pipeline->in_frame = DEFAULT_FRAME;
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ pipeline->out_frame[i] = DEFAULT_FRAME;
+ pipeline->vf_frame[i] = DEFAULT_FRAME;
+ }
+ pipeline->num_execs = -1;
+ pipeline->acquire_isp_each_stage = true;
+ pipeline->pipe_num = (uint8_t)pipe_num;
+ pipeline->dvs_frame_delay = dvs_frame_delay;
+}
+
+static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline)
+{
+ struct ia_css_pipeline_stage *stage = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(pipeline);
+ if (pipeline->pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
+ /* in preview pipeline, vf_pp stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_VF_PP, &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = true;
+ } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_CAPTURE) {
+ /* in capture pipeline, capture_pp stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP,
+ &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = true;
+ } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_VIDEO) {
+ /* in video pipeline, video stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_VIDEO, &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = true;
+ } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_YUVPP) {
+ /* in yuvpp pipeline, first yuv_scaler stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP,
+ &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = true;
+ }
+}
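+
+/* Note: for the YUVPP pipeline the first yuv_scaler stage runs with the
+ * IA_CSS_BINARY_MODE_CAPTURE_PP binary mode (see the lookup above), so
+ * there is no dedicated yuv_scaler mode to query here.
+ */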
+
+static void
+ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me,
+ bool continuous)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipeline_configure_inout_port() enter: pipe_id(%d) continuous(%d)\n",
+ me->pipe_id, continuous);
+ switch (me->pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ case IA_CSS_PIPE_ID_VIDEO:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)(continuous ? SH_CSS_COPYSINK_TYPE : SH_CSS_HOST_TYPE), 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ case IA_CSS_PIPE_ID_COPY: /*Copy pipe ports configured to "offline" mode*/
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ if (continuous) {
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_COPYSINK_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_TAGGERSINK_TYPE, 1);
+ } else {
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ }
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)(continuous ? SH_CSS_TAGGERSINK_TYPE : SH_CSS_HOST_TYPE),
+ 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)(SH_CSS_HOST_TYPE), 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ default:
+ break;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipeline_configure_inout_port() leave: inout_port_config(%x)\n",
+ me->inout_port_config);
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h b/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h
new file mode 100644
index 000000000000..6daeb060daf9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h
@@ -0,0 +1,175 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_QUEUE_H
+#define __IA_CSS_QUEUE_H
+
+#include <platform_support.h>
+#include <type_support.h>
+
+#include "ia_css_queue_comm.h"
+#include "../src/queue_access.h"
+
+/* Local Queue object descriptor */
+struct ia_css_queue_local {
+ ia_css_circbuf_desc_t *cb_desc; /*Circbuf desc for local queues*/
+ ia_css_circbuf_elem_t *cb_elems; /*Circbuf elements*/
+};
+
+typedef struct ia_css_queue_local ia_css_queue_local_t;
+
+/* Handle for queue object*/
+typedef struct ia_css_queue ia_css_queue_t;
+
+/*****************************************************************************
+ * Queue Public APIs
+ *****************************************************************************/
+/* @brief Initialize a local queue instance.
+ *
+ * @param[out] qhandle. Handle to queue instance for use with API
+ * @param[in] desc. Descriptor with queue properties filled-in
+ * @return 0 - Successful init of local queue instance.
+ * @return EINVAL - Invalid argument.
+ *
+ */
+int ia_css_queue_local_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_local_t *desc);
+
+/* @brief Initialize a remote queue instance
+ *
+ * @param[out] qhandle. Handle to queue instance for use with API
+ * @param[in] desc. Descriptor with queue properties filled-in
+ * @return 0 - Successful init of remote queue instance.
+ * @return EINVAL - Invalid argument.
+ */
+int ia_css_queue_remote_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_remote_t *desc);
+
+/* @brief Uninitialize a queue instance
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @return 0 - Successful uninit.
+ *
+ */
+int ia_css_queue_uninit(
+ ia_css_queue_t *qhandle);
+
+/* @brief Enqueue an item in the queue instance
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[in] item. Object to be enqueued.
+ * @return 0 - Successful enqueue.
+ * @return EINVAL - Invalid argument.
+ * @return ENOBUFS - Queue is full.
+ *
+ */
+int ia_css_queue_enqueue(
+ ia_css_queue_t *qhandle,
+ uint32_t item);
+
+/* @brief Dequeue an item from the queue instance
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] item. Object to be dequeued into this item.
+ *
+ * @return 0 - Successful dequeue.
+ * @return EINVAL - Invalid argument.
+ * @return ENODATA - Queue is empty.
+ *
+ */
+int ia_css_queue_dequeue(
+ ia_css_queue_t *qhandle,
+ uint32_t *item);
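+
+/* Illustrative usage sketch (not part of this interface; 'my_desc' and
+ * 'handle_token' are hypothetical placeholders). Enqueue returns ENOBUFS
+ * when the queue is full; dequeue returns ENODATA when it is empty:
+ *
+ *   ia_css_queue_t q;
+ *   uint32_t token;
+ *
+ *   if (ia_css_queue_local_init(&q, &my_desc) == 0) {
+ *           ia_css_queue_enqueue(&q, 42);
+ *           while (ia_css_queue_dequeue(&q, &token) == 0)
+ *                   handle_token(token);
+ *           ia_css_queue_uninit(&q);
+ *   }
+ */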
+
+/* @brief Check if the queue is empty
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] is_empty True if empty, false if not.
+ * @return 0 - Successfully accessed state.
+ * @return EINVAL - Invalid argument.
+ * @return ENOSYS - Function not implemented.
+ *
+ */
+int ia_css_queue_is_empty(
+ ia_css_queue_t *qhandle,
+ bool *is_empty);
+
+/* @brief Check if the queue is full
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] is_full True if full, false if not.
+ * @return 0 - Successfully accessed state.
+ * @return EINVAL - Invalid argument.
+ * @return ENOSYS - Function not implemented.
+ *
+ */
+int ia_css_queue_is_full(
+ ia_css_queue_t *qhandle,
+ bool *is_full);
+
+/* @brief Get used space in the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] size Number of elements currently in the queue
+ * @return 0 - Successfully accessed state.
+ * @return EINVAL - Invalid argument.
+ *
+ */
+int ia_css_queue_get_used_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size);
+
+/* @brief Get free space in the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] size Number of free elements in the queue
+ * @return 0 - Successfully accessed state.
+ * @return EINVAL - Invalid argument.
+ *
+ */
+int ia_css_queue_get_free_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size);
+
+/* @brief Peek at an element in the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[in] offset Offset of element to peek,
+ * starting from head of queue
+ * @param[out] element Value of the element returned
+ * @return 0 - Successfully accessed state.
+ * @return EINVAL - Invalid argument.
+ *
+ */
+int ia_css_queue_peek(
+ ia_css_queue_t *qhandle,
+ u32 offset,
+ uint32_t *element);
+
+/* @brief Get the usable size for the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] size Size value to be returned here.
+ * @return 0 - Successful get size.
+ * @return EINVAL - Invalid argument.
+ * @return ENOSYS - Function not implemented.
+ *
+ */
+int ia_css_queue_get_size(
+ ia_css_queue_t *qhandle,
+ uint32_t *size);
+
+#endif /* __IA_CSS_QUEUE_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h b/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h
new file mode 100644
index 000000000000..87fa4288d9a6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_QUEUE_COMM_H
+#define __IA_CSS_QUEUE_COMM_H
+
+#include "type_support.h"
+#include "ia_css_circbuf.h"
+/*****************************************************************************
+ * Queue Public Data Structures
+ *****************************************************************************/
+
+/* Queue location specifier */
+/* Avoiding enums to save space */
+#define IA_CSS_QUEUE_LOC_HOST 0
+#define IA_CSS_QUEUE_LOC_SP 1
+#define IA_CSS_QUEUE_LOC_ISP 2
+
+/* Queue type specifier */
+/* Avoiding enums to save space */
+#define IA_CSS_QUEUE_TYPE_LOCAL 0
+#define IA_CSS_QUEUE_TYPE_REMOTE 1
+
+/* For DDR-allocated queues, allocate at least this many elements.
+ * A DDR -> SP DMEM DMA transfer needs a 32-byte aligned address.
+ * Since each element is 4 bytes, 8 elements must be DMAed to
+ * access a single element.
+ */
+#define IA_CSS_MIN_ELEM_COUNT 8
+#define IA_CSS_DMA_XFER_MASK (IA_CSS_MIN_ELEM_COUNT - 1)
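+
+/* Illustrative note (not part of this header): a requested element count
+ * can be rounded up to this DMA granularity with the usual mask idiom,
+ *
+ *   aligned = (count + IA_CSS_DMA_XFER_MASK) & ~IA_CSS_DMA_XFER_MASK;
+ *
+ * so a request for 5 elements rounds up to 8.
+ */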
+
+/* Remote Queue object descriptor */
+struct ia_css_queue_remote {
+ u32 cb_desc_addr; /*Circbuf desc address for remote queues*/
+ u32 cb_elems_addr; /*Circbuf elements addr for remote queue*/
+ u8 location; /* Cell location for queue */
+ u8 proc_id; /* Processor id for queue access */
+};
+
+typedef struct ia_css_queue_remote ia_css_queue_remote_t;
+
+#endif /* __IA_CSS_QUEUE_COMM_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c
new file mode 100644
index 000000000000..dd79c6f180af
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c
@@ -0,0 +1,422 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_queue.h"
+#include <math_support.h>
+#include <ia_css_circbuf.h>
+#include <ia_css_circbuf_desc.h>
+#include "queue_access.h"
+
+/*****************************************************************************
+ * Queue Public APIs
+ *****************************************************************************/
+int ia_css_queue_local_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_local_t *desc)
+{
+ if (!qhandle || !desc ||
+ !desc->cb_elems || !desc->cb_desc) {
+ /* Invalid parameters, return error*/
+ return EINVAL;
+ }
+
+ /* Mark the queue as Local */
+ qhandle->type = IA_CSS_QUEUE_TYPE_LOCAL;
+
+ /* Create a local circular buffer queue*/
+ ia_css_circbuf_create(&qhandle->desc.cb_local,
+ desc->cb_elems,
+ desc->cb_desc);
+
+ return 0;
+}
+
+int ia_css_queue_remote_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_remote_t *desc)
+{
+ if (!qhandle || !desc) {
+ /* Invalid parameters, return error*/
+ return EINVAL;
+ }
+
+ /* Mark the queue as remote*/
+ qhandle->type = IA_CSS_QUEUE_TYPE_REMOTE;
+
+ /* Copy over the local queue descriptor*/
+ qhandle->location = desc->location;
+ qhandle->proc_id = desc->proc_id;
+ qhandle->desc.remote.cb_desc_addr = desc->cb_desc_addr;
+ qhandle->desc.remote.cb_elems_addr = desc->cb_elems_addr;
+
+ /* If queue is remote, we let the local processor
+ * do its init, before using it. This is just to get us
+ * started, we can remove this restriction as we go ahead
+ */
+
+ return 0;
+}
+
+int ia_css_queue_uninit(
+ ia_css_queue_t *qhandle)
+{
+ if (!qhandle)
+ return EINVAL;
+
+ /* Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Local queues are created. Destroy it*/
+ ia_css_circbuf_destroy(&qhandle->desc.cb_local);
+ }
+
+ return 0;
+}
+
+int ia_css_queue_enqueue(
+ ia_css_queue_t *qhandle,
+ uint32_t item)
+{
+ int error = 0;
+
+ if (!qhandle)
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ if (ia_css_circbuf_is_full(&qhandle->desc.cb_local)) {
+ /* Cannot push the element. Return*/
+ return ENOBUFS;
+ }
+
+ /* Push the element*/
+ ia_css_circbuf_push(&qhandle->desc.cb_local, item);
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ ia_css_circbuf_desc_t cb_desc;
+ ia_css_circbuf_elem_t cb_elem;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ /* a. Load the queue cb_desc from remote */
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ if (ia_css_circbuf_desc_is_full(&cb_desc))
+ return ENOBUFS;
+
+ cb_elem.val = item;
+
+ error = ia_css_queue_item_store(qhandle, cb_desc.end, &cb_elem);
+ if (error != 0)
+ return error;
+
+ cb_desc.end = (cb_desc.end + 1) % cb_desc.size;
+
+ /* c. Store the queue object */
+ /* Set only the fields requiring update with
+ * valid values. This avoids unnecessary calls
+ * to the load/store functions.
+ */
+ ignore_desc_flags = QUEUE_IGNORE_SIZE_START_STEP_FLAGS;
+
+ error = ia_css_queue_store(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_dequeue(
+ ia_css_queue_t *qhandle,
+ uint32_t *item)
+{
+ int error = 0;
+
+ if (!qhandle || !item)
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ if (ia_css_circbuf_is_empty(&qhandle->desc.cb_local)) {
+ /* Nothing to pop. Return empty queue*/
+ return ENODATA;
+ }
+
+ *item = ia_css_circbuf_pop(&qhandle->desc.cb_local);
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ ia_css_circbuf_elem_t cb_elem;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ if (ia_css_circbuf_desc_is_empty(&cb_desc))
+ return ENODATA;
+
+ error = ia_css_queue_item_load(qhandle, cb_desc.start, &cb_elem);
+ if (error != 0)
+ return error;
+
+ *item = cb_elem.val;
+
+ cb_desc.start = OP_std_modadd(cb_desc.start, 1, cb_desc.size);
+
+ /* c. Store the queue object */
+ /* Set only the fields requiring update with
+ * valid values. This avoids unnecessary calls
+ * to the load/store functions.
+ */
+ ignore_desc_flags = QUEUE_IGNORE_SIZE_END_STEP_FLAGS;
+ error = ia_css_queue_store(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+ }
+ return 0;
+}
+
+int ia_css_queue_is_full(
+ ia_css_queue_t *qhandle,
+ bool *is_full)
+{
+ int error = 0;
+
+ if ((!qhandle) || (!is_full))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *is_full = ia_css_circbuf_is_full(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *is_full = ia_css_circbuf_desc_is_full(&cb_desc);
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_get_free_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size)
+{
+ int error = 0;
+
+ if ((!qhandle) || (!size))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *size = ia_css_circbuf_get_free_elems(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *size = ia_css_circbuf_desc_get_free_elems(&cb_desc);
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_get_used_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size)
+{
+ int error = 0;
+
+ if ((!qhandle) || (!size))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *size = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *size = ia_css_circbuf_desc_get_num_elems(&cb_desc);
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_peek(
+ ia_css_queue_t *qhandle,
+ u32 offset,
+ uint32_t *element)
+{
+ u32 num_elems = 0;
+ int error = 0;
+
+ if ((!qhandle) || (!element))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ /* Check if offset is valid */
+ num_elems = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local);
+ if (offset > num_elems)
+ return EINVAL;
+
+ *element = ia_css_circbuf_peek_from_start(&qhandle->desc.cb_local, (int)offset);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ ia_css_circbuf_elem_t cb_elem;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* Check if offset is valid */
+ num_elems = ia_css_circbuf_desc_get_num_elems(&cb_desc);
+ if (offset > num_elems)
+ return EINVAL;
+
+ offset = OP_std_modadd(cb_desc.start, offset, cb_desc.size);
+ error = ia_css_queue_item_load(qhandle, (uint8_t)offset, &cb_elem);
+ if (error != 0)
+ return error;
+
+ *element = cb_elem.val;
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_is_empty(
+ ia_css_queue_t *qhandle,
+ bool *is_empty)
+{
+ int error = 0;
+
+ if ((!qhandle) || (!is_empty))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *is_empty = ia_css_circbuf_is_empty(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *is_empty = ia_css_circbuf_desc_is_empty(&cb_desc);
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_get_size(
+ ia_css_queue_t *qhandle,
+ uint32_t *size)
+{
+ int error = 0;
+
+ if ((!qhandle) || (!size))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ /* Return maximum usable capacity */
+ *size = ia_css_circbuf_get_size(&qhandle->desc.cb_local);
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_START_END_STEP_FLAGS;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* Return maximum usable capacity */
+ *size = cb_desc.size;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c
new file mode 100644
index 000000000000..1e8d3eb82eab
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c
@@ -0,0 +1,176 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "type_support.h"
+#include "queue_access.h"
+#include "ia_css_circbuf.h"
+#include "sp.h"
+#include "memory_access.h"
+#include "assert_support.h"
+
+int ia_css_queue_load(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags)
+{
+ if (!rdesc || !cb_desc)
+ return EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG)) {
+ cb_desc->size = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, size));
+
+ if (cb_desc->size == 0) {
+ /* Workaround, restored after the queue refactor:
+ * reading the size through sp_dmem_load_*
+ * sometimes returns zero. Since the size is used
+ * as the modulus in circular-buffer index
+ * arithmetic, a zero value would cause a
+ * division-by-zero exception; reject it here. */
+ return EDOM;
+ }
+ }
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG))
+ cb_desc->start = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, start));
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG))
+ cb_desc->end = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, end));
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG))
+ cb_desc->step = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, step));
+
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ /* doing DMA transfer of entire structure */
+ mmgr_load(rdesc->desc.remote.cb_desc_addr,
+ (void *)cb_desc,
+ sizeof(ia_css_circbuf_desc_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return ENOTSUP;
+ }
+
+ return 0;
+}
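+
+/* Note: for SP-resident queues the descriptor fields are read one byte at
+ * a time (sp_dmem_load_uint8), so remote size/start/end/step values are
+ * assumed to fit in 8 bits.
+ */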
+
+int ia_css_queue_store(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags)
+{
+ if (!rdesc || !cb_desc)
+ return EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, size),
+ cb_desc->size);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, start),
+ cb_desc->start);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, end),
+ cb_desc->end);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, step),
+ cb_desc->step);
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ /* doing DMA transfer of entire structure */
+ mmgr_store(rdesc->desc.remote.cb_desc_addr,
+ (void *)cb_desc,
+ sizeof(ia_css_circbuf_desc_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return ENOTSUP;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_item_load(
+ struct ia_css_queue *rdesc,
+ u8 position,
+ ia_css_circbuf_elem_t *item)
+{
+ if (!rdesc || !item)
+ return EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ sp_dmem_load(rdesc->proc_id,
+ rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ mmgr_load(rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ (void *)item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return ENOTSUP;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_item_store(
+ struct ia_css_queue *rdesc,
+ u8 position,
+ ia_css_circbuf_elem_t *item)
+{
+ if (!rdesc || !item)
+ return EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ sp_dmem_store(rdesc->proc_id,
+ rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ mmgr_store(rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ (void *)item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return ENOTSUP;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h
new file mode 100644
index 000000000000..884c55a754d1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h
@@ -0,0 +1,85 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __QUEUE_ACCESS_H
+#define __QUEUE_ACCESS_H
+
+#include <type_support.h>
+#include <ia_css_queue_comm.h>
+#include <ia_css_circbuf.h>
+#include <error_support.h>
+
+#define QUEUE_IGNORE_START_FLAG 0x0001
+#define QUEUE_IGNORE_END_FLAG 0x0002
+#define QUEUE_IGNORE_SIZE_FLAG 0x0004
+#define QUEUE_IGNORE_STEP_FLAG 0x0008
+#define QUEUE_IGNORE_DESC_FLAGS_MAX 0x000f
+
+#define QUEUE_IGNORE_SIZE_START_STEP_FLAGS \
+ (QUEUE_IGNORE_SIZE_FLAG | \
+ QUEUE_IGNORE_START_FLAG | \
+ QUEUE_IGNORE_STEP_FLAG)
+
+#define QUEUE_IGNORE_SIZE_END_STEP_FLAGS \
+ (QUEUE_IGNORE_SIZE_FLAG | \
+ QUEUE_IGNORE_END_FLAG | \
+ QUEUE_IGNORE_STEP_FLAG)
+
+#define QUEUE_IGNORE_START_END_STEP_FLAGS \
+ (QUEUE_IGNORE_START_FLAG | \
+ QUEUE_IGNORE_END_FLAG | \
+ QUEUE_IGNORE_STEP_FLAG)
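+
+/* Example: an enqueue only advances 'end', so it stores the descriptor
+ * back with QUEUE_IGNORE_SIZE_START_STEP_FLAGS; a dequeue only advances
+ * 'start' and uses QUEUE_IGNORE_SIZE_END_STEP_FLAGS.
+ */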
+
+#define QUEUE_CB_DESC_INIT(cb_desc) \
+ do { \
+ (cb_desc)->size = 0; \
+ (cb_desc)->step = 0; \
+ (cb_desc)->start = 0; \
+ (cb_desc)->end = 0; \
+ } while (0)
+
+struct ia_css_queue {
+ u8 type; /* Specify remote/local type of access */
+ u8 location; /* Cell location for queue */
+ u8 proc_id; /* Processor id for queue access */
+ union {
+ ia_css_circbuf_t cb_local;
+ struct {
+ u32 cb_desc_addr; /*Circbuf desc address for remote queues*/
+ u32 cb_elems_addr; /*Circbuf elements addr for remote queue*/
+ } remote;
+ } desc;
+};
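+
+/* Note: 'type' selects the active member of 'desc': cb_local for
+ * IA_CSS_QUEUE_TYPE_LOCAL, remote for IA_CSS_QUEUE_TYPE_REMOTE.
+ */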
+
+int ia_css_queue_load(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags);
+
+int ia_css_queue_store(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags);
+
+int ia_css_queue_item_load(
+ struct ia_css_queue *rdesc,
+ u8 position,
+ ia_css_circbuf_elem_t *item);
+
+int ia_css_queue_item_store(
+ struct ia_css_queue *rdesc,
+ u8 position,
+ ia_css_circbuf_elem_t *item);
+
+#endif /* __QUEUE_ACCESS_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h
new file mode 100644
index 000000000000..47a80ae8dbf3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h
@@ -0,0 +1,72 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_RMGR_H
+#define _IA_CSS_RMGR_H
+
+#include <ia_css_err.h>
+
+#ifndef __INLINE_RMGR__
+#define STORAGE_CLASS_RMGR_H extern
+#define STORAGE_CLASS_RMGR_C
+#else /* __INLINE_RMGR__ */
+#define STORAGE_CLASS_RMGR_H static inline
+#define STORAGE_CLASS_RMGR_C static inline
+#endif /* __INLINE_RMGR__ */
+
+/**
+ * @brief Initialize resource manager (host/common)
+ */
+enum ia_css_err ia_css_rmgr_init(void);
+
+/**
+ * @brief Uninitialize resource manager (host/common)
+ */
+void ia_css_rmgr_uninit(void);
+
+/*****************************************************************
+ * Interface definition - resource type (host/common)
+ *****************************************************************
+ *
+ * struct ia_css_rmgr_<type>_pool;
+ * struct ia_css_rmgr_<type>_handle;
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_init_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool);
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_uninit_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool);
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_acq_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool,
+ * struct ia_css_rmgr_<type>_handle **handle);
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_rel_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool,
+ * struct ia_css_rmgr_<type>_handle **handle);
+ *
+ *****************************************************************
+ * Interface definition - refcounting (host/common)
+ *****************************************************************
+ *
+ * void ia_css_rmgr_refcount_retain_<type>(
+ * struct ia_css_rmgr_<type>_handle **handle);
+ *
+ * void ia_css_rmgr_refcount_release_<type>(
+ * struct ia_css_rmgr_<type>_handle **handle);
+ */
+
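+/* ia_css_rmgr_vbuf.h below instantiates this interface for <type> = vbuf */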
+#include "ia_css_rmgr_vbuf.h"
+
+#endif /* _IA_CSS_RMGR_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h b/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h
new file mode 100644
index 000000000000..0660b65f2e34
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h
@@ -0,0 +1,99 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_RMGR_VBUF_H
+#define _IA_CSS_RMGR_VBUF_H
+
+#include "ia_css_rmgr.h"
+#include <type_support.h>
+#include <system_types.h>
+
+/**
+ * @brief Data structure for the resource handle (host, vbuf)
+ */
+struct ia_css_rmgr_vbuf_handle {
+ hrt_vaddress vptr;
+ u8 count;
+ u32 size;
+};
+
+/**
+ * @brief Data structure for the resource pool (host, vbuf)
+ */
+struct ia_css_rmgr_vbuf_pool {
+ u8 copy_on_write;
+ u8 recycle;
+ u32 size;
+ u32 index;
+ struct ia_css_rmgr_vbuf_handle **handles;
+};
+
+/**
+ * @brief VBUF resource pools
+ */
+extern struct ia_css_rmgr_vbuf_pool *vbuf_ref;
+extern struct ia_css_rmgr_vbuf_pool *vbuf_write;
+extern struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool;
+
+/**
+ * @brief Initialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+STORAGE_CLASS_RMGR_H enum ia_css_err ia_css_rmgr_init_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool);
+
+/**
+ * @brief Uninitialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+STORAGE_CLASS_RMGR_H void ia_css_rmgr_uninit_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool);
+
+/**
+ * @brief Acquire a handle from the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+STORAGE_CLASS_RMGR_H void ia_css_rmgr_acq_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle);
+
+/**
+ * @brief Release a handle to the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+STORAGE_CLASS_RMGR_H void ia_css_rmgr_rel_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle);
+
+/**
+ * @brief Retain the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle);
+
+/**
+ * @brief Release the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle);
+
+#endif /* _IA_CSS_RMGR_VBUF_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c
new file mode 100644
index 000000000000..23ae19ee65ca
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_rmgr.h"
+
+enum ia_css_err ia_css_rmgr_init(void)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ err = ia_css_rmgr_init_vbuf(vbuf_ref);
+ if (err == IA_CSS_SUCCESS)
+ err = ia_css_rmgr_init_vbuf(vbuf_write);
+ if (err == IA_CSS_SUCCESS)
+ err = ia_css_rmgr_init_vbuf(hmm_buffer_pool);
+ if (err != IA_CSS_SUCCESS)
+ ia_css_rmgr_uninit();
+ return err;
+}
+
+/*
+ * @brief Uninitialize the resource manager (host)
+ */
+void ia_css_rmgr_uninit(void)
+{
+ ia_css_rmgr_uninit_vbuf(hmm_buffer_pool);
+ ia_css_rmgr_uninit_vbuf(vbuf_write);
+ ia_css_rmgr_uninit_vbuf(vbuf_ref);
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
new file mode 100644
index 000000000000..2c204dceb491
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
@@ -0,0 +1,336 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_rmgr.h"
+
+#include <type_support.h>
+#include <assert_support.h>
+#include <platform_support.h> /* memset */
+#include <memory_access.h> /* mmmgr_malloc, mhmm_free */
+#include <ia_css_debug.h>
+
+/*
+ * @brief VBUF resource handles
+ */
+#define NUM_HANDLES 1000
+static struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES];
+
+/*
+ * @brief VBUF resource pool - refpool
+ */
+static struct ia_css_rmgr_vbuf_pool refpool = {
+ false, /* copy_on_write */
+ false, /* recycle */
+ 0, /* size */
+ 0, /* index */
+ NULL, /* handles */
+};
+
+/*
+ * @brief VBUF resource pool - writepool
+ */
+static struct ia_css_rmgr_vbuf_pool writepool = {
+ true, /* copy_on_write */
+ false, /* recycle */
+ 0, /* size */
+ 0, /* index */
+ NULL, /* handles */
+};
+
+/*
+ * @brief VBUF resource pool - hmmbufferpool
+ */
+static struct ia_css_rmgr_vbuf_pool hmmbufferpool = {
+ true, /* copy_on_write */
+ true, /* recycle */
+ 32, /* size */
+ 0, /* index */
+ NULL, /* handles */
+};
+
+struct ia_css_rmgr_vbuf_pool *vbuf_ref = &refpool;
+struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool;
+struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool;
+
+/*
+ * @brief Initialize the reference count (host, vbuf)
+ */
+static void rmgr_refcount_init_vbuf(void)
+{
+ /* initialize the refcount table */
+ memset(&handle_table, 0, sizeof(handle_table));
+}
+
+/*
+ * @brief Retain the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
+{
+ int i;
+ struct ia_css_rmgr_vbuf_handle *h;
+
+ if ((!handle) || (!*handle)) {
+ IA_CSS_LOG("Invalid inputs");
+ return;
+ }
+ /* new vbuf to count on */
+ if ((*handle)->count == 0) {
+ h = *handle;
+ *handle = NULL;
+ for (i = 0; i < NUM_HANDLES; i++) {
+ if (handle_table[i].count == 0) {
+ *handle = &handle_table[i];
+ break;
+ }
+ }
+ /* If the loop did not break, *handle is still NULL;
+ * this is an error, so report it.
+ */
+ if (!*handle) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_i_host_refcount_retain_vbuf() failed to find empty slot!\n");
+ return;
+ }
+ (*handle)->vptr = h->vptr;
+ (*handle)->size = h->size;
+ }
+ (*handle)->count++;
+}
+
+/*
+ * @brief Release the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
+{
+ if ((!handle) || (!*handle) || ((*handle)->count == 0)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_rmgr_refcount_release_vbuf() invalid arguments!\n");
+ return;
+ }
+ /* decrease reference count */
+ (*handle)->count--;
+ /* remove from admin */
+ if ((*handle)->count == 0) {
+ (*handle)->vptr = 0x0;
+ (*handle)->size = 0;
+ *handle = NULL;
+ }
+}
+
+/*
+ * @brief Initialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+enum ia_css_err ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ size_t bytes_needed;
+
+ rmgr_refcount_init_vbuf();
+ assert(pool);
+ if (!pool)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ /* initialize the recycle pool if used */
+ if (pool->recycle && pool->size) {
+ /* allocate memory for storing the handles */
+ bytes_needed =
+ sizeof(void *) *
+ pool->size;
+ pool->handles = sh_css_malloc(bytes_needed);
+ if (pool->handles)
+ memset(pool->handles, 0, bytes_needed);
+ else
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ } else {
+ /* just in case, set the size to 0 */
+ pool->size = 0;
+ pool->handles = NULL;
+ }
+ return err;
+}
+
+/*
+ * @brief Uninitialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
+{
+ u32 i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_rmgr_uninit_vbuf()\n");
+ if (!pool) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_rmgr_uninit_vbuf(): NULL argument\n");
+ return;
+ }
+ if (pool->handles) {
+ /* free the hmm buffers */
+ for (i = 0; i < pool->size; i++) {
+ if (pool->handles[i]) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ " freeing/releasing %x (count=%d)\n",
+ pool->handles[i]->vptr,
+ pool->handles[i]->count);
+ /* free memory */
+ hmm_free(pool->handles[i]->vptr);
+ /* remove from refcount admin */
+ ia_css_rmgr_refcount_release_vbuf(
+ &pool->handles[i]);
+ }
+ }
+ /* now free the pool handles list */
+ sh_css_free(pool->handles);
+ pool->handles = NULL;
+ }
+}
+
+/*
+ * @brief Push a handle to the pool
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+static
+void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ u32 i;
+ bool success = false;
+
+ assert(pool);
+ assert(pool->recycle);
+ assert(pool->handles);
+ assert(handle);
+ for (i = 0; i < pool->size; i++) {
+ if (!pool->handles[i]) {
+ ia_css_rmgr_refcount_retain_vbuf(handle);
+ pool->handles[i] = *handle;
+ success = true;
+ break;
+ }
+ }
+ assert(success);
+}
+
+/*
+ * @brief Pop a handle from the pool
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+static
+void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ u32 i;
+ bool success = false;
+
+ assert(pool);
+ assert(pool->recycle);
+ assert(pool->handles);
+ assert(handle);
+ assert(*handle);
+ for (i = 0; i < pool->size; i++) {
+ if ((pool->handles[i]) &&
+ (pool->handles[i]->size == (*handle)->size)) {
+ *handle = pool->handles[i];
+ pool->handles[i] = NULL;
+ /* don't release it, we are returning it...
+ * ia_css_rmgr_refcount_release_vbuf(handle); */
+ success = true;
+ break;
+ }
+ }
+}
+
+/*
+ * @brief Acquire a handle from the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ struct ia_css_rmgr_vbuf_handle h;
+
+ if ((!pool) || (!handle) || (!*handle)) {
+ IA_CSS_LOG("Invalid inputs");
+ return;
+ }
+
+ if (pool->copy_on_write) {
+ /* only one reference, reuse (no new retain) */
+ if ((*handle)->count == 1)
+ return;
+ /* more than one reference, release current buffer */
+ if ((*handle)->count > 1) {
+ /* store current values */
+ h.vptr = 0x0;
+ h.size = (*handle)->size;
+ /* release ref to current buffer */
+ ia_css_rmgr_refcount_release_vbuf(handle);
+ *handle = &h;
+ }
+ /* get new buffer for needed size */
+ if ((*handle)->vptr == 0x0) {
+ if (pool->recycle) {
+ /* try and pop from pool */
+ rmgr_pop_handle(pool, handle);
+ }
+ if ((*handle)->vptr == 0x0) {
+ /* we need to allocate */
+ (*handle)->vptr = mmgr_malloc((*handle)->size);
+ } else {
+ /* we popped a buffer */
+ return;
+ }
+ }
+ }
+ /* Note that handle will change to an internally maintained one */
+ ia_css_rmgr_refcount_retain_vbuf(handle);
+}
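+
+/* Illustrative acquire/release pattern (hypothetical, not from this file):
+ * the caller seeds a handle with the required size and zero vptr/count,
+ * then lets the pool fill it in.
+ *
+ *   struct ia_css_rmgr_vbuf_handle seed = { 0, 0, my_size };
+ *   struct ia_css_rmgr_vbuf_handle *h = &seed;
+ *
+ *   ia_css_rmgr_acq_vbuf(hmm_buffer_pool, &h);
+ *   ... use h->vptr ...
+ *   ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h);
+ */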
+
+/*
+ * @brief Release a handle to the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_rel_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ if ((!pool) || (!handle) || (!*handle)) {
+ IA_CSS_LOG("Invalid inputs");
+ return;
+ }
+ /* release the handle */
+ if ((*handle)->count == 1) {
+ if (!pool->recycle) {
+ /* non recycling pool, free mem */
+ hmm_free((*handle)->vptr);
+ } else {
+ /* recycle to pool */
+ rmgr_push_handle(pool, handle);
+ }
+ }
+ ia_css_rmgr_refcount_release_vbuf(handle);
+ *handle = NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h b/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h
new file mode 100644
index 000000000000..543ca8968418
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h
@@ -0,0 +1,68 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SPCTRL_H__
+#define __IA_CSS_SPCTRL_H__
+
+#include <system_global.h>
+#include <ia_css_err.h>
+#include "ia_css_spctrl_comm.h"
+
+typedef struct {
+ u32 ddr_data_offset; /** position of data in DDR */
+ u32 dmem_data_addr; /** data segment address in dmem */
+ u32 dmem_bss_addr; /** bss segment address in dmem */
+ u32 data_size; /** data segment size */
+ u32 bss_size; /** bss segment size */
+ u32 spctrl_config_dmem_addr; /**< location of dmem_cfg in SP dmem */
+ u32 spctrl_state_dmem_addr; /**< location of state in SP dmem */
+ unsigned int sp_entry; /**< entry function ptr on SP */
+ const void *code; /** location of firmware */
+ u32 code_size;
+ char *program_name; /** not used on hardware, only for simulation */
+} ia_css_spctrl_cfg;
+
+/* Get the code addr in DDR of SP */
+hrt_vaddress get_sp_code_addr(sp_ID_t sp_id);
+
+/*! Load firmware onto the specified SP
+*/
+enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
+ ia_css_spctrl_cfg *spctrl_cfg);
+
+/* ISP2401 */
+/*! Setup registers for reloading FW */
+void sh_css_spctrl_reload_fw(sp_ID_t sp_id);
+
+/*! Unload/release any memory allocated to hold the firmware
+*/
+enum ia_css_err ia_css_spctrl_unload_fw(sp_ID_t sp_id);
+
+/*! Initialize dmem_cfg in SP dmem and start the SP program
+*/
+enum ia_css_err ia_css_spctrl_start(sp_ID_t sp_id);
+
+/*! Stop spctrl
+*/
+enum ia_css_err ia_css_spctrl_stop(sp_ID_t sp_id);
+
+/*! Query the state of SP
+*/
+ia_css_spctrl_sp_sw_state ia_css_spctrl_get_state(sp_ID_t sp_id);
+
+/*! Check if SP is idle/ready
+*/
+int ia_css_spctrl_is_idle(sp_ID_t sp_id);
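+
+/* Typical lifecycle (illustrative sketch; 'cfg' is a caller-filled
+ * ia_css_spctrl_cfg, not a symbol from this header):
+ *
+ *   ia_css_spctrl_load_fw(SP0_ID, &cfg);
+ *   ia_css_spctrl_start(SP0_ID);
+ *   ... poll ia_css_spctrl_get_state() / ia_css_spctrl_is_idle() ...
+ *   ia_css_spctrl_stop(SP0_ID);
+ *   ia_css_spctrl_unload_fw(SP0_ID);
+ */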
+
+#endif /* __IA_CSS_SPCTRL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h b/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h
new file mode 100644
index 000000000000..ca37c4ab7544
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SPCTRL_COMM_H__
+#define __IA_CSS_SPCTRL_COMM_H__
+
+#include <type_support.h>
+
+/* state of SP */
+typedef enum {
+ IA_CSS_SP_SW_TERMINATED = 0,
+ IA_CSS_SP_SW_INITIALIZED,
+ IA_CSS_SP_SW_CONNECTED,
+ IA_CSS_SP_SW_RUNNING
+} ia_css_spctrl_sp_sw_state;
+
+/* Structure to encapsulate required arguments for
+ * initialization of SP DMEM using the SP itself
+ */
+struct ia_css_sp_init_dmem_cfg {
+ ia_css_ptr ddr_data_addr; /** data segment address in ddr */
+ u32 dmem_data_addr; /** data segment address in dmem */
+ u32 dmem_bss_addr; /** bss segment address in dmem */
+ u32 data_size; /** data segment size */
+ u32 bss_size; /** bss segment size */
+ sp_ID_t sp_id; /**< SP id */
+};
+
+#define SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT \
+ (1 * SIZE_OF_IA_CSS_PTR) + \
+ (4 * sizeof(uint32_t)) + \
+ (1 * sizeof(sp_ID_t))
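+
+/* Note: the size macro above mirrors the struct layout field by field;
+ * since the struct is shared with SP firmware, the macro presumably has
+ * to be kept in sync whenever ia_css_sp_init_dmem_cfg changes.
+ */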
+
+#endif /* __IA_CSS_SPCTRL_COMM_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c
new file mode 100644
index 000000000000..db39fa273251
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c
@@ -0,0 +1,184 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#define __INLINE_SP__
+#include "sp.h"
+
+#include "memory_access.h"
+#include "assert_support.h"
+#include "ia_css_spctrl.h"
+#include "ia_css_debug.h"
+
+struct spctrl_context_info {
+ struct ia_css_sp_init_dmem_cfg dmem_config;
+ u32 spctrl_config_dmem_addr; /* location of dmem_cfg in SP dmem */
+ u32 spctrl_state_dmem_addr;
+ unsigned int sp_entry; /* entry function ptr on SP */
+ hrt_vaddress code_addr; /* sp firmware location in host mem-DDR*/
+ u32 code_size;
+ char *program_name; /* used in case of PLATFORM_SIM */
+};
+
+static struct spctrl_context_info spctrl_cofig_info[N_SP_ID];
+static bool spctrl_loaded[N_SP_ID] = {0};
+
+/* Load firmware */
+enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
+ ia_css_spctrl_cfg *spctrl_cfg)
+{
+ hrt_vaddress code_addr = mmgr_NULL;
+ struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;
+
+ if ((sp_id >= N_SP_ID) || (!spctrl_cfg))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;
+
+ init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
+ init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
+ init_dmem_cfg->dmem_bss_addr = spctrl_cfg->dmem_bss_addr;
+ init_dmem_cfg->data_size = spctrl_cfg->data_size;
+ init_dmem_cfg->bss_size = spctrl_cfg->bss_size;
+ init_dmem_cfg->sp_id = sp_id;
+
+ spctrl_cofig_info[sp_id].spctrl_config_dmem_addr =
+ spctrl_cfg->spctrl_config_dmem_addr;
+ spctrl_cofig_info[sp_id].spctrl_state_dmem_addr =
+ spctrl_cfg->spctrl_state_dmem_addr;
+
+ /* store code (text + icache) and data to DDR
+ *
+ * Data used to be stored separately because of access alignment
+ * constraints; this should instead be fixed in the FW generation.
+ */
+ code_addr = mmgr_malloc(spctrl_cfg->code_size);
+ if (code_addr == mmgr_NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ mmgr_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);
+
+ if (sizeof(hrt_vaddress) > sizeof(hrt_data)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "size of hrt_vaddress can not be greater than hrt_data\n");
+ hmm_free(code_addr);
+ code_addr = mmgr_NULL;
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ init_dmem_cfg->ddr_data_addr = code_addr + spctrl_cfg->ddr_data_offset;
+ if ((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) != 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "DDR address pointer is not properly aligned for DMA transfer\n");
+ hmm_free(code_addr);
+ code_addr = mmgr_NULL;
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
+ spctrl_cofig_info[sp_id].code_addr = code_addr;
+ spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;
+
+ /* now we program the base address into the icache and
+ * invalidate the cache.
+ */
+ sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG,
+ (hrt_data)spctrl_cofig_info[sp_id].code_addr);
+ sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
+ spctrl_loaded[sp_id] = true;
+ return IA_CSS_SUCCESS;
+}
+
+/* ISP2401 */
+/* reload pre-loaded FW */
+void sh_css_spctrl_reload_fw(sp_ID_t sp_id)
+{
+ /* now we program the base address into the icache and
+ * invalidate the cache.
+ */
+ sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG,
+ (hrt_data)spctrl_cofig_info[sp_id].code_addr);
+ sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
+ spctrl_loaded[sp_id] = true;
+}
+
+hrt_vaddress get_sp_code_addr(sp_ID_t sp_id)
+{
+ return spctrl_cofig_info[sp_id].code_addr;
+}
+
+enum ia_css_err ia_css_spctrl_unload_fw(sp_ID_t sp_id)
+{
+ if ((sp_id >= N_SP_ID) || !spctrl_loaded[sp_id])
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* free up the resource */
+ if (spctrl_cofig_info[sp_id].code_addr) {
+ hmm_free(spctrl_cofig_info[sp_id].code_addr);
+ spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;
+ }
+ spctrl_loaded[sp_id] = false;
+ return IA_CSS_SUCCESS;
+}
+
+/* Initialize dmem_cfg in SP dmem and start SP program*/
+enum ia_css_err ia_css_spctrl_start(sp_ID_t sp_id)
+{
+ if ((sp_id >= N_SP_ID) || !spctrl_loaded[sp_id])
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* Set descr in the SP to initialize the SP DMEM */
+ /*
+ * The FW stores user-space pointers to the FW; the ISP pointer
+ * is only available here.
+ */
+ assert(sizeof(unsigned int) <= sizeof(hrt_data));
+
+ sp_dmem_store(sp_id,
+ spctrl_cofig_info[sp_id].spctrl_config_dmem_addr,
+ &spctrl_cofig_info[sp_id].dmem_config,
+ sizeof(spctrl_cofig_info[sp_id].dmem_config));
+ /* set the start address */
+ sp_ctrl_store(sp_id, SP_START_ADDR_REG,
+ (hrt_data)spctrl_cofig_info[sp_id].sp_entry);
+ sp_ctrl_setbit(sp_id, SP_SC_REG, SP_RUN_BIT);
+ sp_ctrl_setbit(sp_id, SP_SC_REG, SP_START_BIT);
+ return IA_CSS_SUCCESS;
+}
+
+/* Query the state of the SP */
+ia_css_spctrl_sp_sw_state ia_css_spctrl_get_state(sp_ID_t sp_id)
+{
+ ia_css_spctrl_sp_sw_state state = 0;
+ unsigned int HIVE_ADDR_sp_sw_state;
+
+ if (sp_id >= N_SP_ID)
+ return IA_CSS_SP_SW_TERMINATED;
+
+ HIVE_ADDR_sp_sw_state = spctrl_cofig_info[sp_id].spctrl_state_dmem_addr;
+ (void)HIVE_ADDR_sp_sw_state; /* Suppress warnings in CRUN */
+ if (sp_id == SP0_ID)
+ state = sp_dmem_load_uint32(sp_id, (unsigned int)sp_address_of(sp_sw_state));
+ return state;
+}
+
+int ia_css_spctrl_is_idle(sp_ID_t sp_id)
+{
+ int state = 0;
+
+ assert(sp_id < N_SP_ID);
+
+ state = sp_ctrl_getbit(sp_id, SP_SC_REG, SP_IDLE_BIT);
+ return state;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h b/drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h
new file mode 100644
index 000000000000..899294646494
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TAGGER_COMMON_H__
+#define __IA_CSS_TAGGER_COMMON_H__
+
+#include <system_local.h>
+#include <type_support.h>
+
+/**
+ * @brief The tagger's circular buffer.
+ *
+ * Should be one less than NUM_CONTINUOUS_FRAMES in sh_css_internal.h
+ */
+#if defined(HAS_SP_2400)
+#define MAX_CB_ELEMS_FOR_TAGGER 14
+#else
+#define MAX_CB_ELEMS_FOR_TAGGER 9
+#endif
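+
+/* Illustrative only: if NUM_CONTINUOUS_FRAMES from sh_css_internal.h were
+ * visible here, the sizing rule above could be checked at build time with
+ * the COMPILATION_ERROR_IF() macro used elsewhere in this driver, e.g.:
+ *
+ * COMPILATION_ERROR_IF(MAX_CB_ELEMS_FOR_TAGGER + 1 > NUM_CONTINUOUS_FRAMES);
+ */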
+
+/**
+ * @brief Data structure for the tagger buffer element.
+ */
+typedef struct {
+ u32 frame; /* the frame value stored in the element */
+ u32 param; /* the param value stored in the element */
+ u8 mark; /* the mark on the element */
+ u8 lock; /* the lock on the element */
+ u8 exp_id; /* exp_id of frame, for debugging only */
+} ia_css_tagger_buf_sp_elem_t;
+
+#endif /* __IA_CSS_TAGGER_COMMON_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c b/drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c
new file mode 100644
index 000000000000..57dddd74d668
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h> /* for uint32_t */
+#include "ia_css_timer.h" /*struct ia_css_clock_tick */
+#include "sh_css_legacy.h" /* IA_CSS_PIPE_ID_NUM*/
+#include "gp_timer.h" /*gp_timer_read()*/
+#include "assert_support.h"
+
+enum ia_css_err
+ia_css_timer_get_current_tick(
+ struct ia_css_clock_tick *curr_ts) {
+ assert(curr_ts);
+ if (!curr_ts)
+ {
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ curr_ts->ticks = (clock_value_t)gp_timer_read(GP_TIMER_SEL);
+ return IA_CSS_SUCCESS;
+}
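+
+#if 0
+/* A minimal usage sketch (illustrative only, not part of this driver):
+ * measuring an elapsed interval from two tick samples. The function name
+ * is hypothetical; tick wrap-around handling is omitted.
+ */
+static enum ia_css_err example_measure_ticks(clock_value_t *elapsed)
+{
+	struct ia_css_clock_tick t0, t1;
+
+	if (ia_css_timer_get_current_tick(&t0) != IA_CSS_SUCCESS)
+		return IA_CSS_ERR_INTERNAL_ERROR;
+	/* ... workload being measured ... */
+	if (ia_css_timer_get_current_tick(&t1) != IA_CSS_SUCCESS)
+		return IA_CSS_ERR_INTERNAL_ERROR;
+	*elapsed = t1.ticks - t0.ticks;
+	return IA_CSS_SUCCESS;
+}
+#endif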
diff --git a/drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h b/drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h
new file mode 100644
index 000000000000..9b6c2893d950
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _scalar_processor_2400_params_h
+#define _scalar_processor_2400_params_h
+
+#include "cell_params.h"
+
+#endif /* _scalar_processor_2400_params_h */
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
new file mode 100644
index 000000000000..d77432254a2c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -0,0 +1,11110 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/*! \file */
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "ia_css.h"
+#include "sh_css_hrt.h" /* only for file 2 MIPI */
+#include "ia_css_buffer.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+#include "sh_css_mipi.h"
+#include "sh_css_sp.h" /* sh_css_sp_group */
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "ia_css_isys.h"
+#endif
+#include "ia_css_frame.h"
+#include "sh_css_defs.h"
+#include "sh_css_firmware.h"
+#include "sh_css_params.h"
+#include "sh_css_params_internal.h"
+#include "sh_css_param_shading.h"
+#include "ia_css_refcount.h"
+#include "ia_css_rmgr.h"
+#include "ia_css_debug.h"
+#include "ia_css_debug_pipe.h"
+#include "ia_css_device_access.h"
+#include "device_access.h"
+#include "sh_css_legacy.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_stream.h"
+#include "sh_css_stream_format.h"
+#include "ia_css_pipe.h"
+#include "ia_css_util.h"
+#include "ia_css_pipe_util.h"
+#include "ia_css_pipe_binarydesc.h"
+#include "ia_css_pipe_stagedesc.h"
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+#include "ia_css_isys.h"
+#endif
+
+#include "memory_access.h"
+#include "tag.h"
+#include "assert_support.h"
+#include "math_support.h"
+#include "sw_event_global.h" /* Event IDs.*/
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "ia_css_ifmtr.h"
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "input_system.h"
+#endif
+#include "mmu_device.h" /* mmu_set_page_table_base_index(), ... */
+#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
+#include "gdc_device.h" /* HRT_GDC_N */
+#include "dma.h" /* dma_set_max_burst_size() */
+#include "irq.h" /* virq */
+#include "sp.h" /* cnd_sp_irq_enable() */
+#include "isp.h" /* cnd_isp_irq_enable, ISP_VEC_NELEMS */
+#include "gp_device.h" /* gp_device_reg_store() */
+#define __INLINE_GPIO__
+#include "gpio.h"
+#include "timed_ctrl.h"
+#include "platform_support.h" /* hrt_sleep(), inline */
+#include "ia_css_inputfifo.h"
+#define WITH_PC_MONITORING 0
+
+#define SH_CSS_VIDEO_BUFFER_ALIGNMENT 0
+
+#if WITH_PC_MONITORING
+#define MULTIPLE_SAMPLES 1
+#define NOF_SAMPLES 60
+#include "linux/kthread.h"
+#include "linux/sched.h"
+#include "linux/delay.h"
+#include "sh_css_metrics.h"
+static int thread_alive;
+#endif /* WITH_PC_MONITORING */
+
+#include "ia_css_spctrl.h"
+#include "ia_css_version_data.h"
+#include "sh_css_struct.h"
+#include "ia_css_bufq.h"
+#include "ia_css_timer.h" /* clock_value_t */
+
+#include "isp/modes/interface/input_buf.isp.h"
+
+/* Name of the sp program: should not be built-in */
+#define SP_PROG_NAME "sp"
+/* Size of Refcount List */
+#define REFCOUNT_SIZE 1000
+
+/* For JPEG, we don't know the length of the image up front, but
+ * since we support sensors up to 16MP, we take this as the
+ * upper limit.
+ */
+#define JPEG_BYTES (16 * 1024 * 1024)
+
+#define STATS_ENABLED(stage) (stage && stage->binary && stage->binary->info && \
+ (stage->binary->info->sp.enable.s3a || stage->binary->info->sp.enable.dis))
+
+struct sh_css my_css;
+
+int (*sh_css_printf)(const char *fmt, va_list args) = NULL;
+
+/* Modes of operation: stream_create and stream_destroy update the save/restore data
+   only when in working mode, not during suspend/resume
+*/
+enum ia_sh_css_modes {
+ sh_css_mode_none = 0,
+ sh_css_mode_working,
+ sh_css_mode_suspend,
+ sh_css_mode_resume
+};
+
+/* A stream seed, used to save and restore the stream data.
+   The stream seed contains all the data required to "grow" the stream again after it was closed.
+*/
+struct sh_css_stream_seed {
+ struct ia_css_stream
+ **orig_stream; /* pointer to restore the original handle */
+ struct ia_css_stream *stream; /* handle, used as ID too.*/
+ struct ia_css_stream_config stream_config; /* stream config struct */
+ int num_pipes;
+ struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM]; /* pipe handles */
+ struct ia_css_pipe
+ **orig_pipes[IA_CSS_PIPE_ID_NUM]; /* pointer to restore original handle */
+ struct ia_css_pipe_config
+ pipe_config[IA_CSS_PIPE_ID_NUM]; /* pipe config structs */
+};
+
+#define MAX_ACTIVE_STREAMS 5
+/* A global struct for save/restore, holding all the data that must survive power-down:
+   the MMU base, IRQ type, env for routines, the loaded firmware binary and the stream seeds.
+*/
+struct sh_css_save {
+ enum ia_sh_css_modes mode;
+ u32 mmu_base; /* the last mmu_base */
+ enum ia_css_irq_type irq_type;
+ struct sh_css_stream_seed stream_seeds[MAX_ACTIVE_STREAMS];
+ struct ia_css_fw *loaded_fw; /* fw struct previously loaded */
+ struct ia_css_env driver_env; /* driver-supplied env copy */
+};
+
+static bool my_css_save_initialized; /* if my_css_save was initialized */
+static struct sh_css_save my_css_save;
+
+/* pqiao NOTICE: this is for CSS-internal buffer recycling when stopping the pipeline;
+   this array is temporary and will be replaced by the resource manager */
+/* Take the biggest size for the number of elements */
+#define MAX_HMM_BUFFER_NUM \
+ (SH_CSS_MAX_NUM_QUEUES * (IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE + 2))
+
+struct sh_css_hmm_buffer_record {
+ bool in_use;
+ enum ia_css_buffer_type type;
+ struct ia_css_rmgr_vbuf_handle *h_vbuf;
+ hrt_address kernel_ptr;
+};
+
+static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM];
+
+#define GPIO_FLASH_PIN_MASK BIT(HIVE_GPIO_STROBE_TRIGGER_PIN)
+
+static bool fw_explicitly_loaded;
+
+/*
+ * Local prototypes
+ */
+
+static enum ia_css_err
+allocate_delay_frames(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+sh_css_pipe_start(struct ia_css_stream *stream);
+
+/* ISP 2401 */
+/*
+ * @brief Stop all "ia_css_pipe" instances in the target
+ * "ia_css_stream" instance.
+ *
+ * @param[in] stream	Pointer to the target "ia_css_stream" instance.
+ *
+ * @return
+ * - IA_CSS_SUCCESS, if the "stop" requests have been successfully sent out.
+ * - CSS error code, otherwise.
+ *
+ *
+ * NOTE
+ * This API sends the "stop" requests to the "ia_css_pipe"
+ * instances in the same "ia_css_stream" instance. It will
+ * return without waiting for all "ia_css_pipe" instances
+ * to be stopped.
+ */
+static enum ia_css_err
+sh_css_pipes_stop(struct ia_css_stream *stream);
+
+/*
+ * @brief Check if all "ia_css_pipe" instances in the target
+ * "ia_css_stream" instance have stopped.
+ *
+ * @param[in] stream	Pointer to the target "ia_css_stream" instance.
+ *
+ * @return
+ * - true, if all "ia_css_pipe" instances in the target "ia_css_stream"
+ * instance have been stopped.
+ * - false, otherwise.
+ */
+/* ISP 2401 */
+static bool
+sh_css_pipes_have_stopped(struct ia_css_stream *stream);
+
+/* ISP 2401 */
+static enum ia_css_err
+ia_css_pipe_check_format(struct ia_css_pipe *pipe,
+ enum ia_css_frame_format format);
+
+/* ISP 2401 */
+static enum ia_css_err
+check_pipe_resolutions(const struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+ia_css_pipe_load_extension(struct ia_css_pipe *pipe,
+ struct ia_css_fw_info *firmware);
+
+static void
+ia_css_pipe_unload_extension(struct ia_css_pipe *pipe,
+ struct ia_css_fw_info *firmware);
+static void
+ia_css_reset_defaults(struct sh_css *css);
+
+static void
+sh_css_init_host_sp_control_vars(void);
+
+static enum ia_css_err set_num_primary_stages(unsigned int *num,
+ enum ia_css_pipe_version version);
+
+static bool
+need_capture_pp(const struct ia_css_pipe *pipe);
+
+static bool
+need_yuv_scaler_stage(const struct ia_css_pipe *pipe);
+
+static enum ia_css_err ia_css_pipe_create_cas_scaler_desc_single_output(
+ struct ia_css_frame_info *cas_scaler_in_info,
+ struct ia_css_frame_info *cas_scaler_out_info,
+ struct ia_css_frame_info *cas_scaler_vf_info,
+ struct ia_css_cas_binary_descr *descr);
+
+static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr
+ *descr);
+
+static bool
+need_downscaling(const struct ia_css_resolution in_res,
+ const struct ia_css_resolution out_res);
+
+static bool need_capt_ldc(const struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+sh_css_pipe_load_binaries(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+sh_css_pipe_get_viewfinder_frame_info(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx);
+
+static enum ia_css_err
+sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx);
+
+static enum ia_css_err
+capture_start(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+video_start(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+preview_start(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+yuvpp_start(struct ia_css_pipe *pipe);
+
+static bool copy_on_sp(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *vf_frame, unsigned int idx);
+
+static enum ia_css_err
+init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *frame, enum ia_css_frame_format format);
+
+static enum ia_css_err
+init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *out_frame, unsigned int idx);
+
+static enum ia_css_err
+sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
+ const void *acc_fw);
+
+static enum ia_css_err
+alloc_continuous_frames(
+ struct ia_css_pipe *pipe, bool init_time);
+
+static void
+pipe_global_init(void);
+
+static enum ia_css_err
+pipe_generate_pipe_num(const struct ia_css_pipe *pipe,
+ unsigned int *pipe_number);
+
+static void
+pipe_release_pipe_num(unsigned int pipe_num);
+
+static enum ia_css_err
+create_host_pipeline_structure(struct ia_css_stream *stream);
+
+static enum ia_css_err
+create_host_pipeline(struct ia_css_stream *stream);
+
+static enum ia_css_err
+create_host_preview_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_video_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_copy_pipeline(struct ia_css_pipe *pipe,
+ unsigned int max_input_width,
+ struct ia_css_frame *out_frame);
+
+static enum ia_css_err
+create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_capture_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_yuvpp_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_acc_pipeline(struct ia_css_pipe *pipe);
+
+static unsigned int
+sh_css_get_sw_interrupt_value(unsigned int irq);
+
+static struct ia_css_binary *ia_css_pipe_get_shading_correction_binary(
+ const struct ia_css_pipe *pipe);
+
+static struct ia_css_binary *
+ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe);
+
+static struct ia_css_binary *
+ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe);
+
+static void
+sh_css_hmm_buffer_record_init(void);
+
+static void
+sh_css_hmm_buffer_record_uninit(void);
+
+static void
+sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record);
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
+ enum ia_css_buffer_type type,
+ hrt_address kernel_ptr);
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_validate(hrt_vaddress ddr_buffer_addr,
+ enum ia_css_buffer_type type);
+
+void
+ia_css_get_acc_configs(
+ struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config);
+
+#if CONFIG_ON_FRAME_ENQUEUE()
+static enum ia_css_err set_config_on_frame_enqueue(struct ia_css_frame_info
+ *info, struct frame_data_wrapper *frame);
+#endif
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+static unsigned int get_crop_lines_for_bayer_order(const struct
+ ia_css_stream_config *config);
+static unsigned int get_crop_columns_for_bayer_order(const struct
+ ia_css_stream_config *config);
+static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
+ unsigned int *extra_row, unsigned int *extra_column);
+static enum ia_css_err
+aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
+ struct ia_css_pipe *pipes[],
+ bool *do_crop_status);
+
+static bool
+aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe);
+
+static enum ia_css_err
+aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
+ struct ia_css_resolution *effective_res);
+#endif
+
+static void
+sh_css_pipe_free_shading_table(struct ia_css_pipe *pipe)
+{
+ assert(pipe);
+ if (!pipe) {
+ IA_CSS_ERROR("NULL input parameter");
+ return;
+ }
+
+ if (pipe->shading_table)
+ ia_css_shading_table_free(pipe->shading_table);
+ pipe->shading_table = NULL;
+}
+
+static enum ia_css_frame_format yuv420_copy_formats[] = {
+ IA_CSS_FRAME_FORMAT_NV12,
+ IA_CSS_FRAME_FORMAT_NV21,
+ IA_CSS_FRAME_FORMAT_YV12,
+ IA_CSS_FRAME_FORMAT_YUV420,
+ IA_CSS_FRAME_FORMAT_YUV420_16,
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8,
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8
+};
+
+static enum ia_css_frame_format yuv422_copy_formats[] = {
+ IA_CSS_FRAME_FORMAT_NV12,
+ IA_CSS_FRAME_FORMAT_NV16,
+ IA_CSS_FRAME_FORMAT_NV21,
+ IA_CSS_FRAME_FORMAT_NV61,
+ IA_CSS_FRAME_FORMAT_YV12,
+ IA_CSS_FRAME_FORMAT_YV16,
+ IA_CSS_FRAME_FORMAT_YUV420,
+ IA_CSS_FRAME_FORMAT_YUV420_16,
+ IA_CSS_FRAME_FORMAT_YUV422,
+ IA_CSS_FRAME_FORMAT_YUV422_16,
+ IA_CSS_FRAME_FORMAT_UYVY,
+ IA_CSS_FRAME_FORMAT_YUYV
+};
+
+/* Verify whether the selected output format can be produced
+ * by the copy binary, given the stream format.
+ */
+static enum ia_css_err
+verify_copy_out_frame_format(struct ia_css_pipe *pipe) {
+ enum ia_css_frame_format out_fmt = pipe->output_info[0].format;
+ unsigned int i, found = 0;
+
+ assert(pipe);
+ assert(pipe->stream);
+
+ switch (pipe->stream->config.input_config.format)
+ {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ for (i = 0; i < ARRAY_SIZE(yuv420_copy_formats) && !found; i++)
+ found = (out_fmt == yuv420_copy_formats[i]);
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16);
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ for (i = 0; i < ARRAY_SIZE(yuv422_copy_formats) && !found; i++)
+ found = (out_fmt == yuv422_copy_formats[i]);
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_YUV422_16 ||
+ out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16);
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_RGBA888 ||
+ out_fmt == IA_CSS_FRAME_FORMAT_RGB565);
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_RGBA888 ||
+ out_fmt == IA_CSS_FRAME_FORMAT_YUV420);
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_RAW) ||
+ (out_fmt == IA_CSS_FRAME_FORMAT_RAW_PACKED);
+ break;
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_BINARY_8);
+ break;
+ default:
+ break;
+ }
+ if (!found)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_SUCCESS;
+}
+
+unsigned int
+ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream)
+{
+ int bpp = 0;
+
+ if (stream)
+ bpp = ia_css_util_input_format_bpp(stream->config.input_config.format,
+ stream->config.pixels_per_clock == 2);
+
+ return bpp;
+}
+
+#define GP_ISEL_TPG_MODE 0x90058
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+static enum ia_css_err
+sh_css_config_input_network(struct ia_css_stream *stream) {
+ unsigned int fmt_type;
+ struct ia_css_pipe *pipe = stream->last_pipe;
+ struct ia_css_binary *binary = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(stream);
+ assert(pipe);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() enter:\n");
+
+ if (pipe->pipeline.stages)
+ binary = pipe->pipeline.stages->binary;
+
+ err = ia_css_isys_convert_stream_format_to_mipi_format(
+ stream->config.input_config.format,
+ stream->csi_rx_config.comp,
+ &fmt_type);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ sh_css_sp_program_input_circuit(fmt_type,
+ stream->config.channel_id,
+ stream->config.mode);
+
+ if ((binary && (binary->online || stream->config.continuous)) ||
+ pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
+ {
+ err = ia_css_ifmtr_configure(&stream->config,
+ binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ if (stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
+ stream->config.mode == IA_CSS_INPUT_MODE_PRBS)
+ {
+ unsigned int hblank_cycles = 100,
+ vblank_lines = 6,
+ width,
+ height,
+ vblank_cycles;
+ width = (stream->config.input_config.input_res.width) / (1 +
+ (stream->config.pixels_per_clock == 2));
+ height = stream->config.input_config.input_res.height;
+ vblank_cycles = vblank_lines * (width + hblank_cycles);
+ sh_css_sp_configure_sync_gen(width, height, hblank_cycles,
+ vblank_cycles);
+ if (!atomisp_hw_is_isp2401) {
+ if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG) {
+ /* TODO: move define to proper file in tools */
+ ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0);
+ }
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() leave:\n");
+ return IA_CSS_SUCCESS;
+}
+#elif !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+static unsigned int csi2_protocol_calculate_max_subpixels_per_line(
+ enum atomisp_input_format format,
+ unsigned int pixels_per_line)
+{
+ unsigned int rval;
+
+ switch (format) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: UYY0 UYY0 ... UYY0
+ * Line 1: VYY0 VYY0 ... VYY0
+ * Line 2: UYY0 UYY0 ... UYY0
+ * Line 3: VYY0 VYY0 ... VYY0
+ * ...
+ * Line (n-2): UYY0 UYY0 ... UYY0
+ * Line (n-1): VYY0 VYY0 ... VYY0
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ * The 0 is introduced by the input system
+ * (mipi backend).
+ */
+ rval = pixels_per_line * 2;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: YYYY YYYY ... YYYY
+ * Line 1: UYVY UYVY ... UYVY UYVY
+ * Line 2: YYYY YYYY ... YYYY
+ * Line 3: UYVY UYVY ... UYVY UYVY
+ * ...
+ * Line (n-2): YYYY YYYY ... YYYY
+ * Line (n-1): UYVY UYVY ... UYVY UYVY
+ *
+	 * In this frame format, the odd-line is twice
+	 * as wide as the even-line.
+ */
+ rval = pixels_per_line * 2;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: UYVY UYVY ... UYVY
+ * Line 1: UYVY UYVY ... UYVY
+ * Line 2: UYVY UYVY ... UYVY
+ * Line 3: UYVY UYVY ... UYVY
+ * ...
+ * Line (n-2): UYVY UYVY ... UYVY
+ * Line (n-1): UYVY UYVY ... UYVY
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ */
+ rval = pixels_per_line * 2;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: ABGR ABGR ... ABGR
+ * Line 1: ABGR ABGR ... ABGR
+ * Line 2: ABGR ABGR ... ABGR
+ * Line 3: ABGR ABGR ... ABGR
+ * ...
+ * Line (n-2): ABGR ABGR ... ABGR
+ * Line (n-1): ABGR ABGR ... ABGR
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ */
+ rval = pixels_per_line * 4;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: Pixel Pixel ... Pixel
+ * Line 1: Pixel Pixel ... Pixel
+ * Line 2: Pixel Pixel ... Pixel
+ * Line 3: Pixel Pixel ... Pixel
+ * ...
+ * Line (n-2): Pixel Pixel ... Pixel
+ * Line (n-1): Pixel Pixel ... Pixel
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ */
+ rval = pixels_per_line;
+ break;
+ default:
+ rval = 0;
+ break;
+ }
+
+ return rval;
+}
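+
+/* Worked example for the function above (hypothetical line width): for a
+ * 1920-pixel-wide line, the YUV formats yield 3840 subpixels, the RGB
+ * formats 7680 (4 subpixels per pixel) and the RAW/binary formats 1920
+ * (1 subpixel per pixel).
+ */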
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_id(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr)
+{
+ bool rc;
+
+ rc = true;
+ switch (stream_cfg->mode) {
+ case IA_CSS_INPUT_MODE_TPG:
+
+ if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID0) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID;
+ } else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID1) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID;
+ } else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID2) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID;
+ }
+
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+
+ if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID0) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID;
+ } else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID1) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID;
+ } else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID2) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID;
+ }
+
+ break;
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+
+ if (stream_cfg->source.port.port == MIPI_PORT0_ID) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT0_ID;
+ } else if (stream_cfg->source.port.port == MIPI_PORT1_ID) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT1_ID;
+ } else if (stream_cfg->source.port.port == MIPI_PORT2_ID) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT2_ID;
+ }
+
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_type(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr)
+{
+ bool rc;
+
+ rc = true;
+ switch (stream_cfg->mode) {
+ case IA_CSS_INPUT_MODE_TPG:
+
+ isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_TPG;
+
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+
+ isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_PRBS;
+
+ break;
+ case IA_CSS_INPUT_MODE_SENSOR:
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+
+ isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_SENSOR;
+ break;
+
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr,
+ int isys_stream_idx)
+{
+ bool rc;
+
+ rc = true;
+ switch (stream_cfg->mode) {
+ case IA_CSS_INPUT_MODE_TPG:
+ if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_RAMP) {
+ isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_RAMP;
+ } else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_CHECKERBOARD) {
+ isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_CHBO;
+ } else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_MONO) {
+ isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_MONO;
+ } else {
+ rc = false;
+ }
+
+ /*
+ * TODO
+ * - Make "color_cfg" as part of "ia_css_tpg_config".
+ */
+ isys_stream_descr->tpg_port_attr.color_cfg.R1 = 51;
+ isys_stream_descr->tpg_port_attr.color_cfg.G1 = 102;
+ isys_stream_descr->tpg_port_attr.color_cfg.B1 = 255;
+ isys_stream_descr->tpg_port_attr.color_cfg.R2 = 0;
+ isys_stream_descr->tpg_port_attr.color_cfg.G2 = 100;
+ isys_stream_descr->tpg_port_attr.color_cfg.B2 = 160;
+
+ isys_stream_descr->tpg_port_attr.mask_cfg.h_mask =
+ stream_cfg->source.tpg.x_mask;
+ isys_stream_descr->tpg_port_attr.mask_cfg.v_mask =
+ stream_cfg->source.tpg.y_mask;
+ isys_stream_descr->tpg_port_attr.mask_cfg.hv_mask =
+ stream_cfg->source.tpg.xy_mask;
+
+ isys_stream_descr->tpg_port_attr.delta_cfg.h_delta =
+ stream_cfg->source.tpg.x_delta;
+ isys_stream_descr->tpg_port_attr.delta_cfg.v_delta =
+ stream_cfg->source.tpg.y_delta;
+
+ /*
+ * TODO
+ * - Make "sync_gen_cfg" as part of "ia_css_tpg_config".
+ */
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.hblank_cycles = 100;
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.vblank_cycles = 100;
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.pixels_per_clock =
+ stream_cfg->pixels_per_clock;
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.nr_of_frames = (uint32_t)~(0x0);
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.pixels_per_line =
+ stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.width;
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.lines_per_frame =
+ stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.height;
+
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+
+ isys_stream_descr->prbs_port_attr.seed0 = stream_cfg->source.prbs.seed;
+ isys_stream_descr->prbs_port_attr.seed1 = stream_cfg->source.prbs.seed1;
+
+ /*
+ * TODO
+ * - Make "sync_gen_cfg" as part of "ia_css_prbs_config".
+ */
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.hblank_cycles = 100;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.vblank_cycles = 100;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_clock =
+ stream_cfg->pixels_per_clock;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.nr_of_frames = (uint32_t)~(0x0);
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_line =
+ stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.width;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.lines_per_frame =
+ stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.height;
+
+ break;
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR: {
+ enum ia_css_err err;
+ unsigned int fmt_type;
+
+ err = ia_css_isys_convert_stream_format_to_mipi_format(
+ stream_cfg->isys_config[isys_stream_idx].format,
+ MIPI_PREDICTOR_NONE,
+ &fmt_type);
+ if (err != IA_CSS_SUCCESS)
+ rc = false;
+
+ isys_stream_descr->csi_port_attr.active_lanes =
+ stream_cfg->source.port.num_lanes;
+ isys_stream_descr->csi_port_attr.fmt_type = fmt_type;
+ isys_stream_descr->csi_port_attr.ch_id = stream_cfg->channel_id;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ isys_stream_descr->online = stream_cfg->online;
+#endif
+ err |= ia_css_isys_convert_compressed_format(
+ &stream_cfg->source.port.compression,
+ isys_stream_descr);
+ if (err != IA_CSS_SUCCESS)
+ rc = false;
+
+ /* metadata */
+ isys_stream_descr->metadata.enable = false;
+ if (stream_cfg->metadata_config.resolution.height > 0) {
+ err = ia_css_isys_convert_stream_format_to_mipi_format(
+ stream_cfg->metadata_config.data_type,
+ MIPI_PREDICTOR_NONE,
+ &fmt_type);
+ if (err != IA_CSS_SUCCESS)
+ rc = false;
+ isys_stream_descr->metadata.fmt_type = fmt_type;
+ isys_stream_descr->metadata.bits_per_pixel =
+ ia_css_util_input_format_bpp(stream_cfg->metadata_config.data_type, true);
+ isys_stream_descr->metadata.pixels_per_line =
+ stream_cfg->metadata_config.resolution.width;
+ isys_stream_descr->metadata.lines_per_frame =
+ stream_cfg->metadata_config.resolution.height;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+		/* For the new input system, the number of str2mmio requests
+		 * must be even, so we round the number of metadata lines up
+		 * to an even count. */
+ if (isys_stream_descr->metadata.lines_per_frame > 0)
+ isys_stream_descr->metadata.lines_per_frame +=
+ (isys_stream_descr->metadata.lines_per_frame & 1);
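+			/* e.g. 5 metadata lines become 6; even counts are unchanged */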
+#endif
+ isys_stream_descr->metadata.align_req_in_bytes =
+ ia_css_csi2_calculate_input_system_alignment(
+ stream_cfg->metadata_config.data_type);
+ isys_stream_descr->metadata.enable = true;
+ }
+
+ break;
+ }
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_resolution(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr,
+ int isys_stream_idx)
+{
+ unsigned int bits_per_subpixel;
+ unsigned int max_subpixels_per_line;
+ unsigned int lines_per_frame;
+ unsigned int align_req_in_bytes;
+ enum atomisp_input_format fmt_type;
+
+ fmt_type = stream_cfg->isys_config[isys_stream_idx].format;
+ if ((stream_cfg->mode == IA_CSS_INPUT_MODE_SENSOR ||
+ stream_cfg->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) &&
+ stream_cfg->source.port.compression.type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) {
+ if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
+ UNCOMPRESSED_BITS_PER_PIXEL_10) {
+ fmt_type = ATOMISP_INPUT_FORMAT_RAW_10;
+ } else if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
+ UNCOMPRESSED_BITS_PER_PIXEL_12) {
+ fmt_type = ATOMISP_INPUT_FORMAT_RAW_12;
+		} else {
+			return false;
+		}
+ }
+
+ bits_per_subpixel =
+ sh_css_stream_format_2_bits_per_subpixel(fmt_type);
+ if (bits_per_subpixel == 0)
+ return false;
+
+ max_subpixels_per_line =
+ csi2_protocol_calculate_max_subpixels_per_line(fmt_type,
+ stream_cfg->isys_config[isys_stream_idx].input_res.width);
+ if (max_subpixels_per_line == 0)
+ return false;
+
+ lines_per_frame = stream_cfg->isys_config[isys_stream_idx].input_res.height;
+ if (lines_per_frame == 0)
+ return false;
+
+ align_req_in_bytes = ia_css_csi2_calculate_input_system_alignment(fmt_type);
+
+ /* HW needs subpixel info for their settings */
+ isys_stream_descr->input_port_resolution.bits_per_pixel = bits_per_subpixel;
+ isys_stream_descr->input_port_resolution.pixels_per_line =
+ max_subpixels_per_line;
+ isys_stream_descr->input_port_resolution.lines_per_frame = lines_per_frame;
+ isys_stream_descr->input_port_resolution.align_req_in_bytes =
+ align_req_in_bytes;
+
+ return true;
+}
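+
+/* Worked example for the function above (hypothetical stream): an
+ * uncompressed RAW_10 stream of 1920x1080 resolves to bits_per_pixel = 10,
+ * pixels_per_line = 1920 subpixels and lines_per_frame = 1080, with the
+ * alignment taken from ia_css_csi2_calculate_input_system_alignment().
+ */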
+
+static bool sh_css_translate_stream_cfg_to_isys_stream_descr(
+ struct ia_css_stream_config *stream_cfg,
+ bool early_polling,
+ ia_css_isys_descr_t *isys_stream_descr,
+ int isys_stream_idx)
+{
+ bool rc;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_translate_stream_cfg_to_isys_stream_descr() enter:\n");
+ rc = sh_css_translate_stream_cfg_to_input_system_input_port_id(stream_cfg,
+ isys_stream_descr);
+ rc &= sh_css_translate_stream_cfg_to_input_system_input_port_type(stream_cfg,
+ isys_stream_descr);
+ rc &= sh_css_translate_stream_cfg_to_input_system_input_port_attr(stream_cfg,
+ isys_stream_descr, isys_stream_idx);
+ rc &= sh_css_translate_stream_cfg_to_input_system_input_port_resolution(
+ stream_cfg, isys_stream_descr, isys_stream_idx);
+
+ isys_stream_descr->raw_packed = stream_cfg->pack_raw_pixels;
+ isys_stream_descr->linked_isys_stream_id = (int8_t)
+ stream_cfg->isys_config[isys_stream_idx].linked_isys_stream_id;
+ /*
+	 * Early polling is required for timestamp accuracy in certain cases.
+ * The ISYS HW polling is started on
+ * ia_css_isys_stream_capture_indication() instead of
+ * ia_css_pipeline_sp_wait_for_isys_stream_N() as isp processing of
+ * capture takes longer than getting an ISYS frame
+ *
+	 * Only relevant for the 2401??
+ */
+#if 0 // FIXME: NOT USED on Yocto Aero
+ isys_stream_descr->polling_mode
+ = early_polling ? INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST
+ : INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n");
+#endif
+
+ return rc;
+}
+
+static bool sh_css_translate_binary_info_to_input_system_output_port_attr(
+ struct ia_css_binary *binary,
+ ia_css_isys_descr_t *isys_stream_descr)
+{
+ if (!binary)
+ return false;
+
+ isys_stream_descr->output_port_attr.left_padding = binary->left_padding;
+ isys_stream_descr->output_port_attr.max_isp_input_width =
+ binary->info->sp.input.max_width;
+
+ return true;
+}
+
+static enum ia_css_err
+sh_css_config_input_network(struct ia_css_stream *stream) {
+ bool rc;
+ ia_css_isys_descr_t isys_stream_descr;
+ unsigned int sp_thread_id;
+ struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
+ struct ia_css_pipe *pipe = NULL;
+ struct ia_css_binary *binary = NULL;
+ int i;
+ u32 isys_stream_id;
+ bool early_polling = false;
+
+ assert(stream);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() enter 0x%p:\n", stream);
+
+	if (stream->config.continuous)
+	{
+		if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE ||
+		    stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_YUVPP) {
+			pipe = stream->last_pipe;
+		} else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) {
+			pipe = stream->last_pipe->pipe_settings.preview.copy_pipe;
+		} else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) {
+			pipe = stream->last_pipe->pipe_settings.video.copy_pipe;
+		}
+ } else
+ {
+ pipe = stream->last_pipe;
+ if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
+ /*
+ * We need to poll the ISYS HW in capture_indication itself
+ * for "non-continuous" capture usecase for getting accurate
+ * isys frame capture timestamps.
+ * This is because the capturepipe propcessing takes longer
+ * to execute than the input system frame capture.
+ * 2401 specific
+ */
+ early_polling = true;
+ }
+ }
+
+ assert(pipe);
+ if (!pipe)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (pipe->pipeline.stages)
+ if (pipe->pipeline.stages->binary)
+ binary = pipe->pipeline.stages->binary;
+
+ if (binary)
+ {
+		/* This was done in ifmtr on the 2400.
+		 * Online and continuous modes bypass init_in_frameinfo_memory_defaults,
+		 * so it needs to be done here.
+		 */
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+ }
+
+ /* get the SP thread id */
+ rc = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &sp_thread_id);
+ if (!rc)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ /* get the target input terminal */
+ sp_pipeline_input_terminal = &sh_css_sp_group.pipe_io[sp_thread_id].input;
+
+ for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++)
+ {
+ /* initialization */
+ memset((void *)(&isys_stream_descr), 0, sizeof(ia_css_isys_descr_t));
+ sp_pipeline_input_terminal->context.virtual_input_system_stream[i].valid = 0;
+ sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i].valid = 0;
+
+ if (!stream->config.isys_config[i].valid)
+ continue;
+
+ /* translate the stream configuration to the Input System (2401) configuration */
+ rc = sh_css_translate_stream_cfg_to_isys_stream_descr(
+ &stream->config,
+ early_polling,
+ &(isys_stream_descr), i);
+
+ if (stream->config.online) {
+ rc &= sh_css_translate_binary_info_to_input_system_output_port_attr(
+ binary,
+ &(isys_stream_descr));
+ }
+
+ if (!rc)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, i);
+
+ /* create the virtual Input System (2401) */
+ rc = ia_css_isys_stream_create(
+ &(isys_stream_descr),
+ &sp_pipeline_input_terminal->context.virtual_input_system_stream[i],
+ isys_stream_id);
+ if (!rc)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ /* calculate the configuration of the virtual Input System (2401) */
+ rc = ia_css_isys_stream_calculate_cfg(
+ &sp_pipeline_input_terminal->context.virtual_input_system_stream[i],
+ &(isys_stream_descr),
+ &sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i]);
+ if (!rc) {
+ ia_css_isys_stream_destroy(
+ &sp_pipeline_input_terminal->context.virtual_input_system_stream[i]);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() leave:\n");
+
+ return IA_CSS_SUCCESS;
+}
+
+static inline struct ia_css_pipe *stream_get_last_pipe(
+ struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *last_pipe = NULL;
+
+ if (stream)
+ last_pipe = stream->last_pipe;
+
+ return last_pipe;
+}
+
+static inline struct ia_css_pipe *stream_get_copy_pipe(
+ struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *copy_pipe = NULL;
+ struct ia_css_pipe *last_pipe = NULL;
+ enum ia_css_pipe_id pipe_id;
+
+ last_pipe = stream_get_last_pipe(stream);
+
+ if ((stream) &&
+ (last_pipe) &&
+ (stream->config.continuous)) {
+ pipe_id = last_pipe->mode;
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = last_pipe->pipe_settings.preview.copy_pipe;
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = last_pipe->pipe_settings.video.copy_pipe;
+ break;
+ default:
+ copy_pipe = NULL;
+ break;
+ }
+ }
+
+ return copy_pipe;
+}
+
+static inline struct ia_css_pipe *stream_get_target_pipe(
+ struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *target_pipe;
+
+ /* get the pipe that consumes the stream */
+ if (stream->config.continuous) {
+ target_pipe = stream_get_copy_pipe(stream);
+ } else {
+ target_pipe = stream_get_last_pipe(stream);
+ }
+
+ return target_pipe;
+}
+
+static enum ia_css_err stream_csi_rx_helper(
+ struct ia_css_stream *stream,
+ enum ia_css_err (*func)(enum mipi_port_id, uint32_t))
+{
+ enum ia_css_err retval = IA_CSS_ERR_INTERNAL_ERROR;
+ u32 sp_thread_id, stream_id;
+ bool rc;
+ struct ia_css_pipe *target_pipe = NULL;
+
+ if ((!stream) || (stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
+ goto exit;
+
+ target_pipe = stream_get_target_pipe(stream);
+
+ if (!target_pipe)
+ goto exit;
+
+ rc = ia_css_pipeline_get_sp_thread_id(
+ ia_css_pipe_get_pipe_num(target_pipe),
+ &sp_thread_id);
+
+ if (!rc)
+ goto exit;
+
+ /* (un)register all valid "virtual isys streams" within the ia_css_stream */
+ stream_id = 0;
+ do {
+ if (stream->config.isys_config[stream_id].valid) {
+ u32 isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, stream_id);
+
+ retval = func(stream->config.source.port.port, isys_stream_id);
+ }
+ stream_id++;
+ } while ((retval == IA_CSS_SUCCESS) &&
+ (stream_id < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH));
+
+exit:
+ return retval;
+}
+
+static inline enum ia_css_err stream_register_with_csi_rx(
+ struct ia_css_stream *stream)
+{
+ return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_register_stream);
+}
+
+static inline enum ia_css_err stream_unregister_with_csi_rx(
+ struct ia_css_stream *stream)
+{
+ return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_unregister_stream);
+}
+#endif
+
+#if WITH_PC_MONITORING
+static struct task_struct *my_kthread; /* Handle for the monitoring thread */
+static int sh_binary_running; /* Enable sampling in the thread */
+
+static void print_pc_histo(char *core_name, struct sh_css_pc_histogram *hist)
+{
+ unsigned int i;
+ unsigned int cnt_run = 0;
+ unsigned int cnt_stall = 0;
+
+ if (!hist)
+ return;
+
+ sh_css_print("%s histogram length = %d\n", core_name, hist->length);
+ sh_css_print("%s PC\turn\tstall\n", core_name);
+
+ for (i = 0; i < hist->length; i++) {
+		if ((hist->run[i] == 0) && (hist->stall[i] == 0))
+ continue;
+ sh_css_print("%s %d\t%d\t%d\n",
+ core_name, i, hist->run[i], hist->stall[i]);
+ cnt_run += hist->run[i];
+ cnt_stall += hist->stall[i];
+ }
+
+ sh_css_print(" Statistics for %s, cnt_run = %d, cnt_stall = %d, hist->length = %d\n",
+ core_name, cnt_run, cnt_stall, hist->length);
+}
+
+static void print_pc_histogram(void)
+{
+ struct ia_css_binary_metrics *metrics;
+
+ for (metrics = sh_css_metrics.binary_metrics;
+ metrics;
+ metrics = metrics->next) {
+ if (metrics->mode == IA_CSS_BINARY_MODE_PREVIEW ||
+ metrics->mode == IA_CSS_BINARY_MODE_VF_PP) {
+ sh_css_print("pc_histogram for binary %d is SKIPPED\n",
+ metrics->id);
+ continue;
+ }
+
+ sh_css_print(" pc_histogram for binary %d\n", metrics->id);
+ print_pc_histo(" ISP", &metrics->isp_histogram);
+ print_pc_histo(" SP", &metrics->sp_histogram);
+ sh_css_print("print_pc_histogram() done for binay->id = %d, done.\n",
+ metrics->id);
+ }
+
+ sh_css_print("PC_MONITORING:print_pc_histogram() -- DONE\n");
+}
+
+static int pc_monitoring(void *data)
+{
+ int i = 0;
+
+ (void)data;
+ while (true) {
+ if (sh_binary_running) {
+ sh_css_metrics_sample_pcs();
+#if MULTIPLE_SAMPLES
+ for (i = 0; i < NOF_SAMPLES; i++)
+ sh_css_metrics_sample_pcs();
+#endif
+ }
+ usleep_range(10, 50);
+ }
+ return 0;
+}
+
+static void spying_thread_create(void)
+{
+ my_kthread = kthread_run(pc_monitoring, NULL, "sh_pc_monitor");
+ sh_css_metrics_enable_pc_histogram(1);
+}
+
+static void input_frame_info(struct ia_css_frame_info frame_info)
+{
+ sh_css_print("SH_CSS:input_frame_info() -- frame->info.res.width = %d, frame->info.res.height = %d, format = %d\n",
+ frame_info.res.width, frame_info.res.height, frame_info.format);
+}
+#endif /* WITH_PC_MONITORING */
+
+static void
+start_binary(struct ia_css_pipe *pipe,
+ struct ia_css_binary *binary)
+{
+ struct ia_css_stream *stream;
+
+ assert(pipe);
+	/* Acceleration uses firmware, so the binary can be NULL */
+ /* assert(binary != NULL); */
+
+ (void)binary;
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ stream = pipe->stream;
+#else
+ (void)pipe;
+ (void)stream;
+#endif
+
+ if (binary)
+ sh_css_metrics_start_binary(&binary->metrics);
+
+#if WITH_PC_MONITORING
+	if (binary) {
+		sh_css_print("PC_MONITORING: %s() -- binary id = %d , enable_dvs_envelope = %d\n",
+			     __func__, binary->info->sp.id,
+			     binary->info->sp.enable.dvs_envelope);
+		input_frame_info(binary->in_frame_info);
+
+		if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_VIDEO)
+			sh_binary_running = true;
+	}
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (stream->reconfigure_css_rx) {
+ ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
+ pipe->stream->config.mode);
+ stream->reconfigure_css_rx = false;
+ }
+#endif
+}
+
+/* start the copy function on the SP */
+static enum ia_css_err
+start_copy_on_sp(struct ia_css_pipe *pipe,
+ struct ia_css_frame *out_frame) {
+ (void)out_frame;
+ assert(pipe);
+ assert(pipe->stream);
+
+ if ((!pipe) || (!pipe->stream))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (pipe->stream->reconfigure_css_rx)
+ ia_css_isys_rx_disable();
+#endif
+
+ if (pipe->stream->config.input_config.format != ATOMISP_INPUT_FORMAT_BINARY_8)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+	sh_css_sp_start_binary_copy(ia_css_pipe_get_pipe_num(pipe), out_frame,
+				    pipe->stream->config.pixels_per_clock == 2);
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (pipe->stream->reconfigure_css_rx)
+ {
+ ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
+ pipe->stream->config.mode);
+ pipe->stream->reconfigure_css_rx = false;
+ }
+#endif
+
+ return IA_CSS_SUCCESS;
+}
+
+void sh_css_binary_args_reset(struct sh_css_binary_args *args)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_TNR_FRAMES; i++)
+ args->tnr_frames[i] = NULL;
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++)
+ args->delay_frames[i] = NULL;
+ args->in_frame = NULL;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ args->out_frame[i] = NULL;
+ args->out_vf_frame = NULL;
+ args->copy_vf = false;
+ args->copy_output = true;
+ args->vf_downscale_log2 = 0;
+}
+
+static void start_pipe(
+ struct ia_css_pipe *me,
+ enum sh_css_pipe_config_override copy_ovrd,
+ enum ia_css_input_mode input_mode)
+{
+ const struct ia_css_coordinate *coord = NULL;
+ const struct ia_css_isp_parameters *params = NULL;
+
+#if defined(HAS_NO_INPUT_SYSTEM)
+ (void)input_mode;
+#endif
+
+ IA_CSS_ENTER_PRIVATE("me = %p, copy_ovrd = %d, input_mode = %d",
+ me, copy_ovrd, input_mode);
+
+ assert(me); /* all callers are in this file and call with non null argument */
+
+ if (!atomisp_hw_is_isp2401) {
+ coord = &me->config.internal_frame_origin_bqs_on_sctbl;
+ params = me->stream->isp_params_configs;
+ }
+
+ sh_css_sp_init_pipeline(&me->pipeline,
+ me->mode,
+ (uint8_t)ia_css_pipe_get_pipe_num(me),
+ me->config.default_capture_config.enable_xnr != 0,
+ me->stream->config.pixels_per_clock == 2,
+ me->stream->config.continuous,
+ false,
+ me->required_bds_factor,
+ copy_ovrd,
+ input_mode,
+ &me->stream->config.metadata_config,
+ &me->stream->info.metadata_info
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ , (input_mode == IA_CSS_INPUT_MODE_MEMORY) ?
+ (enum mipi_port_id)0 :
+ me->stream->config.source.port.port,
+#endif
+ coord,
+ params);
+
+ if (me->config.mode != IA_CSS_PIPE_MODE_COPY) {
+ struct ia_css_pipeline_stage *stage;
+
+ stage = me->pipeline.stages;
+ if (stage) {
+ me->pipeline.current_stage = stage;
+ start_binary(me, stage->binary);
+ }
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+sh_css_invalidate_shading_tables(struct ia_css_stream *stream)
+{
+ int i;
+
+ assert(stream);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_invalidate_shading_tables() enter:\n");
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ assert(stream->pipes[i]);
+ sh_css_pipe_free_shading_table(stream->pipes[i]);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_invalidate_shading_tables() leave: return_void\n");
+}
+
+static void
+enable_interrupts(enum ia_css_irq_type irq_type)
+{
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+ enum mipi_port_id port;
+#endif
+ bool enable_pulse = irq_type != IA_CSS_IRQ_TYPE_EDGE;
+
+ IA_CSS_ENTER_PRIVATE("");
+ /* Enable IRQ on the SP which signals that SP goes to idle
+ * (aka ready state) */
+ cnd_sp_irq_enable(SP0_ID, true);
+ /* Set the IRQ device 0 to either level or pulse */
+ irq_enable_pulse(IRQ0_ID, enable_pulse);
+
+ cnd_virq_enable_channel(virq_sp, true);
+
+ /* Enable SW interrupt 0, this is used to signal ISYS events */
+ cnd_virq_enable_channel(
+ (virq_id_t)(IRQ_SW_CHANNEL0_ID + IRQ_SW_CHANNEL_OFFSET),
+ true);
+ /* Enable SW interrupt 1, this is used to signal PSYS events */
+ cnd_virq_enable_channel(
+ (virq_id_t)(IRQ_SW_CHANNEL1_ID + IRQ_SW_CHANNEL_OFFSET),
+ true);
+#if !defined(HAS_IRQ_MAP_VERSION_2)
+ /* IRQ_SW_CHANNEL2_ID does not exist on 240x systems */
+ cnd_virq_enable_channel(
+ (virq_id_t)(IRQ_SW_CHANNEL2_ID + IRQ_SW_CHANNEL_OFFSET),
+ true);
+ virq_clear_all();
+#endif
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+ for (port = 0; port < N_MIPI_PORT_ID; port++)
+ ia_css_isys_rx_enable_all_interrupts(port);
+#endif
+
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+static bool sh_css_setup_spctrl_config(const struct ia_css_fw_info *fw,
+ const char *program,
+ ia_css_spctrl_cfg *spctrl_cfg)
+{
+ if ((!fw) || (!spctrl_cfg))
+ return false;
+ spctrl_cfg->sp_entry = 0;
+ spctrl_cfg->program_name = (char *)(program);
+
+ spctrl_cfg->ddr_data_offset = fw->blob.data_source;
+ spctrl_cfg->dmem_data_addr = fw->blob.data_target;
+ spctrl_cfg->dmem_bss_addr = fw->blob.bss_target;
+ spctrl_cfg->data_size = fw->blob.data_size;
+ spctrl_cfg->bss_size = fw->blob.bss_size;
+
+ spctrl_cfg->spctrl_config_dmem_addr = fw->info.sp.init_dmem_data;
+ spctrl_cfg->spctrl_state_dmem_addr = fw->info.sp.sw_state;
+
+ spctrl_cfg->code_size = fw->blob.size;
+ spctrl_cfg->code = fw->blob.code;
+ spctrl_cfg->sp_entry = fw->info.sp.sp_entry; /* entry function ptr on SP */
+
+ return true;
+}
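+
+/* The cfg filled in above is consumed by ia_css_spctrl_load_fw(SP0_ID,
+ * &spctrl_cfg) in ia_css_init() below, which loads the code blob and
+ * programs the SP icache.
+ */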
+
+void
+ia_css_unload_firmware(void)
+{
+ if (sh_css_num_binaries) {
+		/* we have already loaded before, so get rid of the old stuff */
+ ia_css_binary_uninit();
+ sh_css_unload_firmware();
+ }
+ fw_explicitly_loaded = false;
+}
+
+static void
+ia_css_reset_defaults(struct sh_css *css)
+{
+ struct sh_css default_css;
+
+ /* Reset everything to zero */
+ memset(&default_css, 0, sizeof(default_css));
+
+	/* Initialize the non-zero values */
+ default_css.check_system_idle = true;
+ default_css.num_cont_raw_frames = NUM_CONTINUOUS_FRAMES;
+
+ /* All should be 0: but memset does it already.
+ * default_css.num_mipi_frames[N_CSI_PORTS] = 0;
+ */
+
+ default_css.irq_type = IA_CSS_IRQ_TYPE_EDGE;
+
+	/* Copy the defaults to the output */
+ *css = default_css;
+}
+
+enum ia_css_err
+ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
+ const struct ia_css_fw *fw) {
+ enum ia_css_err err;
+
+ if (!env)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (!fw)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() enter\n");
+
+ /* make sure we initialize my_css */
+ if (my_css.flush != env->cpu_mem_env.flush)
+ {
+ ia_css_reset_defaults(&my_css);
+ my_css.flush = env->cpu_mem_env.flush;
+ }
+
+ ia_css_unload_firmware(); /* in case we are called twice */
+ err = sh_css_load_firmware(dev, fw->data, fw->bytes);
+ if (err == IA_CSS_SUCCESS)
+ {
+ err = ia_css_binary_init_infos();
+ if (err == IA_CSS_SUCCESS)
+ fw_explicitly_loaded = true;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() leave\n");
+ return err;
+}
+
+enum ia_css_err
+ia_css_init(struct device *dev, const struct ia_css_env *env,
+ const struct ia_css_fw *fw,
+ u32 mmu_l1_base,
+ enum ia_css_irq_type irq_type) {
+ enum ia_css_err err;
+ ia_css_spctrl_cfg spctrl_cfg;
+
+ void (*flush_func)(struct ia_css_acc_fw *fw);
+ hrt_data select, enable;
+
+ /*
+ * The C99 standard does not specify the exact object representation of structs;
+ * the representation is compiler dependent.
+ *
+ * The structs that are communicated between host and SP/ISP should have the
+ * exact same object representation. The compiler that is used to compile the
+ * firmware is hivecc.
+ *
+ * To check if a different compiler, used to compile a host application, uses
+ * another object representation, macros are defined specifying the size of
+ * the structs as expected by the firmware.
+ *
+ * A host application shall verify that a sizeof( ) of the struct is equal to
+ * the SIZE_OF_XXX macro of the corresponding struct. If they are not
+ * equal, functionality will break.
+ */
+ /* Check struct sh_css_ddr_address_map */
+ COMPILATION_ERROR_IF(sizeof(struct sh_css_ddr_address_map) != SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT);
+ /* Check struct host_sp_queues */
+ COMPILATION_ERROR_IF(sizeof(struct host_sp_queues) != SIZE_OF_HOST_SP_QUEUES_STRUCT);
+ COMPILATION_ERROR_IF(sizeof(struct ia_css_circbuf_desc_s) != SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT);
+ COMPILATION_ERROR_IF(sizeof(struct ia_css_circbuf_elem_s) != SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT);
+
+ /* Check struct host_sp_communication */
+ COMPILATION_ERROR_IF(sizeof(struct host_sp_communication) != SIZE_OF_HOST_SP_COMMUNICATION_STRUCT);
+ COMPILATION_ERROR_IF(sizeof(struct sh_css_event_irq_mask) != SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT);
+
+ /* Check struct sh_css_hmm_buffer */
+ COMPILATION_ERROR_IF(sizeof(struct sh_css_hmm_buffer) != SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT);
+ COMPILATION_ERROR_IF(sizeof(struct ia_css_isp_3a_statistics) != SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT);
+ COMPILATION_ERROR_IF(sizeof(struct ia_css_isp_dvs_statistics) != SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT);
+ COMPILATION_ERROR_IF(sizeof(struct ia_css_metadata) != SIZE_OF_IA_CSS_METADATA_STRUCT);
+
+ /* Check struct ia_css_init_dmem_cfg */
+ COMPILATION_ERROR_IF(sizeof(struct ia_css_sp_init_dmem_cfg) != SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT);
+
+ if (!fw && !fw_explicitly_loaded)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (!env)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ sh_css_printf = env->print_env.debug_print;
+
+ IA_CSS_ENTER("void");
+
+ flush_func = env->cpu_mem_env.flush;
+
+ pipe_global_init();
+ ia_css_pipeline_init();
+ ia_css_queue_map_init();
+
+ ia_css_device_access_init(&env->hw_access_env);
+
+ select = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_select)
+ & (~GPIO_FLASH_PIN_MASK);
+ enable = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_e)
+ | GPIO_FLASH_PIN_MASK;
+ sh_css_mmu_set_page_table_base_index(mmu_l1_base);
+
+ my_css_save.mmu_base = mmu_l1_base;
+
+ ia_css_reset_defaults(&my_css);
+
+ my_css_save.driver_env = *env;
+ my_css.flush = flush_func;
+
+ err = ia_css_rmgr_init();
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ IA_CSS_LOG("init: %d", my_css_save_initialized);
+
+ if (!my_css_save_initialized)
+ {
+ my_css_save_initialized = true;
+ my_css_save.mode = sh_css_mode_working;
+ memset(my_css_save.stream_seeds, 0,
+ sizeof(struct sh_css_stream_seed) * MAX_ACTIVE_STREAMS);
+ IA_CSS_LOG("init: %d mode=%d", my_css_save_initialized, my_css_save.mode);
+ }
+
+ mipi_init();
+
+#ifndef ISP2401
+ /* In case this has been programmed already, update internal
+ data structure ... DEPRECATED */
+ my_css.page_table_base_index = mmu_get_page_table_base_index(MMU0_ID);
+
+#endif
+ my_css.irq_type = irq_type;
+
+ my_css_save.irq_type = irq_type;
+
+ enable_interrupts(my_css.irq_type);
+
+ /* configure GPIO to output mode */
+ gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_select, select);
+ gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_e, enable);
+ gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_0, 0);
+
+ err = ia_css_refcount_init(REFCOUNT_SIZE);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ err = sh_css_params_init();
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ if (fw)
+ {
+ ia_css_unload_firmware(); /* in case we already had firmware loaded */
+ err = sh_css_load_firmware(dev, fw->data, fw->bytes);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ err = ia_css_binary_init_infos();
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ fw_explicitly_loaded = false;
+#ifndef ISP2401
+ my_css_save.loaded_fw = (struct ia_css_fw *)fw;
+#endif
+ }
+ if (!sh_css_setup_spctrl_config(&sh_css_sp_fw, SP_PROG_NAME, &spctrl_cfg))
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ err = ia_css_spctrl_load_fw(SP0_ID, &spctrl_cfg);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+#if WITH_PC_MONITORING
+ if (!thread_alive)
+ {
+ thread_alive++;
+ sh_css_print("PC_MONITORING: %s() -- create thread DISABLED\n",
+ __func__);
+ spying_thread_create();
+ }
+#endif
+ if (!sh_css_hrt_system_is_idle())
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_SYSTEM_NOT_IDLE);
+ return IA_CSS_ERR_SYSTEM_NOT_IDLE;
+ }
+	/* could be called here; queuing works, but:
+	   - when the sp is started later, it will wipe the queued items,
+	   so for now we leave it for later and make sure
+	   updates are not called too frequently.
+	sh_css_init_buffer_queues();
+	*/
+
+#if defined(HAS_INPUT_SYSTEM_VERSION_2) && defined(HAS_INPUT_SYSTEM_VERSION_2401)
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 0);
+#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+ gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 1);
+#endif
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
+ ISP_DMA_MAX_BURST_LENGTH);
+
+ if (ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR)
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+#endif
+
+ sh_css_params_map_and_store_default_gdc_lut();
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_enable_isys_event_queue(bool enable) {
+ if (sh_css_sp_is_running())
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ sh_css_sp_enable_isys_event_queue(enable);
+ return IA_CSS_SUCCESS;
+}
+
+void *sh_css_malloc(size_t size)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_malloc() enter: size=%zu\n",
+ size);
+ /* FIXME: This first test can probably go away */
+ if (size == 0)
+ return NULL;
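+	/* Allocations larger than a page go to vmalloc() to avoid high-order
+	 * page allocations; smaller ones use kmalloc() for speed. */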
+ if (size > PAGE_SIZE)
+ return vmalloc(size);
+ return kmalloc(size, GFP_KERNEL);
+}
+
+void *sh_css_calloc(size_t N, size_t size)
+{
+ void *p;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_calloc() enter: N=%zu, size=%zu\n", N, size);
+
+ /* FIXME: this test can probably go away */
+ if (size > 0) {
+ p = sh_css_malloc(N * size);
+ if (p)
+			memset(p, 0, N * size); /* zero the whole allocation, not just the first element */
+ return p;
+ }
+ return NULL;
+}
+
+void sh_css_free(void *ptr)
+{
+ if (is_vmalloc_addr(ptr))
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
+
+/* For Acceleration API: Flush FW (shared buffer pointer) arguments */
+void
+sh_css_flush(struct ia_css_acc_fw *fw)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_flush() enter:\n");
+ if ((fw) && (my_css.flush))
+ my_css.flush(fw);
+}
+
+/* Mapping sp threads. Currently, this is done when a stream is created and
+ * pipelines are ready to be converted to sp pipelines. Be careful if you are
+ * doing it from stream_create since we could run out of sp threads due to
+ * allocation on inactive pipelines. */
+static enum ia_css_err
+map_sp_threads(struct ia_css_stream *stream, bool map) {
+ struct ia_css_pipe *main_pipe = NULL;
+ struct ia_css_pipe *copy_pipe = NULL;
+ struct ia_css_pipe *capture_pipe = NULL;
+ struct ia_css_pipe *acc_pipe = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_pipe_id pipe_id;
+
+ assert(stream);
+ IA_CSS_ENTER_PRIVATE("stream = %p, map = %s",
+ stream, map ? "true" : "false");
+
+ if (!stream)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ main_pipe = stream->last_pipe;
+ pipe_id = main_pipe->mode;
+
+ ia_css_pipeline_map(main_pipe->pipe_num, map);
+
+ switch (pipe_id)
+ {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
+ acc_pipe = main_pipe->pipe_settings.preview.acc_pipe;
+ break;
+
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
+ break;
+
+ case IA_CSS_PIPE_ID_CAPTURE:
+ case IA_CSS_PIPE_ID_ACC:
+ default:
+ break;
+ }
+
+ if (acc_pipe)
+ {
+ ia_css_pipeline_map(acc_pipe->pipe_num, map);
+ }
+
+ if (capture_pipe)
+ {
+ ia_css_pipeline_map(capture_pipe->pipe_num, map);
+ }
+
+	/* Firmware expects the copy pipe (if present) to be the last pipe mapped. */
+ if (copy_pipe)
+ {
+ ia_css_pipeline_map(copy_pipe->pipe_num, map);
+ }
+ /* DH regular multi pipe - not continuous mode: map the next pipes too */
+ if (!stream->config.continuous)
+ {
+ int i;
+
+ for (i = 1; i < stream->num_pipes; i++)
+ ia_css_pipeline_map(stream->pipes[i]->pipe_num, map);
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* creates a host pipeline skeleton for all pipes in a stream. Called during
+ * stream_create. */
+static enum ia_css_err
+create_host_pipeline_structure(struct ia_css_stream *stream) {
+ struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
+ struct ia_css_pipe *acc_pipe = NULL;
+ enum ia_css_pipe_id pipe_id;
+ struct ia_css_pipe *main_pipe = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int copy_pipe_delay = 0,
+ capture_pipe_delay = 0;
+
+ assert(stream);
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+
+ if (!stream)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ main_pipe = stream->last_pipe;
+ assert(main_pipe);
+ if (!main_pipe)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe_id = main_pipe->mode;
+
+ switch (pipe_id)
+ {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ copy_pipe_delay = main_pipe->dvs_frame_delay;
+ capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
+ capture_pipe_delay = IA_CSS_FRAME_DELAY_0;
+ acc_pipe = main_pipe->pipe_settings.preview.acc_pipe;
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
+ main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+ copy_pipe_delay = main_pipe->dvs_frame_delay;
+ capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
+ capture_pipe_delay = IA_CSS_FRAME_DELAY_0;
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
+ main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ case IA_CSS_PIPE_ID_CAPTURE:
+ capture_pipe = main_pipe;
+ capture_pipe_delay = main_pipe->dvs_frame_delay;
+ break;
+
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
+ main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ case IA_CSS_PIPE_ID_ACC:
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
+ main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if ((err == IA_CSS_SUCCESS) && copy_pipe)
+ {
+ err = ia_css_pipeline_create(&copy_pipe->pipeline,
+ copy_pipe->mode,
+ copy_pipe->pipe_num,
+ copy_pipe_delay);
+ }
+
+ if ((err == IA_CSS_SUCCESS) && capture_pipe)
+ {
+ err = ia_css_pipeline_create(&capture_pipe->pipeline,
+ capture_pipe->mode,
+ capture_pipe->pipe_num,
+ capture_pipe_delay);
+ }
+
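+	/* Note: the acc pipeline reuses the main pipe's dvs_frame_delay,
+	 * unlike the copy/capture pipes which use the delays computed above. */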
+ if ((err == IA_CSS_SUCCESS) && acc_pipe)
+ {
+ err = ia_css_pipeline_create(&acc_pipe->pipeline, acc_pipe->mode,
+ acc_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ }
+
+ /* DH regular multi pipe - not continuous mode: create the next pipelines too */
+ if (!stream->config.continuous)
+ {
+ int i;
+
+ for (i = 1; i < stream->num_pipes && IA_CSS_SUCCESS == err; i++) {
+ main_pipe = stream->pipes[i];
+ err = ia_css_pipeline_create(&main_pipe->pipeline,
+ main_pipe->mode,
+ main_pipe->pipe_num,
+ main_pipe->dvs_frame_delay);
+ }
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* creates a host pipeline for all pipes in a stream. Called during
+ * stream_start. */
+static enum ia_css_err
+create_host_pipeline(struct ia_css_stream *stream) {
+ struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
+ struct ia_css_pipe *acc_pipe = NULL;
+ enum ia_css_pipe_id pipe_id;
+ struct ia_css_pipe *main_pipe = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int max_input_width = 0;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+ if (!stream)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ main_pipe = stream->last_pipe;
+ pipe_id = main_pipe->mode;
+
+ /* No continuous frame allocation for capture pipe. It uses the
+ * "main" pipe's frames. */
+ if ((pipe_id == IA_CSS_PIPE_ID_PREVIEW) ||
+ (pipe_id == IA_CSS_PIPE_ID_VIDEO))
+ {
+		/* About pipe_id == IA_CSS_PIPE_ID_PREVIEW && stream->config.mode != IA_CSS_INPUT_MODE_MEMORY:
+		 * The original condition pipe_id == IA_CSS_PIPE_ID_PREVIEW is too strong. E.g. in SkyCam (with memory
+		 * based input frames) there is no continuous mode and thus no need for allocated continuous frames.
+		 * This applies not only to SkyCam but to all preview cases that use DDR based input frames. For this
+		 * reason the stream->config.mode != IA_CSS_INPUT_MODE_MEMORY check has been added.
+		 */
+ if (stream->config.continuous ||
+ (pipe_id == IA_CSS_PIPE_ID_PREVIEW &&
+ stream->config.mode != IA_CSS_INPUT_MODE_MEMORY)) {
+ err = alloc_continuous_frames(main_pipe, true);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* old isys: need to allocate_mipi_frames() even in IA_CSS_PIPE_MODE_COPY */
+ if (pipe_id != IA_CSS_PIPE_ID_ACC)
+ {
+ err = allocate_mipi_frames(main_pipe, &stream->info);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if ((pipe_id != IA_CSS_PIPE_ID_ACC) &&
+ (main_pipe->config.mode != IA_CSS_PIPE_MODE_COPY))
+ {
+ err = allocate_mipi_frames(main_pipe, &stream->info);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+#endif
+
+ switch (pipe_id)
+ {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
+ acc_pipe = main_pipe->pipe_settings.preview.acc_pipe;
+ max_input_width =
+ main_pipe->pipe_settings.preview.preview_binary.info->sp.input.max_width;
+
+ err = create_host_preview_pipeline(main_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ break;
+
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
+ max_input_width =
+ main_pipe->pipe_settings.video.video_binary.info->sp.input.max_width;
+
+ err = create_host_video_pipeline(main_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ break;
+
+ case IA_CSS_PIPE_ID_CAPTURE:
+ capture_pipe = main_pipe;
+
+ break;
+
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = create_host_yuvpp_pipeline(main_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ break;
+
+ case IA_CSS_PIPE_ID_ACC:
+ err = create_host_acc_pipeline(main_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ if (copy_pipe)
+ {
+ err = create_host_copy_pipeline(copy_pipe, max_input_width,
+ main_pipe->continuous_frames[0]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ if (capture_pipe)
+ {
+ err = create_host_capture_pipeline(capture_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ if (acc_pipe)
+ {
+ err = create_host_acc_pipeline(acc_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ /* DH regular multi pipe - not continuous mode: create the next pipelines too */
+ if (!stream->config.continuous)
+ {
+ int i;
+
+ for (i = 1; i < stream->num_pipes && IA_CSS_SUCCESS == err; i++) {
+ switch (stream->pipes[i]->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = create_host_preview_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = create_host_video_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = create_host_capture_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = create_host_yuvpp_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ err = create_host_acc_pipeline(stream->pipes[i]);
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ }
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+init_pipe_defaults(enum ia_css_pipe_mode mode,
+ struct ia_css_pipe *pipe,
+ bool copy_pipe) {
+ if (!pipe)
+ {
+ IA_CSS_ERROR("NULL pipe parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* Initialize pipe to pre-defined defaults */
+ *pipe = IA_CSS_DEFAULT_PIPE;
+
+ /* TODO: JB should not be needed, but temporary backward reference */
+ switch (mode)
+ {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ pipe->mode = IA_CSS_PIPE_ID_PREVIEW;
+ pipe->pipe_settings.preview = IA_CSS_DEFAULT_PREVIEW_SETTINGS;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (copy_pipe) {
+ pipe->mode = IA_CSS_PIPE_ID_COPY;
+ } else {
+ pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
+ }
+ pipe->pipe_settings.capture = IA_CSS_DEFAULT_CAPTURE_SETTINGS;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ pipe->mode = IA_CSS_PIPE_ID_VIDEO;
+ pipe->pipe_settings.video = IA_CSS_DEFAULT_VIDEO_SETTINGS;
+ break;
+ case IA_CSS_PIPE_MODE_ACC:
+ pipe->mode = IA_CSS_PIPE_ID_ACC;
+ break;
+ case IA_CSS_PIPE_MODE_COPY:
+ pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
+ break;
+ case IA_CSS_PIPE_MODE_YUVPP:
+ pipe->mode = IA_CSS_PIPE_ID_YUVPP;
+ pipe->pipe_settings.yuvpp = IA_CSS_DEFAULT_YUVPP_SETTINGS;
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+static void
+pipe_global_init(void)
+{
+ u8 i;
+
+ my_css.pipe_counter = 0;
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ my_css.all_pipes[i] = NULL;
+ }
+}
+
+static enum ia_css_err
+pipe_generate_pipe_num(const struct ia_css_pipe *pipe,
+ unsigned int *pipe_number) {
+ const u8 INVALID_PIPE_NUM = (uint8_t)~(0);
+ u8 pipe_num = INVALID_PIPE_NUM;
+ u8 i;
+
+ if (!pipe)
+ {
+ IA_CSS_ERROR("NULL pipe parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+	/* Assign a new pipe_num: search for an empty slot */
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++)
+ {
+ if (!my_css.all_pipes[i]) {
+			/* position is reserved */
+ my_css.all_pipes[i] = (struct ia_css_pipe *)pipe;
+ pipe_num = i;
+ break;
+ }
+ }
+ if (pipe_num == INVALID_PIPE_NUM)
+ {
+ /* Max number of pipes already allocated */
+ IA_CSS_ERROR("Max number of pipes already created");
+ return IA_CSS_ERR_RESOURCE_EXHAUSTED;
+ }
+
+ my_css.pipe_counter++;
+
+ IA_CSS_LOG("pipe_num (%d)", pipe_num);
+
+ *pipe_number = pipe_num;
+ return IA_CSS_SUCCESS;
+}
+
+static void
+pipe_release_pipe_num(unsigned int pipe_num)
+{
+ my_css.all_pipes[pipe_num] = NULL;
+ my_css.pipe_counter--;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "pipe_release_pipe_num (%d)\n", pipe_num);
+}
+
+static enum ia_css_err
+create_pipe(enum ia_css_pipe_mode mode,
+ struct ia_css_pipe **pipe,
+ bool copy_pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipe *me;
+
+ if (!pipe)
+ {
+ IA_CSS_ERROR("NULL pipe parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ me = kmalloc(sizeof(*me), GFP_KERNEL);
+ if (!me)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ err = init_pipe_defaults(mode, me, copy_pipe);
+ if (err != IA_CSS_SUCCESS)
+ {
+ kfree(me);
+ return err;
+ }
+
+ err = pipe_generate_pipe_num(me, &me->pipe_num);
+ if (err != IA_CSS_SUCCESS)
+ {
+ kfree(me);
+ return err;
+ }
+
+ *pipe = me;
+ return IA_CSS_SUCCESS;
+}
+
+struct ia_css_pipe *
+find_pipe_by_num(uint32_t pipe_num)
+{
+ unsigned int i;
+
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ if (my_css.all_pipes[i] &&
+ ia_css_pipe_get_pipe_num(my_css.all_pipes[i]) == pipe_num) {
+ return my_css.all_pipes[i];
+ }
+ }
+ return NULL;
+}
+
+static void sh_css_pipe_free_acc_binaries(
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+
+ assert(pipe);
+ if (!pipe) {
+ IA_CSS_ERROR("NULL input pointer");
+ return;
+ }
+ pipeline = &pipe->pipeline;
+
+ /* loop through the stages and unload them */
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ struct ia_css_fw_info *firmware = (struct ia_css_fw_info *)
+ stage->firmware;
+ if (firmware)
+ ia_css_pipe_unload_extension(pipe, firmware);
+ }
+}
+
+enum ia_css_err
+ia_css_pipe_destroy(struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("pipe = %p", pipe);
+
+ if (!pipe)
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (pipe->stream)
+ {
+ IA_CSS_LOG("ia_css_stream_destroy not called!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ switch (pipe->config.mode)
+ {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ /* need to take into account that this function is also called
+ on the internal copy pipe */
+ if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) {
+ ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->continuous_frames);
+ ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->cont_md_buffers);
+ if (pipe->pipe_settings.preview.copy_pipe) {
+ err = ia_css_pipe_destroy(pipe->pipe_settings.preview.copy_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_destroy(): destroyed internal copy pipe err=%d\n",
+ err);
+ }
+ }
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) {
+ ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->continuous_frames);
+ ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->cont_md_buffers);
+ if (pipe->pipe_settings.video.copy_pipe) {
+ err = ia_css_pipe_destroy(pipe->pipe_settings.video.copy_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_destroy(): destroyed internal copy pipe err=%d\n",
+ err);
+ }
+ }
+		/* identical for ISP2401 and non-ISP2401 */
+		ia_css_frame_free_multiple(NUM_TNR_FRAMES,
+					   pipe->pipe_settings.video.tnr_frames);
+ ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES,
+ pipe->pipe_settings.video.delay_frames);
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES,
+ pipe->pipe_settings.capture.delay_frames);
+ break;
+ case IA_CSS_PIPE_MODE_ACC:
+ sh_css_pipe_free_acc_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_MODE_COPY:
+ break;
+ case IA_CSS_PIPE_MODE_YUVPP:
+ break;
+ }
+
+ sh_css_params_free_gdc_lut(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+
+ my_css.active_pipes[ia_css_pipe_get_pipe_num(pipe)] = NULL;
+ sh_css_pipe_free_shading_table(pipe);
+
+ ia_css_pipeline_destroy(&pipe->pipeline);
+ pipe_release_pipe_num(ia_css_pipe_get_pipe_num(pipe));
+
+ /* Temporarily, not every sh_css_pipe has an acc_extension. */
+ if (pipe->config.acc_extension)
+ {
+ ia_css_pipe_unload_extension(pipe, pipe->config.acc_extension);
+ }
+ kfree(pipe);
+ IA_CSS_LEAVE("err = %d", err);
+ return err;
+}
+
+void
+ia_css_uninit(void)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() enter: void\n");
+#if WITH_PC_MONITORING
+ sh_css_print("PC_MONITORING: %s() -- started\n", __func__);
+ print_pc_histogram();
+#endif
+
+ sh_css_params_free_default_gdc_lut();
+
+ /* TODO: JB: implement decent check and handling of freeing mipi frames */
+ //assert(ref_count_mipi_allocation == 0); //mipi frames are not freed
+ /* cleanup generic data */
+ sh_css_params_uninit();
+ ia_css_refcount_uninit();
+
+ ia_css_rmgr_uninit();
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ /* needed for reprogramming the inputformatter after power cycle of css */
+ ifmtr_set_if_blocking_mode_reset = true;
+#endif
+
+ if (!fw_explicitly_loaded) {
+ ia_css_unload_firmware();
+ }
+ ia_css_spctrl_unload_fw(SP0_ID);
+ sh_css_sp_set_sp_running(false);
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* check and free any remaining mipi frames */
+ free_mipi_frames(NULL);
+#endif
+
+ sh_css_sp_reset_global_vars();
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ ia_css_isys_uninit();
+#endif
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() leave: return_void\n");
+}
+
+#if defined(HAS_IRQ_MAP_VERSION_2)
+enum ia_css_err ia_css_irq_translate(
+ unsigned int *irq_infos)
+{
+ virq_id_t irq;
+ enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_more_irqs;
+ unsigned int infos = 0;
+
+ /* irq_infos can be NULL, but that would make the function useless */
+ /* assert(irq_infos != NULL); */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_irq_translate() enter: irq_infos=%p\n", irq_infos);
+
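+	/* Drain all pending virtual IRQ channels and accumulate their info bits. */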
+ while (status == hrt_isp_css_irq_status_more_irqs) {
+ status = virq_get_channel_id(&irq);
+ if (status == hrt_isp_css_irq_status_error)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+#if WITH_PC_MONITORING
+ sh_css_print("PC_MONITORING: %s() irq = %d, sh_binary_running set to 0\n",
+ __func__, irq);
+ sh_binary_running = 0;
+#endif
+
+ switch (irq) {
+ case virq_sp:
+ /* When SP goes to idle, info is available in the
+ * event queue. */
+ infos |= IA_CSS_IRQ_INFO_EVENTS_READY;
+ break;
+ case virq_isp:
+ break;
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ case virq_isys_sof:
+ infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF;
+ break;
+ case virq_isys_eof:
+ infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF;
+ break;
+ case virq_isys_csi:
+ infos |= IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR;
+ break;
+#endif
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ case virq_ifmt0_id:
+ infos |= IA_CSS_IRQ_INFO_IF_ERROR;
+ break;
+#endif
+ case virq_dma:
+ infos |= IA_CSS_IRQ_INFO_DMA_ERROR;
+ break;
+ case virq_sw_pin_0:
+ infos |= sh_css_get_sw_interrupt_value(0);
+ break;
+ case virq_sw_pin_1:
+ infos |= sh_css_get_sw_interrupt_value(1);
+ /* pqiao TODO: also assumption here */
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (irq_infos)
+ *irq_infos = infos;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_irq_translate() leave: irq_infos=%u\n",
+ infos);
+
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_irq_enable(
+ enum ia_css_irq_info info,
+ bool enable)
+{
+ virq_id_t irq = N_virq_id;
+
+ IA_CSS_ENTER("info=%d, enable=%d", info, enable);
+
+ switch (info) {
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ case IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF:
+ irq = virq_isys_sof;
+ break;
+ case IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF:
+ irq = virq_isys_eof;
+ break;
+ case IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR:
+ irq = virq_isys_csi;
+ break;
+ case IA_CSS_IRQ_INFO_IF_ERROR:
+ irq = virq_ifmt0_id;
+ break;
+#else
+ case IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF:
+ case IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF:
+ case IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR:
+ case IA_CSS_IRQ_INFO_IF_ERROR:
+ /* Just ignore those unused IRQs without printing errors */
+ return IA_CSS_SUCCESS;
+#endif
+ case IA_CSS_IRQ_INFO_DMA_ERROR:
+ irq = virq_dma;
+ break;
+ case IA_CSS_IRQ_INFO_SW_0:
+ irq = virq_sw_pin_0;
+ break;
+ case IA_CSS_IRQ_INFO_SW_1:
+ irq = virq_sw_pin_1;
+ break;
+ default:
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ cnd_virq_enable_channel(irq, enable);
+
+ IA_CSS_LEAVE_ERR(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+#else
+#error "sh_css.c: IRQ MAP must be one of { IRQ_MAP_VERSION_2 }"
+#endif
+
+static unsigned int
+sh_css_get_sw_interrupt_value(unsigned int irq)
+{
+ unsigned int irq_value;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_get_sw_interrupt_value() enter: irq=%d\n", irq);
+ irq_value = sh_css_sp_get_sw_interrupt_value(irq);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_get_sw_interrupt_value() leave: irq_value=%d\n", irq_value);
+ return irq_value;
+}
+
+/* configure and load the copy binary, the next binary is used to
+ determine whether the copy binary needs to do left padding. */
+static enum ia_css_err load_copy_binary(
+ struct ia_css_pipe *pipe,
+ struct ia_css_binary *copy_binary,
+ struct ia_css_binary *next_binary)
+{
+ struct ia_css_frame_info copy_out_info, copy_in_info, copy_vf_info;
+ unsigned int left_padding;
+ enum ia_css_err err;
+ struct ia_css_binary_descr copy_descr;
+
+ /* next_binary can be NULL */
+ assert(pipe);
+ assert(copy_binary);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "load_copy_binary() enter:\n");
+
+ if (next_binary) {
+ copy_out_info = next_binary->in_frame_info;
+ left_padding = next_binary->left_padding;
+ } else {
+ copy_out_info = pipe->output_info[0];
+ copy_vf_info = pipe->vf_output_info[0];
+ ia_css_frame_info_set_format(&copy_vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE);
+ left_padding = 0;
+ }
+
+ ia_css_pipe_get_copy_binarydesc(pipe, &copy_descr,
+ &copy_in_info, &copy_out_info,
+ (next_binary) ? NULL : NULL/*TODO: &copy_vf_info*/);
+ err = ia_css_binary_find(&copy_descr, copy_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ copy_binary->left_padding = left_padding;
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+alloc_continuous_frames(
+ struct ia_css_pipe *pipe, bool init_time) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame_info ref_info;
+ enum ia_css_pipe_id pipe_id;
+ bool continuous;
+ unsigned int i, idx;
+ unsigned int num_frames;
+ struct ia_css_pipe *capture_pipe = NULL;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, init_time = %d", pipe, init_time);
+
+ if ((!pipe) || (!pipe->stream))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe_id = pipe->mode;
+ continuous = pipe->stream->config.continuous;
+
+ if (continuous)
+ {
+ if (init_time) {
+ num_frames = pipe->stream->config.init_num_cont_raw_buf;
+ pipe->stream->continuous_pipe = pipe;
+ } else
+ num_frames = pipe->stream->config.target_num_cont_raw_buf;
+ } else
+ {
+ num_frames = NUM_ONLINE_INIT_CONTINUOUS_FRAMES;
+ }
+
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ {
+ ref_info = pipe->pipe_settings.preview.preview_binary.in_frame_info;
+ } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ {
+ ref_info = pipe->pipe_settings.video.video_binary.in_frame_info;
+ } else
+ {
+ /* should not happen */
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* For CSI2+, the continuous frame will hold the full input frame */
+ ref_info.res.width = pipe->stream->config.input_config.input_res.width;
+ ref_info.res.height = pipe->stream->config.input_config.input_res.height;
+
+ /* Ensure padded width is aligned for 2401 */
+ ref_info.padded_width = CEIL_MUL(ref_info.res.width, 2 * ISP_VEC_NELEMS);
+#endif
+
+#if !defined(HAS_NO_PACKED_RAW_PIXELS)
+ if (pipe->stream->config.pack_raw_pixels)
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW_PACKED\n");
+ ref_info.format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
+ } else
+#endif
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW\n");
+ ref_info.format = IA_CSS_FRAME_FORMAT_RAW;
+ }
+
+ /* Write format back to binary */
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ {
+ pipe->pipe_settings.preview.preview_binary.in_frame_info.format =
+ ref_info.format;
+ capture_pipe = pipe->pipe_settings.preview.capture_pipe;
+ } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ {
+ pipe->pipe_settings.video.video_binary.in_frame_info.format = ref_info.format;
+ capture_pipe = pipe->pipe_settings.video.capture_pipe;
+ } else
+ {
+ /* should not happen */
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ if (init_time)
+ idx = 0;
+ else
+ idx = pipe->stream->config.init_num_cont_raw_buf;
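+	/* At init time every slot is (re)examined; afterwards only the slots
+	 * beyond the initial set are touched, leaving the already-queued
+	 * initial buffers alone. */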
+
+ for (i = idx; i < NUM_CONTINUOUS_FRAMES; i++)
+ {
+ /* free previous frame */
+ if (pipe->continuous_frames[i]) {
+ ia_css_frame_free(pipe->continuous_frames[i]);
+ pipe->continuous_frames[i] = NULL;
+ }
+ /* free previous metadata buffer */
+ ia_css_metadata_free(pipe->cont_md_buffers[i]);
+ pipe->cont_md_buffers[i] = NULL;
+
+ /* check if new frame needed */
+ if (i < num_frames) {
+ /* allocate new frame */
+ err = ia_css_frame_allocate_from_info(
+ &pipe->continuous_frames[i],
+ &ref_info);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* allocate metadata buffer */
+ pipe->cont_md_buffers[i] = ia_css_metadata_allocate(
+ &pipe->stream->info.metadata_info);
+ }
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream) {
+ if (!stream)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return alloc_continuous_frames(stream->continuous_pipe, false);
+}
+
+static enum ia_css_err
+load_preview_binaries(struct ia_css_pipe *pipe) {
+ struct ia_css_frame_info prev_in_info,
+ prev_bds_out_info,
+ prev_out_info,
+ prev_vf_info;
+ struct ia_css_binary_descr preview_descr;
+ bool online;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool continuous, need_vf_pp = false;
+ bool need_isp_copy_binary = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+#endif
+	/* preview only has 1 output pin now */
+ struct ia_css_frame_info *pipe_out_info = &pipe->output_info[0];
+ struct ia_css_preview_settings *mycs = &pipe->pipe_settings.preview;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->stream);
+ assert(pipe->mode == IA_CSS_PIPE_ID_PREVIEW);
+
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+#endif
+
+ if (mycs->preview_binary.info)
+ return IA_CSS_SUCCESS;
+
+ err = ia_css_util_check_input(&pipe->stream->config, false, false);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ /* Note: the current selection of vf_pp binary and
+ * parameterization of the preview binary contains a few pieces
+ * of hardcoded knowledge. This needs to be cleaned up such that
+ * the binary selection becomes more generic.
+ * The vf_pp binary is needed if one or more of the following features
+ * are required:
+ * 1. YUV downscaling.
+ * 2. Digital zoom.
+ * 3. An output format that is not supported by the preview binary.
+ * In practice this means something other than yuv_line or nv12.
+ * The decision if the vf_pp binary is needed for YUV downscaling is
+ * made after the preview binary selection, since some preview binaries
+ * can perform the requested YUV downscaling.
+ * */
+ need_vf_pp = pipe->config.enable_dz;
+ need_vf_pp |= pipe_out_info->format != IA_CSS_FRAME_FORMAT_YUV_LINE &&
+ !(pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12 ||
+ pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_16 ||
+ pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_TILEY);
+
+ /* Preview step 1 */
+ if (pipe->vf_yuv_ds_input_info.res.width)
+ prev_vf_info = pipe->vf_yuv_ds_input_info;
+ else
+ prev_vf_info = *pipe_out_info;
+ /* If vf_pp is needed, then preview must output yuv_line.
+	 * The exception is when vf_pp is manually disabled, which is only
+	 * done in combination with a pipeline extension that requires
+	 * yuv_line as input.
+	 */
+ if (need_vf_pp)
+ ia_css_frame_info_set_format(&prev_vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ err = ia_css_pipe_get_preview_binarydesc(
+ pipe,
+ &preview_descr,
+ &prev_in_info,
+ &prev_bds_out_info,
+ &prev_out_info,
+ &prev_vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_binary_find(&preview_descr, &mycs->preview_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ if (atomisp_hw_is_isp2401) {
+ /* The delay latency determines the number of invalid frames after
+ * a stream is started. */
+ pipe->num_invalid_frames = pipe->dvs_frame_delay;
+ pipe->info.num_invalid_frames = pipe->num_invalid_frames;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "load_preview_binaries() num_invalid_frames=%d dvs_frame_delay=%d\n",
+ pipe->num_invalid_frames, pipe->dvs_frame_delay);
+ }
+
+ /* The vf_pp binary is needed when (further) YUV downscaling is required */
+ need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.width != pipe_out_info->res.width;
+ need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.height != pipe_out_info->res.height;
+
+ /* When vf_pp is needed, then the output format of the selected
+ * preview binary must be yuv_line. If this is not the case,
+ * then the preview binary selection is done again.
+ */
+ if (need_vf_pp &&
+ (mycs->preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE))
+ {
+ /* Preview step 2 */
+ if (pipe->vf_yuv_ds_input_info.res.width)
+ prev_vf_info = pipe->vf_yuv_ds_input_info;
+ else
+ prev_vf_info = *pipe_out_info;
+
+ ia_css_frame_info_set_format(&prev_vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ err = ia_css_pipe_get_preview_binarydesc(
+ pipe,
+ &preview_descr,
+ &prev_in_info,
+ &prev_bds_out_info,
+ &prev_out_info,
+ &prev_vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_binary_find(&preview_descr,
+ &mycs->preview_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ if (need_vf_pp)
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ /* Viewfinder post-processing */
+ ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
+ &mycs->preview_binary.out_frame_info[0],
+ pipe_out_info);
+ err = ia_css_binary_find(&vf_pp_descr,
+ &mycs->vf_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, only the Direct Sensor Mode
+ * Offline Preview uses the ISP copy binary.
+ */
+ need_isp_copy_binary = !online && sensor;
+#else
+ /* About pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY:
+	 * This is typically the case with SkyCam (which has no input system), but it also applies to all cases
+	 * where the driver opts for memory based input frames. In these cases, a copy binary (which typically
+	 * copies sensor data to DDR) does not have much use.
+ */
+ if (!atomisp_hw_is_isp2401)
+ need_isp_copy_binary = !online && !continuous;
+ else
+ need_isp_copy_binary = !online && !continuous && !(pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY);
+#endif
+
+ /* Copy */
+ if (need_isp_copy_binary)
+ {
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ &mycs->preview_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ if (pipe->shading_table)
+ {
+ ia_css_shading_table_free(pipe->shading_table);
+ pipe->shading_table = NULL;
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+static void
+ia_css_binary_unload(struct ia_css_binary *binary)
+{
+ ia_css_binary_destroy_isp_parameters(binary);
+}
+
+static enum ia_css_err
+unload_preview_binaries(struct ia_css_pipe *pipe) {
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.preview.copy_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.preview.preview_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.preview.vf_pp_binary);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static const struct ia_css_fw_info *last_output_firmware(
+ const struct ia_css_fw_info *fw)
+{
+ const struct ia_css_fw_info *last_fw = NULL;
+ /* fw can be NULL */
+ IA_CSS_ENTER_LEAVE_PRIVATE("");
+
+ for (; fw; fw = fw->next) {
+ const struct ia_css_fw_info *info = fw;
+
+ if (info->info.isp.sp.enable.output)
+ last_fw = fw;
+ }
+ return last_fw;
+}
+
+static enum ia_css_err add_firmwares(
+ struct ia_css_pipeline *me,
+ struct ia_css_binary *binary,
+ const struct ia_css_fw_info *fw,
+ const struct ia_css_fw_info *last_fw,
+ unsigned int binary_mode,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_frame *vf_frame,
+ struct ia_css_pipeline_stage **my_stage,
+ struct ia_css_pipeline_stage **vf_stage)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage *extra_stage = NULL;
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* all args can be NULL ??? */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_firmwares() enter:\n");
+
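+	/* Chain the firmware stages: whenever a stage produces an output
+	 * frame, that frame becomes the input of the next stage in the list. */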
+ for (; fw; fw = fw->next) {
+ struct ia_css_frame *out[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+ struct ia_css_frame *in = NULL;
+ struct ia_css_frame *vf = NULL;
+
+ if ((fw == last_fw) && (fw->info.isp.sp.enable.out_frame != 0)) {
+ out[0] = out_frame;
+ }
+ if (fw->info.isp.sp.enable.in_frame != 0) {
+ in = in_frame;
+ }
+ if (fw->info.isp.sp.enable.out_frame != 0) {
+ vf = vf_frame;
+ }
+ ia_css_pipe_get_firmwares_stage_desc(&stage_desc, binary,
+ out, in, vf, fw, binary_mode);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &extra_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ if (fw->info.isp.sp.enable.output != 0)
+ in_frame = extra_stage->args.out_frame[0];
+ if (my_stage && !*my_stage && extra_stage)
+ *my_stage = extra_stage;
+ if (vf_stage && !*vf_stage && extra_stage &&
+ fw->info.isp.sp.enable.vf_veceven)
+ *vf_stage = extra_stage;
+ }
+ return err;
+}
+
+static enum ia_css_err add_vf_pp_stage(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_binary *vf_pp_binary,
+ struct ia_css_pipeline_stage **vf_pp_stage)
+{
+ struct ia_css_pipeline *me = NULL;
+ const struct ia_css_fw_info *last_fw = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* out_frame can be NULL ??? */
+
+ if (!pipe)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (!in_frame)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (!vf_pp_binary)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (!vf_pp_stage)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_pipe_util_create_output_frames(out_frames);
+ me = &pipe->pipeline;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_vf_pp_stage() enter:\n");
+
+ *vf_pp_stage = NULL;
+
+ last_fw = last_output_firmware(pipe->vf_stage);
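+	/* If output firmware is appended, the vf_pp binary must not write the
+	 * final output frame itself; the last firmware stage does that. */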
+ if (!pipe->extra_config.disable_vf_pp) {
+ if (last_fw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, vf_pp_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, vf_pp_binary,
+ out_frames, in_frame, NULL);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc, vf_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ in_frame = (*vf_pp_stage)->args.out_frame[0];
+ }
+ err = add_firmwares(me, vf_pp_binary, pipe->vf_stage, last_fw,
+ IA_CSS_BINARY_MODE_VF_PP,
+ in_frame, out_frame, NULL,
+ vf_pp_stage, NULL);
+ return err;
+}
+
+static enum ia_css_err add_yuv_scaler_stage(
+ struct ia_css_pipe *pipe,
+ struct ia_css_pipeline *me,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_frame *internal_out_frame,
+ struct ia_css_binary *yuv_scaler_binary,
+ struct ia_css_pipeline_stage **pre_vf_pp_stage)
+{
+ const struct ia_css_fw_info *last_fw;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *vf_frame = NULL;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* out_frame can be NULL ??? */
+ assert(in_frame);
+ assert(pipe);
+ assert(me);
+ assert(yuv_scaler_binary);
+ assert(pre_vf_pp_stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_yuv_scaler_stage() enter:\n");
+
+ *pre_vf_pp_stage = NULL;
+ ia_css_pipe_util_create_output_frames(out_frames);
+
+ last_fw = last_output_firmware(pipe->output_stage);
+
+ if (last_fw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ yuv_scaler_binary, out_frames, in_frame, vf_frame);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_util_set_output_frames(out_frames, 1, internal_out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ yuv_scaler_binary, out_frames, in_frame, vf_frame);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ pre_vf_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ in_frame = (*pre_vf_pp_stage)->args.out_frame[0];
+
+ err = add_firmwares(me, yuv_scaler_binary, pipe->output_stage, last_fw,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ in_frame, out_frame, vf_frame,
+ NULL, pre_vf_pp_stage);
+	/* If a firmware produces vf_pp output, we set that as vf_pp input */
+ (*pre_vf_pp_stage)->args.vf_downscale_log2 =
+ yuv_scaler_binary->vf_downscale_log2;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_yuv_scaler_stage() leave:\n");
+ return err;
+}
+
+static enum ia_css_err add_capture_pp_stage(
+ struct ia_css_pipe *pipe,
+ struct ia_css_pipeline *me,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_binary *capture_pp_binary,
+ struct ia_css_pipeline_stage **capture_pp_stage)
+{
+ const struct ia_css_fw_info *last_fw = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *vf_frame = NULL;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* out_frame can be NULL ??? */
+ assert(in_frame);
+ assert(pipe);
+ assert(me);
+ assert(capture_pp_binary);
+ assert(capture_pp_stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_capture_pp_stage() enter:\n");
+
+ *capture_pp_stage = NULL;
+ ia_css_pipe_util_create_output_frames(out_frames);
+
+ last_fw = last_output_firmware(pipe->output_stage);
+ err = ia_css_frame_allocate_from_info(&vf_frame,
+ &capture_pp_binary->vf_frame_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ if (last_fw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ capture_pp_binary, out_frames, NULL, vf_frame);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ capture_pp_binary, out_frames, NULL, vf_frame);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ capture_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = add_firmwares(me, capture_pp_binary, pipe->output_stage, last_fw,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ in_frame, out_frame, vf_frame,
+ NULL, capture_pp_stage);
+	/* If a firmware produces vf_pp output, we set that as vf_pp input */
+ if (*capture_pp_stage) {
+ (*capture_pp_stage)->args.vf_downscale_log2 =
+ capture_pp_binary->vf_downscale_log2;
+ }
+ return err;
+}
+
+static void sh_css_setup_queues(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_host_sp_queues_initialized;
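+	/* Assumption (hive convention): sp_address_of() resolves its argument
+	 * via the HIVE_ADDR_ prefix, so this local must keep this exact name. */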
+
+ sh_css_hmm_buffer_record_init();
+
+ sh_css_event_init_irq_mask();
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_host_sp_queues_initialized =
+ fw->info.sp.host_sp_queues_initialized;
+
+ ia_css_bufq_init();
+
+ /* set "host_sp_queues_initialized" to "true" */
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_queues_initialized),
+ (uint32_t)(1));
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_setup_queues() leave:\n");
+}
+
+static enum ia_css_err
+init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *vf_frame, unsigned int idx) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+
+ assert(vf_frame);
+
+ sh_css_pipe_get_viewfinder_frame_info(pipe, &vf_frame->info, idx);
+ vf_frame->contiguous = false;
+ vf_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, thread_id, &queue_id);
+ vf_frame->dynamic_queue_id = queue_id;
+ vf_frame->buf_type = IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx;
+
+ err = ia_css_frame_init_planes(vf_frame);
+ return err;
+}
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+static unsigned int
+get_crop_lines_for_bayer_order(
+ const struct ia_css_stream_config *config)
+{
+ assert(config);
+ if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_BGGR)
+ || (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG))
+ return 1;
+
+ return 0;
+}
+
+static unsigned int
+get_crop_columns_for_bayer_order(
+ const struct ia_css_stream_config *config)
+{
+ assert(config);
+ if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_RGGB)
+ || (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG))
+ return 1;
+
+ return 0;
+}
+
+/* This function gets the sum of all extra pixels in addition to the effective
+ * input; this includes the dvs envelope and filter run-in */
+static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
+ unsigned int *extra_row, unsigned int *extra_column)
+{
+ enum ia_css_pipe_id pipe_id = pipe->mode;
+ unsigned int left_cropping = 0, top_cropping = 0;
+ unsigned int i;
+ struct ia_css_resolution dvs_env = pipe->config.dvs_envelope;
+
+ /* The dvs envelope info may not be correctly sent down via pipe config
+ * The check is made and the correct value is populated in the binary info
+ * Use this value when computing crop, else excess lines may get trimmed
+ */
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ if (pipe->pipe_settings.preview.preview_binary.info) {
+ left_cropping =
+ pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.left_cropping;
+ top_cropping =
+ pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.top_cropping;
+ }
+ dvs_env = pipe->pipe_settings.preview.preview_binary.dvs_envelope;
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ if (pipe->pipe_settings.video.video_binary.info) {
+ left_cropping =
+ pipe->pipe_settings.video.video_binary.info->sp.pipeline.left_cropping;
+ top_cropping =
+ pipe->pipe_settings.video.video_binary.info->sp.pipeline.top_cropping;
+ }
+ dvs_env = pipe->pipe_settings.video.video_binary.dvs_envelope;
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info) {
+ left_cropping +=
+ pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.left_cropping;
+ top_cropping +=
+ pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.top_cropping;
+ }
+ dvs_env.width +=
+ pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.width;
+ dvs_env.height +=
+ pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.height;
+ }
+ break;
+ default:
+ break;
+ }
+
+ *extra_row = top_cropping + dvs_env.height;
+ *extra_column = left_cropping + dvs_env.width;
+}
+
+void
+ia_css_get_crop_offsets(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *in_frame)
+{
+ unsigned int row = 0;
+ unsigned int column = 0;
+ struct ia_css_resolution *input_res;
+ struct ia_css_resolution *effective_res;
+ unsigned int extra_row = 0, extra_col = 0;
+ unsigned int min_reqd_height, min_reqd_width;
+
+ assert(pipe);
+ assert(pipe->stream);
+ assert(in_frame);
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p effective_wd = %u effective_ht = %u",
+ pipe, pipe->config.input_effective_res.width,
+ pipe->config.input_effective_res.height);
+
+ input_res = &pipe->stream->config.input_config.input_res;
+#ifndef ISP2401
+ effective_res = &pipe->stream->config.input_config.effective_res;
+#else
+ effective_res = &pipe->config.input_effective_res;
+#endif
+
+ get_pipe_extra_pixel(pipe, &extra_row, &extra_col);
+
+ in_frame->raw_bayer_order = pipe->stream->config.input_config.bayer_order;
+
+ min_reqd_height = effective_res->height + extra_row;
+ min_reqd_width = effective_res->width + extra_col;
+
+ if (input_res->height > min_reqd_height) {
+ row = (input_res->height - min_reqd_height) / 2;
+ row &= ~0x1;
+ }
+ if (input_res->width > min_reqd_width) {
+ column = (input_res->width - min_reqd_width) / 2;
+ column &= ~0x1;
+ }
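+	/* Keep the offsets even so the bayer phase of the cropped window is
+	 * preserved; the order correction below shifts by at most one. */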
+
+ /*
+ * TODO:
+ * 1. Require the special support for RAW10 packed mode.
+ * 2. Require the special support for the online use cases.
+ */
+
+	/* The ISP expects GRBG bayer order; we skip one line and/or one
+	 * column to correct it in case the input bayer order is different.
+	 */
+ column += get_crop_columns_for_bayer_order(&pipe->stream->config);
+ row += get_crop_lines_for_bayer_order(&pipe->stream->config);
+
+ in_frame->crop_info.start_column = column;
+ in_frame->crop_info.start_line = row;
+
+ IA_CSS_LEAVE_PRIVATE("void start_col: %u start_row: %u", column, row);
+
+ return;
+}
+#endif
+
+static enum ia_css_err
+init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *frame, enum ia_css_frame_format format) {
+ struct ia_css_frame *in_frame;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+
+ assert(frame);
+ in_frame = frame;
+
+ in_frame->info.format = format;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (format == IA_CSS_FRAME_FORMAT_RAW)
+ in_frame->info.format = (pipe->stream->config.pack_raw_pixels) ?
+ IA_CSS_FRAME_FORMAT_RAW_PACKED : IA_CSS_FRAME_FORMAT_RAW;
+#endif
+
+ in_frame->info.res.width = pipe->stream->config.input_config.input_res.width;
+ in_frame->info.res.height = pipe->stream->config.input_config.input_res.height;
+ in_frame->info.raw_bit_depth =
+ ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ ia_css_frame_info_set_width(&in_frame->info, pipe->stream->config.input_config.input_res.width, 0);
+ in_frame->contiguous = false;
+ in_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id);
+ in_frame->dynamic_queue_id = queue_id;
+ in_frame->buf_type = IA_CSS_BUFFER_TYPE_INPUT_FRAME;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ ia_css_get_crop_offsets(pipe, &in_frame->info);
+#endif
+ err = ia_css_frame_init_planes(in_frame);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "init_in_frameinfo_memory_defaults() bayer_order = %d:\n", in_frame->info.raw_bayer_order);
+
+ return err;
+}
+
+static enum ia_css_err
+init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *out_frame, unsigned int idx) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+
+ assert(out_frame);
+
+ sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, idx);
+ out_frame->contiguous = false;
+ out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, thread_id, &queue_id);
+ out_frame->dynamic_queue_id = queue_id;
+ out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx;
+ err = ia_css_frame_init_planes(out_frame);
+
+ return err;
+}
+
+/* Create stages for video pipe */
+static enum ia_css_err create_host_video_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline_stage_desc stage_desc;
+ struct ia_css_binary *copy_binary, *video_binary,
+ *yuv_scaler_binary, *vf_pp_binary;
+ struct ia_css_pipeline_stage *copy_stage = NULL;
+ struct ia_css_pipeline_stage *video_stage = NULL;
+ struct ia_css_pipeline_stage *yuv_scaler_stage = NULL;
+ struct ia_css_pipeline_stage *vf_pp_stage = NULL;
+ struct ia_css_pipeline *me;
+ struct ia_css_frame *in_frame = NULL;
+ struct ia_css_frame *out_frame;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool need_copy = false;
+ bool need_vf_pp = false;
+ bool need_yuv_pp = false;
+ unsigned int num_output_pins;
+ bool need_in_frameinfo_memory = false;
+
+ unsigned int i, num_yuv_scaler;
+ bool *is_output_stage = NULL;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_pipe_util_create_output_frames(out_frames);
+ out_frame = &pipe->out_frame_struct;
+
+ /* pipeline already created as part of create_host_pipeline_structure */
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+
+ me->dvs_frame_delay = pipe->dvs_frame_delay;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following: online or continuous
+ */
+ need_in_frameinfo_memory = !(pipe->stream->config.online ||
+ pipe->stream->config.continuous);
+#else
+	/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode ==
+ IA_CSS_INPUT_MODE_MEMORY;
+#endif
+
+	/* Construct in_frame info (only in case we have dynamic input) */
+ if (need_in_frameinfo_memory) {
+ in_frame = &pipe->in_frame_struct;
+ err = init_in_frameinfo_memory_defaults(pipe, in_frame,
+ IA_CSS_FRAME_FORMAT_RAW);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ out_frame->data = 0;
+ err = init_out_frameinfo_defaults(pipe, out_frame, 0);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ vf_frame = &pipe->vf_frame_struct;
+ vf_frame->data = 0;
+ err = init_vf_frameinfo_defaults(pipe, vf_frame, 0);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ copy_binary = &pipe->pipe_settings.video.copy_binary;
+ video_binary = &pipe->pipe_settings.video.video_binary;
+ vf_pp_binary = &pipe->pipe_settings.video.vf_pp_binary;
+ num_output_pins = video_binary->info->num_output_pins;
+
+ yuv_scaler_binary = pipe->pipe_settings.video.yuv_scaler_binary;
+ num_yuv_scaler = pipe->pipe_settings.video.num_yuv_scaler;
+ is_output_stage = pipe->pipe_settings.video.is_output_stage;
+
+ need_copy = (copy_binary && copy_binary->info);
+ need_vf_pp = (vf_pp_binary && vf_pp_binary->info);
+ need_yuv_pp = (yuv_scaler_binary && yuv_scaler_binary->info);
+
+ if (need_copy) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &copy_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ in_frame = me->stages->args.out_frame[0];
+ } else if (pipe->stream->config.continuous) {
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When continuous is enabled, configure in_frame with the
+ * last pipe, which is the copy pipe.
+ */
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+#else
+ in_frame = pipe->continuous_frames[0];
+#endif
+ }
+
+ ia_css_pipe_util_set_output_frames(out_frames, 0,
+ need_yuv_pp ? NULL : out_frame);
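+	/* With YUV post-scaling, the video binary writes to an intermediate
+	 * frame; the final output comes from the scaler stages added below. */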
+
+ /* when the video binary supports a second output pin,
+ it can directly produce the vf_frame. */
+ if (need_vf_pp) {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
+ out_frames, in_frame, vf_frame);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &video_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+	/* If we use copy instead of video, the input must be yuv instead of raw */
+ if (video_stage) {
+ video_stage->args.copy_vf =
+ video_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY;
+ video_stage->args.copy_output = video_stage->args.copy_vf;
+ }
+
+ /* when the video binary supports only 1 output pin, vf_pp is needed to
+ produce the vf_frame.*/
+ if (need_vf_pp && video_stage) {
+ in_frame = video_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary,
+ &vf_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ if (video_stage) {
+ int frm;
+		/* identical for ISP2401 and non-ISP2401 */
+		for (frm = 0; frm < NUM_TNR_FRAMES; frm++) {
+ video_stage->args.tnr_frames[frm] =
+ pipe->pipe_settings.video.tnr_frames[frm];
+ }
+ for (frm = 0; frm < MAX_NUM_VIDEO_DELAY_FRAMES; frm++) {
+ video_stage->args.delay_frames[frm] =
+ pipe->pipe_settings.video.delay_frames[frm];
+ }
+ }
+
+ /* Append Extension on Video out, if enabled */
+ if (!need_vf_pp && video_stage && pipe->config.acc_extension &&
+ (pipe->config.acc_extension->info.isp.type == IA_CSS_ACC_OUTPUT)) {
+ struct ia_css_frame *out = NULL;
+ struct ia_css_frame *in = NULL;
+
+ if ((pipe->config.acc_extension->info.isp.sp.enable.output) &&
+ (pipe->config.acc_extension->info.isp.sp.enable.in_frame) &&
+ (pipe->config.acc_extension->info.isp.sp.enable.out_frame)) {
+ /* In/Out Frame mapping to support output frame extension.*/
+ out = video_stage->args.out_frame[0];
+ err = ia_css_frame_allocate_from_info(&in, &pipe->output_info[0]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ video_stage->args.out_frame[0] = in;
+ }
+
+ err = add_firmwares(me, video_binary, pipe->output_stage,
+ last_output_firmware(pipe->output_stage),
+ IA_CSS_BINARY_MODE_VIDEO,
+ in, out, NULL, &video_stage, NULL);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ if (need_yuv_pp && video_stage) {
+ struct ia_css_frame *tmp_in_frame = video_stage->args.out_frame[0];
+ struct ia_css_frame *tmp_out_frame = NULL;
+
+ for (i = 0; i < num_yuv_scaler; i++) {
+			if (is_output_stage[i])
+				tmp_out_frame = out_frame;
+			else
+				tmp_out_frame = NULL;
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame,
+ NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* we use output port 1 as internal output port */
+ if (yuv_scaler_stage)
+ tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
+ }
+ }
+
+ pipe->pipeline.acquire_isp_each_stage = false;
+ ia_css_pipeline_finalize_stages(&pipe->pipeline,
+ pipe->stream->config.continuous);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
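+
+/*
+ * Sketch of the stage chain built above (derived from the code; the
+ * exact set depends on need_copy/need_vf_pp/need_yuv_pp):
+ *
+ *   [copy_binary] -> video_binary -> [vf_pp_binary]
+ *                                 -> [yuv_scaler_binary[0..num_yuv_scaler-1]]
+ *
+ * The yuv scaler stages chain through output port 1; only the stages
+ * flagged in is_output_stage[] write to the final out_frame.
+ */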
+
+static enum ia_css_err
+create_host_acc_pipeline(struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ const struct ia_css_fw_info *fw;
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (!pipe->stream))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe->pipeline.num_execs = pipe->config.acc_num_execs;
+ /* Reset pipe_qos_config to default disable all QOS extension stages */
+ if (pipe->config.acc_extension)
+ pipe->pipeline.pipe_qos_config = 0;
+
+ fw = pipe->vf_stage;
+	for (; fw; fw = fw->next)
+ {
+ err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ for (i = 0; i < pipe->config.num_acc_stages; i++)
+ {
+ struct ia_css_fw_info *fw = pipe->config.acc_stages[i];
+
+ err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* Create stages for preview */
+static enum ia_css_err
+create_host_preview_pipeline(struct ia_css_pipe *pipe) {
+ struct ia_css_pipeline_stage *copy_stage = NULL;
+ struct ia_css_pipeline_stage *preview_stage = NULL;
+ struct ia_css_pipeline_stage *vf_pp_stage = NULL;
+ struct ia_css_pipeline_stage_desc stage_desc;
+ struct ia_css_pipeline *me = NULL;
+ struct ia_css_binary *copy_binary, *preview_binary, *vf_pp_binary = NULL;
+ struct ia_css_frame *in_frame = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *out_frame;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ bool need_in_frameinfo_memory = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+ bool buffered_sensor = false;
+ bool online = false;
+ bool continuous = false;
+#endif
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ia_css_pipe_util_create_output_frames(out_frames);
+ /* pipeline already created as part of create_host_pipeline_structure */
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Preview
+ * - Buffered Sensor Mode Online Preview
+ * - Direct Sensor Mode Continuous Preview
+ * - Buffered Sensor Mode Continuous Preview
+ */
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+ buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor && (online || continuous)));
+#else
+	/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+#endif
+ if (need_in_frameinfo_memory)
+ {
+ err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
+ IA_CSS_FRAME_FORMAT_RAW);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ in_frame = &me->in_frame;
+ } else
+ {
+ in_frame = NULL;
+ }
+
+ err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ out_frame = &me->out_frame[0];
+
+ copy_binary = &pipe->pipe_settings.preview.copy_binary;
+ preview_binary = &pipe->pipe_settings.preview.preview_binary;
+ if (pipe->pipe_settings.preview.vf_pp_binary.info)
+ vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary;
+
+ if (pipe->pipe_settings.preview.copy_binary.info)
+ {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &copy_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ in_frame = me->stages->args.out_frame[0];
+#ifndef ISP2401
+ } else
+ {
+#else
+ } else if (pipe->stream->config.continuous)
+ {
+#endif
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When continuous is enabled, configure in_frame with the
+ * last pipe, which is the copy pipe.
+ */
+ if (continuous || !online) {
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ }
+#else
+ in_frame = pipe->continuous_frames[0];
+#endif
+ }
+
+ if (vf_pp_binary)
+ {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
+ out_frames, in_frame, NULL);
+ } else
+ {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
+ out_frames, in_frame, NULL);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &preview_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+	/* If we use copy instead of preview, the input must be yuv instead of raw */
+ preview_stage->args.copy_vf =
+ preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY;
+ preview_stage->args.copy_output = !preview_stage->args.copy_vf;
+ if (preview_stage->args.copy_vf && !preview_stage->args.out_vf_frame)
+ {
+ /* in case of copy, use the vf frame as output frame */
+ preview_stage->args.out_vf_frame =
+ preview_stage->args.out_frame[0];
+ }
+ if (vf_pp_binary)
+ {
+ if (preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)
+ in_frame = preview_stage->args.out_vf_frame;
+ else
+ in_frame = preview_stage->args.out_frame[0];
+ err = add_vf_pp_stage(pipe, in_frame, out_frame, vf_pp_binary,
+ &vf_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ pipe->pipeline.acquire_isp_each_stage = false;
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void send_raw_frames(struct ia_css_pipe *pipe)
+{
+ if (pipe->stream->config.continuous) {
+ unsigned int i;
+
+ sh_css_update_host2sp_cont_num_raw_frames
+ (pipe->stream->config.init_num_cont_raw_buf, true);
+ sh_css_update_host2sp_cont_num_raw_frames
+ (pipe->stream->config.target_num_cont_raw_buf, false);
+
+ /* Hand-over all the SP-internal buffers */
+ for (i = 0; i < pipe->stream->config.init_num_cont_raw_buf; i++) {
+ sh_css_update_host2sp_offline_frame(i,
+ pipe->continuous_frames[i], pipe->cont_md_buffers[i]);
+ }
+ }
+}
+
+static enum ia_css_err
+preview_start(struct ia_css_pipe *pipe) {
+ struct ia_css_pipeline *me;
+ struct ia_css_binary *copy_binary, *preview_binary, *vf_pp_binary = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipe *copy_pipe, *capture_pipe;
+ struct ia_css_pipe *acc_pipe;
+ enum sh_css_pipe_config_override copy_ovrd;
+ enum ia_css_input_mode preview_pipe_input_mode;
+ const struct ia_css_coordinate *coord = NULL;
+ const struct ia_css_isp_parameters *params = NULL;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ me = &pipe->pipeline;
+
+ preview_pipe_input_mode = pipe->stream->config.mode;
+
+ copy_pipe = pipe->pipe_settings.preview.copy_pipe;
+ capture_pipe = pipe->pipe_settings.preview.capture_pipe;
+ acc_pipe = pipe->pipe_settings.preview.acc_pipe;
+
+ copy_binary = &pipe->pipe_settings.preview.copy_binary;
+ preview_binary = &pipe->pipe_settings.preview.preview_binary;
+ if (pipe->pipe_settings.preview.vf_pp_binary.info)
+ vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary;
+
+ sh_css_metrics_start_frame();
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* multi stream video needs mipi buffers */
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+#endif
+ send_raw_frames(pipe);
+
+ {
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+
+ if (pipe->stream->cont_capt)
+ {
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
+ &thread_id);
+ copy_ovrd |= 1 << thread_id;
+ }
+ }
+
+ if (atomisp_hw_is_isp2401) {
+ coord = &pipe->config.internal_frame_origin_bqs_on_sctbl;
+ params = pipe->stream->isp_params_configs;
+ }
+
+ /* Construct and load the copy pipe */
+ if (pipe->stream->config.continuous)
+ {
+ sh_css_sp_init_pipeline(&copy_pipe->pipeline,
+ IA_CSS_PIPE_ID_COPY,
+ (uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),
+ false,
+ pipe->stream->config.pixels_per_clock == 2, false,
+ false, pipe->required_bds_factor,
+ copy_ovrd,
+ pipe->stream->config.mode,
+ &pipe->stream->config.metadata_config,
+ &pipe->stream->info.metadata_info,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ pipe->stream->config.source.port.port,
+#endif
+ coord,
+ params);
+
+		/* Make the preview pipe start with mem mode input; the copy
+		 * pipe handles the actual input mode. */
+ preview_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
+ }
+
+ /* Construct and load the capture pipe */
+ if (pipe->stream->cont_capt)
+ {
+ sh_css_sp_init_pipeline(&capture_pipe->pipeline,
+ IA_CSS_PIPE_ID_CAPTURE,
+ (uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),
+ capture_pipe->config.default_capture_config.enable_xnr != 0,
+ capture_pipe->stream->config.pixels_per_clock == 2,
+ true, /* continuous */
+ false, /* offline */
+ capture_pipe->required_bds_factor,
+ 0,
+ IA_CSS_INPUT_MODE_MEMORY,
+ &pipe->stream->config.metadata_config,
+ &pipe->stream->info.metadata_info,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ (enum mipi_port_id)0,
+#endif
+ coord,
+ params);
+ }
+
+ if (acc_pipe)
+ {
+ sh_css_sp_init_pipeline(&acc_pipe->pipeline,
+ IA_CSS_PIPE_ID_ACC,
+ (uint8_t)ia_css_pipe_get_pipe_num(acc_pipe),
+ false,
+ pipe->stream->config.pixels_per_clock == 2,
+ false, /* continuous */
+ false, /* offline */
+ pipe->required_bds_factor,
+ 0,
+ IA_CSS_INPUT_MODE_MEMORY,
+ NULL,
+ NULL,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ (enum mipi_port_id)0,
+#endif
+ coord,
+ params);
+ }
+
+ start_pipe(pipe, copy_ovrd, preview_pipe_input_mode);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
+ const struct ia_css_buffer *buffer) {
+ enum ia_css_err return_err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+ struct ia_css_rmgr_vbuf_handle p_vbuf;
+ struct ia_css_rmgr_vbuf_handle *h_vbuf;
+ struct sh_css_hmm_buffer ddr_buffer;
+ enum ia_css_buffer_type buf_type;
+ enum ia_css_pipe_id pipe_id;
+ bool ret_err;
+
+ IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);
+
+ if ((!pipe) || (!buffer))
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ buf_type = buffer->type;
+	/* The following code will be enabled when
+	 * IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME is removed. */
+#if 0
+ if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ {
+ bool found_pipe = false;
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if ((buffer->data.frame->info.res.width == pipe->output_info[i].res.width) &&
+ (buffer->data.frame->info.res.height == pipe->output_info[i].res.height)) {
+ buf_type += i;
+ found_pipe = true;
+ break;
+ }
+ }
+ if (!found_pipe)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ {
+ bool found_pipe = false;
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if ((buffer->data.frame->info.res.width == pipe->vf_output_info[i].res.width) &&
+ (buffer->data.frame->info.res.height == pipe->vf_output_info[i].res.height)) {
+ buf_type += i;
+ found_pipe = true;
+ break;
+ }
+ }
+ if (!found_pipe)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+#endif
+ pipe_id = pipe->mode;
+
+ IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type);
+
+ assert(pipe_id < IA_CSS_PIPE_ID_NUM);
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ if ((buf_type == IA_CSS_BUFFER_TYPE_INVALID) ||
+ (buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE) ||
+ (pipe_id >= IA_CSS_PIPE_ID_NUM))
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ if (!ret_err)
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
+ if (!ret_err)
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES))
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (!sh_css_sp_is_running())
+ {
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ pipeline = &pipe->pipeline;
+
+ assert(pipeline ||
+ pipe_id == IA_CSS_PIPE_ID_COPY ||
+ pipe_id == IA_CSS_PIPE_ID_ACC);
+
+ assert(sizeof(NULL) <= sizeof(ddr_buffer.kernel_ptr));
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(NULL);
+ ddr_buffer.cookie_ptr = buffer->driver_cookie;
+ ddr_buffer.timing_data = buffer->timing_data;
+
+ if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS)
+ {
+ if (!buffer->data.stats_3a) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_3a);
+ ddr_buffer.payload.s3a = *buffer->data.stats_3a;
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS)
+ {
+ if (!buffer->data.stats_dvs) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_dvs);
+ ddr_buffer.payload.dis = *buffer->data.stats_dvs;
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_METADATA)
+ {
+ if (!buffer->data.metadata) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.metadata);
+ ddr_buffer.payload.metadata = *buffer->data.metadata;
+ } else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME))
+ {
+ if (!buffer->data.frame) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.frame);
+ ddr_buffer.payload.frame.frame_data = buffer->data.frame->data;
+ ddr_buffer.payload.frame.flashed = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_enqueue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
+ buf_type, buffer->data.frame->data);
+
+#if CONFIG_ON_FRAME_ENQUEUE()
+ return_err = set_config_on_frame_enqueue(
+ &buffer->data.frame->info,
+ &ddr_buffer.payload.frame);
+ if (return_err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(return_err);
+ return return_err;
+ }
+#endif
+ }
+
+ /* start of test for using rmgr for acq/rel memory */
+ p_vbuf.vptr = 0;
+ p_vbuf.count = 0;
+ p_vbuf.size = sizeof(struct sh_css_hmm_buffer);
+ h_vbuf = &p_vbuf;
+ /* TODO: change next to correct pool for optimization */
+ ia_css_rmgr_acq_vbuf(hmm_buffer_pool, &h_vbuf);
+
+ assert(h_vbuf);
+ assert(h_vbuf->vptr != 0x0);
+
+ if ((!h_vbuf) || (h_vbuf->vptr == 0x0))
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ mmgr_store(h_vbuf->vptr,
+ (void *)(&ddr_buffer),
+ sizeof(struct sh_css_hmm_buffer));
+ if ((buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS)
+ || (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS)
+ || (buf_type == IA_CSS_BUFFER_TYPE_LACE_STATISTICS))
+ {
+ if (!pipeline) {
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
+ IA_CSS_LOG("pipeline is empty!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+			/* The SP will read the params
+			 * after it gets the empty 3a and dis buffers. */
+ if (STATS_ENABLED(stage)) {
+ /* there is a stage that needs it */
+ return_err = ia_css_bufq_enqueue_buffer(thread_id,
+ queue_id,
+ (uint32_t)h_vbuf->vptr);
+ }
+ }
+ } else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_METADATA))
+ {
+ return_err = ia_css_bufq_enqueue_buffer(thread_id,
+ queue_id,
+ (uint32_t)h_vbuf->vptr);
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ if ((return_err == IA_CSS_SUCCESS) &&
+ (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)) {
+ IA_CSS_LOG("pfp: enqueued OF %d to q %d thread %d",
+ ddr_buffer.payload.frame.frame_data,
+ queue_id, thread_id);
+ }
+#endif
+ }
+
+ if (return_err == IA_CSS_SUCCESS)
+ {
+ if (sh_css_hmm_buffer_record_acquire(
+ h_vbuf, buf_type,
+ HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
+ IA_CSS_LOG("send vbuf=%p", h_vbuf);
+ } else {
+ return_err = IA_CSS_ERR_INTERNAL_ERROR;
+ IA_CSS_ERROR("hmm_buffer_record[]: no available slots\n");
+ }
+ }
+
+ /*
+ * Tell the SP which queues are not empty,
+ * by sending the software event.
+ */
+ if (return_err == IA_CSS_SUCCESS)
+ {
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ return_err = ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED,
+ (uint8_t)thread_id,
+ queue_id,
+ 0);
+ } else
+ {
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
+ IA_CSS_ERROR("buffer not enqueued");
+ }
+
+ IA_CSS_LEAVE("return value = %d", return_err);
+
+ return return_err;
+}
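+
+/*
+ * Summary of the enqueue protocol implemented above (drawn from the
+ * code, not an additional API): the host
+ * 1) acquires a vbuf handle from hmm_buffer_pool (ia_css_rmgr_acq_vbuf),
+ * 2) stores the sh_css_hmm_buffer descriptor to it (mmgr_store),
+ * 3) enqueues the vptr on the per-thread queue (ia_css_bufq_enqueue_buffer),
+ * 4) records the handle (sh_css_hmm_buffer_record_acquire), and
+ * 5) notifies the SP with IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED.
+ * On failure the vbuf handle is released again (ia_css_rmgr_rel_vbuf).
+ */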
+
+/*
+ * TODO: Free up the hmm memory space.
+ */
+enum ia_css_err
+ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
+ struct ia_css_buffer *buffer) {
+ enum ia_css_err return_err;
+ enum sh_css_queue_id queue_id;
+ hrt_vaddress ddr_buffer_addr = (hrt_vaddress)0;
+ struct sh_css_hmm_buffer ddr_buffer;
+ enum ia_css_buffer_type buf_type;
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ hrt_address kernel_ptr = 0;
+ bool ret_err;
+
+ IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);
+
+ if ((!pipe) || (!buffer))
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe_id = pipe->mode;
+
+ buf_type = buffer->type;
+
+ IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type);
+
+ ddr_buffer.kernel_ptr = 0;
+
+ ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ if (!ret_err)
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
+ if (!ret_err)
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES))
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (!sh_css_sp_is_running())
+ {
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ return_err = ia_css_bufq_dequeue_buffer(queue_id,
+ (uint32_t *)&ddr_buffer_addr);
+
+ if (return_err == IA_CSS_SUCCESS)
+ {
+ struct ia_css_frame *frame;
+ struct sh_css_hmm_buffer_record *hmm_buffer_record = NULL;
+
+ IA_CSS_LOG("receive vbuf=%x", (int)ddr_buffer_addr);
+
+ /* Validate the ddr_buffer_addr and buf_type */
+ hmm_buffer_record = sh_css_hmm_buffer_record_validate(
+ ddr_buffer_addr, buf_type);
+ if (hmm_buffer_record) {
+ /* valid hmm_buffer_record found. Save the kernel_ptr
+ * for validation after performing mmgr_load. The
+ * vbuf handle and buffer_record can be released.
+ */
+ kernel_ptr = hmm_buffer_record->kernel_ptr;
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &hmm_buffer_record->h_vbuf);
+ sh_css_hmm_buffer_record_reset(hmm_buffer_record);
+ } else {
+ IA_CSS_ERROR("hmm_buffer_record not found (0x%x) buf_type(%d)",
+ ddr_buffer_addr, buf_type);
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ mmgr_load(ddr_buffer_addr,
+ &ddr_buffer,
+ sizeof(struct sh_css_hmm_buffer));
+
+		/* If the kernel_ptr is 0 or invalid, return an error.
+		 * Do not access the buffer via the kernel_ptr.
+		 */
+ if ((ddr_buffer.kernel_ptr == 0) ||
+ (kernel_ptr != HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
+ IA_CSS_ERROR("kernel_ptr invalid");
+ IA_CSS_ERROR("expected: (0x%llx)", (u64)kernel_ptr);
+ IA_CSS_ERROR("actual: (0x%llx)", (u64)HOST_ADDRESS(ddr_buffer.kernel_ptr));
+ IA_CSS_ERROR("buf_type: %d\n", buf_type);
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ if (ddr_buffer.kernel_ptr != 0) {
+ /* buffer->exp_id : all instances to be removed later once the driver change
+ * is completed. See patch #5758 for reference */
+ buffer->exp_id = 0;
+ buffer->driver_cookie = ddr_buffer.cookie_ptr;
+ buffer->timing_data = ddr_buffer.timing_data;
+
+ if ((buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) ||
+ (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)) {
+ buffer->isys_eof_clock_tick.ticks = ddr_buffer.isys_eof_clock_tick;
+ }
+
+ switch (buf_type) {
+ case IA_CSS_BUFFER_TYPE_INPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+			if (pipe && pipe->stop_requested) {
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* free mipi frames only for old input system
+ * for 2401 it is done in ia_css_stream_destroy call
+ */
+ return_err = free_mipi_frames(pipe);
+ if (return_err != IA_CSS_SUCCESS) {
+ IA_CSS_LOG("free_mipi_frames() failed");
+ IA_CSS_LEAVE_ERR(return_err);
+ return return_err;
+ }
+#endif
+ pipe->stop_requested = false;
+ }
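+			/* fall through to the common frame handling below */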
+ case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
+ frame = (struct ia_css_frame *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->data.frame = frame;
+ buffer->exp_id = ddr_buffer.payload.frame.exp_id;
+ frame->exp_id = ddr_buffer.payload.frame.exp_id;
+ frame->isp_config_id = ddr_buffer.payload.frame.isp_parameters_id;
+ if (ddr_buffer.payload.frame.flashed == 1)
+ frame->flash_state =
+ IA_CSS_FRAME_FLASH_STATE_PARTIAL;
+ if (ddr_buffer.payload.frame.flashed == 2)
+ frame->flash_state =
+ IA_CSS_FRAME_FLASH_STATE_FULL;
+ frame->valid = pipe->num_invalid_frames == 0;
+ if (!frame->valid)
+ pipe->num_invalid_frames--;
+
+ if (frame->info.format == IA_CSS_FRAME_FORMAT_BINARY_8) {
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ frame->planes.binary.size = frame->data_bytes;
+#else
+ frame->planes.binary.size =
+ sh_css_sp_get_binary_copy_size();
+#endif
+ }
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
+ IA_CSS_LOG("pfp: dequeued OF %d with config id %d thread %d",
+ frame->data, frame->isp_config_id, thread_id);
+ }
+#endif
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_dequeue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
+ buf_type, buffer->data.frame->data);
+
+ break;
+ case IA_CSS_BUFFER_TYPE_3A_STATISTICS:
+ buffer->data.stats_3a =
+ (struct ia_css_isp_3a_statistics *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->exp_id = ddr_buffer.payload.s3a.exp_id;
+ buffer->data.stats_3a->exp_id = ddr_buffer.payload.s3a.exp_id;
+ buffer->data.stats_3a->isp_config_id = ddr_buffer.payload.s3a.isp_config_id;
+ break;
+ case IA_CSS_BUFFER_TYPE_DIS_STATISTICS:
+ buffer->data.stats_dvs =
+ (struct ia_css_isp_dvs_statistics *)
+ HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->exp_id = ddr_buffer.payload.dis.exp_id;
+ buffer->data.stats_dvs->exp_id = ddr_buffer.payload.dis.exp_id;
+ break;
+ case IA_CSS_BUFFER_TYPE_LACE_STATISTICS:
+ break;
+ case IA_CSS_BUFFER_TYPE_METADATA:
+ buffer->data.metadata =
+ (struct ia_css_metadata *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->exp_id = ddr_buffer.payload.metadata.exp_id;
+ buffer->data.metadata->exp_id = ddr_buffer.payload.metadata.exp_id;
+ break;
+ default:
+ return_err = IA_CSS_ERR_INTERNAL_ERROR;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Tell the SP which queues are not full,
+ * by sending the software event.
+ */
+ if (return_err == IA_CSS_SUCCESS)
+ {
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED,
+ 0,
+ queue_id,
+ 0);
+ }
+ IA_CSS_LEAVE("buffer=%p", buffer);
+
+ return return_err;
+}
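+
+/*
+ * Dequeue mirror of the enqueue protocol (drawn from the code above):
+ * the vptr popped from the queue is validated against the
+ * hmm_buffer_record saved at enqueue time, the descriptor is read back
+ * with mmgr_load, the saved kernel_ptr is cross-checked, and only then
+ * is the payload copied into the caller's ia_css_buffer. Finally the SP
+ * is told a queue slot freed up via IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED.
+ */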
+
+/*
+ * Cannot move this to the event module as it is of ia_css_event_type,
+ * which is declared in ia_css.h.
+ * TODO: modify and move it if possible.
+ *
+ * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
+ * 1) "enum ia_css_event_type" (ia_css_event_public.h)
+ * 2) "enum sh_css_sp_event_type" (sh_css_internal.h)
+ * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c)
+ * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
+ */
+static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /** Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /** Second output frame ready. */
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /** Viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /** Second viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /** Indication that 3A statistics are available. */
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /** Indication that DIS statistics are available. */
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE, /** Pipeline Done event, sent after last pipeline stage. */
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED, /** Frame tagged. */
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /** Input frame ready. */
+ IA_CSS_EVENT_TYPE_METADATA_DONE, /** Metadata ready. */
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /** Indication that LACE statistics are available. */
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /** Extension stage executed. */
+ IA_CSS_EVENT_TYPE_TIMER, /** Timing measurement data. */
+ IA_CSS_EVENT_TYPE_PORT_EOF, /** End Of Frame event, sent when in buffered sensor mode. */
+ IA_CSS_EVENT_TYPE_FW_WARNING, /** Performance warning encountered by FW */
+ IA_CSS_EVENT_TYPE_FW_ASSERT, /** Assertion hit by FW */
+ 0, /* error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
+};
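+
+/*
+ * A compile-time guard one could add to enforce the sync requirement
+ * documented above (sketch only; assumes SH_CSS_SP_EVENT_NR_OF_TYPES
+ * counts the SP event types, with the trailing 0 entry covering the
+ * out-of-range case):
+ *
+ *	BUILD_BUG_ON(ARRAY_SIZE(convert_event_sp_to_host_domain) !=
+ *		     SH_CSS_SP_EVENT_NR_OF_TYPES + 1);
+ */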
+
+enum ia_css_err
+ia_css_dequeue_event(struct ia_css_event *event) {
+ return ia_css_dequeue_psys_event(event);
+}
+
+enum ia_css_err
+ia_css_dequeue_psys_event(struct ia_css_event *event) {
+ enum ia_css_pipe_id pipe_id = 0;
+ u8 payload[4] = {0, 0, 0, 0};
+ enum ia_css_err ret_err;
+
+	/* TODO:
+	 * a) use a generic decoding function, same as the one used by the SP.
+	 * b) group decode and dequeue into an eventQueue module.
+ *
+ * We skip the IA_CSS_ENTER logging call
+ * to avoid flooding the logs when the host application
+ * uses polling. */
+ if (!event)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (!sh_css_sp_is_running())
+ {
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ /* dequeue the event (if any) from the psys event queue */
+ ret_err = ia_css_bufq_dequeue_psys_event(payload);
+ if (ret_err != IA_CSS_SUCCESS)
+ return ret_err;
+
+ IA_CSS_LOG("event dequeued from psys event queue");
+
+ /* Tell the SP that we dequeued an event from the event queue. */
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0);
+
+ /* Events are decoded into 4 bytes of payload, the first byte
+ * contains the sp event type. This is converted to a host enum.
+ * TODO: can this enum conversion be eliminated */
+ event->type = convert_event_sp_to_host_domain[payload[0]];
+ /* Some sane default values since not all events use all fields. */
+ event->pipe = NULL;
+ event->port = MIPI_PORT0_ID;
+ event->exp_id = 0;
+ event->fw_warning = IA_CSS_FW_WARNING_NONE;
+ event->fw_handle = 0;
+ event->timer_data = 0;
+ event->timer_code = 0;
+ event->timer_subcode = 0;
+
+ if (event->type == IA_CSS_EVENT_TYPE_TIMER)
+ {
+		/* timer event: get the 2nd event and decode the data into the event struct */
+ u32 tmp_data;
+ /* 1st event: LSB 16-bit timer data and code */
+ event->timer_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
+ event->timer_code = payload[2];
+ payload[0] = payload[1] = payload[2] = payload[3] = 0;
+ ret_err = ia_css_bufq_dequeue_psys_event(payload);
+ if (ret_err != IA_CSS_SUCCESS) {
+			/* no 2nd event: an error */
+			/* Using IA_CSS_ERROR here caused failures in
+			 * Merrifield smoke testing. */
+ IA_CSS_WARNING("Timer: Error de-queuing the 2nd TIMER event!!!\n");
+ return ret_err;
+ }
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0);
+ event->type = convert_event_sp_to_host_domain[payload[0]];
+ /* It's a timer */
+ if (event->type == IA_CSS_EVENT_TYPE_TIMER) {
+ /* 2nd event data: MSB 16-bit timer and subcode */
+ tmp_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
+ event->timer_data |= (tmp_data << 16);
+ event->timer_subcode = payload[2];
+ }
+		else {
+			/* It's a non-timer event, so clear the first half of
+			 * the timer event data. If the second part of the
+			 * TIMER event is not received, we discard the first
+			 * half of the timer data and process the non-timer
+			 * event without affecting the flow; the non-timer
+			 * event simply falls through the code. */
+			event->timer_data = 0;
+			event->timer_code = 0;
+			event->timer_subcode = 0;
+			IA_CSS_ERROR("Missing 2nd timer event. Timer event discarded");
+		}
+ }
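+	/*
+	 * Resulting TIMER payload layout (as decoded above): the 1st event
+	 * carries timer_data[15:0] in payload[1] (LSB) and payload[3] (MSB)
+	 * plus the code in payload[2]; the 2nd event carries
+	 * timer_data[31:16] the same way plus the subcode in payload[2].
+	 */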
+ if (event->type == IA_CSS_EVENT_TYPE_PORT_EOF)
+ {
+ event->port = (enum mipi_port_id)payload[1];
+ event->exp_id = payload[3];
+ } else if (event->type == IA_CSS_EVENT_TYPE_FW_WARNING)
+ {
+ event->fw_warning = (enum ia_css_fw_warning)payload[1];
+ /* exp_id is only available in these warning types */
+ if (event->fw_warning == IA_CSS_FW_WARNING_EXP_ID_LOCKED ||
+ event->fw_warning == IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED)
+ event->exp_id = payload[3];
+ } else if (event->type == IA_CSS_EVENT_TYPE_FW_ASSERT)
+ {
+ event->fw_assert_module_id = payload[1]; /* module */
+ event->fw_assert_line_no = (payload[2] << 8) + payload[3];
+ /* payload[2] is line_no>>8, payload[3] is line_no&0xff */
+ } else if (event->type != IA_CSS_EVENT_TYPE_TIMER)
+ {
+ /* pipe related events.
+ * payload[1] contains the pipe_num,
+ * payload[2] contains the pipe_id. These are different. */
+ event->pipe = find_pipe_by_num(payload[1]);
+ pipe_id = (enum ia_css_pipe_id)payload[2];
+ /* Check to see if pipe still exists */
+ if (!event->pipe)
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+
+ if (event->type == IA_CSS_EVENT_TYPE_FRAME_TAGGED) {
+ /* find the capture pipe that goes with this */
+ int i, n;
+
+ n = event->pipe->stream->num_pipes;
+ for (i = 0; i < n; i++) {
+ struct ia_css_pipe *p =
+ event->pipe->stream->pipes[i];
+ if (p->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
+ event->pipe = p;
+ break;
+ }
+ }
+ event->exp_id = payload[3];
+ }
+ if (event->type == IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE) {
+ /* payload[3] contains the acc fw handle. */
+			u32 stage_num = payload[3];
+
+ ret_err = ia_css_pipeline_get_fw_from_stage(
+ &event->pipe->pipeline,
+ stage_num,
+ &event->fw_handle);
+ if (ret_err != IA_CSS_SUCCESS) {
+ IA_CSS_ERROR("Invalid stage num received for ACC event. stage_num:%u",
+ stage_num);
+ return ret_err;
+ }
+ }
+ }
+
+ if (event->pipe)
+ IA_CSS_LEAVE("event_id=%d, pipe_id=%d", event->type, pipe_id);
+ else
+ IA_CSS_LEAVE("event_id=%d", event->type);
+
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_dequeue_isys_event(struct ia_css_event *event) {
+ u8 payload[4] = {0, 0, 0, 0};
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ /* We skip the IA_CSS_ENTER logging call
+ * to avoid flooding the logs when the host application
+ * uses polling. */
+ if (!event)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (!sh_css_sp_is_running())
+ {
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ err = ia_css_bufq_dequeue_isys_event(payload);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ IA_CSS_LOG("event dequeued from isys event queue");
+
+ /* Update SP state to indicate that element was dequeued. */
+ ia_css_bufq_enqueue_isys_event(IA_CSS_ISYS_SW_EVENT_EVENT_DEQUEUED);
+
+ /* Fill return struct with appropriate info */
+ event->type = IA_CSS_EVENT_TYPE_PORT_EOF;
+ /* EOF events are associated with a CSI port, not with a pipe */
+ event->pipe = NULL;
+ event->port = payload[1];
+ event->exp_id = payload[3];
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
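+
+/*
+ * Typical host-side usage of the dequeue call above (hypothetical
+ * caller, not part of this file): poll until the queue is drained.
+ *
+ *	struct ia_css_event ev;
+ *
+ *	while (ia_css_dequeue_isys_event(&ev) == IA_CSS_SUCCESS)
+ *		handle_port_eof(ev.port, ev.exp_id); // hypothetical handler
+ */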
+
+static void
+acc_start(struct ia_css_pipe *pipe)
+{
+ assert(pipe);
+ assert(pipe->stream);
+
+ start_pipe(pipe, SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD,
+ pipe->stream->config.mode);
+}
+
+static enum ia_css_err
+sh_css_pipe_start(struct ia_css_stream *stream) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ struct ia_css_pipe *pipe;
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+
+ if (!stream)
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ pipe = stream->last_pipe;
+ if (!pipe)
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe_id = pipe->mode;
+
+	if (stream->started)
+ {
+ IA_CSS_WARNING("Cannot start stream that is already started");
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ pipe->stop_requested = false;
+
+ switch (pipe_id)
+ {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = preview_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = video_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = capture_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = yuvpp_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ acc_start(pipe);
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* DH regular multi pipe - not continuous mode: start the next pipes too */
+ if (!stream->config.continuous)
+ {
+ int i;
+
+ for (i = 1; i < stream->num_pipes && IA_CSS_SUCCESS == err ; i++) {
+ switch (stream->pipes[i]->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ stream->pipes[i]->stop_requested = false;
+ err = preview_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ stream->pipes[i]->stop_requested = false;
+ err = video_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ stream->pipes[i]->stop_requested = false;
+ err = capture_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ stream->pipes[i]->stop_requested = false;
+ err = yuvpp_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ stream->pipes[i]->stop_requested = false;
+ acc_start(stream->pipes[i]);
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ }
+ }
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* Force ISP parameter calculation after a mode change
+ * Acceleration API examples pass NULL for stream but they
+ * don't use ISP parameters anyway. So this should be okay.
+ * The SP binary (jpeg) copy does not use any parameters.
+ */
+ if (!copy_on_sp(pipe))
+ {
+ sh_css_invalidate_params(stream);
+ err = sh_css_param_update_isp_params(pipe,
+ stream->isp_params_configs, true, NULL);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ ia_css_debug_pipe_graph_dump_epilogue();
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+
+ if (!sh_css_sp_is_running())
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+
+ /* DH regular multi pipe - not continuous mode: enqueue event to the next pipes too */
+ if (!stream->config.continuous)
+ {
+ int i;
+
+ for (i = 1; i < stream->num_pipes; i++) {
+ ia_css_pipeline_get_sp_thread_id(
+ ia_css_pipe_get_pipe_num(stream->pipes[i]),
+ &thread_id);
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+ }
+
+	/* In continuous capture mode, we also start the capture and copy threads. */
+ if (pipe->stream->config.continuous)
+ {
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ copy_pipe = pipe->pipe_settings.preview.copy_pipe;
+ else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ copy_pipe = pipe->pipe_settings.video.copy_pipe;
+
+ if (!copy_pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(copy_pipe),
+ &thread_id);
+ /* by the time we reach here q is initialized and handle is available.*/
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+ if (pipe->stream->cont_capt)
+ {
+ struct ia_css_pipe *capture_pipe = NULL;
+
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ capture_pipe = pipe->pipe_settings.preview.capture_pipe;
+ else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ capture_pipe = pipe->pipe_settings.video.capture_pipe;
+
+ if (!capture_pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
+ &thread_id);
+ /* by the time we reach here q is initialized and handle is available.*/
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+
+	/* In PREVIEW mode, check whether the QOS acc_pipe is available, and if so start it. */
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ {
+ struct ia_css_pipe *acc_pipe = NULL;
+
+ acc_pipe = pipe->pipe_settings.preview.acc_pipe;
+
+ if (acc_pipe) {
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(acc_pipe),
+ &thread_id);
+ /* by the time we reach here q is initialized and handle is available.*/
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+ }
+
+ stream->started = true;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* ISP2400 */
+void
+sh_css_enable_cont_capt(bool enable, bool stop_copy_preview)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_enable_cont_capt() enter: enable=%d\n", enable);
+	/* my_css.cont_capt = enable; */
+ my_css.stop_copy_preview = stop_copy_preview;
+}
+
+bool
+sh_css_continuous_is_enabled(uint8_t pipe_num)
+{
+ struct ia_css_pipe *pipe;
+ bool continuous;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_continuous_is_enabled() enter: pipe_num=%d\n", pipe_num);
+
+ pipe = find_pipe_by_num(pipe_num);
+ continuous = pipe && pipe->stream->config.continuous;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_continuous_is_enabled() leave: enable=%d\n",
+ continuous);
+ return continuous;
+}
+
+/* ISP2400 */
+enum ia_css_err
+ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream,
+ int *buffer_depth) {
+ if (!buffer_depth)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_max_buffer_depth() enter: void\n");
+ (void)stream;
+ *buffer_depth = NUM_CONTINUOUS_FRAMES;
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_set_buffer_depth() enter: num_frames=%d\n", buffer_depth);
+ (void)stream;
+ if (buffer_depth > NUM_CONTINUOUS_FRAMES || buffer_depth < 1)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ /* ok, value allowed */
+ stream->config.target_num_cont_raw_buf = buffer_depth;
+	/* TODO: check what to do regarding initialization */
+ return IA_CSS_SUCCESS;
+}
+
+/* ISP2401 */
+enum ia_css_err
+ia_css_stream_get_buffer_depth(struct ia_css_stream *stream,
+ int *buffer_depth) {
+ if (!buffer_depth)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_buffer_depth() enter: void\n");
+ (void)stream;
+ *buffer_depth = stream->config.target_num_cont_raw_buf;
+ return IA_CSS_SUCCESS;
+}
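+
+/*
+ * Usage sketch for the buffer-depth pair above (hypothetical caller;
+ * error handling elided). Out-of-range values are rejected with
+ * IA_CSS_ERR_INVALID_ARGUMENTS rather than clamped:
+ *
+ *	int depth;
+ *
+ *	ia_css_stream_set_buffer_depth(stream, 4);
+ *	ia_css_stream_get_buffer_depth(stream, &depth); // depth == 4
+ */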
+
+/*
+ * @brief Stop all "ia_css_pipe" instances in the target
+ * "ia_css_stream" instance.
+ *
+ * Refer to "Local prototypes" for more info.
+ */
+/* ISP2401 */
+static enum ia_css_err
+sh_css_pipes_stop(struct ia_css_stream *stream)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipe *main_pipe;
+ enum ia_css_pipe_id main_pipe_id;
+ int i;
+
+ assert(stream);
+ if (!stream)
+ {
+ IA_CSS_LOG("stream does NOT exist!");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+
+ main_pipe = stream->last_pipe;
+ assert(main_pipe);
+ if (!main_pipe)
+ {
+ IA_CSS_LOG("main_pipe does NOT exist!");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+
+ main_pipe_id = main_pipe->mode;
+ IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
+
+ /*
+ * Stop all "ia_css_pipe" instances in this target
+ * "ia_css_stream" instance.
+ */
+ for (i = 0; i < stream->num_pipes; i++)
+ {
+ /* send the "stop" request to the "ia_css_pipe" instance */
+ IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d",
+ stream->pipes[i]->pipeline.pipe_id);
+ err = ia_css_pipeline_request_stop(&stream->pipes[i]->pipeline);
+
+ /*
+ * Exit this loop if "ia_css_pipeline_request_stop()"
+ * returns the error code.
+ *
+ * The error code would be generated in the following
+ * two cases:
+ * (1) The Scalar Processor has already been stopped.
+ * (2) The "Host->SP" event queue is full.
+ *
+		 * Per the convention of the CSS API 2.0/2.1, such CSS
+		 * error codes are propagated from the CSS-internal API
+		 * return value to the CSS API return value. The CSS
+		 * driver should then capture these error codes and
+		 * handle them in its exception handling mechanism.
+ */
+		if (err != IA_CSS_SUCCESS)
+			goto ERR;
+ }
+
+ /*
+ * In the CSS firmware use scenario "Continuous Preview"
+ * as well as "Continuous Video", the "ia_css_pipe" instance
+ * "Copy Pipe" is activated. This "Copy Pipe" is private to
+ * the CSS firmware so that it is not listed in the target
+ * "ia_css_stream" instance.
+ *
+ * We need to stop this "Copy Pipe", as well.
+ */
+ if (main_pipe->stream->config.continuous)
+ {
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ /* get the reference to "Copy Pipe" */
+ if (main_pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ else if (main_pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+
+ /* return the error code if "Copy Pipe" does NOT exist */
+ assert(copy_pipe);
+ if (!copy_pipe) {
+ IA_CSS_LOG("Copy Pipe does NOT exist!");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+
+ /* send the "stop" request to "Copy Pipe" */
+ IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d",
+ copy_pipe->pipeline.pipe_id);
+ err = ia_css_pipeline_request_stop(&copy_pipe->pipeline);
+ }
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/*
+ * @brief Check if all "ia_css_pipe" instances in the target
+ * "ia_css_stream" instance have stopped.
+ *
+ * Refer to "Local prototypes" for more info.
+ */
+/* ISP2401 */
+static bool
+sh_css_pipes_have_stopped(struct ia_css_stream *stream)
+{
+ bool rval = true;
+
+ struct ia_css_pipe *main_pipe;
+ enum ia_css_pipe_id main_pipe_id;
+
+ int i;
+
+ assert(stream);
+ if (!stream) {
+ IA_CSS_LOG("stream does NOT exist!");
+ rval = false;
+ goto RET;
+ }
+
+ main_pipe = stream->last_pipe;
+ assert(main_pipe);
+
+ if (!main_pipe) {
+ IA_CSS_LOG("main_pipe does NOT exist!");
+ rval = false;
+ goto RET;
+ }
+
+ main_pipe_id = main_pipe->mode;
+ IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
+
+ /*
+ * Check if every "ia_css_pipe" instance in this target
+ * "ia_css_stream" instance has stopped.
+ */
+ for (i = 0; i < stream->num_pipes; i++) {
+ rval = rval && ia_css_pipeline_has_stopped(&stream->pipes[i]->pipeline);
+ IA_CSS_LOG("Pipe has stopped: pipe_id=%d, stopped=%d",
+ stream->pipes[i]->pipeline.pipe_id,
+ rval);
+ }
+
+ /*
+ * In the CSS firmware use scenario "Continuous Preview"
+ * as well as "Continuous Video", the "ia_css_pipe" instance
+ * "Copy Pipe" is activated. This "Copy Pipe" is private to
+ * the CSS firmware so that it is not listed in the target
+ * "ia_css_stream" instance.
+ *
+ * We need to check if this "Copy Pipe" has stopped, as well.
+ */
+ if (main_pipe->stream->config.continuous) {
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ /* get the reference to "Copy Pipe" */
+ if (main_pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ else if (main_pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+
+ /* return if "Copy Pipe" does NOT exist */
+ assert(copy_pipe);
+ if (!copy_pipe) {
+ IA_CSS_LOG("Copy Pipe does NOT exist!");
+
+ rval = false;
+ goto RET;
+ }
+
+ /* check if "Copy Pipe" has stopped or not */
+ rval = rval && ia_css_pipeline_has_stopped(&copy_pipe->pipeline);
+ IA_CSS_LOG("Pipe has stopped: pipe_id=%d, stopped=%d",
+ copy_pipe->pipeline.pipe_id,
+ rval);
+ }
+
+RET:
+ IA_CSS_LEAVE_PRIVATE("rval=%d", rval);
+ return rval;
+}
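+
+/*
+ * The two helpers above pair up: a stop sequence first calls
+ * sh_css_pipes_stop() and then polls sh_css_pipes_have_stopped() until
+ * it returns true. Hypothetical caller sketch (a real caller should
+ * bound the wait with a timeout):
+ *
+ *	err = sh_css_pipes_stop(stream);
+ *	while (err == IA_CSS_SUCCESS && !sh_css_pipes_have_stopped(stream))
+ *		usleep_range(1000, 2000);
+ */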
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+unsigned int
+sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
+{
+ OP___assert(port < N_CSI_PORTS);
+ OP___assert(idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_get_mipi_sizes_for_check(port %d, idx %d): %d\n",
+ port, idx, my_css.mipi_sizes_for_check[port][idx]);
+ return my_css.mipi_sizes_for_check[port][idx];
+}
+#endif
+
+static enum ia_css_err sh_css_pipe_configure_output(
+ struct ia_css_pipe *pipe,
+ unsigned int width,
+ unsigned int height,
+ unsigned int padded_width,
+ enum ia_css_frame_format format,
+ unsigned int idx)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, paddaed width = %d, format = %d, idx = %d",
+ pipe, width, height, padded_width, format, idx);
+ if (!pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ err = ia_css_util_check_res(width, height);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (pipe->output_info[idx].res.width != width ||
+ pipe->output_info[idx].res.height != height ||
+ pipe->output_info[idx].format != format) {
+ ia_css_frame_info_init(
+ &pipe->output_info[idx],
+ width,
+ height,
+ format,
+ padded_width);
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
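+
+/*
+ * Note: the function above only re-initializes output_info[idx] when
+ * width, height or format actually change, so repeated calls with the
+ * same parameters are cheap no-ops. Caller sketch (hypothetical values):
+ *
+ *	sh_css_pipe_configure_output(pipe, 1920, 1080, 1920,
+ *				     IA_CSS_FRAME_FORMAT_NV12, 0);
+ */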
+
+static enum ia_css_err
+sh_css_pipe_get_shading_info(struct ia_css_pipe *pipe,
+ struct ia_css_shading_info *shading_info,
+ struct ia_css_pipe_config *pipe_config)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_binary *binary = NULL;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_shading_info() enter:\n");
+
+ binary = ia_css_pipe_get_shading_correction_binary(pipe);
+
+ if (binary)
+ {
+ err = ia_css_binary_get_shading_info(binary,
+ IA_CSS_SHADING_CORRECTION_TYPE_1,
+ pipe->required_bds_factor,
+ (const struct ia_css_stream_config *)&pipe->stream->config,
+ shading_info, pipe_config);
+
+		/* Other function calls can be added here when other shading
+		 * correction types are added in the future.
+		 */
+ } else
+ {
+		/* When the pipe does not have a binary which has the shading
+		 * correction, this function does not need to fill the shading
+		 * information. It is not an error case, so this function
+		 * should return IA_CSS_SUCCESS.
+		 */
+ memset(shading_info, 0, sizeof(*shading_info));
+ }
+ return err;
+}
+
+static enum ia_css_err
+sh_css_pipe_get_grid_info(struct ia_css_pipe *pipe,
+ struct ia_css_grid_info *info) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe);
+ assert(info);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ binary = ia_css_pipe_get_s3a_binary(pipe);
+
+ if (binary)
+ {
+ err = ia_css_binary_3a_grid_info(binary, info, pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ } else
+ memset(&info->s3a_grid, 0, sizeof(info->s3a_grid));
+
+ binary = ia_css_pipe_get_sdis_binary(pipe);
+
+ if (binary)
+ {
+ ia_css_binary_dvs_grid_info(binary, info, pipe);
+ ia_css_binary_dvs_stat_grid_info(binary, info, pipe);
+ } else
+ {
+ memset(&info->dvs_grid.dvs_grid_info, 0,
+ sizeof(info->dvs_grid.dvs_grid_info));
+ memset(&info->dvs_grid.dvs_stat_grid_info, 0,
+ sizeof(info->dvs_grid.dvs_stat_grid_info));
+ }
+
+ if (binary)
+ {
+ /* copy pipe does not have ISP binary*/
+ info->isp_in_width = binary->internal_frame_info.res.width;
+ info->isp_in_height = binary->internal_frame_info.res.height;
+ }
+
+#if defined(HAS_VAMEM_VERSION_2)
+ info->vamem_type = IA_CSS_VAMEM_TYPE_2;
+#elif defined(HAS_VAMEM_VERSION_1)
+ info->vamem_type = IA_CSS_VAMEM_TYPE_1;
+#else
+#error "Unknown VAMEM version"
+#endif
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* ISP2401 */
+/*
+ * @brief Check if a format is supported by the pipe.
+ *
+ */
+static enum ia_css_err
+ia_css_pipe_check_format(struct ia_css_pipe *pipe,
+ enum ia_css_frame_format format) {
+ const enum ia_css_frame_format *supported_formats;
+ int number_of_formats;
+ int found = 0;
+ int i;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+	if (!pipe || !pipe->pipe_settings.video.video_binary.info)
+ {
+ IA_CSS_ERROR("Pipe or binary info is not set");
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ supported_formats = pipe->pipe_settings.video.video_binary.info->output_formats;
+	number_of_formats = sizeof(pipe->pipe_settings.video.video_binary.info->output_formats) /
+			    sizeof(enum ia_css_frame_format);
+
+	for (i = 0; i < number_of_formats; i++)
+ {
+ if (supported_formats[i] == format) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ {
+ IA_CSS_ERROR("Requested format is not supported by binary");
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+}
+
+static enum ia_css_err load_video_binaries(struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info video_in_info, tnr_info,
+ *video_vf_info, video_bds_out_info, *pipe_out_info, *pipe_vf_out_info;
+ bool online;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool continuous = pipe->stream->config.continuous;
+ unsigned int i;
+ unsigned int num_output_pins;
+ struct ia_css_frame_info video_bin_out_info;
+ bool need_scaler = false;
+ bool vf_res_different_than_output = false;
+ bool need_vf_pp = false;
+ int vf_ds_log2;
+ struct ia_css_video_settings *mycs = &pipe->pipe_settings.video;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_VIDEO);
+	/* We only test the video_binary because offline video doesn't need a
+	 * vf_pp binary and online video does not always use the copy_binary.
+	 * All are always reset at the same time anyway.
+	 */
+ if (mycs->video_binary.info)
+ return IA_CSS_SUCCESS;
+
+ online = pipe->stream->config.online;
+ pipe_out_info = &pipe->output_info[0];
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+
+ assert(pipe_out_info);
+
+ /*
+ * There is no explicit input format requirement for raw or yuv
+ * What matters is that there is a binary that supports the stream format.
+ * This is checked in the binary_find(), so no need to check it here
+ */
+ err = ia_css_util_check_input(&pipe->stream->config, false, false);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ /* cannot have online video and input_mode memory */
+ if (online && pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ err = ia_css_util_check_vf_out_info(pipe_out_info,
+ pipe_vf_out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ if (pipe->out_yuv_ds_input_info.res.width)
+ video_bin_out_info = pipe->out_yuv_ds_input_info;
+ else
+ video_bin_out_info = *pipe_out_info;
+
+ /* Video */
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ video_vf_info = pipe_vf_out_info;
+ vf_res_different_than_output = (video_vf_info->res.width !=
+ video_bin_out_info.res.width) ||
+ (video_vf_info->res.height != video_bin_out_info.res.height);
+ } else {
+ video_vf_info = NULL;
+ }
+
+ need_scaler = need_downscaling(video_bin_out_info.res, pipe_out_info->res);
+
+ /* we build up the pipeline starting at the end */
+ /* YUV post-processing if needed */
+ if (need_scaler) {
+ struct ia_css_cas_binary_descr cas_scaler_descr = { };
+
+ /* NV12 is the common format that is supported by both */
+ /* yuv_scaler and the video_xx_isp2_min binaries. */
+ video_bin_out_info.format = IA_CSS_FRAME_FORMAT_NV12;
+
+ err = ia_css_pipe_create_cas_scaler_desc_single_output(
+ &video_bin_out_info,
+ pipe_out_info,
+ NULL,
+ &cas_scaler_descr);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
+ mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(struct ia_css_binary), GFP_KERNEL);
+ if (!mycs->yuv_scaler_binary) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return err;
+ }
+ mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage
+ * sizeof(bool), GFP_KERNEL);
+ if (!mycs->is_output_stage) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return err;
+ }
+ for (i = 0; i < cas_scaler_descr.num_stage; i++) {
+ struct ia_css_binary_descr yuv_scaler_descr;
+
+ mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe,
+ &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
+ err = ia_css_binary_find(&yuv_scaler_descr,
+ &mycs->yuv_scaler_binary[i]);
+ if (err != IA_CSS_SUCCESS) {
+ kfree(mycs->is_output_stage);
+ mycs->is_output_stage = NULL;
+ return err;
+ }
+ }
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ }
+
+ {
+ struct ia_css_binary_descr video_descr;
+ enum ia_css_frame_format vf_info_format;
+
+ err = ia_css_pipe_get_video_binarydesc(pipe,
+ &video_descr, &video_in_info, &video_bds_out_info, &video_bin_out_info,
+ video_vf_info,
+ pipe->stream->config.left_padding);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+		/* When video_vf_info is not NULL, this allows us to find a
+		 * potential video binary with the desired vf format.
+		 * On success, no vf_pp binary is needed.
+		 * On failure, we look up a video binary with the YUV_LINE
+		 * vf format instead.
+		 */
+ err = ia_css_binary_find(&video_descr,
+ &mycs->video_binary);
+
+ if (err != IA_CSS_SUCCESS) {
+ if (video_vf_info) {
+ /* This will do another video binary lookup later for YUV_LINE format*/
+ need_vf_pp = true;
+ } else
+ return err;
+ } else if (video_vf_info) {
+			/* The first video binary lookup is successful, but we
+			 * may still need a vf_pp binary based on the
+			 * additional checks below. */
+ num_output_pins = mycs->video_binary.info->num_output_pins;
+ vf_ds_log2 = mycs->video_binary.vf_downscale_log2;
+
+ /* If the binary has two output pins, we need vf_pp when the
+ * viewfinder resolution differs from the output resolution. */
+ need_vf_pp |= ((num_output_pins == 2) && vf_res_different_than_output);
+
+ /* If the binary has a single output pin, we need vf_pp when
+ * additional scaling is needed for the viewfinder */
+ need_vf_pp |= ((num_output_pins == 1) &&
+ ((video_vf_info->res.width << vf_ds_log2 != pipe_out_info->res.width) ||
+ (video_vf_info->res.height << vf_ds_log2 != pipe_out_info->res.height)));
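+ /* Example with illustrative values: with vf_ds_log2 == 1, a
+ * 960x540 viewfinder matches a 1920x1080 main output exactly
+ * (960 << 1 == 1920), so no vf_pp is needed; any other vf
+ * resolution forces the extra vf_pp scaling stage. */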
+ }
+
+ if (need_vf_pp) {
+ /* save the current vf_info format for restoration later */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "load_video_binaries() need_vf_pp; find video binary with YUV_LINE again\n");
+
+ vf_info_format = video_vf_info->format;
+
+ if (!pipe->config.enable_vfpp_bci)
+ ia_css_frame_info_set_format(video_vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ ia_css_binary_destroy_isp_parameters(&mycs->video_binary);
+
+ err = ia_css_binary_find(&video_descr,
+ &mycs->video_binary);
+
+ /* restore original vf_info format */
+ ia_css_frame_info_set_format(video_vf_info,
+ vf_info_format);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ }
+
+ /* If a video binary does not use a ref_frame, we set the frame delay
+ * to 0. This is the case for the 1-stage low-power video binary. */
+ if (!mycs->video_binary.info->sp.enable.ref_frame)
+ pipe->dvs_frame_delay = 0;
+
+ /* The delay latency determines the number of invalid frames after
+ * a stream is started. */
+ pipe->num_invalid_frames = pipe->dvs_frame_delay;
+ pipe->info.num_invalid_frames = pipe->num_invalid_frames;
+
+ /* Viewfinder frames also decrement num_invalid_frames. If the
+ * pipe produces viewfinder output, we need to double the number
+ * of invalid frames */
+ if (video_vf_info)
+ pipe->num_invalid_frames *= 2;
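+ /* e.g. with dvs_frame_delay == 2, a pipe without a viewfinder
+ * drops 2 startup frames and a pipe with one drops 4 */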
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "load_video_binaries() num_invalid_frames=%d dvs_frame_delay=%d\n",
+ pipe->num_invalid_frames, pipe->dvs_frame_delay);
+
+ /* pqiao TODO: temp hack for PO, should be removed after offline YUVPP is enabled */
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* Copy */
+ if (!online && !continuous) {
+ /* TODO: what exactly needs doing here? Should the copy binary
+ * be prepended to video based only on !online?
+ */
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ &mycs->video_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+#else
+ (void)continuous;
+#endif
+
+#if !defined(HAS_OUTPUT_SYSTEM)
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && need_vf_pp) {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ if (mycs->video_binary.vf_frame_info.format
+ == IA_CSS_FRAME_FORMAT_YUV_LINE) {
+ ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
+ &mycs->video_binary.vf_frame_info,
+ pipe_vf_out_info);
+ } else {
+ /* The output of the main binary is not YUV_LINE; currently this
+ * is possible only when BCI is enabled on the vf_pp output */
+ assert(pipe->config.enable_vfpp_bci);
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe, &vf_pp_descr,
+ &mycs->video_binary.vf_frame_info,
+ pipe_vf_out_info, NULL, NULL);
+ }
+
+ err = ia_css_binary_find(&vf_pp_descr,
+ &mycs->vf_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+#endif
+
+ err = allocate_delay_frames(pipe);
+
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ if (mycs->video_binary.info->sp.enable.block_output) {
+ unsigned int tnr_width;
+ unsigned int tnr_height;
+
+ tnr_info = mycs->video_binary.out_frame_info[0];
+
+ if (atomisp_hw_is_isp2401) {
+ /* Select the resolution for TNR. If the output system input
+ * resolution (the GDC output resolution) is set, use that,
+ * since it will also be the input resolution for TNR. At
+ * present this only makes sense for Skycam */
+ if (pipe->config.output_system_in_res.width &&
+ pipe->config.output_system_in_res.height) {
+ tnr_width = pipe->config.output_system_in_res.width;
+ tnr_height = pipe->config.output_system_in_res.height;
+ } else {
+ tnr_width = tnr_info.res.width;
+ tnr_height = tnr_info.res.height;
+ }
+
+ /* Align the TNR reference buffer width (in pixels) to the output block width */
+ tnr_info.res.width = CEIL_MUL(tnr_width,
+ (mycs->video_binary.info->sp.block.block_width * ISP_NWAY));
+ tnr_info.padded_width = tnr_info.res.width;
+ } else {
+ tnr_height = tnr_info.res.height;
+ }
+
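+ /* CEIL_MUL(v, m) rounds v up to the next multiple of m; e.g. a
+ * 1080-line frame with a 32-line output block becomes 1088, so
+ * the TNR buffers may be slightly taller than the visible frame */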
+ /* Align the TNR reference buffer height to the output block height */
+ tnr_info.res.height = CEIL_MUL(tnr_height,
+ mycs->video_binary.info->sp.block.output_block_height);
+ } else {
+ tnr_info = mycs->video_binary.internal_frame_info;
+ }
+ tnr_info.format = IA_CSS_FRAME_FORMAT_YUV_LINE;
+ tnr_info.raw_bit_depth = SH_CSS_TNR_BIT_DEPTH;
+
+ for (i = 0; i < NUM_TNR_FRAMES; i++) {
+ if (mycs->tnr_frames[i]) {
+ ia_css_frame_free(mycs->tnr_frames[i]);
+ mycs->tnr_frames[i] = NULL;
+ }
+ err = ia_css_frame_allocate_from_info(
+ &mycs->tnr_frames[i],
+ &tnr_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+unload_video_binaries(struct ia_css_pipe *pipe) {
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.video.copy_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.video.video_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.video.vf_pp_binary);
+
+ for (i = 0; i < pipe->pipe_settings.video.num_yuv_scaler; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.video.yuv_scaler_binary[i]);
+
+ kfree(pipe->pipe_settings.video.is_output_stage);
+ pipe->pipe_settings.video.is_output_stage = NULL;
+ kfree(pipe->pipe_settings.video.yuv_scaler_binary);
+ pipe->pipe_settings.video.yuv_scaler_binary = NULL;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err video_start(struct ia_css_pipe *pipe)
+{
+ struct ia_css_binary *copy_binary;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipe *copy_pipe, *capture_pipe;
+ enum sh_css_pipe_config_override copy_ovrd;
+ enum ia_css_input_mode video_pipe_input_mode;
+
+ const struct ia_css_coordinate *coord = NULL;
+ const struct ia_css_isp_parameters *params = NULL;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ video_pipe_input_mode = pipe->stream->config.mode;
+
+ copy_pipe = pipe->pipe_settings.video.copy_pipe;
+ capture_pipe = pipe->pipe_settings.video.capture_pipe;
+
+ copy_binary = &pipe->pipe_settings.video.copy_binary;
+
+ sh_css_metrics_start_frame();
+
+ /* multi stream video needs mipi buffers */
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+#endif
+
+ send_raw_frames(pipe);
+ {
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+
+ if (pipe->stream->cont_capt) {
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
+ &thread_id);
+ copy_ovrd |= 1 << thread_id;
+ }
+ }
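+ /* copy_ovrd is a bitmap of SP thread ids; e.g. a video pipe on
+ * thread 1 plus a capture pipe on thread 3 gives 0xa (1010b) */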
+
+ if (atomisp_hw_is_isp2401) {
+ coord = &pipe->config.internal_frame_origin_bqs_on_sctbl;
+ params = pipe->stream->isp_params_configs;
+ }
+
+ /* Construct and load the copy pipe */
+ if (pipe->stream->config.continuous) {
+ sh_css_sp_init_pipeline(&copy_pipe->pipeline,
+ IA_CSS_PIPE_ID_COPY,
+ (uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),
+ false,
+ pipe->stream->config.pixels_per_clock == 2, false,
+ false, pipe->required_bds_factor,
+ copy_ovrd,
+ pipe->stream->config.mode,
+ &pipe->stream->config.metadata_config,
+ &pipe->stream->info.metadata_info,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ pipe->stream->config.source.port.port,
+#endif
+ coord,
+ params);
+
+ /* make the video pipe start with memory mode input; the copy
+ pipe handles the actual input mode */
+ video_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
+ }
+
+ /* Construct and load the capture pipe */
+ if (pipe->stream->cont_capt) {
+ sh_css_sp_init_pipeline(&capture_pipe->pipeline,
+ IA_CSS_PIPE_ID_CAPTURE,
+ (uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),
+ capture_pipe->config.default_capture_config.enable_xnr != 0,
+ capture_pipe->stream->config.pixels_per_clock == 2,
+ true, /* continuous */
+ false, /* offline */
+ capture_pipe->required_bds_factor,
+ 0,
+ IA_CSS_INPUT_MODE_MEMORY,
+ &pipe->stream->config.metadata_config,
+ &pipe->stream->info.metadata_info,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ (enum mipi_port_id)0,
+#endif
+ coord,
+ params);
+ }
+
+ start_pipe(pipe, copy_ovrd, video_pipe_input_mode);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static
+enum ia_css_err sh_css_pipe_get_viewfinder_frame_info(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx)
+{
+ assert(pipe);
+ assert(info);
+
+ /* We could print the pointer as input arg, and the values as output */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_viewfinder_frame_info() enter: void\n");
+
+ if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE &&
+ (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER))
+ return IA_CSS_ERR_MODE_HAS_NO_VIEWFINDER;
+ /* offline video does not generate viewfinder output */
+ *info = pipe->vf_output_info[idx];
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_viewfinder_frame_info() leave: "
+ "info.res.width=%d, info.res.height=%d, "
+ "info.padded_width=%d, info.format=%d, "
+ "info.raw_bit_depth=%d, info.raw_bayer_order=%d\n",
+ info->res.width, info->res.height,
+ info->padded_width, info->format,
+ info->raw_bit_depth, info->raw_bayer_order);
+
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+sh_css_pipe_configure_viewfinder(struct ia_css_pipe *pipe, unsigned int width,
+ unsigned int height, unsigned int min_width,
+ enum ia_css_frame_format format,
+ unsigned int idx) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, min_width = %d, format = %d, idx = %d\n",
+ pipe, width, height, min_width, format, idx);
+
+ if (!pipe)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ err = ia_css_util_check_res(width, height);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (pipe->vf_output_info[idx].res.width != width ||
+ pipe->vf_output_info[idx].res.height != height ||
+ pipe->vf_output_info[idx].format != format)
+ {
+ ia_css_frame_info_init(&pipe->vf_output_info[idx], width, height,
+ format, min_width);
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err load_copy_binaries(struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(pipe);
+ IA_CSS_ENTER_PRIVATE("");
+
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+ if (pipe->pipe_settings.capture.copy_binary.info)
+ return IA_CSS_SUCCESS;
+
+ err = ia_css_frame_check_info(&pipe->output_info[0]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ err = verify_copy_out_frame_format(pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ err = load_copy_binary(pipe,
+ &pipe->pipe_settings.capture.copy_binary,
+ NULL);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static bool need_capture_pp(
+ const struct ia_css_pipe *pipe)
+{
+ const struct ia_css_frame_info *out_info = &pipe->output_info[0];
+
+ IA_CSS_ENTER_LEAVE_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
+
+ if (atomisp_hw_is_isp2401) {
+ /* ldc and capture_pp are not supported in the same pipeline */
+ if (need_capt_ldc(pipe))
+ return false;
+ }
+
+ /* determine whether we need to use the capture_pp binary.
+ * This is needed for:
+ * 1. XNR or
+ * 2. Digital Zoom or
+ * 3. YUV downscaling
+ */
+ if (pipe->out_yuv_ds_input_info.res.width &&
+ ((pipe->out_yuv_ds_input_info.res.width != out_info->res.width) ||
+ (pipe->out_yuv_ds_input_info.res.height != out_info->res.height)))
+ return true;
+
+ if (pipe->config.default_capture_config.enable_xnr != 0)
+ return true;
+
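+ /* dx/dy equal to HRT_GDC_N means unity zoom; smaller values mean
+ * the GDC zooms in, which requires capture_pp */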
+ if ((pipe->stream->isp_params_configs->dz_config.dx < HRT_GDC_N) ||
+ (pipe->stream->isp_params_configs->dz_config.dy < HRT_GDC_N) ||
+ pipe->config.enable_dz)
+ return true;
+
+ return false;
+}
+
+static bool need_capt_ldc(
+ const struct ia_css_pipe *pipe)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
+ return pipe->extra_config.enable_dvs_6axis;
+}
+
+static enum ia_css_err set_num_primary_stages(unsigned int *num,
+ enum ia_css_pipe_version version)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if (!num)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ switch (version) {
+ case IA_CSS_PIPE_VERSION_2_6_1:
+ *num = NUM_PRIMARY_HQ_STAGES;
+ break;
+ case IA_CSS_PIPE_VERSION_2_2:
+ case IA_CSS_PIPE_VERSION_1:
+ *num = NUM_PRIMARY_STAGES;
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ break;
+ }
+
+ return err;
+}
+
+static enum ia_css_err load_primary_binaries(
+ struct ia_css_pipe *pipe)
+{
+ bool online = false;
+ bool memory = false;
+ bool continuous = false;
+ bool need_pp = false;
+ bool need_isp_copy_binary = false;
+ bool need_ldc = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+#endif
+ struct ia_css_frame_info prim_in_info,
+ prim_out_info,
+ capt_pp_out_info, vf_info,
+ *vf_pp_in_info, *pipe_out_info,
+ *pipe_vf_out_info, *capt_pp_in_info,
+ capt_ldc_out_info;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_capture_settings *mycs;
+ unsigned int i;
+ bool need_extra_yuv_scaler = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->stream);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ online = pipe->stream->config.online;
+ memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+ continuous = pipe->stream->config.continuous;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+#endif
+
+ mycs = &pipe->pipe_settings.capture;
+ pipe_out_info = &pipe->output_info[0];
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+
+ if (mycs->primary_binary[0].info)
+ return IA_CSS_SUCCESS;
+
+ err = set_num_primary_stages(&mycs->num_primary_stage,
+ pipe->config.isp_pipe_version);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ err = ia_css_util_check_vf_out_info(pipe_out_info, pipe_vf_out_info);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else {
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ need_pp = need_capture_pp(pipe);
+
+ /* we use the vf output info to get the primary/capture_pp binary
+ configured for vf_veceven. It will select the closest downscaling
+ factor. */
+ vf_info = *pipe_vf_out_info;
+
+ /*
+ * WARNING: The #if def flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The
+ * vf-pp stage has been removed for Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+ * required. This should not be considered as a clean solution.
+ * Proper investigation should be done to come up with the clean
+ * solution.
+ * */
+ ia_css_frame_info_set_format(&vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ /* TODO: All this yuv_scaler and capturepp calculation logic
+ * can be shared later. Capture_pp is also a yuv_scale binary
+ * with extra XNR functionality. Therefore, it can be made the
+ * first step of the cascade. */
+ capt_pp_out_info = pipe->out_yuv_ds_input_info;
+ capt_pp_out_info.format = IA_CSS_FRAME_FORMAT_YUV420;
+ capt_pp_out_info.res.width /= MAX_PREFERRED_YUV_DS_PER_STEP;
+ capt_pp_out_info.res.height /= MAX_PREFERRED_YUV_DS_PER_STEP;
+ ia_css_frame_info_set_width(&capt_pp_out_info, capt_pp_out_info.res.width, 0);
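+ /* capture_pp itself downscales by at most one preferred step
+ * (assuming MAX_PREFERRED_YUV_DS_PER_STEP == 2, at most 2x); any
+ * remaining downscaling to pipe_out_info is delegated to the yuv
+ * scaler cascade below */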
+
+ /*
+ * WARNING: The #if def flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The
+ * vf-pp stage has been removed for Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+ * required. This should not be considered as a clean solution.
+ * Proper investigation should be done to come up with the clean
+ * solution.
+ * */
+ need_extra_yuv_scaler = need_downscaling(capt_pp_out_info.res,
+ pipe_out_info->res);
+
+ if (need_extra_yuv_scaler) {
+ struct ia_css_cas_binary_descr cas_scaler_descr = { };
+
+ err = ia_css_pipe_create_cas_scaler_desc_single_output(
+ &capt_pp_out_info,
+ pipe_out_info,
+ NULL,
+ &cas_scaler_descr);
+ if (err != IA_CSS_SUCCESS) {
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
+ mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(struct ia_css_binary), GFP_KERNEL);
+ if (!mycs->yuv_scaler_binary) {
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(bool), GFP_KERNEL);
+ if (!mycs->is_output_stage) {
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ for (i = 0; i < cas_scaler_descr.num_stage; i++) {
+ struct ia_css_binary_descr yuv_scaler_descr;
+
+ mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe,
+ &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
+ err = ia_css_binary_find(&yuv_scaler_descr,
+ &mycs->yuv_scaler_binary[i]);
+ if (err != IA_CSS_SUCCESS) {
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+
+ } else {
+ capt_pp_out_info = pipe->output_info[0];
+ }
+
+ /* TODO: Do we disable LDC for Skycam? */
+ need_ldc = need_capt_ldc(pipe);
+
+ if (atomisp_hw_is_isp2401 && need_ldc) {
+ /* ldc and capt_pp are not supported in the same pipeline */
+ struct ia_css_binary_descr capt_ldc_descr;
+
+ ia_css_pipe_get_ldc_binarydesc(pipe,
+ &capt_ldc_descr, &prim_out_info,
+ &capt_pp_out_info);
+
+ err = ia_css_binary_find(&capt_ldc_descr,
+ &mycs->capture_ldc_binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ need_pp = false;
+ need_ldc = false;
+ }
+ if (need_pp) {
+ struct ia_css_binary_descr capture_pp_descr;
+ struct ia_css_binary_descr prim_descr[MAX_NUM_PRIMARY_STAGES];
+
+ if (!atomisp_hw_is_isp2401)
+ capt_pp_in_info = need_ldc ? &capt_ldc_out_info : &prim_out_info;
+ else
+ capt_pp_in_info = &prim_out_info;
+
+ ia_css_pipe_get_capturepp_binarydesc(pipe,
+ &capture_pp_descr, capt_pp_in_info,
+ &capt_pp_out_info, &vf_info);
+ err = ia_css_binary_find(&capture_pp_descr,
+ &mycs->capture_pp_binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (need_ldc) {
+ struct ia_css_binary_descr capt_ldc_descr;
+
+ ia_css_pipe_get_ldc_binarydesc(pipe,
+ &capt_ldc_descr, &prim_out_info,
+ &capt_ldc_out_info);
+
+ err = ia_css_binary_find(&capt_ldc_descr,
+ &mycs->capture_ldc_binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else {
+ prim_out_info = *pipe_out_info;
+ }
+
+ /* Primary */
+ for (i = 0; i < mycs->num_primary_stage; i++) {
+ struct ia_css_frame_info *local_vf_info = NULL;
+
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] &&
+ (i == mycs->num_primary_stage - 1))
+ local_vf_info = &vf_info;
+ ia_css_pipe_get_primary_binarydesc(pipe, &prim_descr[i], &prim_in_info,
+ &prim_out_info, local_vf_info, i);
+ err = ia_css_binary_find(&prim_descr[i], &mycs->primary_binary[i]);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ /* Viewfinder post-processing */
+ if (need_pp)
+ vf_pp_in_info = &mycs->capture_pp_binary.vf_frame_info;
+ else
+ vf_pp_in_info = &mycs->primary_binary[mycs->num_primary_stage - 1].vf_frame_info;
+
+ /*
+ * WARNING: The #if def flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The
+ * vf-pp stage has been removed for Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+ * required. This should not be considered as a clean solution.
+ * Proper investigation should be done to come up with the clean
+ * solution.
+ * */
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
+ err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ err = allocate_delay_frames(pipe);
+
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, only the Direct Sensor Mode
+ * Offline Capture uses the ISP copy binary.
+ */
+ need_isp_copy_binary = !online && sensor;
+#else
+ need_isp_copy_binary = !online && !continuous && !memory;
+#endif
+
+ /* ISP Copy */
+ if (need_isp_copy_binary) {
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ &mycs->primary_binary[0]);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+allocate_delay_frames(struct ia_css_pipe *pipe) {
+ unsigned int num_delay_frames = 0, i = 0;
+ unsigned int dvs_frame_delay = 0;
+ struct ia_css_frame_info ref_info;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_pipe_id mode = IA_CSS_PIPE_ID_VIDEO;
+ struct ia_css_frame **delay_frames = NULL;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (!pipe)
+ {
+ IA_CSS_ERROR("Invalid args - pipe %p", pipe);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ mode = pipe->mode;
+ dvs_frame_delay = pipe->dvs_frame_delay;
+
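+ /* e.g. a DVS frame delay of 2 needs 3 buffers: the current frame
+ * plus two delayed reference frames */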
+ if (dvs_frame_delay > 0)
+ num_delay_frames = dvs_frame_delay + 1;
+
+ switch (mode)
+ {
+ case IA_CSS_PIPE_ID_CAPTURE: {
+ struct ia_css_capture_settings *mycs_capture = &pipe->pipe_settings.capture;
+ (void)mycs_capture;
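+ /* capture pipes currently allocate no delay frames */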
+ return err;
+ }
+ break;
+ case IA_CSS_PIPE_ID_VIDEO: {
+ struct ia_css_video_settings *mycs_video = &pipe->pipe_settings.video;
+
+ ref_info = mycs_video->video_binary.internal_frame_info;
+ /* The ref frame expects:
+ * 1. Y plane
+ * 2. UV plane with line interleaving, like below
+ * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
+ *
+ * This format is not YUV420 (which has separate Y, U and V
+ * planes). It is closer to NV12, except that in NV12 the UV
+ * plane is pixel-interleaved, like UVUVUVUVUVUVUVUVU...
+ *
+ * TODO: make this ref_frame format a separate frame format
+ */
+ ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
+ delay_frames = mycs_video->delay_frames;
+ }
+ break;
+ case IA_CSS_PIPE_ID_PREVIEW: {
+ struct ia_css_preview_settings *mycs_preview = &pipe->pipe_settings.preview;
+
+ ref_info = mycs_preview->preview_binary.internal_frame_info;
+ /* The ref frame expects:
+ * 1. Y plane
+ * 2. UV plane with line interleaving, like below
+ * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
+ *
+ * This format is not YUV420 (which has separate Y, U and V
+ * planes). It is closer to NV12, except that in NV12 the UV
+ * plane is pixel-interleaved, like UVUVUVUVUVUVUVUVU...
+ *
+ * TODO: make this ref_frame format a separate frame format
+ */
+ ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
+ delay_frames = mycs_preview->delay_frames;
+ }
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ref_info.raw_bit_depth = SH_CSS_REF_BIT_DEPTH;
+
+ assert(num_delay_frames <= MAX_NUM_VIDEO_DELAY_FRAMES);
+ for (i = 0; i < num_delay_frames; i++)
+ {
+ err = ia_css_frame_allocate_from_info(&delay_frames[i], &ref_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err load_advanced_binaries(
+ struct ia_css_pipe *pipe) {
+ struct ia_css_frame_info pre_in_info, gdc_in_info,
+ post_in_info, post_out_info,
+ vf_info, *vf_pp_in_info, *pipe_out_info,
+ *pipe_vf_out_info;
+ bool need_pp;
+ bool need_isp_copy = true;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+ if (pipe->pipe_settings.capture.pre_isp_binary.info)
+ return IA_CSS_SUCCESS;
+ pipe_out_info = &pipe->output_info[0];
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+
+ vf_info = *pipe_vf_out_info;
+ err = ia_css_util_check_vf_out_info(pipe_out_info, &vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ need_pp = need_capture_pp(pipe);
+
+ ia_css_frame_info_set_format(&vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_pp) {
+ struct ia_css_binary_descr capture_pp_descr;
+
+ ia_css_pipe_get_capturepp_binarydesc(pipe,
+ &capture_pp_descr, &post_out_info, pipe_out_info, &vf_info);
+ err = ia_css_binary_find(&capture_pp_descr,
+ &pipe->pipe_settings.capture.capture_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ post_out_info = *pipe_out_info;
+ }
+
+ /* Post-gdc */
+ {
+ struct ia_css_binary_descr post_gdc_descr;
+
+ ia_css_pipe_get_post_gdc_binarydesc(pipe,
+ &post_gdc_descr, &post_in_info, &post_out_info, &vf_info);
+ err = ia_css_binary_find(&post_gdc_descr,
+ &pipe->pipe_settings.capture.post_isp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ /* Gdc */
+ {
+ struct ia_css_binary_descr gdc_descr;
+
+ ia_css_pipe_get_gdc_binarydesc(pipe, &gdc_descr, &gdc_in_info,
+ &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
+ err = ia_css_binary_find(&gdc_descr,
+ &pipe->pipe_settings.capture.anr_gdc_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding =
+ pipe->pipe_settings.capture.post_isp_binary.left_padding;
+
+ /* Pre-gdc */
+ {
+ struct ia_css_binary_descr pre_gdc_descr;
+
+ ia_css_pipe_get_pre_gdc_binarydesc(pipe, &pre_gdc_descr, &pre_in_info,
+ &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
+ err = ia_css_binary_find(&pre_gdc_descr,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ pipe->pipe_settings.capture.pre_isp_binary.left_padding =
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding;
+
+ /* Viewfinder post-processing */
+ if (need_pp) {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info;
+ } else {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.post_isp_binary.vf_frame_info;
+ }
+
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
+ err = ia_css_binary_find(&vf_pp_descr,
+ &pipe->pipe_settings.capture.vf_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ /* Copy */
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* For CSI2+, only the direct sensor mode/online requires ISP copy */
+ need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+#endif
+ if (need_isp_copy)
+ load_copy_binary(pipe,
+ &pipe->pipe_settings.capture.copy_binary,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+
+ return err;
+}
+
+static enum ia_css_err load_bayer_isp_binaries(
+ struct ia_css_pipe *pipe) {
+ struct ia_css_frame_info pre_isp_in_info, *pipe_out_info;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_binary_descr pre_de_descr;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+ pipe_out_info = &pipe->output_info[0];
+
+ if (pipe->pipe_settings.capture.pre_isp_binary.info)
+ return IA_CSS_SUCCESS;
+
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ ia_css_pipe_get_pre_de_binarydesc(pipe, &pre_de_descr,
+ &pre_isp_in_info,
+ pipe_out_info);
+
+ err = ia_css_binary_find(&pre_de_descr,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+
+ return err;
+}
+
+static enum ia_css_err load_low_light_binaries(
+ struct ia_css_pipe *pipe) {
+ struct ia_css_frame_info pre_in_info, anr_in_info,
+ post_in_info, post_out_info,
+ vf_info, *pipe_vf_out_info, *pipe_out_info,
+ *vf_pp_in_info;
+ bool need_pp;
+ bool need_isp_copy = true;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ if (pipe->pipe_settings.capture.pre_isp_binary.info)
+ return IA_CSS_SUCCESS;
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+ pipe_out_info = &pipe->output_info[0];
+
+ vf_info = *pipe_vf_out_info;
+ err = ia_css_util_check_vf_out_info(pipe_out_info,
+ &vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ need_pp = need_capture_pp(pipe);
+
+ ia_css_frame_info_set_format(&vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_pp) {
+ struct ia_css_binary_descr capture_pp_descr;
+
+ ia_css_pipe_get_capturepp_binarydesc(pipe,
+ &capture_pp_descr, &post_out_info, pipe_out_info, &vf_info);
+ err = ia_css_binary_find(&capture_pp_descr,
+ &pipe->pipe_settings.capture.capture_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ post_out_info = *pipe_out_info;
+ }
+
+ /* Post-anr */
+ {
+ struct ia_css_binary_descr post_anr_descr;
+
+ ia_css_pipe_get_post_anr_binarydesc(pipe,
+ &post_anr_descr, &post_in_info, &post_out_info, &vf_info);
+ err = ia_css_binary_find(&post_anr_descr,
+ &pipe->pipe_settings.capture.post_isp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ /* Anr */
+ {
+ struct ia_css_binary_descr anr_descr;
+
+ ia_css_pipe_get_anr_binarydesc(pipe, &anr_descr, &anr_in_info,
+ &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
+ err = ia_css_binary_find(&anr_descr,
+ &pipe->pipe_settings.capture.anr_gdc_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding =
+ pipe->pipe_settings.capture.post_isp_binary.left_padding;
+
+ /* Pre-anr */
+ {
+ struct ia_css_binary_descr pre_anr_descr;
+
+ ia_css_pipe_get_pre_anr_binarydesc(pipe, &pre_anr_descr, &pre_in_info,
+ &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
+ err = ia_css_binary_find(&pre_anr_descr,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ pipe->pipe_settings.capture.pre_isp_binary.left_padding =
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding;
+
+ /* Viewfinder post-processing */
+ if (need_pp) {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info;
+ } else {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.post_isp_binary.vf_frame_info;
+ }
+
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
+ err = ia_css_binary_find(&vf_pp_descr,
+ &pipe->pipe_settings.capture.vf_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ /* Copy */
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* For CSI2+, only the direct sensor mode/online requires ISP copy */
+ need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+#endif
+ if (need_isp_copy)
+ err = load_copy_binary(pipe,
+ &pipe->pipe_settings.capture.copy_binary,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+
+ return err;
+}
+
+static bool copy_on_sp(struct ia_css_pipe *pipe) {
+ bool rval;
+
+ assert(pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "copy_on_sp() enter:\n");
+
+ rval = true;
+
+ rval &= (pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
+
+ rval &= (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW);
+
+ rval &= ((pipe->stream->config.input_config.format ==
+ ATOMISP_INPUT_FORMAT_BINARY_8) ||
+ (pipe->config.mode == IA_CSS_PIPE_MODE_COPY));
+
+ return rval;
+}
+
+static enum ia_css_err load_capture_binaries(
+ struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool must_be_raw;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ if (pipe->pipe_settings.capture.primary_binary[0].info) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+
+ /* In primary, advanced, low light or bayer mode,
+ the input format must be raw */
+ must_be_raw =
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT;
+ err = ia_css_util_check_input(&pipe->stream->config, must_be_raw, false);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (copy_on_sp(pipe) &&
+ pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) {
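+ /* the compressed bitstream is passed through untouched; describe
+ * the output as a single line of JPEG_BYTES 8-bit elements */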
+ ia_css_frame_info_init(
+ &pipe->output_info[0],
+ JPEG_BYTES,
+ 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8,
+ 0);
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+
+ switch (pipe->config.default_capture_config.mode) {
+ case IA_CSS_CAPTURE_MODE_RAW:
+ err = load_copy_binaries(pipe);
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (err == IA_CSS_SUCCESS)
+ pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
+#endif
+ break;
+ case IA_CSS_CAPTURE_MODE_BAYER:
+ err = load_bayer_isp_binaries(pipe);
+ break;
+ case IA_CSS_CAPTURE_MODE_PRIMARY:
+ err = load_primary_binaries(pipe);
+ break;
+ case IA_CSS_CAPTURE_MODE_ADVANCED:
+ err = load_advanced_binaries(pipe);
+ break;
+ case IA_CSS_CAPTURE_MODE_LOW_LIGHT:
+ err = load_low_light_binaries(pipe);
+ break;
+ }
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+unload_capture_binaries(struct ia_css_pipe *pipe) {
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((!pipe) || ((pipe->mode != IA_CSS_PIPE_ID_CAPTURE) && (pipe->mode != IA_CSS_PIPE_ID_COPY)))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.capture.copy_binary);
+ for (i = 0; i < MAX_NUM_PRIMARY_STAGES; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.capture.primary_binary[i]);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.pre_isp_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.anr_gdc_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.post_isp_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.capture_pp_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.capture_ldc_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.vf_pp_binary);
+
+ for (i = 0; i < pipe->pipe_settings.capture.num_yuv_scaler; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.capture.yuv_scaler_binary[i]);
+
+ kfree(pipe->pipe_settings.capture.is_output_stage);
+ pipe->pipe_settings.capture.is_output_stage = NULL;
+ kfree(pipe->pipe_settings.capture.yuv_scaler_binary);
+ pipe->pipe_settings.capture.yuv_scaler_binary = NULL;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static bool
+need_downscaling(const struct ia_css_resolution in_res,
+ const struct ia_css_resolution out_res) {
+ if (in_res.width > out_res.width || in_res.height > out_res.height)
+ return true;
+
+ return false;
+}
+
+static bool
+need_yuv_scaler_stage(const struct ia_css_pipe *pipe) {
+ unsigned int i;
+ struct ia_css_resolution in_res, out_res;
+
+ bool need_format_conversion = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP);
+
+ /* TODO: make generic function */
+ need_format_conversion =
+ ((pipe->stream->config.input_config.format ==
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) &&
+ (pipe->output_info[0].format != IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8));
+
+ in_res = pipe->config.input_effective_res;
+
+ if (pipe->config.enable_dz)
+ return true;
+
+ if ((pipe->output_info[0].res.width != 0) && need_format_conversion)
+ return true;
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ out_res = pipe->output_info[i].res;
+
+ /* A non-zero width means it is a valid output port */
+ if ((out_res.width != 0) && need_downscaling(in_res, out_res))
+ return true;
+ }
+
+ return false;
+}
+
+/* TODO: it is temporarily created from ia_css_pipe_create_cas_scaler_desc */
+/* which has some hard-coded knowledge which prevents reuse of the function. */
+/* Later, merge this with ia_css_pipe_create_cas_scaler_desc */
+static enum ia_css_err ia_css_pipe_create_cas_scaler_desc_single_output(
+ struct ia_css_frame_info *cas_scaler_in_info,
+ struct ia_css_frame_info *cas_scaler_out_info,
+ struct ia_css_frame_info *cas_scaler_vf_info,
+ struct ia_css_cas_binary_descr *descr) {
+ unsigned int i;
+ unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame_info tmp_in_info;
+
+ unsigned int max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;
+
+ assert(cas_scaler_in_info);
+ assert(cas_scaler_out_info);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_create_cas_scaler_desc_single_output() enter:\n");
+
+ /* We assume that this function is used only for the single-output-port case. */
+ descr->num_output_stage = 1;
+
+ hor_ds_factor = CEIL_DIV(cas_scaler_in_info->res.width,
+ cas_scaler_out_info->res.width);
+ ver_ds_factor = CEIL_DIV(cas_scaler_in_info->res.height,
+ cas_scaler_out_info->res.height);
+ /* use the same horizontal and vertical downscaling factor for simplicity */
+ assert(hor_ds_factor == ver_ds_factor);
+
+ i = 1;
+ while (i < hor_ds_factor) {
+ descr->num_stage++;
+ i *= max_scale_factor_per_stage;
+ }
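+ /* Worked example, assuming MAX_PREFERRED_YUV_DS_PER_STEP == 2: an
+ * 8x total downscale needs 3 stages (2 * 2 * 2), e.g. 4096x3072 ->
+ * 2048x1536 -> 1024x768 -> 512x384, with only the last stage
+ * marked as an output stage */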
+
+ descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->in_info) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->internal_out_info = kmalloc(descr->num_stage * sizeof(
+ struct ia_css_frame_info), GFP_KERNEL);
+ if (!descr->internal_out_info) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->out_info) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->vf_info) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL);
+ if (!descr->is_output_stage) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+
+ tmp_in_info = *cas_scaler_in_info;
+ for (i = 0; i < descr->num_stage; i++) {
+ descr->in_info[i] = tmp_in_info;
+ if ((tmp_in_info.res.width / max_scale_factor_per_stage) <=
+ cas_scaler_out_info->res.width) {
+ descr->is_output_stage[i] = true;
+ if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
+ descr->internal_out_info[i].res.width = cas_scaler_out_info->res.width;
+ descr->internal_out_info[i].res.height = cas_scaler_out_info->res.height;
+ descr->internal_out_info[i].padded_width = cas_scaler_out_info->padded_width;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ } else {
+ assert(i == (descr->num_stage - 1));
+ descr->internal_out_info[i].res.width = 0;
+ descr->internal_out_info[i].res.height = 0;
+ }
+ descr->out_info[i].res.width = cas_scaler_out_info->res.width;
+ descr->out_info[i].res.height = cas_scaler_out_info->res.height;
+ descr->out_info[i].padded_width = cas_scaler_out_info->padded_width;
+ descr->out_info[i].format = cas_scaler_out_info->format;
+ if (cas_scaler_vf_info) {
+ descr->vf_info[i].res.width = cas_scaler_vf_info->res.width;
+ descr->vf_info[i].res.height = cas_scaler_vf_info->res.height;
+ descr->vf_info[i].padded_width = cas_scaler_vf_info->padded_width;
+ ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
+ } else {
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ descr->vf_info[i].padded_width = 0;
+ }
+ } else {
+ descr->is_output_stage[i] = false;
+ descr->internal_out_info[i].res.width = tmp_in_info.res.width /
+ max_scale_factor_per_stage;
+ descr->internal_out_info[i].res.height = tmp_in_info.res.height /
+ max_scale_factor_per_stage;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ ia_css_frame_info_init(&descr->internal_out_info[i],
+ tmp_in_info.res.width / max_scale_factor_per_stage,
+ tmp_in_info.res.height / max_scale_factor_per_stage,
+ IA_CSS_FRAME_FORMAT_YUV420, 0);
+ descr->out_info[i].res.width = 0;
+ descr->out_info[i].res.height = 0;
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ }
+ tmp_in_info = descr->internal_out_info[i];
+ }
+ERR:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_create_cas_scaler_desc_single_output() leave, err=%d\n",
+ err);
+ return err;
+}
+
+/* FIXME: merge most of this and single output version */
+static enum ia_css_err ia_css_pipe_create_cas_scaler_desc(
+ struct ia_css_pipe *pipe,
+ struct ia_css_cas_binary_descr *descr) {
+ struct ia_css_frame_info in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ struct ia_css_frame_info *out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info *vf_out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info tmp_in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ unsigned int i, j;
+ unsigned int hor_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE],
+ ver_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE],
+ scale_factor = 0;
+ unsigned int num_stages = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ unsigned int max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_create_cas_scaler_desc() enter:\n");
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ out_info[i] = NULL;
+ vf_out_info[i] = NULL;
+ hor_scale_factor[i] = 0;
+ ver_scale_factor[i] = 0;
+ }
+
+ in_info.res = pipe->config.input_effective_res;
+ in_info.padded_width = in_info.res.width;
+ descr->num_output_stage = 0;
+ /* Find out how much scaling we need for each output */
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (pipe->output_info[i].res.width != 0) {
+ out_info[i] = &pipe->output_info[i];
+ if (pipe->vf_output_info[i].res.width != 0)
+ vf_out_info[i] = &pipe->vf_output_info[i];
+ descr->num_output_stage += 1;
+ }
+
+ if (out_info[i]) {
+ hor_scale_factor[i] = CEIL_DIV(in_info.res.width, out_info[i]->res.width);
+ ver_scale_factor[i] = CEIL_DIV(in_info.res.height, out_info[i]->res.height);
+ /* use the same horizontal and vertical scaling factor for simplicity */
+ assert(hor_scale_factor[i] == ver_scale_factor[i]);
+ scale_factor = 1;
+ do {
+ num_stages++;
+ scale_factor *= max_scale_factor_per_stage;
+ } while (scale_factor < hor_scale_factor[i]);
+
+ in_info.res = out_info[i]->res;
+ }
+ }
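+ /* num_stages accumulates over all output ports; e.g. with a
+ * per-stage factor of 2, a first output needing 4x (2 stages) and
+ * a second output needing a further 2x (1 stage) give 3 stages */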
+
+ if (need_yuv_scaler_stage(pipe) && (num_stages == 0))
+ num_stages = 1;
+
+ descr->num_stage = num_stages;
+
+ descr->in_info = kmalloc_array(descr->num_stage,
+ sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (!descr->in_info) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->internal_out_info = kmalloc_array(descr->num_stage,
+ sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (!descr->internal_out_info) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->out_info = kmalloc_array(descr->num_stage,
+ sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (!descr->out_info) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->vf_info = kmalloc_array(descr->num_stage,
+ sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (!descr->vf_info) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->is_output_stage = kmalloc_array(descr->num_stage, sizeof(bool), GFP_KERNEL);
+ if (!descr->is_output_stage) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (out_info[i]) {
+ if (i > 0) {
+ assert((out_info[i - 1]->res.width >= out_info[i]->res.width) &&
+ (out_info[i - 1]->res.height >= out_info[i]->res.height));
+ }
+ }
+ }
+
+ tmp_in_info.res = pipe->config.input_effective_res;
+ tmp_in_info.format = IA_CSS_FRAME_FORMAT_YUV420;
+ for (i = 0, j = 0; i < descr->num_stage; i++) {
+ assert(j < 2);
+ assert(out_info[j]);
+
+ descr->in_info[i] = tmp_in_info;
+ if ((tmp_in_info.res.width / max_scale_factor_per_stage) <=
+ out_info[j]->res.width) {
+ descr->is_output_stage[i] = true;
+ if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
+ descr->internal_out_info[i].res.width = out_info[j]->res.width;
+ descr->internal_out_info[i].res.height = out_info[j]->res.height;
+ descr->internal_out_info[i].padded_width = out_info[j]->padded_width;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ } else {
+ assert(i == (descr->num_stage - 1));
+ descr->internal_out_info[i].res.width = 0;
+ descr->internal_out_info[i].res.height = 0;
+ }
+ descr->out_info[i].res.width = out_info[j]->res.width;
+ descr->out_info[i].res.height = out_info[j]->res.height;
+ descr->out_info[i].padded_width = out_info[j]->padded_width;
+ descr->out_info[i].format = out_info[j]->format;
+ if (vf_out_info[j]) {
+ descr->vf_info[i].res.width = vf_out_info[j]->res.width;
+ descr->vf_info[i].res.height = vf_out_info[j]->res.height;
+ descr->vf_info[i].padded_width = vf_out_info[j]->padded_width;
+ ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
+ } else {
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ descr->vf_info[i].padded_width = 0;
+ }
+ j++;
+ } else {
+ descr->is_output_stage[i] = false;
+ descr->internal_out_info[i].res.width = tmp_in_info.res.width /
+ max_scale_factor_per_stage;
+ descr->internal_out_info[i].res.height = tmp_in_info.res.height /
+ max_scale_factor_per_stage;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ ia_css_frame_info_init(&descr->internal_out_info[i],
+ tmp_in_info.res.width / max_scale_factor_per_stage,
+ tmp_in_info.res.height / max_scale_factor_per_stage,
+ IA_CSS_FRAME_FORMAT_YUV420, 0);
+ descr->out_info[i].res.width = 0;
+ descr->out_info[i].res.height = 0;
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ }
+ tmp_in_info = descr->internal_out_info[i];
+ }
+ERR:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_create_cas_scaler_desc() leave, err=%d\n",
+ err);
+ return err;
+}
+
+static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr
+ *descr) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_destroy_cas_scaler_desc() enter:\n");
+ kfree(descr->in_info);
+ descr->in_info = NULL;
+ kfree(descr->internal_out_info);
+ descr->internal_out_info = NULL;
+ kfree(descr->out_info);
+ descr->out_info = NULL;
+ kfree(descr->vf_info);
+ descr->vf_info = NULL;
+ kfree(descr->is_output_stage);
+ descr->is_output_stage = NULL;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_destroy_cas_scaler_desc() leave\n");
+}
+
+static enum ia_css_err
+load_yuvpp_binaries(struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool need_scaler = false;
+ struct ia_css_frame_info *vf_pp_in_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_yuvpp_settings *mycs;
+ struct ia_css_binary *next_binary;
+ struct ia_css_cas_binary_descr cas_scaler_descr = { };
+ unsigned int i, j;
+ bool need_isp_copy_binary = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->stream);
+ assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP);
+
+ if (pipe->pipe_settings.yuvpp.copy_binary.info)
+ goto ERR;
+
+ /* Set both must_be_raw and must_be_yuv to false so that yuvpp can also take RGB inputs */
+ err = ia_css_util_check_input(&pipe->stream->config, false, false);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ mycs = &pipe->pipe_settings.yuvpp;
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++)
+ {
+ if (pipe->vf_output_info[i].res.width != 0) {
+ err = ia_css_util_check_vf_out_info(&pipe->output_info[i],
+ &pipe->vf_output_info[i]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ vf_pp_in_info[i] = NULL;
+ }
+
+ need_scaler = need_yuv_scaler_stage(pipe);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_scaler)
+ {
+ struct ia_css_binary_descr yuv_scaler_descr;
+
+ err = ia_css_pipe_create_cas_scaler_desc(pipe,
+ &cas_scaler_descr);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ mycs->num_output = cas_scaler_descr.num_output_stage;
+ mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
+ mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(struct ia_css_binary), GFP_KERNEL);
+ if (!mycs->yuv_scaler_binary) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(bool), GFP_KERNEL);
+ if (!mycs->is_output_stage) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ for (i = 0; i < cas_scaler_descr.num_stage; i++) {
+ mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe,
+ &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
+ err = ia_css_binary_find(&yuv_scaler_descr,
+ &mycs->yuv_scaler_binary[i]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ } else
+ {
+ mycs->num_output = 1;
+ }
+
+ if (need_scaler)
+ {
+ next_binary = &mycs->yuv_scaler_binary[0];
+ } else
+ {
+ next_binary = NULL;
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /*
+ * NOTES
+ * - Why does the "yuvpp" pipe need "isp_copy_binary" (i.e. ISP Copy) when
+ * its input is "ATOMISP_INPUT_FORMAT_YUV422_8"?
+ *
+ * In most use cases, the first stage in the "yuvpp" pipe is the "yuv_scale_
+ * binary". However, the "yuv_scale_binary" does NOT support the input-frame
+ * format "IA_CSS_STREAM_FORMAT_YUV422_8".
+ *
+ * Hence, the "isp_copy_binary" is required to be present in front of the "yuv
+ * _scale_binary". It would translate the input-frame to the frame formats that
+ * are supported by the "yuv_scale_binary".
+ *
+ * Please refer to "FrameWork/css/isp/pipes/capture_pp/capture_pp_1.0/capture_
+ * pp_defs.h" for the list of input-frame formats that are supported by the
+ * "yuv_scale_binary".
+ */
+ need_isp_copy_binary =
+ (pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV422_8);
+#else /* !USE_INPUT_SYSTEM_VERSION_2401 */
+ need_isp_copy_binary = true;
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+
+ if (need_isp_copy_binary)
+ {
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ next_binary);
+
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ /*
+ * NOTES
+ * - Why is "pipe->pipe_settings.capture.copy_binary.online" specified?
+ *
+ * In some use cases, the first stage in the "yuvpp" pipe is the
+ * "isp_copy_binary". The "isp_copy_binary" is designed to process
+ * the input from either the system DDR or from the IPU internal VMEM.
+ * So it provides the flag "online" to specify where its input is from,
+ * i.e.:
+ *
+ * (1) "online <= true", the input is from the IPU internal VMEM.
+ * (2) "online <= false", the input is from the system DDR.
+ *
+ * In other use cases, the first stage in the "yuvpp" pipe is the
+ * "yuv_scale_binary". "The "yuv_scale_binary" is designed to process the
+ * input ONLY from the system DDR. So it does not provide the flag "online"
+ * to specify where its input is from.
+ */
+ pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
+ }
+
+ /* Viewfinder post-processing */
+ if (need_scaler)
+ {
+ for (i = 0, j = 0; i < mycs->num_yuv_scaler; i++) {
+ if (mycs->is_output_stage[i]) {
+ assert(j < 2);
+ vf_pp_in_info[j] =
+ &mycs->yuv_scaler_binary[i].vf_frame_info;
+ j++;
+ }
+ }
+ mycs->num_vf_pp = j;
+ } else
+ {
+ vf_pp_in_info[0] =
+ &mycs->copy_binary.vf_frame_info;
+ for (i = 1; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ vf_pp_in_info[i] = NULL;
+ }
+ mycs->num_vf_pp = 1;
+ }
+ mycs->vf_pp_binary = kzalloc(mycs->num_vf_pp * sizeof(struct ia_css_binary),
+ GFP_KERNEL);
+ if (!mycs->vf_pp_binary)
+ {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ for (i = 0; i < mycs->num_vf_pp; i++)
+ {
+ if (pipe->vf_output_info[i].res.width != 0) {
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info[i], &pipe->vf_output_info[i]);
+ err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary[i]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ }
+ }
+
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ERR:
+ if (need_scaler)
+ {
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "load_yuvpp_binaries() leave, err=%d\n",
+ err);
+ return err;
+}
+
+static enum ia_css_err
+unload_yuvpp_binaries(struct ia_css_pipe *pipe) {
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.yuvpp.copy_binary);
+ for (i = 0; i < pipe->pipe_settings.yuvpp.num_yuv_scaler; i++)
+ {
+ ia_css_binary_unload(&pipe->pipe_settings.yuvpp.yuv_scaler_binary[i]);
+ }
+ for (i = 0; i < pipe->pipe_settings.yuvpp.num_vf_pp; i++)
+ {
+ ia_css_binary_unload(&pipe->pipe_settings.yuvpp.vf_pp_binary[i]);
+ }
+ kfree(pipe->pipe_settings.yuvpp.is_output_stage);
+ pipe->pipe_settings.yuvpp.is_output_stage = NULL;
+ kfree(pipe->pipe_settings.yuvpp.yuv_scaler_binary);
+ pipe->pipe_settings.yuvpp.yuv_scaler_binary = NULL;
+ kfree(pipe->pipe_settings.yuvpp.vf_pp_binary);
+ pipe->pipe_settings.yuvpp.vf_pp_binary = NULL;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err yuvpp_start(struct ia_css_pipe *pipe) {
+ struct ia_css_binary *copy_binary;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum sh_css_pipe_config_override copy_ovrd;
+ enum ia_css_input_mode yuvpp_pipe_input_mode;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ yuvpp_pipe_input_mode = pipe->stream->config.mode;
+
+ copy_binary = &pipe->pipe_settings.yuvpp.copy_binary;
+
+ sh_css_metrics_start_frame();
+
+ /* multi stream video needs mipi buffers */
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && (defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401))
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+#endif
+
+ {
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+ }
+
+ start_pipe(pipe, copy_ovrd, yuvpp_pipe_input_mode);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+sh_css_pipe_unload_binaries(struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if (!pipe)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* PIPE_MODE_COPY has no binaries, but has output frames to outside */
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+
+ switch (pipe->mode)
+ {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = unload_preview_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = unload_video_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = unload_capture_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = unload_yuvpp_binaries(pipe);
+ break;
+ default:
+ break;
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+sh_css_pipe_load_binaries(struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_pipe_load_binaries() enter:\n");
+
+	/* PIPE_MODE_COPY has no binaries, but it still outputs frames */
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
+ return err;
+
+ switch (pipe->mode)
+ {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = load_preview_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = load_video_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = load_capture_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = load_yuvpp_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ break;
+ default:
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ break;
+ }
+ if (err != IA_CSS_SUCCESS)
+ {
+ if (sh_css_pipe_unload_binaries(pipe) != IA_CSS_SUCCESS) {
+ /* currently css does not support multiple error returns in a single function,
+ * using IA_CSS_ERR_INTERNAL_ERROR in this case */
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ }
+ return err;
+}
+
+static enum ia_css_err
+create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
+ struct ia_css_pipeline *me;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage *vf_pp_stage = NULL,
+ *copy_stage = NULL,
+ *yuv_scaler_stage = NULL;
+ struct ia_css_binary *copy_binary,
+ *vf_pp_binary,
+ *yuv_scaler_binary;
+ bool need_scaler = false;
+ unsigned int num_stage, num_vf_pp_stage, num_output_stage;
+ unsigned int i, j;
+
+ struct ia_css_frame *in_frame = NULL;
+ struct ia_css_frame *out_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame *bin_out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_pipeline_stage_desc stage_desc;
+ bool need_in_frameinfo_memory = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+ bool buffered_sensor = false;
+ bool online = false;
+ bool continuous = false;
+#endif
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++)
+ {
+ out_frame[i] = NULL;
+ vf_frame[i] = NULL;
+ }
+ ia_css_pipe_util_create_output_frames(bin_out_frame);
+ num_stage = pipe->pipe_settings.yuvpp.num_yuv_scaler;
+ num_vf_pp_stage = pipe->pipe_settings.yuvpp.num_vf_pp;
+ num_output_stage = pipe->pipe_settings.yuvpp.num_output;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Capture
+ * - Direct Sensor Mode Continuous Capture
+ * - Buffered Sensor Mode Continuous Capture
+ */
+ sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+ buffered_sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor && continuous));
+#else
+	/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+#endif
+ /* the input frame can come from:
+ * a) memory: connect yuvscaler to me->in_frame
+ * b) sensor, via copy binary: connect yuvscaler to copy binary later on */
+ if (need_in_frameinfo_memory)
+ {
+ /* TODO: improve for different input formats. */
+
+ /*
+ * "pipe->stream->config.input_config.format" represents the sensor output
+ * frame format, e.g. YUV422 8-bit.
+ *
+ * "in_frame_format" represents the imaging pipe's input frame format, e.g.
+ * Bayer-Quad RAW.
+ */
+ int in_frame_format;
+
+ if (pipe->stream->config.input_config.format ==
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) {
+ in_frame_format = IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8;
+ } else if (pipe->stream->config.input_config.format ==
+ ATOMISP_INPUT_FORMAT_YUV422_8) {
+ /*
+ * When the sensor output frame format is "ATOMISP_INPUT_FORMAT_YUV422_8",
+ * the "isp_copy_var" binary is selected as the first stage in the yuvpp
+ * pipe.
+ *
+ * For the "isp_copy_var" binary, it reads the YUV422-8 pixels from
+ * the frame buffer (at DDR) to the frame-line buffer (at VMEM).
+ *
+			 * As of now, the "isp_copy_var" binary does NOT provide a separate
+			 * frame-line buffer to store the YUV422-8 pixels. Instead, it stores
+ * the YUV422-8 pixels in the frame-line buffer which is designed to
+ * store the Bayer-Quad RAW pixels.
+ *
+ * To direct the "isp_copy_var" binary reading from the RAW frame-line
+ * buffer, its input frame format must be specified as "IA_CSS_FRAME_
+ * FORMAT_RAW".
+ */
+ in_frame_format = IA_CSS_FRAME_FORMAT_RAW;
+ } else {
+ in_frame_format = IA_CSS_FRAME_FORMAT_NV12;
+ }
+
+ err = init_in_frameinfo_memory_defaults(pipe,
+ &me->in_frame,
+ in_frame_format);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ in_frame = &me->in_frame;
+ } else
+ {
+ in_frame = NULL;
+ }
+
+ for (i = 0; i < num_output_stage; i++)
+ {
+ assert(i < IA_CSS_PIPE_MAX_OUTPUT_STAGE);
+ if (pipe->output_info[i].res.width != 0) {
+ err = init_out_frameinfo_defaults(pipe, &me->out_frame[i], i);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ out_frame[i] = &me->out_frame[i];
+ }
+
+ /* Construct vf_frame info (only in case we have VF) */
+ if (pipe->vf_output_info[i].res.width != 0) {
+ err = init_vf_frameinfo_defaults(pipe, &me->vf_frame[i], i);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ vf_frame[i] = &me->vf_frame[i];
+ }
+ }
+
+ copy_binary = &pipe->pipe_settings.yuvpp.copy_binary;
+ vf_pp_binary = pipe->pipe_settings.yuvpp.vf_pp_binary;
+ yuv_scaler_binary = pipe->pipe_settings.yuvpp.yuv_scaler_binary;
+ need_scaler = need_yuv_scaler_stage(pipe);
+
+ if (pipe->pipe_settings.yuvpp.copy_binary.info)
+ {
+ struct ia_css_frame *in_frame_local = NULL;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+		/* When isp copy is enabled, in_frame needs to be passed. */
+ if (!online)
+ in_frame_local = in_frame;
+#endif
+
+ if (need_scaler) {
+ ia_css_pipe_util_set_output_frames(bin_out_frame, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ bin_out_frame, in_frame_local, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(bin_out_frame, 0, out_frame[0]);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ bin_out_frame, in_frame_local, NULL);
+ }
+
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &copy_stage);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (copy_stage) {
+ /* if we use yuv scaler binary, vf output should be from there */
+ copy_stage->args.copy_vf = !need_scaler;
+ /* for yuvpp pipe, it should always be enabled */
+ copy_stage->args.copy_output = true;
+ /* connect output of copy binary to input of yuv scaler */
+ in_frame = copy_stage->args.out_frame[0];
+ }
+ }
+
+ if (need_scaler)
+ {
+ struct ia_css_frame *tmp_out_frame = NULL;
+ struct ia_css_frame *tmp_vf_frame = NULL;
+ struct ia_css_frame *tmp_in_frame = in_frame;
+
+ for (i = 0, j = 0; i < num_stage; i++) {
+ assert(j < num_output_stage);
+ if (pipe->pipe_settings.yuvpp.is_output_stage[i]) {
+ tmp_out_frame = out_frame[j];
+ tmp_vf_frame = vf_frame[j];
+ } else {
+ tmp_out_frame = NULL;
+ tmp_vf_frame = NULL;
+ }
+
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame,
+ NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* we use output port 1 as internal output port */
+ tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
+ if (pipe->pipe_settings.yuvpp.is_output_stage[i]) {
+ if (tmp_vf_frame && (tmp_vf_frame->info.res.width != 0)) {
+ in_frame = yuv_scaler_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, tmp_vf_frame, &vf_pp_binary[j],
+ &vf_pp_stage);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ j++;
+ }
+ }
+ } else if (copy_stage)
+ {
+ if (vf_frame[0] && vf_frame[0]->info.res.width != 0) {
+ in_frame = copy_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame[0], &vf_pp_binary[0],
+ &vf_pp_stage);
+ }
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+
+ return IA_CSS_SUCCESS;
+}
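+
+/*
+ * A sketch of the stage graph built above, inferred from the code
+ * (optional stages in brackets, N = num_yuv_scaler):
+ *
+ *   in_frame -> [copy] -> yuv_scaler[0] -> ... -> yuv_scaler[N-1]
+ *                              |                       |
+ *                          [vf_pp[j]]             [vf_pp[j]]
+ *
+ * A vf_pp stage is attached only at scaler stages flagged in
+ * is_output_stage[], and scaler i feeds scaler i+1 via output port 1.
+ */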
+
+static enum ia_css_err
+create_host_copy_pipeline(struct ia_css_pipe *pipe,
+ unsigned int max_input_width,
+ struct ia_css_frame *out_frame) {
+ struct ia_css_pipeline *me;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_copy_pipeline() enter:\n");
+
+ /* pipeline already created as part of create_host_pipeline_structure */
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+
+ /* Construct out_frame info */
+ out_frame->contiguous = false;
+ out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+
+ if (copy_on_sp(pipe) &&
+ pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8)
+ {
+ ia_css_frame_info_init(
+ &out_frame->info,
+ JPEG_BYTES,
+ 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8,
+ 0);
+ } else if (out_frame->info.format == IA_CSS_FRAME_FORMAT_RAW)
+ {
+ out_frame->info.raw_bit_depth =
+ ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ }
+
+ me->num_stages = 1;
+ me->pipe_id = IA_CSS_PIPE_ID_COPY;
+ pipe->mode = IA_CSS_PIPE_ID_COPY;
+
+ ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
+ IA_CSS_PIPELINE_RAW_COPY, max_input_width);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ NULL);
+
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_copy_pipeline() leave:\n");
+
+ return err;
+}
+
+static enum ia_css_err
+create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe) {
+ struct ia_css_pipeline *me = &pipe->pipeline;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage_desc stage_desc;
+ struct ia_css_frame *out_frame = &me->out_frame[0];
+ struct ia_css_pipeline_stage *out_stage = NULL;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+ unsigned int max_input_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_isyscopy_capture_pipeline() enter:\n");
+ ia_css_pipeline_clean(me);
+
+ /* Construct out_frame info */
+ err = sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, 0);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ out_frame->contiguous = false;
+ out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id, &queue_id);
+ out_frame->dynamic_queue_id = queue_id;
+ out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME;
+
+ me->num_stages = 1;
+ me->pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+ pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
+ ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
+ IA_CSS_PIPELINE_ISYS_COPY, max_input_width);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc, &out_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ ia_css_pipeline_finalize_stages(me, pipe->stream->config.continuous);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_isyscopy_capture_pipeline() leave:\n");
+
+ return err;
+}
+
+static enum ia_css_err
+create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
+ struct ia_css_pipeline *me;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_capture_mode mode;
+ struct ia_css_pipeline_stage *current_stage = NULL;
+ struct ia_css_pipeline_stage *yuv_scaler_stage = NULL;
+ struct ia_css_binary *copy_binary,
+ *primary_binary[MAX_NUM_PRIMARY_STAGES],
+ *vf_pp_binary,
+ *pre_isp_binary,
+ *anr_gdc_binary,
+ *post_isp_binary,
+ *yuv_scaler_binary,
+ *capture_pp_binary,
+ *capture_ldc_binary;
+ bool need_pp = false;
+ bool raw;
+
+ struct ia_css_frame *in_frame;
+ struct ia_css_frame *out_frame;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame;
+ struct ia_css_pipeline_stage_desc stage_desc;
+ bool need_in_frameinfo_memory = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+ bool buffered_sensor = false;
+ bool online = false;
+ bool continuous = false;
+#endif
+ unsigned int i, num_yuv_scaler, num_primary_stage;
+ bool need_yuv_pp = false;
+ bool *is_output_stage = NULL;
+ bool need_ldc = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->stream);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ me = &pipe->pipeline;
+ mode = pipe->config.default_capture_config.mode;
+ raw = (mode == IA_CSS_CAPTURE_MODE_RAW);
+ ia_css_pipeline_clean(me);
+ ia_css_pipe_util_create_output_frames(out_frames);
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+	 * - Direct Sensor Mode Online Capture
+	 * - Direct Sensor Mode Continuous Capture
+	 * - Buffered Sensor Mode Online Capture
+	 * - Buffered Sensor Mode Continuous Capture
+ */
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+ buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor && (online || continuous)));
+#else
+	/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+#endif
+ if (need_in_frameinfo_memory)
+ {
+ err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
+ IA_CSS_FRAME_FORMAT_RAW);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ in_frame = &me->in_frame;
+ } else
+ {
+ in_frame = NULL;
+ }
+
+ err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ out_frame = &me->out_frame[0];
+
+ /* Construct vf_frame info (only in case we have VF) */
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0])
+ {
+ if (mode == IA_CSS_CAPTURE_MODE_RAW || mode == IA_CSS_CAPTURE_MODE_BAYER) {
+ /* These modes don't support viewfinder output */
+ vf_frame = NULL;
+ } else {
+ init_vf_frameinfo_defaults(pipe, &me->vf_frame[0], 0);
+ vf_frame = &me->vf_frame[0];
+ }
+ } else
+ {
+ vf_frame = NULL;
+ }
+
+ copy_binary = &pipe->pipe_settings.capture.copy_binary;
+ num_primary_stage = pipe->pipe_settings.capture.num_primary_stage;
+ if ((num_primary_stage == 0) && (mode == IA_CSS_CAPTURE_MODE_PRIMARY))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ for (i = 0; i < num_primary_stage; i++)
+ {
+ primary_binary[i] = &pipe->pipe_settings.capture.primary_binary[i];
+ }
+ vf_pp_binary = &pipe->pipe_settings.capture.vf_pp_binary;
+ pre_isp_binary = &pipe->pipe_settings.capture.pre_isp_binary;
+ anr_gdc_binary = &pipe->pipe_settings.capture.anr_gdc_binary;
+ post_isp_binary = &pipe->pipe_settings.capture.post_isp_binary;
+ capture_pp_binary = &pipe->pipe_settings.capture.capture_pp_binary;
+ yuv_scaler_binary = pipe->pipe_settings.capture.yuv_scaler_binary;
+ num_yuv_scaler = pipe->pipe_settings.capture.num_yuv_scaler;
+ is_output_stage = pipe->pipe_settings.capture.is_output_stage;
+ capture_ldc_binary = &pipe->pipe_settings.capture.capture_ldc_binary;
+
+ need_pp = (need_capture_pp(pipe) || pipe->output_stage) &&
+ mode != IA_CSS_CAPTURE_MODE_RAW &&
+ mode != IA_CSS_CAPTURE_MODE_BAYER;
+ need_yuv_pp = (yuv_scaler_binary && yuv_scaler_binary->info);
+ need_ldc = (capture_ldc_binary && capture_ldc_binary->info);
+
+ if (pipe->pipe_settings.capture.copy_binary.info)
+ {
+ if (raw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (!continuous) {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, in_frame, NULL);
+ }
+#else
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+#endif
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, in_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+ }
+
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else if (pipe->stream->config.continuous)
+ {
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ }
+
+ if (mode == IA_CSS_CAPTURE_MODE_PRIMARY)
+ {
+ struct ia_css_frame *local_in_frame = NULL;
+ struct ia_css_frame *local_out_frame = NULL;
+
+ for (i = 0; i < num_primary_stage; i++) {
+ if (i == 0)
+ local_in_frame = in_frame;
+ else
+ local_in_frame = NULL;
+#ifndef ISP2401
+ if (!need_pp && (i == num_primary_stage - 1))
+#else
+ if (!need_pp && (i == num_primary_stage - 1) && !need_ldc)
+#endif
+ local_out_frame = out_frame;
+ else
+ local_out_frame = NULL;
+ ia_css_pipe_util_set_output_frames(out_frames, 0, local_out_frame);
+			/*
+			 * WARNING: The #ifdef above was added as a temporary
+			 * workaround to enable the viewfinder in a single
+			 * binary in a capture flow. The vf-pp stage has been
+			 * removed from Skycam in this workaround and should be
+			 * re-introduced when required. This is not a clean
+			 * solution; proper investigation is needed to come up
+			 * with one.
+			 */
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, primary_binary[i],
+ out_frames, local_in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+		/* If we use the copy binary instead of the primary binary,
+		   the input must be YUV instead of RAW */
+ current_stage->args.copy_vf =
+ primary_binary[0]->info->sp.pipeline.mode ==
+ IA_CSS_BINARY_MODE_COPY;
+ current_stage->args.copy_output = current_stage->args.copy_vf;
+ } else if (mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
+ mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT)
+ {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc, NULL);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, anr_gdc_binary,
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc, NULL);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (need_pp) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, post_isp_binary,
+ out_frames, NULL, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, post_isp_binary,
+ out_frames, NULL, NULL);
+ }
+
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc, &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else if (mode == IA_CSS_CAPTURE_MODE_BAYER)
+ {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ NULL);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+#ifndef ISP2401
+ if (need_pp && current_stage)
+ {
+ struct ia_css_frame *local_in_frame = NULL;
+
+ local_in_frame = current_stage->args.out_frame[0];
+
+ if (need_ldc) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, capture_ldc_binary,
+ out_frames, local_in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &current_stage);
+ local_in_frame = current_stage->args.out_frame[0];
+ }
+ err = add_capture_pp_stage(pipe, me, local_in_frame,
+ need_yuv_pp ? NULL : out_frame,
+#else
+ /* ldc and capture_pp not supported in same pipeline */
+ if (need_ldc && current_stage)
+ {
+ in_frame = current_stage->args.out_frame[0];
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, capture_ldc_binary,
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ NULL);
+ } else if (need_pp && current_stage)
+ {
+ in_frame = current_stage->args.out_frame[0];
+ err = add_capture_pp_stage(pipe, me, in_frame, need_yuv_pp ? NULL : out_frame,
+#endif
+ capture_pp_binary,
+ &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ if (need_yuv_pp && current_stage)
+ {
+ struct ia_css_frame *tmp_in_frame = current_stage->args.out_frame[0];
+ struct ia_css_frame *tmp_out_frame = NULL;
+
+ for (i = 0; i < num_yuv_scaler; i++) {
+			if (is_output_stage[i])
+ tmp_out_frame = out_frame;
+ else
+ tmp_out_frame = NULL;
+
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame,
+ NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* we use output port 1 as internal output port */
+ tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
+ }
+ }
+
+	/*
+	 * WARNING: This condition was added as a temporary workaround to
+	 * enable the viewfinder in a single binary in a capture flow. The
+	 * vf-pp stage has been removed from Skycam in this workaround and
+	 * should be re-introduced when required. This is not a clean
+	 * solution; proper investigation is needed to come up with one.
+	 */
+ if (mode != IA_CSS_CAPTURE_MODE_RAW && mode != IA_CSS_CAPTURE_MODE_BAYER && current_stage && vf_frame)
+ {
+ in_frame = current_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary,
+ &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_regular_capture_pipeline() leave:\n");
+
+ return IA_CSS_SUCCESS;
+}
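+
+/*
+ * A rough sketch of the primary-capture stage order built above,
+ * inferred from the code (optional stages in brackets, ISP2400 path):
+ *
+ *   [copy] -> primary[0..n-1] -> [capture_ldc] -> [capture_pp]
+ *          -> [yuv_scaler ...] -> [vf_pp]
+ *
+ * The advanced and low-light modes instead run pre_isp -> anr_gdc ->
+ * post_isp before the optional post-processing stages.
+ */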
+
+static enum ia_css_err
+create_host_capture_pipeline(struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
+ err = create_host_isyscopy_capture_pipeline(pipe);
+ else
+ err = create_host_regular_capture_pipeline(pipe);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+
+ return err;
+}
+
+static enum ia_css_err capture_start(
+ struct ia_css_pipe *pipe) {
+ struct ia_css_pipeline *me;
+
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum sh_css_pipe_config_override copy_ovrd;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if (!pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ me = &pipe->pipeline;
+
+ if ((pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) &&
+ (pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
+ if (copy_on_sp(pipe)) {
+ err = start_copy_on_sp(pipe, &me->out_frame[0]);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* old isys: need to send_mipi_frames() in all pipe modes */
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+#endif
+
+ {
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+ }
+ start_pipe(pipe, copy_ovrd, pipe->stream->config.mode);
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /*
+ * old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured,
+ * which is currently done in start_binary(); but COPY pipe contains no binary,
+ * and does not call start_binary(); so we need to configure the rx here.
+ */
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY &&
+ pipe->stream->reconfigure_css_rx) {
+ ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
+ pipe->stream->config.mode);
+ pipe->stream->reconfigure_css_rx = false;
+ }
+#endif
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx) {
+ assert(pipe);
+ assert(info);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_output_frame_info() enter:\n");
+
+ *info = pipe->output_info[idx];
+ if (copy_on_sp(pipe) &&
+ pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8)
+ {
+ ia_css_frame_info_init(
+ info,
+ JPEG_BYTES,
+ 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8,
+ 0);
+ } else if (info->format == IA_CSS_FRAME_FORMAT_RAW ||
+ info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED)
+ {
+ info->raw_bit_depth =
+ ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_output_frame_info() leave:\n");
+ return IA_CSS_SUCCESS;
+}
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+void
+ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height) {
+ assert(stream);
+
+ ia_css_inputfifo_send_input_frame(
+ data, width, height,
+ stream->config.channel_id,
+ stream->config.input_config.format,
+ stream->config.pixels_per_clock == 2);
+}
+
+void
+ia_css_stream_start_input_frame(const struct ia_css_stream *stream) {
+ assert(stream);
+
+ ia_css_inputfifo_start_frame(
+ stream->config.channel_id,
+ stream->config.input_config.format,
+ stream->config.pixels_per_clock == 2);
+}
+
+void
+ia_css_stream_send_input_line(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2) {
+ assert(stream);
+
+ ia_css_inputfifo_send_line(stream->config.channel_id,
+ data, width, data2, width2);
+}
+
+void
+ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
+ enum atomisp_input_format format,
+ const unsigned short *data,
+ unsigned int width) {
+ assert(stream);
+ if (!data || width == 0)
+ return;
+ ia_css_inputfifo_send_embedded_line(stream->config.channel_id,
+ format, data, width);
+}
+
+void
+ia_css_stream_end_input_frame(const struct ia_css_stream *stream) {
+ assert(stream);
+
+ ia_css_inputfifo_end_frame(stream->config.channel_id);
+}
+#endif
+
+static void
+append_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware) {
+ IA_CSS_ENTER_PRIVATE("l = %p, firmware = %p", l, firmware);
+ if (!l) {
+ IA_CSS_ERROR("NULL fw_info");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ while (*l)
+ l = &(*l)->next;
+ *l = firmware;
+	/* Do not clear firmware->next here: when multiple acc extensions
+	 * are loaded, 'next' may legitimately be non-NULL. */
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+static void
+remove_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware) {
+ assert(*l);
+ assert(firmware);
+ (void)l;
+ (void)firmware;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "remove_firmware() enter:\n");
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "remove_firmware() leave:\n");
+ return; /* removing single and multiple firmware is handled in acc_unload_extension() */
+}
+
+static enum ia_css_err upload_isp_code(struct ia_css_fw_info *firmware) {
+ hrt_vaddress binary;
+
+ if (!firmware) {
+ IA_CSS_ERROR("NULL input parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ binary = firmware->info.isp.xmem_addr;
+
+ if (!binary) {
+ unsigned int size = firmware->blob.size;
+ const unsigned char *blob;
+ const unsigned char *binary_name;
+
+ binary_name =
+ (const unsigned char *)(IA_CSS_EXT_ISP_PROG_NAME(
+ firmware));
+ blob = binary_name +
+ strlen((const char *)binary_name) +
+ 1;
+ binary = sh_css_load_blob(blob, size);
+ firmware->info.isp.xmem_addr = binary;
+ }
+
+ if (!binary)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return IA_CSS_SUCCESS;
+}
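+
+/*
+ * The pointer arithmetic above implies the following extension blob
+ * layout (an inferred sketch, not a formal format description):
+ *
+ *   [ program name, NUL-terminated ][ ISP binary, blob.size bytes ]
+ *
+ * so the binary starts strlen(name) + 1 bytes past the name pointer.
+ */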
+
+static enum ia_css_err
+acc_load_extension(struct ia_css_fw_info *firmware) {
+ enum ia_css_err err;
+ struct ia_css_fw_info *hd = firmware;
+
+ while (hd)
+ {
+ err = upload_isp_code(hd);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ hd = hd->next;
+ }
+
+ if (!firmware)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ firmware->loaded = true;
+ return IA_CSS_SUCCESS;
+}
+
+static void
+acc_unload_extension(struct ia_css_fw_info *firmware) {
+ struct ia_css_fw_info *hd = firmware;
+ struct ia_css_fw_info *hdn = NULL;
+
+ if (!firmware) /* should not happen */
+ return;
+ /* unload and remove multiple firmwares */
+ while (hd) {
+		hdn = hd->next;
+ if (hd->info.isp.xmem_addr) {
+ hmm_free(hd->info.isp.xmem_addr);
+ hd->info.isp.xmem_addr = mmgr_NULL;
+ }
+ hd->isp_code = NULL;
+ hd->next = NULL;
+ hd = hdn;
+ }
+
+ firmware->loaded = false;
+}
+
+/* Load firmware for extension */
+static enum ia_css_err
+ia_css_pipe_load_extension(struct ia_css_pipe *pipe,
+ struct ia_css_fw_info *firmware) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("fw = %p pipe = %p", firmware, pipe);
+
+ if ((!firmware) || (!pipe))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+	if (firmware->info.isp.type == IA_CSS_ACC_OUTPUT)
+		append_firmware(&pipe->output_stage, firmware);
+	else if (firmware->info.isp.type == IA_CSS_ACC_VIEWFINDER)
+		append_firmware(&pipe->vf_stage, firmware);
+ err = acc_load_extension(firmware);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* Unload firmware for extension */
+static void
+ia_css_pipe_unload_extension(struct ia_css_pipe *pipe,
+ struct ia_css_fw_info *firmware) {
+ IA_CSS_ENTER_PRIVATE("fw = %p pipe = %p", firmware, pipe);
+
+ if ((!firmware) || (!pipe)) {
+ IA_CSS_ERROR("NULL input parameters");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+
+ if (firmware->info.isp.type == IA_CSS_ACC_OUTPUT)
+ remove_firmware(&pipe->output_stage, firmware);
+ else if (firmware->info.isp.type == IA_CSS_ACC_VIEWFINDER)
+ remove_firmware(&pipe->vf_stage, firmware);
+ acc_unload_extension(firmware);
+
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+bool
+ia_css_pipeline_uses_params(struct ia_css_pipeline *me) {
+ struct ia_css_pipeline_stage *stage;
+
+ assert(me);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_uses_params() enter: me=%p\n", me);
+
+ for (stage = me->stages; stage; stage = stage->next)
+ if (stage->binary_info && stage->binary_info->enable.params) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_uses_params() leave: return_bool=true\n");
+ return true;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_uses_params() leave: return_bool=false\n");
+ return false;
+}
+
+static enum ia_css_err
+sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
+ const void *acc_fw) {
+ struct ia_css_fw_info *fw = (struct ia_css_fw_info *)acc_fw;
+ /* In QoS case, load_extension already called, so skipping */
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+	if (!fw->loaded)
+ err = acc_load_extension(fw);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_pipeline_add_acc_stage() enter: pipeline=%p, acc_fw=%p\n",
+ pipeline, acc_fw);
+
+ if (err == IA_CSS_SUCCESS)
+ {
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ ia_css_pipe_get_acc_stage_desc(&stage_desc, NULL, fw);
+ err = ia_css_pipeline_create_and_add_stage(pipeline,
+ &stage_desc,
+ NULL);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_pipeline_add_acc_stage() leave: return_err=%d\n", err);
+ return err;
+}
+
+/*
+ * @brief Tag a specific frame in continuous capture.
+ * Refer to "sh_css_internal.h" for details.
+ */
+enum ia_css_err ia_css_stream_capture_frame(struct ia_css_stream *stream,
+ unsigned int exp_id) {
+ struct sh_css_tag_descr tag_descr;
+ u32 encoded_tag_descr;
+ enum ia_css_err err;
+
+ assert(stream);
+ IA_CSS_ENTER("exp_id=%d", exp_id);
+
+ /* Only continuous streams have a tagger */
+ if (exp_id == 0 || !stream->config.continuous) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ /* Create the tag descriptor from the parameters */
+ sh_css_create_tag_descr(0, 0, 0, exp_id, &tag_descr);
+ /* Encode the tag descriptor into a 32-bit value */
+ encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr);
+ /* Enqueue the encoded tag to the host2sp queue.
+ * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
+ * on both host and the SP side.
+ * It is mainly because it is enough to have only one tag_cmd queue */
+ err = ia_css_bufq_enqueue_tag_cmd(encoded_tag_descr);
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
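+
+/*
+ * Hypothetical usage sketch (handle_error() is illustrative, not part
+ * of this API): after the host receives a preview frame carrying
+ * exposure ID exp_id, it can tag the matching raw frame for capture:
+ *
+ *     err = ia_css_stream_capture_frame(stream, exp_id);
+ *     if (err != IA_CSS_SUCCESS)
+ *         handle_error(err);
+ */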
+
+/*
+ * @brief Configure the continuous capture.
+ * Refer to "sh_css_internal.h" for details.
+ */
+enum ia_css_err ia_css_stream_capture(
+ struct ia_css_stream *stream,
+ int num_captures,
+ unsigned int skip,
+ int offset) {
+ struct sh_css_tag_descr tag_descr;
+ unsigned int encoded_tag_descr;
+ enum ia_css_err return_err;
+
+ if (!stream)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() enter: num_captures=%d, skip=%d, offset=%d\n",
+ num_captures, skip, offset);
+
+ /* Check if the tag descriptor is valid */
+ if (num_captures < SH_CSS_MINIMUM_TAG_ID) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leave: return_err=%d\n",
+ IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* Create the tag descriptor from the parameters */
+ sh_css_create_tag_descr(num_captures, skip, offset, 0, &tag_descr);
+
+ /* Encode the tag descriptor into a 32-bit value */
+ encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr);
+
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leaving:queues unavailable\n");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ /* Enqueue the encoded tag to the host2sp queue.
+ * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
+ * on both host and the SP side.
+ * It is mainly because it is enough to have only one tag_cmd queue */
+ return_err = ia_css_bufq_enqueue_tag_cmd((uint32_t)encoded_tag_descr);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leave: return_err=%d\n",
+ return_err);
+
+ return return_err;
+}
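+
+/*
+ * An illustrative call, assuming the parameter semantics documented in
+ * "sh_css_internal.h": tag three frames for capture, skipping one
+ * frame between captures, starting at the current offset:
+ *
+ *     err = ia_css_stream_capture(stream, 3, 1, 0);
+ */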
+
+void ia_css_stream_request_flash(struct ia_css_stream *stream) {
+ (void)stream;
+
+ assert(stream);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_request_flash() enter: void\n");
+
+#ifndef ISP2401
+ sh_css_write_host2sp_command(host2sp_cmd_start_flash);
+#else
+ if (sh_css_sp_is_running()) {
+ if (!sh_css_write_host2sp_command(host2sp_cmd_start_flash)) {
+			IA_CSS_ERROR("Call to 'sh_css_write_host2sp_command()' failed");
+ ia_css_debug_dump_sp_sw_debug_info();
+ ia_css_debug_dump_debug_info(NULL);
+ }
+ } else
+ IA_CSS_LOG("SP is not running!");
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_request_flash() leave: return_void\n");
+}
+
+static void
+sh_css_init_host_sp_control_vars(void) {
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started;
+
+ unsigned int HIVE_ADDR_host_sp_queues_initialized;
+ unsigned int HIVE_ADDR_sp_sleep_mode;
+ unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
+#ifndef ISP2401
+ unsigned int HIVE_ADDR_sp_stop_copy_preview;
+#endif
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int o = offsetof(struct host_sp_communication, host2sp_command)
+ / sizeof(int);
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ unsigned int i;
+#endif
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_init_host_sp_control_vars() enter: void\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started;
+
+ HIVE_ADDR_host_sp_queues_initialized =
+ fw->info.sp.host_sp_queues_initialized;
+ HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode;
+ HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb;
+#ifndef ISP2401
+ HIVE_ADDR_sp_stop_copy_preview = fw->info.sp.stop_copy_preview;
+#endif
+ HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com;
+
+	(void)HIVE_ADDR_ia_css_ispctrl_sp_isp_started; /* Suppress warnings in CRUN */
+
+ (void)HIVE_ADDR_sp_sleep_mode;
+ (void)HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
+#ifndef ISP2401
+ (void)HIVE_ADDR_sp_stop_copy_preview;
+#endif
+ (void)HIVE_ADDR_host_sp_com;
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_ispctrl_sp_isp_started),
+ (uint32_t)(0));
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_queues_initialized),
+ (uint32_t)(0));
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_sleep_mode),
+ (uint32_t)(0));
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
+ (uint32_t)(false));
+#ifndef ISP2401
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_stop_copy_preview),
+ my_css.stop_copy_preview ? (uint32_t)(1) : (uint32_t)(0));
+#endif
+ store_sp_array_uint(host_sp_com, o, host2sp_cmd_ready);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ for (i = 0; i < N_CSI_PORTS; i++) {
+ sh_css_update_host2sp_num_mipi_frames
+ (my_css.num_mipi_frames[i]);
+ }
+#endif
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_init_host_sp_control_vars() leave: return_void\n");
+}
+
+/*
+ * create the internal structures and fill in the configuration data
+ */
+void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_config_defaults()\n");
+ *pipe_config = DEFAULT_PIPE_CONFIG;
+}
+
+void
+ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config
+ *extra_config) {
+ if (!extra_config) {
+ IA_CSS_ERROR("NULL input parameter");
+ return;
+ }
+
+ extra_config->enable_raw_binning = false;
+ extra_config->enable_yuv_ds = false;
+ extra_config->enable_high_speed = false;
+ extra_config->enable_dvs_6axis = false;
+ extra_config->enable_reduced_pipe = false;
+ extra_config->disable_vf_pp = false;
+ extra_config->enable_fractional_ds = false;
+}
+
+void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_config_defaults()\n");
+ assert(stream_config);
+ memset(stream_config, 0, sizeof(*stream_config));
+ stream_config->online = true;
+ stream_config->left_padding = -1;
+ stream_config->pixels_per_clock = 1;
+ /* temporary default value for backwards compatibility.
+ * This field used to be hardcoded within CSS but this has now
+ * been moved to the stream_config struct. */
+ stream_config->source.port.rxcount = 0x04040404;
+}
+
+static enum ia_css_err
+ia_css_acc_pipe_create(struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if (!pipe)
+ {
+ IA_CSS_ERROR("NULL input parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+	/* num_execs = 0 has no meaning semantically; run at least once. */
+ if (pipe->config.acc_num_execs == 0)
+ pipe->config.acc_num_execs = 1;
+
+ if (pipe->config.acc_extension)
+ {
+ err = ia_css_pipe_load_extension(pipe, pipe->config.acc_extension);
+ }
+
+ return err;
+}
+
+enum ia_css_err
+ia_css_pipe_create(const struct ia_css_pipe_config *config,
+ struct ia_css_pipe **pipe) {
+#ifndef ISP2401
+	if (!config)
+		return IA_CSS_ERR_INVALID_ARGUMENTS;
+	if (!pipe)
+		return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+	return ia_css_pipe_create_extra(config, NULL, pipe);
+#else
+	enum ia_css_err err = IA_CSS_SUCCESS;
+
+	IA_CSS_ENTER_PRIVATE("config = %p, pipe = %p", config, pipe);
+
+	if (!config) {
+		IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+		return IA_CSS_ERR_INVALID_ARGUMENTS;
+	}
+	if (!pipe) {
+		IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+		return IA_CSS_ERR_INVALID_ARGUMENTS;
+	}
+
+	err = ia_css_pipe_create_extra(config, NULL, pipe);
+
+	if (err == IA_CSS_SUCCESS)
+		IA_CSS_LOG("pipe created successfully = %p", *pipe);
+
+	IA_CSS_LEAVE_ERR_PRIVATE(err);
+
+	return err;
+#endif
+}
+
+enum ia_css_err
+ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
+ const struct ia_css_pipe_extra_config *extra_config,
+ struct ia_css_pipe **pipe) {
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ struct ia_css_pipe *internal_pipe = NULL;
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("config = %p, extra_config = %p and pipe = %p", config, extra_config, pipe);
+
+ /* do not allow to create more than the maximum limit */
+ if (my_css.pipe_counter >= IA_CSS_PIPELINE_NUM_MAX)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_RESOURCE_EXHAUSTED);
+		return IA_CSS_ERR_RESOURCE_EXHAUSTED;
+ }
+
+ if ((!pipe) || (!config))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ia_css_debug_dump_pipe_config(config);
+ ia_css_debug_dump_pipe_extra_config(extra_config);
+
+ err = create_pipe(config->mode, &internal_pipe, false);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* now we have a pipe structure to fill */
+ internal_pipe->config = *config;
+ if (extra_config)
+ internal_pipe->extra_config = *extra_config;
+ else
+ ia_css_pipe_extra_config_defaults(&internal_pipe->extra_config);
+
+ if (config->mode == IA_CSS_PIPE_MODE_ACC)
+ {
+ /* Temporary hack to migrate acceleration to CSS 2.0.
+ * In the future the code for all pipe types should be
+ * unified. */
+ *pipe = internal_pipe;
+ if (!internal_pipe->config.acc_extension &&
+ internal_pipe->config.num_acc_stages ==
+ 0) { /* if no acc binary and no standalone stage */
+ *pipe = NULL;
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+ return ia_css_acc_pipe_create(internal_pipe);
+ }
+
+	/* Use the config value when the dvs_frame_delay setting equals 2; otherwise default to 1 */
+ if (internal_pipe->config.dvs_frame_delay == IA_CSS_FRAME_DELAY_2)
+ internal_pipe->dvs_frame_delay = 2;
+ else
+ internal_pipe->dvs_frame_delay = 1;
+
+	/* We still keep enable_raw_binning for backward compatibility; any new
+	   fractional bayer downscaling should use bayer_ds_out_res. If both are
+	   specified, bayer_ds_out_res takes precedence. If neither is specified, we
+	   set bayer_ds_out_res equal to the IF output resolution (the IF may crop
+	   the sensor output) or use the default decimation factor of 1. */
+
+ /* YUV downscaling */
+ if ((internal_pipe->config.vf_pp_in_res.width ||
+ internal_pipe->config.capt_pp_in_res.width))
+ {
+ enum ia_css_frame_format format;
+
+ if (internal_pipe->config.vf_pp_in_res.width) {
+ format = IA_CSS_FRAME_FORMAT_YUV_LINE;
+ ia_css_frame_info_init(
+ &internal_pipe->vf_yuv_ds_input_info,
+ internal_pipe->config.vf_pp_in_res.width,
+ internal_pipe->config.vf_pp_in_res.height,
+ format, 0);
+ }
+ if (internal_pipe->config.capt_pp_in_res.width) {
+ format = IA_CSS_FRAME_FORMAT_YUV420;
+ ia_css_frame_info_init(
+ &internal_pipe->out_yuv_ds_input_info,
+ internal_pipe->config.capt_pp_in_res.width,
+ internal_pipe->config.capt_pp_in_res.height,
+ format, 0);
+ }
+ }
+ if (internal_pipe->config.vf_pp_in_res.width &&
+ internal_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ {
+ ia_css_frame_info_init(
+ &internal_pipe->vf_yuv_ds_input_info,
+ internal_pipe->config.vf_pp_in_res.width,
+ internal_pipe->config.vf_pp_in_res.height,
+ IA_CSS_FRAME_FORMAT_YUV_LINE, 0);
+ }
+ /* handle bayer downscaling output info */
+ if (internal_pipe->config.bayer_ds_out_res.width)
+ {
+ ia_css_frame_info_init(
+ &internal_pipe->bds_output_info,
+ internal_pipe->config.bayer_ds_out_res.width,
+ internal_pipe->config.bayer_ds_out_res.height,
+ IA_CSS_FRAME_FORMAT_RAW, 0);
+ }
+
+ /* handle output info, assume always needed */
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++)
+ {
+ if (internal_pipe->config.output_info[i].res.width) {
+ err = sh_css_pipe_configure_output(
+ internal_pipe,
+ internal_pipe->config.output_info[i].res.width,
+ internal_pipe->config.output_info[i].res.height,
+ internal_pipe->config.output_info[i].padded_width,
+ internal_pipe->config.output_info[i].format,
+ i);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ sh_css_free(internal_pipe);
+ internal_pipe = NULL;
+ return err;
+ }
+ }
+
+ /* handle vf output info, when configured */
+ internal_pipe->enable_viewfinder[i] =
+ (internal_pipe->config.vf_output_info[i].res.width != 0);
+ if (internal_pipe->config.vf_output_info[i].res.width) {
+ err = sh_css_pipe_configure_viewfinder(
+ internal_pipe,
+ internal_pipe->config.vf_output_info[i].res.width,
+ internal_pipe->config.vf_output_info[i].res.height,
+ internal_pipe->config.vf_output_info[i].padded_width,
+ internal_pipe->config.vf_output_info[i].format,
+ i);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ sh_css_free(internal_pipe);
+ internal_pipe = NULL;
+ return err;
+ }
+ }
+ }
+ if (internal_pipe->config.acc_extension)
+ {
+ err = ia_css_pipe_load_extension(internal_pipe,
+ internal_pipe->config.acc_extension);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ sh_css_free(internal_pipe);
+ return err;
+ }
+ }
+ /* set all info to zeroes first */
+ memset(&internal_pipe->info, 0, sizeof(internal_pipe->info));
+
+ /* all went well, return the pipe */
+ *pipe = internal_pipe;
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
+ struct ia_css_pipe_info *pipe_info) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_get_info()\n");
+ assert(pipe_info);
+ if (!pipe_info)
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_pipe_get_info: pipe_info cannot be NULL\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (!pipe || !pipe->stream)
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_pipe_get_info: ia_css_stream_create needs to be called before ia_css_[stream/pipe]_get_info\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* we succeeded return the info */
+ *pipe_info = pipe->info;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_get_info() leave\n");
+ return IA_CSS_SUCCESS;
+}
+
+bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info) {
+ unsigned int i;
+
+ if (pipe_info) {
+ for (i = 0; i < IA_CSS_DVS_STAT_NUM_OF_LEVELS; i++) {
+ if (pipe_info->grid_info.dvs_grid.dvs_stat_grid_info.grd_cfg[i].grd_start.enable)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+enum ia_css_err
+ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
+ int pin_index,
+ enum ia_css_frame_format new_format) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+	IA_CSS_ENTER_PRIVATE("pipe = %p, pin_index = %d, new_format = %d", pipe, pin_index, new_format);
+
+ if (!pipe)
+ {
+ IA_CSS_ERROR("pipe is not set");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+	if (pin_index != 0 && pin_index != 1)
+ {
+ IA_CSS_ERROR("pin index is not valid");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (new_format != IA_CSS_FRAME_FORMAT_NV12_TILEY)
+ {
+ IA_CSS_ERROR("new format is not valid");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ } else
+ {
+ err = ia_css_pipe_check_format(pipe, new_format);
+ if (err == IA_CSS_SUCCESS) {
+ if (pin_index == 0) {
+ pipe->output_info[0].format = new_format;
+ } else {
+ pipe->vf_output_info[0].format = new_format;
+ }
+ }
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+/* Configuration of INPUT_SYSTEM_VERSION_2401 is done on SP */
+static enum ia_css_err
+ia_css_stream_configure_rx(struct ia_css_stream *stream) {
+ struct ia_css_input_port *config;
+
+ assert(stream);
+
+ config = &stream->config.source.port;
+ /* AM: this code is not reliable, especially for 2400 */
+ if (config->num_lanes == 1)
+ stream->csi_rx_config.mode = MONO_1L_1L_0L;
+ else if (config->num_lanes == 2)
+ stream->csi_rx_config.mode = MONO_2L_1L_0L;
+ else if (config->num_lanes == 3)
+ stream->csi_rx_config.mode = MONO_3L_1L_0L;
+ else if (config->num_lanes == 4)
+ stream->csi_rx_config.mode = MONO_4L_1L_0L;
+ else if (config->num_lanes != 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (config->port > MIPI_PORT2_ID)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ stream->csi_rx_config.port =
+ ia_css_isys_port_to_mipi_port(config->port);
+ stream->csi_rx_config.timeout = config->timeout;
+ stream->csi_rx_config.initcount = 0;
+ stream->csi_rx_config.synccount = 0x28282828;
+ stream->csi_rx_config.rxcount = config->rxcount;
+ if (config->compression.type == IA_CSS_CSI2_COMPRESSION_TYPE_NONE)
+ stream->csi_rx_config.comp = MIPI_PREDICTOR_NONE;
+ else
+ {
+ /* not implemented yet, requires extension of the rx_cfg_t
+ * struct */
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ stream->csi_rx_config.is_two_ppc = (stream->config.pixels_per_clock == 2);
+ stream->reconfigure_css_rx = true;
+ return IA_CSS_SUCCESS;
+}
+#endif
+
+static struct ia_css_pipe *
+find_pipe(struct ia_css_pipe *pipes[],
+ unsigned int num_pipes,
+ enum ia_css_pipe_mode mode,
+ bool copy_pipe) {
+ unsigned int i;
+
+ assert(pipes);
+ for (i = 0; i < num_pipes; i++) {
+ assert(pipes[i]);
+ if (pipes[i]->config.mode != mode)
+ continue;
+ if (copy_pipe && pipes[i]->mode != IA_CSS_PIPE_ID_COPY)
+ continue;
+ return pipes[i];
+ }
+ return NULL;
+}
+
+static enum ia_css_err
+ia_css_acc_stream_create(struct ia_css_stream *stream) {
+ int i;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(stream);
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+
+ if (!stream)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ for (i = 0; i < stream->num_pipes; i++)
+ {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+
+ assert(pipe);
+ if (!pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe->stream = stream;
+ }
+
+ /* Map SP threads before doing anything. */
+ err = map_sp_threads(stream, true);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ for (i = 0; i < stream->num_pipes; i++)
+ {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+
+ assert(pipe);
+ ia_css_pipe_map_queue(pipe, true);
+ }
+
+ err = create_host_pipeline_structure(stream);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ stream->started = false;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+metadata_info_init(const struct ia_css_metadata_config *mdc,
+ struct ia_css_metadata_info *md) {
+ /* Either both width and height should be set or neither */
+ if ((mdc->resolution.height > 0) ^ (mdc->resolution.width > 0))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ md->resolution = mdc->resolution;
+	/* We round the stride up to a multiple of the width of the
+	 * port going to DDR; this is a HW requirement (DMA). */
+ md->stride = CEIL_MUL(mdc->resolution.width, HIVE_ISP_DDR_WORD_BYTES);
+ md->size = mdc->resolution.height * md->stride;
+ return IA_CSS_SUCCESS;
+}
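+
+/*
+ * A worked example, assuming HIVE_ISP_DDR_WORD_BYTES is 32 on this
+ * platform: a 100x2 metadata resolution gives
+ *
+ *     stride = CEIL_MUL(100, 32) = 128 bytes
+ *     size   = 2 * 128           = 256 bytes
+ */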
+
+/* ISP2401 */
+static enum ia_css_err check_pipe_resolutions(const struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (!pipe || !pipe->stream) {
+ IA_CSS_ERROR("null arguments");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto EXIT;
+ }
+
+ if (ia_css_util_check_res(pipe->config.input_effective_res.width,
+ pipe->config.input_effective_res.height) != IA_CSS_SUCCESS) {
+ IA_CSS_ERROR("effective resolution not supported");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto EXIT;
+ }
+ if (!ia_css_util_resolution_is_zero(
+ pipe->stream->config.input_config.input_res)) {
+ if (!ia_css_util_res_leq(pipe->config.input_effective_res,
+ pipe->stream->config.input_config.input_res)) {
+ IA_CSS_ERROR("effective resolution is larger than input resolution");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto EXIT;
+ }
+ }
+ if (!ia_css_util_resolution_is_even(pipe->config.output_info[0].res)) {
+ IA_CSS_ERROR("output resolution must be even");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto EXIT;
+ }
+ if (!ia_css_util_resolution_is_even(pipe->config.vf_output_info[0].res)) {
+ IA_CSS_ERROR("VF resolution must be even");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto EXIT;
+ }
+EXIT:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_stream_create(const struct ia_css_stream_config *stream_config,
+ int num_pipes,
+ struct ia_css_pipe *pipes[],
+ struct ia_css_stream **stream) {
+ struct ia_css_pipe *curr_pipe;
+ struct ia_css_stream *curr_stream = NULL;
+ bool spcopyonly;
+ bool sensor_binning_changed;
+ int i, j;
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ struct ia_css_metadata_info md_info;
+ struct ia_css_resolution effective_res;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool aspect_ratio_crop_enabled = false;
+#endif
+
+ IA_CSS_ENTER("num_pipes=%d", num_pipes);
+ ia_css_debug_dump_stream_config(stream_config, num_pipes);
+
+ /* some checks */
+ if (num_pipes == 0 ||
+ !stream ||
+ !pipes)
+ {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+	/* We don't support metadata for JPEG streams, since both use str2mem */
+ if (stream_config->input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8 &&
+ stream_config->metadata_config.resolution.height > 0)
+ {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+#endif
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (stream_config->online && stream_config->pack_raw_pixels)
+ {
+ IA_CSS_LOG("online and pack raw is invalid on input system 2401");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ ia_css_debug_pipe_graph_dump_stream_config(stream_config);
+
+ /* check if mipi size specified */
+ if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (!stream_config->online)
+#endif
+ {
+ unsigned int port = (unsigned int)stream_config->source.port.port;
+
+ if (port >= N_MIPI_PORT_ID) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (my_css.size_mem_words != 0) {
+ my_css.mipi_frame_size[port] = my_css.size_mem_words;
+ } else if (stream_config->mipi_buffer_config.size_mem_words != 0) {
+ my_css.mipi_frame_size[port] = stream_config->mipi_buffer_config.size_mem_words;
+ } else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_create() exit: error, need to set mipi frame size.\n");
+ assert(stream_config->mipi_buffer_config.size_mem_words != 0);
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (my_css.size_mem_words != 0) {
+ my_css.num_mipi_frames[port] = 2; /* Temp change: default for backwards compatibility. */
+ } else if (stream_config->mipi_buffer_config.nof_mipi_buffers != 0) {
+ my_css.num_mipi_frames[port] =
+ stream_config->mipi_buffer_config.nof_mipi_buffers;
+ } else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_create() exit: error, need to set number of mipi frames.\n");
+ assert(stream_config->mipi_buffer_config.nof_mipi_buffers != 0);
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ }
+#endif
+
+ /* Currently we only support metadata up to a certain size. */
+ err = metadata_info_init(&stream_config->metadata_config, &md_info);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ /* allocate the stream instance */
+ curr_stream = kzalloc(sizeof(struct ia_css_stream), GFP_KERNEL);
+ if (!curr_stream)
+ {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ curr_stream->info.metadata_info = md_info;
+
+ /* allocate pipes */
+ curr_stream->num_pipes = num_pipes;
+ curr_stream->pipes = kcalloc(num_pipes, sizeof(struct ia_css_pipe *), GFP_KERNEL);
+ if (!curr_stream->pipes)
+ {
+ curr_stream->num_pipes = 0;
+ kfree(curr_stream);
+ curr_stream = NULL;
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ /* store pipes */
+ spcopyonly = (num_pipes == 1) && (pipes[0]->config.mode == IA_CSS_PIPE_MODE_COPY);
+ for (i = 0; i < num_pipes; i++)
+ curr_stream->pipes[i] = pipes[i];
+ curr_stream->last_pipe = curr_stream->pipes[0];
+ /* take over stream config */
+ curr_stream->config = *stream_config;
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401) && defined(CSI2P_DISABLE_ISYS2401_ONLINE_MODE)
+ if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR &&
+ stream_config->online)
+ curr_stream->config.online = false;
+#endif
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (curr_stream->config.online)
+ {
+ curr_stream->config.source.port.num_lanes =
+ stream_config->source.port.num_lanes;
+ curr_stream->config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ }
+#endif
+ /* in case the driver doesn't configure the initial number of raw buffers, configure it here */
+ if (curr_stream->config.target_num_cont_raw_buf == 0)
+ curr_stream->config.target_num_cont_raw_buf = NUM_CONTINUOUS_FRAMES;
+ if (curr_stream->config.init_num_cont_raw_buf == 0)
+ curr_stream->config.init_num_cont_raw_buf = curr_stream->config.target_num_cont_raw_buf;
+
+ /* Enable locking & unlocking of buffers in RAW buffer pool */
+ if (curr_stream->config.ia_css_enable_raw_buffer_locking)
+ sh_css_sp_configure_enable_raw_pool_locking(
+ curr_stream->config.lock_all);
+
+ /* copy mode specific stuff */
+ switch (curr_stream->config.mode)
+ {
+ case IA_CSS_INPUT_MODE_SENSOR:
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ ia_css_stream_configure_rx(curr_stream);
+#endif
+ break;
+ case IA_CSS_INPUT_MODE_TPG:
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d",
+ curr_stream->config.source.tpg.x_mask,
+ curr_stream->config.source.tpg.y_mask,
+ curr_stream->config.source.tpg.x_delta,
+ curr_stream->config.source.tpg.y_delta,
+ curr_stream->config.source.tpg.xy_mask);
+
+ sh_css_sp_configure_tpg(
+ curr_stream->config.source.tpg.x_mask,
+ curr_stream->config.source.tpg.y_mask,
+ curr_stream->config.source.tpg.x_delta,
+ curr_stream->config.source.tpg.y_delta,
+ curr_stream->config.source.tpg.xy_mask);
+#endif
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ IA_CSS_LOG("mode prbs");
+ sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed);
+#endif
+ break;
+ case IA_CSS_INPUT_MODE_MEMORY:
+ IA_CSS_LOG("mode memory");
+ curr_stream->reconfigure_css_rx = false;
+ break;
+ default:
+ IA_CSS_LOG("mode sensor/default");
+ }
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ err = aspect_ratio_crop_init(curr_stream,
+ pipes,
+ &aspect_ratio_crop_enabled);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+#endif
+ for (i = 0; i < num_pipes; i++)
+ {
+ struct ia_css_resolution effective_res;
+
+ curr_pipe = pipes[i];
+ /* set current stream */
+ curr_pipe->stream = curr_stream;
+ /* take over effective info */
+
+ effective_res = curr_pipe->config.input_effective_res;
+ if (effective_res.height == 0 || effective_res.width == 0) {
+ effective_res = curr_pipe->stream->config.input_config.effective_res;
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* The aspect ratio cropping is currently only
+ * supported on the new input system. */
+ if (aspect_ratio_crop_check(aspect_ratio_crop_enabled, curr_pipe)) {
+ struct ia_css_resolution crop_res;
+
+ err = aspect_ratio_crop(curr_pipe, &crop_res);
+ if (err == IA_CSS_SUCCESS) {
+ effective_res = crop_res;
+ } else {
+ /* in case of error, fall back to the default
+ * effective resolution from the driver. */
+ IA_CSS_LOG("aspect_ratio_crop() failed with err(%d)", err);
+ }
+ }
+#endif
+ curr_pipe->config.input_effective_res = effective_res;
+ }
+ IA_CSS_LOG("effective_res=%dx%d",
+ effective_res.width,
+ effective_res.height);
+ }
+
+ if (atomisp_hw_is_isp2401) {
+ for (i = 0; i < num_pipes; i++) {
+ if (pipes[i]->config.mode != IA_CSS_PIPE_MODE_ACC &&
+ pipes[i]->config.mode != IA_CSS_PIPE_MODE_COPY) {
+ err = check_pipe_resolutions(pipes[i]);
+ if (err != IA_CSS_SUCCESS) {
+ goto ERR;
+ }
+ }
+ }
+ }
+
+ err = ia_css_stream_isp_parameters_init(curr_stream);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ IA_CSS_LOG("isp_params_configs: %p", curr_stream->isp_params_configs);
+
+ if (num_pipes == 1 && pipes[0]->config.mode == IA_CSS_PIPE_MODE_ACC)
+ {
+ *stream = curr_stream;
+ err = ia_css_acc_stream_create(curr_stream);
+ goto ERR;
+ }
+ /* sensor binning */
+ if (!spcopyonly)
+ {
+ sensor_binning_changed =
+ sh_css_params_set_binning_factor(curr_stream,
+ curr_stream->config.sensor_binning_factor);
+ } else
+ {
+ sensor_binning_changed = false;
+ }
+
+ IA_CSS_LOG("sensor_binning=%d, changed=%d",
+ curr_stream->config.sensor_binning_factor, sensor_binning_changed);
+ /* loop over pipes */
+ IA_CSS_LOG("num_pipes=%d", num_pipes);
+ curr_stream->cont_capt = false;
+ /* Temporary hack: we give the preview pipe a reference to the capture
+ * pipe in continuous capture mode. */
+ if (curr_stream->config.continuous)
+ {
+ /* Search for the preview pipe and create the copy pipe */
+ struct ia_css_pipe *preview_pipe;
+ struct ia_css_pipe *video_pipe;
+ struct ia_css_pipe *acc_pipe;
+ struct ia_css_pipe *capture_pipe = NULL;
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ if (num_pipes >= 2) {
+ curr_stream->cont_capt = true;
+ curr_stream->disable_cont_vf = curr_stream->config.disable_cont_viewfinder;
+
+ if (!atomisp_hw_is_isp2401)
+ curr_stream->stop_copy_preview = my_css.stop_copy_preview;
+ }
+
+ /* Create copy pipe here, since it may not be exposed to the driver */
+ preview_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_PREVIEW, false);
+ video_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_VIDEO, false);
+ acc_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_ACC, false);
+ if (acc_pipe && num_pipes == 2 && curr_stream->cont_capt == true)
+ curr_stream->cont_capt = false; /* preview + QoS case will not need cont_capt switch */
+ if (curr_stream->cont_capt == true) {
+ capture_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_CAPTURE, false);
+ if (!capture_pipe) {
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+ }
+ /* We do not support preview and video pipe at the same time */
+ if (preview_pipe && video_pipe) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto ERR;
+ }
+
+ if (preview_pipe && !preview_pipe->pipe_settings.preview.copy_pipe) {
+ err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, &copy_pipe, true);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ ia_css_pipe_config_defaults(&copy_pipe->config);
+ preview_pipe->pipe_settings.preview.copy_pipe = copy_pipe;
+ copy_pipe->stream = curr_stream;
+ }
+ if (preview_pipe && (curr_stream->cont_capt == true)) {
+ preview_pipe->pipe_settings.preview.capture_pipe = capture_pipe;
+ }
+ if (video_pipe && !video_pipe->pipe_settings.video.copy_pipe) {
+ err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, &copy_pipe, true);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ ia_css_pipe_config_defaults(&copy_pipe->config);
+ video_pipe->pipe_settings.video.copy_pipe = copy_pipe;
+ copy_pipe->stream = curr_stream;
+ }
+ if (video_pipe && (curr_stream->cont_capt == true)) {
+ video_pipe->pipe_settings.video.capture_pipe = capture_pipe;
+ }
+ if (preview_pipe && acc_pipe) {
+ preview_pipe->pipe_settings.preview.acc_pipe = acc_pipe;
+ }
+ }
+ for (i = 0; i < num_pipes; i++)
+ {
+ curr_pipe = pipes[i];
+ /* set current stream */
+ curr_pipe->stream = curr_stream;
+
+ if (!atomisp_hw_is_isp2401) {
+ /* take over effective info */
+
+ effective_res = curr_pipe->config.input_effective_res;
+ err = ia_css_util_check_res(
+ effective_res.width,
+ effective_res.height);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ /* sensor binning per pipe */
+ if (sensor_binning_changed)
+ sh_css_pipe_free_shading_table(curr_pipe);
+ }
+
+ /* now pipes have been configured, info should be available */
+ for (i = 0; i < num_pipes; i++)
+ {
+ struct ia_css_pipe_info *pipe_info = NULL;
+
+ curr_pipe = pipes[i];
+
+ err = sh_css_pipe_load_binaries(curr_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ /* handle each pipe */
+ pipe_info = &curr_pipe->info;
+ for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
+ err = sh_css_pipe_get_output_frame_info(curr_pipe,
+ &pipe_info->output_info[j], j);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ if (atomisp_hw_is_isp2401)
+ pipe_info->output_system_in_res_info = curr_pipe->config.output_system_in_res;
+
+ if (!spcopyonly) {
+ if (!atomisp_hw_is_isp2401)
+ err = sh_css_pipe_get_shading_info(curr_pipe,
+ &pipe_info->shading_info, NULL);
+ else
+ err = sh_css_pipe_get_shading_info(curr_pipe,
+ &pipe_info->shading_info, &curr_pipe->config);
+
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ err = sh_css_pipe_get_grid_info(curr_pipe,
+ &pipe_info->grid_info);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
+ err = sh_css_pipe_get_viewfinder_frame_info(curr_pipe,
+ &pipe_info->vf_output_info[j], j);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ }
+
+ my_css.active_pipes[ia_css_pipe_get_pipe_num(curr_pipe)] = curr_pipe;
+ }
+
+ curr_stream->started = false;
+
+ /* Map SP threads before doing anything. */
+ err = map_sp_threads(curr_stream, true);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LOG("map_sp_threads: return_err=%d", err);
+ goto ERR;
+ }
+
+ for (i = 0; i < num_pipes; i++)
+ {
+ curr_pipe = pipes[i];
+ ia_css_pipe_map_queue(curr_pipe, true);
+ }
+
+ /* Create host side pipeline objects without stages */
+ err = create_host_pipeline_structure(curr_stream);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LOG("create_host_pipeline_structure: return_err=%d", err);
+ goto ERR;
+ }
+
+ /* assign curr_stream */
+ *stream = curr_stream;
+
+ERR:
+ if (err == IA_CSS_SUCCESS) {
+ /* working mode: enter into the seed list */
+ if (my_css_save.mode == sh_css_mode_working) {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
+ if (!my_css_save.stream_seeds[i].stream) {
+ IA_CSS_LOG("entered stream into loc=%d", i);
+ my_css_save.stream_seeds[i].orig_stream = stream;
+ my_css_save.stream_seeds[i].stream = curr_stream;
+ my_css_save.stream_seeds[i].num_pipes = num_pipes;
+ my_css_save.stream_seeds[i].stream_config = *stream_config;
+ for (j = 0; j < num_pipes; j++) {
+ my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
+ my_css_save.stream_seeds[i].pipes[j] = pipes[j];
+ my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
+ }
+ break;
+ }
+ }
+ } else {
+ ia_css_stream_destroy(curr_stream);
+ }
+ } else {
+ ia_css_stream_destroy(curr_stream);
+ }
+ IA_CSS_LEAVE("return_err=%d mode=%d", err, my_css_save.mode);
+ return err;
+}
+
+enum ia_css_err
+ia_css_stream_destroy(struct ia_css_stream *stream) {
+ int i;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+ if (!stream)
+ {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ ia_css_stream_isp_parameters_uninit(stream);
+
+ if ((stream->last_pipe) &&
+ ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num))
+ {
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ bool free_mpi;
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *entry = stream->pipes[i];
+ unsigned int sp_thread_id;
+ struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
+ int j;
+
+ assert(entry);
+ if (entry) {
+ /* get the SP thread id */
+ if (ia_css_pipeline_get_sp_thread_id(
+ ia_css_pipe_get_pipe_num(entry), &sp_thread_id) != true)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ /* get the target input terminal */
+ sp_pipeline_input_terminal =
+ &sh_css_sp_group.pipe_io[sp_thread_id].input;
+
+ /* use a separate index here: reusing 'i' would corrupt the outer pipe loop */
+ for (j = 0; j < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; j++) {
+ ia_css_isys_stream_h isys_stream =
+ &sp_pipeline_input_terminal->context.virtual_input_system_stream[j];
+ if (stream->config.isys_config[j].valid && isys_stream->valid)
+ ia_css_isys_stream_destroy(isys_stream);
+ }
+ }
+ }
+ free_mpi = stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ if (atomisp_hw_is_isp2401) {
+ free_mpi |= stream->config.mode == IA_CSS_INPUT_MODE_TPG;
+ free_mpi |= stream->config.mode == IA_CSS_INPUT_MODE_PRBS;
+ }
+
+ if (free_mpi) {
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *entry = stream->pipes[i];
+ /* free any remaining mipi frames:
+ * some test stream create-destroy cycles do not generate output frames
+ * and the mipi buffer is not freed in the dequeue function
+ */
+ if (entry)
+ free_mipi_frames(entry);
+ }
+ }
+ stream_unregister_with_csi_rx(stream);
+#endif
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *curr_pipe = stream->pipes[i];
+
+ assert(curr_pipe);
+ ia_css_pipe_map_queue(curr_pipe, false);
+ }
+
+ err = map_sp_threads(stream, false);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ /* remove references from pipes to stream */
+ for (i = 0; i < stream->num_pipes; i++)
+ {
+ struct ia_css_pipe *entry = stream->pipes[i];
+
+ assert(entry);
+ if (entry) {
+ /* clear reference to stream */
+ entry->stream = NULL;
+ /* check internal copy pipe */
+ if (entry->mode == IA_CSS_PIPE_ID_PREVIEW &&
+ entry->pipe_settings.preview.copy_pipe) {
+ IA_CSS_LOG("clearing stream on internal preview copy pipe");
+ entry->pipe_settings.preview.copy_pipe->stream = NULL;
+ }
+ if (entry->mode == IA_CSS_PIPE_ID_VIDEO &&
+ entry->pipe_settings.video.copy_pipe) {
+ IA_CSS_LOG("clearing stream on internal video copy pipe");
+ entry->pipe_settings.video.copy_pipe->stream = NULL;
+ }
+ err = sh_css_pipe_unload_binaries(entry);
+ }
+ }
+ /* free associated memory of stream struct */
+ kfree(stream->pipes);
+ stream->pipes = NULL;
+ stream->num_pipes = 0;
+
+ /* working mode: take out of the seed list */
+ if (my_css_save.mode == sh_css_mode_working) {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
+ if (my_css_save.stream_seeds[i].stream == stream)
+ {
+ IA_CSS_LOG("took out stream %d", i);
+ my_css_save.stream_seeds[i].stream = NULL;
+ break;
+ }
+ }
+ }
+
+ kfree(stream);
+ IA_CSS_LEAVE_ERR(err);
+
+ return err;
+}
+
+enum ia_css_err
+ia_css_stream_get_info(const struct ia_css_stream *stream,
+ struct ia_css_stream_info *stream_info) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_info: enter/exit\n");
+ assert(stream);
+ assert(stream_info);
+
+ *stream_info = stream->info;
+ return IA_CSS_SUCCESS;
+}
+
+/*
+ * Rebuild a stream, including allocating structs, setting configuration and
+ * building the required pipes.
+ * The data is taken from the css_save struct updated upon stream creation.
+ * The stream handle is used to identify the correct entry in the css_save struct
+ */
+enum ia_css_err
+ia_css_stream_load(struct ia_css_stream *stream) {
+ if (!atomisp_hw_is_isp2401) {
+ int i;
+ enum ia_css_err err;
+
+ assert(stream);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() enter,\n");
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ {
+ if (my_css_save.stream_seeds[i].stream == stream) {
+ int j;
+
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++) {
+ if ((err = ia_css_pipe_create(&my_css_save.stream_seeds[i].pipe_config[j],
+ &my_css_save.stream_seeds[i].pipes[j])) != IA_CSS_SUCCESS) {
+ if (j) {
+ int k;
+
+ for (k = 0; k < j; k++)
+ ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[k]);
+ }
+ return err;
+ }
+ }
+ err = ia_css_stream_create(&my_css_save.stream_seeds[i].stream_config,
+ my_css_save.stream_seeds[i].num_pipes,
+ my_css_save.stream_seeds[i].pipes,
+ &my_css_save.stream_seeds[i].stream);
+ if (err != IA_CSS_SUCCESS) {
+ ia_css_stream_destroy(stream);
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
+ ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
+ return err;
+ }
+ break;
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() exit,\n");
+ return IA_CSS_SUCCESS;
+ } else {
+ /* TODO remove function - DEPRECATED */
+ (void)stream;
+ return IA_CSS_ERR_NOT_SUPPORTED;
+ }
+}
+
+enum ia_css_err
+ia_css_stream_start(struct ia_css_stream *stream) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("stream = %p", stream);
+ if ((!stream) || (!stream->last_pipe))
+ {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ IA_CSS_LOG("starting %d", stream->last_pipe->mode);
+
+ sh_css_sp_set_disable_continuous_viewfinder(stream->disable_cont_vf);
+
+ /* Create host side pipeline. */
+ err = create_host_pipeline(stream);
+ if (err != IA_CSS_SUCCESS)
+ {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if ((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) ||
+ (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
+ stream_register_with_csi_rx(stream);
+#endif
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* Initialize mipi size checks */
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ {
+ unsigned int idx;
+ unsigned int port = (unsigned int)(stream->config.source.port.port);
+
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++) {
+ sh_css_sp_group.config.mipi_sizes_for_check[port][idx] =
+ sh_css_get_mipi_sizes_for_check(port, idx);
+ }
+ }
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY)
+ {
+ err = sh_css_config_input_network(stream);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+#endif /* !HAS_NO_INPUT_SYSTEM */
+
+ err = sh_css_pipe_start(stream);
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_stream_stop(struct ia_css_stream *stream) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop() enter/exit\n");
+ assert(stream);
+ assert(stream->last_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop: stopping %d\n",
+ stream->last_pipe->mode);
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* De-initialize mipi size checks */
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ {
+ unsigned int idx;
+ unsigned int port = (unsigned int)(stream->config.source.port.port);
+
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++) {
+ sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = 0;
+ }
+ }
+#endif
+
+ if (!atomisp_hw_is_isp2401) {
+ err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline);
+ } else {
+ err = sh_css_pipes_stop(stream);
+ }
+
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ /* Ideally, unmapping should happen after pipeline_stop, but current
+ * semantics do not allow that. */
+ /* err = map_sp_threads(stream, false); */
+
+ return err;
+}
+
+bool
+ia_css_stream_has_stopped(struct ia_css_stream *stream) {
+ bool stopped;
+
+ assert(stream);
+
+ if (!atomisp_hw_is_isp2401) {
+ stopped = ia_css_pipeline_has_stopped(&stream->last_pipe->pipeline);
+ } else {
+ stopped = sh_css_pipes_have_stopped(stream);
+ }
+
+ return stopped;
+}
+
+/* ISP2400 */
+/*
+ * Destroy the stream and all the pipes related to it.
+ * The stream handle is used to identify the correct entry in the css_save struct
+ */
+enum ia_css_err
+ia_css_stream_unload(struct ia_css_stream *stream) {
+ int i;
+
+ /* some checks */
+ assert(stream);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload() enter,\n");
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ if (my_css_save.stream_seeds[i].stream == stream)
+ {
+ int j;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_unload(): unloading %d (%p)\n", i,
+ my_css_save.stream_seeds[i].stream);
+ ia_css_stream_destroy(stream);
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
+ ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_unload(): after unloading %d (%p)\n", i,
+ my_css_save.stream_seeds[i].stream);
+ break;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload() exit,\n");
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe,
+ enum ia_css_pipe_id *pipe_id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_temp_pipe_to_pipe_id() enter/exit\n");
+ if (pipe)
+ *pipe_id = pipe->mode;
+ else
+ *pipe_id = IA_CSS_PIPE_ID_COPY;
+
+ return IA_CSS_SUCCESS;
+}
+
+enum atomisp_input_format
+ia_css_stream_get_format(const struct ia_css_stream *stream) {
+ return stream->config.input_config.format;
+}
+
+bool
+ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream) {
+ return (stream->config.pixels_per_clock == 2);
+}
+
+struct ia_css_binary *
+ia_css_stream_get_shading_correction_binary(const struct ia_css_stream *stream) {
+ struct ia_css_pipe *pipe;
+
+ assert(stream);
+
+ pipe = stream->pipes[0];
+
+ if (stream->num_pipes == 2) {
+ assert(stream->pipes[1]);
+ if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
+ stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->pipes[1];
+ }
+
+ return ia_css_pipe_get_shading_correction_binary(pipe);
+}
+
+struct ia_css_binary *
+ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream) {
+ int i;
+ struct ia_css_pipe *video_pipe = NULL;
+
+ /* First we find the video pipe */
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) {
+ video_pipe = pipe;
+ break;
+ }
+ }
+ if (video_pipe)
+ return &video_pipe->pipe_settings.video.video_binary;
+ return NULL;
+}
+
+struct ia_css_binary *
+ia_css_stream_get_3a_binary(const struct ia_css_stream *stream) {
+ struct ia_css_pipe *pipe;
+ struct ia_css_binary *s3a_binary = NULL;
+
+ assert(stream);
+
+ pipe = stream->pipes[0];
+
+ if (stream->num_pipes == 2) {
+ assert(stream->pipes[1]);
+ if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
+ stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->pipes[1];
+ }
+
+ s3a_binary = ia_css_pipe_get_s3a_binary(pipe);
+
+ return s3a_binary;
+}
+
+enum ia_css_err
+ia_css_stream_set_output_padded_width(struct ia_css_stream *stream,
+ unsigned int output_padded_width) {
+ struct ia_css_pipe *pipe;
+
+ assert(stream);
+
+ pipe = stream->last_pipe;
+
+ assert(pipe);
+
+ /* set the config also just in case (redundant info? why do we save config in pipe?) */
+ pipe->config.output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width;
+ pipe->output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width;
+
+ return IA_CSS_SUCCESS;
+}
+
+static struct ia_css_binary *
+ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe) {
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe);
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.preview.preview_binary;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ unsigned int i;
+
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.sc) {
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i];
+ break;
+ }
+ }
+ } else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_BAYER)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
+ if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (binary && binary->info->sp.enable.sc)
+ return binary;
+
+ return NULL;
+}
+
+static struct ia_css_binary *
+ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe) {
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe);
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.preview.preview_binary;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ unsigned int i;
+
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) {
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i];
+ break;
+ }
+ }
+ } else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_BAYER)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
+ if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary;
+ else
+ assert(0);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (binary && !binary->info->sp.enable.s3a)
+ binary = NULL;
+
+ return binary;
+}
+
+static struct ia_css_binary *
+ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe) {
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe);
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_VIDEO:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
+ break;
+ default:
+ break;
+ }
+
+ if (binary && !binary->info->sp.enable.dis)
+ binary = NULL;
+
+ return binary;
+}
+
+struct ia_css_pipeline *
+ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe) {
+ assert(pipe);
+
+ return (struct ia_css_pipeline *)&pipe->pipeline;
+}
+
+unsigned int
+ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe) {
+ assert(pipe);
+
+ /* Klocwork (KW) could not verify that this function never returns a
+ value that is out of range, so an assert was added and, for the case
+ when asserts are not enabled, the value is clipped to the largest
+ valid one; pipe_num is unsigned so the value cannot be too small
+ */
+ assert(pipe->pipe_num < IA_CSS_PIPELINE_NUM_MAX);
+
+ if (pipe->pipe_num >= IA_CSS_PIPELINE_NUM_MAX)
+ return (IA_CSS_PIPELINE_NUM_MAX - 1);
+
+ return pipe->pipe_num;
+}
+
+unsigned int
+ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe) {
+ assert(pipe);
+
+ return (unsigned int)pipe->config.isp_pipe_version;
+}
+
+#define SP_START_TIMEOUT_US 30000000
+
+enum ia_css_err
+ia_css_start_sp(void) {
+ unsigned long timeout;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("");
+ sh_css_sp_start_isp();
+
+ /* wait until the SP has completely started */
+ timeout = SP_START_TIMEOUT_US;
+ while ((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_INITIALIZED) && timeout)
+ {
+ timeout--;
+ hrt_sleep();
+ }
+ if (timeout == 0)
+ {
+ IA_CSS_ERROR("timeout during SP initialization");
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ /* Workaround, in order to run two streams in parallel. See TASK 4271 */
+ /* TODO: Fix this. */
+
+ sh_css_init_host_sp_control_vars();
+
+ /* buffers should be initialized only when sp is started */
+ /* AM: At the moment it will be done only when there is no stream active. */
+
+ sh_css_setup_queues();
+ ia_css_bufq_dump_queue_info();
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+/*
+ * Time to wait for the SP to terminate. The only condition under which this
+ * can happen is a fatal hw failure, but we must be able to detect this and emit
+ * a proper error trace.
+ */
+#define SP_SHUTDOWN_TIMEOUT_US 200000
+
+enum ia_css_err
+ia_css_stop_sp(void) {
+ unsigned long timeout;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("void");
+
+ if (!sh_css_sp_is_running())
+ {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE("SP already stopped : return_err=%d", err);
+
+ /* Return an error - stop SP should not have been called by driver */
+ return err;
+ }
+
+ /* For now, stop whole SP */
+ if (!atomisp_hw_is_isp2401) {
+ sh_css_write_host2sp_command(host2sp_cmd_terminate);
+ } else {
+ if (!sh_css_write_host2sp_command(host2sp_cmd_terminate))
+ {
+ IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
+ ia_css_debug_dump_sp_sw_debug_info();
+ ia_css_debug_dump_debug_info(NULL);
+ }
+ }
+
+ sh_css_sp_set_sp_running(false);
+
+ timeout = SP_SHUTDOWN_TIMEOUT_US;
+ while (!ia_css_spctrl_is_idle(SP0_ID) && timeout)
+ {
+ timeout--;
+ hrt_sleep();
+ }
+ if (ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_TERMINATED)
+ IA_CSS_WARNING("SP has not terminated (SW)");
+
+ if (timeout == 0)
+ {
+ IA_CSS_WARNING("SP is not idle");
+ ia_css_debug_dump_sp_sw_debug_info();
+ }
+ timeout = SP_SHUTDOWN_TIMEOUT_US;
+ while (!isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT) && timeout)
+ {
+ timeout--;
+ hrt_sleep();
+ }
+ if (timeout == 0)
+ {
+ IA_CSS_WARNING("ISP is not idle");
+ ia_css_debug_dump_sp_sw_debug_info();
+ }
+
+ sh_css_hmm_buffer_record_uninit();
+
+ /* clear pending param sets from refcount */
+ sh_css_param_clear_param_sets();
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_update_continuous_frames(struct ia_css_stream *stream) {
+ struct ia_css_pipe *pipe;
+ unsigned int i;
+
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "sh_css_update_continuous_frames() enter:\n");
+
+ if (!stream)
+ {
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "sh_css_update_continuous_frames() leave: invalid stream, return_void\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe = stream->continuous_pipe;
+
+ for (i = stream->config.init_num_cont_raw_buf;
+ i < stream->config.target_num_cont_raw_buf; i++)
+ {
+ sh_css_update_host2sp_offline_frame(i,
+ pipe->continuous_frames[i], pipe->cont_md_buffers[i]);
+ }
+ sh_css_update_host2sp_cont_num_raw_frames(
+ stream->config.target_num_cont_raw_buf, true);
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "sh_css_update_continuous_frames() leave: return_void\n");
+
+ return IA_CSS_SUCCESS;
+}
+
+void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map) {
+ unsigned int thread_id;
+ enum ia_css_pipe_id pipe_id;
+ unsigned int pipe_num;
+ bool need_input_queue;
+
+ IA_CSS_ENTER("");
+ assert(pipe);
+
+ pipe_id = pipe->mode;
+ pipe_num = pipe->pipe_num;
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+#if defined(HAS_NO_INPUT_SYSTEM) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ need_input_queue = true;
+#else
+ need_input_queue = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+#endif
+
+ /* map required buffer queues to resources */
+ /* TODO: to be improved */
+ if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ if (pipe->pipe_settings.preview.preview_binary.info &&
+ pipe->pipe_settings.preview.preview_binary.info->sp.enable.s3a)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ } else if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE) {
+ unsigned int i;
+
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info &&
+ pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) {
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ break;
+ }
+ }
+ } else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) {
+ if (pipe->pipe_settings.capture.pre_isp_binary.info &&
+ pipe->pipe_settings.capture.pre_isp_binary.info->sp.enable.s3a)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ }
+ } else if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0])
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ if (pipe->pipe_settings.video.video_binary.info &&
+ pipe->pipe_settings.video.video_binary.info->sp.enable.s3a)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ if (pipe->pipe_settings.video.video_binary.info &&
+ pipe->pipe_settings.video.video_binary.info->sp.enable.dis)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_DIS_STATISTICS, map);
+ } else if (pipe->mode == IA_CSS_PIPE_ID_COPY) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ if (!pipe->stream->config.continuous)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ } else if (pipe->mode == IA_CSS_PIPE_ID_ACC) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ } else if (pipe->mode == IA_CSS_PIPE_ID_YUVPP) {
+ unsigned int idx;
+
+ for (idx = 0; idx < IA_CSS_PIPE_MAX_OUTPUT_STAGE; idx++) {
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, map);
+ if (pipe->enable_viewfinder[idx])
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, map);
+ }
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ }
+ IA_CSS_LEAVE("");
+}
+
+#if CONFIG_ON_FRAME_ENQUEUE()
+static enum ia_css_err set_config_on_frame_enqueue(struct ia_css_frame_info *info,
+ struct frame_data_wrapper *frame) {
+ frame->config_on_frame_enqueue.padded_width = 0;
+
+ /* currently we support configuration on frame enqueue only on YUV formats */
+ /* on other formats the padded_width is zeroed for no configuration override */
+ switch (info->format) {
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ if (info->padded_width > info->res.width) {
+ frame->config_on_frame_enqueue.padded_width = info->padded_width;
+ } else if ((info->padded_width < info->res.width) && (info->padded_width > 0)) {
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* nothing to do if width == padded width or padded width is zeroed (the same) */
+ break;
+ default:
+ break;
+ }
+
+ return IA_CSS_SUCCESS;
+}
+#endif
+
+enum ia_css_err
+ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id) {
+ enum ia_css_err ret;
+
+ IA_CSS_ENTER("");
+
+ /* Only continuous streams have a tagger to which we can send the
+ * unlock message. */
+ if (!stream || !stream->config.continuous)
+ {
+ IA_CSS_ERROR("invalid stream pointer");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (exp_id > IA_CSS_ISYS_MAX_EXPOSURE_ID ||
+ exp_id < IA_CSS_ISYS_MIN_EXPOSURE_ID)
+ {
+ IA_CSS_ERROR("invalid expsure ID: %d\n", exp_id);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* Send the event. Since we verified that the exp_id is valid,
+ * we can safely assign it to an 8-bit argument here. */
+ ret = ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER, exp_id, 0, 0);
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+/* @brief Set the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ */
+enum ia_css_err
+ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
+ bool enable) {
+ unsigned int thread_id;
+ struct ia_css_pipeline_stage *stage;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("");
+
+ /* Parameter Check */
+ if (!pipe || !pipe->stream)
+ {
+ IA_CSS_ERROR("Invalid Pipe.");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!(pipe->config.acc_extension))
+ {
+ IA_CSS_ERROR("Invalid Pipe(No Extension Firmware)");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!sh_css_sp_is_running())
+ {
+ IA_CSS_ERROR("Leaving: queue unavailable.");
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ } else
+ {
+ /* Query the threadid and stage_num for the Extension firmware*/
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ err = ia_css_pipeline_get_stage_from_fw(&pipe->pipeline, fw_handle, &stage);
+ if (err == IA_CSS_SUCCESS) {
+ /* Set the extension state. TODO: add a check for the stage firmware type (QoS). */
+ err = ia_css_bufq_enqueue_psys_event(
+ (uint8_t)IA_CSS_PSYS_SW_EVENT_STAGE_ENABLE_DISABLE,
+ (uint8_t)thread_id,
+ (uint8_t)stage->stage_num,
+ enable ? 1 : 0);
+ if (err == IA_CSS_SUCCESS) {
+ if (enable)
+ SH_CSS_QOS_STAGE_ENABLE(&sh_css_sp_group.pipe[thread_id], stage->stage_num);
+ else
+ SH_CSS_QOS_STAGE_DISABLE(&sh_css_sp_group.pipe[thread_id], stage->stage_num);
+ }
+ }
+ }
+ IA_CSS_LEAVE("err:%d handle:%u enable:%d", err, fw_handle, enable);
+ return err;
+}
+
+/* @brief Get the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ */
+enum ia_css_err
+ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
+ bool *enable) {
+ struct ia_css_pipeline_stage *stage;
+ unsigned int thread_id;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("");
+
+ /* Parameter Check */
+ if (!pipe || !pipe->stream)
+ {
+ IA_CSS_ERROR("Invalid Pipe.");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!(pipe->config.acc_extension))
+ {
+ IA_CSS_ERROR("Invalid Pipe (No Extension Firmware).");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!sh_css_sp_is_running())
+ {
+ IA_CSS_ERROR("Leaving: queue unavailable.");
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ } else
+ {
+ /* Query the threadid and stage_num corresponding to the Extension firmware*/
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ err = ia_css_pipeline_get_stage_from_fw(&pipe->pipeline, fw_handle, &stage);
+
+ if (err == IA_CSS_SUCCESS) {
+ /* Get the Extension State */
+ *enable = (SH_CSS_QOS_STAGE_IS_ENABLED(&sh_css_sp_group.pipe[thread_id],
+ stage->stage_num)) ? true : false;
+ }
+ }
+ IA_CSS_LEAVE("err:%d handle:%u enable:%d", err, fw_handle, *enable);
+ return err;
+}
+
+/* ISP2401 */
+enum ia_css_err
+ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
+ uint32_t fw_handle,
+ struct ia_css_isp_param_css_segments *css_seg,
+ struct ia_css_isp_param_isp_segments *isp_seg) {
+ unsigned int HIVE_ADDR_sp_group;
+ static struct sh_css_sp_group sp_group;
+ static struct sh_css_sp_stage sp_stage;
+ static struct sh_css_isp_stage isp_stage;
+ const struct ia_css_fw_info *fw;
+ unsigned int thread_id;
+ struct ia_css_pipeline_stage *stage;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ int stage_num = 0;
+ enum ia_css_isp_memories mem;
+ bool enabled;
+
+ IA_CSS_ENTER("");
+
+ fw = &sh_css_sp_fw;
+
+ /* Parameter Check */
+ if (!pipe || !pipe->stream)
+ {
+ IA_CSS_ERROR("Invalid Pipe.");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!(pipe->config.acc_extension))
+ {
+ IA_CSS_ERROR("Invalid Pipe (No Extension Firmware).");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!sh_css_sp_is_running())
+ {
+ IA_CSS_ERROR("Leaving: queue unavailable.");
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ } else
+ {
+ /* Query the thread_id and stage_num corresponding to the Extension firmware */
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ err = ia_css_pipeline_get_stage_from_fw(&pipe->pipeline, fw_handle, &stage);
+ if (err == IA_CSS_SUCCESS) {
+ /* Get the Extension State */
+ enabled = (SH_CSS_QOS_STAGE_IS_ENABLED(&sh_css_sp_group.pipe[thread_id],
+ stage->stage_num)) ? true : false;
+ /* Update mapped arg only when extension stage is not enabled */
+ if (enabled) {
+ IA_CSS_ERROR("Leaving: cannot update when stage is enabled.");
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ } else {
+ stage_num = stage->stage_num;
+
+ HIVE_ADDR_sp_group = fw->info.sp.group;
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_group),
+ &sp_group, sizeof(struct sh_css_sp_group));
+ mmgr_load(sp_group.pipe[thread_id].sp_stage_addr[stage_num],
+ &sp_stage, sizeof(struct sh_css_sp_stage));
+
+ mmgr_load(sp_stage.isp_stage_addr,
+ &isp_stage, sizeof(struct sh_css_isp_stage));
+
+ for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) {
+ isp_stage.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].address =
+ css_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].address;
+ isp_stage.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].size =
+ css_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].size;
+ isp_stage.binary_info.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].address =
+ isp_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].address;
+ isp_stage.binary_info.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].size =
+ isp_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].size;
+ }
+
+ mmgr_store(sp_stage.isp_stage_addr,
+ &isp_stage, sizeof(struct sh_css_isp_stage));
+ }
+ }
+ }
+ IA_CSS_LEAVE("err:%d handle:%u", err, fw_handle);
+ return err;
+}
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+static enum ia_css_err
+aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
+ struct ia_css_pipe *pipes[],
+ bool *do_crop_status) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ int i;
+ struct ia_css_pipe *curr_pipe;
+ u32 pipe_mask = 0;
+
+ if ((!curr_stream) ||
+ (curr_stream->num_pipes == 0) ||
+ (!pipes) ||
+ (!do_crop_status))
+ {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ for (i = 0; i < curr_stream->num_pipes; i++)
+ {
+ curr_pipe = pipes[i];
+ pipe_mask |= (1 << curr_pipe->config.mode);
+ }
+
+ *do_crop_status =
+ (((pipe_mask & (1 << IA_CSS_PIPE_MODE_PREVIEW)) ||
+ (pipe_mask & (1 << IA_CSS_PIPE_MODE_VIDEO))) &&
+ (pipe_mask & (1 << IA_CSS_PIPE_MODE_CAPTURE)) &&
+ curr_stream->config.continuous);
+ return IA_CSS_SUCCESS;
+}
+
+static bool
+aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe) {
+ bool status = false;
+
+ if ((curr_pipe) && enabled) {
+ if ((curr_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) ||
+ (curr_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) ||
+ (curr_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE))
+ status = true;
+ }
+
+ return status;
+}
+
+static enum ia_css_err
+aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
+ struct ia_css_resolution *effective_res) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_resolution crop_res;
+ struct ia_css_resolution *in_res = NULL;
+ struct ia_css_resolution *out_res = NULL;
+ bool use_bds_output_info = false;
+ bool use_vf_pp_in_res = false;
+ bool use_capt_pp_in_res = false;
+
+ if ((!curr_pipe) ||
+ (!effective_res))
+ {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if ((curr_pipe->config.mode != IA_CSS_PIPE_MODE_PREVIEW) &&
+ (curr_pipe->config.mode != IA_CSS_PIPE_MODE_VIDEO) &&
+ (curr_pipe->config.mode != IA_CSS_PIPE_MODE_CAPTURE))
+ {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ use_bds_output_info =
+ ((curr_pipe->bds_output_info.res.width != 0) &&
+ (curr_pipe->bds_output_info.res.height != 0));
+
+ use_vf_pp_in_res =
+ ((curr_pipe->config.vf_pp_in_res.width != 0) &&
+ (curr_pipe->config.vf_pp_in_res.height != 0));
+
+ use_capt_pp_in_res =
+ ((curr_pipe->config.capt_pp_in_res.width != 0) &&
+ (curr_pipe->config.capt_pp_in_res.height != 0));
+
+ in_res = &curr_pipe->stream->config.input_config.effective_res;
+ out_res = &curr_pipe->output_info[0].res;
+
+ switch (curr_pipe->config.mode)
+ {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ if (use_bds_output_info)
+ out_res = &curr_pipe->bds_output_info.res;
+ else if (use_vf_pp_in_res)
+ out_res = &curr_pipe->config.vf_pp_in_res;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ if (use_bds_output_info)
+ out_res = &curr_pipe->bds_output_info.res;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (use_capt_pp_in_res)
+ out_res = &curr_pipe->config.capt_pp_in_res;
+ break;
+ case IA_CSS_PIPE_MODE_ACC:
+ case IA_CSS_PIPE_MODE_COPY:
+ case IA_CSS_PIPE_MODE_YUVPP:
+ default:
+ IA_CSS_ERROR("aspect ratio cropping invalid args: mode[%d]\n",
+ curr_pipe->config.mode);
+ assert(0);
+ break;
+ }
+
+ err = ia_css_frame_find_crop_resolution(in_res, out_res, &crop_res);
+ if (err == IA_CSS_SUCCESS)
+ {
+ *effective_res = crop_res;
+ } else
+ {
+ /* in case of error, fall back to the default
+ * effective resolution from the driver. */
+ IA_CSS_LOG("ia_css_frame_find_crop_resolution() failed with err(%d)", err);
+ }
+ return err;
+}
+#endif
+
+static void
+sh_css_hmm_buffer_record_init(void) {
+ int i;
+
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++)
+ sh_css_hmm_buffer_record_reset(&hmm_buffer_record[i]);
+}
+
+static void
+sh_css_hmm_buffer_record_uninit(void) {
+ int i;
+ struct sh_css_hmm_buffer_record *buffer_record = NULL;
+
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if (buffer_record->in_use) {
+ if (buffer_record->h_vbuf)
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &buffer_record->h_vbuf);
+ sh_css_hmm_buffer_record_reset(buffer_record);
+ }
+ buffer_record++;
+ }
+}
+
+static void
+sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record) {
+ assert(buffer_record);
+ buffer_record->in_use = false;
+ buffer_record->type = IA_CSS_BUFFER_TYPE_INVALID;
+ buffer_record->h_vbuf = NULL;
+ buffer_record->kernel_ptr = 0;
+}
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
+ enum ia_css_buffer_type type,
+ hrt_address kernel_ptr) {
+ int i;
+ struct sh_css_hmm_buffer_record *buffer_record = NULL;
+ struct sh_css_hmm_buffer_record *out_buffer_record = NULL;
+
+ assert(h_vbuf);
+ assert((type > IA_CSS_BUFFER_TYPE_INVALID) &&
+ (type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE));
+ assert(kernel_ptr != 0);
+
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if (!buffer_record->in_use) {
+ buffer_record->in_use = true;
+ buffer_record->type = type;
+ buffer_record->h_vbuf = h_vbuf;
+ buffer_record->kernel_ptr = kernel_ptr;
+ out_buffer_record = buffer_record;
+ break;
+ }
+ buffer_record++;
+ }
+
+ return out_buffer_record;
+}
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_validate(hrt_vaddress ddr_buffer_addr,
+ enum ia_css_buffer_type type) {
+ int i;
+ struct sh_css_hmm_buffer_record *buffer_record = NULL;
+ bool found_record = false;
+
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if ((buffer_record->in_use) &&
+ (buffer_record->type == type) &&
+ (buffer_record->h_vbuf) &&
+ (buffer_record->h_vbuf->vptr == ddr_buffer_addr)) {
+ found_record = true;
+ break;
+ }
+ buffer_record++;
+ }
+
+ if (found_record)
+ return buffer_record;
+ else
+ return NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_defs.h b/drivers/staging/media/atomisp/pci/sh_css_defs.h
new file mode 100644
index 000000000000..fcd5081edf82
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_defs.h
@@ -0,0 +1,410 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_DEFS_H_
+#define _SH_CSS_DEFS_H_
+
+#include "isp.h"
+
+/*#include "vamem.h"*/ /* Cannot include for VAMEM properties this file is visible on ISP -> pipeline generator */
+
+#include "math_support.h" /* max(), min, etc etc */
+
+/* ID's for refcount */
+#define IA_CSS_REFCOUNT_PARAM_SET_POOL 0xCAFE0001
+#define IA_CSS_REFCOUNT_PARAM_BUFFER 0xCAFE0002
+
+/* Digital Image Stabilization */
+#define SH_CSS_DIS_DECI_FACTOR_LOG2 6
+
+/* UV offset: 1:uv=-128...127, 0:uv=0...255 */
+#define SH_CSS_UV_OFFSET_IS_0 0
+
+/* Bayer bit depth is adjusted to 13 bits in the ISP */
+#define SH_CSS_BAYER_BITS 13
+
+/* Max value of bayer data (unsigned 13bit in ISP) */
+#define SH_CSS_BAYER_MAXVAL ((1U << SH_CSS_BAYER_BITS) - 1)
+
+/* Bits of yuv in ISP */
+#define SH_CSS_ISP_YUV_BITS 8
+
+#define SH_CSS_DP_GAIN_SHIFT 5
+#define SH_CSS_BNR_GAIN_SHIFT 13
+#define SH_CSS_YNR_GAIN_SHIFT 13
+#define SH_CSS_AE_YCOEF_SHIFT 13
+#define SH_CSS_AF_FIR_SHIFT 13
+#define SH_CSS_YEE_DETAIL_GAIN_SHIFT 8 /* [u5.8] */
+#define SH_CSS_YEE_SCALE_SHIFT 8
+#define SH_CSS_TNR_COEF_SHIFT 13
+#define SH_CSS_MACC_COEF_SHIFT 11 /* [s2.11] for ISP1 */
+#define SH_CSS_MACC2_COEF_SHIFT 13 /* [s[exp].[13-exp]] for ISP2 */
+#define SH_CSS_DIS_COEF_SHIFT 13
+
+/* enumeration of the bayer downscale factors. When a binary supports multiple
+ * factors, the OR of these defines is used to build the mask of supported
+ * factors. The BDS factor is used in pre-processor expressions so we cannot
+ * use an enum here. */
+#define SH_CSS_BDS_FACTOR_1_00 (0)
+#define SH_CSS_BDS_FACTOR_1_25 (1)
+#define SH_CSS_BDS_FACTOR_1_50 (2)
+#define SH_CSS_BDS_FACTOR_2_00 (3)
+#define SH_CSS_BDS_FACTOR_2_25 (4)
+#define SH_CSS_BDS_FACTOR_2_50 (5)
+#define SH_CSS_BDS_FACTOR_3_00 (6)
+#define SH_CSS_BDS_FACTOR_4_00 (7)
+#define SH_CSS_BDS_FACTOR_4_50 (8)
+#define SH_CSS_BDS_FACTOR_5_00 (9)
+#define SH_CSS_BDS_FACTOR_6_00 (10)
+#define SH_CSS_BDS_FACTOR_8_00 (11)
+#define NUM_BDS_FACTORS (12)
+
+#define PACK_BDS_FACTOR(factor) (1 << (factor))
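+
+/* Illustrative example (not taken from the driver): a binary that supports
+ * the 1.00 and 2.00 factors would advertise the mask
+ * PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_00) | PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00),
+ * and a requested factor 'f' is then supported iff
+ * (mask & PACK_BDS_FACTOR(f)) != 0. */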
+
+/* The following macros should match the enum ia_css_pipe_version in
+ * ia_css_pipe_public.h. The reason for adding these macros is that an
+ * enum type evaluates to 0 at preprocessing time. */
+#define SH_CSS_ISP_PIPE_VERSION_1 1
+#define SH_CSS_ISP_PIPE_VERSION_2_2 2
+#define SH_CSS_ISP_PIPE_VERSION_2_6_1 3
+#define SH_CSS_ISP_PIPE_VERSION_2_7 4
+
+/*--------------- sRGB Gamma -----------------
+CCM : YCgCo[0,8191] -> RGB[0,4095]
+sRGB Gamma : RGB [0,4095] -> RGB[0,8191]
+CSC : RGB [0,8191] -> YUV[0,8191]
+
+CCM:
+Y[0,8191],CgCo[-4096,4095],coef[-8192,8191] -> RGB[0,4095]
+
+sRGB Gamma:
+RGB[0,4095] -(interpolation step16)-> RGB[0,255] -(LUT 12bit)-> RGB[0,4095] -> RGB[0,8191]
+
+CSC:
+RGB[0,8191],coef[-8192,8191] -> RGB[0,8191]
+--------------------------------------------*/
+/* Bits of input/output of sRGB Gamma */
+#define SH_CSS_RGB_GAMMA_INPUT_BITS 12 /* [0,4095] */
+#define SH_CSS_RGB_GAMMA_OUTPUT_BITS 13 /* [0,8191] */
+
+/* Bits of fractional part of interpolation in vamem, [0,4095]->[0,255] */
+#define SH_CSS_RGB_GAMMA_FRAC_BITS \
+ (SH_CSS_RGB_GAMMA_INPUT_BITS - SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE_LOG2)
+#define SH_CSS_RGB_GAMMA_ONE BIT(SH_CSS_RGB_GAMMA_FRAC_BITS)
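+
+/* Worked example, assuming a 256-entry gamma LUT, i.e.
+ * SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE_LOG2 == 8 (the real value is defined
+ * elsewhere): SH_CSS_RGB_GAMMA_FRAC_BITS = 12 - 8 = 4 and
+ * SH_CSS_RGB_GAMMA_ONE = 1 << 4 = 16, so a 12-bit input x is split into
+ * index = x >> 4 and fraction = x & 15 for the [0,4095] -> [0,255]
+ * interpolation step. */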
+
+/* Bits of input of CCM, = 13, Y[0,8191],CgCo[-4096,4095] */
+#define SH_CSS_YUV2RGB_CCM_INPUT_BITS SH_CSS_BAYER_BITS
+
+/* Bits of output of CCM, = 12, RGB[0,4095] */
+#define SH_CSS_YUV2RGB_CCM_OUTPUT_BITS SH_CSS_RGB_GAMMA_INPUT_BITS
+
+/* Maximum value of output of CCM */
+#define SH_CSS_YUV2RGB_CCM_MAX_OUTPUT \
+ ((1 << SH_CSS_YUV2RGB_CCM_OUTPUT_BITS) - 1)
+
+#define SH_CSS_NUM_INPUT_BUF_LINES 4
+
+/* Left cropping only applicable for sufficiently large nway */
+#if ISP_VEC_NELEMS == 16
+#define SH_CSS_MAX_LEFT_CROPPING 0
+#define SH_CSS_MAX_TOP_CROPPING 0
+#else
+#define SH_CSS_MAX_LEFT_CROPPING 12
+#define SH_CSS_MAX_TOP_CROPPING 12
+#endif
+
+#define SH_CSS_SP_MAX_WIDTH 1280
+
+/* This is the maximum grid we can handle in the ISP binaries.
+ * The host code makes sure no bigger grid is ever selected. */
+#define SH_CSS_MAX_BQ_GRID_WIDTH 80
+#define SH_CSS_MAX_BQ_GRID_HEIGHT 60
+
+/* The minimum DVS envelope is 12x12 (for IPU2), to make sure the
+ * invalid rows/columns that result from filter initialization are skipped. */
+#define SH_CSS_MIN_DVS_ENVELOPE 12U
+
+/* The FPGA system (vec_nelems == 16) only supports up to 5MP */
+#if ISP_VEC_NELEMS == 16
+#define SH_CSS_MAX_SENSOR_WIDTH 2560
+#define SH_CSS_MAX_SENSOR_HEIGHT 1920
+#else
+#define SH_CSS_MAX_SENSOR_WIDTH 4608
+#define SH_CSS_MAX_SENSOR_HEIGHT 3450
+#endif
+
+/* Limited to reduce vmem pressure */
+#if ISP_VMEM_DEPTH >= 3072
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH SH_CSS_MAX_SENSOR_WIDTH
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT SH_CSS_MAX_SENSOR_HEIGHT
+#else
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH 3264
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT 2448
+#endif
+/* When using bayer decimation */
+/*
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC 4224
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC 3168
+*/
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC SH_CSS_MAX_SENSOR_WIDTH
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC SH_CSS_MAX_SENSOR_HEIGHT
+
+#define SH_CSS_MIN_SENSOR_WIDTH 2
+#define SH_CSS_MIN_SENSOR_HEIGHT 2
+
+/*
+#define SH_CSS_MAX_VF_WIDTH_DEC 1920
+#define SH_CSS_MAX_VF_HEIGHT_DEC 1080
+*/
+#define SH_CSS_MAX_VF_WIDTH_DEC SH_CSS_MAX_VF_WIDTH
+#define SH_CSS_MAX_VF_HEIGHT_DEC SH_CSS_MAX_VF_HEIGHT
+
+/* We use 16 bits per coordinate component, including integer
+ and fractional bits */
+#define SH_CSS_MORPH_TABLE_GRID ISP_VEC_NELEMS
+#define SH_CSS_MORPH_TABLE_ELEM_BYTES 2
+#define SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD \
+ (HIVE_ISP_DDR_WORD_BYTES / SH_CSS_MORPH_TABLE_ELEM_BYTES)
+
+#define ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 1)
+#define ISP2400_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 1)
+
+#define ISP2400_SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR \
+ CEIL_MUL(ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+
+/* TODO: I will move macros of "*_SCTBL_*" to SC kernel.
+ "+ 2" should be "+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT". (michie, Sep/23/2014) */
+#define ISP2401_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 2)
+#define ISP2401_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 2)
+
+#define ISP2401_SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR \
+	CEIL_MUL(ISP2401_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+
+/* Each line of this table is aligned to the maximum line width. */
+#define SH_CSS_MAX_S3ATBL_WIDTH SH_CSS_MAX_BQ_GRID_WIDTH
+
+/* Video mode specific DVS define */
+/* The video binary supports a delay of 1 or 2 frames */
+#define VIDEO_FRAME_DELAY 2
+/* +1 because DVS reads the previous and writes the current frame concurrently */
+#define MAX_NUM_VIDEO_DELAY_FRAMES (VIDEO_FRAME_DELAY + 1)
+
+/* Preview mode specific DVS define. */
+/* In preview we only need GDC functionality (and not the DVS functionality) */
+/* The minimum number of DVS frames you need is 2: one where GDC reads from and another where GDC writes to */
+#define NUM_PREVIEW_DVS_FRAMES (2)
+
+/* TNR is no longer exclusive to video; SkyCam preview has TNR too (same kernel as video).
+ * All modes use the generic define NUM_TNR_FRAMES. The define NUM_VIDEO_TNR_FRAMES has been deprecated.
+ *
+ * Notes
+ * 1) The value depends on the TNR kernel that is used; it does not depend on the mode
+ *    and it is not something you can freely choose.
+ * 2) For the luma-only pipeline a version that supports two different sets of TNR reference frames
+ *    is used.
+ */
+#define NUM_VALID_TNR_REF_FRAMES (1) /* At least one valid TNR reference frame is required */
+#define NUM_TNR_FRAMES_PER_REF_BUF_SET (2)
+/* In luma-only mode alternately illuminated frames are supported, which requires two double buffers */
+#define NUM_TNR_REF_BUF_SETS (1)
+
+#define NUM_TNR_FRAMES (NUM_TNR_FRAMES_PER_REF_BUF_SET * NUM_TNR_REF_BUF_SETS)
+
+#define NUM_VIDEO_TNR_FRAMES 2
+
+#define MAX_NUM_DELAY_FRAMES MAX(MAX_NUM_VIDEO_DELAY_FRAMES, NUM_PREVIEW_DVS_FRAMES)
+
+/* Note that this is the define used to configure all data structures common for all modes */
+/* It should be equal to or bigger than the max number of DVS frames for all possible modes */
+/* Rules: these implement logic shared between the host code and the ISP firmware.
+   The ISP firmware needs these rules to be applied at pre-processor time,
+   which is why these are macros, not functions. */
+#define _ISP_BQS(num) ((num) / 2)
+#define _ISP_VECS(width) CEIL_DIV(width, ISP_VEC_NELEMS)
+
+#define ISP_BQ_GRID_WIDTH(elements_per_line, deci_factor_log2) \
+ CEIL_SHIFT(elements_per_line / 2, deci_factor_log2)
+#define ISP_BQ_GRID_HEIGHT(lines_per_frame, deci_factor_log2) \
+ CEIL_SHIFT(lines_per_frame / 2, deci_factor_log2)
+#define ISP_C_VECTORS_PER_LINE(elements_per_line) \
+ _ISP_VECS(elements_per_line / 2)
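+
+/* Worked example (assuming ISP_VEC_NELEMS == 64): a 1920x1080 frame with
+ * deci_factor_log2 == 3 gives
+ *   ISP_BQ_GRID_WIDTH(1920, 3)   = CEIL_SHIFT(960, 3) = 120 BQs,
+ *   ISP_BQ_GRID_HEIGHT(1080, 3)  = CEIL_SHIFT(540, 3) =  68 BQs,
+ *   ISP_C_VECTORS_PER_LINE(1920) = CEIL_DIV(960, 64)  =  15 vectors. */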
+
+/* The morphing table is similar to the shading table in the sense that we
+ have 1 more value than we have cells in the grid. */
+#define _ISP_MORPH_TABLE_WIDTH(int_width) \
+ (CEIL_DIV(int_width, SH_CSS_MORPH_TABLE_GRID) + 1)
+#define _ISP_MORPH_TABLE_HEIGHT(int_height) \
+ (CEIL_DIV(int_height, SH_CSS_MORPH_TABLE_GRID) + 1)
+#define _ISP_MORPH_TABLE_ALIGNED_WIDTH(width) \
+ CEIL_MUL(_ISP_MORPH_TABLE_WIDTH(width), \
+ SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD)
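+
+/* Worked example (assuming ISP_VEC_NELEMS == 64 and
+ * HIVE_ISP_DDR_WORD_BYTES == 32, i.e. 16 two-byte elements per DDR word):
+ * for a 1920-pixel internal width,
+ *   _ISP_MORPH_TABLE_WIDTH(1920)         = CEIL_DIV(1920, 64) + 1 = 31,
+ *   _ISP_MORPH_TABLE_ALIGNED_WIDTH(1920) = CEIL_MUL(31, 16)       = 32. */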
+
+#define _ISP2400_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + 1)
+#define _ISP2400_SCTBL_HEIGHT(input_height, deci_factor_log2) \
+ (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + 1)
+#define _ISP2400_SCTBL_ALIGNED_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+	CEIL_MUL(_ISP2400_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2), \
+ ISP_VEC_NELEMS)
+
+/* To position the shading center grid point on the center of output image,
+ * one more grid cell is needed as margin. */
+#define SH_CSS_SCTBL_CENTERING_MARGIN 1
+
+/* The shading table width and height are the number of grids, not cells. The last grid should be counted. */
+#define SH_CSS_SCTBL_LAST_GRID_COUNT 1
+
+/* Number of horizontal grids per color in the shading table. */
+#define _ISP2401_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + \
+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+/* Number of vertical grids per color in the shading table. */
+#define _ISP2401_SCTBL_HEIGHT(input_height, deci_factor_log2) \
+ (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + \
+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+
+/* ISP2401: Legacy API: Number of horizontal grids per color in the shading table. */
+#define _ISP_SCTBL_LEGACY_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+/* ISP2401: Legacy API: Number of vertical grids per color in the shading table. */
+#define _ISP_SCTBL_LEGACY_HEIGHT(input_height, deci_factor_log2) \
+ (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+
+/* *****************************************************************
+ * Statistics for 3A (Auto Focus, Auto White Balance, Auto Exposure)
+ * *****************************************************************/
+/* If left cropping is used, 3A statistics are also cropped by 2 vectors. */
+#define _ISP_S3ATBL_WIDTH(in_width, deci_factor_log2) \
+ (_ISP_BQS(in_width) >> deci_factor_log2)
+#define _ISP_S3ATBL_HEIGHT(in_height, deci_factor_log2) \
+ (_ISP_BQS(in_height) >> deci_factor_log2)
+#define _ISP_S3A_ELEMS_ISP_WIDTH(width, left_crop) \
+ (width - ((left_crop) ? 2 * ISP_VEC_NELEMS : 0))
+
+#define _ISP_S3ATBL_ISP_WIDTH(in_width, deci_factor_log2) \
+ CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2)
+#define _ISP_S3ATBL_ISP_HEIGHT(in_height, deci_factor_log2) \
+ CEIL_SHIFT(_ISP_BQS(in_height), deci_factor_log2)
+#define ISP_S3ATBL_VECTORS \
+ _ISP_VECS(SH_CSS_MAX_S3ATBL_WIDTH * \
+ (sizeof(struct ia_css_3a_output) / sizeof(int32_t)))
+#define ISP_S3ATBL_HI_LO_STRIDE \
+ (ISP_S3ATBL_VECTORS * ISP_VEC_NELEMS)
+#define ISP_S3ATBL_HI_LO_STRIDE_BYTES \
+ (sizeof(unsigned short) * ISP_S3ATBL_HI_LO_STRIDE)
+
+/* Viewfinder support */
+#define __ISP_MAX_VF_OUTPUT_WIDTH(width, left_crop) \
+ (width - 2 * ISP_VEC_NELEMS + ((left_crop) ? 2 * ISP_VEC_NELEMS : 0))
+
+#define __ISP_VF_OUTPUT_WIDTH_VECS(out_width, vf_log_downscale) \
+ (_ISP_VECS((out_width) >> (vf_log_downscale)))
+
+#define _ISP_VF_OUTPUT_WIDTH(vf_out_vecs) ((vf_out_vecs) * ISP_VEC_NELEMS)
+#define _ISP_VF_OUTPUT_HEIGHT(out_height, vf_log_ds) \
+ ((out_height) >> (vf_log_ds))
+
+#define _ISP_LOG_VECTOR_STEP(mode) \
+ ((mode) == IA_CSS_BINARY_MODE_CAPTURE_PP ? 2 : 1)
+
+/* It is preferred not to have more than 2x scaling in one step
+ * in GDC (the assumption is for the capture_pp and yuv_scale stages) */
+#define MAX_PREFERRED_YUV_DS_PER_STEP 2
+
+/* Rules for computing the internal width. This is extremely complicated
+ * and definitely needs to be commented and explained. */
+#define _ISP_LEFT_CROP_EXTRA(left_crop) ((left_crop) > 0 ? 2 * ISP_VEC_NELEMS : 0)
+
+#define __ISP_MIN_INTERNAL_WIDTH(num_chunks, pipelining, mode) \
+ ((num_chunks) * (pipelining) * (1 << _ISP_LOG_VECTOR_STEP(mode)) * \
+ ISP_VEC_NELEMS)
+
+#define __ISP_PADDED_OUTPUT_WIDTH(out_width, dvs_env_width, left_crop) \
+ ((out_width) + MAX(dvs_env_width, _ISP_LEFT_CROP_EXTRA(left_crop)))
+
+#define __ISP_CHUNK_STRIDE_ISP(mode) \
+ ((1 << _ISP_LOG_VECTOR_STEP(mode)) * ISP_VEC_NELEMS)
+
+#define __ISP_CHUNK_STRIDE_DDR(c_subsampling, num_chunks) \
+ ((c_subsampling) * (num_chunks) * HIVE_ISP_DDR_WORD_BYTES)
+#define __ISP_INTERNAL_WIDTH(out_width, \
+ dvs_env_width, \
+ left_crop, \
+ mode, \
+ c_subsampling, \
+ num_chunks, \
+ pipelining) \
+ CEIL_MUL2(CEIL_MUL2(MAX(__ISP_PADDED_OUTPUT_WIDTH(out_width, \
+ dvs_env_width, \
+ left_crop), \
+ __ISP_MIN_INTERNAL_WIDTH(num_chunks, \
+ pipelining, \
+ mode) \
+ ), \
+ __ISP_CHUNK_STRIDE_ISP(mode) \
+ ), \
+ __ISP_CHUNK_STRIDE_DDR(c_subsampling, num_chunks) \
+ )
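+
+/* Worked example (assuming ISP_VEC_NELEMS == 64 and
+ * HIVE_ISP_DDR_WORD_BYTES == 32): out_width = 1920, dvs_env_width = 128,
+ * left_crop = 0, a non-capture_pp mode (vector step 1), c_subsampling = 1,
+ * num_chunks = 1, pipelining = 1:
+ *   padded output width    = 1920 + MAX(128, 0) = 2048
+ *   minimum internal width = 1 * 1 * 2 * 64     = 128
+ *   internal width = CEIL_MUL2(CEIL_MUL2(MAX(2048, 128), 128), 32) = 2048 */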
+
+#define __ISP_INTERNAL_HEIGHT(out_height, dvs_env_height, top_crop) \
+ ((out_height) + (dvs_env_height) + top_crop)
+
+/* @GC: Input can be up to sensor resolution when either bayer downscaling
+ * or raw binning is enabled.
+ * Also, during continuous mode, we need to align to 4*NWAY since input
+ * should support binning */
+#define _ISP_MAX_INPUT_WIDTH(max_internal_width, enable_ds, enable_fixed_bayer_ds, enable_raw_bin, \
+ enable_continuous) \
+ ((enable_ds) ? \
+ SH_CSS_MAX_SENSOR_WIDTH :\
+ (enable_fixed_bayer_ds) ? \
+ CEIL_MUL(SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC, 4 * ISP_VEC_NELEMS) : \
+ (enable_raw_bin) ? \
+ CEIL_MUL(SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH, 4 * ISP_VEC_NELEMS) : \
+ (enable_continuous) ? \
+ SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH \
+ : max_internal_width)
+
+#define _ISP_INPUT_WIDTH(internal_width, ds_input_width, enable_ds) \
+ ((enable_ds) ? (ds_input_width) : (internal_width))
+
+#define _ISP_MAX_INPUT_HEIGHT(max_internal_height, enable_ds, enable_fixed_bayer_ds, enable_raw_bin, \
+ enable_continuous) \
+ ((enable_ds) ? \
+ SH_CSS_MAX_SENSOR_HEIGHT :\
+ (enable_fixed_bayer_ds) ? \
+ SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC : \
+ (enable_raw_bin || enable_continuous) ? \
+ SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT \
+ : max_internal_height)
+
+#define _ISP_INPUT_HEIGHT(internal_height, ds_input_height, enable_ds) \
+ ((enable_ds) ? (ds_input_height) : (internal_height))
+
+#define SH_CSS_MAX_STAGES 8 /* primary_stage[1-6], capture_pp, vf_pp */
+
+/* The CSI2+ input system requires extra padding from vmem */
+#ifdef CONFIG_CSI2_PLUS
+#define _ISP_EXTRA_PADDING_VECS 2
+#else
+#define _ISP_EXTRA_PADDING_VECS 0
+#endif /* CONFIG_CSI2_PLUS */
+
+#endif /* _SH_CSS_DEFS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h b/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h
new file mode 100644
index 000000000000..23044aad654f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h
@@ -0,0 +1,36 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __SH_CSS_DVS_INFO_H__
+#define __SH_CSS_DVS_INFO_H__
+
+#include <math_support.h>
+
+/* number of horizontal 64x64 blocks: divide by DVS_BLOCKDIM_X rounding up, then make even */
+#define DVS_NUM_BLOCKS_X(X) (CEIL_MUL(CEIL_DIV((X), DVS_BLOCKDIM_X), 2))
+
+/* number of vertical 64x64 blocks: divide by DVS_BLOCKDIM_Y_LUMA rounding up */
+#define DVS_NUM_BLOCKS_Y(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_LUMA))
+
+/* Bilinear interpolation (HRT_GDC_BLI_MODE) is currently the only supported method.
+ * Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet */
+#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE
+
+#define DVS_INPUT_BYTES_PER_PIXEL (1)
+
+#define DVS_NUM_BLOCKS_X_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_X))
+
+#define DVS_NUM_BLOCKS_Y_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_CHROMA))
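+
+/* Worked example (assuming DVS_BLOCKDIM_X == 64 and
+ * DVS_BLOCKDIM_Y_LUMA == 64): for a 1920x1080 frame,
+ *   DVS_NUM_BLOCKS_X(1920) = CEIL_MUL(CEIL_DIV(1920, 64), 2) = 30,
+ *   DVS_NUM_BLOCKS_Y(1080) = CEIL_DIV(1080, 64)              = 17. */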
+
+#endif /* __SH_CSS_DVS_INFO_H__ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
new file mode 100644
index 000000000000..eb3c01574853
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
@@ -0,0 +1,333 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <math_support.h>
+#include "platform_support.h"
+#include "sh_css_firmware.h"
+
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_internal.h"
+#include "ia_css_isp_param.h"
+
+#include "memory_access.h"
+#include "assert_support.h"
+#include "string_support.h"
+
+#include "isp.h" /* PMEM_WIDTH_LOG2 */
+
+#include "ia_css_isp_params.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_isp_states.h"
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
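+
+/* The usual two-level stringification idiom: STR() expands its argument
+ * first and _STR() then stringifies it, so that, e.g., with a hypothetical
+ *
+ *	#define RELEASE_VERSION irci_stable_candrpv_0415_20150521_0458
+ *
+ * STR(RELEASE_VERSION) yields "irci_stable_candrpv_0415_20150521_0458",
+ * whereas _STR(RELEASE_VERSION) would yield "RELEASE_VERSION". */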
+
+struct firmware_header {
+ struct sh_css_fw_bi_file_h file_header;
+ struct ia_css_fw_info binary_header;
+};
+
+struct fw_param {
+ const char *name;
+ const void *buffer;
+};
+
+static struct firmware_header *firmware_header;
+
+/* The string STR is a placeholder
+ * which will be replaced with the actual RELEASE_VERSION
+ * during package generation. Please do not modify. */
+static const char *isp2400_release_version = STR(irci_stable_candrpv_0415_20150521_0458);
+static const char *isp2401_release_version = STR(irci_ecr-master_20150911_0724);
+
+#define MAX_FW_REL_VER_NAME 300
+static char FW_rel_ver_name[MAX_FW_REL_VER_NAME] = "---";
+
+struct ia_css_fw_info sh_css_sp_fw;
+struct ia_css_blob_descr *sh_css_blob_info; /* Only ISP blob info (no SP) */
+unsigned int sh_css_num_binaries; /* This includes 1 SP binary */
+
+static struct fw_param *fw_minibuffer;
+
+char *sh_css_get_fw_version(void)
+{
+ return FW_rel_ver_name;
+}
+
+/*
+ * Split the loaded firmware into blobs
+ */
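+
+/* The firmware image layout assumed by the code below (a sketch inferred
+ * from the loading code, not a normative description):
+ *
+ *	+---------------------------+ fw_data
+ *	| sh_css_fw_bi_file_h       |
+ *	+---------------------------+
+ *	| ia_css_fw_info[0]         | SP binary header
+ *	| ia_css_fw_info[1..n-1]    | ISP binary headers
+ *	+---------------------------+
+ *	| blob data                 | fw_data + bi->blob.offset
+ *	| (text/icache/data + pad)  |
+ *	+---------------------------+
+ */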
+
+/* Setup sp/sp1 binary */
+static enum ia_css_err
+setup_binary(struct ia_css_fw_info *fw, const char *fw_data,
+ struct ia_css_fw_info *sh_css_fw, unsigned int binary_id) {
+ const char *blob_data;
+
+ if ((!fw) || (!fw_data))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ blob_data = fw_data + fw->blob.offset;
+
+ *sh_css_fw = *fw;
+
+ sh_css_fw->blob.code = vmalloc(fw->blob.size);
+ if (!sh_css_fw->blob.code)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ memcpy((void *)sh_css_fw->blob.code, blob_data, fw->blob.size);
+ sh_css_fw->blob.data = (char *)sh_css_fw->blob.code + fw->blob.data_source;
+ fw_minibuffer[binary_id].buffer = sh_css_fw->blob.code;
+
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi,
+ struct ia_css_blob_descr *bd,
+ unsigned int index) {
+ const char *name;
+ const unsigned char *blob;
+
+ if ((!fw) || (!bd))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* Special case: only one binary in fw */
+ if (!bi)
+ bi = (const struct ia_css_fw_info *)fw;
+
+ name = fw + bi->blob.prog_name_offset;
+ blob = (const unsigned char *)fw + bi->blob.offset;
+
+ /* sanity check; note the padding bytes added for section-to-DDR alignment */
+ if (bi->blob.size != bi->blob.text_size + bi->blob.icache_size + bi->blob.data_size + bi->blob.padding_size)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if ((bi->blob.offset % (1UL << (ISP_PMEM_WIDTH_LOG2 - 3))) != 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ bd->blob = blob;
+ bd->header = *bi;
+
+ if (bi->type == ia_css_isp_firmware || bi->type == ia_css_sp_firmware)
+ {
+ char *namebuffer;
+
+ namebuffer = kstrdup(name, GFP_KERNEL);
+ if (!namebuffer)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ bd->name = fw_minibuffer[index].name = namebuffer;
+ } else
+ {
+ bd->name = name;
+ }
+
+ if (bi->type == ia_css_isp_firmware)
+ {
+ size_t paramstruct_size = sizeof(struct ia_css_memory_offsets);
+ size_t configstruct_size = sizeof(struct ia_css_config_memory_offsets);
+ size_t statestruct_size = sizeof(struct ia_css_state_memory_offsets);
+
+ char *parambuf = kmalloc(paramstruct_size + configstruct_size +
+ statestruct_size,
+ GFP_KERNEL);
+ if (!parambuf)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = NULL;
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = NULL;
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = NULL;
+
+ fw_minibuffer[index].buffer = parambuf;
+
+ /* copy ia_css_memory_offsets */
+ memcpy(parambuf, (void *)(fw +
+ bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_PARAM]),
+ paramstruct_size);
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = parambuf;
+
+ /* copy ia_css_config_memory_offsets */
+ memcpy(parambuf + paramstruct_size,
+ (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_CONFIG]),
+ configstruct_size);
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = parambuf +
+ paramstruct_size;
+
+ /* copy ia_css_state_memory_offsets */
+ memcpy(parambuf + paramstruct_size + configstruct_size,
+ (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_STATE]),
+ statestruct_size);
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = parambuf +
+ paramstruct_size + configstruct_size;
+ }
+ return IA_CSS_SUCCESS;
+}
+
+bool
+sh_css_check_firmware_version(struct device *dev, const char *fw_data)
+{
+ struct sh_css_fw_bi_file_h *file_header;
+
+ const char *release_version;
+
+ if (!atomisp_hw_is_isp2401)
+ release_version = isp2400_release_version;
+ else
+ release_version = isp2401_release_version;
+
+ firmware_header = (struct firmware_header *)fw_data;
+ file_header = &firmware_header->file_header;
+
+ if (strcmp(file_header->version, release_version) != 0) {
+ dev_err(dev, "Firmware version may not be compatible with this driver\n");
+ dev_err(dev, "Expecting version '%s', but firmware is '%s'.\n",
+ release_version, file_header->version);
+ }
+
+ /* For now, just accept a mismatched version */
+ return true;
+}
+
+enum ia_css_err
+sh_css_load_firmware(struct device *dev, const char *fw_data,
+ unsigned int fw_size) {
+ unsigned int i;
+ struct ia_css_fw_info *binaries;
+ struct sh_css_fw_bi_file_h *file_header;
+ bool valid_firmware = false;
+ const char *release_version;
+
+ if (!atomisp_hw_is_isp2401)
+ release_version = isp2400_release_version;
+ else
+ release_version = isp2401_release_version;
+
+ firmware_header = (struct firmware_header *)fw_data;
+ file_header = &firmware_header->file_header;
+ binaries = &firmware_header->binary_header;
+ strscpy(FW_rel_ver_name, file_header->version, min(sizeof(FW_rel_ver_name), sizeof(file_header->version)));
+ valid_firmware = sh_css_check_firmware_version(dev, fw_data);
+ if (!valid_firmware) {
+ IA_CSS_ERROR("CSS code version (%s) and firmware version (%s) mismatch!",
+ file_header->version, release_version);
+ return IA_CSS_ERR_VERSION_MISMATCH;
+ } else {
+ IA_CSS_LOG("successfully load firmware version %s", release_version);
+ }
+
+ /* some sanity checks */
+ if (!fw_data || fw_size < sizeof(struct sh_css_fw_bi_file_h))
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (file_header->h_size != sizeof(struct sh_css_fw_bi_file_h))
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ sh_css_num_binaries = file_header->binary_nr;
+ /* Only allocate memory for ISP blob info */
+ if (sh_css_num_binaries > NUM_OF_SPS)
+ {
+ sh_css_blob_info = kmalloc(
+ (sh_css_num_binaries - NUM_OF_SPS) *
+ sizeof(*sh_css_blob_info), GFP_KERNEL);
+ if (!sh_css_blob_info)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ } else {
+ sh_css_blob_info = NULL;
+ }
+
+ fw_minibuffer = kcalloc(sh_css_num_binaries, sizeof(struct fw_param),
+ GFP_KERNEL);
+ if (!fw_minibuffer)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ for (i = 0; i < sh_css_num_binaries; i++)
+ {
+ struct ia_css_fw_info *bi = &binaries[i];
+ /* note: the var below is made static as it is quite large;
+ if it is not static it ends up on the stack which could
+ cause issues for drivers
+ */
+ static struct ia_css_blob_descr bd;
+ enum ia_css_err err;
+
+ err = sh_css_load_blob_info(fw_data, bi, &bd, i);
+
+ if (err != IA_CSS_SUCCESS)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (bi->blob.offset + bi->blob.size > fw_size)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (bi->type == ia_css_sp_firmware) {
+ if (i != SP_FIRMWARE)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ err = setup_binary(bi, fw_data, &sh_css_sp_fw, i);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ /* All subsequent binaries (including bootloaders) (i >= NUM_OF_SPS) are ISP firmware */
+ if (i < NUM_OF_SPS)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (bi->type != ia_css_isp_firmware)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ if (!sh_css_blob_info) /* cannot happen but KW does not see this */
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ sh_css_blob_info[i - NUM_OF_SPS] = bd;
+ }
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+void sh_css_unload_firmware(void)
+{
+ /* release firmware minibuffer */
+ if (fw_minibuffer) {
+ unsigned int i = 0;
+
+ for (i = 0; i < sh_css_num_binaries; i++) {
+ if (fw_minibuffer[i].name)
+ kfree((void *)fw_minibuffer[i].name);
+ if (fw_minibuffer[i].buffer)
+ vfree((void *)fw_minibuffer[i].buffer);
+ }
+ kfree(fw_minibuffer);
+ fw_minibuffer = NULL;
+ }
+
+ memset(&sh_css_sp_fw, 0, sizeof(sh_css_sp_fw));
+ kfree(sh_css_blob_info);
+ sh_css_blob_info = NULL;
+ sh_css_num_binaries = 0;
+}
+
+hrt_vaddress
+sh_css_load_blob(const unsigned char *blob, unsigned int size)
+{
+ /* mmgr_malloc() allocates memory aligned to a DDR word boundary,
+ which is required for the CSS DMA to read the instructions. */
+ hrt_vaddress target_addr = mmgr_malloc(size);
+
+ assert(blob);
+ if (target_addr)
+ mmgr_store(target_addr, blob, size);
+ return target_addr;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.h b/drivers/staging/media/atomisp/pci/sh_css_firmware.h
new file mode 100644
index 000000000000..f6253392a6c9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.h
@@ -0,0 +1,55 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_FIRMWARE_H_
+#define _SH_CSS_FIRMWARE_H_
+
+#include <system_types.h>
+
+#include <ia_css_err.h>
+#include <ia_css_acc_types.h>
+
+/* This is for the firmware loaded from user space */
+struct sh_css_fw_bi_file_h {
+ char version[64]; /* branch tag + week day + time */
+ int binary_nr; /* Number of binaries */
+ unsigned int h_size; /* sizeof(struct sh_css_fw_bi_file_h) */
+};
+
+extern struct ia_css_fw_info sh_css_sp_fw;
+#if defined(HAS_BL)
+extern struct ia_css_fw_info sh_css_bl_fw;
+#endif /* HAS_BL */
+extern struct ia_css_blob_descr *sh_css_blob_info;
+extern unsigned int sh_css_num_binaries;
+
+char *sh_css_get_fw_version(void);
+
+bool
+sh_css_check_firmware_version(struct device *dev, const char *fw_data);
+
+enum ia_css_err
+sh_css_load_firmware(struct device *dev, const char *fw_data,
+ unsigned int fw_size);
+
+void sh_css_unload_firmware(void);
+
+hrt_vaddress sh_css_load_blob(const unsigned char *blob, unsigned int size);
+
+enum ia_css_err
+sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi,
+ struct ia_css_blob_descr *bd, unsigned int i);
+
+#endif /* _SH_CSS_FIRMWARE_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_frac.h b/drivers/staging/media/atomisp/pci/sh_css_frac.h
new file mode 100644
index 000000000000..cd2d755ec523
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_frac.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_FRAC_H
+#define __SH_CSS_FRAC_H
+
+#include <math_support.h>
+
+#define sISP_REG_BIT ISP_VEC_ELEMBITS
+#define uISP_REG_BIT ((unsigned int)(sISP_REG_BIT - 1))
+#define sSHIFT (16 - sISP_REG_BIT)
+#define uSHIFT ((unsigned int)(16 - uISP_REG_BIT))
+#define sFRACTION_BITS_FITTING(a) (a - sSHIFT)
+#define uFRACTION_BITS_FITTING(a) ((unsigned int)(a - uSHIFT))
+#define sISP_VAL_MIN (-(1 << uISP_REG_BIT))
+#define sISP_VAL_MAX ((1 << uISP_REG_BIT) - 1)
+#define uISP_VAL_MIN (0U)
+#define uISP_VAL_MAX ((unsigned int)((1 << uISP_REG_BIT) - 1))
+
+/* a:fraction bits for 16bit precision, b:fraction bits for ISP precision */
+#define sDIGIT_FITTING(v, a, b) \
+ min_t(int, max_t(int, (((v) >> sSHIFT) >> max(sFRACTION_BITS_FITTING(a) - (b), 0)), \
+ sISP_VAL_MIN), sISP_VAL_MAX)
+#define uDIGIT_FITTING(v, a, b) \
+ min((unsigned int)max((unsigned)(((v) >> uSHIFT) \
+ >> max((int)(uFRACTION_BITS_FITTING(a) - (b)), 0)), \
+ uISP_VAL_MIN), uISP_VAL_MAX)
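+
+/* Worked example (assuming ISP_VEC_ELEMBITS == 14, hence sISP_REG_BIT == 14,
+ * uISP_REG_BIT == 13, sSHIFT == 2 and uSHIFT == 3): a gain stored as u0.16
+ * (a == 16 fraction bits) fitted to an ISP register with b == 13 fraction
+ * bits becomes
+ *   uDIGIT_FITTING(v, 16, 13) = min(max((v >> 3) >> 0, 0U), 8191U)
+ * i.e. the 16-bit value reduced to 13 significant bits and clamped. */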
+
+#endif /* __SH_CSS_FRAC_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_host_data.c b/drivers/staging/media/atomisp/pci/sh_css_host_data.c
new file mode 100644
index 000000000000..348183a221a8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_host_data.c
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+#include <ia_css_host_data.h>
+#include <sh_css_internal.h>
+
+struct ia_css_host_data *ia_css_host_data_allocate(size_t size)
+{
+ struct ia_css_host_data *me;
+
+ me = kmalloc(sizeof(struct ia_css_host_data), GFP_KERNEL);
+ if (!me)
+ return NULL;
+ me->size = (uint32_t)size;
+ me->address = sh_css_malloc(size);
+ if (!me->address) {
+ kfree(me);
+ return NULL;
+ }
+ return me;
+}
+
+void ia_css_host_data_free(struct ia_css_host_data *me)
+{
+ if (me) {
+ sh_css_free(me->address);
+ me->address = NULL;
+ kfree(me);
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_hrt.c b/drivers/staging/media/atomisp/pci/sh_css_hrt.c
new file mode 100644
index 000000000000..94b2de5b5ef4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_hrt.c
@@ -0,0 +1,85 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "platform_support.h"
+
+#include "sh_css_hrt.h"
+#include "ia_css_debug.h"
+
+#include "device_access.h"
+
+#define __INLINE_EVENT__
+#include "event_fifo.h"
+#define __INLINE_SP__
+#include "sp.h"
+#define __INLINE_ISP__
+#include "isp.h"
+#define __INLINE_IRQ__
+#include "irq.h"
+#define __INLINE_FIFO_MONITOR__
+#include "fifo_monitor.h"
+
+/* System independent */
+#include "sh_css_internal.h"
+
+bool sh_css_hrt_system_is_idle(void)
+{
+ bool not_idle = false, idle;
+ fifo_channel_t ch;
+
+ idle = sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT);
+ not_idle |= !idle;
+ if (!idle)
+ IA_CSS_WARNING("SP not idle");
+
+ idle = isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT);
+ not_idle |= !idle;
+ if (!idle)
+ IA_CSS_WARNING("ISP not idle");
+
+ for (ch = 0; ch < N_FIFO_CHANNEL; ch++) {
+ fifo_channel_state_t state;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID, ch, &state);
+ if (state.fifo_valid) {
+ IA_CSS_WARNING("FIFO channel %d is not empty", ch);
+ not_idle = true;
+ }
+ }
+
+ return !not_idle;
+}
+
+enum ia_css_err sh_css_hrt_sp_wait(void)
+{
+#if defined(HAS_IRQ_MAP_VERSION_2)
+ irq_sw_channel_id_t irq_id = IRQ_SW_CHANNEL0_ID;
+#else
+ irq_sw_channel_id_t irq_id = IRQ_SW_CHANNEL2_ID;
+#endif
+ /*
+ * Wait till SP is idle or till there is a SW2 interrupt
+ * The SW2 interrupt will be used when frameloop runs on SP
+ * and signals an event with similar meaning as SP idle
+ * (e.g. frame_done)
+ */
+ while (!sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT) &&
+ ((irq_reg_load(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX) &
+ (1U << (irq_id + IRQ_SW_CHANNEL_OFFSET))) == 0)) {
+ hrt_sleep();
+ }
+
+ return IA_CSS_SUCCESS;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_hrt.h b/drivers/staging/media/atomisp/pci/sh_css_hrt.h
new file mode 100644
index 000000000000..fd23ed1848ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_hrt.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_HRT_H_
+#define _SH_CSS_HRT_H_
+
+#include <sp.h>
+#include <isp.h>
+
+#include <ia_css_err.h>
+
+/* SP access */
+void sh_css_hrt_sp_start_si(void);
+
+void sh_css_hrt_sp_start_copy_frame(void);
+
+void sh_css_hrt_sp_start_isp(void);
+
+enum ia_css_err sh_css_hrt_sp_wait(void);
+
+bool sh_css_hrt_system_is_idle(void);
+
+#endif /* _SH_CSS_HRT_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_internal.h b/drivers/staging/media/atomisp/pci/sh_css_internal.h
new file mode 100644
index 000000000000..5f271d9ae485
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_internal.h
@@ -0,0 +1,1061 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_INTERNAL_H_
+#define _SH_CSS_INTERNAL_H_
+
+#include <system_global.h>
+#include <math_support.h>
+#include <type_support.h>
+#include <platform_support.h>
+#include <stdarg.h>
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "input_formatter.h"
+#endif
+#include "input_system.h"
+
+#include "ia_css_types.h"
+#include "ia_css_acc_types.h"
+#include "ia_css_buffer.h"
+
+#include "ia_css_binary.h"
+#include "sh_css_firmware.h" /* not needed/desired on SP/ISP */
+#include "sh_css_legacy.h"
+#include "sh_css_defs.h"
+#include "sh_css_uds.h"
+#include "dma.h" /* N_DMA_CHANNEL_ID */
+#include "ia_css_circbuf_comm.h" /* Circular buffer */
+#include "ia_css_frame_comm.h"
+#include "ia_css_3a.h"
+#include "ia_css_dvs.h"
+#include "ia_css_metadata.h"
+#include "runtime/bufq/interface/ia_css_bufq.h"
+#include "ia_css_timer.h"
+
+/* TODO: Move to a more suitable place when sp pipeline design is done. */
+#define IA_CSS_NUM_CB_SEM_READ_RESOURCE 2
+#define IA_CSS_NUM_CB_SEM_WRITE_RESOURCE 1
+#define IA_CSS_NUM_CBS 2
+#define IA_CSS_CB_MAX_ELEMS 2
+
+/* Use case specific. Index is limited to IA_CSS_NUM_CB_SEM_READ_RESOURCE or
+ * IA_CSS_NUM_CB_SEM_WRITE_RESOURCE for read and write respectively.
+ * TODO: Enforce the limitation above.
+*/
+#define IA_CSS_COPYSINK_SEM_INDEX 0
+#define IA_CSS_TAGGER_SEM_INDEX 1
+
+/* Force generation of output event. Used by acceleration pipe. */
+#define IA_CSS_POST_OUT_EVENT_FORCE 2
+
+#define SH_CSS_MAX_BINARY_NAME 64
+
+#define SP_DEBUG_NONE (0)
+#define SP_DEBUG_DUMP (1)
+#define SP_DEBUG_COPY (2)
+#define SP_DEBUG_TRACE (3)
+#define SP_DEBUG_MINIMAL (4)
+
+#define SP_DEBUG SP_DEBUG_NONE
+#define SP_DEBUG_MINIMAL_OVERWRITE 1
+
+#define SH_CSS_TNR_BIT_DEPTH 8
+#define SH_CSS_REF_BIT_DEPTH 8
+
+/* keep next up to date with the definition for MAX_CB_ELEMS_FOR_TAGGER in tagger.sp.c */
+#if defined(HAS_SP_2400)
+#define NUM_CONTINUOUS_FRAMES 15
+#else
+#define NUM_CONTINUOUS_FRAMES 10
+#endif
+#define NUM_MIPI_FRAMES_PER_STREAM 2
+
+#define NUM_ONLINE_INIT_CONTINUOUS_FRAMES 2
+
+#define NR_OF_PIPELINES IA_CSS_PIPE_ID_NUM /* Must match with IA_CSS_PIPE_ID_NUM */
+
+#define SH_CSS_MAX_IF_CONFIGS 3 /* Must match with IA_CSS_NR_OF_CONFIGS (not defined yet).*/
+#define SH_CSS_IF_CONFIG_NOT_NEEDED 0xFF
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#define SH_CSS_ENABLE_METADATA
+#endif
+
+#if defined(SH_CSS_ENABLE_METADATA) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#define SH_CSS_ENABLE_METADATA_THREAD
+#endif
+
+/*
+ * SH_CSS_MAX_SP_THREADS:
+ * sp threads visible to host with connected communication queues
+ * these threads are capable of running an image pipe
+ * SH_CSS_MAX_SP_INTERNAL_THREADS:
+ * internal sp service threads, no communication queues to host
+ * these threads can't be used as image pipe
+ */
+
+#if defined(SH_CSS_ENABLE_METADATA_THREAD)
+#define SH_CSS_SP_INTERNAL_METADATA_THREAD 1
+#else
+#define SH_CSS_SP_INTERNAL_METADATA_THREAD 0
+#endif
+
+#define SH_CSS_SP_INTERNAL_SERVICE_THREAD 1
+
+#define SH_CSS_MAX_SP_THREADS 5
+
+#define SH_CSS_MAX_SP_INTERNAL_THREADS (\
+ SH_CSS_SP_INTERNAL_SERVICE_THREAD +\
+ SH_CSS_SP_INTERNAL_METADATA_THREAD)
+
+#define SH_CSS_MAX_PIPELINES SH_CSS_MAX_SP_THREADS
+
+/**
+ * The C99 standard does not specify the exact object representation of structs;
+ * the representation is compiler dependent.
+ *
+ * The structs that are communicated between host and SP/ISP should have the
+ * exact same object representation. The compiler that is used to compile the
+ * firmware is hivecc.
+ *
+ * To check if a different compiler, used to compile a host application, uses
+ * another object representation, macros are defined specifying the size of
+ * the structs as expected by the firmware.
+ *
+ * A host application shall verify that sizeof() of the struct is equal to
+ * the SIZE_OF_XXX macro of the corresponding struct. If they are not
+ * equal, functionality will break.
+ */
+#define CALC_ALIGNMENT_MEMBER(x, y) (CEIL_MUL(x, y) - x)
+#define SIZE_OF_HRT_VADDRESS sizeof(hive_uint32)
+#define SIZE_OF_IA_CSS_PTR sizeof(uint32_t)
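+
+/* A minimal sketch of the verification described above; a host application
+ * could check the object representation at compile time, e.g. with the
+ * kernel's BUILD_BUG_ON():
+ *
+ *	BUILD_BUG_ON(sizeof(struct sh_css_hmm_buffer) !=
+ *		     SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT);
+ */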
+
+/* Number of SP's */
+#define NUM_OF_SPS 1
+
+#define NUM_OF_BLS 0
+
+/* Enum for order of Binaries */
+enum sh_css_order_binaries {
+ SP_FIRMWARE = 0,
+ ISP_FIRMWARE
+};
+
+/*
+* JB: keep next enum in sync with thread id's
+* and pipe id's
+*/
+enum sh_css_pipe_config_override {
+ SH_CSS_PIPE_CONFIG_OVRD_NONE = 0,
+ SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD = 0xffff
+};
+
+enum host2sp_commands {
+ host2sp_cmd_error = 0,
+ /*
+ * The host2sp_cmd_ready command is the only command written by the SP.
+ * It acknowledges that the previous command has been received
+ * (this does not mean that the command has been executed).
+ * It also indicates that a new command can be sent (it is a queue
+ * with depth 1).
+ */
+ host2sp_cmd_ready = 1,
+ /* Command written by the Host */
+ host2sp_cmd_dummy, /* No action, can be used as watchdog */
+ host2sp_cmd_start_flash, /* Request SP to start the flash */
+ host2sp_cmd_terminate, /* SP should terminate itself */
+ N_host2sp_cmd
+};
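+
+/* Protocol sketch (hypothetical helper names, not part of this interface):
+ * since the command mailbox is a queue of depth 1, the host may only write
+ * a new command once the SP has acknowledged the previous one:
+ *
+ *	if (read_host2sp_command() == host2sp_cmd_ready)
+ *		write_host2sp_command(host2sp_cmd_start_flash);
+ */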
+
+/* Enumeration used to indicate the events that are produced by
+ * the SP and consumed by the Host.
+ *
+ * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
+ * 1) "enum ia_css_event_type" (ia_css_event_public.h)
+ * 2) "enum sh_css_sp_event_type" (sh_css_internal.h)
+ * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c)
+ * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
+ */
+enum sh_css_sp_event_type {
+ SH_CSS_SP_EVENT_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_SECOND_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_VF_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_SECOND_VF_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_3A_STATISTICS_DONE,
+ SH_CSS_SP_EVENT_DIS_STATISTICS_DONE,
+ SH_CSS_SP_EVENT_PIPELINE_DONE,
+ SH_CSS_SP_EVENT_FRAME_TAGGED,
+ SH_CSS_SP_EVENT_INPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_METADATA_DONE,
+ SH_CSS_SP_EVENT_LACE_STATISTICS_DONE,
+ SH_CSS_SP_EVENT_ACC_STAGE_COMPLETE,
+ SH_CSS_SP_EVENT_TIMER,
+ SH_CSS_SP_EVENT_PORT_EOF,
+ SH_CSS_SP_EVENT_FW_WARNING,
+ SH_CSS_SP_EVENT_FW_ASSERT,
+ SH_CSS_SP_EVENT_NR_OF_TYPES /* must be last */
+};
+
+/* xmem address map allocation per pipeline, css pointers */
+/* Note that the struct below should only consist of hrt_vaddress-es
+ Otherwise this will cause a fail in the function ref_sh_css_ddr_address_map
+ */
+struct sh_css_ddr_address_map {
+ hrt_vaddress isp_param;
+ hrt_vaddress isp_mem_param[SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES];
+ hrt_vaddress macc_tbl;
+ hrt_vaddress fpn_tbl;
+ hrt_vaddress sc_tbl;
+ hrt_vaddress tetra_r_x;
+ hrt_vaddress tetra_r_y;
+ hrt_vaddress tetra_gr_x;
+ hrt_vaddress tetra_gr_y;
+ hrt_vaddress tetra_gb_x;
+ hrt_vaddress tetra_gb_y;
+ hrt_vaddress tetra_b_x;
+ hrt_vaddress tetra_b_y;
+ hrt_vaddress tetra_ratb_x;
+ hrt_vaddress tetra_ratb_y;
+ hrt_vaddress tetra_batr_x;
+ hrt_vaddress tetra_batr_y;
+ hrt_vaddress dvs_6axis_params_y;
+};
+
+#define SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT \
+ (SIZE_OF_HRT_VADDRESS + \
+ (SH_CSS_MAX_STAGES * IA_CSS_NUM_MEMORIES * SIZE_OF_HRT_VADDRESS) + \
+ (16 * SIZE_OF_HRT_VADDRESS))
+
+/* xmem address map allocation per pipeline */
+struct sh_css_ddr_address_map_size {
+ size_t isp_param;
+ size_t isp_mem_param[SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES];
+ size_t macc_tbl;
+ size_t fpn_tbl;
+ size_t sc_tbl;
+ size_t tetra_r_x;
+ size_t tetra_r_y;
+ size_t tetra_gr_x;
+ size_t tetra_gr_y;
+ size_t tetra_gb_x;
+ size_t tetra_gb_y;
+ size_t tetra_b_x;
+ size_t tetra_b_y;
+ size_t tetra_ratb_x;
+ size_t tetra_ratb_y;
+ size_t tetra_batr_x;
+ size_t tetra_batr_y;
+ size_t dvs_6axis_params_y;
+};
+
+struct sh_css_ddr_address_map_compound {
+ struct sh_css_ddr_address_map map;
+ struct sh_css_ddr_address_map_size size;
+};
+
+struct ia_css_isp_parameter_set_info {
+ struct sh_css_ddr_address_map
+ mem_map;/** pointers to Parameters in ISP format IMPT:
+ This should be first member of this struct */
+ u32
+ isp_parameters_id;/** Unique ID to track which config was actually applied to a particular frame */
+ ia_css_ptr
+ output_frame_ptr;/** Output frame to which this config has to be applied (optional) */
+};
+
+/* this struct contains all arguments that can be passed to
+ a binary. It depends on the binary which ones are used. */
+struct sh_css_binary_args {
+ struct ia_css_frame *in_frame; /* input frame */
+ struct ia_css_frame
+ *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES]; /* reference input frame */
+ struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES]; /* tnr frames */
+ struct ia_css_frame
+ *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; /* output frame */
+ struct ia_css_frame *out_vf_frame; /* viewfinder output frame */
+ bool copy_vf;
+ bool copy_output;
+ unsigned int vf_downscale_log2;
+};
+
+#if SP_DEBUG == SP_DEBUG_DUMP
+
+#define SH_CSS_NUM_SP_DEBUG 48
+
+struct sh_css_sp_debug_state {
+ unsigned int error;
+ unsigned int debug[SH_CSS_NUM_SP_DEBUG];
+};
+
+#elif SP_DEBUG == SP_DEBUG_COPY
+
+#define SH_CSS_SP_DBG_TRACE_DEPTH (40)
+
+struct sh_css_sp_debug_trace {
+ u16 frame;
+ u16 line;
+ u16 pixel_distance;
+ u16 mipi_used_dword;
+ u16 sp_index;
+};
+
+struct sh_css_sp_debug_state {
+ u16 if_start_line;
+ u16 if_start_column;
+ u16 if_cropped_height;
+ u16 if_cropped_width;
+ unsigned int index;
+ struct sh_css_sp_debug_trace
+ trace[SH_CSS_SP_DBG_TRACE_DEPTH];
+};
+
+#elif SP_DEBUG == SP_DEBUG_TRACE
+
+#if 1
+/* Example of just one global trace */
+#define SH_CSS_SP_DBG_NR_OF_TRACES (1)
+#define SH_CSS_SP_DBG_TRACE_DEPTH (40)
+#else
+/* E.g. if you like separate traces for 4 threads */
+#define SH_CSS_SP_DBG_NR_OF_TRACES (4)
+#define SH_CSS_SP_DBG_TRACE_DEPTH (10)
+#endif
+
+#define SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS (13)
+
+struct sh_css_sp_debug_trace {
+ u16 time_stamp;
+ u16 location; /* bit 15..13 = file_id, 12..0 = line nr. */
+ u32 data;
+};
+
+struct sh_css_sp_debug_state {
+ struct sh_css_sp_debug_trace
+ trace[SH_CSS_SP_DBG_NR_OF_TRACES][SH_CSS_SP_DBG_TRACE_DEPTH];
+ u16 index_last[SH_CSS_SP_DBG_NR_OF_TRACES];
+ u8 index[SH_CSS_SP_DBG_NR_OF_TRACES];
+};
+
+#elif SP_DEBUG == SP_DEBUG_MINIMAL
+
+#define SH_CSS_NUM_SP_DEBUG 128
+
+struct sh_css_sp_debug_state {
+ unsigned int error;
+ unsigned int debug[SH_CSS_NUM_SP_DEBUG];
+};
+
+#endif
+
+struct sh_css_sp_debug_command {
+ /*
+ * The DMA software-mask,
+ * Bit 31...24: unused.
+ * Bit 23...16: unused.
+ * Bit 15...08: reading-request enabling bits for DMA channel 7..0
+ * Bit 07...00: writing-request enabling bits for DMA channel 7..0
+ *
+ * For example, "0...0 0...0 11111011 11111101" indicates that the
+ * writing request through DMA Channel 1 and the reading request
+ * through DMA channel 2 are both disabled. The others are enabled.
+ */
+ u32 dma_sw_reg;
+};
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+/* SP input formatter configuration.*/
+struct sh_css_sp_input_formatter_set {
+ u32 stream_format;
+ input_formatter_cfg_t config_a;
+ input_formatter_cfg_t config_b;
+};
+#endif
+
+#define IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT (3)
+
+/* SP configuration information */
+struct sh_css_sp_config {
+ u8 no_isp_sync; /* Signal host immediately after start */
+ u8 enable_raw_pool_locking; /** Enable Raw Buffer Locking for HALv3 Support */
+ u8 lock_all;
+ /** If raw buffer locking is enabled, this flag indicates whether raw
+ frames are locked when their EOF event is successfully sent to the
+ host (true) or when they are passed to the preview/video pipe
+ (false). */
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ struct {
+ u8 a_changed;
+ u8 b_changed;
+ u8 isp_2ppc;
+ struct sh_css_sp_input_formatter_set
+ set[SH_CSS_MAX_IF_CONFIGS]; /* CSI-2 port is used as index. */
+ } input_formatter;
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ sync_generator_cfg_t sync_gen;
+ tpg_cfg_t tpg;
+ prbs_cfg_t prbs;
+ input_system_cfg_t input_circuit;
+ u8 input_circuit_cfg_changed;
+ u32 mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
+#endif
+ u8 enable_isys_event_queue;
+ u8 disable_cont_vf;
+};
+
+enum sh_css_stage_type {
+ SH_CSS_SP_STAGE_TYPE = 0,
+ SH_CSS_ISP_STAGE_TYPE = 1
+};
+
+#define SH_CSS_NUM_STAGE_TYPES 2
+
+#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS BIT(0)
+#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS_MASK \
+ ((SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << SH_CSS_MAX_SP_THREADS) - 1)
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+struct sh_css_sp_pipeline_terminal {
+ union {
+ /* Input System 2401 */
+ virtual_input_system_stream_t
+ virtual_input_system_stream[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
+ } context;
+ /*
+ * TODO
+ * - Remove "virtual_input_system_cfg" when the ISYS2401 DLI is ready.
+ */
+ union {
+ /* Input System 2401 */
+ virtual_input_system_stream_cfg_t
+ virtual_input_system_stream_cfg[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
+ } ctrl;
+};
+
+struct sh_css_sp_pipeline_io {
+ struct sh_css_sp_pipeline_terminal input;
+ /* pqiao: comment out temporarily to save dmem */
+ /*struct sh_css_sp_pipeline_terminal output;*/
+};
+
+/* This struct tracks how many streams are registered per CSI port.
+ * This is used to track which streams have already been configured.
+ * Only when all streams are configured, the CSI RX is started for that port.
+ */
+struct sh_css_sp_pipeline_io_status {
+ u32 active[N_INPUT_SYSTEM_CSI_PORT]; /** registered streams */
+ u32 running[N_INPUT_SYSTEM_CSI_PORT]; /** configured streams */
+};
+
+#endif
+enum sh_css_port_dir {
+ SH_CSS_PORT_INPUT = 0,
+ SH_CSS_PORT_OUTPUT = 1
+};
+
+enum sh_css_port_type {
+ SH_CSS_HOST_TYPE = 0,
+ SH_CSS_COPYSINK_TYPE = 1,
+ SH_CSS_TAGGERSINK_TYPE = 2
+};
+
+/* Pipe inout settings: output port on 7-4bits, input port on 3-0bits */
+#define SH_CSS_PORT_FLD_WIDTH_IN_BITS (4)
+#define SH_CSS_PORT_TYPE_BIT_FLD(pt) (0x1 << (pt))
+#define SH_CSS_PORT_FLD(pd) ((pd) ? SH_CSS_PORT_FLD_WIDTH_IN_BITS : 0)
+#define SH_CSS_PIPE_PORT_CONFIG_ON(p, pd, pt) ((p) |= (SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd)))
+#define SH_CSS_PIPE_PORT_CONFIG_OFF(p, pd, pt) ((p) &= ~(SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd)))
+#define SH_CSS_PIPE_PORT_CONFIG_SET(p, pd, pt, val) ((val) ? \
+ SH_CSS_PIPE_PORT_CONFIG_ON(p, pd, pt) : SH_CSS_PIPE_PORT_CONFIG_OFF(p, pd, pt))
+#define SH_CSS_PIPE_PORT_CONFIG_GET(p, pd, pt) ((p) & (SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd)))
+#define SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(p) \
+ (!(SH_CSS_PIPE_PORT_CONFIG_GET(p, SH_CSS_PORT_INPUT, SH_CSS_HOST_TYPE) && \
+ SH_CSS_PIPE_PORT_CONFIG_GET(p, SH_CSS_PORT_OUTPUT, SH_CSS_HOST_TYPE)))
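+
+/* Usage sketch: a pipe that takes its input from the copy sink and sends
+ * its output to the host would be configured as
+ *
+ *	u32 cfg = 0;
+ *
+ *	SH_CSS_PIPE_PORT_CONFIG_ON(cfg, SH_CSS_PORT_INPUT, SH_CSS_COPYSINK_TYPE);
+ *	SH_CSS_PIPE_PORT_CONFIG_ON(cfg, SH_CSS_PORT_OUTPUT, SH_CSS_HOST_TYPE);
+ *
+ * leaving cfg == 0x12: bit 1 set in the input nibble, bit 0 set in the
+ * output nibble. */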
+
+#define IA_CSS_ACQUIRE_ISP_POS 31
+
+/* Flags for metadata processing */
+#define SH_CSS_METADATA_ENABLED 0x01
+#define SH_CSS_METADATA_PROCESSED 0x02
+#define SH_CSS_METADATA_OFFLINE_MODE 0x04
+#define SH_CSS_METADATA_WAIT_INPUT 0x08
+
+/* @brief Free an array of metadata buffers.
+ *
+ * @param[in] num_bufs Number of metadata buffers to be freed.
+ * @param[in] bufs Pointer of array of metadata buffers.
+ *
+ * This function frees an array of metadata buffers.
+ */
+void
+ia_css_metadata_free_multiple(unsigned int num_bufs,
+ struct ia_css_metadata **bufs);
+
+/* Macro for handling pipe_qos_config */
+#define QOS_INVALID (~0U)
+#define QOS_ALL_STAGES_DISABLED (0U)
+#define QOS_STAGE_MASK(num) (0x00000001 << num)
+#define SH_CSS_IS_QOS_PIPE(pipe) ((pipe)->pipe_qos_config != QOS_INVALID)
+#define SH_CSS_QOS_STAGE_ENABLE(pipe, num) ((pipe)->pipe_qos_config |= QOS_STAGE_MASK(num))
+#define SH_CSS_QOS_STAGE_DISABLE(pipe, num) ((pipe)->pipe_qos_config &= ~QOS_STAGE_MASK(num))
+#define SH_CSS_QOS_STAGE_IS_ENABLED(pipe, num) ((pipe)->pipe_qos_config & QOS_STAGE_MASK(num))
+#define SH_CSS_QOS_STAGE_IS_ALL_DISABLED(pipe) ((pipe)->pipe_qos_config == QOS_ALL_STAGES_DISABLED)
+#define SH_CSS_QOS_MODE_PIPE_ADD(mode, pipe) ((mode) |= (0x1 << (pipe)->pipe_id))
+#define SH_CSS_QOS_MODE_PIPE_REMOVE(mode, pipe) ((mode) &= ~(0x1 << (pipe)->pipe_id))
+#define SH_CSS_IS_QOS_ONLY_MODE(mode) ((mode) == (0x1 << IA_CSS_PIPE_ID_ACC))
+
+/* Information for a pipeline */
+struct sh_css_sp_pipeline {
+ u32 pipe_id; /* the pipe ID */
+ u32 pipe_num; /* the dynamic pipe number */
+ u32 thread_id; /* the sp thread ID */
+ u32 pipe_config; /* the pipe config */
+ u32 pipe_qos_config; /* Bitmap of multiple QOS extension fw state.
+ (0xFFFFFFFF) indicates non QOS pipe.*/
+ u32 inout_port_config;
+ u32 required_bds_factor;
+ u32 dvs_frame_delay;
+ u32 input_system_mode; /* enum ia_css_input_mode */
+ u32 port_id; /* port_id for input system */
+ u32 num_stages; /* the pipe config */
+ u32 running; /* needed for pipe termination */
+ hrt_vaddress sp_stage_addr[SH_CSS_MAX_STAGES];
+ hrt_vaddress scaler_pp_lut; /* Early bound LUT */
+ u32 dummy; /* stage ptr is only used on sp but lives in
+ this struct; needs cleanup */
+ s32 num_execs; /* number of times to run if this is
+ an acceleration pipe. */
+#if defined(SH_CSS_ENABLE_METADATA)
+ struct {
+ u32 format; /* Metadata format in hrt format */
+ u32 width; /* Width of a line */
+ u32 height; /* Number of lines */
+ u32 stride; /* Stride (in bytes) per line */
+ u32 size; /* Total size (in bytes) */
+ hrt_vaddress cont_buf; /* Address of continuous buffer */
+ } metadata;
+#endif
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ u32 output_frame_queue_id;
+#endif
+ union {
+ struct {
+ u32 bytes_available;
+ } bin;
+ struct {
+ u32 height;
+ u32 width;
+ u32 padded_width;
+ u32 max_input_width;
+ u32 raw_bit_depth;
+ } raw;
+ } copy;
+
+/* ISP2401 */
+
+ /* Parameters passed to Shading Correction kernel. */
+ struct {
+ u32 internal_frame_origin_x_bqs_on_sctbl; /* Origin X (bqs) of internal frame on shading table */
+ u32 internal_frame_origin_y_bqs_on_sctbl; /* Origin Y (bqs) of internal frame on shading table */
+ } shading;
+};
+
+/*
+ * The first frames (with comment Dynamic) can be dynamic or static
+ * The other frames (ref_in and below) can only be static
+ * Static means that the data address will not change during the life time
+ * of the associated pipe. Dynamic means that the data address can
+ * change with every (frame) iteration of the associated pipe
+ *
+ * s3a and dis are now also dynamic but (still) handled separately
+ */
+#define SH_CSS_NUM_DYNAMIC_FRAME_IDS (3)
+
+struct ia_css_frames_sp {
+ struct ia_css_frame_sp in;
+ struct ia_css_frame_sp out[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_resolution effective_in_res;
+ struct ia_css_frame_sp out_vf;
+ struct ia_css_frame_sp_info internal_frame_info;
+ struct ia_css_buffer_sp s3a_buf;
+ struct ia_css_buffer_sp dvs_buf;
+#if defined SH_CSS_ENABLE_METADATA
+ struct ia_css_buffer_sp metadata_buf;
+#endif
+};
+
+/* Information for a single pipeline stage for an ISP */
+struct sh_css_isp_stage {
+ /*
+	 * For compatibility and portability, only types
+ * from "stdint.h" are allowed
+ *
+ * Use of "enum" and "bool" is prohibited
+ * Multiple boolean flags can be stored in an
+ * integer
+ */
+ struct ia_css_blob_info blob_info;
+ struct ia_css_binary_info binary_info;
+ char binary_name[SH_CSS_MAX_BINARY_NAME];
+ struct ia_css_isp_param_css_segments mem_initializers;
+};
+
+/* Information for a single pipeline stage */
+struct sh_css_sp_stage {
+ /*
+ * For compatibility and portabilty, only types
+ * from "stdint.h" are allowed
+ *
+ * Use of "enum" and "bool" is prohibited
+ * Multiple boolean flags can be stored in an
+ * integer
+ */
+ u8 num; /* Stage number */
+ u8 isp_online;
+ u8 isp_copy_vf;
+ u8 isp_copy_output;
+ u8 sp_enable_xnr;
+ u8 isp_deci_log_factor;
+ u8 isp_vf_downscale_bits;
+ u8 deinterleaved;
+ /*
+ * NOTE: Programming the input circuit can only be done at the
+ * start of a session. It is illegal to program it during execution
+ * The input circuit defines the connectivity
+ */
+ u8 program_input_circuit;
+ /* enum ia_css_pipeline_stage_sp_func func; */
+ u8 func;
+ /* The type of the pipe-stage */
+ /* enum sh_css_stage_type stage_type; */
+ u8 stage_type;
+ u8 num_stripes;
+ u8 isp_pipe_version;
+ struct {
+ u8 vf_output;
+ u8 s3a;
+ u8 sdis;
+ u8 dvs_stats;
+ u8 lace_stats;
+ } enable;
+ /* Add padding to come to a word boundary */
+ /* unsigned char padding[0]; */
+
+ struct sh_css_crop_pos sp_out_crop_pos;
+ struct ia_css_frames_sp frames;
+ struct ia_css_resolution dvs_envelope;
+ struct sh_css_uds_info uds;
+ hrt_vaddress isp_stage_addr;
+ hrt_vaddress xmem_bin_addr;
+ hrt_vaddress xmem_map_addr;
+
+ u16 top_cropping;
+ u16 row_stripes_height;
+ u16 row_stripes_overlap_lines;
+ u8 if_config_index; /* Which should be applied by this stage. */
+};
+
+/*
+ * Time: 2012-07-19, 17:40.
+ * Note: Add a new data member "debug" in "sh_css_sp_group". This
+ * data member is used to pass the debugging command from the
+ * Host to the SP.
+ *
+ * Time: Before 2012-07-19.
+ * Note:
+ * Group all host initialized SP variables into this struct.
+ * This is initialized every stage through dma.
+ * The stage part itself is transferred through sh_css_sp_stage.
+*/
+struct sh_css_sp_group {
+ struct sh_css_sp_config config;
+ struct sh_css_sp_pipeline pipe[SH_CSS_MAX_SP_THREADS];
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+ struct sh_css_sp_pipeline_io pipe_io[SH_CSS_MAX_SP_THREADS];
+ struct sh_css_sp_pipeline_io_status pipe_io_status;
+#endif
+ struct sh_css_sp_debug_command debug;
+};
+
+/* Data in SP dmem that is set from the host every stage. */
+struct sh_css_sp_per_frame_data {
+ /* ddr address of sp_group and sp_stage */
+ hrt_vaddress sp_group_addr;
+};
+
+#define SH_CSS_NUM_SDW_IRQS 3
+
+/* Output data from SP to css */
+struct sh_css_sp_output {
+ unsigned int bin_copy_bytes_copied;
+#if SP_DEBUG != SP_DEBUG_NONE
+ struct sh_css_sp_debug_state debug;
+#endif
+ unsigned int sw_interrupt_value[SH_CSS_NUM_SDW_IRQS];
+};
+
+#define CONFIG_ON_FRAME_ENQUEUE() 0
+
+/**
+ * @brief Data structure for the circular buffer.
+ * The circular buffer is empty if "start == end". The
+ * circular buffer is full if "(end + 1) % size == start".
+ */
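+/* In code the two conditions above translate to (a sketch, assuming the
+ * descriptor exposes start, end and size fields):
+ *
+ *	bool is_empty = desc.start == desc.end;
+ *	bool is_full  = (desc.end + 1) % desc.size == desc.start;
+ *
+ * one element of the backing array is thus always left unused to tell the
+ * two states apart. */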
+/* Variable Sized Buffer Queue Elements */
+
+#define IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE 6
+#define IA_CSS_NUM_ELEMS_HOST2SP_PARAM_QUEUE 3
+#define IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE 6
+
+/* The sp-to-host queue is expected to be emptied in the ISR, since
+ * it is used instead of HW interrupts (due to a HW design issue).
+ * We need one queue element per CSI port. */
+#define IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE (2 * N_CSI_PORTS)
+/* The host-to-sp queue needs to allow for some delay
+ * in the emptying of this queue in the SP since there is no
+ * separate SP thread for this. */
+#define IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE (2 * N_CSI_PORTS)
+
+#if defined(HAS_SP_2400)
+#define IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE 13
+#define IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE 19
+#define IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE 26 /* holds events for all types of buffers, hence deeper */
+#else
+#define IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE 6
+#define IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE 6
+#define IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE 6
+#endif
+
+struct sh_css_hmm_buffer {
+ union {
+ struct ia_css_isp_3a_statistics s3a;
+ struct ia_css_isp_dvs_statistics dis;
+ hrt_vaddress skc_dvs_statistics;
+ hrt_vaddress lace_stat;
+ struct ia_css_metadata metadata;
+ struct frame_data_wrapper {
+ hrt_vaddress frame_data;
+ u32 flashed;
+ u32 exp_id;
+ u32 isp_parameters_id; /** Unique ID to track which config was
+ actually applied to a particular frame */
+#if CONFIG_ON_FRAME_ENQUEUE()
+ struct sh_css_config_on_frame_enqueue config_on_frame_enqueue;
+#endif
+ } frame;
+ hrt_vaddress ddr_ptrs;
+ } payload;
+ /*
+ * kernel_ptr is present for host administration purposes only.
+ * type is uint64_t in order to be 64-bit host compatible.
+ * uint64_t does not exist on SP/ISP.
+ * Size of the struct is checked by sp.hive.c.
+ */
+ CSS_ALIGN(u64 cookie_ptr, 8); /* TODO: check if this alignment is needed */
+ u64 kernel_ptr;
+ struct ia_css_time_meas timing_data;
+ clock_value_t isys_eof_clock_tick;
+};
+
+#if CONFIG_ON_FRAME_ENQUEUE()
+#define SIZE_OF_FRAME_STRUCT \
+ (SIZE_OF_HRT_VADDRESS + \
+ (3 * sizeof(uint32_t)) + \
+ sizeof(uint32_t))
+#else
+#define SIZE_OF_FRAME_STRUCT \
+ (SIZE_OF_HRT_VADDRESS + \
+ (3 * sizeof(uint32_t)))
+#endif
+
+#define SIZE_OF_PAYLOAD_UNION \
+ (MAX(MAX(MAX(MAX( \
+ SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT, \
+ SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT), \
+ SIZE_OF_IA_CSS_METADATA_STRUCT), \
+ SIZE_OF_FRAME_STRUCT), \
+ SIZE_OF_HRT_VADDRESS))
+
+/* Do not use sizeof(uint64_t) since that does not exist on the SP */
+#define SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT \
+ (SIZE_OF_PAYLOAD_UNION + \
+ CALC_ALIGNMENT_MEMBER(SIZE_OF_PAYLOAD_UNION, 8) + \
+ 8 + \
+ 8 + \
+ SIZE_OF_IA_CSS_TIME_MEAS_STRUCT + \
+ SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT + \
+ CALC_ALIGNMENT_MEMBER(SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT, 8))
+
+enum sh_css_queue_type {
+ sh_css_invalid_queue_type = -1,
+ sh_css_host2sp_buffer_queue,
+ sh_css_sp2host_buffer_queue,
+ sh_css_host2sp_psys_event_queue,
+ sh_css_sp2host_psys_event_queue,
+ sh_css_sp2host_isys_event_queue,
+ sh_css_host2sp_isys_event_queue,
+ sh_css_host2sp_tag_cmd_queue,
+};
+
+struct sh_css_event_irq_mask {
+ u16 or_mask;
+ u16 and_mask;
+};
+
+#define SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT \
+ (2 * sizeof(uint16_t))
+
+struct host_sp_communication {
+ /*
+ * Don't use enum host2sp_commands, because the sizeof an enum is
+ * compiler dependent and thus non-portable
+ */
+ u32 host2sp_command;
+
+ /*
+ * The frame buffers that are reused by the
+ * copy pipe in the offline preview mode.
+ *
+ * host2sp_offline_frames[0]: the input frame of the preview pipe.
+ * host2sp_offline_frames[1]: the output frame of the copy pipe.
+ *
+ * TODO:
+	 * Remove this when the host and the SP are decoupled.
+ */
+ hrt_vaddress host2sp_offline_frames[NUM_CONTINUOUS_FRAMES];
+ hrt_vaddress host2sp_offline_metadata[NUM_CONTINUOUS_FRAMES];
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ hrt_vaddress host2sp_mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ hrt_vaddress host2sp_mipi_metadata[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ u32 host2sp_num_mipi_frames[N_CSI_PORTS];
+#endif
+ u32 host2sp_cont_avail_num_raw_frames;
+ u32 host2sp_cont_extra_num_raw_frames;
+ u32 host2sp_cont_target_num_raw_frames;
+ struct sh_css_event_irq_mask host2sp_event_irq_mask[NR_OF_PIPELINES];
+
+};
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#define SIZE_OF_HOST_SP_COMMUNICATION_STRUCT \
+ (sizeof(uint32_t) + \
+ (NUM_CONTINUOUS_FRAMES * SIZE_OF_HRT_VADDRESS * 2) + \
+ (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM * SIZE_OF_HRT_VADDRESS * 2) + \
+ ((3 + N_CSI_PORTS) * sizeof(uint32_t)) + \
+ (NR_OF_PIPELINES * SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT))
+#else
+#define SIZE_OF_HOST_SP_COMMUNICATION_STRUCT \
+ (sizeof(uint32_t) + \
+ (NUM_CONTINUOUS_FRAMES * SIZE_OF_HRT_VADDRESS * 2) + \
+ (3 * sizeof(uint32_t)) + \
+ (NR_OF_PIPELINES * SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT))
+#endif
+
+struct host_sp_queues {
+ /*
+ * Queues for the dynamic frame information,
+ * i.e. the "in_frame" buffer, the "out_frame"
+ * buffer and the "vf_out_frame" buffer.
+ */
+ ia_css_circbuf_desc_t host2sp_buffer_queues_desc
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+ ia_css_circbuf_elem_t host2sp_buffer_queues_elems
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES]
+ [IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE];
+ ia_css_circbuf_desc_t sp2host_buffer_queues_desc
+ [SH_CSS_MAX_NUM_QUEUES];
+ ia_css_circbuf_elem_t sp2host_buffer_queues_elems
+ [SH_CSS_MAX_NUM_QUEUES][IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE];
+
+ /*
+ * The queues for the events.
+ */
+ ia_css_circbuf_desc_t host2sp_psys_event_queue_desc;
+
+ ia_css_circbuf_elem_t host2sp_psys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE];
+ ia_css_circbuf_desc_t sp2host_psys_event_queue_desc;
+
+ ia_css_circbuf_elem_t sp2host_psys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE];
+
+ /*
+ * The queues for the ISYS events.
+ */
+ ia_css_circbuf_desc_t host2sp_isys_event_queue_desc;
+
+ ia_css_circbuf_elem_t host2sp_isys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE];
+ ia_css_circbuf_desc_t sp2host_isys_event_queue_desc;
+
+ ia_css_circbuf_elem_t sp2host_isys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE];
+ /*
+ * The queue for the tagger commands.
+	 * CHECK: are these last two present on the 2401?
+ */
+ ia_css_circbuf_desc_t host2sp_tag_cmd_queue_desc;
+
+ ia_css_circbuf_elem_t host2sp_tag_cmd_queue_elems
+ [IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE];
+};
+
+#define SIZE_OF_QUEUES_ELEMS \
+ (SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT * \
+ ((SH_CSS_MAX_SP_THREADS * SH_CSS_MAX_NUM_QUEUES * IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE) + \
+ (SH_CSS_MAX_NUM_QUEUES * IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE)))
+
+#define IA_CSS_NUM_CIRCBUF_DESCS 5
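+/*
+ * The 5 single-instance descriptors counted above are the host2sp/sp2host
+ * psys event queues, the host2sp/sp2host isys event queues and the host2sp
+ * tag command queue in struct host_sp_queues.
+ */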
+
+#define SIZE_OF_QUEUES_DESC \
+ ((SH_CSS_MAX_SP_THREADS * SH_CSS_MAX_NUM_QUEUES * \
+ SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT) + \
+ (SH_CSS_MAX_NUM_QUEUES * SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT) + \
+ (IA_CSS_NUM_CIRCBUF_DESCS * SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT))
+
+#define SIZE_OF_HOST_SP_QUEUES_STRUCT \
+ (SIZE_OF_QUEUES_ELEMS + SIZE_OF_QUEUES_DESC)
+
+extern int (*sh_css_printf)(const char *fmt, va_list args);
+
+static inline void
+sh_css_print(const char *fmt, ...)
+{
+ va_list ap;
+
+ if (sh_css_printf) {
+ va_start(ap, fmt);
+ sh_css_printf(fmt, ap);
+ va_end(ap);
+ }
+}
+
+static inline void
+sh_css_vprint(const char *fmt, va_list args)
+{
+ if (sh_css_printf)
+ sh_css_printf(fmt, args);
+}
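+/*
+ * sh_css_printf must have a vprintf()-style signature. A hypothetical
+ * host-side wiring and use (not part of the original code) could be:
+ *
+ *   sh_css_printf = vprintk;
+ *   sh_css_print("loaded %d binaries\n", num_binaries);
+ */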
+
+/* The following #if is there because this header file is also included
+   by SP and ISP code, which do not need this data, and HIVECC has alignment
+   issues with the firmware structs/unions.
+   A more permanent solution would be to refactor this include.
+*/
+hrt_vaddress sh_css_params_ddr_address_map(void);
+
+enum ia_css_err
+sh_css_params_init(void);
+
+void
+sh_css_params_uninit(void);
+
+void *sh_css_malloc(size_t size);
+
+void *sh_css_calloc(size_t N, size_t size);
+
+void sh_css_free(void *ptr);
+
+/* For Acceleration API: Flush FW (shared buffer pointer) arguments */
+void sh_css_flush(struct ia_css_acc_fw *fw);
+
+void
+sh_css_binary_args_reset(struct sh_css_binary_args *args);
+
+/* Check two frames for equality (format, resolution, bits per element) */
+bool
+sh_css_frame_equal_types(const struct ia_css_frame *frame_a,
+ const struct ia_css_frame *frame_b);
+
+bool
+sh_css_frame_info_equal_resolution(const struct ia_css_frame_info *info_a,
+ const struct ia_css_frame_info *info_b);
+
+void
+sh_css_capture_enable_bayer_downscaling(bool enable);
+
+void
+sh_css_binary_print(const struct ia_css_binary *binary);
+
+/* The "aligned" argument of sh_css_frame_info_set_width() can be used to
+   impose an extra alignment requirement on the width.
+   When 0, no extra alignment is done. */
+void
+sh_css_frame_info_set_width(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int aligned);
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+
+unsigned int
+sh_css_get_mipi_sizes_for_check(const unsigned int port,
+ const unsigned int idx);
+
+#endif
+
+hrt_vaddress
+sh_css_store_sp_group_to_ddr(void);
+
+hrt_vaddress
+sh_css_store_sp_stage_to_ddr(unsigned int pipe, unsigned int stage);
+
+hrt_vaddress
+sh_css_store_isp_stage_to_ddr(unsigned int pipe, unsigned int stage);
+
+void
+sh_css_update_uds_and_crop_info(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ bool enable_zoom
+);
+
+void
+sh_css_invalidate_shading_tables(struct ia_css_stream *stream);
+
+struct ia_css_pipeline *
+ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe);
+
+unsigned int
+ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe);
+
+unsigned int
+ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe);
+
+bool
+sh_css_continuous_is_enabled(uint8_t pipe_num);
+
+struct ia_css_pipe *
+find_pipe_by_num(uint32_t pipe_num);
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+void
+ia_css_get_crop_offsets(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *in_frame);
+#endif
+
+#endif /* _SH_CSS_INTERNAL_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_legacy.h b/drivers/staging/media/atomisp/pci/sh_css_legacy.h
new file mode 100644
index 000000000000..99ac690ba7aa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_legacy.h
@@ -0,0 +1,70 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_LEGACY_H_
+#define _SH_CSS_LEGACY_H_
+
+#include <type_support.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_pipe_public.h>
+#include <ia_css_stream_public.h>
+
+/* The pipe id type, distinguishes the kind of pipes that
+ * can be run in parallel.
+ */
+enum ia_css_pipe_id {
+ IA_CSS_PIPE_ID_PREVIEW,
+ IA_CSS_PIPE_ID_COPY,
+ IA_CSS_PIPE_ID_VIDEO,
+ IA_CSS_PIPE_ID_CAPTURE,
+ IA_CSS_PIPE_ID_YUVPP,
+ IA_CSS_PIPE_ID_ACC,
+ IA_CSS_PIPE_ID_NUM
+};
+
+struct ia_css_pipe_extra_config {
+ bool enable_raw_binning;
+ bool enable_yuv_ds;
+ bool enable_high_speed;
+ bool enable_dvs_6axis;
+ bool enable_reduced_pipe;
+ bool enable_fractional_ds;
+ bool disable_vf_pp;
+};
+
+enum ia_css_err
+ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
+ const struct ia_css_pipe_extra_config *extra_config,
+ struct ia_css_pipe **pipe);
+
+void
+ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config
+ *extra_config);
+
+enum ia_css_err
+ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe,
+ enum ia_css_pipe_id *pipe_id);
+
+/* DEPRECATED. FPN is not supported. */
+enum ia_css_err
+sh_css_set_black_frame(struct ia_css_stream *stream,
+ const struct ia_css_frame *raw_black_frame);
+
+/* ISP2400 */
+void
+sh_css_enable_cont_capt(bool enable, bool stop_copy_preview);
+
+#endif /* _SH_CSS_LEGACY_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_metadata.c b/drivers/staging/media/atomisp/pci/sh_css_metadata.c
new file mode 100644
index 000000000000..ebdf84d4a138
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_metadata.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_metadata.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_metrics.c b/drivers/staging/media/atomisp/pci/sh_css_metrics.c
new file mode 100644
index 000000000000..17f6dd9afab4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_metrics.c
@@ -0,0 +1,175 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "assert_support.h"
+#include "sh_css_metrics.h"
+
+#include "sp.h"
+#include "isp.h"
+
+#include "sh_css_internal.h"
+
+#define MULTIPLE_PCS 0
+#define SUSPEND 0
+#define NOF_PCS 1
+#define RESUME_MASK 0x8
+#define STOP_MASK 0x0
+
+static bool pc_histogram_enabled;
+static struct sh_css_pc_histogram *isp_histogram;
+static struct sh_css_pc_histogram *sp_histogram;
+
+struct sh_css_metrics sh_css_metrics;
+
+void
+sh_css_metrics_start_frame(void)
+{
+ sh_css_metrics.frame_metrics.num_frames++;
+}
+
+static void
+clear_histogram(struct sh_css_pc_histogram *histogram)
+{
+ unsigned int i;
+
+ assert(histogram);
+
+ for (i = 0; i < histogram->length; i++) {
+ histogram->run[i] = 0;
+ histogram->stall[i] = 0;
+ histogram->msink[i] = 0xFFFF;
+ }
+}
+
+void
+sh_css_metrics_enable_pc_histogram(bool enable)
+{
+ pc_histogram_enabled = enable;
+}
+
+static void
+make_histogram(struct sh_css_pc_histogram *histogram, unsigned int length)
+{
+ assert(histogram);
+
+ if (histogram->length)
+ return;
+ if (histogram->run)
+ return;
+ histogram->run = sh_css_malloc(length * sizeof(*histogram->run));
+ if (!histogram->run)
+ return;
+ histogram->stall = sh_css_malloc(length * sizeof(*histogram->stall));
+ if (!histogram->stall)
+ return;
+ histogram->msink = sh_css_malloc(length * sizeof(*histogram->msink));
+ if (!histogram->msink)
+ return;
+
+ histogram->length = length;
+ clear_histogram(histogram);
+}
+
+static void
+insert_binary_metrics(struct sh_css_binary_metrics **l,
+ struct sh_css_binary_metrics *metrics)
+{
+ assert(l);
+ assert(*l);
+ assert(metrics);
+
+ for (; *l; l = &(*l)->next)
+ if (*l == metrics)
+ return;
+
+ *l = metrics;
+ metrics->next = NULL;
+}
+
+void
+sh_css_metrics_start_binary(struct sh_css_binary_metrics *metrics)
+{
+ assert(metrics);
+
+ if (!pc_histogram_enabled)
+ return;
+
+ isp_histogram = &metrics->isp_histogram;
+ sp_histogram = &metrics->sp_histogram;
+ make_histogram(isp_histogram, ISP_PMEM_DEPTH);
+ make_histogram(sp_histogram, SP_PMEM_DEPTH);
+ insert_binary_metrics(&sh_css_metrics.binary_metrics, metrics);
+}
+
+void
+sh_css_metrics_sample_pcs(void)
+{
+ bool stall;
+ unsigned int pc;
+ unsigned int msink;
+
+#if SUSPEND
+ unsigned int sc = 0;
+ unsigned int stopped_sc = 0;
+ unsigned int resume_sc = 0;
+#endif
+
+#if MULTIPLE_PCS
+ int i;
+ unsigned int pc_tab[NOF_PCS];
+
+ for (i = 0; i < NOF_PCS; i++)
+ pc_tab[i] = 0;
+#endif
+
+ if (!pc_histogram_enabled)
+ return;
+
+ if (isp_histogram) {
+#if SUSPEND
+ /* STOP the ISP */
+ isp_ctrl_store(ISP0_ID, ISP_SC_REG, STOP_MASK);
+#endif
+ msink = isp_ctrl_load(ISP0_ID, ISP_CTRL_SINK_REG);
+#if MULTIPLE_PCS
+ for (i = 0; i < NOF_PCS; i++)
+ pc_tab[i] = isp_ctrl_load(ISP0_ID, ISP_PC_REG);
+#else
+ pc = isp_ctrl_load(ISP0_ID, ISP_PC_REG);
+#endif
+
+#if SUSPEND
+ /* RESUME the ISP */
+ isp_ctrl_store(ISP0_ID, ISP_SC_REG, RESUME_MASK);
+#endif
+ isp_histogram->msink[pc] &= msink;
+ stall = (msink != 0x7FF);
+
+ if (stall)
+ isp_histogram->stall[pc]++;
+ else
+ isp_histogram->run[pc]++;
+ }
+
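+	/* The "&& 0" below keeps SP PC sampling compiled in but disabled. */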
+ if (sp_histogram && 0) {
+ msink = sp_ctrl_load(SP0_ID, SP_CTRL_SINK_REG);
+ pc = sp_ctrl_load(SP0_ID, SP_PC_REG);
+ sp_histogram->msink[pc] &= msink;
+ stall = (msink != 0x7FF);
+ if (stall)
+ sp_histogram->stall[pc]++;
+ else
+ sp_histogram->run[pc]++;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_metrics.h b/drivers/staging/media/atomisp/pci/sh_css_metrics.h
new file mode 100644
index 000000000000..f465d1545b8b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_metrics.h
@@ -0,0 +1,55 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_METRICS_H_
+#define _SH_CSS_METRICS_H_
+
+#include <type_support.h>
+
+struct sh_css_pc_histogram {
+ unsigned int length;
+ unsigned int *run;
+ unsigned int *stall;
+ unsigned int *msink;
+};
+
+struct sh_css_binary_metrics {
+ unsigned int mode;
+ unsigned int id;
+ struct sh_css_pc_histogram isp_histogram;
+ struct sh_css_pc_histogram sp_histogram;
+ struct sh_css_binary_metrics *next;
+};
+
+struct ia_css_frame_metrics {
+ unsigned int num_frames;
+};
+
+struct sh_css_metrics {
+ struct sh_css_binary_metrics *binary_metrics;
+ struct ia_css_frame_metrics frame_metrics;
+};
+
+extern struct sh_css_metrics sh_css_metrics;
+
+/* includes ia_css_binary.h, which depends on sh_css_metrics.h */
+#include "ia_css_types.h"
+
+/* Sample ISP and SP pc and add to histogram */
+void sh_css_metrics_enable_pc_histogram(bool enable);
+void sh_css_metrics_start_frame(void);
+void sh_css_metrics_start_binary(struct sh_css_binary_metrics *metrics);
+void sh_css_metrics_sample_pcs(void);
+
+#endif /* _SH_CSS_METRICS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
new file mode 100644
index 000000000000..35cbef5f9f71
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
@@ -0,0 +1,757 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_mipi.h"
+#include "sh_css_mipi.h"
+#include <type_support.h>
+#include "system_global.h"
+#include "ia_css_err.h"
+#include "ia_css_pipe.h"
+#include "ia_css_stream_format.h"
+#include "sh_css_stream_format.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_input_port.h"
+#include "ia_css_debug.h"
+#include "sh_css_struct.h"
+#include "sh_css_defs.h"
+#include "sh_css_sp.h" /* sh_css_update_host2sp_mipi_frame sh_css_update_host2sp_num_mipi_frames ... */
+#include "sw_event_global.h" /* IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY */
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+static u32
+ref_count_mipi_allocation[N_CSI_PORTS]; /* Initialized in mipi_init */
+#endif
+
+enum ia_css_err
+ia_css_mipi_frame_specify(const unsigned int size_mem_words,
+ const bool contiguous) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ my_css.size_mem_words = size_mem_words;
+ (void)contiguous;
+
+ return err;
+}
+
+/*
+ * Check if a source port or TPG/PRBS ID is valid
+ */
+static bool ia_css_mipi_is_source_port_valid(struct ia_css_pipe *pipe,
+ unsigned int *pport)
+{
+ bool ret = true;
+ unsigned int port = 0;
+ unsigned int max_ports = 0;
+
+ switch (pipe->stream->config.mode) {
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+ port = (unsigned int)pipe->stream->config.source.port.port;
+ max_ports = N_CSI_PORTS;
+ break;
+ case IA_CSS_INPUT_MODE_TPG:
+ port = (unsigned int)pipe->stream->config.source.tpg.id;
+ max_ports = N_CSS_TPG_IDS;
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+ port = (unsigned int)pipe->stream->config.source.prbs.id;
+ max_ports = N_CSS_PRBS_IDS;
+ break;
+ default:
+ assert(false);
+ ret = false;
+ break;
+ }
+
+ if (ret) {
+ assert(port < max_ports);
+
+ if (port >= max_ports)
+ ret = false;
+ }
+
+ *pport = port;
+
+ return ret;
+}
+
+/* Assumptions:
+ * - A line is multiple of 4 bytes = 1 word.
+ * - Each frame has SOF and EOF (each 1 word).
+ * - Each line has format header and optionally SOL and EOL (each 1 word).
+ * - Odd and even lines of the YUV420 format differ in bits per pixel.
+ * - Custom size of embedded data.
+ * -- Interleaved frames are not taken into account.
+ * -- Lines are multiples of 8B, and not necessarily of (custom 3B, 7B,
+ * etc.).
+ * Result is given in DDR mem words, 32B or 256 bits
+ */
+enum ia_css_err
+ia_css_mipi_frame_calculate_size(const unsigned int width,
+ const unsigned int height,
+ const enum atomisp_input_format format,
+ const bool hasSOLandEOL,
+ const unsigned int embedded_data_size_words,
+ unsigned int *size_mem_words) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ unsigned int bits_per_pixel = 0;
+ unsigned int even_line_bytes = 0;
+ unsigned int odd_line_bytes = 0;
+ unsigned int words_per_odd_line = 0;
+ unsigned int words_for_first_line = 0;
+ unsigned int words_per_even_line = 0;
+ unsigned int mem_words_per_even_line = 0;
+ unsigned int mem_words_per_odd_line = 0;
+ unsigned int mem_words_for_first_line = 0;
+ unsigned int mem_words_for_EOF = 0;
+ unsigned int mem_words = 0;
+ unsigned int width_padded = width;
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* The changes will be reverted as soon as RAW
+ * Buffers are deployed by the 2401 Input System
+ * in the non-continuous use scenario.
+ */
+ width_padded += (2 * ISP_VEC_NELEMS);
+#endif
+
+ IA_CSS_ENTER("padded_width=%d, height=%d, format=%d, hasSOLandEOL=%d, embedded_data_size_words=%d\n",
+ width_padded, height, format, hasSOLandEOL, embedded_data_size_words);
+
+ switch (format)
+ {
+ case ATOMISP_INPUT_FORMAT_RAW_6: /* 4p, 3B, 24bits */
+ bits_per_pixel = 6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7: /* 8p, 7B, 56bits */
+ bits_per_pixel = 7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_8: /* 1p, 1B, 8bits */
+ case ATOMISP_INPUT_FORMAT_BINARY_8: /* 8bits, TODO: check. */
+ case ATOMISP_INPUT_FORMAT_YUV420_8: /* odd 2p, 2B, 16bits, even 2p, 4B, 32bits */
+ bits_per_pixel = 8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10: /* odd 4p, 5B, 40bits, even 4p, 10B, 80bits */
+ case ATOMISP_INPUT_FORMAT_RAW_10: /* 4p, 5B, 40bits */
+#if !defined(HAS_NO_PACKED_RAW_PIXELS)
+ /* The changes will be reverted as soon as RAW
+ * Buffers are deployed by the 2401 Input System
+ * in the non-continuous use scenario.
+ */
+ bits_per_pixel = 10;
+#else
+ bits_per_pixel = 16;
+#endif
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: /* 2p, 3B, 24bits */
+ case ATOMISP_INPUT_FORMAT_RAW_12: /* 2p, 3B, 24bits */
+ bits_per_pixel = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14: /* 4p, 7B, 56bits */
+ bits_per_pixel = 14;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444: /* 1p, 2B, 16bits */
+ case ATOMISP_INPUT_FORMAT_RGB_555: /* 1p, 2B, 16bits */
+ case ATOMISP_INPUT_FORMAT_RGB_565: /* 1p, 2B, 16bits */
+ case ATOMISP_INPUT_FORMAT_YUV422_8: /* 2p, 4B, 32bits */
+ bits_per_pixel = 16;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666: /* 4p, 9B, 72bits */
+ bits_per_pixel = 18;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_10: /* 2p, 5B, 40bits */
+ bits_per_pixel = 20;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_888: /* 1p, 3B, 24bits */
+ bits_per_pixel = 24;
+ break;
+
+ case ATOMISP_INPUT_FORMAT_YUV420_16: /* Not supported */
+ case ATOMISP_INPUT_FORMAT_YUV422_16: /* Not supported */
+ case ATOMISP_INPUT_FORMAT_RAW_16: /* TODO: not specified in MIPI SPEC, check */
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */
+
+ /* Even lines for YUV420 formats are double in bits_per_pixel. */
+ if (format == ATOMISP_INPUT_FORMAT_YUV420_8
+ || format == ATOMISP_INPUT_FORMAT_YUV420_10
+ || format == ATOMISP_INPUT_FORMAT_YUV420_16)
+ {
+ even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >>
+ 3; /* ceil ( bits per line / 8) */
+ } else
+ {
+ even_line_bytes = odd_line_bytes;
+ }
+
+ /* a frame represented in memory: ()- optional; data - payload words.
+ * addr 0 1 2 3 4 5 6 7:
+ * first SOF (SOL) PACK_H data data data data data
+ * data data data data data data data data
+ * ...
+ * data data 0 0 0 0 0 0
+ * second (EOL) (SOL) PACK_H data data data data data
+ * data data data data data data data data
+ * ...
+ * data data 0 0 0 0 0 0
+ * ...
+ * last (EOL) EOF 0 0 0 0 0 0
+ *
+ * Embedded lines are regular lines stored before the first and after
+ * payload lines.
+ */
+
+ words_per_odd_line = (odd_line_bytes + 3) >> 2;
+ /* ceil(odd_line_bytes/4); word = 4 bytes */
+ words_per_even_line = (even_line_bytes + 3) >> 2;
+ words_for_first_line = words_per_odd_line + 2 + (hasSOLandEOL ? 1 : 0);
+ /* + SOF + packet header + optionally (SOL), but (EOL) is not in the first line */
+ words_per_odd_line += (1 + (hasSOLandEOL ? 2 : 0));
+ /* each non-first line has format header, and optionally (SOL) and (EOL). */
+ words_per_even_line += (1 + (hasSOLandEOL ? 2 : 0));
+
+ mem_words_per_odd_line = (words_per_odd_line + 7) >> 3;
+ /* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */
+ mem_words_for_first_line = (words_for_first_line + 7) >> 3;
+ mem_words_per_even_line = (words_per_even_line + 7) >> 3;
+ mem_words_for_EOF = 1; /* the last line consists of the optional (EOL) and EOF */
+
+ mem_words = ((embedded_data_size_words + 7) >> 3) +
+ mem_words_for_first_line +
+ (((height + 1) >> 1) - 1) * mem_words_per_odd_line +
+ /* ceil(height/2) - 1 (the first line is calculated separately) */
+ (height >> 1) * mem_words_per_even_line + /* floor(height/2) */
+ mem_words_for_EOF;
+
+ *size_mem_words = mem_words; /* ceil(words/8); a mem word is 32B = 8 words. */
+ /* Check if the above is still needed. */
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
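+/*
+ * Worked example for the calculation above (a sketch assuming packed RAW_10,
+ * 640x480, hasSOLandEOL == false, no embedded data and width_padded == width,
+ * i.e. no 2401 padding):
+ *   odd_line_bytes       = ceil(640 * 10 / 8) = 800
+ *   words_per_odd_line   = ceil(800 / 4) = 200
+ *   words_for_first_line = 200 + 2 (SOF + packet header) = 202 -> 26 mem words
+ *   other lines          = 200 + 1 (format header) = 201 -> 26 mem words
+ *   mem_words = 26 + 239 * 26 + 240 * 26 + 1 (EOF) = 12481 mem words
+ * i.e. 12481 * 32 bytes, roughly 390 KiB for one MIPI frame.
+ */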
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+enum ia_css_err
+ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
+ const unsigned int size_mem_words) {
+ u32 idx;
+
+ enum ia_css_err err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+
+ OP___assert(port < N_CSI_PORTS);
+ OP___assert(size_mem_words != 0);
+
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT &&
+ my_css.mipi_sizes_for_check[port][idx] != 0;
+ idx++) /* do nothing */
+ {
+ }
+ if (idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT)
+ {
+ my_css.mipi_sizes_for_check[port][idx] = size_mem_words;
+ err = IA_CSS_SUCCESS;
+ }
+
+ return err;
+}
+#endif
+
+void
+mipi_init(void)
+{
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ unsigned int i;
+
+ for (i = 0; i < N_CSI_PORTS; i++)
+ ref_count_mipi_allocation[i] = 0;
+#endif
+}
+
+enum ia_css_err
+calculate_mipi_buff_size(
+ struct ia_css_stream_config *stream_cfg,
+ unsigned int *size_mem_words) {
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ (void)stream_cfg;
+ (void)size_mem_words;
+#else
+ unsigned int width;
+ unsigned int height;
+ enum atomisp_input_format format;
+ bool pack_raw_pixels;
+
+ unsigned int width_padded;
+ unsigned int bits_per_pixel = 0;
+
+ unsigned int even_line_bytes = 0;
+ unsigned int odd_line_bytes = 0;
+
+ unsigned int words_per_odd_line = 0;
+ unsigned int words_per_even_line = 0;
+
+ unsigned int mem_words_per_even_line = 0;
+ unsigned int mem_words_per_odd_line = 0;
+
+ unsigned int mem_words_per_buff_line = 0;
+ unsigned int mem_words_per_buff = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ /**
+ * zhengjie.lu@intel.com
+ *
+ * NOTE
+ * - In the struct "ia_css_stream_config", there
+ * are two members: "input_config" and "isys_config".
+ * Both of them provide the same information, e.g.
+ * input_res and format.
+ *
+	 * The question here is: which one should be used?
+ */
+ width = stream_cfg->input_config.input_res.width;
+ height = stream_cfg->input_config.input_res.height;
+ format = stream_cfg->input_config.format;
+ pack_raw_pixels = stream_cfg->pack_raw_pixels;
+ /* end of NOTE */
+
+ /**
+ * zhengjie.lu@intel.com
+ *
+ * NOTE
+ * - The following code is derived from the
+ * existing code "ia_css_mipi_frame_calculate_size()".
+ *
+	 * The question here is: why add "2 * ISP_VEC_NELEMS"
+	 * to "width_padded" instead of aligning "width_padded"
+	 * to "2 * ISP_VEC_NELEMS"?
+ */
+ /* The changes will be reverted as soon as RAW
+ * Buffers are deployed by the 2401 Input System
+ * in the non-continuous use scenario.
+ */
+ width_padded = width + (2 * ISP_VEC_NELEMS);
+ /* end of NOTE */
+
+ IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n",
+ width_padded, height, format);
+
+ bits_per_pixel = sh_css_stream_format_2_bits_per_subpixel(format);
+ bits_per_pixel =
+ (format == ATOMISP_INPUT_FORMAT_RAW_10 && pack_raw_pixels) ? bits_per_pixel : 16;
+ if (bits_per_pixel == 0)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */
+
+ /* Even lines for YUV420 formats are double in bits_per_pixel. */
+ if (format == ATOMISP_INPUT_FORMAT_YUV420_8
+ || format == ATOMISP_INPUT_FORMAT_YUV420_10)
+ {
+ even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >>
+ 3; /* ceil ( bits per line / 8) */
+ } else
+ {
+ even_line_bytes = odd_line_bytes;
+ }
+
+ words_per_odd_line = (odd_line_bytes + 3) >> 2;
+ /* ceil(odd_line_bytes/4); word = 4 bytes */
+ words_per_even_line = (even_line_bytes + 3) >> 2;
+
+ mem_words_per_odd_line = (words_per_odd_line + 7) >> 3;
+ /* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */
+ mem_words_per_even_line = (words_per_even_line + 7) >> 3;
+
+ mem_words_per_buff_line =
+ (mem_words_per_odd_line > mem_words_per_even_line) ? mem_words_per_odd_line : mem_words_per_even_line;
+ mem_words_per_buff = mem_words_per_buff_line * height;
+
+ *size_mem_words = mem_words_per_buff;
+
+ IA_CSS_LEAVE_ERR(err);
+#endif
+ return err;
+}
+
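+/*
+ * SP-managed MIPI buffers are needed in every mode except buffered-sensor on
+ * the 2400 (!atomisp_hw_is_isp2401), and in every mode except buffered-sensor,
+ * TPG and PRBS on the 2401.
+ */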
+static bool buffers_needed(struct ia_css_pipe *pipe)
+{
+ if (!atomisp_hw_is_isp2401) {
+ if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ return false;
+ else
+ return true;
+ }
+
+ if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR ||
+ pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
+ pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS)
+ return false;
+
+ return true;
+}
+
+enum ia_css_err
+allocate_mipi_frames(struct ia_css_pipe *pipe,
+ struct ia_css_stream_info *info) {
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ unsigned int port;
+ struct ia_css_frame_info mipi_intermediate_info;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) enter:\n", pipe);
+
+ assert(pipe);
+ assert(pipe->stream);
+ if ((!pipe) || (!pipe->stream))
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: pipe or stream is null.\n",
+ pipe);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (pipe->stream->config.online)
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: no buffers needed for 2401 pipe mode.\n",
+ pipe);
+ return IA_CSS_SUCCESS;
+ }
+
+#endif
+
+ if (!buffers_needed(pipe)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: no buffers needed for pipe mode.\n",
+ pipe);
+ return IA_CSS_SUCCESS; /* AM TODO: Check */
+ }
+
+ if (!atomisp_hw_is_isp2401)
+ port = (unsigned int)pipe->stream->config.source.port.port;
+ else
+ err = ia_css_mipi_is_source_port_valid(pipe, &port);
+
+ assert(port < N_CSI_PORTS);
+
+ if (port >= N_CSI_PORTS || err) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: error: port is not correct (port=%d).\n",
+ pipe, port);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ err = calculate_mipi_buff_size(
+ &pipe->stream->config,
+ &my_css.mipi_frame_size[port]);
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ if (ref_count_mipi_allocation[port] != 0)
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: already allocated for this port (port=%d).\n",
+ pipe, port);
+ return IA_CSS_SUCCESS;
+ }
+#else
+	/* The 2401 system allows multiple streams to use the same physical port.
+	 * This is not true for the 2400 system. Currently the 2401 uses MIPI
+	 * buffers as a temporary solution.
+	 * TODO AM: Once that is changed (removed) this code should be removed as
+	 * well. In that case only 2400 related code should remain.
+	 */
+ if (ref_count_mipi_allocation[port] != 0)
+ {
+ ref_count_mipi_allocation[port]++;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) leave: nothing to do, already allocated for this port (port=%d).\n",
+ pipe, port);
+ return IA_CSS_SUCCESS;
+ }
+#endif
+
+ ref_count_mipi_allocation[port]++;
+
+ /* TODO: Cleaning needed. */
+	/* This code needs to be modified to allocate the MIPI frames in the
+	   correct, normal way, with an allocate-from-info, by Justin */
+ mipi_intermediate_info = pipe->pipe_settings.video.video_binary.internal_frame_info;
+ mipi_intermediate_info.res.width = 0;
+ mipi_intermediate_info.res.height = 0;
+ /* To indicate it is not (yet) valid format. */
+ mipi_intermediate_info.format = IA_CSS_FRAME_FORMAT_NUM;
+ mipi_intermediate_info.padded_width = 0;
+ mipi_intermediate_info.raw_bit_depth = 0;
+
+	/* AM TODO: the number of MIPI frames should come from the stream struct. */
+ my_css.num_mipi_frames[port] = NUM_MIPI_FRAMES_PER_STREAM;
+
+ /* Incremental allocation (per stream), not for all streams at once. */
+ { /* limit the scope of i,j */
+ unsigned int i, j;
+
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++)
+ {
+ /* free previous frame */
+ if (my_css.mipi_frames[port][i]) {
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+ my_css.mipi_frames[port][i] = NULL;
+ }
+ /* check if new frame is needed */
+ if (i < my_css.num_mipi_frames[port]) {
+ /* allocate new frame */
+ err = ia_css_frame_allocate_with_buffer_size(
+ &my_css.mipi_frames[port][i],
+ my_css.mipi_frame_size[port] * HIVE_ISP_DDR_WORD_BYTES,
+ false);
+ if (err != IA_CSS_SUCCESS) {
+ for (j = 0; j < i; j++) {
+ if (my_css.mipi_frames[port][j]) {
+ ia_css_frame_free(my_css.mipi_frames[port][j]);
+ my_css.mipi_frames[port][j] = NULL;
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p, %d) exit: error: allocation failed.\n",
+ pipe, port);
+ return err;
+ }
+ }
+ if (info->metadata_info.size > 0) {
+ /* free previous metadata buffer */
+ if (my_css.mipi_metadata[port][i]) {
+ ia_css_metadata_free(my_css.mipi_metadata[port][i]);
+ my_css.mipi_metadata[port][i] = NULL;
+ }
+ /* check if need to allocate a new metadata buffer */
+ if (i < my_css.num_mipi_frames[port]) {
+ /* allocate new metadata buffer */
+ my_css.mipi_metadata[port][i] = ia_css_metadata_allocate(&info->metadata_info);
+ if (!my_css.mipi_metadata[port][i]) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_metadata(%p, %d) failed.\n",
+ pipe, port);
+ return err;
+ }
+ }
+ }
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit:\n", pipe);
+
+ return err;
+#else
+ (void)pipe;
+ (void)info;
+ return IA_CSS_SUCCESS;
+#endif
+}
+
+enum ia_css_err
+free_mipi_frames(struct ia_css_pipe *pipe) {
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ unsigned int port;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) enter:\n", pipe);
+
+ /* assert(pipe != NULL); TEMP: TODO: Should be assert only. */
+ if (pipe)
+ {
+ assert(pipe->stream);
+ if ((!pipe) || (!pipe->stream)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) exit: error: pipe or stream is null.\n",
+ pipe);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (!buffers_needed(pipe)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) exit: error: wrong mode.\n",
+ pipe);
+ return err;
+ }
+
+ if (!atomisp_hw_is_isp2401)
+ port = (unsigned int)pipe->stream->config.source.port.port;
+ else
+ err = ia_css_mipi_is_source_port_valid(pipe, &port);
+
+ assert(port < N_CSI_PORTS);
+
+ if (port >= N_CSI_PORTS || err) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p, %d) exit: error: pipe port is not correct.\n",
+ pipe, port);
+ return err;
+ }
+
+ if (ref_count_mipi_allocation[port] > 0) {
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ assert(ref_count_mipi_allocation[port] == 1);
+ if (ref_count_mipi_allocation[port] != 1) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) exit: error: wrong ref_count (ref_count=%d).\n",
+ pipe, ref_count_mipi_allocation[port]);
+ return err;
+ }
+#endif
+
+ ref_count_mipi_allocation[port]--;
+
+ if (ref_count_mipi_allocation[port] == 0) {
+ /* no streams are using this buffer, so free it */
+ unsigned int i;
+
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ if (my_css.mipi_frames[port][i]) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(port=%d, num=%d).\n", port, i);
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+ my_css.mipi_frames[port][i] = NULL;
+ }
+ if (my_css.mipi_metadata[port][i]) {
+ ia_css_metadata_free(my_css.mipi_metadata[port][i]);
+ my_css.mipi_metadata[port][i] = NULL;
+ }
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) exit (deallocated).\n", pipe);
+ }
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ else {
+			/* The 2401 system allows multiple streams to use the same physical port.
+			 * This is not true for the 2400 system. Currently the 2401 uses MIPI
+			 * buffers as a temporary solution.
+			 * TODO AM: Once that is changed (removed) this code should be removed
+			 * as well. In that case only 2400 related code should remain.
+			 */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) leave: nothing to do, other streams still use this port (port=%d).\n",
+ pipe, port);
+ }
+#endif
+ }
+	} else /* pipe == NULL */
+ {
+		/* AM TEMP: freeing all mipi buffers, just like the legacy code. */
+ for (port = CSI_PORT0_ID; port < N_CSI_PORTS; port++) {
+ unsigned int i;
+
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ if (my_css.mipi_frames[port][i]) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(port=%d, num=%d).\n", port, i);
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+ my_css.mipi_frames[port][i] = NULL;
+ }
+ if (my_css.mipi_metadata[port][i]) {
+ ia_css_metadata_free(my_css.mipi_metadata[port][i]);
+ my_css.mipi_metadata[port][i] = NULL;
+ }
+ }
+ ref_count_mipi_allocation[port] = 0;
+ }
+ }
+#else
+ (void)pipe;
+#endif
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+send_mipi_frames(struct ia_css_pipe *pipe) {
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ unsigned int i;
+#ifndef ISP2401
+ unsigned int port;
+#else
+ unsigned int port = 0;
+#endif
+
+ IA_CSS_ENTER_PRIVATE("pipe=%p", pipe);
+
+ assert(pipe);
+ assert(pipe->stream);
+ if (!pipe || !pipe->stream)
+ {
+ IA_CSS_ERROR("pipe or stream is null");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* multi stream video needs mipi buffers */
+ /* nothing to be done in other cases. */
+ if (!buffers_needed(pipe)) {
+ IA_CSS_LOG("nothing to be done for this mode");
+ return IA_CSS_SUCCESS;
+ /* TODO: AM: maybe this should be returning an error. */
+ }
+
+ if (!atomisp_hw_is_isp2401)
+ port = (unsigned int)pipe->stream->config.source.port.port;
+ else
+ err = ia_css_mipi_is_source_port_valid(pipe, &port);
+
+ assert(port < N_CSI_PORTS);
+
+ if (port >= N_CSI_PORTS || err) {
+ IA_CSS_ERROR("send_mipi_frames(%p) exit: invalid port specified (port=%d).\n",
+ pipe, port);
+ return err;
+ }
+
+ /* Hand-over the SP-internal mipi buffers */
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++)
+ {
+		/* Need to include the offset for the port. */
+ sh_css_update_host2sp_mipi_frame(port * NUM_MIPI_FRAMES_PER_STREAM + i,
+ my_css.mipi_frames[port][i]);
+ sh_css_update_host2sp_mipi_metadata(port * NUM_MIPI_FRAMES_PER_STREAM + i,
+ my_css.mipi_metadata[port][i]);
+ }
+ sh_css_update_host2sp_num_mipi_frames(my_css.num_mipi_frames[port]);
+
+ /**********************************
+ * Send an event to inform the SP
+ * that all MIPI frames are passed.
+ **********************************/
+ if (!sh_css_sp_is_running())
+ {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_ERROR("sp is not running");
+ return err;
+ }
+
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY,
+ (uint8_t)port,
+ (uint8_t)my_css.num_mipi_frames[port],
+ 0 /* not used */);
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+#else
+ (void)pipe;
+#endif
+ return IA_CSS_SUCCESS;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.h b/drivers/staging/media/atomisp/pci/sh_css_mipi.h
new file mode 100644
index 000000000000..ab38589d6457
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_MIPI_H
+#define __SH_CSS_MIPI_H
+
+#include <ia_css_err.h> /* ia_css_err */
+#include <ia_css_types.h> /* ia_css_pipe */
+#include <ia_css_stream_public.h> /* ia_css_stream_config */
+
+void
+mipi_init(void);
+
+enum ia_css_err
+allocate_mipi_frames(struct ia_css_pipe *pipe, struct ia_css_stream_info *info);
+
+enum ia_css_err
+free_mipi_frames(struct ia_css_pipe *pipe);
+
+enum ia_css_err
+send_mipi_frames(struct ia_css_pipe *pipe);
+
+/**
+ * @brief Calculate the required MIPI buffer sizes.
+ * Based on the stream configuration, calculate the
+ * required MIPI buffer sizes (in DDR words).
+ *
+ * @param[in] stream_cfg Pointer to the target stream configuration.
+ * @param[out] size_mem_words MIPI buffer size in DDR words.
+ *
+ * @return IA_CSS_SUCCESS on success, or an error code otherwise.
+ */
+enum ia_css_err
+calculate_mipi_buff_size(
+ struct ia_css_stream_config *stream_cfg,
+ unsigned int *size_mem_words);
+
+#endif /* __SH_CSS_MIPI_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mmu.c b/drivers/staging/media/atomisp/pci/sh_css_mmu.c
new file mode 100644
index 000000000000..179b6f40be49
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_mmu.c
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_mmu.h"
+#include "ia_css_mmu_private.h"
+#include <ia_css_debug.h>
+#include "sh_css_sp.h"
+#include "sh_css_firmware.h"
+#include "sp.h"
+#include "mmu_device.h"
+
+void
+ia_css_mmu_invalidate_cache(void)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_mmu_invalidate_cache() enter\n");
+
+ /* if the SP is not running we should not access its dmem */
+ if (sh_css_sp_is_running()) {
+ HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb;
+
+		(void)HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb; /* Suppress warnings in CRUN */
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
+ true);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_mmu_invalidate_cache() leave\n");
+}
+
+/* Deprecated, this is an HRT backend function (memory_access.h) */
+void
+sh_css_mmu_set_page_table_base_index(hrt_data base_index)
+{
+ int i;
+
+ IA_CSS_ENTER_PRIVATE("base_index=0x%08x\n", base_index);
+ for (i = 0; i < N_MMU_ID; i++) {
+ mmu_ID_t mmu_id = i;
+
+ mmu_set_page_table_base_index(mmu_id, base_index);
+ mmu_invalidate_cache(mmu_id);
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_morph.c b/drivers/staging/media/atomisp/pci/sh_css_morph.c
new file mode 100644
index 000000000000..1f4fa25b1e79
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_morph.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_morph.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c
new file mode 100644
index 000000000000..52e29161cb35
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c
@@ -0,0 +1,286 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "sh_css_param_dvs.h"
+#include <assert_support.h>
+#include <type_support.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+#include "ia_css_debug.h"
+#include "memory_access.h"
+
+static struct ia_css_dvs_6axis_config *
+alloc_dvs_6axis_table(const struct ia_css_resolution *frame_res,
+ struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ unsigned int width_y = 0;
+ unsigned int height_y = 0;
+ unsigned int width_uv = 0;
+ unsigned int height_uv = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_dvs_6axis_config *dvs_config = NULL;
+
+ dvs_config = (struct ia_css_dvs_6axis_config *)sh_css_malloc(sizeof(
+ struct ia_css_dvs_6axis_config));
+ if (!dvs_config) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ } else {
+ /*Initialize new struct with latest config settings*/
+ if (dvs_config_src) {
+ dvs_config->width_y = width_y = dvs_config_src->width_y;
+ dvs_config->height_y = height_y = dvs_config_src->height_y;
+ dvs_config->width_uv = width_uv = dvs_config_src->width_uv;
+ dvs_config->height_uv = height_uv = dvs_config_src->height_uv;
+ IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y);
+ } else if (frame_res) {
+ dvs_config->width_y = width_y = DVS_TABLE_IN_BLOCKDIM_X_LUMA(frame_res->width);
+ dvs_config->height_y = height_y = DVS_TABLE_IN_BLOCKDIM_Y_LUMA(
+ frame_res->height);
+ dvs_config->width_uv = width_uv = DVS_TABLE_IN_BLOCKDIM_X_CHROMA(
+ frame_res->width /
+ 2); /* UV = Y/2, depends on colour format YUV 4:2:0 */
+ dvs_config->height_uv = height_uv = DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(
+ frame_res->height /
+ 2); /* UV = Y/2, depends on colour format YUV 4:2:0 */
+ IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y);
+ }
+
+ /* Generate Y buffers */
+ dvs_config->xcoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(
+ uint32_t));
+ if (!dvs_config->xcoords_y) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto exit;
+ }
+
+ dvs_config->ycoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(
+ uint32_t));
+ if (!dvs_config->ycoords_y) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto exit;
+ }
+
+ /* Generate UV buffers */
+ IA_CSS_LOG("UV W %d H %d", width_uv, height_uv);
+
+ dvs_config->xcoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv *
+ sizeof(uint32_t));
+ if (!dvs_config->xcoords_uv) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto exit;
+ }
+
+ dvs_config->ycoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv *
+ sizeof(uint32_t));
+ if (!dvs_config->ycoords_uv) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+exit:
+ if (err != IA_CSS_SUCCESS) {
+ free_dvs_6axis_table(
+ &dvs_config); /* we might have allocated some memory, release this */
+ dvs_config = NULL;
+ }
+ }
+
+ IA_CSS_LEAVE("dvs_config=%p", dvs_config);
+ return dvs_config;
+}
+
+static void
+init_dvs_6axis_table_from_default(struct ia_css_dvs_6axis_config *dvs_config,
+ const struct ia_css_resolution *dvs_offset)
+{
+ unsigned int x, y;
+ unsigned int width_y = dvs_config->width_y;
+ unsigned int height_y = dvs_config->height_y;
+ unsigned int width_uv = dvs_config->width_uv;
+ unsigned int height_uv = dvs_config->height_uv;
+
+ IA_CSS_LOG("Env_X=%d, Env_Y=%d, width_y=%d, height_y=%d",
+ dvs_offset->width, dvs_offset->height, width_y, height_y);
+ for (y = 0; y < height_y; y++) {
+ for (x = 0; x < width_y; x++) {
+ dvs_config->xcoords_y[y * width_y + x] = (dvs_offset->width + x *
+ DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+ for (y = 0; y < height_y; y++) {
+ for (x = 0; x < width_y; x++) {
+ dvs_config->ycoords_y[y * width_y + x] = (dvs_offset->height + y *
+ DVS_BLOCKDIM_Y_LUMA) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+ for (y = 0; y < height_uv; y++) {
+ for (x = 0; x < width_uv;
+ x++) { /* Envelope dimensions are set in Y pixels, hence offset UV = offset Y/2 */
+ dvs_config->xcoords_uv[y * width_uv + x] = ((dvs_offset->width / 2) + x *
+ DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+ for (y = 0; y < height_uv; y++) {
+ for (x = 0; x < width_uv;
+ x++) { /* Envelope dimensions are set in Y pixels, hence offset UV = offset Y/2 */
+ dvs_config->ycoords_uv[y * width_uv + x] = ((dvs_offset->height / 2) + y *
+ DVS_BLOCKDIM_Y_CHROMA) <<
+ DVS_COORD_FRAC_BITS;
+ }
+ }
+}
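+/*
+ * Example of the default coordinates generated above (assuming
+ * dvs_offset->width == 12 and DVS_BLOCKDIM_X == 64): the luma x coordinates
+ * for x = 0, 1, 2 are 12, 76 and 140 pixels, stored in fixed point as
+ * 12 << 10, 76 << 10 and 140 << 10 (DVS_COORD_FRAC_BITS == 10).
+ */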
+
+static void
+init_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config,
+ struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ unsigned int width_y = dvs_config->width_y;
+ unsigned int height_y = dvs_config->height_y;
+ unsigned int width_uv = dvs_config->width_uv;
+ unsigned int height_uv = dvs_config->height_uv;
+
+ memcpy(dvs_config->xcoords_y, dvs_config_src->xcoords_y,
+ (width_y * height_y * sizeof(uint32_t)));
+ memcpy(dvs_config->ycoords_y, dvs_config_src->ycoords_y,
+ (width_y * height_y * sizeof(uint32_t)));
+ memcpy(dvs_config->xcoords_uv, dvs_config_src->xcoords_uv,
+ (width_uv * height_uv * sizeof(uint32_t)));
+ memcpy(dvs_config->ycoords_uv, dvs_config_src->ycoords_uv,
+ (width_uv * height_uv * sizeof(uint32_t)));
+}
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table(const struct ia_css_resolution *frame_res,
+ const struct ia_css_resolution *dvs_offset)
+{
+ struct ia_css_dvs_6axis_config *dvs_6axis_table;
+
+ assert(frame_res);
+ assert(dvs_offset);
+
+ dvs_6axis_table = alloc_dvs_6axis_table(frame_res, NULL);
+ if (dvs_6axis_table) {
+ init_dvs_6axis_table_from_default(dvs_6axis_table, dvs_offset);
+ return dvs_6axis_table;
+ }
+ return NULL;
+}
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config
+ *dvs_config_src)
+{
+ struct ia_css_dvs_6axis_config *dvs_6axis_table;
+
+ assert(dvs_config_src);
+
+ dvs_6axis_table = alloc_dvs_6axis_table(NULL, dvs_config_src);
+ if (dvs_6axis_table) {
+ init_dvs_6axis_table_from_config(dvs_6axis_table, dvs_config_src);
+ return dvs_6axis_table;
+ }
+ return NULL;
+}
+
+void
+free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config)
+{
+ assert(dvs_6axis_config);
+ assert(*dvs_6axis_config);
+
+ if ((dvs_6axis_config) && (*dvs_6axis_config)) {
+ IA_CSS_ENTER_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));
+ if ((*dvs_6axis_config)->xcoords_y) {
+ sh_css_free((*dvs_6axis_config)->xcoords_y);
+ (*dvs_6axis_config)->xcoords_y = NULL;
+ }
+
+ if ((*dvs_6axis_config)->ycoords_y) {
+ sh_css_free((*dvs_6axis_config)->ycoords_y);
+ (*dvs_6axis_config)->ycoords_y = NULL;
+ }
+
+ /* Free up UV buffers */
+ if ((*dvs_6axis_config)->xcoords_uv) {
+ sh_css_free((*dvs_6axis_config)->xcoords_uv);
+ (*dvs_6axis_config)->xcoords_uv = NULL;
+ }
+
+ if ((*dvs_6axis_config)->ycoords_uv) {
+ sh_css_free((*dvs_6axis_config)->ycoords_uv);
+ (*dvs_6axis_config)->ycoords_uv = NULL;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));
+ sh_css_free(*dvs_6axis_config);
+ *dvs_6axis_config = NULL;
+ }
+}
+
+void copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst,
+ const struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ unsigned int width_y;
+ unsigned int height_y;
+ unsigned int width_uv;
+ unsigned int height_uv;
+
+ assert(dvs_config_src);
+ assert(dvs_config_dst);
+ assert(dvs_config_src->xcoords_y);
+ assert(dvs_config_src->xcoords_uv);
+ assert(dvs_config_src->ycoords_y);
+ assert(dvs_config_src->ycoords_uv);
+ assert(dvs_config_src->width_y == dvs_config_dst->width_y);
+ assert(dvs_config_src->width_uv == dvs_config_dst->width_uv);
+ assert(dvs_config_src->height_y == dvs_config_dst->height_y);
+ assert(dvs_config_src->height_uv == dvs_config_dst->height_uv);
+
+ width_y = dvs_config_src->width_y;
+ height_y = dvs_config_src->height_y;
+ width_uv =
+ dvs_config_src->width_uv; /* = Y/2, depends on colour format YUV 4:2:0 */
+ height_uv = dvs_config_src->height_uv;
+
+ memcpy(dvs_config_dst->xcoords_y, dvs_config_src->xcoords_y,
+ (width_y * height_y * sizeof(uint32_t)));
+ memcpy(dvs_config_dst->ycoords_y, dvs_config_src->ycoords_y,
+ (width_y * height_y * sizeof(uint32_t)));
+
+ memcpy(dvs_config_dst->xcoords_uv, dvs_config_src->xcoords_uv,
+ (width_uv * height_uv * sizeof(uint32_t)));
+ memcpy(dvs_config_dst->ycoords_uv, dvs_config_src->ycoords_uv,
+ (width_uv * height_uv * sizeof(uint32_t)));
+}
+
+void
+ia_css_dvs_statistics_get(enum dvs_statistics_type type,
+ union ia_css_dvs_statistics_host *host_stats,
+ const union ia_css_dvs_statistics_isp *isp_stats)
+{
+ if (type == DVS_STATISTICS) {
+ ia_css_get_dvs_statistics(host_stats->p_dvs_statistics_host,
+ isp_stats->p_dvs_statistics_isp);
+ } else if (type == DVS2_STATISTICS) {
+ ia_css_get_dvs2_statistics(host_stats->p_dvs2_statistics_host,
+ isp_stats->p_dvs_statistics_isp);
+ }
+ return;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
new file mode 100644
index 000000000000..b4cffbbdafde
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
@@ -0,0 +1,85 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_PARAMS_DVS_H_
+#define _SH_CSS_PARAMS_DVS_H_
+
+#include <math_support.h>
+#include <ia_css_types.h>
+#include <sh_css_dvs_info.h>
+#include "gdc_global.h" /* gdc_warp_param_mem_t */
+
+#define DVS_ENV_MIN_X (12)
+#define DVS_ENV_MIN_Y (12)
+
+#define DVS_BLOCKDIM_X (64) /* X block width */
+#define DVS_BLOCKDIM_Y_LUMA (64) /* Y block height */
+#define DVS_BLOCKDIM_Y_CHROMA (32) /* UV block height is half the Y block height */
+
+/* ISP2400 */
+/* horizontal 64x64 blocks round up to DVS_BLOCKDIM_X, make even */
+#define DVS_NUM_BLOCKS_X(X) (CEIL_MUL(CEIL_DIV((X), DVS_BLOCKDIM_X), 2))
+
+/* ISP2400 */
+/* vertical 64x64 blocks round up to DVS_BLOCKDIM_Y */
+#define DVS_NUM_BLOCKS_Y(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_LUMA))
+#define DVS_NUM_BLOCKS_X_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_X))
+#define DVS_NUM_BLOCKS_Y_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_CHROMA))
+
+#define DVS_TABLE_IN_BLOCKDIM_X_LUMA(X) (DVS_NUM_BLOCKS_X(X) + 1) /* N blocks have N + 1 set of coords */
+#define DVS_TABLE_IN_BLOCKDIM_X_CHROMA(X) (DVS_NUM_BLOCKS_X_CHROMA(X) + 1)
+#define DVS_TABLE_IN_BLOCKDIM_Y_LUMA(X) (DVS_NUM_BLOCKS_Y(X) + 1)
+#define DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(X) (DVS_NUM_BLOCKS_Y_CHROMA(X) + 1)
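+/*
+ * Example for the macros above: for a luma width of 1300 pixels,
+ * DVS_NUM_BLOCKS_X(1300) = CEIL_MUL(CEIL_DIV(1300, 64), 2) = CEIL_MUL(21, 2)
+ * = 22 blocks, giving DVS_TABLE_IN_BLOCKDIM_X_LUMA(1300) = 23 coordinate
+ * columns (N blocks need N + 1 sets of coordinates).
+ */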
+
+#define DVS_ENVELOPE_X(X) (((X) == 0) ? (DVS_ENV_MIN_X) : (X))
+#define DVS_ENVELOPE_Y(X) (((X) == 0) ? (DVS_ENV_MIN_Y) : (X))
+
+#define DVS_COORD_FRAC_BITS (10)
+
+/* ISP2400 */
+#define DVS_INPUT_BYTES_PER_PIXEL (1)
+
+#define XMEM_ALIGN_LOG2 (5)
+
+#define DVS_6AXIS_COORDS_ELEMS CEIL_MUL(sizeof(gdc_warp_param_mem_t) \
+ , HIVE_ISP_DDR_WORD_BYTES)
+
+/* Currently we only support two outputs with the same resolution; output 0 is the default one. */
+#define DVS_6AXIS_BYTES(binary) \
+ (DVS_6AXIS_COORDS_ELEMS \
+ * DVS_NUM_BLOCKS_X((binary)->out_frame_info[0].res.width) \
+ * DVS_NUM_BLOCKS_Y((binary)->out_frame_info[0].res.height))
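+
+/*
+ * Sizing sketch (illustrative numbers only; the real size of
+ * gdc_warp_param_mem_t is platform dependent): if DVS_6AXIS_COORDS_ELEMS
+ * rounds up to 64 bytes, a 1920x1080 output yields
+ * DVS_6AXIS_BYTES = 64 * 30 * 17 = 32640 bytes.
+ */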
+
+/*
+ * ISP2400:
+ * Bilinear interpolation (HRT_GDC_BLI_MODE) is currently the only supported
+ * method. Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet.
+ */
+#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table(const struct ia_css_resolution *frame_res,
+ const struct ia_css_resolution *dvs_offset);
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table_from_config(
+ struct ia_css_dvs_6axis_config *dvs_config_src);
+
+void
+free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config);
+
+void
+copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst,
+ const struct ia_css_dvs_6axis_config *dvs_config_src);
+
+#endif /* _SH_CSS_PARAMS_DVS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_shading.c b/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
new file mode 100644
index 000000000000..4b648df2d073
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
@@ -0,0 +1,402 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+
+#include <math_support.h>
+#include "sh_css_param_shading.h"
+#include "ia_css_shading.h"
+#include "assert_support.h"
+#include "sh_css_defs.h"
+#include "sh_css_internal.h"
+#include "ia_css_debug.h"
+#include "ia_css_pipe_binarydesc.h"
+
+#include "sh_css_hrt.h"
+
+#include "platform_support.h"
+
+/* Bilinear interpolation on shading tables:
+ * For each target point T, we calculate the 4 surrounding source points:
+ * ul (upper left), ur (upper right), ll (lower left) and lr (lower right).
+ * We then calculate the distances from T to the source points: dx0, dx1,
+ * dy0 and dy1.
+ * We then calculate the value of T:
+ * dx0*dy0*Slr + dx0*dy1*Sur + dx1*dy0*Sll + dx1*dy1*Sul.
+ * We choose a grid size of 1x1 which means:
+ * dx1 = 1-dx0
+ * dy1 = 1-dy0
+ *
+ * Sul dx0 dx1 Sur
+ * .<----->|<------------->.
+ * ^
+ * dy0|
+ * v T
+ * - .
+ * ^
+ * |
+ * dy1|
+ * v
+ * . .
+ * Sll Slr
+ *
+ * Padding:
+ * The area that the ISP operates on can include padding both on the left
+ * and the right. We need to pad the shading table such that the shading
+ * values end up at the correct pixel positions. This means we must pad the
+ * shading table to match the ISP padding.
+ * We can have 5 cases:
+ * 1. All 4 points fall in the left padding.
+ * 2. The left 2 points fall in the left padding.
+ * 3. All 4 points fall in the cropped (target) region.
+ * 4. The right 2 points fall in the right padding.
+ * 5. All 4 points fall in the right padding.
+ * Cases 1 and 5 are easy to handle: we simply use the
+ * value 1 in the shading table.
+ * Cases 2 and 4 require interpolation that takes into
+ * account how far into the padding area the pixels
+ * fall. We extrapolate the shading table into the
+ * padded area and then interpolate.
+ */
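+
+/*
+ * Worked example of the interpolation above (illustrative numbers only):
+ * with in_cell_size = 16, a target point at tx = 20 lies between source
+ * columns sx0 = 16 and sx1 = 32, so dx0 = 4, dx1 = 12 and divx = 16.
+ * With the same vertical distances (dy0 = 4, dy1 = 12, divy = 16) and
+ * source samples Sul = Sur = 100, Sll = Slr = 200, the result is
+ * (4*4*200 + 4*12*100 + 12*4*200 + 12*12*100) / (16*16) = 32000 / 256
+ * = 125, i.e. a quarter of the way down from the upper row, as expected.
+ */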
+static void
+crop_and_interpolate(unsigned int cropped_width,
+ unsigned int cropped_height,
+ unsigned int left_padding,
+ int right_padding,
+ int top_padding,
+ const struct ia_css_shading_table *in_table,
+ struct ia_css_shading_table *out_table,
+ enum ia_css_sc_color color)
+{
+ unsigned int i, j,
+ sensor_width,
+ sensor_height,
+ table_width,
+ table_height,
+ table_cell_h,
+ out_cell_size,
+ in_cell_size,
+ out_start_row,
+ padded_width;
+ int out_start_col, /* can be negative to indicate padded space */
+ table_cell_w;
+ unsigned short *in_ptr,
+ *out_ptr;
+
+ assert(in_table);
+ assert(out_table);
+
+ sensor_width = in_table->sensor_width;
+ sensor_height = in_table->sensor_height;
+ table_width = in_table->width;
+ table_height = in_table->height;
+ in_ptr = in_table->data[color];
+ out_ptr = out_table->data[color];
+
+ padded_width = cropped_width + left_padding + right_padding;
+ out_cell_size = CEIL_DIV(padded_width, out_table->width - 1);
+ in_cell_size = CEIL_DIV(sensor_width, table_width - 1);
+
+ out_start_col = ((int)sensor_width - (int)cropped_width) / 2 - left_padding;
+ out_start_row = ((int)sensor_height - (int)cropped_height) / 2 - top_padding;
+ table_cell_w = (int)((table_width - 1) * in_cell_size);
+ table_cell_h = (table_height - 1) * in_cell_size;
+
+ for (i = 0; i < out_table->height; i++) {
+ int ty, src_y0, src_y1;
+ unsigned int sy0, sy1, dy0, dy1, divy;
+
+ /* calculate target point and make sure it falls within
+ the table */
+ ty = out_start_row + i * out_cell_size;
+
+ /* calculate closest source points in shading table and
+ make sure they fall within the table */
+ src_y0 = ty / (int)in_cell_size;
+ if (in_cell_size < out_cell_size)
+ src_y1 = (ty + out_cell_size) / in_cell_size;
+ else
+ src_y1 = src_y0 + 1;
+ src_y0 = clamp(src_y0, 0, (int)table_height - 1);
+ src_y1 = clamp(src_y1, 0, (int)table_height - 1);
+ ty = min(clamp(ty, 0, (int)sensor_height - 1),
+ (int)table_cell_h);
+
+ /* calculate closest source points for distance computation */
+ sy0 = min(src_y0 * in_cell_size, sensor_height - 1);
+ sy1 = min(src_y1 * in_cell_size, sensor_height - 1);
+ /* calculate distance between source and target pixels */
+ dy0 = ty - sy0;
+ dy1 = sy1 - ty;
+ divy = sy1 - sy0;
+ if (divy == 0) {
+ dy0 = 1;
+ divy = 1;
+ }
+
+ for (j = 0; j < out_table->width; j++, out_ptr++) {
+ int tx, src_x0, src_x1;
+ unsigned int sx0, sx1, dx0, dx1, divx;
+ unsigned short s_ul, s_ur, s_ll, s_lr;
+
+ /* calculate target point */
+ tx = out_start_col + j * out_cell_size;
+ /* calculate closest source points. */
+ src_x0 = tx / (int)in_cell_size;
+ if (in_cell_size < out_cell_size) {
+ src_x1 = (tx + out_cell_size) /
+ (int)in_cell_size;
+ } else {
+ src_x1 = src_x0 + 1;
+ }
+ /* if src points fall in padding, select closest ones.*/
+ src_x0 = clamp(src_x0, 0, (int)table_width - 1);
+ src_x1 = clamp(src_x1, 0, (int)table_width - 1);
+ tx = min(clamp(tx, 0, (int)sensor_width - 1),
+ (int)table_cell_w);
+ /* calculate closest source points for distance
+ computation */
+ sx0 = min(src_x0 * in_cell_size, sensor_width - 1);
+ sx1 = min(src_x1 * in_cell_size, sensor_width - 1);
+ /* calculate distances between source and target
+ pixels */
+ dx0 = tx - sx0;
+ dx1 = sx1 - tx;
+ divx = sx1 - sx0;
+ /* if we're at the edge, we just use the closest
+ point still in the grid. The two source points
+ then coincide, so we set both the distance and
+ the divider to 1 to avoid dividing by zero. */
+ if (divx == 0) {
+ dx0 = 1;
+ divx = 1;
+ }
+
+ /* get source pixel values */
+ s_ul = in_ptr[(table_width * src_y0) + src_x0];
+ s_ur = in_ptr[(table_width * src_y0) + src_x1];
+ s_ll = in_ptr[(table_width * src_y1) + src_x0];
+ s_lr = in_ptr[(table_width * src_y1) + src_x1];
+
+ *out_ptr = (unsigned short)((dx0 * dy0 * s_lr + dx0 * dy1 * s_ur + dx1 * dy0 *
+ s_ll + dx1 * dy1 * s_ul) /
+ (divx * divy));
+ }
+ }
+}
+
+void
+sh_css_params_shading_id_table_generate(
+ struct ia_css_shading_table **target_table,
+ unsigned int table_width,
+ unsigned int table_height)
+{
+ /* initialize the table with ones; the shift (fraction_bits) becomes zero */
+ unsigned int i, j;
+ struct ia_css_shading_table *result;
+
+ assert(target_table);
+
+ result = ia_css_shading_table_alloc(table_width, table_height);
+ if (!result) {
+ *target_table = NULL;
+ return;
+ }
+
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ for (j = 0; j < table_height * table_width; j++)
+ result->data[i][j] = 1;
+ }
+ result->fraction_bits = 0;
+ *target_table = result;
+}
+
+void
+prepare_shading_table(const struct ia_css_shading_table *in_table,
+ unsigned int sensor_binning,
+ struct ia_css_shading_table **target_table,
+ const struct ia_css_binary *binary,
+ unsigned int bds_factor)
+{
+ unsigned int input_width,
+ input_height,
+ table_width,
+ table_height,
+ left_padding,
+ top_padding,
+ padded_width,
+ left_cropping,
+ i;
+ unsigned int bds_numerator, bds_denominator;
+ int right_padding;
+
+ struct ia_css_shading_table *result;
+
+ assert(target_table);
+ assert(binary);
+
+ if (!in_table) {
+ sh_css_params_shading_id_table_generate(target_table,
+ binary->sctbl_legacy_width_per_color,
+ binary->sctbl_legacy_height);
+ return;
+ }
+
+ padded_width = binary->in_frame_info.padded_width;
+ /* We use the ISP input resolution for the shading table because
+ shading correction is performed in the Bayer domain (before Bayer
+ down scaling). */
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ padded_width = CEIL_MUL(binary->effective_in_frame_res.width + 2 *
+ ISP_VEC_NELEMS,
+ 2 * ISP_VEC_NELEMS);
+#endif
+ input_height = binary->in_frame_info.res.height;
+ input_width = binary->in_frame_info.res.width;
+ left_padding = binary->left_padding;
+ left_cropping = (binary->info->sp.pipeline.left_cropping == 0) ?
+ binary->dvs_envelope.width : 2 * ISP_VEC_NELEMS;
+
+ sh_css_bds_factor_get_numerator_denominator
+ (bds_factor, &bds_numerator, &bds_denominator);
+
+ left_padding = (left_padding + binary->info->sp.pipeline.left_cropping) *
+ bds_numerator / bds_denominator -
+ binary->info->sp.pipeline.left_cropping;
+ right_padding = (binary->internal_frame_info.res.width -
+ binary->effective_in_frame_res.width * bds_denominator /
+ bds_numerator - left_cropping) * bds_numerator / bds_denominator;
+ top_padding = binary->info->sp.pipeline.top_cropping * bds_numerator /
+ bds_denominator -
+ binary->info->sp.pipeline.top_cropping;
+
+#if !defined(USE_WINDOWS_BINNING_FACTOR)
+ /* @deprecated{This part of the code will be replaced by the code
+ * in the #else section below to make the calculation the same across
+ * all platforms.
+ * Android and Windows interpret the binning_factor parameter
+ * differently. On Android, the binning factor is expressed in the form
+ * 2^N * 2^N, whereas on Windows the binning factor is N*N}
+ */
+
+ /* We take into account the binning done by the sensor. We do this
+ by cropping the non-binned part of the shading table and then
+ increasing the size of a grid cell by this same binning factor. */
+ input_width <<= sensor_binning;
+ input_height <<= sensor_binning;
+ /* We also scale the padding by the same binning factor. This will
+ make it much easier later on to calculate the padding of the
+ shading table. */
+ left_padding <<= sensor_binning;
+ right_padding <<= sensor_binning;
+ top_padding <<= sensor_binning;
+#else
+ input_width *= sensor_binning;
+ input_height *= sensor_binning;
+ left_padding *= sensor_binning;
+ right_padding *= sensor_binning;
+ top_padding *= sensor_binning;
+#endif /*USE_WINDOWS_BINNING_FACTOR*/
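+
+ /*
+ * Example of the difference (illustrative only): with
+ * sensor_binning = 1, the path above scales the dimensions by
+ * 2^1 = 2, whereas the USE_WINDOWS_BINNING_FACTOR path would
+ * multiply them by the factor itself, i.e. by 1.
+ */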
+
+ /* during simulation, the used resolution can exceed the sensor
+ resolution, so we clip it. */
+ input_width = min(input_width, in_table->sensor_width);
+ input_height = min(input_height, in_table->sensor_height);
+
+ /* This prepare_shading_table() function is called only by the legacy
+ API (not by the new API), so the legacy shading table width and
+ height should be used. */
+ table_width = binary->sctbl_legacy_width_per_color;
+ table_height = binary->sctbl_legacy_height;
+
+ result = ia_css_shading_table_alloc(table_width, table_height);
+ if (!result) {
+ *target_table = NULL;
+ return;
+ }
+ result->sensor_width = in_table->sensor_width;
+ result->sensor_height = in_table->sensor_height;
+ result->fraction_bits = in_table->fraction_bits;
+
+ /* now we crop the original shading table and then interpolate to the
+ requested resolution and decimation factor. */
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ crop_and_interpolate(input_width, input_height,
+ left_padding, right_padding, top_padding,
+ in_table,
+ result, i);
+ }
+ *target_table = result;
+}
+
+struct ia_css_shading_table *
+ia_css_shading_table_alloc(
+ unsigned int width,
+ unsigned int height)
+{
+ unsigned int i;
+ struct ia_css_shading_table *me;
+
+ IA_CSS_ENTER("");
+
+ me = kmalloc(sizeof(*me), GFP_KERNEL);
+ if (!me)
+ return NULL;
+
+ me->width = width;
+ me->height = height;
+ me->sensor_width = 0;
+ me->sensor_height = 0;
+ me->fraction_bits = 0;
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ me->data[i] =
+ sh_css_malloc(width * height * sizeof(*me->data[0]));
+ if (!me->data[i]) {
+ unsigned int j;
+
+ for (j = 0; j < i; j++) {
+ sh_css_free(me->data[j]);
+ me->data[j] = NULL;
+ }
+ kfree(me);
+ return NULL;
+ }
+ }
+
+ IA_CSS_LEAVE("");
+ return me;
+}
+
+void
+ia_css_shading_table_free(struct ia_css_shading_table *table)
+{
+ unsigned int i;
+
+ if (!table)
+ return;
+
+ /* We only log when the table is not NULL; otherwise the logs
+ * would give the impression that a table was freed.
+ */
+ IA_CSS_ENTER("");
+
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ if (table->data[i]) {
+ sh_css_free(table->data[i]);
+ table->data[i] = NULL;
+ }
+ }
+ kfree(table);
+
+ IA_CSS_LEAVE("");
+}
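+
+/*
+ * Minimal usage sketch (illustration only, not part of the driver):
+ * tables returned by ia_css_shading_table_alloc() are released with
+ * ia_css_shading_table_free(), which also tolerates a NULL pointer:
+ *
+ * struct ia_css_shading_table *tbl;
+ *
+ * tbl = ia_css_shading_table_alloc(3, 3);
+ * if (tbl) {
+ * ... fill tbl->data[color] for each color ...
+ * ia_css_shading_table_free(tbl);
+ * }
+ */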
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_shading.h b/drivers/staging/media/atomisp/pci/sh_css_param_shading.h
new file mode 100644
index 000000000000..6e480d31c201
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_shading.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_PARAMS_SHADING_H
+#define __SH_CSS_PARAMS_SHADING_H
+
+#include <ia_css_types.h>
+#include <ia_css_binary.h>
+
+void
+sh_css_params_shading_id_table_generate(
+ struct ia_css_shading_table **target_table,
+ unsigned int table_width,
+ unsigned int table_height);
+
+void
+prepare_shading_table(const struct ia_css_shading_table *in_table,
+ unsigned int sensor_binning,
+ struct ia_css_shading_table **target_table,
+ const struct ia_css_binary *binary,
+ unsigned int bds_factor);
+
+#endif /* __SH_CSS_PARAMS_SHADING_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
new file mode 100644
index 000000000000..2e719f7db89e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
@@ -0,0 +1,5247 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "gdc_device.h" /* gdc_lut_store(), ... */
+#include "isp.h" /* ISP_VEC_ELEMBITS */
+#include "vamem.h"
+#if !defined(HAS_NO_HMEM)
+#ifndef __INLINE_HMEM__
+#define __INLINE_HMEM__
+#endif
+#include "hmem.h"
+#endif /* !defined(HAS_NO_HMEM) */
+#define IA_CSS_INCLUDE_PARAMETERS
+#define IA_CSS_INCLUDE_ACC_PARAMETERS
+
+#include "sh_css_params.h"
+#include "ia_css_queue.h"
+#include "sw_event_global.h" /* Event IDs */
+
+#include "platform_support.h"
+#include "assert_support.h"
+#include "misc_support.h" /* NOT_USED */
+#include "math_support.h" /* max(), min() EVEN_FLOOR()*/
+
+#include "ia_css_stream.h"
+#include "sh_css_params_internal.h"
+#include "sh_css_param_shading.h"
+#include "sh_css_param_dvs.h"
+#include "ia_css_refcount.h"
+#include "sh_css_internal.h"
+#include "ia_css_control.h"
+#include "ia_css_shading.h"
+#include "sh_css_defs.h"
+#include "sh_css_sp.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_debug.h"
+#include "memory_access.h"
+#if 0 /* FIXME */
+#include "memory_realloc.h"
+#endif
+#include "ia_css_isp_param.h"
+#include "ia_css_isp_params.h"
+#include "ia_css_mipi.h"
+#include "ia_css_morph.h"
+#include "ia_css_host_data.h"
+#include "ia_css_pipe.h"
+#include "ia_css_pipe_binarydesc.h"
+#if 0
+#include "ia_css_system_ctrl.h"
+#endif
+
+/* Include all kernel host interfaces for ISP1 */
+
+#include "anr/anr_1.0/ia_css_anr.host.h"
+#include "cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "csc/csc_1.0/ia_css_csc.host.h"
+#include "de/de_1.0/ia_css_de.host.h"
+#include "dp/dp_1.0/ia_css_dp.host.h"
+#include "bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "dvs/dvs_1.0/ia_css_dvs.host.h"
+#include "fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "gc/gc_1.0/ia_css_gc.host.h"
+#include "macc/macc_1.0/ia_css_macc.host.h"
+#include "ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "ob/ob_1.0/ia_css_ob.host.h"
+#include "raw/raw_1.0/ia_css_raw.host.h"
+#include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
+#include "s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "sc/sc_1.0/ia_css_sc.host.h"
+#include "sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "uds/uds_1.0/ia_css_uds_param.h"
+#include "wb/wb_1.0/ia_css_wb.host.h"
+#include "ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "xnr/xnr_1.0/ia_css_xnr.host.h"
+
+/* Include additional kernel host interfaces for ISP2 */
+
+#include "aa/aa_2/ia_css_aa2.host.h"
+#include "anr/anr_2/ia_css_anr2.host.h"
+#include "bh/bh_2/ia_css_bh.host.h"
+#include "cnr/cnr_2/ia_css_cnr2.host.h"
+#include "ctc/ctc1_5/ia_css_ctc1_5.host.h"
+#include "de/de_2/ia_css_de2.host.h"
+#include "gc/gc_2/ia_css_gc2.host.h"
+#include "sdis/sdis_2/ia_css_sdis2.host.h"
+#include "ynr/ynr_2/ia_css_ynr2.host.h"
+#include "fc/fc_1.0/ia_css_formats.host.h"
+
+#include "xnr/xnr_3.0/ia_css_xnr3.host.h"
+
+#if defined(HAS_OUTPUT_SYSTEM)
+#include <components/output_system/sc_output_system_1.0/host/output_system.host.h>
+#endif
+
+#include "sh_css_frac.h"
+#include "ia_css_bufq.h"
+
+#define FPNTBL_BYTES(binary) \
+ (sizeof(char) * (binary)->in_frame_info.res.height * \
+ (binary)->in_frame_info.padded_width)
+
+#define ISP2400_SCTBL_BYTES(binary) \
+ (sizeof(unsigned short) * (binary)->sctbl_height * \
+ (binary)->sctbl_aligned_width_per_color * IA_CSS_SC_NUM_COLORS)
+
+#define ISP2401_SCTBL_BYTES(binary) \
+ (sizeof(unsigned short) * max((binary)->sctbl_height, (binary)->sctbl_legacy_height) * \
+ /* height should be the larger height between new api and legacy api */ \
+ (binary)->sctbl_aligned_width_per_color * IA_CSS_SC_NUM_COLORS)
+
+#define MORPH_PLANE_BYTES(binary) \
+ (SH_CSS_MORPH_TABLE_ELEM_BYTES * (binary)->morph_tbl_aligned_width * \
+ (binary)->morph_tbl_height)
+
+/* We keep a second copy of the ptr struct for the SP to access.
+ Again, this would not be necessary on the chip. */
+static hrt_vaddress sp_ddr_ptrs;
+
+/* sp group address on DDR */
+static hrt_vaddress xmem_sp_group_ptrs;
+
+static hrt_vaddress xmem_sp_stage_ptrs[IA_CSS_PIPE_ID_NUM]
+[SH_CSS_MAX_STAGES];
+static hrt_vaddress xmem_isp_stage_ptrs[IA_CSS_PIPE_ID_NUM]
+[SH_CSS_MAX_STAGES];
+
+static hrt_vaddress default_gdc_lut;
+static int interleaved_lut_temp[4][HRT_GDC_N];
+
+/* END DO NOT MOVE INTO VIMALS_WORLD */
+
+/* Digital Zoom lookup table. See documentation for more details about the
+ * contents of this table.
+ */
+#if defined(HAS_GDC_VERSION_2)
+#if defined(CONFIG_CSI2_PLUS)
+/*
+ * Coefficients from
+ * Css_Mizuchi/regressions/20140424_0930/all/applications/common/gdc_v2_common/lut.h
+ */
+
+static const int zoom_table[4][HRT_GDC_N] = {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -2, -2, -2, -2, -2, -2, -2,
+ -3, -3, -3, -3, -3, -3, -3, -4,
+ -4, -4, -4, -4, -5, -5, -5, -5,
+ -5, -5, -6, -6, -6, -6, -7, -7,
+ -7, -7, -7, -8, -8, -8, -8, -9,
+ -9, -9, -9, -10, -10, -10, -10, -11,
+ -11, -11, -12, -12, -12, -12, -13, -13,
+ -13, -14, -14, -14, -15, -15, -15, -15,
+ -16, -16, -16, -17, -17, -17, -18, -18,
+ -18, -19, -19, -20, -20, -20, -21, -21,
+ -21, -22, -22, -22, -23, -23, -24, -24,
+ -24, -25, -25, -25, -26, -26, -27, -27,
+ -28, -28, -28, -29, -29, -30, -30, -30,
+ -31, -31, -32, -32, -33, -33, -33, -34,
+ -34, -35, -35, -36, -36, -37, -37, -37,
+ -38, -38, -39, -39, -40, -40, -41, -41,
+ -42, -42, -43, -43, -44, -44, -45, -45,
+ -46, -46, -47, -47, -48, -48, -49, -49,
+ -50, -50, -51, -51, -52, -52, -53, -53,
+ -54, -54, -55, -55, -56, -56, -57, -57,
+ -58, -59, -59, -60, -60, -61, -61, -62,
+ -62, -63, -63, -64, -65, -65, -66, -66,
+ -67, -67, -68, -69, -69, -70, -70, -71,
+ -71, -72, -73, -73, -74, -74, -75, -75,
+ -76, -77, -77, -78, -78, -79, -80, -80,
+ -81, -81, -82, -83, -83, -84, -84, -85,
+ -86, -86, -87, -87, -88, -89, -89, -90,
+ -91, -91, -92, -92, -93, -94, -94, -95,
+ -96, -96, -97, -97, -98, -99, -99, -100,
+ -101, -101, -102, -102, -103, -104, -104, -105,
+ -106, -106, -107, -108, -108, -109, -109, -110,
+ -111, -111, -112, -113, -113, -114, -115, -115,
+ -116, -117, -117, -118, -119, -119, -120, -121,
+ -121, -122, -122, -123, -124, -124, -125, -126,
+ -126, -127, -128, -128, -129, -130, -130, -131,
+ -132, -132, -133, -134, -134, -135, -136, -136,
+ -137, -138, -138, -139, -140, -140, -141, -142,
+ -142, -143, -144, -144, -145, -146, -146, -147,
+ -148, -148, -149, -150, -150, -151, -152, -152,
+ -153, -154, -154, -155, -156, -156, -157, -158,
+ -158, -159, -160, -160, -161, -162, -162, -163,
+ -164, -164, -165, -166, -166, -167, -168, -168,
+ -169, -170, -170, -171, -172, -172, -173, -174,
+ -174, -175, -176, -176, -177, -178, -178, -179,
+ -180, -180, -181, -181, -182, -183, -183, -184,
+ -185, -185, -186, -187, -187, -188, -189, -189,
+ -190, -191, -191, -192, -193, -193, -194, -194,
+ -195, -196, -196, -197, -198, -198, -199, -200,
+ -200, -201, -201, -202, -203, -203, -204, -205,
+ -205, -206, -206, -207, -208, -208, -209, -210,
+ -210, -211, -211, -212, -213, -213, -214, -215,
+ -215, -216, -216, -217, -218, -218, -219, -219,
+ -220, -221, -221, -222, -222, -223, -224, -224,
+ -225, -225, -226, -227, -227, -228, -228, -229,
+ -229, -230, -231, -231, -232, -232, -233, -233,
+ -234, -235, -235, -236, -236, -237, -237, -238,
+ -239, -239, -240, -240, -241, -241, -242, -242,
+ -243, -244, -244, -245, -245, -246, -246, -247,
+ -247, -248, -248, -249, -249, -250, -250, -251,
+ -251, -252, -252, -253, -253, -254, -254, -255,
+ -256, -256, -256, -257, -257, -258, -258, -259,
+ -259, -260, -260, -261, -261, -262, -262, -263,
+ -263, -264, -264, -265, -265, -266, -266, -266,
+ -267, -267, -268, -268, -269, -269, -270, -270,
+ -270, -271, -271, -272, -272, -273, -273, -273,
+ -274, -274, -275, -275, -275, -276, -276, -277,
+ -277, -277, -278, -278, -279, -279, -279, -280,
+ -280, -280, -281, -281, -282, -282, -282, -283,
+ -283, -283, -284, -284, -284, -285, -285, -285,
+ -286, -286, -286, -287, -287, -287, -288, -288,
+ -288, -289, -289, -289, -289, -290, -290, -290,
+ -291, -291, -291, -291, -292, -292, -292, -293,
+ -293, -293, -293, -294, -294, -294, -294, -295,
+ -295, -295, -295, -295, -296, -296, -296, -296,
+ -297, -297, -297, -297, -297, -298, -298, -298,
+ -298, -298, -299, -299, -299, -299, -299, -299,
+ -300, -300, -300, -300, -300, -300, -300, -301,
+ -301, -301, -301, -301, -301, -301, -301, -301,
+ -302, -302, -302, -302, -302, -302, -302, -302,
+ -302, -302, -302, -302, -302, -303, -303, -303,
+ -303, -303, -303, -303, -303, -303, -303, -303,
+ -303, -303, -303, -303, -303, -303, -303, -303,
+ -303, -303, -303, -303, -303, -303, -303, -303,
+ -303, -303, -302, -302, -302, -302, -302, -302,
+ -302, -302, -302, -302, -302, -302, -301, -301,
+ -301, -301, -301, -301, -301, -301, -300, -300,
+ -300, -300, -300, -300, -299, -299, -299, -299,
+ -299, -299, -298, -298, -298, -298, -298, -297,
+ -297, -297, -297, -296, -296, -296, -296, -295,
+ -295, -295, -295, -294, -294, -294, -293, -293,
+ -293, -293, -292, -292, -292, -291, -291, -291,
+ -290, -290, -290, -289, -289, -289, -288, -288,
+ -288, -287, -287, -286, -286, -286, -285, -285,
+ -284, -284, -284, -283, -283, -282, -282, -281,
+ -281, -280, -280, -279, -279, -279, -278, -278,
+ -277, -277, -276, -276, -275, -275, -274, -273,
+ -273, -272, -272, -271, -271, -270, -270, -269,
+ -268, -268, -267, -267, -266, -266, -265, -264,
+ -264, -263, -262, -262, -261, -260, -260, -259,
+ -259, -258, -257, -256, -256, -255, -254, -254,
+ -253, -252, -252, -251, -250, -249, -249, -248,
+ -247, -246, -246, -245, -244, -243, -242, -242,
+ -241, -240, -239, -238, -238, -237, -236, -235,
+ -234, -233, -233, -232, -231, -230, -229, -228,
+ -227, -226, -226, -225, -224, -223, -222, -221,
+ -220, -219, -218, -217, -216, -215, -214, -213,
+ -212, -211, -210, -209, -208, -207, -206, -205,
+ -204, -203, -202, -201, -200, -199, -198, -197,
+ -196, -194, -193, -192, -191, -190, -189, -188,
+ -187, -185, -184, -183, -182, -181, -180, -178,
+ -177, -176, -175, -174, -172, -171, -170, -169,
+ -167, -166, -165, -164, -162, -161, -160, -158,
+ -157, -156, -155, -153, -152, -151, -149, -148,
+ -147, -145, -144, -142, -141, -140, -138, -137,
+ -135, -134, -133, -131, -130, -128, -127, -125,
+ -124, -122, -121, -120, -118, -117, -115, -114,
+ -112, -110, -109, -107, -106, -104, -103, -101,
+ -100, -98, -96, -95, -93, -92, -90, -88,
+ -87, -85, -83, -82, -80, -78, -77, -75,
+ -73, -72, -70, -68, -67, -65, -63, -61,
+ -60, -58, -56, -54, -52, -51, -49, -47,
+ -45, -43, -42, -40, -38, -36, -34, -32,
+ -31, -29, -27, -25, -23, -21, -19, -17,
+ -15, -13, -11, -9, -7, -5, -3, -1
+ },
+ {
+ 0, 2, 4, 6, 8, 10, 12, 14,
+ 16, 18, 20, 22, 25, 27, 29, 31,
+ 33, 36, 38, 40, 43, 45, 47, 50,
+ 52, 54, 57, 59, 61, 64, 66, 69,
+ 71, 74, 76, 79, 81, 84, 86, 89,
+ 92, 94, 97, 99, 102, 105, 107, 110,
+ 113, 116, 118, 121, 124, 127, 129, 132,
+ 135, 138, 141, 144, 146, 149, 152, 155,
+ 158, 161, 164, 167, 170, 173, 176, 179,
+ 182, 185, 188, 191, 194, 197, 200, 203,
+ 207, 210, 213, 216, 219, 222, 226, 229,
+ 232, 235, 239, 242, 245, 248, 252, 255,
+ 258, 262, 265, 269, 272, 275, 279, 282,
+ 286, 289, 292, 296, 299, 303, 306, 310,
+ 313, 317, 321, 324, 328, 331, 335, 338,
+ 342, 346, 349, 353, 357, 360, 364, 368,
+ 372, 375, 379, 383, 386, 390, 394, 398,
+ 402, 405, 409, 413, 417, 421, 425, 429,
+ 432, 436, 440, 444, 448, 452, 456, 460,
+ 464, 468, 472, 476, 480, 484, 488, 492,
+ 496, 500, 504, 508, 512, 516, 521, 525,
+ 529, 533, 537, 541, 546, 550, 554, 558,
+ 562, 567, 571, 575, 579, 584, 588, 592,
+ 596, 601, 605, 609, 614, 618, 622, 627,
+ 631, 635, 640, 644, 649, 653, 657, 662,
+ 666, 671, 675, 680, 684, 689, 693, 698,
+ 702, 707, 711, 716, 720, 725, 729, 734,
+ 738, 743, 747, 752, 757, 761, 766, 771,
+ 775, 780, 784, 789, 794, 798, 803, 808,
+ 813, 817, 822, 827, 831, 836, 841, 846,
+ 850, 855, 860, 865, 870, 874, 879, 884,
+ 889, 894, 898, 903, 908, 913, 918, 923,
+ 928, 932, 937, 942, 947, 952, 957, 962,
+ 967, 972, 977, 982, 986, 991, 996, 1001,
+ 1006, 1011, 1016, 1021, 1026, 1031, 1036, 1041,
+ 1046, 1051, 1056, 1062, 1067, 1072, 1077, 1082,
+ 1087, 1092, 1097, 1102, 1107, 1112, 1117, 1122,
+ 1128, 1133, 1138, 1143, 1148, 1153, 1158, 1164,
+ 1169, 1174, 1179, 1184, 1189, 1195, 1200, 1205,
+ 1210, 1215, 1221, 1226, 1231, 1236, 1242, 1247,
+ 1252, 1257, 1262, 1268, 1273, 1278, 1284, 1289,
+ 1294, 1299, 1305, 1310, 1315, 1321, 1326, 1331,
+ 1336, 1342, 1347, 1352, 1358, 1363, 1368, 1374,
+ 1379, 1384, 1390, 1395, 1400, 1406, 1411, 1417,
+ 1422, 1427, 1433, 1438, 1443, 1449, 1454, 1460,
+ 1465, 1470, 1476, 1481, 1487, 1492, 1497, 1503,
+ 1508, 1514, 1519, 1525, 1530, 1535, 1541, 1546,
+ 1552, 1557, 1563, 1568, 1574, 1579, 1585, 1590,
+ 1596, 1601, 1606, 1612, 1617, 1623, 1628, 1634,
+ 1639, 1645, 1650, 1656, 1661, 1667, 1672, 1678,
+ 1683, 1689, 1694, 1700, 1705, 1711, 1716, 1722,
+ 1727, 1733, 1738, 1744, 1749, 1755, 1761, 1766,
+ 1772, 1777, 1783, 1788, 1794, 1799, 1805, 1810,
+ 1816, 1821, 1827, 1832, 1838, 1844, 1849, 1855,
+ 1860, 1866, 1871, 1877, 1882, 1888, 1893, 1899,
+ 1905, 1910, 1916, 1921, 1927, 1932, 1938, 1943,
+ 1949, 1955, 1960, 1966, 1971, 1977, 1982, 1988,
+ 1993, 1999, 2005, 2010, 2016, 2021, 2027, 2032,
+ 2038, 2043, 2049, 2055, 2060, 2066, 2071, 2077,
+ 2082, 2088, 2093, 2099, 2105, 2110, 2116, 2121,
+ 2127, 2132, 2138, 2143, 2149, 2154, 2160, 2165,
+ 2171, 2177, 2182, 2188, 2193, 2199, 2204, 2210,
+ 2215, 2221, 2226, 2232, 2237, 2243, 2248, 2254,
+ 2259, 2265, 2270, 2276, 2281, 2287, 2292, 2298,
+ 2304, 2309, 2314, 2320, 2325, 2331, 2336, 2342,
+ 2347, 2353, 2358, 2364, 2369, 2375, 2380, 2386,
+ 2391, 2397, 2402, 2408, 2413, 2419, 2424, 2429,
+ 2435, 2440, 2446, 2451, 2457, 2462, 2467, 2473,
+ 2478, 2484, 2489, 2495, 2500, 2505, 2511, 2516,
+ 2522, 2527, 2532, 2538, 2543, 2549, 2554, 2559,
+ 2565, 2570, 2575, 2581, 2586, 2591, 2597, 2602,
+ 2607, 2613, 2618, 2623, 2629, 2634, 2639, 2645,
+ 2650, 2655, 2661, 2666, 2671, 2676, 2682, 2687,
+ 2692, 2698, 2703, 2708, 2713, 2719, 2724, 2729,
+ 2734, 2740, 2745, 2750, 2755, 2760, 2766, 2771,
+ 2776, 2781, 2786, 2792, 2797, 2802, 2807, 2812,
+ 2817, 2823, 2828, 2833, 2838, 2843, 2848, 2853,
+ 2859, 2864, 2869, 2874, 2879, 2884, 2889, 2894,
+ 2899, 2904, 2909, 2914, 2919, 2924, 2930, 2935,
+ 2940, 2945, 2950, 2955, 2960, 2965, 2970, 2975,
+ 2980, 2984, 2989, 2994, 2999, 3004, 3009, 3014,
+ 3019, 3024, 3029, 3034, 3039, 3044, 3048, 3053,
+ 3058, 3063, 3068, 3073, 3078, 3082, 3087, 3092,
+ 3097, 3102, 3106, 3111, 3116, 3121, 3126, 3130,
+ 3135, 3140, 3145, 3149, 3154, 3159, 3163, 3168,
+ 3173, 3177, 3182, 3187, 3191, 3196, 3201, 3205,
+ 3210, 3215, 3219, 3224, 3228, 3233, 3238, 3242,
+ 3247, 3251, 3256, 3260, 3265, 3269, 3274, 3279,
+ 3283, 3287, 3292, 3296, 3301, 3305, 3310, 3314,
+ 3319, 3323, 3327, 3332, 3336, 3341, 3345, 3349,
+ 3354, 3358, 3362, 3367, 3371, 3375, 3380, 3384,
+ 3388, 3393, 3397, 3401, 3405, 3410, 3414, 3418,
+ 3422, 3426, 3431, 3435, 3439, 3443, 3447, 3451,
+ 3455, 3460, 3464, 3468, 3472, 3476, 3480, 3484,
+ 3488, 3492, 3496, 3500, 3504, 3508, 3512, 3516,
+ 3520, 3524, 3528, 3532, 3536, 3540, 3544, 3548,
+ 3552, 3555, 3559, 3563, 3567, 3571, 3575, 3578,
+ 3582, 3586, 3590, 3593, 3597, 3601, 3605, 3608,
+ 3612, 3616, 3619, 3623, 3627, 3630, 3634, 3638,
+ 3641, 3645, 3649, 3652, 3656, 3659, 3663, 3666,
+ 3670, 3673, 3677, 3680, 3684, 3687, 3691, 3694,
+ 3698, 3701, 3704, 3708, 3711, 3714, 3718, 3721,
+ 3724, 3728, 3731, 3734, 3738, 3741, 3744, 3747,
+ 3751, 3754, 3757, 3760, 3763, 3767, 3770, 3773,
+ 3776, 3779, 3782, 3785, 3788, 3791, 3794, 3798,
+ 3801, 3804, 3807, 3809, 3812, 3815, 3818, 3821,
+ 3824, 3827, 3830, 3833, 3836, 3839, 3841, 3844,
+ 3847, 3850, 3853, 3855, 3858, 3861, 3864, 3866,
+ 3869, 3872, 3874, 3877, 3880, 3882, 3885, 3887,
+ 3890, 3893, 3895, 3898, 3900, 3903, 3905, 3908,
+ 3910, 3913, 3915, 3917, 3920, 3922, 3925, 3927,
+ 3929, 3932, 3934, 3936, 3939, 3941, 3943, 3945,
+ 3948, 3950, 3952, 3954, 3956, 3958, 3961, 3963,
+ 3965, 3967, 3969, 3971, 3973, 3975, 3977, 3979,
+ 3981, 3983, 3985, 3987, 3989, 3991, 3993, 3994,
+ 3996, 3998, 4000, 4002, 4004, 4005, 4007, 4009,
+ 4011, 4012, 4014, 4016, 4017, 4019, 4021, 4022,
+ 4024, 4025, 4027, 4028, 4030, 4031, 4033, 4034,
+ 4036, 4037, 4039, 4040, 4042, 4043, 4044, 4046,
+ 4047, 4048, 4050, 4051, 4052, 4053, 4055, 4056,
+ 4057, 4058, 4059, 4060, 4062, 4063, 4064, 4065,
+ 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073,
+ 4074, 4075, 4075, 4076, 4077, 4078, 4079, 4079,
+ 4080, 4081, 4082, 4082, 4083, 4084, 4084, 4085,
+ 4086, 4086, 4087, 4087, 4088, 4088, 4089, 4089,
+ 4090, 4090, 4091, 4091, 4092, 4092, 4092, 4093,
+ 4093, 4093, 4094, 4094, 4094, 4094, 4095, 4095,
+ 4095, 4095, 4095, 4095, 4095, 4095, 4095, 4095
+ },
+ {
+ 4096, 4095, 4095, 4095, 4095, 4095, 4095, 4095,
+ 4095, 4095, 4095, 4094, 4094, 4094, 4094, 4093,
+ 4093, 4093, 4092, 4092, 4092, 4091, 4091, 4090,
+ 4090, 4089, 4089, 4088, 4088, 4087, 4087, 4086,
+ 4086, 4085, 4084, 4084, 4083, 4082, 4082, 4081,
+ 4080, 4079, 4079, 4078, 4077, 4076, 4075, 4075,
+ 4074, 4073, 4072, 4071, 4070, 4069, 4068, 4067,
+ 4066, 4065, 4064, 4063, 4062, 4060, 4059, 4058,
+ 4057, 4056, 4055, 4053, 4052, 4051, 4050, 4048,
+ 4047, 4046, 4044, 4043, 4042, 4040, 4039, 4037,
+ 4036, 4034, 4033, 4031, 4030, 4028, 4027, 4025,
+ 4024, 4022, 4021, 4019, 4017, 4016, 4014, 4012,
+ 4011, 4009, 4007, 4005, 4004, 4002, 4000, 3998,
+ 3996, 3994, 3993, 3991, 3989, 3987, 3985, 3983,
+ 3981, 3979, 3977, 3975, 3973, 3971, 3969, 3967,
+ 3965, 3963, 3961, 3958, 3956, 3954, 3952, 3950,
+ 3948, 3945, 3943, 3941, 3939, 3936, 3934, 3932,
+ 3929, 3927, 3925, 3922, 3920, 3917, 3915, 3913,
+ 3910, 3908, 3905, 3903, 3900, 3898, 3895, 3893,
+ 3890, 3887, 3885, 3882, 3880, 3877, 3874, 3872,
+ 3869, 3866, 3864, 3861, 3858, 3855, 3853, 3850,
+ 3847, 3844, 3841, 3839, 3836, 3833, 3830, 3827,
+ 3824, 3821, 3818, 3815, 3812, 3809, 3807, 3804,
+ 3801, 3798, 3794, 3791, 3788, 3785, 3782, 3779,
+ 3776, 3773, 3770, 3767, 3763, 3760, 3757, 3754,
+ 3751, 3747, 3744, 3741, 3738, 3734, 3731, 3728,
+ 3724, 3721, 3718, 3714, 3711, 3708, 3704, 3701,
+ 3698, 3694, 3691, 3687, 3684, 3680, 3677, 3673,
+ 3670, 3666, 3663, 3659, 3656, 3652, 3649, 3645,
+ 3641, 3638, 3634, 3630, 3627, 3623, 3619, 3616,
+ 3612, 3608, 3605, 3601, 3597, 3593, 3590, 3586,
+ 3582, 3578, 3575, 3571, 3567, 3563, 3559, 3555,
+ 3552, 3548, 3544, 3540, 3536, 3532, 3528, 3524,
+ 3520, 3516, 3512, 3508, 3504, 3500, 3496, 3492,
+ 3488, 3484, 3480, 3476, 3472, 3468, 3464, 3460,
+ 3455, 3451, 3447, 3443, 3439, 3435, 3431, 3426,
+ 3422, 3418, 3414, 3410, 3405, 3401, 3397, 3393,
+ 3388, 3384, 3380, 3375, 3371, 3367, 3362, 3358,
+ 3354, 3349, 3345, 3341, 3336, 3332, 3327, 3323,
+ 3319, 3314, 3310, 3305, 3301, 3296, 3292, 3287,
+ 3283, 3279, 3274, 3269, 3265, 3260, 3256, 3251,
+ 3247, 3242, 3238, 3233, 3228, 3224, 3219, 3215,
+ 3210, 3205, 3201, 3196, 3191, 3187, 3182, 3177,
+ 3173, 3168, 3163, 3159, 3154, 3149, 3145, 3140,
+ 3135, 3130, 3126, 3121, 3116, 3111, 3106, 3102,
+ 3097, 3092, 3087, 3082, 3078, 3073, 3068, 3063,
+ 3058, 3053, 3048, 3044, 3039, 3034, 3029, 3024,
+ 3019, 3014, 3009, 3004, 2999, 2994, 2989, 2984,
+ 2980, 2975, 2970, 2965, 2960, 2955, 2950, 2945,
+ 2940, 2935, 2930, 2924, 2919, 2914, 2909, 2904,
+ 2899, 2894, 2889, 2884, 2879, 2874, 2869, 2864,
+ 2859, 2853, 2848, 2843, 2838, 2833, 2828, 2823,
+ 2817, 2812, 2807, 2802, 2797, 2792, 2786, 2781,
+ 2776, 2771, 2766, 2760, 2755, 2750, 2745, 2740,
+ 2734, 2729, 2724, 2719, 2713, 2708, 2703, 2698,
+ 2692, 2687, 2682, 2676, 2671, 2666, 2661, 2655,
+ 2650, 2645, 2639, 2634, 2629, 2623, 2618, 2613,
+ 2607, 2602, 2597, 2591, 2586, 2581, 2575, 2570,
+ 2565, 2559, 2554, 2549, 2543, 2538, 2532, 2527,
+ 2522, 2516, 2511, 2505, 2500, 2495, 2489, 2484,
+ 2478, 2473, 2467, 2462, 2457, 2451, 2446, 2440,
+ 2435, 2429, 2424, 2419, 2413, 2408, 2402, 2397,
+ 2391, 2386, 2380, 2375, 2369, 2364, 2358, 2353,
+ 2347, 2342, 2336, 2331, 2325, 2320, 2314, 2309,
+ 2304, 2298, 2292, 2287, 2281, 2276, 2270, 2265,
+ 2259, 2254, 2248, 2243, 2237, 2232, 2226, 2221,
+ 2215, 2210, 2204, 2199, 2193, 2188, 2182, 2177,
+ 2171, 2165, 2160, 2154, 2149, 2143, 2138, 2132,
+ 2127, 2121, 2116, 2110, 2105, 2099, 2093, 2088,
+ 2082, 2077, 2071, 2066, 2060, 2055, 2049, 2043,
+ 2038, 2032, 2027, 2021, 2016, 2010, 2005, 1999,
+ 1993, 1988, 1982, 1977, 1971, 1966, 1960, 1955,
+ 1949, 1943, 1938, 1932, 1927, 1921, 1916, 1910,
+ 1905, 1899, 1893, 1888, 1882, 1877, 1871, 1866,
+ 1860, 1855, 1849, 1844, 1838, 1832, 1827, 1821,
+ 1816, 1810, 1805, 1799, 1794, 1788, 1783, 1777,
+ 1772, 1766, 1761, 1755, 1749, 1744, 1738, 1733,
+ 1727, 1722, 1716, 1711, 1705, 1700, 1694, 1689,
+ 1683, 1678, 1672, 1667, 1661, 1656, 1650, 1645,
+ 1639, 1634, 1628, 1623, 1617, 1612, 1606, 1601,
+ 1596, 1590, 1585, 1579, 1574, 1568, 1563, 1557,
+ 1552, 1546, 1541, 1535, 1530, 1525, 1519, 1514,
+ 1508, 1503, 1497, 1492, 1487, 1481, 1476, 1470,
+ 1465, 1460, 1454, 1449, 1443, 1438, 1433, 1427,
+ 1422, 1417, 1411, 1406, 1400, 1395, 1390, 1384,
+ 1379, 1374, 1368, 1363, 1358, 1352, 1347, 1342,
+ 1336, 1331, 1326, 1321, 1315, 1310, 1305, 1299,
+ 1294, 1289, 1284, 1278, 1273, 1268, 1262, 1257,
+ 1252, 1247, 1242, 1236, 1231, 1226, 1221, 1215,
+ 1210, 1205, 1200, 1195, 1189, 1184, 1179, 1174,
+ 1169, 1164, 1158, 1153, 1148, 1143, 1138, 1133,
+ 1128, 1122, 1117, 1112, 1107, 1102, 1097, 1092,
+ 1087, 1082, 1077, 1072, 1067, 1062, 1056, 1051,
+ 1046, 1041, 1036, 1031, 1026, 1021, 1016, 1011,
+ 1006, 1001, 996, 991, 986, 982, 977, 972,
+ 967, 962, 957, 952, 947, 942, 937, 932,
+ 928, 923, 918, 913, 908, 903, 898, 894,
+ 889, 884, 879, 874, 870, 865, 860, 855,
+ 850, 846, 841, 836, 831, 827, 822, 817,
+ 813, 808, 803, 798, 794, 789, 784, 780,
+ 775, 771, 766, 761, 757, 752, 747, 743,
+ 738, 734, 729, 725, 720, 716, 711, 707,
+ 702, 698, 693, 689, 684, 680, 675, 671,
+ 666, 662, 657, 653, 649, 644, 640, 635,
+ 631, 627, 622, 618, 614, 609, 605, 601,
+ 596, 592, 588, 584, 579, 575, 571, 567,
+ 562, 558, 554, 550, 546, 541, 537, 533,
+ 529, 525, 521, 516, 512, 508, 504, 500,
+ 496, 492, 488, 484, 480, 476, 472, 468,
+ 464, 460, 456, 452, 448, 444, 440, 436,
+ 432, 429, 425, 421, 417, 413, 409, 405,
+ 402, 398, 394, 390, 386, 383, 379, 375,
+ 372, 368, 364, 360, 357, 353, 349, 346,
+ 342, 338, 335, 331, 328, 324, 321, 317,
+ 313, 310, 306, 303, 299, 296, 292, 289,
+ 286, 282, 279, 275, 272, 269, 265, 262,
+ 258, 255, 252, 248, 245, 242, 239, 235,
+ 232, 229, 226, 222, 219, 216, 213, 210,
+ 207, 203, 200, 197, 194, 191, 188, 185,
+ 182, 179, 176, 173, 170, 167, 164, 161,
+ 158, 155, 152, 149, 146, 144, 141, 138,
+ 135, 132, 129, 127, 124, 121, 118, 116,
+ 113, 110, 107, 105, 102, 99, 97, 94,
+ 92, 89, 86, 84, 81, 79, 76, 74,
+ 71, 69, 66, 64, 61, 59, 57, 54,
+ 52, 50, 47, 45, 43, 40, 38, 36,
+ 33, 31, 29, 27, 25, 22, 20, 18,
+ 16, 14, 12, 10, 8, 6, 4, 2
+ },
+ {
+ 0, -1, -3, -5, -7, -9, -11, -13,
+ -15, -17, -19, -20, -23, -25, -27, -28,
+ -30, -33, -34, -36, -39, -40, -42, -43,
+ -45, -46, -49, -50, -52, -54, -56, -58,
+ -60, -61, -62, -65, -66, -68, -70, -72,
+ -73, -74, -77, -78, -80, -82, -83, -85,
+ -87, -89, -90, -92, -93, -95, -96, -98,
+ -100, -102, -103, -105, -106, -107, -108, -110,
+ -112, -114, -116, -116, -118, -120, -122, -122,
+ -124, -126, -127, -128, -130, -131, -133, -133,
+ -136, -137, -138, -139, -141, -142, -144, -145,
+ -147, -147, -150, -151, -151, -153, -155, -156,
+ -157, -159, -160, -161, -163, -164, -165, -166,
+ -168, -168, -170, -171, -172, -174, -174, -176,
+ -177, -178, -180, -181, -182, -183, -184, -185,
+ -187, -188, -189, -190, -191, -192, -193, -195,
+ -196, -196, -198, -199, -200, -200, -202, -204,
+ -204, -205, -206, -207, -208, -209, -211, -212,
+ -212, -213, -214, -215, -216, -217, -218, -220,
+ -220, -221, -222, -223, -224, -225, -225, -227,
+ -227, -228, -229, -230, -230, -231, -233, -234,
+ -234, -235, -235, -237, -238, -239, -239, -240,
+ -240, -242, -242, -243, -243, -245, -246, -247,
+ -247, -249, -248, -249, -250, -251, -251, -253,
+ -253, -253, -255, -255, -256, -256, -257, -258,
+ -259, -259, -260, -261, -261, -262, -262, -264,
+ -263, -265, -265, -265, -266, -267, -267, -268,
+ -269, -269, -269, -270, -271, -271, -272, -273,
+ -273, -273, -274, -274, -276, -275, -276, -277,
+ -277, -278, -278, -278, -279, -279, -280, -281,
+ -280, -281, -282, -283, -283, -282, -284, -284,
+ -284, -285, -285, -286, -286, -286, -287, -287,
+ -288, -288, -288, -289, -289, -289, -290, -290,
+ -290, -291, -291, -292, -291, -291, -292, -292,
+ -292, -293, -293, -293, -294, -294, -295, -295,
+ -294, -295, -295, -296, -297, -297, -297, -297,
+ -297, -297, -298, -298, -297, -298, -298, -298,
+ -299, -299, -300, -299, -299, -300, -299, -300,
+ -301, -300, -300, -301, -300, -301, -301, -301,
+ -301, -301, -302, -301, -302, -301, -302, -302,
+ -302, -302, -302, -302, -302, -302, -303, -302,
+ -303, -302, -303, -303, -302, -303, -303, -303,
+ -302, -303, -303, -302, -303, -303, -302, -303,
+ -303, -302, -303, -303, -302, -303, -303, -303,
+ -303, -302, -303, -303, -302, -302, -302, -303,
+ -302, -302, -302, -301, -303, -302, -301, -302,
+ -301, -301, -301, -302, -301, -301, -301, -300,
+ -301, -300, -300, -300, -300, -299, -300, -299,
+ -300, -300, -299, -300, -299, -299, -299, -299,
+ -298, -299, -298, -297, -297, -297, -296, -297,
+ -296, -296, -296, -296, -295, -296, -295, -296,
+ -295, -294, -294, -294, -293, -294, -294, -293,
+ -293, -292, -293, -292, -292, -292, -291, -290,
+ -291, -290, -291, -289, -289, -290, -289, -289,
+ -288, -288, -288, -288, -286, -287, -286, -286,
+ -286, -285, -286, -284, -284, -284, -284, -283,
+ -283, -283, -282, -282, -282, -281, -280, -281,
+ -279, -280, -280, -278, -279, -278, -278, -277,
+ -278, -276, -276, -277, -275, -276, -274, -275,
+ -274, -273, -273, -272, -273, -272, -272, -271,
+ -270, -270, -269, -269, -269, -268, -268, -267,
+ -267, -266, -266, -266, -265, -265, -264, -264,
+ -263, -263, -262, -262, -261, -261, -260, -260,
+ -259, -259, -258, -258, -257, -257, -256, -256,
+ -256, -255, -254, -254, -253, -253, -252, -252,
+ -251, -251, -250, -250, -249, -249, -248, -248,
+ -247, -247, -246, -246, -245, -245, -244, -244,
+ -243, -242, -242, -241, -241, -240, -239, -239,
+ -239, -238, -238, -237, -237, -235, -235, -235,
+ -234, -234, -232, -233, -232, -232, -231, -229,
+ -230, -229, -228, -228, -227, -226, -227, -225,
+ -224, -225, -223, -223, -222, -222, -221, -221,
+ -220, -219, -219, -218, -218, -216, -217, -216,
+ -215, -215, -214, -213, -212, -213, -211, -211,
+ -210, -210, -209, -209, -208, -206, -207, -206,
+ -205, -204, -204, -204, -203, -202, -202, -200,
+ -200, -200, -200, -198, -197, -197, -196, -195,
+ -195, -195, -194, -194, -192, -192, -191, -191,
+ -189, -189, -188, -188, -187, -186, -186, -186,
+ -185, -185, -183, -183, -182, -182, -181, -181,
+ -180, -178, -178, -177, -177, -176, -176, -174,
+ -174, -173, -173, -172, -172, -172, -170, -170,
+ -168, -168, -167, -167, -167, -165, -165, -164,
+ -164, -164, -162, -162, -161, -160, -160, -158,
+ -158, -158, -157, -156, -155, -155, -154, -153,
+ -153, -152, -151, -151, -150, -149, -149, -148,
+ -147, -147, -146, -146, -144, -144, -144, -142,
+ -142, -141, -142, -140, -140, -139, -138, -138,
+ -137, -136, -136, -134, -134, -133, -134, -132,
+ -132, -131, -130, -130, -128, -128, -128, -127,
+ -127, -126, -124, -124, -124, -123, -123, -122,
+ -121, -120, -120, -119, -118, -118, -117, -117,
+ -116, -115, -115, -115, -114, -113, -111, -111,
+ -110, -110, -109, -109, -108, -107, -107, -106,
+ -105, -104, -104, -103, -102, -103, -102, -101,
+ -101, -100, -99, -99, -98, -97, -97, -96,
+ -96, -95, -94, -94, -93, -92, -92, -91,
+ -91, -90, -89, -88, -88, -88, -87, -86,
+ -85, -86, -84, -84, -83, -82, -82, -81,
+ -81, -80, -80, -78, -79, -77, -77, -77,
+ -76, -76, -75, -74, -74, -73, -72, -72,
+ -72, -71, -70, -70, -69, -68, -68, -68,
+ -66, -67, -66, -65, -65, -65, -63, -63,
+ -62, -62, -61, -61, -60, -60, -60, -58,
+ -58, -58, -56, -56, -56, -55, -54, -55,
+ -54, -54, -53, -52, -51, -51, -51, -50,
+ -49, -49, -49, -49, -48, -47, -46, -46,
+ -46, -46, -45, -43, -43, -43, -43, -42,
+ -42, -42, -40, -40, -40, -39, -39, -38,
+ -38, -38, -37, -37, -36, -36, -35, -35,
+ -34, -35, -34, -33, -33, -32, -32, -31,
+ -31, -31, -30, -29, -29, -29, -28, -27,
+ -28, -28, -27, -26, -26, -25, -25, -25,
+ -24, -24, -24, -23, -23, -22, -22, -22,
+ -21, -21, -20, -20, -20, -20, -19, -18,
+ -19, -18, -18, -17, -18, -17, -16, -17,
+ -16, -15, -15, -15, -14, -14, -15, -13,
+ -13, -13, -13, -12, -12, -11, -12, -11,
+ -12, -10, -10, -10, -10, -10, -9, -10,
+ -9, -9, -9, -8, -8, -7, -8, -7,
+ -7, -7, -6, -6, -6, -7, -6, -6,
+ -5, -5, -5, -5, -5, -4, -4, -5,
+ -4, -4, -3, -3, -3, -3, -3, -2,
+ -3, -2, -2, -2, -1, -2, -1, -2,
+ -1, -1, -1, -1, -1, 0, -1, 0,
+ -1, -1, 0, 0, -1, 0, 0, -1,
+ 1, 1, 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+ }
+};
+#else /* defined(CONFIG_CSI2_PLUS) */
+static const int zoom_table[4][HRT_GDC_N] = {
+ {
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
+ -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4
+ },
+ {
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4,
+ 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4,
+ 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
+ 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
+ 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
+ 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
+ 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
+ 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
+ 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
+ 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
+ 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
+ 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
+ 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
+ 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
+ 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
+ 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
+ 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
+ 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
+ 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
+ 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
+ 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
+ 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
+ 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
+ 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
+ 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
+ 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
+ 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
+ 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
+ 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
+ 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
+ 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
+ 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
+ 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
+ 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
+ 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
+ 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
+ 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
+ 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
+ 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
+ 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
+ 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
+ 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
+ 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
+ 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
+ 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
+ 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
+ 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
+ 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
+ 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
+ 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
+ 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
+ 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
+ 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
+ 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
+ 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
+ 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
+ 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
+ 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
+ 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
+ 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
+ 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
+ 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
+ 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
+ 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
+ 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
+ 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
+ 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
+ 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
+ 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
+ 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
+ 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
+ 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
+ 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
+ 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
+ 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
+ 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
+ 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
+ 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
+ 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
+ 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
+ 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
+ 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
+ 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
+ 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
+ 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
+ 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
+ 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
+ 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
+ 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
+ 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
+ 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
+ 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
+ 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
+ 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
+ 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
+ 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
+ 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
+ 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
+ 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
+ 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
+ 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
+ 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
+ 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
+ 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
+ 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
+ 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
+ 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
+ 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
+ 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
+ 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
+ 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
+ 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
+ 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
+ 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
+ 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
+ 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
+ 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
+ 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
+ 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
+ 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
+ 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
+ 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4
+ },
+ {
+ 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4,
+ 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
+ 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
+ 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
+ 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
+ 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
+ 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
+ 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
+ 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
+ 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
+ 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
+ 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
+ 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
+ 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
+ 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
+ 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
+ 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
+ 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
+ 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
+ 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
+ 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
+ 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
+ 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
+ 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
+ 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
+ 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
+ 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
+ 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
+ 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
+ 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
+ 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
+ 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
+ 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
+ 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
+ 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
+ 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
+ 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
+ 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
+ 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
+ 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
+ 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
+ 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
+ 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
+ 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
+ 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
+ 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
+ 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
+ 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
+ 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
+ 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
+ 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
+ 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
+ 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
+ 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
+ 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
+ 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
+ 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
+ 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
+ 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
+ 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
+ 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
+ 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
+ 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
+ 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
+ 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
+ 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
+ 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
+ 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
+ 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
+ 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
+ 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
+ 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
+ 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
+ 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
+ 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
+ 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
+ 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
+ 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
+ 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
+ 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
+ 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
+ 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
+ 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
+ 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
+ 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
+ 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
+ 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
+ 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
+ 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
+ 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
+ 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
+ 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
+ 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
+ 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
+ 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
+ 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
+ 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
+ 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
+ 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
+ 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
+ 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
+ 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
+ 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
+ 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
+ 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
+ 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
+ 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
+ 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
+ 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
+ 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
+ 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
+ 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
+ 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
+ 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
+ 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
+ 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
+ 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
+ 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
+ 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
+ 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
+ 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
+ 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4,
+ 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4
+ },
+ {
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
+ -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4,
+ 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4
+ }
+};
+#endif
+#else
+#error "sh_css_params.c: GDC version must be \
+one of {GDC_VERSION_2}"
+#endif
+
+static const struct ia_css_dz_config default_dz_config = {
+ HRT_GDC_N,
+ HRT_GDC_N,
+ {
+		{0, 0},
+		{0, 0},
+ }
+};
+
+static const struct ia_css_vector default_motion_config = {
+ 0,
+ 0
+};
+
+/* ------ deprecated(bz675) : from ------ */
+static const struct ia_css_shading_settings default_shading_settings = {
+ 1 /* enable shading table conversion in the css
+ (This matches the legacy way.) */
+};
+
+/* ------ deprecated(bz675) : to ------ */
+
+struct ia_css_isp_skc_dvs_statistics {
+ ia_css_ptr p_data;
+};
+
+static enum ia_css_err
+ref_sh_css_ddr_address_map(
+ struct sh_css_ddr_address_map *map,
+ struct sh_css_ddr_address_map *out);
+
+static enum ia_css_err
+write_ia_css_isp_parameter_set_info_to_ddr(
+ struct ia_css_isp_parameter_set_info *me,
+ hrt_vaddress *out);
+
+static enum ia_css_err
+free_ia_css_isp_parameter_set_info(hrt_vaddress ptr);
+
+static enum ia_css_err
+sh_css_params_write_to_ddr_internal(
+ struct ia_css_pipe *pipe,
+ unsigned int pipe_id,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *stage,
+ struct sh_css_ddr_address_map *ddr_map,
+ struct sh_css_ddr_address_map_size *ddr_map_size);
+
+static enum ia_css_err
+sh_css_create_isp_params(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters **isp_params_out);
+
+static bool
+sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ bool use_default_config,
+ struct ia_css_pipe *pipe_in);
+
+static enum ia_css_err
+sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe_in);
+
+static enum ia_css_err
+sh_css_set_global_isp_config_on_pipe(
+ struct ia_css_pipe *curr_pipe,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe);
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+static enum ia_css_err
+sh_css_set_per_frame_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe);
+#endif
+
+static enum ia_css_err
+sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ struct ia_css_resolution pipe_in_res,
+ bool enable_zoom);
+
+hrt_vaddress
+sh_css_params_ddr_address_map(void)
+{
+ return sp_ddr_ptrs;
+}
+
+/* ****************************************************
+ * Each coefficient is stored as 7 bits so that two of them fit into one
+ * ISP vector element; four coefficients are therefore stored in every
+ * 32-bit memory word.
+ *
+ * 0: Coefficient 0 used bits
+ * 1: Coefficient 1 used bits
+ * 2: Coefficient 2 used bits
+ * 3: Coefficient 3 used bits
+ * x: not used
+ *
+ * xx33333332222222 | xx11111110000000
+ *
+ * ***************************************************
+ */
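+/*
+ * Worked example (hypothetical coefficient values, not taken from any
+ * real table): packing c0 = 3, c1 = 5, c2 = 7, c3 = 9 gives
+ *   3 << 0 | 5 << 7 | 7 << 16 | 9 << 23 = 0x04870283
+ * so c0/c1 occupy bits 0..13 and c2/c3 bits 16..29 of the 32-bit word,
+ * exactly as the loop in convert_allocate_fpntbl() below computes it.
+ */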
+static struct ia_css_host_data *
+convert_allocate_fpntbl(struct ia_css_isp_parameters *params)
+{
+ unsigned int i, j;
+ short *data_ptr;
+ struct ia_css_host_data *me;
+ unsigned int isp_format_data_size;
+ u32 *isp_format_data_ptr;
+
+ assert(params);
+
+ data_ptr = params->fpn_config.data;
+ isp_format_data_size = params->fpn_config.height * params->fpn_config.width *
+ sizeof(uint32_t);
+
+ me = ia_css_host_data_allocate(isp_format_data_size);
+
+ if (!me)
+ return NULL;
+
+ isp_format_data_ptr = (uint32_t *)me->address;
+
+ for (i = 0; i < params->fpn_config.height; i++) {
+ for (j = 0;
+ j < params->fpn_config.width;
+ j += 4, data_ptr += 4, isp_format_data_ptr++) {
+ int data = data_ptr[0] << 0 |
+ data_ptr[1] << 7 |
+ data_ptr[2] << 16 |
+ data_ptr[3] << 23;
+ *isp_format_data_ptr = data;
+ }
+ }
+ return me;
+}
+
+static enum ia_css_err
+store_fpntbl(struct ia_css_isp_parameters *params, hrt_vaddress ptr) {
+ struct ia_css_host_data *isp_data;
+
+ assert(params);
+ assert(ptr != mmgr_NULL);
+
+ isp_data = convert_allocate_fpntbl(params);
+ if (!isp_data)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ ia_css_params_store_ia_css_host_data(ptr, isp_data);
+
+ ia_css_host_data_free(isp_data);
+ return IA_CSS_SUCCESS;
+}
+
+static void
+convert_raw_to_fpn(struct ia_css_isp_parameters *params)
+{
+ int maxval = 0;
+ unsigned int i;
+
+ assert(params);
+
+ /* Find the maximum value in the table */
+ for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++) {
+ int val = params->fpn_config.data[i];
+ /* Make sure FPN value can be represented in 13-bit unsigned
+ * number (ISP precision - 1), but note that actual input range
+ * depends on precision of input frame data.
+ */
+ if (val < 0) {
+ /* Checkpatch patch */
+ val = 0;
+ } else if (val >= (1 << 13)) {
+ /* Checkpatch patch */
+ /* MW: BUG, is "13" a system or application property */
+ val = (1 << 13) - 1;
+ }
+ maxval = max(maxval, val);
+ }
+	/* Find the lowest shift value such that maxval >> shift fits in the
+	 * range 0..63, i.e. the original range 0..maxval is remapped to
+	 * 0..63 after shifting.
+	 */
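+	/* Example: maxval == 200 shifts as 200 -> 100 -> 50, so shift
+	 * becomes 2 and every table entry is later divided by 4 to fit
+	 * the 0..63 range.
+	 */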
+ params->fpn_config.shift = 0;
+ while (maxval > 63) {
+ /* MW: BUG, is "63" a system or application property */
+ maxval >>= 1;
+ params->fpn_config.shift++;
+ }
+ /* Adjust the values in the table for the shift value */
+ for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++)
+ ((unsigned short *)params->fpn_config.data)[i] >>= params->fpn_config.shift;
+}
+
+static void
+ia_css_process_kernel(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ void (*process)(unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params))
+{
+ int i;
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+ struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe);
+ struct ia_css_pipeline_stage *stage;
+
+ /* update the other buffers to the pipe specific copies */
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+			if (!stage->binary)
+				continue;
+ process(pipeline->pipe_id, stage, params);
+ }
+ }
+}
+
+static enum ia_css_err
+sh_css_select_dp_10bpp_config(const struct ia_css_pipe *pipe,
+ bool *is_dp_10bpp) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+	/* Currently we check whether the 10bpp DPC configuration is required
+	 * based on the use case, i.e. whether BDS and DPC are both enabled.
+	 * The cleaner design choice would be to expose the type of DPC
+	 * (either 10bpp or 13bpp) via the binary info, but the current
+	 * control flow does not allow this: the configuration is set before
+	 * a binary is selected, so the binary info is not yet available.
+	 */
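+	/* Illustrative reading of the rule below (an inference from the
+	 * code, not a spec): DPC enabled plus a non-zero bayer_ds_out_res
+	 * that implies a BDS factor other than 1.00 selects the 10bpp
+	 * configuration; every other case leaves *is_dp_10bpp false.
+	 */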
+ if ((!pipe) || (!is_dp_10bpp))
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ } else
+ {
+ *is_dp_10bpp = false;
+
+ /* check if DPC is enabled from the host */
+ if (pipe->config.enable_dpc) {
+ /*check if BDS is enabled*/
+ unsigned int required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
+
+ if ((pipe->config.bayer_ds_out_res.width != 0) &&
+ (pipe->config.bayer_ds_out_res.height != 0)) {
+ if (IA_CSS_SUCCESS == binarydesc_calculate_bds_factor(
+ pipe->config.input_effective_res,
+ pipe->config.bayer_ds_out_res,
+ &required_bds_factor)) {
+ if (required_bds_factor != SH_CSS_BDS_FACTOR_1_00) {
+ /*we use 10bpp BDS configuration*/
+ *is_dp_10bpp = true;
+ }
+ }
+ }
+ }
+ }
+
+ return err;
+}
+
+enum ia_css_err
+sh_css_set_black_frame(struct ia_css_stream *stream,
+ const struct ia_css_frame *raw_black_frame) {
+ struct ia_css_isp_parameters *params;
+ /* this function desperately needs to be moved to the ISP or SP such
+ * that it can use the DMA.
+ */
+ unsigned int height, width, y, x, k, data;
+ hrt_vaddress ptr;
+
+ assert(stream);
+ assert(raw_black_frame);
+
+ params = stream->isp_params_configs;
+ height = raw_black_frame->info.res.height;
+	width = raw_black_frame->info.padded_width;
+
+ ptr = raw_black_frame->data
+ + raw_black_frame->planes.raw.offset;
+
+ IA_CSS_ENTER_PRIVATE("black_frame=%p", raw_black_frame);
+
+ if (params->fpn_config.data &&
+ (params->fpn_config.width != width || params->fpn_config.height != height))
+ {
+ sh_css_free(params->fpn_config.data);
+ params->fpn_config.data = NULL;
+ }
+ if (!params->fpn_config.data)
+ {
+ params->fpn_config.data = sh_css_malloc(height * width * sizeof(short));
+ if (!params->fpn_config.data) {
+ IA_CSS_ERROR("out of memory");
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ params->fpn_config.width = width;
+ params->fpn_config.height = height;
+ params->fpn_config.shift = 0;
+ }
+
+ /* store raw to fpntbl */
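+	/* Layout note (inferred from the loops below, not from
+	 * documentation): each 32-bit word loaded from the frame carries two
+	 * 16-bit samples; the first inner loop fills the even table offsets
+	 * (0, 2, ...) and the second fills the odd ones (1, 3, ...),
+	 * de-interleaving the ISP vector ordering.
+	 */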
+ for (y = 0; y < height; y++)
+ {
+ for (x = 0; x < width; x += (ISP_VEC_NELEMS * 2)) {
+ int ofs = y * width + x;
+
+ for (k = 0; k < ISP_VEC_NELEMS; k += 2) {
+ mmgr_load(ptr, (void *)(&data), sizeof(int));
+ params->fpn_config.data[ofs + 2 * k] =
+ (short)(data & 0xFFFF);
+ params->fpn_config.data[ofs + 2 * k + 2] =
+ (short)((data >> 16) & 0xFFFF);
+ ptr += sizeof(int); /* byte system address */
+ }
+ for (k = 0; k < ISP_VEC_NELEMS; k += 2) {
+ mmgr_load(ptr, (void *)(&data), sizeof(int));
+ params->fpn_config.data[ofs + 2 * k + 1] =
+ (short)(data & 0xFFFF);
+ params->fpn_config.data[ofs + 2 * k + 3] =
+ (short)((data >> 16) & 0xFFFF);
+ ptr += sizeof(int); /* byte system address */
+ }
+ }
+ }
+
+ /* raw -> fpn */
+ convert_raw_to_fpn(params);
+
+ /* overwrite isp parameter */
+ ia_css_process_kernel(stream, params, ia_css_kernel_process_param[IA_CSS_FPN_ID]);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+
+ return IA_CSS_SUCCESS;
+}
+
+bool
+sh_css_params_set_binning_factor(struct ia_css_stream *stream,
+ unsigned int binning_fact)
+{
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(stream);
+
+ params = stream->isp_params_configs;
+
+ if (params->sensor_binning != binning_fact) {
+ params->sensor_binning = binning_fact;
+ params->sc_table_changed = true;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+
+ return params->sc_table_changed;
+}
+
+static void
+sh_css_update_shading_table_status(struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params)
+{
+ if (params && pipe && (pipe->pipe_num != params->sc_table_last_pipe_num)) {
+ params->sc_table_dirty = true;
+ params->sc_table_last_pipe_num = pipe->pipe_num;
+ }
+}
+
+static void
+sh_css_set_shading_table(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_shading_table *table)
+{
+ IA_CSS_ENTER_PRIVATE("");
+ if (!table)
+ return;
+ assert(stream);
+
+ if (!table->enable)
+ table = NULL;
+
+ if ((table != params->sc_table) || params->sc_table_dirty) {
+ params->sc_table = table;
+ params->sc_table_changed = true;
+ params->sc_table_dirty = false;
+		/* Not very clean: this goes to sh_css.c to invalidate the
+		 * shading table for all pipes. It should be replaced by a loop
+		 * and a pipe-specific call.
+ */
+ if (!params->output_frame)
+ sh_css_invalidate_shading_tables(stream);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+ia_css_params_store_ia_css_host_data(
+ hrt_vaddress ddr_addr,
+ struct ia_css_host_data *data)
+{
+ assert(data);
+ assert(data->address);
+ assert(ddr_addr != mmgr_NULL);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ mmgr_store(ddr_addr,
+ (void *)(data->address),
+ (size_t)data->size);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+struct ia_css_host_data *
+ia_css_params_alloc_convert_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ const struct ia_css_shading_table *shading_table)
+{
+ const struct ia_css_binary *binary = stage->binary;
+ struct ia_css_host_data *sctbl;
+ unsigned int i, j, aligned_width, row_padding;
+ unsigned int sctbl_size;
+ short int *ptr;
+
+ assert(binary);
+ assert(shading_table);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (!shading_table) {
+ IA_CSS_LEAVE_PRIVATE("void");
+ return NULL;
+ }
+
+ aligned_width = binary->sctbl_aligned_width_per_color;
+ row_padding = aligned_width - shading_table->width;
+ sctbl_size = shading_table->height * IA_CSS_SC_NUM_COLORS * aligned_width *
+ sizeof(short);
+
+ sctbl = ia_css_host_data_allocate((size_t)sctbl_size);
+
+ if (!sctbl)
+ return NULL;
+ ptr = (short int *)sctbl->address;
+ memset(ptr,
+ 0,
+ sctbl_size);
+
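+	/* The loop below lays the table out row by row: for every input row,
+	 * all IA_CSS_SC_NUM_COLORS planes are copied back to back, each
+	 * padded to aligned_width entries (the padding stays zero thanks to
+	 * the memset above).
+	 */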
+ for (i = 0; i < shading_table->height; i++) {
+ for (j = 0; j < IA_CSS_SC_NUM_COLORS; j++) {
+ memcpy(ptr,
+ &shading_table->data[j]
+ [i * shading_table->width],
+ shading_table->width * sizeof(short));
+ ptr += aligned_width;
+ }
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+ return sctbl;
+}
+
+enum ia_css_err ia_css_params_store_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ hrt_vaddress sc_tbl,
+ const struct ia_css_shading_table *sc_config)
+{
+ struct ia_css_host_data *isp_sc_tbl;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (!sc_config) {
+ IA_CSS_LEAVE_PRIVATE("void");
+ return IA_CSS_SUCCESS;
+ }
+
+ isp_sc_tbl = ia_css_params_alloc_convert_sctbl(stage, sc_config);
+ if (!isp_sc_tbl) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ /* store the shading table to ddr */
+ ia_css_params_store_ia_css_host_data(sc_tbl, isp_sc_tbl);
+ ia_css_host_data_free(isp_sc_tbl);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+
+ return IA_CSS_SUCCESS;
+}
+
+static void
+sh_css_enable_pipeline(const struct ia_css_binary *binary)
+{
+ if (!binary)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ ia_css_isp_param_enable_pipeline(&binary->mem_params);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static enum ia_css_err
+ia_css_process_zoom_and_motion(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *first_stage) {
+ /* first_stage can be NULL */
+ const struct ia_css_pipeline_stage *stage;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_resolution pipe_in_res;
+
+ pipe_in_res.width = 0;
+ pipe_in_res.height = 0;
+
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+	/* Go through all stages to update uds and cropping */
+ for (stage = first_stage; stage; stage = stage->next)
+ {
+ struct ia_css_binary *binary;
+		/* Note: the variable below is made static as it is quite
+		 * large; if it were not static it would end up on the stack,
+		 * which could cause issues for drivers.
+		 */
+ static struct ia_css_binary tmp_binary;
+
+ const struct ia_css_binary_xinfo *info = NULL;
+
+ binary = stage->binary;
+ if (binary) {
+ info = binary->info;
+ } else {
+ const struct sh_css_binary_args *args = &stage->args;
+ const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+
+ if (args->out_frame[0])
+ out_infos[0] = &args->out_frame[0]->info;
+ info = &stage->firmware->info.isp;
+ ia_css_binary_fill_info(info, false, false,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ args->in_frame ? &args->in_frame->info : NULL,
+ NULL,
+ out_infos,
+ args->out_vf_frame ? &args->out_vf_frame->info
+ : NULL,
+ &tmp_binary,
+ NULL,
+ -1, true);
+ binary = &tmp_binary;
+ binary->info = info;
+ }
+
+ if (stage == first_stage) {
+ /* we will use pipe_in_res to scale the zoom crop region if needed */
+ pipe_in_res = binary->effective_in_frame_res;
+ }
+
+ assert(stage->stage_num < SH_CSS_MAX_STAGES);
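+		/* A zero-sized zoom region selects the whole-frame zoom path
+		 * below; a non-zero region uses the variant that scales the
+		 * crop against pipe_in_res captured at the first stage.
+		 */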
+ if (params->dz_config.zoom_region.resolution.width == 0 &&
+ params->dz_config.zoom_region.resolution.height == 0) {
+ sh_css_update_uds_and_crop_info(
+ &info->sp,
+ &binary->in_frame_info,
+ &binary->out_frame_info[0],
+ &binary->dvs_envelope,
+ &params->dz_config,
+ &params->motion_config,
+ &params->uds[stage->stage_num].uds,
+ &params->uds[stage->stage_num].crop_pos,
+ stage->enable_zoom);
+ } else {
+ err = sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ &info->sp,
+ &binary->in_frame_info,
+ &binary->out_frame_info[0],
+ &binary->dvs_envelope,
+ &params->dz_config,
+ &params->motion_config,
+ &params->uds[stage->stage_num].uds,
+ &params->uds[stage->stage_num].crop_pos,
+ pipe_in_res,
+ stage->enable_zoom);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ }
+ params->isp_params_changed = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+ return err;
+}
+
+static void
+sh_css_set_gamma_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_gamma_table *table)
+{
+ if (!table)
+ return;
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ params->gc_table = *table;
+ params->config_changed[IA_CSS_GC_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_gamma_table(const struct ia_css_isp_parameters *params,
+ struct ia_css_gamma_table *table)
+{
+ if (!table)
+ return;
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ *table = params->gc_table;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_ctc_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ params->ctc_table = *table;
+ params->config_changed[IA_CSS_CTC_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_ctc_table(const struct ia_css_isp_parameters *params,
+ struct ia_css_ctc_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ *table = params->ctc_table;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_macc_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ params->macc_table = *table;
+ params->config_changed[IA_CSS_MACC_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_macc_table(const struct ia_css_isp_parameters *params,
+ struct ia_css_macc_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ *table = params->macc_table;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void ia_css_morph_table_free(
+ struct ia_css_morph_table *me)
+{
+ unsigned int i;
+
+ if (!me)
+ return;
+
+ IA_CSS_ENTER("");
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ if (me->coordinates_x[i]) {
+ sh_css_free(me->coordinates_x[i]);
+ me->coordinates_x[i] = NULL;
+ }
+ if (me->coordinates_y[i]) {
+ sh_css_free(me->coordinates_y[i]);
+ me->coordinates_y[i] = NULL;
+ }
+ }
+
+ sh_css_free(me);
+ IA_CSS_LEAVE("void");
+}
+
+struct ia_css_morph_table *ia_css_morph_table_allocate(
+ unsigned int width,
+ unsigned int height)
+{
+ unsigned int i;
+ struct ia_css_morph_table *me;
+
+ IA_CSS_ENTER("");
+
+ me = sh_css_malloc(sizeof(*me));
+ if (!me) {
+ IA_CSS_ERROR("out of memory");
+ return me;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ me->coordinates_x[i] = NULL;
+ me->coordinates_y[i] = NULL;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ me->coordinates_x[i] =
+ sh_css_malloc(height * width *
+ sizeof(*me->coordinates_x[i]));
+ me->coordinates_y[i] =
+ sh_css_malloc(height * width *
+ sizeof(*me->coordinates_y[i]));
+
+ if ((!me->coordinates_x[i]) ||
+ (!me->coordinates_y[i])) {
+ ia_css_morph_table_free(me);
+ me = NULL;
+ return me;
+ }
+ }
+ me->width = width;
+ me->height = height;
+ IA_CSS_LEAVE("");
+ return me;
+}
+
+static enum ia_css_err sh_css_params_default_morph_table(
+ struct ia_css_morph_table **table,
+ const struct ia_css_binary *binary)
+{
+ /* MW 2400 advanced requires different scaling */
+ unsigned int i, j, k, step, width, height;
+ short start_x[IA_CSS_MORPH_TABLE_NUM_PLANES] = { -8, 0, -8, 0, 0, -8 },
+ start_y[IA_CSS_MORPH_TABLE_NUM_PLANES] = { 0, 0, -8, -8, -8, 0 };
+ struct ia_css_morph_table *tab;
+
+ assert(table);
+ assert(binary);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+	step = (ISP_VEC_NELEMS / 16) * 128;
+	width = binary->morph_tbl_width;
+	height = binary->morph_tbl_height;
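+	/* step is the fixed-point spacing between grid points; assuming, for
+	 * illustration only, ISP_VEC_NELEMS == 64 it works out to
+	 * (64 / 16) * 128 = 512 per output element.
+	 */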
+
+ tab = ia_css_morph_table_allocate(width, height);
+ if (!tab)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ short val_y = start_y[i];
+
+ for (j = 0; j < height; j++) {
+ short val_x = start_x[i];
+ unsigned short *x_ptr, *y_ptr;
+
+ x_ptr = &tab->coordinates_x[i][j * width];
+ y_ptr = &tab->coordinates_y[i][j * width];
+ for (k = 0; k < width;
+ k++, x_ptr++, y_ptr++, val_x += (short)step) {
+ if (k == 0)
+ *x_ptr = 0;
+ else if (k == width - 1)
+ *x_ptr = val_x + 2 * start_x[i];
+ else
+ *x_ptr = val_x;
+ if (j == 0)
+ *y_ptr = 0;
+ else
+ *y_ptr = val_y;
+ }
+ val_y += (short)step;
+ }
+ }
+ *table = tab;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+
+ return IA_CSS_SUCCESS;
+}
+
+static void
+sh_css_set_morph_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_morph_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+	if (!table->enable)
+ table = NULL;
+ params->morph_table = table;
+ params->morph_table_changed = true;
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+ia_css_translate_3a_statistics(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics_map *isp_stats)
+{
+ IA_CSS_ENTER("");
+ if (host_stats->grid.use_dmem) {
+ IA_CSS_LOG("3A: DMEM");
+ ia_css_s3a_dmem_decode(host_stats, isp_stats->dmem_stats);
+ } else {
+ IA_CSS_LOG("3A: VMEM");
+ ia_css_s3a_vmem_decode(host_stats, isp_stats->vmem_stats_hi,
+ isp_stats->vmem_stats_lo);
+ }
+#if !defined(HAS_NO_HMEM)
+ IA_CSS_LOG("3A: HMEM");
+ ia_css_s3a_hmem_decode(host_stats, isp_stats->hmem_stats);
+#endif
+
+ IA_CSS_LEAVE("void");
+}
+
+void
+ia_css_isp_3a_statistics_map_free(struct ia_css_isp_3a_statistics_map *me)
+{
+ if (me) {
+ if (me->data_allocated) {
+ sh_css_free(me->data_ptr);
+ me->data_ptr = NULL;
+ me->data_allocated = false;
+ }
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_isp_3a_statistics_map *
+ia_css_isp_3a_statistics_map_allocate(
+ const struct ia_css_isp_3a_statistics *isp_stats,
+ void *data_ptr)
+{
+ struct ia_css_isp_3a_statistics_map *me;
+ /* Windows compiler does not like adding sizes to a void *
+ * so we use a local char * instead. */
+ char *base_ptr;
+
+ me = sh_css_malloc(sizeof(*me));
+ if (!me) {
+ IA_CSS_LEAVE("cannot allocate memory");
+ goto err;
+ }
+
+ me->data_ptr = data_ptr;
+ me->data_allocated = !data_ptr;
+ if (!data_ptr) {
+ me->data_ptr = sh_css_malloc(isp_stats->size);
+ if (!me->data_ptr) {
+ IA_CSS_LEAVE("cannot allocate memory");
+ goto err;
+ }
+ }
+ base_ptr = me->data_ptr;
+
+ me->size = isp_stats->size;
+ /* GCC complains when we assign a char * to a void *, so these
+ * casts are necessary unfortunately. */
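+	/* Resulting layout inside the single allocation (sizes taken from
+	 * isp_stats): [ dmem | vmem_hi | vmem_lo | hmem ].
+	 */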
+ me->dmem_stats = (void *)base_ptr;
+ me->vmem_stats_hi = (void *)(base_ptr + isp_stats->dmem_size);
+ me->vmem_stats_lo = (void *)(base_ptr + isp_stats->dmem_size +
+ isp_stats->vmem_size);
+ me->hmem_stats = (void *)(base_ptr + isp_stats->dmem_size +
+ 2 * isp_stats->vmem_size);
+
+ IA_CSS_LEAVE("map=%p", me);
+ return me;
+
+err:
+ if (me)
+ sh_css_free(me);
+ return NULL;
+}
+
+enum ia_css_err
+ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics *isp_stats) {
+ struct ia_css_isp_3a_statistics_map *map;
+ enum ia_css_err ret = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);
+
+ assert(host_stats);
+ assert(isp_stats);
+
+ map = ia_css_isp_3a_statistics_map_allocate(isp_stats, NULL);
+ if (map)
+ {
+ mmgr_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_3a_statistics(host_stats, map);
+ ia_css_isp_3a_statistics_map_free(map);
+ } else
+ {
+ IA_CSS_ERROR("out of memory");
+ ret = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+/* Parameter encoding is not yet orthogonal.
+ * This function handles some of the exceptions.
+ */
+static void
+ia_css_set_param_exceptions(const struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ /* Copy also to DP. Should be done by the driver. */
+ params->dp_config.gr = params->wb_config.gr;
+ params->dp_config.r = params->wb_config.r;
+ params->dp_config.b = params->wb_config.b;
+ params->dp_config.gb = params->wb_config.gb;
+
+ if (atomisp_hw_is_isp2401) {
+ assert(pipe);
+ assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
+
+ if (pipe->mode < IA_CSS_PIPE_ID_NUM) {
+ params->pipe_dp_config[pipe->mode].gr = params->wb_config.gr;
+ params->pipe_dp_config[pipe->mode].r = params->wb_config.r;
+ params->pipe_dp_config[pipe->mode].b = params->wb_config.b;
+ params->pipe_dp_config[pipe->mode].gb = params->wb_config.gb;
+ }
+ }
+}
+
+/* ISP2401 */
+static void
+sh_css_set_dp_config(const struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ assert(pipe);
+ assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+ if (pipe->mode < IA_CSS_PIPE_ID_NUM) {
+ params->pipe_dp_config[pipe->mode] = *config;
+ params->pipe_dpc_config_changed[pipe->mode] = true;
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_dp_config(const struct ia_css_pipe *pipe,
+ const struct ia_css_isp_parameters *params,
+ struct ia_css_dp_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ assert(pipe);
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ *config = params->pipe_dp_config[pipe->mode];
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+ params->nr_config = *config;
+ params->yee_config.nr = *config;
+ params->config_changed[IA_CSS_NR_ID] = true;
+ params->config_changed[IA_CSS_YEE_ID] = true;
+ params->config_changed[IA_CSS_BNR_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_ee_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ee_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+ ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+
+ params->ee_config = *config;
+ params->yee_config.ee = *config;
+ params->config_changed[IA_CSS_YEE_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_ee_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ee_config *config)
+{
+ if (!config)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ assert(params);
+ *config = params->ee_config;
+
+ ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_6axis_config *dvs_config)
+{
+ if (!dvs_config)
+ return;
+ assert(params);
+ assert(pipe);
+ assert(dvs_config->height_y == dvs_config->height_uv);
+ assert((dvs_config->width_y - 1) == 2 * (dvs_config->width_uv - 1));
+ assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
+
+ IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config);
+
+ copy_dvs_6axis_table(params->pipe_dvs_6axis_config[pipe->mode], dvs_config);
+
+#if !defined(HAS_NO_DVS_6AXIS_CONFIG_UPDATE)
+ params->pipe_dvs_6axis_config_changed[pipe->mode] = true;
+#endif
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe,
+ const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_6axis_config *dvs_config)
+{
+ if (!dvs_config)
+ return;
+ assert(params);
+ assert(pipe);
+ assert(dvs_config->height_y == dvs_config->height_uv);
+	assert((dvs_config->width_y - 1) == 2 * (dvs_config->width_uv - 1));
+
+ IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config);
+
+ if ((pipe->mode < IA_CSS_PIPE_ID_NUM) &&
+ (dvs_config->width_y == params->pipe_dvs_6axis_config[pipe->mode]->width_y) &&
+ (dvs_config->height_y == params->pipe_dvs_6axis_config[pipe->mode]->height_y) &&
+ (dvs_config->width_uv == params->pipe_dvs_6axis_config[pipe->mode]->width_uv) &&
+ (dvs_config->height_uv == params->pipe_dvs_6axis_config[pipe->mode]->height_uv)
+ &&
+ dvs_config->xcoords_y &&
+ dvs_config->ycoords_y &&
+ dvs_config->xcoords_uv &&
+ dvs_config->ycoords_uv) {
+ copy_dvs_6axis_table(dvs_config, params->pipe_dvs_6axis_config[pipe->mode]);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_baa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ params->bds_config = *config;
+ params->config_changed[IA_CSS_BDS_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_baa_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ *config = params->bds_config;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_dz_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dz_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("dx=%d, dy=%d", config->dx, config->dy);
+
+ assert(config->dx <= HRT_GDC_N);
+ assert(config->dy <= HRT_GDC_N);
+
+ params->dz_config = *config;
+ params->dz_config_changed = true;
+ /* JK: Why isp params changed?? */
+ params->isp_params_changed = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_dz_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dz_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ *config = params->dz_config;
+
+ IA_CSS_LEAVE_PRIVATE("dx=%d, dy=%d", config->dx, config->dy);
+}
+
+static void
+sh_css_set_motion_vector(struct ia_css_isp_parameters *params,
+ const struct ia_css_vector *motion)
+{
+ if (!motion)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("x=%d, y=%d", motion->x, motion->y);
+
+ params->motion_config = *motion;
+ /* JK: Why do isp params change? */
+ params->motion_config_changed = true;
+ params->isp_params_changed = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_motion_vector(const struct ia_css_isp_parameters *params,
+ struct ia_css_vector *motion)
+{
+ if (!motion)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("motion=%p", motion);
+
+ *motion = params->motion_config;
+
+ IA_CSS_LEAVE_PRIVATE("x=%d, y=%d", motion->x, motion->y);
+}
+
+struct ia_css_isp_config *
+sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe)
+{
+ if (!pipe) {
+ IA_CSS_ERROR("pipe=%p", NULL);
+ return NULL;
+ }
+ return pipe->config.p_isp_config;
+}
+
+enum ia_css_err
+ia_css_stream_set_isp_config(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config) {
+ return ia_css_stream_set_isp_config_on_pipe(stream, config, NULL);
+}
+
+enum ia_css_err
+ia_css_stream_set_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if ((!stream) || (!config))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ IA_CSS_ENTER("stream=%p, config=%p, pipe=%p", stream, config, pipe);
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ if (config->output_frame)
+ err = sh_css_set_per_frame_isp_config_on_pipe(stream, config, pipe);
+ else
+#endif
+ err = sh_css_set_global_isp_config_on_pipe(stream->pipes[0], config, pipe);
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config) {
+ struct ia_css_pipe *pipe_in = pipe;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("pipe=%p", pipe);
+
+ if ((!pipe) || (!pipe->stream))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "config=%p\n", config);
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ if (config->output_frame)
+ err = sh_css_set_per_frame_isp_config_on_pipe(pipe->stream, config, pipe);
+ else
+#endif
+ err = sh_css_set_global_isp_config_on_pipe(pipe, config, pipe_in);
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+static enum ia_css_err
+sh_css_set_global_isp_config_on_pipe(
+ struct ia_css_pipe *curr_pipe,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_err err1 = IA_CSS_SUCCESS;
+ enum ia_css_err err2 = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", curr_pipe, config, pipe);
+
+ err1 = sh_css_init_isp_params_from_config(curr_pipe, curr_pipe->stream->isp_params_configs, config, pipe);
+
+ /* Now commit all changes to the SP */
+ err2 = sh_css_param_update_isp_params(curr_pipe, curr_pipe->stream->isp_params_configs, sh_css_sp_is_running(), pipe);
+
+ /* The following code is intentional. The sh_css_init_isp_params_from_config
+ * interface returns an error when both DPC and BDS are enabled. The CSS API
+ * must pass this error information to the caller, i.e. the host. We do not
+ * return this error immediately, but instead continue updating the ISP
+ * params to enable testing of features which are currently in the TR phase. */
+
+ err = (err1 != IA_CSS_SUCCESS) ? err1 : ((err2 != IA_CSS_SUCCESS) ? err2 : err);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
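+/* Note on the error priority above: err1 (from initializing the params
+ * via sh_css_init_isp_params_from_config) takes precedence over err2
+ * (from committing them to the SP). For example, if err1 is
+ * IA_CSS_ERR_INVALID_ARGUMENTS and err2 is IA_CSS_SUCCESS, the caller
+ * still sees IA_CSS_ERR_INVALID_ARGUMENTS even though the commit went
+ * through, which is exactly the TR-phase testing behaviour described
+ * in the comment above. */
+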
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+static enum ia_css_err
+sh_css_set_per_frame_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe) {
+ unsigned int i;
+ bool per_frame_config_created = false;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_err err1 = IA_CSS_SUCCESS;
+ enum ia_css_err err2 = IA_CSS_SUCCESS;
+ enum ia_css_err err3 = IA_CSS_SUCCESS;
+
+ struct sh_css_ddr_address_map *ddr_ptrs;
+ struct sh_css_ddr_address_map_size *ddr_ptrs_size;
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", stream, config, pipe);
+
+ if (!pipe)
+ {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto exit;
+ }
+
+ /* create per-frame ISP params object with default values
+ * from stream->isp_params_configs if one doesn't already exist
+ */
+ if (!stream->per_frame_isp_params_configs)
+ {
+ err = sh_css_create_isp_params(stream,
+ &stream->per_frame_isp_params_configs);
+ if (err != IA_CSS_SUCCESS)
+ goto exit;
+ per_frame_config_created = true;
+ }
+
+ params = stream->per_frame_isp_params_configs;
+
+ /* update the new ISP params object with the new config */
+ if (!sh_css_init_isp_params_from_global(stream, params, false, pipe))
+ err1 = IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ err2 = sh_css_init_isp_params_from_config(stream->pipes[0], params, config, pipe);
+
+ if (per_frame_config_created)
+ {
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+ /* create per pipe reference to general ddr_ptrs */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ ref_sh_css_ddr_address_map(ddr_ptrs, &params->pipe_ddr_ptrs[i]);
+ params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size;
+ }
+ }
+
+ /* now commit to ddr */
+ err3 = sh_css_param_update_isp_params(stream->pipes[0], params, sh_css_sp_is_running(), pipe);
+
+ /* The following code is intentional. sh_css_init_isp_params_from_global and
+ * sh_css_init_isp_params_from_config return an error when both DPC and BDS
+ * are enabled. The CSS API must pass this error information to the caller,
+ * i.e. the host. We do not return this error immediately, but instead
+ * continue updating the ISP params to enable testing of features which are
+ * currently in the TR phase. */
+ err = (err1 != IA_CSS_SUCCESS) ? err1 :
+ (err2 != IA_CSS_SUCCESS) ? err2 :
+ (err3 != IA_CSS_SUCCESS) ? err3 : err;
+exit:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+#endif
+
+static enum ia_css_err
+sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe_in) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool is_dp_10bpp = true;
+
+ assert(pipe);
+
+ IA_CSS_ENTER_PRIVATE("pipe=%p, config=%p, params=%p", pipe, config, params);
+
+ ia_css_set_configs(params, config);
+
+ sh_css_set_nr_config(params, config->nr_config);
+ sh_css_set_ee_config(params, config->ee_config);
+ sh_css_set_baa_config(params, config->baa_config);
+ if ((pipe->mode < IA_CSS_PIPE_ID_NUM) &&
+ (params->pipe_dvs_6axis_config[pipe->mode]))
+ sh_css_set_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
+ sh_css_set_dz_config(params, config->dz_config);
+ sh_css_set_motion_vector(params, config->motion_vector);
+ sh_css_update_shading_table_status(pipe_in, params);
+ sh_css_set_shading_table(pipe->stream, params, config->shading_table);
+ sh_css_set_morph_table(params, config->morph_table);
+ sh_css_set_macc_table(params, config->macc_table);
+ sh_css_set_gamma_table(params, config->gamma_table);
+ sh_css_set_ctc_table(params, config->ctc_table);
+ /* ------ deprecated(bz675) : from ------ */
+ sh_css_set_shading_settings(params, config->shading_settings);
+ /* ------ deprecated(bz675) : to ------ */
+
+ params->dis_coef_table_changed = (config->dvs_coefs);
+ params->dvs2_coef_table_changed = (config->dvs2_coefs);
+
+ params->output_frame = config->output_frame;
+ params->isp_parameters_id = config->isp_config_id;
+
+ /* Currently we do not offer a CSS interface to set different
+ * configurations for DPC, i.e. depending on whether DPC is enabled
+ * before (NORM+OBC) or after it. The following code that sets the
+ * DPC configuration should be updated when this interface is made
+ * available. */
+ if (atomisp_hw_is_isp2401) {
+ sh_css_set_dp_config(pipe, params, config->dp_config);
+ ia_css_set_param_exceptions(pipe, params);
+ }
+
+ if (sh_css_select_dp_10bpp_config(pipe, &is_dp_10bpp) == IA_CSS_SUCCESS) {
+ /* Return an error when both DPC and BDS are enabled by the
+ * user. We do not exit from this point immediately, to allow
+ * internal firmware feature testing. */
+ if (is_dp_10bpp)
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else {
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto exit;
+ }
+
+ if (!atomisp_hw_is_isp2401)
+ ia_css_set_param_exceptions(pipe, params);
+
+exit:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
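+/* Illustrative host-side usage (hypothetical values): updating only the
+ * digital zoom flows through the setters above; members left NULL are
+ * skipped by their "if (!config) return;" guards.
+ *
+ *   struct ia_css_dz_config dz = { .dx = HRT_GDC_N / 2, .dy = HRT_GDC_N / 2 };
+ *   struct ia_css_isp_config cfg = { .dz_config = &dz };
+ *
+ *   ia_css_pipe_set_isp_config(pipe, &cfg);
+ */
+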
+void
+ia_css_stream_get_isp_config(
+ const struct ia_css_stream *stream,
+ struct ia_css_isp_config *config)
+{
+ IA_CSS_ENTER("void");
+ ia_css_pipe_get_isp_config(stream->pipes[0], config);
+ IA_CSS_LEAVE("void");
+}
+
+void
+ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config)
+{
+ struct ia_css_isp_parameters *params = NULL;
+
+ assert(config);
+
+ IA_CSS_ENTER("config=%p", config);
+
+ params = pipe->stream->isp_params_configs;
+ assert(params);
+
+ ia_css_get_configs(params, config);
+
+ sh_css_get_ee_config(params, config->ee_config);
+ sh_css_get_baa_config(params, config->baa_config);
+ sh_css_get_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
+ sh_css_get_dp_config(pipe, params, config->dp_config);
+ sh_css_get_macc_table(params, config->macc_table);
+ sh_css_get_gamma_table(params, config->gamma_table);
+ sh_css_get_ctc_table(params, config->ctc_table);
+ sh_css_get_dz_config(params, config->dz_config);
+ sh_css_get_motion_vector(params, config->motion_vector);
+ /* ------ deprecated(bz675) : from ------ */
+ sh_css_get_shading_settings(params, config->shading_settings);
+ /* ------ deprecated(bz675) : to ------ */
+
+ config->output_frame = params->output_frame;
+ config->isp_config_id = params->isp_parameters_id;
+
+ IA_CSS_LEAVE("void");
+}
+
+/*
+ * Per the coding style, returning "mmgr_NULL" is the error signal.
+ *
+ * Deprecated: implement mmgr_realloc() instead.
+ */
+static bool realloc_isp_css_mm_buf(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err,
+ uint16_t mmgr_attribute)
+{
+ s32 id;
+
+ *err = IA_CSS_SUCCESS;
+ /* Possible optimization: add a function sh_css_isp_css_mm_realloc()
+ * and implement on top of hmm. */
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (!force && *curr_size >= needed_size) {
+ IA_CSS_LEAVE_PRIVATE("false");
+ return false;
+ }
+ /* don't reallocate if single ref to buffer and same size */
+ if (*curr_size == needed_size && ia_css_refcount_is_single(*curr_buf)) {
+ IA_CSS_LEAVE_PRIVATE("false");
+ return false;
+ }
+
+ id = IA_CSS_REFCOUNT_PARAM_BUFFER;
+ ia_css_refcount_decrement(id, *curr_buf);
+ *curr_buf = ia_css_refcount_increment(id, mmgr_alloc_attr(needed_size,
+ mmgr_attribute));
+
+ if (!*curr_buf) {
+ *err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ *curr_size = 0;
+ } else {
+ *curr_size = needed_size;
+ }
+ IA_CSS_LEAVE_PRIVATE("true");
+ return true;
+}
+
+static bool reallocate_buffer(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err)
+{
+ bool ret;
+ u16 mmgr_attribute = MMGR_ATTRIBUTE_DEFAULT;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ret = realloc_isp_css_mm_buf(curr_buf,
+ curr_size, needed_size, force, err, mmgr_attribute);
+
+ IA_CSS_LEAVE_PRIVATE("ret=%d", ret);
+ return ret;
+}
+
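+/* Illustrative usage (assumed sizes): the force flag distinguishes
+ * "grow only" from "always refresh":
+ *
+ *   enum ia_css_err err;
+ *   // grow only: no reallocation when *size is already >= 1024
+ *   reallocate_buffer(&buf, &size, 1024, false, &err);
+ *   // refresh: reallocates, unless buf is the only reference
+ *   // to a buffer of exactly the requested size
+ *   reallocate_buffer(&buf, &size, size, true, &err);
+ */
+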
+struct ia_css_isp_3a_statistics *
+ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
+{
+ struct ia_css_isp_3a_statistics *me;
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ assert(grid);
+
+ /* MW: does "grid->enable" also control the histogram output? */
+ if (!grid->enable)
+ return NULL;
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ if (grid->use_dmem) {
+ me->dmem_size = sizeof(struct ia_css_3a_output) *
+ grid->aligned_width *
+ grid->aligned_height;
+ } else {
+ me->vmem_size = ISP_S3ATBL_HI_LO_STRIDE_BYTES *
+ grid->aligned_height;
+ }
+#if !defined(HAS_NO_HMEM)
+ me->hmem_size = sizeof_hmem(HMEM0_ID);
+#endif
+
+ /* All subsections need to be aligned to the system bus width */
+ me->dmem_size = CEIL_MUL(me->dmem_size, HIVE_ISP_DDR_WORD_BYTES);
+ me->vmem_size = CEIL_MUL(me->vmem_size, HIVE_ISP_DDR_WORD_BYTES);
+ me->hmem_size = CEIL_MUL(me->hmem_size, HIVE_ISP_DDR_WORD_BYTES);
+
+ me->size = me->dmem_size + me->vmem_size * 2 + me->hmem_size;
+ me->data_ptr = mmgr_malloc(me->size);
+ if (me->data_ptr == mmgr_NULL) {
+ sh_css_free(me);
+ me = NULL;
+ goto err;
+ }
+ if (me->dmem_size)
+ me->data.dmem.s3a_tbl = me->data_ptr;
+ if (me->vmem_size) {
+ me->data.vmem.s3a_tbl_hi = me->data_ptr + me->dmem_size;
+ me->data.vmem.s3a_tbl_lo = me->data_ptr + me->dmem_size + me->vmem_size;
+ }
+ if (me->hmem_size)
+ me->data_hmem.rgby_tbl = me->data_ptr + me->dmem_size + 2 * me->vmem_size;
+
+err:
+ IA_CSS_LEAVE("return=%p", me);
+ return me;
+}
+
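+/* Layout of the single allocation made above, after each subsection is
+ * rounded up to HIVE_ISP_DDR_WORD_BYTES:
+ *
+ *   data_ptr
+ *   +-----------+------------+------------+-----------+
+ *   | dmem_size | vmem_size  | vmem_size  | hmem_size |
+ *   | s3a_tbl   | s3a_tbl_hi | s3a_tbl_lo | rgby_tbl  |
+ *   +-----------+------------+------------+-----------+
+ */
+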
+void
+ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me)
+{
+ if (me) {
+ hmm_free(me->data_ptr);
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void)
+{
+ return NULL;
+}
+
+struct ia_css_metadata *
+ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info)
+{
+ struct ia_css_metadata *md = NULL;
+
+ IA_CSS_ENTER("");
+
+ if (metadata_info->size == 0)
+ return NULL;
+
+ md = sh_css_malloc(sizeof(*md));
+ if (!md)
+ goto error;
+
+ md->info = *metadata_info;
+ md->exp_id = 0;
+ md->address = mmgr_malloc(metadata_info->size);
+ if (md->address == mmgr_NULL)
+ goto error;
+
+ IA_CSS_LEAVE("return=%p", md);
+ return md;
+
+error:
+ ia_css_metadata_free(md);
+ IA_CSS_LEAVE("return=%p", NULL);
+ return NULL;
+}
+
+void
+ia_css_metadata_free(struct ia_css_metadata *me)
+{
+ if (me) {
+ /* The enter and leave macros are placed inside
+ * the condition to avoid false logging of metadata
+ * free events when metadata is disabled.
+ * We found this to be confusing during development
+ * and debugging. */
+ IA_CSS_ENTER("me=%p", me);
+ hmm_free(me->address);
+ sh_css_free(me);
+ IA_CSS_LEAVE("void");
+ }
+}
+
+void
+ia_css_metadata_free_multiple(unsigned int num_bufs,
+ struct ia_css_metadata **bufs)
+{
+ unsigned int i;
+
+ if (bufs) {
+ for (i = 0; i < num_bufs; i++)
+ ia_css_metadata_free(bufs[i]);
+ }
+}
+
+static unsigned int g_param_buffer_dequeue_count;
+static unsigned int g_param_buffer_enqueue_count;
+
+enum ia_css_err
+ia_css_stream_isp_parameters_init(struct ia_css_stream *stream) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int i;
+ struct sh_css_ddr_address_map *ddr_ptrs;
+ struct sh_css_ddr_address_map_size *ddr_ptrs_size;
+ struct ia_css_isp_parameters *params;
+
+ assert(stream);
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (!stream)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* TMP: tracking of paramsets */
+ g_param_buffer_dequeue_count = 0;
+ g_param_buffer_enqueue_count = 0;
+
+ stream->per_frame_isp_params_configs = NULL;
+ err = sh_css_create_isp_params(stream,
+ &stream->isp_params_configs);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ params = stream->isp_params_configs;
+ if (!sh_css_init_isp_params_from_global(stream, params, true, NULL))
+ {
+ /* we do not return the error immediately to enable internal
+ * firmware feature testing */
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+
+ /* create per pipe reference to general ddr_ptrs */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ {
+ ref_sh_css_ddr_address_map(ddr_ptrs, &params->pipe_ddr_ptrs[i]);
+ params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size;
+ }
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void
+ia_css_set_sdis_config(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *dvs_coefs)
+{
+ ia_css_set_sdis_horicoef_config(params, dvs_coefs);
+ ia_css_set_sdis_vertcoef_config(params, dvs_coefs);
+ ia_css_set_sdis_horiproj_config(params, dvs_coefs);
+ ia_css_set_sdis_vertproj_config(params, dvs_coefs);
+}
+
+static void
+ia_css_set_sdis2_config(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *dvs2_coefs)
+{
+ ia_css_set_sdis2_horicoef_config(params, dvs2_coefs);
+ ia_css_set_sdis2_vertcoef_config(params, dvs2_coefs);
+ ia_css_set_sdis2_horiproj_config(params, dvs2_coefs);
+ ia_css_set_sdis2_vertproj_config(params, dvs2_coefs);
+}
+
+static enum ia_css_err
+sh_css_create_isp_params(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters **isp_params_out) {
+ bool succ = true;
+ unsigned int i;
+ struct sh_css_ddr_address_map *ddr_ptrs;
+ struct sh_css_ddr_address_map_size *ddr_ptrs_size;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ size_t params_size;
+ struct ia_css_isp_parameters *params =
+ sh_css_malloc(sizeof(struct ia_css_isp_parameters));
+
+ if (!params)
+ {
+ *isp_params_out = NULL;
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_ERROR("%s:%d error: cannot allocate memory", __FILE__, __LINE__);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ memset(params, 0, sizeof(struct ia_css_isp_parameters));
+
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ {
+ memset(&params->pipe_ddr_ptrs[i], 0,
+ sizeof(params->pipe_ddr_ptrs[i]));
+ memset(&params->pipe_ddr_ptrs_size[i], 0,
+ sizeof(params->pipe_ddr_ptrs_size[i]));
+ }
+
+ memset(ddr_ptrs, 0, sizeof(*ddr_ptrs));
+ memset(ddr_ptrs_size, 0, sizeof(*ddr_ptrs_size));
+
+ params_size = sizeof(params->uds);
+ ddr_ptrs_size->isp_param = params_size;
+ ddr_ptrs->isp_param =
+ ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
+ mmgr_malloc(params_size));
+ succ &= (ddr_ptrs->isp_param != mmgr_NULL);
+
+ ddr_ptrs_size->macc_tbl = sizeof(struct ia_css_macc_table);
+ ddr_ptrs->macc_tbl =
+ ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
+ mmgr_malloc(sizeof(struct ia_css_macc_table)));
+ succ &= (ddr_ptrs->macc_tbl != mmgr_NULL);
+
+ if (!succ)
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ *isp_params_out = params;
+ return err;
+}
+
+static bool
+sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ bool use_default_config,
+ struct ia_css_pipe *pipe_in)
+{
+ bool retval = true;
+ int i = 0;
+ bool is_dp_10bpp = true;
+ unsigned int isp_pipe_version = ia_css_pipe_get_isp_pipe_version(
+ stream->pipes[0]);
+ struct ia_css_isp_parameters *stream_params = stream->isp_params_configs;
+
+ if (!use_default_config && !stream_params) {
+ retval = false;
+ goto exit;
+ }
+
+ params->output_frame = NULL;
+ params->isp_parameters_id = 0;
+
+ if (use_default_config) {
+ ia_css_set_xnr3_config(params, &default_xnr3_config);
+
+ sh_css_set_nr_config(params, &default_nr_config);
+ sh_css_set_ee_config(params, &default_ee_config);
+ if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1)
+ sh_css_set_macc_table(params, &default_macc_table);
+ else if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2)
+ sh_css_set_macc_table(params, &default_macc2_table);
+ sh_css_set_gamma_table(params, &default_gamma_table);
+ sh_css_set_ctc_table(params, &default_ctc_table);
+ sh_css_set_baa_config(params, &default_baa_config);
+ sh_css_set_dz_config(params, &default_dz_config);
+ /* ------ deprecated(bz675) : from ------ */
+ sh_css_set_shading_settings(params, &default_shading_settings);
+ /* ------ deprecated(bz675) : to ------ */
+
+ ia_css_set_s3a_config(params, &default_3a_config);
+ ia_css_set_wb_config(params, &default_wb_config);
+ ia_css_set_csc_config(params, &default_cc_config);
+ ia_css_set_tnr_config(params, &default_tnr_config);
+ ia_css_set_ob_config(params, &default_ob_config);
+ ia_css_set_dp_config(params, &default_dp_config);
+
+ if (!atomisp_hw_is_isp2401) {
+ ia_css_set_param_exceptions(pipe_in, params);
+ } else {
+ for (i = 0; i < stream->num_pipes; i++) {
+ if (sh_css_select_dp_10bpp_config(stream->pipes[i],
+ &is_dp_10bpp) == IA_CSS_SUCCESS) {
+ /* Set the return value to false if both DPC and
+ * BDS are enabled by the user. But we do not return
+ * the value immediately, to enable internal firmware
+ * feature testing. */
+ if (is_dp_10bpp) {
+ sh_css_set_dp_config(stream->pipes[i], params, &default_dp_10bpp_config);
+ } else {
+ sh_css_set_dp_config(stream->pipes[i], params, &default_dp_config);
+ }
+ } else {
+ retval = false;
+ goto exit;
+ }
+
+ ia_css_set_param_exceptions(stream->pipes[i], params);
+ }
+ }
+
+ ia_css_set_de_config(params, &default_de_config);
+ ia_css_set_gc_config(params, &default_gc_config);
+ ia_css_set_anr_config(params, &default_anr_config);
+ ia_css_set_anr2_config(params, &default_anr_thres);
+ ia_css_set_ce_config(params, &default_ce_config);
+ ia_css_set_xnr_table_config(params, &default_xnr_table);
+ ia_css_set_ecd_config(params, &default_ecd_config);
+ ia_css_set_ynr_config(params, &default_ynr_config);
+ ia_css_set_fc_config(params, &default_fc_config);
+ ia_css_set_cnr_config(params, &default_cnr_config);
+ ia_css_set_macc_config(params, &default_macc_config);
+ ia_css_set_ctc_config(params, &default_ctc_config);
+ ia_css_set_aa_config(params, &default_aa_config);
+ ia_css_set_r_gamma_config(params, &default_r_gamma_table);
+ ia_css_set_g_gamma_config(params, &default_g_gamma_table);
+ ia_css_set_b_gamma_config(params, &default_b_gamma_table);
+ ia_css_set_yuv2rgb_config(params, &default_yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, &default_rgb2yuv_cc_config);
+ ia_css_set_xnr_config(params, &default_xnr_config);
+ ia_css_set_sdis_config(params, &default_sdis_config);
+ ia_css_set_sdis2_config(params, &default_sdis2_config);
+ ia_css_set_formats_config(params, &default_formats_config);
+
+ params->fpn_config.data = NULL;
+ params->config_changed[IA_CSS_FPN_ID] = true;
+ params->fpn_config.enabled = 0;
+
+ params->motion_config = default_motion_config;
+ params->motion_config_changed = true;
+
+ params->morph_table = NULL;
+ params->morph_table_changed = true;
+
+ params->sc_table = NULL;
+ params->sc_table_changed = true;
+ params->sc_table_dirty = false;
+ params->sc_table_last_pipe_num = 0;
+
+ ia_css_sdis2_clear_coefficients(&params->dvs2_coefs);
+ params->dvs2_coef_table_changed = true;
+
+ ia_css_sdis_clear_coefficients(&params->dvs_coefs);
+ params->dis_coef_table_changed = true;
+ } else {
+ ia_css_set_xnr3_config(params, &stream_params->xnr3_config);
+
+ sh_css_set_nr_config(params, &stream_params->nr_config);
+ sh_css_set_ee_config(params, &stream_params->ee_config);
+ if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1 ||
+ isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2)
+ sh_css_set_macc_table(params, &stream_params->macc_table);
+ sh_css_set_gamma_table(params, &stream_params->gc_table);
+ sh_css_set_ctc_table(params, &stream_params->ctc_table);
+ sh_css_set_baa_config(params, &stream_params->bds_config);
+ sh_css_set_dz_config(params, &stream_params->dz_config);
+ /* ------ deprecated(bz675) : from ------ */
+ sh_css_set_shading_settings(params, &stream_params->shading_settings);
+ /* ------ deprecated(bz675) : to ------ */
+
+ ia_css_set_s3a_config(params, &stream_params->s3a_config);
+ ia_css_set_wb_config(params, &stream_params->wb_config);
+ ia_css_set_csc_config(params, &stream_params->cc_config);
+ ia_css_set_tnr_config(params, &stream_params->tnr_config);
+ ia_css_set_ob_config(params, &stream_params->ob_config);
+ ia_css_set_dp_config(params, &stream_params->dp_config);
+ ia_css_set_de_config(params, &stream_params->de_config);
+ ia_css_set_gc_config(params, &stream_params->gc_config);
+ ia_css_set_anr_config(params, &stream_params->anr_config);
+ ia_css_set_anr2_config(params, &stream_params->anr_thres);
+ ia_css_set_ce_config(params, &stream_params->ce_config);
+ ia_css_set_xnr_table_config(params, &stream_params->xnr_table);
+ ia_css_set_ecd_config(params, &stream_params->ecd_config);
+ ia_css_set_ynr_config(params, &stream_params->ynr_config);
+ ia_css_set_fc_config(params, &stream_params->fc_config);
+ ia_css_set_cnr_config(params, &stream_params->cnr_config);
+ ia_css_set_macc_config(params, &stream_params->macc_config);
+ ia_css_set_ctc_config(params, &stream_params->ctc_config);
+ ia_css_set_aa_config(params, &stream_params->aa_config);
+ ia_css_set_r_gamma_config(params, &stream_params->r_gamma_table);
+ ia_css_set_g_gamma_config(params, &stream_params->g_gamma_table);
+ ia_css_set_b_gamma_config(params, &stream_params->b_gamma_table);
+ ia_css_set_yuv2rgb_config(params, &stream_params->yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, &stream_params->rgb2yuv_cc_config);
+ ia_css_set_xnr_config(params, &stream_params->xnr_config);
+ ia_css_set_formats_config(params, &stream_params->formats_config);
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ if (sh_css_select_dp_10bpp_config(stream->pipes[i],
+ &is_dp_10bpp) == IA_CSS_SUCCESS) {
+ /* Set the return value to false if both DPC and
+ * BDS are enabled by the user. But we do not return
+ * the value immediately, to enable internal firmware
+ * feature testing. */
+
+ if (is_dp_10bpp) {
+ retval = false;
+ /* FIXME: should it ignore this error? */
+ }
+ } else {
+ retval = false;
+ goto exit;
+ }
+ if (atomisp_hw_is_isp2401) {
+ if (stream->pipes[i]->mode < IA_CSS_PIPE_ID_NUM) {
+ sh_css_set_dp_config(stream->pipes[i], params,
+ &stream_params->pipe_dp_config[stream->pipes[i]->mode]);
+ ia_css_set_param_exceptions(stream->pipes[i], params);
+ } else {
+ retval = false;
+ goto exit;
+ }
+ }
+ }
+
+ if (!atomisp_hw_is_isp2401)
+ ia_css_set_param_exceptions(pipe_in, params);
+
+ params->fpn_config.data = stream_params->fpn_config.data;
+ params->config_changed[IA_CSS_FPN_ID] =
+ stream_params->config_changed[IA_CSS_FPN_ID];
+ params->fpn_config.enabled = stream_params->fpn_config.enabled;
+
+ sh_css_set_motion_vector(params, &stream_params->motion_config);
+ sh_css_set_morph_table(params, stream_params->morph_table);
+
+ if (stream_params->sc_table) {
+ sh_css_update_shading_table_status(pipe_in, params);
+ sh_css_set_shading_table(stream, params, stream_params->sc_table);
+ } else {
+ params->sc_table = NULL;
+ params->sc_table_changed = true;
+ params->sc_table_dirty = false;
+ params->sc_table_last_pipe_num = 0;
+ }
+
+ /* Only IA_CSS_PIPE_ID_VIDEO & IA_CSS_PIPE_ID_CAPTURE support dvs_6axis_config */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (stream_params->pipe_dvs_6axis_config[i]) {
+ if (params->pipe_dvs_6axis_config[i]) {
+ copy_dvs_6axis_table(params->pipe_dvs_6axis_config[i],
+ stream_params->pipe_dvs_6axis_config[i]);
+ } else {
+ params->pipe_dvs_6axis_config[i] =
+ generate_dvs_6axis_table_from_config(stream_params->pipe_dvs_6axis_config[i]);
+ }
+ }
+ }
+ ia_css_set_sdis_config(params, &stream_params->dvs_coefs);
+ params->dis_coef_table_changed = stream_params->dis_coef_table_changed;
+
+ ia_css_set_sdis2_config(params, &stream_params->dvs2_coefs);
+ params->dvs2_coef_table_changed = stream_params->dvs2_coef_table_changed;
+ params->sensor_binning = stream_params->sensor_binning;
+ }
+
+exit:
+ return retval;
+}
+
+enum ia_css_err
+sh_css_params_init(void) {
+ int i, p;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* TMP: tracking of paramsets */
+ g_param_buffer_dequeue_count = 0;
+ g_param_buffer_enqueue_count = 0;
+
+ for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++)
+ {
+ for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
+ xmem_sp_stage_ptrs[p][i] =
+ ia_css_refcount_increment(-1,
+ mmgr_calloc(1,
+ sizeof(struct sh_css_sp_stage)));
+ xmem_isp_stage_ptrs[p][i] =
+ ia_css_refcount_increment(-1,
+ mmgr_calloc(1,
+ sizeof(struct sh_css_isp_stage)));
+
+ if ((xmem_sp_stage_ptrs[p][i] == mmgr_NULL) ||
+ (xmem_isp_stage_ptrs[p][i] == mmgr_NULL)) {
+ sh_css_params_uninit();
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ }
+ }
+
+ ia_css_config_gamma_table();
+ ia_css_config_ctc_table();
+ ia_css_config_rgb_gamma_tables();
+ ia_css_config_xnr_table();
+
+ sp_ddr_ptrs = ia_css_refcount_increment(-1, mmgr_calloc(1,
+ CEIL_MUL(sizeof(struct sh_css_ddr_address_map),
+ HIVE_ISP_DDR_WORD_BYTES)));
+ xmem_sp_group_ptrs = ia_css_refcount_increment(-1, mmgr_calloc(1,
+ sizeof(struct sh_css_sp_group)));
+
+ if ((sp_ddr_ptrs == mmgr_NULL) ||
+ (xmem_sp_group_ptrs == mmgr_NULL))
+ {
+ ia_css_uninit();
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static void host_lut_store(const void *lut)
+{
+ unsigned int i;
+
+ for (i = 0; i < N_GDC_ID; i++)
+ gdc_lut_store((gdc_ID_t)i, (const int (*)[HRT_GDC_N]) lut);
+}
+
+/* Note that allocation is in ipu address space. */
+inline hrt_vaddress sh_css_params_alloc_gdc_lut(void)
+{
+ return mmgr_malloc(sizeof(zoom_table));
+}
+
+inline void sh_css_params_free_gdc_lut(hrt_vaddress addr)
+{
+ if (addr != mmgr_NULL)
+ hmm_free(addr);
+}
+
+enum ia_css_err ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
+ const void *lut)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool stream_started = false;
+
+ IA_CSS_ENTER("pipe=%p lut=%p", pipe, lut);
+
+ if (!lut || !pipe) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE("err=%d", err);
+ return err;
+ }
+
+ /* If the pipe belongs to a stream and the stream has started, it is not
+ * safe to store the LUT to the GDC HW. If pipe->stream is NULL, no stream
+ * has been created with this pipe, so it is safe to do this operation as
+ * long as ia_css_init() has been called. */
+ if (pipe->stream && pipe->stream->started) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "unable to set scaler lut since stream has started\n");
+ stream_started = true;
+ err = IA_CSS_ERR_NOT_SUPPORTED;
+ }
+
+ /* Free any existing tables. */
+ sh_css_params_free_gdc_lut(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+
+ if (!stream_started) {
+ if (!atomisp_hw_is_isp2401)
+ pipe->scaler_pp_lut = mmgr_malloc(sizeof(zoom_table));
+ else
+ pipe->scaler_pp_lut = sh_css_params_alloc_gdc_lut();
+
+ if (pipe->scaler_pp_lut == mmgr_NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "unable to allocate scaler_pp_lut\n");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ } else {
+ gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])lut,
+ interleaved_lut_temp);
+ mmgr_store(pipe->scaler_pp_lut,
+ (int *)interleaved_lut_temp,
+ sizeof(zoom_table));
+ }
+ }
+
+ IA_CSS_LEAVE("lut(%u) err=%d", pipe->scaler_pp_lut, err);
+ return err;
+}
+
+/* if pipe is NULL, returns default lut addr. */
+hrt_vaddress sh_css_pipe_get_pp_gdc_lut(const struct ia_css_pipe *pipe)
+{
+ assert(pipe);
+
+ if (pipe->scaler_pp_lut != mmgr_NULL)
+ return pipe->scaler_pp_lut;
+ else
+ return sh_css_params_get_default_gdc_lut();
+}
+
+enum ia_css_err sh_css_params_map_and_store_default_gdc_lut(void)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* Is table already mapped? Nothing to do if it is mapped. */
+ if (default_gdc_lut != mmgr_NULL)
+ return err;
+
+ host_lut_store((void *)zoom_table);
+
+ if (!atomisp_hw_is_isp2401)
+ default_gdc_lut = mmgr_malloc(sizeof(zoom_table));
+ else
+ default_gdc_lut = sh_css_params_alloc_gdc_lut();
+
+ if (default_gdc_lut == mmgr_NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])zoom_table,
+ interleaved_lut_temp);
+ mmgr_store(default_gdc_lut, (int *)interleaved_lut_temp,
+ sizeof(zoom_table));
+
+ IA_CSS_LEAVE_PRIVATE("lut(%u) err=%d", default_gdc_lut, err);
+ return err;
+}
+
+void sh_css_params_free_default_gdc_lut(void)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ sh_css_params_free_gdc_lut(default_gdc_lut);
+ default_gdc_lut = mmgr_NULL;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+hrt_vaddress sh_css_params_get_default_gdc_lut(void)
+{
+ return default_gdc_lut;
+}
+
+static void free_param_set_callback(
+ hrt_vaddress ptr)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ free_ia_css_isp_parameter_set_info(ptr);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void free_buffer_callback(
+ hrt_vaddress ptr)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ hmm_free(ptr);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+sh_css_param_clear_param_sets(void)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/*
+ * MW: we could define hmm_free() to return NULL;
+ * then one could write ptr = hmm_free(ptr);
+ */
+#define safe_free(id, x) \
+ do { \
+ ia_css_refcount_decrement(id, x); \
+ (x) = mmgr_NULL; \
+ } while (0)
+
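+/* Illustrative expansion (see free_map() below): safe_free() drops one
+ * reference and poisons the caller's copy, so the stale address cannot
+ * be used afterwards:
+ *
+ *   ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
+ *   addrs[i] = mmgr_NULL;
+ */
+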
+static void free_map(struct sh_css_ddr_address_map *map)
+{
+ unsigned int i;
+
+ hrt_vaddress *addrs = (hrt_vaddress *)map;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* free buffers */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size) /
+ sizeof(size_t)); i++) {
+ if (addrs[i] == mmgr_NULL)
+ continue;
+ safe_free(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+ia_css_stream_isp_parameters_uninit(struct ia_css_stream *stream)
+{
+ int i;
+ struct ia_css_isp_parameters *params = stream->isp_params_configs;
+ struct ia_css_isp_parameters *per_frame_params =
+ stream->per_frame_isp_params_configs;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ if (!params) {
+ IA_CSS_LEAVE_PRIVATE("isp_param_configs is NULL");
+ return;
+ }
+
+ /* free existing ddr_ptr maps */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ free_map(&params->pipe_ddr_ptrs[i]);
+ if (per_frame_params)
+ free_map(&per_frame_params->pipe_ddr_ptrs[i]);
+ /* Free up the DVS table memory blocks before recomputing a new table */
+ if (params->pipe_dvs_6axis_config[i])
+ free_dvs_6axis_table(&params->pipe_dvs_6axis_config[i]);
+ if (per_frame_params && per_frame_params->pipe_dvs_6axis_config[i])
+ free_dvs_6axis_table(&per_frame_params->pipe_dvs_6axis_config[i]);
+ }
+ free_map(&params->ddr_ptrs);
+ if (per_frame_params)
+ free_map(&per_frame_params->ddr_ptrs);
+
+ if (params->fpn_config.data) {
+ sh_css_free(params->fpn_config.data);
+ params->fpn_config.data = NULL;
+ }
+
+ /* Free up sc_config (temporal shading table) if it is allocated. */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ if (per_frame_params) {
+ if (per_frame_params->sc_config) {
+ ia_css_shading_table_free(per_frame_params->sc_config);
+ per_frame_params->sc_config = NULL;
+ }
+ }
+
+ sh_css_free(params);
+ if (per_frame_params)
+ sh_css_free(per_frame_params);
+ stream->isp_params_configs = NULL;
+ stream->per_frame_isp_params_configs = NULL;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+sh_css_params_uninit(void)
+{
+ unsigned int p, i;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ia_css_refcount_decrement(-1, sp_ddr_ptrs);
+ sp_ddr_ptrs = mmgr_NULL;
+ ia_css_refcount_decrement(-1, xmem_sp_group_ptrs);
+ xmem_sp_group_ptrs = mmgr_NULL;
+
+ for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++)
+ for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
+ ia_css_refcount_decrement(-1, xmem_sp_stage_ptrs[p][i]);
+ xmem_sp_stage_ptrs[p][i] = mmgr_NULL;
+ ia_css_refcount_decrement(-1, xmem_isp_stage_ptrs[p][i]);
+ xmem_isp_stage_ptrs[p][i] = mmgr_NULL;
+ }
+
+ /* go through the pools to clear references */
+ ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback);
+ ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_BUFFER, &free_buffer_callback);
+ ia_css_refcount_clear(-1, &free_buffer_callback);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static struct ia_css_host_data *
+convert_allocate_morph_plane(
+ unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int aligned_width)
+{
+ unsigned int i, j, padding, w;
+ struct ia_css_host_data *me;
+ unsigned int isp_data_size;
+ u16 *isp_data_ptr;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* We do not have morph table interpolation yet, so we allow a wider
+ * table to be used. This restriction will be removed in the future. */
+ if (width > aligned_width) {
+ padding = 0;
+ w = aligned_width;
+ } else {
+ padding = aligned_width - width;
+ w = width;
+ }
+ isp_data_size = height * (w + padding) * sizeof(uint16_t);
+
+ me = ia_css_host_data_allocate((size_t)isp_data_size);
+
+ if (!me) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return NULL;
+ }
+
+ isp_data_ptr = (uint16_t *)me->address;
+
+ memset(isp_data_ptr, 0, (size_t)isp_data_size);
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < w; j++)
+ *isp_data_ptr++ = (uint16_t)data[j];
+ isp_data_ptr += padding;
+ data += width;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+ return me;
+}
+
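+/* Worked example of the padding arithmetic above (assumed numbers):
+ * with width = 130 and aligned_width = 128 the wider table is truncated
+ * (padding = 0, w = 128); with width = 120 and aligned_width = 128 each
+ * row is zero-padded (padding = 8, w = 120). Either way every ISP row
+ * is w + padding entries wide.
+ */
+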
+static enum ia_css_err
+store_morph_plane(
+ unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ hrt_vaddress dest,
+ unsigned int aligned_width) {
+ struct ia_css_host_data *isp_data;
+
+ assert(dest != mmgr_NULL);
+
+ isp_data = convert_allocate_morph_plane(data, width, height, aligned_width);
+ if (!isp_data)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ ia_css_params_store_ia_css_host_data(dest, isp_data);
+
+ ia_css_host_data_free(isp_data);
+ return IA_CSS_SUCCESS;
+}
+
+static void sh_css_update_isp_params_to_ddr(
+ struct ia_css_isp_parameters *params,
+ hrt_vaddress ddr_ptr)
+{
+ size_t size = sizeof(params->uds);
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(params);
+
+ mmgr_store(ddr_ptr, &params->uds, size);
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void sh_css_update_isp_mem_params_to_ddr(
+ const struct ia_css_binary *binary,
+ hrt_vaddress ddr_mem_ptr,
+ size_t size,
+ enum ia_css_isp_memories mem)
+{
+ const struct ia_css_host_data *params;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ params = ia_css_isp_param_get_mem_init(&binary->mem_params,
+ IA_CSS_PARAM_CLASS_PARAM, mem);
+ mmgr_store(ddr_mem_ptr, params->address, size);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void ia_css_dequeue_param_buffers(/*unsigned int pipe_num*/ void)
+{
+ unsigned int i;
+ hrt_vaddress cpy;
+ enum sh_css_queue_id param_queue_ids[3] = { IA_CSS_PARAMETER_SET_QUEUE_ID,
+ IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID,
+ SH_CSS_INVALID_QUEUE_ID
+ };
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LEAVE_PRIVATE("sp is not running");
+ /* SP is not running. The queues are not valid */
+ return;
+ }
+
+ for (i = 0; param_queue_ids[i] != SH_CSS_INVALID_QUEUE_ID; i++) {
+ cpy = (hrt_vaddress)0;
+ /* clean-up old copy */
+ while (ia_css_bufq_dequeue_buffer(param_queue_ids[i],
+ (uint32_t *)&cpy) == IA_CSS_SUCCESS) {
+ /* TMP: keep track of dequeued param set count
+ */
+ g_param_buffer_dequeue_count++;
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED,
+ 0,
+ param_queue_ids[i],
+ 0);
+
+ IA_CSS_LOG("dequeued param set %x from %d, release ref", cpy, 0);
+ free_ia_css_isp_parameter_set_info(cpy);
+ cpy = (hrt_vaddress)0;
+ }
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+process_kernel_parameters(unsigned int pipe_id,
+ struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params,
+ unsigned int isp_pipe_version,
+ unsigned int raw_bit_depth)
+{
+ unsigned int param_id;
+
+ (void)isp_pipe_version;
+ (void)raw_bit_depth;
+
+ sh_css_enable_pipeline(stage->binary);
+
+ if (params->config_changed[IA_CSS_OB_ID]) {
+ ia_css_ob_configure(&params->stream_configs.ob,
+ isp_pipe_version, raw_bit_depth);
+ }
+ if (params->config_changed[IA_CSS_S3A_ID]) {
+ ia_css_s3a_configure(raw_bit_depth);
+ }
+ /* Copy stage uds parameters to config, since they can differ per stage.
+ */
+ params->crop_config.crop_pos = params->uds[stage->stage_num].crop_pos;
+ params->uds_config.crop_pos = params->uds[stage->stage_num].crop_pos;
+ params->uds_config.uds = params->uds[stage->stage_num].uds;
+ /* Call parameter process functions for all kernels */
+ /* Skip SC, since that is called on a temp sc table */
+ for (param_id = 0; param_id < IA_CSS_NUM_PARAMETER_IDS; param_id++) {
+ if (param_id == IA_CSS_SC_ID)
+ continue;
+ if (params->config_changed[param_id])
+ ia_css_kernel_process_param[param_id](pipe_id, stage, params);
+ }
+}
+
+enum ia_css_err
+sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
+ struct ia_css_isp_parameters *params,
+ bool commit,
+ struct ia_css_pipe *pipe_in) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ hrt_vaddress cpy;
+ int i;
+ unsigned int raw_bit_depth = 10;
+ unsigned int isp_pipe_version = SH_CSS_ISP_PIPE_VERSION_1;
+ bool acc_cluster_params_changed = false;
+ unsigned int thread_id, pipe_num;
+
+ (void)acc_cluster_params_changed;
+
+ assert(curr_pipe);
+
+ IA_CSS_ENTER_PRIVATE("pipe=%p, isp_parameters_id=%d", pipe_in, params->isp_parameters_id);
+ raw_bit_depth = ia_css_stream_input_format_bits_per_pixel(curr_pipe->stream);
+
+ /* now make the map available to the sp */
+ if (!commit)
+ {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* enqueue copies of the mem_map to
+ the designated pipelines */
+ for (i = 0; i < curr_pipe->stream->num_pipes; i++)
+ {
+ struct ia_css_pipe *pipe;
+ struct sh_css_ddr_address_map *cur_map;
+ struct sh_css_ddr_address_map_size *cur_map_size;
+ struct ia_css_isp_parameter_set_info isp_params_info;
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+
+ enum sh_css_queue_id queue_id;
+
+ pipe = curr_pipe->stream->pipes[i];
+ pipeline = ia_css_pipe_get_pipeline(pipe);
+ pipe_num = ia_css_pipe_get_pipe_num(pipe);
+ isp_pipe_version = ia_css_pipe_get_isp_pipe_version(pipe);
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ ia_css_query_internal_queue_id(params->output_frame
+ ? IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET
+ : IA_CSS_BUFFER_TYPE_PARAMETER_SET,
+ thread_id, &queue_id);
+#else
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_PARAMETER_SET, thread_id,
+ &queue_id);
+#endif
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ break;
+ }
+ cur_map = &params->pipe_ddr_ptrs[pipeline->pipe_id];
+ cur_map_size = &params->pipe_ddr_ptrs_size[pipeline->pipe_id];
+
+ /* TODO: Normally, zoom and motion parameters shouldn't be part of
+ * "isp_params", as they are resolution/pipe dependent. Therefore,
+ * move the zoom config elsewhere (the shading table can be taken
+ * as an example). @GC
+ */
+ {
+ /* We have to do this per pipeline because the processing is,
+ * among other things, resolution dependent. */
+ err = ia_css_process_zoom_and_motion(params,
+ pipeline->stages);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ /* check whether to actually update the parameters for this pipe */
+ /* When the API change is implemented, making a proper distinction between
+ * stream config and pipe config, this skipping code can be moved out of the #ifdef */
+ if (pipe_in && (pipe != pipe_in)) {
+ IA_CSS_LOG("skipping pipe %p", pipe);
+ continue;
+ }
+
+ /* BZ 125915: should be moved until after "update other buff" */
+ /* update the other buffers to the pipe specific copies */
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ unsigned int mem;
+
+ if (!stage->binary)
+ continue;
+
+ process_kernel_parameters(pipeline->pipe_id,
+ stage, params,
+ isp_pipe_version, raw_bit_depth);
+
+ err = sh_css_params_write_to_ddr_internal(
+ pipe,
+ pipeline->pipe_id,
+ params,
+ stage,
+ cur_map,
+ cur_map_size);
+
+ if (err != IA_CSS_SUCCESS)
+ break;
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ params->isp_mem_params_changed
+ [pipeline->pipe_id][stage->stage_num][mem] = false;
+ }
+ } /* for */
+ if (err != IA_CSS_SUCCESS)
+ break;
+ /* update isp_params to pipe specific copies */
+ if (params->isp_params_changed) {
+ reallocate_buffer(&cur_map->isp_param,
+ &cur_map_size->isp_param,
+ cur_map_size->isp_param,
+ true,
+ &err);
+ if (err != IA_CSS_SUCCESS)
+ break;
+ sh_css_update_isp_params_to_ddr(params, cur_map->isp_param);
+ }
+
+ /* last make referenced copy */
+ err = ref_sh_css_ddr_address_map(
+ cur_map,
+ &isp_params_info.mem_map);
+ if (err != IA_CSS_SUCCESS)
+ break;
+
+ /* Update Parameters ID */
+ isp_params_info.isp_parameters_id = params->isp_parameters_id;
+
+ /* Update output frame pointer */
+ isp_params_info.output_frame_ptr =
+ (params->output_frame) ? params->output_frame->data : mmgr_NULL;
+
+ /* now write the copy to ddr */
+ err = write_ia_css_isp_parameter_set_info_to_ddr(&isp_params_info, &cpy);
+ if (err != IA_CSS_SUCCESS)
+ break;
+
+ /* enqueue the set to sp */
+ IA_CSS_LOG("queue param set %x to %d", cpy, thread_id);
+
+ err = ia_css_bufq_enqueue_buffer(thread_id, queue_id, (uint32_t)cpy);
+ if (err != IA_CSS_SUCCESS) {
+ free_ia_css_isp_parameter_set_info(cpy);
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ IA_CSS_LOG("pfp: FAILED to add config id %d for OF %d to q %d on thread %d",
+ isp_params_info.isp_parameters_id,
+ isp_params_info.output_frame_ptr,
+ queue_id, thread_id);
+#endif
+ break;
+ } else {
+ /* TMP: check discrepancy between nr of enqueued
+ * parameter sets and dequeued sets
+ */
+ g_param_buffer_enqueue_count++;
+ assert(g_param_buffer_enqueue_count < g_param_buffer_dequeue_count + 50);
+ /*
+ * Tell the SP which queues are not empty,
+ * by sending the software event.
+ */
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED,
+ (uint8_t)thread_id,
+ (uint8_t)queue_id,
+ 0);
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ IA_CSS_LOG("pfp: added config id %d for OF %d to q %d on thread %d",
+ isp_params_info.isp_parameters_id,
+ isp_params_info.output_frame_ptr,
+ queue_id, thread_id);
+#endif
+ }
+ /* clean-up old copy */
+ ia_css_dequeue_param_buffers(/*pipe_num*/);
+ params->pipe_dvs_6axis_config_changed[pipeline->pipe_id] = false;
+ } /* end for each 'active' pipeline */
+ /* clear the changed flags after all params
+ for all pipelines have been updated */
+ params->isp_params_changed = false;
+ params->sc_table_changed = false;
+ params->dis_coef_table_changed = false;
+ params->dvs2_coef_table_changed = false;
+ params->morph_table_changed = false;
+ params->dz_config_changed = false;
+ params->motion_config_changed = false;
+ /* ------ deprecated(bz675) : from ------ */
+ params->shading_settings_changed = false;
+ /* ------ deprecated(bz675) : to ------ */
+
+ memset(&params->config_changed[0], 0, sizeof(params->config_changed));
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
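+/* Note on the TMP counters used above: the assert
+ * g_param_buffer_enqueue_count < g_param_buffer_dequeue_count + 50
+ * bounds the number of parameter sets in flight. For example, after 60
+ * enqueues at least 11 dequeues must already have happened, otherwise
+ * the assert fires.
+ */
+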
+static enum ia_css_err
+sh_css_params_write_to_ddr_internal(
+ struct ia_css_pipe *pipe,
+ unsigned int pipe_id,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *stage,
+ struct sh_css_ddr_address_map *ddr_map,
+ struct sh_css_ddr_address_map_size *ddr_map_size) {
+ enum ia_css_err err;
+ const struct ia_css_binary *binary;
+
+ unsigned int stage_num;
+ unsigned int mem;
+ bool buff_realloced;
+
+ /* struct is > 128 bytes so it should not be on stack (see checkpatch) */
+ static struct ia_css_macc_table converted_macc_table;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(params);
+ assert(ddr_map);
+ assert(ddr_map_size);
+ assert(stage);
+
+ binary = stage->binary;
+ assert(binary);
+
+ stage_num = stage->stage_num;
+
+ if (binary->info->sp.enable.fpnr)
+ {
+ buff_realloced = reallocate_buffer(&ddr_map->fpn_tbl,
+ &ddr_map_size->fpn_tbl,
+ (size_t)(FPNTBL_BYTES(binary)),
+ params->config_changed[IA_CSS_FPN_ID],
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (params->config_changed[IA_CSS_FPN_ID] || buff_realloced) {
+ if (params->fpn_config.enabled) {
+ err = store_fpntbl(params, ddr_map->fpn_tbl);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ }
+ }
+
+ if (binary->info->sp.enable.sc)
+ {
+ u32 enable_conv;
+ size_t bytes;
+
+ if (!atomisp_hw_is_isp2401)
+ bytes = ISP2400_SCTBL_BYTES(binary);
+ else
+ bytes = ISP2401_SCTBL_BYTES(binary);
+
+ enable_conv = params->shading_settings.enable_shading_table_conversion;
+
+ buff_realloced = reallocate_buffer(&ddr_map->sc_tbl,
+ &ddr_map_size->sc_tbl,
+ bytes,
+ params->sc_table_changed,
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (params->shading_settings_changed ||
+ params->sc_table_changed || buff_realloced) {
+ if (enable_conv == 0) {
+ if (params->sc_table) {
+ /* store the shading table to ddr */
+ err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_table);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* set sc_config to isp */
+ params->sc_config = (struct ia_css_shading_table *)params->sc_table;
+ ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
+ params->sc_config = NULL;
+ } else {
+ /* generate the identical shading table */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ sh_css_params_shading_id_table_generate(&params->sc_config,
+ binary->sctbl_width_per_color,
+ binary->sctbl_height);
+ if (!params->sc_config) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ /* store the shading table to ddr */
+ err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* set sc_config to isp */
+ ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
+
+ /* free the shading table */
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ } else { /* legacy */
+ /* ------ deprecated(bz675) : from ------ */
+ /* shading table is full resolution, reduce */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ prepare_shading_table(
+ (const struct ia_css_shading_table *)params->sc_table,
+ params->sensor_binning,
+ &params->sc_config,
+ binary, pipe->required_bds_factor);
+ if (!params->sc_config) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ /* store the shading table to ddr */
+ err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* set sc_config to isp */
+ ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
+
+ /* free the shading table */
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ /* ------ deprecated(bz675) : to ------ */
+ }
+ }
+ }
+
+ /* DPC configuration is made pipe specific to allow flexibility in positioning of the
+ * DPC kernel. The code below sets the pipe specific configuration to
+ * individual binaries. */
+ if (atomisp_hw_is_isp2401 &&
+ params->pipe_dpc_config_changed[pipe_id] && binary->info->sp.enable.dpc)
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
+
+ if (size) {
+ ia_css_dp_encode((struct sh_css_isp_dp_params *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->pipe_dp_config[pipe_id], size);
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+ }
+ }
+
+ if (params->config_changed[IA_CSS_MACC_ID] && binary->info->sp.enable.macc)
+ {
+ unsigned int i, j, idx;
+ unsigned int idx_map[] = {
+ 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
+ };
+
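+ /* Illustrative mapping: axis i supplies 4 coefficients starting at
+ * j = 4 * i, and they land at idx = 4 * idx_map[i] in the converted
+ * table; e.g. axis 2 (j = 8) is stored at idx = 12, axis 4 (j = 16)
+ * at idx = 24. */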
+ for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) {
+ idx = 4 * idx_map[i];
+ j = 4 * i;
+
+ if (binary->info->sp.pipeline.isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1) {
+ converted_macc_table.data[idx] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ converted_macc_table.data[idx + 1] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j + 1],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ converted_macc_table.data[idx + 2] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j + 2],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ converted_macc_table.data[idx + 3] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j + 3],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ } else if (binary->info->sp.pipeline.isp_pipe_version ==
+ SH_CSS_ISP_PIPE_VERSION_2_2) {
+ converted_macc_table.data[idx] =
+ params->macc_table.data[j];
+ converted_macc_table.data[idx + 1] =
+ params->macc_table.data[j + 1];
+ converted_macc_table.data[idx + 2] =
+ params->macc_table.data[j + 2];
+ converted_macc_table.data[idx + 3] =
+ params->macc_table.data[j + 3];
+ }
+ }
+ reallocate_buffer(&ddr_map->macc_tbl,
+ &ddr_map_size->macc_tbl,
+ ddr_map_size->macc_tbl,
+ true,
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ mmgr_store(ddr_map->macc_tbl,
+ converted_macc_table.data,
+ sizeof(converted_macc_table.data));
+ }
+
+ if (binary->info->sp.enable.dvs_6axis)
+ {
+ /* UV is packed into the Y plane, so DVS_6AXIS_BYTES / 2 gives
+ * the size of the UV-only table, and the total YYU size is
+ * three times that.
+ */
+ buff_realloced = reallocate_buffer(
+ &ddr_map->dvs_6axis_params_y,
+ &ddr_map_size->dvs_6axis_params_y,
+ (size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3),
+ params->pipe_dvs_6axis_config_changed[pipe_id],
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (params->pipe_dvs_6axis_config_changed[pipe_id] || buff_realloced) {
+ const struct ia_css_frame_info *dvs_in_frame_info;
+
+ if (stage->args.delay_frames[0]) {
+ /* When delay frames are present (as in the video case),
+ * they are used for DVS. Configure DVS using those params. */
+ dvs_in_frame_info = &stage->args.delay_frames[0]->info;
+ } else {
+ /* Otherwise, use the input frame to configure DVS. */
+ dvs_in_frame_info = &stage->args.in_frame->info;
+ }
+
+ /* Generate a default DVS unity table on startup */
+ if (!params->pipe_dvs_6axis_config[pipe_id]) {
+ struct ia_css_resolution dvs_offset = {0};
+
+ if (!atomisp_hw_is_isp2401) {
+ dvs_offset.width = (PIX_SHIFT_FILTER_RUN_IN_X + binary->dvs_envelope.width) / 2;
+ } else {
+ if (binary->dvs_envelope.width || binary->dvs_envelope.height) {
+ dvs_offset.width = (PIX_SHIFT_FILTER_RUN_IN_X + binary->dvs_envelope.width) / 2;
+ }
+ }
+ dvs_offset.height = (PIX_SHIFT_FILTER_RUN_IN_Y + binary->dvs_envelope.height) / 2;
+
+ params->pipe_dvs_6axis_config[pipe_id] =
+ generate_dvs_6axis_table(&binary->out_frame_info[0].res, &dvs_offset);
+ if (!params->pipe_dvs_6axis_config[pipe_id]) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ params->pipe_dvs_6axis_config_changed[pipe_id] = true;
+ }
+
+ store_dvs_6axis_config(params->pipe_dvs_6axis_config[pipe_id],
+ binary,
+ dvs_in_frame_info,
+ ddr_map->dvs_6axis_params_y);
+ params->isp_params_changed = true;
+ }
+ }
+
+ if (binary->info->sp.enable.ca_gdc)
+ {
+ unsigned int i;
+ hrt_vaddress *virt_addr_tetra_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ size_t *virt_size_tetra_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ hrt_vaddress *virt_addr_tetra_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ size_t *virt_size_tetra_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
+
+ virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
+ virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
+ virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
+ virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
+ virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
+ virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;
+
+ virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
+ virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
+ virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
+ virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
+ virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
+ virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;
+
+ virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
+ virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
+ virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
+ virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
+ virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
+ virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;
+
+ virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
+ virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
+ virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
+ virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
+ virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
+ virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;
+
+ buff_realloced = false;
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ buff_realloced |=
+ reallocate_buffer(virt_addr_tetra_x[i],
+ virt_size_tetra_x[i],
+ (size_t)
+ (MORPH_PLANE_BYTES(binary)),
+ params->morph_table_changed,
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ buff_realloced |=
+ reallocate_buffer(virt_addr_tetra_y[i],
+ virt_size_tetra_y[i],
+ (size_t)
+ (MORPH_PLANE_BYTES(binary)),
+ params->morph_table_changed,
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ if (params->morph_table_changed || buff_realloced) {
+ const struct ia_css_morph_table *table = params->morph_table;
+ struct ia_css_morph_table *id_table = NULL;
+
+ if ((table) &&
+ (table->width < binary->morph_tbl_width ||
+ table->height < binary->morph_tbl_height)) {
+ table = NULL;
+ }
+ if (!table) {
+ err = sh_css_params_default_morph_table(&id_table,
+ binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ table = id_table;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ store_morph_plane(table->coordinates_x[i],
+ table->width,
+ table->height,
+ *virt_addr_tetra_x[i],
+ binary->morph_tbl_aligned_width);
+ store_morph_plane(table->coordinates_y[i],
+ table->width,
+ table->height,
+ *virt_addr_tetra_y[i],
+ binary->morph_tbl_aligned_width);
+ }
+ if (id_table)
+ ia_css_morph_table_free(id_table);
+ }
+ }
+
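+ /* The tetra tables above are handled through small arrays of pointers
+ * (virt_addr_tetra_x[] etc.) so that one loop can reallocate and store
+ * all IA_CSS_MORPH_TABLE_NUM_PLANES planes. A minimal sketch of the same
+ * idiom (illustrative only; 'a' and 'b' are hypothetical fields):
+ *
+ * hrt_vaddress *addr[2] = { &ddr_map->a, &ddr_map->b };
+ * size_t *size[2] = { &ddr_map_size->a, &ddr_map_size->b };
+ * for (i = 0; i < 2; i++)
+ * realloced |= reallocate_buffer(addr[i], size[i], n, force, &err);
+ */
+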
+ /* Done after special cases like SC and FPN, since those may change parameters */
+ for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++)
+ {
+ const struct ia_css_isp_data *isp_data =
+ ia_css_isp_param_get_isp_mem_init(&binary->info->sp.mem_initializers,
+ IA_CSS_PARAM_CLASS_PARAM, mem);
+ size_t size = isp_data->size;
+
+ if (!size)
+ continue;
+ buff_realloced = reallocate_buffer(&ddr_map->isp_mem_param[stage_num][mem],
+ &ddr_map_size->isp_mem_param[stage_num][mem],
+ size,
+ params->isp_mem_params_changed[pipe_id][stage_num][mem],
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (params->isp_mem_params_changed[pipe_id][stage_num][mem] || buff_realloced) {
+ sh_css_update_isp_mem_params_to_ddr(binary,
+ ddr_map->isp_mem_param[stage_num][mem],
+ ddr_map_size->isp_mem_param[stage_num][mem], mem);
+ }
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+const struct ia_css_fpn_table *ia_css_get_fpn_table(struct ia_css_stream
+ *stream)
+{
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER_LEAVE("void");
+ assert(stream);
+
+ params = stream->isp_params_configs;
+
+ return &params->fpn_config;
+}
+
+struct ia_css_shading_table *ia_css_get_shading_table(struct ia_css_stream
+ *stream)
+{
+ struct ia_css_shading_table *table = NULL;
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER("void");
+
+ assert(stream);
+
+ params = stream->isp_params_configs;
+ if (!params)
+ return NULL;
+
+ if (params->shading_settings.enable_shading_table_conversion == 0) {
+ if (params->sc_table) {
+ table = (struct ia_css_shading_table *)params->sc_table;
+ } else {
+ const struct ia_css_binary *binary
+ = ia_css_stream_get_shading_correction_binary(stream);
+ if (binary) {
+ /* generate the identity shading table */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ sh_css_params_shading_id_table_generate(&params->sc_config,
+ binary->sctbl_width_per_color,
+ binary->sctbl_height);
+ table = params->sc_config;
+ /* The sc_config will be freed in the
+ * ia_css_stream_isp_parameters_uninit function. */
+ }
+ }
+ } else {
+ /* ------ deprecated(bz675) : from ------ */
+ const struct ia_css_binary *binary
+ = ia_css_stream_get_shading_correction_binary(stream);
+ struct ia_css_pipe *pipe;
+
+ /**********************************************************************/
+ /* The following code is copied from
+ * ia_css_stream_get_shading_correction_binary() so that the selected
+ * pipe matches the binary. */
+ pipe = stream->pipes[0];
+
+ if (stream->num_pipes == 2) {
+ assert(stream->pipes[1]);
+ if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
+ stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->pipes[1];
+ }
+ /**********************************************************************/
+ if (binary) {
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ prepare_shading_table(
+ (const struct ia_css_shading_table *)params->sc_table,
+ params->sensor_binning,
+ &params->sc_config,
+ binary, pipe->required_bds_factor);
+
+ table = params->sc_config;
+ /* The sc_config will be freed in the
+ * ia_css_stream_isp_parameters_uninit function. */
+ }
+ /* ------ deprecated(bz675) : to ------ */
+ }
+
+ IA_CSS_LEAVE("table=%p", table);
+
+ return table;
+}
+
+hrt_vaddress sh_css_store_sp_group_to_ddr(void)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("void");
+ mmgr_store(xmem_sp_group_ptrs,
+ &sh_css_sp_group,
+ sizeof(struct sh_css_sp_group));
+ return xmem_sp_group_ptrs;
+}
+
+hrt_vaddress sh_css_store_sp_stage_to_ddr(
+ unsigned int pipe,
+ unsigned int stage)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("void");
+ mmgr_store(xmem_sp_stage_ptrs[pipe][stage],
+ &sh_css_sp_stage,
+ sizeof(struct sh_css_sp_stage));
+ return xmem_sp_stage_ptrs[pipe][stage];
+}
+
+hrt_vaddress sh_css_store_isp_stage_to_ddr(
+ unsigned int pipe,
+ unsigned int stage)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("void");
+ mmgr_store(xmem_isp_stage_ptrs[pipe][stage],
+ &sh_css_isp_stage,
+ sizeof(struct sh_css_isp_stage));
+ return xmem_isp_stage_ptrs[pipe][stage];
+}
+
+static enum ia_css_err ref_sh_css_ddr_address_map(
+ struct sh_css_ddr_address_map *map,
+ struct sh_css_ddr_address_map *out)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int i;
+
+ /* We use a union to copy things: overlaying the struct with an
+ array of addresses. That way, adding fields to the struct keeps
+ this code working, and we do not get type errors.
+ */
+ union {
+ struct sh_css_ddr_address_map *map;
+ hrt_vaddress *addrs;
+ } in_addrs, to_addrs;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(map);
+ assert(out);
+
+ in_addrs.map = map;
+ to_addrs.map = out;
+
+ assert(sizeof(struct sh_css_ddr_address_map_size) / sizeof(size_t) ==
+ sizeof(struct sh_css_ddr_address_map) / sizeof(hrt_vaddress));
+
+ /* copy map using size info */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size) /
+ sizeof(size_t)); i++) {
+ if (in_addrs.addrs[i] == mmgr_NULL)
+ to_addrs.addrs[i] = mmgr_NULL;
+ else
+ to_addrs.addrs[i] = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
+ in_addrs.addrs[i]);
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
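+
+/* A minimal sketch of the union-overlay idiom used above (illustrative
+ * only, not driver code): a struct whose members all share one scalar type
+ * can be walked as an array of that type:
+ *
+ * union {
+ * struct pair { hrt_vaddress a, b; } *s;
+ * hrt_vaddress *v;
+ * } u;
+ * struct pair p = { mmgr_NULL, mmgr_NULL };
+ * unsigned int k;
+ *
+ * u.s = &p;
+ * for (k = 0; k < sizeof(p) / sizeof(hrt_vaddress); k++)
+ * u.v[k] = mmgr_NULL;
+ *
+ * This stays valid as fields are added, provided every field keeps the
+ * element type and the struct has no padding.
+ */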
+
+static enum ia_css_err write_ia_css_isp_parameter_set_info_to_ddr(
+ struct ia_css_isp_parameter_set_info *me,
+ hrt_vaddress *out)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool succ;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(me);
+ assert(out);
+
+ *out = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_SET_POOL, mmgr_malloc(
+ sizeof(struct ia_css_isp_parameter_set_info)));
+ succ = (*out != mmgr_NULL);
+ if (succ)
+ mmgr_store(*out,
+ me, sizeof(struct ia_css_isp_parameter_set_info));
+ else
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+free_ia_css_isp_parameter_set_info(
+ hrt_vaddress ptr) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_isp_parameter_set_info isp_params_info;
+ unsigned int i;
+ hrt_vaddress *addrs = (hrt_vaddress *) &isp_params_info.mem_map;
+
+ IA_CSS_ENTER_PRIVATE("ptr = %u", ptr);
+
+ /* sanity check - ptr must be valid */
+ if (!ia_css_refcount_is_valid(ptr))
+ {
+ IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_SET_POOL(0x%x) invalid arg", __func__,
+ ptr);
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ mmgr_load(ptr, &isp_params_info.mem_map, sizeof(struct sh_css_ddr_address_map));
+ /* copy map using size info */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size) /
+ sizeof(size_t)); i++)
+ {
+ if (addrs[i] == mmgr_NULL)
+ continue;
+
+ /* sanity check - ptr must be valid */
+ if (!ia_css_refcount_is_valid(addrs[i])) {
+ IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_BUFFER(0x%x) invalid arg", __func__,
+ addrs[i]);
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ continue;
+ }
+
+ ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
+ }
+ ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_SET_POOL, ptr);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
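+
+/* Reference-counting lifecycle, sketched (illustrative pairing):
+ * ref_sh_css_ddr_address_map() takes one IA_CSS_REFCOUNT_PARAM_BUFFER
+ * reference per non-NULL address in the map, and
+ * write_ia_css_isp_parameter_set_info_to_ddr() takes the
+ * IA_CSS_REFCOUNT_PARAM_SET_POOL reference on the set itself;
+ * free_ia_css_isp_parameter_set_info() above drops both again:
+ *
+ * ref_sh_css_ddr_address_map(&map, &me.mem_map);
+ * write_ia_css_isp_parameter_set_info_to_ddr(&me, &ptr);
+ * ...
+ * free_ia_css_isp_parameter_set_info(ptr);
+ */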
+
+/* Mark all parameters as changed to force recomputing the derived ISP parameters */
+void
+sh_css_invalidate_params(struct ia_css_stream *stream)
+{
+ struct ia_css_isp_parameters *params;
+ unsigned int i, j, mem;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(stream);
+
+ params = stream->isp_params_configs;
+ params->isp_params_changed = true;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ for (j = 0; j < SH_CSS_MAX_STAGES; j++) {
+ for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) {
+ params->isp_mem_params_changed[i][j][mem] = true;
+ }
+ }
+ }
+
+ memset(&params->config_changed[0], 1, sizeof(params->config_changed));
+ params->dis_coef_table_changed = true;
+ params->dvs2_coef_table_changed = true;
+ params->morph_table_changed = true;
+ params->sc_table_changed = true;
+ params->dz_config_changed = true;
+ params->motion_config_changed = true;
+
+ /* Free up the DVS table memory blocks before recomputing a new table */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (params->pipe_dvs_6axis_config[i]) {
+ free_dvs_6axis_table(&params->pipe_dvs_6axis_config[i]);
+ params->pipe_dvs_6axis_config_changed[i] = true;
+ }
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
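+
+/* Typical use, sketched (illustrative): after externally restoring or
+ * editing a stream's parameter set, call sh_css_invalidate_params(stream)
+ * so the next parameter commit re-derives and re-stores every ISP buffer
+ * instead of trusting the stale *_changed bookkeeping.
+ */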
+
+void
+sh_css_update_uds_and_crop_info(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ bool enable_zoom)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(info);
+ assert(in_frame_info);
+ assert(out_frame_info);
+ assert(dvs_env);
+ assert(zoom);
+ assert(motion_vector);
+ assert(uds);
+ assert(sp_out_crop_pos);
+
+ uds->curr_dx = enable_zoom ? (uint16_t)zoom->dx : HRT_GDC_N;
+ uds->curr_dy = enable_zoom ? (uint16_t)zoom->dy : HRT_GDC_N;
+
+ if (info->enable.dvs_envelope) {
+ unsigned int crop_x = 0,
+ crop_y = 0,
+ uds_xc = 0,
+ uds_yc = 0,
+ env_width, env_height;
+ int half_env_x, half_env_y;
+ int motion_x = motion_vector->x;
+ int motion_y = motion_vector->y;
+ bool upscale_x = in_frame_info->res.width < out_frame_info->res.width;
+ bool upscale_y = in_frame_info->res.height < out_frame_info->res.height;
+
+ if (info->enable.uds && !info->enable.ds) {
+ /**
+ * we calculate with the envelope that we can actually
+ * use, the min dvs envelope is for the filter
+ * initialization.
+ */
+ env_width = dvs_env->width -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ env_height = dvs_env->height -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ half_env_x = env_width / 2;
+ half_env_y = env_height / 2;
+ /**
+ * for digital zoom, we use the dvs envelope and make
+ * sure that we don't include the 8 leftmost pixels or
+ * 8 topmost rows.
+ */
+ if (upscale_x) {
+ uds_xc = (in_frame_info->res.width
+ + env_width
+ + SH_CSS_MIN_DVS_ENVELOPE) / 2;
+ } else {
+ uds_xc = (out_frame_info->res.width
+ + env_width) / 2
+ + SH_CSS_MIN_DVS_ENVELOPE;
+ }
+ if (upscale_y) {
+ uds_yc = (in_frame_info->res.height
+ + env_height
+ + SH_CSS_MIN_DVS_ENVELOPE) / 2;
+ } else {
+ uds_yc = (out_frame_info->res.height
+ + env_height) / 2
+ + SH_CSS_MIN_DVS_ENVELOPE;
+ }
+ /* clip the motion vector to +/- half the envelope */
+ motion_x = clamp(motion_x, -half_env_x, half_env_x);
+ motion_y = clamp(motion_y, -half_env_y, half_env_y);
+ uds_xc += motion_x;
+ uds_yc += motion_y;
+ /* uds can be pipelined, remove top lines */
+ crop_y = 2;
+ } else if (info->enable.ds) {
+ env_width = dvs_env->width;
+ env_height = dvs_env->height;
+ half_env_x = env_width / 2;
+ half_env_y = env_height / 2;
+ /* clip the motion vector to +/- half the envelope */
+ motion_x = clamp(motion_x, -half_env_x, half_env_x);
+ motion_y = clamp(motion_y, -half_env_y, half_env_y);
+ /* for video with downscaling, the envelope is included
+ in the input resolution. */
+ uds_xc = in_frame_info->res.width / 2 + motion_x;
+ uds_yc = in_frame_info->res.height / 2 + motion_y;
+ crop_x = info->pipeline.left_cropping;
+ /* ds == 2 (yuv_ds) can be pipelined, remove top
+ lines */
+ if (info->enable.ds & 1)
+ crop_y = info->pipeline.top_cropping;
+ else
+ crop_y = 2;
+ } else {
+ /* video nodz: here we can only crop. We make sure we
+ crop at least the first 8x8 pixels away. */
+ env_width = dvs_env->width -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ env_height = dvs_env->height -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ half_env_x = env_width / 2;
+ half_env_y = env_height / 2;
+ motion_x = clamp(motion_x, -half_env_x, half_env_x);
+ motion_y = clamp(motion_y, -half_env_y, half_env_y);
+ crop_x = SH_CSS_MIN_DVS_ENVELOPE
+ + half_env_x + motion_x;
+ crop_y = SH_CSS_MIN_DVS_ENVELOPE
+ + half_env_y + motion_y;
+ }
+
+ /* Must enforce that the crop position is even */
+ crop_x = EVEN_FLOOR(crop_x);
+ crop_y = EVEN_FLOOR(crop_y);
+ uds_xc = EVEN_FLOOR(uds_xc);
+ uds_yc = EVEN_FLOOR(uds_yc);
+
+ uds->xc = (uint16_t)uds_xc;
+ uds->yc = (uint16_t)uds_yc;
+ sp_out_crop_pos->x = (uint16_t)crop_x;
+ sp_out_crop_pos->y = (uint16_t)crop_y;
+ } else {
+ /* for down scaling, we always use the center of the image */
+ uds->xc = (uint16_t)in_frame_info->res.width / 2;
+ uds->yc = (uint16_t)in_frame_info->res.height / 2;
+ sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping;
+ sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping;
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+}
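+
+/* Worked example for the "video nodz" branch above, assuming (for
+ * illustration) SH_CSS_MIN_DVS_ENVELOPE == 12, dvs_env == 64x64 and
+ * EVEN_FLOOR() rounding down to the nearest even value:
+ *
+ * env = 64 - 12 = 52, half_env = 26
+ * motion (40, -5) clips to (26, -5)
+ * crop_x = 12 + 26 + 26 = 64, crop_y = 12 + 26 - 5 = 33
+ * EVEN_FLOOR -> crop position (64, 32)
+ */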
+
+static enum ia_css_err
+sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ struct ia_css_resolution pipe_in_res,
+ bool enable_zoom) {
+ unsigned int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ /* Note:
+ * Filter_Envelope = 0 for NND/LUT
+ * Filter_Envelope = 1 for BCI
+ * Filter_Envelope = 3 for BLI
+ * The filter envelope is currently not taken into account here because
+ * uds.sp.c recalculates dx/dy from the filter envelope and other
+ * information (ia_css_uds_sp_scale_params). Ideally that should be done
+ * on the host side, not on the SP side.
+ */
+ unsigned int filter_envelope = 0;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(info);
+ assert(in_frame_info);
+ assert(out_frame_info);
+ assert(dvs_env);
+ assert(zoom);
+ assert(motion_vector);
+ assert(uds);
+ assert(sp_out_crop_pos);
+ x0 = zoom->zoom_region.origin.x;
+ y0 = zoom->zoom_region.origin.y;
+ x1 = zoom->zoom_region.resolution.width + x0;
+ y1 = zoom->zoom_region.resolution.height + y0;
+
+ if ((x0 > x1) || (y0 > y1) || (x1 > pipe_in_res.width) || (y1 > pipe_in_res.height))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (!enable_zoom)
+ {
+ uds->curr_dx = HRT_GDC_N;
+ uds->curr_dy = HRT_GDC_N;
+ }
+
+ if (info->enable.dvs_envelope)
+ {
+ /* The zoom region is only supported by the UDS module on ISP 2
+ * and higher. It is not supported in video mode on ISP 1. */
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else
+ {
+ if (enable_zoom) {
+ /* A. Calculate dx/dy based on the crop region using in_frame_info.
+ * Scale the crop region if the in_frame_info of this stage is not
+ * the same as the actual effective input of the pipeline.
+ */
+ if (in_frame_info->res.width != pipe_in_res.width ||
+ in_frame_info->res.height != pipe_in_res.height) {
+ x0 = (x0 * in_frame_info->res.width) / (pipe_in_res.width);
+ y0 = (y0 * in_frame_info->res.height) / (pipe_in_res.height);
+ x1 = (x1 * in_frame_info->res.width) / (pipe_in_res.width);
+ y1 = (y1 * in_frame_info->res.height) / (pipe_in_res.height);
+ }
+ uds->curr_dx =
+ ((x1 - x0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.width;
+ uds->curr_dy =
+ ((y1 - y0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.height;
+
+ /* B. Calculate xc/yc based on crop region */
+ uds->xc = (uint16_t)(x0 + (x1 - x0) / 2);
+ uds->yc = (uint16_t)(y0 + (y1 - y0) / 2);
+ } else {
+ uds->xc = (uint16_t)in_frame_info->res.width / 2;
+ uds->yc = (uint16_t)in_frame_info->res.height / 2;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "uds->curr_dx=%d, uds->xc=%d, uds->yc=%d\n",
+ uds->curr_dx, uds->xc, uds->yc);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x0=%d, y0=%d, x1=%d, y1=%d\n",
+ x0, y0, x1, y1);
+ sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping;
+ sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping;
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+ return err;
+}
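+
+/* Scaling example, sketched (illustrative): HRT_GDC_N is the unity scale
+ * factor (see the no-zoom defaults above), so with filter_envelope == 0 a
+ * zoom region spanning half the input width yields
+ *
+ * curr_dx = ((in_w / 2) * HRT_GDC_N) / in_w = HRT_GDC_N / 2,
+ *
+ * i.e. a 2x digital zoom centred on (xc, yc) in the middle of the region.
+ */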
+
+struct ia_css_3a_statistics *
+ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
+{
+ struct ia_css_3a_statistics *me;
+ int grid_size;
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ assert(grid);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+ grid_size = grid->width * grid->height;
+ me->data = sh_css_malloc(grid_size * sizeof(*me->data));
+ if (!me->data)
+ goto err;
+#if !defined(HAS_NO_HMEM)
+ /* No weighted histogram, no structure, treat the histogram data as a byte dump in a byte array */
+ me->rgby_data = (struct ia_css_3a_rgby_output *)sh_css_malloc(sizeof_hmem(
+ HMEM0_ID));
+#else
+ me->rgby_data = NULL;
+#endif
+
+ IA_CSS_LEAVE("return=%p", me);
+ return me;
+err:
+ ia_css_3a_statistics_free(me);
+
+ IA_CSS_LEAVE("return=%p", NULL);
+ return NULL;
+}
+
+void
+ia_css_3a_statistics_free(struct ia_css_3a_statistics *me)
+{
+ if (me) {
+ sh_css_free(me->rgby_data);
+ sh_css_free(me->data);
+ memset(me, 0, sizeof(struct ia_css_3a_statistics));
+ sh_css_free(me);
+ }
+}
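+
+/* Usage sketch for the allocate/free pairs in this block (illustrative):
+ * each _allocate() callocs the container, then allocates every member,
+ * jumping to the err label on failure. Because the container is zeroed
+ * first, the matching _free() can safely be called on a partially
+ * constructed object; sh_css_free() is assumed to ignore NULL pointers,
+ * like kfree(NULL).
+ *
+ * struct ia_css_3a_statistics *s = ia_css_3a_statistics_allocate(&grid);
+ *
+ * if (s) {
+ * ... consume s->data and s->rgby_data ...
+ * ia_css_3a_statistics_free(s);
+ * }
+ */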
+
+struct ia_css_dvs_statistics *
+ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs_statistics *me;
+
+ assert(grid);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+ me->hor_proj = sh_css_malloc(grid->height * IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->hor_proj));
+ if (!me->hor_proj)
+ goto err;
+
+ me->ver_proj = sh_css_malloc(grid->width * IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->ver_proj));
+ if (!me->ver_proj)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs_statistics_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me)
+{
+ if (me) {
+ sh_css_free(me->hor_proj);
+ sh_css_free(me->ver_proj);
+ memset(me, 0, sizeof(struct ia_css_dvs_statistics));
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_dvs_coefficients *
+ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs_coefficients *me;
+
+ assert(grid);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+
+ me->hor_coefs = sh_css_malloc(grid->num_hor_coefs *
+ IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->hor_coefs));
+ if (!me->hor_coefs)
+ goto err;
+
+ me->ver_coefs = sh_css_malloc(grid->num_ver_coefs *
+ IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->ver_coefs));
+ if (!me->ver_coefs)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs_coefficients_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me)
+{
+ if (me) {
+ sh_css_free(me->hor_coefs);
+ sh_css_free(me->ver_coefs);
+ memset(me, 0, sizeof(struct ia_css_dvs_coefficients));
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_dvs2_statistics *
+ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs2_statistics *me;
+
+ assert(grid);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+
+ me->hor_prod.odd_real = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->hor_prod.odd_real));
+ if (!me->hor_prod.odd_real)
+ goto err;
+
+ me->hor_prod.odd_imag = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->hor_prod.odd_imag));
+ if (!me->hor_prod.odd_imag)
+ goto err;
+
+ me->hor_prod.even_real = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->hor_prod.even_real));
+ if (!me->hor_prod.even_real)
+ goto err;
+
+ me->hor_prod.even_imag = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->hor_prod.even_imag));
+ if (!me->hor_prod.even_imag)
+ goto err;
+
+ me->ver_prod.odd_real = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->ver_prod.odd_real));
+ if (!me->ver_prod.odd_real)
+ goto err;
+
+ me->ver_prod.odd_imag = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->ver_prod.odd_imag));
+ if (!me->ver_prod.odd_imag)
+ goto err;
+
+ me->ver_prod.even_real = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->ver_prod.even_real));
+ if (!me->ver_prod.even_real)
+ goto err;
+
+ me->ver_prod.even_imag = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->ver_prod.even_imag));
+ if (!me->ver_prod.even_imag)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs2_statistics_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me)
+{
+ if (me) {
+ sh_css_free(me->hor_prod.odd_real);
+ sh_css_free(me->hor_prod.odd_imag);
+ sh_css_free(me->hor_prod.even_real);
+ sh_css_free(me->hor_prod.even_imag);
+ sh_css_free(me->ver_prod.odd_real);
+ sh_css_free(me->ver_prod.odd_imag);
+ sh_css_free(me->ver_prod.even_real);
+ sh_css_free(me->ver_prod.even_imag);
+ memset(me, 0, sizeof(struct ia_css_dvs2_statistics));
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_dvs2_coefficients *
+ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs2_coefficients *me;
+
+ assert(grid);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+
+ me->hor_coefs.odd_real = sh_css_malloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.odd_real));
+ if (!me->hor_coefs.odd_real)
+ goto err;
+
+ me->hor_coefs.odd_imag = sh_css_malloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.odd_imag));
+ if (!me->hor_coefs.odd_imag)
+ goto err;
+
+ me->hor_coefs.even_real = sh_css_malloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.even_real));
+ if (!me->hor_coefs.even_real)
+ goto err;
+
+ me->hor_coefs.even_imag = sh_css_malloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.even_imag));
+ if (!me->hor_coefs.even_imag)
+ goto err;
+
+ me->ver_coefs.odd_real = sh_css_malloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.odd_real));
+ if (!me->ver_coefs.odd_real)
+ goto err;
+
+ me->ver_coefs.odd_imag = sh_css_malloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.odd_imag));
+ if (!me->ver_coefs.odd_imag)
+ goto err;
+
+ me->ver_coefs.even_real = sh_css_malloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.even_real));
+ if (!me->ver_coefs.even_real)
+ goto err;
+
+ me->ver_coefs.even_imag = sh_css_malloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.even_imag));
+ if (!me->ver_coefs.even_imag)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs2_coefficients_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me)
+{
+ if (me) {
+ sh_css_free(me->hor_coefs.odd_real);
+ sh_css_free(me->hor_coefs.odd_imag);
+ sh_css_free(me->hor_coefs.even_real);
+ sh_css_free(me->hor_coefs.even_imag);
+ sh_css_free(me->ver_coefs.odd_real);
+ sh_css_free(me->ver_coefs.odd_imag);
+ sh_css_free(me->ver_coefs.even_real);
+ sh_css_free(me->ver_coefs.even_imag);
+ memset(me, 0, sizeof(struct ia_css_dvs2_coefficients));
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_dvs_6axis_config *
+ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream)
+{
+ struct ia_css_dvs_6axis_config *dvs_config = NULL;
+ struct ia_css_isp_parameters *params = NULL;
+ unsigned int width_y;
+ unsigned int height_y;
+ unsigned int width_uv;
+ unsigned int height_uv;
+
+ assert(stream);
+ params = stream->isp_params_configs;
+
+ /* For backward compatibility, consider the pipe to be video by default. */
+ if (!params || !params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO])
+ goto err;
+
+ dvs_config = (struct ia_css_dvs_6axis_config *)sh_css_calloc(1,
+ sizeof(struct ia_css_dvs_6axis_config));
+ if (!dvs_config)
+ goto err;
+
+ dvs_config->width_y = width_y =
+ params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_y;
+ dvs_config->height_y = height_y =
+ params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_y;
+ dvs_config->width_uv = width_uv =
+ params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_uv;
+ dvs_config->height_uv = height_uv =
+ params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_uv;
+ IA_CSS_LOG("table Y: W %d H %d", width_y, height_y);
+ IA_CSS_LOG("table UV: W %d H %d", width_uv, height_uv);
+ dvs_config->xcoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(
+ uint32_t));
+ if (!dvs_config->xcoords_y)
+ goto err;
+
+ dvs_config->ycoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(
+ uint32_t));
+ if (!dvs_config->ycoords_y)
+ goto err;
+
+ dvs_config->xcoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv *
+ sizeof(uint32_t));
+ if (!dvs_config->xcoords_uv)
+ goto err;
+
+ dvs_config->ycoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv *
+ sizeof(uint32_t));
+ if (!dvs_config->ycoords_uv)
+ goto err;
+
+ return dvs_config;
+err:
+ ia_css_dvs2_6axis_config_free(dvs_config);
+ return NULL;
+}
+
+void
+ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config)
+{
+ if (dvs_6axis_config) {
+ sh_css_free(dvs_6axis_config->xcoords_y);
+ sh_css_free(dvs_6axis_config->ycoords_y);
+ sh_css_free(dvs_6axis_config->xcoords_uv);
+ sh_css_free(dvs_6axis_config->ycoords_uv);
+ memset(dvs_6axis_config, 0, sizeof(struct ia_css_dvs_6axis_config));
+ sh_css_free(dvs_6axis_config);
+ }
+}
+
+void
+ia_css_en_dz_capt_pipe(struct ia_css_stream *stream, bool enable)
+{
+ struct ia_css_pipe *pipe;
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+ enum ia_css_pipe_id pipe_id;
+ enum ia_css_err err;
+ int i;
+
+ if (!stream)
+ return;
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ pipe = stream->pipes[i];
+ pipeline = ia_css_pipe_get_pipeline(pipe);
+ pipe_id = pipeline->pipe_id;
+
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE) {
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP,
+ &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = enable;
+ break;
+ }
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.h b/drivers/staging/media/atomisp/pci/sh_css_params.h
new file mode 100644
index 000000000000..96d503967fd1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.h
@@ -0,0 +1,188 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_PARAMS_H_
+#define _SH_CSS_PARAMS_H_
+
+/*! \file */
+
+/* Forward declaration to break mutual dependency */
+struct ia_css_isp_parameters;
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_legacy.h"
+
+#include "sh_css_defs.h" /* SH_CSS_MAX_STAGES */
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_params.h"
+#include "uds/uds_1.0/ia_css_uds_param.h"
+#include "crop/crop_1.0/ia_css_crop_types.h"
+
+#define PIX_SHIFT_FILTER_RUN_IN_X 12
+#define PIX_SHIFT_FILTER_RUN_IN_Y 12
+
+#include "ob/ob_1.0/ia_css_ob_param.h"
+/* Isp configurations per stream */
+struct sh_css_isp_param_configs {
+ /* OB (Optical Black) */
+ struct sh_css_isp_ob_stream_config ob;
+};
+
+/* Isp parameters per stream */
+struct ia_css_isp_parameters {
+ /* UDS */
+ struct sh_css_sp_uds_params uds[SH_CSS_MAX_STAGES];
+ struct sh_css_isp_param_configs stream_configs;
+ struct ia_css_fpn_table fpn_config;
+ struct ia_css_vector motion_config;
+ const struct ia_css_morph_table *morph_table;
+ const struct ia_css_shading_table *sc_table;
+ struct ia_css_shading_table *sc_config;
+ struct ia_css_macc_table macc_table;
+ struct ia_css_gamma_table gc_table;
+ struct ia_css_ctc_table ctc_table;
+ struct ia_css_xnr_table xnr_table;
+
+ struct ia_css_dz_config dz_config;
+ struct ia_css_3a_config s3a_config;
+ struct ia_css_wb_config wb_config;
+ struct ia_css_cc_config cc_config;
+ struct ia_css_cc_config yuv2rgb_cc_config;
+ struct ia_css_cc_config rgb2yuv_cc_config;
+ struct ia_css_tnr_config tnr_config;
+ struct ia_css_ob_config ob_config;
+ /*----- DPC configuration -----*/
+ /* The default DPC configuration is retained and currently set
+ * using the stream configuration. The code generated from genparams
+ * uses this configuration to set the DPC parameters per stage but this
+ * will be overwritten by the per pipe configuration */
+ struct ia_css_dp_config dp_config;
+ /* ------ pipe specific DPC configuration ------ */
+ /* Please note that this implementation is a temporary solution and
+ * should be replaced by CSS per pipe configuration when the support
+ * is ready (HSD 1303967698)*/
+ struct ia_css_dp_config pipe_dp_config[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_nr_config nr_config;
+ struct ia_css_ee_config ee_config;
+ struct ia_css_de_config de_config;
+ struct ia_css_gc_config gc_config;
+ struct ia_css_anr_config anr_config;
+ struct ia_css_ce_config ce_config;
+ struct ia_css_formats_config formats_config;
+ /* ---- deprecated: replaced with pipe_dvs_6axis_config---- */
+ struct ia_css_dvs_6axis_config *dvs_6axis_config;
+ struct ia_css_ecd_config ecd_config;
+ struct ia_css_ynr_config ynr_config;
+ struct ia_css_yee_config yee_config;
+ struct ia_css_fc_config fc_config;
+ struct ia_css_cnr_config cnr_config;
+ struct ia_css_macc_config macc_config;
+ struct ia_css_ctc_config ctc_config;
+ struct ia_css_aa_config aa_config;
+ struct ia_css_aa_config bds_config;
+ struct ia_css_aa_config raa_config;
+ struct ia_css_rgb_gamma_table r_gamma_table;
+ struct ia_css_rgb_gamma_table g_gamma_table;
+ struct ia_css_rgb_gamma_table b_gamma_table;
+ struct ia_css_anr_thres anr_thres;
+ struct ia_css_xnr_config xnr_config;
+ struct ia_css_xnr3_config xnr3_config;
+ struct ia_css_uds_config uds_config;
+ struct ia_css_crop_config crop_config;
+ struct ia_css_output_config output_config;
+ struct ia_css_dvs_6axis_config *pipe_dvs_6axis_config[IA_CSS_PIPE_ID_NUM];
+ /* ------ deprecated(bz675) : from ------ */
+ struct ia_css_shading_settings shading_settings;
+ /* ------ deprecated(bz675) : to ------ */
+ struct ia_css_dvs_coefficients dvs_coefs;
+ struct ia_css_dvs2_coefficients dvs2_coefs;
+
+ bool isp_params_changed;
+
+ bool isp_mem_params_changed
+ [IA_CSS_PIPE_ID_NUM][SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES];
+ bool dz_config_changed;
+ bool motion_config_changed;
+ bool dis_coef_table_changed;
+ bool dvs2_coef_table_changed;
+ bool morph_table_changed;
+ bool sc_table_changed;
+ bool sc_table_dirty;
+ unsigned int sc_table_last_pipe_num;
+ bool anr_thres_changed;
+ /* ---- deprecated: replaced with pipe_dvs_6axis_config_changed ---- */
+ bool dvs_6axis_config_changed;
+ /* ------ pipe specific DPC configuration ------ */
+ /* Please note that this implementation is a temporary solution and
+ * should be replaced by CSS per pipe configuration when the support
+ * is ready (HSD 1303967698) */
+ bool pipe_dpc_config_changed[IA_CSS_PIPE_ID_NUM];
+ /* ------ deprecated(bz675) : from ------ */
+ bool shading_settings_changed;
+ /* ------ deprecated(bz675) : to ------ */
+ bool pipe_dvs_6axis_config_changed[IA_CSS_PIPE_ID_NUM];
+
+ bool config_changed[IA_CSS_NUM_PARAMETER_IDS];
+
+ unsigned int sensor_binning;
+ /* local buffers, used to re-order the 3a statistics in vmem-format */
+ struct sh_css_ddr_address_map pipe_ddr_ptrs[IA_CSS_PIPE_ID_NUM];
+ struct sh_css_ddr_address_map_size pipe_ddr_ptrs_size[IA_CSS_PIPE_ID_NUM];
+ struct sh_css_ddr_address_map ddr_ptrs;
+ struct sh_css_ddr_address_map_size ddr_ptrs_size;
+ struct ia_css_frame
+ *output_frame; /** Output frame the config is to be applied to (optional) */
+ u32 isp_parameters_id; /** Unique ID to track which config was actually applied to a particular frame */
+};
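+
+/* Note on the *_changed flags above: they implement lazy parameter commits.
+ * Setters only mark state dirty; the commit path re-stores a DDR buffer
+ * only when its flag is set or the buffer had to be reallocated, and
+ * sh_css_invalidate_params() sets every flag to force a full re-store.
+ */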
+
+void
+ia_css_params_store_ia_css_host_data(
+ hrt_vaddress ddr_addr,
+ struct ia_css_host_data *data);
+
+enum ia_css_err
+ia_css_params_store_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ hrt_vaddress ddr_addr,
+ const struct ia_css_shading_table *shading_table);
+
+struct ia_css_host_data *
+ia_css_params_alloc_convert_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ const struct ia_css_shading_table *shading_table);
+
+struct ia_css_isp_config *
+sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe);
+
+/* ipu address allocation/free for gdc lut */
+hrt_vaddress
+sh_css_params_alloc_gdc_lut(void);
+void
+sh_css_params_free_gdc_lut(hrt_vaddress addr);
+
+enum ia_css_err
+sh_css_params_map_and_store_default_gdc_lut(void);
+
+void
+sh_css_params_free_default_gdc_lut(void);
+
+hrt_vaddress
+sh_css_params_get_default_gdc_lut(void);
+
+hrt_vaddress
+sh_css_pipe_get_pp_gdc_lut(const struct ia_css_pipe *pipe);
+
+#endif /* _SH_CSS_PARAMS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params_internal.h b/drivers/staging/media/atomisp/pci/sh_css_params_internal.h
new file mode 100644
index 000000000000..baca24532f9f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_params_internal.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_PARAMS_INTERNAL_H_
+#define _SH_CSS_PARAMS_INTERNAL_H_
+
+void
+sh_css_param_clear_param_sets(void);
+
+#endif /* _SH_CSS_PARAMS_INTERNAL_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_pipe.c b/drivers/staging/media/atomisp/pci/sh_css_pipe.c
new file mode 100644
index 000000000000..1f57ffad8921
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_pipe.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_pipe.h and ia_css_pipe_public.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_properties.c b/drivers/staging/media/atomisp/pci/sh_css_properties.c
new file mode 100644
index 000000000000..50f99c53c3d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_properties.c
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_properties.h"
+#include <assert_support.h>
+#include "ia_css_types.h"
+#include "gdc_device.h"
+
+void
+ia_css_get_properties(struct ia_css_properties *properties)
+{
+ assert(properties);
+#if defined(HAS_GDC_VERSION_2) || defined(HAS_GDC_VERSION_3)
+ /*
+ * MW: We don't want to store the coordinates
+ * full range in memory: Truncate
+ */
+ properties->gdc_coord_one = gdc_get_unity(GDC0_ID) / HRT_GDC_COORD_SCALE;
+#else
+#error "Unknown GDC version"
+#endif
+
+ properties->l1_base_is_index = true;
+
+#if defined(HAS_VAMEM_VERSION_1)
+ properties->vamem_type = IA_CSS_VAMEM_TYPE_1;
+#elif defined(HAS_VAMEM_VERSION_2)
+ properties->vamem_type = IA_CSS_VAMEM_TYPE_2;
+#else
+#error "Unknown VAMEM version"
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_shading.c b/drivers/staging/media/atomisp/pci/sh_css_shading.c
new file mode 100644
index 000000000000..2a2d0f4db44b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_shading.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_shading.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.c b/drivers/staging/media/atomisp/pci/sh_css_sp.c
new file mode 100644
index 000000000000..e574396ad0f4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.c
@@ -0,0 +1,1829 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "sh_css_sp.h"
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "input_formatter.h"
+#endif
+
+#include "dma.h" /* N_DMA_CHANNEL_ID */
+
+#include "ia_css_buffer.h"
+#include "ia_css_binary.h"
+#include "sh_css_hrt.h"
+#include "sh_css_defs.h"
+#include "sh_css_internal.h"
+#include "ia_css_control.h"
+#include "ia_css_debug.h"
+#include "ia_css_debug_pipe.h"
+#include "ia_css_event_public.h"
+#include "ia_css_mmu.h"
+#include "ia_css_stream.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_params.h"
+#include "sh_css_legacy.h"
+#include "ia_css_frame_comm.h"
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "ia_css_isys.h"
+#endif
+
+#include "gdc_device.h" /* HRT_GDC_N */
+
+/*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */
+
+#include "memory_access.h"
+
+#include "assert_support.h"
+#include "platform_support.h" /* hrt_sleep() */
+
+#include "sw_event_global.h" /* Event IDs.*/
+#include "ia_css_event.h"
+#include "mmu_device.h"
+#include "ia_css_spctrl.h"
+
+#ifndef offsetof
+#define offsetof(T, x) ((unsigned int)&(((T *)0)->x))
+#endif
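+
+/* offsetof() is used below to turn a field's byte position inside
+ * struct sh_css_sp_output into a word index for load_sp_array_uint(),
+ * e.g. (see sh_css_sp_get_binary_copy_size()):
+ *
+ * offset = offsetof(struct sh_css_sp_output, bin_copy_bytes_copied)
+ * / sizeof(int);
+ */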
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#define IA_CSS_INCLUDE_STATES
+#include "ia_css_isp_states.h"
+
+#include "isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h"
+
+struct sh_css_sp_group sh_css_sp_group;
+struct sh_css_sp_stage sh_css_sp_stage;
+struct sh_css_isp_stage sh_css_isp_stage;
+static struct sh_css_sp_output sh_css_sp_output;
+static struct sh_css_sp_per_frame_data per_frame_data;
+
+/* true if SP supports frame loop and host2sp_commands */
+/* For the moment there is only code that sets this bool to true */
+/* TODO: add code that sets this bool to false */
+static bool sp_running;
+
+static enum ia_css_err
+set_output_frame_buffer(const struct ia_css_frame *frame,
+ unsigned int idx);
+
+static void
+sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf,
+ const enum sh_css_queue_id queue_id,
+ const hrt_vaddress xmem_addr,
+ const enum ia_css_buffer_type buf_type);
+
+static void
+initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr);
+
+static void
+initialize_stage_frames(struct ia_css_frames_sp *frames);
+
+/* This data is stored every frame */
+void
+store_sp_group_data(void)
+{
+ per_frame_data.sp_group_addr = sh_css_store_sp_group_to_ddr();
+}
+
+static void
+copy_isp_stage_to_sp_stage(void)
+{
+ /* [WW07.5] type casting may cause issues */
+ sh_css_sp_stage.num_stripes = (uint8_t)
+ sh_css_isp_stage.binary_info.iterator.num_stripes;
+ sh_css_sp_stage.row_stripes_height = (uint16_t)
+ sh_css_isp_stage.binary_info.iterator.row_stripes_height;
+ sh_css_sp_stage.row_stripes_overlap_lines = (uint16_t)
+ sh_css_isp_stage.binary_info.iterator.row_stripes_overlap_lines;
+ sh_css_sp_stage.top_cropping = (uint16_t)
+ sh_css_isp_stage.binary_info.pipeline.top_cropping;
+ /* moved to sh_css_sp_init_stage
+ sh_css_sp_stage.enable.vf_output =
+ sh_css_isp_stage.binary_info.enable.vf_veceven ||
+ sh_css_isp_stage.binary_info.num_output_pins > 1;
+ */
+ sh_css_sp_stage.enable.sdis = sh_css_isp_stage.binary_info.enable.dis;
+ sh_css_sp_stage.enable.s3a = sh_css_isp_stage.binary_info.enable.s3a;
+}
+
+void
+store_sp_stage_data(enum ia_css_pipe_id id, unsigned int pipe_num,
+ unsigned int stage)
+{
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ copy_isp_stage_to_sp_stage();
+ if (id != IA_CSS_PIPE_ID_COPY)
+ sh_css_sp_stage.isp_stage_addr =
+ sh_css_store_isp_stage_to_ddr(pipe_num, stage);
+ sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] =
+ sh_css_store_sp_stage_to_ddr(pipe_num, stage);
+
+ /* Clear for next frame */
+ sh_css_sp_stage.program_input_circuit = false;
+}
+
+static void
+store_sp_per_frame_data(const struct ia_css_fw_info *fw)
+{
+ unsigned int HIVE_ADDR_sp_per_frame_data = 0;
+
+ assert(fw);
+
+ switch (fw->type) {
+ case ia_css_sp_firmware:
+ HIVE_ADDR_sp_per_frame_data = fw->info.sp.per_frame_data;
+ break;
+ case ia_css_acc_firmware:
+ HIVE_ADDR_sp_per_frame_data = fw->info.acc.per_frame_data;
+ break;
+ case ia_css_isp_firmware:
+ return;
+ }
+
+ sp_dmem_store(SP0_ID,
+ (unsigned int)sp_address_of(sp_per_frame_data),
+ &per_frame_data,
+ sizeof(per_frame_data));
+}
+
+static void
+sh_css_store_sp_per_frame_data(enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ const struct ia_css_fw_info *sp_fw)
+{
+ if (!sp_fw)
+ sp_fw = &sh_css_sp_fw;
+
+ store_sp_stage_data(pipe_id, pipe_num, 0);
+ store_sp_group_data();
+ store_sp_per_frame_data(sp_fw);
+}
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+void
+sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
+ unsigned int i;
+ unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
+ debug) / sizeof(int);
+
+ assert(state);
+
+ (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
+ for (i = 0; i < sizeof(*state) / sizeof(int); i++)
+ ((unsigned *)state)[i] = load_sp_array_uint(sp_output, i + offset);
+}
+
+#endif
+
+void
+sh_css_sp_start_binary_copy(unsigned int pipe_num,
+ struct ia_css_frame *out_frame,
+ unsigned int two_ppc)
+{
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ struct sh_css_sp_pipeline *pipe;
+ u8 stage_num = 0;
+
+ assert(out_frame);
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ pipe = &sh_css_sp_group.pipe[thread_id];
+
+ pipe->copy.bin.bytes_available = out_frame->data_bytes;
+ pipe->num_stages = 1;
+ pipe->pipe_id = pipe_id;
+ pipe->pipe_num = pipe_num;
+ pipe->thread_id = thread_id;
+ pipe->pipe_config = 0x0; /* No parameters */
+ pipe->pipe_qos_config = QOS_INVALID;
+
+ if (pipe->inout_port_config == 0) {
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ }
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe->pipe_id, pipe->inout_port_config);
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
+#else
+ (void)two_ppc;
+#endif
+
+ sh_css_sp_stage.num = stage_num;
+ sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
+ sh_css_sp_stage.func =
+ (unsigned int)IA_CSS_PIPELINE_BIN_COPY;
+
+ set_output_frame_buffer(out_frame, 0);
+
+ /* sp_bin_copy_init on the SP does not deal with dynamic/static yet. */
+ /* For now, always update the dynamic data from the out frames. */
+ sh_css_store_sp_per_frame_data(pipe_id, pipe_num, &sh_css_sp_fw);
+}
+
+static void
+sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
+ unsigned int pipe_num,
+ unsigned int two_ppc,
+ unsigned int max_input_width,
+ enum sh_css_pipe_config_override pipe_conf_override,
+ unsigned int if_config_index)
+{
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ u8 stage_num = 0;
+ struct sh_css_sp_pipeline *pipe;
+
+ assert(out_frame);
+
+ {
+ /*
+ * Clear sh_css_sp_stage for easy debugging.
+ * program_input_circuit must be saved as it is set outside
+ * this function.
+ */
+ u8 program_input_circuit;
+
+ program_input_circuit = sh_css_sp_stage.program_input_circuit;
+ memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
+ sh_css_sp_stage.program_input_circuit = program_input_circuit;
+ }
+
+ pipe_id = IA_CSS_PIPE_ID_COPY;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ pipe = &sh_css_sp_group.pipe[thread_id];
+
+ pipe->copy.raw.height = out_frame->info.res.height;
+ pipe->copy.raw.width = out_frame->info.res.width;
+ pipe->copy.raw.padded_width = out_frame->info.padded_width;
+ pipe->copy.raw.raw_bit_depth = out_frame->info.raw_bit_depth;
+ pipe->copy.raw.max_input_width = max_input_width;
+ pipe->num_stages = 1;
+ pipe->pipe_id = pipe_id;
+ /* TODO: the next field indicates from which queues parameters need
+ to be sampled; this needs checking/improvement */
+ if (pipe_conf_override == SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD)
+ pipe->pipe_config =
+ (SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id);
+ else
+ pipe->pipe_config = pipe_conf_override;
+
+ pipe->pipe_qos_config = QOS_INVALID;
+
+ if (pipe->inout_port_config == 0) {
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ }
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe->pipe_id, pipe->inout_port_config);
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
+#else
+ (void)two_ppc;
+#endif
+
+ sh_css_sp_stage.num = stage_num;
+ sh_css_sp_stage.xmem_bin_addr = 0x0;
+ sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
+ sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_RAW_COPY;
+ sh_css_sp_stage.if_config_index = (uint8_t)if_config_index;
+ set_output_frame_buffer(out_frame, 0);
+
+ ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame);
+}
+
+static void
+sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
+ unsigned int pipe_num, unsigned int max_input_width,
+ unsigned int if_config_index)
+{
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ u8 stage_num = 0;
+ struct sh_css_sp_pipeline *pipe;
+#if defined SH_CSS_ENABLE_METADATA
+ enum sh_css_queue_id queue_id;
+#endif
+
+ assert(out_frame);
+
+ {
+ /*
+ * Clear sh_css_sp_stage for easy debugging.
+ * program_input_circuit must be saved as it is set outside
+ * this function.
+ */
+ u8 program_input_circuit;
+
+ program_input_circuit = sh_css_sp_stage.program_input_circuit;
+ memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
+ sh_css_sp_stage.program_input_circuit = program_input_circuit;
+ }
+
+ pipe_id = IA_CSS_PIPE_ID_COPY;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ pipe = &sh_css_sp_group.pipe[thread_id];
+
+ pipe->copy.raw.height = out_frame->info.res.height;
+ pipe->copy.raw.width = out_frame->info.res.width;
+ pipe->copy.raw.padded_width = out_frame->info.padded_width;
+ pipe->copy.raw.raw_bit_depth = out_frame->info.raw_bit_depth;
+ pipe->copy.raw.max_input_width = max_input_width;
+ pipe->num_stages = 1;
+ pipe->pipe_id = pipe_id;
+ pipe->pipe_config = 0x0; /* No parameters */
+ pipe->pipe_qos_config = QOS_INVALID;
+
+ initialize_stage_frames(&sh_css_sp_stage.frames);
+ sh_css_sp_stage.num = stage_num;
+ sh_css_sp_stage.xmem_bin_addr = 0x0;
+ sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
+ sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_ISYS_COPY;
+ sh_css_sp_stage.if_config_index = (uint8_t)if_config_index;
+
+ set_output_frame_buffer(out_frame, 0);
+
+#if defined SH_CSS_ENABLE_METADATA
+ if (pipe->metadata.height > 0) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id,
+ &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf,
+ queue_id, mmgr_EXCEPTION,
+ IA_CSS_BUFFER_TYPE_METADATA);
+ }
+#endif
+
+ ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame);
+}
+
+unsigned int
+sh_css_sp_get_binary_copy_size(void)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
+ unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
+ bin_copy_bytes_copied) / sizeof(int);
+ (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
+ return load_sp_array_uint(sp_output, offset);
+}
+
+unsigned int
+sh_css_sp_get_sw_interrupt_value(unsigned int irq)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
+ unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
+ sw_interrupt_value)
+ / sizeof(int);
+ (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
+ return load_sp_array_uint(sp_output, offset + irq);
+}
+
+static void
+sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf,
+ const enum sh_css_queue_id queue_id,
+ const hrt_vaddress xmem_addr,
+ const enum ia_css_buffer_type buf_type)
+{
+ assert(buf_type < IA_CSS_NUM_BUFFER_TYPE);
+ if (queue_id > SH_CSS_INVALID_QUEUE_ID) {
+ /*
+ * value >=0 indicates that function init_frame_pointers()
+ * should use the dynamic data address
+ */
+ assert(queue_id < SH_CSS_MAX_NUM_QUEUES);
+
+ /* Klocwork assumes assert can be disabled:
+ since we can get here with any type, and Klocwork does not
+ know that frame_in->dynamic_data_index can only be set for
+ one of the types in the assert, it has to assume we can get
+ here with any type. However, that could lead to an
+ out-of-bounds reference when indexing buf_type about 10
+ lines below. To satisfy Klocwork an additional check has
+ been added; it always yields true.
+ */
+ if (queue_id < SH_CSS_MAX_NUM_QUEUES) {
+ dest_buf->buf_src.queue_id = queue_id;
+ }
+ } else {
+ assert(xmem_addr != mmgr_EXCEPTION);
+ dest_buf->buf_src.xmem_addr = xmem_addr;
+ }
+ dest_buf->buf_type = buf_type;
+}
+
+static void
+sh_css_copy_frame_to_spframe(struct ia_css_frame_sp *sp_frame_out,
+ const struct ia_css_frame *frame_in)
+{
+ assert(frame_in);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_copy_frame_to_spframe():\n");
+
+ sh_css_copy_buffer_attr_to_spbuffer(&sp_frame_out->buf_attr,
+ frame_in->dynamic_queue_id,
+ frame_in->data,
+ frame_in->buf_type);
+
+ ia_css_frame_info_to_frame_sp_info(&sp_frame_out->info, &frame_in->info);
+
+ switch (frame_in->info.format) {
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ sp_frame_out->planes.raw.offset = frame_in->planes.raw.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ sp_frame_out->planes.rgb.offset = frame_in->planes.rgb.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ sp_frame_out->planes.planar_rgb.r.offset =
+ frame_in->planes.planar_rgb.r.offset;
+ sp_frame_out->planes.planar_rgb.g.offset =
+ frame_in->planes.planar_rgb.g.offset;
+ sp_frame_out->planes.planar_rgb.b.offset =
+ frame_in->planes.planar_rgb.b.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ sp_frame_out->planes.yuyv.offset = frame_in->planes.yuyv.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_NV11:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ sp_frame_out->planes.nv.y.offset =
+ frame_in->planes.nv.y.offset;
+ sp_frame_out->planes.nv.uv.offset =
+ frame_in->planes.nv.uv.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ sp_frame_out->planes.yuv.y.offset =
+ frame_in->planes.yuv.y.offset;
+ sp_frame_out->planes.yuv.u.offset =
+ frame_in->planes.yuv.u.offset;
+ sp_frame_out->planes.yuv.v.offset =
+ frame_in->planes.yuv.v.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ sp_frame_out->planes.plane6.r.offset =
+ frame_in->planes.plane6.r.offset;
+ sp_frame_out->planes.plane6.r_at_b.offset =
+ frame_in->planes.plane6.r_at_b.offset;
+ sp_frame_out->planes.plane6.gr.offset =
+ frame_in->planes.plane6.gr.offset;
+ sp_frame_out->planes.plane6.gb.offset =
+ frame_in->planes.plane6.gb.offset;
+ sp_frame_out->planes.plane6.b.offset =
+ frame_in->planes.plane6.b.offset;
+ sp_frame_out->planes.plane6.b_at_r.offset =
+ frame_in->planes.plane6.b_at_r.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ sp_frame_out->planes.binary.data.offset =
+ frame_in->planes.binary.data.offset;
+ break;
+ default:
+ /* This should not happen, but in case it does,
+ * nullify the planes
+ */
+ memset(&sp_frame_out->planes, 0, sizeof(sp_frame_out->planes));
+ break;
+ }
+}
+
+static enum ia_css_err
+set_input_frame_buffer(const struct ia_css_frame *frame) {
+ if (!frame)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ switch (frame->info.format)
+ {
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10:
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.in, frame);
+
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+set_output_frame_buffer(const struct ia_css_frame *frame,
+ unsigned int idx) {
+ if (!frame)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ switch (frame->info.format)
+ {
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ case IA_CSS_FRAME_FORMAT_NV11:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out[idx], frame);
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+set_view_finder_buffer(const struct ia_css_frame *frame) {
+ if (!frame)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ switch (frame->info.format)
+ {
+ /* the dual output pin */
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+
+ /* for vf_veceven */
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out_vf, frame);
+ return IA_CSS_SUCCESS;
+}
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+void sh_css_sp_set_if_configs(
+ const input_formatter_cfg_t *config_a,
+ const input_formatter_cfg_t *config_b,
+ const uint8_t if_config_index
+)
+{
+ assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
+ assert(config_a);
+
+ sh_css_sp_group.config.input_formatter.set[if_config_index].config_a =
+ *config_a;
+ sh_css_sp_group.config.input_formatter.a_changed = true;
+
+ if (config_b) {
+ sh_css_sp_group.config.input_formatter.set[if_config_index].config_b =
+ *config_b;
+ sh_css_sp_group.config.input_formatter.b_changed = true;
+ }
+}
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+void
+sh_css_sp_program_input_circuit(int fmt_type,
+ int ch_id,
+ enum ia_css_input_mode input_mode)
+{
+ sh_css_sp_group.config.input_circuit.no_side_band = false;
+ sh_css_sp_group.config.input_circuit.fmt_type = fmt_type;
+ sh_css_sp_group.config.input_circuit.ch_id = ch_id;
+ sh_css_sp_group.config.input_circuit.input_mode = input_mode;
+ /*
+ * The SP group is only loaded at SP boot time and is read once;
+ * change flags such as "input_circuit_cfg_changed" must be reset
+ * on the SP.
+ */
+ sh_css_sp_group.config.input_circuit_cfg_changed = true;
+ sh_css_sp_stage.program_input_circuit = true;
+}
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+void
+sh_css_sp_configure_sync_gen(int width, int height,
+ int hblank_cycles,
+ int vblank_cycles)
+{
+ sh_css_sp_group.config.sync_gen.width = width;
+ sh_css_sp_group.config.sync_gen.height = height;
+ sh_css_sp_group.config.sync_gen.hblank_cycles = hblank_cycles;
+ sh_css_sp_group.config.sync_gen.vblank_cycles = vblank_cycles;
+}
+
+void
+sh_css_sp_configure_tpg(int x_mask,
+ int y_mask,
+ int x_delta,
+ int y_delta,
+ int xy_mask)
+{
+ sh_css_sp_group.config.tpg.x_mask = x_mask;
+ sh_css_sp_group.config.tpg.y_mask = y_mask;
+ sh_css_sp_group.config.tpg.x_delta = x_delta;
+ sh_css_sp_group.config.tpg.y_delta = y_delta;
+ sh_css_sp_group.config.tpg.xy_mask = xy_mask;
+}
+
+void
+sh_css_sp_configure_prbs(int seed)
+{
+ sh_css_sp_group.config.prbs.seed = seed;
+}
+#endif
+
+void
+sh_css_sp_configure_enable_raw_pool_locking(bool lock_all)
+{
+ sh_css_sp_group.config.enable_raw_pool_locking = true;
+ sh_css_sp_group.config.lock_all = lock_all;
+}
+
+void
+sh_css_sp_enable_isys_event_queue(bool enable)
+{
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ sh_css_sp_group.config.enable_isys_event_queue = enable;
+#else
+ (void)enable;
+#endif
+}
+
+void
+sh_css_sp_set_disable_continuous_viewfinder(bool flag)
+{
+ sh_css_sp_group.config.disable_cont_vf = flag;
+}
+
+static enum ia_css_err
+sh_css_sp_write_frame_pointers(const struct sh_css_binary_args *args) {
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ int i;
+
+ assert(args);
+
+ if (args->in_frame)
+ err = set_input_frame_buffer(args->in_frame);
+ if (err == IA_CSS_SUCCESS && args->out_vf_frame)
+ err = set_view_finder_buffer(args->out_vf_frame);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ {
+ if (err == IA_CSS_SUCCESS && args->out_frame[i])
+ err = set_output_frame_buffer(args->out_frame[i], i);
+ }
+
+ /* We don't pass this error back to the upper layer, so we add an
+ * assert here; we actually do hit the error here, but things still
+ * work by accident...
+ */
+ if (err != IA_CSS_SUCCESS)
+ assert(false);
+ return err;
+}
+
+static void
+sh_css_sp_init_group(bool two_ppc,
+ enum atomisp_input_format input_format,
+ bool no_isp_sync,
+ uint8_t if_config_index)
+{
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc;
+#else
+ (void)two_ppc;
+#endif
+
+ sh_css_sp_group.config.no_isp_sync = (uint8_t)no_isp_sync;
+ /* decide whether the frame is processed online or offline */
+ if (if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED)
+ return;
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
+ sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format =
+ input_format;
+#else
+ (void)input_format;
+#endif
+}
+
+void
+sh_css_stage_write_binary_info(struct ia_css_binary_info *info)
+{
+ assert(info);
+ sh_css_isp_stage.binary_info = *info;
+}
+
+static enum ia_css_err
+copy_isp_mem_if_to_ddr(struct ia_css_binary *binary) {
+ enum ia_css_err err;
+
+ err = ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ &binary->css_params,
+ &binary->mem_params,
+ IA_CSS_PARAM_CLASS_CONFIG);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ &binary->css_params,
+ &binary->mem_params,
+ IA_CSS_PARAM_CLASS_STATE);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ return IA_CSS_SUCCESS;
+}
+
+static bool
+is_sp_stage(struct ia_css_pipeline_stage *stage)
+{
+ assert(stage);
+ return stage->sp_func != IA_CSS_PIPELINE_NO_FUNC;
+}
+
+static enum ia_css_err
+configure_isp_from_args(
+ const struct sh_css_sp_pipeline *pipeline,
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args,
+ bool two_ppc,
+ bool deinterleaved) {
+ ia_css_fpn_configure(binary, &binary->in_frame_info);
+ ia_css_crop_configure(binary, &args->delay_frames[0]->info);
+ ia_css_qplane_configure(pipeline, binary, &binary->in_frame_info);
+ ia_css_output0_configure(binary, &args->out_frame[0]->info);
+ ia_css_output1_configure(binary, &args->out_vf_frame->info);
+ ia_css_copy_output_configure(binary, args->copy_output);
+#ifdef ISP2401
+ ia_css_sc_configure(binary, pipeline->shading.internal_frame_origin_x_bqs_on_sctbl,
+ pipeline->shading.internal_frame_origin_y_bqs_on_sctbl);
+#endif
+ ia_css_iterator_configure(binary, &args->in_frame->info);
+ ia_css_dvs_configure(binary, &args->out_frame[0]->info);
+ ia_css_output_configure(binary, &args->out_frame[0]->info);
+ ia_css_raw_configure(pipeline, binary, &args->in_frame->info, &binary->in_frame_info, two_ppc, deinterleaved);
+ ia_css_ref_configure(binary, (const struct ia_css_frame **)args->delay_frames, pipeline->dvs_frame_delay);
+ ia_css_tnr_configure(binary, (const struct ia_css_frame **)args->tnr_frames);
+ ia_css_bayer_io_config(binary, args);
+ return IA_CSS_SUCCESS;
+}
+
+static void
+initialize_isp_states(const struct ia_css_binary *binary)
+{
+ unsigned int i;
+
+ if (!binary->info->mem_offsets.offsets.state)
+ return;
+ for (i = 0; i < IA_CSS_NUM_STATE_IDS; i++) {
+ ia_css_kernel_init_state[i](binary);
+ }
+}
+
+static void
+initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr)
+{
+ buf_attr->buf_src.queue_id = SH_CSS_INVALID_QUEUE_ID;
+ buf_attr->buf_type = IA_CSS_BUFFER_TYPE_INVALID;
+}
+
+static void
+initialize_stage_frames(struct ia_css_frames_sp *frames)
+{
+ unsigned int i;
+
+ initialize_frame_buffer_attribute(&frames->in.buf_attr);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ initialize_frame_buffer_attribute(&frames->out[i].buf_attr);
+ }
+ initialize_frame_buffer_attribute(&frames->out_vf.buf_attr);
+ initialize_frame_buffer_attribute(&frames->s3a_buf);
+ initialize_frame_buffer_attribute(&frames->dvs_buf);
+#if defined SH_CSS_ENABLE_METADATA
+ initialize_frame_buffer_attribute(&frames->metadata_buf);
+#endif
+}
+
+static enum ia_css_err
+sh_css_sp_init_stage(struct ia_css_binary *binary,
+ const char *binary_name,
+ const struct ia_css_blob_info *blob_info,
+ const struct sh_css_binary_args *args,
+ unsigned int pipe_num,
+ unsigned int stage,
+ bool xnr,
+ const struct ia_css_isp_param_css_segments *isp_mem_if,
+ unsigned int if_config_index,
+ bool two_ppc) {
+ const struct ia_css_binary_xinfo *xinfo;
+ const struct ia_css_binary_info *info;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ int i;
+ struct ia_css_pipe *pipe = NULL;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+ bool continuous = sh_css_continuous_is_enabled((uint8_t)pipe_num);
+
+ assert(binary);
+ assert(blob_info);
+ assert(args);
+ assert(isp_mem_if);
+
+ xinfo = binary->info;
+ info = &xinfo->sp;
+ {
+ /*
+ * Clear sh_css_sp_stage for easy debugging.
+ * program_input_circuit must be saved as it is set outside
+ * this function.
+ */
+ u8 program_input_circuit;
+
+ program_input_circuit = sh_css_sp_stage.program_input_circuit;
+ memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
+ sh_css_sp_stage.program_input_circuit = (uint8_t)program_input_circuit;
+ }
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+ if (!info)
+ {
+ sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] = mmgr_NULL;
+ return IA_CSS_SUCCESS;
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ (void)continuous;
+ sh_css_sp_stage.deinterleaved = 0;
+#else
+ sh_css_sp_stage.deinterleaved = ((stage == 0) && continuous);
+#endif
+
+ initialize_stage_frames(&sh_css_sp_stage.frames);
+ /*
+ * TODO: Make the Host dynamically determine
+ * the stage type.
+ */
+ sh_css_sp_stage.stage_type = SH_CSS_ISP_STAGE_TYPE;
+ sh_css_sp_stage.num = (uint8_t)stage;
+ sh_css_sp_stage.isp_online = (uint8_t)binary->online;
+ sh_css_sp_stage.isp_copy_vf = (uint8_t)args->copy_vf;
+ sh_css_sp_stage.isp_copy_output = (uint8_t)args->copy_output;
+ sh_css_sp_stage.enable.vf_output = (args->out_vf_frame != NULL);
+
+ /* Copy the frame infos first, to be overwritten by the frames,
+ if these are present.
+ */
+ sh_css_sp_stage.frames.effective_in_res.width = binary->effective_in_frame_res.width;
+ sh_css_sp_stage.frames.effective_in_res.height = binary->effective_in_frame_res.height;
+
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.in.info,
+ &binary->in_frame_info);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ {
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.out[i].info,
+ &binary->out_frame_info[i]);
+ }
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.internal_frame_info,
+ &binary->internal_frame_info);
+ sh_css_sp_stage.dvs_envelope.width = binary->dvs_envelope.width;
+ sh_css_sp_stage.dvs_envelope.height = binary->dvs_envelope.height;
+ sh_css_sp_stage.isp_pipe_version = (uint8_t)info->pipeline.isp_pipe_version;
+ sh_css_sp_stage.isp_deci_log_factor = (uint8_t)binary->deci_factor_log2;
+ sh_css_sp_stage.isp_vf_downscale_bits = (uint8_t)binary->vf_downscale_log2;
+
+ sh_css_sp_stage.if_config_index = (uint8_t)if_config_index;
+
+ sh_css_sp_stage.sp_enable_xnr = (uint8_t)xnr;
+ sh_css_sp_stage.xmem_bin_addr = xinfo->xmem_addr;
+ sh_css_sp_stage.xmem_map_addr = sh_css_params_ddr_address_map();
+ sh_css_isp_stage.blob_info = *blob_info;
+ sh_css_stage_write_binary_info((struct ia_css_binary_info *)info);
+
+ /* Make sure binary name is smaller than allowed string size */
+ assert(strlen(binary_name) < SH_CSS_MAX_BINARY_NAME - 1);
+ strncpy(sh_css_isp_stage.binary_name, binary_name, SH_CSS_MAX_BINARY_NAME - 1);
+ sh_css_isp_stage.binary_name[SH_CSS_MAX_BINARY_NAME - 1] = 0;
+ sh_css_isp_stage.mem_initializers = *isp_mem_if;
+
+ /*
+ * Even when a stage needs neither uds nor params,
+ * ia_css_uds_sp_scale_params() seems to be called (needs
+ * further investigation). This function cannot deal with
+ * dx, dy = {0, 0}.
+ */
+
+ err = sh_css_sp_write_frame_pointers(args);
+ /* TODO: move it to a better place */
+ if (binary->info->sp.enable.s3a)
+ {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_3A_STATISTICS, thread_id,
+ &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.s3a_buf, queue_id,
+ mmgr_EXCEPTION,
+ IA_CSS_BUFFER_TYPE_3A_STATISTICS);
+ }
+ if (binary->info->sp.enable.dis)
+ {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_DIS_STATISTICS, thread_id,
+ &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.dvs_buf, queue_id,
+ mmgr_EXCEPTION,
+ IA_CSS_BUFFER_TYPE_DIS_STATISTICS);
+ }
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id, &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_METADATA);
+#endif
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifndef ISP2401
+ if (args->in_frame)
+ {
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (!pipe)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ ia_css_get_crop_offsets(pipe, &args->in_frame->info);
+ } else /* always true: &binary->in_frame_info cannot be NULL */
+ {
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (!pipe)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+#else
+ if (stage == 0)
+ {
+ if (args->in_frame) {
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (!pipe)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ ia_css_get_crop_offsets(pipe, &args->in_frame->info);
+ } else { /* always true: &binary->in_frame_info cannot be NULL */
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (!pipe)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+ }
+#endif
+ }
+#else
+ (void)pipe; /* avoid build warning */
+#endif
+
+ err = configure_isp_from_args(&sh_css_sp_group.pipe[thread_id],
+ binary, args, two_ppc, sh_css_sp_stage.deinterleaved);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ initialize_isp_states(binary);
+
+ /* We do this only for the preview pipe because fill_binary_info()
+ * assigns the vf_out resolution to the out resolution, while ISP
+ * internal processing needs the original out resolution. The video
+ * pipe has two output pins --- out and vf_out --- so it already
+ * keeps both resolutions. */
+ if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
+ (binary->vf_downscale_log2 > 0))
+ {
+ /* TODO: Remove this after preview output decimation is fixed
+ * by configuring the out & vf info fields properly */
+ sh_css_sp_stage.frames.out[0].info.padded_width
+ <<= binary->vf_downscale_log2;
+ sh_css_sp_stage.frames.out[0].info.res.width
+ <<= binary->vf_downscale_log2;
+ sh_css_sp_stage.frames.out[0].info.res.height
+ <<= binary->vf_downscale_log2;
+ }
+ err = copy_isp_mem_if_to_ddr(binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+sp_init_stage(struct ia_css_pipeline_stage *stage,
+ unsigned int pipe_num,
+ bool xnr,
+ unsigned int if_config_index,
+ bool two_ppc) {
+ struct ia_css_binary *binary;
+ const struct ia_css_fw_info *firmware;
+ const struct sh_css_binary_args *args;
+ unsigned int stage_num;
+ /*
+ * Initializer required because of the "else" path below.
+ * Is this a valid path?
+ */
+ const char *binary_name = "";
+ const struct ia_css_binary_xinfo *info = NULL;
+ /* Note: the variable below is made static as it is quite large;
+ * if it were not static it would end up on the stack, which
+ * could cause issues for drivers.
+ */
+ static struct ia_css_binary tmp_binary;
+ const struct ia_css_blob_info *blob_info = NULL;
+ struct ia_css_isp_param_css_segments isp_mem_if;
+ /* LA: should be ia_css_data and should not contain a host pointer.
+ * However, the CSS/DDR pointer is not available yet.
+ * The hack is to store it in params->ddr_ptrs and then copy it
+ * later in the SP, just before vmem init.
+ * TODO: Call this after CSS/DDR allocation and store that pointer.
+ * Best is to allocate it at stage creation time together with the
+ * host pointer. Remove vmem from params.
+ */
+ struct ia_css_isp_param_css_segments *mem_if = &isp_mem_if;
+
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(stage);
+
+ binary = stage->binary;
+ firmware = stage->firmware;
+ args = &stage->args;
+ stage_num = stage->stage_num;
+
+ if (binary)
+ {
+ info = binary->info;
+ binary_name = (const char *)(info->blob->name);
+ blob_info = &info->blob->header.blob;
+ ia_css_init_memory_interface(mem_if, &binary->mem_params, &binary->css_params);
+ } else if (firmware)
+ {
+ const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+
+ if (args->out_frame[0])
+ out_infos[0] = &args->out_frame[0]->info;
+ info = &firmware->info.isp;
+ ia_css_binary_fill_info(info, false, false,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ args->in_frame ? &args->in_frame->info : NULL,
+ NULL,
+ out_infos,
+ args->out_vf_frame ? &args->out_vf_frame->info
+ : NULL,
+ &tmp_binary,
+ NULL,
+ -1, true);
+ binary = &tmp_binary;
+ binary->info = info;
+ binary_name = IA_CSS_EXT_ISP_PROG_NAME(firmware);
+ blob_info = &firmware->blob;
+ mem_if = (struct ia_css_isp_param_css_segments *)&firmware->mem_initializers;
+ } else
+ {
+ /* SP stage */
+ assert(stage->sp_func != IA_CSS_PIPELINE_NO_FUNC);
+ /* binary and blob_info are now NULL.
+ * These would be passed to sh_css_sp_init_stage and
+ * dereferenced there, so passing a NULL pointer is no good.
+ * Return an error instead. */
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ err = sh_css_sp_init_stage(binary,
+ (const char *)binary_name,
+ blob_info,
+ args,
+ pipe_num,
+ stage_num,
+ xnr,
+ mem_if,
+ if_config_index,
+ two_ppc);
+ return err;
+}
+
+static void
+sp_init_sp_stage(struct ia_css_pipeline_stage *stage,
+ unsigned int pipe_num,
+ bool two_ppc,
+ enum sh_css_pipe_config_override copy_ovrd,
+ unsigned int if_config_index)
+{
+ const struct sh_css_binary_args *args = &stage->args;
+
+ assert(stage);
+ switch (stage->sp_func) {
+ case IA_CSS_PIPELINE_RAW_COPY:
+ sh_css_sp_start_raw_copy(args->out_frame[0],
+ pipe_num, two_ppc,
+ stage->max_input_width,
+ copy_ovrd, if_config_index);
+ break;
+ case IA_CSS_PIPELINE_BIN_COPY:
+ assert(false); /* TBI */
+ break;
+ case IA_CSS_PIPELINE_ISYS_COPY:
+ sh_css_sp_start_isys_copy(args->out_frame[0],
+ pipe_num, stage->max_input_width, if_config_index);
+ break;
+ case IA_CSS_PIPELINE_NO_FUNC:
+ assert(false);
+ }
+}
+
+void
+sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ enum ia_css_pipe_id id,
+ u8 pipe_num,
+ bool xnr,
+ bool two_ppc,
+ bool continuous,
+ bool offline,
+ unsigned int required_bds_factor,
+ enum sh_css_pipe_config_override copy_ovrd,
+ enum ia_css_input_mode input_mode,
+ const struct ia_css_metadata_config *md_config,
+ const struct ia_css_metadata_info *md_info,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ const enum mipi_port_id port_id,
+#endif
+ const struct ia_css_coordinate
+ *internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame
+ positioned on shading table at shading correction in ISP. */
+ const struct ia_css_isp_parameters *params
+ ) {
+ /* Get first stage */
+ struct ia_css_pipeline_stage *stage = NULL;
+ struct ia_css_binary *first_binary = NULL;
+ struct ia_css_pipe *pipe = NULL;
+ unsigned int num;
+
+ enum ia_css_pipe_id pipe_id = id;
+ unsigned int thread_id;
+ u8 if_config_index, tmp_if_config_index;
+
+ assert(me);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ assert(me->stages);
+
+ first_binary = me->stages->binary;
+
+ if (input_mode == IA_CSS_INPUT_MODE_SENSOR ||
+ input_mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ {
+ assert(port_id < N_MIPI_PORT_ID);
+ if (port_id >= N_MIPI_PORT_ID) /* should not happen, but Klocwork does not know that */
+ return; /* we should be able to return an error */
+ if_config_index = (uint8_t)(port_id - MIPI_PORT0_ID);
+ } else if (input_mode == IA_CSS_INPUT_MODE_MEMORY)
+ {
+ if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+ } else
+ {
+ if_config_index = 0x0;
+ }
+#else
+ (void)input_mode;
+ if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+#endif
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));
+
+ /* Count stages */
+ for (stage = me->stages, num = 0; stage; stage = stage->next, num++)
+ {
+ stage->stage_num = num;
+ ia_css_debug_pipe_graph_dump_stage(stage, id);
+ }
+ me->num_stages = num;
+
+ if (first_binary)
+ {
+ /* Init pipeline data */
+ sh_css_sp_init_group(two_ppc, first_binary->input_format,
+ offline, if_config_index);
+ } /* if (first_binary != NULL) */
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401) || defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* Signal the host immediately after start for SP_ISYS_COPY only */
+ if ((me->num_stages == 1) && me->stages &&
+ (me->stages->sp_func == IA_CSS_PIPELINE_ISYS_COPY))
+ sh_css_sp_group.config.no_isp_sync = true;
+#endif
+
+ /* Init stage data */
+ sh_css_init_host2sp_frame_data();
+
+ sh_css_sp_group.pipe[thread_id].num_stages = 0;
+ sh_css_sp_group.pipe[thread_id].pipe_id = pipe_id;
+ sh_css_sp_group.pipe[thread_id].thread_id = thread_id;
+ sh_css_sp_group.pipe[thread_id].pipe_num = pipe_num;
+ sh_css_sp_group.pipe[thread_id].num_execs = me->num_execs;
+ sh_css_sp_group.pipe[thread_id].pipe_qos_config = me->pipe_qos_config;
+ sh_css_sp_group.pipe[thread_id].required_bds_factor = required_bds_factor;
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ sh_css_sp_group.pipe[thread_id].input_system_mode
+ = (uint32_t)input_mode;
+ sh_css_sp_group.pipe[thread_id].port_id = port_id;
+#endif
+ sh_css_sp_group.pipe[thread_id].dvs_frame_delay = (uint32_t)me->dvs_frame_delay;
+
+ /* TODO: the next block indicates from which queues parameters
+ need to be sampled; needs checking/improvement */
+ if (ia_css_pipeline_uses_params(me))
+ {
+ sh_css_sp_group.pipe[thread_id].pipe_config =
+ SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id;
+ }
+
+ /* For continuous use-cases, SP copy is responsible for sampling the
+ * parameters */
+ if (continuous)
+ sh_css_sp_group.pipe[thread_id].pipe_config = 0;
+
+ sh_css_sp_group.pipe[thread_id].inout_port_config = me->inout_port_config;
+
+ pipe = find_pipe_by_num(pipe_num);
+ assert(pipe);
+ if (!pipe)
+ {
+ return;
+ }
+ sh_css_sp_group.pipe[thread_id].scaler_pp_lut = sh_css_pipe_get_pp_gdc_lut(pipe);
+
+#if defined(SH_CSS_ENABLE_METADATA)
+ if (md_info && md_info->size > 0)
+ {
+ sh_css_sp_group.pipe[thread_id].metadata.width = md_info->resolution.width;
+ sh_css_sp_group.pipe[thread_id].metadata.height = md_info->resolution.height;
+ sh_css_sp_group.pipe[thread_id].metadata.stride = md_info->stride;
+ sh_css_sp_group.pipe[thread_id].metadata.size = md_info->size;
+ ia_css_isys_convert_stream_format_to_mipi_format(
+ md_config->data_type, MIPI_PREDICTOR_NONE,
+ &sh_css_sp_group.pipe[thread_id].metadata.format);
+ }
+#else
+ (void)md_config;
+ (void)md_info;
+#endif
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ sh_css_sp_group.pipe[thread_id].output_frame_queue_id = (uint32_t)SH_CSS_INVALID_QUEUE_ID;
+ if (pipe_id != IA_CSS_PIPE_ID_COPY)
+ {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id,
+ (enum sh_css_queue_id *)(
+ &sh_css_sp_group.pipe[thread_id].output_frame_queue_id));
+ }
+#endif
+
+ if (atomisp_hw_is_isp2401) {
+ /* For shading correction type 1 (the legacy shading table
+ * conversion in the CSS is not used), the parameters are passed
+ * to the ISP for shading table centering.
+ */
+ if (internal_frame_origin_bqs_on_sctbl &&
+ params && params->shading_settings.enable_shading_table_conversion == 0)
+ {
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl
+ = (uint32_t)internal_frame_origin_bqs_on_sctbl->x;
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl
+ = (uint32_t)internal_frame_origin_bqs_on_sctbl->y;
+ } else
+ {
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl =
+ 0;
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl =
+ 0;
+ }
+ }
+
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe_id, sh_css_sp_group.pipe[thread_id].inout_port_config);
+
+ for (stage = me->stages, num = 0; stage; stage = stage->next, num++)
+ {
+ sh_css_sp_group.pipe[thread_id].num_stages++;
+ if (is_sp_stage(stage)) {
+ sp_init_sp_stage(stage, pipe_num, two_ppc,
+ copy_ovrd, if_config_index);
+ } else {
+ if ((stage->stage_num != 0) ||
+ SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(me->inout_port_config))
+ tmp_if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+ else
+ tmp_if_config_index = if_config_index;
+ sp_init_stage(stage, pipe_num,
+ xnr, tmp_if_config_index, two_ppc);
+ }
+
+ store_sp_stage_data(pipe_id, pipe_num, num);
+ }
+ sh_css_sp_group.pipe[thread_id].pipe_config |= (uint32_t)
+ (me->acquire_isp_each_stage << IA_CSS_ACQUIRE_ISP_POS);
+ store_sp_group_data();
+}
+
+void
+sh_css_sp_uninit_pipeline(unsigned int pipe_num)
+{
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ /*memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));*/
+ sh_css_sp_group.pipe[thread_id].num_stages = 0;
+}
+
+bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command)
+{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_command)
+ / sizeof(int);
+ enum host2sp_commands last_cmd = host2sp_cmd_error;
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ /* Previous command must be handled by SP (by design) */
+ last_cmd = load_sp_array_uint(host_sp_com, offset);
+ if (last_cmd != host2sp_cmd_ready)
+ IA_CSS_ERROR("last host command not handled by SP(%d)", last_cmd);
+
+ store_sp_array_uint(host_sp_com, offset, host2sp_command);
+
+ return (last_cmd == host2sp_cmd_ready);
+}
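+
+/*
+ * Host-side usage sketch for sh_css_write_host2sp_command(); the command
+ * value below is illustrative, actual values come from enum
+ * host2sp_commands:
+ *
+ * if (!sh_css_write_host2sp_command(host2sp_cmd_terminate))
+ * IA_CSS_ERROR("SP did not consume the previous command");
+ *
+ * A false return value means the previous command was still pending,
+ * i.e. the SP had not yet written host2sp_cmd_ready back.
+ */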
+
+enum host2sp_commands
+sh_css_read_host2sp_command(void) {
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_command)
+ / sizeof(int);
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+ return (enum host2sp_commands)load_sp_array_uint(host_sp_com, offset);
+}
+
+/*
+ * Frame data is no longer part of the sp_stage structure but part of a
+ * separate structure. The aim is to make the sp_data struct static
+ * (it defines a pipeline) and to store the dynamic (per frame) data
+ * separately.
+ *
+ * This function must be called first, everywhere you start constructing
+ * a new pipeline by defining one or more stages with use of the variable
+ * sh_css_sp_stage. This includes the special cases like accelerator and
+ * copy_frame, which have a pipeline of just one stage.
+ */
+void
+sh_css_init_host2sp_frame_data(void)
+{
+ /* Clean table */
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+ /*
+ * rvanimme: don't clean it, to preserve static frame info like
+ * ref_in, ref_out, and tnr_frames. Once this static data is in a
+ * separate data struct, this may be enabled (but still, there is
+ * no need for it).
+ */
+}
+
+/*
+ * @brief Update the offline frame information in host_sp_communication.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+void
+sh_css_update_host2sp_offline_frame(
+ unsigned int frame_num,
+ struct ia_css_frame *frame,
+ struct ia_css_metadata *metadata)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int offset;
+
+ assert(frame_num < NUM_CONTINUOUS_FRAMES);
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_offline_frames)
+ / sizeof(int);
+ offset += frame_num;
+ store_sp_array_uint(host_sp_com, offset, frame ? frame->data : 0);
+
+ /* Write metadata buffer into SP DMEM */
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_offline_metadata)
+ / sizeof(int);
+ offset += frame_num;
+ store_sp_array_uint(host_sp_com, offset, metadata ? metadata->address : 0);
+}
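+
+/*
+ * Note on the offset arithmetic in sh_css_update_host2sp_offline_frame()
+ * and the helpers below: SP DMEM is addressed as an array of 32-bit
+ * words, so the byte offset from offsetof() is divided by sizeof(int)
+ * to get a word index, and frame_num then indexes into the per-frame
+ * array. As a hypothetical example, if host2sp_offline_frames started
+ * at byte offset 16, frame 2 would live at word index 16 / 4 + 2 = 6.
+ */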
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+/*
+ * @brief Update the mipi frame information in host_sp_communication.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+void
+sh_css_update_host2sp_mipi_frame(
+ unsigned int frame_num,
+ struct ia_css_frame *frame)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int offset;
+
+ /* MIPI buffers are dedicated per port, so now there are more of them. */
+ assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM));
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_mipi_frames)
+ / sizeof(int);
+ offset += frame_num;
+
+ store_sp_array_uint(host_sp_com, offset,
+ frame ? frame->data : 0);
+}
+
+/*
+ * @brief Update the mipi metadata information in host_sp_communication.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+void
+sh_css_update_host2sp_mipi_metadata(
+ unsigned int frame_num,
+ struct ia_css_metadata *metadata)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int o;
+
+ /* MIPI buffers are dedicated per port, so now there are more of them. */
+ assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM));
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ o = offsetof(struct host_sp_communication, host2sp_mipi_metadata)
+ / sizeof(int);
+ o += frame_num;
+ store_sp_array_uint(host_sp_com, o,
+ metadata ? metadata->address : 0);
+}
+
+void
+sh_css_update_host2sp_num_mipi_frames(unsigned int num_frames)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int offset;
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_num_mipi_frames)
+ / sizeof(int);
+
+ store_sp_array_uint(host_sp_com, offset, num_frames);
+}
+#endif
+
+void
+sh_css_update_host2sp_cont_num_raw_frames(unsigned int num_frames,
+ bool set_avail)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int extra_num_frames, avail_num_frames;
+ unsigned int offset, offset_extra;
+
+ /* Write new frame data into SP DMEM */
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com;
+ if (set_avail) {
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_cont_avail_num_raw_frames)
+ / sizeof(int);
+ avail_num_frames = load_sp_array_uint(host_sp_com, offset);
+ extra_num_frames = num_frames - avail_num_frames;
+ offset_extra = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_cont_extra_num_raw_frames)
+ / sizeof(int);
+ store_sp_array_uint(host_sp_com, offset_extra, extra_num_frames);
+ } else {
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_cont_target_num_raw_frames)
+ / sizeof(int);
+ }
+
+ store_sp_array_uint(host_sp_com, offset, num_frames);
+}
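+
+/*
+ * Worked example for the set_avail branch above: if the caller requests
+ * num_frames = 10 while the SP currently reports 6 available raw
+ * frames, then extra_num_frames = 10 - 6 = 4 is stored in
+ * host2sp_cont_extra_num_raw_frames and 10 replaces the available
+ * count. This assumes num_frames is never smaller than the available
+ * count, since the subtraction is unsigned.
+ */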
+
+void
+sh_css_event_init_irq_mask(void)
+{
+ int i;
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask_init;
+
+ event_irq_mask_init.or_mask = IA_CSS_EVENT_TYPE_ALL;
+ event_irq_mask_init.and_mask = IA_CSS_EVENT_TYPE_NONE;
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ assert(sizeof(event_irq_mask_init) % HRT_BUS_BYTES == 0);
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_event_irq_mask[i]);
+ assert(offset % HRT_BUS_BYTES == 0);
+ sp_dmem_store(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_com) + offset,
+ &event_irq_mask_init, sizeof(event_irq_mask_init));
+ }
+}
+
+enum ia_css_err
+ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
+ unsigned int or_mask,
+ unsigned int and_mask) {
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask;
+ unsigned int pipe_num;
+
+ assert(pipe);
+
+ assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES);
+ /* The Linux kernel does not have UINT16_MAX, so two asserts on
+ * or_mask and and_mask were commented out for Linux.
+ * Alternatives that were not chosen:
+ * - add a conditional #define for UINT16_MAX
+ * - compare with (uint16_t)~0 or 0xffff
+ * - different assert for Linux and Windows
+ */
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ IA_CSS_LOG("or_mask=%x, and_mask=%x", or_mask, and_mask);
+ event_irq_mask.or_mask = (uint16_t)or_mask;
+ event_irq_mask.and_mask = (uint16_t)and_mask;
+
+ pipe_num = ia_css_pipe_get_pipe_num(pipe);
+ if (pipe_num >= IA_CSS_PIPE_ID_NUM)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_event_irq_mask[pipe_num]);
+ assert(offset % HRT_BUS_BYTES == 0);
+ sp_dmem_store(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_com) + offset,
+ &event_irq_mask, sizeof(event_irq_mask));
+
+ return IA_CSS_SUCCESS;
+}
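+
+/*
+ * Usage sketch for ia_css_pipe_set_irq_mask(); the mask semantics below
+ * reflect our reading of the public event API and should be verified
+ * against it: or_mask events each raise an interrupt as they occur,
+ * while and_mask events raise one interrupt once all of them have
+ * occurred.
+ *
+ * ia_css_pipe_set_irq_mask(pipe,
+ * IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,
+ * IA_CSS_EVENT_TYPE_NONE);
+ */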
+
+enum ia_css_err
+ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
+ unsigned int *or_mask,
+ unsigned int *and_mask) {
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask;
+ unsigned int pipe_num;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ IA_CSS_ENTER_LEAVE("");
+
+ assert(pipe);
+ assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES);
+
+ pipe_num = ia_css_pipe_get_pipe_num(pipe);
+ if (pipe_num >= IA_CSS_PIPE_ID_NUM)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_event_irq_mask[pipe_num]);
+ assert(offset % HRT_BUS_BYTES == 0);
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_com) + offset,
+ &event_irq_mask, sizeof(event_irq_mask));
+
+ if (or_mask)
+ *or_mask = event_irq_mask.or_mask;
+
+ if (and_mask)
+ *and_mask = event_irq_mask.and_mask;
+
+ return IA_CSS_SUCCESS;
+}
+
+void
+sh_css_sp_set_sp_running(bool flag)
+{
+ sp_running = flag;
+}
+
+bool
+sh_css_sp_is_running(void)
+{
+ return sp_running;
+}
+
+void
+sh_css_sp_start_isp(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_sw_state;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_sw_state = fw->info.sp.sw_state;
+
+ if (sp_running)
+ return;
+
+ (void)HIVE_ADDR_sp_sw_state; /* Suppress warnings in CRUN */
+
+ /* no longer here, sp started immediately */
+ /*ia_css_debug_pipe_graph_dump_epilogue();*/
+
+ store_sp_group_data();
+ store_sp_per_frame_data(fw);
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_sw_state),
+ (uint32_t)(IA_CSS_SP_SW_TERMINATED));
+
+ /* Note 1: The sp_start_isp function contains a wait until
+ * the input network is configured by the SP.
+ * Note 2: Not all SP binaries support host2sp_commands.
+ * In case a binary does support it, the host2sp_command
+ * will have status cmd_ready after return of the function
+ * sh_css_hrt_sp_start_isp. There is no race condition here
+ * because the SP starts configuring the input network only
+ * after the process_frame command has been received.
+ */
+
+ /* We need to set sp_running before we call
+ * ia_css_mmu_invalidate_cache, as ia_css_mmu_invalidate_cache
+ * checks sp_running to avoid accessing dmem while the SP is not
+ * powered.
+ */
+ sp_running = true;
+ ia_css_mmu_invalidate_cache();
+ /* Invalidate all MMU caches */
+ mmu_invalidate_cache_all();
+
+ ia_css_spctrl_start(SP0_ID);
+}
+
+bool
+ia_css_isp_has_started(void)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started;
+ (void)HIVE_ADDR_ia_css_ispctrl_sp_isp_started; /* Suppress warnings in CRUN */
+
+ return (bool)load_sp_uint(ia_css_ispctrl_sp_isp_started);
+}
+
+/*
+ * @brief Initialize the DMA software-mask in the debug mode.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+bool
+sh_css_sp_init_dma_sw_reg(int dma_id)
+{
+ int i;
+
+ /* enable all the DMA channels */
+ for (i = 0; i < N_DMA_CHANNEL_ID; i++) {
+ /* enable the writing request */
+ sh_css_sp_set_dma_sw_reg(dma_id,
+ i,
+ 0,
+ true);
+ /* enable the reading request */
+ sh_css_sp_set_dma_sw_reg(dma_id,
+ i,
+ 1,
+ true);
+ }
+
+ return true;
+}
+
+/*
+ * @brief Set the DMA software-mask in the debug mode.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+bool
+sh_css_sp_set_dma_sw_reg(int dma_id,
+ int channel_id,
+ int request_type,
+ bool enable)
+{
+ u32 sw_reg;
+ u32 bit_val;
+ u32 bit_offset;
+ u32 bit_mask;
+
+ (void)dma_id;
+
+ assert(channel_id >= 0 && channel_id < N_DMA_CHANNEL_ID);
+ assert(request_type >= 0);
+
+ /* get the software-mask */
+ sw_reg =
+ sh_css_sp_group.debug.dma_sw_reg;
+
+ /* get the offset of the target bit */
+ bit_offset = (8 * request_type) + channel_id;
+
+ /* clear the value of the target bit */
+ bit_mask = ~(1 << bit_offset);
+ sw_reg &= bit_mask;
+
+ /* set the value of the bit for the DMA channel */
+ bit_val = enable ? 1 : 0;
+ bit_val <<= bit_offset;
+ sw_reg |= bit_val;
+
+ /* update the software status of DMA channels */
+ sh_css_sp_group.debug.dma_sw_reg = sw_reg;
+
+ return true;
+}
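+
+/*
+ * Bit-layout example for the register manipulated above: one bit per
+ * channel per request type, with bit_offset = 8 * request_type +
+ * channel_id. Enabling the reading request (request_type 1) of DMA
+ * channel 3 therefore sets bit 8 * 1 + 3 = 11 of
+ * sh_css_sp_group.debug.dma_sw_reg.
+ */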
+
+void
+sh_css_sp_reset_global_vars(void)
+{
+ memset(&sh_css_sp_group, 0, sizeof(struct sh_css_sp_group));
+ memset(&sh_css_sp_stage, 0, sizeof(struct sh_css_sp_stage));
+ memset(&sh_css_isp_stage, 0, sizeof(struct sh_css_isp_stage));
+ memset(&sh_css_sp_output, 0, sizeof(struct sh_css_sp_output));
+ memset(&per_frame_data, 0, sizeof(struct sh_css_sp_per_frame_data));
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.h b/drivers/staging/media/atomisp/pci/sh_css_sp.h
new file mode 100644
index 000000000000..7d4e13f1e038
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.h
@@ -0,0 +1,248 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_SP_H_
+#define _SH_CSS_SP_H_
+
+#include <system_global.h>
+#include <type_support.h>
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "input_formatter.h"
+#endif
+
+#include "ia_css_binary.h"
+#include "ia_css_types.h"
+#include "ia_css_pipeline.h"
+
+/* Function to initialize the data and bss section descr of the binary */
+void
+sh_css_sp_store_init_dmem(const struct ia_css_fw_info *fw);
+
+void
+store_sp_stage_data(enum ia_css_pipe_id id, unsigned int pipe_num,
+ unsigned int stage);
+
+void
+sh_css_stage_write_binary_info(struct ia_css_binary_info *info);
+
+void
+store_sp_group_data(void);
+
+/* Start binary (jpeg) copy on the SP */
+void
+sh_css_sp_start_binary_copy(unsigned int pipe_num,
+ struct ia_css_frame *out_frame,
+ unsigned int two_ppc);
+
+unsigned int
+sh_css_sp_get_binary_copy_size(void);
+
+/* Return the value of a SW interrupt */
+unsigned int
+sh_css_sp_get_sw_interrupt_value(unsigned int irq);
+
+void
+sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ enum ia_css_pipe_id id,
+ u8 pipe_num,
+ bool xnr,
+ bool two_ppc,
+ bool continuous,
+ bool offline,
+ unsigned int required_bds_factor,
+ enum sh_css_pipe_config_override copy_ovrd,
+ enum ia_css_input_mode input_mode,
+ const struct ia_css_metadata_config *md_config,
+ const struct ia_css_metadata_info *md_info,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ const enum mipi_port_id port_id,
+#endif
+ const struct ia_css_coordinate
+ *internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame
+ positioned on shading table at shading correction in ISP. */
+ const struct ia_css_isp_parameters *params
+ );
+
+void
+sh_css_sp_uninit_pipeline(unsigned int pipe_num);
+
+bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command);
+
+enum host2sp_commands
+sh_css_read_host2sp_command(void);
+
+void
+sh_css_init_host2sp_frame_data(void);
+
+/**
+ * @brief Update the offline frame information in host_sp_communication.
+ *
+ * @param[in] frame_num The offline frame number.
+ * @param[in] frame The pointer to the offline frame.
+ * @param[in] metadata The pointer to the offline metadata buffer.
+ */
+void
+sh_css_update_host2sp_offline_frame(
+ unsigned int frame_num,
+ struct ia_css_frame *frame,
+ struct ia_css_metadata *metadata);
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+/**
+ * @brief Update the mipi frame information in host_sp_communication.
+ *
+ * @param[in] frame_num The mipi frame number.
+ * @param[in] frame The pointer to the mipi frame.
+ */
+void
+sh_css_update_host2sp_mipi_frame(
+ unsigned int frame_num,
+ struct ia_css_frame *frame);
+
+/**
+ * @brief Update the mipi metadata information in host_sp_communication.
+ *
+ * @param[in] frame_num The mipi frame number.
+ * @param[in] metadata The pointer to the mipi metadata.
+ */
+void
+sh_css_update_host2sp_mipi_metadata(
+ unsigned int frame_num,
+ struct ia_css_metadata *metadata);
+
+/**
+ * @brief Update the nr of mipi frames to use in host_sp_communication.
+ *
+ * @param[in] num_frames The number of mipi frames to use.
+ */
+void
+sh_css_update_host2sp_num_mipi_frames(unsigned int num_frames);
+#endif
+
+/**
+ * @brief Update the number of continuous raw frames to use in
+ * host_sp_communication.
+ *
+ * @param[in] num_frames The number of raw frames to use.
+ * @param[in] set_avail When true, update the available-frames count
+ * (and the derived extra count); otherwise update
+ * the target count.
+ */
+void
+sh_css_update_host2sp_cont_num_raw_frames(unsigned int num_frames,
+ bool set_avail);
+
+void
+sh_css_event_init_irq_mask(void);
+
+void
+sh_css_sp_start_isp(void);
+
+void
+sh_css_sp_set_sp_running(bool flag);
+
+bool
+sh_css_sp_is_running(void);
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+void
+sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state);
+
+#endif
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+void
+sh_css_sp_set_if_configs(
+ const input_formatter_cfg_t *config_a,
+ const input_formatter_cfg_t *config_b,
+ const uint8_t if_config_index);
+#endif
+
+void
+sh_css_sp_program_input_circuit(int fmt_type,
+ int ch_id,
+ enum ia_css_input_mode input_mode);
+
+void
+sh_css_sp_configure_sync_gen(int width,
+ int height,
+ int hblank_cycles,
+ int vblank_cycles);
+
+void
+sh_css_sp_configure_tpg(int x_mask,
+ int y_mask,
+ int x_delta,
+ int y_delta,
+ int xy_mask);
+
+void
+sh_css_sp_configure_prbs(int seed);
+
+void
+sh_css_sp_configure_enable_raw_pool_locking(bool lock_all);
+
+void
+sh_css_sp_enable_isys_event_queue(bool enable);
+
+void
+sh_css_sp_set_disable_continuous_viewfinder(bool flag);
+
+void
+sh_css_sp_reset_global_vars(void);
+
+/**
+ * @brief Initialize the DMA software-mask in the debug mode.
+ * This API should ONLY be called in the debugging mode,
+ * and it should always be called before the first call of
+ * "sh_css_sp_set_dma_sw_reg(...)".
+ *
+ * @param[in] dma_id The ID of the target DMA.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool
+sh_css_sp_init_dma_sw_reg(int dma_id);
+
+/**
+ * @brief Set the DMA software-mask in the debug mode.
+ * This API should ONLY be called in the debugging mode. Must
+ * call "sh_css_sp_init_dma_sw_reg(...)" before this
+ * API is called for the first time.
+ *
+ * @param[in] dma_id The ID of the target DMA.
+ * @param[in] channel_id The ID of the target DMA channel.
+ * @param[in] request_type The type of the DMA request.
+ * For example:
+ * - "0" indicates the writing request.
+ * - "1" indicates the reading request.
+ *
+ * @param[in] enable If it is "true", the target DMA
+ * channel is enabled in the software.
+ * Otherwise, the target DMA channel
+ * is disabled in the software.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool
+sh_css_sp_set_dma_sw_reg(int dma_id,
+ int channel_id,
+ int request_type,
+ bool enable);
+
+extern struct sh_css_sp_group sh_css_sp_group;
+extern struct sh_css_sp_stage sh_css_sp_stage;
+extern struct sh_css_isp_stage sh_css_isp_stage;
+
+#endif /* _SH_CSS_SP_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_stream.c b/drivers/staging/media/atomisp/pci/sh_css_stream.c
new file mode 100644
index 000000000000..60bddbb3d4c6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_stream.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_stream.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_stream_format.c b/drivers/staging/media/atomisp/pci/sh_css_stream_format.c
new file mode 100644
index 000000000000..548d4a3567b2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_stream_format.c
@@ -0,0 +1,76 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "sh_css_stream_format.h"
+#include <ia_css_stream_format.h>
+
+unsigned int sh_css_stream_format_2_bits_per_subpixel(
+ enum atomisp_input_format format)
+{
+ unsigned int rval;
+
+ switch (format) {
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ rval = 4;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ rval = 5;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ rval = 6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ rval = 7;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ rval = 8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ rval = 10;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ rval = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ rval = 14;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ rval = 16;
+ break;
+ default:
+ rval = 0;
+ break;
+ }
+
+ return rval;
+}
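+
+/*
+ * Example: sh_css_stream_format_2_bits_per_subpixel(
+ * ATOMISP_INPUT_FORMAT_RAW_10) returns 10, while an unrecognized format
+ * returns 0, which callers should treat as an error.
+ */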
diff --git a/drivers/staging/media/atomisp/pci/sh_css_stream_format.h b/drivers/staging/media/atomisp/pci/sh_css_stream_format.h
new file mode 100644
index 000000000000..32ebd6e0f344
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_stream_format.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_STREAM_FORMAT_H
+#define __SH_CSS_STREAM_FORMAT_H
+
+#include <ia_css_stream_format.h>
+
+unsigned int sh_css_stream_format_2_bits_per_subpixel(
+ enum atomisp_input_format format);
+
+#endif /* __SH_CSS_STREAM_FORMAT_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_struct.h b/drivers/staging/media/atomisp/pci/sh_css_struct.h
new file mode 100644
index 000000000000..81b9598ef8b7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_struct.h
@@ -0,0 +1,85 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_STRUCT_H
+#define __SH_CSS_STRUCT_H
+
+/* This header file contains the definition of the
+   sh_css struct and friends; logically the file would
+   probably be called sh_css.h after the pattern
+   <type>.h, but sh_css.h is the predecessor of ia_css.h,
+   so this could cause confusion; hence the _struct
+   in the filename.
+*/
+
+#include <type_support.h>
+#include <system_types.h>
+#include "ia_css_pipeline.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_queue.h"
+#include "ia_css_irq.h"
+
+struct sh_css {
+ struct ia_css_pipe *active_pipes[IA_CSS_PIPELINE_NUM_MAX];
+ /* All of the pipes created at any point of time. At this moment there can
+ * be no more than MAX_SP_THREADS of them because pipe_num is reused as SP
+ * thread_id to which a pipe's pipeline is associated. At a later point, if
+ * we support more pipe objects, we should add test code to test that
+ * possibility. Also, active_pipes[] should be able to hold only
+ * SH_CSS_MAX_SP_THREADS objects. Anything else is misleading. */
+ struct ia_css_pipe *all_pipes[IA_CSS_PIPELINE_NUM_MAX];
+ void *(*malloc)(size_t bytes, bool zero_mem);
+ void (*free)(void *ptr);
+ void (*flush)(struct ia_css_acc_fw *fw);
+
+/* ISP2401 */
+ void *(*malloc_ex)(size_t bytes, bool zero_mem, const char *caller_func,
+ int caller_line);
+ void (*free_ex)(void *ptr, const char *caller_func, int caller_line);
+
+/* ISP2400 */
+ bool stop_copy_preview;
+
+ bool check_system_idle;
+ unsigned int num_cont_raw_frames;
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ unsigned int num_mipi_frames[N_CSI_PORTS];
+ struct ia_css_frame
+ *mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ struct ia_css_metadata
+ *mipi_metadata[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ unsigned int
+ mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
+ unsigned int mipi_frame_size[N_CSI_PORTS];
+#endif
+ hrt_vaddress sp_bin_addr;
+ hrt_data page_table_base_index;
+ unsigned int
+ size_mem_words; /* \deprecated{Use ia_css_mipi_buffer_config instead.}*/
+ enum ia_css_irq_type irq_type;
+ unsigned int pipe_counter;
+
+ unsigned int type; /* 2400 or 2401 for now */
+};
+
+#define IPU_2400 1
+#define IPU_2401 2
+
+#define IS_2400() (my_css.type == IPU_2400)
+#define IS_2401() (my_css.type == IPU_2401)
+
+extern struct sh_css my_css;
+
+#endif /* __SH_CSS_STRUCT_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_uds.h b/drivers/staging/media/atomisp/pci/sh_css_uds.h
new file mode 100644
index 000000000000..d9bcae6007bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_uds.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_UDS_H_
+#define _SH_CSS_UDS_H_
+
+#include <type_support.h>
+
+#define SIZE_OF_SH_CSS_UDS_INFO_IN_BITS (4 * 16)
+#define SIZE_OF_SH_CSS_CROP_POS_IN_BITS (2 * 16)
+
+/* Uds types, used in pipeline_global.h and sh_css_internal.h */
+
+struct sh_css_uds_info {
+ u16 curr_dx;
+ u16 curr_dy;
+ u16 xc;
+ u16 yc;
+};
+
+struct sh_css_crop_pos {
+ u16 x;
+ u16 y;
+};
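+
+/*
+ * The SIZE_OF_* macros above spell out these layouts in bits: four
+ * 16-bit fields (4 * 16) for sh_css_uds_info and two 16-bit fields
+ * (2 * 16) for sh_css_crop_pos, presumably so that consistency checks
+ * can compare them against the host-side struct sizes.
+ */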
+
+#endif /* _SH_CSS_UDS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_version.c b/drivers/staging/media/atomisp/pci/sh_css_version.c
new file mode 100644
index 000000000000..eb986e15c7fa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_version.c
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "../../include/linux/atomisp.h"
+#include "ia_css_version.h"
+#include "ia_css_version_data.h"
+#include "ia_css_err.h"
+#include "sh_css_firmware.h"
+
+enum ia_css_err
+ia_css_get_version(char *version, int max_size) {
+ char *css_version;
+
+ if (!atomisp_hw_is_isp2401)
+ css_version = ISP2400_CSS_VERSION_STRING;
+ else
+ css_version = ISP2401_CSS_VERSION_STRING;
+
+ if (max_size <= (int)strlen(css_version) + (int)strlen(sh_css_get_fw_version()) + 5)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ strcpy(version, css_version);
+ strcat(version, "FW:");
+ strcat(version, sh_css_get_fw_version());
+ strcat(version, "; ");
+ return IA_CSS_SUCCESS;
+}
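+
+/*
+ * The resulting string has the form "<css_version>FW:<fw_version>; ".
+ * The size check above (<= ... + 5) reserves exactly the 3 bytes of
+ * "FW:", the 2 bytes of "; " and the NUL terminator.
+ */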
diff --git a/drivers/staging/media/atomisp/pci/str2mem_defs.h b/drivers/staging/media/atomisp/pci/str2mem_defs.h
new file mode 100644
index 000000000000..1cb62444cf68
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/str2mem_defs.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ST2MEM_DEFS_H
+#define _ST2MEM_DEFS_H
+
+#define _STR2MEM_CRUN_BIT 0x100000
+#define _STR2MEM_CMD_BITS 0x0F0000
+#define _STR2MEM_COUNT_BITS 0x00FFFF
+
+#define _STR2MEM_BLOCKS_CMD 0xA0000
+#define _STR2MEM_PACKETS_CMD 0xB0000
+#define _STR2MEM_BYTES_CMD 0xC0000
+#define _STR2MEM_BYTES_FROM_PACKET_CMD 0xD0000
+
+#define _STR2MEM_SOFT_RESET_REG_ID 0
+#define _STR2MEM_INPUT_ENDIANNESS_REG_ID 1
+#define _STR2MEM_OUTPUT_ENDIANNESS_REG_ID 2
+#define _STR2MEM_BIT_SWAPPING_REG_ID 3
+#define _STR2MEM_BLOCK_SYNC_LEVEL_REG_ID 4
+#define _STR2MEM_PACKET_SYNC_LEVEL_REG_ID 5
+#define _STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID 6
+#define _STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID 7
+#define _STR2MEM_EN_STAT_UPDATE_ID 8
+
+#define _STR2MEM_REG_ALIGN 4
+
+#endif /* _ST2MEM_DEFS_H */
diff --git a/drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h b/drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h
new file mode 100644
index 000000000000..60143b8743a2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _streaming_to_mipi_defs_h
+#define _streaming_to_mipi_defs_h
+
+#define HIVE_STR_TO_MIPI_VALID_A_BIT 0
+#define HIVE_STR_TO_MIPI_VALID_B_BIT 1
+#define HIVE_STR_TO_MIPI_SOL_BIT 2
+#define HIVE_STR_TO_MIPI_EOL_BIT 3
+#define HIVE_STR_TO_MIPI_SOF_BIT 4
+#define HIVE_STR_TO_MIPI_EOF_BIT 5
+#define HIVE_STR_TO_MIPI_CH_ID_LSB 6
+
+#define HIVE_STR_TO_MIPI_DATA_A_LSB (HIVE_STR_TO_MIPI_VALID_B_BIT + 1)
+
+#endif /* _streaming_to_mipi_defs_h */
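The bit positions above pack per-word sideband flags and a channel id into a single control word. A hypothetical illustration (assumed semantics) of flagging a start-of-frame, start-of-line word on channel 2:

    uint32_t word = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
		    (1 << HIVE_STR_TO_MIPI_SOF_BIT) |
		    (1 << HIVE_STR_TO_MIPI_SOL_BIT) |
		    (2 << HIVE_STR_TO_MIPI_CH_ID_LSB);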
diff --git a/drivers/staging/media/atomisp/pci/system_global.h b/drivers/staging/media/atomisp/pci/system_global.h
new file mode 100644
index 000000000000..7f833c15f3ce
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/system_global.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#ifdef ISP2401
+# include "isp2401_system_global.h"
+#else
+# include "isp2400_system_global.h"
+#endif
diff --git a/drivers/staging/media/atomisp/pci/system_local.h b/drivers/staging/media/atomisp/pci/system_local.h
new file mode 100644
index 000000000000..fbb5daadac9f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/system_local.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#ifdef ISP2401
+# include "isp2401_system_local.h"
+#else
+# include "isp2400_system_local.h"
+#endif
diff --git a/drivers/staging/media/atomisp/pci/timed_controller_defs.h b/drivers/staging/media/atomisp/pci/timed_controller_defs.h
new file mode 100644
index 000000000000..75451e090f4f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/timed_controller_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _timed_controller_defs_h
+#define _timed_controller_defs_h
+
+#define _HRT_TIMED_CONTROLLER_CMD_REG_IDX 0
+
+#define _HRT_TIMED_CONTROLLER_REG_ALIGN 4
+
+#endif /* _timed_controller_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/version.h b/drivers/staging/media/atomisp/pci/version.h
new file mode 100644
index 000000000000..bbc4948baea9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/version.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef HRT_VERSION_H
+#define HRT_VERSION_H
+#define HRT_VERSION_MAJOR 1
+#define HRT_VERSION_MINOR 4
+#define HRT_VERSION 1_4
+#endif
diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/staging/media/hantro/Kconfig
index 99aed9a5b0b9..5b6cf9f62b1a 100644
--- a/drivers/staging/media/hantro/Kconfig
+++ b/drivers/staging/media/hantro/Kconfig
@@ -2,11 +2,13 @@
config VIDEO_HANTRO
tristate "Hantro VPU driver"
depends on ARCH_MXC || ARCH_ROCKCHIP || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CONTROLLER_REQUEST_API
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
+ select V4L2_H264
help
Support for the Hantro IP based Video Processing Units present on
Rockchip and NXP i.MX8M SoCs, which accelerate video and image
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile
index 68c29a9c4946..743ce08eb184 100644
--- a/drivers/staging/media/hantro/Makefile
+++ b/drivers/staging/media/hantro/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
obj-$(CONFIG_VIDEO_HANTRO) += hantro-vpu.o
hantro-vpu-y += \
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index 327ddef45345..3005207fc6fb 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -26,10 +26,6 @@
#include "hantro_hw.h"
-#define MB_DIM 16
-#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
-#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
-
struct hantro_ctx;
struct hantro_codec_ops;
@@ -421,7 +417,8 @@ hantro_get_dst_buf(struct hantro_ctx *ctx)
}
static inline bool
-hantro_needs_postproc(struct hantro_ctx *ctx, const struct hantro_fmt *fmt)
+hantro_needs_postproc(const struct hantro_ctx *ctx,
+ const struct hantro_fmt *fmt)
{
return !hantro_is_encoder_ctx(ctx) && fmt->fourcc != V4L2_PIX_FMT_NV12;
}
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index ace13973e2d0..0db8ad455160 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -80,15 +80,6 @@ hantro_enc_buf_finish(struct hantro_ctx *ctx, struct vb2_buffer *buf,
return 0;
}
-static int
-hantro_dec_buf_finish(struct hantro_ctx *ctx, struct vb2_buffer *buf,
- unsigned int bytesused)
-{
- /* For decoders set bytesused as per the output picture. */
- buf->planes[0].bytesused = ctx->dst_fmt.plane_fmt[0].sizeimage;
- return 0;
-}
-
static void hantro_job_finish(struct hantro_dev *vpu,
struct hantro_ctx *ctx,
unsigned int bytesused,
@@ -101,8 +92,8 @@ static void hantro_job_finish(struct hantro_dev *vpu,
pm_runtime_put_autosuspend(vpu->dev);
clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
- src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
if (WARN_ON(!src))
return;
@@ -112,14 +103,14 @@ static void hantro_job_finish(struct hantro_dev *vpu,
src->sequence = ctx->sequence_out++;
dst->sequence = ctx->sequence_cap++;
- ret = ctx->buf_finish(ctx, &dst->vb2_buf, bytesused);
- if (ret)
- result = VB2_BUF_STATE_ERROR;
-
- v4l2_m2m_buf_done(src, result);
- v4l2_m2m_buf_done(dst, result);
+ if (ctx->buf_finish) {
+ ret = ctx->buf_finish(ctx, &dst->vb2_buf, bytesused);
+ if (ret)
+ result = VB2_BUF_STATE_ERROR;
+ }
- v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ result);
}
void hantro_irq_done(struct hantro_dev *vpu, unsigned int bytesused,
@@ -431,7 +422,6 @@ static int hantro_open(struct file *filp)
ctx->buf_finish = hantro_enc_buf_finish;
} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
- ctx->buf_finish = hantro_dec_buf_finish;
} else {
ret = -ENODEV;
goto err_ctx_free;
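The hunk above can switch from v4l2_m2m_*_buf_remove() to v4l2_m2m_next_*_buf() because v4l2_m2m_buf_done_and_job_finish() dequeues the source and destination buffers, marks them done, and completes the job in one call. A minimal sketch of that pattern, with hypothetical driver types:

    static void my_job_finish(struct my_dev *dev, struct my_ctx *ctx,
			      enum vb2_buffer_state state)
    {
	    /* Dequeues the next src/dst buffers, marks them done with
	     * 'state' and finishes the m2m job in one step. */
	    v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx,
					     state);
    }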
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
index f2d3e81fb6ce..d561f125085a 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -11,7 +11,7 @@
*/
#include <linux/types.h>
-#include <linux/sort.h>
+#include <media/v4l2-h264.h>
#include <media/v4l2-mem2mem.h>
#include "hantro.h"
@@ -240,229 +240,6 @@ static void prepare_table(struct hantro_ctx *ctx)
reorder_scaling_list(ctx);
}
-struct hantro_h264_reflist_builder {
- const struct v4l2_h264_dpb_entry *dpb;
- s32 pocs[HANTRO_H264_DPB_SIZE];
- u8 unordered_reflist[HANTRO_H264_DPB_SIZE];
- int frame_nums[HANTRO_H264_DPB_SIZE];
- s32 curpoc;
- u8 num_valid;
-};
-
-static s32 get_poc(enum v4l2_field field, s32 top_field_order_cnt,
- s32 bottom_field_order_cnt)
-{
- switch (field) {
- case V4L2_FIELD_TOP:
- return top_field_order_cnt;
- case V4L2_FIELD_BOTTOM:
- return bottom_field_order_cnt;
- default:
- break;
- }
-
- return min(top_field_order_cnt, bottom_field_order_cnt);
-}
-
-static void
-init_reflist_builder(struct hantro_ctx *ctx,
- struct hantro_h264_reflist_builder *b)
-{
- const struct v4l2_ctrl_h264_slice_params *slice_params;
- const struct v4l2_ctrl_h264_decode_params *dec_param;
- const struct v4l2_ctrl_h264_sps *sps;
- struct vb2_v4l2_buffer *buf = hantro_get_dst_buf(ctx);
- const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
- int cur_frame_num, max_frame_num;
- unsigned int i;
-
- dec_param = ctx->h264_dec.ctrls.decode;
- slice_params = &ctx->h264_dec.ctrls.slices[0];
- sps = ctx->h264_dec.ctrls.sps;
- max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
- cur_frame_num = slice_params->frame_num;
-
- memset(b, 0, sizeof(*b));
- b->dpb = dpb;
- b->curpoc = get_poc(buf->field, dec_param->top_field_order_cnt,
- dec_param->bottom_field_order_cnt);
-
- for (i = 0; i < ARRAY_SIZE(ctx->h264_dec.dpb); i++) {
- int buf_idx;
-
- if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
- continue;
-
- buf_idx = vb2_find_timestamp(cap_q, dpb[i].reference_ts, 0);
- if (buf_idx < 0)
- continue;
-
- buf = to_vb2_v4l2_buffer(vb2_get_buffer(cap_q, buf_idx));
-
- /*
- * Handle frame_num wraparound as described in section
- * '8.2.4.1 Decoding process for picture numbers' of the spec.
- * TODO: This logic will have to be adjusted when we start
- * supporting interlaced content.
- */
- if (dpb[i].frame_num > cur_frame_num)
- b->frame_nums[i] = (int)dpb[i].frame_num - max_frame_num;
- else
- b->frame_nums[i] = dpb[i].frame_num;
-
- b->pocs[i] = get_poc(buf->field, dpb[i].top_field_order_cnt,
- dpb[i].bottom_field_order_cnt);
- b->unordered_reflist[b->num_valid] = i;
- b->num_valid++;
- }
-
- for (i = b->num_valid; i < ARRAY_SIZE(ctx->h264_dec.dpb); i++)
- b->unordered_reflist[i] = i;
-}
-
-static int p_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
-{
- const struct hantro_h264_reflist_builder *builder = data;
- const struct v4l2_h264_dpb_entry *a, *b;
- u8 idxa, idxb;
-
- idxa = *((u8 *)ptra);
- idxb = *((u8 *)ptrb);
- a = &builder->dpb[idxa];
- b = &builder->dpb[idxb];
-
- if ((a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) !=
- (b->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)) {
- /* Short term pics first. */
- if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
- return -1;
- else
- return 1;
- }
-
- /*
- * Short term pics in descending pic num order, long term ones in
- * ascending order.
- */
- if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
- return HANTRO_CMP(builder->frame_nums[idxb],
- builder->frame_nums[idxa]);
-
- return HANTRO_CMP(a->pic_num, b->pic_num);
-}
-
-static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
-{
- const struct hantro_h264_reflist_builder *builder = data;
- const struct v4l2_h264_dpb_entry *a, *b;
- s32 poca, pocb;
- u8 idxa, idxb;
-
- idxa = *((u8 *)ptra);
- idxb = *((u8 *)ptrb);
- a = &builder->dpb[idxa];
- b = &builder->dpb[idxb];
-
- if ((a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) !=
- (b->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)) {
- /* Short term pics first. */
- if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
- return -1;
- else
- return 1;
- }
-
- /* Long term pics in ascending pic num order. */
- if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return HANTRO_CMP(a->pic_num, b->pic_num);
-
- poca = builder->pocs[idxa];
- pocb = builder->pocs[idxb];
-
- /*
- * Short term pics with POC < cur POC first in POC descending order
- * followed by short term pics with POC > cur POC in POC ascending
- * order.
- */
- if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return HANTRO_CMP(poca, pocb);
- else if (poca < builder->curpoc)
- return HANTRO_CMP(pocb, poca);
-
- return HANTRO_CMP(poca, pocb);
-}
-
-static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
-{
- const struct hantro_h264_reflist_builder *builder = data;
- const struct v4l2_h264_dpb_entry *a, *b;
- s32 poca, pocb;
- u8 idxa, idxb;
-
- idxa = *((u8 *)ptra);
- idxb = *((u8 *)ptrb);
- a = &builder->dpb[idxa];
- b = &builder->dpb[idxb];
-
- if ((a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) !=
- (b->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)) {
- /* Short term pics first. */
- if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
- return -1;
- else
- return 1;
- }
-
- /* Long term pics in ascending pic num order. */
- if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return HANTRO_CMP(a->pic_num, b->pic_num);
-
- poca = builder->pocs[idxa];
- pocb = builder->pocs[idxb];
-
- /*
- * Short term pics with POC > cur POC first in POC ascending order
- * followed by short term pics with POC < cur POC in POC descending
- * order.
- */
- if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return HANTRO_CMP(pocb, poca);
- else if (poca < builder->curpoc)
- return HANTRO_CMP(pocb, poca);
-
- return HANTRO_CMP(poca, pocb);
-}
-
-static void
-build_p_ref_list(const struct hantro_h264_reflist_builder *builder,
- u8 *reflist)
-{
- memcpy(reflist, builder->unordered_reflist,
- sizeof(builder->unordered_reflist));
- sort_r(reflist, builder->num_valid, sizeof(*reflist),
- p_ref_list_cmp, NULL, builder);
-}
-
-static void
-build_b_ref_lists(const struct hantro_h264_reflist_builder *builder,
- u8 *b0_reflist, u8 *b1_reflist)
-{
- memcpy(b0_reflist, builder->unordered_reflist,
- sizeof(builder->unordered_reflist));
- sort_r(b0_reflist, builder->num_valid, sizeof(*b0_reflist),
- b0_ref_list_cmp, NULL, builder);
-
- memcpy(b1_reflist, builder->unordered_reflist,
- sizeof(builder->unordered_reflist));
- sort_r(b1_reflist, builder->num_valid, sizeof(*b1_reflist),
- b1_ref_list_cmp, NULL, builder);
-
- if (builder->num_valid > 1 &&
- !memcmp(b1_reflist, b0_reflist, builder->num_valid))
- swap(b1_reflist[0], b1_reflist[1]);
-}
-
static bool dpb_entry_match(const struct v4l2_h264_dpb_entry *a,
const struct v4l2_h264_dpb_entry *b)
{
@@ -560,7 +337,7 @@ int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
{
struct hantro_h264_dec_hw_ctx *h264_ctx = &ctx->h264_dec;
struct hantro_h264_dec_ctrls *ctrls = &h264_ctx->ctrls;
- struct hantro_h264_reflist_builder reflist_builder;
+ struct v4l2_h264_reflist_builder reflist_builder;
hantro_start_prepare_run(ctx);
@@ -596,10 +373,12 @@ int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
prepare_table(ctx);
/* Build the P/B{0,1} ref lists. */
- init_reflist_builder(ctx, &reflist_builder);
- build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
- build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
- h264_ctx->reflists.b1);
+ v4l2_h264_init_reflist_builder(&reflist_builder, ctrls->decode,
+ &ctrls->slices[0], ctrls->sps,
+ ctx->h264_dec.dpb);
+ v4l2_h264_build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
+ v4l2_h264_build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
+ h264_ctx->reflists.b1);
return 0;
}
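For reference, a worked example (hypothetical POC values) of the B0/B1 ordering rules spelled out in the comparators removed above, which the v4l2-h264 helpers now implement:

    /*
     * Short-term DPB entries with POCs {2, 4, 8}, current POC 6:
     *
     *   B0: POC < cur descending, then POC > cur ascending  -> 4, 2, 8
     *   B1: POC > cur ascending,  then POC < cur descending -> 8, 4, 2
     */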
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 7dfc9bad7297..4053d8710e04 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -18,6 +18,10 @@
#define DEC_8190_ALIGN_MASK 0x07U
+#define MB_DIM 16
+#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
+#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
+
struct hantro_dev;
struct hantro_ctx;
struct hantro_buf;
@@ -176,6 +180,33 @@ void hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
int hantro_h264_dec_init(struct hantro_ctx *ctx);
void hantro_h264_dec_exit(struct hantro_ctx *ctx);
+static inline size_t
+hantro_h264_mv_size(unsigned int width, unsigned int height)
+{
+ /*
+ * A decoded 8-bit 4:2:0 NV12 frame may need memory for up to
+ * 448 bytes per macroblock with additional 32 bytes on
+ * multi-core variants.
+ *
+ * The H264 decoder needs extra space on the output buffers
+ * to store motion vectors. This is needed for reference
+ * frames and only if the format is non-post-processed NV12.
+ *
+ * Memory layout is as follows:
+ *
+ * +---------------------------+
+ * | Y-plane 256 bytes x MBs |
+ * +---------------------------+
+ * | UV-plane 128 bytes x MBs |
+ * +---------------------------+
+ * | MV buffer 64 bytes x MBs |
+ * +---------------------------+
+ * | MC sync 32 bytes |
+ * +---------------------------+
+ */
+ return 64 * MB_WIDTH(width) * MB_WIDTH(height) + 32;
+}
+
void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx);
void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
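A quick worked example of the sizing helper added above, for a hypothetical 1920x1080 stream: MB_WIDTH(1920) = 120 and MB_WIDTH(1080) = 68, so the reference buffers grow by 64 * 120 * 68 + 32 = 522272 bytes.

    /* Hypothetical: extra motion-vector space for a 1920x1080 frame. */
    size_t extra = hantro_h264_mv_size(1920, 1080);	/* 522272 bytes */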
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index f4ae2cee0f18..f28a94e2fa93 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -30,6 +30,11 @@
#include "hantro_hw.h"
#include "hantro_v4l2.h"
+static int hantro_set_fmt_out(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp);
+static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp);
+
static const struct hantro_fmt *
hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts)
{
@@ -227,12 +232,12 @@ static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
return 0;
}
-static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
- bool capture)
+static int hantro_try_fmt(const struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp,
+ enum v4l2_buf_type type)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
const struct hantro_fmt *fmt, *vpu_fmt;
+ bool capture = !V4L2_TYPE_IS_OUTPUT(type);
bool coded;
coded = capture == hantro_is_encoder_ctx(ctx);
@@ -246,7 +251,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
fmt = hantro_find_format(ctx, pix_mp->pixelformat);
if (!fmt) {
fmt = hantro_get_default_fmt(ctx, coded);
- f->fmt.pix_mp.pixelformat = fmt->fourcc;
+ pix_mp->pixelformat = fmt->fourcc;
}
if (coded) {
@@ -273,32 +278,11 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
/* Fill remaining fields */
v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
pix_mp->height);
- /*
- * A decoded 8-bit 4:2:0 NV12 frame may need memory for up to
- * 448 bytes per macroblock with additional 32 bytes on
- * multi-core variants.
- *
- * The H264 decoder needs extra space on the output buffers
- * to store motion vectors. This is needed for reference
- * frames and only if the format is non-post-processed NV12.
- *
- * Memory layout is as follows:
- *
- * +---------------------------+
- * | Y-plane 256 bytes x MBs |
- * +---------------------------+
- * | UV-plane 128 bytes x MBs |
- * +---------------------------+
- * | MV buffer 64 bytes x MBs |
- * +---------------------------+
- * | MC sync 32 bytes |
- * +---------------------------+
- */
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE &&
!hantro_needs_postproc(ctx, fmt))
pix_mp->plane_fmt[0].sizeimage +=
- 64 * MB_WIDTH(pix_mp->width) *
- MB_WIDTH(pix_mp->height) + 32;
+ hantro_h264_mv_size(pix_mp->width,
+ pix_mp->height);
} else if (!pix_mp->plane_fmt[0].sizeimage) {
/*
* For coded formats the application can specify
@@ -315,13 +299,13 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
static int vidioc_try_fmt_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_try_fmt(file, priv, f, true);
+ return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
}
static int vidioc_try_fmt_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_try_fmt(file, priv, f, false);
+ return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
}
static void
@@ -355,11 +339,12 @@ hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
}
hantro_reset_fmt(fmt, vpu_fmt);
- fmt->num_planes = 1;
fmt->width = vpu_fmt->frmsize.min_width;
fmt->height = vpu_fmt->frmsize.min_height;
- fmt->plane_fmt[0].sizeimage = vpu_fmt->header_size +
- fmt->width * fmt->height * vpu_fmt->max_depth;
+ if (hantro_is_encoder_ctx(ctx))
+ hantro_set_fmt_cap(ctx, fmt);
+ else
+ hantro_set_fmt_out(ctx, fmt);
}
static void
@@ -381,9 +366,12 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx)
}
hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
- v4l2_fill_pixfmt_mp(raw_fmt, raw_vpu_fmt->fourcc,
- encoded_fmt->width,
- encoded_fmt->height);
+ raw_fmt->width = encoded_fmt->width;
+ raw_fmt->height = encoded_fmt->height;
+ if (hantro_is_encoder_ctx(ctx))
+ hantro_set_fmt_out(ctx, raw_fmt);
+ else
+ hantro_set_fmt_cap(ctx, raw_fmt);
}
void hantro_reset_fmts(struct hantro_ctx *ctx)
@@ -409,15 +397,15 @@ hantro_update_requires_request(struct hantro_ctx *ctx, u32 fourcc)
}
}
-static int
-vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
+static int hantro_set_fmt_out(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp)
{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct hantro_ctx *ctx = fh_to_ctx(priv);
- struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ struct vb2_queue *vq;
int ret;
- ret = vidioc_try_fmt_out_mplane(file, priv, f);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ ret = hantro_try_fmt(ctx, pix_mp, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (ret)
return ret;
@@ -479,16 +467,15 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
return 0;
}
-static int vidioc_s_fmt_cap_mplane(struct file *file, void *priv,
- struct v4l2_format *f)
+static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp)
{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct hantro_ctx *ctx = fh_to_ctx(priv);
struct vb2_queue *vq;
int ret;
/* Change not allowed if queue is busy. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(vq))
return -EBUSY;
@@ -509,7 +496,7 @@ static int vidioc_s_fmt_cap_mplane(struct file *file, void *priv,
return -EBUSY;
}
- ret = vidioc_try_fmt_cap_mplane(file, priv, f);
+ ret = hantro_try_fmt(ctx, pix_mp, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (ret)
return ret;
@@ -543,6 +530,18 @@ static int vidioc_s_fmt_cap_mplane(struct file *file, void *priv,
return 0;
}
+static int
+vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ return hantro_set_fmt_out(fh_to_ctx(priv), &f->fmt.pix_mp);
+}
+
+static int
+vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ return hantro_set_fmt_cap(fh_to_ctx(priv), &f->fmt.pix_mp);
+}
+
const struct v4l2_ioctl_ops hantro_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
@@ -608,7 +607,7 @@ hantro_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
}
static int
-hantro_buf_plane_check(struct vb2_buffer *vb, const struct hantro_fmt *vpu_fmt,
+hantro_buf_plane_check(struct vb2_buffer *vb,
struct v4l2_pix_format_mplane *pixfmt)
{
unsigned int sz;
@@ -630,12 +629,18 @@ static int hantro_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pix_fmt;
+ int ret;
if (V4L2_TYPE_IS_OUTPUT(vq->type))
- return hantro_buf_plane_check(vb, ctx->vpu_src_fmt,
- &ctx->src_fmt);
-
- return hantro_buf_plane_check(vb, ctx->vpu_dst_fmt, &ctx->dst_fmt);
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+ ret = hantro_buf_plane_check(vb, pix_fmt);
+ if (ret)
+ return ret;
+ vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
+ return 0;
}
static void hantro_buf_queue(struct vb2_buffer *vb)
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
index 8f1ae50a4abd..f555aac8a9d5 100644
--- a/drivers/staging/media/imx/Kconfig
+++ b/drivers/staging/media/imx/Kconfig
@@ -2,8 +2,9 @@
config VIDEO_IMX_MEDIA
tristate "i.MX5/6 V4L2 media core driver"
depends on ARCH_MXC || COMPILE_TEST
- depends on MEDIA_CONTROLLER && VIDEO_V4L2 && IMX_IPUV3_CORE
- depends on VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && IMX_IPUV3_CORE
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
diff --git a/drivers/staging/media/imx/TODO b/drivers/staging/media/imx/TODO
index 6f29b5ca5324..a371cdedcdb0 100644
--- a/drivers/staging/media/imx/TODO
+++ b/drivers/staging/media/imx/TODO
@@ -17,35 +17,6 @@
decided whether this feature is useful enough to make it generally
available by exporting to v4l2-core.
-- After all async subdevices have been bound, v4l2_fwnode_parse_link()
- is used to form the media links between the devices discovered in
- the OF graph.
-
- While this approach allows support for arbitrary OF graphs, there
- are some assumptions for this to work:
-
- 1. If a port owned by a device in the graph has endpoint nodes, the
- port is treated as a media pad.
-
- This presents problems for devices that don't make this port = pad
- assumption. Examples are SMIAPP compatible cameras which define only
- a single output port node, but which define multiple pads owned
- by multiple subdevices (pixel-array, binner, scaler). Or video
- decoders (entity function MEDIA_ENT_F_ATV_DECODER), which also define
- only a single output port node, but define multiple pads for video,
- VBI, and audio out.
-
- A workaround at present is to set the port reg properties to
- correspond to the media pad index that the port represents. A
- possible long-term solution is to implement a subdev API that
- maps a port id to a media pad index.
-
- 2. Every endpoint of a port owned by a device in the graph is treated
- as a media link.
-
- Which means a port must not contain mixed-use endpoints, they
- must all refer to media links between V4L2 subdevices.
-
- i.MX7: all of the above, since it uses the imx media core
- i.MX7: use Frame Interval Monitor
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 2a4f77e83ed3..7658b83466a7 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -106,8 +106,8 @@ static int prp_enum_mbus_code(struct v4l2_subdev *sd,
switch (code->pad) {
case PRP_SINK_PAD:
- ret = imx_media_enum_ipu_format(&code->code, code->index,
- CS_SEL_ANY);
+ ret = imx_media_enum_ipu_formats(&code->code, code->index,
+ PIXFMT_SEL_YUV_RGB);
break;
case PRP_SRC_PAD_PRPENC:
case PRP_SRC_PAD_PRPVF:
@@ -180,10 +180,12 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
MIN_H, MAX_H, H_ALIGN, S_ALIGN);
cc = imx_media_find_ipu_format(sdformat->format.code,
- CS_SEL_ANY);
+ PIXFMT_SEL_YUV_RGB);
if (!cc) {
- imx_media_enum_ipu_format(&code, 0, CS_SEL_ANY);
- cc = imx_media_find_ipu_format(code, CS_SEL_ANY);
+ imx_media_enum_ipu_formats(&code, 0,
+ PIXFMT_SEL_YUV_RGB);
+ cc = imx_media_find_ipu_format(code,
+ PIXFMT_SEL_YUV_RGB);
sdformat->format.code = cc->codes[0];
}
@@ -438,7 +440,8 @@ static int prp_registered(struct v4l2_subdev *sd)
priv->frame_interval.denominator = 30;
/* set a default mbus format */
- imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
return imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
V4L2_FIELD_NONE, NULL);
}
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 09c4e3f33807..b1f84e0ac486 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -850,7 +850,8 @@ static int prp_enum_mbus_code(struct v4l2_subdev *sd,
if (code->pad >= PRPENCVF_NUM_PADS)
return -EINVAL;
- return imx_media_enum_ipu_format(&code->code, code->index, CS_SEL_ANY);
+ return imx_media_enum_ipu_formats(&code->code, code->index,
+ PIXFMT_SEL_YUV_RGB);
}
static int prp_get_fmt(struct v4l2_subdev *sd,
@@ -885,12 +886,14 @@ static void prp_try_fmt(struct prp_priv *priv,
{
struct v4l2_mbus_framefmt *infmt;
- *cc = imx_media_find_ipu_format(sdformat->format.code, CS_SEL_ANY);
+ *cc = imx_media_find_ipu_format(sdformat->format.code,
+ PIXFMT_SEL_YUV_RGB);
if (!*cc) {
u32 code;
- imx_media_enum_ipu_format(&code, 0, CS_SEL_ANY);
- *cc = imx_media_find_ipu_format(code, CS_SEL_ANY);
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV_RGB);
+ *cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
+
sdformat->format.code = (*cc)->codes[0];
}
@@ -1248,7 +1251,8 @@ static int prp_registered(struct v4l2_subdev *sd)
u32 code;
/* set a default mbus format */
- imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
640, 480, code, V4L2_FIELD_NONE,
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index d37b776ff86d..c1931eb2540e 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -91,11 +91,11 @@ static int capture_enum_framesizes(struct file *file, void *fh,
};
int ret;
- cc = imx_media_find_format(fsize->pixel_format, CS_SEL_ANY, true);
+ cc = imx_media_find_pixel_format(fsize->pixel_format, PIXFMT_SEL_ANY);
if (!cc)
return -EINVAL;
- fse.code = cc->codes[0];
+ fse.code = cc->codes ? cc->codes[0] : 0;
ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_size, NULL, &fse);
if (ret)
@@ -133,11 +133,11 @@ static int capture_enum_frameintervals(struct file *file, void *fh,
};
int ret;
- cc = imx_media_find_format(fival->pixel_format, CS_SEL_ANY, true);
+ cc = imx_media_find_pixel_format(fival->pixel_format, PIXFMT_SEL_ANY);
if (!cc)
return -EINVAL;
- fie.code = cc->codes[0];
+ fie.code = cc->codes ? cc->codes[0] : 0;
ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_interval,
NULL, &fie);
@@ -167,17 +167,19 @@ static int capture_enum_fmt_vid_cap(struct file *file, void *fh,
return ret;
}
- cc_src = imx_media_find_ipu_format(fmt_src.format.code, CS_SEL_ANY);
+ cc_src = imx_media_find_ipu_format(fmt_src.format.code,
+ PIXFMT_SEL_YUV_RGB);
if (cc_src) {
- u32 cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
- CS_SEL_YUV : CS_SEL_RGB;
+ enum imx_pixfmt_sel fmt_sel =
+ (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
- ret = imx_media_enum_format(&fourcc, f->index, cs_sel);
+ ret = imx_media_enum_pixel_formats(&fourcc, f->index, fmt_sel);
if (ret)
return ret;
} else {
cc_src = imx_media_find_mbus_format(fmt_src.format.code,
- CS_SEL_ANY, true);
+ PIXFMT_SEL_ANY);
if (WARN_ON(!cc_src))
return -EINVAL;
@@ -209,22 +211,24 @@ static int __capture_try_fmt_vid_cap(struct capture_priv *priv,
{
const struct imx_media_pixfmt *cc, *cc_src;
- cc_src = imx_media_find_ipu_format(fmt_src->format.code, CS_SEL_ANY);
+ cc_src = imx_media_find_ipu_format(fmt_src->format.code,
+ PIXFMT_SEL_YUV_RGB);
if (cc_src) {
- u32 fourcc, cs_sel;
+ enum imx_pixfmt_sel fmt_sel;
+ u32 fourcc;
- cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
- CS_SEL_YUV : CS_SEL_RGB;
+ fmt_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
fourcc = f->fmt.pix.pixelformat;
- cc = imx_media_find_format(fourcc, cs_sel, false);
+ cc = imx_media_find_pixel_format(fourcc, fmt_sel);
if (!cc) {
- imx_media_enum_format(&fourcc, 0, cs_sel);
- cc = imx_media_find_format(fourcc, cs_sel, false);
+ imx_media_enum_pixel_formats(&fourcc, 0, fmt_sel);
+ cc = imx_media_find_pixel_format(fourcc, fmt_sel);
}
} else {
cc_src = imx_media_find_mbus_format(fmt_src->format.code,
- CS_SEL_ANY, true);
+ PIXFMT_SEL_ANY);
if (WARN_ON(!cc_src))
return -EINVAL;
@@ -789,8 +793,8 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
&fmt_src.format, NULL);
vdev->compose.width = fmt_src.format.width;
vdev->compose.height = fmt_src.format.height;
- vdev->cc = imx_media_find_format(vdev->fmt.fmt.pix.pixelformat,
- CS_SEL_ANY, false);
+ vdev->cc = imx_media_find_pixel_format(vdev->fmt.fmt.pix.pixelformat,
+ PIXFMT_SEL_ANY);
v4l2_info(sd, "Registered %s as /dev/%s\n", vfd->name,
video_device_node_name(vfd));
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index 2cc77f6e84b6..fab1155a5958 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -25,6 +25,8 @@
#define fh_to_ctx(__fh) container_of(__fh, struct ipu_csc_scaler_ctx, fh)
+#define IMX_CSC_SCALER_NAME "imx-csc-scaler"
+
enum {
V4L2_M2M_SRC = 0,
V4L2_M2M_DST = 1,
@@ -150,10 +152,10 @@ err:
static int ipu_csc_scaler_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- strscpy(cap->driver, "imx-media-csc-scaler", sizeof(cap->driver));
- strscpy(cap->card, "imx-media-csc-scaler", sizeof(cap->card));
- strscpy(cap->bus_info, "platform:imx-media-csc-scaler",
- sizeof(cap->bus_info));
+ strscpy(cap->driver, IMX_CSC_SCALER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX_CSC_SCALER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", IMX_CSC_SCALER_NAME);
return 0;
}
@@ -164,7 +166,8 @@ static int ipu_csc_scaler_enum_fmt(struct file *file, void *fh,
u32 fourcc;
int ret;
- ret = imx_media_enum_format(&fourcc, f->index, CS_SEL_ANY);
+ ret = imx_media_enum_pixel_formats(&fourcc, f->index,
+ PIXFMT_SEL_YUV_RGB);
if (ret)
return ret;
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index e76a6a85baa3..d7e5b9ed27b8 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -58,6 +58,8 @@ struct csi_priv {
struct ipu_soc *ipu;
struct v4l2_subdev sd;
struct media_pad pad[CSI_NUM_PADS];
+ struct v4l2_async_notifier notifier;
+
/* the video device at IDMAC output pad */
struct imx_media_video_dev *vdev;
struct imx_media_fim *fim;
@@ -118,6 +120,11 @@ static inline struct csi_priv *sd_to_dev(struct v4l2_subdev *sdev)
return container_of(sdev, struct csi_priv, sd);
}
+static inline struct csi_priv *notifier_to_dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi_priv, notifier);
+}
+
static inline bool is_parallel_bus(struct v4l2_fwnode_endpoint *ep)
{
return ep->bus_type != V4L2_MBUS_CSI2_DPHY;
@@ -157,8 +164,7 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
static int csi_get_upstream_endpoint(struct csi_priv *priv,
struct v4l2_fwnode_endpoint *ep)
{
- struct device_node *endpoint, *port;
- struct media_entity *src;
+ struct fwnode_handle *endpoint;
struct v4l2_subdev *sd;
struct media_pad *pad;
@@ -169,50 +175,43 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
return -EPIPE;
sd = priv->src_sd;
- src = &sd->entity;
- if (src->function == MEDIA_ENT_F_VID_MUX) {
+ switch (sd->grp_id) {
+ case IMX_MEDIA_GRP_ID_CSI_MUX:
/*
- * CSI is connected directly to video mux, skip up to
+ * CSI is connected directly to CSI mux, skip up to
* CSI-2 receiver if it is in the path, otherwise stay
- * with video mux.
+ * with the CSI mux.
*/
- sd = imx_media_pipeline_subdev(src, IMX_MEDIA_GRP_ID_CSI2,
+ sd = imx_media_pipeline_subdev(&sd->entity,
+ IMX_MEDIA_GRP_ID_CSI2,
true);
- if (!IS_ERR(sd))
- src = &sd->entity;
+ if (IS_ERR(sd))
+ sd = priv->src_sd;
+ break;
+ case IMX_MEDIA_GRP_ID_CSI2:
+ break;
+ default:
+ /*
+ * the source is neither the CSI mux nor the CSI-2 receiver,
+ * get the source pad directly upstream from CSI itself.
+ */
+ sd = &priv->sd;
+ break;
}
- /*
- * If the source is neither the video mux nor the CSI-2 receiver,
- * get the source pad directly upstream from CSI itself.
- */
- if (src->function != MEDIA_ENT_F_VID_MUX &&
- sd->grp_id != IMX_MEDIA_GRP_ID_CSI2)
- src = &priv->sd.entity;
-
- /* get source pad of entity directly upstream from src */
- pad = imx_media_pipeline_pad(src, 0, 0, true);
+ /* get source pad of entity directly upstream from sd */
+ pad = imx_media_pipeline_pad(&sd->entity, 0, 0, true);
if (!pad)
return -ENODEV;
- sd = media_entity_to_v4l2_subdev(pad->entity);
+ endpoint = imx_media_get_pad_fwnode(pad);
+ if (IS_ERR(endpoint))
+ return PTR_ERR(endpoint);
- /*
- * NOTE: this assumes an OF-graph port id is the same as a
- * media pad index.
- */
- port = of_graph_get_port_by_id(sd->dev->of_node, pad->index);
- if (!port)
- return -ENODEV;
-
- endpoint = of_get_next_child(port, NULL);
- of_node_put(port);
- if (!endpoint)
- return -ENODEV;
+ v4l2_fwnode_endpoint_parse(endpoint, ep);
- v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), ep);
- of_node_put(endpoint);
+ fwnode_handle_put(endpoint);
return 0;
}
@@ -1234,12 +1233,12 @@ static int csi_enum_mbus_code(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, code->which);
- incc = imx_media_find_mbus_format(infmt->code, CS_SEL_ANY, true);
+ incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);
switch (code->pad) {
case CSI_SINK_PAD:
- ret = imx_media_enum_mbus_format(&code->code, code->index,
- CS_SEL_ANY, true);
+ ret = imx_media_enum_mbus_formats(&code->code, code->index,
+ PIXFMT_SEL_ANY);
break;
case CSI_SRC_PAD_DIRECT:
case CSI_SRC_PAD_IDMAC:
@@ -1256,11 +1255,13 @@ static int csi_enum_mbus_code(struct v4l2_subdev *sd,
}
code->code = infmt->code;
} else {
- u32 cs_sel = (incc->cs == IPUV3_COLORSPACE_YUV) ?
- CS_SEL_YUV : CS_SEL_RGB;
- ret = imx_media_enum_ipu_format(&code->code,
- code->index,
- cs_sel);
+ enum imx_pixfmt_sel fmt_sel =
+ (incc->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
+
+ ret = imx_media_enum_ipu_formats(&code->code,
+ code->index,
+ fmt_sel);
}
break;
default:
@@ -1433,8 +1434,7 @@ static void csi_try_fmt(struct csi_priv *priv,
switch (sdformat->pad) {
case CSI_SRC_PAD_DIRECT:
case CSI_SRC_PAD_IDMAC:
- incc = imx_media_find_mbus_format(infmt->code,
- CS_SEL_ANY, true);
+ incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);
sdformat->format.width = compose->width;
sdformat->format.height = compose->height;
@@ -1443,14 +1443,15 @@ static void csi_try_fmt(struct csi_priv *priv,
sdformat->format.code = infmt->code;
*cc = incc;
} else {
- u32 cs_sel = (incc->cs == IPUV3_COLORSPACE_YUV) ?
- CS_SEL_YUV : CS_SEL_RGB;
+ enum imx_pixfmt_sel fmt_sel =
+ (incc->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
*cc = imx_media_find_ipu_format(sdformat->format.code,
- cs_sel);
+ fmt_sel);
if (!*cc) {
- imx_media_enum_ipu_format(&code, 0, cs_sel);
- *cc = imx_media_find_ipu_format(code, cs_sel);
+ imx_media_enum_ipu_formats(&code, 0, fmt_sel);
+ *cc = imx_media_find_ipu_format(code, fmt_sel);
sdformat->format.code = (*cc)->codes[0];
}
}
@@ -1470,12 +1471,12 @@ static void csi_try_fmt(struct csi_priv *priv,
MIN_H, MAX_H, H_ALIGN, S_ALIGN);
*cc = imx_media_find_mbus_format(sdformat->format.code,
- CS_SEL_ANY, true);
+ PIXFMT_SEL_ANY);
if (!*cc) {
- imx_media_enum_mbus_format(&code, 0,
- CS_SEL_ANY, false);
+ imx_media_enum_mbus_formats(&code, 0,
+ PIXFMT_SEL_YUV_RGB);
*cc = imx_media_find_mbus_format(code,
- CS_SEL_ANY, false);
+ PIXFMT_SEL_YUV_RGB);
sdformat->format.code = (*cc)->codes[0];
}
@@ -1761,7 +1762,7 @@ static int csi_registered(struct v4l2_subdev *sd)
for (i = 0; i < CSI_NUM_PADS; i++) {
code = 0;
if (i != CSI_SINK_PAD)
- imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
/* set a default mbus format */
ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
@@ -1828,9 +1829,32 @@ static void csi_unregistered(struct v4l2_subdev *sd)
ipu_csi_put(priv->csi);
}
+/*
+ * The CSI has only one fwnode endpoint, at the sink pad. Verify the
+ * endpoint belongs to us, and return CSI_SINK_PAD.
+ */
+static int csi_get_fwnode_pad(struct media_entity *entity,
+ struct fwnode_endpoint *endpoint)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct fwnode_handle *csi_port = dev_fwnode(priv->dev);
+ struct fwnode_handle *csi_ep;
+ int ret;
+
+ csi_ep = fwnode_get_next_child_node(csi_port, NULL);
+
+ ret = endpoint->local_fwnode == csi_ep ? CSI_SINK_PAD : -ENXIO;
+
+ fwnode_handle_put(csi_ep);
+
+ return ret;
+}
+
static const struct media_entity_operations csi_entity_ops = {
.link_setup = csi_link_setup,
.link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = csi_get_fwnode_pad,
};
static const struct v4l2_subdev_core_ops csi_core_ops = {
@@ -1867,59 +1891,72 @@ static const struct v4l2_subdev_internal_ops csi_internal_ops = {
.unregistered = csi_unregistered,
};
-static int imx_csi_parse_endpoint(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd)
+static int imx_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
{
- return fwnode_device_is_available(asd->match.fwnode) ? 0 : -ENOTCONN;
+ struct csi_priv *priv = notifier_to_dev(notifier);
+ struct media_pad *sink = &priv->sd.entity.pads[CSI_SINK_PAD];
+
+ /*
+ * If the subdev is a video mux, it must be one of the CSI
+ * muxes. Mark it as such via its group id.
+ */
+ if (sd->entity.function == MEDIA_ENT_F_VID_MUX)
+ sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink);
}
+static const struct v4l2_async_notifier_operations csi_notify_ops = {
+ .bound = imx_csi_notify_bound,
+};
+
static int imx_csi_async_register(struct csi_priv *priv)
{
- struct v4l2_async_notifier *notifier;
- struct fwnode_handle *fwnode;
+ struct v4l2_async_subdev *asd = NULL;
+ struct fwnode_handle *ep;
unsigned int port;
int ret;
- notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
- if (!notifier)
- return -ENOMEM;
-
- v4l2_async_notifier_init(notifier);
-
- fwnode = dev_fwnode(priv->dev);
+ v4l2_async_notifier_init(&priv->notifier);
/* get this CSI's port id */
- ret = fwnode_property_read_u32(fwnode, "reg", &port);
+ ret = fwnode_property_read_u32(dev_fwnode(priv->dev), "reg", &port);
if (ret < 0)
- goto out_free;
+ return ret;
- ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
- priv->dev->parent, notifier, sizeof(struct v4l2_async_subdev),
- port, imx_csi_parse_endpoint);
- if (ret < 0)
- goto out_cleanup;
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(priv->dev->parent),
+ port, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (ep) {
+ asd = kzalloc(sizeof(*asd), GFP_KERNEL);
+ if (!asd) {
+ fwnode_handle_put(ep);
+ return -ENOMEM;
+ }
- ret = v4l2_async_subdev_notifier_register(&priv->sd, notifier);
- if (ret < 0)
- goto out_cleanup;
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &priv->notifier, ep, asd);
- ret = v4l2_async_register_subdev(&priv->sd);
- if (ret < 0)
- goto out_unregister;
+ fwnode_handle_put(ep);
- priv->sd.subdev_notifier = notifier;
+ if (ret) {
+ kfree(asd);
+ /* OK if asd already exists */
+ if (ret != -EEXIST)
+ return ret;
+ }
+ }
- return 0;
+ priv->notifier.ops = &csi_notify_ops;
-out_unregister:
- v4l2_async_notifier_unregister(notifier);
-out_cleanup:
- v4l2_async_notifier_cleanup(notifier);
-out_free:
- kfree(notifier);
+ ret = v4l2_async_subdev_notifier_register(&priv->sd,
+ &priv->notifier);
+ if (ret)
+ return ret;
- return ret;
+ return v4l2_async_register_subdev(&priv->sd);
}
static int imx_csi_probe(struct platform_device *pdev)
@@ -1999,9 +2036,13 @@ static int imx_csi_probe(struct platform_device *pdev)
ret = imx_csi_async_register(priv);
if (ret)
- goto free;
+ goto cleanup;
return 0;
+
+cleanup:
+ v4l2_async_notifier_unregister(&priv->notifier);
+ v4l2_async_notifier_cleanup(&priv->notifier);
free:
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
@@ -2015,6 +2056,8 @@ static int imx_csi_remove(struct platform_device *pdev)
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
+ v4l2_async_notifier_unregister(&priv->notifier);
+ v4l2_async_notifier_cleanup(&priv->notifier);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
index 66b505f7e8df..5fe4b22ab847 100644
--- a/drivers/staging/media/imx/imx-media-dev-common.c
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -24,47 +24,39 @@ static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
{
- v4l2_info(sd->v4l2_dev, "subdev %s bound\n", sd->name);
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+
+ dev_dbg(imxmd->md.dev, "subdev %s bound\n", sd->name);
return 0;
}
/*
- * Create the media links for all subdevs that registered.
+ * Create the missing media links from the CSI-2 receiver.
* Called after all async subdevs have bound.
*/
-static int imx_media_create_links(struct v4l2_async_notifier *notifier)
+static void imx_media_create_csi2_links(struct imx_media_dev *imxmd)
{
- struct imx_media_dev *imxmd = notifier2dev(notifier);
- struct v4l2_subdev *sd;
+ struct v4l2_subdev *sd, *csi2 = NULL;
list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
- switch (sd->grp_id) {
- case IMX_MEDIA_GRP_ID_IPU_VDIC:
- case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
- case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
- case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
- /*
- * links have already been created for the
- * sync-registered subdevs.
- */
- break;
- case IMX_MEDIA_GRP_ID_IPU_CSI0:
- case IMX_MEDIA_GRP_ID_IPU_CSI1:
- case IMX_MEDIA_GRP_ID_CSI:
- imx_media_create_csi_of_links(imxmd, sd);
- break;
- default:
- /*
- * if this subdev has fwnode links, create media
- * links for them.
- */
- imx_media_create_of_links(imxmd, sd);
+ if (sd->grp_id == IMX_MEDIA_GRP_ID_CSI2) {
+ csi2 = sd;
break;
}
}
+ if (!csi2)
+ return;
- return 0;
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ /* skip if not a CSI or a CSI mux */
+ if (!(sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) &&
+ !(sd->grp_id & IMX_MEDIA_GRP_ID_CSI) &&
+ !(sd->grp_id & IMX_MEDIA_GRP_ID_CSI_MUX))
+ continue;
+
+ v4l2_create_fwnode_links(csi2, sd);
+ }
}
/*
@@ -196,9 +188,7 @@ int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
mutex_lock(&imxmd->mutex);
- ret = imx_media_create_links(notifier);
- if (ret)
- goto unlock;
+ imx_media_create_csi2_links(imxmd);
ret = imx_media_create_pad_vdev_lists(imxmd);
if (ret)
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 2c3c2adca683..6d2205461e56 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -32,7 +32,7 @@ static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
return ret;
}
- v4l2_info(&imxmd->v4l2_dev, "subdev %s bound\n", sd->name);
+ dev_dbg(imxmd->md.dev, "subdev %s bound\n", sd->name);
return 0;
}
diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
index d4237e1a4241..da4109b2fd13 100644
--- a/drivers/staging/media/imx/imx-media-internal-sd.c
+++ b/drivers/staging/media/imx/imx-media-internal-sd.c
@@ -142,9 +142,9 @@ static int create_internal_link(struct imx_media_dev *imxmd,
&sink->entity.pads[link->remote_pad]))
return 0;
- v4l2_info(&imxmd->v4l2_dev, "%s:%d -> %s:%d\n",
- src->name, link->local_pad,
- sink->name, link->remote_pad);
+ dev_dbg(imxmd->md.dev, "%s:%d -> %s:%d\n",
+ src->name, link->local_pad,
+ sink->name, link->remote_pad);
ret = media_create_pad_link(&src->entity, link->local_pad,
&sink->entity, link->remote_pad, 0);
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index 2d3efd2a6dde..82e13e972e23 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -74,117 +74,3 @@ err_out:
return ret;
}
EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
-
-/*
- * Create a single media link to/from sd using a fwnode link.
- *
- * NOTE: this function assumes an OF port node is equivalent to
- * a media pad (port id equal to media pad index), and that an
- * OF endpoint node is equivalent to a media link.
- */
-static int create_of_link(struct imx_media_dev *imxmd,
- struct v4l2_subdev *sd,
- struct v4l2_fwnode_link *link)
-{
- struct v4l2_subdev *remote, *src, *sink;
- int src_pad, sink_pad;
-
- if (link->local_port >= sd->entity.num_pads)
- return -EINVAL;
-
- remote = imx_media_find_subdev_by_fwnode(imxmd, link->remote_node);
- if (!remote)
- return 0;
-
- if (sd->entity.pads[link->local_port].flags & MEDIA_PAD_FL_SINK) {
- src = remote;
- src_pad = link->remote_port;
- sink = sd;
- sink_pad = link->local_port;
- } else {
- src = sd;
- src_pad = link->local_port;
- sink = remote;
- sink_pad = link->remote_port;
- }
-
- /* make sure link doesn't already exist before creating */
- if (media_entity_find_link(&src->entity.pads[src_pad],
- &sink->entity.pads[sink_pad]))
- return 0;
-
- v4l2_info(sd->v4l2_dev, "%s:%d -> %s:%d\n",
- src->name, src_pad, sink->name, sink_pad);
-
- return media_create_pad_link(&src->entity, src_pad,
- &sink->entity, sink_pad, 0);
-}
-
-/*
- * Create media links to/from sd using its device-tree endpoints.
- */
-int imx_media_create_of_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *sd)
-{
- struct v4l2_fwnode_link link;
- struct device_node *ep;
- int ret;
-
- for_each_endpoint_of_node(sd->dev->of_node, ep) {
- ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
- if (ret)
- continue;
-
- ret = create_of_link(imxmd, sd, &link);
- v4l2_fwnode_put_link(&link);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_media_create_of_links);
-
-/*
- * Create media links to the given CSI subdevice's sink pads,
- * using its device-tree endpoints.
- */
-int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *csi)
-{
- struct device_node *csi_np = csi->dev->of_node;
- struct device_node *ep;
-
- for_each_child_of_node(csi_np, ep) {
- struct fwnode_handle *fwnode, *csi_ep;
- struct v4l2_fwnode_link link;
- int ret;
-
- memset(&link, 0, sizeof(link));
-
- link.local_node = of_fwnode_handle(csi_np);
- link.local_port = CSI_SINK_PAD;
-
- csi_ep = of_fwnode_handle(ep);
-
- fwnode = fwnode_graph_get_remote_endpoint(csi_ep);
- if (!fwnode)
- continue;
-
- fwnode = fwnode_get_parent(fwnode);
- fwnode_property_read_u32(fwnode, "reg", &link.remote_port);
- fwnode = fwnode_get_next_parent(fwnode);
- if (is_of_node(fwnode) &&
- of_node_name_eq(to_of_node(fwnode), "ports"))
- fwnode = fwnode_get_next_parent(fwnode);
- link.remote_node = fwnode;
-
- ret = create_of_link(imxmd, csi, &link);
- fwnode_handle_put(link.remote_node);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_media_create_csi_of_links);
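The hand-rolled OF-graph link creation removed above is superseded by the v4l2 core fwnode link helpers, which walk a bound subdev's endpoints and create the matching media links. A minimal sketch of the replacement pattern in a notifier .bound() callback, with hypothetical driver types:

    static int my_notify_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_subdev *asd)
    {
	    struct my_dev *dev = my_notifier_to_dev(notifier);

	    /* Create media links for every fwnode link between the two
	     * subdevs' endpoints. */
	    return v4l2_create_fwnode_links(sd, dev->sink_sd);
    }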
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index fae981698c49..c2088f7ceef5 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -7,36 +7,30 @@
#include <linux/module.h>
#include "imx-media.h"
+#define IMX_BUS_FMTS(fmt...) (const u32[]) {fmt, 0}
+
/*
* List of supported pixel formats for the subdevs.
- *
- * In all of these tables, the non-mbus formats (with no
- * mbus codes) must all fall at the end of the table.
*/
-
-static const struct imx_media_pixfmt yuv_formats[] = {
+static const struct imx_media_pixfmt pixel_formats[] = {
+ /*** YUV formats start here ***/
{
.fourcc = V4L2_PIX_FMT_UYVY,
- .codes = {
+ .codes = IMX_BUS_FMTS(
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_UYVY8_1X16
- },
+ ),
.cs = IPUV3_COLORSPACE_YUV,
.bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_YUYV,
- .codes = {
+ .codes = IMX_BUS_FMTS(
MEDIA_BUS_FMT_YUYV8_2X8,
MEDIA_BUS_FMT_YUYV8_1X16
- },
+ ),
.cs = IPUV3_COLORSPACE_YUV,
.bpp = 16,
- },
- /***
- * non-mbus YUV formats start here. NOTE! when adding non-mbus
- * formats, NUM_NON_MBUS_YUV_FORMATS must be updated below.
- ***/
- {
+ }, {
.fourcc = V4L2_PIX_FMT_YUV420,
.cs = IPUV3_COLORSPACE_YUV,
.bpp = 12,
@@ -61,213 +55,212 @@ static const struct imx_media_pixfmt yuv_formats[] = {
.cs = IPUV3_COLORSPACE_YUV,
.bpp = 16,
.planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV32,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_AYUV8_1X32),
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 32,
+ .ipufmt = true,
},
-};
-
-#define NUM_NON_MBUS_YUV_FORMATS 5
-#define NUM_YUV_FORMATS ARRAY_SIZE(yuv_formats)
-#define NUM_MBUS_YUV_FORMATS (NUM_YUV_FORMATS - NUM_NON_MBUS_YUV_FORMATS)
-
-static const struct imx_media_pixfmt rgb_formats[] = {
+ /*** RGB formats start here ***/
{
.fourcc = V4L2_PIX_FMT_RGB565,
- .codes = {MEDIA_BUS_FMT_RGB565_2X8_LE},
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_RGB565_2X8_LE),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
.cycles = 2,
}, {
.fourcc = V4L2_PIX_FMT_RGB24,
- .codes = {
+ .codes = IMX_BUS_FMTS(
MEDIA_BUS_FMT_RGB888_1X24,
MEDIA_BUS_FMT_RGB888_2X12_LE
- },
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24,
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 24,
}, {
.fourcc = V4L2_PIX_FMT_XRGB32,
- .codes = {MEDIA_BUS_FMT_ARGB8888_1X32},
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_ARGB8888_1X32),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_ARGB8888_1X32),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 32,
.ipufmt = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
},
/*** raw bayer and grayscale formats start here ***/
{
.fourcc = V4L2_PIX_FMT_SBGGR8,
- .codes = {MEDIA_BUS_FMT_SBGGR8_1X8},
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR8_1X8),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 8,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
- .codes = {MEDIA_BUS_FMT_SGBRG8_1X8},
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG8_1X8),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 8,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
- .codes = {MEDIA_BUS_FMT_SGRBG8_1X8},
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG8_1X8),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 8,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB8,
- .codes = {MEDIA_BUS_FMT_SRGGB8_1X8},
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB8_1X8),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 8,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR16,
- .codes = {
+ .codes = IMX_BUS_FMTS(
MEDIA_BUS_FMT_SBGGR10_1X10,
MEDIA_BUS_FMT_SBGGR12_1X12,
MEDIA_BUS_FMT_SBGGR14_1X14,
MEDIA_BUS_FMT_SBGGR16_1X16
- },
+ ),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG16,
- .codes = {
+ .codes = IMX_BUS_FMTS(
MEDIA_BUS_FMT_SGBRG10_1X10,
MEDIA_BUS_FMT_SGBRG12_1X12,
MEDIA_BUS_FMT_SGBRG14_1X14,
- MEDIA_BUS_FMT_SGBRG16_1X16,
- },
+ MEDIA_BUS_FMT_SGBRG16_1X16
+ ),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG16,
- .codes = {
+ .codes = IMX_BUS_FMTS(
MEDIA_BUS_FMT_SGRBG10_1X10,
MEDIA_BUS_FMT_SGRBG12_1X12,
MEDIA_BUS_FMT_SGRBG14_1X14,
- MEDIA_BUS_FMT_SGRBG16_1X16,
- },
+ MEDIA_BUS_FMT_SGRBG16_1X16
+ ),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB16,
- .codes = {
+ .codes = IMX_BUS_FMTS(
MEDIA_BUS_FMT_SRGGB10_1X10,
MEDIA_BUS_FMT_SRGGB12_1X12,
MEDIA_BUS_FMT_SRGGB14_1X14,
- MEDIA_BUS_FMT_SRGGB16_1X16,
- },
+ MEDIA_BUS_FMT_SRGGB16_1X16
+ ),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_GREY,
- .codes = {
+ .codes = IMX_BUS_FMTS(
MEDIA_BUS_FMT_Y8_1X8,
MEDIA_BUS_FMT_Y10_1X10,
- MEDIA_BUS_FMT_Y12_1X12,
- },
+ MEDIA_BUS_FMT_Y12_1X12
+ ),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 8,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_Y10,
- .codes = {MEDIA_BUS_FMT_Y10_1X10},
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y10_1X10),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_Y12,
- .codes = {MEDIA_BUS_FMT_Y12_1X12},
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y12_1X12),
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
.bayer = true,
},
- /***
- * non-mbus RGB formats start here. NOTE! when adding non-mbus
- * formats, NUM_NON_MBUS_RGB_FORMATS must be updated below.
- ***/
- {
- .fourcc = V4L2_PIX_FMT_BGR24,
- .cs = IPUV3_COLORSPACE_RGB,
- .bpp = 24,
- }, {
- .fourcc = V4L2_PIX_FMT_XBGR32,
- .cs = IPUV3_COLORSPACE_RGB,
- .bpp = 32,
- }, {
- .fourcc = V4L2_PIX_FMT_BGRX32,
- .cs = IPUV3_COLORSPACE_RGB,
- .bpp = 32,
- }, {
- .fourcc = V4L2_PIX_FMT_RGBX32,
- .cs = IPUV3_COLORSPACE_RGB,
- .bpp = 32,
- },
};
-#define NUM_NON_MBUS_RGB_FORMATS 2
-#define NUM_RGB_FORMATS ARRAY_SIZE(rgb_formats)
-#define NUM_MBUS_RGB_FORMATS (NUM_RGB_FORMATS - NUM_NON_MBUS_RGB_FORMATS)
+/*
+ * Search in the pixel_formats[] array for an entry with the given fourcc
+ * that matches the requested selection criteria and return it.
+ *
+ * @fourcc: Search for an entry with the given fourcc pixel format.
+ * @fmt_sel: Allow entries only with the given selection criteria.
+ */
+const struct imx_media_pixfmt *
+imx_media_find_pixel_format(u32 fourcc, enum imx_pixfmt_sel fmt_sel)
+{
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
-static const struct imx_media_pixfmt ipu_yuv_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUV32,
- .codes = {MEDIA_BUS_FMT_AYUV8_1X32},
- .cs = IPUV3_COLORSPACE_YUV,
- .bpp = 32,
- .ipufmt = true,
- },
-};
+ fmt_sel &= ~PIXFMT_SEL_IPU;
-#define NUM_IPU_YUV_FORMATS ARRAY_SIZE(ipu_yuv_formats)
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
-static const struct imx_media_pixfmt ipu_rgb_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_XRGB32,
- .codes = {MEDIA_BUS_FMT_ARGB8888_1X32},
- .cs = IPUV3_COLORSPACE_RGB,
- .bpp = 32,
- .ipufmt = true,
- },
-};
+ if (sel_ipu != fmt->ipufmt)
+ continue;
-#define NUM_IPU_RGB_FORMATS ARRAY_SIZE(ipu_rgb_formats)
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
-static void init_mbus_colorimetry(struct v4l2_mbus_framefmt *mbus,
- const struct imx_media_pixfmt *fmt)
-{
- mbus->colorspace = (fmt->cs == IPUV3_COLORSPACE_RGB) ?
- V4L2_COLORSPACE_SRGB : V4L2_COLORSPACE_SMPTE170M;
- mbus->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(mbus->colorspace);
- mbus->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(mbus->colorspace);
- mbus->quantization =
- V4L2_MAP_QUANTIZATION_DEFAULT(fmt->cs == IPUV3_COLORSPACE_RGB,
- mbus->colorspace,
- mbus->ycbcr_enc);
+ if ((fmt_sel & sel) && fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
}
+EXPORT_SYMBOL_GPL(imx_media_find_pixel_format);
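
As a usage sketch (hypothetical caller, not part of this patch), a capture
driver validating a userspace fourcc might combine the selection flags as
follows, where pix is the v4l2_pix_format being validated and
V4L2_PIX_FMT_UYVY is only an example default:

	const struct imx_media_pixfmt *cc;

	/* Accept any non-IPU format, Bayer included. */
	cc = imx_media_find_pixel_format(pix->pixelformat, PIXFMT_SEL_ANY);
	if (!cc) {
		/* Fall back to a known-good YUV default. */
		pix->pixelformat = V4L2_PIX_FMT_UYVY;
		cc = imx_media_find_pixel_format(pix->pixelformat,
						 PIXFMT_SEL_YUV);
	}
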
-static const
-struct imx_media_pixfmt *__find_format(u32 fourcc,
- u32 code,
- bool allow_non_mbus,
- bool allow_bayer,
- const struct imx_media_pixfmt *array,
- u32 array_size)
+/*
+ * Search in the pixel_formats[] array for an entry with the given media
+ * bus code that matches the requested selection criteria and return it.
+ *
+ * @code: Search for an entry with the given media-bus code.
+ * @fmt_sel: Allow entries only with the given selection criteria.
+ */
+const struct imx_media_pixfmt *
+imx_media_find_mbus_format(u32 code, enum imx_pixfmt_sel fmt_sel)
{
- const struct imx_media_pixfmt *fmt;
- int i, j;
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
+
+ fmt_sel &= ~PIXFMT_SEL_IPU;
- for (i = 0; i < array_size; i++) {
- fmt = &array[i];
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+ unsigned int j;
- if ((!allow_non_mbus && !fmt->codes[0]) ||
- (!allow_bayer && fmt->bayer))
+ if (sel_ipu != fmt->ipufmt)
continue;
- if (fourcc && fmt->fourcc == fourcc)
- return fmt;
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
- if (!code)
+ if (!(fmt_sel & sel) || !fmt->codes)
continue;
for (j = 0; fmt->codes[j]; j++) {
@@ -275,199 +268,103 @@ struct imx_media_pixfmt *__find_format(u32 fourcc,
return fmt;
}
}
+
return NULL;
}
+EXPORT_SYMBOL_GPL(imx_media_find_mbus_format);
-static const struct imx_media_pixfmt *find_format(u32 fourcc,
- u32 code,
- enum codespace_sel cs_sel,
- bool allow_non_mbus,
- bool allow_bayer)
+/*
+ * Enumerate entries in the pixel_formats[] array that match the
+ * requested selection criteria. Return the fourcc that matches the
+ * selection criteria at the requested match index.
+ *
+ * @fourcc: The returned fourcc that matches the selection criteria at
+ *	the requested match index.
+ * @index: The requested match index.
+ * @fmt_sel: Include in the enumeration entries with the given selection
+ * criteria.
+ */
+int imx_media_enum_pixel_formats(u32 *fourcc, u32 index,
+ enum imx_pixfmt_sel fmt_sel)
{
- const struct imx_media_pixfmt *ret;
-
- switch (cs_sel) {
- case CS_SEL_YUV:
- return __find_format(fourcc, code, allow_non_mbus, allow_bayer,
- yuv_formats, NUM_YUV_FORMATS);
- case CS_SEL_RGB:
- return __find_format(fourcc, code, allow_non_mbus, allow_bayer,
- rgb_formats, NUM_RGB_FORMATS);
- case CS_SEL_ANY:
- ret = __find_format(fourcc, code, allow_non_mbus, allow_bayer,
- yuv_formats, NUM_YUV_FORMATS);
- if (ret)
- return ret;
- return __find_format(fourcc, code, allow_non_mbus, allow_bayer,
- rgb_formats, NUM_RGB_FORMATS);
- default:
- return NULL;
- }
-}
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
-static int enum_format(u32 *fourcc, u32 *code, u32 index,
- enum codespace_sel cs_sel,
- bool allow_non_mbus,
- bool allow_bayer)
-{
- const struct imx_media_pixfmt *fmt;
- u32 mbus_yuv_sz = NUM_MBUS_YUV_FORMATS;
- u32 mbus_rgb_sz = NUM_MBUS_RGB_FORMATS;
- u32 yuv_sz = NUM_YUV_FORMATS;
- u32 rgb_sz = NUM_RGB_FORMATS;
-
- switch (cs_sel) {
- case CS_SEL_YUV:
- if (index >= yuv_sz ||
- (!allow_non_mbus && index >= mbus_yuv_sz))
- return -EINVAL;
- fmt = &yuv_formats[index];
- break;
- case CS_SEL_RGB:
- if (index >= rgb_sz ||
- (!allow_non_mbus && index >= mbus_rgb_sz))
- return -EINVAL;
- fmt = &rgb_formats[index];
- if (!allow_bayer && fmt->bayer)
- return -EINVAL;
- break;
- case CS_SEL_ANY:
- if (!allow_non_mbus) {
- if (index >= mbus_yuv_sz) {
- index -= mbus_yuv_sz;
- if (index >= mbus_rgb_sz)
- return -EINVAL;
- fmt = &rgb_formats[index];
- if (!allow_bayer && fmt->bayer)
- return -EINVAL;
- } else {
- fmt = &yuv_formats[index];
- }
- } else {
- if (index >= yuv_sz + rgb_sz)
- return -EINVAL;
- if (index >= yuv_sz) {
- fmt = &rgb_formats[index - yuv_sz];
- if (!allow_bayer && fmt->bayer)
- return -EINVAL;
- } else {
- fmt = &yuv_formats[index];
- }
+ fmt_sel &= ~PIXFMT_SEL_IPU;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+
+ if (sel_ipu != fmt->ipufmt)
+ continue;
+
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
+
+ if (!(fmt_sel & sel))
+ continue;
+
+ if (index == 0) {
+ *fourcc = fmt->fourcc;
+ return 0;
}
- break;
- default:
- return -EINVAL;
- }
- if (fourcc)
- *fourcc = fmt->fourcc;
- if (code)
- *code = fmt->codes[0];
+ index--;
+ }
- return 0;
+ return -EINVAL;
}
+EXPORT_SYMBOL_GPL(imx_media_enum_pixel_formats);
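
A sketch of the intended call site, based on the signature above: a
hypothetical VIDIOC_ENUM_FMT handler reporting the index-th matching fourcc.

	static int my_enum_fmt(struct file *file, void *fh,
			       struct v4l2_fmtdesc *f)
	{
		/* -EINVAL from the helper ends the enumeration. */
		return imx_media_enum_pixel_formats(&f->pixelformat, f->index,
						    PIXFMT_SEL_ANY);
	}
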
-const struct imx_media_pixfmt *
-imx_media_find_format(u32 fourcc, enum codespace_sel cs_sel, bool allow_bayer)
+/*
+ * Enumerate entries in the pixel_formats[] array that match the
+ * requested search criteria. Return the media-bus code that matches
+ * the search criteria at the requested match index.
+ *
+ * @code: The returned media-bus code that matches the search criteria at
+ * the requested match index.
+ * @index: The requested match index.
+ * @fmt_sel: Include in the enumeration entries with the given selection
+ * criteria.
+ */
+int imx_media_enum_mbus_formats(u32 *code, u32 index,
+ enum imx_pixfmt_sel fmt_sel)
{
- return find_format(fourcc, 0, cs_sel, true, allow_bayer);
-}
-EXPORT_SYMBOL_GPL(imx_media_find_format);
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
-int imx_media_enum_format(u32 *fourcc, u32 index, enum codespace_sel cs_sel)
-{
- return enum_format(fourcc, NULL, index, cs_sel, true, false);
-}
-EXPORT_SYMBOL_GPL(imx_media_enum_format);
+ fmt_sel &= ~PIXFMT_SEL_IPU;
-const struct imx_media_pixfmt *
-imx_media_find_mbus_format(u32 code, enum codespace_sel cs_sel,
- bool allow_bayer)
-{
- return find_format(0, code, cs_sel, false, allow_bayer);
-}
-EXPORT_SYMBOL_GPL(imx_media_find_mbus_format);
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+ unsigned int j;
-int imx_media_enum_mbus_format(u32 *code, u32 index, enum codespace_sel cs_sel,
- bool allow_bayer)
-{
- return enum_format(NULL, code, index, cs_sel, false, allow_bayer);
-}
-EXPORT_SYMBOL_GPL(imx_media_enum_mbus_format);
+ if (sel_ipu != fmt->ipufmt)
+ continue;
-const struct imx_media_pixfmt *
-imx_media_find_ipu_format(u32 code, enum codespace_sel cs_sel)
-{
- const struct imx_media_pixfmt *array, *fmt, *ret = NULL;
- u32 array_size;
- int i, j;
-
- switch (cs_sel) {
- case CS_SEL_YUV:
- array_size = NUM_IPU_YUV_FORMATS;
- array = ipu_yuv_formats;
- break;
- case CS_SEL_RGB:
- array_size = NUM_IPU_RGB_FORMATS;
- array = ipu_rgb_formats;
- break;
- case CS_SEL_ANY:
- array_size = NUM_IPU_YUV_FORMATS + NUM_IPU_RGB_FORMATS;
- array = ipu_yuv_formats;
- break;
- default:
- return NULL;
- }
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
- for (i = 0; i < array_size; i++) {
- if (cs_sel == CS_SEL_ANY && i >= NUM_IPU_YUV_FORMATS)
- fmt = &ipu_rgb_formats[i - NUM_IPU_YUV_FORMATS];
- else
- fmt = &array[i];
+ if (!(fmt_sel & sel) || !fmt->codes)
+ continue;
- for (j = 0; code && fmt->codes[j]; j++) {
- if (code == fmt->codes[j]) {
- ret = fmt;
- goto out;
+ for (j = 0; fmt->codes[j]; j++) {
+ if (index == 0) {
+ *code = fmt->codes[j];
+ return 0;
}
- }
- }
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(imx_media_find_ipu_format);
-
-int imx_media_enum_ipu_format(u32 *code, u32 index, enum codespace_sel cs_sel)
-{
- switch (cs_sel) {
- case CS_SEL_YUV:
- if (index >= NUM_IPU_YUV_FORMATS)
- return -EINVAL;
- *code = ipu_yuv_formats[index].codes[0];
- break;
- case CS_SEL_RGB:
- if (index >= NUM_IPU_RGB_FORMATS)
- return -EINVAL;
- *code = ipu_rgb_formats[index].codes[0];
- break;
- case CS_SEL_ANY:
- if (index >= NUM_IPU_YUV_FORMATS + NUM_IPU_RGB_FORMATS)
- return -EINVAL;
- if (index >= NUM_IPU_YUV_FORMATS) {
- index -= NUM_IPU_YUV_FORMATS;
- *code = ipu_rgb_formats[index].codes[0];
- } else {
- *code = ipu_yuv_formats[index].codes[0];
+ index--;
}
- break;
- default:
- return -EINVAL;
}
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL_GPL(imx_media_enum_ipu_format);
+EXPORT_SYMBOL_GPL(imx_media_enum_mbus_formats);
int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
u32 width, u32 height, u32 code, u32 field,
@@ -478,17 +375,27 @@ int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
mbus->width = width;
mbus->height = height;
mbus->field = field;
+
if (code == 0)
- imx_media_enum_mbus_format(&code, 0, CS_SEL_YUV, false);
- lcc = imx_media_find_mbus_format(code, CS_SEL_ANY, false);
+ imx_media_enum_mbus_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ lcc = imx_media_find_mbus_format(code, PIXFMT_SEL_ANY);
if (!lcc) {
- lcc = imx_media_find_ipu_format(code, CS_SEL_ANY);
+ lcc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
if (!lcc)
return -EINVAL;
}
mbus->code = code;
- init_mbus_colorimetry(mbus, lcc);
+
+ mbus->colorspace = V4L2_COLORSPACE_SRGB;
+ mbus->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(mbus->colorspace);
+ mbus->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(mbus->colorspace);
+ mbus->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(lcc->cs == IPUV3_COLORSPACE_RGB,
+ mbus->colorspace,
+ mbus->ycbcr_enc);
+
if (cc)
*cc = lcc;
@@ -542,9 +449,11 @@ void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
const struct imx_media_pixfmt *cc;
bool is_rgb = false;
- cc = imx_media_find_mbus_format(tryfmt->code, CS_SEL_ANY, true);
+ cc = imx_media_find_mbus_format(tryfmt->code, PIXFMT_SEL_ANY);
if (!cc)
- cc = imx_media_find_ipu_format(tryfmt->code, CS_SEL_ANY);
+ cc = imx_media_find_ipu_format(tryfmt->code,
+ PIXFMT_SEL_YUV_RGB);
+
if (cc && cc->cs == IPUV3_COLORSPACE_RGB)
is_rgb = true;
@@ -587,17 +496,18 @@ void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
EXPORT_SYMBOL_GPL(imx_media_try_colorimetry);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
- struct v4l2_mbus_framefmt *mbus,
+ const struct v4l2_mbus_framefmt *mbus,
const struct imx_media_pixfmt *cc)
{
u32 width;
u32 stride;
if (!cc) {
- cc = imx_media_find_ipu_format(mbus->code, CS_SEL_ANY);
+ cc = imx_media_find_ipu_format(mbus->code,
+ PIXFMT_SEL_YUV_RGB);
if (!cc)
- cc = imx_media_find_mbus_format(mbus->code, CS_SEL_ANY,
- true);
+ cc = imx_media_find_mbus_format(mbus->code,
+ PIXFMT_SEL_ANY);
if (!cc)
return -EINVAL;
}
@@ -609,8 +519,8 @@ int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
if (cc->ipufmt && cc->cs == IPUV3_COLORSPACE_YUV) {
u32 code;
- imx_media_enum_mbus_format(&code, 0, CS_SEL_YUV, false);
- cc = imx_media_find_mbus_format(code, CS_SEL_YUV, false);
+ imx_media_enum_mbus_formats(&code, 0, PIXFMT_SEL_YUV);
+ cc = imx_media_find_mbus_format(code, PIXFMT_SEL_YUV);
}
/* Round up width for minimum burst size */
@@ -639,7 +549,7 @@ int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_pix_fmt);
int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
- struct v4l2_mbus_framefmt *mbus)
+ const struct v4l2_mbus_framefmt *mbus)
{
int ret;
@@ -657,12 +567,13 @@ int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_ipu_image);
int imx_media_ipu_image_to_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
- struct ipu_image *image)
+ const struct ipu_image *image)
{
const struct imx_media_pixfmt *fmt;
- fmt = imx_media_find_format(image->pix.pixelformat, CS_SEL_ANY, true);
- if (!fmt)
+ fmt = imx_media_find_pixel_format(image->pix.pixelformat,
+ PIXFMT_SEL_ANY);
+ if (!fmt || !fmt->codes || !fmt->codes[0])
return -EINVAL;
memset(mbus, 0, sizeof(*mbus));
@@ -924,6 +835,39 @@ imx_media_pipeline_video_device(struct media_entity *start_entity,
EXPORT_SYMBOL_GPL(imx_media_pipeline_video_device);
/*
+ * Find a fwnode endpoint that maps to the given subdevice's pad.
+ * If there are multiple endpoints that map to the pad, only the
+ * first endpoint encountered is returned.
+ *
+ * On success the refcount of the returned fwnode endpoint is
+ * incremented.
+ */
+struct fwnode_handle *imx_media_get_pad_fwnode(struct media_pad *pad)
+{
+ struct fwnode_handle *endpoint;
+ struct v4l2_subdev *sd;
+
+ if (!is_media_entity_v4l2_subdev(pad->entity))
+ return ERR_PTR(-ENODEV);
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ fwnode_graph_for_each_endpoint(dev_fwnode(sd->dev), endpoint) {
+ int pad_idx = media_entity_get_fwnode_pad(&sd->entity,
+ endpoint,
+ pad->flags);
+ if (pad_idx < 0)
+ continue;
+
+ if (pad_idx == pad->index)
+ return endpoint;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(imx_media_get_pad_fwnode);
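
Since the helper returns ERR_PTR() values and takes a reference on success,
a caller is expected to look roughly like this (a sketch, not from this
patch):

	struct fwnode_handle *ep;

	ep = imx_media_get_pad_fwnode(pad);
	if (IS_ERR(ep))
		return PTR_ERR(ep);

	/* ... use the endpoint ... */

	fwnode_handle_put(ep);	/* drop the reference taken by the helper */
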
+
+/*
* Turn current pipeline streaming on/off starting from entity.
*/
int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index 0d83c2c41606..303b5407fb64 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -548,7 +548,8 @@ static int vdic_enum_mbus_code(struct v4l2_subdev *sd,
if (code->pad >= VDIC_NUM_PADS)
return -EINVAL;
- return imx_media_enum_ipu_format(&code->code, code->index, CS_SEL_YUV);
+ return imx_media_enum_ipu_formats(&code->code, code->index,
+ PIXFMT_SEL_YUV);
}
static int vdic_get_fmt(struct v4l2_subdev *sd,
@@ -583,12 +584,13 @@ static void vdic_try_fmt(struct vdic_priv *priv,
{
struct v4l2_mbus_framefmt *infmt;
- *cc = imx_media_find_ipu_format(sdformat->format.code, CS_SEL_YUV);
+ *cc = imx_media_find_ipu_format(sdformat->format.code,
+ PIXFMT_SEL_YUV);
if (!*cc) {
u32 code;
- imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
- *cc = imx_media_find_ipu_format(code, CS_SEL_YUV);
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+ *cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV);
sdformat->format.code = (*cc)->codes[0];
}
@@ -850,7 +852,7 @@ static int vdic_registered(struct v4l2_subdev *sd)
for (i = 0; i < VDIC_NUM_PADS; i++) {
code = 0;
if (i != VDIC_SINK_PAD_IDMAC)
- imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
/* set a default mbus format */
ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index 11861191324a..f17135158029 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -68,8 +68,13 @@ enum {
#define IMX_MEDIA_EOF_TIMEOUT 1000
struct imx_media_pixfmt {
+ /* the in-memory FourCC pixel format */
u32 fourcc;
- u32 codes[4];
+ /*
+ * the set of equivalent media bus codes for the fourcc.
+ * NOTE! codes pointer is NULL for in-memory-only formats.
+ */
+ const u32 *codes;
int bpp; /* total bpp */
/* cycles per pixel for generic (bayer) formats for the parallel bus */
int cycles;
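
The lookup loops in imx-media-utils.c iterate with
"for (j = 0; fmt->codes[j]; j++)", so each codes array must be
zero-terminated. The IMX_BUS_FMTS() helper used throughout the table is
defined earlier in this patch; presumably it expands to a compound-literal
array with a terminating zero, along these lines (an assumed shape, shown
here for reference only):

	/* Assumed definition; the 0 terminates the array for the lookups. */
	#define IMX_BUS_FMTS(fmt...)	(const u32[]) { fmt, 0 }
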
@@ -79,6 +84,15 @@ struct imx_media_pixfmt {
bool ipufmt; /* is one of the IPU internal formats */
};
+enum imx_pixfmt_sel {
+ PIXFMT_SEL_YUV = BIT(0), /* select YUV formats */
+ PIXFMT_SEL_RGB = BIT(1), /* select RGB formats */
+ PIXFMT_SEL_BAYER = BIT(2), /* select BAYER formats */
+ PIXFMT_SEL_IPU = BIT(3), /* select IPU-internal formats */
+ PIXFMT_SEL_YUV_RGB = PIXFMT_SEL_YUV | PIXFMT_SEL_RGB,
+ PIXFMT_SEL_ANY = PIXFMT_SEL_YUV | PIXFMT_SEL_RGB | PIXFMT_SEL_BAYER,
+};
+
struct imx_media_buffer {
struct vb2_v4l2_buffer vbuf; /* v4l buffer must be first */
struct list_head list;
@@ -149,24 +163,29 @@ struct imx_media_dev {
struct v4l2_subdev *sync_sd[2][NUM_IPU_SUBDEVS];
};
-enum codespace_sel {
- CS_SEL_YUV = 0,
- CS_SEL_RGB,
- CS_SEL_ANY,
-};
-
/* imx-media-utils.c */
const struct imx_media_pixfmt *
-imx_media_find_format(u32 fourcc, enum codespace_sel cs_sel, bool allow_bayer);
-int imx_media_enum_format(u32 *fourcc, u32 index, enum codespace_sel cs_sel);
-const struct imx_media_pixfmt *
-imx_media_find_mbus_format(u32 code, enum codespace_sel cs_sel,
- bool allow_bayer);
-int imx_media_enum_mbus_format(u32 *code, u32 index, enum codespace_sel cs_sel,
- bool allow_bayer);
+imx_media_find_pixel_format(u32 fourcc, enum imx_pixfmt_sel sel);
+int imx_media_enum_pixel_formats(u32 *fourcc, u32 index,
+ enum imx_pixfmt_sel sel);
const struct imx_media_pixfmt *
-imx_media_find_ipu_format(u32 code, enum codespace_sel cs_sel);
-int imx_media_enum_ipu_format(u32 *code, u32 index, enum codespace_sel cs_sel);
+imx_media_find_mbus_format(u32 code, enum imx_pixfmt_sel sel);
+int imx_media_enum_mbus_formats(u32 *code, u32 index,
+ enum imx_pixfmt_sel sel);
+
+static inline const struct imx_media_pixfmt *
+imx_media_find_ipu_format(u32 code, enum imx_pixfmt_sel fmt_sel)
+{
+ return imx_media_find_mbus_format(code, fmt_sel | PIXFMT_SEL_IPU);
+}
+
+static inline int imx_media_enum_ipu_formats(u32 *code, u32 index,
+ enum imx_pixfmt_sel fmt_sel)
+{
+ return imx_media_enum_mbus_formats(code, index,
+ fmt_sel | PIXFMT_SEL_IPU);
+}
+
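
By construction the two IPU wrappers just set the PIXFMT_SEL_IPU modifier
bit, which the lookup functions strip before matching, so for example these
two calls are equivalent:

	cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV);
	cc = imx_media_find_mbus_format(code, PIXFMT_SEL_YUV | PIXFMT_SEL_IPU);
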
int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
u32 width, u32 height, u32 code, u32 field,
const struct imx_media_pixfmt **cc);
@@ -175,12 +194,12 @@ int imx_media_init_cfg(struct v4l2_subdev *sd,
void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
bool ic_route);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
- struct v4l2_mbus_framefmt *mbus,
+ const struct v4l2_mbus_framefmt *mbus,
const struct imx_media_pixfmt *cc);
int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
- struct v4l2_mbus_framefmt *mbus);
+ const struct v4l2_mbus_framefmt *mbus);
int imx_media_ipu_image_to_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
- struct ipu_image *image);
+ const struct ipu_image *image);
void imx_media_grp_id_to_sd_name(char *sd_name, int sz,
u32 grp_id, int ipu_id);
struct v4l2_subdev *
@@ -201,6 +220,7 @@ imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
struct video_device *
imx_media_pipeline_video_device(struct media_entity *start_entity,
enum v4l2_buf_type buftype, bool upstream);
+struct fwnode_handle *imx_media_get_pad_fwnode(struct media_pad *pad);
struct imx_media_dma_buf {
void *virt;
@@ -243,10 +263,6 @@ void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd);
/* imx-media-of.c */
int imx_media_add_of_subdevs(struct imx_media_dev *dev,
struct device_node *np);
-int imx_media_create_of_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *sd);
-int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *csi);
int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct device_node *csi_np);
@@ -292,5 +308,6 @@ void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev);
#define IMX_MEDIA_GRP_ID_IPU_IC_PRP BIT(13)
#define IMX_MEDIA_GRP_ID_IPU_IC_PRPENC BIT(14)
#define IMX_MEDIA_GRP_ID_IPU_IC_PRPVF BIT(15)
+#define IMX_MEDIA_GRP_ID_CSI_MUX BIT(16)
#endif
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index 8ab823042c09..94d87d27d389 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
#include "imx-media.h"
@@ -35,6 +36,7 @@
struct csi2_dev {
struct device *dev;
struct v4l2_subdev sd;
+ struct v4l2_async_notifier notifier;
struct media_pad pad[CSI2_NUM_PADS];
struct clk *dphy_clk;
struct clk *pllref_clk;
@@ -90,6 +92,11 @@ static inline struct csi2_dev *sd_to_dev(struct v4l2_subdev *sdev)
return container_of(sdev, struct csi2_dev, sd);
}
+static inline struct csi2_dev *notifier_to_dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi2_dev, notifier);
+}
+
/*
* The required sequence of MIPI CSI-2 startup as specified in the i.MX6
* reference manual is as follows:
@@ -509,6 +516,7 @@ static int csi2_registered(struct v4l2_subdev *sd)
static const struct media_entity_operations csi2_entity_ops = {
.link_setup = csi2_link_setup,
.link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
};
static const struct v4l2_subdev_video_ops csi2_video_ops = {
@@ -530,34 +538,75 @@ static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
.registered = csi2_registered,
};
-static int csi2_parse_endpoint(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd)
+static int csi2_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct csi2_dev *csi2 = sd_to_dev(sd);
+ struct csi2_dev *csi2 = notifier_to_dev(notifier);
+ struct media_pad *sink = &csi2->sd.entity.pads[CSI2_SINK_PAD];
- if (!fwnode_device_is_available(asd->match.fwnode)) {
- v4l2_err(&csi2->sd, "remote is not available\n");
- return -EINVAL;
- }
+ return v4l2_create_fwnode_links_to_pad(sd, sink);
+}
- if (vep->bus_type != V4L2_MBUS_CSI2_DPHY) {
- v4l2_err(&csi2->sd, "invalid bus type, must be MIPI CSI2\n");
- return -EINVAL;
- }
+static const struct v4l2_async_notifier_operations csi2_notify_ops = {
+ .bound = csi2_notify_bound,
+};
- csi2->bus = vep->bus.mipi_csi2;
+static int csi2_async_register(struct csi2_dev *csi2)
+{
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_subdev *asd = NULL;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_notifier_init(&csi2->notifier);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi2->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ csi2->bus = vep.bus.mipi_csi2;
dev_dbg(csi2->dev, "data lanes: %d\n", csi2->bus.num_data_lanes);
dev_dbg(csi2->dev, "flags: 0x%08x\n", csi2->bus.flags);
- return 0;
+ asd = kzalloc(sizeof(*asd), GFP_KERNEL);
+ if (!asd) {
+ ret = -ENOMEM;
+ goto err_parse;
+ }
+
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &csi2->notifier, ep, asd);
+ if (ret)
+ goto err_parse;
+
+ fwnode_handle_put(ep);
+
+ csi2->notifier.ops = &csi2_notify_ops;
+
+ ret = v4l2_async_subdev_notifier_register(&csi2->sd,
+ &csi2->notifier);
+ if (ret)
+ return ret;
+
+ return v4l2_async_register_subdev(&csi2->sd);
+
+err_parse:
+ fwnode_handle_put(ep);
+ kfree(asd);
+ return ret;
}
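
The same async-notifier pattern recurs for the i.MX7 CSI and CSIS below.
Schematically, the lifecycle that the probe error paths and remove
callbacks rely on is (a sketch, not literal driver code):

	v4l2_async_notifier_init(&notifier);
	/* add the remote subdev parsed from the fwnode endpoint ... */
	v4l2_async_subdev_notifier_register(&sd, &notifier);
	v4l2_async_register_subdev(&sd);

	/* teardown, in remove() and on probe error paths: */
	v4l2_async_notifier_unregister(&notifier);
	v4l2_async_notifier_cleanup(&notifier);
	v4l2_async_unregister_subdev(&sd);
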
static int csi2_probe(struct platform_device *pdev)
{
- unsigned int sink_port = 0;
struct csi2_dev *csi2;
struct resource *res;
int i, ret;
@@ -633,15 +682,15 @@ static int csi2_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &csi2->sd);
- ret = v4l2_async_register_fwnode_subdev(
- &csi2->sd, sizeof(struct v4l2_async_subdev),
- &sink_port, 1, csi2_parse_endpoint);
+ ret = csi2_async_register(csi2);
if (ret)
- goto dphy_off;
+ goto clean_notifier;
return 0;
-dphy_off:
+clean_notifier:
+ v4l2_async_notifier_unregister(&csi2->notifier);
+ v4l2_async_notifier_cleanup(&csi2->notifier);
clk_disable_unprepare(csi2->dphy_clk);
pllref_off:
clk_disable_unprepare(csi2->pllref_clk);
@@ -655,6 +704,8 @@ static int csi2_remove(struct platform_device *pdev)
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csi2_dev *csi2 = sd_to_dev(sd);
+ v4l2_async_notifier_unregister(&csi2->notifier);
+ v4l2_async_notifier_cleanup(&csi2->notifier);
v4l2_async_unregister_subdev(sd);
clk_disable_unprepare(csi2->dphy_clk);
clk_disable_unprepare(csi2->pllref_clk);
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index acbdffb77668..a3f3df901704 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -155,6 +155,7 @@
struct imx7_csi {
struct device *dev;
struct v4l2_subdev sd;
+ struct v4l2_async_notifier notifier;
struct imx_media_video_dev *vdev;
struct imx_media_dev *imxmd;
struct media_pad pad[IMX7_CSI_PADS_NUM];
@@ -168,8 +169,6 @@ struct imx7_csi {
struct media_entity *sink;
- struct v4l2_fwnode_endpoint upstream_ep;
-
struct v4l2_mbus_framefmt format_mbus[IMX7_CSI_PADS_NUM];
const struct imx_media_pixfmt *cc[IMX7_CSI_PADS_NUM];
struct v4l2_fract frame_interval[IMX7_CSI_PADS_NUM];
@@ -195,6 +194,12 @@ struct imx7_csi {
struct completion last_eof_completion;
};
+static struct imx7_csi *
+imx7_csi_notifier_to_dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct imx7_csi, notifier);
+}
+
static u32 imx7_csi_reg_read(struct imx7_csi *csi, unsigned int offset)
{
return readl(csi->regbase + offset);
@@ -428,61 +433,6 @@ static void imx7_csi_deinit(struct imx7_csi *csi)
csi->is_init = false;
}
-static int imx7_csi_get_upstream_endpoint(struct imx7_csi *csi,
- struct v4l2_fwnode_endpoint *ep,
- bool skip_mux)
-{
- struct device_node *endpoint, *port;
- struct media_entity *src;
- struct v4l2_subdev *sd;
- struct media_pad *pad;
-
- if (!csi->src_sd)
- return -EPIPE;
-
- src = &csi->src_sd->entity;
-
- /*
- * if the source is neither a mux or csi2 get the one directly upstream
- * from this csi
- */
- if (src->function != MEDIA_ENT_F_VID_IF_BRIDGE &&
- src->function != MEDIA_ENT_F_VID_MUX)
- src = &csi->sd.entity;
-
-skip_video_mux:
- /* get source pad of entity directly upstream from src */
- pad = imx_media_pipeline_pad(src, 0, 0, true);
- if (!pad)
- return -ENODEV;
-
- sd = media_entity_to_v4l2_subdev(pad->entity);
-
- /* To get bus type we may need to skip video mux */
- if (skip_mux && src->function == MEDIA_ENT_F_VID_MUX) {
- src = &sd->entity;
- goto skip_video_mux;
- }
-
- /*
- * NOTE: this assumes an OF-graph port id is the same as a
- * media pad index.
- */
- port = of_graph_get_port_by_id(sd->dev->of_node, pad->index);
- if (!port)
- return -ENODEV;
-
- endpoint = of_get_next_child(port, NULL);
- of_node_put(port);
- if (!endpoint)
- return -ENODEV;
-
- v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), ep);
- of_node_put(endpoint);
-
- return 0;
-}
-
static int imx7_csi_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
@@ -549,23 +499,27 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sink_fmt)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
- struct v4l2_fwnode_endpoint upstream_ep = {};
+ struct media_pad *pad;
int ret;
ret = v4l2_subdev_link_validate_default(sd, link, source_fmt, sink_fmt);
if (ret)
return ret;
- ret = imx7_csi_get_upstream_endpoint(csi, &upstream_ep, true);
- if (ret) {
- v4l2_err(&csi->sd, "failed to find upstream endpoint\n");
- return ret;
- }
+ if (!csi->src_sd)
+ return -EPIPE;
+
+ /*
+	 * Find the entity that is selected by the CSI mux. This is needed
+	 * to distinguish between a parallel and a CSI-2 pipeline.
+ */
+ pad = imx_media_pipeline_pad(&csi->src_sd->entity, 0, 0, true);
+ if (!pad)
+ return -ENODEV;
mutex_lock(&csi->lock);
- csi->upstream_ep = upstream_ep;
- csi->is_csi2 = (upstream_ep.bus_type == V4L2_MBUS_CSI2_DPHY);
+ csi->is_csi2 = (pad->entity->function == MEDIA_ENT_F_VID_IF_BRIDGE);
mutex_unlock(&csi->lock);
@@ -958,8 +912,8 @@ static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
switch (code->pad) {
case IMX7_CSI_PAD_SINK:
- ret = imx_media_enum_mbus_format(&code->code, code->index,
- CS_SEL_ANY, true);
+ ret = imx_media_enum_mbus_formats(&code->code, code->index,
+ PIXFMT_SEL_ANY);
break;
case IMX7_CSI_PAD_SRC:
if (code->index != 0) {
@@ -1019,8 +973,8 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
switch (sdformat->pad) {
case IMX7_CSI_PAD_SRC:
- in_cc = imx_media_find_mbus_format(in_fmt->code, CS_SEL_ANY,
- true);
+ in_cc = imx_media_find_mbus_format(in_fmt->code,
+ PIXFMT_SEL_ANY);
sdformat->format.width = in_fmt->width;
sdformat->format.height = in_fmt->height;
@@ -1035,11 +989,12 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
break;
case IMX7_CSI_PAD_SINK:
*cc = imx_media_find_mbus_format(sdformat->format.code,
- CS_SEL_ANY, true);
+ PIXFMT_SEL_ANY);
if (!*cc) {
- imx_media_enum_mbus_format(&code, 0, CS_SEL_ANY, false);
- *cc = imx_media_find_mbus_format(code, CS_SEL_ANY,
- false);
+ imx_media_enum_mbus_formats(&code, 0,
+ PIXFMT_SEL_YUV_RGB);
+ *cc = imx_media_find_mbus_format(code,
+ PIXFMT_SEL_YUV_RGB);
sdformat->format.code = (*cc)->codes[0];
}
@@ -1177,6 +1132,7 @@ static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
static const struct media_entity_operations imx7_csi_entity_ops = {
.link_setup = imx7_csi_link_setup,
.link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
};
static const struct v4l2_subdev_video_ops imx7_csi_video_ops = {
@@ -1201,11 +1157,64 @@ static const struct v4l2_subdev_internal_ops imx7_csi_internal_ops = {
.unregistered = imx7_csi_unregistered,
};
-static int imx7_csi_parse_endpoint(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd)
+static int imx7_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier);
+ struct media_pad *sink = &csi->sd.entity.pads[IMX7_CSI_PAD_SINK];
+
+ /* The bound subdev must always be the CSI mux */
+ if (WARN_ON(sd->entity.function != MEDIA_ENT_F_VID_MUX))
+ return -ENXIO;
+
+ /* Mark it as such via its group id */
+ sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink);
+}
+
+static const struct v4l2_async_notifier_operations imx7_csi_notify_ops = {
+ .bound = imx7_csi_notify_bound,
+};
+
+static int imx7_csi_async_register(struct imx7_csi *csi)
{
- return fwnode_device_is_available(asd->match.fwnode) ? 0 : -EINVAL;
+ struct v4l2_async_subdev *asd = NULL;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_notifier_init(&csi->notifier);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (ep) {
+ asd = kzalloc(sizeof(*asd), GFP_KERNEL);
+ if (!asd) {
+ fwnode_handle_put(ep);
+ return -ENOMEM;
+ }
+
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &csi->notifier, ep, asd);
+
+ fwnode_handle_put(ep);
+
+ if (ret) {
+ kfree(asd);
+ /* OK if asd already exists */
+ if (ret != -EEXIST)
+ return ret;
+ }
+ }
+
+ csi->notifier.ops = &imx7_csi_notify_ops;
+
+ ret = v4l2_async_subdev_notifier_register(&csi->sd, &csi->notifier);
+ if (ret)
+ return ret;
+
+ return v4l2_async_register_subdev(&csi->sd);
}
static int imx7_csi_probe(struct platform_device *pdev)
@@ -1288,19 +1297,21 @@ static int imx7_csi_probe(struct platform_device *pdev)
if (ret < 0)
goto free;
- ret = v4l2_async_register_fwnode_subdev(&csi->sd,
- sizeof(struct v4l2_async_subdev),
- NULL, 0,
- imx7_csi_parse_endpoint);
+ ret = imx7_csi_async_register(csi);
if (ret)
- goto free;
+ goto subdev_notifier_cleanup;
return 0;
+subdev_notifier_cleanup:
+ v4l2_async_notifier_unregister(&csi->notifier);
+ v4l2_async_notifier_cleanup(&csi->notifier);
+
free:
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
cleanup:
+ v4l2_async_notifier_unregister(&imxmd->notifier);
v4l2_async_notifier_cleanup(&imxmd->notifier);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_unregister(&imxmd->md);
@@ -1325,6 +1336,8 @@ static int imx7_csi_remove(struct platform_device *pdev)
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
+ v4l2_async_notifier_unregister(&csi->notifier);
+ v4l2_async_notifier_cleanup(&csi->notifier);
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index fbc1a924652a..7612993cc1d6 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -14,32 +14,30 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/irq.h>
#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_graph.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/reset.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/spinlock.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
-#include "imx-media.h"
-
-#define CSIS_DRIVER_NAME "imx7-mipi-csis"
-#define CSIS_SUBDEV_NAME CSIS_DRIVER_NAME
+#define CSIS_DRIVER_NAME "imx7-mipi-csis"
+#define CSIS_SUBDEV_NAME CSIS_DRIVER_NAME
-#define CSIS_PAD_SINK 0
-#define CSIS_PAD_SOURCE 1
-#define CSIS_PADS_NUM 2
+#define CSIS_PAD_SINK 0
+#define CSIS_PAD_SOURCE 1
+#define CSIS_PADS_NUM 2
-#define MIPI_CSIS_DEF_PIX_WIDTH 640
-#define MIPI_CSIS_DEF_PIX_HEIGHT 480
+#define MIPI_CSIS_DEF_PIX_WIDTH 640
+#define MIPI_CSIS_DEF_PIX_HEIGHT 480
/* Register map definition */
@@ -64,42 +62,42 @@
#define MIPI_CSIS_CLK_CTRL_WCLK_SRC BIT(0)
/* CSIS Interrupt mask */
-#define MIPI_CSIS_INTMSK 0x10
-#define MIPI_CSIS_INTMSK_EVEN_BEFORE BIT(31)
-#define MIPI_CSIS_INTMSK_EVEN_AFTER BIT(30)
-#define MIPI_CSIS_INTMSK_ODD_BEFORE BIT(29)
-#define MIPI_CSIS_INTMSK_ODD_AFTER BIT(28)
-#define MIPI_CSIS_INTMSK_FRAME_START BIT(24)
-#define MIPI_CSIS_INTMSK_FRAME_END BIT(20)
-#define MIPI_CSIS_INTMSK_ERR_SOT_HS BIT(16)
-#define MIPI_CSIS_INTMSK_ERR_LOST_FS BIT(12)
-#define MIPI_CSIS_INTMSK_ERR_LOST_FE BIT(8)
-#define MIPI_CSIS_INTMSK_ERR_OVER BIT(4)
-#define MIPI_CSIS_INTMSK_ERR_WRONG_CFG BIT(3)
-#define MIPI_CSIS_INTMSK_ERR_ECC BIT(2)
-#define MIPI_CSIS_INTMSK_ERR_CRC BIT(1)
-#define MIPI_CSIS_INTMSK_ERR_UNKNOWN BIT(0)
+#define MIPI_CSIS_INTMSK 0x10
+#define MIPI_CSIS_INTMSK_EVEN_BEFORE BIT(31)
+#define MIPI_CSIS_INTMSK_EVEN_AFTER BIT(30)
+#define MIPI_CSIS_INTMSK_ODD_BEFORE BIT(29)
+#define MIPI_CSIS_INTMSK_ODD_AFTER BIT(28)
+#define MIPI_CSIS_INTMSK_FRAME_START BIT(24)
+#define MIPI_CSIS_INTMSK_FRAME_END BIT(20)
+#define MIPI_CSIS_INTMSK_ERR_SOT_HS BIT(16)
+#define MIPI_CSIS_INTMSK_ERR_LOST_FS BIT(12)
+#define MIPI_CSIS_INTMSK_ERR_LOST_FE BIT(8)
+#define MIPI_CSIS_INTMSK_ERR_OVER BIT(4)
+#define MIPI_CSIS_INTMSK_ERR_WRONG_CFG BIT(3)
+#define MIPI_CSIS_INTMSK_ERR_ECC BIT(2)
+#define MIPI_CSIS_INTMSK_ERR_CRC BIT(1)
+#define MIPI_CSIS_INTMSK_ERR_UNKNOWN BIT(0)
/* CSIS Interrupt source */
-#define MIPI_CSIS_INTSRC 0x14
-#define MIPI_CSIS_INTSRC_EVEN_BEFORE BIT(31)
-#define MIPI_CSIS_INTSRC_EVEN_AFTER BIT(30)
-#define MIPI_CSIS_INTSRC_EVEN BIT(30)
-#define MIPI_CSIS_INTSRC_ODD_BEFORE BIT(29)
-#define MIPI_CSIS_INTSRC_ODD_AFTER BIT(28)
-#define MIPI_CSIS_INTSRC_ODD (0x3 << 28)
-#define MIPI_CSIS_INTSRC_NON_IMAGE_DATA (0xf << 28)
-#define MIPI_CSIS_INTSRC_FRAME_START BIT(24)
-#define MIPI_CSIS_INTSRC_FRAME_END BIT(20)
-#define MIPI_CSIS_INTSRC_ERR_SOT_HS BIT(16)
-#define MIPI_CSIS_INTSRC_ERR_LOST_FS BIT(12)
-#define MIPI_CSIS_INTSRC_ERR_LOST_FE BIT(8)
-#define MIPI_CSIS_INTSRC_ERR_OVER BIT(4)
-#define MIPI_CSIS_INTSRC_ERR_WRONG_CFG BIT(3)
-#define MIPI_CSIS_INTSRC_ERR_ECC BIT(2)
-#define MIPI_CSIS_INTSRC_ERR_CRC BIT(1)
-#define MIPI_CSIS_INTSRC_ERR_UNKNOWN BIT(0)
-#define MIPI_CSIS_INTSRC_ERRORS 0xfffff
+#define MIPI_CSIS_INTSRC 0x14
+#define MIPI_CSIS_INTSRC_EVEN_BEFORE BIT(31)
+#define MIPI_CSIS_INTSRC_EVEN_AFTER BIT(30)
+#define MIPI_CSIS_INTSRC_EVEN BIT(30)
+#define MIPI_CSIS_INTSRC_ODD_BEFORE BIT(29)
+#define MIPI_CSIS_INTSRC_ODD_AFTER BIT(28)
+#define MIPI_CSIS_INTSRC_ODD (0x3 << 28)
+#define MIPI_CSIS_INTSRC_NON_IMAGE_DATA (0xf << 28)
+#define MIPI_CSIS_INTSRC_FRAME_START BIT(24)
+#define MIPI_CSIS_INTSRC_FRAME_END BIT(20)
+#define MIPI_CSIS_INTSRC_ERR_SOT_HS BIT(16)
+#define MIPI_CSIS_INTSRC_ERR_LOST_FS BIT(12)
+#define MIPI_CSIS_INTSRC_ERR_LOST_FE BIT(8)
+#define MIPI_CSIS_INTSRC_ERR_OVER BIT(4)
+#define MIPI_CSIS_INTSRC_ERR_WRONG_CFG BIT(3)
+#define MIPI_CSIS_INTSRC_ERR_ECC BIT(2)
+#define MIPI_CSIS_INTSRC_ERR_CRC BIT(1)
+#define MIPI_CSIS_INTSRC_ERR_UNKNOWN BIT(0)
+#define MIPI_CSIS_INTSRC_ERRORS 0xfffff
/* D-PHY status control */
#define MIPI_CSIS_DPHYSTATUS 0x20
@@ -121,19 +119,19 @@
#define MIPI_CSIS_DPHYCTRL_ENABLE (0x1f << 0)
/* D-PHY Master and Slave Control register Low */
-#define MIPI_CSIS_DPHYBCTRL_L 0x30
+#define MIPI_CSIS_DPHYBCTRL_L 0x30
/* D-PHY Master and Slave Control register High */
-#define MIPI_CSIS_DPHYBCTRL_H 0x34
+#define MIPI_CSIS_DPHYBCTRL_H 0x34
/* D-PHY Slave Control register Low */
-#define MIPI_CSIS_DPHYSCTRL_L 0x38
+#define MIPI_CSIS_DPHYSCTRL_L 0x38
/* D-PHY Slave Control register High */
-#define MIPI_CSIS_DPHYSCTRL_H 0x3c
+#define MIPI_CSIS_DPHYSCTRL_H 0x3c
/* ISP Configuration register */
-#define MIPI_CSIS_ISPCONFIG_CH0 0x40
-#define MIPI_CSIS_ISPCONFIG_CH1 0x50
-#define MIPI_CSIS_ISPCONFIG_CH2 0x60
-#define MIPI_CSIS_ISPCONFIG_CH3 0x70
+#define MIPI_CSIS_ISPCONFIG_CH0 0x40
+#define MIPI_CSIS_ISPCONFIG_CH1 0x50
+#define MIPI_CSIS_ISPCONFIG_CH2 0x60
+#define MIPI_CSIS_ISPCONFIG_CH3 0x70
#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP_MSK (0xff << 24)
#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP(x) ((x) << 24)
@@ -143,35 +141,36 @@
#define MIPI_CSIS_ISPCFG_FMT_RAW8 (0x2a << 2)
#define MIPI_CSIS_ISPCFG_FMT_RAW10 (0x2b << 2)
#define MIPI_CSIS_ISPCFG_FMT_RAW12 (0x2c << 2)
+#define MIPI_CSIS_ISPCFG_FMT_RAW14 (0x2d << 2)
/* User defined formats, x = 1...4 */
-#define MIPI_CSIS_ISPCFG_FMT_USER(x) ((0x30 + (x) - 1) << 2)
-#define MIPI_CSIS_ISPCFG_FMT_MASK (0x3f << 2)
+#define MIPI_CSIS_ISPCFG_FMT_USER(x) ((0x30 + (x) - 1) << 2)
+#define MIPI_CSIS_ISPCFG_FMT_MASK (0x3f << 2)
/* ISP Image Resolution register */
-#define MIPI_CSIS_ISPRESOL_CH0 0x44
-#define MIPI_CSIS_ISPRESOL_CH1 0x54
-#define MIPI_CSIS_ISPRESOL_CH2 0x64
-#define MIPI_CSIS_ISPRESOL_CH3 0x74
-#define CSIS_MAX_PIX_WIDTH 0xffff
-#define CSIS_MAX_PIX_HEIGHT 0xffff
+#define MIPI_CSIS_ISPRESOL_CH0 0x44
+#define MIPI_CSIS_ISPRESOL_CH1 0x54
+#define MIPI_CSIS_ISPRESOL_CH2 0x64
+#define MIPI_CSIS_ISPRESOL_CH3 0x74
+#define CSIS_MAX_PIX_WIDTH 0xffff
+#define CSIS_MAX_PIX_HEIGHT 0xffff
/* ISP SYNC register */
-#define MIPI_CSIS_ISPSYNC_CH0 0x48
-#define MIPI_CSIS_ISPSYNC_CH1 0x58
-#define MIPI_CSIS_ISPSYNC_CH2 0x68
-#define MIPI_CSIS_ISPSYNC_CH3 0x78
+#define MIPI_CSIS_ISPSYNC_CH0 0x48
+#define MIPI_CSIS_ISPSYNC_CH1 0x58
+#define MIPI_CSIS_ISPSYNC_CH2 0x68
+#define MIPI_CSIS_ISPSYNC_CH3 0x78
#define MIPI_CSIS_ISPSYNC_HSYNC_LINTV_OFFSET 18
#define MIPI_CSIS_ISPSYNC_VSYNC_SINTV_OFFSET 12
#define MIPI_CSIS_ISPSYNC_VSYNC_EINTV_OFFSET 0
/* Non-image packet data buffers */
-#define MIPI_CSIS_PKTDATA_ODD 0x2000
-#define MIPI_CSIS_PKTDATA_EVEN 0x3000
-#define MIPI_CSIS_PKTDATA_SIZE SZ_4K
+#define MIPI_CSIS_PKTDATA_ODD 0x2000
+#define MIPI_CSIS_PKTDATA_EVEN 0x3000
+#define MIPI_CSIS_PKTDATA_SIZE SZ_4K
-#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
+#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
enum {
ST_POWERED = 1,
@@ -223,6 +222,7 @@ struct csi_state {
struct device *dev;
struct media_pad pads[CSIS_PADS_NUM];
struct v4l2_subdev mipi_sd;
+ struct v4l2_async_notifier notifier;
struct v4l2_subdev *src_sd;
u8 index;
@@ -253,45 +253,102 @@ struct csi_state {
struct csis_hw_reset hw_reset;
struct regulator *mipi_phy_regulator;
- bool sink_linked;
};
struct csis_pix_format {
- unsigned int pix_width_alignment;
u32 code;
u32 fmt_reg;
- u8 data_alignment;
+ u8 width;
};
static const struct csis_pix_format mipi_csis_formats[] = {
+ /* YUV formats. */
{
- .code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
- .data_alignment = 16,
- }, {
- .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
- .data_alignment = 16,
+ .width = 8,
}, {
+ .code = MEDIA_BUS_FMT_UYVY10_2X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
+ .width = 10,
+ },
+ /* RAW (Bayer and greyscale) formats. */
+ {
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
.fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
- .data_alignment = 8,
+ .width = 8,
}, {
- .code = MEDIA_BUS_FMT_YUYV8_2X8,
- .fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
- .data_alignment = 16,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .width = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .width = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .width = 8,
}, {
.code = MEDIA_BUS_FMT_Y8_1X8,
.fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
- .data_alignment = 8,
+ .width = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .width = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .width = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .width = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .width = 10,
}, {
.code = MEDIA_BUS_FMT_Y10_1X10,
.fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
- .data_alignment = 16,
+ .width = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .width = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .width = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .width = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .width = 12,
}, {
.code = MEDIA_BUS_FMT_Y12_1X12,
.fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
- .data_alignment = 16,
+ .width = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW14,
+ .width = 14,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW14,
+ .width = 14,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW14,
+ .width = 14,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW14,
+ .width = 14,
}
};
@@ -329,6 +386,12 @@ static int mipi_csis_dump_regs(struct csi_state *state)
return 0;
}
+static struct csi_state *
+mipi_notifier_to_csis_state(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi_state, notifier);
+}
+
static struct csi_state *mipi_sd_to_csis_state(struct v4l2_subdev *sdev)
{
return container_of(sdev, struct csi_state, mipi_sd);
@@ -405,7 +468,8 @@ static void __mipi_csis_set_format(struct csi_state *state)
/* Color format */
val = mipi_csis_read(state, MIPI_CSIS_ISPCONFIG_CH0);
- val = (val & ~MIPI_CSIS_ISPCFG_FMT_MASK) | state->csis_fmt->fmt_reg;
+ val &= ~(MIPI_CSIS_ISPCFG_ALIGN_32BIT | MIPI_CSIS_ISPCFG_FMT_MASK);
+ val |= state->csis_fmt->fmt_reg;
mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH0, val);
/* Pixel resolution */
@@ -437,13 +501,6 @@ static void mipi_csis_set_params(struct csi_state *state)
mipi_csis_set_hsync_settle(state, state->hs_settle);
- val = mipi_csis_read(state, MIPI_CSIS_ISPCONFIG_CH0);
- if (state->csis_fmt->data_alignment == 32)
- val |= MIPI_CSIS_ISPCFG_ALIGN_32BIT;
- else
- val &= ~MIPI_CSIS_ISPCFG_ALIGN_32BIT;
- mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH0, val);
-
val = (0 << MIPI_CSIS_ISPSYNC_HSYNC_LINTV_OFFSET) |
(0 << MIPI_CSIS_ISPSYNC_VSYNC_SINTV_OFFSET) |
(0 << MIPI_CSIS_ISPSYNC_VSYNC_EINTV_OFFSET);
@@ -622,17 +679,7 @@ static int mipi_csis_link_setup(struct media_entity *entity,
mutex_lock(&state->lock);
- if (local_pad->flags & MEDIA_PAD_FL_SOURCE) {
- if (flags & MEDIA_LNK_FL_ENABLED) {
- if (state->sink_linked) {
- ret = -EBUSY;
- goto out;
- }
- state->sink_linked = true;
- } else {
- state->sink_linked = false;
- }
- } else {
+ if (local_pad->flags & MEDIA_PAD_FL_SINK) {
if (flags & MEDIA_LNK_FL_ENABLED) {
if (state->src_sd) {
ret = -EBUSY;
@@ -649,48 +696,6 @@ out:
return ret;
}
-static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd,
- struct v4l2_subdev_pad_config *cfg)
-{
- struct v4l2_mbus_framefmt *mf;
- unsigned int i;
- int ret;
-
- for (i = 0; i < CSIS_PADS_NUM; i++) {
- mf = v4l2_subdev_get_try_format(mipi_sd, cfg, i);
-
- ret = imx_media_init_mbus_fmt(mf, MIPI_CSIS_DEF_PIX_HEIGHT,
- MIPI_CSIS_DEF_PIX_WIDTH, 0,
- V4L2_FIELD_NONE, NULL);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static struct csis_pix_format const *
-mipi_csis_try_format(struct v4l2_subdev *mipi_sd, struct v4l2_mbus_framefmt *mf)
-{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
- struct csis_pix_format const *csis_fmt;
-
- csis_fmt = find_csis_format(mf->code);
- if (!csis_fmt)
- csis_fmt = &mipi_csis_formats[0];
-
- v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH,
- csis_fmt->pix_width_alignment,
- &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1,
- 0);
-
- state->format_mbus.code = csis_fmt->code;
- state->format_mbus.width = mf->width;
- state->format_mbus.height = mf->height;
-
- return csis_fmt;
-}
-
static struct v4l2_mbus_framefmt *
mipi_csis_get_format(struct csi_state *state,
struct v4l2_subdev_pad_config *cfg,
@@ -703,53 +708,160 @@ mipi_csis_get_format(struct csi_state *state,
return &state->format_mbus;
}
-static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
+static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct v4l2_mbus_framefmt *fmt_sink;
+ struct v4l2_mbus_framefmt *fmt_source;
+ enum v4l2_subdev_format_whence which;
+
+ which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt_sink = mipi_csis_get_format(state, cfg, which, CSIS_PAD_SINK);
+
+ fmt_sink->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt_sink->width = MIPI_CSIS_DEF_PIX_WIDTH;
+ fmt_sink->height = MIPI_CSIS_DEF_PIX_HEIGHT;
+ fmt_sink->field = V4L2_FIELD_NONE;
+
+ fmt_sink->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ fmt_sink->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt_sink->colorspace);
+ fmt_sink->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt_sink->colorspace);
+ fmt_sink->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false, fmt_sink->colorspace,
+ fmt_sink->ycbcr_enc);
+
+ /*
+ * When called from mipi_csis_subdev_init() to initialize the active
+ * configuration, cfg is NULL, which indicates there's no source pad
+ * configuration to set.
+ */
+ if (!cfg)
+ return 0;
+
+ fmt_source = mipi_csis_get_format(state, cfg, which, CSIS_PAD_SOURCE);
+ *fmt_source = *fmt_sink;
+
+ return 0;
+}
+
+static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *sdformat)
{
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
- struct csis_pix_format const *csis_fmt;
struct v4l2_mbus_framefmt *fmt;
- if (sdformat->pad >= CSIS_PADS_NUM)
- return -EINVAL;
-
+ mutex_lock(&state->lock);
fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
+ sdformat->format = *fmt;
+ mutex_unlock(&state->lock);
- mutex_lock(&state->lock);
- if (sdformat->pad == CSIS_PAD_SOURCE) {
- sdformat->format = *fmt;
- goto unlock;
- }
+ return 0;
+}
+
+static int mipi_csis_enum_mbus_code(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
- csis_fmt = mipi_csis_try_format(mipi_sd, &sdformat->format);
+ /*
+	 * The CSIS can't transcode in any way; the source format is identical
+ * to the sink format.
+ */
+ if (code->pad == CSIS_PAD_SOURCE) {
+ struct v4l2_mbus_framefmt *fmt;
- sdformat->format = *fmt;
+ if (code->index > 0)
+ return -EINVAL;
- if (csis_fmt && sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- state->csis_fmt = csis_fmt;
- else
- cfg->try_fmt = sdformat->format;
+ fmt = mipi_csis_get_format(state, cfg, code->which, code->pad);
+ code->code = fmt->code;
+ return 0;
+ }
-unlock:
- mutex_unlock(&state->lock);
+ if (code->pad != CSIS_PAD_SINK)
+ return -EINVAL;
+
+ if (code->index >= ARRAY_SIZE(mipi_csis_formats))
+ return -EINVAL;
+
+ code->code = mipi_csis_formats[code->index].code;
return 0;
}
-static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd,
+static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *sdformat)
{
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct csis_pix_format const *csis_fmt;
struct v4l2_mbus_framefmt *fmt;
+ unsigned int align;
- mutex_lock(&state->lock);
+ /*
+	 * The CSIS can't transcode in any way; the source format can't be
+ * modified.
+ */
+ if (sdformat->pad == CSIS_PAD_SOURCE)
+ return mipi_csis_get_fmt(mipi_sd, cfg, sdformat);
+
+ if (sdformat->pad != CSIS_PAD_SINK)
+ return -EINVAL;
fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
+ mutex_lock(&state->lock);
+
+ /* Validate the media bus code and clamp the size. */
+ csis_fmt = find_csis_format(sdformat->format.code);
+ if (!csis_fmt)
+ csis_fmt = &mipi_csis_formats[0];
+
+ fmt->code = csis_fmt->code;
+ fmt->width = sdformat->format.width;
+ fmt->height = sdformat->format.height;
+
+	/*
+	 * The total number of bits per line must be a multiple of 8. We thus
+	 * need to align the width for formats that are not multiples of 8
+	 * bits. Note that v4l_bound_align_image() takes the width alignment
+	 * as a power-of-two exponent (log2 of the alignment in pixels).
+	 */
+	switch (csis_fmt->width % 8) {
+	case 0:
+		align = 0;
+		break;
+	case 4:
+		align = 1;
+		break;
+	case 2:
+	case 6:
+		align = 2;
+		break;
+	default:
+		/* 1, 3, 5, 7 */
+		align = 3;
+		break;
+	}
+
+ v4l_bound_align_image(&fmt->width, 1, CSIS_MAX_PIX_WIDTH, align,
+ &fmt->height, 1, CSIS_MAX_PIX_HEIGHT, 1, 0);
+
sdformat->format = *fmt;
+ /* Propagate the format from sink to source. */
+ fmt = mipi_csis_get_format(state, cfg, sdformat->which,
+ CSIS_PAD_SOURCE);
+ *fmt = sdformat->format;
+
+ /* Store the CSIS format descriptor for active formats. */
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ state->csis_fmt = csis_fmt;
+
mutex_unlock(&state->lock);
return 0;
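
As a worked check of the alignment logic above (using the log2 values): a
10-bit format gives 10 % 8 == 2, so align = 2 and the width is rounded to a
multiple of 4 pixels, making each group 4 * 10 = 40 bits = 5 bytes; a
12-bit format gives 12 % 8 == 4, so align = 1 and a width multiple of 2
pixels yields 2 * 12 = 24 bits = 3 bytes per pixel pair. In both cases the
total number of bits per line is a multiple of 8, as the comment requires.
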
@@ -801,6 +913,7 @@ static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
static const struct media_entity_operations mipi_csis_entity_ops = {
.link_setup = mipi_csis_link_setup,
.link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
};
static const struct v4l2_subdev_video_ops mipi_csis_video_ops = {
@@ -809,6 +922,7 @@ static const struct v4l2_subdev_video_ops mipi_csis_video_ops = {
static const struct v4l2_subdev_pad_ops mipi_csis_pad_ops = {
.init_cfg = mipi_csis_init_cfg,
+ .enum_mbus_code = mipi_csis_enum_mbus_code,
.get_fmt = mipi_csis_get_fmt,
.set_fmt = mipi_csis_set_fmt,
};
@@ -841,33 +955,25 @@ static int mipi_csis_parse_dt(struct platform_device *pdev,
static int mipi_csis_pm_resume(struct device *dev, bool runtime);
-static int mipi_csis_parse_endpoint(struct device *dev,
- struct v4l2_fwnode_endpoint *ep,
- struct v4l2_async_subdev *asd)
+static int mipi_csis_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
{
- struct v4l2_subdev *mipi_sd = dev_get_drvdata(dev);
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
-
- if (ep->bus_type != V4L2_MBUS_CSI2_DPHY) {
- dev_err(dev, "invalid bus type, must be MIPI CSI2\n");
- return -EINVAL;
- }
-
- state->bus = ep->bus.mipi_csi2;
-
- dev_dbg(state->dev, "data lanes: %d\n", state->bus.num_data_lanes);
- dev_dbg(state->dev, "flags: 0x%08x\n", state->bus.flags);
+ struct csi_state *state = mipi_notifier_to_csis_state(notifier);
+ struct media_pad *sink = &state->mipi_sd.entity.pads[CSIS_PAD_SINK];
- return 0;
+ return v4l2_create_fwnode_links_to_pad(sd, sink);
}
+static const struct v4l2_async_notifier_operations mipi_csis_notify_ops = {
+ .bound = mipi_csis_notify_bound,
+};
+
static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
struct platform_device *pdev,
const struct v4l2_subdev_ops *ops)
{
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
- unsigned int sink_port = 0;
- int ret;
v4l2_subdev_init(mipi_sd, ops);
mipi_sd->owner = THIS_MODULE;
@@ -883,26 +989,66 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
mipi_sd->dev = &pdev->dev;
state->csis_fmt = &mipi_csis_formats[0];
- state->format_mbus.code = mipi_csis_formats[0].code;
- state->format_mbus.width = MIPI_CSIS_DEF_PIX_WIDTH;
- state->format_mbus.height = MIPI_CSIS_DEF_PIX_HEIGHT;
- state->format_mbus.field = V4L2_FIELD_NONE;
+ mipi_csis_init_cfg(mipi_sd, NULL);
v4l2_set_subdevdata(mipi_sd, &pdev->dev);
state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_pads_init(&mipi_sd->entity, CSIS_PADS_NUM,
- state->pads);
+ return media_entity_pads_init(&mipi_sd->entity, CSIS_PADS_NUM,
+ state->pads);
+}
+
+static int mipi_csis_async_register(struct csi_state *state)
+{
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_subdev *asd = NULL;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_notifier_init(&state->notifier);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(state->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ state->bus = vep.bus.mipi_csi2;
+
+ dev_dbg(state->dev, "data lanes: %d\n", state->bus.num_data_lanes);
+ dev_dbg(state->dev, "flags: 0x%08x\n", state->bus.flags);
+
+ asd = kzalloc(sizeof(*asd), GFP_KERNEL);
+ if (!asd) {
+ ret = -ENOMEM;
+ goto err_parse;
+ }
+
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &state->notifier, ep, asd);
+ if (ret)
+ goto err_parse;
+
+ fwnode_handle_put(ep);
+
+ state->notifier.ops = &mipi_csis_notify_ops;
+
+ ret = v4l2_async_subdev_notifier_register(&state->mipi_sd,
+ &state->notifier);
if (ret)
return ret;
- ret = v4l2_async_register_fwnode_subdev(mipi_sd,
- sizeof(struct v4l2_async_subdev),
- &sink_port, 1,
- mipi_csis_parse_endpoint);
- if (ret < 0)
- dev_err(&pdev->dev, "async fwnode register failed: %d\n", ret);
+ return v4l2_async_register_subdev(&state->mipi_sd);
+
+err_parse:
+ fwnode_handle_put(ep);
+ kfree(asd);
return ret;
}
@@ -915,33 +1061,14 @@ static int mipi_csis_dump_regs_show(struct seq_file *m, void *private)
}
DEFINE_SHOW_ATTRIBUTE(mipi_csis_dump_regs);
-static int mipi_csis_debugfs_init(struct csi_state *state)
+static void mipi_csis_debugfs_init(struct csi_state *state)
{
- struct dentry *d;
-
- if (!debugfs_initialized())
- return -ENODEV;
-
state->debugfs_root = debugfs_create_dir(dev_name(state->dev), NULL);
- if (!state->debugfs_root)
- return -ENOMEM;
-
- d = debugfs_create_bool("debug_enable", 0600, state->debugfs_root,
- &state->debug);
- if (!d)
- goto remove_debugfs;
-
- d = debugfs_create_file("dump_regs", 0600, state->debugfs_root,
- state, &mipi_csis_dump_regs_fops);
- if (!d)
- goto remove_debugfs;
-
- return 0;
-remove_debugfs:
- debugfs_remove_recursive(state->debugfs_root);
-
- return -ENOMEM;
+ debugfs_create_bool("debug_enable", 0600, state->debugfs_root,
+ &state->debug);
+ debugfs_create_file("dump_regs", 0600, state->debugfs_root, state,
+ &mipi_csis_dump_regs_fops);
}
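The rework leans on the modern debugfs convention that the creation helpers can be called unconditionally: callers are not expected to check return values, and debugfs_remove_recursive() copes with whatever came back. A minimal sketch of the pattern, with illustrative names (foo_state and the file names are not from this driver):

	#include <linux/debugfs.h>

	struct foo_state {
		struct dentry *debugfs_root;
		bool debug;
	};

	static void foo_debugfs_init(struct foo_state *s, const char *name)
	{
		/* no error checks: a failed dir still works as a parent */
		s->debugfs_root = debugfs_create_dir(name, NULL);
		debugfs_create_bool("debug_enable", 0600, s->debugfs_root,
				    &s->debug);
	}

	static void foo_debugfs_exit(struct foo_state *s)
	{
		/* safe to call with a NULL or error-pointer root */
		debugfs_remove_recursive(s->debugfs_root);
	}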
static void mipi_csis_debugfs_exit(struct csi_state *state)
@@ -1009,6 +1136,12 @@ static int mipi_csis_probe(struct platform_device *pdev)
if (ret < 0)
goto disable_clock;
+ ret = mipi_csis_async_register(state);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "async register failed: %d\n", ret);
+ goto cleanup;
+ }
+
memcpy(state->events, mipi_csis_events, sizeof(state->events));
mipi_csis_debugfs_init(state);
@@ -1027,7 +1160,10 @@ static int mipi_csis_probe(struct platform_device *pdev)
unregister_all:
mipi_csis_debugfs_exit(state);
+cleanup:
media_entity_cleanup(&state->mipi_sd.entity);
+ v4l2_async_notifier_unregister(&state->notifier);
+ v4l2_async_notifier_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->mipi_sd);
disable_clock:
mipi_csis_clk_disable(state);
@@ -1115,6 +1251,8 @@ static int mipi_csis_remove(struct platform_device *pdev)
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
mipi_csis_debugfs_exit(state);
+ v4l2_async_notifier_unregister(&state->notifier);
+ v4l2_async_notifier_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->mipi_sd);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/staging/media/ipu3/Kconfig b/drivers/staging/media/ipu3/Kconfig
index 4b51c67eac88..3e9640523e50 100644
--- a/drivers/staging/media/ipu3/Kconfig
+++ b/drivers/staging/media/ipu3/Kconfig
@@ -2,8 +2,9 @@
config VIDEO_IPU3_IMGU
tristate "Intel ipu3-imgu driver"
depends on PCI && VIDEO_V4L2
- depends on MEDIA_CONTROLLER && VIDEO_V4L2_SUBDEV_API
depends on X86
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select IOMMU_IOVA
select VIDEOBUF2_DMA_SG
help
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
index 52063b651810..3fd1fc10b68d 100644
--- a/drivers/staging/media/ipu3/TODO
+++ b/drivers/staging/media/ipu3/TODO
@@ -8,11 +8,5 @@ staging directory.
- IPU3 driver documentation (Laurent)
Comments on configuring v4l2 subdevs for CIO2 and ImgU.
-- Switch to yavta from v4l2n in driver docs.
-
- Elaborate the functionality of different selection rectangles in driver
documentation. This may require driver changes as well.
-
-- Document different operation modes, and which buffer queues are relevant
- in each mode. To process an image, which queues require a buffer and in
- which ones is it optional?
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index 1c9c3ba4d518..a607b0158c81 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -450,7 +450,7 @@ struct ipu3_uapi_awb_fr_config_s {
__u32 bayer_sign;
__u8 bayer_nf;
__u8 reserved2[7];
-} __attribute__((aligned(32))) __packed;
+} __packed;
/**
* struct ipu3_uapi_4a_config - 4A config
@@ -466,7 +466,8 @@ struct ipu3_uapi_4a_config {
struct ipu3_uapi_ae_grid_config ae_grd_config;
__u8 padding[20];
struct ipu3_uapi_af_config_s af_config;
- struct ipu3_uapi_awb_fr_config_s awb_fr_config;
+ struct ipu3_uapi_awb_fr_config_s awb_fr_config
+ __attribute__((aligned(32)));
} __packed;
/**
@@ -2477,7 +2478,7 @@ struct ipu3_uapi_acc_param {
struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
struct ipu3_uapi_anr_config anr;
- struct ipu3_uapi_awb_fr_config_s awb_fr __attribute__((aligned(32)));
+ struct ipu3_uapi_awb_fr_config_s awb_fr;
struct ipu3_uapi_ae_config ae;
struct ipu3_uapi_af_config_s af;
struct ipu3_uapi_awb_config awb;
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
index 4533dacad4be..fbd53d7c097c 100644
--- a/drivers/staging/media/ipu3/ipu3-css-params.c
+++ b/drivers/staging/media/ipu3/ipu3-css-params.c
@@ -49,14 +49,13 @@ imgu_css_scaler_setup_lut(unsigned int taps, unsigned int input_width,
int tap, phase, phase_sum_left, phase_sum_right;
int exponent = imgu_css_scaler_get_exp(output_width, input_width);
int mantissa = (1 << exponent) * output_width;
- unsigned int phase_step;
+ unsigned int phase_step, phase_taps;
if (input_width == output_width) {
for (phase = 0; phase < IMGU_SCALER_PHASES; phase++) {
- for (tap = 0; tap < taps; tap++) {
- coeff_lut[phase * IMGU_SCALER_FILTER_TAPS + tap]
- = 0;
- }
+ phase_taps = phase * IMGU_SCALER_FILTER_TAPS;
+ for (tap = 0; tap < taps; tap++)
+ coeff_lut[phase_taps + tap] = 0;
}
info->phase_step = IMGU_SCALER_PHASES *
@@ -71,6 +70,7 @@ imgu_css_scaler_setup_lut(unsigned int taps, unsigned int input_width,
}
for (phase = 0; phase < IMGU_SCALER_PHASES; phase++) {
+ phase_taps = phase * IMGU_SCALER_FILTER_TAPS;
for (tap = 0; tap < taps; tap++) {
/* flip the table for convolution reverse indexing */
s64 coeff = coeffs[coeffs_size -
@@ -81,9 +81,7 @@ imgu_css_scaler_setup_lut(unsigned int taps, unsigned int input_width,
/* Add +"0.5" */
coeff += 1 << (IMGU_SCALER_COEFF_BITS - 1);
coeff >>= IMGU_SCALER_COEFF_BITS;
-
- coeff_lut[phase * IMGU_SCALER_FILTER_TAPS + tap] =
- coeff;
+ coeff_lut[phase_taps + tap] = coeff;
}
}
diff --git a/drivers/staging/media/ipu3/ipu3-css-pool.h b/drivers/staging/media/ipu3/ipu3-css-pool.h
index f4a60b41401b..a8ccd4f70320 100644
--- a/drivers/staging/media/ipu3/ipu3-css-pool.h
+++ b/drivers/staging/media/ipu3/ipu3-css-pool.h
@@ -15,14 +15,12 @@ struct imgu_device;
* @size: size of the buffer in bytes.
* @vaddr: kernel virtual address.
* @daddr: iova dma address to access IPU3.
- * @vma: private, a pointer to &struct vm_struct,
- * used for imgu_dmamap_free.
*/
struct imgu_css_map {
size_t size;
void *vaddr;
dma_addr_t daddr;
- struct vm_struct *vma;
+ struct page **pages;
};
/**
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 4f04fe838b0c..3c700ae9c94e 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -1911,6 +1911,13 @@ int imgu_css_meta_fmt_set(struct v4l2_meta_format *fmt)
switch (fmt->dataformat) {
case V4L2_META_FMT_IPU3_PARAMS:
fmt->buffersize = sizeof(struct ipu3_uapi_params);
+
+ /*
+ * Sanity check for the parameter struct size. This must
+ * not change!
+ */
+ BUILD_BUG_ON(sizeof(struct ipu3_uapi_params) != 39328);
+
break;
case V4L2_META_FMT_IPU3_STAT_3A:
fmt->buffersize = sizeof(struct ipu3_uapi_stats_3a);
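The BUILD_BUG_ON() above is a compile-time assertion: if anyone changes the layout of the uAPI parameter struct, the build breaks instead of silently breaking the ABI. A standalone sketch of the idiom (the struct and its size here are illustrative, not from this driver):

	#include <linux/build_bug.h>
	#include <linux/types.h>

	struct example_uapi_params {
		__u32 flags;
		__u32 reserved[7];
	};

	static inline void example_check_abi(void)
	{
		/* compilation fails if the uAPI layout ever changes */
		BUILD_BUG_ON(sizeof(struct example_uapi_params) != 32);
	}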
diff --git a/drivers/staging/media/ipu3/ipu3-dmamap.c b/drivers/staging/media/ipu3/ipu3-dmamap.c
index 7431322379f6..8a19b0024152 100644
--- a/drivers/staging/media/ipu3/ipu3-dmamap.c
+++ b/drivers/staging/media/ipu3/ipu3-dmamap.c
@@ -96,6 +96,7 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
unsigned long shift = iova_shift(&imgu->iova_domain);
struct device *dev = &imgu->pci_dev->dev;
size_t size = PAGE_ALIGN(len);
+ int count = size >> PAGE_SHIFT;
struct page **pages;
dma_addr_t iovaddr;
struct iova *iova;
@@ -114,7 +115,7 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
/* Call IOMMU driver to setup pgt */
iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
- for (i = 0; i < size / PAGE_SIZE; ++i) {
+ for (i = 0; i < count; ++i) {
rval = imgu_mmu_map(imgu->mmu, iovaddr,
page_to_phys(pages[i]), PAGE_SIZE);
if (rval)
@@ -123,33 +124,23 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
iovaddr += PAGE_SIZE;
}
- /* Now grab a virtual region */
- map->vma = __get_vm_area(size, VM_USERMAP, VMALLOC_START, VMALLOC_END);
- if (!map->vma)
+ map->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
+ if (!map->vaddr)
goto out_unmap;
- map->vma->pages = pages;
- /* And map it in KVA */
- if (map_vm_area(map->vma, PAGE_KERNEL, pages))
- goto out_vunmap;
-
+ map->pages = pages;
map->size = size;
map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
- map->vaddr = map->vma->addr;
dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
- size, &map->daddr, map->vma->addr);
-
- return map->vma->addr;
+ size, &map->daddr, map->vaddr);
-out_vunmap:
- vunmap(map->vma->addr);
+ return map->vaddr;
out_unmap:
imgu_dmamap_free_buffer(pages, size);
imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
i * PAGE_SIZE);
- map->vma = NULL;
out_free_iova:
__free_iova(&imgu->iova_domain, iova);
@@ -177,8 +168,6 @@ void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
*/
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
{
- struct vm_struct *area = map->vma;
-
dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
__func__, map->size, &map->daddr, map->vaddr);
@@ -187,11 +176,8 @@ void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
imgu_dmamap_unmap(imgu, map);
- if (WARN_ON(!area) || WARN_ON(!area->pages))
- return;
-
- imgu_dmamap_free_buffer(area->pages, map->size);
vunmap(map->vaddr);
+ imgu_dmamap_free_buffer(map->pages, map->size);
map->vaddr = NULL;
}
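The conversion drops the open-coded __get_vm_area()/map_vm_area() pair in favour of vmap(), which reserves the virtual range and installs the page mappings in one call (the driver passes VM_USERMAP so the area can later be handed to userspace); the driver then only needs to keep the pages array around for the eventual free. A minimal sketch of the lifecycle, assuming an already-populated pages[] array:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static void *map_pages_sketch(struct page **pages, unsigned int count)
	{
		/* one call: reserve a VA range and map all pages into it */
		return vmap(pages, count, VM_MAP, PAGE_KERNEL);
	}

	static void unmap_pages_sketch(void *vaddr)
	{
		/* tears down the mapping; the pages themselves survive */
		vunmap(vaddr);
	}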
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c
index 5f3ff964f3e7..cb9bf5fb29a5 100644
--- a/drivers/staging/media/ipu3/ipu3-mmu.c
+++ b/drivers/staging/media/ipu3/ipu3-mmu.c
@@ -174,8 +174,10 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
spin_lock_irqsave(&mmu->lock, flags);
l2pt = mmu->l2pts[l1pt_idx];
- if (l2pt)
- goto done;
+ if (l2pt) {
+ spin_unlock_irqrestore(&mmu->lock, flags);
+ return l2pt;
+ }
spin_unlock_irqrestore(&mmu->lock, flags);
@@ -190,8 +192,9 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
l2pt = mmu->l2pts[l1pt_idx];
if (l2pt) {
+ spin_unlock_irqrestore(&mmu->lock, flags);
imgu_mmu_free_page_table(new_l2pt);
- goto done;
+ return l2pt;
}
l2pt = new_l2pt;
@@ -200,7 +203,6 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
mmu->l1pt[l1pt_idx] = pteval;
-done:
spin_unlock_irqrestore(&mmu->lock, flags);
return l2pt;
}
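With the done: label gone, the function reads as the classic double-checked pattern for lazily populating a table from atomic context: peek under the lock, allocate with the lock dropped, then re-check before publishing. A condensed sketch of the same scheme with the page-table details abstracted away (struct table and the slot size are illustrative):

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct table {
		spinlock_t lock;
		void *slots[64];
	};

	static void *get_or_alloc_slot(struct table *t, unsigned int idx)
	{
		unsigned long flags;
		void *slot, *new_slot;

		spin_lock_irqsave(&t->lock, flags);
		slot = t->slots[idx];
		spin_unlock_irqrestore(&t->lock, flags);
		if (slot)
			return slot;		/* fast path: already there */

		new_slot = kzalloc(PAGE_SIZE, GFP_KERNEL); /* lock dropped */
		if (!new_slot)
			return NULL;

		spin_lock_irqsave(&t->lock, flags);
		slot = t->slots[idx];
		if (slot) {
			/* lost the race: another CPU populated it first */
			spin_unlock_irqrestore(&t->lock, flags);
			kfree(new_slot);
			return slot;
		}
		t->slots[idx] = new_slot;
		spin_unlock_irqrestore(&t->lock, flags);
		return new_slot;
	}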
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 09c8ede1457c..4dc8d9165f63 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -152,7 +152,6 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
struct imgu_v4l2_subdev,
subdev);
-
struct v4l2_mbus_framefmt *mf;
u32 pad = fmt->pad;
unsigned int pipe = imgu_sd->pipe;
@@ -367,8 +366,10 @@ static void imgu_vb2_buf_queue(struct vb2_buffer *vb)
vb2_set_plane_payload(vb, 0, need_bytes);
+ mutex_lock(&imgu->streaming_lock);
if (imgu->streaming)
imgu_queue_buffers(imgu, false, node->pipe);
+ mutex_unlock(&imgu->streaming_lock);
dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__,
node->pipe, node->id);
@@ -468,10 +469,13 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
dev_dbg(dev, "%s node name %s pipe %u id %u", __func__,
node->name, node->pipe, node->id);
+ mutex_lock(&imgu->streaming_lock);
if (imgu->streaming) {
r = -EBUSY;
+ mutex_unlock(&imgu->streaming_lock);
goto fail_return_bufs;
}
+ mutex_unlock(&imgu->streaming_lock);
if (!node->enabled) {
dev_err(dev, "IMGU node is not enabled");
@@ -485,7 +489,6 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
if (r < 0)
goto fail_return_bufs;
-
if (!imgu_all_nodes_streaming(imgu, node))
return 0;
@@ -498,9 +501,11 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Start streaming of the whole pipeline now */
dev_dbg(dev, "IMGU streaming is ready to start");
+ mutex_lock(&imgu->streaming_lock);
r = imgu_s_stream(imgu, true);
if (!r)
imgu->streaming = true;
+ mutex_unlock(&imgu->streaming_lock);
return 0;
@@ -532,6 +537,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
dev_err(&imgu->pci_dev->dev,
"failed to stop subdev streaming\n");
+ mutex_lock(&imgu->streaming_lock);
/* Was this the first node with streaming disabled? */
if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) {
/* Yes, really stop streaming now */
@@ -542,6 +548,8 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
}
imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
+ mutex_unlock(&imgu->streaming_lock);
+
media_pipeline_stop(&node->vdev.entity);
}
@@ -597,6 +605,9 @@ static int enum_fmts(struct v4l2_fmtdesc *f, u32 type)
{
unsigned int i, j;
+ if (f->mbus_code != 0 && f->mbus_code != MEDIA_BUS_FMT_FIXED)
+ return -EINVAL;
+
for (i = j = 0; i < ARRAY_SIZE(formats); ++i) {
if (formats[i].type == type) {
if (j == f->index)
@@ -826,6 +837,9 @@ static int imgu_meta_enum_format(struct file *file, void *fh,
if (fmt->index > 0 || fmt->type != node->vbq.type)
return -EINVAL;
+ if (fmt->mbus_code != 0 && fmt->mbus_code != MEDIA_BUS_FMT_FIXED)
+ return -EINVAL;
+
strscpy(fmt->description, meta_fmts[i].name, sizeof(fmt->description));
fmt->pixelformat = meta_fmts[i].fourcc;
@@ -845,54 +859,6 @@ static int imgu_vidioc_g_meta_fmt(struct file *file, void *fh,
return 0;
}
-static int imgu_vidioc_enum_input(struct file *file, void *fh,
- struct v4l2_input *input)
-{
- if (input->index > 0)
- return -EINVAL;
- strscpy(input->name, "camera", sizeof(input->name));
- input->type = V4L2_INPUT_TYPE_CAMERA;
-
- return 0;
-}
-
-static int imgu_vidioc_g_input(struct file *file, void *fh, unsigned int *input)
-{
- *input = 0;
-
- return 0;
-}
-
-static int imgu_vidioc_s_input(struct file *file, void *fh, unsigned int input)
-{
- return input == 0 ? 0 : -EINVAL;
-}
-
-static int imgu_vidioc_enum_output(struct file *file, void *fh,
- struct v4l2_output *output)
-{
- if (output->index > 0)
- return -EINVAL;
- strscpy(output->name, "camera", sizeof(output->name));
- output->type = V4L2_INPUT_TYPE_CAMERA;
-
- return 0;
-}
-
-static int imgu_vidioc_g_output(struct file *file, void *fh,
- unsigned int *output)
-{
- *output = 0;
-
- return 0;
-}
-
-static int imgu_vidioc_s_output(struct file *file, void *fh,
- unsigned int output)
-{
- return output == 0 ? 0 : -EINVAL;
-}
-
/******************** function pointers ********************/
static struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
@@ -965,14 +931,6 @@ static const struct v4l2_ioctl_ops imgu_v4l2_ioctl_ops = {
.vidioc_s_fmt_vid_out_mplane = imgu_vidioc_s_fmt,
.vidioc_try_fmt_vid_out_mplane = imgu_vidioc_try_fmt,
- .vidioc_enum_output = imgu_vidioc_enum_output,
- .vidioc_g_output = imgu_vidioc_g_output,
- .vidioc_s_output = imgu_vidioc_s_output,
-
- .vidioc_enum_input = imgu_vidioc_enum_input,
- .vidioc_g_input = imgu_vidioc_g_input,
- .vidioc_s_input = imgu_vidioc_s_input,
-
/* buffer queue management */
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
@@ -1086,7 +1044,7 @@ static void imgu_node_to_v4l2(u32 node, struct video_device *vdev,
vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
}
- vdev->device_caps = V4L2_CAP_STREAMING | cap;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | cap;
}
static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
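Advertising V4L2_CAP_IO_MC pairs with the mbus_code checks added to the enum handlers above: a media-controller-aware application can now enumerate pixel formats filtered by media bus code. A hedged userspace sketch of what that enables, assuming a kernel where struct v4l2_fmtdesc exposes the mbus_code field (the device node path is illustrative):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/media-bus-format.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_fmtdesc fmt;
		int fd = open("/dev/video0", O_RDWR);	/* illustrative */

		if (fd < 0)
			return 1;
		memset(&fmt, 0, sizeof(fmt));
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		fmt.mbus_code = MEDIA_BUS_FMT_FIXED;	/* filter by bus code */
		for (fmt.index = 0; !ioctl(fd, VIDIOC_ENUM_FMT, &fmt); fmt.index++)
			printf("format %u: %.4s\n", fmt.index,
			       (char *)&fmt.pixelformat);
		return 0;
	}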
@@ -1292,19 +1250,17 @@ static void imgu_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu,
static int imgu_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe)
{
- int i, r;
+ int i;
for (i = 0; i < IMGU_NODE_NUM; i++) {
- r = imgu_v4l2_node_setup(imgu, pipe, i);
- if (r)
- goto cleanup;
- }
+ int r = imgu_v4l2_node_setup(imgu, pipe, i);
+ if (r) {
+ imgu_v4l2_nodes_cleanup_pipe(imgu, pipe, i);
+ return r;
+ }
+ }
return 0;
-
-cleanup:
- imgu_v4l2_nodes_cleanup_pipe(imgu, pipe, i);
- return r;
}
static void imgu_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i)
diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
index 4d53aad31483..ee1bba6bdcac 100644
--- a/drivers/staging/media/ipu3/ipu3.c
+++ b/drivers/staging/media/ipu3/ipu3.c
@@ -261,6 +261,7 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe
ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
struct imgu_vb2_buffer, list);
+ list_del(&ivb->list);
vb = &ivb->vbb.vb2_buf;
r = imgu_css_set_parameters(&imgu->css, pipe,
vb2_plane_vaddr(vb, 0));
@@ -274,7 +275,6 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
dev_dbg(&imgu->pci_dev->dev,
"queue user parameters %d to css.", vb->index);
- list_del(&ivb->list);
} else if (imgu_pipe->queue_enabled[node]) {
struct imgu_css_buffer *buf =
imgu_queue_getbuf(imgu, node, pipe);
@@ -675,6 +675,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev,
return r;
mutex_init(&imgu->lock);
+ mutex_init(&imgu->streaming_lock);
atomic_set(&imgu->qbuf_barrier, 0);
init_waitqueue_head(&imgu->buf_drain_wq);
@@ -738,6 +739,7 @@ out_mmu_exit:
out_css_powerdown:
imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
+ mutex_destroy(&imgu->streaming_lock);
mutex_destroy(&imgu->lock);
return r;
@@ -755,6 +757,7 @@ static void imgu_pci_remove(struct pci_dev *pci_dev)
imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
imgu_dmamap_exit(imgu);
imgu_mmu_exit(imgu->mmu);
+ mutex_destroy(&imgu->streaming_lock);
mutex_destroy(&imgu->lock);
}
diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h
index 73b123b2b8a2..8cd6a0077d99 100644
--- a/drivers/staging/media/ipu3/ipu3.h
+++ b/drivers/staging/media/ipu3/ipu3.h
@@ -146,6 +146,10 @@ struct imgu_device {
* vid_buf.list and css->queue
*/
struct mutex lock;
+
+ /* Lock to protect writes to the streaming flag in this struct */
+ struct mutex streaming_lock;
+
/* Forbid streaming and buffer queuing during system suspend. */
atomic_t qbuf_barrier;
/* Indicate if system suspend takes place while imgu is streaming. */
diff --git a/drivers/staging/media/meson/vdec/codec_vp9.c b/drivers/staging/media/meson/vdec/codec_vp9.c
index 60e4fc0052b3..897f5d7a6aad 100644
--- a/drivers/staging/media/meson/vdec/codec_vp9.c
+++ b/drivers/staging/media/meson/vdec/codec_vp9.c
@@ -854,6 +854,36 @@ static int codec_vp9_stop(struct amvdec_session *sess)
return 0;
}
+/*
+ * Program LAST & GOLDEN frames into the motion compensation reference cache
+ * controller
+ */
+static void codec_vp9_set_mcrcc(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+ u32 val;
+
+ /* Reset mcrcc */
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL1, 0x2);
+ /* Disable on I-frame */
+ if (vp9->cur_frame->type == KEY_FRAME || vp9->cur_frame->intra_only) {
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL1, 0x0);
+ return;
+ }
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, BIT(1));
+ val = amvdec_read_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff;
+ val |= (val << 16);
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL2, val);
+ val = amvdec_read_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff;
+ val |= (val << 16);
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL3, val);
+
+ /* Enable mcrcc progressive-mode */
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL1, 0xff0);
+}
+
static void codec_vp9_set_sao(struct amvdec_session *sess,
struct vb2_buffer *vb)
{
@@ -1267,6 +1297,7 @@ static void codec_vp9_process_frame(struct amvdec_session *sess)
amvdec_write_dos(core, HEVC_PARSER_PICTURE_SIZE,
(vp9->height << 16) | vp9->width);
+ codec_vp9_set_mcrcc(sess);
codec_vp9_set_sao(sess, &vp9->cur_frame->vbuf->vb2_buf);
vp9_loop_filter_frame_init(core, &vp9->seg_4lf,
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 4dcbc5065821..6c254907a27b 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -2,8 +2,10 @@
config VIDEO_OMAP4
tristate "OMAP 4 Camera support"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C
+ depends on VIDEO_V4L2 && I2C
depends on ARCH_OMAP4 || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select MFD_SYSCON
select VIDEOBUF2_DMA_CONTIG
help
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml b/drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml
deleted file mode 100644
index 5dacece35702..000000000000
--- a/drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/phy/rockchip-mipi-dphy-rx0.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Rockchip SoC MIPI RX0 D-PHY Device Tree Bindings
-
-maintainers:
- - Helen Koike <helen.koike@collabora.com>
- - Ezequiel Garcia <ezequiel@collabora.com>
-
-description: |
- The Rockchip SoC has a MIPI D-PHY bus with an RX0 entry which connects to
- the ISP1 (Image Signal Processing unit v1.0) for CSI cameras.
-
-properties:
- compatible:
- const: rockchip,rk3399-mipi-dphy-rx0
-
- reg:
- maxItems: 1
-
- clocks:
- items:
- - description: MIPI D-PHY ref clock
- - description: MIPI D-PHY RX0 cfg clock
- - description: Video in/out general register file clock
-
- clock-names:
- items:
- - const: dphy-ref
- - const: dphy-cfg
- - const: grf
-
- '#phy-cells':
- const: 0
-
- power-domains:
- description: Video in/out power domain.
- maxItems: 1
-
-required:
- - compatible
- - clocks
- - clock-names
- - '#phy-cells'
- - power-domains
-
-additionalProperties: false
-
-examples:
- - |
-
- /*
- * MIPI D-PHY RX0 use registers in "general register files", it
- * should be a child of the GRF.
- *
- * grf: syscon@ff770000 {
- * compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
- * ...
- * };
- */
-
- #include <dt-bindings/clock/rk3399-cru.h>
- #include <dt-bindings/power/rk3399-power.h>
-
- mipi_dphy_rx0: mipi-dphy-rx0 {
- compatible = "rockchip,rk3399-mipi-dphy-rx0";
- clocks = <&cru SCLK_MIPIDPHY_REF>,
- <&cru SCLK_DPHY_RX0_CFG>,
- <&cru PCLK_VIO_GRF>;
- clock-names = "dphy-ref", "dphy-cfg", "grf";
- power-domains = <&power RK3399_PD_VIO>;
- #phy-cells = <0>;
- };
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig b/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig
index bd0147624de1..fb74df829371 100644
--- a/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig
@@ -2,7 +2,7 @@
config PHY_ROCKCHIP_DPHY_RX0
tristate "Rockchip MIPI Synopsys DPHY RX0 driver"
- depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
select GENERIC_PHY_MIPI_DPHY
select GENERIC_PHY
help
diff --git a/drivers/staging/media/rkisp1/Kconfig b/drivers/staging/media/rkisp1/Kconfig
index b859a493caba..41f5def9ea44 100644
--- a/drivers/staging/media/rkisp1/Kconfig
+++ b/drivers/staging/media/rkisp1/Kconfig
@@ -2,12 +2,14 @@
config VIDEO_ROCKCHIP_ISP1
tristate "Rockchip Image Signal Processing v1 Unit driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && OF
depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
select V4L2_FWNODE
- select PHY_ROCKCHIP_DPHY_RX0
+ select GENERIC_PHY_MIPI_DPHY
default n
help
Enable this to support the Image Signal Processing (ISP) module
diff --git a/drivers/staging/media/rkisp1/Makefile b/drivers/staging/media/rkisp1/Makefile
index 69ca59c7ef34..ab32a77db8f7 100644
--- a/drivers/staging/media/rkisp1/Makefile
+++ b/drivers/staging/media/rkisp1/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rockchip-isp1.o
rockchip-isp1-objs += rkisp1-capture.o \
rkisp1-common.o \
diff --git a/drivers/staging/media/rkisp1/TODO b/drivers/staging/media/rkisp1/TODO
index 0aa9877dd64a..c0cbec0a164d 100644
--- a/drivers/staging/media/rkisp1/TODO
+++ b/drivers/staging/media/rkisp1/TODO
@@ -1,12 +1,6 @@
-* Don't use v4l2_async_notifier_parse_fwnode_endpoints_by_port().
-e.g. isp_parse_of_endpoints in drivers/media/platform/omap3isp/isp.c
-cio2_parse_firmware in drivers/media/pci/intel/ipu3/ipu3-cio2.c.
* Fix pad format size for statistics and parameters entities.
* Use threaded interrupt for rkisp1_stats_isr(), remove work queue.
* Fix checkpatch errors.
-* Make sure uapi structs have the same size and layout in 32 and 64 bits,
-and that there are no holes in the structures (pahole is a utility that
-can be used to test this).
* Review and comment every lock
* Handle quantization
* Document rkisp1-common.h
diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c
index 24fe6a7888aa..f69235f82c45 100644
--- a/drivers/staging/media/rkisp1/rkisp1-capture.c
+++ b/drivers/staging/media/rkisp1/rkisp1-capture.c
@@ -52,7 +52,6 @@ enum rkisp1_plane {
*/
struct rkisp1_capture_fmt_cfg {
u32 fourcc;
- u8 fmt_type;
u8 uv_swap;
u32 write_format;
u32 output_format;
@@ -87,133 +86,106 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_mp_fmts[] = {
/* yuv422 */
{
.fourcc = V4L2_PIX_FMT_YUYV,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
}, {
.fourcc = V4L2_PIX_FMT_YVYU,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
}, {
.fourcc = V4L2_PIX_FMT_VYUY,
- .fmt_type = RKISP1_FMT_YUV,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
}, {
.fourcc = V4L2_PIX_FMT_YVU422M,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
},
/* yuv420 */
{
.fourcc = V4L2_PIX_FMT_NV21,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
}, {
.fourcc = V4L2_PIX_FMT_NV12,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
}, {
.fourcc = V4L2_PIX_FMT_NV21M,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
}, {
.fourcc = V4L2_PIX_FMT_NV12M,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
},
/* yuv444 */
{
.fourcc = V4L2_PIX_FMT_YUV444M,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
},
/* yuv400 */
{
.fourcc = V4L2_PIX_FMT_GREY,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
},
/* raw */
{
.fourcc = V4L2_PIX_FMT_SRGGB8,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB10,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG10,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG10,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR10,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB12,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG12,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG12,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR12,
- .fmt_type = RKISP1_FMT_BAYER,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
},
};
@@ -222,43 +194,36 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
/* yuv422 */
{
.fourcc = V4L2_PIX_FMT_YUYV,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
}, {
.fourcc = V4L2_PIX_FMT_YVYU,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
}, {
.fourcc = V4L2_PIX_FMT_VYUY,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
}, {
.fourcc = V4L2_PIX_FMT_YVU422M,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
@@ -266,37 +231,31 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
/* yuv420 */
{
.fourcc = V4L2_PIX_FMT_NV21,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
}, {
.fourcc = V4L2_PIX_FMT_NV12,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
}, {
.fourcc = V4L2_PIX_FMT_NV21M,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
}, {
.fourcc = V4L2_PIX_FMT_NV12M,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
@@ -304,7 +263,6 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
/* yuv444 */
{
.fourcc = V4L2_PIX_FMT_YUV444M,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV444,
@@ -312,7 +270,6 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
/* yuv400 */
{
.fourcc = V4L2_PIX_FMT_GREY,
- .fmt_type = RKISP1_FMT_YUV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV400,
@@ -320,17 +277,14 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
/* rgb */
{
.fourcc = V4L2_PIX_FMT_RGB24,
- .fmt_type = RKISP1_FMT_RGB,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB888,
}, {
.fourcc = V4L2_PIX_FMT_RGB565,
- .fmt_type = RKISP1_FMT_RGB,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB565,
}, {
.fourcc = V4L2_PIX_FMT_BGR666,
- .fmt_type = RKISP1_FMT_RGB,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB666,
},
@@ -429,11 +383,14 @@ static void rkisp1_mp_config(struct rkisp1_capture *cap)
cap->config->mi.cr_size_init);
rkisp1_irq_frame_end_enable(cap);
- if (cap->pix.cfg->uv_swap) {
- reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
- reg = (reg & ~BIT(0)) |
- RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP;
+ /* set uv swapping for semiplanar formats */
+ if (cap->pix.info->comp_planes == 2) {
+ reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+ if (cap->pix.cfg->uv_swap)
+ reg |= RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP;
+ else
+ reg &= ~RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP;
rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
}
@@ -453,7 +410,7 @@ static void rkisp1_sp_config(struct rkisp1_capture *cap)
{
const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
struct rkisp1_device *rkisp1 = cap->rkisp1;
- u32 mi_ctrl;
+ u32 mi_ctrl, reg;
rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
cap->config->mi.y_size_init);
@@ -467,11 +424,15 @@ static void rkisp1_sp_config(struct rkisp1_capture *cap)
rkisp1_write(rkisp1, cap->sp_y_stride, RKISP1_CIF_MI_SP_Y_LLENGTH);
rkisp1_irq_frame_end_enable(cap);
- if (cap->pix.cfg->uv_swap) {
- u32 reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
- rkisp1_write(rkisp1, reg & ~BIT(1),
- RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+ /* set uv swapping for semiplanar formats */
+ if (cap->pix.info->comp_planes == 2) {
+ reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+ if (cap->pix.cfg->uv_swap)
+ reg |= RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP;
+ else
+ reg &= ~RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP;
+ rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
}
rkisp1_mi_config_ctrl(cap);
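Both the main-path and self-path code now do a full read-modify-write of RKISP1_CIF_MI_XTD_FORMAT_CTRL with named bit definitions instead of the earlier hard-coded BIT() arithmetic. If more callers accumulate, the pattern could be factored out; a hypothetical helper sketching that (this function does not exist in the driver, and it follows the rkisp1_write(dev, value, addr) argument order used above):

	static void rkisp1_update_bits(struct rkisp1_device *rkisp1, u32 addr,
				       u32 mask, u32 val)
	{
		u32 tmp = rkisp1_read(rkisp1, addr);

		/* clear the masked field, then merge in the new value */
		rkisp1_write(rkisp1, (tmp & ~mask) | (val & mask), addr);
	}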
@@ -504,13 +465,12 @@ static void rkisp1_sp_disable(struct rkisp1_capture *cap)
static void rkisp1_mp_enable(struct rkisp1_capture *cap)
{
- const struct rkisp1_capture_fmt_cfg *isp_fmt = cap->pix.cfg;
u32 mi_ctrl;
rkisp1_mp_disable(cap);
mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
- if (isp_fmt->fmt_type == RKISP1_FMT_BAYER)
+ if (v4l2_is_format_bayer(cap->pix.info))
mi_ctrl |= RKISP1_CIF_MI_CTRL_RAW_ENABLE;
/* YUV */
else
@@ -778,6 +738,14 @@ static void rkisp1_vb2_buf_queue(struct vb2_buffer *vb)
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB);
}
+ /*
+ * UV swapping can be supported for planar formats by swapping
+ * the addresses of the Cb and Cr planes.
+ */
+ if (cap->pix.info->comp_planes == 3 && cap->pix.cfg->uv_swap)
+ swap(ispbuf->buff_addr[RKISP1_PLANE_CR],
+ ispbuf->buff_addr[RKISP1_PLANE_CB]);
+
spin_lock_irqsave(&cap->buf.lock, flags);
/*
@@ -927,6 +895,8 @@ static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue)
struct rkisp1_device *rkisp1 = cap->rkisp1;
int ret;
+ mutex_lock(&cap->rkisp1->stream_lock);
+
rkisp1_stream_stop(cap);
media_pipeline_stop(&node->vdev.entity);
ret = rkisp1_pipeline_sink_walk(&node->vdev.entity, NULL,
@@ -939,10 +909,12 @@ static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue)
v4l2_pipeline_pm_put(&node->vdev.entity);
ret = pm_runtime_put(rkisp1->dev);
- if (ret)
+ if (ret < 0)
dev_err(rkisp1->dev, "power down failed error:%d\n", ret);
rkisp1_dummy_buf_destroy(cap);
+
+ mutex_unlock(&cap->rkisp1->stream_lock);
}
/*
@@ -987,12 +959,14 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
struct media_entity *entity = &cap->vnode.vdev.entity;
int ret;
+ mutex_lock(&cap->rkisp1->stream_lock);
+
ret = rkisp1_dummy_buf_create(cap);
if (ret)
goto err_ret_buffers;
ret = pm_runtime_get_sync(cap->rkisp1->dev);
- if (ret) {
+ if (ret < 0) {
dev_err(cap->rkisp1->dev, "power up failed %d\n", ret);
goto err_destroy_dummy;
}
@@ -1016,6 +990,8 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
goto err_pipe_disable;
}
+ mutex_unlock(&cap->rkisp1->stream_lock);
+
return 0;
err_pipe_disable:
@@ -1029,6 +1005,7 @@ err_destroy_dummy:
rkisp1_dummy_buf_destroy(cap);
err_ret_buffers:
rkisp1_return_all_buffers(cap, VB2_BUF_STATE_QUEUED);
+ mutex_unlock(&cap->rkisp1->stream_lock);
return ret;
}
@@ -1250,6 +1227,8 @@ static int rkisp1_capture_link_validate(struct media_link *link)
media_entity_to_v4l2_subdev(link->source->entity);
struct rkisp1_capture *cap = video_get_drvdata(vdev);
struct rkisp1_isp *isp = &cap->rkisp1->isp;
+ u8 isp_pix_enc = isp->src_fmt->pixel_enc;
+ u8 cap_pix_enc = cap->pix.info->pixel_enc;
struct v4l2_subdev_format sd_fmt;
int ret;
@@ -1260,7 +1239,9 @@ static int rkisp1_capture_link_validate(struct media_link *link)
return -EPIPE;
}
- if (cap->pix.cfg->fmt_type != isp->src_fmt->fmt_type) {
+ if (cap_pix_enc != isp_pix_enc &&
+ !(isp_pix_enc == V4L2_PIXEL_ENC_YUV &&
+ cap_pix_enc == V4L2_PIXEL_ENC_RGB)) {
dev_err(cap->rkisp1->dev,
"format type mismatch in link '%s:%d->%s:%d'\n",
link->source->entity->name, link->source->index,
diff --git a/drivers/staging/media/rkisp1/rkisp1-common.h b/drivers/staging/media/rkisp1/rkisp1-common.h
index b291cc60de8e..0c4fe503adc9 100644
--- a/drivers/staging/media/rkisp1/rkisp1-common.h
+++ b/drivers/staging/media/rkisp1/rkisp1-common.h
@@ -52,13 +52,6 @@ enum rkisp1_stream_id {
RKISP1_SELFPATH,
};
-enum rkisp1_fmt_pix_type {
- RKISP1_FMT_YUV,
- RKISP1_FMT_RGB,
- RKISP1_FMT_BAYER,
- RKISP1_FMT_JPEG,
-};
-
enum rkisp1_fmt_raw_pat_type {
RKISP1_RAW_RGGB = 0,
RKISP1_RAW_GRBG,
@@ -80,8 +73,9 @@ enum rkisp1_isp_pad {
*/
struct rkisp1_sensor_async {
struct v4l2_async_subdev asd;
- struct v4l2_mbus_config mbus;
unsigned int lanes;
+ enum v4l2_mbus_type mbus_type;
+ unsigned int mbus_flags;
struct v4l2_subdev *sd;
struct v4l2_ctrl *pixel_rate_ctrl;
struct phy *dphy;
@@ -225,7 +219,7 @@ struct rkisp1_resizer {
struct media_pad pads[RKISP1_ISP_PAD_MAX];
struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
const struct rkisp1_rsz_config *config;
- enum rkisp1_fmt_pix_type fmt_type;
+ enum v4l2_pixel_encoding pixel_enc;
struct mutex ops_lock;
};
@@ -247,6 +241,7 @@ struct rkisp1_debug {
* @rkisp1_capture: capture video device
* @stats: ISP statistics output device
* @params: ISP input parameters device
+ * @stream_lock: lock to serialize start/stop streaming in capture devices.
*/
struct rkisp1_device {
void __iomem *base_addr;
@@ -266,6 +261,7 @@ struct rkisp1_device {
struct rkisp1_params params;
struct media_pipeline pipe;
struct vb2_alloc_ctx *alloc_ctx;
+ struct mutex stream_lock;
struct rkisp1_debug debug;
};
@@ -278,7 +274,7 @@ struct rkisp1_device {
*/
struct rkisp1_isp_mbus_info {
u32 mbus_code;
- enum rkisp1_fmt_pix_type fmt_type;
+ enum v4l2_pixel_encoding pixel_enc;
u32 mipi_dt;
u32 yuv_seq;
u8 bus_width;
diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c
index b1b3c058e957..9ac38bafb839 100644
--- a/drivers/staging/media/rkisp1/rkisp1-dev.c
+++ b/drivers/staging/media/rkisp1/rkisp1-dev.c
@@ -233,47 +233,6 @@ static int rkisp1_subdev_notifier_complete(struct v4l2_async_notifier *notifier)
return 0;
}
-static int rkisp1_fwnode_parse(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd)
-{
- struct rkisp1_sensor_async *s_asd =
- container_of(asd, struct rkisp1_sensor_async, asd);
-
- if (vep->bus_type != V4L2_MBUS_CSI2_DPHY) {
- dev_err(dev, "Only CSI2 bus type is currently supported\n");
- return -EINVAL;
- }
-
- if (vep->base.port != 0) {
- dev_err(dev, "The ISP has only port 0\n");
- return -EINVAL;
- }
-
- s_asd->mbus.type = vep->bus_type;
- s_asd->mbus.flags = vep->bus.mipi_csi2.flags;
- s_asd->lanes = vep->bus.mipi_csi2.num_data_lanes;
-
- switch (vep->bus.mipi_csi2.num_data_lanes) {
- case 1:
- s_asd->mbus.flags |= V4L2_MBUS_CSI2_1_LANE;
- break;
- case 2:
- s_asd->mbus.flags |= V4L2_MBUS_CSI2_2_LANE;
- break;
- case 3:
- s_asd->mbus.flags |= V4L2_MBUS_CSI2_3_LANE;
- break;
- case 4:
- s_asd->mbus.flags |= V4L2_MBUS_CSI2_4_LANE;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static const struct v4l2_async_notifier_operations rkisp1_subdev_notifier_ops = {
.bound = rkisp1_subdev_notifier_bound,
.unbind = rkisp1_subdev_notifier_unbind,
@@ -283,23 +242,66 @@ static const struct v4l2_async_notifier_operations rkisp1_subdev_notifier_ops =
static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
{
struct v4l2_async_notifier *ntf = &rkisp1->notifier;
- struct device *dev = rkisp1->dev;
+ unsigned int next_id = 0;
int ret;
v4l2_async_notifier_init(ntf);
- ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(dev, ntf,
- sizeof(struct rkisp1_sensor_async),
- 0, rkisp1_fwnode_parse);
- if (ret)
- return ret;
+ while (1) {
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct rkisp1_sensor_async *rk_asd = NULL;
+ struct fwnode_handle *ep;
- if (list_empty(&ntf->asd_list))
- return -ENODEV;
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(rkisp1->dev),
+ 0, next_id, FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ break;
- ntf->ops = &rkisp1_subdev_notifier_ops;
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ rk_asd = kzalloc(sizeof(*rk_asd), GFP_KERNEL);
+ if (!rk_asd) {
+ ret = -ENOMEM;
+ goto err_parse;
+ }
+
+ rk_asd->mbus_type = vep.bus_type;
+ rk_asd->mbus_flags = vep.bus.mipi_csi2.flags;
+ rk_asd->lanes = vep.bus.mipi_csi2.num_data_lanes;
+
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(ntf, ep,
+ &rk_asd->asd);
+ if (ret)
+ goto err_parse;
+
+ dev_dbg(rkisp1->dev, "registered ep id %d with %d lanes\n",
+ vep.base.id, rk_asd->lanes);
+
+ next_id = vep.base.id + 1;
+
+ fwnode_handle_put(ep);
+
+ continue;
+err_parse:
+ fwnode_handle_put(ep);
+ kfree(rk_asd);
+ v4l2_async_notifier_cleanup(ntf);
+ return ret;
+ }
- return v4l2_async_notifier_register(&rkisp1->v4l2_dev, ntf);
+ if (next_id == 0)
+ dev_dbg(rkisp1->dev, "no remote subdevice found\n");
+ ntf->ops = &rkisp1_subdev_notifier_ops;
+ ret = v4l2_async_notifier_register(&rkisp1->v4l2_dev, ntf);
+ if (ret) {
+ v4l2_async_notifier_cleanup(ntf);
+ return ret;
+ }
+ return 0;
}
/* ----------------------------------------------------------------------------
@@ -454,16 +456,17 @@ static void rkisp1_debug_init(struct rkisp1_device *rkisp1)
static int rkisp1_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
const struct rkisp1_match_data *clk_data;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct rkisp1_device *rkisp1;
struct v4l2_device *v4l2_dev;
unsigned int i;
int ret, irq;
- match = of_match_node(rkisp1_of_match, node);
+ clk_data = of_device_get_match_data(&pdev->dev);
+ if (!clk_data)
+ return -ENODEV;
+
rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
if (!rkisp1)
return -ENOMEM;
@@ -471,6 +474,8 @@ static int rkisp1_probe(struct platform_device *pdev)
dev_set_drvdata(dev, rkisp1);
rkisp1->dev = dev;
+ mutex_init(&rkisp1->stream_lock);
+
rkisp1->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rkisp1->base_addr))
return PTR_ERR(rkisp1->base_addr);
@@ -487,7 +492,6 @@ static int rkisp1_probe(struct platform_device *pdev)
}
rkisp1->irq = irq;
- clk_data = match->data;
for (i = 0; i < clk_data->size; i++)
rkisp1->clks[i].id = clk_data->clks[i];
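of_device_get_match_data() folds the of_match_node() lookup and the ->data dereference into one call and returns NULL when nothing matched, which is what makes the early -ENODEV bail-out possible. A minimal standalone sketch of the idiom (driver and compatible names are illustrative):

	#include <linux/module.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct example_match_data {
		unsigned int num_clks;
	};

	static const struct example_match_data rk3399_data = { .num_clks = 4 };

	static const struct of_device_id example_of_match[] = {
		{ .compatible = "vendor,example-ip", .data = &rk3399_data },
		{ /* sentinel */ },
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct example_match_data *data;

		data = of_device_get_match_data(&pdev->dev);
		if (!data)		/* no match data: nothing to drive */
			return -ENODEV;
		dev_info(&pdev->dev, "%u clocks\n", data->num_clks);
		return 0;
	}

	static struct platform_driver example_driver = {
		.probe = example_probe,
		.driver = {
			.name = "example",
			.of_match_table = example_of_match,
		},
	};
	module_platform_driver(example_driver);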
diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c b/drivers/staging/media/rkisp1/rkisp1-isp.c
index fa53f05e37d8..dc2b59a0160a 100644
--- a/drivers/staging/media/rkisp1/rkisp1-isp.c
+++ b/drivers/staging/media/rkisp1/rkisp1-isp.c
@@ -61,116 +61,116 @@
static const struct rkisp1_isp_mbus_info rkisp1_isp_formats[] = {
{
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
- .fmt_type = RKISP1_FMT_YUV,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
.direction = RKISP1_DIR_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW10,
.bayer_pat = RKISP1_RAW_RGGB,
.bus_width = 10,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW10,
.bayer_pat = RKISP1_RAW_BGGR,
.bus_width = 10,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW10,
.bayer_pat = RKISP1_RAW_GBRG,
.bus_width = 10,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW10,
.bayer_pat = RKISP1_RAW_GRBG,
.bus_width = 10,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW12,
.bayer_pat = RKISP1_RAW_RGGB,
.bus_width = 12,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW12,
.bayer_pat = RKISP1_RAW_BGGR,
.bus_width = 12,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW12,
.bayer_pat = RKISP1_RAW_GBRG,
.bus_width = 12,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW12,
.bayer_pat = RKISP1_RAW_GRBG,
.bus_width = 12,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW8,
.bayer_pat = RKISP1_RAW_RGGB,
.bus_width = 8,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW8,
.bayer_pat = RKISP1_RAW_BGGR,
.bus_width = 8,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW8,
.bayer_pat = RKISP1_RAW_GBRG,
.bus_width = 8,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .fmt_type = RKISP1_FMT_BAYER,
+ .pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = RKISP1_CIF_CSI2_DT_RAW8,
.bayer_pat = RKISP1_RAW_GRBG,
.bus_width = 8,
.direction = RKISP1_DIR_SINK_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_YUYV8_1X16,
- .fmt_type = RKISP1_FMT_YUV,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
.mipi_dt = RKISP1_CIF_CSI2_DT_YUV422_8b,
.yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCBYCR,
.bus_width = 16,
.direction = RKISP1_DIR_SINK,
}, {
.mbus_code = MEDIA_BUS_FMT_YVYU8_1X16,
- .fmt_type = RKISP1_FMT_YUV,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
.mipi_dt = RKISP1_CIF_CSI2_DT_YUV422_8b,
.yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCRYCB,
.bus_width = 16,
.direction = RKISP1_DIR_SINK,
}, {
.mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
- .fmt_type = RKISP1_FMT_YUV,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
.mipi_dt = RKISP1_CIF_CSI2_DT_YUV422_8b,
.yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CBYCRY,
.bus_width = 16,
.direction = RKISP1_DIR_SINK,
}, {
.mbus_code = MEDIA_BUS_FMT_VYUY8_1X16,
- .fmt_type = RKISP1_FMT_YUV,
+ .pixel_enc = V4L2_PIXEL_ENC_YUV,
.mipi_dt = RKISP1_CIF_CSI2_DT_YUV422_8b,
.yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CRYCBY,
.bus_width = 16,
@@ -288,10 +288,10 @@ static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
RKISP1_ISP_PAD_SINK_VIDEO,
V4L2_SUBDEV_FORMAT_ACTIVE);
- if (sink_fmt->fmt_type == RKISP1_FMT_BAYER) {
+ if (sink_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
acq_mult = 1;
- if (src_fmt->fmt_type == RKISP1_FMT_BAYER) {
- if (sensor->mbus.type == V4L2_MBUS_BT656)
+ if (src_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
+ if (sensor->mbus_type == V4L2_MBUS_BT656)
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT_ITU656;
else
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT;
@@ -299,17 +299,17 @@ static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
rkisp1_write(rkisp1, RKISP1_CIF_ISP_DEMOSAIC_TH(0xc),
RKISP1_CIF_ISP_DEMOSAIC);
- if (sensor->mbus.type == V4L2_MBUS_BT656)
+ if (sensor->mbus_type == V4L2_MBUS_BT656)
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU656;
else
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU601;
}
- } else if (sink_fmt->fmt_type == RKISP1_FMT_YUV) {
+ } else if (sink_fmt->pixel_enc == V4L2_PIXEL_ENC_YUV) {
acq_mult = 2;
- if (sensor->mbus.type == V4L2_MBUS_CSI2_DPHY) {
+ if (sensor->mbus_type == V4L2_MBUS_CSI2_DPHY) {
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
} else {
- if (sensor->mbus.type == V4L2_MBUS_BT656)
+ if (sensor->mbus_type == V4L2_MBUS_BT656)
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU656;
else
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
@@ -319,17 +319,17 @@ static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
}
/* Set up input acquisition properties */
- if (sensor->mbus.type == V4L2_MBUS_BT656 ||
- sensor->mbus.type == V4L2_MBUS_PARALLEL) {
- if (sensor->mbus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ if (sensor->mbus_type == V4L2_MBUS_BT656 ||
+ sensor->mbus_type == V4L2_MBUS_PARALLEL) {
+ if (sensor->mbus_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
signal = RKISP1_CIF_ISP_ACQ_PROP_POS_EDGE;
}
- if (sensor->mbus.type == V4L2_MBUS_PARALLEL) {
- if (sensor->mbus.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ if (sensor->mbus_type == V4L2_MBUS_PARALLEL) {
+ if (sensor->mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
signal |= RKISP1_CIF_ISP_ACQ_PROP_VSYNC_LOW;
- if (sensor->mbus.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ if (sensor->mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
signal |= RKISP1_CIF_ISP_ACQ_PROP_HSYNC_LOW;
}
@@ -357,7 +357,7 @@ static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
RKISP1_CIF_ISP_PIC_SIZE_ERROR | RKISP1_CIF_ISP_FRAME_IN;
rkisp1_write(rkisp1, irq_mask, RKISP1_CIF_ISP_IMSC);
- if (src_fmt->fmt_type == RKISP1_FMT_BAYER) {
+ if (src_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
rkisp1_params_disable(&rkisp1->params);
} else {
struct v4l2_mbus_framefmt *src_frm;
@@ -401,29 +401,11 @@ static int rkisp1_config_dvp(struct rkisp1_device *rkisp1)
static int rkisp1_config_mipi(struct rkisp1_device *rkisp1)
{
const struct rkisp1_isp_mbus_info *sink_fmt = rkisp1->isp.sink_fmt;
- unsigned int lanes;
+ unsigned int lanes = rkisp1->active_sensor->lanes;
u32 mipi_ctrl;
- /*
- * rkisp1->active_sensor->mbus is set in isp or d-phy notifier_bound
- * function
- */
- switch (rkisp1->active_sensor->mbus.flags & V4L2_MBUS_CSI2_LANES) {
- case V4L2_MBUS_CSI2_4_LANE:
- lanes = 4;
- break;
- case V4L2_MBUS_CSI2_3_LANE:
- lanes = 3;
- break;
- case V4L2_MBUS_CSI2_2_LANE:
- lanes = 2;
- break;
- case V4L2_MBUS_CSI2_1_LANE:
- lanes = 1;
- break;
- default:
+ if (lanes < 1 || lanes > 4)
return -EINVAL;
- }
mipi_ctrl = RKISP1_CIF_MIPI_CTRL_NUM_LANES(lanes - 1) |
RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(0xf) |
@@ -470,11 +452,11 @@ static int rkisp1_config_path(struct rkisp1_device *rkisp1)
u32 dpcl = rkisp1_read(rkisp1, RKISP1_CIF_VI_DPCL);
int ret = 0;
- if (sensor->mbus.type == V4L2_MBUS_BT656 ||
- sensor->mbus.type == V4L2_MBUS_PARALLEL) {
+ if (sensor->mbus_type == V4L2_MBUS_BT656 ||
+ sensor->mbus_type == V4L2_MBUS_PARALLEL) {
ret = rkisp1_config_dvp(rkisp1);
dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_PARALLEL;
- } else if (sensor->mbus.type == V4L2_MBUS_CSI2_DPHY) {
+ } else if (sensor->mbus_type == V4L2_MBUS_CSI2_DPHY) {
ret = rkisp1_config_mipi(rkisp1);
dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_MIPI;
}
@@ -561,7 +543,7 @@ static void rkisp1_isp_start(struct rkisp1_device *rkisp1)
rkisp1_config_clk(rkisp1);
/* Activate MIPI */
- if (sensor->mbus.type == V4L2_MBUS_CSI2_DPHY) {
+ if (sensor->mbus_type == V4L2_MBUS_CSI2_DPHY) {
val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
rkisp1_write(rkisp1, val | RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA,
RKISP1_CIF_MIPI_CTRL);
@@ -956,7 +938,7 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
rkisp1->active_sensor = container_of(sensor_sd->asd,
struct rkisp1_sensor_async, asd);
- if (rkisp1->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY)
+ if (rkisp1->active_sensor->mbus_type != V4L2_MBUS_CSI2_DPHY)
return -EINVAL;
atomic_set(&rkisp1->isp.frame_sequence, -1);
diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c
index 87799fbf0363..d049374413dc 100644
--- a/drivers/staging/media/rkisp1/rkisp1-resizer.c
+++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c
@@ -14,7 +14,7 @@
#define RKISP1_RSZ_MP_DEV_NAME RKISP1_DRIVER_NAME "_resizer_mainpath"
#define RKISP1_DEF_FMT MEDIA_BUS_FMT_YUYV8_2X8
-#define RKISP1_DEF_FMT_TYPE RKISP1_FMT_YUV
+#define RKISP1_DEF_PIXEL_ENC V4L2_PIXEL_ENC_YUV
#define RKISP1_MBUS_FMT_HDIV 2
#define RKISP1_MBUS_FMT_VDIV 1
@@ -365,13 +365,18 @@ static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
struct v4l2_rect sink_y, sink_c, src_y, src_c;
struct v4l2_mbus_framefmt *src_fmt;
struct v4l2_rect *sink_crop;
+ struct rkisp1_capture *cap = &rsz->rkisp1->capture_devs[rsz->id];
sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
V4L2_SUBDEV_FORMAT_ACTIVE);
src_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SRC,
V4L2_SUBDEV_FORMAT_ACTIVE);
- if (rsz->fmt_type == RKISP1_FMT_BAYER) {
+ /*
+ * The resizer only works on YUV formats, so disable it and
+ * return if the format is Bayer.
+ */
+ if (rsz->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
rkisp1_rsz_disable(rsz, when);
return;
}
@@ -384,15 +389,20 @@ static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
sink_c.width = sink_y.width / RKISP1_MBUS_FMT_HDIV;
sink_c.height = sink_y.height / RKISP1_MBUS_FMT_VDIV;
- if (rsz->fmt_type == RKISP1_FMT_YUV) {
- struct rkisp1_capture *cap =
- &rsz->rkisp1->capture_devs[rsz->id];
- const struct v4l2_format_info *pixfmt_info =
- v4l2_format_info(cap->pix.fmt.pixelformat);
-
- hdiv = pixfmt_info->hdiv;
- vdiv = pixfmt_info->vdiv;
+ /*
+ * The resizer is used not only to change the dimensions of the frame
+ * but also to change the chroma subsampling of YUV formats
+ * (for example 4:2:2 -> 4:2:0). So the width/height of the CbCr
+ * planes should be set according to the pixel format of the capture.
+ * The resizer always gets its input as YUV422. If the capture format
+ * is RGB then the memory input should stay YUV422, so we don't change
+ * the default hdiv, vdiv in that case.
+ */
+ if (v4l2_is_format_yuv(cap->pix.info)) {
+ hdiv = cap->pix.info->hdiv;
+ vdiv = cap->pix.info->vdiv;
}
+
src_c.width = src_y.width / hdiv;
src_c.height = src_y.height / vdiv;
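The chroma divisors now come from the V4L2 core's format database instead of a driver-local table: v4l2_format_info() returns per-format layout data, including hdiv/vdiv (2/2 for 4:2:0, 2/1 for 4:2:2). A small standalone sketch of that lookup:

	#include <linux/printk.h>
	#include <media/v4l2-common.h>

	static void chroma_divisors_sketch(void)
	{
		const struct v4l2_format_info *info;

		info = v4l2_format_info(V4L2_PIX_FMT_NV12);	/* 4:2:0 */
		if (info)
			pr_info("hdiv=%u vdiv=%u\n", info->hdiv, info->vdiv);
		/* prints hdiv=2 vdiv=2: chroma subsampled both ways */
	}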
@@ -498,7 +508,7 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
if (rsz->id == RKISP1_MAINPATH &&
- mbus_info->fmt_type == RKISP1_FMT_BAYER) {
+ mbus_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
sink_crop->left = 0;
sink_crop->top = 0;
sink_crop->width = sink_fmt->width;
@@ -537,7 +547,7 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
}
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
- rsz->fmt_type = mbus_info->fmt_type;
+ rsz->pixel_enc = mbus_info->pixel_enc;
/* Propagate to source pad */
src_fmt->code = sink_fmt->code;
@@ -722,7 +732,7 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
pads[RKISP1_RSZ_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
MEDIA_PAD_FL_MUST_CONNECT;
- rsz->fmt_type = RKISP1_DEF_FMT_TYPE;
+ rsz->pixel_enc = RKISP1_DEF_PIXEL_ENC;
mutex_init(&rsz->ops_lock);
ret = media_entity_pads_init(&sd->entity, 2, pads);
diff --git a/drivers/staging/media/rkvdec/Kconfig b/drivers/staging/media/rkvdec/Kconfig
new file mode 100644
index 000000000000..c02199b5e0fd
--- /dev/null
+++ b/drivers/staging/media/rkvdec/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_ROCKCHIP_VDEC
+ tristate "Rockchip Video Decoder driver"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ select V4L2_H264
+ help
+ Support for the Rockchip Video Decoder IP present on Rockchip SoCs,
+ which accelerates video decoding.
+ To compile this driver as a module, choose M here: the module
+ will be called rockchip-vdec.
diff --git a/drivers/staging/media/rkvdec/Makefile b/drivers/staging/media/rkvdec/Makefile
new file mode 100644
index 000000000000..c08fed0a39f9
--- /dev/null
+++ b/drivers/staging/media/rkvdec/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rockchip-vdec.o
+
+rockchip-vdec-y += rkvdec.o rkvdec-h264.o
diff --git a/drivers/staging/media/rkvdec/TODO b/drivers/staging/media/rkvdec/TODO
new file mode 100644
index 000000000000..e0f0f12f0ac5
--- /dev/null
+++ b/drivers/staging/media/rkvdec/TODO
@@ -0,0 +1,11 @@
+* Support for VP9 is planned for this driver.
+
+ Given the V4L controls for those CODECs will be part of
+ the uABI, it will be required to have the driver in staging.
+
+ For this reason, we are keeping this driver in staging for now.
+
+* Evaluate introducing a helper to consolidate duplicated
+ code in rkvdec_request_validate and cedrus_request_validate.
+  The helper needs to retrieve, from a media request, the driver
+  private data associated with the videobuf2 queue.
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
new file mode 100644
index 000000000000..cd4980d06be7
--- /dev/null
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip Video Decoder H264 backend
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ * Boris Brezillon <boris.brezillon@collabora.com>
+ *
+ * Copyright (C) 2016 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <media/v4l2-h264.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "rkvdec.h"
+#include "rkvdec-regs.h"
+
+/* Size in u32 units. */
+#define RKV_CABAC_INIT_BUFFER_SIZE (3680 + 128)
+#define RKV_RPS_SIZE ((128 + 128) / 4)
+#define RKV_SCALING_LIST_SIZE (6 * 16 + 6 * 64 + 128)
+#define RKV_ERROR_INFO_SIZE (256 * 144 * 4)
+
+#define RKVDEC_NUM_REFLIST 3
+
+struct rkvdec_sps_pps_packet {
+ u32 info[8];
+};
+
+struct rkvdec_ps_field {
+ u16 offset;
+ u8 len;
+};
+
+#define PS_FIELD(_offset, _len) \
+ ((struct rkvdec_ps_field){ _offset, _len })
+
+#define SEQ_PARAMETER_SET_ID PS_FIELD(0, 4)
+#define PROFILE_IDC PS_FIELD(4, 8)
+#define CONSTRAINT_SET3_FLAG PS_FIELD(12, 1)
+#define CHROMA_FORMAT_IDC PS_FIELD(13, 2)
+#define BIT_DEPTH_LUMA PS_FIELD(15, 3)
+#define BIT_DEPTH_CHROMA PS_FIELD(18, 3)
+#define QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG PS_FIELD(21, 1)
+#define LOG2_MAX_FRAME_NUM_MINUS4 PS_FIELD(22, 4)
+#define MAX_NUM_REF_FRAMES PS_FIELD(26, 5)
+#define PIC_ORDER_CNT_TYPE PS_FIELD(31, 2)
+#define LOG2_MAX_PIC_ORDER_CNT_LSB_MINUS4 PS_FIELD(33, 4)
+#define DELTA_PIC_ORDER_ALWAYS_ZERO_FLAG PS_FIELD(37, 1)
+#define PIC_WIDTH_IN_MBS PS_FIELD(38, 9)
+#define PIC_HEIGHT_IN_MBS PS_FIELD(47, 9)
+#define FRAME_MBS_ONLY_FLAG PS_FIELD(56, 1)
+#define MB_ADAPTIVE_FRAME_FIELD_FLAG PS_FIELD(57, 1)
+#define DIRECT_8X8_INFERENCE_FLAG PS_FIELD(58, 1)
+#define MVC_EXTENSION_ENABLE PS_FIELD(59, 1)
+#define NUM_VIEWS PS_FIELD(60, 2)
+#define VIEW_ID(i) PS_FIELD(62 + ((i) * 10), 10)
+#define NUM_ANCHOR_REFS_L(i) PS_FIELD(82 + ((i) * 11), 1)
+#define ANCHOR_REF_L(i) PS_FIELD(83 + ((i) * 11), 10)
+#define NUM_NON_ANCHOR_REFS_L(i) PS_FIELD(104 + ((i) * 11), 1)
+#define NON_ANCHOR_REFS_L(i) PS_FIELD(105 + ((i) * 11), 10)
+#define PIC_PARAMETER_SET_ID PS_FIELD(128, 8)
+#define PPS_SEQ_PARAMETER_SET_ID PS_FIELD(136, 5)
+#define ENTROPY_CODING_MODE_FLAG PS_FIELD(141, 1)
+#define BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT_FLAG PS_FIELD(142, 1)
+#define NUM_REF_IDX_L_DEFAULT_ACTIVE_MINUS1(i) PS_FIELD(143 + ((i) * 5), 5)
+#define WEIGHTED_PRED_FLAG PS_FIELD(153, 1)
+#define WEIGHTED_BIPRED_IDC PS_FIELD(154, 2)
+#define PIC_INIT_QP_MINUS26 PS_FIELD(156, 7)
+#define PIC_INIT_QS_MINUS26 PS_FIELD(163, 6)
+#define CHROMA_QP_INDEX_OFFSET PS_FIELD(169, 5)
+#define DEBLOCKING_FILTER_CONTROL_PRESENT_FLAG PS_FIELD(174, 1)
+#define CONSTRAINED_INTRA_PRED_FLAG PS_FIELD(175, 1)
+#define REDUNDANT_PIC_CNT_PRESENT PS_FIELD(176, 1)
+#define TRANSFORM_8X8_MODE_FLAG PS_FIELD(177, 1)
+#define SECOND_CHROMA_QP_INDEX_OFFSET PS_FIELD(178, 5)
+#define SCALING_LIST_ENABLE_FLAG PS_FIELD(183, 1)
+#define SCALING_LIST_ADDRESS PS_FIELD(184, 32)
+#define IS_LONG_TERM(i) PS_FIELD(216 + (i), 1)
+
+#define DPB_OFFS(i, j) (288 + ((j) * 32 * 7) + ((i) * 7))
+#define DPB_INFO(i, j) PS_FIELD(DPB_OFFS(i, j), 5)
+#define BOTTOM_FLAG(i, j) PS_FIELD(DPB_OFFS(i, j) + 5, 1)
+#define VIEW_INDEX_OFF(i, j) PS_FIELD(DPB_OFFS(i, j) + 6, 1)
+
+/* Data structure describing auxiliary buffer format. */
+struct rkvdec_h264_priv_tbl {
+ s8 cabac_table[4][464][2];
+ u8 scaling_list[RKV_SCALING_LIST_SIZE];
+ u32 rps[RKV_RPS_SIZE];
+ struct rkvdec_sps_pps_packet param_set[256];
+ u8 err_info[RKV_ERROR_INFO_SIZE];
+};
+
+#define RKVDEC_H264_DPB_SIZE 16
+
+struct rkvdec_h264_reflists {
+ u8 p[RKVDEC_H264_DPB_SIZE];
+ u8 b0[RKVDEC_H264_DPB_SIZE];
+ u8 b1[RKVDEC_H264_DPB_SIZE];
+ u8 num_valid;
+};
+
+struct rkvdec_h264_run {
+ struct rkvdec_run base;
+ const struct v4l2_ctrl_h264_decode_params *decode_params;
+ const struct v4l2_ctrl_h264_slice_params *slices_params;
+ const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pps *pps;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
+};
+
+struct rkvdec_h264_ctx {
+ struct rkvdec_aux_buf priv_tbl;
+ struct rkvdec_h264_reflists reflists;
+};
+
+#define CABAC_ENTRY(ctxidx, idc0_m, idc0_n, idc1_m, idc1_n, \
+ idc2_m, idc2_n, intra_m, intra_n) \
+ [0][(ctxidx)] = {idc0_m, idc0_n}, \
+ [1][(ctxidx)] = {idc1_m, idc1_n}, \
+ [2][(ctxidx)] = {idc2_m, idc2_n}, \
+ [3][(ctxidx)] = {intra_m, intra_n}
+
+/*
+ * Constant CABAC table.
+ * Built from the tables described in section '9.3.1.1 Initialisation process
+ * for context variables' of the H.264 spec.
+ */
+static const s8 rkvdec_h264_cabac_table[4][464][2] = {
+ /* Table 9-12 – Values of variables m and n for ctxIdx from 0 to 10 */
+ CABAC_ENTRY(0, 20, -15, 20, -15, 20, -15, 20, -15),
+ CABAC_ENTRY(1, 2, 54, 2, 54, 2, 54, 2, 54),
+ CABAC_ENTRY(2, 3, 74, 3, 74, 3, 74, 3, 74),
+ CABAC_ENTRY(3, 20, -15, 20, -15, 20, -15, 20, -15),
+ CABAC_ENTRY(4, 2, 54, 2, 54, 2, 54, 2, 54),
+ CABAC_ENTRY(5, 3, 74, 3, 74, 3, 74, 3, 74),
+ CABAC_ENTRY(6, -28, 127, -28, 127, -28, 127, -28, 127),
+ CABAC_ENTRY(7, -23, 104, -23, 104, -23, 104, -23, 104),
+ CABAC_ENTRY(8, -6, 53, -6, 53, -6, 53, -6, 53),
+ CABAC_ENTRY(9, -1, 54, -1, 54, -1, 54, -1, 54),
+ CABAC_ENTRY(10, 7, 51, 7, 51, 7, 51, 7, 51),
+
+ /* Table 9-13 – Values of variables m and n for ctxIdx from 11 to 23 */
+ CABAC_ENTRY(11, 23, 33, 22, 25, 29, 16, 0, 0),
+ CABAC_ENTRY(12, 23, 2, 34, 0, 25, 0, 0, 0),
+ CABAC_ENTRY(13, 21, 0, 16, 0, 14, 0, 0, 0),
+ CABAC_ENTRY(14, 1, 9, -2, 9, -10, 51, 0, 0),
+ CABAC_ENTRY(15, 0, 49, 4, 41, -3, 62, 0, 0),
+ CABAC_ENTRY(16, -37, 118, -29, 118, -27, 99, 0, 0),
+ CABAC_ENTRY(17, 5, 57, 2, 65, 26, 16, 0, 0),
+ CABAC_ENTRY(18, -13, 78, -6, 71, -4, 85, 0, 0),
+ CABAC_ENTRY(19, -11, 65, -13, 79, -24, 102, 0, 0),
+ CABAC_ENTRY(20, 1, 62, 5, 52, 5, 57, 0, 0),
+ CABAC_ENTRY(21, 12, 49, 9, 50, 6, 57, 0, 0),
+ CABAC_ENTRY(22, -4, 73, -3, 70, -17, 73, 0, 0),
+ CABAC_ENTRY(23, 17, 50, 10, 54, 14, 57, 0, 0),
+
+ /* Table 9-14 – Values of variables m and n for ctxIdx from 24 to 39 */
+ CABAC_ENTRY(24, 18, 64, 26, 34, 20, 40, 0, 0),
+ CABAC_ENTRY(25, 9, 43, 19, 22, 20, 10, 0, 0),
+ CABAC_ENTRY(26, 29, 0, 40, 0, 29, 0, 0, 0),
+ CABAC_ENTRY(27, 26, 67, 57, 2, 54, 0, 0, 0),
+ CABAC_ENTRY(28, 16, 90, 41, 36, 37, 42, 0, 0),
+ CABAC_ENTRY(29, 9, 104, 26, 69, 12, 97, 0, 0),
+ CABAC_ENTRY(30, -46, 127, -45, 127, -32, 127, 0, 0),
+ CABAC_ENTRY(31, -20, 104, -15, 101, -22, 117, 0, 0),
+ CABAC_ENTRY(32, 1, 67, -4, 76, -2, 74, 0, 0),
+ CABAC_ENTRY(33, -13, 78, -6, 71, -4, 85, 0, 0),
+ CABAC_ENTRY(34, -11, 65, -13, 79, -24, 102, 0, 0),
+ CABAC_ENTRY(35, 1, 62, 5, 52, 5, 57, 0, 0),
+ CABAC_ENTRY(36, -6, 86, 6, 69, -6, 93, 0, 0),
+ CABAC_ENTRY(37, -17, 95, -13, 90, -14, 88, 0, 0),
+ CABAC_ENTRY(38, -6, 61, 0, 52, -6, 44, 0, 0),
+ CABAC_ENTRY(39, 9, 45, 8, 43, 4, 55, 0, 0),
+
+ /* Table 9-15 – Values of variables m and n for ctxIdx from 40 to 53 */
+ CABAC_ENTRY(40, -3, 69, -2, 69, -11, 89, 0, 0),
+ CABAC_ENTRY(41, -6, 81, -5, 82, -15, 103, 0, 0),
+ CABAC_ENTRY(42, -11, 96, -10, 96, -21, 116, 0, 0),
+ CABAC_ENTRY(43, 6, 55, 2, 59, 19, 57, 0, 0),
+ CABAC_ENTRY(44, 7, 67, 2, 75, 20, 58, 0, 0),
+ CABAC_ENTRY(45, -5, 86, -3, 87, 4, 84, 0, 0),
+ CABAC_ENTRY(46, 2, 88, -3, 100, 6, 96, 0, 0),
+ CABAC_ENTRY(47, 0, 58, 1, 56, 1, 63, 0, 0),
+ CABAC_ENTRY(48, -3, 76, -3, 74, -5, 85, 0, 0),
+ CABAC_ENTRY(49, -10, 94, -6, 85, -13, 106, 0, 0),
+ CABAC_ENTRY(50, 5, 54, 0, 59, 5, 63, 0, 0),
+ CABAC_ENTRY(51, 4, 69, -3, 81, 6, 75, 0, 0),
+ CABAC_ENTRY(52, -3, 81, -7, 86, -3, 90, 0, 0),
+ CABAC_ENTRY(53, 0, 88, -5, 95, -1, 101, 0, 0),
+
+ /* Table 9-16 – Values of variables m and n for ctxIdx from 54 to 59 */
+ CABAC_ENTRY(54, -7, 67, -1, 66, 3, 55, 0, 0),
+ CABAC_ENTRY(55, -5, 74, -1, 77, -4, 79, 0, 0),
+ CABAC_ENTRY(56, -4, 74, 1, 70, -2, 75, 0, 0),
+ CABAC_ENTRY(57, -5, 80, -2, 86, -12, 97, 0, 0),
+ CABAC_ENTRY(58, -7, 72, -5, 72, -7, 50, 0, 0),
+ CABAC_ENTRY(59, 1, 58, 0, 61, 1, 60, 0, 0),
+
+ /* Table 9-17 – Values of variables m and n for ctxIdx from 60 to 69 */
+ CABAC_ENTRY(60, 0, 41, 0, 41, 0, 41, 0, 41),
+ CABAC_ENTRY(61, 0, 63, 0, 63, 0, 63, 0, 63),
+ CABAC_ENTRY(62, 0, 63, 0, 63, 0, 63, 0, 63),
+ CABAC_ENTRY(63, 0, 63, 0, 63, 0, 63, 0, 63),
+ CABAC_ENTRY(64, -9, 83, -9, 83, -9, 83, -9, 83),
+ CABAC_ENTRY(65, 4, 86, 4, 86, 4, 86, 4, 86),
+ CABAC_ENTRY(66, 0, 97, 0, 97, 0, 97, 0, 97),
+ CABAC_ENTRY(67, -7, 72, -7, 72, -7, 72, -7, 72),
+ CABAC_ENTRY(68, 13, 41, 13, 41, 13, 41, 13, 41),
+ CABAC_ENTRY(69, 3, 62, 3, 62, 3, 62, 3, 62),
+
+ /* Table 9-18 – Values of variables m and n for ctxIdx from 70 to 104 */
+ CABAC_ENTRY(70, 0, 45, 13, 15, 7, 34, 0, 11),
+ CABAC_ENTRY(71, -4, 78, 7, 51, -9, 88, 1, 55),
+ CABAC_ENTRY(72, -3, 96, 2, 80, -20, 127, 0, 69),
+ CABAC_ENTRY(73, -27, 126, -39, 127, -36, 127, -17, 127),
+ CABAC_ENTRY(74, -28, 98, -18, 91, -17, 91, -13, 102),
+ CABAC_ENTRY(75, -25, 101, -17, 96, -14, 95, 0, 82),
+ CABAC_ENTRY(76, -23, 67, -26, 81, -25, 84, -7, 74),
+ CABAC_ENTRY(77, -28, 82, -35, 98, -25, 86, -21, 107),
+ CABAC_ENTRY(78, -20, 94, -24, 102, -12, 89, -27, 127),
+ CABAC_ENTRY(79, -16, 83, -23, 97, -17, 91, -31, 127),
+ CABAC_ENTRY(80, -22, 110, -27, 119, -31, 127, -24, 127),
+ CABAC_ENTRY(81, -21, 91, -24, 99, -14, 76, -18, 95),
+ CABAC_ENTRY(82, -18, 102, -21, 110, -18, 103, -27, 127),
+ CABAC_ENTRY(83, -13, 93, -18, 102, -13, 90, -21, 114),
+ CABAC_ENTRY(84, -29, 127, -36, 127, -37, 127, -30, 127),
+ CABAC_ENTRY(85, -7, 92, 0, 80, 11, 80, -17, 123),
+ CABAC_ENTRY(86, -5, 89, -5, 89, 5, 76, -12, 115),
+ CABAC_ENTRY(87, -7, 96, -7, 94, 2, 84, -16, 122),
+ CABAC_ENTRY(88, -13, 108, -4, 92, 5, 78, -11, 115),
+ CABAC_ENTRY(89, -3, 46, 0, 39, -6, 55, -12, 63),
+ CABAC_ENTRY(90, -1, 65, 0, 65, 4, 61, -2, 68),
+ CABAC_ENTRY(91, -1, 57, -15, 84, -14, 83, -15, 84),
+ CABAC_ENTRY(92, -9, 93, -35, 127, -37, 127, -13, 104),
+ CABAC_ENTRY(93, -3, 74, -2, 73, -5, 79, -3, 70),
+ CABAC_ENTRY(94, -9, 92, -12, 104, -11, 104, -8, 93),
+ CABAC_ENTRY(95, -8, 87, -9, 91, -11, 91, -10, 90),
+ CABAC_ENTRY(96, -23, 126, -31, 127, -30, 127, -30, 127),
+ CABAC_ENTRY(97, 5, 54, 3, 55, 0, 65, -1, 74),
+ CABAC_ENTRY(98, 6, 60, 7, 56, -2, 79, -6, 97),
+ CABAC_ENTRY(99, 6, 59, 7, 55, 0, 72, -7, 91),
+ CABAC_ENTRY(100, 6, 69, 8, 61, -4, 92, -20, 127),
+ CABAC_ENTRY(101, -1, 48, -3, 53, -6, 56, -4, 56),
+ CABAC_ENTRY(102, 0, 68, 0, 68, 3, 68, -5, 82),
+ CABAC_ENTRY(103, -4, 69, -7, 74, -8, 71, -7, 76),
+ CABAC_ENTRY(104, -8, 88, -9, 88, -13, 98, -22, 125),
+
+ /* Table 9-19 – Values of variables m and n for ctxIdx from 105 to 165 */
+ CABAC_ENTRY(105, -2, 85, -13, 103, -4, 86, -7, 93),
+ CABAC_ENTRY(106, -6, 78, -13, 91, -12, 88, -11, 87),
+ CABAC_ENTRY(107, -1, 75, -9, 89, -5, 82, -3, 77),
+ CABAC_ENTRY(108, -7, 77, -14, 92, -3, 72, -5, 71),
+ CABAC_ENTRY(109, 2, 54, -8, 76, -4, 67, -4, 63),
+ CABAC_ENTRY(110, 5, 50, -12, 87, -8, 72, -4, 68),
+ CABAC_ENTRY(111, -3, 68, -23, 110, -16, 89, -12, 84),
+ CABAC_ENTRY(112, 1, 50, -24, 105, -9, 69, -7, 62),
+ CABAC_ENTRY(113, 6, 42, -10, 78, -1, 59, -7, 65),
+ CABAC_ENTRY(114, -4, 81, -20, 112, 5, 66, 8, 61),
+ CABAC_ENTRY(115, 1, 63, -17, 99, 4, 57, 5, 56),
+ CABAC_ENTRY(116, -4, 70, -78, 127, -4, 71, -2, 66),
+ CABAC_ENTRY(117, 0, 67, -70, 127, -2, 71, 1, 64),
+ CABAC_ENTRY(118, 2, 57, -50, 127, 2, 58, 0, 61),
+ CABAC_ENTRY(119, -2, 76, -46, 127, -1, 74, -2, 78),
+ CABAC_ENTRY(120, 11, 35, -4, 66, -4, 44, 1, 50),
+ CABAC_ENTRY(121, 4, 64, -5, 78, -1, 69, 7, 52),
+ CABAC_ENTRY(122, 1, 61, -4, 71, 0, 62, 10, 35),
+ CABAC_ENTRY(123, 11, 35, -8, 72, -7, 51, 0, 44),
+ CABAC_ENTRY(124, 18, 25, 2, 59, -4, 47, 11, 38),
+ CABAC_ENTRY(125, 12, 24, -1, 55, -6, 42, 1, 45),
+ CABAC_ENTRY(126, 13, 29, -7, 70, -3, 41, 0, 46),
+ CABAC_ENTRY(127, 13, 36, -6, 75, -6, 53, 5, 44),
+ CABAC_ENTRY(128, -10, 93, -8, 89, 8, 76, 31, 17),
+ CABAC_ENTRY(129, -7, 73, -34, 119, -9, 78, 1, 51),
+ CABAC_ENTRY(130, -2, 73, -3, 75, -11, 83, 7, 50),
+ CABAC_ENTRY(131, 13, 46, 32, 20, 9, 52, 28, 19),
+ CABAC_ENTRY(132, 9, 49, 30, 22, 0, 67, 16, 33),
+ CABAC_ENTRY(133, -7, 100, -44, 127, -5, 90, 14, 62),
+ CABAC_ENTRY(134, 9, 53, 0, 54, 1, 67, -13, 108),
+ CABAC_ENTRY(135, 2, 53, -5, 61, -15, 72, -15, 100),
+ CABAC_ENTRY(136, 5, 53, 0, 58, -5, 75, -13, 101),
+ CABAC_ENTRY(137, -2, 61, -1, 60, -8, 80, -13, 91),
+ CABAC_ENTRY(138, 0, 56, -3, 61, -21, 83, -12, 94),
+ CABAC_ENTRY(139, 0, 56, -8, 67, -21, 64, -10, 88),
+ CABAC_ENTRY(140, -13, 63, -25, 84, -13, 31, -16, 84),
+ CABAC_ENTRY(141, -5, 60, -14, 74, -25, 64, -10, 86),
+ CABAC_ENTRY(142, -1, 62, -5, 65, -29, 94, -7, 83),
+ CABAC_ENTRY(143, 4, 57, 5, 52, 9, 75, -13, 87),
+ CABAC_ENTRY(144, -6, 69, 2, 57, 17, 63, -19, 94),
+ CABAC_ENTRY(145, 4, 57, 0, 61, -8, 74, 1, 70),
+ CABAC_ENTRY(146, 14, 39, -9, 69, -5, 35, 0, 72),
+ CABAC_ENTRY(147, 4, 51, -11, 70, -2, 27, -5, 74),
+ CABAC_ENTRY(148, 13, 68, 18, 55, 13, 91, 18, 59),
+ CABAC_ENTRY(149, 3, 64, -4, 71, 3, 65, -8, 102),
+ CABAC_ENTRY(150, 1, 61, 0, 58, -7, 69, -15, 100),
+ CABAC_ENTRY(151, 9, 63, 7, 61, 8, 77, 0, 95),
+ CABAC_ENTRY(152, 7, 50, 9, 41, -10, 66, -4, 75),
+ CABAC_ENTRY(153, 16, 39, 18, 25, 3, 62, 2, 72),
+ CABAC_ENTRY(154, 5, 44, 9, 32, -3, 68, -11, 75),
+ CABAC_ENTRY(155, 4, 52, 5, 43, -20, 81, -3, 71),
+ CABAC_ENTRY(156, 11, 48, 9, 47, 0, 30, 15, 46),
+ CABAC_ENTRY(157, -5, 60, 0, 44, 1, 7, -13, 69),
+ CABAC_ENTRY(158, -1, 59, 0, 51, -3, 23, 0, 62),
+ CABAC_ENTRY(159, 0, 59, 2, 46, -21, 74, 0, 65),
+ CABAC_ENTRY(160, 22, 33, 19, 38, 16, 66, 21, 37),
+ CABAC_ENTRY(161, 5, 44, -4, 66, -23, 124, -15, 72),
+ CABAC_ENTRY(162, 14, 43, 15, 38, 17, 37, 9, 57),
+ CABAC_ENTRY(163, -1, 78, 12, 42, 44, -18, 16, 54),
+ CABAC_ENTRY(164, 0, 60, 9, 34, 50, -34, 0, 62),
+ CABAC_ENTRY(165, 9, 69, 0, 89, -22, 127, 12, 72),
+
+ /* Table 9-20 – Values of variables m and n for ctxIdx from 166 to 226 */
+ CABAC_ENTRY(166, 11, 28, 4, 45, 4, 39, 24, 0),
+ CABAC_ENTRY(167, 2, 40, 10, 28, 0, 42, 15, 9),
+ CABAC_ENTRY(168, 3, 44, 10, 31, 7, 34, 8, 25),
+ CABAC_ENTRY(169, 0, 49, 33, -11, 11, 29, 13, 18),
+ CABAC_ENTRY(170, 0, 46, 52, -43, 8, 31, 15, 9),
+ CABAC_ENTRY(171, 2, 44, 18, 15, 6, 37, 13, 19),
+ CABAC_ENTRY(172, 2, 51, 28, 0, 7, 42, 10, 37),
+ CABAC_ENTRY(173, 0, 47, 35, -22, 3, 40, 12, 18),
+ CABAC_ENTRY(174, 4, 39, 38, -25, 8, 33, 6, 29),
+ CABAC_ENTRY(175, 2, 62, 34, 0, 13, 43, 20, 33),
+ CABAC_ENTRY(176, 6, 46, 39, -18, 13, 36, 15, 30),
+ CABAC_ENTRY(177, 0, 54, 32, -12, 4, 47, 4, 45),
+ CABAC_ENTRY(178, 3, 54, 102, -94, 3, 55, 1, 58),
+ CABAC_ENTRY(179, 2, 58, 0, 0, 2, 58, 0, 62),
+ CABAC_ENTRY(180, 4, 63, 56, -15, 6, 60, 7, 61),
+ CABAC_ENTRY(181, 6, 51, 33, -4, 8, 44, 12, 38),
+ CABAC_ENTRY(182, 6, 57, 29, 10, 11, 44, 11, 45),
+ CABAC_ENTRY(183, 7, 53, 37, -5, 14, 42, 15, 39),
+ CABAC_ENTRY(184, 6, 52, 51, -29, 7, 48, 11, 42),
+ CABAC_ENTRY(185, 6, 55, 39, -9, 4, 56, 13, 44),
+ CABAC_ENTRY(186, 11, 45, 52, -34, 4, 52, 16, 45),
+ CABAC_ENTRY(187, 14, 36, 69, -58, 13, 37, 12, 41),
+ CABAC_ENTRY(188, 8, 53, 67, -63, 9, 49, 10, 49),
+ CABAC_ENTRY(189, -1, 82, 44, -5, 19, 58, 30, 34),
+ CABAC_ENTRY(190, 7, 55, 32, 7, 10, 48, 18, 42),
+ CABAC_ENTRY(191, -3, 78, 55, -29, 12, 45, 10, 55),
+ CABAC_ENTRY(192, 15, 46, 32, 1, 0, 69, 17, 51),
+ CABAC_ENTRY(193, 22, 31, 0, 0, 20, 33, 17, 46),
+ CABAC_ENTRY(194, -1, 84, 27, 36, 8, 63, 0, 89),
+ CABAC_ENTRY(195, 25, 7, 33, -25, 35, -18, 26, -19),
+ CABAC_ENTRY(196, 30, -7, 34, -30, 33, -25, 22, -17),
+ CABAC_ENTRY(197, 28, 3, 36, -28, 28, -3, 26, -17),
+ CABAC_ENTRY(198, 28, 4, 38, -28, 24, 10, 30, -25),
+ CABAC_ENTRY(199, 32, 0, 38, -27, 27, 0, 28, -20),
+ CABAC_ENTRY(200, 34, -1, 34, -18, 34, -14, 33, -23),
+ CABAC_ENTRY(201, 30, 6, 35, -16, 52, -44, 37, -27),
+ CABAC_ENTRY(202, 30, 6, 34, -14, 39, -24, 33, -23),
+ CABAC_ENTRY(203, 32, 9, 32, -8, 19, 17, 40, -28),
+ CABAC_ENTRY(204, 31, 19, 37, -6, 31, 25, 38, -17),
+ CABAC_ENTRY(205, 26, 27, 35, 0, 36, 29, 33, -11),
+ CABAC_ENTRY(206, 26, 30, 30, 10, 24, 33, 40, -15),
+ CABAC_ENTRY(207, 37, 20, 28, 18, 34, 15, 41, -6),
+ CABAC_ENTRY(208, 28, 34, 26, 25, 30, 20, 38, 1),
+ CABAC_ENTRY(209, 17, 70, 29, 41, 22, 73, 41, 17),
+ CABAC_ENTRY(210, 1, 67, 0, 75, 20, 34, 30, -6),
+ CABAC_ENTRY(211, 5, 59, 2, 72, 19, 31, 27, 3),
+ CABAC_ENTRY(212, 9, 67, 8, 77, 27, 44, 26, 22),
+ CABAC_ENTRY(213, 16, 30, 14, 35, 19, 16, 37, -16),
+ CABAC_ENTRY(214, 18, 32, 18, 31, 15, 36, 35, -4),
+ CABAC_ENTRY(215, 18, 35, 17, 35, 15, 36, 38, -8),
+ CABAC_ENTRY(216, 22, 29, 21, 30, 21, 28, 38, -3),
+ CABAC_ENTRY(217, 24, 31, 17, 45, 25, 21, 37, 3),
+ CABAC_ENTRY(218, 23, 38, 20, 42, 30, 20, 38, 5),
+ CABAC_ENTRY(219, 18, 43, 18, 45, 31, 12, 42, 0),
+ CABAC_ENTRY(220, 20, 41, 27, 26, 27, 16, 35, 16),
+ CABAC_ENTRY(221, 11, 63, 16, 54, 24, 42, 39, 22),
+ CABAC_ENTRY(222, 9, 59, 7, 66, 0, 93, 14, 48),
+ CABAC_ENTRY(223, 9, 64, 16, 56, 14, 56, 27, 37),
+ CABAC_ENTRY(224, -1, 94, 11, 73, 15, 57, 21, 60),
+ CABAC_ENTRY(225, -2, 89, 10, 67, 26, 38, 12, 68),
+ CABAC_ENTRY(226, -9, 108, -10, 116, -24, 127, 2, 97),
+
+ /* Table 9-21 – Values of variables m and n for ctxIdx from 227 to 275 */
+ CABAC_ENTRY(227, -6, 76, -23, 112, -24, 115, -3, 71),
+ CABAC_ENTRY(228, -2, 44, -15, 71, -22, 82, -6, 42),
+ CABAC_ENTRY(229, 0, 45, -7, 61, -9, 62, -5, 50),
+ CABAC_ENTRY(230, 0, 52, 0, 53, 0, 53, -3, 54),
+ CABAC_ENTRY(231, -3, 64, -5, 66, 0, 59, -2, 62),
+ CABAC_ENTRY(232, -2, 59, -11, 77, -14, 85, 0, 58),
+ CABAC_ENTRY(233, -4, 70, -9, 80, -13, 89, 1, 63),
+ CABAC_ENTRY(234, -4, 75, -9, 84, -13, 94, -2, 72),
+ CABAC_ENTRY(235, -8, 82, -10, 87, -11, 92, -1, 74),
+ CABAC_ENTRY(236, -17, 102, -34, 127, -29, 127, -9, 91),
+ CABAC_ENTRY(237, -9, 77, -21, 101, -21, 100, -5, 67),
+ CABAC_ENTRY(238, 3, 24, -3, 39, -14, 57, -5, 27),
+ CABAC_ENTRY(239, 0, 42, -5, 53, -12, 67, -3, 39),
+ CABAC_ENTRY(240, 0, 48, -7, 61, -11, 71, -2, 44),
+ CABAC_ENTRY(241, 0, 55, -11, 75, -10, 77, 0, 46),
+ CABAC_ENTRY(242, -6, 59, -15, 77, -21, 85, -16, 64),
+ CABAC_ENTRY(243, -7, 71, -17, 91, -16, 88, -8, 68),
+ CABAC_ENTRY(244, -12, 83, -25, 107, -23, 104, -10, 78),
+ CABAC_ENTRY(245, -11, 87, -25, 111, -15, 98, -6, 77),
+ CABAC_ENTRY(246, -30, 119, -28, 122, -37, 127, -10, 86),
+ CABAC_ENTRY(247, 1, 58, -11, 76, -10, 82, -12, 92),
+ CABAC_ENTRY(248, -3, 29, -10, 44, -8, 48, -15, 55),
+ CABAC_ENTRY(249, -1, 36, -10, 52, -8, 61, -10, 60),
+ CABAC_ENTRY(250, 1, 38, -10, 57, -8, 66, -6, 62),
+ CABAC_ENTRY(251, 2, 43, -9, 58, -7, 70, -4, 65),
+ CABAC_ENTRY(252, -6, 55, -16, 72, -14, 75, -12, 73),
+ CABAC_ENTRY(253, 0, 58, -7, 69, -10, 79, -8, 76),
+ CABAC_ENTRY(254, 0, 64, -4, 69, -9, 83, -7, 80),
+ CABAC_ENTRY(255, -3, 74, -5, 74, -12, 92, -9, 88),
+ CABAC_ENTRY(256, -10, 90, -9, 86, -18, 108, -17, 110),
+ CABAC_ENTRY(257, 0, 70, 2, 66, -4, 79, -11, 97),
+ CABAC_ENTRY(258, -4, 29, -9, 34, -22, 69, -20, 84),
+ CABAC_ENTRY(259, 5, 31, 1, 32, -16, 75, -11, 79),
+ CABAC_ENTRY(260, 7, 42, 11, 31, -2, 58, -6, 73),
+ CABAC_ENTRY(261, 1, 59, 5, 52, 1, 58, -4, 74),
+ CABAC_ENTRY(262, -2, 58, -2, 55, -13, 78, -13, 86),
+ CABAC_ENTRY(263, -3, 72, -2, 67, -9, 83, -13, 96),
+ CABAC_ENTRY(264, -3, 81, 0, 73, -4, 81, -11, 97),
+ CABAC_ENTRY(265, -11, 97, -8, 89, -13, 99, -19, 117),
+ CABAC_ENTRY(266, 0, 58, 3, 52, -13, 81, -8, 78),
+ CABAC_ENTRY(267, 8, 5, 7, 4, -6, 38, -5, 33),
+ CABAC_ENTRY(268, 10, 14, 10, 8, -13, 62, -4, 48),
+ CABAC_ENTRY(269, 14, 18, 17, 8, -6, 58, -2, 53),
+ CABAC_ENTRY(270, 13, 27, 16, 19, -2, 59, -3, 62),
+ CABAC_ENTRY(271, 2, 40, 3, 37, -16, 73, -13, 71),
+ CABAC_ENTRY(272, 0, 58, -1, 61, -10, 76, -10, 79),
+ CABAC_ENTRY(273, -3, 70, -5, 73, -13, 86, -12, 86),
+ CABAC_ENTRY(274, -6, 79, -1, 70, -9, 83, -13, 90),
+ CABAC_ENTRY(275, -8, 85, -4, 78, -10, 87, -14, 97),
+
+ /* Table 9-22 – Values of variables m and n for ctxIdx from 277 to 337 */
+ CABAC_ENTRY(277, -13, 106, -21, 126, -22, 127, -6, 93),
+ CABAC_ENTRY(278, -16, 106, -23, 124, -25, 127, -6, 84),
+ CABAC_ENTRY(279, -10, 87, -20, 110, -25, 120, -8, 79),
+ CABAC_ENTRY(280, -21, 114, -26, 126, -27, 127, 0, 66),
+ CABAC_ENTRY(281, -18, 110, -25, 124, -19, 114, -1, 71),
+ CABAC_ENTRY(282, -14, 98, -17, 105, -23, 117, 0, 62),
+ CABAC_ENTRY(283, -22, 110, -27, 121, -25, 118, -2, 60),
+ CABAC_ENTRY(284, -21, 106, -27, 117, -26, 117, -2, 59),
+ CABAC_ENTRY(285, -18, 103, -17, 102, -24, 113, -5, 75),
+ CABAC_ENTRY(286, -21, 107, -26, 117, -28, 118, -3, 62),
+ CABAC_ENTRY(287, -23, 108, -27, 116, -31, 120, -4, 58),
+ CABAC_ENTRY(288, -26, 112, -33, 122, -37, 124, -9, 66),
+ CABAC_ENTRY(289, -10, 96, -10, 95, -10, 94, -1, 79),
+ CABAC_ENTRY(290, -12, 95, -14, 100, -15, 102, 0, 71),
+ CABAC_ENTRY(291, -5, 91, -8, 95, -10, 99, 3, 68),
+ CABAC_ENTRY(292, -9, 93, -17, 111, -13, 106, 10, 44),
+ CABAC_ENTRY(293, -22, 94, -28, 114, -50, 127, -7, 62),
+ CABAC_ENTRY(294, -5, 86, -6, 89, -5, 92, 15, 36),
+ CABAC_ENTRY(295, 9, 67, -2, 80, 17, 57, 14, 40),
+ CABAC_ENTRY(296, -4, 80, -4, 82, -5, 86, 16, 27),
+ CABAC_ENTRY(297, -10, 85, -9, 85, -13, 94, 12, 29),
+ CABAC_ENTRY(298, -1, 70, -8, 81, -12, 91, 1, 44),
+ CABAC_ENTRY(299, 7, 60, -1, 72, -2, 77, 20, 36),
+ CABAC_ENTRY(300, 9, 58, 5, 64, 0, 71, 18, 32),
+ CABAC_ENTRY(301, 5, 61, 1, 67, -1, 73, 5, 42),
+ CABAC_ENTRY(302, 12, 50, 9, 56, 4, 64, 1, 48),
+ CABAC_ENTRY(303, 15, 50, 0, 69, -7, 81, 10, 62),
+ CABAC_ENTRY(304, 18, 49, 1, 69, 5, 64, 17, 46),
+ CABAC_ENTRY(305, 17, 54, 7, 69, 15, 57, 9, 64),
+ CABAC_ENTRY(306, 10, 41, -7, 69, 1, 67, -12, 104),
+ CABAC_ENTRY(307, 7, 46, -6, 67, 0, 68, -11, 97),
+ CABAC_ENTRY(308, -1, 51, -16, 77, -10, 67, -16, 96),
+ CABAC_ENTRY(309, 7, 49, -2, 64, 1, 68, -7, 88),
+ CABAC_ENTRY(310, 8, 52, 2, 61, 0, 77, -8, 85),
+ CABAC_ENTRY(311, 9, 41, -6, 67, 2, 64, -7, 85),
+ CABAC_ENTRY(312, 6, 47, -3, 64, 0, 68, -9, 85),
+ CABAC_ENTRY(313, 2, 55, 2, 57, -5, 78, -13, 88),
+ CABAC_ENTRY(314, 13, 41, -3, 65, 7, 55, 4, 66),
+ CABAC_ENTRY(315, 10, 44, -3, 66, 5, 59, -3, 77),
+ CABAC_ENTRY(316, 6, 50, 0, 62, 2, 65, -3, 76),
+ CABAC_ENTRY(317, 5, 53, 9, 51, 14, 54, -6, 76),
+ CABAC_ENTRY(318, 13, 49, -1, 66, 15, 44, 10, 58),
+ CABAC_ENTRY(319, 4, 63, -2, 71, 5, 60, -1, 76),
+ CABAC_ENTRY(320, 6, 64, -2, 75, 2, 70, -1, 83),
+ CABAC_ENTRY(321, -2, 69, -1, 70, -2, 76, -7, 99),
+ CABAC_ENTRY(322, -2, 59, -9, 72, -18, 86, -14, 95),
+ CABAC_ENTRY(323, 6, 70, 14, 60, 12, 70, 2, 95),
+ CABAC_ENTRY(324, 10, 44, 16, 37, 5, 64, 0, 76),
+ CABAC_ENTRY(325, 9, 31, 0, 47, -12, 70, -5, 74),
+ CABAC_ENTRY(326, 12, 43, 18, 35, 11, 55, 0, 70),
+ CABAC_ENTRY(327, 3, 53, 11, 37, 5, 56, -11, 75),
+ CABAC_ENTRY(328, 14, 34, 12, 41, 0, 69, 1, 68),
+ CABAC_ENTRY(329, 10, 38, 10, 41, 2, 65, 0, 65),
+ CABAC_ENTRY(330, -3, 52, 2, 48, -6, 74, -14, 73),
+ CABAC_ENTRY(331, 13, 40, 12, 41, 5, 54, 3, 62),
+ CABAC_ENTRY(332, 17, 32, 13, 41, 7, 54, 4, 62),
+ CABAC_ENTRY(333, 7, 44, 0, 59, -6, 76, -1, 68),
+ CABAC_ENTRY(334, 7, 38, 3, 50, -11, 82, -13, 75),
+ CABAC_ENTRY(335, 13, 50, 19, 40, -2, 77, 11, 55),
+ CABAC_ENTRY(336, 10, 57, 3, 66, -2, 77, 5, 64),
+ CABAC_ENTRY(337, 26, 43, 18, 50, 25, 42, 12, 70),
+
+ /* Table 9-23 – Values of variables m and n for ctxIdx from 338 to 398 */
+ CABAC_ENTRY(338, 14, 11, 19, -6, 17, -13, 15, 6),
+ CABAC_ENTRY(339, 11, 14, 18, -6, 16, -9, 6, 19),
+ CABAC_ENTRY(340, 9, 11, 14, 0, 17, -12, 7, 16),
+ CABAC_ENTRY(341, 18, 11, 26, -12, 27, -21, 12, 14),
+ CABAC_ENTRY(342, 21, 9, 31, -16, 37, -30, 18, 13),
+ CABAC_ENTRY(343, 23, -2, 33, -25, 41, -40, 13, 11),
+ CABAC_ENTRY(344, 32, -15, 33, -22, 42, -41, 13, 15),
+ CABAC_ENTRY(345, 32, -15, 37, -28, 48, -47, 15, 16),
+ CABAC_ENTRY(346, 34, -21, 39, -30, 39, -32, 12, 23),
+ CABAC_ENTRY(347, 39, -23, 42, -30, 46, -40, 13, 23),
+ CABAC_ENTRY(348, 42, -33, 47, -42, 52, -51, 15, 20),
+ CABAC_ENTRY(349, 41, -31, 45, -36, 46, -41, 14, 26),
+ CABAC_ENTRY(350, 46, -28, 49, -34, 52, -39, 14, 44),
+ CABAC_ENTRY(351, 38, -12, 41, -17, 43, -19, 17, 40),
+ CABAC_ENTRY(352, 21, 29, 32, 9, 32, 11, 17, 47),
+ CABAC_ENTRY(353, 45, -24, 69, -71, 61, -55, 24, 17),
+ CABAC_ENTRY(354, 53, -45, 63, -63, 56, -46, 21, 21),
+ CABAC_ENTRY(355, 48, -26, 66, -64, 62, -50, 25, 22),
+ CABAC_ENTRY(356, 65, -43, 77, -74, 81, -67, 31, 27),
+ CABAC_ENTRY(357, 43, -19, 54, -39, 45, -20, 22, 29),
+ CABAC_ENTRY(358, 39, -10, 52, -35, 35, -2, 19, 35),
+ CABAC_ENTRY(359, 30, 9, 41, -10, 28, 15, 14, 50),
+ CABAC_ENTRY(360, 18, 26, 36, 0, 34, 1, 10, 57),
+ CABAC_ENTRY(361, 20, 27, 40, -1, 39, 1, 7, 63),
+ CABAC_ENTRY(362, 0, 57, 30, 14, 30, 17, -2, 77),
+ CABAC_ENTRY(363, -14, 82, 28, 26, 20, 38, -4, 82),
+ CABAC_ENTRY(364, -5, 75, 23, 37, 18, 45, -3, 94),
+ CABAC_ENTRY(365, -19, 97, 12, 55, 15, 54, 9, 69),
+ CABAC_ENTRY(366, -35, 125, 11, 65, 0, 79, -12, 109),
+ CABAC_ENTRY(367, 27, 0, 37, -33, 36, -16, 36, -35),
+ CABAC_ENTRY(368, 28, 0, 39, -36, 37, -14, 36, -34),
+ CABAC_ENTRY(369, 31, -4, 40, -37, 37, -17, 32, -26),
+ CABAC_ENTRY(370, 27, 6, 38, -30, 32, 1, 37, -30),
+ CABAC_ENTRY(371, 34, 8, 46, -33, 34, 15, 44, -32),
+ CABAC_ENTRY(372, 30, 10, 42, -30, 29, 15, 34, -18),
+ CABAC_ENTRY(373, 24, 22, 40, -24, 24, 25, 34, -15),
+ CABAC_ENTRY(374, 33, 19, 49, -29, 34, 22, 40, -15),
+ CABAC_ENTRY(375, 22, 32, 38, -12, 31, 16, 33, -7),
+ CABAC_ENTRY(376, 26, 31, 40, -10, 35, 18, 35, -5),
+ CABAC_ENTRY(377, 21, 41, 38, -3, 31, 28, 33, 0),
+ CABAC_ENTRY(378, 26, 44, 46, -5, 33, 41, 38, 2),
+ CABAC_ENTRY(379, 23, 47, 31, 20, 36, 28, 33, 13),
+ CABAC_ENTRY(380, 16, 65, 29, 30, 27, 47, 23, 35),
+ CABAC_ENTRY(381, 14, 71, 25, 44, 21, 62, 13, 58),
+ CABAC_ENTRY(382, 8, 60, 12, 48, 18, 31, 29, -3),
+ CABAC_ENTRY(383, 6, 63, 11, 49, 19, 26, 26, 0),
+ CABAC_ENTRY(384, 17, 65, 26, 45, 36, 24, 22, 30),
+ CABAC_ENTRY(385, 21, 24, 22, 22, 24, 23, 31, -7),
+ CABAC_ENTRY(386, 23, 20, 23, 22, 27, 16, 35, -15),
+ CABAC_ENTRY(387, 26, 23, 27, 21, 24, 30, 34, -3),
+ CABAC_ENTRY(388, 27, 32, 33, 20, 31, 29, 34, 3),
+ CABAC_ENTRY(389, 28, 23, 26, 28, 22, 41, 36, -1),
+ CABAC_ENTRY(390, 28, 24, 30, 24, 22, 42, 34, 5),
+ CABAC_ENTRY(391, 23, 40, 27, 34, 16, 60, 32, 11),
+ CABAC_ENTRY(392, 24, 32, 18, 42, 15, 52, 35, 5),
+ CABAC_ENTRY(393, 28, 29, 25, 39, 14, 60, 34, 12),
+ CABAC_ENTRY(394, 23, 42, 18, 50, 3, 78, 39, 11),
+ CABAC_ENTRY(395, 19, 57, 12, 70, -16, 123, 30, 29),
+ CABAC_ENTRY(396, 22, 53, 21, 54, 21, 53, 34, 26),
+ CABAC_ENTRY(397, 22, 61, 14, 71, 22, 56, 29, 39),
+ CABAC_ENTRY(398, 11, 86, 11, 83, 25, 61, 19, 66),
+
+ /* Values of variables m and n for ctxIdx from 399 to 463 (not documented) */
+ CABAC_ENTRY(399, 12, 40, 25, 32, 21, 33, 31, 21),
+ CABAC_ENTRY(400, 11, 51, 21, 49, 19, 50, 31, 31),
+ CABAC_ENTRY(401, 14, 59, 21, 54, 17, 61, 25, 50),
+ CABAC_ENTRY(402, -4, 79, -5, 85, -3, 78, -17, 120),
+ CABAC_ENTRY(403, -7, 71, -6, 81, -8, 74, -20, 112),
+ CABAC_ENTRY(404, -5, 69, -10, 77, -9, 72, -18, 114),
+ CABAC_ENTRY(405, -9, 70, -7, 81, -10, 72, -11, 85),
+ CABAC_ENTRY(406, -8, 66, -17, 80, -18, 75, -15, 92),
+ CABAC_ENTRY(407, -10, 68, -18, 73, -12, 71, -14, 89),
+ CABAC_ENTRY(408, -19, 73, -4, 74, -11, 63, -26, 71),
+ CABAC_ENTRY(409, -12, 69, -10, 83, -5, 70, -15, 81),
+ CABAC_ENTRY(410, -16, 70, -9, 71, -17, 75, -14, 80),
+ CABAC_ENTRY(411, -15, 67, -9, 67, -14, 72, 0, 68),
+ CABAC_ENTRY(412, -20, 62, -1, 61, -16, 67, -14, 70),
+ CABAC_ENTRY(413, -19, 70, -8, 66, -8, 53, -24, 56),
+ CABAC_ENTRY(414, -16, 66, -14, 66, -14, 59, -23, 68),
+ CABAC_ENTRY(415, -22, 65, 0, 59, -9, 52, -24, 50),
+ CABAC_ENTRY(416, -20, 63, 2, 59, -11, 68, -11, 74),
+ CABAC_ENTRY(417, 9, -2, 17, -10, 9, -2, 23, -13),
+ CABAC_ENTRY(418, 26, -9, 32, -13, 30, -10, 26, -13),
+ CABAC_ENTRY(419, 33, -9, 42, -9, 31, -4, 40, -15),
+ CABAC_ENTRY(420, 39, -7, 49, -5, 33, -1, 49, -14),
+ CABAC_ENTRY(421, 41, -2, 53, 0, 33, 7, 44, 3),
+ CABAC_ENTRY(422, 45, 3, 64, 3, 31, 12, 45, 6),
+ CABAC_ENTRY(423, 49, 9, 68, 10, 37, 23, 44, 34),
+ CABAC_ENTRY(424, 45, 27, 66, 27, 31, 38, 33, 54),
+ CABAC_ENTRY(425, 36, 59, 47, 57, 20, 64, 19, 82),
+ CABAC_ENTRY(426, -6, 66, -5, 71, -9, 71, -3, 75),
+ CABAC_ENTRY(427, -7, 35, 0, 24, -7, 37, -1, 23),
+ CABAC_ENTRY(428, -7, 42, -1, 36, -8, 44, 1, 34),
+ CABAC_ENTRY(429, -8, 45, -2, 42, -11, 49, 1, 43),
+ CABAC_ENTRY(430, -5, 48, -2, 52, -10, 56, 0, 54),
+ CABAC_ENTRY(431, -12, 56, -9, 57, -12, 59, -2, 55),
+ CABAC_ENTRY(432, -6, 60, -6, 63, -8, 63, 0, 61),
+ CABAC_ENTRY(433, -5, 62, -4, 65, -9, 67, 1, 64),
+ CABAC_ENTRY(434, -8, 66, -4, 67, -6, 68, 0, 68),
+ CABAC_ENTRY(435, -8, 76, -7, 82, -10, 79, -9, 92),
+ CABAC_ENTRY(436, -5, 85, -3, 81, -3, 78, -14, 106),
+ CABAC_ENTRY(437, -6, 81, -3, 76, -8, 74, -13, 97),
+ CABAC_ENTRY(438, -10, 77, -7, 72, -9, 72, -15, 90),
+ CABAC_ENTRY(439, -7, 81, -6, 78, -10, 72, -12, 90),
+ CABAC_ENTRY(440, -17, 80, -12, 72, -18, 75, -18, 88),
+ CABAC_ENTRY(441, -18, 73, -14, 68, -12, 71, -10, 73),
+ CABAC_ENTRY(442, -4, 74, -3, 70, -11, 63, -9, 79),
+ CABAC_ENTRY(443, -10, 83, -6, 76, -5, 70, -14, 86),
+ CABAC_ENTRY(444, -9, 71, -5, 66, -17, 75, -10, 73),
+ CABAC_ENTRY(445, -9, 67, -5, 62, -14, 72, -10, 70),
+ CABAC_ENTRY(446, -1, 61, 0, 57, -16, 67, -10, 69),
+ CABAC_ENTRY(447, -8, 66, -4, 61, -8, 53, -5, 66),
+ CABAC_ENTRY(448, -14, 66, -9, 60, -14, 59, -9, 64),
+ CABAC_ENTRY(449, 0, 59, 1, 54, -9, 52, -5, 58),
+ CABAC_ENTRY(450, 2, 59, 2, 58, -11, 68, 2, 59),
+ CABAC_ENTRY(451, 21, -13, 17, -10, 9, -2, 21, -10),
+ CABAC_ENTRY(452, 33, -14, 32, -13, 30, -10, 24, -11),
+ CABAC_ENTRY(453, 39, -7, 42, -9, 31, -4, 28, -8),
+ CABAC_ENTRY(454, 46, -2, 49, -5, 33, -1, 28, -1),
+ CABAC_ENTRY(455, 51, 2, 53, 0, 33, 7, 29, 3),
+ CABAC_ENTRY(456, 60, 6, 64, 3, 31, 12, 29, 9),
+ CABAC_ENTRY(457, 61, 17, 68, 10, 37, 23, 35, 20),
+ CABAC_ENTRY(458, 55, 34, 66, 27, 31, 38, 29, 36),
+ CABAC_ENTRY(459, 42, 62, 47, 57, 20, 64, 14, 67),
+};
+
+static void set_ps_field(u32 *buf, struct rkvdec_ps_field field, u32 value)
+{
+ u8 bit = field.offset % 32, word = field.offset / 32;
+ u64 mask = GENMASK_ULL(bit + field.len - 1, bit);
+ u64 val = ((u64)value << bit) & mask;
+
+ buf[word] &= ~mask;
+ buf[word] |= val;
+ if (bit + field.len > 32) {
+ buf[word + 1] &= ~(mask >> 32);
+ buf[word + 1] |= val >> 32;
+ }
+}
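
A worked instance of the cross-word branch above (a sketch, not part of
the patch): SCALING_LIST_ADDRESS is PS_FIELD(184, 32), so the write
starts at bit 184 % 32 = 24 of word 184 / 32 = 5 and spills into word 6:

	u32 info[8] = { 0 };

	/* mask = GENMASK_ULL(55, 24), val = (u64)0xdeadbeef << 24 */
	set_ps_field(info, SCALING_LIST_ADDRESS, 0xdeadbeef);
	/* info[5] bits 31:24 now hold 0xef, info[6] bits 23:0 hold 0xdeadbe */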
+
+static void assemble_hw_pps(struct rkvdec_ctx *ctx,
+ struct rkvdec_h264_run *run)
+{
+ struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_ctrl_h264_sps *sps = run->sps;
+ const struct v4l2_ctrl_h264_pps *pps = run->pps;
+ const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
+ const struct v4l2_h264_dpb_entry *dpb = dec_params->dpb;
+ struct rkvdec_h264_priv_tbl *priv_tbl = h264_ctx->priv_tbl.cpu;
+ struct rkvdec_sps_pps_packet *hw_ps;
+ dma_addr_t scaling_list_address;
+ u32 scaling_distance;
+ u32 i;
+
+	/*
+	 * The hardware reads the SPS/PPS information from the PPS packet
+	 * indexed by the PPS id. The offset from the base can be calculated
+	 * as PPS_id * 32 (the size of one PPS packet unit), so the driver
+	 * copies the SPS/PPS information to the exact PPS packet unit the
+	 * hardware will access.
+	 */
+ hw_ps = &priv_tbl->param_set[pps->pic_parameter_set_id];
+ memset(hw_ps, 0, sizeof(*hw_ps));
+
+#define WRITE_PPS(value, field) set_ps_field(hw_ps->info, field, value)
+ /* write sps */
+ WRITE_PPS(0xf, SEQ_PARAMETER_SET_ID);
+ WRITE_PPS(0xff, PROFILE_IDC);
+ WRITE_PPS(1, CONSTRAINT_SET3_FLAG);
+ WRITE_PPS(sps->chroma_format_idc, CHROMA_FORMAT_IDC);
+ WRITE_PPS(sps->bit_depth_luma_minus8 + 8, BIT_DEPTH_LUMA);
+ WRITE_PPS(sps->bit_depth_chroma_minus8 + 8, BIT_DEPTH_CHROMA);
+ WRITE_PPS(0, QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG);
+ WRITE_PPS(sps->log2_max_frame_num_minus4, LOG2_MAX_FRAME_NUM_MINUS4);
+ WRITE_PPS(sps->max_num_ref_frames, MAX_NUM_REF_FRAMES);
+ WRITE_PPS(sps->pic_order_cnt_type, PIC_ORDER_CNT_TYPE);
+ WRITE_PPS(sps->log2_max_pic_order_cnt_lsb_minus4,
+ LOG2_MAX_PIC_ORDER_CNT_LSB_MINUS4);
+ WRITE_PPS(!!(sps->flags & V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO),
+ DELTA_PIC_ORDER_ALWAYS_ZERO_FLAG);
+ WRITE_PPS(DIV_ROUND_UP(ctx->coded_fmt.fmt.pix_mp.width, 16), PIC_WIDTH_IN_MBS);
+ WRITE_PPS(DIV_ROUND_UP(ctx->coded_fmt.fmt.pix_mp.height, 16), PIC_HEIGHT_IN_MBS);
+ WRITE_PPS(!!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY),
+ FRAME_MBS_ONLY_FLAG);
+ WRITE_PPS(!!(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD),
+ MB_ADAPTIVE_FRAME_FIELD_FLAG);
+ WRITE_PPS(!!(sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE),
+ DIRECT_8X8_INFERENCE_FLAG);
+
+ /* write pps */
+ WRITE_PPS(0xff, PIC_PARAMETER_SET_ID);
+ WRITE_PPS(0x1f, PPS_SEQ_PARAMETER_SET_ID);
+ WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE),
+ ENTROPY_CODING_MODE_FLAG);
+ WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT),
+ BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT_FLAG);
+ WRITE_PPS(pps->num_ref_idx_l0_default_active_minus1,
+ NUM_REF_IDX_L_DEFAULT_ACTIVE_MINUS1(0));
+ WRITE_PPS(pps->num_ref_idx_l1_default_active_minus1,
+ NUM_REF_IDX_L_DEFAULT_ACTIVE_MINUS1(1));
+ WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED),
+ WEIGHTED_PRED_FLAG);
+ WRITE_PPS(pps->weighted_bipred_idc, WEIGHTED_BIPRED_IDC);
+ WRITE_PPS(pps->pic_init_qp_minus26, PIC_INIT_QP_MINUS26);
+ WRITE_PPS(pps->pic_init_qs_minus26, PIC_INIT_QS_MINUS26);
+ WRITE_PPS(pps->chroma_qp_index_offset, CHROMA_QP_INDEX_OFFSET);
+ WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT),
+ DEBLOCKING_FILTER_CONTROL_PRESENT_FLAG);
+ WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED),
+ CONSTRAINED_INTRA_PRED_FLAG);
+ WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT),
+ REDUNDANT_PIC_CNT_PRESENT);
+ WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE),
+ TRANSFORM_8X8_MODE_FLAG);
+ WRITE_PPS(pps->second_chroma_qp_index_offset,
+ SECOND_CHROMA_QP_INDEX_OFFSET);
+
+ /* always use the matrix sent from userspace */
+ WRITE_PPS(1, SCALING_LIST_ENABLE_FLAG);
+
+ scaling_distance = offsetof(struct rkvdec_h264_priv_tbl, scaling_list);
+ scaling_list_address = h264_ctx->priv_tbl.dma + scaling_distance;
+ WRITE_PPS(scaling_list_address, SCALING_LIST_ADDRESS);
+
+ for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
+ u32 is_longterm = 0;
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ is_longterm = 1;
+
+ WRITE_PPS(is_longterm, IS_LONG_TERM(i));
+ }
+}
+
+static void assemble_hw_rps(struct rkvdec_ctx *ctx,
+ struct rkvdec_h264_run *run)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
+ const struct v4l2_ctrl_h264_slice_params *sl_params = &run->slices_params[0];
+ const struct v4l2_h264_dpb_entry *dpb = dec_params->dpb;
+ struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
+ const struct v4l2_ctrl_h264_sps *sps = run->sps;
+ struct rkvdec_h264_priv_tbl *priv_tbl = h264_ctx->priv_tbl.cpu;
+ u32 max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+
+ u32 *hw_rps = priv_tbl->rps;
+ u32 i, j;
+ u16 *p = (u16 *)hw_rps;
+
+ memset(hw_rps, 0, sizeof(priv_tbl->rps));
+
+	/*
+	 * Assign an invalid pic_num if the DPB entry at that position is
+	 * inactive. If we assigned 0 there, the hardware would treat it as
+	 * a real reference picture with pic_num 0, triggering output
+	 * picture corruption.
+	 */
+ for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
+ if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM ||
+ dpb[i].frame_num < sl_params->frame_num) {
+ p[i] = dpb[i].frame_num;
+ continue;
+ }
+
+ p[i] = dpb[i].frame_num - max_frame_num;
+ }
+
+ for (j = 0; j < RKVDEC_NUM_REFLIST; j++) {
+ for (i = 0; i < h264_ctx->reflists.num_valid; i++) {
+ u8 dpb_valid = 0;
+ u8 idx = 0;
+
+ switch (j) {
+ case 0:
+ idx = h264_ctx->reflists.p[i];
+ break;
+ case 1:
+ idx = h264_ctx->reflists.b0[i];
+ break;
+ case 2:
+ idx = h264_ctx->reflists.b1[i];
+ break;
+ }
+
+ if (idx >= ARRAY_SIZE(dec_params->dpb))
+ continue;
+ dpb_valid = !!(dpb[idx].flags &
+ V4L2_H264_DPB_ENTRY_FLAG_ACTIVE);
+
+ set_ps_field(hw_rps, DPB_INFO(i, j),
+ idx | dpb_valid << 4);
+ }
+ }
+}
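
A worked instance of the wrap handling above (annotation, not part of
the patch): with log2_max_frame_num_minus4 = 0, max_frame_num is 16. A
short-term reference with frame_num = 14, seen while the current slice
has frame_num = 3, was decoded before the counter wrapped, so it is
stored as 14 - 16 = -2, matching the FrameNumWrap rule of section
8.2.4.1 of the H.264 spec:

	/* max_frame_num = 1 << (0 + 4) = 16 */
	p[i] = 14 - 16;		/* FrameNumWrap = -2, stored as 0xfffe */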
+
+/*
+ * NOTE: The values in a scaling list are in zig-zag order; apply the
+ * inverse scanning process to get the values in matrix order.
+ */
+static const u32 zig_zag_4x4[16] = {
+ 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
+};
+
+static const u32 zig_zag_8x8[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+static void reorder_scaling_list(struct rkvdec_ctx *ctx,
+ struct rkvdec_h264_run *run)
+{
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling = run->scaling_matrix;
+ const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
+ const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
+ const size_t num_list_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8);
+ const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
+ struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
+ struct rkvdec_h264_priv_tbl *tbl = h264_ctx->priv_tbl.cpu;
+ u8 *dst = tbl->scaling_list;
+ const u8 *src;
+ int i, j;
+
+ BUILD_BUG_ON(ARRAY_SIZE(zig_zag_4x4) != list_len_4x4);
+ BUILD_BUG_ON(ARRAY_SIZE(zig_zag_8x8) != list_len_8x8);
+ BUILD_BUG_ON(ARRAY_SIZE(tbl->scaling_list) <
+ num_list_4x4 * list_len_4x4 +
+ num_list_8x8 * list_len_8x8);
+
+ src = &scaling->scaling_list_4x4[0][0];
+ for (i = 0; i < num_list_4x4; ++i) {
+ for (j = 0; j < list_len_4x4; ++j)
+ dst[zig_zag_4x4[j]] = src[j];
+ src += list_len_4x4;
+ dst += list_len_4x4;
+ }
+
+ src = &scaling->scaling_list_8x8[0][0];
+ for (i = 0; i < num_list_8x8; ++i) {
+ for (j = 0; j < list_len_8x8; ++j)
+ dst[zig_zag_8x8[j]] = src[j];
+ src += list_len_8x8;
+ dst += list_len_8x8;
+ }
+}
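
The inverse scan is easy to verify by hand (a stand-alone sketch of the
4x4 loop above, not part of the patch; zz[] is assumed to hold the
zig-zag-ordered control payload):

	u8 zz[16], raster[16];
	int j;

	for (j = 0; j < 16; j++)
		raster[zig_zag_4x4[j]] = zz[j];

	/*
	 * E.g. the third transmitted value, zz[2], lands at raster[4],
	 * i.e. row 1, column 0 of the 4x4 matrix.
	 */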
+
+/*
+ * Tables of DPB POC related registers.
+ */
+static const u32 poc_reg_tbl_top_field[16] = {
+ RKVDEC_REG_H264_POC_REFER0(0),
+ RKVDEC_REG_H264_POC_REFER0(2),
+ RKVDEC_REG_H264_POC_REFER0(4),
+ RKVDEC_REG_H264_POC_REFER0(6),
+ RKVDEC_REG_H264_POC_REFER0(8),
+ RKVDEC_REG_H264_POC_REFER0(10),
+ RKVDEC_REG_H264_POC_REFER0(12),
+ RKVDEC_REG_H264_POC_REFER0(14),
+ RKVDEC_REG_H264_POC_REFER1(1),
+ RKVDEC_REG_H264_POC_REFER1(3),
+ RKVDEC_REG_H264_POC_REFER1(5),
+ RKVDEC_REG_H264_POC_REFER1(7),
+ RKVDEC_REG_H264_POC_REFER1(9),
+ RKVDEC_REG_H264_POC_REFER1(11),
+ RKVDEC_REG_H264_POC_REFER1(13),
+ RKVDEC_REG_H264_POC_REFER2(0)
+};
+
+static const u32 poc_reg_tbl_bottom_field[16] = {
+ RKVDEC_REG_H264_POC_REFER0(1),
+ RKVDEC_REG_H264_POC_REFER0(3),
+ RKVDEC_REG_H264_POC_REFER0(5),
+ RKVDEC_REG_H264_POC_REFER0(7),
+ RKVDEC_REG_H264_POC_REFER0(9),
+ RKVDEC_REG_H264_POC_REFER0(11),
+ RKVDEC_REG_H264_POC_REFER0(13),
+ RKVDEC_REG_H264_POC_REFER1(0),
+ RKVDEC_REG_H264_POC_REFER1(2),
+ RKVDEC_REG_H264_POC_REFER1(4),
+ RKVDEC_REG_H264_POC_REFER1(6),
+ RKVDEC_REG_H264_POC_REFER1(8),
+ RKVDEC_REG_H264_POC_REFER1(10),
+ RKVDEC_REG_H264_POC_REFER1(12),
+ RKVDEC_REG_H264_POC_REFER1(14),
+ RKVDEC_REG_H264_POC_REFER2(1)
+};
+
+static struct vb2_buffer *
+get_ref_buf(struct rkvdec_ctx *ctx, struct rkvdec_h264_run *run,
+ unsigned int dpb_idx)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ const struct v4l2_h264_dpb_entry *dpb = run->decode_params->dpb;
+ struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
+ int buf_idx = -1;
+
+ if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ buf_idx = vb2_find_timestamp(cap_q,
+ dpb[dpb_idx].reference_ts, 0);
+
+	/*
+	 * If the DPB entry is unused or invalid, the address of the current
+	 * destination buffer is returned instead.
+	 */
+ if (buf_idx < 0)
+ return &run->base.bufs.dst->vb2_buf;
+
+ return vb2_get_buffer(cap_q, buf_idx);
+}
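
Usage sketch from the other side of the API (illustrative, not part of
the patch; the variable names are hypothetical): a stateless-decoder
client ties a DPB entry to a previously decoded CAPTURE buffer through
the buffer timestamp, which is the key vb2_find_timestamp() resolves:

	struct v4l2_buffer ref_buf;	/* dequeued CAPTURE buffer holding the reference */

	dec_params.dpb[0].flags = V4L2_H264_DPB_ENTRY_FLAG_ACTIVE;
	dec_params.dpb[0].reference_ts = v4l2_timeval_to_ns(&ref_buf.timestamp);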
+
+static void config_registers(struct rkvdec_ctx *ctx,
+ struct rkvdec_h264_run *run)
+{
+ struct rkvdec_dev *rkvdec = ctx->dev;
+ const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
+ const struct v4l2_ctrl_h264_sps *sps = run->sps;
+ const struct v4l2_h264_dpb_entry *dpb = dec_params->dpb;
+ struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
+ dma_addr_t priv_start_addr = h264_ctx->priv_tbl.dma;
+ const struct v4l2_pix_format_mplane *dst_fmt;
+ struct vb2_v4l2_buffer *src_buf = run->base.bufs.src;
+ struct vb2_v4l2_buffer *dst_buf = run->base.bufs.dst;
+ const struct v4l2_format *f;
+ dma_addr_t rlc_addr;
+ dma_addr_t refer_addr;
+ u32 rlc_len;
+ u32 hor_virstride = 0;
+ u32 ver_virstride = 0;
+ u32 y_virstride = 0;
+ u32 yuv_virstride = 0;
+ u32 offset;
+ dma_addr_t dst_addr;
+ u32 reg, i;
+
+ reg = RKVDEC_MODE(RKVDEC_MODE_H264);
+ writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_SYSCTRL);
+
+ f = &ctx->decoded_fmt;
+ dst_fmt = &f->fmt.pix_mp;
+ hor_virstride = (sps->bit_depth_luma_minus8 + 8) * dst_fmt->width / 8;
+ ver_virstride = round_up(dst_fmt->height, 16);
+ y_virstride = hor_virstride * ver_virstride;
+
+ if (sps->chroma_format_idc == 0)
+ yuv_virstride = y_virstride;
+ else if (sps->chroma_format_idc == 1)
+ yuv_virstride += y_virstride + y_virstride / 2;
+ else if (sps->chroma_format_idc == 2)
+ yuv_virstride += 2 * y_virstride;
+
+ reg = RKVDEC_Y_HOR_VIRSTRIDE(hor_virstride / 16) |
+ RKVDEC_UV_HOR_VIRSTRIDE(hor_virstride / 16) |
+ RKVDEC_SLICE_NUM_HIGHBIT |
+ RKVDEC_SLICE_NUM_LOWBITS(0x7ff);
+ writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_PICPAR);
+
+ /* config rlc base address */
+ rlc_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ writel_relaxed(rlc_addr, rkvdec->regs + RKVDEC_REG_STRM_RLC_BASE);
+ writel_relaxed(rlc_addr, rkvdec->regs + RKVDEC_REG_RLCWRITE_BASE);
+
+ rlc_len = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ reg = RKVDEC_STRM_LEN(rlc_len);
+ writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_STRM_LEN);
+
+ /* config cabac table */
+ offset = offsetof(struct rkvdec_h264_priv_tbl, cabac_table);
+ writel_relaxed(priv_start_addr + offset,
+ rkvdec->regs + RKVDEC_REG_CABACTBL_PROB_BASE);
+
+ /* config output base address */
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ writel_relaxed(dst_addr, rkvdec->regs + RKVDEC_REG_DECOUT_BASE);
+
+ reg = RKVDEC_Y_VIRSTRIDE(y_virstride / 16);
+ writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_Y_VIRSTRIDE);
+
+ reg = RKVDEC_YUV_VIRSTRIDE(yuv_virstride / 16);
+ writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_YUV_VIRSTRIDE);
+
+ /* config ref pic address & poc */
+ for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
+ struct vb2_buffer *vb_buf = get_ref_buf(ctx, run, i);
+
+ refer_addr = vb2_dma_contig_plane_dma_addr(vb_buf, 0) |
+ RKVDEC_COLMV_USED_FLAG_REF;
+
+ if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_FIELD))
+ refer_addr |= RKVDEC_TOPFIELD_USED_REF |
+ RKVDEC_BOTFIELD_USED_REF;
+ else if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_BOTTOM_FIELD)
+ refer_addr |= RKVDEC_BOTFIELD_USED_REF;
+ else
+ refer_addr |= RKVDEC_TOPFIELD_USED_REF;
+
+ writel_relaxed(dpb[i].top_field_order_cnt,
+ rkvdec->regs + poc_reg_tbl_top_field[i]);
+ writel_relaxed(dpb[i].bottom_field_order_cnt,
+ rkvdec->regs + poc_reg_tbl_bottom_field[i]);
+
+ if (i < V4L2_H264_NUM_DPB_ENTRIES - 1)
+ writel_relaxed(refer_addr,
+ rkvdec->regs + RKVDEC_REG_H264_BASE_REFER(i));
+ else
+ writel_relaxed(refer_addr,
+ rkvdec->regs + RKVDEC_REG_H264_BASE_REFER15);
+ }
+
+	/*
+	 * Since only frame mode is supported, top_field_order_cnt is the
+	 * same as bottom_field_order_cnt.
+	 */
+ reg = RKVDEC_CUR_POC(dec_params->top_field_order_cnt);
+ writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_CUR_POC0);
+
+ reg = RKVDEC_CUR_POC(dec_params->bottom_field_order_cnt);
+ writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_CUR_POC1);
+
+ /* config hw pps address */
+ offset = offsetof(struct rkvdec_h264_priv_tbl, param_set);
+ writel_relaxed(priv_start_addr + offset,
+ rkvdec->regs + RKVDEC_REG_PPS_BASE);
+
+ /* config hw rps address */
+ offset = offsetof(struct rkvdec_h264_priv_tbl, rps);
+ writel_relaxed(priv_start_addr + offset,
+ rkvdec->regs + RKVDEC_REG_RPS_BASE);
+
+ reg = RKVDEC_AXI_DDR_RDATA(0);
+ writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_AXI_DDR_RDATA);
+
+ reg = RKVDEC_AXI_DDR_WDATA(0);
+ writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_AXI_DDR_WDATA);
+
+ offset = offsetof(struct rkvdec_h264_priv_tbl, err_info);
+ writel_relaxed(priv_start_addr + offset,
+ rkvdec->regs + RKVDEC_REG_H264_ERRINFO_BASE);
+}
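
To make the stride math above concrete (a worked example, not part of
the patch): an 8-bit 4:2:0 stream decoded into a 1920x1080 NV12 buffer
gives

	hor_virstride = (0 + 8) * 1920 / 8  =    1920 bytes
	ver_virstride = round_up(1080, 16)  =    1088 lines
	y_virstride   = 1920 * 1088         = 2088960 bytes
	yuv_virstride = y_virstride * 3 / 2 = 3133440 bytes

and the RKVDEC_*_VIRSTRIDE registers are programmed with these values
divided by 16.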
+
+#define RKVDEC_H264_MAX_DEPTH_IN_BYTES 2
+
+static int rkvdec_h264_adjust_fmt(struct rkvdec_ctx *ctx,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *fmt = &f->fmt.pix_mp;
+
+ fmt->num_planes = 1;
+ fmt->plane_fmt[0].sizeimage = fmt->width * fmt->height *
+ RKVDEC_H264_MAX_DEPTH_IN_BYTES;
+ return 0;
+}
+
+static int rkvdec_h264_start(struct rkvdec_ctx *ctx)
+{
+ struct rkvdec_dev *rkvdec = ctx->dev;
+ struct rkvdec_h264_priv_tbl *priv_tbl;
+ struct rkvdec_h264_ctx *h264_ctx;
+ int ret;
+
+ h264_ctx = kzalloc(sizeof(*h264_ctx), GFP_KERNEL);
+ if (!h264_ctx)
+ return -ENOMEM;
+
+ priv_tbl = dma_alloc_coherent(rkvdec->dev, sizeof(*priv_tbl),
+ &h264_ctx->priv_tbl.dma, GFP_KERNEL);
+ if (!priv_tbl) {
+ ret = -ENOMEM;
+ goto err_free_ctx;
+ }
+
+ h264_ctx->priv_tbl.size = sizeof(*priv_tbl);
+ h264_ctx->priv_tbl.cpu = priv_tbl;
+ memcpy(priv_tbl->cabac_table, rkvdec_h264_cabac_table,
+ sizeof(rkvdec_h264_cabac_table));
+
+ ctx->priv = h264_ctx;
+ return 0;
+
+err_free_ctx:
+ kfree(h264_ctx);
+ return ret;
+}
+
+static void rkvdec_h264_stop(struct rkvdec_ctx *ctx)
+{
+ struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
+ struct rkvdec_dev *rkvdec = ctx->dev;
+
+ dma_free_coherent(rkvdec->dev, h264_ctx->priv_tbl.size,
+ h264_ctx->priv_tbl.cpu, h264_ctx->priv_tbl.dma);
+ kfree(h264_ctx);
+}
+
+static void rkvdec_h264_run_preamble(struct rkvdec_ctx *ctx,
+ struct rkvdec_h264_run *run)
+{
+ struct v4l2_ctrl *ctrl;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS);
+ run->decode_params = ctrl ? ctrl->p_cur.p : NULL;
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS);
+ run->slices_params = ctrl ? ctrl->p_cur.p : NULL;
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_MPEG_VIDEO_H264_SPS);
+ run->sps = ctrl ? ctrl->p_cur.p : NULL;
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_MPEG_VIDEO_H264_PPS);
+ run->pps = ctrl ? ctrl->p_cur.p : NULL;
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX);
+ run->scaling_matrix = ctrl ? ctrl->p_cur.p : NULL;
+
+ rkvdec_run_preamble(ctx, &run->base);
+}
+
+static int rkvdec_h264_run(struct rkvdec_ctx *ctx)
+{
+ struct v4l2_h264_reflist_builder reflist_builder;
+ struct rkvdec_dev *rkvdec = ctx->dev;
+ struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
+ struct rkvdec_h264_run run;
+
+ rkvdec_h264_run_preamble(ctx, &run);
+
+ /* Build the P/B{0,1} ref lists. */
+ v4l2_h264_init_reflist_builder(&reflist_builder, run.decode_params,
+ &run.slices_params[0], run.sps,
+ run.decode_params->dpb);
+ h264_ctx->reflists.num_valid = reflist_builder.num_valid;
+ v4l2_h264_build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
+ v4l2_h264_build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
+ h264_ctx->reflists.b1);
+
+ reorder_scaling_list(ctx, &run);
+ assemble_hw_pps(ctx, &run);
+ assemble_hw_rps(ctx, &run);
+ config_registers(ctx, &run);
+
+ rkvdec_run_postamble(ctx, &run.base);
+
+ schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));
+
+ writel(0xffffffff, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
+ writel(0xffffffff, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
+ writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
+ writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);
+
+ /* Start decoding! */
+ writel(RKVDEC_INTERRUPT_DEC_E | RKVDEC_CONFIG_DEC_CLK_GATE_E |
+ RKVDEC_TIMEOUT_E | RKVDEC_BUF_EMPTY_E,
+ rkvdec->regs + RKVDEC_REG_INTERRUPT);
+
+ return 0;
+}
+
+const struct rkvdec_coded_fmt_ops rkvdec_h264_fmt_ops = {
+ .adjust_fmt = rkvdec_h264_adjust_fmt,
+ .start = rkvdec_h264_start,
+ .stop = rkvdec_h264_stop,
+ .run = rkvdec_h264_run,
+};
diff --git a/drivers/staging/media/rkvdec/rkvdec-regs.h b/drivers/staging/media/rkvdec/rkvdec-regs.h
new file mode 100644
index 000000000000..15b9bee92016
--- /dev/null
+++ b/drivers/staging/media/rkvdec/rkvdec-regs.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef RKVDEC_REGS_H_
+#define RKVDEC_REGS_H_
+
+/* rkvcodec registers */
+#define RKVDEC_REG_INTERRUPT 0x004
+#define RKVDEC_INTERRUPT_DEC_E BIT(0)
+#define RKVDEC_CONFIG_DEC_CLK_GATE_E BIT(1)
+#define RKVDEC_E_STRMD_CLKGATE_DIS BIT(2)
+#define RKVDEC_TIMEOUT_MODE BIT(3)
+#define RKVDEC_IRQ_DIS BIT(4)
+#define RKVDEC_TIMEOUT_E BIT(5)
+#define RKVDEC_BUF_EMPTY_E BIT(6)
+#define RKVDEC_STRM_E_WAITDECFIFO_EMPTY BIT(7)
+#define RKVDEC_IRQ BIT(8)
+#define RKVDEC_IRQ_RAW BIT(9)
+#define RKVDEC_E_REWRITE_VALID BIT(10)
+#define RKVDEC_COMMONIRQ_MODE BIT(11)
+#define RKVDEC_RDY_STA BIT(12)
+#define RKVDEC_BUS_STA BIT(13)
+#define RKVDEC_ERR_STA BIT(14)
+#define RKVDEC_TIMEOUT_STA BIT(15)
+#define RKVDEC_BUF_EMPTY_STA BIT(16)
+#define RKVDEC_COLMV_REF_ERR_STA BIT(17)
+#define RKVDEC_CABU_END_STA BIT(18)
+#define RKVDEC_H264ORVP9_ERR_MODE BIT(19)
+#define RKVDEC_SOFTRST_EN_P BIT(20)
+#define RKVDEC_FORCE_SOFTRESET_VALID BIT(21)
+#define RKVDEC_SOFTRESET_RDY BIT(22)
+
+#define RKVDEC_REG_SYSCTRL 0x008
+#define RKVDEC_IN_ENDIAN BIT(0)
+#define RKVDEC_IN_SWAP32_E BIT(1)
+#define RKVDEC_IN_SWAP64_E BIT(2)
+#define RKVDEC_STR_ENDIAN BIT(3)
+#define RKVDEC_STR_SWAP32_E BIT(4)
+#define RKVDEC_STR_SWAP64_E BIT(5)
+#define RKVDEC_OUT_ENDIAN BIT(6)
+#define RKVDEC_OUT_SWAP32_E BIT(7)
+#define RKVDEC_OUT_CBCR_SWAP BIT(8)
+#define RKVDEC_RLC_MODE_DIRECT_WRITE BIT(10)
+#define RKVDEC_RLC_MODE BIT(11)
+#define RKVDEC_STRM_START_BIT(x) (((x) & 0x7f) << 12)
+#define RKVDEC_MODE(x) (((x) & 0x03) << 20)
+#define RKVDEC_MODE_H264 1
+#define RKVDEC_MODE_VP9 2
+#define RKVDEC_RPS_MODE BIT(24)
+#define RKVDEC_STRM_MODE BIT(25)
+#define RKVDEC_H264_STRM_LASTPKT BIT(26)
+#define RKVDEC_H264_FIRSTSLICE_FLAG BIT(27)
+#define RKVDEC_H264_FRAME_ORSLICE BIT(28)
+#define RKVDEC_BUSPR_SLOT_DIS BIT(29)
+
+#define RKVDEC_REG_PICPAR 0x00C
+#define RKVDEC_Y_HOR_VIRSTRIDE(x) ((x) & 0x1ff)
+#define RKVDEC_SLICE_NUM_HIGHBIT BIT(11)
+#define RKVDEC_UV_HOR_VIRSTRIDE(x) (((x) & 0x1ff) << 12)
+#define RKVDEC_SLICE_NUM_LOWBITS(x) (((x) & 0x7ff) << 21)
+
+#define RKVDEC_REG_STRM_RLC_BASE 0x010
+
+#define RKVDEC_REG_STRM_LEN 0x014
+#define RKVDEC_STRM_LEN(x) ((x) & 0x7ffffff)
+
+#define RKVDEC_REG_CABACTBL_PROB_BASE 0x018
+#define RKVDEC_REG_DECOUT_BASE 0x01C
+
+#define RKVDEC_REG_Y_VIRSTRIDE 0x020
+#define RKVDEC_Y_VIRSTRIDE(x) ((x) & 0xfffff)
+
+#define RKVDEC_REG_YUV_VIRSTRIDE 0x024
+#define RKVDEC_YUV_VIRSTRIDE(x) ((x) & 0x1fffff)
+#define RKVDEC_REG_H264_BASE_REFER(i) (((i) * 0x04) + 0x028)
+
+#define RKVDEC_REG_H264_BASE_REFER15 0x0C0
+#define RKVDEC_FIELD_REF BIT(0)
+#define RKVDEC_TOPFIELD_USED_REF BIT(1)
+#define RKVDEC_BOTFIELD_USED_REF BIT(2)
+#define RKVDEC_COLMV_USED_FLAG_REF BIT(3)
+
+#define RKVDEC_REG_VP9_LAST_FRAME_BASE 0x02c
+#define RKVDEC_REG_VP9_GOLDEN_FRAME_BASE 0x030
+#define RKVDEC_REG_VP9_ALTREF_FRAME_BASE 0x034
+
+#define RKVDEC_REG_VP9_CPRHEADER_OFFSET 0x028
+#define RKVDEC_VP9_CPRHEADER_OFFSET(x) ((x) & 0xffff)
+
+#define RKVDEC_REG_VP9_REFERLAST_BASE 0x02C
+#define RKVDEC_REG_VP9_REFERGOLDEN_BASE 0x030
+#define RKVDEC_REG_VP9_REFERALFTER_BASE 0x034
+
+#define RKVDEC_REG_VP9COUNT_BASE 0x038
+#define RKVDEC_VP9COUNT_UPDATE_EN BIT(0)
+
+#define RKVDEC_REG_VP9_SEGIDLAST_BASE 0x03C
+#define RKVDEC_REG_VP9_SEGIDCUR_BASE 0x040
+#define RKVDEC_REG_VP9_FRAME_SIZE(i) ((i) * 0x04 + 0x044)
+#define RKVDEC_VP9_FRAMEWIDTH(x) (((x) & 0xffff) << 0)
+#define RKVDEC_VP9_FRAMEHEIGHT(x) (((x) & 0xffff) << 16)
+
+#define RKVDEC_VP9_SEGID_GRP(i) ((i) * 0x04 + 0x050)
+#define RKVDEC_SEGID_ABS_DELTA(x) ((x) & 0x1)
+#define RKVDEC_SEGID_FRAME_QP_DELTA_EN(x) (((x) & 0x1) << 1)
+#define RKVDEC_SEGID_FRAME_QP_DELTA(x) (((x) & 0x1ff) << 2)
+#define RKVDEC_SEGID_FRAME_LOOPFILTER_VALUE_EN(x) (((x) & 0x1) << 11)
+#define RKVDEC_SEGID_FRAME_LOOPFILTER_VALUE(x) (((x) & 0x7f) << 12)
+#define RKVDEC_SEGID_REFERINFO_EN(x) (((x) & 0x1) << 19)
+#define RKVDEC_SEGID_REFERINFO(x) (((x) & 0x03) << 20)
+#define RKVDEC_SEGID_FRAME_SKIP_EN(x) (((x) & 0x1) << 22)
+
+#define RKVDEC_VP9_CPRHEADER_CONFIG 0x070
+#define RKVDEC_VP9_TX_MODE(x) ((x) & 0x07)
+#define RKVDEC_VP9_FRAME_REF_MODE(x) (((x) & 0x03) << 3)
+
+#define RKVDEC_VP9_REF_SCALE(i) ((i) * 0x04 + 0x074)
+#define RKVDEC_VP9_REF_HOR_SCALE(x) ((x) & 0xffff)
+#define RKVDEC_VP9_REF_VER_SCALE(x) (((x) & 0xffff) << 16)
+
+#define RKVDEC_VP9_REF_DELTAS_LASTFRAME 0x080
+#define RKVDEC_REF_DELTAS_LASTFRAME(pos, val) (((val) & 0x7f) << ((pos) * 7))
+
+#define RKVDEC_VP9_INFO_LASTFRAME 0x084
+#define RKVDEC_MODE_DELTAS_LASTFRAME(pos, val) (((val) & 0x7f) << ((pos) * 7))
+#define RKVDEC_SEG_EN_LASTFRAME BIT(16)
+#define RKVDEC_LAST_SHOW_FRAME BIT(17)
+#define RKVDEC_LAST_INTRA_ONLY BIT(18)
+#define RKVDEC_LAST_WIDHHEIGHT_EQCUR BIT(19)
+#define RKVDEC_COLOR_SPACE_LASTKEYFRAME(x) (((x) & 0x07) << 20)
+
+#define RKVDEC_VP9_INTERCMD_BASE 0x088
+
+#define RKVDEC_VP9_INTERCMD_NUM 0x08C
+#define RKVDEC_INTERCMD_NUM(x) ((x) & 0xffffff)
+
+#define RKVDEC_VP9_LASTTILE_SIZE 0x090
+#define RKVDEC_LASTTILE_SIZE(x) ((x) & 0xffffff)
+
+#define RKVDEC_VP9_HOR_VIRSTRIDE(i) ((i) * 0x04 + 0x094)
+#define RKVDEC_HOR_Y_VIRSTRIDE(x) ((x) & 0x1ff)
+#define RKVDEC_HOR_UV_VIRSTRIDE(x) (((x) & 0x1ff) << 16)
+
+#define RKVDEC_REG_H264_POC_REFER0(i) (((i) * 0x04) + 0x064)
+#define RKVDEC_REG_H264_POC_REFER1(i) (((i) * 0x04) + 0x0C4)
+#define RKVDEC_REG_H264_POC_REFER2(i) (((i) * 0x04) + 0x120)
+#define RKVDEC_POC_REFER(x) ((x) & 0xffffffff)
+
+#define RKVDEC_REG_CUR_POC0 0x0A0
+#define RKVDEC_REG_CUR_POC1 0x128
+#define RKVDEC_CUR_POC(x) ((x) & 0xffffffff)
+
+#define RKVDEC_REG_RLCWRITE_BASE 0x0A4
+#define RKVDEC_REG_PPS_BASE 0x0A8
+#define RKVDEC_REG_RPS_BASE 0x0AC
+
+#define RKVDEC_REG_STRMD_ERR_EN 0x0B0
+#define RKVDEC_STRMD_ERR_EN(x) ((x) & 0xffffffff)
+
+#define RKVDEC_REG_STRMD_ERR_STA 0x0B4
+#define RKVDEC_STRMD_ERR_STA(x) ((x) & 0xfffffff)
+#define RKVDEC_COLMV_ERR_REF_PICIDX(x) (((x) & 0x0f) << 28)
+
+#define RKVDEC_REG_STRMD_ERR_CTU 0x0B8
+#define RKVDEC_STRMD_ERR_CTU(x) ((x) & 0xff)
+#define RKVDEC_STRMD_ERR_CTU_YOFFSET(x) (((x) & 0xff) << 8)
+#define RKVDEC_STRMFIFO_SPACE2FULL(x) (((x) & 0x7f) << 16)
+#define RKVDEC_VP9_ERR_EN_CTU0 BIT(24)
+
+#define RKVDEC_REG_SAO_CTU_POS 0x0BC
+#define RKVDEC_SAOWR_XOFFSET(x) ((x) & 0x1ff)
+#define RKVDEC_SAOWR_YOFFSET(x) (((x) & 0x3ff) << 16)
+
+#define RKVDEC_VP9_LAST_FRAME_YSTRIDE 0x0C0
+#define RKVDEC_VP9_GOLDEN_FRAME_YSTRIDE 0x0C4
+#define RKVDEC_VP9_ALTREF_FRAME_YSTRIDE 0x0C8
+#define RKVDEC_VP9_REF_YSTRIDE(x) (((x) & 0xfffff) << 0)
+
+#define RKVDEC_VP9_LAST_FRAME_YUVSTRIDE 0x0CC
+#define RKVDEC_VP9_REF_YUVSTRIDE(x) (((x) & 0x1fffff) << 0)
+
+#define RKVDEC_VP9_REF_COLMV_BASE 0x0D0
+
+#define RKVDEC_REG_PERFORMANCE_CYCLE 0x100
+#define RKVDEC_PERFORMANCE_CYCLE(x) ((x) & 0xffffffff)
+
+#define RKVDEC_REG_AXI_DDR_RDATA 0x104
+#define RKVDEC_AXI_DDR_RDATA(x) ((x) & 0xffffffff)
+
+#define RKVDEC_REG_AXI_DDR_WDATA 0x108
+#define RKVDEC_AXI_DDR_WDATA(x) ((x) & 0xffffffff)
+
+#define RKVDEC_REG_FPGADEBUG_RESET 0x10C
+#define RKVDEC_BUSIFD_RESETN BIT(0)
+#define RKVDEC_CABAC_RESETN BIT(1)
+#define RKVDEC_DEC_CTRL_RESETN BIT(2)
+#define RKVDEC_TRANSD_RESETN BIT(3)
+#define RKVDEC_INTRA_RESETN BIT(4)
+#define RKVDEC_INTER_RESETN BIT(5)
+#define RKVDEC_RECON_RESETN BIT(6)
+#define RKVDEC_FILER_RESETN BIT(7)
+
+#define RKVDEC_REG_PERFORMANCE_SEL 0x110
+#define RKVDEC_PERF_SEL_CNT0(x) ((x) & 0x3f)
+#define RKVDEC_PERF_SEL_CNT1(x) (((x) & 0x3f) << 8)
+#define RKVDEC_PERF_SEL_CNT2(x) (((x) & 0x3f) << 16)
+
+#define RKVDEC_REG_PERFORMANCE_CNT(i) ((i) * 0x04 + 0x114)
+#define RKVDEC_PERF_CNT(x) ((x) & 0xffffffff)
+
+#define RKVDEC_REG_H264_ERRINFO_BASE 0x12C
+
+#define RKVDEC_REG_H264_ERRINFO_NUM 0x130
+#define RKVDEC_SLICEDEC_NUM(x) ((x) & 0x3fff)
+#define RKVDEC_STRMD_DECT_ERR_FLAG BIT(15)
+#define RKVDEC_ERR_PKT_NUM(x) (((x) & 0x3fff) << 16)
+
+#define RKVDEC_REG_H264_ERR_E 0x134
+#define RKVDEC_H264_ERR_EN_HIGHBITS(x) ((x) & 0x3fffffff)
+
+#define RKVDEC_REG_PREF_LUMA_CACHE_COMMAND 0x410
+#define RKVDEC_REG_PREF_CHR_CACHE_COMMAND 0x450
+
+#endif /* RKVDEC_REGS_H_ */
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
new file mode 100644
index 000000000000..225eeca73356
--- /dev/null
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -0,0 +1,1103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip Video Decoder driver
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on rkvdec driver by Google LLC. (Tomasz Figa <tfiga@chromium.org>)
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "rkvdec.h"
+#include "rkvdec-regs.h"
+
+static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
+ {
+ .per_request = true,
+ .mandatory = true,
+ .cfg.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
+ },
+ {
+ .per_request = true,
+ .mandatory = true,
+ .cfg.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS,
+ },
+ {
+ .per_request = true,
+ .mandatory = true,
+ .cfg.id = V4L2_CID_MPEG_VIDEO_H264_SPS,
+ },
+ {
+ .per_request = true,
+ .mandatory = true,
+ .cfg.id = V4L2_CID_MPEG_VIDEO_H264_PPS,
+ },
+ {
+ .per_request = true,
+ .mandatory = true,
+ .cfg.id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
+ },
+ {
+ .mandatory = true,
+ .cfg.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE,
+ .cfg.min = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
+ .cfg.max = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
+ .cfg.def = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
+ },
+ {
+ .mandatory = true,
+ .cfg.id = V4L2_CID_MPEG_VIDEO_H264_START_CODE,
+ .cfg.min = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
+		.cfg.max = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
+		.cfg.def = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
+ },
+};
+
+static const struct rkvdec_ctrls rkvdec_h264_ctrls = {
+ .ctrls = rkvdec_h264_ctrl_descs,
+ .num_ctrls = ARRAY_SIZE(rkvdec_h264_ctrl_descs),
+};
+
+static const u32 rkvdec_h264_decoded_fmts[] = {
+ V4L2_PIX_FMT_NV12,
+};
+
+static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 4096,
+ .step_width = 16,
+ .min_height = 48,
+ .max_height = 2304,
+ .step_height = 16,
+ },
+ .ctrls = &rkvdec_h264_ctrls,
+ .ops = &rkvdec_h264_fmt_ops,
+ .num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_decoded_fmts),
+ .decoded_fmts = rkvdec_h264_decoded_fmts,
+ }
+};
+
+static const struct rkvdec_coded_fmt_desc *
+rkvdec_find_coded_fmt_desc(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++) {
+ if (rkvdec_coded_fmts[i].fourcc == fourcc)
+ return &rkvdec_coded_fmts[i];
+ }
+
+ return NULL;
+}
+
+static void rkvdec_reset_fmt(struct rkvdec_ctx *ctx, struct v4l2_format *f,
+ u32 fourcc)
+{
+ memset(f, 0, sizeof(*f));
+ f->fmt.pix_mp.pixelformat = fourcc;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+	f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static void rkvdec_reset_coded_fmt(struct rkvdec_ctx *ctx)
+{
+ struct v4l2_format *f = &ctx->coded_fmt;
+
+ ctx->coded_fmt_desc = &rkvdec_coded_fmts[0];
+ rkvdec_reset_fmt(ctx, f, ctx->coded_fmt_desc->fourcc);
+
+ f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ f->fmt.pix_mp.width = ctx->coded_fmt_desc->frmsize.min_width;
+ f->fmt.pix_mp.height = ctx->coded_fmt_desc->frmsize.min_height;
+
+ if (ctx->coded_fmt_desc->ops->adjust_fmt)
+ ctx->coded_fmt_desc->ops->adjust_fmt(ctx, f);
+}
+
+static void rkvdec_reset_decoded_fmt(struct rkvdec_ctx *ctx)
+{
+ struct v4l2_format *f = &ctx->decoded_fmt;
+
+ rkvdec_reset_fmt(ctx, f, ctx->coded_fmt_desc->decoded_fmts[0]);
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ v4l2_fill_pixfmt_mp(&f->fmt.pix_mp,
+ ctx->coded_fmt_desc->decoded_fmts[0],
+ ctx->coded_fmt.fmt.pix_mp.width,
+ ctx->coded_fmt.fmt.pix_mp.height);
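+	/*
+	 * Reserve extra space after the decoded frame for per-macroblock
+	 * metadata (assumed to be motion vector storage), at 128 bytes per
+	 * 16x16 macroblock.
+	 */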
+ f->fmt.pix_mp.plane_fmt[0].sizeimage += 128 *
+ DIV_ROUND_UP(f->fmt.pix_mp.width, 16) *
+ DIV_ROUND_UP(f->fmt.pix_mp.height, 16);
+}
+
+static int rkvdec_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct rkvdec_coded_fmt_desc *fmt;
+
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ fmt = rkvdec_find_coded_fmt_desc(fsize->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = fmt->frmsize;
+ return 0;
+}
+
+static int rkvdec_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct rkvdec_dev *rkvdec = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ strscpy(cap->driver, rkvdec->dev->driver->name,
+ sizeof(cap->driver));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ rkvdec->dev->driver->name);
+ return 0;
+}
+
+static int rkvdec_try_capture_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ const struct rkvdec_coded_fmt_desc *coded_desc;
+ unsigned int i;
+
+ /*
+	 * The codec context should always point to a coded format desc; if
+	 * the format on the coded end has not been set yet, it points to the
+	 * default value.
+ */
+ coded_desc = ctx->coded_fmt_desc;
+ if (WARN_ON(!coded_desc))
+ return -EINVAL;
+
+ for (i = 0; i < coded_desc->num_decoded_fmts; i++) {
+ if (coded_desc->decoded_fmts[i] == pix_mp->pixelformat)
+ break;
+ }
+
+ if (i == coded_desc->num_decoded_fmts)
+ pix_mp->pixelformat = coded_desc->decoded_fmts[0];
+
+ /* Always apply the frmsize constraint of the coded end. */
+ v4l2_apply_frmsize_constraints(&pix_mp->width,
+ &pix_mp->height,
+ &coded_desc->frmsize);
+
+ v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
+ pix_mp->width, pix_mp->height);
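+	/* Extra metadata space, as in rkvdec_reset_decoded_fmt(). */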
+ pix_mp->plane_fmt[0].sizeimage +=
+ 128 *
+ DIV_ROUND_UP(pix_mp->width, 16) *
+ DIV_ROUND_UP(pix_mp->height, 16);
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int rkvdec_try_output_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ const struct rkvdec_coded_fmt_desc *desc;
+
+ desc = rkvdec_find_coded_fmt_desc(pix_mp->pixelformat);
+ if (!desc) {
+ pix_mp->pixelformat = rkvdec_coded_fmts[0].fourcc;
+ desc = &rkvdec_coded_fmts[0];
+ }
+
+ v4l2_apply_frmsize_constraints(&pix_mp->width,
+ &pix_mp->height,
+ &desc->frmsize);
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ /* All coded formats are considered single planar for now. */
+ pix_mp->num_planes = 1;
+
+ if (desc->ops->adjust_fmt) {
+ int ret;
+
+ ret = desc->ops->adjust_fmt(ctx, f);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rkvdec_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f,
+ int (*try_fmt)(struct file *, void *,
+ struct v4l2_format *))
+{
+ struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct vb2_queue *vq;
+
+ if (!try_fmt)
+ return -EINVAL;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ return try_fmt(file, priv, f);
+}
+
+static int rkvdec_s_capture_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ int ret;
+
+ ret = rkvdec_s_fmt(file, priv, f, rkvdec_try_capture_fmt);
+ if (ret)
+ return ret;
+
+ ctx->decoded_fmt = *f;
+ return 0;
+}
+
+static int rkvdec_s_output_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ const struct rkvdec_coded_fmt_desc *desc;
+ struct v4l2_format *cap_fmt;
+ struct vb2_queue *peer_vq;
+ int ret;
+
+ /*
+ * Since format change on the OUTPUT queue will reset the CAPTURE
+ * queue, we can't allow doing so when the CAPTURE queue has buffers
+ * allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(peer_vq))
+ return -EBUSY;
+
+ ret = rkvdec_s_fmt(file, priv, f, rkvdec_try_output_fmt);
+ if (ret)
+ return ret;
+
+ desc = rkvdec_find_coded_fmt_desc(f->fmt.pix_mp.pixelformat);
+ if (!desc)
+ return -EINVAL;
+ ctx->coded_fmt_desc = desc;
+ ctx->coded_fmt = *f;
+
+ /*
+	 * The current decoded format might have become invalid with the
+	 * newly selected codec, so reset it to the default just to be safe
+	 * and keep the internal driver state sane. The user is mandated to
+	 * set the decoded format again after we return, so we don't need
+	 * anything smarter.
+	 *
+	 * Note that this will propagate any size changes to the decoded format.
+ */
+ rkvdec_reset_decoded_fmt(ctx);
+
+ /* Propagate colorspace information to capture. */
+ cap_fmt = &ctx->decoded_fmt;
+ cap_fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ cap_fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ cap_fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ cap_fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ return 0;
+}
+
+static int rkvdec_g_output_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+
+ *f = ctx->coded_fmt;
+ return 0;
+}
+
+static int rkvdec_g_capture_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+
+ *f = ctx->decoded_fmt;
+ return 0;
+}
+
+static int rkvdec_enum_output_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(rkvdec_coded_fmts))
+ return -EINVAL;
+
+ f->pixelformat = rkvdec_coded_fmts[f->index].fourcc;
+ return 0;
+}
+
+static int rkvdec_enum_capture_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+
+ if (WARN_ON(!ctx->coded_fmt_desc))
+ return -EINVAL;
+
+ if (f->index >= ctx->coded_fmt_desc->num_decoded_fmts)
+ return -EINVAL;
+
+ f->pixelformat = ctx->coded_fmt_desc->decoded_fmts[f->index];
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops rkvdec_ioctl_ops = {
+ .vidioc_querycap = rkvdec_querycap,
+ .vidioc_enum_framesizes = rkvdec_enum_framesizes,
+
+ .vidioc_try_fmt_vid_cap_mplane = rkvdec_try_capture_fmt,
+ .vidioc_try_fmt_vid_out_mplane = rkvdec_try_output_fmt,
+ .vidioc_s_fmt_vid_out_mplane = rkvdec_s_output_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = rkvdec_s_capture_fmt,
+ .vidioc_g_fmt_vid_out_mplane = rkvdec_g_output_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = rkvdec_g_capture_fmt,
+ .vidioc_enum_fmt_vid_out = rkvdec_enum_output_fmt,
+ .vidioc_enum_fmt_vid_cap = rkvdec_enum_capture_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+static int rkvdec_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_format *f;
+ unsigned int i;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ f = &ctx->coded_fmt;
+ else
+ f = &ctx->decoded_fmt;
+
+ if (*num_planes) {
+ if (*num_planes != f->fmt.pix_mp.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *num_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++)
+ sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static int rkvdec_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_format *f;
+ unsigned int i;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ f = &ctx->coded_fmt;
+ else
+ f = &ctx->decoded_fmt;
+
+ for (i = 0; i < f->fmt.pix_mp.num_planes; ++i) {
+ u32 sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < sizeimage)
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
+ return 0;
+}
+
+static void rkvdec_buf_queue(struct vb2_buffer *vb)
+{
+ struct rkvdec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int rkvdec_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static void rkvdec_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct rkvdec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_hdl);
+}
+
+static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);
+ const struct rkvdec_coded_fmt_desc *desc;
+ int ret;
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ return 0;
+
+ desc = ctx->coded_fmt_desc;
+ if (WARN_ON(!desc))
+ return -EINVAL;
+
+ if (desc->ops->start) {
+ ret = desc->ops->start(ctx);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rkvdec_queue_cleanup(struct vb2_queue *vq, u32 state)
+{
+ struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);
+
+ while (true) {
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!vbuf)
+ break;
+
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->ctrl_hdl);
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
+
+static void rkvdec_stop_streaming(struct vb2_queue *q)
+{
+ struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
+
+ if (WARN_ON(!desc))
+ return;
+
+ if (desc->ops->stop)
+ desc->ops->stop(ctx);
+ }
+
+ rkvdec_queue_cleanup(q, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops rkvdec_queue_ops = {
+ .queue_setup = rkvdec_queue_setup,
+ .buf_prepare = rkvdec_buf_prepare,
+ .buf_queue = rkvdec_buf_queue,
+ .buf_out_validate = rkvdec_buf_out_validate,
+ .buf_request_complete = rkvdec_buf_request_complete,
+ .start_streaming = rkvdec_start_streaming,
+ .stop_streaming = rkvdec_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int rkvdec_request_validate(struct media_request *req)
+{
+ struct media_request_object *obj;
+ const struct rkvdec_ctrls *ctrls;
+ struct v4l2_ctrl_handler *hdl;
+ struct rkvdec_ctx *ctx = NULL;
+ unsigned int count, i;
+ int ret;
+
+ list_for_each_entry(obj, &req->objects, list) {
+ if (vb2_request_object_is_buffer(obj)) {
+ struct vb2_buffer *vb;
+
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ ctx = vb2_get_drv_priv(vb->vb2_queue);
+ break;
+ }
+ }
+
+ if (!ctx)
+ return -EINVAL;
+
+ count = vb2_request_buffer_cnt(req);
+ if (!count)
+ return -ENOENT;
+ else if (count > 1)
+ return -EINVAL;
+
+ hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_hdl);
+ if (!hdl)
+ return -ENOENT;
+
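+	/* Make sure all mandatory per-request controls are present. */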
+ ret = 0;
+ ctrls = ctx->coded_fmt_desc->ctrls;
+ for (i = 0; ctrls && i < ctrls->num_ctrls; i++) {
+ u32 id = ctrls->ctrls[i].cfg.id;
+ struct v4l2_ctrl *ctrl;
+
+ if (!ctrls->ctrls[i].per_request || !ctrls->ctrls[i].mandatory)
+ continue;
+
+ ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, id);
+ if (!ctrl) {
+ ret = -ENOENT;
+ break;
+ }
+ }
+
+ v4l2_ctrl_request_hdl_put(hdl);
+
+ if (ret)
+ return ret;
+
+ return vb2_request_validate(req);
+}
+
+static const struct media_device_ops rkvdec_media_ops = {
+ .req_validate = rkvdec_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
+static void rkvdec_job_finish_no_pm(struct rkvdec_ctx *ctx,
+ enum vb2_buffer_state result)
+{
+ if (ctx->coded_fmt_desc->ops->done) {
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ ctx->coded_fmt_desc->ops->done(ctx, src_buf, dst_buf, result);
+ }
+
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ result);
+}
+
+static void rkvdec_job_finish(struct rkvdec_ctx *ctx,
+ enum vb2_buffer_state result)
+{
+ struct rkvdec_dev *rkvdec = ctx->dev;
+
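+	/* Drop the runtime PM reference taken in rkvdec_device_run(). */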
+ pm_runtime_mark_last_busy(rkvdec->dev);
+ pm_runtime_put_autosuspend(rkvdec->dev);
+ rkvdec_job_finish_no_pm(ctx, result);
+}
+
+void rkvdec_run_preamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run)
+{
+ struct media_request *src_req;
+
+ memset(run, 0, sizeof(*run));
+
+ run->bufs.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ run->bufs.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+	/* Apply the request's controls if needed. */
+ src_req = run->bufs.src->vb2_buf.req_obj.req;
+ if (src_req)
+ v4l2_ctrl_request_setup(src_req, &ctx->ctrl_hdl);
+
+ v4l2_m2m_buf_copy_metadata(run->bufs.src, run->bufs.dst, true);
+}
+
+void rkvdec_run_postamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run)
+{
+ struct media_request *src_req = run->bufs.src->vb2_buf.req_obj.req;
+
+ if (src_req)
+ v4l2_ctrl_request_complete(src_req, &ctx->ctrl_hdl);
+}
+
+static void rkvdec_device_run(void *priv)
+{
+ struct rkvdec_ctx *ctx = priv;
+ struct rkvdec_dev *rkvdec = ctx->dev;
+ const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
+ int ret;
+
+ if (WARN_ON(!desc))
+ return;
+
+	ret = pm_runtime_get_sync(rkvdec->dev);
+	if (ret < 0) {
+		/* pm_runtime_get_sync() bumps the usage count even on failure. */
+		pm_runtime_put_noidle(rkvdec->dev);
+		rkvdec_job_finish_no_pm(ctx, VB2_BUF_STATE_ERROR);
+		return;
+	}
+
+ ret = desc->ops->run(ctx);
+ if (ret)
+ rkvdec_job_finish(ctx, VB2_BUF_STATE_ERROR);
+}
+
+static struct v4l2_m2m_ops rkvdec_m2m_ops = {
+ .device_run = rkvdec_device_run,
+};
+
+static int rkvdec_queue_init(void *priv,
+ struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct rkvdec_ctx *ctx = priv;
+ struct rkvdec_dev *rkvdec = ctx->dev;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &rkvdec_queue_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ /*
+	 * The driver does mostly sequential access, so sacrifice TLB
+	 * efficiency for faster allocation. Also, there is no CPU access on
+	 * the source queue, so no kernel mapping is needed.
+ */
+ src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &rkvdec->vdev_lock;
+ src_vq->dev = rkvdec->v4l2_dev.dev;
+ src_vq->supports_requests = true;
+ src_vq->requires_requests = true;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
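+	/*
+	 * Decoded buffers are read back by the hardware when used as
+	 * reference frames, hence the bidirectional DMA mappings.
+	 */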
+ dst_vq->bidirectional = true;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &rkvdec_queue_ops;
+ dst_vq->buf_struct_size = sizeof(struct rkvdec_decoded_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &rkvdec->vdev_lock;
+ dst_vq->dev = rkvdec->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int rkvdec_add_ctrls(struct rkvdec_ctx *ctx,
+ const struct rkvdec_ctrls *ctrls)
+{
+ unsigned int i;
+
+ for (i = 0; i < ctrls->num_ctrls; i++) {
+ const struct v4l2_ctrl_config *cfg = &ctrls->ctrls[i].cfg;
+
+ v4l2_ctrl_new_custom(&ctx->ctrl_hdl, cfg, ctx);
+ if (ctx->ctrl_hdl.error)
+ return ctx->ctrl_hdl.error;
+ }
+
+ return 0;
+}
+
+static int rkvdec_init_ctrls(struct rkvdec_ctx *ctx)
+{
+ unsigned int i, nctrls = 0;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++)
+ nctrls += rkvdec_coded_fmts[i].ctrls->num_ctrls;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, nctrls);
+
+ for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++) {
+ ret = rkvdec_add_ctrls(ctx, rkvdec_coded_fmts[i].ctrls);
+ if (ret)
+ goto err_free_handler;
+ }
+
+ ret = v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+ if (ret)
+ goto err_free_handler;
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
+ return 0;
+
+err_free_handler:
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ return ret;
+}
+
+static int rkvdec_open(struct file *filp)
+{
+ struct rkvdec_dev *rkvdec = video_drvdata(filp);
+ struct rkvdec_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = rkvdec;
+ rkvdec_reset_coded_fmt(ctx);
+ rkvdec_reset_decoded_fmt(ctx);
+ v4l2_fh_init(&ctx->fh, video_devdata(filp));
+
+ ret = rkvdec_init_ctrls(ctx);
+ if (ret)
+ goto err_free_ctx;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rkvdec->m2m_dev, ctx,
+ rkvdec_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_cleanup_ctrls;
+ }
+
+ filp->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ return 0;
+
+err_cleanup_ctrls:
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+err_free_ctx:
+ kfree(ctx);
+ return ret;
+}
+
+static int rkvdec_release(struct file *filp)
+{
+ struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(filp->private_data);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations rkvdec_fops = {
+ .owner = THIS_MODULE,
+ .open = rkvdec_open,
+ .release = rkvdec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int rkvdec_v4l2_init(struct rkvdec_dev *rkvdec)
+{
+ int ret;
+
+ ret = v4l2_device_register(rkvdec->dev, &rkvdec->v4l2_dev);
+ if (ret) {
+ dev_err(rkvdec->dev, "Failed to register V4L2 device\n");
+ return ret;
+ }
+
+ rkvdec->m2m_dev = v4l2_m2m_init(&rkvdec_m2m_ops);
+ if (IS_ERR(rkvdec->m2m_dev)) {
+ v4l2_err(&rkvdec->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(rkvdec->m2m_dev);
+ goto err_unregister_v4l2;
+ }
+
+ rkvdec->mdev.dev = rkvdec->dev;
+ strscpy(rkvdec->mdev.model, "rkvdec", sizeof(rkvdec->mdev.model));
+ strscpy(rkvdec->mdev.bus_info, "platform:rkvdec",
+ sizeof(rkvdec->mdev.bus_info));
+ media_device_init(&rkvdec->mdev);
+ rkvdec->mdev.ops = &rkvdec_media_ops;
+ rkvdec->v4l2_dev.mdev = &rkvdec->mdev;
+
+ rkvdec->vdev.lock = &rkvdec->vdev_lock;
+ rkvdec->vdev.v4l2_dev = &rkvdec->v4l2_dev;
+ rkvdec->vdev.fops = &rkvdec_fops;
+ rkvdec->vdev.release = video_device_release_empty;
+ rkvdec->vdev.vfl_dir = VFL_DIR_M2M;
+ rkvdec->vdev.device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+ rkvdec->vdev.ioctl_ops = &rkvdec_ioctl_ops;
+ video_set_drvdata(&rkvdec->vdev, rkvdec);
+ strscpy(rkvdec->vdev.name, "rkvdec", sizeof(rkvdec->vdev.name));
+
+ ret = video_register_device(&rkvdec->vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(&rkvdec->v4l2_dev, "Failed to register video device\n");
+ goto err_cleanup_mc;
+ }
+
+ ret = v4l2_m2m_register_media_controller(rkvdec->m2m_dev, &rkvdec->vdev,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ v4l2_err(&rkvdec->v4l2_dev,
+ "Failed to initialize V4L2 M2M media controller\n");
+ goto err_unregister_vdev;
+ }
+
+ ret = media_device_register(&rkvdec->mdev);
+ if (ret) {
+ v4l2_err(&rkvdec->v4l2_dev, "Failed to register media device\n");
+ goto err_unregister_mc;
+ }
+
+ return 0;
+
+err_unregister_mc:
+ v4l2_m2m_unregister_media_controller(rkvdec->m2m_dev);
+
+err_unregister_vdev:
+ video_unregister_device(&rkvdec->vdev);
+
+err_cleanup_mc:
+ media_device_cleanup(&rkvdec->mdev);
+ v4l2_m2m_release(rkvdec->m2m_dev);
+
+err_unregister_v4l2:
+ v4l2_device_unregister(&rkvdec->v4l2_dev);
+ return ret;
+}
+
+static void rkvdec_v4l2_cleanup(struct rkvdec_dev *rkvdec)
+{
+ media_device_unregister(&rkvdec->mdev);
+ v4l2_m2m_unregister_media_controller(rkvdec->m2m_dev);
+ video_unregister_device(&rkvdec->vdev);
+ media_device_cleanup(&rkvdec->mdev);
+ v4l2_m2m_release(rkvdec->m2m_dev);
+ v4l2_device_unregister(&rkvdec->v4l2_dev);
+}
+
+static irqreturn_t rkvdec_irq_handler(int irq, void *priv)
+{
+ struct rkvdec_dev *rkvdec = priv;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = readl(rkvdec->regs + RKVDEC_REG_INTERRUPT);
+ state = (status & RKVDEC_RDY_STA) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ writel(0, rkvdec->regs + RKVDEC_REG_INTERRUPT);
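+	/*
+	 * Only finish the job if the watchdog work was still pending: a
+	 * successful cancel means the timeout handler has not run for this
+	 * decode job.
+	 */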
+ if (cancel_delayed_work(&rkvdec->watchdog_work)) {
+ struct rkvdec_ctx *ctx;
+
+ ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
+ rkvdec_job_finish(ctx, state);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rkvdec_watchdog_func(struct work_struct *work)
+{
+ struct rkvdec_dev *rkvdec;
+ struct rkvdec_ctx *ctx;
+
+ rkvdec = container_of(to_delayed_work(work), struct rkvdec_dev,
+ watchdog_work);
+ ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
+ if (ctx) {
+ dev_err(rkvdec->dev, "Frame processing timed out!\n");
+ writel(RKVDEC_IRQ_DIS, rkvdec->regs + RKVDEC_REG_INTERRUPT);
+ writel(0, rkvdec->regs + RKVDEC_REG_SYSCTRL);
+ rkvdec_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct of_device_id of_rkvdec_match[] = {
+ { .compatible = "rockchip,rk3399-vdec" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_rkvdec_match);
+
+static const char * const rkvdec_clk_names[] = {
+ "axi", "ahb", "cabac", "core"
+};
+
+static int rkvdec_probe(struct platform_device *pdev)
+{
+ struct rkvdec_dev *rkvdec;
+ struct resource *res;
+ unsigned int i;
+ int ret, irq;
+
+ rkvdec = devm_kzalloc(&pdev->dev, sizeof(*rkvdec), GFP_KERNEL);
+ if (!rkvdec)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rkvdec);
+ rkvdec->dev = &pdev->dev;
+ mutex_init(&rkvdec->vdev_lock);
+ INIT_DELAYED_WORK(&rkvdec->watchdog_work, rkvdec_watchdog_func);
+
+ rkvdec->clocks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(rkvdec_clk_names),
+ sizeof(*rkvdec->clocks), GFP_KERNEL);
+ if (!rkvdec->clocks)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(rkvdec_clk_names); i++)
+ rkvdec->clocks[i].id = rkvdec_clk_names[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(rkvdec_clk_names),
+ rkvdec->clocks);
+ if (ret)
+ return ret;
+
+ /*
+	 * Bump ACLK to the maximum possible frequency (500 MHz) to improve
+	 * performance during 4K video playback.
+ */
+ clk_set_rate(rkvdec->clocks[0].clk, 500 * 1000 * 1000);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rkvdec->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rkvdec->regs))
+ return PTR_ERR(rkvdec->regs);
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Could not set DMA coherent mask.\n");
+ return ret;
+ }
+
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Could not get vdec IRQ\n");
+ return -ENXIO;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ rkvdec_irq_handler, IRQF_ONESHOT,
+ dev_name(&pdev->dev), rkvdec);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request vdec IRQ\n");
+ return ret;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = rkvdec_v4l2_init(rkvdec);
+ if (ret)
+ goto err_disable_runtime_pm;
+
+ return 0;
+
+err_disable_runtime_pm:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int rkvdec_remove(struct platform_device *pdev)
+{
+ struct rkvdec_dev *rkvdec = platform_get_drvdata(pdev);
+
+ rkvdec_v4l2_cleanup(rkvdec);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int rkvdec_runtime_resume(struct device *dev)
+{
+ struct rkvdec_dev *rkvdec = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(ARRAY_SIZE(rkvdec_clk_names),
+ rkvdec->clocks);
+}
+
+static int rkvdec_runtime_suspend(struct device *dev)
+{
+ struct rkvdec_dev *rkvdec = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(rkvdec_clk_names),
+ rkvdec->clocks);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops rkvdec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(rkvdec_runtime_suspend, rkvdec_runtime_resume, NULL)
+};
+
+static struct platform_driver rkvdec_driver = {
+ .probe = rkvdec_probe,
+ .remove = rkvdec_remove,
+ .driver = {
+ .name = "rkvdec",
+ .of_match_table = of_match_ptr(of_rkvdec_match),
+ .pm = &rkvdec_pm_ops,
+ },
+};
+module_platform_driver(rkvdec_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@collabora.com>");
+MODULE_DESCRIPTION("Rockchip Video Decoder driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
new file mode 100644
index 000000000000..2fc9f46b6910
--- /dev/null
+++ b/drivers/staging/media/rkvdec/rkvdec.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Rockchip Video Decoder driver
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on rkvdec driver by Google LLC. (Tomasz Figa <tfiga@chromium.org>)
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+#ifndef RKVDEC_H_
+#define RKVDEC_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+struct rkvdec_ctx;
+
+struct rkvdec_ctrl_desc {
+ u32 per_request : 1;
+ u32 mandatory : 1;
+ struct v4l2_ctrl_config cfg;
+};
+
+struct rkvdec_ctrls {
+ const struct rkvdec_ctrl_desc *ctrls;
+ unsigned int num_ctrls;
+};
+
+struct rkvdec_run {
+ struct {
+ struct vb2_v4l2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+ } bufs;
+};
+
+struct rkvdec_vp9_decoded_buffer_info {
+ /* Info needed when the decoded frame serves as a reference frame. */
+ u16 width;
+ u16 height;
+ u32 bit_depth : 4;
+};
+
+struct rkvdec_decoded_buffer {
+ /* Must be the first field in this struct. */
+ struct v4l2_m2m_buffer base;
+};
+
+static inline struct rkvdec_decoded_buffer *
+vb2_to_rkvdec_decoded_buf(struct vb2_buffer *buf)
+{
+ return container_of(buf, struct rkvdec_decoded_buffer,
+ base.vb.vb2_buf);
+}
+
+struct rkvdec_coded_fmt_ops {
+ int (*adjust_fmt)(struct rkvdec_ctx *ctx,
+ struct v4l2_format *f);
+ int (*start)(struct rkvdec_ctx *ctx);
+ void (*stop)(struct rkvdec_ctx *ctx);
+ int (*run)(struct rkvdec_ctx *ctx);
+ void (*done)(struct rkvdec_ctx *ctx, struct vb2_v4l2_buffer *src_buf,
+ struct vb2_v4l2_buffer *dst_buf,
+ enum vb2_buffer_state result);
+};
+
+struct rkvdec_coded_fmt_desc {
+ u32 fourcc;
+ struct v4l2_frmsize_stepwise frmsize;
+ const struct rkvdec_ctrls *ctrls;
+ const struct rkvdec_coded_fmt_ops *ops;
+ unsigned int num_decoded_fmts;
+ const u32 *decoded_fmts;
+};
+
+struct rkvdec_dev {
+ struct v4l2_device v4l2_dev;
+ struct media_device mdev;
+ struct video_device vdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct device *dev;
+ struct clk_bulk_data *clocks;
+ void __iomem *regs;
+ struct mutex vdev_lock; /* serializes ioctls */
+ struct delayed_work watchdog_work;
+};
+
+struct rkvdec_ctx {
+ struct v4l2_fh fh;
+ struct v4l2_format coded_fmt;
+ struct v4l2_format decoded_fmt;
+ const struct rkvdec_coded_fmt_desc *coded_fmt_desc;
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct rkvdec_dev *dev;
+ void *priv;
+};
+
+static inline struct rkvdec_ctx *fh_to_rkvdec_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct rkvdec_ctx, fh);
+}
+
+struct rkvdec_aux_buf {
+ void *cpu;
+ dma_addr_t dma;
+ size_t size;
+};
+
+void rkvdec_run_preamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run);
+void rkvdec_run_postamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run);
+
+extern const struct rkvdec_coded_fmt_ops rkvdec_h264_fmt_ops;
+#endif /* RKVDEC_H_ */
diff --git a/drivers/staging/media/soc_camera/soc-camera.rst b/drivers/staging/media/soc_camera/soc-camera.rst
new file mode 100644
index 000000000000..7c39711aebf8
--- /dev/null
+++ b/drivers/staging/media/soc_camera/soc-camera.rst
@@ -0,0 +1,171 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+The Soc-Camera Drivers
+======================
+
+Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+
+Terminology
+-----------
+
+The following terms are used in this document:
+ - camera / camera device / camera sensor - a video-camera sensor chip, capable
+   of connecting to a variety of systems and interfaces; it typically uses I2C
+   for control and configuration, and a parallel or a serial bus for data.
+ - camera host - an interface to which a camera is connected. Typically a
+   specialised interface, present on many SoCs, e.g. PXA27x and PXA3xx, SuperH,
+   i.MX27, i.MX31.
+ - camera host bus - a connection between a camera host and a camera. Can be
+   parallel or serial, consists of data and control lines, e.g. clock, vertical
+   and horizontal synchronization signals.
+
+Purpose of the soc-camera subsystem
+-----------------------------------
+
+The soc-camera subsystem initially provided a unified API between camera host
+drivers and camera sensor drivers. Later, the soc-camera sensor API was
+replaced with the standard V4L2 subdev API. This also made camera driver
+re-use with non-soc-camera hosts possible. The camera host API to the
+soc-camera core has been preserved.
+
+Soc-camera implements a V4L2 interface to the user; currently only the "mmap"
+method is supported by host drivers. However, the soc-camera core also provides
+support for the "read" method.
+
+The subsystem has been designed to support multiple camera host interfaces and
+multiple cameras per interface, although most applications have only one camera
+sensor.
+
+Existing drivers
+----------------
+
+As of 3.7 there are seven host drivers in the mainline: atmel-isi.c,
+mx1_camera.c (broken, scheduled for removal), mx2_camera.c, mx3_camera.c,
+omap1_camera.c, pxa_camera.c, and sh_mobile_ceu_camera.c, plus multiple
+sensor drivers under drivers/media/i2c/soc_camera/.
+
+Camera host API
+---------------
+
+A host camera driver is registered using the
+
+.. code-block:: none
+
+ soc_camera_host_register(struct soc_camera_host *);
+
+function. The host object can be initialized as follows:
+
+.. code-block:: none
+
+ struct soc_camera_host *ici;
+ ici->drv_name = DRV_NAME;
+ ici->ops = &camera_host_ops;
+ ici->priv = pcdev;
+ ici->v4l2_dev.dev = &pdev->dev;
+ ici->nr = pdev->id;
+
+All camera host methods are passed in a struct soc_camera_host_ops:
+
+.. code-block:: none
+
+ static struct soc_camera_host_ops camera_host_ops = {
+ .owner = THIS_MODULE,
+ .add = camera_add_device,
+ .remove = camera_remove_device,
+ .set_fmt = camera_set_fmt_cap,
+ .try_fmt = camera_try_fmt_cap,
+ .init_videobuf2 = camera_init_videobuf2,
+ .poll = camera_poll,
+ .querycap = camera_querycap,
+ .set_bus_param = camera_set_bus_param,
+ /* The rest of host operations are optional */
+ };
+
+The .add and .remove methods are called when a sensor is attached to or
+detached from the host. .set_bus_param is used to configure physical
+connection parameters between the host and the sensor. .init_videobuf2 is
+called by the soc-camera core when a video device is opened; the host driver
+would typically call vb2_queue_init() in this method, as shown in the sketch
+below. Further video-buffer management is implemented completely by the
+specific camera host driver. If the host driver supports non-standard pixel
+format conversion, it should implement .get_formats and, possibly,
+.put_formats operations. See below for more details about format conversion.
+The rest of the methods are called from the respective V4L2 operations.
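+
+A minimal .init_videobuf2 sketch, assuming hypothetical driver-specific
+camera_videobuf_ops and struct camera_buffer:
+
+.. code-block:: none
+
+	static int camera_init_videobuf2(struct vb2_queue *q,
+					 struct soc_camera_device *icd)
+	{
+		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		q->io_modes = VB2_MMAP | VB2_USERPTR;
+		q->drv_priv = icd;
+		q->ops = &camera_videobuf_ops;
+		q->mem_ops = &vb2_dma_contig_memops;
+		q->buf_struct_size = sizeof(struct camera_buffer);
+		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+		return vb2_queue_init(q);
+	}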
+
+Camera API
+----------
+
+Sensor drivers can use struct soc_camera_link, typically provided by the
+platform, to specify to which camera host bus the sensor is connected and,
+optionally, to provide platform .power and .reset methods for the camera.
+This struct is provided to the camera driver via the I2C client device
+platform data and can be obtained using the soc_camera_i2c_to_link() macro,
+as in the sketch below. Care should be taken when using
+soc_camera_vdev_to_subdev() and when accessing struct soc_camera_device
+using v4l2_get_subdev_hostdata(): both only work when running on an
+soc-camera host. The actual camera driver operation is implemented using the
+V4L2 subdev API. Additionally, soc-camera camera drivers can use auxiliary
+soc-camera helper functions like soc_camera_power_on() and
+soc_camera_power_off(), which switch regulators provided by the platform and
+call board-specific power switching methods. soc_camera_apply_board_flags()
+takes camera bus configuration capability flags and applies any board
+transformations, e.g. signal polarity inversion. soc_mbus_get_fmtdesc() can
+be used to obtain a pixel format descriptor corresponding to a certain
+media-bus pixel format code. soc_camera_limit_side() can be used to restrict
+the beginning and length of a frame side, based on camera capabilities.
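+
+For instance, a sensor driver would typically fetch its link in its probe
+routine and bail out if it is missing; a minimal sketch (sensor_probe is a
+hypothetical name):
+
+.. code-block:: none
+
+	static int sensor_probe(struct i2c_client *client,
+				const struct i2c_device_id *did)
+	{
+		struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+
+		if (!icl) {
+			dev_err(&client->dev, "Missing platform data!\n");
+			return -EINVAL;
+		}
+
+		/* ... initialize the v4l2 subdev, register controls, etc. ... */
+		return 0;
+	}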
+
+VIDIOC_S_CROP and VIDIOC_S_FMT behaviour
+----------------------------------------
+
+The above user ioctls modify image geometry as follows:
+
+VIDIOC_S_CROP: sets location and sizes of the sensor window. Unit is one sensor
+pixel. Changing sensor window sizes preserves any scaling factors, therefore
+user window sizes change as well.
+
+VIDIOC_S_FMT: sets user window. Should preserve previously set sensor window as
+much as possible by modifying scaling factors. If the sensor window cannot be
+preserved precisely, it may be changed too.
+
+In soc-camera there are two locations where scaling and cropping can take
+place: in the camera driver and in the host driver. User ioctls are first
+passed to the host driver, which then generally passes them down to the
+camera driver. It is more efficient to perform scaling and cropping in the
+camera driver to save camera bus bandwidth and maximise the framerate.
+However, if the camera driver fails to set the required parameters with
+sufficient precision, the host driver may decide to also use its own scaling
+and cropping to fulfill the user's request.
+
+Camera drivers are interfaced to the soc-camera core and to host drivers over
+the v4l2-subdev API, which is purely functional: it doesn't pass any data.
+Therefore all camera drivers shall reply to .g_fmt() requests with their
+current output geometry. This is necessary to correctly configure the camera
+bus. .s_fmt() and .try_fmt() have to be implemented too. Sensor window and
+scaling factors have to be maintained by camera drivers internally. According
+to the V4L2 API all capture drivers must support the VIDIOC_CROPCAP ioctl,
+hence we rely on camera drivers implementing .cropcap(). If the camera driver
+does not support cropping, it may choose not to implement .s_crop(); however,
+to enable cropping support by the camera host driver, at least the .g_crop
+method must be implemented.
+
+User window geometry is kept in .user_width and .user_height fields in struct
+soc_camera_device and used by the soc-camera core and host drivers. The core
+updates these fields upon successful completion of a .s_fmt() call, but if these
+fields change elsewhere, e.g. during .s_crop() processing, the host driver is
+responsible for updating them.
+
+Format conversion
+-----------------
+
+V4L2 distinguishes between pixel formats as they are stored in memory and as
+they are transferred over a media bus. Soc-camera provides support to
+conveniently manage these formats. A table of standard transformations is
+maintained by the soc-camera core, which describes what FOURCC pixel format
+will be obtained if a media-bus pixel format is stored in memory according
+to certain rules. E.g. if MEDIA_BUS_FMT_YUYV8_2X8 data is sampled with 8
+bits per sample and stored in memory in little-endian order with no gaps
+between bytes, data in memory will represent the V4L2_PIX_FMT_YUYV FOURCC
+format. These standard transformations will be used by soc-camera or by
+camera host drivers to configure camera drivers to produce the FOURCC format
+requested by the user with the VIDIOC_S_FMT ioctl(). Apart from those
+standard format conversions, host drivers can also provide their own
+conversion rules by implementing .get_formats and, if required, .put_formats
+methods, as in the sketch below.
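+
+As an illustration, a host driver could advertise one driver-specific memory
+format per media-bus code from its .get_formats method. A rough sketch,
+where host_swap_pixfmt is an assumed driver-provided struct
+soc_mbus_pixelfmt descriptor:
+
+.. code-block:: none
+
+	static int camera_get_formats(struct soc_camera_device *icd,
+				      unsigned int idx,
+				      struct soc_camera_format_xlate *xlate)
+	{
+		struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+		struct v4l2_subdev_mbus_code_enum code = {
+			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+			.index = idx,
+		};
+
+		if (v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code))
+			return 0;
+
+		if (xlate) {
+			xlate->code = code.code;
+			xlate->host_fmt = &host_swap_pixfmt;
+		}
+
+		/* one memory format provided for this media-bus code */
+		return 1;
+	}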
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
index 17733e9a088f..da369950bbf2 100644
--- a/drivers/staging/media/sunxi/cedrus/Kconfig
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_SUNXI_CEDRUS
tristate "Allwinner Cedrus VPU driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_DEV && VIDEO_V4L2
depends on HAS_DMA
depends on OF
- depends on MEDIA_CONTROLLER_REQUEST_API
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
select SUNXI_SRAM
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
diff --git a/drivers/staging/media/tegra-video/Kconfig b/drivers/staging/media/tegra-video/Kconfig
new file mode 100644
index 000000000000..f6c61ec74386
--- /dev/null
+++ b/drivers/staging/media/tegra-video/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_TEGRA
+ tristate "NVIDIA Tegra VI driver"
+ depends on TEGRA_HOST1X
+ depends on VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Choose this option if you have an NVIDIA Tegra SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tegra-video.
diff --git a/drivers/staging/media/tegra-video/Makefile b/drivers/staging/media/tegra-video/Makefile
new file mode 100644
index 000000000000..dfa2ef8f99ef
--- /dev/null
+++ b/drivers/staging/media/tegra-video/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+tegra-video-objs := \
+ video.o \
+ vi.o \
+ csi.o
+
+tegra-video-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
+obj-$(CONFIG_VIDEO_TEGRA) += tegra-video.o
diff --git a/drivers/staging/media/tegra-video/TODO b/drivers/staging/media/tegra-video/TODO
new file mode 100644
index 000000000000..6ceb7549c218
--- /dev/null
+++ b/drivers/staging/media/tegra-video/TODO
@@ -0,0 +1,11 @@
+TODO list
+* Currently the driver supports only the Tegra built-in TPG, with direct
+  media links from CSI to VI. Add a kernel config option
+  CONFIG_VIDEO_TEGRA_TPG and update the driver to choose between TPG and
+  sensor media links based on CONFIG_VIDEO_TEGRA_TPG.
+* Add real camera sensor capture support.
+* Add Tegra CSI MIPI pads calibration.
+* Add MIPI clock settle time computation based on the data rate.
+* Add support for ganged mode.
+* Add RAW10 packed video format support to Tegra210 video formats.
+* Add support for suspend and resume.
+* Make sure v4l2-compliance tests pass with all of the above implementations.
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
new file mode 100644
index 000000000000..40ea195d141d
--- /dev/null
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/device.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "csi.h"
+#include "video.h"
+
+static inline struct tegra_csi *
+host1x_client_to_csi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_csi, client);
+}
+
+static inline struct tegra_csi_channel *to_csi_chan(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct tegra_csi_channel, subdev);
+}
+
+/*
+ * CSI is a separate subdevice with 6 source pads used to generate test
+ * patterns. The CSI subdevice pad ops are used only for the TPG and allow
+ * the TPG formats below.
+ */
+static const struct v4l2_mbus_framefmt tegra_csi_tpg_fmts[] = {
+ {
+ TEGRA_DEF_WIDTH,
+ TEGRA_DEF_HEIGHT,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ V4L2_FIELD_NONE,
+ V4L2_COLORSPACE_SRGB
+ },
+ {
+ TEGRA_DEF_WIDTH,
+ TEGRA_DEF_HEIGHT,
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ V4L2_FIELD_NONE,
+ V4L2_COLORSPACE_SRGB
+ },
+};
+
+static const struct v4l2_frmsize_discrete tegra_csi_tpg_sizes[] = {
+ { 1280, 720 },
+ { 1920, 1080 },
+ { 3840, 2160 },
+};
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+static int csi_enum_bus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(tegra_csi_tpg_fmts))
+ return -EINVAL;
+
+ code->code = tegra_csi_tpg_fmts[code->index].code;
+
+ return 0;
+}
+
+static int csi_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+
+ fmt->format = csi_chan->format;
+
+ return 0;
+}
+
+static int csi_get_frmrate_table_index(struct tegra_csi *csi, u32 code,
+ u32 width, u32 height)
+{
+ const struct tpg_framerate *frmrate;
+ unsigned int i;
+
+ frmrate = csi->soc->tpg_frmrate_table;
+ for (i = 0; i < csi->soc->tpg_frmrate_table_size; i++) {
+ if (frmrate[i].code == code &&
+ frmrate[i].frmsize.width == width &&
+ frmrate[i].frmsize.height == height) {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void csi_chan_update_blank_intervals(struct tegra_csi_channel *csi_chan,
+ u32 code, u32 width, u32 height)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
+ int index;
+
+ index = csi_get_frmrate_table_index(csi_chan->csi, code,
+ width, height);
+ if (index >= 0) {
+ csi_chan->h_blank = frmrate[index].h_blank;
+ csi_chan->v_blank = frmrate[index].v_blank;
+ csi_chan->framerate = frmrate[index].framerate;
+ }
+}
+
+static int csi_enum_framesizes(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ unsigned int i;
+
+ if (fse->index >= ARRAY_SIZE(tegra_csi_tpg_sizes))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
+ if (fse->code == tegra_csi_tpg_fmts[i].code)
+ break;
+
+ if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
+ return -EINVAL;
+
+ fse->min_width = tegra_csi_tpg_sizes[fse->index].width;
+ fse->max_width = tegra_csi_tpg_sizes[fse->index].width;
+ fse->min_height = tegra_csi_tpg_sizes[fse->index].height;
+ fse->max_height = tegra_csi_tpg_sizes[fse->index].height;
+
+ return 0;
+}
+
+static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct tegra_csi *csi = csi_chan->csi;
+ const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
+ int index;
+
+ /* one framerate per format and resolution */
+ if (fie->index > 0)
+ return -EINVAL;
+
+ index = csi_get_frmrate_table_index(csi_chan->csi, fie->code,
+ fie->width, fie->height);
+ if (index < 0)
+ return -EINVAL;
+
+ fie->interval.numerator = 1;
+ fie->interval.denominator = frmrate[index].framerate;
+
+ return 0;
+}
+
+static int csi_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct v4l2_mbus_framefmt *format = &fmt->format;
+ const struct v4l2_frmsize_discrete *sizes;
+ unsigned int i;
+
+ sizes = v4l2_find_nearest_size(tegra_csi_tpg_sizes,
+ ARRAY_SIZE(tegra_csi_tpg_sizes),
+ width, height,
+				       format->width, format->height);
+ format->width = sizes->width;
+ format->height = sizes->height;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
+ if (format->code == tegra_csi_tpg_fmts[i].code)
+ break;
+
+ if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
+ i = 0;
+
+ format->code = tegra_csi_tpg_fmts[i].code;
+ format->field = V4L2_FIELD_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ /* update blanking intervals from frame rate table and format */
+ csi_chan_update_blank_intervals(csi_chan, format->code,
+ format->width, format->height);
+ csi_chan->format = *format;
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+static int tegra_csi_g_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *vfi)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+
+ vfi->interval.numerator = 1;
+ vfi->interval.denominator = csi_chan->framerate;
+
+ return 0;
+}
+
+static int tegra_csi_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct tegra_csi *csi = csi_chan->csi;
+ int ret = 0;
+
+ csi_chan->pg_mode = chan->pg_mode;
+ if (enable) {
+ ret = pm_runtime_get_sync(csi->dev);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to get runtime PM: %d\n", ret);
+ pm_runtime_put_noidle(csi->dev);
+ return ret;
+ }
+
+ ret = csi->ops->csi_start_streaming(csi_chan);
+ if (ret < 0)
+ goto rpm_put;
+
+ return 0;
+ }
+
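+	/*
+	 * Stream off path: stop the hardware, then drop the runtime PM
+	 * reference taken at stream on time.
+	 */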
+ csi->ops->csi_stop_streaming(csi_chan);
+
+rpm_put:
+ pm_runtime_put(csi->dev);
+ return ret;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+static const struct v4l2_subdev_video_ops tegra_csi_video_ops = {
+ .s_stream = tegra_csi_s_stream,
+ .g_frame_interval = tegra_csi_g_frame_interval,
+ .s_frame_interval = tegra_csi_g_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops tegra_csi_pad_ops = {
+ .enum_mbus_code = csi_enum_bus_code,
+ .enum_frame_size = csi_enum_framesizes,
+ .enum_frame_interval = csi_enum_frameintervals,
+ .get_fmt = csi_get_format,
+ .set_fmt = csi_set_format,
+};
+
+static const struct v4l2_subdev_ops tegra_csi_ops = {
+ .video = &tegra_csi_video_ops,
+ .pad = &tegra_csi_pad_ops,
+};
+
+static int tegra_csi_tpg_channels_alloc(struct tegra_csi *csi)
+{
+ struct device_node *node = csi->dev->of_node;
+ unsigned int port_num;
+ struct tegra_csi_channel *chan;
+ unsigned int tpg_channels = csi->soc->csi_max_channels;
+
+	/* allocate a CSI channel for each CSI x2 port */
+ for (port_num = 0; port_num < tpg_channels; port_num++) {
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ list_add_tail(&chan->list, &csi->csi_chans);
+ chan->csi = csi;
+ chan->csi_port_num = port_num;
+ chan->numlanes = 2;
+ chan->of_node = node;
+ chan->numpads = 1;
+ chan->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ }
+
+ return 0;
+}
+
+static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
+{
+ struct tegra_csi *csi = chan->csi;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ /* initialize the default format */
+ chan->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ chan->format.field = V4L2_FIELD_NONE;
+ chan->format.colorspace = V4L2_COLORSPACE_SRGB;
+ chan->format.width = TEGRA_DEF_WIDTH;
+ chan->format.height = TEGRA_DEF_HEIGHT;
+ csi_chan_update_blank_intervals(chan, chan->format.code,
+ chan->format.width,
+ chan->format.height);
+ /* initialize V4L2 subdevice and media entity */
+ subdev = &chan->subdev;
+ v4l2_subdev_init(subdev, &tegra_csi_ops);
+ subdev->dev = csi->dev;
+ snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s-%d", "tpg",
+ chan->csi_port_num);
+
+ v4l2_set_subdevdata(subdev, chan);
+ subdev->fwnode = of_fwnode_handle(chan->of_node);
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ /* initialize media entity pads */
+ ret = media_entity_pads_init(&subdev->entity, chan->numpads,
+ chan->pads);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to initialize media entity: %d\n", ret);
+ subdev->dev = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void tegra_csi_error_recover(struct v4l2_subdev *sd)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(sd);
+ struct tegra_csi *csi = csi_chan->csi;
+
+ /* stop streaming during error recovery */
+ csi->ops->csi_stop_streaming(csi_chan);
+ csi->ops->csi_err_recover(csi_chan);
+ csi->ops->csi_start_streaming(csi_chan);
+}
+
+static int tegra_csi_channels_init(struct tegra_csi *csi)
+{
+ struct tegra_csi_channel *chan;
+ int ret;
+
+ list_for_each_entry(chan, &csi->csi_chans, list) {
+ ret = tegra_csi_channel_init(chan);
+ if (ret) {
+ dev_err(csi->dev,
+ "failed to initialize channel-%d: %d\n",
+ chan->csi_port_num, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_csi_channels_cleanup(struct tegra_csi *csi)
+{
+ struct v4l2_subdev *subdev;
+ struct tegra_csi_channel *chan, *tmp;
+
+ list_for_each_entry_safe(chan, tmp, &csi->csi_chans, list) {
+ subdev = &chan->subdev;
+ if (subdev->dev)
+ media_entity_cleanup(&subdev->entity);
+ list_del(&chan->list);
+ kfree(chan);
+ }
+}
+
+static int __maybe_unused csi_runtime_suspend(struct device *dev)
+{
+ struct tegra_csi *csi = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(csi->soc->num_clks, csi->clks);
+
+ return 0;
+}
+
+static int __maybe_unused csi_runtime_resume(struct device *dev)
+{
+ struct tegra_csi *csi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(csi->soc->num_clks, csi->clks);
+ if (ret < 0) {
+ dev_err(csi->dev, "failed to enable clocks: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_csi_init(struct host1x_client *client)
+{
+ struct tegra_csi *csi = host1x_client_to_csi(client);
+ struct tegra_video_device *vid = dev_get_drvdata(client->host);
+ int ret;
+
+ INIT_LIST_HEAD(&csi->csi_chans);
+
+ ret = tegra_csi_tpg_channels_alloc(csi);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to allocate tpg channels: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = tegra_csi_channels_init(csi);
+ if (ret < 0)
+ goto cleanup;
+
+ vid->csi = csi;
+
+ return 0;
+
+cleanup:
+ tegra_csi_channels_cleanup(csi);
+ return ret;
+}
+
+static int tegra_csi_exit(struct host1x_client *client)
+{
+ struct tegra_csi *csi = host1x_client_to_csi(client);
+
+ tegra_csi_channels_cleanup(csi);
+
+ return 0;
+}
+
+static const struct host1x_client_ops csi_client_ops = {
+ .init = tegra_csi_init,
+ .exit = tegra_csi_exit,
+};
+
+static int tegra_csi_probe(struct platform_device *pdev)
+{
+ struct tegra_csi *csi;
+ unsigned int i;
+ int ret;
+
+ csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(csi->iomem))
+ return PTR_ERR(csi->iomem);
+
+ csi->soc = of_device_get_match_data(&pdev->dev);
+
+ csi->clks = devm_kcalloc(&pdev->dev, csi->soc->num_clks,
+ sizeof(*csi->clks), GFP_KERNEL);
+ if (!csi->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < csi->soc->num_clks; i++)
+ csi->clks[i].id = csi->soc->clk_names[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, csi->soc->num_clks, csi->clks);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get the clocks: %d\n", ret);
+ return ret;
+ }
+
+ if (!pdev->dev.pm_domain) {
+ ret = -ENOENT;
+ dev_warn(&pdev->dev, "PM domain is not attached: %d\n", ret);
+ return ret;
+ }
+
+ csi->dev = &pdev->dev;
+ csi->ops = csi->soc->ops;
+ platform_set_drvdata(pdev, csi);
+ pm_runtime_enable(&pdev->dev);
+
+ /* initialize host1x interface */
+ INIT_LIST_HEAD(&csi->client.list);
+ csi->client.ops = &csi_client_ops;
+ csi->client.dev = &pdev->dev;
+
+ ret = host1x_client_register(&csi->client);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to register host1x client: %d\n", ret);
+ goto rpm_disable;
+ }
+
+ return 0;
+
+rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int tegra_csi_remove(struct platform_device *pdev)
+{
+ struct tegra_csi *csi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&csi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to unregister host1x client: %d\n", err);
+ return err;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_csi_of_id_table[] = {
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-csi", .data = &tegra210_csi_soc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_csi_of_id_table);
+
+static const struct dev_pm_ops tegra_csi_pm_ops = {
+ SET_RUNTIME_PM_OPS(csi_runtime_suspend, csi_runtime_resume, NULL)
+};
+
+struct platform_driver tegra_csi_driver = {
+ .driver = {
+ .name = "tegra-csi",
+ .of_match_table = tegra_csi_of_id_table,
+ .pm = &tegra_csi_pm_ops,
+ },
+ .probe = tegra_csi_probe,
+ .remove = tegra_csi_remove,
+};
diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h
new file mode 100644
index 000000000000..93bd2a05797d
--- /dev/null
+++ b/drivers/staging/media/tegra-video/csi.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __TEGRA_CSI_H__
+#define __TEGRA_CSI_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * Each CSI brick supports a maximum of 4 lanes, which can be used either as
+ * one x4 port (using both the CILA and CILB partitions of the brick) or as
+ * two x2 ports, with one x2 from CILA and the other from CILB.
+ */
+#define CSI_PORTS_PER_BRICK 2
+
+/* each CSI channel can have one sink and one source pad */
+#define TEGRA_CSI_PADS_NUM 2
+
+enum tegra_csi_cil_port {
+ PORT_A = 0,
+ PORT_B,
+};
+
+enum tegra_csi_block {
+ CSI_CIL_AB = 0,
+ CSI_CIL_CD,
+ CSI_CIL_EF,
+};
+
+struct tegra_csi;
+
+/**
+ * struct tegra_csi_channel - Tegra CSI channel
+ *
+ * @list: list head for this entry
+ * @subdev: V4L2 subdevice associated with this channel
+ * @pads: media pads for the subdevice entity
+ * @numpads: number of pads.
+ * @csi: Tegra CSI device structure
+ * @of_node: csi device tree node
+ * @numlanes: number of lanes used per port/channel
+ * @csi_port_num: CSI channel port number
+ * @pg_mode: test pattern generator mode for channel
+ * @format: active format of the channel
+ * @framerate: active framerate for TPG
+ * @h_blank: horizontal blanking for TPG active format
+ * @v_blank: vertical blanking for TPG active format
+ */
+struct tegra_csi_channel {
+ struct list_head list;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[TEGRA_CSI_PADS_NUM];
+ unsigned int numpads;
+ struct tegra_csi *csi;
+ struct device_node *of_node;
+ unsigned int numlanes;
+ u8 csi_port_num;
+ u8 pg_mode;
+ struct v4l2_mbus_framefmt format;
+ unsigned int framerate;
+ unsigned int h_blank;
+ unsigned int v_blank;
+};
+
+/**
+ * struct tpg_framerate - Tegra CSI TPG framerate configuration
+ *
+ * @frmsize: frame resolution
+ * @code: media bus format code
+ * @h_blank: horizontal blanking used for TPG
+ * @v_blank: vertical blanking interval used for TPG
+ * @framerate: framerate achieved with the corresponding blanking intervals,
+ * format and resolution.
+ */
+struct tpg_framerate {
+ struct v4l2_frmsize_discrete frmsize;
+ u32 code;
+ unsigned int h_blank;
+ unsigned int v_blank;
+ unsigned int framerate;
+};
+
+/**
+ * struct tegra_csi_ops - Tegra CSI operations
+ *
+ * @csi_start_streaming: programs csi hardware to enable streaming.
+ * @csi_stop_streaming: programs csi hardware to disable streaming.
+ * @csi_err_recover: recover the csi hardware block from capture errors
+ *		caused by a missing source stream or by improper csi input
+ *		from the external source.
+ */
+struct tegra_csi_ops {
+ int (*csi_start_streaming)(struct tegra_csi_channel *csi_chan);
+ void (*csi_stop_streaming)(struct tegra_csi_channel *csi_chan);
+ void (*csi_err_recover)(struct tegra_csi_channel *csi_chan);
+};
+
+/**
+ * struct tegra_csi_soc - NVIDIA Tegra CSI SoC structure
+ *
+ * @ops: csi hardware operations
+ * @csi_max_channels: maximum number of supported streaming channels
+ * @clk_names: csi and cil clock names
+ * @num_clks: number of clocks
+ * @tpg_frmrate_table: csi tpg frame rate table with blanking intervals
+ * @tpg_frmrate_table_size: size of frame rate table
+ */
+struct tegra_csi_soc {
+ const struct tegra_csi_ops *ops;
+ unsigned int csi_max_channels;
+ const char * const *clk_names;
+ unsigned int num_clks;
+ const struct tpg_framerate *tpg_frmrate_table;
+ unsigned int tpg_frmrate_table_size;
+};
+
+/**
+ * struct tegra_csi - NVIDIA Tegra CSI device structure
+ *
+ * @dev: device struct
+ * @client: host1x_client struct
+ * @iomem: register base
+ * @clks: clocks for CSI and CIL
+ * @soc: pointer to SoC data structure
+ * @ops: csi operations
+ * @csi_chans: list head for CSI channels
+ */
+struct tegra_csi {
+ struct device *dev;
+ struct host1x_client client;
+ void __iomem *iomem;
+ struct clk_bulk_data *clks;
+ const struct tegra_csi_soc *soc;
+ const struct tegra_csi_ops *ops;
+ struct list_head csi_chans;
+};
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_csi_soc tegra210_csi_soc;
+#endif
+
+void tegra_csi_error_recover(struct v4l2_subdev *subdev);
+#endif
diff --git a/drivers/staging/media/tegra-video/tegra210.c b/drivers/staging/media/tegra-video/tegra210.c
new file mode 100644
index 000000000000..3baa4e314203
--- /dev/null
+++ b/drivers/staging/media/tegra-video/tegra210.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+/*
+ * This source file contains Tegra210 supported video formats,
+ * VI and CSI SoC specific data, operations and register accessors.
+ */
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/kthread.h>
+
+#include "csi.h"
+#include "vi.h"
+
+#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT msecs_to_jiffies(200)
+
+/* Tegra210 VI registers */
+#define TEGRA_VI_CFG_VI_INCR_SYNCPT 0x000
+#define VI_CFG_VI_INCR_SYNCPT_COND(x) (((x) & 0xff) << 8)
+#define VI_CSI_PP_FRAME_START(port) (5 + (port) * 4)
+#define VI_CSI_MW_ACK_DONE(port) (7 + (port) * 4)
+#define TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL 0x004
+#define VI_INCR_SYNCPT_NO_STALL BIT(8)
+#define TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x008
+#define TEGRA_VI_CFG_CG_CTRL 0x0b8
+#define VI_CG_2ND_LEVEL_EN 0x1
+
+/* Tegra210 VI CSI registers */
+#define TEGRA_VI_CSI_SW_RESET 0x000
+#define TEGRA_VI_CSI_SINGLE_SHOT 0x004
+#define SINGLE_SHOT_CAPTURE 0x1
+#define TEGRA_VI_CSI_IMAGE_DEF 0x00c
+#define BYPASS_PXL_TRANSFORM_OFFSET 24
+#define IMAGE_DEF_FORMAT_OFFSET 16
+#define IMAGE_DEF_DEST_MEM 0x1
+#define TEGRA_VI_CSI_IMAGE_SIZE 0x018
+#define IMAGE_SIZE_HEIGHT_OFFSET 16
+#define TEGRA_VI_CSI_IMAGE_SIZE_WC 0x01c
+#define TEGRA_VI_CSI_IMAGE_DT 0x020
+#define TEGRA_VI_CSI_SURFACE0_OFFSET_MSB 0x024
+#define TEGRA_VI_CSI_SURFACE0_OFFSET_LSB 0x028
+#define TEGRA_VI_CSI_SURFACE1_OFFSET_MSB 0x02c
+#define TEGRA_VI_CSI_SURFACE1_OFFSET_LSB 0x030
+#define TEGRA_VI_CSI_SURFACE2_OFFSET_MSB 0x034
+#define TEGRA_VI_CSI_SURFACE2_OFFSET_LSB 0x038
+#define TEGRA_VI_CSI_SURFACE0_STRIDE 0x054
+#define TEGRA_VI_CSI_SURFACE1_STRIDE 0x058
+#define TEGRA_VI_CSI_SURFACE2_STRIDE 0x05c
+#define TEGRA_VI_CSI_SURFACE_HEIGHT0 0x060
+#define TEGRA_VI_CSI_ERROR_STATUS 0x084
+
+/* Tegra210 CSI Pixel Parser registers: Starts from 0x838, offset 0x0 */
+#define TEGRA_CSI_INPUT_STREAM_CONTROL 0x000
+#define CSI_SKIP_PACKET_THRESHOLD_OFFSET 16
+#define TEGRA_CSI_PIXEL_STREAM_CONTROL0 0x004
+#define CSI_PP_PACKET_HEADER_SENT BIT(4)
+#define CSI_PP_DATA_IDENTIFIER_ENABLE BIT(5)
+#define CSI_PP_WORD_COUNT_SELECT_HEADER BIT(6)
+#define CSI_PP_CRC_CHECK_ENABLE BIT(7)
+#define CSI_PP_WC_CHECK BIT(8)
+#define CSI_PP_OUTPUT_FORMAT_STORE (0x3 << 16)
+#define CSI_PPA_PAD_LINE_NOPAD (0x2 << 24)
+#define CSI_PP_HEADER_EC_DISABLE (0x1 << 27)
+#define CSI_PPA_PAD_FRAME_NOPAD (0x2 << 28)
+#define TEGRA_CSI_PIXEL_STREAM_CONTROL1 0x008
+#define CSI_PP_TOP_FIELD_FRAME_OFFSET 0
+#define CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET 4
+#define TEGRA_CSI_PIXEL_STREAM_GAP 0x00c
+#define PP_FRAME_MIN_GAP_OFFSET 16
+#define TEGRA_CSI_PIXEL_STREAM_PP_COMMAND 0x010
+#define CSI_PP_ENABLE 0x1
+#define CSI_PP_DISABLE 0x2
+#define CSI_PP_RST 0x3
+#define CSI_PP_SINGLE_SHOT_ENABLE (0x1 << 2)
+#define CSI_PP_START_MARKER_FRAME_MAX_OFFSET 12
+#define TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME 0x014
+#define TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK 0x018
+#define TEGRA_CSI_PIXEL_PARSER_STATUS 0x01c
+
+/* Tegra210 CSI PHY registers */
+/* CSI_PHY_CIL_COMMAND_0 offset 0x0d0 from TEGRA_CSI_PIXEL_PARSER_0_BASE */
+#define TEGRA_CSI_PHY_CIL_COMMAND 0x0d0
+#define CSI_A_PHY_CIL_NOP 0x0
+#define CSI_A_PHY_CIL_ENABLE 0x1
+#define CSI_A_PHY_CIL_DISABLE 0x2
+#define CSI_A_PHY_CIL_ENABLE_MASK 0x3
+#define CSI_B_PHY_CIL_NOP (0x0 << 8)
+#define CSI_B_PHY_CIL_ENABLE (0x1 << 8)
+#define CSI_B_PHY_CIL_DISABLE (0x2 << 8)
+#define CSI_B_PHY_CIL_ENABLE_MASK (0x3 << 8)
+
+#define TEGRA_CSI_CIL_PAD_CONFIG0 0x000
+#define BRICK_CLOCK_A_4X (0x1 << 16)
+#define BRICK_CLOCK_B_4X (0x2 << 16)
+#define TEGRA_CSI_CIL_PAD_CONFIG1 0x004
+#define TEGRA_CSI_CIL_PHY_CONTROL 0x008
+#define TEGRA_CSI_CIL_INTERRUPT_MASK 0x00c
+#define TEGRA_CSI_CIL_STATUS 0x010
+#define TEGRA_CSI_CILX_STATUS 0x014
+#define TEGRA_CSI_CIL_SW_SENSOR_RESET 0x020
+
+#define TEGRA_CSI_PATTERN_GENERATOR_CTRL 0x000
+#define PG_MODE_OFFSET 2
+#define PG_ENABLE 0x1
+#define PG_DISABLE 0x0
+#define TEGRA_CSI_PG_BLANK 0x004
+#define PG_VBLANK_OFFSET 16
+#define TEGRA_CSI_PG_PHASE 0x008
+#define TEGRA_CSI_PG_RED_FREQ 0x00c
+#define PG_RED_VERT_INIT_FREQ_OFFSET 16
+#define PG_RED_HOR_INIT_FREQ_OFFSET 0
+#define TEGRA_CSI_PG_RED_FREQ_RATE 0x010
+#define TEGRA_CSI_PG_GREEN_FREQ 0x014
+#define PG_GREEN_VERT_INIT_FREQ_OFFSET 16
+#define PG_GREEN_HOR_INIT_FREQ_OFFSET 0
+#define TEGRA_CSI_PG_GREEN_FREQ_RATE 0x018
+#define TEGRA_CSI_PG_BLUE_FREQ 0x01c
+#define PG_BLUE_VERT_INIT_FREQ_OFFSET 16
+#define PG_BLUE_HOR_INIT_FREQ_OFFSET 0
+#define TEGRA_CSI_PG_BLUE_FREQ_RATE 0x020
+#define TEGRA_CSI_PG_AOHDR 0x024
+#define TEGRA_CSI_CSI_SW_STATUS_RESET 0x214
+#define TEGRA_CSI_CLKEN_OVERRIDE 0x218
+
+#define TEGRA210_CSI_PORT_OFFSET 0x34
+#define TEGRA210_CSI_CIL_OFFSET 0x0f4
+#define TEGRA210_CSI_TPG_OFFSET 0x18c
+
+#define CSI_PP_OFFSET(block) ((block) * 0x800)
+#define TEGRA210_VI_CSI_BASE(x) (0x100 + (x) * 0x100)
+
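+/*
+ * A worked example of the address arithmetic above, based only on these
+ * macros: VI channel 2 registers start at TEGRA210_VI_CSI_BASE(2) =
+ * 0x100 + 2 * 0x100 = 0x300. CSI port 3 lives in brick 3 >> 1 = 1, so
+ * its pixel parser block starts at CSI_PP_OFFSET(1) = 0x800 plus a
+ * per-port offset of (3 % CSI_PORTS_PER_BRICK) *
+ * TEGRA210_CSI_PORT_OFFSET = 0x34.
+ */
+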
+/* Tegra210 VI register accessors */
+static void tegra_vi_write(struct tegra_vi_channel *chan, unsigned int addr,
+ u32 val)
+{
+ writel_relaxed(val, chan->vi->iomem + addr);
+}
+
+static u32 tegra_vi_read(struct tegra_vi_channel *chan, unsigned int addr)
+{
+ return readl_relaxed(chan->vi->iomem + addr);
+}
+
+/* Tegra210 VI_CSI register accessors */
+static void vi_csi_write(struct tegra_vi_channel *chan, unsigned int addr,
+ u32 val)
+{
+ void __iomem *vi_csi_base;
+
+ vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(chan->portno);
+
+ writel_relaxed(val, vi_csi_base + addr);
+}
+
+static u32 vi_csi_read(struct tegra_vi_channel *chan, unsigned int addr)
+{
+ void __iomem *vi_csi_base;
+
+ vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(chan->portno);
+
+ return readl_relaxed(vi_csi_base + addr);
+}
+
+/*
+ * Tegra210 VI channel capture operations
+ */
+static int tegra_channel_capture_setup(struct tegra_vi_channel *chan)
+{
+ u32 height = chan->format.height;
+ u32 width = chan->format.width;
+ u32 format = chan->fmtinfo->img_fmt;
+ u32 data_type = chan->fmtinfo->img_dt;
+ u32 word_count = (width * chan->fmtinfo->bit_width) / 8;
+
+ vi_csi_write(chan, TEGRA_VI_CSI_ERROR_STATUS, 0xffffffff);
+ vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_DEF,
+ ((chan->pg_mode ? 0 : 1) << BYPASS_PXL_TRANSFORM_OFFSET) |
+ (format << IMAGE_DEF_FORMAT_OFFSET) |
+ IMAGE_DEF_DEST_MEM);
+ vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_DT, data_type);
+ vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_SIZE_WC, word_count);
+ vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_SIZE,
+ (height << IMAGE_SIZE_HEIGHT_OFFSET) | width);
+ return 0;
+}
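+
+/*
+ * A worked example of the word count programmed above (numbers chosen
+ * purely for illustration): for a 1920-pixel-wide RAW10 frame,
+ * bit_width is 10, so word_count = (1920 * 10) / 8 = 2400 bytes of
+ * payload per line on the CSI bus.
+ */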
+
+static void tegra_channel_vi_soft_reset(struct tegra_vi_channel *chan)
+{
+ /* disable clock gating to enable continuous clock */
+ tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, 0);
+	/*
+	 * Soft reset the memory client interface, pixel format logic,
+	 * sensor control logic and shadow copy logic to bring the VI to
+	 * a clean state.
+	 */
+ vi_csi_write(chan, TEGRA_VI_CSI_SW_RESET, 0xf);
+ usleep_range(100, 200);
+ vi_csi_write(chan, TEGRA_VI_CSI_SW_RESET, 0x0);
+
+ /* enable back VI clock gating */
+ tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
+}
+
+static void tegra_channel_capture_error_recover(struct tegra_vi_channel *chan)
+{
+ struct v4l2_subdev *subdev;
+ u32 val;
+
+	/*
+	 * Recover the VI and CSI hardware blocks when frame start events
+	 * are missing because the source is not streaming, the CSI input
+	 * from the external source is noisy, or too many outstanding
+	 * frame start or MW_ACK_DONE events have caused the CSI and VI
+	 * hardware to hang.
+	 * This helps to get a clean capture for the next frame.
+	 */
+ val = vi_csi_read(chan, TEGRA_VI_CSI_ERROR_STATUS);
+ dev_dbg(&chan->video.dev, "TEGRA_VI_CSI_ERROR_STATUS 0x%08x\n", val);
+ vi_csi_write(chan, TEGRA_VI_CSI_ERROR_STATUS, val);
+
+ val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
+ dev_dbg(&chan->video.dev,
+ "TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x%08x\n", val);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
+
+ /* recover VI by issuing software reset and re-setup for capture */
+ tegra_channel_vi_soft_reset(chan);
+ tegra_channel_capture_setup(chan);
+
+ /* recover CSI block */
+ subdev = tegra_channel_get_remote_subdev(chan);
+ tegra_csi_error_recover(subdev);
+}
+
+static struct tegra_channel_buffer *
+dequeue_buf_done(struct tegra_vi_channel *chan)
+{
+ struct tegra_channel_buffer *buf = NULL;
+
+ spin_lock(&chan->done_lock);
+ if (list_empty(&chan->done)) {
+ spin_unlock(&chan->done_lock);
+ return NULL;
+ }
+
+ buf = list_first_entry(&chan->done,
+ struct tegra_channel_buffer, queue);
+ if (buf)
+ list_del_init(&buf->queue);
+ spin_unlock(&chan->done_lock);
+
+ return buf;
+}
+
+static void release_buffer(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb = &buf->buf;
+
+ vb->sequence = chan->sequence++;
+ vb->field = V4L2_FIELD_NONE;
+ vb->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vb->vb2_buf, state);
+}
+
+static int tegra_channel_capture_frame(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ u32 thresh, value, frame_start, mw_ack_done;
+ int bytes_per_line = chan->format.bytesperline;
+ int err;
+
+ /* program buffer address by using surface 0 */
+ vi_csi_write(chan, TEGRA_VI_CSI_SURFACE0_OFFSET_MSB,
+ (u64)buf->addr >> 32);
+ vi_csi_write(chan, TEGRA_VI_CSI_SURFACE0_OFFSET_LSB, buf->addr);
+ vi_csi_write(chan, TEGRA_VI_CSI_SURFACE0_STRIDE, bytes_per_line);
+
+	/*
+	 * The Tegra VI block interacts with host1x syncpoints to
+	 * synchronize the programmed capture state with the hardware
+	 * operation. The frame start and memory write acknowledge
+	 * syncpoints each have their own FIFO of depth 2.
+	 *
+	 * Syncpoint trigger conditions set through the VI_INCR_SYNCPT
+	 * register are added to the hardware syncpoint FIFO. When the
+	 * hardware triggers, the condition is removed from the FIFO and
+	 * the counter at the syncpoint index is incremented by the
+	 * hardware, so software can wait for the counter to reach the
+	 * threshold to synchronize frame capture with the hardware
+	 * capture events.
+	 */
+
+ /* increase channel syncpoint threshold for FRAME_START */
+ thresh = host1x_syncpt_incr_max(chan->frame_start_sp, 1);
+
+ /* Program FRAME_START trigger condition syncpt request */
+ frame_start = VI_CSI_PP_FRAME_START(chan->portno);
+ value = VI_CFG_VI_INCR_SYNCPT_COND(frame_start) |
+ host1x_syncpt_id(chan->frame_start_sp);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+
+ /* increase channel syncpoint threshold for MW_ACK_DONE */
+ buf->mw_ack_sp_thresh = host1x_syncpt_incr_max(chan->mw_ack_sp, 1);
+
+ /* Program MW_ACK_DONE trigger condition syncpt request */
+ mw_ack_done = VI_CSI_MW_ACK_DONE(chan->portno);
+ value = VI_CFG_VI_INCR_SYNCPT_COND(mw_ack_done) |
+ host1x_syncpt_id(chan->mw_ack_sp);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+
+ /* enable single shot capture */
+ vi_csi_write(chan, TEGRA_VI_CSI_SINGLE_SHOT, SINGLE_SHOT_CAPTURE);
+
+ /* wait for syncpt counter to reach frame start event threshold */
+ err = host1x_syncpt_wait(chan->frame_start_sp, thresh,
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
+ if (err) {
+ dev_err_ratelimited(&chan->video.dev,
+ "frame start syncpt timeout: %d\n", err);
+		/* increment syncpoint counter for timed-out events */
+ host1x_syncpt_incr(chan->frame_start_sp);
+ spin_lock(&chan->sp_incr_lock);
+ host1x_syncpt_incr(chan->mw_ack_sp);
+ spin_unlock(&chan->sp_incr_lock);
+ /* clear errors and recover */
+ tegra_channel_capture_error_recover(chan);
+ release_buffer(chan, buf, VB2_BUF_STATE_ERROR);
+ return err;
+ }
+
+ /* move buffer to capture done queue */
+ spin_lock(&chan->done_lock);
+ list_add_tail(&buf->queue, &chan->done);
+ spin_unlock(&chan->done_lock);
+
+	/* wake up the kthread for capture done */
+ wake_up_interruptible(&chan->done_wait);
+
+ return 0;
+}
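+
+/*
+ * To summarize the flow above: each captured frame performs two host1x
+ * syncpoint handshakes. FRAME_START is armed and waited on
+ * synchronously in tegra_channel_capture_frame(), while MW_ACK_DONE is
+ * only armed there and waited on later by the capture-done kthread via
+ * tegra_channel_capture_done(), so the next capture can be started
+ * while the previous frame is still being written out to memory.
+ */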
+
+static void tegra_channel_capture_done(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+ u32 value;
+ int ret;
+
+ /* wait for syncpt counter to reach MW_ACK_DONE event threshold */
+ ret = host1x_syncpt_wait(chan->mw_ack_sp, buf->mw_ack_sp_thresh,
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
+ if (ret) {
+ dev_err_ratelimited(&chan->video.dev,
+ "MW_ACK_DONE syncpt timeout: %d\n", ret);
+ state = VB2_BUF_STATE_ERROR;
+		/* increment syncpoint counter for timed-out event */
+ spin_lock(&chan->sp_incr_lock);
+ host1x_syncpt_incr(chan->mw_ack_sp);
+ spin_unlock(&chan->sp_incr_lock);
+ }
+
+ release_buffer(chan, buf, state);
+}
+
+static int chan_capture_kthread_start(void *data)
+{
+ struct tegra_vi_channel *chan = data;
+ struct tegra_channel_buffer *buf;
+ int err = 0;
+
+ while (1) {
+		/*
+		 * A non-zero error means the source is not streaming, so
+		 * do not dequeue buffers on error; let the thread sleep
+		 * until the kthread stop signal is received.
+		 */
+ wait_event_interruptible(chan->start_wait,
+ kthread_should_stop() ||
+ (!list_empty(&chan->capture) &&
+ !err));
+
+ if (kthread_should_stop())
+ break;
+
+ /* dequeue the buffer and start capture */
+ spin_lock(&chan->start_lock);
+ if (list_empty(&chan->capture)) {
+ spin_unlock(&chan->start_lock);
+ continue;
+ }
+
+ buf = list_first_entry(&chan->capture,
+ struct tegra_channel_buffer, queue);
+ list_del_init(&buf->queue);
+ spin_unlock(&chan->start_lock);
+
+ err = tegra_channel_capture_frame(chan, buf);
+ if (err)
+ vb2_queue_error(&chan->queue);
+ }
+
+ return 0;
+}
+
+static int chan_capture_kthread_finish(void *data)
+{
+ struct tegra_vi_channel *chan = data;
+ struct tegra_channel_buffer *buf;
+
+ while (1) {
+ wait_event_interruptible(chan->done_wait,
+ !list_empty(&chan->done) ||
+ kthread_should_stop());
+
+ /* dequeue buffers and finish capture */
+ buf = dequeue_buf_done(chan);
+ while (buf) {
+ tegra_channel_capture_done(chan, buf);
+ buf = dequeue_buf_done(chan);
+ }
+
+ if (kthread_should_stop())
+ break;
+ }
+
+ return 0;
+}
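+
+/*
+ * An observation on the structure above (not additional behavior): the
+ * two kthreads form a two-stage pipeline. The start thread consumes
+ * buffers from chan->capture, programs a single-shot capture and waits
+ * for FRAME_START, then hands each buffer to chan->done; the finish
+ * thread drains chan->done, waiting for MW_ACK_DONE before returning
+ * each buffer to userspace.
+ */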
+
+static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+ struct media_pipeline *pipe = &chan->video.pipe;
+ u32 val;
+ int ret;
+
+ tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
+
+ /* clear errors */
+ val = vi_csi_read(chan, TEGRA_VI_CSI_ERROR_STATUS);
+ vi_csi_write(chan, TEGRA_VI_CSI_ERROR_STATUS, val);
+
+ val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
+
+	/*
+	 * A full syncpoint FIFO stalls the host interface.
+	 * Setting NO_STALL drops INCR_SYNCPT methods when the FIFOs are
+	 * full and sets the corresponding condition bits in the
+	 * INCR_SYNCPT_ERROR register, which allows software to perform
+	 * error recovery.
+	 */
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL,
+ VI_INCR_SYNCPT_NO_STALL);
+
+ /* start the pipeline */
+ ret = media_pipeline_start(&chan->video.entity, pipe);
+ if (ret < 0)
+ goto error_pipeline_start;
+
+ tegra_channel_capture_setup(chan);
+ ret = tegra_channel_set_stream(chan, true);
+ if (ret < 0)
+ goto error_set_stream;
+
+ chan->sequence = 0;
+
+	/* start kthreads to capture data into buffers and return them */
+ chan->kthread_start_capture = kthread_run(chan_capture_kthread_start,
+ chan, "%s:0",
+ chan->video.name);
+ if (IS_ERR(chan->kthread_start_capture)) {
+ ret = PTR_ERR(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ dev_err(&chan->video.dev,
+ "failed to run capture start kthread: %d\n", ret);
+ goto error_kthread_start;
+ }
+
+ chan->kthread_finish_capture = kthread_run(chan_capture_kthread_finish,
+ chan, "%s:1",
+ chan->video.name);
+ if (IS_ERR(chan->kthread_finish_capture)) {
+ ret = PTR_ERR(chan->kthread_finish_capture);
+ chan->kthread_finish_capture = NULL;
+ dev_err(&chan->video.dev,
+ "failed to run capture finish kthread: %d\n", ret);
+ goto error_kthread_done;
+ }
+
+ return 0;
+
+error_kthread_done:
+ kthread_stop(chan->kthread_start_capture);
+error_kthread_start:
+ tegra_channel_set_stream(chan, false);
+error_set_stream:
+ media_pipeline_stop(&chan->video.entity);
+error_pipeline_start:
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void tegra210_vi_stop_streaming(struct vb2_queue *vq)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ if (chan->kthread_start_capture) {
+ kthread_stop(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ }
+
+ if (chan->kthread_finish_capture) {
+ kthread_stop(chan->kthread_finish_capture);
+ chan->kthread_finish_capture = NULL;
+ }
+
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR);
+ tegra_channel_set_stream(chan, false);
+ media_pipeline_stop(&chan->video.entity);
+}
+
+/*
+ * Tegra210 VI pixel memory format enum.
+ * These format enum values get programmed into the corresponding Tegra
+ * VI channel register bits.
+ */
+enum tegra210_image_format {
+ TEGRA210_IMAGE_FORMAT_T_L8 = 16,
+
+ TEGRA210_IMAGE_FORMAT_T_R16_I = 32,
+ TEGRA210_IMAGE_FORMAT_T_B5G6R5,
+ TEGRA210_IMAGE_FORMAT_T_R5G6B5,
+ TEGRA210_IMAGE_FORMAT_T_A1B5G5R5,
+ TEGRA210_IMAGE_FORMAT_T_A1R5G5B5,
+ TEGRA210_IMAGE_FORMAT_T_B5G5R5A1,
+ TEGRA210_IMAGE_FORMAT_T_R5G5B5A1,
+ TEGRA210_IMAGE_FORMAT_T_A4B4G4R4,
+ TEGRA210_IMAGE_FORMAT_T_A4R4G4B4,
+ TEGRA210_IMAGE_FORMAT_T_B4G4R4A4,
+ TEGRA210_IMAGE_FORMAT_T_R4G4B4A4,
+
+ TEGRA210_IMAGE_FORMAT_T_A8B8G8R8 = 64,
+ TEGRA210_IMAGE_FORMAT_T_A8R8G8B8,
+ TEGRA210_IMAGE_FORMAT_T_B8G8R8A8,
+ TEGRA210_IMAGE_FORMAT_T_R8G8B8A8,
+ TEGRA210_IMAGE_FORMAT_T_A2B10G10R10,
+ TEGRA210_IMAGE_FORMAT_T_A2R10G10B10,
+ TEGRA210_IMAGE_FORMAT_T_B10G10R10A2,
+ TEGRA210_IMAGE_FORMAT_T_R10G10B10A2,
+
+ TEGRA210_IMAGE_FORMAT_T_A8Y8U8V8 = 193,
+ TEGRA210_IMAGE_FORMAT_T_V8U8Y8A8,
+
+ TEGRA210_IMAGE_FORMAT_T_A2Y10U10V10 = 197,
+ TEGRA210_IMAGE_FORMAT_T_V10U10Y10A2,
+ TEGRA210_IMAGE_FORMAT_T_Y8_U8__Y8_V8,
+ TEGRA210_IMAGE_FORMAT_T_Y8_V8__Y8_U8,
+ TEGRA210_IMAGE_FORMAT_T_U8_Y8__V8_Y8,
+ TEGRA210_IMAGE_FORMAT_T_V8_Y8__U8_Y8,
+
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N444 = 224,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N444,
+ TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N444,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N422,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N422,
+ TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N422,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N420,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N420,
+ TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N420,
+ TEGRA210_IMAGE_FORMAT_T_X2LC10LB10LA10,
+ TEGRA210_IMAGE_FORMAT_T_A2R6R6R6R6R6,
+};
+
+#define TEGRA210_VIDEO_FMT(DATA_TYPE, BIT_WIDTH, MBUS_CODE, BPP, \
+ FORMAT, FOURCC) \
+{ \
+ TEGRA_IMAGE_DT_##DATA_TYPE, \
+ BIT_WIDTH, \
+ MEDIA_BUS_FMT_##MBUS_CODE, \
+ BPP, \
+ TEGRA210_IMAGE_FORMAT_##FORMAT, \
+ V4L2_PIX_FMT_##FOURCC, \
+}
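+
+/*
+ * For reference, one entry of the table below expands (purely
+ * textually) as:
+ * TEGRA210_VIDEO_FMT(RAW8, 8, SRGGB8_1X8, 1, T_L8, SRGGB8) becomes
+ * { TEGRA_IMAGE_DT_RAW8, 8, MEDIA_BUS_FMT_SRGGB8_1X8, 1,
+ *   TEGRA210_IMAGE_FORMAT_T_L8, V4L2_PIX_FMT_SRGGB8 }.
+ */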
+
+/* Tegra210 supported video formats */
+static const struct tegra_video_format tegra210_video_formats[] = {
+ /* RAW 8 */
+ TEGRA210_VIDEO_FMT(RAW8, 8, SRGGB8_1X8, 1, T_L8, SRGGB8),
+ TEGRA210_VIDEO_FMT(RAW8, 8, SGRBG8_1X8, 1, T_L8, SGRBG8),
+ TEGRA210_VIDEO_FMT(RAW8, 8, SGBRG8_1X8, 1, T_L8, SGBRG8),
+ TEGRA210_VIDEO_FMT(RAW8, 8, SBGGR8_1X8, 1, T_L8, SBGGR8),
+ /* RAW 10 */
+ TEGRA210_VIDEO_FMT(RAW10, 10, SRGGB10_1X10, 2, T_R16_I, SRGGB10),
+ TEGRA210_VIDEO_FMT(RAW10, 10, SGRBG10_1X10, 2, T_R16_I, SGRBG10),
+ TEGRA210_VIDEO_FMT(RAW10, 10, SGBRG10_1X10, 2, T_R16_I, SGBRG10),
+ TEGRA210_VIDEO_FMT(RAW10, 10, SBGGR10_1X10, 2, T_R16_I, SBGGR10),
+ /* RAW 12 */
+ TEGRA210_VIDEO_FMT(RAW12, 12, SRGGB12_1X12, 2, T_R16_I, SRGGB12),
+ TEGRA210_VIDEO_FMT(RAW12, 12, SGRBG12_1X12, 2, T_R16_I, SGRBG12),
+ TEGRA210_VIDEO_FMT(RAW12, 12, SGBRG12_1X12, 2, T_R16_I, SGBRG12),
+ TEGRA210_VIDEO_FMT(RAW12, 12, SBGGR12_1X12, 2, T_R16_I, SBGGR12),
+ /* RGB888 */
+ TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X24, 4, T_A8R8G8B8, RGB24),
+ TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X32_PADHI, 4, T_A8B8G8R8,
+ XBGR32),
+ /* YUV422 */
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 2, T_U8_Y8__V8_Y8, UYVY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_1X16, 2, T_V8_Y8__U8_Y8, VYUY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_1X16, 2, T_Y8_U8__Y8_V8, YUYV),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_1X16, 2, T_Y8_V8__Y8_U8, YVYU),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 1, T_Y8__V8U8_N422, NV16),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_2X8, 2, T_U8_Y8__V8_Y8, UYVY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_2X8, 2, T_V8_Y8__U8_Y8, VYUY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_2X8, 2, T_Y8_U8__Y8_V8, YUYV),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_2X8, 2, T_Y8_V8__Y8_U8, YVYU),
+};
+
+/* Tegra210 VI operations */
+static const struct tegra_vi_ops tegra210_vi_ops = {
+ .vi_start_streaming = tegra210_vi_start_streaming,
+ .vi_stop_streaming = tegra210_vi_stop_streaming,
+};
+
+/* Tegra210 VI SoC data */
+const struct tegra_vi_soc tegra210_vi_soc = {
+ .video_formats = tegra210_video_formats,
+ .nformats = ARRAY_SIZE(tegra210_video_formats),
+ .ops = &tegra210_vi_ops,
+ .hw_revision = 3,
+ .vi_max_channels = 6,
+ .vi_max_clk_hz = 499200000,
+};
+
+/* Tegra210 CSI PHY register accessors */
+static void csi_write(struct tegra_csi *csi, u8 portno, unsigned int addr,
+ u32 val)
+{
+ void __iomem *csi_pp_base;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+
+ writel_relaxed(val, csi_pp_base + addr);
+}
+
+/* Tegra210 CSI pixel parser register accessors */
+static void pp_write(struct tegra_csi *csi, u8 portno, u32 addr, u32 val)
+{
+ void __iomem *csi_pp_base;
+ unsigned int offset;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ writel_relaxed(val, csi_pp_base + offset + addr);
+}
+
+static u32 pp_read(struct tegra_csi *csi, u8 portno, u32 addr)
+{
+ void __iomem *csi_pp_base;
+ unsigned int offset;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ return readl_relaxed(csi_pp_base + offset + addr);
+}
+
+/* Tegra210 CSI CIL A/B port register accessors */
+static void cil_write(struct tegra_csi *csi, u8 portno, u32 addr, u32 val)
+{
+ void __iomem *csi_cil_base;
+ unsigned int offset;
+
+ csi_cil_base = csi->iomem + CSI_PP_OFFSET(portno >> 1) +
+ TEGRA210_CSI_CIL_OFFSET;
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ writel_relaxed(val, csi_cil_base + offset + addr);
+}
+
+static u32 cil_read(struct tegra_csi *csi, u8 portno, u32 addr)
+{
+ void __iomem *csi_cil_base;
+ unsigned int offset;
+
+ csi_cil_base = csi->iomem + CSI_PP_OFFSET(portno >> 1) +
+ TEGRA210_CSI_CIL_OFFSET;
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ return readl_relaxed(csi_cil_base + offset + addr);
+}
+
+/* Tegra210 CSI test pattern generator register accessor */
+static void tpg_write(struct tegra_csi *csi, u8 portno, unsigned int addr,
+ u32 val)
+{
+ void __iomem *csi_pp_base;
+ unsigned int offset;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET +
+ TEGRA210_CSI_TPG_OFFSET;
+
+ writel_relaxed(val, csi_pp_base + offset + addr);
+}
+
+/*
+ * Tegra210 CSI operations
+ */
+static void tegra210_csi_error_recover(struct tegra_csi_channel *csi_chan)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ unsigned int portno = csi_chan->csi_port_num;
+ u32 val;
+
+	/*
+	 * Recover the CSI hardware from capture errors by issuing a
+	 * software reset to the CSI CIL sensor and pixel parser, and
+	 * clear the errors so the next streaming session starts with a
+	 * clean capture.
+	 */
+ val = pp_read(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_PIXEL_PARSER_STATUS 0x%08x\n", val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CIL_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CIL_STATUS 0x%08x\n", val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CILX_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CILX_STATUS 0x%08x\n", val);
+
+ if (csi_chan->numlanes == 4) {
+ /* reset CSI CIL sensor */
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
+		/*
+		 * SW_STATUS_RESET resets all status bits of the PPA, PPB,
+		 * CILA and CILB status registers and the debug counters,
+		 * so it can be used only when the CSI brick is in x4 mode.
+		 */
+ csi_write(csi, portno, TEGRA_CSI_CSI_SW_STATUS_RESET, 0x1);
+
+ /* sleep for 20 clock cycles to drain the FIFO */
+ usleep_range(10, 20);
+
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
+ csi_write(csi, portno, TEGRA_CSI_CSI_SW_STATUS_RESET, 0x0);
+ } else {
+ /* reset CSICIL sensor */
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
+ usleep_range(10, 20);
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
+
+ /* clear the errors */
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS,
+ 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, 0xffffffff);
+ }
+}
+
+static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ unsigned int portno = csi_chan->csi_port_num;
+ u32 val;
+
+ csi_write(csi, portno, TEGRA_CSI_CLKEN_OVERRIDE, 0);
+
+ /* clean up status */
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
+
+ /* CIL PHY registers setup */
+ cil_write(csi, portno, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
+ cil_write(csi, portno, TEGRA_CSI_CIL_PHY_CONTROL, 0xa);
+
+	/*
+	 * The CSI unit allows connecting up to six cameras and is
+	 * organized as three identical instances of two MIPI support
+	 * blocks, each with a separate 4-lane interface that can be
+	 * configured either as a single 4-lane camera or as a dual
+	 * camera with 2 lanes available to each.
+	 */
+ if (csi_chan->numlanes == 4) {
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_STATUS, 0xffffffff);
+ cil_write(csi, portno + 1, TEGRA_CSI_CILX_STATUS, 0xffffffff);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
+
+ cil_write(csi, portno, TEGRA_CSI_CIL_PAD_CONFIG0,
+ BRICK_CLOCK_A_4X);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_PHY_CONTROL, 0xa);
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND,
+ CSI_A_PHY_CIL_ENABLE | CSI_B_PHY_CIL_ENABLE);
+ } else {
+ val = ((portno & 1) == PORT_A) ?
+ CSI_A_PHY_CIL_ENABLE | CSI_B_PHY_CIL_NOP :
+ CSI_B_PHY_CIL_ENABLE | CSI_A_PHY_CIL_NOP;
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND, val);
+ }
+
+ /* CSI pixel parser registers setup */
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
+ (0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
+ CSI_PP_SINGLE_SHOT_ENABLE | CSI_PP_RST);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK, 0x0);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_CONTROL0,
+ CSI_PP_PACKET_HEADER_SENT |
+ CSI_PP_DATA_IDENTIFIER_ENABLE |
+ CSI_PP_WORD_COUNT_SELECT_HEADER |
+ CSI_PP_CRC_CHECK_ENABLE | CSI_PP_WC_CHECK |
+ CSI_PP_OUTPUT_FORMAT_STORE | CSI_PPA_PAD_LINE_NOPAD |
+ CSI_PP_HEADER_EC_DISABLE | CSI_PPA_PAD_FRAME_NOPAD |
+ (portno & 1));
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_CONTROL1,
+ (0x1 << CSI_PP_TOP_FIELD_FRAME_OFFSET) |
+ (0x1 << CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET));
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_GAP,
+ 0x14 << PP_FRAME_MIN_GAP_OFFSET);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME, 0x0);
+ pp_write(csi, portno, TEGRA_CSI_INPUT_STREAM_CONTROL,
+ (0x3f << CSI_SKIP_PACKET_THRESHOLD_OFFSET) |
+ (csi_chan->numlanes - 1));
+
+ /* TPG setup */
+ if (csi_chan->pg_mode) {
+ tpg_write(csi, portno, TEGRA_CSI_PATTERN_GENERATOR_CTRL,
+ ((csi_chan->pg_mode - 1) << PG_MODE_OFFSET) |
+ PG_ENABLE);
+ tpg_write(csi, portno, TEGRA_CSI_PG_BLANK,
+ csi_chan->v_blank << PG_VBLANK_OFFSET |
+ csi_chan->h_blank);
+ tpg_write(csi, portno, TEGRA_CSI_PG_PHASE, 0x0);
+ tpg_write(csi, portno, TEGRA_CSI_PG_RED_FREQ,
+ (0x10 << PG_RED_VERT_INIT_FREQ_OFFSET) |
+ (0x10 << PG_RED_HOR_INIT_FREQ_OFFSET));
+ tpg_write(csi, portno, TEGRA_CSI_PG_RED_FREQ_RATE, 0x0);
+ tpg_write(csi, portno, TEGRA_CSI_PG_GREEN_FREQ,
+ (0x10 << PG_GREEN_VERT_INIT_FREQ_OFFSET) |
+ (0x10 << PG_GREEN_HOR_INIT_FREQ_OFFSET));
+ tpg_write(csi, portno, TEGRA_CSI_PG_GREEN_FREQ_RATE, 0x0);
+ tpg_write(csi, portno, TEGRA_CSI_PG_BLUE_FREQ,
+ (0x10 << PG_BLUE_VERT_INIT_FREQ_OFFSET) |
+ (0x10 << PG_BLUE_HOR_INIT_FREQ_OFFSET));
+ tpg_write(csi, portno, TEGRA_CSI_PG_BLUE_FREQ_RATE, 0x0);
+ }
+
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
+ (0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
+ CSI_PP_SINGLE_SHOT_ENABLE | CSI_PP_ENABLE);
+
+ return 0;
+}
+
+static void tegra210_csi_stop_streaming(struct tegra_csi_channel *csi_chan)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ unsigned int portno = csi_chan->csi_port_num;
+ u32 val;
+
+ val = pp_read(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS);
+
+ dev_dbg(csi->dev, "TEGRA_CSI_PIXEL_PARSER_STATUS 0x%08x\n", val);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS, val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CIL_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CIL_STATUS 0x%08x\n", val);
+ cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CILX_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CILX_STATUS 0x%08x\n", val);
+ cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, val);
+
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
+ (0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
+ CSI_PP_DISABLE);
+
+ if (csi_chan->pg_mode) {
+ tpg_write(csi, portno, TEGRA_CSI_PATTERN_GENERATOR_CTRL,
+ PG_DISABLE);
+ return;
+ }
+
+ if (csi_chan->numlanes == 4) {
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND,
+ CSI_A_PHY_CIL_DISABLE |
+ CSI_B_PHY_CIL_DISABLE);
+ } else {
+ val = ((portno & 1) == PORT_A) ?
+ CSI_A_PHY_CIL_DISABLE | CSI_B_PHY_CIL_NOP :
+ CSI_B_PHY_CIL_DISABLE | CSI_A_PHY_CIL_NOP;
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND, val);
+ }
+}
+
+/*
+ * Tegra210 CSI TPG frame rate table with horizontal and vertical
+ * blanking intervals for the corresponding format and resolution.
+ * The blanking intervals are values tuned by the design team for the
+ * maximum TPG clock rate.
+ */
+static const struct tpg_framerate tegra210_tpg_frmrate_table[] = {
+ {
+ .frmsize = { 1280, 720 },
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .framerate = 120,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 1920, 1080 },
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .framerate = 60,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 3840, 2160 },
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .framerate = 20,
+ .h_blank = 8,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 1280, 720 },
+ .code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ .framerate = 60,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 1920, 1080 },
+ .code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ .framerate = 30,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 3840, 2160 },
+ .code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ .framerate = 8,
+ .h_blank = 8,
+ .v_blank = 8,
+ },
+};
+
+static const char * const tegra210_csi_cil_clks[] = {
+ "csi",
+ "cilab",
+ "cilcd",
+ "cile",
+ "csi_tpg",
+};
+
+/* Tegra210 CSI operations */
+static const struct tegra_csi_ops tegra210_csi_ops = {
+ .csi_start_streaming = tegra210_csi_start_streaming,
+ .csi_stop_streaming = tegra210_csi_stop_streaming,
+ .csi_err_recover = tegra210_csi_error_recover,
+};
+
+/* Tegra210 CSI SoC data */
+const struct tegra_csi_soc tegra210_csi_soc = {
+ .ops = &tegra210_csi_ops,
+ .csi_max_channels = 6,
+ .clk_names = tegra210_csi_cil_clks,
+ .num_clks = ARRAY_SIZE(tegra210_csi_cil_clks),
+ .tpg_frmrate_table = tegra210_tpg_frmrate_table,
+ .tpg_frmrate_table_size = ARRAY_SIZE(tegra210_tpg_frmrate_table),
+};
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
new file mode 100644
index 000000000000..1b5e660155f5
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -0,0 +1,1074 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/lcm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <soc/tegra/pmc.h>
+
+#include "vi.h"
+#include "video.h"
+
+#define SURFACE_ALIGN_BYTES 64
+#define MAX_CID_CONTROLS 1
+
+static const struct tegra_video_format tegra_default_format = {
+ .img_dt = TEGRA_IMAGE_DT_RAW10,
+ .bit_width = 10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .bpp = 2,
+ .img_fmt = TEGRA_IMAGE_FORMAT_DEF,
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+};
+
+static inline struct tegra_vi *
+host1x_client_to_vi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_vi, client);
+}
+
+static inline struct tegra_channel_buffer *
+to_tegra_channel_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct tegra_channel_buffer, buf);
+}
+
+static int tegra_get_format_idx_by_code(struct tegra_vi *vi,
+ unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < vi->soc->nformats; ++i) {
+ if (vi->soc->video_formats[i].code == code)
+ return i;
+ }
+
+ return -1;
+}
+
+static u32 tegra_get_format_fourcc_by_idx(struct tegra_vi *vi,
+ unsigned int index)
+{
+ if (index >= vi->soc->nformats)
+ return -EINVAL;
+
+ return vi->soc->video_formats[index].fourcc;
+}
+
+static const struct tegra_video_format *
+tegra_get_format_by_fourcc(struct tegra_vi *vi, u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vi->soc->nformats; ++i) {
+ if (vi->soc->video_formats[i].fourcc == fourcc)
+ return &vi->soc->video_formats[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * videobuf2 queue operations
+ */
+static int tegra_channel_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ if (*nplanes)
+ return sizes[0] < chan->format.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = chan->format.sizeimage;
+ alloc_devs[0] = chan->vi->dev;
+
+ return 0;
+}
+
+static int tegra_channel_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf);
+ unsigned long size = chan->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(chan->video.v4l2_dev,
+ "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ buf->chan = chan;
+ buf->addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ return 0;
+}
+
+static void tegra_channel_buffer_queue(struct vb2_buffer *vb)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf);
+
+ /* put buffer into the capture queue */
+ spin_lock(&chan->start_lock);
+ list_add_tail(&buf->queue, &chan->capture);
+ spin_unlock(&chan->start_lock);
+
+	/* wake up the kthread for capture */
+ wake_up_interruptible(&chan->start_wait);
+}
+
+struct v4l2_subdev *
+tegra_channel_get_remote_subdev(struct tegra_vi_channel *chan)
+{
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ struct media_entity *entity;
+
+ pad = media_entity_remote_pad(&chan->pad);
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ return subdev;
+}
+
+int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ /* stream CSI */
+ subdev = tegra_channel_get_remote_subdev(chan);
+ ret = v4l2_subdev_call(subdev, video, s_stream, on);
+ if (on && ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ return 0;
+}
+
+void tegra_channel_release_buffers(struct tegra_vi_channel *chan,
+ enum vb2_buffer_state state)
+{
+ struct tegra_channel_buffer *buf, *nbuf;
+
+ spin_lock(&chan->start_lock);
+ list_for_each_entry_safe(buf, nbuf, &chan->capture, queue) {
+ vb2_buffer_done(&buf->buf.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+ spin_unlock(&chan->start_lock);
+
+ spin_lock(&chan->done_lock);
+ list_for_each_entry_safe(buf, nbuf, &chan->done, queue) {
+ vb2_buffer_done(&buf->buf.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+ spin_unlock(&chan->done_lock);
+}
+
+static int tegra_channel_start_streaming(struct vb2_queue *vq, u32 count)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+ int ret;
+
+ ret = pm_runtime_get_sync(chan->vi->dev);
+ if (ret < 0) {
+ dev_err(chan->vi->dev, "failed to get runtime PM: %d\n", ret);
+ pm_runtime_put_noidle(chan->vi->dev);
+ return ret;
+ }
+
+ ret = chan->vi->ops->vi_start_streaming(vq, count);
+ if (ret < 0)
+ pm_runtime_put(chan->vi->dev);
+
+ return ret;
+}
+
+static void tegra_channel_stop_streaming(struct vb2_queue *vq)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ chan->vi->ops->vi_stop_streaming(vq);
+ pm_runtime_put(chan->vi->dev);
+}
+
+static const struct vb2_ops tegra_channel_queue_qops = {
+ .queue_setup = tegra_channel_queue_setup,
+ .buf_prepare = tegra_channel_buffer_prepare,
+ .buf_queue = tegra_channel_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = tegra_channel_start_streaming,
+ .stop_streaming = tegra_channel_stop_streaming,
+};
+
+/*
+ * V4L2 ioctl operations
+ */
+static int tegra_channel_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ strscpy(cap->driver, "tegra-video", sizeof(cap->driver));
+ strscpy(cap->card, chan->video.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(chan->vi->dev));
+
+ return 0;
+}
+
+static int tegra_channel_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ return v4l2_g_parm_cap(&chan->video, subdev, a);
+}
+
+static int tegra_channel_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ return v4l2_s_parm_cap(&chan->video, subdev, a);
+}
+
+static int tegra_channel_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *sizes)
+{
+ int ret;
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = sizes->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, sizes->pixel_format);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ fse.code = fmtinfo->code;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
+ sizes->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ sizes->discrete.width = fse.max_width;
+ sizes->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int tegra_channel_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *ivals)
+{
+ int ret;
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = ivals->index,
+ .width = ivals->width,
+ .height = ivals->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, ivals->pixel_format);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ fie.code = fmtinfo->code;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ ivals->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ ivals->discrete.numerator = fie.interval.numerator;
+ ivals->discrete.denominator = fie.interval.denominator;
+
+ return 0;
+}
+
+static int tegra_channel_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ unsigned int index = 0, i;
+ unsigned long *fmts_bitmap = chan->tpg_fmts_bitmap;
+
+ if (f->index >= bitmap_weight(fmts_bitmap, MAX_FORMAT_NUM))
+ return -EINVAL;
+
+ for (i = 0; i < f->index + 1; i++, index++)
+ index = find_next_bit(fmts_bitmap, MAX_FORMAT_NUM, index);
+
+ f->pixelformat = tegra_get_format_fourcc_by_idx(chan->vi, index - 1);
+
+ return 0;
+}
+
+static int tegra_channel_get_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ format->fmt.pix = chan->format;
+
+ return 0;
+}
+
+static void tegra_channel_fmt_align(struct tegra_vi_channel *chan,
+ struct v4l2_pix_format *pix,
+ unsigned int bpp)
+{
+ unsigned int align;
+ unsigned int min_width;
+ unsigned int max_width;
+ unsigned int width;
+ unsigned int min_bpl;
+ unsigned int max_bpl;
+ unsigned int bpl;
+
+ /*
+ * The transfer alignment requirements are expressed in bytes. Compute
+ * minimum and maximum values, clamp the requested width and convert
+ * it back to pixels. Use bytesperline to adjust the width.
+ */
+ align = lcm(SURFACE_ALIGN_BYTES, bpp);
+ min_width = roundup(TEGRA_MIN_WIDTH, align);
+ max_width = rounddown(TEGRA_MAX_WIDTH, align);
+ width = roundup(pix->width * bpp, align);
+
+ pix->width = clamp(width, min_width, max_width) / bpp;
+ pix->height = clamp(pix->height, TEGRA_MIN_HEIGHT, TEGRA_MAX_HEIGHT);
+
+	/*
+	 * Clamp the requested bytes per line value. If the maximum bytes
+	 * per line value is zero, the module doesn't support user
+	 * configurable line sizes. Override the requested value with the
+	 * minimum in that case.
+	 */
+ min_bpl = pix->width * bpp;
+ max_bpl = rounddown(TEGRA_MAX_WIDTH, SURFACE_ALIGN_BYTES);
+ bpl = roundup(pix->bytesperline, SURFACE_ALIGN_BYTES);
+
+ pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
+ pix->sizeimage = pix->bytesperline * pix->height;
+}
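+
+/*
+ * A worked example of the alignment above (illustrative values only):
+ * with bpp = 2, align = lcm(64, 2) = 64 bytes. A requested width of
+ * 1922 pixels becomes roundup(1922 * 2, 64) = 3904 bytes, i.e. 1952
+ * pixels after dividing by bpp; bytesperline is then rounded up to a
+ * multiple of 64 and clamped to at least 1952 * 2 = 3904 bytes.
+ */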
+
+static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ struct v4l2_pix_format *pix)
+{
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev_pad_config *pad_cfg;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ pad_cfg = v4l2_subdev_alloc_pad_config(subdev);
+ if (!pad_cfg)
+ return -ENOMEM;
+	/*
+	 * Retrieve the format information and, if the requested format
+	 * isn't supported, keep the current format.
+	 */
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, pix->pixelformat);
+ if (!fmtinfo) {
+ pix->pixelformat = chan->format.pixelformat;
+ pix->colorspace = chan->format.colorspace;
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi,
+ pix->pixelformat);
+ }
+
+ pix->field = V4L2_FIELD_NONE;
+ fmt.which = V4L2_SUBDEV_FORMAT_TRY;
+ fmt.pad = 0;
+ v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
+ v4l2_subdev_call(subdev, pad, set_fmt, pad_cfg, &fmt);
+ v4l2_fill_pix_format(pix, &fmt.format);
+ tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
+
+ v4l2_subdev_free_pad_config(pad_cfg);
+
+ return 0;
+}
+
+static int tegra_channel_try_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ return __tegra_channel_try_format(chan, &format->fmt.pix);
+}
+
+static int tegra_channel_set_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ struct v4l2_pix_format *pix = &format->fmt.pix;
+ int ret;
+
+ if (vb2_is_busy(&chan->queue))
+ return -EBUSY;
+
+ /* get supported format by try_fmt */
+ ret = __tegra_channel_try_format(chan, pix);
+ if (ret)
+ return ret;
+
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, pix->pixelformat);
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.pad = 0;
+ v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
+ subdev = tegra_channel_get_remote_subdev(chan);
+ v4l2_subdev_call(subdev, pad, set_fmt, NULL, &fmt);
+ v4l2_fill_pix_format(pix, &fmt.format);
+ tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
+
+ chan->format = *pix;
+ chan->fmtinfo = fmtinfo;
+
+ return 0;
+}
+
+static int tegra_channel_enum_input(struct file *file, void *fh,
+ struct v4l2_input *inp)
+{
+	/* currently the driver supports only the internal TPG */
+ if (inp->index)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ strscpy(inp->name, "Tegra TPG", sizeof(inp->name));
+
+ return 0;
+}
+
+static int tegra_channel_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int tegra_channel_s_input(struct file *file, void *priv,
+ unsigned int input)
+{
+ if (input > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops tegra_channel_ioctl_ops = {
+ .vidioc_querycap = tegra_channel_querycap,
+ .vidioc_g_parm = tegra_channel_g_parm,
+ .vidioc_s_parm = tegra_channel_s_parm,
+ .vidioc_enum_framesizes = tegra_channel_enum_framesizes,
+ .vidioc_enum_frameintervals = tegra_channel_enum_frameintervals,
+ .vidioc_enum_fmt_vid_cap = tegra_channel_enum_format,
+ .vidioc_g_fmt_vid_cap = tegra_channel_get_format,
+ .vidioc_s_fmt_vid_cap = tegra_channel_set_format,
+ .vidioc_try_fmt_vid_cap = tegra_channel_try_format,
+ .vidioc_enum_input = tegra_channel_enum_input,
+ .vidioc_g_input = tegra_channel_g_input,
+ .vidioc_s_input = tegra_channel_s_input,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * V4L2 file operations
+ */
+static const struct v4l2_file_operations tegra_channel_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * V4L2 control operations
+ */
+static int vi_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct tegra_vi_channel *chan = container_of(ctrl->handler,
+ struct tegra_vi_channel,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ /* pattern change takes effect on next stream */
+ chan->pg_mode = ctrl->val + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vi_ctrl_ops = {
+ .s_ctrl = vi_s_ctrl,
+};
+
+static const char *const vi_pattern_strings[] = {
+ "Black/White Direct Mode",
+ "Color Patch Mode",
+};
+
+static int tegra_channel_setup_ctrl_handler(struct tegra_vi_channel *chan)
+{
+ int ret;
+
+ /* add test pattern control handler to v4l2 device */
+ v4l2_ctrl_new_std_menu_items(&chan->ctrl_handler, &vi_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(vi_pattern_strings) - 1,
+ 0, 0, vi_pattern_strings);
+ if (chan->ctrl_handler.error) {
+ dev_err(chan->vi->dev, "failed to add TPG ctrl handler: %d\n",
+ chan->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ return chan->ctrl_handler.error;
+ }
+
+ /* setup the controls */
+ ret = v4l2_ctrl_handler_setup(&chan->ctrl_handler);
+ if (ret < 0) {
+ dev_err(chan->vi->dev,
+ "failed to setup v4l2 ctrl handler: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* the VI supports only 2 formats in TPG mode */
+static void vi_tpg_fmts_bitmap_init(struct tegra_vi_channel *chan)
+{
+ int index;
+
+ bitmap_zero(chan->tpg_fmts_bitmap, MAX_FORMAT_NUM);
+
+ index = tegra_get_format_idx_by_code(chan->vi,
+ MEDIA_BUS_FMT_SRGGB10_1X10);
+ bitmap_set(chan->tpg_fmts_bitmap, index, 1);
+
+ index = tegra_get_format_idx_by_code(chan->vi,
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI);
+ bitmap_set(chan->tpg_fmts_bitmap, index, 1);
+}
+
+static void tegra_channel_cleanup(struct tegra_vi_channel *chan)
+{
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ media_entity_cleanup(&chan->video.entity);
+ host1x_syncpt_free(chan->mw_ack_sp);
+ host1x_syncpt_free(chan->frame_start_sp);
+ mutex_destroy(&chan->video_lock);
+}
+
+void tegra_channels_cleanup(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan, *tmp;
+
+ if (!vi)
+ return;
+
+ list_for_each_entry_safe(chan, tmp, &vi->vi_chans, list) {
+ tegra_channel_cleanup(chan);
+ list_del(&chan->list);
+ kfree(chan);
+ }
+}
+
+static int tegra_channel_init(struct tegra_vi_channel *chan)
+{
+ struct tegra_vi *vi = chan->vi;
+ struct tegra_video_device *vid = dev_get_drvdata(vi->client.host);
+ unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
+ int ret;
+
+ mutex_init(&chan->video_lock);
+ INIT_LIST_HEAD(&chan->capture);
+ INIT_LIST_HEAD(&chan->done);
+ spin_lock_init(&chan->start_lock);
+ spin_lock_init(&chan->done_lock);
+ spin_lock_init(&chan->sp_incr_lock);
+ init_waitqueue_head(&chan->start_wait);
+ init_waitqueue_head(&chan->done_wait);
+
+ /* initialize the video format */
+ chan->fmtinfo = &tegra_default_format;
+ chan->format.pixelformat = chan->fmtinfo->fourcc;
+ chan->format.colorspace = V4L2_COLORSPACE_SRGB;
+ chan->format.field = V4L2_FIELD_NONE;
+ chan->format.width = TEGRA_DEF_WIDTH;
+ chan->format.height = TEGRA_DEF_HEIGHT;
+ chan->format.bytesperline = TEGRA_DEF_WIDTH * chan->fmtinfo->bpp;
+ chan->format.sizeimage = chan->format.bytesperline * TEGRA_DEF_HEIGHT;
+ tegra_channel_fmt_align(chan, &chan->format, chan->fmtinfo->bpp);
+
+ chan->frame_start_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!chan->frame_start_sp) {
+ dev_err(vi->dev, "failed to request frame start syncpoint\n");
+ return -ENOMEM;
+ }
+
+ chan->mw_ack_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!chan->mw_ack_sp) {
+ dev_err(vi->dev, "failed to request memory ack syncpoint\n");
+ ret = -ENOMEM;
+ goto free_fs_syncpt;
+ }
+
+ /* initialize the media entity */
+ chan->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&chan->video.entity, 1, &chan->pad);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to initialize media entity: %d\n", ret);
+ goto free_mw_ack_syncpt;
+ }
+
+ ret = v4l2_ctrl_handler_init(&chan->ctrl_handler, MAX_CID_CONTROLS);
+ if (chan->ctrl_handler.error) {
+ dev_err(vi->dev,
+ "failed to initialize v4l2 ctrl handler: %d\n", ret);
+ goto cleanup_media;
+ }
+
+ /* initialize the video_device */
+ chan->video.fops = &tegra_channel_fops;
+ chan->video.v4l2_dev = &vid->v4l2_dev;
+ chan->video.release = video_device_release_empty;
+ chan->video.queue = &chan->queue;
+ snprintf(chan->video.name, sizeof(chan->video.name), "%s-%s-%u",
+ dev_name(vi->dev), "output", chan->portno);
+ chan->video.vfl_type = VFL_TYPE_VIDEO;
+ chan->video.vfl_dir = VFL_DIR_RX;
+ chan->video.ioctl_ops = &tegra_channel_ioctl_ops;
+ chan->video.ctrl_handler = &chan->ctrl_handler;
+ chan->video.lock = &chan->video_lock;
+ chan->video.device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ video_set_drvdata(&chan->video, chan);
+
+ chan->queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ chan->queue.io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ chan->queue.lock = &chan->video_lock;
+ chan->queue.drv_priv = chan;
+ chan->queue.buf_struct_size = sizeof(struct tegra_channel_buffer);
+ chan->queue.ops = &tegra_channel_queue_qops;
+ chan->queue.mem_ops = &vb2_dma_contig_memops;
+ chan->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ chan->queue.min_buffers_needed = 2;
+ chan->queue.dev = vi->dev;
+ ret = vb2_queue_init(&chan->queue);
+ if (ret < 0) {
+ dev_err(vi->dev, "failed to initialize vb2 queue: %d\n", ret);
+ goto free_v4l2_ctrl_hdl;
+ }
+
+ return 0;
+
+free_v4l2_ctrl_hdl:
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+cleanup_media:
+ media_entity_cleanup(&chan->video.entity);
+free_mw_ack_syncpt:
+ host1x_syncpt_free(chan->mw_ack_sp);
+free_fs_syncpt:
+ host1x_syncpt_free(chan->frame_start_sp);
+ return ret;
+}
+
+static int tegra_vi_tpg_channels_alloc(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan;
+ unsigned int port_num;
+ unsigned int nchannels = vi->soc->vi_max_channels;
+
+ for (port_num = 0; port_num < nchannels; port_num++) {
+		/*
+		 * Do not use devm_kzalloc, as that memory is freed
+		 * immediately when the device instance is unbound, but an
+		 * application might still be holding the device node open.
+		 * Channel memory allocated with kzalloc is freed in the
+		 * video device release callback.
+		 */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->vi = vi;
+ chan->portno = port_num;
+ list_add_tail(&chan->list, &vi->vi_chans);
+ }
+
+ return 0;
+}
+
+static int tegra_vi_channels_init(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan;
+ int ret;
+
+ list_for_each_entry(chan, &vi->vi_chans, list) {
+ ret = tegra_channel_init(chan);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to initialize channel-%d: %d\n",
+ chan->portno, ret);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ list_for_each_entry_continue_reverse(chan, &vi->vi_chans, list)
+ tegra_channel_cleanup(chan);
+
+ return ret;
+}
+
+void tegra_v4l2_nodes_cleanup_tpg(struct tegra_video_device *vid)
+{
+ struct tegra_vi *vi = vid->vi;
+ struct tegra_csi *csi = vid->csi;
+ struct tegra_csi_channel *csi_chan;
+ struct tegra_vi_channel *chan;
+
+ list_for_each_entry(chan, &vi->vi_chans, list) {
+ video_unregister_device(&chan->video);
+ mutex_lock(&chan->video_lock);
+ vb2_queue_release(&chan->queue);
+ mutex_unlock(&chan->video_lock);
+ }
+
+ list_for_each_entry(csi_chan, &csi->csi_chans, list)
+ v4l2_device_unregister_subdev(&csi_chan->subdev);
+}
+
+int tegra_v4l2_nodes_setup_tpg(struct tegra_video_device *vid)
+{
+ struct tegra_vi *vi = vid->vi;
+ struct tegra_csi *csi = vid->csi;
+ struct tegra_vi_channel *vi_chan;
+ struct tegra_csi_channel *csi_chan;
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ int ret;
+
+ if (!vi || !csi)
+ return -ENODEV;
+
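+	/*
+	 * In TPG mode, VI and CSI channels pair one-to-one, so walk both
+	 * lists in lockstep; this assumes the lists are of equal length.
+	 */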
+ csi_chan = list_first_entry(&csi->csi_chans,
+ struct tegra_csi_channel, list);
+
+ list_for_each_entry(vi_chan, &vi->vi_chans, list) {
+ struct media_entity *source = &csi_chan->subdev.entity;
+ struct media_entity *sink = &vi_chan->video.entity;
+ struct media_pad *source_pad = csi_chan->pads;
+ struct media_pad *sink_pad = &vi_chan->pad;
+
+ ret = v4l2_device_register_subdev(&vid->v4l2_dev,
+ &csi_chan->subdev);
+ if (ret) {
+ dev_err(vi->dev,
+ "failed to register subdev: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = video_register_device(&vi_chan->video,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to register video device: %d\n", ret);
+ goto cleanup;
+ }
+
+ dev_dbg(vi->dev, "creating %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+
+ ret = media_create_pad_link(source, source_pad->index,
+ sink, sink_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to create %s:%u -> %s:%u link: %d\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index, ret);
+ goto cleanup;
+ }
+
+ ret = tegra_channel_setup_ctrl_handler(vi_chan);
+ if (ret < 0)
+ goto cleanup;
+
+ v4l2_set_subdev_hostdata(&csi_chan->subdev, vi_chan);
+ vi_tpg_fmts_bitmap_init(vi_chan);
+ csi_chan = list_next_entry(csi_chan, list);
+ }
+
+ return 0;
+
+cleanup:
+ tegra_v4l2_nodes_cleanup_tpg(vid);
+ return ret;
+}
+
+static int __maybe_unused vi_runtime_resume(struct device *dev)
+{
+ struct tegra_vi *vi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_enable(vi->vdd);
+ if (ret) {
+ dev_err(dev, "failed to enable VDD supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_set_rate(vi->clk, vi->soc->vi_max_clk_hz);
+ if (ret) {
+ dev_err(dev, "failed to set vi clock rate: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ ret = clk_prepare_enable(vi->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable vi clock: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ return 0;
+
+disable_vdd:
+ regulator_disable(vi->vdd);
+ return ret;
+}
+
+static int __maybe_unused vi_runtime_suspend(struct device *dev)
+{
+ struct tegra_vi *vi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vi->clk);
+
+ regulator_disable(vi->vdd);
+
+ return 0;
+}
+
+static int tegra_vi_init(struct host1x_client *client)
+{
+ struct tegra_video_device *vid = dev_get_drvdata(client->host);
+ struct tegra_vi *vi = host1x_client_to_vi(client);
+ struct tegra_vi_channel *chan, *tmp;
+ int ret;
+
+ vid->media_dev.hw_revision = vi->soc->hw_revision;
+ snprintf(vid->media_dev.bus_info, sizeof(vid->media_dev.bus_info),
+ "platform:%s", dev_name(vi->dev));
+
+ INIT_LIST_HEAD(&vi->vi_chans);
+
+ ret = tegra_vi_tpg_channels_alloc(vi);
+ if (ret < 0) {
+ dev_err(vi->dev, "failed to allocate tpg channels: %d\n", ret);
+ goto free_chans;
+ }
+
+ ret = tegra_vi_channels_init(vi);
+ if (ret < 0)
+ goto free_chans;
+
+ vid->vi = vi;
+
+ return 0;
+
+free_chans:
+ list_for_each_entry_safe(chan, tmp, &vi->vi_chans, list) {
+ list_del(&chan->list);
+ kfree(chan);
+ }
+
+ return ret;
+}
+
+static int tegra_vi_exit(struct host1x_client *client)
+{
+	/*
+	 * Do not clean up the channels here, as an application might still
+	 * hold video device nodes open. Channel cleanup happens in the
+	 * v4l2_device release callback, which runs only after all video
+	 * device nodes have been released.
+	 */
+
+ return 0;
+}
+
+static const struct host1x_client_ops vi_client_ops = {
+ .init = tegra_vi_init,
+ .exit = tegra_vi_exit,
+};
+
+static int tegra_vi_probe(struct platform_device *pdev)
+{
+ struct tegra_vi *vi;
+ int ret;
+
+ vi = devm_kzalloc(&pdev->dev, sizeof(*vi), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vi->iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vi->iomem))
+ return PTR_ERR(vi->iomem);
+
+ vi->soc = of_device_get_match_data(&pdev->dev);
+
+ vi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(vi->clk)) {
+ ret = PTR_ERR(vi->clk);
+ dev_err(&pdev->dev, "failed to get vi clock: %d\n", ret);
+ return ret;
+ }
+
+ vi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
+ if (IS_ERR(vi->vdd)) {
+ ret = PTR_ERR(vi->vdd);
+ dev_err(&pdev->dev, "failed to get VDD supply: %d\n", ret);
+ return ret;
+ }
+
+ if (!pdev->dev.pm_domain) {
+ ret = -ENOENT;
+ dev_warn(&pdev->dev, "PM domain is not attached: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to populate vi child device: %d\n", ret);
+ return ret;
+ }
+
+ vi->dev = &pdev->dev;
+ vi->ops = vi->soc->ops;
+ platform_set_drvdata(pdev, vi);
+ pm_runtime_enable(&pdev->dev);
+
+ /* initialize host1x interface */
+ INIT_LIST_HEAD(&vi->client.list);
+ vi->client.ops = &vi_client_ops;
+ vi->client.dev = &pdev->dev;
+
+ ret = host1x_client_register(&vi->client);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to register host1x client: %d\n", ret);
+ goto rpm_disable;
+ }
+
+ return 0;
+
+rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int tegra_vi_remove(struct platform_device *pdev)
+{
+ struct tegra_vi *vi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&vi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to unregister host1x client: %d\n", err);
+ return err;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_vi_of_id_table[] = {
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-vi", .data = &tegra210_vi_soc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_vi_of_id_table);
+
+static const struct dev_pm_ops tegra_vi_pm_ops = {
+ SET_RUNTIME_PM_OPS(vi_runtime_suspend, vi_runtime_resume, NULL)
+};
+
+struct platform_driver tegra_vi_driver = {
+ .driver = {
+ .name = "tegra-vi",
+ .of_match_table = tegra_vi_of_id_table,
+ .pm = &tegra_vi_pm_ops,
+ },
+ .probe = tegra_vi_probe,
+ .remove = tegra_vi_remove,
+};
diff --git a/drivers/staging/media/tegra-video/vi.h b/drivers/staging/media/tegra-video/vi.h
new file mode 100644
index 000000000000..6272c9a61809
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vi.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __TEGRA_VI_H__
+#define __TEGRA_VI_H__
+
+#include <linux/host1x.h>
+#include <linux/list.h>
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+
+#define TEGRA_MIN_WIDTH 32U
+#define TEGRA_MAX_WIDTH 32768U
+#define TEGRA_MIN_HEIGHT 32U
+#define TEGRA_MAX_HEIGHT 32768U
+
+#define TEGRA_DEF_WIDTH 1920
+#define TEGRA_DEF_HEIGHT 1080
+#define TEGRA_IMAGE_FORMAT_DEF 32
+
+#define MAX_FORMAT_NUM 64
+
+enum tegra_vi_pg_mode {
+ TEGRA_VI_PG_DISABLED = 0,
+ TEGRA_VI_PG_DIRECT,
+ TEGRA_VI_PG_PATCH,
+};
+
+/**
+ * struct tegra_vi_ops - Tegra VI operations
+ * @vi_start_streaming: starts the media pipeline and subdevice streaming, sets
+ *		up VI for capture, and runs the capture start and capture
+ *		finish kthreads that capture frames to buffers and return them.
+ * @vi_stop_streaming: stops the media pipeline and subdevice streaming and
+ *		returns any queued buffers.
+ */
+struct tegra_vi_ops {
+ int (*vi_start_streaming)(struct vb2_queue *vq, u32 count);
+ void (*vi_stop_streaming)(struct vb2_queue *vq);
+};
+
+/**
+ * struct tegra_vi_soc - NVIDIA Tegra Video Input SoC structure
+ *
+ * @video_formats: supported video formats
+ * @nformats: total number of supported video formats
+ * @ops: vi operations
+ * @hw_revision: VI hw_revision
+ * @vi_max_channels: supported max streaming channels
+ * @vi_max_clk_hz: VI clock max frequency
+ */
+struct tegra_vi_soc {
+ const struct tegra_video_format *video_formats;
+ const unsigned int nformats;
+ const struct tegra_vi_ops *ops;
+ u32 hw_revision;
+ unsigned int vi_max_channels;
+ unsigned int vi_max_clk_hz;
+};
+
+/**
+ * struct tegra_vi - NVIDIA Tegra Video Input device structure
+ *
+ * @dev: device struct
+ * @client: host1x_client struct
+ * @iomem: register base
+ * @clk: main clock for VI block
+ * @vdd: vdd regulator for VI hardware, normally it is avdd_dsi_csi
+ * @soc: pointer to SoC data structure
+ * @ops: vi operations
+ * @vi_chans: list head for VI channels
+ */
+struct tegra_vi {
+ struct device *dev;
+ struct host1x_client client;
+ void __iomem *iomem;
+ struct clk *clk;
+ struct regulator *vdd;
+ const struct tegra_vi_soc *soc;
+ const struct tegra_vi_ops *ops;
+ struct list_head vi_chans;
+};
+
+/**
+ * struct tegra_vi_channel - Tegra video channel
+ *
+ * @list: list head for this entry
+ * @video: V4L2 video device associated with the video channel
+ * @video_lock: protects the @format and @queue fields
+ * @pad: media pad for the video device entity
+ *
+ * @vi: Tegra video input device structure
+ * @frame_start_sp: host1x syncpoint pointer to synchronize programmed capture
+ * start condition with hardware frame start events through host1x
+ * syncpoint counters.
+ * @mw_ack_sp: host1x syncpoint pointer to synchronize programmed memory write
+ * ack trigger condition with hardware memory write done at end of
+ * frame through host1x syncpoint counters.
+ * @sp_incr_lock: protects cpu syncpoint increment.
+ *
+ * @kthread_start_capture: kthread to start capture of a single frame when a
+ *		vb buffer is available. This thread programs the VI CSI hardware
+ *		for single frame capture, waits for the frame start event from
+ *		the hardware and, on receiving it, wakes up the
+ *		kthread_finish_capture thread to wait for the frame data write
+ *		to memory to finish. If the frame start event is missed, this
+ *		thread returns the buffer to vb with VB2_BUF_STATE_ERROR.
+ * @start_wait: waitqueue for starting frame capture when a buffer is available.
+ * @kthread_finish_capture: kthread to finish the buffer capture and return it.
+ *		This thread is woken up by kthread_start_capture on receiving a
+ *		frame start event from the hardware and then waits for the
+ *		MW_ACK_DONE event, which indicates that the frame data write to
+ *		memory has completed. On receiving the MW_ACK_DONE event, the
+ *		buffer is returned to vb with VB2_BUF_STATE_DONE; if the
+ *		MW_ACK_DONE event is missed, the buffer is returned to vb with
+ *		VB2_BUF_STATE_ERROR.
+ * @done_wait: waitqueue for finishing capture data writes to memory.
+ *
+ * @format: active V4L2 pixel format
+ * @fmtinfo: format information corresponding to the active @format
+ * @queue: vb2 buffers queue
+ * @sequence: V4L2 buffers sequence number
+ *
+ * @capture: list of queued buffers for capture
+ * @start_lock: protects the capture queued list
+ * @done: list of capture done queued buffers
+ * @done_lock: protects the capture done queue list
+ *
+ * @portno: VI channel port number
+ *
+ * @ctrl_handler: V4L2 control handler of this video channel
+ * @tpg_fmts_bitmap: a bitmap for supported TPG formats
+ * @pg_mode: test pattern generator mode (disabled/direct/patch)
+ */
+struct tegra_vi_channel {
+ struct list_head list;
+ struct video_device video;
+ /* protects the @format and @queue fields */
+ struct mutex video_lock;
+ struct media_pad pad;
+
+ struct tegra_vi *vi;
+ struct host1x_syncpt *frame_start_sp;
+ struct host1x_syncpt *mw_ack_sp;
+ /* protects the cpu syncpoint increment */
+ spinlock_t sp_incr_lock;
+
+ struct task_struct *kthread_start_capture;
+ wait_queue_head_t start_wait;
+ struct task_struct *kthread_finish_capture;
+ wait_queue_head_t done_wait;
+
+ struct v4l2_pix_format format;
+ const struct tegra_video_format *fmtinfo;
+ struct vb2_queue queue;
+ u32 sequence;
+
+ struct list_head capture;
+ /* protects the capture queued list */
+ spinlock_t start_lock;
+ struct list_head done;
+ /* protects the capture done queue list */
+ spinlock_t done_lock;
+
+ unsigned char portno;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ DECLARE_BITMAP(tpg_fmts_bitmap, MAX_FORMAT_NUM);
+ enum tegra_vi_pg_mode pg_mode;
+};
+
+/**
+ * struct tegra_channel_buffer - video channel buffer
+ *
+ * @buf: vb2 buffer base object
+ * @queue: buffer list entry in the channel queued buffers list
+ * @chan: channel that uses the buffer
+ * @addr: Tegra IOVA buffer address for VI output
+ * @mw_ack_sp_thresh: MW_ACK_DONE syncpoint threshold corresponding
+ * to the capture buffer.
+ */
+struct tegra_channel_buffer {
+ struct vb2_v4l2_buffer buf;
+ struct list_head queue;
+ struct tegra_vi_channel *chan;
+ dma_addr_t addr;
+ u32 mw_ack_sp_thresh;
+};
+
+/*
+ * VI channel input data type enum.
+ * These data type enum values get programmed into the corresponding Tegra VI
+ * channel register bits.
+ */
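+/* The values match the MIPI CSI-2 data type codes (e.g. RAW8 = 42 = 0x2A). */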
+enum tegra_image_dt {
+ TEGRA_IMAGE_DT_YUV420_8 = 24,
+ TEGRA_IMAGE_DT_YUV420_10,
+
+ TEGRA_IMAGE_DT_YUV420CSPS_8 = 28,
+ TEGRA_IMAGE_DT_YUV420CSPS_10,
+ TEGRA_IMAGE_DT_YUV422_8,
+ TEGRA_IMAGE_DT_YUV422_10,
+ TEGRA_IMAGE_DT_RGB444,
+ TEGRA_IMAGE_DT_RGB555,
+ TEGRA_IMAGE_DT_RGB565,
+ TEGRA_IMAGE_DT_RGB666,
+ TEGRA_IMAGE_DT_RGB888,
+
+ TEGRA_IMAGE_DT_RAW6 = 40,
+ TEGRA_IMAGE_DT_RAW7,
+ TEGRA_IMAGE_DT_RAW8,
+ TEGRA_IMAGE_DT_RAW10,
+ TEGRA_IMAGE_DT_RAW12,
+ TEGRA_IMAGE_DT_RAW14,
+};
+
+/**
+ * struct tegra_video_format - Tegra video format description
+ *
+ * @img_dt: image data type
+ * @bit_width: format width in bits per component
+ * @code: media bus format code
+ * @bpp: bytes per pixel (when stored in memory)
+ * @img_fmt: image format
+ * @fourcc: V4L2 pixel format FCC identifier
+ */
+struct tegra_video_format {
+ enum tegra_image_dt img_dt;
+ unsigned int bit_width;
+ unsigned int code;
+ unsigned int bpp;
+ u32 img_fmt;
+ u32 fourcc;
+};
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_vi_soc tegra210_vi_soc;
+#endif
+
+struct v4l2_subdev *
+tegra_channel_get_remote_subdev(struct tegra_vi_channel *chan);
+int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on);
+void tegra_channel_release_buffers(struct tegra_vi_channel *chan,
+ enum vb2_buffer_state state);
+void tegra_channels_cleanup(struct tegra_vi *vi);
+#endif
diff --git a/drivers/staging/media/tegra-video/video.c b/drivers/staging/media/tegra-video/video.c
new file mode 100644
index 000000000000..30816aa41e81
--- /dev/null
+++ b/drivers/staging/media/tegra-video/video.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "video.h"
+
+static void tegra_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct tegra_video_device *vid;
+
+ vid = container_of(v4l2_dev, struct tegra_video_device, v4l2_dev);
+
+	/* clean up channels here, as all video device nodes have been released */
+ tegra_channels_cleanup(vid->vi);
+
+ v4l2_device_unregister(v4l2_dev);
+ media_device_unregister(&vid->media_dev);
+ media_device_cleanup(&vid->media_dev);
+ kfree(vid);
+}
+
+static int host1x_video_probe(struct host1x_device *dev)
+{
+ struct tegra_video_device *vid;
+ int ret;
+
+ vid = kzalloc(sizeof(*vid), GFP_KERNEL);
+ if (!vid)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, vid);
+
+ vid->media_dev.dev = &dev->dev;
+ strscpy(vid->media_dev.model, "NVIDIA Tegra Video Input Device",
+ sizeof(vid->media_dev.model));
+
+ media_device_init(&vid->media_dev);
+ ret = media_device_register(&vid->media_dev);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "failed to register media device: %d\n", ret);
+ goto cleanup;
+ }
+
+ vid->v4l2_dev.mdev = &vid->media_dev;
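+	/* the release callback frees vid once the last v4l2_device reference drops */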
+ vid->v4l2_dev.release = tegra_v4l2_dev_release;
+ ret = v4l2_device_register(&dev->dev, &vid->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "V4L2 device registration failed: %d\n", ret);
+ goto unregister_media;
+ }
+
+ ret = host1x_device_init(dev);
+ if (ret < 0)
+ goto unregister_v4l2;
+
+ /*
+ * Both vi and csi channels are available now.
+ * Register v4l2 nodes and create media links for TPG.
+ */
+ ret = tegra_v4l2_nodes_setup_tpg(vid);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "failed to setup tpg graph: %d\n", ret);
+ goto device_exit;
+ }
+
+ return 0;
+
+device_exit:
+ host1x_device_exit(dev);
+	/* the vi exit op does not clean up channels, so clean them up here */
+ tegra_channels_cleanup(vid->vi);
+unregister_v4l2:
+ v4l2_device_unregister(&vid->v4l2_dev);
+unregister_media:
+ media_device_unregister(&vid->media_dev);
+cleanup:
+ media_device_cleanup(&vid->media_dev);
+ kfree(vid);
+ return ret;
+}
+
+static int host1x_video_remove(struct host1x_device *dev)
+{
+ struct tegra_video_device *vid = dev_get_drvdata(&dev->dev);
+
+ tegra_v4l2_nodes_cleanup_tpg(vid);
+
+ host1x_device_exit(dev);
+
+	/* This calls the v4l2_dev release callback on the last reference */
+ v4l2_device_put(&vid->v4l2_dev);
+
+ return 0;
+}
+
+static const struct of_device_id host1x_video_subdevs[] = {
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-csi", },
+ { .compatible = "nvidia,tegra210-vi", },
+#endif
+ { }
+};
+
+static struct host1x_driver host1x_video_driver = {
+ .driver = {
+ .name = "tegra-video",
+ },
+ .probe = host1x_video_probe,
+ .remove = host1x_video_remove,
+ .subdevs = host1x_video_subdevs,
+};
+
+static struct platform_driver * const drivers[] = {
+ &tegra_csi_driver,
+ &tegra_vi_driver,
+};
+
+static int __init host1x_video_init(void)
+{
+ int err;
+
+ err = host1x_driver_register(&host1x_video_driver);
+ if (err < 0)
+ return err;
+
+ err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ if (err < 0)
+ goto unregister_host1x;
+
+ return 0;
+
+unregister_host1x:
+ host1x_driver_unregister(&host1x_video_driver);
+ return err;
+}
+module_init(host1x_video_init);
+
+static void __exit host1x_video_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+ host1x_driver_unregister(&host1x_video_driver);
+}
+module_exit(host1x_video_exit);
+
+MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra Host1x Video driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/tegra-video/video.h b/drivers/staging/media/tegra-video/video.h
new file mode 100644
index 000000000000..fadaf2189dc9
--- /dev/null
+++ b/drivers/staging/media/tegra-video/video.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __TEGRA_VIDEO_H__
+#define __TEGRA_VIDEO_H__
+
+#include <linux/host1x.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+
+#include "vi.h"
+#include "csi.h"
+
+struct tegra_video_device {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct tegra_vi *vi;
+ struct tegra_csi *csi;
+};
+
+int tegra_v4l2_nodes_setup_tpg(struct tegra_video_device *vid);
+void tegra_v4l2_nodes_cleanup_tpg(struct tegra_video_device *vid);
+
+extern struct platform_driver tegra_vi_driver;
+extern struct platform_driver tegra_csi_driver;
+#endif
diff --git a/drivers/staging/media/usbvision/Kconfig b/drivers/staging/media/usbvision/Kconfig
index c6e1afb5ac48..1c7da2a2caac 100644
--- a/drivers/staging/media/usbvision/Kconfig
+++ b/drivers/staging/media/usbvision/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_USBVISION
tristate "USB video devices based on Nogatech NT1003/1004/1005 (Deprecated)"
- depends on MEDIA_USB_SUPPORT && I2C && VIDEO_V4L2
+ depends on MEDIA_USB_SUPPORT && I2C && VIDEO_V4L2 && USB
select VIDEO_TUNER
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
help
diff --git a/drivers/staging/media/usbvision/usbvision-core.c b/drivers/staging/media/usbvision/usbvision-core.c
index f05a5c84dc18..e35dee35b068 100644
--- a/drivers/staging/media/usbvision/usbvision-core.c
+++ b/drivers/staging/media/usbvision/usbvision-core.c
@@ -1268,7 +1268,7 @@ static void usbvision_isoc_irq(struct urb *urb)
if (!USBVISION_IS_OPERATIONAL(usbvision))
return;
- /* any urb with wrong status is ignored without acknowledgement */
+ /* any urb with wrong status is ignored without acknowledgment */
if (urb->status == -ENOENT)
return;
diff --git a/drivers/staging/most/usb/Kconfig b/drivers/staging/most/usb/Kconfig
index a86f1f63def4..75dc25c0e0e5 100644
--- a/drivers/staging/most/usb/Kconfig
+++ b/drivers/staging/most/usb/Kconfig
@@ -7,7 +7,7 @@ config MOST_USB
tristate "USB"
depends on USB && NET
help
- Say Y here if you want to connect via USB to network tranceiver.
+	  Say Y here if you want to connect via USB to a network transceiver.
This device driver depends on the networking AIM.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c
index e8c5a8c98375..2640c5b326a4 100644
--- a/drivers/staging/most/usb/usb.c
+++ b/drivers/staging/most/usb/usb.c
@@ -5,7 +5,6 @@
* Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/usb.h>
@@ -140,9 +139,10 @@ static void wq_netinfo(struct work_struct *wq_obj);
static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
{
int retval;
- __le16 *dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
+ __le16 *dma_buf;
u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
if (!dma_buf)
return -ENOMEM;
@@ -153,7 +153,9 @@ static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
*buf = le16_to_cpu(*dma_buf);
kfree(dma_buf);
- return retval;
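+	/* usb_control_msg() returns the number of bytes transferred on success */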
+ if (retval < 0)
+ return retval;
+ return 0;
}
/**
@@ -184,16 +186,18 @@ static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
/**
* get_stream_frame_size - calculate frame size of current configuration
+ * @dev: device structure
* @cfg: channel configuration
*/
-static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
+static unsigned int get_stream_frame_size(struct device *dev,
+ struct most_channel_config *cfg)
{
- unsigned int frame_size = 0;
+ unsigned int frame_size;
unsigned int sub_size = cfg->subbuffer_size;
if (!sub_size) {
- pr_warn("Misconfig: Subbuffer size zero.\n");
- return frame_size;
+ dev_warn(dev, "Misconfig: Subbuffer size zero.\n");
+ return 0;
}
switch (cfg->data_type) {
case MOST_CH_ISOC:
@@ -201,7 +205,7 @@ static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
break;
case MOST_CH_SYNC:
if (cfg->packets_per_xact == 0) {
- pr_warn("Misconfig: Packets per XACT zero\n");
+ dev_warn(dev, "Misconfig: Packets per XACT zero\n");
frame_size = 0;
} else if (cfg->packets_per_xact == 0xFF) {
frame_size = (USB_MTU / sub_size) * sub_size;
@@ -210,7 +214,8 @@ static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
}
break;
default:
- pr_warn("Query frame size of non-streaming channel\n");
+ dev_warn(dev, "Query frame size of non-streaming channel\n");
+ frame_size = 0;
break;
}
return frame_size;
@@ -233,11 +238,7 @@ static int hdm_poison_channel(struct most_interface *iface, int channel)
unsigned long flags;
spinlock_t *lock; /* temp. lock */
- if (unlikely(!iface)) {
- dev_warn(&mdev->usb_device->dev, "Poison: Bad interface.\n");
- return -EIO;
- }
- if (unlikely(channel < 0 || channel >= iface->num_channels)) {
+ if (channel < 0 || channel >= iface->num_channels) {
dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
return -ECHRNG;
}
@@ -274,17 +275,17 @@ static int hdm_poison_channel(struct most_interface *iface, int channel)
static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
{
struct most_channel_config *conf = &mdev->conf[channel];
- unsigned int frame_size = get_stream_frame_size(conf);
+ unsigned int frame_size = get_stream_frame_size(&mdev->dev, conf);
unsigned int j, num_frames;
if (!frame_size)
- return -EIO;
+ return -EINVAL;
num_frames = mbo->buffer_length / frame_size;
if (num_frames < 1) {
dev_err(&mdev->usb_device->dev,
"Missed minimal transfer unit.\n");
- return -EIO;
+ return -EINVAL;
}
for (j = num_frames - 1; j > 0; j--)
@@ -308,11 +309,11 @@ static int hdm_remove_padding(struct most_dev *mdev, int channel,
struct mbo *mbo)
{
struct most_channel_config *const conf = &mdev->conf[channel];
- unsigned int frame_size = get_stream_frame_size(conf);
+ unsigned int frame_size = get_stream_frame_size(&mdev->dev, conf);
unsigned int j, num_frames;
if (!frame_size)
- return -EIO;
+ return -EINVAL;
num_frames = mbo->processed_length / USB_MTU;
for (j = 1; j < num_frames; j++)
@@ -386,103 +387,6 @@ static void hdm_write_completion(struct urb *urb)
* padding bytes -if necessary- and calls the completion function.
*
* Context: interrupt!
- *
- * **************************************************************************
- * Error codes returned by in urb->status
- * or in iso_frame_desc[n].status (for ISO)
- * *************************************************************************
- *
- * USB device drivers may only test urb status values in completion handlers.
- * This is because otherwise there would be a race between HCDs updating
- * these values on one CPU, and device drivers testing them on another CPU.
- *
- * A transfer's actual_length may be positive even when an error has been
- * reported. That's because transfers often involve several packets, so that
- * one or more packets could finish before an error stops further endpoint I/O.
- *
- * For isochronous URBs, the urb status value is non-zero only if the URB is
- * unlinked, the device is removed, the host controller is disabled or the total
- * transferred length is less than the requested length and the URB_SHORT_NOT_OK
- * flag is set. Completion handlers for isochronous URBs should only see
- * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
- * Individual frame descriptor status fields may report more status codes.
- *
- *
- * 0 Transfer completed successfully
- *
- * -ENOENT URB was synchronously unlinked by usb_unlink_urb
- *
- * -EINPROGRESS URB still pending, no results yet
- * (That is, if drivers see this it's a bug.)
- *
- * -EPROTO (*, **) a) bitstuff error
- * b) no response packet received within the
- * prescribed bus turn-around time
- * c) unknown USB error
- *
- * -EILSEQ (*, **) a) CRC mismatch
- * b) no response packet received within the
- * prescribed bus turn-around time
- * c) unknown USB error
- *
- * Note that often the controller hardware does not
- * distinguish among cases a), b), and c), so a
- * driver cannot tell whether there was a protocol
- * error, a failure to respond (often caused by
- * device disconnect), or some other fault.
- *
- * -ETIME (**) No response packet received within the prescribed
- * bus turn-around time. This error may instead be
- * reported as -EPROTO or -EILSEQ.
- *
- * -ETIMEDOUT Synchronous USB message functions use this code
- * to indicate timeout expired before the transfer
- * completed, and no other error was reported by HC.
- *
- * -EPIPE (**) Endpoint stalled. For non-control endpoints,
- * reset this status with usb_clear_halt().
- *
- * -ECOMM During an IN transfer, the host controller
- * received data from an endpoint faster than it
- * could be written to system memory
- *
- * -ENOSR During an OUT transfer, the host controller
- * could not retrieve data from system memory fast
- * enough to keep up with the USB data rate
- *
- * -EOVERFLOW (*) The amount of data returned by the endpoint was
- * greater than either the max packet size of the
- * endpoint or the remaining buffer size. "Babble".
- *
- * -EREMOTEIO The data read from the endpoint did not fill the
- * specified buffer, and URB_SHORT_NOT_OK was set in
- * urb->transfer_flags.
- *
- * -ENODEV Device was removed. Often preceded by a burst of
- * other errors, since the hub driver doesn't detect
- * device removal events immediately.
- *
- * -EXDEV ISO transfer only partially completed
- * (only set in iso_frame_desc[n].status, not urb->status)
- *
- * -EINVAL ISO madness, if this happens: Log off and go home
- *
- * -ECONNRESET URB was asynchronously unlinked by usb_unlink_urb
- *
- * -ESHUTDOWN The device or host controller has been disabled due
- * to some problem that could not be worked around,
- * such as a physical disconnect.
- *
- *
- * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
- * hardware problems such as bad devices (including firmware) or cables.
- *
- * (**) This is also one of several codes that different kinds of host
- * controller use to indicate a transfer has failed because of device
- * disconnect. In the interval before the hub driver starts disconnect
- * processing, devices may receive such fault reports for every request.
- *
- * See <https://www.kernel.org/doc/Documentation/driver-api/usb/error-codes.rst>
*/
static void hdm_read_completion(struct urb *urb)
{
@@ -552,36 +456,33 @@ static void hdm_read_completion(struct urb *urb)
static int hdm_enqueue(struct most_interface *iface, int channel,
struct mbo *mbo)
{
- struct most_dev *mdev;
+ struct most_dev *mdev = to_mdev(iface);
struct most_channel_config *conf;
int retval = 0;
struct urb *urb;
unsigned long length;
void *virt_address;
- if (unlikely(!iface || !mbo))
- return -EIO;
- if (unlikely(iface->num_channels <= channel || channel < 0))
+ if (!mbo)
+ return -EINVAL;
+ if (iface->num_channels <= channel || channel < 0)
return -ECHRNG;
- mdev = to_mdev(iface);
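+	/* allocate the URB before taking io_mutex so the unlock paths stay simple */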
+ urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
conf = &mdev->conf[channel];
mutex_lock(&mdev->io_mutex);
if (!mdev->usb_device) {
retval = -ENODEV;
- goto unlock_io_mutex;
- }
-
- urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
- if (!urb) {
- retval = -ENOMEM;
- goto unlock_io_mutex;
+ goto err_free_urb;
}
if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
hdm_add_padding(mdev, channel, mbo)) {
- retval = -EIO;
+ retval = -EINVAL;
goto err_free_urb;
}
@@ -619,13 +520,13 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
"URB submit failed with error %d.\n", retval);
goto err_unanchor_urb;
}
- goto unlock_io_mutex;
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
err_unanchor_urb:
usb_unanchor_urb(urb);
err_free_urb:
usb_free_urb(urb);
-unlock_io_mutex:
mutex_unlock(&mdev->io_mutex);
return retval;
}
@@ -669,19 +570,20 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
struct most_dev *mdev = to_mdev(iface);
struct device *dev = &mdev->usb_device->dev;
- mdev->is_channel_healthy[channel] = true;
- mdev->clear_work[channel].channel = channel;
- mdev->clear_work[channel].mdev = mdev;
- INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
-
- if (unlikely(!iface || !conf)) {
- dev_err(dev, "Bad interface or config pointer.\n");
+ if (!conf) {
+ dev_err(dev, "Bad config pointer.\n");
return -EINVAL;
}
- if (unlikely(channel < 0 || channel >= iface->num_channels)) {
+ if (channel < 0 || channel >= iface->num_channels) {
dev_err(dev, "Channel ID out of range.\n");
return -EINVAL;
}
+
+ mdev->is_channel_healthy[channel] = true;
+ mdev->clear_work[channel].channel = channel;
+ mdev->clear_work[channel].mdev = mdev;
+ INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
+
if (!conf->num_buffers || !conf->buffer_size) {
dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
return -EINVAL;
@@ -701,7 +603,7 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
mdev->padding_active[channel] = true;
- frame_size = get_stream_frame_size(conf);
+ frame_size = get_stream_frame_size(&mdev->dev, conf);
if (frame_size == 0 || frame_size > USB_MTU) {
dev_warn(dev, "Misconfig: frame size wrong\n");
return -EINVAL;
@@ -745,10 +647,8 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel,
unsigned char,
unsigned char *))
{
- struct most_dev *mdev;
+ struct most_dev *mdev = to_mdev(iface);
- BUG_ON(!iface);
- mdev = to_mdev(iface);
mdev->on_netinfo = on_netinfo;
if (!on_netinfo)
return;
@@ -787,22 +687,22 @@ static void wq_netinfo(struct work_struct *wq_obj)
u16 hi, mi, lo, link;
u8 hw_addr[6];
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi)) {
dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
return;
}
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi)) {
dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
return;
}
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo)) {
dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
return;
}
- if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
+ if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link)) {
dev_err(dev, "Vendor request 'link status' failed\n");
return;
}
@@ -830,6 +730,8 @@ static void wq_clear_halt(struct work_struct *wq_obj)
struct most_dev *mdev = clear_work->mdev;
unsigned int channel = clear_work->channel;
int pipe = clear_work->pipe;
+ int snd_pipe;
+ int peer;
mutex_lock(&mdev->io_mutex);
most_stop_enqueue(&mdev->iface, channel);
@@ -847,9 +749,12 @@ static void wq_clear_halt(struct work_struct *wq_obj)
*/
if (mdev->conf[channel].data_type == MOST_CH_ASYNC &&
mdev->conf[channel].direction == MOST_CH_RX) {
- int peer = 1 - channel;
- int snd_pipe = usb_sndbulkpipe(mdev->usb_device,
- mdev->ep_address[peer]);
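+		/* async channels come in pairs; the peer of channel 0 is 1 and vice versa */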
+ if (channel == 0)
+ peer = 1;
+ else
+ peer = 0;
+ snd_pipe = usb_sndbulkpipe(mdev->usb_device,
+ mdev->ep_address[peer]);
usb_clear_halt(mdev->usb_device, snd_pipe);
}
mdev->is_channel_healthy[channel] = true;
@@ -904,12 +809,12 @@ static int get_stat_reg_addr(const struct regs *regs, int size,
int i;
for (i = 0; i < size; i++) {
- if (!strcmp(name, regs[i].name)) {
+ if (sysfs_streq(name, regs[i].name)) {
*reg_addr = regs[i].reg;
return 0;
}
}
- return -EFAULT;
+ return -EINVAL;
}
#define get_static_reg_addr(regs, name, reg_addr) \
@@ -924,14 +829,14 @@ static ssize_t value_show(struct device *dev, struct device_attribute *attr,
u16 reg_addr;
int err;
- if (!strcmp(name, "arb_address"))
+ if (sysfs_streq(name, "arb_address"))
return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr);
- if (!strcmp(name, "arb_value"))
+ if (sysfs_streq(name, "arb_value"))
reg_addr = dci_obj->reg_addr;
else if (get_static_reg_addr(ro_regs, name, &reg_addr) &&
get_static_reg_addr(rw_regs, name, &reg_addr))
- return -EFAULT;
+ return -EINVAL;
err = drci_rd_reg(dci_obj->usb_device, reg_addr, &val);
if (err < 0)
@@ -948,24 +853,25 @@ static ssize_t value_store(struct device *dev, struct device_attribute *attr,
const char *name = attr->attr.name;
struct most_dci_obj *dci_obj = to_dci_obj(dev);
struct usb_device *usb_dev = dci_obj->usb_device;
- int err = kstrtou16(buf, 16, &val);
+ int err;
+ err = kstrtou16(buf, 16, &val);
if (err)
return err;
- if (!strcmp(name, "arb_address")) {
+ if (sysfs_streq(name, "arb_address")) {
dci_obj->reg_addr = val;
return count;
}
- if (!strcmp(name, "arb_value"))
+ if (sysfs_streq(name, "arb_value"))
err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
- else if (!strcmp(name, "sync_ep"))
+ else if (sysfs_streq(name, "sync_ep"))
err = start_sync_ep(usb_dev, val);
else if (!get_static_reg_addr(rw_regs, name, &reg_addr))
err = drci_wr_reg(usb_dev, reg_addr, val);
else
- return -EFAULT;
+ return -EINVAL;
if (err < 0)
return err;
@@ -1008,19 +914,13 @@ static struct attribute *dci_attrs[] = {
NULL,
};
-static struct attribute_group dci_attr_group = {
- .attrs = dci_attrs,
-};
-
-static const struct attribute_group *dci_attr_groups[] = {
- &dci_attr_group,
- NULL,
-};
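+/* ATTRIBUTE_GROUPS() generates the dci_groups array from dci_attrs */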
+ATTRIBUTE_GROUPS(dci);
static void release_dci(struct device *dev)
{
struct most_dci_obj *dci = to_dci_obj(dev);
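+	/* drop the parent reference taken with get_device() in hdm_probe() */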
+ put_device(dev->parent);
kfree(dci);
}
@@ -1048,18 +948,23 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
struct usb_device *usb_dev = interface_to_usbdev(interface);
struct device *dev = &usb_dev->dev;
- struct most_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ struct most_dev *mdev;
unsigned int i;
unsigned int num_endpoints;
struct most_channel_capability *tmp_cap;
struct usb_endpoint_descriptor *ep_desc;
- int ret = 0;
+ int ret = -ENOMEM;
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
- goto err_out_of_memory;
+ return -ENOMEM;
usb_set_intfdata(interface, mdev);
num_endpoints = usb_iface_desc->desc.bNumEndpoints;
+ if (num_endpoints > MAX_NUM_ENDPOINTS) {
+ kfree(mdev);
+ return -EINVAL;
+ }
mutex_init(&mdev->io_mutex);
INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
@@ -1134,17 +1039,17 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
init_usb_anchor(&mdev->busy_urbs[i]);
spin_lock_init(&mdev->channel_lock[i]);
}
- dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
- le16_to_cpu(usb_dev->descriptor.idVendor),
- le16_to_cpu(usb_dev->descriptor.idProduct),
- usb_dev->bus->busnum,
- usb_dev->devnum);
-
- dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
- usb_dev->bus->busnum,
- usb_dev->devpath,
- usb_dev->config->desc.bConfigurationValue,
- usb_iface_desc->desc.bInterfaceNumber);
+ dev_dbg(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
+ le16_to_cpu(usb_dev->descriptor.idVendor),
+ le16_to_cpu(usb_dev->descriptor.idProduct),
+ usb_dev->bus->busnum,
+ usb_dev->devnum);
+
+ dev_dbg(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
+ usb_dev->bus->busnum,
+ usb_dev->devpath,
+ usb_dev->config->desc.bConfigurationValue,
+ usb_iface_desc->desc.bInterfaceNumber);
ret = most_register_interface(&mdev->iface);
if (ret)
@@ -1164,7 +1069,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
mdev->dci->dev.init_name = "dci";
mdev->dci->dev.parent = get_device(mdev->iface.dev);
- mdev->dci->dev.groups = dci_attr_groups;
+ mdev->dci->dev.groups = dci_groups;
mdev->dci->dev.release = release_dci;
if (device_register(&mdev->dci->dev)) {
mutex_unlock(&mdev->io_mutex);
@@ -1188,11 +1093,6 @@ err_free_conf:
kfree(mdev->conf);
err_free_mdev:
put_device(&mdev->dev);
-err_out_of_memory:
- if (ret == 0 || ret == -ENOMEM) {
- ret = -ENOMEM;
- dev_err(dev, "out of memory\n");
- }
return ret;
}
@@ -1225,14 +1125,43 @@ static void hdm_disconnect(struct usb_interface *interface)
kfree(mdev->cap);
kfree(mdev->conf);
kfree(mdev->ep_address);
+ put_device(&mdev->dci->dev);
put_device(&mdev->dev);
}
+static int hdm_suspend(struct usb_interface *interface, pm_message_t message)
+{
+ struct most_dev *mdev = usb_get_intfdata(interface);
+ int i;
+
+ mutex_lock(&mdev->io_mutex);
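+	/* stop enqueueing and reap all in-flight URBs before the bus suspends */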
+ for (i = 0; i < mdev->iface.num_channels; i++) {
+ most_stop_enqueue(&mdev->iface, i);
+ usb_kill_anchored_urbs(&mdev->busy_urbs[i]);
+ }
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+}
+
+static int hdm_resume(struct usb_interface *interface)
+{
+ struct most_dev *mdev = usb_get_intfdata(interface);
+ int i;
+
+ mutex_lock(&mdev->io_mutex);
+ for (i = 0; i < mdev->iface.num_channels; i++)
+ most_resume_enqueue(&mdev->iface, i);
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+}
+
static struct usb_driver hdm_usb = {
.name = "hdm_usb",
.id_table = usbid,
.probe = hdm_probe,
.disconnect = hdm_disconnect,
+ .resume = hdm_resume,
+ .suspend = hdm_suspend,
};
module_usb_driver(hdm_usb);
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 9e5cf68731bb..82aa93634eda 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -523,11 +523,10 @@
0x01000000 0 0x00000000 0x1e160000 0 0x00010000 /* io space */
>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0xF0000 0 0 1>;
- interrupt-map = <0x10000 0 0 1 &gic GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>,
- <0x20000 0 0 1 &gic GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>,
- <0x30000 0 0 1 &gic GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH
+ GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH
+ GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
diff --git a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt
deleted file mode 100644
index a369d715378b..000000000000
--- a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Mediatek Mt7621 PCIe PHY
-
-Required properties:
-- compatible: must be "mediatek,mt7621-pci-phy"
-- reg: base address and length of the PCIe PHY block
-- #phy-cells: must be <1> for pcie0_phy and for pcie1_phy.
-
-Example:
- pcie0_phy: pcie-phy@1e149000 {
- compatible = "mediatek,mt7621-pci-phy";
- reg = <0x1e149000 0x0700>;
- #phy-cells = <1>;
- };
-
- pcie1_phy: pcie-phy@1e14a000 {
- compatible = "mediatek,mt7621-pci-phy";
- reg = <0x1e14a000 0x0700>;
- #phy-cells = <1>;
- };
-
- /* users of the PCIe phy */
-
- pcie: pcie@1e140000 {
- ...
- ...
- phys = <&pcie0_phy 0>, <&pcie0_phy 1>, <&pcie1_phy 0>;
- phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
- };
diff --git a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml
new file mode 100644
index 000000000000..cf32bbc45b5d
--- /dev/null
+++ b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/mediatek,mt7621-pci-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Mediatek Mt7621 PCIe PHY Device Tree Bindings
+
+maintainers:
+ - Sergio Paracuellos <sergio.paracuellos@gmail.com>
+
+properties:
+ compatible:
+ const: mediatek,mt7621-pci-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 1
+ description: selects if the phy is dual-ported
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ pcie0_phy: pcie-phy@1e149000 {
+ compatible = "mediatek,mt7621-pci-phy";
+ reg = <0x1e149000 0x0700>;
+ #phy-cells = <1>;
+ };
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index f58e3a51fc71..f961b353c22e 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -55,7 +55,7 @@
#define RALINK_PCI_IOBASE 0x002C
/* PCICFG virtual bridges */
-#define PCIE_P2P_MAX 3
+#define PCIE_P2P_CNT 3
#define PCIE_P2P_BR_DEVNUM_SHIFT(p) (16 + (p) * 4)
#define PCIE_P2P_BR_DEVNUM0_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(0)
#define PCIE_P2P_BR_DEVNUM1_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(1)
@@ -97,6 +97,7 @@
* @pcie_rst: pointer to port reset control
* @gpio_rst: gpio reset
* @slot: port slot
+ * @irq: GIC irq
* @enabled: indicates if port is enabled
*/
struct mt7621_pcie_port {
@@ -107,6 +108,7 @@ struct mt7621_pcie_port {
struct reset_control *pcie_rst;
struct gpio_desc *gpio_rst;
u32 slot;
+ int irq;
bool enabled;
};
@@ -120,6 +122,7 @@ struct mt7621_pcie_port {
* @dev: Pointer to PCIe device
* @io_map_base: virtual memory base address for io
* @ports: pointer to PCIe port information
+ * @irq_map: IRQ mapping info according to PCIe link status
* @resets_inverted: depends on chip revision
* reset lines are inverted.
*/
@@ -135,6 +138,7 @@ struct mt7621_pcie {
} offset;
unsigned long io_map_base;
struct list_head ports;
+ int irq_map[PCIE_P2P_CNT];
bool resets_inverted;
};
@@ -279,6 +283,16 @@ static void setup_cm_memory_region(struct mt7621_pcie *pcie)
}
}
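+/* return the GIC IRQ assigned to @slot by mt7621_pcie_init_virtual_bridges() */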
+static int mt7621_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
+{
+ struct mt7621_pcie *pcie = pdev->bus->sysdata;
+ struct device *dev = pcie->dev;
+ int irq = pcie->irq_map[slot];
+
+ dev_info(dev, "bus=%d slot=%d irq=%d\n", pdev->bus->number, slot, irq);
+ return irq;
+}
+
static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -330,6 +344,7 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
{
struct mt7621_pcie_port *port;
struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *pnode = dev->of_node;
struct resource regs;
char name[10];
@@ -371,6 +386,12 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
port->slot = slot;
port->pcie = pcie;
+ port->irq = platform_get_irq(pdev, slot);
+ if (port->irq < 0) {
+ dev_err(dev, "Failed to get IRQ for PCIe%d\n", slot);
+ return -ENXIO;
+ }
+
INIT_LIST_HEAD(&port->list);
list_add_tail(&port->list, &pcie->ports);
@@ -502,17 +523,25 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
mt7621_pcie_reset_ep_deassert(pcie);
+ tmp = NULL;
list_for_each_entry(port, &pcie->ports, list) {
u32 slot = port->slot;
if (!mt7621_pcie_port_is_linkup(port)) {
dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
slot);
- if (slot != 1)
- phy_power_off(port->phy);
mt7621_control_assert(port);
mt7621_pcie_port_clk_disable(port);
port->enabled = false;
+
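+			/*
+			 * Slots 0 and 1 share a dual-ported PHY; defer the
+			 * power-off until both links are known to be down.
+			 */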
+ if (slot == 0) {
+ tmp = port;
+ continue;
+ }
+
+ if (slot == 1 && tmp && !tmp->enabled)
+ phy_power_off(tmp->phy);
}
}
}
@@ -576,14 +605,16 @@ static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
{
u32 pcie_link_status = 0;
- u32 n;
- int i;
- u32 p2p_br_devnum[PCIE_P2P_MAX];
+ u32 n = 0;
+ int i = 0;
+ u32 p2p_br_devnum[PCIE_P2P_CNT];
+ int irqs[PCIE_P2P_CNT];
struct mt7621_pcie_port *port;
list_for_each_entry(port, &pcie->ports, list) {
u32 slot = port->slot;
+ irqs[i++] = port->irq;
if (port->enabled)
pcie_link_status |= BIT(slot);
}
@@ -591,12 +622,16 @@ static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
if (pcie_link_status == 0)
return -1;
- n = 0;
- for (i = 0; i < PCIE_P2P_MAX; i++)
+ /*
+ * Assign device numbers from zero to the enabled ports,
+	 * then assign the remaining device numbers to any disabled
+ * ports.
+ */
+ for (i = 0; i < PCIE_P2P_CNT; i++)
if (pcie_link_status & BIT(i))
p2p_br_devnum[i] = n++;
- for (i = 0; i < PCIE_P2P_MAX; i++)
+ for (i = 0; i < PCIE_P2P_CNT; i++)
if ((pcie_link_status & BIT(i)) == 0)
p2p_br_devnum[i] = n++;
@@ -606,6 +641,15 @@ static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
(p2p_br_devnum[1] << PCIE_P2P_BR_DEVNUM1_SHIFT) |
(p2p_br_devnum[2] << PCIE_P2P_BR_DEVNUM2_SHIFT));
+ /* Assign IRQs */
+ n = 0;
+ for (i = 0; i < PCIE_P2P_CNT; i++)
+ if (pcie_link_status & BIT(i))
+ pcie->irq_map[n++] = irqs[i];
+
+ for (i = n; i < PCIE_P2P_CNT; i++)
+ pcie->irq_map[i] = -1;
+
return 0;
}
@@ -630,7 +674,7 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host,
host->busnr = pcie->busn.start;
host->dev.parent = pcie->dev;
host->ops = &mt7621_pci_ops;
- host->map_irq = of_irq_parse_and_map_pci;
+ host->map_irq = mt7621_map_irq;
host->swizzle_irq = pci_common_swizzle;
host->sysdata = pcie;
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index d0f06790d38f..caaf9e34f1ee 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -220,7 +220,7 @@ static int rt2880_pinmux_index(struct rt2880_priv *p)
/* allocate our function and group mapping index buffers */
f = p->func = devm_kcalloc(p->dev,
p->func_count,
- sizeof(struct rt2880_pmx_func),
+ sizeof(*p->func),
GFP_KERNEL);
gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int),
GFP_KERNEL);
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 313d22f6210f..c8d0c63fdd1d 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1230,6 +1230,7 @@ static int pi433_probe(struct spi_device *spi)
device->cdev = cdev_alloc();
if (!device->cdev) {
dev_dbg(device->dev, "allocation of cdev failed");
+ retval = -ENOMEM;
goto cdev_failed;
}
device->cdev->owner = THIS_MODULE;
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 1795533cbd3a..058889687907 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -1559,18 +1559,17 @@ void ql_dump_stat(struct ql_adapter *qdev)
#ifdef QL_DEV_DUMP
#define DUMP_QDEV_FIELD(qdev, type, field) \
- pr_err("qdev->%-24s = " type "\n", #field, (qdev)->(field))
+ pr_err("qdev->%-24s = " type "\n", #field, (qdev)->field)
#define DUMP_QDEV_DMA_FIELD(qdev, field) \
pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
pr_err("%s[%d].%s = " type "\n", \
- #array, index, #field, (qdev)->array[index].field);
+ #array, index, #field, (qdev)->array[index].field)
void ql_dump_qdev(struct ql_adapter *qdev)
{
int i;
DUMP_QDEV_FIELD(qdev, "%lx", flags);
- DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
DUMP_QDEV_FIELD(qdev, "%p", pdev);
DUMP_QDEV_FIELD(qdev, "%p", ndev);
DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
@@ -1758,8 +1757,6 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
rx_ring->lbq.prod_idx_db_reg);
pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
- pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
- pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
pr_err("rx_ring->sbq.base_dma = %llx\n",
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index c92820f07968..402edaeffe12 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -214,19 +214,20 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
u32 mask;
u32 value;
- direction =
- (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE;
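+	/* "load" config operations (CFG_L*) DMA from host memory to the adapter */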
+ if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
+ direction = DMA_TO_DEVICE;
+ else
+ direction = DMA_FROM_DEVICE;
- map = pci_map_single(qdev->pdev, ptr, size, direction);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
+ map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
+ if (dma_mapping_error(&qdev->pdev->dev, map)) {
netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
return -ENOMEM;
}
status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
if (status)
- return status;
+ goto lock_failed;
status = ql_wait_cfg(qdev, bit);
if (status) {
@@ -235,8 +236,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
goto exit;
}
- ql_write32(qdev, ICB_L, (u32) map);
- ql_write32(qdev, ICB_H, (u32) (map >> 32));
+ ql_write32(qdev, ICB_L, (u32)map);
+ ql_write32(qdev, ICB_H, (u32)(map >> 32));
mask = CFG_Q_MASK | (bit << 16);
value = bit | (q_id << CFG_Q_SHIFT);
@@ -248,7 +249,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
status = ql_wait_cfg(qdev, bit);
exit:
ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
- pci_unmap_single(qdev->pdev, map, size, direction);
+lock_failed:
+ dma_unmap_single(&qdev->pdev->dev, map, size, direction);
return status;
}
@@ -261,52 +263,50 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ case MAC_ADDR_TYPE_CAM_MAC: {
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ MAC_ADDR_ADR | MAC_ADDR_RS |
+ type); /* type */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ if (status)
+ break;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ MAC_ADDR_ADR | MAC_ADDR_RS |
+ type); /* type */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ if (status)
+ break;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ if (type == MAC_ADDR_TYPE_CAM_MAC) {
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+ MAC_ADDR_MW, 0);
if (status)
- goto exit;
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
+ (index
+ << MAC_ADDR_IDX_SHIFT) | /* index */
+ MAC_ADDR_ADR |
+ MAC_ADDR_RS | type); /* type */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+ MAC_ADDR_MR, 0);
+ if (status)
+ break;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
- if (type == MAC_ADDR_TYPE_CAM_MAC) {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW,
- 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
- MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- }
- break;
}
+ break;
+ }
case MAC_ADDR_TYPE_VLAN:
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
@@ -314,7 +314,6 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
-exit:
return status;
}
@@ -328,107 +327,93 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
int status = 0;
switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC:
- {
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower = (addr[2] << 24) | (addr[3] << 16) |
- (addr[4] << 8) | (addr[5]);
-
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
+ case MAC_ADDR_TYPE_MULTI_MAC: {
+ u32 upper = (addr[0] << 8) | addr[1];
+ u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+ (addr[5]);
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
break;
- }
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- u32 cam_output;
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower =
- (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
+ MAC_ADDR_E);
+ ql_write32(qdev, MAC_ADDR_DATA, lower);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
+ MAC_ADDR_E);
+
+ ql_write32(qdev, MAC_ADDR_DATA, upper);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ break;
+ }
+ case MAC_ADDR_TYPE_CAM_MAC: {
+ u32 cam_output;
+ u32 upper = (addr[0] << 8) | addr[1];
+ u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ type); /* type */
+ ql_write32(qdev, MAC_ADDR_DATA, lower);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- /* This field should also include the queue id
- * and possibly the function id. Right now we hardcode
- * the route field to NIC core.
- */
- cam_output = (CAM_OUT_ROUTE_NIC |
- (qdev->
- func << CAM_OUT_FUNC_SHIFT) |
- (0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- cam_output |= CAM_OUT_RV;
- /* route to NIC core */
- ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+ type); /* type */
+ ql_write32(qdev, MAC_ADDR_DATA, upper);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
break;
- }
- case MAC_ADDR_TYPE_VLAN:
- {
- u32 enable_bit = *((u32 *) &addr[0]);
- /* For VLAN, the addr actually holds a bit that
- * either enables or disables the vlan id we are
- * addressing. It's either MAC_ADDR_E on or off.
- * That's bit-27 we're talking about.
- */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type | /* type */
- enable_bit); /* enable/disable */
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+ /* This field should also include the queue id
+ * and possibly the function id. Right now we hardcode
+ * the route field to NIC core.
+ */
+ cam_output = (CAM_OUT_ROUTE_NIC |
+ (qdev->func << CAM_OUT_FUNC_SHIFT) |
+ (0 << CAM_OUT_CQ_ID_SHIFT));
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ cam_output |= CAM_OUT_RV;
+ /* route to NIC core */
+ ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+ break;
+ }
+ case MAC_ADDR_TYPE_VLAN: {
+ u32 enable_bit = *((u32 *)&addr[0]);
+ /* For VLAN, the addr actually holds a bit that
+ * either enables or disables the vlan id we are
+ * addressing. It's either MAC_ADDR_E on or off.
+ * That's bit-27 we're talking about.
+ */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
break;
- }
+ ql_write32(qdev, MAC_ADDR_IDX,
+ offset | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type | /* type */
+ enable_bit); /* enable/disable */
+ break;
+ }
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
netif_crit(qdev, ifup, qdev->ndev,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
-exit:
return status;
}
@@ -455,7 +440,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
+ status = ql_set_mac_addr_reg(qdev, (u8 *)addr,
MAC_ADDR_TYPE_CAM_MAC,
qdev->func * MAX_CQ);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -857,7 +842,7 @@ int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
if (status)
goto exit;
- *data = (u64) lo | ((u64) hi << 32);
+ *data = (u64)lo | ((u64)hi << 32);
exit:
return status;
@@ -983,14 +968,14 @@ static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
{
struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
- pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
- qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
+ qdev->lbq_buf_size, DMA_FROM_DEVICE);
if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
ql_lbq_block_size(qdev)) {
/* last chunk of the master page */
- pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
+ ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
}
return lbq_desc;
@@ -1036,10 +1021,10 @@ static int qlge_refill_sb(struct rx_ring *rx_ring,
return -ENOMEM;
skb_reserve(skb, QLGE_SB_PAD);
- sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
+ sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
dev_kfree_skb_any(skb);
return -EIO;
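
Note: this and the following hunks are a mechanical move from the legacy pci_dma_* wrappers to the generic DMA API: pci_map_single()/pci_unmap_single() and pci_dma_sync_single_for_cpu() become dma_map_single()/dma_unmap_single()/dma_sync_single_for_cpu() on &pdev->dev, and the PCI_DMA_FROMDEVICE/PCI_DMA_TODEVICE constants become the enum dma_data_direction values DMA_FROM_DEVICE/DMA_TO_DEVICE. A minimal kernel-style sketch of the converted mapping path — map_rx_buf() is a hypothetical helper, not a qlge function:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int map_rx_buf(struct pci_dev *pdev, void *buf, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t addr;

	/* dma_map_single() replaces pci_map_single(); the direction
	 * enum replaces the PCI_DMA_FROMDEVICE constant */
	addr = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, addr))
		return -EIO;	/* replaces pci_dma_mapping_error() */

	*out = addr;
	return 0;
}
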
@@ -1064,10 +1049,10 @@ static int qlge_refill_lb(struct rx_ring *rx_ring,
page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
if (unlikely(!page))
return -ENOMEM;
- dma_addr = pci_map_page(qdev->pdev, page, 0,
+ dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
__free_pages(page, qdev->lbq_buf_order);
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
@@ -1224,20 +1209,20 @@ static void ql_unmap_send(struct ql_adapter *qdev,
qdev->ndev,
"unmapping OAL area.\n");
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
} else {
netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
"unmapping frag %d.\n", i);
- pci_unmap_page(qdev->pdev,
+ dma_unmap_page(&qdev->pdev->dev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
- maplen), PCI_DMA_TODEVICE);
+ maplen), DMA_TO_DEVICE);
}
}
@@ -1263,9 +1248,9 @@ static int ql_map_send(struct ql_adapter *qdev,
/*
* Map the skb buffer first.
*/
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping failed with error: %d\n", err);
@@ -1310,10 +1295,10 @@ static int ql_map_send(struct ql_adapter *qdev,
* etc...
*/
/* Tack on the OAL in the eighth segment of IOCB. */
- map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+ map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
sizeof(struct oal),
- PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ DMA_TO_DEVICE);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping outbound address list with error: %d\n",
@@ -1584,8 +1569,8 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
}
skb_reserve(new_skb, NET_IP_ALIGN);
- pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
skb_put_data(new_skb, skb->data, length);
@@ -1647,7 +1632,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
+ struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
@@ -1707,8 +1692,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* Headers fit nicely into a small buffer.
*/
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
skb_put(skb, hdr_len);
@@ -1737,10 +1722,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* buffer.
*/
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&qdev->pdev->dev,
+ sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
+ DMA_FROM_DEVICE);
skb_put_data(skb, sbq_desc->p.skb->data, length);
} else {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1750,9 +1735,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
skb_put(skb, length);
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
sbq_desc->p.skb = NULL;
}
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
@@ -1787,9 +1772,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
"No skb available, drop the packet.\n");
return NULL;
}
- pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
qdev->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
@@ -1820,8 +1805,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
int size, i = 0;
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
* This is an non TCP/UDP IP frame, so
@@ -1936,7 +1921,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
+ struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
@@ -2317,7 +2302,7 @@ static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
u32 enable_bit = MAC_ADDR_E;
int err;
- err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
MAC_ADDR_TYPE_VLAN, vid);
if (err)
netif_err(qdev, ifup, qdev->ndev,
@@ -2348,7 +2333,7 @@ static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
u32 enable_bit = 0;
int err;
- err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
MAC_ADDR_TYPE_VLAN, vid);
if (err)
netif_err(qdev, ifup, qdev->ndev,
@@ -2489,7 +2474,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+ mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
mac_iocb_ptr->total_hdrs_len =
cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
mac_iocb_ptr->net_trans_offset =
@@ -2527,7 +2512,7 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
__sum16 *check;
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+ mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
@@ -2558,7 +2543,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
struct ql_adapter *qdev = netdev_priv(ndev);
int tso;
struct tx_ring *tx_ring;
- u32 tx_ring_idx = (u32) skb->queue_mapping;
+ u32 tx_ring_idx = (u32)skb->queue_mapping;
tx_ring = &qdev->tx_ring[tx_ring_idx];
@@ -2585,7 +2570,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
mac_iocb_ptr->txq_idx = tx_ring_idx;
tx_ring_desc->skb = skb;
- mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
+ mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
if (skb_vlan_tag_present(skb)) {
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
@@ -2636,17 +2621,17 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
if (qdev->rx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
qdev->rx_ring_shadow_reg_area = NULL;
}
if (qdev->tx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->tx_ring_shadow_reg_area,
- qdev->tx_ring_shadow_reg_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ PAGE_SIZE,
+ qdev->tx_ring_shadow_reg_area,
+ qdev->tx_ring_shadow_reg_dma);
qdev->tx_ring_shadow_reg_area = NULL;
}
}
@@ -2654,8 +2639,8 @@ static void ql_free_shadow_space(struct ql_adapter *qdev)
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
qdev->rx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->rx_ring_shadow_reg_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
if (!qdev->rx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of RX shadow space failed.\n");
@@ -2663,8 +2648,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
}
qdev->tx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->tx_ring_shadow_reg_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
if (!qdev->tx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of TX shadow space failed.\n");
@@ -2673,10 +2658,10 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
return 0;
err_wqp_sh_area:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
return -ENOMEM;
}
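
Note: the coherent-memory hunks follow the same scheme. pci_alloc_consistent() and pci_zalloc_consistent() were thin wrappers that always allocated with GFP_ATOMIC, so the dma_alloc_coherent() replacements pass GFP_ATOMIC explicitly to keep behaviour identical; dma_alloc_coherent() returns zeroed memory in current kernels, which is why the zalloc variant needs no extra memset. Sketch under those assumptions — alloc_shadow() is a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_shadow(struct device *dev, dma_addr_t *dma)
{
	/* explicit GFP flag preserves the old wrapper's implicit
	 * GFP_ATOMIC; the returned page is already zeroed */
	return dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_ATOMIC);
}
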
@@ -2702,8 +2687,8 @@ static void ql_free_tx_resources(struct ql_adapter *qdev,
struct tx_ring *tx_ring)
{
if (tx_ring->wq_base) {
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
+ dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring->wq_base = NULL;
}
kfree(tx_ring->q);
@@ -2714,8 +2699,8 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
struct tx_ring *tx_ring)
{
tx_ring->wq_base =
- pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
- &tx_ring->wq_base_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ &tx_ring->wq_base_dma, GFP_ATOMIC);
if (!tx_ring->wq_base ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
@@ -2729,8 +2714,8 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
return 0;
err:
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
+ dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring->wq_base = NULL;
pci_alloc_err:
netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
@@ -2748,17 +2733,17 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
&lbq->queue[lbq->next_to_clean];
if (lbq_desc->p.pg_chunk.offset == last_offset)
- pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
put_page(lbq_desc->p.pg_chunk.page);
lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
}
if (rx_ring->master_chunk.page) {
- pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
+ ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
put_page(rx_ring->master_chunk.page);
rx_ring->master_chunk.page = NULL;
}
@@ -2777,9 +2762,9 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
return;
}
if (sbq_desc->p.skb) {
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
}
@@ -2820,8 +2805,8 @@ static int qlge_init_bq(struct qlge_bq *bq)
__le64 *buf_ptr;
int i;
- bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
- &bq->base_dma);
+ bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+ &bq->base_dma, GFP_ATOMIC);
if (!bq->base) {
netif_err(qdev, ifup, qdev->ndev,
"ring %u %s allocation failed.\n", rx_ring->cq_id,
@@ -2850,8 +2835,8 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
{
/* Free the small buffer queue. */
if (rx_ring->sbq.base) {
- pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
- rx_ring->sbq.base, rx_ring->sbq.base_dma);
+ dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+ rx_ring->sbq.base, rx_ring->sbq.base_dma);
rx_ring->sbq.base = NULL;
}
@@ -2861,8 +2846,8 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
/* Free the large buffer queue. */
if (rx_ring->lbq.base) {
- pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
- rx_ring->lbq.base, rx_ring->lbq.base_dma);
+ dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+ rx_ring->lbq.base, rx_ring->lbq.base_dma);
rx_ring->lbq.base = NULL;
}
@@ -2872,9 +2857,9 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
/* Free the rx queue. */
if (rx_ring->cq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->cq_size,
- rx_ring->cq_base, rx_ring->cq_base_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ rx_ring->cq_size,
+ rx_ring->cq_base, rx_ring->cq_base_dma);
rx_ring->cq_base = NULL;
}
}
@@ -2890,8 +2875,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
* Allocate the completion queue for this rx_ring.
*/
rx_ring->cq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
- &rx_ring->cq_base_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
+ &rx_ring->cq_base_dma, GFP_ATOMIC);
if (!rx_ring->cq_base) {
netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
@@ -3008,7 +2993,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
/* PCI doorbell mem area + 0x00 for consumer index register */
- rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
+ rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
rx_ring->cnsmr_idx = 0;
rx_ring->curr_entry = rx_ring->cq_base;
@@ -3108,7 +3093,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
* Assign doorbell registers for this tx_ring.
*/
/* TX PCI doorbell mem area for tx producer index */
- tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
+ tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
tx_ring->prod_idx = 0;
/* TX PCI doorbell mem area + 0x04 */
tx_ring->valid_db_reg = doorbell_area + 0x04;
@@ -3131,7 +3116,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
ql_init_tx_ring(qdev, tx_ring);
err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
- (u16) tx_ring->wq_id);
+ (u16)tx_ring->wq_id);
if (err) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
@@ -3431,9 +3416,9 @@ static int ql_request_irq(struct ql_adapter *qdev)
&qdev->rx_ring[0]);
status =
request_irq(pdev->irq, qlge_isr,
- test_bit(QL_MSI_ENABLED,
- &qdev->
- flags) ? 0 : IRQF_SHARED,
+ test_bit(QL_MSI_ENABLED, &qdev->flags)
+ ? 0
+ : IRQF_SHARED,
intr_context->name, &qdev->rx_ring[0]);
if (status)
goto err_irq;
@@ -3463,7 +3448,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
struct ricb *ricb = &qdev->ricb;
int status = 0;
int i;
- u8 *hash_id = (u8 *) ricb->hash_cq_id;
+ u8 *hash_id = (u8 *)ricb->hash_cq_id;
memset((void *)ricb, 0, sizeof(*ricb));
@@ -4125,11 +4110,11 @@ static struct net_device_stats *qlge_get_stats(struct net_device
/* Get RX stats. */
pkts = mcast = dropped = errors = bytes = 0;
for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
- pkts += rx_ring->rx_packets;
- bytes += rx_ring->rx_bytes;
- dropped += rx_ring->rx_dropped;
- errors += rx_ring->rx_errors;
- mcast += rx_ring->rx_multicast;
+ pkts += rx_ring->rx_packets;
+ bytes += rx_ring->rx_bytes;
+ dropped += rx_ring->rx_dropped;
+ errors += rx_ring->rx_errors;
+ mcast += rx_ring->rx_multicast;
}
ndev->stats.rx_packets = pkts;
ndev->stats.rx_bytes = bytes;
@@ -4140,9 +4125,9 @@ static struct net_device_stats *qlge_get_stats(struct net_device
/* Get TX stats. */
pkts = errors = bytes = 0;
for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
- pkts += tx_ring->tx_packets;
- bytes += tx_ring->tx_bytes;
- errors += tx_ring->tx_errors;
+ pkts += tx_ring->tx_packets;
+ bytes += tx_ring->tx_bytes;
+ errors += tx_ring->tx_errors;
}
ndev->stats.tx_packets = pkts;
ndev->stats.tx_bytes = bytes;
@@ -4218,7 +4203,7 @@ static void qlge_set_multicast_list(struct net_device *ndev)
goto exit;
i = 0;
netdev_for_each_mc_addr(ha, ndev) {
- if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
+ if (ql_set_mac_addr_reg(qdev, (u8 *)ha->addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to loadmulticast address.\n");
@@ -4255,7 +4240,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+ status = ql_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
MAC_ADDR_TYPE_CAM_MAC,
qdev->func * MAX_CQ);
if (status)
@@ -4430,13 +4415,14 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
}
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
set_bit(QL_DMA64, &qdev->flags);
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
}
if (err) {
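
Note: the mask setup keeps the old two-step logic — try a 64-bit streaming mask, record QL_DMA64 on success, fall back to 32 bits otherwise — with dma_set_mask()/dma_set_coherent_mask() as the device-generic forms of pci_set_dma_mask()/pci_set_consistent_dma_mask(). A driver that does not need the intermediate flag could collapse each pair with dma_set_mask_and_coherent(); a sketch of that alternative, not of what this patch does (the QL_DMA64 bookkeeping is deliberately omitted):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int init_dma_mask(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev,
						DMA_BIT_MASK(32));
	return err;	/* nonzero: no usable DMA addressing */
}
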
@@ -4448,8 +4434,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
pdev->needs_freset = 1;
pci_save_state(pdev);
qdev->reg_base =
- ioremap(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
+ ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
err = -ENOMEM;
@@ -4458,8 +4443,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
qdev->doorbell_area =
- ioremap(pci_resource_start(pdev, 3),
- pci_resource_len(pdev, 3));
+ ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
err = -ENOMEM;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 93283c7deec4..817793b9aff2 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -264,8 +264,10 @@ void expire_timeout_chk(struct adapter *padapter)
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
- DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
- updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ DBG_88E("asoc expire %pM, state = 0x%x\n",
+ (psta->hwaddr), psta->state);
+ updated = ap_free_sta(padapter, psta, true,
+ WLAN_REASON_DEAUTH_LEAVING);
} else {
/* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
if (psta->sleepq_len > (NR_XMITFRAME / pstapriv->asoc_list_cnt) &&
@@ -294,16 +296,21 @@ void expire_timeout_chk(struct adapter *padapter)
for (i = 0; i < chk_alive_num; i++) {
int ret = _FAIL;
- psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);
+ psta = rtw_get_stainfo_by_offset(pstapriv,
+ chk_alive_list[i]);
- if (psta->state & WIFI_SLEEP_STATE)
- ret = issue_nulldata(padapter, psta->hwaddr, 0, 1, 50);
- else
- ret = issue_nulldata(padapter, psta->hwaddr, 0, 3, 50);
+ if (psta->state & WIFI_SLEEP_STATE) {
+ ret = issue_nulldata(padapter, psta->hwaddr,
+ 0, 1, 50);
+ } else {
+ ret = issue_nulldata(padapter, psta->hwaddr,
+ 0, 3, 50);
+ }
psta->keep_alive_trycnt++;
if (ret == _SUCCESS) {
- DBG_88E("asoc check, sta(%pM) is alive\n", (psta->hwaddr));
+ DBG_88E("asoc check, sta(%pM) is alive\n",
+ (psta->hwaddr));
psta->expire_to = pstapriv->expire_to;
psta->keep_alive_trycnt = 0;
continue;
@@ -315,11 +322,13 @@ void expire_timeout_chk(struct adapter *padapter)
psta->keep_alive_trycnt = 0;
- DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
+ DBG_88E("asoc expire %pM, state = 0x%x\n",
+ psta->hwaddr, psta->state);
spin_lock_bh(&pstapriv->asoc_list_lock);
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
- updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ updated = ap_free_sta(padapter, psta, true,
+ WLAN_REASON_DEAUTH_LEAVING);
spin_unlock_bh(&pstapriv->asoc_list_lock);
}
@@ -431,7 +440,8 @@ static void update_bmc_sta(struct adapter *padapter)
supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates);
- memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
+ memcpy(psta->bssrateset, &pcur_network->SupportedRates,
+ supportRateNum);
psta->bssratelen = supportRateNum;
/* b/g mode ra_bitmap */
@@ -445,7 +455,8 @@ static void update_bmc_sta(struct adapter *padapter)
tx_ra_bitmap = 0xf;
raid = networktype_to_raid(network_type);
- init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
+ init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) &
+ 0x3f;
/* ap mode */
rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
@@ -456,7 +467,8 @@ static void update_bmc_sta(struct adapter *padapter)
arg = psta->mac_id & 0x1f;
arg |= BIT(7);
tx_ra_bitmap |= ((raid << 28) & 0xf0000000);
- DBG_88E("%s, mask = 0x%x, arg = 0x%x\n", __func__, tx_ra_bitmap, arg);
+ DBG_88E("%s, mask = 0x%x, arg = 0x%x\n", __func__,
+ tx_ra_bitmap, arg);
/* bitmap[0:27] = tx_rate_bitmap */
/* bitmap[28:31]= Rate Adaptive id */
@@ -647,7 +659,8 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
/* Beacon Control related register */
- rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&bcn_interval));
+ rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL,
+ (u8 *)(&bcn_interval));
UpdateBrateTbl(padapter, pnetwork->SupportedRates);
rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, pnetwork->SupportedRates);
@@ -657,7 +670,10 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
}
/* set channel, bwmode */
- p = rtw_get_ie((pnetwork->ies + sizeof(struct ndis_802_11_fixed_ie)), _HT_ADD_INFO_IE_, &ie_len, (pnetwork->ie_length - sizeof(struct ndis_802_11_fixed_ie)));
+ p = rtw_get_ie(pnetwork->ies + sizeof(struct ndis_802_11_fixed_ie),
+ _HT_ADD_INFO_IE_, &ie_len,
+ pnetwork->ie_length -
+ sizeof(struct ndis_802_11_fixed_ie));
if (p && ie_len) {
pht_info = (struct HT_info_element *)(p + 2);
@@ -682,7 +698,8 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
*/
set_channel_bwmode(padapter, cur_channel, cur_ch_offset, cur_bwmode);
- DBG_88E("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode, cur_ch_offset);
+ DBG_88E("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode,
+ cur_ch_offset);
/* */
pmlmeext->cur_channel = cur_channel;
@@ -771,17 +788,19 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
cap = get_unaligned_le16(ie);
/* SSID */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len,
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0) {
memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid));
- memcpy(pbss_network->ssid.ssid, (p + 2), ie_len);
+ memcpy(pbss_network->ssid.ssid, p + 2, ie_len);
pbss_network->ssid.ssid_length = ie_len;
}
/* channel */
channel = 0;
pbss_network->Configuration.Length = 0;
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len,
+ (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0)
channel = *(p + 2);
@@ -789,14 +808,16 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
/* get supported rates */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len,
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p) {
memcpy(supportRate, p + 2, ie_len);
supportRateNum = ie_len;
}
/* get ext_supported rates */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_);
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_,
+ &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p) {
memcpy(supportRate + supportRateNum, p + 2, ie_len);
supportRateNum += ie_len;
@@ -807,7 +828,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
/* parsing ERP_IE */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len,
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0)
ERP_IE_handler(padapter, (struct ndis_802_11_var_ie *)p);
@@ -824,7 +846,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pairwise_cipher = 0;
psecuritypriv->wpa2_group_cipher = _NO_PRIVACY_;
psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len,
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0) {
if (rtw_parse_wpa2_ie(p, ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
@@ -844,7 +867,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
psecuritypriv->wpa_pairwise_cipher = _NO_PRIVACY_;
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
- (pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2));
if ((p) && (!memcmp(p + 2, OUI1, 4))) {
if (rtw_parse_wpa_ie(p, ie_len + 2, &group_cipher,
&pairwise_cipher, NULL) == _SUCCESS) {
@@ -869,7 +892,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
if (pregistrypriv->wmm_enable) {
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
- (pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2));
if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
pmlmepriv->qospriv.qos_option = 1;
@@ -892,7 +915,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
}
/* parsing HT_CAP_IE */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len,
- (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0) {
struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p + 2);
@@ -916,7 +939,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* parsing HT_INFO_IE */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len,
- (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0)
pHT_info_ie = p;
switch (network_type) {
@@ -1226,17 +1249,17 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
}
/*
-op_mode
-Set to 0 (HT pure) under the following conditions
- - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
- - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
-Set to 1 (HT non-member protection) if there may be non-HT STAs
- in both the primary and the secondary channel
-Set to 2 if only HT STAs are associated in BSS,
- however and at least one 20 MHz HT STA is associated
-Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
- (currently non-GF HT station is considered as non-HT STA also)
-*/
+ * op_mode
+ * Set to 0 (HT pure) under the following conditions
+ * - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
+ * - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
+ * Set to 1 (HT non-member protection) if there may be non-HT STAs
+ * in both the primary and the secondary channel
+ * Set to 2 if only HT STAs are associated in BSS,
+ * and at least one 20 MHz HT STA is associated
+ * Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
+ * (currently non-GF HT station is considered as non-HT STA also)
+ */
static int rtw_ht_operation_update(struct adapter *padapter)
{
u16 cur_op_mode, new_op_mode;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index c525682d0edf..9bb3ec0cd62f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -370,28 +370,27 @@ static u16 Efuse_GetCurrentSize(struct adapter *pAdapter)
while (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data) &&
AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- if (efuse_data != 0xFF) {
- if ((efuse_data & 0x1F) == 0x0F) { /* extended header */
- hoffset = efuse_data;
+ if (efuse_data == 0xFF)
+ break;
+ if ((efuse_data & 0x1F) == 0x0F) { /* extended header */
+ hoffset = efuse_data;
+ efuse_addr++;
+ efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data);
+ if ((efuse_data & 0x0F) == 0x0F) {
efuse_addr++;
- efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data);
- if ((efuse_data & 0x0F) == 0x0F) {
- efuse_addr++;
- continue;
- } else {
- hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
- hworden = efuse_data & 0x0F;
- }
+ continue;
} else {
- hoffset = (efuse_data >> 4) & 0x0F;
- hworden = efuse_data & 0x0F;
+ hoffset = ((hoffset & 0xE0) >> 5) |
+ ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
}
- word_cnts = Efuse_CalculateWordCnts(hworden);
- /* read next header */
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
} else {
- break;
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
}
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ /* read next header */
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
}
rtw_hal_set_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
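
Note: the Efuse_GetCurrentSize() rewrite is a pure de-nesting. The old code wrapped the whole loop body in "if (efuse_data != 0xFF)" with the terminating break in the trailing else; the new code breaks on the 0xFF terminator first, so the header-parsing logic loses one indent level. The shape of the transformation as a compilable sketch — read_byte()/addr_valid() are hypothetical stand-ins for efuse_OneByteRead()/AVAILABLE_EFUSE_ADDR():

#include <stdbool.h>
#include <stdint.h>

static bool read_byte(uint16_t addr, uint8_t *out)
{
	(void)addr;
	*out = 0xFF;		/* pretend the efuse is empty */
	return true;
}

static bool addr_valid(uint16_t addr)
{
	return addr < 512;	/* hypothetical efuse size */
}

static uint16_t scan_used(uint16_t addr)
{
	uint8_t data;

	while (read_byte(addr, &data) && addr_valid(addr)) {
		if (data == 0xFF)	/* was the trailing else-break */
			break;
		addr++;			/* header parsing elided */
	}
	return addr;
}
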
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index e186982d5908..caf600eba03b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -253,11 +253,11 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
}
/* DS parameter set */
- ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&(pdev_network->Configuration.DSConfig), &sz);
+ ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&pdev_network->Configuration.DSConfig, &sz);
/* IBSS Parameter Set */
- ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz);
+ ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&pdev_network->Configuration.ATIMWindow, &sz);
if (rateLen > 8)
ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz);
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index d1406cc99768..32dccae186ca 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -90,7 +90,6 @@ static void SwLedBlink1(struct LED_871x *pLed)
{
struct adapter *padapter = pLed->padapter;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
if (pLed->BlinkingLedState == RTW_LED_ON) {
@@ -128,9 +127,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
break;
case LED_BLINK_SCAN:
pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
+ if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
@@ -164,9 +161,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
break;
case LED_BLINK_TXRX:
pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
+ if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
@@ -188,7 +183,6 @@ static void SwLedBlink1(struct LED_871x *pLed)
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
}
- pLed->BlinkTimes = 0;
pLed->bLedBlinkInProgress = false;
} else {
if (pLed->bLedOn)
@@ -208,12 +202,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
break;
case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- bStopBlinking = false;
- else
- bStopBlinking = true;
-
- if (bStopBlinking) {
+ if (pLed->BlinkingLedState != RTW_LED_ON) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
if (pLed->bLedOn)
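
Note: the SwLedBlink1() diff removes a write-once flag. bStopBlinking was assigned from a single condition and tested on the very next line, so each pair collapses into testing the condition directly, and the redundant "pLed->BlinkTimes = 0;" store (BlinkTimes is already 0 on that path) goes with it. The pattern in isolation:

#include <stdbool.h>

struct led {
	int blink_times;
	bool on;
};

static void blink_tick(struct led *led)
{
	led->blink_times--;
	/* was: bStopBlinking = (BlinkTimes == 0); if (bStopBlinking) */
	if (led->blink_times == 0)
		led->on = false;
}
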
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 04897cd48370..8d035f67ef61 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1781,7 +1781,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
if (!p || len == 0) { /* non-HT */
- if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
+ if (pbss_network->Configuration.DSConfig <= 0)
continue;
ICS[0][pbss_network->Configuration.DSConfig] = 1;
@@ -1932,11 +1932,11 @@ static void site_survey(struct adapter *padapter)
if (pmlmeext->sitesurvey_res.ssid[i].ssid_length) {
/* todo: to issue two probe req??? */
issue_probereq(padapter,
- &(pmlmeext->sitesurvey_res.ssid[i]),
+ &pmlmeext->sitesurvey_res.ssid[i],
NULL, false);
/* msleep(SURVEY_TO>>1); */
issue_probereq(padapter,
- &(pmlmeext->sitesurvey_res.ssid[i]),
+ &pmlmeext->sitesurvey_res.ssid[i],
NULL, false);
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index c4f58507dbfd..c000382c96d9 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -173,7 +173,7 @@ int ips_leave(struct adapter *padapter)
DBG_88E_LEVEL(_drv_info_, "nolinked power save leave\n");
- if ((_WEP40_ == psecuritypriv->dot11PrivacyAlgrthm) || (_WEP104_ == psecuritypriv->dot11PrivacyAlgrthm)) {
+ if ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_)) {
DBG_88E("==>%s, channel(%d), processing(%x)\n", __func__, padapter->mlmeextpriv.cur_channel, pwrpriv->bips_processing);
set_channel_bwmode(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
for (keyid = 0; keyid < 4; keyid++) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index d4278361e002..a036ef104198 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -1525,21 +1525,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
- if (sub_skb) {
- skb_reserve(sub_skb, 12);
- skb_put_data(sub_skb, pdata, nSubframe_Length);
- } else {
- sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
- if (sub_skb) {
- sub_skb->data = pdata;
- sub_skb->len = nSubframe_Length;
- skb_set_tail_pointer(sub_skb, nSubframe_Length);
- } else {
- DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes);
- break;
- }
+ if (!sub_skb) {
+ DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes);
+ break;
}
+ skb_reserve(sub_skb, 12);
+ skb_put_data(sub_skb, pdata, nSubframe_Length);
+
subframes[nr_subframes++] = sub_skb;
if (nr_subframes >= MAX_SUBFRAME_COUNT) {
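
Note: the amsdu_to_msdu() hunk drops the skb_clone() fallback — if dev_alloc_skb() fails, the subframe loop now just logs and stops instead of falling back to a clone of the original receive skb. The control flow becomes the usual allocate-or-bail shape; a userspace sketch, with copy_subframe() as a hypothetical stand-in for the dev_alloc_skb() + skb_put_data() sequence:

#include <stdlib.h>
#include <string.h>

static unsigned char *copy_subframe(const unsigned char *data, size_t len)
{
	unsigned char *buf = malloc(len);

	if (!buf)
		return NULL;	/* caller logs and breaks the loop */

	memcpy(buf, data, len);
	return buf;
}
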
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 486ee4bd4744..3d1d29e9f8e0 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -111,7 +111,7 @@ static int _rtl88e_fw_free_to_go(struct adapter *adapt)
do {
value32 = usb_read32(adapt, REG_MCUFWDL);
- if (value32 & FWDL_ChkSum_rpt)
+ if (value32 & FWDL_CHKSUM_RPT)
break;
} while (counter++ < POLLING_READY_TIMEOUT_COUNT);
@@ -146,7 +146,7 @@ int rtl88eu_download_fw(struct adapter *adapt)
struct dvobj_priv *dvobj = adapter_to_dvobj(adapt);
struct device *device = dvobj_to_dev(dvobj);
const struct firmware *fw;
- const char fw_name[] = "rtlwifi/rtl8188eufw.bin";
+ static const char fw_name[] = "rtlwifi/rtl8188eufw.bin";
struct rtl92c_firmware_header *pfwheader = NULL;
u8 *download_data, *fw_data;
size_t download_size;
@@ -192,7 +192,8 @@ int rtl88eu_download_fw(struct adapter *adapt)
rtl88e_firmware_selfreset(adapt);
}
_rtl88e_enable_fw_download(adapt, true);
- usb_write8(adapt, REG_MCUFWDL, usb_read8(adapt, REG_MCUFWDL) | FWDL_ChkSum_rpt);
+ usb_write8(adapt, REG_MCUFWDL,
+ usb_read8(adapt, REG_MCUFWDL) | FWDL_CHKSUM_RPT);
_rtl88e_write_fw(adapt, download_data, download_size);
_rtl88e_enable_fw_download(adapt, false);
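
Note: two independent cleanups in fw.c. The checksum-report bit is renamed to the all-caps macro convention (FWDL_ChkSum_rpt -> FWDL_CHKSUM_RPT, matching the rtl8188e_spec.h hunk further down), and the firmware path gains "static" so the string lives in .rodata once rather than being re-initialized as a stack array on every call. The storage-class point in isolation:

#include <stdio.h>

void report(void)
{
	/* one shared read-only copy; a plain "const char fw_name[]"
	 * would be a per-call stack array instead */
	static const char fw_name[] = "rtlwifi/rtl8188eufw.bin";

	printf("loading %s\n", fw_name);
}
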
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 698377ea60ee..28974808839d 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -5,8 +5,6 @@
*
******************************************************************************/
-/* include files */
-
#include "odm_precomp.h"
#include "phy.h"
@@ -193,7 +191,7 @@ void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
odm_DIG(pDM_Odm);
odm_CCKPacketDetectionThresh(pDM_Odm);
- if (*(pDM_Odm->pbPowerSaving))
+ if (*pDM_Odm->pbPowerSaving)
return;
odm_RefreshRateAdaptiveMask(pDM_Odm);
@@ -229,13 +227,13 @@ void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
u8 i;
struct sta_info *pEntry;
- if (*(pDM_Odm->pBandWidth) == ODM_BW40M) {
- if (*(pDM_Odm->pSecChOffset) == 1)
- pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) - 2;
- else if (*(pDM_Odm->pSecChOffset) == 2)
- pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) + 2;
+ if (*pDM_Odm->pBandWidth == ODM_BW40M) {
+ if (*pDM_Odm->pSecChOffset == 1)
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel - 2;
+ else if (*pDM_Odm->pSecChOffset == 2)
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel + 2;
} else {
- pDM_Odm->ControlChannel = *(pDM_Odm->pChannel);
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel;
}
for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
@@ -270,16 +268,16 @@ void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm)
void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm)
{
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoHook_Debug==>\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumTxBytesUnicast=%llu\n", *(pDM_Odm->pNumTxBytesUnicast)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumRxBytesUnicast=%llu\n", *(pDM_Odm->pNumRxBytesUnicast)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pWirelessMode=0x%x\n", *(pDM_Odm->pWirelessMode)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecChOffset=%d\n", *(pDM_Odm->pSecChOffset)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecurity=%d\n", *(pDM_Odm->pSecurity)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pBandWidth=%d\n", *(pDM_Odm->pBandWidth)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pChannel=%d\n", *(pDM_Odm->pChannel)));
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess=%d\n", *(pDM_Odm->pbScanInProcess)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving=%d\n", *(pDM_Odm->pbPowerSaving)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumTxBytesUnicast=%llu\n", *pDM_Odm->pNumTxBytesUnicast));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumRxBytesUnicast=%llu\n", *pDM_Odm->pNumRxBytesUnicast));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pWirelessMode=0x%x\n", *pDM_Odm->pWirelessMode));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecChOffset=%d\n", *pDM_Odm->pSecChOffset));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecurity=%d\n", *pDM_Odm->pSecurity));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pBandWidth=%d\n", *pDM_Odm->pBandWidth));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pChannel=%d\n", *pDM_Odm->pChannel));
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess=%d\n", *pDM_Odm->pbScanInProcess));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving=%d\n", *pDM_Odm->pbPowerSaving));
}
void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
@@ -348,7 +346,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
return;
}
- if (*(pDM_Odm->pbScanInProcess)) {
+ if (*pDM_Odm->pbScanInProcess) {
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: In Scan Progress\n"));
return;
}
@@ -508,7 +506,7 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
{
struct adapter *adapter = pDM_Odm->Adapter;
u32 ret_value;
- struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+ struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
return;
@@ -581,7 +579,7 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
{
u8 CurCCK_CCAThres;
- struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+ struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD | ODM_BB_FA_CNT)))
return;
@@ -739,7 +737,7 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u
} else if (rssi_level == DM_RATR_STA_MIDDLE) {
rate_bitmap = 0x000ff000;
} else {
- if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ if (*pDM_Odm->pBandWidth == ODM_BW40M)
rate_bitmap = 0x000ff015;
else
rate_bitmap = 0x000ff005;
@@ -945,7 +943,7 @@ void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm)
{
pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
- if (*(pDM_Odm->mp_mode) != 1)
+ if (*pDM_Odm->mp_mode != 1)
pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
@@ -1035,11 +1033,11 @@ void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
u64 cur_tx_bytes = 0;
u64 cur_rx_bytes = 0;
u8 bbtchange = false;
- struct xmit_priv *pxmitpriv = &(Adapter->xmitpriv);
- struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ struct xmit_priv *pxmitpriv = &Adapter->xmitpriv;
+ struct recv_priv *precvpriv = &Adapter->recvpriv;
struct registry_priv *pregpriv = &Adapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &(Adapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (pregpriv->wifi_spec == 1) /* (pmlmeinfo->HT_enable == 0)) */
goto dm_CheckEdcaTurbo_EXIT;
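
Note: the odm.c changes are parenthesis cleanups. Postfix "->" binds tighter than unary "*" and "&", so "*(pDM_Odm->pChannel)" and "&(Adapter->xmitpriv)" mean exactly the same thing without the brackets. In sketch form:

struct dm {
	int *channel;
};

static int control_channel(struct dm *dm)
{
	return *dm->channel;	/* identical to *(dm->channel) */
}
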
diff --git a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
index a6f2731b076d..65a346ae3cb0 100644
--- a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
@@ -5,8 +5,6 @@
*
******************************************************************************/
-/* include files */
-
#include "odm_precomp.h"
#define READ_AND_CONFIG READ_AND_CONFIG_MP
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index b9025815b682..920688fc9e9f 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -345,8 +345,8 @@ static void dm_txpwr_track_setpwr(struct odm_dm_struct *dm_odm)
{
if (dm_odm->BbSwingFlagOfdm || dm_odm->BbSwingFlagCck) {
ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("dm_txpwr_track_setpwr CH=%d\n", *(dm_odm->pChannel)));
- phy_set_tx_power_level(dm_odm->Adapter, *(dm_odm->pChannel));
+ ("dm_txpwr_track_setpwr CH=%d\n", *dm_odm->pChannel));
+ phy_set_tx_power_level(dm_odm->Adapter, *dm_odm->pChannel);
dm_odm->BbSwingFlagOfdm = false;
dm_odm->BbSwingFlagCck = false;
}
@@ -786,7 +786,7 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8],
}
}
-static void save_adda_registers(struct adapter *adapt, u32 *addareg,
+static void save_adda_registers(struct adapter *adapt, const u32 *addareg,
u32 *backup, u32 register_num)
{
u32 i;
@@ -795,7 +795,7 @@ static void save_adda_registers(struct adapter *adapt, u32 *addareg,
backup[i] = phy_query_bb_reg(adapt, addareg[i], bMaskDWord);
}
-static void save_mac_registers(struct adapter *adapt, u32 *mac_reg,
+static void save_mac_registers(struct adapter *adapt, const u32 *mac_reg,
u32 *backup)
{
u32 i;
@@ -806,7 +806,7 @@ static void save_mac_registers(struct adapter *adapt, u32 *mac_reg,
backup[i] = usb_read32(adapt, mac_reg[i]);
}
-static void reload_adda_reg(struct adapter *adapt, u32 *adda_reg,
+static void reload_adda_reg(struct adapter *adapt, const u32 *adda_reg,
 			    u32 *backup, u32 register_num)
{
u32 i;
@@ -815,8 +815,8 @@ static void reload_adda_reg(struct adapter *adapt, u32 *adda_reg,
phy_set_bb_reg(adapt, adda_reg[i], bMaskDWord, backup[i]);
}
-static void reload_mac_registers(struct adapter *adapt,
- u32 *mac_reg, u32 *backup)
+static void reload_mac_registers(struct adapter *adapt, const u32 *mac_reg,
+ u32 *backup)
{
u32 i;
@@ -826,7 +826,7 @@ static void reload_mac_registers(struct adapter *adapt,
usb_write32(adapt, mac_reg[i], backup[i]);
}
-static void path_adda_on(struct adapter *adapt, u32 *adda_reg,
+static void path_adda_on(struct adapter *adapt, const u32 *adda_reg,
bool is_path_a_on, bool is2t)
{
u32 path_on;
@@ -844,7 +844,8 @@ static void path_adda_on(struct adapter *adapt, u32 *adda_reg,
phy_set_bb_reg(adapt, adda_reg[i], bMaskDWord, path_on);
}
-static void mac_setting_calibration(struct adapter *adapt, u32 *mac_reg, u32 *backup)
+static void mac_setting_calibration(struct adapter *adapt, const u32 *mac_reg,
+ u32 *backup)
{
u32 i = 0;
@@ -952,30 +953,31 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
struct odm_dm_struct *dm_odm = &adapt->HalData->odmpriv;
u32 i;
u8 path_a_ok, path_b_ok;
- u32 adda_reg[IQK_ADDA_REG_NUM] = {
- rFPGA0_XCD_SwitchControl, rBlue_Tooth,
- rRx_Wait_CCA, rTx_CCK_RFON,
- rTx_CCK_BBON, rTx_OFDM_RFON,
- rTx_OFDM_BBON, rTx_To_Rx,
- rTx_To_Tx, rRx_CCK,
- rRx_OFDM, rRx_Wait_RIFS,
- rRx_TO_Rx, rStandby,
- rSleep, rPMPD_ANAEN};
-
- u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
- REG_TXPAUSE, REG_BCN_CTRL,
- REG_BCN_CTRL_1, REG_GPIO_MUXCFG};
-
+ static const u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth,
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN
+ };
+ static const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL,
+ REG_BCN_CTRL_1, REG_GPIO_MUXCFG
+ };
/* since 92C & 92D have the different define in IQK_BB_REG */
- u32 iqk_bb_reg_92c[IQK_BB_REG_NUM] = {
- rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar,
- rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB,
- rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
- rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD};
+ static const u32 iqk_bb_reg_92c[IQK_BB_REG_NUM] = {
+ rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar,
+ rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB,
+ rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
+ rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD
+ };
u32 retry_count = 9;
- if (*(dm_odm->mp_mode) == 1)
+ if (*dm_odm->mp_mode == 1)
retry_count = 9;
else
retry_count = 2;
@@ -1320,7 +1322,7 @@ void rtl88eu_phy_lc_calibrate(struct adapter *adapt)
if (singletone || carrier_sup)
return;
- while (*(dm_odm->pbScanInProcess) && timecount < timeout) {
+ while (*dm_odm->pbScanInProcess && timecount < timeout) {
mdelay(50);
timecount += 50;
}
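
Note: the phy.c hunks are a const-correctness pass. The ADDA/MAC/BB register tables become "static const u32[]", so every helper that only reads a table now takes "const u32 *"; that lets the tables live in .rodata and turns an accidental write through the pointer into a compile error. The signature pattern, sketched with hypothetical register values:

#include <stdint.h>

static const uint32_t adda_reg[] = { 0x1, 0x2, 0x3 };	/* hypothetical */

/* reads regs[], writes backup[] — the parameter types now say so */
static void save_regs(const uint32_t *regs, uint32_t *backup,
		      unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		backup[i] = regs[i];	/* stands in for phy_query_bb_reg() */
}

void backup_adda(void)
{
	static uint32_t backup[3];

	save_regs(adda_reg, backup, 3);
}
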
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 00a9f692bb06..6702f263c770 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -79,7 +79,7 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
}
}
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- ptr = (u8 *)(&(tx_agc[idx1]));
+ ptr = (u8 *)(&tx_agc[idx1]);
for (idx2 = 0; idx2 < 4; idx2++) {
if (*ptr > RF6052_MAX_TX_PWR)
*ptr = RF6052_MAX_TX_PWR;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 371e746915dd..176716d3e903 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -256,7 +256,7 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, min_t(u32, rate_len, 8), cur_network->SupportedRates, &pktlen);
/* DS parameter set */
- pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pktlen);
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&cur_network->Configuration.DSConfig, &pktlen);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
u32 ATIMWindow;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 241f55b92808..1af919ff6d93 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -29,9 +29,6 @@ static void dm_InitGPIOSetting(struct adapter *Adapter)
usb_write8(Adapter, REG_GPIO_MUXCFG, tmp1byte);
}
-/* */
-/* functions */
-/* */
static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
{
struct hal_data_8188e *hal_data = Adapter->HalData;
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index c0114ad79788..0d3e4a6e7e85 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -50,7 +50,7 @@ struct __queue {
static inline struct list_head *get_list_head(struct __queue *queue)
{
- return &(queue->queue);
+ return &queue->queue;
}
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index dd943c831d91..be30c9434a29 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -817,7 +817,7 @@ So the following defines for 92C is not entire!!!!!!
/* 2 MCUFWDL */
#define MCUFWDL_EN BIT(0)
#define MCUFWDL_RDY BIT(1)
-#define FWDL_ChkSum_rpt BIT(2)
+#define FWDL_CHKSUM_RPT BIT(2)
#define MACINI_RDY BIT(3)
#define BBINI_RDY BIT(4)
#define RFINI_RDY BIT(5)
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 9a89791720e0..d5968ef9f43d 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -93,7 +93,7 @@ static char *translate_scan(struct adapter *padapter,
struct wlan_network *pnetwork,
char *start, char *stop)
{
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct iw_event iwe;
u16 cap;
__le16 le_tmp;
@@ -417,7 +417,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
ret = -EOPNOTSUPP;
goto exit;
}
- memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->KeyMaterial, pwep->KeyLength);
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0);
}
@@ -444,8 +444,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
- memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
padapter->securitypriv.busetkipkey = false;
}
@@ -454,8 +454,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
rtw_setstakey_cmd(padapter, (unsigned char *)psta, true);
} else { /* group key */
memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16 ));
- memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
+ memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
padapter->securitypriv.binstallGrpkey = true;
DBG_88E(" ~~~~set sta key:groupkey\n");
@@ -620,7 +620,7 @@ static int rtw_wx_get_name(struct net_device *dev,
u32 ht_ielen = 0;
char *p;
u8 ht_cap = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
NDIS_802_11_RATES_EX *prates = NULL;
@@ -669,7 +669,7 @@ static int rtw_wx_get_freq(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -738,7 +738,7 @@ static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_get_mode\n"));
@@ -938,10 +938,10 @@ static int rtw_wx_set_wap(struct net_device *dev,
uint ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct sockaddr *temp = (struct sockaddr *)awrq;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct list_head *phead;
u8 *dst_bssid, *src_bssid;
- struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
enum ndis_802_11_auth_mode authmode;
@@ -1002,7 +1002,7 @@ static int rtw_wx_get_wap(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
wrqu->ap_addr.sa_family = ARPHRD_ETHER;
@@ -1188,8 +1188,8 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
{
struct list_head *plist, *phead;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
char *ev = extra;
char *stop = ev + wrqu->data.length;
@@ -1217,7 +1217,7 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
break;
}
- spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
phead = get_list_head(queue);
plist = phead->next;
@@ -1358,7 +1358,7 @@ static int rtw_wx_get_essid(struct net_device *dev,
{
u32 len;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_essid\n"));
@@ -1564,7 +1564,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
struct ndis_802_11_wep wep;
enum ndis_802_11_auth_mode authmode;
- struct iw_point *erq = &(wrqu->encoding);
+ struct iw_point *erq = &wrqu->encoding;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
@@ -1675,8 +1675,8 @@ static int rtw_wx_get_enc(struct net_device *dev,
{
uint key;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_point *erq = &(wrqu->encoding);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct iw_point *erq = &wrqu->encoding;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -1759,7 +1759,7 @@ static int rtw_wx_set_auth(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_param *param = (struct iw_param *)&(wrqu->param);
+ struct iw_param *param = (struct iw_param *)&wrqu->param;
int ret = 0;
switch (param->flags & IW_AUTH_INDEX) {
@@ -2012,14 +2012,9 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
if (!p->pointer || p->length != sizeof(struct ieee_param))
return -EINVAL;
- param = (struct ieee_param *)rtw_malloc(p->length);
- if (!param)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param))
+ return PTR_ERR(param);
switch (param->cmd) {
case IEEE_CMD_SET_WPA_PARAM:
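The memdup_user() conversion above (repeated below in rtw_hostapd_ioctl) is the standard replacement for the open-coded allocate-then-copy sequence: it performs the kmalloc() and copy_from_user() itself and returns an ERR_PTR on failure, so the caller no longer needs a kfree() on the error path. The calling pattern, reduced to a sketch:

	param = memdup_user(p->pointer, p->length);
	if (IS_ERR(param))
		return PTR_ERR(param);
	/* ... use param; the success paths still kfree(param) as before ... */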
@@ -2093,7 +2088,7 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
u8 keylen;
struct cmd_obj *pcmd;
struct setkey_parm *psetkeyparm;
- struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
int res = _SUCCESS;
DBG_88E("%s\n", __func__);
@@ -2130,7 +2125,7 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
keylen = 16;
}
- memcpy(&(psetkeyparm->key[0]), key, keylen);
+ memcpy(&psetkeyparm->key[0], key, keylen);
pcmd->cmdcode = _SetKey_CMD_;
pcmd->parmbuf = (u8 *)psetkeyparm;
@@ -2173,7 +2168,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
struct sta_info *psta = NULL, *pbcmc_sta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
struct sta_priv *pstapriv = &padapter->stapriv;
DBG_88E("%s\n", __func__);
@@ -2245,7 +2240,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
- memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->KeyMaterial, pwep->KeyLength);
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
@@ -2256,7 +2251,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
/* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
/* psecuritypriv->dot11PrivacyKeyIndex = keyid", but can rtw_set_key to cam */
- memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->KeyMaterial, pwep->KeyLength);
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
@@ -2283,8 +2278,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
@@ -2326,8 +2321,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psta->dot118021XPrivacy = _TKIP_;
/* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
- memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
@@ -2357,8 +2352,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
@@ -2398,7 +2393,7 @@ static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int
{
int ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
unsigned char *pbuf = param->u.bcn_ie.buf;
@@ -2436,7 +2431,7 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
DBG_88E("rtw_add_sta(aid =%d) =%pM\n", param->u.add_sta.aid, (param->sta_addr));
@@ -2489,7 +2484,7 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
{
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
int updated = 0;
@@ -2524,7 +2519,7 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
struct sta_data *psta_data = (struct sta_data *)param_ex->data;
@@ -2580,7 +2575,7 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
DBG_88E("rtw_get_sta_wpaie, sta_addr: %pM\n", (param->sta_addr));
@@ -2616,8 +2611,8 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
{
unsigned char wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int ie_len;
DBG_88E("%s, len =%d\n", __func__, len);
@@ -2651,7 +2646,7 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
DBG_88E("%s, len =%d\n", __func__, len);
@@ -2680,7 +2675,7 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
DBG_88E("%s, len =%d\n", __func__, len);
@@ -2710,9 +2705,9 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 value;
@@ -2734,7 +2729,7 @@ static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param,
static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2748,7 +2743,7 @@ static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *p
static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2762,7 +2757,7 @@ static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *para
static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2789,14 +2784,9 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
if (!p->pointer || p->length != sizeof(struct ieee_param))
return -EINVAL;
- param = (struct ieee_param *)rtw_malloc(p->length);
- if (!param)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param))
+ return PTR_ERR(param);
switch (param->cmd) {
case RTL871X_HOSTAPD_FLUSH:
@@ -2882,7 +2872,7 @@ static int rtw_wx_set_priv(struct net_device *dev,
/* added for wps2.0 @20110524 */
if (dwrq->flags == 0x8766 && len > 8) {
u32 cp_sz;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 *probereq_wpsie = ext;
int probereq_wpsie_len = len;
u8 wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index daf6db354982..bf86d03820ca 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -77,7 +77,7 @@ static int rtw_android_get_rssi(struct net_device *net, char *command,
int total_len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(net);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pcur_network = &pmlmepriv->cur_network;
int bytes_written = 0;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index d3664e508cbe..a7cd4de65b28 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -81,8 +81,8 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb);
static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb);
static short _rtl92e_pci_initdescring(struct net_device *dev);
-static void _rtl92e_irq_tx_tasklet(struct r8192_priv *priv);
-static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv);
+static void _rtl92e_irq_tx_tasklet(unsigned long data);
+static void _rtl92e_irq_rx_tasklet(unsigned long data);
static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv);
static int _rtl92e_up(struct net_device *dev, bool is_silent_reset);
static int _rtl92e_try_up(struct net_device *dev);
@@ -516,8 +516,9 @@ static int _rtl92e_handle_assoc_response(struct net_device *dev,
return 0;
}
-static void _rtl92e_prepare_beacon(struct r8192_priv *priv)
+static void _rtl92e_prepare_beacon(unsigned long data)
{
+ struct r8192_priv *priv = (struct r8192_priv *)data;
struct net_device *dev = priv->rtllib->dev;
struct sk_buff *pskb = NULL, *pnewskb = NULL;
struct cb_desc *tcb_desc = NULL;
@@ -1007,14 +1008,11 @@ static void _rtl92e_init_priv_task(struct net_device *dev)
(void *)rtl92e_hw_wakeup_wq, dev);
INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq,
(void *)rtl92e_hw_sleep_wq, dev);
- tasklet_init(&priv->irq_rx_tasklet,
- (void(*)(unsigned long))_rtl92e_irq_rx_tasklet,
+ tasklet_init(&priv->irq_rx_tasklet, _rtl92e_irq_rx_tasklet,
(unsigned long)priv);
- tasklet_init(&priv->irq_tx_tasklet,
- (void(*)(unsigned long))_rtl92e_irq_tx_tasklet,
+ tasklet_init(&priv->irq_tx_tasklet, _rtl92e_irq_tx_tasklet,
(unsigned long)priv);
- tasklet_init(&priv->irq_prepare_beacon_tasklet,
- (void(*)(unsigned long))_rtl92e_prepare_beacon,
+ tasklet_init(&priv->irq_prepare_beacon_tasklet, _rtl92e_prepare_beacon,
(unsigned long)priv);
}
@@ -2113,13 +2111,17 @@ static void _rtl92e_tx_resume(struct net_device *dev)
}
}
-static void _rtl92e_irq_tx_tasklet(struct r8192_priv *priv)
+static void _rtl92e_irq_tx_tasklet(unsigned long data)
{
+ struct r8192_priv *priv = (struct r8192_priv *)data;
+
_rtl92e_tx_resume(priv->rtllib->dev);
}
-static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv)
+static void _rtl92e_irq_rx_tasklet(unsigned long data)
{
+ struct r8192_priv *priv = (struct r8192_priv *)data;
+
_rtl92e_rx_normal(priv->rtllib->dev);
rtl92e_writel(priv->rtllib->dev, INTA_MASK,
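The tasklet changes above remove the function-pointer casts by giving each handler the signature the (pre-tasklet_setup) API actually expects: the callback receives its cookie as an unsigned long and casts it back itself. A minimal sketch, with a hypothetical handler name:

static void rx_tasklet(unsigned long data)
{
	struct r8192_priv *priv = (struct r8192_priv *)data;

	/* deferred RX work runs here */
}

	/* registration then passes the matching function type directly: */
	tasklet_init(&priv->irq_rx_tasklet, rx_tasklet, (unsigned long)priv);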
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 20e494186c9e..462835684e8b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -456,7 +456,7 @@ static void _rtl92e_dm_bandwidth_autoswitch(struct net_device *dev)
if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ||
!priv->rtllib->bandwidth_auto_switch.bautoswitch_enable)
return;
- if (priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz == false) {
+ if (!priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz) {
if (priv->undecorated_smoothed_pwdb <=
priv->rtllib->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = true;
@@ -1297,7 +1297,7 @@ static void _rtl92e_dm_dig_init(struct net_device *dev)
static void _rtl92e_dm_ctrl_initgain_byrssi(struct net_device *dev)
{
- if (dm_digtable.dig_enable_flag == false)
+ if (!dm_digtable.dig_enable_flag)
return;
if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
@@ -1332,7 +1332,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev)
u8 i;
static u8 fw_dig;
- if (dm_digtable.dig_enable_flag == false)
+ if (!dm_digtable.dig_enable_flag)
return;
if (dm_digtable.dig_algorithm_switch)
@@ -1366,7 +1366,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
static u32 reset_cnt;
u8 i;
- if (dm_digtable.dig_enable_flag == false)
+ if (!dm_digtable.dig_enable_flag)
return;
if (dm_digtable.dig_algorithm_switch) {
@@ -1501,7 +1501,7 @@ static void _rtl92e_dm_initial_gain(struct net_device *dev)
reset_cnt = 0;
}
- if (rtllib_act_scanning(priv->rtllib, true) == true) {
+ if (rtllib_act_scanning(priv->rtllib, true)) {
force_write = 1;
return;
}
@@ -2444,7 +2444,7 @@ static void _rtl92e_dm_init_dynamic_tx_power(struct net_device *dev)
static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- unsigned int txhipower_threshhold = 0;
+ unsigned int txhipower_threshold = 0;
unsigned int txlowpower_threshold = 0;
if (priv->rtllib->bdynamic_txpower_enable != true) {
@@ -2454,10 +2454,10 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
}
if ((priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_ATHEROS) &&
(priv->rtllib->mode == IEEE_G)) {
- txhipower_threshhold = TX_POWER_ATHEROAP_THRESH_HIGH;
+ txhipower_threshold = TX_POWER_ATHEROAP_THRESH_HIGH;
txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
} else {
- txhipower_threshhold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
+ txhipower_threshold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
}
@@ -2465,7 +2465,7 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
priv->undecorated_smoothed_pwdb);
if (priv->rtllib->state == RTLLIB_LINKED) {
- if (priv->undecorated_smoothed_pwdb >= txhipower_threshhold) {
+ if (priv->undecorated_smoothed_pwdb >= txhipower_threshold) {
priv->bDynamicTxHighPower = true;
priv->bDynamicTxLowPower = false;
} else {
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index d83d72594312..8abc921ecb3e 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -371,7 +371,7 @@ void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
if ((ieee->iw_mode == IW_MODE_ADHOC) ||
(ieee->iw_mode == IW_MODE_MASTER)) {
pHTInfoEle->ControlChl = ieee->current_network.channel;
- pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false) ?
+ pHTInfoEle->ExtChlOffset = ((!pHT->bRegBW40MHz) ?
HT_EXTCHNL_OFFSET_NO_EXT :
(ieee->current_network.channel <= 6)
? HT_EXTCHNL_OFFSET_UPPER :
@@ -526,7 +526,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34};
- if (pHTInfo->bCurrentHTSupport == false) {
+ if (!pHTInfo->bCurrentHTSupport) {
netdev_warn(ieee->dev, "%s(): HT_DISABLE\n", __func__);
return;
}
@@ -873,7 +873,7 @@ void HTSetConnectBwMode(struct rtllib_device *ieee,
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
- if (pHTInfo->bRegBW40MHz == false)
+ if (!pHTInfo->bRegBW40MHz)
return;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index e101f7b13c7e..195d963c4fbb 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -520,55 +520,68 @@ static bool AddReorderEntry(struct rx_ts_record *pTS, struct rx_reorder_entry *p
return true;
}
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray, u8 index)
+static void indicate_packets(struct ieee80211_device *ieee,
+ struct ieee80211_rxb *rxb)
{
- u8 i = 0, j = 0;
+ struct net_device_stats *stats = &ieee->stats;
+ struct net_device *dev = ieee->dev;
u16 ethertype;
-// if(index > 1)
-// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): hahahahhhh, We indicate packet from reorder list, index is %u\n",__func__,index);
- for (j = 0; j < index; j++) {
-//added by amy for reorder
- struct ieee80211_rxb *prxb = prxbIndicateArray[j];
- for (i = 0; i < prxb->nr_subframes; i++) {
- struct sk_buff *sub_skb = prxb->subframes[i];
+ u8 i;
+
+ for (i = 0; i < rxb->nr_subframes; i++) {
+ struct sk_buff *sub_skb = rxb->subframes[i];
+
+ if (!sub_skb)
+ continue;
/* convert hdr + possible LLC headers into Ethernet header */
- ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
- if (sub_skb->len >= 8 &&
- ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
+ ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
+ if (sub_skb->len >= 8 &&
+ ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
+ ethertype != ETH_P_AARP &&
+ ethertype != ETH_P_IPX) ||
+ !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
- skb_pull(sub_skb, SNAP_SIZE);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
- } else {
+ skb_pull(sub_skb, SNAP_SIZE);
+ } else {
/* Leave Ethernet header part of hdr and full payload */
- put_unaligned_be16(sub_skb->len, skb_push(sub_skb, 2));
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
- }
- //stats->rx_packets++;
- //stats->rx_bytes += sub_skb->len;
+ put_unaligned_be16(sub_skb->len, skb_push(sub_skb, 2));
+ }
+ memcpy(skb_push(sub_skb, ETH_ALEN), rxb->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), rxb->dst, ETH_ALEN);
+
+ stats->rx_packets++;
+ stats->rx_bytes += sub_skb->len;
+ if (is_multicast_ether_addr(rxb->dst))
+ stats->multicast++;
/* Indicate the packets to upper layer */
- if (sub_skb) {
- sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev);
- memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
- sub_skb->dev = ieee->dev;
- sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
- //skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */
- ieee->last_rx_ps_time = jiffies;
- netif_rx(sub_skb);
- }
- }
+ sub_skb->protocol = eth_type_trans(sub_skb, dev);
+ memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
+ sub_skb->dev = dev;
+ /* 802.11 crc not sufficient */
+ sub_skb->ip_summed = CHECKSUM_NONE;
+ ieee->last_rx_ps_time = jiffies;
+ netif_rx(sub_skb);
+ }
+}
+
+void ieee80211_indicate_packets(struct ieee80211_device *ieee,
+ struct ieee80211_rxb **prxbIndicateArray,
+ u8 index)
+{
+ u8 i;
+
+ for (i = 0; i < index; i++) {
+ struct ieee80211_rxb *prxb = prxbIndicateArray[i];
+
+ indicate_packets(ieee, prxb);
kfree(prxb);
prxb = NULL;
}
}
-
static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
struct ieee80211_rxb *prxb,
struct rx_ts_record *pTS, u16 SeqNum)
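The 802.11-to-Ethernet conversion that indicate_packets() factors out follows the standard SNAP decapsulation rule: payloads starting with the RFC1042 SNAP header (aa aa 03 00 00 00) or the Bridge-Tunnel header (aa aa 03 00 00 f8) have the 6-byte prefix stripped so the trailing EtherType is kept, except for AARP and IPX frames, which keep their encapsulation and get a length field pushed instead. A hypothetical helper mirroring the test above (ETH_P_* constants from linux/if_ether.h):

static bool snap_decap_ok(const u8 *d, unsigned int len)
{
	static const u8 rfc1042[6]  = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
	static const u8 brtunnel[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
	u16 ethertype;

	if (len < 8)
		return false;
	ethertype = (d[6] << 8) | d[7];
	if (!memcmp(d, brtunnel, 6))
		return true;
	return !memcmp(d, rfc1042, 6) &&
	       ethertype != ETH_P_AARP && ethertype != ETH_P_IPX;
}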
@@ -877,7 +890,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
u16 fc, type, stype, sc;
struct net_device_stats *stats;
unsigned int frag;
- u16 ethertype;
//added by amy for reorder
u8 TID = 0;
u16 SeqNum = 0;
@@ -1260,47 +1272,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
//added by amy for reorder
if (!ieee->pHTInfo->bCurRxReorderEnable || !pTS) {
-//added by amy for reorder
- for (i = 0; i < rxb->nr_subframes; i++) {
- struct sk_buff *sub_skb = rxb->subframes[i];
-
- if (sub_skb) {
- /* convert hdr + possible LLC headers into Ethernet header */
- ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
- if (sub_skb->len >= 8 &&
- ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and
- * replace EtherType */
- skb_pull(sub_skb, SNAP_SIZE);
- memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
- } else {
- u16 len;
- /* Leave Ethernet header part of hdr and full payload */
- len = be16_to_cpu(htons(sub_skb->len));
- memcpy(skb_push(sub_skb, 2), &len, 2);
- memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
- }
-
- stats->rx_packets++;
- stats->rx_bytes += sub_skb->len;
- if (is_multicast_ether_addr(dst)) {
- stats->multicast++;
- }
-
- /* Indicate the packets to upper layer */
- sub_skb->protocol = eth_type_trans(sub_skb, dev);
- memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
- sub_skb->dev = dev;
- sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
- //skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */
- ieee->last_rx_ps_time = jiffies;
- netif_rx(sub_skb);
- }
- }
+ indicate_packets(ieee, rxb);
kfree(rxb);
rxb = NULL;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 0ee054d82832..63a561ab4a76 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -372,9 +372,9 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, struct cb_desc *tcb_
return;
}
- if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
+ if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
tcb_desc->bUseShortGI = true;
- else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
+ else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
tcb_desc->bUseShortGI = true;
}
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index f0b85338b567..2f0d0ffa6fae 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,12 +71,13 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_init_queue(&pxmitpriv->apsd_queue);
_init_queue(&pxmitpriv->free_xmit_queue);
/*
- * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
+ * Please allocate memory with sz = sizeof(struct xmit_frame) * NR_XMITFRAME,
* and initialize free_xmit_frame below.
* Please also apply free_txobj to link_up all the xmit_frames...
*/
pxmitpriv->pallocated_frame_buf =
- kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4, GFP_ATOMIC);
+ kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4,
+ GFP_ATOMIC);
if (!pxmitpriv->pallocated_frame_buf) {
pxmitpriv->pxmit_frame_buf = NULL;
return -ENOMEM;
@@ -126,8 +127,8 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
for (i = 0; i < NR_XMITBUFF; i++) {
INIT_LIST_HEAD(&pxmitbuf->list);
- pxmitbuf->pallocated_buf = kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ,
- GFP_ATOMIC);
+ pxmitbuf->pallocated_buf =
+ kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, GFP_ATOMIC);
if (!pxmitbuf->pallocated_buf)
return -ENOMEM;
pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ -
@@ -350,7 +351,7 @@ static int xmitframe_addmic(struct _adapter *padapter,
struct sta_info *stainfo;
struct qos_priv *pqospriv = &(padapter->mlmepriv.qospriv);
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct security_priv *psecpriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
bool bmcst = is_multicast_ether_addr(pattrib->ra);
@@ -368,15 +369,14 @@ static int xmitframe_addmic(struct _adapter *padapter,
0x0, 0x0};
pframe = pxmitframe->buf_addr + TXDESC_OFFSET;
if (bmcst) {
- if (!memcmp(psecuritypriv->XGrptxmickey
- [psecuritypriv->XGrpKeyid].skey,
+ if (!memcmp(psecpriv->XGrptxmickey
+ [psecpriv->XGrpKeyid].skey,
null_key, 16))
return -ENOMEM;
/*start to calculate the mic code*/
r8712_secmicsetkey(&micdata,
- psecuritypriv->
- XGrptxmickey[psecuritypriv->
- XGrpKeyid].skey);
+ psecpriv->XGrptxmickey
+ [psecpriv->XGrpKeyid].skey);
} else {
if (!memcmp(&stainfo->tkiptxmickey.skey[0],
null_key, 16))
@@ -416,7 +416,7 @@ static int xmitframe_addmic(struct _adapter *padapter,
length = pattrib->last_txcmdsz -
pattrib->hdrlen -
pattrib->iv_len -
- ((psecuritypriv->sw_encrypt)
+ ((psecpriv->sw_encrypt)
? pattrib->icv_len : 0);
r8712_secmicappend(&micdata, payload,
length);
@@ -424,7 +424,7 @@ static int xmitframe_addmic(struct _adapter *padapter,
} else {
length = pxmitpriv->frag_len -
pattrib->hdrlen - pattrib->iv_len -
- ((psecuritypriv->sw_encrypt) ?
+ ((psecpriv->sw_encrypt) ?
pattrib->icv_len : 0);
r8712_secmicappend(&micdata, payload,
length);
@@ -477,75 +477,72 @@ static int make_wlanhdr(struct _adapter *padapter, u8 *hdr,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
__le16 *fctrl = &pwlanhdr->frame_ctl;
+ u8 *bssid;
memset(hdr, 0, WLANHDR_OFFSET);
SetFrameSubType(fctrl, pattrib->subtype);
- if (pattrib->subtype & WIFI_DATA_TYPE) {
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- /* to_ds = 1, fr_ds = 0; */
- SetToDs(fctrl);
- memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv),
- ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- /* to_ds = 0, fr_ds = 1; */
- SetFrDs(fctrl);
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv),
- ETH_ALEN);
- memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
- } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv,
- WIFI_ADHOC_MASTER_STATE)) {
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv),
- ETH_ALEN);
- } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv),
- ETH_ALEN);
- } else {
- return -EINVAL;
- }
+ if (!(pattrib->subtype & WIFI_DATA_TYPE))
+ return 0;
- if (pattrib->encrypt)
- SetPrivacy(fctrl);
- if (pqospriv->qos_option) {
- qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
- if (pattrib->priority)
- SetPriority(qc, pattrib->priority);
- SetAckpolicy(qc, pattrib->ack_policy);
- }
- /* TODO: fill HT Control Field */
- /* Update Seq Num will be handled by f/w */
- {
- struct sta_info *psta;
- bool bmcst = is_multicast_ether_addr(pattrib->ra);
-
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- if (bmcst)
- psta = r8712_get_bcmc_stainfo(padapter);
- else
- psta =
- r8712_get_stainfo(&padapter->stapriv,
- pattrib->ra);
- }
- if (psta) {
- psta->sta_xmitpriv.txseq_tid
- [pattrib->priority]++;
- psta->sta_xmitpriv.txseq_tid[pattrib->priority]
- &= 0xFFF;
- pattrib->seqnum = psta->sta_xmitpriv.
- txseq_tid[pattrib->priority];
- SetSeqNum(hdr, pattrib->seqnum);
- }
+ bssid = get_bssid(pmlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ /* to_ds = 1, fr_ds = 0; */
+ SetToDs(fctrl);
+ ether_addr_copy(pwlanhdr->addr1, bssid);
+ ether_addr_copy(pwlanhdr->addr2, pattrib->src);
+ ether_addr_copy(pwlanhdr->addr3, pattrib->dst);
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ /* to_ds = 0, fr_ds = 1; */
+ SetFrDs(fctrl);
+ ether_addr_copy(pwlanhdr->addr1, pattrib->dst);
+ ether_addr_copy(pwlanhdr->addr2, bssid);
+ ether_addr_copy(pwlanhdr->addr3, pattrib->src);
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ ether_addr_copy(pwlanhdr->addr1, pattrib->dst);
+ ether_addr_copy(pwlanhdr->addr2, pattrib->src);
+ ether_addr_copy(pwlanhdr->addr3, bssid);
+ } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
+ ether_addr_copy(pwlanhdr->addr1, pattrib->dst);
+ ether_addr_copy(pwlanhdr->addr2, pattrib->src);
+ ether_addr_copy(pwlanhdr->addr3, bssid);
+ } else {
+ return -EINVAL;
+ }
+
+ if (pattrib->encrypt)
+ SetPrivacy(fctrl);
+ if (pqospriv->qos_option) {
+ qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
+ if (pattrib->priority)
+ SetPriority(qc, pattrib->priority);
+ SetAckpolicy(qc, pattrib->ack_policy);
+ }
+ /* TODO: fill HT Control Field */
+ /* Update Seq Num will be handled by f/w */
+ {
+ struct sta_info *psta;
+ bool bmcst = is_multicast_ether_addr(pattrib->ra);
+
+ if (pattrib->psta)
+ psta = pattrib->psta;
+ else if (bmcst)
+ psta = r8712_get_bcmc_stainfo(padapter);
+ else
+ psta = r8712_get_stainfo(&padapter->stapriv,
+ pattrib->ra);
+
+ if (psta) {
+ u16 *txtid = psta->sta_xmitpriv.txseq_tid;
+
+ txtid[pattrib->priority]++;
+ txtid[pattrib->priority] &= 0xFFF;
+ pattrib->seqnum = txtid[pattrib->priority];
+ SetSeqNum(hdr, pattrib->seqnum);
}
}
+
return 0;
}
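The make_wlanhdr() rewrite above is a guard-clause flattening: non-data frames return early, so the four fwstate branches and the sequence-number block sit at a single indentation level instead of being nested inside one large if. The resulting shape, as a sketch:

	if (!(pattrib->subtype & WIFI_DATA_TYPE))
		return 0;

	/* station / AP / ad-hoc / MP address setup, then QoS and
	 * sequence-number handling, all at top level */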
@@ -589,7 +586,7 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
addr_t addr;
u8 *pframe, *mem_start, *ptxdesc;
struct sta_info *psta;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct security_priv *psecpriv = &padapter->securitypriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
@@ -632,15 +629,13 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
case _WEP40_:
case _WEP104_:
WEP_IV(pattrib->iv, psta->txpn,
- (u8)psecuritypriv->
- PrivacyKeyIndex);
+ (u8)psecpriv->PrivacyKeyIndex);
break;
case _TKIP_:
if (bmcst)
TKIP_IV(pattrib->iv,
psta->txpn,
- (u8)psecuritypriv->
- XGrpKeyid);
+ (u8)psecpriv->XGrpKeyid);
else
TKIP_IV(pattrib->iv, psta->txpn,
0);
@@ -648,8 +643,7 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
case _AES_:
if (bmcst)
AES_IV(pattrib->iv, psta->txpn,
- (u8)psecuritypriv->
- XGrpKeyid);
+ (u8)psecpriv->XGrpKeyid);
else
AES_IV(pattrib->iv, psta->txpn,
0);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index f227828094bf..c0c0c781fe17 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -115,7 +115,7 @@ struct pkt_attrib {
u8 icv_len;
unsigned char iv[8];
unsigned char icv[8];
- u8 dst[ETH_ALEN];
+ u8 dst[ETH_ALEN] __aligned(2); /* for ether_addr_copy */
u8 src[ETH_ALEN];
u8 ta[ETH_ALEN];
u8 ra[ETH_ALEN];
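The __aligned(2) annotation exists because ether_addr_copy() requires both of its pointers to be 16-bit aligned (it may copy the 6 bytes as two- or four-byte words), whereas the plain memcpy() calls it replaces had no such requirement. Anchoring dst also keeps the following 6-byte members aligned, since each then starts at an even offset:

	u8 dst[ETH_ALEN] __aligned(2);	/* ether_addr_copy() needs u16 alignment */
	u8 src[ETH_ALEN];		/* even offset after dst, so still aligned */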
diff --git a/drivers/staging/rtl8712/usb_halinit.c b/drivers/staging/rtl8712/usb_halinit.c
index 6cc4a704c3a0..313c569748e9 100644
--- a/drivers/staging/rtl8712/usb_halinit.c
+++ b/drivers/staging/rtl8712/usb_halinit.c
@@ -58,7 +58,7 @@ u8 r8712_usb_hal_bus_init(struct _adapter *adapter)
r8712_write8(adapter, SYS_ISO_CTRL + 1, val8);
val8 = r8712_read8(adapter, SYS_ISO_CTRL + 1);
val8 = val8 & 0xEF;
- /* attatch AFE PLL to MACTOP/BB/PCIe Digital */
+ /* attach AFE PLL to MACTOP/BB/PCIe Digital */
r8712_write8(adapter, SYS_ISO_CTRL + 1, val8);
val8 = r8712_read8(adapter, AFE_XTAL_CTRL + 1);
val8 = val8 & 0xFB;
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index be731f1a2209..91b65731fcaa 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -440,7 +440,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
/* block-ack parameters */
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
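The mask correction above follows directly from the Block Ack Parameter Set layout in IEEE 802.11, worked out bit by bit:

	/* B0     A-MSDU supported
	 * B1     Block Ack policy   -> 0x0002
	 * B2-B5  TID                -> 0x003C
	 * B6-B15 buffer size        -> ((1 << 10) - 1) << 6 == 0xFFC0
	 * The old 0xFFA0 dropped bit 6 and wrongly took bit 5 from the TID field. */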
@@ -532,13 +532,6 @@ struct ieee80211_ht_addt_info {
#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
-/* block-ack parameters */
-#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
-#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
-#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
-#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
-
/*
* A-PMDU buffer sizes
* According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index efb5135ad743..bd18d1803e27 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -822,7 +822,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork->IELength = 0;
/* Added by Albert 2009/02/18 */
- /* If the the driver wants to use the bssid to create the connection. */
+ /* If the driver wants to use the bssid to create the connection. */
/* If not, we have to copy the connecting AP's MAC address to it so that */
/* the driver just has the bssid information for PMKIDList searching. */
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index d7a58af76ea0..e65c5a870b46 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -1097,9 +1097,6 @@ inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
(!adapter_to_pwrctl(padapter)->bInSuspend) &&
(!check_fwstate(&padapter->mlmepriv,
WIFI_ASOC_STATE|WIFI_UNDER_LINKING))) {
- struct pwrctrl_priv *pwrpriv;
-
- pwrpriv = adapter_to_pwrctl(padapter);
rtw_set_ips_deny(padapter, 0);
_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 1);
}
@@ -2917,12 +2914,11 @@ void rtw_append_exented_cap(struct adapter *padapter, u8 *out_ie, uint *pout_len
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
u8 cap_content[8] = {0};
- u8 *pframe;
if (phtpriv->bss_coexist)
SET_EXT_CAPABILITY_ELE_BSS_COEXIST(cap_content, 1);
- pframe = rtw_set_ie(out_ie + *pout_len, EID_EXTCapability, 8, cap_content, pout_len);
+ rtw_set_ie(out_ie + *pout_len, EID_EXTCapability, 8, cap_content, pout_len);
}
inline void rtw_set_to_roam(struct adapter *adapter, u8 to_roam)
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 8f9da1d49343..d6d7198dfe45 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -1084,7 +1084,7 @@ auth_fail:
unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_frame)
{
- unsigned int seq, len, status, algthm, offset;
+ unsigned int seq, len, status, offset;
unsigned char *p;
unsigned int go2asoc = 0;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1103,7 +1103,6 @@ unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_fram
offset = (GetPrivacy(pframe)) ? 4 : 0;
- algthm = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset));
seq = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 2));
status = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 4));
@@ -1170,7 +1169,7 @@ authclnt_fail:
unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
{
- u16 capab_info, listen_interval;
+ u16 capab_info;
struct rtw_ieee802_11_elems elems;
struct sta_info *pstat;
unsigned char reassoc, *p, *pos, *wpa_ie;
@@ -1216,8 +1215,6 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
capab_info = RTW_GET_LE16(pframe + WLAN_HDR_A3_LEN);
/* capab_info = le16_to_cpu(*(unsigned short *)(pframe + WLAN_HDR_A3_LEN)); */
- /* listen_interval = le16_to_cpu(*(unsigned short *)(pframe + WLAN_HDR_A3_LEN+2)); */
- listen_interval = RTW_GET_LE16(pframe + WLAN_HDR_A3_LEN+2);
left = pkt_len - (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
pos = pframe + (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 5245098b9ecf..7e1da0e35812 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -10,14 +10,11 @@
#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <rtw_recv.h>
+#include <net/cfg80211.h>
static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
-u8 rtw_rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-u8 rtw_bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-
static void rtw_signal_stat_timer_hdl(struct timer_list *t);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
@@ -1625,11 +1622,11 @@ sint wlanhdr_to_ethhdr(union recv_frame *precvframe)
psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
/* convert hdr + possible LLC headers into Ethernet header */
/* eth_type = (psnap_type[0] << 8) | psnap_type[1]; */
- if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
+ if ((!memcmp(psnap, rfc1042_header, SNAP_SIZE) &&
(memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2)) &&
(memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2))) ||
/* eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) || */
- !memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
+ !memcmp(psnap, bridge_tunnel_header, SNAP_SIZE)) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
bsnaphdr = true;
} else
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 5ebf691bd743..0f95009a30b6 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -756,7 +756,7 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
- if (psecuritypriv->binstallGrpkey == false) {
+ if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
if (start == 0)
@@ -1837,7 +1837,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
- if (psecuritypriv->binstallGrpkey == false) {
+ if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
if (start == 0)
@@ -2369,7 +2369,7 @@ u8 rtw_handle_tkip_countermeasure(struct adapter *adapter, const char *caller)
struct security_priv *securitypriv = &(adapter->securitypriv);
u8 status = _SUCCESS;
- if (securitypriv->btkip_countermeasure == true) {
+ if (securitypriv->btkip_countermeasure) {
unsigned long passing_ms = jiffies_to_msecs(jiffies - securitypriv->btkip_countermeasure_time);
if (passing_ms > 60*1000) {
DBG_871X_LEVEL(_drv_always_, "%s("ADPT_FMT") countermeasure time:%lus > 60s\n",
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 09d2ca30d653..e3f56c6cc882 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -553,7 +553,6 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
{
struct sta_info *psta;
- struct tx_servq *ptxservq;
u32 res = _SUCCESS;
NDIS_802_11_MAC_ADDRESS bcast_addr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -571,7 +570,6 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
/* default broadcast & multicast use macid 1 */
psta->mac_id = 1;
- ptxservq = &(psta->sta_xmitpriv.be_q);
exit:
return _SUCCESS;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 110338dbe372..69bcd172b298 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -1271,13 +1271,13 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
unsigned char *pbuf;
u32 wpa_ielen = 0;
u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
struct HT_info_element *pht_info = NULL;
struct rtw_ieee80211_ht_cap *pht_cap = NULL;
u32 bcn_channel;
unsigned short ht_cap_info;
unsigned char ht_info_infos_0;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ int ssid_len;
if (is_client_associated_to_ap(Adapter) == false)
return true;
@@ -1370,21 +1370,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* checking SSID */
+ ssid_len = 0;
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (!p) {
- DBG_871X("%s marc: cannot find SSID for survey event\n", __func__);
- hidden_ssid = true;
- } else {
- hidden_ssid = false;
- }
-
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
+ if (p) {
+ ssid_len = *(p + 1);
+ if (ssid_len > NDIS_802_11_LENGTH_SSID)
+ ssid_len = 0;
}
+ memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+ bssid->Ssid.SsidLength = ssid_len;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
"cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index d5793e4614bf..3705a60a0546 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -12,49 +12,6 @@
#include <Mp_Precomp.h>
/* Global variables */
-static const char *const BtProfileString[] = {
- "NONE",
- "A2DP",
- "PAN",
- "HID",
- "SCO",
-};
-
-static const char *const BtSpecString[] = {
- "1.0b",
- "1.1",
- "1.2",
- "2.0+EDR",
- "2.1+EDR",
- "3.0+HS",
- "4.0",
-};
-
-static const char *const BtLinkRoleString[] = {
- "Master",
- "Slave",
-};
-
-static const char *const h2cStaString[] = {
- "successful",
- "h2c busy",
- "rf off",
- "fw not read",
-};
-
-static const char *const ioStaString[] = {
- "success",
- "can not IO",
- "rf off",
- "fw not read",
- "wait io timeout",
- "invalid len",
- "idle Q empty",
- "insert waitQ fail",
- "unknown fail",
- "wrong level",
- "h2c stopped",
-};
BTC_COEXIST GLBtCoexist;
static u8 GLBtcWiFiInScanState;
@@ -450,7 +407,7 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
break;
case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
- *pu8 = padapter->securitypriv.dot11PrivacyAlgrthm == 0 ? false : true;
+ *pu8 = padapter->securitypriv.dot11PrivacyAlgrthm != 0;
break;
case BTC_GET_BL_WIFI_UNDER_B_MODE:
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 767e2a784f78..10250642d30a 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -1816,7 +1816,7 @@ static void phy_CrossReferenceHTAndVHTTxPowerLimit(struct adapter *padapter)
s8 tempPwrLmt = 0;
for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) {
+ for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) {
for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) {
for (rateSection = 0; rateSection < MAX_RATE_SECTION_NUM; ++rateSection) {
tempPwrLmt = pHalData->TxPwrLimit_5G[regulation][bw][rateSection][channel][ODM_RF_PATH_A];
@@ -1877,7 +1877,7 @@ void PHY_ConvertTxPowerLimitToPowerIndex(struct adapter *Adapter)
phy_CrossReferenceHTAndVHTTxPowerLimit(Adapter);
for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_2_4G_BANDWITH_NUM; ++bw) {
+ for (bw = 0; bw < MAX_2_4G_BANDWIDTH_NUM; ++bw) {
for (channel = 0; channel < CHANNEL_MAX_NUMBER_2G; ++channel) {
for (rateSection = 0; rateSection < MAX_RATE_SECTION_NUM; ++rateSection) {
tempPwrLmt = pHalData->TxPwrLimit_2_4G[regulation][bw][rateSection][channel][ODM_RF_PATH_A];
@@ -1920,7 +1920,7 @@ void PHY_InitTxPowerLimit(struct adapter *Adapter)
/* DBG_871X("=====> PHY_InitTxPowerLimit()!\n"); */
for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- for (j = 0; j < MAX_2_4G_BANDWITH_NUM; ++j)
+ for (j = 0; j < MAX_2_4G_BANDWIDTH_NUM; ++j)
for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
for (m = 0; m < CHANNEL_MAX_NUMBER_2G; ++m)
for (l = 0; l < MAX_RF_PATH_NUM; ++l)
@@ -1928,7 +1928,7 @@ void PHY_InitTxPowerLimit(struct adapter *Adapter)
}
for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- for (j = 0; j < MAX_5G_BANDWITH_NUM; ++j)
+ for (j = 0; j < MAX_5G_BANDWIDTH_NUM; ++j)
for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
for (m = 0; m < CHANNEL_MAX_NUMBER_5G; ++m)
for (l = 0; l < MAX_RF_PATH_NUM; ++l)
diff --git a/drivers/staging/rtl8723bs/hal/odm.c b/drivers/staging/rtl8723bs/hal/odm.c
index aa6631ee4ea7..f2a9e95a1563 100644
--- a/drivers/staging/rtl8723bs/hal/odm.c
+++ b/drivers/staging/rtl8723bs/hal/odm.c
@@ -7,19 +7,6 @@
#include "odm_precomp.h"
-static const u16 dB_Invert_Table[8][12] = {
- {1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4},
- {4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16},
- {18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63},
- {71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251},
- {282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000},
- {1122, 1259, 1413, 1585, 1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
- {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000, 11220, 12589, 14125,
- 15849},
- {17783, 19953, 22387, 25119, 28184, 31623, 35481, 39811, 44668, 50119,
- 56234, 65535}
- };
-
/* Global var */
u32 OFDMSwingTable[OFDM_TABLE_SIZE] = {
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index b77d1fe33a28..16e8f66a3171 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -541,7 +541,7 @@ typedef enum tag_Operation_Mode_Definition {
/* ODM_CMNINFO_WM_MODE */
typedef enum tag_Wireless_Mode_Definition {
- ODM_WM_UNKNOW = 0x0,
+ ODM_WM_UNKNOWN = 0x0,
ODM_WM_B = BIT0,
ODM_WM_G = BIT1,
ODM_WM_A = BIT2,
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h b/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
index f2c0707aad4c..1c6c08000e27 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
+++ b/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
@@ -31,8 +31,8 @@
#define ODM_REG_TX_ANT_CTRL_11N 0x80C
#define ODM_REG_BB_PWR_SAV5_11N 0x818
#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
-#define ODM_REG_RX_DEFUALT_A_11N 0x858
-#define ODM_REG_RX_DEFUALT_B_11N 0x85A
+#define ODM_REG_RX_DEFAULT_A_11N 0x858
+#define ODM_REG_RX_DEFAULT_B_11N 0x85A
#define ODM_REG_BB_PWR_SAV3_11N 0x85C
#define ODM_REG_ANTSEL_CTRL_11N 0x860
#define ODM_REG_RX_ANT_CTRL_11N 0x864
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index c3051ebaeb78..29c29e2e125b 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -311,39 +311,21 @@ static void rtl8723bs_recv_tasklet(unsigned long priv)
}
pkt_copy = rtw_skb_alloc(alloc_sz);
-
- if (pkt_copy) {
- pkt_copy->dev = padapter->pnetdev;
- precvframe->u.hdr.pkt = pkt_copy;
- skb_reserve(pkt_copy, 8 - ((SIZE_PTR)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
- skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
- memcpy(pkt_copy->data, (ptr + rx_report_sz + pattrib->shift_sz), skb_len);
- precvframe->u.hdr.rx_head = pkt_copy->head;
- precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data;
- precvframe->u.hdr.rx_end = skb_end_pointer(pkt_copy);
- } else {
- if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
- DBG_8192C("%s: alloc_skb fail, drop frag frame\n", __func__);
- rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
- break;
- }
-
- precvframe->u.hdr.pkt = rtw_skb_clone(precvbuf->pskb);
- if (precvframe->u.hdr.pkt) {
- _pkt *pkt_clone = precvframe->u.hdr.pkt;
-
- pkt_clone->data = ptr + rx_report_sz + pattrib->shift_sz;
- skb_reset_tail_pointer(pkt_clone);
- precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail
- = pkt_clone->data;
- precvframe->u.hdr.rx_end = pkt_clone->data + skb_len;
- } else {
- DBG_8192C("%s: rtw_skb_clone fail\n", __func__);
- rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
- break;
- }
+ if (!pkt_copy) {
+ DBG_8192C("%s: alloc_skb fail, drop frame\n", __func__);
+ rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
+ break;
}
+ pkt_copy->dev = padapter->pnetdev;
+ precvframe->u.hdr.pkt = pkt_copy;
+ skb_reserve(pkt_copy, 8 - ((SIZE_PTR)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
+ skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
+ memcpy(pkt_copy->data, (ptr + rx_report_sz + pattrib->shift_sz), skb_len);
+ precvframe->u.hdr.rx_head = pkt_copy->head;
+ precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data;
+ precvframe->u.hdr.rx_end = skb_end_pointer(pkt_copy);
+
recvframe_put(precvframe, skb_len);
/* recvframe_pull(precvframe, drvinfo_sz + RXDESC_SIZE); */
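Note on the two skb_reserve() calls in the rewritten tasklet: the first forces pkt_copy->data to an 8-byte boundary, the second shifts it by shift_sz so the IP header also lands aligned. A minimal user-space sketch of the same arithmetic, assuming nothing beyond the C standard library:

#include <stdint.h>
#include <stdio.h>

/* Mirror of skb_reserve(pkt, 8 - ((SIZE_PTR)pkt->data & 7)) above:
 * advance data to the next 8-byte boundary. If data is already
 * aligned, a full 8 bytes are skipped, which the allocation size
 * (alloc_sz) accounts for.
 */
static unsigned char *align8(unsigned char *data)
{
	return data + (8 - ((uintptr_t)data & 7));
}

int main(void)
{
	unsigned char buf[32];

	printf("%p -> %p\n", (void *)(buf + 3), (void *)align8(buf + 3));
	return 0;
}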
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index 7853af53051d..e42d8c18e1ae 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -544,13 +544,9 @@ static void _InitRetryFunction(struct adapter *padapter)
static void HalRxAggr8723BSdio(struct adapter *padapter)
{
- struct registry_priv *pregistrypriv;
u8 valueDMATimeout;
u8 valueDMAPageCount;
-
- pregistrypriv = &padapter->registrypriv;
-
valueDMATimeout = 0x06;
valueDMAPageCount = 0x06;
diff --git a/drivers/staging/rtl8723bs/include/hal_data.h b/drivers/staging/rtl8723bs/include/hal_data.h
index e5e667df6154..fa5d70016f05 100644
--- a/drivers/staging/rtl8723bs/include/hal_data.h
+++ b/drivers/staging/rtl8723bs/include/hal_data.h
@@ -56,9 +56,9 @@ enum RT_AMPDU_BURST {
/* Tx Power Limit Table Size */
#define MAX_REGULATION_NUM 4
#define MAX_RF_PATH_NUM_IN_POWER_LIMIT_TABLE 4
-#define MAX_2_4G_BANDWITH_NUM 4
+#define MAX_2_4G_BANDWIDTH_NUM 4
#define MAX_RATE_SECTION_NUM 10
-#define MAX_5G_BANDWITH_NUM 4
+#define MAX_5G_BANDWIDTH_NUM 4
#define MAX_BASE_NUM_IN_PHY_REG_PG_2_4G 10 /* CCK:1, OFDM:1, HT:4, VHT:4 */
#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 9 /* OFDM:1, HT:4, VHT:4 */
@@ -280,14 +280,14 @@ struct hal_com_data {
/* Power Limit Table for 2.4G */
s8 TxPwrLimit_2_4G[MAX_REGULATION_NUM]
- [MAX_2_4G_BANDWITH_NUM]
+ [MAX_2_4G_BANDWIDTH_NUM]
[MAX_RATE_SECTION_NUM]
[CHANNEL_MAX_NUMBER_2G]
[MAX_RF_PATH_NUM];
/* Power Limit Table for 5G */
s8 TxPwrLimit_5G[MAX_REGULATION_NUM]
- [MAX_5G_BANDWITH_NUM]
+ [MAX_5G_BANDWIDTH_NUM]
[MAX_RATE_SECTION_NUM]
[CHANNEL_MAX_NUMBER_5G]
[MAX_RF_PATH_NUM];
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 98c3e92245b7..a851b818ef0e 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -38,8 +38,6 @@
#define RX_MAX_QUEUE 2
#define MAX_SUBFRAME_COUNT 64
-extern u8 rtw_rfc1042_header[];
-extern u8 rtw_bridge_tunnel_header[];
/* for Rx reordering buffer control */
struct recv_reorder_ctrl
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 1ba85a43f05a..2fb80b6eb51d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -98,7 +98,7 @@ static struct ieee80211_channel rtw_2ghz_channels[] = {
static void rtw_2g_channels_init(struct ieee80211_channel *channels)
{
- memcpy((void*)channels, (void*)rtw_2ghz_channels,
+ memcpy((void *)channels, (void *)rtw_2ghz_channels,
sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
);
}
@@ -133,8 +133,8 @@ static struct ieee80211_supported_band *rtw_spt_band_alloc(
if (!spt_band)
goto exit;
- spt_band->channels = (struct ieee80211_channel*)(((u8 *)spt_band)+sizeof(struct ieee80211_supported_band));
- spt_band->bitrates = (struct ieee80211_rate*)(((u8 *)spt_band->channels)+sizeof(struct ieee80211_channel)*n_channels);
+ spt_band->channels = (struct ieee80211_channel *)(((u8 *)spt_band)+sizeof(struct ieee80211_supported_band));
+ spt_band->bitrates = (struct ieee80211_rate *)(((u8 *)spt_band->channels)+sizeof(struct ieee80211_channel)*n_channels);
spt_band->band = band;
spt_band->n_channels = n_channels;
spt_band->n_bitrates = n_bitrates;
@@ -152,22 +152,6 @@ exit:
return spt_band;
}
-static void rtw_spt_band_free(struct ieee80211_supported_band *spt_band)
-{
- u32 size = 0;
-
- if (!spt_band)
- return;
-
- if (spt_band->band == NL80211_BAND_2GHZ)
- {
- size = sizeof(struct ieee80211_supported_band)
- + sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
- + sizeof(struct ieee80211_rate)*RTW_G_RATES_NUM;
- }
- kfree(spt_band);
-}
-
static const struct ieee80211_txrx_stypes
rtw_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_ADHOC] = {
@@ -358,7 +342,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
memcpy(pbuf, pnetwork->network.IEs, pnetwork->network.IELength);
len += pnetwork->network.IELength;
- *((__le64*)pbuf) = cpu_to_le64(notify_timestamp);
+ *((__le64 *)pbuf) = cpu_to_le64(notify_timestamp);
bss = cfg80211_inform_bss_frame(wiphy, notify_channel, (struct ieee80211_mgmt *)buf,
len, notify_signal, GFP_ATOMIC);
@@ -1129,7 +1113,7 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
{
if (mac_addr)
- memcpy(param->sta_addr, (void*)mac_addr, ETH_ALEN);
+ memcpy(param->sta_addr, (void *)mac_addr, ETH_ALEN);
ret = rtw_cfg80211_ap_set_encryption(ndev, param, param_len);
}
@@ -2485,7 +2469,7 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
* for two MAC addresses
*/
skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
- pdata = (unsigned char*)skb->data;
+ pdata = (unsigned char *)skb->data;
memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
@@ -2540,7 +2524,7 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- memcpy(pframe, (void*)buf, len);
+ memcpy(pframe, (void *)buf, len);
pattrib->pktlen = len;
pwlanhdr = (struct ieee80211_hdr *)pframe;
@@ -3030,7 +3014,7 @@ static int _cfg80211_rtw_mgmt_tx(struct adapter *padapter, u8 tx_ch, const u8 *b
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- memcpy(pframe, (void*)buf, len);
+ memcpy(pframe, (void *)buf, len);
pattrib->pktlen = len;
pwlanhdr = (struct ieee80211_hdr *)pframe;
@@ -3163,29 +3147,6 @@ exit:
return ret;
}
-static void cfg80211_rtw_mgmt_frame_register(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- u16 frame_type, bool reg)
-{
- struct net_device *ndev = wdev_to_ndev(wdev);
- struct adapter *adapter;
-
- if (ndev == NULL)
- goto exit;
-
- adapter = (struct adapter *)rtw_netdev_priv(ndev);
-
-#ifdef DEBUG_CFG80211
- DBG_871X(FUNC_ADPT_FMT" frame_type:%x, reg:%d\n", FUNC_ADPT_ARG(adapter),
- frame_type, reg);
-#endif
-
- if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
- return;
-exit:
- return;
-}
-
#if defined(CONFIG_PNO_SUPPORT)
static int cfg80211_rtw_sched_scan_start(struct wiphy *wiphy,
struct net_device *dev,
@@ -3397,7 +3358,6 @@ static struct cfg80211_ops rtw_cfg80211_ops = {
.change_bss = cfg80211_rtw_change_bss,
.mgmt_tx = cfg80211_rtw_mgmt_tx,
- .mgmt_frame_register = cfg80211_rtw_mgmt_frame_register,
#if defined(CONFIG_PNO_SUPPORT)
.sched_scan_start = cfg80211_rtw_sched_scan_start,
@@ -3487,7 +3447,7 @@ void rtw_wdev_free(struct wireless_dev *wdev)
if (!wdev)
return;
- rtw_spt_band_free(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
+ kfree(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
wiphy_free(wdev->wiphy);
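rtw_spt_band_free() could be replaced by a bare kfree() because rtw_spt_band_alloc() makes a single allocation holding the band structure plus its trailing channel and rate arrays; the size the helper computed was never used. A user-space sketch of that single-block layout, with illustrative stand-in types:

#include <stdlib.h>

struct channel { int hw_value; };
struct rate    { int bitrate; };
struct band {
	struct channel *channels;
	struct rate *bitrates;
	int n_channels, n_bitrates;
};

/* One calloc() carries the header and both trailing arrays, so one
 * free() (kfree() in the driver) releases everything at once. */
static struct band *band_alloc(int n_ch, int n_rt)
{
	struct band *b = calloc(1, sizeof(*b) +
				sizeof(struct channel) * n_ch +
				sizeof(struct rate) * n_rt);

	if (!b)
		return NULL;
	b->channels = (struct channel *)(b + 1);
	b->bitrates = (struct rate *)(b->channels + n_ch);
	b->n_channels = n_ch;
	b->n_bitrates = n_rt;
	return b;
}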
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 5059b874080e..902ac8169948 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2561,14 +2561,16 @@ static int rtw_wps_start(struct net_device *dev,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct iw_point *pdata = &wrqu->data;
u32 u32wps_start = 0;
- unsigned int uintRet = 0;
if ((true == padapter->bDriverStopped) || (true == padapter->bSurpriseRemoved) || (NULL == pdata)) {
ret = -EINVAL;
goto exit;
}
- uintRet = copy_from_user((void *)&u32wps_start, pdata->pointer, 4);
+ if (copy_from_user((void *)&u32wps_start, pdata->pointer, 4)) {
+ ret = -EFAULT;
+ goto exit;
+ }
if (u32wps_start == 0)
u32wps_start = *extra;
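copy_from_user() returns the number of bytes it failed to copy, so storing the result in a variable and ignoring it, as the old code did, silently accepts a bad user pointer; any nonzero return has to become -EFAULT. A minimal kernel-style sketch of the pattern (hypothetical handler, not the driver's code):

#include <linux/types.h>
#include <linux/uaccess.h>

static int example_handler(void __user *arg)
{
	u32 val;

	/* nonzero return = bytes NOT copied = bad user pointer */
	if (copy_from_user(&val, arg, sizeof(val)))
		return -EFAULT;

	/* ... use val ... */
	return 0;
}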
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index d29f59bbb613..50a3c2c3a8d2 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -1057,9 +1057,9 @@ static int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
status = _netdev_open(pnetdev);
mutex_unlock(&(adapter_to_dvobj(padapter)->hw_init_mutex));
}
- }
- else
+ } else {
status = (_SUCCESS == ips_netdrv_open(padapter)) ? (0) : (-1);
+ }
return status;
}
@@ -1192,8 +1192,7 @@ void rtw_dev_unload(struct adapter *padapter)
padapter->bup = false;
DBG_871X("<=== %s\n", __func__);
- }
- else {
+ } else {
RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("%s: bup ==false\n", __func__));
DBG_871X("%s: bup ==false\n", __func__);
}
@@ -1223,8 +1222,7 @@ static int rtw_suspend_free_assoc_resource(struct adapter *padapter)
rtw_disassoc_cmd(padapter, 0, false);
/* s2-2. indicate disconnect to os */
rtw_indicate_disconnect(padapter);
- }
- else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
rtw_sta_flush(padapter);
}
@@ -1270,9 +1268,8 @@ void rtw_suspend_wow(struct adapter *padapter)
padapter->bDriverStopped = false; /* for 32k command */
/* 2. disable interrupt */
- if (padapter->intf_stop) {
+ if (padapter->intf_stop)
padapter->intf_stop(padapter);
- }
/* 2.1 clean interrupt */
if (padapter->HalFunc.clear_interrupt)
@@ -1448,14 +1445,13 @@ int rtw_suspend_common(struct adapter *padapter)
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
#ifdef CONFIG_WOWLAN
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
pwrpriv->wowlan_mode = true;
- } else if (pwrpriv->wowlan_pno_enable == true) {
+ else if (pwrpriv->wowlan_pno_enable == true)
pwrpriv->wowlan_mode |= pwrpriv->wowlan_pno_enable;
- }
if (pwrpriv->wowlan_mode == true)
- rtw_suspend_wow(padapter);
+ rtw_suspend_wow(padapter);
else
rtw_suspend_normal(padapter);
@@ -1522,9 +1518,8 @@ int rtw_resume_process_wow(struct adapter *padapter)
pwrpriv->bFwCurrentInPSMode = false;
- if (padapter->intf_stop) {
+ if (padapter->intf_stop)
padapter->intf_stop(padapter);
- }
if (padapter->HalFunc.clear_interrupt)
padapter->HalFunc.clear_interrupt(padapter);
@@ -1541,18 +1536,15 @@ int rtw_resume_process_wow(struct adapter *padapter)
padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_WOWLAN, (u8 *)&poidparam);
psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
- if (psta) {
+ if (psta)
set_sta_rate(padapter, psta);
- }
-
padapter->bDriverStopped = false;
DBG_871X("%s: wowmode resuming, DriverStopped:%d\n", __func__, padapter->bDriverStopped);
rtw_start_drv_threads(padapter);
- if (padapter->intf_start) {
+ if (padapter->intf_start)
padapter->intf_start(padapter);
- }
/* start netif queue */
if (pnetdev) {
@@ -1656,9 +1648,8 @@ int rtw_resume_process_ap_wow(struct adapter *padapter)
DBG_871X("%s: wowmode resuming, DriverStopped:%d\n", __func__, padapter->bDriverStopped);
rtw_start_drv_threads(padapter);
- if (padapter->intf_start) {
+ if (padapter->intf_start)
padapter->intf_start(padapter);
- }
/* start netif queue */
if (pnetdev) {
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index 60c35d92ba29..eb4d1c3008fe 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -9,6 +9,7 @@
#include <drv_types.h>
#include <rtw_debug.h>
#include <linux/jiffies.h>
+#include <net/cfg80211.h>
void rtw_os_free_recvframe(union recv_frame *precvframe)
{
@@ -60,27 +61,20 @@ _pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8
pattrib = &prframe->u.hdr.attrib;
sub_skb = rtw_skb_alloc(nSubframe_Length + 12);
- if (sub_skb) {
- skb_reserve(sub_skb, 12);
- skb_put_data(sub_skb, (pdata + ETH_HLEN), nSubframe_Length);
- } else {
- sub_skb = rtw_skb_clone(prframe->u.hdr.pkt);
- if (sub_skb) {
- sub_skb->data = pdata + ETH_HLEN;
- sub_skb->len = nSubframe_Length;
- skb_set_tail_pointer(sub_skb, nSubframe_Length);
- } else {
- DBG_871X("%s(): rtw_skb_clone() Fail!!!\n", __func__);
- return NULL;
- }
+ if (!sub_skb) {
+ DBG_871X("%s(): rtw_skb_alloc() Fail!!!\n", __func__);
+ return NULL;
}
+ skb_reserve(sub_skb, 12);
+ skb_put_data(sub_skb, (pdata + ETH_HLEN), nSubframe_Length);
+
eth_type = RTW_GET_BE16(&sub_skb->data[6]);
if (sub_skb->len >= 8 &&
- ((!memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
+ ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
- !memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
+ !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
/*
* remove RFC1042 or Bridge-Tunnel encapsulation and replace
* EtherType
@@ -230,7 +224,7 @@ static void rtw_os_ksocket_send(struct adapter *padapter, union recv_frame *prec
if (rx_pid == psta->pid) {
int i;
- u16 len = *(u16*)(skb->data+ETH_HLEN+2);
+ u16 len = *(u16 *)(skb->data+ETH_HLEN+2);
DBG_871X("eth, RC: len = 0x%x\n", len);
for (i = 0; i < len; i++)
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index b093b5629171..5b1392deb0a7 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -514,7 +514,7 @@ static void rtw_dev_remove(struct sdio_func *func)
rtw_unregister_netdevs(dvobj);
- if (padapter->bSurpriseRemoved == false) {
+ if (!padapter->bSurpriseRemoved) {
int err;
/* test surprise remove */
@@ -554,12 +554,12 @@ static int rtw_sdio_suspend(struct device *dev)
struct adapter *padapter = psdpriv->if1;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
- if (padapter->bDriverStopped == true) {
+ if (padapter->bDriverStopped) {
DBG_871X("%s bDriverStopped = %d\n", __func__, padapter->bDriverStopped);
return 0;
}
- if (pwrpriv->bInSuspend == true) {
+ if (pwrpriv->bInSuspend) {
DBG_871X("%s bInSuspend = %d\n", __func__, pwrpriv->bInSuspend);
pdbgpriv->dbg_suspend_error_cnt++;
return 0;
@@ -574,7 +574,7 @@ static int rtw_resume_process(struct adapter *padapter)
struct dvobj_priv *psdpriv = padapter->dvobj;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
- if (pwrpriv->bInSuspend == false) {
+ if (!pwrpriv->bInSuspend) {
pdbgpriv->dbg_resume_error_cnt++;
DBG_871X("%s bInSuspend = %d\n", __func__, pwrpriv->bInSuspend);
return -1;
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 59568d18ce23..a1a82e59dfee 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -297,6 +297,62 @@ static int lynxfb_ops_pan_display(struct fb_var_screeninfo *var,
return hw_sm750_pan_display(crtc, var, info);
}
+static inline void lynxfb_set_visual_mode(struct fb_info *info)
+{
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ case 16:
+ case 24:
+ case 32:
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+ default:
+ break;
+ }
+}
+
+static inline int lynxfb_set_color_offsets(struct fb_info *info)
+{
+ lynxfb_set_visual_mode(info);
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ info->var.red.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.offset = 0;
+ info->var.green.length = 8;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 8;
+ info->var.transp.length = 0;
+ info->var.transp.offset = 0;
+ break;
+ case 16:
+ info->var.red.offset = 11;
+ info->var.red.length = 5;
+ info->var.green.offset = 5;
+ info->var.green.length = 6;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 5;
+ info->var.transp.length = 0;
+ info->var.transp.offset = 0;
+ break;
+ case 24:
+ case 32:
+ info->var.red.offset = 16;
+ info->var.red.length = 8;
+ info->var.green.offset = 8;
+ info->var.green.length = 8;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int lynxfb_ops_set_par(struct fb_info *info)
{
struct lynxfb_par *par;
@@ -328,48 +384,13 @@ static int lynxfb_ops_set_par(struct fb_info *info)
* and these data should be set before setcolreg routine
*/
- switch (var->bits_per_pixel) {
- case 8:
- fix->visual = FB_VISUAL_PSEUDOCOLOR;
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 0;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 16:
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.length = 0;
- var->transp.offset = 0;
- fix->visual = FB_VISUAL_TRUECOLOR;
- break;
- case 24:
- case 32:
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- fix->visual = FB_VISUAL_TRUECOLOR;
- break;
- default:
- ret = -EINVAL;
- break;
- }
+ ret = lynxfb_set_color_offsets(info);
+
var->height = var->width = -1;
var->accel_flags = 0;/*FB_ACCELF_TEXT;*/
if (ret) {
- pr_err("pixel bpp format not satisfied\n.");
+ pr_err("bpp %d not supported\n", var->bits_per_pixel);
return ret;
}
ret = hw_sm750_crtc_setMode(crtc, var, fix);
@@ -511,10 +532,12 @@ lynxfb_resume_err:
static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
+ int ret;
struct lynxfb_par *par;
struct lynxfb_crtc *crtc;
resource_size_t request;
+ ret = 0;
par = info->par;
crtc = &par->crtc;
@@ -523,43 +546,13 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
var->yres,
var->bits_per_pixel);
- switch (var->bits_per_pixel) {
- case 8:
- info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 0;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 16:
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.length = 0;
- var->transp.offset = 0;
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- break;
- case 24:
- case 32:
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- break;
- default:
+ ret = lynxfb_set_color_offsets(info);
+
+ if (ret) {
pr_err("bpp %d not supported\n", var->bits_per_pixel);
- return -EINVAL;
+ return ret;
}
+
var->height = var->width = -1;
var->accel_flags = 0;/* FB_ACCELF_TEXT; */
@@ -709,7 +702,9 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
} else {
output->paths = sm750_crt;
crtc->channel = sm750_primary;
- /* not consider of padding stuffs for oScreen,need fix */
+ /* padding for oScreen is not taken into account,
+ * needs fixing
+ */
crtc->oScreen = sm750_dev->vidmem_size >> 1;
crtc->vScreen = sm750_dev->pvMem + crtc->oScreen;
}
@@ -893,15 +888,8 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
pr_info("fix->mmio_start = %lx\n", fix->mmio_start);
fix->mmio_len = sm750_dev->vidreg_size;
pr_info("fix->mmio_len = %x\n", fix->mmio_len);
- switch (var->bits_per_pixel) {
- case 8:
- fix->visual = FB_VISUAL_PSEUDOCOLOR;
- break;
- case 16:
- case 32:
- fix->visual = FB_VISUAL_TRUECOLOR;
- break;
- }
+
+ lynxfb_set_visual_mode(info);
/* set var */
var->activate = FB_ACTIVATE_NOW;
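lynxfb_set_color_offsets() centralizes the three bitfield layouts the driver supports: 8bpp palette (pseudocolor), 16bpp RGB565 (red at 11/5, green at 5/6, blue at 0/5), and 24/32bpp truecolor with 8 bits per component at offsets 16/8/0. A small sketch packing RGB565 from 8-bit components with exactly those offsets and lengths; illustrative only:

#include <stdint.h>

static uint16_t pack_rgb565(uint8_t r, uint8_t g, uint8_t b)
{
	/* keep the top 5/6/5 bits and place them at offsets 11/5/0 */
	return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}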
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index ce90adcb449d..19823c7277a4 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -59,16 +59,19 @@ struct lynx_accel {
int (*de_wait)(void);/* see if hardware ready to work */
- int (*de_fillrect)(struct lynx_accel *, u32, u32, u32, u32,
- u32, u32, u32, u32, u32);
+ int (*de_fillrect)(struct lynx_accel *,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32, u32);
- int (*de_copyarea)(struct lynx_accel *, u32, u32, u32, u32,
- u32, u32, u32, u32,
- u32, u32, u32, u32);
+ int (*de_copyarea)(struct lynx_accel *,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32);
- int (*de_imageblit)(struct lynx_accel *, const char *, u32, u32, u32, u32,
- u32, u32, u32, u32,
- u32, u32, u32, u32);
+ int (*de_imageblit)(struct lynx_accel *, const char *,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32);
};
@@ -163,7 +166,7 @@ struct lynxfb_output {
*/
void *priv;
- int (*proc_setBLANK)(struct lynxfb_output*, int);
+ int (*proc_setBLANK)(struct lynxfb_output *output, int blank);
};
struct lynxfb_par {
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index b8d60701f898..7136d751cff5 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -51,7 +51,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
/* now map mmio and vidmem */
sm750_dev->pvReg = ioremap(sm750_dev->vidreg_start,
- sm750_dev->vidreg_size);
+ sm750_dev->vidreg_size);
if (!sm750_dev->pvReg) {
pr_err("mmio failed\n");
ret = -EFAULT;
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index ddbb7e97d118..7408eb29cf38 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -43,6 +43,7 @@ static struct var_t vars[] = {
{ CAPS_STOP, .u.s = {"[:dv ap 100]" } },
{ RATE, .u.n = {"[:ra %d]", 7, 0, 9, 150, 25, NULL } },
{ PITCH, .u.n = {"[:dv ap %d]", 100, 0, 100, 0, 0, NULL } },
+ { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:dv gv %d]", 13, 0, 16, 0, 5, NULL } },
{ PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } },
{ VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } },
@@ -59,6 +60,8 @@ static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
@@ -87,6 +90,7 @@ static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 798c42dfa16c..96f24c848cc5 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -139,6 +139,7 @@ static struct var_t vars[] = {
{ CAPS_STOP, .u.s = {"[:dv ap 100]" } },
{ RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } },
{ PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } },
+ { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } },
{ PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } },
{ VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } },
@@ -155,6 +156,8 @@ static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
@@ -183,6 +186,7 @@ static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index dccb4ea29d37..780214b5ca16 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -44,7 +44,8 @@ static struct var_t vars[] = {
{ CAPS_START, .u.s = {"[:dv ap 160] " } },
{ CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
{ RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
{ PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
+ { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
{ PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
{ VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } },
@@ -61,6 +61,8 @@ static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
@@ -89,6 +91,7 @@ static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index 7df1a84297f6..e393438af81b 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -24,6 +24,7 @@ static struct var_t vars[] = {
{ PAUSE, .u.s = {"PAUSE\n"} },
{ RATE, .u.n = {"RATE %d\n", 8, 1, 16, 0, 0, NULL } },
{ PITCH, .u.n = {"PITCH %d\n", 8, 0, 16, 0, 0, NULL } },
+ { INFLECTION, .u.n = {"INFLECTION %d\n", 8, 0, 16, 0, 0, NULL } },
{ VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } },
{ TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
@@ -39,6 +40,8 @@ static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
@@ -65,6 +68,7 @@ static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index f591ec095582..9a7029539f35 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -38,6 +38,7 @@ static struct var_t vars[] = {
{ PAUSE, .u.n = {"\x01P" } },
{ RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } },
{ PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } },
+ { INFLECTION, .u.n = {"\x01%dr", 5, 0, 9, 0, 0, NULL } },
{ VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
{ TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
{ PUNCT, .u.n = {"\x01%db", 0, 0, 2, 0, 0, NULL } },
@@ -57,6 +58,8 @@ static struct kobj_attribute freq_attribute =
__ATTR(freq, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
@@ -96,6 +99,7 @@ static struct attribute *synth_attrs[] = {
&freq_attribute.attr,
/* &lang_attribute.attr, */
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index fc6a9416829c..d3272c6d199a 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -42,7 +42,8 @@ enum var_id_t {
SAY_CONTROL, SAY_WORD_CTL, NO_INTERRUPT, KEY_ECHO,
SPELL_DELAY, PUNC_LEVEL, READING_PUNC,
ATTRIB_BLEEP, BLEEPS,
- RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG, DIRECT, PAUSE,
+ RATE, PITCH, INFLECTION, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
+ DIRECT, PAUSE,
CAPS_START, CAPS_STOP, CHARTAB,
MAXVARS
};
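Every id in var_id_t is looked up in var_headers[] and in each synthesizer's vars[] table, which is why the hunks above add an INFLECTION row to every synth alongside the new enum value. A minimal sketch of that id-keyed lookup, using illustrative types and the dummy synth's format strings:

#include <stddef.h>

enum var_id { RATE, PITCH, INFLECTION, VOL, MAXVARS };

struct var { enum var_id id; const char *fmt; };

static const struct var vars[] = {
	{ RATE,       "RATE %d\n" },
	{ PITCH,      "PITCH %d\n" },
	{ INFLECTION, "INFLECTION %d\n" },
	{ VOL,        "VOL %d\n" },
};

static const char *fmt_for(enum var_id id)
{
	size_t i;

	for (i = 0; i < sizeof(vars) / sizeof(vars[0]); i++)
		if (vars[i].id == id)
			return vars[i].fmt;
	return NULL; /* synth does not support this id */
}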
diff --git a/drivers/staging/speakup/spkguide.txt b/drivers/staging/speakup/spkguide.txt
index c23549c54c3c..1e622cd34363 100644
--- a/drivers/staging/speakup/spkguide.txt
+++ b/drivers/staging/speakup/spkguide.txt
@@ -406,6 +406,7 @@ freq
full_time
jiffy_delta
pitch
+inflection
punct
rate
tone
@@ -518,9 +519,9 @@ All the entries in the Speakup sys system are readable, some are
writable by root only, and some are writable by everyone. Unless you
know what you are doing, you should probably leave the ones that are
writable by root only alone. Most of the names are self explanatory.
-Vol for controlling volume, pitch for pitch, rate for controlling speaking
-rate, etc. If you find one you aren't sure about, you can post a query
-on the Speakup list.
+Vol for controlling volume, pitch for pitch, inflection for pitch range, rate
+for controlling speaking rate, etc. If you find one you aren't sure about, you
+can post a query on the Speakup list.
6. Changing Synthesizers
diff --git a/drivers/staging/speakup/sysfs-driver-speakup b/drivers/staging/speakup/sysfs-driver-speakup
index be3f5d6962e9..c6a32c434ce9 100644
--- a/drivers/staging/speakup/sysfs-driver-speakup
+++ b/drivers/staging/speakup/sysfs-driver-speakup
@@ -325,6 +325,12 @@ KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Gets or sets the pitch of the synthesizer. The range is 0-9.
+What: /sys/accessibility/speakup/soft/inflection
+KernelVersion: 5.8
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the inflection of the synthesizer, i.e. the pitch
+ range. The range is 0-9.
+
What: /sys/accessibility/speakup/soft/punct
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
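These attributes are plain text files, so setting the new inflection value needs nothing more than a write. A hedged user-space example using the path documented above (assumes the soft synth is loaded):

#include <stdio.h>

int main(void)
{
	/* path comes from the sysfs entry documented above */
	FILE *f = fopen("/sys/accessibility/speakup/soft/inflection", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "7\n");	/* wider pitch range; scale is 0-9 */
	return fclose(f) ? 1 : 0;
}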
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 5741d1cb6227..d7f6bec7ff06 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -37,6 +37,7 @@ static struct st_var_header var_headers[] = {
{ "bell_pos", BELL_POS, VAR_NUM, &spk_bell_pos, NULL },
{ "rate", RATE, VAR_NUM, NULL, NULL },
{ "pitch", PITCH, VAR_NUM, NULL, NULL },
+ { "inflection", INFLECTION, VAR_NUM, NULL, NULL },
{ "vol", VOL, VAR_NUM, NULL, NULL },
{ "tone", TONE, VAR_NUM, NULL, NULL },
{ "punct", PUNCT, VAR_NUM, NULL, NULL },
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index dd979ee4dcf1..99c57ceeb357 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -292,7 +292,7 @@ static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
* @tasktype: Type of taskmgmt command
* @scsidev: Scsidev that issued command
*
- * Create a cmdrsp packet and send it to the Serivce Partition
+ * Create a cmdrsp packet and send it to the Service Partition
* that will service this request.
*
* Return: Int representing whether command was queued successfully or not
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index 33485184a98a..f783b632141b 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -233,7 +233,7 @@ static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
}
static void snd_bcm2835_pcm_transfer(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect *rec, size_t bytes)
+ struct snd_pcm_indirect *rec, size_t bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
@@ -346,7 +346,7 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name,
&snd_bcm2835_playback_ops);
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
- chip->card->dev, 128 * 1024, 128 * 1024);
+ chip->card->dev, 128 * 1024, 128 * 1024);
if (spdif)
chip->pcm_spdif = pcm;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 597acef35d0b..4f1adddb804f 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -369,8 +369,8 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
if (dev->capture.vc_start_timestamp != -1 && pts) {
ktime_t timestamp;
- s64 runtime_us = pts -
- dev->capture.vc_start_timestamp;
+ s64 runtime_us = pts - dev->capture.vc_start_timestamp;
+
timestamp = ktime_add_us(dev->capture.kernel_start_ts,
runtime_us);
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
@@ -420,9 +420,8 @@ static int enable_camera(struct bm2835_mmal_dev *dev)
return -EINVAL;
}
- ret = vchiq_mmal_component_enable(
- dev->instance,
- dev->component[COMP_CAMERA]);
+ ret = vchiq_mmal_component_enable(dev->instance,
+ dev->component[COMP_CAMERA]);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev,
"Failed enabling camera, ret %d\n", ret);
@@ -451,10 +450,8 @@ static int disable_camera(struct bm2835_mmal_dev *dev)
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Disabling camera\n");
- ret =
- vchiq_mmal_component_disable(
- dev->instance,
- dev->component[COMP_CAMERA]);
+ ret = vchiq_mmal_component_disable(dev->instance,
+ dev->component[COMP_CAMERA]);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev,
"Failed disabling camera, ret %d\n", ret);
@@ -555,8 +552,8 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
/* enable the camera port */
dev->capture.port->cb_ctx = dev;
- ret =
- vchiq_mmal_port_enable(dev->instance, dev->capture.port, buffer_cb);
+ ret = vchiq_mmal_port_enable(dev->instance, dev->capture.port,
+ buffer_cb);
if (ret) {
v4l2_err(&dev->v4l2_dev,
"Failed to enable capture port - error %d. Disabling camera port again\n",
@@ -668,7 +665,7 @@ static int set_overlay_params(struct bm2835_mmal_dev *dev,
MMAL_DISPLAY_SET_ALPHA |
MMAL_DISPLAY_SET_DEST_RECT |
MMAL_DISPLAY_SET_FULLSCREEN,
- .layer = PREVIEW_LAYER,
+ .layer = 2,
.alpha = dev->overlay.global_alpha,
.fullscreen = 0,
.dest_rect = {
@@ -767,16 +764,14 @@ static int vidioc_overlay(struct file *file, void *f, unsigned int on)
(!on && !dev->component[COMP_PREVIEW]->enabled))
return 0; /* already in requested state */
- src =
- &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
+ src = &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
if (!on) {
/* disconnect preview ports and disable component */
ret = vchiq_mmal_port_disable(dev->instance, src);
if (!ret)
- ret =
- vchiq_mmal_port_connect_tunnel(dev->instance, src,
- NULL);
+ ret = vchiq_mmal_port_connect_tunnel(dev->instance, src,
+ NULL);
if (ret >= 0)
ret = vchiq_mmal_component_disable(
dev->instance,
@@ -800,9 +795,8 @@ static int vidioc_overlay(struct file *file, void *f, unsigned int on)
if (enable_camera(dev) < 0)
return -EINVAL;
- ret = vchiq_mmal_component_enable(
- dev->instance,
- dev->component[COMP_PREVIEW]);
+ ret = vchiq_mmal_component_enable(dev->instance,
+ dev->component[COMP_PREVIEW]);
if (ret < 0)
return ret;
@@ -1001,6 +995,141 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+
+static int mmal_setup_video_component(struct bm2835_mmal_dev *dev,
+ struct v4l2_format *f)
+{
+ bool overlay_enabled = !!dev->component[COMP_PREVIEW]->enabled;
+ struct vchiq_mmal_port *preview_port;
+ int ret;
+
+ preview_port = &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
+
+ /* Preview and encode ports need to match on resolution */
+ if (overlay_enabled) {
+ /* Need to disable the overlay before we can update
+ * the resolution
+ */
+ ret = vchiq_mmal_port_disable(dev->instance, preview_port);
+ if (!ret) {
+ ret = vchiq_mmal_port_connect_tunnel(dev->instance,
+ preview_port,
+ NULL);
+ }
+ }
+ preview_port->es.video.width = f->fmt.pix.width;
+ preview_port->es.video.height = f->fmt.pix.height;
+ preview_port->es.video.crop.x = 0;
+ preview_port->es.video.crop.y = 0;
+ preview_port->es.video.crop.width = f->fmt.pix.width;
+ preview_port->es.video.crop.height = f->fmt.pix.height;
+ preview_port->es.video.frame_rate.num =
+ dev->capture.timeperframe.denominator;
+ preview_port->es.video.frame_rate.den =
+ dev->capture.timeperframe.numerator;
+ ret = vchiq_mmal_port_set_format(dev->instance, preview_port);
+
+ if (overlay_enabled) {
+ ret = vchiq_mmal_port_connect_tunnel(dev->instance,
+ preview_port,
+ &dev->component[COMP_PREVIEW]->input[0]);
+ if (ret)
+ return ret;
+
+ ret = vchiq_mmal_port_enable(dev->instance, preview_port, NULL);
+ }
+
+ return ret;
+}
+
+static int mmal_setup_encode_component(struct bm2835_mmal_dev *dev,
+ struct v4l2_format *f,
+ struct vchiq_mmal_port *port,
+ struct vchiq_mmal_port *camera_port,
+ struct vchiq_mmal_component *component)
+{
+ struct mmal_fmt *mfmt = get_format(f);
+ int ret;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "vid_cap - set up encode comp\n");
+
+ /* configure buffering */
+ camera_port->current_buffer.size = camera_port->recommended_buffer.size;
+ camera_port->current_buffer.num = camera_port->recommended_buffer.num;
+
+ ret = vchiq_mmal_port_connect_tunnel(dev->instance, camera_port,
+ &component->input[0]);
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s failed to create connection\n", __func__);
+ /* ensure capture is not going to be tried */
+ dev->capture.port = NULL;
+ return ret;
+ }
+
+ port->es.video.width = f->fmt.pix.width;
+ port->es.video.height = f->fmt.pix.height;
+ port->es.video.crop.x = 0;
+ port->es.video.crop.y = 0;
+ port->es.video.crop.width = f->fmt.pix.width;
+ port->es.video.crop.height = f->fmt.pix.height;
+ port->es.video.frame_rate.num =
+ dev->capture.timeperframe.denominator;
+ port->es.video.frame_rate.den =
+ dev->capture.timeperframe.numerator;
+
+ port->format.encoding = mfmt->mmal;
+ port->format.encoding_variant = 0;
+ /* Set any encoding specific parameters */
+ switch (mfmt->mmal_component) {
+ case COMP_VIDEO_ENCODE:
+ port->format.bitrate = dev->capture.encode_bitrate;
+ break;
+ case COMP_IMAGE_ENCODE:
+ /* Could set EXIF parameters here */
+ break;
+ default:
+ break;
+ }
+
+ ret = vchiq_mmal_port_set_format(dev->instance, port);
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s failed to set format %dx%d fmt %08X\n",
+ __func__,
+ f->fmt.pix.width,
+ f->fmt.pix.height,
+ f->fmt.pix.pixelformat);
+ return ret;
+ }
+
+ ret = vchiq_mmal_component_enable(dev->instance, component);
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s Failed to enable encode components\n", __func__);
+ return ret;
+ }
+
+ /* configure buffering */
+ port->current_buffer.num = 1;
+ port->current_buffer.size = f->fmt.pix.sizeimage;
+ if (port->format.encoding == MMAL_ENCODING_JPEG) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "JPG - buf size now %d was %d\n",
+ f->fmt.pix.sizeimage,
+ port->current_buffer.size);
+ port->current_buffer.size =
+ (f->fmt.pix.sizeimage < (100 << 10)) ?
+ (100 << 10) : f->fmt.pix.sizeimage;
+ }
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "vid_cap - cur_buf.size set to %d\n", f->fmt.pix.sizeimage);
+ port->current_buffer.alignment = 0;
+
+ return 0;
+}
+
static int mmal_setup_components(struct bm2835_mmal_dev *dev,
struct v4l2_format *f)
{
@@ -1075,8 +1204,7 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
}
remove_padding = mfmt->remove_padding;
- vchiq_mmal_port_parameter_set(dev->instance,
- camera_port,
+ vchiq_mmal_port_parameter_set(dev->instance, camera_port,
MMAL_PARAMETER_NO_IMAGE_PADDING,
&remove_padding, sizeof(remove_padding));
@@ -1096,46 +1224,7 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
if (!ret &&
camera_port ==
&dev->component[COMP_CAMERA]->output[CAM_PORT_VIDEO]) {
- bool overlay_enabled =
- !!dev->component[COMP_PREVIEW]->enabled;
- struct vchiq_mmal_port *preview_port =
- &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
- /* Preview and encode ports need to match on resolution */
- if (overlay_enabled) {
- /* Need to disable the overlay before we can update
- * the resolution
- */
- ret =
- vchiq_mmal_port_disable(dev->instance,
- preview_port);
- if (!ret)
- ret =
- vchiq_mmal_port_connect_tunnel(
- dev->instance,
- preview_port,
- NULL);
- }
- preview_port->es.video.width = f->fmt.pix.width;
- preview_port->es.video.height = f->fmt.pix.height;
- preview_port->es.video.crop.x = 0;
- preview_port->es.video.crop.y = 0;
- preview_port->es.video.crop.width = f->fmt.pix.width;
- preview_port->es.video.crop.height = f->fmt.pix.height;
- preview_port->es.video.frame_rate.num =
- dev->capture.timeperframe.denominator;
- preview_port->es.video.frame_rate.den =
- dev->capture.timeperframe.numerator;
- ret = vchiq_mmal_port_set_format(dev->instance, preview_port);
- if (overlay_enabled) {
- ret = vchiq_mmal_port_connect_tunnel(
- dev->instance,
- preview_port,
- &dev->component[COMP_PREVIEW]->input[0]);
- if (!ret)
- ret = vchiq_mmal_port_enable(dev->instance,
- preview_port,
- NULL);
- }
+ ret = mmal_setup_video_component(dev, f);
}
if (ret) {
@@ -1145,128 +1234,39 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
f->fmt.pix.pixelformat);
/* ensure capture is not going to be tried */
dev->capture.port = NULL;
- } else {
- if (encode_component) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "vid_cap - set up encode comp\n");
+ return ret;
+ }
- /* configure buffering */
- camera_port->current_buffer.size =
- camera_port->recommended_buffer.size;
- camera_port->current_buffer.num =
- camera_port->recommended_buffer.num;
+ if (encode_component) {
+ ret = mmal_setup_encode_component(dev, f, port,
+ camera_port,
+ encode_component);
- ret =
- vchiq_mmal_port_connect_tunnel(
- dev->instance,
- camera_port,
- &encode_component->input[0]);
- if (ret) {
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "%s failed to create connection\n",
- __func__);
- /* ensure capture is not going to be tried */
- dev->capture.port = NULL;
- } else {
- port->es.video.width = f->fmt.pix.width;
- port->es.video.height = f->fmt.pix.height;
- port->es.video.crop.x = 0;
- port->es.video.crop.y = 0;
- port->es.video.crop.width = f->fmt.pix.width;
- port->es.video.crop.height = f->fmt.pix.height;
- port->es.video.frame_rate.num =
- dev->capture.timeperframe.denominator;
- port->es.video.frame_rate.den =
- dev->capture.timeperframe.numerator;
-
- port->format.encoding = mfmt->mmal;
- port->format.encoding_variant = 0;
- /* Set any encoding specific parameters */
- switch (mfmt->mmal_component) {
- case COMP_VIDEO_ENCODE:
- port->format.bitrate =
- dev->capture.encode_bitrate;
- break;
- case COMP_IMAGE_ENCODE:
- /* Could set EXIF parameters here */
- break;
- default:
- break;
- }
- ret = vchiq_mmal_port_set_format(dev->instance,
- port);
- if (ret)
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "%s failed to set format %dx%d fmt %08X\n",
- __func__,
- f->fmt.pix.width,
- f->fmt.pix.height,
- f->fmt.pix.pixelformat
- );
- }
+ if (ret)
+ return ret;
+ } else {
+ /* configure buffering */
+ camera_port->current_buffer.num = 1;
+ camera_port->current_buffer.size = f->fmt.pix.sizeimage;
+ camera_port->current_buffer.alignment = 0;
+ }
- if (!ret) {
- ret = vchiq_mmal_component_enable(
- dev->instance,
- encode_component);
- if (ret) {
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "%s Failed to enable encode components\n",
- __func__);
- }
- }
- if (!ret) {
- /* configure buffering */
- port->current_buffer.num = 1;
- port->current_buffer.size =
- f->fmt.pix.sizeimage;
- if (port->format.encoding ==
- MMAL_ENCODING_JPEG) {
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "JPG - buf size now %d was %d\n",
- f->fmt.pix.sizeimage,
- port->current_buffer.size);
- port->current_buffer.size =
- (f->fmt.pix.sizeimage <
- (100 << 10)) ?
- (100 << 10) : f->fmt.pix.sizeimage;
- }
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "vid_cap - cur_buf.size set to %d\n",
- f->fmt.pix.sizeimage);
- port->current_buffer.alignment = 0;
- }
- } else {
- /* configure buffering */
- camera_port->current_buffer.num = 1;
- camera_port->current_buffer.size = f->fmt.pix.sizeimage;
- camera_port->current_buffer.alignment = 0;
- }
+ dev->capture.fmt = mfmt;
+ dev->capture.stride = f->fmt.pix.bytesperline;
+ dev->capture.width = camera_port->es.video.crop.width;
+ dev->capture.height = camera_port->es.video.crop.height;
+ dev->capture.buffersize = port->current_buffer.size;
- if (!ret) {
- dev->capture.fmt = mfmt;
- dev->capture.stride = f->fmt.pix.bytesperline;
- dev->capture.width = camera_port->es.video.crop.width;
- dev->capture.height = camera_port->es.video.crop.height;
- dev->capture.buffersize = port->current_buffer.size;
-
- /* select port for capture */
- dev->capture.port = port;
- dev->capture.camera_port = camera_port;
- dev->capture.encode_component = encode_component;
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "Set dev->capture.fmt %08X, %dx%d, stride %d, size %d",
- port->format.encoding,
- dev->capture.width, dev->capture.height,
- dev->capture.stride, dev->capture.buffersize);
- }
- }
+ /* select port for capture */
+ dev->capture.port = port;
+ dev->capture.camera_port = camera_port;
+ dev->capture.encode_component = encode_component;
+ v4l2_dbg(1, bcm2835_v4l2_debug,
+ &dev->v4l2_dev,
+ "Set dev->capture.fmt %08X, %dx%d, stride %d, size %d",
+ port->format.encoding,
+ dev->capture.width, dev->capture.height,
+ dev->capture.stride, dev->capture.buffersize);
/* todo: Need to convert the vchiq/mmal error into a v4l2 error. */
return ret;
@@ -1658,9 +1658,8 @@ static int mmal_init(struct bm2835_mmal_dev *dev)
dev->capture.enc_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
/* get the preview component ready */
- ret = vchiq_mmal_component_init(
- dev->instance, "ril.video_render",
- &dev->component[COMP_PREVIEW]);
+ ret = vchiq_mmal_component_init(dev->instance, "ril.video_render",
+ &dev->component[COMP_PREVIEW]);
if (ret < 0)
goto unreg_camera;
@@ -1672,9 +1671,8 @@ static int mmal_init(struct bm2835_mmal_dev *dev)
}
/* get the image encoder component ready */
- ret = vchiq_mmal_component_init(
- dev->instance, "ril.image_encode",
- &dev->component[COMP_IMAGE_ENCODE]);
+ ret = vchiq_mmal_component_init(dev->instance, "ril.image_encode",
+ &dev->component[COMP_IMAGE_ENCODE]);
if (ret < 0)
goto unreg_preview;
@@ -1734,15 +1732,13 @@ static int mmal_init(struct bm2835_mmal_dev *dev)
unreg_vid_encoder:
pr_err("Cleanup: Destroy video encoder\n");
- vchiq_mmal_component_finalise(
- dev->instance,
- dev->component[COMP_VIDEO_ENCODE]);
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->component[COMP_VIDEO_ENCODE]);
unreg_image_encoder:
pr_err("Cleanup: Destroy image encoder\n");
- vchiq_mmal_component_finalise(
- dev->instance,
- dev->component[COMP_IMAGE_ENCODE]);
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->component[COMP_IMAGE_ENCODE]);
unreg_preview:
pr_err("Cleanup: Destroy video render\n");
@@ -1775,8 +1771,7 @@ static int bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
/* video device needs to be able to access instance data */
video_set_drvdata(vfd, dev);
- ret = video_register_device(vfd,
- VFL_TYPE_VIDEO,
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO,
video_nr[dev->camera_num]);
if (ret < 0)
return ret;
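One detail worth noting in mmal_setup_encode_component() above: for JPEG the port buffer is never allowed below 100 KiB, since f->fmt.pix.sizeimage can underestimate the compressed size. The clamp reduces to a one-liner; a sketch with an illustrative helper name:

#include <stddef.h>

static size_t jpeg_buffer_size(size_t sizeimage)
{
	const size_t min_sz = 100 << 10;	/* 100 KiB floor, as above */

	return sizeimage < min_sz ? min_sz : sizeimage;
}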
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
index b5fce38de038..75524adff0f5 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
@@ -30,79 +30,77 @@ enum {
CAM_PORT_COUNT
};
-#define PREVIEW_LAYER 2
-
extern int bcm2835_v4l2_debug;
struct bm2835_mmal_dev {
/* v4l2 devices */
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct mutex mutex;
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct mutex mutex;
/* controls */
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *ctrls[V4L2_CTRL_COUNT];
- enum v4l2_scene_mode scene_mode;
- struct mmal_colourfx colourfx;
- int hflip;
- int vflip;
- int red_gain;
- int blue_gain;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrls[V4L2_CTRL_COUNT];
+ enum v4l2_scene_mode scene_mode;
+ struct mmal_colourfx colourfx;
+ int hflip;
+ int vflip;
+ int red_gain;
+ int blue_gain;
enum mmal_parameter_exposuremode exposure_mode_user;
enum v4l2_exposure_auto_type exposure_mode_v4l2_user;
/* active exposure mode may differ if selected via a scene mode */
enum mmal_parameter_exposuremode exposure_mode_active;
enum mmal_parameter_exposuremeteringmode metering_mode;
- unsigned int manual_shutter_speed;
- bool exp_auto_priority;
+ unsigned int manual_shutter_speed;
+ bool exp_auto_priority;
bool manual_iso_enabled;
u32 iso;
/* allocated mmal instance and components */
- struct vchiq_mmal_instance *instance;
- struct vchiq_mmal_component *component[COMP_COUNT];
+ struct vchiq_mmal_instance *instance;
+ struct vchiq_mmal_component *component[COMP_COUNT];
int camera_use_count;
struct v4l2_window overlay;
struct {
- unsigned int width; /* width */
- unsigned int height; /* height */
- unsigned int stride; /* stride */
- unsigned int buffersize; /* buffer size with padding */
- struct mmal_fmt *fmt;
+ unsigned int width; /* width */
+ unsigned int height; /* height */
+ unsigned int stride; /* stride */
+ unsigned int buffersize; /* buffer size with padding */
+ struct mmal_fmt *fmt;
struct v4l2_fract timeperframe;
/* H264 encode bitrate */
- int encode_bitrate;
+ int encode_bitrate;
/* H264 bitrate mode. CBR/VBR */
- int encode_bitrate_mode;
+ int encode_bitrate_mode;
/* H264 profile */
enum v4l2_mpeg_video_h264_profile enc_profile;
/* H264 level */
enum v4l2_mpeg_video_h264_level enc_level;
/* JPEG Q-factor */
- int q_factor;
+ int q_factor;
- struct vb2_queue vb_vidq;
+ struct vb2_queue vb_vidq;
/* VC start timestamp for streaming */
- s64 vc_start_timestamp;
+ s64 vc_start_timestamp;
/* Kernel start timestamp for streaming */
ktime_t kernel_start_ts;
/* Sequence number of last buffer */
- u32 sequence;
+ u32 sequence;
- struct vchiq_mmal_port *port; /* port being used for capture */
+ struct vchiq_mmal_port *port; /* port being used for capture */
/* camera port being used for capture */
- struct vchiq_mmal_port *camera_port;
+ struct vchiq_mmal_port *camera_port;
/* component being used for encode */
struct vchiq_mmal_component *encode_component;
/* number of frames remaining which driver should capture */
- unsigned int frame_count;
+ unsigned int frame_count;
/* last frame completion */
- struct completion frame_cmplt;
+ struct completion frame_cmplt;
} capture;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index 5137fcf203d6..b096a12387f7 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -135,8 +135,8 @@ static const struct v4l2_to_mmal_effects_setting
};
struct v4l2_mmal_scene_config {
- enum v4l2_scene_mode v4l2_scene;
- enum mmal_parameter_exposuremode exposure_mode;
+ enum v4l2_scene_mode v4l2_scene;
+ enum mmal_parameter_exposuremode exposure_mode;
enum mmal_parameter_exposuremeteringmode metering_mode;
};
@@ -377,11 +377,9 @@ static int ctrl_set_metering_mode(struct bm2835_mmal_dev *dev,
dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT;
break;
- /* todo matrix weighting not added to Linux API till 3.9
- * case V4L2_EXPOSURE_METERING_MATRIX:
- * dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX;
- * break;
- */
+ case V4L2_EXPOSURE_METERING_MATRIX:
+ dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX;
+ break;
}
if (dev->scene_mode == V4L2_SCENE_MODE_NONE) {
@@ -516,42 +514,41 @@ static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev,
struct mmal_parameter_imagefx_parameters imagefx;
for (i = 0; i < ARRAY_SIZE(v4l2_to_mmal_effects_values); i++) {
- if (ctrl->val == v4l2_to_mmal_effects_values[i].v4l2_effect) {
- imagefx.effect =
- v4l2_to_mmal_effects_values[i].mmal_effect;
- imagefx.num_effect_params =
- v4l2_to_mmal_effects_values[i].num_effect_params;
-
- if (imagefx.num_effect_params > MMAL_MAX_IMAGEFX_PARAMETERS)
- imagefx.num_effect_params = MMAL_MAX_IMAGEFX_PARAMETERS;
-
- for (j = 0; j < imagefx.num_effect_params; j++)
- imagefx.effect_parameter[j] =
- v4l2_to_mmal_effects_values[i].effect_params[j];
-
- dev->colourfx.enable =
- v4l2_to_mmal_effects_values[i].col_fx_enable;
- if (!v4l2_to_mmal_effects_values[i].col_fx_fixed_cbcr) {
- dev->colourfx.u =
- v4l2_to_mmal_effects_values[i].u;
- dev->colourfx.v =
- v4l2_to_mmal_effects_values[i].v;
- }
+ if (ctrl->val != v4l2_to_mmal_effects_values[i].v4l2_effect)
+ continue;
+
+ imagefx.effect =
+ v4l2_to_mmal_effects_values[i].mmal_effect;
+ imagefx.num_effect_params =
+ v4l2_to_mmal_effects_values[i].num_effect_params;
- control = &dev->component[COMP_CAMERA]->control;
+ if (imagefx.num_effect_params > MMAL_MAX_IMAGEFX_PARAMETERS)
+ imagefx.num_effect_params = MMAL_MAX_IMAGEFX_PARAMETERS;
- ret = vchiq_mmal_port_parameter_set(
- dev->instance, control,
- MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
- &imagefx, sizeof(imagefx));
- if (ret)
- goto exit;
+ for (j = 0; j < imagefx.num_effect_params; j++)
+ imagefx.effect_parameter[j] =
+ v4l2_to_mmal_effects_values[i].effect_params[j];
- ret = vchiq_mmal_port_parameter_set(
- dev->instance, control,
- MMAL_PARAMETER_COLOUR_EFFECT,
- &dev->colourfx, sizeof(dev->colourfx));
+ dev->colourfx.enable =
+ v4l2_to_mmal_effects_values[i].col_fx_enable;
+ if (!v4l2_to_mmal_effects_values[i].col_fx_fixed_cbcr) {
+ dev->colourfx.u = v4l2_to_mmal_effects_values[i].u;
+ dev->colourfx.v = v4l2_to_mmal_effects_values[i].v;
}
+
+ control = &dev->component[COMP_CAMERA]->control;
+
+ ret = vchiq_mmal_port_parameter_set(
+ dev->instance, control,
+ MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
+ &imagefx, sizeof(imagefx));
+ if (ret)
+ goto exit;
+
+ ret = vchiq_mmal_port_parameter_set(
+ dev->instance, control,
+ MMAL_PARAMETER_COLOUR_EFFECT,
+ &dev->colourfx, sizeof(dev->colourfx));
}
exit:
@@ -841,8 +838,7 @@ static int ctrl_set_scene_mode(struct bm2835_mmal_dev *dev,
enum mmal_parameter_exposuremeteringmode metering_mode;
for (i = 0; i < ARRAY_SIZE(scene_configs); i++) {
- if (scene_configs[i].v4l2_scene ==
- ctrl->val) {
+ if (scene_configs[i].v4l2_scene == ctrl->val) {
scene = &scene_configs[i];
break;
}
@@ -1045,8 +1041,8 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
{
.id = V4L2_CID_EXPOSURE_METERING,
.type = MMAL_CONTROL_TYPE_STD_MENU,
- .min = ~0x7,
- .max = V4L2_EXPOSURE_METERING_SPOT,
+ .min = ~0xf,
+ .max = V4L2_EXPOSURE_METERING_MATRIX,
.def = V4L2_EXPOSURE_METERING_AVERAGE,
.step = 0,
.imenu = NULL,
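For MMAL_CONTROL_TYPE_STD_MENU entries this driver hands .min to v4l2_ctrl_new_std_menu() as the menu skip mask, where a set bit hides that item; widening ~0x7 to ~0xf therefore exposes item 3, V4L2_EXPOSURE_METERING_MATRIX, matching the new switch case above. A small user-space sketch of the mask arithmetic, illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t skip = ~(uint64_t)0xf;	/* items 0-3 stay selectable */
	int item;

	for (item = 0; item < 5; item++)
		printf("menu item %d: %s\n", item,
		       (skip & (1ULL << item)) ? "skipped" : "shown");
	return 0;
}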
@@ -1282,21 +1278,18 @@ int set_framerate_params(struct bm2835_mmal_dev *dev)
struct mmal_parameter_fps_range fps_range;
int ret;
+ fps_range.fps_high.num = dev->capture.timeperframe.denominator;
+ fps_range.fps_high.den = dev->capture.timeperframe.numerator;
+
if ((dev->exposure_mode_active != MMAL_PARAM_EXPOSUREMODE_OFF) &&
(dev->exp_auto_priority)) {
- /* Variable FPS. Define min FPS as 1fps.
- * Max as max defined FPS.
- */
+ /* Variable FPS. Define min FPS as 1fps. */
fps_range.fps_low.num = 1;
fps_range.fps_low.den = 1;
- fps_range.fps_high.num = dev->capture.timeperframe.denominator;
- fps_range.fps_high.den = dev->capture.timeperframe.numerator;
} else {
/* Fixed FPS - set min and max to be the same */
- fps_range.fps_low.num = fps_range.fps_high.num =
- dev->capture.timeperframe.denominator;
- fps_range.fps_low.den = fps_range.fps_high.den =
- dev->capture.timeperframe.numerator;
+ fps_range.fps_low.num = fps_range.fps_high.num;
+ fps_range.fps_low.den = fps_range.fps_high.den;
}
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
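
The framerate refactor above leans on V4L2's timeperframe being the reciprocal of a frame rate, which is why numerator and denominator swap when the MMAL fps range is filled in. A small worked example (mmal_parameter_rational is assumed to be the num/den pair behind fps_low/fps_high):

        /* 1/30 s per frame  ->  30/1 frames per second (reciprocal) */
        struct v4l2_fract tpf = { .numerator = 1, .denominator = 30 };
        struct mmal_parameter_rational fps = {
                .num = tpf.denominator, /* 30 */
                .den = tpf.numerator,   /* 1  */
        };
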
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
index ff5398737b4a..ce88fac7c24b 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
@@ -26,13 +26,13 @@ struct mmal_msg_context;
/* mapping between v4l and mmal video modes */
struct mmal_fmt {
- u32 fourcc; /* v4l2 format id */
- int flags; /* v4l2 flags field */
- u32 mmal;
- int depth;
- u32 mmal_component; /* MMAL component index to be used to encode */
- u32 ybbp; /* depth of first Y plane for planar formats */
- bool remove_padding; /* Does the GPU have to remove padding,
+ u32 fourcc; /* v4l2 format id */
+ int flags; /* v4l2 flags field */
+ u32 mmal;
+ int depth;
+ u32 mmal_component; /* MMAL component index to be used to encode */
+ u32 ybbp; /* depth of first Y plane for planar formats */
+ bool remove_padding; /* Does the GPU have to remove padding,
* or can we do hide padding via bytesperline.
*/
};
@@ -40,10 +40,10 @@ struct mmal_fmt {
/* buffer for one video frame */
struct mmal_buffer {
/* v4l buffer data -- must be first */
- struct vb2_v4l2_buffer vb;
+ struct vb2_v4l2_buffer vb;
/* list of buffers available */
- struct list_head list;
+ struct list_head list;
void *buffer; /* buffer pointer */
unsigned long buffer_size; /* size of allocated buffer */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
index 80a99128f5f3..f4ac5a6149ea 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
@@ -724,11 +724,11 @@ struct mmal_parameter_imagefx_parameters {
#define MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN 16
struct mmal_parameter_camera_info_camera_t {
- u32 port_id;
- u32 max_width;
- u32 max_height;
- u32 lens_present;
- u8 camera_name[MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN];
+ u32 port_id;
+ u32 max_width;
+ u32 max_height;
+ u32 lens_present;
+ u8 camera_name[MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN];
};
enum mmal_parameter_camera_info_flash_type_t {
@@ -744,8 +744,8 @@ struct mmal_parameter_camera_info_flash_t {
};
struct mmal_parameter_camera_info_t {
- u32 num_cameras;
- u32 num_flashes;
+ u32 num_cameras;
+ u32 num_flashes;
struct mmal_parameter_camera_info_camera_t
cameras[MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS];
struct mmal_parameter_camera_info_flash_t
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index ff2b960d8cac..1a981e98e82b 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -60,32 +60,18 @@ struct vchi_service_handle;
* (local / remote)
*****************************************************************************/
-#ifdef __cplusplus
-extern "C" {
-#endif
-
// Routine used to initialise the vchi on both local + remote connections
extern int32_t vchi_initialise(struct vchi_instance_handle **instance_handle);
-extern int32_t vchi_exit(void);
-
extern int32_t vchi_connect(struct vchi_instance_handle *instance_handle);
//When this is called, ensure that all services have no data pending.
//Bulk transfers can remain 'queued'
extern int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle);
-// helper functions
-extern void *vchi_allocate_buffer(struct vchi_service_handle *handle, uint32_t *length);
-extern void vchi_free_buffer(struct vchi_service_handle *handle, void *address);
-extern uint32_t vchi_current_time(struct vchi_instance_handle *instance_handle);
-
/******************************************************************************
* Global service API
*****************************************************************************/
-// Routine to destroy a service
-extern int32_t vchi_service_destroy(const struct vchi_service_handle *handle);
-
// Routine to open a named service
extern int32_t vchi_service_open(struct vchi_instance_handle *instance_handle,
struct service_creation *setup,
@@ -103,23 +89,12 @@ extern int32_t vchi_service_use(const struct vchi_service_handle *handle);
// Routine to decrement ref count on a named service
extern int32_t vchi_service_release(const struct vchi_service_handle *handle);
-// Routine to set a control option for a named service
-extern int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
- enum vchi_service_option option,
- int value);
-
/* Routine to send a message from kernel memory across a service */
extern int
vchi_queue_kernel_message(struct vchi_service_handle *handle,
void *data,
unsigned int size);
-/* Routine to send a message from user memory across a service */
-extern int
-vchi_queue_user_message(struct vchi_service_handle *handle,
- void __user *data,
- unsigned int size);
-
// Routine to receive a msg from a service
// Dequeue is equivalent to hold, copy into client buffer, release
extern int32_t vchi_msg_dequeue(struct vchi_service_handle *handle,
@@ -149,54 +124,14 @@ extern int32_t vchi_msg_hold(struct vchi_service_handle *handle,
enum vchi_flags flags,
struct vchi_held_msg *message_descriptor);
-// Initialise an iterator to look through messages in place
-extern int32_t vchi_msg_look_ahead(struct vchi_service_handle *handle,
- struct vchi_msg_iter *iter,
- enum vchi_flags flags);
-
/*******************************************************************************
* Global service support API - operations on held messages
* and message iterators
******************************************************************************/
-// Routine to get the address of a held message
-extern void *vchi_held_msg_ptr(const struct vchi_held_msg *message);
-
-// Routine to get the size of a held message
-extern int32_t vchi_held_msg_size(const struct vchi_held_msg *message);
-
-// Routine to get the transmit timestamp as written into the header by the peer
-extern uint32_t vchi_held_msg_tx_timestamp(const struct vchi_held_msg *message);
-
-// Routine to get the reception timestamp, written as we parsed the header
-extern uint32_t vchi_held_msg_rx_timestamp(const struct vchi_held_msg *message);
-
// Routine to release a held message after it has been processed
extern int32_t vchi_held_msg_release(struct vchi_held_msg *message);
-// Indicates whether the iterator has a next message.
-extern int32_t vchi_msg_iter_has_next(const struct vchi_msg_iter *iter);
-
-// Return the pointer and length for the next message and advance the iterator.
-extern int32_t vchi_msg_iter_next(struct vchi_msg_iter *iter,
- void **data,
- uint32_t *msg_size);
-
-// Remove the last message returned by vchi_msg_iter_next.
-// Can only be called once after each call to vchi_msg_iter_next.
-extern int32_t vchi_msg_iter_remove(struct vchi_msg_iter *iter);
-
-// Hold the last message returned by vchi_msg_iter_next.
-// Can only be called once after each call to vchi_msg_iter_next.
-extern int32_t vchi_msg_iter_hold(struct vchi_msg_iter *iter,
- struct vchi_held_msg *message);
-
-// Return information for the next message, and hold it, advancing the iterator.
-extern int32_t vchi_msg_iter_hold_next(struct vchi_msg_iter *iter,
- void **data, // } may be NULL
- uint32_t *msg_size, // }
- struct vchi_held_msg *message);
-
/******************************************************************************
* Global bulk API
*****************************************************************************/
@@ -208,13 +143,6 @@ extern int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle,
enum vchi_flags flags,
void *transfer_handle);
-// Prepare interface for a transfer from the other side into relocatable memory.
-int32_t vchi_bulk_queue_receive_reloc(const struct vchi_service_handle *handle,
- uint32_t offset,
- uint32_t data_size,
- const enum vchi_flags flags,
- void * const bulk_handle);
-
// Routine to queue up data ready for transfer to the other (once they have signalled they are ready)
extern int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
const void *data_src,
@@ -226,15 +154,6 @@ extern int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
* Configuration plumbing
*****************************************************************************/
-#ifdef __cplusplus
-}
-#endif
-
-extern int32_t vchi_bulk_queue_transmit_reloc(struct vchi_service_handle *handle,
- uint32_t offset,
- uint32_t data_size,
- enum vchi_flags flags,
- void *transfer_handle);
#endif /* VCHI_H_ */
/****************************** End of file **********************************/
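
With the dead declarations and the C++ guards gone, the surviving VCHI surface is small enough to show end to end. A hypothetical client flow using only calls still declared above (error handling abbreviated; the service_creation contents and the third vchi_service_open() argument returning the handle are assumptions):

        struct vchi_instance_handle *inst;
        struct vchi_service_handle *svc;
        struct service_creation setup = { /* service parameters elided */ };

        if (vchi_initialise(&inst) != 0 || vchi_connect(inst) != 0)
                return -EIO;

        if (vchi_service_open(inst, &setup, &svc) != 0)
                return -ENXIO;

        vchi_queue_kernel_message(svc, buf, len); /* buf/len: caller-owned */
        vchi_service_close(svc);
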
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index c18c6ca0b6c0..38a13e4618a8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -371,14 +371,15 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
pagelistinfo->scatterlist = scatterlist;
pagelistinfo->scatterlist_mapped = 0;
- if (is_vmalloc_addr(buf)) {
+ if (is_vmalloc_addr((void __force *)buf)) {
unsigned long length = count;
unsigned int off = offset;
for (actual_pages = 0; actual_pages < num_pages;
actual_pages++) {
- struct page *pg = vmalloc_to_page(buf + (actual_pages *
- PAGE_SIZE));
+ struct page *pg =
+ vmalloc_to_page((void __force *)(buf +
+ (actual_pages * PAGE_SIZE)));
size_t bytes = PAGE_SIZE - off;
if (!pg) {
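
The __force casts above exist purely for sparse: buf arrives typed as a __user pointer, but this branch only runs when it actually refers to vmalloc'd kernel memory, so the address-space annotation is dropped at the point of use. A condensed sketch of the same walk (the helper name and return codes are hypothetical):

        static int count_vmalloc_pages(char __user *buf, unsigned int num_pages)
        {
                void *kbuf = (void __force *)buf; /* strip __user for sparse only */
                unsigned int i;

                if (!is_vmalloc_addr(kbuf))
                        return -EINVAL;

                for (i = 0; i < num_pages; i++)
                        if (!vmalloc_to_page(kbuf + i * PAGE_SIZE))
                                return -ENOMEM; /* hole in the mapping */

                return 0;
        }
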
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index a1ea9777a444..28ea8c3a4cba 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1209,7 +1209,9 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* The completion must point to the
** msgbuf. */
- completion->header = msgbuf;
+ completion->header =
+ (struct vchiq_header __force *)
+ msgbuf;
}
if ((completion->reason ==
@@ -2353,7 +2355,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum vchiq_status ret = VCHIQ_SUCCESS;
char entity[16];
int *entity_uc;
- int local_uc, local_entity_uc;
+ int local_uc;
if (!arm_state)
goto out;
@@ -2377,7 +2379,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
write_lock_bh(&arm_state->susp_res_lock);
local_uc = ++arm_state->videocore_use_count;
- local_entity_uc = ++(*entity_uc);
+ ++(*entity_uc);
vchiq_log_trace(vchiq_susp_log_level,
"%s %s count %d, state count %d",
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 1640906e3929..79b75efa6868 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -14,12 +14,7 @@ static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
static int g_once_init;
static struct mutex g_connected_mutex;
-/****************************************************************************
-*
-* Function to initialize our lock.
-*
-***************************************************************************/
-
+/* Function to initialize our lock */
static void connected_init(void)
{
if (!g_once_init) {
@@ -28,15 +23,12 @@ static void connected_init(void)
}
}
-/****************************************************************************
-*
-* This function is used to defer initialization until the vchiq stack is
-* initialized. If the stack is already initialized, then the callback will
-* be made immediately, otherwise it will be deferred until
-* vchiq_call_connected_callbacks is called.
-*
-***************************************************************************/
-
+/*
+ * This function is used to defer initialization until the vchiq stack is
+ * initialized. If the stack is already initialized, then the callback will
+ * be made immediately, otherwise it will be deferred until
+ * vchiq_call_connected_callbacks is called.
+ */
void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
{
connected_init();
@@ -63,13 +55,10 @@ void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
mutex_unlock(&g_connected_mutex);
}
-/****************************************************************************
-*
-* This function is called by the vchiq stack once it has been connected to
-* the videocore and clients can start to use the stack.
-*
-***************************************************************************/
-
+/*
+ * This function is called by the vchiq stack once it has been connected to
+ * the videocore and clients can start to use the stack.
+ */
void vchiq_call_connected_callbacks(void)
{
int i;
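
connected_init() guards mutex_init() with the g_once_init flag, which relies on early callers never racing each other. A statically initialized lock would remove both the flag and the helper; a one-line sketch of that alternative:

        static DEFINE_MUTEX(g_connected_mutex); /* no connected_init() needed */
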
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index edcd97373809..ae9183db44ee 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -372,6 +372,10 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
service->state->id, service->handle);
status = VCHIQ_SUCCESS;
}
+
+ if (reason != VCHIQ_MESSAGE_AVAILABLE)
+ vchiq_release_message(service->handle, header);
+
return status;
}
@@ -1480,15 +1484,6 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
: VCHIQ_SRVSTATE_OPEN);
}
- service->remoteport = remoteport;
- service->client_id = ((int *)header->data)[1];
- if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
- NULL, NULL) == VCHIQ_RETRY) {
- /* Bail out if not ready */
- service->remoteport = VCHIQ_PORT_FREE;
- goto bail_not_ready;
- }
-
/* Success - the message has been dealt with */
unlock_service(service);
return 1;
@@ -3147,6 +3142,12 @@ error_exit:
return status;
}
+enum vchiq_status vchiq_queue_kernel_message(unsigned int handle, void *context,
+ size_t size)
+{
+ return vchiq_queue_message(handle, memcpy_copy_callback, context, size);
+}
+
void
vchiq_release_message(unsigned int handle,
struct vchiq_header *header)
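
vchiq_queue_kernel_message() is a thin wrapper that pairs vchiq_queue_message() with a plain-memcpy copy callback, replacing the callback each shim user used to carry. The in-tree memcpy_copy_callback is not shown in this hunk; it is assumed to match the shim version deleted later in this patch, roughly:

        static ssize_t memcpy_copy_callback(void *context, void *dest,
                                            size_t offset, size_t maxsize)
        {
                memcpy(dest, (char *)context + offset, maxsize);
                return maxsize;
        }

Note also the make_service_callback() change above: the core now releases the header itself for every reason other than VCHIQ_MESSAGE_AVAILABLE, which is why the shim's own vchiq_release_message() call disappears further down.
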
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index cedd8e721aae..1fe6cd8b86c0 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -587,6 +587,13 @@ lock_service(struct vchiq_service *service);
extern void
unlock_service(struct vchiq_service *service);
+extern enum vchiq_status
+vchiq_queue_message(unsigned int handle,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size);
+
/* The following functions are called from vchiq_core, and external
** implementations must be provided. */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index 39b77ea19210..b62fd6d6f1ac 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -105,12 +105,8 @@ extern enum vchiq_status vchiq_close_service(unsigned int service);
extern enum vchiq_status vchiq_remove_service(unsigned int service);
extern enum vchiq_status vchiq_use_service(unsigned int service);
extern enum vchiq_status vchiq_release_service(unsigned int service);
-extern enum vchiq_status
-vchiq_queue_message(unsigned int handle,
- ssize_t (*copy_callback)(void *context, void *dest,
- size_t offset, size_t maxsize),
- void *context,
- size_t size);
+extern enum vchiq_status vchiq_queue_kernel_message(unsigned int handle,
+ void *context, size_t size);
extern void vchiq_release_message(unsigned int service,
struct vchiq_header *header);
extern enum vchiq_status vchiq_bulk_transmit(unsigned int service,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index efdd3b1c7d85..75d87b6992c4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -9,8 +9,6 @@
#include "vchiq_util.h"
-#define vchiq_status_to_vchi(status) ((int32_t)status)
-
struct shim_service {
unsigned int handle;
@@ -84,35 +82,15 @@ int32_t vchi_msg_remove(struct vchi_service_handle *handle)
}
EXPORT_SYMBOL(vchi_msg_remove);
-/***********************************************************
- * Name: vchi_msg_queue
- *
- * Arguments: struct vchi_service_handle *handle,
- * ssize_t (*copy_callback)(void *context, void *dest,
- * size_t offset, size_t maxsize),
- * void *context,
- * uint32_t data_size
- *
- * Description: Thin wrapper to queue a message onto a connection
- *
- * Returns: int32_t - success == 0
- *
- ***********************************************************/
-static
-int32_t vchi_msg_queue(struct vchi_service_handle *handle,
- ssize_t (*copy_callback)(void *context, void *dest,
- size_t offset, size_t maxsize),
- void *context,
- uint32_t data_size)
+int vchi_queue_kernel_message(struct vchi_service_handle *handle, void *data,
+ unsigned int size)
{
struct shim_service *service = (struct shim_service *)handle;
enum vchiq_status status;
while (1) {
- status = vchiq_queue_message(service->handle,
- copy_callback,
- context,
- data_size);
+ status = vchiq_queue_kernel_message(service->handle, data,
+ size);
/*
* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
@@ -125,65 +103,10 @@ int32_t vchi_msg_queue(struct vchi_service_handle *handle,
msleep(1);
}
- return vchiq_status_to_vchi(status);
-}
-
-static ssize_t
-vchi_queue_kernel_message_callback(void *context,
- void *dest,
- size_t offset,
- size_t maxsize)
-{
- memcpy(dest, context + offset, maxsize);
- return maxsize;
-}
-
-int
-vchi_queue_kernel_message(struct vchi_service_handle *handle,
- void *data,
- unsigned int size)
-{
- return vchi_msg_queue(handle,
- vchi_queue_kernel_message_callback,
- data,
- size);
+ return status;
}
EXPORT_SYMBOL(vchi_queue_kernel_message);
-struct vchi_queue_user_message_context {
- void __user *data;
-};
-
-static ssize_t
-vchi_queue_user_message_callback(void *context,
- void *dest,
- size_t offset,
- size_t maxsize)
-{
- struct vchi_queue_user_message_context *copycontext = context;
-
- if (copy_from_user(dest, copycontext->data + offset, maxsize))
- return -EFAULT;
-
- return maxsize;
-}
-
-int
-vchi_queue_user_message(struct vchi_service_handle *handle,
- void __user *data,
- unsigned int size)
-{
- struct vchi_queue_user_message_context copycontext = {
- .data = data
- };
-
- return vchi_msg_queue(handle,
- vchi_queue_user_message_callback,
- &copycontext,
- size);
-}
-EXPORT_SYMBOL(vchi_queue_user_message);
-
/***********************************************************
* Name: vchi_bulk_queue_receive
*
@@ -221,7 +144,7 @@ int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle, void *data_d
break;
default:
WARN(1, "unsupported message\n");
- return vchiq_status_to_vchi(VCHIQ_ERROR);
+ return VCHIQ_ERROR;
}
while (1) {
@@ -238,7 +161,7 @@ int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle, void *data_d
msleep(1);
}
- return vchiq_status_to_vchi(status);
+ return status;
}
EXPORT_SYMBOL(vchi_bulk_queue_receive);
@@ -282,7 +205,7 @@ int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
break;
default:
WARN(1, "unsupported message\n");
- return vchiq_status_to_vchi(VCHIQ_ERROR);
+ return VCHIQ_ERROR;
}
while (1) {
@@ -300,7 +223,7 @@ int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
msleep(1);
}
- return vchiq_status_to_vchi(status);
+ return status;
}
EXPORT_SYMBOL(vchi_bulk_queue_transmit);
@@ -447,7 +370,7 @@ int32_t vchi_initialise(struct vchi_instance_handle **instance_handle)
*instance_handle = (struct vchi_instance_handle *)instance;
- return vchiq_status_to_vchi(status);
+ return status;
}
EXPORT_SYMBOL(vchi_initialise);
@@ -485,7 +408,7 @@ int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle)
{
struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
- return vchiq_status_to_vchi(vchiq_shutdown(instance));
+ return vchiq_shutdown(instance);
}
EXPORT_SYMBOL(vchi_disconnect);
@@ -521,7 +444,7 @@ static enum vchiq_status shim_callback(enum vchiq_reason reason,
service->callback(service->callback_param,
VCHI_CALLBACK_MSG_AVAILABLE, NULL);
- goto done;
+ break;
case VCHIQ_BULK_TRANSMIT_DONE:
service->callback(service->callback_param,
@@ -538,10 +461,6 @@ static enum vchiq_status shim_callback(enum vchiq_reason reason,
VCHI_CALLBACK_SERVICE_CLOSED, NULL);
break;
- case VCHIQ_SERVICE_OPENED:
- /* No equivalent VCHI reason */
- break;
-
case VCHIQ_BULK_TRANSMIT_ABORTED:
service->callback(service->callback_param,
VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
@@ -560,8 +479,6 @@ static enum vchiq_status shim_callback(enum vchiq_reason reason,
}
release:
- vchiq_release_message(service->handle, header);
-done:
return VCHIQ_SUCCESS;
}
@@ -636,62 +553,12 @@ int32_t vchi_service_close(const struct vchi_service_handle *handle)
if (status == VCHIQ_SUCCESS)
service_free(service);
- ret = vchiq_status_to_vchi(status);
+ ret = status;
}
return ret;
}
EXPORT_SYMBOL(vchi_service_close);
-int32_t vchi_service_destroy(const struct vchi_service_handle *handle)
-{
- int32_t ret = -1;
- struct shim_service *service = (struct shim_service *)handle;
-
- if (service) {
- enum vchiq_status status = vchiq_remove_service(service->handle);
-
- if (status == VCHIQ_SUCCESS) {
- service_free(service);
- service = NULL;
- }
-
- ret = vchiq_status_to_vchi(status);
- }
- return ret;
-}
-EXPORT_SYMBOL(vchi_service_destroy);
-
-int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
- enum vchi_service_option option,
- int value)
-{
- int32_t ret = -1;
- struct shim_service *service = (struct shim_service *)handle;
- enum vchiq_service_option vchiq_option;
-
- switch (option) {
- case VCHI_SERVICE_OPTION_TRACE:
- vchiq_option = VCHIQ_SERVICE_OPTION_TRACE;
- break;
- case VCHI_SERVICE_OPTION_SYNCHRONOUS:
- vchiq_option = VCHIQ_SERVICE_OPTION_SYNCHRONOUS;
- break;
- default:
- service = NULL;
- break;
- }
- if (service) {
- enum vchiq_status status =
- vchiq_set_service_option(service->handle,
- vchiq_option,
- value);
-
- ret = vchiq_status_to_vchi(status);
- }
- return ret;
-}
-EXPORT_SYMBOL(vchi_service_set_option);
-
int32_t vchi_get_peer_version(const struct vchi_service_handle *handle, short *peer_version)
{
int32_t ret = -1;
@@ -701,7 +568,7 @@ int32_t vchi_get_peer_version(const struct vchi_service_handle *handle, short *p
enum vchiq_status status;
status = vchiq_get_peer_version(service->handle, peer_version);
- ret = vchiq_status_to_vchi(status);
+ ret = status;
}
return ret;
}
@@ -723,7 +590,7 @@ int32_t vchi_service_use(const struct vchi_service_handle *handle)
struct shim_service *service = (struct shim_service *)handle;
if (service)
- ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
+ ret = vchiq_use_service(service->handle);
return ret;
}
EXPORT_SYMBOL(vchi_service_use);
@@ -744,8 +611,7 @@ int32_t vchi_service_release(const struct vchi_service_handle *handle)
struct shim_service *service = (struct shim_service *)handle;
if (service)
- ret = vchiq_status_to_vchi(
- vchiq_release_service(service->handle));
+ ret = vchiq_release_service(service->handle);
return ret;
}
EXPORT_SYMBOL(vchi_service_release);
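
Every blocking shim entry point now uses the same back-off idiom, with the vchiq status passed straight through instead of being laundered by vchiq_status_to_vchi() (which was only a cast). Condensed from the loops above:

        do {
                status = vchiq_queue_kernel_message(service->handle,
                                                    data, size);
                if (status == VCHIQ_RETRY)
                        msleep(1); /* queue full: back off and retry */
        } while (status == VCHIQ_RETRY);

        return status;
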
diff --git a/drivers/staging/vt6655/Makefile b/drivers/staging/vt6655/Makefile
index a151f30fc46f..e70357ec0af8 100644
--- a/drivers/staging/vt6655/Makefile
+++ b/drivers/staging/vt6655/Makefile
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# TODO: all of these should be removed
-ccflags-y := -DLINUX -D__KERNEL__ -D__NO_VERSION__
-ccflags-y += -DHOSTAP
vt6655_stage-y += device_main.o \
card.o \
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index b4cdc0b7fee7..6b25d75d2501 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -12,12 +12,10 @@
* Date: Aug.22, 2002
*
* Functions:
- * BBuGetFrameTime - Calculate data frame transmitting time
- * BBvCalculateParameter - Calculate PhyLength, PhyService and Phy Signal
- * parameter for baseband Tx
- * BBbReadEmbedded - Embedded read baseband register via MAC
- * BBbWriteEmbedded - Embedded write baseband register via MAC
- * BBbVT3253Init - VIA VT3253 baseband chip init code
+ * bb_get_frame_time - Calculate data frame transmission time
+ * bb_read_embedded - Embedded read baseband register via MAC
+ * bb_write_embedded - Embedded write baseband register via MAC
+ * bb_vt3253_init - VIA VT3253 baseband chip init code
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
@@ -1695,53 +1693,53 @@ static const unsigned short awcFrameTime[MAX_RATE] = {
*
* Parameters:
* In:
- * byPreambleType - Preamble Type
- * byPktType - PK_TYPE_11A, PK_TYPE_11B, PK_TYPE_11GB, PK_TYPE_11GA
- * cbFrameLength - Baseband Type
- * wRate - Tx Rate
+ * by_preamble_type - Preamble Type
+ * by_pkt_type - PK_TYPE_11A, PK_TYPE_11B, PK_TYPE_11GB, PK_TYPE_11GA
+ * cb_frame_length - Baseband Type
+ * tx_rate - Tx Rate
* Out:
*
* Return Value: FrameTime
*
*/
-unsigned int BBuGetFrameTime(unsigned char byPreambleType,
- unsigned char byPktType,
- unsigned int cbFrameLength, unsigned short wRate)
+unsigned int bb_get_frame_time(unsigned char by_preamble_type,
+ unsigned char by_pkt_type,
+ unsigned int cb_frame_length,
+ unsigned short tx_rate)
{
- unsigned int uFrameTime;
- unsigned int uPreamble;
- unsigned int uTmp;
- unsigned int uRateIdx = (unsigned int)wRate;
- unsigned int uRate = 0;
+ unsigned int frame_time;
+ unsigned int preamble;
+ unsigned int tmp;
+ unsigned int rate_idx = (unsigned int)tx_rate;
+ unsigned int rate = 0;
- if (uRateIdx > RATE_54M)
+ if (rate_idx > RATE_54M)
return 0;
- uRate = (unsigned int)awcFrameTime[uRateIdx];
+ rate = (unsigned int)awcFrameTime[rate_idx];
- if (uRateIdx <= 3) { /* CCK mode */
- if (byPreambleType == 1) /* Short */
- uPreamble = 96;
+ if (rate_idx <= 3) { /* CCK mode */
+ if (by_preamble_type == 1) /* Short */
+ preamble = 96;
else
- uPreamble = 192;
-
- uFrameTime = (cbFrameLength * 80) / uRate; /* ????? */
- uTmp = (uFrameTime * uRate) / 80;
- if (cbFrameLength != uTmp)
- uFrameTime++;
+ preamble = 192;
+ frame_time = (cb_frame_length * 80) / rate; /* us; rate in 100 kbps units */
+ tmp = (frame_time * rate) / 80;
+ if (cb_frame_length != tmp)
+ frame_time++;
- return uPreamble + uFrameTime;
+ return preamble + frame_time;
}
- uFrameTime = (cbFrameLength * 8 + 22) / uRate; /* ???????? */
- uTmp = ((uFrameTime * uRate) - 22) / 8;
- if (cbFrameLength != uTmp)
- uFrameTime++;
+ frame_time = (cb_frame_length * 8 + 22) / rate; /* ???????? */
+ tmp = ((frame_time * rate) - 22) / 8;
+ if (cb_frame_length != tmp)
+ frame_time++;
- uFrameTime = uFrameTime * 4; /* ??????? */
- if (byPktType != PK_TYPE_11A)
- uFrameTime += 6; /* ?????? */
+ frame_time = frame_time * 4; /* ??????? */
+ if (by_pkt_type != PK_TYPE_11A)
+ frame_time += 6; /* ?????? */
- return 20 + uFrameTime; /* ?????? */
+ return 20 + frame_time; /* ?????? */
}
/*
@@ -1899,34 +1897,34 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
* Parameters:
* In:
* iobase - I/O base address
- * byBBAddr - address of register in Baseband
+ * by_bb_addr - address of register in Baseband
* Out:
- * pbyData - data read
+ * pby_data - data read
*
* Return Value: true if succeeded; false if failed.
*
*/
-bool BBbReadEmbedded(struct vnt_private *priv,
- unsigned char byBBAddr, unsigned char *pbyData)
+bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
+ unsigned char *pby_data)
{
void __iomem *iobase = priv->PortOffset;
unsigned short ww;
- unsigned char byValue;
+ unsigned char by_value;
/* BB reg offset */
- VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
+ VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
/* turn on REGR */
MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
- if (byValue & BBREGCTL_DONE)
+ VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+ if (by_value & BBREGCTL_DONE)
break;
}
/* get BB data */
- VNSvInPortB(iobase + MAC_REG_BBREGDATA, pbyData);
+ VNSvInPortB(iobase + MAC_REG_BBREGDATA, pby_data);
if (ww == W_MAX_TIMEOUT) {
pr_debug(" DBG_PORT80(0x30)\n");
@@ -1941,32 +1939,32 @@ bool BBbReadEmbedded(struct vnt_private *priv,
* Parameters:
* In:
* iobase - I/O base address
- * byBBAddr - address of register in Baseband
- * byData - data to write
+ * by_bb_addr - address of register in Baseband
+ * by_data - data to write
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
-bool BBbWriteEmbedded(struct vnt_private *priv,
- unsigned char byBBAddr, unsigned char byData)
+bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
+ unsigned char by_data)
{
void __iomem *iobase = priv->PortOffset;
unsigned short ww;
- unsigned char byValue;
+ unsigned char by_value;
/* BB reg offset */
- VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
+ VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
/* set BB data */
- VNSvOutPortB(iobase + MAC_REG_BBREGDATA, byData);
+ VNSvOutPortB(iobase + MAC_REG_BBREGDATA, by_data);
/* turn on BBREGCTL_REGW */
MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
- if (byValue & BBREGCTL_DONE)
+ VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+ if (by_value & BBREGCTL_DONE)
break;
}
@@ -1992,29 +1990,29 @@ bool BBbWriteEmbedded(struct vnt_private *priv,
*
*/
-bool BBbVT3253Init(struct vnt_private *priv)
+bool bb_vt3253_init(struct vnt_private *priv)
{
- bool bResult = true;
+ bool result = true;
int ii;
void __iomem *iobase = priv->PortOffset;
- unsigned char byRFType = priv->byRFType;
- unsigned char byLocalID = priv->byLocalID;
+ unsigned char by_rf_type = priv->byRFType;
+ unsigned char by_local_id = priv->byLocalID;
- if (byRFType == RF_RFMD2959) {
- if (byLocalID <= REV_ID_VT3253_A1) {
+ if (by_rf_type == RF_RFMD2959) {
+ if (by_local_id <= REV_ID_VT3253_A1) {
for (ii = 0; ii < CB_VT3253_INIT_FOR_RFMD; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253InitTab_RFMD[ii][0],
byVT3253InitTab_RFMD[ii][1]);
} else {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_RFMD; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_RFMD[ii][0],
byVT3253B0_RFMD[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC_FOR_RFMD2959; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC4_RFMD2959[ii][0],
byVT3253B0_AGC4_RFMD2959[ii][1]);
@@ -2029,14 +2027,14 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[1] = -50;
priv->ldBmThreshold[2] = 0;
priv->ldBmThreshold[3] = 0;
- } else if ((byRFType == RF_AIROHA) || (byRFType == RF_AL2230S)) {
+ } else if ((by_rf_type == RF_AIROHA) || (by_rf_type == RF_AL2230S)) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AIROHA2230[ii][0],
byVT3253B0_AIROHA2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
priv->abyBBVGA[0] = 0x1C;
@@ -2047,14 +2045,14 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[1] = -48;
priv->ldBmThreshold[2] = 0;
priv->ldBmThreshold[3] = 0;
- } else if (byRFType == RF_UW2451) {
+ } else if (by_rf_type == RF_UW2451) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_UW2451[ii][0],
byVT3253B0_UW2451[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0],
byVT3253B0_AGC[ii][1]);
@@ -2069,9 +2067,9 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[1] = -50;
priv->ldBmThreshold[2] = 0;
priv->ldBmThreshold[3] = 0;
- } else if (byRFType == RF_UW2452) {
+ } else if (by_rf_type == RF_UW2452) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_UW2451[ii][0],
byVT3253B0_UW2451[ii][1]);
@@ -2080,7 +2078,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
* 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
+ /* result &= bb_write_embedded(iobase, 0x09, 0x41); */
/* Init ANT B select,
* RX Config CR10 = 0x28->0x2A,
@@ -2088,23 +2086,23 @@ bool BBbVT3253Init(struct vnt_private *priv)
* make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
+ /* result &= bb_write_embedded(iobase, 0x0a, 0x28); */
/* Select VC1/VC2, CR215 = 0x02->0x06 */
- bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
+ result &= bb_write_embedded(priv, 0xd7, 0x06);
/* {{RobertYu:20050125, request by Jack */
- bResult &= BBbWriteEmbedded(priv, 0x90, 0x20);
- bResult &= BBbWriteEmbedded(priv, 0x97, 0xeb);
+ result &= bb_write_embedded(priv, 0x90, 0x20);
+ result &= bb_write_embedded(priv, 0x97, 0xeb);
/* }} */
/* {{RobertYu:20050221, request by Jack */
- bResult &= BBbWriteEmbedded(priv, 0xa6, 0x00);
- bResult &= BBbWriteEmbedded(priv, 0xa8, 0x30);
+ result &= bb_write_embedded(priv, 0xa6, 0x00);
+ result &= bb_write_embedded(priv, 0xa8, 0x30);
/* }} */
- bResult &= BBbWriteEmbedded(priv, 0xb0, 0x58);
+ result &= bb_write_embedded(priv, 0xb0, 0x58);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
priv->abyBBVGA[0] = 0x14;
@@ -2117,14 +2115,14 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[3] = 0;
/* }} RobertYu */
- } else if (byRFType == RF_VT3226) {
+ } else if (by_rf_type == RF_VT3226) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AIROHA2230[ii][0],
byVT3253B0_AIROHA2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
priv->abyBBVGA[0] = 0x1C;
@@ -2138,9 +2136,9 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* Fix VT3226 DFC system timing issue */
MACvSetRFLE_LatchBase(iobase);
/* {{ RobertYu: 20050104 */
- } else if (byRFType == RF_AIROHA7230) {
+ } else if (by_rf_type == RF_AIROHA7230) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AIROHA2230[ii][0],
byVT3253B0_AIROHA2230[ii][1]);
@@ -2148,17 +2146,17 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* Init ANT B select,TX Config CR09 = 0x61->0x45,
* 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
+ /* result &= bb_write_embedded(iobase, 0x09, 0x41); */
/* Init ANT B select,RX Config CR10 = 0x28->0x2A,
* 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
+ /* result &= bb_write_embedded(iobase, 0x0a, 0x28); */
/* Select VC1/VC2, CR215 = 0x02->0x06 */
- bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
+ result &= bb_write_embedded(priv, 0xd7, 0x06);
/* }} */
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
priv->abyBBVGA[0] = 0x1C;
@@ -2176,12 +2174,12 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->abyBBVGA[0] = 0x1C;
}
- if (byLocalID > REV_ID_VT3253_A1) {
- BBbWriteEmbedded(priv, 0x04, 0x7F);
- BBbWriteEmbedded(priv, 0x0D, 0x01);
+ if (by_local_id > REV_ID_VT3253_A1) {
+ bb_write_embedded(priv, 0x04, 0x7F);
+ bb_write_embedded(priv, 0x0D, 0x01);
}
- return bResult;
+ return result;
}
/*
@@ -2197,42 +2195,42 @@ bool BBbVT3253Init(struct vnt_private *priv)
*
*/
void
-BBvSetShortSlotTime(struct vnt_private *priv)
+bb_set_short_slot_time(struct vnt_private *priv)
{
- unsigned char byBBRxConf = 0;
- unsigned char byBBVGA = 0;
+ unsigned char by_bb_rx_conf = 0;
+ unsigned char by_bb_vga = 0;
- BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
+ bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */
if (priv->bShortSlotTime)
- byBBRxConf &= 0xDF; /* 1101 1111 */
+ by_bb_rx_conf &= 0xDF; /* 1101 1111 */
else
- byBBRxConf |= 0x20; /* 0010 0000 */
+ by_bb_rx_conf |= 0x20; /* 0010 0000 */
/* patch for 3253B0 Baseband with Cardbus module */
- BBbReadEmbedded(priv, 0xE7, &byBBVGA);
- if (byBBVGA == priv->abyBBVGA[0])
- byBBRxConf |= 0x20; /* 0010 0000 */
+ bb_read_embedded(priv, 0xE7, &by_bb_vga);
+ if (by_bb_vga == priv->abyBBVGA[0])
+ by_bb_rx_conf |= 0x20; /* 0010 0000 */
- BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
+ bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
}
-void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
+void bb_set_vga_gain_offset(struct vnt_private *priv, unsigned char by_data)
{
- unsigned char byBBRxConf = 0;
+ unsigned char by_bb_rx_conf = 0;
- BBbWriteEmbedded(priv, 0xE7, byData);
+ bb_write_embedded(priv, 0xE7, by_data);
- BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
+ bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */
/* patch for 3253B0 Baseband with Cardbus module */
- if (byData == priv->abyBBVGA[0])
- byBBRxConf |= 0x20; /* 0010 0000 */
+ if (by_data == priv->abyBBVGA[0])
+ by_bb_rx_conf |= 0x20; /* 0010 0000 */
else if (priv->bShortSlotTime)
- byBBRxConf &= 0xDF; /* 1101 1111 */
+ by_bb_rx_conf &= 0xDF; /* 1101 1111 */
else
- byBBRxConf |= 0x20; /* 0010 0000 */
- priv->byBBVGACurrent = byData;
- BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
+ by_bb_rx_conf |= 0x20; /* 0010 0000 */
+ priv->byBBVGACurrent = by_data;
+ bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
}
/*
@@ -2248,12 +2246,12 @@ void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
*
*/
void
-BBvSoftwareReset(struct vnt_private *priv)
+bb_software_reset(struct vnt_private *priv)
{
- BBbWriteEmbedded(priv, 0x50, 0x40);
- BBbWriteEmbedded(priv, 0x50, 0);
- BBbWriteEmbedded(priv, 0x9C, 0x01);
- BBbWriteEmbedded(priv, 0x9C, 0);
+ bb_write_embedded(priv, 0x50, 0x40);
+ bb_write_embedded(priv, 0x50, 0);
+ bb_write_embedded(priv, 0x9C, 0x01);
+ bb_write_embedded(priv, 0x9C, 0);
}
/*
@@ -2269,13 +2267,13 @@ BBvSoftwareReset(struct vnt_private *priv)
*
*/
void
-BBvPowerSaveModeON(struct vnt_private *priv)
+bb_power_save_mode_on(struct vnt_private *priv)
{
- unsigned char byOrgData;
+ unsigned char by_org_data;
- BBbReadEmbedded(priv, 0x0D, &byOrgData);
- byOrgData |= BIT(0);
- BBbWriteEmbedded(priv, 0x0D, byOrgData);
+ bb_read_embedded(priv, 0x0D, &by_org_data);
+ by_org_data |= BIT(0);
+ bb_write_embedded(priv, 0x0D, by_org_data);
}
/*
@@ -2291,13 +2289,13 @@ BBvPowerSaveModeON(struct vnt_private *priv)
*
*/
void
-BBvPowerSaveModeOFF(struct vnt_private *priv)
+bb_power_save_mode_off(struct vnt_private *priv)
{
- unsigned char byOrgData;
+ unsigned char by_org_data;
- BBbReadEmbedded(priv, 0x0D, &byOrgData);
- byOrgData &= ~(BIT(0));
- BBbWriteEmbedded(priv, 0x0D, byOrgData);
+ bb_read_embedded(priv, 0x0D, &by_org_data);
+ by_org_data &= ~(BIT(0));
+ bb_write_embedded(priv, 0x0D, by_org_data);
}
/*
@@ -2306,7 +2304,7 @@ BBvPowerSaveModeOFF(struct vnt_private *priv)
* Parameters:
* In:
* priv - Device Structure
- * byAntennaMode - Antenna Mode
+ * by_antenna_mode - Antenna Mode
* Out:
* none
*
@@ -2315,22 +2313,22 @@ BBvPowerSaveModeOFF(struct vnt_private *priv)
*/
void
-BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
+bb_set_tx_antenna_mode(struct vnt_private *priv, unsigned char by_antenna_mode)
{
- unsigned char byBBTxConf;
+ unsigned char by_bb_tx_conf;
- BBbReadEmbedded(priv, 0x09, &byBBTxConf); /* CR09 */
- if (byAntennaMode == ANT_DIVERSITY) {
+ bb_read_embedded(priv, 0x09, &by_bb_tx_conf); /* CR09 */
+ if (by_antenna_mode == ANT_DIVERSITY) {
/* bit 1 is diversity */
- byBBTxConf |= 0x02;
- } else if (byAntennaMode == ANT_A) {
+ by_bb_tx_conf |= 0x02;
+ } else if (by_antenna_mode == ANT_A) {
/* bit 2 is ANTSEL */
- byBBTxConf &= 0xF9; /* 1111 1001 */
- } else if (byAntennaMode == ANT_B) {
- byBBTxConf &= 0xFD; /* 1111 1101 */
- byBBTxConf |= 0x04;
+ by_bb_tx_conf &= 0xF9; /* 1111 1001 */
+ } else if (by_antenna_mode == ANT_B) {
+ by_bb_tx_conf &= 0xFD; /* 1111 1101 */
+ by_bb_tx_conf |= 0x04;
}
- BBbWriteEmbedded(priv, 0x09, byBBTxConf); /* CR09 */
+ bb_write_embedded(priv, 0x09, by_bb_tx_conf); /* CR09 */
}
/*
@@ -2339,7 +2337,7 @@ BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
* Parameters:
* In:
* priv - Device Structure
- * byAntennaMode - Antenna Mode
+ * by_antenna_mode - Antenna Mode
* Out:
* none
*
@@ -2348,25 +2346,25 @@ BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
*/
void
-BBvSetRxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
+bb_set_rx_antenna_mode(struct vnt_private *priv, unsigned char by_antenna_mode)
{
- unsigned char byBBRxConf;
+ unsigned char by_bb_rx_conf;
- BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
- if (byAntennaMode == ANT_DIVERSITY) {
- byBBRxConf |= 0x01;
+ bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */
+ if (by_antenna_mode == ANT_DIVERSITY) {
+ by_bb_rx_conf |= 0x01;
- } else if (byAntennaMode == ANT_A) {
- byBBRxConf &= 0xFC; /* 1111 1100 */
- } else if (byAntennaMode == ANT_B) {
- byBBRxConf &= 0xFE; /* 1111 1110 */
- byBBRxConf |= 0x02;
+ } else if (by_antenna_mode == ANT_A) {
+ by_bb_rx_conf &= 0xFC; /* 1111 1100 */
+ } else if (by_antenna_mode == ANT_B) {
+ by_bb_rx_conf &= 0xFE; /* 1111 1110 */
+ by_bb_rx_conf |= 0x02;
}
- BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
+ bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
}
/*
- * Description: BBvSetDeepSleep
+ * Description: bb_set_deep_sleep
*
* Parameters:
* In:
@@ -2378,15 +2376,9 @@ BBvSetRxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
*
*/
void
-BBvSetDeepSleep(struct vnt_private *priv, unsigned char byLocalID)
+bb_set_deep_sleep(struct vnt_private *priv, unsigned char by_local_id)
{
- BBbWriteEmbedded(priv, 0x0C, 0x17); /* CR12 */
- BBbWriteEmbedded(priv, 0x0D, 0xB9); /* CR13 */
+ bb_write_embedded(priv, 0x0C, 0x17); /* CR12 */
+ bb_write_embedded(priv, 0x0D, 0xB9); /* CR13 */
}
-void
-BBvExitDeepSleep(struct vnt_private *priv, unsigned char byLocalID)
-{
- BBbWriteEmbedded(priv, 0x0C, 0x00); /* CR12 */
- BBbWriteEmbedded(priv, 0x0D, 0x01); /* CR13 */
-}
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 0cc2e07829c5..9354ce724446 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -46,30 +46,31 @@
#define TOP_RATE_2M 0x00200000
#define TOP_RATE_1M 0x00100000
-unsigned int BBuGetFrameTime(unsigned char byPreambleType,
- unsigned char byPktType,
- unsigned int cbFrameLength,
- unsigned short wRate);
+unsigned int bb_get_frame_time(unsigned char by_preamble_type,
+ unsigned char by_pkt_type,
+ unsigned int cb_frame_length,
+ unsigned short w_rate);
void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy);
-bool BBbReadEmbedded(struct vnt_private *priv, unsigned char byBBAddr,
- unsigned char *pbyData);
-bool BBbWriteEmbedded(struct vnt_private *priv, unsigned char byBBAddr,
- unsigned char byData);
+bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
+ unsigned char *pby_data);
+bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
+ unsigned char by_data);
-void BBvSetShortSlotTime(struct vnt_private *priv);
-void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData);
+void bb_set_short_slot_time(struct vnt_private *priv);
+void bb_set_vga_gain_offset(struct vnt_private *priv, unsigned char by_data);
/* VT3253 Baseband */
-bool BBbVT3253Init(struct vnt_private *priv);
-void BBvSoftwareReset(struct vnt_private *priv);
-void BBvPowerSaveModeON(struct vnt_private *priv);
-void BBvPowerSaveModeOFF(struct vnt_private *priv);
-void BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode);
-void BBvSetRxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode);
-void BBvSetDeepSleep(struct vnt_private *priv, unsigned char byLocalID);
-void BBvExitDeepSleep(struct vnt_private *priv, unsigned char byLocalID);
+bool bb_vt3253_init(struct vnt_private *priv);
+void bb_software_reset(struct vnt_private *priv);
+void bb_power_save_mode_on(struct vnt_private *priv);
+void bb_power_save_mode_off(struct vnt_private *priv);
+void bb_set_tx_antenna_mode(struct vnt_private *priv,
+ unsigned char by_antenna_mode);
+void bb_set_rx_antenna_mode(struct vnt_private *priv,
+ unsigned char by_antenna_mode);
+void bb_set_deep_sleep(struct vnt_private *priv, unsigned char by_local_id);
#endif /* __BASEBAND_H__ */
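
Both bb_read_embedded() and bb_write_embedded() share the bounded-poll idiom seen in the baseband.c hunks above: kick the access off via MAC_REG_BBREGCTL, then spin up to W_MAX_TIMEOUT iterations waiting for BBREGCTL_DONE. Reduced to its core (val stands in for the local the driver calls by_value):

        MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR); /* start */

        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                VNSvInPortB(iobase + MAC_REG_BBREGCTL, &val);
                if (val & BBREGCTL_DONE)
                        break; /* access completed */
        }
        if (ww == W_MAX_TIMEOUT)
                pr_debug("BB register access timed out\n");
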
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index e65c9825ea5a..6148310c06d6 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -11,15 +11,12 @@
* CARDvUpdateBasicTopRate - Update BasicTopRate
* CARDbAddBasicRate - Add to BasicRateSet
* CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
- * CARDvSetLoopbackMode - Set Loopback mode
- * CARDbSoftwareReset - Sortware reset NIC
* CARDqGetTSFOffset - Calculate TSFOffset
* CARDbGetCurrentTSF - Read Current NIC TSF counter
* CARDqGetNextTBTT - Calculate Next Beacon TSF counter
* CARDvSetFirstNextTBTT - Set NIC Beacon time
* CARDvUpdateNextTBTT - Sync. NIC Beacon time
* CARDbRadioPowerOff - Turn Off NIC Radio Power
- * CARDbRadioPowerOn - Turn On NIC Radio Power
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
@@ -198,22 +195,22 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x20;
priv->abyBBVGA[2] = 0x10;
priv->abyBBVGA[3] = 0x10;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x1C)
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
MACvSetBBType(priv->PortOffset, BB_TYPE_11A);
priv->abyBBVGA[0] = 0x18;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x14) {
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
- BBbWriteEmbedded(priv, 0xE1, 0x57);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE1, 0x57);
}
} else {
MACvSetBBType(priv->PortOffset, BB_TYPE_11A);
}
- BBbWriteEmbedded(priv, 0x88, 0x03);
+ bb_write_embedded(priv, 0x88, 0x03);
bySlot = C_SLOT_SHORT;
bySIFS = C_SIFS_A;
byDIFS = C_SIFS_A + 2 * C_SLOT_SHORT;
@@ -224,19 +221,19 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
priv->abyBBVGA[3] = 0x00;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x20)
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
priv->abyBBVGA[0] = 0x14;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x18) {
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
- BBbWriteEmbedded(priv, 0xE1, 0xD3);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE1, 0xD3);
}
}
- BBbWriteEmbedded(priv, 0x88, 0x02);
+ bb_write_embedded(priv, 0x88, 0x02);
bySlot = C_SLOT_LONG;
bySIFS = C_SIFS_BG;
byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
@@ -247,19 +244,19 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
priv->abyBBVGA[3] = 0x00;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x20)
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
priv->abyBBVGA[0] = 0x14;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x18) {
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
- BBbWriteEmbedded(priv, 0xE1, 0xD3);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE1, 0xD3);
}
}
- BBbWriteEmbedded(priv, 0x88, 0x08);
+ bb_write_embedded(priv, 0x88, 0x08);
bySIFS = C_SIFS_BG;
if (priv->bShortSlotTime) {
@@ -310,7 +307,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->bySlot = bySlot;
VNSvOutPortB(priv->PortOffset + MAC_REG_SLOT, priv->bySlot);
- BBvSetShortSlotTime(priv);
+ bb_set_short_slot_time(priv);
}
if (priv->byCWMaxMin != byCWMaxMin) {
priv->byCWMaxMin = byCWMaxMin;
@@ -431,7 +428,7 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
- BBvSetDeepSleep(priv, priv->byLocalID);
+ bb_set_deep_sleep(priv, priv->byLocalID);
priv->bRadioOff = true;
pr_debug("chester power off\n");
@@ -439,60 +436,6 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
LED_ACTSET); /* LED issue */
}
-/*
- * Description: Turn on Radio power
- *
- * Parameters:
- * In:
- * priv - The adapter to be turned on
- * Out:
- * none
- *
- * Return Value: true if success; otherwise false
- */
-bool CARDbRadioPowerOn(struct vnt_private *priv)
-{
- bool bResult = true;
-
- pr_debug("chester power on\n");
- if (priv->bRadioControlOff) {
- if (priv->bHWRadioOff)
- pr_debug("chester bHWRadioOff\n");
- if (priv->bRadioControlOff)
- pr_debug("chester bRadioControlOff\n");
- return false; }
-
- if (!priv->bRadioOff) {
- pr_debug("chester pbRadioOff\n");
- return true; }
-
- BBvExitDeepSleep(priv, priv->byLocalID);
-
- MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
-
- switch (priv->byRFType) {
- case RF_RFMD2959:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
- SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
- SOFTPWRCTL_SWPE1);
- break;
-
- case RF_AIROHA:
- case RF_AL2230S:
- case RF_AIROHA7230:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
- (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
- break;
- }
-
- priv->bRadioOff = false;
- pr_debug("chester power on\n");
- MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0,
- LED_ACTSET); /* LED issue */
- return bResult;
-}
-
void CARDvSafeResetTx(struct vnt_private *priv)
{
unsigned int uu;
@@ -816,54 +759,6 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv)
}
/*
- * Description: Set NIC Loopback mode
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * wLoopbackMode - Loopback mode to be set
- * Out:
- * none
- *
- * Return Value: none
- */
-void CARDvSetLoopbackMode(struct vnt_private *priv,
- unsigned short wLoopbackMode)
-{
- switch (wLoopbackMode) {
- case CARD_LB_NONE:
- case CARD_LB_MAC:
- case CARD_LB_PHY:
- break;
- default:
- break;
- }
- /* set MAC loopback */
- MACvSetLoopbackMode(priv, LOBYTE(wLoopbackMode));
- /* set Baseband loopback */
-}
-
-/*
- * Description: Software Reset NIC
- *
- * Parameters:
- * In:
- * priv - The adapter to be reset
- * Out:
- * none
- *
- * Return Value: none
- */
-bool CARDbSoftwareReset(struct vnt_private *priv)
-{
- /* reset MAC */
- if (!MACbSafeSoftwareReset(priv))
- return false;
-
- return true;
-}
-
-/*
* Description: Calculate TSF offset of two TSF input
* Get TSF Offset from RxBCN's TSF and local TSF
*
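
The CARDbSetPhyParameter() hunks earlier in this file program baseband register 0x88 with a per-band value; the renamed helper makes the pattern easy to read. Summarized as a sketch (the real code uses if/else chains with additional BBVGA handling):

        switch (bb_type) {
        case BB_TYPE_11A:
                bb_write_embedded(priv, 0x88, 0x03);
                break;
        case BB_TYPE_11B:
                bb_write_embedded(priv, 0x88, 0x02);
                break;
        default: /* 11G */
                bb_write_embedded(priv, 0x88, 0x08);
                break;
        }
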
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 337266add6b2..568a2ddd6588 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -44,9 +44,6 @@ struct vnt_private;
void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type);
void CARDvUpdateBasicTopRate(struct vnt_private *priv);
bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
-void CARDvSetLoopbackMode(struct vnt_private *priv,
- unsigned short wLoopbackMode);
-bool CARDbSoftwareReset(struct vnt_private *priv);
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval);
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
@@ -58,7 +55,6 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv);
void CARDvSafeResetTx(struct vnt_private *priv);
void CARDvSafeResetRx(struct vnt_private *priv);
void CARDbRadioPowerOff(struct vnt_private *priv);
-bool CARDbRadioPowerOn(struct vnt_private *priv);
bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type);
bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
u64 qwBSSTimestamp);
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index dec6f0f23b88..62a85c1ca6c4 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -173,7 +173,7 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
priv->byBBVGACurrent != priv->abyBBVGA[0]) {
priv->byBBVGACurrent = priv->abyBBVGA[0];
- BBvSetVGAGainOffset(priv, priv->byBBVGACurrent);
+ bb_set_vga_gain_offset(priv, priv->byBBVGACurrent);
}
/* clear NAV */
@@ -195,7 +195,7 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
if (priv->bEnablePSMode)
RFvWriteWakeProgSyn(priv, priv->byRFType, ch->hw_value);
- BBvSoftwareReset(priv);
+ bb_software_reset(priv);
if (priv->byLocalID > REV_ID_VT3253_B1) {
unsigned long flags;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 5c86cc60eb5c..41cbec4134b0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -21,18 +21,17 @@
* device_alloc_rx_buf - rx buffer pre-allocated function
* device_free_rx_buf - free rx buffer function
* device_free_tx_buf - free tx buffer function
- * device_init_rd0_ring- initial rd dma0 ring
- * device_init_rd1_ring- initial rd dma1 ring
- * device_init_td0_ring- initial tx dma0 ring buffer
- * device_init_td1_ring- initial tx dma1 ring buffer
- * device_init_registers- initial MAC & BBP & RF internal registers.
- * device_init_rings- initial tx/rx ring buffer
- * device_free_rings- free all allocated ring buffer
- * device_tx_srv- tx interrupt service function
+ * device_init_rd0_ring - initialize rd dma0 ring
+ * device_init_rd1_ring - initialize rd dma1 ring
+ * device_init_td0_ring - initialize tx dma0 ring buffer
+ * device_init_td1_ring - initialize tx dma1 ring buffer
+ * device_init_registers - initialize MAC, BBP and RF internal registers
+ * device_init_rings - initialize tx/rx ring buffers
+ * device_free_rings - free all allocated ring buffers
+ * device_tx_srv - tx interrupt service function
*
* Revision History:
*/
-#undef __NO_VERSION__
#include <linux/file.h>
#include "device.h"
@@ -202,7 +201,7 @@ static void device_init_registers(struct vnt_private *priv)
unsigned char byOFDMPwrdBm = 0;
MACbShutdown(priv);
- BBvSoftwareReset(priv);
+ bb_software_reset(priv);
/* Do MACbSoftwareReset in MACvInitialize */
MACbSoftwareReset(priv);
@@ -279,8 +278,8 @@ static void device_init_registers(struct vnt_private *priv)
}
/* Set initial antenna mode */
- BBvSetTxAntennaMode(priv, priv->byTxAntennaMode);
- BBvSetRxAntennaMode(priv, priv->byRxAntennaMode);
+ bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
+ bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
/* zonetype initial */
priv->byOriginalZonetype = priv->abyEEPROM[EEP_OFS_ZONETYPE];
@@ -357,16 +356,16 @@ static void device_init_registers(struct vnt_private *priv)
VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
/* initialize BBP registers */
- BBbVT3253Init(priv);
+ bb_vt3253_init(priv);
if (priv->bUpdateBBVGA) {
priv->byBBVGACurrent = priv->abyBBVGA[0];
priv->byBBVGANew = priv->byBBVGACurrent;
- BBvSetVGAGainOffset(priv, priv->abyBBVGA[0]);
+ bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
}
- BBvSetRxAntennaMode(priv, priv->byRxAntennaMode);
- BBvSetTxAntennaMode(priv, priv->byTxAntennaMode);
+ bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
+ bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
/* Set BB and packet type at the same time. */
/* Set Short Slot Time, xIFS, and RSPINF. */
@@ -1001,7 +1000,7 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
if (priv->uBBVGADiffCount == 1) {
/* first VGA diff gain */
- BBvSetVGAGainOffset(priv, priv->byBBVGANew);
+ bb_set_vga_gain_offset(priv, priv->byBBVGANew);
dev_dbg(&priv->pcid->dev,
"First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
@@ -1017,7 +1016,7 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
priv->byBBVGACurrent,
(int)priv->uBBVGADiffCount);
- BBvSetVGAGainOffset(priv, priv->byBBVGANew);
+ bb_set_vga_gain_offset(priv, priv->byBBVGANew);
}
}
@@ -1445,7 +1444,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->bShortSlotTime = false;
CARDbSetPhyParameter(priv, priv->byBBType);
- BBvSetVGAGainOffset(priv, priv->abyBBVGA[0]);
+ bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
}
if (changed & BSS_CHANGED_TXPOWER)
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index d6ca6e5551a7..747d79265a7c 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -419,7 +419,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
+ bb_power_save_mode_off(priv); /* RobertYu:20050106, have DC value for Calibration */
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
@@ -443,7 +443,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- BBvPowerSaveModeON(priv); /* RobertYu:20050106 */
+ bb_power_save_mode_on(priv); /* RobertYu:20050106 */
/* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
/* 3-wire control for power saving mode */
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 37fcc42ed000..cfab64d2b312 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -165,16 +165,21 @@ s_uGetTxRsvTime(
{
unsigned int uDataTime, uAckTime;
- uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
- if (byPktType == PK_TYPE_11B) /* llb,CCK mode */
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopCCKBasicRate);
- else /* 11g 2.4G OFDM mode & 11a 5G OFDM mode */
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopOFDMBasicRate);
-
- if (bNeedAck)
- return uDataTime + pDevice->uSIFS + uAckTime;
- else
+ uDataTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
+
+ if (!bNeedAck)
return uDataTime;
+
+ /*
+ * CCK mode - 11b
+ * OFDM mode - 11g 2.4G & 11a 5G
+ */
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14,
+ byPktType == PK_TYPE_11B ?
+ pDevice->byTopCCKBasicRate :
+ pDevice->byTopOFDMBasicRate);
+
+ return uDataTime + pDevice->uSIFS + uAckTime;
}
static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
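The s_uGetTxRsvTime() rewrite above folds the duplicated BBuGetFrameTime() calls into a single bb_get_frame_time() whose rate argument is picked by a ternary, and the early return for the no-ACK case drops one nesting level. A minimal sketch of the resulting control flow, with hypothetical parameter names standing in for the driver's fields:

/*
 * Editor's sketch, not part of the patch; the names are stand-ins for
 * pDevice->uSIFS, byTopCCKBasicRate, byTopOFDMBasicRate and friends.
 */
unsigned int get_tx_rsv_time(unsigned int data_time, unsigned int sifs,
                             unsigned int ack_time_cck,
                             unsigned int ack_time_ofdm,
                             int pkt_is_11b, int need_ack)
{
        if (!need_ack)
                return data_time;

        /* ACK returns at the top CCK rate for 11b, top OFDM rate otherwise */
        return data_time + sifs + (pkt_is_11b ? ack_time_cck : ack_time_ofdm);
}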
@@ -195,24 +200,28 @@ s_uGetRTSCTSRsvTime(
unsigned short wCurrentRate
)
{
- unsigned int uRrvTime, uRTSTime, uCTSTime, uAckTime, uDataTime;
-
- uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
+ unsigned int uRrvTime = 0;
+ unsigned int uRTSTime = 0;
+ unsigned int uCTSTime = 0;
+ unsigned int uAckTime = 0;
+ unsigned int uDataTime = 0;
- uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wCurrentRate);
+ uDataTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, cbFrameLength, wCurrentRate);
if (byRTSRsvType == 0) { /* RTSTxRrvTime_bb */
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
- uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uRTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = uAckTime;
} else if (byRTSRsvType == 1) { /* RTSTxRrvTime_ba, only in 2.4GHZ */
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uRTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
} else if (byRTSRsvType == 2) { /* RTSTxRrvTime_aa */
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopOFDMBasicRate);
- uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uRTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 20, pDevice->byTopOFDMBasicRate);
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = uAckTime;
} else if (byRTSRsvType == 3) { /* CTSTxRrvTime_ba, only in 2.4GHZ */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
uRrvTime = uCTSTime + uAckTime + uDataTime + 2 * pDevice->uSIFS;
return cpu_to_le16((u16)uRrvTime);
}
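In s_uGetRTSCTSRsvTime() the chained `uCTSTime = uAckTime = ...` assignments are split so each value comes from exactly one bb_get_frame_time() call. Assuming the common tail below this hunk still sums the pieces as the pre-patch code did (three SIFS gaps for the RTS variants, two for CTS-to-self), the reservation arithmetic is:

/*
 * Editor's sketch of the reservation sums; the RTS branch reflects an
 * assumption about the unshown tail of the function, not a quote of it.
 */
unsigned int rts_cts_rsv_time(unsigned int rts, unsigned int cts,
                              unsigned int ack, unsigned int data,
                              unsigned int sifs, int cts_only)
{
        if (cts_only)                   /* CTSTxRrvTime_ba */
                return cts + ack + data + 2 * sifs;

        return rts + cts + ack + data + 3 * sifs;       /* RTS variants */
}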
@@ -239,138 +248,83 @@ s_uGetDataDuration(
)
{
bool bLastFrag = false;
- unsigned int uAckTime = 0, uNextPktTime = 0;
+ unsigned int uAckTime = 0, uNextPktTime = 0, len;
if (uFragIdx == (uMACfragNum - 1))
bLastFrag = true;
+ if (uFragIdx == (uMACfragNum - 2))
+ len = cbLastFragmentSize;
+ else
+ len = cbFrameLength;
+
switch (byDurType) {
case DATADUR_B: /* DATADUR_B */
- if (((uMACfragNum == 1)) || bLastFrag) {/* Non Frag or Last Frag */
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- return pDevice->uSIFS + uAckTime;
- } else {
+ if (bNeedAck) {
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType,
+ byPktType, 14,
+ pDevice->byTopCCKBasicRate);
+ }
+ /* Non Frag or Last Frag */
+ if ((uMACfragNum == 1) || bLastFrag) {
+ if (!bNeedAck)
return 0;
- }
- } else {/* First Frag or Mid Frag */
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
-
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- return pDevice->uSIFS + uAckTime + uNextPktTime;
- } else {
- return pDevice->uSIFS + uNextPktTime;
- }
+ } else {
+ /* First Frag or Mid Frag */
+ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType,
+ len, wRate, bNeedAck);
}
- break;
+
+ return pDevice->uSIFS + uAckTime + uNextPktTime;
case DATADUR_A: /* DATADUR_A */
- if (((uMACfragNum == 1)) || bLastFrag) {/* Non Frag or Last Frag */
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime;
- } else {
+ if (bNeedAck) {
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType,
+ byPktType, 14,
+ pDevice->byTopOFDMBasicRate);
+ }
+ /* Non Frag or Last Frag */
+ if ((uMACfragNum == 1) || bLastFrag) {
+ if (!bNeedAck)
return 0;
- }
- } else {/* First Frag or Mid Frag */
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
-
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime + uNextPktTime;
- } else {
- return pDevice->uSIFS + uNextPktTime;
- }
+ } else {
+ /* First Frag or Mid Frag */
+ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType,
+ len, wRate, bNeedAck);
}
- break;
+
+ return pDevice->uSIFS + uAckTime + uNextPktTime;
case DATADUR_A_F0: /* DATADUR_A_F0 */
- if (((uMACfragNum == 1)) || bLastFrag) {/* Non Frag or Last Frag */
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime;
- } else {
+ case DATADUR_A_F1: /* DATADUR_A_F1 */
+ if (bNeedAck) {
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType,
+ byPktType, 14,
+ pDevice->byTopOFDMBasicRate);
+ }
+ /* Non Frag or Last Frag */
+ if ((uMACfragNum == 1) || bLastFrag) {
+ if (!bNeedAck)
return 0;
- }
- } else { /* First Frag or Mid Frag */
- if (byFBOption == AUTO_FB_0) {
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
-
- } else { /* (byFBOption == AUTO_FB_1) */
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- }
+ } else {
+ /* First Frag or Mid Frag */
+ if (wRate < RATE_18M)
+ wRate = RATE_18M;
+ else if (wRate > RATE_54M)
+ wRate = RATE_54M;
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime + uNextPktTime;
- } else {
- return pDevice->uSIFS + uNextPktTime;
- }
- }
- break;
+ wRate -= RATE_18M;
- case DATADUR_A_F1: /* DATADUR_A_F1 */
- if (((uMACfragNum == 1)) || bLastFrag) { /* Non Frag or Last Frag */
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime;
- } else {
- return 0;
- }
- } else { /* First Frag or Mid Frag */
- if (byFBOption == AUTO_FB_0) {
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
-
- } else { /* (byFBOption == AUTO_FB_1) */
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- }
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime + uNextPktTime;
- } else {
- return pDevice->uSIFS + uNextPktTime;
- }
+ if (byFBOption == AUTO_FB_0)
+ wRate = wFB_Opt0[FB_RATE0][wRate];
+ else
+ wRate = wFB_Opt1[FB_RATE0][wRate];
+
+ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType,
+ len, wRate, bNeedAck);
}
- break;
+
+ return pDevice->uSIFS + uAckTime + uNextPktTime;
default:
break;
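All DATADUR_* arms in s_uGetDataDuration() now share one exit expression: uAckTime stays 0 when no ACK is expected, uNextPktTime stays 0 on the last (or only) fragment, and the hoisted `len` picks the last-fragment size one step early. Schematically:

/* Editor's sketch of the unified shape; names are hypothetical. */
unsigned int data_duration(unsigned int sifs, unsigned int ack_time,
                           unsigned int next_pkt_time,
                           int last_or_only_frag, int need_ack)
{
        if (last_or_only_frag && !need_ack)
                return 0;

        /*
         * ack_time is 0 without an ACK, next_pkt_time is 0 on the last
         * fragment, so one sum covers every remaining case.
         */
        return sifs + ack_time + next_pkt_time;
}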
@@ -396,17 +350,17 @@ s_uGetRTSCTSDuration(
switch (byDurType) {
case RTSDUR_BB: /* RTSDuration_bb */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_BA: /* RTSDuration_ba */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_AA: /* RTSDuration_aa */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
@@ -415,7 +369,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_BA_F0: /* RTSDuration_ba_f0 */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -424,7 +378,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_AA_F0: /* RTSDuration_aa_f0 */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -433,7 +387,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_BA_F1: /* RTSDuration_ba_f1 */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -442,7 +396,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_AA_F1: /* RTSDuration_aa_f1 */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -1040,16 +994,14 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
bool bRTS = (bool)(fifo_ctl & FIFOCTL_RTS);
struct vnt_tx_desc *ptdCurr;
unsigned int cbHeaderLength = 0;
- void *pvRrvTime;
- struct vnt_mic_hdr *pMICHDR;
- void *pvRTS;
- void *pvCTS;
- void *pvTxDataHd;
+ void *pvRrvTime = NULL;
+ struct vnt_mic_hdr *pMICHDR = NULL;
+ void *pvRTS = NULL;
+ void *pvCTS = NULL;
+ void *pvTxDataHd = NULL;
unsigned short wTxBufSize; /* FFinfo size */
unsigned char byFBOption = AUTO_FB_NONE;
- pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
-
cbFrameSize = skb->len + 4;
if (info->control.hw_key) {
diff --git a/drivers/staging/vt6656/Makefile b/drivers/staging/vt6656/Makefile
index 375f54e9f58b..f696a9d7a143 100644
--- a/drivers/staging/vt6656/Makefile
+++ b/drivers/staging/vt6656/Makefile
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# TODO: all of these should be removed
-ccflags-y := -DLINUX -D__KERNEL__ -DEXPORT_SYMTAB -D__NO_VERSION__
-ccflags-y += -DHOSTAP
vt6656_stage-y += main_usb.o \
card.o \
@@ -13,7 +10,6 @@ vt6656_stage-y += main_usb.o \
key.o \
rf.o \
usbpipe.o \
- channel.o \
- firmware.o
+ channel.o
obj-$(CONFIG_VT6656) += vt6656_stage.o
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index a19a563d8bcc..41ae779ec61f 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -23,13 +23,15 @@
*/
#include <linux/bits.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
+#include "device.h"
#include "mac.h"
#include "baseband.h"
#include "rf.h"
#include "usbpipe.h"
-static u8 vnt_vt3184_agc[] = {
+static const u8 vnt_vt3184_agc[] = {
0x00, 0x00, 0x02, 0x02, 0x04, 0x04, 0x06, 0x06,
0x08, 0x08, 0x0a, 0x0a, 0x0c, 0x0c, 0x0e, 0x0e, /* 0x0f */
0x10, 0x10, 0x12, 0x12, 0x14, 0x14, 0x16, 0x16,
@@ -76,7 +78,7 @@ static u8 vnt_vt3184_al2230[] = {
};
/* {{RobertYu:20060515, new BB setting for VT3226D0 */
-static u8 vnt_vt3184_vt3226d0[] = {
+static const u8 vnt_vt3184_vt3226d0[] = {
0x31, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00,
0x70, 0x45, 0x2a, 0x76, 0x00, 0x00, 0x80, 0x00, /* 0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -111,198 +113,85 @@ static u8 vnt_vt3184_vt3226d0[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* 0xff */
};
-static const u16 vnt_frame_time[MAX_RATE] = {
- 10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216
+struct vnt_threshold {
+ u8 bb_pre_ed_rssi;
+ u8 cr_201;
+ u8 cr_206;
};
-/*
- * Description: Calculate data frame transmitting time
- *
- * Parameters:
- * In:
- * preamble_type - Preamble Type
- * pkt_type - PK_TYPE_11A, PK_TYPE_11B, PK_TYPE_11GB, PK_TYPE_11GA
- * frame_length - Baseband Type
- * tx_rate - Tx Rate
- * Out:
- *
- * Return Value: FrameTime
- *
- */
-unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
- unsigned int frame_length, u16 tx_rate)
-{
- unsigned int frame_time;
- unsigned int preamble;
- unsigned int rate = 0;
-
- if (tx_rate > RATE_54M)
- return 0;
-
- rate = (unsigned int)vnt_frame_time[tx_rate];
-
- if (tx_rate <= 3) {
- if (preamble_type == 1)
- preamble = 96;
- else
- preamble = 192;
-
- frame_time = DIV_ROUND_UP(frame_length * 80, rate);
- return preamble + frame_time;
- }
-
- frame_time = DIV_ROUND_UP(frame_length * 8 + 22, rate);
- frame_time = frame_time * 4;
-
- if (pkt_type != PK_TYPE_11A)
- frame_time += 6;
- return 20 + frame_time;
-}
-
-/*
- * Description: Calculate Length, Service, and Signal fields of Phy for Tx
- *
- * Parameters:
- * In:
- * priv - Device Structure
- * frame_length - Tx Frame Length
- * tx_rate - Tx Rate
- * Out:
- * struct vnt_phy_field *phy
- * - pointer to Phy Length field
- * - pointer to Phy Service field
- * - pointer to Phy Signal field
- *
- * Return Value: none
- *
- */
-void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
-{
- u32 bit_count;
- u32 count = 0;
- u32 tmp;
- int ext_bit;
- u8 preamble_type = priv->preamble_type;
-
- bit_count = frame_length * 8;
- ext_bit = false;
-
- switch (tx_rate) {
- case RATE_1M:
- count = bit_count;
-
- phy->signal = 0x00;
-
- break;
- case RATE_2M:
- count = bit_count / 2;
-
- if (preamble_type == 1)
- phy->signal = 0x09;
- else
- phy->signal = 0x01;
-
- break;
- case RATE_5M:
- count = DIV_ROUND_UP(bit_count * 10, 55);
-
- if (preamble_type == 1)
- phy->signal = 0x0a;
- else
- phy->signal = 0x02;
-
- break;
- case RATE_11M:
- count = bit_count / 11;
- tmp = count * 11;
-
- if (tmp != bit_count) {
- count++;
-
- if ((bit_count - tmp) <= 3)
- ext_bit = true;
- }
-
- if (preamble_type == 1)
- phy->signal = 0x0b;
- else
- phy->signal = 0x03;
-
- break;
- case RATE_6M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9b;
- else
- phy->signal = 0x8b;
-
- break;
- case RATE_9M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9f;
- else
- phy->signal = 0x8f;
-
- break;
- case RATE_12M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9a;
- else
- phy->signal = 0x8a;
-
- break;
- case RATE_18M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9e;
- else
- phy->signal = 0x8e;
-
- break;
- case RATE_24M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x99;
- else
- phy->signal = 0x89;
-
- break;
- case RATE_36M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9d;
- else
- phy->signal = 0x8d;
-
- break;
- case RATE_48M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x98;
- else
- phy->signal = 0x88;
+static const struct vnt_threshold al2230_vnt_threshold[] = {
+ {0, 0x00, 0x30}, /* Max sensitivity */
+ {68, 0x00, 0x36},
+ {67, 0x00, 0x43},
+ {66, 0x00, 0x51},
+ {65, 0x00, 0x62},
+ {64, 0x00, 0x79},
+ {63, 0x00, 0x93},
+ {62, 0x00, 0xb9},
+ {61, 0x00, 0xe3},
+ {60, 0x01, 0x18},
+ {59, 0x01, 0x54},
+ {58, 0x01, 0xa0},
+ {57, 0x02, 0x20},
+ {56, 0x02, 0xa0},
+ {55, 0x03, 0x00},
+ {53, 0x06, 0x00},
+ {51, 0x09, 0x00},
+ {49, 0x0e, 0x00},
+ {47, 0x15, 0x00},
+ {46, 0x1a, 0x00},
+ {45, 0xff, 0x00}
+};
- break;
- case RATE_54M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9c;
- else
- phy->signal = 0x8c;
- break;
- default:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9c;
- else
- phy->signal = 0x8c;
- break;
- }
+static const struct vnt_threshold vt3226_vnt_threshold[] = {
+ {0, 0x00, 0x24}, /* Max sensitivity */
+ {68, 0x00, 0x2d},
+ {67, 0x00, 0x36},
+ {66, 0x00, 0x43},
+ {65, 0x00, 0x52},
+ {64, 0x00, 0x68},
+ {63, 0x00, 0x80},
+ {62, 0x00, 0x9c},
+ {61, 0x00, 0xc0},
+ {60, 0x00, 0xea},
+ {59, 0x01, 0x30},
+ {58, 0x01, 0x70},
+ {57, 0x01, 0xb0},
+ {56, 0x02, 0x30},
+ {55, 0x02, 0xc0},
+ {53, 0x04, 0x00},
+ {51, 0x07, 0x00},
+ {49, 0x0a, 0x00},
+ {47, 0x11, 0x00},
+ {45, 0x18, 0x00},
+ {43, 0x26, 0x00},
+ {42, 0x36, 0x00},
+ {41, 0xff, 0x00}
+};
- if (pkt_type == PK_TYPE_11B) {
- phy->service = 0x00;
- if (ext_bit)
- phy->service |= 0x80;
- phy->len = cpu_to_le16((u16)count);
- } else {
- phy->service = 0x00;
- phy->len = cpu_to_le16((u16)frame_length);
- }
-}
+static const struct vnt_threshold vt3342_vnt_threshold[] = {
+ {0, 0x00, 0x38}, /* Max sensitivity */
+ {66, 0x00, 0x43},
+ {65, 0x00, 0x52},
+ {64, 0x00, 0x68},
+ {63, 0x00, 0x80},
+ {62, 0x00, 0x9c},
+ {61, 0x00, 0xc0},
+ {60, 0x00, 0xea},
+ {59, 0x01, 0x30},
+ {58, 0x01, 0x70},
+ {57, 0x01, 0xb0},
+ {56, 0x02, 0x30},
+ {55, 0x02, 0xc0},
+ {53, 0x04, 0x00},
+ {51, 0x07, 0x00},
+ {49, 0x0a, 0x00},
+ {47, 0x11, 0x00},
+ {45, 0x18, 0x00},
+ {43, 0x26, 0x00},
+ {42, 0x36, 0x00},
+ {41, 0xff, 0x00}
+};
/*
* Description: Set Antenna mode
@@ -352,9 +241,10 @@ int vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode)
int vnt_vt3184_init(struct vnt_private *priv)
{
- int ret = 0;
+ int ret;
u16 length;
- u8 *addr;
+ u8 *addr = NULL;
+ const u8 *c_addr;
u8 data;
ret = vnt_control_in(priv, MESSAGE_TYPE_READ, 0, MESSAGE_REQUEST_EEPROM,
@@ -366,23 +256,15 @@ int vnt_vt3184_init(struct vnt_private *priv)
dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type);
- if (priv->rf_type == RF_AL2230 ||
- priv->rf_type == RF_AL2230S) {
+ if ((priv->rf_type == RF_AL2230) ||
+ (priv->rf_type == RF_AL2230S) ||
+ (priv->rf_type == RF_AIROHA7230)) {
priv->bb_rx_conf = vnt_vt3184_al2230[10];
length = sizeof(vnt_vt3184_al2230);
addr = vnt_vt3184_al2230;
- priv->bb_vga[0] = 0x1C;
- priv->bb_vga[1] = 0x10;
- priv->bb_vga[2] = 0x0;
- priv->bb_vga[3] = 0x0;
-
- } else if (priv->rf_type == RF_AIROHA7230) {
- priv->bb_rx_conf = vnt_vt3184_al2230[10];
- length = sizeof(vnt_vt3184_al2230);
- addr = vnt_vt3184_al2230;
-
- addr[0xd7] = 0x06;
+ if (priv->rf_type == RF_AIROHA7230)
+ addr[0xd7] = 0x06;
priv->bb_vga[0] = 0x1c;
priv->bb_vga[1] = 0x10;
@@ -390,25 +272,11 @@ int vnt_vt3184_init(struct vnt_private *priv)
priv->bb_vga[3] = 0x0;
} else if ((priv->rf_type == RF_VT3226) ||
- (priv->rf_type == RF_VT3226D0)) {
+ (priv->rf_type == RF_VT3226D0) ||
+ (priv->rf_type == RF_VT3342A0)) {
priv->bb_rx_conf = vnt_vt3184_vt3226d0[10];
length = sizeof(vnt_vt3184_vt3226d0);
- addr = vnt_vt3184_vt3226d0;
-
- priv->bb_vga[0] = 0x20;
- priv->bb_vga[1] = 0x10;
- priv->bb_vga[2] = 0x0;
- priv->bb_vga[3] = 0x0;
-
- /* Fix VT3226 DFC system timing issue */
- ret = vnt_mac_reg_bits_on(priv, MAC_REG_SOFTPWRCTL2,
- SOFTPWRCTL_RFLEOPT);
- if (ret)
- goto end;
- } else if (priv->rf_type == RF_VT3342A0) {
- priv->bb_rx_conf = vnt_vt3184_vt3226d0[10];
- length = sizeof(vnt_vt3184_vt3226d0);
- addr = vnt_vt3184_vt3226d0;
+ c_addr = vnt_vt3184_vt3226d0;
priv->bb_vga[0] = 0x20;
priv->bb_vga[1] = 0x10;
@@ -424,8 +292,11 @@ int vnt_vt3184_init(struct vnt_private *priv)
goto end;
}
+ if (addr)
+ c_addr = addr;
+
ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
- MESSAGE_REQUEST_BBREG, length, addr);
+ MESSAGE_REQUEST_BBREG, length, c_addr);
if (ret)
goto end;
@@ -435,19 +306,13 @@ int vnt_vt3184_init(struct vnt_private *priv)
if (ret)
goto end;
- if (priv->rf_type == RF_VT3226 ||
- priv->rf_type == RF_VT3342A0) {
- ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x23);
- if (ret)
- goto end;
+ if ((priv->rf_type == RF_VT3226) ||
+ (priv->rf_type == RF_VT3342A0) ||
+ (priv->rf_type == RF_VT3226D0)) {
+ data = (priv->rf_type == RF_VT3226D0) ? 0x11 : 0x23;
- ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, BIT(0));
- if (ret)
- goto end;
- } else if (priv->rf_type == RF_VT3226D0) {
ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x11);
+ MAC_REG_ITRTMSET, data);
if (ret)
goto end;
@@ -507,21 +372,22 @@ int vnt_set_short_slot_time(struct vnt_private *priv)
ret = vnt_control_in_u8(priv, MESSAGE_REQUEST_BBREG, 0xe7, &bb_vga);
if (ret)
- goto end;
+ return ret;
if (bb_vga == priv->bb_vga[0])
priv->bb_rx_conf |= 0x20;
- ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0a,
- priv->bb_rx_conf);
-
-end:
- return ret;
+ return vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0a,
+ priv->bb_rx_conf);
}
-void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data)
+int vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data)
{
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xE7, data);
+ int ret;
+
+ ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xE7, data);
+ if (ret)
+ return ret;
/* patch for 3253B0 Baseband with Cardbus module */
if (priv->short_slot_time)
@@ -529,7 +395,8 @@ void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data)
else
priv->bb_rx_conf |= 0x20; /* 0010 0000 */
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0a, priv->bb_rx_conf);
+ return vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0a,
+ priv->bb_rx_conf);
}
/*
@@ -570,268 +437,57 @@ int vnt_exit_deep_sleep(struct vnt_private *priv)
return vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0d, 0x01);
}
-void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning)
+int vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning)
{
- u8 cr_201 = 0x0, cr_206 = 0x0;
- u8 ed_inx = priv->bb_pre_ed_index;
+ const struct vnt_threshold *threshold = NULL;
+ u8 length;
+ u8 cr_201, cr_206;
+ u8 ed_inx;
+ int ret;
switch (priv->rf_type) {
case RF_AL2230:
case RF_AL2230S:
case RF_AIROHA7230:
- if (scanning) { /* Max sensitivity */
- ed_inx = 0;
- cr_206 = 0x30;
- break;
- }
-
- if (priv->bb_pre_ed_rssi <= 45) {
- ed_inx = 20;
- cr_201 = 0xff;
- } else if (priv->bb_pre_ed_rssi <= 46) {
- ed_inx = 19;
- cr_201 = 0x1a;
- } else if (priv->bb_pre_ed_rssi <= 47) {
- ed_inx = 18;
- cr_201 = 0x15;
- } else if (priv->bb_pre_ed_rssi <= 49) {
- ed_inx = 17;
- cr_201 = 0xe;
- } else if (priv->bb_pre_ed_rssi <= 51) {
- ed_inx = 16;
- cr_201 = 0x9;
- } else if (priv->bb_pre_ed_rssi <= 53) {
- ed_inx = 15;
- cr_201 = 0x6;
- } else if (priv->bb_pre_ed_rssi <= 55) {
- ed_inx = 14;
- cr_201 = 0x3;
- } else if (priv->bb_pre_ed_rssi <= 56) {
- ed_inx = 13;
- cr_201 = 0x2;
- cr_206 = 0xa0;
- } else if (priv->bb_pre_ed_rssi <= 57) {
- ed_inx = 12;
- cr_201 = 0x2;
- cr_206 = 0x20;
- } else if (priv->bb_pre_ed_rssi <= 58) {
- ed_inx = 11;
- cr_201 = 0x1;
- cr_206 = 0xa0;
- } else if (priv->bb_pre_ed_rssi <= 59) {
- ed_inx = 10;
- cr_201 = 0x1;
- cr_206 = 0x54;
- } else if (priv->bb_pre_ed_rssi <= 60) {
- ed_inx = 9;
- cr_201 = 0x1;
- cr_206 = 0x18;
- } else if (priv->bb_pre_ed_rssi <= 61) {
- ed_inx = 8;
- cr_206 = 0xe3;
- } else if (priv->bb_pre_ed_rssi <= 62) {
- ed_inx = 7;
- cr_206 = 0xb9;
- } else if (priv->bb_pre_ed_rssi <= 63) {
- ed_inx = 6;
- cr_206 = 0x93;
- } else if (priv->bb_pre_ed_rssi <= 64) {
- ed_inx = 5;
- cr_206 = 0x79;
- } else if (priv->bb_pre_ed_rssi <= 65) {
- ed_inx = 4;
- cr_206 = 0x62;
- } else if (priv->bb_pre_ed_rssi <= 66) {
- ed_inx = 3;
- cr_206 = 0x51;
- } else if (priv->bb_pre_ed_rssi <= 67) {
- ed_inx = 2;
- cr_206 = 0x43;
- } else if (priv->bb_pre_ed_rssi <= 68) {
- ed_inx = 1;
- cr_206 = 0x36;
- } else {
- ed_inx = 0;
- cr_206 = 0x30;
- }
+ threshold = al2230_vnt_threshold;
+ length = ARRAY_SIZE(al2230_vnt_threshold);
break;
case RF_VT3226:
case RF_VT3226D0:
- if (scanning) { /* Max sensitivity */
- ed_inx = 0;
- cr_206 = 0x24;
- break;
- }
-
- if (priv->bb_pre_ed_rssi <= 41) {
- ed_inx = 22;
- cr_201 = 0xff;
- } else if (priv->bb_pre_ed_rssi <= 42) {
- ed_inx = 21;
- cr_201 = 0x36;
- } else if (priv->bb_pre_ed_rssi <= 43) {
- ed_inx = 20;
- cr_201 = 0x26;
- } else if (priv->bb_pre_ed_rssi <= 45) {
- ed_inx = 19;
- cr_201 = 0x18;
- } else if (priv->bb_pre_ed_rssi <= 47) {
- ed_inx = 18;
- cr_201 = 0x11;
- } else if (priv->bb_pre_ed_rssi <= 49) {
- ed_inx = 17;
- cr_201 = 0xa;
- } else if (priv->bb_pre_ed_rssi <= 51) {
- ed_inx = 16;
- cr_201 = 0x7;
- } else if (priv->bb_pre_ed_rssi <= 53) {
- ed_inx = 15;
- cr_201 = 0x4;
- } else if (priv->bb_pre_ed_rssi <= 55) {
- ed_inx = 14;
- cr_201 = 0x2;
- cr_206 = 0xc0;
- } else if (priv->bb_pre_ed_rssi <= 56) {
- ed_inx = 13;
- cr_201 = 0x2;
- cr_206 = 0x30;
- } else if (priv->bb_pre_ed_rssi <= 57) {
- ed_inx = 12;
- cr_201 = 0x1;
- cr_206 = 0xb0;
- } else if (priv->bb_pre_ed_rssi <= 58) {
- ed_inx = 11;
- cr_201 = 0x1;
- cr_206 = 0x70;
- } else if (priv->bb_pre_ed_rssi <= 59) {
- ed_inx = 10;
- cr_201 = 0x1;
- cr_206 = 0x30;
- } else if (priv->bb_pre_ed_rssi <= 60) {
- ed_inx = 9;
- cr_206 = 0xea;
- } else if (priv->bb_pre_ed_rssi <= 61) {
- ed_inx = 8;
- cr_206 = 0xc0;
- } else if (priv->bb_pre_ed_rssi <= 62) {
- ed_inx = 7;
- cr_206 = 0x9c;
- } else if (priv->bb_pre_ed_rssi <= 63) {
- ed_inx = 6;
- cr_206 = 0x80;
- } else if (priv->bb_pre_ed_rssi <= 64) {
- ed_inx = 5;
- cr_206 = 0x68;
- } else if (priv->bb_pre_ed_rssi <= 65) {
- ed_inx = 4;
- cr_206 = 0x52;
- } else if (priv->bb_pre_ed_rssi <= 66) {
- ed_inx = 3;
- cr_206 = 0x43;
- } else if (priv->bb_pre_ed_rssi <= 67) {
- ed_inx = 2;
- cr_206 = 0x36;
- } else if (priv->bb_pre_ed_rssi <= 68) {
- ed_inx = 1;
- cr_206 = 0x2d;
- } else {
- ed_inx = 0;
- cr_206 = 0x24;
- }
+ threshold = vt3226_vnt_threshold;
+ length = ARRAY_SIZE(vt3226_vnt_threshold);
break;
case RF_VT3342A0:
- if (scanning) { /* need Max sensitivity */
- ed_inx = 0;
- cr_206 = 0x38;
- break;
- }
-
- if (priv->bb_pre_ed_rssi <= 41) {
- ed_inx = 20;
- cr_201 = 0xff;
- } else if (priv->bb_pre_ed_rssi <= 42) {
- ed_inx = 19;
- cr_201 = 0x36;
- } else if (priv->bb_pre_ed_rssi <= 43) {
- ed_inx = 18;
- cr_201 = 0x26;
- } else if (priv->bb_pre_ed_rssi <= 45) {
- ed_inx = 17;
- cr_201 = 0x18;
- } else if (priv->bb_pre_ed_rssi <= 47) {
- ed_inx = 16;
- cr_201 = 0x11;
- } else if (priv->bb_pre_ed_rssi <= 49) {
- ed_inx = 15;
- cr_201 = 0xa;
- } else if (priv->bb_pre_ed_rssi <= 51) {
- ed_inx = 14;
- cr_201 = 0x7;
- } else if (priv->bb_pre_ed_rssi <= 53) {
- ed_inx = 13;
- cr_201 = 0x4;
- } else if (priv->bb_pre_ed_rssi <= 55) {
- ed_inx = 12;
- cr_201 = 0x2;
- cr_206 = 0xc0;
- } else if (priv->bb_pre_ed_rssi <= 56) {
- ed_inx = 11;
- cr_201 = 0x2;
- cr_206 = 0x30;
- } else if (priv->bb_pre_ed_rssi <= 57) {
- ed_inx = 10;
- cr_201 = 0x1;
- cr_206 = 0xb0;
- } else if (priv->bb_pre_ed_rssi <= 58) {
- ed_inx = 9;
- cr_201 = 0x1;
- cr_206 = 0x70;
- } else if (priv->bb_pre_ed_rssi <= 59) {
- ed_inx = 8;
- cr_201 = 0x1;
- cr_206 = 0x30;
- } else if (priv->bb_pre_ed_rssi <= 60) {
- ed_inx = 7;
- cr_206 = 0xea;
- } else if (priv->bb_pre_ed_rssi <= 61) {
- ed_inx = 6;
- cr_206 = 0xc0;
- } else if (priv->bb_pre_ed_rssi <= 62) {
- ed_inx = 5;
- cr_206 = 0x9c;
- } else if (priv->bb_pre_ed_rssi <= 63) {
- ed_inx = 4;
- cr_206 = 0x80;
- } else if (priv->bb_pre_ed_rssi <= 64) {
- ed_inx = 3;
- cr_206 = 0x68;
- } else if (priv->bb_pre_ed_rssi <= 65) {
- ed_inx = 2;
- cr_206 = 0x52;
- } else if (priv->bb_pre_ed_rssi <= 66) {
- ed_inx = 1;
- cr_206 = 0x43;
- } else {
- ed_inx = 0;
- cr_206 = 0x38;
- }
+ threshold = vt3342_vnt_threshold;
+ length = ARRAY_SIZE(vt3342_vnt_threshold);
break;
}
+ if (!threshold)
+ return -EINVAL;
+
+ for (ed_inx = scanning ? 0 : length - 1; ed_inx > 0; ed_inx--) {
+ if (priv->bb_pre_ed_rssi <= threshold[ed_inx].bb_pre_ed_rssi)
+ break;
+ }
+
+ cr_201 = threshold[ed_inx].cr_201;
+ cr_206 = threshold[ed_inx].cr_206;
+
if (ed_inx == priv->bb_pre_ed_index && !scanning)
- return;
+ return 0;
priv->bb_pre_ed_index = ed_inx;
dev_dbg(&priv->usb->dev, "%s bb_pre_ed_rssi %d\n",
__func__, priv->bb_pre_ed_rssi);
- if (!cr_201 && !cr_206)
- return;
+ ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xc9, cr_201);
+ if (ret)
+ return ret;
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xc9, cr_201);
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xce, cr_206);
+ return vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xce, cr_206);
}
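The three per-RF if/else ladders collapse into the tables above plus one descending scan: start at the least-sensitive row and walk toward index 0, stopping at the first row whose bb_pre_ed_rssi bound covers the current RSSI; scanning skips the walk and forces row 0 (maximum sensitivity). A self-contained illustration with a hypothetical three-row table:

#include <stdio.h>

struct threshold { unsigned char rssi, cr_201, cr_206; };

/* Hypothetical table in the same layout as the driver's. */
static const struct threshold tbl[] = {
        {0,  0x00, 0x30},       /* row 0: max sensitivity */
        {68, 0x00, 0x36},
        {45, 0xff, 0x00},
};

static unsigned int pick_row(unsigned char rssi, int scanning)
{
        unsigned int i;

        /* same loop shape as the patch: last row down toward row 0 */
        for (i = scanning ? 0 : 2; i > 0; i--)
                if (rssi <= tbl[i].rssi)
                        break;
        return i;
}

int main(void)
{
        printf("rssi 50 -> row %u\n", pick_row(50, 0)); /* prints row 1 */
        return 0;
}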
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index dc42aa6ae1d9..12456ebc23ec 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -66,25 +66,12 @@
#define TOP_RATE_2M 0x00200000
#define TOP_RATE_1M 0x00100000
-/* Length, Service, and Signal fields of Phy for Tx */
-struct vnt_phy_field {
- u8 signal;
- u8 service;
- __le16 len;
-} __packed;
-
-unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
- unsigned int frame_length, u16 tx_rate);
-
-void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy);
-
int vnt_set_short_slot_time(struct vnt_private *priv);
-void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data);
+int vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data);
int vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode);
int vnt_vt3184_init(struct vnt_private *priv);
int vnt_set_deep_sleep(struct vnt_private *priv);
int vnt_exit_deep_sleep(struct vnt_private *priv);
-void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning);
+int vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning);
#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index dc3ab10eb630..10f3dfda83b5 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -26,7 +26,8 @@
*
*/
-#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
#include "device.h"
#include "card.h"
#include "baseband.h"
@@ -45,20 +46,12 @@ static const u16 cw_rxbcntsf_off[MAX_RATE] = {
192, 96, 34, 17, 34, 23, 17, 11, 8, 5, 4, 3
};
-/*
- * Description: Set NIC media channel
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * connection_channel - Channel to be set
- * Out:
- * none
- */
-void vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
+int vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
{
+ int ret;
+
if (connection_channel > CB_MAX_CHANNEL || !connection_channel)
- return;
+ return -EINVAL;
/* clear NAV */
vnt_mac_reg_bits_on(priv, MAC_REG_MACCR, MACCR_CLRNAV);
@@ -67,284 +60,73 @@ void vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
vnt_mac_reg_bits_off(priv, MAC_REG_CHANNEL,
(BIT(7) | BIT(5) | BIT(4)));
- vnt_control_out(priv, MESSAGE_TYPE_SELECT_CHANNEL,
- connection_channel, 0, 0, NULL);
-
- vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG, MAC_REG_CHANNEL,
- (u8)(connection_channel | 0x80));
-}
-
-/*
- * Description: Get CCK mode basic rate
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * rate_idx - Receiving data rate
- * Out:
- * none
- *
- * Return Value: response Control frame rate
- *
- */
-static u16 vnt_get_cck_rate(struct vnt_private *priv, u16 rate_idx)
-{
- u16 ui = rate_idx;
-
- while (ui > RATE_1M) {
- if (priv->basic_rates & (1 << ui))
- return ui;
- ui--;
- }
+ ret = vnt_control_out(priv, MESSAGE_TYPE_SELECT_CHANNEL,
+ connection_channel, 0, 0, NULL);
+ if (ret)
+ return ret;
- return RATE_1M;
+ return vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG, MAC_REG_CHANNEL,
+ (u8)(connection_channel | 0x80));
}
-/*
- * Description: Get OFDM mode basic rate
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * rate_idx - Receiving data rate
- * Out:
- * none
- *
- * Return Value: response Control frame rate
- *
- */
-static u16 vnt_get_ofdm_rate(struct vnt_private *priv, u16 rate_idx)
-{
- u16 ui = rate_idx;
-
- dev_dbg(&priv->usb->dev, "%s basic rate: %d\n",
- __func__, priv->basic_rates);
-
- if (!vnt_ofdm_min_rate(priv)) {
- dev_dbg(&priv->usb->dev, "%s (NO OFDM) %d\n",
- __func__, rate_idx);
- if (rate_idx > RATE_24M)
- rate_idx = RATE_24M;
- return rate_idx;
- }
-
- while (ui > RATE_11M) {
- if (priv->basic_rates & (1 << ui)) {
- dev_dbg(&priv->usb->dev, "%s rate: %d\n",
- __func__, ui);
- return ui;
- }
- ui--;
- }
-
- dev_dbg(&priv->usb->dev, "%s basic rate: 24M\n", __func__);
+static const u8 vnt_rspinf_b_short_table[] = {
+ 0x70, 0x00, 0x00, 0x00, 0x38, 0x00, 0x09, 0x00,
+ 0x15, 0x00, 0x0a, 0x00, 0x0b, 0x00, 0x0b, 0x80
+};
- return RATE_24M;
-}
+static const u8 vnt_rspinf_b_long_table[] = {
+ 0x70, 0x00, 0x00, 0x00, 0x38, 0x00, 0x01, 0x00,
+ 0x15, 0x00, 0x02, 0x00, 0x0b, 0x00, 0x03, 0x80
+};
-/*
- * Description: Calculate TxRate and RsvTime fields for RSPINF in OFDM mode.
- *
- * Parameters:
- * In:
- * rate - Tx Rate
- * bb_type - Tx Packet type
- * Out:
- * tx_rate - pointer to RSPINF TxRate field
- * rsv_time- pointer to RSPINF RsvTime field
- *
- * Return Value: none
- *
- */
-static void vnt_calculate_ofdm_rate(u16 rate, u8 bb_type,
- u8 *tx_rate, u8 *rsv_time)
-{
- switch (rate) {
- case RATE_6M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9b;
- *rsv_time = 24;
- } else {
- *tx_rate = 0x8b;
- *rsv_time = 30;
- }
- break;
- case RATE_9M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9f;
- *rsv_time = 16;
- } else {
- *tx_rate = 0x8f;
- *rsv_time = 22;
- }
- break;
- case RATE_12M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9a;
- *rsv_time = 12;
- } else {
- *tx_rate = 0x8a;
- *rsv_time = 18;
- }
- break;
- case RATE_18M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9e;
- *rsv_time = 8;
- } else {
- *tx_rate = 0x8e;
- *rsv_time = 14;
- }
- break;
- case RATE_36M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9d;
- *rsv_time = 4;
- } else {
- *tx_rate = 0x8d;
- *rsv_time = 10;
- }
- break;
- case RATE_48M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x98;
- *rsv_time = 4;
- } else {
- *tx_rate = 0x88;
- *rsv_time = 10;
- }
- break;
- case RATE_54M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9c;
- *rsv_time = 4;
- } else {
- *tx_rate = 0x8c;
- *rsv_time = 10;
- }
- break;
- case RATE_24M:
- default:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x99;
- *rsv_time = 8;
- } else {
- *tx_rate = 0x89;
- *rsv_time = 14;
- }
- break;
- }
-}
+static const u8 vnt_rspinf_a_table[] = {
+ 0x9b, 0x18, 0x9f, 0x10, 0x9a, 0x0a, 0x9e, 0x08, 0x99,
+ 0x08, 0x9d, 0x04, 0x98, 0x04, 0x9c, 0x04, 0x9c, 0x04
+};
-/*
- * Description: Set RSPINF
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: None.
- *
- */
+static const u8 vnt_rspinf_gb_table[] = {
+ 0x8b, 0x1e, 0x8f, 0x16, 0x8a, 0x12, 0x8e, 0x0e, 0x89,
+ 0x0e, 0x8d, 0x0a, 0x88, 0x0a, 0x8c, 0x0a, 0x8c, 0x0a
+};
-void vnt_set_rspinf(struct vnt_private *priv, u8 bb_type)
+int vnt_set_rspinf(struct vnt_private *priv, u8 bb_type)
{
- struct vnt_phy_field phy[4];
- u8 tx_rate[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; /* For OFDM */
- u8 rsv_time[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
- u8 data[34];
- int i;
-
- /*RSPINF_b_1*/
- vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_1M),
- PK_TYPE_11B, &phy[0]);
-
- /*RSPINF_b_2*/
- vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_2M),
- PK_TYPE_11B, &phy[1]);
-
- /*RSPINF_b_5*/
- vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_5M),
- PK_TYPE_11B, &phy[2]);
-
- /*RSPINF_b_11*/
- vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_11M),
- PK_TYPE_11B, &phy[3]);
-
- /*RSPINF_a_6*/
- vnt_calculate_ofdm_rate(RATE_6M, bb_type, &tx_rate[0], &rsv_time[0]);
-
- /*RSPINF_a_9*/
- vnt_calculate_ofdm_rate(RATE_9M, bb_type, &tx_rate[1], &rsv_time[1]);
+ const u8 *data;
+ u16 len;
+ int ret;
- /*RSPINF_a_12*/
- vnt_calculate_ofdm_rate(RATE_12M, bb_type, &tx_rate[2], &rsv_time[2]);
-
- /*RSPINF_a_18*/
- vnt_calculate_ofdm_rate(RATE_18M, bb_type, &tx_rate[3], &rsv_time[3]);
-
- /*RSPINF_a_24*/
- vnt_calculate_ofdm_rate(RATE_24M, bb_type, &tx_rate[4], &rsv_time[4]);
-
- /*RSPINF_a_36*/
- vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_36M),
- bb_type, &tx_rate[5], &rsv_time[5]);
-
- /*RSPINF_a_48*/
- vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_48M),
- bb_type, &tx_rate[6], &rsv_time[6]);
-
- /*RSPINF_a_54*/
- vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_54M),
- bb_type, &tx_rate[7], &rsv_time[7]);
-
- /*RSPINF_a_72*/
- vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_54M),
- bb_type, &tx_rate[8], &rsv_time[8]);
-
- put_unaligned(phy[0].len, (u16 *)&data[0]);
- data[2] = phy[0].signal;
- data[3] = phy[0].service;
-
- put_unaligned(phy[1].len, (u16 *)&data[4]);
- data[6] = phy[1].signal;
- data[7] = phy[1].service;
-
- put_unaligned(phy[2].len, (u16 *)&data[8]);
- data[10] = phy[2].signal;
- data[11] = phy[2].service;
+ if (priv->preamble_type) {
+ data = vnt_rspinf_b_short_table;
+ len = ARRAY_SIZE(vnt_rspinf_b_short_table);
+ } else {
+ data = vnt_rspinf_b_long_table;
+ len = ARRAY_SIZE(vnt_rspinf_b_long_table);
+ }
- put_unaligned(phy[3].len, (u16 *)&data[12]);
- data[14] = phy[3].signal;
- data[15] = phy[3].service;
+ /* RSPINF_b_1 to RSPINF_b_11 */
+ ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_RSPINF_B_1,
+ MESSAGE_REQUEST_MACREG, len, data);
+ if (ret)
+ return ret;
- for (i = 0; i < 9; i++) {
- data[16 + i * 2] = tx_rate[i];
- data[16 + i * 2 + 1] = rsv_time[i];
+ if (bb_type == BB_TYPE_11A) {
+ data = vnt_rspinf_a_table;
+ len = ARRAY_SIZE(vnt_rspinf_a_table);
+ } else {
+ data = vnt_rspinf_gb_table;
+ len = ARRAY_SIZE(vnt_rspinf_gb_table);
}
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_RSPINF_B_1,
- MESSAGE_REQUEST_MACREG, 34, &data[0]);
+ /* RSPINF_a_6 to RSPINF_a_72 */
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_RSPINF_A_6,
+ MESSAGE_REQUEST_MACREG, len, data);
}
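vnt_set_rspinf() now streams precomputed tables instead of rebuilding the RSPINF block on every call. Judging from the packing the deleted code performed, the 11b tables hold 4-byte rows (little-endian PHY length, signal, service) and the OFDM tables hold 2-byte rows (signal byte, reserve time); a decoding sketch under that assumption:

/*
 * Editor's sketch; row layouts inferred from the removed packing code,
 * not stated by the patch itself.
 */
struct rspinf_b { unsigned short len; unsigned char signal, service; };
struct rspinf_a { unsigned char signal, rsv_time; };

static struct rspinf_b decode_b_row(const unsigned char *p)
{
        struct rspinf_b r = {
                .len     = (unsigned short)(p[0] | (p[1] << 8)),
                .signal  = p[2],
                .service = p[3],
        };
        return r;
}

static struct rspinf_a decode_a_row(const unsigned char *p)
{
        struct rspinf_a r = { .signal = p[0], .rsv_time = p[1] };
        return r;
}

As a spot check, the first vnt_rspinf_b_short_table row decodes to len 0x0070 (112 bits, i.e. 14 octets at 1M), signal 0x00, service 0x00, matching what the removed vnt_get_phy_field() produced for RSPINF_b_1.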
-/*
- * Description: Update IFS
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * Out:
- * none
- *
- * Return Value: None.
- *
- */
-void vnt_update_ifs(struct vnt_private *priv)
+int vnt_update_ifs(struct vnt_private *priv)
{
u8 max_min = 0;
u8 data[4];
+ int ret;
if (priv->packet_type == PK_TYPE_11A) {
priv->slot = C_SLOT_SHORT;
@@ -367,89 +149,36 @@ void vnt_update_ifs(struct vnt_private *priv)
priv->eifs = C_EIFS;
- switch (priv->rf_type) {
- case RF_VT3226D0:
- if (priv->bb_type != BB_TYPE_11B) {
- priv->sifs -= 1;
- priv->difs -= 1;
- break;
- }
- /* fall through */
- case RF_AIROHA7230:
- case RF_AL2230:
- case RF_AL2230S:
- if (priv->bb_type != BB_TYPE_11B)
- break;
- /* fall through */
- case RF_RFMD2959:
- case RF_VT3226:
- case RF_VT3342A0:
- priv->sifs -= 3;
- priv->difs -= 3;
- break;
- case RF_MAXIM2829:
- if (priv->bb_type == BB_TYPE_11A) {
- priv->sifs -= 5;
- priv->difs -= 5;
- } else {
- priv->sifs -= 2;
- priv->difs -= 2;
- }
-
- break;
- }
-
data[0] = (u8)priv->sifs;
data[1] = (u8)priv->difs;
data[2] = (u8)priv->eifs;
data[3] = (u8)priv->slot;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_SIFS,
- MESSAGE_REQUEST_MACREG, 4, &data[0]);
+ ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_SIFS,
+ MESSAGE_REQUEST_MACREG, 4, &data[0]);
+ if (ret)
+ return ret;
max_min |= 0xa0;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_CWMAXMIN0,
- MESSAGE_REQUEST_MACREG, 1, &max_min);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_CWMAXMIN0,
+ MESSAGE_REQUEST_MACREG, 1, &max_min);
}
void vnt_update_top_rates(struct vnt_private *priv)
{
- u8 top_ofdm = RATE_24M, top_cck = RATE_1M;
- u8 i;
-
- /*Determines the highest basic rate.*/
- for (i = RATE_54M; i >= RATE_6M; i--) {
- if (priv->basic_rates & (u16)(1 << i)) {
- top_ofdm = i;
- break;
- }
- }
-
- priv->top_ofdm_basic_rate = top_ofdm;
+ int pos;
- for (i = RATE_11M;; i--) {
- if (priv->basic_rates & (u16)(1 << i)) {
- top_cck = i;
- break;
- }
- if (i == RATE_1M)
- break;
- }
+ pos = fls(priv->basic_rates & GENMASK(RATE_54M, RATE_6M));
+ priv->top_ofdm_basic_rate = pos ? (pos - 1) : RATE_24M;
- priv->top_cck_basic_rate = top_cck;
+ pos = fls(priv->basic_rates & GENMASK(RATE_11M, RATE_1M));
+ priv->top_cck_basic_rate = pos ? (pos - 1) : RATE_1M;
}
-int vnt_ofdm_min_rate(struct vnt_private *priv)
+bool vnt_ofdm_min_rate(struct vnt_private *priv)
{
- int ii;
-
- for (ii = RATE_54M; ii >= RATE_6M; ii--) {
- if ((priv->basic_rates) & ((u16)BIT(ii)))
- return true;
- }
-
- return false;
+ return priv->basic_rates & GENMASK(RATE_54M, RATE_6M) ? true : false;
}
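vnt_update_top_rates() and vnt_ofdm_min_rate() now derive everything from the rate bitmap: GENMASK() keeps only the CCK or OFDM bits and fls() returns the 1-based position of the highest set bit, so `pos - 1` is the top basic rate, with RATE_24M/RATE_1M as fallbacks for an empty mask. A standalone sketch with the kernel helpers mocked and the rate bit positions assumed (RATE_6M..RATE_54M at bits 4..11, RATE_24M at bit 8):

#include <stdio.h>

/* mocks of the kernel's GENMASK() and fls() for illustration only */
#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

static int fls_u32(unsigned int x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned int basic_rates = (1u << 4) | (1u << 8); /* 6M and 24M */
        int pos = fls_u32(basic_rates & GENMASK(11, 4));

        /* highest set OFDM bit wins; 8 (RATE_24M) if the mask is empty */
        printf("top OFDM rate index: %d\n", pos ? pos - 1 : 8);
        return 0;
}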
u8 vnt_get_pkt_type(struct vnt_private *priv)
@@ -481,23 +210,8 @@ u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2)
return tsf1 - tsf2 - (u64)cw_rxbcntsf_off[rx_rate % MAX_RATE];
}
-/*
- * Description: Sync. TSF counter to BSS
- * Get TSF offset and write to HW
- *
- * Parameters:
- * In:
- * priv - The adapter to be sync.
- * time_stamp - Rx BCN's TSF
- * local_tsf - Local TSF
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
- u64 time_stamp, u64 local_tsf)
+int vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
+ u64 time_stamp, u64 local_tsf)
{
u64 tsf_offset = 0;
u8 data[8];
@@ -513,8 +227,8 @@ void vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
data[6] = (u8)(tsf_offset >> 48);
data[7] = (u8)(tsf_offset >> 56);
- vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
- MESSAGE_REQUEST_TSF, 0, 8, data);
+ return vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
+ MESSAGE_REQUEST_TSF, 0, 8, data);
}
/*
@@ -589,21 +303,7 @@ u64 vnt_get_next_tbtt(u64 tsf, u16 beacon_interval)
return tsf;
}
-/*
- * Description: Set NIC TSF counter for first Beacon time
- * Get NEXTTBTT from adjusted TSF and Beacon Interval
- *
- * Parameters:
- * In:
- * dwIoBase - IO Base
- * beacon_interval - Beacon Interval
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval)
+int vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval)
{
u64 next_tbtt = 0;
u8 data[8];
@@ -621,29 +321,15 @@ void vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval)
data[6] = (u8)(next_tbtt >> 48);
data[7] = (u8)(next_tbtt >> 56);
- vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
- MESSAGE_REQUEST_TBTT, 0, 8, data);
+ return vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
+ MESSAGE_REQUEST_TBTT, 0, 8, data);
}
-/*
- * Description: Sync NIC TSF counter for Beacon time
- * Get NEXTTBTT and write to HW
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * tsf - Current TSF counter
- * beacon_interval - Beacon Interval
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
- u16 beacon_interval)
+int vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
+ u16 beacon_interval)
{
u8 data[8];
+ int ret;
tsf = vnt_get_next_tbtt(tsf, beacon_interval);
@@ -656,10 +342,13 @@ void vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
data[6] = (u8)(tsf >> 48);
data[7] = (u8)(tsf >> 56);
- vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
- MESSAGE_REQUEST_TBTT, 0, 8, data);
+ ret = vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
+ MESSAGE_REQUEST_TBTT, 0, 8, data);
+ if (ret)
+ return ret;
dev_dbg(&priv->usb->dev, "%s TBTT: %8llx\n", __func__, tsf);
+ return 0;
}
/*
@@ -723,9 +412,13 @@ int vnt_radio_power_on(struct vnt_private *priv)
{
int ret = 0;
- vnt_exit_deep_sleep(priv);
+ ret = vnt_exit_deep_sleep(priv);
+ if (ret)
+ return ret;
- vnt_mac_reg_bits_on(priv, MAC_REG_HOSTCR, HOSTCR_RXON);
+ ret = vnt_mac_reg_bits_on(priv, MAC_REG_HOSTCR, HOSTCR_RXON);
+ if (ret)
+ return ret;
switch (priv->rf_type) {
case RF_AL2230:
@@ -734,56 +427,69 @@ int vnt_radio_power_on(struct vnt_private *priv)
case RF_VT3226:
case RF_VT3226D0:
case RF_VT3342A0:
- vnt_mac_reg_bits_on(priv, MAC_REG_SOFTPWRCTL,
- (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
- break;
+ ret = vnt_mac_reg_bits_on(priv, MAC_REG_SOFTPWRCTL,
+ (SOFTPWRCTL_SWPE2 |
+ SOFTPWRCTL_SWPE3));
+ if (ret)
+ return ret;
}
- vnt_mac_reg_bits_off(priv, MAC_REG_GPIOCTL1, GPIO3_INTMD);
-
- return ret;
+ return vnt_mac_reg_bits_off(priv, MAC_REG_GPIOCTL1, GPIO3_INTMD);
}
-void vnt_set_bss_mode(struct vnt_private *priv)
+int vnt_set_bss_mode(struct vnt_private *priv)
{
- if (priv->rf_type == RF_AIROHA7230 && priv->bb_type == BB_TYPE_11A)
- vnt_mac_set_bb_type(priv, BB_TYPE_11G);
- else
- vnt_mac_set_bb_type(priv, priv->bb_type);
+ int ret;
+ unsigned char type = priv->bb_type;
+ unsigned char data = 0;
+ unsigned char bb_vga_0 = 0x1c;
+ unsigned char bb_vga_2_3 = 0x00;
- priv->packet_type = vnt_get_pkt_type(priv);
+ if (priv->rf_type == RF_AIROHA7230 && priv->bb_type == BB_TYPE_11A)
+ type = BB_TYPE_11G;
- if (priv->bb_type == BB_TYPE_11A)
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x88, 0x03);
- else if (priv->bb_type == BB_TYPE_11B)
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x88, 0x02);
- else if (priv->bb_type == BB_TYPE_11G)
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x88, 0x08);
+ ret = vnt_mac_set_bb_type(priv, type);
+ if (ret)
+ return ret;
- vnt_update_ifs(priv);
- vnt_set_rspinf(priv, (u8)priv->bb_type);
+ priv->packet_type = vnt_get_pkt_type(priv);
if (priv->bb_type == BB_TYPE_11A) {
- if (priv->rf_type == RF_AIROHA7230) {
- priv->bb_vga[0] = 0x20;
+ data = 0x03;
+ bb_vga_0 = 0x20;
+ bb_vga_2_3 = 0x10;
+ } else if (priv->bb_type == BB_TYPE_11B) {
+ data = 0x02;
+ } else if (priv->bb_type == BB_TYPE_11G) {
+ data = 0x08;
+ }
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
- 0xe7, priv->bb_vga[0]);
- }
+ if (data) {
+ ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
+ 0x88, data);
+ if (ret)
+ return ret;
+ }
- priv->bb_vga[2] = 0x10;
- priv->bb_vga[3] = 0x10;
- } else {
- if (priv->rf_type == RF_AIROHA7230) {
- priv->bb_vga[0] = 0x1c;
+ ret = vnt_update_ifs(priv);
+ if (ret)
+ return ret;
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
- 0xe7, priv->bb_vga[0]);
- }
+ ret = vnt_set_rspinf(priv, priv->bb_type);
+ if (ret)
+ return ret;
- priv->bb_vga[2] = 0x0;
- priv->bb_vga[3] = 0x0;
+ if (priv->rf_type == RF_AIROHA7230) {
+ priv->bb_vga[0] = bb_vga_0;
+
+ ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
+ 0xe7, priv->bb_vga[0]);
+ if (ret)
+ return ret;
}
- vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
+ priv->bb_vga[2] = bb_vga_2_3;
+ priv->bb_vga[3] = bb_vga_2_3;
+
+ return vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
}
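vnt_set_bss_mode() shows the conversion pattern this series applies everywhere: a formerly-void helper becomes int-returning, every USB register write is checked, the first failure propagates upward, and the last call's status is returned directly instead of being stored. Schematically:

/* Editor's sketch of the recurring error-propagation shape. */
static int do_write(void)
{
        return 0;       /* stand-in for a vnt_control_out*() call */
}

static int configure(void)
{
        int ret;

        ret = do_write();       /* formerly void, now checked */
        if (ret)
                return ret;

        return do_write();      /* final status returned directly */
}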
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index 75cd340c0cce..a524fdc60ae3 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -25,23 +25,23 @@
struct vnt_private;
-void vnt_set_channel(struct vnt_private *priv, u32 connection_channel);
-void vnt_set_rspinf(struct vnt_private *priv, u8 bb_type);
-void vnt_update_ifs(struct vnt_private *priv);
+int vnt_set_channel(struct vnt_private *priv, u32 connection_channel);
+int vnt_set_rspinf(struct vnt_private *priv, u8 bb_type);
+int vnt_update_ifs(struct vnt_private *priv);
void vnt_update_top_rates(struct vnt_private *priv);
-int vnt_ofdm_min_rate(struct vnt_private *priv);
-void vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
- u64 time_stamp, u64 local_tsf);
+bool vnt_ofdm_min_rate(struct vnt_private *priv);
+int vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
+ u64 time_stamp, u64 local_tsf);
bool vnt_get_current_tsf(struct vnt_private *priv, u64 *current_tsf);
bool vnt_clear_current_tsf(struct vnt_private *priv);
-void vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval);
-void vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
- u16 beacon_interval);
+int vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval);
+int vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
+ u16 beacon_interval);
u64 vnt_get_next_tbtt(u64 tsf, u16 beacon_interval);
u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2);
int vnt_radio_power_off(struct vnt_private *priv);
int vnt_radio_power_on(struct vnt_private *priv);
u8 vnt_get_pkt_type(struct vnt_private *priv);
-void vnt_set_bss_mode(struct vnt_private *priv);
+int vnt_set_bss_mode(struct vnt_private *priv);
#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index e6ee9411f080..947530fefe94 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -73,6 +73,10 @@
#define DEVICE_VERSION "mac80211"
+#define FIRMWARE_VERSION 0x133 /* version 1.51 */
+#define FIRMWARE_NAME "vntwusb.fw"
+#define FIRMWARE_CHUNK_SIZE 0x400
+
#define CONFIG_PATH "/etc/vntconfiguration.dat"
#define MAX_UINTS 8
@@ -202,8 +206,7 @@ struct vnt_rsp_card_init {
* Enum of context types for SendPacket
*/
enum {
- CONTEXT_DATA_PACKET = 1,
- CONTEXT_MGMT_PACKET,
+ CONTEXT_DATA_PACKET = 0,
CONTEXT_BEACON_PACKET
};
@@ -234,18 +237,14 @@ struct vnt_rcb {
struct vnt_usb_send_context {
void *priv;
struct sk_buff *skb;
- struct urb *urb;
- struct ieee80211_hdr *hdr;
- unsigned int buf_len;
+ void *tx_buffer;
u32 frame_len;
u16 tx_hdr_size;
u16 tx_rate;
u8 type;
u8 pkt_no;
u8 pkt_type;
- u8 need_ack;
bool in_use;
- unsigned char data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
};
/*
@@ -288,6 +287,7 @@ struct vnt_private {
/* Variables to track resources for the BULK Out Pipe */
struct vnt_usb_send_context *tx_context[CB_MAX_TX_DESC];
+ struct usb_anchor tx_submitted;
u32 num_tx_context;
/* Variables to track resources for the Interrupt In Pipe */
@@ -344,13 +344,9 @@ struct vnt_private {
u8 ofdm_pwr_tbl[14];
u8 ofdm_a_pwr_tbl[42];
- u16 current_rate;
u16 tx_rate_fb0;
u16 tx_rate_fb1;
- u8 short_retry_limit;
- u8 long_retry_limit;
-
enum nl80211_iftype op_mode;
int short_slot_time;
@@ -383,8 +379,6 @@ struct vnt_private {
u8 bb_pre_ed_rssi;
u8 bb_pre_ed_index;
- u16 wake_up_count;
-
/* command timer */
struct delayed_work run_command_work;
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
deleted file mode 100644
index 70358d427211..000000000000
--- a/drivers/staging/vt6656/firmware.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * File: baseband.c
- *
- * Purpose: Implement functions to access baseband
- *
- * Author: Yiching Chen
- *
- * Date: May 20, 2004
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include <linux/compiler.h>
-#include "firmware.h"
-#include "usbpipe.h"
-
-#define FIRMWARE_VERSION 0x133 /* version 1.51 */
-#define FIRMWARE_NAME "vntwusb.fw"
-
-#define FIRMWARE_CHUNK_SIZE 0x400
-
-int vnt_download_firmware(struct vnt_private *priv)
-{
- struct device *dev = &priv->usb->dev;
- const struct firmware *fw;
- u16 length;
- int ii;
- int ret = 0;
-
- dev_dbg(dev, "---->Download firmware\n");
-
- ret = request_firmware(&fw, FIRMWARE_NAME, dev);
- if (ret) {
- dev_err(dev, "firmware file %s request failed (%d)\n",
- FIRMWARE_NAME, ret);
- goto end;
- }
-
- for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
- length = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
-
- ret = vnt_control_out(priv, 0, 0x1200 + ii, 0x0000, length,
- fw->data + ii);
- if (ret)
- goto free_fw;
-
- dev_dbg(dev, "Download firmware...%d %zu\n", ii, fw->size);
- }
-
-free_fw:
- release_firmware(fw);
-end:
- return ret;
-}
-MODULE_FIRMWARE(FIRMWARE_NAME);
-
-int vnt_firmware_branch_to_sram(struct vnt_private *priv)
-{
- dev_dbg(&priv->usb->dev, "---->Branch to Sram\n");
-
- return vnt_control_out(priv, 1, 0x1200, 0x0000, 0, NULL);
-}
-
-int vnt_check_firmware_version(struct vnt_private *priv)
-{
- int ret = 0;
-
- ret = vnt_control_in(priv, MESSAGE_TYPE_READ, 0,
- MESSAGE_REQUEST_VERSION, 2,
- (u8 *)&priv->firmware_version);
- if (ret) {
- dev_dbg(&priv->usb->dev,
- "Could not get firmware version: %d.\n", ret);
- goto end;
- }
-
- dev_dbg(&priv->usb->dev, "Firmware Version [%04x]\n",
- priv->firmware_version);
-
- if (priv->firmware_version == 0xFFFF) {
- dev_dbg(&priv->usb->dev, "In Loader.\n");
- ret = -EINVAL;
- goto end;
- }
-
- if (priv->firmware_version < FIRMWARE_VERSION) {
- /* branch to loader for download new firmware */
- ret = vnt_firmware_branch_to_sram(priv);
- if (ret) {
- dev_dbg(&priv->usb->dev,
- "Could not branch to SRAM: %d.\n", ret);
- } else {
- ret = -EINVAL;
- }
- }
-
-end:
- return ret;
-}
diff --git a/drivers/staging/vt6656/firmware.h b/drivers/staging/vt6656/firmware.h
deleted file mode 100644
index 161126faf396..000000000000
--- a/drivers/staging/vt6656/firmware.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * File: firmware.h
- *
- * Purpose: Version and Release Information
- *
- * Author: Yiching Chen
- *
- * Date: May 20, 2004
- *
- */
-
-#ifndef __FIRMWARE_H__
-#define __FIRMWARE_H__
-
-#include "device.h"
-
-int vnt_download_firmware(struct vnt_private *priv);
-int vnt_firmware_branch_to_sram(struct vnt_private *priv);
-int vnt_check_firmware_version(struct vnt_private *priv);
-
-#endif /* __FIRMWARE_H__ */
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index ac3b188984d0..c66cb53cfc09 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -35,7 +35,7 @@ int vnt_key_init_table(struct vnt_private *priv)
static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
struct ieee80211_key_conf *key, u32 key_type,
- u32 mode, bool onfly_latch)
+ u32 mode)
{
struct vnt_private *priv = hw->priv;
u8 broadcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -68,17 +68,11 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
entry = MAX_KEY_TABLE - 1;
key->hw_key_idx = entry;
/* fall through */
- case VNT_KEY_ALLGROUP:
- key_mode |= VNT_KEY_ALLGROUP;
- if (onfly_latch)
- key_mode |= VNT_KEY_ONFLY_ALL;
- /* fall through */
case VNT_KEY_GROUP_ADDRESS:
- key_mode |= mode;
- /* fall through */
+ key_mode = mode | (mode << 4);
+ break;
case VNT_KEY_GROUP:
- key_mode |= (mode << 4);
- key_mode |= VNT_KEY_GROUP;
+ key_mode = mode << 4;
break;
case VNT_KEY_PAIRWISE:
key_mode |= mode;
@@ -88,8 +82,7 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
return -EINVAL;
}
- if (onfly_latch)
- key_mode |= VNT_KEY_ONFLY;
+ key_mode |= key_type;
if (mode == KEY_CTL_WEP) {
if (key->keylen == WLAN_KEY_LEN_WEP40)
@@ -98,9 +91,8 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
key->key[15] |= 0x80;
}
- vnt_mac_set_keyentry(priv, key_mode, entry, key_inx, bssid, key->key);
-
- return 0;
+ return vnt_mac_set_keyentry(priv, key_mode, entry,
+ key_inx, bssid, key->key);
}
int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
@@ -109,28 +101,21 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
struct vnt_private *priv = hw->priv;
u8 *mac_addr = NULL;
u8 key_dec_mode = 0;
- int ret = 0, u;
if (sta)
mac_addr = &sta->addr[0];
switch (key->cipher) {
- case 0:
- for (u = 0 ; u < MAX_KEY_TABLE; u++)
- vnt_mac_disable_keyentry(priv, u);
- return ret;
-
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- for (u = 0; u < MAX_KEY_TABLE; u++)
- vnt_mac_disable_keyentry(priv, u);
-
vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
- KEY_CTL_WEP, true);
+ KEY_CTL_WEP);
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- return ret;
+ return vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
+ KEY_CTL_WEP);
+
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -151,11 +136,9 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
- key_dec_mode, true);
- else
- vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS,
- key_dec_mode, true);
+ return vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
+ key_dec_mode);
- return 0;
+ return vnt_set_keymode(hw, mac_addr, key,
+ VNT_KEY_GROUP_ADDRESS, key_dec_mode);
}
diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
index 918c07cf86cd..1f3449e66143 100644
--- a/drivers/staging/vt6656/key.h
+++ b/drivers/staging/vt6656/key.h
@@ -25,13 +25,14 @@
#define KEY_CTL_TKIP 0x02
#define KEY_CTL_CCMP 0x03
-#define VNT_KEY_DEFAULTKEY 0x1
-#define VNT_KEY_GROUP_ADDRESS 0x2
-#define VNT_KEY_ALLGROUP 0x4
-#define VNT_KEY_GROUP 0x40
-#define VNT_KEY_PAIRWISE 0x00
-#define VNT_KEY_ONFLY 0x8000
#define VNT_KEY_ONFLY_ALL 0x4000
+#define VNT_KEY_ONFLY 0x8000
+#define VNT_KEY_ALLGROUP 0x04
+#define VNT_KEY_GROUP 0x40
+#define VNT_KEY_PAIRWISE VNT_KEY_ONFLY
+#define VNT_KEY_GROUP_ADDRESS (VNT_KEY_ALLGROUP | VNT_KEY_GROUP)
+#define VNT_KEY_DEFAULTKEY (VNT_KEY_GROUP_ADDRESS | VNT_KEY_ONFLY |\
+ VNT_KEY_ONFLY_ALL)
int vnt_key_init_table(struct vnt_private *priv);
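With the reworked defines, the group and default key modes are compositions of the hardware flag bits rather than independent magic numbers. The resulting values, spelled out as compile-time checks (a reference sketch only, not part of the patch):

        #include <linux/build_bug.h>

        static inline void vnt_key_mode_values(void)
        {
                BUILD_BUG_ON(VNT_KEY_GROUP_ADDRESS != 0x0044);
                BUILD_BUG_ON(VNT_KEY_DEFAULTKEY    != 0xc044);
                BUILD_BUG_ON(VNT_KEY_PAIRWISE      != 0x8000);
        }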
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 5cacf6e60e90..da7067c34643 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -22,90 +22,40 @@
#include "mac.h"
#include "usbpipe.h"
-/*
- * Description:
- * Write MAC Multicast Address Mask
- *
- * Parameters:
- * In:
- * mc_filter (mac filter)
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_mac_set_filter(struct vnt_private *priv, u64 mc_filter)
+int vnt_mac_set_filter(struct vnt_private *priv, u64 mc_filter)
{
__le64 le_mc = cpu_to_le64(mc_filter);
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_MAR0,
- MESSAGE_REQUEST_MACREG, sizeof(le_mc), (u8 *)&le_mc);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_MAR0,
+ MESSAGE_REQUEST_MACREG, sizeof(le_mc),
+ (u8 *)&le_mc);
}
-/*
- * Description:
- * Shut Down MAC
- *
- * Parameters:
- * In:
- * Out:
- * none
- *
- *
- */
-void vnt_mac_shutdown(struct vnt_private *priv)
+int vnt_mac_shutdown(struct vnt_private *priv)
{
- vnt_control_out(priv, MESSAGE_TYPE_MACSHUTDOWN, 0, 0, 0, NULL);
+ return vnt_control_out(priv, MESSAGE_TYPE_MACSHUTDOWN, 0, 0, 0, NULL);
}
-void vnt_mac_set_bb_type(struct vnt_private *priv, u8 type)
+int vnt_mac_set_bb_type(struct vnt_private *priv, u8 type)
{
u8 data[2];
data[0] = type;
data[1] = EnCFG_BBType_MASK;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data),
+ data);
}
-/*
- * Description:
- * Disable the Key Entry by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_mac_disable_keyentry(struct vnt_private *priv, u8 entry_idx)
+int vnt_mac_disable_keyentry(struct vnt_private *priv, u8 entry_idx)
{
- vnt_control_out(priv, MESSAGE_TYPE_CLRKEYENTRY, 0, 0,
- sizeof(entry_idx), &entry_idx);
+ return vnt_control_out(priv, MESSAGE_TYPE_CLRKEYENTRY, 0, 0,
+ sizeof(entry_idx), &entry_idx);
}
-/*
- * Description:
- * Set the Key by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
- u32 key_idx, u8 *addr, u8 *key)
+int vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
+ u32 key_idx, u8 *addr, u8 *key)
{
struct vnt_mac_set_key set_key;
u16 offset;
@@ -124,9 +74,9 @@ void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
dev_dbg(&priv->usb->dev, "offset %d key ctl %d set key %24ph\n",
offset, key_ctl, (u8 *)&set_key);
- vnt_control_out(priv, MESSAGE_TYPE_SETKEY, offset,
- (u16)key_idx, sizeof(struct vnt_mac_set_key),
- (u8 *)&set_key);
+ return vnt_control_out(priv, MESSAGE_TYPE_SETKEY, offset,
+ (u16)key_idx, sizeof(struct vnt_mac_set_key),
+ (u8 *)&set_key);
}
int vnt_mac_reg_bits_off(struct vnt_private *priv, u8 reg_ofs, u8 bits)
@@ -151,76 +101,76 @@ int vnt_mac_reg_bits_on(struct vnt_private *priv, u8 reg_ofs, u8 bits)
MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_write_word(struct vnt_private *priv, u8 reg_ofs, u16 word)
+int vnt_mac_write_word(struct vnt_private *priv, u8 reg_ofs, u16 word)
{
u8 data[2];
data[0] = (u8)(word & 0xff);
data[1] = (u8)(word >> 8);
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, reg_ofs,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, reg_ofs,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_set_bssid_addr(struct vnt_private *priv, u8 *addr)
+int vnt_mac_set_bssid_addr(struct vnt_private *priv, u8 *addr)
{
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_BSSID0,
- MESSAGE_REQUEST_MACREG, ETH_ALEN, addr);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_BSSID0,
+ MESSAGE_REQUEST_MACREG, ETH_ALEN, addr);
}
-void vnt_mac_enable_protect_mode(struct vnt_private *priv)
+int vnt_mac_enable_protect_mode(struct vnt_private *priv)
{
u8 data[2];
data[0] = EnCFG_ProtectMd;
data[1] = EnCFG_ProtectMd;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_disable_protect_mode(struct vnt_private *priv)
+int vnt_mac_disable_protect_mode(struct vnt_private *priv)
{
u8 data[2];
data[0] = 0;
data[1] = EnCFG_ProtectMd;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_enable_barker_preamble_mode(struct vnt_private *priv)
+int vnt_mac_enable_barker_preamble_mode(struct vnt_private *priv)
{
u8 data[2];
data[0] = EnCFG_BarkerPream;
data[1] = EnCFG_BarkerPream;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG2,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG2,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_disable_barker_preamble_mode(struct vnt_private *priv)
+int vnt_mac_disable_barker_preamble_mode(struct vnt_private *priv)
{
u8 data[2];
data[0] = 0;
data[1] = EnCFG_BarkerPream;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG2,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG2,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_set_beacon_interval(struct vnt_private *priv, u16 interval)
+int vnt_mac_set_beacon_interval(struct vnt_private *priv, u16 interval)
{
u8 data[2];
data[0] = (u8)(interval & 0xff);
data[1] = (u8)(interval >> 8);
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_BI,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_BI,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
int vnt_mac_set_led(struct vnt_private *priv, u8 state, u8 led)
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index c532b27de37f..dae70b5c7634 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -177,7 +177,7 @@
#define EnCFG_BBType_a 0x00
#define EnCFG_BBType_b BIT(0)
#define EnCFG_BBType_g BIT(1)
-#define EnCFG_BBType_MASK (BIT(0) | BIT(1))
+#define EnCFG_BBType_MASK (EnCFG_BBType_b | EnCFG_BBType_g)
#define EnCFG_ProtectMd BIT(5)
/* Bits in the EnhanceCFG_1 register */
@@ -355,21 +355,21 @@ struct vnt_mac_set_key {
u8 key[WLAN_KEY_LEN_CCMP];
} __packed;
-void vnt_mac_set_filter(struct vnt_private *priv, u64 mc_filter);
-void vnt_mac_shutdown(struct vnt_private *priv);
-void vnt_mac_set_bb_type(struct vnt_private *priv, u8 type);
-void vnt_mac_disable_keyentry(struct vnt_private *priv, u8 entry_idx);
-void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
- u32 key_idx, u8 *addr, u8 *key);
+int vnt_mac_set_filter(struct vnt_private *priv, u64 mc_filter);
+int vnt_mac_shutdown(struct vnt_private *priv);
+int vnt_mac_set_bb_type(struct vnt_private *priv, u8 type);
+int vnt_mac_disable_keyentry(struct vnt_private *priv, u8 entry_idx);
+int vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
+ u32 key_idx, u8 *addr, u8 *key);
int vnt_mac_reg_bits_off(struct vnt_private *priv, u8 reg_ofs, u8 bits);
int vnt_mac_reg_bits_on(struct vnt_private *priv, u8 reg_ofs, u8 bits);
-void vnt_mac_write_word(struct vnt_private *priv, u8 reg_ofs, u16 word);
-void vnt_mac_set_bssid_addr(struct vnt_private *priv, u8 *addr);
-void vnt_mac_enable_protect_mode(struct vnt_private *priv);
-void vnt_mac_disable_protect_mode(struct vnt_private *priv);
-void vnt_mac_enable_barker_preamble_mode(struct vnt_private *priv);
-void vnt_mac_disable_barker_preamble_mode(struct vnt_private *priv);
-void vnt_mac_set_beacon_interval(struct vnt_private *priv, u16 interval);
+int vnt_mac_write_word(struct vnt_private *priv, u8 reg_ofs, u16 word);
+int vnt_mac_set_bssid_addr(struct vnt_private *priv, u8 *addr);
+int vnt_mac_enable_protect_mode(struct vnt_private *priv);
+int vnt_mac_disable_protect_mode(struct vnt_private *priv);
+int vnt_mac_enable_barker_preamble_mode(struct vnt_private *priv);
+int vnt_mac_disable_barker_preamble_mode(struct vnt_private *priv);
+int vnt_mac_set_beacon_interval(struct vnt_private *priv, u16 interval);
int vnt_mac_set_led(struct vnt_private *priv, u8 state, u8 led);
#endif /* __MAC_H__ */
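All mac.h register writers now return the vnt_control_out() status. The two-byte little-endian split in vnt_mac_write_word() could equally use the kernel's unaligned helpers; a sketch of the equivalent formulation (the _alt name is hypothetical):

        #include <asm/unaligned.h>

        static int vnt_mac_write_word_alt(struct vnt_private *priv,
                                          u8 reg_ofs, u16 word)
        {
                u8 data[2];

                put_unaligned_le16(word, data); /* data[0] = lo, data[1] = hi */

                return vnt_control_out(priv, MESSAGE_TYPE_WRITE, reg_ofs,
                                       MESSAGE_REQUEST_MACREG,
                                       ARRAY_SIZE(data), data);
        }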
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 5f78cad3b647..8bf851c53f4e 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -33,7 +33,6 @@
#include "wcmd.h"
#include "rxtx.h"
#include "rf.h"
-#include "firmware.h"
#include "usbpipe.h"
#include "channel.h"
MODULE_PARM_DESC(tx_buffers, "Number of usb tx buffers");
#define RTS_THRESH_DEF 2347
#define FRAG_THRESH_DEF 2346
-#define SHORT_RETRY_DEF 8
-#define LONG_RETRY_DEF 4
/* BasebandType[] baseband type selected
* 0: indicate 802.11a type
@@ -94,15 +91,91 @@ static void vnt_set_options(struct vnt_private *priv)
else
priv->num_rcb = vnt_rx_buffers;
- priv->short_retry_limit = SHORT_RETRY_DEF;
- priv->long_retry_limit = LONG_RETRY_DEF;
priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->bb_type = BBP_TYPE_DEF;
priv->packet_type = priv->bb_type;
- priv->preamble_type = 0;
+ priv->preamble_type = PREAMBLE_LONG;
priv->exist_sw_net_addr = false;
}
+static int vnt_download_firmware(struct vnt_private *priv)
+{
+ struct device *dev = &priv->usb->dev;
+ const struct firmware *fw;
+ u16 length;
+ int ii;
+ int ret = 0;
+
+ dev_dbg(dev, "---->Download firmware\n");
+
+ ret = request_firmware(&fw, FIRMWARE_NAME, dev);
+ if (ret) {
+ dev_err(dev, "firmware file %s request failed (%d)\n",
+ FIRMWARE_NAME, ret);
+ goto end;
+ }
+
+ for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
+ length = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
+
+ ret = vnt_control_out(priv, 0, 0x1200 + ii, 0x0000, length,
+ fw->data + ii);
+ if (ret)
+ goto free_fw;
+
+ dev_dbg(dev, "Download firmware...%d %zu\n", ii, fw->size);
+ }
+
+free_fw:
+ release_firmware(fw);
+end:
+ return ret;
+}
+
+static int vnt_firmware_branch_to_sram(struct vnt_private *priv)
+{
+ dev_dbg(&priv->usb->dev, "---->Branch to Sram\n");
+
+ return vnt_control_out(priv, 1, 0x1200, 0x0000, 0, NULL);
+}
+
+static int vnt_check_firmware_version(struct vnt_private *priv)
+{
+ int ret = 0;
+
+ ret = vnt_control_in(priv, MESSAGE_TYPE_READ, 0,
+ MESSAGE_REQUEST_VERSION, 2,
+ (u8 *)&priv->firmware_version);
+ if (ret) {
+ dev_dbg(&priv->usb->dev,
+ "Could not get firmware version: %d.\n", ret);
+ goto end;
+ }
+
+ dev_dbg(&priv->usb->dev, "Firmware Version [%04x]\n",
+ priv->firmware_version);
+
+ if (priv->firmware_version == 0xFFFF) {
+ dev_dbg(&priv->usb->dev, "In Loader.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (priv->firmware_version < FIRMWARE_VERSION) {
+ /* branch to loader to download new firmware */
+ ret = vnt_firmware_branch_to_sram(priv);
+ if (ret) {
+ dev_dbg(&priv->usb->dev,
+ "Could not branch to SRAM: %d.\n", ret);
+ } else {
+ ret = -EINVAL;
+ }
+ }
+
+end:
+ return ret;
+}
+
/*
* initialization of MAC & BBP registers
*/
@@ -146,8 +219,8 @@ static int vnt_init_registers(struct vnt_private *priv)
init_cmd->exist_sw_net_addr = priv->exist_sw_net_addr;
for (ii = 0; ii < ARRAY_SIZE(init_cmd->sw_net_addr); ii++)
init_cmd->sw_net_addr[ii] = priv->current_net_addr[ii];
- init_cmd->short_retry_limit = priv->short_retry_limit;
- init_cmd->long_retry_limit = priv->long_retry_limit;
+ init_cmd->short_retry_limit = priv->hw->wiphy->retry_short;
+ init_cmd->long_retry_limit = priv->hw->wiphy->retry_long;
/* issue card_init command to device */
ret = vnt_control_out(priv, MESSAGE_TYPE_CARDINIT, 0, 0,
@@ -324,19 +397,6 @@ static int vnt_init_registers(struct vnt_private *priv)
dev_dbg(&priv->usb->dev, "Network address = %pM\n",
priv->current_net_addr);
- /*
- * set BB and packet type at the same time
- * set Short Slot Time, xIFS, and RSPINF
- */
- if (priv->bb_type == BB_TYPE_11A)
- priv->short_slot_time = true;
- else
- priv->short_slot_time = false;
-
- ret = vnt_set_short_slot_time(priv);
- if (ret)
- goto end;
-
priv->radio_ctl = priv->eeprom[EEP_OFS_RADIOCTL];
if ((priv->radio_ctl & EEP_RADIOCTL_ENABLE) != 0) {
@@ -385,17 +445,13 @@ static void vnt_free_tx_bufs(struct vnt_private *priv)
struct vnt_usb_send_context *tx_context;
int ii;
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+
for (ii = 0; ii < priv->num_tx_context; ii++) {
tx_context = priv->tx_context[ii];
if (!tx_context)
continue;
- /* deallocate URBs */
- if (tx_context->urb) {
- usb_kill_urb(tx_context->urb);
- usb_free_urb(tx_context->urb);
- }
-
kfree(tx_context);
}
}
@@ -436,6 +492,8 @@ static int vnt_alloc_bufs(struct vnt_private *priv)
struct vnt_rcb *rcb;
int ii;
+ init_usb_anchor(&priv->tx_submitted);
+
for (ii = 0; ii < priv->num_tx_context; ii++) {
tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL);
if (!tx_context) {
@@ -446,14 +504,6 @@ static int vnt_alloc_bufs(struct vnt_private *priv)
priv->tx_context[ii] = tx_context;
tx_context->priv = priv;
tx_context->pkt_no = ii;
-
- /* allocate URBs */
- tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_context->urb) {
- ret = -ENOMEM;
- goto free_tx;
- }
-
tx_context->in_use = false;
}
@@ -683,15 +733,14 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
priv->bb_type = BB_TYPE_11G;
}
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- if (priv->bb_type == BB_TYPE_11B)
- priv->current_rate = RATE_1M;
- else
- priv->current_rate = RATE_54M;
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
+ vnt_rf_setpower(priv, conf->chandef.chan);
- vnt_rf_setpower(priv, priv->current_rate,
- conf->chandef.chan->hw_value);
- }
+ if (conf->flags & (IEEE80211_CONF_OFFCHANNEL | IEEE80211_CONF_IDLE))
+ /* Set max sensitivity */
+ vnt_update_pre_ed_threshold(priv, true);
+ else
+ vnt_update_pre_ed_threshold(priv, false);
return 0;
}
@@ -718,10 +767,10 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (conf->use_short_preamble) {
vnt_mac_enable_barker_preamble_mode(priv);
- priv->preamble_type = true;
+ priv->preamble_type = PREAMBLE_SHORT;
} else {
vnt_mac_disable_barker_preamble_mode(priv);
- priv->preamble_type = false;
+ priv->preamble_type = PREAMBLE_LONG;
}
}
@@ -740,16 +789,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
vnt_set_short_slot_time(priv);
vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
- vnt_update_pre_ed_threshold(priv, false);
}
if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE |
BSS_CHANGED_ERP_SLOT))
vnt_set_bss_mode(priv);
- if (changed & BSS_CHANGED_TXPOWER)
- vnt_rf_setpower(priv, priv->current_rate,
- conf->chandef.chan->hw_value);
+ if (changed & (BSS_CHANGED_TXPOWER | BSS_CHANGED_BANDWIDTH))
+ vnt_rf_setpower(priv, conf->chandef.chan);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->usb->dev,
@@ -767,10 +814,17 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
priv->op_mode != NL80211_IFTYPE_AP) {
if (conf->assoc && conf->beacon_rate) {
+ u16 ps_beacon_int = conf->beacon_int;
+
+ if (conf->dtim_period)
+ ps_beacon_int *= conf->dtim_period;
+ else if (hw->conf.listen_interval)
+ ps_beacon_int *= hw->conf.listen_interval;
+
vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL,
TFTCTL_TSFCNTREN);
- vnt_mac_set_beacon_interval(priv, conf->beacon_int);
+ vnt_mac_set_beacon_interval(priv, ps_beacon_int);
vnt_reset_next_tbtt(priv, conf->beacon_int);
@@ -778,7 +832,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
conf->sync_tsf, priv->current_tsf);
vnt_update_next_tbtt(priv,
- conf->sync_tsf, conf->beacon_int);
+ conf->sync_tsf, ps_beacon_int);
} else {
vnt_clear_current_tsf(priv);
@@ -868,25 +922,6 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return 0;
}
-static void vnt_sw_scan_start(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *addr)
-{
- struct vnt_private *priv = hw->priv;
-
- /* Set max sensitivity*/
- vnt_update_pre_ed_threshold(priv, true);
-}
-
-static void vnt_sw_scan_complete(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct vnt_private *priv = hw->priv;
-
- /* Return sensitivity to channel level*/
- vnt_update_pre_ed_threshold(priv, false);
-}
-
static int vnt_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -932,8 +967,6 @@ static const struct ieee80211_ops vnt_mac_ops = {
.prepare_multicast = vnt_prepare_multicast,
.configure_filter = vnt_configure,
.set_key = vnt_set_key,
- .sw_scan_start = vnt_sw_scan_start,
- .sw_scan_complete = vnt_sw_scan_complete,
.get_stats = vnt_get_stats,
.get_tsf = vnt_get_tsf,
.set_tsf = vnt_set_tsf,
@@ -1010,6 +1043,8 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
ieee80211_hw_set(priv->hw, SUPPORTS_PS);
ieee80211_hw_set(priv->hw, PS_NULLFUNC_STACK);
+ priv->hw->extra_tx_headroom =
+ sizeof(struct vnt_tx_buffer) + sizeof(struct vnt_tx_usb_header);
priv->hw->max_signal = 100;
SET_IEEE80211_DEV(priv->hw, &intf->dev);
@@ -1078,3 +1113,5 @@ static struct usb_driver vt6656_driver = {
};
module_usb_driver(vt6656_driver);
+
+MODULE_FIRMWARE(FIRMWARE_NAME);
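In vnt_bss_info_changed() above, the programmed beacon interval is scaled by the DTIM period (or, failing that, the listen interval) so that a MAC in power-save mode only wakes for beacons that can carry buffered traffic. The scaling, distilled into a standalone helper with a worked value (sketch; the helper name is hypothetical):

        static u16 vnt_ps_beacon_interval(u16 beacon_int, u8 dtim_period,
                                          u16 listen_interval)
        {
                if (dtim_period)
                        return beacon_int * dtim_period;
                if (listen_interval)
                        return beacon_int * listen_interval;
                return beacon_int;
        }

        /* e.g. beacon_int = 100 TU, dtim_period = 3 -> 300 TU:
         * the device wakes only for DTIM beacons.
         */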
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index 7a086c72d5a8..2f49c870272a 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -63,41 +63,29 @@ void vnt_enable_power_saving(struct vnt_private *priv, u16 listen_interval)
*/
vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_GO2DOZE);
- if (listen_interval >= 2) {
- /* clear always listen beacon */
- vnt_mac_reg_bits_off(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
-
- /* first time set listen next beacon */
- vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_LNBCN);
- } else {
- /* always listen beacon */
- vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
- }
+ /* always listen beacon */
+ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
dev_dbg(&priv->usb->dev, "PS:Power Saving Mode Enable...\n");
}
-/*
- *
- * Routine Description:
- * Disable hw power saving functions
- *
- * Return Value:
- * None.
- *
- */
-
-void vnt_disable_power_saving(struct vnt_private *priv)
+int vnt_disable_power_saving(struct vnt_private *priv)
{
+ int ret;
+
/* disable power saving hw function */
- vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0,
- 0, 0, NULL);
+ ret = vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0,
+ 0, 0, NULL);
+ if (ret)
+ return ret;
/* clear AutoSleep */
vnt_mac_reg_bits_off(priv, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
/* set always listen beacon */
vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
+
+ return 0;
}
/*
diff --git a/drivers/staging/vt6656/power.h b/drivers/staging/vt6656/power.h
index 58755ae16e5a..160872026db3 100644
--- a/drivers/staging/vt6656/power.h
+++ b/drivers/staging/vt6656/power.h
@@ -18,7 +18,7 @@
#define C_PWBT 1000 /* micro sec. power up before TBTT */
-void vnt_disable_power_saving(struct vnt_private *priv);
+int vnt_disable_power_saving(struct vnt_private *priv);
void vnt_enable_power_saving(struct vnt_private *priv, u16 listen_interval);
int vnt_next_tbtt_wakeup(struct vnt_private *priv);
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 43237b7e1dbe..5b8da06e3916 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -21,22 +21,16 @@
*
*/
+#include <linux/errno.h>
#include "mac.h"
#include "rf.h"
#include "baseband.h"
#include "usbpipe.h"
#define CB_AL2230_INIT_SEQ 15
-#define AL2230_PWR_IDX_LEN 64
-
#define CB_AL7230_INIT_SEQ 16
-#define AL7230_PWR_IDX_LEN 64
-
#define CB_VT3226_INIT_SEQ 11
-#define VT3226_PWR_IDX_LEN 64
-
#define CB_VT3342_INIT_SEQ 13
-#define VT3342_PWR_IDX_LEN 64
static u8 al2230_init_table[CB_AL2230_INIT_SEQ][3] = {
{0x03, 0xf7, 0x90},
@@ -518,72 +512,45 @@ static u8 vt3342_channel_table1[CB_MAX_CHANNEL][3] = {
{0x03, 0x00, 0x04}
};
-/* Power Table */
-static const u32 al2230_power_table[AL2230_PWR_IDX_LEN] = {
- 0x04040900,
- 0x04041900,
- 0x04042900,
- 0x04043900,
- 0x04044900,
- 0x04045900,
- 0x04046900,
- 0x04047900,
- 0x04048900,
- 0x04049900,
- 0x0404a900,
- 0x0404b900,
- 0x0404c900,
- 0x0404d900,
- 0x0404e900,
- 0x0404f900,
- 0x04050900,
- 0x04051900,
- 0x04052900,
- 0x04053900,
- 0x04054900,
- 0x04055900,
- 0x04056900,
- 0x04057900,
- 0x04058900,
- 0x04059900,
- 0x0405a900,
- 0x0405b900,
- 0x0405c900,
- 0x0405d900,
- 0x0405e900,
- 0x0405f900,
- 0x04060900,
- 0x04061900,
- 0x04062900,
- 0x04063900,
- 0x04064900,
- 0x04065900,
- 0x04066900,
- 0x04067900,
- 0x04068900,
- 0x04069900,
- 0x0406a900,
- 0x0406b900,
- 0x0406c900,
- 0x0406d900,
- 0x0406e900,
- 0x0406f900,
- 0x04070900,
- 0x04071900,
- 0x04072900,
- 0x04073900,
- 0x04074900,
- 0x04075900,
- 0x04076900,
- 0x04077900,
- 0x04078900,
- 0x04079900,
- 0x0407a900,
- 0x0407b900,
- 0x0407c900,
- 0x0407d900,
- 0x0407e900,
- 0x0407f900
+enum {
+ VNT_TABLE_INIT = 0,
+ VNT_TABLE_INIT_2 = 0,
+ VNT_TABLE_0 = 1,
+ VNT_TABLE_1 = 2,
+ VNT_TABLE_2 = 1
+};
+
+struct vnt_table_info {
+ u8 *addr;
+ int length;
+};
+
+static const struct vnt_table_info vnt_table_seq[][3] = {
+ { /* RF_AL2230, RF_AL2230S init table, channel table 0 and 1 */
+ {&al2230_init_table[0][0], CB_AL2230_INIT_SEQ * 3},
+ {&al2230_channel_table0[0][0], CB_MAX_CHANNEL_24G * 3},
+ {&al2230_channel_table1[0][0], CB_MAX_CHANNEL_24G * 3}
+ }, { /* RF_AIROHA7230 init table, channel table 0 and 1 */
+ {&al7230_init_table[0][0], CB_AL7230_INIT_SEQ * 3},
+ {&al7230_channel_table0[0][0], CB_MAX_CHANNEL * 3},
+ {&al7230_channel_table1[0][0], CB_MAX_CHANNEL * 3}
+ }, { /* RF_VT3226 init table, channel table 0 and 1 */
+ {&vt3226_init_table[0][0], CB_VT3226_INIT_SEQ * 3},
+ {&vt3226_channel_table0[0][0], CB_MAX_CHANNEL_24G * 3},
+ {&vt3226_channel_table1[0][0], CB_MAX_CHANNEL_24G * 3}
+ }, { /* RF_VT3226D0 init table, channel table 0 and 1 */
+ {&vt3226d0_init_table[0][0], CB_VT3226_INIT_SEQ * 3},
+ {&vt3226_channel_table0[0][0], CB_MAX_CHANNEL_24G * 3},
+ {&vt3226_channel_table1[0][0], CB_MAX_CHANNEL_24G * 3}
+ }, { /* RF_VT3342A0 init table, channel table 0 and 1 */
+ {&vt3342a0_init_table[0][0], CB_VT3342_INIT_SEQ * 3},
+ {&vt3342_channel_table0[0][0], CB_MAX_CHANNEL * 3},
+ {&vt3342_channel_table1[0][0], CB_MAX_CHANNEL * 3}
+ }, { /* RF_AIROHA7230 init table 2 and channel table 2 */
+ {&al7230_init_table_amode[0][0], CB_AL7230_INIT_SEQ * 3},
+ {&al7230_channel_table2[0][0], CB_MAX_CHANNEL * 3},
+ {NULL, 0}
+ }
};
/*
@@ -600,124 +567,90 @@ int vnt_rf_write_embedded(struct vnt_private *priv, u32 data)
reg_data[2] = (u8)(data >> 16);
reg_data[3] = (u8)(data >> 24);
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_IFRF,
- 0, 0, ARRAY_SIZE(reg_data), reg_data);
-
- return true;
-}
-
-/* Set Tx power by rate and channel number */
-int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel)
-{
- u8 power = priv->cck_pwr;
-
- if (channel == 0)
- return -EINVAL;
-
- switch (rate) {
- case RATE_1M:
- case RATE_2M:
- case RATE_5M:
- case RATE_11M:
- channel--;
-
- if (channel < sizeof(priv->cck_pwr_tbl))
- power = priv->cck_pwr_tbl[channel];
- break;
- case RATE_6M:
- case RATE_9M:
- case RATE_12M:
- case RATE_18M:
- case RATE_24M:
- case RATE_36M:
- case RATE_48M:
- case RATE_54M:
- if (channel > CB_MAX_CHANNEL_24G)
- power = priv->ofdm_a_pwr_tbl[channel - 15];
- else
- power = priv->ofdm_pwr_tbl[channel - 1];
- break;
- }
-
- return vnt_rf_set_txpower(priv, power, rate);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_IFRF, 0, 0,
+ ARRAY_SIZE(reg_data), reg_data);
}
static u8 vnt_rf_addpower(struct vnt_private *priv)
{
+ int base;
s32 rssi = -priv->current_rssi;
if (!rssi)
return 7;
- if (priv->rf_type == RF_VT3226D0) {
- if (rssi < -70)
- return 9;
- else if (rssi < -65)
- return 7;
- else if (rssi < -60)
- return 5;
- } else {
- if (rssi < -80)
- return 9;
- else if (rssi < -75)
- return 7;
- else if (rssi < -70)
- return 5;
- }
+ if (priv->rf_type == RF_VT3226D0)
+ base = -60;
+ else
+ base = -70;
+
+ if (rssi < base)
+ return ((rssi - base + 1) / -5) * 2 + 5;
return 0;
}
/* Set Tx power by power level and rate */
-int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
+static int vnt_rf_set_txpower(struct vnt_private *priv, u8 power,
+ struct ieee80211_channel *ch)
{
u32 power_setting = 0;
- int ret = true;
+ int ret = 0;
power += vnt_rf_addpower(priv);
if (power > VNT_RF_MAX_POWER)
power = VNT_RF_MAX_POWER;
if (priv->power == power)
- return true;
+ return 0;
priv->power = power;
switch (priv->rf_type) {
case RF_AL2230:
- if (power >= AL2230_PWR_IDX_LEN)
- return false;
+ power_setting = 0x04040900 | (power << 12);
- ret &= vnt_rf_write_embedded(priv, al2230_power_table[power]);
+ ret = vnt_rf_write_embedded(priv, power_setting);
+ if (ret)
+ return ret;
- if (rate <= RATE_11M)
- ret &= vnt_rf_write_embedded(priv, 0x0001b400);
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM)
+ ret = vnt_rf_write_embedded(priv, 0x0001b400);
else
- ret &= vnt_rf_write_embedded(priv, 0x0005a400);
+ ret = vnt_rf_write_embedded(priv, 0x0005a400);
+
break;
case RF_AL2230S:
- if (power >= AL2230_PWR_IDX_LEN)
- return false;
+ power_setting = 0x04040900 | (power << 12);
- ret &= vnt_rf_write_embedded(priv, al2230_power_table[power]);
+ ret = vnt_rf_write_embedded(priv, power_setting);
+ if (ret)
+ return ret;
- if (rate <= RATE_11M) {
- ret &= vnt_rf_write_embedded(priv, 0x040c1400);
- ret &= vnt_rf_write_embedded(priv, 0x00299b00);
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM) {
+ ret = vnt_rf_write_embedded(priv, 0x040c1400);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x00299b00);
} else {
- ret &= vnt_rf_write_embedded(priv, 0x0005a400);
- ret &= vnt_rf_write_embedded(priv, 0x00099b00);
+ ret = vnt_rf_write_embedded(priv, 0x0005a400);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x00099b00);
}
+
break;
case RF_AIROHA7230:
- if (rate <= RATE_11M)
- ret &= vnt_rf_write_embedded(priv, 0x111bb900);
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM)
+ ret = vnt_rf_write_embedded(priv, 0x111bb900);
else
- ret &= vnt_rf_write_embedded(priv, 0x221bb900);
+ ret = vnt_rf_write_embedded(priv, 0x221bb900);
- if (power >= AL7230_PWR_IDX_LEN)
- return false;
+ if (ret)
+ return ret;
/*
* 0x080F1B00 for 3 wire control TxGain(D10)
@@ -725,61 +658,68 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
*/
power_setting = 0x080c0b00 | (power << 12);
- ret &= vnt_rf_write_embedded(priv, power_setting);
-
+ ret = vnt_rf_write_embedded(priv, power_setting);
break;
case RF_VT3226:
- if (power >= VT3226_PWR_IDX_LEN)
- return false;
power_setting = ((0x3f - power) << 20) | (0x17 << 8);
- ret &= vnt_rf_write_embedded(priv, power_setting);
-
+ ret = vnt_rf_write_embedded(priv, power_setting);
break;
case RF_VT3226D0:
- if (power >= VT3226_PWR_IDX_LEN)
- return false;
-
- if (rate <= RATE_11M) {
- u16 hw_value = priv->hw->conf.chandef.chan->hw_value;
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM) {
+ u16 hw_value = ch->hw_value;
power_setting = ((0x3f - power) << 20) | (0xe07 << 8);
- ret &= vnt_rf_write_embedded(priv, power_setting);
- ret &= vnt_rf_write_embedded(priv, 0x03c6a200);
+ ret = vnt_rf_write_embedded(priv, power_setting);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x03c6a200);
+ if (ret)
+ return ret;
dev_dbg(&priv->usb->dev,
"%s 11b channel [%d]\n", __func__, hw_value);
hw_value--;
- if (hw_value < ARRAY_SIZE(vt3226d0_lo_current_table))
- ret &= vnt_rf_write_embedded(priv,
+ if (hw_value < ARRAY_SIZE(vt3226d0_lo_current_table)) {
+ ret = vnt_rf_write_embedded(priv,
vt3226d0_lo_current_table[hw_value]);
+ if (ret)
+ return ret;
+ }
- ret &= vnt_rf_write_embedded(priv, 0x015C0800);
+ ret = vnt_rf_write_embedded(priv, 0x015C0800);
} else {
dev_dbg(&priv->usb->dev,
"@@@@ %s> 11G mode\n", __func__);
power_setting = ((0x3f - power) << 20) | (0x7 << 8);
- ret &= vnt_rf_write_embedded(priv, power_setting);
- ret &= vnt_rf_write_embedded(priv, 0x00C6A200);
- ret &= vnt_rf_write_embedded(priv, 0x016BC600);
- ret &= vnt_rf_write_embedded(priv, 0x00900800);
+ ret = vnt_rf_write_embedded(priv, power_setting);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x00C6A200);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x016BC600);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x00900800);
}
+
break;
case RF_VT3342A0:
- if (power >= VT3342_PWR_IDX_LEN)
- return false;
-
power_setting = ((0x3f - power) << 20) | (0x27 << 8);
- ret &= vnt_rf_write_embedded(priv, power_setting);
-
+ ret = vnt_rf_write_embedded(priv, power_setting);
break;
default:
break;
@@ -787,6 +727,36 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
return ret;
}
+/* Set Tx power by channel number and band */
+int vnt_rf_setpower(struct vnt_private *priv,
+ struct ieee80211_channel *ch)
+{
+ u16 channel;
+ u8 power = priv->cck_pwr;
+
+ if (!ch)
+ return -EINVAL;
+
+ /* convert channel number to a table index */
+ channel = ch->hw_value - 1;
+
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM) {
+ if (channel < ARRAY_SIZE(priv->cck_pwr_tbl))
+ power = priv->cck_pwr_tbl[channel];
+ } else if (ch->band == NL80211_BAND_5GHZ) {
+ /* skip the 14 2.4 GHz channels to index the 5 GHz table */
+ channel -= 14;
+
+ if (channel < ARRAY_SIZE(priv->ofdm_a_pwr_tbl))
+ power = priv->ofdm_a_pwr_tbl[channel];
+ } else {
+ if (channel < ARRAY_SIZE(priv->ofdm_pwr_tbl))
+ power = priv->ofdm_pwr_tbl[channel];
+ }
+
+ return vnt_rf_set_txpower(priv, power, ch);
+}
+
/* Convert rssi to dbm */
void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm)
{
@@ -813,140 +783,73 @@ void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm)
int vnt_rf_table_download(struct vnt_private *priv)
{
- int ret = 0;
- u16 length1 = 0, length2 = 0, length3 = 0;
- u8 *addr1 = NULL, *addr2 = NULL, *addr3 = NULL;
- u16 length, value;
- u8 array[256];
+ int ret;
+ int idx = -1;
+ const struct vnt_table_info *table_seq;
switch (priv->rf_type) {
case RF_AL2230:
case RF_AL2230S:
- length1 = CB_AL2230_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL_24G * 3;
- length3 = CB_MAX_CHANNEL_24G * 3;
- addr1 = &al2230_init_table[0][0];
- addr2 = &al2230_channel_table0[0][0];
- addr3 = &al2230_channel_table1[0][0];
+ idx = 0;
break;
case RF_AIROHA7230:
- length1 = CB_AL7230_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL * 3;
- length3 = CB_MAX_CHANNEL * 3;
- addr1 = &al7230_init_table[0][0];
- addr2 = &al7230_channel_table0[0][0];
- addr3 = &al7230_channel_table1[0][0];
+ idx = 1;
break;
case RF_VT3226:
- length1 = CB_VT3226_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL_24G * 3;
- length3 = CB_MAX_CHANNEL_24G * 3;
- addr1 = &vt3226_init_table[0][0];
- addr2 = &vt3226_channel_table0[0][0];
- addr3 = &vt3226_channel_table1[0][0];
+ idx = 2;
break;
case RF_VT3226D0:
- length1 = CB_VT3226_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL_24G * 3;
- length3 = CB_MAX_CHANNEL_24G * 3;
- addr1 = &vt3226d0_init_table[0][0];
- addr2 = &vt3226_channel_table0[0][0];
- addr3 = &vt3226_channel_table1[0][0];
+ idx = 3;
break;
case RF_VT3342A0:
- length1 = CB_VT3342_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL * 3;
- length3 = CB_MAX_CHANNEL * 3;
- addr1 = &vt3342a0_init_table[0][0];
- addr2 = &vt3342_channel_table0[0][0];
- addr3 = &vt3342_channel_table1[0][0];
+ idx = 4;
break;
}
- /* Init Table */
- memcpy(array, addr1, length1);
+ if (idx < 0)
+ return 0;
+ table_seq = &vnt_table_seq[idx][0];
+
+ /* Init Table */
ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_RF_INIT, length1, array);
+ MESSAGE_REQUEST_RF_INIT,
+ table_seq[VNT_TABLE_INIT].length,
+ table_seq[VNT_TABLE_INIT].addr);
if (ret)
- goto end;
+ return ret;
/* Channel Table 0 */
- value = 0;
- while (length2 > 0) {
- if (length2 >= 64)
- length = 64;
- else
- length = length2;
-
- memcpy(array, addr2, length);
-
- ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, value,
- MESSAGE_REQUEST_RF_CH0, length, array);
- if (ret)
- goto end;
-
- length2 -= length;
- value += length;
- addr2 += length;
- }
-
- /* Channel table 1 */
- value = 0;
- while (length3 > 0) {
- if (length3 >= 64)
- length = 64;
- else
- length = length3;
-
- memcpy(array, addr3, length);
+ ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+ MESSAGE_REQUEST_RF_CH0,
+ table_seq[VNT_TABLE_0].length,
+ table_seq[VNT_TABLE_0].addr);
+ if (ret)
+ return ret;
- ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, value,
- MESSAGE_REQUEST_RF_CH1, length, array);
- if (ret)
- goto end;
-
- length3 -= length;
- value += length;
- addr3 += length;
- }
+ /* Channel Table 1 */
+ ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+ MESSAGE_REQUEST_RF_CH1,
+ table_seq[VNT_TABLE_1].length,
+ table_seq[VNT_TABLE_1].addr);
if (priv->rf_type == RF_AIROHA7230) {
- length1 = CB_AL7230_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL * 3;
- addr1 = &al7230_init_table_amode[0][0];
- addr2 = &al7230_channel_table2[0][0];
-
- memcpy(array, addr1, length1);
+ table_seq = &vnt_table_seq[5][0];
/* Init Table 2 */
ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_RF_INIT2, length1, array);
+ MESSAGE_REQUEST_RF_INIT2,
+ table_seq[VNT_TABLE_INIT_2].length,
+ table_seq[VNT_TABLE_INIT_2].addr);
if (ret)
- goto end;
+ return ret;
- /* Channel Table 0 */
- value = 0;
- while (length2 > 0) {
- if (length2 >= 64)
- length = 64;
- else
- length = length2;
-
- memcpy(array, addr2, length);
-
- ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, value,
- MESSAGE_REQUEST_RF_CH2, length,
- array);
- if (ret)
- goto end;
-
- length2 -= length;
- value += length;
- addr2 += length;
- }
+ /* Channel Table 2 */
+ ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+ MESSAGE_REQUEST_RF_CH2,
+ table_seq[VNT_TABLE_2].length,
+ table_seq[VNT_TABLE_2].addr);
}
-end:
return ret;
}
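The vnt_rf_addpower() rewrite above replaces the RSSI threshold ladder with a single expression. Spot-checking the formula against the old behaviour for the non-VT3226D0 case (base = -70), using C integer division:

        /* rssi = -72: (-72 + 70 + 1) / -5 = 0 -> 0 * 2 + 5 = 5
         * rssi = -77: (-77 + 70 + 1) / -5 = 1 -> 1 * 2 + 5 = 7
         * rssi = -82: (-82 + 70 + 1) / -5 = 2 -> 2 * 2 + 5 = 9
         *
         * matching the removed "< -70 -> 5, < -75 -> 7, < -80 -> 9" ladder.
         */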
diff --git a/drivers/staging/vt6656/rf.h b/drivers/staging/vt6656/rf.h
index 7494546d71b8..493faaf4e2b5 100644
--- a/drivers/staging/vt6656/rf.h
+++ b/drivers/staging/vt6656/rf.h
@@ -41,8 +41,7 @@
#define VNT_RF_REG_LEN 0x17 /* 24 bit length */
int vnt_rf_write_embedded(struct vnt_private *priv, u32 data);
-int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel);
-int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate);
+int vnt_rf_setpower(struct vnt_private *priv, struct ieee80211_channel *ch);
void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm);
int vnt_rf_table_download(struct vnt_private *priv);
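rf.c now delegates the 64-byte chunking of channel-table uploads to vnt_control_out_blocks(), added elsewhere in this series (usbpipe.c, not shown here). A minimal sketch of what such a block writer does, assuming the existing vnt_control_out() signature:

        static int vnt_control_out_blocks_sketch(struct vnt_private *priv,
                                                 u16 block, u8 reg,
                                                 u16 length, u8 *data)
        {
                int ret = 0, i;

                for (i = 0; i < length; i += block) {
                        u16 len = min_t(int, length - i, block);

                        /* running offset goes in the request's value field */
                        ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, i,
                                              reg, len, data + i);
                        if (ret)
                                break;
                }

                return ret;
        }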
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 9439d190f431..5dd6b4d2bf20 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -13,8 +13,6 @@
*
* Functions:
* vnt_generate_tx_parameter - Generate tx dma required parameter.
- * vnt_get_rtscts_duration_le- get rtx/cts required duration
- * vnt_get_rtscts_rsvtime_le- get rts/cts reserved time
* vnt_get_rsvtime - get frame reserved time
* vnt_fill_cts_head - fill CTS control header
*
@@ -38,13 +36,24 @@ static const u16 vnt_time_stampoff[2][MAX_RATE] = {
{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23},
};
-#define RTSDUR_BB 0
-#define RTSDUR_BA 1
-#define RTSDUR_AA 2
-#define CTSDUR_BA 3
#define DATADUR_B 10
#define DATADUR_A 11
+static const u8 vnt_phy_signal[] = {
+ 0x00, /* RATE_1M */
+ 0x01, /* RATE_2M */
+ 0x02, /* RATE_5M */
+ 0x03, /* RATE_11M */
+ 0x8b, /* RATE_6M */
+ 0x8f, /* RATE_9M */
+ 0x8a, /* RATE_12M */
+ 0x8e, /* RATE_18M */
+ 0x89, /* RATE_24M */
+ 0x8d, /* RATE_36M */
+ 0x88, /* RATE_48M */
+ 0x8c /* RATE_54M */
+};
+
static struct vnt_usb_send_context
*vnt_get_free_context(struct vnt_private *priv)
{
@@ -60,11 +69,6 @@ static struct vnt_usb_send_context
context = priv->tx_context[ii];
if (!context->in_use) {
context->in_use = true;
- memset(context->data, 0,
- MAX_TOTAL_SIZE_WITH_ALL_HEADERS);
-
- context->hdr = NULL;
-
return context;
}
}
@@ -78,146 +82,105 @@ static struct vnt_usb_send_context
return NULL;
}
-static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
+/* Get Length, Service, and Signal fields of Phy for Tx */
+static void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
+ u16 tx_rate, u8 pkt_type,
+ struct vnt_phy_field *phy)
{
- return cpu_to_le16(vnt_time_stampoff[priv->preamble_type % 2]
- [rate % MAX_RATE]);
-}
+ u32 bit_count;
+ u32 count = 0;
+ u32 tmp;
+ int ext_bit;
+ int i;
+ u8 mask = 0;
+ u8 preamble_type = priv->preamble_type;
+
+ bit_count = frame_length * 8;
+ ext_bit = false;
+
+ switch (tx_rate) {
+ case RATE_1M:
+ count = bit_count;
+ break;
+ case RATE_2M:
+ count = bit_count / 2;
+ break;
+ case RATE_5M:
+ count = DIV_ROUND_UP(bit_count * 10, 55);
+ break;
+ case RATE_11M:
+ count = bit_count / 11;
+ tmp = count * 11;
-static u32 vnt_get_rsvtime(struct vnt_private *priv, u8 pkt_type,
- u32 frame_length, u16 rate, int need_ack)
-{
- u32 data_time, ack_time;
+ if (tmp != bit_count) {
+ count++;
- data_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- frame_length, rate);
+ if ((bit_count - tmp) <= 3)
+ ext_bit = true;
+ }
+
+ break;
+ }
- if (pkt_type == PK_TYPE_11B)
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
- (u16)priv->top_cck_basic_rate);
- else
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
- (u16)priv->top_ofdm_basic_rate);
+ if (tx_rate > RATE_11M) {
+ if (pkt_type == PK_TYPE_11A)
+ mask = BIT(4);
+ } else if (tx_rate > RATE_1M) {
+ if (preamble_type == PREAMBLE_SHORT)
+ mask = BIT(3);
+ }
- if (need_ack)
- return data_time + priv->sifs + ack_time;
+ i = tx_rate > RATE_54M ? RATE_54M : tx_rate;
+ phy->signal = vnt_phy_signal[i] | mask;
+ phy->service = 0x00;
- return data_time;
+ if (pkt_type == PK_TYPE_11B) {
+ if (ext_bit)
+ phy->service |= 0x80;
+ phy->len = cpu_to_le16((u16)count);
+ } else {
+ phy->len = cpu_to_le16((u16)frame_length);
+ }
}
-static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
- u32 frame_length, u16 rate, int need_ack)
+static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
{
- return cpu_to_le16((u16)vnt_get_rsvtime(priv, pkt_type,
- frame_length, rate, need_ack));
+ return cpu_to_le16(vnt_time_stampoff[priv->preamble_type % 2]
+ [rate % MAX_RATE]);
}
-static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv, u8 rsv_type,
- u8 pkt_type, u32 frame_length,
- u16 current_rate)
+static __le16 vnt_rxtx_rsvtime_le16(struct vnt_usb_send_context *context)
{
- u32 rrv_time, rts_time, cts_time, ack_time, data_time;
-
- rrv_time = 0;
- rts_time = 0;
- cts_time = 0;
- ack_time = 0;
-
- data_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- frame_length, current_rate);
-
- if (rsv_type == 0) {
- rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 20, priv->top_cck_basic_rate);
- ack_time = vnt_get_frame_time(priv->preamble_type,
- pkt_type, 14,
- priv->top_cck_basic_rate);
- cts_time = ack_time;
-
- } else if (rsv_type == 1) {
- rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 20, priv->top_cck_basic_rate);
- cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_cck_basic_rate);
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_ofdm_basic_rate);
- } else if (rsv_type == 2) {
- rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 20, priv->top_ofdm_basic_rate);
- ack_time = vnt_get_frame_time(priv->preamble_type,
- pkt_type, 14,
- priv->top_ofdm_basic_rate);
- cts_time = ack_time;
-
- } else if (rsv_type == 3) {
- cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_cck_basic_rate);
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_ofdm_basic_rate);
-
- rrv_time = cts_time + ack_time + data_time + 2 * priv->sifs;
-
- return cpu_to_le16((u16)rrv_time);
- }
-
- rrv_time = rts_time + cts_time + ack_time + data_time + 3 * priv->sifs;
+ struct vnt_private *priv = context->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(context->skb);
+ struct ieee80211_rate *rate = ieee80211_get_tx_rate(priv->hw, info);
- return cpu_to_le16((u16)rrv_time);
+ return ieee80211_generic_frame_duration(priv->hw,
+ info->control.vif, info->band,
+ context->frame_len,
+ rate);
}
-static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
- u8 dur_type, u8 pkt_type, u16 rate)
+static __le16 vnt_get_rts_duration(struct vnt_usb_send_context *context)
{
struct vnt_private *priv = context->priv;
- u32 cts_time = 0, dur_time = 0;
- u32 frame_length = context->frame_len;
- u8 need_ack = context->need_ack;
-
- switch (dur_type) {
- case RTSDUR_BB:
- case RTSDUR_BA:
- cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_cck_basic_rate);
- dur_time = cts_time + 2 * priv->sifs +
- vnt_get_rsvtime(priv, pkt_type,
- frame_length, rate, need_ack);
- break;
-
- case RTSDUR_AA:
- cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_ofdm_basic_rate);
- dur_time = cts_time + 2 * priv->sifs +
- vnt_get_rsvtime(priv, pkt_type,
- frame_length, rate, need_ack);
- break;
-
- case CTSDUR_BA:
- dur_time = priv->sifs + vnt_get_rsvtime(priv,
- pkt_type, frame_length, rate, need_ack);
- break;
-
- default:
- break;
- }
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(context->skb);
- return cpu_to_le16((u16)dur_time);
+ return ieee80211_rts_duration(priv->hw, priv->vif,
+ context->frame_len, info);
}
-static u16 vnt_mac_hdr_pos(struct vnt_usb_send_context *tx_context,
- struct ieee80211_hdr *hdr)
+static __le16 vnt_get_cts_duration(struct vnt_usb_send_context *context)
{
- u8 *head = tx_context->data + offsetof(struct vnt_tx_buffer, fifo_head);
- u8 *hdr_pos = (u8 *)hdr;
-
- tx_context->hdr = hdr;
- if (!tx_context->hdr)
- return 0;
+ struct vnt_private *priv = context->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(context->skb);
- return (u16)(hdr_pos - head);
+ return ieee80211_ctstoself_duration(priv->hw, priv->vif,
+ context->frame_len, info);
}
-static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
- struct vnt_tx_datahead_g *buf)
+static void vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
+ struct vnt_tx_datahead_g *buf)
{
struct vnt_private *priv = tx_context->priv;
struct ieee80211_hdr *hdr =
@@ -236,14 +199,10 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
buf->time_stamp_off_b = vnt_time_stamp_off(priv,
priv->top_cck_basic_rate);
-
- tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr);
-
- return le16_to_cpu(buf->duration_a);
}
-static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
- struct vnt_tx_datahead_ab *buf)
+static void vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
+ struct vnt_tx_datahead_ab *buf)
{
struct vnt_private *priv = tx_context->priv;
struct ieee80211_hdr *hdr =
@@ -258,14 +217,10 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
/* Get Duration and TimeStampOff */
buf->duration = hdr->duration_id;
buf->time_stamp_off = vnt_time_stamp_off(priv, rate);
-
- tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr);
-
- return le16_to_cpu(buf->duration);
}
-static int vnt_fill_ieee80211_rts(struct vnt_usb_send_context *tx_context,
- struct ieee80211_rts *rts, __le16 duration)
+static void vnt_fill_ieee80211_rts(struct vnt_usb_send_context *tx_context,
+ struct ieee80211_rts *rts, __le16 duration)
{
struct ieee80211_hdr *hdr =
(struct ieee80211_hdr *)tx_context->skb->data;
@@ -276,72 +231,56 @@ static int vnt_fill_ieee80211_rts(struct vnt_usb_send_context *tx_context,
ether_addr_copy(rts->ra, hdr->addr1);
ether_addr_copy(rts->ta, hdr->addr2);
-
- return 0;
}
-static u16 vnt_rxtx_rts_g_head(struct vnt_usb_send_context *tx_context,
- struct vnt_rts_g *buf)
+static void vnt_rxtx_rts_g_head(struct vnt_usb_send_context *tx_context,
+ struct vnt_rts_g *buf)
{
struct vnt_private *priv = tx_context->priv;
u16 rts_frame_len = 20;
- u16 current_rate = tx_context->tx_rate;
vnt_get_phy_field(priv, rts_frame_len, priv->top_cck_basic_rate,
PK_TYPE_11B, &buf->b);
vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate,
tx_context->pkt_type, &buf->a);
- buf->duration_bb = vnt_get_rtscts_duration_le(tx_context, RTSDUR_BB,
- PK_TYPE_11B,
- priv->top_cck_basic_rate);
- buf->duration_aa = vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA,
- tx_context->pkt_type,
- current_rate);
- buf->duration_ba = vnt_get_rtscts_duration_le(tx_context, RTSDUR_BA,
- tx_context->pkt_type,
- current_rate);
+ buf->duration_bb = vnt_get_rts_duration(tx_context);
+ buf->duration_aa = buf->duration_bb;
+ buf->duration_ba = buf->duration_bb;
vnt_fill_ieee80211_rts(tx_context, &buf->data, buf->duration_aa);
- return vnt_rxtx_datahead_g(tx_context, &buf->data_head);
+ vnt_rxtx_datahead_g(tx_context, &buf->data_head);
}
-static u16 vnt_rxtx_rts_ab_head(struct vnt_usb_send_context *tx_context,
- struct vnt_rts_ab *buf)
+static void vnt_rxtx_rts_ab_head(struct vnt_usb_send_context *tx_context,
+ struct vnt_rts_ab *buf)
{
struct vnt_private *priv = tx_context->priv;
- u16 current_rate = tx_context->tx_rate;
u16 rts_frame_len = 20;
vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate,
tx_context->pkt_type, &buf->ab);
- buf->duration = vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA,
- tx_context->pkt_type,
- current_rate);
+ buf->duration = vnt_get_rts_duration(tx_context);
vnt_fill_ieee80211_rts(tx_context, &buf->data, buf->duration);
- return vnt_rxtx_datahead_ab(tx_context, &buf->data_head);
+ vnt_rxtx_datahead_ab(tx_context, &buf->data_head);
}
-static u16 vnt_fill_cts_head(struct vnt_usb_send_context *tx_context,
- union vnt_tx_data_head *head)
+static void vnt_fill_cts_head(struct vnt_usb_send_context *tx_context,
+ union vnt_tx_data_head *head)
{
struct vnt_private *priv = tx_context->priv;
struct vnt_cts *buf = &head->cts_g;
u32 cts_frame_len = 14;
- u16 current_rate = tx_context->tx_rate;
/* Get SignalField,ServiceField,Length */
vnt_get_phy_field(priv, cts_frame_len, priv->top_cck_basic_rate,
PK_TYPE_11B, &buf->b);
/* Get CTSDuration_ba */
- buf->duration_ba =
- vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA,
- tx_context->pkt_type,
- current_rate);
+ buf->duration_ba = vnt_get_cts_duration(tx_context);
/*Get CTS Frame body*/
buf->data.duration = buf->duration_ba;
buf->data.frame_control =
@@ -349,127 +288,19 @@ static u16 vnt_fill_cts_head(struct vnt_usb_send_context *tx_context,
ether_addr_copy(buf->data.ra, priv->current_net_addr);
- return vnt_rxtx_datahead_g(tx_context, &buf->data_head);
+ vnt_rxtx_datahead_g(tx_context, &buf->data_head);
}
-static u16 vnt_rxtx_rts(struct vnt_usb_send_context *tx_context,
- union vnt_tx_head *tx_head, bool need_mic)
+/* returns true if mic_hdr is needed */
+static bool vnt_fill_txkey(struct vnt_tx_buffer *tx_buffer, struct sk_buff *skb)
{
- struct vnt_private *priv = tx_context->priv;
- struct vnt_rrv_time_rts *buf = &tx_head->tx_rts.rts;
- union vnt_tx_data_head *head = &tx_head->tx_rts.tx.head;
- u32 frame_len = tx_context->frame_len;
- u16 current_rate = tx_context->tx_rate;
- u8 need_ack = tx_context->need_ack;
-
- buf->rts_rrv_time_aa = vnt_get_rtscts_rsvtime_le(priv, 2,
- tx_context->pkt_type, frame_len, current_rate);
- buf->rts_rrv_time_ba = vnt_get_rtscts_rsvtime_le(priv, 1,
- tx_context->pkt_type, frame_len, current_rate);
- buf->rts_rrv_time_bb = vnt_get_rtscts_rsvtime_le(priv, 0,
- tx_context->pkt_type, frame_len, current_rate);
-
- buf->rrv_time_a = vnt_rxtx_rsvtime_le16(priv, tx_context->pkt_type,
- frame_len, current_rate,
- need_ack);
- buf->rrv_time_b = vnt_rxtx_rsvtime_le16(priv, PK_TYPE_11B, frame_len,
- priv->top_cck_basic_rate, need_ack);
-
- if (need_mic)
- head = &tx_head->tx_rts.tx.mic.head;
-
- return vnt_rxtx_rts_g_head(tx_context, &head->rts_g);
-}
-
-static u16 vnt_rxtx_cts(struct vnt_usb_send_context *tx_context,
- union vnt_tx_head *tx_head, bool need_mic)
-{
- struct vnt_private *priv = tx_context->priv;
- struct vnt_rrv_time_cts *buf = &tx_head->tx_cts.cts;
- union vnt_tx_data_head *head = &tx_head->tx_cts.tx.head;
- u32 frame_len = tx_context->frame_len;
- u16 current_rate = tx_context->tx_rate;
- u8 need_ack = tx_context->need_ack;
-
- buf->rrv_time_a = vnt_rxtx_rsvtime_le16(priv, tx_context->pkt_type,
- frame_len, current_rate, need_ack);
- buf->rrv_time_b = vnt_rxtx_rsvtime_le16(priv, PK_TYPE_11B,
- frame_len, priv->top_cck_basic_rate, need_ack);
-
- buf->cts_rrv_time_ba = vnt_get_rtscts_rsvtime_le(priv, 3,
- tx_context->pkt_type, frame_len, current_rate);
-
- if (need_mic)
- head = &tx_head->tx_cts.tx.mic.head;
-
- return vnt_fill_cts_head(tx_context, head);
-}
-
-static u16 vnt_rxtx_ab(struct vnt_usb_send_context *tx_context,
- union vnt_tx_head *tx_head, bool need_rts, bool need_mic)
-{
- struct vnt_private *priv = tx_context->priv;
- struct vnt_rrv_time_ab *buf = &tx_head->tx_ab.ab;
- union vnt_tx_data_head *head = &tx_head->tx_ab.tx.head;
- u32 frame_len = tx_context->frame_len;
- u16 current_rate = tx_context->tx_rate;
- u8 need_ack = tx_context->need_ack;
-
- buf->rrv_time = vnt_rxtx_rsvtime_le16(priv, tx_context->pkt_type,
- frame_len, current_rate, need_ack);
-
- if (need_mic)
- head = &tx_head->tx_ab.tx.mic.head;
-
- if (need_rts) {
- if (tx_context->pkt_type == PK_TYPE_11B)
- buf->rts_rrv_time = vnt_get_rtscts_rsvtime_le(priv, 0,
- tx_context->pkt_type, frame_len, current_rate);
- else /* PK_TYPE_11A */
- buf->rts_rrv_time = vnt_get_rtscts_rsvtime_le(priv, 2,
- tx_context->pkt_type, frame_len, current_rate);
-
- return vnt_rxtx_rts_ab_head(tx_context, &head->rts_ab);
- }
-
- return vnt_rxtx_datahead_ab(tx_context, &head->data_head_ab);
-}
-
-static u16 vnt_generate_tx_parameter(struct vnt_usb_send_context *tx_context,
- struct vnt_tx_buffer *tx_buffer,
- struct vnt_mic_hdr **mic_hdr, u32 need_mic,
- bool need_rts)
-{
- if (tx_context->pkt_type == PK_TYPE_11GB ||
- tx_context->pkt_type == PK_TYPE_11GA) {
- if (need_rts) {
- if (need_mic)
- *mic_hdr =
- &tx_buffer->tx_head.tx_rts.tx.mic.hdr;
-
- return vnt_rxtx_rts(tx_context, &tx_buffer->tx_head,
- need_mic);
- }
-
- if (need_mic)
- *mic_hdr = &tx_buffer->tx_head.tx_cts.tx.mic.hdr;
-
- return vnt_rxtx_cts(tx_context, &tx_buffer->tx_head, need_mic);
- }
-
- if (need_mic)
- *mic_hdr = &tx_buffer->tx_head.tx_ab.tx.mic.hdr;
-
- return vnt_rxtx_ab(tx_context, &tx_buffer->tx_head, need_rts, need_mic);
-}
-
-static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
- u8 *key_buffer, struct ieee80211_key_conf *tx_key,
- struct sk_buff *skb, u16 payload_len,
- struct vnt_mic_hdr *mic_hdr)
-{
- struct ieee80211_hdr *hdr = tx_context->hdr;
+ struct vnt_tx_fifo_head *fifo = &tx_buffer->fifo_head;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *tx_key = info->control.hw_key;
+ struct vnt_mic_hdr *mic_hdr;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u64 pn64;
+ u16 payload_len = skb->len;
u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
/* strip header and icv len from payload */
@@ -479,24 +310,31 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
switch (tx_key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- memcpy(key_buffer, iv, 3);
- memcpy(key_buffer + 3, tx_key->key, tx_key->keylen);
+ memcpy(fifo->tx_key, iv, 3);
+ memcpy(fifo->tx_key + 3, tx_key->key, tx_key->keylen);
if (tx_key->keylen == WLAN_KEY_LEN_WEP40) {
- memcpy(key_buffer + 8, iv, 3);
- memcpy(key_buffer + 11,
+ memcpy(fifo->tx_key + 8, iv, 3);
+ memcpy(fifo->tx_key + 11,
tx_key->key, WLAN_KEY_LEN_WEP40);
}
+ fifo->frag_ctl |= cpu_to_le16(FRAGCTL_LEGACY);
break;
case WLAN_CIPHER_SUITE_TKIP:
- ieee80211_get_tkip_p2k(tx_key, skb, key_buffer);
+ ieee80211_get_tkip_p2k(tx_key, skb, fifo->tx_key);
+ fifo->frag_ctl |= cpu_to_le16(FRAGCTL_TKIP);
break;
case WLAN_CIPHER_SUITE_CCMP:
-
- if (!mic_hdr)
- return;
+ if (info->control.use_cts_prot) {
+ if (info->control.use_rts)
+ mic_hdr = &tx_buffer->tx_head.tx_rts.tx.mic.hdr;
+ else
+ mic_hdr = &tx_buffer->tx_head.tx_cts.tx.mic.hdr;
+ } else {
+ mic_hdr = &tx_buffer->tx_head.tx_ab.tx.mic.hdr;
+ }
mic_hdr->id = 0x59;
mic_hdr->payload_len = cpu_to_be16(payload_len);
@@ -527,12 +365,137 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
if (ieee80211_has_a4(hdr->frame_control))
ether_addr_copy(mic_hdr->addr4, hdr->addr4);
- memcpy(key_buffer, tx_key->key, WLAN_KEY_LEN_CCMP);
+ memcpy(fifo->tx_key, tx_key->key, WLAN_KEY_LEN_CCMP);
- break;
+ fifo->frag_ctl |= cpu_to_le16(FRAGCTL_AES);
+ return true;
default:
break;
}
+
+ return false;
+}
+
+static void vnt_rxtx_rts(struct vnt_usb_send_context *tx_context)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_context->skb);
+ struct vnt_tx_buffer *tx_buffer = tx_context->tx_buffer;
+ union vnt_tx_head *tx_head = &tx_buffer->tx_head;
+ struct vnt_rrv_time_rts *buf = &tx_head->tx_rts.rts;
+ union vnt_tx_data_head *head = &tx_head->tx_rts.tx.head;
+
+ buf->rts_rrv_time_aa = vnt_get_rts_duration(tx_context);
+ buf->rts_rrv_time_ba = buf->rts_rrv_time_aa;
+ buf->rts_rrv_time_bb = buf->rts_rrv_time_aa;
+
+ buf->rrv_time_a = vnt_rxtx_rsvtime_le16(tx_context);
+ buf->rrv_time_b = buf->rrv_time_a;
+
+ if (info->control.hw_key) {
+ if (vnt_fill_txkey(tx_buffer, tx_context->skb))
+ head = &tx_head->tx_rts.tx.mic.head;
+ }
+
+ vnt_rxtx_rts_g_head(tx_context, &head->rts_g);
+}
+
+static void vnt_rxtx_cts(struct vnt_usb_send_context *tx_context)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_context->skb);
+ struct vnt_tx_buffer *tx_buffer = tx_context->tx_buffer;
+ union vnt_tx_head *tx_head = &tx_buffer->tx_head;
+ struct vnt_rrv_time_cts *buf = &tx_head->tx_cts.cts;
+ union vnt_tx_data_head *head = &tx_head->tx_cts.tx.head;
+
+ buf->rrv_time_a = vnt_rxtx_rsvtime_le16(tx_context);
+ buf->rrv_time_b = buf->rrv_time_a;
+
+ buf->cts_rrv_time_ba = vnt_get_cts_duration(tx_context);
+
+ if (info->control.hw_key) {
+ if (vnt_fill_txkey(tx_buffer, tx_context->skb))
+ head = &tx_head->tx_cts.tx.mic.head;
+ }
+
+ vnt_fill_cts_head(tx_context, head);
+}
+
+static void vnt_rxtx_ab(struct vnt_usb_send_context *tx_context)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_context->skb);
+ struct vnt_tx_buffer *tx_buffer = tx_context->tx_buffer;
+ union vnt_tx_head *tx_head = &tx_buffer->tx_head;
+ struct vnt_rrv_time_ab *buf = &tx_head->tx_ab.ab;
+ union vnt_tx_data_head *head = &tx_head->tx_ab.tx.head;
+
+ buf->rrv_time = vnt_rxtx_rsvtime_le16(tx_context);
+
+ if (info->control.hw_key) {
+ if (vnt_fill_txkey(tx_buffer, tx_context->skb))
+ head = &tx_head->tx_ab.tx.mic.head;
+ }
+
+ if (info->control.use_rts) {
+ buf->rts_rrv_time = vnt_get_rts_duration(tx_context);
+
+ vnt_rxtx_rts_ab_head(tx_context, &head->rts_ab);
+
+ return;
+ }
+
+ vnt_rxtx_datahead_ab(tx_context, &head->data_head_ab);
+}
+
+static void vnt_generate_tx_parameter(struct vnt_usb_send_context *tx_context)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_context->skb);
+
+ if (info->control.use_cts_prot) {
+ if (info->control.use_rts) {
+ vnt_rxtx_rts(tx_context);
+
+ return;
+ }
+
+ vnt_rxtx_cts(tx_context);
+
+ return;
+ }
+
+ vnt_rxtx_ab(tx_context);
+}
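As a reading aid (a descriptive sketch, not code from the patch), the dispatch above maps mac80211 protection flags to a buffer layout that vnt_get_hdr_size() below must mirror exactly:

/*
 * use_cts_prot && use_rts -> vnt_rrv_time_rts + RTS (11g) head
 * use_cts_prot only       -> vnt_rrv_time_cts + CTS head
 * neither (11a/11b)       -> vnt_rrv_time_ab + RTS (a/b) head or plain
 *                            data head, depending on use_rts
 */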
+
+static u16 vnt_get_hdr_size(struct ieee80211_tx_info *info)
+{
+ u16 size = sizeof(struct vnt_tx_datahead_ab);
+
+ if (info->control.use_cts_prot) {
+ if (info->control.use_rts)
+ size = sizeof(struct vnt_rts_g);
+ else
+ size = sizeof(struct vnt_cts);
+ } else if (info->control.use_rts) {
+ size = sizeof(struct vnt_rts_ab);
+ }
+
+ if (info->control.hw_key) {
+ if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ size += sizeof(struct vnt_mic_hdr);
+ }
+
+ /* Get rrv_time header */
+ if (info->control.use_cts_prot) {
+ if (info->control.use_rts)
+ size += sizeof(struct vnt_rrv_time_rts);
+ else
+ size += sizeof(struct vnt_rrv_time_cts);
+ } else {
+ size += sizeof(struct vnt_rrv_time_ab);
+ }
+
+ size += sizeof(struct vnt_tx_fifo_head);
+
+ return size;
}
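As a worked example of the size computation (a sketch, assuming the struct definitions match rxtx.h as modified below), an RTS-protected 11g frame with a CCMP key reserves headroom for four pieces:

/* hypothetical headroom for use_cts_prot + use_rts + CCMP */
size_t hdr_size = sizeof(struct vnt_rts_g) +        /* RTS data head */
                  sizeof(struct vnt_mic_hdr) +      /* CCMP MIC header */
                  sizeof(struct vnt_rrv_time_rts) + /* reserve-time head */
                  sizeof(struct vnt_tx_fifo_head);  /* fifo control head */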
int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
@@ -540,30 +503,18 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *tx_rate = &info->control.rates[0];
struct ieee80211_rate *rate;
- struct ieee80211_key_conf *tx_key;
struct ieee80211_hdr *hdr;
- struct vnt_mic_hdr *mic_hdr = NULL;
struct vnt_tx_buffer *tx_buffer;
struct vnt_tx_fifo_head *tx_buffer_head;
struct vnt_usb_send_context *tx_context;
unsigned long flags;
- u16 tx_bytes, tx_header_size, tx_body_size, current_rate, duration_id;
u8 pkt_type;
- bool need_rts = false;
- bool need_mic = false;
hdr = (struct ieee80211_hdr *)(skb->data);
rate = ieee80211_get_tx_rate(priv->hw, info);
- current_rate = rate->hw_value;
- if (priv->current_rate != current_rate &&
- !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
- priv->current_rate = current_rate;
- vnt_schedule_command(priv, WLAN_CMD_SETPOWER);
- }
-
- if (current_rate > RATE_11M) {
+ if (rate->hw_value > RATE_11M) {
if (info->band == NL80211_BAND_5GHZ) {
pkt_type = PK_TYPE_11A;
} else {
@@ -589,17 +540,23 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
return -ENOMEM;
}
- tx_context->skb = skb;
tx_context->pkt_type = pkt_type;
- tx_context->need_ack = false;
tx_context->frame_len = skb->len + 4;
- tx_context->tx_rate = current_rate;
+ tx_context->tx_rate = rate->hw_value;
spin_unlock_irqrestore(&priv->lock, flags);
- tx_buffer = (struct vnt_tx_buffer *)tx_context->data;
+ tx_context->skb = skb_clone(skb, GFP_ATOMIC);
+ if (!tx_context->skb) {
+ tx_context->in_use = false;
+ return -ENOMEM;
+ }
+
+ tx_buffer = skb_push(skb, vnt_get_hdr_size(info));
+ tx_context->tx_buffer = tx_buffer;
tx_buffer_head = &tx_buffer->fifo_head;
- tx_body_size = skb->len;
+
+ tx_context->type = CONTEXT_DATA_PACKET;
/* Set fifo controls */
if (pkt_type == PK_TYPE_11A)
@@ -623,92 +580,43 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us);
}
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_NEEDACK);
- tx_context->need_ack = true;
- }
if (ieee80211_has_retry(hdr->frame_control))
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LRETRY);
- if (tx_rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- priv->preamble_type = PREAMBLE_SHORT;
- else
- priv->preamble_type = PREAMBLE_LONG;
-
- if (tx_rate->flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- need_rts = true;
+ if (info->control.use_rts)
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_RTS);
- }
if (ieee80211_has_a4(hdr->frame_control))
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LHEAD);
tx_buffer_head->frag_ctl =
- cpu_to_le16(ieee80211_get_hdrlen_from_skb(skb) << 10);
+ cpu_to_le16(ieee80211_hdrlen(hdr->frame_control) << 10);
- if (info->control.hw_key) {
- tx_key = info->control.hw_key;
- switch (info->control.hw_key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_LEGACY);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_TKIP);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_AES);
- need_mic = true;
- default:
- break;
- }
- tx_context->frame_len += tx_key->icv_len;
- }
+ if (info->control.hw_key)
+ tx_context->frame_len += info->control.hw_key->icv_len;
- tx_buffer_head->current_rate = cpu_to_le16(current_rate);
+ tx_buffer_head->current_rate = cpu_to_le16(rate->hw_value);
- duration_id = vnt_generate_tx_parameter(tx_context, tx_buffer, &mic_hdr,
- need_mic, need_rts);
-
- tx_header_size = tx_context->tx_hdr_size;
- if (!tx_header_size) {
- tx_context->in_use = false;
- return -ENOMEM;
- }
+ vnt_generate_tx_parameter(tx_context);
tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_NONFRAG);
- tx_bytes = tx_header_size + tx_body_size;
-
- memcpy(tx_context->hdr, skb->data, tx_body_size);
-
- if (info->control.hw_key) {
- tx_key = info->control.hw_key;
- if (tx_key->keylen > 0)
- vnt_fill_txkey(tx_context, tx_buffer_head->tx_key,
- tx_key, skb, tx_body_size, mic_hdr);
- }
-
priv->seq_counter = (le16_to_cpu(hdr->seq_ctrl) &
IEEE80211_SCTL_SEQ) >> 4;
- tx_buffer->tx_byte_count = cpu_to_le16(tx_bytes);
- tx_buffer->pkt_no = tx_context->pkt_no;
- tx_buffer->type = 0x00;
-
- tx_bytes += 4;
-
- tx_context->type = CONTEXT_DATA_PACKET;
- tx_context->buf_len = tx_bytes;
-
spin_lock_irqsave(&priv->lock, flags);
- if (vnt_tx_context(priv, tx_context)) {
+ if (vnt_tx_context(priv, tx_context, skb)) {
+ dev_kfree_skb(tx_context->skb);
spin_unlock_irqrestore(&priv->lock, flags);
return -EIO;
}
+ dev_kfree_skb(skb);
+
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
@@ -716,14 +624,13 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
{
- struct vnt_beacon_buffer *beacon_buffer;
struct vnt_tx_short_buf_head *short_head;
struct ieee80211_tx_info *info;
struct vnt_usb_send_context *context;
struct ieee80211_mgmt *mgmt_hdr;
unsigned long flags;
u32 frame_size = skb->len + 4;
- u16 current_rate, count;
+ u16 current_rate;
spin_lock_irqsave(&priv->lock, flags);
@@ -738,8 +645,8 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->lock, flags);
- beacon_buffer = (struct vnt_beacon_buffer *)&context->data[0];
- short_head = &beacon_buffer->short_head;
+ mgmt_hdr = (struct ieee80211_mgmt *)skb->data;
+ short_head = skb_push(skb, sizeof(*short_head));
if (priv->bb_type == BB_TYPE_11A) {
current_rate = RATE_6M;
@@ -764,10 +671,6 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
vnt_time_stamp_off(priv, current_rate);
}
- /* Generate Beacon Header */
- mgmt_hdr = &beacon_buffer->mgmt_hdr;
- memcpy(mgmt_hdr, skb->data, skb->len);
-
/* Get Duration */
short_head->duration = mgmt_hdr->duration;
@@ -786,18 +689,11 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
if (priv->seq_counter > 0x0fff)
priv->seq_counter = 0;
- count = sizeof(struct vnt_tx_short_buf_head) + skb->len;
-
- beacon_buffer->tx_byte_count = cpu_to_le16(count);
- beacon_buffer->pkt_no = context->pkt_no;
- beacon_buffer->type = 0x01;
-
context->type = CONTEXT_BEACON_PACKET;
- context->buf_len = count + 4; /* USB header */
spin_lock_irqsave(&priv->lock, flags);
- if (vnt_tx_context(priv, context))
+ if (vnt_tx_context(priv, context, skb))
ieee80211_free_txskb(priv->hw, context->skb);
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index 0e6226af7d41..6ca2ca32d036 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -23,6 +23,13 @@
#define DEFAULT_MGN_LIFETIME_RES_64us 125 /* 64us */
#define DEFAULT_MSDU_LIFETIME_RES_64us 8000
+/* Length, Service, and Signal fields of Phy for Tx */
+struct vnt_phy_field {
+ u8 signal;
+ u8 service;
+ __le16 len;
+} __packed;
+
/* MIC HDR data header */
struct vnt_mic_hdr {
u8 id;
@@ -70,14 +77,12 @@ struct vnt_tx_datahead_g {
__le16 duration_a;
__le16 time_stamp_off_b;
__le16 time_stamp_off_a;
- struct ieee80211_hdr hdr;
} __packed;
struct vnt_tx_datahead_ab {
struct vnt_phy_field ab;
__le16 duration;
__le16 time_stamp_off;
- struct ieee80211_hdr hdr;
} __packed;
/* RTS buffer header */
@@ -155,9 +160,6 @@ struct vnt_tx_fifo_head {
} __packed;
struct vnt_tx_buffer {
- u8 type;
- u8 pkt_no;
- __le16 tx_byte_count;
struct vnt_tx_fifo_head fifo_head;
union vnt_tx_head tx_head;
} __packed;
@@ -170,14 +172,6 @@ struct vnt_tx_short_buf_head {
__le16 time_stamp_off;
} __packed;
-struct vnt_beacon_buffer {
- u8 type;
- u8 pkt_no;
- __le16 tx_byte_count;
- struct vnt_tx_short_buf_head short_head;
- struct ieee80211_mgmt mgmt_hdr;
-} __packed;
-
int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb);
int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif);
int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
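Taken together with the usbpipe.c changes below, which re-add the stripped type/pkt_no/tx_byte_count fields as struct vnt_tx_usb_header, the assumed on-the-wire layout of a data packet becomes:

/*
 * struct vnt_tx_usb_header   (type, pkt_no, tx_byte_count)
 * struct vnt_tx_fifo_head    (fifo_ctl, frag_ctl, tx_key, ...)
 * union vnt_tx_head          (rrv times + RTS/CTS/data heads)
 * 802.11 frame from mac80211
 */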
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 91b62c3dff7b..82b774be6485 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -77,7 +77,7 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data)
}
int vnt_control_out_blocks(struct vnt_private *priv,
- u16 block, u8 reg, u16 length, u8 *data)
+ u16 block, u8 reg, u16 length, const u8 *data)
{
int ret = 0, i;
@@ -196,32 +196,18 @@ static void vnt_int_process_data(struct vnt_private *priv)
if (int_data->tsr3 & TSR_VALID)
vnt_int_report_rate(priv, int_data->pkt3, int_data->tsr3);
- if (int_data->isr0 != 0) {
- if (int_data->isr0 & ISR_BNTX &&
- priv->op_mode == NL80211_IFTYPE_AP)
- vnt_schedule_command(priv, WLAN_CMD_BECON_SEND);
-
- if (int_data->isr0 & ISR_TBTT &&
- priv->hw->conf.flags & IEEE80211_CONF_PS) {
- if (!priv->wake_up_count)
- priv->wake_up_count =
- priv->hw->conf.listen_interval;
+ if (!int_data->isr0)
+ return;
- if (priv->wake_up_count)
- --priv->wake_up_count;
+ if (int_data->isr0 & ISR_BNTX && priv->op_mode == NL80211_IFTYPE_AP)
+ vnt_schedule_command(priv, WLAN_CMD_BECON_SEND);
- /* Turn on wake up to listen next beacon */
- if (priv->wake_up_count == 1)
- vnt_schedule_command(priv,
- WLAN_CMD_TBTT_WAKEUP);
- }
- priv->current_tsf = le64_to_cpu(int_data->tsf);
+ priv->current_tsf = le64_to_cpu(int_data->tsf);
- low_stats->dot11RTSSuccessCount += int_data->rts_success;
- low_stats->dot11RTSFailureCount += int_data->rts_fail;
- low_stats->dot11ACKFailureCount += int_data->ack_fail;
- low_stats->dot11FCSErrorCount += int_data->fcs_err;
- }
+ low_stats->dot11RTSSuccessCount += int_data->rts_success;
+ low_stats->dot11RTSFailureCount += int_data->rts_fail;
+ low_stats->dot11ACKFailureCount += int_data->ack_fail;
+ low_stats->dot11FCSErrorCount += int_data->fcs_err;
}
static void vnt_start_interrupt_urb_complete(struct urb *urb)
@@ -442,7 +428,8 @@ static void vnt_tx_context_complete(struct urb *urb)
switch (urb->status) {
case 0:
- dev_dbg(&priv->usb->dev, "Write %d bytes\n", context->buf_len);
+ dev_dbg(&priv->usb->dev,
+ "Write %d bytes\n", urb->actual_length);
break;
case -ECONNRESET:
case -ENOENT:
@@ -467,30 +454,53 @@ static void vnt_tx_context_complete(struct urb *urb)
}
int vnt_tx_context(struct vnt_private *priv,
- struct vnt_usb_send_context *context)
+ struct vnt_usb_send_context *context,
+ struct sk_buff *skb)
{
+ struct vnt_tx_usb_header *usb;
+ struct urb *urb;
int status;
- struct urb *urb = context->urb;
+ u16 count = skb->len;
+
+ usb = skb_push(skb, sizeof(*usb));
+ usb->tx_byte_count = cpu_to_le16(count);
+ usb->pkt_no = context->pkt_no;
+ usb->type = context->type;
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) {
context->in_use = false;
return -ENODEV;
}
+ if (skb->len > MAX_TOTAL_SIZE_WITH_ALL_HEADERS) {
+ context->in_use = false;
+ return -E2BIG;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ context->in_use = false;
+ return -ENOMEM;
+ }
+
usb_fill_bulk_urb(urb,
priv->usb,
usb_sndbulkpipe(priv->usb, 3),
- context->data,
- context->buf_len,
+ skb->data,
+ skb->len,
vnt_tx_context_complete,
context);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_dbg(&priv->usb->dev, "Submit Tx URB failed %d\n", status);
-
+ usb_unanchor_urb(urb);
context->in_use = false;
}
+ usb_free_urb(urb);
+
return status;
}
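Because each Tx URB is now anchored on priv->tx_submitted, teardown can cancel everything in flight with one call; a minimal sketch, assuming the anchor is initialised with init_usb_anchor() at probe time:

/* hypothetical teardown path */
usb_kill_anchored_urbs(&priv->tx_submitted); /* cancel and wait for all Tx URBs */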
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index 35697b58d748..52c2a928c9c1 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -41,6 +41,12 @@ struct vnt_interrupt_data {
u8 sw[2];
} __packed;
+struct vnt_tx_usb_header {
+ u8 type;
+ u8 pkt_no;
+ __le16 tx_byte_count;
+} __packed;
+
#define VNT_REG_BLOCK_SIZE 64
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
@@ -52,11 +58,12 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data);
int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data);
int vnt_control_out_blocks(struct vnt_private *priv,
- u16 block, u8 reg, u16 len, u8 *data);
+ u16 block, u8 reg, u16 len, const u8 *data);
int vnt_start_interrupt_urb(struct vnt_private *priv);
int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb);
int vnt_tx_context(struct vnt_private *priv,
- struct vnt_usb_send_context *context);
+ struct vnt_usb_send_context *context,
+ struct sk_buff *skb);
#endif /* __USBPIPE_H__ */
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 2c5250ca2801..0ccc87da394e 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -122,8 +122,7 @@ void vnt_run_command(struct work_struct *work)
case WLAN_CMD_SETPOWER_START:
- vnt_rf_setpower(priv, priv->current_rate,
- priv->hw->conf.chandef.chan->hw_value);
+ vnt_rf_setpower(priv, priv->hw->conf.chandef.chan);
break;
diff --git a/drivers/staging/wfx/Makefile b/drivers/staging/wfx/Makefile
index 0d9c1ed092f6..0e0cc982ceab 100644
--- a/drivers/staging/wfx/Makefile
+++ b/drivers/staging/wfx/Makefile
@@ -7,6 +7,7 @@ wfx-y := \
bh.o \
hwio.o \
fwio.o \
+ hif_tx_mib.o \
hif_tx.o \
hif_rx.o \
queue.o \
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
index efcb7c6a5aa7..42bf36d43970 100644
--- a/drivers/staging/wfx/TODO
+++ b/drivers/staging/wfx/TODO
@@ -1,60 +1,25 @@
This is a list of things that need to be done to get this driver out of the
staging directory.
- - All structures defined in hif_api_*.h are intended to sent/received to/from
- hardware. All their members whould be declared __le32 or __le16.
- See:
- https://lore.kernel.org/lkml/20191111202852.GX26530@ZenIV.linux.org.uk
+ - The HIF API is not yet clean enough.
- - Once previous item done, it will be possible to audit the driver with
- `sparse'. It will probably find tons of problems with big endian
- architectures.
-
- - hif_api_*.h whave been imported from firmware code. Some of the structures
- are never used in driver.
-
- - Driver try to maintains power save status of the stations. However, this
- work is already done by mac80211. sta_asleep_mask and pspoll_mask should be
- dropped.
-
- - wfx_tx_queues_get() should be reworked. It currently try compute itself the
- QoS policy. However, firmware already do the job. Firmware would prefer to
- have a few packets in each queue and be able to choose itself which queue to
- use.
+ - The code that checks the correctness of received messages (in rx_helper()) can
+ be improved. See:
+ https://lore.kernel.org/driverdev-devel/2302785.6C7ODC2LYm@pc-42/
- As suggested by Felix, rate control could be improved following this idea:
https://lore.kernel.org/lkml/3099559.gv3Q75KnN1@pc-42/
- - When driver is about to loose BSS, it forge its own Null Func request (see
- wfx_cqm_bssloss_sm()). It should use mechanism provided by mac80211.
-
- - AP is actually is setup after a call to wfx_bss_info_changed(). Yet,
- ieee80211_ops provide callback start_ap().
-
- - The current process for joining a network is incredibly complex. Should be
- reworked.
-
- - Monitoring mode is not implemented despite being mandatory by mac80211.
-
- - "compatible" value are not correct. They should be "vendor,chip". See:
- https://lore.kernel.org/driverdev-devel/5226570.CMH5hVlZcI@pc-42
-
- - The "state" field from wfx_vif should be replaced by "vif->type".
-
- - It seems that wfx_upload_keys() is useless.
-
- - "event_queue" from wfx_vif seems overkill. These event are rare and they
- probably could be handled in a simpler fashion.
-
- Feature called "secure link" should be either developed (using kernel
crypto API) or dropped.
+ - The device allows filtering of multicast traffic. The code to support these
+ filters exists in the driver, but it is disabled because it has never been
+ tested.
+
- In wfx_cmd_send(), "async" allows sending a command without waiting for the
reply. It may help in some situations, but it is not yet used. In addition,
it may cause some trouble:
https://lore.kernel.org/driverdev-devel/alpine.DEB.2.21.1910041317381.2992@hadrien/
So, fix it (by replacing the mutex with a semaphore) or drop it.
- - Chip support P2P, but driver does not implement it.
-
- - Chip support kind of Mesh, but driver does not implement it.
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index 9fcab00a3733..1cbaf8bb4fa3 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -70,7 +70,7 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
if (wfx_data_read(wdev, skb->data, alloc_len))
goto err;
- piggyback = le16_to_cpup((u16 *)(skb->data + alloc_len - 2));
+ piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2));
_trace_piggyback(piggyback, false);
hif = (struct hif_msg *)skb->data;
@@ -84,13 +84,12 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
// piggyback is probably correct.
return piggyback;
}
- le16_to_cpus(&hif->len);
- computed_len = round_up(hif->len - sizeof(hif->len), 16)
- + sizeof(struct hif_sl_msg)
- + sizeof(struct hif_sl_tag);
+ computed_len =
+ round_up(le16_to_cpu(hif->len) - sizeof(hif->len), 16) +
+ sizeof(struct hif_sl_msg) +
+ sizeof(struct hif_sl_tag);
} else {
- le16_to_cpus(&hif->len);
- computed_len = round_up(hif->len, 2);
+ computed_len = round_up(le16_to_cpu(hif->len), 2);
}
if (computed_len != read_len) {
dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
@@ -103,13 +102,11 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
if (!(hif->id & HIF_ID_IS_INDICATION)) {
(*is_cnf)++;
if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
- release_count = le32_to_cpu(((struct hif_cnf_multi_transmit *)hif->body)->num_tx_confs);
+ release_count = ((struct hif_cnf_multi_transmit *)hif->body)->num_tx_confs;
else
release_count = 1;
WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
wdev->hif.tx_buffers_used -= release_count;
- if (!wdev->hif.tx_buffers_used)
- wake_up(&wdev->hif.tx_buffers_empty);
}
_trace_hif_recv(hif, wdev->hif.tx_buffers_used);
@@ -120,9 +117,11 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
}
- skb_put(skb, hif->len);
+ skb_put(skb, le16_to_cpu(hif->len));
// wfx_handle_rx takes care of the SKB lifetime
wfx_handle_rx(wdev, skb);
+ if (!wdev->hif.tx_buffers_used)
+ wake_up(&wdev->hif.tx_buffers_empty);
return piggyback;
@@ -307,6 +306,35 @@ void wfx_bh_request_tx(struct wfx_dev *wdev)
queue_work(system_highpri_wq, &wdev->hif.bh);
}
+/*
+ * If an IRQ is not available, this function allows the control register to be
+ * polled manually, simulating an IRQ when an event happens.
+ *
+ * Note that the device has a bug: if an IRQ is raised while the host reads the
+ * control register, the IRQ is lost. So, use this function carefully (only
+ * during device initialisation).
+ */
+void wfx_bh_poll_irq(struct wfx_dev *wdev)
+{
+ ktime_t now, start;
+ u32 reg;
+
+ WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ");
+ start = ktime_get();
+ for (;;) {
+ control_reg_read(wdev, &reg);
+ now = ktime_get();
+ if (reg & 0xFFF)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, 1000))) {
+ dev_err(wdev->dev, "time out while polling control register\n");
+ return;
+ }
+ udelay(200);
+ }
+ wfx_bh_request_rx(wdev);
+}
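A minimal usage sketch, assuming wdev->poll_irq is set whenever no IRQ line is wired up (the field is referenced, but not defined, in this hunk):

/* hypothetical call site in the firmware download path */
if (wdev->poll_irq)
        wfx_bh_poll_irq(wdev); /* simulate the IRQ we cannot receive */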
+
void wfx_bh_register(struct wfx_dev *wdev)
{
INIT_WORK(&wdev->hif.bh, bh_work);
diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h
index 93ca98424e0b..4b73437869e1 100644
--- a/drivers/staging/wfx/bh.h
+++ b/drivers/staging/wfx/bh.h
@@ -28,5 +28,6 @@ void wfx_bh_register(struct wfx_dev *wdev);
void wfx_bh_unregister(struct wfx_dev *wdev);
void wfx_bh_request_rx(struct wfx_dev *wdev);
void wfx_bh_request_tx(struct wfx_dev *wdev);
+void wfx_bh_poll_irq(struct wfx_dev *wdev);
#endif /* WFX_BH_H */
diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h
index 62d6ecabe4cb..0370b6c59863 100644
--- a/drivers/staging/wfx/bus.h
+++ b/drivers/staging/wfx/bus.h
@@ -25,6 +25,8 @@ struct hwbus_ops {
void *dst, size_t count);
int (*copy_to_io)(void *bus_priv, unsigned int addr,
const void *src, size_t count);
+ int (*irq_subscribe)(void *bus_priv);
+ int (*irq_unsubscribe)(void *bus_priv);
void (*lock)(void *bus_priv);
void (*unlock)(void *bus_priv);
size_t (*align_size)(void *bus_priv, size_t size);
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
index dedc3ff58d3e..496bfc8bbacc 100644
--- a/drivers/staging/wfx/bus_sdio.c
+++ b/drivers/staging/wfx/bus_sdio.c
@@ -6,10 +6,12 @@
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/module.h>
+#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
+#include <linux/irq.h>
#include "bus.h"
#include "wfx.h"
@@ -91,53 +93,58 @@ static void wfx_sdio_irq_handler(struct sdio_func *func)
{
struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
- if (bus->core)
- wfx_bh_request_rx(bus->core);
- else
- WARN(!bus->core, "race condition in driver init/deinit");
+ wfx_bh_request_rx(bus->core);
}
static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv)
{
struct wfx_sdio_priv *bus = priv;
- if (!bus->core) {
- WARN(!bus->core, "race condition in driver init/deinit");
- return IRQ_NONE;
- }
sdio_claim_host(bus->func);
wfx_bh_request_rx(bus->core);
sdio_release_host(bus->func);
return IRQ_HANDLED;
}
-static int wfx_sdio_irq_subscribe(struct wfx_sdio_priv *bus)
+static int wfx_sdio_irq_subscribe(void *priv)
{
+ struct wfx_sdio_priv *bus = priv;
+ u32 flags;
int ret;
+ u8 cccr;
- if (bus->of_irq) {
- ret = request_irq(bus->of_irq, wfx_sdio_irq_handler_ext,
- IRQF_TRIGGER_RISING, "wfx", bus);
- } else {
+ if (!bus->of_irq) {
sdio_claim_host(bus->func);
ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler);
sdio_release_host(bus->func);
+ return ret;
}
- return ret;
+
+ sdio_claim_host(bus->func);
+ cccr = sdio_f0_readb(bus->func, SDIO_CCCR_IENx, NULL);
+ cccr |= BIT(0);
+ cccr |= BIT(bus->func->num);
+ sdio_f0_writeb(bus->func, cccr, SDIO_CCCR_IENx, NULL);
+ sdio_release_host(bus->func);
+ flags = irq_get_trigger_type(bus->of_irq);
+ if (!flags)
+ flags = IRQF_TRIGGER_HIGH;
+ flags |= IRQF_ONESHOT;
+ return devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL,
+ wfx_sdio_irq_handler_ext, flags,
+ "wfx", bus);
}
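Design note: with an out-of-band GPIO interrupt, sdio_claim_irq() is bypassed, so the CCCR write above has to enable card interrupts by hand. A descriptive sketch of the two bits, assuming a standard SDIO CCCR layout:

/*
 * SDIO_CCCR_IENx bits set above:
 *   BIT(0)              - IENM, master interrupt enable
 *   BIT(bus->func->num) - IENx, per-function interrupt enable
 */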
-static int wfx_sdio_irq_unsubscribe(struct wfx_sdio_priv *bus)
+static int wfx_sdio_irq_unsubscribe(void *priv)
{
+ struct wfx_sdio_priv *bus = priv;
int ret;
- if (bus->of_irq) {
- free_irq(bus->of_irq, bus);
- ret = 0;
- } else {
- sdio_claim_host(bus->func);
- ret = sdio_release_irq(bus->func);
- sdio_release_host(bus->func);
- }
+ if (bus->of_irq)
+ devm_free_irq(&bus->func->dev, bus->of_irq, bus);
+ sdio_claim_host(bus->func);
+ ret = sdio_release_irq(bus->func);
+ sdio_release_host(bus->func);
return ret;
}
@@ -151,12 +158,20 @@ static size_t wfx_sdio_align_size(void *priv, size_t size)
static const struct hwbus_ops wfx_sdio_hwbus_ops = {
.copy_from_io = wfx_sdio_copy_from_io,
.copy_to_io = wfx_sdio_copy_to_io,
+ .irq_subscribe = wfx_sdio_irq_subscribe,
+ .irq_unsubscribe = wfx_sdio_irq_unsubscribe,
.lock = wfx_sdio_lock,
.unlock = wfx_sdio_unlock,
.align_size = wfx_sdio_align_size,
};
-static const struct of_device_id wfx_sdio_of_match[];
+static const struct of_device_id wfx_sdio_of_match[] = {
+ { .compatible = "silabs,wfx-sdio" },
+ { .compatible = "silabs,wf200" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
+
static int wfx_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -165,7 +180,8 @@ static int wfx_sdio_probe(struct sdio_func *func,
int ret;
if (func->num != 1) {
- dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n", func->num);
+ dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n",
+ func->num);
return -ENODEV;
}
@@ -207,18 +223,12 @@ static int wfx_sdio_probe(struct sdio_func *func,
goto err1;
}
- ret = wfx_sdio_irq_subscribe(bus);
- if (ret)
- goto err1;
-
ret = wfx_probe(bus->core);
if (ret)
- goto err2;
+ goto err1;
return 0;
-err2:
- wfx_sdio_irq_unsubscribe(bus);
err1:
sdio_claim_host(func);
sdio_disable_func(func);
@@ -232,7 +242,6 @@ static void wfx_sdio_remove(struct sdio_func *func)
struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
wfx_release(bus->core);
- wfx_sdio_irq_unsubscribe(bus);
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
@@ -248,15 +257,6 @@ static const struct sdio_device_id wfx_sdio_ids[] = {
};
MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
-#ifdef CONFIG_OF
-static const struct of_device_id wfx_sdio_of_match[] = {
- { .compatible = "silabs,wfx-sdio" },
- { .compatible = "silabs,wf200" },
- { },
-};
-MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
-#endif
-
struct sdio_driver wfx_sdio_driver = {
.name = "wfx-sdio",
.id_table = wfx_sdio_ids,
@@ -264,6 +264,6 @@ struct sdio_driver wfx_sdio_driver = {
.remove = wfx_sdio_remove,
.drv = {
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(wfx_sdio_of_match),
+ .of_match_table = wfx_sdio_of_match,
}
};
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index 61e99b09decb..e8da61fb096b 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -12,6 +12,7 @@
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include "bus.h"
@@ -39,7 +40,6 @@ struct wfx_spi_priv {
struct spi_device *func;
struct wfx_dev *core;
struct gpio_desc *gpio_reset;
- struct work_struct request_rx;
bool need_swab;
};
@@ -140,25 +140,30 @@ static irqreturn_t wfx_spi_irq_handler(int irq, void *priv)
{
struct wfx_spi_priv *bus = priv;
- if (!bus->core) {
- WARN(!bus->core, "race condition in driver init/deinit");
- return IRQ_NONE;
- }
- queue_work(system_highpri_wq, &bus->request_rx);
+ wfx_bh_request_rx(bus->core);
return IRQ_HANDLED;
}
-static void wfx_spi_request_rx(struct work_struct *work)
+static int wfx_spi_irq_subscribe(void *priv)
{
- struct wfx_spi_priv *bus =
- container_of(work, struct wfx_spi_priv, request_rx);
-
- wfx_bh_request_rx(bus->core);
+ struct wfx_spi_priv *bus = priv;
+ u32 flags;
+
+ flags = irq_get_trigger_type(bus->func->irq);
+ if (!flags)
+ flags = IRQF_TRIGGER_HIGH;
+ flags |= IRQF_ONESHOT;
+ return devm_request_threaded_irq(&bus->func->dev, bus->func->irq, NULL,
+ wfx_spi_irq_handler, flags,
+ "wfx", bus);
}
-static void wfx_flush_irq_work(void *w)
+static int wfx_spi_irq_unsubscribe(void *priv)
{
- flush_work(w);
+ struct wfx_spi_priv *bus = priv;
+
+ devm_free_irq(&bus->func->dev, bus->func->irq, bus);
+ return 0;
}
static size_t wfx_spi_align_size(void *priv, size_t size)
@@ -170,6 +175,8 @@ static size_t wfx_spi_align_size(void *priv, size_t size)
static const struct hwbus_ops wfx_spi_hwbus_ops = {
.copy_from_io = wfx_spi_copy_from_io,
.copy_to_io = wfx_spi_copy_to_io,
+ .irq_subscribe = wfx_spi_irq_subscribe,
+ .irq_unsubscribe = wfx_spi_irq_unsubscribe,
.lock = wfx_spi_lock,
.unlock = wfx_spi_unlock,
.align_size = wfx_spi_align_size,
@@ -216,22 +223,11 @@ static int wfx_spi_probe(struct spi_device *func)
usleep_range(2000, 2500);
}
- INIT_WORK(&bus->request_rx, wfx_spi_request_rx);
bus->core = wfx_init_common(&func->dev, &wfx_spi_pdata,
&wfx_spi_hwbus_ops, bus);
if (!bus->core)
return -EIO;
- ret = devm_add_action_or_reset(&func->dev, wfx_flush_irq_work,
- &bus->request_rx);
- if (ret)
- return ret;
-
- ret = devm_request_irq(&func->dev, func->irq, wfx_spi_irq_handler,
- IRQF_TRIGGER_RISING, "wfx", bus);
- if (ret)
- return ret;
-
return wfx_probe(bus->core);
}
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
index c5b83fedeb55..0e959ebc38b5 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/staging/wfx/data_rx.c
@@ -49,7 +49,7 @@ static int wfx_drop_encrypt_data(struct wfx_dev *wdev,
}
/* Firmware strips ICV in case of MIC failure. */
- if (arg->status == HIF_STATUS_MICFAILURE)
+ if (arg->status == HIF_STATUS_RX_FAIL_MIC)
icv_len = 0;
if (skb->len < hdrlen + iv_len + icv_len) {
@@ -79,7 +79,7 @@ void wfx_rx_cb(struct wfx_vif *wvif,
ieee80211_is_beacon(frame->frame_control)))
goto drop;
- if (arg->status == HIF_STATUS_MICFAILURE)
+ if (arg->status == HIF_STATUS_RX_FAIL_MIC)
hdr->flag |= RX_FLAG_MMIC_ERROR;
else if (arg->status)
goto drop;
@@ -118,18 +118,6 @@ void wfx_rx_cb(struct wfx_vif *wvif,
arg->rx_flags.match_uc_addr &&
mgmt->u.action.category == WLAN_CATEGORY_BACK)
goto drop;
- if (ieee80211_is_beacon(frame->frame_control) &&
- !arg->status && wvif->vif &&
- ether_addr_equal(ieee80211_get_SA(frame),
- wvif->vif->bss_conf.bssid)) {
- /* Disable beacon filter once we're associated... */
- if (wvif->disable_beacon_filter &&
- (wvif->vif->bss_conf.assoc ||
- wvif->vif->bss_conf.ibss_joined)) {
- wvif->disable_beacon_filter = false;
- schedule_work(&wvif->update_filtering_work);
- }
- }
ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
return;
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
index 61c28bfd2a37..125dbfc1f875 100644
--- a/drivers/staging/wfx/data_rx.h
+++ b/drivers/staging/wfx/data_rx.h
@@ -8,10 +8,9 @@
#ifndef WFX_DATA_RX_H
#define WFX_DATA_RX_H
-#include "hif_api_cmd.h"
-
struct wfx_vif;
struct sk_buff;
+struct hif_ind_rx;
void wfx_rx_cb(struct wfx_vif *wvif,
const struct hif_ind_rx *arg, struct sk_buff *skb);
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index 42183c70d4df..f042ef36b408 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -6,6 +6,7 @@
* Copyright (c) 2010, ST-Ericsson
*/
#include <net/mac80211.h>
+#include <linux/etherdevice.h>
#include "data_tx.h"
#include "wfx.h"
@@ -16,12 +17,11 @@
#include "traces.h"
#include "hif_tx_mib.h"
-#define WFX_INVALID_RATE_ID 15
-#define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
-
static int wfx_get_hw_rate(struct wfx_dev *wdev,
const struct ieee80211_tx_rate *rate)
{
+ struct ieee80211_supported_band *band;
+
if (rate->idx < 0)
return -1;
if (rate->flags & IEEE80211_TX_RC_MCS) {
@@ -33,7 +33,8 @@ static int wfx_get_hw_rate(struct wfx_dev *wdev,
}
// WFx only supports 2GHz; otherwise, band information should be retrieved
// from ieee80211_tx_info
- return wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates[rate->idx].hw_value;
+ band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ return band->bitrates[rate->idx].hw_value;
}
/* TX policy cache implementation */
@@ -41,73 +42,13 @@ static int wfx_get_hw_rate(struct wfx_dev *wdev,
static void wfx_tx_policy_build(struct wfx_vif *wvif, struct tx_policy *policy,
struct ieee80211_tx_rate *rates)
{
- int i;
- size_t count;
struct wfx_dev *wdev = wvif->wdev;
+ int i, rateid;
+ u8 count;
WARN(rates[0].idx < 0, "invalid rate policy");
memset(policy, 0, sizeof(*policy));
- for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
- if (rates[i].idx < 0)
- break;
- count = i;
-
- /* HACK!!! Device has problems (at least) switching from
- * 54Mbps CTS to 1Mbps. This switch takes enormous amount
- * of time (100-200 ms), leading to valuable throughput drop.
- * As a workaround, additional g-rates are injected to the
- * policy.
- */
- if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
- rates[0].idx > 4 && rates[0].count > 2 &&
- rates[1].idx < 2) {
- int mid_rate = (rates[0].idx + 4) >> 1;
-
- /* Decrease number of retries for the initial rate */
- rates[0].count -= 2;
-
- if (mid_rate != 4) {
- /* Keep fallback rate at 1Mbps. */
- rates[3] = rates[1];
-
- /* Inject 1 transmission on lowest g-rate */
- rates[2].idx = 4;
- rates[2].count = 1;
- rates[2].flags = rates[1].flags;
-
- /* Inject 1 transmission on mid-rate */
- rates[1].idx = mid_rate;
- rates[1].count = 1;
-
- /* Fallback to 1 Mbps is a really bad thing,
- * so let's try to increase probability of
- * successful transmission on the lowest g rate
- * even more
- */
- if (rates[0].count >= 3) {
- --rates[0].count;
- ++rates[2].count;
- }
-
- /* Adjust amount of rates defined */
- count += 2;
- } else {
- /* Keep fallback rate at 1Mbps. */
- rates[2] = rates[1];
-
- /* Inject 2 transmissions on lowest g-rate */
- rates[1].idx = 4;
- rates[1].count = 2;
-
- /* Adjust amount of rates defined */
- count += 1;
- }
- }
-
for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
- int rateid;
- u8 count;
-
if (rates[i].idx < 0)
break;
WARN_ON(rates[i].count > 15);
@@ -158,8 +99,7 @@ static int wfx_tx_policy_release(struct tx_policy_cache *cache,
}
static int wfx_tx_policy_get(struct wfx_vif *wvif,
- struct ieee80211_tx_rate *rates,
- bool *renew)
+ struct ieee80211_tx_rate *rates, bool *renew)
{
int idx;
struct tx_policy_cache *cache = &wvif->tx_policy_cache;
@@ -171,7 +111,7 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
if (list_empty(&cache->free)) {
WARN(1, "unable to get a valid Tx policy");
spin_unlock_bh(&cache->lock);
- return WFX_INVALID_RATE_ID;
+ return HIF_TX_RETRY_POLICY_INVALID;
}
idx = wfx_tx_policy_find(cache, &wanted);
if (idx >= 0) {
@@ -189,10 +129,8 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
idx = entry - cache->cache;
}
wfx_tx_policy_use(cache, &cache->cache[idx]);
- if (list_empty(&cache->free)) {
- /* Lock TX queues. */
- wfx_tx_queues_lock(wvif->wdev);
- }
+ if (list_empty(&cache->free))
+ ieee80211_stop_queues(wvif->wdev->hw);
spin_unlock_bh(&cache->lock);
return idx;
}
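The returned index travels with each Tx request and must be released when the frame is confirmed; a sketch of the round trip, with the field name taken from wfx_skb_dtor() further down in this patch:

/* hypothetical lifecycle of a retry-policy index */
rate_id = wfx_tx_policy_get(wvif, tx_info->driver_rates, &renew);
req->tx_flags.retry_policy_index = rate_id;
/* ... later, when the Tx confirmation arrives ... */
wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);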
@@ -202,15 +140,13 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
int usage, locked;
struct tx_policy_cache *cache = &wvif->tx_policy_cache;
- if (idx == WFX_INVALID_RATE_ID)
+ if (idx == HIF_TX_RETRY_POLICY_INVALID)
return;
spin_lock_bh(&cache->lock);
locked = list_empty(&cache->free);
usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
- if (locked && !usage) {
- /* Unlock TX queues. */
- wfx_tx_queues_unlock(wvif->wdev);
- }
+ if (locked && !usage)
+ ieee80211_wake_queues(wvif->wdev->hw);
spin_unlock_bh(&cache->lock);
}
@@ -218,15 +154,17 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif)
{
struct tx_policy *policies = wvif->tx_policy_cache.cache;
u8 tmp_rates[12];
- int i;
+ int i, is_used;
do {
spin_lock_bh(&wvif->tx_policy_cache.lock);
- for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
- if (!policies[i].uploaded &&
- memzcmp(policies[i].rates, sizeof(policies[i].rates)))
+ for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) {
+ is_used = memzcmp(policies[i].rates,
+ sizeof(policies[i].rates));
+ if (!policies[i].uploaded && is_used)
break;
- if (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES) {
+ }
+ if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) {
policies[i].uploaded = true;
memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
spin_unlock_bh(&wvif->tx_policy_cache.lock);
@@ -234,7 +172,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif)
} else {
spin_unlock_bh(&wvif->tx_policy_cache.lock);
}
- } while (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES);
+ } while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache));
return 0;
}
@@ -244,9 +182,7 @@ void wfx_tx_policy_upload_work(struct work_struct *work)
container_of(work, struct wfx_vif, tx_policy_upload_work);
wfx_tx_policy_upload(wvif);
-
wfx_tx_unlock(wvif->wdev);
- wfx_tx_queues_unlock(wvif->wdev);
}
void wfx_tx_policy_init(struct wfx_vif *wvif)
@@ -260,7 +196,7 @@ void wfx_tx_policy_init(struct wfx_vif *wvif)
INIT_LIST_HEAD(&cache->used);
INIT_LIST_HEAD(&cache->free);
- for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
+ for (i = 0; i < ARRAY_SIZE(cache->cache); ++i)
list_add(&cache->cache[i].link, &cache->free);
}
@@ -281,16 +217,11 @@ static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
struct wfx_tx_priv *tx_priv,
struct ieee80211_sta *sta)
{
- u32 mask = ~BIT(tx_priv->raw_link_id);
struct wfx_sta_priv *sta_priv;
int tid = ieee80211_get_tid(hdr);
- spin_lock_bh(&wvif->ps_state_lock);
- if (ieee80211_is_auth(hdr->frame_control))
- wvif->sta_asleep_mask &= mask;
- spin_unlock_bh(&wvif->ps_state_lock);
-
if (sta) {
+ tx_priv->has_sta = true;
sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
spin_lock_bh(&sta_priv->lock);
sta_priv->buffered[tid]++;
@@ -299,9 +230,8 @@ static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
}
}
-static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
- struct ieee80211_sta *sta,
- struct ieee80211_hdr *hdr)
+static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr)
{
struct wfx_sta_priv *sta_priv =
sta ? (struct wfx_sta_priv *)&sta->drv_priv : NULL;
@@ -313,7 +243,7 @@ static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
return 0;
if (is_multicast_ether_addr(da))
return 0;
- return WFX_LINK_ID_NO_ASSOC;
+ return HIF_LINK_ID_NOT_ASSOCIATED;
}
static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
@@ -358,7 +288,8 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
if (rates[i].idx == -1) {
rates[i].idx = 0;
rates[i].count = 8; // == hw->max_rate_tries
- rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+ rates[i].flags = rates[i - 1].flags &
+ IEEE80211_TX_RC_MCS;
break;
}
}
@@ -375,24 +306,19 @@ static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
rate_id = wfx_tx_policy_get(wvif,
tx_info->driver_rates, &tx_policy_renew);
- if (rate_id == WFX_INVALID_RATE_ID)
+ if (rate_id == HIF_TX_RETRY_POLICY_INVALID)
dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy");
if (tx_policy_renew) {
- /* FIXME: It's not so optimal to stop TX queues every now and
- * then. Better to reimplement task scheduling with a counter.
- */
wfx_tx_lock(wvif->wdev);
- wfx_tx_queues_lock(wvif->wdev);
- if (!schedule_work(&wvif->tx_policy_upload_work)) {
- wfx_tx_queues_unlock(wvif->wdev);
+ if (!schedule_work(&wvif->tx_policy_upload_work))
wfx_tx_unlock(wvif->wdev);
- }
}
return rate_id;
}
-static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info)
+static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev,
+ struct ieee80211_tx_info *tx_info)
{
struct ieee80211_tx_rate *rate = &tx_info->driver_rates[0];
struct hif_ht_tx_parameters ret = { };
@@ -429,7 +355,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int queue_id = tx_info->hw_queue;
+ int queue_id = skb_get_queue_mapping(skb);
size_t offset = (size_t)skb->data & 3;
int wmsg_len = sizeof(struct hif_msg) +
sizeof(struct hif_req_tx) + offset;
@@ -441,14 +367,8 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
// Fill tx_priv
tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
- tx_priv->raw_link_id = wfx_tx_get_raw_link_id(wvif, sta, hdr);
- tx_priv->link_id = tx_priv->raw_link_id;
if (ieee80211_has_protected(hdr->frame_control))
tx_priv->hw_key = hw_key;
- if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
- tx_priv->link_id = WFX_LINK_ID_AFTER_DTIM;
- if (sta && (sta->uapsd_queues & BIT(queue_id)))
- tx_priv->link_id = WFX_LINK_ID_UAPSD;
// Fill hif_msg
WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
@@ -461,7 +381,8 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
hif_msg->id = HIF_REQ_ID_TX;
hif_msg->interface = wvif->id;
if (skb->len > wvif->wdev->hw_caps.size_inp_ch_buf) {
- dev_warn(wvif->wdev->dev, "requested frame size (%d) is larger than maximum supported (%d)\n",
+ dev_warn(wvif->wdev->dev,
+ "requested frame size (%d) is larger than maximum supported (%d)\n",
skb->len, wvif->wdev->hw_caps.size_inp_ch_buf);
skb_pull(skb, wmsg_len);
return -EIO;
@@ -472,13 +393,14 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// packet_id just needs to be unique on the device. 32 bits are more than
// necessary for that task, so we take advantage of it to add some extra
// data for debug.
- req->packet_id = queue_id << 28 |
- IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16 |
- (atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF);
+ req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
+ req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
+ req->packet_id |= queue_id << 28;
+
req->data_flags.fc_offset = offset;
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
req->data_flags.after_dtim = 1;
- req->queue_id.peer_sta_id = tx_priv->raw_link_id;
+ req->queue_id.peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
// Queue indices are inverted between firmware and Linux
req->queue_id.queue_id = 3 - queue_id;
req->ht_tx_parameters = wfx_tx_get_tx_parms(wvif->wdev, tx_info);
@@ -486,7 +408,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// Auxiliary operations
wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
- wfx_tx_queue_put(wvif->wdev, &wvif->wdev->tx_queue[queue_id], skb);
+ wfx_tx_queues_put(wvif->wdev, skb);
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
schedule_work(&wvif->update_tim_work);
wfx_bh_request_tx(wvif->wdev);
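The packet_id built above packs three debug fields into 32 bits; a hypothetical decode (not driver code) makes the layout explicit:

u16 counter  = req->packet_id & 0xFFFF;         /* bits  0..15: running counter */
u16 seq_num  = (req->packet_id >> 16) & 0x0FFF; /* bits 16..27: 802.11 SN */
u8  queue_id = req->packet_id >> 28;            /* bits 28..31: Linux queue id */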
@@ -528,28 +450,59 @@ drop:
ieee80211_tx_status_irqsafe(wdev->hw, skb);
}
-void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
+static struct ieee80211_hdr *wfx_skb_hdr80211(struct sk_buff *skb)
{
- int i;
- int tx_count;
- struct sk_buff *skb;
- struct ieee80211_tx_rate *rate;
- struct ieee80211_tx_info *tx_info;
- const struct wfx_tx_priv *tx_priv;
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
- skb = wfx_pending_get(wvif->wdev, arg->packet_id);
- if (!skb) {
- dev_warn(wvif->wdev->dev,
- "received unknown packet_id (%#.8x) from chip\n",
- arg->packet_id);
- return;
+ return (struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);
+}
+
+static void wfx_tx_update_sta(struct wfx_vif *wvif, struct ieee80211_hdr *hdr)
+{
+ int tid = ieee80211_get_tid(hdr);
+ struct wfx_sta_priv *sta_priv;
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock(); // protect sta
+ sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ if (sta) {
+ sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+ spin_lock_bh(&sta_priv->lock);
+ WARN(!sta_priv->buffered[tid], "inconsistent notification");
+ sta_priv->buffered[tid]--;
+ if (!sta_priv->buffered[tid])
+ ieee80211_sta_set_buffered(sta, tid, false);
+ spin_unlock_bh(&sta_priv->lock);
+ } else {
+ dev_dbg(wvif->wdev->dev, "%s: sta does not exist anymore\n",
+ __func__);
}
- tx_info = IEEE80211_SKB_CB(skb);
- tx_priv = wfx_skb_tx_priv(skb);
- _trace_tx_stats(arg, skb,
- wfx_pending_get_pkt_us_delay(wvif->wdev, skb));
+ rcu_read_unlock();
+}
+
+static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
+ unsigned int offset = sizeof(struct hif_msg) +
+ sizeof(struct hif_req_tx) +
+ req->data_flags.fc_offset;
+
+ WARN_ON(!wvif);
+ wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
+ skb_pull(skb, offset);
+ ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb);
+}
+
+static void wfx_tx_fill_rates(struct wfx_dev *wdev,
+ struct ieee80211_tx_info *tx_info,
+ const struct hif_cnf_tx *arg)
+{
+ struct ieee80211_tx_rate *rate;
+ int tx_count;
+ int i;
- // You can touch to tx_priv, but don't touch to tx_info->status.
tx_count = arg->ack_failures;
if (!arg->status || arg->ack_failures)
tx_count += 1; // Also report success
@@ -558,16 +511,14 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
if (rate->idx < 0)
break;
if (tx_count < rate->count &&
- arg->status == HIF_STATUS_RETRY_EXCEEDED &&
+ arg->status == HIF_STATUS_TX_FAIL_RETRIES &&
arg->ack_failures)
- dev_dbg(wvif->wdev->dev, "all retries were not consumed: %d != %d\n",
+ dev_dbg(wdev->dev, "all retries were not consumed: %d != %d\n",
rate->count, tx_count);
if (tx_count <= rate->count && tx_count &&
- arg->txed_rate != wfx_get_hw_rate(wvif->wdev, rate))
- dev_dbg(wvif->wdev->dev,
- "inconsistent tx_info rates: %d != %d\n",
- arg->txed_rate,
- wfx_get_hw_rate(wvif->wdev, rate));
+ arg->txed_rate != wfx_get_hw_rate(wdev, rate))
+ dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n",
+ arg->txed_rate, wfx_get_hw_rate(wdev, rate));
if (tx_count > rate->count) {
tx_count -= rate->count;
} else if (!tx_count) {
@@ -579,8 +530,30 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
}
}
if (tx_count)
- dev_dbg(wvif->wdev->dev,
- "%d more retries than expected\n", tx_count);
+ dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count);
+}
+
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
+{
+ struct ieee80211_tx_info *tx_info;
+ const struct wfx_tx_priv *tx_priv;
+ struct sk_buff *skb;
+
+ skb = wfx_pending_get(wvif->wdev, arg->packet_id);
+ if (!skb) {
+ dev_warn(wvif->wdev->dev, "received unknown packet_id (%#.8x) from chip\n",
+ arg->packet_id);
+ return;
+ }
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_priv = wfx_skb_tx_priv(skb);
+ _trace_tx_stats(arg, skb,
+ wfx_pending_get_pkt_us_delay(wvif->wdev, skb));
+
+ // You can touch tx_priv, but don't touch tx_info->status.
+ wfx_tx_fill_rates(wvif->wdev, tx_info, arg);
+ if (tx_priv->has_sta)
+ wfx_tx_update_sta(wvif, wfx_skb_hdr80211(skb));
skb_trim(skb, skb->len - wfx_tx_get_icv_len(tx_priv->hw_key));
// From now, you can touch to tx_info->status, but do not touch to
@@ -590,63 +563,64 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
memset(tx_info->pad, 0, sizeof(tx_info->pad));
if (!arg->status) {
- if (wvif->bss_loss_state &&
- arg->packet_id == wvif->bss_loss_confirm_id)
- wfx_cqm_bssloss_sm(wvif, 0, 1, 0);
tx_info->status.tx_time =
- arg->media_delay - arg->tx_queue_delay;
+ le32_to_cpu(arg->media_delay) -
+ le32_to_cpu(arg->tx_queue_delay);
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
else
tx_info->flags |= IEEE80211_TX_STAT_ACK;
- } else if (arg->status == HIF_REQUEUE) {
- WARN(!arg->tx_result_flags.requeue, "incoherent status and result_flags");
+ } else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) {
+ WARN(!arg->tx_result_flags.requeue,
+ "incoherent status and result_flags");
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
wvif->after_dtim_tx_allowed = false; // DTIM period elapsed
schedule_work(&wvif->update_tim_work);
}
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
- } else {
- if (wvif->bss_loss_state &&
- arg->packet_id == wvif->bss_loss_confirm_id)
- wfx_cqm_bssloss_sm(wvif, 0, 0, 1);
}
- wfx_pending_remove(wvif->wdev, skb);
+ wfx_skb_dtor(wvif, skb);
}
-static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb)
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_sta *sta;
- struct wfx_sta_priv *sta_priv;
- int tid = ieee80211_get_tid(hdr);
+ struct wfx_dev *wdev = hw->priv;
+ struct sk_buff_head dropped;
+ struct wfx_queue *queue;
+ struct wfx_vif *wvif;
+ struct hif_msg *hif;
+ struct sk_buff *skb;
+ int vif_id = -1;
+ int i;
- rcu_read_lock(); // protect sta
- sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
- if (sta) {
- sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
- spin_lock_bh(&sta_priv->lock);
- WARN(!sta_priv->buffered[tid], "inconsistent notification");
- sta_priv->buffered[tid]--;
- if (!sta_priv->buffered[tid])
- ieee80211_sta_set_buffered(sta, tid, false);
- spin_unlock_bh(&sta_priv->lock);
+ if (vif)
+ vif_id = ((struct wfx_vif *)vif->drv_priv)->id;
+ skb_queue_head_init(&dropped);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (!(BIT(i) & queues))
+ continue;
+ queue = &wdev->tx_queue[i];
+ if (drop)
+ wfx_tx_queue_drop(wdev, queue, vif_id, &dropped);
+ if (wdev->chip_frozen)
+ continue;
+ if (wait_event_timeout(wdev->tx_dequeue,
+ wfx_tx_queue_empty(wdev, queue, vif_id),
+ msecs_to_jiffies(1000)) <= 0)
+ dev_warn(wdev->dev,
+ "frames queued while flushing tx queues?");
+ }
+ wfx_tx_flush(wdev);
+ if (wdev->chip_frozen)
+ wfx_pending_drop(wdev, &dropped);
+ while ((skb = skb_dequeue(&dropped)) != NULL) {
+ hif = (struct hif_msg *)skb->data;
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ if (wfx_skb_tx_priv(skb)->has_sta)
+ wfx_tx_update_sta(wvif, wfx_skb_hdr80211(skb));
+ ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb));
+ wfx_skb_dtor(wvif, skb);
}
- rcu_read_unlock();
}
-void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
-{
- struct hif_msg *hif = (struct hif_msg *)skb->data;
- struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
- struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- unsigned int offset = sizeof(struct hif_req_tx) +
- sizeof(struct hif_msg) +
- req->data_flags.fc_offset;
-
- WARN_ON(!wvif);
- skb_pull(skb, offset);
- wfx_notify_buffered_tx(wvif, skb);
- wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
- ieee80211_tx_status_irqsafe(wdev->hw, skb);
-}
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
index c545dd75449b..54fff24508fb 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/staging/wfx/data_tx.h
@@ -26,7 +26,7 @@ struct tx_policy {
};
struct tx_policy_cache {
- struct tx_policy cache[HIF_MIB_NUM_TX_RATE_RETRY_POLICIES];
+ struct tx_policy cache[HIF_TX_RETRY_POLICY_MAX];
// FIXME: use a tree and drop hash from tx_policy
struct list_head used;
struct list_head free;
@@ -36,8 +36,7 @@ struct tx_policy_cache {
struct wfx_tx_priv {
ktime_t xmit_timestamp;
struct ieee80211_key_conf *hw_key;
- u8 link_id;
- u8 raw_link_id;
+ bool has_sta;
} __packed;
void wfx_tx_policy_init(struct wfx_vif *wvif);
@@ -46,7 +45,8 @@ void wfx_tx_policy_upload_work(struct work_struct *work);
void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg);
-void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
{
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
index 1164aba118a1..10d649985696 100644
--- a/drivers/staging/wfx/debug.c
+++ b/drivers/staging/wfx/debug.c
@@ -61,19 +61,26 @@ const char *get_reg_name(unsigned long id)
static int wfx_counters_show(struct seq_file *seq, void *v)
{
- int ret;
+ int ret, i;
struct wfx_dev *wdev = seq->private;
- struct hif_mib_extended_count_table counters;
+ struct hif_mib_extended_count_table counters[3];
+
+ for (i = 0; i < ARRAY_SIZE(counters); i++) {
+ ret = hif_get_counters_table(wdev, i, counters + i);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -EIO;
+ }
- ret = hif_get_counters_table(wdev, &counters);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return -EIO;
+ seq_printf(seq, "%-24s %12s %12s %12s\n",
+ "", "global", "iface 0", "iface 1");
#define PUT_COUNTER(name) \
- seq_printf(seq, "%24s %d\n", #name ":",\
- le32_to_cpu(counters.count_##name))
+ seq_printf(seq, "%-24s %12d %12d %12d\n", #name, \
+ le32_to_cpu(counters[2].count_##name), \
+ le32_to_cpu(counters[0].count_##name), \
+ le32_to_cpu(counters[1].count_##name))
PUT_COUNTER(tx_packets);
PUT_COUNTER(tx_multicast_frames);
@@ -105,6 +112,12 @@ static int wfx_counters_show(struct seq_file *seq, void *v)
#undef PUT_COUNTER
+ for (i = 0; i < ARRAY_SIZE(counters[0].reserved); i++)
+ seq_printf(seq, "reserved[%02d]%12s %12d %12d %12d\n", i, "",
+ le32_to_cpu(counters[2].reserved[i]),
+ le32_to_cpu(counters[0].reserved[i]),
+ le32_to_cpu(counters[1].reserved[i]));
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wfx_counters);
@@ -142,7 +155,7 @@ static int wfx_rx_stats_show(struct seq_file *seq, void *v)
mutex_lock(&wdev->rx_stats_lock);
seq_printf(seq, "Timestamp: %dus\n", st->date);
seq_printf(seq, "Low power clock: frequency %uHz, external %s\n",
- st->pwr_clk_freq,
+ le32_to_cpu(st->pwr_clk_freq),
st->is_ext_pwr_clk ? "yes" : "no");
seq_printf(seq,
"Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
@@ -152,9 +165,12 @@ static int wfx_rx_stats_show(struct seq_file *seq, void *v)
for (i = 0; i < ARRAY_SIZE(channel_names); i++) {
if (channel_names[i])
seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n",
- channel_names[i], st->nb_rx_by_rate[i],
- st->per[i], st->rssi[i] / 100,
- st->snr[i] / 100, st->cfo[i]);
+ channel_names[i],
+ le32_to_cpu(st->nb_rx_by_rate[i]),
+ le16_to_cpu(st->per[i]),
+ (s16)le16_to_cpu(st->rssi[i]) / 100,
+ (s16)le16_to_cpu(st->snr[i]) / 100,
+ (s16)le16_to_cpu(st->cfo[i]));
}
mutex_unlock(&wdev->rx_stats_lock);
@@ -162,6 +178,30 @@ static int wfx_rx_stats_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats);
+static int wfx_tx_power_loop_show(struct seq_file *seq, void *v)
+{
+ struct wfx_dev *wdev = seq->private;
+ struct hif_tx_power_loop_info *st = &wdev->tx_power_loop_info;
+ int tmp;
+
+ mutex_lock(&wdev->tx_power_loop_info_lock);
+ tmp = le16_to_cpu(st->tx_gain_dig);
+ seq_printf(seq, "Tx gain digital: %d\n", tmp);
+ tmp = le16_to_cpu(st->tx_gain_pa);
+ seq_printf(seq, "Tx gain PA: %d\n", tmp);
+ tmp = (s16)le16_to_cpu(st->target_pout);
+ seq_printf(seq, "Target Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25);
+ tmp = (s16)le16_to_cpu(st->p_estimation);
+ seq_printf(seq, "FEM Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25);
+ tmp = le16_to_cpu(st->vpdet);
+ seq_printf(seq, "Vpdet: %d mV\n", tmp);
+ seq_printf(seq, "Measure index: %d\n", st->measurement_index);
+ mutex_unlock(&wdev->tx_power_loop_info_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_tx_power_loop);
+
static ssize_t wfx_send_pds_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -234,7 +274,7 @@ static ssize_t wfx_send_hif_msg_write(struct file *file,
request = memdup_user(user_buf, count);
if (IS_ERR(request))
return PTR_ERR(request);
- if (request->len != count) {
+ if (le16_to_cpu(request->len) != count) {
kfree(request);
return -EINVAL;
}
@@ -301,6 +341,8 @@ int wfx_debug_init(struct wfx_dev *wdev)
d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir);
debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops);
debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops);
+ debugfs_create_file("tx_power_loop", 0444, d, wdev,
+ &wfx_tx_power_loop_fops);
debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops);
debugfs_create_file("burn_slk_key", 0200, d, wdev,
&wfx_burn_slk_key_fops);
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
index 9d61082c1e6c..72bb3d2a9613 100644
--- a/drivers/staging/wfx/fwio.c
+++ b/drivers/staging/wfx/fwio.c
@@ -99,16 +99,16 @@ static int sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf,
return ret;
}
-int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
- const struct firmware **fw, int *file_offset)
+static int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
+ const struct firmware **fw, int *file_offset)
{
int keyset_file;
char filename[256];
const char *data;
int ret;
- snprintf(filename, sizeof(filename), "%s_%02X.sec", wdev->pdata.file_fw,
- keyset_chip);
+ snprintf(filename, sizeof(filename), "%s_%02X.sec",
+ wdev->pdata.file_fw, keyset_chip);
ret = firmware_request_nowarn(fw, filename, wdev->dev);
if (ret) {
dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n",
@@ -325,8 +325,8 @@ static int init_gpr(struct wfx_dev *wdev)
gpr_init[i].value);
if (ret < 0)
return ret;
- dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index,
- gpr_init[i].value);
+ dev_dbg(wdev->dev, " index %02x: %08x\n",
+ gpr_init[i].index, gpr_init[i].value);
}
return 0;
}
@@ -360,7 +360,7 @@ int wfx_init_device(struct wfx_dev *wdev)
dev_dbg(wdev->dev, "initial config register value: %08x\n", reg);
hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg);
- if (hw_revision == 0 || hw_revision > 2) {
+ if (hw_revision == 0) {
dev_err(wdev->dev, "bad hardware revision number: %d\n",
hw_revision);
return -ENODEV;
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
index 071b71e2a107..21cde19cff75 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -10,56 +10,54 @@
#include "hif_api_general.h"
-#define HIF_NUM_AC 4
-
#define HIF_API_SSID_SIZE API_SSID_SIZE
enum hif_requests_ids {
- HIF_REQ_ID_RESET = 0x0a,
- HIF_REQ_ID_READ_MIB = 0x05,
- HIF_REQ_ID_WRITE_MIB = 0x06,
- HIF_REQ_ID_START_SCAN = 0x07,
- HIF_REQ_ID_STOP_SCAN = 0x08,
- HIF_REQ_ID_TX = 0x04,
- HIF_REQ_ID_JOIN = 0x0b,
- HIF_REQ_ID_SET_PM_MODE = 0x10,
- HIF_REQ_ID_SET_BSS_PARAMS = 0x11,
- HIF_REQ_ID_ADD_KEY = 0x0c,
- HIF_REQ_ID_REMOVE_KEY = 0x0d,
- HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13,
- HIF_REQ_ID_START = 0x17,
- HIF_REQ_ID_BEACON_TRANSMIT = 0x18,
- HIF_REQ_ID_UPDATE_IE = 0x1b,
- HIF_REQ_ID_MAP_LINK = 0x1c,
+ HIF_REQ_ID_RESET = 0x0a,
+ HIF_REQ_ID_READ_MIB = 0x05,
+ HIF_REQ_ID_WRITE_MIB = 0x06,
+ HIF_REQ_ID_START_SCAN = 0x07,
+ HIF_REQ_ID_STOP_SCAN = 0x08,
+ HIF_REQ_ID_TX = 0x04,
+ HIF_REQ_ID_JOIN = 0x0b,
+ HIF_REQ_ID_SET_PM_MODE = 0x10,
+ HIF_REQ_ID_SET_BSS_PARAMS = 0x11,
+ HIF_REQ_ID_ADD_KEY = 0x0c,
+ HIF_REQ_ID_REMOVE_KEY = 0x0d,
+ HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_REQ_ID_START = 0x17,
+ HIF_REQ_ID_BEACON_TRANSMIT = 0x18,
+ HIF_REQ_ID_UPDATE_IE = 0x1b,
+ HIF_REQ_ID_MAP_LINK = 0x1c,
};
enum hif_confirmations_ids {
- HIF_CNF_ID_RESET = 0x0a,
- HIF_CNF_ID_READ_MIB = 0x05,
- HIF_CNF_ID_WRITE_MIB = 0x06,
- HIF_CNF_ID_START_SCAN = 0x07,
- HIF_CNF_ID_STOP_SCAN = 0x08,
- HIF_CNF_ID_TX = 0x04,
- HIF_CNF_ID_MULTI_TRANSMIT = 0x1e,
- HIF_CNF_ID_JOIN = 0x0b,
- HIF_CNF_ID_SET_PM_MODE = 0x10,
- HIF_CNF_ID_SET_BSS_PARAMS = 0x11,
- HIF_CNF_ID_ADD_KEY = 0x0c,
- HIF_CNF_ID_REMOVE_KEY = 0x0d,
- HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13,
- HIF_CNF_ID_START = 0x17,
- HIF_CNF_ID_BEACON_TRANSMIT = 0x18,
- HIF_CNF_ID_UPDATE_IE = 0x1b,
- HIF_CNF_ID_MAP_LINK = 0x1c,
+ HIF_CNF_ID_RESET = 0x0a,
+ HIF_CNF_ID_READ_MIB = 0x05,
+ HIF_CNF_ID_WRITE_MIB = 0x06,
+ HIF_CNF_ID_START_SCAN = 0x07,
+ HIF_CNF_ID_STOP_SCAN = 0x08,
+ HIF_CNF_ID_TX = 0x04,
+ HIF_CNF_ID_MULTI_TRANSMIT = 0x1e,
+ HIF_CNF_ID_JOIN = 0x0b,
+ HIF_CNF_ID_SET_PM_MODE = 0x10,
+ HIF_CNF_ID_SET_BSS_PARAMS = 0x11,
+ HIF_CNF_ID_ADD_KEY = 0x0c,
+ HIF_CNF_ID_REMOVE_KEY = 0x0d,
+ HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_CNF_ID_START = 0x17,
+ HIF_CNF_ID_BEACON_TRANSMIT = 0x18,
+ HIF_CNF_ID_UPDATE_IE = 0x1b,
+ HIF_CNF_ID_MAP_LINK = 0x1c,
};
enum hif_indications_ids {
- HIF_IND_ID_RX = 0x84,
- HIF_IND_ID_SCAN_CMPL = 0x86,
- HIF_IND_ID_JOIN_COMPLETE = 0x8f,
- HIF_IND_ID_SET_PM_MODE_CMPL = 0x89,
- HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c,
- HIF_IND_ID_EVENT = 0x85
+ HIF_IND_ID_RX = 0x84,
+ HIF_IND_ID_SCAN_CMPL = 0x86,
+ HIF_IND_ID_JOIN_COMPLETE = 0x8f,
+ HIF_IND_ID_SET_PM_MODE_CMPL = 0x89,
+ HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c,
+ HIF_IND_ID_EVENT = 0x85
};
union hif_commands_ids {
@@ -68,27 +66,11 @@ union hif_commands_ids {
enum hif_indications_ids indication;
};
-enum hif_status {
- HIF_STATUS_SUCCESS = 0x0,
- HIF_STATUS_FAILURE = 0x1,
- HIF_INVALID_PARAMETER = 0x2,
- HIF_STATUS_WARNING = 0x3,
- HIF_ERROR_UNSUPPORTED_MSG_ID = 0x4,
- HIF_STATUS_DECRYPTFAILURE = 0x10,
- HIF_STATUS_MICFAILURE = 0x11,
- HIF_STATUS_NO_KEY_FOUND = 0x12,
- HIF_STATUS_RETRY_EXCEEDED = 0x13,
- HIF_STATUS_TX_LIFETIME_EXCEEDED = 0x14,
- HIF_REQUEUE = 0x15,
- HIF_STATUS_REFUSED = 0x16,
- HIF_STATUS_BUSY = 0x17
-};
-
struct hif_reset_flags {
- u8 reset_stat:1;
- u8 reset_all_int:1;
- u8 reserved1:6;
- u8 reserved2[3];
+ u8 reset_stat:1;
+ u8 reset_all_int:1;
+ u8 reserved1:6;
+ u8 reserved2[3];
} __packed;
struct hif_req_reset {
@@ -96,117 +78,101 @@ struct hif_req_reset {
} __packed;
struct hif_req_read_mib {
- u16 mib_id;
- u16 reserved;
+ __le16 mib_id;
+ __le16 reserved;
} __packed;
struct hif_cnf_read_mib {
- u32 status;
- u16 mib_id;
- u16 length;
- u8 mib_data[];
+ __le32 status;
+ __le16 mib_id;
+ __le16 length;
+ u8 mib_data[];
} __packed;
struct hif_req_write_mib {
- u16 mib_id;
- u16 length;
- u8 mib_data[];
+ __le16 mib_id;
+ __le16 length;
+ u8 mib_data[];
} __packed;
struct hif_cnf_write_mib {
- u32 status;
+ __le32 status;
} __packed;
struct hif_ie_flags {
- u8 beacon:1;
- u8 probe_resp:1;
- u8 probe_req:1;
- u8 reserved1:5;
- u8 reserved2;
+ u8 beacon:1;
+ u8 probe_resp:1;
+ u8 probe_req:1;
+ u8 reserved1:5;
+ u8 reserved2;
} __packed;
struct hif_ie_tlv {
- u8 type;
- u8 length;
- u8 data[];
+ u8 type;
+ u8 length;
+ u8 data[];
} __packed;
struct hif_req_update_ie {
struct hif_ie_flags ie_flags;
- u16 num_ies;
+ __le16 num_ies;
struct hif_ie_tlv ie[];
} __packed;
struct hif_cnf_update_ie {
- u32 status;
+ __le32 status;
} __packed;
struct hif_scan_type {
- u8 type:1;
- u8 mode:1;
- u8 reserved:6;
+ u8 type:1;
+ u8 mode:1;
+ u8 reserved:6;
} __packed;
struct hif_scan_flags {
- u8 fbg:1;
- u8 reserved1:1;
- u8 pre:1;
- u8 reserved2:5;
+ u8 fbg:1;
+ u8 reserved1:1;
+ u8 pre:1;
+ u8 reserved2:5;
} __packed;
struct hif_auto_scan_param {
- u16 interval;
- u8 reserved;
+ __le16 interval;
+ u8 reserved;
s8 rssi_thr;
} __packed;
struct hif_ssid_def {
- u32 ssid_length;
- u8 ssid[HIF_API_SSID_SIZE];
+ __le32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
} __packed;
#define HIF_API_MAX_NB_SSIDS 2
#define HIF_API_MAX_NB_CHANNELS 14
-struct hif_req_start_scan {
- u8 band;
- struct hif_scan_type scan_type;
- struct hif_scan_flags scan_flags;
- u8 max_transmit_rate;
- struct hif_auto_scan_param auto_scan_param;
- u8 num_of_probe_requests;
- u8 probe_delay;
- u8 num_of_ssids;
- u8 num_of_channels;
- u32 min_channel_time;
- u32 max_channel_time;
- s32 tx_power_level;
- u8 ssid_and_channel_lists[];
-} __packed;
-
struct hif_req_start_scan_alt {
- u8 band;
+ u8 band;
struct hif_scan_type scan_type;
struct hif_scan_flags scan_flags;
- u8 max_transmit_rate;
+ u8 max_transmit_rate;
struct hif_auto_scan_param auto_scan_param;
- u8 num_of_probe_requests;
- u8 probe_delay;
- u8 num_of_ssids;
- u8 num_of_channels;
- u32 min_channel_time;
- u32 max_channel_time;
- s32 tx_power_level;
+ u8 num_of_probe_requests;
+ u8 probe_delay;
+ u8 num_of_ssids;
+ u8 num_of_channels;
+ __le32 min_channel_time;
+ __le32 max_channel_time;
+ __le32 tx_power_level; // signed value
struct hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS];
- u8 channel_list[];
+ u8 channel_list[];
} __packed;
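+// Fields tagged "// signed value" travel as unsigned little-endian words;
+// recovering the sign on the host is a cast away (sketch, with a
+// hypothetical pointer "body"):
+//
+//	s32 level = (s32)le32_to_cpu(body->tx_power_level);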
struct hif_cnf_start_scan {
- u32 status;
+ __le32 status;
} __packed;
struct hif_cnf_stop_scan {
- u32 status;
+ __le32 status;
} __packed;
enum hif_pm_mode_status {
@@ -216,10 +182,10 @@ enum hif_pm_mode_status {
};
struct hif_ind_scan_cmpl {
- u32 status;
- u8 pm_mode;
- u8 num_channels_completed;
- u16 reserved;
+ __le32 status;
+ u8 pm_mode;
+ u8 num_channels_completed;
+ __le16 reserved;
} __packed;
enum hif_queue_id {
@@ -241,46 +207,48 @@ enum hif_stbc {
};
struct hif_queue {
- u8 queue_id:2;
- u8 peer_sta_id:4;
- u8 reserved:2;
+ u8 queue_id:2;
+ u8 peer_sta_id:4;
+ u8 reserved:2;
} __packed;
struct hif_data_flags {
- u8 more:1;
- u8 fc_offset:3;
- u8 after_dtim:1;
- u8 reserved:3;
+ u8 more:1;
+ u8 fc_offset:3;
+ u8 after_dtim:1;
+ u8 reserved:3;
} __packed;
struct hif_tx_flags {
- u8 start_exp:1;
- u8 reserved:3;
- u8 retry_policy_index:4;
+ u8 start_exp:1;
+ u8 reserved:3;
+ u8 retry_policy_index:4;
} __packed;
struct hif_ht_tx_parameters {
- u8 frame_format:4;
- u8 fec_coding:1;
- u8 short_gi:1;
- u8 reserved1:1;
- u8 stbc:1;
- u8 reserved2;
- u8 aggregation:1;
- u8 reserved3:7;
- u8 reserved4;
+ u8 frame_format:4;
+ u8 fec_coding:1;
+ u8 short_gi:1;
+ u8 reserved1:1;
+ u8 stbc:1;
+ u8 reserved2;
+ u8 aggregation:1;
+ u8 reserved3:7;
+ u8 reserved4;
} __packed;
struct hif_req_tx {
- u32 packet_id;
- u8 max_tx_rate;
+ // packet_id is not interpreted by the device, so it is not necessary to
+ // declare it little endian
+ u32 packet_id;
+ u8 max_tx_rate;
struct hif_queue queue_id;
struct hif_data_flags data_flags;
struct hif_tx_flags tx_flags;
- u32 reserved;
- u32 expire_time;
+ __le32 reserved;
+ __le32 expire_time;
struct hif_ht_tx_parameters ht_tx_parameters;
- u8 frame[];
+ u8 frame[];
} __packed;
enum hif_qos_ackplcy {
@@ -291,26 +259,29 @@ enum hif_qos_ackplcy {
};
struct hif_tx_result_flags {
- u8 aggr:1;
- u8 requeue:1;
- u8 ack_policy:2;
- u8 txop_limit:1;
- u8 reserved1:3;
- u8 reserved2;
+ u8 aggr:1;
+ u8 requeue:1;
+ u8 ack_policy:2;
+ u8 txop_limit:1;
+ u8 reserved1:3;
+ u8 reserved2;
} __packed;
struct hif_cnf_tx {
- u32 status;
- u32 packet_id;
- u8 txed_rate;
- u8 ack_failures;
+ __le32 status;
+ // packet_id is copied from struct hif_req_tx without being interpreted
+ // by the device, so it is not necessary to declare it little endian
+ u32 packet_id;
+ u8 txed_rate;
+ u8 ack_failures;
struct hif_tx_result_flags tx_result_flags;
- u32 media_delay;
- u32 tx_queue_delay;
+ __le32 media_delay;
+ __le32 tx_queue_delay;
} __packed;
struct hif_cnf_multi_transmit {
- u32 num_tx_confs;
+ u8 num_tx_confs;
+ u8 reserved[3];
struct hif_cnf_tx tx_conf_payload[];
} __packed;
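+// Parsing sketch (handle_one_cnf() is a hypothetical helper): the payload
+// is num_tx_confs consecutive struct hif_cnf_tx entries:
+//
+//	for (i = 0; i < cnf->num_tx_confs; i++)
+//		handle_one_cnf(wdev, &cnf->tx_conf_payload[i]);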
@@ -323,147 +294,150 @@ enum hif_ri_flags_encrypt {
};
struct hif_rx_flags {
- u8 encryp:3;
- u8 in_aggr:1;
- u8 first_aggr:1;
- u8 last_aggr:1;
- u8 defrag:1;
- u8 beacon:1;
- u8 tim:1;
- u8 bitmap:1;
- u8 match_ssid:1;
- u8 match_bssid:1;
- u8 more:1;
- u8 reserved1:1;
- u8 ht:1;
- u8 stbc:1;
- u8 match_uc_addr:1;
- u8 match_mc_addr:1;
- u8 match_bc_addr:1;
- u8 key_type:1;
- u8 key_index:4;
- u8 reserved2:1;
- u8 peer_sta_id:4;
- u8 reserved3:2;
- u8 reserved4:1;
+ u8 encryp:3;
+ u8 in_aggr:1;
+ u8 first_aggr:1;
+ u8 last_aggr:1;
+ u8 defrag:1;
+ u8 beacon:1;
+ u8 tim:1;
+ u8 bitmap:1;
+ u8 match_ssid:1;
+ u8 match_bssid:1;
+ u8 more:1;
+ u8 reserved1:1;
+ u8 ht:1;
+ u8 stbc:1;
+ u8 match_uc_addr:1;
+ u8 match_mc_addr:1;
+ u8 match_bc_addr:1;
+ u8 key_type:1;
+ u8 key_index:4;
+ u8 reserved2:1;
+ u8 peer_sta_id:4;
+ u8 reserved3:2;
+ u8 reserved4:1;
} __packed;
struct hif_ind_rx {
- u32 status;
- u16 channel_number;
- u8 rxed_rate;
- u8 rcpi_rssi;
+ __le32 status;
+ u8 channel_number;
+ u8 reserved;
+ u8 rxed_rate;
+ u8 rcpi_rssi;
struct hif_rx_flags rx_flags;
- u8 frame[];
+ u8 frame[];
} __packed;
struct hif_req_edca_queue_params {
- u8 queue_id;
- u8 reserved1;
- u8 aifsn;
- u8 reserved2;
- u16 cw_min;
- u16 cw_max;
- u16 tx_op_limit;
- u16 allowed_medium_time;
- u32 reserved3;
+ u8 queue_id;
+ u8 reserved1;
+ u8 aifsn;
+ u8 reserved2;
+ __le16 cw_min;
+ __le16 cw_max;
+ __le16 tx_op_limit;
+ __le16 allowed_medium_time;
+ __le32 reserved3;
} __packed;
struct hif_cnf_edca_queue_params {
- u32 status;
+ __le32 status;
} __packed;
struct hif_join_flags {
- u8 reserved1:2;
- u8 force_no_beacon:1;
- u8 force_with_ind:1;
- u8 reserved2:4;
+ u8 reserved1:2;
+ u8 force_no_beacon:1;
+ u8 force_with_ind:1;
+ u8 reserved2:4;
} __packed;
struct hif_req_join {
- u8 infrastructure_bss_mode:1;
- u8 reserved1:7;
- u8 band;
- u16 channel_number;
- u8 bssid[ETH_ALEN];
- u16 atim_window;
- u8 short_preamble:1;
- u8 reserved2:7;
- u8 probe_for_join;
- u8 reserved3;
+ u8 infrastructure_bss_mode:1;
+ u8 reserved1:7;
+ u8 band;
+ u8 channel_number;
+ u8 reserved;
+ u8 bssid[ETH_ALEN];
+ __le16 atim_window;
+ u8 short_preamble:1;
+ u8 reserved2:7;
+ u8 probe_for_join;
+ u8 reserved3;
struct hif_join_flags join_flags;
- u32 ssid_length;
- u8 ssid[HIF_API_SSID_SIZE];
- u32 beacon_interval;
- u32 basic_rate_set;
+ __le32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ __le32 beacon_interval;
+ __le32 basic_rate_set;
} __packed;
struct hif_cnf_join {
- u32 status;
+ __le32 status;
} __packed;
struct hif_ind_join_complete {
- u32 status;
+ __le32 status;
} __packed;
struct hif_bss_flags {
- u8 lost_count_only:1;
- u8 reserved:7;
+ u8 lost_count_only:1;
+ u8 reserved:7;
} __packed;
struct hif_req_set_bss_params {
struct hif_bss_flags bss_flags;
- u8 beacon_lost_count;
- u16 aid;
- u32 operational_rate_set;
+ u8 beacon_lost_count;
+ __le16 aid;
+ __le32 operational_rate_set;
} __packed;
struct hif_cnf_set_bss_params {
- u32 status;
+ __le32 status;
} __packed;
struct hif_pm_mode {
- u8 enter_psm:1;
- u8 reserved:6;
- u8 fast_psm:1;
+ u8 enter_psm:1;
+ u8 reserved:6;
+ u8 fast_psm:1;
} __packed;
struct hif_req_set_pm_mode {
struct hif_pm_mode pm_mode;
- u8 fast_psm_idle_period;
- u8 ap_psm_change_period;
- u8 min_auto_ps_poll_period;
+ u8 fast_psm_idle_period;
+ u8 ap_psm_change_period;
+ u8 min_auto_ps_poll_period;
} __packed;
struct hif_cnf_set_pm_mode {
- u32 status;
+ __le32 status;
} __packed;
struct hif_ind_set_pm_mode_cmpl {
- u32 status;
- u8 pm_mode;
- u8 reserved[3];
+ __le32 status;
+ u8 pm_mode;
+ u8 reserved[3];
} __packed;
struct hif_req_start {
- u8 mode;
- u8 band;
- u16 channel_number;
- u32 reserved1;
- u32 beacon_interval;
- u8 dtim_period;
- u8 short_preamble:1;
- u8 reserved2:7;
- u8 reserved3;
- u8 ssid_length;
- u8 ssid[HIF_API_SSID_SIZE];
- u32 basic_rate_set;
+ u8 mode;
+ u8 band;
+ u8 channel_number;
+ u8 reserved1;
+ __le32 reserved2;
+ __le32 beacon_interval;
+ u8 dtim_period;
+ u8 short_preamble:1;
+ u8 reserved3:7;
+ u8 reserved4;
+ u8 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ __le32 basic_rate_set;
} __packed;
struct hif_cnf_start {
- u32 status;
+ __le32 status;
} __packed;
enum hif_beacon {
@@ -472,46 +446,49 @@ enum hif_beacon {
};
struct hif_req_beacon_transmit {
- u8 enable_beaconing;
- u8 reserved[3];
+ u8 enable_beaconing;
+ u8 reserved[3];
} __packed;
struct hif_cnf_beacon_transmit {
- u32 status;
+ __le32 status;
} __packed;
+#define HIF_LINK_ID_MAX 14
+#define HIF_LINK_ID_NOT_ASSOCIATED (HIF_LINK_ID_MAX + 1)
+
enum hif_sta_map_direction {
HIF_STA_MAP = 0x0,
HIF_STA_UNMAP = 0x1
};
struct hif_map_link_flags {
- u8 map_direction:1;
- u8 mfpc:1;
- u8 reserved:6;
+ u8 map_direction:1;
+ u8 mfpc:1;
+ u8 reserved:6;
} __packed;
struct hif_req_map_link {
- u8 mac_addr[ETH_ALEN];
+ u8 mac_addr[ETH_ALEN];
struct hif_map_link_flags map_link_flags;
- u8 peer_sta_id;
+ u8 peer_sta_id;
} __packed;
struct hif_cnf_map_link {
- u32 status;
+ __le32 status;
} __packed;
struct hif_suspend_resume_flags {
- u8 resume:1;
- u8 reserved1:2;
- u8 bc_mc_only:1;
- u8 reserved2:4;
- u8 reserved3;
+ u8 resume:1;
+ u8 reserved1:2;
+ u8 bc_mc_only:1;
+ u8 reserved2:4;
+ u8 reserved3;
} __packed;
struct hif_ind_suspend_resume_tx {
struct hif_suspend_resume_flags suspend_resume_flags;
- u16 peer_sta_set;
+ __le16 peer_sta_set;
} __packed;
@@ -541,102 +518,102 @@ enum hif_key_type {
};
struct hif_wep_pairwise_key {
- u8 peer_address[ETH_ALEN];
- u8 reserved;
- u8 key_length;
- u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+ u8 peer_address[ETH_ALEN];
+ u8 reserved;
+ u8 key_length;
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
} __packed;
struct hif_wep_group_key {
- u8 key_id;
- u8 key_length;
- u8 reserved[2];
- u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 key_length;
+ u8 reserved[2];
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
} __packed;
struct hif_tkip_pairwise_key {
- u8 peer_address[ETH_ALEN];
- u8 reserved[2];
- u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
- u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
- u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE];
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE];
} __packed;
struct hif_tkip_group_key {
- u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
- u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
- u8 key_id;
- u8 reserved[3];
- u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
} __packed;
struct hif_aes_pairwise_key {
- u8 peer_address[ETH_ALEN];
- u8 reserved[2];
- u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
} __packed;
struct hif_aes_group_key {
- u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
- u8 key_id;
- u8 reserved[3];
- u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
} __packed;
struct hif_wapi_pairwise_key {
- u8 peer_address[ETH_ALEN];
- u8 key_id;
- u8 reserved;
- u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
- u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+ u8 peer_address[ETH_ALEN];
+ u8 key_id;
+ u8 reserved;
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
} __packed;
struct hif_wapi_group_key {
- u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
- u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
- u8 key_id;
- u8 reserved[3];
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
} __packed;
struct hif_igtk_group_key {
- u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE];
- u8 key_id;
- u8 reserved[3];
- u8 ipn[HIF_API_IPN_SIZE];
+ u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 ipn[HIF_API_IPN_SIZE];
} __packed;
union hif_privacy_key_data {
- struct hif_wep_pairwise_key wep_pairwise_key;
- struct hif_wep_group_key wep_group_key;
- struct hif_tkip_pairwise_key tkip_pairwise_key;
- struct hif_tkip_group_key tkip_group_key;
- struct hif_aes_pairwise_key aes_pairwise_key;
- struct hif_aes_group_key aes_group_key;
- struct hif_wapi_pairwise_key wapi_pairwise_key;
- struct hif_wapi_group_key wapi_group_key;
- struct hif_igtk_group_key igtk_group_key;
+ struct hif_wep_pairwise_key wep_pairwise_key;
+ struct hif_wep_group_key wep_group_key;
+ struct hif_tkip_pairwise_key tkip_pairwise_key;
+ struct hif_tkip_group_key tkip_group_key;
+ struct hif_aes_pairwise_key aes_pairwise_key;
+ struct hif_aes_group_key aes_group_key;
+ struct hif_wapi_pairwise_key wapi_pairwise_key;
+ struct hif_wapi_group_key wapi_group_key;
+ struct hif_igtk_group_key igtk_group_key;
};
struct hif_req_add_key {
- u8 type;
- u8 entry_index;
- u8 int_id:2;
- u8 reserved1:6;
- u8 reserved2;
+ u8 type;
+ u8 entry_index;
+ u8 int_id:2;
+ u8 reserved1:6;
+ u8 reserved2;
union hif_privacy_key_data key;
} __packed;
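+// The union above is discriminated by the "type" member: the device
+// decodes "key" as whichever variant matches the announced key type
+// (see enum hif_key_type).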
struct hif_cnf_add_key {
- u32 status;
+ __le32 status;
} __packed;
struct hif_req_remove_key {
- u8 entry_index;
- u8 reserved[3];
+ u8 entry_index;
+ u8 reserved[3];
} __packed;
struct hif_cnf_remove_key {
- u32 status;
+ __le32 status;
} __packed;
enum hif_event_ind {
@@ -656,13 +633,13 @@ enum hif_ps_mode_error {
};
union hif_event_data {
- u8 rcpi_rssi;
- u32 ps_mode_error;
- u32 peer_sta_set;
+ u8 rcpi_rssi;
+ __le32 ps_mode_error;
+ __le32 peer_sta_set;
};
struct hif_ind_event {
- u32 event_id;
+ __le32 event_id;
union hif_event_data event_data;
} __packed;
diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h
index a069c3a21b4d..dba18a7ae919 100644
--- a/drivers/staging/wfx/hif_api_general.h
+++ b/drivers/staging/wfx/hif_api_general.h
@@ -17,13 +17,13 @@
#define __packed __attribute__((__packed__))
#endif
-#define API_SSID_SIZE 32
+#define API_SSID_SIZE 32
-#define HIF_ID_IS_INDICATION 0x80
-#define HIF_COUNTER_MAX 7
+#define HIF_ID_IS_INDICATION 0x80
+#define HIF_COUNTER_MAX 7
struct hif_msg {
- u16 len;
+ __le16 len;
u8 id;
u8 reserved:1;
u8 interface:2;
@@ -33,239 +33,252 @@ struct hif_msg {
} __packed;
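+// struct hif_msg above starts every message with a 4-byte header (len,
+// id and one byte of flags); hif_generic_confirm() in the hif_rx.c hunk
+// below drops it with "le16_to_cpu(hif->len) - 4".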
enum hif_general_requests_ids {
- HIF_REQ_ID_CONFIGURATION = 0x09,
- HIF_REQ_ID_CONTROL_GPIO = 0x26,
- HIF_REQ_ID_SET_SL_MAC_KEY = 0x27,
- HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
- HIF_REQ_ID_SL_CONFIGURE = 0x29,
- HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a,
- HIF_REQ_ID_PTA_SETTINGS = 0x2b,
- HIF_REQ_ID_PTA_PRIORITY = 0x2c,
- HIF_REQ_ID_PTA_STATE = 0x2d,
- HIF_REQ_ID_SHUT_DOWN = 0x32,
+ HIF_REQ_ID_CONFIGURATION = 0x09,
+ HIF_REQ_ID_CONTROL_GPIO = 0x26,
+ HIF_REQ_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_REQ_ID_SL_CONFIGURE = 0x29,
+ HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_REQ_ID_PTA_SETTINGS = 0x2b,
+ HIF_REQ_ID_PTA_PRIORITY = 0x2c,
+ HIF_REQ_ID_PTA_STATE = 0x2d,
+ HIF_REQ_ID_SHUT_DOWN = 0x32,
};
enum hif_general_confirmations_ids {
- HIF_CNF_ID_CONFIGURATION = 0x09,
- HIF_CNF_ID_CONTROL_GPIO = 0x26,
- HIF_CNF_ID_SET_SL_MAC_KEY = 0x27,
- HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
- HIF_CNF_ID_SL_CONFIGURE = 0x29,
- HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a,
- HIF_CNF_ID_PTA_SETTINGS = 0x2b,
- HIF_CNF_ID_PTA_PRIORITY = 0x2c,
- HIF_CNF_ID_PTA_STATE = 0x2d,
- HIF_CNF_ID_SHUT_DOWN = 0x32,
+ HIF_CNF_ID_CONFIGURATION = 0x09,
+ HIF_CNF_ID_CONTROL_GPIO = 0x26,
+ HIF_CNF_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_CNF_ID_SL_CONFIGURE = 0x29,
+ HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_CNF_ID_PTA_SETTINGS = 0x2b,
+ HIF_CNF_ID_PTA_PRIORITY = 0x2c,
+ HIF_CNF_ID_PTA_STATE = 0x2d,
+ HIF_CNF_ID_SHUT_DOWN = 0x32,
};
enum hif_general_indications_ids {
- HIF_IND_ID_EXCEPTION = 0xe0,
- HIF_IND_ID_STARTUP = 0xe1,
- HIF_IND_ID_WAKEUP = 0xe2,
- HIF_IND_ID_GENERIC = 0xe3,
- HIF_IND_ID_ERROR = 0xe4,
- HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5
+ HIF_IND_ID_EXCEPTION = 0xe0,
+ HIF_IND_ID_STARTUP = 0xe1,
+ HIF_IND_ID_WAKEUP = 0xe2,
+ HIF_IND_ID_GENERIC = 0xe3,
+ HIF_IND_ID_ERROR = 0xe4,
+ HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5
};
-enum hif_hi_status {
- HI_STATUS_SUCCESS = 0x0000,
- HI_STATUS_FAILURE = 0x0001,
- HI_INVALID_PARAMETER = 0x0002,
- HI_STATUS_GPIO_WARNING = 0x0003,
- HI_ERROR_UNSUPPORTED_MSG_ID = 0x0004,
- SL_MAC_KEY_STATUS_SUCCESS = 0x005A,
- SL_MAC_KEY_STATUS_FAILED_KEY_ALREADY_BURNED = 0x006B,
- SL_MAC_KEY_STATUS_FAILED_RAM_MODE_NOT_ALLOWED = 0x007C,
- SL_MAC_KEY_STATUS_FAILED_UNKNOWN_MODE = 0x008D,
- SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS = 0x009E,
- SL_PUB_KEY_EXCHANGE_STATUS_FAILED = 0x00AF,
- PREVENT_ROLLBACK_CNF_SUCCESS = 0x1234,
- PREVENT_ROLLBACK_CNF_WRONG_MAGIC_WORD = 0x1256
-};
+#define HIF_STATUS_SUCCESS (cpu_to_le32(0x0000))
+#define HIF_STATUS_FAIL (cpu_to_le32(0x0001))
+#define HIF_STATUS_INVALID_PARAMETER (cpu_to_le32(0x0002))
+#define HIF_STATUS_WARNING (cpu_to_le32(0x0003))
+#define HIF_STATUS_UNKNOWN_REQUEST (cpu_to_le32(0x0004))
+#define HIF_STATUS_RX_FAIL_DECRYPT (cpu_to_le32(0x0010))
+#define HIF_STATUS_RX_FAIL_MIC (cpu_to_le32(0x0011))
+#define HIF_STATUS_RX_FAIL_NO_KEY (cpu_to_le32(0x0012))
+#define HIF_STATUS_TX_FAIL_RETRIES (cpu_to_le32(0x0013))
+#define HIF_STATUS_TX_FAIL_TIMEOUT (cpu_to_le32(0x0014))
+#define HIF_STATUS_TX_FAIL_REQUEUE (cpu_to_le32(0x0015))
+#define HIF_STATUS_REFUSED (cpu_to_le32(0x0016))
+#define HIF_STATUS_BUSY (cpu_to_le32(0x0017))
+#define HIF_STATUS_SLK_SET_KEY_SUCCESS (cpu_to_le32(0x005A))
+#define HIF_STATUS_SLK_SET_KEY_ALREADY_BURNED (cpu_to_le32(0x006B))
+#define HIF_STATUS_SLK_SET_KEY_DISALLOWED_MODE (cpu_to_le32(0x007C))
+#define HIF_STATUS_SLK_SET_KEY_UNKNOWN_MODE (cpu_to_le32(0x008D))
+#define HIF_STATUS_SLK_NEGO_SUCCESS (cpu_to_le32(0x009E))
+#define HIF_STATUS_SLK_NEGO_FAILED (cpu_to_le32(0x00AF))
+#define HIF_STATUS_ROLLBACK_SUCCESS (cpu_to_le32(0x1234))
+#define HIF_STATUS_ROLLBACK_FAIL (cpu_to_le32(0x1256))
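+// Since the codes are stored pre-swapped with cpu_to_le32(), a __le32
+// status field from the device can be tested for equality directly,
+// with no le32_to_cpu() at the call site. Sketch for a received
+// struct hif_cnf_tx *cnf:
+//
+//	if (cnf->status == HIF_STATUS_TX_FAIL_RETRIES)
+//		/* handle excessive retries, no byte swap needed */;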
enum hif_api_rate_index {
- API_RATE_INDEX_B_1MBPS = 0,
- API_RATE_INDEX_B_2MBPS = 1,
- API_RATE_INDEX_B_5P5MBPS = 2,
- API_RATE_INDEX_B_11MBPS = 3,
- API_RATE_INDEX_PBCC_22MBPS = 4,
- API_RATE_INDEX_PBCC_33MBPS = 5,
- API_RATE_INDEX_G_6MBPS = 6,
- API_RATE_INDEX_G_9MBPS = 7,
- API_RATE_INDEX_G_12MBPS = 8,
- API_RATE_INDEX_G_18MBPS = 9,
- API_RATE_INDEX_G_24MBPS = 10,
- API_RATE_INDEX_G_36MBPS = 11,
- API_RATE_INDEX_G_48MBPS = 12,
- API_RATE_INDEX_G_54MBPS = 13,
- API_RATE_INDEX_N_6P5MBPS = 14,
- API_RATE_INDEX_N_13MBPS = 15,
- API_RATE_INDEX_N_19P5MBPS = 16,
- API_RATE_INDEX_N_26MBPS = 17,
- API_RATE_INDEX_N_39MBPS = 18,
- API_RATE_INDEX_N_52MBPS = 19,
- API_RATE_INDEX_N_58P5MBPS = 20,
- API_RATE_INDEX_N_65MBPS = 21,
- API_RATE_NUM_ENTRIES = 22
+ API_RATE_INDEX_B_1MBPS = 0,
+ API_RATE_INDEX_B_2MBPS = 1,
+ API_RATE_INDEX_B_5P5MBPS = 2,
+ API_RATE_INDEX_B_11MBPS = 3,
+ API_RATE_INDEX_PBCC_22MBPS = 4,
+ API_RATE_INDEX_PBCC_33MBPS = 5,
+ API_RATE_INDEX_G_6MBPS = 6,
+ API_RATE_INDEX_G_9MBPS = 7,
+ API_RATE_INDEX_G_12MBPS = 8,
+ API_RATE_INDEX_G_18MBPS = 9,
+ API_RATE_INDEX_G_24MBPS = 10,
+ API_RATE_INDEX_G_36MBPS = 11,
+ API_RATE_INDEX_G_48MBPS = 12,
+ API_RATE_INDEX_G_54MBPS = 13,
+ API_RATE_INDEX_N_6P5MBPS = 14,
+ API_RATE_INDEX_N_13MBPS = 15,
+ API_RATE_INDEX_N_19P5MBPS = 16,
+ API_RATE_INDEX_N_26MBPS = 17,
+ API_RATE_INDEX_N_39MBPS = 18,
+ API_RATE_INDEX_N_52MBPS = 19,
+ API_RATE_INDEX_N_58P5MBPS = 20,
+ API_RATE_INDEX_N_65MBPS = 21,
+ API_RATE_NUM_ENTRIES = 22
};
enum hif_fw_type {
- HIF_FW_TYPE_ETF = 0x0,
- HIF_FW_TYPE_WFM = 0x1,
- HIF_FW_TYPE_WSM = 0x2
+ HIF_FW_TYPE_ETF = 0x0,
+ HIF_FW_TYPE_WFM = 0x1,
+ HIF_FW_TYPE_WSM = 0x2
};
struct hif_capabilities {
- u8 link_mode:2;
- u8 reserved1:6;
- u8 reserved2;
- u8 reserved3;
- u8 reserved4;
+ u8 link_mode:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
} __packed;
struct hif_otp_regul_sel_mode_info {
- u8 region_sel_mode:4;
- u8 reserved:4;
+ u8 region_sel_mode:4;
+ u8 reserved:4;
} __packed;
struct hif_otp_phy_info {
- u8 phy1_region:3;
- u8 phy0_region:3;
- u8 otp_phy_ver:2;
+ u8 phy1_region:3;
+ u8 phy0_region:3;
+ u8 otp_phy_ver:2;
} __packed;
-#define API_OPN_SIZE 14
-#define API_UID_SIZE 8
-#define API_DISABLED_CHANNEL_LIST_SIZE 2
-#define API_FIRMWARE_LABEL_SIZE 128
-
struct hif_ind_startup {
- u32 status;
- u16 hardware_id;
- u8 opn[API_OPN_SIZE];
- u8 uid[API_UID_SIZE];
- u16 num_inp_ch_bufs;
- u16 size_inp_ch_buf;
- u8 num_links_ap;
- u8 num_interfaces;
- u8 mac_addr[2][ETH_ALEN];
- u8 api_version_minor;
- u8 api_version_major;
+ // Like the others, this struct is interpreted as little endian by the
+ // device. However, it is also used directly by the driver, so we prefer
+ // to declare it in native order and do the byte swapping on reception.
+ __le32 status;
+ u16 hardware_id;
+ u8 opn[14];
+ u8 uid[8];
+ u16 num_inp_ch_bufs;
+ u16 size_inp_ch_buf;
+ u8 num_links_ap;
+ u8 num_interfaces;
+ u8 mac_addr[2][ETH_ALEN];
+ u8 api_version_minor;
+ u8 api_version_major;
struct hif_capabilities capabilities;
- u8 firmware_build;
- u8 firmware_minor;
- u8 firmware_major;
- u8 firmware_type;
- u8 disabled_channel_list[API_DISABLED_CHANNEL_LIST_SIZE];
+ u8 firmware_build;
+ u8 firmware_minor;
+ u8 firmware_major;
+ u8 firmware_type;
+ u8 disabled_channel_list[2];
struct hif_otp_regul_sel_mode_info regul_sel_mode_info;
struct hif_otp_phy_info otp_phy_info;
- u32 supported_rate_mask;
- u8 firmware_label[API_FIRMWARE_LABEL_SIZE];
+ u32 supported_rate_mask;
+ u8 firmware_label[128];
} __packed;
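+// The byte swap announced in the comment above is done in
+// hif_startup_indication() (hif_rx.c hunk below): hardware_id,
+// num_inp_ch_bufs, size_inp_ch_buf and supported_rate_mask are converted
+// in place right after the memcpy() from the bus buffer.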
struct hif_ind_wakeup {
} __packed;
struct hif_req_configuration {
- u16 length;
- u8 pds_data[];
+ __le16 length;
+ u8 pds_data[];
} __packed;
struct hif_cnf_configuration {
- u32 status;
+ __le32 status;
} __packed;
enum hif_gpio_mode {
- HIF_GPIO_MODE_D0 = 0x0,
- HIF_GPIO_MODE_D1 = 0x1,
- HIF_GPIO_MODE_OD0 = 0x2,
- HIF_GPIO_MODE_OD1 = 0x3,
- HIF_GPIO_MODE_TRISTATE = 0x4,
- HIF_GPIO_MODE_TOGGLE = 0x5,
- HIF_GPIO_MODE_READ = 0x6
+ HIF_GPIO_MODE_D0 = 0x0,
+ HIF_GPIO_MODE_D1 = 0x1,
+ HIF_GPIO_MODE_OD0 = 0x2,
+ HIF_GPIO_MODE_OD1 = 0x3,
+ HIF_GPIO_MODE_TRISTATE = 0x4,
+ HIF_GPIO_MODE_TOGGLE = 0x5,
+ HIF_GPIO_MODE_READ = 0x6
};
struct hif_req_control_gpio {
- u8 gpio_label;
- u8 gpio_mode;
+ u8 gpio_label;
+ u8 gpio_mode;
} __packed;
-enum hif_gpio_error {
- HIF_GPIO_ERROR_0 = 0x0,
- HIF_GPIO_ERROR_1 = 0x1,
- HIF_GPIO_ERROR_2 = 0x2
-};
-
struct hif_cnf_control_gpio {
- u32 status;
- u32 value;
+ __le32 status;
+ __le32 value;
} __packed;
enum hif_generic_indication_type {
- HIF_GENERIC_INDICATION_TYPE_RAW = 0x0,
- HIF_GENERIC_INDICATION_TYPE_STRING = 0x1,
- HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2
+ HIF_GENERIC_INDICATION_TYPE_RAW = 0x0,
+ HIF_GENERIC_INDICATION_TYPE_STRING = 0x1,
+ HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2,
+ HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO = 0x3,
};
struct hif_rx_stats {
- u32 nb_rx_frame;
- u32 nb_crc_frame;
- u32 per_total;
- u32 throughput;
- u32 nb_rx_by_rate[API_RATE_NUM_ENTRIES];
- u16 per[API_RATE_NUM_ENTRIES];
- s16 snr[API_RATE_NUM_ENTRIES];
- s16 rssi[API_RATE_NUM_ENTRIES];
- s16 cfo[API_RATE_NUM_ENTRIES];
- u32 date;
- u32 pwr_clk_freq;
- u8 is_ext_pwr_clk;
+ __le32 nb_rx_frame;
+ __le32 nb_crc_frame;
+ __le32 per_total;
+ __le32 throughput;
+ __le32 nb_rx_by_rate[API_RATE_NUM_ENTRIES];
+ __le16 per[API_RATE_NUM_ENTRIES];
+ __le16 snr[API_RATE_NUM_ENTRIES]; // signed value
+ __le16 rssi[API_RATE_NUM_ENTRIES]; // signed value
+ __le16 cfo[API_RATE_NUM_ENTRIES]; // signed value
+ __le32 date;
+ __le32 pwr_clk_freq;
+ u8 is_ext_pwr_clk;
s8 current_temp;
} __packed;
+struct hif_tx_power_loop_info {
+ __le16 tx_gain_dig;
+ __le16 tx_gain_pa;
+ __le16 target_pout; // signed value
+ __le16 p_estimation; // signed value
+ __le16 vpdet;
+ u8 measurement_index;
+ u8 reserved;
+} __packed;
+
union hif_indication_data {
- struct hif_rx_stats rx_stats;
- u8 raw_data[1];
+ struct hif_rx_stats rx_stats;
+ struct hif_tx_power_loop_info tx_power_loop_info;
+ u8 raw_data[1];
};
struct hif_ind_generic {
- u32 indication_type;
+ __le32 indication_type;
union hif_indication_data indication_data;
} __packed;
-
-#define HIF_EXCEPTION_DATA_SIZE 124
-
-struct hif_ind_exception {
- u8 data[HIF_EXCEPTION_DATA_SIZE];
-} __packed;
-
-
enum hif_error {
- HIF_ERROR_FIRMWARE_ROLLBACK = 0x0,
- HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x1,
- HIF_ERROR_OUTDATED_SESSION_KEY = 0x2,
- HIF_ERROR_INVALID_SESSION_KEY = 0x3,
- HIF_ERROR_OOR_VOLTAGE = 0x4,
- HIF_ERROR_PDS_VERSION = 0x5,
- HIF_ERROR_OOR_TEMPERATURE = 0x6,
- HIF_ERROR_REQ_DURING_KEY_EXCHANGE = 0x7,
- HIF_ERROR_MULTI_TX_CNF_SECURELINK = 0x8,
- HIF_ERROR_SECURELINK_OVERFLOW = 0x9,
- HIF_ERROR_SECURELINK_DECRYPTION = 0xa
+ HIF_ERROR_FIRMWARE_ROLLBACK = 0x00,
+ HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x01,
+ HIF_ERROR_SLK_OUTDATED_SESSION_KEY = 0x02,
+ HIF_ERROR_SLK_SESSION_KEY = 0x03,
+ HIF_ERROR_OOR_VOLTAGE = 0x04,
+ HIF_ERROR_PDS_PAYLOAD = 0x05,
+ HIF_ERROR_OOR_TEMPERATURE = 0x06,
+ HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE = 0x07,
+ HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED = 0x08,
+ HIF_ERROR_SLK_OVERFLOW = 0x09,
+ HIF_ERROR_SLK_DECRYPTION = 0x0a,
+ HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE = 0x0b,
+ HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW = 0x0c,
+ HIF_ERROR_HIF_TX_QUEUE_FULL = 0x0d,
+ HIF_ERROR_HIF_RX_DATA_TOO_LARGE = 0x0e,
+ HIF_ERROR_HIF_BUS = 0x0f,
+ HIF_ERROR_PDS_TESTFEATURE = 0x10,
};
struct hif_ind_error {
- u32 type;
- u8 data[];
+ __le32 type;
+ u8 data[];
+} __packed;
+
+struct hif_ind_exception {
+ __le32 type;
+ u8 data[];
} __packed;
enum hif_secure_link_state {
- SEC_LINK_UNAVAILABLE = 0x0,
- SEC_LINK_RESERVED = 0x1,
- SEC_LINK_EVAL = 0x2,
- SEC_LINK_ENFORCED = 0x3
+ SEC_LINK_UNAVAILABLE = 0x0,
+ SEC_LINK_RESERVED = 0x1,
+ SEC_LINK_EVAL = 0x2,
+ SEC_LINK_ENFORCED = 0x3
};
enum hif_sl_encryption_type {
@@ -282,156 +295,70 @@ struct hif_sl_msg_hdr {
struct hif_sl_msg {
struct hif_sl_msg_hdr hdr;
- u16 len;
- u8 payload[];
+ __le16 len;
+ u8 payload[];
} __packed;
-#define AES_CCM_TAG_SIZE 16
+#define AES_CCM_TAG_SIZE 16
struct hif_sl_tag {
- u8 tag[16];
+ u8 tag[16];
} __packed;
enum hif_sl_mac_key_dest {
- SL_MAC_KEY_DEST_OTP = 0x78,
- SL_MAC_KEY_DEST_RAM = 0x87
+ SL_MAC_KEY_DEST_OTP = 0x78,
+ SL_MAC_KEY_DEST_RAM = 0x87
};
-#define API_KEY_VALUE_SIZE 32
+#define API_KEY_VALUE_SIZE 32
struct hif_req_set_sl_mac_key {
- u8 otp_or_ram;
- u8 key_value[API_KEY_VALUE_SIZE];
+ u8 otp_or_ram;
+ u8 key_value[API_KEY_VALUE_SIZE];
} __packed;
struct hif_cnf_set_sl_mac_key {
- u32 status;
+ __le32 status;
} __packed;
-#define API_HOST_PUB_KEY_SIZE 32
-#define API_HOST_PUB_KEY_MAC_SIZE 64
-
enum hif_sl_session_key_alg {
- HIF_SL_CURVE25519 = 0x01,
- HIF_SL_KDF = 0x02
+ HIF_SL_CURVE25519 = 0x01,
+ HIF_SL_KDF = 0x02
};
+#define API_HOST_PUB_KEY_SIZE 32
+#define API_HOST_PUB_KEY_MAC_SIZE 64
+
struct hif_req_sl_exchange_pub_keys {
- u8 algorithm:2;
- u8 reserved1:6;
- u8 reserved2[3];
- u8 host_pub_key[API_HOST_PUB_KEY_SIZE];
- u8 host_pub_key_mac[API_HOST_PUB_KEY_MAC_SIZE];
+ u8 algorithm:2;
+ u8 reserved1:6;
+ u8 reserved2[3];
+ u8 host_pub_key[API_HOST_PUB_KEY_SIZE];
+ u8 host_pub_key_mac[API_HOST_PUB_KEY_MAC_SIZE];
} __packed;
struct hif_cnf_sl_exchange_pub_keys {
- u32 status;
+ __le32 status;
} __packed;
-#define API_NCP_PUB_KEY_SIZE 32
-#define API_NCP_PUB_KEY_MAC_SIZE 64
+#define API_NCP_PUB_KEY_SIZE 32
+#define API_NCP_PUB_KEY_MAC_SIZE 64
struct hif_ind_sl_exchange_pub_keys {
- u32 status;
- u8 ncp_pub_key[API_NCP_PUB_KEY_SIZE];
- u8 ncp_pub_key_mac[API_NCP_PUB_KEY_MAC_SIZE];
+ __le32 status;
+ u8 ncp_pub_key[API_NCP_PUB_KEY_SIZE];
+ u8 ncp_pub_key_mac[API_NCP_PUB_KEY_MAC_SIZE];
} __packed;
-#define API_ENCR_BMP_SIZE 32
-
struct hif_req_sl_configure {
- u8 encr_bmp[API_ENCR_BMP_SIZE];
- u8 disable_session_key_protection:1;
- u8 reserved1:7;
- u8 reserved2[3];
+ u8 encr_bmp[32];
+ u8 disable_session_key_protection:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
} __packed;
struct hif_cnf_sl_configure {
- u32 status;
-} __packed;
-
-struct hif_req_prevent_rollback {
- u32 magic_word;
-} __packed;
-
-struct hif_cnf_prevent_rollback {
- u32 status;
-} __packed;
-
-enum hif_pta_mode {
- PTA_1W_WLAN_MASTER = 0,
- PTA_1W_COEX_MASTER = 1,
- PTA_2W = 2,
- PTA_3W = 3,
- PTA_4W = 4
-};
-
-enum hif_signal_level {
- SIGNAL_LOW = 0,
- SIGNAL_HIGH = 1
-};
-
-enum hif_coex_type {
- COEX_TYPE_GENERIC = 0,
- COEX_TYPE_BLE = 1
-};
-
-enum hif_grant_state {
- NO_GRANT = 0,
- GRANT = 1
-};
-
-struct hif_req_pta_settings {
- u8 pta_mode;
- u8 request_signal_active_level;
- u8 priority_signal_active_level;
- u8 freq_signal_active_level;
- u8 grant_signal_active_level;
- u8 coex_type;
- u8 default_grant_state;
- u8 simultaneous_rx_accesses;
- u8 priority_sampling_time;
- u8 tx_rx_sampling_time;
- u8 freq_sampling_time;
- u8 grant_valid_time;
- u8 fem_control_time;
- u8 first_slot_time;
- u16 periodic_tx_rx_sampling_time;
- u16 coex_quota;
- u16 wlan_quota;
-} __packed;
-
-struct hif_cnf_pta_settings {
- u32 status;
-} __packed;
-
-enum hif_pta_priority {
- HIF_PTA_PRIORITY_COEX_MAXIMIZED = 0x00000562,
- HIF_PTA_PRIORITY_COEX_HIGH = 0x00000462,
- HIF_PTA_PRIORITY_BALANCED = 0x00001461,
- HIF_PTA_PRIORITY_WLAN_HIGH = 0x00001851,
- HIF_PTA_PRIORITY_WLAN_MAXIMIZED = 0x00001A51
-};
-
-struct hif_req_pta_priority {
- u32 priority;
-} __packed;
-
-struct hif_cnf_pta_priority {
- u32 status;
-} __packed;
-
-enum hif_pta_state {
- PTA_OFF = 0,
- PTA_ON = 1
-};
-
-struct hif_req_pta_state {
- u32 pta_state;
-} __packed;
-
-struct hif_cnf_pta_state {
- u32 status;
+ __le32 status;
} __packed;
#endif
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
index 0c67cd4c1593..6f1434795fa8 100644
--- a/drivers/staging/wfx/hif_api_mib.h
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -10,175 +10,88 @@
#include "hif_api_general.h"
-#define HIF_API_IPV4_ADDRESS_SIZE 4
-#define HIF_API_IPV6_ADDRESS_SIZE 16
+#define HIF_API_IPV4_ADDRESS_SIZE 4
+#define HIF_API_IPV6_ADDRESS_SIZE 16
enum hif_mib_ids {
- HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000,
- HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001,
- HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002,
- HIF_MIB_ID_CCA_CONFIG = 0x2003,
- HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010,
- HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011,
- HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012,
- HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013,
- HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014,
- HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015,
- HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016,
- HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017,
- HIF_MIB_ID_SET_DATA_FILTERING = 0x2018,
- HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019,
- HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A,
- HIF_MIB_ID_RX_FILTER = 0x201B,
- HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C,
- HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D,
- HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030,
- HIF_MIB_ID_TSF_COUNTER = 0x2031,
- HIF_MIB_ID_STATISTICS_TABLE = 0x2032,
- HIF_MIB_ID_COUNTERS_TABLE = 0x2033,
- HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034,
- HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035,
- HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040,
+ HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000,
+ HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001,
+ HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002,
+ HIF_MIB_ID_CCA_CONFIG = 0x2003,
+ HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010,
+ HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011,
+ HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013,
+ HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014,
+ HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016,
+ HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017,
+ HIF_MIB_ID_SET_DATA_FILTERING = 0x2018,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019,
+ HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A,
+ HIF_MIB_ID_RX_FILTER = 0x201B,
+ HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D,
+ HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030,
+ HIF_MIB_ID_TSF_COUNTER = 0x2031,
+ HIF_MIB_ID_STATISTICS_TABLE = 0x2032,
+ HIF_MIB_ID_COUNTERS_TABLE = 0x2033,
+ HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035,
+ HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040,
HIF_MIB_ID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME = 0x2041,
- HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042,
- HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043,
- HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044,
- HIF_MIB_ID_SLOT_TIME = 0x2045,
- HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046,
- HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047,
- HIF_MIB_ID_TEMPLATE_FRAME = 0x2048,
- HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049,
- HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A,
- HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B,
- HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C,
- HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D,
- HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E,
- HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F,
- HIF_MIB_ID_PROTECTED_MGMT_POLICY = 0x2050,
- HIF_MIB_ID_SET_HT_PROTECTION = 0x2051,
- HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052,
- HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053,
- HIF_MIB_ID_INACTIVITY_TIMER = 0x2054,
- HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055,
- HIF_MIB_ID_BEACON_STATS = 0x2056,
+ HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044,
+ HIF_MIB_ID_SLOT_TIME = 0x2045,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046,
+ HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047,
+ HIF_MIB_ID_TEMPLATE_FRAME = 0x2048,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A,
+ HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B,
+ HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY = 0x2050,
+ HIF_MIB_ID_SET_HT_PROTECTION = 0x2051,
+ HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052,
+ HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053,
+ HIF_MIB_ID_INACTIVITY_TIMER = 0x2054,
+ HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055,
+ HIF_MIB_ID_BEACON_STATS = 0x2056,
};
-#define HIF_OP_POWER_MODE_MASK 0xf
-
enum hif_op_power_mode {
- HIF_OP_POWER_MODE_ACTIVE = 0x0,
- HIF_OP_POWER_MODE_DOZE = 0x1,
- HIF_OP_POWER_MODE_QUIESCENT = 0x2
+ HIF_OP_POWER_MODE_ACTIVE = 0x0,
+ HIF_OP_POWER_MODE_DOZE = 0x1,
+ HIF_OP_POWER_MODE_QUIESCENT = 0x2
};
struct hif_mib_gl_operational_power_mode {
- u8 power_mode:4;
- u8 reserved1:3;
- u8 wup_ind_activation:1;
- u8 reserved2[3];
-} __packed;
-
-struct hif_mib_gl_block_ack_info {
- u8 rx_buffer_size;
- u8 rx_max_num_agreements;
- u8 tx_buffer_size;
- u8 tx_max_num_agreements;
+ u8 power_mode:4;
+ u8 reserved1:3;
+ u8 wup_ind_activation:1;
+ u8 reserved2[3];
} __packed;
struct hif_mib_gl_set_multi_msg {
- u8 enable_multi_tx_conf:1;
- u8 reserved1:7;
- u8 reserved2[3];
-} __packed;
-
-enum hif_cca_thr_mode {
- HIF_CCA_THR_MODE_RELATIVE = 0x0,
- HIF_CCA_THR_MODE_ABSOLUTE = 0x1
-};
-
-struct hif_mib_gl_cca_config {
- u8 cca_thr_mode;
- u8 reserved[3];
-} __packed;
-
-#define MAX_NUMBER_DATA_FILTERS 0xA
-
-#define MAX_NUMBER_IPV4_ADDR_CONDITIONS 0x4
-#define MAX_NUMBER_IPV6_ADDR_CONDITIONS 0x4
-#define MAX_NUMBER_MAC_ADDR_CONDITIONS 0x4
-#define MAX_NUMBER_UC_MC_BC_CONDITIONS 0x4
-#define MAX_NUMBER_ETHER_TYPE_CONDITIONS 0x4
-#define MAX_NUMBER_PORT_CONDITIONS 0x4
-#define MAX_NUMBER_MAGIC_CONDITIONS 0x4
-#define MAX_NUMBER_ARP_CONDITIONS 0x2
-#define MAX_NUMBER_NS_CONDITIONS 0x2
-
-struct hif_mib_ethertype_data_frame_condition {
- u8 condition_idx;
- u8 reserved;
- u16 ether_type;
-} __packed;
-
-enum hif_udp_tcp_protocol {
- HIF_PROTOCOL_UDP = 0x0,
- HIF_PROTOCOL_TCP = 0x1,
- HIF_PROTOCOL_BOTH_UDP_TCP = 0x2
-};
-
-enum hif_which_port {
- HIF_PORT_DST = 0x0,
- HIF_PORT_SRC = 0x1,
- HIF_PORT_SRC_OR_DST = 0x2
-};
-
-struct hif_mib_ports_data_frame_condition {
- u8 condition_idx;
- u8 protocol;
- u8 which_port;
- u8 reserved1;
- u16 port_number;
- u8 reserved2[2];
-} __packed;
-
-#define HIF_API_MAGIC_PATTERN_SIZE 32
-
-struct hif_mib_magic_data_frame_condition {
- u8 condition_idx;
- u8 offset;
- u8 magic_pattern_length;
- u8 reserved;
- u8 magic_pattern[HIF_API_MAGIC_PATTERN_SIZE];
+ u8 enable_multi_tx_conf:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
} __packed;
enum hif_mac_addr_type {
- HIF_MAC_ADDR_A1 = 0x0,
- HIF_MAC_ADDR_A2 = 0x1,
- HIF_MAC_ADDR_A3 = 0x2
+ HIF_MAC_ADDR_A1 = 0x0,
+ HIF_MAC_ADDR_A2 = 0x1,
+ HIF_MAC_ADDR_A3 = 0x2
};
struct hif_mib_mac_addr_data_frame_condition {
- u8 condition_idx;
- u8 address_type;
- u8 mac_address[ETH_ALEN];
-} __packed;
-
-enum hif_ip_addr_mode {
- HIF_IP_ADDR_SRC = 0x0,
- HIF_IP_ADDR_DST = 0x1
-};
-
-struct hif_mib_ipv4_addr_data_frame_condition {
- u8 condition_idx;
- u8 address_mode;
- u8 reserved[2];
- u8 i_pv4_address[HIF_API_IPV4_ADDRESS_SIZE];
-} __packed;
-
-struct hif_mib_ipv6_addr_data_frame_condition {
- u8 condition_idx;
- u8 address_mode;
- u8 reserved[2];
- u8 i_pv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+ u8 condition_idx;
+ u8 address_type;
+ u8 mac_address[ETH_ALEN];
} __packed;
#define HIF_FILTER_UNICAST 0x1
@@ -186,365 +99,289 @@ struct hif_mib_ipv6_addr_data_frame_condition {
#define HIF_FILTER_BROADCAST 0x4
struct hif_mib_uc_mc_bc_data_frame_condition {
- u8 condition_idx;
- u8 allowed_frames;
- u8 reserved[2];
+ u8 condition_idx;
+ u8 allowed_frames;
+ u8 reserved[2];
} __packed;
struct hif_mib_config_data_filter {
- u8 filter_idx;
- u8 enable;
- u8 reserved1[2];
- u8 eth_type_cond;
- u8 port_cond;
- u8 magic_cond;
- u8 mac_cond;
- u8 ipv4_cond;
- u8 ipv6_cond;
- u8 uc_mc_bc_cond;
- u8 reserved2;
+ u8 filter_idx;
+ u8 enable;
+ u8 reserved1[2];
+ u8 eth_type_cond;
+ u8 port_cond;
+ u8 magic_cond;
+ u8 mac_cond;
+ u8 ipv4_cond;
+ u8 ipv6_cond;
+ u8 uc_mc_bc_cond;
+ u8 reserved2;
} __packed;
struct hif_mib_set_data_filtering {
- u8 invert_matching:1;
- u8 reserved1:7;
- u8 enable:1;
- u8 reserved2:7;
- u8 reserved3[2];
+ u8 invert_matching:1;
+ u8 reserved1:7;
+ u8 enable:1;
+ u8 reserved2:7;
+ u8 reserved3[2];
} __packed;
enum hif_arp_ns_frame_treatment {
- HIF_ARP_NS_FILTERING_DISABLE = 0x0,
- HIF_ARP_NS_FILTERING_ENABLE = 0x1,
- HIF_ARP_NS_REPLY_ENABLE = 0x2
+ HIF_ARP_NS_FILTERING_DISABLE = 0x0,
+ HIF_ARP_NS_FILTERING_ENABLE = 0x1,
+ HIF_ARP_NS_REPLY_ENABLE = 0x2
};
struct hif_mib_arp_ip_addr_table {
- u8 condition_idx;
- u8 arp_enable;
- u8 reserved[2];
- u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
-} __packed;
-
-struct hif_mib_ns_ip_addr_table {
- u8 condition_idx;
- u8 ns_enable;
- u8 reserved[2];
- u8 ipv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+ u8 condition_idx;
+ u8 arp_enable;
+ u8 reserved[2];
+ u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
} __packed;
struct hif_mib_rx_filter {
- u8 reserved1:1;
- u8 bssid_filter:1;
- u8 reserved2:1;
- u8 fwd_probe_req:1;
- u8 keep_alive_filter:1;
- u8 reserved3:3;
- u8 reserved4[3];
+ u8 reserved1:1;
+ u8 bssid_filter:1;
+ u8 reserved2:1;
+ u8 fwd_probe_req:1;
+ u8 keep_alive_filter:1;
+ u8 reserved3:3;
+ u8 reserved4[3];
} __packed;
-#define HIF_API_OUI_SIZE 3
-#define HIF_API_MATCH_DATA_SIZE 3
-
struct hif_ie_table_entry {
- u8 ie_id;
- u8 has_changed:1;
- u8 no_longer:1;
- u8 has_appeared:1;
- u8 reserved:1;
- u8 num_match_data:4;
- u8 oui[HIF_API_OUI_SIZE];
- u8 match_data[HIF_API_MATCH_DATA_SIZE];
+ u8 ie_id;
+ u8 has_changed:1;
+ u8 no_longer:1;
+ u8 has_appeared:1;
+ u8 reserved:1;
+ u8 num_match_data:4;
+ u8 oui[3];
+ u8 match_data[3];
} __packed;
struct hif_mib_bcn_filter_table {
- u32 num_of_info_elmts;
+ __le32 num_of_info_elmts;
struct hif_ie_table_entry ie_table[];
} __packed;
enum hif_beacon_filter {
- HIF_BEACON_FILTER_DISABLE = 0x0,
- HIF_BEACON_FILTER_ENABLE = 0x1,
- HIF_BEACON_FILTER_AUTO_ERP = 0x2
+ HIF_BEACON_FILTER_DISABLE = 0x0,
+ HIF_BEACON_FILTER_ENABLE = 0x1,
+ HIF_BEACON_FILTER_AUTO_ERP = 0x2
};
struct hif_mib_bcn_filter_enable {
- u32 enable;
- u32 bcn_count;
-} __packed;
-
-struct hif_mib_group_seq_counter {
- u32 bits4716;
- u16 bits1500;
- u16 reserved;
-} __packed;
-
-struct hif_mib_tsf_counter {
- u32 tsf_counterlo;
- u32 tsf_counterhi;
-} __packed;
-
-struct hif_mib_stats_table {
- s16 latest_snr;
- u8 latest_rcpi;
- s8 latest_rssi;
+ __le32 enable;
+ __le32 bcn_count;
} __packed;
struct hif_mib_extended_count_table {
- u32 count_plcp_errors;
- u32 count_fcs_errors;
- u32 count_tx_packets;
- u32 count_rx_packets;
- u32 count_rx_packet_errors;
- u32 count_rx_decryption_failures;
- u32 count_rx_mic_failures;
- u32 count_rx_no_key_failures;
- u32 count_tx_multicast_frames;
- u32 count_tx_frames_success;
- u32 count_tx_frame_failures;
- u32 count_tx_frames_retried;
- u32 count_tx_frames_multi_retried;
- u32 count_rx_frame_duplicates;
- u32 count_rts_success;
- u32 count_rts_failures;
- u32 count_ack_failures;
- u32 count_rx_multicast_frames;
- u32 count_rx_frames_success;
- u32 count_rx_cmacicv_errors;
- u32 count_rx_cmac_replays;
- u32 count_rx_mgmt_ccmp_replays;
- u32 count_rx_bipmic_errors;
- u32 count_rx_beacon;
- u32 count_miss_beacon;
- u32 reserved[15];
+ __le32 count_plcp_errors;
+ __le32 count_fcs_errors;
+ __le32 count_tx_packets;
+ __le32 count_rx_packets;
+ __le32 count_rx_packet_errors;
+ __le32 count_rx_decryption_failures;
+ __le32 count_rx_mic_failures;
+ __le32 count_rx_no_key_failures;
+ __le32 count_tx_multicast_frames;
+ __le32 count_tx_frames_success;
+ __le32 count_tx_frame_failures;
+ __le32 count_tx_frames_retried;
+ __le32 count_tx_frames_multi_retried;
+ __le32 count_rx_frame_duplicates;
+ __le32 count_rts_success;
+ __le32 count_rts_failures;
+ __le32 count_ack_failures;
+ __le32 count_rx_multicast_frames;
+ __le32 count_rx_frames_success;
+ __le32 count_rx_cmacicv_errors;
+ __le32 count_rx_cmac_replays;
+ __le32 count_rx_mgmt_ccmp_replays;
+ __le32 count_rx_bipmic_errors;
+ __le32 count_rx_beacon;
+ __le32 count_miss_beacon;
+ __le32 reserved[15];
} __packed;
struct hif_mib_count_table {
- u32 count_plcp_errors;
- u32 count_fcs_errors;
- u32 count_tx_packets;
- u32 count_rx_packets;
- u32 count_rx_packet_errors;
- u32 count_rx_decryption_failures;
- u32 count_rx_mic_failures;
- u32 count_rx_no_key_failures;
- u32 count_tx_multicast_frames;
- u32 count_tx_frames_success;
- u32 count_tx_frame_failures;
- u32 count_tx_frames_retried;
- u32 count_tx_frames_multi_retried;
- u32 count_rx_frame_duplicates;
- u32 count_rts_success;
- u32 count_rts_failures;
- u32 count_ack_failures;
- u32 count_rx_multicast_frames;
- u32 count_rx_frames_success;
- u32 count_rx_cmacicv_errors;
- u32 count_rx_cmac_replays;
- u32 count_rx_mgmt_ccmp_replays;
- u32 count_rx_bipmic_errors;
-} __packed;
-
-struct hif_mib_max_tx_power_level {
- s32 max_tx_power_level_rf_port1;
- s32 max_tx_power_level_rf_port2;
-} __packed;
-
-struct hif_mib_beacon_stats {
- s32 latest_tbtt_diff;
- u32 reserved[4];
+ __le32 count_plcp_errors;
+ __le32 count_fcs_errors;
+ __le32 count_tx_packets;
+ __le32 count_rx_packets;
+ __le32 count_rx_packet_errors;
+ __le32 count_rx_decryption_failures;
+ __le32 count_rx_mic_failures;
+ __le32 count_rx_no_key_failures;
+ __le32 count_tx_multicast_frames;
+ __le32 count_tx_frames_success;
+ __le32 count_tx_frame_failures;
+ __le32 count_tx_frames_retried;
+ __le32 count_tx_frames_multi_retried;
+ __le32 count_rx_frame_duplicates;
+ __le32 count_rts_success;
+ __le32 count_rts_failures;
+ __le32 count_ack_failures;
+ __le32 count_rx_multicast_frames;
+ __le32 count_rx_frames_success;
+ __le32 count_rx_cmacicv_errors;
+ __le32 count_rx_cmac_replays;
+ __le32 count_rx_mgmt_ccmp_replays;
+ __le32 count_rx_bipmic_errors;
} __packed;
struct hif_mib_mac_address {
- u8 mac_addr[ETH_ALEN];
- u16 reserved;
-} __packed;
-
-struct hif_mib_dot11_max_transmit_msdu_lifetime {
- u32 max_life_time;
-} __packed;
-
-struct hif_mib_dot11_max_receive_lifetime {
- u32 max_life_time;
+ u8 mac_addr[ETH_ALEN];
+ __le16 reserved;
} __packed;
struct hif_mib_wep_default_key_id {
- u8 wep_default_key_id;
- u8 reserved[3];
+ u8 wep_default_key_id;
+ u8 reserved[3];
} __packed;
struct hif_mib_dot11_rts_threshold {
- u32 threshold;
+ __le32 threshold;
} __packed;
struct hif_mib_slot_time {
- u32 slot_time;
+ __le32 slot_time;
} __packed;
struct hif_mib_current_tx_power_level {
- s32 power_level;
+ __le32 power_level; // signed value
} __packed;
struct hif_mib_non_erp_protection {
- u8 use_cts_to_self:1;
- u8 reserved1:7;
- u8 reserved2[3];
+ u8 use_cts_to_self:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
} __packed;
enum hif_tmplt {
- HIF_TMPLT_PRBREQ = 0x0,
- HIF_TMPLT_BCN = 0x1,
- HIF_TMPLT_NULL = 0x2,
- HIF_TMPLT_QOSNUL = 0x3,
- HIF_TMPLT_PSPOLL = 0x4,
- HIF_TMPLT_PRBRES = 0x5,
- HIF_TMPLT_ARP = 0x6,
- HIF_TMPLT_NA = 0x7
+ HIF_TMPLT_PRBREQ = 0x0,
+ HIF_TMPLT_BCN = 0x1,
+ HIF_TMPLT_NULL = 0x2,
+ HIF_TMPLT_QOSNUL = 0x3,
+ HIF_TMPLT_PSPOLL = 0x4,
+ HIF_TMPLT_PRBRES = 0x5,
+ HIF_TMPLT_ARP = 0x6,
+ HIF_TMPLT_NA = 0x7
};
-#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700
+#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700
struct hif_mib_template_frame {
- u8 frame_type;
- u8 init_rate:7;
- u8 mode:1;
- u16 frame_length;
- u8 frame[HIF_API_MAX_TEMPLATE_FRAME_SIZE];
+ u8 frame_type;
+ u8 init_rate:7;
+ u8 mode:1;
+ __le16 frame_length;
+ u8 frame[];
} __packed;
struct hif_mib_beacon_wake_up_period {
- u8 wakeup_period_min;
- u8 receive_dtim:1;
- u8 reserved1:7;
- u8 wakeup_period_max;
- u8 reserved2;
+ u8 wakeup_period_min;
+ u8 receive_dtim:1;
+ u8 reserved1:7;
+ u8 wakeup_period_max;
+ u8 reserved2;
} __packed;
struct hif_mib_rcpi_rssi_threshold {
- u8 detection:1;
- u8 rcpi_rssi:1;
- u8 upperthresh:1;
- u8 lowerthresh:1;
- u8 reserved:4;
- u8 lower_threshold;
- u8 upper_threshold;
- u8 rolling_average_count;
+ u8 detection:1;
+ u8 rcpi_rssi:1;
+ u8 upperthresh:1;
+ u8 lowerthresh:1;
+ u8 reserved:4;
+ u8 lower_threshold;
+ u8 upper_threshold;
+ u8 rolling_average_count;
} __packed;
#define DEFAULT_BA_MAX_RX_BUFFER_SIZE 16
struct hif_mib_block_ack_policy {
- u8 block_ack_tx_tid_policy;
- u8 reserved1;
- u8 block_ack_rx_tid_policy;
- u8 block_ack_rx_max_buffer_size;
-} __packed;
-
-struct hif_mib_override_int_rate {
- u8 internal_tx_rate;
- u8 non_erp_internal_tx_rate;
- u8 reserved[2];
+ u8 block_ack_tx_tid_policy;
+ u8 reserved1;
+ u8 block_ack_rx_tid_policy;
+ u8 block_ack_rx_max_buffer_size;
} __packed;
enum hif_mpdu_start_spacing {
- HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0,
- HIF_MPDU_START_SPACING_QUARTER = 0x1,
- HIF_MPDU_START_SPACING_HALF = 0x2,
- HIF_MPDU_START_SPACING_ONE = 0x3,
- HIF_MPDU_START_SPACING_TWO = 0x4,
- HIF_MPDU_START_SPACING_FOUR = 0x5,
- HIF_MPDU_START_SPACING_EIGHT = 0x6,
- HIF_MPDU_START_SPACING_SIXTEEN = 0x7
+ HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0,
+ HIF_MPDU_START_SPACING_QUARTER = 0x1,
+ HIF_MPDU_START_SPACING_HALF = 0x2,
+ HIF_MPDU_START_SPACING_ONE = 0x3,
+ HIF_MPDU_START_SPACING_TWO = 0x4,
+ HIF_MPDU_START_SPACING_FOUR = 0x5,
+ HIF_MPDU_START_SPACING_EIGHT = 0x6,
+ HIF_MPDU_START_SPACING_SIXTEEN = 0x7
};
struct hif_mib_set_association_mode {
- u8 preambtype_use:1;
- u8 mode:1;
- u8 rateset:1;
- u8 spacing:1;
- u8 reserved1:4;
- u8 short_preamble:1;
- u8 reserved2:7;
- u8 greenfield:1;
- u8 reserved3:7;
- u8 mpdu_start_spacing;
- u32 basic_rate_set;
+ u8 preambtype_use:1;
+ u8 mode:1;
+ u8 rateset:1;
+ u8 spacing:1;
+ u8 reserved1:4;
+ u8 short_preamble:1;
+ u8 reserved2:7;
+ u8 greenfield:1;
+ u8 reserved3:7;
+ u8 mpdu_start_spacing;
+ __le32 basic_rate_set;
} __packed;
struct hif_mib_set_uapsd_information {
- u8 trig_bckgrnd:1;
- u8 trig_be:1;
- u8 trig_video:1;
- u8 trig_voice:1;
- u8 reserved1:4;
- u8 deliv_bckgrnd:1;
- u8 deliv_be:1;
- u8 deliv_video:1;
- u8 deliv_voice:1;
- u8 reserved2:4;
- u16 min_auto_trigger_interval;
- u16 max_auto_trigger_interval;
- u16 auto_trigger_step;
+ u8 trig_bckgrnd:1;
+ u8 trig_be:1;
+ u8 trig_video:1;
+ u8 trig_voice:1;
+ u8 reserved1:4;
+ u8 deliv_bckgrnd:1;
+ u8 deliv_be:1;
+ u8 deliv_video:1;
+ u8 deliv_voice:1;
+ u8 reserved2:4;
+ __le16 min_auto_trigger_interval;
+ __le16 max_auto_trigger_interval;
+ __le16 auto_trigger_step;
} __packed;
struct hif_mib_tx_rate_retry_policy {
- u8 policy_index;
- u8 short_retry_count;
- u8 long_retry_count;
- u8 first_rate_sel:2;
- u8 terminate:1;
- u8 count_init:1;
- u8 reserved1:4;
- u8 rate_recovery_count;
- u8 reserved2[3];
- u8 rates[12];
+ u8 policy_index;
+ u8 short_retry_count;
+ u8 long_retry_count;
+ u8 first_rate_sel:2;
+ u8 terminate:1;
+ u8 count_init:1;
+ u8 reserved1:4;
+ u8 rate_recovery_count;
+ u8 reserved2[3];
+ u8 rates[12];
} __packed;
-#define HIF_MIB_NUM_TX_RATE_RETRY_POLICIES 15
+#define HIF_TX_RETRY_POLICY_MAX 15
+#define HIF_TX_RETRY_POLICY_INVALID HIF_TX_RETRY_POLICY_MAX
struct hif_mib_set_tx_rate_retry_policy {
- u8 num_tx_rate_policies;
- u8 reserved[3];
+ u8 num_tx_rate_policies;
+ u8 reserved[3];
struct hif_mib_tx_rate_retry_policy tx_rate_retry_policy[];
} __packed;
struct hif_mib_protected_mgmt_policy {
- u8 pmf_enable:1;
- u8 unpmf_allowed:1;
- u8 host_enc_auth_frames:1;
- u8 reserved1:5;
- u8 reserved2[3];
-} __packed;
-
-struct hif_mib_set_ht_protection {
- u8 dual_cts_prot:1;
- u8 reserved1:7;
- u8 reserved2[3];
+ u8 pmf_enable:1;
+ u8 unpmf_allowed:1;
+ u8 host_enc_auth_frames:1;
+ u8 reserved1:5;
+ u8 reserved2[3];
} __packed;
struct hif_mib_keep_alive_period {
- u16 keep_alive_period;
- u8 reserved[2];
-} __packed;
-
-struct hif_mib_arp_keep_alive_period {
- u16 arp_keep_alive_period;
- u8 encr_type;
- u8 reserved;
- u8 sender_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
- u8 target_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
-} __packed;
-
-struct hif_mib_inactivity_timer {
- u8 min_active_time;
- u8 max_active_time;
- u16 reserved;
-} __packed;
-
-struct hif_mib_interface_protection {
- u8 use_cts_prot:1;
- u8 reserved1:7;
- u8 reserved2[3];
+ __le16 keep_alive_period;
+ u8 reserved[2];
} __packed;
#endif
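
The header changes above annotate every multi-byte field that crosses the host/firmware boundary as __le16/__le32: the firmware side is little-endian, so conversions must be explicit for the driver to behave on big-endian hosts (and for sparse to verify it). A minimal sketch of the idiom, using a made-up MIB structure rather than a real wfx one:

struct demo_mib {
	u8 index;
	u8 reserved[3];
	__le32 threshold;	/* little-endian on the wire */
} __packed;

static void demo_mib_fill(struct demo_mib *mib, u32 host_val)
{
	mib->index = 0;
	/* cpu_to_le32() is a no-op on LE hosts and a byte swap on
	 * BE hosts; assigning a bare u32 here would trip sparse. */
	mib->threshold = cpu_to_le32(host_val);
}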
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
index 33c22c5d629d..bb156033d1e1 100644
--- a/drivers/staging/wfx/hif_rx.c
+++ b/drivers/staging/wfx/hif_rx.c
@@ -22,9 +22,9 @@ static int hif_generic_confirm(struct wfx_dev *wdev,
const struct hif_msg *hif, const void *buf)
{
// All confirm messages start with status
- int status = le32_to_cpu(*((__le32 *) buf));
+ int status = le32_to_cpup((__le32 *)buf);
int cmd = hif->id;
- int len = hif->len - 4; // drop header
+ int len = le16_to_cpu(hif->len) - 4; // drop header
WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
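
Both fixes in this hunk are endianness conversions on the message header: the status word is read with le32_to_cpup() and hif->len is now converted before use. A sketch of the layout assumed here, with illustrative field names (not the exact struct hif_msg definition):

struct demo_confirm {
	__le16 len;	/* total length, 4-byte header included */
	u8 id;
	u8 info;
	__le32 status;	/* every confirm starts with a status word */
} __packed;

static int demo_confirm_parse(const struct demo_confirm *cnf)
{
	int payload_len = le16_to_cpu(cnf->len) - 4;	/* drop header */

	if (payload_len < 0)
		return -EIO;
	return le32_to_cpu(cnf->status);
}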
@@ -100,10 +100,10 @@ static int hif_startup_indication(struct wfx_dev *wdev,
return -EINVAL;
}
memcpy(&wdev->hw_caps, body, sizeof(struct hif_ind_startup));
- le32_to_cpus(&wdev->hw_caps.status);
- le16_to_cpus(&wdev->hw_caps.hardware_id);
- le16_to_cpus(&wdev->hw_caps.num_inp_ch_bufs);
- le16_to_cpus(&wdev->hw_caps.size_inp_ch_buf);
+ le16_to_cpus((__le16 *)&wdev->hw_caps.hardware_id);
+ le16_to_cpus((__le16 *)&wdev->hw_caps.num_inp_ch_bufs);
+ le16_to_cpus((__le16 *)&wdev->hw_caps.size_inp_ch_buf);
+ le32_to_cpus((__le32 *)&wdev->hw_caps.supported_rate_mask);
complete(&wdev->firmware_ready);
return 0;
@@ -127,7 +127,7 @@ static int hif_keys_indication(struct wfx_dev *wdev,
u8 pubkey[API_NCP_PUB_KEY_SIZE];
// SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS is used by legacy secure link
- if (body->status && body->status != SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ if (body->status && body->status != HIF_STATUS_SLK_NEGO_SUCCESS)
dev_warn(wdev->dev, "secure link negociation error\n");
memcpy(pubkey, body->ncp_pub_key, sizeof(pubkey));
memreverse(pubkey, sizeof(pubkey));
@@ -158,26 +158,39 @@ static int hif_event_indication(struct wfx_dev *wdev,
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
const struct hif_ind_event *body = buf;
- struct wfx_hif_event *event;
- int first;
+ int type = le32_to_cpu(body->event_id);
+ int cause;
- WARN_ON(!wvif);
- if (!wvif)
+ if (!wvif) {
+ dev_warn(wdev->dev, "received event for non-existent vif\n");
return 0;
+ }
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
-
- memcpy(&event->evt, body, sizeof(struct hif_ind_event));
- spin_lock(&wvif->event_queue_lock);
- first = list_empty(&wvif->event_queue);
- list_add_tail(&event->link, &wvif->event_queue);
- spin_unlock(&wvif->event_queue_lock);
-
- if (first)
- schedule_work(&wvif->event_handler_work);
-
+ switch (type) {
+ case HIF_EVENT_IND_RCPI_RSSI:
+ wfx_event_report_rssi(wvif, body->event_data.rcpi_rssi);
+ break;
+ case HIF_EVENT_IND_BSSLOST:
+ schedule_delayed_work(&wvif->beacon_loss_work, 0);
+ break;
+ case HIF_EVENT_IND_BSSREGAINED:
+ cancel_delayed_work(&wvif->beacon_loss_work);
+ dev_dbg(wdev->dev, "ignore BSSREGAINED indication\n");
+ break;
+ case HIF_EVENT_IND_PS_MODE_ERROR:
+ cause = le32_to_cpu(body->event_data.ps_mode_error);
+ dev_warn(wdev->dev, "error while processing power save request: %d\n",
+ cause);
+ if (cause == HIF_PS_ERROR_AP_NOT_RESP_TO_POLL) {
+ wvif->bss_not_support_ps_poll = true;
+ schedule_work(&wvif->update_pm_work);
+ }
+ break;
+ default:
+ dev_warn(wdev->dev, "unhandled event indication: %.2x\n",
+ type);
+ break;
+ }
return 0;
}
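
The rewrite drops the kzalloc()/event-queue/worker indirection and handles each event where it arrives, deferring only the cases that need more context. A sketch of that dispatch pattern, with made-up names:

struct demo_vif {
	struct work_struct slow_work;
};

static int demo_event(struct demo_vif *dvif, u32 event_id)
{
	switch (event_id) {
	case 0:		/* cheap: handle inline */
		return 0;
	case 1:		/* heavier: still hand off to a worker */
		schedule_work(&dvif->slow_work);
		return 0;
	default:
		return -EINVAL;
	}
}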
@@ -224,53 +237,21 @@ static int hif_suspend_resume_indication(struct wfx_dev *wdev,
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
const struct hif_ind_suspend_resume_tx *body = buf;
- WARN_ON(!wvif);
- WARN(!body->suspend_resume_flags.bc_mc_only, "unsupported suspend/resume notification");
- if (body->suspend_resume_flags.resume)
- wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
- else
- wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
-
- return 0;
-}
-
-static int hif_error_indication(struct wfx_dev *wdev,
- const struct hif_msg *hif, const void *buf)
-{
- const struct hif_ind_error *body = buf;
- u8 *pRollback = (u8 *) body->data;
- u32 *pStatus = (u32 *) body->data;
-
- switch (body->type) {
- case HIF_ERROR_FIRMWARE_ROLLBACK:
- dev_err(wdev->dev,
- "asynchronous error: firmware rollback error %d\n",
- *pRollback);
- break;
- case HIF_ERROR_FIRMWARE_DEBUG_ENABLED:
- dev_err(wdev->dev, "asynchronous error: firmware debug feature enabled\n");
- break;
- case HIF_ERROR_OUTDATED_SESSION_KEY:
- dev_err(wdev->dev, "asynchronous error: secure link outdated key: %#.8x\n",
- *pStatus);
- break;
- case HIF_ERROR_INVALID_SESSION_KEY:
- dev_err(wdev->dev, "asynchronous error: invalid session key\n");
- break;
- case HIF_ERROR_OOR_VOLTAGE:
- dev_err(wdev->dev, "asynchronous error: out-of-range overvoltage: %#.8x\n",
- *pStatus);
- break;
- case HIF_ERROR_PDS_VERSION:
- dev_err(wdev->dev,
- "asynchronous error: wrong PDS payload or version: %#.8x\n",
- *pStatus);
- break;
- default:
- dev_err(wdev->dev, "asynchronous error: unknown (%d)\n",
- body->type);
- break;
+ if (body->suspend_resume_flags.bc_mc_only) {
+ WARN_ON(!wvif);
+ if (body->suspend_resume_flags.resume)
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
+ else
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
+ } else {
+ WARN(body->peer_sta_set, "misunderstood indication");
+ WARN(hif->interface != 2, "misunderstood indication");
+ if (body->suspend_resume_flags.resume)
+ wfx_suspend_hot_dev(wdev, STA_NOTIFY_AWAKE);
+ else
+ wfx_suspend_hot_dev(wdev, STA_NOTIFY_SLEEP);
}
+
return 0;
}
@@ -278,13 +259,14 @@ static int hif_generic_indication(struct wfx_dev *wdev,
const struct hif_msg *hif, const void *buf)
{
const struct hif_ind_generic *body = buf;
+ int type = le32_to_cpu(body->indication_type);
- switch (body->indication_type) {
+ switch (type) {
case HIF_GENERIC_INDICATION_TYPE_RAW:
return 0;
case HIF_GENERIC_INDICATION_TYPE_STRING:
dev_info(wdev->dev, "firmware says: %s\n",
- (char *) body->indication_data.raw_data);
+ (char *)body->indication_data.raw_data);
return 0;
case HIF_GENERIC_INDICATION_TYPE_RX_STATS:
mutex_lock(&wdev->rx_stats_lock);
@@ -296,22 +278,103 @@ static int hif_generic_indication(struct wfx_dev *wdev,
sizeof(wdev->rx_stats));
mutex_unlock(&wdev->rx_stats_lock);
return 0;
+ case HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO:
+ mutex_lock(&wdev->tx_power_loop_info_lock);
+ memcpy(&wdev->tx_power_loop_info,
+ &body->indication_data.tx_power_loop_info,
+ sizeof(wdev->tx_power_loop_info));
+ mutex_unlock(&wdev->tx_power_loop_info_lock);
+ return 0;
default:
- dev_err(wdev->dev,
- "generic_indication: unknown indication type: %#.8x\n",
- body->indication_type);
+ dev_err(wdev->dev, "generic_indication: unknown indication type: %#.8x\n",
+ type);
return -EIO;
}
}
+static const struct {
+ int val;
+ const char *str;
+ bool has_param;
+} hif_errors[] = {
+ { HIF_ERROR_FIRMWARE_ROLLBACK,
+ "rollback status" },
+ { HIF_ERROR_FIRMWARE_DEBUG_ENABLED,
+ "debug feature enabled" },
+ { HIF_ERROR_PDS_PAYLOAD,
+ "PDS version is not supported" },
+ { HIF_ERROR_PDS_TESTFEATURE,
+ "PDS ask for an unknown test mode" },
+ { HIF_ERROR_OOR_VOLTAGE,
+ "out-of-range power supply voltage", true },
+ { HIF_ERROR_OOR_TEMPERATURE,
+ "out-of-range temperature", true },
+ { HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE,
+ "secure link does not expect request during key exchange" },
+ { HIF_ERROR_SLK_SESSION_KEY,
+ "secure link session key is invalid" },
+ { HIF_ERROR_SLK_OVERFLOW,
+ "secure link overflow" },
+ { HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE,
+ "secure link messages list does not match message encryption" },
+ { HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW,
+ "bus clock is too slow (<1kHz)" },
+ { HIF_ERROR_HIF_RX_DATA_TOO_LARGE,
+ "HIF message too large" },
+ // The following errors only exist in old firmware versions:
+ { HIF_ERROR_HIF_TX_QUEUE_FULL,
+ "HIF messages queue is full" },
+ { HIF_ERROR_HIF_BUS,
+ "HIF bus" },
+ { HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED,
+ "secure link does not support multi-tx confirmations" },
+ { HIF_ERROR_SLK_OUTDATED_SESSION_KEY,
+ "secure link session key is outdated" },
+ { HIF_ERROR_SLK_DECRYPTION,
+ "secure link params (nonce or tag) mismatch" },
+};
+
+static int hif_error_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
+{
+ const struct hif_ind_error *body = buf;
+ int type = le32_to_cpu(body->type);
+ int param = (s8)body->data[0];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hif_errors); i++)
+ if (type == hif_errors[i].val)
+ break;
+ if (i < ARRAY_SIZE(hif_errors))
+ if (hif_errors[i].has_param)
+ dev_err(wdev->dev, "asynchronous error: %s: %d\n",
+ hif_errors[i].str, param);
+ else
+ dev_err(wdev->dev, "asynchronous error: %s\n",
+ hif_errors[i].str);
+ else
+ dev_err(wdev->dev, "asynchronous error: unknown: %08x\n", type);
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET,
+ 16, 1, hif, le16_to_cpu(hif->len), false);
+ wdev->chip_frozen = true;
+
+ return 0;
+}
+
static int hif_exception_indication(struct wfx_dev *wdev,
const struct hif_msg *hif, const void *buf)
{
- size_t len = hif->len - 4; // drop header
+ const struct hif_ind_exception *body = buf;
+ int type = le32_to_cpu(body->type);
- dev_err(wdev->dev, "firmware exception\n");
- print_hex_dump_bytes("Dump: ", DUMP_PREFIX_NONE, buf, len);
- wdev->chip_frozen = 1;
+ if (type == 4)
+ dev_err(wdev->dev, "firmware assert %d\n",
+ le32_to_cpup((__le32 *)body->data));
+ else
+ dev_err(wdev->dev, "firmware exception\n");
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET,
+ 16, 1, hif, le16_to_cpu(hif->len), false);
+ wdev->chip_frozen = true;
return -1;
}
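
hif_error_indication() now decodes errors through a lookup table instead of a switch, which keeps the string data separate from the control flow and makes a new code a one-line addition. A minimal sketch of the same lookup, with a made-up table:

static const struct {
	int val;
	const char *str;
} demo_errors[] = {
	{ 1, "first error" },
	{ 2, "second error" },
};

static const char *demo_strerror(int type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_errors); i++)
		if (demo_errors[i].val == type)
			return demo_errors[i].str;
	return "unknown";	/* fell past the end of the table */
}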
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index 77bca43aca42..893b67f2f792 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -23,8 +23,8 @@ void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd)
mutex_init(&hif_cmd->key_renew_lock);
}
-static void wfx_fill_header(struct hif_msg *hif, int if_id, unsigned int cmd,
- size_t size)
+static void wfx_fill_header(struct hif_msg *hif, int if_id,
+ unsigned int cmd, size_t size)
{
if (if_id == -1)
if_id = 2;
@@ -47,8 +47,8 @@ static void *wfx_alloc_hif(size_t body_len, struct hif_msg **hif)
return NULL;
}
-int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
- size_t reply_len, bool async)
+int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
+ void *reply, size_t reply_len, bool async)
{
const char *mib_name = "";
const char *mib_sep = "";
@@ -82,6 +82,9 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
if (async)
return 0;
+ if (wdev->poll_irq)
+ wfx_bh_poll_irq(wdev);
+
ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ);
if (!ret) {
dev_err(wdev->dev, "chip is abnormally long to answer\n");
@@ -91,7 +94,7 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
if (!ret) {
dev_err(wdev->dev, "chip did not answer\n");
wfx_pending_dump_old_frames(wdev, 3000);
- wdev->chip_frozen = 1;
+ wdev->chip_frozen = true;
reinit_completion(&wdev->hif_cmd.done);
ret = -ETIMEDOUT;
} else {
@@ -103,7 +106,7 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
if (ret &&
(cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) {
- mib_name = get_mib_name(((u16 *) request)[2]);
+ mib_name = get_mib_name(((u16 *)request)[2]);
mib_sep = "/";
}
if (ret < 0)
@@ -128,7 +131,11 @@ int hif_shutdown(struct wfx_dev *wdev)
int ret;
struct hif_msg *hif;
+ if (wdev->chip_frozen)
+ return 0;
wfx_alloc_hif(0, &hif);
+ if (!hif)
+ return -ENOMEM;
wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0);
ret = wfx_cmd_send(wdev, hif, NULL, 0, true);
// After this command, chip won't reply. Be sure to give enough time to
@@ -152,6 +159,8 @@ int hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len)
struct hif_msg *hif;
struct hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif);
+ if (!hif)
+ return -ENOMEM;
body->length = cpu_to_le16(len);
memcpy(body->pds_data, conf, len);
wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len);
@@ -166,6 +175,8 @@ int hif_reset(struct wfx_vif *wvif, bool reset_stat)
struct hif_msg *hif;
struct hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
body->reset_flags.reset_stat = reset_stat;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -173,8 +184,8 @@ int hif_reset(struct wfx_vif *wvif, bool reset_stat)
return ret;
}
-int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
- size_t val_len)
+int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *val, size_t val_len)
{
int ret;
struct hif_msg *hif;
@@ -182,36 +193,43 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
struct hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif);
struct hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL);
+ if (!body || !reply) {
+ ret = -ENOMEM;
+ goto out;
+ }
body->mib_id = cpu_to_le16(mib_id);
wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, reply, buf_len, false);
- if (!ret && mib_id != reply->mib_id) {
- dev_warn(wdev->dev,
- "%s: confirmation mismatch request\n", __func__);
+ if (!ret && mib_id != le16_to_cpu(reply->mib_id)) {
+ dev_warn(wdev->dev, "%s: confirmation mismatch request\n",
+ __func__);
ret = -EIO;
}
if (ret == -ENOMEM)
- dev_err(wdev->dev,
- "buffer is too small to receive %s (%zu < %d)\n",
- get_mib_name(mib_id), val_len, reply->length);
+ dev_err(wdev->dev, "buffer is too small to receive %s (%zu < %d)\n",
+ get_mib_name(mib_id), val_len,
+ le16_to_cpu(reply->length));
if (!ret)
- memcpy(val, &reply->mib_data, reply->length);
+ memcpy(val, &reply->mib_data, le16_to_cpu(reply->length));
else
memset(val, 0xFF, val_len);
+out:
kfree(hif);
kfree(reply);
return ret;
}
-int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
- size_t val_len)
+int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *val, size_t val_len)
{
int ret;
struct hif_msg *hif;
int buf_len = sizeof(struct hif_req_write_mib) + val_len;
struct hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif);
+ if (!hif)
+ return -ENOMEM;
body->mib_id = cpu_to_le16(mib_id);
body->length = cpu_to_le16(val_len);
memcpy(&body->mib_data, val, val_len);
@@ -236,6 +254,8 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
compiletime_assert(IEEE80211_MAX_SSID_LEN == HIF_API_SSID_SIZE,
"API inconsistency");
+ if (!hif)
+ return -ENOMEM;
for (i = 0; i < req->n_ssids; i++) {
memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid,
IEEE80211_MAX_SSID_LEN);
@@ -268,7 +288,7 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
tmo_chan_bg = le32_to_cpu(body->max_channel_time) * USEC_PER_TU;
tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
tmo_chan_fg *= body->num_of_probe_requests;
- tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg);
+ tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg) + 512 * USEC_PER_TU;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
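
The scan timeout now carries a fixed 512 TU margin on top of the per-channel budget. Assuming USEC_PER_TU is 1024 (one 802.11 time unit), that margin is 512 * 1024 us, about 524 ms. A worked sketch with illustrative parameters:

/* e.g. 11 channels at 110 TU (112640 us) each:
 * 11 * 112640 + 524288 = 1763328 us, roughly 1.8 s total */
static u32 demo_scan_timeout_us(u32 chan_num, u32 per_chan_us)
{
	return chan_num * per_chan_us + 512 * 1024;
}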
@@ -283,6 +303,8 @@ int hif_stop_scan(struct wfx_vif *wvif)
// body associated to HIF_REQ_ID_STOP_SCAN is empty
wfx_alloc_hif(0, &hif);
+ if (!hif)
+ return -ENOMEM;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
@@ -296,19 +318,24 @@ int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct hif_msg *hif;
struct hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
+ WARN_ON(!conf->beacon_int);
WARN_ON(!conf->basic_rates);
+ WARN_ON(sizeof(body->ssid) < ssidlen);
+ WARN(!conf->ibss_joined && !ssidlen, "joining an unknown BSS");
+ if (!hif)
+ return -ENOMEM;
body->infrastructure_bss_mode = !conf->ibss_joined;
body->short_preamble = conf->use_short_preamble;
if (channel && channel->flags & IEEE80211_CHAN_NO_IR)
body->probe_for_join = 0;
else
body->probe_for_join = 1;
- body->channel_number = cpu_to_le16(channel->hw_value);
+ body->channel_number = channel->hw_value;
body->beacon_interval = cpu_to_le32(conf->beacon_int);
body->basic_rate_set =
cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
- if (!conf->ibss_joined && ssid) {
+ if (ssid) {
body->ssid_length = cpu_to_le32(ssidlen);
memcpy(body->ssid, ssid, ssidlen);
}
@@ -318,17 +345,17 @@ int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
return ret;
}
-int hif_set_bss_params(struct wfx_vif *wvif,
- const struct hif_req_set_bss_params *arg)
+int hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count)
{
int ret;
struct hif_msg *hif;
- struct hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body),
- &hif);
+ struct hif_req_set_bss_params *body =
+ wfx_alloc_hif(sizeof(*body), &hif);
- memcpy(body, arg, sizeof(*body));
- cpu_to_le16s(&body->aid);
- cpu_to_le32s(&body->operational_rate_set);
+ if (!hif)
+ return -ENOMEM;
+ body->aid = cpu_to_le16(aid);
+ body->beacon_lost_count = beacon_lost_count;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS,
sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -343,6 +370,8 @@ int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg)
// FIXME: only send necessary bits
struct hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
// FIXME: swap bytes as necessary in body
memcpy(body, arg, sizeof(*body));
if (wfx_api_older_than(wdev, 1, 5))
@@ -363,6 +392,8 @@ int hif_remove_key(struct wfx_dev *wdev, int idx)
struct hif_msg *hif;
struct hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
body->entry_index = idx;
wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
@@ -382,6 +413,8 @@ int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
return -ENOMEM;
WARN_ON(arg->aifs > 255);
+ if (!hif)
+ return -ENOMEM;
body->aifsn = arg->aifs;
body->cw_min = cpu_to_le16(arg->cw_min);
body->cw_max = cpu_to_le16(arg->cw_max);
@@ -408,6 +441,8 @@ int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
if (!body)
return -ENOMEM;
+ if (!hif)
+ return -ENOMEM;
if (ps) {
body->pm_mode.enter_psm = 1;
// Firmware does not support more than 128ms
@@ -428,9 +463,12 @@ int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct hif_msg *hif;
struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
+ WARN_ON(!conf->beacon_int);
+ if (!hif)
+ return -ENOMEM;
body->dtim_period = conf->dtim_period;
body->short_preamble = conf->use_short_preamble;
- body->channel_number = cpu_to_le16(channel->hw_value);
+ body->channel_number = channel->hw_value;
body->beacon_interval = cpu_to_le32(conf->beacon_int);
body->basic_rate_set =
cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
@@ -449,6 +487,8 @@ int hif_beacon_transmit(struct wfx_vif *wvif, bool enable)
struct hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body),
&hif);
+ if (!hif)
+ return -ENOMEM;
body->enable_beaconing = enable ? 1 : 0;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT,
sizeof(*body));
@@ -463,9 +503,11 @@ int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
struct hif_msg *hif;
struct hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
if (mac_addr)
ether_addr_copy(body->mac_addr, mac_addr);
- body->map_link_flags = *(struct hif_map_link_flags *) &flags;
+ body->map_link_flags = *(struct hif_map_link_flags *)&flags;
body->peer_sta_id = sta_id;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -480,6 +522,8 @@ int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
int buf_len = sizeof(struct hif_req_update_ie) + ies_len;
struct hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
+ if (!hif)
+ return -ENOMEM;
body->ie_flags.beacon = 1;
body->num_ies = cpu_to_le16(1);
memcpy(body->ie, ies, ies_len);
@@ -489,14 +533,16 @@ int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
return ret;
}
-int hif_sl_send_pub_keys(struct wfx_dev *wdev, const uint8_t *pubkey,
- const uint8_t *pubkey_hmac)
+int hif_sl_send_pub_keys(struct wfx_dev *wdev,
+ const u8 *pubkey, const u8 *pubkey_hmac)
{
int ret;
struct hif_msg *hif;
struct hif_req_sl_exchange_pub_keys *body = wfx_alloc_hif(sizeof(*body),
&hif);
+ if (!hif)
+ return -ENOMEM;
body->algorithm = HIF_SL_CURVE25519;
memcpy(body->host_pub_key, pubkey, sizeof(body->host_pub_key));
memcpy(body->host_pub_key_mac, pubkey_hmac,
@@ -506,7 +552,7 @@ int hif_sl_send_pub_keys(struct wfx_dev *wdev, const uint8_t *pubkey,
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
kfree(hif);
// Compatibility with legacy secure link
- if (ret == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ if (ret == le32_to_cpu(HIF_STATUS_SLK_NEGO_SUCCESS))
ret = 0;
return ret;
}
@@ -517,6 +563,8 @@ int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap)
struct hif_msg *hif;
struct hif_req_sl_configure *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
memcpy(body->encr_bmp, bitmap, sizeof(body->encr_bmp));
wfx_fill_header(hif, -1, HIF_REQ_ID_SL_CONFIGURE, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
@@ -524,21 +572,22 @@ int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap)
return ret;
}
-int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
- int destination)
+int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key, int destination)
{
int ret;
struct hif_msg *hif;
struct hif_req_set_sl_mac_key *body = wfx_alloc_hif(sizeof(*body),
&hif);
+ if (!hif)
+ return -ENOMEM;
memcpy(body->key_value, slk_key, sizeof(body->key_value));
body->otp_or_ram = destination;
wfx_fill_header(hif, -1, HIF_REQ_ID_SET_SL_MAC_KEY, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
kfree(hif);
// Compatibility with legacy secure link
- if (ret == SL_MAC_KEY_STATUS_SUCCESS)
+ if (ret == le32_to_cpu(HIF_STATUS_SLK_SET_KEY_SUCCESS))
ret = 0;
return ret;
}
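
Most hunks in this file add the same guard: wfx_alloc_hif() can return NULL, and the body must not be touched before that is checked. The request pattern these guards protect looks like this sketch (demo_req and DEMO_REQ_ID are made up; the helpers are the ones defined earlier in hif_tx.c):

static int demo_simple_request(struct wfx_dev *wdev)
{
	struct hif_msg *hif;
	struct demo_req *body = wfx_alloc_hif(sizeof(*body), &hif);
	int ret;

	if (!hif)	/* allocation failed, body is invalid too */
		return -ENOMEM;
	body->field = cpu_to_le16(1);
	wfx_fill_header(hif, -1, DEMO_REQ_ID, sizeof(*body));
	ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
	kfree(hif);
	return ret;
}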
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
index f8520a14c14c..e9eca9330178 100644
--- a/drivers/staging/wfx/hif_tx.h
+++ b/drivers/staging/wfx/hif_tx.h
@@ -10,12 +10,11 @@
#ifndef WFX_HIF_TX_H
#define WFX_HIF_TX_H
-#include "hif_api_cmd.h"
-
struct ieee80211_channel;
struct ieee80211_bss_conf;
struct ieee80211_tx_queue_params;
struct cfg80211_scan_request;
+struct hif_req_add_key;
struct wfx_dev;
struct wfx_vif;
@@ -48,8 +47,7 @@ int hif_stop_scan(struct wfx_vif *wvif);
int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout);
-int hif_set_bss_params(struct wfx_vif *wvif,
- const struct hif_req_set_bss_params *arg);
+int hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count);
int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg);
int hif_remove_key(struct wfx_dev *wdev, int idx);
int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
@@ -59,8 +57,8 @@ int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
int hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id);
int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len);
-int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
- int destination);
+int hif_sl_set_mac_key(struct wfx_dev *wdev,
+ const u8 *slk_key, int destination);
int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap);
int hif_sl_send_pub_keys(struct wfx_dev *wdev,
const u8 *pubkey, const u8 *pubkey_hmac);
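
With the new prototype, callers of hif_set_bss_params() pass plain integers and the helper performs the little-endian conversion itself, so struct hif_req_set_bss_params no longer leaks into hif_tx.h. A sketch of a call site (the values are illustrative):

static int demo_on_assoc(struct wfx_vif *wvif, int aid)
{
	/* 20 missed beacons before declaring the link lost */
	return hif_set_bss_params(wvif, aid, 20);
}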
diff --git a/drivers/staging/wfx/hif_tx_mib.c b/drivers/staging/wfx/hif_tx_mib.c
new file mode 100644
index 000000000000..1689cb42acc0
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx_mib.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of host-to-chip MIBs of WFxxx Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+
+#include <linux/etherdevice.h>
+
+#include "wfx.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+#include "hif_api_mib.h"
+
+int hif_set_output_power(struct wfx_vif *wvif, int val)
+{
+ struct hif_mib_current_tx_power_level arg = {
+ .power_level = cpu_to_le32(val * 10),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
+ &arg, sizeof(arg));
+}
+
+int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
+ unsigned int dtim_interval,
+ unsigned int listen_interval)
+{
+ struct hif_mib_beacon_wake_up_period val = {
+ .wakeup_period_min = dtim_interval,
+ .receive_dtim = 0,
+ .wakeup_period_max = listen_interval,
+ };
+
+ if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
+ return -EINVAL;
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
+ &val, sizeof(val));
+}
+
+int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
+ int rssi_thold, int rssi_hyst)
+{
+ struct hif_mib_rcpi_rssi_threshold arg = {
+ .rolling_average_count = 8,
+ .detection = 1,
+ };
+
+ if (!rssi_thold && !rssi_hyst) {
+ arg.upperthresh = 1;
+ arg.lowerthresh = 1;
+ } else {
+ arg.upper_threshold = rssi_thold + rssi_hyst;
+ arg.upper_threshold = (arg.upper_threshold + 110) * 2;
+ arg.lower_threshold = rssi_thold;
+ arg.lower_threshold = (arg.lower_threshold + 110) * 2;
+ }
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD, &arg, sizeof(arg));
+}
+
+int hif_get_counters_table(struct wfx_dev *wdev, int vif_id,
+ struct hif_mib_extended_count_table *arg)
+{
+ if (wfx_api_older_than(wdev, 1, 3)) {
+ // extended_count_table is wider than count_table
+ memset(arg, 0xFF, sizeof(*arg));
+ return hif_read_mib(wdev, vif_id, HIF_MIB_ID_COUNTERS_TABLE,
+ arg, sizeof(struct hif_mib_count_table));
+ } else {
+ return hif_read_mib(wdev, vif_id,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, arg,
+ sizeof(struct hif_mib_extended_count_table));
+ }
+}
+
+int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
+{
+ struct hif_mib_mac_address msg = { };
+
+ if (mac)
+ ether_addr_copy(msg.mac_addr, mac);
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
+ &msg, sizeof(msg));
+}
+
+int hif_set_rx_filter(struct wfx_vif *wvif,
+ bool filter_bssid, bool filter_prbreq)
+{
+ struct hif_mib_rx_filter val = { };
+
+ if (filter_bssid)
+ val.bssid_filter = 1;
+ if (!filter_prbreq)
+ val.fwd_probe_req = 1;
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER,
+ &val, sizeof(val));
+}
+
+int hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len,
+ const struct hif_ie_table_entry *tbl)
+{
+ int ret;
+ struct hif_mib_bcn_filter_table *val;
+ int buf_len = struct_size(val, ie_table, tbl_len);
+
+ val = kzalloc(buf_len, GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+ val->num_of_info_elmts = cpu_to_le32(tbl_len);
+ memcpy(val->ie_table, tbl, tbl_len * sizeof(*tbl));
+ ret = hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_TABLE, val, buf_len);
+ kfree(val);
+ return ret;
+}
+
+int hif_beacon_filter_control(struct wfx_vif *wvif,
+ int enable, int beacon_count)
+{
+ struct hif_mib_bcn_filter_enable arg = {
+ .enable = cpu_to_le32(enable),
+ .bcn_count = cpu_to_le32(beacon_count),
+ };
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE,
+ &arg, sizeof(arg));
+}
+
+int hif_set_operational_mode(struct wfx_dev *wdev, enum hif_op_power_mode mode)
+{
+ struct hif_mib_gl_operational_power_mode val = {
+ .power_mode = mode,
+ .wup_ind_activation = 1,
+ };
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
+ &val, sizeof(val));
+}
+
+int hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
+ u8 frame_type, int init_rate)
+{
+ struct hif_mib_template_frame *arg;
+
+ WARN(skb->len > HIF_API_MAX_TEMPLATE_FRAME_SIZE, "frame is too big");
+ skb_push(skb, 4);
+ arg = (struct hif_mib_template_frame *)skb->data;
+ skb_pull(skb, 4);
+ arg->init_rate = init_rate;
+ arg->frame_type = frame_type;
+ arg->frame_length = cpu_to_le16(skb->len);
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
+ arg, sizeof(*arg) + skb->len);
+}
+
+int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
+{
+ struct hif_mib_protected_mgmt_policy val = { };
+
+ WARN(required && !capable, "incoherent arguments");
+ if (capable) {
+ val.pmf_enable = 1;
+ val.host_enc_auth_frames = 1;
+ }
+ if (!required)
+ val.unpmf_allowed = 1;
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY,
+ &val, sizeof(val));
+}
+
+int hif_set_block_ack_policy(struct wfx_vif *wvif,
+ u8 tx_tid_policy, u8 rx_tid_policy)
+{
+ struct hif_mib_block_ack_policy val = {
+ .block_ack_tx_tid_policy = tx_tid_policy,
+ .block_ack_rx_tid_policy = rx_tid_policy,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
+ &val, sizeof(val));
+}
+
+int hif_set_association_mode(struct wfx_vif *wvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct hif_mib_set_association_mode val = {
+ .preambtype_use = 1,
+ .mode = 1,
+ .spacing = 1,
+ .short_preamble = info->use_short_preamble,
+ };
+
+ rcu_read_lock(); // protect sta
+ if (info->bssid && !info->ibss_joined)
+ sta = ieee80211_find_sta(wvif->vif, info->bssid);
+
+ // FIXME: it is strange to not retrieve all information from bss_info
+ if (sta && sta->ht_cap.ht_supported) {
+ val.mpdu_start_spacing = sta->ht_cap.ampdu_density;
+ if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
+ val.greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+ }
+ rcu_read_unlock();
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
+}
+
+int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
+ int policy_index, u8 *rates)
+{
+ struct hif_mib_set_tx_rate_retry_policy *arg;
+ size_t size = struct_size(arg, tx_rate_retry_policy, 1);
+ int ret;
+
+ arg = kzalloc(size, GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+ arg->num_tx_rate_policies = 1;
+ arg->tx_rate_retry_policy[0].policy_index = policy_index;
+ arg->tx_rate_retry_policy[0].short_retry_count = 255;
+ arg->tx_rate_retry_policy[0].long_retry_count = 255;
+ arg->tx_rate_retry_policy[0].first_rate_sel = 1;
+ arg->tx_rate_retry_policy[0].terminate = 1;
+ arg->tx_rate_retry_policy[0].count_init = 1;
+ memcpy(&arg->tx_rate_retry_policy[0].rates, rates,
+ sizeof(arg->tx_rate_retry_policy[0].rates));
+ ret = hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+ kfree(arg);
+ return ret;
+}
+
+int hif_set_mac_addr_condition(struct wfx_vif *wvif,
+ int idx, const u8 *mac_addr)
+{
+ struct hif_mib_mac_addr_data_frame_condition val = {
+ .condition_idx = idx,
+ .address_type = HIF_MAC_ADDR_A1,
+ };
+
+ ether_addr_copy(val.mac_address, mac_addr);
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
+ &val, sizeof(val));
+}
+
+int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif, int idx, u8 allowed_frames)
+{
+ struct hif_mib_uc_mc_bc_data_frame_condition val = {
+ .condition_idx = idx,
+ .allowed_frames = allowed_frames,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
+ &val, sizeof(val));
+}
+
+int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable, int idx,
+ int mac_filters, int frames_types_filters)
+{
+ struct hif_mib_config_data_filter val = {
+ .enable = enable,
+ .filter_idx = idx,
+ .mac_cond = mac_filters,
+ .uc_mc_bc_cond = frames_types_filters,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CONFIG_DATA_FILTER, &val, sizeof(val));
+}
+
+int hif_set_data_filtering(struct wfx_vif *wvif, bool enable, bool invert)
+{
+ struct hif_mib_set_data_filtering val = {
+ .enable = enable,
+ .invert_matching = invert,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_DATA_FILTERING, &val, sizeof(val));
+}
+
+int hif_keep_alive_period(struct wfx_vif *wvif, int period)
+{
+ struct hif_mib_keep_alive_period arg = {
+ .keep_alive_period = cpu_to_le16(period),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD,
+ &arg, sizeof(arg));
+}
+
+int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr)
+{
+ struct hif_mib_arp_ip_addr_table arg = {
+ .condition_idx = idx,
+ .arp_enable = HIF_ARP_NS_FILTERING_DISABLE,
+ };
+
+ if (addr) {
+ // Caution: type of addr is __be32
+ memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
+ arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
+ }
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
+ &arg, sizeof(arg));
+}
+
+int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable)
+{
+ struct hif_mib_gl_set_multi_msg arg = {
+ .enable_multi_tx_conf = enable,
+ };
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
+ &arg, sizeof(arg));
+}
+
+int hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val)
+{
+ struct hif_mib_set_uapsd_information arg = { };
+
+ if (val & BIT(IEEE80211_AC_VO))
+ arg.trig_voice = 1;
+ if (val & BIT(IEEE80211_AC_VI))
+ arg.trig_video = 1;
+ if (val & BIT(IEEE80211_AC_BE))
+ arg.trig_be = 1;
+ if (val & BIT(IEEE80211_AC_BK))
+ arg.trig_bckgrnd = 1;
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION,
+ &arg, sizeof(arg));
+}
+
+int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
+{
+ struct hif_mib_non_erp_protection arg = {
+ .use_cts_to_self = enable,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
+}
+
+int hif_slot_time(struct wfx_vif *wvif, int val)
+{
+ struct hif_mib_slot_time arg = {
+ .slot_time = cpu_to_le32(val),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
+ &arg, sizeof(arg));
+}
+
+int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
+{
+ struct hif_mib_wep_default_key_id arg = {
+ .wep_default_key_id = val,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+ &arg, sizeof(arg));
+}
+
+int hif_rts_threshold(struct wfx_vif *wvif, int val)
+{
+ struct hif_mib_dot11_rts_threshold arg = {
+ .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
+}
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
index 26b1406f9f6c..86683de7de7c 100644
--- a/drivers/staging/wfx/hif_tx_mib.h
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -9,398 +9,48 @@
#ifndef WFX_HIF_TX_MIB_H
#define WFX_HIF_TX_MIB_H
-#include <linux/etherdevice.h>
-
-#include "wfx.h"
-#include "hif_tx.h"
-#include "hif_api_mib.h"
-
-static inline int hif_set_output_power(struct wfx_vif *wvif, int val)
-{
- struct hif_mib_current_tx_power_level arg = {
- .power_level = cpu_to_le32(val * 10),
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
- &arg, sizeof(arg));
-}
-
-static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
- unsigned int dtim_interval,
- unsigned int listen_interval)
-{
- struct hif_mib_beacon_wake_up_period val = {
- .wakeup_period_min = dtim_interval,
- .receive_dtim = 0,
- .wakeup_period_max = cpu_to_le16(listen_interval),
- };
-
- if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
- return -EINVAL;
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
- &val, sizeof(val));
-}
-
-static inline int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
- int rssi_thold, int rssi_hyst)
-{
- struct hif_mib_rcpi_rssi_threshold arg = {
- .rolling_average_count = 8,
- .detection = 1,
- };
-
- if (!rssi_thold && !rssi_hyst) {
- arg.upperthresh = 1;
- arg.lowerthresh = 1;
- } else {
- arg.upper_threshold = rssi_thold + rssi_hyst;
- arg.upper_threshold = (arg.upper_threshold + 110) * 2;
- arg.lower_threshold = rssi_thold;
- arg.lower_threshold = (arg.lower_threshold + 110) * 2;
- }
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_RCPI_RSSI_THRESHOLD, &arg, sizeof(arg));
-}
-
-static inline int hif_get_counters_table(struct wfx_dev *wdev,
- struct hif_mib_extended_count_table *arg)
-{
- if (wfx_api_older_than(wdev, 1, 3)) {
- // extended_count_table is wider than count_table
- memset(arg, 0xFF, sizeof(*arg));
- return hif_read_mib(wdev, 0, HIF_MIB_ID_COUNTERS_TABLE,
- arg, sizeof(struct hif_mib_count_table));
- } else {
- return hif_read_mib(wdev, 0,
- HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, arg,
- sizeof(struct hif_mib_extended_count_table));
- }
-}
-
-static inline int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
-{
- struct hif_mib_mac_address msg = { };
-
- if (mac)
- ether_addr_copy(msg.mac_addr, mac);
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
- &msg, sizeof(msg));
-}
-
-static inline int hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid,
- bool fwd_probe_req)
-{
- struct hif_mib_rx_filter val = { };
-
- if (filter_bssid)
- val.bssid_filter = 1;
- if (fwd_probe_req)
- val.fwd_probe_req = 1;
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER,
- &val, sizeof(val));
-}
-
-static inline int hif_set_beacon_filter_table(struct wfx_vif *wvif,
- int tbl_len,
- struct hif_ie_table_entry *tbl)
-{
- int ret;
- struct hif_mib_bcn_filter_table *val;
- int buf_len = struct_size(val, ie_table, tbl_len);
-
- val = kzalloc(buf_len, GFP_KERNEL);
- if (!val)
- return -ENOMEM;
- val->num_of_info_elmts = cpu_to_le32(tbl_len);
- memcpy(val->ie_table, tbl, tbl_len * sizeof(*tbl));
- ret = hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_BEACON_FILTER_TABLE, val, buf_len);
- kfree(val);
- return ret;
-}
-
-static inline int hif_beacon_filter_control(struct wfx_vif *wvif,
- int enable, int beacon_count)
-{
- struct hif_mib_bcn_filter_enable arg = {
- .enable = cpu_to_le32(enable),
- .bcn_count = cpu_to_le32(beacon_count),
- };
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_BEACON_FILTER_ENABLE,
- &arg, sizeof(arg));
-}
-
-static inline int hif_set_operational_mode(struct wfx_dev *wdev,
- enum hif_op_power_mode mode)
-{
- struct hif_mib_gl_operational_power_mode val = {
- .power_mode = mode,
- .wup_ind_activation = 1,
- };
-
- return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
- &val, sizeof(val));
-}
-
-static inline int hif_set_template_frame(struct wfx_vif *wvif,
- struct sk_buff *skb,
- u8 frame_type, int init_rate)
-{
- struct hif_mib_template_frame *arg;
-
- skb_push(skb, 4);
- arg = (struct hif_mib_template_frame *)skb->data;
- skb_pull(skb, 4);
- arg->init_rate = init_rate;
- arg->frame_type = frame_type;
- arg->frame_length = cpu_to_le16(skb->len);
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
- arg, sizeof(*arg));
-}
-
-static inline int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
-{
- struct hif_mib_protected_mgmt_policy val = { };
-
- WARN(required && !capable, "incoherent arguments");
- if (capable) {
- val.pmf_enable = 1;
- val.host_enc_auth_frames = 1;
- }
- if (!required)
- val.unpmf_allowed = 1;
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_PROTECTED_MGMT_POLICY,
- &val, sizeof(val));
-}
-
-static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
- u8 tx_tid_policy, u8 rx_tid_policy)
-{
- struct hif_mib_block_ack_policy val = {
- .block_ack_tx_tid_policy = tx_tid_policy,
- .block_ack_rx_tid_policy = rx_tid_policy,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
- &val, sizeof(val));
-}
-
-static inline int hif_set_association_mode(struct wfx_vif *wvif,
- struct ieee80211_bss_conf *info)
-{
- int basic_rates = wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates);
- struct ieee80211_sta *sta = NULL;
- struct hif_mib_set_association_mode val = {
- .preambtype_use = 1,
- .mode = 1,
- .rateset = 1,
- .spacing = 1,
- .short_preamble = info->use_short_preamble,
- .basic_rate_set = cpu_to_le32(basic_rates)
- };
-
- rcu_read_lock(); // protect sta
- if (info->bssid && !info->ibss_joined)
- sta = ieee80211_find_sta(wvif->vif, info->bssid);
-
- // FIXME: it is strange to not retrieve all information from bss_info
- if (sta && sta->ht_cap.ht_supported) {
- val.mpdu_start_spacing = sta->ht_cap.ampdu_density;
- if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
- val.greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
- }
- rcu_read_unlock();
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
-}
-
-static inline int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
- int policy_index, uint8_t *rates)
-{
- struct hif_mib_set_tx_rate_retry_policy *arg;
- size_t size = struct_size(arg, tx_rate_retry_policy, 1);
- int ret;
-
- arg = kzalloc(size, GFP_KERNEL);
- arg->num_tx_rate_policies = 1;
- arg->tx_rate_retry_policy[0].policy_index = policy_index;
- arg->tx_rate_retry_policy[0].short_retry_count = 255;
- arg->tx_rate_retry_policy[0].long_retry_count = 255;
- arg->tx_rate_retry_policy[0].first_rate_sel = 1;
- arg->tx_rate_retry_policy[0].terminate = 1;
- arg->tx_rate_retry_policy[0].count_init = 1;
- memcpy(&arg->tx_rate_retry_policy[0].rates, rates,
- sizeof(arg->tx_rate_retry_policy[0].rates));
- ret = hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
- kfree(arg);
- return ret;
-}
-
-static inline int hif_set_mac_addr_condition(struct wfx_vif *wvif,
- int idx, const u8 *mac_addr)
-{
- struct hif_mib_mac_addr_data_frame_condition val = {
- .condition_idx = idx,
- .address_type = HIF_MAC_ADDR_A1,
- };
-
- ether_addr_copy(val.mac_address, mac_addr);
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
- &val, sizeof(val));
-}
-
-static inline int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
- int idx, u8 allowed_frames)
-{
- struct hif_mib_uc_mc_bc_data_frame_condition val = {
- .condition_idx = idx,
- .allowed_frames = allowed_frames,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
- &val, sizeof(val));
-}
-
-static inline int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable,
- int idx, int mac_filters,
- int frames_types_filters)
-{
- struct hif_mib_config_data_filter val = {
- .enable = enable,
- .filter_idx = idx,
- .mac_cond = mac_filters,
- .uc_mc_bc_cond = frames_types_filters,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_CONFIG_DATA_FILTER, &val, sizeof(val));
-}
-
-static inline int hif_set_data_filtering(struct wfx_vif *wvif,
- bool enable, bool invert)
-{
- struct hif_mib_set_data_filtering val = {
- .enable = enable,
- .invert_matching = invert,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_DATA_FILTERING, &val, sizeof(val));
-}
-
-static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
-{
- struct hif_mib_keep_alive_period arg = {
- .keep_alive_period = cpu_to_le16(period),
- };
-
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD,
- &arg, sizeof(arg));
-};
-
-static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx,
- __be32 *addr)
-{
- struct hif_mib_arp_ip_addr_table arg = {
- .condition_idx = idx,
- .arp_enable = HIF_ARP_NS_FILTERING_DISABLE,
- };
-
- if (addr) {
- // Caution: type of addr is __be32
- memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
- arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
- }
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
- &arg, sizeof(arg));
-}
-
-static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable)
-{
- struct hif_mib_gl_set_multi_msg arg = {
- .enable_multi_tx_conf = enable,
- };
-
- return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
- &arg, sizeof(arg));
-}
-
-static inline int hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val)
-{
- struct hif_mib_set_uapsd_information arg = { };
-
- if (val & BIT(IEEE80211_AC_VO))
- arg.trig_voice = 1;
- if (val & BIT(IEEE80211_AC_VI))
- arg.trig_video = 1;
- if (val & BIT(IEEE80211_AC_BE))
- arg.trig_be = 1;
- if (val & BIT(IEEE80211_AC_BK))
- arg.trig_bckgrnd = 1;
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_UAPSD_INFORMATION,
- &arg, sizeof(arg));
-}
-
-static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
-{
- struct hif_mib_non_erp_protection arg = {
- .use_cts_to_self = enable,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
-}
-
-static inline int hif_slot_time(struct wfx_vif *wvif, int val)
-{
- struct hif_mib_slot_time arg = {
- .slot_time = cpu_to_le32(val),
- };
-
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
- &arg, sizeof(arg));
-}
-
-static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool enable)
-{
- struct hif_mib_set_ht_protection arg = {
- .dual_cts_prot = enable,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_HT_PROTECTION,
- &arg, sizeof(arg));
-}
-
-static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
-{
- struct hif_mib_wep_default_key_id arg = {
- .wep_default_key_id = val,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
- &arg, sizeof(arg));
-}
-
-static inline int hif_rts_threshold(struct wfx_vif *wvif, int val)
-{
- struct hif_mib_dot11_rts_threshold arg = {
- .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF),
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
-}
+struct wfx_vif;
+struct sk_buff;
+
+int hif_set_output_power(struct wfx_vif *wvif, int val);
+int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
+ unsigned int dtim_interval,
+ unsigned int listen_interval);
+int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
+ int rssi_thold, int rssi_hyst);
+int hif_get_counters_table(struct wfx_dev *wdev, int vif_id,
+ struct hif_mib_extended_count_table *arg);
+int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac);
+int hif_set_rx_filter(struct wfx_vif *wvif,
+ bool filter_bssid, bool fwd_probe_req);
+int hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len,
+ const struct hif_ie_table_entry *tbl);
+int hif_beacon_filter_control(struct wfx_vif *wvif,
+ int enable, int beacon_count);
+int hif_set_operational_mode(struct wfx_dev *wdev, enum hif_op_power_mode mode);
+int hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
+ u8 frame_type, int init_rate);
+int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required);
+int hif_set_block_ack_policy(struct wfx_vif *wvif,
+ u8 tx_tid_policy, u8 rx_tid_policy);
+int hif_set_association_mode(struct wfx_vif *wvif,
+ struct ieee80211_bss_conf *info);
+int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
+ int policy_index, u8 *rates);
+int hif_set_mac_addr_condition(struct wfx_vif *wvif,
+ int idx, const u8 *mac_addr);
+int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
+ int idx, u8 allowed_frames);
+int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable, int idx,
+ int mac_filters, int frames_types_filters);
+int hif_set_data_filtering(struct wfx_vif *wvif, bool enable, bool invert);
+int hif_keep_alive_period(struct wfx_vif *wvif, int period);
+int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr);
+int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable);
+int hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val);
+int hif_erp_use_protection(struct wfx_vif *wvif, bool enable);
+int hif_slot_time(struct wfx_vif *wvif, int val);
+int hif_wep_default_key_id(struct wfx_vif *wvif, int val);
+int hif_rts_threshold(struct wfx_vif *wvif, int val);
#endif
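
The header now keeps only prototypes plus forward declarations of the pointed-to types, so files that include hif_tx_mib.h no longer pull in wfx.h and the whole MIB API. The pattern, sketched with made-up names:

struct demo_dev;			/* opaque: name only, no layout */
int demo_helper(struct demo_dev *dev, int val);	/* prototype, no body */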
diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c
index d3a141d95a0e..777217cdf9a7 100644
--- a/drivers/staging/wfx/hwio.c
+++ b/drivers/staging/wfx/hwio.c
@@ -106,8 +106,8 @@ err:
return ret;
}
-static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf,
- size_t len)
+static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr,
+ void *buf, size_t len)
{
int ret;
int i;
@@ -195,8 +195,8 @@ static int indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr,
return ret;
}
-static int indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr,
- u32 *val)
+static int indirect_read32_locked(struct wfx_dev *wdev, int reg,
+ u32 addr, u32 *val)
{
int ret;
__le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
@@ -205,15 +205,15 @@ static int indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr,
return -ENOMEM;
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = indirect_read(wdev, reg, addr, tmp, sizeof(u32));
- *val = cpu_to_le32(*tmp);
+ *val = le32_to_cpu(*tmp);
_trace_io_ind_read32(reg, addr, *val);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
kfree(tmp);
return ret;
}
-static int indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr,
- u32 val)
+static int indirect_write32_locked(struct wfx_dev *wdev, int reg,
+ u32 addr, u32 val)
{
int ret;
__le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
@@ -233,7 +233,7 @@ int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len)
{
int ret;
- WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ WARN((long)buf & 3, "%s: unaligned buffer", __func__);
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv,
WFX_REG_IN_OUT_QUEUE, buf, len);
@@ -249,7 +249,7 @@ int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len)
{
int ret;
- WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ WARN((long)buf & 3, "%s: unaligned buffer", __func__);
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv,
WFX_REG_IN_OUT_QUEUE, buf, len);
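
The indirect_read32_locked() change swaps cpu_to_le32() for le32_to_cpu(): data arriving from the bus is little-endian and must be converted toward CPU order. Since a 32-bit byte swap is its own inverse, both calls produce the same bytes at runtime; the fix is about declaring the right direction so sparse's __le32 checking passes and the intent is readable. A minimal sketch:

static u32 demo_read32(const void *io_buf)
{
	__le32 raw;

	memcpy(&raw, io_buf, sizeof(raw));	/* raw bus bytes */
	return le32_to_cpu(raw);		/* device -> CPU order */
}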
diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c
index 96adfa330604..5ee2ffc5f935 100644
--- a/drivers/staging/wfx/key.c
+++ b/drivers/staging/wfx/key.c
@@ -5,6 +5,7 @@
* Copyright (c) 2017-2019, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "key.h"
@@ -20,14 +21,12 @@ static int wfx_alloc_key(struct wfx_dev *wdev)
return -1;
wdev->key_map |= BIT(idx);
- wdev->keys[idx].entry_index = idx;
return idx;
}
static void wfx_free_key(struct wfx_dev *wdev, int idx)
{
WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation");
- memset(&wdev->keys[idx], 0, sizeof(wdev->keys[idx]));
wdev->key_map &= ~BIT(idx);
}
@@ -159,7 +158,7 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
int ret;
- struct hif_req_add_key *k;
+ struct hif_req_add_key k = { };
struct ieee80211_key_seq seq;
struct wfx_dev *wdev = wvif->wdev;
int idx = wfx_alloc_key(wvif->wdev);
@@ -169,44 +168,44 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
ieee80211_get_key_rx_seq(key, 0, &seq);
if (idx < 0)
return -EINVAL;
- k = &wdev->keys[idx];
- k->int_id = wvif->id;
+ k.int_id = wvif->id;
+ k.entry_index = idx;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104) {
if (pairwise)
- k->type = fill_wep_pair(&k->key.wep_pairwise_key, key,
- sta->addr);
+ k.type = fill_wep_pair(&k.key.wep_pairwise_key, key,
+ sta->addr);
else
- k->type = fill_wep_group(&k->key.wep_group_key, key);
+ k.type = fill_wep_group(&k.key.wep_group_key, key);
} else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
if (pairwise)
- k->type = fill_tkip_pair(&k->key.tkip_pairwise_key, key,
- sta->addr);
+ k.type = fill_tkip_pair(&k.key.tkip_pairwise_key, key,
+ sta->addr);
else
- k->type = fill_tkip_group(&k->key.tkip_group_key, key,
- &seq, wvif->vif->type);
+ k.type = fill_tkip_group(&k.key.tkip_group_key, key,
+ &seq, wvif->vif->type);
} else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) {
if (pairwise)
- k->type = fill_ccmp_pair(&k->key.aes_pairwise_key, key,
- sta->addr);
+ k.type = fill_ccmp_pair(&k.key.aes_pairwise_key, key,
+ sta->addr);
else
- k->type = fill_ccmp_group(&k->key.aes_group_key, key,
- &seq);
+ k.type = fill_ccmp_group(&k.key.aes_group_key, key,
+ &seq);
} else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) {
if (pairwise)
- k->type = fill_sms4_pair(&k->key.wapi_pairwise_key, key,
- sta->addr);
+ k.type = fill_sms4_pair(&k.key.wapi_pairwise_key, key,
+ sta->addr);
else
- k->type = fill_sms4_group(&k->key.wapi_group_key, key);
+ k.type = fill_sms4_group(&k.key.wapi_group_key, key);
} else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
- k->type = fill_aes_cmac_group(&k->key.igtk_group_key, key,
- &seq);
+ k.type = fill_aes_cmac_group(&k.key.igtk_group_key, key,
+ &seq);
} else {
dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher);
wfx_free_key(wdev, idx);
return -EOPNOTSUPP;
}
- ret = hif_add_key(wdev, k);
+ ret = hif_add_key(wdev, &k);
if (ret) {
wfx_free_key(wdev, idx);
return -EOPNOTSUPP;
@@ -229,7 +228,7 @@ int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
int ret = -EOPNOTSUPP;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
mutex_lock(&wvif->wdev->conf_mutex);
if (cmd == SET_KEY)
@@ -240,29 +239,3 @@ int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
-int wfx_upload_keys(struct wfx_vif *wvif)
-{
- int i;
- struct hif_req_add_key *key;
- struct wfx_dev *wdev = wvif->wdev;
-
- for (i = 0; i < ARRAY_SIZE(wdev->keys); i++) {
- if (wdev->key_map & BIT(i)) {
- key = &wdev->keys[i];
- if (key->int_id == wvif->id)
- hif_add_key(wdev, key);
- }
- }
- return 0;
-}
-
-void wfx_wep_key_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, wep_key_work);
-
- wfx_tx_flush(wvif->wdev);
- hif_wep_default_key_id(wvif, wvif->wep_default_key_id);
- wfx_pending_requeue(wvif->wdev, wvif->wep_pending_skb);
- wvif->wep_pending_skb = NULL;
- wfx_tx_unlock(wvif->wdev);
-}
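
wfx_add_key() now builds the request in a zeroed stack variable instead of caching it in wdev->keys[], which also lets wfx_upload_keys() and the WEP work item go away. A sketch of the new shape (the key-filling details are elided):

static int demo_install_key(struct wfx_dev *wdev, int idx)
{
	struct hif_req_add_key k = { };	/* no persistent copy kept */

	k.entry_index = idx;
	/* ... fill k.type and k.key.* for the cipher in use ... */
	return hif_add_key(wdev, &k);
}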
diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h
index 9436ccdf4d3b..ff31fc9c565a 100644
--- a/drivers/staging/wfx/key.h
+++ b/drivers/staging/wfx/key.h
@@ -16,7 +16,5 @@ struct wfx_vif;
int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
-int wfx_upload_keys(struct wfx_vif *wvif);
-void wfx_wep_key_work(struct work_struct *work);
#endif /* WFX_STA_H */
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index 3c4c240229ad..6bd96f476388 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -28,6 +28,7 @@
#include "bh.h"
#include "sta.h"
#include "key.h"
+#include "scan.h"
#include "debug.h"
#include "data_tx.h"
#include "secure_link.h"
@@ -106,7 +107,7 @@ static const struct ieee80211_supported_band wfx_band_2ghz = {
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
.mcs = {
.rx_mask = { 0xFF }, // MCS0 to MCS7
- .rx_highest = 65,
+ .rx_highest = cpu_to_le16(72),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
},
@@ -133,15 +134,19 @@ static const struct ieee80211_ops wfx_ops = {
.remove_interface = wfx_remove_interface,
.config = wfx_config,
.tx = wfx_tx,
+ .join_ibss = wfx_join_ibss,
+ .leave_ibss = wfx_leave_ibss,
.conf_tx = wfx_conf_tx,
.hw_scan = wfx_hw_scan,
.cancel_hw_scan = wfx_cancel_hw_scan,
+ .start_ap = wfx_start_ap,
+ .stop_ap = wfx_stop_ap,
.sta_add = wfx_sta_add,
.sta_remove = wfx_sta_remove,
- .sta_notify = wfx_sta_notify,
.set_tim = wfx_set_tim,
.set_key = wfx_set_key,
.set_rts_threshold = wfx_set_rts_threshold,
+ .set_default_unicast_key = wfx_set_default_unicast_key,
.bss_info_changed = wfx_bss_info_changed,
.prepare_multicast = wfx_prepare_multicast,
.configure_filter = wfx_configure_filter,
@@ -165,8 +170,8 @@ bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
return false;
}
-struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
- const char *label)
+struct gpio_desc *wfx_get_gpio(struct device *dev,
+ int override, const char *label)
{
struct gpio_desc *ret;
char label_buf[256];
@@ -187,18 +192,18 @@ struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
if (!ret || PTR_ERR(ret) == -ENOENT)
dev_warn(dev, "gpio %s is not defined\n", label);
else
- dev_warn(dev,
- "error while requesting gpio %s\n", label);
+ dev_warn(dev, "error while requesting gpio %s\n",
+ label);
ret = NULL;
} else {
- dev_dbg(dev,
- "using gpio %d for %s\n", desc_to_gpio(ret), label);
+ dev_dbg(dev, "using gpio %d for %s\n",
+ desc_to_gpio(ret), label);
}
return ret;
}
/* NOTE: wfx_send_pds() destroys buf */
-int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len)
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
{
int ret;
int start, brace_level, i;
@@ -224,16 +229,19 @@ int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len)
buf[i] = '}';
ret = hif_configuration(wdev, buf + start,
i - start + 1);
- if (ret == HIF_STATUS_FAILURE) {
- dev_err(wdev->dev, "PDS bytes %d to %d: invalid data (unsupported options?)\n", start, i);
+ if (ret > 0) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: invalid data (unsupported options?)\n",
+ start, i);
return -EINVAL;
}
if (ret == -ETIMEDOUT) {
- dev_err(wdev->dev, "PDS bytes %d to %d: chip didn't reply (corrupted file?)\n", start, i);
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip didn't reply (corrupted file?)\n",
+ start, i);
return ret;
}
if (ret) {
- dev_err(wdev->dev, "PDS bytes %d to %d: chip returned an unknown error\n", start, i);
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip returned an unknown error\n",
+ start, i);
return -EIO;
}
buf[i] = ',';
@@ -247,7 +255,7 @@ static int wfx_send_pdata_pds(struct wfx_dev *wdev)
{
int ret = 0;
const struct firmware *pds;
- unsigned char *tmp_buf;
+ u8 *tmp_buf;
ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev);
if (ret) {
@@ -266,9 +274,9 @@ static void wfx_free_common(void *data)
{
struct wfx_dev *wdev = data;
+ mutex_destroy(&wdev->tx_power_loop_info_lock);
mutex_destroy(&wdev->rx_stats_lock);
mutex_destroy(&wdev->conf_mutex);
- wfx_tx_queues_deinit(wdev);
ieee80211_free_hw(wdev->hw);
}
@@ -286,7 +294,6 @@ struct wfx_dev *wfx_init_common(struct device *dev,
SET_IEEE80211_DEV(hw, dev);
- ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -315,7 +322,7 @@ struct wfx_dev *wfx_init_common(struct device *dev,
hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- hw->wiphy->max_ap_assoc_sta = WFX_MAX_STA_IN_AP_MODE;
+ hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX;
hw->wiphy->max_scan_ssids = 2;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations);
@@ -338,7 +345,10 @@ struct wfx_dev *wfx_init_common(struct device *dev,
mutex_init(&wdev->conf_mutex);
mutex_init(&wdev->rx_stats_lock);
+ mutex_init(&wdev->tx_power_loop_info_lock);
init_completion(&wdev->firmware_ready);
+ INIT_DELAYED_WORK(&wdev->cooling_timeout_work,
+ wfx_cooling_timeout_work);
wfx_init_hif_cmd(&wdev->hif_cmd);
wfx_tx_queues_init(wdev);
@@ -359,23 +369,24 @@ int wfx_probe(struct wfx_dev *wdev)
// prevent bh() from touching it.
gpio_saved = wdev->pdata.gpio_wakeup;
wdev->pdata.gpio_wakeup = NULL;
+ wdev->poll_irq = true;
wfx_bh_register(wdev);
err = wfx_init_device(wdev);
if (err)
- goto err1;
+ goto err0;
- err = wait_for_completion_interruptible_timeout(&wdev->firmware_ready,
- 10 * HZ);
+ wfx_bh_poll_irq(wdev);
+ err = wait_for_completion_timeout(&wdev->firmware_ready, 1 * HZ);
if (err <= 0) {
if (err == 0) {
- dev_err(wdev->dev, "timeout while waiting for startup indication. IRQ configuration error?\n");
+ dev_err(wdev->dev, "timeout while waiting for startup indication\n");
err = -ETIMEDOUT;
} else if (err == -ERESTARTSYS) {
dev_info(wdev->dev, "probe interrupted by user\n");
}
- goto err1;
+ goto err0;
}
// FIXME: fill wiphy::hw_version
@@ -384,7 +395,7 @@ int wfx_probe(struct wfx_dev *wdev)
wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
wdev->hw_caps.api_version_major,
wdev->hw_caps.api_version_minor,
- wdev->keyset, *((u32 *) &wdev->hw_caps.capabilities));
+ wdev->keyset, *((u32 *)&wdev->hw_caps.capabilities));
snprintf(wdev->hw->wiphy->fw_version,
sizeof(wdev->hw->wiphy->fw_version),
"%d.%d.%d",
@@ -397,14 +408,14 @@ int wfx_probe(struct wfx_dev *wdev)
"unsupported firmware API version (expect 1 while firmware returns %d)\n",
wdev->hw_caps.api_version_major);
err = -ENOTSUPP;
- goto err1;
+ goto err0;
}
err = wfx_sl_init(wdev);
if (err && wdev->hw_caps.capabilities.link_mode == SEC_LINK_ENFORCED) {
dev_err(wdev->dev,
"chip require secure_link, but can't negociate it\n");
- goto err1;
+ goto err0;
}
if (wdev->hw_caps.regul_sel_mode_info.region_sel_mode) {
@@ -417,7 +428,16 @@ int wfx_probe(struct wfx_dev *wdev)
wdev->pdata.file_pds);
err = wfx_send_pdata_pds(wdev);
if (err < 0)
- goto err1;
+ goto err0;
+
+ wdev->poll_irq = false;
+ err = wdev->hwbus_ops->irq_subscribe(wdev->hwbus_priv);
+ if (err)
+ goto err0;
+
+ err = hif_use_multi_tx_conf(wdev, true);
+ if (err)
+ dev_err(wdev->dev, "misconfigured IRQ?\n");
wdev->pdata.gpio_wakeup = gpio_saved;
if (wdev->pdata.gpio_wakeup) {
@@ -432,8 +452,6 @@ int wfx_probe(struct wfx_dev *wdev)
hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE);
}
- hif_use_multi_tx_conf(wdev, true);
-
for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) {
eth_zero_addr(wdev->addresses[i].addr);
macaddr = of_get_mac_address(wdev->dev->of_node);
@@ -466,8 +484,9 @@ int wfx_probe(struct wfx_dev *wdev)
err2:
ieee80211_unregister_hw(wdev->hw);
- ieee80211_free_hw(wdev->hw);
err1:
+ wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
+err0:
wfx_bh_unregister(wdev);
return err;
}
@@ -476,6 +495,7 @@ void wfx_release(struct wfx_dev *wdev)
{
ieee80211_unregister_hw(wdev->hw);
hif_shutdown(wdev);
+ wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
wfx_bh_unregister(wdev);
wfx_sl_deinit(wdev);
}
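
The reworked probe path above polls the chip itself (wdev->poll_irq) until the startup indication arrives, and only subscribes to the bus IRQ once the PDS has been accepted, so an IRQ misconfiguration can no longer masquerade as a firmware timeout. A rough, self-contained sketch of the bounded-poll idea (the real driver relies on wfx_bh_poll_irq() and wait_for_completion_timeout(); everything below is a stand-in):

#include <stdbool.h>
#include <stdio.h>

static int polls;

/* stand-in for sampling the chip's startup indication */
static bool firmware_ready(void)
{
	return ++polls >= 3; /* pretend it comes up on the third poll */
}

/* poll with a hard bound instead of sleeping on an interrupt */
static int probe_poll(int max_polls)
{
	for (int i = 0; i < max_polls; i++)
		if (firmware_ready())
			return 0;
	return -1; /* the driver returns -ETIMEDOUT here */
}

int main(void)
{
	printf("probe: %s\n", probe_poll(10) ? "timeout" : "ready");
	return 0;
}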
diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h
index 9c9410072def..f832ce409fda 100644
--- a/drivers/staging/wfx/main.h
+++ b/drivers/staging/wfx/main.h
@@ -13,10 +13,10 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
-#include "bus.h"
#include "hif_api_general.h"
struct wfx_dev;
+struct hwbus_ops;
struct wfx_platform_data {
/* Keyset and ".sec" extention will appended to this string */
@@ -41,6 +41,6 @@ void wfx_release(struct wfx_dev *wdev);
struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
const char *label);
bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor);
-int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len);
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len);
#endif
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 39d9127ce4b9..3248ecefda56 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -35,6 +35,7 @@ void wfx_tx_flush(struct wfx_dev *wdev)
if (wdev->chip_frozen)
return;
+ wfx_tx_lock(wdev);
mutex_lock(&wdev->hif_cmd.lock);
ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
!wdev->hif.tx_buffers_used,
@@ -44,9 +45,10 @@ void wfx_tx_flush(struct wfx_dev *wdev)
wdev->hif.tx_buffers_used);
wfx_pending_dump_old_frames(wdev, 3000);
// FIXME: drop pending frames here
- wdev->chip_frozen = 1;
+ wdev->chip_frozen = true;
}
mutex_unlock(&wdev->hif_cmd.lock);
+ wfx_tx_unlock(wdev);
}
void wfx_tx_lock_flush(struct wfx_dev *wdev)
@@ -55,261 +57,142 @@ void wfx_tx_lock_flush(struct wfx_dev *wdev)
wfx_tx_flush(wdev);
}
-void wfx_tx_queues_lock(struct wfx_dev *wdev)
+void wfx_tx_queues_init(struct wfx_dev *wdev)
{
int i;
- struct wfx_queue *queue;
+ skb_queue_head_init(&wdev->tx_pending);
+ init_waitqueue_head(&wdev->tx_dequeue);
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- queue = &wdev->tx_queue[i];
- spin_lock_bh(&queue->queue.lock);
- if (queue->tx_locked_cnt++ == 0)
- ieee80211_stop_queue(wdev->hw, queue->queue_id);
- spin_unlock_bh(&queue->queue.lock);
+ skb_queue_head_init(&wdev->tx_queue[i].normal);
+ skb_queue_head_init(&wdev->tx_queue[i].cab);
}
}
-void wfx_tx_queues_unlock(struct wfx_dev *wdev)
+void wfx_tx_queues_check_empty(struct wfx_dev *wdev)
{
int i;
- struct wfx_queue *queue;
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- queue = &wdev->tx_queue[i];
- spin_lock_bh(&queue->queue.lock);
- WARN(!queue->tx_locked_cnt, "queue already unlocked");
- if (--queue->tx_locked_cnt == 0)
- ieee80211_wake_queue(wdev->hw, queue->queue_id);
- spin_unlock_bh(&queue->queue.lock);
+ WARN_ON(atomic_read(&wdev->tx_queue[i].pending_frames));
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].normal));
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].cab));
}
}
-/* If successful, LOCKS the TX queue! */
-void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
+static bool __wfx_tx_queue_empty(struct wfx_dev *wdev,
+ struct sk_buff_head *skb_queue, int vif_id)
{
- int i;
- bool done;
- struct wfx_queue *queue;
- struct sk_buff *item;
- struct wfx_dev *wdev = wvif->wdev;
- struct hif_msg *hif;
-
- if (wvif->wdev->chip_frozen) {
- wfx_tx_lock_flush(wdev);
- wfx_tx_queues_clear(wdev);
- return;
- }
+ struct hif_msg *hif_msg;
+ struct sk_buff *skb;
- do {
- done = true;
- wfx_tx_lock_flush(wdev);
- for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
- queue = &wdev->tx_queue[i];
- spin_lock_bh(&queue->queue.lock);
- skb_queue_walk(&queue->queue, item) {
- hif = (struct hif_msg *) item->data;
- if (hif->interface == wvif->id)
- done = false;
- }
- spin_unlock_bh(&queue->queue.lock);
- }
- if (!done) {
- wfx_tx_unlock(wdev);
- msleep(20);
+ spin_lock_bh(&skb_queue->lock);
+ skb_queue_walk(skb_queue, skb) {
+ hif_msg = (struct hif_msg *)skb->data;
+ if (vif_id < 0 || hif_msg->interface == vif_id) {
+ spin_unlock_bh(&skb_queue->lock);
+ return false;
}
- } while (!done);
-}
-
-static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
- struct sk_buff_head *gc_list)
-{
- int i;
- struct sk_buff *item;
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
-
- spin_lock_bh(&queue->queue.lock);
- while ((item = __skb_dequeue(&queue->queue)) != NULL)
- skb_queue_head(gc_list, item);
- spin_lock_nested(&stats->pending.lock, 1);
- for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
- stats->link_map_cache[i] -= queue->link_map_cache[i];
- queue->link_map_cache[i] = 0;
- }
- spin_unlock(&stats->pending.lock);
- spin_unlock_bh(&queue->queue.lock);
-}
-
-void wfx_tx_queues_clear(struct wfx_dev *wdev)
-{
- int i;
- struct sk_buff *item;
- struct sk_buff_head gc_list;
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
-
- skb_queue_head_init(&gc_list);
- for (i = 0; i < IEEE80211_NUM_ACS; ++i)
- wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
- wake_up(&stats->wait_link_id_empty);
- while ((item = skb_dequeue(&gc_list)) != NULL)
- wfx_skb_dtor(wdev, item);
-}
-
-void wfx_tx_queues_init(struct wfx_dev *wdev)
-{
- int i;
-
- memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
- memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
- skb_queue_head_init(&wdev->tx_queue_stats.pending);
- init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);
-
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- wdev->tx_queue[i].queue_id = i;
- skb_queue_head_init(&wdev->tx_queue[i].queue);
}
+ spin_unlock_bh(&skb_queue->lock);
+ return true;
}
-void wfx_tx_queues_deinit(struct wfx_dev *wdev)
+bool wfx_tx_queue_empty(struct wfx_dev *wdev,
+ struct wfx_queue *queue, int vif_id)
{
- WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
- wfx_tx_queues_clear(wdev);
+ return __wfx_tx_queue_empty(wdev, &queue->normal, vif_id) &&
+ __wfx_tx_queue_empty(wdev, &queue->cab, vif_id);
}
-int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map)
+static void __wfx_tx_queue_drop(struct wfx_dev *wdev,
+ struct sk_buff_head *skb_queue, int vif_id,
+ struct sk_buff_head *dropped)
{
- int ret, i;
-
- if (!link_id_map)
- return 0;
-
- spin_lock_bh(&queue->queue.lock);
- if (link_id_map == (u32)-1) {
- ret = skb_queue_len(&queue->queue);
- } else {
- ret = 0;
- for (i = 0; i < ARRAY_SIZE(queue->link_map_cache); i++)
- if (link_id_map & BIT(i))
- ret += queue->link_map_cache[i];
+ struct sk_buff *skb, *tmp;
+ struct hif_msg *hif_msg;
+
+ spin_lock_bh(&skb_queue->lock);
+ skb_queue_walk_safe(skb_queue, skb, tmp) {
+ hif_msg = (struct hif_msg *)skb->data;
+ if (vif_id < 0 || hif_msg->interface == vif_id) {
+ __skb_unlink(skb, skb_queue);
+ skb_queue_head(dropped, skb);
+ }
}
- spin_unlock_bh(&queue->queue.lock);
- return ret;
+ spin_unlock_bh(&skb_queue->lock);
}
-void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
- struct sk_buff *skb)
+void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
+ int vif_id, struct sk_buff_head *dropped)
{
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
- struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
-
- WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache), "invalid link-id value");
- spin_lock_bh(&queue->queue.lock);
- __skb_queue_tail(&queue->queue, skb);
-
- ++queue->link_map_cache[tx_priv->link_id];
-
- spin_lock_nested(&stats->pending.lock, 1);
- ++stats->link_map_cache[tx_priv->link_id];
- spin_unlock(&stats->pending.lock);
- spin_unlock_bh(&queue->queue.lock);
-}
-
-static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
- struct wfx_queue *queue,
- u32 link_id_map)
-{
- struct sk_buff *skb = NULL;
- struct sk_buff *item;
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
- struct wfx_tx_priv *tx_priv;
- bool wakeup_stats = false;
-
- spin_lock_bh(&queue->queue.lock);
- skb_queue_walk(&queue->queue, item) {
- tx_priv = wfx_skb_tx_priv(item);
- if (link_id_map & BIT(tx_priv->link_id)) {
- skb = item;
- break;
- }
- }
- if (skb) {
- tx_priv = wfx_skb_tx_priv(skb);
- tx_priv->xmit_timestamp = ktime_get();
- __skb_unlink(skb, &queue->queue);
- --queue->link_map_cache[tx_priv->link_id];
-
- spin_lock_nested(&stats->pending.lock, 1);
- __skb_queue_tail(&stats->pending, skb);
- if (!--stats->link_map_cache[tx_priv->link_id])
- wakeup_stats = true;
- spin_unlock(&stats->pending.lock);
- }
- spin_unlock_bh(&queue->queue.lock);
- if (wakeup_stats)
- wake_up(&stats->wait_link_id_empty);
- return skb;
+ __wfx_tx_queue_drop(wdev, &queue->cab, vif_id, dropped);
+ __wfx_tx_queue_drop(wdev, &queue->normal, vif_id, dropped);
+ wake_up(&wdev->tx_dequeue);
}
-int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
+void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb)
{
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
- struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- WARN_ON(skb_get_queue_mapping(skb) > 3);
- spin_lock_bh(&queue->queue.lock);
- ++queue->link_map_cache[tx_priv->link_id];
-
- spin_lock_nested(&stats->pending.lock, 1);
- ++stats->link_map_cache[tx_priv->link_id];
- __skb_unlink(skb, &stats->pending);
- spin_unlock(&stats->pending.lock);
- __skb_queue_tail(&queue->queue, skb);
- spin_unlock_bh(&queue->queue.lock);
- return 0;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ skb_queue_tail(&queue->cab, skb);
+ else
+ skb_queue_tail(&queue->normal, skb);
}
-int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
+void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
{
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
-
- spin_lock_bh(&stats->pending.lock);
- __skb_unlink(skb, &stats->pending);
- spin_unlock_bh(&stats->pending.lock);
- wfx_skb_dtor(wdev, skb);
+ struct wfx_queue *queue;
+ struct sk_buff *skb;
- return 0;
+ WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
+ __func__);
+ while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
+ queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ WARN_ON(!atomic_read(&queue->pending_frames));
+ atomic_dec(&queue->pending_frames);
+ skb_queue_head(dropped, skb);
+ }
}
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
- struct sk_buff *skb;
+ struct wfx_queue *queue;
struct hif_req_tx *req;
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct sk_buff *skb;
- spin_lock_bh(&stats->pending.lock);
- skb_queue_walk(&stats->pending, skb) {
+ spin_lock_bh(&wdev->tx_pending.lock);
+ skb_queue_walk(&wdev->tx_pending, skb) {
req = wfx_skb_txreq(skb);
if (req->packet_id == packet_id) {
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&wdev->tx_pending.lock);
+ queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ WARN_ON(!atomic_read(&queue->pending_frames));
+ atomic_dec(&queue->pending_frames);
+ skb_unlink(skb, &wdev->tx_pending);
return skb;
}
}
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&wdev->tx_pending.lock);
WARN(1, "cannot find packet in pending queue");
return NULL;
}
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
ktime_t now = ktime_get();
struct wfx_tx_priv *tx_priv;
struct hif_req_tx *req;
struct sk_buff *skb;
bool first = true;
- spin_lock_bh(&stats->pending.lock);
- skb_queue_walk(&stats->pending, skb) {
+ spin_lock_bh(&wdev->tx_pending.lock);
+ skb_queue_walk(&wdev->tx_pending, skb) {
tx_priv = wfx_skb_tx_priv(skb);
req = wfx_skb_txreq(skb);
if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
@@ -324,7 +207,7 @@ void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
ktime_ms_delta(now, tx_priv->xmit_timestamp));
}
}
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&wdev->tx_pending.lock);
}
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
@@ -336,138 +219,66 @@ unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
return ktime_us_delta(now, tx_priv->xmit_timestamp);
}
-bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
+bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
{
+ struct wfx_dev *wdev = wvif->wdev;
int i;
- struct sk_buff_head *queue;
- bool ret = true;
-
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- queue = &wdev->tx_queue[i].queue;
- spin_lock_bh(&queue->lock);
- if (!skb_queue_empty(queue))
- ret = false;
- spin_unlock_bh(&queue->lock);
- }
- return ret;
-}
-
-static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
- struct wfx_queue *queue)
-{
- struct hif_req_tx *req = wfx_skb_txreq(skb);
- struct ieee80211_key_conf *hw_key = wfx_skb_tx_priv(skb)->hw_key;
- struct ieee80211_hdr *frame =
- (struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);
-
- // FIXME: mac80211 is smart enough to handle BSS loss. Driver should not
- // try to do anything about that.
- if (ieee80211_is_nullfunc(frame->frame_control)) {
- mutex_lock(&wvif->bss_loss_lock);
- if (wvif->bss_loss_state) {
- wvif->bss_loss_confirm_id = req->packet_id;
- req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
- }
- mutex_unlock(&wvif->bss_loss_lock);
- }
- // FIXME: identify the exact scenario matched by this condition. Does it
- // happen yet?
- if (ieee80211_has_protected(frame->frame_control) &&
- hw_key && hw_key->keyidx != wvif->wep_default_key_id &&
- (hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
- wfx_tx_lock(wvif->wdev);
- WARN_ON(wvif->wep_pending_skb);
- wvif->wep_default_key_id = hw_key->keyidx;
- wvif->wep_pending_skb = skb;
- if (!schedule_work(&wvif->wep_key_work))
- wfx_tx_unlock(wvif->wdev);
- return true;
- } else {
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
return false;
- }
-}
-
-static int wfx_get_prio_queue(struct wfx_vif *wvif,
- u32 tx_allowed_mask, int *total)
-{
- static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
- BIT(WFX_LINK_ID_UAPSD);
- const struct ieee80211_tx_queue_params *edca;
- unsigned int score, best = -1;
- int winner = -1;
- int i;
-
- /* search for a winner using edca params */
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- int queued;
-
- edca = &wvif->edca_params[i];
- queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
- tx_allowed_mask);
- if (!queued)
- continue;
- *total += queued;
- score = ((edca->aifs + edca->cw_min) << 16) +
- ((edca->cw_max - edca->cw_min) *
- (get_random_int() & 0xFFFF));
- if (score < best && (winner < 0 || i != 3)) {
- best = score;
- winner = i;
- }
- }
-
- /* override winner if bursting */
- if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
- winner != wvif->wdev->tx_burst_idx &&
- !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
- tx_allowed_mask & urgent) &&
- wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx], tx_allowed_mask))
- winner = wvif->wdev->tx_burst_idx;
-
- return winner;
-}
-
-static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
- struct wfx_queue **queue_p,
- u32 *tx_allowed_mask_p)
-{
- int idx;
- u32 tx_allowed_mask;
- int total = 0;
-
- /* Search for unicast traffic */
- tx_allowed_mask = ~wvif->sta_asleep_mask;
- tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
- if (wvif->sta_asleep_mask)
- tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
- else
- tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
- idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
- if (idx < 0)
- return -ENOENT;
-
- *queue_p = &wvif->wdev->tx_queue[idx];
- *tx_allowed_mask_p = tx_allowed_mask;
- return 0;
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i)
+ // Note: since only an AP can have mcast frames queued and only
+ // one vif can be AP, all queued frames have the same interface id
+ if (!skb_queue_empty_lockless(&wdev->tx_queue[i].cab))
+ return true;
+ return false;
}
-struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif)
+static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
{
- struct wfx_dev *wdev = wvif->wdev;
- struct ieee80211_tx_info *tx_info;
+ struct wfx_queue *sorted_queues[IEEE80211_NUM_ACS];
+ struct wfx_vif *wvif;
struct hif_msg *hif;
struct sk_buff *skb;
- int i;
+ int i, j;
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- skb_queue_walk(&wdev->tx_queue[i].queue, skb) {
- tx_info = IEEE80211_SKB_CB(skb);
+ // bubble sort
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ sorted_queues[i] = &wdev->tx_queue[i];
+ for (j = i; j > 0; j--)
+ if (atomic_read(&sorted_queues[j]->pending_frames) >
+ atomic_read(&sorted_queues[j - 1]->pending_frames))
+ swap(sorted_queues[j - 1], sorted_queues[j]);
+ }
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ if (!wvif->after_dtim_tx_allowed)
+ continue;
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ skb = skb_dequeue(&sorted_queues[i]->cab);
+ if (!skb)
+ continue;
+ // Note: since only an AP can have mcast frames queued
+ // and only one vif can be AP, all queued frames have
+ // the same interface id
hif = (struct hif_msg *)skb->data;
- if ((tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) &&
- (hif->interface == wvif->id))
- return (struct hif_msg *)skb->data;
+ WARN_ON(hif->interface != wvif->id);
+ WARN_ON(sorted_queues[i] !=
+ &wdev->tx_queue[skb_get_queue_mapping(skb)]);
+ atomic_inc(&sorted_queues[i]->pending_frames);
+ return skb;
+ }
+ // No more multicast to send
+ wvif->after_dtim_tx_allowed = false;
+ schedule_work(&wvif->update_tim_work);
+ }
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ skb = skb_dequeue(&sorted_queues[i]->normal);
+ if (skb) {
+ WARN_ON(sorted_queues[i] !=
+ &wdev->tx_queue[skb_get_queue_mapping(skb)]);
+ atomic_inc(&sorted_queues[i]->pending_frames);
+ return skb;
}
}
return NULL;
@@ -475,90 +286,20 @@ struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif)
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
+ struct wfx_tx_priv *tx_priv;
struct sk_buff *skb;
- struct hif_msg *hif = NULL;
- struct wfx_queue *queue = NULL;
- struct wfx_queue *vif_queue = NULL;
- u32 tx_allowed_mask = 0;
- u32 vif_tx_allowed_mask = 0;
- struct wfx_vif *wvif;
- int not_found;
- int burst;
- int i;
if (atomic_read(&wdev->tx_lock))
return NULL;
- wvif = NULL;
- while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- if (wvif->after_dtim_tx_allowed) {
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- skb = wfx_tx_queue_get(wvif->wdev,
- &wdev->tx_queue[i],
- BIT(WFX_LINK_ID_AFTER_DTIM));
- if (skb) {
- hif = (struct hif_msg *)skb->data;
- // Cannot happen since only one vif can
- // be AP at time
- WARN_ON(wvif->id != hif->interface);
- return hif;
- }
- }
- // No more multicast to sent
- wvif->after_dtim_tx_allowed = false;
- schedule_work(&wvif->update_tim_work);
- }
- }
-
for (;;) {
- int ret = -ENOENT;
- int queue_num;
-
- wvif = NULL;
- while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- spin_lock_bh(&wvif->ps_state_lock);
-
- not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
- &vif_tx_allowed_mask);
-
- spin_unlock_bh(&wvif->ps_state_lock);
-
- if (!not_found) {
- if (queue && queue != vif_queue)
- dev_info(wdev->dev, "vifs disagree about queue priority\n");
- tx_allowed_mask |= vif_tx_allowed_mask;
- queue = vif_queue;
- ret = 0;
- }
- }
-
- if (ret)
- return NULL;
-
- queue_num = queue - wdev->tx_queue;
-
- skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
+ skb = wfx_tx_queues_get_skb(wdev);
if (!skb)
- continue;
- hif = (struct hif_msg *)skb->data;
- wvif = wdev_to_wvif(wdev, hif->interface);
- WARN_ON(!wvif);
-
- if (hif_handle_tx_data(wvif, skb, queue))
- continue; /* Handled by WSM */
-
- /* allow bursting if txop is set */
- if (wvif->edca_params[queue_num].txop)
- burst = wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
- else
- burst = 1;
-
- /* store index of bursting queue */
- if (burst > 1)
- wdev->tx_burst_idx = queue_num;
- else
- wdev->tx_burst_idx = -1;
-
- return hif;
+ return NULL;
+ skb_queue_tail(&wdev->tx_pending, skb);
+ wake_up(&wdev->tx_dequeue);
+ tx_priv = wfx_skb_tx_priv(skb);
+ tx_priv->xmit_timestamp = ktime_get();
+ return (struct hif_msg *)skb->data;
}
}
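
The dequeue path above orders the four AC queues by their pending_frames counter before picking an skb (the in-line "bubble sort" comment). A standalone sketch of that ordering, using plain ints where the driver uses atomic_t; with the comparison used in the hunk, the most loaded AC ends up first:

#include <stdio.h>

#define NUM_ACS 4

struct queue { int pending_frames; };

static void swap_ptr(struct queue **a, struct queue **b)
{
	struct queue *t = *a; *a = *b; *b = t;
}

int main(void)
{
	struct queue acs[NUM_ACS] = { {3}, {0}, {7}, {1} };
	struct queue *sorted[NUM_ACS];

	/* same nested-loop pass as wfx_tx_queues_get_skb() */
	for (int i = 0; i < NUM_ACS; i++) {
		sorted[i] = &acs[i];
		for (int j = i; j > 0; j--)
			if (sorted[j]->pending_frames >
			    sorted[j - 1]->pending_frames)
				swap_ptr(&sorted[j - 1], &sorted[j]);
	}
	for (int i = 0; i < NUM_ACS; i++)
		printf("%d ", sorted[i]->pending_frames);
	printf("\n"); /* prints: 7 3 1 0 */
	return 0;
}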
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
index 90bb060d1204..0c3b7244498e 100644
--- a/drivers/staging/wfx/queue.h
+++ b/drivers/staging/wfx/queue.h
@@ -9,29 +9,15 @@
#define WFX_QUEUE_H
#include <linux/skbuff.h>
-
-#include "hif_api_cmd.h"
-
-#define WFX_MAX_STA_IN_AP_MODE 14
-#define WFX_LINK_ID_NO_ASSOC 15
-#define WFX_LINK_ID_AFTER_DTIM (WFX_LINK_ID_NO_ASSOC + 1)
-#define WFX_LINK_ID_UAPSD (WFX_LINK_ID_NO_ASSOC + 2)
-#define WFX_LINK_ID_MAX (WFX_LINK_ID_NO_ASSOC + 3)
+#include <linux/atomic.h>
struct wfx_dev;
struct wfx_vif;
struct wfx_queue {
- struct sk_buff_head queue;
- int tx_locked_cnt;
- int link_map_cache[WFX_LINK_ID_MAX];
- u8 queue_id;
-};
-
-struct wfx_queue_stats {
- int link_map_cache[WFX_LINK_ID_MAX];
- struct sk_buff_head pending;
- wait_queue_head_t wait_link_id_empty;
+ struct sk_buff_head normal;
+ struct sk_buff_head cab; // Content After (DTIM) Beacon
+ atomic_t pending_frames;
};
void wfx_tx_lock(struct wfx_dev *wdev);
@@ -40,22 +26,18 @@ void wfx_tx_flush(struct wfx_dev *wdev);
void wfx_tx_lock_flush(struct wfx_dev *wdev);
void wfx_tx_queues_init(struct wfx_dev *wdev);
-void wfx_tx_queues_deinit(struct wfx_dev *wdev);
-void wfx_tx_queues_lock(struct wfx_dev *wdev);
-void wfx_tx_queues_unlock(struct wfx_dev *wdev);
-void wfx_tx_queues_clear(struct wfx_dev *wdev);
-bool wfx_tx_queues_is_empty(struct wfx_dev *wdev);
-void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
+void wfx_tx_queues_check_empty(struct wfx_dev *wdev);
+bool wfx_tx_queues_has_cab(struct wfx_vif *wvif);
+void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb);
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
-struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif);
-void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
- struct sk_buff *skb);
-int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
+bool wfx_tx_queue_empty(struct wfx_dev *wdev, struct wfx_queue *queue,
+ int vif_id);
+void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
+ int vif_id, struct sk_buff_head *dropped);
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
-int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
-int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb);
+void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped);
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
struct sk_buff *skb);
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms);
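
The new struct wfx_queue pairs two skb lists with an atomic in-flight counter: a frame increments pending_frames when it moves to tx_pending and decrements it when the firmware confirms or drops it. A toy model of that invariant (illustrative only, not the driver's API):

#include <stdio.h>

struct queue {
	int queued;  /* stands in for the normal/cab skb lists */
	int pending; /* stands in for atomic_t pending_frames */
};

static void dequeue_to_pending(struct queue *q)
{
	if (q->queued > 0) {
		q->queued--;
		q->pending++; /* atomic_inc() in the driver */
	}
}

static void confirm(struct queue *q)
{
	if (q->pending > 0)
		q->pending--; /* atomic_dec() in the driver */
}

int main(void)
{
	struct queue q = { .queued = 2 };

	dequeue_to_pending(&q);
	confirm(&q);
	dequeue_to_pending(&q);
	confirm(&q);
	/* both counters must return to zero, which is exactly what
	 * wfx_tx_queues_check_empty() asserts */
	printf("queued=%d pending=%d\n", q.queued, q.pending); /* 0 0 */
	return 0;
}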
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
index 6e1e50048651..57ea9997800b 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/staging/wfx/scan.c
@@ -57,8 +57,10 @@ static int send_scan_req(struct wfx_vif *wvif,
wvif->scan_abort = false;
reinit_completion(&wvif->scan_complete);
timeout = hif_scan(wvif, req, start_idx, i - start_idx);
- if (timeout < 0)
+ if (timeout < 0) {
+ wfx_tx_unlock(wvif->wdev);
return timeout;
+ }
ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
hif_set_output_power(wvif, wvif->vif->bss_conf.txpower);
@@ -86,18 +88,22 @@ void wfx_hw_scan_work(struct work_struct *work)
struct ieee80211_scan_request *hw_req = wvif->scan_req;
int chan_cur, ret;
- mutex_lock(&wvif->scan_lock);
mutex_lock(&wvif->wdev->conf_mutex);
+ mutex_lock(&wvif->scan_lock);
+ if (wvif->join_in_progress) {
+ dev_info(wvif->wdev->dev, "%s: abort in-progress REQ_JOIN",
+ __func__);
+ wfx_reset(wvif);
+ }
update_probe_tmpl(wvif, &hw_req->req);
- wfx_fwd_probe_req(wvif, true);
chan_cur = 0;
do {
ret = send_scan_req(wvif, &hw_req->req, chan_cur);
if (ret > 0)
chan_cur += ret;
} while (ret > 0 && chan_cur < hw_req->req.n_channels);
- mutex_unlock(&wvif->wdev->conf_mutex);
mutex_unlock(&wvif->scan_lock);
+ mutex_unlock(&wvif->wdev->conf_mutex);
__ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0);
}
@@ -111,9 +117,6 @@ int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_AP)
return -EOPNOTSUPP;
- if (wvif->state == WFX_STATE_PRE_STA)
- return -EBUSY;
-
wvif->scan_req = hw_req;
schedule_work(&wvif->scan_work);
return 0;
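
The scan.c hunk above also swaps the order in which wfx_hw_scan_work() takes conf_mutex and scan_lock, so every path acquires them in the same global order; mixed orders are the classic ABBA deadlock. A minimal pthread sketch of the rule (names are illustrative, not the driver's locks):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t scan_lock  = PTHREAD_MUTEX_INITIALIZER;

static void scan_work(void)
{
	/* always conf_mutex first, then scan_lock */
	pthread_mutex_lock(&conf_mutex);
	pthread_mutex_lock(&scan_lock);
	puts("scanning");
	pthread_mutex_unlock(&scan_lock);
	pthread_mutex_unlock(&conf_mutex);
}

int main(void)
{
	scan_work();
	return 0;
}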
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index 9d430346a58b..12e8a5b638f1 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -5,6 +5,7 @@
* Copyright (c) 2017-2019, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "sta.h"
@@ -37,117 +38,32 @@ u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
return ret;
}
-static void __wfx_free_event_queue(struct list_head *list)
+void wfx_cooling_timeout_work(struct work_struct *work)
{
- struct wfx_hif_event *event, *tmp;
+ struct wfx_dev *wdev = container_of(to_delayed_work(work),
+ struct wfx_dev,
+ cooling_timeout_work);
- list_for_each_entry_safe(event, tmp, list, link) {
- list_del(&event->link);
- kfree(event);
- }
-}
-
-static void wfx_free_event_queue(struct wfx_vif *wvif)
-{
- LIST_HEAD(list);
-
- spin_lock(&wvif->event_queue_lock);
- list_splice_init(&wvif->event_queue, &list);
- spin_unlock(&wvif->event_queue_lock);
-
- __wfx_free_event_queue(&list);
+ wdev->chip_frozen = true;
+ wfx_tx_unlock(wdev);
}
-void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
+void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd)
{
- int tx = 0;
-
- mutex_lock(&wvif->bss_loss_lock);
- cancel_work_sync(&wvif->bss_params_work);
-
- if (init) {
- schedule_delayed_work(&wvif->bss_loss_work, HZ);
- wvif->bss_loss_state = 0;
-
- if (!atomic_read(&wvif->wdev->tx_lock))
- tx = 1;
- } else if (good) {
- cancel_delayed_work_sync(&wvif->bss_loss_work);
- wvif->bss_loss_state = 0;
- schedule_work(&wvif->bss_params_work);
- } else if (bad) {
- /* FIXME Should we just keep going until we time out? */
- if (wvif->bss_loss_state < 3)
- tx = 1;
+ if (cmd == STA_NOTIFY_AWAKE) {
+ // Device recovered its normal temperature
+ if (cancel_delayed_work(&wdev->cooling_timeout_work))
+ wfx_tx_unlock(wdev);
} else {
- cancel_delayed_work_sync(&wvif->bss_loss_work);
- wvif->bss_loss_state = 0;
- }
-
- /* Spit out a NULL packet to our AP if necessary */
- // FIXME: call ieee80211_beacon_loss/ieee80211_connection_loss instead
- if (tx) {
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- struct ieee80211_tx_control control = { };
-
- wvif->bss_loss_state++;
-
- skb = ieee80211_nullfunc_get(wvif->wdev->hw, wvif->vif, false);
- if (!skb)
- goto end;
- hdr = (struct ieee80211_hdr *)skb->data;
- memset(IEEE80211_SKB_CB(skb), 0,
- sizeof(*IEEE80211_SKB_CB(skb)));
- IEEE80211_SKB_CB(skb)->control.vif = wvif->vif;
- IEEE80211_SKB_CB(skb)->driver_rates[0].idx = 0;
- IEEE80211_SKB_CB(skb)->driver_rates[0].count = 1;
- IEEE80211_SKB_CB(skb)->driver_rates[1].idx = -1;
- rcu_read_lock(); // protect control.sta
- control.sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
- wfx_tx(wvif->wdev->hw, &control, skb);
- rcu_read_unlock();
+ // Device is too hot
+ schedule_delayed_work(&wdev->cooling_timeout_work, 10 * HZ);
+ wfx_tx_lock(wdev);
}
-end:
- mutex_unlock(&wvif->bss_loss_lock);
-}
-
-int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
-{
- wvif->fwd_probe_req = enable;
- return hif_set_rx_filter(wvif, wvif->filter_bssid,
- wvif->fwd_probe_req);
-}
-
-static int wfx_set_mcast_filter(struct wfx_vif *wvif,
- struct wfx_grp_addr_table *fp)
-{
- int i;
-
- // Temporary workaround for filters
- return hif_set_data_filtering(wvif, false, true);
-
- if (!fp->enable)
- return hif_set_data_filtering(wvif, false, true);
-
- for (i = 0; i < fp->num_addresses; i++)
- hif_set_mac_addr_condition(wvif, i, fp->address_list[i]);
- hif_set_uc_mc_bc_condition(wvif, 0,
- HIF_FILTER_UNICAST | HIF_FILTER_BROADCAST);
- hif_set_config_data_filter(wvif, true, 0, BIT(1),
- BIT(fp->num_addresses) - 1);
- hif_set_data_filtering(wvif, true, true);
-
- return 0;
}
-void wfx_update_filtering(struct wfx_vif *wvif)
+static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon)
{
- int ret;
- int bf_enable;
- int bf_count;
- int n_filter_ies;
- struct hif_ie_table_entry filter_ies[] = {
+ const struct hif_ie_table_entry filter_ies[] = {
{
.ie_id = WLAN_EID_VENDOR_SPECIFIC,
.has_changed = 1,
@@ -167,40 +83,33 @@ void wfx_update_filtering(struct wfx_vif *wvif)
}
};
- if (wvif->state == WFX_STATE_PASSIVE)
- return;
-
- if (wvif->disable_beacon_filter) {
- bf_enable = 0;
- bf_count = 1;
- n_filter_ies = 0;
- } else if (wvif->vif->type != NL80211_IFTYPE_STATION) {
- bf_enable = HIF_BEACON_FILTER_ENABLE | HIF_BEACON_FILTER_AUTO_ERP;
- bf_count = 0;
- n_filter_ies = 2;
+ if (!filter_beacon) {
+ hif_beacon_filter_control(wvif, 0, 1);
} else {
- bf_enable = HIF_BEACON_FILTER_ENABLE;
- bf_count = 0;
- n_filter_ies = 3;
+ hif_set_beacon_filter_table(wvif, 3, filter_ies);
+ hif_beacon_filter_control(wvif, HIF_BEACON_FILTER_ENABLE, 0);
}
-
- ret = hif_set_rx_filter(wvif, wvif->filter_bssid, wvif->fwd_probe_req);
- if (!ret)
- ret = hif_set_beacon_filter_table(wvif, n_filter_ies, filter_ies);
- if (!ret)
- ret = hif_beacon_filter_control(wvif, bf_enable, bf_count);
- if (!ret)
- ret = wfx_set_mcast_filter(wvif, &wvif->mcast_filter);
- if (ret)
- dev_err(wvif->wdev->dev, "update filtering failed: %d\n", ret);
}
-static void wfx_update_filtering_work(struct work_struct *work)
+static void wfx_filter_mcast(struct wfx_vif *wvif, bool filter_mcast)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- update_filtering_work);
+ int i;
- wfx_update_filtering(wvif);
+ // Temporary workaround for filters
+ hif_set_data_filtering(wvif, false, true);
+ return;
+
+ if (!filter_mcast) {
+ hif_set_data_filtering(wvif, false, true);
+ return;
+ }
+ for (i = 0; i < wvif->filter_mcast_count; i++)
+ hif_set_mac_addr_condition(wvif, i, wvif->filter_mcast_addr[i]);
+ hif_set_uc_mc_bc_condition(wvif, 0,
+ HIF_FILTER_UNICAST | HIF_FILTER_BROADCAST);
+ hif_set_config_data_filter(wvif, true, 0, BIT(1),
+ BIT(wvif->filter_mcast_count) - 1);
+ hif_set_data_filtering(wvif, true, true);
}
u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
@@ -213,72 +122,127 @@ u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
int count = netdev_hw_addr_list_count(mc_list);
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- memset(&wvif->mcast_filter, 0x00, sizeof(wvif->mcast_filter));
- if (!count ||
- count > ARRAY_SIZE(wvif->mcast_filter.address_list))
+ if (count > ARRAY_SIZE(wvif->filter_mcast_addr)) {
+ wvif->filter_mcast_count = 0;
continue;
+ }
+ wvif->filter_mcast_count = count;
i = 0;
netdev_hw_addr_list_for_each(ha, mc_list) {
- ether_addr_copy(wvif->mcast_filter.address_list[i],
- ha->addr);
+ ether_addr_copy(wvif->filter_mcast_addr[i], ha->addr);
i++;
}
- wvif->mcast_filter.enable = true;
- wvif->mcast_filter.num_addresses = count;
}
return 0;
}
-void wfx_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 unused)
+void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 unused)
{
struct wfx_vif *wvif = NULL;
struct wfx_dev *wdev = hw->priv;
+ bool filter_bssid, filter_prbreq, filter_beacon, filter_mcast;
+
+ // Notes:
+ // - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered
+ // - PS-Poll (FIF_PSPOLL) are never filtered
+ // - RTS, CTS and Ack (FIF_CONTROL) are always filtered
+ // - Broken frames (FIF_FCSFAIL and FIF_PLCPFAIL) are always filtered
+ // - Firmware does not (yet) allow forwarding unicast traffic sent
+ // to other stations (aka. promiscuous mode)
+ *total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS |
+ FIF_PROBE_REQ | FIF_PSPOLL;
- *total_flags &= FIF_OTHER_BSS | FIF_FCSFAIL | FIF_PROBE_REQ;
-
+ mutex_lock(&wdev->conf_mutex);
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
mutex_lock(&wvif->scan_lock);
- wvif->filter_bssid = (*total_flags &
- (FIF_OTHER_BSS | FIF_PROBE_REQ)) ? 0 : 1;
- wvif->disable_beacon_filter = !(*total_flags & FIF_PROBE_REQ);
- wfx_fwd_probe_req(wvif, true);
- wfx_update_filtering(wvif);
+
+ // Note: FIF_BCN_PRBRESP_PROMISC covers probe response and
+ // beacons from other BSS
+ if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
+ filter_beacon = false;
+ else
+ filter_beacon = true;
+ wfx_filter_beacon(wvif, filter_beacon);
+
+ if (*total_flags & FIF_ALLMULTI) {
+ filter_mcast = false;
+ } else if (!wvif->filter_mcast_count) {
+ dev_dbg(wdev->dev, "disabling unconfigured multicast filter");
+ filter_mcast = false;
+ } else {
+ filter_mcast = true;
+ }
+ wfx_filter_mcast(wvif, filter_mcast);
+
+ if (*total_flags & FIF_OTHER_BSS)
+ filter_bssid = false;
+ else
+ filter_bssid = true;
+
+ // In AP mode, the chip can reply to probe requests itself
+ if (*total_flags & FIF_PROBE_REQ &&
+ wvif->vif->type == NL80211_IFTYPE_AP) {
+ dev_dbg(wdev->dev, "do not forward probe request in AP mode\n");
+ *total_flags &= ~FIF_PROBE_REQ;
+ }
+
+ if (*total_flags & FIF_PROBE_REQ)
+ filter_prbreq = false;
+ else
+ filter_prbreq = true;
+ hif_set_rx_filter(wvif, filter_bssid, filter_prbreq);
+
mutex_unlock(&wvif->scan_lock);
}
+ mutex_unlock(&wdev->conf_mutex);
}
-static int wfx_update_pm(struct wfx_vif *wvif)
+int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
{
- struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
- bool ps = conf->flags & IEEE80211_CONF_PS;
- int ps_timeout = conf->dynamic_ps_timeout;
struct ieee80211_channel *chan0 = NULL, *chan1 = NULL;
+ struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
- WARN_ON(conf->dynamic_ps_timeout < 0);
- if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
- return 0;
- if (!ps)
- ps_timeout = 0;
- if (wvif->uapsd_mask)
- ps_timeout = 0;
-
- // Kernel disable powersave when an AP is in use. In contrary, it is
- // absolutely necessary to enable legacy powersave for WF200 if channels
- // are differents.
+ WARN(!wvif->vif->bss_conf.assoc && enable_ps,
+ "enable_ps is reliable only if associated");
if (wdev_to_wvif(wvif->wdev, 0))
chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan;
if (wdev_to_wvif(wvif->wdev, 1))
chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan;
if (chan0 && chan1 && chan0->hw_value != chan1->hw_value &&
wvif->vif->type != NL80211_IFTYPE_AP) {
- ps = true;
- ps_timeout = 0;
+ // It is necessary to enable powersave if the channels
+ // are different.
+ if (enable_ps)
+ *enable_ps = true;
+ if (wvif->bss_not_support_ps_poll)
+ return 30;
+ else
+ return 0;
}
+ if (enable_ps)
+ *enable_ps = wvif->vif->bss_conf.ps;
+ if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps)
+ return conf->dynamic_ps_timeout;
+ else
+ return -1;
+}
+
+int wfx_update_pm(struct wfx_vif *wvif)
+{
+ int ps_timeout;
+ bool ps;
+
+ if (!wvif->vif->bss_conf.assoc)
+ return 0;
+ ps_timeout = wfx_get_ps_timeout(wvif, &ps);
+ if (!ps)
+ ps_timeout = 0;
+ WARN_ON(ps_timeout < 0);
+ if (wvif->uapsd_mask)
+ ps_timeout = 0;
if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
TU_TO_JIFFIES(512)))
@@ -287,18 +251,25 @@ static int wfx_update_pm(struct wfx_vif *wvif)
return hif_set_pm(wvif, ps, ps_timeout);
}
+static void wfx_update_pm_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ update_pm_work);
+
+ wfx_update_pm(wvif);
+}
+
int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 queue, const struct ieee80211_tx_queue_params *params)
{
struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
int old_uapsd = wvif->uapsd_mask;
WARN_ON(queue >= hw->queues);
mutex_lock(&wdev->conf_mutex);
assign_bit(queue, &wvif->uapsd_mask, params->uapsd);
- memcpy(&wvif->edca_params[queue], params, sizeof(*params));
hif_set_edca_queue_params(wvif, queue, params);
if (wvif->vif->type == NL80211_IFTYPE_STATION &&
old_uapsd != wvif->uapsd_mask) {
@@ -319,32 +290,9 @@ int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return 0;
}
-static int __wfx_flush(struct wfx_dev *wdev, bool drop)
-{
- for (;;) {
- if (drop)
- wfx_tx_queues_clear(wdev);
- if (wait_event_timeout(wdev->tx_queue_stats.wait_link_id_empty,
- wfx_tx_queues_is_empty(wdev),
- 2 * HZ) <= 0)
- return -ETIMEDOUT;
- wfx_tx_flush(wdev);
- if (wfx_tx_queues_is_empty(wdev))
- return 0;
- dev_warn(wdev->dev, "frames queued while flushing tx queues");
- }
-}
-
-void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
-{
- // FIXME: only flush requested vif and queues
- __wfx_flush(hw->priv, drop);
-}
-
/* WSM callbacks */
-static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
+void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
{
/* RSSI: signed Q8.0, RCPI: unsigned Q7.1
* RSSI = RCPI / 2 - 110
@@ -360,100 +308,23 @@ static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
ieee80211_cqm_rssi_notify(wvif->vif, cqm_evt, rcpi_rssi, GFP_KERNEL);
}
-static void wfx_event_handler_work(struct work_struct *work)
+static void wfx_beacon_loss_work(struct work_struct *work)
{
- struct wfx_vif *wvif =
- container_of(work, struct wfx_vif, event_handler_work);
- struct wfx_hif_event *event;
-
- LIST_HEAD(list);
-
- spin_lock(&wvif->event_queue_lock);
- list_splice_init(&wvif->event_queue, &list);
- spin_unlock(&wvif->event_queue_lock);
-
- list_for_each_entry(event, &list, link) {
- switch (event->evt.event_id) {
- case HIF_EVENT_IND_BSSLOST:
- cancel_work_sync(&wvif->unjoin_work);
- mutex_lock(&wvif->scan_lock);
- wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
- mutex_unlock(&wvif->scan_lock);
- break;
- case HIF_EVENT_IND_BSSREGAINED:
- wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
- cancel_work_sync(&wvif->unjoin_work);
- break;
- case HIF_EVENT_IND_RCPI_RSSI:
- wfx_event_report_rssi(wvif,
- event->evt.event_data.rcpi_rssi);
- break;
- case HIF_EVENT_IND_PS_MODE_ERROR:
- dev_warn(wvif->wdev->dev,
- "error while processing power save request\n");
- break;
- default:
- dev_warn(wvif->wdev->dev,
- "unhandled event indication: %.2x\n",
- event->evt.event_id);
- break;
- }
- }
- __wfx_free_event_queue(&list);
-}
-
-static void wfx_bss_loss_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- bss_loss_work.work);
-
- ieee80211_connection_loss(wvif->vif);
-}
-
-static void wfx_bss_params_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- bss_params_work);
+ struct wfx_vif *wvif = container_of(to_delayed_work(work),
+ struct wfx_vif, beacon_loss_work);
+ struct ieee80211_bss_conf *bss_conf = &wvif->vif->bss_conf;
- mutex_lock(&wvif->wdev->conf_mutex);
- wvif->bss_params.bss_flags.lost_count_only = 1;
- hif_set_bss_params(wvif, &wvif->bss_params);
- wvif->bss_params.bss_flags.lost_count_only = 0;
- mutex_unlock(&wvif->wdev->conf_mutex);
+ ieee80211_beacon_loss(wvif->vif);
+ schedule_delayed_work(to_delayed_work(work),
+ msecs_to_jiffies(bss_conf->beacon_int));
}
-static void wfx_do_unjoin(struct wfx_vif *wvif)
+void wfx_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx)
{
- mutex_lock(&wvif->wdev->conf_mutex);
-
- if (!wvif->state)
- goto done;
-
- if (wvif->state == WFX_STATE_AP)
- goto done;
-
- cancel_work_sync(&wvif->update_filtering_work);
- wvif->state = WFX_STATE_PASSIVE;
-
- /* Unjoin is a reset. */
- wfx_tx_flush(wvif->wdev);
- hif_keep_alive_period(wvif, 0);
- hif_reset(wvif, false);
- wfx_tx_policy_init(wvif);
- hif_set_macaddr(wvif, wvif->vif->addr);
- wfx_free_event_queue(wvif);
- cancel_work_sync(&wvif->event_handler_work);
- wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
- /* Disable Block ACKs */
- hif_set_block_ack_policy(wvif, 0, 0);
-
- wvif->disable_beacon_filter = false;
- wfx_update_filtering(wvif);
- memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
-
-done:
- mutex_unlock(&wvif->wdev->conf_mutex);
+ hif_wep_default_key_id(wvif, idx);
}
static void wfx_set_mfp(struct wfx_vif *wvif,
@@ -472,8 +343,7 @@ static void wfx_set_mfp(struct wfx_vif *wvif,
rcu_read_lock();
if (bss)
- ptr = (const u16 *) ieee80211_bss_get_ie(bss,
- WLAN_EID_RSN);
+ ptr = (const u16 *)ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
if (ptr) {
ptr += pairwise_cipher_suite_count_offset;
@@ -487,6 +357,24 @@ static void wfx_set_mfp(struct wfx_vif *wvif,
hif_set_mfp(wvif, mfpc, mfpr);
}
+void wfx_reset(struct wfx_vif *wvif)
+{
+ struct wfx_dev *wdev = wvif->wdev;
+
+ wfx_tx_lock_flush(wdev);
+ hif_reset(wvif, false);
+ wfx_tx_policy_init(wvif);
+ if (wvif_count(wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ wfx_tx_unlock(wdev);
+ wvif->join_in_progress = false;
+ wvif->bss_not_support_ps_poll = false;
+ cancel_delayed_work_sync(&wvif->beacon_loss_work);
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ wfx_update_pm(wvif);
+}
+
static void wfx_do_join(struct wfx_vif *wvif)
{
int ret;
@@ -498,9 +386,6 @@ static void wfx_do_join(struct wfx_vif *wvif)
wfx_tx_lock_flush(wvif->wdev);
- if (wvif->state)
- wfx_do_unjoin(wvif);
-
bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
conf->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
@@ -509,109 +394,72 @@ static void wfx_do_join(struct wfx_vif *wvif)
return;
}
- mutex_lock(&wvif->wdev->conf_mutex);
-
- /* Sanity check beacon interval */
- if (!wvif->beacon_int)
- wvif->beacon_int = 1;
-
rcu_read_lock(); // protect ssidie
- if (!conf->ibss_joined)
+ if (bss)
ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
if (ssidie) {
ssidlen = ssidie[1];
- memcpy(ssid, &ssidie[2], ssidie[1]);
+ if (ssidlen > IEEE80211_MAX_SSID_LEN)
+ ssidlen = IEEE80211_MAX_SSID_LEN;
+ memcpy(ssid, &ssidie[2], ssidlen);
}
rcu_read_unlock();
- wfx_tx_flush(wvif->wdev);
-
- if (wvif_count(wvif->wdev) <= 1)
- hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
-
wfx_set_mfp(wvif, bss);
+ cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
- wvif->wdev->tx_burst_idx = -1;
+ wvif->join_in_progress = true;
ret = hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
if (ret) {
ieee80211_connection_loss(wvif->vif);
- wvif->join_complete_status = -1;
- /* Tx lock still held, unjoin will clear it. */
- if (!schedule_work(&wvif->unjoin_work))
- wfx_tx_unlock(wvif->wdev);
+ wfx_reset(wvif);
} else {
- wvif->join_complete_status = 0;
- if (wvif->vif->type == NL80211_IFTYPE_ADHOC)
- wvif->state = WFX_STATE_IBSS;
- else
- wvif->state = WFX_STATE_PRE_STA;
- wfx_tx_unlock(wvif->wdev);
-
- /* Upload keys */
- wfx_upload_keys(wvif);
-
/* Due to beacon filtering it is possible that the
* AP's beacon is not known to the mac80211 stack.
* Disable filtering temporarily to make sure the stack
* receives at least one.
*/
- wvif->disable_beacon_filter = true;
+ wfx_filter_beacon(wvif, false);
}
- wfx_update_filtering(wvif);
-
- mutex_unlock(&wvif->wdev->conf_mutex);
- if (bss)
- cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
-}
-
-static void wfx_unjoin_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, unjoin_work);
-
- wfx_do_unjoin(wvif);
wfx_tx_unlock(wvif->wdev);
}
int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
spin_lock_init(&sta_priv->lock);
sta_priv->vif_id = wvif->id;
- // FIXME: in station mode, the current API interprets new link-id as a
- // tdls peer.
- if (vif->type == NL80211_IFTYPE_STATION)
+ // In station mode, the firmware interprets a new link-id as a TDLS peer.
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
return 0;
sta_priv->link_id = ffz(wvif->link_id_map);
wvif->link_id_map |= BIT(sta_priv->link_id);
WARN_ON(!sta_priv->link_id);
- WARN_ON(sta_priv->link_id >= WFX_MAX_STA_IN_AP_MODE);
+ WARN_ON(sta_priv->link_id >= HIF_LINK_ID_MAX);
hif_map_link(wvif, sta->addr, 0, sta_priv->link_id);
- spin_lock_bh(&wvif->ps_state_lock);
- if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
- IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
- wvif->sta_asleep_mask |= BIT(sta_priv->link_id);
- spin_unlock_bh(&wvif->ps_state_lock);
return 0;
}
int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
int i;
for (i = 0; i < ARRAY_SIZE(sta_priv->buffered); i++)
if (sta_priv->buffered[i])
- dev_warn(wvif->wdev->dev, "release station while %d pending frame on queue %d",
- sta_priv->buffered[i], i);
- // FIXME: see note in wfx_sta_add()
- if (vif->type == NL80211_IFTYPE_STATION)
+ // Not an error if paired with trace in
+ // wfx_tx_update_sta()
+ dev_dbg(wvif->wdev->dev, "release station while %d pending frame on queue %d",
+ sta_priv->buffered[i], i);
+ // See note in wfx_sta_add()
+ if (!sta_priv->link_id)
return 0;
// FIXME add a mutex?
hif_map_link(wvif, sta->addr, 1, sta_priv->link_id);
@@ -619,50 +467,10 @@ int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
-static int wfx_start_ap(struct wfx_vif *wvif)
-{
- int ret;
-
- wvif->beacon_int = wvif->vif->bss_conf.beacon_int;
- wvif->wdev->tx_burst_idx = -1;
- ret = hif_start(wvif, &wvif->vif->bss_conf, wvif->channel);
- if (ret)
- return ret;
- ret = wfx_upload_keys(wvif);
- if (ret)
- return ret;
- if (wvif_count(wvif->wdev) <= 1)
- hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
- wvif->state = WFX_STATE_AP;
- wfx_update_filtering(wvif);
- return 0;
-}
-
-static int wfx_update_beaconing(struct wfx_vif *wvif)
-{
- if (wvif->vif->type != NL80211_IFTYPE_AP)
- return 0;
- if (wvif->state == WFX_STATE_AP &&
- wvif->beacon_int == wvif->vif->bss_conf.beacon_int)
- return 0;
- wfx_tx_lock_flush(wvif->wdev);
- hif_reset(wvif, false);
- wfx_tx_policy_init(wvif);
- wvif->state = WFX_STATE_PASSIVE;
- wfx_start_ap(wvif);
- wfx_tx_unlock(wvif->wdev);
- return 0;
-}
-
static int wfx_upload_ap_templates(struct wfx_vif *wvif)
{
struct sk_buff *skb;
- if (wvif->vif->type == NL80211_IFTYPE_STATION ||
- wvif->vif->type == NL80211_IFTYPE_MONITOR ||
- wvif->vif->type == NL80211_IFTYPE_UNSPECIFIED)
- return 0;
-
skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
if (!skb)
return -ENOMEM;
@@ -679,52 +487,77 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif)
return 0;
}
+int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct wfx_dev *wdev = wvif->wdev;
+ int ret;
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ wfx_update_pm(wvif);
+ wvif = (struct wfx_vif *)vif->drv_priv;
+ wfx_upload_ap_templates(wvif);
+ ret = hif_start(wvif, &vif->bss_conf, wvif->channel);
+ if (ret > 0)
+ return -EIO;
+ return ret;
+}
+
+void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wfx_reset(wvif);
+}
+
static void wfx_join_finalize(struct wfx_vif *wvif,
struct ieee80211_bss_conf *info)
{
- struct ieee80211_sta *sta = NULL;
-
- wvif->beacon_int = info->beacon_int;
- rcu_read_lock(); // protect sta
- if (info->bssid && !info->ibss_joined)
- sta = ieee80211_find_sta(wvif->vif, info->bssid);
- if (sta)
- wvif->bss_params.operational_rate_set =
- wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
- else
- wvif->bss_params.operational_rate_set = -1;
- rcu_read_unlock();
- if (sta &&
- info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
- hif_dual_cts_protection(wvif, true);
- else
- hif_dual_cts_protection(wvif, false);
+ wvif->join_in_progress = false;
+ hif_set_association_mode(wvif, info);
+ hif_keep_alive_period(wvif, 0);
+ // beacon_loss_count is defined as 7 in net/mac80211/mlme.c. Let's use
+ // the same value.
+ hif_set_bss_params(wvif, info->aid, 7);
+ hif_set_beacon_wakeup_period(wvif, 1, 1);
+ wfx_update_pm(wvif);
+}
- wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
- cancel_work_sync(&wvif->unjoin_work);
+int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
- wvif->bss_params.beacon_lost_count = 20;
- wvif->bss_params.aid = info->aid;
+ wfx_upload_ap_templates(wvif);
+ wfx_do_join(wvif);
+ return 0;
+}
- hif_set_association_mode(wvif, info);
+void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
- if (!info->ibss_joined) {
- hif_keep_alive_period(wvif, 30 /* sec */);
- hif_set_bss_params(wvif, &wvif->bss_params);
- hif_set_beacon_wakeup_period(wvif, info->dtim_period,
- info->dtim_period);
- wfx_update_pm(wvif);
+ wfx_reset(wvif);
+}
+
+static void wfx_enable_beacon(struct wfx_vif *wvif, bool enable)
+{
+ // Driver has Content After DTIM Beacon in queue. Driver is waiting for
+ // a signal from the firmware. Since we are going to stop sending
+ // beacons, this signal will never happen. See also
+ // wfx_suspend_resume_mc()
+ if (!enable && wfx_tx_queues_has_cab(wvif)) {
+ wvif->after_dtim_tx_allowed = true;
+ wfx_bh_request_tx(wvif->wdev);
}
+ hif_beacon_transmit(wvif, enable);
}
-void wfx_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed)
+void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
{
struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- bool do_join = false;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
int i;
mutex_lock(&wdev->conf_mutex);
@@ -742,92 +575,52 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BEACON ||
- changed & BSS_CHANGED_AP_PROBE_RESP ||
- changed & BSS_CHANGED_BSSID ||
- changed & BSS_CHANGED_SSID ||
- changed & BSS_CHANGED_IBSS) {
- wvif->beacon_int = info->beacon_int;
- wfx_update_beaconing(wvif);
- wfx_upload_ap_templates(wvif);
- wfx_fwd_probe_req(wvif, false);
+ if (changed & BSS_CHANGED_BASIC_RATES ||
+ changed & BSS_CHANGED_BEACON_INT ||
+ changed & BSS_CHANGED_BSSID) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wfx_do_join(wvif);
}
- if (changed & BSS_CHANGED_BEACON_ENABLED &&
- wvif->state != WFX_STATE_IBSS)
- hif_beacon_transmit(wvif, info->enable_beacon);
+ if (changed & BSS_CHANGED_AP_PROBE_RESP ||
+ changed & BSS_CHANGED_BEACON)
+ wfx_upload_ap_templates(wvif);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ wfx_enable_beacon(wvif, info->enable_beacon);
- if (changed & BSS_CHANGED_BEACON_INFO)
+ if (changed & BSS_CHANGED_BEACON_INFO) {
+ if (vif->type != NL80211_IFTYPE_STATION)
+ dev_warn(wdev->dev, "%s: misunderstood change: BEACON_INFO\n",
+ __func__);
hif_set_beacon_wakeup_period(wvif, info->dtim_period,
info->dtim_period);
-
- /* assoc/disassoc, or maybe AID changed */
- if (changed & BSS_CHANGED_ASSOC) {
- wfx_tx_lock_flush(wdev);
- wvif->wep_default_key_id = -1;
- wfx_tx_unlock(wdev);
+ // We temporarily forwarded beacons for the join process. This is no
+ // longer necessary.
+ wfx_filter_beacon(wvif, true);
}
- if (changed & BSS_CHANGED_ASSOC && !info->assoc &&
- (wvif->state == WFX_STATE_STA || wvif->state == WFX_STATE_IBSS)) {
- /* Shedule unjoin work */
- wfx_tx_lock(wdev);
- if (!schedule_work(&wvif->unjoin_work))
- wfx_tx_unlock(wdev);
- } else {
- if (changed & BSS_CHANGED_BEACON_INT) {
- if (info->ibss_joined)
- do_join = true;
- else if (wvif->state == WFX_STATE_AP)
- wfx_update_beaconing(wvif);
- }
-
- if (changed & BSS_CHANGED_BSSID)
- do_join = true;
-
- if (changed & BSS_CHANGED_ASSOC ||
- changed & BSS_CHANGED_BSSID ||
- changed & BSS_CHANGED_IBSS ||
- changed & BSS_CHANGED_BASIC_RATES ||
- changed & BSS_CHANGED_HT) {
- if (info->assoc) {
- if (wvif->state < WFX_STATE_PRE_STA) {
- ieee80211_connection_loss(vif);
- mutex_unlock(&wdev->conf_mutex);
- return;
- } else if (wvif->state == WFX_STATE_PRE_STA) {
- wvif->state = WFX_STATE_STA;
- }
- } else {
- do_join = true;
- }
-
- if (info->assoc || info->ibss_joined)
- wfx_join_finalize(wvif, info);
- else
- memset(&wvif->bss_params, 0,
- sizeof(wvif->bss_params));
- }
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (info->assoc || info->ibss_joined)
+ wfx_join_finalize(wvif, info);
+ else if (!info->assoc && vif->type == NL80211_IFTYPE_STATION)
+ wfx_reset(wvif);
+ else
+ dev_warn(wdev->dev, "%s: misunderstood change: ASSOC\n",
+ __func__);
}
- if (changed & BSS_CHANGED_ASSOC ||
- changed & BSS_CHANGED_ERP_CTS_PROT ||
- changed & BSS_CHANGED_ERP_PREAMBLE) {
- u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
+ if (changed & BSS_CHANGED_KEEP_ALIVE)
+ hif_keep_alive_period(wvif, info->max_idle_period *
+ USEC_PER_TU / USEC_PER_MSEC);
+ if (changed & BSS_CHANGED_ERP_CTS_PROT)
hif_erp_use_protection(wvif, info->use_cts_prot);
- if (info->use_cts_prot)
- erp_ie[2] |= WLAN_ERP_USE_PROTECTION;
- if (info->use_short_preamble)
- erp_ie[2] |= WLAN_ERP_BARKER_PREAMBLE;
- if (wvif->vif->type != NL80211_IFTYPE_STATION)
- hif_update_ie_beacon(wvif, erp_ie, sizeof(erp_ie));
- }
- if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_ERP_SLOT)
+ if (changed & BSS_CHANGED_ERP_SLOT)
hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
- if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_CQM)
+ if (changed & BSS_CHANGED_CQM)
hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold,
info->cqm_rssi_hyst);
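
The keep-alive change above converts mac80211's BSS max idle period, given in
time units (TU), into the milliseconds expected by the HIF. A minimal worked
sketch, using the USEC_PER_TU constant this patch defines in wfx.h (1024) and
the usual kernel USEC_PER_MSEC (1000):

	/* Sketch: 1 TU = 1024 us, so 1000 TU -> 1024000 us -> 1024 ms. */
	static unsigned int tu_to_msec(unsigned int tu)
	{
		return tu * USEC_PER_TU / USEC_PER_MSEC;
	}
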
@@ -838,31 +631,6 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
wfx_update_pm(wvif);
mutex_unlock(&wdev->conf_mutex);
-
- if (do_join)
- wfx_do_join(wvif);
-}
-
-static void wfx_ps_notify_sta(struct wfx_vif *wvif,
- enum sta_notify_cmd notify_cmd, int link_id)
-{
- spin_lock_bh(&wvif->ps_state_lock);
- if (notify_cmd == STA_NOTIFY_SLEEP)
- wvif->sta_asleep_mask |= BIT(link_id);
- else // notify_cmd == STA_NOTIFY_AWAKE
- wvif->sta_asleep_mask &= ~BIT(link_id);
- spin_unlock_bh(&wvif->ps_state_lock);
- if (notify_cmd == STA_NOTIFY_AWAKE)
- wfx_bh_request_tx(wvif->wdev);
-}
-
-void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum sta_notify_cmd notify_cmd, struct ieee80211_sta *sta)
-{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
-
- wfx_ps_notify_sta(wvif, notify_cmd, sta_priv->link_id);
}
static int wfx_update_tim(struct wfx_vif *wvif)
@@ -873,10 +641,8 @@ static int wfx_update_tim(struct wfx_vif *wvif)
skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
&tim_offset, &tim_length);
- if (!skb) {
- __wfx_flush(wvif->wdev, true);
+ if (!skb)
return -ENOENT;
- }
tim_ptr = skb->data + tim_offset;
if (tim_offset && tim_length >= 6) {
@@ -886,7 +652,7 @@ static int wfx_update_tim(struct wfx_vif *wvif)
tim_ptr[2] = 0;
/* Set/reset aid0 bit */
- if (wfx_tx_queues_get_after_dtim(wvif))
+ if (wfx_tx_queues_has_cab(wvif))
tim_ptr[4] |= 1;
else
tim_ptr[4] &= ~1;
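
For context on the tim_ptr[4] bit above: the TIM information element has a
fixed layout, and bit 0 of its bitmap-control octet is the AID-0 indicator
for buffered group-addressed (CAB) traffic. A sketch of the offsets as
assumed by the code, per IEEE 802.11 rather than quoted from the driver:

	/* Sketch: TIM element layout assumed above.
	 *   tim_ptr[0]   element ID (WLAN_EID_TIM)
	 *   tim_ptr[1]   length
	 *   tim_ptr[2]   DTIM count (forced to 0 above)
	 *   tim_ptr[3]   DTIM period
	 *   tim_ptr[4]   bitmap control; bit 0 = AID 0 (group traffic)
	 *   tim_ptr[5..] partial virtual bitmap
	 */
	static void tim_set_multicast(u8 *tim_ptr, bool pending)
	{
		if (pending)
			tim_ptr[4] |= 1;
		else
			tim_ptr[4] &= ~1;
	}
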
@@ -917,7 +683,9 @@ int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd)
{
- WARN(!wfx_tx_queues_get_after_dtim(wvif), "incorrect sequence");
+ if (notify_cmd != STA_NOTIFY_AWAKE)
+ return;
+ WARN(!wfx_tx_queues_has_cab(wvif), "incorrect sequence");
WARN(wvif->after_dtim_tx_allowed, "incorrect sequence");
wvif->after_dtim_tx_allowed = true;
wfx_bh_request_tx(wvif->wdev);
@@ -958,7 +726,7 @@ void wfx_change_chanctx(struct ieee80211_hw *hw,
int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *conf)
{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct ieee80211_channel *ch = conf->def.chan;
WARN(wvif->channel, "channel overwrite");
@@ -971,7 +739,7 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *conf)
{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct ieee80211_channel *ch = conf->def.chan;
WARN(wvif->channel != ch, "channel mismatch");
@@ -987,7 +755,7 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
int i, ret = 0;
struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_UAPSD |
@@ -1021,33 +789,18 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
wvif->wdev = wdev;
wvif->link_id_map = 1; // link-id 0 is reserved for multicast
- spin_lock_init(&wvif->ps_state_lock);
INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work);
-
- memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
-
- mutex_init(&wvif->bss_loss_lock);
- INIT_DELAYED_WORK(&wvif->bss_loss_work, wfx_bss_loss_work);
-
- wvif->wep_default_key_id = -1;
- INIT_WORK(&wvif->wep_key_work, wfx_wep_key_work);
-
- spin_lock_init(&wvif->event_queue_lock);
- INIT_LIST_HEAD(&wvif->event_queue);
- INIT_WORK(&wvif->event_handler_work, wfx_event_handler_work);
+ INIT_DELAYED_WORK(&wvif->beacon_loss_work, wfx_beacon_loss_work);
init_completion(&wvif->set_pm_mode_complete);
complete(&wvif->set_pm_mode_complete);
- INIT_WORK(&wvif->update_filtering_work, wfx_update_filtering_work);
- INIT_WORK(&wvif->bss_params_work, wfx_bss_params_work);
- INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work);
+ INIT_WORK(&wvif->update_pm_work, wfx_update_pm_work);
INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
mutex_init(&wvif->scan_lock);
init_completion(&wvif->scan_complete);
INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
- INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
mutex_unlock(&wdev->conf_mutex);
hif_set_macaddr(wvif, vif->addr);
@@ -1060,50 +813,25 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
else
hif_set_block_ack_policy(wvif, 0x00, 0x00);
- // Combo force powersave mode. We can re-enable it now
- ret = wfx_update_pm(wvif);
}
return ret;
}
-void wfx_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
mutex_lock(&wdev->conf_mutex);
WARN(wvif->link_id_map != 1, "corrupted state");
- switch (wvif->state) {
- case WFX_STATE_PRE_STA:
- case WFX_STATE_STA:
- case WFX_STATE_IBSS:
- wfx_tx_lock_flush(wdev);
- if (!schedule_work(&wvif->unjoin_work))
- wfx_tx_unlock(wdev);
- break;
- case WFX_STATE_AP:
- wvif->sta_asleep_mask = 0;
- /* reset.link_id = 0; */
- hif_reset(wvif, false);
- break;
- default:
- break;
- }
-
- wvif->state = WFX_STATE_PASSIVE;
- wfx_tx_queues_wait_empty_vif(wvif);
- wfx_tx_unlock(wdev);
- /* FIXME: In add to reset MAC address, try to reset interface */
+ hif_reset(wvif, false);
hif_set_macaddr(wvif, NULL);
+ wfx_tx_policy_init(wvif);
- wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
- cancel_work_sync(&wvif->unjoin_work);
- wfx_free_event_queue(wvif);
-
+ cancel_delayed_work_sync(&wvif->beacon_loss_work);
wdev->vif[wvif->id] = NULL;
wvif->vif = NULL;
@@ -1115,8 +843,6 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
else
hif_set_block_ack_policy(wvif, 0x00, 0x00);
- // Combo force powersave mode. We can re-enable it now
- wfx_update_pm(wvif);
}
}
@@ -1129,10 +855,5 @@ void wfx_stop(struct ieee80211_hw *hw)
{
struct wfx_dev *wdev = hw->priv;
- wfx_tx_lock_flush(wdev);
- mutex_lock(&wdev->conf_mutex);
- wfx_tx_queues_clear(wdev);
- mutex_unlock(&wdev->conf_mutex);
- wfx_tx_unlock(wdev);
- WARN(atomic_read(&wdev->tx_lock), "tx_lock is locked");
+ wfx_tx_queues_check_empty(wdev);
}
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
index cf99a8a74a81..8a20ad9ae017 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/staging/wfx/sta.h
@@ -10,34 +10,13 @@
#include <net/mac80211.h>
-#include "hif_api_cmd.h"
-
struct wfx_dev;
struct wfx_vif;
-enum wfx_state {
- WFX_STATE_PASSIVE = 0,
- WFX_STATE_PRE_STA,
- WFX_STATE_STA,
- WFX_STATE_IBSS,
- WFX_STATE_AP,
-};
-
-struct wfx_hif_event {
- struct list_head link;
- struct hif_ind_event evt;
-};
-
-struct wfx_grp_addr_table {
- bool enable;
- int num_addresses;
- u8 address_list[8][ETH_ALEN];
-};
-
struct wfx_sta_priv {
int link_id;
int vif_id;
- u8 buffered[IEEE80211_NUM_TIDS];
+ int buffered[IEEE80211_NUM_TIDS];
// Ensure atomicity of "buffered" and calls to ieee80211_sta_set_buffered()
spinlock_t lock;
};
@@ -47,6 +26,8 @@ int wfx_start(struct ieee80211_hw *hw);
void wfx_stop(struct ieee80211_hw *hw);
int wfx_config(struct ieee80211_hw *hw, u32 changed);
int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+void wfx_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx);
u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
@@ -54,8 +35,10 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop);
+int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 queue, const struct ieee80211_tx_queue_params *params);
void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -82,12 +65,13 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf);
// WSM Callbacks
+void wfx_cooling_timeout_work(struct work_struct *work);
+void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd);
void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd);
+void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi);
// Other Helpers
-void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad);
-void wfx_update_filtering(struct wfx_vif *wvif);
-int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable);
+void wfx_reset(struct wfx_vif *wvif);
u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates);
#endif /* WFX_STA_H */
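
The four handlers newly declared above replace the driver's internal state
machine with the standard mac80211 callbacks. A minimal sketch of how they
would be wired into the driver's ieee80211_ops table; the table itself is not
part of this hunk, so the field names below are the generic mac80211 ones:

	static const struct ieee80211_ops wfx_ops_sketch = {
		.start_ap   = wfx_start_ap,
		.stop_ap    = wfx_stop_ap,
		.join_ibss  = wfx_join_ibss,
		.leave_ibss = wfx_leave_ibss,
		/* ... remaining callbacks unchanged ... */
	};
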
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
index 30c6a13f0e22..0b6fbd518638 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/staging/wfx/traces.h
@@ -32,16 +32,16 @@
* xxx_name(XXX) \
* ...
*
- * 3. Instanciate that list_names:
+ * 3. Instantiate that list_names:
*
* list_names
*
- * 4. Redefine xxx_name() as a entry of array for __print_symbolic()
+ * 4. Redefine xxx_name() as an entry of array for __print_symbolic()
*
* #undef xxx_name
* #define xxx_name(msg) { msg, #msg },
*
- * 5. list_name can now nearlu be used with __print_symbolic() but,
+ * 5. list_name can now nearly be used with __print_symbolic() but,
 * __print_symbolic() dislikes the last comma of a list, so we define a new list
* with a dummy element:
*
@@ -104,8 +104,10 @@ hif_msg_list_enum
hif_mib_name(ARP_KEEP_ALIVE_PERIOD) \
hif_mib_name(BEACON_FILTER_ENABLE) \
hif_mib_name(BEACON_FILTER_TABLE) \
+ hif_mib_name(BEACON_STATS) \
hif_mib_name(BEACON_WAKEUP_PERIOD) \
hif_mib_name(BLOCK_ACK_POLICY) \
+ hif_mib_name(CCA_CONFIG) \
hif_mib_name(CONFIG_DATA_FILTER) \
hif_mib_name(COUNTERS_TABLE) \
hif_mib_name(CURRENT_TX_POWER_LEVEL) \
@@ -114,29 +116,32 @@ hif_msg_list_enum
hif_mib_name(DOT11_MAX_TRANSMIT_MSDU_LIFETIME) \
hif_mib_name(DOT11_RTS_THRESHOLD) \
hif_mib_name(DOT11_WEP_DEFAULT_KEY_ID) \
+ hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \
+ hif_mib_name(EXTENDED_COUNTERS_TABLE) \
hif_mib_name(GL_BLOCK_ACK_INFO) \
hif_mib_name(GL_OPERATIONAL_POWER_MODE) \
hif_mib_name(GL_SET_MULTI_MSG) \
+ hif_mib_name(GRP_SEQ_COUNTER) \
hif_mib_name(INACTIVITY_TIMER) \
hif_mib_name(INTERFACE_PROTECTION) \
hif_mib_name(IPV4_ADDR_DATAFRAME_CONDITION) \
hif_mib_name(IPV6_ADDR_DATAFRAME_CONDITION) \
hif_mib_name(KEEP_ALIVE_PERIOD) \
hif_mib_name(MAC_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(MAGIC_DATAFRAME_CONDITION) \
+ hif_mib_name(MAX_TX_POWER_LEVEL) \
hif_mib_name(NON_ERP_PROTECTION) \
hif_mib_name(NS_IP_ADDRESSES_TABLE) \
hif_mib_name(OVERRIDE_INTERNAL_TX_RATE) \
+ hif_mib_name(PORT_DATAFRAME_CONDITION) \
hif_mib_name(PROTECTED_MGMT_POLICY) \
- hif_mib_name(RX_FILTER) \
hif_mib_name(RCPI_RSSI_THRESHOLD) \
+ hif_mib_name(RX_FILTER) \
hif_mib_name(SET_ASSOCIATION_MODE) \
hif_mib_name(SET_DATA_FILTERING) \
- hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \
hif_mib_name(SET_HT_PROTECTION) \
- hif_mib_name(MAGIC_DATAFRAME_CONDITION) \
hif_mib_name(SET_TX_RATE_RETRY_POLICY) \
hif_mib_name(SET_UAPSD_INFORMATION) \
- hif_mib_name(PORT_DATAFRAME_CONDITION) \
hif_mib_name(SLOT_TIME) \
hif_mib_name(STATISTICS_TABLE) \
hif_mib_name(TEMPLATE_FRAME) \
@@ -169,7 +174,7 @@ DECLARE_EVENT_CLASS(hif_data,
int header_len;
__entry->tx_fill_level = tx_fill_level;
- __entry->msg_len = hif->len;
+ __entry->msg_len = le16_to_cpu(hif->len);
__entry->msg_id = hif->id;
__entry->if_id = hif->interface;
if (is_recv)
@@ -179,7 +184,7 @@ DECLARE_EVENT_CLASS(hif_data,
if (!is_recv &&
(__entry->msg_id == HIF_REQ_ID_READ_MIB ||
__entry->msg_id == HIF_REQ_ID_WRITE_MIB)) {
- __entry->mib = le16_to_cpup((u16 *) hif->body);
+ __entry->mib = le16_to_cpup((__le16 *)hif->body);
header_len = 4;
} else {
__entry->mib = -1;
@@ -193,8 +198,8 @@ DECLARE_EVENT_CLASS(hif_data,
TP_printk("%d:%d:%s_%s%s%s: %s%s (%d bytes)",
__entry->tx_fill_level,
__entry->if_id,
- __print_symbolic(__entry->msg_id, hif_msg_list),
__entry->msg_type,
+ __print_symbolic(__entry->msg_id, hif_msg_list),
__entry->mib != -1 ? "/" : "",
__entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "",
__print_hex(__entry->buf, __entry->buf_len),
@@ -382,8 +387,8 @@ TRACE_EVENT(tx_stats,
int i;
__entry->pkt_id = tx_cnf->packet_id;
- __entry->delay_media = tx_cnf->media_delay;
- __entry->delay_queue = tx_cnf->tx_queue_delay;
+ __entry->delay_media = le32_to_cpu(tx_cnf->media_delay);
+ __entry->delay_queue = le32_to_cpu(tx_cnf->tx_queue_delay);
__entry->delay_fw = delay;
__entry->ack_failures = tx_cnf->ack_failures;
if (!tx_cnf->status || __entry->ack_failures)
@@ -409,7 +414,7 @@ TRACE_EVENT(tx_stats,
__entry->flags |= 0x10;
if (tx_cnf->status)
__entry->flags |= 0x20;
- if (tx_cnf->status == HIF_REQUEUE)
+ if (tx_cnf->status == HIF_STATUS_TX_FAIL_REQUEUE)
__entry->flags |= 0x40;
),
TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus",
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
index 8b85bb1abb9c..73e216733ce4 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/staging/wfx/wfx.h
@@ -21,10 +21,7 @@
#include "main.h"
#include "queue.h"
#include "secure_link.h"
-#include "sta.h"
-#include "scan.h"
#include "hif_tx.h"
-#include "hif_api_general.h"
#define USEC_PER_TXOP 32 // see struct ieee80211_tx_queue_params
#define USEC_PER_TU 1024
@@ -45,21 +42,24 @@ struct wfx_dev {
struct hif_ind_startup hw_caps;
struct wfx_hif hif;
struct sl_context sl;
- int chip_frozen;
+ struct delayed_work cooling_timeout_work;
+ bool poll_irq;
+ bool chip_frozen;
struct mutex conf_mutex;
struct wfx_hif_cmd hif_cmd;
struct wfx_queue tx_queue[4];
- struct wfx_queue_stats tx_queue_stats;
- int tx_burst_idx;
+ struct sk_buff_head tx_pending;
+ wait_queue_head_t tx_dequeue;
atomic_t tx_lock;
atomic_t packet_id;
u32 key_map;
- struct hif_req_add_key keys[MAX_KEY_ENTRIES];
struct hif_rx_stats rx_stats;
struct mutex rx_stats_lock;
+ struct hif_tx_power_loop_info tx_power_loop_info;
+ struct mutex tx_power_loop_info_lock;
};
struct wfx_vif {
@@ -67,42 +67,23 @@ struct wfx_vif {
struct ieee80211_vif *vif;
struct ieee80211_channel *channel;
int id;
- enum wfx_state state;
-
- int bss_loss_state;
- u32 bss_loss_confirm_id;
- struct mutex bss_loss_lock;
- struct delayed_work bss_loss_work;
u32 link_id_map;
bool after_dtim_tx_allowed;
- struct wfx_grp_addr_table mcast_filter;
+ bool join_in_progress;
- s8 wep_default_key_id;
- struct sk_buff *wep_pending_skb;
- struct work_struct wep_key_work;
+ struct delayed_work beacon_loss_work;
struct tx_policy_cache tx_policy_cache;
struct work_struct tx_policy_upload_work;
- u32 sta_asleep_mask;
- spinlock_t ps_state_lock;
struct work_struct update_tim_work;
- int beacon_int;
- bool filter_bssid;
- bool fwd_probe_req;
- bool disable_beacon_filter;
- struct work_struct update_filtering_work;
+ int filter_mcast_count;
+ u8 filter_mcast_addr[8][ETH_ALEN];
unsigned long uapsd_mask;
- struct ieee80211_tx_queue_params edca_params[IEEE80211_NUM_ACS];
- struct hif_req_set_bss_params bss_params;
- struct work_struct bss_params_work;
-
- int join_complete_status;
- struct work_struct unjoin_work;
/* avoid some operations in parallel with scan */
struct mutex scan_lock;
@@ -111,11 +92,9 @@ struct wfx_vif {
bool scan_abort;
struct ieee80211_scan_request *scan_req;
+ bool bss_not_support_ps_poll;
+ struct work_struct update_pm_work;
struct completion set_pm_mode_complete;
-
- struct list_head event_queue;
- spinlock_t event_queue_lock;
- struct work_struct event_handler_work;
};
static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
diff --git a/drivers/staging/wilc1000/cfg80211.c b/drivers/staging/wilc1000/cfg80211.c
index 4bdcbc5fd2fd..b6065a0d660f 100644
--- a/drivers/staging/wilc1000/cfg80211.c
+++ b/drivers/staging/wilc1000/cfg80211.c
@@ -1217,33 +1217,31 @@ static int mgmt_tx_cancel_wait(struct wiphy *wiphy,
return 0;
}
-void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
- u16 frame_type, bool reg)
+void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct mgmt_frame_regs *upd)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(wdev->netdev);
+ u32 presp_bit = BIT(IEEE80211_STYPE_PROBE_REQ >> 4);
+ u32 action_bit = BIT(IEEE80211_STYPE_ACTION >> 4);
- if (!frame_type)
- return;
+ if (wl->initialized) {
+ bool prev = vif->mgmt_reg_stypes & presp_bit;
+ bool now = upd->interface_stypes & presp_bit;
- switch (frame_type) {
- case IEEE80211_STYPE_PROBE_REQ:
- vif->frame_reg[0].type = frame_type;
- vif->frame_reg[0].reg = reg;
- break;
+ if (now != prev)
+ wilc_frame_register(vif, IEEE80211_STYPE_PROBE_REQ, now);
- case IEEE80211_STYPE_ACTION:
- vif->frame_reg[1].type = frame_type;
- vif->frame_reg[1].reg = reg;
- break;
+ prev = vif->mgmt_reg_stypes & action_bit;
+ now = upd->interface_stypes & action_bit;
- default:
- break;
+ if (now != prev)
+ wilc_frame_register(vif, IEEE80211_STYPE_ACTION, now);
}
- if (!wl->initialized)
- return;
- wilc_frame_register(vif, frame_type, reg);
+ vif->mgmt_reg_stypes =
+ upd->interface_stypes & (presp_bit | action_bit);
}
static int set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev,
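
The bit positions used above derive from the management frame subtype field:
IEEE80211_STYPE_* values occupy bits 4-7 of the frame control word, so
shifting right by 4 yields a compact per-subtype bit index. Worked out for
the two subtypes this driver registers (constants from linux/ieee80211.h):

	/* Sketch: IEEE80211_STYPE_PROBE_REQ = 0x0040, >> 4 = 4,  BIT(4)  = 0x0010
	 *         IEEE80211_STYPE_ACTION    = 0x00d0, >> 4 = 13, BIT(13) = 0x2000 */
	static bool stype_registered(u32 reg_stypes, u16 stype)
	{
		return reg_stypes & BIT(stype >> 4);
	}
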
@@ -1665,7 +1663,7 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.cancel_remain_on_channel = cancel_remain_on_channel,
.mgmt_tx_cancel_wait = mgmt_tx_cancel_wait,
.mgmt_tx = mgmt_tx,
- .mgmt_frame_register = wilc_mgmt_frame_register,
+ .update_mgmt_frame_registrations = wilc_update_mgmt_frame_registrations,
.set_power_mgmt = set_power_mgmt,
.set_cqm_rssi_config = set_cqm_rssi_config,
diff --git a/drivers/staging/wilc1000/cfg80211.h b/drivers/staging/wilc1000/cfg80211.h
index 5e5d63f70df2..37b294cb3b37 100644
--- a/drivers/staging/wilc1000/cfg80211.h
+++ b/drivers/staging/wilc1000/cfg80211.h
@@ -21,8 +21,9 @@ void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked);
struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
const char *name,
struct net_device *real_dev);
-void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
- u16 frame_type, bool reg);
+void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct mgmt_frame_regs *upd);
struct wilc_vif *wilc_get_interface(struct wilc *wl);
struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl);
void wlan_deinit_locks(struct wilc *wilc);
diff --git a/drivers/staging/wilc1000/hif.c b/drivers/staging/wilc1000/hif.c
index 6c7de2f8d3f2..d025a3093015 100644
--- a/drivers/staging/wilc1000/hif.c
+++ b/drivers/staging/wilc1000/hif.c
@@ -11,6 +11,8 @@
#define WILC_FALSE_FRMWR_CHANNEL 100
+#define WILC_SCAN_WID_LIST_SIZE 6
+
struct wilc_rcvd_mac_info {
u8 status;
};
@@ -151,7 +153,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
void *user_arg, struct cfg80211_scan_request *request)
{
int result = 0;
- struct wid wid_list[5];
+ struct wid wid_list[WILC_SCAN_WID_LIST_SIZE];
u32 index = 0;
u32 i, scan_timeout;
u8 *buffer;
diff --git a/drivers/staging/wilc1000/netdev.c b/drivers/staging/wilc1000/netdev.c
index f94a17babd12..fda0ab97b02c 100644
--- a/drivers/staging/wilc1000/netdev.c
+++ b/drivers/staging/wilc1000/netdev.c
@@ -571,6 +571,7 @@ static int wilc_mac_open(struct net_device *ndev)
struct wilc *wl = vif->wilc;
unsigned char mac_add[ETH_ALEN] = {0};
int ret = 0;
+ struct mgmt_frame_regs mgmt_regs = {};
if (!wl || !wl->dev) {
netdev_err(ndev, "device not ready\n");
@@ -602,14 +603,12 @@ static int wilc_mac_open(struct net_device *ndev)
return -EINVAL;
}
- wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
- vif->ndev->ieee80211_ptr,
- vif->frame_reg[0].type,
- vif->frame_reg[0].reg);
- wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
- vif->ndev->ieee80211_ptr,
- vif->frame_reg[1].type,
- vif->frame_reg[1].reg);
+ mgmt_regs.interface_stypes = vif->mgmt_reg_stypes;
+ /* so we detect a change */
+ vif->mgmt_reg_stypes = 0;
+ wilc_update_mgmt_frame_registrations(vif->ndev->ieee80211_ptr->wiphy,
+ vif->ndev->ieee80211_ptr,
+ &mgmt_regs);
netif_wake_queue(ndev);
wl->open_ifcs++;
vif->mac_opened = 1;
@@ -792,12 +791,10 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
srcu_idx = srcu_read_lock(&wilc->srcu);
list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
u16 type = le16_to_cpup((__le16 *)buff);
+ u32 type_bit = BIT(type >> 4);
if (vif->priv.p2p_listen_state &&
- ((type == vif->frame_reg[0].type &&
- vif->frame_reg[0].reg) ||
- (type == vif->frame_reg[1].type &&
- vif->frame_reg[1].reg)))
+ vif->mgmt_reg_stypes & type_bit)
wilc_wfi_p2p_rx(vif, buff, size);
if (vif->monitor_flag)
diff --git a/drivers/staging/wilc1000/netdev.h b/drivers/staging/wilc1000/netdev.h
index 61cbec674a62..d0a006b68d08 100644
--- a/drivers/staging/wilc1000/netdev.h
+++ b/drivers/staging/wilc1000/netdev.h
@@ -24,8 +24,6 @@
#define PMKID_FOUND 1
#define NUM_STA_ASSOCIATED 8
-#define NUM_REG_FRAME 2
-
#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
#define DEFAULT_LINK_SPEED 72
@@ -151,11 +149,6 @@ struct wilc_priv {
u64 inc_roc_cookie;
};
-struct frame_reg {
- u16 type;
- bool reg;
-};
-
#define MAX_TCP_SESSION 25
#define MAX_PENDING_ACKS 256
@@ -187,7 +180,7 @@ struct wilc_vif {
u8 iftype;
int monitor_flag;
int mac_opened;
- struct frame_reg frame_reg[NUM_REG_FRAME];
+ u32 mgmt_reg_stypes;
struct net_device_stats netstats;
struct wilc *wilc;
u8 bssid[ETH_ALEN];
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 1f93ea381353..922484ea4e30 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config ISCSI_TARGET
tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
- depends on NET
+ depends on INET
select CRYPTO
select CRYPTO_CRC32C
select CRYPTO_CRC32C_INTEL if X86
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 731ee67fe914..85748e338858 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -15,6 +15,7 @@
#include <linux/sched/signal.h>
#include <linux/idr.h>
#include <linux/tcp.h> /* TCP_NODELAY */
+#include <net/ip.h>
#include <net/ipv6.h> /* ipv6_addr_v4mapped() */
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
@@ -855,7 +856,7 @@ int iscsit_setup_np(
struct sockaddr_storage *sockaddr)
{
struct socket *sock = NULL;
- int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len;
+ int backlog = ISCSIT_TCP_BACKLOG, ret, len;
switch (np->np_network_transport) {
case ISCSI_TCP:
@@ -897,34 +898,10 @@ int iscsit_setup_np(
/*
 * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY.
*/
- /* FIXME: Someone please explain why this is endian-safe */
- opt = 1;
- if (np->np_network_transport == ISCSI_TCP) {
- ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
- (char *)&opt, sizeof(opt));
- if (ret < 0) {
- pr_err("kernel_setsockopt() for TCP_NODELAY"
- " failed: %d\n", ret);
- goto fail;
- }
- }
-
- /* FIXME: Someone please explain why this is endian-safe */
- ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
- (char *)&opt, sizeof(opt));
- if (ret < 0) {
- pr_err("kernel_setsockopt() for SO_REUSEADDR"
- " failed\n");
- goto fail;
- }
-
- ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND,
- (char *)&opt, sizeof(opt));
- if (ret < 0) {
- pr_err("kernel_setsockopt() for IP_FREEBIND"
- " failed\n");
- goto fail;
- }
+ if (np->np_network_transport == ISCSI_TCP)
+ tcp_sock_set_nodelay(sock->sk);
+ sock_set_reuseaddr(sock->sk);
+ ip_sock_set_freebind(sock->sk);
ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
if (ret < 0) {
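
The three kernel_setsockopt() calls removed above are replaced by typed
in-kernel setters that operate on the struct sock directly, return void for
these boolean options, and make the old endianness FIXMEs moot. A condensed
sketch of the new pattern, assuming a kernel-created listener socket:

	#include <net/ip.h>
	#include <net/sock.h>
	#include <net/tcp.h>

	static void listener_set_opts(struct socket *sock, bool is_tcp)
	{
		if (is_tcp)
			tcp_sock_set_nodelay(sock->sk);	/* disable Nagle */
		sock_set_reuseaddr(sock->sk);		/* SO_REUSEADDR */
		ip_sock_set_freebind(sock->sk);		/* bind before address is up */
	}
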
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 89183b3b178f..45ba07c6ec27 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1228,18 +1228,20 @@ void iscsit_print_session_params(struct iscsi_session *sess)
iscsi_dump_sess_ops(sess->sess_ops);
}
-static int iscsit_do_rx_data(
+int rx_data(
struct iscsi_conn *conn,
- struct iscsi_data_count *count)
+ struct kvec *iov,
+ int iov_count,
+ int data)
{
- int data = count->data_length, rx_loop = 0, total_rx = 0;
+ int rx_loop = 0, total_rx = 0;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
memset(&msg, 0, sizeof(struct msghdr));
- iov_iter_kvec(&msg.msg_iter, READ, count->iov, count->iov_count, data);
+ iov_iter_kvec(&msg.msg_iter, READ, iov, iov_count, data);
while (msg_data_left(&msg)) {
rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
@@ -1256,26 +1258,6 @@ static int iscsit_do_rx_data(
return total_rx;
}
-int rx_data(
- struct iscsi_conn *conn,
- struct kvec *iov,
- int iov_count,
- int data)
-{
- struct iscsi_data_count c;
-
- if (!conn || !conn->sock || !conn->conn_ops)
- return -1;
-
- memset(&c, 0, sizeof(struct iscsi_data_count));
- c.iov = iov;
- c.iov_count = iov_count;
- c.data_length = data;
- c.type = ISCSI_RX_DATA;
-
- return iscsit_do_rx_data(conn, &c);
-}
-
int tx_data(
struct iscsi_conn *conn,
struct kvec *iov,
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3305b47fdf53..16d5a4e117a2 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -545,32 +545,15 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+static int tcm_loop_queue_data_or_status(const char *func,
+ struct se_cmd *se_cmd, u8 scsi_status)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
- __func__, sc, sc->cmnd[0]);
-
- sc->result = SAM_STAT_GOOD;
- set_host_byte(sc, DID_OK);
- if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
- (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
- scsi_set_resid(sc, se_cmd->residual_count);
- sc->scsi_done(sc);
- return 0;
-}
-
-static int tcm_loop_queue_status(struct se_cmd *se_cmd)
-{
- struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
- struct tcm_loop_cmd, tl_se_cmd);
- struct scsi_cmnd *sc = tl_cmd->sc;
-
- pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
- __func__, sc, sc->cmnd[0]);
+ func, sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
@@ -581,7 +564,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
sc->result = SAM_STAT_CHECK_CONDITION;
set_driver_byte(sc, DRIVER_SENSE);
} else
- sc->result = se_cmd->scsi_status;
+ sc->result = scsi_status;
set_host_byte(sc, DID_OK);
if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
@@ -591,6 +574,17 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
return 0;
}
+static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+{
+ return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
+}
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd)
+{
+ return tcm_loop_queue_data_or_status(__func__,
+ se_cmd, se_cmd->scsi_status);
+}
+
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 385e4cf9cfa6..6b72afee2f8b 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -677,7 +677,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return 0;
/*
@@ -1090,7 +1090,7 @@ int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *tg_pt_gp;
int primary, valid_states, rc = 0;
- if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return -ENODEV;
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
@@ -1920,7 +1920,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2177,7 +2177,7 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2263,7 +2263,7 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev)
{
- if (!(dev->transport->transport_flags &
+ if (!(dev->transport_flags &
TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem;
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index ff82b21fdcce..f04352285155 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1099,21 +1099,73 @@ static ssize_t block_size_store(struct config_item *item,
static ssize_t alua_support_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = to_attrib(item);
- u8 flags = da->da_dev->transport->transport_flags;
+ u8 flags = da->da_dev->transport_flags;
return snprintf(page, PAGE_SIZE, "%d\n",
flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
}
+static ssize_t alua_support_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
+ pr_err("dev[%p]: Unable to change SE Device alua_support:"
+ " alua_support has fixed value\n", dev);
+ return -EINVAL;
+ }
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
+ else
+ dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
+ return count;
+}
+
static ssize_t pgr_support_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = to_attrib(item);
- u8 flags = da->da_dev->transport->transport_flags;
+ u8 flags = da->da_dev->transport_flags;
return snprintf(page, PAGE_SIZE, "%d\n",
flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
}
+static ssize_t pgr_support_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
+ pr_err("dev[%p]: Unable to change SE Device pgr_support:"
+ " pgr_support has fixed value\n", dev);
+ return -EINVAL;
+ }
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
+ else
+ dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
+ return count;
+}
+
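
Note the inverted polarity in both stores above: writing 1 to alua_support or
pgr_support clears the corresponding passthrough flag (the core emulates the
feature), while writing 0 sets it (the backend handles it). A hypothetical
helper spelling that out; se_dev_emulates_pgr() is illustrative, not from the
patch:

	/* Sketch: attribute value 1 <=> passthrough flag clear. */
	static bool se_dev_emulates_pgr(const struct se_device *dev)
	{
		return !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
	}
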
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
@@ -1146,8 +1198,8 @@ CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
-CONFIGFS_ATTR_RO(, alua_support);
-CONFIGFS_ATTR_RO(, pgr_support);
+CONFIGFS_ATTR(, alua_support);
+CONFIGFS_ATTR(, pgr_support);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
@@ -1203,12 +1255,24 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
&attr_hw_block_size,
&attr_hw_max_sectors,
&attr_hw_queue_depth,
+ &attr_emulate_pr,
&attr_alua_support,
&attr_pgr_support,
NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
+/*
+ * pr related dev_attrib attributes for devices passing through CDBs,
+ * but allowing in core pr emulation.
+ */
+struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
+ &attr_enforce_pr_isids,
+ &attr_force_pr_aptpl,
+ NULL,
+};
+EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
+
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
@@ -1642,7 +1706,7 @@ static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
if (!dev->dev_attrib.emulate_pr)
return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "Passthrough\n");
spin_lock(&dev->dev_reservation_lock);
@@ -1784,7 +1848,7 @@ static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
if (!dev->dev_attrib.emulate_pr)
return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "SPC_PASSTHROUGH\n");
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return sprintf(page, "SPC2_RESERVATIONS\n");
@@ -1798,7 +1862,7 @@ static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
struct se_device *dev = pr_to_dev(item);
if (!dev->dev_attrib.emulate_pr ||
- (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
@@ -1811,7 +1875,7 @@ static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
struct se_device *dev = pr_to_dev(item);
if (!dev->dev_attrib.emulate_pr ||
- (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1858,7 +1922,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
u8 type = 0;
if (!dev->dev_attrib.emulate_pr ||
- (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return count;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return count;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 4cee1138284b..46b0e1ceb77f 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -100,9 +100,10 @@ out_unlock:
*/
if (unpacked_lun != 0) {
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08llx\n",
+ " Access for 0x%08llx from %s\n",
se_cmd->se_tfo->fabric_name,
- unpacked_lun);
+ unpacked_lun,
+ nacl->initiatorname);
return TCM_NON_EXISTENT_LUN;
}
@@ -174,9 +175,10 @@ out_unlock:
if (!se_lun) {
pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08llx\n",
+ " Access for 0x%08llx for %s\n",
se_cmd->se_tfo->fabric_name,
- unpacked_lun);
+ unpacked_lun,
+ nacl->initiatorname);
return -ENODEV;
}
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
@@ -732,6 +734,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->se_hba = hba;
dev->transport = hba->backend->ops;
+ dev->transport_flags = dev->transport->transport_flags_default;
dev->prot_length = sizeof(struct t10_pi_tuple);
dev->hba_index = hba->hba_index;
@@ -1100,7 +1103,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
* emulate the response, since tcmu does not have the information
* required to process these commands.
*/
- if (!(dev->transport->transport_flags &
+ if (!(dev->transport_flags &
TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
if (cdb[0] == PERSISTENT_RESERVE_IN) {
cmd->execute_cmd = target_scsi3_emulate_pr_in;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5e931690e697..91e41cc55704 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -4086,7 +4086,7 @@ target_check_reservation(struct se_cmd *cmd)
return 0;
if (!dev->dev_attrib.emulate_pr)
return 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return 0;
spin_lock(&dev->dev_reservation_lock);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index c9d92b3e777d..4e37fa9b409d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1070,9 +1070,9 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
- .transport_flags = TRANSPORT_FLAG_PASSTHROUGH |
- TRANSPORT_FLAG_PASSTHROUGH_ALUA |
- TRANSPORT_FLAG_PASSTHROUGH_PGR,
+ .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA |
+ TRANSPORT_FLAG_PASSTHROUGH_PGR,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index d24e0a3ba3ff..62aa5fa63ac0 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -582,8 +582,7 @@ int core_tpg_add_lun(
if (ret)
goto out_kill_ref;
- if (!(dev->transport->transport_flags &
- TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
+ if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 594b724bbf79..da37af9c3a5e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1397,7 +1397,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0;
if (cmd->sam_task_attr == TCM_ACA_TAG) {
@@ -2012,7 +2012,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
@@ -2126,7 +2126,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return;
if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
@@ -3350,6 +3350,7 @@ static void target_tmr_work(struct work_struct *work)
cmd->se_tfo->queue_tm_rsp(cmd);
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index f769bb1e3735..28fb9441de7a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -882,41 +882,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
- struct timer_list *timer)
+static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+ struct timer_list *timer)
{
- struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
- int cmd_id;
-
- if (tcmu_cmd->cmd_id)
- goto setup_timer;
-
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
- pr_err("tcmu: Could not allocate cmd id.\n");
- return cmd_id;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
- pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
- udev->name, tmo / MSEC_PER_SEC);
-
-setup_timer:
if (!tmo)
- return 0;
+ return;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
if (!timer_pending(timer))
mod_timer(timer, tcmu_cmd->deadline);
- return 0;
+ pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
+ tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}
static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
- int ret;
/*
* For backwards compat if qfull_time_out is not set use
@@ -931,13 +914,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
else
tmo = TCMU_TIME_OUT;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
- if (ret)
- return ret;
+ tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
- pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
+ tcmu_cmd, udev->name);
return 0;
}
@@ -959,7 +940,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, ret;
+ int iov_cnt, cmd_id;
uint32_t cmd_head;
uint64_t cdb_off;
bool copy_to_data_area;
@@ -1060,14 +1041,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
}
entry->req.iov_bidi_cnt = iov_cnt;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
- &udev->cmd_timer);
- if (ret) {
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
+ tcmu_cmd->cmd_id = cmd_id;
+
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
+
+ tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
+
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/*
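
With the idr allocation moved to ring-insertion time, only commands that
actually reach the ring own a cmd_id; commands parked on the qfull queue are
tracked by pointer alone. A sketch of the completion-side counterpart this
relies on, mirroring the shape of tcmu_handle_completions() rather than
quoting it:

	/* Sketch: ring entries carry hdr.cmd_id, so completion resolves and
	 * releases the idr slot in one step. */
	static struct tcmu_cmd *lookup_and_release(struct tcmu_dev *udev, u16 id)
	{
		return idr_remove(&udev->commands, id); /* NULL if already reaped */
	}
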
@@ -1279,50 +1267,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
return handled;
}
-static int tcmu_check_expired_cmd(int id, void *p, void *data)
+static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
- struct tcmu_cmd *cmd = p;
- struct tcmu_dev *udev = cmd->tcmu_dev;
- u8 scsi_status;
struct se_cmd *se_cmd;
- bool is_running;
-
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
- return 0;
if (!time_after(jiffies, cmd->deadline))
- return 0;
+ return;
- is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
+ set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ list_del_init(&cmd->queue_entry);
se_cmd = cmd->se_cmd;
+ cmd->se_cmd = NULL;
- if (is_running) {
- /*
- * If cmd_time_out is disabled but qfull is set deadline
- * will only reflect the qfull timeout. Ignore it.
- */
- if (!udev->cmd_time_out)
- return 0;
+ pr_debug("Timing out inflight cmd %u on dev %s.\n",
+ cmd->cmd_id, cmd->tcmu_dev->name);
- set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
- /*
- * target_complete_cmd will translate this to LUN COMM FAILURE
- */
- scsi_status = SAM_STAT_CHECK_CONDITION;
- list_del_init(&cmd->queue_entry);
- cmd->se_cmd = NULL;
- } else {
- list_del_init(&cmd->queue_entry);
- idr_remove(&udev->commands, id);
- tcmu_free_cmd(cmd);
- scsi_status = SAM_STAT_TASK_SET_FULL;
- }
+ target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
+}
- pr_debug("Timing out cmd %u on dev %s that is %s.\n",
- id, udev->name, is_running ? "inflight" : "queued");
+static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
+{
+ struct se_cmd *se_cmd;
- target_complete_cmd(se_cmd, scsi_status);
- return 0;
+ if (!time_after(jiffies, cmd->deadline))
+ return;
+
+ pr_debug("Timing out queued cmd %p on dev %s.\n",
+ cmd, cmd->tcmu_dev->name);
+
+ list_del_init(&cmd->queue_entry);
+ se_cmd = cmd->se_cmd;
+ tcmu_free_cmd(cmd);
+
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
static void tcmu_device_timedout(struct tcmu_dev *udev)
@@ -1407,16 +1384,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
-static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
+static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
- bool drained = true;
sense_reason_t scsi_ret;
int ret;
if (list_empty(&udev->qfull_queue))
- return true;
+ return;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
@@ -1425,11 +1401,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
list_del_init(&tcmu_cmd->queue_entry);
- pr_debug("removing cmd %u on dev %s from queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("removing cmd %p on dev %s from queue\n",
+ tcmu_cmd, udev->name);
if (fail) {
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
/*
* We were not able to even start the command, so
* fail with busy to allow a retry in case runner
@@ -1444,10 +1419,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0) {
- pr_debug("cmd %u on dev %s failed with %u\n",
- tcmu_cmd->cmd_id, udev->name, scsi_ret);
-
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+ pr_debug("cmd %p on dev %s failed with %u\n",
+ tcmu_cmd, udev->name, scsi_ret);
/*
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
@@ -1462,13 +1435,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
* the queue
*/
list_splice_tail(&cmds, &udev->qfull_queue);
- drained = false;
break;
}
}
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
- return drained;
}
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
@@ -1652,6 +1623,8 @@ static void tcmu_dev_kref_release(struct kref *kref)
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
+ if (!list_empty(&udev->qfull_queue))
+ all_expired = false;
idr_destroy(&udev->commands);
WARN_ON(!all_expired);
@@ -2037,9 +2010,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
- continue;
-
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
cmd->cmd_id, udev->name,
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
@@ -2077,6 +2047,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
del_timer(&udev->cmd_timer);
+ run_qfull_queue(udev, false);
+
mutex_unlock(&udev->cmdr_lock);
}
@@ -2617,7 +2589,9 @@ static struct configfs_attribute *tcmu_action_attrs[] = {
static struct target_backend_ops tcmu_ops = {
.name = "user",
.owner = THIS_MODULE,
- .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA,
.attach_hba = tcmu_attach_hba,
.detach_hba = tcmu_detach_hba,
.alloc_device = tcmu_alloc_device,
@@ -2698,6 +2672,7 @@ static void find_free_blocks(void)
static void check_timedout_devices(void)
{
struct tcmu_dev *udev, *tmp_dev;
+ struct tcmu_cmd *cmd, *tmp_cmd;
LIST_HEAD(devs);
spin_lock_bh(&timed_out_udevs_lock);
@@ -2708,9 +2683,24 @@ static void check_timedout_devices(void)
spin_unlock_bh(&timed_out_udevs_lock);
mutex_lock(&udev->cmdr_lock);
- idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
- tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ /*
+ * If cmd_time_out is disabled but qfull is set, the deadline
+ * will only reflect the qfull timeout; ignore it here.
+ */
+ if (udev->cmd_time_out) {
+ list_for_each_entry_safe(cmd, tmp_cmd,
+ &udev->inflight_queue,
+ queue_entry) {
+ tcmu_check_expired_ring_cmd(cmd);
+ }
+ tcmu_set_next_deadline(&udev->inflight_queue,
+ &udev->cmd_timer);
+ }
+ list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
+ queue_entry) {
+ tcmu_check_expired_queue_cmd(cmd);
+ }
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
mutex_unlock(&udev->cmdr_lock);
@@ -2753,12 +2743,12 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
- for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
len += sizeof(struct configfs_attribute *);
- }
- for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
+ for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
+ len += sizeof(struct configfs_attribute *);
+ for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
len += sizeof(struct configfs_attribute *);
- }
len += sizeof(struct configfs_attribute *);
tcmu_attrs = kzalloc(len, GFP_KERNEL);
@@ -2767,13 +2757,12 @@ static int __init tcmu_module_init(void)
goto out_unreg_genl;
}
- for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
tcmu_attrs[i] = passthrough_attrib_attrs[i];
- }
- for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
- tcmu_attrs[i] = tcmu_attrib_attrs[k];
- i++;
- }
+ for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
+ tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
+ for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
+ tcmu_attrs[i++] = tcmu_attrib_attrs[k];
tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
ret = transport_backend_register(&tcmu_ops);
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 8da63f38e6bd..e99d840c2511 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -3,6 +3,8 @@
config TEE
tristate "Trusted Execution Environment support"
depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
+ select CRYPTO
+ select CRYPTO_SHA1
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
help
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index cf2367ba08d6..dbed3f480dc0 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -233,9 +233,13 @@ int optee_open_session(struct tee_context *ctx,
msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
OPTEE_MSG_ATTR_META;
memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
- memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid));
msg_arg->params[1].u.value.c = arg->clnt_login;
+ rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
+ arg->clnt_login, arg->clnt_uuid);
+ if (rc)
+ goto out;
+
rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
if (rc)
goto out;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 6aec502c495c..64637e09a095 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -6,18 +6,33 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/cdev.h>
+#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
#include "tee_private.h"
#define TEE_NUM_DEVICES 32
#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
+#define TEE_UUID_NS_NAME_SIZE 128
+
+/*
+ * TEE Client UUID name space identifier (UUIDv4)
+ *
+ * The value here is a random UUID allocated as the name space identifier
+ * for forming client UUIDs for the TEE environment using the UUIDv5 scheme.
+ */
+static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
+ 0xa1, 0xb8, 0xec, 0x4b,
+ 0xc0, 0x8e, 0x01, 0xb6);
+
/*
* Unprivileged devices in the lower half range and privileged devices in
* the upper half range.
@@ -110,6 +125,143 @@ static int tee_release(struct inode *inode, struct file *filp)
return 0;
}
+/**
+ * uuid_v5() - Calculate UUIDv5
+ * @uuid: Resulting UUID
+ * @ns: Name space ID for UUIDv5 function
+ * @name: Name for UUIDv5 function
+ * @size: Size of name
+ *
+ * UUIDv5 is specified in RFC 4122.
+ *
+ * This implements the following section (using SHA-1):
+ * 4.3. Algorithm for Creating a Name-Based UUID
+ */
+static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
+ size_t size)
+{
+ unsigned char hash[SHA1_DIGEST_SIZE];
+ struct crypto_shash *shash = NULL;
+ struct shash_desc *desc = NULL;
+ int rc;
+
+ shash = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(shash)) {
+ rc = PTR_ERR(shash);
+ pr_err("shash(sha1) allocation failed\n");
+ return rc;
+ }
+
+ desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!desc) {
+ rc = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ desc->tfm = shash;
+
+ rc = crypto_shash_init(desc);
+ if (rc < 0)
+ goto out_free_desc;
+
+ rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
+ if (rc < 0)
+ goto out_free_desc;
+
+ rc = crypto_shash_update(desc, (const u8 *)name, size);
+ if (rc < 0)
+ goto out_free_desc;
+
+ rc = crypto_shash_final(desc, hash);
+ if (rc < 0)
+ goto out_free_desc;
+
+ memcpy(uuid->b, hash, UUID_SIZE);
+
+ /* Tag for version 5 */
+ uuid->b[6] = (hash[6] & 0x0F) | 0x50;
+ uuid->b[8] = (hash[8] & 0x3F) | 0x80;
+
+out_free_desc:
+ kfree(desc);
+
+out_free_shash:
+ crypto_free_shash(shash);
+ return rc;
+}
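
The tagging at the end follows RFC 4122: byte 6 keeps its low nibble from the hash and takes 0x5 (the version) in its high nibble, and byte 8 takes the 10xxxxxx variant bits. A userspace sketch that reproduces the derivation for cross-checking — assuming OpenSSL's legacy SHA1_* API, which is not what the kernel code uses:

	#include <openssl/sha.h>
	#include <stdint.h>
	#include <string.h>

	/* UUIDv5 = SHA-1(ns || name) truncated to 16 bytes, then tagged. */
	static void uuid_v5_user(uint8_t out[16], const uint8_t ns[16],
				 const void *name, size_t len)
	{
		uint8_t hash[SHA_DIGEST_LENGTH];
		SHA_CTX ctx;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, ns, 16);	/* name space UUID first */
		SHA1_Update(&ctx, name, len);	/* then the name */
		SHA1_Final(hash, &ctx);

		memcpy(out, hash, 16);		/* truncate 20 -> 16 bytes */
		out[6] = (hash[6] & 0x0F) | 0x50;	/* version 5 */
		out[8] = (hash[8] & 0x3F) | 0x80;	/* RFC 4122 variant */
	}
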
+
+int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ const u8 connection_data[TEE_IOCTL_UUID_LEN])
+{
+ gid_t ns_grp = (gid_t)-1;
+ kgid_t grp = INVALID_GID;
+ char *name = NULL;
+ int name_len;
+ int rc;
+
+ if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) {
+ /* Nil UUID to be passed to TEE environment */
+ uuid_copy(uuid, &uuid_null);
+ return 0;
+ }
+
+ /*
+ * In the Linux environment the client UUID is based on UUIDv5.
+ *
+ * Determine the client UUID with the following semantics for 'name':
+ *
+ * For TEEC_LOGIN_USER:
+ * uid=<uid>
+ *
+ * For TEEC_LOGIN_GROUP:
+ * gid=<gid>
+ *
+ */
+
+ name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ switch (connection_method) {
+ case TEE_IOCTL_LOGIN_USER:
+ name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
+ current_euid().val);
+ if (name_len >= TEE_UUID_NS_NAME_SIZE) {
+ rc = -E2BIG;
+ goto out_free_name;
+ }
+ break;
+
+ case TEE_IOCTL_LOGIN_GROUP:
+ memcpy(&ns_grp, connection_data, sizeof(gid_t));
+ grp = make_kgid(current_user_ns(), ns_grp);
+ if (!gid_valid(grp) || !in_egroup_p(grp)) {
+ rc = -EPERM;
+ goto out_free_name;
+ }
+
+ name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
+ grp.val);
+ if (name_len >= TEE_UUID_NS_NAME_SIZE) {
+ rc = -E2BIG;
+ goto out_free_name;
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ goto out_free_name;
+ }
+
+ rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
+out_free_name:
+ kfree(name);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
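
Worked example of the name strings: because the format is %x, the ids are rendered in hex — a client with effective uid 1000 hashes the string "uid=3e8" under the name space UUID above, and TEE_IOCTL_LOGIN_GROUP with gid 100 hashes "gid=64" (after the in_egroup_p() membership check). TEE_IOCTL_LOGIN_PUBLIC short-circuits to the nil UUID without any hashing.
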
+
static int tee_ioctl_version(struct tee_context *ctx,
struct tee_ioctl_version_data __user *uvers)
{
@@ -333,6 +485,13 @@ static int tee_ioctl_open_session(struct tee_context *ctx,
goto out;
}
+ if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
+ arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
+ pr_debug("login method not allowed for user-space client\n");
+ rc = -EPERM;
+ goto out;
+ }
+
rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
if (rc)
goto out;
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index bd679b72bd05..827ac3d0fea9 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
+#include <linux/uio.h>
#include "tee_private.h"
static void tee_shm_release(struct tee_shm *shm)
@@ -161,8 +162,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
}
}
- if (ctx)
- teedev_ctx_get(ctx);
+ teedev_ctx_get(ctx);
return shm;
err_rem:
@@ -185,14 +185,15 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
size_t length, u32 flags)
{
struct tee_device *teedev = ctx->teedev;
- const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+ const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+ const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
struct tee_shm *shm;
void *ret;
int rc;
int num_pages;
unsigned long start;
- if (flags != req_flags)
+ if (flags != req_user_flags && flags != req_kernel_flags)
return ERR_PTR(-ENOTSUPP);
if (!tee_device_get(teedev))
@@ -226,7 +227,27 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
- rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
+ if (flags & TEE_SHM_USER_MAPPED) {
+ rc = get_user_pages_fast(start, num_pages, FOLL_WRITE,
+ shm->pages);
+ } else {
+ struct kvec *kiov;
+ int i;
+
+ kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
+ if (!kiov) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ for (i = 0; i < num_pages; i++) {
+ kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
+ kiov[i].iov_len = PAGE_SIZE;
+ }
+
+ rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
+ kfree(kiov);
+ }
if (rc > 0)
shm->num_pages = rc;
if (rc != num_pages) {
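
With this change tee_shm_register() also accepts page-aligned kernel buffers: each page of the range is wrapped in a kvec and pinned via get_kernel_pages(). A hedged sketch of an in-kernel caller (context acquisition and cleanup are assumptions, not part of the patch):

	#include <linux/err.h>
	#include <linux/gfp.h>
	#include <linux/tee_drv.h>

	/* Register a two-page kernel buffer as TEE shared memory. */
	static struct tee_shm *register_kernel_buf(struct tee_context *ctx)
	{
		void *buf = (void *)__get_free_pages(GFP_KERNEL, 1); /* 2 pages */
		struct tee_shm *shm;

		if (!buf)
			return ERR_PTR(-ENOMEM);

		shm = tee_shm_register(ctx, (unsigned long)buf, 2 * PAGE_SIZE,
				       TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
		if (IS_ERR(shm))
			free_pages((unsigned long)buf, 1);
		return shm;
	}
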
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index a8723b1eb8b0..8938ea81a525 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -3,9 +3,9 @@
* Copyright 2018-2020 NXP.
*/
+#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/err.h>
#include <linux/firmware/imx/sci.h>
-#include <linux/firmware/imx/types.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index 1eb757e8df3b..f02010738bb6 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -2,7 +2,6 @@
menuconfig USB4
tristate "Unified support for USB4 and Thunderbolt"
depends on PCI
- depends on X86 || COMPILE_TEST
select APPLE_PROPERTIES if EFI_STUB && X86
select CRC32
select CRYPTO
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index fbbe32ca1e69..ffcc8c3459e5 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1633,6 +1633,15 @@ static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
icm_veto_end(tb);
}
+static bool icm_tgl_is_supported(struct tb *tb)
+{
+ /*
+ * If the firmware is not running, use the software CM. This platform
+ * should fully support both.
+ */
+ return icm_firmware_running(tb->nhi);
+}
+
static void icm_handle_notification(struct work_struct *work)
{
struct icm_notification *n = container_of(work, typeof(*n), work);
@@ -2269,6 +2278,19 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->rtd3_veto = icm_icl_rtd3_veto;
tb->cm_ops = &icm_icl_ops;
break;
+
+ case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+ icm->is_supported = icm_tgl_is_supported;
+ icm->driver_ready = icm_icl_driver_ready;
+ icm->set_uuid = icm_icl_set_uuid;
+ icm->device_connected = icm_icl_device_connected;
+ icm->device_disconnected = icm_tr_device_disconnected;
+ icm->xdomain_connected = icm_tr_xdomain_connected;
+ icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+ icm->rtd3_veto = icm_icl_rtd3_veto;
+ tb->cm_ops = &icm_icl_ops;
+ break;
}
if (!icm->is_supported || !icm->is_supported(tb)) {
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 1be491ecbb45..d299dc168147 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1270,6 +1270,10 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
@@ -1285,6 +1289,7 @@ static struct pci_driver nhi_driver = {
.id_table = nhi_ids,
.probe = nhi_probe,
.remove = nhi_remove,
+ .shutdown = nhi_remove,
.driver.pm = &nhi_pm_ops,
};
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 5d276ee9b38e..80162e4b013f 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -73,6 +73,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
+#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b
+#define PCI_DEVICE_ID_INTEL_TGL_NHI1 0x9a1d
#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index a2ce99051c51..d7d60cd9226f 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -263,7 +263,7 @@ static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
* itself. To be on the safe side keep the root port in D0 during
* the whole upgrade process.
*/
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ root_port = pcie_find_root_port(sw->tb->nhi->pdev);
if (root_port)
pm_runtime_get_noresume(&root_port->dev);
}
@@ -272,7 +272,7 @@ static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
struct pci_dev *root_port;
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ root_port = pcie_find_root_port(sw->tb->nhi->pdev);
if (root_port)
pm_runtime_put(&root_port->dev);
}
@@ -348,12 +348,6 @@ out:
return ret;
}
-static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
- size_t bytes)
-{
- return -EPERM;
-}
-
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
@@ -399,7 +393,6 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
config.read_only = true;
} else {
config.name = "nvm_non_active";
- config.reg_read = tb_switch_nvm_no_read;
config.reg_write = tb_switch_nvm_write;
config.root_only = true;
}
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 436cc51c92c3..cdcc64ea2554 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -371,15 +371,14 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
* tty fields and return the kref reference.
*/
if (rc) {
- tty_port_tty_set(&hp->port, NULL);
- tty->driver_data = NULL;
- tty_port_put(&hp->port);
printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
- } else
+ } else {
/* We are ready... raise DTR/RTS */
if (C_BAUD(tty))
if (hp->ops->dtr_rts)
hp->ops->dtr_rts(hp, 1);
+ tty_port_set_initialized(&hp->port, true);
+ }
/* Force wakeup of the polling thread */
hvc_kick();
@@ -389,22 +388,12 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
static void hvc_close(struct tty_struct *tty, struct file * filp)
{
- struct hvc_struct *hp;
+ struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
if (tty_hung_up_p(filp))
return;
- /*
- * No driver_data means that this close was issued after a failed
- * hvc_open by the tty layer's release_dev() function and we can just
- * exit cleanly because the kref reference wasn't made.
- */
- if (!tty->driver_data)
- return;
-
- hp = tty->driver_data;
-
spin_lock_irqsave(&hp->port.lock, flags);
if (--hp->port.count == 0) {
@@ -412,6 +401,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
/* We are done with the tty pointer now. */
tty_port_tty_set(&hp->port, NULL);
+ if (!tty_port_initialized(&hp->port))
+ return;
+
if (C_HUPCL(tty))
if (hp->ops->dtr_rts)
hp->ops->dtr_rts(hp, 0);
@@ -428,6 +420,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
* waking periodically to check chars_in_buffer().
*/
tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
+ tty_port_set_initialized(&hp->port, false);
} else {
if (hp->port.count < 0)
printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index ee0604cd9c6b..55105ac38f89 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -196,8 +196,6 @@ module_param(hvcs_parm_num_devs, int, 0);
static const char hvcs_driver_name[] = "hvcs";
static const char hvcs_device_node[] = "hvcs";
-static const char hvcs_driver_string[]
- = "IBM hvcs (Hypervisor Virtual Console Server) Driver";
/* Status of partner info rescan triggered via sysfs. */
static int hvcs_rescan_status;
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 9d00ff5ef961..3703987c4666 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -638,16 +638,15 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
-static int mxser_change_speed(struct tty_struct *tty)
+static void mxser_change_speed(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
unsigned cflag, cval, fcr;
- int ret = 0;
unsigned char status;
cflag = tty->termios.c_cflag;
if (!info->ioaddr)
- return ret;
+ return;
if (mxser_set_baud_method[tty->index] == 0)
mxser_set_baud(tty, tty_get_baud_rate(tty));
@@ -803,8 +802,6 @@ static int mxser_change_speed(struct tty_struct *tty)
outb(fcr, info->ioaddr + UART_FCR); /* set fcr */
outb(cval, info->ioaddr + UART_LCR);
-
- return ret;
}
static void mxser_check_modem_status(struct tty_struct *tty,
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index d77ed82a4840..0a29a94ec438 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -504,18 +504,7 @@ static void gsm_print_packet(const char *hdr, int addr, int cr,
else
pr_cont("(F)");
- if (dlen) {
- int ct = 0;
- while (dlen--) {
- if (ct % 8 == 0) {
- pr_cont("\n");
- pr_debug(" ");
- }
- pr_cont("%02X ", *data++);
- ct++;
- }
- }
- pr_cont("\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_NONE, data, dlen);
}
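
print_hex_dump_bytes() replaces the hand-rolled loop: it prints at debug level, 16 bytes per line with an ASCII column, and a zero dlen simply prints nothing. An illustrative call and output shape (the output line is approximate):

	print_hex_dump_bytes("gsm: ", DUMP_PREFIX_OFFSET, frame, 4);
	/* gsm: 00000000: 07 3f 01 1c   .?.. */
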
@@ -673,11 +662,10 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
* FIXME: lock against link layer control transmissions
*/
-static void gsm_data_kick(struct gsm_mux *gsm)
+static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg, *nmsg;
int len;
- int skip_sof = 0;
list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
if (gsm->constipated && msg->addr)
@@ -699,18 +687,23 @@ static void gsm_data_kick(struct gsm_mux *gsm)
print_hex_dump_bytes("gsm_data_kick: ",
DUMP_PREFIX_OFFSET,
gsm->txframe, len);
-
- if (gsm->output(gsm, gsm->txframe + skip_sof,
- len - skip_sof) < 0)
+ if (gsm->output(gsm, gsm->txframe, len) < 0)
break;
/* FIXME: Can eliminate one SOF in many more cases */
gsm->tx_bytes -= msg->len;
- /* For a burst of frames skip the extra SOF within the
- burst */
- skip_sof = 1;
list_del(&msg->list);
kfree(msg);
+
+ if (dlci) {
+ tty_port_tty_wakeup(&dlci->port);
+ } else {
+ int i = 0;
+
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ tty_port_tty_wakeup(&gsm->dlci[i]->port);
+ }
}
}
@@ -762,7 +755,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
/* Add to the actual output queue */
list_add_tail(&msg->list, &gsm->tx_list);
gsm->tx_bytes += msg->len;
- gsm_data_kick(gsm);
+ gsm_data_kick(gsm, dlci);
}
/**
@@ -1223,7 +1216,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm);
+ gsm_data_kick(gsm, NULL);
spin_unlock_irqrestore(&gsm->tx_lock, flags);
break;
case CMD_FCOFF:
@@ -2545,7 +2538,7 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
/* Queue poll */
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm);
+ gsm_data_kick(gsm, NULL);
if (gsm->tx_bytes < TX_THRESH_LO) {
gsm_dlci_data_sweep(gsm);
}
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 991f49ee4026..b09eac4b6d64 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -423,13 +423,6 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
struct n_hdlc_buf *rbuf;
DECLARE_WAITQUEUE(wait, current);
- /* verify user access to buffer */
- if (!access_ok(buf, nr)) {
- pr_warn("%s(%d) %s() can't verify user buffer\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
-
add_wait_queue(&tty->read_wait, &wait);
for (;;) {
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index e2138e7d5dc6..2540b2e4c8e8 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1885,7 +1885,7 @@ static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
*/
static __init int register_PCI(int i, struct pci_dev *dev)
{
- int num_aiops, aiop, max_num_aiops, num_chan, chan;
+ int num_aiops, aiop, max_num_aiops, chan;
unsigned int aiopio[MAX_AIOPS_PER_BOARD];
CONTROLLER_t *ctlp;
@@ -2157,8 +2157,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
/* Reset the AIOPIC, init the serial ports */
for (aiop = 0; aiop < num_aiops; aiop++) {
sResetAiopByNum(ctlp, aiop);
- num_chan = ports_per_aiop;
- for (chan = 0; chan < num_chan; chan++)
+ for (chan = 0; chan < ports_per_aiop; chan++)
init_r_port(i, aiop, chan, dev);
}
@@ -2166,11 +2165,10 @@ static __init int register_PCI(int i, struct pci_dev *dev)
if ((rcktpt_type[i] == ROCKET_TYPE_MODEM) ||
(rcktpt_type[i] == ROCKET_TYPE_MODEMII) ||
(rcktpt_type[i] == ROCKET_TYPE_MODEMIII)) {
- num_chan = ports_per_aiop;
- for (chan = 0; chan < num_chan; chan++)
+ for (chan = 0; chan < ports_per_aiop; chan++)
sPCIModemReset(ctlp, chan, 1);
msleep(500);
- for (chan = 0; chan < num_chan; chan++)
+ for (chan = 0; chan < ports_per_aiop; chan++)
sPCIModemReset(ctlp, chan, 0);
msleep(500);
rmSpeakerReset(ctlp, rocketModel[i].model);
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 45d9117cab68..fc118f649887 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1026,7 +1026,9 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
if (up->port.dev) {
uart->port.dev = up->port.dev;
- uart_get_rs485_mode(uart->port.dev, &uart->port.rs485);
+ ret = uart_get_rs485_mode(&uart->port);
+ if (ret)
+ goto err;
}
if (up->port.flags & UPF_FIXED_TYPE)
@@ -1040,7 +1042,7 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
gpios = mctrl_gpio_init(&uart->port, 0);
if (IS_ERR(gpios)) {
ret = PTR_ERR(gpios);
- goto out_unlock;
+ goto err;
} else {
uart->gpios = gpios;
}
@@ -1089,8 +1091,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
serial8250_apply_quirks(uart);
ret = uart_add_one_port(&serial8250_reg,
&uart->port);
- if (ret == 0)
- ret = uart->port.line;
+ if (ret)
+ goto err;
+
+ ret = uart->port.line;
} else {
dev_info(uart->port.dev,
"skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
@@ -1112,10 +1116,14 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
}
}
-out_unlock:
mutex_unlock(&serial_mutex);
return ret;
+
+err:
+ uart->port.dev = NULL;
+ mutex_unlock(&serial_mutex);
+ return ret;
}
EXPORT_SYMBOL(serial8250_register_8250_port);
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 5cd8c36c8fcc..70d7826788f5 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -109,6 +109,28 @@ static void early_serial8250_write(struct console *console,
uart_console_write(port, s, count, serial_putc);
}
+#ifdef CONFIG_CONSOLE_POLL
+static int early_serial8250_read(struct console *console,
+ char *s, unsigned int count)
+{
+ struct earlycon_device *device = console->data;
+ struct uart_port *port = &device->port;
+ unsigned int status;
+ int num_read = 0;
+
+ while (num_read < count) {
+ status = serial8250_early_in(port, UART_LSR);
+ if (!(status & UART_LSR_DR))
+ break;
+ s[num_read++] = serial8250_early_in(port, UART_RX);
+ }
+
+ return num_read;
+}
+#else
+#define early_serial8250_read NULL
+#endif
+
static void __init init_port(struct earlycon_device *device)
{
struct uart_port *port = &device->port;
@@ -149,6 +171,7 @@ int __init early_serial8250_setup(struct earlycon_device *device,
init_port(device);
device->con->write = early_serial8250_write;
+ device->con->read = early_serial8250_read;
return 0;
}
EARLYCON_DECLARE(uart8250, early_serial8250_setup);
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 59449b6500cd..ddb6aeb76dc5 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -25,13 +25,13 @@
#include "8250.h"
-#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052
-#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d
-#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c
-#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8
-#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2
-#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db
-#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea
+#define PCI_DEVICE_ID_ACCESSIO_COM_2S 0x1052
+#define PCI_DEVICE_ID_ACCESSIO_COM_4S 0x105d
+#define PCI_DEVICE_ID_ACCESSIO_COM_8S 0x106c
+#define PCI_DEVICE_ID_ACCESSIO_COM232_8 0x10a8
+#define PCI_DEVICE_ID_ACCESSIO_COM_2SM 0x10d2
+#define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db
+#define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea
#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
@@ -755,9 +755,7 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
(kernel_ulong_t)&bd \
}
-#define EXAR_DEVICE(vend, devid, bd) { \
- PCI_VDEVICE(vend, PCI_DEVICE_ID_##devid), (kernel_ulong_t)&bd \
- }
+#define EXAR_DEVICE(vend, devid, bd) { PCI_DEVICE_DATA(vend, devid, &bd) }
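
The rename of the ACCES ID macros above is forced by this macro: PCI_DEVICE_DATA() token-pastes the vendor name into the device macro name. Roughly (expansion shown for illustration, following the <linux/pci.h> definition):

	/* EXAR_DEVICE(ACCESSIO, COM_2S, acces_com_2x) becomes: */
	{
		.vendor      = PCI_VENDOR_ID_ACCESSIO,
		.device      = PCI_DEVICE_ID_ACCESSIO_COM_2S,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = (kernel_ulong_t)&acces_com_2x,
	}
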
#define IBM_DEVICE(devid, sdevid, bd) { \
PCI_DEVICE_SUB( \
@@ -769,14 +767,13 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
}
static const struct pci_device_id exar_pci_tbl[] = {
- EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x),
-
+ EXAR_DEVICE(ACCESSIO, COM_2S, acces_com_2x),
+ EXAR_DEVICE(ACCESSIO, COM_4S, acces_com_4x),
+ EXAR_DEVICE(ACCESSIO, COM_8S, acces_com_8x),
+ EXAR_DEVICE(ACCESSIO, COM232_8, acces_com_8x),
+ EXAR_DEVICE(ACCESSIO, COM_2SM, acces_com_2x),
+ EXAR_DEVICE(ACCESSIO, COM_4SM, acces_com_4x),
+ EXAR_DEVICE(ACCESSIO, COM_8SM, acces_com_8x),
CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect),
CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect),
@@ -794,24 +791,24 @@ static const struct pci_device_id exar_pci_tbl[] = {
IBM_DEVICE(XR17C152, SATURN_SERIAL_ONE_PORT, pbn_exar_ibm_saturn),
/* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */
- EXAR_DEVICE(EXAR, EXAR_XR17C152, pbn_exar_XR17C15x),
- EXAR_DEVICE(EXAR, EXAR_XR17C154, pbn_exar_XR17C15x),
- EXAR_DEVICE(EXAR, EXAR_XR17C158, pbn_exar_XR17C15x),
+ EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
+ EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x),
+ EXAR_DEVICE(EXAR, XR17C158, pbn_exar_XR17C15x),
/* Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs */
- EXAR_DEVICE(EXAR, EXAR_XR17V352, pbn_exar_XR17V35x),
- EXAR_DEVICE(EXAR, EXAR_XR17V354, pbn_exar_XR17V35x),
- EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x),
- EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358),
- EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358),
- EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x),
-
- EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2),
- EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4),
- EXAR_DEVICE(COMMTECH, COMMTECH_2324PCI335, pbn_fastcom335_4),
- EXAR_DEVICE(COMMTECH, COMMTECH_2328PCI335, pbn_fastcom335_8),
+ EXAR_DEVICE(EXAR, XR17V352, pbn_exar_XR17V35x),
+ EXAR_DEVICE(EXAR, XR17V354, pbn_exar_XR17V35x),
+ EXAR_DEVICE(EXAR, XR17V358, pbn_exar_XR17V35x),
+ EXAR_DEVICE(EXAR, XR17V4358, pbn_exar_XR17V4358),
+ EXAR_DEVICE(EXAR, XR17V8358, pbn_exar_XR17V8358),
+ EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_exar_XR17V35x),
+ EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_exar_XR17V35x),
+ EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_exar_XR17V35x),
+
+ EXAR_DEVICE(COMMTECH, 4222PCI335, pbn_fastcom335_2),
+ EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
+ EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4),
+ EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8),
{ 0, }
};
MODULE_DEVICE_TABLE(pci, exar_pci_tbl);
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 31c91c2f8c6e..d1d253c4b518 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -19,6 +19,7 @@
#define CHIP_ID2 0x21
#define CHIP_ID_F81865 0x0407
#define CHIP_ID_F81866 0x1010
+#define CHIP_ID_F81966 0x0215
#define CHIP_ID_F81216AD 0x1602
#define CHIP_ID_F81216H 0x0501
#define CHIP_ID_F81216 0x0802
@@ -62,9 +63,9 @@
#define F81216_LDN_HIGH 0x4
/*
- * F81866 registers
+ * F81866/966 registers
*
- * The IRQ setting mode of F81866 is not the same with F81216 series.
+ * The IRQ setting mode of the F81866/966 is not the same as the F81216 series.
* Level/Low: IRQ_MODE0:0, IRQ_MODE1:0
* Edge/High: IRQ_MODE0:1, IRQ_MODE1:0
*
@@ -155,6 +156,7 @@ static int fintek_8250_check_id(struct fintek_8250 *pdata)
switch (chip) {
case CHIP_ID_F81865:
case CHIP_ID_F81866:
+ case CHIP_ID_F81966:
case CHIP_ID_F81216AD:
case CHIP_ID_F81216H:
case CHIP_ID_F81216:
@@ -171,6 +173,7 @@ static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
int *max)
{
switch (pdata->pid) {
+ case CHIP_ID_F81966:
case CHIP_ID_F81865:
case CHIP_ID_F81866:
*min = F81866_LDN_LOW;
@@ -248,6 +251,7 @@ static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level)
sio_write_reg(pdata, LDN, pdata->index);
switch (pdata->pid) {
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1,
0);
@@ -274,6 +278,7 @@ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
{
switch (pdata->pid) {
case CHIP_ID_F81216H: /* 128Bytes FIFO */
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
sio_write_mask_reg(pdata, FIFO_CTRL,
FIFO_MODE_MASK | RXFTHR_MODE_MASK,
@@ -291,6 +296,7 @@ static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
sio_write_reg(pdata, LDN, pdata->index);
switch (pdata->pid) {
+ case CHIP_ID_F81966:
case CHIP_ID_F81866: /* set uart clock for high speed serial mode */
sio_write_mask_reg(pdata, F81866_UART_CLK,
F81866_UART_CLK_MASK,
@@ -327,6 +333,7 @@ static void fintek_8250_set_termios(struct uart_port *port,
case CHIP_ID_F81216H:
reg = RS485;
break;
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
reg = F81866_UART_CLK;
break;
@@ -373,6 +380,7 @@ static void fintek_8250_set_termios_handler(struct uart_8250_port *uart)
switch (pdata->pid) {
case CHIP_ID_F81216H:
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
uart->port.set_termios = fintek_8250_set_termios;
break;
@@ -443,6 +451,7 @@ static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
switch (pdata->pid) {
case CHIP_ID_F81216AD:
case CHIP_ID_F81216H:
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
case CHIP_ID_F81865:
uart->port.rs485_config = fintek_8250_rs485_config;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 0804469ff052..1a74d511b02a 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1869,12 +1869,6 @@ pci_moxa_setup(struct serial_private *priv,
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
-#define PCI_VENDOR_ID_PERICOM 0x12D8
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
-
#define PCI_VENDOR_ID_ACCESIO 0x494f
#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051
#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f77bf820b7a3..1632f7d25acc 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -681,6 +681,9 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
memset(rs485->padding, 0, sizeof(rs485->padding));
port->rs485 = *rs485;
+ gpiod_set_value(port->rs485_term_gpio,
+ rs485->flags & SER_RS485_TERMINATE_BUS);
+
/*
* Both serial8250_em485_init() and serial8250_em485_destroy()
* are idempotent.
@@ -1432,7 +1435,7 @@ static void serial8250_stop_rx(struct uart_port *port)
/**
* serial8250_em485_stop_tx() - generic ->rs485_stop_tx() callback
- * @up: uart 8250 port
+ * @p: uart 8250 port
*
* Generic callback usable by 8250 uart drivers to stop rs485 transmission.
*/
@@ -2615,6 +2618,8 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
+ unsigned int tolerance = port->uartclk / 100;
+
/*
* Ask the core to calculate the divisor for us.
* Allow 1% tolerance at the upper limit so uart clks marginally
@@ -2623,7 +2628,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
*/
return uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / UART_DIV_MAX,
- port->uartclk);
+ (port->uartclk + tolerance) / 16);
}
void
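
Worked example of the new ceiling: with the classic uartclk of 1,843,200 Hz, tolerance = 18,432, so the maximum accepted rate is (1,843,200 + 18,432) / 16 = 116,352 baud. A nominal 115,200 request therefore still succeeds on a marginally slow clock, while the old upper bound of uartclk itself (1,843,200) accepted rates no divisor can produce.
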
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index af0688156dd0..8195a31519ea 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -63,6 +63,7 @@ config SERIAL_8250_PNP
config SERIAL_8250_16550A_VARIANTS
bool "Support for variants of the 16550A serial port"
depends on SERIAL_8250
+ default !X86
help
The 8250 driver can probe for many variants of the venerable 16550A
serial port. Doing so takes additional time at boot.
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index c8186a05a453..e3d10794dbba 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -440,7 +440,7 @@ static int simple_config_check_notpicky(struct pcmcia_device *p_dev,
static int simple_config(struct pcmcia_device *link)
{
struct serial_info *info = link->priv;
- int i = -ENODEV, try;
+ int ret, try;
/*
* First pass: look for a config entry that looks normal.
@@ -472,8 +472,8 @@ found_port:
if (info->quirk && info->quirk->config)
info->quirk->config(link);
- i = pcmcia_enable_device(link);
- if (i != 0)
+ ret = pcmcia_enable_device(link);
+ if (ret != 0)
return -1;
return setup_serial(link, info, link->resource[0]->start, link->irq);
}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index adf9e80e7dc9..0282ad9cdaa7 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1034,13 +1034,22 @@ config SERIAL_SIFIVE_CONSOLE
boot time.)
config SERIAL_LANTIQ
- bool "Lantiq serial driver"
- depends on LANTIQ
+ tristate "Lantiq serial driver"
+ depends on (LANTIQ || X86) || COMPILE_TEST
select SERIAL_CORE
+ help
+ Support for UART on Lantiq and Intel SoCs.
+ To compile this driver as a module, select M here. The
+ module will be called lantiq.
+
+config SERIAL_LANTIQ_CONSOLE
+ bool "Console on Lantiq UART"
+ depends on SERIAL_LANTIQ=y
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
- Support for console and UART on Lantiq SoCs.
+ Select this option if you would like to use a Lantiq UART as the
+ system console.
config SERIAL_QE
tristate "Freescale QUICC Engine serial port support"
@@ -1462,6 +1471,7 @@ config SERIAL_STM32
tristate "STMicroelectronics STM32 serial port support"
select SERIAL_CORE
depends on ARCH_STM32 || COMPILE_TEST
+ select SERIAL_MCTRL_GPIO if GPIOLIB
help
This driver is for the on-chip Serial Controller on
STMicroelectronics STM32 MCUs.
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 2296bb0f9578..8efd7c2a34fe 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2435,6 +2435,37 @@ static void pl011_early_write(struct console *con, const char *s, unsigned n)
uart_console_write(&dev->port, s, n, pl011_putc);
}
+#ifdef CONFIG_CONSOLE_POLL
+static int pl011_getc(struct uart_port *port)
+{
+ if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
+ return NO_POLL_CHAR;
+
+ if (port->iotype == UPIO_MEM32)
+ return readl(port->membase + UART01x_DR);
+ else
+ return readb(port->membase + UART01x_DR);
+}
+
+static int pl011_early_read(struct console *con, char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+ int ch, num_read = 0;
+
+ while (num_read < n) {
+ ch = pl011_getc(&dev->port);
+ if (ch == NO_POLL_CHAR)
+ break;
+
+ s[num_read++] = ch;
+ }
+
+ return num_read;
+}
+#else
+#define pl011_early_read NULL
+#endif
+
/*
* On non-ACPI systems, earlycon is enabled by specifying
* "earlycon=pl011,<address>" on the kernel command line.
@@ -2454,6 +2485,7 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
return -ENODEV;
device->con->write = pl011_early_write;
+ device->con->read = pl011_early_read;
return 0;
}
@@ -2575,6 +2607,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = index;
+ spin_lock_init(&uap->port.lock);
amba_ports[index] = uap;
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 7e7f1398019f..0c80a79d7442 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -766,8 +766,6 @@ static int ar933x_uart_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- uart_get_rs485_mode(&pdev->dev, &port->rs485);
-
port->mapbase = mem_res->start;
port->line = id;
port->irq = irq_res->start;
@@ -786,6 +784,10 @@ static int ar933x_uart_probe(struct platform_device *pdev)
baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
+ ret = uart_get_rs485_mode(port);
+ if (ret)
+ goto err_disable_clk;
+
up->gpios = mctrl_gpio_init(port, 0);
if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS)
return PTR_ERR(up->gpios);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 8d7080efad9b..e43471b33710 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2491,8 +2491,6 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
- uart_get_rs485_mode(&mpdev->dev, &port->rs485);
-
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
port->ops = &atmel_pops;
@@ -2506,6 +2504,10 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
+ ret = uart_get_rs485_mode(port);
+ if (ret)
+ return ret;
+
/* for console, the clock could already be configured */
if (!atmel_port->clk) {
atmel_port->clk = clk_get(&mpdev->dev, "usart");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 5d41075964f2..90298c403042 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1231,9 +1231,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
struct lpuart_port, port);
struct dma_chan *chan = sport->dma_rx_chan;
- if (chan)
- dmaengine_terminate_all(chan);
-
+ dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
kfree(sport->rx_ring.buf);
sport->rx_ring.tail = 0;
@@ -1514,17 +1512,17 @@ static void lpuart_request_dma(struct lpuart_port *sport)
{
sport->dma_tx_chan = dma_request_chan(sport->port.dev, "tx");
if (IS_ERR(sport->dma_tx_chan)) {
- dev_info_once(sport->port.dev,
- "DMA tx channel request failed, operating without tx DMA (%ld)\n",
- PTR_ERR(sport->dma_tx_chan));
+ dev_dbg_once(sport->port.dev,
+ "DMA tx channel request failed, operating without tx DMA (%ld)\n",
+ PTR_ERR(sport->dma_tx_chan));
sport->dma_tx_chan = NULL;
}
sport->dma_rx_chan = dma_request_chan(sport->port.dev, "rx");
if (IS_ERR(sport->dma_rx_chan)) {
- dev_info_once(sport->port.dev,
- "DMA rx channel request failed, operating without rx DMA (%ld)\n",
- PTR_ERR(sport->dma_rx_chan));
+ dev_dbg_once(sport->port.dev,
+ "DMA rx channel request failed, operating without rx DMA (%ld)\n",
+ PTR_ERR(sport->dma_rx_chan));
sport->dma_rx_chan = NULL;
}
}
@@ -2621,7 +2619,9 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_attach_port;
- uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);
+ ret = uart_get_rs485_mode(&sport->port);
+ if (ret)
+ goto failed_get_rs485;
if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX)
dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
@@ -2634,6 +2634,7 @@ static int lpuart_probe(struct platform_device *pdev)
return 0;
+failed_get_rs485:
failed_attach_port:
failed_irq_request:
lpuart_disable_clks(sport);
@@ -2664,8 +2665,7 @@ static int lpuart_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int lpuart_suspend(struct device *dev)
+static int __maybe_unused lpuart_suspend(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
unsigned long temp;
@@ -2723,7 +2723,7 @@ static int lpuart_suspend(struct device *dev)
return 0;
}
-static int lpuart_resume(struct device *dev)
+static int __maybe_unused lpuart_resume(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
bool irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq));
@@ -2754,7 +2754,6 @@ static int lpuart_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(lpuart_pm_ops, lpuart_suspend, lpuart_resume);
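
This is the standard conversion: SIMPLE_DEV_PM_OPS() only wires the callbacks into the dev_pm_ops when CONFIG_PM_SLEEP is set, so __maybe_unused replaces the #ifdef while keeping -Wunused-function quiet in !PM_SLEEP builds. The general idiom, as a sketch (driver name hypothetical):

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		return 0;	/* quiesce hardware, save state */
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		return 0;	/* restore state */
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
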
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index f4d68109bc8b..1265e8d86d8a 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -909,6 +909,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
usr2 &= ~USR2_ORE;
if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
+ imx_uart_writel(sport, USR1_AGTIM, USR1);
+
__imx_uart_rxint(irq, dev_id);
ret = IRQ_HANDLED;
}
@@ -2252,6 +2254,8 @@ static int imx_uart_probe(struct platform_device *pdev)
return PTR_ERR(base);
rxirq = platform_get_irq(pdev, 0);
+ if (rxirq < 0)
+ return rxirq;
txirq = platform_get_irq_optional(pdev, 1);
rtsirq = platform_get_irq_optional(pdev, 2);
@@ -2302,7 +2306,11 @@ static int imx_uart_probe(struct platform_device *pdev)
sport->ucr4 = readl(sport->port.membase + UCR4);
sport->ufcr = readl(sport->port.membase + UFCR);
- uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);
+ ret = uart_get_rs485_mode(&sport->port);
+ if (ret) {
+ clk_disable_unprepare(sport->clk_ipg);
+ return ret;
+ }
if (sport->port.rs485.flags & SER_RS485_ENABLED &&
(!sport->have_rtscts && !sport->have_rtsgpio))
@@ -2398,6 +2406,9 @@ static int imx_uart_probe(struct platform_device *pdev)
}
}
+ /* We need to initialize lock even for non-registered console */
+ spin_lock_init(&sport->port.lock);
+
imx_uart_ports[sport->port.line] = sport;
platform_set_drvdata(pdev, sport);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index c9f94fa82be4..41396982e9e0 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -20,6 +20,8 @@
#include <linux/vt_kern.h>
#include <linux/input.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
#define MAX_CONFIG_LEN 40
@@ -27,6 +29,7 @@ static struct kgdb_io kgdboc_io_ops;
/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
static int configured = -1;
+static DEFINE_MUTEX(config_mutex);
static char config[MAX_CONFIG_LEN];
static struct kparam_string kps = {
@@ -38,6 +41,14 @@ static int kgdboc_use_kms; /* 1 if we use kernel mode switching */
static struct tty_driver *kgdb_tty_driver;
static int kgdb_tty_line;
+static struct platform_device *kgdboc_pdev;
+
+#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
+static struct kgdb_io kgdboc_earlycon_io_ops;
+static struct console *earlycon;
+static int (*earlycon_orig_exit)(struct console *con);
+#endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
+
#ifdef CONFIG_KDB_KEYBOARD
static int kgdboc_reset_connect(struct input_handler *handler,
struct input_dev *dev,
@@ -131,13 +142,27 @@ static void kgdboc_unregister_kbd(void)
#define kgdboc_restore_input()
#endif /* ! CONFIG_KDB_KEYBOARD */
+#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
+static void cleanup_earlycon(void)
+{
+ if (earlycon)
+ kgdb_unregister_io_module(&kgdboc_earlycon_io_ops);
+}
+#else /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
+static inline void cleanup_earlycon(void) { }
+#endif /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
+
static void cleanup_kgdboc(void)
{
+ cleanup_earlycon();
+
+ if (configured != 1)
+ return;
+
if (kgdb_unregister_nmi_console())
return;
kgdboc_unregister_kbd();
- if (configured == 1)
- kgdb_unregister_io_module(&kgdboc_io_ops);
+ kgdb_unregister_io_module(&kgdboc_io_ops);
}
static int configure_kgdboc(void)
@@ -198,20 +223,79 @@ nmi_con_failed:
kgdb_unregister_io_module(&kgdboc_io_ops);
noconfig:
kgdboc_unregister_kbd();
- config[0] = 0;
configured = 0;
- cleanup_kgdboc();
return err;
}
+static int kgdboc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ mutex_lock(&config_mutex);
+ if (configured != 1) {
+ ret = configure_kgdboc();
+
+ /* Convert "no device" to "defer" so we'll keep trying */
+ if (ret == -ENODEV)
+ ret = -EPROBE_DEFER;
+ }
+ mutex_unlock(&config_mutex);
+
+ return ret;
+}
+
+static struct platform_driver kgdboc_platform_driver = {
+ .probe = kgdboc_probe,
+ .driver = {
+ .name = "kgdboc",
+ .suppress_bind_attrs = true,
+ },
+};
+
static int __init init_kgdboc(void)
{
- /* Already configured? */
- if (configured == 1)
+ int ret;
+
+ /*
+ * kgdboc is a little bit of an odd "platform_driver". It can be
+ * up and running long before the platform_driver object is
+ * created and thus doesn't actually store anything in it. There's
+ * only one instance of kgdb so anything is stored as global state.
+ * The platform_driver is only created so that we can leverage the
+ * kernel's mechanisms (like -EPROBE_DEFER) to call us when our
+ * underlying tty is ready. Here we init our platform driver and
+ * then create the single kgdboc instance.
+ */
+ ret = platform_driver_register(&kgdboc_platform_driver);
+ if (ret)
+ return ret;
+
+ kgdboc_pdev = platform_device_alloc("kgdboc", PLATFORM_DEVID_NONE);
+ if (!kgdboc_pdev) {
+ ret = -ENOMEM;
+ goto err_did_register;
+ }
+
+ ret = platform_device_add(kgdboc_pdev);
+ if (!ret)
return 0;
- return configure_kgdboc();
+ platform_device_put(kgdboc_pdev);
+
+err_did_register:
+ platform_driver_unregister(&kgdboc_platform_driver);
+ return ret;
+}
+
+static void exit_kgdboc(void)
+{
+ mutex_lock(&config_mutex);
+ cleanup_kgdboc();
+ mutex_unlock(&config_mutex);
+
+ platform_device_unregister(kgdboc_pdev);
+ platform_driver_unregister(&kgdboc_platform_driver);
}
static int kgdboc_get_char(void)
@@ -234,24 +318,20 @@ static int param_set_kgdboc_var(const char *kmessage,
const struct kernel_param *kp)
{
size_t len = strlen(kmessage);
+ int ret = 0;
if (len >= MAX_CONFIG_LEN) {
pr_err("config string too long\n");
return -ENOSPC;
}
- /* Only copy in the string if the init function has not run yet */
- if (configured < 0) {
- strcpy(config, kmessage);
- return 0;
- }
-
if (kgdb_connected) {
pr_err("Cannot reconfigure while KGDB is connected.\n");
-
return -EBUSY;
}
+ mutex_lock(&config_mutex);
+
strcpy(config, kmessage);
/* Chop out \n char as a result of echo */
if (len && config[len - 1] == '\n')
@@ -260,8 +340,30 @@ static int param_set_kgdboc_var(const char *kmessage,
if (configured == 1)
cleanup_kgdboc();
- /* Go and configure with the new params. */
- return configure_kgdboc();
+ /*
+ * Configure with the new params as long as init already ran.
+ * Note that we can get called before init if someone loads us
+ * with "modprobe kgdboc kgdboc=..." or if they happen to use the
+ * the odd syntax of "kgdboc.kgdboc=..." on the kernel command.
+ */
+ if (configured >= 0)
+ ret = configure_kgdboc();
+
+ /*
+ * If we couldn't configure then clear out the config. Note that
+ * specifying an invalid config on the kernel command line vs.
+ * through sysfs have slightly different behaviors. If we fail
+ * to configure what was specified on the kernel command line
+ * we'll leave it in the 'config' and return -EPROBE_DEFER from
+ * our probe. When specified through sysfs userspace is
+ * responsible for loading the tty driver before setting up.
+ */
+ if (ret)
+ config[0] = '\0';
+
+ mutex_unlock(&config_mutex);
+
+ return ret;
}
static int dbg_restore_graphics;
@@ -275,14 +377,10 @@ static void kgdboc_pre_exp_handler(void)
/* Increment the module count when the debugger is active */
if (!kgdb_connected)
try_module_get(THIS_MODULE);
-
- atomic_inc(&ignore_console_lock_warning);
}
static void kgdboc_post_exp_handler(void)
{
- atomic_dec(&ignore_console_lock_warning);
-
/* decrement the module count when the debugger detaches */
if (!kgdb_connected)
module_put(THIS_MODULE);
@@ -301,7 +399,7 @@ static struct kgdb_io kgdboc_io_ops = {
.post_exception = kgdboc_post_exp_handler,
};
-#ifdef CONFIG_KGDB_SERIAL_CONSOLE
+#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
static int kgdboc_option_setup(char *opt)
{
if (!opt) {
@@ -324,23 +422,181 @@ __setup("kgdboc=", kgdboc_option_setup);
/* This is only available if kgdboc is a built in for early debugging */
static int __init kgdboc_early_init(char *opt)
{
- /* save the first character of the config string because the
- * init routine can destroy it.
- */
- char save_ch;
-
kgdboc_option_setup(opt);
- save_ch = config[0];
- init_kgdboc();
- config[0] = save_ch;
+ configure_kgdboc();
return 0;
}
early_param("ekgdboc", kgdboc_early_init);
-#endif /* CONFIG_KGDB_SERIAL_CONSOLE */
+
+static int kgdboc_earlycon_get_char(void)
+{
+ char c;
+
+ if (!earlycon->read(earlycon, &c, 1))
+ return NO_POLL_CHAR;
+
+ return c;
+}
+
+static void kgdboc_earlycon_put_char(u8 chr)
+{
+ earlycon->write(earlycon, &chr, 1);
+}
+
+static void kgdboc_earlycon_pre_exp_handler(void)
+{
+ struct console *con;
+ static bool already_warned;
+
+ if (already_warned)
+ return;
+
+ /*
+ * When the first normal console comes up the kernel will take all
+ * the boot consoles out of the list. Really, we should stop using
+ * the boot console when it does that but until a TTY is registered
+ * we have no other choice so we keep using it. Since not all
+ * serial drivers might be OK with this, print a warning once per
+ * boot if we detect this case.
+ */
+ for_each_console(con)
+ if (con == earlycon)
+ return;
+
+ already_warned = true;
+ pr_warn("kgdboc_earlycon is still using bootconsole\n");
+}
+
+static int kgdboc_earlycon_deferred_exit(struct console *con)
+{
+ /*
+ * If we get here it means the boot console is going away but we
+ * don't yet have a suitable replacement. Don't pass through to
+ * the original exit routine. We'll call it later in our deinit()
+ * function. For now, restore the original exit() function pointer
+ * as a sentinel that we've hit this point.
+ */
+ con->exit = earlycon_orig_exit;
+
+ return 0;
+}
+
+static void kgdboc_earlycon_deinit(void)
+{
+ if (!earlycon)
+ return;
+
+ if (earlycon->exit == kgdboc_earlycon_deferred_exit)
+ /*
+ * kgdboc_earlycon is exiting but original boot console exit
+ * was never called (AKA kgdboc_earlycon_deferred_exit()
+ * didn't ever run). Undo our trap.
+ */
+ earlycon->exit = earlycon_orig_exit;
+ else if (earlycon->exit)
+ /*
+ * We skipped calling the exit() routine so we could try to
+ * keep using the boot console even after it went away. We're
+ * finally done so call the function now.
+ */
+ earlycon->exit(earlycon);
+
+ earlycon = NULL;
+}
+
+static struct kgdb_io kgdboc_earlycon_io_ops = {
+ .name = "kgdboc_earlycon",
+ .read_char = kgdboc_earlycon_get_char,
+ .write_char = kgdboc_earlycon_put_char,
+ .pre_exception = kgdboc_earlycon_pre_exp_handler,
+ .deinit = kgdboc_earlycon_deinit,
+ .is_console = true,
+};
+
+#define MAX_CONSOLE_NAME_LEN (sizeof((struct console *) 0)->name)
+static char kgdboc_earlycon_param[MAX_CONSOLE_NAME_LEN] __initdata;
+static bool kgdboc_earlycon_late_enable __initdata;
+
+static int __init kgdboc_earlycon_init(char *opt)
+{
+ struct console *con;
+
+ kdb_init(KDB_INIT_EARLY);
+
+ /*
+ * Look for a matching console, or if the name was left blank just
+ * pick the first one we find.
+ */
+ console_lock();
+ for_each_console(con) {
+ if (con->write && con->read &&
+ (con->flags & (CON_BOOT | CON_ENABLED)) &&
+ (!opt || !opt[0] || strcmp(con->name, opt) == 0))
+ break;
+ }
+
+ if (!con) {
+ /*
+ * Both earlycon and kgdboc_earlycon are initialized during
+ * early parameter parsing. We cannot guarantee earlycon gets
+ * in first and, in any case, on ACPI systems earlycon may
+ * defer its own initialization (usually to somewhere within
+ * setup_arch()). To cope with either of these situations
+ * we can defer our own initialization to a little later in
+ * the boot.
+ */
+ if (!kgdboc_earlycon_late_enable) {
+ pr_info("No suitable earlycon yet, will try later\n");
+ if (opt)
+ strscpy(kgdboc_earlycon_param, opt,
+ sizeof(kgdboc_earlycon_param));
+ kgdboc_earlycon_late_enable = true;
+ } else {
+ pr_info("Couldn't find kgdb earlycon\n");
+ }
+ goto unlock;
+ }
+
+ earlycon = con;
+ pr_info("Going to register kgdb with earlycon '%s'\n", con->name);
+ if (kgdb_register_io_module(&kgdboc_earlycon_io_ops) != 0) {
+ earlycon = NULL;
+ pr_info("Failed to register kgdb with earlycon\n");
+ } else {
+ /* Trap exit so we can keep earlycon longer if needed. */
+ earlycon_orig_exit = con->exit;
+ con->exit = kgdboc_earlycon_deferred_exit;
+ }
+
+unlock:
+ console_unlock();
+
+ /* Non-zero means malformed option so we always return zero */
+ return 0;
+}
+
+early_param("kgdboc_earlycon", kgdboc_earlycon_init);
+
+/*
+ * This is only intended for the late adoption of an early console.
+ *
+ * It is not a reliable way to adopt regular consoles because we cannot
+ * control the order in which console initcalls are made and, in any case, many
+ * regular consoles are registered much later in the boot process than
+ * the console initcalls!
+ */
+static int __init kgdboc_earlycon_late_init(void)
+{
+ if (kgdboc_earlycon_late_enable)
+ kgdboc_earlycon_init(kgdboc_earlycon_param);
+ return 0;
+}
+console_initcall(kgdboc_earlycon_late_init);
+
+#endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
module_init(init_kgdboc);
-module_exit(cleanup_kgdboc);
+module_exit(exit_kgdboc);
module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
MODULE_PARM_DESC(kgdboc, "<serial_device>[,baud]");
MODULE_DESCRIPTION("KGDB Console TTY Driver");
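
In practice the two stages chain: kgdboc_earlycon attaches kgdb to a boot console during early parameter parsing, and the kgdboc platform device adopts the real tty once its driver probes (deferring with -EPROBE_DEFER until then). An illustrative kernel command line using both — device names depend on the platform:

	earlycon kgdboc_earlycon kgdboc=ttyS0,115200

Leaving the kgdboc_earlycon value empty makes kgdboc_earlycon_init() pick the first usable boot console, per the matching loop above.
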
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index c5e46ff972e4..62813e421f12 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/lantiq.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -597,6 +598,7 @@ static const struct uart_ops lqasc_pops = {
.verify_port = lqasc_verify_port,
};
+#ifdef CONFIG_SERIAL_LANTIQ_CONSOLE
static void
lqasc_console_putchar(struct uart_port *port, int ch)
{
@@ -705,6 +707,14 @@ lqasc_serial_early_console_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(lantiq, "lantiq,asc", lqasc_serial_early_console_setup);
OF_EARLYCON_DECLARE(lantiq, "intel,lgm-asc", lqasc_serial_early_console_setup);
+#define LANTIQ_SERIAL_CONSOLE (&lqasc_console)
+
+#else
+
+#define LANTIQ_SERIAL_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_LANTIQ_CONSOLE */
+
static struct uart_driver lqasc_reg = {
.owner = THIS_MODULE,
.driver_name = DRVNAME,
@@ -712,7 +722,7 @@ static struct uart_driver lqasc_reg = {
.major = 0,
.minor = 0,
.nr = MAXPORTS,
- .cons = &lqasc_console,
+ .cons = LANTIQ_SERIAL_CONSOLE,
};
static int fetch_irq_lantiq(struct device *dev, struct ltq_uart_port *ltq_port)
@@ -814,8 +824,7 @@ static void free_irq_intel(struct uart_port *port)
free_irq(ltq_port->common_irq, port);
}
-static int __init
-lqasc_probe(struct platform_device *pdev)
+static int lqasc_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct ltq_uart_port *ltq_port;
@@ -899,6 +908,13 @@ lqasc_probe(struct platform_device *pdev)
return ret;
}
+static int lqasc_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+
+ return uart_remove_one_port(&lqasc_reg, port);
+}
+
static const struct ltq_soc_data soc_data_lantiq = {
.fetch_irq = fetch_irq_lantiq,
.request_irq = request_irq_lantiq,
@@ -916,8 +932,11 @@ static const struct of_device_id ltq_asc_match[] = {
{ .compatible = "intel,lgm-asc", .data = &soc_data_intel },
{},
};
+MODULE_DEVICE_TABLE(of, ltq_asc_match);
static struct platform_driver lqasc_driver = {
+ .probe = lqasc_probe,
+ .remove = lqasc_remove,
.driver = {
.name = DRVNAME,
.of_match_table = ltq_asc_match,
@@ -933,10 +952,21 @@ init_lqasc(void)
if (ret != 0)
return ret;
- ret = platform_driver_probe(&lqasc_driver, lqasc_probe);
+ ret = platform_driver_register(&lqasc_driver);
if (ret != 0)
uart_unregister_driver(&lqasc_reg);
return ret;
}
-device_initcall(init_lqasc);
+
+static void __exit exit_lqasc(void)
+{
+ platform_driver_unregister(&lqasc_driver);
+ uart_unregister_driver(&lqasc_reg);
+}
+
+module_init(init_lqasc);
+module_exit(exit_lqasc);
+
+MODULE_DESCRIPTION("Serial driver for Lantiq & Intel gateway SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index 9a836dcac157..b5898c932036 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -23,7 +23,6 @@
#include <linux/nmi.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/sizes.h>
#include <linux/soc/nxp/lpc32xx-misc.h>
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index f7d6b3c9ea45..8573fc9cb0cd 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -33,8 +33,7 @@
#include <linux/pm_wakeirq.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_data/serial-omap.h>
#define OMAP_MAX_HSUART_PORTS 10
@@ -153,7 +152,7 @@ struct uart_omap_port {
u32 errata;
u32 features;
- int rts_gpio;
+ struct gpio_desc *rts_gpiod;
struct pm_qos_request pm_qos_request;
u32 latency;
@@ -303,11 +302,11 @@ static void serial_omap_stop_tx(struct uart_port *port)
serial_out(up, UART_OMAP_SCR, up->scr);
res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
1 : 0;
- if (gpio_get_value(up->rts_gpio) != res) {
+ if (gpiod_get_value(up->rts_gpiod) != res) {
if (port->rs485.delay_rts_after_send > 0)
mdelay(
port->rs485.delay_rts_after_send);
- gpio_set_value(up->rts_gpio, res);
+ gpiod_set_value(up->rts_gpiod, res);
}
} else {
/* We're asked to stop, but there's still stuff in the
@@ -412,8 +411,8 @@ static void serial_omap_start_tx(struct uart_port *port)
/* if rts not already enabled */
res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
- if (gpio_get_value(up->rts_gpio) != res) {
- gpio_set_value(up->rts_gpio, res);
+ if (gpiod_get_value(up->rts_gpiod) != res) {
+ gpiod_set_value(up->rts_gpiod, res);
if (port->rs485.delay_rts_before_send > 0)
mdelay(port->rs485.delay_rts_before_send);
}
@@ -1414,12 +1413,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
* Just as a precaution, only allow rs485
* to be enabled if the gpio pin is valid
*/
- if (gpio_is_valid(up->rts_gpio)) {
+ if (up->rts_gpiod) {
/* enable / disable rts */
val = (port->rs485.flags & SER_RS485_ENABLED) ?
SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
val = (port->rs485.flags & val) ? 1 : 0;
- gpio_set_value(up->rts_gpio, val);
+ gpiod_set_value(up->rts_gpiod, val);
} else
port->rs485.flags &= ~SER_RS485_ENABLED;
@@ -1596,18 +1595,22 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
}
static int serial_omap_probe_rs485(struct uart_omap_port *up,
- struct device_node *np)
+ struct device *dev)
{
struct serial_rs485 *rs485conf = &up->port.rs485;
+ struct device_node *np = dev->of_node;
+ enum gpiod_flags gflags;
int ret;
rs485conf->flags = 0;
- up->rts_gpio = -EINVAL;
+ up->rts_gpiod = NULL;
if (!np)
return 0;
- uart_get_rs485_mode(up->dev, rs485conf);
+ ret = uart_get_rs485_mode(&up->port);
+ if (ret)
+ return ret;
if (of_property_read_bool(np, "rs485-rts-active-high")) {
rs485conf->flags |= SER_RS485_RTS_ON_SEND;
@@ -1618,19 +1621,20 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
}
/* check for tx enable gpio */
- up->rts_gpio = of_get_named_gpio(np, "rts-gpio", 0);
- if (gpio_is_valid(up->rts_gpio)) {
- ret = devm_gpio_request(up->dev, up->rts_gpio, "omap-serial");
- if (ret < 0)
- return ret;
- ret = rs485conf->flags & SER_RS485_RTS_AFTER_SEND ? 1 : 0;
- ret = gpio_direction_output(up->rts_gpio, ret);
- if (ret < 0)
+ gflags = rs485conf->flags & SER_RS485_RTS_AFTER_SEND ?
+ GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ up->rts_gpiod = devm_gpiod_get_optional(dev, "rts", gflags);
+ if (IS_ERR(up->rts_gpiod)) {
+ ret = PTR_ERR(up->rts_gpiod);
+ if (ret == -EPROBE_DEFER)
return ret;
- } else if (up->rts_gpio == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
+ /*
+ * FIXME: the code historically ignored any error other than
+ * -EPROBE_DEFER and just went on without the GPIO.
+ */
+ up->rts_gpiod = NULL;
} else {
- up->rts_gpio = -EINVAL;
+ gpiod_set_consumer_name(up->rts_gpiod, "omap-serial");
}
return 0;
@@ -1703,7 +1707,7 @@ static int serial_omap_probe(struct platform_device *pdev)
dev_info(up->port.dev, "no wakeirq for uart%d\n",
up->port.line);
- ret = serial_omap_probe_rs485(up, pdev->dev.of_node);
+ ret = serial_omap_probe_rs485(up, &pdev->dev);
if (ret < 0)
goto err_rs485;
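The omap-serial conversion above follows the standard legacy-to-descriptor GPIO migration. A self-contained sketch of the descriptor-based request, with a hypothetical example_ helper; only devm_gpiod_get_optional() and the gpiod flags come from the real consumer API:

#include <linux/gpio/consumer.h>

static int example_get_rts(struct device *dev, bool rts_after_send,
			   struct gpio_desc **out)
{
	enum gpiod_flags flags = rts_after_send ? GPIOD_OUT_HIGH
						: GPIOD_OUT_LOW;
	struct gpio_desc *gpiod;

	/* Returns NULL (not an error) when no "rts-gpios" property exists. */
	gpiod = devm_gpiod_get_optional(dev, "rts", flags);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	*out = gpiod;
	return 0;
}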
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 6119090ce045..457c0bf8cbf8 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -141,9 +141,10 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport);
static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
- 32000000, 48000000, 64000000, 80000000,
- 96000000, 100000000, 102400000,
- 112000000, 120000000, 128000000};
+ 32000000, 48000000, 51200000, 64000000,
+ 80000000, 96000000, 100000000,
+ 102400000, 112000000, 120000000,
+ 128000000};
#define to_dev_port(ptr, member) \
container_of(ptr, struct qcom_geni_serial_port, member)
@@ -1090,6 +1091,36 @@ static void qcom_geni_serial_earlycon_write(struct console *con,
__qcom_geni_serial_console_write(&dev->port, s, n);
}
+#ifdef CONFIG_CONSOLE_POLL
+static int qcom_geni_serial_earlycon_read(struct console *con,
+ char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+ struct uart_port *uport = &dev->port;
+ int num_read = 0;
+ int ch;
+
+ while (num_read < n) {
+ ch = qcom_geni_serial_get_char(uport);
+ if (ch == NO_POLL_CHAR)
+ break;
+ s[num_read++] = ch;
+ }
+
+ return num_read;
+}
+
+static void __init qcom_geni_serial_enable_early_read(struct geni_se *se,
+ struct console *con)
+{
+ geni_se_setup_s_cmd(se, UART_START_READ, 0);
+ con->read = qcom_geni_serial_earlycon_read;
+}
+#else
+static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
+ struct console *con) { }
+#endif
+
static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
const char *opt)
{
@@ -1136,6 +1167,8 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
dev->con->write = qcom_geni_serial_earlycon_write;
dev->con->setup = NULL;
+ qcom_geni_serial_enable_early_read(&se, dev->con);
+
return 0;
}
OF_EARLYCON_DECLARE(qcom_geni, "qcom,geni-debug-uart",
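A hedged sketch of how a polling client could consume the con->read hook wired up above; the helper below is illustrative, not the actual kgdboc code path:

/* Drain up to len characters from a console that implements ->read. */
static int example_drain_console(struct console *con, char *buf,
				 unsigned int len)
{
	if (!con->read)
		return 0;

	/* ->read returns how many characters were fetched (may be 0). */
	return con->read(con, buf, len);
}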
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 73f951d65b93..d913d9b2762a 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -154,10 +154,33 @@ struct s3c24xx_uart_port {
#define portaddrl(port, reg) \
((unsigned long *)(unsigned long)((port)->membase + (reg)))
-#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
+static u32 rd_reg(struct uart_port *port, u32 reg)
+{
+ switch (port->iotype) {
+ case UPIO_MEM:
+ return readb_relaxed(portaddr(port, reg));
+ case UPIO_MEM32:
+ return readl_relaxed(portaddr(port, reg));
+ default:
+ return 0;
+ }
+ return 0;
+}
+
#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
-#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
+static void wr_reg(struct uart_port *port, u32 reg, u32 val)
+{
+ switch (port->iotype) {
+ case UPIO_MEM:
+ writeb_relaxed(val, portaddr(port, reg));
+ break;
+ case UPIO_MEM32:
+ writel_relaxed(val, portaddr(port, reg));
+ break;
+ }
+}
+
#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
/* Byte-order aware bit setting/clearing functions. */
@@ -714,7 +737,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
fifocnt--;
uerstat = rd_regl(port, S3C2410_UERSTAT);
- ch = rd_regb(port, S3C2410_URXH);
+ ch = rd_reg(port, S3C2410_URXH);
if (port->flags & UPF_CONS_FLOW) {
int txe = s3c24xx_serial_txempty_nofifo(port);
@@ -826,7 +849,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
}
if (port->x_char) {
- wr_regb(port, S3C2410_UTXH, port->x_char);
+ wr_reg(port, S3C2410_UTXH, port->x_char);
port->icount.tx++;
port->x_char = 0;
goto out;
@@ -852,7 +875,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
break;
- wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
+ wr_reg(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
count--;
@@ -916,7 +939,7 @@ static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
/* no modem control lines */
static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
{
- unsigned int umstat = rd_regb(port, S3C2410_UMSTAT);
+ unsigned int umstat = rd_reg(port, S3C2410_UMSTAT);
if (umstat & S3C2410_UMSTAT_CTS)
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
@@ -1281,14 +1304,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
struct s3c24xx_uart_info *info = ourport->info;
struct clk *clk;
unsigned long rate;
- unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
+ unsigned int cnt, baud, quot, best_quot = 0;
char clkname[MAX_CLK_NAME_LENGTH];
int calc_deviation, deviation = (1 << 30) - 1;
- clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
- ourport->info->def_clk_sel;
for (cnt = 0; cnt < info->num_clks; cnt++) {
- if (!(clk_sel & (1 << cnt)))
+ /* Keep selected clock if provided */
+ if (ourport->cfg->clk_sel &&
+ !(ourport->cfg->clk_sel & (1 << cnt)))
continue;
sprintf(clkname, "clk_uart_baud%d", cnt);
@@ -1974,7 +1997,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct s3c24xx_uart_port *ourport;
int index = probe_index;
- int ret;
+ int ret, prop = 0;
if (np) {
ret = of_alias_get_id(np, "serial");
@@ -2000,10 +2023,27 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
dev_get_platdata(&pdev->dev) :
ourport->drv_data->def_cfg;
- if (np)
+ if (np) {
of_property_read_u32(np,
"samsung,uart-fifosize", &ourport->port.fifosize);
+ if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
+ switch (prop) {
+ case 1:
+ ourport->port.iotype = UPIO_MEM;
+ break;
+ case 4:
+ ourport->port.iotype = UPIO_MEM32;
+ break;
+ default:
+ dev_warn(&pdev->dev, "unsupported reg-io-width (%d)\n",
+ prop);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
if (ourport->drv_data->fifosize[index])
ourport->port.fifosize = ourport->drv_data->fifosize[index];
else if (ourport->info->fifosize)
@@ -2185,7 +2225,7 @@ static int s3c24xx_serial_get_poll_char(struct uart_port *port)
if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
return NO_POLL_CHAR;
- return rd_regb(port, S3C2410_URXH);
+ return rd_reg(port, S3C2410_URXH);
}
static void s3c24xx_serial_put_poll_char(struct uart_port *port,
@@ -2200,7 +2240,7 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
while (!s3c24xx_serial_console_txrdy(port, ufcon))
cpu_relax();
- wr_regb(port, S3C2410_UTXH, c);
+ wr_reg(port, S3C2410_UTXH, c);
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -2212,7 +2252,7 @@ s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
while (!s3c24xx_serial_console_txrdy(port, ufcon))
cpu_relax();
- wr_regb(port, S3C2410_UTXH, ch);
+ wr_reg(port, S3C2410_UTXH, ch);
}
static void
@@ -2587,6 +2627,18 @@ module_platform_driver(samsung_serial_driver);
* Early console.
*/
+static void wr_reg_barrier(struct uart_port *port, u32 reg, u32 val)
+{
+ switch (port->iotype) {
+ case UPIO_MEM:
+ writeb(val, portaddr(port, reg));
+ break;
+ case UPIO_MEM32:
+ writel(val, portaddr(port, reg));
+ break;
+ }
+}
+
struct samsung_early_console_data {
u32 txfull_mask;
};
@@ -2612,7 +2664,7 @@ static void samsung_early_putc(struct uart_port *port, int c)
else
samsung_early_busyuart(port);
- writeb(c, port->membase + S3C2410_UTXH);
+ wr_reg_barrier(port, S3C2410_UTXH, c);
}
static void samsung_early_write(struct console *con, const char *s,
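The rd_reg()/wr_reg() dispatch above generalizes to any uart_port. A sketch that also honors port->regshift, which the samsung driver's own portaddr() macro hard-codes (the example_ name is hypothetical):

static u32 example_serial_in(struct uart_port *port, int offset)
{
	void __iomem *addr = port->membase + (offset << port->regshift);

	switch (port->iotype) {
	case UPIO_MEM:
		return readb_relaxed(addr);
	case UPIO_MEM32:
		return readl_relaxed(addr);
	default:
		return 0;
	}
}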
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 06e8071d5601..d2e5c6c86643 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -315,6 +315,7 @@ struct sc16is7xx_one {
struct kthread_work tx_work;
struct kthread_work reg_work;
struct sc16is7xx_one_config config;
+ bool irda_mode;
};
struct sc16is7xx_port {
@@ -327,7 +328,6 @@ struct sc16is7xx_port {
unsigned char buf[SC16IS7XX_FIFO_SIZE];
struct kthread_worker kworker;
struct task_struct *kworker_task;
- struct kthread_work irq_work;
struct mutex efr_lock;
struct sc16is7xx_one p[];
};
@@ -710,9 +710,9 @@ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
return true;
}
-static void sc16is7xx_ist(struct kthread_work *ws)
+static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
{
- struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
+ struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
mutex_lock(&s->efr_lock);
@@ -727,13 +727,6 @@ static void sc16is7xx_ist(struct kthread_work *ws)
}
mutex_unlock(&s->efr_lock);
-}
-
-static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
-{
- struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
-
- kthread_queue_work(&s->kworker, &s->irq_work);
return IRQ_HANDLED;
}
@@ -994,6 +987,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
static int sc16is7xx_startup(struct uart_port *port)
{
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned int val;
@@ -1032,6 +1026,13 @@ static int sc16is7xx_startup(struct uart_port *port)
/* Now, initialize the UART */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
+ /* Enable IrDA mode if requested in DT */
+ /* This bit must be written with LCR[7] = 0 */
+ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ SC16IS7XX_MCR_IRDA_BIT,
+ one->irda_mode ?
+ SC16IS7XX_MCR_IRDA_BIT : 0);
+
/* Enable the Rx and Tx FIFO */
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_RXDISABLE_BIT |
@@ -1176,10 +1177,11 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
static int sc16is7xx_probe(struct device *dev,
const struct sc16is7xx_devtype *devtype,
- struct regmap *regmap, int irq, unsigned long flags)
+ struct regmap *regmap, int irq)
{
struct sched_param sched_param = { .sched_priority = MAX_RT_PRIO / 2 };
unsigned long freq = 0, *pfreq = dev_get_platdata(dev);
+ unsigned int val;
u32 uartclk = 0;
int i, ret;
struct sc16is7xx_port *s;
@@ -1187,6 +1189,16 @@ static int sc16is7xx_probe(struct device *dev,
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ /*
+ * This device does not have an identification register that would
+ * tell us if we are really connected to the correct device.
+ * The best we can do is to check if communication is at all possible.
+ */
+ ret = regmap_read(regmap,
+ SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
+ if (ret < 0)
+ return ret;
+
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
if (!s) {
@@ -1221,7 +1233,6 @@ static int sc16is7xx_probe(struct device *dev,
mutex_init(&s->efr_lock);
kthread_init_worker(&s->kworker);
- kthread_init_work(&s->irq_work, sc16is7xx_ist);
s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
"sc16is7xx");
if (IS_ERR(s->kworker_task)) {
@@ -1302,9 +1313,33 @@ static int sc16is7xx_probe(struct device *dev,
sc16is7xx_power(&s->p[i].port, 0);
}
- /* Setup interrupt */
- ret = devm_request_irq(dev, irq, sc16is7xx_irq,
- flags, dev_name(dev), s);
+ if (dev->of_node) {
+ struct property *prop;
+ const __be32 *p;
+ u32 u;
+
+ of_property_for_each_u32(dev->of_node, "irda-mode-ports",
+ prop, p, u)
+ if (u < devtype->nr_uart)
+ s->p[u].irda_mode = true;
+ }
+
+ /*
+ * Setup interrupt. We first try to acquire the IRQ line as level IRQ.
+ * If that succeeds, we can allow sharing the interrupt as well.
+ * In case the interrupt controller doesn't support that, we fall
+ * back to a non-shared falling-edge trigger.
+ */
+ ret = devm_request_threaded_irq(dev, irq, NULL, sc16is7xx_irq,
+ IRQF_TRIGGER_LOW | IRQF_SHARED |
+ IRQF_ONESHOT,
+ dev_name(dev), s);
+ if (!ret)
+ return 0;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, sc16is7xx_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), s);
if (!ret)
return 0;
@@ -1378,7 +1413,6 @@ static struct regmap_config regcfg = {
static int sc16is7xx_spi_probe(struct spi_device *spi)
{
const struct sc16is7xx_devtype *devtype;
- unsigned long flags = 0;
struct regmap *regmap;
int ret;
@@ -1399,14 +1433,13 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
const struct spi_device_id *id_entry = spi_get_device_id(spi);
devtype = (struct sc16is7xx_devtype *)id_entry->driver_data;
- flags = IRQF_TRIGGER_FALLING;
}
regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
(devtype->nr_uart - 1);
regmap = devm_regmap_init_spi(spi, &regcfg);
- return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq, flags);
+ return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq);
}
static int sc16is7xx_spi_remove(struct spi_device *spi)
@@ -1445,7 +1478,6 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
const struct sc16is7xx_devtype *devtype;
- unsigned long flags = 0;
struct regmap *regmap;
if (i2c->dev.of_node) {
@@ -1454,14 +1486,13 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
return -ENODEV;
} else {
devtype = (struct sc16is7xx_devtype *)id->driver_data;
- flags = IRQF_TRIGGER_FALLING;
}
regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
(devtype->nr_uart - 1);
regmap = devm_regmap_init_i2c(i2c, &regcfg);
- return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq, flags);
+ return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq);
}
static int sc16is7xx_i2c_remove(struct i2c_client *client)
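The level-then-edge IRQ acquisition above is reusable on its own. A sketch under the assumption that dev, irq and the threaded handler come from the caller's probe path:

static int example_request_irq(struct device *dev, int irq,
			       irq_handler_t thread_fn, void *data)
{
	int ret;

	/* Prefer a shared level trigger if the irqchip supports it... */
	ret = devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					IRQF_TRIGGER_LOW | IRQF_SHARED |
					IRQF_ONESHOT, dev_name(dev), data);
	if (!ret)
		return 0;

	/* ...otherwise fall back to a non-shared falling edge. */
	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					 dev_name(dev), data);
}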
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 66a5e2faf57e..57840cf90388 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -3295,8 +3295,10 @@ EXPORT_SYMBOL(uart_remove_one_port);
* This function implements the device tree binding described in
* Documentation/devicetree/bindings/serial/rs485.txt.
*/
-void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf)
+int uart_get_rs485_mode(struct uart_port *port)
{
+ struct serial_rs485 *rs485conf = &port->rs485;
+ struct device *dev = port->dev;
u32 rs485_delay[2];
int ret;
@@ -3315,6 +3317,7 @@ void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf)
* to get to a defined state with the following properties:
*/
rs485conf->flags &= ~(SER_RS485_RX_DURING_TX | SER_RS485_ENABLED |
+ SER_RS485_TERMINATE_BUS |
SER_RS485_RTS_AFTER_SEND);
rs485conf->flags |= SER_RS485_RTS_ON_SEND;
@@ -3328,6 +3331,23 @@ void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf)
rs485conf->flags &= ~SER_RS485_RTS_ON_SEND;
rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
}
+
+ /*
+ * Disabling termination by default is the safe choice: if many bus
+ * participants enabled it, no communication would be possible at all.
+ * Short cables work fine either way; enable it only for longer cables.
+ */
+ port->rs485_term_gpio = devm_gpiod_get_optional(dev, "rs485-term",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(port->rs485_term_gpio)) {
+ ret = PTR_ERR(port->rs485_term_gpio);
+ port->rs485_term_gpio = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Cannot get rs485-term-gpios\n");
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(uart_get_rs485_mode);
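With uart_get_rs485_mode() now returning int, callers are expected to propagate the error (typically -EPROBE_DEFER from the rs485-term GPIO lookup). A minimal caller sketch, with a hypothetical example_ probe helper:

static int example_probe_rs485(struct uart_port *port)
{
	int ret = uart_get_rs485_mode(port);

	if (ret)
		return ret;	/* e.g. -EPROBE_DEFER */

	/* port->rs485 and port->rs485_term_gpio are now initialized. */
	return 0;
}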
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 0b9e804e61a9..c0dfe4382898 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -2,7 +2,6 @@
#include <linux/bitops.h>
#include <linux/serial_core.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#define SCI_MAJOR 204
#define SCI_MINOR_START 8
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 13eadcb8aec4..0b5110dad051 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -883,6 +883,7 @@ console_initcall(sifive_console_init);
static void __ssp_add_console_port(struct sifive_serial_port *ssp)
{
+ spin_lock_init(&ssp->port.lock);
sifive_serial_console_ports[ssp->port.line] = ssp;
}
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 5e93e8d40f59..8602ff357321 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -31,6 +31,7 @@
#include <linux/tty_flip.h>
#include <linux/tty.h>
+#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"
static void stm32_stop_tx(struct uart_port *port);
@@ -158,9 +159,7 @@ static int stm32_init_rs485(struct uart_port *port,
if (!pdev->dev.of_node)
return -ENODEV;
- uart_get_rs485_mode(&pdev->dev, rs485conf);
-
- return 0;
+ return uart_get_rs485_mode(port);
}
static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
@@ -510,12 +509,29 @@ static void stm32_set_mctrl(struct uart_port *port, unsigned int mctrl)
stm32_set_bits(port, ofs->cr3, USART_CR3_RTSE);
else
stm32_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
+
+ mctrl_gpio_set(stm32_port->gpios, mctrl);
}
static unsigned int stm32_get_mctrl(struct uart_port *port)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ unsigned int ret;
+
/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
- return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+ ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+
+ return mctrl_gpio_get(stm32_port->gpios, &ret);
+}
+
+static void stm32_enable_ms(struct uart_port *port)
+{
+ mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
+}
+
+static void stm32_disable_ms(struct uart_port *port)
+{
+ mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}
/* Transmit stop */
@@ -626,6 +642,9 @@ static void stm32_shutdown(struct uart_port *port)
u32 val, isr;
int ret;
+ /* Disable modem control interrupts */
+ stm32_disable_ms(port);
+
val = USART_CR1_TXEIE | USART_CR1_TE;
val |= stm32_port->cr1_irq | USART_CR1_RE;
val |= BIT(cfg->uart_enable_bit);
@@ -764,6 +783,12 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
}
+ /* Handle modem control interrupts */
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ stm32_enable_ms(port);
+ else
+ stm32_disable_ms(port);
+
usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
/*
@@ -898,6 +923,7 @@ static const struct uart_ops stm32_uart_ops = {
.throttle = stm32_throttle,
.unthrottle = stm32_unthrottle,
.stop_rx = stm32_stop_rx,
+ .enable_ms = stm32_enable_ms,
.break_ctl = stm32_break_ctl,
.startup = stm32_startup,
.shutdown = stm32_shutdown,
@@ -931,7 +957,9 @@ static int stm32_init_port(struct stm32_port *stm32port,
port->rs485_config = stm32_config_rs485;
- stm32_init_rs485(port, pdev);
+ ret = stm32_init_rs485(port, pdev);
+ if (ret)
+ return ret;
if (stm32port->info->cfg.has_wakeup) {
stm32port->wakeirq = platform_get_irq(pdev, 1);
@@ -960,11 +988,32 @@ static int stm32_init_port(struct stm32_port *stm32port,
stm32port->port.uartclk = clk_get_rate(stm32port->clk);
if (!stm32port->port.uartclk) {
- clk_disable_unprepare(stm32port->clk);
ret = -EINVAL;
+ goto err_clk;
+ }
+
+ stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
+ if (IS_ERR(stm32port->gpios)) {
+ ret = PTR_ERR(stm32port->gpios);
+ goto err_clk;
+ }
+
+ /* Both CTS/RTS gpios and "st,hw-flow-ctrl" should not be specified */
+ if (stm32port->hw_flow_control) {
+ if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
+ mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
+ dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
}
return ret;
+
+err_clk:
+ clk_disable_unprepare(stm32port->clk);
+
+ return ret;
}
static struct stm32_port *stm32_of_get_stm32_port(struct platform_device *pdev)
@@ -1375,7 +1424,18 @@ static int __maybe_unused stm32_serial_suspend(struct device *dev)
else
stm32_serial_enable_wakeup(port, false);
- pinctrl_pm_select_sleep_state(dev);
+ /*
+ * When "no_console_suspend" is enabled, keep the pinctrl default state
+ * and rely on bootloader stage to restore this state upon resume.
+ * Otherwise, apply the idle or sleep states depending on wakeup
+ * capabilities.
+ */
+ if (console_suspend_enabled || !uart_console(port)) {
+ if (device_may_wakeup(dev))
+ pinctrl_pm_select_idle_state(dev);
+ else
+ pinctrl_pm_select_sleep_state(dev);
+ }
return 0;
}
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index db8bf0d4982d..d4c916e78d40 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -274,6 +274,7 @@ struct stm32_port {
bool fifoen;
int wakeirq;
int rdr_mask; /* receive data register mask */
+ struct mctrl_gpios *gpios; /* modem control gpios */
};
static struct stm32_port stm32_ports[STM32_MAX_PORTS];
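A minimal sketch of the serial_mctrl_gpio flow the stm32 driver adopts above: the IP block reports what it can, and GPIO-backed lines override the rest. Names come from drivers/tty/serial/serial_mctrl_gpio.h; the helper itself is illustrative:

static unsigned int example_get_mctrl(struct mctrl_gpios *gpios)
{
	/* Signals this IP block cannot report are assumed asserted... */
	unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;

	/* ...then GPIO-backed modem lines overwrite the defaults. */
	return mctrl_gpio_get(gpios, &mctrl);
}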
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 35e9e8faf8de..b9d672af8b65 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1235,9 +1235,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
writel(ctrl, port->membase + CDNS_UART_CR);
uart_console_write(port, s, count, cdns_uart_console_putchar);
- while ((readl(port->membase + CDNS_UART_SR) &
- (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE)) !=
- CDNS_UART_SR_TXEMPTY)
+ while (cdns_uart_tx_empty(port) != TIOCSER_TEMT)
cpu_relax();
/* restore interrupt state */
@@ -1262,6 +1260,7 @@ static int cdns_uart_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
+ unsigned long time_out;
if (!port->membase) {
pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
@@ -1272,6 +1271,13 @@ static int cdns_uart_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
+ /* Wait for tx_empty before setting up the console */
+ time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
+
+ while (time_before(jiffies, time_out) &&
+ cdns_uart_tx_empty(port) != TIOCSER_TEMT)
+ cpu_relax();
+
return uart_set_options(port, co, baud, parity, bits, flow);
}
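The console-setup change uses the usual bounded-poll idiom. In isolation, and assuming the driver's own TX_TIMEOUT constant (in microseconds):

static void example_wait_tx_empty(struct uart_port *port)
{
	unsigned long deadline = jiffies + usecs_to_jiffies(TX_TIMEOUT);

	/* Give up after the deadline rather than spinning forever. */
	while (time_before(jiffies, deadline) &&
	       cdns_uart_tx_empty(port) != TIOCSER_TEMT)
		cpu_relax();
}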
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 0dc3878794fd..477cdc1e9cf3 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -106,7 +106,7 @@ static void sysrq_handle_loglevel(int key)
pr_info("Loglevel set to %d\n", i);
console_loglevel = i;
}
-static struct sysrq_key_op sysrq_loglevel_op = {
+static const struct sysrq_key_op sysrq_loglevel_op = {
.handler = sysrq_handle_loglevel,
.help_msg = "loglevel(0-9)",
.action_msg = "Changing Loglevel",
@@ -119,14 +119,14 @@ static void sysrq_handle_SAK(int key)
struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
schedule_work(SAK_work);
}
-static struct sysrq_key_op sysrq_SAK_op = {
+static const struct sysrq_key_op sysrq_SAK_op = {
.handler = sysrq_handle_SAK,
.help_msg = "sak(k)",
.action_msg = "SAK",
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_SAK_op (*(struct sysrq_key_op *)NULL)
+#define sysrq_SAK_op (*(const struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_VT
@@ -135,14 +135,14 @@ static void sysrq_handle_unraw(int key)
vt_reset_unicode(fg_console);
}
-static struct sysrq_key_op sysrq_unraw_op = {
+static const struct sysrq_key_op sysrq_unraw_op = {
.handler = sysrq_handle_unraw,
.help_msg = "unraw(r)",
.action_msg = "Keyboard mode set to system default",
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
+#define sysrq_unraw_op (*(const struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
static void sysrq_handle_crash(int key)
@@ -152,7 +152,7 @@ static void sysrq_handle_crash(int key)
panic("sysrq triggered crash\n");
}
-static struct sysrq_key_op sysrq_crash_op = {
+static const struct sysrq_key_op sysrq_crash_op = {
.handler = sysrq_handle_crash,
.help_msg = "crash(c)",
.action_msg = "Trigger a crash",
@@ -165,18 +165,20 @@ static void sysrq_handle_reboot(int key)
local_irq_enable();
emergency_restart();
}
-static struct sysrq_key_op sysrq_reboot_op = {
+static const struct sysrq_key_op sysrq_reboot_op = {
.handler = sysrq_handle_reboot,
.help_msg = "reboot(b)",
.action_msg = "Resetting",
.enable_mask = SYSRQ_ENABLE_BOOT,
};
+const struct sysrq_key_op *__sysrq_reboot_op = &sysrq_reboot_op;
+
static void sysrq_handle_sync(int key)
{
emergency_sync();
}
-static struct sysrq_key_op sysrq_sync_op = {
+static const struct sysrq_key_op sysrq_sync_op = {
.handler = sysrq_handle_sync,
.help_msg = "sync(s)",
.action_msg = "Emergency Sync",
@@ -188,7 +190,7 @@ static void sysrq_handle_show_timers(int key)
sysrq_timer_list_show();
}
-static struct sysrq_key_op sysrq_show_timers_op = {
+static const struct sysrq_key_op sysrq_show_timers_op = {
.handler = sysrq_handle_show_timers,
.help_msg = "show-all-timers(q)",
.action_msg = "Show clockevent devices & pending hrtimers (no others)",
@@ -198,7 +200,7 @@ static void sysrq_handle_mountro(int key)
{
emergency_remount();
}
-static struct sysrq_key_op sysrq_mountro_op = {
+static const struct sysrq_key_op sysrq_mountro_op = {
.handler = sysrq_handle_mountro,
.help_msg = "unmount(u)",
.action_msg = "Emergency Remount R/O",
@@ -211,13 +213,13 @@ static void sysrq_handle_showlocks(int key)
debug_show_all_locks();
}
-static struct sysrq_key_op sysrq_showlocks_op = {
+static const struct sysrq_key_op sysrq_showlocks_op = {
.handler = sysrq_handle_showlocks,
.help_msg = "show-all-locks(d)",
.action_msg = "Show Locks Held",
};
#else
-#define sysrq_showlocks_op (*(struct sysrq_key_op *)NULL)
+#define sysrq_showlocks_op (*(const struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_SMP
@@ -264,7 +266,7 @@ static void sysrq_handle_showallcpus(int key)
}
}
-static struct sysrq_key_op sysrq_showallcpus_op = {
+static const struct sysrq_key_op sysrq_showallcpus_op = {
.handler = sysrq_handle_showallcpus,
.help_msg = "show-backtrace-all-active-cpus(l)",
.action_msg = "Show backtrace of all active CPUs",
@@ -282,7 +284,7 @@ static void sysrq_handle_showregs(int key)
show_regs(regs);
perf_event_print_debug();
}
-static struct sysrq_key_op sysrq_showregs_op = {
+static const struct sysrq_key_op sysrq_showregs_op = {
.handler = sysrq_handle_showregs,
.help_msg = "show-registers(p)",
.action_msg = "Show Regs",
@@ -294,7 +296,7 @@ static void sysrq_handle_showstate(int key)
show_state();
show_workqueue_state();
}
-static struct sysrq_key_op sysrq_showstate_op = {
+static const struct sysrq_key_op sysrq_showstate_op = {
.handler = sysrq_handle_showstate,
.help_msg = "show-task-states(t)",
.action_msg = "Show State",
@@ -305,7 +307,7 @@ static void sysrq_handle_showstate_blocked(int key)
{
show_state_filter(TASK_UNINTERRUPTIBLE);
}
-static struct sysrq_key_op sysrq_showstate_blocked_op = {
+static const struct sysrq_key_op sysrq_showstate_blocked_op = {
.handler = sysrq_handle_showstate_blocked,
.help_msg = "show-blocked-tasks(w)",
.action_msg = "Show Blocked State",
@@ -319,21 +321,21 @@ static void sysrq_ftrace_dump(int key)
{
ftrace_dump(DUMP_ALL);
}
-static struct sysrq_key_op sysrq_ftrace_dump_op = {
+static const struct sysrq_key_op sysrq_ftrace_dump_op = {
.handler = sysrq_ftrace_dump,
.help_msg = "dump-ftrace-buffer(z)",
.action_msg = "Dump ftrace buffer",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
#else
-#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
+#define sysrq_ftrace_dump_op (*(const struct sysrq_key_op *)NULL)
#endif
static void sysrq_handle_showmem(int key)
{
show_mem(0, NULL);
}
-static struct sysrq_key_op sysrq_showmem_op = {
+static const struct sysrq_key_op sysrq_showmem_op = {
.handler = sysrq_handle_showmem,
.help_msg = "show-memory-usage(m)",
.action_msg = "Show Memory",
@@ -364,7 +366,7 @@ static void sysrq_handle_term(int key)
send_sig_all(SIGTERM);
console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
}
-static struct sysrq_key_op sysrq_term_op = {
+static const struct sysrq_key_op sysrq_term_op = {
.handler = sysrq_handle_term,
.help_msg = "terminate-all-tasks(e)",
.action_msg = "Terminate All Tasks",
@@ -394,7 +396,7 @@ static void sysrq_handle_moom(int key)
{
schedule_work(&moom_work);
}
-static struct sysrq_key_op sysrq_moom_op = {
+static const struct sysrq_key_op sysrq_moom_op = {
.handler = sysrq_handle_moom,
.help_msg = "memory-full-oom-kill(f)",
.action_msg = "Manual OOM execution",
@@ -406,7 +408,7 @@ static void sysrq_handle_thaw(int key)
{
emergency_thaw_all();
}
-static struct sysrq_key_op sysrq_thaw_op = {
+static const struct sysrq_key_op sysrq_thaw_op = {
.handler = sysrq_handle_thaw,
.help_msg = "thaw-filesystems(j)",
.action_msg = "Emergency Thaw of all frozen filesystems",
@@ -419,7 +421,7 @@ static void sysrq_handle_kill(int key)
send_sig_all(SIGKILL);
console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
}
-static struct sysrq_key_op sysrq_kill_op = {
+static const struct sysrq_key_op sysrq_kill_op = {
.handler = sysrq_handle_kill,
.help_msg = "kill-all-tasks(i)",
.action_msg = "Kill All Tasks",
@@ -430,7 +432,7 @@ static void sysrq_handle_unrt(int key)
{
normalize_rt_tasks();
}
-static struct sysrq_key_op sysrq_unrt_op = {
+static const struct sysrq_key_op sysrq_unrt_op = {
.handler = sysrq_handle_unrt,
.help_msg = "nice-all-RT-tasks(n)",
.action_msg = "Nice All RT Tasks",
@@ -440,7 +442,7 @@ static struct sysrq_key_op sysrq_unrt_op = {
/* Key Operations table and lock */
static DEFINE_SPINLOCK(sysrq_key_table_lock);
-static struct sysrq_key_op *sysrq_key_table[36] = {
+static const struct sysrq_key_op *sysrq_key_table[36] = {
&sysrq_loglevel_op, /* 0 */
&sysrq_loglevel_op, /* 1 */
&sysrq_loglevel_op, /* 2 */
@@ -516,9 +518,9 @@ static int sysrq_key_table_key2index(int key)
/*
* get and put functions for the table, exposed to modules.
*/
-struct sysrq_key_op *__sysrq_get_key_op(int key)
+static const struct sysrq_key_op *__sysrq_get_key_op(int key)
{
- struct sysrq_key_op *op_p = NULL;
+ const struct sysrq_key_op *op_p = NULL;
int i;
i = sysrq_key_table_key2index(key);
@@ -528,7 +530,7 @@ struct sysrq_key_op *__sysrq_get_key_op(int key)
return op_p;
}
-static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
+static void __sysrq_put_key_op(int key, const struct sysrq_key_op *op_p)
{
int i = sysrq_key_table_key2index(key);
@@ -538,7 +540,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
void __handle_sysrq(int key, bool check_mask)
{
- struct sysrq_key_op *op_p;
+ const struct sysrq_key_op *op_p;
int orig_log_level;
int orig_suppress_printk;
int i;
@@ -1061,8 +1063,8 @@ int sysrq_toggle_support(int enable_mask)
}
EXPORT_SYMBOL_GPL(sysrq_toggle_support);
-static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
- struct sysrq_key_op *remove_op_p)
+static int __sysrq_swap_key_ops(int key, const struct sysrq_key_op *insert_op_p,
+ const struct sysrq_key_op *remove_op_p)
{
int retval;
@@ -1085,13 +1087,13 @@ static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
return retval;
}
-int register_sysrq_key(int key, struct sysrq_key_op *op_p)
+int register_sysrq_key(int key, const struct sysrq_key_op *op_p)
{
return __sysrq_swap_key_ops(key, op_p, NULL);
}
EXPORT_SYMBOL(register_sysrq_key);
-int unregister_sysrq_key(int key, struct sysrq_key_op *op_p)
+int unregister_sysrq_key(int key, const struct sysrq_key_op *op_p)
{
return __sysrq_swap_key_ops(key, NULL, op_p);
}
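With the table and accessors constified, modular handlers can be declared const as well. A hypothetical handler registered under a spare key:

static void example_handle_sysrq(int key)
{
	pr_info("example sysrq handler for key %d\n", key);
}

static const struct sysrq_key_op example_sysrq_op = {
	.handler	= example_handle_sysrq,
	.help_msg	= "example(x)",
	.action_msg	= "Run example handler",
	.enable_mask	= SYSRQ_ENABLE_KEYBOARD,
};

/* register_sysrq_key('x', &example_sysrq_op) now takes a const op. */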
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 15d33fa0c925..568b2171f335 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -127,7 +127,11 @@ static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next;
-static int npadch = -1; /* -1 or number assembled on pad */
+
+/* Handles a number being assembled on the number pad */
+static bool npadch_active;
+static unsigned int npadch_value;
+
static unsigned int diacr;
static char rep; /* flag telling character repeat */
@@ -845,12 +849,12 @@ static void k_shift(struct vc_data *vc, unsigned char value, char up_flag)
shift_state &= ~(1 << value);
/* kludge */
- if (up_flag && shift_state != old_state && npadch != -1) {
+ if (up_flag && shift_state != old_state && npadch_active) {
if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, npadch);
+ to_utf8(vc, npadch_value);
else
- put_queue(vc, npadch & 0xff);
- npadch = -1;
+ put_queue(vc, npadch_value & 0xff);
+ npadch_active = false;
}
}
@@ -868,7 +872,7 @@ static void k_meta(struct vc_data *vc, unsigned char value, char up_flag)
static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag)
{
- int base;
+ unsigned int base;
if (up_flag)
return;
@@ -882,10 +886,12 @@ static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag)
base = 16;
}
- if (npadch == -1)
- npadch = value;
- else
- npadch = npadch * base + value;
+ if (!npadch_active) {
+ npadch_value = 0;
+ npadch_active = true;
+ }
+
+ npadch_value = npadch_value * base + value;
}
static void k_lock(struct vc_data *vc, unsigned char value, char up_flag)
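The npadch rework replaces the old -1 sentinel with an explicit flag plus an unsigned accumulator, so the multiply-accumulate is well defined even if a user overflows it. The pattern in isolation:

static bool acc_active;
static unsigned int acc_value;

static void example_acc_digit(unsigned int base, unsigned int digit)
{
	if (!acc_active) {
		acc_value = 0;
		acc_active = true;
	}

	/* Unsigned wrap-around on overflow, never undefined behavior. */
	acc_value = acc_value * base + digit;
}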
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index d54a549c5892..31bb3647a99c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -185,47 +185,54 @@ int set_selection_user(const struct tiocl_selection __user *sel,
return set_selection_kernel(&v, tty);
}
-static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
+static int vc_selection_store_chars(struct vc_data *vc, bool unicode)
{
- struct vc_data *vc = vc_cons[fg_console].d;
- int new_sel_start, new_sel_end, spc;
char *bp, *obp;
- int i, ps, pe;
- u32 c;
- int ret = 0;
- bool unicode;
-
- poke_blanked_console();
-
- v->xs = min_t(u16, v->xs - 1, vc->vc_cols - 1);
- v->ys = min_t(u16, v->ys - 1, vc->vc_rows - 1);
- v->xe = min_t(u16, v->xe - 1, vc->vc_cols - 1);
- v->ye = min_t(u16, v->ye - 1, vc->vc_rows - 1);
- ps = v->ys * vc->vc_size_row + (v->xs << 1);
- pe = v->ye * vc->vc_size_row + (v->xe << 1);
+ unsigned int i;
- if (v->sel_mode == TIOCL_SELCLEAR) {
- /* useful for screendump without selection highlights */
+ /* Allocate a new buffer before freeing the old one ... */
+ /* chars can take up to 4 bytes with unicode */
+ bp = kmalloc_array((vc_sel.end - vc_sel.start) / 2 + 1, unicode ? 4 : 1,
+ GFP_KERNEL);
+ if (!bp) {
+ printk(KERN_WARNING "selection: kmalloc() failed\n");
clear_selection();
- return 0;
+ return -ENOMEM;
}
+ kfree(vc_sel.buffer);
+ vc_sel.buffer = bp;
- if (mouse_reporting() && (v->sel_mode & TIOCL_SELMOUSEREPORT)) {
- mouse_report(tty, v->sel_mode & TIOCL_SELBUTTONMASK, v->xs,
- v->ys);
- return 0;
+ obp = bp;
+ for (i = vc_sel.start; i <= vc_sel.end; i += 2) {
+ u32 c = sel_pos(i, unicode);
+ if (unicode)
+ bp += store_utf8(c, bp);
+ else
+ *bp++ = c;
+ if (!isspace(c))
+ obp = bp;
+ if (!((i + 2) % vc->vc_size_row)) {
+ /* strip trailing blanks from line and add newline,
+ unless non-space at end of line. */
+ if (obp != bp) {
+ bp = obp;
+ *bp++ = '\r';
+ }
+ obp = bp;
+ }
}
+ vc_sel.buf_len = bp - vc_sel.buffer;
- if (ps > pe) /* make vc_sel.start <= vc_sel.end */
- swap(ps, pe);
+ return 0;
+}
- if (vc_sel.cons != vc_cons[fg_console].d) {
- clear_selection();
- vc_sel.cons = vc_cons[fg_console].d;
- }
- unicode = vt_do_kdgkbmode(fg_console) == K_UNICODE;
+static int vc_do_selection(struct vc_data *vc, unsigned short mode, int ps,
+ int pe)
+{
+ int new_sel_start, new_sel_end, spc;
+ bool unicode = vt_do_kdgkbmode(fg_console) == K_UNICODE;
- switch (v->sel_mode) {
+ switch (mode) {
case TIOCL_SELCHAR: /* character-by-character selection */
new_sel_start = ps;
new_sel_end = pe;
@@ -303,40 +310,44 @@ static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *
vc_sel.start = new_sel_start;
vc_sel.end = new_sel_end;
- /* Allocate a new buffer before freeing the old one ... */
- /* chars can take up to 4 bytes with unicode */
- bp = kmalloc_array((vc_sel.end - vc_sel.start) / 2 + 1, unicode ? 4 : 1,
- GFP_KERNEL);
- if (!bp) {
- printk(KERN_WARNING "selection: kmalloc() failed\n");
+ return vc_selection_store_chars(vc, unicode);
+}
+
+static int vc_selection(struct vc_data *vc, struct tiocl_selection *v,
+ struct tty_struct *tty)
+{
+ int ps, pe;
+
+ poke_blanked_console();
+
+ if (v->sel_mode == TIOCL_SELCLEAR) {
+ /* useful for screendump without selection highlights */
clear_selection();
- return -ENOMEM;
+ return 0;
}
- kfree(vc_sel.buffer);
- vc_sel.buffer = bp;
- obp = bp;
- for (i = vc_sel.start; i <= vc_sel.end; i += 2) {
- c = sel_pos(i, unicode);
- if (unicode)
- bp += store_utf8(c, bp);
- else
- *bp++ = c;
- if (!isspace(c))
- obp = bp;
- if (! ((i + 2) % vc->vc_size_row)) {
- /* strip trailing blanks from line and add newline,
- unless non-space at end of line. */
- if (obp != bp) {
- bp = obp;
- *bp++ = '\r';
- }
- obp = bp;
- }
+ v->xs = min_t(u16, v->xs - 1, vc->vc_cols - 1);
+ v->ys = min_t(u16, v->ys - 1, vc->vc_rows - 1);
+ v->xe = min_t(u16, v->xe - 1, vc->vc_cols - 1);
+ v->ye = min_t(u16, v->ye - 1, vc->vc_rows - 1);
+
+ if (mouse_reporting() && (v->sel_mode & TIOCL_SELMOUSEREPORT)) {
+ mouse_report(tty, v->sel_mode & TIOCL_SELBUTTONMASK, v->xs,
+ v->ys);
+ return 0;
}
- vc_sel.buf_len = bp - vc_sel.buffer;
- return ret;
+ ps = v->ys * vc->vc_size_row + (v->xs << 1);
+ pe = v->ye * vc->vc_size_row + (v->xe << 1);
+ if (ps > pe) /* make vc_sel.start <= vc_sel.end */
+ swap(ps, pe);
+
+ if (vc_sel.cons != vc) {
+ clear_selection();
+ vc_sel.cons = vc;
+ }
+
+ return vc_do_selection(vc, v->sel_mode, ps, pe);
}
int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
@@ -345,7 +356,7 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
mutex_lock(&vc_sel.lock);
console_lock();
- ret = __set_selection_kernel(v, tty);
+ ret = vc_selection(vc_cons[fg_console].d, v, tty);
console_unlock();
mutex_unlock(&vc_sel.lock);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 6e725c6c6256..73efb80815db 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -398,7 +398,7 @@ static void uio_dev_del_attributes(struct uio_device *idev)
static int uio_get_minor(struct uio_device *idev)
{
- int retval = -ENOMEM;
+ int retval;
mutex_lock(&minor_lock);
retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index f6ab3f28c838..6e27fe4fcca3 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -44,7 +44,6 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
struct uio_dmem_genirq_platdata *priv = info->priv;
struct uio_mem *uiomem;
- int ret = 0;
int dmem_region = priv->dmem_region_start;
uiomem = &priv->uioinfo->mem[priv->dmem_region_start];
@@ -68,7 +67,7 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
mutex_unlock(&priv->alloc_lock);
/* Wait until the Runtime PM code has woken up the device */
pm_runtime_get_sync(&priv->pdev->dev);
- return ret;
+ return 0;
}
static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 3c5169eb23f5..4dae2320b103 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -361,6 +361,7 @@ hv_uio_remove(struct hv_device *dev)
if (!pdata)
return 0;
+ sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
uio_unregister_device(&pdata->info);
hv_uio_cleanup(dev, pdata);
hv_set_drvdata(dev, NULL);
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 5685ba11480b..e701ab56b0a7 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -138,7 +138,7 @@ static int cdns_ti_probe(struct platform_device *pdev)
error = pm_runtime_get_sync(dev);
if (error < 0) {
dev_err(dev, "pm_runtime_get_sync failed: %d\n", error);
- goto err_get;
+ goto err;
}
/* assert RESET */
@@ -185,7 +185,6 @@ static int cdns_ti_probe(struct platform_device *pdev)
err:
pm_runtime_put_sync(data->dev);
-err_get:
pm_runtime_disable(data->dev);
return error;
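The reason "err_get" could be dropped: pm_runtime_get_sync() raises the device's usage count even when it fails, so the failure path must still issue a put, exactly as the shared "err" label now does. The idiom as a sketch:

static int example_resume_device(struct device *dev)
{
	int error = pm_runtime_get_sync(dev);

	if (error < 0) {
		pm_runtime_put_sync(dev);	/* balance the failed get */
		return error;
	}

	return 0;
}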
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 4aafba20f450..19bbb5b7e6b6 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -82,8 +82,6 @@ static void cdns3_exit_roles(struct cdns3 *cdns)
cdns3_drd_exit(cdns);
}
-static enum usb_role cdsn3_hw_role_state_machine(struct cdns3 *cdns);
-
/**
* cdns3_core_init_role - initialize role of operation
* @cdns: Pointer to cdns3 structure
@@ -193,12 +191,12 @@ err:
}
/**
- * cdsn3_hw_role_state_machine - role switch state machine based on hw events.
+ * cdns3_hw_role_state_machine - role switch state machine based on hw events.
* @cdns: Pointer to controller structure.
*
* Returns next role to be entered based on hw events.
*/
-static enum usb_role cdsn3_hw_role_state_machine(struct cdns3 *cdns)
+static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns)
{
enum usb_role role;
int id, vbus;
@@ -291,14 +289,10 @@ int cdns3_hw_role_switch(struct cdns3 *cdns)
enum usb_role real_role, current_role;
int ret = 0;
- /* Do nothing if role based on syfs. */
- if (cdns->role_override)
- return 0;
-
pm_runtime_get_sync(cdns->dev);
current_role = cdns->role;
- real_role = cdsn3_hw_role_state_machine(cdns);
+ real_role = cdns3_hw_role_state_machine(cdns);
/* Do nothing if nothing changed */
if (current_role == real_role)
@@ -353,39 +347,6 @@ static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role)
pm_runtime_get_sync(cdns->dev);
- /*
- * FIXME: switch role framework should be extended to meet
- * requirements. Driver assumes that role can be controlled
- * by SW or HW. Temporary workaround is to use USB_ROLE_NONE to
- * switch from SW to HW control.
- *
- * For dr_mode == USB_DR_MODE_OTG:
- * if user sets USB_ROLE_HOST or USB_ROLE_DEVICE then driver
- * sets role_override flag and forces that role.
- * if user sets USB_ROLE_NONE, driver clears role_override and lets
- * HW state machine take over.
- *
- * For dr_mode != USB_DR_MODE_OTG:
- * Assumptions:
- * 1. Restricted user control between NONE and dr_mode.
- * 2. Driver doesn't need to rely on role_override flag.
- * 3. Driver needs to ensure that HW state machine is never called
- * if dr_mode != USB_DR_MODE_OTG.
- */
- if (role == USB_ROLE_NONE)
- cdns->role_override = 0;
- else
- cdns->role_override = 1;
-
- /*
- * HW state might have changed so driver need to trigger
- * HW state machine if dr_mode == USB_DR_MODE_OTG.
- */
- if (!cdns->role_override && cdns->dr_mode == USB_DR_MODE_OTG) {
- cdns3_hw_role_switch(cdns);
- goto pm_put;
- }
-
if (cdns->role == role)
goto pm_put;
@@ -528,6 +489,8 @@ static int cdns3_probe(struct platform_device *pdev)
sw_desc.get = cdns3_role_get;
sw_desc.allow_userspace_control = true;
sw_desc.driver_data = cdns;
+ if (device_property_read_bool(dev, "usb-role-switch"))
+ sw_desc.fwnode = dev->fwnode;
cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
if (IS_ERR(cdns->role_sw)) {
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 969eb94de204..1ad1f1fe61e9 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -62,7 +62,6 @@ struct cdns3_role_driver {
* This field based on firmware setting, kernel configuration
* and hardware configuration.
* @role_sw: pointer to role switch object.
- * @role_override: set 1 if role rely on SW.
*/
struct cdns3 {
struct device *dev;
@@ -90,7 +89,6 @@ struct cdns3 {
struct mutex mutex;
enum usb_dr_mode dr_mode;
struct usb_role_switch *role_sw;
- int role_override;
};
int cdns3_hw_role_switch(struct cdns3 *cdns);
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 16ad485f0b69..58089841ed52 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -329,7 +329,7 @@ int cdns3_drd_init(struct cdns3 *cdns)
cdns->otg_v1_regs = NULL;
cdns->otg_regs = regs;
writel(1, &cdns->otg_v0_regs->simulate);
- dev_info(cdns->dev, "DRD version v0 (%08x)\n",
+ dev_dbg(cdns->dev, "DRD version v0 (%08x)\n",
readl(&cdns->otg_v0_regs->version));
} else {
cdns->otg_v0_regs = NULL;
@@ -337,7 +337,7 @@ int cdns3_drd_init(struct cdns3 *cdns)
cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd;
cdns->version = CDNS3_CONTROLLER_V1;
writel(1, &cdns->otg_v1_regs->simulate);
- dev_info(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
+ dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
readl(&cdns->otg_v1_regs->did),
readl(&cdns->otg_v1_regs->rid));
}
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
index e71240b386b4..82645a2a0f52 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/ep0.c
@@ -332,13 +332,6 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
case TEST_K:
case TEST_SE0_NAK:
case TEST_PACKET:
- cdns3_ep0_complete_setup(priv_dev, 0, 1);
- /**
- * Little delay to give the controller some time
- * for sending status stage.
- * This time should be less then 3ms.
- */
- mdelay(1);
cdns3_set_register_bit(&priv_dev->regs->usb_cmd,
USB_CMD_STMODE |
USB_STS_TMODE_SEL(tmode - 1));
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 372460ea4df9..5e24c2e57c0d 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -82,7 +82,7 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
* @ptr: address of device controller register to be read and changed
* @mask: bits requested to clear
*/
-void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
+static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
mask = readl(ptr) & ~mask;
writel(mask, ptr);
@@ -137,7 +137,7 @@ struct usb_request *cdns3_next_request(struct list_head *list)
*
* Returns buffer or NULL if no buffers in list
*/
-struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
+static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}
@@ -148,7 +148,7 @@ struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
*
* Returns request or NULL if no requests in list
*/
-struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
+static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
return list_first_entry_or_null(list, struct cdns3_request, list);
}
@@ -190,7 +190,7 @@ dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
return priv_ep->trb_pool_dma + offset;
}
-int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
+static int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
{
switch (priv_ep->type) {
case USB_ENDPOINT_XFER_ISOC:
@@ -345,7 +345,7 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}
-void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
+static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
{
struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
int current_trb = priv_req->start_trb;
@@ -511,9 +511,9 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
}
}
-struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep,
- struct cdns3_request *priv_req)
+static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep,
+ struct cdns3_request *priv_req)
{
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
priv_req->flags & REQUEST_INTERNAL) {
@@ -551,9 +551,9 @@ struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
return &priv_req->request;
}
-int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep,
- struct cdns3_request *priv_req)
+static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep,
+ struct cdns3_request *priv_req)
{
int deferred = 0;
@@ -836,7 +836,7 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}
-void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
+static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
/* Work around for stale data address in TRB */
if (priv_ep->wa1_set) {
@@ -1904,8 +1904,8 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
return 0;
}
-void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep)
+static void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
{
if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
return;
@@ -1925,8 +1925,8 @@ void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
}
-void cdns3_configure_dmult(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep)
+static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
{
struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
@@ -2548,7 +2548,7 @@ found:
link_trb = priv_req->trb;
/* Update ring only if removed request is on pending_req_list list */
- if (req_on_hw_ring) {
+ if (req_on_hw_ring && link_trb) {
link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
((priv_req->end_trb + 1) * TRB_SIZE));
link_trb->control = (link_trb->control & TRB_CYCLE) |
@@ -2965,7 +2965,7 @@ static int cdns3_init_eps(struct cdns3_device *priv_dev)
priv_ep->flags = 0;
- dev_info(priv_dev->dev, "Initialized %s support: %s %s\n",
+ dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n",
priv_ep->name,
priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
priv_ep->endpoint.caps.type_iso ? "ISO" : "");
@@ -3069,6 +3069,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
priv_dev->gadget.name = "usb-ss-gadget";
priv_dev->gadget.sg_supported = 1;
priv_dev->gadget.quirk_avoids_skb_reserve = 1;
+ priv_dev->gadget.irq = cdns->dev_irq;
spin_lock_init(&priv_dev->lock);
INIT_WORK(&priv_dev->pending_status_wq,
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index d53db520e209..8bafcfc6080d 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -18,17 +18,6 @@ config USB_CHIPIDEA
if USB_CHIPIDEA
-config USB_CHIPIDEA_OF
- tristate
- depends on OF
- default USB_CHIPIDEA
-
-config USB_CHIPIDEA_PCI
- tristate
- depends on USB_PCI
- depends on NOP_USB_XCEIV
- default USB_CHIPIDEA
-
config USB_CHIPIDEA_UDC
bool "ChipIdea device controller"
depends on USB_GADGET
@@ -43,4 +32,30 @@ config USB_CHIPIDEA_HOST
help
Say Y here to enable host controller functionality of the
ChipIdea driver.
+
+config USB_CHIPIDEA_PCI
+ tristate "Enable PCI glue driver" if EMBEDDED
+ depends on USB_PCI
+ depends on NOP_USB_XCEIV
+ default USB_CHIPIDEA
+
+config USB_CHIPIDEA_MSM
+ tristate "Enable MSM hsusb glue driver" if EMBEDDED
+ default USB_CHIPIDEA
+
+config USB_CHIPIDEA_IMX
+ tristate "Enable i.MX USB glue driver" if EMBEDDED
+ depends on OF
+ default USB_CHIPIDEA
+
+config USB_CHIPIDEA_GENERIC
+ tristate "Enable generic USB2 glue driver" if EMBEDDED
+ default USB_CHIPIDEA
+
+config USB_CHIPIDEA_TEGRA
+ tristate "Enable Tegra UDC glue driver" if EMBEDDED
+ depends on OF
+ depends on USB_CHIPIDEA_UDC
+ default USB_CHIPIDEA
+
endif
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index 12df94f78f72..fae779a23866 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -8,11 +8,8 @@ ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
# Glue/Bridge layers go here
-obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_usb2.o
-obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_msm.o
-obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_zevio.o
-
-obj-$(CONFIG_USB_CHIPIDEA_PCI) += ci_hdrc_pci.o
-
-obj-$(CONFIG_USB_CHIPIDEA_OF) += usbmisc_imx.o ci_hdrc_imx.o
-obj-$(CONFIG_USB_CHIPIDEA_OF) += ci_hdrc_tegra.o
+obj-$(CONFIG_USB_CHIPIDEA_GENERIC) += ci_hdrc_usb2.o
+obj-$(CONFIG_USB_CHIPIDEA_MSM) += ci_hdrc_msm.o
+obj-$(CONFIG_USB_CHIPIDEA_PCI) += ci_hdrc_pci.o
+obj-$(CONFIG_USB_CHIPIDEA_IMX) += ci_hdrc_imx.o usbmisc_imx.o
+obj-$(CONFIG_USB_CHIPIDEA_TEGRA) += ci_hdrc_tegra.o
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 644ecaef17ee..0697eb980e5f 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -25,6 +25,7 @@
#define TD_PAGE_COUNT 5
#define CI_HDRC_PAGE_SIZE 4096ul /* page size for TD's */
#define ENDPT_MAX 32
+#define CI_MAX_BUF_SIZE (TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE)
/******************************************************************************
* REGISTERS
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index a479af3ae31d..5ae16368a0c7 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -271,6 +271,7 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
struct device *dev = ci->dev->parent;
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret = 0;
+ struct imx_usbmisc_data *mdata = data->usbmisc_data;
switch (event) {
case CI_HDRC_IMX_HSIC_ACTIVE_EVENT:
@@ -284,11 +285,19 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
}
break;
case CI_HDRC_IMX_HSIC_SUSPEND_EVENT:
- ret = imx_usbmisc_hsic_set_connect(data->usbmisc_data);
+ ret = imx_usbmisc_hsic_set_connect(mdata);
if (ret)
dev_err(dev,
"hsic_set_connect failed, err=%d\n", ret);
break;
+ case CI_HDRC_CONTROLLER_VBUS_EVENT:
+ if (ci->vbus_active)
+ ret = imx_usbmisc_charger_detection(mdata, true);
+ else
+ ret = imx_usbmisc_charger_detection(mdata, false);
+ if (ci->usb_phy)
+ schedule_work(&ci->usb_phy->chg_work);
+ break;
default:
break;
}
@@ -414,6 +423,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
}
pdata.usb_phy = data->phy;
+ if (data->usbmisc_data)
+ data->usbmisc_data->usb_phy = data->phy;
if ((of_device_is_compatible(np, "fsl,imx53-usb") ||
of_device_is_compatible(np, "fsl,imx51-usb")) && pdata.usb_phy &&
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index c2051aeba13f..727d02b6dbd3 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -24,6 +24,7 @@ struct imx_usbmisc_data {
unsigned int hsic:1; /* HSIC controller */
unsigned int ext_id:1; /* ID from external event */
unsigned int ext_vbus:1; /* Vbus from external event */
+ struct usb_phy *usb_phy;
};
int imx_usbmisc_init(struct imx_usbmisc_data *data);
@@ -31,5 +32,6 @@ int imx_usbmisc_init_post(struct imx_usbmisc_data *data);
int imx_usbmisc_set_wakeup(struct imx_usbmisc_data *data, bool enabled);
int imx_usbmisc_hsic_set_connect(struct imx_usbmisc_data *data);
int imx_usbmisc_hsic_set_clk(struct imx_usbmisc_data *data, bool on);
+int imx_usbmisc_charger_detection(struct imx_usbmisc_data *data, bool connect);
#endif /* __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H */
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index c044fba463e4..89e1d82d739b 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -28,13 +28,19 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
.flags = CI_HDRC_DISABLE_STREAMING,
};
-static struct ci_hdrc_platform_data ci_zynq_pdata = {
+static const struct ci_hdrc_platform_data ci_zynq_pdata = {
.capoffset = DEF_CAPOFFSET,
};
+static const struct ci_hdrc_platform_data ci_zevio_pdata = {
+ .capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_REGS_SHARED | CI_HDRC_FORCE_FULLSPEED,
+};
+
static const struct of_device_id ci_hdrc_usb2_of_match[] = {
- { .compatible = "chipidea,usb2"},
- { .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata},
+ { .compatible = "chipidea,usb2" },
+ { .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata },
+ { .compatible = "lsi,zevio-usb", .data = &ci_zevio_pdata },
{ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
@@ -64,13 +70,14 @@ static int ci_hdrc_usb2_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->clk = devm_clk_get(dev, NULL);
- if (!IS_ERR(priv->clk)) {
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(dev, "failed to enable the clock: %d\n", ret);
- return ret;
- }
+ priv->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable the clock: %d\n", ret);
+ return ret;
}
ci_pdata->name = dev_name(dev);
@@ -94,8 +101,7 @@ static int ci_hdrc_usb2_probe(struct platform_device *pdev)
return 0;
clk_err:
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
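The probe above leans on devm_clk_get_optional(): when no clock is described for the device it returns NULL rather than an error, and clk_prepare_enable()/clk_disable_unprepare() treat a NULL clock as a no-op, which is why the IS_ERR() guards can be dropped. A minimal sketch of the pattern; my_probe() is a hypothetical driver, not part of this patch:

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            struct clk *clk;
            int ret;

            /* NULL (not an error) when the node describes no clock */
            clk = devm_clk_get_optional(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);    /* e.g. -EPROBE_DEFER */

            ret = clk_prepare_enable(clk); /* no-op on NULL, returns 0 */
            if (ret)
                    return ret;

            /* ... on the error path or in remove(): */
            clk_disable_unprepare(clk);    /* also safe on a NULL clock */
            return 0;
    }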
diff --git a/drivers/usb/chipidea/ci_hdrc_zevio.c b/drivers/usb/chipidea/ci_hdrc_zevio.c
deleted file mode 100644
index e1634da4a4b1..000000000000
--- a/drivers/usb/chipidea/ci_hdrc_zevio.c
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
- *
- * Based off drivers/usb/chipidea/ci_hdrc_msm.c
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/chipidea.h>
-
-#include "ci.h"
-
-static struct ci_hdrc_platform_data ci_hdrc_zevio_platdata = {
- .name = "ci_hdrc_zevio",
- .flags = CI_HDRC_REGS_SHARED | CI_HDRC_FORCE_FULLSPEED,
- .capoffset = DEF_CAPOFFSET,
-};
-
-static int ci_hdrc_zevio_probe(struct platform_device *pdev)
-{
- struct platform_device *ci_pdev;
-
- dev_dbg(&pdev->dev, "ci_hdrc_zevio_probe\n");
-
- ci_pdev = ci_hdrc_add_device(&pdev->dev,
- pdev->resource, pdev->num_resources,
- &ci_hdrc_zevio_platdata);
-
- if (IS_ERR(ci_pdev)) {
- dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
- return PTR_ERR(ci_pdev);
- }
-
- platform_set_drvdata(pdev, ci_pdev);
-
- return 0;
-}
-
-static int ci_hdrc_zevio_remove(struct platform_device *pdev)
-{
- struct platform_device *ci_pdev = platform_get_drvdata(pdev);
-
- ci_hdrc_remove_device(ci_pdev);
-
- return 0;
-}
-
-static const struct of_device_id ci_hdrc_zevio_dt_ids[] = {
- { .compatible = "lsi,zevio-usb", },
- { /* sentinel */ }
-};
-
-static struct platform_driver ci_hdrc_zevio_driver = {
- .probe = ci_hdrc_zevio_probe,
- .remove = ci_hdrc_zevio_remove,
- .driver = {
- .name = "zevio_usb",
- .of_match_table = ci_hdrc_zevio_dt_ids,
- },
-};
-
-MODULE_DEVICE_TABLE(of, ci_hdrc_zevio_dt_ids);
-module_platform_driver(ci_hdrc_zevio_driver);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index ae0bdc036464..9a7c53d09ab4 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -3,42 +3,16 @@
* core.c - ChipIdea USB IP core family device controller
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2020 NXP
*
* Author: David Lopo
- */
-
-/*
- * Description: ChipIdea USB IP core family device controller
- *
- * This driver is composed of several blocks:
- * - HW: hardware interface
- * - DBG: debug facilities (optional)
- * - UTIL: utilities
- * - ISR: interrupts handling
- * - ENDPT: endpoint operations (Gadget API)
- * - GADGET: gadget operations (Gadget API)
- * - BUS: bus glue code, bus abstraction layer
+ * Peter Chen <peter.chen@nxp.com>
*
- * Compile Options
- * - STALL_IN: non-empty bulk-in pipes cannot be halted
- * if defined mass storage compliance succeeds but with warnings
- * => case 4: Hi > Dn
- * => case 5: Hi > Di
- * => case 8: Hi <> Do
- * if undefined usbtest 13 fails
- * - TRACE: enable function tracing (depends on DEBUG)
- *
- * Main Features
- * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
- * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
- * - Normal & LPM support
- *
- * USBTEST Report
- * - OK: 0-12, 13 (STALL_IN defined) & 14
- * - Not Supported: 15 & 16 (ISO)
- *
- * TODO List
- * - Suspend & Remote Wakeup
+ * Main Features:
+ * - All four transfer types are supported; usbtest passes
+ * - USB certification for gadget: CH9 and Mass Storage tests pass
+ * - Low power mode
+ * - USB wakeup
*/
#include <linux/delay.h>
#include <linux/device.h>
@@ -272,7 +246,7 @@ static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
ci->rev = ci_get_revision(ci);
dev_dbg(ci->dev,
- "ChipIdea HDRC found, revision: %d, lpm: %d; cap: %p op: %p\n",
+ "revision: %d, lpm: %d; cap: %px op: %px\n",
ci->rev, ci->hw_bank.lpm, ci->hw_bank.cap, ci->hw_bank.op);
/* setup lock mode ? */
@@ -666,6 +640,7 @@ static int ci_usb_role_switch_set(struct usb_role_switch *sw,
static struct usb_role_switch_desc ci_role_switch = {
.set = ci_usb_role_switch_set,
.get = ci_usb_role_switch_get,
+ .allow_userspace_control = true,
};
static int ci_get_platdata(struct device *dev,
@@ -1149,8 +1124,11 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ci_otg_is_fsm_mode(ci)) {
/* only update vbus status for peripheral */
- if (ci->role == CI_ROLE_GADGET)
+ if (ci->role == CI_ROLE_GADGET) {
+ /* Pull down DP for possible charger detection */
+ hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
ci_handle_vbus_change(ci);
+ }
ret = ci_role_start(ci, ci->role);
if (ret) {
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 921bcf14dc06..db0cfde0cc3c 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -338,7 +338,7 @@ static int hw_usb_reset(struct ci_hdrc *ci)
*****************************************************************************/
static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
- unsigned length)
+ unsigned int length, struct scatterlist *s)
{
int i;
u32 temp;
@@ -366,7 +366,13 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
}
- temp = (u32) (hwreq->req.dma + hwreq->req.actual);
+ if (s) {
+ temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
+ node->td_remaining_size = CI_MAX_BUF_SIZE - length;
+ } else {
+ temp = (u32) (hwreq->req.dma + hwreq->req.actual);
+ }
+
if (length) {
node->ptr->page[0] = cpu_to_le32(temp);
for (i = 1; i < TD_PAGE_COUNT; i++) {
@@ -400,6 +406,122 @@ static inline u8 _usb_addr(struct ci_hw_ep *ep)
return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
}
+static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
+ struct ci_hw_req *hwreq)
+{
+ unsigned int rest = hwreq->req.length;
+ int pages = TD_PAGE_COUNT;
+ int ret = 0;
+
+ if (rest == 0) {
+ ret = add_td_to_list(hwep, hwreq, 0, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * The first buffer may not be page aligned;
+ * in that case we have to span one extra td.
+ */
+ if (hwreq->req.dma % PAGE_SIZE)
+ pages--;
+
+ while (rest > 0) {
+ unsigned int count = min(hwreq->req.length - hwreq->req.actual,
+ (unsigned int)(pages * CI_HDRC_PAGE_SIZE));
+
+ ret = add_td_to_list(hwep, hwreq, count, NULL);
+ if (ret < 0)
+ return ret;
+
+ rest -= count;
+ }
+
+ if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
+ && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
+ ret = add_td_to_list(hwep, hwreq, 0, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
+ struct scatterlist *s)
+{
+ unsigned int rest = sg_dma_len(s);
+ int ret = 0;
+
+ hwreq->req.actual = 0;
+ while (rest > 0) {
+ unsigned int count = min_t(unsigned int, rest,
+ CI_MAX_BUF_SIZE);
+
+ ret = add_td_to_list(hwep, hwreq, count, s);
+ if (ret < 0)
+ return ret;
+
+ rest -= count;
+ }
+
+ return ret;
+}
+
+static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
+{
+ int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
+ / CI_HDRC_PAGE_SIZE;
+ int i;
+
+ node->ptr->token +=
+ cpu_to_le32(sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));
+
+ for (i = empty_td_slot_index; i < TD_PAGE_COUNT; i++) {
+ u32 page = (u32) sg_dma_address(s) +
+ (i - empty_td_slot_index) * CI_HDRC_PAGE_SIZE;
+
+ page &= ~TD_RESERVED_MASK;
+ node->ptr->page[i] = cpu_to_le32(page);
+ }
+}
+
+static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+{
+ struct usb_request *req = &hwreq->req;
+ struct scatterlist *s = req->sg;
+ int ret = 0, i = 0;
+ struct td_node *node = NULL;
+
+ if (!s || req->zero || req->length == 0) {
+ dev_err(hwep->ci->dev, "operation not supported for sg\n");
+ return -EINVAL;
+ }
+
+ while (i++ < req->num_mapped_sgs) {
+ if (sg_dma_address(s) % PAGE_SIZE) {
+ dev_err(hwep->ci->dev, "sg buffer is not page aligned\n");
+ return -EINVAL;
+ }
+
+ if (node && (node->td_remaining_size >= sg_dma_len(s))) {
+ ci_add_buffer_entry(node, s);
+ node->td_remaining_size -= sg_dma_len(s);
+ } else {
+ ret = prepare_td_per_sg(hwep, hwreq, s);
+ if (ret)
+ return ret;
+
+ node = list_entry(hwreq->tds.prev,
+ struct td_node, td);
+ }
+
+ s = sg_next(s);
+ }
+
+ return ret;
+}
+
/**
* _hardware_enqueue: configures a request at hardware level
* @hwep: endpoint
@@ -411,8 +533,6 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
struct ci_hdrc *ci = hwep->ci;
int ret = 0;
- unsigned rest = hwreq->req.length;
- int pages = TD_PAGE_COUNT;
struct td_node *firstnode, *lastnode;
/* don't queue twice */
@@ -426,35 +546,13 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
if (ret)
return ret;
- /*
- * The first buffer could be not page aligned.
- * In that case we have to span into one extra td.
- */
- if (hwreq->req.dma % PAGE_SIZE)
- pages--;
-
- if (rest == 0) {
- ret = add_td_to_list(hwep, hwreq, 0);
- if (ret < 0)
- goto done;
- }
-
- while (rest > 0) {
- unsigned count = min(hwreq->req.length - hwreq->req.actual,
- (unsigned)(pages * CI_HDRC_PAGE_SIZE));
- ret = add_td_to_list(hwep, hwreq, count);
- if (ret < 0)
- goto done;
-
- rest -= count;
- }
+ if (hwreq->req.num_mapped_sgs)
+ ret = prepare_td_for_sg(hwep, hwreq);
+ else
+ ret = prepare_td_for_non_sg(hwep, hwreq);
- if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
- && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
- ret = add_td_to_list(hwep, hwreq, 0);
- if (ret < 0)
- goto done;
- }
+ if (ret)
+ return ret;
firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
@@ -1561,6 +1659,7 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&ci->lock, flags);
ci->vbus_active = is_active;
@@ -1570,10 +1669,14 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
usb_phy_set_charger_state(ci->usb_phy, is_active ?
USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
+ if (ci->platdata->notify_event)
+ ret = ci->platdata->notify_event(ci,
+ CI_HDRC_CONTROLLER_VBUS_EVENT);
+
if (ci->driver)
ci_hdrc_gadget_connect(_gadget, is_active);
- return 0;
+ return ret;
}
static int ci_udc_wakeup(struct usb_gadget *_gadget)
@@ -1936,6 +2039,7 @@ static int udc_start(struct ci_hdrc *ci)
ci->gadget.max_speed = USB_SPEED_HIGH;
ci->gadget.name = ci->platdata->name;
ci->gadget.otg_caps = otg_caps;
+ ci->gadget.sg_supported = 1;
if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
ci->gadget.quirk_avoids_skb_reserve = 1;
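With sg_supported set, gadget function drivers may hand the UDC a scatterlist instead of one linear buffer; prepare_td_for_sg() above then builds TDs per entry and rejects requests that use req->zero, have zero length, or whose buffers are not page aligned. A sketch of a conforming submission; queue_sg_request(), buf0 and buf1 are hypothetical names, not part of this patch:

    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <linux/usb/gadget.h>

    static int queue_sg_request(struct usb_ep *ep, struct usb_request *req,
                                void *buf0, void *buf1, size_t len)
    {
            struct scatterlist *sgl;

            sgl = kmalloc_array(2, sizeof(*sgl), GFP_KERNEL);
            if (!sgl)
                    return -ENOMEM;

            sg_init_table(sgl, 2);
            sg_set_buf(&sgl[0], buf0, len); /* must be page aligned */
            sg_set_buf(&sgl[1], buf1, len); /* must be page aligned */

            req->sg = sgl;
            req->num_sgs = 2;
            req->length = 2 * len;
            req->zero = 0;  /* a trailing ZLP is not supported with sg */

            return usb_ep_queue(ep, req, GFP_KERNEL);
    }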
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index ebb11b625bb8..5193df1e18c7 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -61,16 +61,14 @@ struct td_node {
struct list_head td;
dma_addr_t dma;
struct ci_hw_td *ptr;
+ int td_remaining_size;
};
/**
* struct ci_hw_req - usb request representation
* @req: request structure for gadget drivers
* @queue: link to QH list
- * @ptr: transfer descriptor for this request
- * @dma: dma address for the transfer descriptor
- * @zptr: transfer descriptor for the zero packet
- * @zdma: dma address of the zero packet's transfer descriptor
+ * @tds: link to TD list
*/
struct ci_hw_req {
struct usb_request req;
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index e81e33c26e6c..f136876cb4a3 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/usb/otg.h>
#include "ci_hdrc_imx.h"
@@ -99,6 +100,33 @@
#define MX7D_USB_VBUS_WAKEUP_SOURCE_AVALID MX7D_USB_VBUS_WAKEUP_SOURCE(1)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID MX7D_USB_VBUS_WAKEUP_SOURCE(2)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_SESS_END MX7D_USB_VBUS_WAKEUP_SOURCE(3)
+#define MX7D_USBNC_AUTO_RESUME BIT(2)
+/* The default DM/DP value is pull-down */
+#define MX7D_USBNC_USB_CTRL2_OPMODE(v) ((v) << 6)
+#define MX7D_USBNC_USB_CTRL2_OPMODE_NON_DRIVING MX7D_USBNC_USB_CTRL2_OPMODE(1)
+#define MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK (BIT(7) | BIT(6))
+#define MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN BIT(8)
+#define MX7D_USBNC_USB_CTRL2_DP_OVERRIDE_VAL BIT(12)
+#define MX7D_USBNC_USB_CTRL2_DP_OVERRIDE_EN BIT(13)
+#define MX7D_USBNC_USB_CTRL2_DM_OVERRIDE_VAL BIT(14)
+#define MX7D_USBNC_USB_CTRL2_DM_OVERRIDE_EN BIT(15)
+#define MX7D_USBNC_USB_CTRL2_DP_DM_MASK (BIT(12) | BIT(13) | \
+ BIT(14) | BIT(15))
+
+#define MX7D_USB_OTG_PHY_CFG1 0x30
+#define MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL BIT(0)
+#define MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 BIT(1)
+#define MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 BIT(2)
+#define MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB BIT(3)
+#define MX7D_USB_OTG_PHY_CFG2_DRVVBUS0 BIT(16)
+
+#define MX7D_USB_OTG_PHY_CFG2 0x34
+
+#define MX7D_USB_OTG_PHY_STATUS 0x3c
+#define MX7D_USB_OTG_PHY_STATUS_LINE_STATE0 BIT(0)
+#define MX7D_USB_OTG_PHY_STATUS_LINE_STATE1 BIT(1)
+#define MX7D_USB_OTG_PHY_STATUS_VBUS_VLD BIT(3)
+#define MX7D_USB_OTG_PHY_STATUS_CHRGDET BIT(29)
#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
MX6_BM_ID_WAKEUP)
@@ -114,6 +142,8 @@ struct usbmisc_ops {
int (*hsic_set_connect)(struct imx_usbmisc_data *data);
/* It's called during suspend/resume */
int (*hsic_set_clk)(struct imx_usbmisc_data *data, bool enabled);
+ /* usb charger detection */
+ int (*charger_detection)(struct imx_usbmisc_data *data);
};
struct imx_usbmisc {
@@ -609,10 +639,263 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
reg |= MX6_BM_PWR_POLARITY;
writel(reg, usbmisc->base);
- reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
- reg &= ~MX7D_USB_VBUS_WAKEUP_SOURCE_MASK;
- writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID,
- usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ /* SoC non-burst setting */
+ reg = readl(usbmisc->base);
+ writel(reg | MX6_BM_NON_BURST_SETTING, usbmisc->base);
+
+ if (!data->hsic) {
+ reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ reg &= ~MX7D_USB_VBUS_WAKEUP_SOURCE_MASK;
+ writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID
+ | MX7D_USBNC_AUTO_RESUME,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ }
+
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ usbmisc_imx7d_set_wakeup(data, false);
+
+ return 0;
+}
+
+static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ struct usb_phy *usb_phy = data->usb_phy;
+ int val;
+ unsigned long flags;
+
+ /* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ writel(val | MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL,
+ usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ usleep_range(1000, 2000);
+
+ /*
+ * Per BC 1.2, check voltage of D+:
+ * DCP: if greater than VDAT_REF;
+ * CDP: if less than VDAT_REF.
+ */
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+ if (val & MX7D_USB_OTG_PHY_STATUS_CHRGDET) {
+ dev_dbg(data->dev, "It is a dedicate charging port\n");
+ usb_phy->chg_type = DCP_TYPE;
+ } else {
+ dev_dbg(data->dev, "It is a charging downstream port\n");
+ usb_phy->chg_type = CDP_TYPE;
+ }
+
+ return 0;
+}
+
+static void imx7_disable_charger_detector(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ val &= ~(MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL);
+ writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+
+ /* Set OPMODE to 2'b00 and disable its override */
+ val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK;
+ writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2);
+
+ val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ writel(val & ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+}
+
+static int imx7d_charger_data_contact_detect(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ unsigned long flags;
+ u32 val;
+ int i, data_pin_contact_count = 0;
+
+ /* Enable Data Contact Detect (DCD) per the USB BC 1.2 */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ writel(val | MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB,
+ usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ for (i = 0; i < 100; i++) {
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+ if (!(val & MX7D_USB_OTG_PHY_STATUS_LINE_STATE0)) {
+ if (data_pin_contact_count++ > 5)
+ /* Data pin makes contact */
+ break;
+ usleep_range(5000, 10000);
+ } else {
+ data_pin_contact_count = 0;
+ usleep_range(5000, 6000);
+ }
+ }
+
+ /* Disable DCD after the data contact check has finished */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ writel(val & ~MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB,
+ usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ if (i == 100) {
+ dev_err(data->dev,
+ "VBUS is coming from a dedicated power supply.\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ struct usb_phy *usb_phy = data->usb_phy;
+ unsigned long flags;
+ u32 val;
+
+ /* VDP_SRC is connected to D+ and IDM_SINK is connected to D- */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL;
+ writel(val | MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0,
+ usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ usleep_range(1000, 2000);
+
+ /* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+ if (!(val & MX7D_USB_OTG_PHY_STATUS_CHRGDET)) {
+ dev_dbg(data->dev, "It is a standard downstream port\n");
+ usb_phy->chg_type = SDP_TYPE;
+ }
+
+ return 0;
+}
+
+/*
+ * The whole charger detection process:
+ * 1. Override OPMODE to non-driving
+ * 2. Data contact check
+ * 3. Primary detection
+ * 4. Secondary detection
+ * 5. Disable charger detection
+ */
+static int imx7d_charger_detection(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ struct usb_phy *usb_phy = data->usb_phy;
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ /* Check if vbus is valid */
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+ if (!(val & MX7D_USB_OTG_PHY_STATUS_VBUS_VLD)) {
+ dev_err(data->dev, "vbus is error\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Keep OPMODE in non-driving mode during the whole
+ * charger detection process.
+ */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK;
+ val |= MX7D_USBNC_USB_CTRL2_OPMODE_NON_DRIVING;
+ writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2);
+
+ val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ writel(val | MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ ret = imx7d_charger_data_contact_detect(data);
+ if (ret)
+ return ret;
+
+ ret = imx7d_charger_primary_detection(data);
+ if (!ret && usb_phy->chg_type != SDP_TYPE)
+ ret = imx7d_charger_secondary_detection(data);
+
+ imx7_disable_charger_detector(data);
+
+ return ret;
+}
+
+static int usbmisc_imx7ulp_init(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ unsigned long flags;
+ u32 reg;
+
+ if (data->index >= 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ reg = readl(usbmisc->base);
+ if (data->disable_oc) {
+ reg |= MX6_BM_OVER_CUR_DIS;
+ } else {
+ reg &= ~MX6_BM_OVER_CUR_DIS;
+
+ /*
+ * If the polarity is not configured, keep it as set up
+ * by the bootloader.
+ */
+ if (data->oc_pol_configured && data->oc_pol_active_low)
+ reg |= MX6_BM_OVER_CUR_POLARITY;
+ else if (data->oc_pol_configured)
+ reg &= ~MX6_BM_OVER_CUR_POLARITY;
+ }
+ /* If the polarity is not set, keep it as set up by the bootloader */
+ if (data->pwr_pol == 1)
+ reg |= MX6_BM_PWR_POLARITY;
+
+ writel(reg, usbmisc->base);
+
+ /* SoC non-burst setting */
+ reg = readl(usbmisc->base);
+ writel(reg | MX6_BM_NON_BURST_SETTING, usbmisc->base);
+
+ if (data->hsic) {
+ reg = readl(usbmisc->base);
+ writel(reg | MX6_BM_UTMI_ON_CLOCK, usbmisc->base);
+
+ reg = readl(usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET);
+ reg |= MX6_BM_HSIC_EN | MX6_BM_HSIC_CLK_ON;
+ writel(reg, usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET);
+
+ /*
+ * For the non-HSIC controller, autoresume is enabled
+ * in the MXS PHY driver (usbphy_ctrl bit18).
+ */
+ reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ writel(reg | MX7D_USBNC_AUTO_RESUME,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ } else {
+ reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ reg &= ~MX7D_USB_VBUS_WAKEUP_SOURCE_MASK;
+ writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ }
spin_unlock_irqrestore(&usbmisc->lock, flags);
@@ -659,6 +942,14 @@ static const struct usbmisc_ops imx6sx_usbmisc_ops = {
static const struct usbmisc_ops imx7d_usbmisc_ops = {
.init = usbmisc_imx7d_init,
.set_wakeup = usbmisc_imx7d_set_wakeup,
+ .charger_detection = imx7d_charger_detection,
+};
+
+static const struct usbmisc_ops imx7ulp_usbmisc_ops = {
+ .init = usbmisc_imx7ulp_init,
+ .set_wakeup = usbmisc_imx7d_set_wakeup,
+ .hsic_set_connect = usbmisc_imx6_hsic_set_connect,
+ .hsic_set_clk = usbmisc_imx6_hsic_set_clk,
};
static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
@@ -737,6 +1028,39 @@ int imx_usbmisc_hsic_set_clk(struct imx_usbmisc_data *data, bool on)
return usbmisc->ops->hsic_set_clk(data, on);
}
EXPORT_SYMBOL_GPL(imx_usbmisc_hsic_set_clk);
+
+int imx_usbmisc_charger_detection(struct imx_usbmisc_data *data, bool connect)
+{
+ struct imx_usbmisc *usbmisc;
+ struct usb_phy *usb_phy;
+ int ret = 0;
+
+ if (!data)
+ return -EINVAL;
+
+ usbmisc = dev_get_drvdata(data->dev);
+ usb_phy = data->usb_phy;
+ if (!usbmisc->ops->charger_detection)
+ return -ENOTSUPP;
+
+ if (connect) {
+ ret = usbmisc->ops->charger_detection(data);
+ if (ret) {
+ dev_err(data->dev,
+ "Error occurs during detection: %d\n",
+ ret);
+ usb_phy->chg_state = USB_CHARGER_ABSENT;
+ } else {
+ usb_phy->chg_state = USB_CHARGER_PRESENT;
+ }
+ } else {
+ usb_phy->chg_state = USB_CHARGER_ABSENT;
+ usb_phy->chg_type = UNKNOWN_TYPE;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_usbmisc_charger_detection);
+
static const struct of_device_id usbmisc_imx_dt_ids[] = {
{
.compatible = "fsl,imx25-usbmisc",
@@ -780,7 +1104,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
},
{
.compatible = "fsl,imx7ulp-usbmisc",
- .data = &imx7d_usbmisc_ops,
+ .data = &imx7ulp_usbmisc_ops,
},
{ /* sentinel */ }
};
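Condensing the new entry point: imx7d_charger_detection() runs the BC 1.2 steps listed in its comment, with the OPMODE override held across the whole sequence. A restatement of the control flow, reusing the helpers added above (chg_type constants come from <linux/usb/phy.h>; this is a sketch, not additional driver code):

    static int bc12_sequence(struct imx_usbmisc_data *data, struct usb_phy *phy)
    {
            int ret;

            /* 2. Data contact detect: wait for D+ to make contact */
            ret = imx7d_charger_data_contact_detect(data);
            if (ret)
                    return ret;     /* dedicated power supply, no data pins */

            /* 3. Primary detection: SDP vs. charging port */
            ret = imx7d_charger_primary_detection(data);

            /* 4. Secondary detection, charging ports only: DCP vs. CDP */
            if (!ret && phy->chg_type != SDP_TYPE)
                    ret = imx7d_charger_secondary_detection(data);

            /* 5. Switch the comparators back off */
            imx7_disable_charger_detector(data);
            return ret;
    }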
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ded8d93834ca..f67088bb8218 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -584,7 +584,7 @@ static void acm_softint(struct work_struct *work)
}
if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
- for (i = 0; i < ACM_NR; i++)
+ for (i = 0; i < acm->rx_buflimit; i++)
if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
acm_submit_read_urb(acm, i, GFP_NOIO);
}
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 0d8e3f3804a3..084c48c5848f 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -468,7 +468,8 @@ static int usblp_release(struct inode *inode, struct file *file)
usb_autopm_put_interface(usblp->intf);
if (!usblp->present) /* finish cleanup from disconnect */
- usblp_cleanup(usblp);
+ usblp_cleanup(usblp); /* any URBs must be dead */
+
mutex_unlock(&usblp_mutex);
return 0;
}
@@ -1375,9 +1376,11 @@ static void usblp_disconnect(struct usb_interface *intf)
usblp_unlink_urbs(usblp);
mutex_unlock(&usblp->mut);
+ usb_poison_anchored_urbs(&usblp->urbs);
if (!usblp->used)
usblp_cleanup(usblp);
+
mutex_unlock(&usblp_mutex);
}
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 44f28a114c2b..94b6fa6e585e 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -598,8 +598,6 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
return -EINVAL;
if (nbytes <= 0)
return 0;
- if (!access_ok(buf, nbytes))
- return -EFAULT;
mutex_lock(&usb_bus_idr_lock);
/* print devices for all busses */
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index b9db9812d6c5..96d4507d988a 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -251,9 +251,19 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
usbm->vma_use_count = 1;
INIT_LIST_HEAD(&usbm->memlist);
- if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, size)) {
- dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
- return -EAGAIN;
+ if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(usbm->mem) >> PAGE_SHIFT,
+ size, vma->vm_page_prot) < 0) {
+ dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+ return -EAGAIN;
+ }
+ } else {
+ if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle,
+ size)) {
+ dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+ return -EAGAIN;
+ }
}
vma->vm_flags |= VM_IO;
@@ -1126,11 +1136,6 @@ static int proc_control(struct usb_dev_state *ps, void __user *arg)
ctrl.bRequestType, ctrl.bRequest, ctrl.wValue,
ctrl.wIndex, ctrl.wLength);
if (ctrl.bRequestType & 0x80) {
- if (ctrl.wLength && !access_ok(ctrl.data,
- ctrl.wLength)) {
- ret = -EINVAL;
- goto done;
- }
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
@@ -1215,10 +1220,6 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
}
tmo = bulk.timeout;
if (bulk.ep & 0x80) {
- if (len1 && !access_ok(bulk.data, len1)) {
- ret = -EINVAL;
- goto done;
- }
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
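usbdev_mmap() now chooses the mapping primitive by how the buffer was allocated: local-memory-pool or PIO-only HCD buffers are plain kernel memory with no DMA mapping, so remap_pfn_range() over virt_to_phys() is appropriate, while dma_alloc_coherent() memory must go through dma_mmap_coherent() so the DMA layer can apply the right page protections. A sketch of the same decision in isolation; the handler name and the uses_dma/kvaddr/dma_handle parameters are assumptions for illustration:

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    static int my_mmap_buffer(struct device *dev, struct vm_area_struct *vma,
                              void *kvaddr, dma_addr_t dma_handle, bool uses_dma)
    {
            size_t size = vma->vm_end - vma->vm_start;

            if (!uses_dma)  /* plain kernel memory: map its pages directly */
                    return remap_pfn_range(vma, vma->vm_start,
                                           virt_to_phys(kvaddr) >> PAGE_SHIFT,
                                           size, vma->vm_page_prot);

            /* coherent DMA memory: let the DMA layer pick the pgprot */
            return dma_mmap_coherent(dev, vma, kvaddr, dma_handle, size);
    }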
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index f0a259937da8..1547aa6e5314 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -159,6 +159,7 @@ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
* usb_hcd_pci_probe - initialize PCI-based HCDs
* @dev: USB Host Controller being probed
* @id: pci hotplug id connecting controller to HCD framework
+ * @driver: USB HC driver handle
* Context: !in_interrupt()
*
* Allocates basic PCI resources for this USB host controller, and
@@ -169,9 +170,9 @@ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
*
* Return: 0 if successful.
*/
-int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id,
+ const struct hc_driver *driver)
{
- struct hc_driver *driver;
struct usb_hcd *hcd;
int retval;
int hcd_irq = 0;
@@ -181,7 +182,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!id)
return -EINVAL;
- driver = (struct hc_driver *)id->driver_data;
+
if (!driver)
return -EINVAL;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index aa45840d8273..de624c47e190 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/genalloc.h>
#include <linux/io.h>
+#include <linux/kcov.h>
#include <linux/phy/phy.h>
#include <linux/usb.h>
@@ -1645,7 +1646,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
+ kcov_remote_start_usb((u64)urb->dev->bus->busnum);
urb->complete(urb);
+ kcov_remote_stop();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 2b6565c06c23..b1e14beaac5f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -39,6 +39,7 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
+#define USB_PRODUCT_USB5534B 0x5534
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
@@ -92,7 +93,7 @@ module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(old_scheme_first,
"start with the old device initialization scheme");
-static bool use_both_schemes = 1;
+static bool use_both_schemes = true;
module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_both_schemes,
"try the other device initialization scheme if the "
@@ -5621,8 +5622,11 @@ out_hdev_lock:
}
static const struct usb_device_id hub_id_table[] = {
- { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT
+ | USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_SMSC,
+ .idProduct = USB_PRODUCT_USB5534B,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index a97dd1ba964e..73f4482d833a 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* usb hub driver head file
*
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index 2ae90158ded7..fdd4897401e2 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* drivers/usb/core/otg_whitelist.h
*
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 9f4320b9d7fc..a2ca38e25e0c 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -1262,8 +1262,10 @@ void usb_create_sysfs_intf_files(struct usb_interface *intf)
if (!alt->string && !(udev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
alt->string = usb_cache_string(udev, alt->desc.iInterface);
- if (alt->string && device_create_file(&intf->dev, &dev_attr_interface))
- ; /* We don't actually care if the function fails. */
+ if (alt->string && device_create_file(&intf->dev, &dev_attr_interface)) {
+ /* This is not a serious error */
+ dev_dbg(&intf->dev, "interface string descriptor file not created\n");
+ }
intf->sysfs_files_created = 1;
}
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 64ed4023a8c8..19e4c550bc73 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Released under the GPLv2 only.
*/
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 78a4925aa118..fec17a2d2447 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -524,10 +524,25 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
greset |= GRSTCTL_CSFTRST;
dwc2_writel(hsotg, greset, GRSTCTL);
- if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) {
- dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n",
- __func__);
- return -EBUSY;
+ if ((hsotg->hw_params.snpsid & DWC2_CORE_REV_MASK) <
+ (DWC2_CORE_REV_4_20a & DWC2_CORE_REV_MASK)) {
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL,
+ GRSTCTL_CSFTRST, 10000)) {
+ dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST\n",
+ __func__);
+ return -EBUSY;
+ }
+ } else {
+ if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL,
+ GRSTCTL_CSFTRST_DONE, 10000)) {
+ dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST_DONE\n",
+ __func__);
+ return -EBUSY;
+ }
+ greset = dwc2_readl(hsotg, GRSTCTL);
+ greset &= ~GRSTCTL_CSFTRST;
+ greset |= GRSTCTL_CSFTRST_DONE;
+ dwc2_writel(hsotg, greset, GRSTCTL);
}
/* Wait for AHB master IDLE state */
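The new branch keys off the release half of GSNPSID only: the top 16 bits carry a family ID (0x4f54 "OT" for DWC_otg, 0x5531/0x5532 for the FS/HS IoT cores), so both sides of the comparison are masked with DWC2_CORE_REV_MASK before deciding whether the core uses the self-clearing CSFTRST or the newer CSFTRST_DONE handshake. As a hypothetical helper, not in this patch:

    static bool dwc2_core_is_at_least_420a(struct dwc2_hsotg *hsotg)
    {
            return (hsotg->hw_params.snpsid & DWC2_CORE_REV_MASK) >=
                   (DWC2_CORE_REV_4_20a & DWC2_CORE_REV_MASK);
    }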
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 99b0bdfe0012..132d687f1590 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* core.h - DesignWare HS OTG Controller common declarations
*
@@ -1103,8 +1103,10 @@ struct dwc2_hsotg {
#define DWC2_CORE_REV_3_00a 0x4f54300a
#define DWC2_CORE_REV_3_10a 0x4f54310a
#define DWC2_CORE_REV_4_00a 0x4f54400a
+#define DWC2_CORE_REV_4_20a 0x4f54420a
#define DWC2_FS_IOT_REV_1_00a 0x5531100a
#define DWC2_HS_IOT_REV_1_00a 0x5532100a
+#define DWC2_CORE_REV_MASK 0x0000ffff
/* DWC OTG HW Core ID */
#define DWC2_OTG_ID 0x4f540000
@@ -1309,6 +1311,8 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
+int dwc2_check_core_version(struct dwc2_hsotg *hsotg);
+
/*
* Common core Functions.
* The following functions support managing the DWC_otg controller in either
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 876ff31261d5..55f1d14fc414 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -416,10 +416,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
if (ret && (ret != -ENOTSUPP))
dev_err(hsotg->dev, "exit power_down failed\n");
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
call_gadget(hsotg, resume);
+ } else {
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
}
- /* Change to L0 state */
- hsotg->lx_state = DWC2_L0;
} else {
if (hsotg->params.power_down)
return;
diff --git a/drivers/usb/dwc2/debug.h b/drivers/usb/dwc2/debug.h
index a8c565b6bc34..47252c56d410 100644
--- a/drivers/usb/dwc2/debug.h
+++ b/drivers/usb/dwc2/debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* debug.h - Designware USB2 DRD controller debug header
*
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 1224fa9df604..ea02ee63ac6d 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* hcd.h - DesignWare HS OTG Controller host-mode declarations
*
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index c4027bbcedec..c3d6dde2aca4 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* hw.h - DesignWare HS OTG Controller hardware definitions
*
@@ -126,6 +126,7 @@
#define GRSTCTL HSOTG_REG(0x010)
#define GRSTCTL_AHBIDLE BIT(31)
#define GRSTCTL_DMAREQ BIT(30)
+#define GRSTCTL_CSFTRST_DONE BIT(29)
#define GRSTCTL_TXFNUM_MASK (0x1f << 6)
#define GRSTCTL_TXFNUM_SHIFT 6
#define GRSTCTL_TXFNUM_LIMIT 0x1f
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 8ccc83f7eb3f..ce736d67c7c3 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -782,25 +782,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
u32 grxfsiz;
- /*
- * Attempt to ensure this device is really a DWC_otg Controller.
- * Read and verify the GSNPSID register contents. The value should be
- * 0x45f4xxxx, 0x5531xxxx or 0x5532xxxx
- */
-
- hw->snpsid = dwc2_readl(hsotg, GSNPSID);
- if ((hw->snpsid & GSNPSID_ID_MASK) != DWC2_OTG_ID &&
- (hw->snpsid & GSNPSID_ID_MASK) != DWC2_FS_IOT_ID &&
- (hw->snpsid & GSNPSID_ID_MASK) != DWC2_HS_IOT_ID) {
- dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
- hw->snpsid);
- return -ENODEV;
- }
-
- dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
- hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
- hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
-
hwcfg1 = dwc2_readl(hsotg, GHWCFG1);
hwcfg2 = dwc2_readl(hsotg, GHWCFG2);
hwcfg3 = dwc2_readl(hsotg, GHWCFG3);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 69972750e161..e571c8ae65ec 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -363,6 +363,37 @@ static bool dwc2_check_core_endianness(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_check_core_version() - Check core version
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ */
+int dwc2_check_core_version(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+
+ /*
+ * Attempt to ensure this device is really a DWC_otg Controller.
+ * Read and verify the GSNPSID register contents. The value should be
+ * 0x45f4xxxx, 0x5531xxxx or 0x5532xxxx
+ */
+
+ hw->snpsid = dwc2_readl(hsotg, GSNPSID);
+ if ((hw->snpsid & GSNPSID_ID_MASK) != DWC2_OTG_ID &&
+ (hw->snpsid & GSNPSID_ID_MASK) != DWC2_FS_IOT_ID &&
+ (hw->snpsid & GSNPSID_ID_MASK) != DWC2_HS_IOT_ID) {
+ dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
+ hw->snpsid);
+ return -ENODEV;
+ }
+
+ dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
+ hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
+ hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+ return 0;
+}
+
+/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
*
@@ -445,6 +476,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
"snps,need-phy-for-wake");
/*
+ * Before performing any core-related operations,
+ * check the core version.
+ */
+ retval = dwc2_check_core_version(hsotg);
+ if (retval)
+ goto error;
+
+ /*
+ * Reset before dwc2_get_hwparams() so it reads the real power-on
+ * reset values from the registers.
*/
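dwc2_check_core_version() prints the release as four nibbles of the GSNPSID value. For a hypothetical snpsid of 0x4f54420a, the format above yields "Core Release: 4.20a (snpsid=4f54420a)"; a self-contained sketch of the same decode:

    static void demo_print_release(struct dwc2_hsotg *hsotg, u32 snpsid)
    {
            dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
                    snpsid >> 12 & 0xf, snpsid >> 8 & 0xf,
                    snpsid >> 4 & 0xf, snpsid & 0xf, snpsid);
    }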
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 206caa0ea1c6..7a2304565a73 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -4,6 +4,7 @@ config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
depends on (USB || USB_GADGET) && HAS_DMA
select USB_XHCI_PLATFORM if USB_XHCI_HCD
+ select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
help
Say Y or M here if your system has a Dual Role SuperSpeed
USB controller based on the DesignWare USB3 IP Core.
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index edc17155cb2b..25c686a752b0 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -85,7 +85,9 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
* specified or set to OTG, then set the mode to peripheral.
*/
if (mode == USB_DR_MODE_OTG &&
- dwc->revision >= DWC3_REVISION_330A)
+ (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
+ !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
+ !DWC3_VER_IS_PRIOR(DWC3, 330A))
mode = USB_DR_MODE_PERIPHERAL;
}
@@ -121,17 +123,19 @@ static void __dwc3_set_mode(struct work_struct *work)
if (dwc->dr_mode != USB_DR_MODE_OTG)
return;
+ pm_runtime_get_sync(dwc->dev);
+
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
dwc3_otg_update(dwc, 0);
if (!dwc->desired_dr_role)
- return;
+ goto out;
if (dwc->desired_dr_role == dwc->current_dr_role)
- return;
+ goto out;
if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
- return;
+ goto out;
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_HOST:
@@ -190,6 +194,9 @@ static void __dwc3_set_mode(struct work_struct *work)
break;
}
+out:
+ pm_runtime_mark_last_busy(dwc->dev);
+ pm_runtime_put_autosuspend(dwc->dev);
}
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
@@ -257,7 +264,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
* take a little more than 50ms. Set the polling rate at 20ms
* for 10 times instead.
*/
- if (dwc3_is_usb31(dwc) && dwc->revision >= DWC3_USB31_REVISION_190A)
+ if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
retries = 10;
do {
@@ -265,8 +272,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
if (!(reg & DWC3_DCTL_CSFTRST))
goto done;
- if (dwc3_is_usb31(dwc) &&
- dwc->revision >= DWC3_USB31_REVISION_190A)
+ if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
msleep(20);
else
udelay(1);
@@ -283,7 +289,7 @@ done:
* is cleared, we must wait at least 50ms before accessing the PHY
* domain (synchronization delay).
*/
- if (dwc3_is_usb31(dwc) && dwc->revision <= DWC3_USB31_REVISION_180A)
+ if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
msleep(50);
return 0;
@@ -298,7 +304,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
u32 reg;
u32 dft;
- if (dwc->revision < DWC3_REVISION_250A)
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A))
return;
if (dwc->fladj == 0)
@@ -579,7 +585,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
* will be '0' when the core is reset. Application needs to set it
* to '1' after the core initialization is completed.
*/
- if (dwc->revision > DWC3_REVISION_194A)
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
reg |= DWC3_GUSB3PIPECTL_SUSPHY;
/*
@@ -670,7 +676,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
* be '0' when the core is reset. Application needs to set it to
* '1' after the core initialization is completed.
*/
- if (dwc->revision > DWC3_REVISION_194A)
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
/*
@@ -719,15 +725,13 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc)
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+ dwc->ip = DWC3_GSNPS_ID(reg);
/* This should read as U3 followed by revision number */
- if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
- /* Detected DWC_usb3 IP */
+ if (DWC3_IP_IS(DWC3)) {
dwc->revision = reg;
- } else if ((reg & DWC3_GSNPSID_MASK) == 0x33310000) {
- /* Detected DWC_usb31 IP */
+ } else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
- dwc->revision |= DWC3_REVISION_IS_DWC31;
dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
} else {
return false;
@@ -760,8 +764,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
*/
if ((dwc->dr_mode == USB_DR_MODE_HOST ||
dwc->dr_mode == USB_DR_MODE_OTG) &&
- (dwc->revision >= DWC3_REVISION_210A &&
- dwc->revision <= DWC3_REVISION_250A))
+ DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
else
reg &= ~DWC3_GCTL_DSBLCLKGTNG;
@@ -804,7 +807,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
* and falls back to high-speed mode which causes
* the device to enter a Connect/Disconnect loop
*/
- if (dwc->revision < DWC3_REVISION_190A)
+ if (DWC3_VER_IS_PRIOR(DWC3, 190A))
reg |= DWC3_GCTL_U2RSTECN;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
@@ -957,7 +960,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
goto err0a;
if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
- dwc->revision > DWC3_REVISION_194A) {
+ !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
if (!dwc->dis_u3_susphy_quirk) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
reg |= DWC3_GUSB3PIPECTL_SUSPHY;
@@ -1004,20 +1007,20 @@ static int dwc3_core_init(struct dwc3 *dwc)
* the DWC_usb3 controller. It is NOT available in the
* DWC_usb31 controller.
*/
- if (!dwc3_is_usb31(dwc) && dwc->revision >= DWC3_REVISION_310A) {
+ if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
reg |= DWC3_GUCTL2_RST_ACTBITLATER;
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
- if (dwc->revision >= DWC3_REVISION_250A) {
+ if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
/*
* Enable hardware control of sending remote wakeup
* in HS when the device is in the L1 state.
*/
- if (dwc->revision >= DWC3_REVISION_290A)
+ if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
if (dwc->dis_tx_ipgap_linecheck_quirk)
@@ -1049,7 +1052,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
* Must config both number of packets and max burst settings to enable
* RX and/or TX threshold.
*/
- if (dwc3_is_usb31(dwc) && dwc->dr_mode == USB_DR_MODE_HOST) {
+ if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
u8 rx_maxburst = dwc->rx_max_burst_prd;
u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
@@ -1371,10 +1374,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
/* check whether the core supports IMOD */
bool dwc3_has_imod(struct dwc3 *dwc)
{
- return ((dwc3_is_usb3(dwc) &&
- dwc->revision >= DWC3_REVISION_300A) ||
- (dwc3_is_usb31(dwc) &&
- dwc->revision >= DWC3_USB31_REVISION_120A));
+ return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
+ DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
+ DWC3_IP_IS(DWC32);
}
static void dwc3_check_params(struct dwc3 *dwc)
@@ -1395,7 +1397,7 @@ static void dwc3_check_params(struct dwc3 *dwc)
* affected version.
*/
if (!dwc->imod_interval &&
- (dwc->revision == DWC3_REVISION_300A))
+ DWC3_VER_IS(DWC3, 300A))
dwc->imod_interval = 1;
/* Check the maximum_speed parameter */
@@ -1417,7 +1419,7 @@ static void dwc3_check_params(struct dwc3 *dwc)
/*
* default to superspeed plus if we are capable.
*/
- if (dwc3_is_usb31(dwc) &&
+ if ((DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) &&
(DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 4c171a8e215f..013f42a2b5dc 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* core.h - DesignWare USB3 DRD Core Header
*
@@ -69,6 +69,7 @@
#define DWC3_GEVNTCOUNT_EHB BIT(31)
#define DWC3_GSNPSID_MASK 0xffff0000
#define DWC3_GSNPSREV_MASK 0xffff
+#define DWC3_GSNPS_ID(p) (((p) & DWC3_GSNPSID_MASK) >> 16)
/* DWC3 registers memory space boundaries */
#define DWC3_XHCI_REGS_START 0x0
@@ -365,6 +366,9 @@
#define DWC3_GHWPARAMS6_SRPSUPPORT BIT(10)
#define DWC3_GHWPARAMS6_EN_FPGA BIT(7)
+/* DWC_usb32 only */
+#define DWC3_GHWPARAMS6_MDWIDTH(n) ((n) & (0x3 << 8))
+
/* Global HWPARAMS7 Register */
#define DWC3_GHWPARAMS7_RAM1_DEPTH(n) ((n) & 0xffff)
#define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff)
@@ -491,6 +495,7 @@
#define DWC3_DGCMD_SELECTED_FIFO_FLUSH 0x09
#define DWC3_DGCMD_ALL_FIFO_FLUSH 0x0a
#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c
+#define DWC3_DGCMD_SET_ENDPOINT_PRIME 0x0d
#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
@@ -697,6 +702,10 @@ struct dwc3_ep {
#define DWC3_EP_END_TRANSFER_PENDING BIT(4)
#define DWC3_EP_PENDING_REQUEST BIT(5)
#define DWC3_EP_DELAY_START BIT(6)
+#define DWC3_EP_WAIT_TRANSFER_COMPLETE BIT(7)
+#define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8)
+#define DWC3_EP_FORCE_RESTART_STREAM BIT(9)
+#define DWC3_EP_FIRST_STREAM_PRIMED BIT(10)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
@@ -949,7 +958,8 @@ struct dwc3_scratchpad_array {
* @nr_scratch: number of scratch buffers
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
- * @revision: revision register contents
+ * @ip: controller's ID
+ * @revision: controller's version of an IP
* @version_type: VERSIONTYPE register contents, a sub release of a revision
* @dr_mode: requested mode of operation
* @current_dr_role: current role of operation when in dual-role mode
@@ -1110,15 +1120,15 @@ struct dwc3 {
u32 u1u2;
u32 maximum_speed;
- /*
- * All 3.1 IP version constants are greater than the 3.0 IP
- * version constants. This works for most version checks in
- * dwc3. However, in the future, this may not apply as
- * features may be developed on newer versions of the 3.0 IP
- * that are not in the 3.1 IP.
- */
+ u32 ip;
+
+#define DWC3_IP 0x5533
+#define DWC31_IP 0x3331
+#define DWC32_IP 0x3332
+
u32 revision;
+#define DWC3_REVISION_ANY 0x0
#define DWC3_REVISION_173A 0x5533173a
#define DWC3_REVISION_175A 0x5533175a
#define DWC3_REVISION_180A 0x5533180a
@@ -1143,20 +1153,20 @@ struct dwc3 {
#define DWC3_REVISION_310A 0x5533310a
#define DWC3_REVISION_330A 0x5533330a
-/*
- * NOTICE: we're using bit 31 as a "is usb 3.1" flag. This is really
- * just so dwc31 revisions are always larger than dwc3.
- */
-#define DWC3_REVISION_IS_DWC31 0x80000000
-#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_160A (0x3136302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_170A (0x3137302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_180A (0x3138302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_190A (0x3139302a | DWC3_REVISION_IS_DWC31)
+#define DWC31_REVISION_ANY 0x0
+#define DWC31_REVISION_110A 0x3131302a
+#define DWC31_REVISION_120A 0x3132302a
+#define DWC31_REVISION_160A 0x3136302a
+#define DWC31_REVISION_170A 0x3137302a
+#define DWC31_REVISION_180A 0x3138302a
+#define DWC31_REVISION_190A 0x3139302a
+
+#define DWC32_REVISION_ANY 0x0
+#define DWC32_REVISION_100A 0x3130302a
u32 version_type;
+#define DWC31_VERSIONTYPE_ANY 0x0
#define DWC31_VERSIONTYPE_EA01 0x65613031
#define DWC31_VERSIONTYPE_EA02 0x65613032
#define DWC31_VERSIONTYPE_EA03 0x65613033
@@ -1298,6 +1308,10 @@ struct dwc3_event_depevt {
#define DEPEVT_STREAMEVT_FOUND 1
#define DEPEVT_STREAMEVT_NOTFOUND 2
+/* Stream event parameter */
+#define DEPEVT_STREAM_PRIME 0xfffe
+#define DEPEVT_STREAM_NOSTREAM 0x0
+
/* Control-only Status */
#define DEPEVT_STATUS_CONTROL_DATA 1
#define DEPEVT_STATUS_CONTROL_STATUS 2
@@ -1400,17 +1414,26 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
-/* check whether we are on the DWC_usb3 core */
-static inline bool dwc3_is_usb3(struct dwc3 *dwc)
-{
- return !(dwc->revision & DWC3_REVISION_IS_DWC31);
-}
+#define DWC3_IP_IS(_ip) \
+ (dwc->ip == _ip##_IP)
-/* check whether we are on the DWC_usb31 core */
-static inline bool dwc3_is_usb31(struct dwc3 *dwc)
-{
- return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
-}
+#define DWC3_VER_IS(_ip, _ver) \
+ (DWC3_IP_IS(_ip) && dwc->revision == _ip##_REVISION_##_ver)
+
+#define DWC3_VER_IS_PRIOR(_ip, _ver) \
+ (DWC3_IP_IS(_ip) && dwc->revision < _ip##_REVISION_##_ver)
+
+#define DWC3_VER_IS_WITHIN(_ip, _from, _to) \
+ (DWC3_IP_IS(_ip) && \
+ dwc->revision >= _ip##_REVISION_##_from && \
+ (!(_ip##_REVISION_##_to) || \
+ dwc->revision <= _ip##_REVISION_##_to))
+
+#define DWC3_VER_TYPE_IS_WITHIN(_ip, _ver, _from, _to) \
+ (DWC3_VER_IS(_ip, _ver) && \
+ dwc->version_type >= _ip##_VERSIONTYPE_##_from && \
+ (!(_ip##_VERSIONTYPE_##_to) || \
+ dwc->version_type <= _ip##_VERSIONTYPE_##_to))
bool dwc3_has_imod(struct dwc3 *dwc);
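The new macros make every check name the IP explicitly, and a 0 bound (the *_REVISION_ANY constants) leaves that side of DWC3_VER_IS_WITHIN() open. A few translations collected in one hypothetical helper (the macros expand a local named dwc, so one must be in scope):

    static void demo_version_checks(struct dwc3 *dwc)
    {
            if (DWC3_VER_IS(DWC3, 300A))                    /* exactly 3.00a */
                    pr_debug("DWC_usb3 3.00a\n");

            if (DWC3_VER_IS_PRIOR(DWC3, 250A))              /* rev < 2.50a */
                    pr_debug("DWC_usb3 older than 2.50a\n");

            if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY))       /* >= 1.90a */
                    pr_debug("DWC_usb31 1.90a or newer\n");

            if (DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))        /* <= 1.94a */
                    pr_debug("DWC_usb3 up to and including 1.94a\n");
    }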
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 4a13ceaf4093..d8f600e0e88f 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* debug.h - DesignWare USB3 DRD Controller Debug Header
*
@@ -68,6 +68,8 @@ dwc3_gadget_generic_cmd_string(u8 cmd)
return "All FIFO Flush";
case DWC3_DGCMD_SET_ENDPOINT_NRDY:
return "Set Endpoint NRDY";
+ case DWC3_DGCMD_SET_ENDPOINT_PRIME:
+ return "Set Endpoint Prime";
case DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK:
return "Run SoC Bus Loopback Test";
default:
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 4fe8b1e1485c..6d9de334e46a 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -635,13 +635,18 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
struct dwc3_ep *dep = s->private;
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
+ int mdwidth;
u32 val;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
/* Convert to bytes */
- val *= DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
+
+ val *= mdwidth;
val >>= 3;
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -654,13 +659,18 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
struct dwc3_ep *dep = s->private;
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
+ int mdwidth;
u32 val;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
/* Convert to bytes */
- val *= DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
+
+ val *= mdwidth;
val >>= 3;
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
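Both hunks convert a FIFO depth reported in bus-width units into bytes: multiply by mdwidth (a width in bits) and shift right by three. A worked sketch with assumed values, 128 FIFO locations on a 64-bit bus:

    static u32 fifo_space_to_bytes(u32 locations, u32 mdwidth_bits)
    {
            return (locations * mdwidth_bits) >> 3; /* bits -> bytes */
    }
    /* fifo_space_to_bytes(128, 64) == 1024 */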
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 7db1ffc92bbd..2e483448d695 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -56,7 +56,7 @@ static irqreturn_t dwc3_otg_thread_irq(int irq, void *_dwc)
spin_lock(&dwc->lock);
if (dwc->otg_restart_host) {
dwc3_otg_host_init(dwc);
- dwc->otg_restart_host = 0;
+ dwc->otg_restart_host = false;
}
spin_unlock(&dwc->lock);
@@ -82,7 +82,7 @@ static irqreturn_t dwc3_otg_irq(int irq, void *_dwc)
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST &&
!(reg & DWC3_OEVT_DEVICEMODE))
- dwc->otg_restart_host = 1;
+ dwc->otg_restart_host = true;
dwc3_writel(dwc->regs, DWC3_OEVT, reg);
ret = IRQ_WAKE_THREAD;
}
@@ -653,6 +653,6 @@ void dwc3_drd_exit(struct dwc3 *dwc)
break;
}
- if (!dwc->edev)
+ if (dwc->otg_irq)
free_irq(dwc->otg_irq, dwc);
}
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 1e14a6f4884b..6505f7bd69e2 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
/* USBSS register offsets */
@@ -34,6 +35,7 @@
struct dwc3_keystone {
struct device *dev;
void __iomem *usbss;
+ struct phy *usb3_phy;
};
static inline u32 kdwc3_readl(void __iomem *base, u32 offset)
@@ -95,8 +97,38 @@ static int kdwc3_probe(struct platform_device *pdev)
if (IS_ERR(kdwc->usbss))
return PTR_ERR(kdwc->usbss);
- pm_runtime_enable(kdwc->dev);
+ /* PSC dependency on AM65 needs SERDES0 to be powered before USB0 */
+ kdwc->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
+ if (IS_ERR(kdwc->usb3_phy)) {
+ error = PTR_ERR(kdwc->usb3_phy);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "couldn't get usb3 phy: %d\n", error);
+
+ return error;
+ }
+
+ phy_pm_runtime_get_sync(kdwc->usb3_phy);
+ error = phy_reset(kdwc->usb3_phy);
+ if (error < 0) {
+ dev_err(dev, "usb3 phy reset failed: %d\n", error);
+ return error;
+ }
+
+ error = phy_init(kdwc->usb3_phy);
+ if (error < 0) {
+ dev_err(dev, "usb3 phy init failed: %d\n", error);
+ return error;
+ }
+
+ error = phy_power_on(kdwc->usb3_phy);
+ if (error < 0) {
+ dev_err(dev, "usb3 phy power on failed: %d\n", error);
+ phy_exit(kdwc->usb3_phy);
+ return error;
+ }
+
+ pm_runtime_enable(kdwc->dev);
error = pm_runtime_get_sync(kdwc->dev);
if (error < 0) {
dev_err(kdwc->dev, "pm_runtime_get_sync failed, error %d\n",
@@ -138,6 +170,9 @@ err_core:
err_irq:
pm_runtime_put_sync(kdwc->dev);
pm_runtime_disable(kdwc->dev);
+ phy_power_off(kdwc->usb3_phy);
+ phy_exit(kdwc->usb3_phy);
+ phy_pm_runtime_put_sync(kdwc->usb3_phy);
return error;
}
@@ -163,6 +198,10 @@ static int kdwc3_remove(struct platform_device *pdev)
pm_runtime_put_sync(kdwc->dev);
pm_runtime_disable(kdwc->dev);
+ phy_power_off(kdwc->usb3_phy);
+ phy_exit(kdwc->usb3_phy);
+ phy_pm_runtime_put_sync(kdwc->usb3_phy);
+
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index b81d085bc534..1f7f4d88ed9d 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -30,7 +30,7 @@
#include <linux/usb/role.h>
#include <linux/regulator/consumer.h>
-/* USB2 Ports Control Registers */
+/* USB2 Ports Control Registers, offsets are per-port */
#define U2P_REG_SIZE 0x20
@@ -50,14 +50,16 @@
/* USB Glue Control Registers */
-#define USB_R0 0x80
+#define G12A_GLUE_OFFSET 0x80
+
+#define USB_R0 0x00
#define USB_R0_P30_LANE0_TX2RX_LOOPBACK BIT(17)
#define USB_R0_P30_LANE0_EXT_PCLK_REQ BIT(18)
#define USB_R0_P30_PCS_RX_LOS_MASK_VAL_MASK GENMASK(28, 19)
#define USB_R0_U2D_SS_SCALEDOWN_MODE_MASK GENMASK(30, 29)
#define USB_R0_U2D_ACT BIT(31)
-#define USB_R1 0x84
+#define USB_R1 0x04
#define USB_R1_U3H_BIGENDIAN_GS BIT(0)
#define USB_R1_U3H_PME_ENABLE BIT(1)
#define USB_R1_U3H_HUB_PORT_OVERCURRENT_MASK GENMASK(4, 2)
@@ -69,23 +71,23 @@
#define USB_R1_U3H_FLADJ_30MHZ_REG_MASK GENMASK(24, 19)
#define USB_R1_P30_PCS_TX_SWING_FULL_MASK GENMASK(31, 25)
-#define USB_R2 0x88
+#define USB_R2 0x08
#define USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK GENMASK(25, 20)
#define USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK GENMASK(31, 26)
-#define USB_R3 0x8c
+#define USB_R3 0x0c
#define USB_R3_P30_SSC_ENABLE BIT(0)
#define USB_R3_P30_SSC_RANGE_MASK GENMASK(3, 1)
#define USB_R3_P30_SSC_REF_CLK_SEL_MASK GENMASK(12, 4)
#define USB_R3_P30_REF_SSP_EN BIT(13)
-#define USB_R4 0x90
+#define USB_R4 0x10
#define USB_R4_P21_PORT_RESET_0 BIT(0)
#define USB_R4_P21_SLEEP_M0 BIT(1)
#define USB_R4_MEM_PD_MASK GENMASK(3, 2)
#define USB_R4_P21_ONLY BIT(4)
-#define USB_R5 0x94
+#define USB_R5 0x14
#define USB_R5_ID_DIG_SYNC BIT(0)
#define USB_R5_ID_DIG_REG BIT(1)
#define USB_R5_ID_DIG_CFG_MASK GENMASK(3, 2)
@@ -96,15 +98,12 @@
#define USB_R5_ID_DIG_TH_MASK GENMASK(15, 8)
#define USB_R5_ID_DIG_CNT_MASK GENMASK(23, 16)
-enum {
- USB2_HOST_PHY = 0,
- USB2_OTG_PHY,
- USB3_HOST_PHY,
- PHY_COUNT,
-};
+#define PHY_COUNT 3
+#define USB2_OTG_PHY 1
-static const char *phy_names[PHY_COUNT] = {
- "usb2-phy0", "usb2-phy1", "usb3-phy0",
+static struct clk_bulk_data meson_gxl_clocks[] = {
+ { .id = "usb_ctrl" },
+ { .id = "ddr" },
};
static struct clk_bulk_data meson_g12a_clocks[] = {
@@ -117,27 +116,133 @@ static struct clk_bulk_data meson_a1_clocks[] = {
{ .id = "xtal_usb_ctrl" },
};
+static const char *meson_gxm_phy_names[] = {
+ "usb2-phy0", "usb2-phy1", "usb2-phy2",
+};
+
+static const char *meson_g12a_phy_names[] = {
+ "usb2-phy0", "usb2-phy1", "usb3-phy0",
+};
+
+/*
+ * Amlogic A1 has a single physical PHY, in slot 1, but still has the
+ * two U2 PHY control register blocks like G12A.
+ * Handling the first PHY on slot 1 would require extensive code changes,
+ * and the current management is generic enough to handle it correctly
+ * when only the "usb2-phy1" phy is specified, in line with the DT
+ * bindings.
+ */
+static const char *meson_a1_phy_names[] = {
+ "usb2-phy0", "usb2-phy1"
+};
+
+struct dwc3_meson_g12a;
+
struct dwc3_meson_g12a_drvdata {
bool otg_switch_supported;
+ bool otg_phy_host_port_disable;
struct clk_bulk_data *clks;
int num_clks;
+ const char **phy_names;
+ int num_phys;
+ int (*setup_regmaps)(struct dwc3_meson_g12a *priv, void __iomem *base);
+ int (*usb2_init_phy)(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+ int (*set_phy_mode)(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+ int (*usb_init)(struct dwc3_meson_g12a *priv);
+ int (*usb_post_init)(struct dwc3_meson_g12a *priv);
+};
+
+static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base);
+static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base);
+
+static int dwc3_meson_g12a_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+static int dwc3_meson_gxl_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+
+static int dwc3_meson_g12a_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode);
+static int dwc3_meson_gxl_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode);
+
+static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv);
+static int dwc3_meson_gxl_usb_init(struct dwc3_meson_g12a *priv);
+
+static int dwc3_meson_gxl_usb_post_init(struct dwc3_meson_g12a *priv);
+
+/*
+ * For GXL and GXM SoCs:
+ * USB PHY muxing between the DWC2 device controller and the DWC3 host
+ * controller is buggy when switching from device to host while the USB
+ * port is unpopulated; it causes the DWC3 to hard crash.
+ * When the port is populated (including OTG switching with the ID pin),
+ * the switch works reliably, as on the G12A platforms.
+ * To still allow switching from host to device on a USB Type-A port,
+ * a U2_PORT_DISABLE bit has been added to disconnect the DWC3 host
+ * controller from the port; when it is used, the DWC3 controller must
+ * be reset to recover use of the port.
+ */
+
+static struct dwc3_meson_g12a_drvdata gxl_drvdata = {
+ .otg_switch_supported = true,
+ .otg_phy_host_port_disable = true,
+ .clks = meson_gxl_clocks,
+ .num_clks = ARRAY_SIZE(meson_gxl_clocks),
+ .phy_names = meson_a1_phy_names,
+ .num_phys = ARRAY_SIZE(meson_a1_phy_names),
+ .setup_regmaps = dwc3_meson_gxl_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_gxl_set_phy_mode,
+ .usb_init = dwc3_meson_gxl_usb_init,
+ .usb_post_init = dwc3_meson_gxl_usb_post_init,
+};
+
+static struct dwc3_meson_g12a_drvdata gxm_drvdata = {
+ .otg_switch_supported = true,
+ .otg_phy_host_port_disable = true,
+ .clks = meson_gxl_clocks,
+ .num_clks = ARRAY_SIZE(meson_gxl_clocks),
+ .phy_names = meson_gxm_phy_names,
+ .num_phys = ARRAY_SIZE(meson_gxm_phy_names),
+ .setup_regmaps = dwc3_meson_gxl_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_gxl_set_phy_mode,
+ .usb_init = dwc3_meson_gxl_usb_init,
+ .usb_post_init = dwc3_meson_gxl_usb_post_init,
};
static struct dwc3_meson_g12a_drvdata g12a_drvdata = {
.otg_switch_supported = true,
.clks = meson_g12a_clocks,
.num_clks = ARRAY_SIZE(meson_g12a_clocks),
+ .phy_names = meson_g12a_phy_names,
+ .num_phys = ARRAY_SIZE(meson_g12a_phy_names),
+ .setup_regmaps = dwc3_meson_g12a_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_g12a_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_g12a_set_phy_mode,
+ .usb_init = dwc3_meson_g12a_usb_init,
};
static struct dwc3_meson_g12a_drvdata a1_drvdata = {
.otg_switch_supported = false,
.clks = meson_a1_clocks,
.num_clks = ARRAY_SIZE(meson_a1_clocks),
+ .phy_names = meson_a1_phy_names,
+ .num_phys = ARRAY_SIZE(meson_a1_phy_names),
+ .setup_regmaps = dwc3_meson_g12a_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_g12a_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_g12a_set_phy_mode,
+ .usb_init = dwc3_meson_g12a_usb_init,
};
struct dwc3_meson_g12a {
struct device *dev;
- struct regmap *regmap;
+ struct regmap *u2p_regmap[PHY_COUNT];
+ struct regmap *usb_glue_regmap;
struct reset_control *reset;
struct phy *phys[PHY_COUNT];
enum usb_dr_mode otg_mode;
@@ -150,49 +255,78 @@ struct dwc3_meson_g12a {
const struct dwc3_meson_g12a_drvdata *drvdata;
};
-static void dwc3_meson_g12a_usb2_set_mode(struct dwc3_meson_g12a *priv,
- int i, enum phy_mode mode)
+static int dwc3_meson_gxl_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode)
+{
+ return phy_set_mode(priv->phys[i], mode);
+}
+
+static int dwc3_meson_gxl_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode)
+{
+ /* On GXL, the PHY must be started in device mode for DWC2 init */
+ return priv->drvdata->set_phy_mode(priv, i,
+ (i == USB2_OTG_PHY) ? PHY_MODE_USB_DEVICE
+ : PHY_MODE_USB_HOST);
+}
+
+static int dwc3_meson_g12a_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode)
{
if (mode == PHY_MODE_USB_HOST)
- regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_HOST_DEVICE,
U2P_R0_HOST_DEVICE);
else
- regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_HOST_DEVICE, 0);
+
+ return 0;
}
-static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv)
+static int dwc3_meson_g12a_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode)
{
- int i;
+ int ret;
- if (priv->otg_mode == USB_DR_MODE_PERIPHERAL)
- priv->otg_phy_mode = PHY_MODE_USB_DEVICE;
- else
- priv->otg_phy_mode = PHY_MODE_USB_HOST;
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_POWER_ON_RESET,
+ U2P_R0_POWER_ON_RESET);
- for (i = 0 ; i < USB3_HOST_PHY ; ++i) {
- if (!priv->phys[i])
- continue;
+ if (priv->drvdata->otg_switch_supported && i == USB2_OTG_PHY) {
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
+ U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS);
- regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
- U2P_R0_POWER_ON_RESET,
- U2P_R0_POWER_ON_RESET);
+ ret = priv->drvdata->set_phy_mode(priv, i, mode);
+ } else
+ ret = priv->drvdata->set_phy_mode(priv, i,
+ PHY_MODE_USB_HOST);
- if (priv->drvdata->otg_switch_supported && i == USB2_OTG_PHY) {
- regmap_update_bits(priv->regmap,
- U2P_R0 + (U2P_REG_SIZE * i),
- U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
- U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_POWER_ON_RESET, 0);
+
+ return 0;
+}
- dwc3_meson_g12a_usb2_set_mode(priv, i,
- priv->otg_phy_mode);
- } else
- dwc3_meson_g12a_usb2_set_mode(priv, i,
- PHY_MODE_USB_HOST);
+static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
+{
+ int i, ret;
- regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
- U2P_R0_POWER_ON_RESET, 0);
+ for (i = 0; i < priv->drvdata->num_phys; ++i) {
+ if (!priv->phys[i])
+ continue;
+
+ if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+ continue;
+
+ ret = priv->drvdata->usb2_init_phy(priv, i, mode);
+ if (ret)
+ return ret;
}
return 0;
@@ -200,7 +334,7 @@ static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv)
static void dwc3_meson_g12a_usb3_init(struct dwc3_meson_g12a *priv)
{
- regmap_update_bits(priv->regmap, USB_R3,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R3,
USB_R3_P30_SSC_RANGE_MASK |
USB_R3_P30_REF_SSP_EN,
USB_R3_P30_SSC_ENABLE |
@@ -208,61 +342,77 @@ static void dwc3_meson_g12a_usb3_init(struct dwc3_meson_g12a *priv)
USB_R3_P30_REF_SSP_EN);
udelay(2);
- regmap_update_bits(priv->regmap, USB_R2,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R2,
USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK,
FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK, 0x15));
- regmap_update_bits(priv->regmap, USB_R2,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R2,
USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK,
FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK, 0x20));
udelay(2);
- regmap_update_bits(priv->regmap, USB_R1,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT,
USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT);
- regmap_update_bits(priv->regmap, USB_R1,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_P30_PCS_TX_SWING_FULL_MASK,
FIELD_PREP(USB_R1_P30_PCS_TX_SWING_FULL_MASK, 127));
}
-static void dwc3_meson_g12a_usb_otg_apply_mode(struct dwc3_meson_g12a *priv)
+static void dwc3_meson_g12a_usb_otg_apply_mode(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
{
- if (priv->otg_phy_mode == PHY_MODE_USB_DEVICE) {
- regmap_update_bits(priv->regmap, USB_R0,
+ if (mode == PHY_MODE_USB_DEVICE) {
+ if (priv->otg_mode != USB_DR_MODE_OTG &&
+ priv->drvdata->otg_phy_host_port_disable)
+ /* Isolate the OTG PHY port from the Host Controller */
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
+ USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK,
+ FIELD_PREP(USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK,
+ BIT(USB2_OTG_PHY)));
+
+ regmap_update_bits(priv->usb_glue_regmap, USB_R0,
USB_R0_U2D_ACT, USB_R0_U2D_ACT);
- regmap_update_bits(priv->regmap, USB_R0,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R0,
USB_R0_U2D_SS_SCALEDOWN_MODE_MASK, 0);
- regmap_update_bits(priv->regmap, USB_R4,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R4,
USB_R4_P21_SLEEP_M0, USB_R4_P21_SLEEP_M0);
} else {
- regmap_update_bits(priv->regmap, USB_R0,
+ if (priv->otg_mode != USB_DR_MODE_OTG &&
+ priv->drvdata->otg_phy_host_port_disable) {
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
+ USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK, 0);
+ msleep(500);
+ }
+ regmap_update_bits(priv->usb_glue_regmap, USB_R0,
USB_R0_U2D_ACT, 0);
- regmap_update_bits(priv->regmap, USB_R4,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R4,
USB_R4_P21_SLEEP_M0, 0);
}
}
-static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv)
+static int dwc3_meson_g12a_usb_init_glue(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
{
int ret;
- ret = dwc3_meson_g12a_usb2_init(priv);
+ ret = dwc3_meson_g12a_usb2_init(priv, mode);
if (ret)
return ret;
- regmap_update_bits(priv->regmap, USB_R1,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_U3H_FLADJ_30MHZ_REG_MASK,
FIELD_PREP(USB_R1_U3H_FLADJ_30MHZ_REG_MASK, 0x20));
- regmap_update_bits(priv->regmap, USB_R5,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_EN_0,
USB_R5_ID_DIG_EN_0);
- regmap_update_bits(priv->regmap, USB_R5,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_EN_1,
USB_R5_ID_DIG_EN_1);
- regmap_update_bits(priv->regmap, USB_R5,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_TH_MASK,
FIELD_PREP(USB_R5_ID_DIG_TH_MASK, 0xff));
@@ -270,12 +420,13 @@ static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv)
if (priv->usb3_ports)
dwc3_meson_g12a_usb3_init(priv);
- dwc3_meson_g12a_usb_otg_apply_mode(priv);
+ dwc3_meson_g12a_usb_otg_apply_mode(priv, mode);
return 0;
}
-static const struct regmap_config phy_meson_g12a_usb3_regmap_conf = {
+static const struct regmap_config phy_meson_g12a_usb_glue_regmap_conf = {
+ .name = "usb-glue",
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 4,
@@ -284,17 +435,19 @@ static const struct regmap_config phy_meson_g12a_usb3_regmap_conf = {
static int dwc3_meson_g12a_get_phys(struct dwc3_meson_g12a *priv)
{
+ const char *phy_name;
int i;
- for (i = 0 ; i < PHY_COUNT ; ++i) {
- priv->phys[i] = devm_phy_optional_get(priv->dev, phy_names[i]);
+ for (i = 0 ; i < priv->drvdata->num_phys ; ++i) {
+ phy_name = priv->drvdata->phy_names[i];
+ priv->phys[i] = devm_phy_optional_get(priv->dev, phy_name);
if (!priv->phys[i])
continue;
if (IS_ERR(priv->phys[i]))
return PTR_ERR(priv->phys[i]);
- if (i == USB3_HOST_PHY)
+ if (strstr(phy_name, "usb3"))
priv->usb3_ports++;
else
priv->usb2_ports++;
@@ -310,7 +463,7 @@ static enum phy_mode dwc3_meson_g12a_get_id(struct dwc3_meson_g12a *priv)
{
u32 reg;
- regmap_read(priv->regmap, USB_R5, &reg);
+ regmap_read(priv->usb_glue_regmap, USB_R5, &reg);
if (reg & (USB_R5_ID_DIG_SYNC | USB_R5_ID_DIG_REG))
return PHY_MODE_USB_DEVICE;
@@ -342,9 +495,11 @@ static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv,
priv->otg_phy_mode = mode;
- dwc3_meson_g12a_usb2_set_mode(priv, USB2_OTG_PHY, mode);
+ ret = priv->drvdata->set_phy_mode(priv, USB2_OTG_PHY, mode);
+ if (ret)
+ return ret;
- dwc3_meson_g12a_usb_otg_apply_mode(priv);
+ dwc3_meson_g12a_usb_otg_apply_mode(priv, mode);
return 0;
}
@@ -364,6 +519,13 @@ static int dwc3_meson_g12a_role_set(struct usb_role_switch *sw,
if (mode == priv->otg_phy_mode)
return 0;
+ if (priv->drvdata->otg_phy_host_port_disable)
+ dev_warn_once(priv->dev, "Manual OTG switch is broken on this "\
+ "SoC; when manually switching from "\
+ "host to device, the DWC3 controller "\
+ "will need to be reset in order "\
+ "to recover usage of the host port");
+
return dwc3_meson_g12a_otg_mode_set(priv, mode);
}
@@ -386,7 +548,8 @@ static irqreturn_t dwc3_meson_g12a_irq_thread(int irq, void *data)
dev_warn(priv->dev, "Failed to switch OTG mode\n");
}
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_IRQ, 0);
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
+ USB_R5_ID_DIG_IRQ, 0);
return IRQ_HANDLED;
}
@@ -421,7 +584,7 @@ static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
if (priv->otg_mode == USB_DR_MODE_OTG) {
/* Ack irq before registering */
- regmap_update_bits(priv->regmap, USB_R5,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_IRQ, 0);
irq = platform_get_irq(pdev, 0);
@@ -457,6 +620,77 @@ static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
return 0;
}
+static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base)
+{
+ /* Unlike G12A, GXL controls the PHY mode in the PHY registers */
+ priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev, base,
+ &phy_meson_g12a_usb_glue_regmap_conf);
+ if (IS_ERR(priv->usb_glue_regmap))
+ return PTR_ERR(priv->usb_glue_regmap);
+
+ return 0;
+}
+
+static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base)
+{
+ int i;
+
+ priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev,
+ base + G12A_GLUE_OFFSET,
+ &phy_meson_g12a_usb_glue_regmap_conf);
+ if (IS_ERR(priv->usb_glue_regmap))
+ return PTR_ERR(priv->usb_glue_regmap);
+
+ /* Create a regmap for each USB2 PHY control register set */
+ for (i = 0; i < priv->usb2_ports; i++) {
+ struct regmap_config u2p_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = U2P_R1,
+ };
+
+ u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
+ "u2p-%d", i);
+ if (!u2p_regmap_config.name)
+ return -ENOMEM;
+
+ priv->u2p_regmap[i] = devm_regmap_init_mmio(priv->dev,
+ base + (i * U2P_REG_SIZE),
+ &u2p_regmap_config);
+ if (IS_ERR(priv->u2p_regmap[i]))
+ return PTR_ERR(priv->u2p_regmap[i]);
+ }
+
+ return 0;
+}
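+
+A quick check that the regmap split above preserves the old addressing
+(constants as defined earlier in this patch; mask/val are placeholders):
+
+	/* glue window: created at base + G12A_GLUE_OFFSET (0x80), so the
+	 * old absolute USB_R1 offset 0x84 becomes 0x04 inside the window:
+	 * 0x80 + 0x04 == 0x84 */
+	regmap_update_bits(priv->usb_glue_regmap, USB_R1, mask, val);
+
+	/* per-port USB2 PHY window i: created at base + i * U2P_REG_SIZE,
+	 * so the old U2P_R0 + (U2P_REG_SIZE * i) is simply U2P_R0 there */
+	regmap_update_bits(priv->u2p_regmap[i], U2P_R0, mask, val);
+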
+
+static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv)
+{
+ return dwc3_meson_g12a_usb_init_glue(priv, priv->otg_phy_mode);
+}
+
+static int dwc3_meson_gxl_usb_init(struct dwc3_meson_g12a *priv)
+{
+ return dwc3_meson_g12a_usb_init_glue(priv, PHY_MODE_USB_DEVICE);
+}
+
+static int dwc3_meson_gxl_usb_post_init(struct dwc3_meson_g12a *priv)
+{
+ int ret;
+
+ ret = priv->drvdata->set_phy_mode(priv, USB2_OTG_PHY,
+ priv->otg_phy_mode);
+ if (ret)
+ return ret;
+
+ dwc3_meson_g12a_usb_otg_apply_mode(priv, priv->otg_phy_mode);
+
+ return 0;
+}
+
static int dwc3_meson_g12a_probe(struct platform_device *pdev)
{
struct dwc3_meson_g12a *priv;
@@ -473,10 +707,8 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- priv->regmap = devm_regmap_init_mmio(dev, base,
- &phy_meson_g12a_usb3_regmap_conf);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
+ priv->drvdata = of_device_get_match_data(&pdev->dev);
+ priv->dev = dev;
priv->vbus = devm_regulator_get_optional(dev, "vbus");
if (IS_ERR(priv->vbus)) {
@@ -485,8 +717,6 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
priv->vbus = NULL;
}
- priv->drvdata = of_device_get_match_data(&pdev->dev);
-
ret = devm_clk_bulk_get(dev,
priv->drvdata->num_clks,
priv->drvdata->clks);
@@ -499,13 +729,12 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, priv);
- priv->dev = dev;
- priv->reset = devm_reset_control_get(dev, NULL);
+ priv->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(priv->reset)) {
ret = PTR_ERR(priv->reset);
dev_err(dev, "failed to get device reset, err=%d\n", ret);
- return ret;
+ goto err_disable_clks;
}
ret = reset_control_reset(priv->reset);
@@ -516,6 +745,10 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
if (ret)
goto err_disable_clks;
+ ret = priv->drvdata->setup_regmaps(priv, base);
+ if (ret)
+ return ret;
+
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
if (ret)
@@ -525,7 +758,14 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
/* Get dr_mode */
priv->otg_mode = usb_get_dr_mode(dev);
- dwc3_meson_g12a_usb_init(priv);
+ if (priv->otg_mode == USB_DR_MODE_PERIPHERAL)
+ priv->otg_phy_mode = PHY_MODE_USB_DEVICE;
+ else
+ priv->otg_phy_mode = PHY_MODE_USB_HOST;
+
+ ret = priv->drvdata->usb_init(priv);
+ if (ret)
+ goto err_disable_clks;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
@@ -541,6 +781,12 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
goto err_phys_exit;
}
+ if (priv->drvdata->usb_post_init) {
+ ret = priv->drvdata->usb_post_init(priv);
+ if (ret)
+ goto err_phys_power;
+ }
+
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret)
goto err_phys_power;
@@ -642,7 +888,9 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
reset_control_deassert(priv->reset);
- dwc3_meson_g12a_usb_init(priv);
+ ret = priv->drvdata->usb_init(priv);
+ if (ret)
+ return ret;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
@@ -675,6 +923,14 @@ static const struct dev_pm_ops dwc3_meson_g12a_dev_pm_ops = {
static const struct of_device_id dwc3_meson_g12a_match[] = {
{
+ .compatible = "amlogic,meson-gxl-usb-ctrl",
+ .data = &gxl_drvdata,
+ },
+ {
+ .compatible = "amlogic,meson-gxm-usb-ctrl",
+ .data = &gxm_drvdata,
+ },
+ {
.compatible = "amlogic,meson-g12a-usb-ctrl",
.data = &g12a_drvdata,
},
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index e64754be47b4..8852fbfdead4 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -27,7 +27,6 @@ struct dwc3_of_simple {
struct clk_bulk_data *clks;
int num_clocks;
struct reset_control *resets;
- bool pulse_resets;
bool need_reset;
};
@@ -38,7 +37,6 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- bool shared_resets = false;
simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL);
if (!simple)
@@ -54,13 +52,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "rockchip,rk3399-dwc3"))
simple->need_reset = true;
- if (of_device_is_compatible(np, "amlogic,meson-axg-dwc3") ||
- of_device_is_compatible(np, "amlogic,meson-gxl-dwc3")) {
- shared_resets = true;
- simple->pulse_resets = true;
- }
-
- simple->resets = of_reset_control_array_get(np, shared_resets, true,
+ simple->resets = of_reset_control_array_get(np, false, true,
true);
if (IS_ERR(simple->resets)) {
ret = PTR_ERR(simple->resets);
@@ -68,15 +60,9 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
return ret;
}
- if (simple->pulse_resets) {
- ret = reset_control_reset(simple->resets);
- if (ret)
- goto err_resetc_put;
- } else {
- ret = reset_control_deassert(simple->resets);
- if (ret)
- goto err_resetc_put;
- }
+ ret = reset_control_deassert(simple->resets);
+ if (ret)
+ goto err_resetc_put;
ret = clk_bulk_get_all(simple->dev, &simple->clks);
if (ret < 0)
@@ -102,8 +88,7 @@ err_clk_put:
clk_bulk_put_all(simple->num_clocks, simple->clks);
err_resetc_assert:
- if (!simple->pulse_resets)
- reset_control_assert(simple->resets);
+ reset_control_assert(simple->resets);
err_resetc_put:
reset_control_put(simple->resets);
@@ -118,8 +103,7 @@ static void __dwc3_of_simple_teardown(struct dwc3_of_simple *simple)
clk_bulk_put_all(simple->num_clocks, simple->clks);
simple->num_clocks = 0;
- if (!simple->pulse_resets)
- reset_control_assert(simple->resets);
+ reset_control_assert(simple->resets);
reset_control_put(simple->resets);
@@ -191,8 +175,6 @@ static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "xlnx,zynqmp-dwc3" },
{ .compatible = "cavium,octeon-7130-usb-uctl" },
{ .compatible = "sprd,sc9860-dwc3" },
- { .compatible = "amlogic,meson-axg-dwc3" },
- { .compatible = "amlogic,meson-gxl-dwc3" },
{ .compatible = "allwinner,sun50i-h6-dwc3" },
{ /* Sentinel */ }
};
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 7051611229c9..b67372737dc9 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -114,6 +114,7 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+ PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
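
The "linux,extcon-name" property is consumed on the dwc3 side when looking up
the extcon device; a rough sketch of the consumer in drd.c (not part of this
hunk, details simplified):

	const char *name;
	struct extcon_dev *edev;

	if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
		edev = extcon_get_extcon_dev(name);
		if (!edev)
			return ERR_PTR(-EPROBE_DEFER);

		return edev;
	}

On the Merrifield PCI platform this binds the controller to the Basin Cove
PMIC power-source extcon ("mrfld_bcove_pwrsrc") for VBUS/ID events.
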
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 00746c2848c0..80c3ef134e41 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -95,7 +95,7 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
* Wait until device controller is ready. Only applies to 1.94a and
* later RTL.
*/
- if (dwc->revision >= DWC3_REVISION_194A) {
+ if (!DWC3_VER_IS_PRIOR(DWC3, 194A)) {
while (--retries) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
if (reg & DWC3_DSTS_DCNRD)
@@ -122,7 +122,7 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
* The following code is racy when called from dwc3_gadget_wakeup,
* and is not needed, at least on newer versions
*/
- if (dwc->revision >= DWC3_REVISION_194A)
+ if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
return 0;
/* wait for a change in DSTS */
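
This hunk, and the ones below, replace raw dwc->revision comparisons with the
IP/version helpers. Their definitions live in core.h and are not part of this
patch; roughly, as a sketch:

	#define DWC3_IP_IS(_ip)						\
		(dwc->ip == _ip##_IP)

	#define DWC3_VER_IS_PRIOR(_ip, _ver)				\
		(DWC3_IP_IS(_ip) &&					\
		 dwc->revision < _ip##_REVISION_##_ver)

The ANY bound is a zero revision constant, so DWC3_VER_IS_WITHIN(DWC3, ANY,
187A) reads as "any DWC3 revision up to 1.87a", and !DWC3_VER_IS_PRIOR(DWC3,
194A) replaces revision >= DWC3_REVISION_194A while also pinning the check to
the DWC_usb3 IP rather than DWC_usb31/DWC_usb32.
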
@@ -273,7 +273,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
- u32 timeout = 1000;
+ u32 timeout = 5000;
u32 saved_config = 0;
u32 reg;
@@ -356,6 +356,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
ret = 0;
break;
case DEPEVT_TRANSFER_NO_RESOURCE:
+ dev_WARN(dwc->dev, "No resource for %s\n",
+ dep->name);
ret = -EINVAL;
break;
case DEPEVT_TRANSFER_BUS_EXPIRY:
@@ -387,9 +389,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
- if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
- dep->flags |= DWC3_EP_TRANSFER_STARTED;
- dwc3_gadget_ep_get_transfer_index(dep);
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
+ if (ret == 0)
+ dep->flags |= DWC3_EP_TRANSFER_STARTED;
+
+ if (ret != -ETIMEDOUT)
+ dwc3_gadget_ep_get_transfer_index(dep);
}
if (saved_config) {
@@ -415,7 +420,8 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
* IN transfers due to a mishandled error condition. Synopsys
* STAR 9000614252.
*/
- if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
+ if (dep->direction &&
+ !DWC3_VER_IS_PRIOR(DWC3, 260A) &&
(dwc->gadget.speed >= USB_SPEED_SUPER))
cmd |= DWC3_DEPCMD_CLEARPENDIN;
@@ -573,6 +579,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+ | DWC3_DEPCFG_XFER_COMPLETE_EN
| DWC3_DEPCFG_STREAM_EVENT_EN;
dep->stream_capable = true;
}
@@ -603,6 +610,9 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
+static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ bool interrupt);
+
/**
* __dwc3_gadget_ep_enable - initializes a hw endpoint
* @dep: endpoint to be initialized
@@ -663,7 +673,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
* Issue StartTransfer here with no-op TRB so we can always rely on No
* Response Update Transfer command.
*/
- if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
+ if (usb_endpoint_xfer_bulk(desc) ||
usb_endpoint_xfer_int(desc)) {
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_trb *trb;
@@ -682,6 +692,29 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0)
return ret;
+
+ if (dep->stream_capable) {
+ /*
+ * For streams, at start, there may be a race where the
+ * host primes the endpoint before the function driver
+ * queues a request to initiate a stream. In that case,
+ * the controller will not see the prime to generate the
+ * ERDY and start the stream. To work around this, issue a
+ * no-op TRB as normal, but end it immediately. As a
+ * result, when the function driver queues the request,
+ * the next START_TRANSFER command will cause the
+ * controller to generate an ERDY to initiate the
+ * stream.
+ */
+ dwc3_stop_active_transfer(dep, true, true);
+
+ /*
+ * All stream eps will reinitiate the stream on NoStream
+ * rejection until we can determine that the host can
+ * prime after the first transfer.
+ */
+ dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
+ }
}
out:
@@ -690,8 +723,6 @@ out:
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
- bool interrupt);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
@@ -912,7 +943,8 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
- unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
+ unsigned stream_id, unsigned short_not_ok,
+ unsigned no_interrupt, unsigned is_last)
{
struct dwc3 *dwc = dep->dwc;
struct usb_gadget *gadget = &dwc->gadget;
@@ -1005,6 +1037,8 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
+ else if (dep->stream_capable && is_last)
+ trb->ctrl |= DWC3_TRB_CTRL_LST;
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
@@ -1032,6 +1066,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
unsigned stream_id = req->request.stream_id;
unsigned short_not_ok = req->request.short_not_ok;
unsigned no_interrupt = req->request.no_interrupt;
+ unsigned is_last = req->request.is_last;
if (req->request.num_sgs > 0) {
length = sg_dma_len(req->start_sg);
@@ -1052,7 +1087,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
req->num_trbs++;
__dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
- stream_id, short_not_ok, no_interrupt);
+ stream_id, short_not_ok, no_interrupt, is_last);
}
static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
@@ -1097,7 +1132,8 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
maxp - rem, false, 1,
req->request.stream_id,
req->request.short_not_ok,
- req->request.no_interrupt);
+ req->request.no_interrupt,
+ req->request.is_last);
} else {
dwc3_prepare_one_trb(dep, req, chain, i);
}
@@ -1141,7 +1177,8 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
false, 1, req->request.stream_id,
req->request.short_not_ok,
- req->request.no_interrupt);
+ req->request.no_interrupt,
+ req->request.is_last);
} else if (req->request.zero && req->request.length &&
(IS_ALIGNED(req->request.length, maxp))) {
struct dwc3 *dwc = dep->dwc;
@@ -1158,7 +1195,8 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
false, 1, req->request.stream_id,
req->request.short_not_ok,
- req->request.no_interrupt);
+ req->request.no_interrupt,
+ req->request.is_last);
} else {
dwc3_prepare_one_trb(dep, req, false, 0);
}
@@ -1194,6 +1232,14 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
if (!dwc3_calc_trbs_left(dep))
return;
+
+ /*
+ * Don't prepare beyond a transfer. In DWC_usb32, its transfer
+ * burst capability may try to read and use TRBs beyond the
+ * active transfer instead of stopping.
+ */
+ if (dep->stream_capable && req->request.is_last)
+ return;
}
list_for_each_entry_safe(req, n, &dep->pending_list, list) {
@@ -1217,9 +1263,19 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
if (!dwc3_calc_trbs_left(dep))
return;
+
+ /*
+ * Don't prepare beyond a transfer. In DWC_usb32, its transfer
+ * burst capability may try to read and use TRBs beyond the
+ * active transfer instead of stopping.
+ */
+ if (dep->stream_capable && req->request.is_last)
+ return;
}
}
+static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
+
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -1259,17 +1315,26 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0) {
- /*
- * FIXME we need to iterate over the list of requests
- * here and stop, unmap, free and del each of the linked
- * requests instead of what we do now.
- */
- if (req->trb)
- memset(req->trb, 0, sizeof(struct dwc3_trb));
- dwc3_gadget_del_and_unmap_request(dep, req, ret);
+ struct dwc3_request *tmp;
+
+ if (ret == -EAGAIN)
+ return ret;
+
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ /* If ep isn't started, then there's no end transfer pending */
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
return ret;
}
+ if (dep->stream_capable && req->request.is_last)
+ dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
return 0;
}
@@ -1402,17 +1467,15 @@ static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
int ret;
int i;
- if (list_empty(&dep->pending_list)) {
+ if (list_empty(&dep->pending_list) &&
+ list_empty(&dep->started_list)) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
return -EAGAIN;
}
- if (!dwc->dis_start_transfer_quirk && dwc3_is_usb31(dwc) &&
- (dwc->revision <= DWC3_USB31_REVISION_160A ||
- (dwc->revision == DWC3_USB31_REVISION_170A &&
- dwc->version_type >= DWC31_VERSIONTYPE_EA01 &&
- dwc->version_type <= DWC31_VERSIONTYPE_EA06))) {
-
+ if (!dwc->dis_start_transfer_quirk &&
+ (DWC3_VER_IS_PRIOR(DWC31, 170A) ||
+ DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) {
if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction)
return dwc3_gadget_start_isoc_quirk(dep);
}
@@ -1425,6 +1488,27 @@ static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
break;
}
+ /*
+ * After a number of unsuccessful start attempts due to bus-expiry
+ * status, issue the END_TRANSFER command and retry on the next
+ * XferNotReady event.
+ */
+ if (ret == -EAGAIN) {
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
+
+ cmd = DWC3_DEPCMD_ENDTRANSFER |
+ DWC3_DEPCMD_CMDIOC |
+ DWC3_DEPCMD_PARAM(dep->resource_index);
+
+ dep->resource_index = 0;
+ memset(&params, 0, sizeof(params));
+
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (!ret)
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+ }
+
return ret;
}
@@ -1457,6 +1541,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
req->status = DWC3_REQUEST_STATUS_QUEUED;
+ if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
+ return 0;
+
/* Start the transfer only after the END_TRANSFER is completed */
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
dep->flags |= DWC3_EP_DELAY_START;
@@ -1508,6 +1595,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
{
int i;
+ /* If req->trb is not set, then the request has not started */
+ if (!req->trb)
+ return;
+
/*
* If request was already started, this means we had to
* stop the transfer. With that we also need to ignore
@@ -1556,39 +1647,40 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
spin_lock_irqsave(&dwc->lock, flags);
- list_for_each_entry(r, &dep->pending_list, list) {
+ list_for_each_entry(r, &dep->cancelled_list, list) {
if (r == req)
- break;
+ goto out;
}
- if (r != req) {
- list_for_each_entry(r, &dep->started_list, list) {
- if (r == req)
- break;
+ list_for_each_entry(r, &dep->pending_list, list) {
+ if (r == req) {
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ goto out;
}
+ }
+
+ list_for_each_entry(r, &dep->started_list, list) {
if (r == req) {
+ struct dwc3_request *t;
+
/* wait until it is processed */
dwc3_stop_active_transfer(dep, true, true);
- if (!r->trb)
- goto out0;
+ /*
+ * Remove any started request if the transfer is
+ * cancelled.
+ */
+ list_for_each_entry_safe(r, t, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(r);
- dwc3_gadget_move_cancelled_request(req);
- if (dep->flags & DWC3_EP_TRANSFER_STARTED)
- goto out0;
- else
- goto out1;
+ goto out;
}
- dev_err(dwc->dev, "request %pK was not queued to %s\n",
- request, ep->name);
- ret = -EINVAL;
- goto out0;
}
-out1:
- dwc3_gadget_giveback(dep, req, -ECONNRESET);
-
-out0:
+ dev_err(dwc->dev, "request %pK was not queued to %s\n",
+ request, ep->name);
+ ret = -EINVAL;
+out:
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
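
The reworked dequeue path above resolves the request by walking the lists in
a fixed order; in outline (a summary of the code above, not new behavior):

	/*
	 * 1. cancelled_list: cancellation already in progress, nothing
	 *    more to do, return 0.
	 * 2. pending_list:   never started, give it back with -ECONNRESET
	 *    right away.
	 * 3. started_list:   stop the active transfer, then move every
	 *    started request to the cancelled_list; they are given back
	 *    once the END_TRANSFER command completes.
	 * 4. not found:      -EINVAL, the request was never queued on
	 *    this endpoint.
	 */
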
@@ -1598,6 +1690,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3 *dwc = dep->dwc;
+ struct dwc3_request *req;
+ struct dwc3_request *tmp;
int ret;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
@@ -1634,13 +1728,37 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
else
dep->flags |= DWC3_EP_STALL;
} else {
+ /*
+ * Don't issue CLEAR_STALL command to control endpoints. The
+ * controller automatically clears the STALL when it receives
+ * the SETUP token.
+ */
+ if (dep->number <= 1) {
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ return 0;
+ }
ret = dwc3_send_clear_stall_ep_cmd(dep);
- if (ret)
+ if (ret) {
dev_err(dwc->dev, "failed to clear STALL on %s\n",
dep->name);
- else
- dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ return ret;
+ }
+
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ list_for_each_entry_safe(req, tmp, &dep->pending_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) {
+ dep->flags &= ~DWC3_EP_DELAY_START;
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+ }
}
return ret;
@@ -1756,7 +1874,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
}
/* Recent versions do this automatically */
- if (dwc->revision < DWC3_REVISION_194A) {
+ if (DWC3_VER_IS_PRIOR(DWC3, 194A)) {
/* write zeroes to Link Change Request */
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
@@ -1818,12 +1936,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
- if (dwc->revision <= DWC3_REVISION_187A) {
+ if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
reg &= ~DWC3_DCTL_TRGTULST_MASK;
reg |= DWC3_DCTL_TRGTULST_RX_DET;
}
- if (dwc->revision >= DWC3_REVISION_194A)
+ if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
reg &= ~DWC3_DCTL_KEEP_CONNECT;
reg |= DWC3_DCTL_RUN_STOP;
@@ -1897,7 +2015,7 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
- if (dwc->revision < DWC3_REVISION_250A)
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A))
reg |= DWC3_DEVTEN_ULSTCNGEN;
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
@@ -1942,6 +2060,8 @@ static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
nump = min_t(u32, nump, 16);
@@ -1978,10 +2098,10 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
* bursts of data without going through any sort of endpoint throttling.
*/
reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
- if (dwc3_is_usb31(dwc))
- reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
- else
+ if (DWC3_IP_IS(DWC3))
reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
+ else
+ reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
@@ -2154,7 +2274,7 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g,
* STAR#9000525659: Clock Domain Crossing on DCTL in
* USB 2.0 Mode
*/
- if (dwc->revision < DWC3_REVISION_220A &&
+ if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
!dwc->dis_metastability_quirk) {
reg |= DWC3_DCFG_SUPERSPEED;
} else {
@@ -2172,18 +2292,18 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g,
reg |= DWC3_DCFG_SUPERSPEED;
break;
case USB_SPEED_SUPER_PLUS:
- if (dwc3_is_usb31(dwc))
- reg |= DWC3_DCFG_SUPERSPEED_PLUS;
- else
+ if (DWC3_IP_IS(DWC3))
reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
break;
default:
dev_err(dwc->dev, "invalid speed (%d)\n", speed);
- if (dwc->revision & DWC3_REVISION_IS_DWC31)
- reg |= DWC3_DCFG_SUPERSPEED_PLUS;
- else
+ if (DWC3_IP_IS(DWC3))
reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
}
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
@@ -2226,14 +2346,17 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
int size;
mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
+
/* MDWIDTH is represented in bits, we need it in bytes */
mdwidth /= 8;
size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
- if (dwc3_is_usb31(dwc))
- size = DWC31_GTXFIFOSIZ_TXFDEP(size);
- else
+ if (DWC3_IP_IS(DWC3))
size = DWC3_GTXFIFOSIZ_TXFDEP(size);
+ else
+ size = DWC31_GTXFIFOSIZ_TXFDEP(size);
/* FIFO Depth is in MDWDITH bytes. Multiply */
size *= mdwidth;
@@ -2270,16 +2393,18 @@ static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
int size;
mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
/* MDWIDTH is represented in bits, convert to bytes */
mdwidth /= 8;
/* All OUT endpoints share a single RxFIFO space */
size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
- if (dwc3_is_usb31(dwc))
- size = DWC31_GRXFIFOSIZ_RXFDEP(size);
- else
+ if (DWC3_IP_IS(DWC3))
size = DWC3_GRXFIFOSIZ_RXFDEP(size);
+ else
+ size = DWC31_GRXFIFOSIZ_RXFDEP(size);
/* FIFO depth is in MDWDITH bytes */
size *= mdwidth;
@@ -2483,9 +2608,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
for_each_sg(sg, s, pending, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
- if (trb->ctrl & DWC3_TRB_CTRL_HWO)
- break;
-
req->sg = sg_next(s);
req->num_pending_sgs--;
@@ -2534,10 +2656,8 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->request.actual = req->request.length - req->remaining;
- if (!dwc3_gadget_ep_request_completed(req)) {
- __dwc3_gadget_kick_transfer(dep);
+ if (!dwc3_gadget_ep_request_completed(req))
goto out;
- }
dwc3_gadget_giveback(dep, req, status);
@@ -2561,41 +2681,53 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
}
}
+static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
+{
+ struct dwc3_request *req;
+
+ if (!list_empty(&dep->pending_list))
+ return true;
+
+ /*
+ * We only need to check the first entry of the started list. We can
+ * assume the completed requests are removed from the started list.
+ */
+ req = next_request(&dep->started_list);
+ if (!req)
+ return false;
+
+ return !dwc3_gadget_ep_request_completed(req);
+}
+
static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event)
{
dep->frame_number = event->parameters;
}
-static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
- const struct dwc3_event_depevt *event)
+static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event, int status)
{
struct dwc3 *dwc = dep->dwc;
- unsigned status = 0;
- bool stop = false;
-
- dwc3_gadget_endpoint_frame_from_event(dep, event);
-
- if (event->status & DEPEVT_STATUS_BUSERR)
- status = -ECONNRESET;
-
- if (event->status & DEPEVT_STATUS_MISSED_ISOC) {
- status = -EXDEV;
-
- if (list_empty(&dep->started_list))
- stop = true;
- }
+ bool no_started_trb = true;
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
- if (stop)
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
+ goto out;
+
+ if (status == -EXDEV && list_empty(&dep->started_list))
dwc3_stop_active_transfer(dep, true, true);
+ else if (dwc3_gadget_ep_should_continue(dep))
+ if (__dwc3_gadget_kick_transfer(dep) == 0)
+ no_started_trb = false;
+out:
/*
* WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
* See dwc3_gadget_linksts_change_interrupt() for 1st half.
*/
- if (dwc->revision < DWC3_REVISION_183A) {
+ if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
u32 reg;
int i;
@@ -2606,7 +2738,7 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
continue;
if (!list_empty(&dep->started_list))
- return;
+ return no_started_trb;
}
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -2615,15 +2747,124 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
dwc->u1u2 = 0;
}
+
+ return no_started_trb;
+}
+
+static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ int status = 0;
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ dwc3_gadget_endpoint_frame_from_event(dep, event);
+
+ if (event->status & DEPEVT_STATUS_BUSERR)
+ status = -ECONNRESET;
+
+ if (event->status & DEPEVT_STATUS_MISSED_ISOC)
+ status = -EXDEV;
+
+ dwc3_gadget_endpoint_trbs_complete(dep, event, status);
+}
+
+static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ int status = 0;
+
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+
+ if (event->status & DEPEVT_STATUS_BUSERR)
+ status = -ECONNRESET;
+
+ if (dwc3_gadget_endpoint_trbs_complete(dep, event, status))
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
}
static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event)
{
dwc3_gadget_endpoint_frame_from_event(dep, event);
+
+ /*
+ * The XferNotReady event is generated only once before the endpoint
+ * starts. It will be generated again when the END_TRANSFER command is
+ * issued. For some controller versions, the XferNotReady event may be
+ * generated while the END_TRANSFER command is still in progress. Ignore
+ * it and wait for the next XferNotReady event after the command is
+ * completed.
+ */
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
+ return;
+
(void) __dwc3_gadget_start_isoc(dep);
}
+static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ if (event->status == DEPEVT_STREAMEVT_FOUND) {
+ dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
+ goto out;
+ }
+
+ /* Note: NoStream rejection event param value is 0 and not 0xFFFF */
+ switch (event->parameters) {
+ case DEPEVT_STREAM_PRIME:
+ /*
+ * If the host can properly transition the endpoint state from
+ * idle to prime after a NoStream rejection, there's no need to
+ * force restarting the endpoint to reinitiate the stream. To
+ * simplify the check, assume the host follows the USB spec if
+ * it primed the endpoint more than once.
+ */
+ if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
+ if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
+ dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
+ else
+ dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
+ }
+
+ break;
+ case DEPEVT_STREAM_NOSTREAM:
+ if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
+ !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
+ !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE))
+ break;
+
+ /*
+ * If the host rejects a stream due to no active stream, by the
+ * USB and xHCI spec, the endpoint will be put back to idle
+ * state. When the host is ready (buffer added/updated), it will
+ * prime the endpoint to inform the usb device controller. This
+ * triggers the device controller to issue ERDY to restart the
+ * stream. However, some hosts don't follow this and keep the
+ * endpoint in the idle state. No prime arrives even though the
+ * host streams are updated, and the device controller is never
+ * triggered to generate ERDY to move the next stream data. To
+ * work around this and maintain compatibility with various
+ * hosts, force reinitiation of the stream until the host is
+ * ready, instead of waiting for the host to prime the endpoint.
+ */
+ if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
+ unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;
+
+ dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
+ } else {
+ dep->flags |= DWC3_EP_DELAY_START;
+ dwc3_stop_active_transfer(dep, true, true);
+ return;
+ }
+ break;
+ }
+
+out:
+ dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
+}
+
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
@@ -2668,8 +2909,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->flags &= ~DWC3_EP_DELAY_START;
}
break;
- case DWC3_DEPEVT_STREAMEVT:
case DWC3_DEPEVT_XFERCOMPLETE:
+ dwc3_gadget_endpoint_transfer_complete(dep, event);
+ break;
+ case DWC3_DEPEVT_STREAMEVT:
+ dwc3_gadget_endpoint_stream_event(dep, event);
+ break;
case DWC3_DEPEVT_RXTXFIFOEVT:
break;
}
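
The stream workaround above is driven by three endpoint flags; their
lifecycle, summarized from the code in this patch:

	/*
	 * DWC3_EP_FORCE_RESTART_STREAM  set at ep enable for stream-capable
	 *                               eps; cleared once the host is seen
	 *                               to prime more than once
	 * DWC3_EP_FIRST_STREAM_PRIMED   set on the first prime (or "stream
	 *                               found") event
	 * DWC3_EP_IGNORE_NEXT_NOSTREAM  set by dwc3_stop_active_transfer()
	 *                               so the NoStream generated by our own
	 *                               END_TRANSFER is not mistaken for a
	 *                               host rejection
	 */
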
@@ -2761,6 +3006,14 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
WARN_ON_ONCE(ret);
dep->resource_index = 0;
+ /*
+ * The END_TRANSFER command will cause the controller to generate a
+ * NoStream event that is not due to the host DP NoStream rejection.
+ * Ignore the next NoStream event.
+ */
+ if (dep->stream_capable)
+ dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
+
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
else
@@ -2841,7 +3094,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
* STAR#9000466709: RTL: Device : Disconnect event not
* generated if setup packet pending in FIFO
*/
- if (dwc->revision < DWC3_REVISION_188A) {
+ if (DWC3_VER_IS_PRIOR(DWC3, 188A)) {
if (dwc->setup_packet_pending)
dwc3_gadget_disconnect_interrupt(dwc);
}
@@ -2900,7 +3153,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
* STAR#9000483510: RTL: SS : USB3 reset event may
* not be generated always when the link enters poll
*/
- if (dwc->revision < DWC3_REVISION_190A)
+ if (DWC3_VER_IS_PRIOR(DWC3, 190A))
dwc3_gadget_reset_interrupt(dwc);
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -2928,7 +3181,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
/* Enable USB2 LPM Capability */
- if ((dwc->revision > DWC3_REVISION_194A) &&
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
(speed != DWC3_DSTS_SUPERSPEED) &&
(speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
@@ -2947,11 +3200,10 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
* BESL value in the LPM token is less than or equal to LPM
* NYET threshold.
*/
- WARN_ONCE(dwc->revision < DWC3_REVISION_240A
- && dwc->has_lpm_erratum,
+ WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
"LPM Erratum not available on dwc3 revisions < 2.40a\n");
- if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
+ if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
dwc3_gadget_dctl_write_safe(dwc, reg);
@@ -3022,7 +3274,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
* operational mode
*/
pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
- if ((dwc->revision < DWC3_REVISION_250A) &&
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A) &&
(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
(next == DWC3_LINK_STATE_RESUME)) {
@@ -3048,7 +3300,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
* STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
* core send LGO_Ux entering U0
*/
- if (dwc->revision < DWC3_REVISION_183A) {
+ if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
if (next == DWC3_LINK_STATE_U0) {
u32 u1u2;
u32 reg;
@@ -3159,7 +3411,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEVICE_EVENT_EOPF:
/* This became the suspend event for version 2.30a and above */
- if (dwc->revision >= DWC3_REVISION_230A) {
+ if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
/*
* Ignore suspend event until the gadget enters into
* USB_STATE_CONFIGURED state.
@@ -3404,7 +3656,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
* is less than super speed because we don't have means, yet, to tell
* composite.c that we are USB 2.0 + LPM ECN.
*/
- if (dwc->revision < DWC3_REVISION_220A &&
+ if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
!dwc->dis_metastability_quirk)
dev_info(dwc->dev, "changing max_speed on rev %08x\n",
dwc->revision);
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index fbc7d8013f0b..24dca3872022 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* gadget.h - DesignWare USB3 DRD Gadget Header
*
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 86dbd012b984..bef1c1ac2067 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -104,7 +104,7 @@ int dwc3_host_init(struct dwc3 *dwc)
*
* This following flag tells XHCI to do just that.
*/
- if (dwc->revision <= DWC3_REVISION_300A)
+ if (DWC3_VER_IS_WITHIN(DWC3, ANY, 300A))
props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index 70acdf94a0bf..9bbe5d4bf076 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* io.h - DesignWare USB3 DRD IO Header
*
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 3054b89512ff..4c4fc6c41d9b 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* trace.h - DesignWare USB3 DRD Controller Trace Support
*
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 171280c80228..04ba11fff0ed 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -18,7 +18,6 @@
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
index 6e2b7266a695..8b4d71de45fc 100644
--- a/drivers/usb/early/xhci-dbc.h
+++ b/drivers/usb/early/xhci-dbc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xhci-dbc.h - xHCI debug capability early driver
*
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index cb4950cf1cdc..5c1eb96a5c57 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -96,40 +96,43 @@ function_descriptors(struct usb_function *f,
}
/**
- * next_ep_desc() - advance to the next EP descriptor
+ * next_desc() - advance to the next desc_type descriptor
* @t: current pointer within descriptor array
+ * @desc_type: descriptor type
*
- * Return: next EP descriptor or NULL
+ * Return: next desc_type descriptor or NULL
*
- * Iterate over @t until either EP descriptor found or
+ * Iterate over @t until either desc_type descriptor found or
* NULL (that indicates end of list) encountered
*/
static struct usb_descriptor_header**
-next_ep_desc(struct usb_descriptor_header **t)
+next_desc(struct usb_descriptor_header **t, u8 desc_type)
{
for (; *t; t++) {
- if ((*t)->bDescriptorType == USB_DT_ENDPOINT)
+ if ((*t)->bDescriptorType == desc_type)
return t;
}
return NULL;
}
/*
- * for_each_ep_desc()- iterate over endpoint descriptors in the
- * descriptors list
- * @start: pointer within descriptor array.
- * @ep_desc: endpoint descriptor to use as the loop cursor
+ * for_each_desc() - iterate over desc_type descriptors in the
+ * descriptors list
+ * @start: pointer within descriptor array.
+ * @iter_desc: desc_type descriptor to use as the loop cursor
+ * @desc_type: wanted descriptor type
*/
-#define for_each_ep_desc(start, ep_desc) \
- for (ep_desc = next_ep_desc(start); \
- ep_desc; ep_desc = next_ep_desc(ep_desc+1))
+#define for_each_desc(start, iter_desc, desc_type) \
+ for (iter_desc = next_desc(start, desc_type); \
+ iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type))
/**
- * config_ep_by_speed() - configures the given endpoint
+ * config_ep_by_speed_and_alt() - configures the given endpoint
* according to gadget speed.
* @g: pointer to the gadget
* @f: usb function
* @_ep: the endpoint to configure
+ * @alt: alternate setting number
*
* Return: error code, 0 on success
*
@@ -142,11 +145,13 @@ next_ep_desc(struct usb_descriptor_header **t)
* Note: the supplied function should hold all the descriptors
* for supported speeds
*/
-int config_ep_by_speed(struct usb_gadget *g,
- struct usb_function *f,
- struct usb_ep *_ep)
+int config_ep_by_speed_and_alt(struct usb_gadget *g,
+ struct usb_function *f,
+ struct usb_ep *_ep,
+ u8 alt)
{
struct usb_endpoint_descriptor *chosen_desc = NULL;
+ struct usb_interface_descriptor *int_desc = NULL;
struct usb_descriptor_header **speed_desc = NULL;
struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
@@ -182,8 +187,21 @@ int config_ep_by_speed(struct usb_gadget *g,
default:
speed_desc = f->fs_descriptors;
}
+
+ /* find correct alternate setting descriptor */
+ for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) {
+ int_desc = (struct usb_interface_descriptor *)*d_spd;
+
+ if (int_desc->bAlternateSetting == alt) {
+ speed_desc = d_spd;
+ goto intf_found;
+ }
+ }
+ return -EIO;
+
+intf_found:
/* find descriptors */
- for_each_ep_desc(speed_desc, d_spd) {
+ for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) {
chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
if (chosen_desc->bEndpointAddress == _ep->address)
goto ep_found;
@@ -237,6 +255,32 @@ ep_found:
}
return 0;
}
+EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt);
+
+/**
+ * config_ep_by_speed() - configures the given endpoint
+ * according to gadget speed.
+ * @g: pointer to the gadget
+ * @f: usb function
+ * @_ep: the endpoint to configure
+ *
+ * Return: error code, 0 on success
+ *
+ * This function chooses the right descriptors for a given
+ * endpoint according to gadget speed and saves it in the
+ * endpoint desc field. If the endpoint already has a descriptor
+ * assigned to it - overwrites it with currently corresponding
+ * descriptor. The endpoint maxpacket field is updated according
+ * to the chosen descriptor.
+ * Note: the supplied function should hold all the descriptors
+ * for supported speeds
+ */
+int config_ep_by_speed(struct usb_gadget *g,
+ struct usb_function *f,
+ struct usb_ep *_ep)
+{
+ return config_ep_by_speed_and_alt(g, f, _ep, 0);
+}
EXPORT_SYMBOL_GPL(config_ep_by_speed);
/**
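For context, a function driver with several alternate settings can now resolve descriptors per setting from its set_alt() handler. A minimal sketch, assuming a hypothetical function type my_func with an in_ep endpoint (the names here are illustrative, not part of this patch):

static int my_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct my_func *dev = func_to_my_func(f);
	int ret;

	/* pick the descriptor matching both gadget speed and alt setting */
	ret = config_ep_by_speed_and_alt(f->config->cdev->gadget, f,
					 dev->in_ep, alt);
	if (ret)
		return ret;

	return usb_ep_enable(dev->in_ep);
}

Drivers that only implement alternate setting 0 keep calling config_ep_by_speed(), which is now the thin wrapper shown above.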
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 32b637e3e1fa..9dc06a4e1b30 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -13,8 +13,6 @@
int check_user_usb_string(const char *name,
struct usb_gadget_strings *stringtab_dev)
{
- unsigned primary_lang;
- unsigned sub_lang;
u16 num;
int ret;
@@ -22,17 +20,7 @@ int check_user_usb_string(const char *name,
if (ret)
return ret;
- primary_lang = num & 0x3ff;
- sub_lang = num >> 10;
-
- /* simple sanity check for valid langid */
- switch (primary_lang) {
- case 0:
- case 0x62 ... 0xfe:
- case 0x100 ... 0x3ff:
- return -EINVAL;
- }
- if (!sub_lang)
+ if (!usb_validate_langid(num))
return -EINVAL;
stringtab_dev->language = num;
@@ -260,6 +248,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
char *name;
int ret;
+ if (strlen(page) < len)
+ return -EOVERFLOW;
+
name = kstrdup(page, GFP_KERNEL);
if (!name)
return -ENOMEM;
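The open-coded langid check moves into a shared helper. A sketch of usb_validate_langid(), mirroring the logic removed above (the actual helper lives in the USB common code, drivers/usb/common/common.c):

bool usb_validate_langid(u16 langid)
{
	u16 primary_lang = langid & 0x3ff;
	u16 sub_lang = langid >> 10;

	switch (primary_lang) {
	case 0:
	case 0x62 ... 0xfe:
	case 0x100 ... 0x3ff:
		/* reserved/invalid primary language ranges */
		return false;
	}
	return sub_lang != 0;
}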
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 7c152c28b26c..200596ea9557 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -723,6 +723,20 @@ static void acm_free_func(struct usb_function *f)
kfree(acm);
}
+static void acm_resume(struct usb_function *f)
+{
+ struct f_acm *acm = func_to_acm(f);
+
+ gserial_resume(&acm->port);
+}
+
+static void acm_suspend(struct usb_function *f)
+{
+ struct f_acm *acm = func_to_acm(f);
+
+ gserial_suspend(&acm->port);
+}
+
static struct usb_function *acm_alloc_func(struct usb_function_instance *fi)
{
struct f_serial_opts *opts;
@@ -750,6 +764,8 @@ static struct usb_function *acm_alloc_func(struct usb_function_instance *fi)
acm->port_num = opts->port_num;
acm->port.func.unbind = acm_unbind;
acm->port.func.free_func = acm_free_func;
+ acm->port.func.resume = acm_resume;
+ acm->port.func.suspend = acm_suspend;
return &acm->port.func;
}
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index b81a91d504bd..cfcc4e81fb77 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -291,8 +291,6 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
eem->port.out_ep = ep;
- status = -ENOMEM;
-
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 10f01f974f67..494f853f2206 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2508,7 +2508,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
os_descs_count = get_unaligned_le32(data);
data += 4;
len -= 4;
- };
+ }
/* Read descriptors */
raw_descs = data;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f3816a5c861e..df671acdd464 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -252,9 +252,6 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
if (!count)
return 0;
- if (!access_ok(buffer, count))
- return -EFAULT;
-
spin_lock_irqsave(&hidg->read_spinlock, flags);
#define READ_COND (!list_empty(&hidg->completed_out_req))
@@ -339,9 +336,6 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
unsigned long flags;
ssize_t status = -ENOMEM;
- if (!access_ok(buffer, count))
- return -EFAULT;
-
spin_lock_irqsave(&hidg->write_spinlock, flags);
#define WRITE_COND (!hidg->write_pending)
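The dropped access_ok() calls were redundant: copy_to_user()/copy_from_user() validate the user range themselves and report failure through their return value. A one-line sketch of the pattern the read/write paths already rely on:

	if (copy_to_user(buffer, data, count))
		return -EFAULT;	/* also covers what access_ok() checked */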
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index 1406255d0865..e62713846350 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -348,6 +348,20 @@ static void gser_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
}
+static void gser_resume(struct usb_function *f)
+{
+ struct f_gser *gser = func_to_gser(f);
+
+ gserial_resume(&gser->port);
+}
+
+static void gser_suspend(struct usb_function *f)
+{
+ struct f_gser *gser = func_to_gser(f);
+
+ gserial_suspend(&gser->port);
+}
+
static struct usb_function *gser_alloc(struct usb_function_instance *fi)
{
struct f_gser *gser;
@@ -369,6 +383,8 @@ static struct usb_function *gser_alloc(struct usb_function_instance *fi)
gser->port.func.set_alt = gser_set_alt;
gser->port.func.disable = gser_disable;
gser->port.func.free_func = gser_free;
+ gser->port.func.resume = gser_resume;
+ gser->port.func.suspend = gser_suspend;
return &gser->port.func;
}
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 36504931b2d1..2979cbe4d95f 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -531,6 +531,7 @@ static int uasp_prepare_r_request(struct usbg_cmd *cmd)
stream->req_in->sg = se_cmd->t_data_sg;
}
+ stream->req_in->is_last = 1;
stream->req_in->complete = uasp_status_data_cmpl;
stream->req_in->length = se_cmd->data_length;
stream->req_in->context = cmd;
@@ -554,6 +555,7 @@ static void uasp_prepare_status(struct usbg_cmd *cmd)
*/
iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
iu->status = se_cmd->scsi_status;
+ stream->req_status->is_last = 1;
stream->req_status->context = cmd;
stream->req_status->length = se_cmd->scsi_sense_length + 16;
stream->req_status->buf = iu;
@@ -991,6 +993,7 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
req->sg = se_cmd->t_data_sg;
}
+ req->is_last = 1;
req->complete = usbg_data_write_cmpl;
req->length = se_cmd->data_length;
req->context = cmd;
diff --git a/drivers/usb/gadget/function/f_uvc.h b/drivers/usb/gadget/function/f_uvc.h
index a81a17765558..1db972d4beeb 100644
--- a/drivers/usb/gadget/function/f_uvc.h
+++ b/drivers/usb/gadget/function/f_uvc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* f_uvc.h -- USB Video Class Gadget driver
*
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index c7e3a70ce6c1..f6167f7fea82 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* RNDIS Definitions for Remote NDIS
*
diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
index 81d3d4ed6dfb..5ea6b86f1fda 100644
--- a/drivers/usb/gadget/function/u_audio.h
+++ b/drivers/usb/gadget/function/u_audio.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_audio.h -- interface to USB gadget "ALSA sound card" utilities
*
diff --git a/drivers/usb/gadget/function/u_ecm.h b/drivers/usb/gadget/function/u_ecm.h
index 098ece573a5e..77cfb89932be 100644
--- a/drivers/usb/gadget/function/u_ecm.h
+++ b/drivers/usb/gadget/function/u_ecm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_ecm.h
*
diff --git a/drivers/usb/gadget/function/u_eem.h b/drivers/usb/gadget/function/u_eem.h
index 921386a375cf..3bd85dfcd71c 100644
--- a/drivers/usb/gadget/function/u_eem.h
+++ b/drivers/usb/gadget/function/u_eem.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_eem.h
*
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 332307d54292..10dd640684e2 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_ether.h -- interface to USB gadget "ethernet link" utilities
*
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index d8b92485b727..bd92b5703013 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_ether_configfs.h
*
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index f9b0cf67360d..f102ec23f3af 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_fs.h
*
diff --git a/drivers/usb/gadget/function/u_gether.h b/drivers/usb/gadget/function/u_gether.h
index ce4f07626f96..2f7a373ed449 100644
--- a/drivers/usb/gadget/function/u_gether.h
+++ b/drivers/usb/gadget/function/u_gether.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_gether.h
*
diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
index 1594bfa312eb..84e6da302499 100644
--- a/drivers/usb/gadget/function/u_hid.h
+++ b/drivers/usb/gadget/function/u_hid.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_hid.h
*
diff --git a/drivers/usb/gadget/function/u_midi.h b/drivers/usb/gadget/function/u_midi.h
index 29bf006c0a13..f6e14af7f566 100644
--- a/drivers/usb/gadget/function/u_midi.h
+++ b/drivers/usb/gadget/function/u_midi.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_midi.h
*
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index 70da3201a1d0..5408854d8407 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_ncm.h
*
diff --git a/drivers/usb/gadget/function/u_phonet.h b/drivers/usb/gadget/function/u_phonet.h
index 12fb613f85d1..c53233b37192 100644
--- a/drivers/usb/gadget/function/u_phonet.h
+++ b/drivers/usb/gadget/function/u_phonet.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_phonet.h - interface to Phonet
*
diff --git a/drivers/usb/gadget/function/u_printer.h b/drivers/usb/gadget/function/u_printer.h
index 78797764f478..318205fb778e 100644
--- a/drivers/usb/gadget/function/u_printer.h
+++ b/drivers/usb/gadget/function/u_printer.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_printer.h
*
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index 1e148b76f339..a8c409b2f52f 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_rndis.h
*
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 8167d379e115..3cfc6e2eba71 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -120,6 +120,8 @@ struct gs_port {
wait_queue_head_t drain_wait; /* wait while writes drain */
bool write_busy;
wait_queue_head_t close_wait;
+ bool suspended; /* port suspended */
+ bool start_delayed; /* delay start when suspended */
/* REVISIT this state ... */
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
@@ -630,13 +632,19 @@ static int gs_open(struct tty_struct *tty, struct file *file)
/* if connected, start the I/O stream */
if (port->port_usb) {
- struct gserial *gser = port->port_usb;
-
- pr_debug("gs_open: start ttyGS%d\n", port->port_num);
- gs_start_io(port);
-
- if (gser->connect)
- gser->connect(gser);
+ /* if port is suspended, wait for resume to start I/O stream */
+ if (!port->suspended) {
+ struct gserial *gser = port->port_usb;
+
+ pr_debug("gs_open: start ttyGS%d\n", port->port_num);
+ gs_start_io(port);
+
+ if (gser->connect)
+ gser->connect(gser);
+ } else {
+ pr_debug("delay start of ttyGS%d\n", port->port_num);
+ port->start_delayed = true;
+ }
}
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
@@ -680,7 +688,7 @@ raced_with_open:
pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
gser = port->port_usb;
- if (gser && gser->disconnect)
+ if (gser && !port->suspended && gser->disconnect)
gser->disconnect(gser);
/* wait for circular write buffer to drain, disconnect, or at
@@ -708,6 +716,7 @@ raced_with_open:
else
kfifo_reset(&port->port_write_buf);
+ port->start_delayed = false;
port->port.count = 0;
port->port.tty = NULL;
@@ -1403,6 +1412,38 @@ void gserial_disconnect(struct gserial *gser)
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
+void gserial_suspend(struct gserial *gser)
+{
+ struct gs_port *port = gser->ioport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->suspended = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gserial_suspend);
+
+void gserial_resume(struct gserial *gser)
+{
+ struct gs_port *port = gser->ioport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->suspended = false;
+ if (!port->start_delayed) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ pr_debug("delayed start ttyGS%d\n", port->port_num);
+ gs_start_io(port);
+ if (gser->connect)
+ gser->connect(gser);
+ port->start_delayed = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gserial_resume);
+
static int userial_init(void)
{
unsigned i;
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index e5b08ab8cf7a..cadb76eecbc7 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_serial.h - interface to USB gadget "serial port"/TTY utilities
*
@@ -68,6 +68,8 @@ ssize_t gserial_get_console(unsigned char port_num, char *page);
/* connect/disconnect is handled by individual functions */
int gserial_connect(struct gserial *, u8 port_num);
void gserial_disconnect(struct gserial *);
+void gserial_suspend(struct gserial *p);
+void gserial_resume(struct gserial *p);
/* functions are bound to configurations by a config or gadget driver */
int gser_bind_config(struct usb_configuration *c, u8 port_num);
diff --git a/drivers/usb/gadget/function/u_tcm.h b/drivers/usb/gadget/function/u_tcm.h
index 3f7ccecb0f9b..2cd15d9a1c0d 100644
--- a/drivers/usb/gadget/function/u_tcm.h
+++ b/drivers/usb/gadget/function/u_tcm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_tcm.h
*
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index 6f1a9d73defe..39c0e29e1b46 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_uac1.h - Utility definitions for UAC1 function
*
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.h b/drivers/usb/gadget/function/u_uac1_legacy.h
index 5c1bdf46fe32..b5df9bcbbeba 100644
--- a/drivers/usb/gadget/function/u_uac1_legacy.h
+++ b/drivers/usb/gadget/function/u_uac1_legacy.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
*
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 82048791eb6e..b5035711172d 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_uac2.h
*
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 16da49a2fcf2..9a01a7d4f17f 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_uvc.h
*
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 1473d25ff17a..23ee25383c1f 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* uvc_gadget.h -- USB Video Class Gadget driver
*
@@ -77,6 +77,8 @@ struct uvc_video {
struct uvc_device *uvc;
struct usb_ep *ep;
+ struct work_struct pump;
+
/* Frame parameters */
u8 bpp;
u32 fcc;
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index 341391dbc81f..7e1d7ca29bf2 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* uvc_configfs.h
*
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 495f0ec663ea..4ca89eab6159 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -169,7 +169,9 @@ uvc_v4l2_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
if (ret < 0)
return ret;
- return uvcg_video_pump(video);
+ schedule_work(&video->pump);
+
+ return ret;
}
static int
diff --git a/drivers/usb/gadget/function/uvc_v4l2.h b/drivers/usb/gadget/function/uvc_v4l2.h
index 452d71059b3f..1576005b61fd 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.h
+++ b/drivers/usb/gadget/function/uvc_v4l2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* uvc_v4l2.h -- USB Video Class Gadget driver
*
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 5c042f380708..633e23d58d86 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -142,44 +142,12 @@ static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
return ret;
}
-/*
- * I somehow feel that synchronisation won't be easy to achieve here. We have
- * three events that control USB requests submission:
- *
- * - USB request completion: the completion handler will resubmit the request
- * if a video buffer is available.
- *
- * - USB interface setting selection: in response to a SET_INTERFACE request,
- * the handler will start streaming if a video buffer is available and if
- * video is not currently streaming.
- *
- * - V4L2 buffer queueing: the driver will start streaming if video is not
- * currently streaming.
- *
- * Race conditions between those 3 events might lead to deadlocks or other
- * nasty side effects.
- *
- * The "video currently streaming" condition can't be detected by the irqqueue
- * being empty, as a request can still be in flight. A separate "queue paused"
- * flag is thus needed.
- *
- * The paused flag will be set when we try to retrieve the irqqueue head if the
- * queue is empty, and cleared when we queue a buffer.
- *
- * The USB request completion handler will get the buffer at the irqqueue head
- * under protection of the queue spinlock. If the queue is empty, the streaming
- * paused flag will be set. Right after releasing the spinlock a userspace
- * application can queue a buffer. The flag will then cleared, and the ioctl
- * handler will restart the video stream.
- */
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
struct uvc_video *video = req->context;
struct uvc_video_queue *queue = &video->queue;
- struct uvc_buffer *buf;
unsigned long flags;
- int ret;
switch (req->status) {
case 0:
@@ -188,39 +156,20 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
case -ESHUTDOWN: /* disconnect from host. */
uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
uvcg_queue_cancel(queue, 1);
- goto requeue;
+ break;
default:
uvcg_info(&video->uvc->func,
"VS request completed with status %d.\n",
req->status);
uvcg_queue_cancel(queue, 0);
- goto requeue;
}
- spin_lock_irqsave(&video->queue.irqlock, flags);
- buf = uvcg_queue_head(&video->queue);
- if (buf == NULL) {
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
- goto requeue;
- }
-
- video->encode(req, video, buf);
-
- ret = uvcg_video_ep_queue(video, req);
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
-
- if (ret < 0) {
- uvcg_queue_cancel(queue, 0);
- goto requeue;
- }
-
- return;
-
-requeue:
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
+
+ schedule_work(&video->pump);
}
static int
@@ -294,18 +243,15 @@ error:
* This function fills the available USB requests (listed in req_free) with
* video data from the queued buffers.
*/
-int uvcg_video_pump(struct uvc_video *video)
+static void uvcg_video_pump(struct work_struct *work)
{
+ struct uvc_video *video = container_of(work, struct uvc_video, pump);
struct uvc_video_queue *queue = &video->queue;
struct usb_request *req;
struct uvc_buffer *buf;
unsigned long flags;
int ret;
- /* FIXME TODO Race between uvcg_video_pump and requests completion
- * handler ???
- */
-
while (1) {
/* Retrieve the first available USB request, protected by the
* request lock.
@@ -313,7 +259,7 @@ int uvcg_video_pump(struct uvc_video *video)
spin_lock_irqsave(&video->req_lock, flags);
if (list_empty(&video->req_free)) {
spin_unlock_irqrestore(&video->req_lock, flags);
- return 0;
+ return;
}
req = list_first_entry(&video->req_free, struct usb_request,
list);
@@ -345,7 +291,7 @@ int uvcg_video_pump(struct uvc_video *video)
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
- return 0;
+ return;
}
/*
@@ -363,6 +309,9 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
}
if (!enable) {
+ cancel_work_sync(&video->pump);
+ uvcg_queue_cancel(&video->queue, 0);
+
for (i = 0; i < UVC_NUM_REQUESTS; ++i)
if (video->req[i])
usb_ep_dequeue(video->ep, video->req[i]);
@@ -384,7 +333,9 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
} else
video->encode = uvc_video_encode_isoc;
- return uvcg_video_pump(video);
+ schedule_work(&video->pump);
+
+ return ret;
}
/*
@@ -394,6 +345,7 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
INIT_LIST_HEAD(&video->req_free);
spin_lock_init(&video->req_lock);
+ INIT_WORK(&video->pump, uvcg_video_pump);
video->uvc = uvc;
video->fcc = V4L2_PIX_FMT_YUYV;
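All request pumping is now funnelled through a single work item, which serializes the V4L2 qbuf path and the USB completion handler and makes the fragile locking described in the deleted comment unnecessary. A minimal sketch of the pattern, with hypothetical names:

struct my_video {
	struct work_struct pump;
	/* ... queues, locks ... */
};

static void my_pump(struct work_struct *work)
{
	struct my_video *video = container_of(work, struct my_video, pump);
	/* fill free USB requests from queued video buffers */
}

	INIT_WORK(&video->pump, my_pump);	/* once, at init */
	schedule_work(&video->pump);		/* from any producer context */
	cancel_work_sync(&video->pump);		/* on disable, before freeing */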
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index dff12103f696..03adeefa343b 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* uvc_video.h -- USB Video Class Gadget driver
*
@@ -14,8 +14,6 @@
struct uvc_video;
-int uvcg_video_pump(struct uvc_video *video);
-
int uvcg_video_enable(struct uvc_video *video, int enable);
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc);
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index dd81fd538cb8..a748ed0842e8 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail;
+ }
usb_otg_descriptor_init(cdev->gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index 8d7a556ece30..563363aba48f 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail1;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index aa0de9e35afa..3afddd3bea6e 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1361,7 +1361,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
req->buf = dev->rbuf;
req->context = NULL;
- value = -EOPNOTSUPP;
switch (ctrl->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
@@ -1784,7 +1783,7 @@ static ssize_t
dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
struct dev_data *dev = fd->private_data;
- ssize_t value = len, length = len;
+ ssize_t value, length = len;
unsigned total;
u32 tag;
char *kbuf;
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index f18f77584fc2..9ed22c5fb7fe 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -229,18 +229,8 @@ static struct usb_composite_driver msg_driver = {
.unbind = msg_unbind,
};
+module_usb_composite_driver(msg_driver);
+
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Michal Nazarewicz");
MODULE_LICENSE("GPL");
-
-static int __init msg_init(void)
-{
- return usb_composite_probe(&msg_driver);
-}
-module_init(msg_init);
-
-static void __exit msg_cleanup(void)
-{
- usb_composite_unregister(&msg_driver);
-}
-module_exit(msg_cleanup);
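module_usb_composite_driver() generates exactly the init/exit boilerplate deleted here. In include/linux/usb/composite.h it is defined along these lines:

#define module_usb_composite_driver(__usb_composite_driver) \
	module_driver(__usb_composite_driver, usb_composite_probe, \
		       usb_composite_unregister)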
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index c61e71ba7045..0f1b45e3abd1 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index ca7d95bf7397..e01e366d89cd 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -7,6 +7,7 @@
*/
#include <linux/compiler.h>
+#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/kref.h>
@@ -123,8 +124,6 @@ static void raw_event_queue_destroy(struct raw_event_queue *queue)
struct raw_dev;
-#define USB_RAW_MAX_ENDPOINTS 32
-
enum ep_state {
STATE_EP_DISABLED,
STATE_EP_ENABLED,
@@ -134,6 +133,7 @@ struct raw_ep {
struct raw_dev *dev;
enum ep_state state;
struct usb_ep *ep;
+ u8 addr;
struct usb_request *req;
bool urb_queued;
bool disabling;
@@ -168,7 +168,8 @@ struct raw_dev {
bool ep0_out_pending;
bool ep0_urb_queued;
ssize_t ep0_status;
- struct raw_ep eps[USB_RAW_MAX_ENDPOINTS];
+ struct raw_ep eps[USB_RAW_EPS_NUM_MAX];
+ int eps_num;
struct completion ep0_done;
struct raw_event_queue queue;
@@ -202,8 +203,8 @@ static void dev_free(struct kref *kref)
usb_ep_free_request(dev->gadget->ep0, dev->req);
}
raw_event_queue_destroy(&dev->queue);
- for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) {
- if (dev->eps[i].state != STATE_EP_ENABLED)
+ for (i = 0; i < dev->eps_num; i++) {
+ if (dev->eps[i].state == STATE_EP_DISABLED)
continue;
usb_ep_disable(dev->eps[i].ep);
usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
@@ -249,12 +250,26 @@ static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
complete(&dev->ep0_done);
}
+static u8 get_ep_addr(const char *name)
+{
+ /* If the endpoint has a fixed function (named e.g. "ep12out-bulk"),
+ * parse the endpoint address from its name. We deliberately use the
+ * deprecated simple_strtoul() function here, as the number isn't
+ * followed by '\0' or '\n'.
+ */
+ if (isdigit(name[2]))
+ return simple_strtoul(&name[2], NULL, 10);
+ /* Otherwise the endpoint is configurable (named as e.g. "ep-a"). */
+ return USB_RAW_EP_ADDR_ANY;
+}
+
static int gadget_bind(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
- int ret = 0;
+ int ret = 0, i = 0;
struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
struct usb_request *req;
+ struct usb_ep *ep;
unsigned long flags;
if (strcmp(gadget->name, dev->udc_name) != 0)
@@ -273,6 +288,13 @@ static int gadget_bind(struct usb_gadget *gadget,
dev->req->context = dev;
dev->req->complete = gadget_ep0_complete;
dev->gadget = gadget;
+ gadget_for_each_ep(ep, dev->gadget) {
+ dev->eps[i].ep = ep;
+ dev->eps[i].addr = get_ep_addr(ep->name);
+ dev->eps[i].state = STATE_EP_DISABLED;
+ i++;
+ }
+ dev->eps_num = i;
spin_unlock_irqrestore(&dev->lock, flags);
/* Matches kref_put() in gadget_unbind(). */
@@ -555,7 +577,7 @@ static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
if (copy_from_user(io, ptr, sizeof(*io)))
return ERR_PTR(-EFAULT);
- if (io->ep >= USB_RAW_MAX_ENDPOINTS)
+ if (io->ep >= USB_RAW_EPS_NUM_MAX)
return ERR_PTR(-EINVAL);
if (!usb_raw_io_flags_valid(io->flags))
return ERR_PTR(-EINVAL);
@@ -669,43 +691,61 @@ static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
if (IS_ERR(data))
return PTR_ERR(data);
ret = raw_process_ep0_io(dev, &io, data, false);
- if (ret)
+ if (ret < 0)
goto free;
length = min(io.length, (unsigned int)ret);
if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
ret = -EFAULT;
+ else
+ ret = length;
free:
kfree(data);
return ret;
}
-static bool check_ep_caps(struct usb_ep *ep,
- struct usb_endpoint_descriptor *desc)
+static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value)
{
- switch (usb_endpoint_type(desc)) {
- case USB_ENDPOINT_XFER_ISOC:
- if (!ep->caps.type_iso)
- return false;
- break;
- case USB_ENDPOINT_XFER_BULK:
- if (!ep->caps.type_bulk)
- return false;
- break;
- case USB_ENDPOINT_XFER_INT:
- if (!ep->caps.type_int)
- return false;
- break;
- default:
- return false;
+ int ret = 0;
+ unsigned long flags;
+
+ if (value)
+ return -EINVAL;
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (dev->ep0_urb_queued) {
+ dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (!dev->ep0_in_pending && !dev->ep0_out_pending) {
+ dev_dbg(&dev->gadget->dev, "fail, no request pending\n");
+ ret = -EBUSY;
+ goto out_unlock;
}
- if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
- return false;
- if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
- return false;
+ ret = usb_ep_set_halt(dev->gadget->ep0);
+ if (ret < 0)
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_set_halt returned %d\n", ret);
+
+ if (dev->ep0_in_pending)
+ dev->ep0_in_pending = false;
+ else
+ dev->ep0_out_pending = false;
- return true;
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
}
static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
@@ -713,7 +753,7 @@ static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
int ret = 0, i;
unsigned long flags;
struct usb_endpoint_descriptor *desc;
- struct usb_ep *ep = NULL;
+ struct raw_ep *ep;
desc = memdup_user((void __user *)value, sizeof(*desc));
if (IS_ERR(desc))
@@ -741,41 +781,32 @@ static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
goto out_free;
}
- for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) {
- if (dev->eps[i].state == STATE_EP_ENABLED)
+ for (i = 0; i < dev->eps_num; i++) {
+ ep = &dev->eps[i];
+ if (ep->state != STATE_EP_DISABLED)
continue;
- break;
- }
- if (i == USB_RAW_MAX_ENDPOINTS) {
- dev_dbg(&dev->gadget->dev,
- "fail, no device endpoints available\n");
- ret = -EBUSY;
- goto out_free;
- }
-
- gadget_for_each_ep(ep, dev->gadget) {
- if (ep->enabled)
+ if (ep->addr != usb_endpoint_num(desc) &&
+ ep->addr != USB_RAW_EP_ADDR_ANY)
continue;
- if (!check_ep_caps(ep, desc))
+ if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
continue;
- ep->desc = desc;
- ret = usb_ep_enable(ep);
+ ep->ep->desc = desc;
+ ret = usb_ep_enable(ep->ep);
if (ret < 0) {
dev_err(&dev->gadget->dev,
"fail, usb_ep_enable returned %d\n", ret);
goto out_free;
}
- dev->eps[i].req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (!dev->eps[i].req) {
+ ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
+ if (!ep->req) {
dev_err(&dev->gadget->dev,
"fail, usb_ep_alloc_request failed\n");
- usb_ep_disable(ep);
+ usb_ep_disable(ep->ep);
ret = -ENOMEM;
goto out_free;
}
- dev->eps[i].ep = ep;
- dev->eps[i].state = STATE_EP_ENABLED;
- ep->driver_data = &dev->eps[i];
+ ep->state = STATE_EP_ENABLED;
+ ep->ep->driver_data = ep;
ret = i;
goto out_unlock;
}
@@ -794,10 +825,6 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
{
int ret = 0, i = value;
unsigned long flags;
- const void *desc;
-
- if (i < 0 || i >= USB_RAW_MAX_ENDPOINTS)
- return -EINVAL;
spin_lock_irqsave(&dev->lock, flags);
if (dev->state != STATE_DEV_RUNNING) {
@@ -810,7 +837,12 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
ret = -EBUSY;
goto out_unlock;
}
- if (dev->eps[i].state != STATE_EP_ENABLED) {
+ if (i < 0 || i >= dev->eps_num) {
+ dev_dbg(dev->dev, "fail, invalid endpoint\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (dev->eps[i].state == STATE_EP_DISABLED) {
dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
ret = -EINVAL;
goto out_unlock;
@@ -834,10 +866,8 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
spin_lock_irqsave(&dev->lock, flags);
usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
- desc = dev->eps[i].ep->desc;
- dev->eps[i].ep = NULL;
+ kfree(dev->eps[i].ep->desc);
dev->eps[i].state = STATE_EP_DISABLED;
- kfree(desc);
dev->eps[i].disabling = false;
out_unlock:
@@ -845,6 +875,74 @@ out_unlock:
return ret;
}
+static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
+ unsigned long value, bool set, bool halt)
+{
+ int ret = 0, i = value;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (i < 0 || i >= dev->eps_num) {
+ dev_dbg(dev->dev, "fail, invalid endpoint\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (dev->eps[i].state == STATE_EP_DISABLED) {
+ dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (dev->eps[i].disabling) {
+ dev_dbg(&dev->gadget->dev,
+ "fail, disable is in progress\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (dev->eps[i].urb_queued) {
+ dev_dbg(&dev->gadget->dev,
+ "fail, waiting for urb completion\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
+ dev_dbg(&dev->gadget->dev,
+ "fail, can't halt/wedge ISO endpoint\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (set && halt) {
+ ret = usb_ep_set_halt(dev->eps[i].ep);
+ if (ret < 0)
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_set_halt returned %d\n", ret);
+ } else if (!set && halt) {
+ ret = usb_ep_clear_halt(dev->eps[i].ep);
+ if (ret < 0)
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_clear_halt returned %d\n", ret);
+ } else if (set && !halt) {
+ ret = usb_ep_set_wedge(dev->eps[i].ep);
+ if (ret < 0)
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_set_wedge returned %d\n", ret);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
{
struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
@@ -866,7 +964,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
{
int ret = 0;
unsigned long flags;
- struct raw_ep *ep = &dev->eps[io->ep];
+ struct raw_ep *ep;
DECLARE_COMPLETION_ONSTACK(done);
spin_lock_irqsave(&dev->lock, flags);
@@ -880,6 +978,12 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
ret = -EBUSY;
goto out_unlock;
}
+ if (io->ep >= dev->eps_num) {
+ dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ep = &dev->eps[io->ep];
if (ep->state != STATE_EP_ENABLED) {
dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
ret = -EBUSY;
@@ -964,12 +1068,14 @@ static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
if (IS_ERR(data))
return PTR_ERR(data);
ret = raw_process_ep_io(dev, &io, data, false);
- if (ret)
+ if (ret < 0)
goto free;
length = min(io.length, (unsigned int)ret);
if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
ret = -EFAULT;
+ else
+ ret = length;
free:
kfree(data);
return ret;
@@ -1023,6 +1129,71 @@ out_unlock:
return ret;
}
+static void fill_ep_caps(struct usb_ep_caps *caps,
+ struct usb_raw_ep_caps *raw_caps)
+{
+ raw_caps->type_control = caps->type_control;
+ raw_caps->type_iso = caps->type_iso;
+ raw_caps->type_bulk = caps->type_bulk;
+ raw_caps->type_int = caps->type_int;
+ raw_caps->dir_in = caps->dir_in;
+ raw_caps->dir_out = caps->dir_out;
+}
+
+static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
+{
+ limits->maxpacket_limit = ep->maxpacket_limit;
+ limits->max_streams = ep->max_streams;
+}
+
+static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0, i;
+ unsigned long flags;
+ struct usb_raw_eps_info *info;
+ struct raw_ep *ep;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ goto out_free;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ goto out_free;
+ }
+
+ memset(info, 0, sizeof(*info));
+ for (i = 0; i < dev->eps_num; i++) {
+ ep = &dev->eps[i];
+ strscpy(&info->eps[i].name[0], ep->ep->name,
+ USB_RAW_EP_NAME_MAX);
+ info->eps[i].addr = ep->addr;
+ fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
+ fill_ep_limits(ep->ep, &info->eps[i].limits);
+ }
+ ret = dev->eps_num;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (copy_to_user((void __user *)value, info, sizeof(*info)))
+ ret = -EFAULT;
+
+out_free:
+ kfree(info);
+out:
+ return ret;
+}
+
static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
{
struct raw_dev *dev = fd->private_data;
@@ -1065,6 +1236,24 @@ static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
case USB_RAW_IOCTL_VBUS_DRAW:
ret = raw_ioctl_vbus_draw(dev, value);
break;
+ case USB_RAW_IOCTL_EPS_INFO:
+ ret = raw_ioctl_eps_info(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP0_STALL:
+ ret = raw_ioctl_ep0_stall(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP_SET_HALT:
+ ret = raw_ioctl_ep_set_clear_halt_wedge(
+ dev, value, true, true);
+ break;
+ case USB_RAW_IOCTL_EP_CLEAR_HALT:
+ ret = raw_ioctl_ep_set_clear_halt_wedge(
+ dev, value, false, true);
+ break;
+ case USB_RAW_IOCTL_EP_SET_WEDGE:
+ ret = raw_ioctl_ep_set_clear_halt_wedge(
+ dev, value, true, false);
+ break;
default:
ret = -EINVAL;
}
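A hypothetical userspace sketch of the new introspection ioctl; it assumes the raw-gadget device has already been through USB_RAW_IOCTL_INIT and USB_RAW_IOCTL_RUN, and elides error handling:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usb/raw_gadget.h>

static void dump_eps(int fd)
{
	struct usb_raw_eps_info info;
	int i, num;

	/* on success, returns the number of endpoints and fills info */
	num = ioctl(fd, USB_RAW_IOCTL_EPS_INFO, &info);
	for (i = 0; i < num; i++)
		printf("ep #%d: name=%s addr=%u\n", i,
		       &info.eps[i].name[0], info.eps[i].addr);
}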
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
index f8d35dd60c34..cdf96911e4b1 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
@@ -134,11 +134,15 @@ static irqreturn_t ast_vhub_irq(int irq, void *data)
}
/* Handle device interrupts */
- for (i = 0; i < vhub->max_ports; i++) {
- u32 dev_mask = VHUB_IRQ_DEVICE1 << i;
+ if (istat & vhub->port_irq_mask) {
+ unsigned long bitmap = istat;
+ int offset = VHUB_IRQ_DEV1_BIT;
+ int size = VHUB_IRQ_DEV1_BIT + vhub->max_ports;
- if (istat & dev_mask)
+ for_each_set_bit_from(offset, &bitmap, size) {
+ i = offset - VHUB_IRQ_DEV1_BIT;
ast_vhub_dev_irq(&vhub->ports[i].dev);
+ }
}
/* Handle top-level vHub EP0 interrupts */
@@ -332,6 +336,8 @@ static int ast_vhub_probe(struct platform_device *pdev)
spin_lock_init(&vhub->lock);
vhub->pdev = pdev;
+ vhub->port_irq_mask = GENMASK(VHUB_IRQ_DEV1_BIT + vhub->max_ports - 1,
+ VHUB_IRQ_DEV1_BIT);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
vhub->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -402,7 +408,9 @@ static int ast_vhub_probe(struct platform_device *pdev)
goto err;
/* Init hub emulation */
- ast_vhub_init_hub(vhub);
+ rc = ast_vhub_init_hub(vhub);
+ if (rc)
+ goto err;
/* Initialize HW */
ast_vhub_init_hw(vhub);
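The per-port loop now tests the aggregate mask once and walks only the set bits. As a worked example, with VHUB_IRQ_DEV1_BIT = 9 and max_ports = 5, GENMASK(13, 9) yields 0x3e00, and the loop body runs only for device bits that are actually raised:

	/* illustration only; handle_port() is a hypothetical stand-in */
	unsigned long bitmap = istat & GENMASK(9 + 5 - 1, 9);
	int offset = 9;

	for_each_set_bit_from(offset, &bitmap, 9 + 5)
		handle_port(offset - 9);	/* ports 0..4 */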
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index 6e565c3dbb5b..6497185ec4e7 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -50,6 +50,7 @@
#define KERNEL_VER bin2bcd(((LINUX_VERSION_CODE >> 8) & 0x0ff))
enum {
+ AST_VHUB_STR_INDEX_MAX = 4,
AST_VHUB_STR_MANUF = 3,
AST_VHUB_STR_PRODUCT = 2,
AST_VHUB_STR_SERIAL = 1,
@@ -72,13 +73,6 @@ static const struct usb_device_descriptor ast_vhub_dev_desc = {
.bNumConfigurations = 1,
};
-/* Patches to the above when forcing USB1 mode */
-static void ast_vhub_patch_dev_desc_usb1(struct usb_device_descriptor *desc)
-{
- desc->bcdUSB = cpu_to_le16(0x0100);
- desc->bDeviceProtocol = 0;
-}
-
/*
* Configuration descriptor: same comments as above
* regarding handling USB1 mode.
@@ -302,31 +296,81 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
if (len > dsize)
len = dsize;
- /* Patch it if forcing USB1 */
- if (desc_type == USB_DT_DEVICE && ep->vhub->force_usb1)
- ast_vhub_patch_dev_desc_usb1(ep->buf);
-
/* Shoot it from the EP buffer */
return ast_vhub_reply(ep, NULL, len);
}
+static struct usb_gadget_strings*
+ast_vhub_str_of_container(struct usb_gadget_string_container *container)
+{
+ return (struct usb_gadget_strings *)container->stash;
+}
+
+static int ast_vhub_collect_languages(struct ast_vhub *vhub, void *buf,
+ size_t size)
+{
+ int rc, hdr_len, nlangs, max_langs;
+ struct usb_gadget_strings *lang_str;
+ struct usb_gadget_string_container *container;
+ struct usb_string_descriptor *sdesc = buf;
+
+ nlangs = 0;
+ hdr_len = sizeof(struct usb_descriptor_header);
+ max_langs = (size - hdr_len) / sizeof(sdesc->wData[0]);
+ list_for_each_entry(container, &vhub->vhub_str_desc, list) {
+ if (nlangs >= max_langs)
+ break;
+
+ lang_str = ast_vhub_str_of_container(container);
+ sdesc->wData[nlangs++] = cpu_to_le16(lang_str->language);
+ }
+
+ rc = hdr_len + nlangs * sizeof(sdesc->wData[0]);
+ sdesc->bLength = rc;
+ sdesc->bDescriptorType = USB_DT_STRING;
+
+ return rc;
+}
+
+static struct usb_gadget_strings *ast_vhub_lookup_string(struct ast_vhub *vhub,
+ u16 lang_id)
+{
+ struct usb_gadget_strings *lang_str;
+ struct usb_gadget_string_container *container;
+
+ list_for_each_entry(container, &vhub->vhub_str_desc, list) {
+ lang_str = ast_vhub_str_of_container(container);
+ if (lang_str->language == lang_id)
+ return lang_str;
+ }
+
+ return NULL;
+}
+
static int ast_vhub_rep_string(struct ast_vhub_ep *ep,
u8 string_id, u16 lang_id,
u16 len)
{
- int rc = usb_gadget_get_string(&ep->vhub->vhub_str_desc,
- string_id, ep->buf);
+ int rc;
+ u8 buf[256];
+ struct ast_vhub *vhub = ep->vhub;
+ struct usb_gadget_strings *lang_str;
- /*
- * This should never happen unless we put too big strings in
- * the array above
- */
- BUG_ON(rc >= AST_VHUB_EP0_MAX_PACKET);
+ if (string_id == 0) {
+ rc = ast_vhub_collect_languages(vhub, buf, sizeof(buf));
+ } else {
+ lang_str = ast_vhub_lookup_string(vhub, lang_id);
+ if (!lang_str)
+ return std_req_stall;
- if (rc < 0)
+ rc = usb_gadget_get_string(lang_str, string_id, buf);
+ }
+
+ if (rc < 0 || rc >= AST_VHUB_EP0_MAX_PACKET)
return std_req_stall;
/* Shoot it from the EP buffer */
+ memcpy(ep->buf, buf, rc);
return ast_vhub_reply(ep, NULL, min_t(u16, rc, len));
}
@@ -832,11 +876,148 @@ void ast_vhub_hub_reset(struct ast_vhub *vhub)
writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG);
}
-static void ast_vhub_init_desc(struct ast_vhub *vhub)
+static void ast_vhub_of_parse_dev_desc(struct ast_vhub *vhub,
+ const struct device_node *vhub_np)
+{
+ u16 id;
+ u32 data;
+
+ if (!of_property_read_u32(vhub_np, "vhub-vendor-id", &data)) {
+ id = (u16)data;
+ vhub->vhub_dev_desc.idVendor = cpu_to_le16(id);
+ }
+ if (!of_property_read_u32(vhub_np, "vhub-product-id", &data)) {
+ id = (u16)data;
+ vhub->vhub_dev_desc.idProduct = cpu_to_le16(id);
+ }
+ if (!of_property_read_u32(vhub_np, "vhub-device-revision", &data)) {
+ id = (u16)data;
+ vhub->vhub_dev_desc.bcdDevice = cpu_to_le16(id);
+ }
+}
+
+static void ast_vhub_fixup_usb1_dev_desc(struct ast_vhub *vhub)
+{
+ vhub->vhub_dev_desc.bcdUSB = cpu_to_le16(0x0100);
+ vhub->vhub_dev_desc.bDeviceProtocol = 0;
+}
+
+static struct usb_gadget_string_container*
+ast_vhub_str_container_alloc(struct ast_vhub *vhub)
+{
+ unsigned int size;
+ struct usb_string *str_array;
+ struct usb_gadget_strings *lang_str;
+ struct usb_gadget_string_container *container;
+
+ size = sizeof(*container);
+ size += sizeof(struct usb_gadget_strings);
+ size += sizeof(struct usb_string) * AST_VHUB_STR_INDEX_MAX;
+ container = devm_kzalloc(&vhub->pdev->dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ lang_str = ast_vhub_str_of_container(container);
+ str_array = (struct usb_string *)(lang_str + 1);
+ lang_str->strings = str_array;
+ return container;
+}
+
+static void ast_vhub_str_deep_copy(struct usb_gadget_strings *dest,
+ const struct usb_gadget_strings *src)
{
+ struct usb_string *src_array = src->strings;
+ struct usb_string *dest_array = dest->strings;
+
+ dest->language = src->language;
+ if (src_array && dest_array) {
+ do {
+ *dest_array = *src_array;
+ dest_array++;
+ src_array++;
+ } while (src_array->s);
+ }
+}
+
+static int ast_vhub_str_alloc_add(struct ast_vhub *vhub,
+ const struct usb_gadget_strings *src_str)
+{
+ struct usb_gadget_strings *dest_str;
+ struct usb_gadget_string_container *container;
+
+ container = ast_vhub_str_container_alloc(vhub);
+ if (IS_ERR(container))
+ return PTR_ERR(container);
+
+ dest_str = ast_vhub_str_of_container(container);
+ ast_vhub_str_deep_copy(dest_str, src_str);
+ list_add_tail(&container->list, &vhub->vhub_str_desc);
+
+ return 0;
+}
+
+static const struct {
+ const char *name;
+ u8 id;
+} str_id_map[] = {
+ {"manufacturer", AST_VHUB_STR_MANUF},
+ {"product", AST_VHUB_STR_PRODUCT},
+ {"serial-number", AST_VHUB_STR_SERIAL},
+ {},
+};
+
+static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
+ const struct device_node *desc_np)
+{
+ u32 langid;
+ int ret = 0;
+ int i, offset;
+ const char *str;
+ struct device_node *child;
+ struct usb_string str_array[AST_VHUB_STR_INDEX_MAX];
+ struct usb_gadget_strings lang_str = {
+ .strings = (struct usb_string *)str_array,
+ };
+
+ for_each_child_of_node(desc_np, child) {
+ if (of_property_read_u32(child, "reg", &langid))
+ continue; /* no language identifier specified */
+
+ if (!usb_validate_langid(langid))
+ continue; /* invalid language identifier */
+
+ lang_str.language = langid;
+ for (i = offset = 0; str_id_map[i].name; i++) {
+ str = of_get_property(child, str_id_map[i].name, NULL);
+ if (str) {
+ str_array[offset].s = str;
+ str_array[offset].id = str_id_map[i].id;
+ offset++;
+ }
+ }
+ str_array[offset].id = 0;
+ str_array[offset].s = NULL;
+
+ ret = ast_vhub_str_alloc_add(vhub, &lang_str);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int ast_vhub_init_desc(struct ast_vhub *vhub)
+{
+ int ret;
+ struct device_node *desc_np;
+ const struct device_node *vhub_np = vhub->pdev->dev.of_node;
+
/* Initialize vhub Device Descriptor. */
memcpy(&vhub->vhub_dev_desc, &ast_vhub_dev_desc,
sizeof(vhub->vhub_dev_desc));
+ ast_vhub_of_parse_dev_desc(vhub, vhub_np);
+ if (vhub->force_usb1)
+ ast_vhub_fixup_usb1_dev_desc(vhub);
/* Initialize vhub Configuration Descriptor. */
memcpy(&vhub->vhub_conf_desc, &ast_vhub_conf_desc,
@@ -848,15 +1029,20 @@ static void ast_vhub_init_desc(struct ast_vhub *vhub)
vhub->vhub_hub_desc.bNbrPorts = vhub->max_ports;
/* Initialize vhub String Descriptors. */
- memcpy(&vhub->vhub_str_desc, &ast_vhub_strings,
- sizeof(vhub->vhub_str_desc));
+ INIT_LIST_HEAD(&vhub->vhub_str_desc);
+ desc_np = of_get_child_by_name(vhub_np, "vhub-strings");
+ if (desc_np)
+ ret = ast_vhub_of_parse_str_desc(vhub, desc_np);
+ else
+ ret = ast_vhub_str_alloc_add(vhub, &ast_vhub_strings);
+
+ return ret;
}
-void ast_vhub_init_hub(struct ast_vhub *vhub)
+int ast_vhub_init_hub(struct ast_vhub *vhub)
{
vhub->speed = USB_SPEED_UNKNOWN;
INIT_WORK(&vhub->wake_work, ast_vhub_wake_work);
- ast_vhub_init_desc(vhub);
+ return ast_vhub_init_desc(vhub);
}
-
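The hub's string descriptors can now come from the device tree. A sketch of the layout ast_vhub_of_parse_str_desc() expects, with the property names taken from the parser above; the unit address/reg is the USB LANGID (0x409 for en-US), and the string values are placeholders:

	vhub-strings {
		#address-cells = <1>;
		#size-cells = <0>;

		string@409 {
			reg = <0x409>;
			manufacturer = "ASPEED";
			product = "USB Virtual Hub";
			serial-number = "0000";
		};
	};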
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index fac79ef6d669..2e5a1ef14a75 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -51,14 +51,11 @@
#define VHUB_CTRL_UPSTREAM_CONNECT (1 << 0)
/* IER & ISR */
+#define VHUB_IRQ_DEV1_BIT 9
#define VHUB_IRQ_USB_CMD_DEADLOCK (1 << 18)
#define VHUB_IRQ_EP_POOL_NAK (1 << 17)
#define VHUB_IRQ_EP_POOL_ACK_STALL (1 << 16)
-#define VHUB_IRQ_DEVICE5 (1 << 13)
-#define VHUB_IRQ_DEVICE4 (1 << 12)
-#define VHUB_IRQ_DEVICE3 (1 << 11)
-#define VHUB_IRQ_DEVICE2 (1 << 10)
-#define VHUB_IRQ_DEVICE1 (1 << 9)
+#define VHUB_IRQ_DEVICE1 (1 << (VHUB_IRQ_DEV1_BIT))
#define VHUB_IRQ_BUS_RESUME (1 << 8)
#define VHUB_IRQ_BUS_SUSPEND (1 << 7)
#define VHUB_IRQ_BUS_RESET (1 << 6)
@@ -402,6 +399,7 @@ struct ast_vhub {
/* Per-port info */
struct ast_vhub_port *ports;
u32 max_ports;
+ u32 port_irq_mask;
/* Generic EP data structures */
struct ast_vhub_ep *epns;
@@ -423,7 +421,7 @@ struct ast_vhub {
struct usb_device_descriptor vhub_dev_desc;
struct ast_vhub_full_cdesc vhub_conf_desc;
struct usb_hub_descriptor vhub_hub_desc;
- struct usb_gadget_strings vhub_str_desc;
+ struct list_head vhub_str_desc;
};
/* Standard request handlers result codes */
@@ -533,7 +531,7 @@ int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...);
__VA_ARGS__)
/* hub.c */
-void ast_vhub_init_hub(struct ast_vhub *vhub);
+int ast_vhub_init_hub(struct ast_vhub *vhub);
enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
struct usb_ctrlrequest *crq);
enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep,
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 22200341c8ec..d69f61ff0181 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -185,7 +185,7 @@ static int regs_dbg_release(struct inode *inode, struct file *file)
return 0;
}
-const struct file_operations queue_dbg_fops = {
+static const struct file_operations queue_dbg_fops = {
.owner = THIS_MODULE,
.open = queue_dbg_open,
.llseek = no_llseek,
@@ -193,7 +193,7 @@ const struct file_operations queue_dbg_fops = {
.release = queue_dbg_release,
};
-const struct file_operations regs_dbg_fops = {
+static const struct file_operations regs_dbg_fops = {
.owner = THIS_MODULE,
.open = regs_dbg_open,
.llseek = generic_file_llseek,
@@ -2043,10 +2043,56 @@ static const struct usba_udc_errata at91sam9g45_errata = {
.pulse_bias = at91sam9g45_pulse_bias,
};
+static const struct usba_ep_config ep_config_sam9[] __initconst = {
+ { .nr_banks = 1 }, /* ep 0 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */
+ { .nr_banks = 3, .can_dma = 1 }, /* ep 3 */
+ { .nr_banks = 3, .can_dma = 1 }, /* ep 4 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 5 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 6 */
+};
+
+static const struct usba_ep_config ep_config_sama5[] __initconst = {
+ { .nr_banks = 1 }, /* ep 0 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 3 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 4 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 5 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 6 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 7 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 8 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 9 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 10 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 11 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 12 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 13 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 14 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 15 */
+};
+
+static const struct usba_udc_config udc_at91sam9rl_cfg = {
+ .errata = &at91sam9rl_errata,
+ .config = ep_config_sam9,
+ .num_ep = ARRAY_SIZE(ep_config_sam9),
+};
+
+static const struct usba_udc_config udc_at91sam9g45_cfg = {
+ .errata = &at91sam9g45_errata,
+ .config = ep_config_sam9,
+ .num_ep = ARRAY_SIZE(ep_config_sam9),
+};
+
+static const struct usba_udc_config udc_sama5d3_cfg = {
+ .config = ep_config_sama5,
+ .num_ep = ARRAY_SIZE(ep_config_sama5),
+};
+
static const struct of_device_id atmel_udc_dt_ids[] = {
- { .compatible = "atmel,at91sam9rl-udc", .data = &at91sam9rl_errata },
- { .compatible = "atmel,at91sam9g45-udc", .data = &at91sam9g45_errata },
- { .compatible = "atmel,sama5d3-udc" },
+ { .compatible = "atmel,at91sam9rl-udc", .data = &udc_at91sam9rl_cfg },
+ { .compatible = "atmel,at91sam9g45-udc", .data = &udc_at91sam9g45_cfg },
+ { .compatible = "atmel,sama5d3-udc", .data = &udc_sama5d3_cfg },
{ /* sentinel */ }
};
@@ -2055,18 +2101,19 @@ MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
struct usba_udc *udc)
{
- u32 val;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
struct device_node *pp;
int i, ret;
struct usba_ep *eps, *ep;
+ const struct usba_udc_config *udc_config;
match = of_match_node(atmel_udc_dt_ids, np);
if (!match)
return ERR_PTR(-EINVAL);
- udc->errata = match->data;
+ udc_config = match->data;
+ udc->errata = udc_config->errata;
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
if (IS_ERR(udc->pmc))
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
@@ -2082,8 +2129,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
if (fifo_mode == 0) {
pp = NULL;
- while ((pp = of_get_next_child(np, pp)))
- udc->num_ep++;
+ udc->num_ep = udc_config->num_ep;
udc->configured_ep = 1;
} else {
udc->num_ep = usba_config_fifo_table(udc);
@@ -2100,52 +2146,38 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
pp = NULL;
i = 0;
- while ((pp = of_get_next_child(np, pp)) && i < udc->num_ep) {
+ while (i < udc->num_ep) {
+ const struct usba_ep_config *ep_cfg = &udc_config->config[i];
+
ep = &eps[i];
- ret = of_property_read_u32(pp, "reg", &val);
- if (ret) {
- dev_err(&pdev->dev, "of_probe: reg error(%d)\n", ret);
- goto err;
- }
- ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : val;
+ ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : i;
+
+ /* Only the first EP is 64 bytes */
+ if (ep->index == 0)
+ ep->fifo_size = 64;
+ else
+ ep->fifo_size = 1024;
- ret = of_property_read_u32(pp, "atmel,fifo-size", &val);
- if (ret) {
- dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret);
- goto err;
- }
if (fifo_mode) {
- if (val < udc->fifo_cfg[i].fifo_size) {
+ if (ep->fifo_size < udc->fifo_cfg[i].fifo_size)
dev_warn(&pdev->dev,
- "Using max fifo-size value from DT\n");
- ep->fifo_size = val;
- } else {
+ "Using default max fifo-size value\n");
+ else
ep->fifo_size = udc->fifo_cfg[i].fifo_size;
- }
- } else {
- ep->fifo_size = val;
}
- ret = of_property_read_u32(pp, "atmel,nb-banks", &val);
- if (ret) {
- dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret);
- goto err;
- }
+ ep->nr_banks = ep_cfg->nr_banks;
if (fifo_mode) {
- if (val < udc->fifo_cfg[i].nr_banks) {
+ if (ep->nr_banks < udc->fifo_cfg[i].nr_banks)
dev_warn(&pdev->dev,
- "Using max nb-banks value from DT\n");
- ep->nr_banks = val;
- } else {
+ "Using default max nb-banks value\n");
+ else
ep->nr_banks = udc->fifo_cfg[i].nr_banks;
- }
- } else {
- ep->nr_banks = val;
}
- ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
- ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
+ ep->can_dma = ep_cfg->can_dma;
+ ep->can_isoc = ep_cfg->can_isoc;
sprintf(ep->name, "ep%d", ep->index);
ep->ep.name = ep->name;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index a0225e4543d4..48e332439ed5 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -290,6 +290,12 @@ struct usba_ep {
#endif
};
+struct usba_ep_config {
+ u8 nr_banks;
+ unsigned int can_dma:1;
+ unsigned int can_isoc:1;
+};
+
struct usba_request {
struct usb_request req;
struct list_head queue;
@@ -307,6 +313,12 @@ struct usba_udc_errata {
void (*pulse_bias)(struct usba_udc *udc);
};
+struct usba_udc_config {
+ const struct usba_udc_errata *errata;
+ const struct usba_ep_config *config;
+ const int num_ep;
+};
+
struct usba_udc {
/* Protect hw registers from concurrent modifications */
spinlock_t lock;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 9b11046480fe..2e28dde8376f 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1297,6 +1297,8 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
usb_gadget_disconnect(udc->gadget);
+ if (udc->gadget->irq)
+ synchronize_irq(udc->gadget->irq);
udc->driver->unbind(udc->gadget);
usb_gadget_udc_stop(udc);
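
The added synchronize_irq() makes sure any interrupt handler still running on another CPU has finished before ->unbind() tears down the gadget driver's state; without it the handler could race with the unbind and touch freed data. The gadget->irq field is optional (not every UDC fills it in), hence the NULL check.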
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 6e3e3ebf715f..0eeaead5acea 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -187,31 +187,31 @@ static const struct {
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
/* and now some generic EPs so we have enough in multi config */
- EP_INFO("ep3out",
+ EP_INFO("ep-aout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep4in",
+ EP_INFO("ep-bin",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep5out",
+ EP_INFO("ep-cout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep6out",
+ EP_INFO("ep-dout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep7in",
+ EP_INFO("ep-ein",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep8out",
+ EP_INFO("ep-fout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep9in",
+ EP_INFO("ep-gin",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep10out",
+ EP_INFO("ep-hout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep11out",
+ EP_INFO("ep-iout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep12in",
+ EP_INFO("ep-jin",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep13out",
+ EP_INFO("ep-kout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep14in",
+ EP_INFO("ep-lin",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep15out",
+ EP_INFO("ep-mout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
#undef EP_INFO
@@ -427,6 +427,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
/* caller must hold lock */
static void set_link_state(struct dummy_hcd *dum_hcd)
+ __must_hold(&dum->lock)
{
struct dummy *dum = dum_hcd->dum;
unsigned int power_bit;
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index febabde62f71..b2638e83bb49 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2440,8 +2440,8 @@ static int fsl_udc_probe(struct platform_device *pdev)
udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2;
udc_controller->irq = platform_get_irq(pdev, 0);
- if (!udc_controller->irq) {
- ret = -ENODEV;
+ if (udc_controller->irq <= 0) {
+ ret = udc_controller->irq ? : -ENODEV;
goto err_iounmap;
}
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index aaf975c809bf..7164ad9800f1 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -48,7 +48,6 @@
#define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"
static const char driver_name[] = DRIVER_NAME;
-static const char driver_desc[] = DRIVER_DESC;
#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index cb997b82c008..465d0b7c6522 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1614,17 +1614,17 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep,
const struct usb_endpoint_descriptor *desc)
{
struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
- struct lpc32xx_udc *udc = ep->udc;
+ struct lpc32xx_udc *udc;
u16 maxpacket;
u32 tmp;
unsigned long flags;
/* Verify EP data */
if ((!_ep) || (!ep) || (!desc) ||
- (desc->bDescriptorType != USB_DT_ENDPOINT)) {
- dev_dbg(udc->dev, "bad ep or descriptor\n");
+ (desc->bDescriptorType != USB_DT_ENDPOINT))
return -EINVAL;
- }
+
+ udc = ep->udc;
maxpacket = usb_endpoint_maxp(desc);
if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
@@ -1872,7 +1872,7 @@ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
{
struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
- struct lpc32xx_udc *udc = ep->udc;
+ struct lpc32xx_udc *udc;
unsigned long flags;
if ((!ep) || (ep->hwep_num <= 1))
@@ -1882,6 +1882,7 @@ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
if (ep->is_in)
return -EAGAIN;
+ udc = ep->udc;
spin_lock_irqsave(&udc->lock, flags);
if (value == 1) {
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 75d16a8902e6..931e6362a13d 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1667,7 +1667,7 @@ static int m66592_probe(struct platform_device *pdev)
err_add_udc:
m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
-
+ m66592->ep0_req = NULL;
clean_up3:
if (m66592->pdata->on_chip) {
clk_disable(m66592->clk);
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 8fbc083b6732..23f33946d80c 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -901,7 +901,7 @@ loop:
}
set_current_state(TASK_RUNNING);
- dev_info(udc->dev, "SPI thread exiting");
+ dev_info(udc->dev, "SPI thread exiting\n");
return 0;
}
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 35e02a8d0091..5bb0568b934e 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1548,7 +1548,7 @@ static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
delegate = true;
/* delegate USB standard requests to the gadget driver */
- if (delegate == true) {
+ if (delegate) {
/* USB requests handled by gadget */
if (setup->wLength) {
/* DATA phase from gadget, STATUS phase from u3d */
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index a8273b589456..928057b206f1 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -54,7 +54,7 @@ static const char * const ep_name[] = {
*
* If use_dma is disabled, pio will be used instead.
*/
-static bool use_dma = 0;
+static bool use_dma = false;
module_param(use_dma, bool, 0644);
/*
@@ -2647,6 +2647,8 @@ net2272_plat_probe(struct platform_device *pdev)
err_req:
release_mem_region(base, len);
err:
+ kfree(dev);
+
return ret;
}
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index bf87c6c0d7f6..4139da885651 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2576,7 +2576,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
case USB_ENDPOINT_XFER_INT:
ep->ep.caps.type_int = true;
break;
- };
+ }
if (addr & USB_DIR_IN)
ep->ep.caps.dir_in = true;
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index 0507a2ca0f55..80002d97b59d 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -251,10 +251,6 @@ static void s3c2410_udc_done(struct s3c2410_ep *ep,
static void s3c2410_udc_nuke(struct s3c2410_udc *udc,
struct s3c2410_ep *ep, int status)
{
- /* Sanity check */
- if (&ep->queue == NULL)
- return;
-
while (!list_empty(&ep->queue)) {
struct s3c2410_request *req;
req = list_entry(ep->queue.next, struct s3c2410_request,
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 52a6add961f4..bbe1a04686da 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -158,6 +158,30 @@
#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
+#define SSPX_CORE_CNT56 0x6fc
+#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(x) ((x) & \
+ SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK)
+#define SSPX_CORE_CNT57 0x700
+#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(x) ((x) & \
+ SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK)
+#define SSPX_CORE_CNT65 0x720
+#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(x) ((x) & \
+ SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK)
+#define SSPX_CORE_CNT66 0x724
+#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(x) ((x) & \
+ SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK)
+#define SSPX_CORE_CNT67 0x728
+#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(x) ((x) & \
+ SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK)
+#define SSPX_CORE_CNT72 0x73c
+#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(x) ((x) & \
+ SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK)
#define SSPX_CORE_PADCTL4 0x750
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
@@ -492,6 +516,7 @@ struct tegra_xudc {
bool powergated;
struct usb_phy **usbphy;
+ struct usb_phy *curr_usbphy;
struct notifier_block vbus_nb;
struct completion disconnect_complete;
@@ -530,6 +555,7 @@ struct tegra_xudc_soc {
bool invalid_seq_num;
bool pls_quirk;
bool port_reset_quirk;
+ bool port_speed_quirk;
bool has_ipfs;
};
@@ -599,6 +625,78 @@ static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
trb->control);
}
+static void tegra_xudc_limit_port_speed(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ /* limit port speed to gen 1 */
+ val = xudc_readl(xudc, SSPX_CORE_CNT56);
+ val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x260);
+ xudc_writel(xudc, val, SSPX_CORE_CNT56);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT57);
+ val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x6D6);
+ xudc_writel(xudc, val, SSPX_CORE_CNT57);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT65);
+ val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0x4B0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT65);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT66);
+ val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x4B0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT66);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT67);
+ val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x4B0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT67);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT72);
+ val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
+ val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x10);
+ xudc_writel(xudc, val, SSPX_CORE_CNT72);
+}
+
+static void tegra_xudc_restore_port_speed(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ /* restore port speed to gen2 */
+ val = xudc_readl(xudc, SSPX_CORE_CNT56);
+ val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x438);
+ xudc_writel(xudc, val, SSPX_CORE_CNT56);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT57);
+ val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x528);
+ xudc_writel(xudc, val, SSPX_CORE_CNT57);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT65);
+ val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0xE10);
+ xudc_writel(xudc, val, SSPX_CORE_CNT65);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT66);
+ val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x348);
+ xudc_writel(xudc, val, SSPX_CORE_CNT66);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT67);
+ val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x5a0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT67);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT72);
+ val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
+ val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x1c21);
+ xudc_writel(xudc, val, SSPX_CORE_CNT72);
+}
+
static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
{
int err;
@@ -631,6 +729,9 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
reinit_completion(&xudc->disconnect_complete);
+ if (xudc->soc->port_speed_quirk)
+ tegra_xudc_restore_port_speed(xudc);
+
phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);
pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
@@ -719,6 +820,7 @@ static int tegra_xudc_vbus_notify(struct notifier_block *nb,
if (!xudc->suspended && phy_index != -1) {
xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
+ xudc->curr_usbphy = usbphy;
schedule_work(&xudc->usb_role_sw_work);
}
@@ -2042,6 +2144,20 @@ static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
return 0;
}
+static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
+ unsigned int m_a)
+{
+ int ret = 0;
+ struct tegra_xudc *xudc = to_xudc(gadget);
+
+ dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);
+
+ if (xudc->curr_usbphy->chg_type == SDP_TYPE)
+ ret = usb_phy_set_power(xudc->curr_usbphy, m_a);
+
+ return ret;
+}
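
The gadget core invokes this hook through usb_gadget_vbus_draw() once a configuration is selected; forwarding the current request to the PHY only for SDP_TYPE presumably reflects that charger detection already fixes the current budget for the other charger types.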
+
static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
struct tegra_xudc *xudc = to_xudc(gadget);
@@ -2058,6 +2174,7 @@ static struct usb_gadget_ops tegra_xudc_gadget_ops = {
.pullup = tegra_xudc_gadget_pullup,
.udc_start = tegra_xudc_gadget_start,
.udc_stop = tegra_xudc_gadget_stop,
+ .vbus_draw = tegra_xudc_gadget_vbus_draw,
.set_selfpowered = tegra_xudc_set_selfpowered,
};
@@ -3274,6 +3391,9 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
xudc_writel(xudc, val, BLCG);
}
+ if (xudc->soc->port_speed_quirk)
+ tegra_xudc_limit_port_speed(xudc);
+
/* Set a reasonable U3 exit timer value. */
val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
@@ -3506,6 +3626,7 @@ static struct tegra_xudc_soc tegra210_xudc_soc_data = {
.invalid_seq_num = true,
.pls_quirk = true,
.port_reset_quirk = true,
+ .port_speed_quirk = false,
.has_ipfs = true,
};
@@ -3519,6 +3640,21 @@ static struct tegra_xudc_soc tegra186_xudc_soc_data = {
.invalid_seq_num = false,
.pls_quirk = false,
.port_reset_quirk = false,
+ .port_speed_quirk = false,
+ .has_ipfs = false,
+};
+
+static struct tegra_xudc_soc tegra194_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .num_phys = 4,
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = true,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .port_speed_quirk = true,
.has_ipfs = false,
};
@@ -3531,6 +3667,10 @@ static const struct of_device_id tegra_xudc_of_match[] = {
.compatible = "nvidia,tegra186-xudc",
.data = &tegra186_xudc_soc_data
},
+ {
+ .compatible = "nvidia,tegra194-xudc",
+ .data = &tegra194_xudc_soc_data
+ },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
@@ -3840,11 +3980,11 @@ static int __maybe_unused tegra_xudc_suspend(struct device *dev)
flush_work(&xudc->usb_role_sw_work);
- /* Forcibly disconnect before powergating. */
- tegra_xudc_device_mode_off(xudc);
-
- if (!pm_runtime_status_suspended(dev))
+ if (!pm_runtime_status_suspended(dev)) {
+ /* Forcibly disconnect before powergating. */
+ tegra_xudc_device_mode_off(xudc);
tegra_xudc_powergate(xudc);
+ }
pm_runtime_disable(dev);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index b1cfc8279c3d..709553bdb233 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1732,6 +1732,7 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
* Process setup packet and delegate to gadget layer.
*/
static void xudc_handle_setup(struct xusb_udc *udc)
+ __must_hold(&udc->lock)
{
struct xusb_ep *ep0 = &udc->ep[0];
struct usb_ctrlrequest setup;
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 7c24d1ce1088..58a4d3325090 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -65,3 +65,27 @@ usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf)
return buf [0];
}
EXPORT_SYMBOL_GPL(usb_gadget_get_string);
+
+/**
+ * usb_validate_langid - validate usb language identifiers
+ * @langid: USB language identifier
+ *
+ * Returns true for valid language identifier, otherwise false.
+ */
+bool usb_validate_langid(u16 langid)
+{
+ u16 primary_lang = langid & 0x3ff; /* bits [9:0] */
+ u16 sub_lang = langid >> 10; /* bits [15:10] */
+
+ switch (primary_lang) {
+ case 0:
+ case 0x62 ... 0xfe:
+ case 0x100 ... 0x3ff:
+ return false;
+ }
+ if (!sub_lang)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(usb_validate_langid);
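
A quick worked example of the LANGID layout the helper checks (hypothetical caller, not part of this patch): 0x0409, US English, has primary language 0x09 in bits [9:0] and sub-language 0x01 in bits [15:10], so it passes; 0x0000 fails on both the reserved primary language and the zero sub-language.

    /* Sketch only: reject string requests carrying a bogus LANGID. */
    static int example_check_langid(u16 langid)
    {
    	if (!usb_validate_langid(langid))	/* e.g. 0x0409 passes */
    		return -EINVAL;
    	return 0;
    }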
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 55bdfdf11e4c..62c348062e48 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -40,8 +40,17 @@ config USB_XHCI_DBGCAP
config USB_XHCI_PCI
tristate
depends on USB_PCI
+ depends on USB_XHCI_PCI_RENESAS || !USB_XHCI_PCI_RENESAS
default y
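
(The "USB_XHCI_PCI_RENESAS || !USB_XHCI_PCI_RENESAS" dependency is the usual Kconfig idiom for an optional dependency: it forces USB_XHCI_PCI to be modular whenever the Renesas helper below is built as a module, so the PCI glue can link against its symbols.)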
+config USB_XHCI_PCI_RENESAS
+ tristate "Support for additional Renesas xHCI controller with firwmare"
+ ---help---
+ Say 'Y' to enable the support for the Renesas xHCI controller with
+ firwmare. Make sure you have the firwmare for the device and
+ installed on your system for this device to work.
+ If unsure, say 'N'.
+
config USB_XHCI_PLATFORM
tristate "Generic xHCI driver for a platform device"
select USB_XHCI_RCAR if ARCH_RENESAS
@@ -97,6 +106,26 @@ config USB_XHCI_TEGRA
endif # USB_XHCI_HCD
+config USB_EHCI_BRCMSTB
+ tristate
+
+config USB_BRCMSTB
+ tristate "Broadcom STB USB support"
+ depends on (ARCH_BRCMSTB && PHY_BRCM_USB) || COMPILE_TEST
+ select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
+ select USB_EHCI_BRCMSTB if USB_EHCI_HCD
+ select USB_XHCI_PLATFORM if USB_XHCI_HCD
+ help
+ Enables support for XHCI, EHCI and OHCI host controllers
+ found in Broadcom STB SoCs.
+
+ To compile these drivers as modules, choose M here: the
+ modules will be called ohci-platform.ko, ehci-brcm.ko and
+ xhci-plat-hcd.ko
+
+ Disabling this will keep the controllers and corresponding
+ PHYs powered down.
+
config USB_EHCI_HCD
tristate "EHCI HCD (USB 2.0) support"
depends on HAS_DMA && HAS_IOMEM
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index b191361257cc..bc731332fed9 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_USB_EHCI_HCD_STI) += ehci-st.o
obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
+obj-$(CONFIG_USB_EHCI_BRCMSTB) += ehci-brcm.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
@@ -71,6 +72,7 @@ obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
+obj-$(CONFIG_USB_XHCI_PCI_RENESAS) += xhci-pci-renesas.o
obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
obj-$(CONFIG_USB_XHCI_HISTB) += xhci-histb.o
obj-$(CONFIG_USB_XHCI_MTK) += xhci-mtk.o
diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c
new file mode 100644
index 000000000000..3e0ebe8cc649
--- /dev/null
+++ b/drivers/usb/host/ehci-brcm.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Broadcom */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/iopoll.h>
+
+#include "ehci.h"
+
+#define hcd_to_ehci_priv(h) ((struct brcm_priv *)hcd_to_ehci(h)->priv)
+
+struct brcm_priv {
+ struct clk *clk;
+};
+
+/*
+ * ehci_brcm_wait_for_sof
+ * Wait for start of next microframe, then wait extra delay microseconds
+ */
+static inline void ehci_brcm_wait_for_sof(struct ehci_hcd *ehci, u32 delay)
+{
+ u32 frame_idx = ehci_readl(ehci, &ehci->regs->frame_index);
+ u32 val;
+ int res;
+
+ /* Wait for next microframe (every 125 usecs) */
+ res = readl_relaxed_poll_timeout(&ehci->regs->frame_index, val,
+ val != frame_idx, 1, 130);
+ if (res)
+ ehci_err(ehci, "Error waiting for SOF\n");
+ udelay(delay);
+}
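
For readers unfamiliar with the iopoll helpers, the readl_relaxed_poll_timeout() call above behaves roughly like the open-coded loop below (a sketch under the same 1 us poll interval and 130 us budget, slightly more than one 125 us microframe; illustration only, not the driver code):

    /* Poll frame_index until it changes, or give up after ~130 us. */
    static int example_wait_new_frame(struct ehci_hcd *ehci, u32 frame_idx)
    {
    	int budget = 130;

    	while (ehci_readl(ehci, &ehci->regs->frame_index) == frame_idx) {
    		if (budget-- <= 0)
    			return -ETIMEDOUT;
    		udelay(1);
    	}
    	return 0;
    }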
+
+/*
+ * ehci_brcm_hub_control
+ * The EHCI controller has a bug where it can violate the SOF
+ * interval between the first two SOFs transmitted after resume
+ * if the resume occurs near the end of the microframe. This causes
+ * the controller to detect babble on the suspended port, which
+ * eventually causes the controller to reset the port.
+ * The fix is to intercept the ehci-hcd request to complete RESUME and
+ * align it to the start of the next microframe.
+ * See SWLINUX-1909 for more details
+ */
+static int ehci_brcm_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int ports = HCS_N_PORTS(ehci->hcs_params);
+ u32 __iomem *status_reg;
+ unsigned long flags;
+ int retval, irq_disabled = 0;
+
+ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+ /*
+ * RESUME is cleared when GetPortStatus() is called 20ms after start
+ * of RESUME
+ */
+ if ((typeReq == GetPortStatus) &&
+ (wIndex && wIndex <= ports) &&
+ ehci->reset_done[wIndex-1] &&
+ time_after_eq(jiffies, ehci->reset_done[wIndex-1]) &&
+ (ehci_readl(ehci, status_reg) & PORT_RESUME)) {
+
+ /*
+ * to make sure we are not interrupted until RESUME bit
+ * is cleared, disable interrupts on current CPU
+ */
+ ehci_dbg(ehci, "SOF alignment workaround\n");
+ irq_disabled = 1;
+ local_irq_save(flags);
+ ehci_brcm_wait_for_sof(ehci, 5);
+ }
+ retval = ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+ if (irq_disabled)
+ local_irq_restore(flags);
+ return retval;
+}
+
+static int ehci_brcm_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int len;
+
+ ehci->big_endian_mmio = 1;
+
+ ehci->caps = (void __iomem *)hcd->regs;
+ len = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+ ehci->regs = (void __iomem *)(hcd->regs + len);
+
+ /* This fixes the lockup during reboot due to prior interrupts */
+ ehci_writel(ehci, CMD_RESET, &ehci->regs->command);
+ mdelay(10);
+
+ /*
+ * SWLINUX-1705: Avoid OUT packet underflows during high memory
+ * bus usage
+ * port_status[0x0f] = Broadcom-proprietary USB_EHCI_INSNREG00 @ 0x90
+ */
+ ehci_writel(ehci, 0x00800040, &ehci->regs->port_status[0x10]);
+ ehci_writel(ehci, 0x00000001, &ehci->regs->port_status[0x12]);
+
+ return ehci_setup(hcd);
+}
+
+static struct hc_driver __read_mostly ehci_brcm_hc_driver;
+
+static const struct ehci_driver_overrides brcm_overrides __initconst = {
+ .reset = ehci_brcm_reset,
+ .extra_priv_size = sizeof(struct brcm_priv),
+};
+
+static int ehci_brcm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res_mem;
+ struct brcm_priv *priv;
+ struct usb_hcd *hcd;
+ int irq;
+ int err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq ? irq : -EINVAL;
+
+ /* Hook the hub control routine to work around a bug */
+ ehci_brcm_hc_driver.hub_control = ehci_brcm_hub_control;
+
+ /* initialize hcd */
+ hcd = usb_create_hcd(&ehci_brcm_hc_driver, dev, dev_name(dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hcd);
+ priv = hcd_to_ehci_priv(hcd);
+
+ priv->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ err = PTR_ERR(priv->clk);
+ goto err_hcd;
+ }
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ goto err_hcd;
+
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto err_clk;
+ }
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err)
+ goto err_clk;
+
+ device_wakeup_enable(hcd->self.controller);
+ device_enable_async_suspend(hcd->self.controller);
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(priv->clk);
+err_hcd:
+ usb_put_hcd(hcd);
+
+ return err;
+}
+
+static int ehci_brcm_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
+
+ usb_remove_hcd(hcd);
+ clk_disable_unprepare(priv->clk);
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+static int __maybe_unused ehci_brcm_suspend(struct device *dev)
+{
+ int ret;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
+ bool do_wakeup = device_may_wakeup(dev);
+
+ ret = ehci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int __maybe_unused ehci_brcm_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+ /*
+ * SWLINUX-1705: Avoid OUT packet underflows during high memory
+ * bus usage
+ * port_status[0x0f] = Broadcom-proprietary USB_EHCI_INSNREG00
+ * @ 0x90
+ */
+ ehci_writel(ehci, 0x00800040, &ehci->regs->port_status[0x10]);
+ ehci_writel(ehci, 0x00000001, &ehci->regs->port_status[0x12]);
+
+ ehci_resume(hcd, false);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ehci_brcm_pm_ops, ehci_brcm_suspend,
+ ehci_brcm_resume);
+
+static const struct of_device_id brcm_ehci_of_match[] = {
+ { .compatible = "brcm,ehci-brcm-v2", },
+ { .compatible = "brcm,bcm7445-ehci", },
+ {}
+};
+
+static struct platform_driver ehci_brcm_driver = {
+ .probe = ehci_brcm_probe,
+ .remove = ehci_brcm_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "ehci-brcm",
+ .pm = &ehci_brcm_pm_ops,
+ .of_match_table = brcm_ehci_of_match,
+ }
+};
+
+static int __init ehci_brcm_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ ehci_init_driver(&ehci_brcm_hc_driver, &brcm_overrides);
+ return platform_driver_register(&ehci_brcm_driver);
+}
+module_init(ehci_brcm_init);
+
+static void __exit ehci_brcm_exit(void)
+{
+ platform_driver_unregister(&ehci_brcm_driver);
+}
+module_exit(ehci_brcm_exit);
+
+MODULE_ALIAS("platform:ehci-brcm");
+MODULE_DESCRIPTION("EHCI Broadcom STB driver");
+MODULE_AUTHOR("Al Cooper");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 9d18c6e6ab27..c95341d472f4 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (C) 2005-2010,2012 Freescale Semiconductor, Inc.
* Copyright (c) 2005 MontaVista Software
*/
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 1300c457d9ed..cffdc8d01b2a 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -108,7 +108,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
struct ehci_hcd *ehci;
struct ehci_hcd_mv *ehci_mv;
struct resource *r;
- int retval = -ENODEV;
+ int retval;
u32 offset;
u32 status;
@@ -143,8 +143,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
-
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ehci_mv->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(ehci_mv->base)) {
@@ -168,12 +166,10 @@ static int mv_ehci_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(r);
hcd->regs = ehci_mv->op_regs;
- hcd->irq = platform_get_irq(pdev, 0);
- if (!hcd->irq) {
- dev_err(&pdev->dev, "Cannot get irq.");
- retval = -ENODEV;
+ retval = platform_get_irq(pdev, 0);
+ if (retval < 0)
goto err_disable_clk;
- }
+ hcd->irq = retval;
ehci = hcd_to_ehci(hcd);
ehci->caps = (struct ehci_caps __iomem *) ehci_mv->cap_regs;
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index c9f91e6c72b6..dc2676320527 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,12 +36,12 @@ static const struct ehci_driver_overrides ehci_mxc_overrides __initconst = {
static int ehci_mxc_drv_probe(struct platform_device *pdev)
{
- struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
struct usb_hcd *hcd;
struct resource *res;
int irq, ret;
struct ehci_mxc_priv *priv;
- struct device *dev = &pdev->dev;
struct ehci_hcd *ehci;
if (!pdata) {
@@ -50,13 +50,15 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
if (!hcd)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto err_alloc;
@@ -69,14 +71,14 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
priv = (struct ehci_mxc_priv *) ehci->priv;
/* enable clocks */
- priv->usbclk = devm_clk_get(&pdev->dev, "ipg");
+ priv->usbclk = devm_clk_get(dev, "ipg");
if (IS_ERR(priv->usbclk)) {
ret = PTR_ERR(priv->usbclk);
goto err_alloc;
}
clk_prepare_enable(priv->usbclk);
- priv->ahbclk = devm_clk_get(&pdev->dev, "ahb");
+ priv->ahbclk = devm_clk_get(dev, "ahb");
if (IS_ERR(priv->ahbclk)) {
ret = PTR_ERR(priv->ahbclk);
goto err_clk_ahb;
@@ -84,13 +86,12 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
clk_prepare_enable(priv->ahbclk);
/* "dr" device has its own clock on i.MX51 */
- priv->phyclk = devm_clk_get(&pdev->dev, "phy");
+ priv->phyclk = devm_clk_get(dev, "phy");
if (IS_ERR(priv->phyclk))
priv->phyclk = NULL;
if (priv->phyclk)
clk_prepare_enable(priv->phyclk);
-
/* call platform specific init function */
if (pdata->init) {
ret = pdata->init(pdev);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 1a48ab1bd3b2..3c3820ad9092 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -360,23 +360,21 @@ static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
if (is_bypassed_id(pdev))
return -ENODEV;
- return usb_hcd_pci_probe(pdev, id);
+ return usb_hcd_pci_probe(pdev, id, &ehci_pci_hc_driver);
}
static void ehci_pci_remove(struct pci_dev *pdev)
{
pci_clear_mwi(pdev);
- usb_hcd_pci_remove(pdev);
+ usb_hcd_pci_remove(pdev);
}
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids [] = { {
/* handle any USB 2.0 EHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
- .driver_data = (unsigned long) &ehci_pci_hc_driver,
}, {
PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST),
- .driver_data = (unsigned long) &ehci_pci_hc_driver,
},
{ /* end: all zeroes */ }
};
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index e4fc3f66d43b..e9a49007cce4 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -455,6 +455,10 @@ static int ehci_platform_resume(struct device *dev)
ehci_resume(hcd, priv->reset_on_resume);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
if (priv->quirk_poll)
quirk_poll_init(priv);
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 10d51daa6a1b..e077b2ca53c5 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -480,7 +480,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (!irq) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
err = -ENODEV;
goto cleanup_phy;
}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 229b3de319e6..eabf22a78eae 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2001-2002 by David Brownell
*/
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 2ce5031d866d..81fbc019a9b3 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Freescale QUICC Engine USB Host Controller Driver
*
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
index 7b9cf0a38d6e..96d16752a73e 100644
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Macros and prototypes for i.MX21
*
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 22117a6aeb4a..585222af24ff 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -277,21 +277,24 @@ static const struct ohci_driver_overrides pci_overrides __initconst = {
static const struct pci_device_id pci_ids[] = { {
/* handle any USB OHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0),
- .driver_data = (unsigned long) &ohci_pci_hc_driver,
}, {
/* The device in the ConneXT I/O hub has no class reg */
PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_OHCI),
- .driver_data = (unsigned long) &ohci_pci_hc_driver,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
+static int ohci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ return usb_hcd_pci_probe(dev, id, &ohci_pci_hc_driver);
+}
+
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver ohci_pci_driver = {
.name = hcd_name,
.id_table = pci_ids,
- .probe = usb_hcd_pci_probe,
+ .probe = ohci_pci_probe,
.remove = usb_hcd_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
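
Passing the hc_driver to usb_hcd_pci_probe() explicitly through these small probe wrappers frees the pci_device_id driver_data field for other uses; the Renesas xHCI support added in this series relies on exactly that, storing a struct xhci_driver_data (carrying the firmware name) in driver_data instead.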
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 7addfc2cbadc..4a8456f12a73 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -299,6 +299,11 @@ static int ohci_platform_resume(struct device *dev)
}
ohci_resume(hcd, false);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
return 0;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index c158cda9e4b9..cff965240327 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -157,9 +157,10 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
* the call to usb_hcd_setup_local_mem() below does just that.
*/
- if (usb_hcd_setup_local_mem(hcd, mem->start,
- mem->start - mem->parent->start,
- resource_size(mem)) < 0)
+ retval = usb_hcd_setup_local_mem(hcd, mem->start,
+ mem->start - mem->parent->start,
+ resource_size(mem));
+ if (retval < 0)
goto err5;
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 27c26ca10bfd..b85a39588f9d 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index beb2efa71341..0b949acfa258 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -16,6 +16,9 @@
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
#include "pci-quirks.h"
#include "xhci-ext-caps.h"
@@ -205,7 +208,7 @@ static void usb_amd_find_chipset_info(void)
{
unsigned long flags;
struct amd_chipset_info info;
- info.need_pll_quirk = 0;
+ info.need_pll_quirk = false;
spin_lock_irqsave(&amd_lock, flags);
@@ -229,10 +232,10 @@ static void usb_amd_find_chipset_info(void)
case AMD_CHIPSET_SB800:
case AMD_CHIPSET_HUDSON2:
case AMD_CHIPSET_BOLTON:
- info.need_pll_quirk = 1;
+ info.need_pll_quirk = true;
break;
default:
- info.need_pll_quirk = 0;
+ info.need_pll_quirk = false;
break;
}
@@ -529,7 +532,7 @@ void usb_amd_dev_put(void)
amd_chipset.nb_type = 0;
memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
amd_chipset.isoc_reqs = 0;
- amd_chipset.need_pll_quirk = 0;
+ amd_chipset.need_pll_quirk = false;
spin_unlock_irqrestore(&amd_lock, flags);
@@ -1243,11 +1246,24 @@ iounmap:
static void quirk_usb_early_handoff(struct pci_dev *pdev)
{
+ int ret;
+
/* Skip Netlogic mips SoC's internal PCI USB controller.
* This device does not need/support EHCI/OHCI handoff
*/
if (pdev->vendor == 0x184e) /* vendor Netlogic */
return;
+
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
+ ret = rpi_firmware_init_vl805(pdev);
+ if (ret) {
+ /* Firmware might be outdated, or something failed */
+ dev_warn(&pdev->dev,
+ "Failed to load VL805's firmware: %d. Will continue to attempt to work, but bad things might happen. You should fix this...\n",
+ ret);
+ }
+ }
+
if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index 51973a923526..ab081475c113 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* R8A66597 HCD (Host Controller Driver)
*
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index e9209e3e6248..995bc52d2d22 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -81,7 +81,6 @@ static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
static struct mutex u132_module_lock;
static int u132_exiting;
static int u132_instances;
-static struct list_head u132_static_list;
/*
* end of the global variables protected by u132_module_lock
*/
@@ -177,7 +176,6 @@ struct u132_ring {
};
struct u132 {
struct kref kref;
- struct list_head u132_list;
struct mutex sw_lock;
struct mutex scheduler_lock;
struct u132_platform_data *board;
@@ -254,7 +252,6 @@ static void u132_hcd_delete(struct kref *kref)
struct usb_hcd *hcd = u132_to_hcd(u132);
u132->going += 1;
mutex_lock(&u132_module_lock);
- list_del_init(&u132->u132_list);
u132_instances -= 1;
mutex_unlock(&u132_module_lock);
dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13"
@@ -3089,7 +3086,6 @@ static int u132_probe(struct platform_device *pdev)
retval = 0;
hcd->rsrc_start = 0;
mutex_lock(&u132_module_lock);
- list_add_tail(&u132->u132_list, &u132_static_list);
u132->sequence_num = ++u132_instances;
mutex_unlock(&u132_module_lock);
u132_u132_init_kref(u132);
@@ -3192,7 +3188,6 @@ static struct platform_driver u132_platform_driver = {
static int __init u132_hcd_init(void)
{
int retval;
- INIT_LIST_HEAD(&u132_static_list);
u132_instances = 0;
u132_exiting = 0;
mutex_init(&u132_module_lock);
@@ -3213,14 +3208,9 @@ static int __init u132_hcd_init(void)
module_init(u132_hcd_init);
static void __exit u132_hcd_exit(void)
{
- struct u132 *u132;
- struct u132 *temp;
mutex_lock(&u132_module_lock);
u132_exiting += 1;
mutex_unlock(&u132_module_lock);
- list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
- platform_device_unregister(u132->platform_dev);
- }
platform_driver_unregister(&u132_platform_driver);
printk(KERN_INFO "u132-hcd driver deregistered\n");
wait_event(u132_hcd_wait, u132_instances == 0);
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 957c87efc746..9b88745d247f 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -287,17 +287,21 @@ static const struct hc_driver uhci_driver = {
static const struct pci_device_id uhci_pci_ids[] = { {
/* handle any USB UHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
- .driver_data = (unsigned long) &uhci_driver,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
+static int uhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ return usb_hcd_pci_probe(dev, id, &uhci_driver);
+}
+
static struct pci_driver uhci_pci_driver = {
.name = hcd_name,
.id_table = uhci_pci_ids,
- .probe = usb_hcd_pci_probe,
+ .probe = uhci_pci_probe,
.remove = usb_hcd_pci_remove,
.shutdown = uhci_shutdown,
diff --git a/drivers/usb/host/xhci-debugfs.h b/drivers/usb/host/xhci-debugfs.h
index f7a4e2492b00..56db635fcd6e 100644
--- a/drivers/usb/host/xhci-debugfs.h
+++ b/drivers/usb/host/xhci-debugfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xhci-debugfs.h - xHCI debugfs interface
*
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 268328c20681..fa59b242cd51 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xHCI host controller driver
*
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index acd56517215a..a93cfe817904 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015 MediaTek Inc.
* Author:
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index ca0a3a5721dd..3be021793cc8 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Marvell
*
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
new file mode 100644
index 000000000000..59b1965ad0a3
--- /dev/null
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2020 Linaro Limited */
+
+#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+#include "xhci-pci.h"
+
+#define RENESAS_FW_VERSION 0x6C
+#define RENESAS_ROM_CONFIG 0xF0
+#define RENESAS_FW_STATUS 0xF4
+#define RENESAS_FW_STATUS_MSB 0xF5
+#define RENESAS_ROM_STATUS 0xF6
+#define RENESAS_ROM_STATUS_MSB 0xF7
+#define RENESAS_DATA0 0xF8
+#define RENESAS_DATA1 0xFC
+
+#define RENESAS_FW_VERSION_FIELD GENMASK(23, 7)
+#define RENESAS_FW_VERSION_OFFSET 8
+
+#define RENESAS_FW_STATUS_DOWNLOAD_ENABLE BIT(0)
+#define RENESAS_FW_STATUS_LOCK BIT(1)
+#define RENESAS_FW_STATUS_RESULT GENMASK(6, 4)
+ #define RENESAS_FW_STATUS_INVALID 0
+ #define RENESAS_FW_STATUS_SUCCESS BIT(4)
+ #define RENESAS_FW_STATUS_ERROR BIT(5)
+#define RENESAS_FW_STATUS_SET_DATA0 BIT(8)
+#define RENESAS_FW_STATUS_SET_DATA1 BIT(9)
+
+#define RENESAS_ROM_STATUS_ACCESS BIT(0)
+#define RENESAS_ROM_STATUS_ERASE BIT(1)
+#define RENESAS_ROM_STATUS_RELOAD BIT(2)
+#define RENESAS_ROM_STATUS_RESULT GENMASK(6, 4)
+ #define RENESAS_ROM_STATUS_NO_RESULT 0
+ #define RENESAS_ROM_STATUS_SUCCESS BIT(4)
+ #define RENESAS_ROM_STATUS_ERROR BIT(5)
+#define RENESAS_ROM_STATUS_SET_DATA0 BIT(8)
+#define RENESAS_ROM_STATUS_SET_DATA1 BIT(9)
+#define RENESAS_ROM_STATUS_ROM_EXISTS BIT(15)
+
+#define RENESAS_ROM_ERASE_MAGIC 0x5A65726F
+#define RENESAS_ROM_WRITE_MAGIC 0x53524F4D
+
+#define RENESAS_RETRY 10000
+#define RENESAS_DELAY 10
+
+#define ROM_VALID_01 0x2013
+#define ROM_VALID_02 0x2026
+
+static int renesas_verify_fw_version(struct pci_dev *pdev, u32 version)
+{
+ switch (version) {
+ case ROM_VALID_01:
+ case ROM_VALID_02:
+ return 0;
+ }
+ dev_err(&pdev->dev, "FW has invalid version :%d\n", version);
+ return -EINVAL;
+}
+
+static int renesas_fw_download_image(struct pci_dev *dev,
+ const u32 *fw, size_t step, bool rom)
+{
+ size_t i;
+ int err;
+ u8 fw_status;
+ bool data0_or_data1;
+ u32 status_reg;
+
+ if (rom)
+ status_reg = RENESAS_ROM_STATUS_MSB;
+ else
+ status_reg = RENESAS_FW_STATUS_MSB;
+
+ /*
+ * The hardware alternates between two 32-bit registers, because
+ * each row of the firmware is 8 bytes wide:
+ *
+ * for even steps we use DATA0, for odd steps DATA1.
+ */
+ data0_or_data1 = (step & 1) == 1;
+
+ /* step+1. Read "Set DATAX" and confirm it is cleared. */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(dev, status_reg, &fw_status);
+ if (err) {
+ dev_err(&dev->dev, "Read Status failed: %d\n",
+ pcibios_err_to_errno(err));
+ return pcibios_err_to_errno(err);
+ }
+ if (!(fw_status & BIT(data0_or_data1)))
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) {
+ dev_err(&dev->dev, "Timeout for Set DATAX step: %zd\n", step);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * step+2. Write FW data to "DATAX".
+ * "LSB is left" => force little endian
+ */
+ err = pci_write_config_dword(dev, data0_or_data1 ?
+ RENESAS_DATA1 : RENESAS_DATA0,
+ (__force u32)cpu_to_le32(fw[step]));
+ if (err) {
+ dev_err(&dev->dev, "Write to DATAX failed: %d\n",
+ pcibios_err_to_errno(err));
+ return pcibios_err_to_errno(err);
+ }
+
+ udelay(100);
+
+ /* step+3. Set "Set DATAX". */
+ err = pci_write_config_byte(dev, status_reg, BIT(data0_or_data1));
+ if (err) {
+ dev_err(&dev->dev, "Write config for DATAX failed: %d\n",
+ pcibios_err_to_errno(err));
+ return pcibios_err_to_errno(err);
+ }
+
+ return 0;
+}
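
The even/odd alternation works out as follows (hypothetical helper for illustration; the driver open-codes this in the ternary above):

    /* Map a download step to the config-space data register it uses. */
    static u32 example_data_reg_for_step(size_t step)
    {
    	return (step & 1) ? RENESAS_DATA1 : RENESAS_DATA0;
    }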
+
+static int renesas_fw_verify(const void *fw_data,
+ size_t length)
+{
+ u16 fw_version_pointer;
+ u16 fw_version;
+
+ /*
+ * The firmware's data format is described in
+ * "6.3 Data Format" R19UH0078EJ0500 Rev.5.00 page 124
+ */
+
+ /*
+ * The bootrom chips of the larger sibling devices have sizes up to
+ * 64k, so assume that is the largest the firmware can get.
+ */
+ if (length < 0x1000 || length >= 0x10000) {
+ pr_err("firmware is size %zd is not (4k - 64k).",
+ length);
+ return -EINVAL;
+ }
+
+ /* The First 2 bytes are fixed value (55aa). "LSB on Left" */
+ if (get_unaligned_le16(fw_data) != 0x55aa) {
+ pr_err("no valid firmware header found.");
+ return -EINVAL;
+ }
+
+ /* verify the firmware version position and print it. */
+ fw_version_pointer = get_unaligned_le16(fw_data + 4);
+ if (fw_version_pointer + 2 >= length) {
+ pr_err("fw ver pointer is outside of the firmware image");
+ return -EINVAL;
+ }
+
+ fw_version = get_unaligned_le16(fw_data + fw_version_pointer);
+ pr_err("got firmware version: %02x.", fw_version);
+
+ return 0;
+}
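
To make the header layout concrete, a minimal buffer that would pass renesas_fw_verify() looks like this (a sketch for understanding only; real images come from Renesas):

    /* Hypothetical: fill in just the fields renesas_fw_verify() reads. */
    static void example_fw_header(u8 *buf)	/* >= 0x1000 bytes, zeroed */
    {
    	put_unaligned_le16(0x55aa, buf);	/* fixed magic, "LSB on left" */
    	put_unaligned_le16(0x0008, buf + 4);	/* offset of the version field */
    	put_unaligned_le16(0x2013, buf + 8);	/* version, here ROM_VALID_01 */
    }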
+
+static bool renesas_check_rom(struct pci_dev *pdev)
+{
+ u16 rom_status;
+ int retval;
+
+ /* Check if external ROM exists */
+ retval = pci_read_config_word(pdev, RENESAS_ROM_STATUS, &rom_status);
+ if (retval)
+ return false;
+
+ rom_status &= RENESAS_ROM_STATUS_ROM_EXISTS;
+ if (rom_status) {
+ dev_dbg(&pdev->dev, "External ROM exists\n");
+ return true; /* External ROM exists */
+ }
+
+ return false;
+}
+
+static int renesas_check_rom_state(struct pci_dev *pdev)
+{
+ u16 rom_state;
+ u32 version;
+ int err;
+
+ /* check FW version */
+ err = pci_read_config_dword(pdev, RENESAS_FW_VERSION, &version);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ version &= RENESAS_FW_VERSION_FIELD;
+ version = version >> RENESAS_FW_VERSION_OFFSET;
+
+ err = renesas_verify_fw_version(pdev, version);
+ if (err)
+ return err;
+
+ /*
+ * Test if ROM is present and loaded, if so we can skip everything
+ */
+ err = pci_read_config_word(pdev, RENESAS_ROM_STATUS, &rom_state);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ if (rom_state & BIT(15)) {
+ /* ROM exists */
+ dev_dbg(&pdev->dev, "ROM exists\n");
+
+ /* Check the "Result Code" Bits (6:4) and act accordingly */
+ switch (rom_state & RENESAS_ROM_STATUS_RESULT) {
+ case RENESAS_ROM_STATUS_SUCCESS:
+ return 0;
+
+ case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
+ return 0;
+
+ case RENESAS_ROM_STATUS_ERROR: /* Error State */
+ default: /* All other states are marked as "Reserved states" */
+ dev_err(&pdev->dev, "Invalid ROM..");
+ break;
+ }
+ }
+
+ return -EIO;
+}
+
+static int renesas_fw_check_running(struct pci_dev *pdev)
+{
+ u8 fw_state;
+ int err;
+
+ /* Check if the device has an external ROM with firmware loaded; if so, skip everything */
+ err = renesas_check_rom(pdev);
+ if (err) { /* we have rom */
+ err = renesas_check_rom_state(pdev);
+ if (!err)
+ return err;
+ }
+
+ /*
+ * Test if the device actually needs the firmware. Most BIOSes
+ * will have initialized the device for us already, in which case
+ * the firmware may already be running.
+ */
+ err = pci_read_config_byte(pdev, RENESAS_FW_STATUS, &fw_state);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ /*
+ * Check if "FW Download Lock" is locked. If it is and the FW is
+ * ready we can simply continue. If the FW is not ready, we have
+ * to give up.
+ */
+ if (fw_state & RENESAS_FW_STATUS_LOCK) {
+ dev_dbg(&pdev->dev, "FW Download Lock is engaged.");
+
+ if (fw_state & RENESAS_FW_STATUS_SUCCESS)
+ return 0;
+
+ dev_err(&pdev->dev,
+ "FW Download Lock is set and FW is not ready. Giving Up.");
+ return -EIO;
+ }
+
+ /*
+ * Check if "FW Download Enable" is set. If someone (us?) tampered
+ * with it and it can't be reset, we have to give up too... and
+ * ask for a forgiveness and a reboot.
+ */
+ if (fw_state & RENESAS_FW_STATUS_DOWNLOAD_ENABLE) {
+ dev_err(&pdev->dev,
+ "FW Download Enable is stale. Giving Up (poweroff/reboot needed).");
+ return -EIO;
+ }
+
+ /* Otherwise, Check the "Result Code" Bits (6:4) and act accordingly */
+ switch (fw_state & RENESAS_FW_STATUS_RESULT) {
+ case 0: /* No result yet */
+ dev_dbg(&pdev->dev, "FW is not ready/loaded yet.");
+
+ /* tell the caller, that this device needs the firmware. */
+ return 1;
+
+ case RENESAS_FW_STATUS_SUCCESS: /* Success, device should be working. */
+ dev_dbg(&pdev->dev, "FW is ready.");
+ return 0;
+
+ case RENESAS_FW_STATUS_ERROR: /* Error State */
+ dev_err(&pdev->dev,
+ "hardware is in an error state. Giving up (poweroff/reboot needed).");
+ return -ENODEV;
+
+ default: /* All other states are marked as "Reserved states" */
+ dev_err(&pdev->dev,
+ "hardware is in an invalid state %lx. Giving up (poweroff/reboot needed).",
+ (fw_state & RENESAS_FW_STATUS_RESULT) >> 4);
+ return -EINVAL;
+ }
+}
+
+static int renesas_fw_download(struct pci_dev *pdev,
+ const struct firmware *fw)
+{
+ const u32 *fw_data = (const u32 *)fw->data;
+ size_t i;
+ int err;
+ u8 fw_status;
+
+ /*
+ * For more information and the big picture: please look at the
+ * "Firmware Download Sequence" in "7.1 FW Download Interface"
+ * of R19UH0078EJ0500 Rev.5.00 page 131
+ */
+
+ /*
+ * 0. Set "FW Download Enable" bit in the
+ * "FW Download Control & Status Register" at 0xF4
+ */
+ err = pci_write_config_byte(pdev, RENESAS_FW_STATUS,
+ RENESAS_FW_STATUS_DOWNLOAD_ENABLE);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ /* 1 - 10 follow one step after the other. */
+ for (i = 0; i < fw->size / 4; i++) {
+ err = renesas_fw_download_image(pdev, fw_data, i, false);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Firmware Download Step %zd failed at position %zd bytes with (%d).",
+ i, i * 4, err);
+ return err;
+ }
+ }
+
+ /*
+ * This sequence continues until the last data is written to
+ * "DATA0" or "DATA1". Naturally, we wait until "SET DATA0/1"
+ * is cleared by the hardware beforehand.
+ */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_FW_STATUS_MSB,
+ &fw_status);
+ if (err)
+ return pcibios_err_to_errno(err);
+ if (!(fw_status & (BIT(0) | BIT(1))))
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY)
+ dev_warn(&pdev->dev, "Final Firmware Download step timed out.");
+
+ /*
+ * 11. After finishing writing the last data of FW, the
+ * System Software must clear "FW Download Enable"
+ */
+ err = pci_write_config_byte(pdev, RENESAS_FW_STATUS, 0);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ /* 12. Read "Result Code" and confirm it is good. */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_FW_STATUS, &fw_status);
+ if (err)
+ return pcibios_err_to_errno(err);
+ if (fw_status & RENESAS_FW_STATUS_SUCCESS)
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) {
+ /* Timed out / Error - let's see if we can fix this */
+ err = renesas_fw_check_running(pdev);
+ switch (err) {
+ case 0: /*
+ * We shouldn't end up here; maybe the download
+ * just took a little longer, but all should be
+ * well.
+ */
+ break;
+
+ case 1: /* No result yet */
+ dev_err(&pdev->dev, "FW load timed out\n");
+ return -ETIMEDOUT;
+
+ default:
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void renesas_rom_erase(struct pci_dev *pdev)
+{
+ int retval, i;
+ u8 status;
+
+ dev_dbg(&pdev->dev, "Performing ROM Erase...\n");
+ retval = pci_write_config_dword(pdev, RENESAS_DATA0,
+ RENESAS_ROM_ERASE_MAGIC);
+ if (retval) {
+ dev_err(&pdev->dev, "ROM erase, magic word write failed: %d\n",
+ pcibios_err_to_errno(retval));
+ return;
+ }
+
+ retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
+ if (retval) {
+ dev_err(&pdev->dev, "ROM status read failed: %d\n",
+ pcibios_err_to_errno(retval));
+ return;
+ }
+ status |= RENESAS_ROM_STATUS_ERASE;
+ retval = pci_write_config_byte(pdev, RENESAS_ROM_STATUS, status);
+ if (retval) {
+ dev_err(&pdev->dev, "ROM erase set word write failed\n");
+ return;
+ }
+
+ /* sleep a bit while ROM is erased */
+ msleep(20);
+
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS,
+ &status);
+ status &= RENESAS_ROM_STATUS_ERASE;
+ if (!status)
+ break;
+
+ mdelay(RENESAS_DELAY);
+ }
+
+ if (i == RENESAS_RETRY)
+ dev_dbg(&pdev->dev, "Chip erase timedout: %x\n", status);
+
+ dev_dbg(&pdev->dev, "ROM Erase... Done success\n");
+}
+
+static bool renesas_setup_rom(struct pci_dev *pdev, const struct firmware *fw)
+{
+ const u32 *fw_data = (const u32 *)fw->data;
+ int err, i;
+ u8 status;
+
+ /* 2. Write magic word to Data0 */
+ err = pci_write_config_dword(pdev, RENESAS_DATA0,
+ RENESAS_ROM_WRITE_MAGIC);
+ if (err)
+ return false;
+
+ /* 3. Set External ROM access */
+ err = pci_write_config_byte(pdev, RENESAS_ROM_STATUS,
+ RENESAS_ROM_STATUS_ACCESS);
+ if (err)
+ goto remove_bypass;
+
+ /* 4. Check the result */
+ err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
+ if (err)
+ goto remove_bypass;
+ status &= GENMASK(6, 4);
+ if (status) {
+ dev_err(&pdev->dev,
+ "setting external rom failed: %x\n", status);
+ goto remove_bypass;
+ }
+
+ /* 5 to 16 Write FW to DATA0/1 while checking SetData0/1 */
+ for (i = 0; i < fw->size / 4; i++) {
+ err = renesas_fw_download_image(pdev, fw_data, i, true);
+ if (err) {
+ dev_err(&pdev->dev,
+ "ROM Download Step %d failed at position %d bytes with (%d)\n",
+ i, i * 4, err);
+ goto remove_bypass;
+ }
+ }
+
+ /*
+ * wait till DATA0/1 is cleared
+ */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS_MSB,
+ &status);
+ if (err)
+ goto remove_bypass;
+ if (!(status & (BIT(0) | BIT(1))))
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) {
+ dev_err(&pdev->dev, "Final Firmware ROM Download step timed out\n");
+ goto remove_bypass;
+ }
+
+ /* 17. Remove bypass */
+ err = pci_write_config_byte(pdev, RENESAS_ROM_STATUS, 0);
+ if (err)
+ return false;
+
+ udelay(10);
+
+ /* 18. check result */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
+ if (err) {
+ dev_err(&pdev->dev, "Read ROM status failed:%d\n",
+ pcibios_err_to_errno(err));
+ return false;
+ }
+ status &= RENESAS_ROM_STATUS_RESULT;
+ if (status == RENESAS_ROM_STATUS_SUCCESS) {
+ dev_dbg(&pdev->dev, "Download ROM success\n");
+ break;
+ }
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) { /* Timed out */
+ dev_err(&pdev->dev,
+ "Download to external ROM TO: %x\n", status);
+ return false;
+ }
+
+ dev_dbg(&pdev->dev, "Download to external ROM succeeded\n");
+
+ /* Last step set Reload */
+ err = pci_write_config_byte(pdev, RENESAS_ROM_STATUS,
+ RENESAS_ROM_STATUS_RELOAD);
+ if (err) {
+ dev_err(&pdev->dev, "Set ROM execute failed: %d\n",
+ pcibios_err_to_errno(err));
+ return false;
+ }
+
+ /* Wait until Reload is cleared */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
+ if (err)
+ return false;
+ if (!(status & RENESAS_ROM_STATUS_RELOAD))
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) {
+ dev_err(&pdev->dev, "ROM Exec timed out: %x\n", status);
+ return false;
+ }
+
+ return true;
+
+remove_bypass:
+ pci_write_config_byte(pdev, RENESAS_ROM_STATUS, 0);
+ return false;
+}
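
Stepping back, renesas_setup_rom() is a textbook external-flash programming sequence: unlock with a magic word, enable access, stream the image, remove the bypass, verify, then trigger a reload that the hardware self-clears. A hedged standalone model of that ordering follows; struct flash and flash_program() are invented for illustration and only mock the RENESAS_ROM_* register effects.

    #include <stdbool.h>
    #include <stdio.h>

    /* Mocked flash state; stands in for the RENESAS_ROM_* config registers. */
    struct flash {
            bool unlocked, access, reload;
            int words;
    };

    static bool flash_program(struct flash *f, const int *data, int n)
    {
            int i;

            (void)data;             /* payload content is irrelevant to the model */

            f->unlocked = true;     /* step 2: write the magic word */
            f->access = true;       /* step 3: set external ROM access */

            for (i = 0; i < n; i++) /* steps 5 to 16: stream the image */
                    f->words++;

            f->access = false;      /* step 17: remove the bypass */

            f->reload = true;       /* last step: request a reload... */
            f->reload = false;      /* ...which the hardware self-clears */

            return f->words == n;   /* step 18: check the result */
    }

    int main(void)
    {
            struct flash f = { 0 };
            int image[4] = { 1, 2, 3, 4 };

            printf("programmed: %s\n", flash_program(&f, image, 4) ? "yes" : "no");
            return 0;
    }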
+
+static int renesas_load_fw(struct pci_dev *pdev, const struct firmware *fw)
+{
+ int err = 0;
+ bool rom;
+
+ /* Check if the device has external ROM */
+ rom = renesas_check_rom(pdev);
+ if (rom) {
+ /* perform chip erase first */
+ renesas_rom_erase(pdev);
+
+ /* let's try loading FW via ROM first */
+ rom = renesas_setup_rom(pdev, fw);
+ if (!rom) {
+ dev_dbg(&pdev->dev,
+ "ROM load failed, falling back on FW load\n");
+ } else {
+ dev_dbg(&pdev->dev, "ROM load succeeded\n");
+ goto exit;
+ }
+ }
+
+ err = renesas_fw_download(pdev, fw);
+
+exit:
+ if (err)
+ dev_err(&pdev->dev, "firmware failed to download (%d).", err);
+ return err;
+}
+
+int renesas_xhci_check_request_fw(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct xhci_driver_data *driver_data =
+ (struct xhci_driver_data *)id->driver_data;
+ const char *fw_name = driver_data->firmware;
+ const struct firmware *fw;
+ int err;
+
+ err = renesas_fw_check_running(pdev);
+ /* Continue if the firmware is already running. */
+ if (err == 0)
+ return 0;
+
+ if (err != 1)
+ return err;
+
+ pci_dev_get(pdev);
+ err = request_firmware(&fw, fw_name, &pdev->dev);
+ pci_dev_put(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "request_firmware failed: %d\n", err);
+ return err;
+ }
+
+ err = renesas_fw_verify(fw->data, fw->size);
+ if (err)
+ goto exit;
+
+ err = renesas_load_fw(pdev, fw);
+exit:
+ release_firmware(fw);
+ return err;
+}
+EXPORT_SYMBOL_GPL(renesas_xhci_check_request_fw);
+
+void renesas_xhci_pci_exit(struct pci_dev *dev)
+{
+}
+EXPORT_SYMBOL_GPL(renesas_xhci_pci_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 766b74723e64..ef513c2fb843 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -15,6 +15,7 @@
#include "xhci.h"
#include "xhci-trace.h"
+#include "xhci-pci.h"
#define SSIC_PORT_NUM 2
#define SSIC_PORT_CFG2 0x880c
@@ -87,7 +88,16 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xhci_driver_data *driver_data;
+ const struct pci_device_id *id;
+
+ id = pci_match_id(pdev->driver->id_table, pdev);
+
+ if (id && id->driver_data) {
+ driver_data = (struct xhci_driver_data *)id->driver_data;
+ xhci->quirks |= driver_data->quirks;
+ }
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
@@ -327,10 +337,15 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
struct xhci_hcd *xhci;
- struct hc_driver *driver;
struct usb_hcd *hcd;
+ struct xhci_driver_data *driver_data;
- driver = (struct hc_driver *)id->driver_data;
+ driver_data = (struct xhci_driver_data *)id->driver_data;
+ if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) {
+ retval = renesas_xhci_check_request_fw(dev, id);
+ if (retval)
+ return retval;
+ }
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);
@@ -341,7 +356,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
* to say USB 2.0, but I'm not sure what the implications would be in
* the other parts of the HCD code.
*/
- retval = usb_hcd_pci_probe(dev, id);
+ retval = usb_hcd_pci_probe(dev, id, &xhci_pci_hc_driver);
if (retval)
goto put_runtime_pm;
@@ -349,8 +364,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* USB 2.0 roothub is stored in the PCI device now. */
hcd = dev_get_drvdata(&dev->dev);
xhci = hcd_to_xhci(hcd);
- xhci->shared_hcd = usb_create_shared_hcd(driver, &dev->dev,
- pci_name(dev), hcd);
+ xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev,
+ pci_name(dev), hcd);
if (!xhci->shared_hcd) {
retval = -ENOMEM;
goto dealloc_usb2_hcd;
@@ -392,6 +407,9 @@ static void xhci_pci_remove(struct pci_dev *dev)
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ if (xhci->quirks & XHCI_RENESAS_FW_QUIRK)
+ renesas_xhci_pci_exit(dev);
+
xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
@@ -543,15 +561,26 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
+static const struct xhci_driver_data renesas_data = {
+ .quirks = XHCI_RENESAS_FW_QUIRK,
+ .firmware = "renesas_usb_fw.mem",
+};
+
/* PCI driver selection metadata; PCI hotplugging uses this */
-static const struct pci_device_id pci_ids[] = { {
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(0x1912, 0x0014),
+ .driver_data = (unsigned long)&renesas_data,
+ },
+ { PCI_DEVICE(0x1912, 0x0015),
+ .driver_data = (unsigned long)&renesas_data,
+ },
/* handle any USB 3.0 xHCI controller */
- PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
- .driver_data = (unsigned long) &xhci_pci_hc_driver,
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
},
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
+MODULE_FIRMWARE("renesas_usb_fw.mem");
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
diff --git a/drivers/usb/host/xhci-pci.h b/drivers/usb/host/xhci-pci.h
new file mode 100644
index 000000000000..acd7cf0a1706
--- /dev/null
+++ b/drivers/usb/host/xhci-pci.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2020 Linaro Limited */
+
+#ifndef XHCI_PCI_H
+#define XHCI_PCI_H
+
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
+int renesas_xhci_check_request_fw(struct pci_dev *dev,
+ const struct pci_device_id *id);
+void renesas_xhci_pci_exit(struct pci_dev *dev);
+
+#else
+static inline int renesas_xhci_check_request_fw(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return 0;
+}
+
+static inline void renesas_xhci_pci_exit(struct pci_dev *dev) { }
+
+#endif
+
+struct xhci_driver_data {
+ u64 quirks;
+ const char *firmware;
+};
+
+#endif
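
Note the #else branch above: stub functions defined in a header must be static inline (as fixed here) so every file including the header gets a copy that compiles away when unused, instead of triggering unused-function warnings or multiple-definition errors. A standalone model of the pattern; HAVE_FEATURE stands in for CONFIG_USB_XHCI_PCI_RENESAS, and feature_init()/feature_exit() are invented names.

    #include <stdio.h>

    /* Compile with -DHAVE_FEATURE to get the real implementation. */
    #ifdef HAVE_FEATURE
    int feature_init(void)
    {
            printf("real init\n");
            return 0;
    }

    void feature_exit(void)
    {
            printf("real exit\n");
    }
    #else
    /* static inline: safe in headers, vanishes entirely when unused */
    static inline int feature_init(void) { return 0; }
    static inline void feature_exit(void) { }
    #endif

    int main(void)
    {
            if (feature_init() == 0)
                    feature_exit();
            return 0;
    }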
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1d4f6f85f0fe..f6b4089bfc4a 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -112,6 +112,10 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3)
};
+static const struct xhci_plat_priv xhci_plat_brcm = {
+ .quirks = XHCI_RESET_ON_RESUME,
+};
+
static const struct of_device_id usb_xhci_of_match[] = {
{
.compatible = "generic-xhci",
@@ -147,6 +151,12 @@ static const struct of_device_id usb_xhci_of_match[] = {
}, {
.compatible = "renesas,rcar-gen3-xhci",
.data = &xhci_plat_renesas_rcar_gen3,
+ }, {
+ .compatible = "brcm,xhci-brcm-v2",
+ .data = &xhci_plat_brcm,
+ }, {
+ .compatible = "brcm,bcm7445-xhci",
+ .data = &xhci_plat_brcm,
},
{},
};
@@ -362,6 +372,7 @@ static int xhci_plat_remove(struct platform_device *dev)
struct clk *reg_clk = xhci->reg_clk;
struct usb_hcd *shared_hcd = xhci->shared_hcd;
+ pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
usb_remove_hcd(shared_hcd);
@@ -375,8 +386,9 @@ static int xhci_plat_remove(struct platform_device *dev)
clk_disable_unprepare(reg_clk);
usb_put_hcd(hcd);
- pm_runtime_set_suspended(&dev->dev);
pm_runtime_disable(&dev->dev);
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_set_suspended(&dev->dev);
return 0;
}
@@ -407,7 +419,15 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
if (ret)
return ret;
- return xhci_resume(xhci, 0);
+ ret = xhci_resume(xhci, 0);
+ if (ret)
+ return ret;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
}
static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 5681723fc9cd..b49f6447bd3a 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xhci-plat.h - xHCI host controller driver platform Bus Glue.
*
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
index 012744a63a49..048ad3b8a6c7 100644
--- a/drivers/usb/host/xhci-rcar.h
+++ b/drivers/usb/host/xhci-rcar.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* drivers/usb/host/xhci-rcar.h
*
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 0fda0c0f4d31..2c255d0620b0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3433,8 +3433,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* New sg entry */
--num_sgs;
sent_len -= block_len;
- if (num_sgs != 0) {
- sg = sg_next(sg);
+ sg = sg_next(sg);
+ if (num_sgs != 0 && sg) {
block_len = sg_dma_len(sg);
addr = (u64) sg_dma_address(sg);
addr += sent_len;
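
The xhci-ring.c hunk above advances sg before testing it, so a scatterlist that ends earlier than num_sgs promises is never dereferenced past its last entry. The same advance-then-check discipline, modelled on a plain linked list (struct node stands in for struct scatterlist; this is a sketch, not kernel code):

    #include <stddef.h>
    #include <stdio.h>

    struct node {
            int len;
            struct node *next;
    };

    static int total_len(struct node *n, int count)
    {
            int sum = 0;

            /*
             * Advance-then-check: the loop condition re-tests the pointer,
             * so an over-reported count can never cause a NULL dereference.
             */
            while (n && count--) {
                    sum += n->len;
                    n = n->next;
            }
            return sum;
    }

    int main(void)
    {
            struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

            /* The caller over-reports 5 entries; the walk still returns 6. */
            printf("%d\n", total_len(&a, 5));
            return 0;
    }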
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index b19582b2a72c..627abd236dbe 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xHCI host controller driver
*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 86cfefdd6632..2c6c4f8d1ee1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xHCI host controller driver
@@ -1873,6 +1873,7 @@ struct xhci_hcd {
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
+#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/isp1760/isp1760-core.h b/drivers/usb/isp1760/isp1760-core.h
index 97cb4d7a3e1c..d9a0a4cc467c 100644
--- a/drivers/usb/isp1760/isp1760-core.h
+++ b/drivers/usb/isp1760/isp1760-core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the NXP ISP1760 chip
*
diff --git a/drivers/usb/isp1760/isp1760-regs.h b/drivers/usb/isp1760/isp1760-regs.h
index 1f00c3850cf7..fedc4f5cded0 100644
--- a/drivers/usb/isp1760/isp1760-regs.h
+++ b/drivers/usb/isp1760/isp1760-regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the NXP ISP1760 chip
*
diff --git a/drivers/usb/isp1760/isp1760-udc.h b/drivers/usb/isp1760/isp1760-udc.h
index 2d0b88747701..d2df650d54e9 100644
--- a/drivers/usb/isp1760/isp1760-udc.h
+++ b/drivers/usb/isp1760/isp1760-udc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the NXP ISP1761 device controller
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb.h b/drivers/usb/misc/sisusbvga/sisusb.h
index 8a5e6bb07d05..c0fb9e1c5361 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.h
+++ b/drivers/usb/misc/sisusbvga/sisusb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* sisusb - usb kernel driver for Net2280/SiS315 based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index ace09985dae4..aa33bc81ee52 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* $XFree86$ */
/* $XdotOrg$ */
/*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_struct.h b/drivers/usb/misc/sisusbvga/sisusb_struct.h
index 706d77090e00..3df64d2a9d43 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_struct.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_struct.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* General structure definitions for universal mode switching modules
*
diff --git a/drivers/usb/misc/usb_u132.h b/drivers/usb/misc/usb_u132.h
index 4bf77736914f..1584efbbd704 100644
--- a/drivers/usb/misc/usb_u132.h
+++ b/drivers/usb/misc/usb_u132.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common Header File for the Elan Digital Systems U132 adapter
* this file should be included by both the "ftdi-u132" and
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index 6087be236a35..d49db92ab26c 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3.h - MediaTek USB3 DRD header
*
diff --git a/drivers/usb/mtu3/mtu3_debug.h b/drivers/usb/mtu3/mtu3_debug.h
index e96a69234d05..fb6b28277c9b 100644
--- a/drivers/usb/mtu3/mtu3_debug.h
+++ b/drivers/usb/mtu3/mtu3_debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3_debug.h - debug header
*
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
index c96e5dab0a48..fdeade6254ae 100644
--- a/drivers/usb/mtu3/mtu3_debugfs.c
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -276,7 +276,7 @@ static const struct file_operations mtu3_ep_fops = {
.release = single_release,
};
-static struct debugfs_reg32 mtu3_prb_regs[] = {
+static const struct debugfs_reg32 mtu3_prb_regs[] = {
dump_prb_reg("enable", U3D_SSUSB_PRB_CTRL0),
dump_prb_reg("byte-sell", U3D_SSUSB_PRB_CTRL1),
dump_prb_reg("byte-selh", U3D_SSUSB_PRB_CTRL2),
@@ -349,7 +349,7 @@ static const struct file_operations mtu3_probe_fops = {
static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu)
{
struct ssusb_mtk *ssusb = mtu->ssusb;
- struct debugfs_reg32 *regs;
+ const struct debugfs_reg32 *regs;
struct dentry *dir_prb;
int i;
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
index 5e58c4dbd54a..760fe7d69c6b 100644
--- a/drivers/usb/mtu3/mtu3_dr.h
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3_dr.h - dual role switch and host glue layer header
*
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 8382d066749e..bf34f784f84b 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions
*
diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h
index 9cfde201db63..66e1c0ab5a99 100644
--- a/drivers/usb/mtu3/mtu3_qmu.h
+++ b/drivers/usb/mtu3/mtu3_qmu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3_qmu.h - Queue Management Unit driver header
*
diff --git a/drivers/usb/mtu3/mtu3_trace.h b/drivers/usb/mtu3/mtu3_trace.h
index 050e30f0fbd4..1b897636daf2 100644
--- a/drivers/usb/mtu3/mtu3_trace.h
+++ b/drivers/usb/mtu3/mtu3_trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* mtu3_trace.h - trace support
*
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index e021485c83ae..c8e67d15b510 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2005-2006 by Texas Instruments
*/
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index e64dd30e80e7..c4fe1f4cd17a 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -30,11 +30,11 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
irqreturn_t retval = IRQ_NONE, retval_dma = IRQ_NONE;
struct musb *musb = __hci;
- spin_lock_irqsave(&musb->lock, flags);
-
if (IS_ENABLED(CONFIG_USB_INVENTRA_DMA) && musb->dma_controller)
retval_dma = dma_controller_irq(irq, musb->dma_controller);
+ spin_lock_irqsave(&musb->lock, flags);
+
musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
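
The jz4740 hunk moves the dma_controller_irq() call in front of spin_lock_irqsave(), presumably because the Inventra DMA interrupt handler takes musb->lock itself; calling it with the lock already held would deadlock on a non-recursive spinlock. A standalone pthread model of the corrected ordering (sub_handler() and interrupt_handler() are invented names):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Sub-handler that takes the lock itself, like dma_controller_irq()
     * does with musb->lock. Calling it with 'lock' already held would
     * deadlock on this non-recursive mutex.
     */
    static void sub_handler(void)
    {
            pthread_mutex_lock(&lock);
            puts("dma work");
            pthread_mutex_unlock(&lock);
    }

    static void interrupt_handler(void)
    {
            sub_handler();          /* run it before taking our own lock */

            pthread_mutex_lock(&lock);
            puts("usb work");
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            interrupt_handler();
            return 0;
    }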
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index 6196b0e8d77d..eebeadd26946 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -208,6 +208,12 @@ static irqreturn_t generic_interrupt(int irq, void *__hci)
musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
+ if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
+ /* ep0 FADDR must be 0 when (re)entering peripheral mode */
+ musb_ep_select(musb->mregs, 0);
+ musb_writeb(musb->mregs, MUSB_FADDR, 0);
+ }
+
if (musb->int_usb || musb->int_tx || musb->int_rx)
retval = musb_interrupt(musb);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index d590110539ab..384a8039a7fd 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1795,7 +1795,7 @@ irqreturn_t musb_interrupt(struct musb *musb)
EXPORT_SYMBOL_GPL(musb_interrupt);
#ifndef CONFIG_MUSB_PIO_ONLY
-static bool use_dma = 1;
+static bool use_dma = true;
/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0644);
@@ -2877,6 +2877,13 @@ static int musb_resume(struct device *dev)
musb_enable_interrupts(musb);
musb_platform_enable(musb);
+ /* session might be disabled in suspend */
+ if (musb->port_mode == MUSB_HOST &&
+ !(musb->ops->quirks & MUSB_PRESERVE_SESSION)) {
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ }
+
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
if (error)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 290a2bc46606..dbe5623db1e0 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver defines
*
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index c444a80fe1da..e5b3506c7b3f 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver debug defines
*
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 7b6281ab62ed..30a89aa8a3e7 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -168,6 +168,11 @@ static ssize_t musb_test_mode_write(struct file *file,
u8 test;
char buf[24];
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
if (test) {
@@ -176,11 +181,6 @@ static ssize_t musb_test_mode_write(struct file *file,
goto ret;
}
- memset(buf, 0x00, sizeof(buf));
-
- if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
if (strstarts(buf, "force host full-speed"))
test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS;
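
The musb_debugfs reorder above is a resource-ordering fix: copy_from_user() can fail, and when it ran after pm_runtime_get_sync() the early -EFAULT return leaked the runtime-PM reference. Validating untrusted input before acquiring anything that needs a paired release avoids the leak by construction. A standalone model, with get_ref()/put_ref()/copy_in() as mocks:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int refcount;

    static void get_ref(void) { refcount++; } /* models pm_runtime_get_sync() */
    static void put_ref(void) { refcount--; } /* models the paired put */

    /* Models copy_from_user(): fails when the source pointer is bad. */
    static int copy_in(char *dst, const char *src, size_t n)
    {
            size_t len;

            if (!src)
                    return -1;      /* models an -EFAULT */
            len = strlen(src);
            if (len > n)
                    len = n;
            memcpy(dst, src, len);  /* a real copy_from_user() copies n bytes */
            return 0;
    }

    static int write_op(const char *ubuf)
    {
            char buf[24] = "";

            /*
             * Copy and validate the input *before* taking the reference,
             * so an -EFAULT return cannot leak it (the bug the hunk fixes).
             */
            if (copy_in(buf, ubuf, sizeof(buf) - 1))
                    return -EFAULT;

            get_ref();
            /* ... act on buf ... */
            put_ref();
            return 0;
    }

    int main(void)
    {
            write_op(NULL);                         /* faulting writer */
            write_op("force host full-speed");      /* well-formed writer */
            printf("leaked refs: %d\n", refcount);  /* prints 0 */
            return 0;
    }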
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 4b4d8dc5d3f2..7d67b69df0a0 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver DMA controller abstraction
*
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index d02663660813..f49f25b3bf56 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver peripheral defines
*
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8736f4251a22..8b7d22a0c0fb 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1774,9 +1774,15 @@ void musb_host_rx(struct musb *musb, u8 epnum)
status = -EPIPE;
} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
- musb_dbg(musb, "end %d RX proto error", epnum);
+ dev_err(musb->controller, "ep%d RX three-strikes error\n", epnum);
- status = -EPROTO;
+ /*
+ * A three-strikes error only happens when the USB device is
+ * inaccessible, e.g. detached or powered off. Return the
+ * fatal -ESHUTDOWN so USB device drivers don't immediately
+ * resubmit the same URB.
+ */
+ status = -ESHUTDOWN;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
rx_csr &= ~MUSB_RXCSR_H_ERROR;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 2999845632ce..32336571f05c 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver host defines
*
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index f17aabd95a50..12874d3b2a64 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver register I/O
*
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 5cd7264fc2cb..5fa110978f1a 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver register defines
*
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index b193daf69685..380ebc77eab1 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* musb_trace.h - MUSB Controller Trace Support
*
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index 859008fa0e3c..939a0361ae88 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2005-2006 by Texas Instruments
*/
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
index fd8025bbece7..8a253564fb18 100644
--- a/drivers/usb/musb/tusb6010.h
+++ b/drivers/usb/musb/tusb6010.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
*
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index 43d410f6641b..fbcc28ad9964 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc. */
#include <linux/usb/otg-fsm.h>
diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c
index 3ea1f5b9bcf8..8f62dc2a90ff 100644
--- a/drivers/usb/phy/phy-jz4770.c
+++ b/drivers/usb/phy/phy-jz4770.c
@@ -125,13 +125,13 @@ static int jz4770_phy_init(struct usb_phy *phy)
err = regulator_enable(priv->vcc_supply);
if (err) {
- dev_err(priv->dev, "Unable to enable VCC: %d", err);
+ dev_err(priv->dev, "Unable to enable VCC: %d\n", err);
return err;
}
err = clk_prepare_enable(priv->clk);
if (err) {
- dev_err(priv->dev, "Unable to start clock: %d", err);
+ dev_err(priv->dev, "Unable to start clock: %d\n", err);
return err;
}
@@ -191,7 +191,7 @@ static int jz4770_phy_probe(struct platform_device *pdev)
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
- dev_err(dev, "Failed to map registers");
+ dev_err(dev, "Failed to map registers\n");
return PTR_ERR(priv->base);
}
@@ -199,7 +199,7 @@ static int jz4770_phy_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk)) {
err = PTR_ERR(priv->clk);
if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clock");
+ dev_err(dev, "Failed to get clock\n");
return err;
}
@@ -207,14 +207,14 @@ static int jz4770_phy_probe(struct platform_device *pdev)
if (IS_ERR(priv->vcc_supply)) {
err = PTR_ERR(priv->vcc_supply);
if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator");
+ dev_err(dev, "Failed to get regulator\n");
return err;
}
err = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2);
if (err) {
if (err != -EPROBE_DEFER)
- dev_err(dev, "Unable to register PHY");
+ dev_err(dev, "Unable to register PHY\n");
return err;
}
diff --git a/drivers/usb/phy/phy-mv-usb.h b/drivers/usb/phy/phy-mv-usb.h
index 96701a1229ad..5d5c0abb0c3a 100644
--- a/drivers/usb/phy/phy-mv-usb.h
+++ b/drivers/usb/phy/phy-mv-usb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
*/
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index bfebf1f2e991..9a7e655d5280 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -377,7 +377,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
twl->irq1, status);
- return status;
+ goto err_put_regulator;
}
status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
@@ -386,8 +386,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
twl->irq2, status);
- free_irq(twl->irq1, twl);
- return status;
+ goto err_free_irq1;
}
twl->asleep = 0;
@@ -396,6 +395,13 @@ static int twl6030_usb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
return 0;
+
+err_free_irq1:
+ free_irq(twl->irq1, twl);
+err_put_regulator:
+ regulator_put(twl->usb3v3);
+
+ return status;
}
static int twl6030_usb_remove(struct platform_device *pdev)
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index ef1735d014da..eb34d762a63d 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Renesas USB driver
*
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index c3d3cc35cee0..7d3700bf41d9 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Renesas USB driver
*
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 65dc19ca528e..56b7106d254d 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Renesas USB driver
*
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 3b130529408b..a4ae9f97d9cd 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Renesas USB driver
*
diff --git a/drivers/usb/renesas_usbhs/rcar2.h b/drivers/usb/renesas_usbhs/rcar2.h
index 7d88732c5bff..046d07edb36f 100644
--- a/drivers/usb/renesas_usbhs/rcar2.h
+++ b/drivers/usb/renesas_usbhs/rcar2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
extern const struct renesas_usbhs_platform_info usbhs_rcar_gen2_plat_info;
diff --git a/drivers/usb/renesas_usbhs/rcar3.h b/drivers/usb/renesas_usbhs/rcar3.h
index c7c5ec1e3af2..d13db30bd21b 100644
--- a/drivers/usb/renesas_usbhs/rcar3.h
+++ b/drivers/usb/renesas_usbhs/rcar3.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
extern const struct renesas_usbhs_platform_info usbhs_rcar_gen3_plat_info;
diff --git a/drivers/usb/renesas_usbhs/rza.h b/drivers/usb/renesas_usbhs/rza.h
index 1ca42a6fd480..a29b75fef057 100644
--- a/drivers/usb/renesas_usbhs/rza.h
+++ b/drivers/usb/renesas_usbhs/rza.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
extern const struct renesas_usbhs_platform_info usbhs_rza1_plat_info;
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 5b17709821df..27d92af29635 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -49,8 +49,10 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
mutex_lock(&sw->lock);
ret = sw->set(sw, role);
- if (!ret)
+ if (!ret) {
sw->role = role;
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+ }
mutex_unlock(&sw->lock);
diff --git a/drivers/usb/serial/belkin_sa.h b/drivers/usb/serial/belkin_sa.h
index a13a98d284f2..89ec30c63cc6 100644
--- a/drivers/usb/serial/belkin_sa.h
+++ b/drivers/usb/serial/belkin_sa.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Definitions for Belkin USB Serial Adapter Driver
*
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index c5ecdcd51ffc..89675ee29645 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -73,6 +73,8 @@
#define CH341_LCR_CS6 0x01
#define CH341_LCR_CS5 0x00
+#define CH341_QUIRK_LIMITED_PRESCALER BIT(0)
+
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7523) },
@@ -87,6 +89,7 @@ struct ch341_private {
u8 mcr;
u8 msr;
u8 lcr;
+ unsigned long quirks;
};
static void ch341_set_termios(struct tty_struct *tty,
@@ -159,9 +162,11 @@ static const speed_t ch341_min_rates[] = {
* 2 <= div <= 256 if fact = 0, or
* 9 <= div <= 256 if fact = 1
*/
-static int ch341_get_divisor(speed_t speed)
+static int ch341_get_divisor(struct ch341_private *priv)
{
unsigned int fact, div, clk_div;
+ speed_t speed = priv->baud_rate;
+ bool force_fact0 = false;
int ps;
/*
@@ -187,8 +192,12 @@ static int ch341_get_divisor(speed_t speed)
clk_div = CH341_CLK_DIV(ps, fact);
div = CH341_CLKRATE / (clk_div * speed);
+ /* Some devices require a lower base clock if ps < 3. */
+ if (ps < 3 && (priv->quirks & CH341_QUIRK_LIMITED_PRESCALER))
+ force_fact0 = true;
+
/* Halve base clock (fact = 0) if required. */
- if (div < 9 || div > 255) {
+ if (div < 9 || div > 255 || force_fact0) {
div /= 2;
clk_div *= 2;
fact = 0;
@@ -227,7 +236,7 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
if (!priv->baud_rate)
return -EINVAL;
- val = ch341_get_divisor(priv->baud_rate);
+ val = ch341_get_divisor(priv);
if (val < 0)
return -EINVAL;
@@ -308,6 +317,54 @@ out: kfree(buffer);
return r;
}
+static int ch341_detect_quirks(struct usb_serial_port *port)
+{
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ struct usb_device *udev = port->serial->dev;
+ const unsigned int size = 2;
+ unsigned long quirks = 0;
+ char *buffer;
+ int r;
+
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ /*
+ * A subset of CH34x devices does not support all features. The
+ * prescaler is limited and there is no support for sending an RS232
+ * break condition. A read failure when trying to set up the latter is
+ * used to detect these devices.
+ */
+ r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), CH341_REQ_READ_REG,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ CH341_REG_BREAK, 0, buffer, size, DEFAULT_TIMEOUT);
+ if (r == -EPIPE) {
+ dev_dbg(&port->dev, "break control not supported\n");
+ quirks = CH341_QUIRK_LIMITED_PRESCALER;
+ r = 0;
+ goto out;
+ }
+
+ if (r != size) {
+ if (r >= 0)
+ r = -EIO;
+ dev_err(&port->dev, "failed to read break control: %d\n", r);
+ goto out;
+ }
+
+ r = 0;
+out:
+ kfree(buffer);
+
+ if (quirks) {
+ dev_dbg(&port->dev, "enabling quirk flags: 0x%02lx\n", quirks);
+ priv->quirks |= quirks;
+ }
+
+ return r;
+}
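
ch341_detect_quirks() identifies limited chips by issuing a register read that is known to stall (-EPIPE) on them, then latching a quirk flag instead of failing the probe. A hedged standalone model of that detect-by-failure idiom; probe_break_reg() mocks the usb_control_msg() call, and MOCK_EPIPE stands in for -EPIPE:

    #include <stdio.h>

    #define QUIRK_LIMITED_PRESCALER (1UL << 0)
    #define MOCK_EPIPE              (-32)   /* stands in for -EPIPE */

    /*
     * Mocked control transfer, standing in for usb_control_msg(): the
     * limited variant stalls the request, the full-featured one returns
     * the number of bytes transferred.
     */
    static int probe_break_reg(int limited, char *buf, int size)
    {
            (void)buf;
            return limited ? MOCK_EPIPE : size;
    }

    static unsigned long detect_quirks(int limited)
    {
            char buf[2];
            int r = probe_break_reg(limited, buf, sizeof(buf));

            if (r == MOCK_EPIPE)            /* stall: break register missing */
                    return QUIRK_LIMITED_PRESCALER;
            if (r != (int)sizeof(buf))      /* short transfer or other error */
                    return 0;
            return 0;                       /* full-featured chip, no quirks */
    }

    int main(void)
    {
            printf("quirks: %#lx %#lx\n", detect_quirks(1), detect_quirks(0));
            return 0;
    }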
+
static int ch341_port_probe(struct usb_serial_port *port)
{
struct ch341_private *priv;
@@ -330,6 +387,11 @@ static int ch341_port_probe(struct usb_serial_port *port)
goto error;
usb_set_serial_port_data(port, priv);
+
+ r = ch341_detect_quirks(port);
+ if (r < 0)
+ goto error;
+
return 0;
error: kfree(priv);
diff --git a/drivers/usb/serial/io_16654.h b/drivers/usb/serial/io_16654.h
index 4980f72dc56f..f18501f056cf 100644
--- a/drivers/usb/serial/io_16654.h
+++ b/drivers/usb/serial/io_16654.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/************************************************************************
*
* 16654.H Definitions for 16C654 UART used on EdgePorts
diff --git a/drivers/usb/serial/io_edgeport.h b/drivers/usb/serial/io_edgeport.h
index 2e7fedbaf2ff..43ba53a3a6fa 100644
--- a/drivers/usb/serial/io_edgeport.h
+++ b/drivers/usb/serial/io_edgeport.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/************************************************************************
*
* io_edgeport.h Edgeport Linux Interface definitions
diff --git a/drivers/usb/serial/io_ionsp.h b/drivers/usb/serial/io_ionsp.h
index 4b8e4823bd45..db4fce815c97 100644
--- a/drivers/usb/serial/io_ionsp.h
+++ b/drivers/usb/serial/io_ionsp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/************************************************************************
*
* IONSP.H Definitions for I/O Networks Serial Protocol
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index 9bbcee37524e..50b899d55ed0 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*****************************************************************************
*
* Copyright (C) 1997-2002 Inside Out Networks, Inc.
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 0d1a5bb4636e..52cbc353051f 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/************************************************************************
*
* USBVEND.H Vendor-specific USB definitions
diff --git a/drivers/usb/serial/iuu_phoenix.h b/drivers/usb/serial/iuu_phoenix.h
index b400b262f72e..87992b24d904 100644
--- a/drivers/usb/serial/iuu_phoenix.h
+++ b/drivers/usb/serial/iuu_phoenix.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Infinity Unlimited USB Phoenix driver
*
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 0084edf518e8..e3d09a83cab1 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Definitions for MCT (Magic Control Technology) USB-RS232 Converter Driver
*
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 8bfffca3e4ae..254a8bbeea67 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1157,6 +1157,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1031, 0xff), /* Telit LE910C1-EUX */
+ .driver_info = NCTRL(0) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
diff --git a/drivers/usb/serial/oti6858.h b/drivers/usb/serial/oti6858.h
index 1226bf2347eb..5c25836fdcd9 100644
--- a/drivers/usb/serial/oti6858.h
+++ b/drivers/usb/serial/oti6858.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Ours Technology Inc. OTi-6858 USB to serial adapter driver.
*/
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 52db5519aaf0..7d3090ee7e0c 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Prolific PL2303 USB to serial adaptor driver header file
*/
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index ce0401d3137f..d147feae83e6 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */
{DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 13be21aad2f4..4b9845807bee 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -270,6 +270,10 @@ static void usb_wwan_indat_callback(struct urb *urb)
if (status) {
dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n",
__func__, status, endpoint);
+
+ /* don't resubmit on fatal errors */
+ if (status == -ESHUTDOWN || status == -ENOENT)
+ return;
} else {
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data,
diff --git a/drivers/usb/serial/visor.h b/drivers/usb/serial/visor.h
index 4bd69d047036..622d639ce74e 100644
--- a/drivers/usb/serial/visor.h
+++ b/drivers/usb/serial/visor.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* USB HandSpring Visor driver
*
diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h
index 269e727a92f9..7e63074c9128 100644
--- a/drivers/usb/serial/whiteheat.h
+++ b/drivers/usb/serial/whiteheat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* USB ConnectTech WhiteHEAT driver
*
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index 16ce06039a4d..a6505ceb6693 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Debugging Functions Header File
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index 2dbf9c7d9749..dcd7b7e5eda8 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Header file for Special Initializers for certain USB Mass Storage devices
*
diff --git a/drivers/usb/storage/protocol.h b/drivers/usb/storage/protocol.h
index 072f1ffda2af..1d102463a66c 100644
--- a/drivers/usb/storage/protocol.h
+++ b/drivers/usb/storage/protocol.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Protocol Functions Header File
diff --git a/drivers/usb/storage/scsiglue.h b/drivers/usb/storage/scsiglue.h
index 2bc5ea045bf7..2a79c3ed4d86 100644
--- a/drivers/usb/storage/scsiglue.h
+++ b/drivers/usb/storage/scsiglue.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* SCSI Connecting Glue Header File
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index e605cbc3d8bf..b9f78ef3edc3 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -129,15 +129,11 @@ int sierra_ms_init(struct us_data *us)
int result, retries;
struct swoc_info *swocInfo;
struct usb_device *udev;
- struct Scsi_Host *sh;
retries = 3;
result = 0;
udev = us->pusb_dev;
- sh = us_to_host(us);
- scsi_get_host_dev(sh);
-
/* Force Modem mode */
if (swi_tru_install == TRU_FORCE_MODEM) {
usb_stor_dbg(us, "SWIMS: Forcing Modem Mode\n");
diff --git a/drivers/usb/storage/transport.h b/drivers/usb/storage/transport.h
index fb3bb4ee4ccf..74ffd0d7e7b6 100644
--- a/drivers/usb/storage/transport.h
+++ b/drivers/usb/storage/transport.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Transport Functions Header File
diff --git a/drivers/usb/storage/unusual_alauda.h b/drivers/usb/storage/unusual_alauda.h
index 0ec8c99a4976..13f61ec88cde 100644
--- a/drivers/usb/storage/unusual_alauda.h
+++ b/drivers/usb/storage/unusual_alauda.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Alauda-based card readers
*/
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index fb99e526cd48..0547daf116a2 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for devices based on the Cypress USB/ATA bridge
* with support for ATACB
diff --git a/drivers/usb/storage/unusual_datafab.h b/drivers/usb/storage/unusual_datafab.h
index fdab5e7d68ca..5335b5d2bd79 100644
--- a/drivers/usb/storage/unusual_datafab.h
+++ b/drivers/usb/storage/unusual_datafab.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Datafab USB Compact Flash reader
*/
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f6c3681fa2e9..b6a9a7451620 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Unusual Devices File
diff --git a/drivers/usb/storage/unusual_ene_ub6250.h b/drivers/usb/storage/unusual_ene_ub6250.h
index 9134b91fbd73..a3b32abc2b2f 100644
--- a/drivers/usb/storage/unusual_ene_ub6250.h
+++ b/drivers/usb/storage/unusual_ene_ub6250.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
#if defined(CONFIG_USB_STORAGE_ENE_UB6250) || \
defined(CONFIG_USB_STORAGE_ENE_UB6250_MODULE)
diff --git a/drivers/usb/storage/unusual_freecom.h b/drivers/usb/storage/unusual_freecom.h
index 949231c7a36b..9ca686364a93 100644
--- a/drivers/usb/storage/unusual_freecom.h
+++ b/drivers/usb/storage/unusual_freecom.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Freecom USB/IDE adaptor
*/
diff --git a/drivers/usb/storage/unusual_isd200.h b/drivers/usb/storage/unusual_isd200.h
index d03a02cc904e..f248190bd666 100644
--- a/drivers/usb/storage/unusual_isd200.h
+++ b/drivers/usb/storage/unusual_isd200.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for In-System Design, Inc. ISD200 ASIC
*/
diff --git a/drivers/usb/storage/unusual_jumpshot.h b/drivers/usb/storage/unusual_jumpshot.h
index c323338881ef..44878f849c1c 100644
--- a/drivers/usb/storage/unusual_jumpshot.h
+++ b/drivers/usb/storage/unusual_jumpshot.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Lexar "Jumpshot" Compact Flash reader
*/
diff --git a/drivers/usb/storage/unusual_karma.h b/drivers/usb/storage/unusual_karma.h
index 8f1eebd71d2c..9fbed4cbc895 100644
--- a/drivers/usb/storage/unusual_karma.h
+++ b/drivers/usb/storage/unusual_karma.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Rio Karma
*/
diff --git a/drivers/usb/storage/unusual_onetouch.h b/drivers/usb/storage/unusual_onetouch.h
index c76d4e990f7b..cdfee8f6cf37 100644
--- a/drivers/usb/storage/unusual_onetouch.h
+++ b/drivers/usb/storage/unusual_onetouch.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Maxtor OneTouch USB hard drive's button
*/
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index 7e14c2d7cf73..945dcb19d31d 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for Realtek RTS51xx USB card reader
*
diff --git a/drivers/usb/storage/unusual_sddr09.h b/drivers/usb/storage/unusual_sddr09.h
index 650cf2862754..bfb650974129 100644
--- a/drivers/usb/storage/unusual_sddr09.h
+++ b/drivers/usb/storage/unusual_sddr09.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for SanDisk SDDR-09 SmartMedia reader
*/
diff --git a/drivers/usb/storage/unusual_sddr55.h b/drivers/usb/storage/unusual_sddr55.h
index e89df2cea7bd..6d6f76eb0630 100644
--- a/drivers/usb/storage/unusual_sddr55.h
+++ b/drivers/usb/storage/unusual_sddr55.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for SanDisk SDDR-55 SmartMedia reader
*/
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 37157ed9a881..162b09d69f62 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Attached SCSI devices - Unusual Devices File
*
diff --git a/drivers/usb/storage/unusual_usbat.h b/drivers/usb/storage/unusual_usbat.h
index 05abf6870b8f..f9d3e5efc39d 100644
--- a/drivers/usb/storage/unusual_usbat.h
+++ b/drivers/usb/storage/unusual_usbat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
*/
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 5850d624cac7..0451fac1adce 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Main Header File
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index b4f2aac7ae8a..559dd06117e7 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -64,7 +64,8 @@ config TYPEC_HD3SS3220
config TYPEC_TPS6598X
tristate "TI TPS6598x USB Power Delivery controller driver"
depends on I2C
- select REGMAP_I2C
+ depends on REGMAP_I2C
+ depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
help
Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power
Delivery controller.
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 8d894bdff77d..c9234748537a 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -917,6 +917,12 @@ EXPORT_SYMBOL_GPL(typec_unregister_cable);
/* ------------------------------------------------------------------------- */
/* USB Type-C ports */
+static const char * const typec_orientations[] = {
+ [TYPEC_ORIENTATION_NONE] = "unknown",
+ [TYPEC_ORIENTATION_NORMAL] = "normal",
+ [TYPEC_ORIENTATION_REVERSE] = "reverse",
+};
+
static const char * const typec_roles[] = {
[TYPEC_SINK] = "sink",
[TYPEC_SOURCE] = "source",
@@ -1248,18 +1254,9 @@ static ssize_t orientation_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct typec_port *p = to_typec_port(dev);
- enum typec_orientation orientation = typec_get_orientation(p);
-
- switch (orientation) {
- case TYPEC_ORIENTATION_NORMAL:
- return sprintf(buf, "%s\n", "normal");
- case TYPEC_ORIENTATION_REVERSE:
- return sprintf(buf, "%s\n", "reverse");
- case TYPEC_ORIENTATION_NONE:
- default:
- return sprintf(buf, "%s\n", "unknown");
- }
+ struct typec_port *port = to_typec_port(dev);
+
+ return sprintf(buf, "%s\n", typec_orientations[port->orientation]);
}
static DEVICE_ATTR_RO(orientation);
@@ -1452,6 +1449,21 @@ void typec_set_pwr_opmode(struct typec_port *port,
EXPORT_SYMBOL_GPL(typec_set_pwr_opmode);
/**
+ * typec_find_orientation - Convert orientation string to enum typec_orientation
+ * @name: Orientation string
+ *
+ * This routine finds the typec_orientation whose string name is @name.
+ *
+ * Returns the orientation value on success, otherwise a negative error code.
+ */
+int typec_find_orientation(const char *name)
+{
+ return match_string(typec_orientations, ARRAY_SIZE(typec_orientations),
+ name);
+}
+EXPORT_SYMBOL_GPL(typec_find_orientation);
+
+/**
* typec_find_port_power_role - Get the typec port power capability
* @name: port power capability string
*
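
typec_find_orientation() above is the inverse of the typec_orientations[] table that orientation_show() now indexes directly: one array serves both directions, so the strings can never drift apart. A standalone model; match_str() is a userspace stand-in for the kernel's match_string(), which returns the matching index or a negative error:

    #include <stdio.h>
    #include <string.h>

    enum orientation { ORI_NONE, ORI_NORMAL, ORI_REVERSE };

    static const char * const orientations[] = {
            [ORI_NONE]    = "unknown",
            [ORI_NORMAL]  = "normal",
            [ORI_REVERSE] = "reverse",
    };

    /* Userspace stand-in for match_string(): index on match, -1 otherwise. */
    static int match_str(const char * const *arr, size_t n, const char *s)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (arr[i] && !strcmp(arr[i], s))
                            return (int)i;
            return -1;
    }

    int main(void)
    {
            printf("%d %d\n",
                   match_str(orientations, 3, "reverse"),   /* prints 2 */
                   match_str(orientations, 3, "sideways")); /* prints -1 */
            return 0;
    }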
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 77eb97b2aa86..a4dbd11f8ee2 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -11,7 +11,7 @@ config TYPEC_MUX_PI3USB30532
config TYPEC_MUX_INTEL_PMC
tristate "Intel PMC mux control"
- depends on INTEL_PMC_IPC
+ depends on INTEL_SCU_IPC
select USB_ROLE_SWITCH
help
Driver for USB muxes controlled by Intel PMC FW. Intel PMC FW can
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 67c5139cfa0d..962bc69a6a59 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -15,7 +15,7 @@
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_tbt.h>
-#include <asm/intel_pmc_ipc.h>
+#include <asm/intel_scu_ipc.h>
#define PMC_USBC_CMD 0xa7
@@ -63,6 +63,7 @@ enum {
#define PMC_USB_ALTMODE_DP_MODE_SHIFT 8
/* TBT specific Mode Data bits */
+#define PMC_USB_ALTMODE_HPD_HIGH BIT(14)
#define PMC_USB_ALTMODE_TBT_TYPE BIT(17)
#define PMC_USB_ALTMODE_CABLE_TYPE BIT(18)
#define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20)
@@ -74,8 +75,8 @@ enum {
#define PMC_USB_ALTMODE_TBT_GEN(_g_) (((_g_) & GENMASK(1, 0)) << 28)
/* Display HPD Request bits */
+#define PMC_USB_DP_HPD_LVL BIT(4)
#define PMC_USB_DP_HPD_IRQ BIT(5)
-#define PMC_USB_DP_HPD_LVL BIT(6)
struct pmc_usb;
@@ -91,14 +92,34 @@ struct pmc_usb_port {
u8 usb2_port;
u8 usb3_port;
+
+ enum typec_orientation sbu_orientation;
+ enum typec_orientation hsl_orientation;
};
struct pmc_usb {
u8 num_ports;
struct device *dev;
+ struct intel_scu_ipc_dev *ipc;
struct pmc_usb_port *port;
};
+static int sbu_orientation(struct pmc_usb_port *port)
+{
+ if (port->sbu_orientation)
+ return port->sbu_orientation - 1;
+
+ return port->orientation - 1;
+}
+
+static int hsl_orientation(struct pmc_usb_port *port)
+{
+ if (port->hsl_orientation)
+ return port->hsl_orientation - 1;
+
+ return port->orientation - 1;
+}
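
sbu_orientation() and hsl_orientation() encode a fallback: if firmware describes a fixed per-signal routing, it wins; otherwise the connector orientation applies. The -1 maps the 1-based enum (where NONE means "no override") onto the 0-based field in the PMC message. A standalone model of that fallback (struct port and sbu_ori() are illustrative):

    #include <stdio.h>

    enum ori { ORI_NONE, ORI_NORMAL, ORI_REVERSE };

    struct port {
            enum ori orientation;     /* connector orientation, always set */
            enum ori sbu_orientation; /* optional fixed SBU routing */
    };

    /*
     * Per-signal override with fallback: a fixed routing from firmware
     * wins, else follow the connector; -1 turns the 1-based enum into
     * the 0-based value the hardware message expects.
     */
    static int sbu_ori(const struct port *p)
    {
            if (p->sbu_orientation)
                    return p->sbu_orientation - 1;
            return p->orientation - 1;
    }

    int main(void)
    {
            struct port a = { ORI_REVERSE, ORI_NONE };   /* follows connector */
            struct port b = { ORI_REVERSE, ORI_NORMAL }; /* fixed SBU wiring */

            printf("%d %d\n", sbu_ori(&a), sbu_ori(&b)); /* prints 1 0 */
            return 0;
    }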
+
static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
{
u8 response[4];
@@ -107,9 +128,8 @@ static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
* Error bit will always be 0 with the USBC command.
* Status can be checked from the response message.
*/
- intel_pmc_ipc_command(PMC_USBC_CMD, 0, msg, len,
- (void *)response, 1);
-
+ intel_scu_ipc_dev_command(port->pmc->ipc, PMC_USBC_CMD, 0, msg, len,
+ response, sizeof(response));
if (response[2]) {
if (response[2] & BIT(1))
return -EIO;
@@ -151,15 +171,15 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
- req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
- req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
+
+ req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
+ req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
req.mode_data |= (state->mode - TYPEC_STATE_MODAL) <<
PMC_USB_ALTMODE_DP_MODE_SHIFT;
if (data->status & DP_STATUS_HPD_STATE)
- req.mode_data |= PMC_USB_DP_HPD_LVL <<
- PMC_USB_ALTMODE_DP_MODE_SHIFT;
+ req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
return pmc_usb_command(port, (void *)&req, sizeof(req));
}
@@ -177,8 +197,9 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state)
req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
- req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
- req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
+
+ req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
+ req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
if (TBT_ADAPTER(data->device_mode) == TBT_ADAPTER_TBT3)
req.mode_data |= PMC_USB_ALTMODE_TBT_TYPE;
@@ -215,8 +236,8 @@ static int pmc_usb_connect(struct pmc_usb_port *port)
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT;
- msg[1] |= (port->orientation - 1) << PMC_USB_MSG_ORI_HSL_SHIFT;
- msg[1] |= (port->orientation - 1) << PMC_USB_MSG_ORI_AUX_SHIFT;
+ msg[1] |= hsl_orientation(port) << PMC_USB_MSG_ORI_HSL_SHIFT;
+ msg[1] |= sbu_orientation(port) << PMC_USB_MSG_ORI_AUX_SHIFT;
return pmc_usb_command(port, msg, sizeof(msg));
}
@@ -300,6 +321,7 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
struct usb_role_switch_desc desc = { };
struct typec_switch_desc sw_desc = { };
struct typec_mux_desc mux_desc = { };
+ const char *str;
int ret;
ret = fwnode_property_read_u8(fwnode, "usb2-port-number", &port->usb2_port);
@@ -310,6 +332,14 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
if (ret)
return ret;
+ ret = fwnode_property_read_string(fwnode, "sbu-orientation", &str);
+ if (!ret)
+ port->sbu_orientation = typec_find_orientation(str);
+
+ ret = fwnode_property_read_string(fwnode, "hsl-orientation", &str);
+ if (!ret)
+ port->hsl_orientation = typec_find_orientation(str);
+
port->num = index;
port->pmc = pmc;
@@ -374,6 +404,10 @@ static int pmc_usb_probe(struct platform_device *pdev)
if (!pmc->port)
return -ENOMEM;
+ pmc->ipc = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ if (!pmc->ipc)
+ return -ENODEV;
+
pmc->dev = &pdev->dev;
/*
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 5b986d6c801d..fa3f39336246 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -41,8 +41,8 @@ config TYPEC_FUSB302
config TYPEC_WCOVE
tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
depends on ACPI
+ depends on MFD_INTEL_PMC_BXT
depends on INTEL_SOC_PMIC
- depends on INTEL_PMC_IPC
depends on BXT_WC_PMIC_OPREGION
help
This driver adds support for USB Type-C on Intel Broxton platforms
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index b498960ff72b..b28facece43c 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -9,14 +9,13 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/extcon.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/proc_fs.h>
#include <linux/regulator/consumer.h>
@@ -83,7 +82,7 @@ struct fusb302_chip {
struct work_struct irq_work;
bool irq_suspended;
bool irq_while_suspended;
- int gpio_int_n;
+ struct gpio_desc *gpio_int_n;
int gpio_int_n_irq;
struct extcon_dev *extcon;
@@ -1618,30 +1617,17 @@ done:
static int init_gpio(struct fusb302_chip *chip)
{
- struct device_node *node;
+ struct device *dev = chip->dev;
int ret = 0;
- node = chip->dev->of_node;
- chip->gpio_int_n = of_get_named_gpio(node, "fcs,int_n", 0);
- if (!gpio_is_valid(chip->gpio_int_n)) {
- ret = chip->gpio_int_n;
- dev_err(chip->dev, "cannot get named GPIO Int_N, ret=%d", ret);
- return ret;
- }
- ret = devm_gpio_request(chip->dev, chip->gpio_int_n, "fcs,int_n");
- if (ret < 0) {
- dev_err(chip->dev, "cannot request GPIO Int_N, ret=%d", ret);
- return ret;
- }
- ret = gpio_direction_input(chip->gpio_int_n);
- if (ret < 0) {
- dev_err(chip->dev,
- "cannot set GPIO Int_N to input, ret=%d", ret);
- return ret;
+ chip->gpio_int_n = devm_gpiod_get(dev, "fcs,int_n", GPIOD_IN);
+ if (IS_ERR(chip->gpio_int_n)) {
+ dev_err(dev, "failed to request gpio_int_n\n");
+ return PTR_ERR(chip->gpio_int_n);
}
- ret = gpio_to_irq(chip->gpio_int_n);
+ ret = gpiod_to_irq(chip->gpio_int_n);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"cannot request IRQ for GPIO Int_N, ret=%d", ret);
return ret;
}
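
The descriptor-based call preserves the old DT contract, since gpiolib derives the property name from the con_id:

/*
 * Lookup performed by devm_gpiod_get(dev, "fcs,int_n", GPIOD_IN) on DT:
 *   1. "fcs,int_n-gpios"  (preferred suffix)
 *   2. "fcs,int_n-gpio"   (legacy suffix)
 * The GPIO is requested and configured as input in a single call,
 * replacing the separate request and direction steps removed above.
 */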
diff --git a/drivers/usb/typec/tcpm/fusb302_reg.h b/drivers/usb/typec/tcpm/fusb302_reg.h
index 00b39d365478..edc0e4b0f1e6 100644
--- a/drivers/usb/typec/tcpm/fusb302_reg.h
+++ b/drivers/usb/typec/tcpm/fusb302_reg.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2016-2017 Google, Inc
*
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index 0698addd1185..b7c9fe5caabe 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -12,6 +12,7 @@
#include <linux/regmap.h>
#include <linux/interrupt.h>
#include <linux/usb/typec.h>
+#include <linux/usb/role.h>
/* Register offsets */
#define TPS_REG_VID 0x00
@@ -94,6 +95,7 @@ struct tps6598x {
struct typec_port *port;
struct typec_partner *partner;
struct usb_pd_identity partner_identity;
+ struct usb_role_switch *role_sw;
};
/*
@@ -190,6 +192,23 @@ static int tps6598x_read_partner_identity(struct tps6598x *tps)
return 0;
}
+static void tps6598x_set_data_role(struct tps6598x *tps,
+ enum typec_data_role role, bool connected)
+{
+ enum usb_role role_val;
+
+ if (role == TYPEC_HOST)
+ role_val = USB_ROLE_HOST;
+ else
+ role_val = USB_ROLE_DEVICE;
+
+ if (!connected)
+ role_val = USB_ROLE_NONE;
+
+ usb_role_switch_set_role(tps->role_sw, role_val);
+ typec_set_data_role(tps->port, role);
+}
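
The role mapping above reduces to a small truth table (illustrative summary only):

/*
 * typec data role   connected   usb_role passed to the switch
 * TYPEC_HOST        true        USB_ROLE_HOST
 * TYPEC_DEVICE      true        USB_ROLE_DEVICE
 * any               false       USB_ROLE_NONE
 */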
+
static int tps6598x_connect(struct tps6598x *tps, u32 status)
{
struct typec_partner_desc desc;
@@ -220,7 +239,7 @@ static int tps6598x_connect(struct tps6598x *tps, u32 status)
typec_set_pwr_opmode(tps->port, mode);
typec_set_pwr_role(tps->port, TPS_STATUS_PORTROLE(status));
typec_set_vconn_role(tps->port, TPS_STATUS_VCONN(status));
- typec_set_data_role(tps->port, TPS_STATUS_DATAROLE(status));
+ tps6598x_set_data_role(tps, TPS_STATUS_DATAROLE(status), true);
tps->partner = typec_register_partner(tps->port, &desc);
if (IS_ERR(tps->partner))
@@ -240,7 +259,7 @@ static void tps6598x_disconnect(struct tps6598x *tps, u32 status)
typec_set_pwr_opmode(tps->port, TYPEC_PWR_MODE_USB);
typec_set_pwr_role(tps->port, TPS_STATUS_PORTROLE(status));
typec_set_vconn_role(tps->port, TPS_STATUS_VCONN(status));
- typec_set_data_role(tps->port, TPS_STATUS_DATAROLE(status));
+ tps6598x_set_data_role(tps, TPS_STATUS_DATAROLE(status), false);
}
static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
@@ -328,7 +347,7 @@ static int tps6598x_dr_set(struct typec_port *port, enum typec_data_role role)
goto out_unlock;
}
- typec_set_data_role(tps->port, role);
+ tps6598x_set_data_role(tps, role, true);
out_unlock:
mutex_unlock(&tps->lock);
@@ -452,6 +471,7 @@ static int tps6598x_probe(struct i2c_client *client)
{
struct typec_capability typec_cap = { };
struct tps6598x *tps;
+ struct fwnode_handle *fwnode;
u32 status;
u32 conf;
u32 vid;
@@ -495,11 +515,22 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ fwnode = device_get_named_child_node(&client->dev, "connector");
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
+ tps->role_sw = fwnode_usb_role_switch_get(fwnode);
+ if (IS_ERR(tps->role_sw)) {
+ ret = PTR_ERR(tps->role_sw);
+ goto err_fwnode_put;
+ }
+
typec_cap.revision = USB_TYPEC_REV_1_2;
typec_cap.pd_revision = 0x200;
typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
typec_cap.driver_data = tps;
typec_cap.ops = &tps6598x_ops;
+ typec_cap.fwnode = fwnode;
switch (TPS_SYSCONF_PORTINFO(conf)) {
case TPS_PORTINFO_SINK_ACCESSORY:
@@ -525,12 +556,16 @@ static int tps6598x_probe(struct i2c_client *client)
typec_cap.data = TYPEC_PORT_DFP;
break;
default:
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_role_put;
}
tps->port = typec_register_port(&client->dev, &typec_cap);
- if (IS_ERR(tps->port))
- return PTR_ERR(tps->port);
+ if (IS_ERR(tps->port)) {
+ ret = PTR_ERR(tps->port);
+ goto err_role_put;
+ }
+ fwnode_handle_put(fwnode);
if (status & TPS_STATUS_PLUG_PRESENT) {
ret = tps6598x_connect(tps, status);
@@ -545,12 +580,19 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret) {
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
- return ret;
+ goto err_role_put;
}
i2c_set_clientdata(client, tps);
return 0;
+
+err_role_put:
+ usb_role_switch_put(tps->role_sw);
+err_fwnode_put:
+ fwnode_handle_put(fwnode);
+
+ return ret;
}
static int tps6598x_remove(struct i2c_client *client)
@@ -559,10 +601,17 @@ static int tps6598x_remove(struct i2c_client *client)
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
+ usb_role_switch_put(tps->role_sw);
return 0;
}
+static const struct of_device_id tps6598x_of_match[] = {
+ { .compatible = "ti,tps6598x", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tps6598x_of_match);
+
static const struct i2c_device_id tps6598x_id[] = {
{ "tps6598x" },
{ }
@@ -572,6 +621,7 @@ MODULE_DEVICE_TABLE(i2c, tps6598x_id);
static struct i2c_driver tps6598x_i2c_driver = {
.driver = {
.name = "tps6598x",
+ .of_match_table = tps6598x_of_match,
},
.probe_new = tps6598x_probe,
.remove = tps6598x_remove,
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index b35e15a1f02c..8a8eb5cb8e0f 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -7,6 +7,10 @@ typec_ucsi-y := ucsi.o
typec_ucsi-$(CONFIG_TRACING) += trace.o
+ifneq ($(CONFIG_POWER_SUPPLY),)
+ typec_ucsi-y += psy.o
+endif
+
ifneq ($(CONFIG_TYPEC_DP_ALTMODE),)
typec_ucsi-y += displayport.o
endif
diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
new file mode 100644
index 000000000000..26ed0b520749
--- /dev/null
+++ b/drivers/usb/typec/ucsi/psy.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power Supply for UCSI
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: K V, Abhilash <abhilash.k.v@intel.com>
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/property.h>
+#include <linux/usb/pd.h>
+
+#include "ucsi.h"
+
+/* Power Supply access to expose source power information */
+enum ucsi_psy_online_states {
+ UCSI_PSY_OFFLINE = 0,
+ UCSI_PSY_FIXED_ONLINE,
+ UCSI_PSY_PROG_ONLINE,
+};
+
+static enum power_supply_property ucsi_psy_props[] = {
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static int ucsi_psy_get_online(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ val->intval = UCSI_PSY_OFFLINE;
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED &&
+ (con->status.flags & UCSI_CONSTAT_PWR_DIR) == TYPEC_SINK)
+ val->intval = UCSI_PSY_FIXED_ONLINE;
+ return 0;
+}
+
+static int ucsi_psy_get_voltage_min(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u32 pdo;
+
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
+ case UCSI_CONSTAT_PWR_OPMODE_PD:
+ pdo = con->src_pdos[0];
+ val->intval = pdo_fixed_voltage(pdo) * 1000;
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ case UCSI_CONSTAT_PWR_OPMODE_BC:
+ case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
+ val->intval = UCSI_TYPEC_VSAFE5V * 1000;
+ break;
+ default:
+ val->intval = 0;
+ break;
+ }
+ return 0;
+}
+
+static int ucsi_psy_get_voltage_max(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u32 pdo;
+
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
+ case UCSI_CONSTAT_PWR_OPMODE_PD:
+ if (con->num_pdos > 0) {
+ pdo = con->src_pdos[con->num_pdos - 1];
+ val->intval = pdo_fixed_voltage(pdo) * 1000;
+ } else {
+ val->intval = 0;
+ }
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ case UCSI_CONSTAT_PWR_OPMODE_BC:
+ case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
+ val->intval = UCSI_TYPEC_VSAFE5V * 1000;
+ break;
+ default:
+ val->intval = 0;
+ break;
+ }
+ return 0;
+}
+
+static int ucsi_psy_get_voltage_now(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ int index;
+ u32 pdo;
+
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
+ case UCSI_CONSTAT_PWR_OPMODE_PD:
+ index = rdo_index(con->rdo);
+ if (index > 0) {
+ pdo = con->src_pdos[index - 1];
+ val->intval = pdo_fixed_voltage(pdo) * 1000;
+ } else {
+ val->intval = 0;
+ }
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ case UCSI_CONSTAT_PWR_OPMODE_BC:
+ case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
+ val->intval = UCSI_TYPEC_VSAFE5V * 1000;
+ break;
+ default:
+ val->intval = 0;
+ break;
+ }
+ return 0;
+}
+
+static int ucsi_psy_get_current_max(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u32 pdo;
+
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
+ case UCSI_CONSTAT_PWR_OPMODE_PD:
+ if (con->num_pdos > 0) {
+ pdo = con->src_pdos[con->num_pdos - 1];
+ val->intval = pdo_max_current(pdo) * 1000;
+ } else {
+ val->intval = 0;
+ }
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ val->intval = UCSI_TYPEC_1_5_CURRENT * 1000;
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ val->intval = UCSI_TYPEC_3_0_CURRENT * 1000;
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_BC:
+ case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
+		/* UCSI cannot distinguish between DCP/CDP and USB2/3x1/3x2 SDP chargers */
+ default:
+ val->intval = 0;
+ break;
+ }
+ return 0;
+}
+
+static int ucsi_psy_get_current_now(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u16 flags = con->status.flags;
+
+ if (UCSI_CONSTAT_PWR_OPMODE(flags) == UCSI_CONSTAT_PWR_OPMODE_PD)
+ val->intval = rdo_op_current(con->rdo) * 1000;
+ else
+ val->intval = 0;
+ return 0;
+}
+
+static int ucsi_psy_get_usb_type(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u16 flags = con->status.flags;
+
+ val->intval = POWER_SUPPLY_USB_TYPE_C;
+ if (flags & UCSI_CONSTAT_CONNECTED &&
+ UCSI_CONSTAT_PWR_OPMODE(flags) == UCSI_CONSTAT_PWR_OPMODE_PD)
+ val->intval = POWER_SUPPLY_USB_TYPE_PD;
+
+ return 0;
+}
+
+static int ucsi_psy_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ucsi_connector *con = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return ucsi_psy_get_usb_type(con, val);
+ case POWER_SUPPLY_PROP_ONLINE:
+ return ucsi_psy_get_online(con, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ return ucsi_psy_get_voltage_min(con, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ return ucsi_psy_get_voltage_max(con, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ return ucsi_psy_get_voltage_now(con, val);
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return ucsi_psy_get_current_max(con, val);
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return ucsi_psy_get_current_now(con, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum power_supply_usb_type ucsi_psy_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_C,
+ POWER_SUPPLY_USB_TYPE_PD,
+ POWER_SUPPLY_USB_TYPE_PD_PPS,
+};
+
+int ucsi_register_port_psy(struct ucsi_connector *con)
+{
+ struct power_supply_config psy_cfg = {};
+ struct device *dev = con->ucsi->dev;
+ char *psy_name;
+
+ psy_cfg.drv_data = con;
+ psy_cfg.fwnode = dev_fwnode(dev);
+
+ psy_name = devm_kasprintf(dev, GFP_KERNEL, "ucsi-source-psy-%s%d",
+ dev_name(dev), con->num);
+ if (!psy_name)
+ return -ENOMEM;
+
+ con->psy_desc.name = psy_name;
+	con->psy_desc.type = POWER_SUPPLY_TYPE_USB;
+	con->psy_desc.usb_types = ucsi_psy_usb_types;
+	con->psy_desc.num_usb_types = ARRAY_SIZE(ucsi_psy_usb_types);
+	con->psy_desc.properties = ucsi_psy_props;
+	con->psy_desc.num_properties = ARRAY_SIZE(ucsi_psy_props);
+ con->psy_desc.get_property = ucsi_psy_get_prop;
+
+ con->psy = power_supply_register(dev, &con->psy_desc, &psy_cfg);
+
+ return PTR_ERR_OR_ZERO(con->psy);
+}
+
+void ucsi_unregister_port_psy(struct ucsi_connector *con)
+{
+ if (IS_ERR_OR_NULL(con->psy))
+ return;
+
+ power_supply_unregister(con->psy);
+}
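
The pdo_fixed_voltage() and pdo_max_current() helpers used above come from <linux/usb/pd.h> and decode the fixed-supply PDO layout (voltage in 50 mV units, current in 10 mA units); the * 1000 factors then convert those mV/mA results to the microvolt/microamp units the power-supply class expects. A worked example for a 5 V / 3 A fixed source PDO:

#include <linux/usb/pd.h>

u32 pdo = PDO_FIXED(5000, 3000, 0);	/* 5 V, 3 A fixed supply PDO */

/* pdo_fixed_voltage(pdo) == 5000 (mV), reported as 5000000 uV */
/* pdo_max_current(pdo)  == 3000 (mA), reported as 3000000 uA */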
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index 48ad1dc1b1b2..cb62ad835761 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -35,16 +35,16 @@ const char *ucsi_cmd_str(u64 raw_cmd)
const char *ucsi_cci_str(u32 cci)
{
- if (cci & GENMASK(7, 0)) {
- if (cci & BIT(29))
+ if (UCSI_CCI_CONNECTOR(cci)) {
+ if (cci & UCSI_CCI_ACK_COMPLETE)
return "Event pending (ACK completed)";
- if (cci & BIT(31))
+ if (cci & UCSI_CCI_COMMAND_COMPLETE)
return "Event pending (command completed)";
return "Connector Change";
}
- if (cci & BIT(29))
+ if (cci & UCSI_CCI_ACK_COMPLETE)
return "ACK completed";
- if (cci & BIT(31))
+ if (cci & UCSI_CCI_COMMAND_COMPLETE)
return "Command completed";
return "";
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index ddf2ad3752de..d0c63afaf345 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -492,19 +492,45 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
}
}
+static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
+{
+ struct ucsi *ucsi = con->ucsi;
+ u64 command;
+ int ret;
+
+ command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
+ command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
+ command |= UCSI_GET_PDOS_SRC_PDOS;
+ ret = ucsi_run_command(ucsi, command, con->src_pdos,
+ sizeof(con->src_pdos));
+ if (ret < 0) {
+ dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
+ return;
+ }
+	con->num_pdos = ret / sizeof(u32); /* convert byte count to 32-bit PDO count */
+ if (ret == 0)
+ dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
case UCSI_CONSTAT_PWR_OPMODE_PD:
+ con->rdo = con->status.request_data_obj;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
+ ucsi_get_pdos(con, 1);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ con->rdo = 0;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_1_5A);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ con->rdo = 0;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_3_0A);
break;
default:
+ con->rdo = 0;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_USB);
break;
}
@@ -566,6 +592,8 @@ static void ucsi_partner_change(struct ucsi_connector *con)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
@@ -611,7 +639,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
- if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE)
+ if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE ||
+ con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE)
ucsi_pwr_opmode_change(con);
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
@@ -627,6 +656,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
@@ -905,6 +936,10 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
cap->driver_data = con;
cap->ops = &ucsi_ops;
+ ret = ucsi_register_port_psy(con);
+ if (ret)
+ return ret;
+
/* Register the connector */
con->port = typec_register_port(ucsi->dev, cap);
if (IS_ERR(con->port))
@@ -927,6 +962,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
@@ -1029,6 +1066,7 @@ err_unregister:
for (con = ucsi->connector; con->port; con++) {
ucsi_unregister_partner(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(con);
typec_unregister_port(con->port);
con->port = NULL;
}
@@ -1152,6 +1190,7 @@ void ucsi_unregister(struct ucsi *ucsi)
ucsi_unregister_partner(&ucsi->connector[i]);
ucsi_unregister_altmodes(&ucsi->connector[i],
UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
typec_unregister_port(ucsi->connector[i].port);
}
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 8e831108f481..cba6f77bea61 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -5,6 +5,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/power_supply.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
@@ -21,7 +22,7 @@ struct ucsi_altmode;
#define UCSI_MESSAGE_OUT 32
/* Command Status and Connector Change Indication (CCI) bits */
-#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 0)) >> 1)
+#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 1)) >> 1)
#define UCSI_CCI_LENGTH(_c_) (((_c_) & GENMASK(15, 8)) >> 8)
#define UCSI_CCI_NOT_SUPPORTED BIT(25)
#define UCSI_CCI_CANCEL_COMPLETE BIT(26)
@@ -130,6 +131,11 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_ALTMODE_OFFSET(_r_) ((u64)(_r_) << 32)
#define UCSI_GET_ALTMODE_NUM_ALTMODES(_r_) ((u64)(_r_) << 40)
+/* GET_PDOS command bits */
+#define UCSI_GET_PDOS_PARTNER_PDO(_r_) ((u64)(_r_) << 23)
+#define UCSI_GET_PDOS_NUM_PDOS(_r_) ((u64)(_r_) << 32)
+#define UCSI_GET_PDOS_SRC_PDOS ((u64)1 << 34)
+
/* -------------------------------------------------------------------------- */
/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
@@ -294,6 +300,11 @@ struct ucsi {
#define UCSI_MAX_SVID 5
#define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6)
+#define UCSI_MAX_PDOS (4)
+
+#define UCSI_TYPEC_VSAFE5V 5000
+#define UCSI_TYPEC_1_5_CURRENT 1500
+#define UCSI_TYPEC_3_0_CURRENT 3000
struct ucsi_connector {
int num;
@@ -313,6 +324,11 @@ struct ucsi_connector {
struct ucsi_connector_status status;
struct ucsi_connector_capability cap;
+ struct power_supply *psy;
+ struct power_supply_desc psy_desc;
+ u32 rdo;
+ u32 src_pdos[UCSI_MAX_PDOS];
+ int num_pdos;
};
int ucsi_send_command(struct ucsi *ucsi, u64 command,
@@ -321,6 +337,14 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
void ucsi_altmode_update_active(struct ucsi_connector *con);
int ucsi_resume(struct ucsi *ucsi);
+#if IS_ENABLED(CONFIG_POWER_SUPPLY)
+int ucsi_register_port_psy(struct ucsi_connector *con);
+void ucsi_unregister_port_psy(struct ucsi_connector *con);
+#else
+static inline int ucsi_register_port_psy(struct ucsi_connector *con) { return 0; }
+static inline void ucsi_unregister_port_psy(struct ucsi_connector *con) { }
+#endif /* CONFIG_POWER_SUPPLY */
+
#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
struct typec_altmode *
ucsi_register_displayport(struct ucsi_connector *con,
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 7957d2d41fc4..01c456f7c1f7 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -89,15 +89,14 @@ static struct vdpasim *dev_to_sim(struct device *dev)
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
- int ret;
- ret = vringh_init_iotlb(&vq->vring, vdpasim_features,
- VDPASIM_QUEUE_MAX, false,
- (struct vring_desc *)(uintptr_t)vq->desc_addr,
- (struct vring_avail *)
- (uintptr_t)vq->driver_addr,
- (struct vring_used *)
- (uintptr_t)vq->device_addr);
+ vringh_init_iotlb(&vq->vring, vdpasim_features,
+ VDPASIM_QUEUE_MAX, false,
+ (struct vring_desc *)(uintptr_t)vq->desc_addr,
+ (struct vring_avail *)
+ (uintptr_t)vq->driver_addr,
+ (struct vring_used *)
+ (uintptr_t)vq->device_addr);
}
static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 8ad14e5c02bf..917fd84c1c6f 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -110,7 +110,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
"%s-%s", dev_driver_string(parent->dev),
group->name);
if (ret) {
- kfree(type);
+ kobject_put(&type->kobj);
return ERR_PTR(ret);
}
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 6c6b37b5c04e..3fc198f3eeb5 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -26,6 +26,7 @@
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
+#include <linux/sched/mm.h>
#include "vfio_pci_private.h"
@@ -184,6 +185,7 @@ no_mmap:
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
/*
* INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
@@ -519,6 +521,10 @@ static void vfio_pci_release(void *device_data)
vfio_pci_vf_token_user_add(vdev, -1);
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
+ if (vdev->err_trigger)
+ eventfd_ctx_put(vdev->err_trigger);
+ if (vdev->req_trigger)
+ eventfd_ctx_put(vdev->req_trigger);
}
mutex_unlock(&vdev->reflck->lock);
@@ -736,6 +742,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
return 0;
}
+struct vfio_devices {
+ struct vfio_device **devices;
+ int cur_index;
+ int max_index;
+};
+
static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
@@ -809,7 +821,7 @@ static long vfio_pci_ioctl(void *device_data,
{
void __iomem *io;
size_t size;
- u16 orig_cmd;
+ u16 cmd;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
@@ -829,10 +841,7 @@ static long vfio_pci_ioctl(void *device_data,
* Is it really there? Enable memory decode for
* implicit access in pci_map_rom().
*/
- pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
- pci_write_config_word(pdev, PCI_COMMAND,
- orig_cmd | PCI_COMMAND_MEMORY);
-
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
io = pci_map_rom(pdev, &size);
if (io) {
info.flags = VFIO_REGION_INFO_FLAG_READ;
@@ -840,8 +849,8 @@ static long vfio_pci_ioctl(void *device_data,
} else {
info.size = 0;
}
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
- pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
@@ -984,8 +993,16 @@ static long vfio_pci_ioctl(void *device_data,
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
- return vdev->reset_works ?
- pci_try_reset_function(vdev->pdev) : -EINVAL;
+ int ret;
+
+ if (!vdev->reset_works)
+ return -EINVAL;
+
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ ret = pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+
+ return ret;
} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
struct vfio_pci_hot_reset_info hdr;
@@ -1065,8 +1082,9 @@ reset_info_exit:
int32_t *group_fds;
struct vfio_pci_group_entry *groups;
struct vfio_pci_group_info info;
+ struct vfio_devices devs = { .cur_index = 0 };
bool slot = false;
- int i, count = 0, ret = 0;
+ int i, group_idx, mem_idx = 0, count = 0, ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset, count);
@@ -1118,9 +1136,9 @@ reset_info_exit:
* user interface and store the group and iommu ID. This
* ensures the group is held across the reset.
*/
- for (i = 0; i < hdr.count; i++) {
+ for (group_idx = 0; group_idx < hdr.count; group_idx++) {
struct vfio_group *group;
- struct fd f = fdget(group_fds[i]);
+ struct fd f = fdget(group_fds[group_idx]);
if (!f.file) {
ret = -EBADF;
break;
@@ -1133,8 +1151,9 @@ reset_info_exit:
break;
}
- groups[i].group = group;
- groups[i].id = vfio_external_user_iommu_id(group);
+ groups[group_idx].group = group;
+ groups[group_idx].id =
+ vfio_external_user_iommu_id(group);
}
kfree(group_fds);
@@ -1153,13 +1172,63 @@ reset_info_exit:
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_validate_devs,
&info, slot);
- if (!ret)
- /* User has access, do the reset */
- ret = pci_reset_bus(vdev->pdev);
+ if (ret)
+ goto hot_reset_release;
+
+ devs.max_index = count;
+ devs.devices = kcalloc(count, sizeof(struct vfio_device *),
+ GFP_KERNEL);
+ if (!devs.devices) {
+ ret = -ENOMEM;
+ goto hot_reset_release;
+ }
+
+ /*
+ * We need to get memory_lock for each device, but devices
+ * can share mmap_sem, therefore we need to zap and hold
+ * the vma_lock for each device, and only then get each
+ * memory_lock.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_try_zap_and_vma_lock_cb,
+ &devs, slot);
+ if (ret)
+ goto hot_reset_release;
+
+ for (; mem_idx < devs.cur_index; mem_idx++) {
+ struct vfio_pci_device *tmp;
+
+ tmp = vfio_device_data(devs.devices[mem_idx]);
+
+ ret = down_write_trylock(&tmp->memory_lock);
+ if (!ret) {
+ ret = -EBUSY;
+ goto hot_reset_release;
+ }
+ mutex_unlock(&tmp->vma_lock);
+ }
+
+ /* User has access, do the reset */
+ ret = pci_reset_bus(vdev->pdev);
hot_reset_release:
- for (i--; i >= 0; i--)
- vfio_group_put_external_user(groups[i].group);
+ for (i = 0; i < devs.cur_index; i++) {
+ struct vfio_device *device;
+ struct vfio_pci_device *tmp;
+
+ device = devs.devices[i];
+ tmp = vfio_device_data(device);
+
+ if (i < mem_idx)
+ up_write(&tmp->memory_lock);
+ else
+ mutex_unlock(&tmp->vma_lock);
+ vfio_device_put(device);
+ }
+ kfree(devs.devices);
+
+ for (group_idx--; group_idx >= 0; group_idx--)
+ vfio_group_put_external_user(groups[group_idx].group);
kfree(groups);
return ret;
@@ -1299,6 +1368,202 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
+/* Returns 1 with vma_lock acquired after zapping, 0 on contention (only with @try) */
+static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
+{
+ struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+
+ /*
+ * Lock ordering:
+ * vma_lock is nested under mmap_sem for vm_ops callback paths.
+ * The memory_lock semaphore is used by both code paths calling
+ * into this function to zap vmas and the vm_ops.fault callback
+ * to protect the memory enable state of the device.
+ *
+ * When zapping vmas we need to maintain the mmap_sem => vma_lock
+ * ordering, which requires using vma_lock to walk vma_list to
+ * acquire an mm, then dropping vma_lock to get the mmap_sem and
+ * reacquiring vma_lock. This logic is derived from similar
+ * requirements in uverbs_user_mmap_disassociate().
+ *
+ * mmap_sem must always be the top-level lock when it is taken.
+ * Therefore we can only hold the memory_lock write lock when
+ * vma_list is empty, as we'd need to take mmap_sem to clear
+ * entries. vma_list can only be guaranteed empty when holding
+ * vma_lock, thus memory_lock is nested under vma_lock.
+ *
+ * This enables the vm_ops.fault callback to acquire vma_lock,
+ * followed by memory_lock read lock, while already holding
+ * mmap_sem without risk of deadlock.
+ */
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock))
+ return 0;
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ while (!list_empty(&vdev->vma_list)) {
+ mmap_vma = list_first_entry(&vdev->vma_list,
+ struct vfio_pci_mmap_vma,
+ vma_next);
+ mm = mmap_vma->vma->vm_mm;
+ if (mmget_not_zero(mm))
+ break;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ mm = NULL;
+ }
+ if (!mm)
+ return 1;
+ mutex_unlock(&vdev->vma_lock);
+
+ if (try) {
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ down_read(&mm->mmap_sem);
+ }
+ if (mmget_still_valid(mm)) {
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock)) {
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ list_for_each_entry_safe(mmap_vma, tmp,
+ &vdev->vma_list, vma_next) {
+ struct vm_area_struct *vma = mmap_vma->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+ }
+ mutex_unlock(&vdev->vma_lock);
+ }
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
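
Condensed, the ordering rules described in the comment above are (a summary only, no new semantics):

/*
 * When held together, the lock order is always:
 *
 *   mmap_sem -> vma_lock -> memory_lock
 *
 * fault path: mmap_sem (held by the mm core) -> vma_lock
 *             -> memory_lock (read)
 * zap path:   down_read(mmap_sem) -> vma_lock while zapping;
 *             memory_lock (write) is taken only once vma_list is
 *             empty, while still holding vma_lock
 */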
+
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
+{
+ vfio_pci_zap_and_vma_lock(vdev, false);
+ down_write(&vdev->memory_lock);
+ mutex_unlock(&vdev->vma_lock);
+}
+
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
+{
+ u16 cmd;
+
+ down_write(&vdev->memory_lock);
+ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY))
+ pci_write_config_word(vdev->pdev, PCI_COMMAND,
+ cmd | PCI_COMMAND_MEMORY);
+
+ return cmd;
+}
+
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
+{
+ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
+ up_write(&vdev->memory_lock);
+}
+
+/* Caller holds vma_lock */
+static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
+ struct vm_area_struct *vma)
+{
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+ if (!mmap_vma)
+ return -ENOMEM;
+
+ mmap_vma->vma = vma;
+ list_add(&mmap_vma->vma_next, &vdev->vma_list);
+
+ return 0;
+}
+
+/*
+ * Zap mmaps on open so that we can fault them in on access, and so that
+ * our vma_list tracks only mappings accessed since the last zap.
+ */
+static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+{
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+{
+ struct vfio_pci_device *vdev = vma->vm_private_data;
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mutex_lock(&vdev->vma_lock);
+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+ if (mmap_vma->vma == vma) {
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ break;
+ }
+ }
+ mutex_unlock(&vdev->vma_lock);
+}
+
+static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_device *vdev = vma->vm_private_data;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+
+ mutex_lock(&vdev->vma_lock);
+ down_read(&vdev->memory_lock);
+
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ ret = VM_FAULT_SIGBUS;
+ mutex_unlock(&vdev->vma_lock);
+ goto up_out;
+ }
+
+ if (__vfio_pci_add_vma(vdev, vma)) {
+ ret = VM_FAULT_OOM;
+ mutex_unlock(&vdev->vma_lock);
+ goto up_out;
+ }
+
+ mutex_unlock(&vdev->vma_lock);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ ret = VM_FAULT_SIGBUS;
+
+up_out:
+ up_read(&vdev->memory_lock);
+ return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_mmap_ops = {
+ .open = vfio_pci_mmap_open,
+ .close = vfio_pci_mmap_close,
+ .fault = vfio_pci_mmap_fault,
+};
+
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
struct vfio_pci_device *vdev = device_data;
@@ -1357,8 +1622,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- req_len, vma->vm_page_prot);
+ /*
+	 * See remap_pfn_range(), called from vfio_pci_mmap_fault(), but we can't
+ * change vm_flags within the fault handler. Set them now.
+ */
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &vfio_pci_mmap_ops;
+
+ return 0;
}
static void vfio_pci_request(void *device_data, unsigned int count)
@@ -1608,6 +1879,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&vdev->irqlock);
mutex_init(&vdev->ioeventfds_lock);
INIT_LIST_HEAD(&vdev->ioeventfds_list);
+ mutex_init(&vdev->vma_lock);
+ INIT_LIST_HEAD(&vdev->vma_list);
+ init_rwsem(&vdev->memory_lock);
ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
if (ret)
@@ -1861,12 +2135,6 @@ static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
}
-struct vfio_devices {
- struct vfio_device **devices;
- int cur_index;
- int max_index;
-};
-
static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
@@ -1897,6 +2165,39 @@ static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
return 0;
}
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
+{
+ struct vfio_devices *devs = data;
+ struct vfio_device *device;
+ struct vfio_pci_device *vdev;
+
+ if (devs->cur_index == devs->max_index)
+ return -ENOSPC;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (!device)
+ return -EINVAL;
+
+ if (pci_dev_driver(pdev) != &vfio_pci_driver) {
+ vfio_device_put(device);
+ return -EBUSY;
+ }
+
+ vdev = vfio_device_data(device);
+
+ /*
+	 * Locking multiple devices is prone to deadlock; run away and
+	 * unwind if we hit contention.
+ */
+ if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
+ vfio_device_put(device);
+ return -EBUSY;
+ }
+
+ devs->devices[devs->cur_index++] = device;
+ return 0;
+}
+
/*
* If a bus or slot reset is available for the provided device and:
* - All of the devices affected by that bus or slot reset are unused
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 90c0b80f8acf..8746c943247a 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -395,6 +395,14 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
*(__le32 *)(&p->write[off]) = cpu_to_le32(write);
}
+/* Caller should hold memory_lock semaphore */
+bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+{
+ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+
+ return cmd & PCI_COMMAND_MEMORY;
+}
+
/*
* Restore the *real* BARs after we detect a FLR or backdoor reset.
* (backdoor = some device specific technique that we didn't catch)
@@ -556,13 +564,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
new_cmd = le32_to_cpu(val);
+ phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+ new_io = !!(new_cmd & PCI_COMMAND_IO);
+
phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
- phys_io = !!(phys_cmd & PCI_COMMAND_IO);
- virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
- new_io = !!(new_cmd & PCI_COMMAND_IO);
+ if (!new_mem)
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ else
+ down_write(&vdev->memory_lock);
/*
* If the user is writing mem/io enable (new_mem/io) and we
@@ -579,8 +592,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
}
count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
- if (count < 0)
+ if (count < 0) {
+ if (offset == PCI_COMMAND)
+ up_write(&vdev->memory_lock);
return count;
+ }
/*
* Save current memory/io enable bits in vconfig to allow for
@@ -591,6 +607,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
*virt_cmd &= cpu_to_le16(~mask);
*virt_cmd |= cpu_to_le16(new_cmd & mask);
+
+ up_write(&vdev->memory_lock);
}
/* Emulate INTx disable */
@@ -828,8 +846,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
pos - offset + PCI_EXP_DEVCAP,
&cap);
- if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+ if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
}
/*
@@ -907,8 +928,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
pos - offset + PCI_AF_CAP,
&cap);
- if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
}
return count;
@@ -1462,7 +1486,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
if (ret)
return ret;
- if (cap <= PCI_CAP_ID_MAX) {
+ /*
+ * ID 0 is a NULL capability, conflicting with our fake
+ * PCI_CAP_ID_BASIC. As it has no content, consider it
+ * hidden for now.
+ */
+ if (cap && cap <= PCI_CAP_ID_MAX) {
len = pci_cap_length[cap];
if (len == 0xFF) { /* Variable length */
len = vfio_cap_len(vdev, cap, pos);
@@ -1728,8 +1757,11 @@ void vfio_config_free(struct vfio_pci_device *vdev)
vdev->vconfig = NULL;
kfree(vdev->pci_config_map);
vdev->pci_config_map = NULL;
- kfree(vdev->msi_perm);
- vdev->msi_perm = NULL;
+ if (vdev->msi_perm) {
+ free_perm_bits(vdev->msi_perm);
+ kfree(vdev->msi_perm);
+ vdev->msi_perm = NULL;
+ }
}
/*
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 2056f3f85f59..1d9fb2592945 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -249,6 +249,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
struct pci_dev *pdev = vdev->pdev;
unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
int ret;
+ u16 cmd;
if (!is_irq_none(vdev))
return -EINVAL;
@@ -258,13 +259,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
return -ENOMEM;
/* return the number of supported vectors if we can't get all: */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
if (ret < nvec) {
if (ret > 0)
pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
kfree(vdev->ctx);
return ret;
}
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
vdev->num_ctx = nvec;
vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
@@ -287,6 +291,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
struct pci_dev *pdev = vdev->pdev;
struct eventfd_ctx *trigger;
int irq, ret;
+ u16 cmd;
if (vector < 0 || vector >= vdev->num_ctx)
return -EINVAL;
@@ -295,7 +300,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
if (vdev->ctx[vector].trigger) {
irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
free_irq(irq, vdev->ctx[vector].trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(vdev->ctx[vector].trigger);
vdev->ctx[vector].trigger = NULL;
@@ -323,6 +332,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
* such a reset it would be unsuccessful. To avoid this, restore the
* cached value of the message prior to enabling.
*/
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
if (msix) {
struct msi_msg msg;
@@ -332,6 +342,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
ret = request_irq(irq, vfio_msihandler, 0,
vdev->ctx[vector].name, trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
if (ret) {
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(trigger);
@@ -376,6 +387,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
int i;
+ u16 cmd;
for (i = 0; i < vdev->num_ctx; i++) {
vfio_virqfd_disable(&vdev->ctx[i].unmask);
@@ -384,7 +396,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
/*
* Both disable paths above use pci_intx_for_msi() to clear DisINTx
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index ed20d73cc27c..65c61710c0e9 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -67,7 +67,7 @@ static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev,
*
 * This is not a fast path anyway.
*/
- sizealigned = _ALIGN_UP(posoff + count, PAGE_SIZE);
+ sizealigned = ALIGN(posoff + count, PAGE_SIZE);
ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned);
if (!ptr)
return -EFAULT;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 36ec69081ecd..86a02aff8735 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -92,6 +92,11 @@ struct vfio_pci_vf_token {
int users;
};
+struct vfio_pci_mmap_vma {
+ struct vm_area_struct *vma;
+ struct list_head vma_next;
+};
+
struct vfio_pci_device {
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_NUM_BARS];
@@ -132,6 +137,9 @@ struct vfio_pci_device {
struct list_head ioeventfds_list;
struct vfio_pci_vf_token *vf_token;
struct notifier_block nb;
+ struct mutex vma_lock;
+ struct list_head vma_list;
+ struct rw_semaphore memory_lock;
};
#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -174,6 +182,13 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
pci_power_t state);
+extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
+extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
+ *vdev);
+extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
+extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
+ u16 cmd);
+
#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
#else
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index a87992892a9f..916b184df3a5 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -162,6 +162,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
size_t x_start = 0, x_end = 0;
resource_size_t end;
void __iomem *io;
+ struct resource *res = &vdev->pdev->resource[bar];
ssize_t done;
if (pci_resource_start(pdev, bar))
@@ -177,6 +178,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
count = min(count, (size_t)(end - pos));
+ if (res->flags & IORESOURCE_MEM) {
+ down_read(&vdev->memory_lock);
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ up_read(&vdev->memory_lock);
+ return -EIO;
+ }
+ }
+
if (bar == PCI_ROM_RESOURCE) {
/*
* The ROM can fill less space than the BAR, so we start the
@@ -184,13 +193,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
* filling large ROM BARs much faster.
*/
io = pci_map_rom(pdev, &x_start);
- if (!io)
- return -ENOMEM;
+ if (!io) {
+ done = -ENOMEM;
+ goto out;
+ }
x_end = end;
} else {
int ret = vfio_pci_setup_barmap(vdev, bar);
- if (ret)
- return ret;
+ if (ret) {
+ done = ret;
+ goto out;
+ }
io = vdev->barmap[bar];
}
@@ -207,6 +220,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
if (bar == PCI_ROM_RESOURCE)
pci_unmap_rom(pdev, io);
+out:
+ if (res->flags & IORESOURCE_MEM)
+ up_read(&vdev->memory_lock);
return done;
}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 765e0e5d83ed..580099afeaff 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -85,6 +85,7 @@ struct vfio_group {
atomic_t opened;
wait_queue_head_t container_q;
bool noiommu;
+ unsigned int dev_counter;
struct kvm *kvm;
struct blocking_notifier_head notifier;
};
@@ -555,6 +556,7 @@ struct vfio_device *vfio_group_create_device(struct vfio_group *group,
mutex_lock(&group->device_lock);
list_add(&device->group_next, &group->device_list);
+ group->dev_counter++;
mutex_unlock(&group->device_lock);
return device;
@@ -567,6 +569,7 @@ static void vfio_device_release(struct kref *kref)
struct vfio_group *group = device->group;
list_del(&device->group_next);
+ group->dev_counter--;
mutex_unlock(&group->device_lock);
dev_set_drvdata(device->dev, NULL);
@@ -1945,6 +1948,9 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
if (!group)
return -ENODEV;
+ if (group->dev_counter > 1)
+ return -EINVAL;
+
ret = vfio_group_add_container_user(group);
if (ret)
goto err_pin_pages;
@@ -1952,7 +1958,8 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
container = group->container;
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
- ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
+ ret = driver->ops->pin_pages(container->iommu_data,
+ group->iommu_group, user_pfn,
npage, prot, phys_pfn);
else
ret = -ENOTTY;
@@ -2050,8 +2057,8 @@ int vfio_group_pin_pages(struct vfio_group *group,
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
ret = driver->ops->pin_pages(container->iommu_data,
- user_iova_pfn, npage,
- prot, phys_pfn);
+ group->iommu_group, user_iova_pfn,
+ npage, prot, phys_pfn);
else
ret = -ENOTTY;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index cc1d64765ce7..391fafe82c5c 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -69,8 +69,11 @@ struct vfio_iommu {
struct rb_root dma_list;
struct blocking_notifier_head notifier;
unsigned int dma_avail;
+ uint64_t pgsize_bitmap;
bool v2;
bool nesting;
+ bool dirty_page_tracking;
+ bool pinned_page_dirty_scope;
};
struct vfio_domain {
@@ -91,12 +94,14 @@ struct vfio_dma {
bool lock_cap; /* capable(CAP_IPC_LOCK) */
struct task_struct *task;
struct rb_root pfn_list; /* Ex-user pinned pfn list */
+ unsigned long *bitmap;
};
struct vfio_group {
struct iommu_group *iommu_group;
struct list_head next;
bool mdev_group; /* An mdev group */
+ bool pinned_page_dirty_scope;
};
struct vfio_iova {
@@ -112,7 +117,7 @@ struct vfio_pfn {
struct rb_node node;
dma_addr_t iova; /* Device address */
unsigned long pfn; /* Host pfn */
- atomic_t ref_count;
+ unsigned int ref_count;
};
struct vfio_regions {
@@ -125,8 +130,25 @@ struct vfio_regions {
#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
(!list_empty(&iommu->domain_list))
+#define DIRTY_BITMAP_BYTES(n) (ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)
+
+/*
+ * The number-of-bits argument to bitmap_set() is an unsigned int, which
+ * __bitmap_set() further casts to a signed int for unaligned multi-bit
+ * operations.
+ * The maximum supported bitmap size is therefore 2^31 bits, which at
+ * 2^3 bits/byte is 2^28 bytes (256 MB); with 4K pages that covers
+ * 2^31 * 2^12 = 2^43 (8 TB) of memory.
+ */
+#define DIRTY_BITMAP_PAGES_MAX ((u64)INT_MAX)
+#define DIRTY_BITMAP_SIZE_MAX DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
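
To make the limit concrete, the macro arithmetic for two mapping sizes on a 4K-page system:

/*
 * 1 GB mapping:  2^30 / 2^12 = 2^18 pages
 *                DIRTY_BITMAP_BYTES(2^18) = 2^18 / 2^3 = 32 KB
 * 8 TB mapping:  2^43 / 2^12 = 2^31 pages, capped at
 *                DIRTY_BITMAP_PAGES_MAX (INT_MAX)
 *                DIRTY_BITMAP_BYTES(INT_MAX) = 2^31 / 2^3 = 256 MB
 *                                            = DIRTY_BITMAP_SIZE_MAX
 */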
+
static int put_pfn(unsigned long pfn, int prot);
+static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
+ struct iommu_group *iommu_group);
+
+static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
/*
* This code handles mapping and unmapping of user data buffers
* into DMA'ble space using the IOMMU
@@ -175,6 +197,81 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
rb_erase(&old->node, &iommu->dma_list);
}
+
+static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
+{
+ uint64_t npages = dma->size / pgsize;
+
+ if (npages > DIRTY_BITMAP_PAGES_MAX)
+ return -EINVAL;
+
+ /*
+	 * Allocate 64 bits of extra headroom so that bitmap_shift_left()
+	 * can shift and combine an unaligned number of pages spanning
+	 * adjacent vfio_dma ranges.
+ */
+ dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages) + sizeof(u64),
+ GFP_KERNEL);
+ if (!dma->bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void vfio_dma_bitmap_free(struct vfio_dma *dma)
+{
+ kfree(dma->bitmap);
+ dma->bitmap = NULL;
+}
+
+static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
+{
+ struct rb_node *p;
+ unsigned long pgshift = __ffs(pgsize);
+
+ for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {
+ struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);
+
+ bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);
+ }
+}
+
+static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
+{
+ struct rb_node *n;
+
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+ int ret;
+
+ ret = vfio_dma_bitmap_alloc(dma, pgsize);
+ if (ret) {
+ struct rb_node *p;
+
+ for (p = rb_prev(n); p; p = rb_prev(p)) {
+				struct vfio_dma *dma = rb_entry(p,
+ struct vfio_dma, node);
+
+ vfio_dma_bitmap_free(dma);
+ }
+ return ret;
+ }
+ vfio_dma_populate_bitmap(dma, pgsize);
+ }
+ return 0;
+}
+
+static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
+{
+ struct rb_node *n;
+
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
+ vfio_dma_bitmap_free(dma);
+ }
+}
+
/*
* Helper Functions for host iova-pfn list
*/
@@ -233,7 +330,7 @@ static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
vpfn->iova = iova;
vpfn->pfn = pfn;
- atomic_set(&vpfn->ref_count, 1);
+ vpfn->ref_count = 1;
vfio_link_pfn(dma, vpfn);
return 0;
}
@@ -251,7 +348,7 @@ static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
if (vpfn)
- atomic_inc(&vpfn->ref_count);
+ vpfn->ref_count++;
return vpfn;
}
@@ -259,7 +356,8 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
{
int ret = 0;
- if (atomic_dec_and_test(&vpfn->ref_count)) {
+ vpfn->ref_count--;
+ if (!vpfn->ref_count) {
ret = put_pfn(vpfn->pfn, dma->prot);
vfio_remove_from_pfn_list(dma, vpfn);
}
@@ -317,6 +415,32 @@ static int put_pfn(unsigned long pfn, int prot)
return 0;
}
+static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+ unsigned long vaddr, unsigned long *pfn,
+ bool write_fault)
+{
+ int ret;
+
+ ret = follow_pfn(vma, vaddr, pfn);
+ if (ret) {
+ bool unlocked = false;
+
+ ret = fixup_user_fault(NULL, mm, vaddr,
+ FAULT_FLAG_REMOTE |
+ (write_fault ? FAULT_FLAG_WRITE : 0),
+ &unlocked);
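+		/*
+		 * fixup_user_fault() may have dropped and retaken mmap_sem,
+		 * in which case the vma we were given may be stale; return
+		 * -EAGAIN so the caller re-walks the vma before retrying.
+		 */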
+ if (unlocked)
+ return -EAGAIN;
+
+ if (ret)
+ return ret;
+
+ ret = follow_pfn(vma, vaddr, pfn);
+ }
+
+ return ret;
+}
+
static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
int prot, unsigned long *pfn)
{
@@ -339,12 +463,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
vaddr = untagged_addr(vaddr);
+retry:
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
if (vma && vma->vm_flags & VM_PFNMAP) {
- if (!follow_pfn(vma, vaddr, pfn) &&
- is_invalid_reserved_pfn(*pfn))
- ret = 0;
+ ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ if (!ret && !is_invalid_reserved_pfn(*pfn))
+ ret = -EFAULT;
}
done:
up_read(&mm->mmap_sem);
@@ -501,11 +629,13 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
}
static int vfio_iommu_type1_pin_pages(void *iommu_data,
+ struct iommu_group *iommu_group,
unsigned long *user_pfn,
int npage, int prot,
unsigned long *phys_pfn)
{
struct vfio_iommu *iommu = iommu_data;
+ struct vfio_group *group;
int i, j, ret;
unsigned long remote_vaddr;
struct vfio_dma *dma;
@@ -566,9 +696,26 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
vfio_unpin_page_external(dma, iova, do_accounting);
goto pin_unwind;
}
- }
+ if (iommu->dirty_page_tracking) {
+ unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
+
+ /*
+			 * The bitmap is populated at the smallest supported
+			 * page size granularity.
+ */
+ bitmap_set(dma->bitmap,
+ (iova - dma->iova) >> pgshift, 1);
+ }
+ }
ret = i;
+
+ group = vfio_iommu_find_iommu_group(iommu, iommu_group);
+ if (!group->pinned_page_dirty_scope) {
+ group->pinned_page_dirty_scope = true;
+ update_pinned_page_dirty_scope(iommu);
+ }
+
goto pin_done;
pin_unwind:
@@ -800,19 +947,19 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
vfio_unmap_unpin(iommu, dma, true);
vfio_unlink_dma(iommu, dma);
put_task_struct(dma->task);
+ vfio_dma_bitmap_free(dma);
kfree(dma);
iommu->dma_avail++;
}
-static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
{
struct vfio_domain *domain;
- unsigned long bitmap = ULONG_MAX;
- mutex_lock(&iommu->lock);
+ iommu->pgsize_bitmap = ULONG_MAX;
+
list_for_each_entry(domain, &iommu->domain_list, next)
- bitmap &= domain->domain->pgsize_bitmap;
- mutex_unlock(&iommu->lock);
+ iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
/*
* In case the IOMMU supports page sizes smaller than PAGE_SIZE
@@ -822,36 +969,143 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
* granularity while iommu driver can use the sub-PAGE_SIZE size
* to map the buffer.
*/
- if (bitmap & ~PAGE_MASK) {
- bitmap &= PAGE_MASK;
- bitmap |= PAGE_SIZE;
+ if (iommu->pgsize_bitmap & ~PAGE_MASK) {
+ iommu->pgsize_bitmap &= PAGE_MASK;
+ iommu->pgsize_bitmap |= PAGE_SIZE;
}
+}
+
+static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
+ struct vfio_dma *dma, dma_addr_t base_iova,
+ size_t pgsize)
+{
+ unsigned long pgshift = __ffs(pgsize);
+ unsigned long nbits = dma->size >> pgshift;
+ unsigned long bit_offset = (dma->iova - base_iova) >> pgshift;
+ unsigned long copy_offset = bit_offset / BITS_PER_LONG;
+ unsigned long shift = bit_offset % BITS_PER_LONG;
+ unsigned long leftover;
+
+ /*
+	 * Mark all pages dirty if any IOMMU-capable device cannot report
+	 * dirty pages and all pages are pinned and mapped.
+ */
+ if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
+ bitmap_set(dma->bitmap, 0, nbits);
- return bitmap;
+ if (shift) {
+ bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
+ nbits + shift);
+
+ if (copy_from_user(&leftover,
+ (void __user *)(bitmap + copy_offset),
+ sizeof(leftover)))
+ return -EFAULT;
+
+ bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift);
+ }
+
+ if (copy_to_user((void __user *)(bitmap + copy_offset), dma->bitmap,
+ DIRTY_BITMAP_BYTES(nbits + shift)))
+ return -EFAULT;
+
+ return 0;
+}
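
The shift and leftover handling above deals with vfio_dma ranges whose page offset from the requested base is not a multiple of BITS_PER_LONG; a worked example with made-up addresses (4K pages, pgshift = 12):

/*
 * base_iova = 0x0, dma->iova = 0x24000, dma->size = 0x10000
 *   nbits       = 0x10000 >> 12 = 16 pages
 *   bit_offset  = 0x24000 >> 12 = 36
 *   copy_offset = 36 / 64 = 0   (first u64 of the user bitmap)
 *   shift       = 36 % 64 = 36
 *
 * The 16 dirty bits are shifted up to bit positions 36..51, the user's
 * existing u64 at copy_offset is OR-ed back into bits 0..35, and
 * DIRTY_BITMAP_BYTES(16 + 36) = 8 bytes are copied out.
 */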
+
+static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
+ dma_addr_t iova, size_t size, size_t pgsize)
+{
+ struct vfio_dma *dma;
+ struct rb_node *n;
+ unsigned long pgshift = __ffs(pgsize);
+ int ret;
+
+ /*
+	 * A GET_BITMAP request must fully cover vfio_dma mappings. Multiple
+	 * vfio_dma mappings may be combined by specifying a large range, but
+	 * no existing mapping may be bisected by the range.
+ * An error will be returned if these conditions are not met.
+ */
+ dma = vfio_find_dma(iommu, iova, 1);
+ if (dma && dma->iova != iova)
+ return -EINVAL;
+
+ dma = vfio_find_dma(iommu, iova + size - 1, 0);
+ if (dma && dma->iova + dma->size != iova + size)
+ return -EINVAL;
+
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
+ if (dma->iova < iova)
+ continue;
+
+ if (dma->iova > iova + size - 1)
+ break;
+
+ ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
+ if (ret)
+ return ret;
+
+ /*
+ * Re-populate the bitmap with all currently pinned pages, which
+ * remain dirty while pinned; pages that have since been unpinned
+ * or were dirtied only via vfio_dma_rw() are not re-marked.
+ */
+ bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
+ vfio_dma_populate_bitmap(dma, pgsize);
+ }
+ return 0;
+}
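[Editor's note: to illustrate the coverage rule, assume two hypothetical mappings at [0x0, 0x10000) and [0x10000, 0x20000):

	GET_BITMAP over [0x0,    0x20000) -> OK, spans both mappings exactly
	GET_BITMAP over [0x0,    0x10000) -> OK, matches the first mapping
	GET_BITMAP over [0x8000, 0x18000) -> -EINVAL, bisects both mappings

Gaps with no mappings inside the range are permitted; only partial overlap with an existing mapping is rejected.]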
+
+static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
+{
+ if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) ||
+ (bitmap_size < DIRTY_BITMAP_BYTES(npages)))
+ return -EINVAL;
+
+ return 0;
}
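[Editor's note: the expected buffer size is one bit per page, rounded up to a u64 multiple. For example, tracking a 1 GiB range at 4 KiB granularity:

	npages = 1 GiB / 4 KiB = 262144
	bitmap = DIRTY_BITMAP_BYTES(262144) = 262144 / 8 bytes = 32 KiB

which is well under DIRTY_BITMAP_SIZE_MAX.]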
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
- struct vfio_iommu_type1_dma_unmap *unmap)
+ struct vfio_iommu_type1_dma_unmap *unmap,
+ struct vfio_bitmap *bitmap)
{
- uint64_t mask;
struct vfio_dma *dma, *dma_last = NULL;
- size_t unmapped = 0;
+ size_t unmapped = 0, pgsize;
int ret = 0, retries = 0;
+ unsigned long pgshift;
+
+ mutex_lock(&iommu->lock);
- mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
+ pgshift = __ffs(iommu->pgsize_bitmap);
+ pgsize = (size_t)1 << pgshift;
+
+ if (unmap->iova & (pgsize - 1)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!unmap->size || unmap->size & (pgsize - 1)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
- if (unmap->iova & mask)
- return -EINVAL;
- if (!unmap->size || unmap->size & mask)
- return -EINVAL;
if (unmap->iova + unmap->size - 1 < unmap->iova ||
- unmap->size > SIZE_MAX)
- return -EINVAL;
+ unmap->size > SIZE_MAX) {
+ ret = -EINVAL;
+ goto unlock;
+ }
- WARN_ON(mask & PAGE_MASK);
-again:
- mutex_lock(&iommu->lock);
+ /* When dirty tracking is enabled, allow only min supported pgsize */
+ if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
+ (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ WARN_ON((pgsize - 1) & PAGE_MASK);
+again:
/*
* vfio-iommu-type1 (v1) - User mappings were coalesced together to
* avoid tracking individual mappings. This means that the granularity
@@ -929,8 +1183,17 @@ again:
blocking_notifier_call_chain(&iommu->notifier,
VFIO_IOMMU_NOTIFY_DMA_UNMAP,
&nb_unmap);
+ mutex_lock(&iommu->lock);
goto again;
}
+
+ if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+ ret = update_user_bitmap(bitmap->data, iommu, dma,
+ unmap->iova, pgsize);
+ if (ret)
+ break;
+ }
+
unmapped += dma->size;
vfio_remove_dma(iommu, dma);
}
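[Editor's note: a minimal userspace sketch of VFIO_IOMMU_UNMAP_DMA with the new flag. The helper name and variables are hypothetical; error handling is elided; assumes <stdint.h>, <stdlib.h>, <sys/ioctl.h> and <linux/vfio.h>, a type1 v2 container fd, and that dirty tracking was started beforehand:

	static int unmap_and_get_dirty(int container, uint64_t iova,
				       uint64_t size, uint64_t pgsize,
				       uint64_t *data)
	{
		struct vfio_iommu_type1_dma_unmap *unmap;
		struct vfio_bitmap *bitmap;
		size_t argsz = sizeof(*unmap) + sizeof(*bitmap);
		int ret;

		unmap = calloc(1, argsz);
		if (!unmap)
			return -1;
		unmap->argsz = argsz;
		unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
		unmap->iova = iova;
		unmap->size = size;

		bitmap = (struct vfio_bitmap *)unmap->data;
		bitmap->pgsize = pgsize; /* must be the minimum supported pgsize */
		bitmap->size = ((size / pgsize + 63) / 64) * 8; /* 1 bit/page, u64 aligned */
		bitmap->data = data;

		ret = ioctl(container, VFIO_IOMMU_UNMAP_DMA, unmap);
		free(unmap);
		return ret;
	}
]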
@@ -1037,31 +1300,35 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
unsigned long vaddr = map->vaddr;
size_t size = map->size;
int ret = 0, prot = 0;
- uint64_t mask;
+ size_t pgsize;
struct vfio_dma *dma;
/* Verify that none of our __u64 fields overflow */
if (map->size != size || map->vaddr != vaddr || map->iova != iova)
return -EINVAL;
- mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
-
- WARN_ON(mask & PAGE_MASK);
-
/* READ/WRITE from device perspective */
if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
prot |= IOMMU_WRITE;
if (map->flags & VFIO_DMA_MAP_FLAG_READ)
prot |= IOMMU_READ;
- if (!prot || !size || (size | iova | vaddr) & mask)
- return -EINVAL;
+ mutex_lock(&iommu->lock);
- /* Don't allow IOVA or virtual address wrap */
- if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
- return -EINVAL;
+ pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
- mutex_lock(&iommu->lock);
+ WARN_ON((pgsize - 1) & PAGE_MASK);
+
+ if (!prot || !size || (size | iova | vaddr) & (pgsize - 1)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Don't allow IOVA or virtual address wrap */
+ if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
if (vfio_find_dma(iommu, iova, size)) {
ret = -EEXIST;
@@ -1129,6 +1396,12 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
else
ret = vfio_pin_map_dma(iommu, dma, size);
+ if (!ret && iommu->dirty_page_tracking) {
+ ret = vfio_dma_bitmap_alloc(dma, pgsize);
+ if (ret)
+ vfio_remove_dma(iommu, dma);
+ }
+
out_unlock:
mutex_unlock(&iommu->lock);
return ret;
@@ -1267,6 +1540,51 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
return NULL;
}
+static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
+ struct iommu_group *iommu_group)
+{
+ struct vfio_domain *domain;
+ struct vfio_group *group = NULL;
+
+ list_for_each_entry(domain, &iommu->domain_list, next) {
+ group = find_iommu_group(domain, iommu_group);
+ if (group)
+ return group;
+ }
+
+ if (iommu->external_domain)
+ group = find_iommu_group(iommu->external_domain, iommu_group);
+
+ return group;
+}
+
+static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
+{
+ struct vfio_domain *domain;
+ struct vfio_group *group;
+
+ list_for_each_entry(domain, &iommu->domain_list, next) {
+ list_for_each_entry(group, &domain->group_list, next) {
+ if (!group->pinned_page_dirty_scope) {
+ iommu->pinned_page_dirty_scope = false;
+ return;
+ }
+ }
+ }
+
+ if (iommu->external_domain) {
+ domain = iommu->external_domain;
+ list_for_each_entry(group, &domain->group_list, next) {
+ if (!group->pinned_page_dirty_scope) {
+ iommu->pinned_page_dirty_scope = false;
+ return;
+ }
+ }
+ }
+
+ iommu->pinned_page_dirty_scope = true;
+}
+
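[Editor's note: the promotion/demotion rule is effectively an AND across all groups in all domains, including the external (mdev) domain. A hypothetical state walk:

	groups: { mdev A (pinned scope), PCI B (no pinned scope) }
		-> iommu->pinned_page_dirty_scope = false (B demotes the scope)
	detach PCI B, then recompute
		-> iommu->pinned_page_dirty_scope = true (only A remains)
]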
static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
phys_addr_t *base)
{
@@ -1667,12 +1985,23 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (!iommu->external_domain) {
INIT_LIST_HEAD(&domain->group_list);
iommu->external_domain = domain;
+ vfio_update_pgsize_bitmap(iommu);
} else {
kfree(domain);
}
list_add(&group->next,
&iommu->external_domain->group_list);
+ /*
+ * A non-IOMMU-backed group cannot dirty memory directly;
+ * it can only use interfaces that provide dirty
+ * tracking.
+ * The iommu scope can only be promoted with the
+ * addition of a dirty tracking group.
+ */
+ group->pinned_page_dirty_scope = true;
+ if (!iommu->pinned_page_dirty_scope)
+ update_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);
return 0;
@@ -1792,9 +2121,17 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
}
list_add(&domain->next, &iommu->domain_list);
+ vfio_update_pgsize_bitmap(iommu);
done:
/* Delete the old one and insert new iova list */
vfio_iommu_iova_insert_copy(iommu, &iova_copy);
+
+ /*
+ * An iommu backed group can dirty memory directly and therefore
+ * demotes the iommu scope until it declares itself dirty tracking
+ * capable via the page pinning interface.
+ */
+ iommu->pinned_page_dirty_scope = false;
mutex_unlock(&iommu->lock);
vfio_iommu_resv_free(&group_resv_regions);
@@ -1947,6 +2284,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
struct vfio_iommu *iommu = iommu_data;
struct vfio_domain *domain;
struct vfio_group *group;
+ bool update_dirty_scope = false;
LIST_HEAD(iova_copy);
mutex_lock(&iommu->lock);
@@ -1954,6 +2292,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (iommu->external_domain) {
group = find_iommu_group(iommu->external_domain, iommu_group);
if (group) {
+ update_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
@@ -1983,6 +2322,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
continue;
vfio_iommu_detach_group(domain, group);
+ update_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
/*
@@ -2003,6 +2343,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
list_del(&domain->next);
kfree(domain);
vfio_iommu_aper_expand(iommu, &iova_copy);
+ vfio_update_pgsize_bitmap(iommu);
}
break;
}
@@ -2013,6 +2354,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
vfio_iommu_iova_free(&iova_copy);
detach_group_done:
+ /*
+ * Removal of a group without dirty tracking may allow the iommu scope
+ * to be promoted.
+ */
+ if (update_dirty_scope)
+ update_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);
}
@@ -2135,8 +2482,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
size_t size;
int iovas = 0, i = 0, ret;
- mutex_lock(&iommu->lock);
-
list_for_each_entry(iova, &iommu->iova_list, list)
iovas++;
@@ -2145,17 +2490,14 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
* Return 0 as a container with a single mdev device
* will have an empty list
*/
- ret = 0;
- goto out_unlock;
+ return 0;
}
size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
cap_iovas = kzalloc(size, GFP_KERNEL);
- if (!cap_iovas) {
- ret = -ENOMEM;
- goto out_unlock;
- }
+ if (!cap_iovas)
+ return -ENOMEM;
cap_iovas->nr_iovas = iovas;
@@ -2168,11 +2510,25 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);
kfree(cap_iovas);
-out_unlock:
- mutex_unlock(&iommu->lock);
return ret;
}
+static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_iommu_type1_info_cap_migration cap_mig;
+
+ cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
+ cap_mig.header.version = 1;
+
+ cap_mig.flags = 0;
+ /* report only the minimum supported page size */
+ cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+ cap_mig.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX;
+
+ return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
+}
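[Editor's note: userspace discovers this capability by walking the VFIO_IOMMU_GET_INFO capability chain. A minimal sketch (error handling elided; 'info' is a hypothetical struct vfio_iommu_type1_info fetched with an argsz large enough to include the chain):

	struct vfio_info_cap_header *hdr;
	struct vfio_iommu_type1_info_cap_migration *mig = NULL;
	__u32 off = info->cap_offset;

	while ((info->flags & VFIO_IOMMU_INFO_CAPS) && off) {
		hdr = (struct vfio_info_cap_header *)((char *)info + off);
		if (hdr->id == VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION) {
			mig = (struct vfio_iommu_type1_info_cap_migration *)hdr;
			break;
		}
		off = hdr->next;	/* 0 terminates the chain */
	}

	if (mig)
		printf("dirty pgsize bitmap 0x%llx, max bitmap %llu bytes\n",
		       (unsigned long long)mig->pgsize_bitmap,
		       (unsigned long long)mig->max_dirty_bitmap_size);
]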
+
static long vfio_iommu_type1_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
@@ -2214,11 +2570,18 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
info.cap_offset = 0; /* output, no-recopy necessary */
}
+ mutex_lock(&iommu->lock);
info.flags = VFIO_IOMMU_INFO_PGSIZES;
- info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+ info.iova_pgsizes = iommu->pgsize_bitmap;
+
+ ret = vfio_iommu_migration_build_caps(iommu, &caps);
+
+ if (!ret)
+ ret = vfio_iommu_iova_build_caps(iommu, &caps);
+
+ mutex_unlock(&iommu->lock);
- ret = vfio_iommu_iova_build_caps(iommu, &caps);
if (ret)
return ret;
@@ -2261,22 +2624,143 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
struct vfio_iommu_type1_dma_unmap unmap;
- long ret;
+ struct vfio_bitmap bitmap = { 0 };
+ int ret;
minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
if (copy_from_user(&unmap, (void __user *)arg, minsz))
return -EFAULT;
- if (unmap.argsz < minsz || unmap.flags)
+ if (unmap.argsz < minsz ||
+ unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
return -EINVAL;
- ret = vfio_dma_do_unmap(iommu, &unmap);
+ if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+ unsigned long pgshift;
+
+ if (unmap.argsz < (minsz + sizeof(bitmap)))
+ return -EINVAL;
+
+ if (copy_from_user(&bitmap,
+ (void __user *)(arg + minsz),
+ sizeof(bitmap)))
+ return -EFAULT;
+
+ if (!access_ok((void __user *)bitmap.data, bitmap.size))
+ return -EINVAL;
+
+ pgshift = __ffs(bitmap.pgsize);
+ ret = verify_bitmap_size(unmap.size >> pgshift,
+ bitmap.size);
+ if (ret)
+ return ret;
+ }
+
+ ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
if (ret)
return ret;
return copy_to_user((void __user *)arg, &unmap, minsz) ?
-EFAULT : 0;
+ } else if (cmd == VFIO_IOMMU_DIRTY_PAGES) {
+ struct vfio_iommu_type1_dirty_bitmap dirty;
+ uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+ int ret = 0;
+
+ if (!iommu->v2)
+ return -EACCES;
+
+ minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap,
+ flags);
+
+ if (copy_from_user(&dirty, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (dirty.argsz < minsz || dirty.flags & ~mask)
+ return -EINVAL;
+
+ /* only one flag should be set at a time */
+ if (__ffs(dirty.flags) != __fls(dirty.flags))
+ return -EINVAL;
+
+ if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
+ size_t pgsize;
+
+ mutex_lock(&iommu->lock);
+ pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+ if (!iommu->dirty_page_tracking) {
+ ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
+ if (!ret)
+ iommu->dirty_page_tracking = true;
+ }
+ mutex_unlock(&iommu->lock);
+ return ret;
+ } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
+ mutex_lock(&iommu->lock);
+ if (iommu->dirty_page_tracking) {
+ iommu->dirty_page_tracking = false;
+ vfio_dma_bitmap_free_all(iommu);
+ }
+ mutex_unlock(&iommu->lock);
+ return 0;
+ } else if (dirty.flags &
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
+ struct vfio_iommu_type1_dirty_bitmap_get range;
+ unsigned long pgshift;
+ size_t data_size = dirty.argsz - minsz;
+ size_t iommu_pgsize;
+
+ if (!data_size || data_size < sizeof(range))
+ return -EINVAL;
+
+ if (copy_from_user(&range, (void __user *)(arg + minsz),
+ sizeof(range)))
+ return -EFAULT;
+
+ if (range.iova + range.size < range.iova)
+ return -EINVAL;
+ if (!access_ok((void __user *)range.bitmap.data,
+ range.bitmap.size))
+ return -EINVAL;
+
+ pgshift = __ffs(range.bitmap.pgsize);
+ ret = verify_bitmap_size(range.size >> pgshift,
+ range.bitmap.size);
+ if (ret)
+ return ret;
+
+ mutex_lock(&iommu->lock);
+
+ iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+
+ /* allow only smallest supported pgsize */
+ if (range.bitmap.pgsize != iommu_pgsize) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (range.iova & (iommu_pgsize - 1)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!range.size || range.size & (iommu_pgsize - 1)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (iommu->dirty_page_tracking)
+ ret = vfio_iova_dirty_bitmap(range.bitmap.data,
+ iommu, range.iova, range.size,
+ range.bitmap.pgsize);
+ else
+ ret = -EINVAL;
+out_unlock:
+ mutex_unlock(&iommu->lock);
+
+ return ret;
+ }
}
return -ENOTTY;
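[Editor's note: the new ioctl maps onto a simple start/query/stop lifecycle from userspace. A minimal sketch (error handling elided; 'container', 'iova', 'size', 'pgsize' and 'data' are hypothetical; 'data' must satisfy verify_bitmap_size()):

	size_t argsz = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
		       sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
	struct vfio_iommu_type1_dirty_bitmap *db = calloc(1, argsz);
	struct vfio_iommu_type1_dirty_bitmap_get *get = (void *)db->data;

	db->argsz = sizeof(*db);
	db->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
	ioctl(container, VFIO_IOMMU_DIRTY_PAGES, db);	/* start tracking */

	db->argsz = argsz;
	db->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	get->iova = iova;
	get->size = size;
	get->bitmap.pgsize = pgsize;	/* must be the minimum supported pgsize */
	get->bitmap.size = ((size / pgsize + 63) / 64) * 8; /* 1 bit/page, u64 aligned */
	get->bitmap.data = data;
	ioctl(container, VFIO_IOMMU_DIRTY_PAGES, db);	/* fetch dirty bitmap */

	db->argsz = sizeof(*db);
	db->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
	ioctl(container, VFIO_IOMMU_DIRTY_PAGES, db);	/* stop tracking */
	free(db);
]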
@@ -2344,10 +2828,19 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
vaddr = dma->vaddr + offset;
- if (write)
+ if (write) {
*copied = copy_to_user((void __user *)vaddr, data,
count) ? 0 : count;
- else
+ if (*copied && iommu->dirty_page_tracking) {
+ unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
+ /*
+ * The bitmap is populated at the smallest supported
+ * page size granularity.
+ */
+ bitmap_set(dma->bitmap, offset >> pgshift,
+ *copied >> pgshift);
+ }
+ } else
*copied = copy_from_user(data, (void __user *)vaddr,
count) ? 0 : count;
if (kthread)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2927f02cc7e1..516519dcc8ff 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -747,6 +747,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
xdp->data = buf + pad;
xdp->data_end = xdp->data + len;
hdr->buflen = buflen;
+ xdp->frame_sz = buflen;
--net->refcnt_bias;
alloc_frag->offset += buflen;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c39952243fd3..8b104f76f324 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -2280,6 +2280,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
.fabric_name = "vhost",
+ .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
.tpg_get_tag = vhost_scsi_get_tpgt,
.tpg_check_demo_mode = vhost_scsi_check_true,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d450e16c5c25..21a59b598ed8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -730,7 +730,7 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
if (!map)
return NULL;
- return (void *)(uintptr_t)(map->addr + addr - map->start);
+ return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}
/* Can we switch to this memory table? */
@@ -869,7 +869,7 @@ static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
* not happen in this case.
*/
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
- void *addr, unsigned int size,
+ void __user *addr, unsigned int size,
int type)
{
void __user *uaddr = vhost_vq_meta_fetch(vq,
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index cac3e35d7630..92d80aa0c0ef 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -433,6 +433,27 @@ struct backlight_device *backlight_device_get_by_type(enum backlight_type type)
EXPORT_SYMBOL(backlight_device_get_by_type);
/**
+ * backlight_device_get_by_name - Get backlight device by name
+ * @name: Device name
+ *
+ * This function looks up a backlight device by its name. It obtains a reference
+ * on the backlight device and it is the caller's responsibility to drop the
+ * reference by calling backlight_put().
+ *
+ * Returns:
+ * A pointer to the backlight device if found, otherwise NULL.
+ */
+struct backlight_device *backlight_device_get_by_name(const char *name)
+{
+ struct device *dev;
+
+ dev = class_find_device_by_name(backlight_class, name);
+
+ return dev ? to_backlight_device(dev) : NULL;
+}
+EXPORT_SYMBOL(backlight_device_get_by_name);
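[Editor's note: a minimal usage sketch from another driver; the device name "panel-backlight" is hypothetical:

	struct backlight_device *bd;

	bd = backlight_device_get_by_name("panel-backlight");
	if (bd) {
		/* e.g. inspect the current brightness */
		dev_info(&bd->dev, "brightness %d\n", bd->props.brightness);
		backlight_put(bd);	/* drop the reference we obtained */
	}
]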
+
+/**
* backlight_device_unregister - unregisters a backlight device object.
* @bd: the backlight device object to be unregistered and freed.
*
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 8554b4aa980c..46f97d1c3d21 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -14,13 +14,11 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/lcd.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-
#include <linux/spi/spi.h>
-#include <linux/spi/l4f00242t03.h>
struct l4f00242t03_priv {
struct spi_device *spi;
@@ -28,16 +26,18 @@ struct l4f00242t03_priv {
int lcd_state;
struct regulator *io_reg;
struct regulator *core_reg;
+ struct gpio_desc *reset;
+ struct gpio_desc *enable;
};
-static void l4f00242t03_reset(unsigned int gpio)
+static void l4f00242t03_reset(struct gpio_desc *gpiod)
{
pr_debug("l4f00242t03_reset.\n");
- gpio_set_value(gpio, 1);
+ gpiod_set_value(gpiod, 1);
mdelay(100);
- gpio_set_value(gpio, 0);
+ gpiod_set_value(gpiod, 0);
mdelay(10); /* tRES >= 100us */
- gpio_set_value(gpio, 1);
+ gpiod_set_value(gpiod, 1);
mdelay(20);
}
@@ -45,7 +45,6 @@ static void l4f00242t03_reset(unsigned int gpio)
static void l4f00242t03_lcd_init(struct spi_device *spi)
{
- struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
int ret;
@@ -76,21 +75,20 @@ static void l4f00242t03_lcd_init(struct spi_device *spi)
return;
}
- l4f00242t03_reset(pdata->reset_gpio);
+ l4f00242t03_reset(priv->reset);
- gpio_set_value(pdata->data_enable_gpio, 1);
+ gpiod_set_value(priv->enable, 1);
msleep(60);
spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
}
static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
{
- struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "Powering down LCD\n");
- gpio_set_value(pdata->data_enable_gpio, 0);
+ gpiod_set_value(priv->enable, 0);
regulator_disable(priv->io_reg);
regulator_disable(priv->core_reg);
@@ -168,13 +166,6 @@ static struct lcd_ops l4f_ops = {
static int l4f00242t03_probe(struct spi_device *spi)
{
struct l4f00242t03_priv *priv;
- struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
- int ret;
-
- if (pdata == NULL) {
- dev_err(&spi->dev, "Uninitialized platform data.\n");
- return -EINVAL;
- }
priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
GFP_KERNEL);
@@ -187,21 +178,21 @@ static int l4f00242t03_probe(struct spi_device *spi)
priv->spi = spi;
- ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio,
- GPIOF_OUT_INIT_HIGH, "lcd l4f00242t03 reset");
- if (ret) {
+ priv->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset)) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
- return ret;
+ return PTR_ERR(priv->reset);
}
+ gpiod_set_consumer_name(priv->reset, "lcd l4f00242t03 reset");
- ret = devm_gpio_request_one(&spi->dev, pdata->data_enable_gpio,
- GPIOF_OUT_INIT_LOW, "lcd l4f00242t03 data enable");
- if (ret) {
+ priv->enable = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->enable)) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
- return ret;
+ return PTR_ERR(priv->enable);
}
+ gpiod_set_consumer_name(priv->enable, "lcd l4f00242t03 data enable");
priv->io_reg = devm_regulator_get(&spi->dev, "vdd");
if (IS_ERR(priv->io_reg)) {
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index f68920131a4a..e94932c69f54 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -456,7 +456,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ret = regulator_enable(lp->enable);
if (ret < 0) {
dev_err(lp->dev, "failed to enable vddio: %d\n", ret);
- return ret;
+ goto disable_supply;
}
/*
@@ -471,24 +471,34 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ret = lp855x_configure(lp);
if (ret) {
dev_err(lp->dev, "device config err: %d", ret);
- return ret;
+ goto disable_vddio;
}
ret = lp855x_backlight_register(lp);
if (ret) {
dev_err(lp->dev,
"failed to register backlight. err: %d\n", ret);
- return ret;
+ goto disable_vddio;
}
ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group);
if (ret) {
dev_err(lp->dev, "failed to register sysfs. err: %d\n", ret);
- return ret;
+ goto disable_vddio;
}
backlight_update_status(lp->bl);
+
return 0;
+
+disable_vddio:
+ if (lp->enable)
+ regulator_disable(lp->enable);
+disable_supply:
+ if (lp->supply)
+ regulator_disable(lp->supply);
+
+ return ret;
}
static int lp855x_remove(struct i2c_client *cl)
@@ -497,6 +507,8 @@ static int lp855x_remove(struct i2c_client *cl)
lp->bl->props.brightness = 0;
backlight_update_status(lp->bl);
+ if (lp->enable)
+ regulator_disable(lp->enable);
if (lp->supply)
regulator_disable(lp->supply);
sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 3d276b30a78c..4c8c34b99441 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -15,16 +15,21 @@
/* From DT binding */
#define WLED_MAX_STRINGS 4
+#define MOD_A 0
+#define MOD_B 1
#define WLED_DEFAULT_BRIGHTNESS 2048
#define WLED_SOFT_START_DLY_US 10000
#define WLED3_SINK_REG_BRIGHT_MAX 0xFFF
+#define WLED5_SINK_REG_BRIGHT_MAX_12B 0xFFF
+#define WLED5_SINK_REG_BRIGHT_MAX_15B 0x7FFF
/* WLED3/WLED4 control registers */
#define WLED3_CTRL_REG_FAULT_STATUS 0x08
#define WLED3_CTRL_REG_ILIM_FAULT_BIT BIT(0)
#define WLED3_CTRL_REG_OVP_FAULT_BIT BIT(1)
#define WLED4_CTRL_REG_SC_FAULT_BIT BIT(2)
+#define WLED5_CTRL_REG_OVP_PRE_ALARM_BIT BIT(4)
#define WLED3_CTRL_REG_INT_RT_STS 0x10
#define WLED3_CTRL_REG_OVP_FAULT_STATUS BIT(1)
@@ -40,6 +45,7 @@
#define WLED3_CTRL_REG_OVP 0x4d
#define WLED3_CTRL_REG_OVP_MASK GENMASK(1, 0)
+#define WLED5_CTRL_REG_OVP_MASK GENMASK(3, 0)
#define WLED3_CTRL_REG_ILIMIT 0x4e
#define WLED3_CTRL_REG_ILIMIT_MASK GENMASK(2, 0)
@@ -101,6 +107,44 @@
#define WLED4_SINK_REG_BRIGHT(n) (0x57 + (n * 0x10))
+/* WLED5 specific control registers */
+#define WLED5_CTRL_REG_OVP_INT_CTL 0x5f
+#define WLED5_CTRL_REG_OVP_INT_TIMER_MASK GENMASK(2, 0)
+
+/* WLED5 specific sink registers */
+#define WLED5_SINK_REG_MOD_A_EN 0x50
+#define WLED5_SINK_REG_MOD_B_EN 0x60
+#define WLED5_SINK_REG_MOD_EN_MASK BIT(7)
+
+#define WLED5_SINK_REG_MOD_A_SRC_SEL 0x51
+#define WLED5_SINK_REG_MOD_B_SRC_SEL 0x61
+#define WLED5_SINK_REG_MOD_SRC_SEL_HIGH 0
+#define WLED5_SINK_REG_MOD_SRC_SEL_EXT 0x03
+#define WLED5_SINK_REG_MOD_SRC_SEL_MASK GENMASK(1, 0)
+
+#define WLED5_SINK_REG_MOD_A_BRIGHTNESS_WIDTH_SEL 0x52
+#define WLED5_SINK_REG_MOD_B_BRIGHTNESS_WIDTH_SEL 0x62
+#define WLED5_SINK_REG_BRIGHTNESS_WIDTH_12B 0
+#define WLED5_SINK_REG_BRIGHTNESS_WIDTH_15B 1
+
+#define WLED5_SINK_REG_MOD_A_BRIGHTNESS_LSB 0x53
+#define WLED5_SINK_REG_MOD_A_BRIGHTNESS_MSB 0x54
+#define WLED5_SINK_REG_MOD_B_BRIGHTNESS_LSB 0x63
+#define WLED5_SINK_REG_MOD_B_BRIGHTNESS_MSB 0x64
+
+#define WLED5_SINK_REG_MOD_SYNC_BIT 0x65
+#define WLED5_SINK_REG_SYNC_MOD_A_BIT BIT(0)
+#define WLED5_SINK_REG_SYNC_MOD_B_BIT BIT(1)
+#define WLED5_SINK_REG_SYNC_MASK GENMASK(1, 0)
+
+/* WLED5 specific per-'string' registers below */
+#define WLED5_SINK_REG_STR_FULL_SCALE_CURR(n) (0x72 + (n * 0x10))
+
+#define WLED5_SINK_REG_STR_SRC_SEL(n) (0x73 + (n * 0x10))
+#define WLED5_SINK_REG_SRC_SEL_MOD_A 0
+#define WLED5_SINK_REG_SRC_SEL_MOD_B 1
+#define WLED5_SINK_REG_SRC_SEL_MASK GENMASK(1, 0)
+
struct wled_var_cfg {
const u32 *values;
u32 (*fn)(u32);
@@ -125,6 +169,8 @@ struct wled_config {
u32 num_strings;
u32 string_i_limit;
u32 enabled_strings[WLED_MAX_STRINGS];
+ u32 mod_sel;
+ u32 cabc_sel;
bool cs_out_en;
bool ext_gen;
bool cabc;
@@ -147,14 +193,39 @@ struct wled {
u32 max_brightness;
u32 short_count;
u32 auto_detect_count;
+ u32 version;
bool disabled_by_short;
bool has_short_detect;
+ bool cabc_disabled;
int short_irq;
int ovp_irq;
struct wled_config cfg;
struct delayed_work ovp_work;
+
+ /* Configures the brightness. Applicable for wled3, wled4 and wled5 */
int (*wled_set_brightness)(struct wled *wled, u16 brightness);
+
+ /* Configures the cabc register. Applicable for wled4 and wled5 */
+ int (*wled_cabc_config)(struct wled *wled, bool enable);
+
+ /*
+ * Toggles the sync bit for the brightness update to take place.
+ * Applicable for WLED3, WLED4 and WLED5.
+ */
+ int (*wled_sync_toggle)(struct wled *wled);
+
+ /*
+ * Time to wait before checking the OVP status after wled module enable.
+ * Applicable for WLED4 and WLED5.
+ */
+ int (*wled_ovp_delay)(struct wled *wled);
+
+ /*
+ * Determines if the auto string detection is required.
+ * Applicable for WLED4 and WLED5
+ */
+ bool (*wled_auto_detection_required)(struct wled *wled);
};
static int wled3_set_brightness(struct wled *wled, u16 brightness)
@@ -198,6 +269,28 @@ static int wled4_set_brightness(struct wled *wled, u16 brightness)
return 0;
}
+static int wled5_set_brightness(struct wled *wled, u16 brightness)
+{
+ int rc, offset;
+ u16 low_limit = wled->max_brightness * 1 / 1000;
+ u8 v[2];
+
+ /* WLED5's lower limit is 0.1% */
+ if (brightness < low_limit)
+ brightness = low_limit;
+
+ v[0] = brightness & 0xff;
+ v[1] = (brightness >> 8) & 0x7f;
+
+ offset = (wled->cfg.mod_sel == MOD_A) ?
+ WLED5_SINK_REG_MOD_A_BRIGHTNESS_LSB :
+ WLED5_SINK_REG_MOD_B_BRIGHTNESS_LSB;
+
+ rc = regmap_bulk_write(wled->regmap, wled->sink_addr + offset,
+ v, 2);
+ return rc;
+}
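[Editor's note: a worked example of the register split above, assuming the 15-bit modulator width and a full-scale request:

	brightness = 0x7FFF (15-bit max)
	v[0] = 0x7FFF & 0xff        = 0xff -> MOD_{A,B}_BRIGHTNESS_LSB
	v[1] = (0x7FFF >> 8) & 0x7f = 0x7f -> MOD_{A,B}_BRIGHTNESS_MSB
	low_limit = 0x7FFF * 1 / 1000 = 32, so requests below 32 are clamped up
]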
+
static void wled_ovp_work(struct work_struct *work)
{
struct wled *wled = container_of(work,
@@ -237,7 +330,7 @@ static int wled_module_enable(struct wled *wled, int val)
return 0;
}
-static int wled_sync_toggle(struct wled *wled)
+static int wled3_sync_toggle(struct wled *wled)
{
int rc;
unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
@@ -255,6 +348,88 @@ static int wled_sync_toggle(struct wled *wled)
return rc;
}
+static int wled5_sync_toggle(struct wled *wled)
+{
+ int rc;
+ u8 val;
+
+ val = (wled->cfg.mod_sel == MOD_A) ? WLED5_SINK_REG_SYNC_MOD_A_BIT :
+ WLED5_SINK_REG_SYNC_MOD_B_BIT;
+ rc = regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED5_SINK_REG_MOD_SYNC_BIT,
+ WLED5_SINK_REG_SYNC_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ return regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED5_SINK_REG_MOD_SYNC_BIT,
+ WLED5_SINK_REG_SYNC_MASK, 0);
+}
+
+static int wled_ovp_fault_status(struct wled *wled, bool *fault_set)
+{
+ int rc;
+ u32 int_rt_sts, fault_sts;
+
+ *fault_set = false;
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_INT_RT_STS,
+ &int_rt_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read INT_RT_STS rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FAULT_STATUS,
+ &fault_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read FAULT_STATUS rc=%d\n", rc);
+ return rc;
+ }
+
+ if (int_rt_sts & WLED3_CTRL_REG_OVP_FAULT_STATUS)
+ *fault_set = true;
+
+ if (wled->version == 4 && (fault_sts & WLED3_CTRL_REG_OVP_FAULT_BIT))
+ *fault_set = true;
+
+ if (wled->version == 5 && (fault_sts & (WLED3_CTRL_REG_OVP_FAULT_BIT |
+ WLED5_CTRL_REG_OVP_PRE_ALARM_BIT)))
+ *fault_set = true;
+
+ if (*fault_set)
+ dev_dbg(wled->dev, "WLED OVP fault detected, int_rt_sts=0x%x fault_sts=0x%x\n",
+ int_rt_sts, fault_sts);
+
+ return rc;
+}
+
+static int wled4_ovp_delay(struct wled *wled)
+{
+ return WLED_SOFT_START_DLY_US;
+}
+
+static int wled5_ovp_delay(struct wled *wled)
+{
+ int rc, delay_us;
+ u32 val;
+ u8 ovp_timer_ms[8] = {1, 2, 4, 8, 12, 16, 20, 24};
+
+ /* For WLED5, get the delay based on OVP timer */
+ rc = regmap_read(wled->regmap, wled->ctrl_addr +
+ WLED5_CTRL_REG_OVP_INT_CTL, &val);
+ if (rc < 0)
+ delay_us = 2 * WLED_SOFT_START_DLY_US;
+ else
+ delay_us =
+ ovp_timer_ms[val & WLED5_CTRL_REG_OVP_INT_TIMER_MASK] * 1000;
+
+ dev_dbg(wled->dev, "delay_time_us: %d\n", delay_us);
+
+ return delay_us;
+}
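[Editor's note: a worked example of the timer mapping, with a hypothetical register value:

	val = 0x5 -> ovp_timer_ms[5] = 16 -> delay_us = 16000
	regmap_read() failure -> delay_us = 2 * WLED_SOFT_START_DLY_US = 20000
]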
+
static int wled_update_status(struct backlight_device *bl)
{
struct wled *wled = bl_get_data(bl);
@@ -275,7 +450,7 @@ static int wled_update_status(struct backlight_device *bl)
goto unlock_mutex;
}
- rc = wled_sync_toggle(wled);
+ rc = wled->wled_sync_toggle(wled);
if (rc < 0) {
dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
goto unlock_mutex;
@@ -298,6 +473,50 @@ unlock_mutex:
return rc;
}
+static int wled4_cabc_config(struct wled *wled, bool enable)
+{
+ int i, j, rc;
+ u8 val;
+
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ j = wled->cfg.enabled_strings[i];
+
+ val = enable ? WLED4_SINK_REG_STR_CABC_MASK : 0;
+ rc = regmap_update_bits(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_CABC(j),
+ WLED4_SINK_REG_STR_CABC_MASK, val);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int wled5_cabc_config(struct wled *wled, bool enable)
+{
+ int rc, offset;
+ u8 reg;
+
+ if (wled->cabc_disabled)
+ return 0;
+
+ reg = enable ? wled->cfg.cabc_sel : 0;
+ offset = (wled->cfg.mod_sel == MOD_A) ? WLED5_SINK_REG_MOD_A_SRC_SEL :
+ WLED5_SINK_REG_MOD_B_SRC_SEL;
+
+ rc = regmap_update_bits(wled->regmap, wled->sink_addr + offset,
+ WLED5_SINK_REG_MOD_SRC_SEL_MASK, reg);
+ if (rc < 0) {
+ pr_err("Error in configuring CABC rc=%d\n", rc);
+ return rc;
+ }
+
+ if (!wled->cfg.cabc_sel)
+ wled->cabc_disabled = true;
+
+ return 0;
+}
+
#define WLED_SHORT_DLY_MS 20
#define WLED_SHORT_CNT_MAX 5
#define WLED_SHORT_RESET_CNT_DLY_US USEC_PER_SEC
@@ -345,9 +564,10 @@ unlock_mutex:
static void wled_auto_string_detection(struct wled *wled)
{
- int rc = 0, i;
- u32 sink_config = 0, int_sts;
+ int rc = 0, i, delay_time_us;
+ u32 sink_config = 0;
u8 sink_test = 0, sink_valid = 0, val;
+ bool fault_set;
/* Read configured sink configuration */
rc = regmap_read(wled->regmap, wled->sink_addr +
@@ -376,14 +596,9 @@ static void wled_auto_string_detection(struct wled *wled)
}
if (wled->cfg.cabc) {
- for (i = 0; i < wled->cfg.num_strings; i++) {
- rc = regmap_update_bits(wled->regmap, wled->sink_addr +
- WLED4_SINK_REG_STR_CABC(i),
- WLED4_SINK_REG_STR_CABC_MASK,
- 0);
- if (rc < 0)
- goto failed_detect;
- }
+ rc = wled->wled_cabc_config(wled, false);
+ if (rc < 0)
+ goto failed_detect;
}
/* Disable all sinks */
@@ -427,18 +642,17 @@ static void wled_auto_string_detection(struct wled *wled)
goto failed_detect;
}
- usleep_range(WLED_SOFT_START_DLY_US,
- WLED_SOFT_START_DLY_US + 1000);
+ delay_time_us = wled->wled_ovp_delay(wled);
+ usleep_range(delay_time_us, delay_time_us + 1000);
- rc = regmap_read(wled->regmap, wled->ctrl_addr +
- WLED3_CTRL_REG_INT_RT_STS, &int_sts);
+ rc = wled_ovp_fault_status(wled, &fault_set);
if (rc < 0) {
- dev_err(wled->dev, "Error in reading WLED3_CTRL_INT_RT_STS rc=%d\n",
+ dev_err(wled->dev, "Error in getting OVP fault_sts, rc=%d\n",
rc);
goto failed_detect;
}
- if (int_sts & WLED3_CTRL_REG_OVP_FAULT_STATUS)
+ if (fault_set)
dev_dbg(wled->dev, "WLED OVP fault detected with SINK %d\n",
i + 1);
else
@@ -478,30 +692,30 @@ static void wled_auto_string_detection(struct wled *wled)
}
/* Enable valid sinks */
- for (i = 0; i < wled->cfg.num_strings; i++) {
- if (wled->cfg.cabc) {
- rc = regmap_update_bits(wled->regmap, wled->sink_addr +
- WLED4_SINK_REG_STR_CABC(i),
- WLED4_SINK_REG_STR_CABC_MASK,
- WLED4_SINK_REG_STR_CABC_MASK);
- if (rc < 0)
+ if (wled->version == 4) {
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ if (sink_config &
+ BIT(WLED4_SINK_REG_CURR_SINK_SHFT + i))
+ val = WLED4_SINK_REG_STR_MOD_MASK;
+ else
+ /* Disable modulator_en for unused sink */
+ val = 0;
+
+ rc = regmap_write(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_MOD_EN(i), val);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to configure MODULATOR_EN rc=%d\n",
+ rc);
goto failed_detect;
- }
-
- if (sink_config & BIT(WLED4_SINK_REG_CURR_SINK_SHFT + i))
- val = WLED4_SINK_REG_STR_MOD_MASK;
- else
- val = 0x0; /* Disable modulator_en for unused sink */
-
- rc = regmap_write(wled->regmap, wled->sink_addr +
- WLED4_SINK_REG_STR_MOD_EN(i), val);
- if (rc < 0) {
- dev_err(wled->dev, "Failed to configure MODULATOR_EN rc=%d\n",
- rc);
- goto failed_detect;
+ }
}
}
+ /* Enable CABC */
+ rc = wled->wled_cabc_config(wled, true);
+ if (rc < 0)
+ goto failed_detect;
+
/* Restore the feedback setting */
rc = regmap_write(wled->regmap,
wled->ctrl_addr + WLED3_CTRL_REG_FEEDBACK_CONTROL, 0);
@@ -534,7 +748,8 @@ failed_detect:
#define WLED_AUTO_DETECT_OVP_COUNT 5
#define WLED_AUTO_DETECT_CNT_DLY_US USEC_PER_SEC
-static bool wled_auto_detection_required(struct wled *wled)
+
+static bool wled4_auto_detection_required(struct wled *wled)
{
s64 elapsed_time_us;
@@ -567,32 +782,39 @@ static bool wled_auto_detection_required(struct wled *wled)
return false;
}
+static bool wled5_auto_detection_required(struct wled *wled)
+{
+ if (!wled->cfg.auto_detection_enabled)
+ return false;
+
+ /*
+ * Unlike WLED4, WLED5 has an OVP fault density interrupt
+ * configuration, i.e. it counts the number of OVP alarms over a
+ * certain duration before triggering the OVP fault interrupt. By
+ * default, 32 OVP fault events within a 12 ms interval must be
+ * counted before an interrupt fires. A single OVP fault interrupt
+ * therefore qualifies as a real OVP fault condition, warranting a
+ * run of the auto-detection algorithm.
+ */
+ return true;
+}
+
static int wled_auto_detection_at_init(struct wled *wled)
{
int rc;
- u32 fault_status, rt_status;
+ bool fault_set;
if (!wled->cfg.auto_detection_enabled)
return 0;
- rc = regmap_read(wled->regmap,
- wled->ctrl_addr + WLED3_CTRL_REG_INT_RT_STS,
- &rt_status);
+ rc = wled_ovp_fault_status(wled, &fault_set);
if (rc < 0) {
- dev_err(wled->dev, "Failed to read RT status rc=%d\n", rc);
- return rc;
- }
-
- rc = regmap_read(wled->regmap,
- wled->ctrl_addr + WLED3_CTRL_REG_FAULT_STATUS,
- &fault_status);
- if (rc < 0) {
- dev_err(wled->dev, "Failed to read fault status rc=%d\n", rc);
+ dev_err(wled->dev, "Error in getting OVP fault_sts, rc=%d\n",
+ rc);
return rc;
}
- if ((rt_status & WLED3_CTRL_REG_OVP_FAULT_STATUS) ||
- (fault_status & WLED3_CTRL_REG_OVP_FAULT_BIT)) {
+ if (fault_set) {
mutex_lock(&wled->lock);
wled_auto_string_detection(wled);
mutex_unlock(&wled->lock);
@@ -629,7 +851,7 @@ static irqreturn_t wled_ovp_irq_handler(int irq, void *_wled)
int_sts, fault_sts);
if (fault_sts & WLED3_CTRL_REG_OVP_FAULT_BIT) {
- if (wled_auto_detection_required(wled)) {
+ if (wled->wled_auto_detection_required(wled)) {
mutex_lock(&wled->lock);
wled_auto_string_detection(wled);
mutex_unlock(&wled->lock);
@@ -811,17 +1033,12 @@ static int wled4_setup(struct wled *wled)
wled->cfg.string_i_limit);
if (rc < 0)
return rc;
-
- addr = wled->sink_addr +
- WLED4_SINK_REG_STR_CABC(j);
- rc = regmap_update_bits(wled->regmap, addr,
- WLED4_SINK_REG_STR_CABC_MASK,
- wled->cfg.cabc ?
- WLED4_SINK_REG_STR_CABC_MASK : 0);
- if (rc < 0)
- return rc;
}
+ rc = wled4_cabc_config(wled, wled->cfg.cabc);
+ if (rc < 0)
+ return rc;
+
rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
WLED3_CTRL_REG_MOD_EN,
WLED3_CTRL_REG_MOD_EN_MASK,
@@ -835,7 +1052,7 @@ static int wled4_setup(struct wled *wled)
if (rc < 0)
return rc;
- rc = wled_sync_toggle(wled);
+ rc = wled->wled_sync_toggle(wled);
if (rc < 0) {
dev_err(wled->dev, "Failed to toggle sync reg rc:%d\n", rc);
return rc;
@@ -857,6 +1074,119 @@ static const struct wled_config wled4_config_defaults = {
.auto_detection_enabled = false,
};
+static int wled5_setup(struct wled *wled)
+{
+ int rc, temp, i, j, offset;
+ u8 sink_en = 0;
+ u16 addr;
+ u32 val;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_OVP,
+ WLED5_CTRL_REG_OVP_MASK, wled->cfg.ovp);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_ILIMIT,
+ WLED3_CTRL_REG_ILIMIT_MASK,
+ wled->cfg.boost_i_limit);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FREQ,
+ WLED3_CTRL_REG_FREQ_MASK,
+ wled->cfg.switch_freq);
+ if (rc < 0)
+ return rc;
+
+ /* Per sink/string configuration */
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ j = wled->cfg.enabled_strings[i];
+ addr = wled->sink_addr +
+ WLED4_SINK_REG_STR_FULL_SCALE_CURR(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED4_SINK_REG_STR_FULL_SCALE_CURR_MASK,
+ wled->cfg.string_i_limit);
+ if (rc < 0)
+ return rc;
+
+ addr = wled->sink_addr + WLED5_SINK_REG_STR_SRC_SEL(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED5_SINK_REG_SRC_SEL_MASK,
+ wled->cfg.mod_sel == MOD_A ?
+ WLED5_SINK_REG_SRC_SEL_MOD_A :
+ WLED5_SINK_REG_SRC_SEL_MOD_B);
+ if (rc < 0)
+ return rc;
+
+ temp = j + WLED4_SINK_REG_CURR_SINK_SHFT;
+ sink_en |= 1 << temp;
+ }
+
+ rc = wled5_cabc_config(wled, wled->cfg.cabc_sel ? true : false);
+ if (rc < 0)
+ return rc;
+
+ /* Enable one of the modulators A or B based on mod_sel */
+ addr = wled->sink_addr + WLED5_SINK_REG_MOD_A_EN;
+ val = (wled->cfg.mod_sel == MOD_A) ? WLED5_SINK_REG_MOD_EN_MASK : 0;
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED5_SINK_REG_MOD_EN_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ addr = wled->sink_addr + WLED5_SINK_REG_MOD_B_EN;
+ val = (wled->cfg.mod_sel == MOD_B) ? WLED5_SINK_REG_MOD_EN_MASK : 0;
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED5_SINK_REG_MOD_EN_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ offset = (wled->cfg.mod_sel == MOD_A) ?
+ WLED5_SINK_REG_MOD_A_BRIGHTNESS_WIDTH_SEL :
+ WLED5_SINK_REG_MOD_B_BRIGHTNESS_WIDTH_SEL;
+
+ addr = wled->sink_addr + offset;
+ val = (wled->max_brightness == WLED5_SINK_REG_BRIGHT_MAX_15B) ?
+ WLED5_SINK_REG_BRIGHTNESS_WIDTH_15B :
+ WLED5_SINK_REG_BRIGHTNESS_WIDTH_12B;
+ rc = regmap_write(wled->regmap, addr, val);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK,
+ WLED4_SINK_REG_CURR_SINK_MASK, sink_en);
+ if (rc < 0)
+ return rc;
+
+ /* This updates only FSC configuration in WLED5 */
+ rc = wled->wled_sync_toggle(wled);
+ if (rc < 0) {
+ pr_err("Failed to toggle sync reg rc:%d\n", rc);
+ return rc;
+ }
+
+ rc = wled_auto_detection_at_init(wled);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static const struct wled_config wled5_config_defaults = {
+ .boost_i_limit = 5,
+ .string_i_limit = 10,
+ .ovp = 4,
+ .num_strings = 4,
+ .switch_freq = 11,
+ .mod_sel = 0,
+ .cabc_sel = 0,
+ .cabc = false,
+ .external_pfet = false,
+ .auto_detection_enabled = false,
+};
+
static const u32 wled3_boost_i_limit_values[] = {
105, 385, 525, 805, 980, 1260, 1400, 1680,
};
@@ -875,6 +1205,16 @@ static const struct wled_var_cfg wled4_boost_i_limit_cfg = {
.size = ARRAY_SIZE(wled4_boost_i_limit_values),
};
+static inline u32 wled5_boost_i_limit_values_fn(u32 idx)
+{
+ return 525 + (idx * 175);
+}
+
+static const struct wled_var_cfg wled5_boost_i_limit_cfg = {
+ .fn = wled5_boost_i_limit_values_fn,
+ .size = 8,
+};
+
static const u32 wled3_ovp_values[] = {
35, 32, 29, 27,
};
@@ -893,6 +1233,21 @@ static const struct wled_var_cfg wled4_ovp_cfg = {
.size = ARRAY_SIZE(wled4_ovp_values),
};
+static inline u32 wled5_ovp_values_fn(u32 idx)
+{
+ /*
+ * 0000 - 38.5 V
+ * 0001 - 37 V ..
+ * 1111 - 16 V
+ */
+ return 38500 - (idx * 1500);
+}
+
+static const struct wled_var_cfg wled5_ovp_cfg = {
+ .fn = wled5_ovp_values_fn,
+ .size = 16,
+};
+
static u32 wled3_num_strings_values_fn(u32 idx)
{
return idx + 1;
@@ -940,6 +1295,14 @@ static const struct wled_var_cfg wled4_string_cfg = {
.size = 16,
};
+static const struct wled_var_cfg wled5_mod_sel_cfg = {
+ .size = 2,
+};
+
+static const struct wled_var_cfg wled5_cabc_sel_cfg = {
+ .size = 4,
+};
+
static u32 wled_values(const struct wled_var_cfg *cfg, u32 idx)
{
if (idx >= cfg->size)
@@ -951,7 +1314,7 @@ static u32 wled_values(const struct wled_var_cfg *cfg, u32 idx)
return idx;
}
-static int wled_configure(struct wled *wled, int version)
+static int wled_configure(struct wled *wled)
{
struct wled_config *cfg = &wled->cfg;
struct device *dev = wled->dev;
@@ -1016,6 +1379,44 @@ static int wled_configure(struct wled *wled, int version)
},
};
+ const struct wled_u32_opts wled5_opts[] = {
+ {
+ .name = "qcom,current-boost-limit",
+ .val_ptr = &cfg->boost_i_limit,
+ .cfg = &wled5_boost_i_limit_cfg,
+ },
+ {
+ .name = "qcom,current-limit-microamp",
+ .val_ptr = &cfg->string_i_limit,
+ .cfg = &wled4_string_i_limit_cfg,
+ },
+ {
+ .name = "qcom,ovp-millivolt",
+ .val_ptr = &cfg->ovp,
+ .cfg = &wled5_ovp_cfg,
+ },
+ {
+ .name = "qcom,switching-freq",
+ .val_ptr = &cfg->switch_freq,
+ .cfg = &wled3_switch_freq_cfg,
+ },
+ {
+ .name = "qcom,num-strings",
+ .val_ptr = &cfg->num_strings,
+ .cfg = &wled4_num_strings_cfg,
+ },
+ {
+ .name = "qcom,modulator-sel",
+ .val_ptr = &cfg->mod_sel,
+ .cfg = &wled5_mod_sel_cfg,
+ },
+ {
+ .name = "qcom,cabc-sel",
+ .val_ptr = &cfg->cabc_sel,
+ .cfg = &wled5_cabc_sel_cfg,
+ },
+ };
+
const struct wled_bool_opts bool_opts[] = {
{ "qcom,cs-out", &cfg->cs_out_en, },
{ "qcom,ext-gen", &cfg->ext_gen, },
@@ -1035,12 +1436,13 @@ static int wled_configure(struct wled *wled, int version)
if (rc)
wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
- switch (version) {
+ switch (wled->version) {
case 3:
u32_opts = wled3_opts;
size = ARRAY_SIZE(wled3_opts);
*cfg = wled3_config_defaults;
wled->wled_set_brightness = wled3_set_brightness;
+ wled->wled_sync_toggle = wled3_sync_toggle;
wled->max_string_count = 3;
wled->sink_addr = wled->ctrl_addr;
break;
@@ -1050,6 +1452,31 @@ static int wled_configure(struct wled *wled, int version)
size = ARRAY_SIZE(wled4_opts);
*cfg = wled4_config_defaults;
wled->wled_set_brightness = wled4_set_brightness;
+ wled->wled_sync_toggle = wled3_sync_toggle;
+ wled->wled_cabc_config = wled4_cabc_config;
+ wled->wled_ovp_delay = wled4_ovp_delay;
+ wled->wled_auto_detection_required =
+ wled4_auto_detection_required;
+ wled->max_string_count = 4;
+
+ prop_addr = of_get_address(dev->of_node, 1, NULL, NULL);
+ if (!prop_addr) {
+ dev_err(wled->dev, "invalid IO resources\n");
+ return -EINVAL;
+ }
+ wled->sink_addr = be32_to_cpu(*prop_addr);
+ break;
+
+ case 5:
+ u32_opts = wled5_opts;
+ size = ARRAY_SIZE(wled5_opts);
+ *cfg = wled5_config_defaults;
+ wled->wled_set_brightness = wled5_set_brightness;
+ wled->wled_sync_toggle = wled5_sync_toggle;
+ wled->wled_cabc_config = wled5_cabc_config;
+ wled->wled_ovp_delay = wled5_ovp_delay;
+ wled->wled_auto_detection_required =
+ wled5_auto_detection_required;
wled->max_string_count = 4;
prop_addr = of_get_address(dev->of_node, 1, NULL, NULL);
@@ -1186,7 +1613,6 @@ static int wled_probe(struct platform_device *pdev)
struct backlight_device *bl;
struct wled *wled;
struct regmap *regmap;
- int version;
u32 val;
int rc;
@@ -1203,18 +1629,22 @@ static int wled_probe(struct platform_device *pdev)
wled->regmap = regmap;
wled->dev = &pdev->dev;
- version = (uintptr_t)of_device_get_match_data(&pdev->dev);
- if (!version) {
+ wled->version = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ if (!wled->version) {
dev_err(&pdev->dev, "Unknown device version\n");
return -ENODEV;
}
mutex_init(&wled->lock);
- rc = wled_configure(wled, version);
+ rc = wled_configure(wled);
if (rc)
return rc;
- switch (version) {
+ val = WLED3_SINK_REG_BRIGHT_MAX;
+ of_property_read_u32(pdev->dev.of_node, "max-brightness", &val);
+ wled->max_brightness = val;
+
+ switch (wled->version) {
case 3:
wled->cfg.auto_detection_enabled = false;
rc = wled3_setup(wled);
@@ -1233,6 +1663,18 @@ static int wled_probe(struct platform_device *pdev)
}
break;
+ case 5:
+ wled->has_short_detect = true;
+ if (wled->cfg.cabc_sel)
+ wled->max_brightness = WLED5_SINK_REG_BRIGHT_MAX_12B;
+
+ rc = wled5_setup(wled);
+ if (rc) {
+ dev_err(&pdev->dev, "wled5_setup failed\n");
+ return rc;
+ }
+ break;
+
default:
dev_err(wled->dev, "Invalid WLED version\n");
break;
@@ -1254,7 +1696,7 @@ static int wled_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.brightness = val;
- props.max_brightness = WLED3_SINK_REG_BRIGHT_MAX;
+ props.max_brightness = wled->max_brightness;
bl = devm_backlight_device_register(&pdev->dev, wled->name,
&pdev->dev, wled,
&wled_ops, &props);
@@ -1277,6 +1719,7 @@ static const struct of_device_id wled_match_table[] = {
{ .compatible = "qcom,pm8941-wled", .data = (void *)3 },
{ .compatible = "qcom,pmi8998-wled", .data = (void *)4 },
{ .compatible = "qcom,pm660l-wled", .data = (void *)4 },
+ { .compatible = "qcom,pm8150l-wled", .data = (void *)5 },
{}
};
MODULE_DEVICE_TABLE(of, wled_match_table);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 91b0a719d221..844ada978bb7 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -472,7 +472,7 @@ config FB_OF
config FB_CONTROL
bool "Apple \"control\" display support"
- depends on (FB = y) && PPC_PMAC && PPC32
+ depends on (FB = y) && ((PPC_PMAC && PPC32) || COMPILE_TEST)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2008,7 +2008,7 @@ config FB_PS3_DEFAULT_SIZE_M
config FB_XILINX
tristate "Xilinx frame buffer support"
- depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP)
+ depends on FB && (MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 20e03e00b66d..6062104f3afb 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -1855,8 +1855,6 @@ static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var,
var->yspot = par->crsr.spot_y;
if (size > var->height * var->width)
return -ENAMETOOLONG;
- if (!access_ok(data, size))
- return -EFAULT;
delta = 1 << par->crsr.fmode;
lspr = lofsprite + (delta << 1);
if (par->bplcon0 & BPC0_LACE)
@@ -1935,8 +1933,6 @@ static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var,
return -EINVAL;
if (!var->height)
return -EINVAL;
- if (!access_ok(data, var->width * var->height))
- return -EFAULT;
delta = 1 << fmode;
lofsprite = shfsprite = (u_short *)spritememory;
lspr = lofsprite + (delta << 1);
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index 314ab82e01c0..6f7838979f0a 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -544,10 +544,6 @@ static int arcfb_probe(struct platform_device *dev)
par->cslut[1] = 0x06;
info->flags = FBINFO_FLAG_DEFAULT;
spin_lock_init(&par->lock);
- retval = register_framebuffer(info);
- if (retval < 0)
- goto err1;
- platform_set_drvdata(dev, info);
if (irq) {
par->irq = irq;
if (request_irq(par->irq, &arcfb_interrupt, IRQF_SHARED,
@@ -558,6 +554,10 @@ static int arcfb_probe(struct platform_device *dev)
goto err1;
}
}
+ retval = register_framebuffer(info);
+ if (retval < 0)
+ goto err1;
+ platform_set_drvdata(dev, info);
fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
videomemorysize >> 10);
@@ -593,6 +593,8 @@ static int arcfb_remove(struct platform_device *dev)
if (info) {
unregister_framebuffer(info);
+ if (irq)
+ free_irq(((struct arcfb_par *)(info->par))->irq, info);
vfree((void __force *)info->screen_base);
framebuffer_release(info);
}
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index d567f5d56c13..1e252192569a 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1114,7 +1114,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
sinfo->irq_base = platform_get_irq(pdev, 0);
if (sinfo->irq_base < 0) {
- dev_err(dev, "unable to get irq\n");
ret = sinfo->irq_base;
goto stop_clk;
}
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index d7e41c8dd533..d05d4195acad 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -334,20 +334,6 @@ static const struct aty128_meminfo sdr_128 = {
.name = "128-bit SDR SGRAM (1:1)",
};
-static const struct aty128_meminfo sdr_64 = {
- .ML = 4,
- .MB = 8,
- .Trcd = 3,
- .Trp = 3,
- .Twr = 1,
- .CL = 3,
- .Tr2w = 1,
- .LoopLatency = 17,
- .DspOn = 46,
- .Rloop = 17,
- .name = "64-bit SDR SGRAM (1:1)",
-};
-
static const struct aty128_meminfo sdr_sgram = {
.ML = 4,
.MB = 4,
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 175d2598f28e..b0ac895e5ac9 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -126,7 +126,7 @@
#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(KERN_DEBUG "atyfb: " fmt, ## args)
#else
-#define DPRINTK(fmt, args...)
+#define DPRINTK(fmt, args...) no_printk(fmt, ##args)
#endif
#define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
@@ -3819,9 +3819,9 @@ static int __init atyfb_setup(char *options)
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "noaccel", 7)) {
- noaccel = 1;
+ noaccel = true;
} else if (!strncmp(this_opt, "nomtrr", 6)) {
- nomtrr = 1;
+ nomtrr = true;
} else if (!strncmp(this_opt, "vram:", 5))
vram = simple_strtoul(this_opt + 5, NULL, 0);
else if (!strncmp(this_opt, "pll:", 4))
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 38b61cdb5ca4..9c4f1be856ec 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -31,7 +31,6 @@
* more details.
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -48,12 +47,37 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
+#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
#include <asm/btext.h>
+#endif
#include "macmodes.h"
#include "controlfb.h"
+#if !defined(CONFIG_PPC_PMAC) || !defined(CONFIG_PPC32)
+#define invalid_vram_cache(addr)
+#undef in_8
+#undef out_8
+#undef in_le32
+#undef out_le32
+#define in_8(addr) 0
+#define out_8(addr, val)
+#define in_le32(addr) 0
+#define out_le32(addr, val)
+#define pgprot_cached_wthru(prot) (prot)
+#else
+static void invalid_vram_cache(void __force *addr)
+{
+ eieio();
+ dcbf(addr);
+ mb();
+ eieio();
+ dcbf(addr);
+ mb();
+}
+#endif
+
struct fb_par_control {
int vmode, cmode;
int xres, yres;
@@ -117,38 +141,6 @@ struct fb_info_control {
#define CNTRL_REG(INFO,REG) (&(((INFO)->control_regs->REG).r))
-/******************** Prototypes for exported functions ********************/
-/*
- * struct fb_ops
- */
-static int controlfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static int controlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info);
-static int controlfb_blank(int blank_mode, struct fb_info *info);
-static int controlfb_mmap(struct fb_info *info,
- struct vm_area_struct *vma);
-static int controlfb_set_par (struct fb_info *info);
-static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info);
-
-/******************** Prototypes for internal functions **********************/
-
-static void set_control_clock(unsigned char *params);
-static int init_control(struct fb_info_control *p);
-static void control_set_hardware(struct fb_info_control *p,
- struct fb_par_control *par);
-static int control_of_init(struct device_node *dp);
-static void find_vram_size(struct fb_info_control *p);
-static int read_control_sense(struct fb_info_control *p);
-static int calc_clock_params(unsigned long clk, unsigned char *param);
-static int control_var_to_par(struct fb_var_screeninfo *var,
- struct fb_par_control *par, const struct fb_info *fb_info);
-static inline void control_par_to_var(struct fb_par_control *par,
- struct fb_var_screeninfo *var);
-static void control_init_info(struct fb_info *info, struct fb_info_control *p);
-static void control_cleanup(void);
-
-
/************************** Internal variables *******************************/
static struct fb_info_control *control_fb;
@@ -157,189 +149,6 @@ static int default_vmode __initdata = VMODE_NVRAM;
static int default_cmode __initdata = CMODE_NVRAM;
-static const struct fb_ops controlfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = controlfb_check_var,
- .fb_set_par = controlfb_set_par,
- .fb_setcolreg = controlfb_setcolreg,
- .fb_pan_display = controlfb_pan_display,
- .fb_blank = controlfb_blank,
- .fb_mmap = controlfb_mmap,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-};
-
-
-/******************** The functions for controlfb_ops ********************/
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- struct device_node *dp;
- int ret = -ENXIO;
-
- dp = of_find_node_by_name(NULL, "control");
- if (dp && !control_of_init(dp))
- ret = 0;
- of_node_put(dp);
-
- return ret;
-}
-
-void cleanup_module(void)
-{
- control_cleanup();
-}
-#endif
-
-/*
- * Checks a var structure
- */
-static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct fb_par_control par;
- int err;
-
- err = control_var_to_par(var, &par, info);
- if (err)
- return err;
- control_par_to_var(&par, var);
-
- return 0;
-}
-
-/*
- * Applies current var to display
- */
-static int controlfb_set_par (struct fb_info *info)
-{
- struct fb_info_control *p =
- container_of(info, struct fb_info_control, info);
- struct fb_par_control par;
- int err;
-
- if((err = control_var_to_par(&info->var, &par, info))) {
- printk (KERN_ERR "controlfb_set_par: error calling"
- " control_var_to_par: %d.\n", err);
- return err;
- }
-
- control_set_hardware(p, &par);
-
- info->fix.visual = (p->par.cmode == CMODE_8) ?
- FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
- info->fix.line_length = p->par.pitch;
- info->fix.xpanstep = 32 >> p->par.cmode;
- info->fix.ypanstep = 1;
-
- return 0;
-}
-
-/*
- * Set screen start address according to var offset values
- */
-static inline void set_screen_start(int xoffset, int yoffset,
- struct fb_info_control *p)
-{
- struct fb_par_control *par = &p->par;
-
- par->xoffset = xoffset;
- par->yoffset = yoffset;
- out_le32(CNTRL_REG(p,start_addr),
- par->yoffset * par->pitch + (par->xoffset << par->cmode));
-}
-
-
-static int controlfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- unsigned int xoffset, hstep;
- struct fb_info_control *p =
- container_of(info, struct fb_info_control, info);
- struct fb_par_control *par = &p->par;
-
- /*
- * make sure start addr will be 32-byte aligned
- */
- hstep = 0x1f >> par->cmode;
- xoffset = (var->xoffset + hstep) & ~hstep;
-
- if (xoffset+par->xres > par->vxres ||
- var->yoffset+par->yres > par->vyres)
- return -EINVAL;
-
- set_screen_start(xoffset, var->yoffset, p);
-
- return 0;
-}
-
-
-/*
- * Private mmap since we want to have a different caching on the framebuffer
- * for controlfb.
- * Note there's no locking in here; it's done in fb_mmap() in fbmem.c.
- */
-static int controlfb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- unsigned long mmio_pgoff;
- unsigned long start;
- u32 len;
-
- start = info->fix.smem_start;
- len = info->fix.smem_len;
- mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
- if (vma->vm_pgoff >= mmio_pgoff) {
- if (info->var.accel_flags)
- return -EINVAL;
- vma->vm_pgoff -= mmio_pgoff;
- start = info->fix.mmio_start;
- len = info->fix.mmio_len;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- } else {
- /* framebuffer */
- vma->vm_page_prot = pgprot_cached_wthru(vma->vm_page_prot);
- }
-
- return vm_iomap_memory(vma, start, len);
-}
-
-static int controlfb_blank(int blank_mode, struct fb_info *info)
-{
- struct fb_info_control *p =
- container_of(info, struct fb_info_control, info);
- unsigned ctrl;
-
- ctrl = le32_to_cpup(CNTRL_REG(p,ctrl));
- if (blank_mode > 0)
- switch (blank_mode) {
- case FB_BLANK_VSYNC_SUSPEND:
- ctrl &= ~3;
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- ctrl &= ~0x30;
- break;
- case FB_BLANK_POWERDOWN:
- ctrl &= ~0x33;
- /* fall through */
- case FB_BLANK_NORMAL:
- ctrl |= 0x400;
- break;
- default:
- break;
- }
- else {
- ctrl &= ~0x400;
- ctrl |= 0x33;
- }
- out_le32(CNTRL_REG(p,ctrl), ctrl);
-
- return 0;
-}
-
static int controlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
@@ -396,75 +205,18 @@ static void set_control_clock(unsigned char *params)
#endif
}
-
/*
- * finish off the driver initialization and register
+ * Set screen start address according to var offset values
*/
-static int __init init_control(struct fb_info_control *p)
+static inline void set_screen_start(int xoffset, int yoffset,
+ struct fb_info_control *p)
{
- int full, sense, vmode, cmode, vyres;
- struct fb_var_screeninfo var;
- int rc;
-
- printk(KERN_INFO "controlfb: ");
-
- full = p->total_vram == 0x400000;
-
- /* Try to pick a video mode out of NVRAM if we have one. */
- cmode = default_cmode;
- if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
- cmode = nvram_read_byte(NV_CMODE);
- if (cmode < CMODE_8 || cmode > CMODE_32)
- cmode = CMODE_8;
-
- vmode = default_vmode;
- if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
- vmode = nvram_read_byte(NV_VMODE);
- if (vmode < 1 || vmode > VMODE_MAX ||
- control_mac_modes[vmode - 1].m[full] < cmode) {
- sense = read_control_sense(p);
- printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
- vmode = mac_map_monitor_sense(sense);
- if (control_mac_modes[vmode - 1].m[full] < 0)
- vmode = VMODE_640_480_60;
- cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
- }
-
- /* Initialize info structure */
- control_init_info(&p->info, p);
-
- /* Setup default var */
- if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
- /* This shouldn't happen! */
- printk("mac_vmode_to_var(%d, %d,) failed\n", vmode, cmode);
-try_again:
- vmode = VMODE_640_480_60;
- cmode = CMODE_8;
- if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
- printk(KERN_ERR "controlfb: mac_vmode_to_var() failed\n");
- return -ENXIO;
- }
- printk(KERN_INFO "controlfb: ");
- }
- printk("using video mode %d and color mode %d.\n", vmode, cmode);
-
- vyres = (p->total_vram - CTRLFB_OFF) / (var.xres << cmode);
- if (vyres > var.yres)
- var.yres_virtual = vyres;
-
- /* Apply default var */
- var.activate = FB_ACTIVATE_NOW;
- rc = fb_set_var(&p->info, &var);
- if (rc && (vmode != VMODE_640_480_60 || cmode != CMODE_8))
- goto try_again;
-
- /* Register with fbdev layer */
- if (register_framebuffer(&p->info) < 0)
- return -ENXIO;
-
- fb_info(&p->info, "control display adapter\n");
+ struct fb_par_control *par = &p->par;
- return 0;
+ par->xoffset = xoffset;
+ par->yoffset = yoffset;
+ out_le32(CNTRL_REG(p,start_addr),
+ par->yoffset * par->pitch + (par->xoffset << par->cmode));
}
#define RADACAL_WRITE(a,d) \
@@ -528,67 +280,6 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
#endif /* CONFIG_BOOTX_TEXT */
}
-
-/*
- * Parse user specified options (`video=controlfb:')
- */
-static void __init control_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!strncmp(this_opt, "vmode:", 6)) {
- int vmode = simple_strtoul(this_opt+6, NULL, 0);
- if (vmode > 0 && vmode <= VMODE_MAX &&
- control_mac_modes[vmode - 1].m[1] >= 0)
- default_vmode = vmode;
- } else if (!strncmp(this_opt, "cmode:", 6)) {
- int depth = simple_strtoul(this_opt+6, NULL, 0);
- switch (depth) {
- case CMODE_8:
- case CMODE_16:
- case CMODE_32:
- default_cmode = depth;
- break;
- case 8:
- default_cmode = CMODE_8;
- break;
- case 15:
- case 16:
- default_cmode = CMODE_16;
- break;
- case 24:
- case 32:
- default_cmode = CMODE_32;
- break;
- }
- }
- }
-}
-
-static int __init control_init(void)
-{
- struct device_node *dp;
- char *option = NULL;
- int ret = -ENXIO;
-
- if (fb_get_options("controlfb", &option))
- return -ENODEV;
- control_setup(option);
-
- dp = of_find_node_by_name(NULL, "control");
- if (dp && !control_of_init(dp))
- ret = 0;
- of_node_put(dp);
-
- return ret;
-}
-
-module_init(control_init);
-
/* Work out which banks of VRAM we have installed. */
/* danj: I guess the card just ignores writes to nonexistent VRAM... */
@@ -605,12 +296,7 @@ static void __init find_vram_size(struct fb_info_control *p)
out_8(&p->frame_buffer[0x600000], 0xb3);
out_8(&p->frame_buffer[0x600001], 0x71);
- asm volatile("eieio; dcbf 0,%0" : : "r" (&p->frame_buffer[0x600000])
- : "memory" );
- mb();
- asm volatile("eieio; dcbi 0,%0" : : "r" (&p->frame_buffer[0x600000])
- : "memory" );
- mb();
+ invalid_vram_cache(&p->frame_buffer[0x600000]);
bank2 = (in_8(&p->frame_buffer[0x600000]) == 0xb3)
&& (in_8(&p->frame_buffer[0x600001]) == 0x71);
@@ -624,12 +310,7 @@ static void __init find_vram_size(struct fb_info_control *p)
out_8(&p->frame_buffer[0], 0x5a);
out_8(&p->frame_buffer[1], 0xc7);
- asm volatile("eieio; dcbf 0,%0" : : "r" (&p->frame_buffer[0])
- : "memory" );
- mb();
- asm volatile("eieio; dcbi 0,%0" : : "r" (&p->frame_buffer[0])
- : "memory" );
- mb();
+ invalid_vram_cache(&p->frame_buffer[0]);
bank1 = (in_8(&p->frame_buffer[0]) == 0x5a)
&& (in_8(&p->frame_buffer[1]) == 0xc7);
@@ -663,78 +344,6 @@ static void __init find_vram_size(struct fb_info_control *p)
(bank1 + bank2) << 1, bank1 << 1, bank2 << 1);
}
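
Both probes depend on the readback hitting the card rather than the CPU data
cache. The invalid_vram_cache() helper is introduced elsewhere in this patch;
a sketch consistent with the inline assembly it replaces here (an assumption,
not the verbatim helper) is:

	static inline void invalid_vram_cache(void __force *addr)
	{
		/* flush the dirty line out to VRAM, then invalidate it,
		 * so the following in_8() readback comes from the card */
		asm volatile("eieio; dcbf 0,%0" : : "r" (addr) : "memory");
		mb();
		asm volatile("eieio; dcbi 0,%0" : : "r" (addr) : "memory");
		mb();
	}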
-
-/*
- * find "control" and initialize
- */
-static int __init control_of_init(struct device_node *dp)
-{
- struct fb_info_control *p;
- struct resource fb_res, reg_res;
-
- if (control_fb) {
- printk(KERN_ERR "controlfb: only one control is supported\n");
- return -ENXIO;
- }
-
- if (of_pci_address_to_resource(dp, 2, &fb_res) ||
- of_pci_address_to_resource(dp, 1, &reg_res)) {
- printk(KERN_ERR "can't get 2 addresses for control\n");
- return -ENXIO;
- }
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
- control_fb = p; /* save it for cleanups */
-
- /* Map in frame buffer and registers */
- p->fb_orig_base = fb_res.start;
- p->fb_orig_size = resource_size(&fb_res);
- /* use the big-endian aperture (??) */
- p->frame_buffer_phys = fb_res.start + 0x800000;
- p->control_regs_phys = reg_res.start;
- p->control_regs_size = resource_size(&reg_res);
-
- if (!p->fb_orig_base ||
- !request_mem_region(p->fb_orig_base,p->fb_orig_size,"controlfb")) {
- p->fb_orig_base = 0;
- goto error_out;
- }
- /* map at most 8MB for the frame buffer */
- p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
-
- if (!p->control_regs_phys ||
- !request_mem_region(p->control_regs_phys, p->control_regs_size,
- "controlfb regs")) {
- p->control_regs_phys = 0;
- goto error_out;
- }
- p->control_regs = ioremap(p->control_regs_phys, p->control_regs_size);
-
- p->cmap_regs_phys = 0xf301b000; /* XXX not in prom? */
- if (!request_mem_region(p->cmap_regs_phys, 0x1000, "controlfb cmap")) {
- p->cmap_regs_phys = 0;
- goto error_out;
- }
- p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
-
- if (!p->cmap_regs || !p->control_regs || !p->frame_buffer)
- goto error_out;
-
- find_vram_size(p);
- if (!p->total_vram)
- goto error_out;
-
- if (init_control(p) < 0)
- goto error_out;
-
- return 0;
-
-error_out:
- control_cleanup();
- return -ENXIO;
-}
-
/*
* Get the monitor sense value.
* Note that this can be called before calibrate_delay,
@@ -1019,6 +628,150 @@ static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeni
var->pixclock >>= par->regvals.clock_params[2];
}
+/******************** The functions for controlfb_ops ********************/
+
+/*
+ * Checks a var structure
+ */
+static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct fb_par_control par;
+ int err;
+
+ err = control_var_to_par(var, &par, info);
+ if (err)
+ return err;
+ control_par_to_var(&par, var);
+
+ return 0;
+}
+
+/*
+ * Applies current var to display
+ */
+static int controlfb_set_par (struct fb_info *info)
+{
+ struct fb_info_control *p =
+ container_of(info, struct fb_info_control, info);
+ struct fb_par_control par;
+ int err;
+
+ if((err = control_var_to_par(&info->var, &par, info))) {
+ printk (KERN_ERR "controlfb_set_par: error calling"
+ " control_var_to_par: %d.\n", err);
+ return err;
+ }
+
+ control_set_hardware(p, &par);
+
+ info->fix.visual = (p->par.cmode == CMODE_8) ?
+ FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
+ info->fix.line_length = p->par.pitch;
+ info->fix.xpanstep = 32 >> p->par.cmode;
+ info->fix.ypanstep = 1;
+
+ return 0;
+}
+
+static int controlfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ unsigned int xoffset, hstep;
+ struct fb_info_control *p =
+ container_of(info, struct fb_info_control, info);
+ struct fb_par_control *par = &p->par;
+
+ /*
+ * make sure start addr will be 32-byte aligned
+ */
+ hstep = 0x1f >> par->cmode;
+ xoffset = (var->xoffset + hstep) & ~hstep;
+
+ if (xoffset+par->xres > par->vxres ||
+ var->yoffset+par->yres > par->vyres)
+ return -EINVAL;
+
+ set_screen_start(xoffset, var->yoffset, p);
+
+ return 0;
+}
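
The mask arithmetic keeps the scan-out start address 32-byte aligned at every
depth, matching the xpanstep values set in controlfb_set_par() above
(32 >> cmode). A worked example, with cmode 0/1/2 for 8/16/32 bpp as implied
by the xoffset << cmode byte conversion:

	hstep = 0x1f >> 0;		/* 8 bpp:  31 -> round to 32 px = 32 bytes */
	hstep = 0x1f >> 2;		/* 32 bpp:  7 -> round to  8 px = 32 bytes */
	xoffset = (13 + 7) & ~7;	/* 13 px at 32 bpp -> 16 px = 64 bytes */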
+
+static int controlfb_blank(int blank_mode, struct fb_info *info)
+{
+ struct fb_info_control __maybe_unused *p =
+ container_of(info, struct fb_info_control, info);
+ unsigned ctrl;
+
+ ctrl = in_le32(CNTRL_REG(p, ctrl));
+ if (blank_mode > 0)
+ switch (blank_mode) {
+ case FB_BLANK_VSYNC_SUSPEND:
+ ctrl &= ~3;
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ ctrl &= ~0x30;
+ break;
+ case FB_BLANK_POWERDOWN:
+ ctrl &= ~0x33;
+ /* fall through */
+ case FB_BLANK_NORMAL:
+ ctrl |= 0x400;
+ break;
+ default:
+ break;
+ }
+ else {
+ ctrl &= ~0x400;
+ ctrl |= 0x33;
+ }
+ out_le32(CNTRL_REG(p,ctrl), ctrl);
+
+ return 0;
+}
+
+/*
+ * Private mmap since we want to have a different caching on the framebuffer
+ * for controlfb.
+ * Note there's no locking in here; it's done in fb_mmap() in fbmem.c.
+ */
+static int controlfb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ unsigned long mmio_pgoff;
+ unsigned long start;
+ u32 len;
+
+ start = info->fix.smem_start;
+ len = info->fix.smem_len;
+ mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+ if (vma->vm_pgoff >= mmio_pgoff) {
+ if (info->var.accel_flags)
+ return -EINVAL;
+ vma->vm_pgoff -= mmio_pgoff;
+ start = info->fix.mmio_start;
+ len = info->fix.mmio_len;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ } else {
+ /* framebuffer */
+ vma->vm_page_prot = pgprot_cached_wthru(vma->vm_page_prot);
+ }
+
+ return vm_iomap_memory(vma, start, len);
+}
+
+static const struct fb_ops controlfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = controlfb_check_var,
+ .fb_set_par = controlfb_set_par,
+ .fb_setcolreg = controlfb_setcolreg,
+ .fb_pan_display = controlfb_pan_display,
+ .fb_blank = controlfb_blank,
+ .fb_mmap = controlfb_mmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
/*
* Set misc info vars for this driver
*/
@@ -1045,6 +798,115 @@ static void __init control_init_info(struct fb_info *info, struct fb_info_contro
info->fix.accel = FB_ACCEL_NONE;
}
+/*
+ * Parse user specified options (`video=controlfb:')
+ */
+static void __init control_setup(char *options)
+{
+ char *this_opt;
+
+ if (!options || !*options)
+ return;
+
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ if (!strncmp(this_opt, "vmode:", 6)) {
+ int vmode = simple_strtoul(this_opt+6, NULL, 0);
+ if (vmode > 0 && vmode <= VMODE_MAX &&
+ control_mac_modes[vmode - 1].m[1] >= 0)
+ default_vmode = vmode;
+ } else if (!strncmp(this_opt, "cmode:", 6)) {
+ int depth = simple_strtoul(this_opt+6, NULL, 0);
+ switch (depth) {
+ case CMODE_8:
+ case CMODE_16:
+ case CMODE_32:
+ default_cmode = depth;
+ break;
+ case 8:
+ default_cmode = CMODE_8;
+ break;
+ case 15:
+ case 16:
+ default_cmode = CMODE_16;
+ break;
+ case 24:
+ case 32:
+ default_cmode = CMODE_32;
+ break;
+ }
+ }
+ }
+}
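
Both options take numeric arguments, and depths may be given either as raw
CMODE_* values (0, 1, 2) or as the usual bit counts (8, 15/16, 24/32). An
illustrative boot parameter (the mode number is hypothetical and subject to
control_mac_modes[] support):

	video=controlfb:vmode:14,cmode:16

which requests video mode 14 at 16 bits per pixel.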
+
+/*
+ * finish off the driver initialization and register
+ */
+static int __init init_control(struct fb_info_control *p)
+{
+ int full, sense, vmode, cmode, vyres;
+ struct fb_var_screeninfo var;
+ int rc;
+
+ printk(KERN_INFO "controlfb: ");
+
+ full = p->total_vram == 0x400000;
+
+ /* Try to pick a video mode out of NVRAM if we have one. */
+ cmode = default_cmode;
+ if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
+ cmode = nvram_read_byte(NV_CMODE);
+ if (cmode < CMODE_8 || cmode > CMODE_32)
+ cmode = CMODE_8;
+
+ vmode = default_vmode;
+ if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
+ vmode = nvram_read_byte(NV_VMODE);
+ if (vmode < 1 || vmode > VMODE_MAX ||
+ control_mac_modes[vmode - 1].m[full] < cmode) {
+ sense = read_control_sense(p);
+ printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
+ vmode = mac_map_monitor_sense(sense);
+ if (control_mac_modes[vmode - 1].m[full] < 0)
+ vmode = VMODE_640_480_60;
+ cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
+ }
+
+ /* Initialize info structure */
+ control_init_info(&p->info, p);
+
+ /* Setup default var */
+ if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
+ /* This shouldn't happen! */
+ printk("mac_vmode_to_var(%d, %d,) failed\n", vmode, cmode);
+try_again:
+ vmode = VMODE_640_480_60;
+ cmode = CMODE_8;
+ if (mac_vmode_to_var(vmode, cmode, &var) < 0) {
+ printk(KERN_ERR "controlfb: mac_vmode_to_var() failed\n");
+ return -ENXIO;
+ }
+ printk(KERN_INFO "controlfb: ");
+ }
+ printk("using video mode %d and color mode %d.\n", vmode, cmode);
+
+ vyres = (p->total_vram - CTRLFB_OFF) / (var.xres << cmode);
+ if (vyres > var.yres)
+ var.yres_virtual = vyres;
+
+ /* Apply default var */
+ var.activate = FB_ACTIVATE_NOW;
+ rc = fb_set_var(&p->info, &var);
+ if (rc && (vmode != VMODE_640_480_60 || cmode != CMODE_8))
+ goto try_again;
+
+ /* Register with fbdev layer */
+ if (register_framebuffer(&p->info) < 0)
+ return -ENXIO;
+
+ fb_info(&p->info, "control display adapter\n");
+
+ return 0;
+}
static void control_cleanup(void)
{
@@ -1071,4 +933,93 @@ static void control_cleanup(void)
kfree(p);
}
+/*
+ * find "control" and initialize
+ */
+static int __init control_of_init(struct device_node *dp)
+{
+ struct fb_info_control *p;
+ struct resource fb_res, reg_res;
+
+ if (control_fb) {
+ printk(KERN_ERR "controlfb: only one control is supported\n");
+ return -ENXIO;
+ }
+
+ if (of_pci_address_to_resource(dp, 2, &fb_res) ||
+ of_pci_address_to_resource(dp, 1, &reg_res)) {
+ printk(KERN_ERR "can't get 2 addresses for control\n");
+ return -ENXIO;
+ }
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ control_fb = p; /* save it for cleanups */
+
+ /* Map in frame buffer and registers */
+ p->fb_orig_base = fb_res.start;
+ p->fb_orig_size = resource_size(&fb_res);
+ /* use the big-endian aperture (??) */
+ p->frame_buffer_phys = fb_res.start + 0x800000;
+ p->control_regs_phys = reg_res.start;
+ p->control_regs_size = resource_size(&reg_res);
+
+ if (!p->fb_orig_base ||
+ !request_mem_region(p->fb_orig_base,p->fb_orig_size,"controlfb")) {
+ p->fb_orig_base = 0;
+ goto error_out;
+ }
+ /* map at most 8MB for the frame buffer */
+ p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
+
+ if (!p->control_regs_phys ||
+ !request_mem_region(p->control_regs_phys, p->control_regs_size,
+ "controlfb regs")) {
+ p->control_regs_phys = 0;
+ goto error_out;
+ }
+ p->control_regs = ioremap(p->control_regs_phys, p->control_regs_size);
+
+ p->cmap_regs_phys = 0xf301b000; /* XXX not in prom? */
+ if (!request_mem_region(p->cmap_regs_phys, 0x1000, "controlfb cmap")) {
+ p->cmap_regs_phys = 0;
+ goto error_out;
+ }
+ p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
+
+ if (!p->cmap_regs || !p->control_regs || !p->frame_buffer)
+ goto error_out;
+
+ find_vram_size(p);
+ if (!p->total_vram)
+ goto error_out;
+
+ if (init_control(p) < 0)
+ goto error_out;
+
+ return 0;
+
+error_out:
+ control_cleanup();
+ return -ENXIO;
+}
+
+static int __init control_init(void)
+{
+ struct device_node *dp;
+ char *option = NULL;
+ int ret = -ENXIO;
+
+ if (fb_get_options("controlfb", &option))
+ return -ENODEV;
+ control_setup(option);
+
+ dp = of_find_node_by_name(NULL, "control");
+ if (dp && !control_of_init(dp))
+ ret = 0;
+ of_node_put(dp);
+
+ return ret;
+}
+device_initcall(control_init);
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 8e2e19f3bf44..d62a1e43864e 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -44,7 +44,7 @@
#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(fmt,## args)
#else
-#define DPRINTK(fmt, args...)
+#define DPRINTK(fmt, args...) no_printk(fmt, ##args)
#endif
#define FBMON_FIX_HEADER 1
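
Switching the empty stub to no_printk() keeps printf-format checking alive in
non-DEBUG builds at zero run-time cost; the same conversion appears below for
matroxfb, pm2fb, pm3fb, savagefb and viafb, and it is exactly what flushes out
stray arguments such as the DPRINTK(1, ...) calls corrected in the pm3fb hunk.
A minimal illustration (hypothetical call site):

	#define DPRINTK(fmt, args...) no_printk(fmt, ##args)

	DPRINTK("bad block %d\n", blk);		/* format-checked, compiled out */
	DPRINTK(1, "bad block %d\n", blk);	/* now warns: 1 is not a format string */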
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 460826a7ad55..513f58f28b0f 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -1160,12 +1160,14 @@ EXPORT_SYMBOL(cyber2000fb_detach);
#define DDC_SDA_IN (1 << 6)
static void cyber2000fb_enable_ddc(struct cfb_info *cfb)
+ __acquires(&cfb->reg_b0_lock)
{
spin_lock(&cfb->reg_b0_lock);
cyber2000fb_writew(0x1bf, 0x3ce, cfb);
}
static void cyber2000fb_disable_ddc(struct cfb_info *cfb)
+ __releases(&cfb->reg_b0_lock)
{
cyber2000fb_writew(0x0bf, 0x3ce, cfb);
spin_unlock(&cfb->reg_b0_lock);
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index aa7583d963ac..13bbf7fe13bf 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -1966,13 +1966,13 @@ static int i810fb_setup(char *options)
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "mtrr", 4))
- mtrr = 1;
+ mtrr = true;
else if (!strncmp(this_opt, "accel", 5))
- accel = 1;
+ accel = true;
else if (!strncmp(this_opt, "extvga", 6))
- extvga = 1;
+ extvga = true;
else if (!strncmp(this_opt, "sync", 4))
- sync = 1;
+ sync = true;
else if (!strncmp(this_opt, "vram:", 5))
vram = (simple_strtoul(this_opt+5, NULL, 0));
else if (!strncmp(this_opt, "voffset:", 8))
@@ -1998,7 +1998,7 @@ static int i810fb_setup(char *options)
else if (!strncmp(this_opt, "vsync2:", 7))
vsync2 = simple_strtoul(this_opt+7, NULL, 0);
else if (!strncmp(this_opt, "dcolor", 6))
- dcolor = 1;
+ dcolor = true;
else if (!strncmp(this_opt, "ddc3", 4))
ddc3 = true;
else
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 370bf2553d43..884b16efa7e8 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -172,6 +172,7 @@ struct imxfb_info {
int num_modes;
struct regulator *lcd_pwr;
+ int lcd_pwr_enabled;
};
static const struct platform_device_id imxfb_devtype[] = {
@@ -801,16 +802,30 @@ static int imxfb_lcd_get_power(struct lcd_device *lcddev)
return FB_BLANK_UNBLANK;
}
+static int imxfb_regulator_set(struct imxfb_info *fbi, int enable)
+{
+ int ret;
+
+ if (enable == fbi->lcd_pwr_enabled)
+ return 0;
+
+ if (enable)
+ ret = regulator_enable(fbi->lcd_pwr);
+ else
+ ret = regulator_disable(fbi->lcd_pwr);
+
+ if (ret == 0)
+ fbi->lcd_pwr_enabled = enable;
+
+ return ret;
+}
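
regulator_enable() and regulator_disable() must stay balanced, and
lcd_pwr_enabled records the last state so repeated blank or unblank requests
cannot underflow or overflow the regulator use count. An illustrative
sequence:

	imxfb_lcd_set_power(lcddev, FB_BLANK_POWERDOWN);	/* disables once */
	imxfb_lcd_set_power(lcddev, FB_BLANK_POWERDOWN);	/* no-op, already off */
	imxfb_lcd_set_power(lcddev, FB_BLANK_UNBLANK);		/* enables once */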
+
static int imxfb_lcd_set_power(struct lcd_device *lcddev, int power)
{
struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
- if (!IS_ERR(fbi->lcd_pwr)) {
- if (power == FB_BLANK_UNBLANK)
- return regulator_enable(fbi->lcd_pwr);
- else
- return regulator_disable(fbi->lcd_pwr);
- }
+ if (!IS_ERR(fbi->lcd_pwr))
+ return imxfb_regulator_set(fbi, power == FB_BLANK_UNBLANK);
return 0;
}
diff --git a/drivers/video/fbdev/matrox/g450_pll.c b/drivers/video/fbdev/matrox/g450_pll.c
index c15f8a57498e..ff8e321a22ce 100644
--- a/drivers/video/fbdev/matrox/g450_pll.c
+++ b/drivers/video/fbdev/matrox/g450_pll.c
@@ -333,11 +333,9 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
unsigned int *deltaarray)
{
unsigned int mnpcount;
- unsigned int pixel_vco;
const struct matrox_pll_limits* pi;
struct matrox_pll_cache* ci;
- pixel_vco = 0;
switch (pll) {
case M_PIXEL_PLL_A:
case M_PIXEL_PLL_B:
@@ -420,7 +418,6 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
mnp = matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) << 16;
mnp |= matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) << 8;
- pixel_vco = g450_mnp2vco(minfo, mnp);
matroxfb_DAC_unlock_irqrestore(flags);
}
pi = &minfo->limits.video;
@@ -441,25 +438,6 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
unsigned int delta;
vco = g450_mnp2vco(minfo, mnp);
-#if 0
- if (pll == M_VIDEO_PLL) {
- unsigned int big, small;
-
- if (vco < pixel_vco) {
- small = vco;
- big = pixel_vco;
- } else {
- small = pixel_vco;
- big = vco;
- }
- while (big > small) {
- big >>= 1;
- }
- if (big == small) {
- continue;
- }
- }
-#endif
delta = pll_freq_delta(fout, g450_vco2f(mnp, vco));
for (idx = mnpcount; idx > 0; idx--) {
/* == is important; due to nextpll algorithm we get
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.h b/drivers/video/fbdev/matrox/matroxfb_base.h
index f85ad25659e5..759dee996af1 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.h
+++ b/drivers/video/fbdev/matrox/matroxfb_base.h
@@ -86,7 +86,7 @@
#ifdef DEBUG
#define dprintk(X...) printk(X)
#else
-#define dprintk(X...)
+#define dprintk(X...) no_printk(X)
#endif
#ifndef PCI_SS_VENDOR_ID_SIEMENS_NIXDORF
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
index 42569264801f..d40b806461ca 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
@@ -184,7 +184,6 @@ static void mb86290fb_imageblit16(u32 *cmd, u16 step, u16 dx, u16 dy,
static void mb86290fb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
- int mdr;
u32 *cmd = NULL;
void (*cmdfn) (u32 *, u16, u16, u16, u16, u16, u32, u32,
const struct fb_image *, struct fb_info *) = NULL;
@@ -196,7 +195,6 @@ static void mb86290fb_imageblit(struct fb_info *info,
u16 dx = image->dx, dy = image->dy;
int x2, y2, vxres, vyres;
- mdr = (GDC_ROP_COPY << 9);
x2 = image->dx + image->width;
y2 = image->dy + image->height;
vxres = info->var.xres_virtual;
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 4af28e4421e5..603731a5a72e 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -509,7 +509,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
uint16_t h_start_width, uint16_t h_sync_width,
uint16_t h_end_width, uint16_t v_start_width,
uint16_t v_sync_width, uint16_t v_end_width,
- struct ipu_di_signal_cfg sig)
+ const struct ipu_di_signal_cfg *sig)
{
unsigned long lock_flags;
uint32_t reg;
@@ -591,17 +591,17 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
/* DI settings */
old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF;
- old_conf |= sig.datamask_en << DI_D3_DATAMSK_SHIFT |
- sig.clksel_en << DI_D3_CLK_SEL_SHIFT |
- sig.clkidle_en << DI_D3_CLK_IDLE_SHIFT;
+ old_conf |= sig->datamask_en << DI_D3_DATAMSK_SHIFT |
+ sig->clksel_en << DI_D3_CLK_SEL_SHIFT |
+ sig->clkidle_en << DI_D3_CLK_IDLE_SHIFT;
mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF);
old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF;
- old_conf |= sig.data_pol << DI_D3_DATA_POL_SHIFT |
- sig.clk_pol << DI_D3_CLK_POL_SHIFT |
- sig.enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
- sig.Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
- sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
+ old_conf |= sig->data_pol << DI_D3_DATA_POL_SHIFT |
+ sig->clk_pol << DI_D3_CLK_POL_SHIFT |
+ sig->enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
+ sig->Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
+ sig->Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
map = &di_mappings[mx3fb->disp_data_fmt];
@@ -855,7 +855,7 @@ static int __set_par(struct fb_info *fbi, bool lock)
fbi->var.upper_margin,
fbi->var.vsync_len,
fbi->var.lower_margin +
- fbi->var.vsync_len, sig_cfg) != 0) {
+ fbi->var.vsync_len, &sig_cfg) != 0) {
dev_err(fbi->device,
"mx3fb: Error initializing panel.\n");
return -EINVAL;
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index e8a304f84ea8..1a9d6242916e 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1247,7 +1247,7 @@ static ssize_t omapfb_show_caps_num(struct device *dev,
size = 0;
while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
omapfb_get_caps(fbdev, plane, &caps);
- size += snprintf(&buf[size], PAGE_SIZE - size,
+ size += scnprintf(&buf[size], PAGE_SIZE - size,
"plane#%d %#010x %#010x %#010x\n",
plane, caps.ctrl, caps.plane_color, caps.wnd_color);
plane++;
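
The difference matters once the buffer is nearly full: snprintf() returns the
length the output would have needed, so size can grow past PAGE_SIZE and make
the next &buf[size] point beyond the buffer, whereas scnprintf() returns the
bytes actually stored. Illustrative behaviour:

	char buf[8];

	snprintf(buf, sizeof(buf), "0123456789");	/* returns 10 */
	scnprintf(buf, sizeof(buf), "0123456789");	/* returns 7, bytes written */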
@@ -1268,28 +1268,28 @@ static ssize_t omapfb_show_caps_text(struct device *dev,
size = 0;
while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
omapfb_get_caps(fbdev, plane, &caps);
- size += snprintf(&buf[size], PAGE_SIZE - size,
+ size += scnprintf(&buf[size], PAGE_SIZE - size,
"plane#%d:\n", plane);
for (i = 0; i < ARRAY_SIZE(ctrl_caps) &&
size < PAGE_SIZE; i++) {
if (ctrl_caps[i].flag & caps.ctrl)
- size += snprintf(&buf[size], PAGE_SIZE - size,
+ size += scnprintf(&buf[size], PAGE_SIZE - size,
" %s\n", ctrl_caps[i].name);
}
- size += snprintf(&buf[size], PAGE_SIZE - size,
+ size += scnprintf(&buf[size], PAGE_SIZE - size,
" plane colors:\n");
for (i = 0; i < ARRAY_SIZE(color_caps) &&
size < PAGE_SIZE; i++) {
if (color_caps[i].flag & caps.plane_color)
- size += snprintf(&buf[size], PAGE_SIZE - size,
+ size += scnprintf(&buf[size], PAGE_SIZE - size,
" %s\n", color_caps[i].name);
}
- size += snprintf(&buf[size], PAGE_SIZE - size,
+ size += scnprintf(&buf[size], PAGE_SIZE - size,
" window colors:\n");
for (i = 0; i < ARRAY_SIZE(color_caps) &&
size < PAGE_SIZE; i++) {
if (color_caps[i].flag & caps.wnd_color)
- size += snprintf(&buf[size], PAGE_SIZE - size,
+ size += scnprintf(&buf[size], PAGE_SIZE - size,
" %s\n", color_caps[i].name);
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index ce37da85cc45..4a16798b2ecd 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -557,11 +557,6 @@ u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel)
}
EXPORT_SYMBOL(dispc_mgr_get_sync_lost_irq);
-u32 dispc_wb_get_framedone_irq(void)
-{
- return DISPC_IRQ_FRAMEDONEWB;
-}
-
bool dispc_mgr_go_busy(enum omap_channel channel)
{
return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
@@ -579,30 +574,6 @@ void dispc_mgr_go(enum omap_channel channel)
}
EXPORT_SYMBOL(dispc_mgr_go);
-bool dispc_wb_go_busy(void)
-{
- return REG_GET(DISPC_CONTROL2, 6, 6) == 1;
-}
-
-void dispc_wb_go(void)
-{
- enum omap_plane plane = OMAP_DSS_WB;
- bool enable, go;
-
- enable = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
-
- if (!enable)
- return;
-
- go = REG_GET(DISPC_CONTROL2, 6, 6) == 1;
- if (go) {
- DSSERR("GO bit not down for WB\n");
- return;
- }
-
- REG_FLD_MOD(DISPC_CONTROL2, 1, 6, 6);
-}
-
static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value)
{
dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
@@ -1028,13 +999,6 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
}
}
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel)
-{
- enum omap_plane plane = OMAP_DSS_WB;
-
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), channel, 18, 16);
-}
-
static void dispc_ovl_set_burst_size(enum omap_plane plane,
enum omap_burst_size burst_size)
{
@@ -2805,74 +2769,6 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
}
EXPORT_SYMBOL(dispc_ovl_setup);
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *mgr_timings)
-{
- int r;
- u32 l;
- enum omap_plane plane = OMAP_DSS_WB;
- const int pos_x = 0, pos_y = 0;
- const u8 zorder = 0, global_alpha = 0;
- const bool replication = false;
- bool truncation;
- int in_width = mgr_timings->x_res;
- int in_height = mgr_timings->y_res;
- enum omap_overlay_caps caps =
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
-
- DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
- "rot %d, mir %d\n", wi->paddr, wi->p_uv_addr, in_width,
- in_height, wi->width, wi->height, wi->color_mode, wi->rotation,
- wi->mirror);
-
- r = dispc_ovl_setup_common(plane, caps, wi->paddr, wi->p_uv_addr,
- wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
- wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
- wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, mgr_timings, mem_to_mem);
-
- switch (wi->color_mode) {
- case OMAP_DSS_COLOR_RGB16:
- case OMAP_DSS_COLOR_RGB24P:
- case OMAP_DSS_COLOR_ARGB16:
- case OMAP_DSS_COLOR_RGBA16:
- case OMAP_DSS_COLOR_RGB12U:
- case OMAP_DSS_COLOR_ARGB16_1555:
- case OMAP_DSS_COLOR_XRGB16_1555:
- case OMAP_DSS_COLOR_RGBX16:
- truncation = true;
- break;
- default:
- truncation = false;
- break;
- }
-
- /* setup extra DISPC_WB_ATTRIBUTES */
- l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
- l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */
- l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */
- if (mem_to_mem)
- l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */
- else
- l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
-
- if (mem_to_mem) {
- /* WBDELAYCOUNT */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
- } else {
- int wbdelay;
-
- wbdelay = min(mgr_timings->vfp + mgr_timings->vsw +
- mgr_timings->vbp, 255);
-
- /* WBDELAYCOUNT */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
- }
-
- return r;
-}
-
int dispc_ovl_enable(enum omap_plane plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -2903,16 +2799,6 @@ bool dispc_mgr_is_enabled(enum omap_channel channel)
}
EXPORT_SYMBOL(dispc_mgr_is_enabled);
-void dispc_wb_enable(bool enable)
-{
- dispc_ovl_enable(OMAP_DSS_WB, enable);
-}
-
-bool dispc_wb_is_enabled(void)
-{
- return dispc_ovl_enabled(OMAP_DSS_WB);
-}
-
static void dispc_lcd_enable_signal_polarity(bool act_high)
{
if (!dss_has_feature(FEAT_LCDENABLEPOL))
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index a2269008590f..21cfcbf74a6d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -89,17 +89,6 @@ enum dss_dsi_content_type {
DSS_DSI_CONTENT_GENERIC,
};
-enum dss_writeback_channel {
- DSS_WB_LCD1_MGR = 0,
- DSS_WB_LCD2_MGR = 1,
- DSS_WB_TV_MGR = 2,
- DSS_WB_OVL0 = 3,
- DSS_WB_OVL1 = 4,
- DSS_WB_OVL2 = 5,
- DSS_WB_OVL3 = 6,
- DSS_WB_LCD3_MGR = 7,
-};
-
enum dss_pll_id {
DSS_PLL_DSI1,
DSS_PLL_DSI2,
@@ -403,15 +392,6 @@ int dispc_mgr_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
void dispc_set_tv_pclk(unsigned long pclk);
-u32 dispc_wb_get_framedone_irq(void);
-bool dispc_wb_go_busy(void);
-void dispc_wb_go(void);
-void dispc_wb_enable(bool enable);
-bool dispc_wb_is_enabled(void);
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *timings);
-
u32 dispc_read_irqstatus(void);
void dispc_clear_irqstatus(u32 mask);
u32 dispc_read_irqenable(void);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index f81e2a46366d..d5404d56c922 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -209,49 +209,6 @@ static const struct venc_config venc_config_ntsc_trm = {
.gen_ctrl = 0x00F90000,
};
-static const struct venc_config venc_config_pal_bdghi = {
- .f_control = 0,
- .vidout_ctrl = 0,
- .sync_ctrl = 0,
- .hfltr_ctrl = 0,
- .x_color = 0,
- .line21 = 0,
- .ln_sel = 21,
- .htrigger_vtrigger = 0,
- .tvdetgp_int_start_stop_x = 0x00140001,
- .tvdetgp_int_start_stop_y = 0x00010001,
- .gen_ctrl = 0x00FB0000,
-
- .llen = 864-1,
- .flens = 625-1,
- .cc_carr_wss_carr = 0x2F7625ED,
- .c_phase = 0xDF,
- .gain_u = 0x111,
- .gain_v = 0x181,
- .gain_y = 0x140,
- .black_level = 0x3e,
- .blank_level = 0x3e,
- .m_control = 0<<2 | 1<<1,
- .bstamp_wss_data = 0x42,
- .s_carr = 0x2a098acb,
- .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0,
- .savid__eavid = 0x06A70108,
- .flen__fal = 23<<16 | 624<<0,
- .lal__phase_reset = 2<<17 | 310<<0,
- .hs_int_start_stop_x = 0x00920358,
- .hs_ext_start_stop_x = 0x000F035F,
- .vs_int_start_x = 0x1a7<<16,
- .vs_int_stop_x__vs_int_start_y = 0x000601A7,
- .vs_int_stop_y__vs_ext_start_x = 0x01AF0036,
- .vs_ext_stop_x__vs_ext_start_y = 0x27101af,
- .vs_ext_stop_y = 0x05,
- .avid_start_stop_x = 0x03530082,
- .avid_start_stop_y = 0x0270002E,
- .fid_int_start_x__fid_int_start_y = 0x0005008A,
- .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
- .fid_ext_start_y__fid_ext_offset_y = 0x01380005,
-};
-
const struct omap_video_timings omap_dss_pal_timings = {
.x_res = 720,
.y_res = 574,
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 56995f44e76d..f40be68d5aac 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -482,9 +482,6 @@ static int omapfb_memory_read(struct fb_info *fbi,
if (!display || !display->driver->memory_read)
return -ENOENT;
- if (!access_ok(mr->buffer, mr->buffer_size))
- return -EFAULT;
-
if (mr->w > 4096 || mr->h > 4096)
return -EINVAL;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 4a5db170ef59..2d39dbfa742e 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -147,11 +147,11 @@ static ssize_t show_overlays(struct device *dev,
if (ovl == fbdev->overlays[ovlnum])
break;
- l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+ l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d",
t == 0 ? "" : ",", ovlnum);
}
- l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+ l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
omapfb_unlock(fbdev);
unlock_fb_info(fbi);
@@ -328,11 +328,11 @@ static ssize_t show_overlays_rotate(struct device *dev,
lock_fb_info(fbi);
for (t = 0; t < ofbi->num_overlays; t++) {
- l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+ l += scnprintf(buf + l, PAGE_SIZE - l, "%s%d",
t == 0 ? "" : ",", ofbi->rotation[t]);
}
- l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+ l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
unlock_fb_info(fbi);
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index fe2cadeb1b66..c7c98d8e2359 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -54,7 +54,7 @@
#define DPRINTK(a, b...) \
printk(KERN_DEBUG "pm2fb: %s: " a, __func__ , ## b)
#else
-#define DPRINTK(a, b...)
+#define DPRINTK(a, b...) no_printk(a, ##b)
#endif
#define PM2_PIXMAP_SIZE (1600 * 4)
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 2f5e23c8f8ec..7497bd36334c 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -44,7 +44,7 @@
#define DPRINTK(a, b...) \
printk(KERN_DEBUG "pm3fb: %s: " a, __func__ , ## b)
#else
-#define DPRINTK(a, b...)
+#define DPRINTK(a, b...) no_printk(a, ##b)
#endif
#define PM3_PIXMAP_SIZE (2048 * 4)
@@ -306,7 +306,7 @@ static void pm3fb_init_engine(struct fb_info *info)
PM3PixelSize_GLOBAL_32BIT);
break;
default:
- DPRINTK(1, "Unsupported depth %d\n",
+ DPRINTK("Unsupported depth %d\n",
info->var.bits_per_pixel);
break;
}
@@ -349,8 +349,8 @@ static void pm3fb_init_engine(struct fb_info *info)
(1 << 10) | (0 << 3));
break;
default:
- DPRINTK(1, "Unsupported depth %d\n",
- info->current_par->depth);
+ DPRINTK("Unsupported depth %d\n",
+ info->var.bits_per_pixel);
break;
}
}
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 834f63edf700..9df78fb77267 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -44,7 +44,7 @@
#define GPU_CMD_BUF_SIZE (2 * 1024 * 1024)
#define GPU_FB_START (64 * 1024)
#define GPU_IOIF (0x0d000000UL)
-#define GPU_ALIGN_UP(x) _ALIGN_UP((x), 64)
+#define GPU_ALIGN_UP(x) ALIGN((x), 64)
#define GPU_MAX_LINE_LENGTH (65536 - 64)
#define GPU_INTR_STATUS_VSYNC_0 0 /* vsync on head A */
@@ -1015,7 +1015,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev)
}
#endif
- max_ps3fb_size = _ALIGN_UP(GPU_IOIF, 256*1024*1024) - GPU_IOIF;
+ max_ps3fb_size = ALIGN(GPU_IOIF, 256*1024*1024) - GPU_IOIF;
if (ps3fb_videomemory.size > max_ps3fb_size) {
dev_info(&dev->core, "Limiting ps3fb mem size to %lu bytes\n",
max_ps3fb_size);
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index aef8a3042590..eedfbd3572a8 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -557,12 +557,11 @@ static const struct fb_ops pxa168fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int pxa168fb_init_mode(struct fb_info *info,
+static void pxa168fb_init_mode(struct fb_info *info,
struct pxa168fb_mach_info *mi)
{
struct pxa168fb_info *fbi = info->par;
struct fb_var_screeninfo *var = &info->var;
- int ret = 0;
u32 total_w, total_h, refresh;
u64 div_result;
const struct fb_videomode *m;
@@ -593,8 +592,6 @@ static int pxa168fb_init_mode(struct fb_info *info,
div_result = 1000000000000ll;
do_div(div_result, total_w * total_h * refresh);
var->pixclock = (u32)div_result;
-
- return ret;
}
static int pxa168fb_probe(struct platform_device *pdev)
diff --git a/drivers/video/fbdev/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c
index 0601c13f2105..08c9ee46978e 100644
--- a/drivers/video/fbdev/riva/riva_hw.c
+++ b/drivers/video/fbdev/riva/riva_hw.c
@@ -1343,24 +1343,6 @@ int CalcStateExt
/*
* Load fixed function state and pre-calculated/stored state.
*/
-#if 0
-#define LOAD_FIXED_STATE(tbl,dev) \
- for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \
- chip->dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1]
-#define LOAD_FIXED_STATE_8BPP(tbl,dev) \
- for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++) \
- chip->dev[tbl##Table##dev##_8BPP[i][0]] = tbl##Table##dev##_8BPP[i][1]
-#define LOAD_FIXED_STATE_15BPP(tbl,dev) \
- for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++) \
- chip->dev[tbl##Table##dev##_15BPP[i][0]] = tbl##Table##dev##_15BPP[i][1]
-#define LOAD_FIXED_STATE_16BPP(tbl,dev) \
- for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++) \
- chip->dev[tbl##Table##dev##_16BPP[i][0]] = tbl##Table##dev##_16BPP[i][1]
-#define LOAD_FIXED_STATE_32BPP(tbl,dev) \
- for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++) \
- chip->dev[tbl##Table##dev##_32BPP[i][0]] = tbl##Table##dev##_32BPP[i][1]
-#endif
-
#define LOAD_FIXED_STATE(tbl,dev) \
for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \
NV_WR32(&chip->dev[tbl##Table##dev[i][0]], 0, tbl##Table##dev[i][1])
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index eaea8c373753..4541afcf9386 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -721,9 +721,7 @@ static void s1d13xxxfb_fetch_hw_state(struct fb_info *info)
xres, yres, xres_virtual, yres_virtual, is_color, is_dual, is_tft);
}
-
-static int
-s1d13xxxfb_remove(struct platform_device *pdev)
+static void __s1d13xxxfb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
struct s1d13xxxfb_par *par = NULL;
@@ -749,6 +747,14 @@ s1d13xxxfb_remove(struct platform_device *pdev)
resource_size(&pdev->resource[0]));
release_mem_region(pdev->resource[1].start,
resource_size(&pdev->resource[1]));
+}
+
+static int s1d13xxxfb_remove(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+
+ unregister_framebuffer(info);
+ __s1d13xxxfb_remove(pdev);
return 0;
}
@@ -895,7 +901,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
return 0;
bail:
- s1d13xxxfb_remove(pdev);
+ __s1d13xxxfb_remove(pdev);
return ret;
}
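
The split lets the probe error path release resources without calling
unregister_framebuffer() on a framebuffer that was never registered.
Resulting call paths (sketch):

	/* probe failure: bail -> __s1d13xxxfb_remove()           (no unregister)
	 * driver unbind: s1d13xxxfb_remove() -> unregister_framebuffer()
	 *                                    -> __s1d13xxxfb_remove()
	 */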
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 2d285cc384cf..3e6e13f7a831 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -173,7 +173,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
@@ -799,8 +799,8 @@ static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
writel_relaxed(fbi->dbar2, fbi->base + DBAR2);
writel_relaxed(fbi->reg_lccr0 | LCCR0_LEN, fbi->base + LCCR0);
- if (machine_is_shannon())
- gpio_set_value(SHANNON_GPIO_DISP_EN, 1);
+ if (fbi->shannon_lcden)
+ gpiod_set_value(fbi->shannon_lcden, 1);
dev_dbg(fbi->dev, "DBAR1: 0x%08x\n", readl_relaxed(fbi->base + DBAR1));
dev_dbg(fbi->dev, "DBAR2: 0x%08x\n", readl_relaxed(fbi->base + DBAR2));
@@ -817,8 +817,8 @@ static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
dev_dbg(fbi->dev, "Disabling LCD controller\n");
- if (machine_is_shannon())
- gpio_set_value(SHANNON_GPIO_DISP_EN, 0);
+ if (fbi->shannon_lcden)
+ gpiod_set_value(fbi->shannon_lcden, 0);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&fbi->ctrlr_wait, &wait);
@@ -1173,12 +1173,10 @@ static int sa1100fb_probe(struct platform_device *pdev)
return ret;
}
- if (machine_is_shannon()) {
- ret = devm_gpio_request_one(&pdev->dev, SHANNON_GPIO_DISP_EN,
- GPIOF_OUT_INIT_LOW, "display enable");
- if (ret)
- return ret;
- }
+ fbi->shannon_lcden = gpiod_get_optional(&pdev->dev, "shannon-lcden",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(fbi->shannon_lcden))
+ return PTR_ERR(fbi->shannon_lcden);
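
gpiod_get_optional() returns NULL when no "shannon-lcden" GPIO is described
for the device, so the enable/disable paths above can test the descriptor
directly instead of machine_is_shannon(). On Shannon the descriptor is
expected to come from a board-file gpiod lookup table (an assumption; that
table is not part of this hunk, and the names and GPIO number below are
illustrative only):

	static struct gpiod_lookup_table shannon_lcden_gpio_table = {
		.dev_id = "sa11x0-fb",
		.table = {
			GPIO_LOOKUP("gpio", 22, "shannon-lcden", GPIO_ACTIVE_HIGH),
			{ }
		},
	};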
/* Initialize video memory */
ret = sa1100fb_map_video_memory(fbi);
diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index d0aa33b0b88a..b4363444fa5d 100644
--- a/drivers/video/fbdev/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
@@ -10,6 +10,8 @@
* for more details.
*/
+struct gpio_desc;
+
#define LCCR0 0x0000 /* LCD Control Reg. 0 */
#define LCSR 0x0004 /* LCD Status Reg. */
#define DBAR1 0x0010 /* LCD DMA Base Address Reg. channel 1 */
@@ -33,6 +35,7 @@ struct sa1100fb_info {
struct device *dev;
const struct sa1100fb_rgb *rgb[NR_RGB];
void __iomem *base;
+ struct gpio_desc *shannon_lcden;
/*
* These are the addresses we mapped
diff --git a/drivers/video/fbdev/savage/savagefb.h b/drivers/video/fbdev/savage/savagefb.h
index aba04afe712d..3314d5b6b43b 100644
--- a/drivers/video/fbdev/savage/savagefb.h
+++ b/drivers/video/fbdev/savage/savagefb.h
@@ -21,7 +21,7 @@
#ifdef SAVAGEFB_DEBUG
# define DBG(x) printk (KERN_DEBUG "savagefb: %s\n", (x));
#else
-# define DBG(x)
+# define DBG(x) no_printk(x)
# define SavagePrintRegs(...)
#endif
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 12fa1050f3eb..8e06ba912d60 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -12,8 +12,7 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/uaccess.h>
#include <linux/regulator/consumer.h>
@@ -49,8 +48,6 @@
static u_int refreshrate = REFRESHRATE;
module_param(refreshrate, uint, 0);
-struct ssd1307fb_par;
-
struct ssd1307fb_deviceinfo {
u32 default_vcomh;
u32 default_dclk_div;
@@ -80,7 +77,6 @@ struct ssd1307fb_par {
u32 prechargep1;
u32 prechargep2;
struct pwm_device *pwm;
- u32 pwm_period;
struct gpio_desc *reset;
struct regulator *vbat_reg;
u32 vcomh;
@@ -298,9 +294,9 @@ static void ssd1307fb_deferred_io(struct fb_info *info,
static int ssd1307fb_init(struct ssd1307fb_par *par)
{
+ struct pwm_state pwmstate;
int ret;
u32 precharge, dclk, com_invdir, compins;
- struct pwm_args pargs;
if (par->device_info->need_pwm) {
par->pwm = pwm_get(&par->client->dev, NULL);
@@ -309,21 +305,15 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
return PTR_ERR(par->pwm);
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(par->pwm);
-
- pwm_get_args(par->pwm, &pargs);
+ pwm_init_state(par->pwm, &pwmstate);
+ pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
+ pwm_apply_state(par->pwm, &pwmstate);
- par->pwm_period = pargs.period;
/* Enable the PWM */
- pwm_config(par->pwm, par->pwm_period / 2, par->pwm_period);
pwm_enable(par->pwm);
dev_dbg(&par->client->dev, "Using PWM%d with a %dns period.\n",
- par->pwm->pwm, par->pwm_period);
+ par->pwm->pwm, pwm_get_period(par->pwm));
}
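
pwm_init_state() seeds the state from the PWM's reference arguments,
pwm_set_relative_duty_cycle() expresses the old period/2 computation as a 50%
duty cycle, and pwm_apply_state() commits period and duty in one atomic call,
so the driver no longer needs to cache pwm_period itself:

	pwm_init_state(par->pwm, &pwmstate);		 /* period/polarity from args */
	pwm_set_relative_duty_cycle(&pwmstate, 50, 100); /* duty = period / 2 */
	pwm_apply_state(par->pwm, &pwmstate);		 /* single atomic commit */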
/* Set initial contrast */
@@ -586,25 +576,19 @@ static const struct of_device_id ssd1307fb_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ssd1307fb_of_match);
-static int ssd1307fb_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ssd1307fb_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct backlight_device *bl;
char bl_name[12];
struct fb_info *info;
- struct device_node *node = client->dev.of_node;
struct fb_deferred_io *ssd1307fb_defio;
u32 vmem_size;
struct ssd1307fb_par *par;
void *vmem;
int ret;
- if (!node) {
- dev_err(&client->dev, "No device tree data found!\n");
- return -EINVAL;
- }
-
- info = framebuffer_alloc(sizeof(struct ssd1307fb_par), &client->dev);
+ info = framebuffer_alloc(sizeof(struct ssd1307fb_par), dev);
if (!info)
return -ENOMEM;
@@ -612,67 +596,65 @@ static int ssd1307fb_probe(struct i2c_client *client,
par->info = info;
par->client = client;
- par->device_info = of_device_get_match_data(&client->dev);
+ par->device_info = device_get_match_data(dev);
- par->reset = devm_gpiod_get_optional(&client->dev, "reset",
- GPIOD_OUT_LOW);
+ par->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(par->reset)) {
- dev_err(&client->dev, "failed to get reset gpio: %ld\n",
+ dev_err(dev, "failed to get reset gpio: %ld\n",
PTR_ERR(par->reset));
ret = PTR_ERR(par->reset);
goto fb_alloc_error;
}
- par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
+ par->vbat_reg = devm_regulator_get_optional(dev, "vbat");
if (IS_ERR(par->vbat_reg)) {
ret = PTR_ERR(par->vbat_reg);
if (ret == -ENODEV) {
par->vbat_reg = NULL;
} else {
- dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
- ret);
+ dev_err(dev, "failed to get VBAT regulator: %d\n", ret);
goto fb_alloc_error;
}
}
- if (of_property_read_u32(node, "solomon,width", &par->width))
+ if (device_property_read_u32(dev, "solomon,width", &par->width))
par->width = 96;
- if (of_property_read_u32(node, "solomon,height", &par->height))
+ if (device_property_read_u32(dev, "solomon,height", &par->height))
par->height = 16;
- if (of_property_read_u32(node, "solomon,page-offset", &par->page_offset))
+ if (device_property_read_u32(dev, "solomon,page-offset", &par->page_offset))
par->page_offset = 1;
- if (of_property_read_u32(node, "solomon,com-offset", &par->com_offset))
+ if (device_property_read_u32(dev, "solomon,com-offset", &par->com_offset))
par->com_offset = 0;
- if (of_property_read_u32(node, "solomon,prechargep1", &par->prechargep1))
+ if (device_property_read_u32(dev, "solomon,prechargep1", &par->prechargep1))
par->prechargep1 = 2;
- if (of_property_read_u32(node, "solomon,prechargep2", &par->prechargep2))
+ if (device_property_read_u32(dev, "solomon,prechargep2", &par->prechargep2))
par->prechargep2 = 2;
- if (!of_property_read_u8_array(node, "solomon,lookup-table",
- par->lookup_table,
- ARRAY_SIZE(par->lookup_table)))
+ if (!device_property_read_u8_array(dev, "solomon,lookup-table",
+ par->lookup_table,
+ ARRAY_SIZE(par->lookup_table)))
par->lookup_table_set = 1;
- par->seg_remap = !of_property_read_bool(node, "solomon,segment-no-remap");
- par->com_seq = of_property_read_bool(node, "solomon,com-seq");
- par->com_lrremap = of_property_read_bool(node, "solomon,com-lrremap");
- par->com_invdir = of_property_read_bool(node, "solomon,com-invdir");
+ par->seg_remap = !device_property_read_bool(dev, "solomon,segment-no-remap");
+ par->com_seq = device_property_read_bool(dev, "solomon,com-seq");
+ par->com_lrremap = device_property_read_bool(dev, "solomon,com-lrremap");
+ par->com_invdir = device_property_read_bool(dev, "solomon,com-invdir");
par->area_color_enable =
- of_property_read_bool(node, "solomon,area-color-enable");
- par->low_power = of_property_read_bool(node, "solomon,low-power");
+ device_property_read_bool(dev, "solomon,area-color-enable");
+ par->low_power = device_property_read_bool(dev, "solomon,low-power");
par->contrast = 127;
par->vcomh = par->device_info->default_vcomh;
/* Setup display timing */
- if (of_property_read_u32(node, "solomon,dclk-div", &par->dclk_div))
+ if (device_property_read_u32(dev, "solomon,dclk-div", &par->dclk_div))
par->dclk_div = par->device_info->default_dclk_div;
- if (of_property_read_u32(node, "solomon,dclk-frq", &par->dclk_frq))
+ if (device_property_read_u32(dev, "solomon,dclk-frq", &par->dclk_frq))
par->dclk_frq = par->device_info->default_dclk_frq;
vmem_size = DIV_ROUND_UP(par->width, 8) * par->height;
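
device_property_read_*() resolves through the unified fwnode layer, so the
same probe works whether the properties come from devicetree, ACPI _DSD, or a
software node; that is also why the explicit of_node check was dropped and the
driver can switch to probe_new below. A board could, for instance, attach
equivalent properties without DT (illustrative sketch, untested):

	static const struct property_entry ssd1307fb_props[] = {
		PROPERTY_ENTRY_U32("solomon,width", 128),
		PROPERTY_ENTRY_U32("solomon,height", 64),
		{ }
	};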
@@ -680,15 +662,15 @@ static int ssd1307fb_probe(struct i2c_client *client,
vmem = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(vmem_size));
if (!vmem) {
- dev_err(&client->dev, "Couldn't allocate graphical memory.\n");
+ dev_err(dev, "Couldn't allocate graphical memory.\n");
ret = -ENOMEM;
goto fb_alloc_error;
}
- ssd1307fb_defio = devm_kzalloc(&client->dev, sizeof(*ssd1307fb_defio),
+ ssd1307fb_defio = devm_kzalloc(dev, sizeof(*ssd1307fb_defio),
GFP_KERNEL);
if (!ssd1307fb_defio) {
- dev_err(&client->dev, "Couldn't allocate deferred io.\n");
+ dev_err(dev, "Couldn't allocate deferred io.\n");
ret = -ENOMEM;
goto fb_alloc_error;
}
@@ -726,8 +708,7 @@ static int ssd1307fb_probe(struct i2c_client *client,
if (par->vbat_reg) {
ret = regulator_enable(par->vbat_reg);
if (ret) {
- dev_err(&client->dev, "failed to enable VBAT: %d\n",
- ret);
+ dev_err(dev, "failed to enable VBAT: %d\n", ret);
goto reset_oled_error;
}
}
@@ -738,17 +719,16 @@ static int ssd1307fb_probe(struct i2c_client *client,
ret = register_framebuffer(info);
if (ret) {
- dev_err(&client->dev, "Couldn't register the framebuffer\n");
+ dev_err(dev, "Couldn't register the framebuffer\n");
goto panel_init_error;
}
snprintf(bl_name, sizeof(bl_name), "ssd1307fb%d", info->node);
- bl = backlight_device_register(bl_name, &client->dev, par,
- &ssd1307fb_bl_ops, NULL);
+ bl = backlight_device_register(bl_name, dev, par, &ssd1307fb_bl_ops,
+ NULL);
if (IS_ERR(bl)) {
ret = PTR_ERR(bl);
- dev_err(&client->dev, "unable to register backlight device: %d\n",
- ret);
+ dev_err(dev, "unable to register backlight device: %d\n", ret);
goto bl_init_error;
}
@@ -756,7 +736,7 @@ static int ssd1307fb_probe(struct i2c_client *client,
bl->props.max_brightness = MAX_CONTRAST;
info->bl_dev = bl;
- dev_info(&client->dev, "fb%d: %s framebuffer device registered, using %d bytes of video memory\n", info->node, info->fix.id, vmem_size);
+ dev_info(dev, "fb%d: %s framebuffer device registered, using %d bytes of video memory\n", info->node, info->fix.id, vmem_size);
return 0;
@@ -810,7 +790,7 @@ static const struct i2c_device_id ssd1307fb_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, ssd1307fb_i2c_id);
static struct i2c_driver ssd1307fb_driver = {
- .probe = ssd1307fb_probe,
+ .probe_new = ssd1307fb_probe,
.remove = ssd1307fb_remove,
.id_table = ssd1307fb_i2c_id,
.driver = {
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 07905d385949..5b014b479f83 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -64,9 +64,9 @@ static const struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
/* module options */
-static bool console = 1; /* Allow fbcon to open framebuffer */
-static bool fb_defio = 1; /* Detect mmap writes using page faults */
-static bool shadow = 1; /* Optionally disable shadow framebuffer */
+static bool console = true; /* Allow fbcon to open framebuffer */
+static bool fb_defio = true; /* Detect mmap writes using page faults */
+static bool shadow = true; /* Optionally disable shadow framebuffer */
static int pixel_limit; /* Optionally force a pixel resolution limit */
struct dlfb_deferred_free {
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 53d08d1b56f5..bee29aadc646 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -45,7 +45,7 @@ static const struct fb_fix_screeninfo uvesafb_fix = {
};
static int mtrr = 3; /* enable mtrr by default */
-static bool blank = 1; /* enable blanking by default */
+static bool blank = true; /* enable blanking by default */
static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
static bool pmi_setpal = true; /* use PMI for palette changes */
static bool nocrtc; /* ignore CRTC settings */
@@ -1560,7 +1560,7 @@ static ssize_t uvesafb_show_vbe_modes(struct device *dev,
int ret = 0, i;
for (i = 0; i < par->vbe_modes_cnt && ret < PAGE_SIZE; i++) {
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"%dx%d-%d, 0x%.4x\n",
par->vbe_modes[i].x_res, par->vbe_modes[i].y_res,
par->vbe_modes[i].depth, par->vbe_modes[i].mode_id);
@@ -1824,19 +1824,19 @@ static int uvesafb_setup(char *options)
else if (!strcmp(this_opt, "ywrap"))
ypan = 2;
else if (!strcmp(this_opt, "vgapal"))
- pmi_setpal = 0;
+ pmi_setpal = false;
else if (!strcmp(this_opt, "pmipal"))
- pmi_setpal = 1;
+ pmi_setpal = true;
else if (!strncmp(this_opt, "mtrr:", 5))
mtrr = simple_strtoul(this_opt+5, NULL, 0);
else if (!strcmp(this_opt, "nomtrr"))
mtrr = 0;
else if (!strcmp(this_opt, "nocrtc"))
- nocrtc = 1;
+ nocrtc = true;
else if (!strcmp(this_opt, "noedid"))
- noedid = 1;
+ noedid = true;
else if (!strcmp(this_opt, "noblank"))
- blank = 0;
+			blank = false;
else if (!strncmp(this_opt, "vtotal:", 7))
vram_total = simple_strtoul(this_opt + 7, NULL, 0);
else if (!strncmp(this_opt, "vremap:", 7))
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 4d20c4603e5a..8425afe37d7c 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -331,7 +331,7 @@ int __init valkyriefb_init(void)
struct resource r;
dp = of_find_node_by_name(NULL, "valkyrie");
- if (dp == 0)
+ if (!dp)
return 0;
if (of_address_to_resource(dp, 0, &r)) {
@@ -345,7 +345,7 @@ int __init valkyriefb_init(void)
#endif /* ppc (!CONFIG_MAC) */
p = kzalloc(sizeof(*p), GFP_ATOMIC);
- if (p == 0)
+ if (!p)
return -ENOMEM;
/* Map in frame buffer and registers */
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index a1fe24ea869b..df6de5a9dd4c 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -32,6 +32,7 @@
struct vesafb_par {
u32 pseudo_palette[256];
int wc_cookie;
+ struct resource *region;
};
static struct fb_var_screeninfo vesafb_defined = {
@@ -411,7 +412,7 @@ static int vesafb_probe(struct platform_device *dev)
/* request failure does not faze us, as vgacon probably has this
* region already (FIXME) */
- request_region(0x3c0, 32, "vesafb");
+ par->region = request_region(0x3c0, 32, "vesafb");
if (mtrr == 3) {
unsigned int temp_size = size_total;
@@ -439,7 +440,7 @@ static int vesafb_probe(struct platform_device *dev)
"vesafb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
vesafb_fix.smem_len, vesafb_fix.smem_start);
err = -EIO;
- goto err;
+ goto err_release_region;
}
printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, "
@@ -458,19 +459,22 @@ static int vesafb_probe(struct platform_device *dev)
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENOMEM;
- goto err;
+ goto err_release_region;
}
if (register_framebuffer(info)<0) {
err = -EINVAL;
fb_dealloc_cmap(&info->cmap);
- goto err;
+ goto err_release_region;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
-err:
+err_release_region:
arch_phys_wc_del(par->wc_cookie);
if (info->screen_base)
iounmap(info->screen_base);
+ if (par->region)
+ release_region(0x3c0, 32);
+err:
framebuffer_release(info);
release_mem_region(vesafb_fix.smem_start, size_total);
return err;
@@ -481,6 +485,8 @@ static int vesafb_remove(struct platform_device *pdev)
struct fb_info *info = platform_get_drvdata(pdev);
unregister_framebuffer(info);
+ if (((struct vesafb_par *)(info->par))->region)
+ release_region(0x3c0, 32);
framebuffer_release(info);
return 0;
diff --git a/drivers/video/fbdev/via/debug.h b/drivers/video/fbdev/via/debug.h
index 6a320bd76936..80fdfe4171c5 100644
--- a/drivers/video/fbdev/via/debug.h
+++ b/drivers/video/fbdev/via/debug.h
@@ -7,6 +7,8 @@
#ifndef __DEBUG_H__
#define __DEBUG_H__
+#include <linux/printk.h>
+
#ifndef VIAFB_DEBUG
#define VIAFB_DEBUG 0
#endif
@@ -14,14 +16,14 @@
#if VIAFB_DEBUG
#define DEBUG_MSG(f, a...) printk(f, ## a)
#else
-#define DEBUG_MSG(f, a...)
+#define DEBUG_MSG(f, a...) no_printk(f, ## a)
#endif
#define VIAFB_WARN 0
#if VIAFB_WARN
#define WARN_MSG(f, a...) printk(f, ## a)
#else
-#define WARN_MSG(f, a...)
+#define WARN_MSG(f, a...) no_printk(f, ## a)
#endif
#endif /* __DEBUG_H__ */
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index 852673c40a2f..22deb340a048 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1144,7 +1144,7 @@ static ssize_t viafb_dvp0_proc_write(struct file *file,
if (value != NULL) {
if (kstrtou8(value, 0, &reg_val) < 0)
return -EINVAL;
- DEBUG_MSG(KERN_INFO "DVP0:reg_val[%l]=:%x\n", i,
+ DEBUG_MSG(KERN_INFO "DVP0:reg_val[%lu]=:%x\n", i,
reg_val);
switch (i) {
case 0:
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index f744479dc7df..c61476247ba8 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -230,6 +230,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
for (i = 0; i < 256; i++)
vt8500lcd_setcolreg(i, 0, 0, 0, 0, info);
+ fallthrough;
case FB_BLANK_UNBLANK:
if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index 2d6e2738b792..d96ab28f8ce4 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -588,6 +588,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
memsize=par->mach->mem->size;
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
vfree(par->saved_extmem);
+ par->saved_extmem = NULL;
}
if (par->saved_intmem) {
memsize=MEM_INT_SIZE;
@@ -596,6 +597,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
else
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
vfree(par->saved_intmem);
+ par->saved_intmem = NULL;
}
}
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 856a8c4e84a2..e70792b3e367 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -1768,20 +1768,21 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
}
/**
- * hdmi_drm_infoframe_unpack() - unpack binary buffer to a HDMI DRM infoframe
+ * hdmi_drm_infoframe_unpack_only() - unpack binary buffer of CTA-861-G DRM
+ * infoframe DataBytes to a HDMI DRM
+ * infoframe
* @frame: HDMI DRM infoframe
* @buffer: source buffer
* @size: size of buffer
*
- * Unpacks the information contained in binary @buffer into a structured
- * @frame of the HDMI Dynamic Range and Mastering (DRM) information frame.
- * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
- * specification.
+ * Unpacks CTA-861-G DRM infoframe DataBytes contained in the binary @buffer
+ * into a structured @frame of the HDMI Dynamic Range and Mastering (DRM)
+ * infoframe.
*
* Returns 0 on success or a negative error code on failure.
*/
-static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
- const void *buffer, size_t size)
+int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame,
+ const void *buffer, size_t size)
{
const u8 *ptr = buffer;
const u8 *temp;
@@ -1790,23 +1791,13 @@ static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
int ret;
int i;
- if (size < HDMI_INFOFRAME_SIZE(DRM))
- return -EINVAL;
-
- if (ptr[0] != HDMI_INFOFRAME_TYPE_DRM ||
- ptr[1] != 1 ||
- ptr[2] != HDMI_DRM_INFOFRAME_SIZE)
- return -EINVAL;
-
- if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(DRM)) != 0)
+ if (size < HDMI_DRM_INFOFRAME_SIZE)
return -EINVAL;
ret = hdmi_drm_infoframe_init(frame);
if (ret)
return ret;
- ptr += HDMI_INFOFRAME_HEADER_SIZE;
-
frame->eotf = ptr[0] & 0x7;
frame->metadata_type = ptr[1] & 0x7;
@@ -1814,7 +1805,7 @@ static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
for (i = 0; i < 3; i++) {
x_lsb = *temp++;
x_msb = *temp++;
- frame->display_primaries[i].x = (x_msb << 8) | x_lsb;
+ frame->display_primaries[i].x = (x_msb << 8) | x_lsb;
y_lsb = *temp++;
y_msb = *temp++;
frame->display_primaries[i].y = (y_msb << 8) | y_lsb;
@@ -1830,6 +1821,42 @@ static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
return 0;
}
+EXPORT_SYMBOL(hdmi_drm_infoframe_unpack_only);
+
+/**
+ * hdmi_drm_infoframe_unpack() - unpack binary buffer to a HDMI DRM infoframe
+ * @frame: HDMI DRM infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks the CTA-861-G DRM infoframe contained in the binary @buffer into
+ * a structured @frame of the HDMI Dynamic Range and Mastering (DRM)
+ * infoframe. It also verifies the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
+ const void *buffer, size_t size)
+{
+ const u8 *ptr = buffer;
+ int ret;
+
+ if (size < HDMI_INFOFRAME_SIZE(DRM))
+ return -EINVAL;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_DRM ||
+ ptr[1] != 1 ||
+ ptr[2] != HDMI_DRM_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(DRM)) != 0)
+ return -EINVAL;
+
+ ret = hdmi_drm_infoframe_unpack_only(frame, ptr + HDMI_INFOFRAME_HEADER_SIZE,
+ size - HDMI_INFOFRAME_HEADER_SIZE);
+ return ret;
+}
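A minimal caller-side sketch of the two entry points after this split
(buf and len are hypothetical caller-side variables): a full infoframe
goes through hdmi_drm_infoframe_unpack(), which validates the header and
checksum, while a bare CTA-861-G DataBytes payload can be fed straight
to the exported hdmi_drm_infoframe_unpack_only():

	struct hdmi_drm_infoframe frame;
	int err;

	/* full infoframe: header and checksum validated, then DataBytes unpacked */
	err = hdmi_drm_infoframe_unpack(&frame, buf, len);

	/* DataBytes only, header already consumed by the caller */
	err = hdmi_drm_infoframe_unpack_only(&frame,
					     buf + HDMI_INFOFRAME_HEADER_SIZE,
					     len - HDMI_INFOFRAME_HEADER_SIZE);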
/**
* hdmi_infoframe_unpack() - unpack binary buffer to a HDMI infoframe
diff --git a/drivers/visorbus/controlvmchannel.h b/drivers/visorbus/controlvmchannel.h
index 8c57562a070a..c87213554427 100644
--- a/drivers/visorbus/controlvmchannel.h
+++ b/drivers/visorbus/controlvmchannel.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
diff --git a/drivers/visorbus/vbuschannel.h b/drivers/visorbus/vbuschannel.h
index b1dce26166bf..4aaf6564eb9f 100644
--- a/drivers/visorbus/vbuschannel.h
+++ b/drivers/visorbus/vbuschannel.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
diff --git a/drivers/visorbus/visorbus_private.h b/drivers/visorbus/visorbus_private.h
index 366380b7f8d9..6956de605827 100644
--- a/drivers/visorbus/visorbus_private.h
+++ b/drivers/visorbus/visorbus_private.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index aa09f8527776..bf2ec59c1f9d 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -54,10 +54,10 @@ MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
struct hdq_data {
struct device *dev;
void __iomem *hdq_base;
- /* lock status update */
+ /* lock read/write/break operations */
struct mutex hdq_mutex;
+ /* interrupt status and a lock for it */
u8 hdq_irqstatus;
- /* device lock */
spinlock_t hdq_spinlock;
/* mode: 0-HDQ 1-W1 */
int mode;
@@ -120,13 +120,18 @@ static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
}
/* Clear saved irqstatus after using an interrupt */
-static void hdq_reset_irqstatus(struct hdq_data *hdq_data)
+static u8 hdq_reset_irqstatus(struct hdq_data *hdq_data, u8 bits)
{
unsigned long irqflags;
+ u8 status;
spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
- hdq_data->hdq_irqstatus = 0;
+ status = hdq_data->hdq_irqstatus;
+ /* this is a read-modify-write */
+ hdq_data->hdq_irqstatus &= ~bits;
spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
+
+ return status;
}
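With the bit-mask argument, read-and-clear becomes atomic per bit. A
minimal sketch of the intended caller pattern (mirroring the hunks that
follow): wait for one specific completion bit, then clear only that bit
so other bits latched by the ISR survive for their own consumers:

	ret = wait_event_timeout(hdq_wait_queue,
		(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
		OMAP_HDQ_TIMEOUT);
	*status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);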
/* write out a byte and fill *status with HDQ_INT_STATUS */
@@ -135,6 +140,16 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
int ret;
u8 tmp_status;
+ ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
+ if (ret < 0) {
+ ret = -EINTR;
+ goto rtn;
+ }
+
+ if (hdq_data->hdq_irqstatus)
+ dev_err(hdq_data->dev, "TX irqstatus not cleared (%02x)\n",
+ hdq_data->hdq_irqstatus);
+
*status = 0;
hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
@@ -144,18 +159,19 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
/* wait for the TXCOMPLETE bit */
ret = wait_event_timeout(hdq_wait_queue,
- hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
+ (hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
+ OMAP_HDQ_TIMEOUT);
+ *status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
if (ret == 0) {
dev_dbg(hdq_data->dev, "TX wait elapsed\n");
ret = -ETIMEDOUT;
goto out;
}
- *status = hdq_data->hdq_irqstatus;
/* check irqstatus */
if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
dev_dbg(hdq_data->dev, "timeout waiting for"
- " TXCOMPLETE/RXCOMPLETE, %x", *status);
+ " TXCOMPLETE/RXCOMPLETE, %x\n", *status);
ret = -ETIMEDOUT;
goto out;
}
@@ -166,11 +182,12 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
OMAP_HDQ_FLAG_CLEAR, &tmp_status);
if (ret) {
dev_dbg(hdq_data->dev, "timeout waiting GO bit"
- " return to zero, %x", tmp_status);
+ " return to zero, %x\n", tmp_status);
}
out:
- hdq_reset_irqstatus(hdq_data);
+ mutex_unlock(&hdq_data->hdq_mutex);
+rtn:
return ret;
}
@@ -181,9 +198,9 @@ static irqreturn_t hdq_isr(int irq, void *_hdq)
unsigned long irqflags;
spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
- hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
+ hdq_data->hdq_irqstatus |= hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
- dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
+ dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);
if (hdq_data->hdq_irqstatus &
(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
@@ -230,6 +247,10 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
goto rtn;
}
+ if (hdq_data->hdq_irqstatus)
+ dev_err(hdq_data->dev, "break irqstatus not cleared (%02x)\n",
+ hdq_data->hdq_irqstatus);
+
/* set the INIT and GO bit */
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
@@ -238,18 +259,19 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
/* wait for the TIMEOUT bit */
ret = wait_event_timeout(hdq_wait_queue,
- hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
+ (hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TIMEOUT),
+ OMAP_HDQ_TIMEOUT);
+ tmp_status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TIMEOUT);
if (ret == 0) {
dev_dbg(hdq_data->dev, "break wait elapsed\n");
ret = -EINTR;
goto out;
}
- tmp_status = hdq_data->hdq_irqstatus;
/* check irqstatus */
if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
- dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
- tmp_status);
+ dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
+ tmp_status);
ret = -ETIMEDOUT;
goto out;
}
@@ -275,10 +297,9 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
&tmp_status);
if (ret)
dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
- " return to zero, %x", tmp_status);
+ " return to zero, %x\n", tmp_status);
out:
- hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
@@ -309,12 +330,15 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
*/
wait_event_timeout(hdq_wait_queue,
(hdq_data->hdq_irqstatus
- & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
+ & (OMAP_HDQ_INT_STATUS_RXCOMPLETE |
+ OMAP_HDQ_INT_STATUS_TIMEOUT)),
OMAP_HDQ_TIMEOUT);
-
+ status = hdq_reset_irqstatus(hdq_data,
+ OMAP_HDQ_INT_STATUS_RXCOMPLETE |
+ OMAP_HDQ_INT_STATUS_TIMEOUT);
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
OMAP_HDQ_CTRL_STATUS_DIR);
- status = hdq_data->hdq_irqstatus;
+
/* check irqstatus */
if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
dev_dbg(hdq_data->dev, "timeout waiting for"
@@ -322,11 +346,12 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
ret = -ETIMEDOUT;
goto out;
}
+ } else { /* interrupt had occurred before hdq_read_byte was called */
+ hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
}
/* the data is ready. Read it in! */
*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
- hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
@@ -367,15 +392,15 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
(hdq_data->hdq_irqstatus
& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
OMAP_HDQ_TIMEOUT);
+ /* Must clear irqstatus for another RXCOMPLETE interrupt */
+ hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
+
if (err == 0) {
dev_dbg(hdq_data->dev, "RX wait elapsed\n");
goto out;
}
id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
- /* Must clear irqstatus for another RXCOMPLETE interrupt */
- hdq_reset_irqstatus(hdq_data);
-
/* read comp_bit */
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
@@ -383,6 +408,9 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
(hdq_data->hdq_irqstatus
& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
OMAP_HDQ_TIMEOUT);
+ /* Must clear irqstatus for another RXCOMPLETE interrupt */
+ hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
+
if (err == 0) {
dev_dbg(hdq_data->dev, "RX wait elapsed\n");
goto out;
@@ -409,6 +437,9 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
(hdq_data->hdq_irqstatus
& OMAP_HDQ_INT_STATUS_TXCOMPLETE),
OMAP_HDQ_TIMEOUT);
+ /* Must clear irqstatus for another TXCOMPLETE interrupt */
+ hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
+
if (err == 0) {
dev_dbg(hdq_data->dev, "TX wait elapsed\n");
goto out;
@@ -418,7 +449,6 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
OMAP_HDQ_CTRL_STATUS_SINGLE);
out:
- hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
pm_runtime_mark_last_busy(hdq_data->dev);
@@ -464,7 +494,7 @@ static u8 omap_w1_read_byte(void *_hdq)
ret = hdq_read_byte(hdq_data, &val);
if (ret)
- ret = -1;
+ val = -1;
pm_runtime_mark_last_busy(hdq_data->dev);
pm_runtime_put_autosuspend(hdq_data->dev);
diff --git a/drivers/w1/slaves/w1_ds2430.c b/drivers/w1/slaves/w1_ds2430.c
index 6fb0563fb2ae..75bb8a88620b 100644
--- a/drivers/w1/slaves/w1_ds2430.c
+++ b/drivers/w1/slaves/w1_ds2430.c
@@ -290,6 +290,6 @@ static struct w1_family w1_family_14 = {
module_w1_family(w1_family_14);
MODULE_AUTHOR("Angelo Dureghello <angelo.dureghello@timesys.com>");
-MODULE_DESCRIPTION("w1 family 14 driver for DS2430, 256kb EEPROM");
+MODULE_DESCRIPTION("w1 family 14 driver for DS2430, 256b EEPROM");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2430));
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index e028e0092799..c1b4eda16719 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
+#include <linux/string.h>
#include <linux/w1.h>
@@ -25,7 +26,8 @@
#define W1_THERM_DS1825 0x3B
#define W1_THERM_DS28EA00 0x42
-/* Allow the strong pullup to be disabled, but default to enabled.
+/*
+ * Allow the strong pullup to be disabled, but default to enabled.
* If it was disabled, a parasite powered device might not get the required
* current to do a temperature conversion. If it is enabled parasite powered
* devices have a better chance of getting the current required.
@@ -41,42 +43,211 @@
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
+/* Counter for devices supporting bulk reading */
+static u16 bulk_read_device_counter; /* =0 as per C standard */
+
+/* This command should be in public header w1.h but is not */
+#define W1_RECALL_EEPROM 0xB8
+
+/* Number of tries for an operation */
+#define W1_THERM_MAX_TRY 5
+
+/* ms delay to retry bus mutex */
+#define W1_THERM_RETRY_DELAY 20
+
+/* delay in ms to write in EEPROM */
+#define W1_THERM_EEPROM_WRITE_DELAY 10
+
+#define EEPROM_CMD_WRITE "save" /* cmd for write eeprom sysfs */
+#define EEPROM_CMD_READ "restore" /* cmd for read eeprom sysfs */
+#define BULK_TRIGGER_CMD "trigger" /* cmd to trigger a bulk read */
+
+#define MIN_TEMP -55 /* min temperature that can be measured */
+#define MAX_TEMP 125 /* max temperature that can be measured */
+
+/* Helpers Macros */
+
+/*
+ * return a pointer to the slave's w1_therm_family_converter struct:
+ * always test family data existence before using this macro
+ */
+#define SLAVE_SPECIFIC_FUNC(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->specific_functions)
+
+/*
+ * return the power mode of the sl slave: 1-ext, 0-parasite, <0 unknown
+ * always test family data existence before using this macro
+ */
+#define SLAVE_POWERMODE(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->external_powered)
+
+/*
+ * return the resolution in bits of the sl slave: <0 unknown
+ * always test family data existence before using this macro
+ */
+#define SLAVE_RESOLUTION(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->resolution)
+
+/*
+ * return whether or not a converT command has been issued to the slave
+ * * 0: no bulk read is pending
+ * * -1: conversion is in progress
+ * * 1: conversion done, result to be read
+ */
+#define SLAVE_CONVERT_TRIGGERED(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->convert_triggered)
+
+/* return the address of the refcnt in the family data */
+#define THERM_REFCNT(family_data) \
+ (&((struct w1_therm_family_data *)family_data)->refcnt)
+
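A minimal usage sketch for these accessors, assuming a valid struct
w1_slave *sl whose family_data has been checked first, as the comments
above require:

	if (sl->family_data && SLAVE_SPECIFIC_FUNC(sl)) {
		int res = SLAVE_RESOLUTION(sl);	/* resolution in bits */
		int ext = SLAVE_POWERMODE(sl);	/* 1 ext, 0 parasite, <0 unknown */

		/* pin the slave while sleeping on the bus */
		atomic_inc(THERM_REFCNT(sl->family_data));
	}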
+/* Structs definition */
+
+/**
+ * struct w1_therm_family_converter - bind device specific functions
+ * @broken: flag for non-registered families
+ * @reserved: not used here
+ * @f: pointer to the device binding structure
+ * @convert: pointer to the device conversion function
+ * @get_conversion_time: pointer to the device conversion time function
+ * @set_resolution: pointer to the device set_resolution function
+ * @get_resolution: pointer to the device get_resolution function
+ * @write_data: pointer to the device writing function (2 or 3 bytes)
+ * @bulk_read: true if device family support bulk read, false otherwise
+ */
+struct w1_therm_family_converter {
+ u8 broken;
+ u16 reserved;
+ struct w1_family *f;
+ int (*convert)(u8 rom[9]);
+ int (*get_conversion_time)(struct w1_slave *sl);
+ int (*set_resolution)(struct w1_slave *sl, int val);
+ int (*get_resolution)(struct w1_slave *sl);
+ int (*write_data)(struct w1_slave *sl, const u8 *data);
+ bool bulk_read;
+};
+
+/**
+ * struct w1_therm_family_data - device data
+ * @rom: ROM device id (64bit Lasered ROM code + 1 CRC byte)
+ * @refcnt: ref count
+ * @external_powered: 1 device powered externally,
+ * 0 device parasite powered,
+ * -x error or undefined
+ * @resolution: current device resolution
+ * @convert_triggered: conversion state of the device
+ * @specific_functions: pointer to struct of device specific function
+ */
struct w1_therm_family_data {
uint8_t rom[9];
atomic_t refcnt;
+ int external_powered;
+ int resolution;
+ int convert_triggered;
+ struct w1_therm_family_converter *specific_functions;
};
+/**
+ * struct therm_info - store temperature reading
+ * @rom: read device data (8 data bytes + 1 CRC byte)
+ * @crc: computed crc from rom
+ * @verdict: 1 if crc matches, 0 otherwise
+ */
struct therm_info {
u8 rom[9];
u8 crc;
u8 verdict;
};
-/* return the address of the refcnt in the family data */
-#define THERM_REFCNT(family_data) \
- (&((struct w1_therm_family_data *)family_data)->refcnt)
+/* Hardware Functions declaration */
-static int w1_therm_add_slave(struct w1_slave *sl)
-{
- sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
- GFP_KERNEL);
- if (!sl->family_data)
- return -ENOMEM;
- atomic_set(THERM_REFCNT(sl->family_data), 1);
- return 0;
-}
+/**
+ * reset_select_slave() - reset and select a slave
+ * @sl: the slave to select
+ *
+ * Resets the bus and select the slave by sending a ROM MATCH cmd
+ * w1_reset_select_slave() from w1_io.c could not be used here because
+ * it sent a SKIP ROM command if only one device is on the line.
+ * At the beginning of the such process, sl->master->slave_count is 1 even if
+ * more devices are on the line, causing collision on the line.
+ *
+ * Context: The w1 master lock must be held.
+ *
+ * Return: 0 if success, negative kernel error code otherwise.
+ */
+static int reset_select_slave(struct w1_slave *sl);
-static void w1_therm_remove_slave(struct w1_slave *sl)
-{
- int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
+/**
+ * convert_t() - Query the device for temperature conversion and read
+ * @sl: pointer to the slave to read
+ * @info: pointer to a structure to store the read results
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int convert_t(struct w1_slave *sl, struct therm_info *info);
- while (refcnt) {
- msleep(1000);
- refcnt = atomic_read(THERM_REFCNT(sl->family_data));
- }
- kfree(sl->family_data);
- sl->family_data = NULL;
-}
+/**
+ * read_scratchpad() - read the data in device RAM
+ * @sl: pointer to the slave to read
+ * @info: pointer to a structure to store the read results
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int read_scratchpad(struct w1_slave *sl, struct therm_info *info);
+
+/**
+ * write_scratchpad() - write nb_bytes in the device RAM
+ * @sl: pointer to the slave to write in
+ * @data: pointer to an array of 3 bytes, as 3 bytes MUST be written
+ * @nb_bytes: number of bytes to be written (2 for DS18S20, 3 otherwise)
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int write_scratchpad(struct w1_slave *sl, const u8 *data, u8 nb_bytes);
+
+/**
+ * copy_scratchpad() - Copy the content of scratchpad in device EEPROM
+ * @sl: slave involved
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int copy_scratchpad(struct w1_slave *sl);
+
+/**
+ * recall_eeprom() - Restore EEPROM data to device RAM
+ * @sl: slave involved
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int recall_eeprom(struct w1_slave *sl);
+
+/**
+ * read_powermode() - Query the power mode of the slave
+ * @sl: slave to retrieve the power mode
+ *
+ * Ask the device to get its power mode (external or parasite)
+ * and store the power status in the &struct w1_therm_family_data.
+ *
+ * Return:
+ * * 0 parasite powered device
+ * * 1 externally powered device
+ * * <0 kernel error code
+ */
+static int read_powermode(struct w1_slave *sl);
+
+/**
+ * trigger_bulk_read() - function to trigger a bulk read on the bus
+ * @dev_master: the device master of the bus
+ *
+ * Send a SKIP ROM followed by a CONVERT T command on the bus.
+ * It also sets the status flag in each slave's &struct w1_therm_family_data
+ * to signal that a conversion is in progress.
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int trigger_bulk_read(struct w1_master *dev_master);
+
+/* Sysfs interface declaration */
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf);
@@ -87,21 +258,103 @@ static ssize_t w1_slave_store(struct device *device,
static ssize_t w1_seq_show(struct device *device,
struct device_attribute *attr, char *buf);
+static ssize_t temperature_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t ext_power_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t resolution_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t resolution_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size);
+
+static ssize_t eeprom_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size);
+
+static ssize_t alarms_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size);
+
+static ssize_t alarms_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t therm_bulk_read_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size);
+
+static ssize_t therm_bulk_read_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+/* Attributes declarations */
+
static DEVICE_ATTR_RW(w1_slave);
static DEVICE_ATTR_RO(w1_seq);
+static DEVICE_ATTR_RO(temperature);
+static DEVICE_ATTR_RO(ext_power);
+static DEVICE_ATTR_RW(resolution);
+static DEVICE_ATTR_WO(eeprom);
+static DEVICE_ATTR_RW(alarms);
+
+static DEVICE_ATTR_RW(therm_bulk_read); /* attribute at master level */
+
+/* Interface Functions declaration */
+
+/**
+ * w1_therm_add_slave() - Called when a new slave is discovered
+ * @sl: slave just discovered by the master.
+ *
+ * Called by the master when the slave is discovered on the bus. Used to
+ * initialize slave state before the beginning of any communication.
+ *
+ * Return: 0 if success, negative kernel error code otherwise
+ */
+static int w1_therm_add_slave(struct w1_slave *sl);
+
+/**
+ * w1_therm_remove_slave() - Called when a slave is removed
+ * @sl: slave to be removed.
+ *
+ * Called by the master when the slave is considered not to be on the bus
+ * anymore. Used to free memory.
+ */
+static void w1_therm_remove_slave(struct w1_slave *sl);
+
+/* Family attributes */
static struct attribute *w1_therm_attrs[] = {
&dev_attr_w1_slave.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_ext_power.attr,
+ &dev_attr_resolution.attr,
+ &dev_attr_eeprom.attr,
+ &dev_attr_alarms.attr,
+ NULL,
+};
+
+static struct attribute *w1_ds18s20_attrs[] = {
+ &dev_attr_w1_slave.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_ext_power.attr,
+ &dev_attr_eeprom.attr,
+ &dev_attr_alarms.attr,
NULL,
};
static struct attribute *w1_ds28ea00_attrs[] = {
&dev_attr_w1_slave.attr,
&dev_attr_w1_seq.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_ext_power.attr,
+ &dev_attr_resolution.attr,
+ &dev_attr_eeprom.attr,
+ &dev_attr_alarms.attr,
NULL,
};
+/* Attribute groups */
+
ATTRIBUTE_GROUPS(w1_therm);
+ATTRIBUTE_GROUPS(w1_ds18s20);
ATTRIBUTE_GROUPS(w1_ds28ea00);
#if IS_REACHABLE(CONFIG_HWMON)
@@ -154,6 +407,8 @@ static const struct hwmon_chip_info w1_chip_info = {
#define W1_CHIPINFO NULL
#endif
+/* Family operations */
+
static struct w1_family_ops w1_therm_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
@@ -161,6 +416,13 @@ static struct w1_family_ops w1_therm_fops = {
.chip_info = W1_CHIPINFO,
};
+static struct w1_family_ops w1_ds18s20_fops = {
+ .add_slave = w1_therm_add_slave,
+ .remove_slave = w1_therm_remove_slave,
+ .groups = w1_ds18s20_groups,
+ .chip_info = W1_CHIPINFO,
+};
+
static struct w1_family_ops w1_ds28ea00_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
@@ -168,9 +430,11 @@ static struct w1_family_ops w1_ds28ea00_fops = {
.chip_info = W1_CHIPINFO,
};
+/* Family binding operations struct */
+
static struct w1_family w1_therm_family_DS18S20 = {
.fid = W1_THERM_DS18S20,
- .fops = &w1_therm_fops,
+ .fops = &w1_ds18s20_fops,
};
static struct w1_family w1_therm_family_DS18B20 = {
@@ -193,377 +457,813 @@ static struct w1_family w1_therm_family_DS1825 = {
.fops = &w1_therm_fops,
};
-struct w1_therm_family_converter {
- u8 broken;
- u16 reserved;
- struct w1_family *f;
- int (*convert)(u8 rom[9]);
- int (*precision)(struct device *device, int val);
- int (*eeprom)(struct device *device);
-};
+/* Device dependent func */
+
+static inline int w1_DS18B20_convert_time(struct w1_slave *sl)
+{
+ int ret;
+
+ if (!sl->family_data)
+ return -ENODEV; /* device unknown */
+
+ /* return time in ms for conversion operation */
+ switch (SLAVE_RESOLUTION(sl)) {
+ case 9:
+ ret = 95;
+ break;
+ case 10:
+ ret = 190;
+ break;
+ case 11:
+ ret = 375;
+ break;
+ case 12:
+ default:
+ ret = 750;
+ }
+ return ret;
+}
+
+static inline int w1_DS18S20_convert_time(struct w1_slave *sl)
+{
+ (void)(sl);
+ return 750; /* always 750ms for DS18S20 */
+}
+
+static inline int w1_DS18B20_write_data(struct w1_slave *sl,
+ const u8 *data)
+{
+ return write_scratchpad(sl, data, 3);
+}
+
+static inline int w1_DS18S20_write_data(struct w1_slave *sl,
+ const u8 *data)
+{
+ /* No config register */
+ return write_scratchpad(sl, data, 2);
+}
+
+static inline int w1_DS18B20_set_resolution(struct w1_slave *sl, int val)
+{
+ int ret;
+ u8 new_config_register[3]; /* array of data to be written */
+ struct therm_info info;
+
+ /* resolution of DS18B20 is in the range [9..12] bits */
+ if (val < 9 || val > 12)
+ return -EINVAL;
+
+ val -= 9; /* subtract 9, the lowest resolution in bits */
+ val = (val << 5); /* shift to position bit 5 & bit 6 */
+
+ /*
+ * Read the scratchpad to change only the required bits
+ * (bit5 & bit 6 from byte 4)
+ */
+ ret = read_scratchpad(sl, &info);
+ if (!ret) {
+ new_config_register[0] = info.rom[2];
+ new_config_register[1] = info.rom[3];
+ /* config register is byte 4 & mask 0b10011111 */
+ new_config_register[2] = (info.rom[4] & 0x9F) |
+ (u8) val;
+ } else
+ return ret;
+
+ /* Write data in the device RAM */
+ ret = w1_DS18B20_write_data(sl, new_config_register);
+
+ return ret;
+}
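A worked example of the bit arithmetic above, with values taken from the
DS18B20 datasheet rather than from this patch: for val = 11, val - 9 = 2
and 2 << 5 = 0x40, which lands in the R1:R0 resolution bits (bits 6:5)
of the configuration byte:

	/* 9 bits -> 0x00, 10 -> 0x20, 11 -> 0x40, 12 -> 0x60 */
	new_config_register[2] = (info.rom[4] & 0x9F) | 0x40; /* 11-bit mode */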
+
+static inline int w1_DS18B20_get_resolution(struct w1_slave *sl)
+{
+ int ret;
+ u8 config_register;
+ struct therm_info info;
+
+ ret = read_scratchpad(sl, &info);
+
+ if (!ret) {
+ config_register = info.rom[4]; /* config register is byte 4 */
+ config_register &= 0x60; /* 0b01100000 keep only bit 5 & 6 */
+ config_register = (config_register >> 5); /* shift */
+ config_register += 9; /* add 9, the lowest resolution in bits */
+ ret = (int) config_register;
+ }
+ return ret;
+}
-/* write configuration to eeprom */
-static inline int w1_therm_eeprom(struct device *device);
+/**
+ * w1_DS18B20_convert_temp() - temperature computation for DS18B20
+ * @rom: data read from device RAM (8 data bytes + 1 CRC byte)
+ *
+ * Can be called for any DS18B20 compliant device.
+ *
+ * Return: value in millidegrees Celsius.
+ */
+static inline int w1_DS18B20_convert_temp(u8 rom[9])
+{
+ s16 t = le16_to_cpup((__le16 *)rom);
-/* Set precision for conversion */
-static inline int w1_DS18B20_precision(struct device *device, int val);
-static inline int w1_DS18S20_precision(struct device *device, int val);
+ return t*1000/16;
+}
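A worked example with a hypothetical scratchpad reading: rom[0] = 0x91
and rom[1] = 0x01 give t = 0x0191 = 401 LSBs, and 401 * 1000 / 16 =
25062, i.e. about 25.06 degC (the DS18B20 LSB is 1/16 degC at 12-bit
resolution):

	/* rom[0] = 0x91, rom[1] = 0x01:
	 *   t = 0x0191 = 401
	 *   401 * 1000 / 16 = 25062 mdegC
	 */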
-/* The return value is millidegrees Centigrade. */
-static inline int w1_DS18B20_convert_temp(u8 rom[9]);
-static inline int w1_DS18S20_convert_temp(u8 rom[9]);
+/**
+ * w1_DS18S20_convert_temp() - temperature computation for DS18S20
+ * @rom: data read from device RAM (8 data bytes + 1 CRC byte)
+ *
+ * Can be called for any DS18S20 compliant device.
+ *
+ * Return: value in millidegrees Celsius.
+ */
+static inline int w1_DS18S20_convert_temp(u8 rom[9])
+{
+ int t, h;
+
+ if (!rom[7]) {
+ pr_debug("%s: Invalid argument for conversion\n", __func__);
+ return 0;
+ }
+
+ if (rom[1] == 0)
+ t = ((s32)rom[0] >> 1)*1000;
+ else
+ t = 1000*(-1*(s32)(0x100-rom[0]) >> 1);
+
+ t -= 250;
+ h = 1000*((s32)rom[7] - (s32)rom[6]);
+ h /= (s32)rom[7];
+ t += h;
+
+ return t;
+}
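A worked example of the extended-resolution formula above, with
hypothetical scratchpad values rom[0] = 0x32 (raw 50, i.e. 25 degC after
the >> 1), rom[1] = 0, COUNT_REMAIN rom[6] = 0x06 and COUNT_PER_C
rom[7] = 0x10:

	t = 25 * 1000;			/* 25000 */
	t -= 250;			/* 24750 */
	t += 1000 * (16 - 6) / 16;	/* +625 -> 25375 mdegC */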
+
+/* Device capability description */
static struct w1_therm_family_converter w1_therm_families[] = {
{
- .f = &w1_therm_family_DS18S20,
- .convert = w1_DS18S20_convert_temp,
- .precision = w1_DS18S20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS18S20,
+ .convert = w1_DS18S20_convert_temp,
+ .get_conversion_time = w1_DS18S20_convert_time,
+ .set_resolution = NULL, /* no config register */
+ .get_resolution = NULL, /* no config register */
+ .write_data = w1_DS18S20_write_data,
+ .bulk_read = true
},
{
- .f = &w1_therm_family_DS1822,
- .convert = w1_DS18B20_convert_temp,
- .precision = w1_DS18S20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS1822,
+ .convert = w1_DS18B20_convert_temp,
+ .get_conversion_time = w1_DS18B20_convert_time,
+ .set_resolution = w1_DS18B20_set_resolution,
+ .get_resolution = w1_DS18B20_get_resolution,
+ .write_data = w1_DS18B20_write_data,
+ .bulk_read = true
},
{
- .f = &w1_therm_family_DS18B20,
- .convert = w1_DS18B20_convert_temp,
- .precision = w1_DS18B20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS18B20,
+ .convert = w1_DS18B20_convert_temp,
+ .get_conversion_time = w1_DS18B20_convert_time,
+ .set_resolution = w1_DS18B20_set_resolution,
+ .get_resolution = w1_DS18B20_get_resolution,
+ .write_data = w1_DS18B20_write_data,
+ .bulk_read = true
},
{
- .f = &w1_therm_family_DS28EA00,
- .convert = w1_DS18B20_convert_temp,
- .precision = w1_DS18S20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS28EA00,
+ .convert = w1_DS18B20_convert_temp,
+ .get_conversion_time = w1_DS18B20_convert_time,
+ .set_resolution = w1_DS18B20_set_resolution,
+ .get_resolution = w1_DS18B20_get_resolution,
+ .write_data = w1_DS18B20_write_data,
+ .bulk_read = false
},
{
- .f = &w1_therm_family_DS1825,
- .convert = w1_DS18B20_convert_temp,
- .precision = w1_DS18S20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS1825,
+ .convert = w1_DS18B20_convert_temp,
+ .get_conversion_time = w1_DS18B20_convert_time,
+ .set_resolution = w1_DS18B20_set_resolution,
+ .get_resolution = w1_DS18B20_get_resolution,
+ .write_data = w1_DS18B20_write_data,
+ .bulk_read = true
}
};
-static inline int w1_therm_eeprom(struct device *device)
+/* Helpers Functions */
+
+/**
+ * device_family() - Retrieve a pointer on &struct w1_therm_family_converter
+ * @sl: slave to retrieve the device specific structure
+ *
+ * Return: pointer to the slave's family converter, NULL if not known
+ */
+static struct w1_therm_family_converter *device_family(struct w1_slave *sl)
{
- struct w1_slave *sl = dev_to_w1_slave(device);
- struct w1_master *dev = sl->master;
- u8 rom[9], external_power;
- int ret, max_trying = 10;
- u8 *family_data = sl->family_data;
+ struct w1_therm_family_converter *ret = NULL;
+ int i;
- if (!sl->family_data) {
- ret = -ENODEV;
- goto error;
+ for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
+ if (w1_therm_families[i].f->fid == sl->family->fid) {
+ ret = &w1_therm_families[i];
+ break;
+ }
}
+ return ret;
+}
- /* prevent the slave from going away in sleep */
- atomic_inc(THERM_REFCNT(family_data));
+/**
+ * bus_mutex_lock() - Acquire the mutex
+ * @lock: w1 bus mutex to acquire
+ *
+ * It tries to acquire the mutex up to W1_THERM_MAX_TRY times, waiting
+ * W1_THERM_RETRY_DELAY ms between two attempts.
+ *
+ * Return: true if the mutex is acquired and locked, false otherwise
+ */
+static inline bool bus_mutex_lock(struct mutex *lock)
+{
+ int max_trying = W1_THERM_MAX_TRY;
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto dec_refcnt;
+ /* try to acquire the mutex; if not, sleep retry_delay before retrying */
+ while (mutex_lock_interruptible(lock) != 0 && max_trying > 0) {
+ unsigned long sleep_rem;
- memset(rom, 0, sizeof(rom));
+ sleep_rem = msleep_interruptible(W1_THERM_RETRY_DELAY);
+ if (!sleep_rem)
+ max_trying--;
+ }
- while (max_trying--) {
- if (!w1_reset_select_slave(sl)) {
- unsigned int tm = 10;
- unsigned long sleep_rem;
+ if (!max_trying)
+ return false; /* Didn't acquire the bus mutex */
+
+ return true;
+}
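Every hardware function below wraps its bus traffic in the same pattern
around this helper; a minimal sketch:

	if (!bus_mutex_lock(&dev_master->bus_mutex)) {
		ret = -EAGAIN;	/* didn't acquire the mutex */
		goto dec_refcnt;
	}
	/* ... bus traffic ... */
	mutex_unlock(&dev_master->bus_mutex);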
- /* check if in parasite mode */
- w1_write_8(dev, W1_READ_PSUPPLY);
- external_power = w1_read_8(dev);
+/**
+ * bulk_read_support() - check if the slave supports bulk read
+ * @sl: device to check the ability of
+ *
+ * Return: true if bulk read is supported, false if not or on error
+ */
+static inline bool bulk_read_support(struct w1_slave *sl)
+{
+ if (SLAVE_SPECIFIC_FUNC(sl))
+ return SLAVE_SPECIFIC_FUNC(sl)->bulk_read;
+
+ dev_info(&sl->dev,
+ "%s: Device not supported by the driver\n", __func__);
+
+ return false; /* No device family */
+}
+
+/**
+ * conversion_time() - get the Tconv for the slave
+ * @sl: device to get the conversion time
+ *
+ * On devices supporting resolution settings, the conversion time depends
+ * on the resolution setting. This helper function gets the slave timing,
+ * depending on its current setting.
+ *
+ * Return: conversion time in ms, negative values are kernel error code
+ */
+static inline int conversion_time(struct w1_slave *sl)
+{
+ if (SLAVE_SPECIFIC_FUNC(sl))
+ return SLAVE_SPECIFIC_FUNC(sl)->get_conversion_time(sl);
+
+ dev_info(&sl->dev,
+ "%s: Device not supported by the driver\n", __func__);
+
+ return -ENODEV; /* No device family */
+}
+
+/**
+ * temperature_from_RAM() - Convert the read info to temperature
+ * @sl: device that sent the RAM data
+ * @rom: read value on the slave device RAM
+ *
+ * Device dependent, the function binds the correct computation method.
+ *
+ * Return: temperature in 1/1000degC, 0 on error.
+ */
+static inline int temperature_from_RAM(struct w1_slave *sl, u8 rom[9])
+{
+ if (SLAVE_SPECIFIC_FUNC(sl))
+ return SLAVE_SPECIFIC_FUNC(sl)->convert(rom);
- if (w1_reset_select_slave(sl))
- continue;
+ dev_info(&sl->dev,
+ "%s: Device not supported by the driver\n", __func__);
- /* 10ms strong pullup/delay after the copy command */
- if (w1_strong_pullup == 2 ||
- (!external_power && w1_strong_pullup))
- w1_next_pullup(dev, tm);
+ return 0; /* No device family */
+}
+
+/**
+ * int_to_short() - Safe casting of int to short
+ *
+ * @i: integer to be converted to short
+ *
+ * Device register use 1 byte to store signed integer.
+ * This helper function convert the int in a signed short,
+ * using the min/max values that device can measure as limits.
+ * min/max values are defined by macro.
+ *
+ * Return: a short in the range of min/max value
+ */
+static inline s8 int_to_short(int i)
+{
+ /* Prepare to cast to short by eliminating out of range values */
+ i = i > MAX_TEMP ? MAX_TEMP : i;
+ i = i < MIN_TEMP ? MIN_TEMP : i;
+ return (s8) i;
+}
- w1_write_8(dev, W1_COPY_SCRATCHPAD);
+/* Interface Functions */
- if (external_power) {
- mutex_unlock(&dev->bus_mutex);
+static int w1_therm_add_slave(struct w1_slave *sl)
+{
+ struct w1_therm_family_converter *sl_family_conv;
+
+ /* Allocate memory */
+ sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
+ GFP_KERNEL);
+ if (!sl->family_data)
+ return -ENOMEM;
+
+ atomic_set(THERM_REFCNT(sl->family_data), 1);
+
+ /* Get a pointer to the device specific function struct */
+ sl_family_conv = device_family(sl);
+ if (!sl_family_conv) {
+ kfree(sl->family_data);
+ return -ENODEV;
+ }
+ /* save this pointer to the device structure */
+ SLAVE_SPECIFIC_FUNC(sl) = sl_family_conv;
+
+ if (bulk_read_support(sl)) {
+ /*
+ * add the sysfs entry to trigger bulk_read
+ * at master level, only the first time
+ */
+ if (!bulk_read_device_counter) {
+ int err = device_create_file(&sl->master->dev,
+ &dev_attr_therm_bulk_read);
+
+ if (err)
+ dev_warn(&sl->dev,
+ "%s: Device has been added, but bulk read is unavailable. err=%d\n",
+ __func__, err);
+ }
+ /* Increment the counter */
+ bulk_read_device_counter++;
+ }
+
+ /* Getting the power mode of the device {external, parasite} */
+ SLAVE_POWERMODE(sl) = read_powermode(sl);
+
+ if (SLAVE_POWERMODE(sl) < 0) {
+ /* no error returned as device has been added */
+ dev_warn(&sl->dev,
+ "%s: Device has been added, but power_mode may be corrupted. err=%d\n",
+ __func__, SLAVE_POWERMODE(sl));
+ }
+
+ /* Getting the resolution of the device */
+ if (SLAVE_SPECIFIC_FUNC(sl)->get_resolution) {
+ SLAVE_RESOLUTION(sl) =
+ SLAVE_SPECIFIC_FUNC(sl)->get_resolution(sl);
+ if (SLAVE_RESOLUTION(sl) < 0) {
+ /* no error returned as device has been added */
+ dev_warn(&sl->dev,
+ "%s:Device has been added, but resolution may be corrupted. err=%d\n",
+ __func__, SLAVE_RESOLUTION(sl));
+ }
+ }
+
+ /* Finally initialize convert_triggered flag */
+ SLAVE_CONVERT_TRIGGERED(sl) = 0;
+
+ return 0;
+}
+
+static void w1_therm_remove_slave(struct w1_slave *sl)
+{
+ int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
+
+ if (bulk_read_support(sl)) {
+ bulk_read_device_counter--;
+ /* Delete the entry if no more device support the feature */
+ if (!bulk_read_device_counter)
+ device_remove_file(&sl->master->dev,
+ &dev_attr_therm_bulk_read);
+ }
+
+ while (refcnt) {
+ msleep(1000);
+ refcnt = atomic_read(THERM_REFCNT(sl->family_data));
+ }
+ kfree(sl->family_data);
+ sl->family_data = NULL;
+}
+
+/* Hardware Functions */
+
+/* Safe version of reset_select_slave - avoid using the one in w1_io.c */
+static int reset_select_slave(struct w1_slave *sl)
+{
+ u8 match[9] = { W1_MATCH_ROM, };
+ u64 rn = le64_to_cpu(*((u64 *)&sl->reg_num));
+
+ if (w1_reset_bus(sl->master))
+ return -ENODEV;
+
+ memcpy(&match[1], &rn, 8);
+ w1_write_block(sl->master, match, 9);
+
+ return 0;
+}
+
+static int convert_t(struct w1_slave *sl, struct therm_info *info)
+{
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int t_conv;
+ int ret = -ENODEV;
+ bool strong_pullup;
- sleep_rem = msleep_interruptible(tm);
+ if (!sl->family_data)
+ goto error;
+
+ strong_pullup = (w1_strong_pullup == 2 ||
+ (!SLAVE_POWERMODE(sl) &&
+ w1_strong_pullup));
+
+ /* get conversion duration device and id dependent */
+ t_conv = conversion_time(sl);
+
+ memset(info->rom, 0, sizeof(info->rom));
+
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(sl->family_data));
+
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto dec_refcnt;
+ }
+
+ while (max_trying-- && ret) { /* ret should be 0 */
+
+ info->verdict = 0;
+ info->crc = 0;
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ unsigned long sleep_rem;
+
+ /* t_conv strong pullup (or delay) after the convert */
+ if (strong_pullup)
+ w1_next_pullup(dev_master, t_conv);
+
+ w1_write_8(dev_master, W1_CONVERT_TEMP);
+
+ if (strong_pullup) { /* some devices need a pullup */
+ sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
- goto dec_refcnt;
+ goto mt_unlock;
}
+ mutex_unlock(&dev_master->bus_mutex);
+ } else { /* no device needs a pullup */
+ mutex_unlock(&dev_master->bus_mutex);
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto dec_refcnt;
- } else if (!w1_strong_pullup) {
- sleep_rem = msleep_interruptible(tm);
+ sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
- goto mt_unlock;
+ goto dec_refcnt;
}
}
-
- break;
+ ret = read_scratchpad(sl, info);
+ goto dec_refcnt;
}
+
}
mt_unlock:
- mutex_unlock(&dev->bus_mutex);
+ mutex_unlock(&dev_master->bus_mutex);
dec_refcnt:
- atomic_dec(THERM_REFCNT(family_data));
+ atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
-/* DS18S20 does not feature configuration register */
-static inline int w1_DS18S20_precision(struct device *device, int val)
+static int read_scratchpad(struct w1_slave *sl, struct therm_info *info)
{
- return 0;
-}
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int ret = -ENODEV;
-static inline int w1_DS18B20_precision(struct device *device, int val)
-{
- struct w1_slave *sl = dev_to_w1_slave(device);
- struct w1_master *dev = sl->master;
- u8 rom[9], crc;
- int ret, max_trying = 10;
- u8 *family_data = sl->family_data;
- uint8_t precision_bits;
- uint8_t mask = 0x60;
+ info->verdict = 0;
- if (val > 12 || val < 9) {
- pr_warn("Unsupported precision\n");
- ret = -EINVAL;
+ if (!sl->family_data)
goto error;
- }
- if (!sl->family_data) {
- ret = -ENODEV;
- goto error;
- }
+ memset(info->rom, 0, sizeof(info->rom));
/* prevent the slave from going away in sleep */
- atomic_inc(THERM_REFCNT(family_data));
+ atomic_inc(THERM_REFCNT(sl->family_data));
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
+ }
- memset(rom, 0, sizeof(rom));
+ while (max_trying-- && ret) { /* ret should be 0 */
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ u8 nb_bytes_read;
+
+ w1_write_8(dev_master, W1_READ_SCRATCHPAD);
+
+ nb_bytes_read = w1_read_block(dev_master, info->rom, 9);
+ if (nb_bytes_read != 9) {
+ dev_warn(&sl->dev,
+ "w1_read_block(): returned %u instead of 9.\n",
+ nb_bytes_read);
+ ret = -EIO;
+ }
+
+ info->crc = w1_calc_crc8(info->rom, 8);
+
+ if (info->rom[8] == info->crc) {
+ info->verdict = 1;
+ ret = 0;
+ } else
+ ret = -EIO; /* CRC mismatch */
+ }
- /* translate precision to bitmask (see datasheet page 9) */
- switch (val) {
- case 9:
- precision_bits = 0x00;
- break;
- case 10:
- precision_bits = 0x20;
- break;
- case 11:
- precision_bits = 0x40;
- break;
- case 12:
- default:
- precision_bits = 0x60;
- break;
}
+ mutex_unlock(&dev_master->bus_mutex);
- while (max_trying--) {
- crc = 0;
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(sl->family_data));
+error:
+ return ret;
+}
- if (!w1_reset_select_slave(sl)) {
- int count = 0;
+static int write_scratchpad(struct w1_slave *sl, const u8 *data, u8 nb_bytes)
+{
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int ret = -ENODEV;
- /* read values to only alter precision bits */
- w1_write_8(dev, W1_READ_SCRATCHPAD);
- count = w1_read_block(dev, rom, 9);
- if (count != 9)
- dev_warn(device, "w1_read_block() returned %u instead of 9.\n", count);
+ if (!sl->family_data)
+ goto error;
- crc = w1_calc_crc8(rom, 8);
- if (rom[8] == crc) {
- rom[4] = (rom[4] & ~mask) | (precision_bits & mask);
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(sl->family_data));
- if (!w1_reset_select_slave(sl)) {
- w1_write_8(dev, W1_WRITE_SCRATCHPAD);
- w1_write_8(dev, rom[2]);
- w1_write_8(dev, rom[3]);
- w1_write_8(dev, rom[4]);
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto dec_refcnt;
+ }
- break;
- }
- }
+ while (max_trying-- && ret) { /* ret should be 0 */
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ w1_write_8(dev_master, W1_WRITE_SCRATCHPAD);
+ w1_write_block(dev_master, data, nb_bytes);
+ ret = 0;
}
}
+ mutex_unlock(&dev_master->bus_mutex);
- mutex_unlock(&dev->bus_mutex);
dec_refcnt:
- atomic_dec(THERM_REFCNT(family_data));
+ atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
-static inline int w1_DS18B20_convert_temp(u8 rom[9])
+static int copy_scratchpad(struct w1_slave *sl)
{
- s16 t = le16_to_cpup((__le16 *)rom);
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int t_write, ret = -ENODEV;
+ bool strong_pullup;
- return t*1000/16;
-}
+ if (!sl->family_data)
+ goto error;
-static inline int w1_DS18S20_convert_temp(u8 rom[9])
-{
- int t, h;
+ t_write = W1_THERM_EEPROM_WRITE_DELAY;
+ strong_pullup = (w1_strong_pullup == 2 ||
+ (!SLAVE_POWERMODE(sl) &&
+ w1_strong_pullup));
- if (!rom[7])
- return 0;
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(sl->family_data));
- if (rom[1] == 0)
- t = ((s32)rom[0] >> 1)*1000;
- else
- t = 1000*(-1*(s32)(0x100-rom[0]) >> 1);
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto dec_refcnt;
+ }
- t -= 250;
- h = 1000*((s32)rom[7] - (s32)rom[6]);
- h /= (s32)rom[7];
- t += h;
+ while (max_trying-- && ret) { /* ret should be 0 */
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ unsigned long sleep_rem;
- return t;
-}
+ /* 10ms strong pullup (or delay) after the copy command */
+ if (strong_pullup)
+ w1_next_pullup(dev_master, t_write);
-static inline int w1_convert_temp(u8 rom[9], u8 fid)
-{
- int i;
+ w1_write_8(dev_master, W1_COPY_SCRATCHPAD);
- for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i)
- if (w1_therm_families[i].f->fid == fid)
- return w1_therm_families[i].convert(rom);
+ if (strong_pullup) {
+ sleep_rem = msleep_interruptible(t_write);
+ if (sleep_rem != 0) {
+ ret = -EINTR;
+ goto mt_unlock;
+ }
+ }
+ ret = 0;
+ }
- return 0;
+ }
+
+mt_unlock:
+ mutex_unlock(&dev_master->bus_mutex);
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(sl->family_data));
+error:
+ return ret;
}
-static ssize_t w1_slave_store(struct device *device,
- struct device_attribute *attr, const char *buf,
- size_t size)
+static int recall_eeprom(struct w1_slave *sl)
{
- int val, ret;
- struct w1_slave *sl = dev_to_w1_slave(device);
- int i;
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int ret = -ENODEV;
- ret = kstrtoint(buf, 0, &val);
- if (ret)
- return ret;
+ if (!sl->family_data)
+ goto error;
- for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
- if (w1_therm_families[i].f->fid == sl->family->fid) {
- /* zero value indicates to write current configuration to eeprom */
- if (val == 0)
- ret = w1_therm_families[i].eeprom(device);
- else
- ret = w1_therm_families[i].precision(device, val);
- break;
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(sl->family_data));
+
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto dec_refcnt;
+ }
+
+ while (max_trying-- && ret) { /* ret should be 0 */
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+
+ w1_write_8(dev_master, W1_RECALL_EEPROM);
+
+ ret = 1; /* Slave will pull line to 0 */
+ while (ret)
+ ret = 1 - w1_touch_bit(dev_master, 1);
}
+
}
- return ret ? : size;
+
+ mutex_unlock(&dev_master->bus_mutex);
+
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(sl->family_data));
+error:
+ return ret;
}
-static ssize_t read_therm(struct device *device,
- struct w1_slave *sl, struct therm_info *info)
+static int read_powermode(struct w1_slave *sl)
{
- struct w1_master *dev = sl->master;
- u8 external_power;
- int ret, max_trying = 10;
- u8 *family_data = sl->family_data;
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int ret = -ENODEV;
- if (!family_data) {
- ret = -ENODEV;
+ if (!sl->family_data)
goto error;
- }
/* prevent the slave from going away in sleep */
- atomic_inc(THERM_REFCNT(family_data));
+ atomic_inc(THERM_REFCNT(sl->family_data));
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
+ }
- memset(info->rom, 0, sizeof(info->rom));
+ while ((max_trying--) && (ret < 0)) {
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ w1_write_8(dev_master, W1_READ_PSUPPLY);
+ /*
+ * Emit a read time slot and read only one bit,
+ * 1 is externally powered,
+ * 0 is parasite powered
+ */
+ ret = w1_touch_bit(dev_master, 1);
+ /* ret should be either 1 or 0 */
+ }
+ }
+ mutex_unlock(&dev_master->bus_mutex);
- while (max_trying--) {
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(sl->family_data));
+error:
+ return ret;
+}
- info->verdict = 0;
- info->crc = 0;
+static int trigger_bulk_read(struct w1_master *dev_master)
+{
+ struct w1_slave *sl = NULL; /* used to iterate through slaves */
+ int max_trying = W1_THERM_MAX_TRY;
+ int t_conv = 0;
+ int ret = -ENODEV;
+ bool strong_pullup = false;
+
+ /*
+ * Check whether there are parasite powered devices on the bus,
+ * and compute the duration of conversion for these devices
+ * so we can apply a strong pullup if required
+ */
+ list_for_each_entry(sl, &dev_master->slist, w1_slave_entry) {
+ if (!sl->family_data)
+ goto error;
+ if (bulk_read_support(sl)) {
+ int t_cur = conversion_time(sl);
+
+ t_conv = t_cur > t_conv ? t_cur : t_conv;
+ strong_pullup = strong_pullup ||
+ (w1_strong_pullup == 2 ||
+ (!SLAVE_POWERMODE(sl) &&
+ w1_strong_pullup));
+ }
+ }
- if (!w1_reset_select_slave(sl)) {
- int count = 0;
- unsigned int tm = 750;
- unsigned long sleep_rem;
+ /*
+ * t_conv is the max conversion time required on the bus.
+ * If it's 0, no device supports the bulk read feature
+ */
+ if (!t_conv)
+ goto error;
- w1_write_8(dev, W1_READ_PSUPPLY);
- external_power = w1_read_8(dev);
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto error;
+ }
- if (w1_reset_select_slave(sl))
- continue;
+ while ((max_trying--) && (ret < 0)) { /* loop until ret is 0 */
- /* 750ms strong pullup (or delay) after the convert */
- if (w1_strong_pullup == 2 ||
- (!external_power && w1_strong_pullup))
- w1_next_pullup(dev, tm);
+ if (!w1_reset_bus(dev_master)) { /* Just reset the bus */
+ unsigned long sleep_rem;
- w1_write_8(dev, W1_CONVERT_TEMP);
+ w1_write_8(dev_master, W1_SKIP_ROM);
- if (external_power) {
- mutex_unlock(&dev->bus_mutex);
+ if (strong_pullup) /* Apply pullup if required */
+ w1_next_pullup(dev_master, t_conv);
- sleep_rem = msleep_interruptible(tm);
- if (sleep_rem != 0) {
- ret = -EINTR;
- goto dec_refcnt;
- }
+ w1_write_8(dev_master, W1_CONVERT_TEMP);
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto dec_refcnt;
- } else if (!w1_strong_pullup) {
- sleep_rem = msleep_interruptible(tm);
+ /* set a flag to indicate that a converT is pending */
+ list_for_each_entry(sl,
+ &dev_master->slist, w1_slave_entry) {
+ if (bulk_read_support(sl))
+ SLAVE_CONVERT_TRIGGERED(sl) = -1;
+ }
+
+ if (strong_pullup) { /* some device need pullup */
+ sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
goto mt_unlock;
}
- }
-
- if (!w1_reset_select_slave(sl)) {
-
- w1_write_8(dev, W1_READ_SCRATCHPAD);
- count = w1_read_block(dev, info->rom, 9);
- if (count != 9) {
- dev_warn(device, "w1_read_block() "
- "returned %u instead of 9.\n",
- count);
+ mutex_unlock(&dev_master->bus_mutex);
+ } else {
+ mutex_unlock(&dev_master->bus_mutex);
+ sleep_rem = msleep_interruptible(t_conv);
+ if (sleep_rem != 0) {
+ ret = -EINTR;
+ goto set_flag;
}
-
- info->crc = w1_calc_crc8(info->rom, 8);
-
- if (info->rom[8] == info->crc)
- info->verdict = 1;
}
+ ret = 0;
+ goto set_flag;
}
-
- if (info->verdict)
- break;
}
mt_unlock:
- mutex_unlock(&dev->bus_mutex);
-dec_refcnt:
- atomic_dec(THERM_REFCNT(family_data));
+ mutex_unlock(&dev_master->bus_mutex);
+set_flag:
+ /* set a flag to register that conversion is done */
+ list_for_each_entry(sl, &dev_master->slist, w1_slave_entry) {
+ if (bulk_read_support(sl))
+ SLAVE_CONVERT_TRIGGERED(sl) = 1;
+ }
error:
return ret;
}
+/* Sysfs Interface definition */
+
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -572,43 +1272,405 @@ static ssize_t w1_slave_show(struct device *device,
u8 *family_data = sl->family_data;
int ret, i;
ssize_t c = PAGE_SIZE;
- u8 fid = sl->family->fid;
- ret = read_therm(device, sl, &info);
- if (ret)
- return ret;
+ if (bulk_read_support(sl)) {
+ if (SLAVE_CONVERT_TRIGGERED(sl) < 0) {
+ dev_dbg(device,
+ "%s: Conversion in progress, retry later\n",
+ __func__);
+ return 0;
+ } else if (SLAVE_CONVERT_TRIGGERED(sl) > 0) {
+ /* A bulk read has been issued, read the device RAM */
+ ret = read_scratchpad(sl, &info);
+ SLAVE_CONVERT_TRIGGERED(sl) = 0;
+ } else
+ ret = convert_t(sl, &info);
+ } else
+ ret = convert_t(sl, &info);
+
+ if (ret < 0) {
+ dev_dbg(device,
+ "%s: Temperature data may be corrupted. err=%d\n",
+ __func__, ret);
+ return 0;
+ }
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", info.rom[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
info.crc, (info.verdict) ? "YES" : "NO");
+
if (info.verdict)
memcpy(family_data, info.rom, sizeof(info.rom));
else
- dev_warn(device, "Read failed CRC check\n");
+ dev_warn(device, "%s: Read failed CRC check\n", __func__);
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
((u8 *)family_data)[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
- w1_convert_temp(info.rom, fid));
+ temperature_from_RAM(sl, info.rom));
+
ret = PAGE_SIZE - c;
return ret;
}
+static ssize_t w1_slave_store(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ int val, ret = 0;
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ ret = kstrtoint(buf, 10, &val); /* converting user entry to int */
+
+ if (ret) { /* conversion error */
+ dev_info(device,
+ "%s: conversion error. err= %d\n", __func__, ret);
+ return size; /* return size to avoid call back again */
+ }
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return size; /* No device family */
+ }
+
+ if (val == 0) /* val=0: trigger an EEPROM save */
+ ret = copy_scratchpad(sl);
+ else {
+ if (SLAVE_SPECIFIC_FUNC(sl)->set_resolution)
+ ret = SLAVE_SPECIFIC_FUNC(sl)->set_resolution(sl, val);
+ }
+
+ if (ret) {
+ dev_info(device,
+ "%s: writing error %d\n", __func__, ret);
+ /* return size to avoid call back again */
+ } else
+ SLAVE_RESOLUTION(sl) = val;
+
+ return size; /* always return size to avoid infinite calling */
+}
+
+static ssize_t temperature_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct therm_info info;
+ int ret = 0;
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return 0; /* No device family */
+ }
+
+ if (bulk_read_support(sl)) {
+ if (SLAVE_CONVERT_TRIGGERED(sl) < 0) {
+ dev_dbg(device,
+ "%s: Conversion in progress, retry later\n",
+ __func__);
+ return 0;
+ } else if (SLAVE_CONVERT_TRIGGERED(sl) > 0) {
+ /* A bulk read has been issued, read the device RAM */
+ ret = read_scratchpad(sl, &info);
+ SLAVE_CONVERT_TRIGGERED(sl) = 0;
+ } else
+ ret = convert_t(sl, &info);
+ } else
+ ret = convert_t(sl, &info);
+
+ if (ret < 0) {
+ dev_dbg(device,
+ "%s: Temperature data may be corrupted. err=%d\n",
+ __func__, ret);
+ return 0;
+ }
+
+ return sprintf(buf, "%d\n", temperature_from_RAM(sl, info.rom));
+}
+
+static ssize_t ext_power_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ if (!sl->family_data) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return 0; /* No device family */
+ }
+
+ /* Get the power mode of the device (external or parasite) */
+ SLAVE_POWERMODE(sl) = read_powermode(sl);
+
+ if (SLAVE_POWERMODE(sl) < 0) {
+ dev_dbg(device,
+ "%s: Power_mode may be corrupted. err=%d\n",
+ __func__, SLAVE_POWERMODE(sl));
+ }
+ return sprintf(buf, "%d\n", SLAVE_POWERMODE(sl));
+}
+
+static ssize_t resolution_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return 0; /* No device family */
+ }
+
+ /* get the correct function depending on the device */
+ SLAVE_RESOLUTION(sl) = SLAVE_SPECIFIC_FUNC(sl)->get_resolution(sl);
+ if (SLAVE_RESOLUTION(sl) < 0) {
+ dev_dbg(device,
+ "%s: Resolution may be corrupted. err=%d\n",
+ __func__, SLAVE_RESOLUTION(sl));
+ }
+
+ return sprintf(buf, "%d\n", SLAVE_RESOLUTION(sl));
+}
+
+static ssize_t resolution_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ int val;
+ int ret = 0;
+
+ ret = kstrtoint(buf, 10, &val); /* convert the user entry to an int */
+
+ if (ret) { /* conversion error */
+ dev_info(device,
+ "%s: conversion error. err=%d\n", __func__, ret);
+ return size; /* return size so we are not called back again */
+ }
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return size; /* No device family */
+ }
+
+ /*
+ * Don't validate the value entered by the user;
+ * only the device knows whether it is valid
+ */
+
+ /* get the correct function depending on the device */
+ ret = SLAVE_SPECIFIC_FUNC(sl)->set_resolution(sl, val);
+
+ if (ret) {
+ dev_info(device,
+ "%s: writing error %d\n", __func__, ret);
+ /* still return size so the write is not retried */
+ } else
+ SLAVE_RESOLUTION(sl) = val;
+
+ return size;
+}
+
+static ssize_t eeprom_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ int ret = -EINVAL; /* Invalid argument */
+
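+ /*
+ * sizeof() includes the terminating NUL, so a matching write is the
+ * command string plus exactly one extra character (e.g. a newline).
+ */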
+ if (size == sizeof(EEPROM_CMD_WRITE)) {
+ if (!strncmp(buf, EEPROM_CMD_WRITE, sizeof(EEPROM_CMD_WRITE) - 1))
+ ret = copy_scratchpad(sl);
+ } else if (size == sizeof(EEPROM_CMD_READ)) {
+ if (!strncmp(buf, EEPROM_CMD_READ, sizeof(EEPROM_CMD_READ) - 1))
+ ret = recall_eeprom(sl);
+ }
+
+ if (ret)
+ dev_info(device, "%s: error in process %d\n", __func__, ret);
+
+ return size;
+}
+
+static ssize_t alarms_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ int ret;
+ s8 th = 0, tl = 0;
+ struct therm_info scratchpad;
+
+ ret = read_scratchpad(sl, &scratchpad);
+
+ if (!ret) {
+ th = scratchpad.rom[2]; /* TH is byte 2 */
+ tl = scratchpad.rom[3]; /* TL is byte 3 */
+ } else {
+ dev_info(device,
+ "%s: error reading alarms register %d\n",
+ __func__, ret);
+ }
+
+ return sprintf(buf, "%hd %hd\n", tl, th);
+}
+
+static ssize_t alarms_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct therm_info info;
+ u8 new_config_register[3]; /* array of data to be written */
+ int temp, ret;
+ char *token = NULL;
+ s8 tl, th, tt; /* 1 byte per value + temporary for the swap */
+ char *p_args, *orig;
+
+ /* Make a writable copy of buf (it is const); +1 for the terminating NUL */
+ p_args = orig = kmalloc(size + 1, GFP_KERNEL);
+ if (!p_args) {
+ dev_warn(device,
+ "%s: error unable to allocate memory %d\n",
+ __func__, -ENOMEM);
+ return size;
+ }
+ strcpy(p_args, buf);
+
+ /* Split string using space char */
+ token = strsep(&p_args, " ");
+
+ if (!token) {
+ dev_info(device,
+ "%s: error parsing args %d\n", __func__, -EINVAL);
+ goto free_m;
+ }
+
+ /* Convert the first entry to an int */
+ ret = kstrtoint(token, 10, &temp);
+ if (ret) {
+ dev_info(device,
+ "%s: error parsing args %d\n", __func__, ret);
+ goto free_m;
+ }
+
+ tl = int_to_short(temp);
+
+ /* Split string using space char */
+ token = strsep(&p_args, " ");
+ if (!token) {
+ dev_info(device,
+ "%s: error parsing args %d\n", __func__, -EINVAL);
+ goto free_m;
+ }
+ /* Convert the second entry to an int */
+ ret = kstrtoint(token, 10, &temp);
+ if (ret) {
+ dev_info(device,
+ "%s: error parsing args %d\n", __func__, ret);
+ goto free_m;
+ }
+
+ /* Clamp out-of-range values before the cast */
+ th = int_to_short(temp);
+
+ /* Swap th and tl if they are out of order */
+ if (tl > th) {
+ tt = tl;
+ tl = th;
+ th = tt;
+ }
+
+ /*
+ * Read the scratchpad to change only the required bits
+ * (th : byte 2 - tl: byte 3)
+ */
+ ret = read_scratchpad(sl, &info);
+ if (!ret) {
+ new_config_register[0] = th; /* Byte 2 */
+ new_config_register[1] = tl; /* Byte 3 */
+ new_config_register[2] = info.rom[4];/* Byte 4 */
+ } else {
+ dev_info(device,
+ "%s: error reading from the slave device %d\n",
+ __func__, ret);
+ goto free_m;
+ }
+
+ /* Write data in the device RAM */
+ if (!SLAVE_SPECIFIC_FUNC(sl)) {
+ dev_info(device,
+ "%s: Device not supported by the driver %d\n",
+ __func__, -ENODEV);
+ goto free_m;
+ }
+
+ ret = SLAVE_SPECIFIC_FUNC(sl)->write_data(sl, new_config_register);
+ if (ret)
+ dev_info(device,
+ "%s: error writing to the slave device %d\n",
+ __func__, ret);
+
+free_m:
+ /* free allocated memory */
+ kfree(orig);
+
+ return size;
+}
+
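A hedged usage sketch for the alarms interface above; the device path is a hypothetical example, and since the handler swaps out-of-order values the argument order does not matter:

#include <stdio.h>

int main(void)
{
	/* Hypothetical slave id; two space-separated integers set TL and TH */
	FILE *f = fopen("/sys/bus/w1/devices/28-000005e2fdc3/alarms", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "-10 20\n");	/* TL = -10 degC, TH = 20 degC */
	return fclose(f);
}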
+static ssize_t therm_bulk_read_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct w1_master *dev_master = dev_to_w1_master(device);
+ int ret = -EINVAL; /* Invalid argument */
+
+ if (size == sizeof(BULK_TRIGGER_CMD))
+ if (!strncmp(buf, BULK_TRIGGER_CMD,
+ sizeof(BULK_TRIGGER_CMD) - 1))
+ ret = trigger_bulk_read(dev_master);
+
+ if (ret)
+ dev_info(device,
+ "%s: unable to trigger a bulk read on the bus. err=%d\n",
+ __func__, ret);
+
+ return size;
+}
+
+static ssize_t therm_bulk_read_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_master *dev_master = dev_to_w1_master(device);
+ struct w1_slave *sl = NULL;
+ int ret = 0;
+
+ list_for_each_entry(sl, &dev_master->slist, w1_slave_entry) {
+ if (sl->family_data) {
+ if (bulk_read_support(sl)) {
+ if (SLAVE_CONVERT_TRIGGERED(sl) == -1) {
+ ret = -1;
+ goto show_result;
+ }
+ if (SLAVE_CONVERT_TRIGGERED(sl) == 1)
+ /* continue to check other slaves */
+ ret = 1;
+ }
+ }
+ }
+show_result:
+ return sprintf(buf, "%d\n", ret);
+}
+
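To illustrate the trigger/status protocol implemented by the two functions above, here is a hedged userspace sketch. The master name and the BULK_TRIGGER_CMD value (assumed here to be "trigger"; the definition is not in this hunk) are examples:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/bus/w1/devices/w1_bus_master1/therm_bulk_read";
	char status[8];
	FILE *f;

	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("trigger\n", f);	/* assumed value of BULK_TRIGGER_CMD */
	fclose(f);

	/* Status: -1 conversion in progress, 1 results pending, 0 all read */
	do {
		usleep(100000);
		f = fopen(path, "r");
		if (!f)
			return 1;
		if (!fgets(status, sizeof(status), f)) {
			fclose(f);
			return 1;
		}
		fclose(f);
	} while (strcmp(status, "0\n"));

	return 0;
}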
#if IS_REACHABLE(CONFIG_HWMON)
static int w1_read_temp(struct device *device, u32 attr, int channel,
long *val)
{
struct w1_slave *sl = dev_get_drvdata(device);
struct therm_info info;
- u8 fid = sl->family->fid;
int ret;
switch (attr) {
case hwmon_temp_input:
- ret = read_therm(device, sl, &info);
+ ret = convert_t(sl, &info);
if (ret)
return ret;
@@ -617,7 +1679,7 @@ static int w1_read_temp(struct device *device, u32 attr, int channel,
return ret;
}
- *val = w1_convert_temp(info.rom, fid);
+ *val = temperature_from_RAM(sl, info.rom);
ret = 0;
break;
default:
@@ -666,7 +1728,7 @@ static ssize_t w1_seq_show(struct device *device,
if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
goto error;
- /* In case the bus fails to send 0xFF, limit*/
+ /* In case the bus fails to send 0xFF, limit */
for (i = 0; i <= 64; i++) {
if (w1_reset_bus(sl->master))
goto error;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0663c604bd64..55b910c453da 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -678,6 +678,7 @@ config TS4800_WATCHDOG
config TS72XX_WATCHDOG
tristate "TS-72XX SBC Watchdog"
depends on MACH_TS72XX || COMPILE_TEST
+ select WATCHDOG_CORE
help
Technologic Systems TS-7200, TS-7250 and TS-7260 boards have a
watchdog timer implemented in an external CPLD chip. Say Y here
@@ -867,6 +868,19 @@ config DIGICOLOR_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called digicolor_wdt.
+config ARM_SMC_WATCHDOG
+ tristate "ARM Secure Monitor Call based watchdog support"
+ depends on ARM || ARM64
+ depends on OF
+ depends on HAVE_ARM_SMCCC
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for a watchdog timer
+ implemented by the EL3 Secure Monitor on ARM platforms.
+ Requires firmware support.
+ To compile this driver as a module, choose M here: the
+ module will be called arm_smc_wdt.
+
config LPC18XX_WATCHDOG
tristate "LPC18xx/43xx Watchdog"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -1217,6 +1231,7 @@ config ITCO_WDT
depends on (X86 || IA64) && PCI
select WATCHDOG_CORE
depends on I2C || I2C=n
+ depends on MFD_INTEL_PMC_BXT || !MFD_INTEL_PMC_BXT
select LPC_ICH if !EXPERT
select I2C_I801 if !EXPERT && I2C
---help---
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 6de2e4ceef19..97bed1d3d97c 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_UNIPHIER_WATCHDOG) += uniphier_wdt.o
obj-$(CONFIG_RTD119X_WATCHDOG) += rtd119x_wdt.o
obj-$(CONFIG_SPRD_WATCHDOG) += sprd_wdt.o
obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o
+obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
diff --git a/drivers/watchdog/arm_smc_wdt.c b/drivers/watchdog/arm_smc_wdt.c
new file mode 100644
index 000000000000..8f3d0c3a005f
--- /dev/null
+++ b/drivers/watchdog/arm_smc_wdt.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Secure Monitor Call watchdog driver
+ *
+ * Copyright 2020 Google LLC.
+ * Julius Werner <jwerner@chromium.org>
+ * Based on mtk_wdt.c
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <uapi/linux/psci.h>
+
+#define DRV_NAME "arm_smc_wdt"
+#define DRV_VERSION "1.0"
+
+enum smcwd_call {
+ SMCWD_INIT = 0,
+ SMCWD_SET_TIMEOUT = 1,
+ SMCWD_ENABLE = 2,
+ SMCWD_PET = 3,
+ SMCWD_GET_TIMELEFT = 4,
+};
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+static unsigned int timeout;
+
+static int smcwd_call(struct watchdog_device *wdd, enum smcwd_call call,
+ unsigned long arg, struct arm_smccc_res *res)
+{
+ struct arm_smccc_res local_res;
+
+ if (!res)
+ res = &local_res;
+
+ arm_smccc_smc((u32)(uintptr_t)watchdog_get_drvdata(wdd), call, arg, 0,
+ 0, 0, 0, 0, res);
+
+ if (res->a0 == PSCI_RET_NOT_SUPPORTED)
+ return -ENODEV;
+ if (res->a0 == PSCI_RET_INVALID_PARAMS)
+ return -EINVAL;
+ if (res->a0 != PSCI_RET_SUCCESS)
+ return -EIO;
+ return 0;
+}
+
+static int smcwd_ping(struct watchdog_device *wdd)
+{
+ return smcwd_call(wdd, SMCWD_PET, 0, NULL);
+}
+
+static unsigned int smcwd_get_timeleft(struct watchdog_device *wdd)
+{
+ struct arm_smccc_res res;
+
+ smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, &res);
+ if (res.a0)
+ return 0;
+ return res.a1;
+}
+
+static int smcwd_set_timeout(struct watchdog_device *wdd, unsigned int timeout)
+{
+ int res;
+
+ res = smcwd_call(wdd, SMCWD_SET_TIMEOUT, timeout, NULL);
+ if (!res)
+ wdd->timeout = timeout;
+ return res;
+}
+
+static int smcwd_stop(struct watchdog_device *wdd)
+{
+ return smcwd_call(wdd, SMCWD_ENABLE, 0, NULL);
+}
+
+static int smcwd_start(struct watchdog_device *wdd)
+{
+ return smcwd_call(wdd, SMCWD_ENABLE, 1, NULL);
+}
+
+static const struct watchdog_info smcwd_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops smcwd_ops = {
+ .start = smcwd_start,
+ .stop = smcwd_stop,
+ .ping = smcwd_ping,
+ .set_timeout = smcwd_set_timeout,
+};
+
+static const struct watchdog_ops smcwd_timeleft_ops = {
+ .start = smcwd_start,
+ .stop = smcwd_stop,
+ .ping = smcwd_ping,
+ .set_timeout = smcwd_set_timeout,
+ .get_timeleft = smcwd_get_timeleft,
+};
+
+static int smcwd_probe(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd;
+ int err;
+ struct arm_smccc_res res;
+ u32 smc_func_id;
+
+ wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
+ if (!wdd)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, wdd);
+
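+ /* The SMC function id comes from DT; fall back to a default if absent */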
+ if (of_property_read_u32(pdev->dev.of_node, "arm,smc-id",
+ &smc_func_id))
+ smc_func_id = 0x82003D06;
+ watchdog_set_drvdata(wdd, (void *)(uintptr_t)smc_func_id);
+
+ err = smcwd_call(wdd, SMCWD_INIT, 0, &res);
+ if (err < 0)
+ return err;
+
+ wdd->info = &smcwd_info;
+ /* get_timeleft is optional */
+ if (smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL))
+ wdd->ops = &smcwd_ops;
+ else
+ wdd->ops = &smcwd_timeleft_ops;
+ wdd->timeout = res.a2;
+ wdd->max_timeout = res.a2;
+ wdd->min_timeout = res.a1;
+ wdd->parent = &pdev->dev;
+
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+ watchdog_set_nowayout(wdd, nowayout);
+ watchdog_init_timeout(wdd, timeout, &pdev->dev);
+ err = smcwd_set_timeout(wdd, wdd->timeout);
+ if (err)
+ return err;
+
+ err = devm_watchdog_register_device(&pdev->dev, wdd);
+ if (err)
+ return err;
+
+ dev_info(&pdev->dev,
+ "Watchdog registered (timeout=%d sec, nowayout=%d)\n",
+ wdd->timeout, nowayout);
+
+ return 0;
+}
+
+static const struct of_device_id smcwd_dt_ids[] = {
+ { .compatible = "arm,smc-wdt" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, smcwd_dt_ids);
+
+static struct platform_driver smcwd_driver = {
+ .probe = smcwd_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = smcwd_dt_ids,
+ },
+};
+
+module_platform_driver(smcwd_driver);
+
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds");
+
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Julius Werner <jwerner@chromium.org>");
+MODULE_DESCRIPTION("ARM Secure Monitor Call Watchdog Driver");
+MODULE_VERSION(DRV_VERSION);
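Since the driver registers through the watchdog core, the generic /dev/watchdog interface applies to it; a hedged usage sketch follows (the node number depends on registration order, and the magic close only stops the watchdog when nowayout is unset):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;
	int fd = open("/dev/watchdog0", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* routed to smcwd_set_timeout() */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* routed to smcwd_ping() */
	write(fd, "V", 1);			/* magic close (WDIOF_MAGICCLOSE) */
	close(fd);
	return 0;
}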
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index 0ad15d55071c..706fb09c2f24 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -35,6 +35,15 @@ struct da9062_watchdog {
bool use_sw_pm;
};
+static unsigned int da9062_wdt_read_timeout(struct da9062_watchdog *wdt)
+{
+ unsigned int val;
+
+ regmap_read(wdt->hw->regmap, DA9062AA_CONTROL_D, &val);
+
+ return wdt_timeout[val & DA9062AA_TWDSCALE_MASK];
+}
+
static unsigned int da9062_wdt_timeout_to_sel(unsigned int secs)
{
unsigned int i;
@@ -58,11 +67,6 @@ static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt,
unsigned int regval)
{
struct da9062 *chip = wdt->hw;
- int ret;
-
- ret = da9062_reset_watchdog_timer(wdt);
- if (ret)
- return ret;
regmap_update_bits(chip->regmap,
DA9062AA_CONTROL_D,
@@ -183,7 +187,7 @@ MODULE_DEVICE_TABLE(of, da9062_compatible_id_table);
static int da9062_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int ret;
+ unsigned int timeout;
struct da9062 *chip;
struct da9062_watchdog *wdt;
@@ -213,11 +217,19 @@ static int da9062_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&wdt->wdtdev, wdt);
dev_set_drvdata(dev, &wdt->wdtdev);
- ret = devm_watchdog_register_device(dev, &wdt->wdtdev);
- if (ret < 0)
- return ret;
+ timeout = da9062_wdt_read_timeout(wdt);
+ if (timeout)
+ wdt->wdtdev.timeout = timeout;
+
+ /* Set timeout from DT value if available */
+ watchdog_init_timeout(&wdt->wdtdev, 0, dev);
+
+ if (timeout) {
+ da9062_wdt_set_timeout(&wdt->wdtdev, wdt->wdtdev.timeout);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdtdev.status);
+ }
- return da9062_wdt_ping(&wdt->wdtdev);
+ return devm_watchdog_register_device(dev, &wdt->wdtdev);
}
static int __maybe_unused da9062_wdt_suspend(struct device *dev)
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 3d65e92a4e3f..423584252606 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -46,15 +46,16 @@ static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs)
}
/*
- * Return 0 if watchdog is disabled, else non zero.
+ * Read the currently active timeout.
+ * Zero means the watchdog is disabled.
*/
-static unsigned int da9063_wdt_is_running(struct da9063 *da9063)
+static unsigned int da9063_wdt_read_timeout(struct da9063 *da9063)
{
unsigned int val;
regmap_read(da9063->regmap, DA9063_REG_CONTROL_D, &val);
- return val & DA9063_TWDSCALE_MASK;
+ return wdt_timeout[val & DA9063_TWDSCALE_MASK];
}
static int da9063_wdt_disable_timer(struct da9063 *da9063)
@@ -191,6 +192,7 @@ static int da9063_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct da9063 *da9063;
struct watchdog_device *wdd;
+ unsigned int timeout;
if (!dev->parent)
return -EINVAL;
@@ -214,13 +216,19 @@ static int da9063_wdt_probe(struct platform_device *pdev)
watchdog_set_restart_priority(wdd, 128);
watchdog_set_drvdata(wdd, da9063);
- /* Set default timeout, maybe override it with DT value, scale it */
wdd->timeout = DA9063_WDG_TIMEOUT;
+
+ /* Use pre-configured timeout if watchdog is already running. */
+ timeout = da9063_wdt_read_timeout(da9063);
+ if (timeout)
+ wdd->timeout = timeout;
+
+ /* Set timeout, maybe override it with DT value, scale it */
watchdog_init_timeout(wdd, 0, dev);
da9063_wdt_set_timeout(wdd, wdd->timeout);
- /* Change the timeout to the default value if the watchdog is running */
- if (da9063_wdt_is_running(da9063)) {
+ /* Update timeout if the watchdog is already running. */
+ if (timeout) {
da9063_wdt_update_timeout(da9063, wdd->timeout);
set_bit(WDOG_HW_RUNNING, &wdd->status);
}
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index e707c4797f76..a370a185a41c 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -64,6 +64,7 @@
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
#include <linux/platform_data/itco_wdt.h>
+#include <linux/mfd/intel_pmc_bxt.h>
#include "iTCO_vendor.h"
@@ -233,12 +234,24 @@ static int update_no_reboot_bit_cnt(void *priv, bool set)
return val != newval ? -EIO : 0;
}
+static int update_no_reboot_bit_pmc(void *priv, bool set)
+{
+ struct intel_pmc_dev *pmc = priv;
+ u32 bits = PMC_CFG_NO_REBOOT_EN;
+ u32 value = set ? bits : 0;
+
+ return intel_pmc_gcr_update(pmc, PMC_GCR_PMC_CFG_REG, bits, value);
+}
+
static void iTCO_wdt_no_reboot_bit_setup(struct iTCO_wdt_private *p,
- struct itco_wdt_platform_data *pdata)
+ struct platform_device *pdev,
+ struct itco_wdt_platform_data *pdata)
{
- if (pdata->update_no_reboot_bit) {
- p->update_no_reboot_bit = pdata->update_no_reboot_bit;
- p->no_reboot_priv = pdata->no_reboot_priv;
+ if (pdata->no_reboot_use_pmc) {
+ struct intel_pmc_dev *pmc = dev_get_drvdata(pdev->dev.parent);
+
+ p->update_no_reboot_bit = update_no_reboot_bit_pmc;
+ p->no_reboot_priv = pmc;
return;
}
@@ -478,14 +491,14 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- iTCO_wdt_no_reboot_bit_setup(p, pdata);
+ iTCO_wdt_no_reboot_bit_setup(p, pdev, pdata);
/*
* Get the Memory-Mapped GCS or PMC register, we need it for the
* NO_REBOOT flag (TCO v2 and v3).
*/
if (p->iTCO_version >= 2 && p->iTCO_version < 6 &&
- !pdata->update_no_reboot_bit) {
+ !pdata->no_reboot_use_pmc) {
p->gcs_pmc_res = platform_get_resource(pdev,
IORESOURCE_MEM,
ICH_RES_MEM_GCS_PMC);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 1fe472f56cb3..b84f80f7d342 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -2,7 +2,7 @@
/*
* Watchdog driver for IMX2 and later processors
*
- * Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
+ * Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <kernel@pengutronix.de>
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* some parts adapted by similar drivers from Darius Augulis and Vladimir
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index 60a32469f7de..e9ee22a7cb45 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -175,6 +175,11 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
wdog->timeout = DEFAULT_TIMEOUT;
watchdog_init_timeout(wdog, 0, dev);
+
+ ret = imx_sc_wdt_set_timeout(wdog, wdog->timeout);
+ if (ret)
+ return ret;
+
watchdog_stop_on_reboot(wdog);
watchdog_stop_on_unregister(wdog);
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 470213abfd3d..1ae03b64ef8b 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -33,14 +33,24 @@ enum {
SCU_WATCHDOG_KEEPALIVE,
};
-static inline int wdt_command(int sub, u32 *in, int inlen)
+struct mid_wdt {
+ struct watchdog_device wd;
+ struct device *dev;
+ struct intel_scu_ipc_dev *scu;
+};
+
+static inline int
+wdt_command(struct mid_wdt *mid, int sub, const void *in, size_t inlen, size_t size)
{
- return intel_scu_ipc_command(IPC_WATCHDOG, sub, in, inlen, NULL, 0);
+ struct intel_scu_ipc_dev *scu = mid->scu;
+
+ return intel_scu_ipc_dev_command_with_size(scu, IPC_WATCHDOG, sub, in,
+ inlen, size, NULL, 0);
}
static int wdt_start(struct watchdog_device *wd)
{
- struct device *dev = watchdog_get_drvdata(wd);
+ struct mid_wdt *mid = watchdog_get_drvdata(wd);
int ret, in_size;
int timeout = wd->timeout;
struct ipc_wd_start {
@@ -49,38 +59,41 @@ static int wdt_start(struct watchdog_device *wd)
} ipc_wd_start = { timeout - MID_WDT_PRETIMEOUT, timeout };
/*
- * SCU expects the input size for watchdog IPC to
- * be based on 4 bytes
+ * SCU expects the input size for watchdog IPC to be 2 which is the
+ * size of the structure in dwords. SCU IPC normally takes bytes
+ * but this is a special case where the size argument differs
+ * from inlen.
*/
in_size = DIV_ROUND_UP(sizeof(ipc_wd_start), 4);
- ret = wdt_command(SCU_WATCHDOG_START, (u32 *)&ipc_wd_start, in_size);
+ ret = wdt_command(mid, SCU_WATCHDOG_START, &ipc_wd_start,
+ sizeof(ipc_wd_start), in_size);
if (ret)
- dev_crit(dev, "error starting watchdog: %d\n", ret);
+ dev_crit(mid->dev, "error starting watchdog: %d\n", ret);
return ret;
}
static int wdt_ping(struct watchdog_device *wd)
{
- struct device *dev = watchdog_get_drvdata(wd);
+ struct mid_wdt *mid = watchdog_get_drvdata(wd);
int ret;
- ret = wdt_command(SCU_WATCHDOG_KEEPALIVE, NULL, 0);
+ ret = wdt_command(mid, SCU_WATCHDOG_KEEPALIVE, NULL, 0, 0);
if (ret)
- dev_crit(dev, "Error executing keepalive: %d\n", ret);
+ dev_crit(mid->dev, "Error executing keepalive: %d\n", ret);
return ret;
}
static int wdt_stop(struct watchdog_device *wd)
{
- struct device *dev = watchdog_get_drvdata(wd);
+ struct mid_wdt *mid = watchdog_get_drvdata(wd);
int ret;
- ret = wdt_command(SCU_WATCHDOG_STOP, NULL, 0);
+ ret = wdt_command(mid, SCU_WATCHDOG_STOP, NULL, 0, 0);
if (ret)
- dev_crit(dev, "Error stopping watchdog: %d\n", ret);
+ dev_crit(mid->dev, "Error stopping watchdog: %d\n", ret);
return ret;
}
@@ -110,6 +123,7 @@ static int mid_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct watchdog_device *wdt_dev;
struct intel_mid_wdt_pdata *pdata = dev->platform_data;
+ struct mid_wdt *mid;
int ret;
if (!pdata) {
@@ -123,10 +137,13 @@ static int mid_wdt_probe(struct platform_device *pdev)
return ret;
}
- wdt_dev = devm_kzalloc(dev, sizeof(*wdt_dev), GFP_KERNEL);
- if (!wdt_dev)
+ mid = devm_kzalloc(dev, sizeof(*mid), GFP_KERNEL);
+ if (!mid)
return -ENOMEM;
+ mid->dev = dev;
+ wdt_dev = &mid->wd;
+
wdt_dev->info = &mid_wdt_info;
wdt_dev->ops = &mid_wdt_ops;
wdt_dev->min_timeout = MID_WDT_TIMEOUT_MIN;
@@ -135,7 +152,7 @@ static int mid_wdt_probe(struct platform_device *pdev)
wdt_dev->parent = dev;
watchdog_set_nowayout(wdt_dev, WATCHDOG_NOWAYOUT);
- watchdog_set_drvdata(wdt_dev, dev);
+ watchdog_set_drvdata(wdt_dev, mid);
ret = devm_request_irq(dev, pdata->irq, mid_wdt_irq,
IRQF_SHARED | IRQF_NO_SUSPEND, "watchdog",
@@ -145,6 +162,10 @@ static int mid_wdt_probe(struct platform_device *pdev)
return ret;
}
+ mid->scu = devm_intel_scu_ipc_dev_get(dev);
+ if (!mid->scu)
+ return -EPROBE_DEFER;
+
/*
* The firmware followed by U-Boot leaves the watchdog running
* with the default threshold which may vary. When we get here
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 22f335e1e164..60ed6252e5f4 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -29,6 +29,7 @@
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/coldfire.h>
#include <asm/m54xxsim.h>
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 9b91882fe3c4..1616f93dfad7 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -273,6 +273,7 @@ static int omap_wdt_probe(struct platform_device *pdev)
ret = watchdog_register_device(&wdev->wdog);
if (ret) {
+ pm_runtime_put(wdev->dev);
pm_runtime_disable(wdev->dev);
return ret;
}
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index dc3c06a92f93..1b9a6dc8f982 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -141,7 +141,7 @@ static long riowd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
default:
return -EINVAL;
- };
+ }
return 0;
}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index c6070e70dd73..b8ccb8990bfd 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -27,7 +27,6 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>